Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/acpi/Kconfig18
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpi_memhotplug.c108
-rw-r--r--drivers/acpi/acpi_pad.c3
-rw-r--r--drivers/acpi/acpi_platform.c72
-rw-r--r--drivers/acpi/acpica/Makefile10
-rw-r--r--drivers/acpi/acpica/accommon.h3
-rw-r--r--drivers/acpi/acpica/acdebug.h19
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h23
-rw-r--r--drivers/acpi/acpica/acglobal.h43
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h4
-rw-r--r--drivers/acpi/acpica/aclocal.h47
-rw-r--r--drivers/acpi/acpica/acmacros.h173
-rw-r--r--drivers/acpi/acpica/acnamesp.h16
-rw-r--r--drivers/acpi/acpica/acobject.h4
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h27
-rw-r--r--drivers/acpi/acpica/acpredef.h31
-rw-r--r--drivers/acpi/acpica/acresrc.h8
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h59
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h8
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c8
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c20
-rw-r--r--drivers/acpi/acpica/dsopcode.c16
-rw-r--r--drivers/acpi/acpica/dsutils.c12
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswload.c7
-rw-r--r--drivers/acpi/acpica/dswload2.c4
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c14
-rw-r--r--drivers/acpi/acpica/evgpeblk.c24
-rw-r--r--drivers/acpi/acpica/evgpeinit.c5
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c529
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c584
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c6
-rw-r--r--drivers/acpi/acpica/evxface.c36
-rw-r--r--drivers/acpi/acpica/evxfevnt.c7
-rw-r--r--drivers/acpi/acpica/evxfgpe.c11
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c22
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c21
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c3
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c5
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c10
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c6
-rw-r--r--drivers/acpi/acpica/exregion.c25
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c31
-rw-r--r--drivers/acpi/acpica/exstoren.c4
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c24
-rw-r--r--drivers/acpi/acpica/hwacpi.c13
-rw-r--r--drivers/acpi/acpica/hwesleep.c3
-rw-r--r--drivers/acpi/acpica/hwgpe.c10
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c8
-rw-r--r--drivers/acpi/acpica/hwsleep.c8
-rw-r--r--drivers/acpi/acpica/hwtimer.c9
-rw-r--r--drivers/acpi/acpica/hwvalid.c20
-rw-r--r--drivers/acpi/acpica/hwxface.c137
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c13
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c14
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c3
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c29
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c5
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c586
-rw-r--r--drivers/acpi/acpica/nsprepkg.c621
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c5
-rw-r--r--drivers/acpi/acpica/nssearch.c7
-rw-r--r--drivers/acpi/acpica/nsutils.c88
-rw-r--r--drivers/acpi/acpica/nswalk.c6
-rw-r--r--drivers/acpi/acpica/nsxfeval.c19
-rw-r--r--drivers/acpi/acpica/nsxfname.c20
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c9
-rw-r--r--drivers/acpi/acpica/psloop.c623
-rw-r--r--drivers/acpi/acpica/psobject.c647
-rw-r--r--drivers/acpi/acpica/psopcode.c174
-rw-r--r--drivers/acpi/acpica/psopinfo.c223
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c10
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c8
-rw-r--r--drivers/acpi/acpica/rscreate.c9
-rw-r--r--drivers/acpi/acpica/rsdump.c424
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c454
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c40
-rw-r--r--drivers/acpi/acpica/rslist.c9
-rw-r--r--drivers/acpi/acpica/rsmemory.c8
-rw-r--r--drivers/acpi/acpica/rsmisc.c76
-rw-r--r--drivers/acpi/acpica/rsserial.c10
-rw-r--r--drivers/acpi/acpica/rsutils.c14
-rw-r--r--drivers/acpi/acpica/rsxface.c107
-rw-r--r--drivers/acpi/acpica/tbfadt.c7
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c4
-rw-r--r--drivers/acpi/acpica/tbxface.c7
-rw-r--r--drivers/acpi/acpica/tbxfload.c4
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c6
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utclib.c749
-rw-r--r--drivers/acpi/acpica/utcopy.c6
-rw-r--r--drivers/acpi/acpica/utdebug.c120
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c70
-rw-r--r--drivers/acpi/acpica/uteval.c4
-rw-r--r--drivers/acpi/acpica/utexcep.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c11
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c16
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c830
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utobject.c4
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utownerid.c218
-rw-r--r--drivers/acpi/acpica/utresrc.c83
-rw-r--r--drivers/acpi/acpica/utstate.c42
-rw-r--r--drivers/acpi/acpica/utstring.c574
-rw-r--r--drivers/acpi/acpica/uttrack.c18
-rw-r--r--drivers/acpi/acpica/utxface.c6
-rw-r--r--drivers/acpi/acpica/utxferror.c6
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c2
-rw-r--r--drivers/acpi/apei/apei-base.c3
-rw-r--r--drivers/acpi/apei/cper.c19
-rw-r--r--drivers/acpi/apei/erst-dbg.c11
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/bus.c270
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/container.c211
-rw-r--r--drivers/acpi/csrt.c159
-rw-r--r--drivers/acpi/custom_method.c2
-rw-r--r--drivers/acpi/device_pm.c362
-rw-r--r--drivers/acpi/dock.c44
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/fan.c4
-rw-r--r--drivers/acpi/glue.c96
-rw-r--r--drivers/acpi/hed.c2
-rw-r--r--drivers/acpi/internal.h31
-rw-r--r--drivers/acpi/numa.c31
-rw-r--r--drivers/acpi/osl.c206
-rw-r--r--drivers/acpi/pci_bind.c122
-rw-r--r--drivers/acpi/pci_link.c47
-rw-r--r--drivers/acpi/pci_root.c101
-rw-r--r--drivers/acpi/pci_slot.c7
-rw-r--r--drivers/acpi/power.c735
-rw-r--r--drivers/acpi/proc.c9
-rw-r--r--drivers/acpi/processor_driver.c64
-rw-r--r--drivers/acpi/processor_idle.c52
-rw-r--r--drivers/acpi/processor_perflib.c7
-rw-r--r--drivers/acpi/sbs.c6
-rw-r--r--drivers/acpi/sbshc.c4
-rw-r--r--drivers/acpi/scan.c978
-rw-r--r--drivers/acpi/sleep.c97
-rw-r--r--drivers/acpi/sleep.h2
-rw-r--r--drivers/acpi/sysfs.c2
-rw-r--r--drivers/acpi/tables.c6
-rw-r--r--drivers/acpi/thermal.c10
-rw-r--r--drivers/acpi/video.c8
-rw-r--r--drivers/amba/bus.c4
-rw-r--r--drivers/amba/tegra-ahb.c13
-rw-r--r--drivers/ata/Kconfig26
-rw-r--r--drivers/ata/ahci.c101
-rw-r--r--drivers/ata/ahci.h6
-rw-r--r--drivers/ata/ahci_platform.c46
-rw-r--r--drivers/ata/ata_piix.c461
-rw-r--r--drivers/ata/libahci.c126
-rw-r--r--drivers/ata/libata-acpi.c22
-rw-r--r--drivers/ata/libata-core.c53
-rw-r--r--drivers/ata/libata-eh.c3
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/pata_arasan_cf.c13
-rw-r--r--drivers/ata/pata_at91.c6
-rw-r--r--drivers/ata/pata_bf54x.c6
-rw-r--r--drivers/ata/pata_cmd64x.c6
-rw-r--r--drivers/ata/pata_cs5520.c2
-rw-r--r--drivers/ata/pata_cs5536.c32
-rw-r--r--drivers/ata/pata_ep93xx.c19
-rw-r--r--drivers/ata/pata_icside.c21
-rw-r--r--drivers/ata/pata_imx.c8
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c13
-rw-r--r--drivers/ata/pata_macio.c33
-rw-r--r--drivers/ata/pata_mpc52xx.c33
-rw-r--r--drivers/ata/pata_octeon_cf.c425
-rw-r--r--drivers/ata/pata_of_platform.c10
-rw-r--r--drivers/ata/pata_palmld.c10
-rw-r--r--drivers/ata/pata_pdc2027x.c3
-rw-r--r--drivers/ata/pata_platform.c35
-rw-r--r--drivers/ata/pata_pxa.c6
-rw-r--r--drivers/ata/pata_rb532_cf.c6
-rw-r--r--drivers/ata/pata_rdc.c6
-rw-r--r--drivers/ata/pata_sch.c3
-rw-r--r--drivers/ata/pata_sil680.c3
-rw-r--r--[-rwxr-xr-x]drivers/ata/sata_dwc_460ex.c1
-rw-r--r--drivers/ata/sata_highbank.c16
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_mv.c8
-rw-r--r--drivers/ata/sata_promise.c15
-rw-r--r--drivers/ata/sata_sil24.c4
-rw-r--r--drivers/ata/sata_sx4.c14
-rw-r--r--drivers/ata/sata_vsc.c7
-rw-r--r--drivers/atm/ambassador.c53
-rw-r--r--drivers/atm/eni.c18
-rw-r--r--drivers/atm/firestream.c32
-rw-r--r--drivers/atm/fore200e.c70
-rw-r--r--drivers/atm/he.c36
-rw-r--r--drivers/atm/horizon.c12
-rw-r--r--drivers/atm/idt77252.c16
-rw-r--r--drivers/atm/iphase.c11
-rw-r--r--drivers/atm/iphase.h146
-rw-r--r--drivers/atm/lanai.c28
-rw-r--r--drivers/atm/nicstar.c18
-rw-r--r--drivers/atm/solos-pci.c188
-rw-r--r--drivers/atm/zatm.c31
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c10
-rw-r--r--drivers/base/Kconfig11
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/bus.c6
-rw-r--r--drivers/base/class.c4
-rw-r--r--drivers/base/core.c29
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/dd.c9
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/dma-buf.c11
-rw-r--r--drivers/base/dma-mapping.c4
-rw-r--r--drivers/base/firmware_class.c552
-rw-r--r--drivers/base/memory.c10
-rw-r--r--drivers/base/pinctrl.c69
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/main.c9
-rw-r--r--drivers/base/power/opp.c19
-rw-r--r--drivers/base/power/qos.c11
-rw-r--r--drivers/base/power/runtime.c89
-rw-r--r--drivers/base/power/wakeup.c6
-rw-r--r--drivers/base/regmap/Makefile2
-rw-r--r--drivers/base/regmap/internal.h22
-rw-r--r--drivers/base/regmap/regcache-flat.c72
-rw-r--r--drivers/base/regmap/regcache.c1
-rw-r--r--drivers/base/regmap/regmap-debugfs.c89
-rw-r--r--drivers/base/regmap/regmap-irq.c125
-rw-r--r--drivers/base/regmap/regmap-mmio.c79
-rw-r--r--drivers/base/regmap/regmap-spi.c54
-rw-r--r--drivers/base/regmap/regmap.c353
-rw-r--r--drivers/bcma/Kconfig8
-rw-r--r--drivers/bcma/Makefile1
-rw-r--r--drivers/bcma/bcma_private.h24
-rw-r--r--drivers/bcma/driver_chipcommon.c83
-rw-r--r--drivers/bcma/driver_chipcommon_nflash.c6
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c6
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c6
-rw-r--r--drivers/bcma/driver_gmac_cmn.c2
-rw-r--r--drivers/bcma/driver_gpio.c114
-rw-r--r--drivers/bcma/driver_mips.c195
-rw-r--r--drivers/bcma/driver_pci.c4
-rw-r--r--drivers/bcma/driver_pci_host.c75
-rw-r--r--drivers/bcma/host_pci.c8
-rw-r--r--drivers/bcma/main.c26
-rw-r--r--drivers/block/aoe/aoe.h57
-rw-r--r--drivers/block/aoe/aoeblk.c104
-rw-r--r--drivers/block/aoe/aoechr.c7
-rw-r--r--drivers/block/aoe/aoecmd.c715
-rw-r--r--drivers/block/aoe/aoedev.c243
-rw-r--r--drivers/block/aoe/aoemain.c2
-rw-r--r--drivers/block/aoe/aoenet.c15
-rw-r--r--drivers/block/cciss.c111
-rw-r--r--drivers/block/cpqarray.c16
-rw-r--r--drivers/block/drbd/Kconfig10
-rw-r--r--drivers/block/drbd/Makefile2
-rw-r--r--drivers/block/drbd/drbd_actlog.c702
-rw-r--r--drivers/block/drbd/drbd_bitmap.c249
-rw-r--r--drivers/block/drbd/drbd_int.h1365
-rw-r--r--drivers/block/drbd/drbd_interval.c207
-rw-r--r--drivers/block/drbd/drbd_interval.h40
-rw-r--r--drivers/block/drbd/drbd_main.c3781
-rw-r--r--drivers/block/drbd/drbd_nl.c3276
-rw-r--r--drivers/block/drbd/drbd_nla.c55
-rw-r--r--drivers/block/drbd/drbd_nla.h8
-rw-r--r--drivers/block/drbd/drbd_proc.c41
-rw-r--r--drivers/block/drbd/drbd_receiver.c3894
-rw-r--r--drivers/block/drbd/drbd_req.c1574
-rw-r--r--drivers/block/drbd/drbd_req.h188
-rw-r--r--drivers/block/drbd/drbd_state.c1863
-rw-r--r--drivers/block/drbd/drbd_state.h161
-rw-r--r--drivers/block/drbd/drbd_strings.c1
-rw-r--r--drivers/block/drbd/drbd_worker.c1237
-rw-r--r--drivers/block/drbd/drbd_wrappers.h11
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c24
-rw-r--r--drivers/block/nvme.c17
-rw-r--r--drivers/block/paride/Kconfig4
-rw-r--r--drivers/block/ps3disk.c2
-rw-r--r--drivers/block/ps3vram.c4
-rw-r--r--drivers/block/rbd.c1389
-rw-r--r--drivers/block/rbd_types.h2
-rw-r--r--drivers/block/sunvdc.c11
-rw-r--r--drivers/block/swim.c12
-rw-r--r--drivers/block/swim3.c3
-rw-r--r--drivers/block/umem.c3
-rw-r--r--drivers/block/virtio_blk.c20
-rw-r--r--drivers/block/xen-blkback/blkback.c305
-rw-r--r--drivers/block/xen-blkback/common.h16
-rw-r--r--drivers/block/xen-blkback/xenbus.c23
-rw-r--r--drivers/block/xen-blkfront.c203
-rw-r--r--drivers/block/xsysace.c19
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/ath3k.c12
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/bus/omap-ocp2scp.c6
-rw-r--r--drivers/bus/omap_l3_noc.c6
-rw-r--r--drivers/cdrom/gdrom.c18
-rw-r--r--drivers/char/Kconfig7
-rw-r--r--drivers/char/agp/ali-agp.c3
-rw-r--r--drivers/char/agp/amd-k7-agp.c4
-rw-r--r--drivers/char/agp/amd64-agp.c15
-rw-r--r--drivers/char/agp/ati-agp.c3
-rw-r--r--drivers/char/agp/efficeon-agp.c4
-rw-r--r--drivers/char/agp/i460-agp.c6
-rw-r--r--drivers/char/agp/intel-agp.c6
-rw-r--r--drivers/char/agp/intel-agp.h91
-rw-r--r--drivers/char/agp/intel-gtt.c320
-rw-r--r--drivers/char/agp/nvidia-agp.c4
-rw-r--r--drivers/char/agp/sgi-agp.c2
-rw-r--r--drivers/char/agp/sis-agp.c5
-rw-r--r--drivers/char/agp/sworks-agp.c4
-rw-r--r--drivers/char/agp/uninorth-agp.c4
-rw-r--r--drivers/char/agp/via-agp.c3
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hw_random/atmel-rng.c2
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c4
-rw-r--r--drivers/char/hw_random/exynos-rng.c15
-rw-r--r--drivers/char/hw_random/n2-drv.c6
-rw-r--r--drivers/char/hw_random/octeon-rng.c2
-rw-r--r--drivers/char/hw_random/omap-rng.c8
-rw-r--r--drivers/char/hw_random/pasemi-rng.c2
-rw-r--r--drivers/char/hw_random/picoxcell-rng.c2
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c2
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c4
-rw-r--r--drivers/char/hw_random/tx4939-rng.c7
-rw-r--r--drivers/char/hw_random/virtio-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c6
-rw-r--r--drivers/char/mem.c10
-rw-r--r--drivers/char/pcmcia/Kconfig4
-rw-r--r--drivers/char/pcmcia/synclink_cs.c707
-rw-r--r--drivers/char/random.c46
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/tpm/Kconfig12
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/tpm.c114
-rw-r--r--drivers/char/tpm/tpm.h52
-rw-r--r--drivers/char/tpm/tpm_acpi.c8
-rw-r--r--drivers/char/tpm/tpm_atmel.c7
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c7
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c887
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.h61
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c96
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.h5
-rw-r--r--drivers/char/tpm/tpm_nsc.c7
-rw-r--r--drivers/char/tpm/tpm_tis.c64
-rw-r--r--drivers/char/virtio_console.c328
-rw-r--r--drivers/clk/Kconfig2
-rw-r--r--drivers/clk/Makefile14
-rw-r--r--drivers/clk/clk-bcm2835.c9
-rw-r--r--drivers/clk/clk-divider.c6
-rw-r--r--drivers/clk/clk-fixed-factor.c5
-rw-r--r--drivers/clk/clk-fixed-rate.c3
-rw-r--r--drivers/clk/clk-highbank.c20
-rw-r--r--drivers/clk/clk-max77686.c37
-rw-r--r--drivers/clk/clk-nomadik.c1
-rw-r--r--drivers/clk/clk-prima2.c205
-rw-r--r--drivers/clk/clk-sunxi.c30
-rw-r--r--drivers/clk/clk-twl6040.c6
-rw-r--r--drivers/clk/clk-vt8500.c143
-rw-r--r--drivers/clk/clk-zynq.c14
-rw-r--r--drivers/clk/clk.c169
-rw-r--r--drivers/clk/mvebu/Kconfig8
-rw-r--r--drivers/clk/mvebu/Makefile3
-rw-r--r--drivers/clk/mvebu/clk-core.c675
-rw-r--r--drivers/clk/mvebu/clk-core.h18
-rw-r--r--drivers/clk/mvebu/clk-cpu.c189
-rw-r--r--drivers/clk/mvebu/clk-cpu.h22
-rw-r--r--drivers/clk/mvebu/clk-gating-ctrl.c250
-rw-r--r--drivers/clk/mvebu/clk-gating-ctrl.h22
-rw-r--r--drivers/clk/mvebu/clk.c27
-rw-r--r--drivers/clk/mxs/clk-imx23.c2
-rw-r--r--drivers/clk/mxs/clk-imx28.c4
-rw-r--r--drivers/clk/spear/spear1310_clock.c1
-rw-r--r--drivers/clk/tegra/Makefile11
-rw-r--r--drivers/clk/tegra/clk-audio-sync.c87
-rw-r--r--drivers/clk/tegra/clk-divider.c187
-rw-r--r--drivers/clk/tegra/clk-periph-gate.c179
-rw-r--r--drivers/clk/tegra/clk-periph.c218
-rw-r--r--drivers/clk/tegra/clk-pll-out.c123
-rw-r--r--drivers/clk/tegra/clk-pll.c648
-rw-r--r--drivers/clk/tegra/clk-super.c166
-rw-r--r--drivers/clk/tegra/clk-tegra20.c1355
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1994
-rw-r--r--drivers/clk/tegra/clk.c85
-rw-r--r--drivers/clk/tegra/clk.h502
-rw-r--r--drivers/clk/ux500/abx500-clk.c2
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c1
-rw-r--r--drivers/clk/versatile/clk-vexpress.c11
-rw-r--r--drivers/clk/x86/Makefile2
-rw-r--r--drivers/clk/x86/clk-lpss.c99
-rw-r--r--drivers/clk/x86/clk-lpss.h36
-rw-r--r--drivers/clk/x86/clk-lpt.c86
-rw-r--r--drivers/clocksource/Kconfig12
-rw-r--r--drivers/clocksource/Makefile5
-rw-r--r--drivers/clocksource/acpi_pm.c6
-rw-r--r--drivers/clocksource/arm_arch_timer.c391
-rw-r--r--drivers/clocksource/arm_generic.c232
-rw-r--r--drivers/clocksource/bcm2835_timer.c9
-rw-r--r--drivers/clocksource/clksrc-of.c35
-rw-r--r--drivers/clocksource/cs5535-clockevt.c11
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c6
-rw-r--r--drivers/clocksource/em_sti.c8
-rw-r--r--drivers/clocksource/nomadik-mtu.c44
-rw-r--r--drivers/clocksource/sh_cmt.c6
-rw-r--r--drivers/clocksource/sh_mtu2.c6
-rw-r--r--drivers/clocksource/sh_tmu.c6
-rw-r--r--drivers/clocksource/sunxi_timer.c21
-rw-r--r--drivers/clocksource/tcb_clksrc.c7
-rw-r--r--drivers/clocksource/tegra20_timer.c281
-rw-r--r--drivers/clocksource/time-armada-370-xp.c11
-rw-r--r--drivers/clocksource/vt8500_timer.c180
-rw-r--r--drivers/connector/connector.c8
-rw-r--r--drivers/cpufreq/Kconfig7
-rw-r--r--drivers/cpufreq/Kconfig.arm34
-rw-r--r--drivers/cpufreq/Kconfig.x8621
-rw-r--r--drivers/cpufreq/Makefile18
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c15
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c47
-rw-r--r--drivers/cpufreq/cpufreq.c460
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c19
-rw-r--r--drivers/cpufreq/cpufreq_governor.c131
-rw-r--r--drivers/cpufreq/cpufreq_governor.h6
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c70
-rw-r--r--drivers/cpufreq/cpufreq_stats.c60
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c2
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c (renamed from drivers/cpufreq/db8500-cpufreq.c)109
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c192
-rw-r--r--drivers/cpufreq/exynos-cpufreq.h48
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c153
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c389
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c179
-rw-r--r--drivers/cpufreq/freq_table.c15
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c120
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c336
-rw-r--r--drivers/cpufreq/intel_pstate.c823
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c259
-rw-r--r--drivers/cpufreq/longhaul.c10
-rw-r--r--drivers/cpufreq/maple-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c7
-rw-r--r--drivers/cpufreq/powernow-k8.c46
-rw-r--r--drivers/cpufreq/spear-cpufreq.c12
-rw-r--r--drivers/cpuidle/Kconfig6
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/coupled.c2
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c106
-rw-r--r--drivers/cpuidle/cpuidle.c19
-rw-r--r--drivers/cpuidle/driver.c33
-rw-r--r--drivers/cpuidle/governors/menu.c8
-rw-r--r--drivers/cpuidle/sysfs.c2
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/atmel-aes.c6
-rw-r--r--drivers/crypto/atmel-sha.c6
-rw-r--r--drivers/crypto/atmel-tdes.c6
-rw-r--r--drivers/crypto/bfin_crc.c6
-rw-r--r--drivers/crypto/caam/ctrl.c2
-rw-r--r--drivers/crypto/geode-aes.c8
-rw-r--r--drivers/crypto/hifn_795x.c6
-rw-r--r--drivers/crypto/mv_cesa.c2
-rw-r--r--drivers/crypto/n2_core.c46
-rw-r--r--drivers/crypto/nx/nx-842.c20
-rw-r--r--drivers/crypto/nx/nx.c8
-rw-r--r--drivers/crypto/omap-sham.c7
-rw-r--r--drivers/crypto/picoxcell_crypto.c7
-rw-r--r--drivers/crypto/s5p-sss.c2
-rw-r--r--drivers/crypto/talitos.c3
-rw-r--r--drivers/crypto/tegra-aes.c16
-rw-r--r--drivers/devfreq/devfreq.c5
-rw-r--r--drivers/devfreq/exynos4_bus.c100
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/bestcomm/Kconfig36
-rw-r--r--drivers/dma/bestcomm/Makefile14
-rw-r--r--drivers/dma/bestcomm/ata.c157
-rw-r--r--drivers/dma/bestcomm/bcom_ata_task.c67
-rw-r--r--drivers/dma/bestcomm/bcom_fec_rx_task.c78
-rw-r--r--drivers/dma/bestcomm/bcom_fec_tx_task.c91
-rw-r--r--drivers/dma/bestcomm/bcom_gen_bd_rx_task.c63
-rw-r--r--drivers/dma/bestcomm/bcom_gen_bd_tx_task.c69
-rw-r--r--drivers/dma/bestcomm/bestcomm.c531
-rw-r--r--drivers/dma/bestcomm/fec.c270
-rw-r--r--drivers/dma/bestcomm/gen_bd.c354
-rw-r--r--drivers/dma/bestcomm/sram.c178
-rw-r--r--drivers/dma/coh901318.c1302
-rw-r--r--drivers/dma/coh901318.h (renamed from drivers/dma/coh901318_lli.h)35
-rw-r--r--drivers/dma/coh901318_lli.c4
-rw-r--r--drivers/dma/dmatest.c49
-rw-r--r--drivers/dma/dw_dmac.c9
-rw-r--r--drivers/dma/edma.c2
-rw-r--r--drivers/dma/imx-dma.c12
-rw-r--r--drivers/dma/intel_mid_dma.c2
-rw-r--r--drivers/dma/ioat/dca.c9
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/ioat/dma.h13
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ioat/dma_v2.h8
-rw-r--r--drivers/dma/ioat/dma_v3.c10
-rw-r--r--drivers/dma/ioat/pci.c9
-rw-r--r--drivers/dma/iop-adma.c2
-rw-r--r--drivers/dma/mmp_pdma.c9
-rw-r--r--drivers/dma/mmp_tdma.c9
-rw-r--r--drivers/dma/mpc512x_dma.c2
-rw-r--r--drivers/dma/mv_xor.c432
-rw-r--r--drivers/dma/mv_xor.h36
-rw-r--r--drivers/dma/omap-dma.c20
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/pl330.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c4
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sh/shdma.c2
-rw-r--r--drivers/dma/sirf-dma.c2
-rw-r--r--drivers/dma/tegra20-apb-dma.c26
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/edac/Kconfig39
-rw-r--r--drivers/edac/Makefile5
-rw-r--r--drivers/edac/amd64_edac.c222
-rw-r--r--drivers/edac/amd64_edac.h12
-rw-r--r--drivers/edac/amd76x_edac.c8
-rw-r--r--drivers/edac/cell_edac.c8
-rw-r--r--drivers/edac/cpc925_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c7
-rw-r--r--drivers/edac/e7xxx_edac.c7
-rw-r--r--drivers/edac/edac_mc.c6
-rw-r--r--drivers/edac/edac_mc_sysfs.c19
-rw-r--r--drivers/edac/edac_pci_sysfs.c2
-rw-r--r--drivers/edac/highbank_l2_edac.c2
-rw-r--r--drivers/edac/highbank_mc_edac.c6
-rw-r--r--drivers/edac/i3000_edac.c7
-rw-r--r--drivers/edac/i3200_edac.c7
-rw-r--r--drivers/edac/i5000_edac.c7
-rw-r--r--drivers/edac/i5100_edac.c24
-rw-r--r--drivers/edac/i5400_edac.c7
-rw-r--r--drivers/edac/i7300_edac.c9
-rw-r--r--drivers/edac/i7core_edac.c7
-rw-r--r--drivers/edac/i82443bxgx_edac.c8
-rw-r--r--drivers/edac/i82860_edac.c8
-rw-r--r--drivers/edac/i82875p_edac.c8
-rw-r--r--drivers/edac/i82975x_edac.c8
-rw-r--r--drivers/edac/mce_amd.c166
-rw-r--r--drivers/edac/mce_amd.h13
-rw-r--r--drivers/edac/mpc85xx_edac.c12
-rw-r--r--drivers/edac/mv64x60_edac.c10
-rw-r--r--drivers/edac/octeon_edac-l2c.c208
-rw-r--r--drivers/edac/octeon_edac-lmc.c186
-rw-r--r--drivers/edac/octeon_edac-pc.c143
-rw-r--r--drivers/edac/octeon_edac-pci.c111
-rw-r--r--drivers/edac/pasemi_edac.c8
-rw-r--r--drivers/edac/ppc4xx_edac.c27
-rw-r--r--drivers/edac/r82600_edac.c8
-rw-r--r--drivers/edac/sb_edac.c7
-rw-r--r--drivers/edac/tile_edac.c8
-rw-r--r--drivers/edac/x38_edac.c7
-rw-r--r--drivers/extcon/Kconfig4
-rw-r--r--drivers/extcon/extcon-adc-jack.c3
-rw-r--r--drivers/extcon/extcon-arizona.c811
-rw-r--r--drivers/extcon/extcon-class.c2
-rw-r--r--drivers/extcon/extcon-gpio.c2
-rw-r--r--drivers/extcon/extcon-max77693.c1011
-rw-r--r--drivers/extcon/extcon-max8997.c760
-rw-r--r--drivers/firewire/net.c14
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firmware/dcdbas.c6
-rw-r--r--drivers/firmware/dmi_scan.c80
-rw-r--r--drivers/firmware/efivars.c660
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/firmware/memmap.c196
-rw-r--r--drivers/gpio/Kconfig26
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-ab8500.c520
-rw-r--r--drivers/gpio/gpio-da9052.c6
-rw-r--r--drivers/gpio/gpio-da9055.c8
-rw-r--r--drivers/gpio/gpio-ich.c1
-rw-r--r--drivers/gpio/gpio-mvebu.c41
-rw-r--r--drivers/gpio/gpio-mxs.c9
-rw-r--r--drivers/gpio/gpio-samsung.c19
-rw-r--r--drivers/gpio/gpio-spear-spics.c8
-rw-r--r--drivers/gpio/gpio-stp-xway.c9
-rw-r--r--drivers/gpio/gpio-tegra.c9
-rw-r--r--drivers/gpio/gpio-tps6586x.c9
-rw-r--r--drivers/gpio/gpio-ts5500.c6
-rw-r--r--drivers/gpio/gpio-twl4030.c12
-rw-r--r--drivers/gpio/gpio-viperboard.c517
-rw-r--r--drivers/gpio/gpiolib-of.c37
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c3
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c12
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c17
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c12
-rw-r--r--drivers/gpu/drm/drm_crtc.c63
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c161
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c (renamed from drivers/gpu/drm/drm_dp_i2c_helper.c)146
-rw-r--r--drivers/gpu/drm/drm_edid.c48
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c76
-rw-r--r--drivers/gpu/drm/drm_hashtab.c38
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/drm_irq.c120
-rw-r--r--drivers/gpu/drm/drm_mm.c86
-rw-r--r--drivers/gpu/drm/drm_modes.c8
-rw-r--r--drivers/gpu/drm/drm_pci.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c37
-rw-r--r--drivers/gpu/drm/drm_sysfs.c6
-rw-r--r--drivers/gpu/drm/exynos/Kconfig32
-rw-r--r--drivers/gpu/drm/exynos/Makefile5
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c178
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h26
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c57
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c179
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c141
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h69
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c116
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c110
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1953
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c242
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c508
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c457
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h80
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c1836
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c74
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c136
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c2050
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h252
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c837
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.h19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c86
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c457
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c428
-rw-r--r--drivers/gpu/drm/exynos/regs-fimc.h669
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h284
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h22
-rw-r--r--drivers/gpu/drm/exynos/regs-rotator.h73
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c12
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c10
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c365
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c24
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c20
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c73
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c98
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c139
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h483
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c358
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c87
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c420
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c109
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h315
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c763
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c45
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h10
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c62
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1091
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1989
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c978
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h123
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c135
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c9
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c235
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c11
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c90
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c697
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c342
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h37
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c128
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c99
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c21
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/Makefile38
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engctx.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/core/falcon.c250
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c1150
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h142
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c98
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c111
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c884
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c190
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c68
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c122
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c60
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h147
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c184
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/regs.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv84.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nve0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h225
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engctx.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/falcon.h81
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/gpuobj.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h48
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/bsp.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/copy.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/crypt.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/ppp.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/vp.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h48
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/disp.c178
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c182
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c128
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c79
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c26
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c92
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c89
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c106
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c114
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c79
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c84
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c394
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c132
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c235
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c68
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c141
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c112
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c261
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c764
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c136
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c321
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2547
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h71
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c403
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h120
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c530
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2141
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c149
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c335
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c769
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h145
-rw-r--r--drivers/gpu/drm/radeon/ni.c473
-rw-r--r--drivers/gpu/drm/radeon/nid.h87
-rw-r--r--drivers/gpu/drm/radeon/r100.c23
-rw-r--r--drivers/gpu/drm/radeon/r600.c582
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c405
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h9
-rw-r--r--drivers/gpu/drm/radeon/r600d.h86
-rw-r--r--drivers/gpu/drm/radeon/radeon.h47
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c192
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h38
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c72
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c41
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5152
-rw-r--r--drivers/gpu/drm/radeon/rv515.c124
-rw-r--r--drivers/gpu/drm/radeon/rv770.c105
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h71
-rw-r--r--drivers/gpu/drm/radeon/si.c439
-rw-r--r--drivers/gpu/drm/radeon/sid.h137
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c10
-rw-r--r--drivers/gpu/drm/tegra/Kconfig23
-rw-r--r--drivers/gpu/drm/tegra/Makefile7
-rw-r--r--drivers/gpu/drm/tegra/dc.c830
-rw-r--r--drivers/gpu/drm/tegra/dc.h388
-rw-r--r--drivers/gpu/drm/tegra/drm.c114
-rw-r--r--drivers/gpu/drm/tegra/drm.h216
-rw-r--r--drivers/gpu/drm/tegra/fb.c56
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1320
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h575
-rw-r--r--drivers/gpu/drm/tegra/host1x.c327
-rw-r--r--drivers/gpu/drm/tegra/output.c272
-rw-r--r--drivers/gpu/drm/tegra/rgb.c228
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c322
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c28
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c51
-rw-r--r--drivers/gpu/drm/udl/Kconfig2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h909
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c274
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c92
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h153
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c917
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2019
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h84
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c893
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c6
-rw-r--r--drivers/hid/Kconfig20
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-a4tech.c13
-rw-r--r--drivers/hid/hid-apple.c19
-rw-r--r--drivers/hid/hid-aureal.c13
-rw-r--r--drivers/hid/hid-axff.c14
-rw-r--r--drivers/hid/hid-belkin.c13
-rw-r--r--drivers/hid/hid-cherry.c13
-rw-r--r--drivers/hid/hid-chicony.c13
-rw-r--r--drivers/hid/hid-core.c17
-rw-r--r--drivers/hid/hid-cypress.c13
-rw-r--r--drivers/hid/hid-dr.c13
-rw-r--r--drivers/hid/hid-elecom.c13
-rw-r--r--drivers/hid/hid-emsff.c13
-rw-r--r--drivers/hid/hid-ezkey.c13
-rw-r--r--drivers/hid/hid-gaff.c13
-rw-r--r--drivers/hid/hid-generic.c14
-rw-r--r--drivers/hid/hid-gyration.c13
-rw-r--r--drivers/hid/hid-holtek-kbd.c13
-rw-r--r--drivers/hid/hid-holtekff.c15
-rw-r--r--drivers/hid/hid-hyperv.c3
-rw-r--r--drivers/hid/hid-icade.c19
-rw-r--r--drivers/hid/hid-ids.h24
-rw-r--r--drivers/hid/hid-kensington.c13
-rw-r--r--drivers/hid/hid-keytouch.c13
-rw-r--r--drivers/hid/hid-kye.c13
-rw-r--r--drivers/hid/hid-lcpower.c13
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c14
-rw-r--r--drivers/hid/hid-lg.c212
-rw-r--r--drivers/hid/hid-lg4ff.c17
-rw-r--r--drivers/hid/hid-magicmouse.c19
-rw-r--r--drivers/hid/hid-microsoft.c13
-rw-r--r--drivers/hid/hid-monterey.c13
-rw-r--r--drivers/hid/hid-multitouch.c163
-rw-r--r--drivers/hid/hid-ntrig.c81
-rw-r--r--drivers/hid/hid-ortek.c13
-rw-r--r--drivers/hid/hid-petalynx.c13
-rw-r--r--drivers/hid/hid-picolcd_cir.c2
-rw-r--r--drivers/hid/hid-picolcd_core.c13
-rw-r--r--drivers/hid/hid-pl.c26
-rw-r--r--drivers/hid/hid-primax.c13
-rw-r--r--drivers/hid/hid-prodikeys.c19
-rw-r--r--drivers/hid/hid-ps3remote.c13
-rw-r--r--drivers/hid/hid-roccat-lua.c14
-rw-r--r--drivers/hid/hid-saitek.c13
-rw-r--r--drivers/hid/hid-samsung.c13
-rw-r--r--drivers/hid/hid-sensor-hub.c22
-rw-r--r--drivers/hid/hid-sjoy.c13
-rw-r--r--drivers/hid/hid-sony.c59
-rw-r--r--drivers/hid/hid-speedlink.c13
-rw-r--r--drivers/hid/hid-steelseries.c393
-rw-r--r--drivers/hid/hid-sunplus.c13
-rw-r--r--drivers/hid/hid-thingm.c272
-rw-r--r--drivers/hid/hid-tivo.c13
-rw-r--r--drivers/hid/hid-tmff.c13
-rw-r--r--drivers/hid/hid-topseed.c13
-rw-r--r--drivers/hid/hid-twinhan.c13
-rw-r--r--drivers/hid/hid-uclogic.c13
-rw-r--r--drivers/hid/hid-wacom.c18
-rw-r--r--drivers/hid/hid-waltop.c13
-rw-r--r--drivers/hid/hid-wiimote-core.c19
-rw-r--r--drivers/hid/hid-wiimote-debug.c2
-rw-r--r--drivers/hid/hid-wiimote-ext.c8
-rw-r--r--drivers/hid/hid-zpff.c13
-rw-r--r--drivers/hid/hid-zydacron.c13
-rw-r--r--drivers/hid/hidraw.c1
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c112
-rw-r--r--drivers/hid/uhid.c95
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hsi/clients/hsi_char.c8
-rw-r--r--drivers/hv/Kconfig2
-rw-r--r--drivers/hv/channel.c33
-rw-r--r--drivers/hv/channel_mgmt.c93
-rw-r--r--drivers/hv/connection.c232
-rw-r--r--drivers/hv/hv.c72
-rw-r--r--drivers/hv/hv_balloon.c98
-rw-r--r--drivers/hv/hv_util.c46
-rw-r--r--drivers/hv/hyperv_vmbus.h65
-rw-r--r--drivers/hv/ring_buffer.c130
-rw-r--r--drivers/hv/vmbus_drv.c54
-rw-r--r--drivers/hwmon/Kconfig32
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/acpi_power_meter.c2
-rw-r--r--drivers/hwmon/ad7414.c2
-rw-r--r--drivers/hwmon/adm1021.c4
-rw-r--r--drivers/hwmon/adm1026.c16
-rw-r--r--drivers/hwmon/adm1031.c12
-rw-r--r--drivers/hwmon/adm9240.c6
-rw-r--r--drivers/hwmon/ads7828.c6
-rw-r--r--drivers/hwmon/adt7410.c28
-rw-r--r--drivers/hwmon/adt7462.c20
-rw-r--r--drivers/hwmon/adt7470.c20
-rw-r--r--drivers/hwmon/adt7475.c18
-rw-r--r--drivers/hwmon/amc6821.c32
-rw-r--r--drivers/hwmon/asb100.c10
-rw-r--r--drivers/hwmon/asc7621.c26
-rw-r--r--drivers/hwmon/asus_atk0110.c4
-rw-r--r--drivers/hwmon/coretemp.c5
-rw-r--r--drivers/hwmon/dme1737.c15
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/emc6w201.c8
-rw-r--r--drivers/hwmon/f71882fg.c25
-rw-r--r--drivers/hwmon/f75375s.c12
-rw-r--r--drivers/hwmon/fschmd.c4
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/gl518sm.c10
-rw-r--r--drivers/hwmon/gl520sm.c9
-rw-r--r--drivers/hwmon/gpio-fan.c4
-rw-r--r--drivers/hwmon/hwmon-vid.c10
-rw-r--r--drivers/hwmon/hwmon.c26
-rw-r--r--drivers/hwmon/ina209.c636
-rw-r--r--drivers/hwmon/it87.c970
-rw-r--r--drivers/hwmon/jc42.c10
-rw-r--r--drivers/hwmon/lm63.c8
-rw-r--r--drivers/hwmon/lm73.c152
-rw-r--r--drivers/hwmon/lm75.h2
-rw-r--r--drivers/hwmon/lm77.c2
-rw-r--r--drivers/hwmon/lm78.c6
-rw-r--r--drivers/hwmon/lm80.c8
-rw-r--r--drivers/hwmon/lm85.c10
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwmon/lm93.c28
-rw-r--r--drivers/hwmon/lm95245.c4
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1668.c4
-rw-r--r--drivers/hwmon/max6639.c4
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/max6650.c4
-rw-r--r--drivers/hwmon/max6697.c726
-rw-r--r--drivers/hwmon/ntc_thermistor.c4
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/hwmon/pmbus/max34440.c75
-rw-r--r--drivers/hwmon/pmbus/pmbus.h11
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c840
-rw-r--r--drivers/hwmon/pmbus/zl6100.c176
-rw-r--r--drivers/hwmon/sht15.c157
-rw-r--r--drivers/hwmon/sis5595.c6
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/smsc47m192.c4
-rw-r--r--drivers/hwmon/thmc50.c6
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwmon/tmp401.c14
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c2
-rw-r--r--drivers/hwmon/vexpress.c5
-rw-r--r--drivers/hwmon/via686a.c17
-rw-r--r--drivers/hwmon/vt1211.c10
-rw-r--r--drivers/hwmon/vt8231.c22
-rw-r--r--drivers/hwmon/w83627ehf.c116
-rw-r--r--drivers/hwmon/w83627hf.c104
-rw-r--r--drivers/hwmon/w83781d.c17
-rw-r--r--drivers/hwmon/w83791d.c10
-rw-r--r--drivers/hwmon/w83792d.c25
-rw-r--r--drivers/hwmon/w83793.c18
-rw-r--r--drivers/hwmon/w83795.c28
-rw-r--r--drivers/hwmon/w83l786ng.c17
-rw-r--r--drivers/i2c/Kconfig2
-rw-r--r--drivers/i2c/busses/Kconfig37
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c8
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c10
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c8
-rw-r--r--drivers/i2c/busses/i2c-amd756.c7
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c7
-rw-r--r--drivers/i2c/busses/i2c-at91.c354
-rw-r--r--drivers/i2c/busses/i2c-au1550.c6
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c300
-rw-r--r--drivers/i2c/busses/i2c-cpm.c8
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c6
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c6
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c6
-rw-r--r--drivers/i2c/busses/i2c-elektor.c8
-rw-r--r--drivers/i2c/busses/i2c-gpio.c14
-rw-r--r--drivers/i2c/busses/i2c-highlander.c6
-rw-r--r--drivers/i2c/busses/i2c-hydra.c6
-rw-r--r--drivers/i2c/busses/i2c-i801.c41
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c8
-rw-r--r--drivers/i2c/busses/i2c-imx.c6
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c6
-rw-r--r--drivers/i2c/busses/i2c-isch.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c38
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c18
-rw-r--r--drivers/i2c/busses/i2c-mxs.c14
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c12
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c14
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c6
-rw-r--r--drivers/i2c/busses/i2c-ocores.c171
-rw-r--r--drivers/i2c/busses/i2c-octeon.c10
-rw-r--r--drivers/i2c/busses/i2c-omap.c246
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c6
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c6
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c8
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c6
-rw-r--r--drivers/i2c/busses/i2c-piix4.c37
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c6
-rw-r--r--drivers/i2c/busses/i2c-pnx.c6
-rw-r--r--drivers/i2c/busses/i2c-powermac.c16
-rw-r--r--drivers/i2c/busses/i2c-puv3.c6
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c6
-rw-r--r--drivers/i2c/busses/i2c-rcar.c18
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c214
-rw-r--r--drivers/i2c/busses/i2c-s6000.c8
-rw-r--r--drivers/i2c/busses/i2c-scmi.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c8
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c152
-rw-r--r--drivers/i2c/busses/i2c-sirf.c19
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c4
-rw-r--r--drivers/i2c/busses/i2c-sis630.c8
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c6
-rw-r--r--drivers/i2c/busses/i2c-stu300.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra.c19
-rw-r--r--drivers/i2c/busses/i2c-via.c6
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c480
-rw-r--r--drivers/i2c/busses/i2c-xiic.c8
-rw-r--r--drivers/i2c/busses/i2c-xlr.c15
-rw-r--r--drivers/i2c/busses/scx200_acb.c16
-rw-r--r--drivers/i2c/muxes/Kconfig2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c153
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c10
-rw-r--r--drivers/ide/Kconfig13
-rw-r--r--drivers/ide/aec62xx.c8
-rw-r--r--drivers/ide/alim15x3.c10
-rw-r--r--drivers/ide/amd74xx.c4
-rw-r--r--drivers/ide/atiixp.c4
-rw-r--r--drivers/ide/cmd64x.c4
-rw-r--r--drivers/ide/cs5520.c4
-rw-r--r--drivers/ide/cs5530.c6
-rw-r--r--drivers/ide/cs5535.c5
-rw-r--r--drivers/ide/cy82c693.c11
-rw-r--r--drivers/ide/delkin_cb.c5
-rw-r--r--drivers/ide/hpt366.c42
-rw-r--r--drivers/ide/icside.c15
-rw-r--r--drivers/ide/ide-pci-generic.c4
-rw-r--r--drivers/ide/ide_platform.c14
-rw-r--r--drivers/ide/it8172.c5
-rw-r--r--drivers/ide/it8213.c4
-rw-r--r--drivers/ide/it821x.c10
-rw-r--r--drivers/ide/jmicron.c4
-rw-r--r--drivers/ide/ns87415.c8
-rw-r--r--drivers/ide/opti621.c4
-rw-r--r--drivers/ide/palm_bk3710.c7
-rw-r--r--drivers/ide/pdc202xx_new.c10
-rw-r--r--drivers/ide/pdc202xx_old.c8
-rw-r--r--drivers/ide/piix.c8
-rw-r--r--drivers/ide/pmac.c16
-rw-r--r--drivers/ide/rapide.c7
-rw-r--r--drivers/ide/rz1000.c6
-rw-r--r--drivers/ide/sc1200.c4
-rw-r--r--drivers/ide/scc_pata.c20
-rw-r--r--drivers/ide/serverworks.c4
-rw-r--r--drivers/ide/sgiioc4.c13
-rw-r--r--drivers/ide/siimage.c13
-rw-r--r--drivers/ide/sis5513.c10
-rw-r--r--drivers/ide/sl82c105.c4
-rw-r--r--drivers/ide/slc90e66.c5
-rw-r--r--drivers/ide/tc86c001.c12
-rw-r--r--drivers/ide/triflex.c5
-rw-r--r--drivers/ide/trm290.c6
-rw-r--r--drivers/ide/via82cxxx.c8
-rw-r--r--drivers/idle/Kconfig1
-rw-r--r--drivers/idle/i7300_idle.c8
-rw-r--r--drivers/idle/intel_idle.c281
-rw-r--r--drivers/iio/accel/Kconfig39
-rw-r--r--drivers/iio/accel/Makefile9
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c7
-rw-r--r--drivers/iio/accel/kxsd9.c (renamed from drivers/staging/iio/accel/kxsd9.c)16
-rw-r--r--drivers/iio/accel/st_accel.h47
-rw-r--r--drivers/iio/accel/st_accel_buffer.c114
-rw-r--r--drivers/iio/accel/st_accel_core.c500
-rw-r--r--drivers/iio/accel/st_accel_i2c.c86
-rw-r--r--drivers/iio/accel/st_accel_spi.c85
-rw-r--r--drivers/iio/adc/Kconfig18
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ad7266.c14
-rw-r--r--drivers/iio/adc/ad7298.c6
-rw-r--r--drivers/iio/adc/ad7476.c6
-rw-r--r--drivers/iio/adc/ad7791.c10
-rw-r--r--drivers/iio/adc/ad7887.c6
-rw-r--r--drivers/iio/adc/at91_adc.c14
-rw-r--r--drivers/iio/adc/lp8788_adc.c24
-rw-r--r--drivers/iio/adc/max1363.c182
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c260
-rw-r--r--drivers/iio/adc/viperboard_adc.c181
-rw-r--r--drivers/iio/amplifiers/ad8366.c6
-rw-r--r--drivers/iio/buffer_cb.c4
-rw-r--r--drivers/iio/common/Kconfig1
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/hid-sensors/Kconfig13
-rw-r--r--drivers/iio/common/hid-sensors/Makefile3
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c11
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.h57
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c5
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h2
-rw-r--r--drivers/iio/common/st_sensors/Kconfig14
-rw-r--r--drivers/iio/common/st_sensors/Makefile10
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c116
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c446
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c81
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c128
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c77
-rw-r--r--drivers/iio/dac/ad5064.c18
-rw-r--r--drivers/iio/dac/ad5360.c15
-rw-r--r--drivers/iio/dac/ad5380.c28
-rw-r--r--drivers/iio/dac/ad5421.c13
-rw-r--r--drivers/iio/dac/ad5446.c24
-rw-r--r--drivers/iio/dac/ad5449.c6
-rw-r--r--drivers/iio/dac/ad5504.c18
-rw-r--r--drivers/iio/dac/ad5624r_spi.c12
-rw-r--r--drivers/iio/dac/ad5686.c19
-rw-r--r--drivers/iio/dac/ad5755.c23
-rw-r--r--drivers/iio/dac/ad5764.c13
-rw-r--r--drivers/iio/dac/ad5791.c25
-rw-r--r--drivers/iio/dac/max517.c6
-rw-r--r--drivers/iio/dac/mcp4725.c8
-rw-r--r--drivers/iio/frequency/ad9523.c20
-rw-r--r--drivers/iio/frequency/adf4350.c8
-rw-r--r--drivers/iio/gyro/Kconfig56
-rw-r--r--drivers/iio/gyro/Makefile14
-rw-r--r--drivers/iio/gyro/adis16080.c (renamed from drivers/staging/iio/gyro/adis16080_core.c)154
-rw-r--r--drivers/iio/gyro/adxrs450.c (renamed from drivers/staging/iio/gyro/adxrs450_core.c)202
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c7
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c156
-rw-r--r--drivers/iio/gyro/itg3200_core.c401
-rw-r--r--drivers/iio/gyro/st_gyro.h45
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c114
-rw-r--r--drivers/iio/gyro/st_gyro_core.c368
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c84
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c83
-rw-r--r--drivers/iio/imu/Kconfig13
-rw-r--r--drivers/iio/imu/Makefile5
-rw-r--r--drivers/iio/imu/adis16400.h (renamed from drivers/staging/iio/imu/adis16400.h)141
-rw-r--r--drivers/iio/imu/adis16400_buffer.c96
-rw-r--r--drivers/iio/imu/adis16400_core.c965
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig13
-rw-r--r--drivers/iio/imu/inv_mpu6050/Makefile6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c795
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h246
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c196
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c155
-rw-r--r--drivers/iio/industrialio-trigger.c12
-rw-r--r--drivers/iio/inkern.c53
-rw-r--r--drivers/iio/kfifo_buf.c1
-rw-r--r--drivers/iio/light/Kconfig11
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/adjd_s311.c8
-rw-r--r--drivers/iio/light/hid-sensor-als.c7
-rw-r--r--drivers/iio/light/lm3533-als.c19
-rw-r--r--drivers/iio/light/tsl2563.c (renamed from drivers/staging/iio/light/tsl2563.c)96
-rw-r--r--drivers/iio/light/vcnl4000.c8
-rw-r--r--drivers/iio/magnetometer/Kconfig31
-rw-r--r--drivers/iio/magnetometer/Makefile7
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c7
-rw-r--r--drivers/iio/magnetometer/st_magn.h45
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c98
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c400
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c80
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c79
-rw-r--r--drivers/infiniband/core/cma.c9
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c7
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h8
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c1
-rw-r--r--drivers/infiniband/hw/amso1100/c2_pd.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c797
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c210
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h33
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c6
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c20
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c12
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c10
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c34
-rw-r--r--drivers/infiniband/hw/mlx4/main.c27
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/user.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c9
-rw-r--r--drivers/infiniband/hw/nes/nes.c14
-rw-r--r--drivers/infiniband/hw/nes/nes.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c34
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c9
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c42
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c26
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c5
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c314
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h11
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c178
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/Kconfig2
-rw-r--r--drivers/input/gameport/emu10k1-gp.c6
-rw-r--r--drivers/input/gameport/fm801-gp.c6
-rw-r--r--drivers/input/input-mt.c3
-rw-r--r--drivers/input/input.c193
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/joystick/as5011.c29
-rw-r--r--drivers/input/joystick/maplecontrol.c6
-rw-r--r--drivers/input/joystick/walkera0701.c87
-rw-r--r--drivers/input/joystick/xpad.c33
-rw-r--r--drivers/input/keyboard/Kconfig19
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5520-keys.c6
-rw-r--r--drivers/input/keyboard/adp5588-keys.c18
-rw-r--r--drivers/input/keyboard/adp5589-keys.c21
-rw-r--r--drivers/input/keyboard/atkbd.c74
-rw-r--r--drivers/input/keyboard/bf54x-keys.c6
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c4
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c6
-rw-r--r--drivers/input/keyboard/goldfish_events.c194
-rw-r--r--drivers/input/keyboard/gpio_keys.c103
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c39
-rw-r--r--drivers/input/keyboard/hilkbd.c10
-rw-r--r--drivers/input/keyboard/imx_keypad.c52
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c6
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c6
-rw-r--r--drivers/input/keyboard/lm8323.c8
-rw-r--r--drivers/input/keyboard/lm8333.c6
-rw-r--r--drivers/input/keyboard/locomokbd.c8
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c8
-rw-r--r--drivers/input/keyboard/matrix_keypad.c129
-rw-r--r--drivers/input/keyboard/max7359_keypad.c6
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c6
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c12
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c38
-rw-r--r--drivers/input/keyboard/omap-keypad.c6
-rw-r--r--drivers/input/keyboard/omap4-keypad.c10
-rw-r--r--drivers/input/keyboard/opencores-kbd.c6
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c10
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c6
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c6
-rw-r--r--drivers/input/keyboard/qt1070.c8
-rw-r--r--drivers/input/keyboard/qt2160.c166
-rw-r--r--drivers/input/keyboard/samsung-keypad.c109
-rw-r--r--drivers/input/keyboard/sh_keysc.c6
-rw-r--r--drivers/input/keyboard/spear-keyboard.c100
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c142
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c6
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c8
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c179
-rw-r--r--drivers/input/keyboard/tegra-kbc.c482
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c6
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c8
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c6
-rw-r--r--drivers/input/matrix-keymap.c23
-rw-r--r--drivers/input/misc/88pm80x_onkey.c6
-rw-r--r--drivers/input/misc/88pm860x_onkey.c6
-rw-r--r--drivers/input/misc/Kconfig29
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/ab8500-ponkey.c6
-rw-r--r--drivers/input/misc/ad714x-i2c.c6
-rw-r--r--drivers/input/misc/ad714x-spi.c6
-rw-r--r--drivers/input/misc/adxl34x-i2c.c6
-rw-r--r--drivers/input/misc/adxl34x-spi.c6
-rw-r--r--drivers/input/misc/adxl34x.c7
-rw-r--r--drivers/input/misc/atlas_btns.c2
-rw-r--r--drivers/input/misc/bfin_rotary.c6
-rw-r--r--drivers/input/misc/bma150.c42
-rw-r--r--drivers/input/misc/cma3000_d0x_i2c.c6
-rw-r--r--drivers/input/misc/cobalt_btns.c6
-rw-r--r--drivers/input/misc/da9052_onkey.c28
-rw-r--r--drivers/input/misc/da9055_onkey.c171
-rw-r--r--drivers/input/misc/dm355evm_keys.c6
-rw-r--r--drivers/input/misc/gp2ap002a00f.c8
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c6
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c6
-rw-r--r--drivers/input/misc/kxtj9.c16
-rw-r--r--drivers/input/misc/m68kspkr.c6
-rw-r--r--drivers/input/misc/max8925_onkey.c6
-rw-r--r--drivers/input/misc/max8997_haptic.c6
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c6
-rw-r--r--drivers/input/misc/mma8450.c6
-rw-r--r--drivers/input/misc/mpu3050.c8
-rw-r--r--drivers/input/misc/pcap_keys.c6
-rw-r--r--drivers/input/misc/pcf50633-input.c6
-rw-r--r--drivers/input/misc/pcf8574_keypad.c6
-rw-r--r--drivers/input/misc/pcspkr.c6
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c6
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c6
-rw-r--r--drivers/input/misc/pwm-beeper.c20
-rw-r--r--drivers/input/misc/rb532_button.c6
-rw-r--r--drivers/input/misc/retu-pwrbutton.c99
-rw-r--r--drivers/input/misc/rotary_encoder.c9
-rw-r--r--drivers/input/misc/sgi_btns.c6
-rw-r--r--drivers/input/misc/sparcspkr.c14
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c3
-rw-r--r--drivers/input/misc/twl4030-vibra.c47
-rw-r--r--drivers/input/misc/twl6040-vibra.c106
-rw-r--r--drivers/input/misc/wistron_btns.c20
-rw-r--r--drivers/input/misc/wm831x-on.c15
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/Kconfig22
-rw-r--r--drivers/input/mouse/Makefile2
-rw-r--r--drivers/input/mouse/alps.c783
-rw-r--r--drivers/input/mouse/alps.h145
-rw-r--r--drivers/input/mouse/cyapa.c973
-rw-r--r--drivers/input/mouse/cypress_ps2.c725
-rw-r--r--drivers/input/mouse/cypress_ps2.h191
-rw-r--r--drivers/input/mouse/gpio_mouse.c6
-rw-r--r--drivers/input/mouse/maplemouse.c6
-rw-r--r--drivers/input/mouse/navpoint.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c32
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/mouse/pxa930_trkball.c6
-rw-r--r--drivers/input/mouse/sentelic.c2
-rw-r--r--drivers/input/mouse/synaptics.c32
-rw-r--r--drivers/input/mouse/synaptics_i2c.c6
-rw-r--r--drivers/input/serio/Kconfig11
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/altera_ps2.c6
-rw-r--r--drivers/input/serio/ambakmi.c6
-rw-r--r--drivers/input/serio/arc_ps2.c275
-rw-r--r--drivers/input/serio/ct82c710.c6
-rw-r--r--drivers/input/serio/gscps2.c6
-rw-r--r--drivers/input/serio/hil_mlc.c13
-rw-r--r--drivers/input/serio/i8042-io.h2
-rw-r--r--drivers/input/serio/i8042-sparcio.h6
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h9
-rw-r--r--drivers/input/serio/i8042.c6
-rw-r--r--drivers/input/serio/maceps2.c8
-rw-r--r--drivers/input/serio/pcips2.c6
-rw-r--r--drivers/input/serio/q40kbd.c6
-rw-r--r--drivers/input/serio/rpckbd.c6
-rw-r--r--drivers/input/serio/sa1111ps2.c12
-rw-r--r--drivers/input/serio/serio.c11
-rw-r--r--drivers/input/serio/xilinx_ps2.c8
-rw-r--r--drivers/input/tablet/wacom_sys.c64
-rw-r--r--drivers/input/tablet/wacom_wac.c224
-rw-r--r--drivers/input/tablet/wacom_wac.h4
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c8
-rw-r--r--drivers/input/touchscreen/Kconfig20
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c6
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c6
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c6
-rw-r--r--drivers/input/touchscreen/ads7846.c10
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c6
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c6
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c8
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c125
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c19
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c6
-rw-r--r--drivers/input/touchscreen/cyttsp_spi.c7
-rw-r--r--drivers/input/touchscreen/da9034-ts.c6
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c69
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c28
-rw-r--r--drivers/input/touchscreen/eeti_ts.c6
-rw-r--r--drivers/input/touchscreen/egalax_ts.c8
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c479
-rw-r--r--drivers/input/touchscreen/htcpen.c6
-rw-r--r--drivers/input/touchscreen/ili210x.c6
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c14
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c6
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c6
-rw-r--r--drivers/input/touchscreen/max11801_ts.c8
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c4
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c6
-rw-r--r--drivers/input/touchscreen/mms114.c118
-rw-r--r--drivers/input/touchscreen/pcap_ts.c6
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c6
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c6
-rw-r--r--drivers/input/touchscreen/st1232.c8
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c135
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c398
-rw-r--r--drivers/input/touchscreen/ti_tscadc.c486
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c6
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c4
-rw-r--r--drivers/input/touchscreen/tsc2005.c9
-rw-r--r--drivers/input/touchscreen/tsc2007.c6
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c8
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c6
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c6
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c16
-rw-r--r--drivers/iommu/Kconfig8
-rw-r--r--drivers/iommu/amd_iommu.c204
-rw-r--r--drivers/iommu/amd_iommu_init.c34
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel-iommu.c69
-rw-r--r--drivers/iommu/intel_irq_remapping.c48
-rw-r--r--drivers/iommu/irq_remapping.c231
-rw-r--r--drivers/iommu/irq_remapping.h1
-rw-r--r--drivers/iommu/omap-iommu.c74
-rw-r--r--drivers/iommu/omap-iommu.h3
-rw-r--r--drivers/iommu/omap-iommu2.c36
-rw-r--r--drivers/iommu/tegra-gart.c6
-rw-r--r--drivers/iommu/tegra-smmu.c17
-rw-r--r--drivers/ipack/devices/Kconfig2
-rw-r--r--drivers/ipack/devices/ipoctal.c130
-rw-r--r--drivers/irqchip/Kconfig27
-rw-r--r--drivers/irqchip/Makefile12
-rw-r--r--drivers/irqchip/exynos-combiner.c230
-rw-r--r--drivers/irqchip/irq-gic.c845
-rw-r--r--drivers/irqchip/irq-vic.c489
-rw-r--r--drivers/irqchip/irqchip.c30
-rw-r--r--drivers/irqchip/irqchip.h29
-rw-r--r--drivers/irqchip/spear-shirq.c321
-rw-r--r--drivers/isdn/Kconfig1
-rw-r--r--drivers/isdn/capi/Kconfig1
-rw-r--r--drivers/isdn/divert/divert_init.c33
-rw-r--r--drivers/isdn/divert/isdn_divert.c423
-rw-r--r--drivers/isdn/divert/isdn_divert.h28
-rw-r--r--drivers/isdn/gigaset/Kconfig1
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/gigaset/common.c31
-rw-r--r--drivers/isdn/gigaset/ev-layer.c124
-rw-r--r--drivers/isdn/gigaset/gigaset.h9
-rw-r--r--drivers/isdn/gigaset/interface.c60
-rw-r--r--drivers/isdn/hardware/avm/b1pci.c8
-rw-r--r--drivers/isdn/hardware/avm/c4.c3
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c3
-rw-r--r--drivers/isdn/hardware/eicon/divacapi.h6
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c9
-rw-r--r--drivers/isdn/hardware/eicon/pc.h4
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig1
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c10
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c6
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c6
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c16
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c10
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c14
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c6
-rw-r--r--drivers/isdn/hisax/Kconfig15
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c3
-rw-r--r--drivers/isdn/hisax/asuscom.c9
-rw-r--r--drivers/isdn/hisax/avm_a1.c3
-rw-r--r--drivers/isdn/hisax/avm_a1p.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c17
-rw-r--r--drivers/isdn/hisax/avma1_cs.c12
-rw-r--r--drivers/isdn/hisax/bkm_a4t.c16
-rw-r--r--drivers/isdn/hisax/bkm_a8.c18
-rw-r--r--drivers/isdn/hisax/config.c26
-rw-r--r--drivers/isdn/hisax/diva.c31
-rw-r--r--drivers/isdn/hisax/elsa.c31
-rw-r--r--drivers/isdn/hisax/elsa_cs.c12
-rw-r--r--drivers/isdn/hisax/enternow_pci.c14
-rw-r--r--drivers/isdn/hisax/gazel.c11
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c16
-rw-r--r--drivers/isdn/hisax/hfc_pci.c4
-rw-r--r--drivers/isdn/hisax/hfc_sx.c9
-rw-r--r--drivers/isdn/hisax/hfcscard.c9
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c22
-rw-r--r--drivers/isdn/hisax/icc.c3
-rw-r--r--drivers/isdn/hisax/isac.c7
-rw-r--r--drivers/isdn/hisax/isurf.c5
-rw-r--r--drivers/isdn/hisax/ix1_micro.c9
-rw-r--r--drivers/isdn/hisax/mic.c3
-rw-r--r--drivers/isdn/hisax/niccy.c6
-rw-r--r--drivers/isdn/hisax/nj_s.c14
-rw-r--r--drivers/isdn/hisax/nj_u.c14
-rw-r--r--drivers/isdn/hisax/s0box.c3
-rw-r--r--drivers/isdn/hisax/saphir.c3
-rw-r--r--drivers/isdn/hisax/sedlbauer.c23
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c12
-rw-r--r--drivers/isdn/hisax/sportster.c6
-rw-r--r--drivers/isdn/hisax/teleint.c3
-rw-r--r--drivers/isdn/hisax/teles0.c3
-rw-r--r--drivers/isdn/hisax/teles3.c9
-rw-r--r--drivers/isdn/hisax/teles_cs.c12
-rw-r--r--drivers/isdn/hisax/telespci.c5
-rw-r--r--drivers/isdn/hisax/w6692.c5
-rw-r--r--drivers/isdn/hysdn/hysdn_init.c8
-rw-r--r--drivers/isdn/i4l/isdn_common.c14
-rw-r--r--drivers/isdn/i4l/isdn_common.h2
-rw-r--r--drivers/isdn/i4l/isdn_tty.c59
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.h1
-rw-r--r--drivers/isdn/mISDN/core.c8
-rw-r--r--drivers/isdn/mISDN/dsp_core.c3
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/isdn/mISDN/stack.c7
-rw-r--r--drivers/leds/Kconfig2
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/led-triggers.c25
-rw-r--r--drivers/leds/leds-88pm860x.c9
-rw-r--r--drivers/leds/leds-adp5520.c4
-rw-r--r--drivers/leds/leds-bd2802.c10
-rw-r--r--drivers/leds/leds-clevo-mail.c11
-rw-r--r--drivers/leds/leds-cobalt-qube.c11
-rw-r--r--drivers/leds/leds-cobalt-raq.c11
-rw-r--r--drivers/leds/leds-da903x.c10
-rw-r--r--drivers/leds/leds-fsg.c15
-rw-r--r--drivers/leds/leds-gpio.c38
-rw-r--r--drivers/leds/leds-lm355x.c4
-rw-r--r--drivers/leds/leds-lm3642.c12
-rw-r--r--drivers/leds/leds-lp3944.c2
-rw-r--r--drivers/leds/leds-lp5521.c13
-rw-r--r--drivers/leds/leds-lp5523.c24
-rw-r--r--drivers/leds/leds-lt3593.c20
-rw-r--r--drivers/leds/leds-net48xx.c2
-rw-r--r--drivers/leds/leds-netxbig.c2
-rw-r--r--drivers/leds/leds-ns2.c36
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-pwm.c2
-rw-r--r--drivers/leds/leds-rb532.c2
-rw-r--r--drivers/leds/leds-renesas-tpu.c25
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/leds/leds-wm8350.c4
-rw-r--r--drivers/leds/leds-wrap.c2
-rw-r--r--drivers/leds/ledtrig-backlight.c4
-rw-r--r--drivers/leds/ledtrig-gpio.c2
-rw-r--r--drivers/lguest/Kconfig2
-rw-r--r--drivers/lguest/core.c2
-rw-r--r--drivers/macintosh/Kconfig2
-rw-r--r--drivers/macintosh/macio_asic.c6
-rw-r--r--drivers/macintosh/mediabay.c3
-rw-r--r--drivers/macintosh/rack-meter.c12
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/macintosh/windfarm_ad7417_sensor.c18
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c35
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c14
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c13
-rw-r--r--drivers/macintosh/windfarm_pm112.c6
-rw-r--r--drivers/macintosh/windfarm_pm121.c4
-rw-r--r--drivers/macintosh/windfarm_pm72.c4
-rw-r--r--drivers/macintosh/windfarm_pm81.c4
-rw-r--r--drivers/macintosh/windfarm_pm91.c4
-rw-r--r--drivers/macintosh/windfarm_rm31.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c13
-rw-r--r--drivers/mailbox/Kconfig19
-rw-r--r--drivers/mailbox/Makefile1
-rw-r--r--drivers/mailbox/pl320-ipc.c199
-rw-r--r--drivers/md/dm-bio-prison.c25
-rw-r--r--drivers/md/dm-bio-prison.h1
-rw-r--r--drivers/md/dm-crypt.c5
-rw-r--r--drivers/md/dm-delay.c5
-rw-r--r--drivers/md/dm-flakey.c21
-rw-r--r--drivers/md/dm-io.c23
-rw-r--r--drivers/md/dm-ioctl.c64
-rw-r--r--drivers/md/dm-kcopyd.c18
-rw-r--r--drivers/md/dm-linear.c6
-rw-r--r--drivers/md/dm-raid.c107
-rw-r--r--drivers/md/dm-raid1.c75
-rw-r--r--drivers/md/dm-snap.c90
-rw-r--r--drivers/md/dm-stripe.c20
-rw-r--r--drivers/md/dm-table.c41
-rw-r--r--drivers/md/dm-target.c5
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-thin.c245
-rw-r--r--drivers/md/dm-verity.c25
-rw-r--r--drivers/md/dm-zero.c5
-rw-r--r--drivers/md/dm.c88
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/md.c258
-rw-r--r--drivers/md/md.h28
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c12
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h16
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c50
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c20
-rw-r--r--drivers/md/persistent-data/dm-btree.c31
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c16
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c2
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c14
-rw-r--r--drivers/md/raid1.c15
-rw-r--r--drivers/md/raid10.c15
-rw-r--r--drivers/md/raid5.c55
-rw-r--r--drivers/media/Kconfig7
-rw-r--r--drivers/media/common/Kconfig7
-rw-r--r--drivers/media/common/b2c2/Kconfig5
-rw-r--r--drivers/media/common/siano/Kconfig18
-rw-r--r--drivers/media/common/siano/Makefile6
-rw-r--r--drivers/media/common/siano/smscoreapi.c2
-rw-r--r--drivers/media/common/siano/smsir.c2
-rw-r--r--drivers/media/common/siano/smsir.h9
-rw-r--r--drivers/media/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb-core/dmxdev.h1
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c16
-rw-r--r--drivers/media/dvb-frontends/cx22700.c4
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/dib9000.h2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c8
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c24
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.h6
-rw-r--r--drivers/media/dvb-frontends/ds3000.c15
-rw-r--r--drivers/media/dvb-frontends/l64781.c4
-rw-r--r--drivers/media/dvb-frontends/mt312.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c6
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c6
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c19
-rw-r--r--drivers/media/dvb-frontends/tda10071.c6
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c1
-rw-r--r--drivers/media/firewire/firedtv.h1
-rw-r--r--drivers/media/i2c/adp1653.c4
-rw-r--r--drivers/media/i2c/adv7180.c8
-rw-r--r--drivers/media/i2c/adv7183.c15
-rw-r--r--drivers/media/i2c/adv7604.c16
-rw-r--r--drivers/media/i2c/as3645a.c10
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c14
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c10
-rw-r--r--drivers/media/i2c/s5k4ecgx.c2
-rw-r--r--drivers/media/i2c/smiapp-pll.c219
-rw-r--r--drivers/media/i2c/smiapp-pll.h61
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c74
-rw-r--r--drivers/media/i2c/smiapp/smiapp-limits.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-limits.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg-defs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h2
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c88
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c55
-rw-r--r--drivers/media/i2c/vs6624.c19
-rw-r--r--drivers/media/mmc/siano/Kconfig3
-rw-r--r--drivers/media/mmc/siano/smssdio.c4
-rw-r--r--drivers/media/pci/bt8xx/bt878.c11
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c34
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c16
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c4
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c7
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c1
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c14
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c45
-rw-r--r--drivers/media/pci/cx23885/cimax2.c17
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-av.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c16
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-f300.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c7
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-ioctl.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-ir.c1
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c1
-rw-r--r--drivers/media/pci/cx23885/netup-init.c1
-rw-r--r--drivers/media/pci/cx25821/Kconfig2
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c44
-rw-r--r--drivers/media/pci/cx25821/cx25821-biffuncs.h6
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c8
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c54
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c47
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c8
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c31
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c7
-rw-r--r--drivers/media/pci/cx88/cx88-core.c12
-rw-r--r--drivers/media/pci/cx88/cx88-input.c8
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c26
-rw-r--r--drivers/media/pci/cx88/cx88-video.c8
-rw-r--r--drivers/media/pci/cx88/cx88.h4
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c9
-rw-r--r--drivers/media/pci/dm1105/dm1105.c24
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c6
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.h4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c9
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c4
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c9
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c9
-rw-r--r--drivers/media/pci/mantis/mantis_dvb.c4
-rw-r--r--drivers/media/pci/mantis/mantis_i2c.c2
-rw-r--r--drivers/media/pci/mantis/mantis_input.c5
-rw-r--r--drivers/media/pci/mantis/mantis_pci.c2
-rw-r--r--drivers/media/pci/mantis/mantis_uart.c2
-rw-r--r--drivers/media/pci/mantis/mantis_vp1033.c6
-rw-r--r--drivers/media/pci/meye/meye.c9
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c8
-rw-r--r--drivers/media/pci/ngene/ngene-core.c12
-rw-r--r--drivers/media/pci/ngene/ngene.h5
-rw-r--r--drivers/media/pci/pluto2/pluto2.c25
-rw-r--r--drivers/media/pci/pt1/pt1.c7
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c13
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134.h2
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c26
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c16
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c12
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c15
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c8
-rw-r--r--drivers/media/pci/ttpci/av7110.c8
-rw-r--r--drivers/media/pci/ttpci/av7110.h1
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c4
-rw-r--r--drivers/media/pci/ttpci/budget-av.c4
-rw-r--r--drivers/media/pci/zoran/zoran_card.c20
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/platform/Kconfig20
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c20
-rw-r--r--drivers/media/platform/coda.c10
-rw-r--r--drivers/media/platform/davinci/Kconfig2
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c12
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c20
-rw-r--r--drivers/media/platform/davinci/isif.c9
-rw-r--r--drivers/media/platform/davinci/vpbe.c12
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c318
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c9
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c6
-rw-r--r--drivers/media/platform/davinci/vpif.c14
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c34
-rw-r--r--drivers/media/platform/davinci/vpif_display.c28
-rw-r--r--drivers/media/platform/davinci/vpss.c7
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c14
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/fsl-viu.c12
-rw-r--r--drivers/media/platform/m2m-deinterlace.c24
-rw-r--r--drivers/media/platform/mem2mem_testdev.c4
-rw-r--r--drivers/media/platform/mx2_emmaprp.c20
-rw-r--r--drivers/media/platform/omap/omap_vout.c57
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c38
-rw-r--r--drivers/media/platform/omap/omap_voutlib.h3
-rw-r--r--drivers/media/platform/omap24xxcam.c2
-rw-r--r--drivers/media/platform/omap3isp/isp.c91
-rw-r--r--drivers/media/platform/omap3isp/isp.h5
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c6
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c227
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h10
-rw-r--r--drivers/media/platform/omap3isp/isphist.c8
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c41
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h99
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c5
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c6
-rw-r--r--drivers/media/platform/s3c-camif/Makefile5
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c1672
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c660
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h393
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c606
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.h269
-rw-r--r--drivers/media/platform/s5p-fimc/Kconfig1
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-capture.c11
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.c12
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite.c14
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-m2m.c16
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.c10
-rw-r--r--drivers/media/platform/s5p-fimc/mipi-csis.c14
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c8
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c8
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c100
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c14
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c16
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c2
-rw-r--r--drivers/media/platform/s5p-tv/Kconfig3
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c6
-rw-r--r--drivers/media/platform/s5p-tv/hdmiphy_drv.c8
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c18
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c17
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c6
-rw-r--r--drivers/media/platform/s5p-tv/sii9234_drv.c8
-rw-r--r--drivers/media/platform/sh_vou.c6
-rw-r--r--drivers/media/platform/soc_camera/Kconfig1
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c6
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c22
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c6
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c20
-rw-r--r--drivers/media/platform/timblogiw.c12
-rw-r--r--drivers/media/platform/via-camera.c6
-rw-r--r--drivers/media/platform/vivi.c8
-rw-r--r--drivers/media/radio/Kconfig2
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-cadet.c3
-rw-r--r--drivers/media/radio/radio-isa.c10
-rw-r--r--drivers/media/radio/radio-keene.c1
-rw-r--r--drivers/media/radio/radio-maxiradio.c7
-rw-r--r--drivers/media/radio/radio-sf16fmi.c4
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c19
-rw-r--r--drivers/media/radio/radio-si4713.c1
-rw-r--r--drivers/media/radio/radio-tea5764.c12
-rw-r--r--drivers/media/radio/radio-timb.c6
-rw-r--r--drivers/media/radio/radio-wl1273.c5
-rw-r--r--drivers/media/radio/saa7706h.c8
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c8
-rw-r--r--drivers/media/radio/si4713-i2c.c2
-rw-r--r--drivers/media/radio/tef6862.c8
-rw-r--r--drivers/media/radio/wl128x/Kconfig2
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c10
-rw-r--r--drivers/media/rc/Kconfig2
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/ene_ir.c35
-rw-r--r--drivers/media/rc/fintek-cir.c10
-rw-r--r--drivers/media/rc/gpio-ir-recv.c8
-rw-r--r--drivers/media/rc/iguanair.c10
-rw-r--r--drivers/media/rc/imon.c48
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c4
-rw-r--r--drivers/media/rc/ir-lirc-codec.c4
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c4
-rw-r--r--drivers/media/rc/ir-nec-decoder.c4
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c14
-rw-r--r--drivers/media/rc/ir-rc5-sz-decoder.c6
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c8
-rw-r--r--drivers/media/rc/ir-rx51.c15
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c4
-rw-r--r--drivers/media/rc/ir-sony-decoder.c17
-rw-r--r--drivers/media/rc/ite-cir.c10
-rw-r--r--drivers/media/rc/keymaps/rc-imon-mce.c2
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c2
-rw-r--r--drivers/media/rc/mceusb.c10
-rw-r--r--drivers/media/rc/nuvoton-cir.c17
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-loopback.c2
-rw-r--r--drivers/media/rc/rc-main.c73
-rw-r--r--drivers/media/rc/redrat3.c10
-rw-r--r--drivers/media/rc/streamzap.c6
-rw-r--r--drivers/media/rc/ttusbir.c10
-rw-r--r--drivers/media/rc/winbond-cir.c119
-rw-r--r--drivers/media/tuners/fc2580.c61
-rw-r--r--drivers/media/tuners/max2165.c2
-rw-r--r--drivers/media/tuners/tua9001.c2
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c2
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c5
-rw-r--r--drivers/media/usb/au0828/au0828-video.c16
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c9
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c8
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c14
-rw-r--r--drivers/media/usb/dvb-usb-v2/it913x.c12
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c8
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c11
-rw-r--r--drivers/media/usb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c16
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c146
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c8
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c15
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c84
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c16
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/gspca.c3
-rw-r--r--drivers/media/usb/gspca/gspca.h2
-rw-r--r--drivers/media/usb/gspca/jeilinj.c6
-rw-r--r--drivers/media/usb/gspca/kinect.c1
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k4aa.c6
-rw-r--r--drivers/media/usb/gspca/pac7302.c62
-rw-r--r--drivers/media/usb/gspca/sonixb.c14
-rw-r--r--drivers/media/usb/gspca/sonixj.c1
-rw-r--r--drivers/media/usb/gspca/spca506.c3
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c2
-rw-r--r--drivers/media/usb/pvrusb2/Kconfig8
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/pwc/pwc-ctrl.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c8
-rw-r--r--drivers/media/usb/s2255/s2255drv.c2
-rw-r--r--drivers/media/usb/siano/Kconfig3
-rw-r--r--drivers/media/usb/siano/smsusb.c2
-rw-r--r--drivers/media/usb/sn9c102/sn9c102_core.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c23
-rw-r--r--drivers/media/usb/stk1160/stk1160.h5
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c5
-rw-r--r--drivers/media/usb/tlg2300/pd-dvb.c1
-rw-r--r--drivers/media/usb/tlg2300/pd-video.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c20
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c1
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c10
-rw-r--r--drivers/media/usb/usbvision/usbvision.h2
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c14
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c10
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c73
-rw-r--r--drivers/media/usb/uvc/uvc_video.c1
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h8
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c3
-rw-r--r--drivers/media/v4l2-core/Kconfig3
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c22
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c304
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c700
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c40
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c56
-rw-r--r--drivers/memory/emif.c8
-rw-r--r--drivers/memory/tegra20-mc.c11
-rw-r--r--drivers/memory/tegra30-mc.c11
-rw-r--r--drivers/memstick/Kconfig2
-rw-r--r--drivers/memstick/host/Kconfig12
-rw-r--r--drivers/message/fusion/mptfc.c7
-rw-r--r--drivers/message/fusion/mptsas.c4
-rw-r--r--drivers/message/fusion/mptscsih.c1
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/message/i2o/pci.c11
-rw-r--r--drivers/mfd/Kconfig65
-rw-r--r--drivers/mfd/Makefile10
-rw-r--r--drivers/mfd/ab8500-core.c126
-rw-r--r--drivers/mfd/arizona-core.c24
-rw-r--r--drivers/mfd/arizona-irq.c19
-rw-r--r--drivers/mfd/as3711.c217
-rw-r--r--drivers/mfd/da9052-core.c273
-rw-r--r--drivers/mfd/da9052-i2c.c61
-rw-r--r--drivers/mfd/da9052-irq.c288
-rw-r--r--drivers/mfd/db8500-prcmu.c152
-rw-r--r--drivers/mfd/intel_msic.c9
-rw-r--r--drivers/mfd/jz4740-adc.c20
-rw-r--r--drivers/mfd/lpc_ich.c16
-rw-r--r--drivers/mfd/max77686.c18
-rw-r--r--drivers/mfd/max77693.c34
-rw-r--r--drivers/mfd/max8997.c2
-rw-r--r--drivers/mfd/mc13xxx-core.c94
-rw-r--r--drivers/mfd/mc13xxx-i2c.c22
-rw-r--r--drivers/mfd/mc13xxx-spi.c29
-rw-r--r--drivers/mfd/mc13xxx.h18
-rw-r--r--drivers/mfd/mfd-core.c15
-rw-r--r--drivers/mfd/omap-usb-host.c3
-rw-r--r--drivers/mfd/pcf50633-core.c5
-rw-r--r--drivers/mfd/rc5t583-irq.c2
-rw-r--r--drivers/mfd/retu-mfd.c263
-rw-r--r--drivers/mfd/rtl8411.c29
-rw-r--r--drivers/mfd/rts5209.c21
-rw-r--r--drivers/mfd/rts5229.c21
-rw-r--r--drivers/mfd/rtsx_pcr.c36
-rw-r--r--drivers/mfd/sec-core.c75
-rw-r--r--drivers/mfd/sec-irq.c102
-rw-r--r--drivers/mfd/sta2x11-mfd.c536
-rw-r--r--drivers/mfd/stmpe-i2c.c8
-rw-r--r--drivers/mfd/stmpe.c213
-rw-r--r--drivers/mfd/tc3589x.c17
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c274
-rw-r--r--drivers/mfd/tps6507x.c21
-rw-r--r--drivers/mfd/tps65090.c312
-rw-r--r--drivers/mfd/tps65217.c12
-rw-r--r--drivers/mfd/tps6586x.c103
-rw-r--r--drivers/mfd/tps65910-irq.c260
-rw-r--r--drivers/mfd/tps65910.c234
-rw-r--r--drivers/mfd/tps80031.c573
-rw-r--r--drivers/mfd/twl-core.c227
-rw-r--r--drivers/mfd/twl4030-irq.c10
-rw-r--r--drivers/mfd/twl4030-madc.c14
-rw-r--r--drivers/mfd/twl4030-power.c126
-rw-r--r--drivers/mfd/twl6030-irq.c4
-rw-r--r--drivers/mfd/twl6040-irq.c205
-rw-r--r--drivers/mfd/twl6040.c (renamed from drivers/mfd/twl6040-core.c)146
-rw-r--r--drivers/mfd/vexpress-config.c8
-rw-r--r--drivers/mfd/vexpress-sysreg.c34
-rw-r--r--drivers/mfd/viperboard.c137
-rw-r--r--drivers/mfd/wm5102-tables.c46
-rw-r--r--drivers/mfd/wm5110-tables.c1
-rw-r--r--drivers/mfd/wm8994-core.c17
-rw-r--r--drivers/misc/Kconfig16
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/atmel-ssc.c18
-rw-r--r--drivers/misc/cb710/Kconfig2
-rw-r--r--drivers/misc/lattice-ecp3-config.c243
-rw-r--r--drivers/misc/mei/Kconfig15
-rw-r--r--drivers/misc/mei/Makefile6
-rw-r--r--drivers/misc/mei/amthif.c170
-rw-r--r--drivers/misc/mei/client.c729
-rw-r--r--drivers/misc/mei/client.h102
-rw-r--r--drivers/misc/mei/hbm.c669
-rw-r--r--drivers/misc/mei/hbm.h39
-rw-r--r--drivers/misc/mei/hw-me-regs.h167
-rw-r--r--drivers/misc/mei/hw-me.c576
-rw-r--r--drivers/misc/mei/hw-me.h48
-rw-r--r--drivers/misc/mei/hw.h125
-rw-r--r--drivers/misc/mei/init.c572
-rw-r--r--drivers/misc/mei/interface.c388
-rw-r--r--drivers/misc/mei/interface.h81
-rw-r--r--drivers/misc/mei/interrupt.c656
-rw-r--r--drivers/misc/mei/iorw.c366
-rw-r--r--drivers/misc/mei/main.c536
-rw-r--r--drivers/misc/mei/mei_dev.h350
-rw-r--r--drivers/misc/mei/pci-me.c396
-rw-r--r--drivers/misc/mei/wd.c79
-rw-r--r--drivers/misc/sgi-gru/grufile.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c34
-rw-r--r--drivers/misc/ti-st/Kconfig2
-rw-r--r--drivers/misc/ti-st/st_core.c3
-rw-r--r--drivers/misc/ti-st/st_kim.c37
-rw-r--r--drivers/misc/vmw_vmci/Kconfig16
-rw-r--r--drivers/misc/vmw_vmci/Makefile4
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c1214
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.h182
-rw-r--r--drivers/misc/vmw_vmci/vmci_datagram.c500
-rw-r--r--drivers/misc/vmw_vmci/vmci_datagram.h52
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c604
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.h51
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c117
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.h50
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.c224
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.h25
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c759
-rw-r--r--drivers/misc/vmw_vmci/vmci_handle_array.c142
-rw-r--r--drivers/misc/vmw_vmci/vmci_handle_array.h52
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c1043
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c3425
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.h191
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.c229
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.h59
-rw-r--r--drivers/misc/vmw_vmci/vmci_route.c226
-rw-r--r--drivers/misc/vmw_vmci/vmci_route.h30
-rw-r--r--drivers/mmc/card/Kconfig1
-rw-r--r--drivers/mmc/card/sdio_uart.c13
-rw-r--r--drivers/mmc/core/Kconfig3
-rw-r--r--drivers/mmc/host/Kconfig22
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c2
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c9
-rw-r--r--drivers/mmc/host/mmci.c306
-rw-r--r--drivers/mmc/host/mmci.h3
-rw-r--r--drivers/mmc/host/mvsdio.c92
-rw-r--r--drivers/mmc/host/mxs-mmc.c6
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c34
-rw-r--r--drivers/mmc/host/sdhci-acpi.c6
-rw-r--r--drivers/mmc/host/sdhci-s3c.c7
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c4
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/ar7part.c7
-rw-r--r--drivers/mtd/bcm63xxpart.c32
-rw-r--r--drivers/mtd/chips/Kconfig2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c16
-rw-r--r--drivers/mtd/cmdlinepart.c91
-rw-r--r--drivers/mtd/devices/Kconfig5
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c4
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/docg3.c2
-rw-r--r--drivers/mtd/devices/docprobe.c2
-rw-r--r--drivers/mtd/devices/m25p80.c48
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c20
-rw-r--r--drivers/mtd/devices/spear_smi.c34
-rw-r--r--drivers/mtd/devices/sst25l.c10
-rw-r--r--drivers/mtd/maps/Kconfig7
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/amd76xrom.c7
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c15
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c9
-rw-r--r--drivers/mtd/maps/ck804xrom.c6
-rw-r--r--drivers/mtd/maps/esb2rom.c8
-rw-r--r--drivers/mtd/maps/fortunet.c277
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c12
-rw-r--r--drivers/mtd/maps/ichxrom.c8
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c19
-rw-r--r--drivers/mtd/maps/lantiq-flash.c16
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c4
-rw-r--r--drivers/mtd/maps/pci.c8
-rw-r--r--drivers/mtd/maps/physmap_of.c21
-rw-r--r--drivers/mtd/maps/pismo.c31
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c6
-rw-r--r--drivers/mtd/maps/sa1100-flash.c6
-rw-r--r--drivers/mtd/maps/scb2_flash.c12
-rw-r--r--drivers/mtd/maps/sun_uflash.c6
-rw-r--r--drivers/mtd/maps/vmu-flash.c10
-rw-r--r--drivers/mtd/mtd_blkdevs.c51
-rw-r--r--drivers/mtd/mtdoops.c15
-rw-r--r--drivers/mtd/nand/Kconfig41
-rw-r--r--drivers/mtd/nand/Makefile4
-rw-r--r--drivers/mtd/nand/ams-delta.c6
-rw-r--r--drivers/mtd/nand/atmel_nand.c34
-rw-r--r--drivers/mtd/nand/au1550nd.c8
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/Makefile4
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h22
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c108
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c413
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c8
-rw-r--r--drivers/mtd/nand/cafe_nand.c12
-rw-r--r--drivers/mtd/nand/cs553x_nand.c3
-rw-r--r--drivers/mtd/nand/davinci_nand.c13
-rw-r--r--drivers/mtd/nand/denali.c162
-rw-r--r--drivers/mtd/nand/denali.h5
-rw-r--r--drivers/mtd/nand/denali_dt.c167
-rw-r--r--drivers/mtd/nand/denali_pci.c144
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c73
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c17
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c6
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/fsmc_nand.c134
-rw-r--r--drivers/mtd/nand/gpio.c34
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c10
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c44
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c15
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c15
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c14
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c8
-rw-r--r--drivers/mtd/nand/mxc_nand.c24
-rw-r--r--drivers/mtd/nand/nand_base.c117
-rw-r--r--drivers/mtd/nand/nandsim.c191
-rw-r--r--drivers/mtd/nand/ndfc.c6
-rw-r--r--drivers/mtd/nand/nomadik_nand.c235
-rw-r--r--drivers/mtd/nand/nuc900_nand.c6
-rw-r--r--drivers/mtd/nand/omap2.c6
-rw-r--r--drivers/mtd/nand/orion_nand.c4
-rw-r--r--drivers/mtd/nand/pasemi_nand.c4
-rw-r--r--drivers/mtd/nand/plat_nand.c6
-rw-r--r--drivers/mtd/nand/s3c2410.c14
-rw-r--r--drivers/mtd/nand/sh_flctl.c306
-rw-r--r--drivers/mtd/nand/sharpsl.c6
-rw-r--r--drivers/mtd/nand/socrates_nand.c6
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c7
-rw-r--r--drivers/mtd/ofpart.c5
-rw-r--r--drivers/mtd/onenand/generic.c6
-rw-r--r--drivers/mtd/onenand/omap2.c10
-rw-r--r--drivers/mtd/onenand/samsung.c4
-rw-r--r--drivers/mtd/tests/mtd_nandbiterrs.c73
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c6
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c171
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c152
-rw-r--r--drivers/mtd/tests/mtd_readtest.c44
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c88
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c44
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c124
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c73
-rw-r--r--drivers/mtd/ubi/attach.c23
-rw-r--r--drivers/mtd/ubi/build.c12
-rw-r--r--drivers/mtd/ubi/debug.c34
-rw-r--r--drivers/mtd/ubi/debug.h57
-rw-r--r--drivers/mtd/ubi/fastmap.c6
-rw-r--r--drivers/mtd/ubi/gluebi.c28
-rw-r--r--drivers/mtd/ubi/io.c14
-rw-r--r--drivers/mtd/ubi/ubi.h40
-rw-r--r--drivers/mtd/ubi/upd.c6
-rw-r--r--drivers/mtd/ubi/vmt.c4
-rw-r--r--drivers/mtd/ubi/vtbl.c2
-rw-r--r--drivers/mtd/ubi/wl.c7
-rw-r--r--drivers/net/Kconfig13
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/Space.c106
-rw-r--r--drivers/net/bonding/bond_3ad.c45
-rw-r--r--drivers/net/bonding/bond_alb.c6
-rw-r--r--drivers/net/bonding/bond_main.c287
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/bonding/bonding.h15
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/caif/caif_serial.c2
-rw-r--r--drivers/net/caif/caif_shmcore.c6
-rw-r--r--drivers/net/can/Kconfig38
-rw-r--r--drivers/net/can/Makefile2
-rw-r--r--drivers/net/can/at91_can.c10
-rw-r--r--drivers/net/can/c_can/Kconfig2
-rw-r--r--drivers/net/can/c_can/c_can.c20
-rw-r--r--drivers/net/can/cc770/Kconfig2
-rw-r--r--drivers/net/can/dev.c26
-rw-r--r--drivers/net/can/flexcan.c11
-rw-r--r--drivers/net/can/led.c124
-rw-r--r--drivers/net/can/mcp251x.c23
-rw-r--r--drivers/net/can/mscan/Kconfig2
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/sja1000/Kconfig14
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c8
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000.c17
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c2
-rw-r--r--drivers/net/can/slcan.c8
-rw-r--r--drivers/net/can/softing/Kconfig2
-rw-r--r--drivers/net/can/ti_hecc.c14
-rw-r--r--drivers/net/can/usb/Kconfig8
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c8
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c5
-rw-r--r--drivers/net/can/usb/usb_8dev.c1031
-rw-r--r--drivers/net/cris/eth_v10.c8
-rw-r--r--drivers/net/dsa/mv88e6060.c54
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c125
-rw-r--r--drivers/net/dsa/mv88e6131.c114
-rw-r--r--drivers/net/dsa/mv88e6xxx.c141
-rw-r--r--drivers/net/dsa/mv88e6xxx.h11
-rw-r--r--drivers/net/dummy.c10
-rw-r--r--drivers/net/ethernet/3com/3c501.c896
-rw-r--r--drivers/net/ethernet/3com/3c501.h91
-rw-r--r--drivers/net/ethernet/3com/3c509.c4
-rw-r--r--drivers/net/ethernet/3com/3c515.c7
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c1
-rw-r--r--drivers/net/ethernet/3com/Kconfig20
-rw-r--r--drivers/net/ethernet/3com/Makefile1
-rw-r--r--drivers/net/ethernet/8390/3c503.c777
-rw-r--r--drivers/net/ethernet/8390/3c503.h91
-rw-r--r--drivers/net/ethernet/8390/Kconfig120
-rw-r--r--drivers/net/ethernet/8390/Makefile10
-rw-r--r--drivers/net/ethernet/8390/ac3200.c431
-rw-r--r--drivers/net/ethernet/8390/ax88796.c8
-rw-r--r--drivers/net/ethernet/8390/e2100.c489
-rw-r--r--drivers/net/ethernet/8390/es3210.c445
-rw-r--r--drivers/net/ethernet/8390/hp-plus.c505
-rw-r--r--drivers/net/ethernet/8390/hp.c438
-rw-r--r--drivers/net/ethernet/8390/lne390.c433
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c1
-rw-r--r--drivers/net/ethernet/8390/ne3210.c346
-rw-r--r--drivers/net/ethernet/8390/smc-ultra32.c463
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/Kconfig1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c13
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c13
-rw-r--r--drivers/net/ethernet/amd/Kconfig15
-rw-r--r--drivers/net/ethernet/amd/Makefile1
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c12
-rw-r--r--drivers/net/ethernet/amd/depca.c1910
-rw-r--r--drivers/net/ethernet/amd/depca.h183
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c50
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/atheros/Kconfig8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c79
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c3
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c7
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig18
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c9
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c17
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c1461
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h453
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/Makefile3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h174
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c1030
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h123
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h3274
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c458
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h32
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c1727
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c203
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c3198
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h809
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c134
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c1651
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h360
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c10
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h1
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c1161
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h65
-rw-r--r--drivers/net/ethernet/cadence/Kconfig1
-rw-r--r--drivers/net/ethernet/cadence/macb.c7
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c92
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h136
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c491
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h67
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h73
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h459
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c14
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c13
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c7
-rw-r--r--drivers/net/ethernet/dec/Kconfig16
-rw-r--r--drivers/net/ethernet/dec/Makefile1
-rw-r--r--drivers/net/ethernet/dec/ewrk3.c1961
-rw-r--r--drivers/net/ethernet/dec/ewrk3.h322
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig4
-rw-r--r--drivers/net/ethernet/dlink/Kconfig32
-rw-r--r--drivers/net/ethernet/dlink/Makefile2
-rw-r--r--drivers/net/ethernet/dlink/de600.c529
-rw-r--r--drivers/net/ethernet/dlink/de600.h168
-rw-r--r--drivers/net/ethernet/dlink/de620.c987
-rw-r--r--drivers/net/ethernet/dlink/de620.h117
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c7
-rw-r--r--drivers/net/ethernet/dlink/sundance.c1
-rw-r--r--drivers/net/ethernet/dnet.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h13
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c51
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h20
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c254
-rw-r--r--drivers/net/ethernet/ethoc.c63
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/Kconfig10
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec.c356
-rw-r--r--drivers/net/ethernet/freescale/fec.h23
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c66
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c252
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h210
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c35
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig25
-rw-r--r--drivers/net/ethernet/fujitsu/Makefile2
-rw-r--r--drivers/net/ethernet/fujitsu/at1700.c791
-rw-r--r--drivers/net/ethernet/fujitsu/eth16i.c1483
-rw-r--r--drivers/net/ethernet/i825xx/3c505.c1671
-rw-r--r--drivers/net/ethernet/i825xx/3c505.h292
-rw-r--r--drivers/net/ethernet/i825xx/3c507.c938
-rw-r--r--drivers/net/ethernet/i825xx/82596.c94
-rw-r--r--drivers/net/ethernet/i825xx/Kconfig94
-rw-r--r--drivers/net/ethernet/i825xx/Makefile8
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c1822
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.c1661
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.h179
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c1337
-rw-r--r--drivers/net/ethernet/i825xx/ni52.c1346
-rw-r--r--drivers/net/ethernet/i825xx/ni52.h310
-rw-r--r--drivers/net/ethernet/i825xx/znet.c928
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c12
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h20
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c19
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c9
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/icplus/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/Kconfig19
-rw-r--r--drivers/net/ethernet/intel/e100.c3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h65
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c140
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c558
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c339
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c29
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c117
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h95
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c57
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h58
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile4
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h195
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h282
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c254
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h366
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c426
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h268
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c164
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h74
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h72
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c745
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c354
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h242
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c277
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h252
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c630
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h19
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h18
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h69
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c15
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c242
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c865
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c65
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c87
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h82
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c57
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c65
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c320
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c411
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c223
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c203
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c56
-rw-r--r--drivers/net/ethernet/korina.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c12
-rw-r--r--drivers/net/ethernet/marvell/Kconfig24
-rw-r--r--drivers/net/ethernet/marvell/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c10
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c227
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2846
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c11
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c870
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c193
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c30
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c44
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c1
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c12
-rw-r--r--drivers/net/ethernet/microchip/Kconfig4
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c5
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig3
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.c1075
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.h278
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c3
-rw-r--r--drivers/net/ethernet/neterion/s2io.c1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c1
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c39
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c9
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c14
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig4
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c7
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c7
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c18
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c30
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h544
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c3011
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h438
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2054
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c225
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c726
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c550
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h108
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c250
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h194
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c97
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c823
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1222
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c653
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c271
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c26
-rw-r--r--drivers/net/ethernet/racal/Kconfig33
-rw-r--r--drivers/net/ethernet/racal/Makefile5
-rw-r--r--drivers/net/ethernet/racal/ni5010.c771
-rw-r--r--drivers/net/ethernet/racal/ni5010.h144
-rw-r--r--drivers/net/ethernet/rdc/r6040.c14
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c19
-rw-r--r--drivers/net/ethernet/realtek/8139too.c1
-rw-r--r--drivers/net/ethernet/realtek/Kconfig4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c121
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c12
-rw-r--r--drivers/net/ethernet/s6gmac.c2
-rw-r--r--drivers/net/ethernet/seeq/Kconfig12
-rw-r--r--drivers/net/ethernet/seeq/Makefile1
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.c749
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.h156
-rw-r--r--drivers/net/ethernet/sfc/ptp.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c6
-rw-r--r--drivers/net/ethernet/silan/Kconfig6
-rw-r--r--drivers/net/ethernet/silan/sc92031.c12
-rw-r--r--drivers/net/ethernet/sis/sis900.c22
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c9
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c11
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c10
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/sun/Kconfig8
-rw-r--r--drivers/net/ethernet/sun/niu.c48
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunqe.c7
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c6
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/ti/Kconfig4
-rw-r--r--drivers/net/ethernet/ti/cpmac.c11
-rw-r--r--drivers/net/ethernet/ti/cpsw.c530
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c107
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h24
-rw-r--r--drivers/net/ethernet/ti/cpts.c5
-rw-r--r--drivers/net/ethernet/ti/cpts.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c77
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h12
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c26
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c4
-rw-r--r--drivers/net/ethernet/tile/tilepro.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c12
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c13
-rw-r--r--drivers/net/ethernet/via/via-rhine.c56
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c1
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c37
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c7
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c7
-rw-r--r--drivers/net/hamradio/Kconfig4
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/hamradio/dmascc.c7
-rw-r--r--drivers/net/hamradio/scc.c4
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hippi/Kconfig8
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/ieee802154/at86rf230.c12
-rw-r--r--drivers/net/ieee802154/fakehard.c1
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/irda/Kconfig38
-rw-r--r--drivers/net/irda/ali-ircc.c2
-rw-r--r--drivers/net/irda/irtty-sir.c2
-rw-r--r--drivers/net/loopback.c5
-rw-r--r--drivers/net/macvlan.c56
-rw-r--r--drivers/net/macvtap.c1
-rw-r--r--drivers/net/netconsole.c44
-rw-r--r--drivers/net/ntb_netdev.c408
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/icplus.c29
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c4
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/micrel.c64
-rw-r--r--drivers/net/phy/phy_device.c15
-rw-r--r--drivers/net/phy/realtek.c50
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/ppp/Kconfig23
-rw-r--r--drivers/net/ppp/ppp_generic.c11
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/rionet.c8
-rw-r--r--drivers/net/slip/Kconfig1
-rw-r--r--drivers/net/team/Kconfig3
-rw-r--r--drivers/net/team/team.c261
-rw-r--r--drivers/net/team/team_mode_activebackup.c13
-rw-r--r--drivers/net/tun.c205
-rw-r--r--drivers/net/usb/Kconfig20
-rw-r--r--drivers/net/usb/asix.h18
-rw-r--r--drivers/net/usb/asix_common.c94
-rw-r--r--drivers/net/usb/asix_devices.c51
-rw-r--r--drivers/net/usb/ax88172a.c19
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/cdc_ether.c52
-rw-r--r--drivers/net/usb/cdc_mbim.c19
-rw-r--r--drivers/net/usb/cdc_ncm.c61
-rw-r--r--drivers/net/usb/dm9601.c56
-rw-r--r--drivers/net/usb/hso.c57
-rw-r--r--drivers/net/usb/kalmia.c1
-rw-r--r--drivers/net/usb/pegasus.c19
-rw-r--r--drivers/net/usb/qmi_wwan.c39
-rw-r--r--drivers/net/usb/rndis_host.c1
-rw-r--r--drivers/net/usb/rtl8150.c6
-rw-r--r--drivers/net/usb/sierra_net.c14
-rw-r--r--drivers/net/usb/smsc75xx.c6
-rw-r--r--drivers/net/usb/smsc95xx.c441
-rw-r--r--drivers/net/usb/usbnet.c69
-rw-r--r--drivers/net/veth.c177
-rw-r--r--drivers/net/virtio_net.c281
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c234
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c15
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h3
-rw-r--r--drivers/net/vxlan.c25
-rw-r--r--drivers/net/wan/Kconfig60
-rw-r--r--drivers/net/wan/Makefile5
-rw-r--r--drivers/net/wan/cosa.c9
-rw-r--r--drivers/net/wan/cycx_drv.c569
-rw-r--r--drivers/net/wan/cycx_main.c346
-rw-r--r--drivers/net/wan/cycx_x25.c1602
-rw-r--r--drivers/net/wan/farsync.c6
-rw-r--r--drivers/net/wan/hdlc.c9
-rw-r--r--drivers/net/wan/x25_asy.c1
-rw-r--r--drivers/net/wimax/i2400m/fw.c1
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h3
-rw-r--r--drivers/net/wimax/i2400m/netdev.c41
-rw-r--r--drivers/net/wimax/i2400m/rx.c17
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c12
-rw-r--r--drivers/net/wireless/Kconfig16
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/airo_cs.c5
-rw-r--r--drivers/net/wireless/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c133
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c26
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c36
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c30
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig6
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h180
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c145
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c71
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c122
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h100
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h146
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h132
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h76
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h169
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c117
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c305
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c55
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h59
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c123
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c165
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c150
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig2
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h19
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c43
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c115
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c133
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h6
-rw-r--r--drivers/net/wireless/ath/regd.c37
-rw-r--r--drivers/net/wireless/ath/regd.h10
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig29
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile13
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c572
-rw-r--r--drivers/net/wireless/ath/wil6210/dbg_hexdump.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c603
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c490
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c410
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c132
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c223
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c824
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h362
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h363
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c1020
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h1116
-rw-r--r--drivers/net/wireless/atmel_cs.c5
-rw-r--r--drivers/net/wireless/b43/Kconfig12
-rw-r--r--drivers/net/wireless/b43/b43.h5
-rw-r--r--drivers/net/wireless/b43/dma.h2
-rw-r--r--drivers/net/wireless/b43/main.c54
-rw-r--r--drivers/net/wireless/b43/main.h5
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h96
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c56
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h35
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c395
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c54
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h66
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c2277
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.h183
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c43
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c1468
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h113
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c54
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c114
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/scb.h1
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c40
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c14
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c98
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c105
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c3
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h3
-rw-r--r--drivers/net/wireless/iwlegacy/common.c103
-rw-r--r--drivers/net/wireless/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h31
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c183
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c55
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c19
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c42
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c109
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c98
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c229
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c346
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h80
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c514
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h164
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c197
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c955
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c378
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h282
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h369
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h140
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h312
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h561
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h380
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h580
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h952
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c640
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c134
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c992
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c1314
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h500
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c311
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c682
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c292
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c207
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c197
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c3080
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h393
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c356
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c442
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c1241
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h374
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c519
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h214
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c916
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c472
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/7000.c111
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h6
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h17
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c58
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c361
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c89
-rw-r--r--drivers/net/wireless/libertas/cfg.c45
-rw-r--r--drivers/net/wireless/libertas/cfg.h3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c178
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c261
-rw-r--r--drivers/net/wireless/mwifiex/11ac.h26
-rw-r--r--drivers/net/wireless/mwifiex/11n.c55
-rw-r--r--drivers/net/wireless/mwifiex/11n.h6
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c6
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c5
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/README1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c211
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c159
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c30
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/mwifiex/decl.h18
-rw-r--r--drivers/net/wireless/mwifiex/fw.h141
-rw-r--r--drivers/net/wireless/mwifiex/init.c12
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h8
-rw-r--r--drivers/net/wireless/mwifiex/join.c44
-rw-r--r--drivers/net/wireless/mwifiex/main.h30
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c1264
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h228
-rw-r--r--drivers/net/wireless/mwifiex/scan.c101
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c30
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c12
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c58
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c6
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c44
-rw-r--r--drivers/net/wireless/mwifiex/usb.c34
-rw-r--r--drivers/net/wireless/mwifiex/util.c2
-rw-r--r--drivers/net/wireless/mwifiex/util.h8
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c15
-rw-r--r--drivers/net/wireless/mwl8k.c362
-rw-r--r--drivers/net/wireless/orinoco/main.c17
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c11
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/p54/Kconfig2
-rw-r--r--drivers/net/wireless/p54/p54pci.c7
-rw-r--r--drivers/net/wireless/p54/p54usb.c14
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c14
-rw-r--r--drivers/net/wireless/ray_cs.c19
-rw-r--r--drivers/net/wireless/rndis_wlan.c9
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig5
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c816
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c36
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c53
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h30
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c116
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c78
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig2
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig50
-rw-r--r--drivers/net/wireless/rtlwifi/base.c14
-rw-r--r--drivers/net/wireless/rtlwifi/core.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c13
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c37
-rw-r--r--drivers/net/wireless/rtlwifi/regd.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c15
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c17
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c65
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/Kconfig9
-rw-r--r--drivers/net/wireless/ti/Makefile4
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c (renamed from drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/Kconfig2
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c24
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.c3
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c37
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.h20
-rw-r--r--drivers/net/wireless/ti/wl12xx/event.c116
-rw-r--r--drivers/net/wireless/ti/wl12xx/event.h111
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c195
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c501
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.h140
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h40
-rw-r--r--drivers/net/wireless/ti/wl18xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c87
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h55
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.c80
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.h52
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h22
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c111
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h77
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c272
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.c326
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.h127
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c54
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h50
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig5
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c77
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c423
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h81
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h110
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c326
-rw-r--r--drivers/net/wireless/ti/wlcore/event.h99
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h41
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h12
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c1596
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c33
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c696
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h144
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c34
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c31
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c298
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h35
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h119
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h58
-rw-r--r--drivers/net/wireless/zd1211rw/Kconfig2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/interface.c28
-rw-r--r--drivers/net/xen-netback/netback.c123
-rw-r--r--drivers/net/xen-netfront.c27
-rw-r--r--drivers/nfc/Kconfig16
-rw-r--r--drivers/nfc/Makefile3
-rw-r--r--drivers/nfc/microread/Kconfig35
-rw-r--r--drivers/nfc/microread/Makefile10
-rw-r--r--drivers/nfc/microread/i2c.c340
-rw-r--r--drivers/nfc/microread/mei.c246
-rw-r--r--drivers/nfc/microread/microread.c728
-rw-r--r--drivers/nfc/microread/microread.h33
-rw-r--r--drivers/nfc/nfcwilink.c10
-rw-r--r--drivers/nfc/pn533.c1593
-rw-r--r--drivers/nfc/pn544/Kconfig23
-rw-r--r--drivers/nfc/pn544/Makefile5
-rw-r--r--drivers/nfc/pn544/i2c.c52
-rw-r--r--drivers/nfc/pn544/pn544.c65
-rw-r--r--drivers/ntb/Kconfig13
-rw-r--r--drivers/ntb/Makefile3
-rw-r--r--drivers/ntb/ntb_hw.c1141
-rw-r--r--drivers/ntb/ntb_hw.h181
-rw-r--r--drivers/ntb/ntb_regs.h139
-rw-r--r--drivers/ntb/ntb_transport.c1441
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/of/base.c441
-rw-r--r--drivers/of/device.c13
-rw-r--r--drivers/of/fdt.c10
-rw-r--r--drivers/of/of_mdio.c4
-rw-r--r--drivers/of/of_private.h36
-rw-r--r--drivers/of/platform.c1
-rw-r--r--drivers/of/selftest.c54
-rw-r--r--drivers/parisc/Kconfig1
-rw-r--r--drivers/parisc/dino.c15
-rw-r--r--drivers/parisc/hppb.c6
-rw-r--r--drivers/parisc/lba_pci.c2
-rw-r--r--drivers/parisc/pdc_stable.c6
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/parport/Kconfig2
-rw-r--r--drivers/parport/parport_gsc.c23
-rw-r--r--drivers/parport/parport_pc.c55
-rw-r--r--drivers/parport/parport_serial.c42
-rw-r--r--drivers/parport/parport_sunbpp.c6
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c56
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c11
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c11
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c60
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c15
-rw-r--r--drivers/pci/hotplug/shpchp.h3
-rw-r--r--drivers/pci/hotplug/shpchp_core.c36
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c6
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/msi.c26
-rw-r--r--drivers/pci/pci-acpi.c56
-rw-r--r--drivers/pci/pci-sysfs.c85
-rw-r--r--drivers/pci/pci.c26
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c63
-rw-r--r--drivers/pci/pcie/aspm.c3
-rw-r--r--drivers/pci/pcie/portdrv_pci.c20
-rw-r--r--drivers/pci/probe.c1
-rw-r--r--drivers/pci/quirks.c7
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pcmcia/Kconfig4
-rw-r--r--drivers/pcmcia/cs.c37
-rw-r--r--drivers/pcmcia/i82092.c8
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pcmcia/vrc4171_card.c1
-rw-r--r--drivers/pinctrl/Kconfig39
-rw-r--r--drivers/pinctrl/Makefile11
-rw-r--r--drivers/pinctrl/core.c120
-rw-r--r--drivers/pinctrl/core.h29
-rw-r--r--drivers/pinctrl/devicetree.c5
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c17
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c16
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c11
-rw-r--r--drivers/pinctrl/pinconf-generic.c4
-rw-r--r--drivers/pinctrl/pinconf.c207
-rw-r--r--drivers/pinctrl/pinctrl-ab8500.c484
-rw-r--r--drivers/pinctrl/pinctrl-ab8505.c380
-rw-r--r--drivers/pinctrl/pinctrl-ab8540.c407
-rw-r--r--drivers/pinctrl/pinctrl-ab9540.c485
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c1012
-rw-r--r--drivers/pinctrl/pinctrl-abx500.h234
-rw-r--r--drivers/pinctrl/pinctrl-at91.c46
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c8
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c8
-rw-r--r--drivers/pinctrl/pinctrl-exynos5440.c22
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c51
-rw-r--r--drivers/pinctrl/pinctrl-imx.c25
-rw-r--r--drivers/pinctrl/pinctrl-imx23.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx28.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx35.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx51.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx53.c4
-rw-r--r--drivers/pinctrl/pinctrl-imx6q.c2
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c56
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.h1
-rw-r--r--drivers/pinctrl/pinctrl-mmp2.c2
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c23
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8500.c3
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8540.c3
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-stn8815.c3
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c405
-rw-r--r--drivers/pinctrl/pinctrl-pxa168.c2
-rw-r--r--drivers/pinctrl/pinctrl-pxa3xx.c7
-rw-r--r--drivers/pinctrl/pinctrl-pxa910.c2
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c41
-rw-r--r--drivers/pinctrl/pinctrl-samsung.h2
-rw-r--r--drivers/pinctrl/pinctrl-single.c86
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c70
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.c1505
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.h478
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c16
-rw-r--r--drivers/pinctrl/pinctrl-tegra.h16
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c2769
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c8
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c6
-rw-r--r--drivers/pinctrl/pinctrl-u300.c8
-rw-r--r--drivers/pinctrl/pinctrl-xway.c71
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig116
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile21
-rw-r--r--drivers/pinctrl/sh-pfc/core.c (renamed from drivers/sh/pfc/core.c)355
-rw-r--r--drivers/pinctrl/sh-pfc/core.h72
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c (renamed from drivers/sh/pfc/gpio.c)114
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c2612
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c2624
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c1592
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c2131
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c2834
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c1658
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c2798
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c1236
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c1779
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c1903
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c2225
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c2475
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c2282
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c1304
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c837
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c582
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c (renamed from drivers/sh/pfc/pinctrl.c)170
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h195
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c13
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c11
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h11
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c2
-rw-r--r--drivers/platform/Kconfig4
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/goldfish/Kconfig5
-rw-r--r--drivers/platform/goldfish/Makefile5
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c612
-rw-r--r--drivers/platform/goldfish/pdev_bus.c240
-rw-r--r--drivers/platform/x86/Kconfig6
-rw-r--r--drivers/platform/x86/acer-wmi.c68
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/amilo-rfkill.c4
-rw-r--r--drivers/platform/x86/apple-gmux.c7
-rw-r--r--drivers/platform/x86/asus-laptop.c27
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c2
-rw-r--r--drivers/platform/x86/classmate-laptop.c10
-rw-r--r--drivers/platform/x86/compal-laptop.c10
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/platform/x86/eeepc-laptop.c10
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c4
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c17
-rw-r--r--drivers/platform/x86/hp-wmi.c8
-rw-r--r--drivers/platform/x86/hp_accel.c2
-rw-r--r--drivers/platform/x86/ibm_rtl.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c13
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c6
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c2
-rw-r--r--drivers/platform/x86/intel_oaktrail.c6
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c2
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/samsung-laptop.c14
-rw-r--r--drivers/platform/x86/samsung-q10.c6
-rw-r--r--drivers/platform/x86/sony-laptop.c19
-rw-r--r--drivers/platform/x86/tc1100-wmi.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c7
-rw-r--r--drivers/platform/x86/topstar-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c17
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c4
-rw-r--r--drivers/platform/x86/wmi.c4
-rw-r--r--drivers/platform/x86/xo1-rfkill.c6
-rw-r--r--drivers/platform/x86/xo15-ebook.c2
-rw-r--r--drivers/pnp/interface.c105
-rw-r--r--drivers/pnp/manager.c25
-rw-r--r--drivers/pnp/pnpacpi/core.c10
-rw-r--r--drivers/pnp/pnpbios/Kconfig4
-rw-r--r--drivers/power/88pm860x_battery.c13
-rw-r--r--drivers/power/Kconfig32
-rw-r--r--drivers/power/Makefile6
-rw-r--r--drivers/power/ab8500_bmdata.c507
-rw-r--r--drivers/power/ab8500_btemp.c235
-rw-r--r--drivers/power/ab8500_charger.c1113
-rw-r--r--drivers/power/ab8500_fg.c497
-rw-r--r--drivers/power/abx500_chargalg.c232
-rw-r--r--drivers/power/avs/smartreflex.c2
-rw-r--r--drivers/power/bq2415x_charger.c1666
-rw-r--r--drivers/power/bq27x00_battery.c20
-rw-r--r--drivers/power/charger-manager.c348
-rw-r--r--drivers/power/da9030_battery.c1
-rw-r--r--drivers/power/da9052-battery.c46
-rw-r--r--drivers/power/ds2782_battery.c73
-rw-r--r--drivers/power/generic-adc-battery.c25
-rw-r--r--drivers/power/goldfish_battery.c236
-rw-r--r--drivers/power/jz4740-battery.c46
-rw-r--r--drivers/power/lp8727_charger.c8
-rw-r--r--drivers/power/lp8788-charger.c92
-rw-r--r--drivers/power/max17040_battery.c4
-rw-r--r--drivers/power/max17042_battery.c3
-rw-r--r--drivers/power/max8925_power.c51
-rw-r--r--drivers/power/olpc_battery.c2
-rw-r--r--drivers/power/pm2301_charger.c1088
-rw-r--r--drivers/power/pm2301_charger.h513
-rw-r--r--drivers/power/power_supply_core.c100
-rw-r--r--drivers/power/power_supply_sysfs.c5
-rw-r--r--drivers/power/reset/Kconfig17
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/gpio-poweroff.c39
-rw-r--r--drivers/power/reset/qnap-poweroff.c116
-rw-r--r--drivers/power/reset/restart-poweroff.c65
-rw-r--r--drivers/power/rx51_battery.c251
-rw-r--r--drivers/power/twl4030_charger.c12
-rw-r--r--drivers/pps/clients/Kconfig2
-rw-r--r--drivers/pps/clients/pps-gpio.c2
-rw-r--r--drivers/pps/clients/pps-ldisc.c30
-rw-r--r--drivers/pps/pps.c47
-rw-r--r--drivers/ps3/ps3-lpm.c2
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/ps3/ps3av.c2
-rw-r--r--drivers/pwm/Kconfig39
-rw-r--r--drivers/pwm/Makefile5
-rw-r--r--drivers/pwm/core.c29
-rw-r--r--drivers/pwm/pwm-imx.c8
-rw-r--r--drivers/pwm/pwm-lpc32xx.c29
-rw-r--r--drivers/pwm/pwm-mxs.c6
-rw-r--r--drivers/pwm/pwm-puv3.c6
-rw-r--r--drivers/pwm/pwm-pxa.c6
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-spear.c276
-rw-r--r--drivers/pwm/pwm-tegra.c6
-rw-r--r--drivers/pwm/pwm-tiecap.c54
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c68
-rw-r--r--drivers/pwm/pwm-tipwmss.c139
-rw-r--r--drivers/pwm/pwm-tipwmss.h39
-rw-r--r--drivers/pwm/pwm-twl-led.c344
-rw-r--r--drivers/pwm/pwm-twl.c359
-rw-r--r--drivers/pwm/pwm-twl6030.c184
-rw-r--r--drivers/pwm/pwm-vt8500.c102
-rw-r--r--drivers/regulator/88pm8607.c40
-rw-r--r--drivers/regulator/Kconfig10
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/anatop-regulator.c43
-rw-r--r--drivers/regulator/arizona-micsupp.c78
-rw-r--r--drivers/regulator/as3711-regulator.c2
-rw-r--r--drivers/regulator/core.c58
-rw-r--r--drivers/regulator/da9052-regulator.c46
-rw-r--r--drivers/regulator/da9055-regulator.c15
-rw-r--r--drivers/regulator/dbx500-prcmu.c1
-rw-r--r--drivers/regulator/fixed.c2
-rw-r--r--drivers/regulator/gpio-regulator.c9
-rw-r--r--drivers/regulator/lp3971.c22
-rw-r--r--drivers/regulator/lp3972.c22
-rw-r--r--drivers/regulator/lp872x.c36
-rw-r--r--drivers/regulator/lp8755.c566
-rw-r--r--drivers/regulator/lp8788-buck.c41
-rw-r--r--drivers/regulator/lp8788-ldo.c133
-rw-r--r--drivers/regulator/max77686.c29
-rw-r--r--drivers/regulator/max8907-regulator.c10
-rw-r--r--drivers/regulator/max8925-regulator.c3
-rw-r--r--drivers/regulator/max8973-regulator.c12
-rw-r--r--drivers/regulator/max8997.c164
-rw-r--r--drivers/regulator/max8998.c58
-rw-r--r--drivers/regulator/mc13892-regulator.c111
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c25
-rw-r--r--drivers/regulator/mc13xxx.h4
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c9
-rw-r--r--drivers/regulator/s2mps11.c4
-rw-r--r--drivers/regulator/s5m8767.c270
-rw-r--r--drivers/regulator/tps51632-regulator.c152
-rw-r--r--drivers/regulator/tps6507x-regulator.c92
-rw-r--r--drivers/regulator/tps65090-regulator.c106
-rw-r--r--drivers/regulator/tps65217-regulator.c4
-rw-r--r--drivers/regulator/tps6586x-regulator.c54
-rw-r--r--drivers/regulator/tps65910-regulator.c8
-rw-r--r--drivers/regulator/tps80031-regulator.c2
-rw-r--r--drivers/regulator/twl-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig7
-rw-r--r--drivers/remoteproc/omap_remoteproc.c6
-rw-r--r--drivers/rpmsg/Kconfig3
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c12
-rw-r--r--drivers/rtc/Kconfig111
-rw-r--r--drivers/rtc/Makefile10
-rw-r--r--drivers/rtc/class.c12
-rw-r--r--drivers/rtc/interface.c6
-rw-r--r--drivers/rtc/rtc-88pm80x.c6
-rw-r--r--drivers/rtc/rtc-88pm860x.c10
-rw-r--r--drivers/rtc/rtc-ab8500.c6
-rw-r--r--drivers/rtc/rtc-at91rm9200.c17
-rw-r--r--drivers/rtc/rtc-at91sam9.c6
-rw-r--r--drivers/rtc/rtc-au1xxx.c6
-rw-r--r--drivers/rtc/rtc-bfin.c6
-rw-r--r--drivers/rtc/rtc-bq32k.c4
-rw-r--r--drivers/rtc/rtc-bq4802.c6
-rw-r--r--drivers/rtc/rtc-cmos.c24
-rw-r--r--drivers/rtc/rtc-coh901331.c7
-rw-r--r--drivers/rtc/rtc-da9052.c24
-rw-r--r--drivers/rtc/rtc-da9055.c413
-rw-r--r--drivers/rtc/rtc-davinci.c53
-rw-r--r--drivers/rtc/rtc-dev.c30
-rw-r--r--drivers/rtc/rtc-dm355evm.c6
-rw-r--r--drivers/rtc/rtc-ds1286.c6
-rw-r--r--drivers/rtc/rtc-ds1302.c4
-rw-r--r--drivers/rtc/rtc-ds1305.c14
-rw-r--r--drivers/rtc/rtc-ds1307.c19
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-ds1390.c6
-rw-r--r--drivers/rtc/rtc-ds1511.c8
-rw-r--r--drivers/rtc/rtc-ds1553.c6
-rw-r--r--drivers/rtc/rtc-ds1742.c6
-rw-r--r--drivers/rtc/rtc-ds2404.c18
-rw-r--r--drivers/rtc/rtc-ds3232.c8
-rw-r--r--drivers/rtc/rtc-ds3234.c6
-rw-r--r--drivers/rtc/rtc-efi.c10
-rw-r--r--drivers/rtc/rtc-ep93xx.c6
-rw-r--r--drivers/rtc/rtc-fm3130.c41
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c292
-rw-r--r--drivers/rtc/rtc-imxdi.c20
-rw-r--r--drivers/rtc/rtc-isl12022.c2
-rw-r--r--drivers/rtc/rtc-isl1208.c3
-rw-r--r--drivers/rtc/rtc-jz4740.c6
-rw-r--r--drivers/rtc/rtc-lp8788.c338
-rw-r--r--drivers/rtc/rtc-lpc32xx.c6
-rw-r--r--drivers/rtc/rtc-ls1x.c6
-rw-r--r--drivers/rtc/rtc-m41t93.c6
-rw-r--r--drivers/rtc/rtc-m41t94.c6
-rw-r--r--drivers/rtc/rtc-m48t35.c6
-rw-r--r--drivers/rtc/rtc-m48t59.c6
-rw-r--r--drivers/rtc/rtc-m48t86.c6
-rw-r--r--drivers/rtc/rtc-max6902.c6
-rw-r--r--drivers/rtc/rtc-max77686.c641
-rw-r--r--drivers/rtc/rtc-max8907.c12
-rw-r--r--drivers/rtc/rtc-max8925.c6
-rw-r--r--drivers/rtc/rtc-max8997.c552
-rw-r--r--drivers/rtc/rtc-max8998.c6
-rw-r--r--drivers/rtc/rtc-mpc5121.c13
-rw-r--r--drivers/rtc/rtc-mrst.c12
-rw-r--r--drivers/rtc/rtc-mv.c2
-rw-r--r--drivers/rtc/rtc-mxc.c6
-rw-r--r--drivers/rtc/rtc-nuc900.c6
-rw-r--r--drivers/rtc/rtc-omap.c80
-rw-r--r--drivers/rtc/rtc-pcap.c6
-rw-r--r--drivers/rtc/rtc-pcf2123.c6
-rw-r--r--drivers/rtc/rtc-pcf50633.c6
-rw-r--r--drivers/rtc/rtc-pcf8523.c357
-rw-r--r--drivers/rtc/rtc-pcf8563.c4
-rw-r--r--drivers/rtc/rtc-pcf8583.c8
-rw-r--r--drivers/rtc/rtc-pl031.c12
-rw-r--r--drivers/rtc/rtc-pm8xxx.c6
-rw-r--r--drivers/rtc/rtc-puv3.c6
-rw-r--r--drivers/rtc/rtc-pxa.c23
-rw-r--r--drivers/rtc/rtc-r9701.c6
-rw-r--r--drivers/rtc/rtc-rc5t583.c6
-rw-r--r--drivers/rtc/rtc-rs5c313.c9
-rw-r--r--drivers/rtc/rtc-rs5c348.c6
-rw-r--r--drivers/rtc/rtc-rs5c372.c10
-rw-r--r--drivers/rtc/rtc-rv3029c2.c8
-rw-r--r--drivers/rtc/rtc-rx4581.c314
-rw-r--r--drivers/rtc/rtc-rx8025.c8
-rw-r--r--drivers/rtc/rtc-rx8581.c8
-rw-r--r--drivers/rtc/rtc-s3c.c78
-rw-r--r--drivers/rtc/rtc-sa1100.c15
-rw-r--r--drivers/rtc/rtc-snvs.c16
-rw-r--r--drivers/rtc/rtc-spear.c99
-rw-r--r--drivers/rtc/rtc-stk17ta8.c6
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c3
-rw-r--r--drivers/rtc/rtc-sun4v.c10
-rw-r--r--drivers/rtc/rtc-tegra.c21
-rw-r--r--drivers/rtc/rtc-test.c18
-rw-r--r--drivers/rtc/rtc-tile.c6
-rw-r--r--drivers/rtc/rtc-tps6586x.c356
-rw-r--r--drivers/rtc/rtc-tps65910.c59
-rw-r--r--drivers/rtc/rtc-tps80031.c349
-rw-r--r--drivers/rtc/rtc-twl.c44
-rw-r--r--drivers/rtc/rtc-vr41xx.c8
-rw-r--r--drivers/rtc/rtc-vt8500.c62
-rw-r--r--drivers/rtc/rtc-wm831x.c13
-rw-r--r--drivers/rtc/rtc-wm8350.c4
-rw-r--r--drivers/rtc/systohc.c44
-rw-r--r--drivers/s390/block/dasd.c23
-rw-r--r--drivers/s390/block/dasd_3990_erp.c8
-rw-r--r--drivers/s390/block/dasd_alias.c4
-rw-r--r--drivers/s390/block/dasd_diag.c12
-rw-r--r--drivers/s390/block/dasd_eckd.c32
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_erp.c4
-rw-r--r--drivers/s390/block/dasd_fba.c4
-rw-r--r--drivers/s390/block/scm_blk.h41
-rw-r--r--drivers/s390/char/Kconfig8
-rw-r--r--drivers/s390/char/con3215.c20
-rw-r--r--drivers/s390/char/fs3270.c29
-rw-r--r--drivers/s390/char/keyboard.h16
-rw-r--r--drivers/s390/char/raw3270.c613
-rw-r--r--drivers/s390/char/raw3270.h12
-rw-r--r--drivers/s390/char/sclp.c8
-rw-r--r--drivers/s390/char/sclp_cmd.c10
-rw-r--r--drivers/s390/char/sclp_tty.c14
-rw-r--r--drivers/s390/char/sclp_vt220.c12
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/char/tty3270.c191
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/char/zcore.c64
-rw-r--r--drivers/s390/cio/chsc.c81
-rw-r--r--drivers/s390/cio/chsc.h2
-rw-r--r--drivers/s390/cio/chsc_sch.c2
-rw-r--r--drivers/s390/cio/cio.c14
-rw-r--r--drivers/s390/cio/cmf.c6
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/device.c22
-rw-r--r--drivers/s390/cio/device.h5
-rw-r--r--drivers/s390/cio/device_fsm.c2
-rw-r--r--drivers/s390/cio/device_pgid.c123
-rw-r--r--drivers/s390/cio/eadm_sch.c2
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/cio/qdio_main.c12
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/kvm/kvm_virtio.c2
-rw-r--r--drivers/s390/kvm/virtio_ccw.c2
-rw-r--r--drivers/s390/net/Kconfig4
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core.h10
-rw-r--r--drivers/s390/net/qeth_core_main.c256
-rw-r--r--drivers/s390/net/qeth_core_mpc.c1
-rw-r--r--drivers/s390/net/qeth_core_mpc.h5
-rw-r--r--drivers/s390/net/qeth_core_sys.c3
-rw-r--r--drivers/s390/net/qeth_l2_main.c16
-rw-r--r--drivers/s390/net/qeth_l3_main.c35
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/sbus/char/Kconfig7
-rw-r--r--drivers/sbus/char/bbc_i2c.c6
-rw-r--r--drivers/sbus/char/display7seg.c6
-rw-r--r--drivers/sbus/char/envctrl.c6
-rw-r--r--drivers/sbus/char/flash.c6
-rw-r--r--drivers/sbus/char/uctrl.c6
-rw-r--r--drivers/scsi/3w-9xxx.c4
-rw-r--r--drivers/scsi/3w-sas.c4
-rw-r--r--drivers/scsi/3w-xxxx.c4
-rw-r--r--drivers/scsi/BusLogic.c2
-rw-r--r--drivers/scsi/Kconfig22
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/NCR_D700.c12
-rw-r--r--drivers/scsi/NCR_Q720.c2
-rw-r--r--drivers/scsi/a100u2w.c8
-rw-r--r--drivers/scsi/a2091.c9
-rw-r--r--drivers/scsi/aacraid/aachba.c87
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/linit.c15
-rw-r--r--drivers/scsi/advansys.c152
-rw-r--r--drivers/scsi/aha152x.c2
-rw-r--r--drivers/scsi/aha1740.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c23
-rw-r--r--drivers/scsi/arm/Kconfig10
-rw-r--r--drivers/scsi/arm/acornscsi.c7
-rw-r--r--drivers/scsi/arm/arxescsi.c7
-rw-r--r--drivers/scsi/arm/cumana_1.c8
-rw-r--r--drivers/scsi/arm/cumana_2.c8
-rw-r--r--drivers/scsi/arm/eesox.c7
-rw-r--r--drivers/scsi/arm/oak.c7
-rw-r--r--drivers/scsi/arm/powertec.c8
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/be2iscsi/be.h7
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c236
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h93
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c124
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1063
-rw-r--r--drivers/scsi/be2iscsi/be_main.h152
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c424
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h23
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c45
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/bvme6000_scsi.c6
-rw-r--r--drivers/scsi/csiostor/Kconfig19
-rw-r--r--drivers/scsi/csiostor/Makefile11
-rw-r--r--drivers/scsi/csiostor/csio_attr.c796
-rw-r--r--drivers/scsi/csiostor/csio_defs.h121
-rw-r--r--drivers/scsi/csiostor/csio_hw.c4395
-rw-r--r--drivers/scsi/csiostor/csio_hw.h665
-rw-r--r--drivers/scsi/csiostor/csio_init.c1269
-rw-r--r--drivers/scsi/csiostor/csio_init.h158
-rw-r--r--drivers/scsi/csiostor/csio_isr.c624
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c2135
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h255
-rw-r--r--drivers/scsi/csiostor/csio_mb.c1750
-rw-r--r--drivers/scsi/csiostor/csio_mb.h278
-rw-r--r--drivers/scsi/csiostor/csio_rnode.c913
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h141
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2555
-rw-r--r--drivers/scsi/csiostor/csio_scsi.h342
-rw-r--r--drivers/scsi/csiostor/csio_wr.c1632
-rw-r--r--drivers/scsi/csiostor/csio_wr.h512
-rw-r--r--drivers/scsi/csiostor/t4fw_api_stor.h539
-rw-r--r--drivers/scsi/dc395x.c51
-rw-r--r--drivers/scsi/device_handler/Kconfig4
-rw-r--r--drivers/scsi/dmx3191d.c8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c7
-rw-r--r--drivers/scsi/g_NCR5380.c2
-rw-r--r--drivers/scsi/gdth.c17
-rw-r--r--drivers/scsi/gvp11.c11
-rw-r--r--drivers/scsi/hosts.c4
-rw-r--r--drivers/scsi/hpsa.c92
-rw-r--r--drivers/scsi/hptiop.c416
-rw-r--r--drivers/scsi/hptiop.h72
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c2
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c32
-rw-r--r--drivers/scsi/ips.c10
-rw-r--r--drivers/scsi/isci/init.c10
-rw-r--r--drivers/scsi/jazz_esp.c6
-rw-r--r--drivers/scsi/lasi700.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c85
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c71
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_esp.c6
-rw-r--r--drivers/scsi/megaraid.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c4
-rw-r--r--drivers/scsi/mpt3sas/Kconfig67
-rw-r--r--drivers/scsi/mpt3sas/Makefile8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h1164
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h3323
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h560
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h1665
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_raid.h346
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_sas.h295
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h437
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_type.h56
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4840
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h1139
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c1650
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3297
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h418
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debug.h219
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8166
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c2128
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c434
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h193
-rw-r--r--drivers/scsi/mvme16x_scsi.c8
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c8
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c7
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h14
-rw-r--r--drivers/scsi/mvsas/mv_chips.h2
-rw-r--r--drivers/scsi/mvsas/mv_init.c19
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/mvsas/mv_sas.h6
-rw-r--r--drivers/scsi/mvumi.c5
-rw-r--r--drivers/scsi/nsp32.c16
-rw-r--r--drivers/scsi/osd/osd_uld.c54
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c36
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c28
-rw-r--r--drivers/scsi/pmcraid.c31
-rw-r--r--drivers/scsi/ps3rom.c2
-rw-r--r--drivers/scsi/qla1280.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c72
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c153
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c38
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c79
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c59
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c6
-rw-r--r--drivers/scsi/qlogicfas.c2
-rw-r--r--drivers/scsi/qlogicpti.c20
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_pm.c98
-rw-r--r--drivers/scsi/scsi_sysfs.c11
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/scsi_transport_sas.c1
-rw-r--r--drivers/scsi/scsi_transport_srp.c51
-rw-r--r--drivers/scsi/sd.c35
-rw-r--r--drivers/scsi/sgiwd93.c4
-rw-r--r--drivers/scsi/sim710.c11
-rw-r--r--drivers/scsi/sni_53c710.c4
-rw-r--r--drivers/scsi/stex.c5
-rw-r--r--drivers/scsi/storvsc_drv.c12
-rw-r--r--drivers/scsi/sun3x_esp.c6
-rw-r--r--drivers/scsi/sun_esp.c30
-rw-r--r--drivers/scsi/sym53c416.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c23
-rw-r--r--drivers/scsi/tmscsim.c23
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
-rw-r--r--drivers/scsi/virtio_scsi.c32
-rw-r--r--drivers/scsi/vmw_pvscsi.c9
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/scsi/zorro7xx.c12
-rw-r--r--drivers/sh/Kconfig1
-rw-r--r--drivers/sh/Makefile1
-rw-r--r--drivers/sh/clk/cpg.c7
-rw-r--r--drivers/sh/pfc/Kconfig26
-rw-r--r--drivers/sh/pfc/Makefile3
-rw-r--r--drivers/sn/ioc3.c14
-rw-r--r--drivers/spi/Kconfig63
-rw-r--r--drivers/spi/Makefile10
-rw-r--r--drivers/spi/spi-altera.c8
-rw-r--r--drivers/spi/spi-ath79.c121
-rw-r--r--drivers/spi/spi-atmel.c23
-rw-r--r--drivers/spi/spi-au1550.c8
-rw-r--r--drivers/spi/spi-bcm63xx.c201
-rw-r--r--drivers/spi/spi-bfin-sport.c11
-rw-r--r--drivers/spi/spi-bfin5xx.c9
-rw-r--r--drivers/spi/spi-bitbang.c60
-rw-r--r--drivers/spi/spi-clps711x.c296
-rw-r--r--drivers/spi/spi-coldfire-qspi.c9
-rw-r--r--drivers/spi/spi-davinci.c125
-rw-r--r--drivers/spi/spi-dw-mmio.c6
-rw-r--r--drivers/spi/spi-dw-pci.c6
-rw-r--r--drivers/spi/spi-dw.c6
-rw-r--r--drivers/spi/spi-ep93xx.c15
-rw-r--r--drivers/spi/spi-falcon.c9
-rw-r--r--drivers/spi/spi-fsl-espi.c8
-rw-r--r--drivers/spi/spi-fsl-lib.c4
-rw-r--r--drivers/spi/spi-fsl-spi.c18
-rw-r--r--drivers/spi/spi-gpio.c36
-rw-r--r--drivers/spi/spi-imx.c7
-rw-r--r--drivers/spi/spi-mpc512x-psc.c27
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c8
-rw-r--r--drivers/spi/spi-mpc52xx.c8
-rw-r--r--drivers/spi/spi-mxs.c17
-rw-r--r--drivers/spi/spi-nuc900.c6
-rw-r--r--drivers/spi/spi-oc-tiny.c18
-rw-r--r--drivers/spi/spi-octeon.c6
-rw-r--r--drivers/spi/spi-omap-100k.c8
-rw-r--r--drivers/spi/spi-omap-uwire.c6
-rw-r--r--drivers/spi/spi-omap2-mcspi.c125
-rw-r--r--drivers/spi/spi-orion.c48
-rw-r--r--drivers/spi/spi-pl022.c61
-rw-r--r--drivers/spi/spi-ppc4xx.c10
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c392
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c139
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c490
-rw-r--r--drivers/spi/spi-pxa2xx.c1105
-rw-r--r--drivers/spi/spi-pxa2xx.h221
-rw-r--r--drivers/spi/spi-rspi.c10
-rw-r--r--drivers/spi/spi-s3c24xx.c6
-rw-r--r--drivers/spi/spi-s3c64xx.c202
-rw-r--r--drivers/spi/spi-sh-hspi.c51
-rw-r--r--drivers/spi/spi-sh-msiof.c62
-rw-r--r--drivers/spi/spi-sh.c6
-rw-r--r--drivers/spi/spi-sirf.c23
-rw-r--r--drivers/spi/spi-stmp.c664
-rw-r--r--drivers/spi/spi-tegra20-sflash.c671
-rw-r--r--drivers/spi/spi-tegra20-slink.c1353
-rw-r--r--drivers/spi/spi-ti-ssp.c6
-rw-r--r--drivers/spi/spi-tle62x0.c6
-rw-r--r--drivers/spi/spi-topcliff-pch.c12
-rw-r--r--drivers/spi/spi-txx9.c12
-rw-r--r--drivers/spi/spi-xcomm.c6
-rw-r--r--drivers/spi/spi-xilinx.c6
-rw-r--r--drivers/spi/spi.c118
-rw-r--r--drivers/spi/spidev.c16
-rw-r--r--drivers/ssb/Kconfig13
-rw-r--r--drivers/ssb/Makefile2
-rw-r--r--drivers/ssb/driver_chipcommon.c78
-rw-r--r--drivers/ssb/driver_chipcommon_sflash.c140
-rw-r--r--drivers/ssb/driver_extif.c43
-rw-r--r--drivers/ssb/driver_gige.c14
-rw-r--r--drivers/ssb/driver_gpio.c210
-rw-r--r--drivers/ssb/driver_mipscore.c51
-rw-r--r--drivers/ssb/driver_pcicore.c10
-rw-r--r--drivers/ssb/main.c51
-rw-r--r--drivers/ssb/pcihost_wrapper.c6
-rw-r--r--drivers/ssb/ssb_private.h37
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile4
-rw-r--r--drivers/staging/android/Kconfig29
-rw-r--r--drivers/staging/android/alarm-dev.c277
-rw-r--r--drivers/staging/android/android_alarm.h19
-rw-r--r--drivers/staging/android/binder.c9
-rw-r--r--drivers/staging/android/binder.h6
-rw-r--r--drivers/staging/asus_oled/asus_oled.c15
-rw-r--r--drivers/staging/bcm/Adapter.h20
-rw-r--r--drivers/staging/bcm/Bcmchar.c14
-rw-r--r--drivers/staging/bcm/Bcmnet.c4
-rw-r--r--drivers/staging/bcm/CmHost.c6
-rw-r--r--drivers/staging/bcm/CmHost.h12
-rw-r--r--drivers/staging/bcm/Debug.h356
-rw-r--r--drivers/staging/bcm/IPv6Protocol.c34
-rw-r--r--drivers/staging/bcm/IPv6ProtocolHdr.h149
-rw-r--r--drivers/staging/bcm/InterfaceDld.c6
-rw-r--r--drivers/staging/bcm/InterfaceInit.c14
-rw-r--r--drivers/staging/bcm/Ioctl.h6
-rw-r--r--drivers/staging/bcm/Macros.h25
-rw-r--r--drivers/staging/bcm/Misc.c10
-rw-r--r--drivers/staging/bcm/PHSDefines.h200
-rw-r--r--drivers/staging/bcm/PHSModule.c160
-rw-r--r--drivers/staging/bcm/PHSModule.h14
-rw-r--r--drivers/staging/bcm/Protocol.h177
-rw-r--r--drivers/staging/bcm/Prototypes.h6
-rw-r--r--drivers/staging/bcm/Qos.c46
-rw-r--r--drivers/staging/bcm/hostmibs.c12
-rw-r--r--drivers/staging/bcm/led_control.c8
-rw-r--r--drivers/staging/bcm/led_control.h138
-rw-r--r--drivers/staging/bcm/nvm.c87
-rw-r--r--drivers/staging/bcm/nvm.h665
-rw-r--r--drivers/staging/bcm/target_params.h128
-rw-r--r--drivers/staging/bcm/vendorspecificextn.c2
-rw-r--r--drivers/staging/bcm/vendorspecificextn.h2
-rw-r--r--drivers/staging/ccg/Kconfig2
-rw-r--r--drivers/staging/ccg/u_ether.c10
-rw-r--r--drivers/staging/ccg/u_serial.c13
-rw-r--r--drivers/staging/ced1401/ced_ioc.c18
-rw-r--r--drivers/staging/ced1401/usb1401.c14
-rw-r--r--drivers/staging/ced1401/usb1401.h2
-rw-r--r--drivers/staging/comedi/Kconfig47
-rw-r--r--drivers/staging/comedi/Makefile19
-rw-r--r--drivers/staging/comedi/comedi.h12
-rw-r--r--drivers/staging/comedi/comedi_buf.c415
-rw-r--r--drivers/staging/comedi/comedi_compat32.c4
-rw-r--r--drivers/staging/comedi/comedi_fops.c1008
-rw-r--r--drivers/staging/comedi/comedi_internal.h33
-rw-r--r--drivers/staging/comedi/comedi_pci.c140
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.c160
-rw-r--r--drivers/staging/comedi/comedi_usb.c108
-rw-r--r--drivers/staging/comedi/comedidev.h370
-rw-r--r--drivers/staging/comedi/drivers.c859
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c9
-rw-r--r--drivers/staging/comedi/drivers/Makefile4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c26
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci16xx.c807
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.c263
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c589
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c10
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c114
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c257
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1710.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c282
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c187
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c482
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.c172
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.h10
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c9
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c17
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c39
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c12
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c13
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c8
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c9
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c12
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c20
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c8
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c8
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c12
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c132
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c9
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c9
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c5
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c77
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c9
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c14
-rw-r--r--drivers/staging/comedi/drivers/das08.c358
-rw-r--r--drivers/staging/comedi/drivers/das08.h2
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c152
-rw-r--r--drivers/staging/comedi/drivers/das08_isa.c217
-rw-r--r--drivers/staging/comedi/drivers/das08_pci.c121
-rw-r--r--drivers/staging/comedi/drivers/das16.c2
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c11
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c30
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c11
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c11
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c13
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c15
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c9
-rw-r--r--drivers/staging/comedi/drivers/me4000.c14
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c9
-rw-r--r--drivers/staging/comedi/drivers/mite.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c119
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c303
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c55
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c257
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c399
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c27
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c19
-rw-r--r--drivers/staging/comedi/drivers/pcm_common.c63
-rw-r--r--drivers/staging/comedi/drivers/pcm_common.h8
-rw-r--r--drivers/staging/comedi/drivers/pcmda12.c3
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c62
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c72
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c589
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c10
-rw-r--r--drivers/staging/comedi/drivers/s626.c8
-rw-r--r--drivers/staging/comedi/drivers/skel.c12
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c6
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c29
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c8
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c33
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c1272
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c8
-rw-r--r--drivers/staging/comedi/proc.c9
-rw-r--r--drivers/staging/cptm1217/clearpad_tm1217.c5
-rw-r--r--drivers/staging/csr/bh.c2
-rw-r--r--drivers/staging/csr/drv.c6
-rw-r--r--drivers/staging/csr/sme_sys.c41
-rw-r--r--drivers/staging/csr/unifi_sme.c3
-rw-r--r--drivers/staging/cxt1e1/linux.c4
-rw-r--r--drivers/staging/dgrp/Kconfig2
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c13
-rw-r--r--drivers/staging/dgrp/dgrp_specproc.c81
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c1
-rw-r--r--drivers/staging/echo/echo.c42
-rw-r--r--drivers/staging/et131x/README4
-rw-r--r--drivers/staging/et131x/et131x.c801
-rw-r--r--drivers/staging/et131x/et131x.h96
-rw-r--r--drivers/staging/frontier/alphatrack.c51
-rw-r--r--drivers/staging/frontier/tranzport.c26
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.h33
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c30
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c10
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c153
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c110
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c166
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c6
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c39
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h84
-rw-r--r--drivers/staging/ft1000/ft1000.h35
-rw-r--r--drivers/staging/fwserial/Kconfig6
-rw-r--r--drivers/staging/fwserial/TODO29
-rw-r--r--drivers/staging/fwserial/fwserial.c412
-rw-r--r--drivers/staging/fwserial/fwserial.h25
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.c8
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.c9
-rw-r--r--drivers/staging/gdm72xx/usb_boot.c8
-rw-r--r--drivers/staging/goldfish/Kconfig13
-rw-r--r--drivers/staging/goldfish/Makefile6
-rw-r--r--drivers/staging/goldfish/README12
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c363
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c444
-rw-r--r--drivers/staging/goldfish/goldfish_nand_reg.h72
-rw-r--r--drivers/staging/iio/Kconfig14
-rw-r--r--drivers/staging/iio/Makefile3
-rw-r--r--drivers/staging/iio/accel/Kconfig30
-rw-r--r--drivers/staging/iio/accel/Makefile2
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h8
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c18
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c12
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c13
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c6
-rw-r--r--drivers/staging/iio/adc/Kconfig4
-rw-r--r--drivers/staging/iio/adc/ad7280a.c6
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c534
-rw-r--r--drivers/staging/iio/frequency/ad5930.c5
-rw-r--r--drivers/staging/iio/frequency/ad9850.c5
-rw-r--r--drivers/staging/iio/frequency/ad9852.c5
-rw-r--r--drivers/staging/iio/gyro/Kconfig21
-rw-r--r--drivers/staging/iio/gyro/Makefile9
-rw-r--r--drivers/staging/iio/gyro/adxrs450.h62
-rw-r--r--drivers/staging/iio/iio_hwmon.c83
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c4
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/Kconfig2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c8
-rw-r--r--drivers/staging/iio/imu/Kconfig17
-rw-r--r--drivers/staging/iio/imu/Makefile7
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c1320
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c204
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c74
-rw-r--r--drivers/staging/iio/light/Kconfig10
-rw-r--r--drivers/staging/iio/light/Makefile1
-rw-r--r--drivers/staging/iio/light/tsl2563.h9
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c78
-rw-r--r--drivers/staging/iio/meter/Kconfig2
-rw-r--r--drivers/staging/iio/meter/ade7753.c6
-rw-r--r--drivers/staging/iio/meter/ade7754.c5
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c28
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c12
-rw-r--r--drivers/staging/iio/meter/ade7759.c5
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c44
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c5
-rw-r--r--drivers/staging/iio/ring_sw.c366
-rw-r--r--drivers/staging/iio/ring_sw.h30
-rw-r--r--drivers/staging/iio/trigger/Kconfig1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c1
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c8
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-di.c2
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c6
-rw-r--r--drivers/staging/keucr/usb.c188
-rw-r--r--drivers/staging/line6/Kconfig10
-rw-r--r--drivers/staging/line6/capture.c10
-rw-r--r--drivers/staging/line6/driver.c86
-rw-r--r--drivers/staging/line6/driver.h13
-rw-r--r--drivers/staging/line6/midi.c2
-rw-r--r--drivers/staging/line6/midi.h4
-rw-r--r--drivers/staging/line6/midibuf.c25
-rw-r--r--drivers/staging/line6/midibuf.h22
-rw-r--r--drivers/staging/line6/pcm.c36
-rw-r--r--drivers/staging/line6/playback.c9
-rw-r--r--drivers/staging/line6/pod.c105
-rw-r--r--drivers/staging/line6/toneport.c6
-rw-r--r--drivers/staging/line6/variax.c14
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c4
-rw-r--r--drivers/staging/media/go7007/go7007-fw.c42
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c2
-rw-r--r--drivers/staging/media/go7007/s2250-board.c13
-rw-r--r--drivers/staging/media/go7007/wis-ov7640.c20
-rw-r--r--drivers/staging/media/go7007/wis-saa7113.c20
-rw-r--r--drivers/staging/media/go7007/wis-saa7115.c20
-rw-r--r--drivers/staging/media/go7007/wis-sony-tuner.c13
-rw-r--r--drivers/staging/media/go7007/wis-tw2804.c13
-rw-r--r--drivers/staging/media/go7007/wis-tw9903.c13
-rw-r--r--drivers/staging/media/go7007/wis-uda1342.c13
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c6
-rw-r--r--drivers/staging/nvec/TODO4
-rw-r--r--drivers/staging/nvec/nvec.c95
-rw-r--r--drivers/staging/nvec/nvec.h5
-rw-r--r--drivers/staging/nvec/nvec_kbd.c42
-rw-r--r--drivers/staging/nvec/nvec_power.c8
-rw-r--r--drivers/staging/nvec/nvec_ps2.c37
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c6
-rw-r--r--drivers/staging/octeon/ethernet.c8
-rw-r--r--drivers/staging/omap-thermal/omap-bandgap.c13
-rw-r--r--drivers/staging/omap-thermal/omap-thermal-common.c4
-rw-r--r--drivers/staging/omapdrm/Kconfig2
-rw-r--r--drivers/staging/omapdrm/Makefile1
-rw-r--r--drivers/staging/omapdrm/TODO3
-rw-r--r--drivers/staging/omapdrm/omap_connector.c115
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c510
-rw-r--r--drivers/staging/omapdrm/omap_dmm_priv.h5
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.c172
-rw-r--r--drivers/staging/omapdrm/omap_drv.c446
-rw-r--r--drivers/staging/omapdrm/omap_drv.h144
-rw-r--r--drivers/staging/omapdrm/omap_encoder.c136
-rw-r--r--drivers/staging/omapdrm/omap_fb.c1
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c4
-rw-r--r--drivers/staging/omapdrm/omap_gem.c42
-rw-r--r--drivers/staging/omapdrm/omap_gem_dmabuf.c13
-rw-r--r--drivers/staging/omapdrm/omap_irq.c322
-rw-r--r--drivers/staging/omapdrm/omap_plane.c456
-rw-r--r--drivers/staging/omapdrm/tcm.h2
-rw-r--r--drivers/staging/ozwpan/TODO3
-rw-r--r--drivers/staging/ozwpan/ozcdev.c52
-rw-r--r--drivers/staging/ozwpan/ozcdev.h1
-rw-r--r--drivers/staging/ozwpan/ozeltbuf.c18
-rw-r--r--drivers/staging/ozwpan/ozevent.c8
-rw-r--r--drivers/staging/ozwpan/ozhcd.c152
-rw-r--r--drivers/staging/ozwpan/ozmain.c2
-rw-r--r--drivers/staging/ozwpan/ozpd.c88
-rw-r--r--drivers/staging/ozwpan/ozpd.h4
-rw-r--r--drivers/staging/ozwpan/ozproto.c84
-rw-r--r--drivers/staging/ozwpan/ozproto.h2
-rw-r--r--drivers/staging/ozwpan/ozusbif.h8
-rw-r--r--drivers/staging/ozwpan/ozusbsvc.c22
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c26
-rw-r--r--drivers/staging/panel/panel.c31
-rw-r--r--drivers/staging/quickstart/quickstart.c2
-rw-r--r--drivers/staging/ramster/Kconfig31
-rw-r--r--drivers/staging/ramster/Makefile6
-rw-r--r--drivers/staging/ramster/tmem.c894
-rw-r--r--drivers/staging/ramster/tmem.h259
-rw-r--r--drivers/staging/ramster/zcache-main.c1820
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.c71
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c6
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c408
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c3
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c301
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c6
-rw-r--r--drivers/staging/rtl8192u/changes1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/aes.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/arc4.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/crypto_compat.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h102
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c394
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c280
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c31
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c54
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/internal.h1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c63
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h13
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c124
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h83
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c18
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl_crypto.h93
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c2
-rw-r--r--drivers/staging/rtl8192u/r8180_pm.h2
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c13
-rw-r--r--drivers/staging/rtl8192u/r8192U.h299
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c589
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c197
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h24
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h16
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c37
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTGen.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTType.h9
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c38
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.h44
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c109
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c127
-rw-r--r--drivers/staging/rtl8192u/r819xU_phyreg.h1044
-rw-r--r--drivers/staging/rtl8712/ethernet.h7
-rw-r--r--drivers/staging/rtl8712/hal_init.c17
-rw-r--r--drivers/staging/rtl8712/ieee80211.h2
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c3
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c14
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c66
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c181
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c9
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.h56
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c393
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h6
-rw-r--r--drivers/staging/rtl8712/sta_info.h2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c35
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c15
-rw-r--r--drivers/staging/rtl8712/wifi.h171
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c3
-rw-r--r--drivers/staging/sb105x/Kconfig2
-rw-r--r--drivers/staging/sb105x/sb_mp_register.h2
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c20
-rw-r--r--drivers/staging/sbe-2t3e3/dc.c9
-rw-r--r--drivers/staging/sbe-2t3e3/module.c7
-rw-r--r--drivers/staging/sep/sep_crypto.c10
-rw-r--r--drivers/staging/sep/sep_main.c44
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c41
-rw-r--r--drivers/staging/slicoss/slic.h504
-rw-r--r--drivers/staging/slicoss/slichw.h6
-rw-r--r--drivers/staging/slicoss/slicoss.c35
-rw-r--r--drivers/staging/speakup/Kconfig2
-rw-r--r--drivers/staging/speakup/buffers.c14
-rw-r--r--drivers/staging/speakup/fakekey.c2
-rw-r--r--drivers/staging/speakup/i18n.c12
-rw-r--r--drivers/staging/speakup/i18n.h12
-rw-r--r--drivers/staging/speakup/keyhelp.c39
-rw-r--r--drivers/staging/speakup/kobjects.c84
-rw-r--r--drivers/staging/speakup/main.c370
-rw-r--r--drivers/staging/speakup/selection.c16
-rw-r--r--drivers/staging/speakup/serialio.c6
-rw-r--r--drivers/staging/speakup/speakup.h72
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c6
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c2
-rw-r--r--drivers/staging/speakup/speakup_apollo.c8
-rw-r--r--drivers/staging/speakup/speakup_audptr.c2
-rw-r--r--drivers/staging/speakup/speakup_bns.c2
-rw-r--r--drivers/staging/speakup/speakup_decext.c6
-rw-r--r--drivers/staging/speakup/speakup_decpc.c4
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c6
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c4
-rw-r--r--drivers/staging/speakup/speakup_dummy.c2
-rw-r--r--drivers/staging/speakup/speakup_keypc.c6
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c2
-rw-r--r--drivers/staging/speakup/speakup_spkout.c2
-rw-r--r--drivers/staging/speakup/speakup_txprt.c2
-rw-r--r--drivers/staging/speakup/spk_priv.h8
-rw-r--r--drivers/staging/speakup/synth.c42
-rw-r--r--drivers/staging/speakup/thread.c4
-rw-r--r--drivers/staging/speakup/varhandlers.c66
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c18
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c13
-rw-r--r--drivers/staging/tidspbridge/core/msg_sm.c3
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c34
-rw-r--r--drivers/staging/tidspbridge/core/wdt.c12
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/proc.h2
-rw-r--r--drivers/staging/tidspbridge/pmgr/cod.c2
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c2
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c11
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c3
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c1
-rw-r--r--drivers/staging/tidspbridge/rmgr/nldr.c6
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c12
-rw-r--r--drivers/staging/tidspbridge/rmgr/proc.c23
-rw-r--r--drivers/staging/usbip/Kconfig2
-rw-r--r--drivers/staging/usbip/stub_dev.c42
-rw-r--r--drivers/staging/usbip/stub_rx.c5
-rw-r--r--drivers/staging/usbip/stub_tx.c1
-rw-r--r--drivers/staging/usbip/usbip_common.c3
-rw-r--r--drivers/staging/usbip/usbip_event.c6
-rw-r--r--drivers/staging/usbip/userspace/.gitignore28
-rw-r--r--drivers/staging/usbip/userspace/Makefile.am2
-rw-r--r--drivers/staging/usbip/userspace/README2
-rw-r--r--drivers/staging/usbip/userspace/configure.ac20
-rw-r--r--drivers/staging/usbip/userspace/src/Makefile.am4
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_attach.c15
-rw-r--r--drivers/staging/usbip/userspace/src/usbipd.c96
-rw-r--r--drivers/staging/usbip/vhci_hcd.c80
-rw-r--r--drivers/staging/usbip/vhci_rx.c10
-rw-r--r--drivers/staging/usbip/vhci_tx.c14
-rw-r--r--drivers/staging/vme/devices/Kconfig2
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c15
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c4
-rw-r--r--drivers/staging/vme/devices/vme_user.c2
-rw-r--r--drivers/staging/vt6655/channel.c11
-rw-r--r--drivers/staging/vt6655/device.h24
-rw-r--r--drivers/staging/vt6655/rxtx.c6
-rw-r--r--drivers/staging/vt6655/wcmd.c2
-rw-r--r--drivers/staging/vt6655/wmgr.c2
-rw-r--r--drivers/staging/vt6656/80211mgr.c36
-rw-r--r--drivers/staging/vt6656/80211mgr.h169
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c10
-rw-r--r--drivers/staging/vt6656/aes_ccmp.h2
-rw-r--r--drivers/staging/vt6656/baseband.c91
-rw-r--r--drivers/staging/vt6656/baseband.h70
-rw-r--r--drivers/staging/vt6656/bssdb.c435
-rw-r--r--drivers/staging/vt6656/bssdb.h173
-rw-r--r--drivers/staging/vt6656/card.c366
-rw-r--r--drivers/staging/vt6656/card.h47
-rw-r--r--drivers/staging/vt6656/channel.c157
-rw-r--r--drivers/staging/vt6656/channel.h9
-rw-r--r--drivers/staging/vt6656/control.c55
-rw-r--r--drivers/staging/vt6656/control.h26
-rw-r--r--drivers/staging/vt6656/datarate.c109
-rw-r--r--drivers/staging/vt6656/datarate.h43
-rw-r--r--drivers/staging/vt6656/desc.h195
-rw-r--r--drivers/staging/vt6656/device.h879
-rw-r--r--drivers/staging/vt6656/device_cfg.h12
-rw-r--r--drivers/staging/vt6656/dpc.c470
-rw-r--r--drivers/staging/vt6656/dpc.h16
-rw-r--r--drivers/staging/vt6656/firmware.c33
-rw-r--r--drivers/staging/vt6656/firmware.h17
-rw-r--r--drivers/staging/vt6656/hostap.c147
-rw-r--r--drivers/staging/vt6656/hostap.h4
-rw-r--r--drivers/staging/vt6656/int.c22
-rw-r--r--drivers/staging/vt6656/int.h8
-rw-r--r--drivers/staging/vt6656/iocmd.h55
-rw-r--r--drivers/staging/vt6656/iowpa.h8
-rw-r--r--drivers/staging/vt6656/iwctl.c184
-rw-r--r--drivers/staging/vt6656/key.c364
-rw-r--r--drivers/staging/vt6656/key.h104
-rw-r--r--drivers/staging/vt6656/mac.c92
-rw-r--r--drivers/staging/vt6656/mac.h35
-rw-r--r--drivers/staging/vt6656/main_usb.c530
-rw-r--r--drivers/staging/vt6656/power.c104
-rw-r--r--drivers/staging/vt6656/power.h16
-rw-r--r--drivers/staging/vt6656/rf.c178
-rw-r--r--drivers/staging/vt6656/rf.h26
-rw-r--r--drivers/staging/vt6656/rxtx.c965
-rw-r--r--drivers/staging/vt6656/rxtx.h31
-rw-r--r--drivers/staging/vt6656/tether.c8
-rw-r--r--drivers/staging/vt6656/tether.h2
-rw-r--r--drivers/staging/vt6656/ttype.h23
-rw-r--r--drivers/staging/vt6656/usbpipe.c216
-rw-r--r--drivers/staging/vt6656/usbpipe.h40
-rw-r--r--drivers/staging/vt6656/wcmd.c323
-rw-r--r--drivers/staging/vt6656/wcmd.h18
-rw-r--r--drivers/staging/vt6656/wctl.c50
-rw-r--r--drivers/staging/vt6656/wctl.h10
-rw-r--r--drivers/staging/vt6656/wmgr.c1406
-rw-r--r--drivers/staging/vt6656/wmgr.h394
-rw-r--r--drivers/staging/vt6656/wpa.c30
-rw-r--r--drivers/staging/vt6656/wpa.h4
-rw-r--r--drivers/staging/vt6656/wpa2.c41
-rw-r--r--drivers/staging/vt6656/wpactl.c34
-rw-r--r--drivers/staging/vt6656/wpactl.h2
-rw-r--r--drivers/staging/winbond/Kconfig2
-rw-r--r--drivers/staging/wlags49_h2/ap_h2.c8
-rw-r--r--drivers/staging/wlags49_h2/ap_h25.c78
-rw-r--r--drivers/staging/wlags49_h2/sta_h2.c80
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.c128
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c14
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.h94
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c1113
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.h58
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.h12
-rw-r--r--drivers/staging/wlags49_h2/wl_util.h38
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c9
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h31
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c42
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c84
-rw-r--r--drivers/staging/xgifb/XGIfb.h2
-rw-r--r--drivers/staging/xgifb/vb_init.c119
-rw-r--r--drivers/staging/xgifb/vb_init.h1
-rw-r--r--drivers/staging/xgifb/vb_setmode.c684
-rw-r--r--drivers/staging/xgifb/vb_struct.h5
-rw-r--r--drivers/staging/xgifb/vb_table.h168
-rw-r--r--drivers/staging/zcache/Kconfig34
-rw-r--r--drivers/staging/zcache/Makefile5
-rw-r--r--drivers/staging/zcache/TODO69
-rw-r--r--drivers/staging/zcache/ramster.h (renamed from drivers/staging/ramster/ramster.h)0
-rw-r--r--drivers/staging/zcache/ramster/heartbeat.c (renamed from drivers/staging/ramster/ramster/heartbeat.c)0
-rw-r--r--drivers/staging/zcache/ramster/heartbeat.h (renamed from drivers/staging/ramster/ramster/heartbeat.h)0
-rw-r--r--drivers/staging/zcache/ramster/masklog.c (renamed from drivers/staging/ramster/ramster/masklog.c)0
-rw-r--r--drivers/staging/zcache/ramster/masklog.h (renamed from drivers/staging/ramster/ramster/masklog.h)0
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.c (renamed from drivers/staging/ramster/ramster/nodemanager.c)0
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.h (renamed from drivers/staging/ramster/ramster/nodemanager.h)0
-rw-r--r--drivers/staging/zcache/ramster/r2net.c (renamed from drivers/staging/ramster/ramster/r2net.c)0
-rw-r--r--drivers/staging/zcache/ramster/ramster.c (renamed from drivers/staging/ramster/ramster/ramster.c)34
-rw-r--r--drivers/staging/zcache/ramster/ramster.h (renamed from drivers/staging/ramster/ramster/ramster.h)0
-rw-r--r--drivers/staging/zcache/ramster/ramster_nodemanager.h (renamed from drivers/staging/ramster/ramster/ramster_nodemanager.h)0
-rw-r--r--drivers/staging/zcache/ramster/tcp.c (renamed from drivers/staging/ramster/ramster/tcp.c)0
-rw-r--r--drivers/staging/zcache/ramster/tcp.h (renamed from drivers/staging/ramster/ramster/tcp.h)0
-rw-r--r--drivers/staging/zcache/ramster/tcp_internal.h (renamed from drivers/staging/ramster/ramster/tcp_internal.h)0
-rw-r--r--drivers/staging/zcache/tmem.c327
-rw-r--r--drivers/staging/zcache/tmem.h83
-rw-r--r--drivers/staging/zcache/zbud.c (renamed from drivers/staging/ramster/zbud.c)43
-rw-r--r--drivers/staging/zcache/zbud.h (renamed from drivers/staging/ramster/zbud.h)0
-rw-r--r--drivers/staging/zcache/zcache-main.c2558
-rw-r--r--drivers/staging/zcache/zcache.h (renamed from drivers/staging/ramster/zcache.h)0
-rw-r--r--drivers/staging/zram/Kconfig2
-rw-r--r--drivers/staging/zram/zram.txt27
-rw-r--r--drivers/staging/zram/zram_drv.c323
-rw-r--r--drivers/staging/zram/zram_drv.h17
-rw-r--r--drivers/staging/zram/zram_sysfs.c16
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c31
-rw-r--r--drivers/staging/zsmalloc/zsmalloc.h2
-rw-r--r--drivers/target/iscsi/iscsi_target.c84
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c28
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c18
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c8
-rw-r--r--drivers/target/loopback/tcm_loop.h1
-rw-r--r--drivers/target/sbp/Kconfig2
-rw-r--r--drivers/target/sbp/sbp_target.c26
-rw-r--r--drivers/target/target_core_alua.c346
-rw-r--r--drivers/target/target_core_alua.h9
-rw-r--r--drivers/target/target_core_configfs.c705
-rw-r--r--drivers/target/target_core_device.c716
-rw-r--r--drivers/target/target_core_fabric_configfs.c42
-rw-r--r--drivers/target/target_core_fabric_lib.c3
-rw-r--r--drivers/target/target_core_file.c279
-rw-r--r--drivers/target/target_core_file.h2
-rw-r--r--drivers/target/target_core_hba.c9
-rw-r--r--drivers/target/target_core_iblock.c501
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h16
-rw-r--r--drivers/target/target_core_pr.c1225
-rw-r--r--drivers/target/target_core_pr.h10
-rw-r--r--drivers/target/target_core_pscsi.c349
-rw-r--r--drivers/target/target_core_pscsi.h2
-rw-r--r--drivers/target/target_core_rd.c126
-rw-r--r--drivers/target/target_core_rd.h1
-rw-r--r--drivers/target/target_core_sbc.c167
-rw-r--r--drivers/target/target_core_spc.c534
-rw-r--r--drivers/target/target_core_stat.c312
-rw-r--r--drivers/target/target_core_tmr.c9
-rw-r--r--drivers/target/target_core_tpg.c29
-rw-r--r--drivers/target/target_core_transport.c695
-rw-r--r--drivers/target/target_core_ua.c20
-rw-r--r--drivers/target/target_core_ua.h2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c14
-rw-r--r--drivers/thermal/exynos_thermal.c14
-rw-r--r--drivers/tty/Kconfig32
-rw-r--r--drivers/tty/Makefile4
-rw-r--r--drivers/tty/amiserial.c18
-rw-r--r--drivers/tty/bfin_jtag_comm.c22
-rw-r--r--drivers/tty/cyclades.c297
-rw-r--r--drivers/tty/ehv_bytechan.c13
-rw-r--r--drivers/tty/goldfish.c328
-rw-r--r--drivers/tty/hvc/Kconfig3
-rw-r--r--drivers/tty/hvc/hvc_console.c6
-rw-r--r--drivers/tty/hvc/hvcs.c6
-rw-r--r--drivers/tty/hvc/hvsi.c32
-rw-r--r--drivers/tty/ipwireless/hardware.c4
-rw-r--r--drivers/tty/ipwireless/tty.c12
-rw-r--r--drivers/tty/isicom.c12
-rw-r--r--drivers/tty/metag_da.c677
-rw-r--r--drivers/tty/moxa.c10
-rw-r--r--drivers/tty/mxser.c50
-rw-r--r--drivers/tty/n_gsm.c120
-rw-r--r--drivers/tty/n_tty.c59
-rw-r--r--drivers/tty/nozomi.c37
-rw-r--r--drivers/tty/pty.c30
-rw-r--r--drivers/tty/rocket.c60
-rw-r--r--drivers/tty/serial/21285.c3
-rw-r--r--drivers/tty/serial/68328serial.c19
-rw-r--r--drivers/tty/serial/8250/8250.c143
-rw-r--r--drivers/tty/serial/8250/8250.h51
-rw-r--r--drivers/tty/serial/8250/8250_dma.c216
-rw-r--r--drivers/tty/serial/8250/8250_dw.c259
-rw-r--r--drivers/tty/serial/8250/8250_early.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c384
-rw-r--r--drivers/tty/serial/8250/Kconfig27
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig41
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c8
-rw-r--r--drivers/tty/serial/altera_uart.c8
-rw-r--r--drivers/tty/serial/amba-pl010.c3
-rw-r--r--drivers/tty/serial/amba-pl011.c11
-rw-r--r--drivers/tty/serial/apbuart.c3
-rw-r--r--drivers/tty/serial/ar933x_uart.c15
-rw-r--r--drivers/tty/serial/arc_uart.c104
-rw-r--r--drivers/tty/serial/atmel_serial.c9
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c9
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c12
-rw-r--r--drivers/tty/serial/bfin_uart.c10
-rw-r--r--drivers/tty/serial/clps711x.c8
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c10
-rw-r--r--drivers/tty/serial/crisv10.c35
-rw-r--r--drivers/tty/serial/dz.c4
-rw-r--r--drivers/tty/serial/efm32-uart.c52
-rw-r--r--drivers/tty/serial/icom.c10
-rw-r--r--drivers/tty/serial/ifx6x60.c15
-rw-r--r--drivers/tty/serial/imx.c282
-rw-r--r--drivers/tty/serial/ioc3_serial.c11
-rw-r--r--drivers/tty/serial/ioc4_serial.c13
-rw-r--r--drivers/tty/serial/ip22zilog.c30
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c18
-rw-r--r--drivers/tty/serial/kgdb_nmi.c13
-rw-r--r--drivers/tty/serial/lantiq.c20
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c30
-rw-r--r--drivers/tty/serial/m32r_sio.c8
-rw-r--r--drivers/tty/serial/max3100.c13
-rw-r--r--drivers/tty/serial/max310x.c8
-rw-r--r--drivers/tty/serial/mcf.c73
-rw-r--r--drivers/tty/serial/mfd.c15
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c8
-rw-r--r--drivers/tty/serial/mpsc.c15
-rw-r--r--drivers/tty/serial/mrst_max3110.c19
-rw-r--r--drivers/tty/serial/msm_serial.c16
-rw-r--r--drivers/tty/serial/msm_serial_hs.c19
-rw-r--r--drivers/tty/serial/msm_smd_tty.c4
-rw-r--r--drivers/tty/serial/mux.c9
-rw-r--r--drivers/tty/serial/mxs-auart.c17
-rw-r--r--drivers/tty/serial/netx-serial.c4
-rw-r--r--drivers/tty/serial/nwpserial.c6
-rw-r--r--drivers/tty/serial/of_serial.c7
-rw-r--r--drivers/tty/serial/omap-serial.c54
-rw-r--r--drivers/tty/serial/pch_uart.c90
-rw-r--r--drivers/tty/serial/pmac_zilog.c36
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c3
-rw-r--r--drivers/tty/serial/pxa.c17
-rw-r--r--drivers/tty/serial/rp2.c885
-rw-r--r--drivers/tty/serial/sa1100.c3
-rw-r--r--drivers/tty/serial/samsung.c16
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sc26xx.c29
-rw-r--r--drivers/tty/serial/sccnxp.c179
-rw-r--r--drivers/tty/serial/serial-tegra.c1401
-rw-r--r--drivers/tty/serial/serial_core.c94
-rw-r--r--drivers/tty/serial/serial_ks8695.c3
-rw-r--r--drivers/tty/serial/serial_txx9.c3
-rw-r--r--drivers/tty/serial/sh-sci.c52
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c55
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h3
-rw-r--r--drivers/tty/serial/sn_console.c16
-rw-r--r--drivers/tty/serial/sunhv.c33
-rw-r--r--drivers/tty/serial/sunsab.c28
-rw-r--r--drivers/tty/serial/sunsu.c18
-rw-r--r--drivers/tty/serial/sunzilog.c39
-rw-r--r--drivers/tty/serial/timbuart.c6
-rw-r--r--drivers/tty/serial/uartlite.c119
-rw-r--r--drivers/tty/serial/ucc_uart.c10
-rw-r--r--drivers/tty/serial/vr41xx_siu.c4
-rw-r--r--drivers/tty/serial/vt8500_serial.c54
-rw-r--r--drivers/tty/serial/xilinx_uartps.c50
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/synclink.c50
-rw-r--r--drivers/tty/synclink_gt.c61
-rw-r--r--drivers/tty/synclinkmp.c103
-rw-r--r--drivers/tty/sysrq.c277
-rw-r--r--drivers/tty/tty_buffer.c131
-rw-r--r--drivers/tty/tty_io.c15
-rw-r--r--drivers/tty/tty_ioctl.c12
-rw-r--r--drivers/tty/tty_ldisc.c14
-rw-r--r--drivers/tty/vt/Makefile4
-rw-r--r--drivers/tty/vt/keyboard.c25
-rw-r--r--drivers/tty/vt/vt.c16
-rw-r--r--drivers/uio/Kconfig1
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/c67x00/c67x00-ll-hpi.c2
-rw-r--r--drivers/usb/chipidea/ci13xxx_imx.h2
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/chipidea/usbmisc_imx6q.c6
-rw-r--r--drivers/usb/class/Kconfig2
-rw-r--r--drivers/usb/class/cdc-acm.c16
-rw-r--r--drivers/usb/core/Makefile1
-rw-r--r--drivers/usb/core/devices.c13
-rw-r--r--drivers/usb/core/devio.c3
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd.c49
-rw-r--r--drivers/usb/core/hub.c787
-rw-r--r--drivers/usb/core/hub.h122
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/port.c202
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/sysfs.c31
-rw-r--r--drivers/usb/core/usb.h12
-rw-r--r--drivers/usb/dwc3/Kconfig31
-rw-r--r--drivers/usb/dwc3/Makefile10
-rw-r--r--drivers/usb/dwc3/core.c31
-rw-r--r--drivers/usb/dwc3/core.h24
-rw-r--r--drivers/usb/dwc3/debugfs.c40
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c57
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c152
-rw-r--r--drivers/usb/dwc3/gadget.c293
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/gadget/Kconfig30
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/acm_ms.c42
-rw-r--r--drivers/usb/gadget/amd5536udc.c63
-rw-r--r--drivers/usb/gadget/amd5536udc.h2
-rw-r--r--drivers/usb/gadget/at91_udc.c17
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c12
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c13
-rw-r--r--drivers/usb/gadget/cdc2.c36
-rw-r--r--drivers/usb/gadget/composite.c326
-rw-r--r--drivers/usb/gadget/dbgp.c14
-rw-r--r--drivers/usb/gadget/dummy_hcd.c9
-rw-r--r--drivers/usb/gadget/f_acm.c153
-rw-r--r--drivers/usb/gadget/f_fs.c11
-rw-r--r--drivers/usb/gadget/f_loopback.c103
-rw-r--r--drivers/usb/gadget/f_mass_storage.c37
-rw-r--r--drivers/usb/gadget/f_ncm.c18
-rw-r--r--drivers/usb/gadget/f_obex.c4
-rw-r--r--drivers/usb/gadget/f_serial.c4
-rw-r--r--drivers/usb/gadget/f_sourcesink.c200
-rw-r--r--drivers/usb/gadget/f_uac2.c9
-rw-r--r--drivers/usb/gadget/f_uvc.c3
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c40
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c102
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h5
-rw-r--r--drivers/usb/gadget/functions.c116
-rw-r--r--drivers/usb/gadget/fusb300_udc.c80
-rw-r--r--drivers/usb/gadget/fusb300_udc.h2
-rw-r--r--drivers/usb/gadget/g_zero.h35
-rw-r--r--drivers/usb/gadget/gmidi.c2
-rw-r--r--drivers/usb/gadget/goku_udc.c70
-rw-r--r--drivers/usb/gadget/goku_udc.h1
-rw-r--r--drivers/usb/gadget/imx_udc.c12
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c12
-rw-r--r--drivers/usb/gadget/m66592-udc.c84
-rw-r--r--drivers/usb/gadget/m66592-udc.h1
-rw-r--r--drivers/usb/gadget/multi.c71
-rw-r--r--drivers/usb/gadget/mv_udc_core.c250
-rw-r--r--drivers/usb/gadget/net2280.c15
-rw-r--r--drivers/usb/gadget/nokia.c43
-rw-r--r--drivers/usb/gadget/omap_udc.c51
-rw-r--r--drivers/usb/gadget/pch_udc.c67
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c77
-rw-r--r--drivers/usb/gadget/pxa25x_udc.h1
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c61
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h1
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c17
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c56
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c20
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c65
-rw-r--r--drivers/usb/gadget/s3c2410_udc.h1
-rw-r--r--drivers/usb/gadget/serial.c118
-rw-r--r--drivers/usb/gadget/storage_common.c61
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c3
-rw-r--r--drivers/usb/gadget/u_ether.c10
-rw-r--r--drivers/usb/gadget/u_serial.c330
-rw-r--r--drivers/usb/gadget/u_serial.h13
-rw-r--r--drivers/usb/gadget/udc-core.c157
-rw-r--r--drivers/usb/gadget/webcam.c2
-rw-r--r--drivers/usb/gadget/zero.c233
-rw-r--r--drivers/usb/host/Kconfig4
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-atmel.c7
-rw-r--r--drivers/usb/host/ehci-fsl.c9
-rw-r--r--drivers/usb/host/ehci-grlib.c9
-rw-r--r--drivers/usb/host/ehci-hcd.c13
-rw-r--r--drivers/usb/host/ehci-hub.c9
-rw-r--r--drivers/usb/host/ehci-mv.c5
-rw-r--r--drivers/usb/host/ehci-mxc.c147
-rw-r--r--drivers/usb/host/ehci-omap.c10
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/ehci-pci.c39
-rw-r--r--drivers/usb/host/ehci-platform.c7
-rw-r--r--drivers/usb/host/ehci-ppc-of.c8
-rw-r--r--drivers/usb/host/ehci-q.c50
-rw-r--r--drivers/usb/host/ehci-s5p.c83
-rw-r--r--drivers/usb/host/ehci-sched.c9
-rw-r--r--drivers/usb/host/ehci-sead3.c8
-rw-r--r--drivers/usb/host/ehci-sh.c7
-rw-r--r--drivers/usb/host/ehci-tegra.c97
-rw-r--r--drivers/usb/host/ehci-timer.c29
-rw-r--r--drivers/usb/host/ehci-vt8500.c8
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c8
-rw-r--r--drivers/usb/host/ehci.h7
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c3
-rw-r--r--drivers/usb/host/imx21-hcd.c1
-rw-r--r--drivers/usb/host/isp1760-hcd.c4
-rw-r--r--drivers/usb/host/ohci-exynos.c87
-rw-r--r--drivers/usb/host/ohci-nxp.c7
-rw-r--r--drivers/usb/host/ohci-platform.c7
-rw-r--r--drivers/usb/host/ohci-q.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c3
-rw-r--r--drivers/usb/host/pci-quirks.c1
-rw-r--r--drivers/usb/host/uhci-debug.c178
-rw-r--r--drivers/usb/host/uhci-hcd.c46
-rw-r--r--drivers/usb/host/uhci-hcd.h4
-rw-r--r--drivers/usb/host/uhci-hub.c7
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/xhci-hub.c38
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-ring.c24
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/misc/Kconfig6
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/usb3503.c325
-rw-r--r--drivers/usb/misc/usbtest.c15
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/am35x.c2
-rw-r--r--drivers/usb/musb/blackfin.c2
-rw-r--r--drivers/usb/musb/cppi_dma.c4
-rw-r--r--drivers/usb/musb/da8xx.c7
-rw-r--r--drivers/usb/musb/davinci.c7
-rw-r--r--drivers/usb/musb/musb_core.c18
-rw-r--r--drivers/usb/musb/musb_dsps.c15
-rw-r--r--drivers/usb/musb/musb_gadget.c22
-rw-r--r--drivers/usb/musb/musb_host.c44
-rw-r--r--drivers/usb/musb/musb_io.h21
-rw-r--r--drivers/usb/musb/omap2430.c91
-rw-r--r--drivers/usb/musb/omap2430.h9
-rw-r--r--drivers/usb/musb/tusb6010.c7
-rw-r--r--drivers/usb/musb/ux500.c12
-rw-r--r--drivers/usb/otg/Kconfig2
-rw-r--r--drivers/usb/otg/gpio_vbus.c12
-rw-r--r--drivers/usb/otg/msm_otg.c13
-rw-r--r--drivers/usb/otg/mv_otg.c88
-rw-r--r--drivers/usb/otg/mxs-phy.c26
-rw-r--r--drivers/usb/otg/otg.c235
-rw-r--r--drivers/usb/otg/twl4030-usb.c3
-rw-r--r--drivers/usb/phy/Kconfig29
-rw-r--r--drivers/usb/phy/Makefile3
-rw-r--r--drivers/usb/phy/mv_u3d_phy.c8
-rw-r--r--drivers/usb/phy/omap-control-usb.c295
-rw-r--r--drivers/usb/phy/omap-usb2.c72
-rw-r--r--drivers/usb/phy/omap-usb3.c355
-rw-r--r--drivers/usb/phy/samsung-usbphy.c930
-rw-r--r--drivers/usb/phy/tegra_usb_phy.c132
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig2
-rw-r--r--drivers/usb/renesas_usbhs/common.c9
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c24
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c3
-rw-r--r--drivers/usb/serial/Kconfig14
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/aircable.c17
-rw-r--r--drivers/usb/serial/ark3116.c12
-rw-r--r--drivers/usb/serial/belkin_sa.c12
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/cyberjack.c11
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c14
-rw-r--r--drivers/usb/serial/f81232.c15
-rw-r--r--drivers/usb/serial/ftdi_sio.c45
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h15
-rw-r--r--drivers/usb/serial/garmin_gps.c9
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/io_edgeport.c39
-rw-r--r--drivers/usb/serial/io_ti.c118
-rw-r--r--drivers/usb/serial/ir-usb.c9
-rw-r--r--drivers/usb/serial/iuu_phoenix.c9
-rw-r--r--drivers/usb/serial/keyspan.c64
-rw-r--r--drivers/usb/serial/keyspan_pda.c9
-rw-r--r--drivers/usb/serial/kl5kusb105.c10
-rw-r--r--drivers/usb/serial/kobil_sct.c9
-rw-r--r--drivers/usb/serial/mct_u232.c33
-rw-r--r--drivers/usb/serial/metro-usb.c9
-rw-r--r--drivers/usb/serial/mos7720.c9
-rw-r--r--drivers/usb/serial/mos7840.c10
-rw-r--r--drivers/usb/serial/navman.c9
-rw-r--r--drivers/usb/serial/omninet.c10
-rw-r--r--drivers/usb/serial/opticon.c11
-rw-r--r--drivers/usb/serial/option.c56
-rw-r--r--drivers/usb/serial/oti6858.c9
-rw-r--r--drivers/usb/serial/pl2303.c15
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/quatech2.c47
-rw-r--r--drivers/usb/serial/safe_serial.c15
-rw-r--r--drivers/usb/serial/sierra.c25
-rw-r--r--drivers/usb/serial/spcp8x5.c24
-rw-r--r--drivers/usb/serial/ssu100.c50
-rw-r--r--drivers/usb/serial/symbolserial.c9
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c44
-rw-r--r--drivers/usb/serial/usb-serial.c28
-rw-r--r--drivers/usb/serial/usb_wwan.c25
-rw-r--r--drivers/usb/serial/xsens_mt.c86
-rw-r--r--drivers/usb/storage/initializers.c76
-rw-r--r--drivers/usb/storage/initializers.h4
-rw-r--r--drivers/usb/storage/uas.c124
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h329
-rw-r--r--drivers/usb/storage/usb.c15
-rw-r--r--drivers/usb/storage/usual-tables.c15
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c6
-rw-r--r--drivers/uwb/lc-rc.c21
-rw-r--r--drivers/vfio/pci/vfio_pci.c83
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c4
-rw-r--r--drivers/vfio/vfio.c34
-rw-r--r--drivers/vhost/Kconfig4
-rw-r--r--drivers/vhost/Kconfig.tcm4
-rw-r--r--drivers/vhost/net.c41
-rw-r--r--drivers/vhost/tcm_vhost.c8
-rw-r--r--drivers/vhost/vhost.c18
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/Kconfig66
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/acornfb.c22
-rw-r--r--drivers/video/arcfb.c10
-rw-r--r--drivers/video/arkfb.c10
-rw-r--r--drivers/video/asiliantfb.c18
-rw-r--r--drivers/video/aty/aty128fb.c57
-rw-r--r--drivers/video/aty/atyfb_base.c84
-rw-r--r--drivers/video/aty/mach64_ct.c6
-rw-r--r--drivers/video/aty/mach64_cursor.c2
-rw-r--r--drivers/video/aty/radeon_base.c20
-rw-r--r--drivers/video/aty/radeon_monitor.c24
-rw-r--r--drivers/video/au1100fb.c6
-rw-r--r--drivers/video/au1200fb.c6
-rw-r--r--drivers/video/auo_k1900fb.c6
-rw-r--r--drivers/video/auo_k1901fb.c6
-rw-r--r--drivers/video/auo_k190x.c7
-rw-r--r--drivers/video/backlight/88pm860x_bl.c23
-rw-r--r--drivers/video/backlight/Kconfig24
-rw-r--r--drivers/video/backlight/Makefile89
-rw-r--r--drivers/video/backlight/aat2870_bl.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/ams369fg06.c104
-rw-r--r--drivers/video/backlight/apple_bl.c2
-rw-r--r--drivers/video/backlight/as3711_bl.c380
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c7
-rw-r--r--drivers/video/backlight/backlight.c29
-rw-r--r--drivers/video/backlight/corgi_lcd.c38
-rw-r--r--drivers/video/backlight/da903x_bl.c15
-rw-r--r--drivers/video/backlight/da9052_bl.c2
-rw-r--r--drivers/video/backlight/generic_bl.c4
-rw-r--r--drivers/video/backlight/hp680_bl.c4
-rw-r--r--drivers/video/backlight/hx8357.c497
-rw-r--r--drivers/video/backlight/ili9320.c14
-rw-r--r--drivers/video/backlight/ili9320.h2
-rw-r--r--drivers/video/backlight/jornada720_bl.c31
-rw-r--r--drivers/video/backlight/l4f00242t03.c39
-rw-r--r--drivers/video/backlight/lcd.c8
-rw-r--r--drivers/video/backlight/ld9040.c109
-rw-r--r--drivers/video/backlight/lm3630_bl.c6
-rw-r--r--drivers/video/backlight/lm3639_bl.c7
-rw-r--r--drivers/video/backlight/lms283gf05.c21
-rw-r--r--drivers/video/backlight/lms501kf03.c441
-rw-r--r--drivers/video/backlight/locomolcd.c38
-rw-r--r--drivers/video/backlight/lp855x_bl.c218
-rw-r--r--drivers/video/backlight/ltv350qv.c10
-rw-r--r--drivers/video/backlight/max8925_bl.c11
-rw-r--r--drivers/video/backlight/omap1_bl.c14
-rw-r--r--drivers/video/backlight/ot200_bl.c1
-rw-r--r--drivers/video/backlight/pandora_bl.c8
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c8
-rw-r--r--drivers/video/backlight/platform_lcd.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c8
-rw-r--r--drivers/video/backlight/s6e63m0.c155
-rw-r--r--drivers/video/backlight/tdo24m.c43
-rw-r--r--drivers/video/backlight/tosa_bl.c9
-rw-r--r--drivers/video/backlight/tosa_lcd.c36
-rw-r--r--drivers/video/backlight/vgg2432a4.c19
-rw-r--r--drivers/video/bf537-lq035.c18
-rw-r--r--drivers/video/bf54x-lq043fb.c6
-rw-r--r--drivers/video/bfin-lq035q1-fb.c14
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c6
-rw-r--r--drivers/video/bfin_adv7393fb.c10
-rw-r--r--drivers/video/broadsheetfb.c16
-rw-r--r--drivers/video/bw2.c23
-rw-r--r--drivers/video/carminefb.c16
-rw-r--r--drivers/video/cg14.c12
-rw-r--r--drivers/video/cg3.c26
-rw-r--r--drivers/video/cg6.c12
-rw-r--r--drivers/video/chipsfb.c13
-rw-r--r--drivers/video/cirrusfb.c44
-rw-r--r--drivers/video/clps711xfb.c9
-rw-r--r--drivers/video/cobalt_lcdfb.c8
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/fbcon.c10
-rw-r--r--drivers/video/console/newport_con.c11
-rw-r--r--drivers/video/console/softcursor.c3
-rw-r--r--drivers/video/console/sticore.c83
-rw-r--r--drivers/video/cyber2000fb.c23
-rw-r--r--drivers/video/da8xx-fb.c180
-rw-r--r--drivers/video/dnfb.c6
-rw-r--r--drivers/video/efifb.c4
-rw-r--r--drivers/video/ep93xx-fb.c8
-rw-r--r--drivers/video/exynos/exynos_dp_core.c716
-rw-r--r--drivers/video/exynos/exynos_dp_core.h21
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c77
-rw-r--r--drivers/video/exynos/exynos_dp_reg.h3
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c74
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c1
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_lowlevel.c1
-rw-r--r--drivers/video/exynos/s6e8ax0.c14
-rw-r--r--drivers/video/ffb.c6
-rw-r--r--drivers/video/fm2fb.c14
-rw-r--r--drivers/video/fsl-diu-fb.c229
-rw-r--r--drivers/video/gbefb.c24
-rw-r--r--drivers/video/geode/Kconfig14
-rw-r--r--drivers/video/geode/gx1fb_core.c14
-rw-r--r--drivers/video/geode/gxfb_core.c20
-rw-r--r--drivers/video/geode/lxfb_core.c20
-rw-r--r--drivers/video/goldfishfb.c318
-rw-r--r--drivers/video/grvga.c12
-rw-r--r--drivers/video/gxt4500.c28
-rw-r--r--drivers/video/hecubafb.c10
-rw-r--r--drivers/video/hgafb.c12
-rw-r--r--drivers/video/hitfb.c10
-rw-r--r--drivers/video/hpfb.c9
-rw-r--r--drivers/video/i740fb.c15
-rw-r--r--drivers/video/i810/i810_main.c72
-rw-r--r--drivers/video/i810/i810_main.h2
-rw-r--r--drivers/video/igafb.c2
-rw-r--r--drivers/video/imsttfb.c17
-rw-r--r--drivers/video/imxfb.c17
-rw-r--r--drivers/video/intelfb/intelfbdrv.c29
-rw-r--r--drivers/video/jz4740_fb.c14
-rw-r--r--drivers/video/kyro/fbdev.c21
-rw-r--r--drivers/video/leo.c6
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c20
-rw-r--r--drivers/video/mbx/mbxdebugfs.c4
-rw-r--r--drivers/video/mbx/mbxfb.c18
-rw-r--r--drivers/video/metronomefb.c22
-rw-r--r--drivers/video/mmp/Kconfig11
-rw-r--r--drivers/video/mmp/Makefile1
-rw-r--r--drivers/video/mmp/core.c258
-rw-r--r--drivers/video/mmp/fb/Kconfig13
-rw-r--r--drivers/video/mmp/fb/Makefile1
-rw-r--r--drivers/video/mmp/fb/mmpfb.c685
-rw-r--r--drivers/video/mmp/fb/mmpfb.h54
-rw-r--r--drivers/video/mmp/hw/Kconfig20
-rw-r--r--drivers/video/mmp/hw/Makefile2
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c591
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.h1974
-rw-r--r--drivers/video/mmp/hw/mmp_spi.c180
-rw-r--r--drivers/video/mmp/panel/Kconfig6
-rw-r--r--drivers/video/mmp/panel/Makefile1
-rw-r--r--drivers/video/mmp/panel/tpo_tj032md01bw.c186
-rw-r--r--drivers/video/msm/mddi.c9
-rw-r--r--drivers/video/mx3fb.c2
-rw-r--r--drivers/video/mxsfb.c15
-rw-r--r--drivers/video/neofb.c26
-rw-r--r--drivers/video/nuc900fb.c4
-rw-r--r--drivers/video/nvidia/nvidia.c49
-rw-r--r--drivers/video/omap/Kconfig2
-rw-r--r--drivers/video/omap/lcd_mipid.c2
-rw-r--r--drivers/video/omap2/Kconfig7
-rw-r--r--drivers/video/omap2/Makefile1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c25
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c60
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c40
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c91
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c26
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c45
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c17
-rw-r--r--drivers/video/omap2/displays/panel-taal.c72
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c33
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c24
-rw-r--r--drivers/video/omap2/dss/Kconfig35
-rw-r--r--drivers/video/omap2/dss/Makefile7
-rw-r--r--drivers/video/omap2/dss/apply.c331
-rw-r--r--drivers/video/omap2/dss/core.c72
-rw-r--r--drivers/video/omap2/dss/dispc-compat.c667
-rw-r--r--drivers/video/omap2/dss/dispc-compat.h30
-rw-r--r--drivers/video/omap2/dss/dispc.c1063
-rw-r--r--drivers/video/omap2/dss/display-sysfs.c321
-rw-r--r--drivers/video/omap2/dss/display.c386
-rw-r--r--drivers/video/omap2/dss/dpi.c126
-rw-r--r--drivers/video/omap2/dss/dsi.c247
-rw-r--r--drivers/video/omap2/dss/dss.c101
-rw-r--r--drivers/video/omap2/dss/dss.h124
-rw-r--r--drivers/video/omap2/dss/dss_features.c16
-rw-r--r--drivers/video/omap2/dss/dss_features.h7
-rw-r--r--drivers/video/omap2/dss/hdmi.c165
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c82
-rw-r--r--drivers/video/omap2/dss/manager.c39
-rw-r--r--drivers/video/omap2/dss/output.c90
-rw-r--r--drivers/video/omap2/dss/overlay.c17
-rw-r--r--drivers/video/omap2/dss/rfbi.c23
-rw-r--r--drivers/video/omap2/dss/sdi.c11
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h3
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c11
-rw-r--r--drivers/video/omap2/dss/venc.c11
-rw-r--r--drivers/video/omap2/dss/venc_panel.c19
-rw-r--r--drivers/video/omap2/omapfb/Kconfig1
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c46
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c204
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c4
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h20
-rw-r--r--drivers/video/omap2/vram.c514
-rw-r--r--drivers/video/omap2/vrfb.c9
-rw-r--r--drivers/video/p9100.c6
-rw-r--r--drivers/video/platinumfb.c11
-rw-r--r--drivers/video/pm2fb.c17
-rw-r--r--drivers/video/pm3fb.c17
-rw-r--r--drivers/video/pmag-ba-fb.c6
-rw-r--r--drivers/video/pmagb-b-fb.c12
-rw-r--r--drivers/video/ps3fb.c4
-rw-r--r--drivers/video/pvr2fb.c28
-rw-r--r--drivers/video/pxa168fb.c8
-rw-r--r--drivers/video/pxa3xx-gcu.c8
-rw-r--r--drivers/video/pxafb.c33
-rw-r--r--drivers/video/q40fb.c6
-rw-r--r--drivers/video/riva/fbdev.c45
-rw-r--r--drivers/video/riva/rivafb-i2c.c9
-rw-r--r--drivers/video/s1d13xxxfb.c10
-rw-r--r--drivers/video/s3c-fb.c46
-rw-r--r--drivers/video/s3c2410fb.c16
-rw-r--r--drivers/video/s3fb.c16
-rw-r--r--drivers/video/sa1100fb.c8
-rw-r--r--drivers/video/savage/savagefb_driver.c23
-rw-r--r--drivers/video/sgivwfb.c12
-rw-r--r--drivers/video/sh7760fb.c6
-rw-r--r--drivers/video/sh_mipi_dsi.c73
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c92
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h1
-rw-r--r--drivers/video/sh_mobile_meram.c2
-rw-r--r--drivers/video/sis/sis_main.c140
-rw-r--r--drivers/video/sis/sis_main.h20
-rw-r--r--drivers/video/skeletonfb.c17
-rw-r--r--drivers/video/sm501fb.c16
-rw-r--r--drivers/video/ssd1307fb.c397
-rw-r--r--drivers/video/sstfb.c33
-rw-r--r--drivers/video/sunxvr1000.c10
-rw-r--r--drivers/video/sunxvr2500.c12
-rw-r--r--drivers/video/sunxvr500.c12
-rw-r--r--drivers/video/tcx.c6
-rw-r--r--drivers/video/tdfxfb.c30
-rw-r--r--drivers/video/tgafb.c45
-rw-r--r--drivers/video/tmiofb.c8
-rw-r--r--drivers/video/tridentfb.c28
-rw-r--r--drivers/video/uvesafb.c74
-rw-r--r--drivers/video/vermilion/vermilion.c7
-rw-r--r--drivers/video/vfb.c6
-rw-r--r--drivers/video/vga16fb.c10
-rw-r--r--drivers/video/via/dvi.c10
-rw-r--r--drivers/video/via/dvi.h4
-rw-r--r--drivers/video/via/hw.c16
-rw-r--r--drivers/video/via/hw.h4
-rw-r--r--drivers/video/via/lcd.c10
-rw-r--r--drivers/video/via/lcd.h6
-rw-r--r--drivers/video/via/via-core.c19
-rw-r--r--drivers/video/via/via-gpio.c2
-rw-r--r--drivers/video/via/viafbdev.c12
-rw-r--r--drivers/video/vt8500lcdfb.c6
-rw-r--r--drivers/video/vt8623fb.c8
-rw-r--r--drivers/video/w100fb.c10
-rw-r--r--drivers/video/wm8505fb.c6
-rw-r--r--drivers/video/wmt_ge_rops.c6
-rw-r--r--drivers/video/xen-fbfront.c7
-rw-r--r--drivers/video/xilinxfb.c8
-rw-r--r--drivers/virt/fsl_hypervisor.c3
-rw-r--r--drivers/virtio/Kconfig8
-rw-r--r--drivers/virtio/virtio.c30
-rw-r--r--drivers/virtio/virtio_balloon.c11
-rw-r--r--drivers/virtio/virtio_mmio.c36
-rw-r--r--drivers/virtio/virtio_pci.c28
-rw-r--r--drivers/virtio/virtio_ring.c46
-rw-r--r--drivers/vlynq/Kconfig2
-rw-r--r--drivers/vlynq/vlynq.c6
-rw-r--r--drivers/vme/vme.c1
-rw-r--r--drivers/w1/masters/Kconfig1
-rw-r--r--drivers/w1/masters/ds1wm.c52
-rw-r--r--drivers/w1/masters/ds2482.c51
-rw-r--r--drivers/w1/masters/mxc_w1.c51
-rw-r--r--drivers/w1/masters/omap_hdq.c8
-rw-r--r--drivers/w1/masters/w1-gpio.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c36
-rw-r--r--drivers/watchdog/Kconfig18
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/ar7_wdt.c8
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c12
-rw-r--r--drivers/watchdog/at91sam9_wdt.c13
-rw-r--r--drivers/watchdog/ath79_wdt.c13
-rw-r--r--drivers/watchdog/coh901327_wdt.c12
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/da9052_wdt.c4
-rw-r--r--drivers/watchdog/da9055_wdt.c211
-rw-r--r--drivers/watchdog/davinci_wdt.c11
-rw-r--r--drivers/watchdog/dw_wdt.c6
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/imx2_wdt.c20
-rw-r--r--drivers/watchdog/jz4740_wdt.c6
-rw-r--r--drivers/watchdog/lantiq_wdt.c8
-rw-r--r--drivers/watchdog/max63xx_wdt.c7
-rw-r--r--drivers/watchdog/mpcore_wdt.c19
-rw-r--r--drivers/watchdog/omap_wdt.c312
-rw-r--r--drivers/watchdog/orion_wdt.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c6
-rw-r--r--drivers/watchdog/s3c2410_wdt.c6
-rw-r--r--drivers/watchdog/sp5100_tco.c321
-rw-r--r--drivers/watchdog/sp5100_tco.h46
-rw-r--r--drivers/watchdog/sp805_wdt.c11
-rw-r--r--drivers/watchdog/twl4030_wdt.c196
-rw-r--r--drivers/watchdog/txx9wdt.c19
-rw-r--r--drivers/xen/cpu_hotplug.c4
-rw-r--r--drivers/xen/events.c11
-rw-r--r--drivers/xen/gntdev.c130
-rw-r--r--drivers/xen/grant-table.c50
-rw-r--r--drivers/xen/pcpu.c3
-rw-r--r--drivers/xen/platform-pci.c6
-rw-r--r--drivers/xen/privcmd.c89
-rw-r--r--drivers/xen/swiotlb-xen.c29
-rw-r--r--drivers/xen/xen-acpi-pad.c3
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c13
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c14
-rw-r--r--drivers/zorro/zorro-driver.c4
5245 files changed, 389869 insertions, 158609 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index f5fb0722a63a..202fa6d051b9 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,6 +134,8 @@ source "drivers/hwspinlock/Kconfig"
source "drivers/clocksource/Kconfig"
+source "drivers/mailbox/Kconfig"
+
source "drivers/iommu/Kconfig"
source "drivers/remoteproc/Kconfig"
@@ -150,6 +152,8 @@ source "drivers/memory/Kconfig"
source "drivers/iio/Kconfig"
+source "drivers/ntb/Kconfig"
+
source "drivers/vme/Kconfig"
source "drivers/pwm/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7863b9fee50b..dce39a95fa71 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_PNP) += pnp/
obj-y += amba/
# Many drivers will want to use DMA so this has to be made available
# really early.
-obj-$(CONFIG_DMA_ENGINE) += dma/
+obj-$(CONFIG_DMADEVICES) += dma/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
@@ -130,6 +130,7 @@ obj-y += platform/
#common clk code
obj-y += clk/
+obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
@@ -146,3 +147,4 @@ obj-$(CONFIG_MEMORY) += memory/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
+obj-$(CONFIG_NTB) += ntb/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 0300bf612946..1a4ed64586a7 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -176,7 +176,6 @@ config ACPI_FAN
config ACPI_DOCK
bool "Dock"
- depends on EXPERIMENTAL
help
This driver supports ACPI-controlled docking stations and removable
drive bays such as the IBM Ultrabay and the Dell Module Bay.
@@ -202,7 +201,7 @@ config ACPI_PROCESSOR
the module will be called processor.
config ACPI_IPMI
tristate "IPMI"
- depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER
+ depends on IPMI_SI && IPMI_HANDLER
default n
help
This driver enables the ACPI to access the BMC controller. And it
@@ -214,14 +213,13 @@ config ACPI_IPMI
config ACPI_HOTPLUG_CPU
bool
- depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
+ depends on ACPI_PROCESSOR && HOTPLUG_CPU
select ACPI_CONTAINER
default y
config ACPI_PROCESSOR_AGGREGATOR
tristate "Processor Aggregator"
depends on ACPI_PROCESSOR
- depends on EXPERIMENTAL
depends on X86
help
ACPI 4.0 defines processor Aggregator, which enables OS to perform
@@ -267,6 +265,15 @@ config ACPI_CUSTOM_DSDT
bool
default ACPI_CUSTOM_DSDT_FILE != ""
+config ACPI_INITRD_TABLE_OVERRIDE
+ bool "ACPI tables can be passed via uncompressed cpio in initrd"
+ default n
+ help
+ This option provides functionality to override arbitrary ACPI tables
+ via initrd. No functional change if no ACPI tables are passed via
+ initrd, therefore it's safe to say Y.
+ See Documentation/acpi/initrd_table_override.txt for details
+
config ACPI_BLACKLIST_YEAR
int "Disable ACPI for systems before Jan 1st this year" if X86_32
default 0
@@ -328,8 +335,7 @@ config X86_PM_TIMER
systems require this timer.
config ACPI_CONTAINER
- tristate "Container and Module Devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Container and Module Devices"
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO)
help
This driver supports ACPI Container and Module devices (IDs
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 2a4502becd13..474fcfeba66c 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,7 +37,8 @@ acpi-y += resource.o
acpi-y += processor_core.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
-acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
+acpi-y += pci_root.o pci_link.o pci_irq.o
+acpi-y += csrt.o
acpi-y += acpi_platform.o
acpi-y += power.o
acpi-y += event.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index d5fdd36190cc..6d5bf649196d 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -60,7 +60,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
#endif
static int acpi_ac_add(struct acpi_device *device);
-static int acpi_ac_remove(struct acpi_device *device, int type);
+static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id ac_device_ids[] = {
@@ -337,7 +337,7 @@ static int acpi_ac_resume(struct device *dev)
}
#endif
-static int acpi_ac_remove(struct acpi_device *device, int type)
+static int acpi_ac_remove(struct acpi_device *device)
{
struct acpi_ac *ac = NULL;
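
The prototype change above (dropping the unused "type" argument from the driver's .remove callback) recurs in the acpi_memhotplug.c and acpi_pad.c hunks below. A minimal sketch of what an ACPI driver's ops block looks like after this change; the example_* names and the "EXAMPLE1" _HID are hypothetical and not part of this diff:

#include <linux/acpi.h>

static const struct acpi_device_id example_ids[] = {
        { "EXAMPLE1", 0 },              /* hypothetical _HID */
        { }
};

static int example_add(struct acpi_device *device)
{
        /* allocate per-device state (later retrieved with acpi_driver_data()) */
        return 0;
}

static int example_remove(struct acpi_device *device)
{
        /* no "int type" second argument any more */
        return 0;
}

static struct acpi_driver example_driver = {
        .name = "example",
        .class = "example",
        .ids = example_ids,
        .ops = {
                .add = example_add,
                .remove = example_remove,
        },
};
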
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index eb30e5ab4cab..da1f82b445e0 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("GPL");
#define MEMORY_POWER_OFF_STATE 2
static int acpi_memory_device_add(struct acpi_device *device);
-static int acpi_memory_device_remove(struct acpi_device *device, int type);
+static int acpi_memory_device_remove(struct acpi_device *device);
static const struct acpi_device_id memory_device_ids[] = {
{ACPI_MEMORY_DEVICE_HID, 0},
@@ -153,51 +153,46 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
return 0;
}
-static int
-acpi_memory_get_device(acpi_handle handle,
- struct acpi_memory_device **mem_device)
+static int acpi_memory_get_device(acpi_handle handle,
+ struct acpi_memory_device **mem_device)
{
- acpi_status status;
- acpi_handle phandle;
struct acpi_device *device = NULL;
- struct acpi_device *pdevice = NULL;
- int result;
+ int result = 0;
+ acpi_scan_lock_acquire();
- if (!acpi_bus_get_device(handle, &device) && device)
+ acpi_bus_get_device(handle, &device);
+ if (device)
goto end;
- status = acpi_get_parent(handle, &phandle);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Cannot find acpi parent"));
- return -EINVAL;
- }
-
- /* Get the parent device */
- result = acpi_bus_get_device(phandle, &pdevice);
- if (result) {
- acpi_handle_warn(phandle, "Cannot get acpi bus device\n");
- return -EINVAL;
- }
-
/*
* Now add the notified device. This creates the acpi_device
* and invokes .add function
*/
- result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE);
+ result = acpi_bus_scan(handle);
if (result) {
- acpi_handle_warn(handle, "Cannot add acpi bus\n");
- return -EINVAL;
+ acpi_handle_warn(handle, "ACPI namespace scan failed\n");
+ result = -EINVAL;
+ goto out;
+ }
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ acpi_handle_warn(handle, "Missing device object\n");
+ result = -EINVAL;
+ goto out;
}
- end:
+ end:
*mem_device = acpi_driver_data(device);
if (!(*mem_device)) {
dev_err(&device->dev, "driver data not found\n");
- return -ENODEV;
+ result = -ENODEV;
+ goto out;
}
- return 0;
+ out:
+ acpi_scan_lock_release();
+ return result;
}
static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
@@ -226,16 +221,6 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
struct acpi_memory_info *info;
int node;
-
- /* Get the range from the _CRS */
- result = acpi_memory_get_device_resources(mem_device);
- if (result) {
- dev_err(&mem_device->device->dev,
- "get_device_resources failed\n");
- mem_device->state = MEMORY_INVALID_STATE;
- return result;
- }
-
node = acpi_get_node(mem_device->device->handle);
/*
* Tell the VM there is more memory here...
@@ -295,9 +280,11 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
- int result = 0;
+ int result = 0, nid;
struct acpi_memory_info *info, *n;
+ nid = acpi_get_node(mem_device->device->handle);
+
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
if (info->failed)
/* The kernel does not use this memory block */
@@ -310,7 +297,9 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
*/
return -EBUSY;
- result = remove_memory(info->start_addr, info->length);
+ if (nid < 0)
+ nid = memory_add_physaddr_to_nid(info->start_addr);
+ result = remove_memory(nid, info->start_addr, info->length);
if (result)
return result;
@@ -327,6 +316,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
struct acpi_device *device;
struct acpi_eject_event *ej_event = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+ acpi_status status;
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -342,14 +332,6 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
break;
}
- if (acpi_memory_check_device(mem_device))
- break;
-
- if (acpi_memory_enable_device(mem_device)) {
- acpi_handle_err(handle,"Cannot enable memory device\n");
- break;
- }
-
ost_code = ACPI_OST_SC_SUCCESS;
break;
@@ -357,29 +339,40 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"\nReceived EJECT REQUEST notification for device\n"));
+ status = AE_ERROR;
+ acpi_scan_lock_acquire();
+
if (acpi_bus_get_device(handle, &device)) {
acpi_handle_err(handle, "Device doesn't exist\n");
- break;
+ goto unlock;
}
mem_device = acpi_driver_data(device);
if (!mem_device) {
acpi_handle_err(handle, "Driver Data is NULL\n");
- break;
+ goto unlock;
}
ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
if (!ej_event) {
pr_err(PREFIX "No memory, dropping EJECT\n");
- break;
+ goto unlock;
}
- ej_event->handle = handle;
+ get_device(&device->dev);
+ ej_event->device = device;
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
- acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
- (void *)ej_event);
+ /* The eject is carried out asynchronously. */
+ status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
+ ej_event);
+ if (ACPI_FAILURE(status)) {
+ put_device(&device->dev);
+ kfree(ej_event);
+ }
- /* eject is performed asynchronously */
- return;
+ unlock:
+ acpi_scan_lock_release();
+ if (ACPI_SUCCESS(status))
+ return;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
@@ -390,7 +383,6 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
/* Inform firmware that the hotplug operation has completed */
(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
- return;
}
static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
@@ -445,7 +437,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
return result;
}
-static int acpi_memory_device_remove(struct acpi_device *device, int type)
+static int acpi_memory_device_remove(struct acpi_device *device)
{
struct acpi_memory_device *mem_device = NULL;
int result;
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 16fa979f7180..31de1043eea0 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -482,8 +482,7 @@ static int acpi_pad_add(struct acpi_device *device)
return 0;
}
-static int acpi_pad_remove(struct acpi_device *device,
- int type)
+static int acpi_pad_remove(struct acpi_device *device)
{
mutex_lock(&isolated_cpus_lock);
acpi_pad_idle_cpus(0);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index db129b9f52cb..26fce4b8a632 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -13,6 +13,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -21,18 +22,59 @@
ACPI_MODULE_NAME("platform");
+/* Flags for acpi_create_platform_device */
+#define ACPI_PLATFORM_CLK BIT(0)
+
+/*
+ * The following ACPI IDs are known to be suitable for representing as
+ * platform devices.
+ */
+static const struct acpi_device_id acpi_platform_device_ids[] = {
+
+ { "PNP0D40" },
+
+ /* Haswell LPSS devices */
+ { "INT33C0", ACPI_PLATFORM_CLK },
+ { "INT33C1", ACPI_PLATFORM_CLK },
+ { "INT33C2", ACPI_PLATFORM_CLK },
+ { "INT33C3", ACPI_PLATFORM_CLK },
+ { "INT33C4", ACPI_PLATFORM_CLK },
+ { "INT33C5", ACPI_PLATFORM_CLK },
+ { "INT33C6", ACPI_PLATFORM_CLK },
+ { "INT33C7", ACPI_PLATFORM_CLK },
+
+ { }
+};
+
+static int acpi_create_platform_clks(struct acpi_device *adev)
+{
+ static struct platform_device *pdev;
+
+ /* Create Lynxpoint LPSS clocks */
+ if (!pdev && !strncmp(acpi_device_hid(adev), "INT33C", 6)) {
+ pdev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
+ * @id: ACPI device ID used to match @adev.
*
* Check if the given @adev can be represented as a platform device and, if
* that's the case, create and register a platform device, populate its common
* resources and returns a pointer to it. Otherwise, return %NULL.
*
- * The platform device's name will be taken from the @adev's _HID and _UID.
+ * Name of the platform device will be the same as @adev's.
*/
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+static int acpi_create_platform_device(struct acpi_device *adev,
+ const struct acpi_device_id *id)
{
+ unsigned long flags = id->driver_data;
struct platform_device *pdev = NULL;
struct acpi_device *acpi_parent;
struct platform_device_info pdevinfo;
@@ -41,20 +83,28 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
struct resource *resources;
int count;
+ if (flags & ACPI_PLATFORM_CLK) {
+ int ret = acpi_create_platform_clks(adev);
+ if (ret) {
+ dev_err(&adev->dev, "failed to create clocks\n");
+ return ret;
+ }
+ }
+
/* If the ACPI node already has a physical device attached, skip it. */
if (adev->physical_node_count)
- return NULL;
+ return 0;
INIT_LIST_HEAD(&resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (count <= 0)
- return NULL;
+ return 0;
resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL);
if (!resources) {
dev_err(&adev->dev, "No memory for resources\n");
acpi_dev_free_resource_list(&resource_list);
- return NULL;
+ return -ENOMEM;
}
count = 0;
list_for_each_entry(rentry, &resource_list, node)
@@ -100,5 +150,15 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
}
kfree(resources);
- return pdev;
+ return 1;
+}
+
+static struct acpi_scan_handler platform_handler = {
+ .ids = acpi_platform_device_ids,
+ .attach = acpi_create_platform_device,
+};
+
+void __init acpi_platform_init(void)
+{
+ acpi_scan_add_handler(&platform_handler);
}
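
acpi_platform.c is now wired into the namespace scan through the .ids/.attach handler registered above rather than being called directly. A minimal sketch of another built-in handler using the same interface; the example_* names and the "EXAM0001" _HID are made up, and the declarations are assumed to come from the ACPI core's internal headers, as for platform_handler:

#include <linux/acpi.h>
#include "internal.h"           /* assumed home of acpi_scan_add_handler() */

static const struct acpi_device_id example_scan_ids[] = {
        { "EXAM0001" },                 /* hypothetical _HID */
        { }
};

static int example_scan_attach(struct acpi_device *adev,
                               const struct acpi_device_id *id)
{
        /* create whatever physical device should back this ACPI node */
        return 1;
}

static struct acpi_scan_handler example_scan_handler = {
        .ids = example_scan_ids,
        .attach = example_scan_attach,
};

void __init example_scan_init(void)
{
        acpi_scan_add_handler(&example_scan_handler);
}

The return convention follows acpi_create_platform_device() above: 1 after a device has been created, 0 when the node is skipped, and a negative errno on failure.
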
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index c8bc24bd1f72..a1b9bf5085a2 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -31,6 +31,7 @@ acpi-y += \
evgpeinit.o \
evgpeutil.o \
evglock.o \
+ evhandler.o \
evmisc.o \
evregion.o \
evrgnini.o \
@@ -90,6 +91,7 @@ acpi-y += \
nsobject.o \
nsparse.o \
nspredef.o \
+ nsprepkg.o \
nsrepair.o \
nsrepair2.o \
nssearch.o \
@@ -104,7 +106,9 @@ acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
acpi-y += \
psargs.o \
psloop.o \
+ psobject.o \
psopcode.o \
+ psopinfo.o \
psparse.o \
psscope.o \
pstree.o \
@@ -126,7 +130,7 @@ acpi-y += \
rsutils.o \
rsxface.o
-acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
+acpi-$(ACPI_FUTURE_USAGE) += rsdump.o rsdumpinfo.o
acpi-y += \
tbfadt.o \
@@ -155,12 +159,14 @@ acpi-y += \
utmutex.o \
utobject.o \
utosi.o \
+ utownerid.o \
utresrc.o \
utstate.o \
+ utstring.o \
utxface.o \
utxfinit.o \
utxferror.o \
utxfmutex.o
-acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o utclib.o
+acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 8a7d51bfb3b3..8a6c4a0d22db 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,7 @@
*
* Note: The order of these include files is important.
*/
+#include <acpi/acconfig.h> /* Global configuration constants */
#include "acmacros.h" /* C macros */
#include "aclocal.h" /* Internal data types */
#include "acobject.h" /* ACPI internal object */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 432a318c9ed1..9feba08c29fe 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -115,6 +115,21 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
char *block_arg))
/*
+ * dbconvert - miscellaneous conversion routines
+ */
+ acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
+
+acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
+
+acpi_status
+acpi_db_convert_to_object(acpi_object_type type,
+ char *string, union acpi_object *object);
+
+u8 *acpi_db_encode_pld_buffer(struct acpi_pld_info *pld_info);
+
+void acpi_db_dump_pld_buffer(union acpi_object *obj_desc);
+
+/*
* dbmethod - control method commands
*/
void
@@ -191,6 +206,8 @@ void
acpi_db_create_execution_threads(char *num_threads_arg,
char *num_loops_arg, char *method_name_arg);
+void acpi_db_delete_objects(u32 count, union acpi_object *objects);
+
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
u32 acpi_db_get_cache_info(struct acpi_memory_list *cache);
#endif
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index ed33ebcdaebe..427db72a6302 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index e975c6720448..ab0e97710381 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -158,10 +158,23 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
void *context);
/*
- * evregion - Address Space handling
+ * evhandler - Address space handling
*/
+u8
+acpi_ev_has_default_handler(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id);
+
acpi_status acpi_ev_install_region_handlers(void);
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context);
+
+/*
+ * evregion - Operation region support
+ */
acpi_status acpi_ev_initialize_op_regions(void);
acpi_status
@@ -180,12 +193,6 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
u8 acpi_ns_is_locked);
acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node *node,
- acpi_adr_space_type space_id,
- acpi_adr_space_handler handler,
- acpi_adr_space_setup setup, void *context);
-
-acpi_status
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 64472e4ec329..ecb49927b817 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -192,14 +192,6 @@ ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
-/* Mutex for _OSI support */
-
-ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
-
-/* Reader/Writer lock is used for namespace walk and dynamic table unload */
-
-ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
-
/*****************************************************************************
*
* Mutual exclusion within ACPICA subsystem
@@ -233,6 +225,14 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+/* Mutex for _OSI support */
+
+ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+
+/* Reader/Writer lock is used for namespace walk and dynamic table unload */
+
+ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
+
/*****************************************************************************
*
* Miscellaneous globals
@@ -252,7 +252,7 @@ ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
-ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
+ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
ACPI_EXTERN void *acpi_gbl_table_handler_context;
ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
@@ -304,6 +304,7 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
+ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
#endif
/*****************************************************************************
@@ -365,19 +366,18 @@ ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
*
****************************************************************************/
-extern struct acpi_fixed_event_info
- acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-ACPI_EXTERN struct acpi_fixed_event_handler
- acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
-ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
-ACPI_EXTERN struct acpi_gpe_block_info
-*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-
#if (!ACPI_REDUCED_HARDWARE)
ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
+ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
+ACPI_EXTERN struct acpi_gpe_block_info
+ *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
+ACPI_EXTERN struct acpi_fixed_event_handler
+ acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
+extern struct acpi_fixed_event_info
+ acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -405,7 +405,7 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
/*****************************************************************************
*
- * Debugger globals
+ * Debugger and Disassembler globals
*
****************************************************************************/
@@ -413,8 +413,12 @@ ACPI_EXTERN u8 acpi_gbl_db_output_flags;
#ifdef ACPI_DISASSEMBLER
+u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
+
ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
+ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
+ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
#endif
#ifdef ACPI_DEBUGGER
@@ -426,6 +430,7 @@ extern u8 acpi_gbl_db_terminate_threads;
ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
+ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index d902d31abc6c..6357e932bfd9 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index eb308635da72..8af8c9bdeb35 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -458,7 +458,7 @@ void acpi_ex_reacquire_interpreter(void);
void acpi_ex_relinquish_interpreter(void);
-void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
+u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
void acpi_ex_acquire_global_lock(u32 rule);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index ff8bd0061e8b..805f419086ab 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -189,11 +189,10 @@ struct acpi_namespace_node {
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
-#define ANOBJ_IS_EXTERNAL 0x08 /* i_aSL only: This object created via External() */
-#define ANOBJ_METHOD_NO_RETVAL 0x10 /* i_aSL only: Method has no return value */
-#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* i_aSL only: Method has at least one return value */
-#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
-#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
+#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
+#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
+#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
+#define ANOBJ_IS_REFERENCED 0x80 /* iASL only: Object was referenced */
/* Internal ACPI table management - master table list */
@@ -411,11 +410,10 @@ struct acpi_gpe_notify_info {
struct acpi_gpe_notify_info *next;
};
-struct acpi_gpe_notify_object {
- struct acpi_namespace_node *node;
- struct acpi_gpe_notify_object *next;
-};
-
+/*
+ * GPE dispatch info. At any time, the GPE can have at most one type
+ * of dispatch - Method, Handler, or Implicit Notify.
+ */
union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
@@ -679,6 +677,8 @@ struct acpi_opcode_info {
u8 type; /* Opcode type */
};
+/* Value associated with the parse object */
+
union acpi_parse_value {
u64 integer; /* Integer constant (Up to 64 bits) */
u32 size; /* bytelist or field size */
@@ -1025,6 +1025,31 @@ struct acpi_port_info {
/*****************************************************************************
*
+ * Disassembler
+ *
+ ****************************************************************************/
+
+struct acpi_external_list {
+ char *path;
+ char *internal_path;
+ struct acpi_external_list *next;
+ u32 value;
+ u16 length;
+ u8 type;
+ u8 flags;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_IPATH_ALLOCATED 0x01
+
+struct acpi_external_file {
+ char *path;
+ struct acpi_external_file *next;
+};
+
+/*****************************************************************************
+ *
* Debugger
*
****************************************************************************/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 5efad99f2169..ed7943b9044f 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,14 +49,18 @@
* get into potential aligment issues -- see the STORE macros below.
* Use with care.
*/
-#define ACPI_GET8(ptr) *ACPI_CAST_PTR (u8, ptr)
-#define ACPI_GET16(ptr) *ACPI_CAST_PTR (u16, ptr)
-#define ACPI_GET32(ptr) *ACPI_CAST_PTR (u32, ptr)
-#define ACPI_GET64(ptr) *ACPI_CAST_PTR (u64, ptr)
-#define ACPI_SET8(ptr) *ACPI_CAST_PTR (u8, ptr)
-#define ACPI_SET16(ptr) *ACPI_CAST_PTR (u16, ptr)
-#define ACPI_SET32(ptr) *ACPI_CAST_PTR (u32, ptr)
-#define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr)
+#define ACPI_CAST8(ptr) ACPI_CAST_PTR (u8, (ptr))
+#define ACPI_CAST16(ptr) ACPI_CAST_PTR (u16, (ptr))
+#define ACPI_CAST32(ptr) ACPI_CAST_PTR (u32, (ptr))
+#define ACPI_CAST64(ptr) ACPI_CAST_PTR (u64, (ptr))
+#define ACPI_GET8(ptr) (*ACPI_CAST8 (ptr))
+#define ACPI_GET16(ptr) (*ACPI_CAST16 (ptr))
+#define ACPI_GET32(ptr) (*ACPI_CAST32 (ptr))
+#define ACPI_GET64(ptr) (*ACPI_CAST64 (ptr))
+#define ACPI_SET8(ptr, val) (*ACPI_CAST8 (ptr) = (u8) (val))
+#define ACPI_SET16(ptr, val) (*ACPI_CAST16 (ptr) = (u16) (val))
+#define ACPI_SET32(ptr, val) (*ACPI_CAST32 (ptr) = (u32) (val))
+#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
/*
* printf() format helpers
@@ -293,6 +297,26 @@
#define ACPI_16BIT_MASK 0x0000FFFF
#define ACPI_24BIT_MASK 0x00FFFFFF
+/* Macros to extract flag bits from position zero */
+
+#define ACPI_GET_1BIT_FLAG(value) ((value) & ACPI_1BIT_MASK)
+#define ACPI_GET_2BIT_FLAG(value) ((value) & ACPI_2BIT_MASK)
+#define ACPI_GET_3BIT_FLAG(value) ((value) & ACPI_3BIT_MASK)
+#define ACPI_GET_4BIT_FLAG(value) ((value) & ACPI_4BIT_MASK)
+
+/* Macros to extract flag bits from position one and above */
+
+#define ACPI_EXTRACT_1BIT_FLAG(field, position) (ACPI_GET_1BIT_FLAG ((field) >> position))
+#define ACPI_EXTRACT_2BIT_FLAG(field, position) (ACPI_GET_2BIT_FLAG ((field) >> position))
+#define ACPI_EXTRACT_3BIT_FLAG(field, position) (ACPI_GET_3BIT_FLAG ((field) >> position))
+#define ACPI_EXTRACT_4BIT_FLAG(field, position) (ACPI_GET_4BIT_FLAG ((field) >> position))
+
+/* ACPI Pathname helpers */
+
+#define ACPI_IS_ROOT_PREFIX(c) ((c) == (u8) 0x5C) /* Backslash */
+#define ACPI_IS_PARENT_PREFIX(c) ((c) == (u8) 0x5E) /* Carat */
+#define ACPI_IS_PATH_SEPARATOR(c) ((c) == (u8) 0x2E) /* Period (dot) */
+
/*
* An object of type struct acpi_namespace_node can appear in some contexts
* where a pointer to an object of type union acpi_operand_object can also
@@ -364,137 +388,6 @@
#endif /* ACPI_NO_ERROR_MESSAGES */
-/*
- * Debug macros that are conditionally compiled
- */
-#ifdef ACPI_DEBUG_OUTPUT
-/*
- * Function entry tracing
- */
-#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
-#define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS, (void *)b)
-#define ACPI_FUNCTION_TRACE_U32(a, b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS, (u32)b)
-#define ACPI_FUNCTION_TRACE_STR(a, b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS, (char *)b)
-
-#define ACPI_FUNCTION_ENTRY() acpi_ut_track_stack_ptr()
-
-/*
- * Function exit tracing.
- * WARNING: These macros include a return statement. This is usually considered
- * bad form, but having a separate exit macro is very ugly and difficult to maintain.
- * One of the FUNCTION_TRACE macros above must be used in conjunction with these macros
- * so that "_AcpiFunctionName" is defined.
- *
- * Note: the DO_WHILE0 macro is used to prevent some compilers from complaining
- * about these constructs.
- */
-#ifdef ACPI_USE_DO_WHILE_0
-#define ACPI_DO_WHILE0(a) do a while(0)
-#else
-#define ACPI_DO_WHILE0(a) a
-#endif
-
-#define return_VOID ACPI_DO_WHILE0 ({ \
- acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \
- return;})
-/*
- * There are two versions of most of the return macros. The default version is
- * safer, since it avoids side-effects by guaranteeing that the argument will
- * not be evaluated twice.
- *
- * A less-safe version of the macros is provided for optional use if the
- * compiler uses excessive CPU stack (for example, this may happen in the
- * debug case if code optimzation is disabled.)
- */
-#ifndef ACPI_SIMPLE_RETURN_MACROS
-
-#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({ \
- register acpi_status _s = (s); \
- acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, _s); \
- return (_s); })
-#define return_PTR(s) ACPI_DO_WHILE0 ({ \
- register void *_s = (void *) (s); \
- acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) _s); \
- return (_s); })
-#define return_VALUE(s) ACPI_DO_WHILE0 ({ \
- register u64 _s = (s); \
- acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, _s); \
- return (_s); })
-#define return_UINT8(s) ACPI_DO_WHILE0 ({ \
- register u8 _s = (u8) (s); \
- acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) _s); \
- return (_s); })
-#define return_UINT32(s) ACPI_DO_WHILE0 ({ \
- register u32 _s = (u32) (s); \
- acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) _s); \
- return (_s); })
-#else /* Use original less-safe macros */
-
-#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({ \
- acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, (s)); \
- return((s)); })
-#define return_PTR(s) ACPI_DO_WHILE0 ({ \
- acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) (s)); \
- return((s)); })
-#define return_VALUE(s) ACPI_DO_WHILE0 ({ \
- acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) (s)); \
- return((s)); })
-#define return_UINT8(s) return_VALUE(s)
-#define return_UINT32(s) return_VALUE(s)
-
-#endif /* ACPI_SIMPLE_RETURN_MACROS */
-
-/* Conditional execution */
-
-#define ACPI_DEBUG_EXEC(a) a
-#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
-#define _VERBOSE_STRUCTURES
-
-/* Various object display routines for debug */
-
-#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0)
-#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c)
-#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b)
-#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
-#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
-
-#else
-/*
- * This is the non-debug case -- make everything go away,
- * leaving no executable debug code!
- */
-#define ACPI_DEBUG_EXEC(a)
-#define ACPI_DEBUG_ONLY_MEMBERS(a)
-#define ACPI_FUNCTION_TRACE(a)
-#define ACPI_FUNCTION_TRACE_PTR(a, b)
-#define ACPI_FUNCTION_TRACE_U32(a, b)
-#define ACPI_FUNCTION_TRACE_STR(a, b)
-#define ACPI_FUNCTION_EXIT
-#define ACPI_FUNCTION_STATUS_EXIT(s)
-#define ACPI_FUNCTION_VALUE_EXIT(s)
-#define ACPI_FUNCTION_ENTRY()
-#define ACPI_DUMP_STACK_ENTRY(a)
-#define ACPI_DUMP_OPERANDS(a, b, c)
-#define ACPI_DUMP_ENTRY(a, b)
-#define ACPI_DUMP_TABLES(a, b)
-#define ACPI_DUMP_PATHNAME(a, b, c, d)
-#define ACPI_DUMP_BUFFER(a, b)
-#define ACPI_DEBUG_PRINT(pl)
-#define ACPI_DEBUG_PRINT_RAW(pl)
-
-#define return_VOID return
-#define return_ACPI_STATUS(s) return(s)
-#define return_VALUE(s) return(s)
-#define return_UINT8(s) return(s)
-#define return_UINT32(s) return(s)
-#define return_PTR(s) return(s)
-
-#endif /* ACPI_DEBUG_OUTPUT */
-
#if (!ACPI_REDUCED_HARDWARE)
#define ACPI_HW_OPTIONAL_FUNCTION(addr) addr
#else
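
One easy-to-miss detail in the first acmacros.h hunk above: the old ACPI_SETxx macros expanded to an lvalue that callers assigned to, while the reworked ones take the value as a second argument. A short before/after sketch with a hypothetical local buffer; the macros are ACPICA-internal and assumed to be in scope:

        u8 raw[4];      /* arbitrary byte-addressed storage */
        u32 value;

        /* old form (removed above): the macro expansion was the lvalue */
        /*      ACPI_SET32 (raw) = 0x12345678;  */

        /* new form: the value is passed as the second argument */
        ACPI_SET32 (raw, 0x12345678);
        value = ACPI_GET32 (raw);       /* value == 0x12345678 */
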
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9b19d4b86424..02cd5482ff8b 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -218,6 +218,18 @@ acpi_ns_check_parameter_count(char *pathname,
u32 user_param_count,
const union acpi_predefined_info *info);
+acpi_status
+acpi_ns_check_object_type(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr,
+ u32 expected_btypes, u32 package_index);
+
+/*
+ * nsprepkg - Validation of predefined name packages
+ */
+acpi_status
+acpi_ns_check_package(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
/*
* nsnames - Name and Scope manipulation
*/
@@ -333,8 +345,6 @@ acpi_ns_install_node(struct acpi_walk_state *walk_state,
/*
* nsutils - Utility functions
*/
-u8 acpi_ns_valid_root_prefix(char prefix);
-
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
u32 acpi_ns_local(acpi_object_type type);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 24eb9eac9514..cc7ab6dd724e 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -307,7 +307,7 @@ struct acpi_object_addr_handler {
struct acpi_namespace_node *node; /* Parent device */
void *context;
acpi_adr_space_setup setup;
- union acpi_operand_object *region_list; /* regions using this handler */
+ union acpi_operand_object *region_list; /* Regions using this handler */
union acpi_operand_object *next;
};
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index d786a5128b78..3fc9ca7e8aa3 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index eefcf47a61a0..aed319318835 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -105,7 +105,28 @@ union acpi_parse_object *acpi_ps_find_name(union acpi_parse_object *scope,
union acpi_parse_object *acpi_ps_get_parent(union acpi_parse_object *op);
/*
- * psopcode - AML Opcode information
+ * psobject - support for parse object processing
+ */
+acpi_status
+acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
+ u8 *aml_op_start,
+ union acpi_parse_object *unnamed_op,
+ union acpi_parse_object **op);
+
+acpi_status
+acpi_ps_create_op(struct acpi_walk_state *walk_state,
+ u8 *aml_op_start, union acpi_parse_object **new_op);
+
+acpi_status
+acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **op, acpi_status status);
+
+acpi_status
+acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, acpi_status status);
+
+/*
+ * psopinfo - AML Opcode information
*/
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode);
@@ -211,8 +232,6 @@ void acpi_ps_free_op(union acpi_parse_object *op);
u8 acpi_ps_is_leading_char(u32 c);
-u8 acpi_ps_is_prefix_char(u32 c);
-
#ifdef ACPI_FUTURE_USAGE
u32 acpi_ps_get_name(union acpi_parse_object *op);
#endif /* ACPI_FUTURE_USAGE */
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 9dfa1c83bd4e..752cc40cdc1e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -1,12 +1,11 @@
/******************************************************************************
*
* Name: acpredef - Information table for ACPI predefined methods and objects
- * $Revision: 1.1 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,13 +50,13 @@
*
* 1) PTYPE1 packages do not contain sub-packages.
*
- * ACPI_PTYPE1_FIXED: Fixed length, 1 or 2 object types:
+ * ACPI_PTYPE1_FIXED: Fixed-length length, 1 or 2 object types:
* object type
* count
* object type
* count
*
- * ACPI_PTYPE1_VAR: Variable length:
+ * ACPI_PTYPE1_VAR: Variable-length length:
* object type (Int/Buf/Ref)
*
* ACPI_PTYPE1_OPTION: Package has some required and some optional elements
@@ -85,10 +84,10 @@
* count
* (Used for _CST)
*
- * ACPI_PTYPE2_FIXED: Each subpackage is of fixed length
+ * ACPI_PTYPE2_FIXED: Each subpackage is of Fixed-length
* (Used for _PRT)
*
- * ACPI_PTYPE2_MIN: Each subpackage has a variable but minimum length
+ * ACPI_PTYPE2_MIN: Each subpackage has a Variable-length but minimum length
* (Used for _HPX)
*
* ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
@@ -124,7 +123,8 @@ enum acpi_return_package_types {
* These are the names that can actually be evaluated via acpi_evaluate_object.
* Not present in this table are the following:
*
- * 1) Predefined/Reserved names that are never evaluated via acpi_evaluate_object:
+ * 1) Predefined/Reserved names that are never evaluated via
+ * acpi_evaluate_object:
* _Lxx and _Exx GPE methods
* _Qxx EC methods
* _T_x compiler temporary variables
@@ -149,6 +149,8 @@ enum acpi_return_package_types {
* information about the expected structure of the package. This information
* is saved here (rather than in a separate table) in order to minimize the
* overall size of the stored data.
+ *
+ * Note: The additional braces are intended to promote portability.
*/
static const union acpi_predefined_info predefined_names[] = {
{{"_AC0", 0, ACPI_RTYPE_INTEGER}},
@@ -212,9 +214,8 @@ static const union acpi_predefined_info predefined_names[] = {
{{"_BCT", 1, ACPI_RTYPE_INTEGER}},
{{"_BDN", 0, ACPI_RTYPE_INTEGER}},
{{"_BFS", 1, 0}},
- {{"_BIF", 0, ACPI_RTYPE_PACKAGE} }, /* Fixed-length (9 Int),(4 Str/Buf) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
- ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}, 4, 0} },
+ {{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4, 0}},
{{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
@@ -236,7 +237,8 @@ static const union acpi_predefined_info predefined_names[] = {
{{"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* See PCI firmware spec 3.0 */
{{"_CDM", 0, ACPI_RTYPE_INTEGER}},
{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0, 0}, 0,
+ 0}},
{{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
@@ -251,7 +253,8 @@ static const union acpi_predefined_info predefined_names[] = {
{{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
{{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
- {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
+ {{{ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,
+ 0}},
{{"_CWS", 1, ACPI_RTYPE_INTEGER}},
{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
@@ -342,8 +345,8 @@ static const union acpi_predefined_info predefined_names[] = {
{{"_MBM", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (8 Int) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0}, 0, 0}},
- {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (2 Str) */
- {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 2,0}, 0,0}},
+ {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Str/1 Buf) */
+ {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER}, 1, 0}},
{{"_MSG", 1, 0}},
{{"_MSM", 4, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 0347d0993497..f691d0e4d9fa 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -347,18 +347,21 @@ extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
/*
- * rsdump
+ * rsdumpinfo
*/
extern struct acpi_rsdump_info acpi_rs_dump_irq[];
+extern struct acpi_rsdump_info acpi_rs_dump_prt[];
extern struct acpi_rsdump_info acpi_rs_dump_dma[];
extern struct acpi_rsdump_info acpi_rs_dump_start_dpf[];
extern struct acpi_rsdump_info acpi_rs_dump_end_dpf[];
extern struct acpi_rsdump_info acpi_rs_dump_io[];
+extern struct acpi_rsdump_info acpi_rs_dump_io_flags[];
extern struct acpi_rsdump_info acpi_rs_dump_fixed_io[];
extern struct acpi_rsdump_info acpi_rs_dump_vendor[];
extern struct acpi_rsdump_info acpi_rs_dump_end_tag[];
extern struct acpi_rsdump_info acpi_rs_dump_memory24[];
extern struct acpi_rsdump_info acpi_rs_dump_memory32[];
+extern struct acpi_rsdump_info acpi_rs_dump_memory_flags[];
extern struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[];
extern struct acpi_rsdump_info acpi_rs_dump_address16[];
extern struct acpi_rsdump_info acpi_rs_dump_address32[];
@@ -372,6 +375,7 @@ extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_general_flags[];
#endif
#endif /* __ACRESRC_H__ */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 937e66c65d1e..7896d85876ca 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 6712965ba8ae..7755e915a007 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index b0f5f92b674a..0082fa0a6139 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -483,39 +483,17 @@ acpi_ut_short_divide(u64 in_dividend,
/*
* utmisc
*/
-void ut_convert_backslashes(char *pathname);
-
const char *acpi_ut_validate_exception(acpi_status status);
u8 acpi_ut_is_pci_root_bridge(char *id);
u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
-
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
-
acpi_status
acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
void *target_object,
acpi_pkg_callback walk_callback, void *context);
-void acpi_ut_strupr(char *src_string);
-
-void acpi_ut_strlwr(char *src_string);
-
-int acpi_ut_stricmp(char *string1, char *string2);
-
-void acpi_ut_print_string(char *string, u8 max_length);
-
-u8 acpi_ut_valid_acpi_name(u32 name);
-
-void acpi_ut_repair_name(char *name);
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position);
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
-
/* Values for Base above (16=Hex, 10=Decimal) */
#define ACPI_ANY_BASE 0
@@ -532,15 +510,25 @@ acpi_ut_display_init_pathname(u8 type,
#endif
/*
+ * utownerid - Support for Table/Method Owner IDs
+ */
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
+
+void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
+
+/*
* utresrc
*/
acpi_status
-acpi_ut_walk_aml_resources(u8 *aml,
+acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
+ u8 *aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function,
void **context);
-acpi_status acpi_ut_validate_resource(void *aml, u8 *return_index);
+acpi_status
+acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
+ void *aml, u8 *return_index);
u32 acpi_ut_get_descriptor_length(void *aml);
@@ -554,6 +542,27 @@ acpi_status
acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
/*
+ * utstring - String and character utilities
+ */
+void acpi_ut_strupr(char *src_string);
+
+void acpi_ut_strlwr(char *src_string);
+
+int acpi_ut_stricmp(char *string1, char *string2);
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
+
+void acpi_ut_print_string(char *string, u8 max_length);
+
+void ut_convert_backslashes(char *pathname);
+
+u8 acpi_ut_valid_acpi_name(u32 name);
+
+u8 acpi_ut_valid_acpi_char(char character, u32 position);
+
+void acpi_ut_repair_name(char *name);
+
+/*
* utmutex - mutex support
*/
acpi_status acpi_ut_mutex_initialize(void);
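
The acutils.h reshuffle above groups the prototypes by their new source files (utownerid, utresrc, utstring) and threads a walk_state argument through acpi_ut_walk_aml_resources() and acpi_ut_validate_resource(). As a rough standalone illustration of one of the relocated string helpers, here is a case-insensitive compare in the spirit of acpi_ut_stricmp(); the real ACPICA implementation may differ in detail:

#include <ctype.h>
#include <stdio.h>

/* Sketch only: a typical case-insensitive compare, not the ACPICA source. */
static int my_stricmp(const char *s1, const char *s2)
{
	int c1, c2;

	do {
		c1 = tolower((unsigned char)*s1++);
		c2 = tolower((unsigned char)*s2++);
	} while (c1 && c1 == c2);

	return c1 - c2;
}

int main(void)
{
	printf("%d\n", my_stricmp("_HID", "_hid"));	/* prints 0: equal */
	return 0;
}
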
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index c26f8ff6c3b9..48a3e331b72d 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 968449685e06..87c26366d1df 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,6 +199,12 @@ struct aml_resource_fixed_dma {
struct aml_resource_large_header {
AML_RESOURCE_LARGE_HEADER_COMMON};
+/* General Flags for address space resource descriptors */
+
+#define ACPI_RESOURCE_FLAG_DEC 2
+#define ACPI_RESOURCE_FLAG_MIF 4
+#define ACPI_RESOURCE_FLAG_MAF 8
+
struct aml_resource_memory24 {
AML_RESOURCE_LARGE_HEADER_COMMON u8 flags;
u16 minimum;
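
The three new constants name individual bits in the general flags byte carried by the address space resource descriptors (decode, minimum-address-fixed, maximum-address-fixed), and presumably feed the new acpi_rs_dump_general_flags table declared in acresrc.h above. A trivial standalone check of those bits, with the flag value chosen purely for illustration:

#include <stdio.h>

#define ACPI_RESOURCE_FLAG_DEC 2
#define ACPI_RESOURCE_FLAG_MIF 4
#define ACPI_RESOURCE_FLAG_MAF 8

int main(void)
{
	unsigned char flags = ACPI_RESOURCE_FLAG_MIF | ACPI_RESOURCE_FLAG_MAF;

	printf("decode: %d, min fixed: %d, max fixed: %d\n",
	       !!(flags & ACPI_RESOURCE_FLAG_DEC),
	       !!(flags & ACPI_RESOURCE_FLAG_MIF),
	       !!(flags & ACPI_RESOURCE_FLAG_MAF));
	return 0;
}
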
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index c8b5e2565b98..fb09b08d7080 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 57895db3231a..7ea0f162f11c 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index b5b904ee815f..feadeed1012d 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 87eff701ecfa..bc8e63f7784b 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 52eb4e01622a..a9ffd44c18fe 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
#include "acinterp.h"
#include "acnamesp.h"
#ifdef ACPI_DISASSEMBLER
-#include <acpi/acdisasm.h>
+#include "acdisasm.h"
#endif
#define _COMPONENT ACPI_DISPATCHER
@@ -151,6 +151,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_object_desc(mutex_desc);
return_ACPI_STATUS(status);
}
@@ -378,7 +379,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
*/
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
info->parameters = &this_walk_state->operands[0];
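
Both dsmethod.c hunks above tighten error handling: the freshly created mutex object is now released if acpi_os_create_mutex() fails, and the allocation failure in acpi_ds_call_control_method() now routes through the common cleanup label instead of returning directly. A standalone sketch of that acquire-then-unwind idiom (the resources and names here are made up, not ACPICA code):

#include <stdlib.h>

struct resource { int dummy; };

static int do_setup(void)
{
	struct resource *a, *b;

	a = malloc(sizeof(*a));
	if (!a)
		return -1;

	b = malloc(sizeof(*b));
	if (!b)
		goto err_free_a;	/* undo what was already acquired */

	/* ... work with a and b ... */

	free(b);
	free(a);
	return 0;

err_free_a:
	free(a);
	return -1;
}

int main(void)
{
	return do_setup() ? 1 : 0;
}
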
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 9a83b7e0f3ba..3da80460ce38 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index c9f15d3a3686..e20e9f84eee8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -388,7 +388,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *parent;
union acpi_operand_object *obj_desc = NULL;
acpi_status status = AE_OK;
- unsigned i;
+ u32 i;
u16 index;
u16 reference_count;
@@ -525,7 +525,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
ACPI_INFO((AE_INFO,
- "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
+ "Actual Package length (%u) is larger than NumElements field (%u), truncated",
i, element_count));
} else if (i < element_count) {
/*
@@ -703,7 +703,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Truncate value if we are executing from a 32-bit ACPI table */
#ifndef ACPI_NO_METHOD_EXECUTION
- acpi_ex_truncate_for32bit_table(obj_desc);
+ (void)acpi_ex_truncate_for32bit_table(obj_desc);
#endif
break;
@@ -725,8 +725,18 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
case AML_TYPE_LITERAL:
obj_desc->integer.value = op->common.value.integer;
+
#ifndef ACPI_NO_METHOD_EXECUTION
- acpi_ex_truncate_for32bit_table(obj_desc);
+ if (acpi_ex_truncate_for32bit_table(obj_desc)) {
+
+ /* Warn if we found a 64-bit constant in a 32-bit table */
+
+ ACPI_WARNING((AE_INFO,
+ "Truncated 64-bit constant found in 32-bit table: %8.8X%8.8X => %8.8X",
+ ACPI_FORMAT_UINT64(op->common.
+ value.integer),
+ (u32)obj_desc->integer.value));
+ }
#endif
break;
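
The literal-initialization path above now checks the return value of acpi_ex_truncate_for32bit_table() and warns when a 64-bit constant had to be truncated for a 32-bit table, while the other call sites simply cast the result to void. A standalone sketch of that return-a-truncation-flag pattern, ignoring the table-width check the real function performs:

#include <stdint.h>
#include <stdio.h>

/* Returns nonzero if the value did not fit in 32 bits and was truncated. */
static int truncate_to_32bit(uint64_t *value)
{
	uint64_t original = *value;

	*value &= 0xFFFFFFFFu;
	return *value != original;
}

int main(void)
{
	uint64_t v = 0x123456789ABCDEF0ull;

	if (truncate_to_32bit(&v))
		printf("Truncated 64-bit constant to %08X\n", (unsigned int)v);
	return 0;
}
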
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index d09c6b4bab2c..ee6367b8eaf7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -486,18 +486,18 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
/*
- * This is where we evaluate the signature_string and oem_iDString
- * and oem_table_iDString of the data_table_region declaration
+ * This is where we evaluate the Signature string, oem_id string,
+ * and oem_table_id string of the Data Table Region declaration
*/
node = op->common.node;
- /* next_op points to signature_string op */
+ /* next_op points to Signature string op */
next_op = op->common.value.arg;
/*
- * Evaluate/create the signature_string and oem_iDString
- * and oem_table_iDString operands
+ * Evaluate/create the Signature string, oem_id string,
+ * and oem_table_id string operands
*/
status = acpi_ds_create_operands(walk_state, next_op);
if (ACPI_FAILURE(status)) {
@@ -505,8 +505,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
}
/*
- * Resolve the signature_string and oem_iDString
- * and oem_table_iDString operands
+ * Resolve the Signature string, oem_id string,
+ * and oem_table_id string operands
*/
status = acpi_ex_resolve_operands(op->common.aml_opcode,
ACPI_WALK_OPERANDS, walk_state);
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index afeb99f49482..4d8c992a51d8 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -178,7 +178,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
if (!op) {
ACPI_ERROR((AE_INFO, "Null Op"));
- return_UINT8(TRUE);
+ return_VALUE(TRUE);
}
/*
@@ -210,7 +210,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
"At Method level, result of [%s] not used\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode)));
- return_UINT8(FALSE);
+ return_VALUE(FALSE);
}
/* Get info on the parent. The root_op is AML_SCOPE */
@@ -219,7 +219,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
if (parent_info->class == AML_CLASS_UNKNOWN) {
ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
- return_UINT8(FALSE);
+ return_VALUE(FALSE);
}
/*
@@ -307,7 +307,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
- return_UINT8(TRUE);
+ return_VALUE(TRUE);
result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@@ -316,7 +316,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
- return_UINT8(FALSE);
+ return_VALUE(FALSE);
}
/*******************************************************************************
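
The dsutils.c changes replace the width-specific return_UINT8()/return_UINT32() exit macros with a single return_VALUE(); the same substitution shows up below in evgpe.c and evsci.c. Conceptually these macros emit a function-exit trace before returning. A rough standalone sketch of the idea -- not the actual acoutput.h definition:

#include <stdio.h>

/* Sketch of a trace-and-return macro in the spirit of return_VALUE(). */
#define return_VALUE(value)						\
	do {								\
		printf("Exiting %s, value %u\n",			\
		       __func__, (unsigned int)(value));		\
		return (value);						\
	} while (0)

static unsigned int is_result_used(int used)
{
	if (!used)
		return_VALUE(0);

	return_VALUE(1);
}

int main(void)
{
	return (int)is_result_used(1) - 1;	/* exits 0 */
}
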
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 58593931be96..44f8325c2bae 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -149,7 +149,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
/* Truncate the predicate to 32-bits if necessary */
- acpi_ex_truncate_for32bit_table(local_obj_desc);
+ (void)acpi_ex_truncate_for32bit_table(local_obj_desc);
/*
* Save the result of the predicate evaluation on
@@ -706,7 +706,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
* ACPI 2.0 support for 64-bit integers: Truncate numeric
* result value if we are executing from a 32-bit ACPI table
*/
- acpi_ex_truncate_for32bit_table(walk_state->result_obj);
+ (void)acpi_ex_truncate_for32bit_table(walk_state->result_obj);
/*
* Check if we just completed the evaluation of a
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 557510084c7a..6e17c0e24e63 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,7 +50,7 @@
#include "acnamesp.h"
#ifdef ACPI_ASL_COMPILER
-#include <acpi/acdisasm.h>
+#include "acdisasm.h"
#endif
#define _COMPONENT ACPI_DISPATCHER
@@ -178,7 +178,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* Target of Scope() not found. Generate an External for it, and
* insert the name into the namespace.
*/
- acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0);
+ acpi_dm_add_to_external_list(op, path, ACPI_TYPE_DEVICE,
+ 0);
status =
acpi_ns_lookup(walk_state->scope_info, path,
object_type, ACPI_IMODE_LOAD_PASS1,
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 379835748357..4407ff2377d5 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -222,7 +222,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
*/
ACPI_WARNING((AE_INFO,
"Type override - [%4.4s] had invalid type (%s) "
- "for Scope operator, changed to type ANY\n",
+ "for Scope operator, changed to type ANY",
acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index f6c4295470ae..d67891de1b54 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 3e65a15a735f..ecb12e2137ff 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d4acfbbe5b29..b8ea0b26cde3 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index af14a7137632..a621481c6cf2 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 36d120574423..b9adb9a7ed85 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -561,8 +561,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = AE_NO_MEMORY;
} else {
/*
- * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
- * control method that corresponds to this GPE
+ * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
+ * _Lxx/_Exx control method that corresponds to this GPE
*/
info->prefix_node =
local_gpe_event_info->dispatch.method_node;
@@ -707,7 +707,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE%02X", gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
}
}
@@ -724,7 +724,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE%02X", gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
}
/*
@@ -765,7 +765,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to queue handler for GPE%2X - event disabled",
+ "Unable to queue handler for GPE%02X - event disabled",
gpe_number));
}
break;
@@ -784,7 +784,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
break;
}
- return_UINT32(ACPI_INTERRUPT_HANDLED);
+ return_VALUE(ACPI_INTERRUPT_HANDLED);
}
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 1571a61a7833..a2d688bbac02 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -405,13 +405,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
(*return_gpe_block) = gpe_block;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
- (u32) gpe_block->block_base_number,
- (u32) (gpe_block->block_base_number +
- (gpe_block->gpe_count - 1)),
- gpe_device->name.ascii, gpe_block->register_count,
- interrupt_number));
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X\n",
+ (u32)gpe_block->block_base_number,
+ (u32)(gpe_block->block_base_number +
+ (gpe_block->gpe_count - 1)),
+ gpe_device->name.ascii, gpe_block->register_count,
+ interrupt_number));
/* Update global count of currently available GPEs */
@@ -496,9 +496,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
}
if (gpe_enabled_count) {
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Enabled %u GPEs in this block\n",
- gpe_enabled_count));
+ ACPI_INFO((AE_INFO,
+ "Enabled %u GPEs in block %02X to %02X",
+ gpe_enabled_count, (u32)gpe_block->block_base_number,
+ (u32)(gpe_block->block_base_number +
+ (gpe_block->gpe_count - 1))));
}
gpe_block->initialized = TRUE;
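
The revised messages above report the full range a GPE block covers, i.e. block_base_number through block_base_number + gpe_count - 1, printed as two-digit hex. A trivial standalone illustration of that formatting, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int block_base_number = 0x00;
	unsigned int gpe_count = 32;
	unsigned int enabled = 4;

	printf("Enabled %u GPEs in block %02X to %02X\n",
	       enabled, block_base_number,
	       block_base_number + gpe_count - 1);
	return 0;
}
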
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index da0add858f81..72b8f6b3f4ca 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,9 @@ acpi_status acpi_ev_gpe_initialize(void)
ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "Initializing General Purpose Events (GPEs):\n"));
+
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 228a0c3b1d49..b24dbb80fab8 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
new file mode 100644
index 000000000000..d4f83112c2e2
--- /dev/null
+++ b/drivers/acpi/acpica/evhandler.c
@@ -0,0 +1,529 @@
+/******************************************************************************
+ *
+ * Module Name: evhandler - Support for Address Space handlers
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evhandler")
+
+/* Local prototypes */
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/* These are the address spaces that will get default handlers */
+
+u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
+ ACPI_ADR_SPACE_SYSTEM_MEMORY,
+ ACPI_ADR_SPACE_SYSTEM_IO,
+ ACPI_ADR_SPACE_PCI_CONFIG,
+ ACPI_ADR_SPACE_DATA_TABLE
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_region_handlers
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Installs the core subsystem default address space handlers.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_region_handlers(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ev_install_region_handlers);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * All address spaces (PCI Config, EC, SMBus) are scope dependent and
+ * registration must occur for a specific device.
+ *
+ * In the case of the system memory and IO address spaces there is
+ * currently no device associated with the address space. For these we
+ * use the root.
+ *
+ * We install the default PCI config space handler at the root so that
+ * this space is immediately available even though the we have not
+	 * this space is immediately available even though we have not
+ * specification which states that the PCI config space must be always
+ * available -- even though we are nowhere near ready to find the PCI root
+ * buses at this point.
+ *
+ * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
+ * has already been installed (via acpi_install_address_space_handler).
+ * Similar for AE_SAME_HANDLER.
+ */
+ for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+ status = acpi_ev_install_space_handler(acpi_gbl_root_node,
+ acpi_gbl_default_address_spaces
+ [i],
+ ACPI_DEFAULT_HANDLER,
+ NULL, NULL);
+ switch (status) {
+ case AE_OK:
+ case AE_SAME_HANDLER:
+ case AE_ALREADY_EXISTS:
+
+ /* These exceptions are all OK */
+
+ status = AE_OK;
+ break;
+
+ default:
+
+ goto unlock_and_exit;
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_has_default_handler
+ *
+ * PARAMETERS: node - Namespace node for the device
+ * space_id - The address space ID
+ *
+ * RETURN: TRUE if default handler is installed, FALSE otherwise
+ *
+ * DESCRIPTION: Check if the default handler is installed for the requested
+ * space ID.
+ *
+ ******************************************************************************/
+
+u8
+acpi_ev_has_default_handler(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj;
+
+ /* Must have an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+ handler_obj = obj_desc->device.handler;
+
+ /* Walk the linked list of handlers for this object */
+
+ while (handler_obj) {
+ if (handler_obj->address_space.space_id == space_id) {
+ if (handler_obj->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
+ return (TRUE);
+ }
+ }
+
+ handler_obj = handler_obj->address_space.next;
+ }
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_handler
+ *
+ * PARAMETERS: walk_namespace callback
+ *
+ * DESCRIPTION: This routine installs an address handler into objects that are
+ * of type Region or Device.
+ *
+ * If the Object is a Device, and the device has a handler of
+ * the same type then the search is terminated in that branch.
+ *
+ * This is because the existing handler is closer in proximity
+ * to any more regions than the one we are trying to install.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *next_handler_obj;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(ev_install_handler);
+
+ handler_obj = (union acpi_operand_object *)context;
+
+ /* Parameter validation */
+
+ if (!handler_obj) {
+ return (AE_OK);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_validate_handle(obj_handle);
+ if (!node) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * We only care about regions and objects that are allowed to have
+ * address space handlers
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+ return (AE_OK);
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, just exit */
+
+ return (AE_OK);
+ }
+
+	/* Devices are handled differently than regions */
+
+ if (obj_desc->common.type == ACPI_TYPE_DEVICE) {
+
+ /* Check if this Device already has a handler for this address space */
+
+ next_handler_obj = obj_desc->device.handler;
+ while (next_handler_obj) {
+
+ /* Found a handler, is it for the same address space? */
+
+ if (next_handler_obj->address_space.space_id ==
+ handler_obj->address_space.space_id) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Found handler for region [%s] in device %p(%p) "
+ "handler %p\n",
+ acpi_ut_get_region_name
+ (handler_obj->address_space.
+ space_id), obj_desc,
+ next_handler_obj,
+ handler_obj));
+
+ /*
+ * Since the object we found it on was a device, then it
+ * means that someone has already installed a handler for
+ * the branch of the namespace from this device on. Just
+ * bail out telling the walk routine to not traverse this
+ * branch. This preserves the scoping rule for handlers.
+ */
+ return (AE_CTRL_DEPTH);
+ }
+
+ /* Walk the linked list of handlers attached to this device */
+
+ next_handler_obj = next_handler_obj->address_space.next;
+ }
+
+ /*
+ * As long as the device didn't have a handler for this space we
+ * don't care about it. We just ignore it and proceed.
+ */
+ return (AE_OK);
+ }
+
+ /* Object is a Region */
+
+ if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
+
+ /* This region is for a different address space, just ignore it */
+
+ return (AE_OK);
+ }
+
+ /*
+ * Now we have a region and it is for the handler's address space type.
+ *
+ * First disconnect region for any previous handler (if any)
+ */
+ acpi_ev_detach_region(obj_desc, FALSE);
+
+ /* Connect the region to the new handler */
+
+ status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_space_handler
+ *
+ * PARAMETERS: node - Namespace node for the device
+ * space_id - The address space ID
+ * handler - Address of the handler
+ * setup - Address of the setup function
+ * context - Value passed to the handler on each access
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for all op_regions of a given space_id.
+ * Assumes namespace is locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node * node,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj;
+ acpi_status status;
+ acpi_object_type type;
+ u8 flags = 0;
+
+ ACPI_FUNCTION_TRACE(ev_install_space_handler);
+
+ /*
+ * This registration is valid for only the types below and the root. This
+ * is where the default handlers get placed.
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_PROCESSOR) &&
+ (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ if (handler == ACPI_DEFAULT_HANDLER) {
+ flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
+
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ handler = acpi_ex_system_memory_space_handler;
+ setup = acpi_ev_system_memory_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ handler = acpi_ex_system_io_space_handler;
+ setup = acpi_ev_io_space_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ handler = acpi_ex_pci_config_space_handler;
+ setup = acpi_ev_pci_config_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_CMOS:
+ handler = acpi_ex_cmos_space_handler;
+ setup = acpi_ev_cmos_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ handler = acpi_ex_pci_bar_space_handler;
+ setup = acpi_ev_pci_bar_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ handler = acpi_ex_data_table_space_handler;
+ setup = NULL;
+ break;
+
+ default:
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ }
+
+ /* If the caller hasn't specified a setup routine, use the default */
+
+ if (!setup) {
+ setup = acpi_ev_default_region_setup;
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+ /*
+ * The attached device object already exists. Make sure the handler
+ * is not already installed.
+ */
+ handler_obj = obj_desc->device.handler;
+
+ /* Walk the handler list for this device */
+
+ while (handler_obj) {
+
+ /* Same space_id indicates a handler already installed */
+
+ if (handler_obj->address_space.space_id == space_id) {
+ if (handler_obj->address_space.handler ==
+ handler) {
+ /*
+ * It is (relatively) OK to attempt to install the SAME
+ * handler twice. This can easily happen with the
+ * PCI_Config space.
+ */
+ status = AE_SAME_HANDLER;
+ goto unlock_and_exit;
+ } else {
+ /* A handler is already installed */
+
+ status = AE_ALREADY_EXISTS;
+ }
+ goto unlock_and_exit;
+ }
+
+ /* Walk the linked list of handlers */
+
+ handler_obj = handler_obj->address_space.next;
+ }
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Creating object on Device %p while installing handler\n",
+ node));
+
+ /* obj_desc does not exist, create one */
+
+ if (node->type == ACPI_TYPE_ANY) {
+ type = ACPI_TYPE_DEVICE;
+ } else {
+ type = node->type;
+ }
+
+ obj_desc = acpi_ut_create_internal_object(type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Init new descriptor */
+
+ obj_desc->common.type = (u8)type;
+
+ /* Attach the new object to the Node */
+
+ status = acpi_ns_attach_object(node, obj_desc, type);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
+ acpi_ut_get_region_name(space_id), space_id,
+ acpi_ut_get_node_name(node), node, obj_desc));
+
+ /*
+ * Install the handler
+ *
+ * At this point there is no existing handler. Just allocate the object
+ * for the handler and link it into the list.
+ */
+ handler_obj =
+ acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
+ if (!handler_obj) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Init handler obj */
+
+ handler_obj->address_space.space_id = (u8)space_id;
+ handler_obj->address_space.handler_flags = flags;
+ handler_obj->address_space.region_list = NULL;
+ handler_obj->address_space.node = node;
+ handler_obj->address_space.handler = handler;
+ handler_obj->address_space.context = context;
+ handler_obj->address_space.setup = setup;
+
+ /* Install at head of Device.address_space list */
+
+ handler_obj->address_space.next = obj_desc->device.handler;
+
+ /*
+ * The Device object is the first reference on the handler_obj.
+ * Each region that uses the handler adds a reference.
+ */
+ obj_desc->device.handler = handler_obj;
+
+ /*
+ * Walk the namespace finding all of the regions this
+ * handler will manage.
+ *
+ * Start at the device and search the branch toward
+ * the leaf nodes until either the leaf is encountered or
+ * a device is detected that has an address handler of the
+ * same type.
+ *
+ * In either case, back up and search down the remainder
+ * of the branch
+ */
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+ ACPI_NS_WALK_UNLOCK,
+ acpi_ev_install_handler, NULL,
+ handler_obj, NULL);
+
+ unlock_and_exit:
+ return_ACPI_STATUS(status);
+}
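
The new evhandler.c above collects the address space handler machinery that used to live in evregion.c. Drivers reach it through the public acpi_install_address_space_handler() interface, which ends up in acpi_ev_install_space_handler(). A minimal sketch of such a caller; the handler body, the EC space ID, and the NULL setup routine (which falls back to acpi_ev_default_region_setup()) are illustrative choices, not something this patch mandates:

#include <acpi/acpi.h>

/* Illustrative operation region handler: satisfies every access trivially. */
static acpi_status my_region_handler(u32 function,
				     acpi_physical_address address,
				     u32 bit_width, u64 *value,
				     void *handler_context,
				     void *region_context)
{
	if (function == ACPI_READ)
		*value = 0;	/* pretend the hardware returned zero */

	return AE_OK;
}

static acpi_status install_my_handler(acpi_handle device)
{
	return acpi_install_address_space_handler(device, ACPI_ADR_SPACE_EC,
						  my_region_handler,
						  NULL, NULL);
}
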
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 51f537937c1f..c986b2336b81 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 0cc6a16fedc7..6555e350fc1f 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -1,11 +1,11 @@
/******************************************************************************
*
- * Module Name: evregion - ACPI address_space (op_region) handler dispatch
+ * Module Name: evregion - Operation Region support
*
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,10 +50,9 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evregion")
+extern u8 acpi_gbl_default_address_spaces[];
+
/* Local prototypes */
-static u8
-acpi_ev_has_default_handler(struct acpi_namespace_node *node,
- acpi_adr_space_type space_id);
static void acpi_ev_orphan_ec_reg_method(void);
@@ -61,135 +60,6 @@ static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
-static acpi_status
-acpi_ev_install_handler(acpi_handle obj_handle,
- u32 level, void *context, void **return_value);
-
-/* These are the address spaces that will get default handlers */
-
-#define ACPI_NUM_DEFAULT_SPACES 4
-
-static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
- ACPI_ADR_SPACE_SYSTEM_MEMORY,
- ACPI_ADR_SPACE_SYSTEM_IO,
- ACPI_ADR_SPACE_PCI_CONFIG,
- ACPI_ADR_SPACE_DATA_TABLE
-};
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_install_region_handlers
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Installs the core subsystem default address space handlers.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_install_region_handlers(void)
-{
- acpi_status status;
- u32 i;
-
- ACPI_FUNCTION_TRACE(ev_install_region_handlers);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * All address spaces (PCI Config, EC, SMBus) are scope dependent and
- * registration must occur for a specific device.
- *
- * In the case of the system memory and IO address spaces there is
- * currently no device associated with the address space. For these we
- * use the root.
- *
- * We install the default PCI config space handler at the root so that
- * this space is immediately available even though the we have not
- * enumerated all the PCI Root Buses yet. This is to conform to the ACPI
- * specification which states that the PCI config space must be always
- * available -- even though we are nowhere near ready to find the PCI root
- * buses at this point.
- *
- * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
- * has already been installed (via acpi_install_address_space_handler).
- * Similar for AE_SAME_HANDLER.
- */
- for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
- status = acpi_ev_install_space_handler(acpi_gbl_root_node,
- acpi_gbl_default_address_spaces
- [i],
- ACPI_DEFAULT_HANDLER,
- NULL, NULL);
- switch (status) {
- case AE_OK:
- case AE_SAME_HANDLER:
- case AE_ALREADY_EXISTS:
-
- /* These exceptions are all OK */
-
- status = AE_OK;
- break;
-
- default:
-
- goto unlock_and_exit;
- }
- }
-
- unlock_and_exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_has_default_handler
- *
- * PARAMETERS: node - Namespace node for the device
- * space_id - The address space ID
- *
- * RETURN: TRUE if default handler is installed, FALSE otherwise
- *
- * DESCRIPTION: Check if the default handler is installed for the requested
- * space ID.
- *
- ******************************************************************************/
-
-static u8
-acpi_ev_has_default_handler(struct acpi_namespace_node *node,
- acpi_adr_space_type space_id)
-{
- union acpi_operand_object *obj_desc;
- union acpi_operand_object *handler_obj;
-
- /* Must have an existing internal object */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (obj_desc) {
- handler_obj = obj_desc->device.handler;
-
- /* Walk the linked list of handlers for this object */
-
- while (handler_obj) {
- if (handler_obj->address_space.space_id == space_id) {
- if (handler_obj->address_space.handler_flags &
- ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
- return (TRUE);
- }
- }
-
- handler_obj = handler_obj->address_space.next;
- }
- }
-
- return (FALSE);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ev_initialize_op_regions
@@ -241,91 +111,6 @@ acpi_status acpi_ev_initialize_op_regions(void)
/*******************************************************************************
*
- * FUNCTION: acpi_ev_execute_reg_method
- *
- * PARAMETERS: region_obj - Region object
- * function - Passed to _REG: On (1) or Off (0)
- *
- * RETURN: Status
- *
- * DESCRIPTION: Execute _REG method for a region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
-{
- struct acpi_evaluate_info *info;
- union acpi_operand_object *args[3];
- union acpi_operand_object *region_obj2;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_execute_reg_method);
-
- region_obj2 = acpi_ns_get_secondary_object(region_obj);
- if (!region_obj2) {
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
-
- if (region_obj2->extra.method_REG == NULL) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Allocate and initialize the evaluation information block */
-
- info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
- if (!info) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- info->prefix_node = region_obj2->extra.method_REG;
- info->pathname = NULL;
- info->parameters = args;
- info->flags = ACPI_IGNORE_RETURN_VALUE;
-
- /*
- * The _REG method has two arguments:
- *
- * arg0 - Integer:
- * Operation region space ID Same value as region_obj->Region.space_id
- *
- * arg1 - Integer:
- * connection status 1 for connecting the handler, 0 for disconnecting
- * the handler (Passed as a parameter)
- */
- args[0] =
- acpi_ut_create_integer_object((u64) region_obj->region.space_id);
- if (!args[0]) {
- status = AE_NO_MEMORY;
- goto cleanup1;
- }
-
- args[1] = acpi_ut_create_integer_object((u64) function);
- if (!args[1]) {
- status = AE_NO_MEMORY;
- goto cleanup2;
- }
-
- args[2] = NULL; /* Terminate list */
-
- /* Execute the method, no return value */
-
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_METHOD, info->prefix_node, NULL));
-
- status = acpi_ns_evaluate(info);
- acpi_ut_remove_reference(args[1]);
-
- cleanup2:
- acpi_ut_remove_reference(args[0]);
-
- cleanup1:
- ACPI_FREE(info);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_address_space_dispatch
*
* PARAMETERS: region_obj - Internal region object
@@ -709,351 +494,86 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
/*******************************************************************************
*
- * FUNCTION: acpi_ev_install_handler
- *
- * PARAMETERS: walk_namespace callback
- *
- * DESCRIPTION: This routine installs an address handler into objects that are
- * of type Region or Device.
- *
- * If the Object is a Device, and the device has a handler of
- * the same type then the search is terminated in that branch.
- *
- * This is because the existing handler is closer in proximity
- * to any more regions than the one we are trying to install.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_install_handler(acpi_handle obj_handle,
- u32 level, void *context, void **return_value)
-{
- union acpi_operand_object *handler_obj;
- union acpi_operand_object *next_handler_obj;
- union acpi_operand_object *obj_desc;
- struct acpi_namespace_node *node;
- acpi_status status;
-
- ACPI_FUNCTION_NAME(ev_install_handler);
-
- handler_obj = (union acpi_operand_object *)context;
-
- /* Parameter validation */
-
- if (!handler_obj) {
- return (AE_OK);
- }
-
- /* Convert and validate the device handle */
-
- node = acpi_ns_validate_handle(obj_handle);
- if (!node) {
- return (AE_BAD_PARAMETER);
- }
-
- /*
- * We only care about regions and objects that are allowed to have
- * address space handlers
- */
- if ((node->type != ACPI_TYPE_DEVICE) &&
- (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
- return (AE_OK);
- }
-
- /* Check for an existing internal object */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc) {
-
- /* No object, just exit */
-
- return (AE_OK);
- }
-
- /* Devices are handled different than regions */
-
- if (obj_desc->common.type == ACPI_TYPE_DEVICE) {
-
- /* Check if this Device already has a handler for this address space */
-
- next_handler_obj = obj_desc->device.handler;
- while (next_handler_obj) {
-
- /* Found a handler, is it for the same address space? */
-
- if (next_handler_obj->address_space.space_id ==
- handler_obj->address_space.space_id) {
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Found handler for region [%s] in device %p(%p) "
- "handler %p\n",
- acpi_ut_get_region_name
- (handler_obj->address_space.
- space_id), obj_desc,
- next_handler_obj,
- handler_obj));
-
- /*
- * Since the object we found it on was a device, then it
- * means that someone has already installed a handler for
- * the branch of the namespace from this device on. Just
- * bail out telling the walk routine to not traverse this
- * branch. This preserves the scoping rule for handlers.
- */
- return (AE_CTRL_DEPTH);
- }
-
- /* Walk the linked list of handlers attached to this device */
-
- next_handler_obj = next_handler_obj->address_space.next;
- }
-
- /*
- * As long as the device didn't have a handler for this space we
- * don't care about it. We just ignore it and proceed.
- */
- return (AE_OK);
- }
-
- /* Object is a Region */
-
- if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
-
- /* This region is for a different address space, just ignore it */
-
- return (AE_OK);
- }
-
- /*
- * Now we have a region and it is for the handler's address space type.
- *
- * First disconnect region for any previous handler (if any)
- */
- acpi_ev_detach_region(obj_desc, FALSE);
-
- /* Connect the region to the new handler */
-
- status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
- return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_install_space_handler
+ * FUNCTION: acpi_ev_execute_reg_method
*
- * PARAMETERS: node - Namespace node for the device
- * space_id - The address space ID
- * handler - Address of the handler
- * setup - Address of the setup function
- * context - Value passed to the handler on each access
+ * PARAMETERS: region_obj - Region object
+ * function - Passed to _REG: On (1) or Off (0)
*
* RETURN: Status
*
- * DESCRIPTION: Install a handler for all op_regions of a given space_id.
- * Assumes namespace is locked
+ * DESCRIPTION: Execute _REG method for a region
*
******************************************************************************/
acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node * node,
- acpi_adr_space_type space_id,
- acpi_adr_space_handler handler,
- acpi_adr_space_setup setup, void *context)
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
{
- union acpi_operand_object *obj_desc;
- union acpi_operand_object *handler_obj;
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[3];
+ union acpi_operand_object *region_obj2;
acpi_status status;
- acpi_object_type type;
- u8 flags = 0;
- ACPI_FUNCTION_TRACE(ev_install_space_handler);
-
- /*
- * This registration is valid for only the types below and the root. This
- * is where the default handlers get placed.
- */
- if ((node->type != ACPI_TYPE_DEVICE) &&
- (node->type != ACPI_TYPE_PROCESSOR) &&
- (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
+ ACPI_FUNCTION_TRACE(ev_execute_reg_method);
- if (handler == ACPI_DEFAULT_HANDLER) {
- flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
-
- switch (space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- handler = acpi_ex_system_memory_space_handler;
- setup = acpi_ev_system_memory_region_setup;
- break;
-
- case ACPI_ADR_SPACE_SYSTEM_IO:
- handler = acpi_ex_system_io_space_handler;
- setup = acpi_ev_io_space_region_setup;
- break;
-
- case ACPI_ADR_SPACE_PCI_CONFIG:
- handler = acpi_ex_pci_config_space_handler;
- setup = acpi_ev_pci_config_region_setup;
- break;
-
- case ACPI_ADR_SPACE_CMOS:
- handler = acpi_ex_cmos_space_handler;
- setup = acpi_ev_cmos_region_setup;
- break;
-
- case ACPI_ADR_SPACE_PCI_BAR_TARGET:
- handler = acpi_ex_pci_bar_space_handler;
- setup = acpi_ev_pci_bar_region_setup;
- break;
-
- case ACPI_ADR_SPACE_DATA_TABLE:
- handler = acpi_ex_data_table_space_handler;
- setup = NULL;
- break;
-
- default:
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* If the caller hasn't specified a setup routine, use the default */
-
- if (!setup) {
- setup = acpi_ev_default_region_setup;
+ if (region_obj2->extra.method_REG == NULL) {
+ return_ACPI_STATUS(AE_OK);
}
- /* Check for an existing internal object */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (obj_desc) {
- /*
- * The attached device object already exists. Make sure the handler
- * is not already installed.
- */
- handler_obj = obj_desc->device.handler;
-
- /* Walk the handler list for this device */
-
- while (handler_obj) {
-
- /* Same space_id indicates a handler already installed */
-
- if (handler_obj->address_space.space_id == space_id) {
- if (handler_obj->address_space.handler ==
- handler) {
- /*
- * It is (relatively) OK to attempt to install the SAME
- * handler twice. This can easily happen with the
- * PCI_Config space.
- */
- status = AE_SAME_HANDLER;
- goto unlock_and_exit;
- } else {
- /* A handler is already installed */
-
- status = AE_ALREADY_EXISTS;
- }
- goto unlock_and_exit;
- }
-
- /* Walk the linked list of handlers */
-
- handler_obj = handler_obj->address_space.next;
- }
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Creating object on Device %p while installing handler\n",
- node));
-
- /* obj_desc does not exist, create one */
-
- if (node->type == ACPI_TYPE_ANY) {
- type = ACPI_TYPE_DEVICE;
- } else {
- type = node->type;
- }
-
- obj_desc = acpi_ut_create_internal_object(type);
- if (!obj_desc) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
- }
-
- /* Init new descriptor */
-
- obj_desc->common.type = (u8) type;
-
- /* Attach the new object to the Node */
-
- status = acpi_ns_attach_object(node, obj_desc, type);
-
- /* Remove local reference to the object */
-
- acpi_ut_remove_reference(obj_desc);
+ /* Allocate and initialize the evaluation information block */
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
- acpi_ut_get_region_name(space_id), space_id,
- acpi_ut_get_node_name(node), node, obj_desc));
+ info->prefix_node = region_obj2->extra.method_REG;
+ info->pathname = NULL;
+ info->parameters = args;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
/*
- * Install the handler
+ * The _REG method has two arguments:
+ *
+ * arg0 - Integer:
+	 * Operation region space ID. Same value as region_obj->Region.space_id
*
- * At this point there is no existing handler. Just allocate the object
- * for the handler and link it into the list.
+ * arg1 - Integer:
+	 * connection status: 1 for connecting the handler, 0 for disconnecting
+ * the handler (Passed as a parameter)
*/
- handler_obj =
- acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
- if (!handler_obj) {
+ args[0] =
+ acpi_ut_create_integer_object((u64)region_obj->region.space_id);
+ if (!args[0]) {
status = AE_NO_MEMORY;
- goto unlock_and_exit;
+ goto cleanup1;
}
- /* Init handler obj */
+ args[1] = acpi_ut_create_integer_object((u64)function);
+ if (!args[1]) {
+ status = AE_NO_MEMORY;
+ goto cleanup2;
+ }
- handler_obj->address_space.space_id = (u8) space_id;
- handler_obj->address_space.handler_flags = flags;
- handler_obj->address_space.region_list = NULL;
- handler_obj->address_space.node = node;
- handler_obj->address_space.handler = handler;
- handler_obj->address_space.context = context;
- handler_obj->address_space.setup = setup;
+ args[2] = NULL; /* Terminate list */
- /* Install at head of Device.address_space list */
+ /* Execute the method, no return value */
- handler_obj->address_space.next = obj_desc->device.handler;
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, info->prefix_node, NULL));
- /*
- * The Device object is the first reference on the handler_obj.
- * Each region that uses the handler adds a reference.
- */
- obj_desc->device.handler = handler_obj;
+ status = acpi_ns_evaluate(info);
+ acpi_ut_remove_reference(args[1]);
- /*
- * Walk the namespace finding all of the regions this
- * handler will manage.
- *
- * Start at the device and search the branch toward
- * the leaf nodes until either the leaf is encountered or
- * a device is detected that has an address handler of the
- * same type.
- *
- * In either case, back up and search down the remainder
- * of the branch
- */
- status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK,
- acpi_ev_install_handler, NULL,
- handler_obj, NULL);
+ cleanup2:
+ acpi_ut_remove_reference(args[0]);
- unlock_and_exit:
+ cleanup1:
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
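The evregion.c hunk above (the handler-installation code it removes now lives in the new evhandler.c) converts _REG execution to the standard acpi_evaluate_info pattern: allocate a zeroed info block, build a NULL-terminated two-integer argument list, evaluate, then unwind through the cleanup labels. A condensed sketch of that pattern follows; the function name run_reg_method and its parameters are illustrative only, not part of the patch.

/* Sketch only: how the patch builds and runs _REG (error unwinding simplified) */
static acpi_status run_reg_method(struct acpi_namespace_node *reg_node,
				  u8 space_id, u32 function)
{
	struct acpi_evaluate_info *info;
	union acpi_operand_object *args[3];
	acpi_status status = AE_NO_MEMORY;

	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		return (AE_NO_MEMORY);
	}

	info->prefix_node = reg_node;	/* Node of the _REG method itself */
	info->parameters = args;
	info->flags = ACPI_IGNORE_RETURN_VALUE;

	args[0] = acpi_ut_create_integer_object((u64)space_id);	/* arg0: space ID */
	args[1] = acpi_ut_create_integer_object((u64)function);	/* arg1: 1 connect, 0 disconnect */
	args[2] = NULL;							/* Terminate list */

	if (args[0] && args[1]) {
		status = acpi_ns_evaluate(info);
	}

	if (args[1]) {
		acpi_ut_remove_reference(args[1]);
	}
	if (args[0]) {
		acpi_ut_remove_reference(args[0]);
	}
	ACPI_FREE(info);
	return (status);
}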
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 1474241bfc7e..3bb616794b3b 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index f9661e2b46a9..f4b43bede015 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,7 +89,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
- return_UINT32(interrupt_handled);
+ return_VALUE(interrupt_handled);
}
/*******************************************************************************
@@ -120,7 +120,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
- return_UINT32(interrupt_handled);
+ return_VALUE(interrupt_handled);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ae668f32cf16..ddffd6847914 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,13 +56,13 @@ ACPI_MODULE_NAME("evxface")
*
* FUNCTION: acpi_install_notify_handler
*
- * PARAMETERS: Device - The device for which notifies will be handled
+ * PARAMETERS: device - The device for which notifies will be handled
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
* ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
* ACPI_ALL_NOTIFY: Both System and Device
- * Handler - Address of the handler
- * Context - Value passed to the handler on each GPE
+ * handler - Address of the handler
+ * context - Value passed to the handler on each GPE
*
* RETURN: Status
*
@@ -217,12 +217,12 @@ ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
*
* FUNCTION: acpi_remove_notify_handler
*
- * PARAMETERS: Device - The device for which the handler is installed
+ * PARAMETERS: device - The device for which the handler is installed
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
* ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
* ACPI_ALL_NOTIFY: Both System and Device
- * Handler - Address of the handler
+ * handler - Address of the handler
*
* RETURN: Status
*
@@ -249,7 +249,8 @@ acpi_remove_notify_handler(acpi_handle device,
(handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Make sure all deferred tasks are completed */
+
+ /* Make sure all deferred notify tasks are completed */
acpi_os_wait_events_complete();
@@ -596,7 +597,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
- /* Allocate memory for the handler object */
+ /* Allocate and init handler object (before lock) */
handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
if (!handler) {
@@ -622,16 +623,15 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
goto free_and_exit;
}
- /* Allocate and init handler object */
-
handler->address = address;
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
- handler->original_flags = gpe_event_info->flags &
- (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
+ handler->original_flags = (u8)(gpe_event_info->flags &
+ (ACPI_GPE_XRUPT_TYPE_MASK |
+ ACPI_GPE_DISPATCH_MASK));
/*
- * If the GPE is associated with a method, it might have been enabled
+ * If the GPE is associated with a method, it may have been enabled
* automatically during initialization, in which case it has to be
* disabled now to avoid spurious execution of the handler.
*/
@@ -646,7 +646,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
gpe_event_info->dispatch.handler = handler;
- /* Setup up dispatch flags to indicate handler (vs. method) */
+ /* Setup up dispatch flags to indicate handler (vs. method/notify) */
gpe_event_info->flags &=
~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
@@ -697,7 +697,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Make sure all deferred tasks are completed */
+ /* Make sure all deferred GPE tasks are completed */
acpi_os_wait_events_complete();
@@ -747,10 +747,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
* enabled, it should be enabled at this point to restore the
* post-initialization configuration.
*/
-
- if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
- && handler->originally_enabled)
+ if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) &&
+ handler->originally_enabled) {
(void)acpi_ev_add_gpe_reference(gpe_event_info);
+ }
/* Now we can free the handler object */
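Taken together, the install and remove hunks above form a save/restore pair: install records the GPE's original dispatch type and enable state in the handler object, and remove uses that record to hand a method-based GPE back to its _Lxx/_Exx handling. Reduced to its essentials (a sketch, not the full locking sequence):

/* On install: remember how the GPE was dispatched before the handler took over */
handler->original_flags = (u8)(gpe_event_info->flags &
			       (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK));

/* On remove: a method GPE that was enabled at init gets its reference back */
if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) &&
    handler->originally_enabled) {
	(void)acpi_ev_add_gpe_reference(gpe_event_info);
}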
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 35520c6eeefb..d6e4e42316db 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,7 +61,6 @@ ACPI_MODULE_NAME("evxfevnt")
* DESCRIPTION: Transfers the system into ACPI mode.
*
******************************************************************************/
-
acpi_status acpi_enable(void)
{
acpi_status status;
@@ -210,8 +209,8 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
*
* FUNCTION: acpi_disable_event
*
- * PARAMETERS: Event - The fixed eventto be enabled
- * Flags - Reserved
+ * PARAMETERS: event - The fixed event to be disabled
+ * flags - Reserved
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3f30e753b652..aff4cc261211 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,7 +51,7 @@
ACPI_MODULE_NAME("evxfgpe")
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
-/******************************************************************************
+/*******************************************************************************
*
* FUNCTION: acpi_update_all_gpes
*
@@ -172,6 +172,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
+
ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
@@ -225,7 +226,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
}
- /* Validate WakeDevice is of type Device */
+ /* Validate wake_device is of type Device */
if (device_node->type != ACPI_TYPE_DEVICE) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
@@ -432,8 +433,8 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * event_status - Where the current status of the event will
- * be returned
+ * event_status - Where the current status of the event
+ * will be returned
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 96b412d03950..96c9e5f355ae 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 16219bde48da..d93b70be60ad 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
#include "actables.h"
#include "acdispat.h"
#include "acevents.h"
+#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exconfig")
@@ -120,8 +121,11 @@ acpi_ex_add_table(u32 table_index,
acpi_ns_exec_module_code_list();
acpi_ex_enter_interpreter();
- /* Update GPEs for any new _Lxx/_Exx methods. Ignore errors */
-
+ /*
+ * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
+ * responsible for discovering any new wake GPEs by running _PRW methods
+ * that may have been loaded by this table.
+ */
status = acpi_tb_get_owner_id(table_index, &owner_id);
if (ACPI_SUCCESS(status)) {
acpi_ev_update_gpes(owner_id);
@@ -158,12 +162,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ex_load_table_op);
- /* Validate lengths for the signature_string, OEMIDString, OEMtable_iD */
+ /* Validate lengths for the Signature, oem_id, and oem_table_id strings */
if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
(operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
(operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ return_ACPI_STATUS(AE_AML_STRING_LIMIT);
}
/* Find the ACPI table in the RSDT/XSDT */
@@ -210,8 +214,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/* parameter_path (optional parameter) */
if (operand[4]->string.length > 0) {
- if ((operand[4]->string.pointer[0] != '\\') &&
- (operand[4]->string.pointer[0] != '^')) {
+ if ((operand[4]->string.pointer[0] != AML_ROOT_PREFIX) &&
+ (operand[4]->string.pointer[0] != AML_PARENT_PREFIX)) {
/*
* Path is not absolute, so it will be relative to the node
* referenced by the root_path_string (or the NS root if omitted)
@@ -301,7 +305,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
region_offset, 8, &value);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
*buffer = (u8)value;
@@ -309,7 +313,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
region_offset++;
}
- return AE_OK;
+ return (AE_OK);
}
/*******************************************************************************
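The AML_ROOT_PREFIX and AML_PARENT_PREFIX constants used in the parameter_path check carry the same byte values as the old character literals ('\' and '^', both defined in the newly included amlcode.h), so the behavior is unchanged; the test is simply whether the supplied path is absolute or parent-relative. The old literal form, equivalent to the new check:

/* Equivalent check written with the former character literals */
if ((operand[4]->string.pointer[0] != '\\') &&
    (operand[4]->string.pointer[0] != '^')) {
	/* Relative path: resolve it under the node named by root_path_string */
}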
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 4492a4e03022..d2b9613bbf01 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -176,7 +176,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
/* Save the Result */
- acpi_ex_truncate_for32bit_table(return_desc);
+ (void)acpi_ex_truncate_for32bit_table(return_desc);
*result_desc = return_desc;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 66554bc6f9a8..26a13f67977e 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index d7c9f51608a7..7eb853cd279f 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 858b43a7dcf6..e5a3c249f7fa 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -464,9 +464,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
ACPI_FUNCTION_NAME(ex_dump_operand)
- if (!
- ((ACPI_LV_EXEC & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+ /* Check if debug output enabled */
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
return;
}
@@ -811,9 +810,10 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
ACPI_FUNCTION_ENTRY();
if (!flags) {
- if (!
- ((ACPI_LV_OBJECTS & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_OBJECTS, _COMPONENT)) {
return;
}
}
@@ -999,9 +999,10 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
}
if (!flags) {
- if (!
- ((ACPI_LV_OBJECTS & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_OBJECTS, _COMPONENT)) {
return_VOID;
}
}
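All three exdump.c call sites now use ACPI_IS_DEBUG_ENABLED instead of open-coding the level/layer test. The macro's exact definition is in the ACPICA output headers rather than in this excerpt; judging from the code it replaces, a plausible equivalent is:

/* Illustrative definition only -- the real macro lives in the ACPICA headers */
#define ACPI_IS_DEBUG_ENABLED(level, component) \
	(((level) & acpi_dbg_level) && ((component) & acpi_dbg_layer))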
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index ebc55fbf3ff7..7d4bae71e8c6 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index aa2ccfb7cb61..ec7f5690031b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -329,7 +329,6 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
{
- ACPI_FUNCTION_NAME(ex_register_overflow);
if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
/*
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 84058705ed12..72a2a13b6d36 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index d1f449d93dcf..7be0205ad067 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -377,7 +377,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
}
- /* Must have a valid thread. */
+ /* Must have a valid thread ID */
+
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], null thread info",
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 2ff578a16adc..14689dec4960 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index bbf01e9bf057..b60c877f5906 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -948,13 +948,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
*/
return_desc =
acpi_ut_create_integer_object((u64)
- temp_desc->
- buffer.
- pointer
- [operand
- [0]->
- reference.
- value]);
+ temp_desc->buffer.pointer[operand[0]->reference.value]);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index ee5634a074c4..e491e46f17df 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 2c89b4651f08..2d7491f3126e 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 3e08695c3b30..b76b97002dff 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index ba9db4de7c89..d6eab81f54fb 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
/* Invalid field access type */
ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
- return_UINT32(0);
+ return_VALUE(0);
}
if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
@@ -289,7 +289,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
}
*return_byte_alignment = byte_alignment;
- return_UINT32(bit_length);
+ return_VALUE(bit_length);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 1db2c0bfde0b..182abaf045e1 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -142,9 +142,9 @@ acpi_ex_system_memory_space_handler(u32 function,
}
/*
- * Attempt to map from the requested address to the end of the region.
- * However, we will never map more than one page, nor will we cross
- * a page boundary.
+ * October 2009: Attempt to map from the requested address to the
+ * end of the region. However, we will never map more than one
+ * page, nor will we cross a page boundary.
*/
map_length = (acpi_size)
((mem_info->address + mem_info->length) - address);
@@ -154,12 +154,15 @@ acpi_ex_system_memory_space_handler(u32 function,
* a page boundary, just map up to the page boundary, do not cross.
* On some systems, crossing a page boundary while mapping regions
* can cause warnings if the pages have different attributes
- * due to resource management
+ * due to resource management.
+ *
+ * This has the added benefit of constraining a single mapping to
+ * one page, which is similar to the original code that used a 4k
+ * maximum window.
*/
page_boundary_map_length =
ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;
-
- if (!page_boundary_map_length) {
+ if (page_boundary_map_length == 0) {
page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
}
@@ -236,19 +239,19 @@ acpi_ex_system_memory_space_handler(u32 function,
switch (bit_width) {
case 8:
- ACPI_SET8(logical_addr_ptr) = (u8) * value;
+ ACPI_SET8(logical_addr_ptr, *value);
break;
case 16:
- ACPI_SET16(logical_addr_ptr) = (u16) * value;
+ ACPI_SET16(logical_addr_ptr, *value);
break;
case 32:
- ACPI_SET32(logical_addr_ptr) = (u32) * value;
+ ACPI_SET32(logical_addr_ptr, *value);
break;
case 64:
- ACPI_SET64(logical_addr_ptr) = (u64) * value;
+ ACPI_SET64(logical_addr_ptr, *value);
break;
default:
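The system-memory handler hunk above keeps every mapping within a single page: it maps from the requested address up to the next page boundary, or one full page when the address is already aligned. (The ACPI_SET8/16/32/64 changes below it only convert the macros from lvalue form to a (pointer, value) form.) A short worked example of the boundary computation, assuming the usual 4 KB ACPI_DEFAULT_PAGE_SIZE:

/* Worked example (values are illustrative) */
acpi_physical_address address = 0x12345678;
acpi_size page_boundary_map_length;

/* ACPI_ROUND_UP(0x12345678, 0x1000) == 0x12346000, so the window is 0x988 bytes */
page_boundary_map_length =
	ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;

/* A page-aligned address would give zero; map one full page in that case */
if (page_boundary_map_length == 0) {
	page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
}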
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 6239956786eb..8565b6bd12bb 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index cc176b245e22..e4f9dfbb2a13 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index b9ebff2f6a09..9fb9f5e9a4da 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 90431f12f831..93c6049c2d75 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -487,14 +487,33 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
default:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Storing %s (%p) directly into node (%p) with no implicit conversion\n",
+ "Storing [%s] (%p) directly into node [%s] (%p)"
+ " with no implicit conversion\n",
acpi_ut_get_object_type_name(source_desc),
- source_desc, node));
+ source_desc,
+ acpi_ut_get_object_type_name(target_desc),
+ node));
- /* No conversions for all other types. Just attach the source object */
+ /*
+ * No conversions for all other types. Directly store a copy of
+ * the source object. NOTE: This is a departure from the ACPI
+ * spec, which states "If conversion is impossible, abort the
+ * running control method".
+ *
+ * This code implements "If conversion is impossible, treat the
+ * Store operation as a CopyObject".
+ */
+ status =
+ acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
- status = acpi_ns_attach_object(node, source_desc,
- source_desc->common.type);
+ status =
+ acpi_ns_attach_object(node, new_desc,
+ new_desc->common.type);
+ acpi_ut_remove_reference(new_desc);
break;
}
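The exstore.c change above is behavioral, not cosmetic: for types with no implicit conversion, the interpreter no longer attaches the source object itself to the node but attaches an independent copy, so the store behaves like CopyObject. The before/after shape, reduced to its essentials (error paths trimmed):

/* Old behavior: the node and the source share a single object */
status = acpi_ns_attach_object(node, source_desc, source_desc->common.type);

/* New behavior: attach a deep copy, then drop the local reference to it */
status = acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, walk_state);
if (ACPI_SUCCESS(status)) {
	status = acpi_ns_attach_object(node, new_desc, new_desc->common.type);
	acpi_ut_remove_reference(new_desc);
}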
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 87153bbc4b43..1cefe777068e 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -253,7 +253,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
/* Truncate value if we are executing from a 32-bit ACPI table */
- acpi_ex_truncate_for32bit_table(dest_desc);
+ (void)acpi_ex_truncate_for32bit_table(dest_desc);
break;
case ACPI_TYPE_STRING:
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index b5f339cb1305..26e371073b1a 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index c8a0ad5c1f55..6578dee2e51b 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 264d22d8018c..b205cbb4b50c 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -202,35 +202,39 @@ void acpi_ex_relinquish_interpreter(void)
*
* PARAMETERS: obj_desc - Object to be truncated
*
- * RETURN: none
+ * RETURN: TRUE if a truncation was performed, FALSE otherwise.
*
* DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
* 32-bit, as determined by the revision of the DSDT.
*
******************************************************************************/
-void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
+u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
{
ACPI_FUNCTION_ENTRY();
/*
* Object must be a valid number and we must be executing
- * a control method. NS node could be there for AML_INT_NAMEPATH_OP.
+ * a control method. Object could be NS node for AML_INT_NAMEPATH_OP.
*/
if ((!obj_desc) ||
(ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) ||
(obj_desc->common.type != ACPI_TYPE_INTEGER)) {
- return;
+ return (FALSE);
}
- if (acpi_gbl_integer_byte_width == 4) {
+ if ((acpi_gbl_integer_byte_width == 4) &&
+ (obj_desc->integer.value > (u64)ACPI_UINT32_MAX)) {
/*
- * We are running a method that exists in a 32-bit ACPI table.
+ * We are executing in a 32-bit ACPI table.
* Truncate the value to 32 bits by zeroing out the upper 32-bit field
*/
- obj_desc->integer.value &= (u64) ACPI_UINT32_MAX;
+ obj_desc->integer.value &= (u64)ACPI_UINT32_MAX;
+ return (TRUE);
}
+
+ return (FALSE);
}
/*******************************************************************************
@@ -336,7 +340,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
/* u64 is unsigned, so we don't worry about a '-' prefix */
if (value == 0) {
- return_UINT32(1);
+ return_VALUE(1);
}
current_value = value;
@@ -350,7 +354,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
num_digits++;
}
- return_UINT32(num_digits);
+ return_VALUE(num_digits);
}
/*******************************************************************************
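acpi_ex_truncate_for32bit_table now reports whether it actually clipped the value, and it only clips when the integer really exceeds 32 bits. Callers that truncate purely for side effect cast the result to void, as in exconvrt.c and exstoren.c above; a caller that cares can test the return value. A hedged usage sketch:

/* Sketch: obj_desc is an ACPI_TYPE_INTEGER operand object */
if (acpi_ex_truncate_for32bit_table(obj_desc)) {
	/* Value exceeded 32 bits and was masked down to ACPI_UINT32_MAX */
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Integer truncated for 32-bit table: %8.8X\n",
			  (u32)obj_desc->integer.value));
}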
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 90a9aea1cee9..deb3f61e2bd1 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,8 +108,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
* enable bits to default
*/
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
- (u32) acpi_gbl_FADT.acpi_disable,
- 8);
+ (u32)acpi_gbl_FADT.acpi_disable, 8);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Attempting to enable Legacy (non-ACPI) mode\n"));
break;
@@ -152,18 +151,18 @@ u32 acpi_hw_get_mode(void)
* system does not support mode transition.
*/
if (!acpi_gbl_FADT.smi_command) {
- return_UINT32(ACPI_SYS_MODE_ACPI);
+ return_VALUE(ACPI_SYS_MODE_ACPI);
}
status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
if (ACPI_FAILURE(status)) {
- return_UINT32(ACPI_SYS_MODE_LEGACY);
+ return_VALUE(ACPI_SYS_MODE_LEGACY);
}
if (value) {
- return_UINT32(ACPI_SYS_MODE_ACPI);
+ return_VALUE(ACPI_SYS_MODE_ACPI);
} else {
- return_UINT32(ACPI_SYS_MODE_LEGACY);
+ return_VALUE(ACPI_SYS_MODE_LEGACY);
}
}
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 94996f9ae3ad..5e5f76230f5e 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -200,7 +200,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
* FUNCTION: acpi_hw_extended_wake
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * flags - Reserved, set to zero
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 64560045052d..20d02e93c990 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,8 +69,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
{
- return (u32)1 << (gpe_event_info->gpe_number -
- gpe_event_info->register_info->base_gpe_number);
+
+ return ((u32)1 <<
+ (gpe_event_info->gpe_number -
+ gpe_event_info->register_info->base_gpe_number));
}
/******************************************************************************
@@ -133,7 +135,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action));
+ ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u", action));
return (AE_BAD_PARAMETER);
}
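acpi_hw_get_gpe_register_bit turns a GPE number into a bit mask relative to its register's base GPE. For example, GPE 0x13 in a register whose base_gpe_number is 0x10 maps to bit 3 (illustrative values):

u32 gpe_number = 0x13;
u32 base_gpe_number = 0x10;
u32 register_bit = (u32)1 << (gpe_number - base_gpe_number);	/* 0x08, i.e. bit 3 */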
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 65bc3453a29c..0889a629505f 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index f4e57503576b..083d6551f0e2 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,6 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acnamesp.h"
#include "acevents.h"
#define _COMPONENT ACPI_HARDWARE
@@ -364,8 +363,7 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control)
* DESCRIPTION: Read from the specified ACPI register
*
******************************************************************************/
-acpi_status
-acpi_hw_register_read(u32 register_id, u32 * return_value)
+acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
{
u32 value = 0;
acpi_status status;
@@ -485,7 +483,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
&acpi_gbl_xpm1b_status);
break;
- case ACPI_REGISTER_PM1_ENABLE: /* PM1 A/B: 16-bit access */
+ case ACPI_REGISTER_PM1_ENABLE: /* PM1 A/B: 16-bit access each */
status = acpi_hw_write_multiple(value,
&acpi_gbl_xpm1a_enable,
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3fddde056a5e..e3828cc4361b 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,6 @@
#include <acpi/acpi.h>
#include <linux/acpi.h>
#include "accommon.h"
-#include <linux/module.h>
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwsleep")
@@ -178,7 +177,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
* to still read the right value. Ideally, this block would go
* away entirely.
*/
- acpi_os_stall(10000000);
+ acpi_os_stall(10 * ACPI_USEC_PER_SEC);
status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
sleep_enable_reg_info->
@@ -323,7 +322,8 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state)
* and use it to determine whether the system is rebooting or
* resuming. Clear WAK_STS for compatibility.
*/
- acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, 1);
+ (void)acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS,
+ ACPI_CLEAR_STATUS);
acpi_gbl_system_awake_and_running = TRUE;
/* Enable power button */
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index bfdce22f3798..0c1a8bbd05d6 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -176,10 +176,11 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
/*
* Compute Duration (Requires a 64-bit multiply and divide):
*
- * time_elapsed = (delta_ticks * 1000000) / PM_TIMER_FREQUENCY;
+ * time_elapsed (microseconds) =
+ * (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
*/
- status = acpi_ut_short_divide(((u64) delta_ticks) * 1000000,
- PM_TIMER_FREQUENCY, &quotient, NULL);
+ status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
+ ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
*time_elapsed = (u32) quotient;
return_ACPI_STATUS(status);
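The PM timer runs at ACPI_PM_TIMER_FREQUENCY (3.579545 MHz), so the elapsed time in microseconds is delta_ticks * ACPI_USEC_PER_SEC / ACPI_PM_TIMER_FREQUENCY, with the multiply done in 64 bits to avoid overflow. A worked example with illustrative numbers:

/* 35795 ticks of the 3.579545 MHz PM timer is just under 10 ms */
u64 quotient;

(void)acpi_ut_short_divide((u64)35795 * ACPI_USEC_PER_SEC,
			   ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
/* quotient == 9999 microseconds */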
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b6aae58299dc..eab70d58852a 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -135,7 +135,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
ACPI_ERROR((AE_INFO,
"Bad BitWidth parameter: %8.8X", bit_width));
- return AE_BAD_PARAMETER;
+ return (AE_BAD_PARAMETER);
}
port_info = acpi_protected_ports;
@@ -234,11 +234,11 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
status = acpi_hw_validate_io_request(address, width);
if (ACPI_SUCCESS(status)) {
status = acpi_os_read_port(address, value, width);
- return status;
+ return (status);
}
if (status != AE_AML_ILLEGAL_ADDRESS) {
- return status;
+ return (status);
}
/*
@@ -253,7 +253,7 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
if (acpi_hw_validate_io_request(address, 8) == AE_OK) {
status = acpi_os_read_port(address, &one_byte, 8);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
*value |= (one_byte << i);
@@ -262,7 +262,7 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
address++;
}
- return AE_OK;
+ return (AE_OK);
}
/******************************************************************************
@@ -297,11 +297,11 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
status = acpi_hw_validate_io_request(address, width);
if (ACPI_SUCCESS(status)) {
status = acpi_os_write_port(address, value, width);
- return status;
+ return (status);
}
if (status != AE_AML_ILLEGAL_ADDRESS) {
- return status;
+ return (status);
}
/*
@@ -317,12 +317,12 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
status =
acpi_os_write_port(address, (value >> i) & 0xFF, 8);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
}
address++;
}
- return AE_OK;
+ return (AE_OK);
}
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 05a154c3c9ac..04c2e16f2c0a 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -80,10 +80,10 @@ acpi_status acpi_reset(void)
if (reset_reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
/*
- * For I/O space, write directly to the OSL. This
- * bypasses the port validation mechanism, which may
- * block a valid write to the reset register. Spec
- * section 4.7.3.6 requires register width to be 8.
+ * For I/O space, write directly to the OSL. This bypasses the port
+ * validation mechanism, which may block a valid write to the reset
+ * register.
+ * Spec section 4.7.3.6 requires register width to be 8.
*/
status =
acpi_os_write_port((acpi_io_address) reset_reg->address,
@@ -333,7 +333,7 @@ ACPI_EXPORT_SYMBOL(acpi_read_bit_register)
* FUNCTION: acpi_write_bit_register
*
* PARAMETERS: register_id - ID of ACPI Bit Register to access
- * Value - Value to write to the register, in bit
+ * value - Value to write to the register, in bit
* position zero. The bit is automatically
* shifted to the correct position.
*
@@ -440,17 +440,41 @@ ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
* *sleep_type_a - Where SLP_TYPa is returned
* *sleep_type_b - Where SLP_TYPb is returned
*
- * RETURN: status - ACPI status
+ * RETURN: Status
+ *
+ * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested
+ * sleep state via the appropriate \_Sx object.
+ *
+ * The sleep state package returned from the corresponding \_Sx_ object
+ * must contain at least one integer.
+ *
+ * March 2005:
+ * Added support for a package that contains two integers. This
+ * goes against the ACPI specification which defines this object as a
+ * package with one encoded DWORD integer. However, existing practice
+ * by many BIOS vendors is to return a package with 2 or more integer
+ * elements, at least one per sleep type (A/B).
*
- * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
- * state.
+ * January 2013:
+ * Therefore, we must be prepared to accept a package with either a
+ * single integer or multiple integers.
+ *
+ * The single integer DWORD format is as follows:
+ * BYTE 0 - Value for the PM1A SLP_TYP register
+ * BYTE 1 - Value for the PM1B SLP_TYP register
+ * BYTE 2-3 - Reserved
+ *
+ * The dual integer format is as follows:
+ * Integer 0 - Value for the PM1A SLP_TYP register
+ * Integer 1 - Value for the PM1B SLP_TYP register
*
******************************************************************************/
acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
{
- acpi_status status = AE_OK;
+ acpi_status status;
struct acpi_evaluate_info *info;
+ union acpi_operand_object **elements;
ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
@@ -467,18 +491,14 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
return_ACPI_STATUS(AE_NO_MEMORY);
}
+ /*
+ * Evaluate the \_Sx namespace object containing the register values
+ * for this state
+ */
info->pathname =
ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
-
- /* Evaluate the namespace object containing the values for this state */
-
status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "%s while evaluating SleepState [%s]\n",
- acpi_format_exception(status),
- info->pathname));
-
goto cleanup;
}
@@ -487,64 +507,67 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
if (!info->return_object) {
ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
info->pathname));
- status = AE_NOT_EXIST;
+ status = AE_AML_NO_RETURN_VALUE;
+ goto cleanup;
}
- /* It must be of type Package */
+ /* Return object must be of type Package */
- else if (info->return_object->common.type != ACPI_TYPE_PACKAGE) {
+ if (info->return_object->common.type != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
"Sleep State return object is not a Package"));
status = AE_AML_OPERAND_TYPE;
+ goto cleanup1;
}
/*
- * The package must have at least two elements. NOTE (March 2005): This
- * goes against the current ACPI spec which defines this object as a
- * package with one encoded DWORD element. However, existing practice
- * by BIOS vendors seems to be to have 2 or more elements, at least
- * one per sleep type (A/B).
+ * Any warnings about the package length or the object types have
+ * already been issued by the predefined name module -- there is no
+ * need to repeat them here.
*/
- else if (info->return_object->package.count < 2) {
- ACPI_ERROR((AE_INFO,
- "Sleep State return package does not have at least two elements"));
- status = AE_AML_NO_OPERAND;
- }
+ elements = info->return_object->package.elements;
+ switch (info->return_object->package.count) {
+ case 0:
+ status = AE_AML_PACKAGE_LIMIT;
+ break;
+
+ case 1:
+ if (elements[0]->common.type != ACPI_TYPE_INTEGER) {
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
- /* The first two elements must both be of type Integer */
+ /* A valid _Sx_ package with one integer */
- else if (((info->return_object->package.elements[0])->common.type
- != ACPI_TYPE_INTEGER) ||
- ((info->return_object->package.elements[1])->common.type
- != ACPI_TYPE_INTEGER)) {
- ACPI_ERROR((AE_INFO,
- "Sleep State return package elements are not both Integers "
- "(%s, %s)",
- acpi_ut_get_object_type_name(info->return_object->
- package.elements[0]),
- acpi_ut_get_object_type_name(info->return_object->
- package.elements[1])));
- status = AE_AML_OPERAND_TYPE;
- } else {
- /* Valid _Sx_ package size, type, and value */
+ *sleep_type_a = (u8)elements[0]->integer.value;
+ *sleep_type_b = (u8)(elements[0]->integer.value >> 8);
+ break;
- *sleep_type_a = (u8)
- (info->return_object->package.elements[0])->integer.value;
- *sleep_type_b = (u8)
- (info->return_object->package.elements[1])->integer.value;
- }
+ case 2:
+ default:
+ if ((elements[0]->common.type != ACPI_TYPE_INTEGER) ||
+ (elements[1]->common.type != ACPI_TYPE_INTEGER)) {
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "While evaluating SleepState [%s], bad Sleep object %p type %s",
- info->pathname, info->return_object,
- acpi_ut_get_object_type_name(info->
- return_object)));
+ /* A valid _Sx_ package with two integers */
+
+ *sleep_type_a = (u8)elements[0]->integer.value;
+ *sleep_type_b = (u8)elements[1]->integer.value;
+ break;
}
+ cleanup1:
acpi_ut_remove_reference(info->return_object);
cleanup:
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While evaluating Sleep State [%s]",
+ info->pathname));
+ }
+
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
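The rewritten acpi_get_sleep_type_data accepts both documented _Sx package shapes. As a concrete example of the single-integer format, a package whose only element is the DWORD 0x0705 decodes to SLP_TYPa = 0x05 (byte 0) and SLP_TYPb = 0x07 (byte 1), which is exactly what the case 1 branch computes:

/* Worked example for the one-integer _Sx package format */
u64 packed = 0x0705;			/* BYTE 0 = PM1A value, BYTE 1 = PM1B value */
u8 sleep_type_a = (u8)packed;		/* 0x05 */
u8 sleep_type_b = (u8)(packed >> 8);	/* 0x07 */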
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index ae443fe2ebf6..35eebdac0f9d 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,9 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
-#include <linux/module.h>
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwxfsleep")
@@ -207,7 +207,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
(u32)acpi_gbl_FADT.s4_bios_request, 8);
do {
- acpi_os_stall(1000);
+ acpi_os_stall(ACPI_USEC_PER_MSEC);
status =
acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
if (ACPI_FAILURE(status)) {
@@ -350,7 +350,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
*
* RETURN: Status
*
- * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ * DESCRIPTION: Enter a system sleep state
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
@@ -382,8 +382,9 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
* RETURN: Status
*
* DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
- * sleep.
- * Called with interrupts DISABLED.
+ * sleep. Called with interrupts DISABLED.
+ * We break wake/resume into 2 stages so that OSPM can handle
+ * various OS-specific tasks between the two steps.
*
******************************************************************************/
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index d70eaf39dfdf..8769cf83b044 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 15143c44f5e5..243737363fb8 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 924b3c71473a..ce6e97326205 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
+#include <acpi/acoutput.h>
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsdump")
@@ -77,8 +78,9 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
ACPI_FUNCTION_NAME(ns_print_pathname);
- if (!(acpi_dbg_level & ACPI_LV_NAMES)
- || !(acpi_dbg_layer & ACPI_NAMESPACE)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_NAMES, ACPI_NAMESPACE)) {
return;
}
@@ -127,7 +129,7 @@ acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
/* Do this only if the requested debug level and component are enabled */
- if (!(acpi_dbg_level & level) || !(acpi_dbg_layer & component)) {
+ if (!ACPI_IS_DEBUG_ENABLED(level, component)) {
return_VOID;
}
@@ -729,5 +731,5 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
ACPI_OWNER_ID_MAX, search_handle);
return_VOID;
}
-#endif /* _ACPI_ASL_COMPILER */
-#endif /* defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) */
+#endif
+#endif
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 944d4c8d9438..409ae80824d1 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,6 @@
*/
#include <acpi/acpi.h>
-#include "accommon.h"
/* TBD: This entire module is apparently obsolete and should be removed */
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 69074be498e8..1538f3eb2a8f 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 4328e2adfeb9..2a431ec50a25 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,6 @@
#include "acnamesp.h"
#include "acdispat.h"
#include "acinterp.h"
-#include <linux/nmi.h>
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsinit")
@@ -87,7 +86,7 @@ acpi_status acpi_ns_initialize_objects(void)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "Completing Region/Field/Buffer/Package initialization:"));
+ "Completing Region/Field/Buffer/Package initialization:\n"));
/* Set all init info to zero */
@@ -103,7 +102,7 @@ acpi_status acpi_ns_initialize_objects(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nInitialized %u/%u Regions %u/%u Fields %u/%u "
+ " Initialized %u/%u Regions %u/%u Fields %u/%u "
"Buffers %u/%u Packages (%u nodes)\n",
info.op_region_init, info.op_region_count,
info.field_init, info.field_count,
@@ -150,7 +149,7 @@ acpi_status acpi_ns_initialize_devices(void)
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"Initializing Device/Processor/Thermal objects "
- "by executing _INI methods:"));
+ "and executing _INI/_STA methods:\n"));
/* Tree analysis: find all subtrees that contain _INI methods */
@@ -208,7 +207,7 @@ acpi_status acpi_ns_initialize_devices(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nExecuted %u _INI methods requiring %u _STA executions "
+ " Executed %u _INI methods requiring %u _STA executions "
"(examined %u objects)\n",
info.num_INI, info.num_STA, info.device_count));
@@ -350,14 +349,6 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
}
/*
- * Print a dot for each object unless we are going to print the entire
- * pathname
- */
- if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
- }
-
- /*
* We ignore errors from above, and always return OK, since we don't want
* to abort the walk on any single error.
*/
@@ -572,20 +563,10 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
info->parameters = NULL;
info->flags = ACPI_IGNORE_RETURN_VALUE;
- /*
- * Some hardware relies on this being executed as atomically
- * as possible (without an NMI being received in the middle of
- * this) - so disable NMIs and initialize the device:
- */
status = acpi_ns_evaluate(info);
if (ACPI_SUCCESS(status)) {
walk_info->num_INI++;
-
- if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
- (!(acpi_dbg_level & ACPI_LV_INFO))) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
- }
}
#ifdef ACPI_DEBUG_OUTPUT
else if (status != AE_NOT_FOUND) {
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 911f99127b99..0a7badc3179f 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 55a175eadcc3..90a0380fb8a0 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -126,7 +126,8 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
* the node, In external format (name segments separated by path
* separators.)
*
- * DESCRIPTION: Used for debug printing in acpi_ns_search_table().
+ * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
+ * for error and debug statements.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index e69f7fa2579d..7a736f4d1fd8 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 233f756d5cfa..35dde8151c0d 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 2419f417ea33..224c30053401 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -1,12 +1,11 @@
/******************************************************************************
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
- * $Revision: 1.1 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,27 +73,6 @@ ACPI_MODULE_NAME("nspredef")
******************************************************************************/
/* Local prototypes */
static acpi_status
-acpi_ns_check_package(struct acpi_predefined_data *data,
- union acpi_operand_object **return_object_ptr);
-
-static acpi_status
-acpi_ns_check_package_list(struct acpi_predefined_data *data,
- const union acpi_predefined_info *package,
- union acpi_operand_object **elements, u32 count);
-
-static acpi_status
-acpi_ns_check_package_elements(struct acpi_predefined_data *data,
- union acpi_operand_object **elements,
- u8 type1,
- u32 count1,
- u8 type2, u32 count2, u32 start_index);
-
-static acpi_status
-acpi_ns_check_object_type(struct acpi_predefined_data *data,
- union acpi_operand_object **return_object_ptr,
- u32 expected_btypes, u32 package_index);
-
-static acpi_status
acpi_ns_check_reference(struct acpi_predefined_data *data,
union acpi_operand_object *return_object);
@@ -148,7 +126,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
pathname = acpi_ns_get_external_pathname(node);
if (!pathname) {
- return AE_OK; /* Could not get pathname, ignore */
+ return (AE_OK); /* Could not get pathname, ignore */
}
/*
@@ -408,564 +386,6 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
/*******************************************************************************
*
- * FUNCTION: acpi_ns_check_package
- *
- * PARAMETERS: data - Pointer to validation data structure
- * return_object_ptr - Pointer to the object returned from the
- * evaluation of a method or object
- *
- * RETURN: Status
- *
- * DESCRIPTION: Check a returned package object for the correct count and
- * correct type of all sub-objects.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_package(struct acpi_predefined_data *data,
- union acpi_operand_object **return_object_ptr)
-{
- union acpi_operand_object *return_object = *return_object_ptr;
- const union acpi_predefined_info *package;
- union acpi_operand_object **elements;
- acpi_status status = AE_OK;
- u32 expected_count;
- u32 count;
- u32 i;
-
- ACPI_FUNCTION_NAME(ns_check_package);
-
- /* The package info for this name is in the next table entry */
-
- package = data->predefined + 1;
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "%s Validating return Package of Type %X, Count %X\n",
- data->pathname, package->ret_info.type,
- return_object->package.count));
-
- /*
- * For variable-length Packages, we can safely remove all embedded
- * and trailing NULL package elements
- */
- acpi_ns_remove_null_elements(data, package->ret_info.type,
- return_object);
-
- /* Extract package count and elements array */
-
- elements = return_object->package.elements;
- count = return_object->package.count;
-
- /* The package must have at least one element, else invalid */
-
- if (!count) {
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Return Package has no elements (empty)"));
-
- return (AE_AML_OPERAND_VALUE);
- }
-
- /*
- * Decode the type of the expected package contents
- *
- * PTYPE1 packages contain no subpackages
- * PTYPE2 packages contain sub-packages
- */
- switch (package->ret_info.type) {
- case ACPI_PTYPE1_FIXED:
-
- /*
- * The package count is fixed and there are no sub-packages
- *
- * If package is too small, exit.
- * If package is larger than expected, issue warning but continue
- */
- expected_count =
- package->ret_info.count1 + package->ret_info.count2;
- if (count < expected_count) {
- goto package_too_small;
- } else if (count > expected_count) {
- ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
- "%s: Return Package is larger than needed - "
- "found %u, expected %u\n",
- data->pathname, count,
- expected_count));
- }
-
- /* Validate all elements of the returned package */
-
- status = acpi_ns_check_package_elements(data, elements,
- package->ret_info.
- object_type1,
- package->ret_info.
- count1,
- package->ret_info.
- object_type2,
- package->ret_info.
- count2, 0);
- break;
-
- case ACPI_PTYPE1_VAR:
-
- /*
- * The package count is variable, there are no sub-packages, and all
- * elements must be of the same type
- */
- for (i = 0; i < count; i++) {
- status = acpi_ns_check_object_type(data, elements,
- package->ret_info.
- object_type1, i);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- elements++;
- }
- break;
-
- case ACPI_PTYPE1_OPTION:
-
- /*
- * The package count is variable, there are no sub-packages. There are
- * a fixed number of required elements, and a variable number of
- * optional elements.
- *
- * Check if package is at least as large as the minimum required
- */
- expected_count = package->ret_info3.count;
- if (count < expected_count) {
- goto package_too_small;
- }
-
- /* Variable number of sub-objects */
-
- for (i = 0; i < count; i++) {
- if (i < package->ret_info3.count) {
-
- /* These are the required package elements (0, 1, or 2) */
-
- status =
- acpi_ns_check_object_type(data, elements,
- package->
- ret_info3.
- object_type[i],
- i);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- } else {
- /* These are the optional package elements */
-
- status =
- acpi_ns_check_object_type(data, elements,
- package->
- ret_info3.
- tail_object_type,
- i);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
- elements++;
- }
- break;
-
- case ACPI_PTYPE2_REV_FIXED:
-
- /* First element is the (Integer) revision */
-
- status = acpi_ns_check_object_type(data, elements,
- ACPI_RTYPE_INTEGER, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- elements++;
- count--;
-
- /* Examine the sub-packages */
-
- status =
- acpi_ns_check_package_list(data, package, elements, count);
- break;
-
- case ACPI_PTYPE2_PKG_COUNT:
-
- /* First element is the (Integer) count of sub-packages to follow */
-
- status = acpi_ns_check_object_type(data, elements,
- ACPI_RTYPE_INTEGER, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Count cannot be larger than the parent package length, but allow it
- * to be smaller. The >= accounts for the Integer above.
- */
- expected_count = (u32) (*elements)->integer.value;
- if (expected_count >= count) {
- goto package_too_small;
- }
-
- count = expected_count;
- elements++;
-
- /* Examine the sub-packages */
-
- status =
- acpi_ns_check_package_list(data, package, elements, count);
- break;
-
- case ACPI_PTYPE2:
- case ACPI_PTYPE2_FIXED:
- case ACPI_PTYPE2_MIN:
- case ACPI_PTYPE2_COUNT:
- case ACPI_PTYPE2_FIX_VAR:
-
- /*
- * These types all return a single Package that consists of a
- * variable number of sub-Packages.
- *
- * First, ensure that the first element is a sub-Package. If not,
- * the BIOS may have incorrectly returned the object as a single
- * package instead of a Package of Packages (a common error if
- * there is only one entry). We may be able to repair this by
- * wrapping the returned Package with a new outer Package.
- */
- if (*elements
- && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) {
-
- /* Create the new outer package and populate it */
-
- status =
- acpi_ns_wrap_with_package(data, return_object,
- return_object_ptr);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Update locals to point to the new package (of 1 element) */
-
- return_object = *return_object_ptr;
- elements = return_object->package.elements;
- count = 1;
- }
-
- /* Examine the sub-packages */
-
- status =
- acpi_ns_check_package_list(data, package, elements, count);
- break;
-
- default:
-
- /* Should not get here if predefined info table is correct */
-
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Invalid internal return type in table entry: %X",
- package->ret_info.type));
-
- return (AE_AML_INTERNAL);
- }
-
- return (status);
-
-package_too_small:
-
- /* Error exit for the case with an incorrect package count */
-
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Return Package is too small - found %u elements, expected %u",
- count, expected_count));
-
- return (AE_AML_OPERAND_VALUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_check_package_list
- *
- * PARAMETERS: data - Pointer to validation data structure
- * package - Pointer to package-specific info for method
- * elements - Element list of parent package. All elements
- * of this list should be of type Package.
- * count - Count of subpackages
- *
- * RETURN: Status
- *
- * DESCRIPTION: Examine a list of subpackages
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_package_list(struct acpi_predefined_data *data,
- const union acpi_predefined_info *package,
- union acpi_operand_object **elements, u32 count)
-{
- union acpi_operand_object *sub_package;
- union acpi_operand_object **sub_elements;
- acpi_status status;
- u32 expected_count;
- u32 i;
- u32 j;
-
- /*
- * Validate each sub-Package in the parent Package
- *
- * NOTE: assumes list of sub-packages contains no NULL elements.
- * Any NULL elements should have been removed by earlier call
- * to acpi_ns_remove_null_elements.
- */
- for (i = 0; i < count; i++) {
- sub_package = *elements;
- sub_elements = sub_package->package.elements;
- data->parent_package = sub_package;
-
- /* Each sub-object must be of type Package */
-
- status = acpi_ns_check_object_type(data, &sub_package,
- ACPI_RTYPE_PACKAGE, i);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Examine the different types of expected sub-packages */
-
- data->parent_package = sub_package;
- switch (package->ret_info.type) {
- case ACPI_PTYPE2:
- case ACPI_PTYPE2_PKG_COUNT:
- case ACPI_PTYPE2_REV_FIXED:
-
- /* Each subpackage has a fixed number of elements */
-
- expected_count =
- package->ret_info.count1 + package->ret_info.count2;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- status =
- acpi_ns_check_package_elements(data, sub_elements,
- package->ret_info.
- object_type1,
- package->ret_info.
- count1,
- package->ret_info.
- object_type2,
- package->ret_info.
- count2, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- case ACPI_PTYPE2_FIX_VAR:
- /*
- * Each subpackage has a fixed number of elements and an
- * optional element
- */
- expected_count =
- package->ret_info.count1 + package->ret_info.count2;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- status =
- acpi_ns_check_package_elements(data, sub_elements,
- package->ret_info.
- object_type1,
- package->ret_info.
- count1,
- package->ret_info.
- object_type2,
- sub_package->package.
- count -
- package->ret_info.
- count1, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- case ACPI_PTYPE2_FIXED:
-
- /* Each sub-package has a fixed length */
-
- expected_count = package->ret_info2.count;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- /* Check the type of each sub-package element */
-
- for (j = 0; j < expected_count; j++) {
- status =
- acpi_ns_check_object_type(data,
- &sub_elements[j],
- package->
- ret_info2.
- object_type[j],
- j);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
- break;
-
- case ACPI_PTYPE2_MIN:
-
- /* Each sub-package has a variable but minimum length */
-
- expected_count = package->ret_info.count1;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- /* Check the type of each sub-package element */
-
- status =
- acpi_ns_check_package_elements(data, sub_elements,
- package->ret_info.
- object_type1,
- sub_package->package.
- count, 0, 0, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- case ACPI_PTYPE2_COUNT:
-
- /*
- * First element is the (Integer) count of elements, including
- * the count field (the ACPI name is num_elements)
- */
- status = acpi_ns_check_object_type(data, sub_elements,
- ACPI_RTYPE_INTEGER,
- 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Make sure package is large enough for the Count and is
- * is as large as the minimum size
- */
- expected_count = (u32)(*sub_elements)->integer.value;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
- if (sub_package->package.count <
- package->ret_info.count1) {
- expected_count = package->ret_info.count1;
- goto package_too_small;
- }
- if (expected_count == 0) {
- /*
- * Either the num_entries element was originally zero or it was
- * a NULL element and repaired to an Integer of value zero.
- * In either case, repair it by setting num_entries to be the
- * actual size of the subpackage.
- */
- expected_count = sub_package->package.count;
- (*sub_elements)->integer.value = expected_count;
- }
-
- /* Check the type of each sub-package element */
-
- status =
- acpi_ns_check_package_elements(data,
- (sub_elements + 1),
- package->ret_info.
- object_type1,
- (expected_count - 1),
- 0, 0, 1);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- default: /* Should not get here, type was validated by caller */
-
- return (AE_AML_INTERNAL);
- }
-
- elements++;
- }
-
- return (AE_OK);
-
-package_too_small:
-
- /* The sub-package count was smaller than required */
-
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Return Sub-Package[%u] is too small - found %u elements, expected %u",
- i, sub_package->package.count, expected_count));
-
- return (AE_AML_OPERAND_VALUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_check_package_elements
- *
- * PARAMETERS: data - Pointer to validation data structure
- * elements - Pointer to the package elements array
- * type1 - Object type for first group
- * count1 - Count for first group
- * type2 - Object type for second group
- * count2 - Count for second group
- * start_index - Start of the first group of elements
- *
- * RETURN: Status
- *
- * DESCRIPTION: Check that all elements of a package are of the correct object
- * type. Supports up to two groups of different object types.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_package_elements(struct acpi_predefined_data *data,
- union acpi_operand_object **elements,
- u8 type1,
- u32 count1,
- u8 type2, u32 count2, u32 start_index)
-{
- union acpi_operand_object **this_element = elements;
- acpi_status status;
- u32 i;
-
- /*
- * Up to two groups of package elements are supported by the data
- * structure. All elements in each group must be of the same type.
- * The second group can have a count of zero.
- */
- for (i = 0; i < count1; i++) {
- status = acpi_ns_check_object_type(data, this_element,
- type1, i + start_index);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- this_element++;
- }
-
- for (i = 0; i < count2; i++) {
- status = acpi_ns_check_object_type(data, this_element,
- type2,
- (i + count1 + start_index));
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- this_element++;
- }
-
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_check_object_type
*
* PARAMETERS: data - Pointer to validation data structure
@@ -983,7 +403,7 @@ acpi_ns_check_package_elements(struct acpi_predefined_data *data,
*
******************************************************************************/
-static acpi_status
+acpi_status
acpi_ns_check_object_type(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr,
u32 expected_btypes, u32 package_index)
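The hunks above remove the package-validation helpers from nspredef.c and drop the static qualifier from acpi_ns_check_object_type so that the validation code can move into a separate module. A minimal sketch of the prototypes that would then have to be shared through a common header such as acnamesp.h (the header change itself is not part of this hunk, so the exact location is an assumption):

/* Assumed shared declarations; both functions were previously static in nspredef.c */
acpi_status
acpi_ns_check_package(struct acpi_predefined_data *data,
                      union acpi_operand_object **return_object_ptr);

acpi_status
acpi_ns_check_object_type(struct acpi_predefined_data *data,
                          union acpi_operand_object **return_object_ptr,
                          u32 expected_btypes, u32 package_index);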
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
new file mode 100644
index 000000000000..a40155467d2e
--- /dev/null
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -0,0 +1,621 @@
+/******************************************************************************
+ *
+ * Module Name: nsprepkg - Validation of package objects for predefined names
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acpredef.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsprepkg")
+
+/* Local prototypes */
+static acpi_status
+acpi_ns_check_package_list(struct acpi_predefined_data *data,
+ const union acpi_predefined_info *package,
+ union acpi_operand_object **elements, u32 count);
+
+static acpi_status
+acpi_ns_check_package_elements(struct acpi_predefined_data *data,
+ union acpi_operand_object **elements,
+ u8 type1,
+ u32 count1,
+ u8 type2, u32 count2, u32 start_index);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package
+ *
+ * PARAMETERS: data - Pointer to validation data structure
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check a returned package object for the correct count and
+ * correct type of all sub-objects.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_check_package(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
+{
+ union acpi_operand_object *return_object = *return_object_ptr;
+ const union acpi_predefined_info *package;
+ union acpi_operand_object **elements;
+ acpi_status status = AE_OK;
+ u32 expected_count;
+ u32 count;
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ns_check_package);
+
+ /* The package info for this name is in the next table entry */
+
+ package = data->predefined + 1;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "%s Validating return Package of Type %X, Count %X\n",
+ data->pathname, package->ret_info.type,
+ return_object->package.count));
+
+ /*
+ * For variable-length Packages, we can safely remove all embedded
+ * and trailing NULL package elements
+ */
+ acpi_ns_remove_null_elements(data, package->ret_info.type,
+ return_object);
+
+ /* Extract package count and elements array */
+
+ elements = return_object->package.elements;
+ count = return_object->package.count;
+
+ /* The package must have at least one element, else invalid */
+
+ if (!count) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Package has no elements (empty)"));
+
+ return (AE_AML_OPERAND_VALUE);
+ }
+
+ /*
+ * Decode the type of the expected package contents
+ *
+ * PTYPE1 packages contain no subpackages
+ * PTYPE2 packages contain sub-packages
+ */
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE1_FIXED:
+
+ /*
+ * The package count is fixed and there are no sub-packages
+ *
+ * If package is too small, exit.
+ * If package is larger than expected, issue warning but continue
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (count < expected_count) {
+ goto package_too_small;
+ } else if (count > expected_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Return Package is larger than needed - "
+ "found %u, expected %u\n",
+ data->pathname, count,
+ expected_count));
+ }
+
+ /* Validate all elements of the returned package */
+
+ status = acpi_ns_check_package_elements(data, elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ package->ret_info.
+ count2, 0);
+ break;
+
+ case ACPI_PTYPE1_VAR:
+
+ /*
+ * The package count is variable, there are no sub-packages, and all
+ * elements must be of the same type
+ */
+ for (i = 0; i < count; i++) {
+ status = acpi_ns_check_object_type(data, elements,
+ package->ret_info.
+ object_type1, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ elements++;
+ }
+ break;
+
+ case ACPI_PTYPE1_OPTION:
+
+ /*
+ * The package count is variable, there are no sub-packages. There are
+ * a fixed number of required elements, and a variable number of
+ * optional elements.
+ *
+ * Check if package is at least as large as the minimum required
+ */
+ expected_count = package->ret_info3.count;
+ if (count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Variable number of sub-objects */
+
+ for (i = 0; i < count; i++) {
+ if (i < package->ret_info3.count) {
+
+ /* These are the required package elements (0, 1, or 2) */
+
+ status =
+ acpi_ns_check_object_type(data, elements,
+ package->
+ ret_info3.
+ object_type[i],
+ i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ } else {
+ /* These are the optional package elements */
+
+ status =
+ acpi_ns_check_object_type(data, elements,
+ package->
+ ret_info3.
+ tail_object_type,
+ i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ elements++;
+ }
+ break;
+
+ case ACPI_PTYPE2_REV_FIXED:
+
+ /* First element is the (Integer) revision */
+
+ status = acpi_ns_check_object_type(data, elements,
+ ACPI_RTYPE_INTEGER, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ elements++;
+ count--;
+
+ /* Examine the sub-packages */
+
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
+
+ case ACPI_PTYPE2_PKG_COUNT:
+
+ /* First element is the (Integer) count of sub-packages to follow */
+
+ status = acpi_ns_check_object_type(data, elements,
+ ACPI_RTYPE_INTEGER, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Count cannot be larger than the parent package length, but allow it
+ * to be smaller. The >= accounts for the Integer above.
+ */
+ expected_count = (u32)(*elements)->integer.value;
+ if (expected_count >= count) {
+ goto package_too_small;
+ }
+
+ count = expected_count;
+ elements++;
+
+ /* Examine the sub-packages */
+
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
+
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_FIXED:
+ case ACPI_PTYPE2_MIN:
+ case ACPI_PTYPE2_COUNT:
+ case ACPI_PTYPE2_FIX_VAR:
+
+ /*
+ * These types all return a single Package that consists of a
+ * variable number of sub-Packages.
+ *
+ * First, ensure that the first element is a sub-Package. If not,
+ * the BIOS may have incorrectly returned the object as a single
+ * package instead of a Package of Packages (a common error if
+ * there is only one entry). We may be able to repair this by
+ * wrapping the returned Package with a new outer Package.
+ */
+ if (*elements
+ && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) {
+
+ /* Create the new outer package and populate it */
+
+ status =
+ acpi_ns_wrap_with_package(data, return_object,
+ return_object_ptr);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Update locals to point to the new package (of 1 element) */
+
+ return_object = *return_object_ptr;
+ elements = return_object->package.elements;
+ count = 1;
+ }
+
+ /* Examine the sub-packages */
+
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
+
+ default:
+
+ /* Should not get here if predefined info table is correct */
+
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Invalid internal return type in table entry: %X",
+ package->ret_info.type));
+
+ return (AE_AML_INTERNAL);
+ }
+
+ return (status);
+
+ package_too_small:
+
+ /* Error exit for the case with an incorrect package count */
+
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Package is too small - found %u elements, expected %u",
+ count, expected_count));
+
+ return (AE_AML_OPERAND_VALUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package_list
+ *
+ * PARAMETERS: data - Pointer to validation data structure
+ * package - Pointer to package-specific info for method
+ * elements - Element list of parent package. All elements
+ * of this list should be of type Package.
+ * count - Count of subpackages
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Examine a list of subpackages
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package_list(struct acpi_predefined_data *data,
+ const union acpi_predefined_info *package,
+ union acpi_operand_object **elements, u32 count)
+{
+ union acpi_operand_object *sub_package;
+ union acpi_operand_object **sub_elements;
+ acpi_status status;
+ u32 expected_count;
+ u32 i;
+ u32 j;
+
+ /*
+ * Validate each sub-Package in the parent Package
+ *
+ * NOTE: assumes list of sub-packages contains no NULL elements.
+ * Any NULL elements should have been removed by earlier call
+ * to acpi_ns_remove_null_elements.
+ */
+ for (i = 0; i < count; i++) {
+ sub_package = *elements;
+ sub_elements = sub_package->package.elements;
+ data->parent_package = sub_package;
+
+ /* Each sub-object must be of type Package */
+
+ status = acpi_ns_check_object_type(data, &sub_package,
+ ACPI_RTYPE_PACKAGE, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Examine the different types of expected sub-packages */
+
+ data->parent_package = sub_package;
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_PKG_COUNT:
+ case ACPI_PTYPE2_REV_FIXED:
+
+ /* Each subpackage has a fixed number of elements */
+
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ package->ret_info.
+ count2, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_FIX_VAR:
+ /*
+ * Each subpackage has a fixed number of elements and an
+ * optional element
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ sub_package->package.
+ count -
+ package->ret_info.
+ count1, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_FIXED:
+
+ /* Each sub-package has a fixed length */
+
+ expected_count = package->ret_info2.count;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ for (j = 0; j < expected_count; j++) {
+ status =
+ acpi_ns_check_object_type(data,
+ &sub_elements[j],
+ package->
+ ret_info2.
+ object_type[j],
+ j);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ break;
+
+ case ACPI_PTYPE2_MIN:
+
+ /* Each sub-package has a variable but minimum length */
+
+ expected_count = package->ret_info.count1;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ sub_package->package.
+ count, 0, 0, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_COUNT:
+
+ /*
+ * First element is the (Integer) count of elements, including
+ * the count field (the ACPI name is num_elements)
+ */
+ status = acpi_ns_check_object_type(data, sub_elements,
+ ACPI_RTYPE_INTEGER,
+ 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Make sure package is large enough for the Count and is
+ * as large as the minimum size
+ */
+ expected_count = (u32)(*sub_elements)->integer.value;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+ if (sub_package->package.count <
+ package->ret_info.count1) {
+ expected_count = package->ret_info.count1;
+ goto package_too_small;
+ }
+ if (expected_count == 0) {
+ /*
+ * Either the num_entries element was originally zero or it was
+ * a NULL element and repaired to an Integer of value zero.
+ * In either case, repair it by setting num_entries to be the
+ * actual size of the subpackage.
+ */
+ expected_count = sub_package->package.count;
+ (*sub_elements)->integer.value = expected_count;
+ }
+
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(data,
+ (sub_elements + 1),
+ package->ret_info.
+ object_type1,
+ (expected_count - 1),
+ 0, 0, 1);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ default: /* Should not get here, type was validated by caller */
+
+ return (AE_AML_INTERNAL);
+ }
+
+ elements++;
+ }
+
+ return (AE_OK);
+
+ package_too_small:
+
+ /* The sub-package count was smaller than required */
+
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Sub-Package[%u] is too small - found %u elements, expected %u",
+ i, sub_package->package.count, expected_count));
+
+ return (AE_AML_OPERAND_VALUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package_elements
+ *
+ * PARAMETERS: data - Pointer to validation data structure
+ * elements - Pointer to the package elements array
+ * type1 - Object type for first group
+ * count1 - Count for first group
+ * type2 - Object type for second group
+ * count2 - Count for second group
+ * start_index - Start of the first group of elements
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check that all elements of a package are of the correct object
+ * type. Supports up to two groups of different object types.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package_elements(struct acpi_predefined_data *data,
+ union acpi_operand_object **elements,
+ u8 type1,
+ u32 count1,
+ u8 type2, u32 count2, u32 start_index)
+{
+ union acpi_operand_object **this_element = elements;
+ acpi_status status;
+ u32 i;
+
+ /*
+ * Up to two groups of package elements are supported by the data
+ * structure. All elements in each group must be of the same type.
+ * The second group can have a count of zero.
+ */
+ for (i = 0; i < count1; i++) {
+ status = acpi_ns_check_object_type(data, this_element,
+ type1, i + start_index);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ this_element++;
+ }
+
+ for (i = 0; i < count2; i++) {
+ status = acpi_ns_check_object_type(data, this_element,
+ type2,
+ (i + count1 + start_index));
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ this_element++;
+ }
+
+ return (AE_OK);
+}
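As an illustration of the two-group interface of acpi_ns_check_package_elements above: a hypothetical package layout of two Integers followed by three Buffers (not an entry taken from the real predefined-name table) would be validated roughly like this:

/* Hypothetical call: expect { Integer, Integer, Buffer, Buffer, Buffer },
 * with element indices in any warnings starting at 0 */
status = acpi_ns_check_package_elements(data, elements,
                                        ACPI_RTYPE_INTEGER, 2,
                                        ACPI_RTYPE_BUFFER, 3, 0);
if (ACPI_FAILURE(status)) {
        return (status);
}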
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 8c5f292860fc..9e833353c06a 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 90189251cdf0..ba4d98287c6a 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,8 @@ ACPI_MODULE_NAME("nsrepair2")
*/
typedef
acpi_status(*acpi_repair_function) (struct acpi_predefined_data *data,
- union acpi_operand_object **return_object_ptr);
+ union acpi_operand_object
+ **return_object_ptr);
typedef struct acpi_repair_info {
char name[ACPI_NAME_SIZE];
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 1d2d8ffc1bc5..5d43efc53a61 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -328,6 +328,11 @@ acpi_ns_search_and_enter(u32 target_name,
if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
status = AE_ALREADY_EXISTS;
}
+#ifdef ACPI_ASL_COMPILER
+ if (*return_node && (*return_node)->type == ACPI_TYPE_ANY) {
+ (*return_node)->flags |= ANOBJ_IS_EXTERNAL;
+ }
+#endif
/* Either found it or there was an error: finished either way */
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index b5b4cb72a8a8..686420df684f 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,14 +46,11 @@
#include "accommon.h"
#include "acnamesp.h"
#include "amlcode.h"
-#include "actables.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsutils")
/* Local prototypes */
-static u8 acpi_ns_valid_path_separator(char sep);
-
#ifdef ACPI_OBSOLETE_FUNCTIONS
acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
#endif
@@ -99,42 +96,6 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_valid_root_prefix
- *
- * PARAMETERS: prefix - Character to be checked
- *
- * RETURN: TRUE if a valid prefix
- *
- * DESCRIPTION: Check if a character is a valid ACPI Root prefix
- *
- ******************************************************************************/
-
-u8 acpi_ns_valid_root_prefix(char prefix)
-{
-
- return ((u8) (prefix == '\\'));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_valid_path_separator
- *
- * PARAMETERS: sep - Character to be checked
- *
- * RETURN: TRUE if a valid path separator
- *
- * DESCRIPTION: Check if a character is a valid ACPI path separator
- *
- ******************************************************************************/
-
-static u8 acpi_ns_valid_path_separator(char sep)
-{
-
- return ((u8) (sep == '.'));
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_get_type
*
* PARAMETERS: node - Parent Node to be examined
@@ -151,10 +112,10 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node parameter"));
- return_UINT32(ACPI_TYPE_ANY);
+ return_VALUE(ACPI_TYPE_ANY);
}
- return_UINT32((acpi_object_type) node->type);
+ return_VALUE(node->type);
}
/*******************************************************************************
@@ -179,10 +140,10 @@ u32 acpi_ns_local(acpi_object_type type)
/* Type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
- return_UINT32(ACPI_NS_NORMAL);
+ return_VALUE(ACPI_NS_NORMAL);
}
- return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
+ return_VALUE(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
}
/*******************************************************************************
@@ -218,19 +179,19 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
*
* strlen() + 1 covers the first name_seg, which has no path separator
*/
- if (acpi_ns_valid_root_prefix(*next_external_char)) {
+ if (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
info->fully_qualified = TRUE;
next_external_char++;
/* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */
- while (acpi_ns_valid_root_prefix(*next_external_char)) {
+ while (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
next_external_char++;
}
} else {
/* Handle Carat prefixes */
- while (*next_external_char == '^') {
+ while (ACPI_IS_PARENT_PREFIX(*next_external_char)) {
info->num_carats++;
next_external_char++;
}
@@ -244,7 +205,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
if (*next_external_char) {
info->num_segments = 1;
for (i = 0; next_external_char[i]; i++) {
- if (acpi_ns_valid_path_separator(next_external_char[i])) {
+ if (ACPI_IS_PATH_SEPARATOR(next_external_char[i])) {
info->num_segments++;
}
}
@@ -282,7 +243,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Setup the correct prefixes, counts, and pointers */
if (info->fully_qualified) {
- internal_name[0] = '\\';
+ internal_name[0] = AML_ROOT_PREFIX;
if (num_segments <= 1) {
result = &internal_name[1];
@@ -302,7 +263,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
i = 0;
if (info->num_carats) {
for (i = 0; i < info->num_carats; i++) {
- internal_name[i] = '^';
+ internal_name[i] = AML_PARENT_PREFIX;
}
}
@@ -322,7 +283,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
for (; num_segments; num_segments--) {
for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (acpi_ns_valid_path_separator(*external_name) ||
+ if (ACPI_IS_PATH_SEPARATOR(*external_name) ||
(*external_name == 0)) {
/* Pad the segment with underscore(s) if segment is short */
@@ -339,7 +300,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Now we must have a path separator, or the pathname is bad */
- if (!acpi_ns_valid_path_separator(*external_name) &&
+ if (!ACPI_IS_PATH_SEPARATOR(*external_name) &&
(*external_name != 0)) {
return_ACPI_STATUS(AE_BAD_PATHNAME);
}
@@ -457,13 +418,13 @@ acpi_ns_externalize_name(u32 internal_name_length,
/* Check for a prefix (one '\' | one or more '^') */
switch (internal_name[0]) {
- case '\\':
+ case AML_ROOT_PREFIX:
prefix_length = 1;
break;
- case '^':
+ case AML_PARENT_PREFIX:
for (i = 0; i < internal_name_length; i++) {
- if (internal_name[i] == '^') {
+ if (ACPI_IS_PARENT_PREFIX(internal_name[i])) {
prefix_length = i + 1;
} else {
break;
@@ -664,17 +625,17 @@ void acpi_ns_terminate(void)
u32 acpi_ns_opens_scope(acpi_object_type type)
{
- ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));
+ ACPI_FUNCTION_ENTRY();
- if (!acpi_ut_valid_object_type(type)) {
+ if (type > ACPI_TYPE_LOCAL_MAX) {
/* type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
- return_UINT32(ACPI_NS_NORMAL);
+ return (ACPI_NS_NORMAL);
}
- return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
+ return (((u32)acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
}
/*******************************************************************************
@@ -710,6 +671,8 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));
+ /* Simplest case is a null pathname */
+
if (!pathname) {
*return_node = prefix_node;
if (!prefix_node) {
@@ -718,6 +681,13 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
return_ACPI_STATUS(AE_OK);
}
+ /* Quick check for a reference to the root */
+
+ if (ACPI_IS_ROOT_PREFIX(pathname[0]) && (!pathname[1])) {
+ *return_node = acpi_gbl_root_node;
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Convert path to internal representation */
status = acpi_ns_internalize_name(pathname, &internal_path);
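The open-coded tests for the root prefix, parent prefix, and path separator characters in this file are replaced by macros, plus the AML_ROOT_PREFIX/AML_PARENT_PREFIX constants. Their definitions are not part of these hunks; judging from the removed acpi_ns_valid_root_prefix and acpi_ns_valid_path_separator helpers, they presumably amount to something like the following sketch:

/* Assumed definitions (acmacros.h / amlcode.h), inferred from the removed helpers */
#define AML_ROOT_PREFIX             '\\'	/* Backslash, 0x5C */
#define AML_PARENT_PREFIX           '^'	/* Carat, 0x5E */

#define ACPI_IS_ROOT_PREFIX(c)      ((c) == (u8) '\\')
#define ACPI_IS_PARENT_PREFIX(c)    ((c) == (u8) '^')
#define ACPI_IS_PATH_SEPARATOR(c)   ((c) == '.')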
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 0483877f26b8..e70911a9e441 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,12 +76,12 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
/* It's really the parent's _scope_ that we want */
- return parent_node->child;
+ return (parent_node->child);
}
/* Otherwise just return the next peer */
- return child_node->peer;
+ return (child_node->peer);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index d6a9f77972b6..fc69949151bb 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -236,7 +236,7 @@ acpi_evaluate_object(acpi_handle handle,
* 2) No handle, not fully qualified pathname (error)
* 3) Valid handle
*/
- if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {
+ if ((pathname) && (ACPI_IS_ROOT_PREFIX(pathname[0]))) {
/* The path is fully qualified, just evaluate by name */
@@ -492,7 +492,7 @@ acpi_walk_namespace(acpi_object_type type,
*/
status = acpi_ut_acquire_read_lock(&acpi_gbl_namespace_rw_lock);
if (ACPI_FAILURE(status)) {
- return status;
+ return_ACPI_STATUS(status);
}
/*
@@ -550,7 +550,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ return (status);
}
node = acpi_ns_validate_handle(obj_handle);
@@ -602,17 +602,22 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
/* Walk the CID list */
- found = 0;
+ found = FALSE;
for (i = 0; i < cid->count; i++) {
if (ACPI_STRCMP(cid->ids[i].string, info->hid)
== 0) {
- found = 1;
+
+ /* Found a matching CID */
+
+ found = TRUE;
break;
}
}
+
ACPI_FREE(cid);
- if (!found)
+ if (!found) {
return (AE_OK);
+ }
}
}
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 811c6f13f476..f3a4d95899f7 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -107,7 +107,7 @@ acpi_get_handle(acpi_handle parent,
*
* Error for <null Parent + relative path>
*/
- if (acpi_ns_valid_root_prefix(pathname[0])) {
+ if (ACPI_IS_ROOT_PREFIX(pathname[0])) {
/* Pathname is fully qualified (starts with '\') */
@@ -290,7 +290,7 @@ acpi_get_object_info(acpi_handle handle,
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- goto cleanup;
+ return (status);
}
node = acpi_ns_validate_handle(handle);
@@ -539,14 +539,14 @@ acpi_status acpi_install_method(u8 *buffer)
/* Parameter validation */
if (!buffer) {
- return AE_BAD_PARAMETER;
+ return (AE_BAD_PARAMETER);
}
/* Table must be a DSDT or SSDT */
if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
- return AE_BAD_HEADER;
+ return (AE_BAD_HEADER);
}
/* First AML opcode in the table must be a control method */
@@ -554,7 +554,7 @@ acpi_status acpi_install_method(u8 *buffer)
parser_state.aml = buffer + sizeof(struct acpi_table_header);
opcode = acpi_ps_peek_opcode(&parser_state);
if (opcode != AML_METHOD_OP) {
- return AE_BAD_PARAMETER;
+ return (AE_BAD_PARAMETER);
}
/* Extract method information from the raw AML */
@@ -572,13 +572,13 @@ acpi_status acpi_install_method(u8 *buffer)
*/
aml_buffer = ACPI_ALLOCATE(aml_length);
if (!aml_buffer) {
- return AE_NO_MEMORY;
+ return (AE_NO_MEMORY);
}
method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
if (!method_obj) {
ACPI_FREE(aml_buffer);
- return AE_NO_MEMORY;
+ return (AE_NO_MEMORY);
}
/* Lock namespace for acpi_ns_lookup, we may be creating a new node */
@@ -644,12 +644,12 @@ acpi_status acpi_install_method(u8 *buffer)
/* Remove local reference to the method object */
acpi_ut_remove_reference(method_obj);
- return status;
+ return (status);
error_exit:
ACPI_FREE(aml_buffer);
ACPI_FREE(method_obj);
- return status;
+ return (status);
}
ACPI_EXPORT_SYMBOL(acpi_install_method)
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 9d029dac6b64..c0853ef294e4 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index cb79e2d4d743..f51308cdbc65 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
/* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
package_length |= (aml[0] & byte_zero_mask);
- return_UINT32(package_length);
+ return_VALUE(package_length);
}
/*******************************************************************************
@@ -162,7 +162,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
/* Point past any namestring prefix characters (backslash or carat) */
- while (acpi_ps_is_prefix_char(*end)) {
+ while (ACPI_IS_ROOT_PREFIX(*end) || ACPI_IS_PARENT_PREFIX(*end)) {
end++;
}
@@ -798,7 +798,8 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
subop = acpi_ps_peek_opcode(parser_state);
if (subop == 0 ||
acpi_ps_is_leading_char(subop) ||
- acpi_ps_is_prefix_char(subop)) {
+ ACPI_IS_ROOT_PREFIX(subop) ||
+ ACPI_IS_PARENT_PREFIX(subop)) {
/* null_name or name_string */
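Here and in nsutils.c above, the type-specific return_UINT32 trace macro is replaced by the generic return_VALUE macro. Its real definition lives in acoutput.h and is not shown in this diff; in a debug build it presumably expands to something along these lines (a sketch under that assumption, with the non-debug build reducing to a plain return):

/* Rough sketch only: trace the function exit with the returned value,
 * widened to u64, then perform the actual return */
#define return_VALUE(value) \
	do { \
		acpi_ut_value_exit(ACPI_DEBUG_PARAMETERS, (u64) (value)); \
		return (value); \
	} while (0)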
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 5607805aab26..63c455447481 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,352 +58,17 @@
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psloop")
-static u32 acpi_gbl_depth = 0;
-
/* Local prototypes */
-
-static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
-
-static acpi_status
-acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
- u8 * aml_op_start,
- union acpi_parse_object *unnamed_op,
- union acpi_parse_object **op);
-
-static acpi_status
-acpi_ps_create_op(struct acpi_walk_state *walk_state,
- u8 * aml_op_start, union acpi_parse_object **new_op);
-
static acpi_status
acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
u8 * aml_op_start, union acpi_parse_object *op);
-static acpi_status
-acpi_ps_complete_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object **op, acpi_status status);
-
-static acpi_status
-acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op, acpi_status status);
-
static void
acpi_ps_link_module_code(union acpi_parse_object *parent_op,
u8 *aml_start, u32 aml_length, acpi_owner_id owner_id);
/*******************************************************************************
*
- * FUNCTION: acpi_ps_get_aml_opcode
- *
- * PARAMETERS: walk_state - Current state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Extract the next AML opcode from the input stream.
- *
- ******************************************************************************/
-
-static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
-{
-
- ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
-
- walk_state->aml_offset =
- (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
- walk_state->parser_state.aml_start);
- walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
-
- /*
- * First cut to determine what we have found:
- * 1) A valid AML opcode
- * 2) A name string
- * 3) An unknown/invalid opcode
- */
- walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
-
- switch (walk_state->op_info->class) {
- case AML_CLASS_ASCII:
- case AML_CLASS_PREFIX:
- /*
- * Starts with a valid prefix or ASCII char, this is a name
- * string. Convert the bare name string to a namepath.
- */
- walk_state->opcode = AML_INT_NAMEPATH_OP;
- walk_state->arg_types = ARGP_NAMESTRING;
- break;
-
- case AML_CLASS_UNKNOWN:
-
- /* The opcode is unrecognized. Complain and skip unknown opcodes */
-
- if (walk_state->pass_number == 2) {
- ACPI_ERROR((AE_INFO,
- "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
- walk_state->opcode,
- (u32)(walk_state->aml_offset +
- sizeof(struct acpi_table_header))));
-
- ACPI_DUMP_BUFFER(walk_state->parser_state.aml - 16, 48);
-
-#ifdef ACPI_ASL_COMPILER
- /*
- * This is executed for the disassembler only. Output goes
- * to the disassembled ASL output file.
- */
- acpi_os_printf
- ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
- walk_state->opcode,
- (u32)(walk_state->aml_offset +
- sizeof(struct acpi_table_header)));
-
- /* Dump the context surrounding the invalid opcode */
-
- acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
- aml - 16), 48, DB_BYTE_DISPLAY,
- walk_state->aml_offset +
- sizeof(struct acpi_table_header) -
- 16);
- acpi_os_printf(" */\n");
-#endif
- }
-
- /* Increment past one-byte or two-byte opcode */
-
- walk_state->parser_state.aml++;
- if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */
- walk_state->parser_state.aml++;
- }
-
- return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
-
- default:
-
- /* Found opcode info, this is a normal opcode */
-
- walk_state->parser_state.aml +=
- acpi_ps_get_opcode_size(walk_state->opcode);
- walk_state->arg_types = walk_state->op_info->parse_args;
- break;
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_build_named_op
- *
- * PARAMETERS: walk_state - Current state
- * aml_op_start - Begin of named Op in AML
- * unnamed_op - Early Op (not a named Op)
- * op - Returned Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Parse a named Op
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
- u8 * aml_op_start,
- union acpi_parse_object *unnamed_op,
- union acpi_parse_object **op)
-{
- acpi_status status = AE_OK;
- union acpi_parse_object *arg = NULL;
-
- ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
-
- unnamed_op->common.value.arg = NULL;
- unnamed_op->common.arg_list_length = 0;
- unnamed_op->common.aml_opcode = walk_state->opcode;
-
- /*
- * Get and append arguments until we find the node that contains
- * the name (the type ARGP_NAME).
- */
- while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
- (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
- status =
- acpi_ps_get_next_arg(walk_state,
- &(walk_state->parser_state),
- GET_CURRENT_ARG_TYPE(walk_state->
- arg_types), &arg);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- acpi_ps_append_arg(unnamed_op, arg);
- INCREMENT_ARG_LIST(walk_state->arg_types);
- }
-
- /*
- * Make sure that we found a NAME and didn't run out of arguments
- */
- if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
- return_ACPI_STATUS(AE_AML_NO_OPERAND);
- }
-
- /* We know that this arg is a name, move to next arg */
-
- INCREMENT_ARG_LIST(walk_state->arg_types);
-
- /*
- * Find the object. This will either insert the object into
- * the namespace or simply look it up
- */
- walk_state->op = NULL;
-
- status = walk_state->descending_callback(walk_state, op);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
- return_ACPI_STATUS(status);
- }
-
- if (!*op) {
- return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
- }
-
- status = acpi_ps_next_parse_state(walk_state, *op, status);
- if (ACPI_FAILURE(status)) {
- if (status == AE_CTRL_PENDING) {
- return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
- }
- return_ACPI_STATUS(status);
- }
-
- acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
- acpi_gbl_depth++;
-
- if ((*op)->common.aml_opcode == AML_REGION_OP ||
- (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
- /*
- * Defer final parsing of an operation_region body, because we don't
- * have enough info in the first pass to parse it correctly (i.e.,
- * there may be method calls within the term_arg elements of the body.)
- *
- * However, we must continue parsing because the opregion is not a
- * standalone package -- we don't know where the end is at this point.
- *
- * (Length is unknown until parse of the body complete)
- */
- (*op)->named.data = aml_op_start;
- (*op)->named.length = 0;
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_create_op
- *
- * PARAMETERS: walk_state - Current state
- * aml_op_start - Op start in AML
- * new_op - Returned Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Get Op from AML
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_create_op(struct acpi_walk_state *walk_state,
- u8 * aml_op_start, union acpi_parse_object **new_op)
-{
- acpi_status status = AE_OK;
- union acpi_parse_object *op;
- union acpi_parse_object *named_op = NULL;
- union acpi_parse_object *parent_scope;
- u8 argument_count;
- const struct acpi_opcode_info *op_info;
-
- ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
-
- status = acpi_ps_get_aml_opcode(walk_state);
- if (status == AE_CTRL_PARSE_CONTINUE) {
- return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
- }
-
- /* Create Op structure and append to parent's argument list */
-
- walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
- op = acpi_ps_alloc_op(walk_state->opcode);
- if (!op) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- if (walk_state->op_info->flags & AML_NAMED) {
- status =
- acpi_ps_build_named_op(walk_state, aml_op_start, op,
- &named_op);
- acpi_ps_free_op(op);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- *new_op = named_op;
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Not a named opcode, just allocate Op and append to parent */
-
- if (walk_state->op_info->flags & AML_CREATE) {
- /*
- * Backup to beginning of create_XXXfield declaration
- * body_length is unknown until we parse the body
- */
- op->named.data = aml_op_start;
- op->named.length = 0;
- }
-
- if (walk_state->opcode == AML_BANK_FIELD_OP) {
- /*
- * Backup to beginning of bank_field declaration
- * body_length is unknown until we parse the body
- */
- op->named.data = aml_op_start;
- op->named.length = 0;
- }
-
- parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
- acpi_ps_append_arg(parent_scope, op);
-
- if (parent_scope) {
- op_info =
- acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
- if (op_info->flags & AML_HAS_TARGET) {
- argument_count =
- acpi_ps_get_argument_count(op_info->type);
- if (parent_scope->common.arg_list_length >
- argument_count) {
- op->common.flags |= ACPI_PARSEOP_TARGET;
- }
- } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
- op->common.flags |= ACPI_PARSEOP_TARGET;
- }
- }
-
- if (walk_state->descending_callback != NULL) {
- /*
- * Find the object. This will either insert the object into
- * the namespace or simply look it up
- */
- walk_state->op = *new_op = op;
-
- status = walk_state->descending_callback(walk_state, &op);
- status = acpi_ps_next_parse_state(walk_state, op, status);
- if (status == AE_CTRL_PENDING) {
- status = AE_CTRL_PARSE_PENDING;
- }
- }
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ps_get_arguments
*
* PARAMETERS: walk_state - Current state
@@ -711,288 +376,6 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
/*******************************************************************************
*
- * FUNCTION: acpi_ps_complete_op
- *
- * PARAMETERS: walk_state - Current state
- * op - Returned Op
- * status - Parse status before complete Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Complete Op
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_complete_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object **op, acpi_status status)
-{
- acpi_status status2;
-
- ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
-
- /*
- * Finished one argument of the containing scope
- */
- walk_state->parser_state.scope->parse_scope.arg_count--;
-
- /* Close this Op (will result in parse subtree deletion) */
-
- status2 = acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
-
- *op = NULL;
-
- switch (status) {
- case AE_OK:
- break;
-
- case AE_CTRL_TRANSFER:
-
- /* We are about to transfer to a called method */
-
- walk_state->prev_op = NULL;
- walk_state->prev_arg_types = walk_state->arg_types;
- return_ACPI_STATUS(status);
-
- case AE_CTRL_END:
-
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
-
- if (*op) {
- walk_state->op = *op;
- walk_state->op_info =
- acpi_ps_get_opcode_info((*op)->common.aml_opcode);
- walk_state->opcode = (*op)->common.aml_opcode;
-
- status = walk_state->ascending_callback(walk_state);
- status =
- acpi_ps_next_parse_state(walk_state, *op, status);
-
- status2 = acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
- }
-
- status = AE_OK;
- break;
-
- case AE_CTRL_BREAK:
- case AE_CTRL_CONTINUE:
-
- /* Pop off scopes until we find the While */
-
- while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
- }
-
- /* Close this iteration of the While loop */
-
- walk_state->op = *op;
- walk_state->op_info =
- acpi_ps_get_opcode_info((*op)->common.aml_opcode);
- walk_state->opcode = (*op)->common.aml_opcode;
-
- status = walk_state->ascending_callback(walk_state);
- status = acpi_ps_next_parse_state(walk_state, *op, status);
-
- status2 = acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
-
- status = AE_OK;
- break;
-
- case AE_CTRL_TERMINATE:
-
- /* Clean up */
- do {
- if (*op) {
- status2 =
- acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
-
- acpi_ut_delete_generic_state
- (acpi_ut_pop_generic_state
- (&walk_state->control_state));
- }
-
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
-
- } while (*op);
-
- return_ACPI_STATUS(AE_OK);
-
- default: /* All other non-AE_OK status */
-
- do {
- if (*op) {
- status2 =
- acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
- }
-
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
-
- } while (*op);
-
-#if 0
- /*
- * TBD: Cleanup parse ops on error
- */
- if (*op == NULL) {
- acpi_ps_pop_scope(parser_state, op,
- &walk_state->arg_types,
- &walk_state->arg_count);
- }
-#endif
- walk_state->prev_op = NULL;
- walk_state->prev_arg_types = walk_state->arg_types;
- return_ACPI_STATUS(status);
- }
-
- /* This scope complete? */
-
- if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
- } else {
- *op = NULL;
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_complete_final_op
- *
- * PARAMETERS: walk_state - Current state
- * op - Current Op
- * status - Current parse status before complete last
- * Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Complete last Op.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op, acpi_status status)
-{
- acpi_status status2;
-
- ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
-
- /*
- * Complete the last Op (if not completed), and clear the scope stack.
- * It is easily possible to end an AML "package" with an unbounded number
- * of open scopes (such as when several ASL blocks are closed with
- * sequential closing braces). We want to terminate each one cleanly.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
- op));
- do {
- if (op) {
- if (walk_state->ascending_callback != NULL) {
- walk_state->op = op;
- walk_state->op_info =
- acpi_ps_get_opcode_info(op->common.
- aml_opcode);
- walk_state->opcode = op->common.aml_opcode;
-
- status =
- walk_state->ascending_callback(walk_state);
- status =
- acpi_ps_next_parse_state(walk_state, op,
- status);
- if (status == AE_CTRL_PENDING) {
- status =
- acpi_ps_complete_op(walk_state, &op,
- AE_OK);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- if (status == AE_CTRL_TERMINATE) {
- status = AE_OK;
-
- /* Clean up */
- do {
- if (op) {
- status2 =
- acpi_ps_complete_this_op
- (walk_state, op);
- if (ACPI_FAILURE
- (status2)) {
- return_ACPI_STATUS
- (status2);
- }
- }
-
- acpi_ps_pop_scope(&
- (walk_state->
- parser_state),
- &op,
- &walk_state->
- arg_types,
- &walk_state->
- arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
- }
-
- else if (ACPI_FAILURE(status)) {
-
- /* First error is most important */
-
- (void)
- acpi_ps_complete_this_op(walk_state,
- op);
- return_ACPI_STATUS(status);
- }
- }
-
- status2 = acpi_ps_complete_this_op(walk_state, op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
- }
-
- acpi_ps_pop_scope(&(walk_state->parser_state), &op,
- &walk_state->arg_types,
- &walk_state->arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ps_parse_loop
*
* PARAMETERS: walk_state - Current state
@@ -1177,10 +560,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.aml_opcode);
if (walk_state->op_info->flags & AML_NAMED) {
- if (acpi_gbl_depth) {
- acpi_gbl_depth--;
- }
-
if (op->common.aml_opcode == AML_REGION_OP ||
op->common.aml_opcode == AML_DATA_REGION_OP) {
/*
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
new file mode 100644
index 000000000000..12c4028002b1
--- /dev/null
+++ b/drivers/acpi/acpica/psobject.c
@@ -0,0 +1,647 @@
+/******************************************************************************
+ *
+ * Module Name: psobject - Support for parse objects
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psobject")
+
+/* Local prototypes */
+static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_aml_opcode
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Extract the next AML opcode from the input stream.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
+{
+
+ ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
+
+ walk_state->aml_offset =
+ (u32)ACPI_PTR_DIFF(walk_state->parser_state.aml,
+ walk_state->parser_state.aml_start);
+ walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
+
+ /*
+ * First cut to determine what we have found:
+ * 1) A valid AML opcode
+ * 2) A name string
+ * 3) An unknown/invalid opcode
+ */
+ walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
+
+ switch (walk_state->op_info->class) {
+ case AML_CLASS_ASCII:
+ case AML_CLASS_PREFIX:
+ /*
+ * Starts with a valid prefix or ASCII char, this is a name
+ * string. Convert the bare name string to a namepath.
+ */
+ walk_state->opcode = AML_INT_NAMEPATH_OP;
+ walk_state->arg_types = ARGP_NAMESTRING;
+ break;
+
+ case AML_CLASS_UNKNOWN:
+
+ /* The opcode is unrecognized. Complain and skip unknown opcodes */
+
+ if (walk_state->pass_number == 2) {
+ ACPI_ERROR((AE_INFO,
+ "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
+ walk_state->opcode,
+ (u32)(walk_state->aml_offset +
+ sizeof(struct acpi_table_header))));
+
+ ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16),
+ 48);
+
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * This is executed for the disassembler only. Output goes
+ * to the disassembled ASL output file.
+ */
+ acpi_os_printf
+ ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
+ walk_state->opcode,
+ (u32)(walk_state->aml_offset +
+ sizeof(struct acpi_table_header)));
+
+ /* Dump the context surrounding the invalid opcode */
+
+ acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
+ aml - 16), 48, DB_BYTE_DISPLAY,
+ (walk_state->aml_offset +
+ sizeof(struct acpi_table_header) -
+ 16));
+ acpi_os_printf(" */\n");
+#endif
+ }
+
+ /* Increment past one-byte or two-byte opcode */
+
+ walk_state->parser_state.aml++;
+ if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */
+ walk_state->parser_state.aml++;
+ }
+
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+
+ default:
+
+ /* Found opcode info, this is a normal opcode */
+
+ walk_state->parser_state.aml +=
+ acpi_ps_get_opcode_size(walk_state->opcode);
+ walk_state->arg_types = walk_state->op_info->parse_args;
+ break;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
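
The one-or-two-byte skip above reflects how AML encodes opcodes: most opcodes are a single byte, and extended opcodes are the 0x5B prefix followed by a second byte, so any decoded value above 0xFF implies two bytes were consumed. A minimal standalone sketch of that rule follows; the helper name is invented for illustration and is not the ACPICA API.

#include <stdint.h>
#include <stdio.h>

#define AML_EXTENDED_PREFIX 0x5B	/* extended opcodes are 0x5B followed by a second byte */

/* Peek the next opcode without consuming it; extended opcodes come back as 0x5Bxx */
static uint16_t peek_opcode(const uint8_t *aml)
{
	if (aml[0] == AML_EXTENDED_PREFIX)
		return (uint16_t)((AML_EXTENDED_PREFIX << 8) | aml[1]);
	return aml[0];
}

int main(void)
{
	const uint8_t stream[] = { 0x5B, 0x80 };	/* 0x5B80: an extended (two-byte) opcode */
	uint16_t opcode = peek_opcode(stream);

	/* Values above 0xFF can only come from the 0x5B prefix, so they occupy two bytes */
	printf("opcode 0x%04X, size %u byte(s)\n", opcode, opcode > 0xFF ? 2u : 1u);
	return 0;
}
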
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_build_named_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * aml_op_start - Beginning of named Op in AML
+ * unnamed_op - Early Op (not a named Op)
+ * op - Returned Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse a named Op
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
+ u8 *aml_op_start,
+ union acpi_parse_object *unnamed_op,
+ union acpi_parse_object **op)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *arg = NULL;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
+
+ unnamed_op->common.value.arg = NULL;
+ unnamed_op->common.arg_list_length = 0;
+ unnamed_op->common.aml_opcode = walk_state->opcode;
+
+ /*
+ * Get and append arguments until we find the node that contains
+ * the name (the type ARGP_NAME).
+ */
+ while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
+ (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
+ status =
+ acpi_ps_get_next_arg(walk_state,
+ &(walk_state->parser_state),
+ GET_CURRENT_ARG_TYPE(walk_state->
+ arg_types), &arg);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ps_append_arg(unnamed_op, arg);
+ INCREMENT_ARG_LIST(walk_state->arg_types);
+ }
+
+ /*
+ * Make sure that we found a NAME and didn't run out of arguments
+ */
+ if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ /* We know that this arg is a name, move to next arg */
+
+ INCREMENT_ARG_LIST(walk_state->arg_types);
+
+ /*
+ * Find the object. This will either insert the object into
+ * the namespace or simply look it up
+ */
+ walk_state->op = NULL;
+
+ status = walk_state->descending_callback(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
+ return_ACPI_STATUS(status);
+ }
+
+ if (!*op) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+
+ status = acpi_ps_next_parse_state(walk_state, *op, status);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_PENDING) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
+ }
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
+
+ if ((*op)->common.aml_opcode == AML_REGION_OP ||
+ (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
+ /*
+ * Defer final parsing of an operation_region body, because we don't
+ * have enough info in the first pass to parse it correctly (i.e.,
+ * there may be method calls within the term_arg elements of the body.)
+ *
+ * However, we must continue parsing because the opregion is not a
+ * standalone package -- we don't know where the end is at this point.
+ *
+ * (Length is unknown until parse of the body is complete)
+ */
+ (*op)->named.data = aml_op_start;
+ (*op)->named.length = 0;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
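
The named.data / named.length bookkeeping above is a deferred-parse marker: on the first pass only the start of the operation_region body is recorded, its length is filled in once the enclosing scope has been walked, and only then is the body parsed on its own. A standalone sketch of that two-step pattern, with structure and helper names invented for illustration (not the ACPICA data structures):

#include <stdint.h>
#include <stdio.h>

/* Marker for a body whose extent is unknown on the first pass */
struct deferred_body {
	const uint8_t *data;	/* start of the body in the AML stream */
	uint32_t length;	/* 0 until the end of the package is known */
};

static void defer_body(struct deferred_body *d, const uint8_t *start)
{
	d->data = start;
	d->length = 0;		/* length is filled in by a later pass */
}

static void finish_body(struct deferred_body *d, const uint8_t *end)
{
	d->length = (uint32_t)(end - d->data);
}

int main(void)
{
	uint8_t aml[32] = { 0 };
	struct deferred_body region;

	defer_body(&region, &aml[4]);	/* first pass: only note where the body starts */
	finish_body(&region, &aml[20]);	/* second pass: now the extent is known */
	printf("deferred body length = %u bytes\n", region.length);
	return 0;
}
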
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_create_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * aml_op_start - Op start in AML
+ * new_op - Returned Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get Op from AML
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_create_op(struct acpi_walk_state *walk_state,
+ u8 *aml_op_start, union acpi_parse_object **new_op)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *op;
+ union acpi_parse_object *named_op = NULL;
+ union acpi_parse_object *parent_scope;
+ u8 argument_count;
+ const struct acpi_opcode_info *op_info;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
+
+ status = acpi_ps_get_aml_opcode(walk_state);
+ if (status == AE_CTRL_PARSE_CONTINUE) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+
+ /* Create Op structure and append to parent's argument list */
+
+ walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
+ op = acpi_ps_alloc_op(walk_state->opcode);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ if (walk_state->op_info->flags & AML_NAMED) {
+ status =
+ acpi_ps_build_named_op(walk_state, aml_op_start, op,
+ &named_op);
+ acpi_ps_free_op(op);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ *new_op = named_op;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Not a named opcode, just allocate Op and append to parent */
+
+ if (walk_state->op_info->flags & AML_CREATE) {
+ /*
+ * Backup to beginning of create_XXXfield declaration
+ * body_length is unknown until we parse the body
+ */
+ op->named.data = aml_op_start;
+ op->named.length = 0;
+ }
+
+ if (walk_state->opcode == AML_BANK_FIELD_OP) {
+ /*
+ * Backup to beginning of bank_field declaration
+ * body_length is unknown until we parse the body
+ */
+ op->named.data = aml_op_start;
+ op->named.length = 0;
+ }
+
+ parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
+ acpi_ps_append_arg(parent_scope, op);
+
+ if (parent_scope) {
+ op_info =
+ acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
+ if (op_info->flags & AML_HAS_TARGET) {
+ argument_count =
+ acpi_ps_get_argument_count(op_info->type);
+ if (parent_scope->common.arg_list_length >
+ argument_count) {
+ op->common.flags |= ACPI_PARSEOP_TARGET;
+ }
+ } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
+ op->common.flags |= ACPI_PARSEOP_TARGET;
+ }
+ }
+
+ if (walk_state->descending_callback != NULL) {
+ /*
+ * Find the object. This will either insert the object into
+ * the namespace or simply look it up
+ */
+ walk_state->op = *new_op = op;
+
+ status = walk_state->descending_callback(walk_state, &op);
+ status = acpi_ps_next_parse_state(walk_state, op, status);
+ if (status == AE_CTRL_PENDING) {
+ status = AE_CTRL_PARSE_PENDING;
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_complete_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * op - Returned Op
+ * status - Parse status before complete Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Complete Op
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **op, acpi_status status)
+{
+ acpi_status status2;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
+
+ /*
+ * Finished one argument of the containing scope
+ */
+ walk_state->parser_state.scope->parse_scope.arg_count--;
+
+ /* Close this Op (will result in parse subtree deletion) */
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ *op = NULL;
+
+ switch (status) {
+ case AE_OK:
+ break;
+
+ case AE_CTRL_TRANSFER:
+
+ /* We are about to transfer to a called method */
+
+ walk_state->prev_op = NULL;
+ walk_state->prev_arg_types = walk_state->arg_types;
+ return_ACPI_STATUS(status);
+
+ case AE_CTRL_END:
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ if (*op) {
+ walk_state->op = *op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info((*op)->common.aml_opcode);
+ walk_state->opcode = (*op)->common.aml_opcode;
+
+ status = walk_state->ascending_callback(walk_state);
+ status =
+ acpi_ps_next_parse_state(walk_state, *op, status);
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ status = AE_OK;
+ break;
+
+ case AE_CTRL_BREAK:
+ case AE_CTRL_CONTINUE:
+
+ /* Pop off scopes until we find the While */
+
+ while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ }
+
+ /* Close this iteration of the While loop */
+
+ walk_state->op = *op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info((*op)->common.aml_opcode);
+ walk_state->opcode = (*op)->common.aml_opcode;
+
+ status = walk_state->ascending_callback(walk_state);
+ status = acpi_ps_next_parse_state(walk_state, *op, status);
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ status = AE_OK;
+ break;
+
+ case AE_CTRL_TERMINATE:
+
+ /* Clean up */
+ do {
+ if (*op) {
+ status2 =
+ acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ acpi_ut_delete_generic_state
+ (acpi_ut_pop_generic_state
+ (&walk_state->control_state));
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (*op);
+
+ return_ACPI_STATUS(AE_OK);
+
+ default: /* All other non-AE_OK status */
+
+ do {
+ if (*op) {
+ status2 =
+ acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (*op);
+
+#if 0
+ /*
+ * TBD: Cleanup parse ops on error
+ */
+ if (*op == NULL) {
+ acpi_ps_pop_scope(parser_state, op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ }
+#endif
+ walk_state->prev_op = NULL;
+ walk_state->prev_arg_types = walk_state->arg_types;
+ return_ACPI_STATUS(status);
+ }
+
+ /* This scope complete? */
+
+ if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
+ } else {
+ *op = NULL;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_complete_final_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * op - Current Op
+ * status - Current parse status before complete last
+ * Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Complete last Op.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, acpi_status status)
+{
+ acpi_status status2;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
+
+ /*
+ * Complete the last Op (if not completed), and clear the scope stack.
+ * It is easily possible to end an AML "package" with an unbounded number
+ * of open scopes (such as when several ASL blocks are closed with
+ * sequential closing braces). We want to terminate each one cleanly.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
+ op));
+ do {
+ if (op) {
+ if (walk_state->ascending_callback != NULL) {
+ walk_state->op = op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info(op->common.
+ aml_opcode);
+ walk_state->opcode = op->common.aml_opcode;
+
+ status =
+ walk_state->ascending_callback(walk_state);
+ status =
+ acpi_ps_next_parse_state(walk_state, op,
+ status);
+ if (status == AE_CTRL_PENDING) {
+ status =
+ acpi_ps_complete_op(walk_state, &op,
+ AE_OK);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ if (status == AE_CTRL_TERMINATE) {
+ status = AE_OK;
+
+ /* Clean up */
+ do {
+ if (op) {
+ status2 =
+ acpi_ps_complete_this_op
+ (walk_state, op);
+ if (ACPI_FAILURE
+ (status2)) {
+ return_ACPI_STATUS
+ (status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&
+ (walk_state->
+ parser_state),
+ &op,
+ &walk_state->
+ arg_types,
+ &walk_state->
+ arg_count);
+
+ } while (op);
+
+ return_ACPI_STATUS(status);
+ }
+
+ else if (ACPI_FAILURE(status)) {
+
+ /* First error is most important */
+
+ (void)
+ acpi_ps_complete_this_op(walk_state,
+ op);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ status2 = acpi_ps_complete_this_op(walk_state, op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), &op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (op);
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1793d934aa30..1b659e59710a 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,16 +43,12 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acparser.h"
#include "acopcode.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psopcode")
-static const u8 acpi_gbl_argument_count[] =
- { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
-
/*******************************************************************************
*
* NAME: acpi_gbl_aml_op_info
@@ -63,7 +59,6 @@ static const u8 acpi_gbl_argument_count[] =
* the operand type.
*
******************************************************************************/
-
/*
* Summary of opcode types/flags
*
@@ -181,7 +176,6 @@ static const u8 acpi_gbl_argument_count[] =
AML_CREATE_QWORD_FIELD_OP
******************************************************************************/
-
/*
* Master Opcode information table. A summary of everything we know about each
* opcode, all in one place.
@@ -656,169 +650,3 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/*! [End] no source code translation !*/
};
-
-/*
- * This table is directly indexed by the opcodes, and returns an
- * index into the table above
- */
-static const u8 acpi_gbl_short_op_index[256] = {
-/* 0 1 2 3 4 5 6 7 */
-/* 8 9 A B C D E F */
-/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
-/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
-/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
-/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
-/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
-/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
-/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
-/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
-/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
-/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
-/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
-/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
-/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
-/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
-/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
-};
-
-/*
- * This table is indexed by the second opcode of the extended opcode
- * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
- */
-static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
-/* 0 1 2 3 4 5 6 7 */
-/* 8 9 A B C D E F */
-/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
-/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
-/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
-/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
-/* 0x88 */ 0x7C,
-};
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_get_opcode_info
- *
- * PARAMETERS: opcode - The AML opcode
- *
- * RETURN: A pointer to the info about the opcode.
- *
- * DESCRIPTION: Find AML opcode description based on the opcode.
- * NOTE: This procedure must ALWAYS return a valid pointer!
- *
- ******************************************************************************/
-
-const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
-{
- ACPI_FUNCTION_NAME(ps_get_opcode_info);
-
- /*
- * Detect normal 8-bit opcode or extended 16-bit opcode
- */
- if (!(opcode & 0xFF00)) {
-
- /* Simple (8-bit) opcode: 0-255, can't index beyond table */
-
- return (&acpi_gbl_aml_op_info
- [acpi_gbl_short_op_index[(u8) opcode]]);
- }
-
- if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
- (((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
-
- /* Valid extended (16-bit) opcode */
-
- return (&acpi_gbl_aml_op_info
- [acpi_gbl_long_op_index[(u8) opcode]]);
- }
-
- /* Unknown AML opcode */
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Unknown AML opcode [%4.4X]\n", opcode));
-
- return (&acpi_gbl_aml_op_info[_UNK]);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_get_opcode_name
- *
- * PARAMETERS: opcode - The AML opcode
- *
- * RETURN: A pointer to the name of the opcode (ASCII String)
- * Note: Never returns NULL.
- *
- * DESCRIPTION: Translate an opcode into a human-readable string
- *
- ******************************************************************************/
-
-char *acpi_ps_get_opcode_name(u16 opcode)
-{
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
-
- const struct acpi_opcode_info *op;
-
- op = acpi_ps_get_opcode_info(opcode);
-
- /* Always guaranteed to return a valid pointer */
-
- return (op->name);
-
-#else
- return ("OpcodeName unavailable");
-
-#endif
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_get_argument_count
- *
- * PARAMETERS: op_type - Type associated with the AML opcode
- *
- * RETURN: Argument count
- *
- * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
- *
- ******************************************************************************/
-
-u8 acpi_ps_get_argument_count(u32 op_type)
-{
-
- if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
- return (acpi_gbl_argument_count[op_type]);
- }
-
- return (0);
-}
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
new file mode 100644
index 000000000000..9ba5301e5751
--- /dev/null
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -0,0 +1,223 @@
+/******************************************************************************
+ *
+ * Module Name: psopinfo - AML opcode information functions and dispatch tables
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acopcode.h"
+#include "amlcode.h"
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psopinfo")
+
+extern const u8 acpi_gbl_short_op_index[];
+extern const u8 acpi_gbl_long_op_index[];
+
+static const u8 acpi_gbl_argument_count[] =
+ { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_opcode_info
+ *
+ * PARAMETERS: opcode - The AML opcode
+ *
+ * RETURN: A pointer to the info about the opcode.
+ *
+ * DESCRIPTION: Find AML opcode description based on the opcode.
+ * NOTE: This procedure must ALWAYS return a valid pointer!
+ *
+ ******************************************************************************/
+
+const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
+{
+ ACPI_FUNCTION_NAME(ps_get_opcode_info);
+
+ /*
+ * Detect normal 8-bit opcode or extended 16-bit opcode
+ */
+ if (!(opcode & 0xFF00)) {
+
+ /* Simple (8-bit) opcode: 0-255, can't index beyond table */
+
+ return (&acpi_gbl_aml_op_info
+ [acpi_gbl_short_op_index[(u8)opcode]]);
+ }
+
+ if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
+ (((u8)opcode) <= MAX_EXTENDED_OPCODE)) {
+
+ /* Valid extended (16-bit) opcode */
+
+ return (&acpi_gbl_aml_op_info
+ [acpi_gbl_long_op_index[(u8)opcode]]);
+ }
+
+ /* Unknown AML opcode */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Unknown AML opcode [%4.4X]\n", opcode));
+
+ return (&acpi_gbl_aml_op_info[_UNK]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_opcode_name
+ *
+ * PARAMETERS: opcode - The AML opcode
+ *
+ * RETURN: A pointer to the name of the opcode (ASCII String)
+ * Note: Never returns NULL.
+ *
+ * DESCRIPTION: Translate an opcode into a human-readable string
+ *
+ ******************************************************************************/
+
+char *acpi_ps_get_opcode_name(u16 opcode)
+{
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
+
+ const struct acpi_opcode_info *op;
+
+ op = acpi_ps_get_opcode_info(opcode);
+
+ /* Always guaranteed to return a valid pointer */
+
+ return (op->name);
+
+#else
+ return ("OpcodeName unavailable");
+
+#endif
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_argument_count
+ *
+ * PARAMETERS: op_type - Type associated with the AML opcode
+ *
+ * RETURN: Argument count
+ *
+ * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
+ *
+ ******************************************************************************/
+
+u8 acpi_ps_get_argument_count(u32 op_type)
+{
+
+ if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
+ return (acpi_gbl_argument_count[op_type]);
+ }
+
+ return (0);
+}
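
One place this fixed-argument-count table matters is target detection in acpi_ps_create_op (see psobject.c above): when a parent op that can take a target already has more arguments attached than its type's fixed count, the extra argument is flagged as the target. A simplified standalone sketch of that check, with a hypothetical helper name rather than the ACPICA API:

#include <stdint.h>
#include <stdio.h>

/* Fixed argument counts per op type, mirroring acpi_gbl_argument_count above */
static const uint8_t arg_count_by_type[] = { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };

/* True if the newly attached argument exceeds the parent's fixed count,
 * i.e. it occupies the parent's Target position. */
static int is_target_arg(uint32_t parent_type, uint32_t parent_arg_list_length)
{
	if (parent_type >= sizeof(arg_count_by_type))
		return 0;
	return parent_arg_list_length > arg_count_by_type[parent_type];
}

int main(void)
{
	/* Hypothetical parent of type 5 (two fixed arguments): the third one is the target */
	printf("third argument is the target: %d\n", is_target_arg(5, 3));
	return 0;
}
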
+
+/*
+ * This table is directly indexed by the opcodes. It returns
+ * an index into the opcode table (acpi_gbl_aml_op_info)
+ */
+const u8 acpi_gbl_short_op_index[256] = {
+/* 0 1 2 3 4 5 6 7 */
+/* 8 9 A B C D E F */
+/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
+/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
+/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
+/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
+/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
+/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
+/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
+/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
+/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
+/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
+/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
+/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
+/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
+/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
+/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
+/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
+};
+
+/*
+ * This table is indexed by the second opcode of the extended opcode
+ * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
+ */
+const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
+/* 0 1 2 3 4 5 6 7 */
+/* 8 9 A B C D E F */
+/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
+/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
+/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+/* 0x88 */ 0x7C,
+};
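
Taken together, the two index tables above implement a two-level dispatch: a one-byte opcode indexes acpi_gbl_short_op_index directly, while an extended 0x5Bxx opcode indexes acpi_gbl_long_op_index by its low byte, and either result selects a row of acpi_gbl_aml_op_info. A compressed standalone illustration of that lookup, using two entries copied from the tables above (the name strings are the editor's reading of those rows, not the real table contents):

#include <stdint.h>
#include <stdio.h>

/* Two representative entries taken from the index tables above */
static const uint8_t short_op_index[256] = { [0x10] = 0x08 };	/* ScopeOp    */
static const uint8_t long_op_index[0x89] = { [0x80] = 0x58 };	/* OpRegionOp */

/* Stand-in for acpi_gbl_aml_op_info: just a name per index */
static const char *op_names[0x90] = { [0x08] = "Scope", [0x58] = "OpRegion" };

static const char *opcode_name(uint16_t opcode)
{
	if (!(opcode & 0xFF00))				/* simple one-byte opcode */
		return op_names[short_op_index[(uint8_t)opcode]];
	if ((opcode & 0xFF00) == 0x5B00 && (uint8_t)opcode <= 0x88)
		return op_names[long_op_index[(uint8_t)opcode]];
	return "unknown";
}

int main(void)
{
	printf("%s %s\n", opcode_name(0x10), opcode_name(0x5B80));	/* Scope OpRegion */
	return 0;
}
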
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 2494caf47755..abc4c48b2edd 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 608dc20dc173..6a4b6fb39f32 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index fdb2e71f3046..c1934bf04f0a 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 4137dcb352d1..91fa73a6e55e 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -202,14 +202,6 @@ u8 acpi_ps_is_leading_char(u32 c)
}
/*
- * Is "c" a namestring prefix character?
- */
-u8 acpi_ps_is_prefix_char(u32 c)
-{
- return ((u8) (c == '\\' || c == '^'));
-}
-
-/*
* Get op's name (4-byte name segment) or 0 if unnamed
*/
#ifdef ACPI_FUTURE_USAGE
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index ab96cf47896d..abd65624754f 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 963e16225797..f68254268965 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 856ff075b6ab..f3a9276ac665 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 147feb6aa2a0..7816d4eef04e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,7 +84,7 @@ static u8 acpi_rs_count_set_bits(u16 bit_field)
bit_field &= (u16) (bit_field - 1);
}
- return bits_set;
+ return (bits_set);
}
/*******************************************************************************
@@ -407,7 +407,9 @@ acpi_rs_get_list_length(u8 * aml_buffer,
/* Validate the Resource Type and Resource Length */
- status = acpi_ut_validate_resource(aml_buffer, &resource_index);
+ status =
+ acpi_ut_validate_resource(NULL, aml_buffer,
+ &resource_index);
if (ACPI_FAILURE(status)) {
/*
* Exit on failure. Cannot continue because the descriptor length
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 311cbc4f05fa..f8b55b426c9d 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -98,7 +98,7 @@ acpi_buffer_to_resource(u8 *aml_buffer,
/* Perform the AML-to-Resource conversion */
- status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
+ status = acpi_ut_walk_aml_resources(NULL, aml_buffer, aml_buffer_length,
acpi_rs_convert_aml_to_resources,
&current_resource_ptr);
if (status == AE_AML_NO_RESOURCE_END_TAG) {
@@ -174,7 +174,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
/* Do the conversion */
resource = output_buffer->pointer;
- status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length,
+ status = acpi_ut_walk_aml_resources(NULL, aml_start, aml_buffer_length,
acpi_rs_convert_aml_to_resources,
&resource);
if (ACPI_FAILURE(status)) {
@@ -480,8 +480,7 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
- (u32) aml_size_needed,
- acpi_format_exception(status)));
+ (u32)aml_size_needed, acpi_format_exception(status)));
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 4d11b072388c..cab51445189d 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,419 +77,16 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
static void
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
-#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
-#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
-#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
-
-/*******************************************************************************
- *
- * Resource Descriptor info tables
- *
- * Note: The first table entry must be a Title or Literal and must contain
- * the table length (number of table entries)
- *
- ******************************************************************************/
-
-struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
- "Descriptor Length", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
- acpi_gbl_he_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
- acpi_gbl_ll_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
- acpi_gbl_shr_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
- "Interrupt Count", NULL},
- {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
- "Interrupt List", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
- acpi_gbl_typ_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
- acpi_gbl_bm_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
- acpi_gbl_siz_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
- NULL},
- {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
- NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
- "Start-Dependent-Functions", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
- "Descriptor Length", NULL},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
- "Compatibility Priority", acpi_gbl_config_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
- "Performance/Robustness", acpi_gbl_config_decode}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
- "End-Dependent-Functions", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_io[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
- acpi_gbl_io_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
- NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
- "Fixed I/O", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
- "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
- "Vendor Specific", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
- {ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
- NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
- NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
- "24-Bit Memory Range", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
- "Write Protect", acpi_gbl_rw_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
- "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
- "32-Bit Memory Range", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
- "Write Protect", acpi_gbl_rw_decode},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
- "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
- "32-Bit Fixed Memory Range", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
- "Write Protect", acpi_gbl_rw_decode},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
- "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
- "16-Bit WORD Address Space", NULL},
- {ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
- "Translation Offset", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
- "Address Length", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
- "32-Bit DWORD Address Space", NULL},
- {ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
- "Translation Offset", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
- "Address Length", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
- "64-Bit QWORD Address Space", NULL},
- {ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
- "Translation Offset", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
- "Address Length", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
- "64-Bit Extended Address Space", NULL},
- {ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
- "Granularity", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
- "Address Minimum", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
- "Address Maximum", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
- "Translation Offset", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
- "Address Length", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
- "Type-Specific Attribute", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
- "Extended IRQ", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
- "Type", acpi_gbl_consume_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
- "Triggering", acpi_gbl_he_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
- acpi_gbl_ll_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
- acpi_gbl_shr_decode},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
- "Interrupt Count", NULL},
- {ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
- "Interrupt List", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
- "Generic Register", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
- "Access Size", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
- "ConnectionType", acpi_gbl_ct_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
- "ProducerConsumer", acpi_gbl_consume_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
- acpi_gbl_ppc_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
- acpi_gbl_shr_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
- "IoRestriction", acpi_gbl_ior_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
- acpi_gbl_he_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
- acpi_gbl_ll_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
- "DebounceTimeout", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
- "ResourceSource", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
- "PinTableLength", NULL},
- {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
- NULL},
- {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
- NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
- "FixedDma", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
- "RequestLines", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
- acpi_gbl_dts_decode},
-};
-
-#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
- {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
-
-struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
- "Common Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS
-};
-
-struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
- "I2C Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
- ACPI_RSD_OFFSET(i2c_serial_bus.
- access_mode),
- "AccessMode", acpi_gbl_am_decode},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
- "ConnectionSpeed", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
- "SlaveAddress", NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
- "Spi Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
- ACPI_RSD_OFFSET(spi_serial_bus.
- wire_mode), "WireMode",
- acpi_gbl_wm_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
- "DevicePolarity", acpi_gbl_dp_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
- "DataBitLength", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
- "ClockPhase", acpi_gbl_cph_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
- "ClockPolarity", acpi_gbl_cpo_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
- "DeviceSelection", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
- "ConnectionSpeed", NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
- "Uart Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
- ACPI_RSD_OFFSET(uart_serial_bus.
- flow_control),
- "FlowControl", acpi_gbl_fc_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
- "StopBits", acpi_gbl_sb_decode},
- {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
- "DataBits", acpi_gbl_bpb_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
- acpi_gbl_ed_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
- acpi_gbl_pt_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
- "LinesEnabled", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
- "RxFifoSize", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
- "TxFifoSize", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
- "ConnectionSpeed", NULL},
-};
-
-/*
- * Tables used for common address descriptor flag fields
- */
-static struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
- NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
- "Consumer/Producer", acpi_gbl_consume_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
- acpi_gbl_dec_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
- "Min Relocatability", acpi_gbl_min_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
- "Max Relocatability", acpi_gbl_max_decode}
-};
-
-static struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
- {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
- "Resource Type", (void *)"Memory Range"},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
- "Write Protect", acpi_gbl_rw_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
- "Caching", acpi_gbl_mem_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
- "Range Type", acpi_gbl_mtp_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
- "Translation", acpi_gbl_ttp_decode}
-};
-
-static struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
- {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
- "Resource Type", (void *)"I/O Range"},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
- "Range Type", acpi_gbl_rng_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
- "Translation", acpi_gbl_ttp_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
- "Translation Type", acpi_gbl_trs_decode}
-};
-
-/*
- * Table used to dump _PRT contents
- */
-static struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
- {ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
- {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
- {ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
- {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
-};
-
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_descriptor
*
- * PARAMETERS: Resource
+ * PARAMETERS: resource - Buffer containing the resource
+ * table - Table entry to decode the resource
*
* RETURN: None
*
- * DESCRIPTION:
+ * DESCRIPTION: Dump a resource descriptor based on a dump table entry.
*
******************************************************************************/
@@ -654,7 +251,8 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
/*
* Optional resource_source for Address resources
*/
- acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
+ acpi_rs_dump_resource_source(ACPI_CAST_PTR
+ (struct
acpi_resource_source,
target));
break;
@@ -765,8 +363,9 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
ACPI_FUNCTION_ENTRY();
- if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
- || !(_COMPONENT & acpi_dbg_layer)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
return;
}
@@ -827,8 +426,9 @@ void acpi_rs_dump_irq_list(u8 * route_table)
ACPI_FUNCTION_ENTRY();
- if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
- || !(_COMPONENT & acpi_dbg_layer)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
return;
}
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
new file mode 100644
index 000000000000..46192bd53653
--- /dev/null
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -0,0 +1,454 @@
+/*******************************************************************************
+ *
+ * Module Name: rsdumpinfo - Tables used to display resource descriptors.
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsdumpinfo")
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
+#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
+#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
+/*******************************************************************************
+ *
+ * Resource Descriptor info tables
+ *
+ * Note: The first table entry must be a Title or Literal and must contain
+ * the table length (number of table entries)
+ *
+ ******************************************************************************/
+struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
+ "Descriptor Length", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
+ acpi_gbl_he_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
+ "Interrupt Count", NULL},
+ {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
+ "Interrupt List", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
+ acpi_gbl_typ_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
+ acpi_gbl_bm_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
+ acpi_gbl_siz_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
+ NULL},
+ {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
+ "Start-Dependent-Functions", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
+ "Descriptor Length", NULL},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
+ "Compatibility Priority", acpi_gbl_config_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
+ "Performance/Robustness", acpi_gbl_config_decode}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
+ "End-Dependent-Functions", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_io[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
+ acpi_gbl_io_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
+ "Fixed I/O", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
+ "Vendor Specific", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
+ {ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
+ "24-Bit Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
+ "32-Bit Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
+ "32-Bit Fixed Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
+ "16-Bit WORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
+ "32-Bit DWORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
+ "64-Bit QWORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
+ "64-Bit Extended Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
+ "Granularity", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
+ "Address Minimum", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
+ "Address Maximum", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
+ "Type-Specific Attribute", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
+ "Extended IRQ", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
+ "Type", acpi_gbl_consume_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
+ "Triggering", acpi_gbl_he_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
+ "Interrupt Count", NULL},
+ {ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
+ "Interrupt List", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
+ "Generic Register", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
+ "Access Size", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
+ "ConnectionType", acpi_gbl_ct_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
+ "ProducerConsumer", acpi_gbl_consume_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
+ acpi_gbl_ppc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharing",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
+ "IoRestriction", acpi_gbl_ior_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
+ acpi_gbl_he_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
+ "DebounceTimeout", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
+ "ResourceSource", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
+ "PinTableLength", NULL},
+ {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
+ NULL},
+ {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
+ NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
+ "FixedDma", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
+ "RequestLines", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
+ acpi_gbl_dts_decode},
+};
+
+#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
+ {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
+
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
+ "Common Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS
+};
+
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
+ "I2C Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(i2c_serial_bus.
+ access_mode),
+ "AccessMode", acpi_gbl_am_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
+ "SlaveAddress", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
+ "Spi Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(spi_serial_bus.
+ wire_mode), "WireMode",
+ acpi_gbl_wm_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
+ "DevicePolarity", acpi_gbl_dp_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
+ "DataBitLength", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
+ "ClockPhase", acpi_gbl_cph_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
+ "ClockPolarity", acpi_gbl_cpo_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
+ "DeviceSelection", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
+ "Uart Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
+ ACPI_RSD_OFFSET(uart_serial_bus.
+ flow_control),
+ "FlowControl", acpi_gbl_fc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
+ "StopBits", acpi_gbl_sb_decode},
+ {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
+ "DataBits", acpi_gbl_bpb_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
+ acpi_gbl_ed_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
+ acpi_gbl_pt_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
+ "LinesEnabled", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
+ "RxFifoSize", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
+ "TxFifoSize", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
+ "ConnectionSpeed", NULL},
+};
+
+/*
+ * Tables used for common address descriptor flag fields
+ */
+struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
+ NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
+ "Consumer/Producer", acpi_gbl_consume_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
+ acpi_gbl_dec_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
+ "Min Relocatability", acpi_gbl_min_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
+ "Max Relocatability", acpi_gbl_max_decode}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
+ {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
+ "Resource Type", (void *)"Memory Range"},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
+ "Caching", acpi_gbl_mem_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
+ "Range Type", acpi_gbl_mtp_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
+ "Translation", acpi_gbl_ttp_decode}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
+ {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
+ "Resource Type", (void *)"I/O Range"},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
+ "Range Type", acpi_gbl_rng_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
+ "Translation", acpi_gbl_ttp_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
+ "Translation Type", acpi_gbl_trs_decode}
+};
+
+/*
+ * Table used to dump _PRT contents
+ */
+struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
+ {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
+ {ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
+ {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
+};
+
+#endif
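
Editor's sketch (not part of the patch): the tables in rsdumpinfo.c above are pure data; a walker such as acpi_rs_dump_descriptor() in rsdump.c interprets them. The loop below is a simplified, hypothetical reconstruction of that walk — the first entry's offset field carries the table length, and each following entry names an offset into union acpi_resource_data; most opcodes and the real output formatting are omitted.

	static void rs_dump_table_sketch(void *resource,
					 struct acpi_rsdump_info *table)
	{
		u8 *target;
		u8 count;

		/* First entry is a Title/Literal; its offset holds the entry count */

		count = table->offset;

		while (count) {
			target = ACPI_ADD_PTR(u8, resource, table->offset);

			switch (table->opcode) {
			case ACPI_RSD_UINT8:
				acpi_os_printf("%28s : %2.2X\n", table->name, *target);
				break;

			case ACPI_RSD_UINT16:
				acpi_os_printf("%28s : %4.4X\n", table->name,
					       ACPI_GET16(target));
				break;

			default:
				break;	/* Titles, flags and lists omitted in this sketch */
			}

			table++;
			count--;
		}
	}
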
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index a9fa5158200b..41fed78e0de6 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index f6a081057a22..ca183755a6f9 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index e23a9ec248cb..364decc1028a 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("rsirq")
* acpi_rs_get_irq
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
+struct acpi_rsconvert_info acpi_rs_get_irq[9] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ,
ACPI_RS_SIZE(struct acpi_resource_irq),
ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)},
@@ -80,7 +80,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3},
- /* Get flags: Triggering[0], Polarity[3], Sharing[4] */
+ /* Get flags: Triggering[0], Polarity[3], Sharing[4], Wake[5] */
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
AML_OFFSET(irq.flags),
@@ -92,7 +92,11 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
AML_OFFSET(irq.flags),
- 4}
+ 4},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.wake_capable),
+ AML_OFFSET(irq.flags),
+ 5}
};
/*******************************************************************************
@@ -101,7 +105,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
+struct acpi_rsconvert_info acpi_rs_set_irq[14] = {
/* Start with a default descriptor of length 3 */
{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ,
@@ -114,7 +118,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
AML_OFFSET(irq.irq_mask),
ACPI_RS_OFFSET(data.irq.interrupt_count)},
- /* Set the flags byte */
+ /* Set flags: Triggering[0], Polarity[3], Sharing[4], Wake[5] */
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
AML_OFFSET(irq.flags),
@@ -128,6 +132,10 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
AML_OFFSET(irq.flags),
4},
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.wake_capable),
+ AML_OFFSET(irq.flags),
+ 5},
+
/*
* All done if the output descriptor length is required to be 3
* (i.e., optimization to 2 bytes cannot be attempted)
@@ -181,7 +189,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
+struct acpi_rsconvert_info acpi_rs_convert_ext_irq[10] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_IRQ,
ACPI_RS_SIZE(struct acpi_resource_extended_irq),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_irq)},
@@ -190,8 +198,10 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
sizeof(struct aml_resource_extended_irq),
0},
- /* Flag bits */
-
+ /*
+ * Flags: Producer/Consumer[0], Triggering[1], Polarity[2],
+ * Sharing[3], Wake[4]
+ */
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.producer_consumer),
AML_OFFSET(extended_irq.flags),
0},
@@ -208,19 +218,21 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
AML_OFFSET(extended_irq.flags),
3},
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.wake_capable),
+ AML_OFFSET(extended_irq.flags),
+ 4},
+
/* IRQ Table length (Byte4) */
{ACPI_RSC_COUNT, ACPI_RS_OFFSET(data.extended_irq.interrupt_count),
AML_OFFSET(extended_irq.interrupt_count),
- sizeof(u32)}
- ,
+ sizeof(u32)},
/* Copy every IRQ in the table, each is 32 bits */
{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
AML_OFFSET(extended_irq.interrupts[0]),
- 0}
- ,
+ 0},
/* Optional resource_source (Index and String) */
@@ -285,7 +297,6 @@ struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
* request_lines
* Channels
*/
-
{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
AML_OFFSET(fixed_dma.request_lines),
2},
@@ -293,5 +304,4 @@ struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
AML_OFFSET(fixed_dma.width),
1},
-
};
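
A rough illustration (not part of the patch) of what the added ACPI_RSC_1BITFLAG entries do when the generic converter in rsmisc.c applies them: each extracts one bit of the AML flags byte into the internal resource structure. The flags layout follows the comments in the hunk above; the helper name and the exact struct aml_resource_irq layout are assumed for illustration.

	static void irq_flags_sketch(struct aml_resource_irq *aml,
				     struct acpi_resource *resource)
	{
		u8 flags = aml->flags;

		/* Flags byte: Triggering[0], Polarity[3], Sharing[4], Wake[5] */

		resource->data.irq.triggering   = (flags >> 0) & 0x01;
		resource->data.irq.polarity     = (flags >> 3) & 0x01;
		resource->data.irq.sharable     = (flags >> 4) & 0x01;
		resource->data.irq.wake_capable = (flags >> 5) & 0x01;	/* new in this patch */
	}
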
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 8b64db9a3fd2..ee2e206fc6c8 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -217,9 +217,10 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Perform final sanity check on the new AML resource descriptor */
- status =
- acpi_ut_validate_resource(ACPI_CAST_PTR
- (union aml_resource, aml), NULL);
+ status = acpi_ut_validate_resource(NULL,
+ ACPI_CAST_PTR(union
+ aml_resource,
+ aml), NULL);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 4fd611ad02b4..ebc773a1b350 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -156,8 +156,7 @@ struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = {
{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
0,
- sizeof(u8)}
- ,
+ sizeof(u8)},
/* Vendor data */
@@ -181,8 +180,7 @@ struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = {
{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
0,
- sizeof(u8)}
- ,
+ sizeof(u8)},
/* Vendor data */
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index c6f291c2bc83..d5bf05a96096 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -136,30 +136,30 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
/*
* Mask and shift the flag bit
*/
- ACPI_SET8(destination) = (u8)
- ((ACPI_GET8(source) >> info->value) & 0x01);
+ ACPI_SET8(destination,
+ ((ACPI_GET8(source) >> info->value) & 0x01));
break;
case ACPI_RSC_2BITFLAG:
/*
* Mask and shift the flag bits
*/
- ACPI_SET8(destination) = (u8)
- ((ACPI_GET8(source) >> info->value) & 0x03);
+ ACPI_SET8(destination,
+ ((ACPI_GET8(source) >> info->value) & 0x03));
break;
case ACPI_RSC_3BITFLAG:
/*
* Mask and shift the flag bits
*/
- ACPI_SET8(destination) = (u8)
- ((ACPI_GET8(source) >> info->value) & 0x07);
+ ACPI_SET8(destination,
+ ((ACPI_GET8(source) >> info->value) & 0x07));
break;
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
- ACPI_SET8(destination) = (u8) item_count;
+ ACPI_SET8(destination, item_count);
resource->length = resource->length +
(info->value * (item_count - 1));
@@ -168,7 +168,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
case ACPI_RSC_COUNT16:
item_count = aml_resource_length;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
resource->length = resource->length +
(info->value * (item_count - 1));
@@ -181,13 +181,13 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
resource->length = resource->length + item_count;
item_count = item_count / 2;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_GPIO_VEN:
item_count = ACPI_GET8(source);
- ACPI_SET8(destination) = (u8)item_count;
+ ACPI_SET8(destination, item_count);
resource->length = resource->length +
(info->value * item_count);
@@ -216,7 +216,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
}
resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_SERIAL_VEN:
@@ -224,7 +224,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
item_count = ACPI_GET16(source) - info->value;
resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_SERIAL_RES:
@@ -234,7 +234,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
- ACPI_GET16(source) - info->value;
resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_LENGTH:
@@ -385,7 +385,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
}
target = ACPI_ADD_PTR(char, resource, info->value);
- ACPI_SET8(target) = (u8) item_count;
+ ACPI_SET8(target, item_count);
break;
case ACPI_RSC_BITMASK16:
@@ -401,7 +401,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
}
target = ACPI_ADD_PTR(char, resource, info->value);
- ACPI_SET8(target) = (u8) item_count;
+ ACPI_SET8(target, item_count);
break;
case ACPI_RSC_EXIT_NE:
@@ -514,37 +514,40 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
/*
* Clear the flag byte
*/
- ACPI_SET8(destination) = 0;
+ ACPI_SET8(destination, 0);
break;
case ACPI_RSC_1BITFLAG:
/*
* Mask and shift the flag bit
*/
- ACPI_SET8(destination) |= (u8)
- ((ACPI_GET8(source) & 0x01) << info->value);
+ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
+ ((ACPI_GET8(source) & 0x01) << info->
+ value));
break;
case ACPI_RSC_2BITFLAG:
/*
* Mask and shift the flag bits
*/
- ACPI_SET8(destination) |= (u8)
- ((ACPI_GET8(source) & 0x03) << info->value);
+ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
+ ((ACPI_GET8(source) & 0x03) << info->
+ value));
break;
case ACPI_RSC_3BITFLAG:
/*
* Mask and shift the flag bits
*/
- ACPI_SET8(destination) |= (u8)
- ((ACPI_GET8(source) & 0x07) << info->value);
+ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
+ ((ACPI_GET8(source) & 0x07) << info->
+ value));
break;
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
- ACPI_SET8(destination) = (u8) item_count;
+ ACPI_SET8(destination, item_count);
aml_length =
(u16) (aml_length +
@@ -561,18 +564,18 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
case ACPI_RSC_COUNT_GPIO_PIN:
item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)aml_length;
+ ACPI_SET16(destination, aml_length);
aml_length = (u16)(aml_length + item_count * 2);
target = ACPI_ADD_PTR(void, aml, info->value);
- ACPI_SET16(target) = (u16)aml_length;
+ ACPI_SET16(target, aml_length);
acpi_rs_set_resource_length(aml_length, aml);
break;
case ACPI_RSC_COUNT_GPIO_VEN:
item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)item_count;
+ ACPI_SET16(destination, item_count);
aml_length =
(u16)(aml_length + (info->value * item_count));
@@ -584,7 +587,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
/* Set resource source string length */
item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)aml_length;
+ ACPI_SET16(destination, aml_length);
/* Compute offset for the Vendor Data */
@@ -594,7 +597,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
/* Set vendor offset only if there is vendor data */
if (resource->data.gpio.vendor_length) {
- ACPI_SET16(target) = (u16)aml_length;
+ ACPI_SET16(target, aml_length);
}
acpi_rs_set_resource_length(aml_length, aml);
@@ -603,7 +606,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
case ACPI_RSC_COUNT_SERIAL_VEN:
item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = item_count + info->value;
+ ACPI_SET16(destination, item_count + info->value);
aml_length = (u16)(aml_length + item_count);
acpi_rs_set_resource_length(aml_length, aml);
break;
@@ -686,7 +689,8 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
* Optional resource_source (Index and String)
*/
aml_length =
- acpi_rs_set_resource_source(aml, (acpi_rs_length)
+ acpi_rs_set_resource_source(aml,
+ (acpi_rs_length)
aml_length, source);
acpi_rs_set_resource_length(aml_length, aml);
break;
@@ -706,10 +710,12 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
/*
* 8-bit encoded bitmask (DMA macro)
*/
- ACPI_SET8(destination) = (u8)
- acpi_rs_encode_bitmask(source,
- *ACPI_ADD_PTR(u8, resource,
- info->value));
+ ACPI_SET8(destination,
+ acpi_rs_encode_bitmask(source,
+ *ACPI_ADD_PTR(u8,
+ resource,
+ info->
+ value)));
break;
case ACPI_RSC_BITMASK16:
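
The ACPI_SETx() conversions above reflect a macro-style change: the old macros expanded to an lvalue that callers assigned or OR-ed into, while the new ones take the value as a second argument. The definitions below are only an approximation for illustration (renamed to avoid a clash); the real ones live in acmacros.h, which this excerpt does not show.

	/* Old style (approximate): macro yields an lvalue that callers assign into */
	#define ACPI_SET8_OLD(ptr)       (*ACPI_CAST8 (ptr))

	/* New style (as used by this patch): destination and value as arguments */
	#define ACPI_SET8_NEW(ptr, val)  (*ACPI_CAST8 (ptr) = (u8) (val))

		ACPI_SET8_OLD(destination) = (u8) item_count;	/* before */
		ACPI_SET8_NEW(destination, item_count);		/* after  */
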
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 9aa5e689b444..fe49fc43e10f 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("rsserial")
* acpi_rs_convert_gpio
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
+struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
ACPI_RS_SIZE(struct acpi_resource_gpio),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
@@ -75,10 +75,14 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
AML_OFFSET(gpio.flags),
0},
- {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
AML_OFFSET(gpio.int_flags),
3},
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.wake_capable),
+ AML_OFFSET(gpio.int_flags),
+ 4},
+
{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
AML_OFFSET(gpio.int_flags),
0},
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 37d5241c0acf..a44953c6f75d 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
mask |= (0x1 << list[i]);
}
- return mask;
+ return (mask);
}
/*******************************************************************************
@@ -358,8 +358,10 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
*
* Zero the entire area of the buffer.
*/
- total_length = (u32)
- ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
+ total_length =
+ (u32)
+ ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
+ 1;
total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
@@ -675,7 +677,9 @@ acpi_rs_get_method_data(acpi_handle handle,
/* Execute the method, no parameters */
status =
- acpi_ut_evaluate_object(handle, path, ACPI_BTYPE_BUFFER, &obj_desc);
+ acpi_ut_evaluate_object(ACPI_CAST_PTR
+ (struct acpi_namespace_node, handle), path,
+ ACPI_BTYPE_BUFFER, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
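
A small usage note for the acpi_rs_encode_bitmask() hunk above: the function packs a list of IRQ or DMA channel numbers into the bitmask form used by the small AML descriptors. The values below are chosen purely for illustration.

	u8 irqs[3] = { 3, 5, 10 };
	u16 mask;

	mask = acpi_rs_encode_bitmask(irqs, 3);

	/* mask == 0x0428: bits 3, 5 and 10 set */
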
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 5aad744b5b83..15d6eaef0e28 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -423,7 +423,7 @@ ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
*
* RETURN: Status
*
- * DESCRIPTION: Walk a resource template for the specified evice to find a
+ * DESCRIPTION: Walk a resource template for the specified device to find a
* vendor-defined resource that matches the supplied UUID and
* UUID subtype. Returns a struct acpi_resource of type Vendor.
*
@@ -522,57 +522,42 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
/*******************************************************************************
*
- * FUNCTION: acpi_walk_resources
+ * FUNCTION: acpi_walk_resource_buffer
*
- * PARAMETERS: device_handle - Handle to the device object for the
- * device we are querying
- * name - Method name of the resources we want.
- * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
- * METHOD_NAME__AEI)
+ * PARAMETERS: buffer - Formatted buffer returned by one of the
+ * various Get*Resource functions
* user_function - Called for each resource
* context - Passed to user_function
*
* RETURN: Status
*
- * DESCRIPTION: Retrieves the current or possible resource list for the
- * specified device. The user_function is called once for
- * each resource in the list.
+ * DESCRIPTION: Walks the input resource template. The user_function is called
+ * once for each resource in the list.
*
******************************************************************************/
+
acpi_status
-acpi_walk_resources(acpi_handle device_handle,
- char *name,
- acpi_walk_resource_callback user_function, void *context)
+acpi_walk_resource_buffer(struct acpi_buffer * buffer,
+ acpi_walk_resource_callback user_function,
+ void *context)
{
- acpi_status status;
- struct acpi_buffer buffer;
+ acpi_status status = AE_OK;
struct acpi_resource *resource;
struct acpi_resource *resource_end;
- ACPI_FUNCTION_TRACE(acpi_walk_resources);
+ ACPI_FUNCTION_TRACE(acpi_walk_resource_buffer);
/* Parameter validation */
- if (!device_handle || !user_function || !name ||
- (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ if (!buffer || !buffer->pointer || !user_function) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS/_PRS/_AEI resource list */
-
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_rs_get_method_data(device_handle, name, &buffer);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Buffer now contains the resource list */
+ /* Buffer contains the resource list and length */
- resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
+ resource = ACPI_CAST_PTR(struct acpi_resource, buffer->pointer);
resource_end =
- ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
+ ACPI_ADD_PTR(struct acpi_resource, buffer->pointer, buffer->length);
/* Walk the resource list until the end_tag is found (or buffer end) */
@@ -606,11 +591,63 @@ acpi_walk_resources(acpi_handle device_handle,
/* Get the next resource descriptor */
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
+ resource = ACPI_NEXT_RESOURCE(resource);
}
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_walk_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are querying
+ * name - Method name of the resources we want.
+ * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
+ * METHOD_NAME__AEI)
+ * user_function - Called for each resource
+ * context - Passed to user_function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieves the current or possible resource list for the
+ * specified device. The user_function is called once for
+ * each resource in the list.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_walk_resources(acpi_handle device_handle,
+ char *name,
+ acpi_walk_resource_callback user_function, void *context)
+{
+ acpi_status status;
+ struct acpi_buffer buffer;
+
+ ACPI_FUNCTION_TRACE(acpi_walk_resources);
+
+ /* Parameter validation */
+
+ if (!device_handle || !user_function || !name ||
+ (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the _CRS/_PRS/_AEI resource list */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_rs_get_method_data(device_handle, name, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Walk the resource list and clean up */
+
+ status = acpi_walk_resource_buffer(&buffer, user_function, context);
ACPI_FREE(buffer.pointer);
return_ACPI_STATUS(status);
}
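
To show how the reworked acpi_walk_resources()/acpi_walk_resource_buffer() pair is typically used, here is a minimal caller sketch; the callback name, the counting logic, and the device_handle variable are invented for illustration only.

	static acpi_status count_irqs(struct acpi_resource *resource, void *context)
	{
		u32 *count = context;

		if (resource->type == ACPI_RESOURCE_TYPE_IRQ ||
		    resource->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
			(*count)++;
		}

		return (AE_OK);
	}

		/* Walk the device's _CRS; the callback runs once per descriptor */

		u32 irq_count = 0;
		acpi_status status;

		status = acpi_walk_resources(device_handle, METHOD_NAME__CRS,
					     count_irqs, &irq_count);
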
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 390651860bf0..74181bf181ec 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -172,6 +172,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
* FUNCTION: acpi_tb_init_generic_address
*
* PARAMETERS: generic_address - GAS struct to be initialized
+ * space_id - ACPI Space ID for this register
* byte_width - Width of this register
* address - Address of the register
*
@@ -407,8 +408,8 @@ static void acpi_tb_convert_fadt(void)
 * should be zero are indeed zero. This will work around BIOSs that
* inadvertently place values in these fields.
*
- * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
- * offset 45, 55, 95, and the word located at offset 109, 110.
+ * The ACPI 1.0 reserved fields that will be zeroed are the bytes located
+ * at offset 45, 55, 95, and the word located at offset 109, 110.
*
* Note: The FADT revision value is unreliable. Only the length can be
* trusted.
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 77d1db29a725..e4f4f02d49e7 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index f540ae462925..e57cd38004e3 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 285e24b97382..ce3d5db39a9c 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,7 @@ acpi_status acpi_tb_initialize_facs(void)
ACPI_CAST_INDIRECT_PTR(struct
acpi_table_header,
&acpi_gbl_FACS));
- return status;
+ return (status);
}
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index f5632780421d..b35a5e6d653a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,6 @@
#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acnamesp.h"
#include "actables.h"
#define _COMPONENT ACPI_TABLES
@@ -437,7 +436,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
*
******************************************************************************/
acpi_status
-acpi_install_table_handler(acpi_tbl_handler handler, void *context)
+acpi_install_table_handler(acpi_table_handler handler, void *context)
{
acpi_status status;
@@ -483,7 +482,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
* DESCRIPTION: Remove table event handler
*
******************************************************************************/
-acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
+acpi_status acpi_remove_table_handler(acpi_table_handler handler)
{
acpi_status status;
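The rename from acpi_tbl_handler to acpi_table_handler is a typedef spelling change only; callers keep the same shape. A registration sketch, assuming the (u32 event, void *table, void *context) handler signature this typedef has carried:

	/* Hypothetical handler: called on table load/unload events */
	static acpi_status my_table_event_handler(u32 event, void *table,
						  void *context)
	{
		if (event == ACPI_TABLE_EVENT_LOAD) {
			acpi_os_printf("A table was loaded\n");
		}
		return (AE_OK);
	}

	status = acpi_install_table_handler(my_table_event_handler, NULL);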
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index a5e1e4e47098..67e046ec8f0a 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -192,7 +192,7 @@ static acpi_status acpi_tb_load_namespace(void)
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
+ ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 28f330230f99..7c2ecfb7c2c3 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 64880306133d..698b9d385516 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -214,7 +214,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
(space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
- return_UINT32(0);
+ return_VALUE(0);
}
range_info = acpi_gbl_address_range_list[space_id];
@@ -256,7 +256,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
range_info = range_info->next;
}
- return_UINT32(overlap_count);
+ return_VALUE(overlap_count);
}
/*******************************************************************************
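The return_UINT32 -> return_VALUE switch above trades a type-specific exit macro for the generic value form. A sketch of what return_VALUE amounts to in debug builds, assuming the usual acmacros.h pattern (the non-debug build reduces to a plain parenthesized return):

	#define return_VALUE(value) \
		do { \
			acpi_ut_value_exit(ACPI_DEBUG_PARAMETERS, (u64)(value)); \
			return (value); \
		} while (0)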
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index ed29d474095e..e0ffb580f4b0 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index e1d40ed26390..e0e8579deaac 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utclib.c b/drivers/acpi/acpica/utclib.c
deleted file mode 100644
index 19ea4755aa73..000000000000
--- a/drivers/acpi/acpica/utclib.c
+++ /dev/null
@@ -1,749 +0,0 @@
-/******************************************************************************
- *
- * Module Name: cmclib - Local implementation of C library functions
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2012, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include "accommon.h"
-
-/*
- * These implementations of standard C Library routines can optionally be
- * used if a C library is not available. In general, they are less efficient
- * than an inline or assembly implementation
- */
-
-#define _COMPONENT ACPI_UTILITIES
-ACPI_MODULE_NAME("cmclib")
-
-#ifndef ACPI_USE_SYSTEM_CLIBRARY
-#define NEGATIVE 1
-#define POSITIVE 0
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_memcmp (memcmp)
- *
- * PARAMETERS: buffer1 - First Buffer
- * buffer2 - Second Buffer
- * count - Maximum # of bytes to compare
- *
- * RETURN: Index where Buffers mismatched, or 0 if Buffers matched
- *
- * DESCRIPTION: Compare two Buffers, with a maximum length
- *
- ******************************************************************************/
-int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count)
-{
-
- return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*buffer1 -
- (unsigned char)*buffer2));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_memcpy (memcpy)
- *
- * PARAMETERS: dest - Target of the copy
- * src - Source buffer to copy
- * count - Number of bytes to copy
- *
- * RETURN: Dest
- *
- * DESCRIPTION: Copy arbitrary bytes of memory
- *
- ******************************************************************************/
-
-void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count)
-{
- char *new = (char *)dest;
- char *old = (char *)src;
-
- while (count) {
- *new = *old;
- new++;
- old++;
- count--;
- }
-
- return (dest);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_memset (memset)
- *
- * PARAMETERS: dest - Buffer to set
- * value - Value to set each byte of memory
- * count - Number of bytes to set
- *
- * RETURN: Dest
- *
- * DESCRIPTION: Initialize a buffer to a known value.
- *
- ******************************************************************************/
-
-void *acpi_ut_memset(void *dest, u8 value, acpi_size count)
-{
- char *new = (char *)dest;
-
- while (count) {
- *new = (char)value;
- new++;
- count--;
- }
-
- return (dest);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strlen (strlen)
- *
- * PARAMETERS: string - Null terminated string
- *
- * RETURN: Length
- *
- * DESCRIPTION: Returns the length of the input string
- *
- ******************************************************************************/
-
-acpi_size acpi_ut_strlen(const char *string)
-{
- u32 length = 0;
-
- /* Count the string until a null is encountered */
-
- while (*string) {
- length++;
- string++;
- }
-
- return (length);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strcpy (strcpy)
- *
- * PARAMETERS: dst_string - Target of the copy
- * src_string - The source string to copy
- *
- * RETURN: dst_string
- *
- * DESCRIPTION: Copy a null terminated string
- *
- ******************************************************************************/
-
-char *acpi_ut_strcpy(char *dst_string, const char *src_string)
-{
- char *string = dst_string;
-
- /* Move bytes brute force */
-
- while (*src_string) {
- *string = *src_string;
-
- string++;
- src_string++;
- }
-
- /* Null terminate */
-
- *string = 0;
- return (dst_string);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strncpy (strncpy)
- *
- * PARAMETERS: dst_string - Target of the copy
- * src_string - The source string to copy
- * count - Maximum # of bytes to copy
- *
- * RETURN: dst_string
- *
- * DESCRIPTION: Copy a null terminated string, with a maximum length
- *
- ******************************************************************************/
-
-char *acpi_ut_strncpy(char *dst_string, const char *src_string, acpi_size count)
-{
- char *string = dst_string;
-
- /* Copy the string */
-
- for (string = dst_string;
- count && (count--, (*string++ = *src_string++));) {;
- }
-
- /* Pad with nulls if necessary */
-
- while (count--) {
- *string = 0;
- string++;
- }
-
- /* Return original pointer */
-
- return (dst_string);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strcmp (strcmp)
- *
- * PARAMETERS: string1 - First string
- * string2 - Second string
- *
- * RETURN: Index where strings mismatched, or 0 if strings matched
- *
- * DESCRIPTION: Compare two null terminated strings
- *
- ******************************************************************************/
-
-int acpi_ut_strcmp(const char *string1, const char *string2)
-{
-
- for (; (*string1 == *string2); string2++) {
- if (!*string1++) {
- return (0);
- }
- }
-
- return ((unsigned char)*string1 - (unsigned char)*string2);
-}
-
-#ifdef ACPI_FUTURE_IMPLEMENTATION
-/* Not used at this time */
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strchr (strchr)
- *
- * PARAMETERS: string - Search string
- * ch - character to search for
- *
- * RETURN: Ptr to char or NULL if not found
- *
- * DESCRIPTION: Search a string for a character
- *
- ******************************************************************************/
-
-char *acpi_ut_strchr(const char *string, int ch)
-{
-
- for (; (*string); string++) {
- if ((*string) == (char)ch) {
- return ((char *)string);
- }
- }
-
- return (NULL);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strncmp (strncmp)
- *
- * PARAMETERS: string1 - First string
- * string2 - Second string
- * count - Maximum # of bytes to compare
- *
- * RETURN: Index where strings mismatched, or 0 if strings matched
- *
- * DESCRIPTION: Compare two null terminated strings, with a maximum length
- *
- ******************************************************************************/
-
-int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count)
-{
-
- for (; count-- && (*string1 == *string2); string2++) {
- if (!*string1++) {
- return (0);
- }
- }
-
- return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*string1 -
- (unsigned char)*string2));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strcat (Strcat)
- *
- * PARAMETERS: dst_string - Target of the copy
- * src_string - The source string to copy
- *
- * RETURN: dst_string
- *
- * DESCRIPTION: Append a null terminated string to a null terminated string
- *
- ******************************************************************************/
-
-char *acpi_ut_strcat(char *dst_string, const char *src_string)
-{
- char *string;
-
- /* Find end of the destination string */
-
- for (string = dst_string; *string++;) {;
- }
-
- /* Concatenate the string */
-
- for (--string; (*string++ = *src_string++);) {;
- }
-
- return (dst_string);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strncat (strncat)
- *
- * PARAMETERS: dst_string - Target of the copy
- * src_string - The source string to copy
- * count - Maximum # of bytes to copy
- *
- * RETURN: dst_string
- *
- * DESCRIPTION: Append a null terminated string to a null terminated string,
- * with a maximum count.
- *
- ******************************************************************************/
-
-char *acpi_ut_strncat(char *dst_string, const char *src_string, acpi_size count)
-{
- char *string;
-
- if (count) {
-
- /* Find end of the destination string */
-
- for (string = dst_string; *string++;) {;
- }
-
- /* Concatenate the string */
-
- for (--string; (*string++ = *src_string++) && --count;) {;
- }
-
- /* Null terminate if necessary */
-
- if (!count) {
- *string = 0;
- }
- }
-
- return (dst_string);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strstr (strstr)
- *
- * PARAMETERS: string1 - Target string
- * string2 - Substring to search for
- *
- * RETURN: Where substring match starts, Null if no match found
- *
- * DESCRIPTION: Checks if String2 occurs in String1. This is not really a
- * full implementation of strstr, only sufficient for command
- * matching
- *
- ******************************************************************************/
-
-char *acpi_ut_strstr(char *string1, char *string2)
-{
- char *string;
-
- if (acpi_ut_strlen(string2) > acpi_ut_strlen(string1)) {
- return (NULL);
- }
-
- /* Walk entire string, comparing the letters */
-
- for (string = string1; *string2;) {
- if (*string2 != *string) {
- return (NULL);
- }
-
- string2++;
- string++;
- }
-
- return (string1);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strtoul (strtoul)
- *
- * PARAMETERS: string - Null terminated string
- * terminater - Where a pointer to the terminating byte is
- * returned
- * base - Radix of the string
- *
- * RETURN: Converted value
- *
- * DESCRIPTION: Convert a string into a 32-bit unsigned value.
- * Note: use acpi_ut_strtoul64 for 64-bit integers.
- *
- ******************************************************************************/
-
-u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base)
-{
- u32 converted = 0;
- u32 index;
- u32 sign;
- const char *string_start;
- u32 return_value = 0;
- acpi_status status = AE_OK;
-
- /*
- * Save the value of the pointer to the buffer's first
- * character, save the current errno value, and then
- * skip over any white space in the buffer:
- */
- string_start = string;
- while (ACPI_IS_SPACE(*string) || *string == '\t') {
- ++string;
- }
-
- /*
- * The buffer may contain an optional plus or minus sign.
- * If it does, then skip over it but remember what is was:
- */
- if (*string == '-') {
- sign = NEGATIVE;
- ++string;
- } else if (*string == '+') {
- ++string;
- sign = POSITIVE;
- } else {
- sign = POSITIVE;
- }
-
- /*
- * If the input parameter Base is zero, then we need to
- * determine if it is octal, decimal, or hexadecimal:
- */
- if (base == 0) {
- if (*string == '0') {
- if (acpi_ut_to_lower(*(++string)) == 'x') {
- base = 16;
- ++string;
- } else {
- base = 8;
- }
- } else {
- base = 10;
- }
- } else if (base < 2 || base > 36) {
- /*
- * The specified Base parameter is not in the domain of
- * this function:
- */
- goto done;
- }
-
- /*
- * For octal and hexadecimal bases, skip over the leading
- * 0 or 0x, if they are present.
- */
- if (base == 8 && *string == '0') {
- string++;
- }
-
- if (base == 16 &&
- *string == '0' && acpi_ut_to_lower(*(++string)) == 'x') {
- string++;
- }
-
- /*
- * Main loop: convert the string to an unsigned long:
- */
- while (*string) {
- if (ACPI_IS_DIGIT(*string)) {
- index = (u32)((u8)*string - '0');
- } else {
- index = (u32)acpi_ut_to_upper(*string);
- if (ACPI_IS_UPPER(index)) {
- index = index - 'A' + 10;
- } else {
- goto done;
- }
- }
-
- if (index >= base) {
- goto done;
- }
-
- /*
- * Check to see if value is out of range:
- */
-
- if (return_value > ((ACPI_UINT32_MAX - (u32)index) / (u32)base)) {
- status = AE_ERROR;
- return_value = 0; /* reset */
- } else {
- return_value *= base;
- return_value += index;
- converted = 1;
- }
-
- ++string;
- }
-
- done:
- /*
- * If appropriate, update the caller's pointer to the next
- * unconverted character in the buffer.
- */
- if (terminator) {
- if (converted == 0 && return_value == 0 && string != NULL) {
- *terminator = (char *)string_start;
- } else {
- *terminator = (char *)string;
- }
- }
-
- if (status == AE_ERROR) {
- return_value = ACPI_UINT32_MAX;
- }
-
- /*
- * If a minus sign was present, then "the conversion is negated":
- */
- if (sign == NEGATIVE) {
- return_value = (ACPI_UINT32_MAX - return_value) + 1;
- }
-
- return (return_value);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_to_upper (TOUPPER)
- *
- * PARAMETERS: c - Character to convert
- *
- * RETURN: Converted character as an int
- *
- * DESCRIPTION: Convert character to uppercase
- *
- ******************************************************************************/
-
-int acpi_ut_to_upper(int c)
-{
-
- return (ACPI_IS_LOWER(c) ? ((c) - 0x20) : (c));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_to_lower (TOLOWER)
- *
- * PARAMETERS: c - Character to convert
- *
- * RETURN: Converted character as an int
- *
- * DESCRIPTION: Convert character to lowercase
- *
- ******************************************************************************/
-
-int acpi_ut_to_lower(int c)
-{
-
- return (ACPI_IS_UPPER(c) ? ((c) + 0x20) : (c));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: is* functions
- *
- * DESCRIPTION: is* functions use the ctype table below
- *
- ******************************************************************************/
-
-const u8 _acpi_ctype[257] = {
- _ACPI_CN, /* 0x00 0 NUL */
- _ACPI_CN, /* 0x01 1 SOH */
- _ACPI_CN, /* 0x02 2 STX */
- _ACPI_CN, /* 0x03 3 ETX */
- _ACPI_CN, /* 0x04 4 EOT */
- _ACPI_CN, /* 0x05 5 ENQ */
- _ACPI_CN, /* 0x06 6 ACK */
- _ACPI_CN, /* 0x07 7 BEL */
- _ACPI_CN, /* 0x08 8 BS */
- _ACPI_CN | _ACPI_SP, /* 0x09 9 TAB */
- _ACPI_CN | _ACPI_SP, /* 0x0A 10 LF */
- _ACPI_CN | _ACPI_SP, /* 0x0B 11 VT */
- _ACPI_CN | _ACPI_SP, /* 0x0C 12 FF */
- _ACPI_CN | _ACPI_SP, /* 0x0D 13 CR */
- _ACPI_CN, /* 0x0E 14 SO */
- _ACPI_CN, /* 0x0F 15 SI */
- _ACPI_CN, /* 0x10 16 DLE */
- _ACPI_CN, /* 0x11 17 DC1 */
- _ACPI_CN, /* 0x12 18 DC2 */
- _ACPI_CN, /* 0x13 19 DC3 */
- _ACPI_CN, /* 0x14 20 DC4 */
- _ACPI_CN, /* 0x15 21 NAK */
- _ACPI_CN, /* 0x16 22 SYN */
- _ACPI_CN, /* 0x17 23 ETB */
- _ACPI_CN, /* 0x18 24 CAN */
- _ACPI_CN, /* 0x19 25 EM */
- _ACPI_CN, /* 0x1A 26 SUB */
- _ACPI_CN, /* 0x1B 27 ESC */
- _ACPI_CN, /* 0x1C 28 FS */
- _ACPI_CN, /* 0x1D 29 GS */
- _ACPI_CN, /* 0x1E 30 RS */
- _ACPI_CN, /* 0x1F 31 US */
- _ACPI_XS | _ACPI_SP, /* 0x20 32 ' ' */
- _ACPI_PU, /* 0x21 33 '!' */
- _ACPI_PU, /* 0x22 34 '"' */
- _ACPI_PU, /* 0x23 35 '#' */
- _ACPI_PU, /* 0x24 36 '$' */
- _ACPI_PU, /* 0x25 37 '%' */
- _ACPI_PU, /* 0x26 38 '&' */
- _ACPI_PU, /* 0x27 39 ''' */
- _ACPI_PU, /* 0x28 40 '(' */
- _ACPI_PU, /* 0x29 41 ')' */
- _ACPI_PU, /* 0x2A 42 '*' */
- _ACPI_PU, /* 0x2B 43 '+' */
- _ACPI_PU, /* 0x2C 44 ',' */
- _ACPI_PU, /* 0x2D 45 '-' */
- _ACPI_PU, /* 0x2E 46 '.' */
- _ACPI_PU, /* 0x2F 47 '/' */
- _ACPI_XD | _ACPI_DI, /* 0x30 48 '0' */
- _ACPI_XD | _ACPI_DI, /* 0x31 49 '1' */
- _ACPI_XD | _ACPI_DI, /* 0x32 50 '2' */
- _ACPI_XD | _ACPI_DI, /* 0x33 51 '3' */
- _ACPI_XD | _ACPI_DI, /* 0x34 52 '4' */
- _ACPI_XD | _ACPI_DI, /* 0x35 53 '5' */
- _ACPI_XD | _ACPI_DI, /* 0x36 54 '6' */
- _ACPI_XD | _ACPI_DI, /* 0x37 55 '7' */
- _ACPI_XD | _ACPI_DI, /* 0x38 56 '8' */
- _ACPI_XD | _ACPI_DI, /* 0x39 57 '9' */
- _ACPI_PU, /* 0x3A 58 ':' */
- _ACPI_PU, /* 0x3B 59 ';' */
- _ACPI_PU, /* 0x3C 60 '<' */
- _ACPI_PU, /* 0x3D 61 '=' */
- _ACPI_PU, /* 0x3E 62 '>' */
- _ACPI_PU, /* 0x3F 63 '?' */
- _ACPI_PU, /* 0x40 64 '@' */
- _ACPI_XD | _ACPI_UP, /* 0x41 65 'A' */
- _ACPI_XD | _ACPI_UP, /* 0x42 66 'B' */
- _ACPI_XD | _ACPI_UP, /* 0x43 67 'C' */
- _ACPI_XD | _ACPI_UP, /* 0x44 68 'D' */
- _ACPI_XD | _ACPI_UP, /* 0x45 69 'E' */
- _ACPI_XD | _ACPI_UP, /* 0x46 70 'F' */
- _ACPI_UP, /* 0x47 71 'G' */
- _ACPI_UP, /* 0x48 72 'H' */
- _ACPI_UP, /* 0x49 73 'I' */
- _ACPI_UP, /* 0x4A 74 'J' */
- _ACPI_UP, /* 0x4B 75 'K' */
- _ACPI_UP, /* 0x4C 76 'L' */
- _ACPI_UP, /* 0x4D 77 'M' */
- _ACPI_UP, /* 0x4E 78 'N' */
- _ACPI_UP, /* 0x4F 79 'O' */
- _ACPI_UP, /* 0x50 80 'P' */
- _ACPI_UP, /* 0x51 81 'Q' */
- _ACPI_UP, /* 0x52 82 'R' */
- _ACPI_UP, /* 0x53 83 'S' */
- _ACPI_UP, /* 0x54 84 'T' */
- _ACPI_UP, /* 0x55 85 'U' */
- _ACPI_UP, /* 0x56 86 'V' */
- _ACPI_UP, /* 0x57 87 'W' */
- _ACPI_UP, /* 0x58 88 'X' */
- _ACPI_UP, /* 0x59 89 'Y' */
- _ACPI_UP, /* 0x5A 90 'Z' */
- _ACPI_PU, /* 0x5B 91 '[' */
- _ACPI_PU, /* 0x5C 92 '\' */
- _ACPI_PU, /* 0x5D 93 ']' */
- _ACPI_PU, /* 0x5E 94 '^' */
- _ACPI_PU, /* 0x5F 95 '_' */
- _ACPI_PU, /* 0x60 96 '`' */
- _ACPI_XD | _ACPI_LO, /* 0x61 97 'a' */
- _ACPI_XD | _ACPI_LO, /* 0x62 98 'b' */
- _ACPI_XD | _ACPI_LO, /* 0x63 99 'c' */
- _ACPI_XD | _ACPI_LO, /* 0x64 100 'd' */
- _ACPI_XD | _ACPI_LO, /* 0x65 101 'e' */
- _ACPI_XD | _ACPI_LO, /* 0x66 102 'f' */
- _ACPI_LO, /* 0x67 103 'g' */
- _ACPI_LO, /* 0x68 104 'h' */
- _ACPI_LO, /* 0x69 105 'i' */
- _ACPI_LO, /* 0x6A 106 'j' */
- _ACPI_LO, /* 0x6B 107 'k' */
- _ACPI_LO, /* 0x6C 108 'l' */
- _ACPI_LO, /* 0x6D 109 'm' */
- _ACPI_LO, /* 0x6E 110 'n' */
- _ACPI_LO, /* 0x6F 111 'o' */
- _ACPI_LO, /* 0x70 112 'p' */
- _ACPI_LO, /* 0x71 113 'q' */
- _ACPI_LO, /* 0x72 114 'r' */
- _ACPI_LO, /* 0x73 115 's' */
- _ACPI_LO, /* 0x74 116 't' */
- _ACPI_LO, /* 0x75 117 'u' */
- _ACPI_LO, /* 0x76 118 'v' */
- _ACPI_LO, /* 0x77 119 'w' */
- _ACPI_LO, /* 0x78 120 'x' */
- _ACPI_LO, /* 0x79 121 'y' */
- _ACPI_LO, /* 0x7A 122 'z' */
- _ACPI_PU, /* 0x7B 123 '{' */
- _ACPI_PU, /* 0x7C 124 '|' */
- _ACPI_PU, /* 0x7D 125 '}' */
- _ACPI_PU, /* 0x7E 126 '~' */
- _ACPI_CN, /* 0x7F 127 DEL */
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x80 to 0x8F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x90 to 0x9F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xA0 to 0xAF */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xB0 to 0xBF */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xC0 to 0xCF */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xD0 to 0xDF */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xE0 to 0xEF */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xF0 to 0xFF */
- 0 /* 0x100 */
-};
-
-#endif /* ACPI_USE_SYSTEM_CLIBRARY */
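Deleting utclib.c drops ACPICA's private fallback for the standard string/ctype routines from the kernel tree; the kernel build defines ACPI_USE_SYSTEM_CLIBRARY, so these helpers resolve to the host C library instead. Roughly, and as an assumption about the platform headers rather than part of this patch, the mapping looks like:

	#define ACPI_STRLEN(s)		strlen((s))
	#define ACPI_MEMCPY(d, s, n)	memcpy((d), (s), (n))
	#define ACPI_MEMSET(d, v, n)	memset((d), (v), (n))
	#define ACPI_TOUPPER(c)		toupper((c))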
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 294692ae76e9..e4c9291fc0a3 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -785,7 +785,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
break;
@@ -795,7 +795,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
&dest_desc->event.
os_semaphore);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
break;
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5d95166245ae..c57d9cc07ba9 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -166,11 +166,9 @@ acpi_debug_print(u32 requested_debug_level,
acpi_thread_id thread_id;
va_list args;
- /*
- * Stay silent if the debug level or component ID is disabled
- */
- if (!(requested_debug_level & acpi_dbg_level) ||
- !(component_id & acpi_dbg_layer)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(requested_debug_level, component_id)) {
return;
}
@@ -236,8 +234,9 @@ acpi_debug_print_raw(u32 requested_debug_level,
{
va_list args;
- if (!(requested_debug_level & acpi_dbg_level) ||
- !(component_id & acpi_dbg_layer)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(requested_debug_level, component_id)) {
return;
}
@@ -272,9 +271,13 @@ acpi_ut_trace(u32 line_number,
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s\n", acpi_gbl_fn_entry_str);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s\n", acpi_gbl_fn_entry_str);
+ }
}
ACPI_EXPORT_SYMBOL(acpi_ut_trace)
@@ -304,9 +307,14 @@ acpi_ut_trace_ptr(u32 line_number,
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %p\n", acpi_gbl_fn_entry_str, pointer);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %p\n", acpi_gbl_fn_entry_str,
+ pointer);
+ }
}
/*******************************************************************************
@@ -335,9 +343,14 @@ acpi_ut_trace_str(u32 line_number,
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %s\n", acpi_gbl_fn_entry_str, string);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %s\n", acpi_gbl_fn_entry_str,
+ string);
+ }
}
/*******************************************************************************
@@ -366,9 +379,14 @@ acpi_ut_trace_u32(u32 line_number,
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %08X\n", acpi_gbl_fn_entry_str, integer);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %08X\n",
+ acpi_gbl_fn_entry_str, integer);
+ }
}
/*******************************************************************************
@@ -393,9 +411,13 @@ acpi_ut_exit(u32 line_number,
const char *module_name, u32 component_id)
{
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s\n", acpi_gbl_fn_exit_str);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s\n", acpi_gbl_fn_exit_str);
+ }
acpi_gbl_nesting_level--;
}
@@ -425,17 +447,23 @@ acpi_ut_status_exit(u32 line_number,
u32 component_id, acpi_status status)
{
- if (ACPI_SUCCESS(status)) {
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name,
- component_id, "%s %s\n", acpi_gbl_fn_exit_str,
- acpi_format_exception(status));
- } else {
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name,
- component_id, "%s ****Exception****: %s\n",
- acpi_gbl_fn_exit_str,
- acpi_format_exception(status));
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ if (ACPI_SUCCESS(status)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name,
+ module_name, component_id, "%s %s\n",
+ acpi_gbl_fn_exit_str,
+ acpi_format_exception(status));
+ } else {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name,
+ module_name, component_id,
+ "%s ****Exception****: %s\n",
+ acpi_gbl_fn_exit_str,
+ acpi_format_exception(status));
+ }
}
acpi_gbl_nesting_level--;
@@ -465,10 +493,15 @@ acpi_ut_value_exit(u32 line_number,
const char *module_name, u32 component_id, u64 value)
{
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str,
- ACPI_FORMAT_UINT64(value));
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %8.8X%8.8X\n",
+ acpi_gbl_fn_exit_str,
+ ACPI_FORMAT_UINT64(value));
+ }
acpi_gbl_nesting_level--;
}
@@ -497,9 +530,14 @@ acpi_ut_ptr_exit(u32 line_number,
const char *module_name, u32 component_id, u8 *ptr)
{
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %p\n", acpi_gbl_fn_exit_str, ptr);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %p\n", acpi_gbl_fn_exit_str,
+ ptr);
+ }
acpi_gbl_nesting_level--;
}
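Every trace helper above now tests ACPI_IS_DEBUG_ENABLED() before calling acpi_debug_print(), so the common case (tracing disabled) skips the call and its varargs setup entirely. The macro itself is not shown in this file; presumably it is just the old two-mask test factored out, along the lines of:

	#define ACPI_IS_DEBUG_ENABLED(level, component) \
		(((level) & acpi_dbg_level) && ((component) & acpi_dbg_layer))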
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 60a158472d82..11e2e02e1618 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 798105443d0f..2541de420249 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -340,7 +340,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
{
union acpi_operand_object **internal_obj;
- ACPI_FUNCTION_TRACE(ut_delete_internal_object_list);
+ ACPI_FUNCTION_ENTRY();
/* Walk the null-terminated internal list */
@@ -351,7 +351,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
/* Free the combined parameter pointer list and object array */
ACPI_FREE(obj_list);
- return_VOID;
+ return;
}
/*******************************************************************************
@@ -484,7 +484,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
union acpi_generic_state *state;
u32 i;
- ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
+ ACPI_FUNCTION_NAME(ut_update_object_reference);
while (object) {
@@ -493,7 +493,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Object %p is NS handle\n", object));
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
/*
@@ -530,18 +530,42 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
*/
for (i = 0; i < object->package.count; i++) {
/*
- * Push each element onto the stack for later processing.
- * Note: There can be null elements within the package,
- * these are simply ignored
+ * Null package elements are legal and can be simply
+ * ignored.
*/
- status =
- acpi_ut_create_update_state_and_push
- (object->package.elements[i], action,
- &state_list);
- if (ACPI_FAILURE(status)) {
- goto error_exit;
+ next_object = object->package.elements[i];
+ if (!next_object) {
+ continue;
+ }
+
+ switch (next_object->common.type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ /*
+ * For these very simple sub-objects, we can just
+ * update the reference count here and continue.
+ * Greatly increases performance of this operation.
+ */
+ acpi_ut_update_ref_count(next_object,
+ action);
+ break;
+
+ default:
+ /*
+ * For complex sub-objects, push them onto the stack
+ * for later processing (this eliminates recursion.)
+ */
+ status =
+ acpi_ut_create_update_state_and_push
+ (next_object, action, &state_list);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+ break;
}
}
+ next_object = NULL;
break;
case ACPI_TYPE_BUFFER_FIELD:
@@ -619,7 +643,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
}
}
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
error_exit:
@@ -633,7 +657,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
acpi_ut_delete_generic_state(state);
}
- return_ACPI_STATUS(status);
+ return (status);
}
/*******************************************************************************
@@ -652,12 +676,12 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
void acpi_ut_add_reference(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object);
+ ACPI_FUNCTION_NAME(ut_add_reference);
/* Ensure that we have a valid object */
if (!acpi_ut_valid_internal_object(object)) {
- return_VOID;
+ return;
}
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
@@ -667,7 +691,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
/* Increment the reference count */
(void)acpi_ut_update_object_reference(object, REF_INCREMENT);
- return_VOID;
+ return;
}
/*******************************************************************************
@@ -685,7 +709,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
void acpi_ut_remove_reference(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object);
+ ACPI_FUNCTION_NAME(ut_remove_reference);
/*
* Allow a NULL pointer to be passed in, just ignore it. This saves
@@ -694,13 +718,13 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
*/
if (!object ||
(ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
- return_VOID;
+ return;
}
/* Ensure that we have a valid object */
if (!acpi_ut_valid_internal_object(object)) {
- return_VOID;
+ return;
}
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
@@ -713,5 +737,5 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
* of all subobjects!)
*/
(void)acpi_ut_update_object_reference(object, REF_DECREMENT);
- return_VOID;
+ return;
}
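Two distinct things happen in utdelete.c. First, the entry macros drop from ACPI_FUNCTION_TRACE* to ACPI_FUNCTION_NAME/ACPI_FUNCTION_ENTRY, and the exits become plain returns to match, since return_ACPI_STATUS()/return_VOID exist only to pair an exit trace with the entry trace. A sketch of the pairing, assuming the usual acmacros.h behavior:

	ACPI_FUNCTION_TRACE(some_function);	/* logs function entry */
	/* ... function body ... */
	return_ACPI_STATUS(status);		/* logs exit, then returns */

	ACPI_FUNCTION_NAME(some_function);	/* name only, no entry trace */
	/* ... function body ... */
	return (status);			/* nothing to pair with */

Second, the package walk is a performance change: integer, string and buffer elements get their reference count adjusted in place via acpi_ut_update_ref_count(), and only complex elements are pushed onto the state list that keeps the traversal iterative.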
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index a9c65fbea5f4..c3f3a7e7bdc7 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,7 +68,7 @@ ACPI_MODULE_NAME("uteval")
******************************************************************************/
acpi_status
-acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
+acpi_ut_evaluate_object(struct acpi_namespace_node * prefix_node,
char *path,
u32 expected_return_btypes,
union acpi_operand_object **return_desc)
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 23b98945f6b7..a0ab7c02e87c 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ed1893155f8b..ffecf4b4f0dd 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -293,11 +293,11 @@ acpi_status acpi_ut_init_globals(void)
/* GPE support */
+ acpi_gbl_all_gpes_initialized = FALSE;
acpi_gbl_gpe_xrupt_list_head = NULL;
acpi_gbl_gpe_fadt_blocks[0] = NULL;
acpi_gbl_gpe_fadt_blocks[1] = NULL;
acpi_current_gpe_count = 0;
- acpi_gbl_all_gpes_initialized = FALSE;
acpi_gbl_global_event_handler = NULL;
@@ -357,17 +357,24 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_root_node_struct.peer = NULL;
acpi_gbl_root_node_struct.object = NULL;
+#ifdef ACPI_DISASSEMBLER
+ acpi_gbl_external_list = NULL;
+#endif
+
#ifdef ACPI_DEBUG_OUTPUT
acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
#endif
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
acpi_gbl_display_final_mem_stats = FALSE;
+ acpi_gbl_disable_mem_tracking = FALSE;
#endif
return_ACPI_STATUS(AE_OK);
}
+/* Public globals */
+
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 774c3aefbf5d..43a170a74a61 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 246798e4c938..c5d1ac44c07d 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index b1eb7f17e110..5c26ad420344 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,11 +66,11 @@ acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock)
lock->num_readers = 0;
status = acpi_os_create_mutex(&lock->reader_mutex);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
status = acpi_os_create_mutex(&lock->writer_mutex);
- return status;
+ return (status);
}
void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
@@ -108,7 +108,7 @@ acpi_status acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock)
status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
/* Acquire the write lock only for the first reader */
@@ -121,7 +121,7 @@ acpi_status acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock)
}
acpi_os_release_mutex(lock->reader_mutex);
- return status;
+ return (status);
}
acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
@@ -130,7 +130,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
/* Release the write lock only for the very last reader */
@@ -141,7 +141,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
}
acpi_os_release_mutex(lock->reader_mutex);
- return status;
+ return (status);
}
/*******************************************************************************
@@ -165,7 +165,7 @@ acpi_status acpi_ut_acquire_write_lock(struct acpi_rw_lock *lock)
acpi_status status;
status = acpi_os_acquire_mutex(lock->writer_mutex, ACPI_WAIT_FOREVER);
- return status;
+ return (status);
}
void acpi_ut_release_write_lock(struct acpi_rw_lock *lock)
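Only the return style changes here, but the surrounding context shows the shape of this two-mutex reader/writer lock: readers serialize on reader_mutex to maintain num_readers, and the first reader in (last reader out) takes (releases) writer_mutex on behalf of the whole read side. A condensed sketch of the read path, mirroring the context lines above with error handling trimmed:

	acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER);
	if (++lock->num_readers == 1) {
		/* First reader blocks writers for the duration */
		acpi_os_acquire_mutex(lock->writer_mutex, ACPI_WAIT_FOREVER);
	}
	acpi_os_release_mutex(lock->reader_mutex);

	/* ... read-side work ... */

	acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER);
	if (--lock->num_readers == 0) {
		/* Last reader out lets writers in again */
		acpi_os_release_mutex(lock->writer_mutex);
	}
	acpi_os_release_mutex(lock->reader_mutex);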
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 49563674833a..909fe66e1934 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 9286a69eb9aa..785fdd07ae56 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,36 +48,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmisc")
-#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
-/*******************************************************************************
- *
- * FUNCTION: ut_convert_backslashes
- *
- * PARAMETERS: pathname - File pathname string to be converted
- *
- * RETURN: Modifies the input Pathname
- *
- * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
- * the entire input file pathname string.
- *
- ******************************************************************************/
-void ut_convert_backslashes(char *pathname)
-{
-
- if (!pathname) {
- return;
- }
-
- while (*pathname) {
- if (*pathname == '\\') {
- *pathname = '/';
- }
-
- pathname++;
- }
-}
-#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_is_pci_root_bridge
@@ -89,7 +59,6 @@ void ut_convert_backslashes(char *pathname)
* DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
*
******************************************************************************/
-
u8 acpi_ut_is_pci_root_bridge(char *id)
{
@@ -136,362 +105,6 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_allocate_owner_id
- *
- * PARAMETERS: owner_id - Where the new owner ID is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
- * track objects created by the table or method, to be deleted
- * when the method exits or the table is unloaded.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
-{
- u32 i;
- u32 j;
- u32 k;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
-
- /* Guard against multiple allocations of ID to the same location */
-
- if (*owner_id) {
- ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
- *owner_id));
- return_ACPI_STATUS(AE_ALREADY_EXISTS);
- }
-
- /* Mutex for the global ID mask */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Find a free owner ID, cycle through all possible IDs on repeated
- * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
- * to be scanned twice.
- */
- for (i = 0, j = acpi_gbl_last_owner_id_index;
- i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
- if (j >= ACPI_NUM_OWNERID_MASKS) {
- j = 0; /* Wraparound to start of mask array */
- }
-
- for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
- if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
-
- /* There are no free IDs in this mask */
-
- break;
- }
-
- if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
- /*
- * Found a free ID. The actual ID is the bit index plus one,
- * making zero an invalid Owner ID. Save this as the last ID
- * allocated and update the global ID mask.
- */
- acpi_gbl_owner_id_mask[j] |= (1 << k);
-
- acpi_gbl_last_owner_id_index = (u8)j;
- acpi_gbl_next_owner_id_offset = (u8)(k + 1);
-
- /*
- * Construct encoded ID from the index and bit position
- *
- * Note: Last [j].k (bit 255) is never used and is marked
- * permanently allocated (prevents +1 overflow)
- */
- *owner_id =
- (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
-
- ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
- "Allocated OwnerId: %2.2X\n",
- (unsigned int)*owner_id));
- goto exit;
- }
- }
-
- acpi_gbl_next_owner_id_offset = 0;
- }
-
- /*
- * All owner_ids have been allocated. This typically should
- * not happen since the IDs are reused after deallocation. The IDs are
- * allocated upon table load (one per table) and method execution, and
- * they are released when a table is unloaded or a method completes
- * execution.
- *
- * If this error happens, there may be very deep nesting of invoked control
- * methods, or there may be a bug where the IDs are not released.
- */
- status = AE_OWNER_ID_LIMIT;
- ACPI_ERROR((AE_INFO,
- "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
-
- exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_release_owner_id
- *
- * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID
- *
- * RETURN: None. No error is returned because we are either exiting a
- * control method or unloading a table. Either way, we would
- * ignore any error anyway.
- *
- * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
- *
- ******************************************************************************/
-
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
-{
- acpi_owner_id owner_id = *owner_id_ptr;
- acpi_status status;
- u32 index;
- u32 bit;
-
- ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
-
- /* Always clear the input owner_id (zero is an invalid ID) */
-
- *owner_id_ptr = 0;
-
- /* Zero is not a valid owner_ID */
-
- if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
- return_VOID;
- }
-
- /* Mutex for the global ID mask */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
- if (ACPI_FAILURE(status)) {
- return_VOID;
- }
-
- /* Normalize the ID to zero */
-
- owner_id--;
-
- /* Decode ID to index/offset pair */
-
- index = ACPI_DIV_32(owner_id);
- bit = 1 << ACPI_MOD_32(owner_id);
-
- /* Free the owner ID only if it is valid */
-
- if (acpi_gbl_owner_id_mask[index] & bit) {
- acpi_gbl_owner_id_mask[index] ^= bit;
- } else {
- ACPI_ERROR((AE_INFO,
- "Release of non-allocated OwnerId: 0x%2.2X",
- owner_id + 1));
- }
-
- (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strupr (strupr)
- *
- * PARAMETERS: src_string - The source string to convert
- *
- * RETURN: None
- *
- * DESCRIPTION: Convert string to uppercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-
-void acpi_ut_strupr(char *src_string)
-{
- char *string;
-
- ACPI_FUNCTION_ENTRY();
-
- if (!src_string) {
- return;
- }
-
- /* Walk entire string, uppercasing the letters */
-
- for (string = src_string; *string; string++) {
- *string = (char)ACPI_TOUPPER(*string);
- }
-
- return;
-}
-
-#ifdef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strlwr (strlwr)
- *
- * PARAMETERS: src_string - The source string to convert
- *
- * RETURN: None
- *
- * DESCRIPTION: Convert string to lowercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-
-void acpi_ut_strlwr(char *src_string)
-{
- char *string;
-
- ACPI_FUNCTION_ENTRY();
-
- if (!src_string) {
- return;
- }
-
- /* Walk entire string, lowercasing the letters */
-
- for (string = src_string; *string; string++) {
- *string = (char)ACPI_TOLOWER(*string);
- }
-
- return;
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_ut_stricmp
- *
- * PARAMETERS: string1 - first string to compare
- * string2 - second string to compare
- *
- * RETURN: int that signifies string relationship. Zero means strings
- * are equal.
- *
- * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
- * strings with no case sensitivity)
- *
- ******************************************************************************/
-
-int acpi_ut_stricmp(char *string1, char *string2)
-{
- int c1;
- int c2;
-
- do {
- c1 = tolower((int)*string1);
- c2 = tolower((int)*string2);
-
- string1++;
- string2++;
- }
- while ((c1 == c2) && (c1));
-
- return (c1 - c2);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_print_string
- *
- * PARAMETERS: string - Null terminated ASCII string
- * max_length - Maximum output length
- *
- * RETURN: None
- *
- * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
- * sequences.
- *
- ******************************************************************************/
-
-void acpi_ut_print_string(char *string, u8 max_length)
-{
- u32 i;
-
- if (!string) {
- acpi_os_printf("<\"NULL STRING PTR\">");
- return;
- }
-
- acpi_os_printf("\"");
- for (i = 0; string[i] && (i < max_length); i++) {
-
- /* Escape sequences */
-
- switch (string[i]) {
- case 0x07:
- acpi_os_printf("\\a"); /* BELL */
- break;
-
- case 0x08:
- acpi_os_printf("\\b"); /* BACKSPACE */
- break;
-
- case 0x0C:
- acpi_os_printf("\\f"); /* FORMFEED */
- break;
-
- case 0x0A:
- acpi_os_printf("\\n"); /* LINEFEED */
- break;
-
- case 0x0D:
- acpi_os_printf("\\r"); /* CARRIAGE RETURN */
- break;
-
- case 0x09:
- acpi_os_printf("\\t"); /* HORIZONTAL TAB */
- break;
-
- case 0x0B:
- acpi_os_printf("\\v"); /* VERTICAL TAB */
- break;
-
- case '\'': /* Single Quote */
- case '\"': /* Double Quote */
- case '\\': /* Backslash */
- acpi_os_printf("\\%c", (int)string[i]);
- break;
-
- default:
-
- /* Check for printable character or hex escape */
-
- if (ACPI_IS_PRINT(string[i])) {
- /* This is a normal character */
-
- acpi_os_printf("%c", (int)string[i]);
- } else {
- /* All others will be Hex escapes */
-
- acpi_os_printf("\\x%2.2X", (s32) string[i]);
- }
- break;
- }
- }
- acpi_os_printf("\"");
-
- if (i == max_length && string[i]) {
- acpi_os_printf("...");
- }
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_dword_byte_swap
*
* PARAMETERS: value - Value to be converted
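The large removal above takes the owner-ID bookkeeping and several string helpers out of utmisc.c. For reference, the owner-ID scheme encodes each valid ID (1-255) as a bit in a small mask array; the arithmetic, lifted from the removed lines, is:

	/* Allocate: free bit k of mask word j becomes ID (k + 1) + 32 * j */
	*owner_id = (acpi_owner_id)((k + 1) + ACPI_MUL_32(j));

	/* Release: normalize back to zero-based, then recover word and bit */
	owner_id--;
	index = ACPI_DIV_32(owner_id);
	bit = 1 << ACPI_MOD_32(owner_id);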
@@ -559,379 +172,6 @@ void acpi_ut_set_integer_width(u8 revision)
}
}
-#ifdef ACPI_DEBUG_OUTPUT
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_display_init_pathname
- *
- * PARAMETERS: type - Object type of the node
- * obj_handle - Handle whose pathname will be displayed
- * path - Additional path string to be appended.
- * (NULL if no extra path)
- *
- * RETURN: acpi_status
- *
- * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
- *
- ******************************************************************************/
-
-void
-acpi_ut_display_init_pathname(u8 type,
- struct acpi_namespace_node *obj_handle,
- char *path)
-{
- acpi_status status;
- struct acpi_buffer buffer;
-
- ACPI_FUNCTION_ENTRY();
-
- /* Only print the path if the appropriate debug level is enabled */
-
- if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
- return;
- }
-
- /* Get the full pathname to the node */
-
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
- if (ACPI_FAILURE(status)) {
- return;
- }
-
- /* Print what we're doing */
-
- switch (type) {
- case ACPI_TYPE_METHOD:
- acpi_os_printf("Executing ");
- break;
-
- default:
- acpi_os_printf("Initializing ");
- break;
- }
-
- /* Print the object type and pathname */
-
- acpi_os_printf("%-12s %s",
- acpi_ut_get_type_name(type), (char *)buffer.pointer);
-
- /* Extra path is used to append names like _STA, _INI, etc. */
-
- if (path) {
- acpi_os_printf(".%s", path);
- }
- acpi_os_printf("\n");
-
- ACPI_FREE(buffer.pointer);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_valid_acpi_char
- *
- * PARAMETERS: char - The character to be examined
- * position - Byte position (0-3)
- *
- * RETURN: TRUE if the character is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI character. Must be one of:
- * 1) Upper case alpha
- * 2) numeric
- * 3) underscore
- *
- * We allow a '!' as the last character because of the ASF! table
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position)
-{
-
- if (!((character >= 'A' && character <= 'Z') ||
- (character >= '0' && character <= '9') || (character == '_'))) {
-
- /* Allow a '!' in the last position */
-
- if (character == '!' && position == 3) {
- return (TRUE);
- }
-
- return (FALSE);
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_valid_acpi_name
- *
- * PARAMETERS: name - The name to be examined
- *
- * RETURN: TRUE if the name is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
- * 1) Upper case alpha
- * 2) numeric
- * 3) underscore
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_name(u32 name)
-{
- u32 i;
-
- ACPI_FUNCTION_ENTRY();
-
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (!acpi_ut_valid_acpi_char
- ((ACPI_CAST_PTR(char, &name))[i], i)) {
- return (FALSE);
- }
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_repair_name
- *
- * PARAMETERS: name - The ACPI name to be repaired
- *
- * RETURN: Repaired version of the name
- *
- * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
- * return the new name. NOTE: the Name parameter must reside in
- * read/write memory, cannot be a const.
- *
- * An ACPI Name must consist of valid ACPI characters. We will repair the name
- * if necessary because we don't want to abort because of this, but we want
- * all namespace names to be printable. A warning message is appropriate.
- *
- * This issue came up because there are in fact machines that exhibit
- * this problem, and we want to be able to enable ACPI support for them,
- * even though there are a few bad names.
- *
- ******************************************************************************/
-
-void acpi_ut_repair_name(char *name)
-{
- u32 i;
- u8 found_bad_char = FALSE;
- u32 original_name;
-
- ACPI_FUNCTION_NAME(ut_repair_name);
-
- ACPI_MOVE_NAME(&original_name, name);
-
- /* Check each character in the name */
-
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (acpi_ut_valid_acpi_char(name[i], i)) {
- continue;
- }
-
- /*
- * Replace a bad character with something printable, yet technically
- * still invalid. This prevents any collisions with existing "good"
- * names in the namespace.
- */
- name[i] = '*';
- found_bad_char = TRUE;
- }
-
- if (found_bad_char) {
-
- /* Report warning only if in strict mode or debug mode */
-
- if (!acpi_gbl_enable_interpreter_slack) {
- ACPI_WARNING((AE_INFO,
- "Found bad character(s) in name, repaired: [%4.4s]\n",
- name));
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found bad character(s) in name, repaired: [%4.4s]\n",
- name));
- }
- }
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strtoul64
- *
- * PARAMETERS: string - Null terminated string
- * base - Radix of the string: 16 or ACPI_ANY_BASE;
- * ACPI_ANY_BASE means 'in behalf of to_integer'
- * ret_integer - Where the converted integer is returned
- *
- * RETURN: Status and Converted value
- *
- * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- * 32-bit or 64-bit conversion, depending on the current mode
- * of the interpreter.
- * NOTE: Does not support Octal strings, not needed.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
-{
- u32 this_digit = 0;
- u64 return_value = 0;
- u64 quotient;
- u64 dividend;
- u32 to_integer_op = (base == ACPI_ANY_BASE);
- u32 mode32 = (acpi_gbl_integer_byte_width == 4);
- u8 valid_digits = 0;
- u8 sign_of0x = 0;
- u8 term = 0;
-
- ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
-
- switch (base) {
- case ACPI_ANY_BASE:
- case 16:
- break;
-
- default:
- /* Invalid Base */
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- if (!string) {
- goto error_exit;
- }
-
- /* Skip over any white space in the buffer */
-
- while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
- string++;
- }
-
- if (to_integer_op) {
- /*
- * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
- * We need to determine if it is decimal or hexadecimal.
- */
- if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
- sign_of0x = 1;
- base = 16;
-
- /* Skip over the leading '0x' */
- string += 2;
- } else {
- base = 10;
- }
- }
-
- /* Any string left? Check that '0x' is not followed by white space. */
-
- if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
- if (to_integer_op) {
- goto error_exit;
- } else {
- goto all_done;
- }
- }
-
- /*
- * Perform a 32-bit or 64-bit conversion, depending upon the current
- * execution mode of the interpreter
- */
- dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
-
- /* Main loop: convert the string to a 32- or 64-bit integer */
-
- while (*string) {
- if (ACPI_IS_DIGIT(*string)) {
-
- /* Convert ASCII 0-9 to Decimal value */
-
- this_digit = ((u8)*string) - '0';
- } else if (base == 10) {
-
- /* Digit is out of range; possible in to_integer case only */
-
- term = 1;
- } else {
- this_digit = (u8)ACPI_TOUPPER(*string);
- if (ACPI_IS_XDIGIT((char)this_digit)) {
-
- /* Convert ASCII Hex char to value */
-
- this_digit = this_digit - 'A' + 10;
- } else {
- term = 1;
- }
- }
-
- if (term) {
- if (to_integer_op) {
- goto error_exit;
- } else {
- break;
- }
- } else if ((valid_digits == 0) && (this_digit == 0)
- && !sign_of0x) {
-
- /* Skip zeros */
- string++;
- continue;
- }
-
- valid_digits++;
-
- if (sign_of0x
- && ((valid_digits > 16)
- || ((valid_digits > 8) && mode32))) {
- /*
- * This is to_integer operation case.
- * No any restrictions for string-to-integer conversion,
- * see ACPI spec.
- */
- goto error_exit;
- }
-
- /* Divide the digit into the correct position */
-
- (void)acpi_ut_short_divide((dividend - (u64)this_digit),
- base, &quotient, NULL);
-
- if (return_value > quotient) {
- if (to_integer_op) {
- goto error_exit;
- } else {
- break;
- }
- }
-
- return_value *= base;
- return_value += this_digit;
- string++;
- }
-
- /* All done, normal exit */
-
- all_done:
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
- ACPI_FORMAT_UINT64(return_value)));
-
- *ret_integer = return_value;
- return_ACPI_STATUS(AE_OK);
-
- error_exit:
- /* Base was set/validated above */
-
- if (base == 10) {
- return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
- } else {
- return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
- }
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_update_state_and_push
@@ -1097,3 +337,71 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
return_ACPI_STATUS(AE_AML_INTERNAL);
}
+
+#ifdef ACPI_DEBUG_OUTPUT
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_display_init_pathname
+ *
+ * PARAMETERS: type - Object type of the node
+ * obj_handle - Handle whose pathname will be displayed
+ * path - Additional path string to be appended.
+ * (NULL if no extra path)
+ *
+ * RETURN: acpi_status
+ *
+ * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_display_init_pathname(u8 type,
+ struct acpi_namespace_node *obj_handle,
+ char *path)
+{
+ acpi_status status;
+ struct acpi_buffer buffer;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Only print the path if the appropriate debug level is enabled */
+
+ if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
+ return;
+ }
+
+ /* Get the full pathname to the node */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ /* Print what we're doing */
+
+ switch (type) {
+ case ACPI_TYPE_METHOD:
+ acpi_os_printf("Executing ");
+ break;
+
+ default:
+ acpi_os_printf("Initializing ");
+ break;
+ }
+
+ /* Print the object type and pathname */
+
+ acpi_os_printf("%-12s %s",
+ acpi_ut_get_type_name(type), (char *)buffer.pointer);
+
+ /* Extra path is used to append names like _STA, _INI, etc. */
+
+ if (path) {
+ acpi_os_printf(".%s", path);
+ }
+ acpi_os_printf("\n");
+
+ ACPI_FREE(buffer.pointer);
+}
+#endif
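/*
 * acpi_ut_display_init_pathname() above follows a common ACPICA pattern:
 * bail out before doing any expensive formatting unless the relevant bit is
 * set in the global debug mask. A minimal standalone sketch of that pattern
 * (hypothetical names and bit value, not the ACPICA API):
 */
#include <stdio.h>

#define DBG_INIT_NAMES 0x0020u          /* assumed bit value, for illustration */

static unsigned int dbg_level = DBG_INIT_NAMES;

static void display_init_pathname(const char *type_name, const char *path,
                                  const char *extra)
{
        /* Skip the (potentially costly) pathname work when the bit is clear */
        if (!(dbg_level & DBG_INIT_NAMES))
                return;

        printf("%-12s %s", type_name, path);
        if (extra)
                printf(".%s", extra);   /* appended names such as _STA, _INI */
        printf("\n");
}

int main(void)
{
        display_init_pathname("Method", "\\_SB.PCI0.LPCB", "_INI");
        return 0;
}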
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 5ccf57c0d87e..22feb99b8e35 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 5c52ca78f6fa..1099f5c069f8 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -419,7 +419,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
{
ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
- /* Object must be a union acpi_operand_object */
+ /* Object must be of type union acpi_operand_object */
if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 676285d6116d..36a7d361d7cb 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
new file mode 100644
index 000000000000..835340b26d37
--- /dev/null
+++ b/drivers/acpi/acpica/utownerid.c
@@ -0,0 +1,218 @@
+/*******************************************************************************
+ *
+ * Module Name: utownerid - Support for Table/Method Owner IDs
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utownerid")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_owner_id
+ *
+ * PARAMETERS: owner_id - Where the new owner ID is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
+ * track objects created by the table or method, to be deleted
+ * when the method exits or the table is unloaded.
+ *
+ ******************************************************************************/
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
+{
+ u32 i;
+ u32 j;
+ u32 k;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
+
+ /* Guard against multiple allocations of ID to the same location */
+
+ if (*owner_id) {
+ ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
+ *owner_id));
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ }
+
+ /* Mutex for the global ID mask */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Find a free owner ID, cycle through all possible IDs on repeated
+ * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
+ * to be scanned twice.
+ */
+ for (i = 0, j = acpi_gbl_last_owner_id_index;
+ i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
+ if (j >= ACPI_NUM_OWNERID_MASKS) {
+ j = 0; /* Wraparound to start of mask array */
+ }
+
+ for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
+ if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
+
+ /* There are no free IDs in this mask */
+
+ break;
+ }
+
+ if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
+ /*
+ * Found a free ID. The actual ID is the bit index plus one,
+ * making zero an invalid Owner ID. Save this as the last ID
+ * allocated and update the global ID mask.
+ */
+ acpi_gbl_owner_id_mask[j] |= (1 << k);
+
+ acpi_gbl_last_owner_id_index = (u8)j;
+ acpi_gbl_next_owner_id_offset = (u8)(k + 1);
+
+ /*
+ * Construct encoded ID from the index and bit position
+ *
+ * Note: Last [j].k (bit 255) is never used and is marked
+ * permanently allocated (prevents +1 overflow)
+ */
+ *owner_id =
+ (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Allocated OwnerId: %2.2X\n",
+ (unsigned int)*owner_id));
+ goto exit;
+ }
+ }
+
+ acpi_gbl_next_owner_id_offset = 0;
+ }
+
+ /*
+ * All owner_ids have been allocated. This typically should
+ * not happen since the IDs are reused after deallocation. The IDs are
+ * allocated upon table load (one per table) and method execution, and
+ * they are released when a table is unloaded or a method completes
+ * execution.
+ *
+ * If this error happens, there may be very deep nesting of invoked control
+ * methods, or there may be a bug where the IDs are not released.
+ */
+ status = AE_OWNER_ID_LIMIT;
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
+
+ exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_release_owner_id
+ *
+ * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID
+ *
+ * RETURN: None. No error is returned because we are either exiting a
+ * control method or unloading a table. Either way, we would
+ * ignore any error anyway.
+ *
+ * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
+ *
+ ******************************************************************************/
+
+void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
+{
+ acpi_owner_id owner_id = *owner_id_ptr;
+ acpi_status status;
+ u32 index;
+ u32 bit;
+
+ ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
+
+ /* Always clear the input owner_id (zero is an invalid ID) */
+
+ *owner_id_ptr = 0;
+
+ /* Zero is not a valid owner_ID */
+
+ if (owner_id == 0) {
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
+ return_VOID;
+ }
+
+ /* Mutex for the global ID mask */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* Normalize the ID to a zero-based bit index */
+
+ owner_id--;
+
+ /* Decode ID to index/offset pair */
+
+ index = ACPI_DIV_32(owner_id);
+ bit = 1 << ACPI_MOD_32(owner_id);
+
+ /* Free the owner ID only if it is valid */
+
+ if (acpi_gbl_owner_id_mask[index] & bit) {
+ acpi_gbl_owner_id_mask[index] ^= bit;
+ } else {
+ ACPI_ERROR((AE_INFO,
+ "Release of non-allocated OwnerId: 0x%2.2X",
+ owner_id + 1));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ return_VOID;
+}
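/*
 * The two functions above encode an owner ID as "bit k of 32-bit mask word
 * j", storing (j * 32) + k + 1 so that zero can serve as the invalid ID.
 * A minimal standalone sketch of that encoding (hypothetical names, no
 * mutex, no rotating start index):
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_MASKS 8                     /* 8 x 32 bits covers IDs 1..255 */

static uint32_t id_mask[NUM_MASKS];

static int allocate_id(uint8_t *out_id)
{
        for (unsigned int j = 0; j < NUM_MASKS; j++) {
                if (id_mask[j] == UINT32_MAX)
                        continue;       /* no free bit in this word */

                for (unsigned int k = 0; k < 32; k++) {
                        /* Leave the very last bit unused, as ACPICA does,
                         * so the ID always fits in the range 1..255. */
                        if (j == NUM_MASKS - 1 && k == 31)
                                break;

                        if (!(id_mask[j] & (1u << k))) {
                                id_mask[j] |= (1u << k);
                                *out_id = (uint8_t)(j * 32 + k + 1);
                                return 0;
                        }
                }
        }
        return -1;                      /* all 255 IDs are in use */
}

static void release_id(uint8_t id)
{
        if (id == 0)
                return;                 /* zero is never a valid ID */

        id--;                           /* back to a zero-based bit index */
        id_mask[id / 32] &= ~(1u << (id % 32));
}

int main(void)
{
        uint8_t id = 0;

        if (allocate_id(&id) == 0)
                printf("allocated owner ID 0x%2.2X\n", id);
        release_id(id);
        return 0;
}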
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index e38bef4980bc..cb7fa491decf 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -127,7 +127,9 @@ const char *acpi_gbl_rw_decode[] = {
const char *acpi_gbl_shr_decode[] = {
"Exclusive",
- "Shared"
+ "Shared",
+ "ExclusiveAndWake", /* ACPI 5.0 */
+ "SharedAndWake" /* ACPI 5.0 */
};
const char *acpi_gbl_siz_decode[] = {
@@ -383,26 +385,16 @@ static const u8 acpi_gbl_resource_types[] = {
ACPI_VARIABLE_LENGTH /* 0E *serial_bus */
};
-/*
- * For the iASL compiler/disassembler, we don't want any error messages
- * because the disassembler uses the resource validation code to determine
- * if Buffer objects are actually Resource Templates.
- */
-#ifdef ACPI_ASL_COMPILER
-#define ACPI_RESOURCE_ERROR(plist)
-#else
-#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
-#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_aml_resources
*
- * PARAMETERS: aml - Pointer to the raw AML resource template
- * aml_length - Length of the entire template
- * user_function - Called once for each descriptor found. If
- * NULL, a pointer to the end_tag is returned
- * context - Passed to user_function
+ * PARAMETERS: walk_state - Current walk info
+ * aml - Pointer to the raw AML resource template
+ * aml_length - Length of the entire template
+ * user_function - Called once for each descriptor found. If
+ * NULL, a pointer to the end_tag is returned
+ * context - Passed to user_function
*
* RETURN: Status
*
@@ -412,7 +404,8 @@ static const u8 acpi_gbl_resource_types[] = {
******************************************************************************/
acpi_status
-acpi_ut_walk_aml_resources(u8 * aml,
+acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
+ u8 *aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function, void **context)
{
@@ -441,7 +434,8 @@ acpi_ut_walk_aml_resources(u8 * aml,
/* Validate the Resource Type and Resource Length */
- status = acpi_ut_validate_resource(aml, &resource_index);
+ status =
+ acpi_ut_validate_resource(walk_state, aml, &resource_index);
if (ACPI_FAILURE(status)) {
/*
* Exit on failure. Cannot continue because the descriptor length
@@ -498,7 +492,8 @@ acpi_ut_walk_aml_resources(u8 * aml,
/* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
- (void)acpi_ut_validate_resource(end_tag, &resource_index);
+ (void)acpi_ut_validate_resource(walk_state, end_tag,
+ &resource_index);
status =
user_function(end_tag, 2, offset, resource_index, context);
if (ACPI_FAILURE(status)) {
@@ -513,9 +508,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
*
* FUNCTION: acpi_ut_validate_resource
*
- * PARAMETERS: aml - Pointer to the raw AML resource descriptor
- * return_index - Where the resource index is returned. NULL
- * if the index is not required.
+ * PARAMETERS: walk_state - Current walk info
+ * aml - Pointer to the raw AML resource descriptor
+ * return_index - Where the resource index is returned. NULL
+ * if the index is not required.
*
* RETURN: Status, and optionally the Index into the global resource tables
*
@@ -525,7 +521,9 @@ acpi_ut_walk_aml_resources(u8 * aml,
*
******************************************************************************/
-acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
+acpi_status
+acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
+ void *aml, u8 *return_index)
{
union aml_resource *aml_resource;
u8 resource_type;
@@ -627,10 +625,12 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
if ((aml_resource->common_serial_bus.type == 0) ||
(aml_resource->common_serial_bus.type >
AML_RESOURCE_MAX_SERIALBUSTYPE)) {
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- aml_resource->common_serial_bus.
- type));
+ if (walk_state) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
+ aml_resource->common_serial_bus.
+ type));
+ }
return (AE_AML_INVALID_RESOURCE_TYPE);
}
}
@@ -645,18 +645,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
invalid_resource:
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid/unsupported resource descriptor: Type 0x%2.2X",
- resource_type));
+ if (walk_state) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource_type));
+ }
return (AE_AML_INVALID_RESOURCE_TYPE);
bad_resource_length:
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid resource descriptor length: Type "
- "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
- resource_type, resource_length,
- minimum_resource_length));
+ if (walk_state) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid resource descriptor length: Type "
+ "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
+ resource_type, resource_length,
+ minimum_resource_length));
+ }
return (AE_AML_BAD_RESOURCE_LENGTH);
}
@@ -800,8 +804,7 @@ u32 acpi_ut_get_descriptor_length(void *aml)
******************************************************************************/
acpi_status
-acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
- u8 ** end_tag)
+acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag)
{
acpi_status status;
@@ -816,7 +819,7 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
/* Validate the template and get a pointer to the end_tag */
- status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer,
+ status = acpi_ut_walk_aml_resources(NULL, obj_desc->buffer.pointer,
obj_desc->buffer.length, NULL,
(void **)end_tag);
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index cee0473ba813..a6b729d4c1dc 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,14 +97,13 @@ void
acpi_ut_push_generic_state(union acpi_generic_state **list_head,
union acpi_generic_state *state)
{
- ACPI_FUNCTION_TRACE(ut_push_generic_state);
+ ACPI_FUNCTION_ENTRY();
/* Push the state object onto the front of the list (stack) */
state->common.next = *list_head;
*list_head = state;
-
- return_VOID;
+ return;
}
/*******************************************************************************
@@ -124,7 +123,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE(ut_pop_generic_state);
+ ACPI_FUNCTION_ENTRY();
/* Remove the state object at the head of the list (stack) */
@@ -136,7 +135,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
*list_head = state->common.next;
}
- return_PTR(state);
+ return (state);
}
/*******************************************************************************
@@ -186,13 +185,13 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE(ut_create_thread_state);
+ ACPI_FUNCTION_ENTRY();
/* Create the generic state object */
state = acpi_ut_create_generic_state();
if (!state) {
- return_PTR(NULL);
+ return (NULL);
}
/* Init fields specific to the update struct */
@@ -207,7 +206,7 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
state->thread.thread_id = (acpi_thread_id) 1;
}
- return_PTR((struct acpi_thread_state *)state);
+ return ((struct acpi_thread_state *)state);
}
/*******************************************************************************
@@ -230,13 +229,13 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object);
+ ACPI_FUNCTION_ENTRY();
/* Create the generic state object */
state = acpi_ut_create_generic_state();
if (!state) {
- return_PTR(NULL);
+ return (NULL);
}
/* Init fields specific to the update struct */
@@ -244,8 +243,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
state->update.object = object;
state->update.value = action;
-
- return_PTR(state);
+ return (state);
}
/*******************************************************************************
@@ -267,13 +265,13 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object);
+ ACPI_FUNCTION_ENTRY();
/* Create the generic state object */
state = acpi_ut_create_generic_state();
if (!state) {
- return_PTR(NULL);
+ return (NULL);
}
/* Init fields specific to the update struct */
@@ -283,8 +281,7 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
state->pkg.dest_object = external_object;
state->pkg.index = index;
state->pkg.num_packages = 1;
-
- return_PTR(state);
+ return (state);
}
/*******************************************************************************
@@ -304,21 +301,20 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE(ut_create_control_state);
+ ACPI_FUNCTION_ENTRY();
/* Create the generic state object */
state = acpi_ut_create_generic_state();
if (!state) {
- return_PTR(NULL);
+ return (NULL);
}
/* Init fields specific to the control struct */
state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
-
- return_PTR(state);
+ return (state);
}
/*******************************************************************************
@@ -336,12 +332,12 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
void acpi_ut_delete_generic_state(union acpi_generic_state *state)
{
- ACPI_FUNCTION_TRACE(ut_delete_generic_state);
+ ACPI_FUNCTION_ENTRY();
/* Ignore null state */
if (state) {
(void)acpi_os_release_object(acpi_gbl_state_cache, state);
}
- return_VOID;
+ return;
}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
new file mode 100644
index 000000000000..b3e36a81aa4d
--- /dev/null
+++ b/drivers/acpi/acpica/utstring.c
@@ -0,0 +1,574 @@
+/*******************************************************************************
+ *
+ * Module Name: utstring - Common functions for strings and characters
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utstring")
+
+/*
+ * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
+ * version of strtoul.
+ */
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strlwr (strlwr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert string to lowercase
+ *
+ * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
+ *
+ ******************************************************************************/
+void acpi_ut_strlwr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, lowercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)ACPI_TOLOWER(*string);
+ }
+
+ return;
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ut_stricmp (stricmp)
+ *
+ * PARAMETERS: string1 - first string to compare
+ * string2 - second string to compare
+ *
+ * RETURN: int that signifies string relationship. Zero means strings
+ * are equal.
+ *
+ * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
+ * strings with no case sensitivity)
+ *
+ ******************************************************************************/
+
+int acpi_ut_stricmp(char *string1, char *string2)
+{
+ int c1;
+ int c2;
+
+ do {
+ c1 = tolower((int)*string1);
+ c2 = tolower((int)*string2);
+
+ string1++;
+ string2++;
+ }
+ while ((c1 == c2) && (c1));
+
+ return (c1 - c2);
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strupr (strupr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert string to uppercase
+ *
+ * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
+ *
+ ******************************************************************************/
+
+void acpi_ut_strupr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, uppercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)ACPI_TOUPPER(*string);
+ }
+
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul64
+ *
+ * PARAMETERS: string - Null terminated string
+ * base - Radix of the string: 16 or ACPI_ANY_BASE;
+ * ACPI_ANY_BASE means the conversion is on behalf of a ToInteger operator
+ * ret_integer - Where the converted integer is returned
+ *
+ * RETURN: Status and Converted value
+ *
+ * DESCRIPTION: Convert a string into an unsigned value. Performs either a
+ * 32-bit or 64-bit conversion, depending on the current mode
+ * of the interpreter.
+ * NOTE: Does not support Octal strings, not needed.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
+{
+ u32 this_digit = 0;
+ u64 return_value = 0;
+ u64 quotient;
+ u64 dividend;
+ u32 to_integer_op = (base == ACPI_ANY_BASE);
+ u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+ u8 valid_digits = 0;
+ u8 sign_of0x = 0;
+ u8 term = 0;
+
+ ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
+
+ switch (base) {
+ case ACPI_ANY_BASE:
+ case 16:
+ break;
+
+ default:
+ /* Invalid Base */
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (!string) {
+ goto error_exit;
+ }
+
+ /* Skip over any white space in the buffer */
+
+ while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
+ string++;
+ }
+
+ if (to_integer_op) {
+ /*
+ * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
+ * We need to determine if it is decimal or hexadecimal.
+ */
+ if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
+ sign_of0x = 1;
+ base = 16;
+
+ /* Skip over the leading '0x' */
+ string += 2;
+ } else {
+ base = 10;
+ }
+ }
+
+ /* Any string left? Check that '0x' is not followed by white space. */
+
+ if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ goto all_done;
+ }
+ }
+
+ /*
+ * Perform a 32-bit or 64-bit conversion, depending upon the current
+ * execution mode of the interpreter
+ */
+ dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+
+ /* Main loop: convert the string to a 32- or 64-bit integer */
+
+ while (*string) {
+ if (ACPI_IS_DIGIT(*string)) {
+
+ /* Convert ASCII 0-9 to Decimal value */
+
+ this_digit = ((u8)*string) - '0';
+ } else if (base == 10) {
+
+ /* Digit is out of range; possible in to_integer case only */
+
+ term = 1;
+ } else {
+ this_digit = (u8)ACPI_TOUPPER(*string);
+ if (ACPI_IS_XDIGIT((char)this_digit)) {
+
+ /* Convert ASCII Hex char to value */
+
+ this_digit = this_digit - 'A' + 10;
+ } else {
+ term = 1;
+ }
+ }
+
+ if (term) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
+ } else if ((valid_digits == 0) && (this_digit == 0)
+ && !sign_of0x) {
+
+ /* Skip zeros */
+ string++;
+ continue;
+ }
+
+ valid_digits++;
+
+ if (sign_of0x
+ && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
+ /*
+ * This is the to_integer operation case. With a '0x' prefix,
+ * more digits than fit in the current integer width (8 for
+ * 32-bit mode, 16 for 64-bit mode) is treated as an error.
+ */
+ goto error_exit;
+ }
+
+ /* Divide the digit into the correct position */
+
+ (void)acpi_ut_short_divide((dividend - (u64)this_digit),
+ base, &quotient, NULL);
+
+ if (return_value > quotient) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
+ }
+
+ return_value *= base;
+ return_value += this_digit;
+ string++;
+ }
+
+ /* All done, normal exit */
+
+ all_done:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(return_value)));
+
+ *ret_integer = return_value;
+ return_ACPI_STATUS(AE_OK);
+
+ error_exit:
+ /* Base was set/validated above */
+
+ if (base == 10) {
+ return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
+ } else {
+ return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
+ }
+}
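/*
 * The overflow test in acpi_ut_strtoul64() above computes
 * quotient = (limit - digit) / base via acpi_ut_short_divide() and rejects
 * the digit when the accumulated value already exceeds that quotient, i.e.
 * before return_value * base + digit would wrap. A standalone sketch of the
 * same check (hypothetical helper, plain C division):
 */
#include <stdio.h>
#include <stdint.h>

static int accumulate(uint64_t *value, unsigned int digit, unsigned int base,
                      uint64_t limit)
{
        /* Would value * base + digit exceed limit? */
        if (*value > (limit - digit) / base)
                return -1;              /* overflow for the target width */

        *value = *value * base + digit;
        return 0;
}

int main(void)
{
        const char *s = "1A3F";
        uint64_t v = 0;

        for (const char *p = s; *p; p++) {
                unsigned int d = (*p <= '9') ? (unsigned int)(*p - '0')
                                             : (unsigned int)(*p - 'A' + 10);

                if (accumulate(&v, d, 16, UINT64_MAX))
                        break;
        }
        printf("0x%llX\n", (unsigned long long)v);   /* prints 0x1A3F */
        return 0;
}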
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_print_string
+ *
+ * PARAMETERS: string - Null terminated ASCII string
+ * max_length - Maximum output length
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
+ * sequences.
+ *
+ ******************************************************************************/
+
+void acpi_ut_print_string(char *string, u8 max_length)
+{
+ u32 i;
+
+ if (!string) {
+ acpi_os_printf("<\"NULL STRING PTR\">");
+ return;
+ }
+
+ acpi_os_printf("\"");
+ for (i = 0; string[i] && (i < max_length); i++) {
+
+ /* Escape sequences */
+
+ switch (string[i]) {
+ case 0x07:
+ acpi_os_printf("\\a"); /* BELL */
+ break;
+
+ case 0x08:
+ acpi_os_printf("\\b"); /* BACKSPACE */
+ break;
+
+ case 0x0C:
+ acpi_os_printf("\\f"); /* FORMFEED */
+ break;
+
+ case 0x0A:
+ acpi_os_printf("\\n"); /* LINEFEED */
+ break;
+
+ case 0x0D:
+ acpi_os_printf("\\r"); /* CARRIAGE RETURN */
+ break;
+
+ case 0x09:
+ acpi_os_printf("\\t"); /* HORIZONTAL TAB */
+ break;
+
+ case 0x0B:
+ acpi_os_printf("\\v"); /* VERTICAL TAB */
+ break;
+
+ case '\'': /* Single Quote */
+ case '\"': /* Double Quote */
+ case '\\': /* Backslash */
+ acpi_os_printf("\\%c", (int)string[i]);
+ break;
+
+ default:
+
+ /* Check for printable character or hex escape */
+
+ if (ACPI_IS_PRINT(string[i])) {
+ /* This is a normal character */
+
+ acpi_os_printf("%c", (int)string[i]);
+ } else {
+ /* All others will be Hex escapes */
+
+ acpi_os_printf("\\x%2.2X", (s32) string[i]);
+ }
+ break;
+ }
+ }
+ acpi_os_printf("\"");
+
+ if (i == max_length && string[i]) {
+ acpi_os_printf("...");
+ }
+}
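/*
 * acpi_ut_print_string() above maps control characters to their named C
 * escapes, backslash-escapes quotes and backslashes, and falls back to \xNN
 * for any other non-printable byte. A standalone sketch of the same policy
 * (hypothetical helper, not the ACPICA function):
 */
#include <stdio.h>
#include <ctype.h>

static void print_escaped(const char *s, unsigned int max_len)
{
        unsigned int i;

        putchar('"');
        for (i = 0; s[i] && i < max_len; i++) {
                switch (s[i]) {
                case '\n': printf("\\n"); break;
                case '\t': printf("\\t"); break;
                case '\'':
                case '"':
                case '\\': printf("\\%c", s[i]); break;
                default:
                        if (isprint((unsigned char)s[i]))
                                putchar(s[i]);
                        else
                                printf("\\x%2.2X", (unsigned char)s[i]);
                        break;
                }
        }
        putchar('"');
        if (i == max_len && s[i])
                printf("...");          /* output was truncated */
        putchar('\n');
}

int main(void)
{
        print_escaped("AB\tC\x01", 32); /* prints "AB\tC\x01" */
        return 0;
}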
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_acpi_char
+ *
+ * PARAMETERS: char - The character to be examined
+ * position - Byte position (0-3)
+ *
+ * RETURN: TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ * We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_char(char character, u32 position)
+{
+
+ if (!((character >= 'A' && character <= 'Z') ||
+ (character >= '0' && character <= '9') || (character == '_'))) {
+
+ /* Allow a '!' in the last position */
+
+ if (character == '!' && position == 3) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_acpi_name
+ *
+ * PARAMETERS: name - The name to be examined
+ *
+ * RETURN: TRUE if the name is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_name(u32 name)
+{
+ u32 i;
+
+ ACPI_FUNCTION_ENTRY();
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (!acpi_ut_valid_acpi_char
+ ((ACPI_CAST_PTR(char, &name))[i], i)) {
+ return (FALSE);
+ }
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_repair_name
+ *
+ * PARAMETERS: name - The ACPI name to be repaired
+ *
+ * RETURN: Repaired version of the name
+ *
+ * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
+ * return the new name. NOTE: the Name parameter must reside in
+ * read/write memory, cannot be a const.
+ *
+ * An ACPI Name must consist of valid ACPI characters. We will repair the name
+ * if necessary because we don't want to abort because of this, but we want
+ * all namespace names to be printable. A warning message is appropriate.
+ *
+ * This issue came up because there are in fact machines that exhibit
+ * this problem, and we want to be able to enable ACPI support for them,
+ * even though there are a few bad names.
+ *
+ ******************************************************************************/
+
+void acpi_ut_repair_name(char *name)
+{
+ u32 i;
+ u8 found_bad_char = FALSE;
+ u32 original_name;
+
+ ACPI_FUNCTION_NAME(ut_repair_name);
+
+ ACPI_MOVE_NAME(&original_name, name);
+
+ /* Check each character in the name */
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (acpi_ut_valid_acpi_char(name[i], i)) {
+ continue;
+ }
+
+ /*
+ * Replace a bad character with something printable, yet technically
+ * still invalid. This prevents any collisions with existing "good"
+ * names in the namespace.
+ */
+ name[i] = '*';
+ found_bad_char = TRUE;
+ }
+
+ if (found_bad_char) {
+
+ /* Report warning only if in strict mode or debug mode */
+
+ if (!acpi_gbl_enable_interpreter_slack) {
+ ACPI_WARNING((AE_INFO,
+ "Invalid character(s) in name (0x%.8X), repaired: [%4.4s]",
+ original_name, name));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Invalid character(s) in name (0x%.8X), repaired: [%4.4s]",
+ original_name, name));
+ }
+ }
+}
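/*
 * The functions above enforce the 4-character ACPI name rules -- upper-case
 * letters, digits and '_' are valid anywhere, '!' only in the last position
 * (for the ASF! table) -- and overwrite anything else with '*'. A standalone
 * sketch (hypothetical names, not the ACPICA API):
 */
#include <stdio.h>

#define NAME_SIZE 4

static int valid_char(char c, unsigned int pos)
{
        if ((c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_')
                return 1;

        return (c == '!' && pos == NAME_SIZE - 1);
}

static void repair_name(char name[NAME_SIZE])
{
        for (unsigned int i = 0; i < NAME_SIZE; i++) {
                if (!valid_char(name[i], i))
                        name[i] = '*';  /* printable, but still invalid */
        }
}

int main(void)
{
        char name[NAME_SIZE + 1] = "ab_0";      /* lower case is invalid */

        repair_name(name);
        printf("%.4s\n", name);                 /* prints **_0 */
        return 0;
}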
+
+#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
+/*******************************************************************************
+ *
+ * FUNCTION: ut_convert_backslashes
+ *
+ * PARAMETERS: pathname - File pathname string to be converted
+ *
+ * RETURN: Modifies the input Pathname
+ *
+ * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
+ * the entire input file pathname string.
+ *
+ ******************************************************************************/
+
+void ut_convert_backslashes(char *pathname)
+{
+
+ if (!pathname) {
+ return;
+ }
+
+ while (*pathname) {
+ if (*pathname == '\\') {
+ *pathname = '/';
+ }
+
+ pathname++;
+ }
+}
+#endif
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index a424a9e3fea4..62774c7b76a8 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -436,10 +436,10 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
struct acpi_memory_list *mem_list;
acpi_status status;
- ACPI_FUNCTION_TRACE(ut_remove_allocation);
+ ACPI_FUNCTION_NAME(ut_remove_allocation);
if (acpi_gbl_disable_mem_tracking) {
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
mem_list = acpi_gbl_global_list;
@@ -450,12 +450,12 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
ACPI_ERROR((module, line,
"Empty allocation list, nothing to free!"));
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ return (status);
}
/* Unlink */
@@ -470,15 +470,15 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
(allocation->next)->previous = allocation->previous;
}
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing %p, size 0%X\n",
+ &allocation->user_space, allocation->size));
+
/* Mark the segment as deleted */
ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n",
- allocation->size));
-
status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
- return_ACPI_STATUS(status);
+ return (status);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 390db0ca5e2e..48efb446258c 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,11 +44,7 @@
#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acevents.h"
-#include "acnamesp.h"
#include "acdebug.h"
-#include "actables.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index d4d3826140d8..976b6c734fce 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ ACPI_EXPORT_SYMBOL(acpi_bios_warning)
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * Pathname - Full pathname to the node
+ * pathname - Full pathname to the node
* node_flags - From Namespace node for the method/object
- * Format - Printf format string + additional args
+ * format - Printf format string + additional args
*
* RETURN: None
*
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 14f523627a5e..41ebaaf8bb1a 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 0a40a851b354..312299721ba1 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 00a783661d0b..46f80e2c92f7 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -590,6 +590,9 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
*access_bit_width < 32)
*access_bit_width = 32;
+ else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
+ *access_bit_width < 64)
+ *access_bit_width = 64;
if ((bit_width + bit_offset) > *access_bit_width) {
pr_warning(FW_BUG APEI_PFX
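/*
 * The hunk above lets apei_check_gar() widen a 64-bit register access when
 * bit_offset is 0 and the physical address is 8-byte aligned, mirroring the
 * existing 32-bit case. A standalone sketch of the widening logic
 * (hypothetical helper, not the kernel function):
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int widen_access(uint64_t paddr, unsigned int bit_width,
                                 unsigned int bit_offset,
                                 unsigned int access_bit_width)
{
        if (bit_width == 32 && bit_offset == 0 && (paddr & 0x03) == 0 &&
            access_bit_width < 32)
                access_bit_width = 32;
        else if (bit_width == 64 && bit_offset == 0 && (paddr & 0x07) == 0 &&
                 access_bit_width < 64)
                access_bit_width = 64;

        return access_bit_width;
}

int main(void)
{
        /* 64-bit register at an 8-byte-aligned address, GAR advertised 8 bits */
        printf("%u\n", widen_access(0x1000, 64, 0, 8));  /* prints 64 */
        return 0;
}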
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index e6defd86b424..1e5d8a40101e 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -29,6 +29,7 @@
#include <linux/time.h>
#include <linux/cper.h>
#include <linux/acpi.h>
+#include <linux/pci.h>
#include <linux/aer.h>
/*
@@ -249,6 +250,10 @@ static const char *cper_pcie_port_type_strs[] = {
static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
const struct acpi_hest_generic_data *gdata)
{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct pci_dev *dev;
+#endif
+
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
@@ -281,10 +286,18 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
#ifdef CONFIG_ACPI_APEI_PCIEAER
- if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- struct aer_capability_regs *aer_regs = (void *)pcie->aer_info;
- cper_print_aer(pfx, gdata->error_severity, aer_regs);
+ dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
+ pcie->device_id.bus, pcie->device_id.function);
+ if (!dev) {
+ pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
+ pcie->device_id.segment, pcie->device_id.bus,
+ pcie->device_id.slot, pcie->device_id.function);
+ return;
}
+ if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
+ cper_print_aer(pfx, dev, gdata->error_severity,
+ (struct aer_capability_regs *) pcie->aer_info);
+ pci_dev_put(dev);
#endif
}
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 903549df809b..04ab5c9d3ced 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -111,8 +111,17 @@ retry_next:
if (rc)
goto out;
/* no more record */
- if (id == APEI_ERST_INVALID_RECORD_ID)
+ if (id == APEI_ERST_INVALID_RECORD_ID) {
+ /*
+ * If the persistent store is initially empty, erst_read()
+ * below returns -ENOENT and execution comes back to the
+ * 'retry_next' label, so 'len' would still hold that error
+ * here. Set it to zero so the read reports EOF instead.
+ */
+ len = 0;
+
goto out;
+ }
retry:
rc = len = erst_read(id, erst_dbg_buf, erst_dbg_buf_len);
/* The record may be cleared by others, try read next record */
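/*
 * The fix above makes the debugfs read handler report 0 (EOF) instead of a
 * stale negative value when the error record store is empty. A standalone
 * sketch of the convention the caller relies on -- positive means bytes
 * read, 0 means EOF, negative means error (hypothetical store_read(), not
 * the ERST API):
 */
#include <stdio.h>

static int store_read(char *buf, int len)
{
        (void)buf;
        (void)len;
        return 0;                       /* empty store: report EOF, not -ENOENT */
}

int main(void)
{
        char buf[64];
        int n;

        while ((n = store_read(buf, (int)sizeof(buf))) > 0)
                printf("got %d bytes\n", n);

        if (n == 0)
                printf("EOF\n");
        else
                printf("error %d\n", n);
        return 0;
}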
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7efaeaa53b88..c5cd5b5513e6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1111,7 +1111,7 @@ fail:
return result;
}
-static int acpi_battery_remove(struct acpi_device *device, int type)
+static int acpi_battery_remove(struct acpi_device *device)
{
struct acpi_battery *battery = NULL;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 1f0d457ecbcf..01708a165368 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -178,276 +178,6 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
}
EXPORT_SYMBOL(acpi_bus_get_private_data);
-/* --------------------------------------------------------------------------
- Power Management
- -------------------------------------------------------------------------- */
-
-static const char *state_string(int state)
-{
- switch (state) {
- case ACPI_STATE_D0:
- return "D0";
- case ACPI_STATE_D1:
- return "D1";
- case ACPI_STATE_D2:
- return "D2";
- case ACPI_STATE_D3_HOT:
- return "D3hot";
- case ACPI_STATE_D3_COLD:
- return "D3";
- default:
- return "(unknown)";
- }
-}
-
-static int __acpi_bus_get_power(struct acpi_device *device, int *state)
-{
- int result = ACPI_STATE_UNKNOWN;
-
- if (!device || !state)
- return -EINVAL;
-
- if (!device->flags.power_manageable) {
- /* TBD: Non-recursive algorithm for walking up hierarchy. */
- *state = device->parent ?
- device->parent->power.state : ACPI_STATE_D0;
- goto out;
- }
-
- /*
- * Get the device's power state either directly (via _PSC) or
- * indirectly (via power resources).
- */
- if (device->power.flags.explicit_get) {
- unsigned long long psc;
- acpi_status status = acpi_evaluate_integer(device->handle,
- "_PSC", NULL, &psc);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- result = psc;
- }
- /* The test below covers ACPI_STATE_UNKNOWN too. */
- if (result <= ACPI_STATE_D2) {
- ; /* Do nothing. */
- } else if (device->power.flags.power_resources) {
- int error = acpi_power_get_inferred_state(device, &result);
- if (error)
- return error;
- } else if (result == ACPI_STATE_D3_HOT) {
- result = ACPI_STATE_D3;
- }
-
- /*
- * If we were unsure about the device parent's power state up to this
- * point, the fact that the device is in D0 implies that the parent has
- * to be in D0 too.
- */
- if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
- && result == ACPI_STATE_D0)
- device->parent->power.state = ACPI_STATE_D0;
-
- *state = result;
-
- out:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
- device->pnp.bus_id, state_string(*state)));
-
- return 0;
-}
-
-
-/**
- * acpi_device_set_power - Set power state of an ACPI device.
- * @device: Device to set the power state of.
- * @state: New power state to set.
- *
- * Callers must ensure that the device is power manageable before using this
- * function.
- */
-int acpi_device_set_power(struct acpi_device *device, int state)
-{
- int result = 0;
- acpi_status status = AE_OK;
- char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
-
- if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
- return -EINVAL;
-
- /* Make sure this is a valid target state */
-
- if (state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
- state_string(state)));
- return 0;
- }
-
- if (!device->power.states[state].flags.valid) {
- printk(KERN_WARNING PREFIX "Device does not support %s\n",
- state_string(state));
- return -ENODEV;
- }
- if (device->parent && (state < device->parent->power.state)) {
- printk(KERN_WARNING PREFIX
- "Cannot set device to a higher-powered"
- " state than parent\n");
- return -ENODEV;
- }
-
- /* For D3cold we should execute _PS3, not _PS4. */
- if (state == ACPI_STATE_D3_COLD)
- object_name[3] = '3';
-
- /*
- * Transition Power
- * ----------------
- * On transitions to a high-powered state we first apply power (via
- * power resources) then evalute _PSx. Conversly for transitions to
- * a lower-powered state.
- */
- if (state < device->power.state) {
- if (device->power.state >= ACPI_STATE_D3_HOT &&
- state != ACPI_STATE_D0) {
- printk(KERN_WARNING PREFIX
- "Cannot transition to non-D0 state from D3\n");
- return -ENODEV;
- }
- if (device->power.flags.power_resources) {
- result = acpi_power_transition(device, state);
- if (result)
- goto end;
- }
- if (device->power.states[state].flags.explicit_set) {
- status = acpi_evaluate_object(device->handle,
- object_name, NULL, NULL);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
- }
- } else {
- if (device->power.states[state].flags.explicit_set) {
- status = acpi_evaluate_object(device->handle,
- object_name, NULL, NULL);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
- }
- if (device->power.flags.power_resources) {
- result = acpi_power_transition(device, state);
- if (result)
- goto end;
- }
- }
-
- end:
- if (result)
- printk(KERN_WARNING PREFIX
- "Device [%s] failed to transition to %s\n",
- device->pnp.bus_id, state_string(state));
- else {
- device->power.state = state;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] transitioned to %s\n",
- device->pnp.bus_id, state_string(state)));
- }
-
- return result;
-}
-EXPORT_SYMBOL(acpi_device_set_power);
-
-
-int acpi_bus_set_power(acpi_handle handle, int state)
-{
- struct acpi_device *device;
- int result;
-
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
-
- if (!device->flags.power_manageable) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] is not power manageable\n",
- dev_name(&device->dev)));
- return -ENODEV;
- }
-
- return acpi_device_set_power(device, state);
-}
-EXPORT_SYMBOL(acpi_bus_set_power);
-
-
-int acpi_bus_init_power(struct acpi_device *device)
-{
- int state;
- int result;
-
- if (!device)
- return -EINVAL;
-
- device->power.state = ACPI_STATE_UNKNOWN;
-
- result = __acpi_bus_get_power(device, &state);
- if (result)
- return result;
-
- if (device->power.flags.power_resources)
- result = acpi_power_on_resources(device, state);
-
- if (!result)
- device->power.state = state;
-
- return result;
-}
-
-
-int acpi_bus_update_power(acpi_handle handle, int *state_p)
-{
- struct acpi_device *device;
- int state;
- int result;
-
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
-
- result = __acpi_bus_get_power(device, &state);
- if (result)
- return result;
-
- result = acpi_device_set_power(device, state);
- if (!result && state_p)
- *state_p = state;
-
- return result;
-}
-EXPORT_SYMBOL_GPL(acpi_bus_update_power);
-
-
-bool acpi_bus_power_manageable(acpi_handle handle)
-{
- struct acpi_device *device;
- int result;
-
- result = acpi_bus_get_device(handle, &device);
- return result ? false : device->flags.power_manageable;
-}
-
-EXPORT_SYMBOL(acpi_bus_power_manageable);
-
-bool acpi_bus_can_wakeup(acpi_handle handle)
-{
- struct acpi_device *device;
- int result;
-
- result = acpi_bus_get_device(handle, &device);
- return result ? false : device->wakeup.flags.valid;
-}
-
-EXPORT_SYMBOL(acpi_bus_can_wakeup);
-
static void acpi_print_osc_error(acpi_handle handle,
struct acpi_osc_context *context, char *error)
{
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index f0d936b65e37..86c7d5445c38 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -75,7 +75,7 @@ static const struct acpi_device_id button_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, button_device_ids);
static int acpi_button_add(struct acpi_device *device);
-static int acpi_button_remove(struct acpi_device *device, int type);
+static int acpi_button_remove(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
@@ -433,7 +433,7 @@ static int acpi_button_add(struct acpi_device *device)
return error;
}
-static int acpi_button_remove(struct acpi_device *device, int type)
+static int acpi_button_remove(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 811910b50b75..5523ba7d764d 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -34,46 +34,34 @@
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#include <acpi/container.h>
#define PREFIX "ACPI: "
-#define ACPI_CONTAINER_DEVICE_NAME "ACPI container device"
-#define ACPI_CONTAINER_CLASS "container"
-
-#define INSTALL_NOTIFY_HANDLER 1
-#define UNINSTALL_NOTIFY_HANDLER 2
-
#define _COMPONENT ACPI_CONTAINER_COMPONENT
ACPI_MODULE_NAME("container");
-MODULE_AUTHOR("Anil S Keshavamurthy");
-MODULE_DESCRIPTION("ACPI container driver");
-MODULE_LICENSE("GPL");
-
-static int acpi_container_add(struct acpi_device *device);
-static int acpi_container_remove(struct acpi_device *device, int type);
-
static const struct acpi_device_id container_device_ids[] = {
{"ACPI0004", 0},
{"PNP0A05", 0},
{"PNP0A06", 0},
{"", 0},
};
-MODULE_DEVICE_TABLE(acpi, container_device_ids);
-static struct acpi_driver acpi_container_driver = {
- .name = "container",
- .class = ACPI_CONTAINER_CLASS,
+static int container_device_attach(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
+{
+ /*
+ * FIXME: This is necessary, so that acpi_eject_store() doesn't return
+ * -ENODEV for containers.
+ */
+ return 1;
+}
+
+static struct acpi_scan_handler container_device_handler = {
.ids = container_device_ids,
- .ops = {
- .add = acpi_container_add,
- .remove = acpi_container_remove,
- },
+ .attach = container_device_attach,
};
-/*******************************************************************/
-
static int is_device_present(acpi_handle handle)
{
acpi_handle temp;
@@ -92,73 +80,6 @@ static int is_device_present(acpi_handle handle)
return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
}
-static bool is_container_device(const char *hid)
-{
- const struct acpi_device_id *container_id;
-
- for (container_id = container_device_ids;
- container_id->id[0]; container_id++) {
- if (!strcmp((char *)container_id->id, hid))
- return true;
- }
-
- return false;
-}
-
-/*******************************************************************/
-static int acpi_container_add(struct acpi_device *device)
-{
- struct acpi_container *container;
-
- container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL);
- if (!container)
- return -ENOMEM;
-
- container->handle = device->handle;
- strcpy(acpi_device_name(device), ACPI_CONTAINER_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_CONTAINER_CLASS);
- device->driver_data = container;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device <%s> bid <%s>\n",
- acpi_device_name(device), acpi_device_bid(device)));
-
- return 0;
-}
-
-static int acpi_container_remove(struct acpi_device *device, int type)
-{
- acpi_status status = AE_OK;
- struct acpi_container *pc = NULL;
-
- pc = acpi_driver_data(device);
- kfree(pc);
- return status;
-}
-
-static int container_device_add(struct acpi_device **device, acpi_handle handle)
-{
- acpi_handle phandle;
- struct acpi_device *pdev;
- int result;
-
-
- if (acpi_get_parent(handle, &phandle)) {
- return -ENODEV;
- }
-
- if (acpi_bus_get_device(phandle, &pdev)) {
- return -ENODEV;
- }
-
- if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_DEVICE)) {
- return -ENODEV;
- }
-
- result = acpi_bus_start(*device);
-
- return result;
-}
-
static void container_notify_cb(acpi_handle handle, u32 type, void *context)
{
struct acpi_device *device = NULL;
@@ -167,6 +88,8 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
acpi_status status;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+ acpi_scan_lock_acquire();
+
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* Fall through */
@@ -182,7 +105,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
/* device exist and this is a remove request */
device->flags.eject_pending = 1;
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- return;
+ goto out;
}
break;
}
@@ -190,11 +113,16 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
if (!ACPI_FAILURE(status) || device)
break;
- result = container_device_add(&device, handle);
+ result = acpi_bus_scan(handle);
if (result) {
acpi_handle_warn(handle, "Failed to add container\n");
break;
}
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ acpi_handle_warn(handle, "Missing device object\n");
+ break;
+ }
kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
ost_code = ACPI_OST_SC_SUCCESS;
@@ -204,98 +132,59 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
if (!acpi_bus_get_device(handle, &device) && device) {
device->flags.eject_pending = 1;
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- return;
+ goto out;
}
break;
default:
/* non-hotplug event; possibly handled by other handler */
- return;
+ goto out;
}
/* Inform firmware that the hotplug operation has completed */
(void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
- return;
+
+ out:
+ acpi_scan_lock_release();
}
-static acpi_status
-container_walk_namespace_cb(acpi_handle handle,
- u32 lvl, void *context, void **rv)
+static bool is_container(acpi_handle handle)
{
- char *hid = NULL;
struct acpi_device_info *info;
- acpi_status status;
- int *action = context;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_FAILURE(status)) {
- return AE_OK;
- }
+ bool ret = false;
- if (info->valid & ACPI_VALID_HID)
- hid = info->hardware_id.string;
+ if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
+ return false;
- if (hid == NULL) {
- goto end;
- }
-
- if (!is_container_device(hid))
- goto end;
+ if (info->valid & ACPI_VALID_HID) {
+ const struct acpi_device_id *id;
- switch (*action) {
- case INSTALL_NOTIFY_HANDLER:
- acpi_install_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- container_notify_cb, NULL);
- break;
- case UNINSTALL_NOTIFY_HANDLER:
- acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- container_notify_cb);
- break;
- default:
- break;
+ for (id = container_device_ids; id->id[0]; id++) {
+ ret = !strcmp((char *)id->id, info->hardware_id.string);
+ if (ret)
+ break;
+ }
}
-
- end:
kfree(info);
-
- return AE_OK;
+ return ret;
}
-static int __init acpi_container_init(void)
+static acpi_status acpi_container_register_notify_handler(acpi_handle handle,
+ u32 lvl, void *ctxt,
+ void **retv)
{
- int result = 0;
- int action = INSTALL_NOTIFY_HANDLER;
-
- result = acpi_bus_register_driver(&acpi_container_driver);
- if (result < 0) {
- return (result);
- }
-
- /* register notify handler to every container device */
- acpi_walk_namespace(ACPI_TYPE_DEVICE,
- ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- container_walk_namespace_cb, NULL, &action, NULL);
+ if (is_container(handle))
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ container_notify_cb, NULL);
- return (0);
+ return AE_OK;
}
-static void __exit acpi_container_exit(void)
+void __init acpi_container_init(void)
{
- int action = UNINSTALL_NOTIFY_HANDLER;
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ acpi_container_register_notify_handler, NULL,
+ NULL, NULL);
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE,
- ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- container_walk_namespace_cb, NULL, &action, NULL);
-
- acpi_bus_unregister_driver(&acpi_container_driver);
-
- return;
+ acpi_scan_add_handler(&container_device_handler);
}
-
-module_init(acpi_container_init);
-module_exit(acpi_container_exit);
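
Note: with this change the container code no longer registers an acpi_driver at module init time; it adds an acpi_scan_handler during ACPI scan initialization instead. A rough sketch of that pattern, using only the handler fields this patch relies on (.ids, .attach, and optionally .detach as used by the PCI link/root handlers further down) plus acpi_scan_add_handler(); all names are illustrative:

#include <linux/init.h>
#include <acpi/acpi_bus.h>

static const struct acpi_device_id example_ids[] = {
	{"EXMP0001", 0},	/* hypothetical HID */
	{"", 0},
};

static int example_attach(struct acpi_device *adev,
			  const struct acpi_device_id *id)
{
	/*
	 * A positive return value tells the scan code that this handler
	 * has claimed the device.
	 */
	return 1;
}

static void example_detach(struct acpi_device *adev)
{
	/* Undo whatever example_attach() set up. */
}

static struct acpi_scan_handler example_handler = {
	.ids	= example_ids,
	.attach	= example_attach,
	.detach	= example_detach,
};

void __init example_handler_init(void)
{
	acpi_scan_add_handler(&example_handler);
}
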
diff --git a/drivers/acpi/csrt.c b/drivers/acpi/csrt.c
new file mode 100644
index 000000000000..5c15a91faf0b
--- /dev/null
+++ b/drivers/acpi/csrt.c
@@ -0,0 +1,159 @@
+/*
+ * Support for Core System Resources Table (CSRT)
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "ACPI: CSRT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+ACPI_MODULE_NAME("CSRT");
+
+static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev,
+ const struct acpi_csrt_group *grp)
+{
+ const struct acpi_csrt_shared_info *si;
+ struct resource res[3];
+ size_t nres;
+ int ret;
+
+ memset(res, 0, sizeof(res));
+ nres = 0;
+
+ si = (const struct acpi_csrt_shared_info *)&grp[1];
+ /*
+ * The peripherals that are listed on CSRT typically support only
+ * 32-bit addresses so we only use the low part of MMIO base for
+ * now.
+ */
+ if (!si->mmio_base_high && si->mmio_base_low) {
+ /*
+ * There is no size of the memory resource in shared_info
+ * so we assume that it is 4k here.
+ */
+ res[nres].start = si->mmio_base_low;
+ res[nres].end = res[0].start + SZ_4K - 1;
+ res[nres++].flags = IORESOURCE_MEM;
+ }
+
+ if (si->gsi_interrupt) {
+ int irq = acpi_register_gsi(NULL, si->gsi_interrupt,
+ si->interrupt_mode,
+ si->interrupt_polarity);
+ res[nres].start = irq;
+ res[nres].end = irq;
+ res[nres++].flags = IORESOURCE_IRQ;
+ }
+
+ if (si->base_request_line || si->num_handshake_signals) {
+ /*
+ * We pass the driver a DMA resource describing the range
+ * of request lines the device supports.
+ */
+ res[nres].start = si->base_request_line;
+ res[nres].end = res[nres].start + si->num_handshake_signals - 1;
+ res[nres++].flags = IORESOURCE_DMA;
+ }
+
+ ret = platform_device_add_resources(pdev, res, nres);
+ if (ret) {
+ if (si->gsi_interrupt)
+ acpi_unregister_gsi(si->gsi_interrupt);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init
+acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp)
+{
+ struct platform_device *pdev;
+ char vendor[5], name[16];
+ int ret, i;
+
+ vendor[0] = grp->vendor_id;
+ vendor[1] = grp->vendor_id >> 8;
+ vendor[2] = grp->vendor_id >> 16;
+ vendor[3] = grp->vendor_id >> 24;
+ vendor[4] = '\0';
+
+ if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
+ return -ENODEV;
+
+ snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id);
+ pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
+
+ /* Add resources based on the shared info */
+ ret = acpi_csrt_parse_shared_info(pdev, grp);
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto fail;
+
+ for (i = 0; i < pdev->num_resources; i++)
+ dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]);
+
+ return 0;
+
+fail:
+ platform_device_put(pdev);
+ return ret;
+}
+
+/*
+ * CSRT or Core System Resources Table is a proprietary ACPI table
+ * introduced by Microsoft. This table can contain devices that are not in
+ * the system DSDT table. In particular DMA controllers might be described
+ * here.
+ *
+ * We present these devices as normal platform devices that don't have ACPI
+ * IDs or handle. The platform device name will be something like
+ * <VENDOR><DEVID>.<n>.auto for example: INTL9C06.0.auto.
+ */
+void __init acpi_csrt_init(void)
+{
+ struct acpi_csrt_group *grp, *end;
+ struct acpi_table_csrt *csrt;
+ acpi_status status;
+ int ret;
+
+ status = acpi_get_table(ACPI_SIG_CSRT, 0,
+ (struct acpi_table_header **)&csrt);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND)
+ pr_warn("failed to get the CSRT table\n");
+ return;
+ }
+
+ pr_debug("parsing CSRT table for devices\n");
+
+ grp = (struct acpi_csrt_group *)(csrt + 1);
+ end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
+
+ while (grp < end) {
+ ret = acpi_csrt_parse_resource_group(grp);
+ if (ret) {
+ pr_warn("error in parsing resource group: %d\n", ret);
+ return;
+ }
+
+ grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
+ }
+}
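
Note: the parser above only creates the platform devices; binding is left to ordinary platform drivers matching on the generated <VENDOR><DEVID> name. A hedged sketch of such a consumer, reusing the INTL9C06 example from the comment above (the driver itself is hypothetical):

#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_csrt_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	if (!mem || irq < 0)
		return -ENODEV;

	/* ioremap the MMIO window, request the IRQ, set up DMA, ... */
	return 0;
}

static struct platform_driver example_csrt_driver = {
	.probe	= example_csrt_probe,
	.driver	= {
		.name = "INTL9C06",	/* matches the generated device name */
	},
};
module_platform_driver(example_csrt_driver);
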
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 5d42c2414ae5..6adfc706a1de 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -1,5 +1,5 @@
/*
- * debugfs.c - ACPI debugfs interface to userspace.
+ * custom_method.c - debugfs interface for customizing ACPI control methods
*/
#include <linux/init.h>
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index f09dc987cf17..dd314ef9bff1 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -30,6 +30,12 @@
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
+
+#define _COMPONENT ACPI_POWER_COMPONENT
+ACPI_MODULE_NAME("device_pm");
static DEFINE_MUTEX(acpi_pm_notifier_lock);
@@ -94,6 +100,293 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
}
/**
+ * acpi_power_state_string - String representation of ACPI device power state.
+ * @state: ACPI device power state to return the string representation of.
+ */
+const char *acpi_power_state_string(int state)
+{
+ switch (state) {
+ case ACPI_STATE_D0:
+ return "D0";
+ case ACPI_STATE_D1:
+ return "D1";
+ case ACPI_STATE_D2:
+ return "D2";
+ case ACPI_STATE_D3_HOT:
+ return "D3hot";
+ case ACPI_STATE_D3_COLD:
+ return "D3cold";
+ default:
+ return "(unknown)";
+ }
+}
+
+/**
+ * acpi_device_get_power - Get power state of an ACPI device.
+ * @device: Device to get the power state of.
+ * @state: Place to store the power state of the device.
+ *
+ * This function does not update the device's power.state field, but it may
+ * update its parent's power.state field (when the parent's power state is
+ * unknown and the device's power state turns out to be D0).
+ */
+int acpi_device_get_power(struct acpi_device *device, int *state)
+{
+ int result = ACPI_STATE_UNKNOWN;
+
+ if (!device || !state)
+ return -EINVAL;
+
+ if (!device->flags.power_manageable) {
+ /* TBD: Non-recursive algorithm for walking up hierarchy. */
+ *state = device->parent ?
+ device->parent->power.state : ACPI_STATE_D0;
+ goto out;
+ }
+
+ /*
+ * Get the device's power state either directly (via _PSC) or
+ * indirectly (via power resources).
+ */
+ if (device->power.flags.explicit_get) {
+ unsigned long long psc;
+ acpi_status status = acpi_evaluate_integer(device->handle,
+ "_PSC", NULL, &psc);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ result = psc;
+ }
+ /* The test below covers ACPI_STATE_UNKNOWN too. */
+ if (result <= ACPI_STATE_D2) {
+ ; /* Do nothing. */
+ } else if (device->power.flags.power_resources) {
+ int error = acpi_power_get_inferred_state(device, &result);
+ if (error)
+ return error;
+ } else if (result == ACPI_STATE_D3_HOT) {
+ result = ACPI_STATE_D3;
+ }
+
+ /*
+ * If we were unsure about the device parent's power state up to this
+ * point, the fact that the device is in D0 implies that the parent has
+ * to be in D0 too.
+ */
+ if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+ && result == ACPI_STATE_D0)
+ device->parent->power.state = ACPI_STATE_D0;
+
+ *state = result;
+
+ out:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+ device->pnp.bus_id, acpi_power_state_string(*state)));
+
+ return 0;
+}
+
+static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
+{
+ if (adev->power.states[state].flags.explicit_set) {
+ char method[5] = { '_', 'P', 'S', '0' + state, '\0' };
+ acpi_status status;
+
+ status = acpi_evaluate_object(adev->handle, method, NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/**
+ * acpi_device_set_power - Set power state of an ACPI device.
+ * @device: Device to set the power state of.
+ * @state: New power state to set.
+ *
+ * Callers must ensure that the device is power manageable before using this
+ * function.
+ */
+int acpi_device_set_power(struct acpi_device *device, int state)
+{
+ int result = 0;
+ bool cut_power = false;
+
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
+ return -EINVAL;
+
+ /* Make sure this is a valid target state */
+
+ if (state == device->power.state) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+ acpi_power_state_string(state)));
+ return 0;
+ }
+
+ if (!device->power.states[state].flags.valid) {
+ printk(KERN_WARNING PREFIX "Device does not support %s\n",
+ acpi_power_state_string(state));
+ return -ENODEV;
+ }
+ if (device->parent && (state < device->parent->power.state)) {
+ printk(KERN_WARNING PREFIX
+ "Cannot set device to a higher-powered"
+ " state than parent\n");
+ return -ENODEV;
+ }
+
+ /* For D3cold we should first transition into D3hot. */
+ if (state == ACPI_STATE_D3_COLD
+ && device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible) {
+ state = ACPI_STATE_D3_HOT;
+ cut_power = true;
+ }
+
+ if (state < device->power.state && state != ACPI_STATE_D0
+ && device->power.state >= ACPI_STATE_D3_HOT) {
+ printk(KERN_WARNING PREFIX
+ "Cannot transition to non-D0 state from D3\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Transition Power
+ * ----------------
+ * In accordance with the ACPI specification first apply power (via
+	 * power resources) and then evaluate _PSx.
+ */
+ if (device->power.flags.power_resources) {
+ result = acpi_power_transition(device, state);
+ if (result)
+ goto end;
+ }
+ result = acpi_dev_pm_explicit_set(device, state);
+ if (result)
+ goto end;
+
+ if (cut_power) {
+ device->power.state = state;
+ state = ACPI_STATE_D3_COLD;
+ result = acpi_power_transition(device, state);
+ }
+
+ end:
+ if (result) {
+ printk(KERN_WARNING PREFIX
+ "Device [%s] failed to transition to %s\n",
+ device->pnp.bus_id,
+ acpi_power_state_string(state));
+ } else {
+ device->power.state = state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device [%s] transitioned to %s\n",
+ device->pnp.bus_id,
+ acpi_power_state_string(state)));
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(acpi_device_set_power);
+
+int acpi_bus_set_power(acpi_handle handle, int state)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ if (!device->flags.power_manageable) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device [%s] is not power manageable\n",
+ dev_name(&device->dev)));
+ return -ENODEV;
+ }
+
+ return acpi_device_set_power(device, state);
+}
+EXPORT_SYMBOL(acpi_bus_set_power);
+
+int acpi_bus_init_power(struct acpi_device *device)
+{
+ int state;
+ int result;
+
+ if (!device)
+ return -EINVAL;
+
+ device->power.state = ACPI_STATE_UNKNOWN;
+
+ result = acpi_device_get_power(device, &state);
+ if (result)
+ return result;
+
+ if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) {
+ result = acpi_power_on_resources(device, state);
+ if (result)
+ return result;
+
+ result = acpi_dev_pm_explicit_set(device, state);
+ if (result)
+ return result;
+ } else if (state == ACPI_STATE_UNKNOWN) {
+ /* No power resources and missing _PSC? Try to force D0. */
+ state = ACPI_STATE_D0;
+ result = acpi_dev_pm_explicit_set(device, state);
+ if (result)
+ return result;
+ }
+ device->power.state = state;
+ return 0;
+}
+
+int acpi_bus_update_power(acpi_handle handle, int *state_p)
+{
+ struct acpi_device *device;
+ int state;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ result = acpi_device_get_power(device, &state);
+ if (result)
+ return result;
+
+ if (state == ACPI_STATE_UNKNOWN)
+ state = ACPI_STATE_D0;
+
+ result = acpi_device_set_power(device, state);
+ if (!result && state_p)
+ *state_p = state;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_update_power);
+
+bool acpi_bus_power_manageable(acpi_handle handle)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ return result ? false : device->flags.power_manageable;
+}
+EXPORT_SYMBOL(acpi_bus_power_manageable);
+
+bool acpi_bus_can_wakeup(acpi_handle handle)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ return result ? false : device->wakeup.flags.valid;
+}
+EXPORT_SYMBOL(acpi_bus_can_wakeup);
+
+/**
* acpi_device_power_state - Get preferred power state of ACPI device.
* @dev: Device whose preferred target power state to return.
* @adev: ACPI device node corresponding to @dev.
@@ -213,7 +506,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
struct acpi_device *adev;
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
return -ENODEV;
}
@@ -290,7 +583,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
return -EINVAL;
handle = DEVICE_ACPI_HANDLE(phys_dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
__func__);
return -ENODEV;
@@ -304,7 +597,7 @@ static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
void *context) {}
#endif /* CONFIG_PM_RUNTIME */
- #ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
/**
* __acpi_device_sleep_wake - Enable or disable device to wake up the system.
 * @dev: Device to enable/disable to wake up the system.
@@ -334,7 +627,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
return -EINVAL;
handle = DEVICE_ACPI_HANDLE(dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
return -ENODEV;
}
@@ -353,13 +646,12 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
* acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
* @dev: Device to get the ACPI node for.
*/
-static struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
+struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
struct acpi_device *adev;
- return handle && ACPI_SUCCESS(acpi_bus_get_device(handle, &adev)) ?
- adev : NULL;
+ return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
}
/**
@@ -666,3 +958,59 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
}
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
+
+/**
+ * acpi_dev_pm_add_dependent - Add physical device depending on an ACPI node for PM.
+ * @handle: Handle of ACPI device node.
+ * @depdev: Device depending on that node for PM.
+ */
+void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
+{
+ struct acpi_device_physical_node *dep;
+ struct acpi_device *adev;
+
+ if (!depdev || acpi_bus_get_device(handle, &adev))
+ return;
+
+ mutex_lock(&adev->physical_node_lock);
+
+ list_for_each_entry(dep, &adev->power_dependent, node)
+ if (dep->dev == depdev)
+ goto out;
+
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (dep) {
+ dep->dev = depdev;
+ list_add_tail(&dep->node, &adev->power_dependent);
+ }
+
+ out:
+ mutex_unlock(&adev->physical_node_lock);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
+
+/**
+ * acpi_dev_pm_remove_dependent - Remove physical device depending on an ACPI node for PM.
+ * @handle: Handle of ACPI device node.
+ * @depdev: Device depending on that node for PM.
+ */
+void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
+{
+ struct acpi_device_physical_node *dep;
+ struct acpi_device *adev;
+
+ if (!depdev || acpi_bus_get_device(handle, &adev))
+ return;
+
+ mutex_lock(&adev->physical_node_lock);
+
+ list_for_each_entry(dep, &adev->power_dependent, node)
+ if (dep->dev == depdev) {
+ list_del(&dep->node);
+ kfree(dep);
+ break;
+ }
+
+ mutex_unlock(&adev->physical_node_lock);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
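
Note: the exported helpers added above (acpi_bus_set_power(), acpi_bus_update_power(), acpi_bus_power_manageable()) remain the handle-based entry points after the move out of bus.c. A minimal usage sketch; the function is hypothetical and the handle is assumed to come from something like DEVICE_ACPI_HANDLE(dev):

#include <acpi/acpi_bus.h>

static int example_power_cycle(acpi_handle handle)
{
	int state, ret;

	if (!acpi_bus_power_manageable(handle))
		return -ENODEV;

	/* Re-read the current state from firmware and cache it. */
	ret = acpi_bus_update_power(handle, &state);
	if (ret)
		return ret;

	/* Drop into D3hot, then bring the device back to D0. */
	ret = acpi_bus_set_power(handle, ACPI_STATE_D3_HOT);
	if (ret)
		return ret;

	return acpi_bus_set_power(handle, ACPI_STATE_D0);
}
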
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index f32bd47b35e0..4fdea381ef21 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -310,8 +310,6 @@ static int dock_present(struct dock_station *ds)
static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
- struct acpi_device *parent_device;
- acpi_handle parent;
int ret;
if (acpi_bus_get_device(handle, &device)) {
@@ -319,16 +317,11 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
* no device created for this object,
* so we should create one.
*/
- acpi_get_parent(handle, &parent);
- if (acpi_bus_get_device(parent, &parent_device))
- parent_device = NULL;
-
- ret = acpi_bus_add(&device, parent_device, handle,
- ACPI_BUS_TYPE_DEVICE);
- if (ret) {
+ ret = acpi_bus_scan(handle);
+ if (ret)
pr_debug("error adding bus, %x\n", -ret);
- return NULL;
- }
+
+ acpi_bus_get_device(handle, &device);
}
return device;
}
@@ -343,13 +336,9 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
static void dock_remove_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
- int ret;
- if (!acpi_bus_get_device(handle, &device)) {
- ret = acpi_bus_trim(device, 1);
- if (ret)
- pr_debug("error removing bus, %x\n", -ret);
- }
+ if (!acpi_bus_get_device(handle, &device))
+ acpi_bus_trim(device);
}
/**
@@ -755,7 +744,9 @@ static void acpi_dock_deferred_cb(void *context)
{
struct dock_data *data = context;
+ acpi_scan_lock_acquire();
dock_notify(data->handle, data->event, data->ds);
+ acpi_scan_lock_release();
kfree(data);
}
@@ -768,20 +759,31 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
return 0;
+
+ acpi_scan_lock_acquire();
+
list_for_each_entry(dock_station, &dock_stations, sibling) {
if (dock_station->handle == handle) {
struct dock_data *dd;
+ acpi_status status;
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
if (!dd)
- return 0;
+ break;
+
dd->handle = handle;
dd->event = event;
dd->ds = dock_station;
- acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
- return 0 ;
+ status = acpi_os_hotplug_execute(acpi_dock_deferred_cb,
+ dd);
+ if (ACPI_FAILURE(status))
+ kfree(dd);
+
+ break;
}
}
+
+ acpi_scan_lock_release();
return 0;
}
@@ -836,7 +838,7 @@ static ssize_t show_docked(struct device *dev,
struct dock_station *dock_station = dev->platform_data;
- if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
+ if (!acpi_bus_get_device(dock_station->handle, &tmp))
return snprintf(buf, PAGE_SIZE, "1\n");
return snprintf(buf, PAGE_SIZE, "0\n");
}
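
Note: the dock changes follow the same locking rule introduced for containers: deferred hotplug work that ends up calling acpi_bus_scan() or acpi_bus_trim() runs under the scan lock. A skeletal version of that deferral pattern, assuming only the calls visible in the hunks above; the payload structure and function names are hypothetical:

#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>

struct example_hotplug_data {
	acpi_handle handle;
	u32 event;
};

static void example_hotplug_work(void *context)
{
	struct example_hotplug_data *data = context;

	acpi_scan_lock_acquire();
	/* Rescan (or trim) the affected subtree under the scan lock. */
	acpi_bus_scan(data->handle);
	acpi_scan_lock_release();
	kfree(data);
}

static void example_queue_hotplug(acpi_handle handle, u32 event)
{
	struct example_hotplug_data *data;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	data->handle = handle;
	data->event = event;
	if (ACPI_FAILURE(acpi_os_hotplug_execute(example_hotplug_work, data)))
		kfree(data);
}
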
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 354007d490d1..d45b2871d33b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -852,7 +852,7 @@ static int acpi_ec_add(struct acpi_device *device)
return ret;
}
-static int acpi_ec_remove(struct acpi_device *device, int type)
+static int acpi_ec_remove(struct acpi_device *device)
{
struct acpi_ec *ec;
struct acpi_ec_query_handler *handler, *tmp;
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 3bd6a54702d6..f815da82c765 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -45,7 +45,7 @@ MODULE_DESCRIPTION("ACPI Fan Driver");
MODULE_LICENSE("GPL");
static int acpi_fan_add(struct acpi_device *device);
-static int acpi_fan_remove(struct acpi_device *device, int type);
+static int acpi_fan_remove(struct acpi_device *device);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
@@ -172,7 +172,7 @@ static int acpi_fan_add(struct acpi_device *device)
return result;
}
-static int acpi_fan_remove(struct acpi_device *device, int type)
+static int acpi_fan_remove(struct acpi_device *device)
{
struct thermal_cooling_device *cdev = acpi_driver_data(device);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 01551840d236..ef6f155469b5 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -18,9 +18,14 @@
#define ACPI_GLUE_DEBUG 0
#if ACPI_GLUE_DEBUG
-#define DBG(x...) printk(PREFIX x)
+#define DBG(fmt, ...) \
+ printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__)
#else
-#define DBG(x...) do { } while(0)
+#define DBG(fmt, ...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__); \
+} while (0)
#endif
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
@@ -63,6 +68,9 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
{
struct acpi_bus_type *tmp, *ret = NULL;
+ if (!type)
+ return NULL;
+
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->bus == type) {
@@ -90,40 +98,31 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
return ret;
}
-/* Get device's handler per its address under its parent */
-struct acpi_find_child {
- acpi_handle handle;
- u64 address;
-};
-
-static acpi_status
-do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
+static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
+ void *addr_p, void **ret_p)
{
+ unsigned long long addr;
acpi_status status;
- struct acpi_device_info *info;
- struct acpi_find_child *find = context;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_SUCCESS(status)) {
- if ((info->address == find->address)
- && (info->valid & ACPI_VALID_ADR))
- find->handle = handle;
- kfree(info);
+
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
+ if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
+ *ret_p = handle;
+ return AE_CTRL_TERMINATE;
}
return AE_OK;
}
acpi_handle acpi_get_child(acpi_handle parent, u64 address)
{
- struct acpi_find_child find = { NULL, address };
+ void *ret = NULL;
if (!parent)
return NULL;
- acpi_walk_namespace(ACPI_TYPE_DEVICE, parent,
- 1, do_acpi_find_child, NULL, &find, NULL);
- return find.handle;
-}
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
+ do_acpi_find_child, &address, &ret);
+ return (acpi_handle)ret;
+}
EXPORT_SYMBOL(acpi_get_child);
static int acpi_bind_one(struct device *dev, acpi_handle handle)
@@ -264,35 +263,46 @@ static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type;
acpi_handle handle;
- int ret = -EINVAL;
+ int ret;
ret = acpi_bind_one(dev, NULL);
- if (!ret)
- goto out;
-
- if (!dev->bus || !dev->parent) {
+ if (ret && (!dev->bus || !dev->parent)) {
 		/* bridge devices generally have no bus or parent */
ret = acpi_find_bridge_device(dev, &handle);
- goto end;
+ if (!ret) {
+ ret = acpi_bind_one(dev, handle);
+ if (ret)
+ goto out;
+ }
}
+
type = acpi_get_bus_type(dev->bus);
- if (!type) {
- DBG("No ACPI bus support for %s\n", dev_name(dev));
- ret = -EINVAL;
- goto end;
+ if (ret) {
+ if (!type || !type->find_device) {
+ DBG("No ACPI bus support for %s\n", dev_name(dev));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = type->find_device(dev, &handle);
+ if (ret) {
+ DBG("Unable to get handle for %s\n", dev_name(dev));
+ goto out;
+ }
+ ret = acpi_bind_one(dev, handle);
+ if (ret)
+ goto out;
}
- if ((ret = type->find_device(dev, &handle)) != 0)
- DBG("Can't get handler for %s\n", dev_name(dev));
- end:
- if (!ret)
- acpi_bind_one(dev, handle);
+
+ if (type && type->setup)
+ type->setup(dev);
out:
#if ACPI_GLUE_DEBUG
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
} else
@@ -304,6 +314,12 @@ static int acpi_platform_notify(struct device *dev)
static int acpi_platform_notify_remove(struct device *dev)
{
+ struct acpi_bus_type *type;
+
+ type = acpi_get_bus_type(dev->bus);
+ if (type && type->cleanup)
+ type->cleanup(dev);
+
acpi_unbind_one(dev);
return 0;
}
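
Note: acpi_get_child() now terminates the namespace walk with AE_CTRL_TERMINATE as soon as a child with a matching _ADR is found, instead of fetching object info for every child. Nothing changes for callers; a small sketch of a typical PCI-style lookup (the devfn-to-_ADR encoding follows the usual ACPI convention, the wrapper itself is hypothetical):

#include <acpi/acpi_bus.h>

static acpi_handle example_find_child(acpi_handle parent, unsigned int devfn)
{
	/* _ADR for PCI: high 16 bits device number, low 16 bits function. */
	u64 addr = ((u64)(devfn >> 3) << 16) | (devfn & 0x07);

	return acpi_get_child(parent, addr);
}
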
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index a0cc796932f7..13b1d39d7cdf 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -70,7 +70,7 @@ static int acpi_hed_add(struct acpi_device *device)
return 0;
}
-static int acpi_hed_remove(struct acpi_device *device, int type)
+static int acpi_hed_remove(struct acpi_device *device)
{
hed_handle = NULL;
return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3c407cdc1ec1..79092328cf06 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -25,7 +25,16 @@
int init_acpi_device_notify(void);
int acpi_scan_init(void);
+void acpi_pci_root_init(void);
+void acpi_pci_link_init(void);
+void acpi_platform_init(void);
int acpi_sysfs_init(void);
+void acpi_csrt_init(void);
+#ifdef CONFIG_ACPI_CONTAINER
+void acpi_container_init(void);
+#else
+static inline void acpi_container_init(void) {}
+#endif
#ifdef CONFIG_DEBUG_FS
extern struct dentry *acpi_debugfs_dir;
@@ -35,15 +44,33 @@ static inline void acpi_debugfs_init(void) { return; }
#endif
/* --------------------------------------------------------------------------
+ Device Node Initialization / Removal
+ -------------------------------------------------------------------------- */
+#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
+ ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
+
+int acpi_device_add(struct acpi_device *device,
+ void (*release)(struct device *));
+void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
+ int type, unsigned long long sta);
+void acpi_device_add_finalize(struct acpi_device *device);
+void acpi_free_ids(struct acpi_device *device);
+
+/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
int acpi_power_init(void);
+void acpi_power_resources_list_free(struct list_head *list);
+int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
+ struct list_head *list);
+int acpi_add_power_resource(acpi_handle handle);
+void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
+int acpi_power_min_system_level(struct list_head *list);
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state);
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
-int acpi_bus_init_power(struct acpi_device *device);
int acpi_wakeup_device_init(void);
void acpi_early_processor_set_pdc(void);
@@ -98,6 +125,4 @@ static inline void suspend_nvs_restore(void) {}
-------------------------------------------------------------------------- */
struct platform_device;
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev);
-
#endif /* _ACPI_INTERNAL_H_ */
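
Note: the declarations added above replace the module_init()/subsys_initcall() hooks removed from container.c, pci_link.c and pci_root.c; the ACPI core is now expected to call these init functions directly during scan initialization. The exact call site is not part of this patch, so the wiring below is only a hypothetical excerpt:

#include <linux/init.h>
#include "internal.h"

static int __init example_acpi_scan_init(void)
{
	acpi_pci_root_init();
	acpi_pci_link_init();
	acpi_platform_init();
	acpi_csrt_init();
	acpi_container_init();
	return 0;
}
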
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index cb31298ca684..59844ee149be 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -116,14 +116,16 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
struct acpi_srat_mem_affinity *p =
(struct acpi_srat_mem_affinity *)header;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s\n",
+ "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
(unsigned long)p->base_address,
(unsigned long)p->length,
p->proximity_domain,
(p->flags & ACPI_SRAT_MEM_ENABLED)?
"enabled" : "disabled",
(p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
- " hot-pluggable" : ""));
+ " hot-pluggable" : "",
+ (p->flags & ACPI_SRAT_MEM_NON_VOLATILE)?
+ " non-volatile" : ""));
}
#endif /* ACPI_DEBUG_OUTPUT */
break;
@@ -273,17 +275,17 @@ static int __init acpi_parse_srat(struct acpi_table_header *table)
static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
- acpi_table_entry_handler handler, unsigned int max_entries)
+ acpi_tbl_entry_handler handler, unsigned int max_entries)
{
return acpi_table_parse_entries(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat), id,
handler, max_entries);
}
-int __init acpi_numa_init(void)
-{
- int cnt = 0;
+static int srat_mem_cnt;
+void __init early_parse_srat(void)
+{
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
* SRAT cpu entries could have different order with that in MADT.
@@ -293,21 +295,24 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, 0);
+ acpi_parse_x2apic_affinity, 0);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, 0);
- cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_processor_affinity, 0);
+ srat_mem_cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ acpi_parse_memory_affinity,
+ NR_NODE_MEMBLKS);
}
+}
+int __init acpi_numa_init(void)
+{
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
acpi_numa_arch_fixup();
- if (cnt < 0)
- return cnt;
+ if (srat_mem_cnt < 0)
+ return srat_mem_cnt;
else if (!parsed_numa_memblks)
return -ENOENT;
return 0;
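
Note: splitting the memory-affinity parsing into early_parse_srat() lets the architecture consume SRAT hotplug information before memblock allocations happen, while acpi_numa_init() keeps handling SLIT parsing and the fixups. The arch-side call site is not part of this hunk; presumably the ordering looks roughly like this (the surrounding function is hypothetical):

#include <linux/init.h>
#include <linux/acpi.h>

void __init example_arch_numa_setup(void)
{
	early_parse_srat();	/* memory affinity entries first */

	/* ... memblock reservations that depend on hotplug info ... */

	acpi_numa_init();	/* SLIT parsing and arch fixups */
}
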
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 6dc4a2b1e956..908b02d5da1b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
return acpi_rsdp;
#endif
- if (efi_enabled) {
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
@@ -534,6 +534,137 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
+#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+#include <linux/earlycpio.h>
+#include <linux/memblock.h>
+
+static u64 acpi_tables_addr;
+static int all_tables_size;
+
+/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
+u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+{
+ u8 sum = 0;
+ u8 *end = buffer + length;
+
+ while (buffer < end)
+ sum = (u8) (sum + *(buffer++));
+ return sum;
+}
+
+/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
+static const char * const table_sigs[] = {
+ ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
+ ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
+ ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
+ ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
+ ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
+ ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
+ ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
+ ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
+ ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
+
+/* Non-fatal errors: Affected tables/files are ignored */
+#define INVALID_TABLE(x, path, name) \
+ { pr_err("ACPI OVERRIDE: " x " [%s%s]\n", path, name); continue; }
+
+#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
+
+/* Must not exceed 10 without modifying the code below */
+#define ACPI_OVERRIDE_TABLES 10
+
+void __init acpi_initrd_override(void *data, size_t size)
+{
+ int sig, no, table_nr = 0, total_offset = 0;
+ long offset = 0;
+ struct acpi_table_header *table;
+ char cpio_path[32] = "kernel/firmware/acpi/";
+ struct cpio_data file;
+ struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
+ char *p;
+
+ if (data == NULL || size == 0)
+ return;
+
+ for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
+ file = find_cpio_data(cpio_path, data, size, &offset);
+ if (!file.data)
+ break;
+
+ data += offset;
+ size -= offset;
+
+ if (file.size < sizeof(struct acpi_table_header))
+ INVALID_TABLE("Table smaller than ACPI header",
+ cpio_path, file.name);
+
+ table = file.data;
+
+ for (sig = 0; table_sigs[sig]; sig++)
+ if (!memcmp(table->signature, table_sigs[sig], 4))
+ break;
+
+ if (!table_sigs[sig])
+ INVALID_TABLE("Unknown signature",
+ cpio_path, file.name);
+ if (file.size != table->length)
+ INVALID_TABLE("File length does not match table length",
+ cpio_path, file.name);
+ if (acpi_table_checksum(file.data, table->length))
+ INVALID_TABLE("Bad table checksum",
+ cpio_path, file.name);
+
+ pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
+ table->signature, cpio_path, file.name, table->length);
+
+ all_tables_size += table->length;
+ early_initrd_files[table_nr].data = file.data;
+ early_initrd_files[table_nr].size = file.size;
+ table_nr++;
+ }
+ if (table_nr == 0)
+ return;
+
+ acpi_tables_addr =
+ memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+ all_tables_size, PAGE_SIZE);
+ if (!acpi_tables_addr) {
+ WARN_ON(1);
+ return;
+ }
+	/*
+	 * Calling e820_add_reserve alone does not work: the tables get
+	 * clobbered later because the memory is reused.
+	 * memblock_reserve keeps the tables intact, but on x86 it is
+	 * still not enough, because ioremap (used by acpi_os_map_memory)
+	 * will later complain that the pages to be mapped are not marked
+	 * "reserved".
+	 * Using both memblock_reserve and e820_add_region (via
+	 * arch_reserve_mem_area) works fine.
+	 */
+	memblock_reserve(acpi_tables_addr, all_tables_size);
+ arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+
+ p = early_ioremap(acpi_tables_addr, all_tables_size);
+
+ for (no = 0; no < table_nr; no++) {
+ memcpy(p + total_offset, early_initrd_files[no].data,
+ early_initrd_files[no].size);
+ total_offset += early_initrd_files[no].size;
+ }
+ early_iounmap(p, all_tables_size);
+}
+#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
+
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+ pr_warn(PREFIX
+ "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+ table->signature, table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+}
+
+
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
struct acpi_table_header ** new_table)
@@ -547,24 +678,73 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
if (strncmp(existing_table->signature, "DSDT", 4) == 0)
*new_table = (struct acpi_table_header *)AmlCode;
#endif
- if (*new_table != NULL) {
- printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
- "this is unsafe: tainting kernel\n",
- existing_table->signature,
- existing_table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
- }
+ if (*new_table != NULL)
+ acpi_table_taint(existing_table);
return AE_OK;
}
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address * new_address,
- u32 *new_table_length)
+ acpi_physical_address *address,
+ u32 *table_length)
{
- return AE_SUPPORT;
-}
+#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+ *table_length = 0;
+ *address = 0;
+ return AE_OK;
+#else
+ int table_offset = 0;
+ struct acpi_table_header *table;
+
+ *table_length = 0;
+ *address = 0;
+
+ if (!acpi_tables_addr)
+ return AE_OK;
+
+ do {
+ if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
+ WARN_ON(1);
+ return AE_OK;
+ }
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return AE_OK;
+ }
+
+ table_offset += table->length;
+
+ if (memcmp(existing_table->signature, table->signature, 4)) {
+ acpi_os_unmap_memory(table,
+ ACPI_HEADER_SIZE);
+ continue;
+ }
+
+ /* Only override tables with matching oem id */
+ if (memcmp(table->oem_table_id, existing_table->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE)) {
+ acpi_os_unmap_memory(table,
+ ACPI_HEADER_SIZE);
+ continue;
+ }
+
+ table_offset -= table->length;
+ *table_length = table->length;
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ *address = acpi_tables_addr + table_offset;
+ break;
+ } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
+
+ if (*address != 0)
+ acpi_table_taint(existing_table);
+ return AE_OK;
+#endif
+}
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
@@ -607,7 +787,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
acpi_irq_handler = handler;
acpi_irq_context = context;
- if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+ if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
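
Note: the override loop above rejects any table whose whole-image sum is non-zero ("Bad table checksum"). For reference, this is how a table's checksum byte is normally fixed up so that acpi_table_checksum() over the complete image returns 0; the helper below is only an illustration, not part of the patch:

#include <linux/acpi.h>

static void __init example_fix_checksum(struct acpi_table_header *table)
{
	table->checksum = 0;
	table->checksum = (u8)(0 - acpi_table_checksum((u8 *)table,
							table->length));
}
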
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
deleted file mode 100644
index a1dee29beed3..000000000000
--- a/drivers/acpi/pci_bind.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * pci_bind.c - ACPI PCI Device Binding ($Revision: 2 $)
- *
- * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci-acpi.h>
-#include <linux/acpi.h>
-#include <linux/pm_runtime.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define _COMPONENT ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_bind");
-
-static int acpi_pci_unbind(struct acpi_device *device)
-{
- struct pci_dev *dev;
-
- dev = acpi_get_pci_dev(device->handle);
- if (!dev)
- goto out;
-
- device_set_run_wake(&dev->dev, false);
- pci_acpi_remove_pm_notifier(device);
- acpi_power_resource_unregister_device(&dev->dev, device->handle);
-
- if (!dev->subordinate)
- goto out;
-
- acpi_pci_irq_del_prt(pci_domain_nr(dev->bus), dev->subordinate->number);
-
- device->ops.bind = NULL;
- device->ops.unbind = NULL;
-
-out:
- pci_dev_put(dev);
- return 0;
-}
-
-static int acpi_pci_bind(struct acpi_device *device)
-{
- acpi_status status;
- acpi_handle handle;
- unsigned char bus;
- struct pci_dev *dev;
-
- dev = acpi_get_pci_dev(device->handle);
- if (!dev)
- return 0;
-
- pci_acpi_add_pm_notifier(device, dev);
- acpi_power_resource_register_device(&dev->dev, device->handle);
- if (device->wakeup.flags.run_wake)
- device_set_run_wake(&dev->dev, true);
-
- /*
- * Install the 'bind' function to facilitate callbacks for
- * children of the P2P bridge.
- */
- if (dev->subordinate) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device %04x:%02x:%02x.%d is a PCI bridge\n",
- pci_domain_nr(dev->bus), dev->bus->number,
- PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)));
- device->ops.bind = acpi_pci_bind;
- device->ops.unbind = acpi_pci_unbind;
- }
-
- /*
- * Evaluate and parse _PRT, if exists. This code allows parsing of
- * _PRT objects within the scope of non-bridge devices. Note that
- * _PRTs within the scope of a PCI bridge assume the bridge's
- * subordinate bus number.
- *
- * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
- */
- status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_FAILURE(status))
- goto out;
-
- if (dev->subordinate)
- bus = dev->subordinate->number;
- else
- bus = dev->bus->number;
-
- acpi_pci_irq_add_prt(device->handle, pci_domain_nr(dev->bus), bus);
-
-out:
- pci_dev_put(dev);
- return 0;
-}
-
-int acpi_pci_bind_root(struct acpi_device *device)
-{
- device->ops.bind = acpi_pci_bind;
- device->ops.unbind = acpi_pci_unbind;
-
- return 0;
-}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index a12808259dfb..ab764ed34a50 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -53,23 +53,19 @@ ACPI_MODULE_NAME("pci_link");
#define ACPI_PCI_LINK_FILE_STATUS "state"
#define ACPI_PCI_LINK_MAX_POSSIBLE 16
-static int acpi_pci_link_add(struct acpi_device *device);
-static int acpi_pci_link_remove(struct acpi_device *device, int type);
+static int acpi_pci_link_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used);
+static void acpi_pci_link_remove(struct acpi_device *device);
static const struct acpi_device_id link_device_ids[] = {
{"PNP0C0F", 0},
{"", 0},
};
-MODULE_DEVICE_TABLE(acpi, link_device_ids);
-static struct acpi_driver acpi_pci_link_driver = {
- .name = "pci_link",
- .class = ACPI_PCI_LINK_CLASS,
+static struct acpi_scan_handler pci_link_handler = {
.ids = link_device_ids,
- .ops = {
- .add = acpi_pci_link_add,
- .remove = acpi_pci_link_remove,
- },
+ .attach = acpi_pci_link_add,
+ .detach = acpi_pci_link_remove,
};
/*
@@ -692,7 +688,8 @@ int acpi_pci_link_free_irq(acpi_handle handle)
Driver Interface
-------------------------------------------------------------------------- */
-static int acpi_pci_link_add(struct acpi_device *device)
+static int acpi_pci_link_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
{
int result;
struct acpi_pci_link *link;
@@ -746,7 +743,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
if (result)
kfree(link);
- return result;
+ return result < 0 ? result : 1;
}
static int acpi_pci_link_resume(struct acpi_pci_link *link)
@@ -766,7 +763,7 @@ static void irqrouter_resume(void)
}
}
-static int acpi_pci_link_remove(struct acpi_device *device, int type)
+static void acpi_pci_link_remove(struct acpi_device *device)
{
struct acpi_pci_link *link;
@@ -777,7 +774,6 @@ static int acpi_pci_link_remove(struct acpi_device *device, int type)
mutex_unlock(&acpi_link_lock);
kfree(link);
- return 0;
}
/*
@@ -874,20 +870,10 @@ static struct syscore_ops irqrouter_syscore_ops = {
.resume = irqrouter_resume,
};
-static int __init irqrouter_init_ops(void)
-{
- if (!acpi_disabled && !acpi_noirq)
- register_syscore_ops(&irqrouter_syscore_ops);
-
- return 0;
-}
-
-device_initcall(irqrouter_init_ops);
-
-static int __init acpi_pci_link_init(void)
+void __init acpi_pci_link_init(void)
{
if (acpi_noirq)
- return 0;
+ return;
if (acpi_irq_balance == -1) {
/* no command line switch: enable balancing in IOAPIC mode */
@@ -896,11 +882,6 @@ static int __init acpi_pci_link_init(void)
else
acpi_irq_balance = 0;
}
-
- if (acpi_bus_register_driver(&acpi_pci_link_driver) < 0)
- return -ENODEV;
-
- return 0;
+ register_syscore_ops(&irqrouter_syscore_ops);
+ acpi_scan_add_handler(&pci_link_handler);
}
-
-subsys_initcall(acpi_pci_link_init);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7928d4dc7056..b3cc69c5caf1 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -45,9 +45,9 @@
ACPI_MODULE_NAME("pci_root");
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge"
-static int acpi_pci_root_add(struct acpi_device *device);
-static int acpi_pci_root_remove(struct acpi_device *device, int type);
-static int acpi_pci_root_start(struct acpi_device *device);
+static int acpi_pci_root_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used);
+static void acpi_pci_root_remove(struct acpi_device *device);
#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
| OSC_ACTIVE_STATE_PWR_SUPPORT \
@@ -58,17 +58,11 @@ static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
{"", 0},
};
-MODULE_DEVICE_TABLE(acpi, root_device_ids);
-static struct acpi_driver acpi_pci_root_driver = {
- .name = "pci_root",
- .class = ACPI_PCI_ROOT_CLASS,
+static struct acpi_scan_handler pci_root_handler = {
.ids = root_device_ids,
- .ops = {
- .add = acpi_pci_root_add,
- .remove = acpi_pci_root_remove,
- .start = acpi_pci_root_start,
- },
+ .attach = acpi_pci_root_add,
+ .detach = acpi_pci_root_remove,
};
/* Lock to protect both acpi_pci_roots and acpi_pci_drivers lists */
@@ -188,21 +182,6 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
return AE_OK;
}
-static void acpi_pci_bridge_scan(struct acpi_device *device)
-{
- int status;
- struct acpi_device *child = NULL;
-
- if (device->flags.bus_address)
- if (device->parent && device->parent->ops.bind) {
- status = device->parent->ops.bind(device);
- if (!status) {
- list_for_each_entry(child, &device->children, node)
- acpi_pci_bridge_scan(child);
- }
- }
-}
-
static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static acpi_status acpi_pci_run_osc(acpi_handle handle,
@@ -445,14 +424,15 @@ out:
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
-static int acpi_pci_root_add(struct acpi_device *device)
+static int acpi_pci_root_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
{
unsigned long long segment, bus;
acpi_status status;
int result;
struct acpi_pci_root *root;
acpi_handle handle;
- struct acpi_device *child;
+ struct acpi_pci_driver *driver;
u32 flags, base_flags;
bool is_osc_granted = false;
@@ -603,21 +583,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
goto out_del_root;
}
- /*
- * Attach ACPI-PCI Context
- * -----------------------
- * Thus binding the ACPI and PCI devices.
- */
- result = acpi_pci_bind_root(device);
- if (result)
- goto out_del_root;
-
- /*
- * Scan and bind all _ADR-Based Devices
- */
- list_for_each_entry(child, &device->children, node)
- acpi_pci_bridge_scan(child);
-
/* ASPM setting */
if (is_osc_granted) {
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
@@ -632,24 +597,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
- return 0;
-
-out_del_root:
- mutex_lock(&acpi_pci_root_lock);
- list_del(&root->node);
- mutex_unlock(&acpi_pci_root_lock);
-
- acpi_pci_irq_del_prt(root->segment, root->secondary.start);
-end:
- kfree(root);
- return result;
-}
-
-static int acpi_pci_root_start(struct acpi_device *device)
-{
- struct acpi_pci_root *root = acpi_driver_data(device);
- struct acpi_pci_driver *driver;
-
if (system_state != SYSTEM_BOOTING)
pci_assign_unassigned_bus_resources(root->bus);
@@ -664,11 +611,20 @@ static int acpi_pci_root_start(struct acpi_device *device)
pci_enable_bridges(root->bus);
pci_bus_add_devices(root->bus);
+ return 1;
- return 0;
+out_del_root:
+ mutex_lock(&acpi_pci_root_lock);
+ list_del(&root->node);
+ mutex_unlock(&acpi_pci_root_lock);
+
+ acpi_pci_irq_del_prt(root->segment, root->secondary.start);
+end:
+ kfree(root);
+ return result;
}
-static int acpi_pci_root_remove(struct acpi_device *device, int type)
+static void acpi_pci_root_remove(struct acpi_device *device)
{
acpi_status status;
acpi_handle handle;
@@ -696,21 +652,14 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type)
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);
kfree(root);
- return 0;
}
-static int __init acpi_pci_root_init(void)
+void __init acpi_pci_root_init(void)
{
acpi_hest_init();
- if (acpi_pci_disabled)
- return 0;
-
- pci_acpi_crs_quirks();
- if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
- return -ENODEV;
-
- return 0;
+ if (!acpi_pci_disabled) {
+ pci_acpi_crs_quirks();
+ acpi_scan_add_handler(&pci_root_handler);
+ }
}
-
-subsys_initcall(acpi_pci_root_init);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d22585f21aeb..2c630c006c2f 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -50,13 +50,12 @@ module_param(debug, bool, 0644);
ACPI_MODULE_NAME("pci_slot");
#define MY_NAME "pci_slot"
-#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
+#define err(format, arg...) pr_err("%s: " format , MY_NAME , ## arg)
+#define info(format, arg...) pr_info("%s: " format , MY_NAME , ## arg)
#define dbg(format, arg...) \
do { \
if (debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
+ pr_debug("%s: " format, MY_NAME , ## arg); \
} while (0)
#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 7db61b8fa11f..b820528a5fa3 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -41,6 +41,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include "sleep.h"
@@ -58,88 +59,121 @@ ACPI_MODULE_NAME("power");
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
-static int acpi_power_add(struct acpi_device *device);
-static int acpi_power_remove(struct acpi_device *device, int type);
-
-static const struct acpi_device_id power_device_ids[] = {
- {ACPI_POWER_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, power_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_power_resume(struct device *dev);
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
-
-static struct acpi_driver acpi_power_driver = {
- .name = "power",
- .class = ACPI_POWER_CLASS,
- .ids = power_device_ids,
- .ops = {
- .add = acpi_power_add,
- .remove = acpi_power_remove,
- },
- .drv.pm = &acpi_power_pm,
-};
-
-/*
- * A power managed device
- * A device may rely on multiple power resources.
- * */
-struct acpi_power_managed_device {
- struct device *dev; /* The physical device */
- acpi_handle *handle;
-};
-
-struct acpi_power_resource_device {
- struct acpi_power_managed_device *device;
- struct acpi_power_resource_device *next;
+struct acpi_power_dependent_device {
+ struct list_head node;
+ struct acpi_device *adev;
+ struct work_struct work;
};
struct acpi_power_resource {
- struct acpi_device * device;
- acpi_bus_id name;
+ struct acpi_device device;
+ struct list_head list_node;
+ struct list_head dependent;
+ char *name;
u32 system_level;
u32 order;
unsigned int ref_count;
struct mutex resource_lock;
+};
- /* List of devices relying on this power resource */
- struct acpi_power_resource_device *devices;
- struct mutex devices_lock;
+struct acpi_power_resource_entry {
+ struct list_head node;
+ struct acpi_power_resource *resource;
};
-static struct list_head acpi_power_resource_list;
+static LIST_HEAD(acpi_power_resource_list);
+static DEFINE_MUTEX(power_resource_list_lock);
/* --------------------------------------------------------------------------
Power Resource Management
-------------------------------------------------------------------------- */
-static int
-acpi_power_get_context(acpi_handle handle,
- struct acpi_power_resource **resource)
+static inline
+struct acpi_power_resource *to_power_resource(struct acpi_device *device)
{
- int result = 0;
- struct acpi_device *device = NULL;
+ return container_of(device, struct acpi_power_resource, device);
+}
+
+static struct acpi_power_resource *acpi_power_get_context(acpi_handle handle)
+{
+ struct acpi_device *device;
+ if (acpi_bus_get_device(handle, &device))
+ return NULL;
- if (!resource)
- return -ENODEV;
+ return to_power_resource(device);
+}
- result = acpi_bus_get_device(handle, &device);
- if (result) {
- printk(KERN_WARNING PREFIX "Getting context [%p]\n", handle);
- return result;
- }
+static int acpi_power_resources_list_add(acpi_handle handle,
+ struct list_head *list)
+{
+ struct acpi_power_resource *resource = acpi_power_get_context(handle);
+ struct acpi_power_resource_entry *entry;
- *resource = acpi_driver_data(device);
- if (!*resource)
- return -ENODEV;
+ if (!resource || !list)
+ return -EINVAL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->resource = resource;
+ if (!list_empty(list)) {
+ struct acpi_power_resource_entry *e;
+ list_for_each_entry(e, list, node)
+ if (e->resource->order > resource->order) {
+ list_add_tail(&entry->node, &e->node);
+ return 0;
+ }
+ }
+ list_add_tail(&entry->node, list);
return 0;
}
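The insertion logic above keeps the entries sorted by the power resource's _PRx ResourceOrder value in ascending order, so the list walkers later in this file touch resources in the order the firmware specified. A small sketch of the invariant, using only fields introduced by this patch:

/* Sketch: the list is expected to be non-decreasing in ->order. */
static bool power_list_is_sorted(struct list_head *list)
{
	struct acpi_power_resource_entry *entry;
	u32 prev = 0;

	list_for_each_entry(entry, list, node) {
		if (entry->resource->order < prev)
			return false;
		prev = entry->resource->order;
	}
	return true;
}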
+void acpi_power_resources_list_free(struct list_head *list)
+{
+ struct acpi_power_resource_entry *entry, *e;
+
+ list_for_each_entry_safe(entry, e, list, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
+ struct list_head *list)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = start; i < package->package.count; i++) {
+ union acpi_object *element = &package->package.elements[i];
+ acpi_handle rhandle;
+
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
+ err = -ENODATA;
+ break;
+ }
+ rhandle = element->reference.handle;
+ if (!rhandle) {
+ err = -ENODEV;
+ break;
+ }
+ err = acpi_add_power_resource(rhandle);
+ if (err)
+ break;
+
+ err = acpi_power_resources_list_add(rhandle, list);
+ if (err)
+ break;
+ }
+ if (err)
+ acpi_power_resources_list_free(list);
+
+ return err;
+}
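acpi_extract_power_resources() becomes the one place where the trailing reference elements of a _PRx or _PRW package are registered as power resource devices and collected into an ordered list. A hedged caller sketch (the handle is hypothetical and error handling is abbreviated):

	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	LIST_HEAD(res_list);
	int err = -ENODEV;

	if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_PR0", NULL, &buffer))) {
		/* _PRx packages contain only references, so start at element 0;
		 * for _PRW the references begin at element 2. */
		err = acpi_extract_power_resources(buffer.pointer, 0, &res_list);
		ACPI_FREE(buffer.pointer);
	}
	/* On success the entries stay on res_list until
	 * acpi_power_resources_list_free() is called on it. */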
+
static int acpi_power_get_state(acpi_handle handle, int *state)
{
acpi_status status = AE_OK;
@@ -167,31 +201,23 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
return 0;
}
-static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
+static int acpi_power_get_list_state(struct list_head *list, int *state)
{
+ struct acpi_power_resource_entry *entry;
int cur_state;
- int i = 0;
if (!list || !state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
-
- for (i = 0; i < list->count; i++) {
- struct acpi_power_resource *resource;
- acpi_handle handle = list->handles[i];
+ list_for_each_entry(entry, list, node) {
+ struct acpi_power_resource *resource = entry->resource;
+ acpi_handle handle = resource->device.handle;
int result;
- result = acpi_power_get_context(handle, &resource);
- if (result)
- return result;
-
mutex_lock(&resource->resource_lock);
-
result = acpi_power_get_state(handle, &cur_state);
-
mutex_unlock(&resource->resource_lock);
-
if (result)
return result;
@@ -203,54 +229,52 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
cur_state ? "on" : "off"));
*state = cur_state;
-
return 0;
}
-/* Resume the device when all power resources in _PR0 are on */
-static void acpi_power_on_device(struct acpi_power_managed_device *device)
+static void acpi_power_resume_dependent(struct work_struct *work)
{
- struct acpi_device *acpi_dev;
- acpi_handle handle = device->handle;
+ struct acpi_power_dependent_device *dep;
+ struct acpi_device_physical_node *pn;
+ struct acpi_device *adev;
int state;
- if (acpi_bus_get_device(handle, &acpi_dev))
+ dep = container_of(work, struct acpi_power_dependent_device, work);
+ adev = dep->adev;
+ if (acpi_power_get_inferred_state(adev, &state))
return;
- if(acpi_power_get_inferred_state(acpi_dev, &state))
+ if (state > ACPI_STATE_D0)
return;
- if (state == ACPI_STATE_D0 && pm_runtime_suspended(device->dev))
- pm_request_resume(device->dev);
+ mutex_lock(&adev->physical_node_lock);
+
+ list_for_each_entry(pn, &adev->physical_node_list, node)
+ pm_request_resume(pn->dev);
+
+ list_for_each_entry(pn, &adev->power_dependent, node)
+ pm_request_resume(pn->dev);
+
+ mutex_unlock(&adev->physical_node_lock);
}
static int __acpi_power_on(struct acpi_power_resource *resource)
{
acpi_status status = AE_OK;
- status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
+ status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL);
if (ACPI_FAILURE(status))
return -ENODEV;
- /* Update the power resource's _device_ power state */
- resource->device->power.state = ACPI_STATE_D0;
-
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
resource->name));
return 0;
}
-static int acpi_power_on(acpi_handle handle)
+static int acpi_power_on(struct acpi_power_resource *resource)
{
- int result = 0;
- bool resume_device = false;
- struct acpi_power_resource *resource = NULL;
- struct acpi_power_resource_device *device_list;
-
- result = acpi_power_get_context(handle, &resource);
- if (result)
- return result;
+	int result = 0;
mutex_lock(&resource->resource_lock);
@@ -260,39 +284,38 @@ static int acpi_power_on(acpi_handle handle)
resource->name));
} else {
result = __acpi_power_on(resource);
- if (result)
+ if (result) {
resource->ref_count--;
- else
- resume_device = true;
+ } else {
+ struct acpi_power_dependent_device *dep;
+
+ list_for_each_entry(dep, &resource->dependent, node)
+ schedule_work(&dep->work);
+ }
}
mutex_unlock(&resource->resource_lock);
- if (!resume_device)
- return result;
-
- mutex_lock(&resource->devices_lock);
+ return result;
+}
- device_list = resource->devices;
- while (device_list) {
- acpi_power_on_device(device_list->device);
- device_list = device_list->next;
- }
+static int __acpi_power_off(struct acpi_power_resource *resource)
+{
+ acpi_status status;
- mutex_unlock(&resource->devices_lock);
+ status = acpi_evaluate_object(resource->device.handle, "_OFF",
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
- return result;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned off\n",
+ resource->name));
+ return 0;
}
-static int acpi_power_off(acpi_handle handle)
+static int acpi_power_off(struct acpi_power_resource *resource)
{
int result = 0;
- acpi_status status = AE_OK;
- struct acpi_power_resource *resource = NULL;
-
- result = acpi_power_get_context(handle, &resource);
- if (result)
- return result;
mutex_lock(&resource->resource_lock);
@@ -307,19 +330,10 @@ static int acpi_power_off(acpi_handle handle)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Power resource [%s] still in use\n",
resource->name));
- goto unlock;
- }
-
- status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
} else {
- /* Update the power resource's _device_ power state */
- resource->device->power.state = ACPI_STATE_D3;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Power resource [%s] turned off\n",
- resource->name));
+ result = __acpi_power_off(resource);
+ if (result)
+ resource->ref_count++;
}
unlock:
@@ -328,155 +342,202 @@ static int acpi_power_off(acpi_handle handle)
return result;
}
-static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res)
+static int acpi_power_off_list(struct list_head *list)
{
- int i;
+ struct acpi_power_resource_entry *entry;
+ int result = 0;
- for (i = num_res - 1; i >= 0 ; i--)
- acpi_power_off(list->handles[i]);
-}
+ list_for_each_entry_reverse(entry, list, node) {
+ result = acpi_power_off(entry->resource);
+ if (result)
+ goto err;
+ }
+ return 0;
-static void acpi_power_off_list(struct acpi_handle_list *list)
-{
- __acpi_power_off_list(list, list->count);
+ err:
+ list_for_each_entry_continue(entry, list, node)
+ acpi_power_on(entry->resource);
+
+ return result;
}
-static int acpi_power_on_list(struct acpi_handle_list *list)
+static int acpi_power_on_list(struct list_head *list)
{
+ struct acpi_power_resource_entry *entry;
int result = 0;
- int i;
- for (i = 0; i < list->count; i++) {
- result = acpi_power_on(list->handles[i]);
- if (result) {
- __acpi_power_off_list(list, i);
- break;
- }
+ list_for_each_entry(entry, list, node) {
+ result = acpi_power_on(entry->resource);
+ if (result)
+ goto err;
}
+ return 0;
+
+ err:
+ list_for_each_entry_continue_reverse(entry, list, node)
+ acpi_power_off(entry->resource);
return result;
}
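Both walkers above roll back partial work on failure: acpi_power_off_list() resumes forward from the failing entry and turns the already-switched-off resources back on, while acpi_power_on_list() walks backwards with list_for_each_entry_continue_reverse(). A self-contained sketch of the latter pattern, with hypothetical items and helpers:

struct item { struct list_head node; };

static int switch_all_on(struct list_head *items)
{
	struct item *it;
	int err = 0;

	list_for_each_entry(it, items, node) {
		err = item_on(it);			/* hypothetical helper */
		if (err)
			goto rollback;
	}
	return 0;

 rollback:
	/* Starts at the predecessor of the failed entry and walks backwards
	 * over everything already switched on, undoing each step. */
	list_for_each_entry_continue_reverse(it, items, node)
		item_off(it);				/* hypothetical helper */
	return err;
}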
-static void __acpi_power_resource_unregister_device(struct device *dev,
- acpi_handle res_handle)
+static void acpi_power_add_dependent(struct acpi_power_resource *resource,
+ struct acpi_device *adev)
{
- struct acpi_power_resource *resource = NULL;
- struct acpi_power_resource_device *prev, *curr;
+ struct acpi_power_dependent_device *dep;
- if (acpi_power_get_context(res_handle, &resource))
- return;
+ mutex_lock(&resource->resource_lock);
- mutex_lock(&resource->devices_lock);
- prev = NULL;
- curr = resource->devices;
- while (curr) {
- if (curr->device->dev == dev) {
- if (!prev)
- resource->devices = curr->next;
- else
- prev->next = curr->next;
-
- kfree(curr);
- break;
- }
+ list_for_each_entry(dep, &resource->dependent, node)
+ if (dep->adev == adev)
+ goto out;
- prev = curr;
- curr = curr->next;
- }
- mutex_unlock(&resource->devices_lock);
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (!dep)
+ goto out;
+
+ dep->adev = adev;
+ INIT_WORK(&dep->work, acpi_power_resume_dependent);
+ list_add_tail(&dep->node, &resource->dependent);
+
+ out:
+ mutex_unlock(&resource->resource_lock);
}
-/* Unlink dev from all power resources in _PR0 */
-void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle)
+static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
+ struct acpi_device *adev)
{
- struct acpi_device *acpi_dev;
- struct acpi_handle_list *list;
- int i;
+ struct acpi_power_dependent_device *dep;
+ struct work_struct *work = NULL;
- if (!dev || !handle)
- return;
+ mutex_lock(&resource->resource_lock);
- if (acpi_bus_get_device(handle, &acpi_dev))
- return;
+ list_for_each_entry(dep, &resource->dependent, node)
+ if (dep->adev == adev) {
+ list_del(&dep->node);
+ work = &dep->work;
+ break;
+ }
- list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
+ mutex_unlock(&resource->resource_lock);
- for (i = 0; i < list->count; i++)
- __acpi_power_resource_unregister_device(dev,
- list->handles[i]);
+ if (work) {
+ cancel_work_sync(work);
+ kfree(dep);
+ }
}
-EXPORT_SYMBOL_GPL(acpi_power_resource_unregister_device);
-static int __acpi_power_resource_register_device(
- struct acpi_power_managed_device *powered_device, acpi_handle handle)
-{
- struct acpi_power_resource *resource = NULL;
- struct acpi_power_resource_device *power_resource_device;
- int result;
+static struct attribute *attrs[] = {
+ NULL,
+};
- result = acpi_power_get_context(handle, &resource);
- if (result)
- return result;
+static struct attribute_group attr_groups[] = {
+ [ACPI_STATE_D0] = {
+ .name = "power_resources_D0",
+ .attrs = attrs,
+ },
+ [ACPI_STATE_D1] = {
+ .name = "power_resources_D1",
+ .attrs = attrs,
+ },
+ [ACPI_STATE_D2] = {
+ .name = "power_resources_D2",
+ .attrs = attrs,
+ },
+ [ACPI_STATE_D3_HOT] = {
+ .name = "power_resources_D3hot",
+ .attrs = attrs,
+ },
+};
- power_resource_device = kzalloc(
- sizeof(*power_resource_device), GFP_KERNEL);
- if (!power_resource_device)
- return -ENOMEM;
+static void acpi_power_hide_list(struct acpi_device *adev, int state)
+{
+ struct acpi_device_power_state *ps = &adev->power.states[state];
+ struct acpi_power_resource_entry *entry;
- power_resource_device->device = powered_device;
+ if (list_empty(&ps->resources))
+ return;
- mutex_lock(&resource->devices_lock);
- power_resource_device->next = resource->devices;
- resource->devices = power_resource_device;
- mutex_unlock(&resource->devices_lock);
+ list_for_each_entry_reverse(entry, &ps->resources, node) {
+ struct acpi_device *res_dev = &entry->resource->device;
- return 0;
+ sysfs_remove_link_from_group(&adev->dev.kobj,
+ attr_groups[state].name,
+ dev_name(&res_dev->dev));
+ }
+ sysfs_remove_group(&adev->dev.kobj, &attr_groups[state]);
}
-/* Link dev to all power resources in _PR0 */
-int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
+static void acpi_power_expose_list(struct acpi_device *adev, int state)
{
- struct acpi_device *acpi_dev;
- struct acpi_handle_list *list;
- struct acpi_power_managed_device *powered_device;
- int i, ret;
+ struct acpi_device_power_state *ps = &adev->power.states[state];
+ struct acpi_power_resource_entry *entry;
+ int ret;
- if (!dev || !handle)
- return -ENODEV;
+ if (list_empty(&ps->resources))
+ return;
- ret = acpi_bus_get_device(handle, &acpi_dev);
+ ret = sysfs_create_group(&adev->dev.kobj, &attr_groups[state]);
if (ret)
- goto no_power_resource;
+ return;
- if (!acpi_dev->power.flags.power_resources)
- goto no_power_resource;
+ list_for_each_entry(entry, &ps->resources, node) {
+ struct acpi_device *res_dev = &entry->resource->device;
- powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL);
- if (!powered_device)
- return -ENOMEM;
+ ret = sysfs_add_link_to_group(&adev->dev.kobj,
+ attr_groups[state].name,
+ &res_dev->dev.kobj,
+ dev_name(&res_dev->dev));
+ if (ret) {
+ acpi_power_hide_list(adev, state);
+ break;
+ }
+ }
+}
- powered_device->dev = dev;
- powered_device->handle = handle;
+void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
+{
+ struct acpi_device_power_state *ps;
+ struct acpi_power_resource_entry *entry;
+ int state;
- list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
+ if (!adev->power.flags.power_resources)
+ return;
- for (i = 0; i < list->count; i++) {
- ret = __acpi_power_resource_register_device(powered_device,
- list->handles[i]);
+ ps = &adev->power.states[ACPI_STATE_D0];
+ list_for_each_entry(entry, &ps->resources, node) {
+ struct acpi_power_resource *resource = entry->resource;
- if (ret) {
- acpi_power_resource_unregister_device(dev, handle);
- break;
- }
+ if (add)
+ acpi_power_add_dependent(resource, adev);
+ else
+ acpi_power_remove_dependent(resource, adev);
+ }
+
+ for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
+ if (add)
+ acpi_power_expose_list(adev, state);
+ else
+ acpi_power_hide_list(adev, state);
}
+}
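Through acpi_power_add_remove_device(), every power-manageable device gains one sysfs group of symlinks per D-state pointing at the power resource devices it depends on, and each power resource exposes its reference count through the resource_in_use attribute defined further down. Roughly (the exact names depend on the firmware's namespace):

/*
 *   /sys/bus/acpi/devices/<device>/power_resources_D0/<resource>      (symlink)
 *   ...
 *   /sys/bus/acpi/devices/<device>/power_resources_D3hot/<resource>   (symlink)
 *   /sys/bus/acpi/devices/<resource>/resource_in_use                  (0 or 1)
 */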
+
+int acpi_power_min_system_level(struct list_head *list)
+{
+ struct acpi_power_resource_entry *entry;
+ int system_level = 5;
- return ret;
+ list_for_each_entry(entry, list, node) {
+ struct acpi_power_resource *resource = entry->resource;
-no_power_resource:
- printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!\n");
- return -ENODEV;
+ if (system_level > resource->system_level)
+ system_level = resource->system_level;
+ }
+ return system_level;
}
-EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
+
+/* --------------------------------------------------------------------------
+ Device Power Management
+ -------------------------------------------------------------------------- */
/**
* acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
@@ -549,7 +610,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
*/
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{
- int i, err = 0;
+ int err = 0;
if (!dev || !dev->wakeup.flags.valid)
return -EINVAL;
@@ -559,24 +620,17 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
if (dev->wakeup.prepare_count++)
goto out;
- /* Open power resource */
- for (i = 0; i < dev->wakeup.resources.count; i++) {
- int ret = acpi_power_on(dev->wakeup.resources.handles[i]);
- if (ret) {
- printk(KERN_ERR PREFIX "Transition power state\n");
- dev->wakeup.flags.valid = 0;
- err = -ENODEV;
- goto err_out;
- }
+ err = acpi_power_on_list(&dev->wakeup.resources);
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn wakeup power resources on\n");
+ dev->wakeup.flags.valid = 0;
+ } else {
+ /*
+	 * Passing 3 as the fourth argument below means the device may be
+	 * put into an arbitrary power state afterward.
+ */
+ err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
}
-
- /*
- * Passing 3 as the third argument below means the device may be placed
- * in arbitrary power state afterwards.
- */
- err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
-
- err_out:
if (err)
dev->wakeup.prepare_count = 0;
@@ -593,7 +647,7 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
*/
int acpi_disable_wakeup_device_power(struct acpi_device *dev)
{
- int i, err = 0;
+ int err = 0;
if (!dev || !dev->wakeup.flags.valid)
return -EINVAL;
@@ -614,15 +668,10 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
if (err)
goto out;
- /* Close power resource */
- for (i = 0; i < dev->wakeup.resources.count; i++) {
- int ret = acpi_power_off(dev->wakeup.resources.handles[i]);
- if (ret) {
- printk(KERN_ERR PREFIX "Transition power state\n");
- dev->wakeup.flags.valid = 0;
- err = -ENODEV;
- goto out;
- }
+ err = acpi_power_off_list(&dev->wakeup.resources);
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn wakeup power resources off\n");
+ dev->wakeup.flags.valid = 0;
}
out:
@@ -630,14 +679,9 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
return err;
}
-/* --------------------------------------------------------------------------
- Device Power Management
- -------------------------------------------------------------------------- */
-
int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
{
int result = 0;
- struct acpi_handle_list *list = NULL;
int list_state = 0;
int i = 0;
@@ -649,8 +693,9 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
* required for a given D-state are 'on'.
*/
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
- list = &device->power.states[i].resources;
- if (list->count < 1)
+ struct list_head *list = &device->power.states[i].resources;
+
+ if (list_empty(list))
continue;
result = acpi_power_get_list_state(list, &list_state);
@@ -669,7 +714,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
int acpi_power_on_resources(struct acpi_device *device, int state)
{
- if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3)
+ if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3_HOT)
return -EINVAL;
return acpi_power_on_list(&device->power.states[state].resources);
@@ -682,7 +727,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
- if (device->power.state == state)
+ if (device->power.state == state || !device->flags.power_manageable)
return 0;
if ((device->power.state < ACPI_STATE_D0)
@@ -710,118 +755,126 @@ int acpi_power_transition(struct acpi_device *device, int state)
return result;
}
-/* --------------------------------------------------------------------------
- Driver Interface
- -------------------------------------------------------------------------- */
+static void acpi_release_power_resource(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct acpi_power_resource *resource;
+
+ resource = container_of(device, struct acpi_power_resource, device);
+
+ mutex_lock(&power_resource_list_lock);
+ list_del(&resource->list_node);
+ mutex_unlock(&power_resource_list_lock);
+
+ acpi_free_ids(device);
+ kfree(resource);
+}
-static int acpi_power_add(struct acpi_device *device)
+static ssize_t acpi_power_in_use_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf) {
+ struct acpi_power_resource *resource;
+
+ resource = to_power_resource(to_acpi_device(dev));
+ return sprintf(buf, "%u\n", !!resource->ref_count);
+}
+static DEVICE_ATTR(resource_in_use, 0444, acpi_power_in_use_show, NULL);
+
+static void acpi_power_sysfs_remove(struct acpi_device *device)
{
- int result = 0, state;
- acpi_status status = AE_OK;
- struct acpi_power_resource *resource = NULL;
+ device_remove_file(&device->dev, &dev_attr_resource_in_use);
+}
+
+int acpi_add_power_resource(acpi_handle handle)
+{
+ struct acpi_power_resource *resource;
+ struct acpi_device *device = NULL;
union acpi_object acpi_object;
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
+ acpi_status status;
+ int state, result = -ENODEV;
+ acpi_bus_get_device(handle, &device);
+ if (device)
+ return 0;
- if (!device)
- return -EINVAL;
-
- resource = kzalloc(sizeof(struct acpi_power_resource), GFP_KERNEL);
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource)
return -ENOMEM;
- resource->device = device;
+ device = &resource->device;
+ acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
+ ACPI_STA_DEFAULT);
mutex_init(&resource->resource_lock);
- mutex_init(&resource->devices_lock);
- strcpy(resource->name, device->pnp.bus_id);
+ INIT_LIST_HEAD(&resource->dependent);
+ resource->name = device->pnp.bus_id;
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
- device->driver_data = resource;
+ device->power.state = ACPI_STATE_UNKNOWN;
 	/* Evaluate the object to get the system level and resource order. */
- status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ goto err;
+
resource->system_level = acpi_object.power_resource.system_level;
resource->order = acpi_object.power_resource.resource_order;
- result = acpi_power_get_state(device->handle, &state);
+ result = acpi_power_get_state(handle, &state);
if (result)
- goto end;
-
- switch (state) {
- case ACPI_POWER_RESOURCE_STATE_ON:
- device->power.state = ACPI_STATE_D0;
- break;
- case ACPI_POWER_RESOURCE_STATE_OFF:
- device->power.state = ACPI_STATE_D3;
- break;
- default:
- device->power.state = ACPI_STATE_UNKNOWN;
- break;
- }
+ goto err;
printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device),
acpi_device_bid(device), state ? "on" : "off");
- end:
+ device->flags.match_driver = true;
+ result = acpi_device_add(device, acpi_release_power_resource);
if (result)
- kfree(resource);
+ goto err;
- return result;
-}
-
-static int acpi_power_remove(struct acpi_device *device, int type)
-{
- struct acpi_power_resource *resource;
-
- if (!device)
- return -EINVAL;
-
- resource = acpi_driver_data(device);
- if (!resource)
- return -EINVAL;
-
- kfree(resource);
+ if (!device_create_file(&device->dev, &dev_attr_resource_in_use))
+ device->remove = acpi_power_sysfs_remove;
+ mutex_lock(&power_resource_list_lock);
+ list_add(&resource->list_node, &acpi_power_resource_list);
+ mutex_unlock(&power_resource_list_lock);
+ acpi_device_add_finalize(device);
return 0;
+
+ err:
+ acpi_release_power_resource(&device->dev);
+ return result;
}
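device_create_file() returns 0 on success, so the negated test above only installs the sysfs cleanup callback when the resource_in_use attribute was actually created. Spelled out, the equivalent is (sketch, with a hypothetical error variable):

	error = device_create_file(&device->dev, &dev_attr_resource_in_use);
	if (!error)		/* the attribute exists, arrange for its removal */
		device->remove = acpi_power_sysfs_remove;
	/* If creation fails, the attribute is simply absent and
	 * registration of the power resource still succeeds. */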
-#ifdef CONFIG_PM_SLEEP
-static int acpi_power_resume(struct device *dev)
+#ifdef CONFIG_ACPI_SLEEP
+void acpi_resume_power_resources(void)
{
- int result = 0, state;
- struct acpi_device *device;
struct acpi_power_resource *resource;
- if (!dev)
- return -EINVAL;
+ mutex_lock(&power_resource_list_lock);
- device = to_acpi_device(dev);
- resource = acpi_driver_data(device);
- if (!resource)
- return -EINVAL;
+ list_for_each_entry(resource, &acpi_power_resource_list, list_node) {
+ int result, state;
- mutex_lock(&resource->resource_lock);
+ mutex_lock(&resource->resource_lock);
- result = acpi_power_get_state(device->handle, &state);
- if (result)
- goto unlock;
+ result = acpi_power_get_state(resource->device.handle, &state);
+ if (result)
+ continue;
- if (state == ACPI_POWER_RESOURCE_STATE_OFF && resource->ref_count)
- result = __acpi_power_on(resource);
+ if (state == ACPI_POWER_RESOURCE_STATE_OFF
+ && resource->ref_count) {
+ dev_info(&resource->device.dev, "Turning ON\n");
+ __acpi_power_on(resource);
+ } else if (state == ACPI_POWER_RESOURCE_STATE_ON
+ && !resource->ref_count) {
+ dev_info(&resource->device.dev, "Turning OFF\n");
+ __acpi_power_off(resource);
+ }
- unlock:
- mutex_unlock(&resource->resource_lock);
+ mutex_unlock(&resource->resource_lock);
+ }
- return result;
+ mutex_unlock(&power_resource_list_lock);
}
#endif
-
-int __init acpi_power_init(void)
-{
- INIT_LIST_HEAD(&acpi_power_resource_list);
- return acpi_bus_register_driver(&acpi_power_driver);
-}
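Taken together, power.c now represents each power resource as a full struct acpi_device and tracks resource lists with list_heads instead of fixed-size acpi_handle_list arrays. A hedged sketch of a consumer of the exported helpers (the function name is hypothetical):

static int example_enter_d0(struct acpi_device *adev)
{
	int state, err;

	/* Turn on every resource listed in the device's _PR0 package. */
	err = acpi_power_on_resources(adev, ACPI_STATE_D0);
	if (err)
		return err;

	/* Cross-check by inferring the D-state from the resources' states. */
	err = acpi_power_get_inferred_state(adev, &state);
	if (!err && state != ACPI_STATE_D0)
		dev_warn(&adev->dev, "resources report D%d, expected D0\n", state);

	return err;
}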
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index ef98796b3824..52ce76725c20 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,11 +311,12 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state);
- if (!dev->physical_node_count)
+ if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
- dev->wakeup.flags.run_wake ?
- '*' : ' ', "disabled");
- else {
+ dev->wakeup.flags.run_wake ? '*' : ' ',
+ device_may_wakeup(&dev->dev) ?
+ "enabled" : "disabled");
+ } else {
struct device *ldev;
list_for_each_entry(entry, &dev->physical_node_list,
node) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index e83311bf1ebd..df34bd04ae62 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -45,6 +45,7 @@
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/memory_hotplug.h>
#include <asm/io.h>
#include <asm/cpu.h>
@@ -81,7 +82,7 @@ MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
-static int acpi_processor_remove(struct acpi_device *device, int type);
+static int acpi_processor_remove(struct acpi_device *device);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -610,7 +611,7 @@ err_free_pr:
return result;
}
-static int acpi_processor_remove(struct acpi_device *device, int type)
+static int acpi_processor_remove(struct acpi_device *device)
{
struct acpi_processor *pr = NULL;
@@ -623,7 +624,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
if (pr->id >= nr_cpu_ids)
goto free;
- if (type == ACPI_BUS_REMOVAL_EJECT) {
+ if (device->removal_type == ACPI_BUS_REMOVAL_EJECT) {
if (acpi_processor_handle_eject(pr))
return -EINVAL;
}
@@ -641,6 +642,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
per_cpu(processors, pr->id) = NULL;
per_cpu(processor_device_array, pr->id) = NULL;
+ try_offline_node(cpu_to_node(pr->id));
free:
free_cpumask_var(pr->throttling.shared_cpu_map);
@@ -677,36 +679,17 @@ static int is_processor_present(acpi_handle handle)
return 0;
}
-static
-int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
-{
- acpi_handle phandle;
- struct acpi_device *pdev;
-
-
- if (acpi_get_parent(handle, &phandle)) {
- return -ENODEV;
- }
-
- if (acpi_bus_get_device(phandle, &pdev)) {
- return -ENODEV;
- }
-
- if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
- return -ENODEV;
- }
-
- return 0;
-}
-
static void acpi_processor_hotplug_notify(acpi_handle handle,
u32 event, void *data)
{
struct acpi_device *device = NULL;
struct acpi_eject_event *ej_event = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+ acpi_status status;
int result;
+ acpi_scan_lock_acquire();
+
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
@@ -721,12 +704,16 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
if (!acpi_bus_get_device(handle, &device))
break;
- result = acpi_processor_device_add(handle, &device);
+ result = acpi_bus_scan(handle);
if (result) {
acpi_handle_err(handle, "Unable to add the device\n");
break;
}
-
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ acpi_handle_err(handle, "Missing device object\n");
+ break;
+ }
ost_code = ACPI_OST_SC_SUCCESS;
break;
@@ -751,25 +738,32 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
break;
}
- ej_event->handle = handle;
+ get_device(&device->dev);
+ ej_event->device = device;
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
- acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
- (void *)ej_event);
-
- /* eject is performed asynchronously */
- return;
+ /* The eject is carried out asynchronously. */
+ status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
+ ej_event);
+ if (ACPI_FAILURE(status)) {
+ put_device(&device->dev);
+ kfree(ej_event);
+ break;
+ }
+ goto out;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
/* non-hotplug event; possibly handled by other handler */
- return;
+ goto out;
}
/* Inform firmware that the hotplug operation has completed */
(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
- return;
+
+ out:
+ acpi_scan_lock_release();
}
static acpi_status is_processor_device(acpi_handle handle)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f1a5da44591d..fc95308e9a11 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -28,19 +28,12 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h> /* need_resched() */
-#include <linux/pm_qos.h>
+#include <linux/sched.h> /* need_resched() */
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
-#include <linux/irqflags.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
@@ -52,22 +45,14 @@
#include <asm/apic.h>
#endif
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
-#include <asm/processor.h>
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
-#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
-#define C2_OVERHEAD 1 /* 1us */
-#define C3_OVERHEAD 1 /* 1us */
-#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
@@ -81,10 +66,11 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
+static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
+
static int disabled_by_idle_boot_param(void)
{
return boot_option_idle_override == IDLE_POLL ||
- boot_option_idle_override == IDLE_FORCE_MWAIT ||
boot_option_idle_override == IDLE_HALT;
}
@@ -736,8 +722,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -760,8 +745,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
*/
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
ACPI_FLUSH_CPU_CACHE();
@@ -791,8 +775,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -850,8 +833,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -943,13 +925,13 @@ struct cpuidle_driver acpi_idle_driver = {
* device i.e. per-cpu data
*
* @pr: the ACPI processor
+ * @dev: the cpuidle device
*/
-static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
- struct cpuidle_state_usage *state_usage;
- struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
if (!pr->flags.power_setup_done)
return -EINVAL;
@@ -958,6 +940,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
return -EINVAL;
}
+ if (!dev)
+ return -EINVAL;
+
dev->cpu = pr->id;
if (max_cstate == 0)
@@ -965,7 +950,6 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
- state_usage = &dev->states_usage[count];
if (!cx->valid)
continue;
@@ -976,8 +960,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
continue;
#endif
-
- cpuidle_set_statedata(state_usage, cx);
+ acpi_cstate[count] = cx;
count++;
if (count == CPUIDLE_STATE_MAX)
@@ -1101,7 +1084,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
cpuidle_disable_device(dev);
acpi_processor_get_power_info(pr);
if (pr->flags.power) {
- acpi_processor_setup_cpuidle_cx(pr);
+ acpi_processor_setup_cpuidle_cx(pr, dev);
ret = cpuidle_enable_device(dev);
}
cpuidle_resume_and_unlock();
@@ -1149,6 +1132,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
}
/* Populate Updated C-state information */
+ acpi_processor_get_power_info(pr);
acpi_processor_setup_cpuidle_states(pr);
/* Enable all cpuidle devices */
@@ -1158,8 +1142,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
continue;
acpi_processor_get_power_info(_pr);
if (_pr->flags.power) {
- acpi_processor_setup_cpuidle_cx(_pr);
dev = per_cpu(acpi_cpuidle_device, cpu);
+ acpi_processor_setup_cpuidle_cx(_pr, dev);
cpuidle_enable_device(dev);
}
}
@@ -1228,7 +1212,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
return -ENOMEM;
per_cpu(acpi_cpuidle_device, pr->id) = dev;
- acpi_processor_setup_cpuidle_cx(pr);
+ acpi_processor_setup_cpuidle_cx(pr, dev);
/* Register per-cpu cpuidle_device. Cpuidle driver
* must already be registered before registering device
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 836bfe069042..53e7ac9403a7 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -340,6 +340,13 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
|| boot_cpu_data.x86 == 0x11) {
rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+ /*
+ * MSR C001_0064+:
+ * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
+ */
+ if (!(hi & BIT(31)))
+ return;
+
fid = lo & 0x3f;
did = (lo >> 6) & 7;
if (boot_cpu_data.x86 == 0x10)
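rdmsr() returns the register as two 32-bit halves, so PstateEn (bit 63 of the 64-bit MSR described in the comment) ends up as bit 31 of hi, which is what the new BIT(31) test checks. Equivalently, on the assembled 64-bit value:

	u64 val = ((u64)hi << 32) | lo;

	if (!(val & (1ULL << 63)))
		return;		/* P-state not enabled; skip the frequency fixup */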
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ff0740e0a9c2..e523245643ac 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -130,7 +130,7 @@ struct acpi_sbs {
#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
-static int acpi_sbs_remove(struct acpi_device *device, int type);
+static int acpi_sbs_remove(struct acpi_device *device);
static int acpi_battery_get_state(struct acpi_battery *battery);
static inline int battery_scale(int log)
@@ -949,11 +949,11 @@ static int acpi_sbs_add(struct acpi_device *device)
acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
end:
if (result)
- acpi_sbs_remove(device, 0);
+ acpi_sbs_remove(device);
return result;
}
-static int acpi_sbs_remove(struct acpi_device *device, int type)
+static int acpi_sbs_remove(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int id;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index cf6129a8af7c..b78bc605837e 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -33,7 +33,7 @@ struct acpi_smb_hc {
};
static int acpi_smbus_hc_add(struct acpi_device *device);
-static int acpi_smbus_hc_remove(struct acpi_device *device, int type);
+static int acpi_smbus_hc_remove(struct acpi_device *device);
static const struct acpi_device_id sbs_device_ids[] = {
{"ACPI0001", 0},
@@ -296,7 +296,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
-static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
+static int acpi_smbus_hc_remove(struct acpi_device *device)
{
struct acpi_smb_hc *hc;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 53502d1bbf26..daee7497efd3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -29,29 +29,10 @@ extern struct acpi_device *acpi_root;
static const char *dummy_hid = "device";
-/*
- * The following ACPI IDs are known to be suitable for representing as
- * platform devices.
- */
-static const struct acpi_device_id acpi_platform_device_ids[] = {
-
- { "PNP0D40" },
-
- /* Haswell LPSS devices */
- { "INT33C0", 0 },
- { "INT33C1", 0 },
- { "INT33C2", 0 },
- { "INT33C3", 0 },
- { "INT33C4", 0 },
- { "INT33C5", 0 },
- { "INT33C6", 0 },
- { "INT33C7", 0 },
-
- { }
-};
-
static LIST_HEAD(acpi_device_list);
static LIST_HEAD(acpi_bus_id_list);
+static DEFINE_MUTEX(acpi_scan_lock);
+static LIST_HEAD(acpi_scan_handlers_list);
DEFINE_MUTEX(acpi_device_lock);
LIST_HEAD(acpi_wakeup_device_list);
@@ -61,6 +42,27 @@ struct acpi_device_bus_id{
struct list_head node;
};
+void acpi_scan_lock_acquire(void)
+{
+ mutex_lock(&acpi_scan_lock);
+}
+EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire);
+
+void acpi_scan_lock_release(void)
+{
+ mutex_unlock(&acpi_scan_lock);
+}
+EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
+
+int acpi_scan_add_handler(struct acpi_scan_handler *handler)
+{
+ if (!handler || !handler->attach)
+ return -EINVAL;
+
+ list_add_tail(&handler->list_node, &acpi_scan_handlers_list);
+ return 0;
+}
+
/*
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
@@ -115,39 +117,32 @@ static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
*/
void acpi_bus_hot_remove_device(void *context)
{
- struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context;
- struct acpi_device *device;
- acpi_handle handle = ej_event->handle;
+ struct acpi_eject_event *ej_event = context;
+ struct acpi_device *device = ej_event->device;
+ acpi_handle handle = device->handle;
acpi_handle temp;
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status = AE_OK;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
- if (acpi_bus_get_device(handle, &device))
- goto err_out;
+ mutex_lock(&acpi_scan_lock);
- if (!device)
- goto err_out;
+ /* If there is no handle, the device node has been unregistered. */
+ if (!device->handle) {
+ dev_dbg(&device->dev, "ACPI handle missing\n");
+ put_device(&device->dev);
+ goto out;
+ }
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Hot-removing device %s...\n", dev_name(&device->dev)));
- if (acpi_bus_trim(device, 1)) {
- printk(KERN_ERR PREFIX
- "Removing device failed\n");
- goto err_out;
- }
-
- /* device has been freed */
+ acpi_bus_trim(device);
+ /* Device node has been unregistered. */
+ put_device(&device->dev);
device = NULL;
- /* power off device */
- status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
- printk(KERN_WARNING PREFIX
- "Power-off device failed\n");
-
if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) {
arg_list.count = 1;
arg_list.pointer = &arg;
@@ -167,23 +162,46 @@ void acpi_bus_hot_remove_device(void *context)
status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
- printk(KERN_WARNING PREFIX
- "Eject device failed\n");
- goto err_out;
- }
+ acpi_handle_warn(handle, "Eject failed\n");
- kfree(context);
- return;
+ /* Tell the firmware the hot-remove operation has failed. */
+ acpi_evaluate_hotplug_ost(handle, ej_event->event,
+ ost_code, NULL);
+ }
-err_out:
- /* Inform firmware the hot-remove operation has completed w/ error */
- (void) acpi_evaluate_hotplug_ost(handle,
- ej_event->event, ost_code, NULL);
+ out:
+ mutex_unlock(&acpi_scan_lock);
kfree(context);
return;
}
EXPORT_SYMBOL(acpi_bus_hot_remove_device);
+static ssize_t real_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+ int state;
+ int ret;
+
+ ret = acpi_device_get_power(adev, &state);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%s\n", acpi_power_state_string(state));
+}
+
+static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
+
+static ssize_t power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
+}
+
+static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
+
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
@@ -197,12 +215,10 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
if ((!count) || (buf[0] != '1')) {
return -EINVAL;
}
-#ifndef FORCE_EJECT
- if (acpi_device->driver == NULL) {
+ if (!acpi_device->driver && !acpi_device->handler) {
ret = -ENODEV;
goto err;
}
-#endif
status = acpi_get_type(acpi_device->handle, &type);
if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
ret = -ENODEV;
@@ -215,7 +231,8 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
goto err;
}
- ej_event->handle = acpi_device->handle;
+ get_device(&acpi_device->dev);
+ ej_event->device = acpi_device;
if (acpi_device->flags.eject_pending) {
/* event originated from ACPI eject notification */
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
@@ -223,11 +240,15 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
} else {
/* event originated from user */
ej_event->event = ACPI_OST_EC_OSPM_EJECT;
- (void) acpi_evaluate_hotplug_ost(ej_event->handle,
+ (void) acpi_evaluate_hotplug_ost(acpi_device->handle,
ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
}
- acpi_os_hotplug_execute(acpi_bus_hot_remove_device, (void *)ej_event);
+ status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
+ if (ACPI_FAILURE(status)) {
+ put_device(&acpi_device->dev);
+ kfree(ej_event);
+ }
err:
return ret;
}
@@ -375,8 +396,22 @@ static int acpi_device_setup_files(struct acpi_device *dev)
* hot-removal function from userland.
*/
status = acpi_get_handle(dev->handle, "_EJ0", &temp);
- if (ACPI_SUCCESS(status))
+ if (ACPI_SUCCESS(status)) {
result = device_create_file(&dev->dev, &dev_attr_eject);
+ if (result)
+ return result;
+ }
+
+ if (dev->flags.power_manageable) {
+ result = device_create_file(&dev->dev, &dev_attr_power_state);
+ if (result)
+ return result;
+
+ if (dev->power.flags.power_resources)
+ result = device_create_file(&dev->dev,
+ &dev_attr_real_power_state);
+ }
+
end:
return result;
}
@@ -386,6 +421,13 @@ static void acpi_device_remove_files(struct acpi_device *dev)
acpi_status status;
acpi_handle temp;
+ if (dev->flags.power_manageable) {
+ device_remove_file(&dev->dev, &dev_attr_power_state);
+ if (dev->power.flags.power_resources)
+ device_remove_file(&dev->dev,
+ &dev_attr_real_power_state);
+ }
+
/*
* If device has _STR, remove 'description' file
*/
@@ -454,9 +496,9 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
struct acpi_device *adev;
+ acpi_handle handle = ACPI_HANDLE(dev);
- if (!ids || !ACPI_HANDLE(dev)
- || ACPI_FAILURE(acpi_bus_get_device(ACPI_HANDLE(dev), &adev)))
+ if (!ids || !handle || acpi_bus_get_device(handle, &adev))
return NULL;
return __acpi_match_device(adev, ids);
@@ -470,7 +512,7 @@ int acpi_match_device_ids(struct acpi_device *device,
}
EXPORT_SYMBOL(acpi_match_device_ids);
-static void acpi_free_ids(struct acpi_device *device)
+void acpi_free_ids(struct acpi_device *device)
{
struct acpi_hardware_id *id, *tmp;
@@ -478,6 +520,23 @@ static void acpi_free_ids(struct acpi_device *device)
kfree(id->id);
kfree(id);
}
+ kfree(device->pnp.unique_id);
+}
+
+static void acpi_free_power_resources_lists(struct acpi_device *device)
+{
+ int i;
+
+ if (device->wakeup.flags.valid)
+ acpi_power_resources_list_free(&device->wakeup.resources);
+
+ if (!device->flags.power_manageable)
+ return;
+
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
+ struct acpi_device_power_state *ps = &device->power.states[i];
+ acpi_power_resources_list_free(&ps->resources);
+ }
}
static void acpi_device_release(struct device *dev)
@@ -485,7 +544,7 @@ static void acpi_device_release(struct device *dev)
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_free_ids(acpi_dev);
- kfree(acpi_dev->pnp.unique_id);
+ acpi_free_power_resources_lists(acpi_dev);
kfree(acpi_dev);
}
@@ -494,7 +553,8 @@ static int acpi_bus_match(struct device *dev, struct device_driver *drv)
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(drv);
- return !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
+ return acpi_dev->flags.match_driver
+ && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}
static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -570,7 +630,6 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
}
static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);
-static int acpi_start_single_object(struct acpi_device *);
static int acpi_device_probe(struct device * dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -579,15 +638,13 @@ static int acpi_device_probe(struct device * dev)
ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
if (!ret) {
- if (acpi_dev->bus_ops.acpi_op_start)
- acpi_start_single_object(acpi_dev);
-
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
if (ret) {
if (acpi_drv->ops.remove)
- acpi_drv->ops.remove(acpi_dev,
- acpi_dev->removal_type);
+ acpi_drv->ops.remove(acpi_dev);
+ acpi_dev->driver = NULL;
+ acpi_dev->driver_data = NULL;
return ret;
}
}
@@ -609,7 +666,7 @@ static int acpi_device_remove(struct device * dev)
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev);
if (acpi_drv->ops.remove)
- acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
+ acpi_drv->ops.remove(acpi_dev);
}
acpi_dev->driver = NULL;
acpi_dev->driver_data = NULL;
@@ -626,12 +683,25 @@ struct bus_type acpi_bus_type = {
.uevent = acpi_device_uevent,
};
-static int acpi_device_register(struct acpi_device *device)
+int acpi_device_add(struct acpi_device *device,
+ void (*release)(struct device *))
{
int result;
struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
int found = 0;
+ if (device->handle) {
+ acpi_status status;
+
+ status = acpi_attach_data(device->handle, acpi_bus_data_handler,
+ device);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(device->handle,
+ "Unable to attach device data\n");
+ return -ENODEV;
+ }
+ }
+
/*
* Linkage
* -------
@@ -642,11 +712,13 @@ static int acpi_device_register(struct acpi_device *device)
INIT_LIST_HEAD(&device->wakeup_list);
INIT_LIST_HEAD(&device->physical_node_list);
mutex_init(&device->physical_node_lock);
+ INIT_LIST_HEAD(&device->power_dependent);
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
if (!new_bus_id) {
- printk(KERN_ERR PREFIX "Memory allocation error\n");
- return -ENOMEM;
+ pr_err(PREFIX "Memory allocation error\n");
+ result = -ENOMEM;
+ goto err_detach;
}
mutex_lock(&acpi_device_lock);
@@ -681,11 +753,11 @@ static int acpi_device_register(struct acpi_device *device)
if (device->parent)
device->dev.parent = &device->parent->dev;
device->dev.bus = &acpi_bus_type;
- device->dev.release = &acpi_device_release;
- result = device_register(&device->dev);
+ device->dev.release = release;
+ result = device_add(&device->dev);
if (result) {
dev_err(&device->dev, "Error registering device\n");
- goto end;
+ goto err;
}
result = acpi_device_setup_files(device);
@@ -695,16 +767,20 @@ static int acpi_device_register(struct acpi_device *device)
device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
return 0;
-end:
+
+ err:
mutex_lock(&acpi_device_lock);
if (device->parent)
list_del(&device->node);
list_del(&device->wakeup_list);
mutex_unlock(&acpi_device_lock);
+
+ err_detach:
+ acpi_detach_data(device->handle, acpi_bus_data_handler);
return result;
}
-static void acpi_device_unregister(struct acpi_device *device, int type)
+static void acpi_device_unregister(struct acpi_device *device)
{
mutex_lock(&acpi_device_lock);
if (device->parent)
@@ -715,8 +791,20 @@ static void acpi_device_unregister(struct acpi_device *device, int type)
acpi_detach_data(device->handle, acpi_bus_data_handler);
+ acpi_power_add_remove_device(device, false);
acpi_device_remove_files(device);
- device_unregister(&device->dev);
+ if (device->remove)
+ device->remove(device);
+
+ device_del(&device->dev);
+ /*
+ * Transition the device to D3cold to drop the reference counts of all
+ * power resources the device depends on and turn off the ones that have
+ * no more references.
+ */
+ acpi_device_set_power(device, ACPI_STATE_D3_COLD);
+ device->handle = NULL;
+ put_device(&device->dev);
}
/* --------------------------------------------------------------------------
@@ -760,24 +848,6 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
return 0;
}
-static int acpi_start_single_object(struct acpi_device *device)
-{
- int result = 0;
- struct acpi_driver *driver;
-
-
- if (!(driver = device->driver))
- return 0;
-
- if (driver->ops.start) {
- result = driver->ops.start(device);
- if (result && driver->ops.remove)
- driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
- }
-
- return result;
-}
-
/**
* acpi_bus_register_driver - register a driver with the ACPI bus
* @driver: driver being registered
@@ -821,29 +891,23 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
-------------------------------------------------------------------------- */
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
{
+ struct acpi_device *device = NULL;
acpi_status status;
- int ret;
- struct acpi_device *device;
/*
* Fixed hardware devices do not appear in the namespace and do not
* have handles, but we fabricate acpi_devices for them, so we have
* to deal with them specially.
*/
- if (handle == NULL)
+ if (!handle)
return acpi_root;
do {
status = acpi_get_parent(handle, &handle);
- if (status == AE_NULL_ENTRY)
- return NULL;
if (ACPI_FAILURE(status))
- return acpi_root;
-
- ret = acpi_bus_get_device(handle, &device);
- if (ret == 0)
- return device;
- } while (1);
+ return status == AE_NULL_ENTRY ? NULL : acpi_root;
+ } while (acpi_bus_get_device(handle, &device));
+ return device;
}
acpi_status
@@ -877,52 +941,43 @@ void acpi_bus_data_handler(acpi_handle handle, void *context)
return;
}
-static int acpi_bus_get_perf_flags(struct acpi_device *device)
-{
- device->performance.state = ACPI_STATE_UNKNOWN;
- return 0;
-}
-
-static acpi_status
-acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
- struct acpi_device_wakeup *wakeup)
+static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
+ struct acpi_device_wakeup *wakeup)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
union acpi_object *element = NULL;
acpi_status status;
- int i = 0;
+ int err = -ENODATA;
if (!wakeup)
- return AE_BAD_PARAMETER;
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&wakeup->resources);
/* _PRW */
status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
- return status;
+ return err;
}
package = (union acpi_object *)buffer.pointer;
- if (!package || (package->package.count < 2)) {
- status = AE_BAD_DATA;
+ if (!package || package->package.count < 2)
goto out;
- }
element = &(package->package.elements[0]);
- if (!element) {
- status = AE_BAD_DATA;
+ if (!element)
goto out;
- }
+
if (element->type == ACPI_TYPE_PACKAGE) {
if ((element->package.count < 2) ||
(element->package.elements[0].type !=
ACPI_TYPE_LOCAL_REFERENCE)
- || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) {
- status = AE_BAD_DATA;
+ || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
goto out;
- }
+
wakeup->gpe_device =
element->package.elements[0].reference.handle;
wakeup->gpe_number =
@@ -931,38 +986,35 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
wakeup->gpe_device = NULL;
wakeup->gpe_number = element->integer.value;
} else {
- status = AE_BAD_DATA;
goto out;
}
element = &(package->package.elements[1]);
- if (element->type != ACPI_TYPE_INTEGER) {
- status = AE_BAD_DATA;
+ if (element->type != ACPI_TYPE_INTEGER)
goto out;
- }
+
wakeup->sleep_state = element->integer.value;
- if ((package->package.count - 2) > ACPI_MAX_HANDLES) {
- status = AE_NO_MEMORY;
+ err = acpi_extract_power_resources(package, 2, &wakeup->resources);
+ if (err)
goto out;
- }
- wakeup->resources.count = package->package.count - 2;
- for (i = 0; i < wakeup->resources.count; i++) {
- element = &(package->package.elements[i + 2]);
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
- status = AE_BAD_DATA;
- goto out;
- }
- wakeup->resources.handles[i] = element->reference.handle;
- }
+ if (!list_empty(&wakeup->resources)) {
+ int sleep_state;
+ sleep_state = acpi_power_min_system_level(&wakeup->resources);
+ if (sleep_state < wakeup->sleep_state) {
+ acpi_handle_warn(handle, "Overriding _PRW sleep state "
+ "(S%d) by S%d from power resources\n",
+ (int)wakeup->sleep_state, sleep_state);
+ wakeup->sleep_state = sleep_state;
+ }
+ }
acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
out:
kfree(buffer.pointer);
-
- return status;
+ return err;
}
static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
@@ -1002,17 +1054,17 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
acpi_handle temp;
acpi_status status = 0;
- int psw_error;
+ int err;
/* Presence of _PRW indicates wake capable */
status = acpi_get_handle(device->handle, "_PRW", &temp);
if (ACPI_FAILURE(status))
return;
- status = acpi_bus_extract_wakeup_device_power_package(device->handle,
- &device->wakeup);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
+ err = acpi_bus_extract_wakeup_device_power_package(device->handle,
+ &device->wakeup);
+ if (err) {
+ dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
return;
}
@@ -1025,20 +1077,73 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
* So it is necessary to call _DSW object first. Only when it is not
* present will the _PSW object used.
*/
- psw_error = acpi_device_sleep_wake(device, 0, 0, 0);
- if (psw_error)
+ err = acpi_device_sleep_wake(device, 0, 0, 0);
+ if (err)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"error in _DSW or _PSW evaluation\n"));
}
-static void acpi_bus_add_power_resource(acpi_handle handle);
+static void acpi_bus_init_power_state(struct acpi_device *device, int state)
+{
+ struct acpi_device_power_state *ps = &device->power.states[state];
+ char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle;
+ acpi_status status;
+
+ INIT_LIST_HEAD(&ps->resources);
+
+ /* Evaluate "_PRx" to get referenced power resources */
+ status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ union acpi_object *package = buffer.pointer;
+
+ if (buffer.length && package
+ && package->type == ACPI_TYPE_PACKAGE
+ && package->package.count) {
+ int err = acpi_extract_power_resources(package, 0,
+ &ps->resources);
+ if (!err)
+ device->power.flags.power_resources = 1;
+ }
+ ACPI_FREE(buffer.pointer);
+ }
+
+ /* Evaluate "_PSx" to see if we can do explicit sets */
+ pathname[2] = 'S';
+ status = acpi_get_handle(device->handle, pathname, &handle);
+ if (ACPI_SUCCESS(status))
+ ps->flags.explicit_set = 1;
+
+ /*
+ * State is valid if there are means to put the device into it.
+ * D3hot is only valid if _PR3 present.
+ */
+ if (!list_empty(&ps->resources)
+ || (ps->flags.explicit_set && state < ACPI_STATE_D3_HOT)) {
+ ps->flags.valid = 1;
+ ps->flags.os_accessible = 1;
+ }
+
+ ps->power = -1; /* Unknown - driver assigned */
+ ps->latency = -1; /* Unknown - driver assigned */
+}
-static int acpi_bus_get_power_flags(struct acpi_device *device)
+static void acpi_bus_get_power_flags(struct acpi_device *device)
{
- acpi_status status = 0;
- acpi_handle handle = NULL;
- u32 i = 0;
+ acpi_status status;
+ acpi_handle handle;
+ u32 i;
+
+ /* Presence of _PS0|_PR0 indicates 'power manageable' */
+ status = acpi_get_handle(device->handle, "_PS0", &handle);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_get_handle(device->handle, "_PR0", &handle);
+ if (ACPI_FAILURE(status))
+ return;
+ }
+ device->flags.power_manageable = 1;
/*
* Power Management Flags
@@ -1053,40 +1158,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
/*
* Enumerate supported power management states
*/
- for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
- struct acpi_device_power_state *ps = &device->power.states[i];
- char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
-
- /* Evaluate "_PRx" to se if power resources are referenced */
- acpi_evaluate_reference(device->handle, object_name, NULL,
- &ps->resources);
- if (ps->resources.count) {
- int j;
-
- device->power.flags.power_resources = 1;
- for (j = 0; j < ps->resources.count; j++)
- acpi_bus_add_power_resource(ps->resources.handles[j]);
- }
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++)
+ acpi_bus_init_power_state(device, i);
- /* Evaluate "_PSx" to see if we can do explicit sets */
- object_name[2] = 'S';
- status = acpi_get_handle(device->handle, object_name, &handle);
- if (ACPI_SUCCESS(status))
- ps->flags.explicit_set = 1;
-
- /*
- * State is valid if there are means to put the device into it.
- * D3hot is only valid if _PR3 present.
- */
- if (ps->resources.count ||
- (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) {
- ps->flags.valid = 1;
- ps->flags.os_accessible = 1;
- }
-
- ps->power = -1; /* Unknown - driver assigned */
- ps->latency = -1; /* Unknown - driver assigned */
- }
+ INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
/* Set defaults for D0 and D3 states (always valid) */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
@@ -1103,17 +1178,17 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
device->power.flags.power_resources)
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
- acpi_bus_init_power(device);
-
- return 0;
+ if (acpi_bus_init_power(device)) {
+ acpi_free_power_resources_lists(device);
+ device->flags.power_manageable = 0;
+ }
}
-static int acpi_bus_get_flags(struct acpi_device *device)
+static void acpi_bus_get_flags(struct acpi_device *device)
{
acpi_status status = AE_OK;
acpi_handle temp = NULL;
-
/* Presence of _STA indicates 'dynamic_status' */
status = acpi_get_handle(device->handle, "_STA", &temp);
if (ACPI_SUCCESS(status))
@@ -1133,21 +1208,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
if (ACPI_SUCCESS(status))
device->flags.ejectable = 1;
}
-
- /* Power resources cannot be power manageable. */
- if (device->device_type == ACPI_BUS_TYPE_POWER)
- return 0;
-
- /* Presence of _PS0|_PR0 indicates 'power manageable' */
- status = acpi_get_handle(device->handle, "_PS0", &temp);
- if (ACPI_FAILURE(status))
- status = acpi_get_handle(device->handle, "_PR0", &temp);
- if (ACPI_SUCCESS(status))
- device->flags.power_manageable = 1;
-
- /* TBD: Performance management */
-
- return 0;
}
static void acpi_device_get_busid(struct acpi_device *device)
@@ -1346,7 +1406,7 @@ static void acpi_device_set_id(struct acpi_device *device)
acpi_add_id(device, ACPI_DOCK_HID);
else if (!acpi_ibm_smbus_match(device))
acpi_add_id(device, ACPI_SMBUS_IBM_HID);
- else if (!acpi_device_hid(device) &&
+ else if (list_empty(&device->pnp.ids) &&
ACPI_IS_ROOT_DEVICE(device->parent)) {
acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
@@ -1372,56 +1432,32 @@ static void acpi_device_set_id(struct acpi_device *device)
}
}
-static int acpi_device_set_context(struct acpi_device *device)
+void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
+ int type, unsigned long long sta)
{
- acpi_status status;
-
- /*
- * Context
- * -------
- * Attach this 'struct acpi_device' to the ACPI object. This makes
- * resolutions from handle->device very efficient. Fixed hardware
- * devices have no handles, so we skip them.
- */
- if (!device->handle)
- return 0;
-
- status = acpi_attach_data(device->handle,
- acpi_bus_data_handler, device);
- if (ACPI_SUCCESS(status))
- return 0;
-
- printk(KERN_ERR PREFIX "Error attaching device data\n");
- return -ENODEV;
+ INIT_LIST_HEAD(&device->pnp.ids);
+ device->device_type = type;
+ device->handle = handle;
+ device->parent = acpi_bus_get_parent(handle);
+ STRUCT_TO_INT(device->status) = sta;
+ acpi_device_get_busid(device);
+ acpi_device_set_id(device);
+ acpi_bus_get_flags(device);
+ device->flags.match_driver = false;
+ device_initialize(&device->dev);
+ dev_set_uevent_suppress(&device->dev, true);
}
-static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
+void acpi_device_add_finalize(struct acpi_device *device)
{
- if (!dev)
- return -EINVAL;
-
- dev->removal_type = ACPI_BUS_REMOVAL_EJECT;
- device_release_driver(&dev->dev);
-
- if (!rmdevice)
- return 0;
-
- /*
- * unbind _ADR-Based Devices when hot removal
- */
- if (dev->flags.bus_address) {
- if ((dev->parent) && (dev->parent->ops.unbind))
- dev->parent->ops.unbind(dev);
- }
- acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT);
-
- return 0;
+ device->flags.match_driver = true;
+ dev_set_uevent_suppress(&device->dev, false);
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
}
static int acpi_add_single_object(struct acpi_device **child,
acpi_handle handle, int type,
- unsigned long long sta,
- struct acpi_bus_ops *ops)
+ unsigned long long sta)
{
int result;
struct acpi_device *device;
@@ -1433,102 +1469,25 @@ static int acpi_add_single_object(struct acpi_device **child,
return -ENOMEM;
}
- INIT_LIST_HEAD(&device->pnp.ids);
- device->device_type = type;
- device->handle = handle;
- device->parent = acpi_bus_get_parent(handle);
- device->bus_ops = *ops; /* workround for not call .start */
- STRUCT_TO_INT(device->status) = sta;
-
- acpi_device_get_busid(device);
-
- /*
- * Flags
- * -----
- * Note that we only look for object handles -- cannot evaluate objects
- * until we know the device is present and properly initialized.
- */
- result = acpi_bus_get_flags(device);
- if (result)
- goto end;
-
- /*
- * Initialize Device
- * -----------------
- * TBD: Synch with Core's enumeration/initialization process.
- */
- acpi_device_set_id(device);
-
- /*
- * Power Management
- * ----------------
- */
- if (device->flags.power_manageable) {
- result = acpi_bus_get_power_flags(device);
- if (result)
- goto end;
- }
-
- /*
- * Wakeup device management
- *-----------------------
- */
+ acpi_init_device_object(device, handle, type, sta);
+ acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
- /*
- * Performance Management
- * ----------------------
- */
- if (device->flags.performance_manageable) {
- result = acpi_bus_get_perf_flags(device);
- if (result)
- goto end;
- }
-
- if ((result = acpi_device_set_context(device)))
- goto end;
-
- result = acpi_device_register(device);
-
- /*
- * Bind _ADR-Based Devices when hot add
- */
- if (device->flags.bus_address) {
- if (device->parent && device->parent->ops.bind)
- device->parent->ops.bind(device);
- }
-
-end:
- if (!result) {
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Adding %s [%s] parent %s\n", dev_name(&device->dev),
- (char *) buffer.pointer,
- device->parent ? dev_name(&device->parent->dev) :
- "(null)"));
- kfree(buffer.pointer);
- *child = device;
- } else
+ result = acpi_device_add(device, acpi_device_release);
+ if (result) {
acpi_device_release(&device->dev);
+ return result;
+ }
- return result;
-}
-
-#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
- ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
-
-static void acpi_bus_add_power_resource(acpi_handle handle)
-{
- struct acpi_bus_ops ops = {
- .acpi_op_add = 1,
- .acpi_op_start = 1,
- };
- struct acpi_device *device = NULL;
-
- acpi_bus_get_device(handle, &device);
- if (!device)
- acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER,
- ACPI_STA_DEFAULT, &ops);
+ acpi_power_add_remove_device(device, true);
+ acpi_device_add_finalize(device);
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n",
+ dev_name(&device->dev), (char *) buffer.pointer,
+ device->parent ? dev_name(&device->parent->dev) : "(null)"));
+ kfree(buffer.pointer);
+ *child = device;
+ return 0;
}
static int acpi_bus_type_and_status(acpi_handle handle, int *type,
@@ -1570,218 +1529,248 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
return 0;
}
-static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
- void *context, void **return_value)
+static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **return_value)
{
- struct acpi_bus_ops *ops = context;
+ struct acpi_device *device = NULL;
int type;
unsigned long long sta;
- struct acpi_device *device;
acpi_status status;
int result;
+ acpi_bus_get_device(handle, &device);
+ if (device)
+ goto out;
+
result = acpi_bus_type_and_status(handle, &type, &sta);
if (result)
return AE_OK;
+ if (type == ACPI_BUS_TYPE_POWER) {
+ acpi_add_power_resource(handle);
+ return AE_OK;
+ }
+
if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
!(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
struct acpi_device_wakeup wakeup;
acpi_handle temp;
status = acpi_get_handle(handle, "_PRW", &temp);
- if (ACPI_SUCCESS(status))
+ if (ACPI_SUCCESS(status)) {
acpi_bus_extract_wakeup_device_power_package(handle,
&wakeup);
+ acpi_power_resources_list_free(&wakeup.resources);
+ }
return AE_CTRL_DEPTH;
}
- /*
- * We may already have an acpi_device from a previous enumeration. If
- * so, we needn't add it again, but we may still have to start it.
- */
- device = NULL;
- acpi_bus_get_device(handle, &device);
- if (ops->acpi_op_add && !device) {
- acpi_add_single_object(&device, handle, type, sta, ops);
- /* Is the device a known good platform device? */
- if (device
- && !acpi_match_device_ids(device, acpi_platform_device_ids))
- acpi_create_platform_device(device);
- }
-
+ acpi_add_single_object(&device, handle, type, sta);
if (!device)
return AE_CTRL_DEPTH;
- if (ops->acpi_op_start && !(ops->acpi_op_add)) {
- status = acpi_start_single_object(device);
- if (ACPI_FAILURE(status))
- return AE_CTRL_DEPTH;
- }
-
+ out:
if (!*return_value)
*return_value = device;
+
return AE_OK;
}
-static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
- struct acpi_device **child)
+static int acpi_scan_do_attach_handler(struct acpi_device *device, char *id)
{
- acpi_status status;
- void *device = NULL;
+ struct acpi_scan_handler *handler;
- status = acpi_bus_check_add(handle, 0, ops, &device);
- if (ACPI_SUCCESS(status))
- acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- acpi_bus_check_add, NULL, ops, &device);
+ list_for_each_entry(handler, &acpi_scan_handlers_list, list_node) {
+ const struct acpi_device_id *devid;
- if (child)
- *child = device;
+ for (devid = handler->ids; devid->id[0]; devid++) {
+ int ret;
- if (device)
- return 0;
- else
- return -ENODEV;
-}
+ if (strcmp((char *)devid->id, id))
+ continue;
-/*
- * acpi_bus_add and acpi_bus_start
- *
- * scan a given ACPI tree and (probably recently hot-plugged)
- * create and add or starts found devices.
- *
- * If no devices were found -ENODEV is returned which does not
- * mean that this is a real error, there just have been no suitable
- * ACPI objects in the table trunk from which the kernel could create
- * a device and add/start an appropriate driver.
- */
+ ret = handler->attach(device, devid);
+ if (ret > 0) {
+ device->handler = handler;
+ return ret;
+ } else if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
-int
-acpi_bus_add(struct acpi_device **child,
- struct acpi_device *parent, acpi_handle handle, int type)
+static int acpi_scan_attach_handler(struct acpi_device *device)
{
- struct acpi_bus_ops ops;
+ struct acpi_hardware_id *hwid;
+ int ret = 0;
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_add = 1;
+ list_for_each_entry(hwid, &device->pnp.ids, list) {
+ ret = acpi_scan_do_attach_handler(device, hwid->id);
+ if (ret)
+ break;
- return acpi_bus_scan(handle, &ops, child);
+ }
+ return ret;
}
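
The two functions above walk acpi_scan_handlers_list and match every hardware ID of a device against each handler's id table; a positive return from .attach claims the device (device_attach() is then skipped in acpi_bus_device_attach() below), while a negative one makes the namespace walk skip the subtree. A list entry would look roughly like the sketch below; the ID and callback bodies are made up, and how the entry is put on acpi_scan_handlers_list is not visible in this hunk.

    static const struct acpi_device_id example_handler_ids[] = {
            {"PNP0A06", 0},         /* illustrative ID */
            {"", 0},
    };

    static int example_attach(struct acpi_device *adev,
                              const struct acpi_device_id *id)
    {
            /* set the device up; return > 0 to claim it, < 0 on error */
            return 1;
    }

    static void example_detach(struct acpi_device *adev)
    {
            /* undo example_attach(); called from acpi_bus_device_detach() */
    }

    static struct acpi_scan_handler example_handler = {
            .ids    = example_handler_ids,
            .attach = example_attach,
            .detach = example_detach,
    };
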
-EXPORT_SYMBOL(acpi_bus_add);
-int acpi_bus_start(struct acpi_device *device)
+static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_not_used)
{
- struct acpi_bus_ops ops;
- int result;
-
- if (!device)
- return -EINVAL;
+ struct acpi_device *device;
+ unsigned long long sta_not_used;
+ int ret;
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_start = 1;
+ /*
+ * Ignore the same errors that acpi_bus_check_add() ignores to avoid
+ * terminating namespace walks prematurely.
+ */
+ if (acpi_bus_type_and_status(handle, &ret, &sta_not_used))
+ return AE_OK;
- result = acpi_bus_scan(device->handle, &ops, NULL);
+ if (acpi_bus_get_device(handle, &device))
+ return AE_CTRL_DEPTH;
- acpi_update_all_gpes();
+ ret = acpi_scan_attach_handler(device);
+ if (ret)
+ return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
- return result;
+ ret = device_attach(&device->dev);
+ return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
}
-EXPORT_SYMBOL(acpi_bus_start);
-int acpi_bus_trim(struct acpi_device *start, int rmdevice)
+/**
+ * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
+ * @handle: Root of the namespace scope to scan.
+ *
+ * Scan a given ACPI tree (probably recently hot-plugged) and create and add
+ * found devices.
+ *
+ * If no devices were found, -ENODEV is returned, but it does not mean that
+ * there has been a real error. There just have been no suitable ACPI objects
+ * in the table trunk from which the kernel could create a device and add an
+ * appropriate driver.
+ *
+ * Must be called under acpi_scan_lock.
+ */
+int acpi_bus_scan(acpi_handle handle)
{
- acpi_status status;
- struct acpi_device *parent, *child;
- acpi_handle phandle, chandle;
- acpi_object_type type;
- u32 level = 1;
- int err = 0;
+ void *device = NULL;
+ int error = 0;
- parent = start;
- phandle = start->handle;
- child = chandle = NULL;
+ if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_check_add, NULL, NULL, &device);
- while ((level > 0) && parent && (!err)) {
- status = acpi_get_next_object(ACPI_TYPE_ANY, phandle,
- chandle, &chandle);
+ if (!device)
+ error = -ENODEV;
+ else if (ACPI_SUCCESS(acpi_bus_device_attach(handle, 0, NULL, NULL)))
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_device_attach, NULL, NULL, NULL);
- /*
- * If this scope is exhausted then move our way back up.
- */
- if (ACPI_FAILURE(status)) {
- level--;
- chandle = phandle;
- acpi_get_parent(phandle, &phandle);
- child = parent;
- parent = parent->parent;
-
- if (level == 0)
- err = acpi_bus_remove(child, rmdevice);
- else
- err = acpi_bus_remove(child, 1);
+ return error;
+}
+EXPORT_SYMBOL(acpi_bus_scan);
- continue;
- }
+static acpi_status acpi_bus_device_detach(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_not_used)
+{
+ struct acpi_device *device = NULL;
- status = acpi_get_type(chandle, &type);
- if (ACPI_FAILURE(status)) {
- continue;
- }
- /*
- * If there is a device corresponding to chandle then
- * parse it (depth-first).
- */
- if (acpi_bus_get_device(chandle, &child) == 0) {
- level++;
- phandle = chandle;
- chandle = NULL;
- parent = child;
+ if (!acpi_bus_get_device(handle, &device)) {
+ struct acpi_scan_handler *dev_handler = device->handler;
+
+ device->removal_type = ACPI_BUS_REMOVAL_EJECT;
+ if (dev_handler) {
+ if (dev_handler->detach)
+ dev_handler->detach(device);
+
+ device->handler = NULL;
+ } else {
+ device_release_driver(&device->dev);
}
- continue;
}
- return err;
+ return AE_OK;
+}
+
+static acpi_status acpi_bus_remove(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_not_used)
+{
+ struct acpi_device *device = NULL;
+
+ if (!acpi_bus_get_device(handle, &device))
+ acpi_device_unregister(device);
+
+ return AE_OK;
+}
+
+/**
+ * acpi_bus_trim - Remove ACPI device node and all of its descendants
+ * @start: Root of the ACPI device nodes subtree to remove.
+ *
+ * Must be called under acpi_scan_lock.
+ */
+void acpi_bus_trim(struct acpi_device *start)
+{
+ /*
+ * Execute acpi_bus_device_detach() as a post-order callback to detach
+ * all ACPI drivers from the device nodes being removed.
+ */
+ acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
+ acpi_bus_device_detach, NULL, NULL);
+ acpi_bus_device_detach(start->handle, 0, NULL, NULL);
+ /*
+ * Execute acpi_bus_remove() as a post-order callback to remove device
+ * nodes in the given namespace scope.
+ */
+ acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
+ acpi_bus_remove, NULL, NULL);
+ acpi_bus_remove(start->handle, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
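
Both exported entry points must run under acpi_scan_lock, as their kernel-doc states. A hedged sketch of how a hot-plug notify handler would use the pair, mirroring the locking done in acpi_scan_init() later in this file; the function name and the eject flag are illustrative only.

    static void example_hotplug_event(acpi_handle handle, bool ejected)
    {
            struct acpi_device *adev;

            mutex_lock(&acpi_scan_lock);
            if (ejected) {
                    /* acpi_bus_get_device() returns 0 when a node exists */
                    if (!acpi_bus_get_device(handle, &adev))
                            acpi_bus_trim(adev);
            } else {
                    acpi_bus_scan(handle);
            }
            mutex_unlock(&acpi_scan_lock);
    }
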
static int acpi_bus_scan_fixed(void)
{
int result = 0;
- struct acpi_device *device = NULL;
- struct acpi_bus_ops ops;
-
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_add = 1;
- ops.acpi_op_start = 1;
/*
* Enumerate all fixed-feature devices.
*/
- if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
+ struct acpi_device *device = NULL;
+
result = acpi_add_single_object(&device, NULL,
ACPI_BUS_TYPE_POWER_BUTTON,
- ACPI_STA_DEFAULT,
- &ops);
+ ACPI_STA_DEFAULT);
+ if (result)
+ return result;
+
+ result = device_attach(&device->dev);
+ if (result < 0)
+ return result;
+
device_init_wakeup(&device->dev, true);
}
- if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
+ struct acpi_device *device = NULL;
+
result = acpi_add_single_object(&device, NULL,
ACPI_BUS_TYPE_SLEEP_BUTTON,
- ACPI_STA_DEFAULT,
- &ops);
+ ACPI_STA_DEFAULT);
+ if (result)
+ return result;
+
+ result = device_attach(&device->dev);
}
- return result;
+ return result < 0 ? result : 0;
}
int __init acpi_scan_init(void)
{
int result;
- struct acpi_bus_ops ops;
-
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_add = 1;
- ops.acpi_op_start = 1;
result = bus_register(&acpi_bus_type);
if (result) {
@@ -1789,20 +1778,33 @@ int __init acpi_scan_init(void)
printk(KERN_ERR PREFIX "Could not register bus type\n");
}
- acpi_power_init();
+ acpi_pci_root_init();
+ acpi_pci_link_init();
+ acpi_platform_init();
+ acpi_csrt_init();
+ acpi_container_init();
+ mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
*/
- result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root);
-
- if (!result)
- result = acpi_bus_scan_fixed();
+ result = acpi_bus_scan(ACPI_ROOT_OBJECT);
+ if (result)
+ goto out;
+ result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
if (result)
- acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
- else
- acpi_update_all_gpes();
+ goto out;
+
+ result = acpi_bus_scan_fixed();
+ if (result) {
+ acpi_device_unregister(acpi_root);
+ goto out;
+ }
+ acpi_update_all_gpes();
+
+ out:
+ mutex_unlock(&acpi_scan_lock);
return result;
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2fcc67d34b11..6d3a06a629a1 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -177,6 +177,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW41E_H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW21E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -386,6 +394,8 @@ static void acpi_pm_finish(void)
acpi_target_sleep_state = ACPI_STATE_S0;
+ acpi_resume_power_resources();
+
/* If we were woken with the fixed power button, provide a small
* hint to userspace in the form of a wakeup event on the fixed power
* button device (if it can be found).
@@ -577,7 +587,28 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};
-#endif /* CONFIG_SUSPEND */
+
+static void acpi_sleep_suspend_setup(void)
+{
+ int i;
+
+ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
+ acpi_status status;
+ u8 type_a, type_b;
+
+ status = acpi_get_sleep_type_data(i, &type_a, &type_b);
+ if (ACPI_SUCCESS(status)) {
+ sleep_states[i] = 1;
+ pr_cont(" S%d", i);
+ }
+ }
+
+ suspend_set_ops(old_suspend_ordering ?
+ &acpi_suspend_ops_old : &acpi_suspend_ops);
+}
+#else /* !CONFIG_SUSPEND */
+static inline void acpi_sleep_suspend_setup(void) {}
+#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
@@ -698,7 +729,30 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
.restore_cleanup = acpi_pm_thaw,
.recover = acpi_pm_finish,
};
-#endif /* CONFIG_HIBERNATION */
+
+static void acpi_sleep_hibernate_setup(void)
+{
+ acpi_status status;
+ u8 type_a, type_b;
+
+ status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
+ if (ACPI_FAILURE(status))
+ return;
+
+ hibernation_set_ops(old_suspend_ordering ?
+ &acpi_hibernation_ops_old : &acpi_hibernation_ops);
+ sleep_states[ACPI_STATE_S4] = 1;
+ pr_cont(" S4");
+ if (nosigcheck)
+ return;
+
+ acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
+ if (facs)
+ s4_hardware_signature = facs->hardware_signature;
+}
+#else /* !CONFIG_HIBERNATION */
+static inline void acpi_sleep_hibernate_setup(void) {}
+#endif /* !CONFIG_HIBERNATION */
int acpi_suspend(u32 acpi_state)
{
@@ -734,9 +788,6 @@ int __init acpi_sleep_init(void)
{
acpi_status status;
u8 type_a, type_b;
-#ifdef CONFIG_SUSPEND
- int i = 0;
-#endif
if (acpi_disabled)
return 0;
@@ -744,45 +795,19 @@ int __init acpi_sleep_init(void)
acpi_sleep_dmi_check();
sleep_states[ACPI_STATE_S0] = 1;
- printk(KERN_INFO PREFIX "(supports S0");
-
-#ifdef CONFIG_SUSPEND
- for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
- status = acpi_get_sleep_type_data(i, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
- sleep_states[i] = 1;
- printk(KERN_CONT " S%d", i);
- }
- }
+ pr_info(PREFIX "(supports S0");
- suspend_set_ops(old_suspend_ordering ?
- &acpi_suspend_ops_old : &acpi_suspend_ops);
-#endif
+ acpi_sleep_suspend_setup();
+ acpi_sleep_hibernate_setup();
-#ifdef CONFIG_HIBERNATION
- status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
- hibernation_set_ops(old_suspend_ordering ?
- &acpi_hibernation_ops_old : &acpi_hibernation_ops);
- sleep_states[ACPI_STATE_S4] = 1;
- printk(KERN_CONT " S4");
- if (!nosigcheck) {
- acpi_get_table(ACPI_SIG_FACS, 1,
- (struct acpi_table_header **)&facs);
- if (facs)
- s4_hardware_signature =
- facs->hardware_signature;
- }
- }
-#endif
status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[ACPI_STATE_S5] = 1;
- printk(KERN_CONT " S5");
+ pr_cont(" S5");
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
}
- printk(KERN_CONT ")\n");
+ pr_cont(")\n");
/*
* Register the tts_notifier to reboot notifier list so that the _TTS
* object can also be evaluated when the system enters S5.
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 74d59c8f4678..0143540a2519 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -6,3 +6,5 @@ extern void acpi_disable_wakeup_devices(u8 sleep_state);
extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
+
+extern void acpi_resume_power_resources(void);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index ea61ca9129cd..41c0504470db 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -498,7 +498,7 @@ static int get_status(u32 index, acpi_event_status *status,
result = acpi_get_gpe_device(index, handle);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
- "Invalid GPE 0x%x\n", index));
+ "Invalid GPE 0x%x", index));
goto end;
}
result = acpi_get_gpe_status(*handle, index, status);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2572d9715bda..d67a1fe07f0e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -204,7 +204,7 @@ int __init
acpi_table_parse_entries(char *id,
unsigned long table_size,
int entry_id,
- acpi_table_entry_handler handler,
+ acpi_tbl_entry_handler handler,
unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
@@ -269,7 +269,7 @@ err:
int __init
acpi_table_parse_madt(enum acpi_madt_type id,
- acpi_table_entry_handler handler, unsigned int max_entries)
+ acpi_tbl_entry_handler handler, unsigned int max_entries)
{
return acpi_table_parse_entries(ACPI_SIG_MADT,
sizeof(struct acpi_table_madt), id,
@@ -285,7 +285,7 @@ acpi_table_parse_madt(enum acpi_madt_type id,
* Scan the ACPI System Descriptor Table (SDT) for a table matching @id,
* run @handler on it. Return 0 if the table is found, non-zero if not.
*/
-int __init acpi_table_parse(char *id, acpi_table_handler handler)
+int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
{
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 506fbd4b5733..8470771e5eae 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -97,7 +97,7 @@ module_param(psv, int, 0644);
MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
static int acpi_thermal_add(struct acpi_device *device);
-static int acpi_thermal_remove(struct acpi_device *device, int type);
+static int acpi_thermal_remove(struct acpi_device *device);
static void acpi_thermal_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id thermal_device_ids[] = {
@@ -288,7 +288,7 @@ do { \
if (flags != ACPI_TRIPS_INIT) \
ACPI_EXCEPTION((AE_INFO, AE_ERROR, \
"ACPI thermal trip point %s changed\n" \
- "Please send acpidump to linux-acpi@vger.kernel.org\n", str)); \
+ "Please send acpidump to linux-acpi@vger.kernel.org", str)); \
} while (0)
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
@@ -531,6 +531,10 @@ static void acpi_thermal_check(void *data)
{
struct acpi_thermal *tz = data;
+ if (!tz->tz_enabled) {
+ pr_warn("thermal zone is disabled\n");
+ return;
+ }
thermal_zone_device_update(tz->thermal_zone);
}
@@ -1111,7 +1115,7 @@ end:
return result;
}
-static int acpi_thermal_remove(struct acpi_device *device, int type)
+static int acpi_thermal_remove(struct acpi_device *device)
{
struct acpi_thermal *tz = NULL;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index ac9a69cd45f5..313f959413dc 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -88,7 +88,7 @@ module_param(use_bios_initial_backlight, bool, 0644);
static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
-static int acpi_video_bus_remove(struct acpi_device *device, int type);
+static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id video_device_ids[] = {
@@ -673,7 +673,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
br->levels[i] = br->levels[i - level_ac_battery];
count += level_ac_battery;
} else if (level_ac_battery > 2)
- ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package\n"));
+ ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package"));
/* Check if the _BCL package is in a reversed order */
if (max_level == br->levels[2]) {
@@ -682,7 +682,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
acpi_video_cmp_level, NULL);
} else if (max_level != br->levels[count - 1])
ACPI_ERROR((AE_INFO,
- "Found unordered _BCL package\n"));
+ "Found unordered _BCL package"));
br->count = count;
device->brightness = br;
@@ -1740,7 +1740,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
return error;
}
-static int acpi_video_bus_remove(struct acpi_device *device, int type)
+static int acpi_video_bus_remove(struct acpi_device *device)
{
struct acpi_video_bus *video = NULL;
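
The thermal and video hunks above drop the second argument from the drivers' .remove callbacks. Judging from these two conversions alone (the header change itself is not part of this excerpt), the remove prototype in struct acpi_device_ops presumably becomes:

    int (*remove)(struct acpi_device *device);   /* was: (struct acpi_device *, int type) */
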
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index a2fc56d2e681..cdbad3a454a0 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -45,7 +45,6 @@ static int amba_match(struct device *dev, struct device_driver *drv)
return amba_lookup(pcdrv->id_table, pcdev) != NULL;
}
-#ifdef CONFIG_HOTPLUG
static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct amba_device *pcdev = to_amba_device(dev);
@@ -58,9 +57,6 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
return retval;
}
-#else
-#define amba_uevent NULL
-#endif
#define amba_attr_func(name,fmt,arg...) \
static ssize_t name##_show(struct device *_dev, \
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index bd5de08ad6fd..ab92785f54dc 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -20,6 +20,7 @@
*
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -157,6 +158,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
+#ifdef CONFIG_PM_SLEEP
static int tegra_ahb_suspend(struct device *dev)
{
int i;
@@ -176,6 +178,7 @@ static int tegra_ahb_resume(struct device *dev)
gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
return 0;
}
+#endif
static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
tegra_ahb_suspend,
@@ -241,7 +244,7 @@ static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
}
-static int __devinit tegra_ahb_probe(struct platform_device *pdev)
+static int tegra_ahb_probe(struct platform_device *pdev)
{
struct resource *res;
struct tegra_ahb *ahb;
@@ -255,9 +258,9 @@ static int __devinit tegra_ahb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!ahb->regs)
- return -EBUSY;
+ ahb->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ahb->regs))
+ return PTR_ERR(ahb->regs);
ahb->dev = &pdev->dev;
platform_set_drvdata(pdev, ahb);
@@ -265,7 +268,7 @@ static int __devinit tegra_ahb_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
+static const struct of_device_id tegra_ahb_of_match[] = {
{ .compatible = "nvidia,tegra30-ahb", },
{ .compatible = "nvidia,tegra20-ahb", },
{},
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e08d322d01d7..4cfb7200260d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -14,7 +14,7 @@ menuconfig ATA
tristate "Serial ATA and Parallel ATA drivers"
depends on HAS_IOMEM
depends on BLOCK
- depends on !(M32R || M68K) || BROKEN
+ depends on !(M32R || M68K || S390) || BROKEN
select SCSI
---help---
If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
@@ -163,7 +163,7 @@ config SATA_QSTOR
config SATA_SX4
tristate "Promise SATA SX4 support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for Promise Serial ATA SX4.
@@ -390,7 +390,7 @@ config PATA_CS5530
config PATA_CS5535
tristate "CS5535 PATA support (Experimental)"
- depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+ depends on PCI && X86 && !X86_64
help
This option enables support for the NatSemi/AMD CS5535
companion chip used with the Geode processor family.
@@ -408,7 +408,7 @@ config PATA_CS5536
config PATA_CYPRESS
tristate "Cypress CY82C693 PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for the Cypress/Contaq CY82C693
chipset found in some Alpha systems
@@ -496,7 +496,7 @@ config PATA_IMX
config PATA_IT8213
tristate "IT8213 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for the ITE 821 PATA
controllers via the new ATA layer.
@@ -589,7 +589,7 @@ config PATA_OLDPIIX
config PATA_OPTIDMA
tristate "OPTI FireStar PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables DMA/PIO support for the later OPTi
controllers found on some old motherboards and in some
@@ -616,7 +616,7 @@ config PATA_PDC_OLD
config PATA_RADISYS
tristate "RADISYS 82600 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for the RADISYS 82600
PATA controllers via the new ATA layer
@@ -687,7 +687,7 @@ config PATA_SIS
config PATA_TOSHIBA
tristate "Toshiba Piccolo support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
Support for the Toshiba Piccolo controllers. Currently only the
primary channel is supported by this driver.
@@ -738,7 +738,7 @@ comment "PIO-only SFF controllers"
config PATA_AT32
tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
+ depends on AVR32 && PLATFORM_AT32AP
help
This option enables support for the IDE devices on the
Atmel AT32AP platform.
@@ -755,7 +755,7 @@ config PATA_AT91
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for the CMD640 PCI IDE
interface chip. Only the primary channel is currently
@@ -801,7 +801,7 @@ config PATA_NS87410
config PATA_OPTI
tristate "OPTI621/6215 PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables full PIO support for the early Opti ATA
controllers found on some old motherboards.
@@ -881,7 +881,7 @@ config PATA_SAMSUNG_CF
config PATA_WINBOND_VLB
tristate "Winbond W83759A VLB PATA support (Experimental)"
- depends on ISA && EXPERIMENTAL
+ depends on ISA
select PATA_LEGACY
help
Support for the Winbond W83759A controller on Vesa Local Bus
@@ -909,7 +909,7 @@ config ATA_GENERIC
config PATA_LEGACY
tristate "Legacy ISA PATA support (Experimental)"
- depends on (ISA || PCI) && EXPERIMENTAL
+ depends on (ISA || PCI)
help
This option enables support for ISA/VLB/PCI bus legacy PATA
ports and allows them to be accessed via the new ATA layer.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7862d17976b7..495aeed26779 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -53,6 +53,7 @@
enum {
AHCI_PCI_BAR_STA2X11 = 0,
+ AHCI_PCI_BAR_ENMOTUS = 2,
AHCI_PCI_BAR_STANDARD = 5,
};
@@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ /* Enmotus */
+ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1057,6 +1061,86 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
+int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+{
+ int rc;
+ unsigned int maxvec;
+
+ if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) {
+ rc = pci_enable_msi_block_auto(pdev, &maxvec);
+ if (rc > 0) {
+ if ((rc == maxvec) || (rc == 1))
+ return rc;
+ /*
+ * Assume that the advantage of multiple MSIs is negated,
+ * so fall back to single MSI mode to save resources
+ */
+ pci_disable_msi(pdev);
+ if (!pci_enable_msi(pdev))
+ return 1;
+ }
+ }
+
+ pci_intx(pdev, 1);
+ return 0;
+}
+
+/**
+ * ahci_host_activate - start AHCI host, request IRQs and register it
+ * @host: target ATA host
+ * @irq: base IRQ number to request
+ * @n_msis: number of MSIs allocated for this host
+ * @irq_handler: irq_handler used when requesting IRQs
+ * @irq_flags: irq_flags used when requesting IRQs
+ *
+ * Similar to ata_host_activate, but requests IRQs according to AHCI-1.1
+ * when multiple MSIs were allocated. That is one MSI per port, starting
+ * from @irq.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
+{
+ int i, rc;
+
+ /* Sharing Last Message among several ports is not supported */
+ if (n_msis < host->n_ports)
+ return -EINVAL;
+
+ rc = ata_host_start(host);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < host->n_ports; i++) {
+ rc = devm_request_threaded_irq(host->dev,
+ irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
+ dev_driver_string(host->dev), host->ports[i]);
+ if (rc)
+ goto out_free_irqs;
+ }
+
+ for (i = 0; i < host->n_ports; i++)
+ ata_port_desc(host->ports[i], "irq %d", irq + i);
+
+ rc = ata_host_register(host, &ahci_sht);
+ if (rc)
+ goto out_free_all_irqs;
+
+ return 0;
+
+out_free_all_irqs:
+ i = host->n_ports;
+out_free_irqs:
+ for (i--; i >= 0; i--)
+ devm_free_irq(host->dev, irq + i, host->ports[i]);
+
+ return rc;
+}
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
@@ -1065,7 +1149,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
- int n_ports, i, rc;
+ int n_ports, n_msis, i, rc;
int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
VPRINTK("ENTER\n");
@@ -1098,9 +1182,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"PDC42819 can only drive SATA devices with this driver\n");
- /* The Connext uses non-standard BAR */
+ /* Both Connext and Enmotus devices use non-standard BARs */
if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
+ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -1150,11 +1236,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_sb600_enable_64bit(pdev))
hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
- if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
- pci_intx(pdev, 1);
-
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ n_msis = ahci_init_interrupts(pdev, hpriv);
+ if (n_msis > 1)
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
@@ -1250,6 +1337,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_pci_print_info(host);
pci_set_master(pdev);
+
+ if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
+ return ahci_host_activate(host, pdev->irq, n_msis);
+
return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
&ahci_sht);
}
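
Condensed view of how the ahci.c hunks above fit together on the probe path: ahci_init_interrupts() reports how many MSI vectors were obtained, and when more than one was granted the driver switches to ahci_host_activate(), which in turn insists on at least one vector per port. The wrapper function below is a readability sketch, not code from the patch.

    static int example_activate(struct ata_host *host, struct pci_dev *pdev,
                                struct ahci_host_priv *hpriv)
    {
            int n_msis = ahci_init_interrupts(pdev, hpriv);

            if (n_msis > 1) {
                    /* AHCI-1.1 style: port i is serviced by pdev->irq + i */
                    hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
                    return ahci_host_activate(host, pdev->irq, n_msis);
            }

            /* single MSI or legacy INTx: one shared handler for all ports */
            return ata_host_activate(host, pdev->irq, ahci_interrupt,
                                     IRQF_SHARED, &ahci_sht);
    }
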
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 9be471200a07..b830e6c9fe49 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -231,6 +231,7 @@ enum {
AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on
port start (wait until
error-handling stage) */
+ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
/* ap->flags bits */
@@ -297,6 +298,8 @@ struct ahci_port_priv {
unsigned int ncq_saw_d2h:1;
unsigned int ncq_saw_dmas:1;
unsigned int ncq_saw_sdb:1;
+ u32 intr_status; /* interrupts to handle */
+ spinlock_t lock; /* protects parent ata_port */
u32 intr_mask; /* interrupts to enable */
bool fbs_supported; /* set iff FBS is supported */
bool fbs_enabled; /* set iff FBS is enabled */
@@ -359,7 +362,10 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv,
struct ata_port_info *pi);
int ahci_reset_em(struct ata_host *host);
irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
+irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
void ahci_print_info(struct ata_host *host, const char *scc_s);
+int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index b7078afddb74..7a8a2841fe64 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -25,6 +25,8 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+static void ahci_host_stop(struct ata_host *host);
+
enum ahci_type {
AHCI, /* standard platform ahci */
IMX53_AHCI, /* ahci on i.mx53 */
@@ -47,6 +49,15 @@ static struct platform_device_id ahci_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, ahci_devtype);
+static struct ata_port_operations ahci_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = ahci_host_stop,
+};
+
+static struct ata_port_operations ahci_platform_retry_srst_ops = {
+ .inherits = &ahci_pmp_retry_srst_ops,
+ .host_stop = ahci_host_stop,
+};
static const struct ata_port_info ahci_port_info[] = {
/* by features */
@@ -54,20 +65,20 @@ static const struct ata_port_info ahci_port_info[] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_platform_ops,
},
[IMX53_AHCI] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_pmp_retry_srst_ops,
+ .port_ops = &ahci_platform_retry_srst_ops,
},
[STRICT_AHCI] = {
AHCI_HFLAGS (AHCI_HFLAG_DELAY_ENGINE),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_platform_ops,
},
};
@@ -75,7 +86,7 @@ static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT("ahci_platform"),
};
-static int __init ahci_probe(struct platform_device *pdev)
+static int ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
@@ -218,15 +229,12 @@ free_clk:
return rc;
}
-static int __devexit ahci_remove(struct platform_device *pdev)
+static void ahci_host_stop(struct ata_host *host)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = host->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
- ata_host_detach(host);
-
if (pdata && pdata->exit)
pdata->exit(dev);
@@ -234,8 +242,6 @@ static int __devexit ahci_remove(struct platform_device *pdev)
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -317,7 +323,7 @@ disable_unprepare_clk:
}
#endif
-SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,spear-ahci", },
@@ -326,7 +332,8 @@ static const struct of_device_id ahci_of_match[] = {
MODULE_DEVICE_TABLE(of, ahci_of_match);
static struct platform_driver ahci_driver = {
- .remove = __devexit_p(ahci_remove),
+ .probe = ahci_probe,
+ .remove = ata_platform_remove_one,
.driver = {
.name = "ahci",
.owner = THIS_MODULE,
@@ -335,18 +342,7 @@ static struct platform_driver ahci_driver = {
},
.id_table = ahci_devtype,
};
-
-static int __init ahci_init(void)
-{
- return platform_driver_probe(&ahci_driver, ahci_probe);
-}
-module_init(ahci_init);
-
-static void __exit ahci_exit(void)
-{
- platform_driver_unregister(&ahci_driver);
-}
-module_exit(ahci_exit);
+module_platform_driver(ahci_driver);
MODULE_DESCRIPTION("AHCI SATA platform driver");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ef773e12af79..174eca609b42 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -164,28 +164,6 @@ struct piix_host_priv {
void __iomem *sidpr;
};
-static int piix_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void piix_remove_one(struct pci_dev *pdev);
-static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
-static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
-static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
-static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
-static int ich_pata_cable_detect(struct ata_port *ap);
-static u8 piix_vmw_bmdma_status(struct ata_port *ap);
-static int piix_sidpr_scr_read(struct ata_link *link,
- unsigned int reg, u32 *val);
-static int piix_sidpr_scr_write(struct ata_link *link,
- unsigned int reg, u32 val);
-static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- unsigned hints);
-static bool piix_irq_check(struct ata_port *ap);
-static int piix_port_start(struct ata_port *ap);
-#ifdef CONFIG_PM
-static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
-static int piix_pci_device_resume(struct pci_dev *pdev);
-#endif
-
static unsigned int in_module_init = 1;
static const struct pci_device_id piix_pci_tbl[] = {
@@ -342,64 +320,6 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ } /* terminate list */
};
-static struct pci_driver piix_pci_driver = {
- .name = DRV_NAME,
- .id_table = piix_pci_tbl,
- .probe = piix_init_one,
- .remove = piix_remove_one,
-#ifdef CONFIG_PM
- .suspend = piix_pci_device_suspend,
- .resume = piix_pci_device_resume,
-#endif
-};
-
-static struct scsi_host_template piix_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations piix_sata_ops = {
- .inherits = &ata_bmdma32_port_ops,
- .sff_irq_check = piix_irq_check,
- .port_start = piix_port_start,
-};
-
-static struct ata_port_operations piix_pata_ops = {
- .inherits = &piix_sata_ops,
- .cable_detect = ata_cable_40wire,
- .set_piomode = piix_set_piomode,
- .set_dmamode = piix_set_dmamode,
- .prereset = piix_pata_prereset,
-};
-
-static struct ata_port_operations piix_vmw_ops = {
- .inherits = &piix_pata_ops,
- .bmdma_status = piix_vmw_bmdma_status,
-};
-
-static struct ata_port_operations ich_pata_ops = {
- .inherits = &piix_pata_ops,
- .cable_detect = ich_pata_cable_detect,
- .set_dmamode = ich_set_dmamode,
-};
-
-static struct device_attribute *piix_sidpr_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
- NULL
-};
-
-static struct scsi_host_template piix_sidpr_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
- .shost_attrs = piix_sidpr_shost_attrs,
-};
-
-static struct ata_port_operations piix_sidpr_sata_ops = {
- .inherits = &piix_sata_ops,
- .hardreset = sata_std_hardreset,
- .scr_read = piix_sidpr_scr_read,
- .scr_write = piix_sidpr_scr_write,
- .set_lpm = piix_sidpr_set_lpm,
-};
-
static const struct piix_map_db ich5_map_db = {
.mask = 0x7,
.port_enable = 0x3,
@@ -504,147 +424,6 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8_sata_snb] = &ich8_map_db,
};
-static struct ata_port_info piix_port_info[] = {
- [piix_pata_mwdma] = /* PIIX3 MWDMA only */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .port_ops = &piix_pata_ops,
- },
-
- [piix_pata_33] = /* PIIX4 at 33MHz */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .udma_mask = ATA_UDMA2,
- .port_ops = &piix_pata_ops,
- },
-
- [ich_pata_33] = /* ICH0 - ICH at 33Mhz*/
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
- .udma_mask = ATA_UDMA2,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_66] = /* ICH controllers up to 66MHz */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
- .udma_mask = ATA_UDMA4,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_100] =
- {
- .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA5,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_100_nomwdma1] =
- {
- .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2_ONLY,
- .udma_mask = ATA_UDMA5,
- .port_ops = &ich_pata_ops,
- },
-
- [ich5_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich6_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich6m_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8_sata] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8_2port_sata] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [tolapai_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8m_apple_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [piix_pata_vmw] =
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .udma_mask = ATA_UDMA2,
- .port_ops = &piix_vmw_ops,
- },
-
- /*
- * some Sandybridge chipsets have broken 32 mode up to now,
- * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
- */
- [ich8_sata_snb] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
-};
-
static struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
@@ -1261,6 +1040,193 @@ static u8 piix_vmw_bmdma_status(struct ata_port *ap)
return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
}
+static struct scsi_host_template piix_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations piix_sata_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .sff_irq_check = piix_irq_check,
+ .port_start = piix_port_start,
+};
+
+static struct ata_port_operations piix_pata_ops = {
+ .inherits = &piix_sata_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = piix_set_piomode,
+ .set_dmamode = piix_set_dmamode,
+ .prereset = piix_pata_prereset,
+};
+
+static struct ata_port_operations piix_vmw_ops = {
+ .inherits = &piix_pata_ops,
+ .bmdma_status = piix_vmw_bmdma_status,
+};
+
+static struct ata_port_operations ich_pata_ops = {
+ .inherits = &piix_pata_ops,
+ .cable_detect = ich_pata_cable_detect,
+ .set_dmamode = ich_set_dmamode,
+};
+
+static struct device_attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ NULL
+};
+
+static struct scsi_host_template piix_sidpr_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ .shost_attrs = piix_sidpr_shost_attrs,
+};
+
+static struct ata_port_operations piix_sidpr_sata_ops = {
+ .inherits = &piix_sata_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = piix_sidpr_scr_read,
+ .scr_write = piix_sidpr_scr_write,
+ .set_lpm = piix_sidpr_set_lpm,
+};
+
+static struct ata_port_info piix_port_info[] = {
+ [piix_pata_mwdma] = /* PIIX3 MWDMA only */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .port_ops = &piix_pata_ops,
+ },
+
+ [piix_pata_33] = /* PIIX4 at 33MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &piix_pata_ops,
+ },
+
+ [ich_pata_33] = /* ICH0 - ICH at 33Mhz*/
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_66] = /* ICH controllers up to 66MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_100] =
+ {
+ .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_100_nomwdma1] =
+ {
+ .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2_ONLY,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich5_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich6_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich6m_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_2port_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [tolapai_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8m_apple_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [piix_pata_vmw] =
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &piix_vmw_ops,
+ },
+
+ /*
+ * some Sandybridge chipsets have broken 32 mode up to now,
+ * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
+ */
+ [ich8_sata_snb] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+};
+
#define AHCI_PCI_BAR 5
#define AHCI_GLOBAL_CTL 0x04
#define AHCI_ENABLE (1 << 31)
@@ -1304,7 +1270,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
* they are found return an error code so we can turn off DMA
*/
-static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
+static int piix_check_450nx_errata(struct pci_dev *ata_dev)
{
struct pci_dev *pdev = NULL;
u16 cfg;
@@ -1330,8 +1296,8 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
return no_piix_dma;
}
-static void __devinit piix_init_pcs(struct ata_host *host,
- const struct piix_map_db *map_db)
+static void piix_init_pcs(struct ata_host *host,
+ const struct piix_map_db *map_db)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
u16 pcs, new_pcs;
@@ -1347,9 +1313,9 @@ static void __devinit piix_init_pcs(struct ata_host *host,
}
}
-static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
- struct ata_port_info *pinfo,
- const struct piix_map_db *map_db)
+static const int *piix_init_sata_map(struct pci_dev *pdev,
+ struct ata_port_info *pinfo,
+ const struct piix_map_db *map_db)
{
const int *map;
int i, invalid_map = 0;
@@ -1426,7 +1392,7 @@ static bool piix_no_sidpr(struct ata_host *host)
return false;
}
-static int __devinit piix_init_sidpr(struct ata_host *host)
+static int piix_init_sidpr(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct piix_host_priv *hpriv = host->private_data;
@@ -1585,12 +1551,31 @@ static void piix_ignore_devices_quirk(struct ata_host *host)
},
{ } /* terminate list */
};
- const struct dmi_system_id *dmi = dmi_first_match(ignore_hyperv);
+ static const struct dmi_system_id allow_virtual_pc[] = {
+ {
+ /* In MS Virtual PC guests the DMI ident is nearly
+ * identical to a Hyper-V guest. One difference is the
+ * product version which is used here to identify
+ * a Virtual PC guest. This entry allows ata_piix to
+ * drive the emulated hardware.
+ */
+ .ident = "MS Virtual PC 2007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+ },
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv);
+ const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc);
- if (dmi && prefer_ms_hyperv) {
+ if (ignore && !allow && prefer_ms_hyperv) {
host->flags |= ATA_HOST_IGNORE_ATA;
dev_info(host->dev, "%s detected, ATA device ignore set\n",
- dmi->ident);
+ ignore->ident);
}
#endif
}
@@ -1610,8 +1595,7 @@ static void piix_ignore_devices_quirk(struct ata_host *host)
* Zero on success, or -ERRNO value.
*/
-static int __devinit piix_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
@@ -1727,6 +1711,17 @@ static void piix_remove_one(struct pci_dev *pdev)
ata_pci_remove_one(pdev);
}
+static struct pci_driver piix_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = piix_pci_tbl,
+ .probe = piix_init_one,
+ .remove = piix_remove_one,
+#ifdef CONFIG_PM
+ .suspend = piix_pci_device_suspend,
+ .resume = piix_pci_device_resume,
+#endif
+};
+
static int __init piix_init(void)
{
int rc;
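For reference, the piix_pci_driver structure added above is what this (truncated) piix_init() hands to the PCI core; in essence the elided body reduces to a sketch like:

	rc = pci_register_driver(&piix_pci_driver);
	if (rc)
		return rc;
	return 0;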
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 4201e535a8c8..34c82167b962 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1384,7 +1384,7 @@ int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
if (rc == -EIO) {
irq_sts = readl(port_mmio + PORT_IRQ_STAT);
if (irq_sts & PORT_IRQ_BAD_PMP) {
- ata_link_printk(link, KERN_WARNING,
+ ata_link_warn(link,
"applying PMP SRST workaround "
"and retrying\n");
rc = ahci_do_softreset(link, class, 0, deadline,
@@ -1655,19 +1655,16 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
ata_port_abort(ap);
}
-static void ahci_port_intr(struct ata_port *ap)
+static void ahci_handle_port_interrupt(struct ata_port *ap,
+ void __iomem *port_mmio, u32 status)
{
- void __iomem *port_mmio = ahci_port_base(ap);
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
- u32 status, qc_active = 0;
+ u32 qc_active = 0;
int rc;
- status = readl(port_mmio + PORT_IRQ_STAT);
- writel(status, port_mmio + PORT_IRQ_STAT);
-
/* ignore BAD_PMP while resetting */
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;
@@ -1743,6 +1740,107 @@ static void ahci_port_intr(struct ata_port *ap)
}
}
+void ahci_port_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 status;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ ahci_handle_port_interrupt(ap, port_mmio, status);
+}
+
+irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
+{
+ struct ata_port *ap = dev_instance;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+ status = pp->intr_status;
+ if (status)
+ pp->intr_status = 0;
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+
+ spin_lock_bh(ap->lock);
+ ahci_handle_port_interrupt(ap, port_mmio, status);
+ spin_unlock_bh(ap->lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(ahci_thread_fn);
+
+void ahci_hw_port_interrupt(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 status;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ pp->intr_status |= status;
+}
+
+irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance)
+{
+ struct ata_port *ap_this = dev_instance;
+ struct ahci_port_priv *pp = ap_this->private_data;
+ struct ata_host *host = ap_this->host;
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ unsigned int i;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ spin_lock(&host->lock);
+
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+
+ if (!irq_stat) {
+ u32 status = pp->intr_status;
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return status ? IRQ_WAKE_THREAD : IRQ_NONE;
+ }
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ if (!(irq_masked & (1 << i)))
+ continue;
+
+ ap = host->ports[i];
+ if (ap) {
+ ahci_hw_port_interrupt(ap);
+ VPRINTK("port %u\n", i);
+ } else {
+ VPRINTK("port %u (no irq)\n", i);
+ if (ata_ratelimit())
+ dev_warn(host->dev,
+ "interrupt on disabled port %u\n", i);
+ }
+ }
+
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_WAKE_THREAD;
+}
+EXPORT_SYMBOL_GPL(ahci_hw_interrupt);
+
irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
@@ -1951,13 +2049,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
/* Use the nominal value 10 ms if the read MDAT is zero,
* the nominal value of DETO is 20 ms.
*/
- if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] &
+ if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
ATA_LOG_DEVSLP_VALID_MASK) {
- mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] &
+ mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
ATA_LOG_DEVSLP_MDAT_MASK;
if (!mdat)
mdat = 10;
- deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO];
+ deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
if (!deto)
deto = 20;
} else {
@@ -2196,6 +2294,14 @@ static int ahci_port_start(struct ata_port *ap)
*/
pp->intr_mask = DEF_PORT_IRQ;
+ /*
+ * Switch to per-port locking in case each port has its own MSI vector.
+ */
+ if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
+ spin_lock_init(&pp->lock);
+ ap->lock = &pp->lock;
+ }
+
ap->private_data = pp;
/* engage engines, captain */
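The per-port lock above pairs with the ahci_hw_interrupt()/ahci_thread_fn() handlers introduced earlier in this file: when AHCI_HFLAG_MULTI_MSI is set, each port can get its own vector and its own hard/threaded handler pair. A hypothetical sketch of that wiring (irq_base and the helper name are assumptions, not part of this patch):

	static int example_request_port_irqs(struct ata_host *host, int irq_base)
	{
		int i, rc;

		for (i = 0; i < host->n_ports; i++) {
			/* hard handler latches PORT_IRQ_STAT, the threaded
			 * handler does the actual completion work */
			rc = devm_request_threaded_irq(host->dev, irq_base + i,
						       ahci_hw_interrupt,
						       ahci_thread_fn, 0,
						       dev_driver_string(host->dev),
						       host->ports[i]);
			if (rc)
				return rc;
		}
		return 0;
	}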
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 5b0ba3f20edc..6fc67f7efb22 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -76,6 +76,9 @@ acpi_handle ata_dev_acpi_handle(struct ata_device *dev)
acpi_integer adr;
struct ata_port *ap = dev->link->ap;
+ if (dev->flags & ATA_DFLAG_ACPI_DISABLED)
+ return NULL;
+
if (ap->flags & ATA_FLAG_ACPI_SATA) {
if (!sata_pmp_attached(ap))
adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
@@ -945,6 +948,7 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
return rc;
}
+ dev->flags |= ATA_DFLAG_ACPI_DISABLED;
ata_dev_warn(dev, "ACPI: failed the second time, disabled\n");
/* We can safely continue if no _GTF command has been executed
@@ -1025,30 +1029,20 @@ static void ata_acpi_register_power_resource(struct ata_device *dev)
{
struct scsi_device *sdev = dev->sdev;
acpi_handle handle;
- struct device *device;
handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- device = &sdev->sdev_gendev;
-
- acpi_power_resource_register_device(device, handle);
+ if (handle)
+ acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
}
static void ata_acpi_unregister_power_resource(struct ata_device *dev)
{
struct scsi_device *sdev = dev->sdev;
acpi_handle handle;
- struct device *device;
handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- device = &sdev->sdev_gendev;
-
- acpi_power_resource_unregister_device(device, handle);
+ if (handle)
+ acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
}
void ata_acpi_bind(struct ata_device *dev)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f46fbd3bd3fb..46cd3f4c6aaa 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -67,6 +67,7 @@
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
#include "libata.h"
#include "libata-transport.h"
@@ -2324,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev)
}
}
- /* check and mark DevSlp capability */
- if (ata_id_has_devslp(dev->id))
- dev->flags |= ATA_DFLAG_DEVSLP;
-
- /* Obtain SATA Settings page from Identify Device Data Log,
- * which contains DevSlp timing variables etc.
- * Exclude old devices with ata_id_has_ncq()
+ /* Check and mark DevSlp capability. Get DevSlp timing variables
+ * from SATA Settings page of Identify Device Data Log.
*/
- if (ata_id_has_ncq(dev->id)) {
+ if (ata_id_has_devslp(dev->id)) {
+ u8 sata_setting[ATA_SECT_SIZE];
+ int i, j;
+
+ dev->flags |= ATA_DFLAG_DEVSLP;
err_mask = ata_read_log_page(dev,
ATA_LOG_SATA_ID_DEV_DATA,
ATA_LOG_SATA_SETTINGS,
- dev->sata_settings,
+ sata_setting,
1);
if (err_mask)
ata_dev_dbg(dev,
"failed to get Identify Device Data, Emask 0x%x\n",
err_mask);
+ else
+ for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
+ j = ATA_LOG_DEVSLP_OFFSET + i;
+ dev->devslp_timing[i] = sata_setting[j];
+ }
}
dev->cdb_len = 16;
@@ -2560,6 +2565,7 @@ int ata_bus_probe(struct ata_port *ap)
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
+ dev->dma_mode = 0xff;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
@@ -6286,8 +6292,7 @@ void ata_host_detach(struct ata_host *host)
*/
void ata_pci_remove_one(struct pci_dev *pdev)
{
- struct device *dev = &pdev->dev;
- struct ata_host *host = dev_get_drvdata(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
ata_host_detach(host);
}
@@ -6356,7 +6361,7 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
int rc = 0;
rc = ata_host_suspend(host, mesg);
@@ -6370,7 +6375,7 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
int ata_pci_device_resume(struct pci_dev *pdev)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
@@ -6382,6 +6387,26 @@ int ata_pci_device_resume(struct pci_dev *pdev)
#endif /* CONFIG_PCI */
+/**
+ * ata_platform_remove_one - Platform layer callback for device removal
+ * @pdev: Platform device that was removed
+ *
+ * Platform layer indicates to libata via this hook that hot-unplug or
+ * module unload event has occurred. Detach all ports. Resource
+ * release is handled via devres.
+ *
+ * LOCKING:
+ * Inherited from platform layer (may sleep).
+ */
+int ata_platform_remove_one(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+
+ ata_host_detach(host);
+
+ return 0;
+}
+
static int __init ata_parse_force_one(char **cur,
struct ata_force_ent *force_ent,
const char **reason)
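Several drivers later in this patch switch their platform .remove hook to the new ata_platform_remove_one() helper; a minimal sketch of the resulting driver boilerplate (driver name and probe routine are hypothetical):

	static struct platform_driver example_pata_driver = {
		.probe	= example_pata_probe,	/* hypothetical probe */
		.remove	= ata_platform_remove_one,
		.driver	= {
			.name	= "example-pata",
			.owner	= THIS_MODULE,
		},
	};
	module_platform_driver(example_pata_driver);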
@@ -6877,6 +6902,8 @@ EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */
+EXPORT_SYMBOL_GPL(ata_platform_remove_one);
+
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index e60437cd0d19..bcf4437214f5 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
*/
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
- if (qc->flags & AC_ERR_MEDIA)
+ if (qc->err_mask & AC_ERR_MEDIA)
return 0; /* don't retry media errors */
if (qc->flags & ATA_QCFLAG_IO)
return 1; /* otherwise retry anything from fs stack */
@@ -2657,6 +2657,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
+ dev->dma_mode = 0xff;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a6df6a351d6e..7c337e754dab 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
- if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
+ if (atadev && ap->ops->sw_activity_show &&
+ (ap->flags & ATA_FLAG_SW_ACTIVITY))
return ap->ops->sw_activity_show(atadev, buf);
return -EINVAL;
}
@@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
enum sw_activity val;
int rc;
- if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+ if (atadev && ap->ops->sw_activity_store &&
+ (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
val = simple_strtoul(buf, NULL, 0);
switch (val) {
case OFF: case BLINK_ON: case BLINK_OFF:
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 371fd2c698b7..405022d302c3 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -674,13 +674,16 @@ void arasan_cf_error_handler(struct ata_port *ap)
static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
+ struct ata_queued_cmd *qc = acdev->qc;
+ struct ata_port *ap = qc->ap;
+ struct ata_taskfile *tf = &qc->tf;
u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
- u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+ u32 write = tf->flags & ATA_TFLAG_WRITE;
xfer_ctr |= write ? XFER_WRITE : XFER_READ;
writel(xfer_ctr, acdev->vbase + XFER_CTR);
- acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
+ ap->ops->sff_exec_command(ap, tf);
ata_sff_queue_work(&acdev->work);
}
@@ -788,7 +791,7 @@ static struct ata_port_operations arasan_cf_ops = {
.set_dmamode = arasan_cf_set_dmamode,
};
-static int __devinit arasan_cf_probe(struct platform_device *pdev)
+static int arasan_cf_probe(struct platform_device *pdev)
{
struct arasan_cf_dev *acdev;
struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
@@ -902,7 +905,7 @@ free_clk:
return ret;
}
-static int __devexit arasan_cf_remove(struct platform_device *pdev)
+static int arasan_cf_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
@@ -952,7 +955,7 @@ MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
- .remove = __devexit_p(arasan_cf_remove),
+ .remove = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 2a96bb2c53ee..033f3f4c20ad 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -313,7 +313,7 @@ static struct ata_port_operations pata_at91_port_ops = {
.cable_detect = ata_cable_40wire,
};
-static int __devinit pata_at91_probe(struct platform_device *pdev)
+static int pata_at91_probe(struct platform_device *pdev)
{
struct at91_cf_data *board = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
@@ -420,7 +420,7 @@ err_put:
return ret;
}
-static int __devexit pata_at91_remove(struct platform_device *pdev)
+static int pata_at91_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct at91_ide_info *info;
@@ -441,7 +441,7 @@ static int __devexit pata_at91_remove(struct platform_device *pdev)
static struct platform_driver pata_at91_driver = {
.probe = pata_at91_probe,
- .remove = __devexit_p(pata_at91_remove),
+ .remove = pata_at91_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 1e65842e2ca7..8d43510c6bec 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1538,7 +1538,7 @@ static unsigned short atapi_io_port[] = {
* - IRQ (IORESOURCE_IRQ)
*
*/
-static int __devinit bfin_atapi_probe(struct platform_device *pdev)
+static int bfin_atapi_probe(struct platform_device *pdev)
{
int board_idx = 0;
struct resource *res;
@@ -1608,7 +1608,7 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
* A bfin atapi device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
-static int __devexit bfin_atapi_remove(struct platform_device *pdev)
+static int bfin_atapi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
@@ -1654,7 +1654,7 @@ static int bfin_atapi_resume(struct platform_device *pdev)
static struct platform_driver bfin_atapi_driver = {
.probe = bfin_atapi_probe,
- .remove = __devexit_p(bfin_atapi_remove),
+ .remove = bfin_atapi_remove,
.suspend = bfin_atapi_suspend,
.resume = bfin_atapi_resume,
.driver = {
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 7ba01415b676..2949cfc2dd31 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -474,14 +474,14 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* check for enabled ports */
pci_read_config_byte(pdev, CNTRL, &reg);
if (!port_ok)
- dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
+ dev_notice(&pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
+ dev_notice(&pdev->dev, "Primary port is disabled\n");
ppi[0] = &ata_dummy_port_info;
}
if (port_ok && !(reg & CNTRL_CH1)) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
+ dev_notice(&pdev->dev, "Secondary port is disabled\n");
ppi[1] = &ata_dummy_port_info;
}
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index de74d804f031..bfcf377e8f77 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -115,7 +115,7 @@ static struct ata_port_operations cs5520_port_ops = {
.set_piomode = cs5520_set_piomode,
};
-static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index dec1b6c4b351..0448860a2077 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -38,6 +38,7 @@
#include <linux/delay.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
+#include <linux/dmi.h>
#ifdef CONFIG_X86_32
#include <asm/msr.h>
@@ -80,6 +81,21 @@ enum {
IDE_ETC_UDMA_MASK = 0xc0,
};
+/* Some Bachmann OT200 devices have non-working UDMA support due to a
+ * missing resistor.
+ */
+static const struct dmi_system_id udma_quirk_dmi_table[] = {
+ {
+ .ident = "Bachmann electronic OT200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Bachmann electronic"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OT200"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1")
+ },
+ },
+ { }
+};
+
static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
@@ -242,9 +258,23 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &cs5536_port_ops,
};
- const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+ static const struct ata_port_info no_udma_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = ATA_PIO4,
+ .port_ops = &cs5536_port_ops,
+ };
+
+
+ const struct ata_port_info *ppi[2];
u32 cfg;
+ if (dmi_check_system(udma_quirk_dmi_table))
+ ppi[0] = &no_udma_info;
+ else
+ ppi[0] = &info;
+
+ ppi[1] = &ata_dummy_port_info;
+
if (use_msr)
printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n");
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index e056406d6a11..c1bfaf43d109 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -31,6 +31,7 @@
* Copyright (C) 2006 Tower Technologies
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -822,8 +823,7 @@ static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(al))) {
- ata_link_printk(al, KERN_ERR, "SRST failed (errno=%d)\n",
- rc);
+ ata_link_err(al, "SRST failed (errno=%d)\n", rc);
return rc;
}
@@ -857,8 +857,7 @@ static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
/* Can become DEBUG later */
if (count)
- ata_port_printk(ap, KERN_DEBUG,
- "drained %d bytes to clear DRQ.\n", count);
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count);
}
@@ -912,7 +911,7 @@ static struct ata_port_operations ep93xx_pata_port_ops = {
.port_start = ep93xx_pata_port_start,
};
-static int __devinit ep93xx_pata_probe(struct platform_device *pdev)
+static int ep93xx_pata_probe(struct platform_device *pdev)
{
struct ep93xx_pata_data *drv_data;
struct ata_host *host;
@@ -939,9 +938,9 @@ static int __devinit ep93xx_pata_probe(struct platform_device *pdev)
goto err_rel_gpio;
}
- ide_base = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (!ide_base) {
- err = -ENXIO;
+ ide_base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(ide_base)) {
+ err = PTR_ERR(ide_base);
goto err_rel_gpio;
}
@@ -1013,7 +1012,7 @@ err_rel_gpio:
return err;
}
-static int __devexit ep93xx_pata_remove(struct platform_device *pdev)
+static int ep93xx_pata_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ep93xx_pata_data *drv_data = host->private_data;
@@ -1031,7 +1030,7 @@ static struct platform_driver ep93xx_pata_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ep93xx_pata_probe,
- .remove = __devexit_p(ep93xx_pata_remove),
+ .remove = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 52e7e7b8c74f..d7c732042a4f 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -337,10 +337,9 @@ static struct ata_port_operations pata_icside_port_ops = {
.port_start = ATA_OP_NULL, /* don't need PRD table */
};
-static void __devinit
-pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
- struct pata_icside_info *info,
- const struct portinfo *port)
+static void pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
+ struct pata_icside_info *info,
+ const struct portinfo *port)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
void __iomem *cmd = base + port->dataoffset;
@@ -368,7 +367,7 @@ pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
}
-static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
+static int pata_icside_register_v5(struct pata_icside_info *info)
{
struct pata_icside_state *state = info->state;
void __iomem *base;
@@ -391,7 +390,7 @@ static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
return 0;
}
-static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
+static int pata_icside_register_v6(struct pata_icside_info *info)
{
struct pata_icside_state *state = info->state;
struct expansion_card *ec = info->ec;
@@ -434,7 +433,7 @@ static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
return icside_dma_init(info);
}
-static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
+static int pata_icside_add_ports(struct pata_icside_info *info)
{
struct expansion_card *ec = info->ec;
struct ata_host *host;
@@ -474,8 +473,8 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
&pata_icside_sht);
}
-static int __devinit
-pata_icside_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int pata_icside_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
{
struct pata_icside_state *state;
struct pata_icside_info info;
@@ -575,7 +574,7 @@ static void pata_icside_shutdown(struct expansion_card *ec)
}
}
-static void __devexit pata_icside_remove(struct expansion_card *ec)
+static void pata_icside_remove(struct expansion_card *ec)
{
struct ata_host *host = ecard_get_drvdata(ec);
struct pata_icside_state *state = host->private_data;
@@ -602,7 +601,7 @@ static const struct ecard_id pata_icside_ids[] = {
static struct ecard_driver pata_icside_driver = {
.probe = pata_icside_probe,
- .remove = __devexit_p(pata_icside_remove),
+ .remove = pata_icside_remove,
.shutdown = pata_icside_shutdown,
.id_table = pata_icside_ids,
.drv = {
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 87bb05b3cafc..40849445a9dc 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -60,7 +60,7 @@ static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
@@ -91,7 +91,7 @@ static void pata_imx_setup_port(struct ata_ioports *ioaddr)
ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
}
-static int __devinit pata_imx_probe(struct platform_device *pdev)
+static int pata_imx_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
@@ -167,7 +167,7 @@ free_priv:
return -ENOMEM;
}
-static int __devexit pata_imx_remove(struct platform_device *pdev)
+static int pata_imx_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct pata_imx_priv *priv = host->private_data;
@@ -225,7 +225,7 @@ static const struct dev_pm_ops pata_imx_pm_ops = {
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
- .remove = __devexit_p(pata_imx_remove),
+ .remove = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index badb1789a918..dcc6b243e525 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -137,7 +137,7 @@ static void ixp4xx_setup_port(struct ata_port *ap,
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
}
-static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
+static int ixp4xx_pata_probe(struct platform_device *pdev)
{
unsigned int irq;
struct resource *cs0, *cs1;
@@ -187,22 +187,13 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
}
-static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
-{
- struct ata_host *host = platform_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = ixp4xx_pata_probe,
- .remove = __devexit_p(ixp4xx_pata_remove),
+ .remove = ata_platform_remove_one,
};
module_platform_driver(ixp4xx_pata_platform_driver);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index b057e3fa44bc..e5725edcf515 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -935,7 +935,7 @@ static struct ata_port_operations pata_macio_ops = {
.sff_irq_clear = pata_macio_irq_clear,
};
-static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
+static void pata_macio_invariants(struct pata_macio_priv *priv)
{
const int *bidp;
@@ -976,9 +976,8 @@ static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
priv->aapl_bus_id = 1;
}
-static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
- void __iomem * base,
- void __iomem * dma)
+static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
+ void __iomem * base, void __iomem * dma)
{
/* cmd_addr is the base of regs for that port */
ioaddr->cmd_addr = base;
@@ -999,8 +998,8 @@ static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
ioaddr->bmdma_addr = dma;
}
-static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
- struct ata_port_info *pinfo)
+static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
+ struct ata_port_info *pinfo)
{
int i = 0;
@@ -1027,11 +1026,11 @@ static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
}
-static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
- resource_size_t tfregs,
- resource_size_t dmaregs,
- resource_size_t fcregs,
- unsigned long irq)
+static int pata_macio_common_init(struct pata_macio_priv *priv,
+ resource_size_t tfregs,
+ resource_size_t dmaregs,
+ resource_size_t fcregs,
+ unsigned long irq)
{
struct ata_port_info pinfo;
const struct ata_port_info *ppi[] = { &pinfo, NULL };
@@ -1113,8 +1112,8 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
&pata_macio_sht);
}
-static int __devinit pata_macio_attach(struct macio_dev *mdev,
- const struct of_device_id *match)
+static int pata_macio_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
{
struct pata_macio_priv *priv;
resource_size_t tfregs, dmaregs = 0;
@@ -1190,7 +1189,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev,
return rc;
}
-static int __devexit pata_macio_detach(struct macio_dev *mdev)
+static int pata_macio_detach(struct macio_dev *mdev)
{
struct ata_host *host = macio_get_drvdata(mdev);
struct pata_macio_priv *priv = host->private_data;
@@ -1257,8 +1256,8 @@ static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
#endif /* CONFIG_PMAC_MEDIABAY */
-static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pata_macio_pci_attach(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct pata_macio_priv *priv;
struct device_node *np;
@@ -1310,7 +1309,7 @@ static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
return 0;
}
-static void __devexit pata_macio_pci_detach(struct pci_dev *pdev)
+static void pata_macio_pci_detach(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index d2c102fd4330..3a8fb28b71f2 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -26,9 +26,9 @@
#include <asm/prom.h>
#include <asm/mpc52xx.h>
-#include <sysdev/bestcomm/bestcomm.h>
-#include <sysdev/bestcomm/bestcomm_priv.h>
-#include <sysdev/bestcomm/ata.h>
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
#define DRV_NAME "mpc52xx_ata"
@@ -621,9 +621,10 @@ static struct ata_port_operations mpc52xx_ata_port_ops = {
.qc_prep = ata_noop_qc_prep,
};
-static int __devinit
-mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
- unsigned long raw_ata_regs, int mwdma_mask, int udma_mask)
+static int mpc52xx_ata_init_one(struct device *dev,
+ struct mpc52xx_ata_priv *priv,
+ unsigned long raw_ata_regs,
+ int mwdma_mask, int udma_mask)
{
struct ata_host *host;
struct ata_port *ap;
@@ -663,24 +664,11 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
&mpc52xx_ata_sht);
}
-static struct mpc52xx_ata_priv *
-mpc52xx_ata_remove_one(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
- struct mpc52xx_ata_priv *priv = host->private_data;
-
- ata_host_detach(host);
-
- return priv;
-}
-
-
/* ======================================================================== */
/* OF Platform driver */
/* ======================================================================== */
-static int __devinit
-mpc52xx_ata_probe(struct platform_device *op)
+static int mpc52xx_ata_probe(struct platform_device *op)
{
unsigned int ipb_freq;
struct resource res_mem;
@@ -815,11 +803,12 @@ mpc52xx_ata_probe(struct platform_device *op)
static int
mpc52xx_ata_remove(struct platform_device *op)
{
- struct mpc52xx_ata_priv *priv;
+ struct ata_host *host = platform_get_drvdata(op);
+ struct mpc52xx_ata_priv *priv = host->private_data;
int task_irq;
/* Deregister the ATA interface */
- priv = mpc52xx_ata_remove_one(&op->dev);
+ ata_platform_remove_one(op);
/* Clean up DMA */
task_irq = bcom_get_task_irq(priv->dmatsk);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 1d61d5d278fa..ff2e57f3b597 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -5,19 +5,22 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2005 - 2009 Cavium Networks
+ * Copyright (C) 2005 - 2012 Cavium Inc.
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
-#include <linux/irq.h>
+#include <linux/hrtimer.h>
#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
+#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
/*
@@ -34,20 +37,36 @@
*/
#define DRV_NAME "pata_octeon_cf"
-#define DRV_VERSION "2.1"
+#define DRV_VERSION "2.2"
+
+/* Poll interval in ns. */
+#define OCTEON_CF_BUSY_POLL_INTERVAL 500000
+#define DMA_CFG 0
+#define DMA_TIM 0x20
+#define DMA_INT 0x38
+#define DMA_INT_EN 0x50
struct octeon_cf_port {
- struct workqueue_struct *wq;
- struct delayed_work delayed_finish;
+ struct hrtimer delayed_finish;
struct ata_port *ap;
int dma_finished;
+ void *c0;
+ unsigned int cs0;
+ unsigned int cs1;
+ bool is_true_ide;
+ u64 dma_base;
};
static struct scsi_host_template octeon_cf_sht = {
ATA_PIO_SHT(DRV_NAME),
};
+static int enable_dma;
+module_param(enable_dma, int, 0444);
+MODULE_PARM_DESC(enable_dma,
+ "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
+
/**
* Convert nanosecond based time to setting used in the
* boot bus timing register, based on timing multiple
@@ -66,12 +85,29 @@ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
return val;
}
-static void octeon_cf_set_boot_reg_cfg(int cs)
+static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
{
union cvmx_mio_boot_reg_cfgx reg_cfg;
+ unsigned int tim_mult;
+
+ switch (multiplier) {
+ case 8:
+ tim_mult = 3;
+ break;
+ case 4:
+ tim_mult = 0;
+ break;
+ case 2:
+ tim_mult = 2;
+ break;
+ default:
+ tim_mult = 1;
+ break;
+ }
+
reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
- reg_cfg.s.tim_mult = 2; /* Timing mutiplier 2x */
+ reg_cfg.s.tim_mult = tim_mult; /* Timing multiplier */
reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
reg_cfg.s.sam = 0; /* Don't combine write and output enable */
reg_cfg.s.we_ext = 0; /* No write enable extension */
@@ -92,12 +128,12 @@ static void octeon_cf_set_boot_reg_cfg(int cs)
*/
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
- struct octeon_cf_data *ocd = ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_reg_timx reg_tim;
- int cs = ocd->base_region;
int T;
struct ata_timing timing;
+ unsigned int div;
int use_iordy;
int trh;
int pause;
@@ -106,7 +142,15 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
int t2;
int t2i;
- T = (int)(2000000000000LL / octeon_get_clock_rate());
+ /*
+ * A divisor value of four will overflow the timing fields at
+ * clock rates greater than 800MHz
+ */
+ if (octeon_get_io_clock_rate() <= 800000000)
+ div = 4;
+ else
+ div = 8;
+ T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
BUG();
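To see why the divisor switches from 4 to 8 on faster parts: T here is the boot-bus tick period that ata_timing_compute() quantizes against, so doubling div doubles T and roughly halves every tick count programmed into the timing registers. Worked through the formula above (clock rates chosen purely for illustration):

	io clock 800 MHz, div 4:  T = (10^12 * 4) / (800 * 10^6)  = 5000
	io clock 1.2 GHz, div 4:  T = (10^12 * 4) / (1.2 * 10^9) ~= 3333   (counts grow, fields can overflow)
	io clock 1.2 GHz, div 8:  T = (10^12 * 8) / (1.2 * 10^9) ~= 6667   (counts stay within the fields)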
@@ -121,23 +165,26 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
if (t2i)
t2i--;
- trh = ns_to_tim_reg(2, 20);
+ trh = ns_to_tim_reg(div, 20);
if (trh)
trh--;
- pause = timing.cycle - timing.active - timing.setup - trh;
+ pause = (int)timing.cycle - (int)timing.active -
+ (int)timing.setup - trh;
+ if (pause < 0)
+ pause = 0;
if (pause)
pause--;
- octeon_cf_set_boot_reg_cfg(cs);
- if (ocd->dma_engine >= 0)
+ octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
+ if (cf_port->is_true_ide)
/* True IDE mode, program both chip selects. */
- octeon_cf_set_boot_reg_cfg(cs + 1);
+ octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);
use_iordy = ata_pio_need_iordy(dev);
- reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
+ reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
/* Disable page mode */
reg_tim.s.pagem = 0;
/* Enable dynamic timing */
@@ -161,20 +208,22 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
/* How long read enable is asserted */
reg_tim.s.oe = t2;
/* Time after CE that read/write starts */
- reg_tim.s.ce = ns_to_tim_reg(2, 5);
+ reg_tim.s.ce = ns_to_tim_reg(div, 5);
/* Time before CE that address is valid */
reg_tim.s.adr = 0;
/* Program the bootbus region timing for the data port chip select. */
- cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
- if (ocd->dma_engine >= 0)
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
+ if (cf_port->is_true_ide)
/* True IDE mode, program both chip selects. */
- cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
+ reg_tim.u64);
}
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
- struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
+ union cvmx_mio_boot_pin_defs pin_defs;
union cvmx_mio_boot_dma_timx dma_tim;
unsigned int oe_a;
unsigned int oe_n;
@@ -183,6 +232,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
unsigned int pause;
unsigned int T0, Tkr, Td;
unsigned int tim_mult;
+ int c;
const struct ata_timing *timing;
@@ -199,13 +249,19 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
/* not spec'ed, value in eclocks, not affected by tim_mult */
dma_arq = 8;
pause = 25 - dma_arq * 1000 /
- (octeon_get_clock_rate() / 1000000); /* Tz */
+ (octeon_get_io_clock_rate() / 1000000); /* Tz */
oe_a = Td;
/* Tkr from cf spec, lengthened to meet T0 */
oe_n = max(T0 - oe_a, Tkr);
- dma_tim.s.dmack_pi = 1;
+ pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);
+
+ /* DMA channel number. */
+ c = (cf_port->dma_base & 8) >> 3;
+
+ /* Invert the polarity if the default is 0 */
+ dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;
dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
@@ -228,14 +284,11 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
ns_to_tim_reg(tim_mult, 60));
- pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
- "%d, dmarq: %d, pause: %d\n",
+ pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
- dma_tim.u64);
-
+ cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}
/**
@@ -489,15 +542,10 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
ata_wait_idle(ap);
}
-static void octeon_cf_irq_on(struct ata_port *ap)
+static void octeon_cf_ata_port_noaction(struct ata_port *ap)
{
}
-static void octeon_cf_irq_clear(struct ata_port *ap)
-{
- return;
-}
-
static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -519,7 +567,7 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
*/
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
- struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = qc->ap->private_data;
union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
union cvmx_mio_boot_dma_intx mio_boot_dma_int;
struct scatterlist *sg;
@@ -535,15 +583,16 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
*/
mio_boot_dma_int.u64 = 0;
mio_boot_dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
- mio_boot_dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);
/* Enable the interrupt. */
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
- mio_boot_dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);
/* Set the direction of the DMA */
mio_boot_dma_cfg.u64 = 0;
+#ifdef __LITTLE_ENDIAN
+ mio_boot_dma_cfg.s.endian = 1;
+#endif
mio_boot_dma_cfg.s.en = 1;
mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);
@@ -569,8 +618,7 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
(void *)(unsigned long)mio_boot_dma_cfg.s.adr);
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
- mio_boot_dma_cfg.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
/**
@@ -583,10 +631,9 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
struct ata_queued_cmd *qc)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
- struct octeon_cf_data *ocd = ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_dma_cfgx dma_cfg;
union cvmx_mio_boot_dma_intx dma_int;
- struct octeon_cf_port *cf_port;
u8 status;
VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -596,9 +643,7 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
if (ap->hsm_task_state != HSM_ST_LAST)
return 0;
- cf_port = ap->private_data;
-
- dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+ dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
if (dma_cfg.s.size != 0xfffff) {
/* Error, the transfer was not complete. */
qc->err_mask |= AC_ERR_HOST_BUS;
@@ -608,15 +653,15 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
/* Stop and clear the dma engine. */
dma_cfg.u64 = 0;
dma_cfg.s.size = -1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
/* Disable the interrupt. */
dma_int.u64 = 0;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
/* Clear the DMA complete status */
dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
status = ap->ops->sff_check_status(ap);
@@ -649,69 +694,68 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
struct ata_queued_cmd *qc;
union cvmx_mio_boot_dma_intx dma_int;
union cvmx_mio_boot_dma_cfgx dma_cfg;
- struct octeon_cf_data *ocd;
ap = host->ports[i];
- ocd = ap->dev->platform_data;
cf_port = ap->private_data;
- dma_int.u64 =
- cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
- dma_cfg.u64 =
- cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+
+ dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
+ dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
- if (dma_int.s.done && !dma_cfg.s.en) {
- if (!sg_is_last(qc->cursg)) {
- qc->cursg = sg_next(qc->cursg);
- handled = 1;
- octeon_cf_dma_start(qc);
- continue;
- } else {
- cf_port->dma_finished = 1;
- }
- }
- if (!cf_port->dma_finished)
- continue;
- status = ioread8(ap->ioaddr.altstatus_addr);
- if (status & (ATA_BUSY | ATA_DRQ)) {
- /*
- * We are busy, try to handle it
- * later. This is the DMA finished
- * interrupt, and it could take a
- * little while for the card to be
- * ready for more commands.
- */
- /* Clear DMA irq. */
- dma_int.u64 = 0;
- dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
- dma_int.u64);
-
- queue_delayed_work(cf_port->wq,
- &cf_port->delayed_finish, 1);
+ if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
+ continue;
+
+ if (dma_int.s.done && !dma_cfg.s.en) {
+ if (!sg_is_last(qc->cursg)) {
+ qc->cursg = sg_next(qc->cursg);
handled = 1;
+ octeon_cf_dma_start(qc);
+ continue;
} else {
- handled |= octeon_cf_dma_finished(ap, qc);
+ cf_port->dma_finished = 1;
}
}
+ if (!cf_port->dma_finished)
+ continue;
+ status = ioread8(ap->ioaddr.altstatus_addr);
+ if (status & (ATA_BUSY | ATA_DRQ)) {
+ /*
+ * We are busy, try to handle it later. This
+ * is the DMA finished interrupt, and it could
+ * take a little while for the card to be
+ * ready for more commands.
+ */
+ /* Clear DMA irq. */
+ dma_int.u64 = 0;
+ dma_int.s.done = 1;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT,
+ dma_int.u64);
+ hrtimer_start_range_ns(&cf_port->delayed_finish,
+ ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
+ OCTEON_CF_BUSY_POLL_INTERVAL / 5,
+ HRTIMER_MODE_REL);
+ handled = 1;
+ } else {
+ handled |= octeon_cf_dma_finished(ap, qc);
+ }
}
spin_unlock_irqrestore(&host->lock, flags);
DPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
-static void octeon_cf_delayed_finish(struct work_struct *work)
+static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
{
- struct octeon_cf_port *cf_port = container_of(work,
+ struct octeon_cf_port *cf_port = container_of(hrt,
struct octeon_cf_port,
- delayed_finish.work);
+ delayed_finish);
struct ata_port *ap = cf_port->ap;
struct ata_host *host = ap->host;
struct ata_queued_cmd *qc;
unsigned long flags;
u8 status;
+ enum hrtimer_restart rv = HRTIMER_NORESTART;
spin_lock_irqsave(&host->lock, flags);
@@ -726,15 +770,17 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
status = ioread8(ap->ioaddr.altstatus_addr);
if (status & (ATA_BUSY | ATA_DRQ)) {
/* Still busy, try again. */
- queue_delayed_work(cf_port->wq,
- &cf_port->delayed_finish, 1);
+ hrtimer_forward_now(hrt,
+ ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
+ rv = HRTIMER_RESTART;
goto out;
}
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
octeon_cf_dma_finished(ap, qc);
out:
spin_unlock_irqrestore(&host->lock, flags);
+ return rv;
}
static void octeon_cf_dev_config(struct ata_device *dev)
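The delayed-work poll is replaced above by a self-rearming hrtimer; stripped of the ATA specifics, the pattern is the following (the busy check is a hypothetical placeholder):

	static enum hrtimer_restart example_poll(struct hrtimer *hrt)
	{
		if (example_device_busy()) {	/* hypothetical status check */
			hrtimer_forward_now(hrt,
				ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
			return HRTIMER_RESTART;
		}
		return HRTIMER_NORESTART;
	}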
@@ -786,58 +832,125 @@ static struct ata_port_operations octeon_cf_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = octeon_cf_qc_issue,
.sff_dev_select = octeon_cf_dev_select,
- .sff_irq_on = octeon_cf_irq_on,
- .sff_irq_clear = octeon_cf_irq_clear,
+ .sff_irq_on = octeon_cf_ata_port_noaction,
+ .sff_irq_clear = octeon_cf_ata_port_noaction,
.cable_detect = ata_cable_40wire,
.set_piomode = octeon_cf_set_piomode,
.set_dmamode = octeon_cf_set_dmamode,
.dev_config = octeon_cf_dev_config,
};
-static int __devinit octeon_cf_probe(struct platform_device *pdev)
+static int octeon_cf_probe(struct platform_device *pdev)
{
struct resource *res_cs0, *res_cs1;
+ bool is_16bit;
+ const __be32 *cs_num;
+ struct property *reg_prop;
+ int n_addr, n_size, reg_len;
+ struct device_node *node;
+ const void *prop;
void __iomem *cs0;
void __iomem *cs1 = NULL;
struct ata_host *host;
struct ata_port *ap;
- struct octeon_cf_data *ocd;
int irq = 0;
irq_handler_t irq_handler = NULL;
void __iomem *base;
struct octeon_cf_port *cf_port;
- char version[32];
+ int rv = -ENOMEM;
- res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_cs0)
+ node = pdev->dev.of_node;
+ if (node == NULL)
return -EINVAL;
- ocd = pdev->dev.platform_data;
+ cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
+ if (!cf_port)
+ return -ENOMEM;
- cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
- resource_size(res_cs0));
+ cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL);
- if (!cs0)
- return -ENOMEM;
+ prop = of_get_property(node, "cavium,bus-width", NULL);
+ if (prop)
+ is_16bit = (be32_to_cpup(prop) == 16);
+ else
+ is_16bit = false;
- /* Determine from availability of DMA if True IDE mode or not */
- if (ocd->dma_engine >= 0) {
- res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_cs1)
- return -EINVAL;
+ n_addr = of_n_addr_cells(node);
+ n_size = of_n_size_cells(node);
+ reg_prop = of_find_property(node, "reg", &reg_len);
+ if (!reg_prop || reg_len < sizeof(__be32)) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cs_num = reg_prop->value;
+ cf_port->cs0 = be32_to_cpup(cs_num);
+
+ if (cf_port->is_true_ide) {
+ struct device_node *dma_node;
+ dma_node = of_parse_phandle(node,
+ "cavium,dma-engine-handle", 0);
+ if (dma_node) {
+ struct platform_device *dma_dev;
+ dma_dev = of_find_device_by_node(dma_node);
+ if (dma_dev) {
+ struct resource *res_dma;
+ int i;
+ res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
+ if (!res_dma) {
+ of_node_put(dma_node);
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+ resource_size(res_dma));
+
+ if (!cf_port->dma_base) {
+ of_node_put(dma_node);
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+
+ irq_handler = octeon_cf_interrupt;
+ i = platform_get_irq(dma_dev, 0);
+ if (i > 0)
+ irq = i;
+ }
+ of_node_put(dma_node);
+ }
+ res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_cs1) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
- resource_size(res_cs1));
+ res_cs1->end - res_cs1->start + 1);
if (!cs1)
- return -ENOMEM;
+ goto free_cf_port;
+
+ if (reg_len < (n_addr + n_size + 1) * sizeof(__be32)) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cs_num += n_addr + n_size;
+ cf_port->cs1 = be32_to_cpup(cs_num);
}
- cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
- if (!cf_port)
- return -ENOMEM;
+ res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res_cs0) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+
+ cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+ resource_size(res_cs0));
+
+ if (!cs0)
+ goto free_cf_port;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
@@ -846,21 +959,22 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap = host->ports[0];
ap->private_data = cf_port;
+ pdev->dev.platform_data = cf_port;
cf_port->ap = ap;
ap->ops = &octeon_cf_ops;
ap->pio_mask = ATA_PIO6;
ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
- base = cs0 + ocd->base_region_bias;
- if (!ocd->is16bit) {
+ if (!is_16bit) {
+ base = cs0 + 0x800;
ap->ioaddr.cmd_addr = base;
ata_sff_std_ports(&ap->ioaddr);
ap->ioaddr.altstatus_addr = base + 0xe;
ap->ioaddr.ctl_addr = base + 0xe;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
- } else if (cs1) {
- /* Presence of cs1 indicates True IDE mode. */
+ } else if (cf_port->is_true_ide) {
+ base = cs0;
ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
@@ -876,19 +990,15 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
- ap->mwdma_mask = ATA_MWDMA4;
- irq = platform_get_irq(pdev, 0);
- irq_handler = octeon_cf_interrupt;
-
- /* True IDE mode needs delayed work to poll for not-busy. */
- cf_port->wq = create_singlethread_workqueue(DRV_NAME);
- if (!cf_port->wq)
- goto free_cf_port;
- INIT_DELAYED_WORK(&cf_port->delayed_finish,
- octeon_cf_delayed_finish);
+ ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
+ /* True IDE mode needs a timer to poll for not-busy. */
+ hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ cf_port->delayed_finish.function = octeon_cf_delayed_finish;
} else {
/* 16 bit but not True IDE */
+ base = cs0 + 0x800;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
octeon_cf_ops.softreset = octeon_cf_softreset16;
octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
@@ -902,28 +1012,71 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap->ioaddr.ctl_addr = base + 0xe;
ap->ioaddr.altstatus_addr = base + 0xe;
}
+ cf_port->c0 = ap->ioaddr.ctl_addr;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
- snprintf(version, sizeof(version), "%s %d bit%s",
- DRV_VERSION,
- (ocd->is16bit) ? 16 : 8,
- (cs1) ? ", True IDE" : "");
- ata_print_version_once(&pdev->dev, version);
+ dev_info(&pdev->dev, "version " DRV_VERSION " %d bit%s.\n",
+ is_16bit ? 16 : 8,
+ cf_port->is_true_ide ? ", True IDE" : "");
- return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
+ return ata_host_activate(host, irq, irq_handler,
+ IRQF_SHARED, &octeon_cf_sht);
free_cf_port:
kfree(cf_port);
- return -ENOMEM;
+ return rv;
+}
+
+static void octeon_cf_shutdown(struct device *dev)
+{
+ union cvmx_mio_boot_dma_cfgx dma_cfg;
+ union cvmx_mio_boot_dma_intx dma_int;
+
+ struct octeon_cf_port *cf_port = dev->platform_data;
+
+ if (cf_port->dma_base) {
+ /* Stop and clear the dma engine. */
+ dma_cfg.u64 = 0;
+ dma_cfg.s.size = -1;
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
+
+ /* Disable the interrupt. */
+ dma_int.u64 = 0;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
+
+ /* Clear the DMA complete status */
+ dma_int.s.done = 1;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
+
+ __raw_writeb(0, cf_port->c0);
+ udelay(20);
+ __raw_writeb(ATA_SRST, cf_port->c0);
+ udelay(20);
+ __raw_writeb(0, cf_port->c0);
+ mdelay(100);
+ }
}
+static struct of_device_id octeon_cf_match[] = {
+ {
+ .compatible = "cavium,ebt3000-compact-flash",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
+
static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = octeon_cf_match,
+ .shutdown = octeon_cf_shutdown
},
};
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 1654dc27e7f8..a7e95a54c782 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -14,8 +14,9 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
+#include <linux/libata.h>
-static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
+static int pata_of_platform_probe(struct platform_device *ofdev)
{
int ret;
struct device_node *dn = ofdev->dev.of_node;
@@ -76,11 +77,6 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
reg_shift, pio_mask);
}
-static int __devexit pata_of_platform_remove(struct platform_device *ofdev)
-{
- return __pata_platform_remove(&ofdev->dev);
-}
-
static struct of_device_id pata_of_platform_match[] = {
{ .compatible = "ata-generic", },
{ .compatible = "electra-ide", },
@@ -95,7 +91,7 @@ static struct platform_driver pata_of_platform_driver = {
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
- .remove = __devexit_p(pata_of_platform_remove),
+ .remove = ata_platform_remove_one,
};
module_platform_driver(pata_of_platform_driver);
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 5ff31b68135c..df2bb7504fc8 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -48,7 +48,7 @@ static struct ata_port_operations palmld_port_ops = {
.cable_detect = ata_cable_40wire,
};
-static __devinit int palmld_pata_probe(struct platform_device *pdev)
+static int palmld_pata_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
@@ -109,11 +109,9 @@ err1:
return ret;
}
-static __devexit int palmld_pata_remove(struct platform_device *dev)
+static int palmld_pata_remove(struct platform_device *dev)
{
- struct ata_host *host = platform_get_drvdata(dev);
-
- ata_host_detach(host);
+ ata_platform_remove_one(dev);
/* power down the HDD */
gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
@@ -129,7 +127,7 @@ static struct platform_driver palmld_pata_platform_driver = {
.owner = THIS_MODULE,
},
.probe = palmld_pata_probe,
- .remove = __devexit_p(palmld_pata_remove),
+ .remove = palmld_pata_remove,
};
module_platform_driver(palmld_pata_platform_driver);
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index c9399c8688c5..3f94a886bb35 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -700,7 +700,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
* @pdev: instance of pci_dev found
* @ent: matching entry in the id_tbl[]
*/
-static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int pdc2027x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index f1848aeda783..71e093767f4e 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -98,12 +98,9 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
*
* If no IRQ resource is present, PIO polling mode is used instead.
*/
-int __devinit __pata_platform_probe(struct device *dev,
- struct resource *io_res,
- struct resource *ctl_res,
- struct resource *irq_res,
- unsigned int ioport_shift,
- int __pio_mask)
+int __pata_platform_probe(struct device *dev, struct resource *io_res,
+ struct resource *ctl_res, struct resource *irq_res,
+ unsigned int ioport_shift, int __pio_mask)
{
struct ata_host *host;
struct ata_port *ap;
@@ -178,24 +175,7 @@ int __devinit __pata_platform_probe(struct device *dev,
}
EXPORT_SYMBOL_GPL(__pata_platform_probe);
-/**
- * __pata_platform_remove - unplug a platform interface
- * @dev: device
- *
- * A platform bus ATA device has been unplugged. Perform the needed
- * cleanup. Also called on module unload for any active devices.
- */
-int __pata_platform_remove(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__pata_platform_remove);
-
-static int __devinit pata_platform_probe(struct platform_device *pdev)
+static int pata_platform_probe(struct platform_device *pdev)
{
struct resource *io_res;
struct resource *ctl_res;
@@ -242,14 +222,9 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
pio_mask);
}
-static int __devexit pata_platform_remove(struct platform_device *pdev)
-{
- return __pata_platform_remove(&pdev->dev);
-}
-
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
- .remove = __devexit_p(pata_platform_remove),
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 4b8ba559fe24..b0ac9e0c5e01 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -229,7 +229,7 @@ static void pxa_ata_dma_irq(int dma, void *port)
complete(&pd->dma_done);
}
-static int __devinit pxa_ata_probe(struct platform_device *pdev)
+static int pxa_ata_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
@@ -369,7 +369,7 @@ static int __devinit pxa_ata_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit pxa_ata_remove(struct platform_device *pdev)
+static int pxa_ata_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct pata_pxa_data *data = host->ports[0]->private_data;
@@ -383,7 +383,7 @@ static int __devexit pxa_ata_remove(struct platform_device *pdev)
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
- .remove = __devexit_p(pxa_ata_remove),
+ .remove = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 9417101bd5ca..3c5eb8fa6bd1 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -102,7 +102,7 @@ static void rb532_pata_setup_ports(struct ata_host *ah)
ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR;
}
-static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
+static int rb532_pata_driver_probe(struct platform_device *pdev)
{
int irq;
int gpio;
@@ -177,7 +177,7 @@ err_free_gpio:
return ret;
}
-static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
+static int rb532_pata_driver_remove(struct platform_device *pdev)
{
struct ata_host *ah = platform_get_drvdata(pdev);
struct rb532_cf_info *info = ah->private_data;
@@ -190,7 +190,7 @@ static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
- .remove = __devexit_p(rb532_pata_driver_remove),
+ .remove = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 32a3499e83e7..6a8665574fee 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -321,13 +321,11 @@ static struct scsi_host_template rdc_sht = {
* Zero on success, or -ERRNO value.
*/
-static int __devinit rdc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
- unsigned long port_flags;
struct ata_host *host;
struct rdc_host_priv *hpriv;
int rc;
@@ -337,8 +335,6 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
port_info[0] = rdc_port_info;
port_info[1] = rdc_port_info;
- port_flags = port_info[0].flags;
-
/* enable device and prepare host */
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index db0d18cf1c2a..d3830c45a369 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -169,8 +169,7 @@ static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
* Zero on success, or -ERRNO value.
*/
-static int __devinit sch_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sch_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 5cfdf94823d0..64c5f0d0f812 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -323,8 +323,7 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
return tmpbyte & 0x30;
}
-static int __devinit sil680_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 937aeb34b310..2e391730e8be 100755..100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -43,6 +43,7 @@
/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION
+
#define DRV_NAME "sata-dwc"
#define DRV_VERSION "1.3"
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 400bf1c3e982..5dba77ccaa0b 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -213,7 +213,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = 0x80;
+ tf.command = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
do {
@@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
-static int __devinit ahci_highbank_probe(struct platform_device *pdev)
+static int ahci_highbank_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
@@ -368,16 +368,6 @@ err0:
return rc;
}
-static int __devexit ahci_highbank_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ata_host *host = dev_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
@@ -432,7 +422,7 @@ SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
ahci_highbank_suspend, ahci_highbank_resume);
static struct platform_driver ahci_highbank_driver = {
- .remove = __devexit_p(ahci_highbank_remove),
+ .remove = ata_platform_remove_one,
.driver = {
.name = "highbank-ahci",
.owner = THIS_MODULE,
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index dc35f4d42b8b..1e6827c89429 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -273,12 +273,10 @@ static void inic_reset_port(void __iomem *port_base)
static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
- void __iomem *addr;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
return -EINVAL;
- addr = scr_addr + scr_map[sc_reg] * 4;
*val = readl(scr_addr + scr_map[sc_reg] * 4);
/* this controller has stuck DIAG.N, ignore it */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 68f4fb54d627..35c6b6d09c27 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4148,7 +4148,7 @@ err:
* A platform bus SATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
-static int __devexit mv_platform_remove(struct platform_device *pdev)
+static int mv_platform_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)
@@ -4215,7 +4215,7 @@ static int mv_platform_resume(struct platform_device *pdev)
#endif
#ifdef CONFIG_OF
-static struct of_device_id mv_sata_dt_ids[] __devinitdata = {
+static struct of_device_id mv_sata_dt_ids[] = {
{ .compatible = "marvell,orion-sata", },
{},
};
@@ -4224,7 +4224,7 @@ MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
- .remove = __devexit_p(mv_platform_remove),
+ .remove = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
@@ -4429,7 +4429,7 @@ static int mv_pci_device_resume(struct pci_dev *pdev)
#endif
static int mv_platform_probe(struct platform_device *pdev);
-static int __devexit mv_platform_remove(struct platform_device *pdev);
+static int mv_platform_remove(struct platform_device *pdev);
static int __init mv_init(void)
{
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 489c81768321..fb0dd87f8893 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -147,6 +147,10 @@ struct pdc_port_priv {
dma_addr_t pkt_dma;
};
+struct pdc_host_priv {
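+	/* serializes the PDC_PCI_CTL hard-reset sequence in pdc_hard_reset_port() */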
+ spinlock_t hard_reset_lock;
+};
+
static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap)
void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
+ struct pdc_host_priv *hpriv = ap->host->private_data;
u8 tmp;
- spin_lock(&ap->host->lock);
+ spin_lock(&hpriv->hard_reset_lock);
tmp = readb(pcictl_b1_mmio);
tmp &= ~(0x10 << ata_no);
@@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap)
writeb(tmp, pcictl_b1_mmio);
readb(pcictl_b1_mmio); /* flush */
- spin_unlock(&ap->host->lock);
+ spin_unlock(&hpriv->hard_reset_lock);
}
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
@@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
const struct ata_port_info *ppi[PDC_MAX_PORTS];
struct ata_host *host;
+ struct pdc_host_priv *hpriv;
void __iomem *host_mmio;
int n_ports, i, rc;
int is_sataii_tx4;
@@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
+ hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ spin_lock_init(&hpriv->hard_reset_lock);
+ host->private_data = hpriv;
host->iomap = pcim_iomap_table(pdev);
is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index a5f2a563a26a..59f0d630d634 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -506,8 +506,6 @@ static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
- void __iomem *addr;
- addr = scr_addr + sil24_scr_map[sc_reg] * 4;
*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
return 0;
}
@@ -519,8 +517,6 @@ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
- void __iomem *addr;
- addr = scr_addr + sil24_scr_map[sc_reg] * 4;
writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
return 0;
}
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 122605593166..7b7127a58f51 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -315,9 +315,8 @@ static int pdc_port_start(struct ata_port *ap)
return 0;
}
-static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
- unsigned int portno,
- unsigned int total_len)
+static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
+ unsigned int total_len)
{
u32 addr;
unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
@@ -337,9 +336,8 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
buf32[dw], buf32[dw + 1]);
}
-static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
- unsigned int portno,
- unsigned int total_len)
+static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
+ unsigned int total_len)
{
u32 addr;
unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
@@ -486,10 +484,10 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
/*
* Build ATA, host DMA packets
*/
- pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
+ pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
- pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
+ pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
if (qc->tf.flags & ATA_TFLAG_LBA48)
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index e8cf88ba145d..44f304b3de63 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -312,8 +312,7 @@ static struct ata_port_operations vsc_sata_ops = {
.scr_write = vsc_sata_scr_write,
};
-static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
- void __iomem *base)
+static void vsc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
@@ -335,8 +334,8 @@ static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
}
-static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int vsc_sata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static const struct ata_port_info pi = {
.flags = ATA_FLAG_SATA,
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ff7bb8a42ed6..77a7480dc4d1 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1507,9 +1507,9 @@ static void do_housekeeping (unsigned long arg) {
/********** creation of communication queues **********/
-static int __devinit create_queues (amb_dev * dev, unsigned int cmds,
- unsigned int txs, unsigned int * rxs,
- unsigned int * rx_buffer_sizes) {
+static int create_queues(amb_dev *dev, unsigned int cmds, unsigned int txs,
+ unsigned int *rxs, unsigned int *rx_buffer_sizes)
+{
unsigned char pool;
size_t total = 0;
void * memory;
@@ -1737,8 +1737,9 @@ static int decode_loader_result (loader_command cmd, u32 result)
return res;
}
-static int __devinit do_loader_command (volatile loader_block * lb,
- const amb_dev * dev, loader_command cmd) {
+static int do_loader_command(volatile loader_block *lb, const amb_dev *dev,
+ loader_command cmd)
+{
unsigned long timeout;
@@ -1793,8 +1794,9 @@ static int __devinit do_loader_command (volatile loader_block * lb,
/* loader: determine loader version */
-static int __devinit get_loader_version (loader_block * lb,
- const amb_dev * dev, u32 * version) {
+static int get_loader_version(loader_block *lb, const amb_dev *dev,
+ u32 *version)
+{
int res;
PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version");
@@ -1809,9 +1811,9 @@ static int __devinit get_loader_version (loader_block * lb,
/* loader: write memory data blocks */
-static int __devinit loader_write (loader_block* lb,
- const amb_dev *dev,
- const struct ihex_binrec *rec) {
+static int loader_write(loader_block *lb, const amb_dev *dev,
+ const struct ihex_binrec *rec)
+{
transfer_block * tb = &lb->payload.transfer;
PRINTD (DBG_FLOW|DBG_LOAD, "loader_write");
@@ -1824,9 +1826,9 @@ static int __devinit loader_write (loader_block* lb,
/* loader: verify memory data blocks */
-static int __devinit loader_verify (loader_block * lb,
- const amb_dev *dev,
- const struct ihex_binrec *rec) {
+static int loader_verify(loader_block *lb, const amb_dev *dev,
+ const struct ihex_binrec *rec)
+{
transfer_block * tb = &lb->payload.transfer;
int res;
@@ -1842,8 +1844,8 @@ static int __devinit loader_verify (loader_block * lb,
/* loader: start microcode */
-static int __devinit loader_start (loader_block * lb,
- const amb_dev * dev, u32 address) {
+static int loader_start(loader_block *lb, const amb_dev *dev, u32 address)
+{
PRINTD (DBG_FLOW|DBG_LOAD, "loader_start");
lb->payload.start = cpu_to_be32 (address);
@@ -1918,7 +1920,8 @@ static int amb_reset (amb_dev * dev, int diags) {
/********** transfer and start the microcode **********/
-static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
+static int ucode_init(loader_block *lb, amb_dev *dev)
+{
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
@@ -1980,7 +1983,8 @@ static inline __be32 bus_addr(void * addr) {
return cpu_to_be32 (virt_to_bus (addr));
}
-static int __devinit amb_talk (amb_dev * dev) {
+static int amb_talk(amb_dev *dev)
+{
adap_talk_block a;
unsigned char pool;
unsigned long timeout;
@@ -2027,7 +2031,8 @@ static int __devinit amb_talk (amb_dev * dev) {
}
// get microcode version
-static void __devinit amb_ucode_version (amb_dev * dev) {
+static void amb_ucode_version(amb_dev *dev)
+{
u32 major;
u32 minor;
command cmd;
@@ -2042,7 +2047,8 @@ static void __devinit amb_ucode_version (amb_dev * dev) {
}
// get end station address
-static void __devinit amb_esi (amb_dev * dev, u8 * esi) {
+static void amb_esi(amb_dev *dev, u8 *esi)
+{
u32 lower4;
u16 upper2;
command cmd;
@@ -2088,7 +2094,7 @@ static void fixup_plx_window (amb_dev *dev, loader_block *lb)
return;
}
-static int __devinit amb_init (amb_dev * dev)
+static int amb_init(amb_dev *dev)
{
loader_block lb;
@@ -2184,7 +2190,8 @@ static void setup_pci_dev(struct pci_dev *pci_dev)
}
}
-static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+static int amb_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_ent)
{
amb_dev * dev;
int err;
@@ -2285,7 +2292,7 @@ out_disable:
}
-static void __devexit amb_remove_one(struct pci_dev *pci_dev)
+static void amb_remove_one(struct pci_dev *pci_dev)
{
struct amb_dev *dev;
@@ -2379,7 +2386,7 @@ MODULE_DEVICE_TABLE(pci, amb_pci_tbl);
static struct pci_driver amb_driver = {
.name = "amb",
.probe = amb_probe,
- .remove = __devexit_p(amb_remove_one),
+ .remove = amb_remove_one,
.id_table = amb_pci_tbl,
};
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 81e44f7b0ab6..c1eb6fa8ac35 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1567,7 +1567,7 @@ tx_complete++;
/*--------------------------------- entries ---------------------------------*/
-static char * const media_name[] __devinitconst = {
+static char * const media_name[] = {
"MMF", "SMF", "MMF", "03?", /* 0- 3 */
"UTP", "05?", "06?", "07?", /* 4- 7 */
"TAXI","09?", "10?", "11?", /* 8-11 */
@@ -1591,7 +1591,7 @@ static char * const media_name[] __devinitconst = {
} })
-static int __devinit get_esi_asic(struct atm_dev *dev)
+static int get_esi_asic(struct atm_dev *dev)
{
struct eni_dev *eni_dev;
unsigned char tonga;
@@ -1683,7 +1683,7 @@ static int __devinit get_esi_asic(struct atm_dev *dev)
#undef GET_SEPROM
-static int __devinit get_esi_fpga(struct atm_dev *dev, void __iomem *base)
+static int get_esi_fpga(struct atm_dev *dev, void __iomem *base)
{
void __iomem *mac_base;
int i;
@@ -1694,7 +1694,7 @@ static int __devinit get_esi_fpga(struct atm_dev *dev, void __iomem *base)
}
-static int __devinit eni_do_init(struct atm_dev *dev)
+static int eni_do_init(struct atm_dev *dev)
{
struct midway_eprom __iomem *eprom;
struct eni_dev *eni_dev;
@@ -1797,7 +1797,7 @@ static void eni_do_release(struct atm_dev *dev)
iounmap(ed->ioaddr);
}
-static int __devinit eni_start(struct atm_dev *dev)
+static int eni_start(struct atm_dev *dev)
{
struct eni_dev *eni_dev;
@@ -2226,8 +2226,8 @@ static const struct atmdev_ops ops = {
};
-static int __devinit eni_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
+static int eni_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *ent)
{
struct atm_dev *dev;
struct eni_dev *eni_dev;
@@ -2292,7 +2292,7 @@ static struct pci_device_id eni_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci,eni_pci_tbl);
-static void __devexit eni_remove_one(struct pci_dev *pdev)
+static void eni_remove_one(struct pci_dev *pdev)
{
struct atm_dev *dev = pci_get_drvdata(pdev);
struct eni_dev *ed = ENI_DEV(dev);
@@ -2310,7 +2310,7 @@ static struct pci_driver eni_driver = {
.name = DEV_LABEL,
.id_table = eni_pci_tbl,
.probe = eni_init_one,
- .remove = __devexit_p(eni_remove_one),
+ .remove = eni_remove_one,
};
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 86fed1b91695..b41c9481b67b 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -252,7 +252,7 @@ struct reginit_item {
};
-static struct reginit_item PHY_NTC_INIT[] __devinitdata = {
+static struct reginit_item PHY_NTC_INIT[] = {
{ PHY_CLEARALL, 0x40 },
{ 0x12, 0x0001 },
{ 0x13, 0x7605 },
@@ -1295,7 +1295,7 @@ static const struct atmdev_ops ops = {
};
-static void __devinit undocumented_pci_fix (struct pci_dev *pdev)
+static void undocumented_pci_fix(struct pci_dev *pdev)
{
u32 tint;
@@ -1319,13 +1319,13 @@ static void __devinit undocumented_pci_fix (struct pci_dev *pdev)
* PHY routines *
**************************************************************************/
-static void __devinit write_phy (struct fs_dev *dev, int regnum, int val)
+static void write_phy(struct fs_dev *dev, int regnum, int val)
{
submit_command (dev, &dev->hp_txq, QE_CMD_PRP_WR | QE_CMD_IMM_INQ,
regnum, val, 0);
}
-static int __devinit init_phy (struct fs_dev *dev, struct reginit_item *reginit)
+static int init_phy(struct fs_dev *dev, struct reginit_item *reginit)
{
int i;
@@ -1381,7 +1381,7 @@ static void reset_chip (struct fs_dev *dev)
}
}
-static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment)
+static void *aligned_kmalloc(int size, gfp_t flags, int alignment)
{
void *t;
@@ -1398,8 +1398,8 @@ static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment)
return NULL;
}
-static int __devinit init_q (struct fs_dev *dev,
- struct queue *txq, int queue, int nentries, int is_rq)
+static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
+ int nentries, int is_rq)
{
int sz = nentries * sizeof (struct FS_QENTRY);
struct FS_QENTRY *p;
@@ -1434,8 +1434,8 @@ static int __devinit init_q (struct fs_dev *dev,
}
-static int __devinit init_fp (struct fs_dev *dev,
- struct freepool *fp, int queue, int bufsize, int nr_buffers)
+static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
+ int bufsize, int nr_buffers)
{
func_enter ();
@@ -1528,7 +1528,7 @@ static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
fs_dprintk (FS_DEBUG_QUEUE, "Added %d entries. \n", n);
}
-static void __devexit free_queue (struct fs_dev *dev, struct queue *txq)
+static void free_queue(struct fs_dev *dev, struct queue *txq)
{
func_enter ();
@@ -1544,7 +1544,7 @@ static void __devexit free_queue (struct fs_dev *dev, struct queue *txq)
func_exit ();
}
-static void __devexit free_freepool (struct fs_dev *dev, struct freepool *fp)
+static void free_freepool(struct fs_dev *dev, struct freepool *fp)
{
func_enter ();
@@ -1662,7 +1662,7 @@ static void fs_poll (unsigned long data)
}
#endif
-static int __devinit fs_init (struct fs_dev *dev)
+static int fs_init(struct fs_dev *dev)
{
struct pci_dev *pci_dev;
int isr, to;
@@ -1897,8 +1897,8 @@ unmap:
return 1;
}
-static int __devinit firestream_init_one (struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
+static int firestream_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *ent)
{
struct atm_dev *atm_dev;
struct fs_dev *fs_dev;
@@ -1934,7 +1934,7 @@ static int __devinit firestream_init_one (struct pci_dev *pci_dev,
return -ENODEV;
}
-static void __devexit firestream_remove_one (struct pci_dev *pdev)
+static void firestream_remove_one(struct pci_dev *pdev)
{
int i;
struct fs_dev *dev, *nxtdev;
@@ -2038,7 +2038,7 @@ static struct pci_driver firestream_driver = {
.name = "firestream",
.id_table = firestream_pci_tbl,
.probe = firestream_init_one,
- .remove = __devexit_p(firestream_remove_one),
+ .remove = firestream_remove_one,
};
static int __init firestream_init_module (void)
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 361f5aee3be1..204814e88e46 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -527,8 +527,7 @@ fore200e_pca_reset(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_pca_map(struct fore200e* fore200e)
+static int fore200e_pca_map(struct fore200e* fore200e)
{
DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
@@ -561,8 +560,7 @@ fore200e_pca_unmap(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_pca_configure(struct fore200e* fore200e)
+static int fore200e_pca_configure(struct fore200e *fore200e)
{
struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
u8 master_ctrl, latency;
@@ -2028,8 +2026,7 @@ fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
}
-static int __devinit
-fore200e_irq_request(struct fore200e* fore200e)
+static int fore200e_irq_request(struct fore200e *fore200e)
{
if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
@@ -2051,8 +2048,7 @@ fore200e_irq_request(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_get_esi(struct fore200e* fore200e)
+static int fore200e_get_esi(struct fore200e *fore200e)
{
struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
int ok, i;
@@ -2081,8 +2077,7 @@ fore200e_get_esi(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_alloc_rx_buf(struct fore200e* fore200e)
+static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
{
int scheme, magn, nbr, size, i;
@@ -2146,8 +2141,7 @@ fore200e_alloc_rx_buf(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_init_bs_queue(struct fore200e* fore200e)
+static int fore200e_init_bs_queue(struct fore200e *fore200e)
{
int scheme, magn, i;
@@ -2209,8 +2203,7 @@ fore200e_init_bs_queue(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_init_rx_queue(struct fore200e* fore200e)
+static int fore200e_init_rx_queue(struct fore200e *fore200e)
{
struct host_rxq* rxq = &fore200e->host_rxq;
struct cp_rxq_entry __iomem * cp_entry;
@@ -2269,8 +2262,7 @@ fore200e_init_rx_queue(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_init_tx_queue(struct fore200e* fore200e)
+static int fore200e_init_tx_queue(struct fore200e *fore200e)
{
struct host_txq* txq = &fore200e->host_txq;
struct cp_txq_entry __iomem * cp_entry;
@@ -2332,8 +2324,7 @@ fore200e_init_tx_queue(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_init_cmd_queue(struct fore200e* fore200e)
+static int fore200e_init_cmd_queue(struct fore200e *fore200e)
{
struct host_cmdq* cmdq = &fore200e->host_cmdq;
struct cp_cmdq_entry __iomem * cp_entry;
@@ -2374,10 +2365,10 @@ fore200e_init_cmd_queue(struct fore200e* fore200e)
}
-static void __devinit
-fore200e_param_bs_queue(struct fore200e* fore200e,
- enum buffer_scheme scheme, enum buffer_magn magn,
- int queue_length, int pool_size, int supply_blksize)
+static void fore200e_param_bs_queue(struct fore200e *fore200e,
+ enum buffer_scheme scheme,
+ enum buffer_magn magn, int queue_length,
+ int pool_size, int supply_blksize)
{
struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
@@ -2388,8 +2379,7 @@ fore200e_param_bs_queue(struct fore200e* fore200e,
}
-static int __devinit
-fore200e_initialize(struct fore200e* fore200e)
+static int fore200e_initialize(struct fore200e *fore200e)
{
struct cp_queues __iomem * cpq;
int ok, scheme, magn;
@@ -2440,8 +2430,7 @@ fore200e_initialize(struct fore200e* fore200e)
}
-static void __devinit
-fore200e_monitor_putc(struct fore200e* fore200e, char c)
+static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
{
struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
@@ -2452,8 +2441,7 @@ fore200e_monitor_putc(struct fore200e* fore200e, char c)
}
-static int __devinit
-fore200e_monitor_getc(struct fore200e* fore200e)
+static int fore200e_monitor_getc(struct fore200e *fore200e)
{
struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
unsigned long timeout = jiffies + msecs_to_jiffies(50);
@@ -2477,8 +2465,7 @@ fore200e_monitor_getc(struct fore200e* fore200e)
}
-static void __devinit
-fore200e_monitor_puts(struct fore200e* fore200e, char* str)
+static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
{
while (*str) {
@@ -2497,8 +2484,7 @@ fore200e_monitor_puts(struct fore200e* fore200e, char* str)
#define FW_EXT "_ecd.bin2"
#endif
-static int __devinit
-fore200e_load_and_start_fw(struct fore200e* fore200e)
+static int fore200e_load_and_start_fw(struct fore200e *fore200e)
{
const struct firmware *firmware;
struct device *device;
@@ -2566,8 +2552,7 @@ release:
}
-static int __devinit
-fore200e_register(struct fore200e* fore200e, struct device *parent)
+static int fore200e_register(struct fore200e *fore200e, struct device *parent)
{
struct atm_dev* atm_dev;
@@ -2593,8 +2578,7 @@ fore200e_register(struct fore200e* fore200e, struct device *parent)
}
-static int __devinit
-fore200e_init(struct fore200e* fore200e, struct device *parent)
+static int fore200e_init(struct fore200e *fore200e, struct device *parent)
{
if (fore200e_register(fore200e, parent) < 0)
return -ENODEV;
@@ -2644,7 +2628,7 @@ fore200e_init(struct fore200e* fore200e, struct device *parent)
#ifdef CONFIG_SBUS
static const struct of_device_id fore200e_sba_match[];
-static int __devinit fore200e_sba_probe(struct platform_device *op)
+static int fore200e_sba_probe(struct platform_device *op)
{
const struct of_device_id *match;
const struct fore200e_bus *bus;
@@ -2681,7 +2665,7 @@ static int __devinit fore200e_sba_probe(struct platform_device *op)
return 0;
}
-static int __devexit fore200e_sba_remove(struct platform_device *op)
+static int fore200e_sba_remove(struct platform_device *op)
{
struct fore200e *fore200e = dev_get_drvdata(&op->dev);
@@ -2707,13 +2691,13 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove = __devexit_p(fore200e_sba_remove),
+ .remove = fore200e_sba_remove,
};
#endif
#ifdef CONFIG_PCI
-static int __devinit
-fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+static int fore200e_pca_detect(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_ent)
{
const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
struct fore200e* fore200e;
@@ -2766,7 +2750,7 @@ out_disable:
}
-static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
+static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
{
struct fore200e *fore200e;
@@ -2789,7 +2773,7 @@ MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
static struct pci_driver fore200e_pca_driver = {
.name = "fore_200e",
.probe = fore200e_pca_detect,
- .remove = __devexit_p(fore200e_pca_remove_one),
+ .remove = fore200e_pca_remove_one,
.id_table = fore200e_pca_tbl,
};
#endif
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index b182c2f7d777..72b6960fa95f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -349,8 +349,8 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
return NULL;
}
-static int __devinit
-he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+static int he_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_ent)
{
struct atm_dev *atm_dev = NULL;
struct he_dev *he_dev = NULL;
@@ -406,8 +406,7 @@ init_one_failure:
return err;
}
-static void __devexit
-he_remove_one (struct pci_dev *pci_dev)
+static void he_remove_one(struct pci_dev *pci_dev)
{
struct atm_dev *atm_dev;
struct he_dev *he_dev;
@@ -445,8 +444,7 @@ rate_to_atmf(unsigned rate) /* cps to atm forum format */
return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
-static void __devinit
-he_init_rx_lbfp0(struct he_dev *he_dev)
+static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
@@ -476,8 +474,7 @@ he_init_rx_lbfp0(struct he_dev *he_dev)
he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
-static void __devinit
-he_init_rx_lbfp1(struct he_dev *he_dev)
+static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
@@ -507,8 +504,7 @@ he_init_rx_lbfp1(struct he_dev *he_dev)
he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
-static void __devinit
-he_init_tx_lbfp(struct he_dev *he_dev)
+static void he_init_tx_lbfp(struct he_dev *he_dev)
{
unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
@@ -537,8 +533,7 @@ he_init_tx_lbfp(struct he_dev *he_dev)
he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
-static int __devinit
-he_init_tpdrq(struct he_dev *he_dev)
+static int he_init_tpdrq(struct he_dev *he_dev)
{
he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
@@ -559,8 +554,7 @@ he_init_tpdrq(struct he_dev *he_dev)
return 0;
}
-static void __devinit
-he_init_cs_block(struct he_dev *he_dev)
+static void he_init_cs_block(struct he_dev *he_dev)
{
unsigned clock, rate, delta;
int reg;
@@ -655,8 +649,7 @@ he_init_cs_block(struct he_dev *he_dev)
}
-static int __devinit
-he_init_cs_block_rcm(struct he_dev *he_dev)
+static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
unsigned (*rategrid)[16][16];
unsigned rate, delta;
@@ -776,8 +769,7 @@ he_init_cs_block_rcm(struct he_dev *he_dev)
return 0;
}
-static int __devinit
-he_init_group(struct he_dev *he_dev, int group)
+static int he_init_group(struct he_dev *he_dev, int group)
{
struct he_buff *heb, *next;
dma_addr_t mapping;
@@ -915,8 +907,7 @@ out_free_rbpl_table:
return -ENOMEM;
}
-static int __devinit
-he_init_irq(struct he_dev *he_dev)
+static int he_init_irq(struct he_dev *he_dev)
{
int i;
@@ -978,8 +969,7 @@ he_init_irq(struct he_dev *he_dev)
return 0;
}
-static int __devinit
-he_start(struct atm_dev *dev)
+static int he_start(struct atm_dev *dev)
{
struct he_dev *he_dev;
struct pci_dev *pci_dev;
@@ -2879,7 +2869,7 @@ MODULE_DEVICE_TABLE(pci, he_pci_tbl);
static struct pci_driver he_driver = {
.name = "he",
.probe = he_init_one,
- .remove = __devexit_p(he_remove_one),
+ .remove = he_remove_one,
.id_table = he_pci_tbl,
};
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 7d01c2a75256..1dc0519333f2 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1789,7 +1789,7 @@ static void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK);
}
-static u16 __devinit read_bia (const hrz_dev * dev, u16 addr)
+static u16 read_bia(const hrz_dev *dev, u16 addr)
{
u32 ctrl = rd_regl (dev, CONTROL_0_REG);
@@ -1845,7 +1845,8 @@ static u16 __devinit read_bia (const hrz_dev * dev, u16 addr)
/********** initialise a card **********/
-static int __devinit hrz_init (hrz_dev * dev) {
+static int hrz_init(hrz_dev *dev)
+{
int onefivefive;
u16 chan;
@@ -2681,7 +2682,8 @@ static const struct atmdev_ops hrz_ops = {
.owner = THIS_MODULE,
};
-static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+static int hrz_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_ent)
{
hrz_dev * dev;
int err = 0;
@@ -2836,7 +2838,7 @@ out_disable:
goto out;
}
-static void __devexit hrz_remove_one(struct pci_dev *pci_dev)
+static void hrz_remove_one(struct pci_dev *pci_dev)
{
hrz_dev *dev;
@@ -2901,7 +2903,7 @@ MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);
static struct pci_driver hrz_driver = {
.name = "horizon",
.probe = hrz_probe,
- .remove = __devexit_p(hrz_remove_one),
+ .remove = hrz_remove_one,
.id_table = hrz_pci_tbl,
};
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 8974bd2b961e..272f00927761 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3109,8 +3109,7 @@ deinit_card(struct idt77252_dev *card)
}
-static void __devinit
-init_sram(struct idt77252_dev *card)
+static void init_sram(struct idt77252_dev *card)
{
int i;
@@ -3257,8 +3256,7 @@ init_sram(struct idt77252_dev *card)
IPRINTK("%s: SRAM initialization complete.\n", card->name);
}
-static int __devinit
-init_card(struct atm_dev *dev)
+static int init_card(struct atm_dev *dev)
{
struct idt77252_dev *card = dev->dev_data;
struct pci_dev *pcidev = card->pcidev;
@@ -3537,8 +3535,7 @@ init_card(struct atm_dev *dev)
/*****************************************************************************/
-static int __devinit
-idt77252_preset(struct idt77252_dev *card)
+static int idt77252_preset(struct idt77252_dev *card)
{
u16 pci_command;
@@ -3579,8 +3576,7 @@ idt77252_preset(struct idt77252_dev *card)
}
-static unsigned long __devinit
-probe_sram(struct idt77252_dev *card)
+static unsigned long probe_sram(struct idt77252_dev *card)
{
u32 data, addr;
@@ -3601,8 +3597,8 @@ probe_sram(struct idt77252_dev *card)
return addr * sizeof(u32);
}
-static int __devinit
-idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
+static int idt77252_init_one(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
{
static struct idt77252_dev **last = &idt77252_chain;
static int index = 0;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 96cce6d53195..4217f29a85e0 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2299,7 +2299,7 @@ static int reset_sar(struct atm_dev *dev)
}
-static int __devinit ia_init(struct atm_dev *dev)
+static int ia_init(struct atm_dev *dev)
{
IADEV *iadev;
unsigned long real_base;
@@ -2492,7 +2492,7 @@ static void ia_free_rx(IADEV *iadev)
iadev->rx_dle_dma);
}
-static int __devinit ia_start(struct atm_dev *dev)
+static int ia_start(struct atm_dev *dev)
{
IADEV *iadev;
int error;
@@ -3168,8 +3168,7 @@ static const struct atmdev_ops ops = {
.owner = THIS_MODULE,
};
-static int __devinit ia_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct atm_dev *dev;
IADEV *iadev;
@@ -3229,7 +3228,7 @@ err_out:
return ret;
}
-static void __devexit ia_remove_one(struct pci_dev *pdev)
+static void ia_remove_one(struct pci_dev *pdev)
{
struct atm_dev *dev = pci_get_drvdata(pdev);
IADEV *iadev = INPH_IA_DEV(dev);
@@ -3270,7 +3269,7 @@ static struct pci_driver ia_driver = {
.name = DEV_LABEL,
.id_table = ia_pci_tbl,
.probe = ia_init_one,
- .remove = __devexit_p(ia_remove_one),
+ .remove = ia_remove_one,
};
static int __init ia_module_init(void)
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 6a0955e6d4fc..53ecac5a2161 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -636,82 +636,82 @@ struct rx_buf_desc {
#define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
#define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
-typedef volatile u_int freg_t;
+typedef volatile u_int ffreg_t;
typedef u_int rreg_t;
typedef struct _ffredn_t {
- freg_t idlehead_high; /* Idle cell header (high) */
- freg_t idlehead_low; /* Idle cell header (low) */
- freg_t maxrate; /* Maximum rate */
- freg_t stparms; /* Traffic Management Parameters */
- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
- freg_t rm_type; /* */
- u_int filler5[0x17 - 0x06];
- freg_t cmd_reg; /* Command register */
- u_int filler18[0x20 - 0x18];
- freg_t cbr_base; /* CBR Pointer Base */
- freg_t vbr_base; /* VBR Pointer Base */
- freg_t abr_base; /* ABR Pointer Base */
- freg_t ubr_base; /* UBR Pointer Base */
- u_int filler24;
- freg_t vbrwq_base; /* VBR Wait Queue Base */
- freg_t abrwq_base; /* ABR Wait Queue Base */
- freg_t ubrwq_base; /* UBR Wait Queue Base */
- freg_t vct_base; /* Main VC Table Base */
- freg_t vcte_base; /* Extended Main VC Table Base */
- u_int filler2a[0x2C - 0x2A];
- freg_t cbr_tab_beg; /* CBR Table Begin */
- freg_t cbr_tab_end; /* CBR Table End */
- freg_t cbr_pointer; /* CBR Pointer */
- u_int filler2f[0x30 - 0x2F];
- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
- u_int filler38[0x40 - 0x38];
- freg_t queue_base; /* Base address for PRQ and TCQ */
- freg_t desc_base; /* Base address of descriptor table */
- u_int filler42[0x45 - 0x42];
- freg_t mode_reg_0; /* Mode register 0 */
- freg_t mode_reg_1; /* Mode register 1 */
- freg_t intr_status_reg;/* Interrupt Status register */
- freg_t mask_reg; /* Mask Register */
- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
- freg_t state_reg; /* Status register */
- u_int filler4c[0x58 - 0x4c];
- freg_t curr_desc_num; /* Contains the current descriptor num */
- freg_t next_desc; /* Next descriptor */
- freg_t next_vc; /* Next VC */
- u_int filler5b[0x5d - 0x5b];
- freg_t present_slot_cnt;/* Present slot count */
- u_int filler5e[0x6a - 0x5e];
- freg_t new_desc_num; /* New descriptor number */
- freg_t new_vc; /* New VC */
- freg_t sched_tbl_ptr; /* Schedule table pointer */
- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
- freg_t abrwq_wptr; /* ABR wait queue write pointer */
- freg_t abrwq_rptr; /* ABR wait queue read pointer */
- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
- freg_t cbr_vc; /* CBR VC */
- freg_t vbr_sb_vc; /* VBR SB VC */
- freg_t abr_sb_vc; /* ABR SB VC */
- freg_t ubr_sb_vc; /* UBR SB VC */
- freg_t vbr_next_link; /* VBR next link */
- freg_t abr_next_link; /* ABR next link */
- freg_t ubr_next_link; /* UBR next link */
- u_int filler7a[0x7c-0x7a];
- freg_t out_rate_head; /* Out of rate head */
- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ ffreg_t idlehead_high; /* Idle cell header (high) */
+ ffreg_t idlehead_low; /* Idle cell header (low) */
+ ffreg_t maxrate; /* Maximum rate */
+ ffreg_t stparms; /* Traffic Management Parameters */
+ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+ ffreg_t rm_type; /* */
+ u_int filler5[0x17 - 0x06];
+ ffreg_t cmd_reg; /* Command register */
+ u_int filler18[0x20 - 0x18];
+ ffreg_t cbr_base; /* CBR Pointer Base */
+ ffreg_t vbr_base; /* VBR Pointer Base */
+ ffreg_t abr_base; /* ABR Pointer Base */
+ ffreg_t ubr_base; /* UBR Pointer Base */
+ u_int filler24;
+ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
+ ffreg_t abrwq_base; /* ABR Wait Queue Base */
+ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
+ ffreg_t vct_base; /* Main VC Table Base */
+ ffreg_t vcte_base; /* Extended Main VC Table Base */
+ u_int filler2a[0x2C - 0x2A];
+ ffreg_t cbr_tab_beg; /* CBR Table Begin */
+ ffreg_t cbr_tab_end; /* CBR Table End */
+ ffreg_t cbr_pointer; /* CBR Pointer */
+ u_int filler2f[0x30 - 0x2F];
+ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
+ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
+ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+ u_int filler38[0x40 - 0x38];
+ ffreg_t queue_base; /* Base address for PRQ and TCQ */
+ ffreg_t desc_base; /* Base address of descriptor table */
+ u_int filler42[0x45 - 0x42];
+ ffreg_t mode_reg_0; /* Mode register 0 */
+ ffreg_t mode_reg_1; /* Mode register 1 */
+ ffreg_t intr_status_reg;/* Interrupt Status register */
+ ffreg_t mask_reg; /* Mask Register */
+ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+ ffreg_t state_reg; /* Status register */
+ u_int filler4c[0x58 - 0x4c];
+ ffreg_t curr_desc_num; /* Contains the current descriptor num */
+ ffreg_t next_desc; /* Next descriptor */
+ ffreg_t next_vc; /* Next VC */
+ u_int filler5b[0x5d - 0x5b];
+ ffreg_t present_slot_cnt;/* Present slot count */
+ u_int filler5e[0x6a - 0x5e];
+ ffreg_t new_desc_num; /* New descriptor number */
+ ffreg_t new_vc; /* New VC */
+ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
+ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
+ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
+ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
+ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
+ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
+ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
+ ffreg_t cbr_vc; /* CBR VC */
+ ffreg_t vbr_sb_vc; /* VBR SB VC */
+ ffreg_t abr_sb_vc; /* ABR SB VC */
+ ffreg_t ubr_sb_vc; /* UBR SB VC */
+ ffreg_t vbr_next_link; /* VBR next link */
+ ffreg_t abr_next_link; /* ABR next link */
+ ffreg_t ubr_next_link; /* UBR next link */
+ u_int filler7a[0x7c-0x7a];
+ ffreg_t out_rate_head; /* Out of rate head */
+ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
} ffredn_t;
typedef struct _rfredn_t {
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 68c758871812..fa7d701933ba 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -551,8 +551,8 @@ static inline void sram_write(const struct lanai_dev *lanai,
writel(val, sram_addr(lanai, offset));
}
-static int __devinit sram_test_word(const struct lanai_dev *lanai,
- int offset, u32 pattern)
+static int sram_test_word(const struct lanai_dev *lanai, int offset,
+ u32 pattern)
{
u32 readback;
sram_write(lanai, pattern, offset);
@@ -566,7 +566,7 @@ static int __devinit sram_test_word(const struct lanai_dev *lanai,
return -EIO;
}
-static int __devinit sram_test_pass(const struct lanai_dev *lanai, u32 pattern)
+static int sram_test_pass(const struct lanai_dev *lanai, u32 pattern)
{
int offset, result = 0;
for (offset = 0; offset < SRAM_BYTES && result == 0; offset += 4)
@@ -574,7 +574,7 @@ static int __devinit sram_test_pass(const struct lanai_dev *lanai, u32 pattern)
return result;
}
-static int __devinit sram_test_and_clear(const struct lanai_dev *lanai)
+static int sram_test_and_clear(const struct lanai_dev *lanai)
{
#ifdef FULL_MEMORY_TEST
int result;
@@ -860,7 +860,7 @@ static inline void aal0_buffer_free(struct lanai_dev *lanai)
#ifndef READ_EEPROM
/* Stub functions to use if EEPROM reading is disabled */
-static int __devinit eeprom_read(struct lanai_dev *lanai)
+static int eeprom_read(struct lanai_dev *lanai)
{
printk(KERN_INFO DEV_LABEL "(itf %d): *NOT* reading EEPROM\n",
lanai->number);
@@ -868,7 +868,7 @@ static int __devinit eeprom_read(struct lanai_dev *lanai)
return 0;
}
-static int __devinit eeprom_validate(struct lanai_dev *lanai)
+static int eeprom_validate(struct lanai_dev *lanai)
{
lanai->serialno = 0;
lanai->magicno = EEPROM_MAGIC_VALUE;
@@ -877,7 +877,7 @@ static int __devinit eeprom_validate(struct lanai_dev *lanai)
#else /* READ_EEPROM */
-static int __devinit eeprom_read(struct lanai_dev *lanai)
+static int eeprom_read(struct lanai_dev *lanai)
{
int i, address;
u8 data;
@@ -953,7 +953,7 @@ static inline u32 eeprom_be4(const struct lanai_dev *lanai, int address)
}
/* Checksum/validate EEPROM contents */
-static int __devinit eeprom_validate(struct lanai_dev *lanai)
+static int eeprom_validate(struct lanai_dev *lanai)
{
int i, s;
u32 v;
@@ -1448,7 +1448,7 @@ static void vcc_rx_aal0(struct lanai_dev *lanai)
#include <linux/vmalloc.h>
#endif
-static int __devinit vcc_table_allocate(struct lanai_dev *lanai)
+static int vcc_table_allocate(struct lanai_dev *lanai)
{
#ifdef VCCTABLE_GETFREEPAGE
APRINTK((lanai->num_vci) * sizeof(struct lanai_vcc *) <= PAGE_SIZE,
@@ -1588,7 +1588,7 @@ static void lanai_reset(struct lanai_dev *lanai)
/*
* Allocate service buffer and tell card about it
*/
-static int __devinit service_buffer_allocate(struct lanai_dev *lanai)
+static int service_buffer_allocate(struct lanai_dev *lanai)
{
lanai_buf_allocate(&lanai->service, SERVICE_ENTRIES * 4, 8,
lanai->pci);
@@ -1942,7 +1942,7 @@ static int check_board_id_and_rev(const char *name, u32 val, int *revp)
/* -------------------- PCI INITIALIZATION/SHUTDOWN: */
-static int __devinit lanai_pci_start(struct lanai_dev *lanai)
+static int lanai_pci_start(struct lanai_dev *lanai)
{
struct pci_dev *pci = lanai->pci;
int result;
@@ -2123,7 +2123,7 @@ static inline void lanai_cbr_shutdown(struct lanai_dev *lanai)
/* -------------------- OPERATIONS: */
/* setup a newly detected device */
-static int __devinit lanai_dev_open(struct atm_dev *atmdev)
+static int lanai_dev_open(struct atm_dev *atmdev)
{
struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
unsigned long raw_base;
@@ -2566,8 +2566,8 @@ static const struct atmdev_ops ops = {
};
/* initialize one probed card */
-static int __devinit lanai_init_one(struct pci_dev *pci,
- const struct pci_device_id *ident)
+static int lanai_init_one(struct pci_dev *pci,
+ const struct pci_device_id *ident)
{
struct lanai_dev *lanai;
struct atm_dev *atmdev;
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 1c70c45fa044..ed1d2b7f923b 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -121,8 +121,8 @@
static u32 ns_read_sram(ns_dev * card, u32 sram_address);
static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
int count);
-static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
-static void __devinit ns_init_card_error(ns_dev * card, int error);
+static int ns_init_card(int i, struct pci_dev *pcidev);
+static void ns_init_card_error(ns_dev * card, int error);
static scq_info *get_scq(ns_dev *card, int size, u32 scd);
static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
static void push_rxbufs(ns_dev *, struct sk_buff *);
@@ -180,8 +180,8 @@ MODULE_LICENSE("GPL");
/* Functions */
-static int __devinit nicstar_init_one(struct pci_dev *pcidev,
- const struct pci_device_id *ent)
+static int nicstar_init_one(struct pci_dev *pcidev,
+ const struct pci_device_id *ent)
{
static int index = -1;
unsigned int error;
@@ -200,7 +200,7 @@ err_out:
return -ENODEV;
}
-static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
+static void nicstar_remove_one(struct pci_dev *pcidev)
{
int i, j;
ns_dev *card = pci_get_drvdata(pcidev);
@@ -262,7 +262,7 @@ static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
kfree(card);
}
-static struct pci_device_id nicstar_pci_tbl[] __devinitdata = {
+static struct pci_device_id nicstar_pci_tbl[] = {
{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
{0,} /* terminate list */
};
@@ -273,7 +273,7 @@ static struct pci_driver nicstar_driver = {
.name = "nicstar",
.id_table = nicstar_pci_tbl,
.probe = nicstar_init_one,
- .remove = __devexit_p(nicstar_remove_one),
+ .remove = nicstar_remove_one,
};
static int __init nicstar_init(void)
@@ -351,7 +351,7 @@ static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
spin_unlock_irqrestore(&card->res_lock, flags);
}
-static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
+static int ns_init_card(int i, struct pci_dev *pcidev)
{
int j;
struct ns_dev *card = NULL;
@@ -821,7 +821,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
return error;
}
-static void __devinit ns_init_card_error(ns_dev * card, int error)
+static void ns_init_card_error(ns_dev *card, int error)
{
if (error >= 17) {
writel(0x00000000, card->membase + CFG);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c909b7b7d5f1..0474a89170b9 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -42,7 +42,8 @@
#include <linux/swab.h>
#include <linux/slab.h>
-#define VERSION "0.07"
+#define VERSION "1.04"
+#define DRIVER_VERSION 0x01
#define PTAG "solos-pci"
#define CONFIG_RAM_SIZE 128
@@ -56,16 +57,21 @@
#define FLASH_BUSY 0x60
#define FPGA_MODE 0x5C
#define FLASH_MODE 0x58
+#define GPIO_STATUS 0x54
+#define DRIVER_VER 0x50
#define TX_DMA_ADDR(port) (0x40 + (4 * (port)))
#define RX_DMA_ADDR(port) (0x30 + (4 * (port)))
#define DATA_RAM_SIZE 32768
#define BUF_SIZE 2048
#define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/
-#define FPGA_PAGE 528 /* FPGA flash page size*/
-#define SOLOS_PAGE 512 /* Solos flash page size*/
-#define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/
-#define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash block size*/
+/* Old boards use ATMEL AD45DB161D flash */
+#define ATMEL_FPGA_PAGE 528 /* FPGA flash page size*/
+#define ATMEL_SOLOS_PAGE 512 /* Solos flash page size*/
+#define ATMEL_FPGA_BLOCK (ATMEL_FPGA_PAGE * 8) /* FPGA block size*/
+#define ATMEL_SOLOS_BLOCK (ATMEL_SOLOS_PAGE * 8) /* Solos block size*/
+/* Current boards use M25P/M25PE SPI flash */
+#define SPI_FLASH_BLOCK (256 * 64)
#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2)
#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size))
@@ -122,11 +128,14 @@ struct solos_card {
struct sk_buff_head cli_queue[4];
struct sk_buff *tx_skb[4];
struct sk_buff *rx_skb[4];
+ unsigned char *dma_bounce;
wait_queue_head_t param_wq;
wait_queue_head_t fw_wq;
int using_dma;
+ int dma_alignment;
int fpga_version;
int buffer_size;
+ int atmel_flash;
};
@@ -451,7 +460,6 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
len = skb->len;
memcpy(buf, skb->data, len);
- dev_dbg(&card->dev->dev, "len: %d\n", len);
kfree_skb(skb);
return len;
@@ -498,6 +506,78 @@ static ssize_t console_store(struct device *dev, struct device_attribute *attr,
return err?:count;
}
+struct geos_gpio_attr {
+ struct device_attribute attr;
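+	/* bit position (or field selector) within the GPIO_STATUS register */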
+ int offset;
+};
+
+#define SOLOS_GPIO_ATTR(_name, _mode, _show, _store, _offset) \
+ struct geos_gpio_attr gpio_attr_##_name = { \
+ .attr = __ATTR(_name, _mode, _show, _store), \
+ .offset = _offset }
+
+static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
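+	/* accept a single '0' or '1', with an optional trailing newline */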
+ if (count != 1 && (count != 2 || buf[1] != '\n'))
+ return -EINVAL;
+
+ spin_lock_irq(&card->param_queue_lock);
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ if (buf[0] == '1') {
+ data32 |= 1 << gattr->offset;
+ iowrite32(data32, card->config_regs + GPIO_STATUS);
+ } else if (buf[0] == '0') {
+ data32 &= ~(1 << gattr->offset);
+ iowrite32(data32, card->config_regs + GPIO_STATUS);
+ } else {
+ count = -EINVAL;
+ }
+ spin_unlock_irq(&card->param_queue_lock);
+ return count;
+}
+
+static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ data32 = (data32 >> gattr->offset) & 1;
+
+ return sprintf(buf, "%d\n", data32);
+}
+
+static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ switch (gattr->offset) {
+ case 0:
+ /* HardwareVersion */
+ data32 = data32 & 0x1F;
+ break;
+ case 1:
+ /* HardwareVariant */
+ data32 = (data32 >> 5) & 0x0F;
+ break;
+ }
+ return sprintf(buf, "%d\n", data32);
+}
+
static DEVICE_ATTR(console, 0644, console_show, console_store);
@@ -506,6 +586,14 @@ static DEVICE_ATTR(console, 0644, console_show, console_store);
#include "solos-attrlist.c"
+static SOLOS_GPIO_ATTR(GPIO1, 0644, geos_gpio_show, geos_gpio_store, 9);
+static SOLOS_GPIO_ATTR(GPIO2, 0644, geos_gpio_show, geos_gpio_store, 10);
+static SOLOS_GPIO_ATTR(GPIO3, 0644, geos_gpio_show, geos_gpio_store, 11);
+static SOLOS_GPIO_ATTR(GPIO4, 0644, geos_gpio_show, geos_gpio_store, 12);
+static SOLOS_GPIO_ATTR(GPIO5, 0644, geos_gpio_show, geos_gpio_store, 13);
+static SOLOS_GPIO_ATTR(PushButton, 0444, geos_gpio_show, NULL, 14);
+static SOLOS_GPIO_ATTR(HardwareVersion, 0444, hardware_show, NULL, 0);
+static SOLOS_GPIO_ATTR(HardwareVariant, 0444, hardware_show, NULL, 1);
#undef SOLOS_ATTR_RO
#undef SOLOS_ATTR_RW
@@ -522,6 +610,23 @@ static struct attribute_group solos_attr_group = {
.name = "parameters",
};
+static struct attribute *gpio_attrs[] = {
+ &gpio_attr_GPIO1.attr.attr,
+ &gpio_attr_GPIO2.attr.attr,
+ &gpio_attr_GPIO3.attr.attr,
+ &gpio_attr_GPIO4.attr.attr,
+ &gpio_attr_GPIO5.attr.attr,
+ &gpio_attr_PushButton.attr.attr,
+ &gpio_attr_HardwareVersion.attr.attr,
+ &gpio_attr_HardwareVariant.attr.attr,
+ NULL
+};
+
+static struct attribute_group gpio_attr_group = {
+ .attrs = gpio_attrs,
+ .name = "gpio",
+};
+
static int flash_upgrade(struct solos_card *card, int chip)
{
const struct firmware *fw;
@@ -533,16 +638,25 @@ static int flash_upgrade(struct solos_card *card, int chip)
switch (chip) {
case 0:
fw_name = "solos-FPGA.bin";
- blocksize = FPGA_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_FPGA_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
break;
case 1:
fw_name = "solos-Firmware.bin";
- blocksize = SOLOS_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_SOLOS_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
break;
case 2:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-db-FPGA.bin";
- blocksize = FPGA_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_FPGA_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
@@ -552,7 +666,10 @@ static int flash_upgrade(struct solos_card *card, int chip)
case 3:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-Firmware.bin";
- blocksize = SOLOS_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_SOLOS_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
@@ -568,6 +685,9 @@ static int flash_upgrade(struct solos_card *card, int chip)
dev_info(&card->dev->dev, "Flash upgrade starting\n");
+ /* New FPGAs require driver version before permitting flash upgrades */
+ iowrite32(DRIVER_VERSION, card->config_regs + DRIVER_VER);
+
numblocks = fw->size / blocksize;
dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size);
dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks);
@@ -597,9 +717,13 @@ static int flash_upgrade(struct solos_card *card, int chip)
/* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */
iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE);
- /* Copy block to buffer, swapping each 16 bits */
+ /* Copy block to buffer, swapping each 16 bits for Atmel flash */
for(i = 0; i < blocksize; i += 4) {
- uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i));
+ uint32_t word;
+ if (card->atmel_flash)
+ word = swahb32p((uint32_t *)(fw->data + offset + i));
+ else
+ word = *(uint32_t *)(fw->data + offset + i);
if(card->fpga_version > LEGACY_BUFFERS)
iowrite32(word, FLASH_BUF + i);
else
@@ -961,7 +1085,12 @@ static uint32_t fpga_tx(struct solos_card *card)
tx_started |= 1 << port;
oldskb = skb; /* We're done with this skb already */
} else if (skb && card->using_dma) {
- SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
+ unsigned char *data = skb->data;
+ if ((unsigned long)data & card->dma_alignment) {
+ data = card->dma_bounce + (BUF_SIZE * port);
+ memcpy(data, skb->data, skb->len);
+ }
+ SKB_CB(skb)->dma_addr = pci_map_single(card->dev, data,
skb->len, PCI_DMA_TODEVICE);
card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
@@ -1133,18 +1262,33 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
db_fpga_upgrade = db_firmware_upgrade = 0;
}
+ /* Stopped using Atmel flash after 0.03-38 */
+ if (fpga_ver < 39)
+ card->atmel_flash = 1;
+ else
+ card->atmel_flash = 0;
+
+ data32 = ioread32(card->config_regs + PORTS);
+ card->nr_ports = (data32 & 0x000000FF);
+
if (card->fpga_version >= DMA_SUPPORTED) {
pci_set_master(dev);
card->using_dma = 1;
+ if (1) { /* All known FPGA versions so far */
+ card->dma_alignment = 3;
+ card->dma_bounce = kmalloc(card->nr_ports * BUF_SIZE, GFP_KERNEL);
+ if (!card->dma_bounce) {
+ dev_warn(&card->dev->dev, "Failed to allocate DMA bounce buffers\n");
+ /* Fallback to MMIO doesn't work */
+ goto out_unmap_both;
+ }
+ }
} else {
card->using_dma = 0;
/* Set RX empty flag for all ports */
iowrite32(0xF0, card->config_regs + FLAGS_ADDR);
}
- data32 = ioread32(card->config_regs + PORTS);
- card->nr_ports = (data32 & 0x000000FF);
-
pci_set_drvdata(dev, card);
tasklet_init(&card->tlet, solos_bh, (unsigned long)card);
@@ -1179,6 +1323,10 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto out_free_irq;
+ if (card->fpga_version >= DMA_SUPPORTED &&
+ sysfs_create_group(&card->dev->dev.kobj, &gpio_attr_group))
+ dev_err(&card->dev->dev, "Could not register parameter group for GPIOs\n");
+
return 0;
out_free_irq:
@@ -1187,6 +1335,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
tasklet_kill(&card->tlet);
out_unmap_both:
+ kfree(card->dma_bounce);
pci_set_drvdata(dev, NULL);
pci_iounmap(dev, card->buffers);
out_unmap_config:
@@ -1289,11 +1438,16 @@ static void fpga_remove(struct pci_dev *dev)
iowrite32(1, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
+ if (card->fpga_version >= DMA_SUPPORTED)
+ sysfs_remove_group(&card->dev->dev.kobj, &gpio_attr_group);
+
atm_remove(card);
free_irq(dev->irq, card);
tasklet_kill(&card->tlet);
+ kfree(card->dma_bounce);
+
/* Release device from reset */
iowrite32(0, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
@@ -1308,7 +1462,7 @@ static void fpga_remove(struct pci_dev *dev)
kfree(card);
}
-static struct pci_device_id fpga_pci_tbl[] __devinitdata = {
+static struct pci_device_id fpga_pci_tbl[] = {
{ 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index abe4e20b0766..969c3c29000c 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1094,8 +1094,8 @@ static irqreturn_t zatm_int(int irq,void *dev_id)
/*----------------------------- (E)EPROM access -----------------------------*/
-static void __devinit eprom_set(struct zatm_dev *zatm_dev,unsigned long value,
- unsigned short cmd)
+static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value,
+ unsigned short cmd)
{
int error;
@@ -1105,8 +1105,7 @@ static void __devinit eprom_set(struct zatm_dev *zatm_dev,unsigned long value,
}
-static unsigned long __devinit eprom_get(struct zatm_dev *zatm_dev,
- unsigned short cmd)
+static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd)
{
unsigned int value;
int error;
@@ -1118,8 +1117,8 @@ static unsigned long __devinit eprom_get(struct zatm_dev *zatm_dev,
}
-static void __devinit eprom_put_bits(struct zatm_dev *zatm_dev,
- unsigned long data,int bits,unsigned short cmd)
+static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,
+ int bits, unsigned short cmd)
{
unsigned long value;
int i;
@@ -1133,8 +1132,8 @@ static void __devinit eprom_put_bits(struct zatm_dev *zatm_dev,
}
-static void __devinit eprom_get_byte(struct zatm_dev *zatm_dev,
- unsigned char *byte,unsigned short cmd)
+static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
+ unsigned short cmd)
{
int i;
@@ -1149,8 +1148,8 @@ static void __devinit eprom_get_byte(struct zatm_dev *zatm_dev,
}
-static unsigned char __devinit eprom_try_esi(struct atm_dev *dev,
- unsigned short cmd,int offset,int swap)
+static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
+ int offset, int swap)
{
unsigned char buf[ZEPROM_SIZE];
struct zatm_dev *zatm_dev;
@@ -1170,7 +1169,7 @@ static unsigned char __devinit eprom_try_esi(struct atm_dev *dev,
}
-static void __devinit eprom_get_esi(struct atm_dev *dev)
+static void eprom_get_esi(struct atm_dev *dev)
{
if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return;
(void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0);
@@ -1180,7 +1179,7 @@ static void __devinit eprom_get_esi(struct atm_dev *dev)
/*--------------------------------- entries ---------------------------------*/
-static int __devinit zatm_init(struct atm_dev *dev)
+static int zatm_init(struct atm_dev *dev)
{
struct zatm_dev *zatm_dev;
struct pci_dev *pci_dev;
@@ -1257,7 +1256,7 @@ static int __devinit zatm_init(struct atm_dev *dev)
}
-static int __devinit zatm_start(struct atm_dev *dev)
+static int zatm_start(struct atm_dev *dev)
{
struct zatm_dev *zatm_dev = ZATM_DEV(dev);
struct pci_dev *pdev = zatm_dev->pci_dev;
@@ -1584,8 +1583,8 @@ static const struct atmdev_ops ops = {
.change_qos = zatm_change_qos,
};
-static int __devinit zatm_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
+static int zatm_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *ent)
{
struct atm_dev *dev;
struct zatm_dev *zatm_dev;
@@ -1636,7 +1635,7 @@ out_free:
MODULE_LICENSE("GPL");
-static struct pci_device_id zatm_pci_tbl[] __devinitdata = {
+static struct pci_device_id zatm_pci_tbl[] = {
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
{ 0, }
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 5ad3bad2b0a5..d585735430dd 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -37,7 +37,7 @@
#define CFAG12864BFB_NAME "cfag12864bfb"
-static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
+static struct fb_fix_screeninfo cfag12864bfb_fix = {
.id = "cfag12864b",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -48,7 +48,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = {
+static struct fb_var_screeninfo cfag12864bfb_var = {
.xres = CFAG12864B_WIDTH,
.yres = CFAG12864B_HEIGHT,
.xres_virtual = CFAG12864B_WIDTH,
@@ -80,7 +80,7 @@ static struct fb_ops cfag12864bfb_ops = {
.fb_mmap = cfag12864bfb_mmap,
};
-static int __devinit cfag12864bfb_probe(struct platform_device *device)
+static int cfag12864bfb_probe(struct platform_device *device)
{
int ret = -EINVAL;
struct fb_info *info = framebuffer_alloc(0, &device->dev);
@@ -114,7 +114,7 @@ none:
return ret;
}
-static int __devexit cfag12864bfb_remove(struct platform_device *device)
+static int cfag12864bfb_remove(struct platform_device *device)
{
struct fb_info *info = platform_get_drvdata(device);
@@ -128,7 +128,7 @@ static int __devexit cfag12864bfb_remove(struct platform_device *device)
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove = __devexit_p(cfag12864bfb_remove),
+ .remove = cfag12864bfb_remove,
.driver = {
.name = CFAG12864BFB_NAME,
},
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index c8b453939da2..07abd9d76f7f 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -145,6 +145,17 @@ config EXTRA_FIRMWARE_DIR
this option you can point it elsewhere, such as /lib/firmware/ or
some other directory containing the firmware files.
+config FW_LOADER_USER_HELPER
+ bool "Fallback user-helper invocation for firmware loading"
+ depends on FW_LOADER
+ default y
+ help
+	  This option enables/disables the invocation of a user-space helper
+	  (e.g. udev) for loading firmware files as a fallback after direct
+	  in-kernel file loading fails. The user-mode helper is
+ no longer required unless you have a special firmware file that
+ resides in a non-standard path.
+
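A minimal consumer sketch (illustrative, not part of this patch): a driver requests firmware the same way whether or not the user-helper fallback is built in; with FW_LOADER_USER_HELPER=n only the direct in-kernel load from the standard firmware paths is attempted.

	const struct firmware *fw;
	int err;

	/* "example-fw.bin" and pdev are illustrative names */
	err = request_firmware(&fw, "example-fw.bin", &pdev->dev);
	if (err)
		return err;
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);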
config DEBUG_DRIVER
bool "Driver Core verbose debug messages"
depends on DEBUG_KERNEL
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5aa2d703d19f..4e22ce3ed73d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -21,6 +21,7 @@ endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
obj-$(CONFIG_SOC_BUS) += soc.o
+obj-$(CONFIG_PINCTRL) += pinctrl.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 24eb07868344..519865b53f76 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -290,7 +290,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
struct device *dev;
int error = 0;
- if (!bus)
+ if (!bus || !bus->p)
return -EINVAL;
klist_iter_init_node(&bus->p->klist_devices, &i,
@@ -324,7 +324,7 @@ struct device *bus_find_device(struct bus_type *bus,
struct klist_iter i;
struct device *dev;
- if (!bus)
+ if (!bus || !bus->p)
return NULL;
klist_iter_init_node(&bus->p->klist_devices, &i,
@@ -700,12 +700,12 @@ int bus_add_driver(struct device_driver *drv)
if (error)
goto out_unregister;
+ klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
goto out_unregister;
}
- klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
module_add_driver(drv->owner, drv);
error = driver_create_file(drv, &driver_attr_uevent);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 03243d4002fd..3ce845471327 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -420,8 +420,8 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
* code. There's no locking restriction.
*/
struct device *class_find_device(struct class *class, struct device *start,
- void *data,
- int (*match)(struct device *, void *))
+ const void *data,
+ int (*match)(struct device *, const void *))
{
struct class_dev_iter iter;
struct device *dev;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 417913974df8..56536f4b0f6b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -171,6 +171,27 @@ ssize_t device_show_int(struct device *dev,
}
EXPORT_SYMBOL_GPL(device_show_int);
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ if (strtobool(buf, ea->var) < 0)
+ return -EINVAL;
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_bool);
+
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_bool);
+
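A sketch of exposing a driver bool through these new helpers, assuming the companion DEVICE_BOOL_ATTR() macro (which wraps device_show_bool/device_store_bool around a dev_ext_attribute); the attribute and variable names are illustrative.

	static bool example_enabled;
	static DEVICE_BOOL_ATTR(enabled, 0644, example_enabled);

	/* reference &dev_attr_enabled.attr.attr from the device's attribute
	 * group so "enabled" shows up in sysfs and toggles example_enabled */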
/**
* device_release - free device structure.
* @kobj: device's kobject.
@@ -1596,9 +1617,9 @@ struct device *device_create(struct class *class, struct device *parent,
}
EXPORT_SYMBOL_GPL(device_create);
-static int __match_devt(struct device *dev, void *data)
+static int __match_devt(struct device *dev, const void *data)
{
- dev_t *devt = data;
+ const dev_t *devt = data;
return dev->devt == *devt;
}
@@ -1664,8 +1685,6 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
- char *old_class_name = NULL;
- char *new_class_name = NULL;
char *old_device_name = NULL;
int error;
@@ -1696,8 +1715,6 @@ int device_rename(struct device *dev, const char *new_name)
out:
put_device(dev);
- kfree(new_class_name);
- kfree(old_class_name);
kfree(old_device_name);
return error;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 63452943abd1..fb10728f6372 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -224,7 +224,7 @@ static void cpu_device_release(struct device *dev)
* by the cpu device.
*
* Never copy this way of doing things, or you too will be made fun of
- * on the linux-kerenl list, you have been warned.
+ * on the linux-kernel list, you have been warned.
*/
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e3bbed8a617c..bb5645ea0282 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/devinfo.h>
#include "base.h"
#include "power/power.h"
@@ -172,6 +173,8 @@ static int deferred_probe_initcall(void)
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
+ /* Sort as many dependencies as possible before exiting initcalls */
+ flush_workqueue(deferred_wq);
return 0;
}
late_initcall(deferred_probe_initcall);
@@ -269,6 +272,12 @@ static int really_probe(struct device *dev, struct device_driver *drv)
WARN_ON(!list_empty(&dev->devres_head));
dev->driver = drv;
+
+ /* If using pinctrl, bind pins now before probing */
+ ret = pinctrl_bind_pins(dev);
+ if (ret)
+ goto probe_failed;
+
if (driver_sysfs_add(dev)) {
printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 147d1a4dd269..17cf7cad601e 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -148,7 +148,7 @@ static int dev_mkdir(const char *name, umode_t mode)
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, 1);
+ dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 460e22dee36d..ff5b745c4705 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -134,15 +134,14 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
*/
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
- int error, fd;
+ int fd;
if (!dmabuf || !dmabuf->file)
return -EINVAL;
- error = get_unused_fd_flags(flags);
- if (error < 0)
- return error;
- fd = error;
+ fd = get_unused_fd_flags(flags);
+ if (fd < 0)
+ return fd;
fd_install(fd, dmabuf->file);
@@ -298,6 +297,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
enum dma_data_direction direction)
{
+ might_sleep();
+
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3fbedc75e7c5..0ce39a33b3c2 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -218,6 +218,8 @@ void dmam_release_declared_memory(struct device *dev)
}
EXPORT_SYMBOL(dmam_release_declared_memory);
+#endif
+
/*
* Create scatter-list for the already allocated DMA buffer.
*/
@@ -236,8 +238,6 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
}
EXPORT_SYMBOL(dma_common_get_sgtable);
-#endif
-
/*
* Create userspace mapping for the DMA-coherent memory.
*/
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d81460309182..4a223fedcd73 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -88,11 +88,6 @@ enum {
FW_STATUS_ABORT,
};
-enum fw_buf_fmt {
- VMALLOC_BUF, /* used in direct loading */
- PAGE_BUF, /* used in loading via userspace */
-};
-
static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
@@ -128,12 +123,14 @@ struct firmware_buf {
struct completion completion;
struct firmware_cache *fwc;
unsigned long status;
- enum fw_buf_fmt fmt;
void *data;
size_t size;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ bool is_paged_buf;
struct page **pages;
int nr_pages;
int page_array_size;
+#endif
char fw_id[];
};
@@ -142,14 +139,6 @@ struct fw_cache_entry {
char name[];
};
-struct firmware_priv {
- struct delayed_work timeout_work;
- bool nowait;
- struct device dev;
- struct firmware_buf *buf;
- struct firmware *fw;
-};
-
struct fw_name_devm {
unsigned long magic;
char name[];
@@ -182,7 +171,6 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
strcpy(buf->fw_id, fw_name);
buf->fwc = fwc;
init_completion(&buf->completion);
- buf->fmt = VMALLOC_BUF;
pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
@@ -240,7 +228,6 @@ static void __fw_free_buf(struct kref *ref)
{
struct firmware_buf *buf = to_fwbuf(ref);
struct firmware_cache *fwc = buf->fwc;
- int i;
pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
__func__, buf->fw_id, buf, buf->data,
@@ -249,13 +236,15 @@ static void __fw_free_buf(struct kref *ref)
list_del(&buf->list);
spin_unlock(&fwc->lock);
-
- if (buf->fmt == PAGE_BUF) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ if (buf->is_paged_buf) {
+ int i;
vunmap(buf->data);
for (i = 0; i < buf->nr_pages; i++)
__free_page(buf->pages[i]);
kfree(buf->pages);
} else
+#endif
vfree(buf->data);
kfree(buf);
}
@@ -305,7 +294,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
char *buf;
size = fw_file_size(file);
- if (size < 0)
+ if (size <= 0)
return false;
buf = vmalloc(size);
if (!buf)
@@ -319,7 +308,8 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
return true;
}
-static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
+static bool fw_get_filesystem_firmware(struct device *device,
+ struct firmware_buf *buf)
{
int i;
bool success = false;
@@ -343,9 +333,114 @@ static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
break;
}
__putname(path);
+
+ if (success) {
+ dev_dbg(device, "firmware: direct-loading firmware %s\n",
+ buf->fw_id);
+ mutex_lock(&fw_lock);
+ set_bit(FW_STATUS_DONE, &buf->status);
+ complete_all(&buf->completion);
+ mutex_unlock(&fw_lock);
+ }
+
return success;
}
+/* firmware holds the ownership of pages */
+static void firmware_free_data(const struct firmware *fw)
+{
+ /* Loaded directly? */
+ if (!fw->priv) {
+ vfree(fw->data);
+ return;
+ }
+ fw_free_buf(fw->priv);
+}
+
+/* store the pages buffer info into firmware from buf */
+static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
+{
+ fw->priv = buf;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ fw->pages = buf->pages;
+#endif
+ fw->size = buf->size;
+ fw->data = buf->data;
+
+ pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
+ __func__, buf->fw_id, buf, buf->data,
+ (unsigned int)buf->size);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void fw_name_devm_release(struct device *dev, void *res)
+{
+ struct fw_name_devm *fwn = res;
+
+ if (fwn->magic == (unsigned long)&fw_cache)
+ pr_debug("%s: fw_name-%s devm-%p released\n",
+ __func__, fwn->name, res);
+}
+
+static int fw_devm_match(struct device *dev, void *res,
+ void *match_data)
+{
+ struct fw_name_devm *fwn = res;
+
+ return (fwn->magic == (unsigned long)&fw_cache) &&
+ !strcmp(fwn->name, match_data);
+}
+
+static struct fw_name_devm *fw_find_devm_name(struct device *dev,
+ const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = devres_find(dev, fw_name_devm_release,
+ fw_devm_match, (void *)name);
+ return fwn;
+}
+
+/* add firmware name into devres list */
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = fw_find_devm_name(dev, name);
+ if (fwn)
+ return 1;
+
+ fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
+ strlen(name) + 1, GFP_KERNEL);
+ if (!fwn)
+ return -ENOMEM;
+
+ fwn->magic = (unsigned long)&fw_cache;
+ strcpy(fwn->name, name);
+ devres_add(dev, fwn);
+
+ return 0;
+}
+#else
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ return 0;
+}
+#endif
+
+
+/*
+ * user-mode helper code
+ */
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+struct firmware_priv {
+ struct delayed_work timeout_work;
+ bool nowait;
+ struct device dev;
+ struct firmware_buf *buf;
+ struct firmware *fw;
+};
+
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
return container_of(dev, struct firmware_priv, dev);
@@ -359,6 +454,9 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
complete_all(&buf->completion);
}
+#define is_fw_load_aborted(buf) \
+ test_bit(FW_STATUS_ABORT, &(buf)->status)
+
static ssize_t firmware_timeout_show(struct class *class,
struct class_attribute *attr,
char *buf)
@@ -435,17 +533,6 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
-/* firmware holds the ownership of pages */
-static void firmware_free_data(const struct firmware *fw)
-{
- /* Loaded directly? */
- if (!fw->priv) {
- vfree(fw->data);
- return;
- }
- fw_free_buf(fw->priv);
-}
-
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
@@ -454,7 +541,7 @@ static void firmware_free_data(const struct firmware *fw)
/* one pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
- if (buf->fmt != PAGE_BUF)
+ if (!buf->is_paged_buf)
return 0;
if (buf->data)
@@ -727,171 +814,16 @@ exit:
return fw_priv;
}
-/* store the pages buffer info firmware from buf */
-static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
-{
- fw->priv = buf;
- fw->pages = buf->pages;
- fw->size = buf->size;
- fw->data = buf->data;
-
- pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
- __func__, buf->fw_id, buf, buf->data,
- (unsigned int)buf->size);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static void fw_name_devm_release(struct device *dev, void *res)
-{
- struct fw_name_devm *fwn = res;
-
- if (fwn->magic == (unsigned long)&fw_cache)
- pr_debug("%s: fw_name-%s devm-%p released\n",
- __func__, fwn->name, res);
-}
-
-static int fw_devm_match(struct device *dev, void *res,
- void *match_data)
-{
- struct fw_name_devm *fwn = res;
-
- return (fwn->magic == (unsigned long)&fw_cache) &&
- !strcmp(fwn->name, match_data);
-}
-
-static struct fw_name_devm *fw_find_devm_name(struct device *dev,
- const char *name)
-{
- struct fw_name_devm *fwn;
-
- fwn = devres_find(dev, fw_name_devm_release,
- fw_devm_match, (void *)name);
- return fwn;
-}
-
-/* add firmware name into devres list */
-static int fw_add_devm_name(struct device *dev, const char *name)
-{
- struct fw_name_devm *fwn;
-
- fwn = fw_find_devm_name(dev, name);
- if (fwn)
- return 1;
-
- fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
- strlen(name) + 1, GFP_KERNEL);
- if (!fwn)
- return -ENOMEM;
-
- fwn->magic = (unsigned long)&fw_cache;
- strcpy(fwn->name, name);
- devres_add(dev, fwn);
-
- return 0;
-}
-#else
-static int fw_add_devm_name(struct device *dev, const char *name)
-{
- return 0;
-}
-#endif
-
-static void _request_firmware_cleanup(const struct firmware **firmware_p)
-{
- release_firmware(*firmware_p);
- *firmware_p = NULL;
-}
-
-static struct firmware_priv *
-_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
- struct device *device, bool uevent, bool nowait)
-{
- struct firmware *firmware;
- struct firmware_priv *fw_priv = NULL;
- struct firmware_buf *buf;
- int ret;
-
- if (!firmware_p)
- return ERR_PTR(-EINVAL);
-
- *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
- if (!firmware) {
- dev_err(device, "%s: kmalloc(struct firmware) failed\n",
- __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- if (fw_get_builtin_firmware(firmware, name)) {
- dev_dbg(device, "firmware: using built-in firmware %s\n", name);
- return NULL;
- }
-
- ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
- if (!ret)
- fw_priv = fw_create_instance(firmware, name, device,
- uevent, nowait);
-
- if (IS_ERR(fw_priv) || ret < 0) {
- kfree(firmware);
- *firmware_p = NULL;
- return ERR_PTR(-ENOMEM);
- } else if (fw_priv) {
- fw_priv->buf = buf;
-
- /*
- * bind with 'buf' now to avoid warning in failure path
- * of requesting firmware.
- */
- firmware->priv = buf;
- return fw_priv;
- }
-
- /* share the cached buf, which is inprogessing or completed */
- check_status:
- mutex_lock(&fw_lock);
- if (test_bit(FW_STATUS_ABORT, &buf->status)) {
- fw_priv = ERR_PTR(-ENOENT);
- firmware->priv = buf;
- _request_firmware_cleanup(firmware_p);
- goto exit;
- } else if (test_bit(FW_STATUS_DONE, &buf->status)) {
- fw_priv = NULL;
- fw_set_page_data(buf, firmware);
- goto exit;
- }
- mutex_unlock(&fw_lock);
- wait_for_completion(&buf->completion);
- goto check_status;
-
-exit:
- mutex_unlock(&fw_lock);
- return fw_priv;
-}
-
+/* load a firmware via user helper */
static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
long timeout)
{
int retval = 0;
struct device *f_dev = &fw_priv->dev;
struct firmware_buf *buf = fw_priv->buf;
- struct firmware_cache *fwc = &fw_cache;
- int direct_load = 0;
-
- /* try direct loading from fs first */
- if (fw_get_filesystem_firmware(buf)) {
- dev_dbg(f_dev->parent, "firmware: direct-loading"
- " firmware %s\n", buf->fw_id);
-
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- mutex_unlock(&fw_lock);
- complete_all(&buf->completion);
- direct_load = 1;
- goto handle_fw;
- }
/* fall back on userspace loading */
- buf->fmt = PAGE_BUF;
+ buf->is_paged_buf = true;
dev_set_uevent_suppress(f_dev, true);
@@ -929,47 +861,196 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
cancel_delayed_work_sync(&fw_priv->timeout_work);
-handle_fw:
+ fw_priv->buf = NULL;
+
+ device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+ device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+ device_del(f_dev);
+err_put_dev:
+ put_device(f_dev);
+ return retval;
+}
+
+static int fw_load_from_user_helper(struct firmware *firmware,
+ const char *name, struct device *device,
+ bool uevent, bool nowait, long timeout)
+{
+ struct firmware_priv *fw_priv;
+
+ fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ if (IS_ERR(fw_priv))
+ return PTR_ERR(fw_priv);
+
+ fw_priv->buf = firmware->priv;
+ return _request_firmware_load(fw_priv, uevent, timeout);
+}
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+static inline int
+fw_load_from_user_helper(struct firmware *firmware, const char *name,
+ struct device *device, bool uevent, bool nowait,
+ long timeout)
+{
+ return -ENOENT;
+}
+
+/* No abort during direct loading */
+#define is_fw_load_aborted(buf) false
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+
+/* wait until the shared firmware_buf becomes ready (or error) */
+static int sync_cached_firmware_buf(struct firmware_buf *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&fw_lock);
+ while (!test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (is_fw_load_aborted(buf)) {
+ ret = -ENOENT;
+ break;
+ }
+ mutex_unlock(&fw_lock);
+ wait_for_completion(&buf->completion);
+ mutex_lock(&fw_lock);
+ }
+ mutex_unlock(&fw_lock);
+ return ret;
+}
+
+/* prepare firmware and firmware_buf structs;
+ * return 0 if a firmware is already assigned, 1 if one needs to be loaded,
+ * or a negative error code
+ */
+static int
+_request_firmware_prepare(struct firmware **firmware_p, const char *name,
+ struct device *device)
+{
+ struct firmware *firmware;
+ struct firmware_buf *buf;
+ int ret;
+
+ *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
+ if (!firmware) {
+ dev_err(device, "%s: kmalloc(struct firmware) failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (fw_get_builtin_firmware(firmware, name)) {
+ dev_dbg(device, "firmware: using built-in firmware %s\n", name);
+ return 0; /* assigned */
+ }
+
+ ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+
+ /*
+ * bind with 'buf' now to avoid warning in failure path
+ * of requesting firmware.
+ */
+ firmware->priv = buf;
+
+ if (ret > 0) {
+ ret = sync_cached_firmware_buf(buf);
+ if (!ret) {
+ fw_set_page_data(buf, firmware);
+ return 0; /* assigned */
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+ return 1; /* need to load */
+}
+
+static int assign_firmware_buf(struct firmware *fw, struct device *device)
+{
+ struct firmware_buf *buf = fw->priv;
+
mutex_lock(&fw_lock);
- if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
- retval = -ENOENT;
+ if (!buf->size || is_fw_load_aborted(buf)) {
+ mutex_unlock(&fw_lock);
+ return -ENOENT;
+ }
/*
* add firmware name into devres list so that we can auto cache
* and uncache firmware for device.
*
- * f_dev->parent may has been deleted already, but the problem
+	 * device may have been deleted already, but the problem
* should be fixed in devres or driver core.
*/
- if (!retval && f_dev->parent)
- fw_add_devm_name(f_dev->parent, buf->fw_id);
+ if (device)
+ fw_add_devm_name(device, buf->fw_id);
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
*/
- if (!retval && fwc->state == FW_LOADER_START_CACHE) {
+ if (buf->fwc->state == FW_LOADER_START_CACHE) {
if (fw_cache_piggyback_on_request(buf->fw_id))
kref_get(&buf->ref);
}
/* pass the pages buffer to driver at the last minute */
- fw_set_page_data(buf, fw_priv->fw);
-
- fw_priv->buf = NULL;
+ fw_set_page_data(buf, fw);
mutex_unlock(&fw_lock);
+ return 0;
+}
- if (direct_load)
- goto err_put_dev;
+/* called from request_firmware() and request_firmware_work_func() */
+static int
+_request_firmware(const struct firmware **firmware_p, const char *name,
+ struct device *device, bool uevent, bool nowait)
+{
+ struct firmware *fw;
+ long timeout;
+ int ret;
- device_remove_file(f_dev, &dev_attr_loading);
-err_del_bin_attr:
- device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
- device_del(f_dev);
-err_put_dev:
- put_device(f_dev);
- return retval;
+ if (!firmware_p)
+ return -EINVAL;
+
+ ret = _request_firmware_prepare(&fw, name, device);
+ if (ret <= 0) /* error or already assigned */
+ goto out;
+
+ ret = 0;
+ timeout = firmware_loading_timeout();
+ if (nowait) {
+ timeout = usermodehelper_read_lock_wait(timeout);
+ if (!timeout) {
+ dev_dbg(device, "firmware: %s loading timed out\n",
+ name);
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ ret = usermodehelper_read_trylock();
+ if (WARN_ON(ret)) {
+ dev_err(device, "firmware: %s will not be loaded\n",
+ name);
+ goto out;
+ }
+ }
+
+ if (!fw_get_filesystem_firmware(device, fw->priv))
+ ret = fw_load_from_user_helper(fw, name, device,
+ uevent, nowait, timeout);
+ if (!ret)
+ ret = assign_firmware_buf(fw, device);
+
+ usermodehelper_read_unlock();
+
+ out:
+ if (ret < 0) {
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ *firmware_p = fw;
+ return ret;
}
/**
@@ -996,26 +1077,7 @@ int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
- struct firmware_priv *fw_priv;
- int ret;
-
- fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
- false);
- if (IS_ERR_OR_NULL(fw_priv))
- return PTR_RET(fw_priv);
-
- ret = usermodehelper_read_trylock();
- if (WARN_ON(ret)) {
- dev_err(device, "firmware: %s will not be loaded\n", name);
- } else {
- ret = _request_firmware_load(fw_priv, true,
- firmware_loading_timeout());
- usermodehelper_read_unlock();
- }
- if (ret)
- _request_firmware_cleanup(firmware_p);
-
- return ret;
+ return _request_firmware(firmware_p, name, device, true, false);
}
/**
@@ -1046,33 +1108,13 @@ static void request_firmware_work_func(struct work_struct *work)
{
struct firmware_work *fw_work;
const struct firmware *fw;
- struct firmware_priv *fw_priv;
- long timeout;
- int ret;
fw_work = container_of(work, struct firmware_work, work);
- fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
- if (IS_ERR_OR_NULL(fw_priv)) {
- ret = PTR_RET(fw_priv);
- goto out;
- }
-
- timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
- if (timeout) {
- ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
- usermodehelper_read_unlock();
- } else {
- dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
- fw_work->name);
- ret = -EAGAIN;
- }
- if (ret)
- _request_firmware_cleanup(&fw);
- out:
+ _request_firmware(&fw, fw_work->name, fw_work->device,
+ fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
- put_device(fw_work->device);
+ put_device(fw_work->device); /* taken in request_firmware_nowait() */
module_put(fw_work->module);
kfree(fw_work);
@@ -1474,7 +1516,11 @@ static void __init fw_cache_init(void)
static int __init firmware_class_init(void)
{
fw_cache_init();
+#ifdef CONFIG_FW_LOADER_USER_HELPER
return class_register(&firmware_class);
+#else
+ return 0;
+#endif
}
static void __exit firmware_class_exit(void)
@@ -1483,7 +1529,9 @@ static void __exit firmware_class_exit(void)
unregister_syscore_ops(&fw_syscore_ops);
unregister_pm_notifier(&fw_cache.pm_notify);
#endif
+#ifdef CONFIG_FW_LOADER_USER_HELPER
class_unregister(&firmware_class);
+#endif
}
fs_initcall(firmware_class_init);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 987604d56c83..a51007b79032 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -494,8 +494,8 @@ store_hard_offline_page(struct device *dev,
return ret ? ret : count;
}
-static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
static __init int memory_fail_init(void)
{
@@ -693,6 +693,12 @@ int offline_memory_block(struct memory_block *mem)
return ret;
}
+/* return true if the memory block is offlined; otherwise return false */
+bool is_memblock_offlined(struct memory_block *mem)
+{
+ return mem->state == MEM_OFFLINE;
+}
+
/*
* Initialize the sysfs support for memory devices...
*/
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
new file mode 100644
index 000000000000..67a274e86727
--- /dev/null
+++ b/drivers/base/pinctrl.c
@@ -0,0 +1,69 @@
+/*
+ * Driver core interface to the pinctrl subsystem.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/device.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * pinctrl_bind_pins() - called by the device core before probe
+ * @dev: the device that is just about to probe
+ */
+int pinctrl_bind_pins(struct device *dev)
+{
+ int ret;
+
+ dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
+ if (!dev->pins)
+ return -ENOMEM;
+
+ dev->pins->p = devm_pinctrl_get(dev);
+ if (IS_ERR(dev->pins->p)) {
+ dev_dbg(dev, "no pinctrl handle\n");
+ ret = PTR_ERR(dev->pins->p);
+ goto cleanup_alloc;
+ }
+
+ dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(dev->pins->default_state)) {
+ dev_dbg(dev, "no default pinctrl state\n");
+ ret = 0;
+ goto cleanup_get;
+ }
+
+ ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
+ if (ret) {
+ dev_dbg(dev, "failed to activate default pinctrl state\n");
+ goto cleanup_get;
+ }
+
+ return 0;
+
+ /*
+ * If no pinctrl handle or default state was found for this device,
+	 * explicitly free the pin container in the device; there is
+	 * no point in keeping it around.
+ */
+cleanup_get:
+ devm_pinctrl_put(dev->pins->p);
+cleanup_alloc:
+ devm_kfree(dev, dev->pins);
+ dev->pins = NULL;
+
+ /* Only return deferrals */
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
+}
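For comparison, a sketch of the per-driver boilerplate this core hook makes unnecessary (illustrative probe, using the existing devm_pinctrl_get_select_default() consumer API from <linux/pinctrl/consumer.h>):

	static int example_probe(struct platform_device *pdev)
	{
		struct pinctrl *p;

		/* previously each driver selected its default pin state itself */
		p = devm_pinctrl_get_select_default(&pdev->dev);
		if (IS_ERR(p))
			return PTR_ERR(p);

		/* ... rest of probe ... */
		return 0;
	}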
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index acc3a8ded29d..9a6b05a35603 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -433,8 +433,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
*/
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
- if (!work_pending(&genpd->power_off_work))
- queue_work(pm_wq, &genpd->power_off_work);
+ queue_work(pm_wq, &genpd->power_off_work);
}
/**
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a3c1404c7933..2b7f77d3fcb0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -513,6 +513,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
Out:
TRACE_RESUME(error);
+
+ pm_runtime_enable(dev);
return error;
}
@@ -589,8 +591,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (!dev->power.is_suspended)
goto Unlock;
- pm_runtime_enable(dev);
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -930,6 +930,8 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
pm_callback_t callback = NULL;
char *info = NULL;
+ __pm_runtime_disable(dev, false);
+
if (dev->power.syscore)
return 0;
@@ -1133,11 +1135,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
complete_all(&dev->power.completion);
-
if (error)
async_error = error;
- else if (dev->power.is_suspended)
- __pm_runtime_disable(dev, false);
return error;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 50b2831e027d..32ee0fc7ea54 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -162,7 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
-EXPORT_SYMBOL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(opp_get_voltage);
/**
* opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -192,7 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
-EXPORT_SYMBOL(opp_get_freq);
+EXPORT_SYMBOL_GPL(opp_get_freq);
/**
* opp_get_opp_count() - Get number of opps available in the opp list
@@ -225,7 +225,7 @@ int opp_get_opp_count(struct device *dev)
return count;
}
-EXPORT_SYMBOL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(opp_get_opp_count);
/**
* opp_find_freq_exact() - search for an exact frequency
@@ -276,7 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
-EXPORT_SYMBOL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(opp_find_freq_exact);
/**
* opp_find_freq_ceil() - Search for an rounded ceil freq
@@ -323,7 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
/**
* opp_find_freq_floor() - Search for a rounded floor freq
@@ -374,7 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(opp_find_freq_floor);
/**
* opp_add() - Add an OPP table from a table definitions
@@ -568,7 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
-EXPORT_SYMBOL(opp_enable);
+EXPORT_SYMBOL_GPL(opp_enable);
/**
* opp_disable() - Disable a specific OPP
@@ -590,7 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
-EXPORT_SYMBOL(opp_disable);
+EXPORT_SYMBOL_GPL(opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
@@ -661,6 +661,7 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
/**
* opp_free_cpufreq_table() - free the cpufreq table
@@ -678,6 +679,7 @@ void opp_free_cpufreq_table(struct device *dev,
kfree(*table);
*table = NULL;
}
+EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
/**
@@ -738,4 +740,5 @@ int of_init_opp_table(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(of_init_opp_table);
#endif
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index ff46387f5308..3d4d1f8aac5c 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -91,6 +91,7 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
/**
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
@@ -542,19 +543,19 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
struct dev_pm_qos_request *req, s32 value)
{
struct device *ancestor = dev->parent;
- int error = -ENODEV;
+ int ret = -ENODEV;
while (ancestor && !ancestor->power.ignore_children)
ancestor = ancestor->parent;
if (ancestor)
- error = dev_pm_qos_add_request(ancestor, req,
- DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(ancestor, req,
+ DEV_PM_QOS_LATENCY, value);
- if (error < 0)
+ if (ret < 0)
req->dev = NULL;
- return error;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3148b10dc2e5..1244930e3d7a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -124,6 +124,76 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+ return dev->power.memalloc_noio;
+}
+
+/*
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True for setting the flag and False for clearing the flag.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * This function should only be called by block device or network
+ * device drivers, to solve the deadlock problem during runtime
+ * resume/suspend:
+ *
+ * If memory is allocated with GFP_KERNEL inside the runtime
+ * resume/suspend callback of any one of the device's ancestors (or
+ * the block device itself), the allocation may deadlock, since it
+ * might not complete until the block device becomes active and the
+ * involved page I/O finishes. This situation was first pointed out
+ * by Alan Stern. Network devices are involved in the iSCSI case.
+ *
+ * dev_hotplug_mutex is held in this function to handle the hotplug
+ * race, because pm_runtime_set_memalloc_noio() may be called from an
+ * async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected device (block/network device).
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+ static DEFINE_MUTEX(dev_hotplug_mutex);
+
+ mutex_lock(&dev_hotplug_mutex);
+ for (;;) {
+ bool enabled;
+
+ /* hold power lock since bitfield is not SMP-safe. */
+ spin_lock_irq(&dev->power.lock);
+ enabled = dev->power.memalloc_noio;
+ dev->power.memalloc_noio = enable;
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+		 * No need to enable the ancestors any more if the device
+		 * has already been enabled.
+ */
+ if (enabled && enable)
+ break;
+
+ dev = dev->parent;
+
+ /*
+		 * Clear the parent's flag only if none of its children
+		 * have the flag set, because an ancestor's flag may have
+		 * been set by any one of its descendants.
+ */
+ if (!dev || (!enable &&
+ device_for_each_child(dev, NULL,
+ dev_memalloc_noio)))
+ break;
+ }
+ mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
+
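A usage sketch under the documented constraints (names are illustrative): a block driver sets the flag after device_add() and clears it before device_del().

	static int example_blk_register(struct device *dev)
	{
		int ret;

		ret = device_add(dev);
		if (ret)
			return ret;
		/* runtime PM callbacks on this path must now avoid GFP_KERNEL I/O */
		pm_runtime_set_memalloc_noio(dev, true);
		return 0;
	}

	static void example_blk_unregister(struct device *dev)
	{
		pm_runtime_set_memalloc_noio(dev, false);
		device_del(dev);
	}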
/**
* rpm_check_suspend_allowed - Test whether a device may be suspended.
* @dev: Device to test.
@@ -278,7 +348,24 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
if (!cb)
return -ENOSYS;
- retval = __rpm_callback(cb, dev);
+ if (dev->power.memalloc_noio) {
+ unsigned int noio_flag;
+
+ /*
+		 * A deadlock might occur if memory is allocated with
+		 * GFP_KERNEL inside the runtime_suspend or runtime_resume
+		 * callback of a block device's ancestor or of the block
+		 * device itself. A network device may be part of an iSCSI
+		 * block device, so the network device and its ancestors
+		 * should be marked as memalloc_noio too.
+ */
+ noio_flag = memalloc_noio_save();
+ retval = __rpm_callback(cb, dev);
+ memalloc_noio_restore(noio_flag);
+ } else {
+ retval = __rpm_callback(cb, dev);
+ }
dev->power.runtime_error = retval;
return retval != -EACCES ? retval : -EIO;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index e6ee5e80e546..79715e7fa43e 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -382,6 +382,12 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
+ /*
+	 * An active wakeup source should bring the system
+	 * out of the PM_SUSPEND_FREEZE state
+ */
+ freeze_wake();
+
ws->active = true;
ws->active_count++;
ws->last_time = ktime_get();
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 5e75d1b683e2..cf129980abd0 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 401d1919635a..5a22bd33ce3d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <linux/fs.h>
#include <linux/list.h>
+#include <linux/wait.h>
struct regmap;
struct regcache_ops;
@@ -25,6 +26,7 @@ struct regmap_debugfs_off_cache {
off_t min;
off_t max;
unsigned int base_reg;
+ unsigned int max_reg;
};
struct regmap_format {
@@ -39,6 +41,13 @@ struct regmap_format {
unsigned int (*parse_val)(void *buf);
};
+struct regmap_async {
+ struct list_head list;
+ struct work_struct cleanup;
+ struct regmap *map;
+ void *work_buf;
+};
+
struct regmap {
struct mutex mutex;
spinlock_t spinlock;
@@ -53,6 +62,11 @@ struct regmap {
void *bus_context;
const char *name;
+ spinlock_t async_lock;
+ wait_queue_head_t async_waitq;
+ struct list_head async_list;
+ int async_ret;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
const char *debugfs_name;
@@ -74,6 +88,11 @@ struct regmap {
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+
+ bool defer_caching;
+
u8 read_flag_mask;
u8 write_flag_mask;
@@ -175,7 +194,10 @@ bool regcache_set_val(void *base, unsigned int idx,
unsigned int val, unsigned int word_size);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
+extern struct regcache_ops regcache_flat_ops;
#endif
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
new file mode 100644
index 000000000000..d9762e41959b
--- /dev/null
+++ b/drivers/base/regmap/regcache-flat.c
@@ -0,0 +1,72 @@
+/*
+ * Register cache access API - flat caching support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+
+#include "internal.h"
+
+static int regcache_flat_init(struct regmap *map)
+{
+ int i;
+ unsigned int *cache;
+
+ map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1),
+ GFP_KERNEL);
+ if (!map->cache)
+ return -ENOMEM;
+
+ cache = map->cache;
+
+ for (i = 0; i < map->num_reg_defaults; i++)
+ cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
+
+ return 0;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+ kfree(map->cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_flat_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ unsigned int *cache = map->cache;
+
+ *value = cache[reg];
+
+ return 0;
+}
+
+static int regcache_flat_write(struct regmap *map, unsigned int reg,
+ unsigned int value)
+{
+ unsigned int *cache = map->cache;
+
+ cache[reg] = value;
+
+ return 0;
+}
+
+struct regcache_ops regcache_flat_ops = {
+ .type = REGCACHE_FLAT,
+ .name = "flat",
+ .init = regcache_flat_init,
+ .exit = regcache_flat_exit,
+ .read = regcache_flat_read,
+ .write = regcache_flat_write,
+};
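A sketch of how a driver might opt into the new flat cache through its regmap_config (field names from include/linux/regmap.h; register widths are illustrative):

	static const struct regmap_config example_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.max_register = 0x7f,	/* flat cache allocates max_register + 1 words */
		.cache_type = REGCACHE_FLAT,
	};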
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 835883bda977..e69ff3e4742c 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -22,6 +22,7 @@
static const struct regcache_ops *cache_types[] = {
&regcache_rbtree_ops,
&regcache_lzo_ops,
+ &regcache_flat_ops,
};
static int regcache_hw_init(struct regmap *map)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 07aad786f817..78d5f20c5f5b 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -56,6 +56,19 @@ static const struct file_operations regmap_name_fops = {
.llseek = default_llseek,
};
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
+{
+ struct regmap_debugfs_off_cache *c;
+
+ while (!list_empty(&map->debugfs_off_cache)) {
+ c = list_first_entry(&map->debugfs_off_cache,
+ struct regmap_debugfs_off_cache,
+ list);
+ list_del(&c->list);
+ kfree(c);
+ }
+}
+
/*
* Work out where the start offset maps into register numbers, bearing
* in mind that we suppress hidden registers.
@@ -68,6 +81,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
struct regmap_debugfs_off_cache *c = NULL;
loff_t p = 0;
unsigned int i, ret;
+ unsigned int fpos_offset;
+ unsigned int reg_offset;
/*
* If we don't have a cache build one so we don't have to do a
@@ -80,6 +95,9 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
regmap_precious(map, i)) {
if (c) {
c->max = p - 1;
+ fpos_offset = c->max - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ c->max_reg = c->base_reg + reg_offset;
list_add_tail(&c->list,
&map->debugfs_off_cache);
c = NULL;
@@ -91,8 +109,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
/* No cache entry? Start a new one */
if (!c) {
c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- break;
+ if (!c) {
+ regmap_debugfs_free_dump_cache(map);
+ return base;
+ }
c->min = p;
c->base_reg = i;
}
@@ -101,19 +121,53 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
}
}
- /* Find the relevant block */
+ /* Close the last entry off if we didn't scan beyond it */
+ if (c) {
+ c->max = p - 1;
+ fpos_offset = c->max - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ c->max_reg = c->base_reg + reg_offset;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ }
+
+ /*
+ * This should never happen; we return above if we fail to
+ * allocate and we should never be in this code if there are
+ * no registers at all.
+ */
+ WARN_ON(list_empty(&map->debugfs_off_cache));
+ ret = base;
+
+ /* Find the relevant block:offset */
list_for_each_entry(c, &map->debugfs_off_cache, list) {
- if (*pos >= c->min && *pos <= c->max) {
- *pos = c->min;
- return c->base_reg;
+ if (from >= c->min && from <= c->max) {
+ fpos_offset = from - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ *pos = c->min + (reg_offset * map->debugfs_tot_len);
+ return c->base_reg + reg_offset;
}
- ret = c->max;
+ *pos = c->max;
+ ret = c->max_reg;
}
return ret;
}
+static inline void regmap_calc_tot_len(struct regmap *map,
+ void *buf, size_t count)
+{
+ /* Calculate the length of a fixed format */
+ if (!map->debugfs_tot_len) {
+ map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
+ buf, count);
+ map->debugfs_val_len = 2 * map->format.val_bytes;
+ map->debugfs_tot_len = map->debugfs_reg_len +
+ map->debugfs_val_len + 3; /* : \n */
+ }
+}
+
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
unsigned int to, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -132,14 +186,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (!buf)
return -ENOMEM;
- /* Calculate the length of a fixed format */
- if (!map->debugfs_tot_len) {
- map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
- buf, count);
- map->debugfs_val_len = 2 * map->format.val_bytes;
- map->debugfs_tot_len = map->debugfs_reg_len +
- map->debugfs_val_len + 3; /* : \n */
- }
+ regmap_calc_tot_len(map, buf, count);
/* Work out which register we're starting at */
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
@@ -154,7 +201,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos + 1 + map->debugfs_tot_len >= count)
+ if (buf_pos + map->debugfs_tot_len > count)
break;
/* Format the register */
@@ -387,16 +434,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
- struct regmap_debugfs_off_cache *c;
-
debugfs_remove_recursive(map->debugfs);
- while (!list_empty(&map->debugfs_off_cache)) {
- c = list_first_entry(&map->debugfs_off_cache,
- struct regmap_debugfs_off_cache,
- list);
- list_del(&c->list);
- kfree(c);
- }
+ regmap_debugfs_free_dump_cache(map);
kfree(map->debugfs_name);
}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5972ad958544..4706c63d0bc6 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -34,6 +34,7 @@ struct regmap_irq_chip_data {
int irq;
int wake_count;
+ void *status_reg_buf;
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
@@ -87,6 +88,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (ret != 0)
dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
+
+ reg = d->chip->wake_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ if (d->wake_buf) {
+ if (d->chip->wake_invert)
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ ~d->wake_buf[i]);
+ else
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ d->wake_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev,
+ "Failed to sync wakes in %x: %d\n",
+ reg, ret);
+ }
}
if (d->chip->runtime_pm)
@@ -129,16 +147,15 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- if (!d->chip->wake_base)
- return -EINVAL;
-
if (on) {
- d->wake_buf[irq_data->reg_offset / map->reg_stride]
- &= ~irq_data->mask;
+ if (d->wake_buf)
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ &= ~irq_data->mask;
d->wake_count++;
} else {
- d->wake_buf[irq_data->reg_offset / map->reg_stride]
- |= irq_data->mask;
+ if (d->wake_buf)
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ |= irq_data->mask;
d->wake_count--;
}
@@ -172,25 +189,69 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
/*
- * Ignore masked IRQs and ack if we need to; we ack early so
- * there is no race between handling and acknowleding the
- * interrupt. We assume that typically few of the interrupts
- * will fire simultaneously so don't worry about overhead from
- * doing a write per register.
+ * Read in the statuses, using a single bulk read if possible
+ * in order to reduce the I/O overheads.
*/
- for (i = 0; i < data->chip->num_regs; i++) {
- ret = regmap_read(map, chip->status_base + (i * map->reg_stride
- * data->irq_reg_stride),
- &data->status_buf[i]);
+ if (!map->use_single_rw && map->reg_stride == 1 &&
+ data->irq_reg_stride == 1) {
+ u8 *buf8 = data->status_reg_buf;
+ u16 *buf16 = data->status_reg_buf;
+ u32 *buf32 = data->status_reg_buf;
+ BUG_ON(!data->status_reg_buf);
+
+ ret = regmap_bulk_read(map, chip->status_base,
+ data->status_reg_buf,
+ chip->num_regs);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- if (chip->runtime_pm)
- pm_runtime_put(map->dev);
+ ret);
return IRQ_NONE;
}
+ for (i = 0; i < data->chip->num_regs; i++) {
+ switch (map->format.val_bytes) {
+ case 1:
+ data->status_buf[i] = buf8[i];
+ break;
+ case 2:
+ data->status_buf[i] = buf16[i];
+ break;
+ case 4:
+ data->status_buf[i] = buf32[i];
+ break;
+ default:
+ BUG();
+ return IRQ_NONE;
+ }
+ }
+
+ } else {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ ret = regmap_read(map, chip->status_base +
+ (i * map->reg_stride
+ * data->irq_reg_stride),
+ &data->status_buf[i]);
+
+ if (ret != 0) {
+ dev_err(map->dev,
+ "Failed to read IRQ status: %d\n",
+ ret);
+ if (chip->runtime_pm)
+ pm_runtime_put(map->dev);
+ return IRQ_NONE;
+ }
+ }
+ }
+
+ /*
+ * Ignore masked IRQs and ack if we need to; we ack early so
+ * there is no race between handling and acknowledging the
+ * interrupt. We assume that typically few of the interrupts
+ * will fire simultaneously so don't worry about overhead from
+ * doing a write per register.
+ */
+ for (i = 0; i < data->chip->num_regs; i++) {
data->status_buf[i] &= ~data->mask_buf[i];
if (data->status_buf[i] && chip->ack_base) {
@@ -316,11 +377,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
d->irq_chip = regmap_irq_chip;
d->irq_chip.name = chip->name;
- if (!chip->wake_base) {
- d->irq_chip.irq_set_wake = NULL;
- d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND |
- IRQCHIP_SKIP_SET_WAKE;
- }
d->irq = irq;
d->map = map;
d->chip = chip;
@@ -331,6 +387,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
else
d->irq_reg_stride = 1;
+ if (!map->use_single_rw && map->reg_stride == 1 &&
+ d->irq_reg_stride == 1) {
+ d->status_reg_buf = kmalloc(map->format.val_bytes *
+ chip->num_regs, GFP_KERNEL);
+ if (!d->status_reg_buf)
+ goto err_alloc;
+ }
+
mutex_init(&d->lock);
for (i = 0; i < chip->num_irqs; i++)
@@ -361,8 +425,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
d->wake_buf[i] = d->mask_buf_def[i];
reg = chip->wake_base +
(i * map->reg_stride * d->irq_reg_stride);
- ret = regmap_update_bits(map, reg, d->wake_buf[i],
- d->wake_buf[i]);
+
+ if (chip->wake_invert)
+ ret = regmap_update_bits(map, reg,
+ d->mask_buf_def[i],
+ 0);
+ else
+ ret = regmap_update_bits(map, reg,
+ d->mask_buf_def[i],
+ d->wake_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
@@ -401,6 +472,7 @@ err_alloc:
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_buf);
+ kfree(d->status_reg_buf);
kfree(d);
return ret;
}
@@ -422,6 +494,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
kfree(d->status_buf);
kfree(d);
}
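A hedged sketch of how a chip driver might describe wake registers now that wake_base and wake_invert are synced by the core and statuses can be read in a single bulk operation; every register address and IRQ bit below is invented for illustration.

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical layout: one status/mask/ack/wake register, two sources. */
static const struct regmap_irq example_irqs[] = {
	{ .reg_offset = 0, .mask = 0x01 },
	{ .reg_offset = 0, .mask = 0x02 },
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= 0x10,
	.mask_base	= 0x20,
	.ack_base	= 0x30,
	.wake_base	= 0x40,	/* synced in regmap_irq_sync_unlock() */
	.wake_invert	= true,	/* assumed: 1 enables wake on this chip */
	.num_regs	= 1,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
};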
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index f05fc74dd84a..98745dd77e8c 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -16,6 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -26,6 +27,7 @@
struct regmap_mmio_context {
void __iomem *regs;
unsigned val_bytes;
+ struct clk *clk;
};
static int regmap_mmio_gather_write(void *context,
@@ -34,9 +36,16 @@ static int regmap_mmio_gather_write(void *context,
{
struct regmap_mmio_context *ctx = context;
u32 offset;
+ int ret;
BUG_ON(reg_size != 4);
+ if (ctx->clk) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
offset = *(u32 *)reg;
while (val_size) {
@@ -64,6 +73,9 @@ static int regmap_mmio_gather_write(void *context,
offset += ctx->val_bytes;
}
+ if (ctx->clk)
+ clk_disable(ctx->clk);
+
return 0;
}
@@ -80,9 +92,16 @@ static int regmap_mmio_read(void *context,
{
struct regmap_mmio_context *ctx = context;
u32 offset;
+ int ret;
BUG_ON(reg_size != 4);
+ if (ctx->clk) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
offset = *(u32 *)reg;
while (val_size) {
@@ -110,11 +129,20 @@ static int regmap_mmio_read(void *context,
offset += ctx->val_bytes;
}
+ if (ctx->clk)
+ clk_disable(ctx->clk);
+
return 0;
}
static void regmap_mmio_free_context(void *context)
{
+ struct regmap_mmio_context *ctx = context;
+
+ if (ctx->clk) {
+ clk_unprepare(ctx->clk);
+ clk_put(ctx->clk);
+ }
kfree(context);
}
@@ -128,11 +156,14 @@ static struct regmap_bus regmap_mmio = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
int min_stride;
+ int ret;
if (config->reg_bits != 32)
return ERR_PTR(-EINVAL);
@@ -179,37 +210,59 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ if (clk_id == NULL)
+ return ctx;
+
+ ctx->clk = clk_get(dev, clk_id);
+ if (IS_ERR(ctx->clk)) {
+ ret = PTR_ERR(ctx->clk);
+ goto err_free;
+ }
+
+ ret = clk_prepare(ctx->clk);
+ if (ret < 0) {
+ clk_put(ctx->clk);
+ goto err_free;
+ }
+
return ctx;
+
+err_free:
+ kfree(ctx);
+
+ return ERR_PTR(ret);
}
/**
- * regmap_init_mmio(): Initialise register map
+ * regmap_init_mmio_clk(): Initialise register map with register clock
*
* @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
* @regs: Pointer to memory-mapped IO region
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-struct regmap *regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
- ctx = regmap_mmio_gen_context(regs, config);
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return regmap_init(dev, &regmap_mmio, ctx, config);
}
-EXPORT_SYMBOL_GPL(regmap_init_mmio);
+EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
/**
- * devm_regmap_init_mmio(): Initialise managed register map
+ * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
*
* @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
* @regs: Pointer to memory-mapped IO region
* @config: Configuration for register map
*
@@ -217,18 +270,18 @@ EXPORT_SYMBOL_GPL(regmap_init_mmio);
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-struct regmap *devm_regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
- ctx = regmap_mmio_gen_context(regs, config);
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return devm_regmap_init(dev, &regmap_mmio, ctx, config);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_mmio);
+EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index ffa46a92ad33..4c506bd940f3 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -15,6 +15,21 @@
#include <linux/init.h>
#include <linux/module.h>
+#include "internal.h"
+
+struct regmap_async_spi {
+ struct regmap_async core;
+ struct spi_message m;
+ struct spi_transfer t[2];
+};
+
+static void regmap_spi_complete(void *data)
+{
+ struct regmap_async_spi *async = data;
+
+ regmap_async_complete_cb(&async->core, async->m.status);
+}
+
static int regmap_spi_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
@@ -40,6 +55,43 @@ static int regmap_spi_gather_write(void *context,
return spi_sync(spi, &m);
}
+static int regmap_spi_async_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *a)
+{
+ struct regmap_async_spi *async = container_of(a,
+ struct regmap_async_spi,
+ core);
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ async->t[0].tx_buf = reg;
+ async->t[0].len = reg_len;
+ async->t[1].tx_buf = val;
+ async->t[1].len = val_len;
+
+ spi_message_init(&async->m);
+ spi_message_add_tail(&async->t[0], &async->m);
+ spi_message_add_tail(&async->t[1], &async->m);
+
+ async->m.complete = regmap_spi_complete;
+ async->m.context = async;
+
+ return spi_async(spi, &async->m);
+}
+
+static struct regmap_async *regmap_spi_async_alloc(void)
+{
+ struct regmap_async_spi *async_spi;
+
+ async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
+ if (!async_spi)
+ return NULL;
+
+ return &async_spi->core;
+}
+
static int regmap_spi_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
@@ -53,6 +105,8 @@ static int regmap_spi_read(void *context,
static struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
+ .async_write = regmap_spi_async_write,
+ .async_alloc = regmap_spi_async_alloc,
.read = regmap_spi_read,
.read_flag_mask = 0x80,
};
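A sketch of the calling pattern the new async path enables, for example firmware download over SPI; example_download and its arguments are hypothetical, and the data buffer must stay valid until completion.

#include <linux/regmap.h>

/* Hypothetical block download: queue the raw write asynchronously, then
 * wait for the bus to drain before the buffer may be freed. */
static int example_download(struct regmap *map, unsigned int reg,
			    const void *data, size_t len)
{
	int ret;

	ret = regmap_raw_write_async(map, reg, data, len);
	if (ret != 0)
		return ret;

	/* Returns the first error reported by any completed async write. */
	return regmap_async_complete(map);
}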
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 42d5cb0f503f..3d2367501fd0 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>
+#include <linux/sched.h>
#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>
@@ -34,6 +35,22 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
+static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val);
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val);
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val);
+
+static void async_cleanup(struct work_struct *work)
+{
+ struct regmap_async *async = container_of(work, struct regmap_async,
+ cleanup);
+
+ kfree(async->work_buf);
+ kfree(async);
+}
+
bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges)
@@ -372,7 +389,7 @@ struct regmap *regmap_init(struct device *dev,
enum regmap_endian reg_endian, val_endian;
int i, j;
- if (!bus || !config)
+ if (!config)
goto err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -386,7 +403,8 @@ struct regmap *regmap_init(struct device *dev,
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
} else {
- if (bus->fast_io) {
+ if ((bus && bus->fast_io) ||
+ config->fast_io) {
spin_lock_init(&map->spinlock);
map->lock = regmap_lock_spinlock;
map->unlock = regmap_unlock_spinlock;
@@ -423,13 +441,27 @@ struct regmap *regmap_init(struct device *dev,
map->cache_type = config->cache_type;
map->name = config->name;
+ spin_lock_init(&map->async_lock);
+ INIT_LIST_HEAD(&map->async_list);
+ init_waitqueue_head(&map->async_waitq);
+
if (config->read_flag_mask || config->write_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
- } else {
+ } else if (bus) {
map->read_flag_mask = bus->read_flag_mask;
}
+ if (!bus) {
+ map->reg_read = config->reg_read;
+ map->reg_write = config->reg_write;
+
+ map->defer_caching = false;
+ goto skip_format_initialization;
+ } else {
+ map->reg_read = _regmap_bus_read;
+ }
+
reg_endian = config->reg_format_endian;
if (reg_endian == REGMAP_ENDIAN_DEFAULT)
reg_endian = bus->reg_format_endian_default;
@@ -500,6 +532,12 @@ struct regmap *regmap_init(struct device *dev,
}
break;
+ case 24:
+ if (reg_endian != REGMAP_ENDIAN_BIG)
+ goto err_map;
+ map->format.format_reg = regmap_format_24;
+ break;
+
case 32:
switch (reg_endian) {
case REGMAP_ENDIAN_BIG:
@@ -575,6 +613,16 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
+ if (map->format.format_write) {
+ map->defer_caching = false;
+ map->reg_write = _regmap_bus_formatted_write;
+ } else if (map->format.format_val) {
+ map->defer_caching = true;
+ map->reg_write = _regmap_bus_raw_write;
+ }
+
+skip_format_initialization:
+
map->range_tree = RB_ROOT;
for (i = 0; i < config->num_ranges; i++) {
const struct regmap_range_cfg *range_cfg = &config->ranges[i];
@@ -776,7 +824,7 @@ void regmap_exit(struct regmap *map)
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
- if (map->bus->free_context)
+ if (map->bus && map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
kfree(map);
@@ -870,15 +918,20 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
}
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len)
+ const void *val, size_t val_len, bool async)
{
struct regmap_range_node *range;
+ unsigned long flags;
u8 *u8 = map->work_buf;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
void *buf;
int ret = -ENOTSUPP;
size_t len;
int i;
+ BUG_ON(!map->bus);
+
/* Check for unwritable registers before we start */
if (map->writeable_reg)
for (i = 0; i < val_len / map->format.val_bytes; i++)
@@ -918,7 +971,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
dev_dbg(map->dev, "Writing window %d/%zu\n",
win_residue, val_len / map->format.val_bytes);
ret = _regmap_raw_write(map, reg, val, win_residue *
- map->format.val_bytes);
+ map->format.val_bytes, async);
if (ret != 0)
return ret;
@@ -941,6 +994,50 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
u8[0] |= map->write_flag_mask;
+ if (async && map->bus->async_write) {
+ struct regmap_async *async = map->bus->async_alloc();
+ if (!async)
+ return -ENOMEM;
+
+ async->work_buf = kzalloc(map->format.buf_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!async->work_buf) {
+ kfree(async);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&async->cleanup, async_cleanup);
+ async->map = map;
+
+ /* If the caller supplied the value we can use it safely. */
+ memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
+ map->format.reg_bytes + map->format.val_bytes);
+ if (val == work_val)
+ val = async->work_buf + map->format.pad_bytes +
+ map->format.reg_bytes;
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ list_add_tail(&async->list, &map->async_list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ ret = map->bus->async_write(map->bus_context, async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len, async);
+
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to schedule write: %d\n",
+ ret);
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ list_del(&async->list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ kfree(async->work_buf);
+ kfree(async);
+ }
+ }
+
trace_regmap_hw_write_start(map->dev, reg,
val_len / map->format.val_bytes);
@@ -948,8 +1045,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
* send the work_buf directly, otherwise try to do a gather
* write.
*/
- if (val == (map->work_buf + map->format.pad_bytes +
- map->format.reg_bytes))
+ if (val == work_val)
ret = map->bus->write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes +
@@ -981,14 +1077,62 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
return ret;
}
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+ struct regmap_range_node *range;
+ struct regmap *map = context;
+
+ BUG_ON(!map->bus || !map->format.format_write);
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ map->format.format_write(map, reg, val);
+
+ trace_regmap_hw_write_start(map->dev, reg, 1);
+
+ ret = map->bus->write(map->bus_context, map->work_buf,
+ map->format.buf_size);
+
+ trace_regmap_hw_write_done(map->dev, reg, 1);
+
+ return ret;
+}
+
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct regmap *map = context;
+
+ BUG_ON(!map->bus || !map->format.format_val);
+
+ map->format.format_val(map->work_buf + map->format.reg_bytes
+ + map->format.pad_bytes, val, 0);
+ return _regmap_raw_write(map, reg,
+ map->work_buf +
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ map->format.val_bytes, false);
+}
+
+static inline void *_regmap_map_get_context(struct regmap *map)
+{
+ return (map->bus) ? map : map->bus_context;
+}
+
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val)
{
- struct regmap_range_node *range;
int ret;
- BUG_ON(!map->format.format_write && !map->format.format_val);
+ void *context = _regmap_map_get_context(map);
- if (!map->cache_bypass && map->format.format_write) {
+ if (!map->cache_bypass && !map->defer_caching) {
ret = regcache_write(map, reg, val);
if (ret != 0)
return ret;
@@ -1005,33 +1149,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
trace_regmap_reg_write(map->dev, reg, val);
- if (map->format.format_write) {
- range = _regmap_range_lookup(map, reg);
- if (range) {
- ret = _regmap_select_page(map, &reg, range, 1);
- if (ret != 0)
- return ret;
- }
-
- map->format.format_write(map, reg, val);
-
- trace_regmap_hw_write_start(map->dev, reg, 1);
-
- ret = map->bus->write(map->bus_context, map->work_buf,
- map->format.buf_size);
-
- trace_regmap_hw_write_done(map->dev, reg, 1);
-
- return ret;
- } else {
- map->format.format_val(map->work_buf + map->format.reg_bytes
- + map->format.pad_bytes, val, 0);
- return _regmap_raw_write(map, reg,
- map->work_buf +
- map->format.reg_bytes +
- map->format.pad_bytes,
- map->format.val_bytes);
- }
+ return map->reg_write(context, reg, val);
}
/**
@@ -1082,6 +1200,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
{
int ret;
+ if (!map->bus)
+ return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1089,7 +1209,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len);
+ ret = _regmap_raw_write(map, reg, val, val_len, false);
map->unlock(map->lock_arg);
@@ -1106,7 +1226,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
* @val_count: Number of registers to write
*
* This function is intended to be used for writing a large block of
- * data to be device either in single transfer or multiple transfer.
+ * data to the device either in a single transfer or multiple transfers.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
@@ -1118,6 +1238,8 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_bytes = map->format.val_bytes;
void *wval;
+ if (!map->bus)
+ return -EINVAL;
if (!map->format.parse_val)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1145,14 +1267,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (map->use_single_rw) {
for (i = 0; i < val_count; i++) {
ret = regmap_raw_write(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
+ reg + (i * map->reg_stride),
+ val + (i * val_bytes),
+ val_bytes);
if (ret != 0)
return ret;
}
} else {
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
+ false);
}
if (val_bytes != 1)
@@ -1164,6 +1287,48 @@ out:
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
+/**
+ * regmap_raw_write_async(): Write raw values to one or more registers
+ * asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ * device. Must be valid until regmap_async_complete() is called.
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device. No formatting will be done on the data provided.
+ *
+ * If supported by the underlying bus the write will be scheduled
+ * asynchronously, helping maximise I/O speed on higher speed buses
+ * like SPI. regmap_async_complete() can be called to ensure that all
+ * asynchronous writes have been completed.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ int ret;
+
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_raw_write(map, reg, val, val_len, true);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write_async);
+
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int val_len)
{
@@ -1171,6 +1336,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
u8 *u8 = map->work_buf;
int ret;
+ BUG_ON(!map->bus);
+
range = _regmap_range_lookup(map, reg);
if (range) {
ret = _regmap_select_page(map, &reg, range,
@@ -1202,10 +1369,29 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
+static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+ struct regmap *map = context;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ if (ret == 0)
+ *val = map->format.parse_val(map->work_buf);
+
+ return ret;
+}
+
static int _regmap_read(struct regmap *map, unsigned int reg,
unsigned int *val)
{
int ret;
+ void *context = _regmap_map_get_context(map);
+
+ BUG_ON(!map->reg_read);
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
@@ -1213,26 +1399,21 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
return 0;
}
- if (!map->format.parse_val)
- return -EINVAL;
-
if (map->cache_only)
return -EBUSY;
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ ret = map->reg_read(context, reg, val);
if (ret == 0) {
- *val = map->format.parse_val(map->work_buf);
-
#ifdef LOG_DEVICE
if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
dev_info(map->dev, "%x => %x\n", reg, *val);
#endif
trace_regmap_reg_read(map->dev, reg, *val);
- }
- if (ret == 0 && !map->cache_bypass)
- regcache_write(map, reg, *val);
+ if (!map->cache_bypass)
+ regcache_write(map, reg, *val);
+ }
return ret;
}
@@ -1283,6 +1464,8 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int v;
int ret, i;
+ if (!map->bus)
+ return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1334,6 +1517,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_bytes = map->format.val_bytes;
bool vol = regmap_volatile_range(map, reg, val_count);
+ if (!map->bus)
+ return -EINVAL;
if (!map->format.parse_val)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1450,6 +1635,68 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+void regmap_async_complete_cb(struct regmap_async *async, int ret)
+{
+ struct regmap *map = async->map;
+ bool wake;
+
+ spin_lock(&map->async_lock);
+
+ list_del(&async->list);
+ wake = list_empty(&map->async_list);
+
+ if (ret != 0)
+ map->async_ret = ret;
+
+ spin_unlock(&map->async_lock);
+
+ schedule_work(&async->cleanup);
+
+ if (wake)
+ wake_up(&map->async_waitq);
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
+
+static int regmap_async_is_done(struct regmap *map)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ ret = list_empty(&map->async_list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ return ret;
+}
+
+/**
+ * regmap_async_complete: Ensure all asynchronous I/O has completed.
+ *
+ * @map: Map to operate on.
+ *
+ * Blocks until any pending asynchronous I/O has completed. Returns
+ * an error code for any failed I/O operations.
+ */
+int regmap_async_complete(struct regmap *map)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Nothing to do with no async support */
+ if (!map->bus->async_write)
+ return 0;
+
+ wait_event(map->async_waitq, regmap_async_is_done(map));
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ ret = map->async_ret;
+ map->async_ret = 0;
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete);
+
/**
* regmap_register_patch: Register and apply register updates to be applied
 * on device initialisation
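A hedged sketch of the bus-less setup the regmap.c changes above allow, where the config supplies reg_read/reg_write callbacks and regmap_init() is called with a NULL bus; every example_* name is hypothetical.

#include <linux/regmap.h>

/* Hypothetical device reached through firmware calls rather than a bus. */
static int example_reg_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	/* device specific access would go here */
	*val = 0;
	return 0;
}

static int example_reg_write(void *context, unsigned int reg,
			     unsigned int val)
{
	/* device specific access would go here */
	return 0;
}

static const struct regmap_config example_no_bus_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.reg_read	= example_reg_read,	/* used instead of a bus */
	.reg_write	= example_reg_write,
	.fast_io	= true,	/* take the spinlock, not the mutex */
};

/* Registered with a NULL bus:
 * map = regmap_init(dev, NULL, ctx, &example_no_bus_config); */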
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index a533af218368..8b4221cfd118 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -65,6 +65,14 @@ config BCMA_DRIVER_GMAC_CMN
If unsure, say N
+config BCMA_DRIVER_GPIO
+ bool "BCMA GPIO driver"
+ depends on BCMA && GPIOLIB
+ help
+ Driver to provide access to the GPIO pins of the bcma bus.
+
+ If unsure, say N
+
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 8ad42d41b2f2..734b32f09c0a 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -6,6 +6,7 @@ bcma-y += driver_pci.o
bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
+bcma-$(CONFIG_BCMA_DRIVER_GPIO) += driver_gpio.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
obj-$(CONFIG_BCMA) += bcma.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 537ae53231cd..79595a001204 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -22,7 +22,7 @@
struct bcma_bus;
/* main.c */
-int __devinit bcma_bus_register(struct bcma_bus *bus);
+int bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
@@ -31,6 +31,8 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
+struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit);
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
@@ -45,6 +47,7 @@ int bcma_sprom_get(struct bcma_bus *bus);
/* driver_chipcommon.c */
#ifdef CONFIG_BCMA_DRIVER_MIPS
void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_pflash_dev;
#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_pmu.c */
@@ -87,8 +90,23 @@ u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
-bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
-void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+#ifdef CONFIG_BCMA_DRIVER_GPIO
+/* driver_gpio.c */
+int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
+{
+ return -ENOTSUPP;
+}
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+ return 0;
+}
+#endif /* CONFIG_BCMA_DRIVER_GPIO */
+
#endif
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index dc96dd8ebff2..28fa50ad87be 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -114,6 +114,8 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
if (cc->early_setup_done)
return;
+ spin_lock_init(&cc->gpio_lock);
+
if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
@@ -202,28 +204,97 @@ u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask)
u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
+/*
+ * If the bit is set to 0, chipcommon controls this GPIO,
+ * if the bit is set to 1, it is used by some part of the chip and not our code.
+ */
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control);
u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res;
+
+ if (cc->core->id.rev < 20)
+ return 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLUP, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res;
+
+ if (cc->core->id.rev < 20)
+ return 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLDOWN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
#ifdef CONFIG_BCMA_DRIVER_MIPS
@@ -258,7 +329,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
return;
}
- irq = bcma_core_mips_irq(cc->core);
+ irq = bcma_core_irq(cc->core);
/* Determine the registers of the UARTs */
cc->nr_serial_ports = (cc->capabilities & BCMA_CC_CAP_NRUART);
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index dbda91e4dff5..d4f699aef8c4 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -5,11 +5,11 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "bcma_private.h"
+
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-#include "bcma_private.h"
-
struct platform_device bcma_nflash_dev = {
.name = "bcma_nflash",
.num_resources = 0,
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
- cc->core->id.rev != 0x38) {
+ cc->core->id.rev != 38) {
bcma_err(bus, "NAND flash on unsupported board!\n");
return -ENOTSUPP;
}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index e162999bf916..932b101dee36 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -13,12 +13,13 @@
#include <linux/export.h>
#include <linux/bcma/bcma.h>
-static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
+u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
{
@@ -263,7 +264,7 @@ static u32 bcma_pmu_pll_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
}
/* query bus clock frequency for PMU-enabled chipcommon */
-static u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
+u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
@@ -292,6 +293,7 @@ static u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
}
return BCMA_CC_PMU_HT_CLOCK;
}
+EXPORT_SYMBOL_GPL(bcma_pmu_get_bus_clock);
/* query cpu clock frequency for PMU-enabled chipcommon */
u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 63e688393825..e6ed4fe5dced 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -5,11 +5,11 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "bcma_private.h"
+
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-#include "bcma_private.h"
-
static struct resource bcma_sflash_resource = {
.name = "bcma_sflash",
.start = BCMA_SOC_FLASH2,
@@ -35,7 +35,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
{ "M25P40", 0x12, 0x10000, 8, },
{ "M25P16", 0x14, 0x10000, 32, },
- { "M25P32", 0x14, 0x10000, 64, },
+ { "M25P32", 0x15, 0x10000, 64, },
{ "M25P64", 0x16, 0x10000, 128, },
{ "M25FL128", 0x17, 0x10000, 256, },
{ 0 },
diff --git a/drivers/bcma/driver_gmac_cmn.c b/drivers/bcma/driver_gmac_cmn.c
index 834225f65e8f..dcb137926d31 100644
--- a/drivers/bcma/driver_gmac_cmn.c
+++ b/drivers/bcma/driver_gmac_cmn.c
@@ -8,7 +8,7 @@
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
-void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
{
mutex_init(&gc->phy_mutex);
}
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
new file mode 100644
index 000000000000..45f0996a3752
--- /dev/null
+++ b/drivers/bcma/driver_gpio.c
@@ -0,0 +1,114 @@
+/*
+ * Broadcom specific AMBA
+ * GPIO driver
+ *
+ * Copyright 2011, Broadcom Corporation
+ * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcma_private.h"
+
+static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
+{
+ return container_of(chip, struct bcma_drv_cc, gpio);
+}
+
+static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ return !!bcma_chipco_gpio_in(cc, 1 << gpio);
+}
+
+static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+}
+
+static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_outen(cc, 1 << gpio, 0);
+ return 0;
+}
+
+static int bcma_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_outen(cc, 1 << gpio, 1 << gpio);
+ bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+}
+
+static int bcma_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_control(cc, 1 << gpio, 0);
+ /* clear pulldown */
+ bcma_chipco_gpio_pulldown(cc, 1 << gpio, 0);
+ /* Set pullup */
+ bcma_chipco_gpio_pullup(cc, 1 << gpio, 1 << gpio);
+
+ return 0;
+}
+
+static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ /* clear pullup */
+ bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
+}
+
+static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+ return bcma_core_irq(cc->core);
+ else
+ return -EINVAL;
+}
+
+int bcma_gpio_init(struct bcma_drv_cc *cc)
+{
+ struct gpio_chip *chip = &cc->gpio;
+
+ chip->label = "bcma_gpio";
+ chip->owner = THIS_MODULE;
+ chip->request = bcma_gpio_request;
+ chip->free = bcma_gpio_free;
+ chip->get = bcma_gpio_get_value;
+ chip->set = bcma_gpio_set_value;
+ chip->direction_input = bcma_gpio_direction_input;
+ chip->direction_output = bcma_gpio_direction_output;
+ chip->to_irq = bcma_gpio_to_irq;
+ chip->ngpio = 16;
+ /* There is just one SoC in one device and its GPIO addresses should be
+ * deterministic to address them more easily. The other buses could get
+ * a random base number. */
+ if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+ chip->base = 0;
+ else
+ chip->base = -1;
+
+ return gpiochip_add(chip);
+}
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+ return gpiochip_remove(&cc->gpio);
+}
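A small, assumed example of consuming the new ChipCommon GPIO chip through the legacy gpiolib calls; on a SoC bus the chip registers at base 0, so ChipCommon GPIO 3 is global GPIO number 3. The LED use and pin number are invented.

#include <linux/gpio.h>

static int example_led_init(void)
{
	int err;

	/* Hypothetical board wiring: an LED on ChipCommon GPIO 3. */
	err = gpio_request(3, "example-led");
	if (err)
		return err;

	return gpio_direction_output(3, 1);
}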
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 792daad28cbc..9a7f0e3ab5a3 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -14,11 +14,33 @@
#include <linux/bcma/bcma.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>
+static const char *part_probes[] = { "bcm47xxpart", NULL };
+
+static struct physmap_flash_data bcma_pflash_data = {
+ .part_probe_types = part_probes,
+};
+
+static struct resource bcma_pflash_resource = {
+ .name = "bcma_pflash",
+ .flags = IORESOURCE_MEM,
+};
+
+struct platform_device bcma_pflash_dev = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &bcma_pflash_data,
+ },
+ .resource = &bcma_pflash_resource,
+ .num_resources = 1,
+};
+
/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
@@ -74,28 +96,41 @@ static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
return dev->core_index;
flag = bcma_aread32(dev, BCMA_MIPS_OOBSELOUTA30);
- return flag & 0x1F;
+ if (flag)
+ return flag & 0x1F;
+ else
+ return 0x3f;
}
/* Get the MIPS IRQ assignment for a specified device.
* If unassigned, 0 is returned.
+ * If disabled, 5 is returned.
+ * If not supported, 6 is returned.
*/
-unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+static unsigned int bcma_core_mips_irq(struct bcma_device *dev)
{
struct bcma_device *mdev = dev->bus->drv_mips.core;
u32 irqflag;
unsigned int irq;
irqflag = bcma_core_mips_irqflag(dev);
+ if (irqflag == 0x3f)
+ return 6;
- for (irq = 1; irq <= 4; irq++)
+ for (irq = 0; irq <= 4; irq++)
if (bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq)) &
(1 << irqflag))
return irq;
- return 0;
+ return 5;
+}
+
+unsigned int bcma_core_irq(struct bcma_device *dev)
+{
+ unsigned int mips_irq = bcma_core_mips_irq(dev);
+ return mips_irq <= 4 ? mips_irq + 2 : 0;
}
-EXPORT_SYMBOL(bcma_core_mips_irq);
+EXPORT_SYMBOL(bcma_core_irq);
static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
{
@@ -114,7 +149,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
~(1 << irqflag));
- else
+ else if (oldirq != 5)
bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
/* assign the new one */
@@ -123,9 +158,9 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) |
(1 << irqflag));
} else {
- u32 oldirqflag = bcma_read32(mdev,
- BCMA_MIPS_MIPS74K_INTMASK(irq));
- if (oldirqflag) {
+ u32 irqinitmask = bcma_read32(mdev,
+ BCMA_MIPS_MIPS74K_INTMASK(irq));
+ if (irqinitmask) {
struct bcma_device *core;
/* backplane irq line is in use, find out who uses
@@ -133,7 +168,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
*/
list_for_each_entry(core, &bus->cores, list) {
if ((1 << bcma_core_mips_irqflag(core)) ==
- oldirqflag) {
+ irqinitmask) {
bcma_core_mips_set_irq(core, 0);
break;
}
@@ -143,15 +178,31 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
1 << irqflag);
}
- bcma_info(bus, "set_irq: core 0x%04x, irq %d => %d\n",
- dev->id.id, oldirq + 2, irq + 2);
+ bcma_debug(bus, "set_irq: core 0x%04x, irq %d => %d\n",
+ dev->id.id, oldirq <= 4 ? oldirq + 2 : 0, irq + 2);
+}
+
+static void bcma_core_mips_set_irq_name(struct bcma_bus *bus, unsigned int irq,
+ u16 coreid, u8 unit)
+{
+ struct bcma_device *core;
+
+ core = bcma_find_core_unit(bus, coreid, unit);
+ if (!core) {
+ bcma_warn(bus,
+ "Can not find core (id: 0x%x, unit %i) for IRQ configuration.\n",
+ coreid, unit);
+ return;
+ }
+
+ bcma_core_mips_set_irq(core, irq);
}
static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
{
int i;
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
- printk(KERN_INFO KBUILD_MODNAME ": core 0x%04x, irq :", dev->id.id);
+ printk(KERN_DEBUG KBUILD_MODNAME ": core 0x%04x, irq :", dev->id.id);
for (i = 0; i <= 6; i++)
printk(" %s%s", irq_name[i], i == irq ? "*" : " ");
printk("\n");
@@ -182,6 +233,7 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus = mcore->core->bus;
struct bcma_drv_cc *cc = &bus->drv_cc;
+ struct bcma_pflash *pflash = &cc->pflash;
switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
@@ -191,15 +243,20 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
break;
case BCMA_CC_FLASHT_PARA:
bcma_debug(bus, "Found parallel flash\n");
- cc->pflash.present = true;
- cc->pflash.window = BCMA_SOC_FLASH2;
- cc->pflash.window_size = BCMA_SOC_FLASH2_SZ;
+ pflash->present = true;
+ pflash->window = BCMA_SOC_FLASH2;
+ pflash->window_size = BCMA_SOC_FLASH2_SZ;
if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
BCMA_CC_FLASH_CFG_DS) == 0)
- cc->pflash.buswidth = 1;
+ pflash->buswidth = 1;
else
- cc->pflash.buswidth = 2;
+ pflash->buswidth = 2;
+
+ bcma_pflash_data.width = pflash->buswidth;
+ bcma_pflash_resource.start = pflash->window;
+ bcma_pflash_resource.end = pflash->window + pflash->window_size;
+
break;
default:
bcma_err(bus, "Flash type not supported\n");
@@ -227,6 +284,32 @@ void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
mcore->early_setup_done = true;
}
+static void bcma_fix_i2s_irqflag(struct bcma_bus *bus)
+{
+ struct bcma_device *cpu, *pcie, *i2s;
+
+ /* Fixup the interrupts in 4716/4748 for i2s core (2010 Broadcom SDK)
+ * (IRQ flags > 7 are ignored when setting the interrupt masks)
+ */
+ if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4716 &&
+ bus->chipinfo.id != BCMA_CHIP_ID_BCM4748)
+ return;
+
+ cpu = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+ pcie = bcma_find_core(bus, BCMA_CORE_PCIE);
+ i2s = bcma_find_core(bus, BCMA_CORE_I2S);
+ if (cpu && pcie && i2s &&
+ bcma_aread32(cpu, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
+ bcma_aread32(pcie, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
+ bcma_aread32(i2s, BCMA_MIPS_OOBSELOUTA30) == 0x88) {
+ bcma_awrite32(cpu, BCMA_MIPS_OOBSELINA74, 0x07060504);
+ bcma_awrite32(pcie, BCMA_MIPS_OOBSELINA74, 0x07060504);
+ bcma_awrite32(i2s, BCMA_MIPS_OOBSELOUTA30, 0x87);
+ bcma_debug(bus,
+ "Moved i2s interrupt to oob line 7 instead of 8\n");
+ }
+}
+
void bcma_core_mips_init(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus;
@@ -236,43 +319,55 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
if (mcore->setup_done)
return;
- bcma_info(bus, "Initializing MIPS core...\n");
+ bcma_debug(bus, "Initializing MIPS core...\n");
bcma_core_mips_early_init(mcore);
- mcore->assigned_irqs = 1;
-
- /* Assign IRQs to all cores on the bus */
- list_for_each_entry(core, &bus->cores, list) {
- int mips_irq;
- if (core->irq)
- continue;
-
- mips_irq = bcma_core_mips_irq(core);
- if (mips_irq > 4)
- core->irq = 0;
- else
- core->irq = mips_irq + 2;
- if (core->irq > 5)
- continue;
- switch (core->id.id) {
- case BCMA_CORE_PCI:
- case BCMA_CORE_PCIE:
- case BCMA_CORE_ETHERNET:
- case BCMA_CORE_ETHERNET_GBIT:
- case BCMA_CORE_MAC_GBIT:
- case BCMA_CORE_80211:
- case BCMA_CORE_USB20_HOST:
- /* These devices get their own IRQ line if available,
- * the rest goes on IRQ0
- */
- if (mcore->assigned_irqs <= 4)
- bcma_core_mips_set_irq(core,
- mcore->assigned_irqs++);
- break;
+ bcma_fix_i2s_irqflag(bus);
+
+ switch (bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM4748:
+ bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+ bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+ bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_USB20_HOST, 0);
+ bcma_core_mips_set_irq_name(bus, 4, BCMA_CORE_PCIE, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_I2S, 0);
+ break;
+ case BCMA_CHIP_ID_BCM5356:
+ case BCMA_CHIP_ID_BCM47162:
+ case BCMA_CHIP_ID_BCM53572:
+ bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+ bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+ break;
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+ bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+ bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_USB20_HOST, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_I2S, 0);
+ break;
+ case BCMA_CHIP_ID_BCM4706:
+ bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_PCIE, 0);
+ bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_4706_MAC_GBIT,
+ 0);
+ bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_PCIE, 1);
+ bcma_core_mips_set_irq_name(bus, 4, BCMA_CORE_USB20_HOST, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_4706_CHIPCOMMON,
+ 0);
+ break;
+ default:
+ list_for_each_entry(core, &bus->cores, list) {
+ core->irq = bcma_core_irq(core);
}
+ bcma_err(bus,
+ "Unknown device (0x%x) found, can not configure IRQs\n",
+ bus->chipinfo.id);
}
- bcma_info(bus, "IRQ reconfiguration done\n");
+ bcma_debug(bus, "IRQ reconfiguration done\n");
bcma_core_mips_dump_irq(bus);
mcore->setup_done = true;
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c39ee6d45850..cf7a476a519f 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -207,14 +207,14 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
* Init.
**************************************************/
-static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
+static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
bcma_core_pci_fixcfg(pc);
bcma_pcicore_serdes_workaround(pc);
bcma_core_pci_config_fixup(pc);
}
-void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
+void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
if (pc->setup_done)
return;
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index e6b5c89469dc..d3bde6cec927 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -24,7 +24,7 @@
#define BCMA_PCI_SLOT_MAX 16
#define PCI_CONFIG_SPACE_SIZE 256
-bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
u16 chipid_top;
@@ -94,19 +94,19 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
if (dev == 0) {
/* we support only two functions on device 0 */
if (func > 1)
- return -EINVAL;
+ goto out;
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
if (off >= PCI_CONFIG_SPACE_SIZE) {
addr = (func << 12);
- addr |= (off & 0x0FFF);
+ addr |= (off & 0x0FFC);
val = bcma_pcie_read_config(pc, addr);
} else {
addr = BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
- addr |= (off & 0xfc);
+ addr |= (off & 0xFC);
val = pcicore_read32(pc, addr);
}
} else {
@@ -119,11 +119,9 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
goto out;
if (mips_busprobe32(val, mmio)) {
- val = 0xffffffff;
+ val = 0xFFFFFFFF;
goto unmap;
}
-
- val = readl(mmio);
}
val >>= (8 * (off & 3));
@@ -151,7 +149,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
const void *buf, int len)
{
int err = -EINVAL;
- u32 addr = 0, val = 0;
+ u32 addr, val;
void __iomem *mmio = 0;
u16 chipid = pc->core->bus->chipinfo.id;
@@ -159,16 +157,22 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
if (dev == 0) {
+ /* we support only two functions on device 0 */
+ if (func > 1)
+ goto out;
+
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
- if (off < PCI_CONFIG_SPACE_SIZE) {
- addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
+ if (off >= PCI_CONFIG_SPACE_SIZE) {
+ addr = (func << 12);
+ addr |= (off & 0x0FFC);
+ val = bcma_pcie_read_config(pc, addr);
+ } else {
+ addr = BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
- addr |= (off & 0xfc);
- mmio = ioremap_nocache(addr, sizeof(val));
- if (!mmio)
- goto out;
+ addr |= (off & 0xFC);
+ val = pcicore_read32(pc, addr);
}
} else {
addr = bcma_get_cfgspace_addr(pc, dev, func, off);
@@ -180,19 +184,17 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
goto out;
if (mips_busprobe32(val, mmio)) {
- val = 0xffffffff;
+ val = 0xFFFFFFFF;
goto unmap;
}
}
switch (len) {
case 1:
- val = readl(mmio);
val &= ~(0xFF << (8 * (off & 3)));
val |= *((const u8 *)buf) << (8 * (off & 3));
break;
case 2:
- val = readl(mmio);
val &= ~(0xFFFF << (8 * (off & 3)));
val |= *((const u16 *)buf) << (8 * (off & 3));
break;
@@ -200,13 +202,14 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
val = *((const u32 *)buf);
break;
}
- if (dev == 0 && !addr) {
+ if (dev == 0) {
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
- addr = (func << 12);
- addr |= (off & 0x0FFF);
- bcma_pcie_write_config(pc, addr, val);
+ if (off >= PCI_CONFIG_SPACE_SIZE)
+ bcma_pcie_write_config(pc, addr, val);
+ else
+ pcicore_write32(pc, addr, val);
} else {
writel(val, mmio);
@@ -264,10 +267,9 @@ static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
}
/* return cap_offset if requested capability exists in the PCI config space */
-static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
- unsigned int dev,
- unsigned int func, u8 req_cap_id,
- unsigned char *buf, u32 *buflen)
+static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
+ unsigned int func, u8 req_cap_id,
+ unsigned char *buf, u32 *buflen)
{
u8 cap_id;
u8 cap_ptr = 0;
@@ -277,7 +279,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
/* check for Header type 0 */
bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
sizeof(u8));
- if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
+ if ((byte_val & 0x7F) != PCI_HEADER_TYPE_NORMAL)
return cap_ptr;
/* check if the capability pointer field exists */
@@ -334,7 +336,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
* Retry Status (CRS) Completion Status to software then
* enable the feature.
*/
-static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
+static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
u8 cap_ptr, root_ctrl, root_cap, dev;
@@ -381,7 +383,7 @@ static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
}
}
-void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
struct bcma_drv_pci_host *pc_host;
@@ -427,7 +429,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
/* Reset RC */
usleep_range(3000, 5000);
pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
- usleep_range(1000, 2000);
+ msleep(50);
pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
BCMA_CORE_PCI_CTL_RST_OE);
@@ -489,6 +491,17 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
bcma_core_pci_enable_crs(pc);
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
+ u16 val16;
+ bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
+ &val16, sizeof(val16));
+ val16 |= (2 << 5); /* Max payload size of 512 */
+ val16 |= (2 << 12); /* MRRS 512 */
+ bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
+ &val16, sizeof(val16));
+ }
+
/* Enable PCI bridge BAR0 memory & master access */
tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));
@@ -577,7 +590,7 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
pr_info("PCI: Fixing up device %s\n", pci_name(dev));
/* Fix up interrupt lines */
- dev->irq = bcma_core_mips_irq(pc_host->pdev->core) + 2;
+ dev->irq = bcma_core_irq(pc_host->pdev->core);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
return 0;
@@ -596,6 +609,6 @@ int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
pci_ops);
- return bcma_core_mips_irq(pc_host->pdev->core) + 2;
+ return bcma_core_irq(pc_host->pdev->core);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
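Note on the write path above: bcma_extpci_write_config() never issues a partial store. For 1- and 2-byte accesses it first reads the containing 32-bit config dword (directly, indirectly, or via mips_busprobe32 depending on device and offset), masks out the target bytes, merges in the caller's value, and only then writes the full dword back. A minimal standalone sketch of that merge step; the helper name and parameters are illustrative, not taken from the driver:

#include <linux/types.h>

/* Sketch only: merge a 1- or 2-byte value into the 32-bit config
 * dword at byte offset 'off'.  'cur' is the dword read back from the
 * device, 'buf' holds the bytes the caller wants stored, 'len' is 1,
 * 2, or 4.
 */
static u32 merge_cfg_write(u32 cur, unsigned int off, int len, const void *buf)
{
	unsigned int shift = 8 * (off & 3);

	switch (len) {
	case 1:
		cur &= ~(0xFFu << shift);
		cur |= *(const u8 *)buf << shift;
		break;
	case 2:
		cur &= ~(0xFFFFu << shift);
		cur |= *(const u16 *)buf << shift;
		break;
	default:			/* len == 4 replaces the whole dword */
		cur = *(const u32 *)buf;
		break;
	}
	return cur;
}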
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 98fdc3e014e7..fbf2759e7e4e 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -155,8 +155,8 @@ static const struct bcma_host_ops bcma_host_pci_ops = {
.awrite32 = bcma_host_pci_awrite32,
};
-static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int bcma_host_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct bcma_bus *bus;
int err = -ENOMEM;
@@ -226,7 +226,7 @@ err_kfree_bus:
return err;
}
-static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
+static void bcma_host_pci_remove(struct pci_dev *dev)
{
struct bcma_bus *bus = pci_get_drvdata(dev);
@@ -284,7 +284,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
.name = "bcma-pci-bridge",
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
- .remove = __devexit_p(bcma_host_pci_remove),
+ .remove = bcma_host_pci_remove,
.driver.pm = BCMA_PM_OPS,
};
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index debd4f142f93..9a6188add590 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -81,8 +81,8 @@ struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
}
EXPORT_SYMBOL_GPL(bcma_find_core);
-static struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
- u8 unit)
+struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit)
{
struct bcma_device *core;
@@ -149,6 +149,14 @@ static int bcma_register_cores(struct bcma_bus *bus)
dev_id++;
}
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+ if (bus->drv_cc.pflash.present) {
+ err = platform_device_register(&bcma_pflash_dev);
+ if (err)
+ bcma_err(bus, "Error registering parallel flash\n");
+ }
+#endif
+
#ifdef CONFIG_BCMA_SFLASH
if (bus->drv_cc.sflash.present) {
err = platform_device_register(&bcma_sflash_dev);
@@ -164,6 +172,11 @@ static int bcma_register_cores(struct bcma_bus *bus)
bcma_err(bus, "Error registering NAND flash\n");
}
#endif
+ err = bcma_gpio_init(&bus->drv_cc);
+ if (err == -ENOTSUPP)
+ bcma_debug(bus, "GPIO driver not activated\n");
+ else if (err)
+ bcma_err(bus, "Error registering GPIO driver: %i\n", err);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
err = bcma_chipco_watchdog_register(&bus->drv_cc);
@@ -187,7 +200,7 @@ static void bcma_unregister_cores(struct bcma_bus *bus)
platform_device_unregister(bus->drv_cc.watchdog);
}
-int __devinit bcma_bus_register(struct bcma_bus *bus)
+int bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
@@ -263,6 +276,13 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
void bcma_bus_unregister(struct bcma_bus *bus)
{
struct bcma_device *cores[3];
+ int err;
+
+ err = bcma_gpio_unregister(&bus->drv_cc);
+ if (err == -EBUSY)
+ bcma_err(bus, "Some GPIOs are still in use.\n");
+ else if (err)
+ bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index d2ed7f18d1ac..175649468c95 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "50"
+#define VERSION "81"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -10,7 +10,7 @@
#define AOE_PARTITIONS (16)
#endif
-#define WHITESPACE " \t\v\f\n"
+#define WHITESPACE " \t\v\f\n,"
enum {
AOECMD_ATA,
@@ -73,21 +73,29 @@ enum {
DEVFL_TKILL = (1<<1), /* flag for timer to know when to kill self */
DEVFL_EXT = (1<<2), /* device accepts lba48 commands */
DEVFL_GDALLOC = (1<<3), /* need to alloc gendisk */
- DEVFL_KICKME = (1<<4), /* slow polling network card catch */
- DEVFL_NEWSIZE = (1<<5), /* need to update dev size in block layer */
+ DEVFL_GD_NOW = (1<<4), /* allocating gendisk */
+ DEVFL_KICKME = (1<<5), /* slow polling network card catch */
+ DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
+ DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
+ DEVFL_FREED = (1<<8), /* device has been cleaned up */
};
enum {
DEFAULTBCNT = 2 * 512, /* 2 sectors */
MIN_BUFS = 16,
- NTARGETS = 8,
+ NTARGETS = 4,
NAOEIFS = 8,
NSKBPOOLMAX = 256,
NFACTIVE = 61,
TIMERTICK = HZ / 10,
- MINTIMER = HZ >> 2,
- MAXTIMER = HZ << 1,
+ RTTSCALE = 8,
+ RTTDSCALE = 3,
+ RTTAVG_INIT = USEC_PER_SEC / 4 << RTTSCALE,
+ RTTDEV_INIT = RTTAVG_INIT / 4,
+
+ HARD_SCORN_SECS = 10, /* try another remote port after this */
+ MAX_TAINT = 1000, /* cap on aoetgt taint */
};
struct buf {
@@ -100,10 +108,17 @@ struct buf {
struct request *rq;
};
+enum frame_flags {
+ FFL_PROBE = 1,
+};
+
struct frame {
struct list_head head;
u32 tag;
+ struct timeval sent; /* high-res time packet was sent */
+ u32 sent_jiffs; /* low-res jiffies-based sent time */
ulong waited;
+ ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
sector_t lba;
struct sk_buff *skb; /* command skb freed on module exit */
@@ -112,6 +127,7 @@ struct frame {
struct bio_vec *bv;
ulong bcnt;
ulong bv_off;
+ char flags;
};
struct aoeif {
@@ -122,28 +138,31 @@ struct aoeif {
struct aoetgt {
unsigned char addr[6];
- ushort nframes;
+ ushort nframes; /* cap on frames to use */
struct aoedev *d; /* parent device I belong to */
struct list_head ffree; /* list of free frames */
struct aoeif ifs[NAOEIFS];
struct aoeif *ifp; /* current aoeif in use */
- ushort nout;
- ushort maxout;
- ulong falloc;
- ulong lastwadj; /* last window adjustment */
+ ushort nout; /* number of AoE commands outstanding */
+ ushort maxout; /* current value for max outstanding */
+ ushort next_cwnd; /* incr maxout after decrementing to zero */
+ ushort ssthresh; /* slow start threshold */
+ ulong falloc; /* number of allocated frames */
+ int taint; /* how much we want to avoid this aoetgt */
int minbcnt;
int wpkts, rpkts;
+ char nout_probes;
};
struct aoedev {
struct aoedev *next;
ulong sysminor;
ulong aoemajor;
+ u32 rttavg; /* scaled AoE round trip time average */
+ u32 rttdev; /* scaled round trip time mean deviation */
u16 aoeminor;
u16 flags;
u16 nopen; /* (bd_openers isn't available without sleeping) */
- u16 rttavg; /* round trip average of requests/responses */
- u16 mintimer;
u16 fw_ver; /* version of blade's firmware */
u16 lasttag; /* last tag sent */
u16 useme;
@@ -151,7 +170,7 @@ struct aoedev {
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
struct request_queue *blkq;
- struct hd_geometry geo;
+ struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
spinlock_t lock;
@@ -164,11 +183,12 @@ struct aoedev {
} ip;
ulong maxbcnt;
struct list_head factive[NFACTIVE]; /* hash of active frames */
- struct aoetgt *targets[NTARGETS];
+ struct list_head rexmitq; /* deferred retransmissions */
+ struct aoetgt **targets;
+ ulong ntargets; /* number of allocated aoetgt pointers */
struct aoetgt **tgt; /* target in use when working */
- struct aoetgt *htgt; /* target needing rexmit assistance */
- ulong ntargets;
ulong kicked;
+ char ident[512];
};
/* kthread tracking */
@@ -195,6 +215,7 @@ void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
struct sk_buff *aoecmd_ata_rsp(struct sk_buff *);
void aoecmd_cfg_rsp(struct sk_buff *);
void aoecmd_sleepwork(struct work_struct *);
+void aoecmd_wreset(struct aoetgt *t);
void aoecmd_cleanslate(struct aoedev *);
void aoecmd_exit(void);
int aoecmd_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 00dfc5008ad4..a129f8c8073d 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -16,11 +16,19 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
+#include <linux/moduleparam.h>
+#include <scsi/sg.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
+/* GPFS needs a larger value than the default. */
+static int aoe_maxsectors;
+module_param(aoe_maxsectors, int, 0644);
+MODULE_PARM_DESC(aoe_maxsectors,
+ "When nonzero, set the maximum number of sectors per I/O request");
+
static ssize_t aoedisk_show_state(struct device *dev,
struct device_attribute *attr, char *page)
{
@@ -59,7 +67,7 @@ static ssize_t aoedisk_show_netif(struct device *dev,
nd = nds;
ne = nd + ARRAY_SIZE(nds);
t = d->targets;
- te = t + NTARGETS;
+ te = t + d->ntargets;
for (; t < te && *t; t++) {
ifp = (*t)->ifs;
e = ifp + NAOEIFS;
@@ -91,6 +99,14 @@ static ssize_t aoedisk_show_fwver(struct device *dev,
return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}
+static ssize_t aoedisk_show_payload(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct aoedev *d = disk->private_data;
+
+ return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
+}
static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
@@ -99,12 +115,14 @@ static struct device_attribute dev_attr_firmware_version = {
.attr = { .name = "firmware-version", .mode = S_IRUGO },
.show = aoedisk_show_fwver,
};
+static DEVICE_ATTR(payload, S_IRUGO, aoedisk_show_payload, NULL);
static struct attribute *aoe_attrs[] = {
&dev_attr_state.attr,
&dev_attr_mac.attr,
&dev_attr_netif.attr,
&dev_attr_firmware_version.attr,
+ &dev_attr_payload.attr,
NULL,
};
@@ -129,9 +147,18 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
struct aoedev *d = bdev->bd_disk->private_data;
ulong flags;
+ if (!virt_addr_valid(d)) {
+ pr_crit("aoe: invalid device pointer in %s\n",
+ __func__);
+ WARN_ON(1);
+ return -ENODEV;
+ }
+ if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
+ return -ENODEV;
+
mutex_lock(&aoeblk_mutex);
spin_lock_irqsave(&d->lock, flags);
- if (d->flags & DEVFL_UP) {
+ if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
d->nopen++;
spin_unlock_irqrestore(&d->lock, flags);
mutex_unlock(&aoeblk_mutex);
@@ -195,9 +222,38 @@ aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
+static int
+aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
+{
+ struct aoedev *d;
+
+ if (!arg)
+ return -EINVAL;
+
+ d = bdev->bd_disk->private_data;
+ if ((d->flags & DEVFL_UP) == 0) {
+ pr_err("aoe: disk not up\n");
+ return -ENODEV;
+ }
+
+ if (cmd == HDIO_GET_IDENTITY) {
+ if (!copy_to_user((void __user *) arg, &d->ident,
+ sizeof(d->ident)))
+ return 0;
+ return -EFAULT;
+ }
+
+ /* udev calls scsi_id, which uses SG_IO, resulting in noise */
+ if (cmd != SG_IO)
+ pr_info("aoe: unknown ioctl 0x%x\n", cmd);
+
+ return -ENOTTY;
+}
+
static const struct block_device_operations aoe_bdops = {
.open = aoeblk_open,
.release = aoeblk_release,
+ .ioctl = aoeblk_ioctl,
.getgeo = aoeblk_getgeo,
.owner = THIS_MODULE,
};
@@ -212,6 +268,18 @@ aoeblk_gdalloc(void *vp)
struct request_queue *q;
enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
+ int late = 0;
+
+ spin_lock_irqsave(&d->lock, flags);
+ if (d->flags & DEVFL_GDALLOC
+ && !(d->flags & DEVFL_TKILL)
+ && !(d->flags & DEVFL_GD_NOW))
+ d->flags |= DEVFL_GD_NOW;
+ else
+ late = 1;
+ spin_unlock_irqrestore(&d->lock, flags);
+ if (late)
+ return;
gd = alloc_disk(AOE_PARTITIONS);
if (gd == NULL) {
@@ -231,23 +299,24 @@ aoeblk_gdalloc(void *vp)
if (q == NULL) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
- mempool_destroy(mp);
- goto err_disk;
+ goto err_mempool;
}
- d->blkq = blk_alloc_queue(GFP_KERNEL);
- if (!d->blkq)
- goto err_mempool;
- d->blkq->backing_dev_info.name = "aoe";
- if (bdi_init(&d->blkq->backing_dev_info))
- goto err_blkq;
spin_lock_irqsave(&d->lock, flags);
- blk_queue_max_hw_sectors(d->blkq, BLK_DEF_MAX_SECTORS);
+ WARN_ON(!(d->flags & DEVFL_GD_NOW));
+ WARN_ON(!(d->flags & DEVFL_GDALLOC));
+ WARN_ON(d->flags & DEVFL_TKILL);
+ WARN_ON(d->gd);
+ WARN_ON(d->flags & DEVFL_UP);
+ blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
+ q->backing_dev_info.name = "aoe";
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
d->gd = gd;
+ if (aoe_maxsectors)
+ blk_queue_max_hw_sectors(q, aoe_maxsectors);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor;
gd->fops = &aoe_bdops;
@@ -263,18 +332,21 @@ aoeblk_gdalloc(void *vp)
add_disk(gd);
aoedisk_add_sysfs(d);
+
+ spin_lock_irqsave(&d->lock, flags);
+ WARN_ON(!(d->flags & DEVFL_GD_NOW));
+ d->flags &= ~DEVFL_GD_NOW;
+ spin_unlock_irqrestore(&d->lock, flags);
return;
-err_blkq:
- blk_cleanup_queue(d->blkq);
- d->blkq = NULL;
err_mempool:
- mempool_destroy(d->bufpool);
+ mempool_destroy(mp);
err_disk:
put_disk(gd);
err:
spin_lock_irqsave(&d->lock, flags);
- d->flags &= ~DEVFL_GDALLOC;
+ d->flags &= ~DEVFL_GD_NOW;
+ schedule_work(&d->work);
spin_unlock_irqrestore(&d->lock, flags);
}
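The aoeblk_gdalloc() change above serializes gendisk allocation: the worker claims the job by setting DEVFL_GD_NOW under the device spinlock, bails out ("late") if another invocation already holds it or the device is being killed, and clears the flag under the lock again on every exit path. A stripped-down sketch of that claim step, assuming a hypothetical stand-in struct and helper name (only the flag values mirror aoe.h above):

#include <linux/spinlock.h>

#define DEVFL_TKILL	(1<<1)		/* values mirror aoe.h above */
#define DEVFL_GDALLOC	(1<<3)
#define DEVFL_GD_NOW	(1<<4)

struct dev_state {			/* hypothetical stand-in for struct aoedev */
	spinlock_t lock;
	unsigned int flags;
};

/* Returns nonzero if this caller won the right to do the allocation. */
static int claim_gdalloc(struct dev_state *d)
{
	unsigned long flags;
	int won = 0;

	spin_lock_irqsave(&d->lock, flags);
	if ((d->flags & DEVFL_GDALLOC) &&
	    !(d->flags & (DEVFL_TKILL | DEVFL_GD_NOW))) {
		d->flags |= DEVFL_GD_NOW;	/* allocation now in progress */
		won = 1;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	return won;
}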
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index ed57a890c643..42e67ad6bd20 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -39,6 +39,11 @@ struct ErrMsg {
};
static DEFINE_MUTEX(aoechr_mutex);
+
+/* A ring buffer of error messages, to be read through
+ * "/dev/etherd/err". When no messages are present,
+ * readers will block waiting for messages to appear.
+ */
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
@@ -282,7 +287,7 @@ aoechr_init(void)
int n, i;
n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
- if (n < 0) {
+ if (n < 0) {
printk(KERN_ERR "aoe: can't register char device\n");
return n;
}
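The new comment above describes emsgs[] as a ring buffer indexed by emsgs_head_idx and emsgs_tail_idx, drained by readers of "/dev/etherd/err" who block while it is empty. As a rough, self-contained illustration of head/tail ring arithmetic only (names, capacity, and the non-blocking behavior here are hypothetical, not the aoechr code):

#define RINGSZ 100			/* illustrative capacity */

static char *msgs[RINGSZ];
static int head, tail;

/* Returns 0 on success, -1 when the ring is full. */
static int ring_put(char *m)
{
	int next = (head + 1) % RINGSZ;

	if (next == tail)
		return -1;		/* full: a reader must drain first */
	msgs[head] = m;
	head = next;
	return 0;
}

/* Returns NULL when empty; a blocking reader would sleep here instead. */
static char *ring_get(void)
{
	char *m;

	if (tail == head)
		return NULL;
	m = msgs[tail];
	tail = (tail + 1) % RINGSZ;
	return m;
}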
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 9fe4f1865558..25ef5c014fca 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -22,6 +22,7 @@
#define MAXIOC (8192) /* default meant to avoid most soft lockups */
static void ktcomplete(struct frame *, struct sk_buff *);
+static int count_targets(struct aoedev *d, int *untainted);
static struct buf *nextbuf(struct aoedev *);
@@ -29,7 +30,7 @@ static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
-static int aoe_maxout = 16;
+static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
@@ -43,6 +44,8 @@ static struct {
spinlock_t lock;
} iocq;
+static struct page *empty_page;
+
static struct sk_buff *
new_skb(ulong len)
{
@@ -59,6 +62,23 @@ new_skb(ulong len)
}
static struct frame *
+getframe_deferred(struct aoedev *d, u32 tag)
+{
+ struct list_head *head, *pos, *nx;
+ struct frame *f;
+
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head) {
+ f = list_entry(pos, struct frame, head);
+ if (f->tag == tag) {
+ list_del(pos);
+ return f;
+ }
+ }
+ return NULL;
+}
+
+static struct frame *
getframe(struct aoedev *d, u32 tag)
{
struct frame *f;
@@ -162,8 +182,10 @@ aoe_freetframe(struct frame *f)
t = f->t;
f->buf = NULL;
+ f->lba = 0;
f->bv = NULL;
f->r_skb = NULL;
+ f->flags = 0;
list_add(&f->head, &t->ffree);
}
@@ -217,20 +239,25 @@ newframe(struct aoedev *d)
struct frame *f;
struct aoetgt *t, **tt;
int totout = 0;
+ int use_tainted;
+ int has_untainted;
- if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */
+ if (!d->targets || !d->targets[0]) {
printk(KERN_ERR "aoe: NULL TARGETS!\n");
return NULL;
}
tt = d->tgt; /* last used target */
- for (;;) {
+ for (use_tainted = 0, has_untainted = 0;;) {
tt++;
- if (tt >= &d->targets[NTARGETS] || !*tt)
+ if (tt >= &d->targets[d->ntargets] || !*tt)
tt = d->targets;
t = *tt;
- totout += t->nout;
+ if (!t->taint) {
+ has_untainted = 1;
+ totout += t->nout;
+ }
if (t->nout < t->maxout
- && t != d->htgt
+ && (use_tainted || !t->taint)
&& t->ifp->nd) {
f = newtframe(d, t);
if (f) {
@@ -239,8 +266,12 @@ newframe(struct aoedev *d)
return f;
}
}
- if (tt == d->tgt) /* we've looped and found nada */
- break;
+ if (tt == d->tgt) { /* we've looped and found nada */
+ if (!use_tainted && !has_untainted)
+ use_tainted = 1;
+ else
+ break;
+ }
}
if (totout == 0) {
d->kicked++;
@@ -277,21 +308,68 @@ fhash(struct frame *f)
list_add_tail(&f->head, &d->factive[n]);
}
+static void
+ata_rw_frameinit(struct frame *f)
+{
+ struct aoetgt *t;
+ struct aoe_hdr *h;
+ struct aoe_atahdr *ah;
+ struct sk_buff *skb;
+ char writebit, extbit;
+
+ skb = f->skb;
+ h = (struct aoe_hdr *) skb_mac_header(skb);
+ ah = (struct aoe_atahdr *) (h + 1);
+ skb_put(skb, sizeof(*h) + sizeof(*ah));
+ memset(h, 0, skb->len);
+
+ writebit = 0x10;
+ extbit = 0x4;
+
+ t = f->t;
+ f->tag = aoehdr_atainit(t->d, t, h);
+ fhash(f);
+ t->nout++;
+ f->waited = 0;
+ f->waited_total = 0;
+ if (f->buf)
+ f->lba = f->buf->sector;
+
+ /* set up ata header */
+ ah->scnt = f->bcnt >> 9;
+ put_lba(ah, f->lba);
+ if (t->d->flags & DEVFL_EXT) {
+ ah->aflags |= AOEAFL_EXT;
+ } else {
+ extbit = 0;
+ ah->lba3 &= 0x0f;
+ ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
+ }
+ if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
+ skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+ ah->aflags |= AOEAFL_WRITE;
+ skb->len += f->bcnt;
+ skb->data_len = f->bcnt;
+ skb->truesize += f->bcnt;
+ t->wpkts++;
+ } else {
+ t->rpkts++;
+ writebit = 0;
+ }
+
+ ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
+ skb->dev = t->ifp->nd;
+}
+
static int
aoecmd_ata_rw(struct aoedev *d)
{
struct frame *f;
- struct aoe_hdr *h;
- struct aoe_atahdr *ah;
struct buf *buf;
struct aoetgt *t;
struct sk_buff *skb;
struct sk_buff_head queue;
ulong bcnt, fbcnt;
- char writebit, extbit;
-
- writebit = 0x10;
- extbit = 0x4;
buf = nextbuf(d);
if (buf == NULL)
@@ -326,50 +404,18 @@ aoecmd_ata_rw(struct aoedev *d)
} while (fbcnt);
/* initialize the headers & frame */
- skb = f->skb;
- h = (struct aoe_hdr *) skb_mac_header(skb);
- ah = (struct aoe_atahdr *) (h+1);
- skb_put(skb, sizeof *h + sizeof *ah);
- memset(h, 0, skb->len);
- f->tag = aoehdr_atainit(d, t, h);
- fhash(f);
- t->nout++;
- f->waited = 0;
f->buf = buf;
f->bcnt = bcnt;
- f->lba = buf->sector;
-
- /* set up ata header */
- ah->scnt = bcnt >> 9;
- put_lba(ah, buf->sector);
- if (d->flags & DEVFL_EXT) {
- ah->aflags |= AOEAFL_EXT;
- } else {
- extbit = 0;
- ah->lba3 &= 0x0f;
- ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
- }
- if (bio_data_dir(buf->bio) == WRITE) {
- skb_fillup(skb, f->bv, f->bv_off, bcnt);
- ah->aflags |= AOEAFL_WRITE;
- skb->len += bcnt;
- skb->data_len = bcnt;
- skb->truesize += bcnt;
- t->wpkts++;
- } else {
- t->rpkts++;
- writebit = 0;
- }
-
- ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
+ ata_rw_frameinit(f);
/* mark all tracking fields and load out */
buf->nframesout += 1;
buf->sector += bcnt >> 9;
- skb->dev = t->ifp->nd;
- skb = skb_clone(skb, GFP_ATOMIC);
+ skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -442,11 +488,14 @@ resend(struct aoedev *d, struct frame *f)
h = (struct aoe_hdr *) skb_mac_header(skb);
ah = (struct aoe_atahdr *) (h+1);
- snprintf(buf, sizeof buf,
- "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
- "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
- h->src, h->dst, t->nout);
- aoechr_error(buf);
+ if (!(f->flags & FFL_PROBE)) {
+ snprintf(buf, sizeof(buf),
+ "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
+ "retransmit", d->aoemajor, d->aoeminor,
+ f->tag, jiffies, n,
+ h->src, h->dst, t->nout);
+ aoechr_error(buf);
+ }
f->tag = n;
fhash(f);
@@ -458,12 +507,46 @@ resend(struct aoedev *d, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
}
static int
+tsince_hr(struct frame *f)
+{
+ struct timeval now;
+ int n;
+
+ do_gettimeofday(&now);
+ n = now.tv_usec - f->sent.tv_usec;
+ n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;
+
+ if (n < 0)
+ n = -n;
+
+ /* For relatively long periods, use jiffies to avoid
+ * discrepancies caused by updates to the system time.
+ *
+ * On a system with HZ of 1000, 32 bits is over 49 days'
+ * worth of jiffies, or over 71 minutes' worth of usecs.
+ *
+ * Jiffies overflow is handled by subtraction of unsigned ints:
+ * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
+ * $3 = 4
+ * (gdb)
+ */
+ if (n > USEC_PER_SEC / 4) {
+ n = ((u32) jiffies) - f->sent_jiffs;
+ n *= USEC_PER_SEC / HZ;
+ }
+
+ return n;
+}
+
+static int
tsince(u32 tag)
{
int n;
@@ -472,7 +555,7 @@ tsince(u32 tag)
n -= tag & 0xffff;
if (n < 0)
n += 1<<16;
- return n;
+ return jiffies_to_usecs(n + 1);
}
static struct aoeif *
@@ -503,70 +586,189 @@ ejectif(struct aoetgt *t, struct aoeif *ifp)
dev_put(nd);
}
-static int
-sthtith(struct aoedev *d)
+static struct frame *
+reassign_frame(struct frame *f)
{
- struct frame *f, *nf;
- struct list_head *nx, *pos, *head;
+ struct frame *nf;
struct sk_buff *skb;
- struct aoetgt *ht = d->htgt;
- int i;
- for (i = 0; i < NFACTIVE; i++) {
- head = &d->factive[i];
- list_for_each_safe(pos, nx, head) {
- f = list_entry(pos, struct frame, head);
- if (f->t != ht)
- continue;
+ nf = newframe(f->t->d);
+ if (!nf)
+ return NULL;
+ if (nf->t == f->t) {
+ aoe_freetframe(nf);
+ return NULL;
+ }
- nf = newframe(d);
- if (!nf)
- return 0;
+ skb = nf->skb;
+ nf->skb = f->skb;
+ nf->buf = f->buf;
+ nf->bcnt = f->bcnt;
+ nf->lba = f->lba;
+ nf->bv = f->bv;
+ nf->bv_off = f->bv_off;
+ nf->waited = 0;
+ nf->waited_total = f->waited_total;
+ nf->sent = f->sent;
+ nf->sent_jiffs = f->sent_jiffs;
+ f->skb = skb;
+
+ return nf;
+}
- /* remove frame from active list */
- list_del(pos);
+static void
+probe(struct aoetgt *t)
+{
+ struct aoedev *d;
+ struct frame *f;
+ struct sk_buff *skb;
+ struct sk_buff_head queue;
+ size_t n, m;
+ int frag;
- /* reassign all pertinent bits to new outbound frame */
- skb = nf->skb;
- nf->skb = f->skb;
- nf->buf = f->buf;
- nf->bcnt = f->bcnt;
- nf->lba = f->lba;
- nf->bv = f->bv;
- nf->bv_off = f->bv_off;
- nf->waited = 0;
- f->skb = skb;
+ d = t->d;
+ f = newtframe(d, t);
+ if (!f) {
+ pr_err("%s %pm for e%ld.%d: %s\n",
+ "aoe: cannot probe remote address",
+ t->addr,
+ (long) d->aoemajor, d->aoeminor,
+ "no frame available");
+ return;
+ }
+ f->flags |= FFL_PROBE;
+ ifrotate(t);
+ f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+ ata_rw_frameinit(f);
+ skb = f->skb;
+ for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+ if (n < PAGE_SIZE)
+ m = n;
+ else
+ m = PAGE_SIZE;
+ skb_fill_page_desc(skb, frag, empty_page, 0, m);
+ }
+ skb->len += f->bcnt;
+ skb->data_len = f->bcnt;
+ skb->truesize += f->bcnt;
+
+ skb = skb_clone(f->skb, GFP_ATOMIC);
+ if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
+ __skb_queue_head_init(&queue);
+ __skb_queue_tail(&queue, skb);
+ aoenet_xmit(&queue);
+ }
+}
+
+static long
+rto(struct aoedev *d)
+{
+ long t;
+
+ t = 2 * d->rttavg >> RTTSCALE;
+ t += 8 * d->rttdev >> RTTDSCALE;
+ if (t == 0)
+ t = 1;
+
+ return t;
+}
+
+static void
+rexmit_deferred(struct aoedev *d)
+{
+ struct aoetgt *t;
+ struct frame *f;
+ struct frame *nf;
+ struct list_head *pos, *nx, *head;
+ int since;
+ int untainted;
+
+ count_targets(d, &untainted);
+
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head) {
+ f = list_entry(pos, struct frame, head);
+ t = f->t;
+ if (t->taint) {
+ if (!(f->flags & FFL_PROBE)) {
+ nf = reassign_frame(f);
+ if (nf) {
+ if (t->nout_probes == 0
+ && untainted > 0) {
+ probe(t);
+ t->nout_probes++;
+ }
+ list_replace(&f->head, &nf->head);
+ pos = &nf->head;
+ aoe_freetframe(f);
+ f = nf;
+ t = f->t;
+ }
+ } else if (untainted < 1) {
+ /* don't probe w/o other untainted aoetgts */
+ goto stop_probe;
+ } else if (tsince_hr(f) < t->taint * rto(d)) {
+ /* reprobe slowly when taint is high */
+ continue;
+ }
+ } else if (f->flags & FFL_PROBE) {
+stop_probe: /* don't probe untainted aoetgts */
+ list_del(pos);
aoe_freetframe(f);
- ht->nout--;
- nf->t->nout++;
- resend(d, nf);
+ /* leaving d->kicked, because this is routine */
+ f->t->d->flags |= DEVFL_KICKME;
+ continue;
}
+ if (t->nout >= t->maxout)
+ continue;
+ list_del(pos);
+ t->nout++;
+ if (f->flags & FFL_PROBE)
+ t->nout_probes++;
+ since = tsince_hr(f);
+ f->waited += since;
+ f->waited_total += since;
+ resend(d, f);
}
- /* We've cleaned up the outstanding so take away his
- * interfaces so he won't be used. We should remove him from
- * the target array here, but cleaning up a target is
- * involved. PUNT!
- */
- memset(ht->ifs, 0, sizeof ht->ifs);
- d->htgt = NULL;
- return 1;
}
-static inline unsigned char
-ata_scnt(unsigned char *packet) {
- struct aoe_hdr *h;
- struct aoe_atahdr *ah;
+/* An aoetgt accumulates demerits quickly, and successful
+ * probing redeems the aoetgt slowly.
+ */
+static void
+scorn(struct aoetgt *t)
+{
+ int n;
- h = (struct aoe_hdr *) packet;
- ah = (struct aoe_atahdr *) (h+1);
- return ah->scnt;
+ n = t->taint++;
+ t->taint += t->taint * 2;
+ if (n > t->taint)
+ t->taint = n;
+ if (t->taint > MAX_TAINT)
+ t->taint = MAX_TAINT;
+}
+
+static int
+count_targets(struct aoedev *d, int *untainted)
+{
+ int i, good;
+
+ for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
+ if (d->targets[i]->taint == 0)
+ good++;
+
+ if (untainted)
+ *untainted = good;
+ return i;
}
static void
rexmit_timer(ulong vp)
{
struct aoedev *d;
- struct aoetgt *t, **tt, **te;
+ struct aoetgt *t;
struct aoeif *ifp;
struct frame *f;
struct list_head *head, *pos, *nx;
@@ -574,15 +776,18 @@ rexmit_timer(ulong vp)
register long timeout;
ulong flags, n;
int i;
+ int utgts; /* number of aoetgt descriptors (not slots) */
+ int since;
d = (struct aoedev *) vp;
- /* timeout is always ~150% of the moving average */
- timeout = d->rttavg;
- timeout += timeout >> 1;
-
spin_lock_irqsave(&d->lock, flags);
+ /* timeout based on observed timings and variations */
+ timeout = rto(d);
+
+ utgts = count_targets(d, NULL);
+
if (d->flags & DEVFL_TKILL) {
spin_unlock_irqrestore(&d->lock, flags);
return;
@@ -593,67 +798,61 @@ rexmit_timer(ulong vp)
head = &d->factive[i];
list_for_each_safe(pos, nx, head) {
f = list_entry(pos, struct frame, head);
- if (tsince(f->tag) < timeout)
+ if (tsince_hr(f) < timeout)
break; /* end of expired frames */
/* move to flist for later processing */
list_move_tail(pos, &flist);
}
}
- /* window check */
- tt = d->targets;
- te = tt + d->ntargets;
- for (; tt < te && (t = *tt); tt++) {
- if (t->nout == t->maxout
- && t->maxout < t->nframes
- && (jiffies - t->lastwadj)/HZ > 10) {
- t->maxout++;
- t->lastwadj = jiffies;
- }
- }
-
- if (!list_empty(&flist)) { /* retransmissions necessary */
- n = d->rttavg <<= 1;
- if (n > MAXTIMER)
- d->rttavg = MAXTIMER;
- }
/* process expired frames */
while (!list_empty(&flist)) {
pos = flist.next;
f = list_entry(pos, struct frame, head);
- n = f->waited += timeout;
- n /= HZ;
- if (n > aoe_deadsecs) {
+ since = tsince_hr(f);
+ n = f->waited_total + since;
+ n /= USEC_PER_SEC;
+ if (aoe_deadsecs
+ && n > aoe_deadsecs
+ && !(f->flags & FFL_PROBE)) {
/* Waited too long. Device failure.
* Hang all frames on first hash bucket for downdev
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
aoedev_downdev(d);
- break;
+ goto out;
}
- list_del(pos);
t = f->t;
- if (n > aoe_deadsecs/2)
- d->htgt = t; /* see if another target can help */
-
- if (t->nout == t->maxout) {
- if (t->maxout > 1)
- t->maxout--;
- t->lastwadj = jiffies;
+ n = f->waited + since;
+ n /= USEC_PER_SEC;
+ if (aoe_deadsecs && utgts > 0
+ && (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
+ scorn(t); /* avoid this target */
+
+ if (t->maxout != 1) {
+ t->ssthresh = t->maxout / 2;
+ t->maxout = 1;
}
- ifp = getif(t, f->skb->dev);
- if (ifp && ++ifp->lost > (t->nframes << 1)
- && (ifp != t->ifs || t->ifs[1].nd)) {
- ejectif(t, ifp);
- ifp = NULL;
+ if (f->flags & FFL_PROBE) {
+ t->nout_probes--;
+ } else {
+ ifp = getif(t, f->skb->dev);
+ if (ifp && ++ifp->lost > (t->nframes << 1)
+ && (ifp != t->ifs || t->ifs[1].nd)) {
+ ejectif(t, ifp);
+ ifp = NULL;
+ }
}
- resend(d, f);
+ list_move_tail(pos, &d->rexmitq);
+ t->nout--;
}
+ rexmit_deferred(d);
- if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) {
+out:
+ if ((d->flags & DEVFL_KICKME) && d->blkq) {
d->flags &= ~DEVFL_KICKME;
d->blkq->request_fn(d->blkq);
}
@@ -774,8 +973,7 @@ nextbuf(struct aoedev *d)
void
aoecmd_work(struct aoedev *d)
{
- if (d->htgt && !sthtith(d))
- return;
+ rexmit_deferred(d);
while (aoecmd_ata_rw(d))
;
}
@@ -809,6 +1007,17 @@ aoecmd_sleepwork(struct work_struct *work)
}
static void
+ata_ident_fixstring(u16 *id, int ns)
+{
+ u16 s;
+
+ while (ns-- > 0) {
+ s = *id;
+ *id++ = s >> 8 | s << 8;
+ }
+}
+
+static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
u64 ssize;
@@ -843,6 +1052,11 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
}
+ ata_ident_fixstring((u16 *) &id[10<<1], 10); /* serial */
+ ata_ident_fixstring((u16 *) &id[23<<1], 4); /* firmware */
+ ata_ident_fixstring((u16 *) &id[27<<1], 20); /* model */
+ memcpy(d->ident, id, sizeof(d->ident));
+
if (d->ssize != ssize)
printk(KERN_INFO
"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
@@ -862,26 +1076,28 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
}
static void
-calc_rttavg(struct aoedev *d, int rtt)
+calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
register long n;
n = rtt;
- if (n < 0) {
- n = -rtt;
- if (n < MINTIMER)
- n = MINTIMER;
- else if (n > MAXTIMER)
- n = MAXTIMER;
- d->mintimer += (n - d->mintimer) >> 1;
- } else if (n < d->mintimer)
- n = d->mintimer;
- else if (n > MAXTIMER)
- n = MAXTIMER;
-
- /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
- n -= d->rttavg;
- d->rttavg += n >> 2;
+
+ /* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
+ n -= d->rttavg >> RTTSCALE;
+ d->rttavg += n;
+ if (n < 0)
+ n = -n;
+ n -= d->rttdev >> RTTDSCALE;
+ d->rttdev += n;
+
+ if (!t || t->maxout >= t->nframes)
+ return;
+ if (t->maxout < t->ssthresh)
+ t->maxout += 1;
+ else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
+ t->maxout += 1;
+ t->next_cwnd = t->maxout;
+ }
}
static struct aoetgt *
@@ -890,7 +1106,7 @@ gettgt(struct aoedev *d, char *addr)
struct aoetgt **t, **e;
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
return *t;
@@ -966,19 +1182,22 @@ ktiocomplete(struct frame *f)
struct aoeif *ifp;
struct aoedev *d;
long n;
+ int untainted;
if (f == NULL)
return;
t = f->t;
d = t->d;
+ skb = f->r_skb;
+ buf = f->buf;
+ if (f->flags & FFL_PROBE)
+ goto out;
+ if (!skb) /* just fail the buf. */
+ goto noskb;
hout = (struct aoe_hdr *) skb_mac_header(f->skb);
ahout = (struct aoe_atahdr *) (hout+1);
- buf = f->buf;
- skb = f->r_skb;
- if (skb == NULL)
- goto noskb; /* just fail the buf. */
hin = (struct aoe_hdr *) skb->data;
skb_pull(skb, sizeof(*hin));
@@ -988,9 +1207,9 @@ ktiocomplete(struct frame *f)
pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
ahout->cmdstat, ahin->cmdstat,
d->aoemajor, d->aoeminor);
-noskb: if (buf)
+noskb: if (buf)
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
- goto badrsp;
+ goto out;
}
n = ahout->scnt << 9;
@@ -998,8 +1217,10 @@ noskb: if (buf)
case ATA_CMD_PIO_READ:
case ATA_CMD_PIO_READ_EXT:
if (skb->len < n) {
- pr_err("aoe: runt data size in read. skb->len=%d need=%ld\n",
- skb->len, n);
+ pr_err("%s e%ld.%d. skb->len=%d need=%ld\n",
+ "aoe: runt data size in read from",
+ (long) d->aoemajor, d->aoeminor,
+ skb->len, n);
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
break;
}
@@ -1010,13 +1231,13 @@ noskb: if (buf)
ifp = getif(t, skb->dev);
if (ifp)
ifp->lost = 0;
- if (d->htgt == t) /* I'll help myself, thank you. */
- d->htgt = NULL;
spin_unlock_irq(&d->lock);
break;
case ATA_CMD_ID_ATA:
if (skb->len < 512) {
- pr_info("aoe: runt data size in ataid. skb->len=%d\n",
+ pr_info("%s e%ld.%d. skb->len=%d need=512\n",
+ "aoe: runt data size in ataid from",
+ (long) d->aoemajor, d->aoeminor,
skb->len);
break;
}
@@ -1032,16 +1253,23 @@ noskb: if (buf)
be16_to_cpu(get_unaligned(&hin->major)),
hin->minor);
}
-badrsp:
+out:
spin_lock_irq(&d->lock);
+ if (t->taint > 0
+ && --t->taint > 0
+ && t->nout_probes == 0) {
+ count_targets(d, &untainted);
+ if (untainted > 0) {
+ probe(t);
+ t->nout_probes++;
+ }
+ }
aoe_freetframe(f);
if (buf && --buf->nframesout == 0 && buf->resid == 0)
aoe_end_buf(d, buf);
- aoecmd_work(d);
-
spin_unlock_irq(&d->lock);
aoedev_put(d);
dev_kfree_skb(skb);
@@ -1141,7 +1369,6 @@ aoecmd_ata_rsp(struct sk_buff *skb)
struct aoedev *d;
struct aoe_hdr *h;
struct frame *f;
- struct aoetgt *t;
u32 n;
ulong flags;
char ebuf[128];
@@ -1162,23 +1389,32 @@ aoecmd_ata_rsp(struct sk_buff *skb)
n = be32_to_cpu(get_unaligned(&h->tag));
f = getframe(d, n);
- if (f == NULL) {
- calc_rttavg(d, -tsince(n));
- spin_unlock_irqrestore(&d->lock, flags);
- aoedev_put(d);
- snprintf(ebuf, sizeof ebuf,
- "%15s e%d.%d tag=%08x@%08lx\n",
- "unexpected rsp",
- get_unaligned_be16(&h->major),
- h->minor,
- get_unaligned_be32(&h->tag),
- jiffies);
- aoechr_error(ebuf);
- return skb;
+ if (f) {
+ calc_rttavg(d, f->t, tsince_hr(f));
+ f->t->nout--;
+ if (f->flags & FFL_PROBE)
+ f->t->nout_probes--;
+ } else {
+ f = getframe_deferred(d, n);
+ if (f) {
+ calc_rttavg(d, NULL, tsince_hr(f));
+ } else {
+ calc_rttavg(d, NULL, tsince(n));
+ spin_unlock_irqrestore(&d->lock, flags);
+ aoedev_put(d);
+ snprintf(ebuf, sizeof(ebuf),
+ "%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
+ "unexpected rsp",
+ get_unaligned_be16(&h->major),
+ h->minor,
+ get_unaligned_be32(&h->tag),
+ jiffies,
+ h->src,
+ h->dst);
+ aoechr_error(ebuf);
+ return skb;
+ }
}
- t = f->t;
- calc_rttavg(d, tsince(f->tag));
- t->nout--;
aoecmd_work(d);
spin_unlock_irqrestore(&d->lock, flags);
@@ -1201,7 +1437,7 @@ aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
aoenet_xmit(&queue);
}
-
+
struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
@@ -1227,6 +1463,7 @@ aoecmd_ata_id(struct aoedev *d)
fhash(f);
t->nout++;
f->waited = 0;
+ f->waited_total = 0;
/* set up ata header */
ah->scnt = 1;
@@ -1235,41 +1472,69 @@ aoecmd_ata_id(struct aoedev *d)
skb->dev = t->ifp->nd;
- d->rttavg = MAXTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->timer.function = rexmit_timer;
- return skb_clone(skb, GFP_ATOMIC);
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
+ }
+
+ return skb;
}
-
+
+static struct aoetgt **
+grow_targets(struct aoedev *d)
+{
+ ulong oldn, newn;
+ struct aoetgt **tt;
+
+ oldn = d->ntargets;
+ newn = oldn * 2;
+ tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
+ if (!tt)
+ return NULL;
+ memmove(tt, d->targets, sizeof(*d->targets) * oldn);
+ d->tgt = tt + (d->tgt - d->targets);
+ kfree(d->targets);
+ d->targets = tt;
+ d->ntargets = newn;
+
+ return &d->targets[oldn];
+}
+
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
struct aoetgt *t, **tt, **te;
tt = d->targets;
- te = tt + NTARGETS;
+ te = tt + d->ntargets;
for (; tt < te && *tt; tt++)
;
if (tt == te) {
- printk(KERN_INFO
- "aoe: device addtgt failure; too many targets\n");
- return NULL;
+ tt = grow_targets(d);
+ if (!tt)
+ goto nomem;
}
t = kzalloc(sizeof(*t), GFP_ATOMIC);
- if (!t) {
- printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
- return NULL;
- }
-
- d->ntargets++;
+ if (!t)
+ goto nomem;
t->nframes = nframes;
t->d = d;
memcpy(t->addr, addr, sizeof t->addr);
t->ifp = t->ifs;
- t->maxout = t->nframes;
+ aoecmd_wreset(t);
+ t->maxout = t->nframes / 2;
INIT_LIST_HEAD(&t->ffree);
return *tt = t;
+
+ nomem:
+ pr_info("aoe: cannot allocate memory to add target\n");
+ return NULL;
}
static void
@@ -1279,7 +1544,7 @@ setdbcnt(struct aoedev *d)
int bcnt = 0;
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
if (bcnt == 0 || bcnt > (*t)->minbcnt)
bcnt = (*t)->minbcnt;
@@ -1373,7 +1638,11 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
spin_lock_irqsave(&d->lock, flags);
t = gettgt(d, h->src);
- if (!t) {
+ if (t) {
+ t->nframes = n;
+ if (n < t->maxout)
+ aoecmd_wreset(t);
+ } else {
t = addtgt(d, h->src, n);
if (!t)
goto bail;
@@ -1402,17 +1671,26 @@ bail:
}
void
+aoecmd_wreset(struct aoetgt *t)
+{
+ t->maxout = 1;
+ t->ssthresh = t->nframes / 2;
+ t->next_cwnd = t->nframes;
+}
+
+void
aoecmd_cleanslate(struct aoedev *d)
{
struct aoetgt **t, **te;
- d->mintimer = MINTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->maxbcnt = 0;
t = d->targets;
- te = t + NTARGETS;
+ te = t + d->ntargets;
for (; t < te && *t; t++)
- (*t)->maxout = (*t)->nframes;
+ aoecmd_wreset(*t);
}
void
@@ -1460,6 +1738,14 @@ aoe_flush_iocq(void)
int __init
aoecmd_init(void)
{
+ void *p;
+
+ /* get_zeroed_page returns page with ref count 1 */
+ p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ if (!p)
+ return -ENOMEM;
+ empty_page = virt_to_page(p);
+
INIT_LIST_HEAD(&iocq.head);
spin_lock_init(&iocq.lock);
init_waitqueue_head(&ktiowq);
@@ -1475,4 +1761,7 @@ aoecmd_exit(void)
{
aoe_ktstop(&kts);
aoe_flush_iocq();
+
+ free_page((unsigned long) page_address(empty_page));
+ empty_page = NULL;
}
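calc_rttavg() and rto() above implement the Jacobson/Karels estimator cited in the comment: the average and mean deviation are kept in fixed point (RTTSCALE and RTTDSCALE bits of fraction), so each per-response update needs only shifts and adds, and the retransmit timeout is avg + 4*dev in unscaled terms. A small userspace sketch of the same arithmetic; the helper names and the sample RTT values are made up, only the constants mirror aoe.h:

#include <stdio.h>

#define USEC_PER_SEC	1000000L
#define RTTSCALE	8
#define RTTDSCALE	3
#define RTTAVG_INIT	(USEC_PER_SEC / 4 << RTTSCALE)
#define RTTDEV_INIT	(RTTAVG_INIT / 4)

static long rttavg = RTTAVG_INIT;	/* scaled, as in struct aoedev */
static long rttdev = RTTDEV_INIT;

static void update_rtt(long rtt_us)	/* mirrors calc_rttavg() */
{
	long n = rtt_us;

	n -= rttavg >> RTTSCALE;	/* error vs. current unscaled average */
	rttavg += n;			/* gain of 1/2^RTTSCALE on the average */
	if (n < 0)
		n = -n;
	n -= rttdev >> RTTDSCALE;
	rttdev += n;			/* gain of 1/2^RTTDSCALE on the deviation */
}

static long rto_us(void)		/* mirrors rto() */
{
	long t = 2 * rttavg >> RTTSCALE;

	t += 8 * rttdev >> RTTDSCALE;
	return t ? t : 1;
}

int main(void)
{
	long samples[] = { 900, 1100, 950, 5000, 1000 };	/* made-up RTTs, us */
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_rtt(samples[i]);
		printf("rtt=%ldus -> timeout=%ldus\n", samples[i], rto_us());
	}
	return 0;
}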
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 90e5b537f94b..98f2965778b9 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -15,7 +15,6 @@
#include "aoe.h"
static void dummy_timer(ulong);
-static void aoedev_freedev(struct aoedev *);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);
@@ -69,25 +68,34 @@ minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin)
NPERSHELF = 16,
};
+ if (aoemin >= NPERSHELF) {
+ pr_err("aoe: %s %d slots per shelf\n",
+ "static minor device numbers support only",
+ NPERSHELF);
+ error = -1;
+ goto out;
+ }
+
n = aoemaj * NPERSHELF + aoemin;
- if (aoemin >= NPERSHELF || n >= N_DEVS) {
+ if (n >= N_DEVS) {
pr_err("aoe: %s with e%ld.%d\n",
"cannot use static minor device numbers",
aoemaj, aoemin);
error = -1;
- } else {
- spin_lock_irqsave(&used_minors_lock, flags);
- if (test_bit(n, used_minors)) {
- pr_err("aoe: %s %lu\n",
- "existing device already has static minor number",
- n);
- error = -1;
- } else
- set_bit(n, used_minors);
- spin_unlock_irqrestore(&used_minors_lock, flags);
+ goto out;
}
- *sysminor = n;
+ spin_lock_irqsave(&used_minors_lock, flags);
+ if (test_bit(n, used_minors)) {
+ pr_err("aoe: %s %lu\n",
+ "existing device already has static minor number",
+ n);
+ error = -1;
+ } else
+ set_bit(n, used_minors);
+ spin_unlock_irqrestore(&used_minors_lock, flags);
+ *sysminor = n * AOE_PARTITIONS;
+out:
return error;
}
@@ -170,41 +178,50 @@ aoe_failip(struct aoedev *d)
aoe_end_request(d, rq, 0);
}
+static void
+downdev_frame(struct list_head *pos)
+{
+ struct frame *f;
+
+ f = list_entry(pos, struct frame, head);
+ list_del(pos);
+ if (f->buf) {
+ f->buf->nframesout--;
+ aoe_failbuf(f->t->d, f->buf);
+ }
+ aoe_freetframe(f);
+}
+
void
aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
- struct frame *f;
struct list_head *head, *pos, *nx;
struct request *rq;
int i;
d->flags &= ~DEVFL_UP;
- /* clean out active buffers */
+ /* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
head = &d->factive[i];
- list_for_each_safe(pos, nx, head) {
- f = list_entry(pos, struct frame, head);
- list_del(pos);
- if (f->buf) {
- f->buf->nframesout--;
- aoe_failbuf(d, f->buf);
- }
- aoe_freetframe(f);
- }
+ list_for_each_safe(pos, nx, head)
+ downdev_frame(pos);
}
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head)
+ downdev_frame(pos);
+
/* reset window dressings */
tt = d->targets;
- te = tt + NTARGETS;
+ te = tt + d->ntargets;
for (; tt < te && (t = *tt); tt++) {
- t->maxout = t->nframes;
+ aoecmd_wreset(t);
t->nout = 0;
}
/* clean out the in-process request (if any) */
aoe_failip(d);
- d->htgt = NULL;
/* fast fail all pending I/O */
if (d->blkq) {
@@ -218,12 +235,48 @@ aoedev_downdev(struct aoedev *d)
set_capacity(d->gd, 0);
}
+/* return whether the user asked for this particular
+ * device to be flushed
+ */
+static int
+user_req(char *s, size_t slen, struct aoedev *d)
+{
+ char *p;
+ size_t lim;
+
+ if (!d->gd)
+ return 0;
+ p = strrchr(d->gd->disk_name, '/');
+ if (!p)
+ p = d->gd->disk_name;
+ else
+ p += 1;
+ lim = sizeof(d->gd->disk_name);
+ lim -= p - d->gd->disk_name;
+ if (slen < lim)
+ lim = slen;
+
+ return !strncmp(s, p, lim);
+}
+
static void
-aoedev_freedev(struct aoedev *d)
+freedev(struct aoedev *d)
{
struct aoetgt **t, **e;
+ int freeing = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ if (d->flags & DEVFL_TKILL
+ && !(d->flags & DEVFL_FREEING)) {
+ d->flags |= DEVFL_FREEING;
+ freeing = 1;
+ }
+ spin_unlock_irqrestore(&d->lock, flags);
+ if (!freeing)
+ return;
- cancel_work_sync(&d->work);
+ del_timer_sync(&d->timer);
if (d->gd) {
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
@@ -231,61 +284,113 @@ aoedev_freedev(struct aoedev *d)
blk_cleanup_queue(d->blkq);
}
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
freetgt(d, *t);
if (d->bufpool)
mempool_destroy(d->bufpool);
skbpoolfree(d);
minor_free(d->sysminor);
- kfree(d);
+
+ spin_lock_irqsave(&d->lock, flags);
+ d->flags |= DEVFL_FREED;
+ spin_unlock_irqrestore(&d->lock, flags);
}
-int
-aoedev_flush(const char __user *str, size_t cnt)
+enum flush_parms {
+ NOT_EXITING = 0,
+ EXITING = 1,
+};
+
+static int
+flush(const char __user *str, size_t cnt, int exiting)
{
ulong flags;
struct aoedev *d, **dd;
- struct aoedev *rmd = NULL;
char buf[16];
int all = 0;
+ int specified = 0; /* flush a specific device */
+ unsigned int skipflags;
+
+ skipflags = DEVFL_GDALLOC | DEVFL_NEWSIZE | DEVFL_TKILL;
- if (cnt >= 3) {
+ if (!exiting && cnt >= 3) {
if (cnt > sizeof buf)
cnt = sizeof buf;
if (copy_from_user(buf, str, cnt))
return -EFAULT;
all = !strncmp(buf, "all", 3);
+ if (!all)
+ specified = 1;
}
+ flush_scheduled_work();
+ /* pass one: without sleeping, do aoedev_downdev */
spin_lock_irqsave(&devlist_lock, flags);
- dd = &devlist;
- while ((d = *dd)) {
+ for (d = devlist; d; d = d->next) {
spin_lock(&d->lock);
- if ((!all && (d->flags & DEVFL_UP))
- || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
+ if (exiting) {
+ /* unconditionally take each device down */
+ } else if (specified) {
+ if (!user_req(buf, cnt, d))
+ goto cont;
+ } else if ((!all && (d->flags & DEVFL_UP))
+ || d->flags & skipflags
|| d->nopen
- || d->ref) {
- spin_unlock(&d->lock);
- dd = &d->next;
- continue;
- }
- *dd = d->next;
+ || d->ref)
+ goto cont;
+
aoedev_downdev(d);
d->flags |= DEVFL_TKILL;
+cont:
spin_unlock(&d->lock);
- d->next = rmd;
- rmd = d;
}
spin_unlock_irqrestore(&devlist_lock, flags);
- while ((d = rmd)) {
- rmd = d->next;
- del_timer_sync(&d->timer);
- aoedev_freedev(d); /* must be able to sleep */
+
+ /* pass two: call freedev, which might sleep,
+ * for aoedevs marked with DEVFL_TKILL
+ */
+restart:
+ spin_lock_irqsave(&devlist_lock, flags);
+ for (d = devlist; d; d = d->next) {
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_TKILL
+ && !(d->flags & DEVFL_FREEING)) {
+ spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&devlist_lock, flags);
+ freedev(d);
+ goto restart;
+ }
+ spin_unlock(&d->lock);
}
+
+ /* pass three: remove aoedevs marked with DEVFL_FREED */
+ for (dd = &devlist, d = *dd; d; d = *dd) {
+ struct aoedev *doomed = NULL;
+
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_FREED) {
+ *dd = d->next;
+ doomed = d;
+ } else {
+ dd = &d->next;
+ }
+ spin_unlock(&d->lock);
+ if (doomed)
+ kfree(doomed->targets);
+ kfree(doomed);
+ }
+ spin_unlock_irqrestore(&devlist_lock, flags);
+
return 0;
}
+int
+aoedev_flush(const char __user *str, size_t cnt)
+{
+ return flush(str, cnt, NOT_EXITING);
+}
+
/* This has been confirmed to occur once with Tms=3*1000 due to the
* driver changing link and not processing its transmit ring. The
* problem is hard enough to solve by returning an error that I'm
@@ -332,13 +437,20 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
struct aoedev *d;
int i;
ulong flags;
- ulong sysminor;
+ ulong sysminor = 0;
spin_lock_irqsave(&devlist_lock, flags);
for (d=devlist; d; d=d->next)
if (d->aoemajor == maj && d->aoeminor == min) {
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_TKILL) {
+ spin_unlock(&d->lock);
+ d = NULL;
+ goto out;
+ }
d->ref++;
+ spin_unlock(&d->lock);
break;
}
if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
@@ -346,6 +458,13 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d = kcalloc(1, sizeof *d, GFP_ATOMIC);
if (!d)
goto out;
+ d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC);
+ if (!d->targets) {
+ kfree(d);
+ d = NULL;
+ goto out;
+ }
+ d->ntargets = NTARGETS;
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
skb_queue_head_init(&d->skbpool);
@@ -359,10 +478,12 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d->ref = 1;
for (i = 0; i < NFACTIVE; i++)
INIT_LIST_HEAD(&d->factive[i]);
+ INIT_LIST_HEAD(&d->rexmitq);
d->sysminor = sysminor;
d->aoemajor = maj;
d->aoeminor = min;
- d->mintimer = MINTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->next = devlist;
devlist = d;
out:
@@ -396,21 +517,9 @@ freetgt(struct aoedev *d, struct aoetgt *t)
void
aoedev_exit(void)
{
- struct aoedev *d;
- ulong flags;
-
+ flush_scheduled_work();
aoe_flush_iocq();
- while ((d = devlist)) {
- devlist = d->next;
-
- spin_lock_irqsave(&d->lock, flags);
- aoedev_downdev(d);
- d->flags |= DEVFL_TKILL;
- spin_unlock_irqrestore(&d->lock, flags);
-
- del_timer_sync(&d->timer);
- aoedev_freedev(d);
- }
+ flush(NULL, 0, EXITING);
}
int __init
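Pass three of flush() above walks the singly linked devlist with a pointer-to-pointer cursor, so an entry marked DEVFL_FREED can be unlinked without tracking a separate "previous" node. The same traversal shape in isolation, with a hypothetical node type and helper name standing in for the aoedev list:

#include <stdlib.h>

struct node {			/* stand-in for struct aoedev */
	struct node *next;
	int doomed;		/* stand-in for the DEVFL_FREED test */
};

static struct node *list;

/* Remove and free every node marked doomed, keeping the rest linked. */
static void reap(void)
{
	struct node **pp, *n;

	for (pp = &list, n = *pp; n; n = *pp) {
		if (n->doomed) {
			*pp = n->next;	/* unlink: predecessor now skips n */
			free(n);
		} else {
			pp = &n->next;	/* advance the cursor */
		}
	}
}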
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 04793c2c701b..4b987c2fefbe 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -105,7 +105,7 @@ aoe_init(void)
aoechr_exit();
chr_fail:
aoedev_exit();
-
+
printk(KERN_INFO "aoe: initialisation failure.\n");
return ret;
}
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 162c6471275c..71d3ea8d3006 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -31,7 +31,7 @@ enum {
static char aoe_iflist[IFLISTSZ];
module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
-MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"");
+MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=dev1[,dev2...]");
static wait_queue_head_t txwq;
static struct ktstate kts;
@@ -52,13 +52,18 @@ static struct sk_buff_head skbtxq;
/* enters with txlock held */
static int
-tx(void)
+tx(void) __must_hold(&txlock)
{
struct sk_buff *skb;
+ struct net_device *ifp;
while ((skb = skb_dequeue(&skbtxq))) {
spin_unlock_irq(&txlock);
- dev_queue_xmit(skb);
+ ifp = skb->dev;
+ if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
+ pr_warn("aoe: packet could not be sent on %s. %s\n",
+ ifp ? ifp->name : "netif",
+ "consider increasing tx_queue_len");
spin_lock_irq(&txlock);
}
return 0;
@@ -119,8 +124,8 @@ aoenet_xmit(struct sk_buff_head *queue)
}
}
-/*
- * (1) len doesn't include the header by default. I want this.
+/*
+ * (1) len doesn't include the header by default. I want this.
*/
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
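In the aoenet.c hunk above, tx() drains skbtxq while holding txlock but releases the lock around dev_queue_xmit(), so the lock is not held across the network stack call; the new __must_hold(&txlock) annotation documents that enter-locked/exit-locked contract for static checkers such as sparse. A compact userspace sketch of the drain-with-lock-drop pattern, with hypothetical names throughout:

#include <pthread.h>

struct pkt {
	struct pkt *next;
};

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct pkt *queue;

static void transmit(struct pkt *p)	/* stands in for dev_queue_xmit() */
{
	(void)p;			/* a real driver hands p to the NIC here */
}

/* Caller holds qlock on entry; it is held again on return. */
static void drain(void)
{
	struct pkt *p;

	while ((p = queue) != NULL) {
		queue = p->next;
		pthread_mutex_unlock(&qlock);	/* don't hold the lock across */
		transmit(p);			/* the potentially slow call   */
		pthread_mutex_lock(&qlock);
	}
}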
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ca83f96756ad..ade58bc8f3c4 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -41,8 +41,9 @@
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/io.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
@@ -180,8 +181,8 @@ static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
sector_t total_size,
unsigned int block_size, InquiryData_struct *inq_buff,
drive_info_struct *drv);
-static void __devinit cciss_interrupt_mode(ctlr_info_t *);
-static int __devinit cciss_enter_simple_mode(struct ctlr_info *h);
+static void cciss_interrupt_mode(ctlr_info_t *);
+static int cciss_enter_simple_mode(struct ctlr_info *h);
static void start_io(ctlr_info_t *h);
static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
__u8 page_code, unsigned char scsi3addr[],
@@ -198,14 +199,13 @@ static void cciss_device_release(struct device *dev);
static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
static inline u32 next_command(ctlr_info_t *h);
-static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
- void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset);
-static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar);
+static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset);
+static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar);
static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
-static __devinit int write_driver_ver_to_cfgtable(
- CfgTable_struct __iomem *cfgtable);
+static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
@@ -549,7 +549,7 @@ static const struct file_operations cciss_proc_fops = {
.write = cciss_proc_write,
};
-static void __devinit cciss_procinit(ctlr_info_t *h)
+static void cciss_procinit(ctlr_info_t *h)
{
struct proc_dir_entry *pde;
@@ -978,8 +978,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h)
i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
if (i == h->nr_cmds)
return NULL;
- } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+ } while (test_and_set_bit(i, h->cmd_pool_bits) != 0);
c = h->cmd_pool + i;
memset(c, 0, sizeof(CommandList_struct));
cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
@@ -1046,8 +1045,7 @@ static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
int i;
i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ clear_bit(i, h->cmd_pool_bits);
h->nr_frees++;
}
@@ -2664,8 +2662,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
return status;
}
-static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
- u8 reset_type)
+static int cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
+ u8 reset_type)
{
CommandList_struct *c;
int return_status;
@@ -3920,7 +3918,7 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
+static void cciss_wait_for_mode_change_ack(ctlr_info_t *h)
{
int i;
@@ -3934,8 +3932,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
}
}
-static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
- u32 use_short_tags)
+static void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags)
{
/* This is a bit complicated. There are 8 registers on
* the controller which we write to to tell it 8 different
@@ -4001,7 +3998,7 @@ static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
" performant mode\n");
}
-static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
{
__u32 trans_support;
@@ -4063,7 +4060,7 @@ clean_up:
* controllers that are capable. If not, we use IO-APIC mode.
*/
-static void __devinit cciss_interrupt_mode(ctlr_info_t *h)
+static void cciss_interrupt_mode(ctlr_info_t *h)
{
#ifdef CONFIG_PCI_MSI
int err;
@@ -4109,7 +4106,7 @@ default_int_mode:
return;
}
-static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
int i;
u32 subsystem_vendor_id, subsystem_device_id;
@@ -4135,8 +4132,8 @@ static inline bool cciss_board_disabled(ctlr_info_t *h)
return ((command & PCI_COMMAND_MEMORY) == 0);
}
-static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar)
+static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar)
{
int i;
@@ -4152,8 +4149,8 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
- void __iomem *vaddr, int wait_for_ready)
+static int cciss_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready)
#define BOARD_READY 1
#define BOARD_NOT_READY 0
{
@@ -4180,9 +4177,9 @@ static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
- void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset)
+static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset)
{
*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
@@ -4196,7 +4193,7 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
return 0;
}
-static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
+static int cciss_find_cfgtables(ctlr_info_t *h)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -4225,7 +4222,7 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
return 0;
}
-static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
+static void cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
@@ -4246,7 +4243,7 @@ static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
* max commands, max SG elements without chaining, and with chaining,
* SG chain block size, etc.
*/
-static void __devinit cciss_find_board_params(ctlr_info_t *h)
+static void cciss_find_board_params(ctlr_info_t *h)
{
cciss_get_max_perf_mode_cmds(h);
h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
@@ -4268,10 +4265,7 @@ static void __devinit cciss_find_board_params(ctlr_info_t *h)
static inline bool CISS_signature_present(ctlr_info_t *h)
{
- if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
- (readb(&h->cfgtable->Signature[1]) != 'I') ||
- (readb(&h->cfgtable->Signature[2]) != 'S') ||
- (readb(&h->cfgtable->Signature[3]) != 'S')) {
+ if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
return false;
}
@@ -4308,7 +4302,7 @@ static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
}
-static int __devinit cciss_pci_init(ctlr_info_t *h)
+static int cciss_pci_init(ctlr_info_t *h)
{
int prod_index, err;
@@ -4428,7 +4422,8 @@ static void free_hba(ctlr_info_t *h)
}
/* Send a message CDB to the firmware. */
-static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type)
+static int cciss_message(struct pci_dev *pdev, unsigned char opcode,
+ unsigned char type)
{
typedef struct {
CommandListHeader_struct CommandHeader;
@@ -4575,14 +4570,13 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
return 0;
}
-static __devinit void init_driver_version(char *driver_version, int len)
+static void init_driver_version(char *driver_version, int len)
{
memset(driver_version, 0, len);
strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
}
-static __devinit int write_driver_ver_to_cfgtable(
- CfgTable_struct __iomem *cfgtable)
+static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable)
{
char *driver_version;
int i, size = sizeof(cfgtable->driver_version);
@@ -4598,8 +4592,8 @@ static __devinit int write_driver_ver_to_cfgtable(
return 0;
}
-static __devinit void read_driver_ver_from_cfgtable(
- CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver)
+static void read_driver_ver_from_cfgtable(CfgTable_struct __iomem *cfgtable,
+ unsigned char *driver_ver)
{
int i;
@@ -4607,8 +4601,7 @@ static __devinit void read_driver_ver_from_cfgtable(
driver_ver[i] = readb(&cfgtable->driver_version[i]);
}
-static __devinit int controller_reset_failed(
- CfgTable_struct __iomem *cfgtable)
+static int controller_reset_failed(CfgTable_struct __iomem *cfgtable)
{
char *driver_ver, *old_driver_ver;
@@ -4631,7 +4624,7 @@ static __devinit int controller_reset_failed(
/* This does a hard reset of the controller using PCI power management
* states or using the doorbell register. */
-static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
+static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -4776,7 +4769,7 @@ unmap_vaddr:
return rc;
}
-static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
+static int cciss_init_reset_devices(struct pci_dev *pdev)
{
int rc, i;
@@ -4810,10 +4803,9 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
return 0;
}
-static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
+static int cciss_allocate_cmd_pool(ctlr_info_t *h)
{
- h->cmd_pool_bits = kmalloc(
- DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
+ h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) *
sizeof(unsigned long), GFP_KERNEL);
h->cmd_pool = pci_alloc_consistent(h->pdev,
h->nr_cmds * sizeof(CommandList_struct),
@@ -4830,7 +4822,7 @@ static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
return 0;
}
-static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h)
+static int cciss_allocate_scatterlists(ctlr_info_t *h)
{
int i;
@@ -4898,7 +4890,7 @@ static int cciss_request_irq(ctlr_info_t *h,
return -1;
}
-static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h)
+static int cciss_kdump_soft_reset(ctlr_info_t *h)
{
if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
@@ -4957,8 +4949,7 @@ static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
* stealing all these major device numbers.
* returns the number of block devices registered.
*/
-static int __devinit cciss_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int i;
int j = 0;
@@ -5068,9 +5059,7 @@ reinit_after_soft_reset:
pci_set_drvdata(pdev, h);
/* command and error info recs zeroed out before
they are used */
- memset(h->cmd_pool_bits, 0,
- DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
- * sizeof(unsigned long));
+ bitmap_zero(h->cmd_pool_bits, h->nr_cmds);
h->num_luns = 0;
h->highest_lun = -1;
@@ -5214,7 +5203,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
free_irq(h->intr[h->intr_mode], h);
}
-static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
+static int cciss_enter_simple_mode(struct ctlr_info *h)
{
u32 trans_support;
@@ -5236,7 +5225,7 @@ static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
}
-static void __devexit cciss_remove_one(struct pci_dev *pdev)
+static void cciss_remove_one(struct pci_dev *pdev)
{
ctlr_info_t *h;
int i, j;
@@ -5315,7 +5304,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
static struct pci_driver cciss_pci_driver = {
.name = "cciss",
.probe = cciss_init_one,
- .remove = __devexit_p(cciss_remove_one),
+ .remove = cciss_remove_one,
.id_table = cciss_pci_device_id, /* id_table */
.shutdown = cciss_shutdown,
};
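
The cciss hunks above swap open-coded helpers for existing kernel primitives: check_signature() for the "CISS" config-table magic, and BITS_TO_LONGS() plus bitmap_zero() for the command-pool bitmap, so the allocation and the zeroing are expressed in the same units. A small userspace sketch of why the allocated size is unchanged, with the two macros restated locally (in the kernel they come from <linux/kernel.h> and <linux/bitops.h>):

/* Userspace illustration: BITS_TO_LONGS(n) * sizeof(long) allocates the
 * same number of bytes as the old DIV_ROUND_UP(n, BITS_PER_LONG) form. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nbits) DIV_ROUND_UP(nbits, BITS_PER_LONG)

int main(void)
{
	unsigned int nr_cmds = 250;	/* hypothetical queue depth */

	size_t old_bytes = DIV_ROUND_UP(nr_cmds, BITS_PER_LONG) * sizeof(unsigned long);
	size_t new_bytes = BITS_TO_LONGS(nr_cmds) * sizeof(unsigned long);

	assert(old_bytes == new_bytes);
	printf("%u commands -> %zu bytes of bitmap\n", nr_cmds, new_bytes);
	return 0;
}
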
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 9125bbeacd4d..3f087133a25a 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -320,7 +320,7 @@ static void release_io_mem(ctlr_info_t *c)
c->io_mem_length = 0;
}
-static void __devexit cpqarray_remove_one(int i)
+static void cpqarray_remove_one(int i)
{
int j;
char buff[4];
@@ -352,7 +352,7 @@ static void __devexit cpqarray_remove_one(int i)
free_hba(i);
}
-static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
+static void cpqarray_remove_one_pci(struct pci_dev *pdev)
{
int i;
ctlr_info_t *tmp_ptr;
@@ -377,7 +377,7 @@ static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
/* removing an instance that was not removed automatically..
* must be an eisa card.
*/
-static void __devexit cpqarray_remove_one_eisa (int i)
+static void cpqarray_remove_one_eisa(int i)
{
if (hba[i] == NULL) {
printk(KERN_ERR "cpqarray: controller %d appears to have"
@@ -388,7 +388,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
}
/* pdev is NULL for eisa */
-static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
+static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
{
struct request_queue *q;
int j;
@@ -505,8 +505,8 @@ Enomem4:
return -1;
}
-static int __devinit cpqarray_init_one( struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int cpqarray_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int i;
@@ -536,7 +536,7 @@ static int __devinit cpqarray_init_one( struct pci_dev *pdev,
static struct pci_driver cpqarray_pci_driver = {
.name = "cpqarray",
.probe = cpqarray_init_one,
- .remove = __devexit_p(cpqarray_remove_one_pci),
+ .remove = cpqarray_remove_one_pci,
.id_table = cpqarray_pci_device_id,
};
@@ -742,7 +742,7 @@ __setup("smart2=", cpqarray_setup);
/*
* Find an EISA controller's signature. Set up an hba if we find it.
*/
-static int __devinit cpqarray_eisa_detect(void)
+static int cpqarray_eisa_detect(void)
{
int i=0, j;
__u32 board_id;
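
As with cciss, the cpqarray changes drop the __devinit/__devexit section annotations and the __devexit_p() wrapper, leaving probe and remove as ordinary functions referenced directly from struct pci_driver. A minimal sketch of the resulting driver shape; the device ID and the callback bodies here are hypothetical, not the cpqarray ones:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical example device ID, not a real Smart Array ID. */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x0e11, 0xffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return pci_enable_device(pdev);	/* real drivers do much more here */
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name     = "example",
	.id_table = example_ids,
	.probe    = example_probe,	/* no __devinit */
	.remove   = example_remove,	/* no __devexit_p() wrapper */
};
module_pci_driver(example_driver);
MODULE_LICENSE("GPL");
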
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index df0983787390..7845bd6ee414 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -2,13 +2,14 @@
# DRBD device driver configuration
#
-comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
- depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
+comment "DRBD disabled because PROC_FS or INET not selected"
+ depends on PROC_FS='n' || INET='n'
config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
- depends on PROC_FS && INET && CONNECTOR
+ depends on PROC_FS && INET
select LRU_CACHE
+ select LIBCRC32C
default n
help
@@ -58,7 +59,8 @@ config DRBD_FAULT_INJECTION
32 data read
64 read ahead
128 kmalloc of bitmap
- 256 allocation of EE (epoch_entries)
+ 256 allocation of peer_requests
+ 512 insert data corruption on receiving side
fault_devs: bitmask of minor numbers
fault_rate: frequency in percent
diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile
index 0d3f337ff5ff..8b450338075e 100644
--- a/drivers/block/drbd/Makefile
+++ b/drivers/block/drbd/Makefile
@@ -1,5 +1,7 @@
drbd-y := drbd_bitmap.o drbd_proc.o
drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
+drbd-y += drbd_interval.o drbd_state.o
+drbd-y += drbd_nla.o
obj-$(CONFIG_BLK_DEV_DRBD) += drbd.o
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 3fbef018ce55..92510f8ad013 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -24,21 +24,73 @@
*/
#include <linux/slab.h>
+#include <linux/crc32c.h>
#include <linux/drbd.h>
+#include <linux/drbd_limits.h>
+#include <linux/dynamic_debug.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
-/* We maintain a trivial checksum in our on disk activity log.
- * With that we can ensure correct operation even when the storage
- * device might do a partial (last) sector write while losing power.
- */
-struct __packed al_transaction {
- u32 magic;
- u32 tr_number;
- struct __packed {
- u32 pos;
- u32 extent; } updates[1 + AL_EXTENTS_PT];
- u32 xor_sum;
+
+enum al_transaction_types {
+ AL_TR_UPDATE = 0,
+ AL_TR_INITIALIZED = 0xffff
+};
+/* all fields on disc in big endian */
+struct __packed al_transaction_on_disk {
+ /* don't we all like magic */
+ __be32 magic;
+
+ /* to identify the most recent transaction block
+ * in the on disk ring buffer */
+ __be32 tr_number;
+
+ /* checksum on the full 4k block, with this field set to 0. */
+ __be32 crc32c;
+
+ /* type of transaction, special transaction types like:
+ * purge-all, set-all-idle, set-all-active, ... to-be-defined
+ * see also enum al_transaction_types */
+ __be16 transaction_type;
+
+ /* we currently allow only a few thousand extents,
+ * so 16bit will be enough for the slot number. */
+
+ /* how many updates in this transaction */
+ __be16 n_updates;
+
+ /* maximum slot number, "al-extents" in drbd.conf speak.
+ * Having this in each transaction should make reconfiguration
+ * of that parameter easier. */
+ __be16 context_size;
+
+ /* slot number the context starts with */
+ __be16 context_start_slot_nr;
+
+ /* Some reserved bytes. Expected usage is a 64bit counter of
+ * sectors-written since device creation, and other data generation tag
+ * supporting usage */
+ __be32 __reserved[4];
+
+	/* --- 36 bytes used --- */
+
+ /* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
+ * in one transaction, then use the remaining byte in the 4k block for
+ * context information. "Flexible" number of updates per transaction
+ * does not help, as we have to account for the case when all update
+ * slots are used anyways, so it would only complicate code without
+ * additional benefit.
+ */
+ __be16 update_slot_nr[AL_UPDATES_PER_TRANSACTION];
+
+ /* but the extent number is 32bit, which at an extent size of 4 MiB
+ * allows to cover device sizes of up to 2**54 Byte (16 PiB) */
+ __be32 update_extent_nr[AL_UPDATES_PER_TRANSACTION];
+
+ /* --- 420 bytes used (36 + 64*6) --- */
+
+ /* 4096 - 420 = 3676 = 919 * 4 */
+ __be32 context[AL_CONTEXT_PER_TRANSACTION];
};
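
The byte-count comments in the new on-disk layout can be checked directly: with 16-bit slot numbers and 32-bit extent numbers, and taking AL_UPDATES_PER_TRANSACTION as 64 and AL_CONTEXT_PER_TRANSACTION as 919 (the values implied by the 36 + 64*6 = 420 and 919*4 = 3676 comments), the structure fills one 4096-byte metadata block exactly. A compile-time sketch of that arithmetic, with the constants restated locally under those assumptions:

/* Layout check for the 4 KiB on-disk AL transaction block (sketch). */
#include <stdint.h>

#define AL_UPDATES_PER_TRANSACTION 64
#define AL_CONTEXT_PER_TRANSACTION 919

struct al_transaction_layout {
	uint32_t magic, tr_number, crc32c;		/* 12 bytes */
	uint16_t transaction_type, n_updates;
	uint16_t context_size, context_start_slot_nr;	/* +8  -> 20 bytes */
	uint32_t reserved[4];				/* +16 -> 36 bytes */
	uint16_t update_slot_nr[AL_UPDATES_PER_TRANSACTION];	/* +128 -> 164 */
	uint32_t update_extent_nr[AL_UPDATES_PER_TRANSACTION];	/* +256 -> 420 */
	uint32_t context[AL_CONTEXT_PER_TRANSACTION];		/* +3676 -> 4096 */
} __attribute__((packed));

_Static_assert(sizeof(struct al_transaction_layout) == 4096,
	       "transaction block must fill one 4 KiB metadata block");
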
struct update_odbm_work {
@@ -48,22 +100,11 @@ struct update_odbm_work {
struct update_al_work {
struct drbd_work w;
- struct lc_element *al_ext;
struct completion event;
- unsigned int enr;
- /* if old_enr != LC_FREE, write corresponding bitmap sector, too */
- unsigned int old_enr;
-};
-
-struct drbd_atodb_wait {
- atomic_t count;
- struct completion io_done;
- struct drbd_conf *mdev;
- int error;
+ int err;
};
-
-int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
+static int al_write_transaction(struct drbd_conf *mdev);
void *drbd_md_get_buffer(struct drbd_conf *mdev)
{
@@ -82,22 +123,24 @@ void drbd_md_put_buffer(struct drbd_conf *mdev)
wake_up(&mdev->misc_wait);
}
-static bool md_io_allowed(struct drbd_conf *mdev)
-{
- enum drbd_disk_state ds = mdev->state.disk;
- return ds >= D_NEGOTIATING || ds == D_ATTACHING;
-}
-
-void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
unsigned int *done)
{
- long dt = bdev->dc.disk_timeout * HZ / 10;
+ long dt;
+
+ rcu_read_lock();
+ dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
+ rcu_read_unlock();
+ dt = dt * HZ / 10;
if (dt == 0)
dt = MAX_SCHEDULE_TIMEOUT;
- dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
- if (dt == 0)
+ dt = wait_event_timeout(mdev->misc_wait,
+ *done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+ if (dt == 0) {
dev_err(DEV, "meta-data IO operation timed out\n");
+ drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
+ }
}
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
@@ -106,7 +149,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
int rw, int size)
{
struct bio *bio;
- int ok;
+ int err;
mdev->md_io.done = 0;
mdev->md_io.error = -ENODEV;
@@ -118,8 +161,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
- ok = (bio_add_page(bio, page, size, 0) == size);
- if (!ok)
+ err = -EIO;
+ if (bio_add_page(bio, page, size, 0) != size)
goto out;
bio->bi_private = &mdev->md_io;
bio->bi_end_io = drbd_md_io_complete;
@@ -127,7 +170,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
- ok = 0;
+ err = -ENODEV;
goto out;
}
@@ -137,86 +180,47 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
- ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
+ wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
+ if (bio_flagged(bio, BIO_UPTODATE))
+ err = mdev->md_io.error;
out:
bio_put(bio);
- return ok;
+ return err;
}
int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
sector_t sector, int rw)
{
- int logical_block_size, mask, ok;
- int offset = 0;
+ int err;
struct page *iop = mdev->md_io_page;
D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
- logical_block_size = bdev_logical_block_size(bdev->md_bdev);
- if (logical_block_size == 0)
- logical_block_size = MD_SECTOR_SIZE;
-
- /* in case logical_block_size != 512 [ s390 only? ] */
- if (logical_block_size != MD_SECTOR_SIZE) {
- mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
- D_ASSERT(mask == 1 || mask == 3 || mask == 7);
- D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
- offset = sector & mask;
- sector = sector & ~mask;
- iop = mdev->md_io_tmpp;
-
- if (rw & WRITE) {
- /* these are GFP_KERNEL pages, pre-allocated
- * on device initialization */
- void *p = page_address(mdev->md_io_page);
- void *hp = page_address(mdev->md_io_tmpp);
-
- ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
- READ, logical_block_size);
-
- if (unlikely(!ok)) {
- dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
- "READ [logical_block_size!=512]) failed!\n",
- (unsigned long long)sector);
- return 0;
- }
-
- memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
- }
- }
+ dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s)\n",
+ current->comm, current->pid, __func__,
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
if (sector < drbd_md_first_sector(bdev) ||
- sector > drbd_md_last_sector(bdev))
+ sector + 7 > drbd_md_last_sector(bdev))
dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
- if (unlikely(!ok)) {
- dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- return 0;
- }
-
- if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
- void *p = page_address(mdev->md_io_page);
- void *hp = page_address(mdev->md_io_tmpp);
-
- memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
+ err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, MD_BLOCK_SIZE);
+ if (err) {
+ dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
}
-
- return ok;
+ return err;
}
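
drbd_md_sync_page_io() now always transfers MD_BLOCK_SIZE instead of adapting to the backing device's logical block size, which is why the range check becomes "sector + 7 > drbd_md_last_sector(bdev)": assuming MD_BLOCK_SIZE is 4096, a metadata block is eight 512-byte sectors, so the last sector touched by a write starting at sector is sector + 7. A small worked sketch of that bound:

/* Sketch: last 512-byte sector touched by a 4 KiB metadata block
 * starting at 'sector' (MD_BLOCK_SIZE assumed to be 4096). */
#include <stdio.h>

#define MD_BLOCK_SIZE 4096
#define SECTOR_SIZE    512

int main(void)
{
	unsigned long long sector = 1000;	/* hypothetical start */
	unsigned long long last = sector + MD_BLOCK_SIZE / SECTOR_SIZE - 1;

	printf("write covers sectors %llu..%llu\n", sector, last);	/* 1000..1007 */
	return 0;
}
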
static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
struct lc_element *al_ext;
struct lc_element *tmp;
- unsigned long al_flags = 0;
int wake;
spin_lock_irq(&mdev->al_lock);
@@ -231,76 +235,92 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
return NULL;
}
}
- al_ext = lc_get(mdev->act_log, enr);
- al_flags = mdev->act_log->flags;
+ al_ext = lc_get(mdev->act_log, enr);
spin_unlock_irq(&mdev->al_lock);
-
- /*
- if (!al_ext) {
- if (al_flags & LC_STARVING)
- dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
- if (al_flags & LC_DIRTY)
- dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
- }
- */
-
return al_ext;
}
-void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
- unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
- struct lc_element *al_ext;
- struct update_al_work al_work;
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned enr;
+ bool locked = false;
+
+ D_ASSERT(first <= last);
D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
- wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));
+ for (enr = first; enr <= last; enr++)
+ wait_event(mdev->al_wait, _al_get(mdev, enr) != NULL);
+
+ /* Serialize multiple transactions.
+ * This uses test_and_set_bit, memory barrier is implicit.
+ */
+ wait_event(mdev->al_wait,
+ mdev->act_log->pending_changes == 0 ||
+ (locked = lc_try_lock_for_transaction(mdev->act_log)));
- if (al_ext->lc_number != enr) {
+ if (locked) {
/* drbd_al_write_transaction(mdev,al_ext,enr);
* recurses into generic_make_request(), which
* disallows recursion, bios being serialized on the
* current->bio_tail list now.
* we have to delegate updates to the activity log
* to the worker thread. */
- init_completion(&al_work.event);
- al_work.al_ext = al_ext;
- al_work.enr = enr;
- al_work.old_enr = al_ext->lc_number;
- al_work.w.cb = w_al_write_transaction;
- drbd_queue_work_front(&mdev->data.work, &al_work.w);
- wait_for_completion(&al_work.event);
-
- mdev->al_writ_cnt++;
-
- spin_lock_irq(&mdev->al_lock);
- lc_changed(mdev->act_log, al_ext);
- spin_unlock_irq(&mdev->al_lock);
+
+ /* Double check: it may have been committed by someone else,
+ * while we have been waiting for the lock. */
+ if (mdev->act_log->pending_changes) {
+ bool write_al_updates;
+
+ rcu_read_lock();
+ write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ rcu_read_unlock();
+
+ if (write_al_updates) {
+ al_write_transaction(mdev);
+ mdev->al_writ_cnt++;
+ }
+
+ spin_lock_irq(&mdev->al_lock);
+ /* FIXME
+ if (err)
+ we need an "lc_cancel" here;
+ */
+ lc_committed(mdev->act_log);
+ spin_unlock_irq(&mdev->al_lock);
+ }
+ lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
}
}
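
drbd_al_begin_io() now maps the request interval to a range of activity-log extents rather than a single one, since a bio may straddle an extent boundary. Assuming AL_EXTENT_SHIFT is 22 (4 MiB extents, as the update_extent_nr comment above indicates), first and last are the extent numbers of the interval's first and last 512-byte sector. A small sketch of that mapping with a boundary-crossing example:

/* Sketch: which 4 MiB activity-log extents does an interval touch?
 * (AL_EXTENT_SHIFT assumed to be 22, i.e. 4 MiB extents.) */
#include <stdio.h>

#define AL_EXTENT_SHIFT 22	/* 4 MiB */

int main(void)
{
	unsigned long long sector = 8190;	/* hypothetical: just below an extent boundary */
	unsigned int size = 4096;		/* 8 sectors -> crosses into the next extent */

	unsigned first = sector >> (AL_EXTENT_SHIFT - 9);
	unsigned last  = size == 0 ? first
		: (unsigned)((sector + (size >> 9) - 1) >> (AL_EXTENT_SHIFT - 9));

	printf("extents %u..%u must be active\n", first, last);	/* 0..1 */
	return 0;
}
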
-void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
- unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned enr;
struct lc_element *extent;
unsigned long flags;
+ D_ASSERT(first <= last);
spin_lock_irqsave(&mdev->al_lock, flags);
- extent = lc_find(mdev->act_log, enr);
-
- if (!extent) {
- spin_unlock_irqrestore(&mdev->al_lock, flags);
- dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
- return;
+ for (enr = first; enr <= last; enr++) {
+ extent = lc_find(mdev->act_log, enr);
+ if (!extent) {
+ dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
+ continue;
+ }
+ lc_put(mdev->act_log, extent);
}
-
- if (lc_put(mdev->act_log, extent) == 0)
- wake_up(&mdev->al_wait);
-
spin_unlock_irqrestore(&mdev->al_lock, flags);
+ wake_up(&mdev->al_wait);
}
#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
@@ -326,296 +346,148 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
return rs_enr >>
/* bit to page */
((PAGE_SHIFT + 3) -
- /* al extent number to bit */
+ /* resync extent number to bit */
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
-int
-w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int
+_al_write_transaction(struct drbd_conf *mdev)
{
- struct update_al_work *aw = container_of(w, struct update_al_work, w);
- struct lc_element *updated = aw->al_ext;
- const unsigned int new_enr = aw->enr;
- const unsigned int evicted = aw->old_enr;
- struct al_transaction *buffer;
+ struct al_transaction_on_disk *buffer;
+ struct lc_element *e;
sector_t sector;
- int i, n, mx;
- unsigned int extent_nr;
- u32 xor_sum = 0;
+ int i, mx;
+ unsigned extent_nr;
+ unsigned crc = 0;
+ int err = 0;
if (!get_ldev(mdev)) {
- dev_err(DEV,
- "disk is %s, cannot start al transaction (-%d +%d)\n",
- drbd_disk_str(mdev->state.disk), evicted, new_enr);
- complete(&((struct update_al_work *)w)->event);
- return 1;
+ dev_err(DEV, "disk is %s, cannot start al transaction\n",
+ drbd_disk_str(mdev->state.disk));
+ return -EIO;
}
- /* do we have to do a bitmap write, first?
- * TODO reduce maximum latency:
- * submit both bios, then wait for both,
- * instead of doing two synchronous sector writes.
- * For now, we must not write the transaction,
- * if we cannot write out the bitmap of the evicted extent. */
- if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
- drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));
/* The bitmap write may have failed, causing a state change. */
if (mdev->state.disk < D_INCONSISTENT) {
dev_err(DEV,
- "disk is %s, cannot write al transaction (-%d +%d)\n",
- drbd_disk_str(mdev->state.disk), evicted, new_enr);
- complete(&((struct update_al_work *)w)->event);
+ "disk is %s, cannot write al transaction\n",
+ drbd_disk_str(mdev->state.disk));
put_ldev(mdev);
- return 1;
+ return -EIO;
}
buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
if (!buffer) {
dev_err(DEV, "disk failed while waiting for md_io buffer\n");
- complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
- return 1;
+ return -ENODEV;
}
- buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
+ memset(buffer, 0, sizeof(*buffer));
+ buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
- n = lc_index_of(mdev->act_log, updated);
+ i = 0;
+
+ /* Even though no one can start to change this list
+ * once we set the LC_LOCKED -- from drbd_al_begin_io(),
+ * lc_try_lock_for_transaction() --, someone may still
+ * be in the process of changing it. */
+ spin_lock_irq(&mdev->al_lock);
+ list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
+ if (i == AL_UPDATES_PER_TRANSACTION) {
+ i++;
+ break;
+ }
+ buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
+ buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
+ if (e->lc_number != LC_FREE)
+ drbd_bm_mark_for_writeout(mdev,
+ al_extent_to_bm_page(e->lc_number));
+ i++;
+ }
+ spin_unlock_irq(&mdev->al_lock);
+ BUG_ON(i > AL_UPDATES_PER_TRANSACTION);
- buffer->updates[0].pos = cpu_to_be32(n);
- buffer->updates[0].extent = cpu_to_be32(new_enr);
+ buffer->n_updates = cpu_to_be16(i);
+ for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
+ buffer->update_slot_nr[i] = cpu_to_be16(-1);
+ buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
+ }
- xor_sum ^= new_enr;
+ buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
+ buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);
- mx = min_t(int, AL_EXTENTS_PT,
+ mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
mdev->act_log->nr_elements - mdev->al_tr_cycle);
for (i = 0; i < mx; i++) {
unsigned idx = mdev->al_tr_cycle + i;
extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
- buffer->updates[i+1].pos = cpu_to_be32(idx);
- buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
- xor_sum ^= extent_nr;
- }
- for (; i < AL_EXTENTS_PT; i++) {
- buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
- buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
- xor_sum ^= LC_FREE;
+ buffer->context[i] = cpu_to_be32(extent_nr);
}
- mdev->al_tr_cycle += AL_EXTENTS_PT;
+ for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
+ buffer->context[i] = cpu_to_be32(LC_FREE);
+
+ mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
mdev->al_tr_cycle = 0;
- buffer->xor_sum = cpu_to_be32(xor_sum);
-
sector = mdev->ldev->md.md_offset
- + mdev->ldev->md.al_offset + mdev->al_tr_pos;
-
- if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ + mdev->ldev->md.al_offset
+ + mdev->al_tr_pos * (MD_BLOCK_SIZE>>9);
- if (++mdev->al_tr_pos >
- div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
- mdev->al_tr_pos = 0;
+ crc = crc32c(0, buffer, 4096);
+ buffer->crc32c = cpu_to_be32(crc);
- D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
- mdev->al_tr_number++;
+ if (drbd_bm_write_hinted(mdev))
+ err = -EIO;
+ /* drbd_chk_io_error done already */
+ else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ err = -EIO;
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ } else {
+ /* advance ringbuffer position and transaction counter */
+ mdev->al_tr_pos = (mdev->al_tr_pos + 1) % (MD_AL_SECTORS*512/MD_BLOCK_SIZE);
+ mdev->al_tr_number++;
+ }
drbd_md_put_buffer(mdev);
-
- complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
- return 1;
+ return err;
}
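
The new transaction format protects the whole 4 KiB block with CRC32C instead of the old XOR sum: the buffer is zeroed, filled in, the checksum is computed over the complete block while the crc32c field itself is still zero, and only then stored. A reader can therefore validate a block by saving the stored value, re-zeroing the field, and recomputing. A minimal sketch of that check, using the same crc32c() from <linux/crc32c.h> that the hunk above uses; the helper name here is illustrative:

/* Sketch: validate an on-disk AL transaction block (kernel context,
 * buffer layout as in struct al_transaction_on_disk above). */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/crc32c.h>

static bool al_block_crc_ok(struct al_transaction_on_disk *b)
{
	__be32 stored = b->crc32c;
	u32 computed;

	b->crc32c = 0;				/* field is zero while summing */
	computed = crc32c(0, b, 4096);
	b->crc32c = stored;			/* restore for the caller */

	return stored == cpu_to_be32(computed);
}
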
-/**
- * drbd_al_read_tr() - Read a single transaction from the on disk activity log
- * @mdev: DRBD device.
- * @bdev: Block device to read form.
- * @b: pointer to an al_transaction.
- * @index: On disk slot of the transaction to read.
- *
- * Returns -1 on IO error, 0 on checksum error and 1 upon success.
- */
-static int drbd_al_read_tr(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev,
- struct al_transaction *b,
- int index)
-{
- sector_t sector;
- int rv, i;
- u32 xor_sum = 0;
-
- sector = bdev->md.md_offset + bdev->md.al_offset + index;
-
- /* Dont process error normally,
- * as this is done before disk is attached! */
- if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
- return -1;
-
- rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);
-
- for (i = 0; i < AL_EXTENTS_PT + 1; i++)
- xor_sum ^= be32_to_cpu(b->updates[i].extent);
- rv &= (xor_sum == be32_to_cpu(b->xor_sum));
- return rv;
-}
-
-/**
- * drbd_al_read_log() - Restores the activity log from its on disk representation.
- * @mdev: DRBD device.
- * @bdev: Block device to read form.
- *
- * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
- */
-int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+static int w_al_write_transaction(struct drbd_work *w, int unused)
{
- struct al_transaction *buffer;
- int i;
- int rv;
- int mx;
- int active_extents = 0;
- int transactions = 0;
- int found_valid = 0;
- int from = 0;
- int to = 0;
- u32 from_tnr = 0;
- u32 to_tnr = 0;
- u32 cnr;
-
- mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);
-
- /* lock out all other meta data io for now,
- * and make sure the page is mapped.
- */
- buffer = drbd_md_get_buffer(mdev);
- if (!buffer)
- return 0;
-
- /* Find the valid transaction in the log */
- for (i = 0; i <= mx; i++) {
- rv = drbd_al_read_tr(mdev, bdev, buffer, i);
- if (rv == 0)
- continue;
- if (rv == -1) {
- drbd_md_put_buffer(mdev);
- return 0;
- }
- cnr = be32_to_cpu(buffer->tr_number);
-
- if (++found_valid == 1) {
- from = i;
- to = i;
- from_tnr = cnr;
- to_tnr = cnr;
- continue;
- }
- if ((int)cnr - (int)from_tnr < 0) {
- D_ASSERT(from_tnr - cnr + i - from == mx+1);
- from = i;
- from_tnr = cnr;
- }
- if ((int)cnr - (int)to_tnr > 0) {
- D_ASSERT(cnr - to_tnr == i - to);
- to = i;
- to_tnr = cnr;
- }
- }
-
- if (!found_valid) {
- dev_warn(DEV, "No usable activity log found.\n");
- drbd_md_put_buffer(mdev);
- return 1;
- }
-
- /* Read the valid transactions.
- * dev_info(DEV, "Reading from %d to %d.\n",from,to); */
- i = from;
- while (1) {
- int j, pos;
- unsigned int extent_nr;
- unsigned int trn;
-
- rv = drbd_al_read_tr(mdev, bdev, buffer, i);
- ERR_IF(rv == 0) goto cancel;
- if (rv == -1) {
- drbd_md_put_buffer(mdev);
- return 0;
- }
-
- trn = be32_to_cpu(buffer->tr_number);
-
- spin_lock_irq(&mdev->al_lock);
-
- /* This loop runs backwards because in the cyclic
- elements there might be an old version of the
- updated element (in slot 0). So the element in slot 0
- can overwrite old versions. */
- for (j = AL_EXTENTS_PT; j >= 0; j--) {
- pos = be32_to_cpu(buffer->updates[j].pos);
- extent_nr = be32_to_cpu(buffer->updates[j].extent);
-
- if (extent_nr == LC_FREE)
- continue;
-
- lc_set(mdev->act_log, extent_nr, pos);
- active_extents++;
- }
- spin_unlock_irq(&mdev->al_lock);
-
- transactions++;
-
-cancel:
- if (i == to)
- break;
- i++;
- if (i > mx)
- i = 0;
- }
-
- mdev->al_tr_number = to_tnr+1;
- mdev->al_tr_pos = to;
- if (++mdev->al_tr_pos >
- div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
- mdev->al_tr_pos = 0;
-
- /* ok, we are done with it */
- drbd_md_put_buffer(mdev);
+ struct update_al_work *aw = container_of(w, struct update_al_work, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
- dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
- transactions, active_extents);
+ err = _al_write_transaction(mdev);
+ aw->err = err;
+ complete(&aw->event);
- return 1;
+ return err != -EIO ? err : 0;
}
-/**
- * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents
- * @mdev: DRBD device.
- */
-void drbd_al_apply_to_bm(struct drbd_conf *mdev)
+/* Calls from worker context (see w_restart_disk_io()) need to write the
+ transaction directly. Others came through generic_make_request(),
+ those need to delegate it to the worker. */
+static int al_write_transaction(struct drbd_conf *mdev)
{
- unsigned int enr;
- unsigned long add = 0;
- char ppb[10];
- int i, tmp;
-
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+ struct update_al_work al_work;
- for (i = 0; i < mdev->act_log->nr_elements; i++) {
- enr = lc_element_by_index(mdev->act_log, i)->lc_number;
- if (enr == LC_FREE)
- continue;
- tmp = drbd_bm_ALe_set_all(mdev, enr);
- dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
- add += tmp;
- }
+ if (current == mdev->tconn->worker.task)
+ return _al_write_transaction(mdev);
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
+ init_completion(&al_work.event);
+ al_work.w.cb = w_al_write_transaction;
+ al_work.w.mdev = mdev;
+ drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+ wait_for_completion(&al_work.event);
- dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
- ppsize(ppb, Bit2KB(add)));
+ return al_work.err;
}
static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
@@ -645,7 +517,7 @@ void drbd_al_shrink(struct drbd_conf *mdev)
struct lc_element *al_ext;
int i;
- D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));
+ D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));
for (i = 0; i < mdev->act_log->nr_elements; i++) {
al_ext = lc_element_by_index(mdev->act_log, i);
@@ -657,15 +529,17 @@ void drbd_al_shrink(struct drbd_conf *mdev)
wake_up(&mdev->al_wait);
}
-static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_update_odbm(struct drbd_work *w, int unused)
{
struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
+ struct drbd_conf *mdev = w->mdev;
+ struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
if (!get_ldev(mdev)) {
if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
kfree(udw);
- return 1;
+ return 0;
}
drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
@@ -683,9 +557,9 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
break;
}
}
- drbd_bcast_sync_progress(mdev);
+ drbd_bcast_event(mdev, &sib);
- return 1;
+ return 0;
}
@@ -755,7 +629,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
}
ext->rs_left = rs_left;
ext->rs_failed = success ? 0 : count;
- lc_changed(mdev->resync, &ext->lce);
+ /* we don't keep a persistent log of the resync lru,
+ * we can commit any change right away. */
+ lc_committed(mdev->resync);
}
lc_put(mdev->resync, &ext->lce);
/* no race, we are within the al_lock! */
@@ -767,7 +643,8 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
if (udw) {
udw->enr = ext->lce.lc_number;
udw->w.cb = w_update_odbm;
- drbd_queue_work_front(&mdev->data.work, &udw->w);
+ udw->w.mdev = mdev;
+ drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
} else {
dev_warn(DEV, "Could not kmalloc an udw\n");
}
@@ -813,16 +690,22 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
int wake_up = 0;
unsigned long flags;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
+
+ if (!get_ldev(mdev))
+ return; /* no disk, no metadata, no bitmap to clear bits in */
+
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors) return;
- ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
+ if (!expect(sector < nr_sectors))
+ goto out;
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
@@ -830,7 +713,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
* round up start sector, round down end sector. we make sure we only
* clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
if (unlikely(esector < BM_SECT_PER_BIT-1))
- return;
+ goto out;
if (unlikely(esector == (nr_sectors-1)))
ebnr = lbnr;
else
@@ -838,14 +721,14 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
if (sbnr > ebnr)
- return;
+ goto out;
/*
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
- if (count && get_ldev(mdev)) {
+ if (count) {
drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
spin_lock_irqsave(&mdev->al_lock, flags);
drbd_try_clear_on_disk_bm(mdev, sector, count, true);
@@ -854,8 +737,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
	/* just wake_up unconditionally now, various lc_changed(),
* lc_put() in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
- put_ldev(mdev);
}
+out:
+ put_ldev(mdev);
if (wake_up)
wake_up(&mdev->al_wait);
}
@@ -871,7 +755,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
const char *file, const unsigned int line)
{
- unsigned long sbnr, ebnr, lbnr, flags;
+ unsigned long sbnr, ebnr, flags;
sector_t esector, nr_sectors;
unsigned int enr, count = 0;
struct lc_element *e;
@@ -880,7 +764,7 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
if (size == 0)
return 0;
- if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
@@ -892,12 +776,10 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors)
+ if (!expect(sector < nr_sectors))
goto out;
- ERR_IF(esector >= nr_sectors)
- esector = (nr_sectors-1);
-
- lbnr = BM_SECT_TO_BIT(nr_sectors-1);
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
/* we set it out of sync,
* we do not need to round anything here */
@@ -940,7 +822,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0;
- lc_changed(mdev->resync, &bm_ext->lce);
+ lc_committed(mdev->resync);
wakeup = 1;
}
if (bm_ext->lce.refcnt == 1)
@@ -956,7 +838,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n");
- BUG_ON(rs_flags & LC_DIRTY);
+ BUG_ON(rs_flags & LC_LOCKED);
}
return bm_ext;
@@ -964,26 +846,12 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
- struct lc_element *al_ext;
- int rv = 0;
+ int rv;
spin_lock_irq(&mdev->al_lock);
- if (unlikely(enr == mdev->act_log->new_number))
- rv = 1;
- else {
- al_ext = lc_find(mdev->act_log, enr);
- if (al_ext) {
- if (al_ext->refcnt)
- rv = 1;
- }
- }
+ rv = lc_is_used(mdev->act_log, enr);
spin_unlock_irq(&mdev->al_lock);
- /*
- if (unlikely(rv)) {
- dev_info(DEV, "Delaying sync read until app's write is done\n");
- }
- */
return rv;
}
@@ -1113,13 +981,13 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n");
- BUG_ON(rs_flags & LC_DIRTY);
+ BUG_ON(rs_flags & LC_LOCKED);
goto try_again;
}
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0;
- lc_changed(mdev->resync, &bm_ext->lce);
+ lc_committed(mdev->resync);
wake_up(&mdev->al_wait);
D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
}
@@ -1130,8 +998,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
}
check_al:
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
- if (unlikely(al_enr+i == mdev->act_log->new_number))
- goto try_again;
if (lc_is_used(mdev->act_log, al_enr+i))
goto try_again;
}
@@ -1266,7 +1132,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
sector_t esector, nr_sectors;
int wake_up = 0;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
@@ -1274,8 +1140,10 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors) return;
- ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
+ if (!expect(sector < nr_sectors))
+ return;
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
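
Several size checks in drbd_actlog.c switch from "(size & 0x1ff) != 0" to "!IS_ALIGNED(size, 512)"; the two are equivalent for the 512-byte sector size, the macro only states the intent. A one-loop sketch of the equivalence, with IS_ALIGNED restated locally along the lines of its <linux/kernel.h> definition:

/* Userspace sketch: IS_ALIGNED(x, 512) is the readable form of !(x & 0x1ff). */
#include <assert.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	for (unsigned int size = 0; size < 4096; size++)
		assert(IS_ALIGNED(size, 512) == !(size & 0x1ff));
	return 0;
}
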
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index d84566496746..8dc29502dc08 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -119,13 +119,9 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
if (!__ratelimit(&drbd_ratelimit_state))
return;
dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
- current == mdev->receiver.task ? "receiver" :
- current == mdev->asender.task ? "asender" :
- current == mdev->worker.task ? "worker" : current->comm,
- func, b->bm_why ?: "?",
- b->bm_task == mdev->receiver.task ? "receiver" :
- b->bm_task == mdev->asender.task ? "asender" :
- b->bm_task == mdev->worker.task ? "worker" : "?");
+ drbd_task_to_thread_name(mdev->tconn, current),
+ func, b->bm_why ?: "?",
+ drbd_task_to_thread_name(mdev->tconn, b->bm_task));
}
void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
@@ -142,13 +138,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
if (trylock_failed) {
dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
- current == mdev->receiver.task ? "receiver" :
- current == mdev->asender.task ? "asender" :
- current == mdev->worker.task ? "worker" : current->comm,
- why, b->bm_why ?: "?",
- b->bm_task == mdev->receiver.task ? "receiver" :
- b->bm_task == mdev->asender.task ? "asender" :
- b->bm_task == mdev->worker.task ? "worker" : "?");
+ drbd_task_to_thread_name(mdev->tconn, current),
+ why, b->bm_why ?: "?",
+ drbd_task_to_thread_name(mdev->tconn, b->bm_task));
mutex_lock(&b->bm_change);
}
if (BM_LOCKED_MASK & b->bm_flags)
@@ -196,6 +188,9 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * we check whether bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT 28
+/* pages marked with this "HINT" will be considered for writeout
+ * on activity log transactions */
+#define BM_PAGE_HINT_WRITEOUT 27
/* store_page_idx uses non-atomic assignment. It is only used directly after
* allocating the page. All other bm_set_page_* and bm_clear_page_* need to
@@ -227,8 +222,7 @@ static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
struct drbd_bitmap *b = mdev->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
- clear_bit(BM_PAGE_IO_LOCK, addr);
- smp_mb__after_clear_bit();
+ clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
wake_up(&mdev->bitmap->bm_io_wait);
}
@@ -246,6 +240,27 @@ static void bm_set_page_need_writeout(struct page *page)
set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
+/**
+ * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
+ * @mdev: DRBD device.
+ * @page_nr: the bitmap page to mark with the "hint" flag
+ *
+ * From within an activity log transaction, we mark a few pages with these
+ * hints, then call drbd_bm_write_hinted(), which will only write out changed
+ * pages which are flagged with this mark.
+ */
+void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr)
+{
+ struct page *page;
+ if (page_nr >= mdev->bitmap->bm_number_of_pages) {
+ dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
+ page_nr, (int)mdev->bitmap->bm_number_of_pages);
+ return;
+ }
+ page = mdev->bitmap->bm_pages[page_nr];
+ set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
+}
+
static int bm_test_page_unchanged(struct page *page)
{
volatile const unsigned long *addr = &page_private(page);
@@ -373,14 +388,16 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
return old_pages;
/* Trying kmalloc first, falling back to vmalloc.
- * GFP_KERNEL is ok, as this is done when a lower level disk is
- * "attached" to the drbd. Context is receiver thread or cqueue
- * thread. As we have no disk yet, we are not in the IO path,
- * not even the IO path of the peer. */
+ * GFP_NOIO, as this is called while drbd IO is "suspended",
+ * and during resize or attach on diskless Primary,
+ * we must not block on IO to ourselves.
+ * Context is receiver thread or dmsetup. */
bytes = sizeof(struct page *)*want;
- new_pages = kzalloc(bytes, GFP_KERNEL);
+ new_pages = kzalloc(bytes, GFP_NOIO);
if (!new_pages) {
- new_pages = vzalloc(bytes);
+ new_pages = __vmalloc(bytes,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
if (!new_pages)
return NULL;
vmalloced = 1;
@@ -390,7 +407,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
for (i = 0; i < have; i++)
new_pages[i] = old_pages[i];
for (; i < want; i++) {
- page = alloc_page(GFP_HIGHUSER);
+ page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (!page) {
bm_free_pages(new_pages + have, i - have);
bm_vk_free(new_pages, vmalloced);
@@ -439,7 +456,8 @@ int drbd_bm_init(struct drbd_conf *mdev)
sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
- ERR_IF(!mdev->bitmap) return 0;
+ if (!expect(mdev->bitmap))
+ return 0;
return mdev->bitmap->bm_dev_capacity;
}
@@ -447,7 +465,8 @@ sector_t drbd_bm_capacity(struct drbd_conf *mdev)
*/
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
- ERR_IF (!mdev->bitmap) return;
+ if (!expect(mdev->bitmap))
+ return;
bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
kfree(mdev->bitmap);
@@ -610,7 +629,8 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
int err = 0, growing;
int opages_vmalloced;
- ERR_IF(!b) return -ENOMEM;
+ if (!expect(b))
+ return -ENOMEM;
drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
@@ -732,8 +752,10 @@ unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
unsigned long s;
unsigned long flags;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
s = b->bm_set;
@@ -756,8 +778,10 @@ unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
size_t drbd_bm_words(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
return b->bm_words;
}
@@ -765,7 +789,8 @@ size_t drbd_bm_words(struct drbd_conf *mdev)
unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return 0;
+ if (!expect(b))
+ return 0;
return b->bm_bits;
}
@@ -786,8 +811,10 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
end = offset + number;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
if (number == 0)
return;
WARN_ON(offset >= b->bm_words);
@@ -831,8 +858,10 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
end = offset + number;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
if ((offset >= b->bm_words) ||
@@ -860,8 +889,10 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
void drbd_bm_set_all(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
bm_memset(b, 0, 0xff, b->bm_words);
@@ -874,8 +905,10 @@ void drbd_bm_set_all(struct drbd_conf *mdev)
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
bm_memset(b, 0, 0, b->bm_words);
@@ -889,7 +922,8 @@ struct bm_aio_ctx {
unsigned int done;
unsigned flags;
#define BM_AIO_COPY_PAGES 1
-#define BM_WRITE_ALL_PAGES 2
+#define BM_AIO_WRITE_HINTED 2
+#define BM_WRITE_ALL_PAGES 4
int error;
struct kref kref;
};
@@ -977,17 +1011,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
- void *src, *dest;
page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
- dest = kmap_atomic(page);
- src = kmap_atomic(b->bm_pages[page_nr]);
- memcpy(dest, src, PAGE_SIZE);
- kunmap_atomic(src);
- kunmap_atomic(dest);
+ copy_highpage(page, b->bm_pages[page_nr]);
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
-
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
@@ -1060,6 +1088,11 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
break;
if (rw & WRITE) {
+ if ((flags & BM_AIO_WRITE_HINTED) &&
+ !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
+ &page_private(b->bm_pages[i])))
+ continue;
+
if (!(flags & BM_WRITE_ALL_PAGES) &&
bm_test_page_unchanged(b->bm_pages[i])) {
dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
@@ -1088,13 +1121,15 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
* "in_flight reached zero, all done" event.
*/
if (!atomic_dec_and_test(&ctx->in_flight))
- wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
else
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
- dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
- rw == WRITE ? "WRITE" : "READ",
- count, jiffies - now);
+ /* summary for global bitmap IO */
+ if (flags == 0)
+ dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
+ rw == WRITE ? "WRITE" : "READ",
+ count, jiffies - now);
if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
@@ -1103,7 +1138,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
}
if (atomic_read(&ctx->in_flight))
- err = -EIO; /* Disk failed during IO... */
+ err = -EIO; /* Disk timeout/force-detach during IO... */
now = jiffies;
if (rw == WRITE) {
@@ -1115,8 +1150,9 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
}
now = b->bm_set;
- dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
- ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
+ if (flags == 0)
+ dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
+ ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
@@ -1179,9 +1215,17 @@ int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
+/**
+ * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
+ * @mdev: DRBD device.
+ */
+int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
+{
+ return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
+}
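
The hinted-writeout path ties the two files together: while building an AL transaction, drbd_bm_mark_for_writeout() sets the BM_PAGE_HINT_WRITEOUT bit (bit 27 of page_private) on the bitmap pages backing the extents being evicted, and drbd_bm_write_hinted() later writes out only the pages whose hint bit test_and_clear_bit() finds set. A minimal userspace model of that mark-then-sweep pattern over a per-page flag word (illustrative only; the kernel keeps the flags in page_private and uses atomic bit ops):

/* Userspace model: mark pages with a "hint" bit, then sweep and write
 * only the marked ones, clearing the bit as we go. */
#include <stdio.h>

#define NR_PAGES           8
#define PAGE_HINT_WRITEOUT (1u << 27)

static unsigned int page_flags[NR_PAGES];

static void mark_for_writeout(unsigned int page_nr)
{
	if (page_nr < NR_PAGES)
		page_flags[page_nr] |= PAGE_HINT_WRITEOUT;
}

static void write_hinted(void)
{
	for (unsigned int i = 0; i < NR_PAGES; i++) {
		if (!(page_flags[i] & PAGE_HINT_WRITEOUT))
			continue;			/* not hinted: skip */
		page_flags[i] &= ~PAGE_HINT_WRITEOUT;
		printf("writing bitmap page %u\n", i);
	}
}

int main(void)
{
	mark_for_writeout(2);
	mark_for_writeout(5);
	write_hinted();					/* writes pages 2 and 5 only */
	return 0;
}
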
/**
- * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap
+ * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
* @mdev: DRBD device.
* @idx: bitmap page index
*
@@ -1222,11 +1266,11 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
}
bm_page_io_async(ctx, idx, WRITE_SYNC);
- wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
if (ctx->error)
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
- /* that should force detach, so the in memory bitmap will be
+ /* that causes us to detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
@@ -1289,8 +1333,10 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
struct drbd_bitmap *b = mdev->bitmap;
unsigned long i = DRBD_END_OF_BITMAP;
- ERR_IF(!b) return i;
- ERR_IF(!b->bm_pages) return i;
+ if (!expect(b))
+ return i;
+ if (!expect(b->bm_pages))
+ return i;
spin_lock_irq(&b->bm_lock);
if (BM_DONT_TEST & b->bm_flags)
@@ -1391,8 +1437,10 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
struct drbd_bitmap *b = mdev->bitmap;
int c = 0;
- ERR_IF(!b) return 1;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 1;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
@@ -1423,13 +1471,21 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
{
int i;
int bits;
+ int changed = 0;
unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
- b->bm_set += BITS_PER_LONG - bits;
+ changed += BITS_PER_LONG - bits;
}
kunmap_atomic(paddr);
+ if (changed) {
+ /* We only need lazy writeout, the information is still in the
+ * remote bitmap as well, and is reconstructed during the next
+ * bitmap exchange, if lost locally due to a crash. */
+ bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
+ b->bm_set += changed;
+ }
}
/* Same thing as drbd_bm_set_bits,
@@ -1524,8 +1580,10 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
unsigned long *p_addr;
int i;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1559,8 +1617,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* robust in case we screwed up elsewhere, in that case pretend there
* was one dirty bit in the requested area, so we won't try to do a
* local read there (no bitmap probably implies no disk) */
- ERR_IF(!b) return 1;
- ERR_IF(!b->bm_pages) return 1;
+ if (!expect(b))
+ return 1;
+ if (!expect(b->bm_pages))
+ return 1;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1573,11 +1633,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
bm_unmap(p_addr);
p_addr = bm_map_pidx(b, idx);
}
- ERR_IF (bitnr >= b->bm_bits) {
- dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
- } else {
+ if (expect(bitnr < b->bm_bits))
c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
- }
+ else
+ dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
}
if (p_addr)
bm_unmap(p_addr);
@@ -1607,8 +1666,10 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
unsigned long flags;
unsigned long *p_addr, *bm;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1630,47 +1691,3 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
spin_unlock_irqrestore(&b->bm_lock, flags);
return count;
}
-
-/* Set all bits covered by the AL-extent al_enr.
- * Returns number of bits changed. */
-unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
-{
- struct drbd_bitmap *b = mdev->bitmap;
- unsigned long *p_addr, *bm;
- unsigned long weight;
- unsigned long s, e;
- int count, i, do_now;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
-
- spin_lock_irq(&b->bm_lock);
- if (BM_DONT_SET & b->bm_flags)
- bm_print_lock_info(mdev);
- weight = b->bm_set;
-
- s = al_enr * BM_WORDS_PER_AL_EXT;
- e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
- /* assert that s and e are on the same page */
- D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
- == s >> (PAGE_SHIFT - LN2_BPL + 3));
- count = 0;
- if (s < b->bm_words) {
- i = do_now = e-s;
- p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
- bm = p_addr + MLPP(s);
- while (i--) {
- count += hweight_long(*bm);
- *bm = -1UL;
- bm++;
- }
- bm_unmap(p_addr);
- b->bm_set += do_now*BITS_PER_LONG - count;
- if (e == b->bm_words)
- b->bm_set -= bm_clear_surplus(b);
- } else {
- dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
- }
- weight = b->bm_set - weight;
- spin_unlock_irq(&b->bm_lock);
- return weight;
-}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b953cc7c9c00..6b51afa1aae1 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -39,9 +39,13 @@
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
+#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
+#include <linux/drbd_genl_api.h>
+#include <linux/drbd.h>
+#include "drbd_state.h"
#ifdef __CHECKER__
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
@@ -61,7 +65,6 @@
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
-extern unsigned int cn_idx;
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
@@ -86,34 +89,44 @@ extern char usermode_helper[];
*/
#define DRBD_SIGKILL SIGHUP
-/* All EEs on the free list should have ID_VACANT (== 0)
- * freshly allocated EEs get !ID_VACANT (== 1)
- * so if it says "cannot dereference null pointer at address 0x00000001",
- * it is most likely one of these :( */
-
#define ID_IN_SYNC (4711ULL)
#define ID_OUT_OF_SYNC (4712ULL)
-
#define ID_SYNCER (-1ULL)
-#define ID_VACANT 0
-#define is_syncer_block_id(id) ((id) == ID_SYNCER)
+
#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
struct drbd_conf;
+struct drbd_tconn;
/* to shorten dev_warn(DEV, "msg"); and relatives statements */
#define DEV (disk_to_dev(mdev->vdisk))
+#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
+ printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
+#define conn_alert(TCONN, FMT, ARGS...) conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
+#define conn_crit(TCONN, FMT, ARGS...) conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
+#define conn_err(TCONN, FMT, ARGS...) conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
+#define conn_warn(TCONN, FMT, ARGS...) conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
+#define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
+#define conn_info(TCONN, FMT, ARGS...) conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
+#define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)
+
#define D_ASSERT(exp) if (!(exp)) \
dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
-#define ERR_IF(exp) if (({ \
- int _b = (exp) != 0; \
- if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
- __func__, #exp, __FILE__, __LINE__); \
- _b; \
- }))
+/**
+ * expect - Make an assertion
+ *
+ * Unlike the assert macro, this macro returns a boolean result.
+ */
+#define expect(exp) ({ \
+ bool _bool = (exp); \
+ if (!_bool) \
+ dev_err(DEV, "ASSERTION %s FAILED in %s\n", \
+ #exp, __func__); \
+ _bool; \
+ })
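
The expect() macro inverts the old ERR_IF convention: instead of guarding the statement that follows, it evaluates to a bool, so call sites test it explicitly (as in the drbd_bm_e_weight hunk above). A minimal standalone sketch of the pattern, with dev_err() replaced by fprintf() so it compiles outside the kernel; weight_of() is a hypothetical caller:

    #include <stdbool.h>
    #include <stdio.h>

    /* Same shape as the new macro, logging via fprintf() instead of dev_err(). */
    #define expect(exp) ({                                          \
            bool _bool = (exp);                                     \
            if (!_bool)                                             \
                    fprintf(stderr, "ASSERTION %s FAILED in %s\n",  \
                            #exp, __func__);                        \
            _bool;                                                  \
    })

    /* old:  ERR_IF(!pages) return 0;
     * new:  the caller inverts the boolean result of expect() */
    static int weight_of(const unsigned long *pages)
    {
            if (!expect(pages))
                    return 0;
            return 42;      /* stand-in for the real computation */
    }
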
/* Defines to control fault insertion */
enum {
@@ -150,15 +163,12 @@ drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
-/* drbd_meta-data.c (still in drbd_main.c) */
-/* 4th incarnation of the disk layout. */
-#define DRBD_MD_MAGIC (DRBD_MAGIC+4)
-
-extern struct drbd_conf **minor_table;
extern struct ratelimit_state drbd_ratelimit_state;
+extern struct idr minors; /* RCU, updates: genl_lock() */
+extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
/* on the wire */
-enum drbd_packets {
+enum drbd_packet {
/* receiver (data socket) */
P_DATA = 0x00,
P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
@@ -186,7 +196,7 @@ enum drbd_packets {
P_RECV_ACK = 0x15, /* Used in protocol B */
P_WRITE_ACK = 0x16, /* Used in protocol C */
P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
- P_DISCARD_ACK = 0x18, /* Used in proto C, two-primaries conflict detection */
+ P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */
P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
@@ -207,77 +217,23 @@ enum drbd_packets {
P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
+ P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */
+ P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
+ P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
+ P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
- P_MAX_CMD = 0x2A,
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
/* special command ids for handshake */
- P_HAND_SHAKE_M = 0xfff1, /* First Packet on the MetaSock */
- P_HAND_SHAKE_S = 0xfff2, /* First Packet on the Socket */
+ P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */
+ P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */
- P_HAND_SHAKE = 0xfffe /* FIXED for the next century! */
+ P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */
};
-static inline const char *cmdname(enum drbd_packets cmd)
-{
- /* THINK may need to become several global tables
- * when we want to support more than
- * one PRO_VERSION */
- static const char *cmdnames[] = {
- [P_DATA] = "Data",
- [P_DATA_REPLY] = "DataReply",
- [P_RS_DATA_REPLY] = "RSDataReply",
- [P_BARRIER] = "Barrier",
- [P_BITMAP] = "ReportBitMap",
- [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
- [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
- [P_UNPLUG_REMOTE] = "UnplugRemote",
- [P_DATA_REQUEST] = "DataRequest",
- [P_RS_DATA_REQUEST] = "RSDataRequest",
- [P_SYNC_PARAM] = "SyncParam",
- [P_SYNC_PARAM89] = "SyncParam89",
- [P_PROTOCOL] = "ReportProtocol",
- [P_UUIDS] = "ReportUUIDs",
- [P_SIZES] = "ReportSizes",
- [P_STATE] = "ReportState",
- [P_SYNC_UUID] = "ReportSyncUUID",
- [P_AUTH_CHALLENGE] = "AuthChallenge",
- [P_AUTH_RESPONSE] = "AuthResponse",
- [P_PING] = "Ping",
- [P_PING_ACK] = "PingAck",
- [P_RECV_ACK] = "RecvAck",
- [P_WRITE_ACK] = "WriteAck",
- [P_RS_WRITE_ACK] = "RSWriteAck",
- [P_DISCARD_ACK] = "DiscardAck",
- [P_NEG_ACK] = "NegAck",
- [P_NEG_DREPLY] = "NegDReply",
- [P_NEG_RS_DREPLY] = "NegRSDReply",
- [P_BARRIER_ACK] = "BarrierAck",
- [P_STATE_CHG_REQ] = "StateChgRequest",
- [P_STATE_CHG_REPLY] = "StateChgReply",
- [P_OV_REQUEST] = "OVRequest",
- [P_OV_REPLY] = "OVReply",
- [P_OV_RESULT] = "OVResult",
- [P_CSUM_RS_REQUEST] = "CsumRSRequest",
- [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
- [P_COMPRESSED_BITMAP] = "CBitmap",
- [P_DELAY_PROBE] = "DelayProbe",
- [P_OUT_OF_SYNC] = "OutOfSync",
- [P_MAX_CMD] = NULL,
- };
-
- if (cmd == P_HAND_SHAKE_M)
- return "HandShakeM";
- if (cmd == P_HAND_SHAKE_S)
- return "HandShakeS";
- if (cmd == P_HAND_SHAKE)
- return "HandShake";
- if (cmd >= P_MAX_CMD)
- return "Unknown";
- return cmdnames[cmd];
-}
+extern const char *cmdname(enum drbd_packet cmd);
/* for sending/receiving the bitmap,
* possibly in some encoding scheme */
@@ -337,37 +293,24 @@ struct p_header80 {
u32 magic;
u16 command;
u16 length; /* bytes of data after this header */
- u8 payload[0];
} __packed;
/* Header for big packets, Used for data packets exceeding 64kB */
struct p_header95 {
u16 magic; /* use DRBD_MAGIC_BIG here */
u16 command;
- u32 length; /* Use only 24 bits of that. Ignore the highest 8 bit. */
- u8 payload[0];
+ u32 length;
} __packed;
-union p_header {
- struct p_header80 h80;
- struct p_header95 h95;
-};
-
-/*
- * short commands, packets without payload, plain p_header:
- * P_PING
- * P_PING_ACK
- * P_BECOME_SYNC_TARGET
- * P_BECOME_SYNC_SOURCE
- * P_UNPLUG_REMOTE
- */
+struct p_header100 {
+ u32 magic;
+ u16 volume;
+ u16 command;
+ u32 length;
+ u32 pad;
+} __packed;
-/*
- * commands with out-of-struct payload:
- * P_BITMAP (no additional fields)
- * P_DATA, P_DATA_REPLY (see p_data)
- * P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
- */
+extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
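
With the payload[0] members dropped from p_header80/p_header95 and the new p_header100 carrying a volume number, the header size now depends on the negotiated protocol, hence drbd_header_size(). A hedged sketch of how such a selection could look against the three structs above; the version cut-offs 100 and 95 are assumptions for illustration, not taken from this patch:

    /* Illustration only: choose a wire header size from the agreed protocol
     * version. Relies on the three p_header structs defined above. */
    static unsigned int example_header_size(int agreed_pro_version)
    {
            if (agreed_pro_version >= 100)
                    return sizeof(struct p_header100);  /* 16 bytes, has a volume field */
            if (agreed_pro_version >= 95)
                    return sizeof(struct p_header95);   /* 8 bytes, 32-bit length */
            return sizeof(struct p_header80);           /* 8 bytes, 16-bit length */
    }
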
/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER 1 /* depricated */
@@ -377,9 +320,10 @@ union p_header {
#define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_FLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */
+#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
+#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
struct p_data {
- union p_header head;
u64 sector; /* 64 bits sector number */
u64 block_id; /* to identify the request in protocol B&C */
u32 seq_num;
@@ -390,21 +334,18 @@ struct p_data {
* commands which share a struct:
* p_block_ack:
* P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
- * P_DISCARD_ACK (proto C, two-primaries conflict detection)
+ * P_SUPERSEDED (proto C, two-primaries conflict detection)
* p_block_req:
* P_DATA_REQUEST, P_RS_DATA_REQUEST
*/
struct p_block_ack {
- struct p_header80 head;
u64 sector;
u64 block_id;
u32 blksize;
u32 seq_num;
} __packed;
-
struct p_block_req {
- struct p_header80 head;
u64 sector;
u64 block_id;
u32 blksize;
@@ -413,59 +354,52 @@ struct p_block_req {
/*
* commands with their own struct for additional fields:
- * P_HAND_SHAKE
+ * P_CONNECTION_FEATURES
* P_BARRIER
* P_BARRIER_ACK
* P_SYNC_PARAM
* ReportParams
*/
-struct p_handshake {
- struct p_header80 head; /* 8 bytes */
+struct p_connection_features {
u32 protocol_min;
u32 feature_flags;
u32 protocol_max;
/* should be more than enough for future enhancements
- * for now, feature_flags and the reserverd array shall be zero.
+ * for now, feature_flags and the reserved array shall be zero.
*/
u32 _pad;
- u64 reserverd[7];
+ u64 reserved[7];
} __packed;
-/* 80 bytes, FIXED for the next century */
struct p_barrier {
- struct p_header80 head;
u32 barrier; /* barrier number _handle_ only */
u32 pad; /* to multiple of 8 Byte */
} __packed;
struct p_barrier_ack {
- struct p_header80 head;
u32 barrier;
u32 set_size;
} __packed;
struct p_rs_param {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
/* Since protocol version 88 and higher. */
char verify_alg[0];
} __packed;
struct p_rs_param_89 {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
/* protocol version 89: */
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
} __packed;
struct p_rs_param_95 {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
u32 c_plan_ahead;
@@ -475,12 +409,11 @@ struct p_rs_param_95 {
} __packed;
enum drbd_conn_flags {
- CF_WANT_LOSE = 1,
+ CF_DISCARD_MY_DATA = 1,
CF_DRY_RUN = 2,
};
struct p_protocol {
- struct p_header80 head;
u32 protocol;
u32 after_sb_0p;
u32 after_sb_1p;
@@ -494,17 +427,14 @@ struct p_protocol {
} __packed;
struct p_uuids {
- struct p_header80 head;
u64 uuid[UI_EXTENDED_SIZE];
} __packed;
struct p_rs_uuid {
- struct p_header80 head;
u64 uuid;
} __packed;
struct p_sizes {
- struct p_header80 head;
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
@@ -514,18 +444,15 @@ struct p_sizes {
} __packed;
struct p_state {
- struct p_header80 head;
u32 state;
} __packed;
struct p_req_state {
- struct p_header80 head;
u32 mask;
u32 val;
} __packed;
struct p_req_state_reply {
- struct p_header80 head;
u32 retcode;
} __packed;
@@ -539,15 +466,7 @@ struct p_drbd06_param {
u32 bit_map_gen[5];
} __packed;
-struct p_discard {
- struct p_header80 head;
- u64 block_id;
- u32 seq_num;
- u32 pad;
-} __packed;
-
struct p_block_desc {
- struct p_header80 head;
u64 sector;
u32 blksize;
u32 pad; /* to multiple of 8 Byte */
@@ -563,7 +482,6 @@ enum drbd_bitmap_code {
};
struct p_compressed_bm {
- struct p_header80 head;
/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
* (encoding & 0x80): polarity (set/unset) of first runlength
* ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
@@ -575,90 +493,22 @@ struct p_compressed_bm {
} __packed;
struct p_delay_probe93 {
- struct p_header80 head;
u32 seq_num; /* sequence number to match the two probe packets */
u32 offset; /* usecs the probe got sent after the reference time point */
} __packed;
-/* DCBP: Drbd Compressed Bitmap Packet ... */
-static inline enum drbd_bitmap_code
-DCBP_get_code(struct p_compressed_bm *p)
-{
- return (enum drbd_bitmap_code)(p->encoding & 0x0f);
-}
-
-static inline void
-DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
-{
- BUG_ON(code & ~0xf);
- p->encoding = (p->encoding & ~0xf) | code;
-}
-
-static inline int
-DCBP_get_start(struct p_compressed_bm *p)
-{
- return (p->encoding & 0x80) != 0;
-}
-
-static inline void
-DCBP_set_start(struct p_compressed_bm *p, int set)
-{
- p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
-}
-
-static inline int
-DCBP_get_pad_bits(struct p_compressed_bm *p)
-{
- return (p->encoding >> 4) & 0x7;
-}
-
-static inline void
-DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
-{
- BUG_ON(n & ~0x7);
- p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
-}
-
-/* one bitmap packet, including the p_header,
- * should fit within one _architecture independend_ page.
- * so we need to use the fixed size 4KiB page size
- * most architectures have used for a long time.
+/*
+ * Bitmap packets need to fit within a single page on the sender and receiver,
+ * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
*/
-#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
-#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
-#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
-#if (PAGE_SIZE < 4096)
-/* drbd_send_bitmap / receive_bitmap would break horribly */
-#error "PAGE_SIZE too small"
-#endif
-
-union p_polymorph {
- union p_header header;
- struct p_handshake handshake;
- struct p_data data;
- struct p_block_ack block_ack;
- struct p_barrier barrier;
- struct p_barrier_ack barrier_ack;
- struct p_rs_param_89 rs_param_89;
- struct p_rs_param_95 rs_param_95;
- struct p_protocol protocol;
- struct p_sizes sizes;
- struct p_uuids uuids;
- struct p_state state;
- struct p_req_state req_state;
- struct p_req_state_reply req_state_reply;
- struct p_block_req block_req;
- struct p_delay_probe93 delay_probe93;
- struct p_rs_uuid rs_uuid;
- struct p_block_desc block_desc;
-} __packed;
+#define DRBD_SOCKET_BUFFER_SIZE 4096
/**********************************************************************/
enum drbd_thread_state {
- None,
- Running,
- Exiting,
- Restarting
+ NONE,
+ RUNNING,
+ EXITING,
+ RESTARTING
};
struct drbd_thread {
@@ -667,8 +517,9 @@ struct drbd_thread {
struct completion stop;
enum drbd_thread_state t_state;
int (*function) (struct drbd_thread *);
- struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
int reset_cpu_mask;
+ char name[9];
};
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
@@ -681,58 +532,54 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
return thi->t_state;
}
-struct drbd_work;
-typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
struct drbd_work {
struct list_head list;
- drbd_work_cb cb;
+ int (*cb)(struct drbd_work *, int cancel);
+ union {
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
+ };
};
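
Work callbacks no longer take a struct drbd_conf * argument; the owner is reached through the embedded drbd_work, either via the new mdev/tconn union or via container_of() on the enclosing object. A hedged sketch of the new callback shape (the function body is hypothetical, not from this patch):

    /* Hypothetical callback in the new convention: recover context from the
     * work item instead of receiving the device as a parameter. */
    static int example_send_dblock(struct drbd_work *w, int cancel)
    {
            struct drbd_request *req = container_of(w, struct drbd_request, w);
            struct drbd_conf *mdev = w->mdev;       /* filled in when queued */

            if (cancel)
                    return 0;                       /* request was cancelled, nothing to send */
            return drbd_send_dblock(mdev, req);     /* declared further down in this header */
    }
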
-struct drbd_tl_epoch;
+#include "drbd_interval.h"
+
+extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *);
+
struct drbd_request {
struct drbd_work w;
- struct drbd_conf *mdev;
/* if local IO is not allowed, will be NULL.
* if local IO _is_ allowed, holds the locally submitted bio clone,
* or, after local IO completion, the ERR_PTR(error).
- * see drbd_endio_pri(). */
+ * see drbd_request_endio(). */
struct bio *private_bio;
- struct hlist_node collision;
- sector_t sector;
- unsigned int size;
- unsigned int epoch; /* barrier_nr */
+ struct drbd_interval i;
- /* barrier_nr: used to check on "completion" whether this req was in
+ /* epoch: used to check on "completion" whether this req was in
* the current epoch, and we therefore have to close it,
- * starting a new epoch...
+ * causing a p_barrier packet to be sent, starting a new epoch.
+ *
+ * This corresponds to "barrier" in struct p_barrier[_ack],
+ * and to "barrier_nr" in struct drbd_epoch (and various
+ * comments/function parameters/local variable names).
*/
+ unsigned int epoch;
struct list_head tl_requests; /* ring list in the transfer log */
struct bio *master_bio; /* master bio pointer */
- unsigned long rq_state; /* see comments above _req_mod() */
unsigned long start_time;
-};
-
-struct drbd_tl_epoch {
- struct drbd_work w;
- struct list_head requests; /* requests before */
- struct drbd_tl_epoch *next; /* pointer to the next barrier */
- unsigned int br_number; /* the barriers identifier. */
- int n_writes; /* number of requests attached before this barrier */
-};
-struct drbd_request;
+ /* once it hits 0, we may complete the master_bio */
+ atomic_t completion_ref;
+ /* once it hits 0, we may destroy this drbd_request object */
+ struct kref kref;
-/* These Tl_epoch_entries may be in one of 6 lists:
- active_ee .. data packet being written
- sync_ee .. syncer block being written
- done_ee .. block written, need to send P_WRITE_ACK
- read_ee .. [RS]P_DATA_REQUEST being read
-*/
+ unsigned rq_state; /* see comments above _req_mod() */
+};
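
Request lifetime is now tracked by two counters: completion_ref gates completion of the master bio, while kref gates destruction of the request object itself. A hedged sketch of the put side under that split; example_req_destroy() and example_req_put() are hypothetical names, and the real code paths hold additional locking and state:

    /* Hypothetical teardown illustrating the two-stage lifetime implied above. */
    static void example_req_destroy(struct kref *kref)
    {
            struct drbd_request *req = container_of(kref, struct drbd_request, kref);

            mempool_free(req, drbd_request_mempool);        /* pool declared below */
    }

    static void example_req_put(struct drbd_request *req, int error)
    {
            if (atomic_dec_and_test(&req->completion_ref))
                    bio_endio(req->master_bio, error);      /* complete towards upper layers */
            kref_put(&req->kref, example_req_destroy);      /* may free the request */
    }
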
struct drbd_epoch {
+ struct drbd_tconn *tconn;
struct list_head list;
unsigned int barrier_nr;
atomic_t epoch_size; /* increased on every request added. */
@@ -762,17 +609,14 @@ struct digest_info {
void *digest;
};
-struct drbd_epoch_entry {
+struct drbd_peer_request {
struct drbd_work w;
- struct hlist_node collision;
struct drbd_epoch *epoch; /* for writes */
- struct drbd_conf *mdev;
struct page *pages;
atomic_t pending_bios;
- unsigned int size;
+ struct drbd_interval i;
/* see comments on ee flag bits below */
unsigned long flags;
- sector_t sector;
union {
u64 block_id;
struct digest_info *digest;
@@ -793,31 +637,37 @@ enum {
* we need to resubmit without the barrier flag. */
__EE_RESUBMITTED,
- /* we may have several bios per epoch entry.
+ /* we may have several bios per peer request.
* if any of those fail, we set this flag atomically
* from the endio callback */
__EE_WAS_ERROR,
/* This ee has a pointer to a digest instead of a block id */
__EE_HAS_DIGEST,
+
+ /* Conflicting local requests need to be restarted after this request */
+ __EE_RESTART_REQUESTS,
+
+ /* The peer wants a write ACK for this (wire proto C) */
+ __EE_SEND_WRITE_ACK,
+
+ /* Is set when net_conf had two_primaries set while creating this peer_req */
+ __EE_IN_INTERVAL_TREE,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
+#define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
+#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
+#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
-/* global flag bits */
+/* flag bits per mdev */
enum {
- CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
- SIGNAL_ASENDER, /* whether asender wants to be interrupted */
- SEND_PING, /* whether asender should send a ping asap */
-
UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
MD_DIRTY, /* current uuids and flags not yet on disk */
- DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */
USE_DEGR_WFC_T, /* degr-wfc-timeout instead of wfc-timeout. */
- CLUSTER_ST_CHANGE, /* Cluster wide state change going on... */
CL_ST_CHG_SUCCESS,
CL_ST_CHG_FAIL,
CRASHED_PRIMARY, /* This node was a crashed primary.
@@ -831,32 +681,18 @@ enum {
once no more io in flight, start bitmap io */
BITMAP_IO_QUEUED, /* Started bitmap IO */
GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
- WAS_IO_ERROR, /* Local disk failed returned IO error */
+ WAS_IO_ERROR, /* Local disk failed, returned IO error */
+ WAS_READ_ERROR, /* Local disk READ failed (set additionally to the above) */
FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */
RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
- NET_CONGESTED, /* The data socket is congested */
-
- CONFIG_PENDING, /* serialization of (re)configuration requests.
- * if set, also prevents the device from dying */
- DEVICE_DYING, /* device became unconfigured,
- * but worker thread is still handling the cleanup.
- * reconfiguring (nl_disk_conf, nl_net_conf) is dissalowed,
- * while this is set. */
RESIZE_PENDING, /* Size change detected locally, waiting for the response from
* the peer, if it changed there as well. */
- CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
- GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */
NEW_CUR_UUID, /* Create new current UUID when thawing IO */
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
- STATE_SENT, /* Do not change state/UUIDs while this is set */
-
- CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
- * pending, from drbd worker context.
- * If set, bdi_write_congested() returns true,
- * so shrink_page_list() would not recurse into,
- * and potentially deadlock on, this drbd worker.
- */
+ B_RS_H_DONE, /* Before resync handler done (already executed) */
+ DISCARD_MY_DATA, /* discard_my_data flag per volume */
+ READ_BALANCE_RR,
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -894,24 +730,24 @@ enum bm_flag {
struct drbd_work_queue {
struct list_head q;
- struct semaphore s; /* producers up it, worker down()s it */
spinlock_t q_lock; /* to protect the list. */
+ wait_queue_head_t q_wait;
};
struct drbd_socket {
- struct drbd_work_queue work;
struct mutex mutex;
struct socket *socket;
/* this way we get our
* send/receive buffers off the stack */
- union p_polymorph sbuf;
- union p_polymorph rbuf;
+ void *sbuf;
+ void *rbuf;
};
struct drbd_md {
u64 md_offset; /* sector offset to 'super' block */
u64 la_size_sect; /* last agreed size, unit sectors */
+ spinlock_t uuid_lock;
u64 uuid[UI_SIZE];
u64 device_uuid;
u32 flags;
@@ -921,24 +757,16 @@ struct drbd_md {
s32 bm_offset; /* signed relative sector offset to bitmap */
/* u32 al_nr_extents; important for restoring the AL
- * is stored into sync_conf.al_extents, which in turn
+ * is stored into ldev->dc.al_extents, which in turn
* gets applied to act_log->nr_elements
*/
};
-/* for sync_conf and other types... */
-#define NL_PACKET(name, number, fields) struct name { fields };
-#define NL_INTEGER(pn,pr,member) int member;
-#define NL_INT64(pn,pr,member) __u64 member;
-#define NL_BIT(pn,pr,member) unsigned member:1;
-#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
-#include <linux/drbd_nl.h>
-
struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
struct drbd_md md;
- struct disk_conf dc; /* The user provided config... */
+ struct disk_conf *disk_conf; /* RCU, for updates: mdev->tconn->conf_update */
sector_t known_size; /* last known size of that backing device */
};
@@ -962,18 +790,116 @@ enum write_ordering_e {
};
struct fifo_buffer {
- int *values;
unsigned int head_index;
unsigned int size;
+ int total; /* sum of all values */
+ int values[0];
+};
+extern struct fifo_buffer *fifo_alloc(int fifo_size);
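
The fifo_buffer now keeps a running total and ends in a flexible array member, so header and values share one allocation that fifo_alloc() presumably sizes in a single kzalloc(). A minimal sketch of such an allocator, assuming GFP_KERNEL context; this is not the patch's actual implementation:

    /* Sketch only: allocate the header plus fifo_size correction values at once. */
    struct fifo_buffer *example_fifo_alloc(int fifo_size)
    {
            struct fifo_buffer *fb;

            fb = kzalloc(sizeof(*fb) + fifo_size * sizeof(int), GFP_KERNEL);
            if (!fb)
                    return NULL;
            fb->size = fifo_size;   /* head_index, total and values[] start zeroed */
            return fb;
    }
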
+
+/* flag bits per tconn */
+enum {
+ NET_CONGESTED, /* The data socket is congested */
+ RESOLVE_CONFLICTS, /* Set on one node, cleared on the peer! */
+ SEND_PING, /* whether asender should send a ping asap */
+ SIGNAL_ASENDER, /* whether asender wants to be interrupted */
+ GOT_PING_ACK, /* set when we receive a ping_ack packet, ping_wait gets woken */
+ CONN_WD_ST_CHG_REQ, /* A cluster wide state change on the connection is active */
+ CONN_WD_ST_CHG_OKAY,
+ CONN_WD_ST_CHG_FAIL,
+ CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
+ CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
+ STATE_SENT, /* Do not change state/UUIDs while this is set */
+ CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
+ * pending, from drbd worker context.
+ * If set, bdi_write_congested() returns true,
+ * so shrink_page_list() would not recurse into,
+ * and potentially deadlock on, this drbd worker.
+ */
+ DISCONNECT_SENT,
+};
+
+struct drbd_tconn { /* is a resource from the config file */
+ char *name; /* Resource name */
+ struct list_head all_tconn; /* linked on global drbd_tconns */
+ struct kref kref;
+ struct idr volumes; /* <tconn, vnr> to mdev mapping */
+ enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
+ unsigned susp:1; /* IO suspended by user */
+ unsigned susp_nod:1; /* IO suspended because no data */
+ unsigned susp_fen:1; /* IO suspended because fence peer handler runs */
+ struct mutex cstate_mutex; /* Protects graceful disconnects */
+
+ unsigned long flags;
+ struct net_conf *net_conf; /* content protected by rcu */
+ struct mutex conf_update; /* mutex for read-copy-update of net_conf and disk_conf */
+ wait_queue_head_t ping_wait; /* Woken upon reception of a ping, and a state change */
+ struct res_opts res_opts;
+
+ struct sockaddr_storage my_addr;
+ int my_addr_len;
+ struct sockaddr_storage peer_addr;
+ int peer_addr_len;
+
+ struct drbd_socket data; /* data/barrier/cstate/parameter packets */
+ struct drbd_socket meta; /* ping/ack (metadata) packets */
+ int agreed_pro_version; /* actually used protocol version */
+ unsigned long last_received; /* in jiffies, either socket */
+ unsigned int ko_count;
+
+ spinlock_t req_lock;
+
+ struct list_head transfer_log; /* all requests not yet fully processed */
+
+ struct crypto_hash *cram_hmac_tfm;
+ struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by tconn->data.mutex */
+ struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
+ struct crypto_hash *csums_tfm;
+ struct crypto_hash *verify_tfm;
+ void *int_dig_in;
+ void *int_dig_vv;
+
+ /* receiver side */
+ struct drbd_epoch *current_epoch;
+ spinlock_t epoch_lock;
+ unsigned int epochs;
+ enum write_ordering_e write_ordering;
+ atomic_t current_tle_nr; /* transfer log epoch number */
+ unsigned current_tle_writes; /* writes seen within this tl epoch */
+
+ unsigned long last_reconnect_jif;
+ struct drbd_thread receiver;
+ struct drbd_thread worker;
+ struct drbd_thread asender;
+ cpumask_var_t cpu_mask;
+
+ /* sender side */
+ struct drbd_work_queue sender_work;
+
+ struct {
+ /* whether this sender thread
+ * has processed a single write yet. */
+ bool seen_any_write_yet;
+
+ /* Which barrier number to send with the next P_BARRIER */
+ int current_epoch_nr;
+
+ /* how many write requests have been sent
+ * with req->epoch == current_epoch_nr.
+ * If none, no P_BARRIER will be sent. */
+ unsigned current_epoch_writes;
+ } send;
};
struct drbd_conf {
+ struct drbd_tconn *tconn;
+ int vnr; /* volume number within the connection */
+ struct kref kref;
+
/* things that are stored as / read from meta data on disk */
unsigned long flags;
/* configured by drbdsetup */
- struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
- struct syncer_conf sync_conf;
struct drbd_backing_dev *ldev __protected_by(local);
sector_t p_size; /* partner's disk size */
@@ -981,11 +907,7 @@ struct drbd_conf {
struct block_device *this_bdev;
struct gendisk *vdisk;
- struct drbd_socket data; /* data/barrier/cstate/parameter packets */
- struct drbd_socket meta; /* ping/ack (metadata) packets */
- int agreed_pro_version; /* actually used protocol version */
- unsigned long last_received; /* in jiffies, either socket */
- unsigned int ko_count;
+ unsigned long last_reattach_jif;
struct drbd_work resync_work,
unplug_work,
go_diskless,
@@ -1005,10 +927,9 @@ struct drbd_conf {
/* Used after attach while negotiating new disk state. */
union drbd_state new_state_tmp;
- union drbd_state state;
+ union drbd_dev_state state;
wait_queue_head_t misc_wait;
wait_queue_head_t state_wait; /* upon each state change. */
- wait_queue_head_t net_cnt_wait;
unsigned int send_cnt;
unsigned int recv_cnt;
unsigned int read_cnt;
@@ -1018,17 +939,12 @@ struct drbd_conf {
atomic_t ap_bio_cnt; /* Requests we need to complete */
atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
- atomic_t unacked_cnt; /* Need to send replys for */
+ atomic_t unacked_cnt; /* Need to send replies for */
atomic_t local_cnt; /* Waiting for local completion */
- atomic_t net_cnt; /* Users of net_conf */
- spinlock_t req_lock;
- struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
- struct drbd_tl_epoch *newest_tle;
- struct drbd_tl_epoch *oldest_tle;
- struct list_head out_of_sequence_requests;
- struct list_head barrier_acked_requests;
- struct hlist_head *tl_hash;
- unsigned int tl_hash_s;
+
+ /* Interval tree of pending local requests */
+ struct rb_root read_requests;
+ struct rb_root write_requests;
/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
unsigned long rs_total;
@@ -1048,9 +964,11 @@ struct drbd_conf {
unsigned long rs_mark_time[DRBD_SYNC_MARKS];
/* current index into rs_mark_{left,time} */
int rs_last_mark;
+ unsigned long rs_last_bcast; /* [unit jiffies] */
/* where does the admin want us to start? (sector) */
sector_t ov_start_sector;
+ sector_t ov_stop_sector;
/* where are we now? (sector) */
sector_t ov_position;
/* Start sector of out of sync range (to merge printk reporting). */
@@ -1058,14 +976,7 @@ struct drbd_conf {
/* size of out-of-sync range in sectors. */
sector_t ov_last_oos_size;
unsigned long ov_left; /* in bits */
- struct crypto_hash *csums_tfm;
- struct crypto_hash *verify_tfm;
- unsigned long last_reattach_jif;
- unsigned long last_reconnect_jif;
- struct drbd_thread receiver;
- struct drbd_thread worker;
- struct drbd_thread asender;
struct drbd_bitmap *bitmap;
unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
@@ -1078,29 +989,19 @@ struct drbd_conf {
int open_cnt;
u64 *p_uuid;
- struct drbd_epoch *current_epoch;
- spinlock_t epoch_lock;
- unsigned int epochs;
- enum write_ordering_e write_ordering;
+
struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
- struct list_head done_ee; /* send ack */
- struct list_head read_ee; /* IO in progress (any read) */
+ struct list_head done_ee; /* need to send P_WRITE_ACK */
+ struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
struct list_head net_ee; /* zero-copy network send in progress */
- struct hlist_head *ee_hash; /* is proteced by req_lock! */
- unsigned int ee_hash_s;
-
- /* this one is protected by ee_lock, single thread */
- struct drbd_epoch_entry *last_write_w_barrier;
int next_barrier_nr;
- struct hlist_head *app_reads_hash; /* is proteced by req_lock */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */
wait_queue_head_t ee_wait;
struct page *md_io_page; /* one page buffer for md_io */
- struct page *md_io_tmpp; /* for logical_block_size != 512 */
struct drbd_md_io md_io;
atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */
spinlock_t al_lock;
@@ -1109,22 +1010,16 @@ struct drbd_conf {
unsigned int al_tr_number;
int al_tr_cycle;
int al_tr_pos; /* position of the next transaction in the journal */
- struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
- struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
- void *int_dig_out;
- void *int_dig_in;
- void *int_dig_vv;
wait_queue_head_t seq_wait;
atomic_t packet_seq;
unsigned int peer_seq;
spinlock_t peer_seq_lock;
unsigned int minor;
unsigned long comm_bm_set; /* communicated number of set bits. */
- cpumask_var_t cpu_mask;
struct bm_io_work bm_io_work;
u64 ed_uuid; /* UUID of the exposed data */
- struct mutex state_mutex;
+ struct mutex own_state_mutex;
+ struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
char congestion_reason; /* Why we where congested... */
atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@@ -1132,9 +1027,8 @@ struct drbd_conf {
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
int c_sync_rate; /* current resync rate after syncer throttle magic */
- struct fifo_buffer rs_plan_s; /* correction values of resync planer */
+ struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, tconn->conf_update) */
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
- int rs_planed; /* resync sectors already planned */
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
unsigned int peer_max_bio_size;
unsigned int local_max_bio_size;
@@ -1142,11 +1036,7 @@ struct drbd_conf {
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
- struct drbd_conf *mdev;
-
- mdev = minor < minor_count ? minor_table[minor] : NULL;
-
- return mdev;
+ return (struct drbd_conf *)idr_find(&minors, minor);
}
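
The static minor_table array gives way to the global minors idr, so lookup becomes idr_find() as above. The registration side lives outside this hunk; under the pre-3.9 idr API of this kernel generation it would look roughly like the following sketch (the calls and the error handling here are assumptions for illustration):

    /* Hedged sketch of registering an mdev at a fixed minor in the idr. */
    static int example_register_minor(struct drbd_conf *mdev, unsigned int minor)
    {
            int got, err;

            if (!idr_pre_get(&minors, GFP_KERNEL))
                    return -ENOMEM;
            err = idr_get_new_above(&minors, mdev, minor, &got);
            if (!err && got != minor) {             /* slot was already taken, undo */
                    idr_remove(&minors, got);
                    err = -EBUSY;
            }
            return err;
    }
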
static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
@@ -1154,29 +1044,9 @@ static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
return mdev->minor;
}
-/* returns 1 if it was successful,
- * returns 0 if there was no data socket.
- * so wherever you are going to use the data.socket, e.g. do
- * if (!drbd_get_data_sock(mdev))
- * return 0;
- * CODE();
- * drbd_put_data_sock(mdev);
- */
-static inline int drbd_get_data_sock(struct drbd_conf *mdev)
-{
- mutex_lock(&mdev->data.mutex);
- /* drbd_disconnect() could have called drbd_free_sock()
- * while we were waiting in down()... */
- if (unlikely(mdev->data.socket == NULL)) {
- mutex_unlock(&mdev->data.mutex);
- return 0;
- }
- return 1;
-}
-
-static inline void drbd_put_data_sock(struct drbd_conf *mdev)
+static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
{
- mutex_unlock(&mdev->data.mutex);
+ return (struct drbd_conf *)idr_find(&tconn->volumes, vnr);
}
/*
@@ -1185,106 +1055,77 @@ static inline void drbd_put_data_sock(struct drbd_conf *mdev)
/* drbd_main.c */
-enum chg_state_flags {
- CS_HARD = 1,
- CS_VERBOSE = 2,
- CS_WAIT_COMPLETE = 4,
- CS_SERIALIZE = 8,
- CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
-};
-
enum dds_flags {
DDSF_FORCED = 1,
DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
-extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
- enum chg_state_flags f,
- union drbd_state mask,
- union drbd_state val);
-extern void drbd_force_state(struct drbd_conf *, union drbd_state,
- union drbd_state);
-extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
- union drbd_state,
- union drbd_state,
- enum chg_state_flags);
-extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
- enum chg_state_flags,
- struct completion *done);
-extern void print_st_err(struct drbd_conf *, union drbd_state,
- union drbd_state, int);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
+extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
#ifdef CONFIG_SMP
-extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
-extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
+extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
+extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
-extern void drbd_free_resources(struct drbd_conf *mdev);
-extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
+extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
unsigned int set_size);
-extern void tl_clear(struct drbd_conf *mdev);
-extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
-extern void drbd_free_sock(struct drbd_conf *mdev);
-extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
- void *buf, size_t size, unsigned msg_flags);
-extern int drbd_send_protocol(struct drbd_conf *mdev);
+extern void tl_clear(struct drbd_tconn *);
+extern void drbd_free_sock(struct drbd_tconn *tconn);
+extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+ void *buf, size_t size, unsigned msg_flags);
+extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
+ unsigned);
+
+extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
+extern int drbd_send_protocol(struct drbd_tconn *tconn);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
-extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
+extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
extern int drbd_send_current_state(struct drbd_conf *mdev);
-extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size, unsigned msg_flags);
-#define USE_DATA_SOCKET 1
-#define USE_META_SOCKET 0
-extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size);
-extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
- char *data, size_t size);
-extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
-extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
- u32 set_size);
-extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e);
-extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_block_req *rp);
-extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_data *dp, int data_size);
-extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
+extern int drbd_send_sync_param(struct drbd_conf *mdev);
+extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
+ u32 set_size);
+extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
+ struct drbd_peer_request *);
+extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_block_req *rp);
+extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_data *dp, int data_size);
+extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id);
-extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
-extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e);
+extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *);
+extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
+ struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id);
-extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
- sector_t sector,int size,
- void *digest, int digest_size,
- enum drbd_packets cmd);
+extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector,
+ int size, void *digest, int digest_size,
+ enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
extern int drbd_send_bitmap(struct drbd_conf *mdev);
-extern int _drbd_send_bitmap(struct drbd_conf *mdev);
-extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
+extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
+extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
+extern void conn_md_sync(struct drbd_tconn *tconn);
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
-extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
+extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
+extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
@@ -1302,33 +1143,52 @@ extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
extern int drbd_bitmap_io(struct drbd_conf *mdev,
int (*io_fn)(struct drbd_conf *),
char *why, enum bm_flag flags);
+extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
+ int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);
-
/* Meta data layout
We reserve a 128MB Block (4k aligned)
* either at the end of the backing device
* or on a separate meta data device. */
-#define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
/* The following numbers are sectors */
-#define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
-#define MD_AL_MAX_SIZE 64 /* = 32 kb LOG ~ 3776 extents ~ 14 GB Storage */
-/* Allows up to about 3.8TB */
-#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
-
-/* Since the smalles IO unit is usually 512 byte */
-#define MD_SECTOR_SHIFT 9
-#define MD_SECTOR_SIZE (1<<MD_SECTOR_SHIFT)
-
-/* activity log */
-#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1) /* 61 ; Extents per 512B sector */
-#define AL_EXTENT_SHIFT 22 /* One extent represents 4M Storage */
+/* Allows up to about 3.8TB, so if you want more,
+ * you need to use the "flexible" meta data format. */
+#define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
+#define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
+#define MD_AL_SECTORS 64 /* = 32 kB on disk activity log ring buffer */
+#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_SECTORS)
+
+/* we do all meta data IO in 4k blocks */
+#define MD_BLOCK_SHIFT 12
+#define MD_BLOCK_SIZE (1<<MD_BLOCK_SHIFT)
+
+/* One activity log extent represents 4M of storage */
+#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
+/* We could make these currently hardcoded constants configurable
+ * variables at create-md time (or even re-configurable at runtime?).
+ * Which will require some more changes to the DRBD "super block"
+ * and attach code.
+ *
+ * updates per transaction:
+ * This many changes to the active set can be logged with one transaction.
+ * This number is arbitrary.
+ * context per transaction:
+ * This many context extent numbers are logged with each transaction.
+ * This number is resulting from the transaction block size (4k), the layout
+ * of the transaction header, and the number of updates per transaction.
+ * See drbd_actlog.c:struct al_transaction_on_disk
+ * */
+#define AL_UPDATES_PER_TRANSACTION 64 // arbitrary
+#define AL_CONTEXT_PER_TRANSACTION 919 // (4096 - 36 - 6*64)/4
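
For reference, the 919 falls out of the 4 KiB transaction block: subtracting an assumed 36-byte header and the 64 updates at 6 bytes each (presumably a 16-bit slot number plus a 32-bit extent number per update) leaves (4096 - 36 - 384) / 4 = 919 four-byte context extent numbers; the exact on-disk layout lives in drbd_actlog.c as the comment above says.
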
+
#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
@@ -1364,11 +1224,14 @@ struct bm_extent {
#define SLEEP_TIME (HZ/10)
-#define BM_BLOCK_SHIFT 12 /* 4k per bit */
+/* We do bitmap IO in units of 4k blocks.
+ * We also still have a hardcoded 4k per bit relation. */
+#define BM_BLOCK_SHIFT 12 /* 4k per bit */
#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
-/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
- * per sector of on disk bitmap */
-#define BM_EXT_SHIFT (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3) /* = 24 */
+/* mostly arbitrarily set the represented size of one bitmap extent,
+ * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
+ * at 4k per bit resolution) */
+#define BM_EXT_SHIFT 24 /* 16 MiB per resync extent */
#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
@@ -1436,17 +1299,20 @@ struct bm_extent {
#endif
#endif
-/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
- * With a value of 8 all IO in one 128K block make it to the same slot of the
- * hash table. */
-#define HT_SHIFT 8
-#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
+/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
+ * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+ * Since we may live in a mixed-platform cluster,
+ * we limit ourselves to a platform-agnostic constant here for now.
+ * A follow-up commit may allow even bigger BIO sizes,
+ * once we have thought that through. */
+#define DRBD_MAX_BIO_SIZE (1U << 20)
+#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
-#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
-
-/* Number of elements in the app_reads_hash */
-#define APP_R_HSIZE 15
+#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
+#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
extern int drbd_bm_init(struct drbd_conf *mdev);
extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
@@ -1468,11 +1334,11 @@ extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
+extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
-extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
- unsigned long al_enr);
extern size_t drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t drbd_bm_capacity(struct drbd_conf *mdev);
@@ -1497,7 +1363,7 @@ extern void drbd_bm_unlock(struct drbd_conf *mdev);
/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
-extern struct kmem_cache *drbd_ee_cache; /* epoch entries */
+extern struct kmem_cache *drbd_ee_cache; /* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t *drbd_request_mempool;
@@ -1537,12 +1403,22 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
extern rwlock_t global_state_lock;
-extern struct drbd_conf *drbd_new_device(unsigned int minor);
-extern void drbd_free_mdev(struct drbd_conf *mdev);
+extern int conn_lowest_minor(struct drbd_tconn *tconn);
+enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
+extern void drbd_minor_destroy(struct kref *kref);
+
+extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
+extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
+extern void conn_destroy(struct kref *kref);
+struct drbd_tconn *conn_get_by_name(const char *name);
+extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+ void *peer_addr, int peer_addr_len);
+extern void conn_free_crypto(struct drbd_tconn *tconn);
extern int proc_details;
/* drbd_req */
+extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
@@ -1550,10 +1426,11 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
/* drbd_nl.c */
+extern int drbd_msg_put_info(const char *info);
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
@@ -1561,13 +1438,14 @@ extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
enum drbd_role new_role,
int force);
-extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
-extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
+extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
+extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
-extern int drbd_alter_sa(struct drbd_conf *mdev, int na);
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor);
+void drbd_resync_after_changed(struct drbd_conf *mdev);
extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
@@ -1576,13 +1454,13 @@ extern int drbd_resync_finished(struct drbd_conf *mdev);
extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
-extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
- unsigned int *done);
-extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
+ struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int);
+extern void wait_until_done_or_force_detached(struct drbd_conf *mdev,
+ struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
-static inline void ov_oos_print(struct drbd_conf *mdev)
+static inline void ov_out_of_sync_print(struct drbd_conf *mdev)
{
if (mdev->ov_last_oos_size) {
dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
@@ -1594,97 +1472,102 @@ static inline void ov_oos_print(struct drbd_conf *mdev)
extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
+extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
+ struct drbd_peer_request *, void *);
/* worker callbacks */
-extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
-extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
-extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int);
-extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_end_data_req(struct drbd_work *, int);
+extern int w_e_end_rsdata_req(struct drbd_work *, int);
+extern int w_e_end_csum_rs_req(struct drbd_work *, int);
+extern int w_e_end_ov_reply(struct drbd_work *, int);
+extern int w_e_end_ov_req(struct drbd_work *, int);
+extern int w_ov_finished(struct drbd_work *, int);
+extern int w_resync_timer(struct drbd_work *, int);
+extern int w_send_write_hint(struct drbd_work *, int);
+extern int w_make_resync_request(struct drbd_work *, int);
+extern int w_send_dblock(struct drbd_work *, int);
+extern int w_send_read_req(struct drbd_work *, int);
+extern int w_prev_work_done(struct drbd_work *, int);
+extern int w_e_reissue(struct drbd_work *, int);
+extern int w_restart_disk_io(struct drbd_work *, int);
+extern int w_send_out_of_sync(struct drbd_work *, int);
+extern int w_start_resync(struct drbd_work *, int);
extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type);
-extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
-extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local);
-extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- int is_net);
-#define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0)
-#define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1)
-extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
- struct list_head *head);
-extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
- struct list_head *head);
+extern int drbd_submit_peer_request(struct drbd_conf *,
+ struct drbd_peer_request *, const unsigned,
+ const int);
+extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *);
+extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
+ sector_t, unsigned int,
+ gfp_t) __must_hold(local);
+extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
+ int);
+#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
+#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
-extern void drbd_flush_workqueue(struct drbd_conf *mdev);
-extern void drbd_free_tl_hash(struct drbd_conf *mdev);
+extern void conn_flush_workqueue(struct drbd_tconn *tconn);
+extern int drbd_connected(struct drbd_conf *mdev);
+static inline void drbd_flush_workqueue(struct drbd_conf *mdev)
+{
+ conn_flush_workqueue(mdev->tconn);
+}
-/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
- * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
+/* Yes, there is kernel_setsockopt, but only since 2.6.18.
+ * So we have our own copy of it here. */
static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
+ char *optval, int optlen)
{
+ mm_segment_t oldfs = get_fs();
+ char __user *uoptval;
int err;
+
+ uoptval = (char __user __force *)optval;
+
+ set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
- err = sock_setsockopt(sock, level, optname, optval, optlen);
+ err = sock_setsockopt(sock, level, optname, uoptval, optlen);
else
- err = sock->ops->setsockopt(sock, level, optname, optval,
+ err = sock->ops->setsockopt(sock, level, optname, uoptval,
optlen);
+ set_fs(oldfs);
return err;
}
static inline void drbd_tcp_cork(struct socket *sock)
{
- int __user val = 1;
+ int val = 1;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_uncork(struct socket *sock)
{
- int __user val = 0;
+ int val = 0;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_nodelay(struct socket *sock)
{
- int __user val = 1;
+ int val = 1;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_quickack(struct socket *sock)
{
- int __user val = 2;
+ int val = 2;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
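
The four helpers above are the usual cork/uncork/nodelay/quickack knobs, rewritten to pass kernel pointers through the KERNEL_DS window in drbd_setsockopt(). A hedged usage sketch batching a send under TCP_CORK; example_send_batched() is hypothetical and skips the locking a real caller would need:

    /* Hypothetical caller: cork, push a buffer with drbd_send_all(), uncork. */
    static int example_send_batched(struct drbd_tconn *tconn, void *buf, size_t len)
    {
            struct socket *sock = tconn->data.socket;
            int err;

            drbd_tcp_cork(sock);
            err = drbd_send_all(tconn, sock, buf, len, 0);  /* declared above */
            drbd_tcp_uncork(sock);                          /* flush the corked data */
            return err;
    }
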
-void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
@@ -1693,8 +1576,8 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
-extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
+extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
@@ -1702,7 +1585,6 @@ extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
sector_t sector, int size);
-extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
int size, const char *file, const unsigned int line);
@@ -1712,73 +1594,24 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
-extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
extern void drbd_al_shrink(struct drbd_conf *mdev);
-
/* drbd_nl.c */
-
-void drbd_nl_cleanup(void);
-int __init drbd_nl_init(void);
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
-void drbd_bcast_sync_progress(struct drbd_conf *mdev);
-void drbd_bcast_ee(struct drbd_conf *mdev,
- const char *reason, const int dgs,
- const char* seen_hash, const char* calc_hash,
- const struct drbd_epoch_entry* e);
-
-
-/**
- * DOC: DRBD State macros
- *
- * These macros are used to express state changes in easily readable form.
- *
- * The NS macros expand to a mask and a value, that can be bit ored onto the
- * current state as soon as the spinlock (req_lock) was taken.
- *
- * The _NS macros are used for state functions that get called with the
- * spinlock. These macros expand directly to the new state value.
- *
- * Besides the basic forms NS() and _NS() additional _?NS[23] are defined
- * to express state changes that affect more than one aspect of the state.
- *
- * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
- * Means that the network connection was established and that the peer
- * is in secondary role.
- */
-#define role_MASK R_MASK
-#define peer_MASK R_MASK
-#define disk_MASK D_MASK
-#define pdsk_MASK D_MASK
-#define conn_MASK C_MASK
-#define susp_MASK 1
-#define user_isp_MASK 1
-#define aftr_isp_MASK 1
-#define susp_nod_MASK 1
-#define susp_fen_MASK 1
-
-#define NS(T, S) \
- ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T = (S); val; })
-#define NS2(T1, S1, T2, S2) \
- ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
- mask.T2 = T2##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
- val.T2 = (S2); val; })
-#define NS3(T1, S1, T2, S2, T3, S3) \
- ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
- mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
- val.T2 = (S2); val.T3 = (S3); val; })
-
-#define _NS(D, T, S) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
-#define _NS2(D, T1, S1, T2, S2) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
- __ns.T2 = (S2); __ns; })
-#define _NS3(D, T1, S1, T2, S2, T3, S3) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
- __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
+/* state info broadcast */
+struct sib_info {
+ enum drbd_state_info_bcast_reason sib_reason;
+ union {
+ struct {
+ char *helper_name;
+ unsigned helper_exit_code;
+ };
+ struct {
+ union drbd_state os;
+ union drbd_state ns;
+ };
+ };
+};
+void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib);
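
/*
 * Hedged sketch of filling a state-change broadcast.  The reason value
 * SIB_STATE_CHANGE is an assumption about enum drbd_state_info_bcast_reason,
 * which is not visible in this hunk.
 */
static void example_broadcast_state_change(struct drbd_conf *mdev,
					   union drbd_state os, union drbd_state ns)
{
	struct sib_info sib = {
		.sib_reason = SIB_STATE_CHANGE,	/* assumed enum value */
		.os = os,
		.ns = ns,
	};

	drbd_bcast_event(mdev, &sib);
}
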
/*
* inline helper functions
@@ -1795,9 +1628,10 @@ static inline struct page *page_chain_next(struct page *page)
#define page_chain_for_each_safe(page, n) \
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
+
+static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
- struct page *page = e->pages;
+ struct page *page = peer_req->pages;
page_chain_for_each(page) {
if (page_count(page) > 1)
return 1;
@@ -1805,18 +1639,6 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
return 0;
}
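
/*
 * Small sketch of walking the same page chain for another purpose, using only
 * the page_chain_for_each() iterator already used above; illustration only.
 */
static int example_count_pages(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	int n = 0;

	page_chain_for_each(page)
		n++;
	return n;
}
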
-static inline void drbd_state_lock(struct drbd_conf *mdev)
-{
- wait_event(mdev->misc_wait,
- !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
-}
-
-static inline void drbd_state_unlock(struct drbd_conf *mdev)
-{
- clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
- wake_up(&mdev->misc_wait);
-}
-
static inline enum drbd_state_rv
_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
@@ -1830,48 +1652,71 @@ _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
-/**
- * drbd_request_state() - Request a state change
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- *
- * This is the most graceful way of requesting a state change. It is quite
- * verbose in case the state change is not possible, and all those
- * state changes are globally serialized.
- */
-static inline int drbd_request_state(struct drbd_conf *mdev,
- union drbd_state mask,
- union drbd_state val)
+static inline union drbd_state drbd_read_state(struct drbd_conf *mdev)
{
- return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+ union drbd_state rv;
+
+ rv.i = mdev->state.i;
+ rv.susp = mdev->tconn->susp;
+ rv.susp_nod = mdev->tconn->susp_nod;
+ rv.susp_fen = mdev->tconn->susp_fen;
+
+ return rv;
}
enum drbd_force_detach_flags {
- DRBD_IO_ERROR,
+ DRBD_READ_ERROR,
+ DRBD_WRITE_ERROR,
DRBD_META_IO_ERROR,
DRBD_FORCE_DETACH,
};
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
- enum drbd_force_detach_flags forcedetach,
+ enum drbd_force_detach_flags df,
const char *where)
{
- switch (mdev->ldev->dc.on_io_error) {
- case EP_PASS_ON:
- if (forcedetach == DRBD_IO_ERROR) {
+ enum drbd_io_error_p ep;
+
+ rcu_read_lock();
+ ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ rcu_read_unlock();
+ switch (ep) {
+ case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
+ if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Local IO failed in %s.\n", where);
if (mdev->state.disk > D_INCONSISTENT)
_drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* NOTE fall through to detach case if forcedetach set */
+ /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
+ /* Remember whether we saw a READ or WRITE error.
+ *
+ * Recovery of the affected area for WRITE failure is covered
+ * by the activity log.
+ * READ errors may fall outside that area though. Certain READ
+ * errors can be "healed" by writing good data to the affected
+ * blocks, which triggers block re-allocation in lower layers.
+ *
+ * If we can not write the bitmap after a READ error,
+ * we may need to trigger a full sync (see w_go_diskless()).
+ *
+ * Force-detach is not really an IO error, but rather a
+ * desperate measure to try to deal with a completely
+ * unresponsive lower level IO stack.
+ * Still it should be treated as a WRITE error.
+ *
+ * Meta IO error is always WRITE error:
+ * we read meta data only once during attach,
+ * which will fail in case of errors.
+ */
set_bit(WAS_IO_ERROR, &mdev->flags);
- if (forcedetach == DRBD_FORCE_DETACH)
+ if (df == DRBD_READ_ERROR)
+ set_bit(WAS_READ_ERROR, &mdev->flags);
+ if (df == DRBD_FORCE_DETACH)
set_bit(FORCE_DETACH, &mdev->flags);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
@@ -1896,9 +1741,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
{
if (error) {
unsigned long flags;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__drbd_chk_io_error_(mdev, forcedetach, where);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
}
}
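
/*
 * Sketch (not part of this patch): how the error-classification entry point
 * above would be used from a local write completion path.  The signature of
 * drbd_chk_io_error_() is inferred from the surrounding hunk context.
 */
static void example_local_write_done(struct drbd_conf *mdev, int error)
{
	/* classify as a WRITE error; the req_lock is taken internally */
	drbd_chk_io_error_(mdev, error, DRBD_WRITE_ERROR, __func__);
}
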
@@ -1910,9 +1755,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
* BTW, for internal meta data, this happens to be the maximum capacity
* we could agree upon with our peer node.
*/
-static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
+static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + bdev->md.bm_offset;
@@ -1922,13 +1767,30 @@ static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
}
}
+static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
+{
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ return _drbd_md_first_sector(meta_dev_idx, bdev);
+}
+
/**
* drbd_md_last_sector() - Return the last sector number of the meta data area
* @bdev: Meta data block device.
*/
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + MD_AL_OFFSET - 1;
@@ -1956,12 +1818,18 @@ static inline sector_t drbd_get_capacity(struct block_device *bdev)
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
sector_t s;
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
s = drbd_get_capacity(bdev->backing_bdev)
? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
- drbd_md_first_sector(bdev))
+ _drbd_md_first_sector(meta_dev_idx, bdev))
: 0;
break;
case DRBD_MD_INDEX_FLEX_EXT:
@@ -1987,9 +1855,15 @@ static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
default: /* external, some index */
- return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
+ return MD_RESERVED_SECT * meta_dev_idx;
case DRBD_MD_INDEX_INTERNAL:
/* with drbd08, internal meta data is always "flexible" */
case DRBD_MD_INDEX_FLEX_INT:
@@ -2015,9 +1889,8 @@ drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
unsigned long flags;
spin_lock_irqsave(&q->q_lock, flags);
list_add(&w->list, &q->q);
- up(&q->s); /* within the spinlock,
- see comment near end of drbd_worker() */
spin_unlock_irqrestore(&q->q_lock, flags);
+ wake_up(&q->q_wait);
}
static inline void
@@ -2026,41 +1899,35 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
unsigned long flags;
spin_lock_irqsave(&q->q_lock, flags);
list_add_tail(&w->list, &q->q);
- up(&q->s); /* within the spinlock,
- see comment near end of drbd_worker() */
spin_unlock_irqrestore(&q->q_lock, flags);
+ wake_up(&q->q_wait);
}
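
/*
 * Sketch of queueing a one-shot work item with the new callback signature
 * (int (*)(struct drbd_work *, int)), which matches the w_md_sync() prototype
 * later in this patch.  The .cb member and the tconn->sender_work queue are
 * taken from other hunks; everything else here is illustrative.
 */
static int example_work_fn(struct drbd_work *w, int cancel)
{
	if (cancel)
		return 0;
	/* ... do the deferred work ... */
	return 0;
}

static void example_queue_it(struct drbd_tconn *tconn, struct drbd_work *w)
{
	w->cb = example_work_fn;
	drbd_queue_work(&tconn->sender_work, w);	/* wakes q->q_wait */
}
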
-static inline void wake_asender(struct drbd_conf *mdev)
-{
- if (test_bit(SIGNAL_ASENDER, &mdev->flags))
- force_sig(DRBD_SIG, mdev->asender.task);
-}
-
-static inline void request_ping(struct drbd_conf *mdev)
+static inline void wake_asender(struct drbd_tconn *tconn)
{
- set_bit(SEND_PING, &mdev->flags);
- wake_asender(mdev);
+ if (test_bit(SIGNAL_ASENDER, &tconn->flags))
+ force_sig(DRBD_SIG, tconn->asender.task);
}
-static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
- enum drbd_packets cmd)
+static inline void request_ping(struct drbd_tconn *tconn)
{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
+ set_bit(SEND_PING, &tconn->flags);
+ wake_asender(tconn);
}
-static inline int drbd_send_ping(struct drbd_conf *mdev)
-{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
-}
+extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
+extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *);
+extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
+ enum drbd_packet, unsigned int, void *,
+ unsigned int);
+extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *,
+ enum drbd_packet, unsigned int, void *,
+ unsigned int);
-static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
-{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
-}
+extern int drbd_send_ping(struct drbd_tconn *tconn);
+extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
+extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state);
+extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
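
/*
 * Hedged sketch of the prepare/send pairing declared above.  Reaching the
 * data socket as mdev->tconn->data and sending an empty payload are
 * assumptions; neither struct drbd_socket nor the packet layouts are visible
 * in this hunk.
 */
static int example_send_empty_packet(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	struct drbd_socket *sock = &mdev->tconn->data;	/* assumed member name */
	void *p = drbd_prepare_command(mdev, sock);

	if (!p)
		return -EIO;
	/* no payload beyond the header in this example */
	return drbd_send_command(mdev, sock, cmd, 0, NULL, 0);
}
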
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
@@ -2082,21 +1949,21 @@ static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
* or implicit barrier packets as necessary.
* increased:
* w_send_barrier
- * _req_mod(req, queue_for_net_write or queue_for_net_read);
+ * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
* it is much easier and equally valid to count what we queue for the
 * worker, even before it actually was queued or sent.
* (drbd_make_request_common; recovery path on read io-error)
* decreased:
* got_BarrierAck (respective tl_clear, tl_clear_barrier)
- * _req_mod(req, data_received)
+ * _req_mod(req, DATA_RECEIVED)
* [from receive_DataReply]
- * _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
+ * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
* [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
* for some reason it is NOT decreased in got_NegAck,
* but in the resulting cleanup code from report_params.
* we should try to remember the reason for that...
- * _req_mod(req, send_failed or send_canceled)
- * _req_mod(req, connection_lost_while_pending)
+ * _req_mod(req, SEND_FAILED or SEND_CANCELED)
+ * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
* [from tl_clear_barrier]
*/
static inline void inc_ap_pending(struct drbd_conf *mdev)
@@ -2104,17 +1971,19 @@ static inline void inc_ap_pending(struct drbd_conf *mdev)
atomic_inc(&mdev->ap_pending_cnt);
}
-#define ERR_IF_CNT_IS_NEGATIVE(which) \
- if (atomic_read(&mdev->which) < 0) \
+#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
+ if (atomic_read(&mdev->which) < 0) \
dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \
- __func__ , __LINE__ , \
- atomic_read(&mdev->which))
+ func, line, \
+ atomic_read(&mdev->which))
-#define dec_ap_pending(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- if (atomic_dec_and_test(&mdev->ap_pending_cnt)) \
- wake_up(&mdev->misc_wait); \
- ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
+#define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line)
+{
+ if (atomic_dec_and_test(&mdev->ap_pending_cnt))
+ wake_up(&mdev->misc_wait);
+ ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
+}
/* counts how many resync-related answers we still expect from the peer
* increase decrease
@@ -2127,10 +1996,12 @@ static inline void inc_rs_pending(struct drbd_conf *mdev)
atomic_inc(&mdev->rs_pending_cnt);
}
-#define dec_rs_pending(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_dec(&mdev->rs_pending_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)
+#define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line)
+{
+ atomic_dec(&mdev->rs_pending_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
+}
/* counts how many answers we still need to send to the peer.
* increased on
@@ -2146,38 +2017,18 @@ static inline void inc_unacked(struct drbd_conf *mdev)
atomic_inc(&mdev->unacked_cnt);
}
-#define dec_unacked(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_dec(&mdev->unacked_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
-
-#define sub_unacked(mdev, n) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_sub(n, &mdev->unacked_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
-
-
-static inline void put_net_conf(struct drbd_conf *mdev)
+#define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line)
{
- if (atomic_dec_and_test(&mdev->net_cnt))
- wake_up(&mdev->net_cnt_wait);
+ atomic_dec(&mdev->unacked_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
-/**
- * get_net_conf() - Increase ref count on mdev->net_conf; Returns 0 if nothing there
- * @mdev: DRBD device.
- *
- * You have to call put_net_conf() when finished working with mdev->net_conf.
- */
-static inline int get_net_conf(struct drbd_conf *mdev)
+#define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__)
+static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line)
{
- int have_net_conf;
-
- atomic_inc(&mdev->net_cnt);
- have_net_conf = mdev->state.conn >= C_UNCONNECTED;
- if (!have_net_conf)
- put_net_conf(mdev);
- return have_net_conf;
+ atomic_sub(n, &mdev->unacked_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
/**
@@ -2281,17 +2132,20 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
- int mxb = 1000000; /* arbitrary limit on open requests */
- if (get_net_conf(mdev)) {
- mxb = mdev->net_conf->max_buffers;
- put_net_conf(mdev);
- }
+ struct net_conf *nc;
+ int mxb;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
+ rcu_read_unlock();
+
return mxb;
}
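
/*
 * The rcu_read_lock()/rcu_dereference() pattern above generalizes to any
 * other net_conf field; a sketch, where the timeout field name is only an
 * assumption for illustration.
 */
static int example_get_timeout(struct drbd_conf *mdev)
{
	struct net_conf *nc;
	int timeout = 0;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		timeout = nc->timeout;	/* field name assumed */
	rcu_read_unlock();

	/* do not keep the nc pointer after rcu_read_unlock() */
	return timeout;
}
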
static inline int drbd_state_is_stable(struct drbd_conf *mdev)
{
- union drbd_state s = mdev->state;
+ union drbd_dev_state s = mdev->state;
/* DO NOT add a default clause, we want the compiler to warn us
* for any newly introduced state we may have forgotten to add here */
@@ -2325,7 +2179,7 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
/* Allow IO in BM exchange states with new protocols */
case C_WF_BITMAP_S:
- if (mdev->agreed_pro_version < 96)
+ if (mdev->tconn->agreed_pro_version < 96)
return 0;
break;
@@ -2347,7 +2201,7 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
/* disk state is stable as well. */
break;
- /* no new io accepted during tansitional states */
+ /* no new io accepted during transitional states */
case D_ATTACHING:
case D_NEGOTIATING:
case D_UNKNOWN:
@@ -2359,16 +2213,18 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
return 1;
}
-static inline int is_susp(union drbd_state s)
+static inline int drbd_suspended(struct drbd_conf *mdev)
{
- return s.susp || s.susp_nod || s.susp_fen;
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ return tconn->susp || tconn->susp_fen || tconn->susp_nod;
}
static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
{
int mxb = drbd_get_max_buffers(mdev);
- if (is_susp(mdev->state))
+ if (drbd_suspended(mdev))
return false;
if (test_bit(SUSPEND_IO, &mdev->flags))
return false;
@@ -2390,30 +2246,30 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
return true;
}
-static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
+static inline bool inc_ap_bio_cond(struct drbd_conf *mdev)
{
bool rv = false;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
rv = may_inc_ap_bio(mdev);
if (rv)
- atomic_add(count, &mdev->ap_bio_cnt);
- spin_unlock_irq(&mdev->req_lock);
+ atomic_inc(&mdev->ap_bio_cnt);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return rv;
}
-static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
+static inline void inc_ap_bio(struct drbd_conf *mdev)
{
/* we wait here
* as long as the device is suspended
* until the bitmap is no longer on the fly during connection
- * handshake as long as we would exeed the max_buffer limit.
+ * handshake as long as we would exceed the max_buffer limit.
*
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
- wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count));
+ wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev));
}
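
/*
 * Sketch of how the pair is meant to bracket an application bio; the real
 * call sites are in drbd_req.c and are not part of this hunk.  dec_ap_bio()
 * is the helper defined just below.
 */
static void example_start_bio(struct drbd_conf *mdev, struct bio *bio)
{
	inc_ap_bio(mdev);	/* may sleep until IO is allowed again */
	/* ... build and submit the drbd_request for @bio ... */
}

static void example_complete_bio(struct drbd_conf *mdev)
{
	dec_ap_bio(mdev);	/* may kick the queued bitmap IO work */
}
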
static inline void dec_ap_bio(struct drbd_conf *mdev)
@@ -2425,7 +2281,7 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
/* this currently does wake_up for every dec_ap_bio!
@@ -2435,6 +2291,12 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
wake_up(&mdev->misc_wait);
}
+static inline bool verify_can_do_stop_sector(struct drbd_conf *mdev)
+{
+ return mdev->tconn->agreed_pro_version >= 97 &&
+ mdev->tconn->agreed_pro_version != 100;
+}
+
static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
int changed = mdev->ed_uuid != val;
@@ -2442,40 +2304,6 @@ static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
return changed;
}
-static inline int seq_cmp(u32 a, u32 b)
-{
- /* we assume wrap around at 32bit.
- * for wrap around at 24bit (old atomic_t),
- * we'd have to
- * a <<= 8; b <<= 8;
- */
- return (s32)(a) - (s32)(b);
-}
-#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
-#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
-#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
-#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
-/* CAUTION: please no side effects in arguments! */
-#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))
-
-static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
-{
- unsigned int m;
- spin_lock(&mdev->peer_seq_lock);
- m = seq_max(mdev->peer_seq, new_seq);
- mdev->peer_seq = m;
- spin_unlock(&mdev->peer_seq_lock);
- if (m == new_seq)
- wake_up(&mdev->seq_wait);
-}
-
-static inline void drbd_update_congested(struct drbd_conf *mdev)
-{
- struct sock *sk = mdev->data.socket->sk;
- if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
- set_bit(NET_CONGESTED, &mdev->flags);
-}
-
static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
/* sorry, we currently have no working implementation
@@ -2490,10 +2318,15 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
+ if (mdev->ldev == NULL) {
+ dev_warn(DEV, "mdev->ldev == NULL in drbd_md_flush\n");
+ return;
+ }
+
if (test_bit(MD_NO_FUA, &mdev->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
+ r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
set_bit(MD_NO_FUA, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
new file mode 100644
index 000000000000..89c497c630b4
--- /dev/null
+++ b/drivers/block/drbd/drbd_interval.c
@@ -0,0 +1,207 @@
+#include <asm/bug.h>
+#include <linux/rbtree_augmented.h>
+#include "drbd_interval.h"
+
+/**
+ * interval_end - return end of @node
+ */
+static inline
+sector_t interval_end(struct rb_node *node)
+{
+ struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);
+ return this->end;
+}
+
+/**
+ * compute_subtree_last - compute end of @node
+ *
+ * The end of an interval is the highest (start + (size >> 9)) value of this
+ * node and of its children. Called for @node and its parents whenever the end
+ * may have changed.
+ */
+static inline sector_t
+compute_subtree_last(struct drbd_interval *node)
+{
+ sector_t max = node->sector + (node->size >> 9);
+
+ if (node->rb.rb_left) {
+ sector_t left = interval_end(node->rb.rb_left);
+ if (left > max)
+ max = left;
+ }
+ if (node->rb.rb_right) {
+ sector_t right = interval_end(node->rb.rb_right);
+ if (right > max)
+ max = right;
+ }
+ return max;
+}
+
+static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
+{
+ while (rb != stop) {
+ struct drbd_interval *node = rb_entry(rb, struct drbd_interval, rb);
+ sector_t subtree_last = compute_subtree_last(node);
+ if (node->end == subtree_last)
+ break;
+ node->end = subtree_last;
+ rb = rb_parent(&node->rb);
+ }
+}
+
+static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
+{
+ struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
+ struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
+
+ new->end = old->end;
+}
+
+static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
+{
+ struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
+ struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
+
+ new->end = old->end;
+ old->end = compute_subtree_last(old);
+}
+
+static const struct rb_augment_callbacks augment_callbacks = {
+ augment_propagate,
+ augment_copy,
+ augment_rotate,
+};
+
+/**
+ * drbd_insert_interval - insert a new interval into a tree
+ */
+bool
+drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+{
+ struct rb_node **new = &root->rb_node, *parent = NULL;
+
+ BUG_ON(!IS_ALIGNED(this->size, 512));
+
+ while (*new) {
+ struct drbd_interval *here =
+ rb_entry(*new, struct drbd_interval, rb);
+
+ parent = *new;
+ if (this->sector < here->sector)
+ new = &(*new)->rb_left;
+ else if (this->sector > here->sector)
+ new = &(*new)->rb_right;
+ else if (this < here)
+ new = &(*new)->rb_left;
+ else if (this > here)
+ new = &(*new)->rb_right;
+ else
+ return false;
+ }
+
+ rb_link_node(&this->rb, parent, new);
+ rb_insert_augmented(&this->rb, root, &augment_callbacks);
+ return true;
+}
+
+/**
+ * drbd_contains_interval - check if a tree contains a given interval
+ * @sector: start sector of @interval
+ * @interval: may not be a valid pointer
+ *
+ * Returns whether the tree contains the node @interval with start sector @sector.
+ * Does not dereference @interval until @interval is known to be a valid object
+ * in @tree. Returns %false if @interval is in the tree but with a different
+ * sector number.
+ */
+bool
+drbd_contains_interval(struct rb_root *root, sector_t sector,
+ struct drbd_interval *interval)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct drbd_interval *here =
+ rb_entry(node, struct drbd_interval, rb);
+
+ if (sector < here->sector)
+ node = node->rb_left;
+ else if (sector > here->sector)
+ node = node->rb_right;
+ else if (interval < here)
+ node = node->rb_left;
+ else if (interval > here)
+ node = node->rb_right;
+ else
+ return true;
+ }
+ return false;
+}
+
+/**
+ * drbd_remove_interval - remove an interval from a tree
+ */
+void
+drbd_remove_interval(struct rb_root *root, struct drbd_interval *this)
+{
+ rb_erase_augmented(&this->rb, root, &augment_callbacks);
+}
+
+/**
+ * drbd_find_overlap - search for an interval overlapping with [sector, sector + size)
+ * @sector: start sector
+ * @size: size, aligned to 512 bytes
+ *
+ * Returns an interval overlapping with [sector, sector + size), or NULL if
+ * there is none. When there is more than one overlapping interval in the
+ * tree, the interval with the lowest start sector is returned, and all other
+ * overlapping intervals will be on the right side of the tree, reachable with
+ * rb_next().
+ */
+struct drbd_interval *
+drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size)
+{
+ struct rb_node *node = root->rb_node;
+ struct drbd_interval *overlap = NULL;
+ sector_t end = sector + (size >> 9);
+
+ BUG_ON(!IS_ALIGNED(size, 512));
+
+ while (node) {
+ struct drbd_interval *here =
+ rb_entry(node, struct drbd_interval, rb);
+
+ if (node->rb_left &&
+ sector < interval_end(node->rb_left)) {
+ /* Overlap if any must be on left side */
+ node = node->rb_left;
+ } else if (here->sector < end &&
+ sector < here->sector + (here->size >> 9)) {
+ overlap = here;
+ break;
+ } else if (sector >= here->sector) {
+ /* Overlap if any must be on right side */
+ node = node->rb_right;
+ } else
+ break;
+ }
+ return overlap;
+}
+
+struct drbd_interval *
+drbd_next_overlap(struct drbd_interval *i, sector_t sector, unsigned int size)
+{
+ sector_t end = sector + (size >> 9);
+ struct rb_node *node;
+
+ for (;;) {
+ node = rb_next(&i->rb);
+ if (!node)
+ return NULL;
+ i = rb_entry(node, struct drbd_interval, rb);
+ if (i->sector >= end)
+ return NULL;
+ if (sector < i->sector + (i->size >> 9))
+ return i;
+ }
+}
diff --git a/drivers/block/drbd/drbd_interval.h b/drivers/block/drbd/drbd_interval.h
new file mode 100644
index 000000000000..f38fcb00c10d
--- /dev/null
+++ b/drivers/block/drbd/drbd_interval.h
@@ -0,0 +1,40 @@
+#ifndef __DRBD_INTERVAL_H
+#define __DRBD_INTERVAL_H
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+
+struct drbd_interval {
+ struct rb_node rb;
+ sector_t sector; /* start sector of the interval */
+ unsigned int size; /* size in bytes */
+ sector_t end; /* highest interval end in subtree */
+ int local:1 /* local or remote request? */;
+ int waiting:1;
+};
+
+static inline void drbd_clear_interval(struct drbd_interval *i)
+{
+ RB_CLEAR_NODE(&i->rb);
+}
+
+static inline bool drbd_interval_empty(struct drbd_interval *i)
+{
+ return RB_EMPTY_NODE(&i->rb);
+}
+
+extern bool drbd_insert_interval(struct rb_root *, struct drbd_interval *);
+extern bool drbd_contains_interval(struct rb_root *, sector_t,
+ struct drbd_interval *);
+extern void drbd_remove_interval(struct rb_root *, struct drbd_interval *);
+extern struct drbd_interval *drbd_find_overlap(struct rb_root *, sector_t,
+ unsigned int);
+extern struct drbd_interval *drbd_next_overlap(struct drbd_interval *, sector_t,
+ unsigned int);
+
+#define drbd_for_each_overlap(i, root, sector, size) \
+ for (i = drbd_find_overlap(root, sector, size); \
+ i; \
+ i = drbd_next_overlap(i, sector, size))
+
+#endif /* __DRBD_INTERVAL_H */
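
/*
 * Usage sketch for the new interval tree, tying drbd_insert_interval(),
 * drbd_for_each_overlap() and drbd_remove_interval() together.  Where the
 * rb_root lives (per device, per connection, ...) is an assumption here;
 * this is not a hunk of the patch.
 */
static bool example_track_and_check(struct rb_root *root,
				    struct drbd_interval *i,
				    sector_t sector, unsigned int size)
{
	struct drbd_interval *overlap;
	bool conflict = false;

	i->sector = sector;
	i->size = size;		/* bytes, must be a multiple of 512 */
	drbd_clear_interval(i);

	if (!drbd_insert_interval(root, i))
		return false;	/* already in the tree */

	/* walk every interval overlapping [sector, sector + size) */
	drbd_for_each_overlap(overlap, root, sector, size) {
		if (overlap != i)
			conflict = true;
	}

	drbd_remove_interval(root, i);
	drbd_clear_interval(i);
	return conflict;
}
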
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f55683ad4ffa..8c13eeb83c53 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -56,14 +56,6 @@
#include "drbd_vli.h"
-struct after_state_chg_work {
- struct drbd_work w;
- union drbd_state os;
- union drbd_state ns;
- enum chg_state_flags flags;
- struct completion *done;
-};
-
static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
@@ -72,21 +64,17 @@ int drbd_asender(struct drbd_thread *);
int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
-static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum chg_state_flags flags);
-static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
+static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
-static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static void _tl_clear(struct drbd_conf *mdev);
+static int w_bitmap_io(struct drbd_work *w, int unused);
+static int w_go_diskless(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
"Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
+MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
__stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
@@ -98,7 +86,6 @@ MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
-module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
@@ -120,7 +107,6 @@ module_param(fault_devs, int, 0644);
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
-unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd*/
/* Module parameter for setting the user mode helper program
@@ -132,10 +118,11 @@ module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
*/
-struct drbd_conf **minor_table;
+struct idr minors;
+struct list_head drbd_tconns; /* list of struct drbd_tconn */
struct kmem_cache *drbd_request_cache;
-struct kmem_cache *drbd_ee_cache; /* epoch entries */
+struct kmem_cache *drbd_ee_cache; /* peer requests */
struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t *drbd_request_mempool;
@@ -164,10 +151,15 @@ static const struct block_device_operations drbd_ops = {
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
+ struct bio *bio;
+
if (!drbd_md_io_bio_set)
return bio_alloc(gfp_mask, 1);
- return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ if (!bio)
+ return NULL;
+ return bio;
}
#ifdef __CHECKER__
@@ -190,158 +182,87 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
#endif
/**
- * DOC: The transfer log
- *
- * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
- * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
- * of the list. There is always at least one &struct drbd_tl_epoch object.
- *
- * Each &struct drbd_tl_epoch has a circular double linked list of requests
- * attached.
- */
-static int tl_init(struct drbd_conf *mdev)
-{
- struct drbd_tl_epoch *b;
-
- /* during device minor initialization, we may well use GFP_KERNEL */
- b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
- if (!b)
- return 0;
- INIT_LIST_HEAD(&b->requests);
- INIT_LIST_HEAD(&b->w.list);
- b->next = NULL;
- b->br_number = 4711;
- b->n_writes = 0;
- b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
-
- mdev->oldest_tle = b;
- mdev->newest_tle = b;
- INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
- INIT_LIST_HEAD(&mdev->barrier_acked_requests);
-
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
-
- return 1;
-}
-
-static void tl_cleanup(struct drbd_conf *mdev)
-{
- D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
- D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
- kfree(mdev->oldest_tle);
- mdev->oldest_tle = NULL;
- kfree(mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
- kfree(mdev->tl_hash);
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
-}
-
-/**
- * _tl_add_barrier() - Adds a barrier to the transfer log
- * @mdev: DRBD device.
- * @new: Barrier to be added before the current head of the TL.
- *
- * The caller must hold the req_lock.
- */
-void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
-{
- struct drbd_tl_epoch *newest_before;
-
- INIT_LIST_HEAD(&new->requests);
- INIT_LIST_HEAD(&new->w.list);
- new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
- new->next = NULL;
- new->n_writes = 0;
-
- newest_before = mdev->newest_tle;
- new->br_number = newest_before->br_number+1;
- if (mdev->newest_tle != new) {
- mdev->newest_tle->next = new;
- mdev->newest_tle = new;
- }
-}
-
-/**
- * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
- * @mdev: DRBD device.
+ * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
+ * @tconn: DRBD connection.
* @barrier_nr: Expected identifier of the DRBD write barrier packet.
* @set_size: Expected number of requests before that barrier.
*
* In case the passed barrier_nr or set_size does not match the oldest
- * &struct drbd_tl_epoch objects this function will cause a termination
- * of the connection.
+ * epoch of not yet barrier-acked requests, this function will cause a
+ * termination of the connection.
*/
-void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
- unsigned int set_size)
+void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+ unsigned int set_size)
{
- struct drbd_tl_epoch *b, *nob; /* next old barrier */
- struct list_head *le, *tle;
struct drbd_request *r;
-
- spin_lock_irq(&mdev->req_lock);
-
- b = mdev->oldest_tle;
+ struct drbd_request *req = NULL;
+ int expect_epoch = 0;
+ int expect_size = 0;
+
+ spin_lock_irq(&tconn->req_lock);
+
+ /* find oldest not yet barrier-acked write request,
+ * count writes in its epoch. */
+ list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ const unsigned s = r->rq_state;
+ if (!req) {
+ if (!(s & RQ_WRITE))
+ continue;
+ if (!(s & RQ_NET_MASK))
+ continue;
+ if (s & RQ_NET_DONE)
+ continue;
+ req = r;
+ expect_epoch = req->epoch;
+ expect_size++;
+ } else {
+ if (r->epoch != expect_epoch)
+ break;
+ if (!(s & RQ_WRITE))
+ continue;
+ /* if (s & RQ_DONE): not expected */
+ /* if (!(s & RQ_NET_MASK)): not expected */
+ expect_size++;
+ }
+ }
/* first some paranoia code */
- if (b == NULL) {
- dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
- barrier_nr);
+ if (req == NULL) {
+ conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+ barrier_nr);
goto bail;
}
- if (b->br_number != barrier_nr) {
- dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
- barrier_nr, b->br_number);
+ if (expect_epoch != barrier_nr) {
+ conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+ barrier_nr, expect_epoch);
goto bail;
}
- if (b->n_writes != set_size) {
- dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
- barrier_nr, set_size, b->n_writes);
+
+ if (expect_size != set_size) {
+ conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+ barrier_nr, set_size, expect_size);
goto bail;
}
- /* Clean up list of requests processed during current epoch */
- list_for_each_safe(le, tle, &b->requests) {
- r = list_entry(le, struct drbd_request, tl_requests);
- _req_mod(r, barrier_acked);
- }
- /* There could be requests on the list waiting for completion
- of the write to the local disk. To avoid corruptions of
- slab's data structures we have to remove the lists head.
-
- Also there could have been a barrier ack out of sequence, overtaking
- the write acks - which would be a bug and violating write ordering.
- To not deadlock in case we lose connection while such requests are
- still pending, we need some way to find them for the
- _req_mod(connection_lost_while_pending).
-
- These have been list_move'd to the out_of_sequence_requests list in
- _req_mod(, barrier_acked) above.
- */
- list_splice_init(&b->requests, &mdev->barrier_acked_requests);
-
- nob = b->next;
- if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, b);
- if (nob)
- mdev->oldest_tle = nob;
- /* if nob == NULL b was the only barrier, and becomes the new
- barrier. Therefore mdev->oldest_tle points already to b */
- } else {
- D_ASSERT(nob != NULL);
- mdev->oldest_tle = nob;
- kfree(b);
+ /* Clean up list of requests processed during current epoch. */
+ /* this extra list walk restart is paranoia,
+ * to catch requests being barrier-acked "unexpectedly".
+ * It usually should find the same req again, or some READ preceding it. */
+ list_for_each_entry(req, &tconn->transfer_log, tl_requests)
+ if (req->epoch == expect_epoch)
+ break;
+ list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
+ if (req->epoch != expect_epoch)
+ break;
+ _req_mod(req, BARRIER_ACKED);
}
-
- spin_unlock_irq(&mdev->req_lock);
- dec_ap_pending(mdev);
+ spin_unlock_irq(&tconn->req_lock);
return;
bail:
- spin_unlock_irq(&mdev->req_lock);
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+ spin_unlock_irq(&tconn->req_lock);
+ conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
@@ -350,85 +271,24 @@ bail:
* @mdev: DRBD device.
* @what: The action/event to perform with all request objects
*
- * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
- * restart_frozen_disk_io.
+ * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
+ * RESTART_FROZEN_DISK_IO.
*/
-static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
-{
- struct drbd_tl_epoch *b, *tmp, **pn;
- struct list_head *le, *tle, carry_reads;
- struct drbd_request *req;
- int rv, n_writes, n_reads;
-
- b = mdev->oldest_tle;
- pn = &mdev->oldest_tle;
- while (b) {
- n_writes = 0;
- n_reads = 0;
- INIT_LIST_HEAD(&carry_reads);
- list_for_each_safe(le, tle, &b->requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
- rv = _req_mod(req, what);
-
- n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
- n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
- }
- tmp = b->next;
-
- if (n_writes) {
- if (what == resend) {
- b->n_writes = n_writes;
- if (b->w.cb == NULL) {
- b->w.cb = w_send_barrier;
- inc_ap_pending(mdev);
- set_bit(CREATE_BARRIER, &mdev->flags);
- }
-
- drbd_queue_work(&mdev->data.work, &b->w);
- }
- pn = &b->next;
- } else {
- if (n_reads)
- list_add(&carry_reads, &b->requests);
- /* there could still be requests on that ring list,
- * in case local io is still pending */
- list_del(&b->requests);
-
- /* dec_ap_pending corresponding to queue_barrier.
- * the newest barrier may not have been queued yet,
- * in which case w.cb is still NULL. */
- if (b->w.cb != NULL)
- dec_ap_pending(mdev);
-
- if (b == mdev->newest_tle) {
- /* recycle, but reinit! */
- D_ASSERT(tmp == NULL);
- INIT_LIST_HEAD(&b->requests);
- list_splice(&carry_reads, &b->requests);
- INIT_LIST_HEAD(&b->w.list);
- b->w.cb = NULL;
- b->br_number = net_random();
- b->n_writes = 0;
-
- *pn = b;
- break;
- }
- *pn = tmp;
- kfree(b);
- }
- b = tmp;
- list_splice(&carry_reads, &b->requests);
- }
-
- /* Actions operating on the disk state, also want to work on
- requests that got barrier acked. */
+/* must hold resource->req_lock */
+void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+{
+ struct drbd_request *req, *r;
- list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
+ list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
_req_mod(req, what);
- }
}
+void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+{
+ spin_lock_irq(&tconn->req_lock);
+ _tl_restart(tconn, what);
+ spin_unlock_irq(&tconn->req_lock);
+}
/**
* tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
@@ -438,43 +298,9 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 * by the requests on the transfer log gets marked as out of sync. Called from the
* receiver thread and the worker thread.
*/
-void tl_clear(struct drbd_conf *mdev)
+void tl_clear(struct drbd_tconn *tconn)
{
- spin_lock_irq(&mdev->req_lock);
- _tl_clear(mdev);
- spin_unlock_irq(&mdev->req_lock);
-}
-
-static void _tl_clear(struct drbd_conf *mdev)
-{
- struct list_head *le, *tle;
- struct drbd_request *r;
-
- _tl_restart(mdev, connection_lost_while_pending);
-
- /* we expect this list to be empty. */
- D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
-
- /* but just in case, clean it up anyways! */
- list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
- r = list_entry(le, struct drbd_request, tl_requests);
- /* It would be nice to complete outside of spinlock.
- * But this is easier for now. */
- _req_mod(r, connection_lost_while_pending);
- }
-
- /* ensure bit indicating barrier is required is clear */
- clear_bit(CREATE_BARRIER, &mdev->flags);
-
- memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
-
-}
-
-void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
-{
- spin_lock_irq(&mdev->req_lock);
- _tl_restart(mdev, what);
- spin_unlock_irq(&mdev->req_lock);
+ tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
}
/**
@@ -483,1377 +309,131 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
*/
void tl_abort_disk_io(struct drbd_conf *mdev)
{
- struct drbd_tl_epoch *b;
- struct list_head *le, *tle;
- struct drbd_request *req;
-
- spin_lock_irq(&mdev->req_lock);
- b = mdev->oldest_tle;
- while (b) {
- list_for_each_safe(le, tle, &b->requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
- if (!(req->rq_state & RQ_LOCAL_PENDING))
- continue;
- _req_mod(req, abort_disk_io);
- }
- b = b->next;
- }
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_request *req, *r;
- list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
+ spin_lock_irq(&tconn->req_lock);
+ list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
if (!(req->rq_state & RQ_LOCAL_PENDING))
continue;
- _req_mod(req, abort_disk_io);
- }
-
- spin_unlock_irq(&mdev->req_lock);
-}
-
-/**
- * cl_wide_st_chg() - true if the state change is a cluster wide one
- * @mdev: DRBD device.
- * @os: old (current) state.
- * @ns: new (wanted) state.
- */
-static int cl_wide_st_chg(struct drbd_conf *mdev,
- union drbd_state os, union drbd_state ns)
-{
- return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
- ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
- (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
- (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
- (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
- (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
- (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
-}
-
-enum drbd_state_rv
-drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
- union drbd_state mask, union drbd_state val)
-{
- unsigned long flags;
- union drbd_state os, ns;
- enum drbd_state_rv rv;
-
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- rv = _drbd_set_state(mdev, ns, f, NULL);
- ns = mdev->state;
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- return rv;
-}
-
-/**
- * drbd_force_state() - Impose a change which happens outside our control on our state
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- */
-void drbd_force_state(struct drbd_conf *mdev,
- union drbd_state mask, union drbd_state val)
-{
- drbd_change_state(mdev, CS_HARD, mask, val);
-}
-
-static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
-static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
- union drbd_state,
- union drbd_state);
-enum sanitize_state_warnings {
- NO_WARNING,
- ABORTED_ONLINE_VERIFY,
- ABORTED_RESYNC,
- CONNECTION_LOST_NEGOTIATING,
- IMPLICITLY_UPGRADED_DISK,
- IMPLICITLY_UPGRADED_PDSK,
-};
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum sanitize_state_warnings *warn);
-int drbd_send_state_req(struct drbd_conf *,
- union drbd_state, union drbd_state);
-
-static enum drbd_state_rv
-_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val)
-{
- union drbd_state os, ns;
- unsigned long flags;
- enum drbd_state_rv rv;
-
- if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
- return SS_CW_SUCCESS;
-
- if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
- return SS_CW_FAILED_BY_PEER;
-
- rv = 0;
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- ns = sanitize_state(mdev, os, ns, NULL);
-
- if (!cl_wide_st_chg(mdev, os, ns))
- rv = SS_CW_NO_NEED;
- if (!rv) {
- rv = is_valid_state(mdev, ns);
- if (rv == SS_SUCCESS) {
- rv = is_valid_state_transition(mdev, ns, os);
- if (rv == SS_SUCCESS)
- rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
- }
- }
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- return rv;
-}
-
-/**
- * drbd_req_state() - Perform a possibly cluster-wide state change
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- * @f: flags
- *
- * Should not be called directly, use drbd_request_state() or
- * _drbd_request_state().
- */
-static enum drbd_state_rv
-drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val, enum chg_state_flags f)
-{
- struct completion done;
- unsigned long flags;
- union drbd_state os, ns;
- enum drbd_state_rv rv;
-
- init_completion(&done);
-
- if (f & CS_SERIALIZE)
- mutex_lock(&mdev->state_mutex);
-
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- ns = sanitize_state(mdev, os, ns, NULL);
-
- if (cl_wide_st_chg(mdev, os, ns)) {
- rv = is_valid_state(mdev, ns);
- if (rv == SS_SUCCESS)
- rv = is_valid_state_transition(mdev, ns, os);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- if (rv < SS_SUCCESS) {
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
-
- drbd_state_lock(mdev);
- if (!drbd_send_state_req(mdev, mask, val)) {
- drbd_state_unlock(mdev);
- rv = SS_CW_FAILED_BY_PEER;
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
-
- wait_event(mdev->state_wait,
- (rv = _req_st_cond(mdev, mask, val)));
-
- if (rv < SS_SUCCESS) {
- drbd_state_unlock(mdev);
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- rv = _drbd_set_state(mdev, ns, f, &done);
- drbd_state_unlock(mdev);
- } else {
- rv = _drbd_set_state(mdev, ns, f, &done);
- }
-
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
- D_ASSERT(current != mdev->worker.task);
- wait_for_completion(&done);
- }
-
-abort:
- if (f & CS_SERIALIZE)
- mutex_unlock(&mdev->state_mutex);
-
- return rv;
-}
-
-/**
- * _drbd_request_state() - Request a state change (with flags)
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- * @f: flags
- *
- * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
- * flag, or when logging of failed state change requests is not desired.
- */
-enum drbd_state_rv
-_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val, enum chg_state_flags f)
-{
- enum drbd_state_rv rv;
-
- wait_event(mdev->state_wait,
- (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
-
- return rv;
-}
-
-static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
-{
- dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
- name,
- drbd_conn_str(ns.conn),
- drbd_role_str(ns.role),
- drbd_role_str(ns.peer),
- drbd_disk_str(ns.disk),
- drbd_disk_str(ns.pdsk),
- is_susp(ns) ? 's' : 'r',
- ns.aftr_isp ? 'a' : '-',
- ns.peer_isp ? 'p' : '-',
- ns.user_isp ? 'u' : '-'
- );
-}
-
-void print_st_err(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum drbd_state_rv err)
-{
- if (err == SS_IN_TRANSIENT_STATE)
- return;
- dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
- print_st(mdev, " state", os);
- print_st(mdev, "wanted", ns);
-}
-
-
-/**
- * is_valid_state() - Returns an SS_ error code if ns is not valid
- * @mdev: DRBD device.
- * @ns: State to consider.
- */
-static enum drbd_state_rv
-is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
-{
- /* See drbd_state_sw_errors in drbd_strings.c */
-
- enum drbd_fencing_p fp;
- enum drbd_state_rv rv = SS_SUCCESS;
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- if (get_net_conf(mdev)) {
- if (!mdev->net_conf->two_primaries &&
- ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
- rv = SS_TWO_PRIMARIES;
- put_net_conf(mdev);
- }
-
- if (rv <= 0)
- /* already found a reason to abort */;
- else if (ns.role == R_SECONDARY && mdev->open_cnt)
- rv = SS_DEVICE_IN_USE;
-
- else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if (fp >= FP_RESOURCE &&
- ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
- rv = SS_PRIMARY_NOP;
-
- else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
- rv = SS_NO_LOCAL_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
- rv = SS_NO_REMOTE_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if ((ns.conn == C_CONNECTED ||
- ns.conn == C_WF_BITMAP_S ||
- ns.conn == C_SYNC_SOURCE ||
- ns.conn == C_PAUSED_SYNC_S) &&
- ns.disk == D_OUTDATED)
- rv = SS_CONNECTED_OUTDATES;
-
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- (mdev->sync_conf.verify_alg[0] == 0))
- rv = SS_NO_VERIFY_ALG;
-
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- mdev->agreed_pro_version < 88)
- rv = SS_NOT_SUPPORTED;
-
- else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
- rv = SS_CONNECTED_OUTDATES;
-
- return rv;
-}
-
-/**
- * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
- * @mdev: DRBD device.
- * @ns: new state.
- * @os: old state.
- */
-static enum drbd_state_rv
-is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
- union drbd_state os)
-{
- enum drbd_state_rv rv = SS_SUCCESS;
-
- if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
- os.conn > C_CONNECTED)
- rv = SS_RESYNC_RUNNING;
-
- if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
- rv = SS_ALREADY_STANDALONE;
-
- if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
- rv = SS_IS_DISKLESS;
-
- if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
- rv = SS_NO_NET_CONFIG;
-
- if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
- rv = SS_LOWER_THAN_OUTDATED;
-
- if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
- rv = SS_IN_TRANSIENT_STATE;
-
- if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
- rv = SS_IN_TRANSIENT_STATE;
-
- /* While establishing a connection only allow cstate to change.
- Delay/refuse role changes, detach attach etc... */
- if (test_bit(STATE_SENT, &mdev->flags) &&
- !(os.conn == C_WF_REPORT_PARAMS ||
- (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
- rv = SS_IN_TRANSIENT_STATE;
-
- if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
- rv = SS_NEED_CONNECTION;
-
- if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- ns.conn != os.conn && os.conn > C_CONNECTED)
- rv = SS_RESYNC_RUNNING;
-
- if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
- os.conn < C_CONNECTED)
- rv = SS_NEED_CONNECTION;
-
- if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
- && os.conn < C_WF_REPORT_PARAMS)
- rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
-
- return rv;
-}
-
-static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
-{
- static const char *msg_table[] = {
- [NO_WARNING] = "",
- [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
- [ABORTED_RESYNC] = "Resync aborted.",
- [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
- [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
- [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
- };
-
- if (warn != NO_WARNING)
- dev_warn(DEV, "%s\n", msg_table[warn]);
-}
-
-/**
- * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
- * @mdev: DRBD device.
- * @os: old state.
- * @ns: new state.
- * @warn: where an optional sanitize_state_warnings code is stored.
- *
- * When we lose the connection, we have to set the state of the peer's disk (pdsk)
- * to D_UNKNOWN. This rule and many more along those lines are in this function.
- */
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum sanitize_state_warnings *warn)
-{
- enum drbd_fencing_p fp;
- enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
-
- if (warn)
- *warn = NO_WARNING;
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- /* Disallow Network errors to configure a device's network part */
- if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
- os.conn <= C_DISCONNECTING)
- ns.conn = os.conn;
-
- /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
- * If you try to go into some Sync* state, that shall fail (elsewhere). */
- if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
- ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
- ns.conn = os.conn;
-
- /* we cannot fail (again) if we already detached */
- if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
- ns.disk = D_DISKLESS;
-
- /* After C_DISCONNECTING only C_STANDALONE may follow */
- if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
- ns.conn = os.conn;
-
- if (ns.conn < C_CONNECTED) {
- ns.peer_isp = 0;
- ns.peer = R_UNKNOWN;
- if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
- ns.pdsk = D_UNKNOWN;
- }
-
- /* Clear the aftr_isp when becoming unconfigured */
- if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
- ns.aftr_isp = 0;
-
- /* Abort resync if a disk fails/detaches */
- if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
- (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
- if (warn)
- *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
- ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
- ns.conn = C_CONNECTED;
- }
-
- /* Connection breaks down before we finished "Negotiating" */
- if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
- get_ldev_if_state(mdev, D_NEGOTIATING)) {
- if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
- ns.disk = mdev->new_state_tmp.disk;
- ns.pdsk = mdev->new_state_tmp.pdsk;
- } else {
- if (warn)
- *warn = CONNECTION_LOST_NEGOTIATING;
- ns.disk = D_DISKLESS;
- ns.pdsk = D_UNKNOWN;
- }
- put_ldev(mdev);
- }
-
- /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
- if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
- if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
- ns.disk = D_UP_TO_DATE;
- if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
- ns.pdsk = D_UP_TO_DATE;
- }
-
-	/* Implications of the connection state on the disk states */
- disk_min = D_DISKLESS;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_UNKNOWN;
- switch ((enum drbd_conns)ns.conn) {
- case C_WF_BITMAP_T:
- case C_PAUSED_SYNC_T:
- case C_STARTING_SYNC_T:
- case C_WF_SYNC_UUID:
- case C_BEHIND:
- disk_min = D_INCONSISTENT;
- disk_max = D_OUTDATED;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_VERIFY_S:
- case C_VERIFY_T:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_CONNECTED:
- disk_min = D_DISKLESS;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_DISKLESS;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_WF_BITMAP_S:
- case C_PAUSED_SYNC_S:
- case C_STARTING_SYNC_S:
- case C_AHEAD:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
- break;
- case C_SYNC_TARGET:
- disk_min = D_INCONSISTENT;
- disk_max = D_INCONSISTENT;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_SYNC_SOURCE:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_INCONSISTENT;
- break;
- case C_STANDALONE:
- case C_DISCONNECTING:
- case C_UNCONNECTED:
- case C_TIMEOUT:
- case C_BROKEN_PIPE:
- case C_NETWORK_FAILURE:
- case C_PROTOCOL_ERROR:
- case C_TEAR_DOWN:
- case C_WF_CONNECTION:
- case C_WF_REPORT_PARAMS:
- case C_MASK:
- break;
- }
- if (ns.disk > disk_max)
- ns.disk = disk_max;
-
- if (ns.disk < disk_min) {
- if (warn)
- *warn = IMPLICITLY_UPGRADED_DISK;
- ns.disk = disk_min;
- }
- if (ns.pdsk > pdsk_max)
- ns.pdsk = pdsk_max;
-
- if (ns.pdsk < pdsk_min) {
- if (warn)
- *warn = IMPLICITLY_UPGRADED_PDSK;
- ns.pdsk = pdsk_min;
- }
-
- if (fp == FP_STONITH &&
- (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
- !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
- ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
-
- if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
- (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
- !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
-		ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible */
-
- if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
- if (ns.conn == C_SYNC_SOURCE)
- ns.conn = C_PAUSED_SYNC_S;
- if (ns.conn == C_SYNC_TARGET)
- ns.conn = C_PAUSED_SYNC_T;
- } else {
- if (ns.conn == C_PAUSED_SYNC_S)
- ns.conn = C_SYNC_SOURCE;
- if (ns.conn == C_PAUSED_SYNC_T)
- ns.conn = C_SYNC_TARGET;
- }
-
- return ns;
-}
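
A minimal, self-contained sketch of the clamp step performed at the end of the removed sanitize_state() above: the connection state selects a [min, max] window for the local and peer disk state, and the current value is pulled into that window, warning when the pull is an implicit upgrade. Enum names and their ordering here are simplified assumptions for illustration, not DRBD's real enum.

#include <stdio.h>

/* simplified ordering; DRBD's real enum has more states in between */
enum disk_state { D_DISKLESS, D_INCONSISTENT, D_OUTDATED, D_CONSISTENT, D_UP_TO_DATE };

static enum disk_state clamp_disk(enum disk_state d, enum disk_state min,
				  enum disk_state max, int *upgraded)
{
	if (d > max)
		d = max;
	if (d < min) {
		*upgraded = 1;		/* mirrors the IMPLICITLY_UPGRADED_DISK warning */
		d = min;
	}
	return d;
}

int main(void)
{
	int upgraded = 0;
	/* C_WF_BITMAP_T-like window: disk must lie in [D_INCONSISTENT, D_OUTDATED] */
	enum disk_state d = clamp_disk(D_DISKLESS, D_INCONSISTENT, D_OUTDATED, &upgraded);

	printf("disk=%d upgraded=%d\n", d, upgraded);	/* prints: disk=1 upgraded=1 */
	return 0;
}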
-
-/* helper for __drbd_set_state */
-static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
-{
- if (mdev->agreed_pro_version < 90)
- mdev->ov_start_sector = 0;
- mdev->rs_total = drbd_bm_bits(mdev);
- mdev->ov_position = 0;
- if (cs == C_VERIFY_T) {
- /* starting online verify from an arbitrary position
- * does not fit well into the existing protocol.
- * on C_VERIFY_T, we initialize ov_left and friends
- * implicitly in receive_DataRequest once the
- * first P_OV_REQUEST is received */
- mdev->ov_start_sector = ~(sector_t)0;
- } else {
- unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
- if (bit >= mdev->rs_total) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(mdev->rs_total - 1);
- mdev->rs_total = 1;
- } else
- mdev->rs_total -= bit;
- mdev->ov_position = mdev->ov_start_sector;
- }
- mdev->ov_left = mdev->rs_total;
-}
-
-static void drbd_resume_al(struct drbd_conf *mdev)
-{
- if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
- dev_info(DEV, "Resumed AL updates\n");
-}
-
-/**
- * __drbd_set_state() - Set a new DRBD state
- * @mdev: DRBD device.
- * @ns: new state.
- * @flags: Flags
- * @done: Optional completion; completed once after_state_ch() has finished
- *
- * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
- */
-enum drbd_state_rv
-__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
- enum chg_state_flags flags, struct completion *done)
-{
- union drbd_state os;
- enum drbd_state_rv rv = SS_SUCCESS;
- enum sanitize_state_warnings ssw;
- struct after_state_chg_work *ascw;
-
- os = mdev->state;
-
- ns = sanitize_state(mdev, os, ns, &ssw);
-
- if (ns.i == os.i)
- return SS_NOTHING_TO_DO;
-
- if (!(flags & CS_HARD)) {
- /* pre-state-change checks ; only look at ns */
- /* See drbd_state_sw_errors in drbd_strings.c */
-
- rv = is_valid_state(mdev, ns);
- if (rv < SS_SUCCESS) {
- /* If the old state was illegal as well, then let
- this happen...*/
-
- if (is_valid_state(mdev, os) == rv)
- rv = is_valid_state_transition(mdev, ns, os);
- } else
- rv = is_valid_state_transition(mdev, ns, os);
- }
-
- if (rv < SS_SUCCESS) {
- if (flags & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- return rv;
- }
-
- print_sanitize_warnings(mdev, ssw);
-
- {
- char *pbp, pb[300];
- pbp = pb;
- *pbp = 0;
- if (ns.role != os.role)
- pbp += sprintf(pbp, "role( %s -> %s ) ",
- drbd_role_str(os.role),
- drbd_role_str(ns.role));
- if (ns.peer != os.peer)
- pbp += sprintf(pbp, "peer( %s -> %s ) ",
- drbd_role_str(os.peer),
- drbd_role_str(ns.peer));
- if (ns.conn != os.conn)
- pbp += sprintf(pbp, "conn( %s -> %s ) ",
- drbd_conn_str(os.conn),
- drbd_conn_str(ns.conn));
- if (ns.disk != os.disk)
- pbp += sprintf(pbp, "disk( %s -> %s ) ",
- drbd_disk_str(os.disk),
- drbd_disk_str(ns.disk));
- if (ns.pdsk != os.pdsk)
- pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
- drbd_disk_str(os.pdsk),
- drbd_disk_str(ns.pdsk));
- if (is_susp(ns) != is_susp(os))
- pbp += sprintf(pbp, "susp( %d -> %d ) ",
- is_susp(os),
- is_susp(ns));
- if (ns.aftr_isp != os.aftr_isp)
- pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
- os.aftr_isp,
- ns.aftr_isp);
- if (ns.peer_isp != os.peer_isp)
- pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
- os.peer_isp,
- ns.peer_isp);
- if (ns.user_isp != os.user_isp)
- pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
- os.user_isp,
- ns.user_isp);
- dev_info(DEV, "%s\n", pb);
- }
-
- /* solve the race between becoming unconfigured,
- * worker doing the cleanup, and
- * admin reconfiguring us:
- * on (re)configure, first set CONFIG_PENDING,
- * then wait for a potentially exiting worker,
- * start the worker, and schedule one no_op.
- * then proceed with configuration.
- */
- if (ns.disk == D_DISKLESS &&
- ns.conn == C_STANDALONE &&
- ns.role == R_SECONDARY &&
- !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
- set_bit(DEVICE_DYING, &mdev->flags);
-
- /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
- * on the ldev here, to be sure the transition -> D_DISKLESS resp.
- * drbd_ldev_destroy() won't happen before our corresponding
-	 * after_state_ch work has run, where we put_ldev again. */
- if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
- (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
- atomic_inc(&mdev->local_cnt);
-
- mdev->state = ns;
-
- if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
- drbd_print_uuids(mdev, "attached to UUIDs");
-
- wake_up(&mdev->misc_wait);
- wake_up(&mdev->state_wait);
-
- /* aborted verify run. log the last position */
- if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
- ns.conn < C_CONNECTED) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
- dev_info(DEV, "Online Verify reached sector %llu\n",
- (unsigned long long)mdev->ov_start_sector);
- }
-
- if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
- (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
- dev_info(DEV, "Syncer continues.\n");
- mdev->rs_paused += (long)jiffies
- -(long)mdev->rs_mark_time[mdev->rs_last_mark];
- if (ns.conn == C_SYNC_TARGET)
- mod_timer(&mdev->resync_timer, jiffies);
- }
-
- if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
- (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
- dev_info(DEV, "Resync suspended\n");
- mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
- }
-
- if (os.conn == C_CONNECTED &&
- (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
- unsigned long now = jiffies;
- int i;
-
- set_ov_position(mdev, ns.conn);
- mdev->rs_start = now;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
- mdev->ov_last_oos_size = 0;
- mdev->ov_last_oos_start = 0;
-
- for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = mdev->ov_left;
- mdev->rs_mark_time[i] = now;
- }
-
- drbd_rs_controller_reset(mdev);
-
- if (ns.conn == C_VERIFY_S) {
- dev_info(DEV, "Starting Online Verify from sector %llu\n",
- (unsigned long long)mdev->ov_position);
- mod_timer(&mdev->resync_timer, jiffies);
- }
- }
-
- if (get_ldev(mdev)) {
- u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
- MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
- MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
-
- if (test_bit(CRASHED_PRIMARY, &mdev->flags))
- mdf |= MDF_CRASHED_PRIMARY;
- if (mdev->state.role == R_PRIMARY ||
- (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
- mdf |= MDF_PRIMARY_IND;
- if (mdev->state.conn > C_WF_REPORT_PARAMS)
- mdf |= MDF_CONNECTED_IND;
- if (mdev->state.disk > D_INCONSISTENT)
- mdf |= MDF_CONSISTENT;
- if (mdev->state.disk > D_OUTDATED)
- mdf |= MDF_WAS_UP_TO_DATE;
- if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
- mdf |= MDF_PEER_OUT_DATED;
- if (mdf != mdev->ldev->md.flags) {
- mdev->ldev->md.flags = mdf;
- drbd_md_mark_dirty(mdev);
- }
- if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
- drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
- put_ldev(mdev);
- }
-
-	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resyncing */
- if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
- os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
- set_bit(CONSIDER_RESYNC, &mdev->flags);
-
- /* Receiver should clean up itself */
- if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
- drbd_thread_stop_nowait(&mdev->receiver);
-
- /* Now the receiver finished cleaning up itself, it should die */
- if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
- drbd_thread_stop_nowait(&mdev->receiver);
-
- /* Upon network failure, we need to restart the receiver. */
- if (os.conn > C_WF_CONNECTION &&
- ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
- drbd_thread_restart_nowait(&mdev->receiver);
-
- /* Resume AL writing if we get a connection */
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
- drbd_resume_al(mdev);
-
- /* remember last connect and attach times so request_timer_fn() won't
- * kill newly established sessions while we are still trying to thaw
- * previously frozen IO */
- if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
- mdev->last_reconnect_jif = jiffies;
- if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- ns.disk > D_NEGOTIATING)
- mdev->last_reattach_jif = jiffies;
-
- ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
- if (ascw) {
- ascw->os = os;
- ascw->ns = ns;
- ascw->flags = flags;
- ascw->w.cb = w_after_state_ch;
- ascw->done = done;
- drbd_queue_work(&mdev->data.work, &ascw->w);
- } else {
- dev_warn(DEV, "Could not kmalloc an ascw\n");
- }
-
- return rv;
-}
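
The one-line state-change summary logged in the removed __drbd_set_state() above is built with the pointer-advancing sprintf idiom (pbp += sprintf(pbp, ...)), appending one "field( old -> new )" fragment per changed field into a stack buffer. A tiny standalone illustration of just that idiom; the role/conn strings are made-up sample values.

#include <stdio.h>

int main(void)
{
	char pb[300], *pbp = pb;

	*pbp = 0;
	pbp += sprintf(pbp, "role( %s -> %s ) ", "Secondary", "Primary");
	pbp += sprintf(pbp, "conn( %s -> %s ) ", "WFConnection", "Connected");

	/* prints: role( Secondary -> Primary ) conn( WFConnection -> Connected ) */
	printf("%s\n", pb);
	return 0;
}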
-
-static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
-{
- struct after_state_chg_work *ascw =
- container_of(w, struct after_state_chg_work, w);
- after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
- if (ascw->flags & CS_WAIT_COMPLETE) {
- D_ASSERT(ascw->done != NULL);
- complete(ascw->done);
- }
- kfree(ascw);
-
- return 1;
-}
-
-static void abw_start_sync(struct drbd_conf *mdev, int rv)
-{
- if (rv) {
- dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
- _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
- return;
- }
-
- switch (mdev->state.conn) {
- case C_STARTING_SYNC_T:
- _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
- break;
- case C_STARTING_SYNC_S:
- drbd_start_resync(mdev, C_SYNC_SOURCE);
- break;
- }
-}
-
-int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
- char *why, enum bm_flag flags)
-{
- int rv;
-
- D_ASSERT(current == mdev->worker.task);
-
- /* open coded non-blocking drbd_suspend_io(mdev); */
- set_bit(SUSPEND_IO, &mdev->flags);
-
- drbd_bm_lock(mdev, why, flags);
- rv = io_fn(mdev);
- drbd_bm_unlock(mdev);
-
- drbd_resume_io(mdev);
-
- return rv;
-}
-
-/**
- * after_state_ch() - Perform after state change actions that may sleep
- * @mdev: DRBD device.
- * @os: old state.
- * @ns: new state.
- * @flags: Flags
- */
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum chg_state_flags flags)
-{
- enum drbd_fencing_p fp;
- enum drbd_req_event what = nothing;
- union drbd_state nsm = (union drbd_state){ .i = -1 };
-
- if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
- if (mdev->p_uuid)
- mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
- }
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- /* Inform userspace about the change... */
- drbd_bcast_state(mdev, ns);
-
- if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
- (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
- drbd_khelper(mdev, "pri-on-incon-degr");
-
- /* Here we have the actions that are performed after a
- state change. This function might sleep */
-
- if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
- mod_timer(&mdev->request_timer, jiffies + HZ);
-
- nsm.i = -1;
- if (ns.susp_nod) {
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
- what = resend;
-
- if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- ns.disk > D_NEGOTIATING)
- what = restart_frozen_disk_io;
-
- if (what != nothing)
- nsm.susp_nod = 0;
- }
-
- if (ns.susp_fen) {
- /* case1: The outdate peer handler is successful: */
- if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
- }
- spin_lock_irq(&mdev->req_lock);
- _tl_clear(mdev);
- _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
- }
- /* case2: The connection was established again: */
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
- clear_bit(NEW_CUR_UUID, &mdev->flags);
- what = resend;
- nsm.susp_fen = 0;
- }
- }
-
- if (what != nothing) {
- spin_lock_irq(&mdev->req_lock);
- _tl_restart(mdev, what);
- nsm.i &= mdev->state.i;
- _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
- }
-
- /* Became sync source. With protocol >= 96, we still need to send out
- * the sync uuid now. Need to do that before any drbd_send_state, or
- * the other side may go "paused sync" before receiving the sync uuids,
- * which is unexpected. */
- if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
- (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
- mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
- drbd_gen_and_send_sync_uuid(mdev);
- put_ldev(mdev);
- }
-
- /* Do not change the order of the if above and the two below... */
- if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
- /* we probably will start a resync soon.
- * make sure those things are properly reset. */
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- drbd_rs_cancel_all(mdev);
-
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
- }
- /* No point in queuing send_bitmap if we don't have a connection
- * anymore, so check also the _current_ state, not only the new state
- * at the time this work was queued. */
- if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
- mdev->state.conn == C_WF_BITMAP_S)
- drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
- "send_bitmap (WFBitMapS)",
- BM_LOCKED_TEST_ALLOWED);
-
- /* Lost contact to peer's copy of the data */
- if ((os.pdsk >= D_INCONSISTENT &&
- os.pdsk != D_UNKNOWN &&
- os.pdsk != D_OUTDATED)
- && (ns.pdsk < D_INCONSISTENT ||
- ns.pdsk == D_UNKNOWN ||
- ns.pdsk == D_OUTDATED)) {
- if (get_ldev(mdev)) {
- if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- if (is_susp(mdev->state)) {
- set_bit(NEW_CUR_UUID, &mdev->flags);
- } else {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
- }
- }
- put_ldev(mdev);
- }
- }
-
- if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
- }
- /* D_DISKLESS Peer becomes secondary */
- if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
- /* We may still be Primary ourselves.
- * No harm done if the bitmap still changes,
- * redirtied pages will follow later. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
- "demote diskless peer", BM_LOCKED_SET_ALLOWED);
- put_ldev(mdev);
- }
-
- /* Write out all changed bits on demote.
-	 * Though, no need to do that just yet
- * if there is a resync going on still */
- if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
- mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
- /* No changes to the bitmap expected this time, so assert that,
- * even though no harm was done if it did change. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
- "demote", BM_LOCKED_TEST_ALLOWED);
- put_ldev(mdev);
- }
-
- /* Last part of the attaching process ... */
- if (ns.conn >= C_CONNECTED &&
- os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
- drbd_send_sizes(mdev, 0, 0); /* to start sync... */
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
- }
-
- /* We want to pause/continue resync, tell peer. */
- if (ns.conn >= C_CONNECTED &&
- ((os.aftr_isp != ns.aftr_isp) ||
- (os.user_isp != ns.user_isp)))
- drbd_send_state(mdev, ns);
-
- /* In case one of the isp bits got set, suspend other devices. */
- if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
- (ns.aftr_isp || ns.peer_isp || ns.user_isp))
- suspend_other_sg(mdev);
-
- /* Make sure the peer gets informed about eventual state
- changes (ISP bits) while we were in WFReportParams. */
- if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev, ns);
-
-	/* We are in the process of starting a full sync... */
- if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
- (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
- /* no other bitmap changes expected during this phase */
- drbd_queue_bitmap_io(mdev,
- &drbd_bmio_set_n_write, &abw_start_sync,
- "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
-
-	/* We are invalidating ourselves... */
- if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
- os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
- /* other bitmap operation expected during this phase */
- drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
- "set_n_write from invalidate", BM_LOCKED_MASK);
-
- /* first half of local IO error, failure to attach,
- * or administrative detach */
- if (os.disk != D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh = EP_PASS_ON;
- int was_io_error = 0;
- /* corresponding get_ldev was in __drbd_set_state, to serialize
- * our cleanup here with the transition to D_DISKLESS.
-		 * But it is still not safe to dereference ldev here, since
-		 * we might come from a failed Attach before ldev was set. */
- if (mdev->ldev) {
- eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
- if (was_io_error && eh == EP_CALL_HELPER)
- drbd_khelper(mdev, "local-io-error");
-
- /* Immediately allow completion of all application IO,
- * that waits for completion from the local disk,
- * if this was a force-detach due to disk_timeout
- * or administrator request (drbdsetup detach --force).
- * Do NOT abort otherwise.
- * Aborting local requests may cause serious problems,
- * if requests are completed to upper layers already,
- * and then later the already submitted local bio completes.
- * This can cause DMA into former bio pages that meanwhile
- * have been re-used for other things.
- * So aborting local requests may cause crashes,
- * or even worse, silent data corruption.
- */
- if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
- tl_abort_disk_io(mdev);
-
- /* current state still has to be D_FAILED,
- * there is only one way out: to D_DISKLESS,
- * and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
-
- if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- drbd_rs_cancel_all(mdev);
-
- /* In case we want to get something to stable storage still,
- * this may be the last chance.
- * Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
- }
- put_ldev(mdev);
- }
-
- /* second half of local IO error, failure to attach,
- * or administrative detach,
- * after local_cnt references have reached zero again */
- if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
- /* We must still be diskless,
- * re-attach has to be serialized with this! */
- if (mdev->state.disk != D_DISKLESS)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s while going diskless\n",
- drbd_disk_str(mdev->state.disk));
-
- if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* corresponding get_ldev in __drbd_set_state
- * this may finally trigger drbd_ldev_destroy. */
- put_ldev(mdev);
- }
-
-	/* Notify peer that I had a local IO error and did not detach. */
- if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* Disks got bigger while they were detached */
- if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
- test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
- if (ns.conn == C_CONNECTED)
- resync_after_online_grow(mdev);
- }
-
- /* A resync finished or aborted, wake paused devices... */
- if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
- (os.peer_isp && !ns.peer_isp) ||
- (os.user_isp && !ns.user_isp))
- resume_next_sg(mdev);
-
- /* sync target done with resync. Explicitly notify peer, even though
- * it should (at least for non-empty resyncs) already know itself. */
- if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* Wake up role changes, that were delayed because of connection establishing */
- if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
- clear_bit(STATE_SENT, &mdev->flags);
- wake_up(&mdev->state_wait);
- }
-
- /* This triggers bitmap writeout of potentially still unwritten pages
- * if the resync finished cleanly, or aborted because of peer disk
- * failure, or because of connection loss.
- * For resync aborted because of local disk failure, we cannot do
- * any bitmap writeout anymore.
- * No harm done if some bits change during this phase.
- */
- if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
- "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
- put_ldev(mdev);
- }
-
-	/* free tl_hash if we got thawed and are C_STANDALONE */
- if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
- drbd_free_tl_hash(mdev);
-
- /* Upon network connection, we need to start the receiver */
- if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
- drbd_thread_start(&mdev->receiver);
-
- /* Terminate worker thread if we are unconfigured - it will be
- restarted as needed... */
- if (ns.disk == D_DISKLESS &&
- ns.conn == C_STANDALONE &&
- ns.role == R_SECONDARY) {
- if (os.aftr_isp != ns.aftr_isp)
- resume_next_sg(mdev);
- /* set in __drbd_set_state, unless CONFIG_PENDING was set */
- if (test_bit(DEVICE_DYING, &mdev->flags))
- drbd_thread_stop_nowait(&mdev->worker);
+ if (req->w.mdev != mdev)
+ continue;
+ _req_mod(req, ABORT_DISK_IO);
}
-
- drbd_md_sync(mdev);
+ spin_unlock_irq(&tconn->req_lock);
}
-
static int drbd_thread_setup(void *arg)
{
struct drbd_thread *thi = (struct drbd_thread *) arg;
- struct drbd_conf *mdev = thi->mdev;
+ struct drbd_tconn *tconn = thi->tconn;
unsigned long flags;
int retval;
+ snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
+ thi->name[0], thi->tconn->name);
+
restart:
retval = thi->function(thi);
spin_lock_irqsave(&thi->t_lock, flags);
- /* if the receiver has been "Exiting", the last thing it did
+ /* if the receiver has been "EXITING", the last thing it did
* was set the conn state to "StandAlone",
* if now a re-connect request comes in, conn state goes C_UNCONNECTED,
* and receiver thread will be "started".
- * drbd_thread_start needs to set "Restarting" in that case.
+ * drbd_thread_start needs to set "RESTARTING" in that case.
* t_state check and assignment needs to be within the same spinlock,
- * so either thread_start sees Exiting, and can remap to Restarting,
- * or thread_start see None, and can proceed as normal.
+ * so either thread_start sees EXITING, and can remap to RESTARTING,
+ * or thread_start see NONE, and can proceed as normal.
*/
- if (thi->t_state == Restarting) {
- dev_info(DEV, "Restarting %s\n", current->comm);
- thi->t_state = Running;
+ if (thi->t_state == RESTARTING) {
+ conn_info(tconn, "Restarting %s thread\n", thi->name);
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
goto restart;
}
thi->task = NULL;
- thi->t_state = None;
+ thi->t_state = NONE;
smp_mb();
- complete(&thi->stop);
+ complete_all(&thi->stop);
spin_unlock_irqrestore(&thi->t_lock, flags);
- dev_info(DEV, "Terminating %s\n", current->comm);
+ conn_info(tconn, "Terminating %s\n", current->comm);
/* Release mod reference taken when thread was started */
+
+ kref_put(&tconn->kref, &conn_destroy);
module_put(THIS_MODULE);
return retval;
}
-static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
- int (*func) (struct drbd_thread *))
+static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
+ int (*func) (struct drbd_thread *), char *name)
{
spin_lock_init(&thi->t_lock);
thi->task = NULL;
- thi->t_state = None;
+ thi->t_state = NONE;
thi->function = func;
- thi->mdev = mdev;
+ thi->tconn = tconn;
+ strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}
int drbd_thread_start(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
+ struct drbd_tconn *tconn = thi->tconn;
struct task_struct *nt;
unsigned long flags;
- const char *me =
- thi == &mdev->receiver ? "receiver" :
- thi == &mdev->asender ? "asender" :
- thi == &mdev->worker ? "worker" : "NONSENSE";
-
/* is used from state engine doing drbd_thread_stop_nowait,
* while holding the req lock irqsave */
spin_lock_irqsave(&thi->t_lock, flags);
switch (thi->t_state) {
- case None:
- dev_info(DEV, "Starting %s thread (from %s [%d])\n",
- me, current->comm, current->pid);
+ case NONE:
+ conn_info(tconn, "Starting %s thread (from %s [%d])\n",
+ thi->name, current->comm, current->pid);
/* Get ref on module for thread - this is released when thread exits */
if (!try_module_get(THIS_MODULE)) {
- dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
+ conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
spin_unlock_irqrestore(&thi->t_lock, flags);
return false;
}
+ kref_get(&thi->tconn->kref);
+
init_completion(&thi->stop);
- D_ASSERT(thi->task == NULL);
thi->reset_cpu_mask = 1;
- thi->t_state = Running;
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
nt = kthread_create(drbd_thread_setup, (void *) thi,
- "drbd%d_%s", mdev_to_minor(mdev), me);
+ "drbd_%c_%s", thi->name[0], thi->tconn->name);
if (IS_ERR(nt)) {
- dev_err(DEV, "Couldn't start thread\n");
+ conn_err(tconn, "Couldn't start thread\n");
+ kref_put(&tconn->kref, &conn_destroy);
module_put(THIS_MODULE);
return false;
}
spin_lock_irqsave(&thi->t_lock, flags);
thi->task = nt;
- thi->t_state = Running;
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
wake_up_process(nt);
break;
- case Exiting:
- thi->t_state = Restarting;
- dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
- me, current->comm, current->pid);
+ case EXITING:
+ thi->t_state = RESTARTING;
+ conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
+ thi->name, current->comm, current->pid);
/* fall through */
- case Running:
- case Restarting:
+ case RUNNING:
+ case RESTARTING:
default:
spin_unlock_irqrestore(&thi->t_lock, flags);
break;
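
The thread lifecycle touched in this hunk is a small four-state machine: a stopped thread is NONE, drbd_thread_start() spawns it into RUNNING, a stop request moves it to EXITING, and a start request that races with an exit remaps EXITING to RESTARTING so the loop in drbd_thread_setup() re-enters the thread function instead of terminating. A hedged userspace sketch of just the start-side decision; the enum names are reused for readability, everything else is assumed.

#include <stdio.h>

enum thread_state { NONE, RUNNING, EXITING, RESTARTING };

/* what drbd_thread_start() decides while holding t_lock */
static enum thread_state on_start_request(enum thread_state s)
{
	switch (s) {
	case NONE:
		return RUNNING;		/* spawn a fresh kthread */
	case EXITING:
		return RESTARTING;	/* let the exiting loop restart itself */
	default:
		return s;		/* RUNNING/RESTARTING: nothing to do */
	}
}

int main(void)
{
	printf("%d %d %d\n", on_start_request(NONE),
	       on_start_request(EXITING), on_start_request(RUNNING));
	return 0;
}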
@@ -1867,12 +447,12 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
unsigned long flags;
- enum drbd_thread_state ns = restart ? Restarting : Exiting;
+ enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
/* may be called from state engine, holding the req lock irqsave */
spin_lock_irqsave(&thi->t_lock, flags);
- if (thi->t_state == None) {
+ if (thi->t_state == NONE) {
spin_unlock_irqrestore(&thi->t_lock, flags);
if (restart)
drbd_thread_start(thi);
@@ -1890,7 +470,6 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
init_completion(&thi->stop);
if (thi->task != current)
force_sig(DRBD_SIGKILL, thi->task);
-
}
spin_unlock_irqrestore(&thi->t_lock, flags);
@@ -1899,6 +478,35 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
wait_for_completion(&thi->stop);
}
+static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
+{
+ struct drbd_thread *thi =
+ task == tconn->receiver.task ? &tconn->receiver :
+ task == tconn->asender.task ? &tconn->asender :
+ task == tconn->worker.task ? &tconn->worker : NULL;
+
+ return thi;
+}
+
+char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
+{
+ struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
+ return thi ? thi->name : task->comm;
+}
+
+int conn_lowest_minor(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr = 0, m;
+
+ rcu_read_lock();
+ mdev = idr_get_next(&tconn->volumes, &vnr);
+ m = mdev ? mdev_to_minor(mdev) : -1;
+ rcu_read_unlock();
+
+ return m;
+}
+
#ifdef CONFIG_SMP
/**
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
@@ -1907,238 +515,345 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
* Forces all threads of a device onto the same CPU. This is beneficial for
* DRBD's performance. May be overwritten by user's configuration.
*/
-void drbd_calc_cpu_mask(struct drbd_conf *mdev)
+void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
int ord, cpu;
/* user override. */
- if (cpumask_weight(mdev->cpu_mask))
+ if (cpumask_weight(tconn->cpu_mask))
return;
- ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
+ ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
for_each_online_cpu(cpu) {
if (ord-- == 0) {
- cpumask_set_cpu(cpu, mdev->cpu_mask);
+ cpumask_set_cpu(cpu, tconn->cpu_mask);
return;
}
}
/* should not be reached */
- cpumask_setall(mdev->cpu_mask);
+ cpumask_setall(tconn->cpu_mask);
}
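
The CPU pinning above boils down to simple arithmetic: the connection's lowest minor, modulo the number of online CPUs, selects the CPU that all of its threads share (unless the user configured an explicit mask). Illustrative numbers only; the CPU count is assumed.

#include <stdio.h>

int main(void)
{
	int online_cpus = 4;			/* assumed for the example */
	int minors[] = { 0, 1, 5, 6 };

	for (int i = 0; i < 4; i++)
		printf("lowest minor %d -> cpu %d\n",
		       minors[i], minors[i] % online_cpus);
	return 0;
}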
/**
* drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
* @mdev: DRBD device.
+ * @thi: drbd_thread object
*
* call in the "main loop" of _all_ threads, no need for any mutex, current won't die
* prematurely.
*/
-void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
+void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
struct task_struct *p = current;
- struct drbd_thread *thi =
- p == mdev->asender.task ? &mdev->asender :
- p == mdev->receiver.task ? &mdev->receiver :
- p == mdev->worker.task ? &mdev->worker :
- NULL;
- ERR_IF(thi == NULL)
- return;
+
if (!thi->reset_cpu_mask)
return;
thi->reset_cpu_mask = 0;
- set_cpus_allowed_ptr(p, mdev->cpu_mask);
+ set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif
-/* the appropriate socket mutex must be held already */
-int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size, unsigned msg_flags)
+/**
+ * drbd_header_size - size of a packet header
+ *
+ * The header size is a multiple of 8, so any payload following the header is
+ * word aligned on 64-bit architectures. (The bitmap send and receive code
+ * relies on this.)
+ */
+unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
- int sent, ok;
+ if (tconn->agreed_pro_version >= 100) {
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
+ return sizeof(struct p_header100);
+ } else {
+ BUILD_BUG_ON(sizeof(struct p_header80) !=
+ sizeof(struct p_header95));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
+ return sizeof(struct p_header80);
+ }
+}
- ERR_IF(!h) return false;
- ERR_IF(!size) return false;
+static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
+{
+ h->magic = cpu_to_be32(DRBD_MAGIC);
+ h->command = cpu_to_be16(cmd);
+ h->length = cpu_to_be16(size);
+ return sizeof(struct p_header80);
+}
- h->magic = BE_DRBD_MAGIC;
+static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
+{
+ h->magic = cpu_to_be16(DRBD_MAGIC_BIG);
h->command = cpu_to_be16(cmd);
- h->length = cpu_to_be16(size-sizeof(struct p_header80));
+ h->length = cpu_to_be32(size);
+ return sizeof(struct p_header95);
+}
- sent = drbd_send(mdev, sock, h, size, msg_flags);
+static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
+ int size, int vnr)
+{
+ h->magic = cpu_to_be32(DRBD_MAGIC_100);
+ h->volume = cpu_to_be16(vnr);
+ h->command = cpu_to_be16(cmd);
+ h->length = cpu_to_be32(size);
+ h->pad = 0;
+ return sizeof(struct p_header100);
+}
- ok = (sent == size);
- if (!ok && !signal_pending(current))
- dev_warn(DEV, "short sent %s size=%d sent=%d\n",
- cmdname(cmd), (int)size, sent);
- return ok;
+static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
+ void *buffer, enum drbd_packet cmd, int size)
+{
+ if (tconn->agreed_pro_version >= 100)
+ return prepare_header100(buffer, cmd, size, vnr);
+ else if (tconn->agreed_pro_version >= 95 &&
+ size > DRBD_MAX_SIZE_H80_PACKET)
+ return prepare_header95(buffer, cmd, size);
+ else
+ return prepare_header80(buffer, cmd, size);
}
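
prepare_header() above dispatches on the agreed protocol version: peers at protocol 100 or later get the 16-byte h100 header carrying a volume number, older peers get the classic 8-byte h80 header (or h95 for oversized packets), with all fields in big-endian byte order. A self-contained userspace sketch of that dispatch; the struct layouts mirror the patch, but the magic constants here are placeholders, not DRBD's real values.

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>		/* htonl()/htons() for the big-endian wire format */

struct hdr80  { uint32_t magic; uint16_t command; uint16_t length; };
struct hdr100 { uint32_t magic; uint16_t volume; uint16_t command;
		uint32_t length; uint32_t pad; };

static size_t put_header(void *buf, int agreed_pro_version,
			 uint16_t vnr, uint16_t cmd, uint32_t size)
{
	if (agreed_pro_version >= 100) {
		struct hdr100 *h = buf;

		h->magic   = htonl(0x11111111);	/* placeholder magic */
		h->volume  = htons(vnr);
		h->command = htons(cmd);
		h->length  = htonl(size);
		h->pad     = 0;
		return sizeof(*h);		/* 16 bytes, 8-byte aligned */
	} else {
		struct hdr80 *h = buf;

		h->magic   = htonl(0x22222222);	/* placeholder magic */
		h->command = htons(cmd);
		h->length  = htons((uint16_t)size);
		return sizeof(*h);		/* 8 bytes */
	}
}

int main(void)
{
	unsigned char buf[32];

	return (int)put_header(buf, 101, 0, 7, 4096);	/* returns 16 */
}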
-/* don't pass the socket. we may only look at it
- * when we hold the appropriate socket mutex.
- */
-int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum drbd_packets cmd, struct p_header80 *h, size_t size)
+static void *__conn_prepare_command(struct drbd_tconn *tconn,
+ struct drbd_socket *sock)
{
- int ok = 0;
- struct socket *sock;
+ if (!sock->socket)
+ return NULL;
+ return sock->sbuf + drbd_header_size(tconn);
+}
- if (use_data_socket) {
- mutex_lock(&mdev->data.mutex);
- sock = mdev->data.socket;
- } else {
- mutex_lock(&mdev->meta.mutex);
- sock = mdev->meta.socket;
- }
+void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
+{
+ void *p;
- /* drbd_disconnect() could have called drbd_free_sock()
- * while we were waiting in down()... */
- if (likely(sock != NULL))
- ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
+ mutex_lock(&sock->mutex);
+ p = __conn_prepare_command(tconn, sock);
+ if (!p)
+ mutex_unlock(&sock->mutex);
- if (use_data_socket)
- mutex_unlock(&mdev->data.mutex);
- else
- mutex_unlock(&mdev->meta.mutex);
- return ok;
+ return p;
}
-int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
- size_t size)
+void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
- struct p_header80 h;
- int ok;
+ return conn_prepare_command(mdev->tconn, sock);
+}
- h.magic = BE_DRBD_MAGIC;
- h.command = cpu_to_be16(cmd);
- h.length = cpu_to_be16(size);
+static int __send_command(struct drbd_tconn *tconn, int vnr,
+ struct drbd_socket *sock, enum drbd_packet cmd,
+ unsigned int header_size, void *data,
+ unsigned int size)
+{
+ int msg_flags;
+ int err;
- if (!drbd_get_data_sock(mdev))
- return 0;
+ /*
+ * Called with @data == NULL and the size of the data blocks in @size
+ * for commands that send data blocks. For those commands, omit the
+ * MSG_MORE flag: this will increase the likelihood that data blocks
+ * which are page aligned on the sender will end up page aligned on the
+ * receiver.
+ */
+ msg_flags = data ? MSG_MORE : 0;
+
+ header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
+ header_size + size);
+ err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
+ msg_flags);
+ if (data && !err)
+ err = drbd_send_all(tconn, sock->socket, data, size, 0);
+ return err;
+}
- ok = (sizeof(h) ==
- drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
- ok = ok && (size ==
- drbd_send(mdev, mdev->data.socket, data, size, 0));
+static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
+{
+ return __send_command(tconn, 0, sock, cmd, header_size, data, size);
+}
- drbd_put_data_sock(mdev);
+int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
+{
+ int err;
- return ok;
+ err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
+ mutex_unlock(&sock->mutex);
+ return err;
}
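
The new prepare/send pairing brackets the per-socket mutex: *_prepare_command() takes the mutex and returns a pointer just past the header area of the preallocated send buffer (or unlocks and returns NULL if the socket is gone), and *_send_command() encodes the header, transmits, and always drops the mutex. A userspace sketch of that locking contract, with assumed names and a stubbed-out socket write.

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define HDR_SIZE 8
#define BUF_SIZE 4096

struct sock_buf {
	pthread_mutex_t mutex;
	int connected;			/* stands in for sock->socket != NULL */
	unsigned char sbuf[BUF_SIZE];
};

static void *prepare_command(struct sock_buf *s)
{
	pthread_mutex_lock(&s->mutex);
	if (!s->connected) {
		pthread_mutex_unlock(&s->mutex);
		return NULL;
	}
	return s->sbuf + HDR_SIZE;	/* caller fills its payload here */
}

static int send_command(struct sock_buf *s, int cmd, size_t payload_size)
{
	/* a real implementation encodes cmd/size into sbuf[0..HDR_SIZE) and
	 * writes the whole frame to the socket; here we only log it */
	memset(s->sbuf, 0, HDR_SIZE);
	printf("sent cmd %d with %zu payload bytes\n", cmd, payload_size);
	pthread_mutex_unlock(&s->mutex);	/* released even on error paths */
	return 0;
}

int main(void)
{
	static struct sock_buf s = { .mutex = PTHREAD_MUTEX_INITIALIZER, .connected = 1 };
	char *p = prepare_command(&s);

	if (p) {
		strcpy(p, "payload");
		send_command(&s, 42, sizeof("payload"));
	}
	return 0;
}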
-int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
+int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
{
+ int err;
+
+ err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
+ data, size);
+ mutex_unlock(&sock->mutex);
+ return err;
+}
+
+int drbd_send_ping(struct drbd_tconn *tconn)
+{
+ struct drbd_socket *sock;
+
+ sock = &tconn->meta;
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
+}
+
+int drbd_send_ping_ack(struct drbd_tconn *tconn)
+{
+ struct drbd_socket *sock;
+
+ sock = &tconn->meta;
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
+}
+
+int drbd_send_sync_param(struct drbd_conf *mdev)
+{
+ struct drbd_socket *sock;
struct p_rs_param_95 *p;
- struct socket *sock;
- int size, rv;
- const int apv = mdev->agreed_pro_version;
+ int size;
+ const int apv = mdev->tconn->agreed_pro_version;
+ enum drbd_packet cmd;
+ struct net_conf *nc;
+ struct disk_conf *dc;
+
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
size = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
- + strlen(mdev->sync_conf.verify_alg) + 1
+ + strlen(nc->verify_alg) + 1
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
- /* used from admin command context and receiver/worker context.
- * to avoid kmalloc, grab the socket right here,
- * then use the pre-allocated sbuf there */
- mutex_lock(&mdev->data.mutex);
- sock = mdev->data.socket;
-
- if (likely(sock != NULL)) {
- enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
-
- p = &mdev->data.sbuf.rs_param_95;
+ cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
- /* initialize verify_alg and csums_alg */
- memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+ /* initialize verify_alg and csums_alg */
+ memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- p->rate = cpu_to_be32(sc->rate);
- p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
- p->c_delay_target = cpu_to_be32(sc->c_delay_target);
- p->c_fill_target = cpu_to_be32(sc->c_fill_target);
- p->c_max_rate = cpu_to_be32(sc->c_max_rate);
-
- if (apv >= 88)
- strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
- if (apv >= 89)
- strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
-
- rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
- } else
- rv = 0; /* not ok */
+ if (get_ldev(mdev)) {
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+ p->resync_rate = cpu_to_be32(dc->resync_rate);
+ p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
+ p->c_delay_target = cpu_to_be32(dc->c_delay_target);
+ p->c_fill_target = cpu_to_be32(dc->c_fill_target);
+ p->c_max_rate = cpu_to_be32(dc->c_max_rate);
+ put_ldev(mdev);
+ } else {
+ p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
+ p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
+ p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
+ p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
+ p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
+ }
- mutex_unlock(&mdev->data.mutex);
+ if (apv >= 88)
+ strcpy(p->verify_alg, nc->verify_alg);
+ if (apv >= 89)
+ strcpy(p->csums_alg, nc->csums_alg);
+ rcu_read_unlock();
- return rv;
+ return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}
-int drbd_send_protocol(struct drbd_conf *mdev)
+int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
+ struct drbd_socket *sock;
struct p_protocol *p;
- int size, cf, rv;
+ struct net_conf *nc;
+ int size, cf;
- size = sizeof(struct p_protocol);
+ sock = &tconn->data;
+ p = __conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
- if (mdev->agreed_pro_version >= 87)
- size += strlen(mdev->net_conf->integrity_alg) + 1;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- /* we must not recurse into our own queue,
- * as that is blocked during handshake */
- p = kmalloc(size, GFP_NOIO);
- if (p == NULL)
- return 0;
+ if (nc->tentative && tconn->agreed_pro_version < 92) {
+ rcu_read_unlock();
+ mutex_unlock(&sock->mutex);
+ conn_err(tconn, "--dry-run is not supported by peer");
+ return -EOPNOTSUPP;
+ }
- p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
- p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
- p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
- p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
- p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
+ size = sizeof(*p);
+ if (tconn->agreed_pro_version >= 87)
+ size += strlen(nc->integrity_alg) + 1;
+ p->protocol = cpu_to_be32(nc->wire_protocol);
+ p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
+ p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
+ p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
+ p->two_primaries = cpu_to_be32(nc->two_primaries);
cf = 0;
- if (mdev->net_conf->want_lose)
- cf |= CF_WANT_LOSE;
- if (mdev->net_conf->dry_run) {
- if (mdev->agreed_pro_version >= 92)
- cf |= CF_DRY_RUN;
- else {
- dev_err(DEV, "--dry-run is not supported by peer");
- kfree(p);
- return -1;
- }
- }
+ if (nc->discard_my_data)
+ cf |= CF_DISCARD_MY_DATA;
+ if (nc->tentative)
+ cf |= CF_DRY_RUN;
p->conn_flags = cpu_to_be32(cf);
- if (mdev->agreed_pro_version >= 87)
- strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
+ if (tconn->agreed_pro_version >= 87)
+ strcpy(p->integrity_alg, nc->integrity_alg);
+ rcu_read_unlock();
- rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
- (struct p_header80 *)p, size);
- kfree(p);
- return rv;
+ return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
+}
+
+int drbd_send_protocol(struct drbd_tconn *tconn)
+{
+ int err;
+
+ mutex_lock(&tconn->data.mutex);
+ err = __drbd_send_protocol(tconn, P_PROTOCOL);
+ mutex_unlock(&tconn->data.mutex);
+
+ return err;
}
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
- struct p_uuids p;
+ struct drbd_socket *sock;
+ struct p_uuids *p;
int i;
if (!get_ldev_if_state(mdev, D_NEGOTIATING))
- return 1;
+ return 0;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p) {
+ put_ldev(mdev);
+ return -EIO;
+ }
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
for (i = UI_CURRENT; i < UI_SIZE; i++)
- p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
+ p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
mdev->comm_bm_set = drbd_bm_total_weight(mdev);
- p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
- uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
+ p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
+ rcu_read_lock();
+ uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
+ rcu_read_unlock();
uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
- p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
+ p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
put_ldev(mdev);
-
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
- (struct p_header80 *)&p, sizeof(p));
+ return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}
int drbd_send_uuids(struct drbd_conf *mdev)
@@ -2169,9 +884,10 @@ void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
}
}
-int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
+void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
- struct p_rs_uuid p;
+ struct drbd_socket *sock;
+ struct p_rs_uuid *p;
u64 uuid;
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
@@ -2184,24 +900,29 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
- p.uuid = cpu_to_be64(uuid);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
- (struct p_header80 *)&p, sizeof(p));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (p) {
+ p->uuid = cpu_to_be64(uuid);
+ drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
+ }
}
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
- struct p_sizes p;
+ struct drbd_socket *sock;
+ struct p_sizes *p;
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
- int ok;
if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
D_ASSERT(mdev->ldev->backing_bdev);
d_size = drbd_get_max_capacity(mdev->ldev);
- u_size = mdev->ldev->dc.disk_size;
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
q_order_type = drbd_queue_order_type(mdev);
max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
@@ -2213,20 +934,23 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
- /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
- if (mdev->agreed_pro_version <= 94)
- max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
- p.d_size = cpu_to_be64(d_size);
- p.u_size = cpu_to_be64(u_size);
- p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
- p.max_bio_size = cpu_to_be32(max_bio_size);
- p.queue_order_type = cpu_to_be16(q_order_type);
- p.dds_flags = cpu_to_be16(flags);
+ if (mdev->tconn->agreed_pro_version <= 94)
+ max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ else if (mdev->tconn->agreed_pro_version < 100)
+ max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ p->d_size = cpu_to_be64(d_size);
+ p->u_size = cpu_to_be64(u_size);
+ p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
+ p->max_bio_size = cpu_to_be32(max_bio_size);
+ p->queue_order_type = cpu_to_be16(q_order_type);
+ p->dds_flags = cpu_to_be16(flags);
+ return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}
/**
@@ -2235,34 +959,21 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
*/
int drbd_send_current_state(struct drbd_conf *mdev)
{
- struct socket *sock;
- struct p_state p;
- int ok = 0;
-
-	/* Grab state lock so we won't send state if we're in the middle
- * of a cluster wide state change on another thread */
- drbd_state_lock(mdev);
-
- mutex_lock(&mdev->data.mutex);
-
- p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
- sock = mdev->data.socket;
+ struct drbd_socket *sock;
+ struct p_state *p;
- if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, P_STATE,
- (struct p_header80 *)&p, sizeof(p), 0);
- }
-
- mutex_unlock(&mdev->data.mutex);
-
- drbd_state_unlock(mdev);
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
+ return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
* drbd_send_state() - After a state change, sends the new state to the peer
- * @mdev: DRBD device.
- * @state: the state to send, not necessarily the current state.
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
*
* Each state change queues an "after_state_ch" work, which will eventually
* send the resulting new state to the peer. If more state changes happen
@@ -2271,50 +982,95 @@ int drbd_send_current_state(struct drbd_conf *mdev)
*/
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
{
- struct socket *sock;
- struct p_state p;
- int ok = 0;
+ struct drbd_socket *sock;
+ struct p_state *p;
- mutex_lock(&mdev->data.mutex);
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->state = cpu_to_be32(state.i); /* Within the send mutex */
+ return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+}
- p.state = cpu_to_be32(state.i);
- sock = mdev->data.socket;
+int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
+{
+ struct drbd_socket *sock;
+ struct p_req_state *p;
- if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, P_STATE,
- (struct p_header80 *)&p, sizeof(p), 0);
- }
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->mask = cpu_to_be32(mask.i);
+ p->val = cpu_to_be32(val.i);
+ return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
+}
- mutex_unlock(&mdev->data.mutex);
+int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+{
+ enum drbd_packet cmd;
+ struct drbd_socket *sock;
+ struct p_req_state *p;
- return ok;
+ cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
+ p->mask = cpu_to_be32(mask.i);
+ p->val = cpu_to_be32(val.i);
+ return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_state_req(struct drbd_conf *mdev,
- union drbd_state mask, union drbd_state val)
+void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
- struct p_req_state p;
+ struct drbd_socket *sock;
+ struct p_req_state_reply *p;
- p.mask = cpu_to_be32(mask.i);
- p.val = cpu_to_be32(val.i);
+ sock = &mdev->tconn->meta;
+ p = drbd_prepare_command(mdev, sock);
+ if (p) {
+ p->retcode = cpu_to_be32(retcode);
+ drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
+ }
+}
+
+void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
+{
+ struct drbd_socket *sock;
+ struct p_req_state_reply *p;
+ enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
- (struct p_header80 *)&p, sizeof(p));
+ sock = &tconn->meta;
+ p = conn_prepare_command(tconn, sock);
+ if (p) {
+ p->retcode = cpu_to_be32(retcode);
+ conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ }
}
-int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
+static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
- struct p_req_state_reply p;
+ BUG_ON(code & ~0xf);
+ p->encoding = (p->encoding & ~0xf) | code;
+}
- p.retcode = cpu_to_be32(retcode);
+static void dcbp_set_start(struct p_compressed_bm *p, int set)
+{
+ p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
+}
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
- (struct p_header80 *)&p, sizeof(p));
+static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
+{
+ BUG_ON(n & ~0x7);
+ p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
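
dcbp_set_code()/dcbp_set_start()/dcbp_set_pad_bits() above pack three fields into the single encoding byte of the compressed-bitmap packet: the code in bits 0-3, the pad-bit count in bits 4-6, and the polarity of the first run in bit 7. A small standalone demonstration of that packing and the matching unpacking; the field values are arbitrary.

#include <stdio.h>

int main(void)
{
	unsigned char enc = 0;

	enc = (enc & ~0x0f) | 0x02;	/* code: e.g. an RLE_VLI_Bits-style value */
	enc = (enc & ~0x80) | 0x80;	/* start: first run is a run of set bits */
	enc = (enc & 0x8f) | (3 << 4);	/* pad: 3 unused bits in the last byte */

	printf("code=%u start=%u pad=%u\n",
	       enc & 0x0f, (enc >> 7) & 1, (enc >> 4) & 0x7);	/* code=2 start=1 pad=3 */
	return 0;
}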
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
- struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct p_compressed_bm *p,
+ unsigned int size,
+ struct bm_xfer_ctx *c)
{
struct bitstream bs;
unsigned long plain_bits;
@@ -2322,19 +1078,21 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
unsigned long rl;
unsigned len;
unsigned toggle;
- int bits;
+ int bits, use_rle;
/* may we use this feature? */
- if ((mdev->sync_conf.use_rle == 0) ||
- (mdev->agreed_pro_version < 90))
- return 0;
+ rcu_read_lock();
+ use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
+ rcu_read_unlock();
+ if (!use_rle || mdev->tconn->agreed_pro_version < 90)
+ return 0;
if (c->bit_offset >= c->bm_bits)
return 0; /* nothing to do. */
/* use at most thus many bytes */
- bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
- memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
+ bitstream_init(&bs, p->code, size, 0);
+ memset(p->code, 0, size);
/* plain bits covered in this code string */
plain_bits = 0;
@@ -2356,12 +1114,12 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
if (rl == 0) {
/* the first checked bit was set,
* store start value, */
- DCBP_set_start(p, 1);
+ dcbp_set_start(p, 1);
/* but skip encoding of zero run length */
toggle = !toggle;
continue;
}
- DCBP_set_start(p, 0);
+ dcbp_set_start(p, 0);
}
/* paranoia: catch zero runlength.
@@ -2401,7 +1159,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
bm_xfer_ctx_bit_to_word_offset(c);
/* store pad_bits */
- DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
+ dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
return len;
}
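/* Worked example (illustrative, simplified): with the start flag set via
 * dcbp_set_start(p, 1), a bitmap chunk consisting of 5 set bits, 1000 clear
 * bits and 3 set bits is sent as the VLI-encoded run lengths 5, 1000, 3 in
 * p->code[]; the receiver rebuilds the chunk by toggling the bit value after
 * each run, starting from the value given by the start flag.  A return value
 * of 0 means the run-length encoding would not pay off, and the caller falls
 * back to sending plain bitmap words. */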
@@ -2413,48 +1171,52 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
* code upon failure.
*/
static int
-send_bitmap_rle_or_plain(struct drbd_conf *mdev,
- struct p_header80 *h, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
- struct p_compressed_bm *p = (void*)h;
- unsigned long num_words;
- int len;
- int ok;
-
- len = fill_bitmap_rle_bits(mdev, p, c);
+ struct drbd_socket *sock = &mdev->tconn->data;
+ unsigned int header_size = drbd_header_size(mdev->tconn);
+ struct p_compressed_bm *p = sock->sbuf + header_size;
+ int len, err;
+ len = fill_bitmap_rle_bits(mdev, p,
+ DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
if (len < 0)
return -EIO;
if (len) {
- DCBP_set_code(p, RLE_VLI_Bits);
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
- sizeof(*p) + len, 0);
-
+ dcbp_set_code(p, RLE_VLI_Bits);
+ err = __send_command(mdev->tconn, mdev->vnr, sock,
+ P_COMPRESSED_BITMAP, sizeof(*p) + len,
+ NULL, 0);
c->packets[0]++;
- c->bytes[0] += sizeof(*p) + len;
+ c->bytes[0] += header_size + sizeof(*p) + len;
if (c->bit_offset >= c->bm_bits)
len = 0; /* DONE */
} else {
/* was not compressible.
* send a buffer full of plain text bits instead. */
- num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
- len = num_words * sizeof(long);
+ unsigned int data_size;
+ unsigned long num_words;
+ unsigned long *p = sock->sbuf + header_size;
+
+ data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
+ num_words = min_t(size_t, data_size / sizeof(*p),
+ c->bm_words - c->word_offset);
+ len = num_words * sizeof(*p);
if (len)
- drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
- h, sizeof(struct p_header80) + len, 0);
+ drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
c->packets[1]++;
- c->bytes[1] += sizeof(struct p_header80) + len;
+ c->bytes[1] += header_size + len;
if (c->bit_offset > c->bm_bits)
c->bit_offset = c->bm_bits;
}
- if (ok) {
+ if (!err) {
if (len == 0) {
INFO_bm_xfer_stats(mdev, "send", c);
return 0;
@@ -2465,21 +1227,13 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
}
/* See the comment at receive_bitmap() */
-int _drbd_send_bitmap(struct drbd_conf *mdev)
+static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
struct bm_xfer_ctx c;
- struct p_header80 *p;
int err;
- ERR_IF(!mdev->bitmap) return false;
-
- /* maybe we should use some per thread scratch page,
- * and allocate that during initial device creation? */
- p = (struct p_header80 *) __get_free_page(GFP_NOIO);
- if (!p) {
- dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
+ if (!expect(mdev->bitmap))
return false;
- }
if (get_ldev(mdev)) {
if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
@@ -2504,37 +1258,39 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
};
do {
- err = send_bitmap_rle_or_plain(mdev, p, &c);
+ err = send_bitmap_rle_or_plain(mdev, &c);
} while (err > 0);
- free_page((unsigned long) p);
return err == 0;
}
int drbd_send_bitmap(struct drbd_conf *mdev)
{
- int err;
+ struct drbd_socket *sock = &mdev->tconn->data;
+ int err = -1;
- if (!drbd_get_data_sock(mdev))
- return -1;
- err = !_drbd_send_bitmap(mdev);
- drbd_put_data_sock(mdev);
+ mutex_lock(&sock->mutex);
+ if (sock->socket)
+ err = !_drbd_send_bitmap(mdev);
+ mutex_unlock(&sock->mutex);
return err;
}
-int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
+void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
{
- int ok;
- struct p_barrier_ack p;
+ struct drbd_socket *sock;
+ struct p_barrier_ack *p;
- p.barrier = barrier_nr;
- p.set_size = cpu_to_be32(set_size);
+ if (tconn->cstate < C_WF_REPORT_PARAMS)
+ return;
- if (mdev->state.conn < C_CONNECTED)
- return false;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &tconn->meta;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return;
+ p->barrier = barrier_nr;
+ p->set_size = cpu_to_be32(set_size);
+ conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
@@ -2545,62 +1301,62 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
* @blksize: size in bytes, needs to be in big endian byte order
* @block_id: Id, big endian byte order
*/
-static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
- u64 sector,
- u32 blksize,
- u64 block_id)
+static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+ u64 sector, u32 blksize, u64 block_id)
{
- int ok;
- struct p_block_ack p;
+ struct drbd_socket *sock;
+ struct p_block_ack *p;
- p.sector = sector;
- p.block_id = block_id;
- p.blksize = blksize;
- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
+ if (mdev->state.conn < C_CONNECTED)
+ return -EIO;
- if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
- return false;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->meta;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = sector;
+ p->block_id = block_id;
+ p->blksize = blksize;
+ p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
* data_size is payload size according to dp->head,
* and may need to be corrected for digest size. */
-int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_data *dp, int data_size)
+void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_data *dp, int data_size)
{
- data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
- return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
- dp->block_id);
+ if (mdev->tconn->peer_integrity_tfm)
+ data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
+ dp->block_id);
}
-int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_block_req *rp)
+void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_block_req *rp)
{
- return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
+ _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
* drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device.
- * @cmd: Packet command code.
- * @e: Epoch entry.
+ * @mdev: DRBD device
+ * @cmd: packet command code
+ * @peer_req: peer request
*/
-int drbd_send_ack(struct drbd_conf *mdev,
- enum drbd_packets cmd, struct drbd_epoch_entry *e)
+int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct drbd_peer_request *peer_req)
{
return _drbd_send_ack(mdev, cmd,
- cpu_to_be64(e->sector),
- cpu_to_be32(e->size),
- e->block_id);
+ cpu_to_be64(peer_req->i.sector),
+ cpu_to_be32(peer_req->i.size),
+ peer_req->block_id);
}
/* This function misuses the block_id field to signal if the blocks
* are in sync or not. */
-int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
+int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id)
{
return _drbd_send_ack(mdev, cmd,
@@ -2612,85 +1368,87 @@ int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id)
{
- int ok;
- struct p_block_req p;
-
- p.sector = cpu_to_be64(sector);
- p.block_id = block_id;
- p.blksize = cpu_to_be32(size);
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = block_id;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_drequest_csum(struct drbd_conf *mdev,
- sector_t sector, int size,
- void *digest, int digest_size,
- enum drbd_packets cmd)
+int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
+ void *digest, int digest_size, enum drbd_packet cmd)
{
- int ok;
- struct p_block_req p;
-
- p.sector = cpu_to_be64(sector);
- p.block_id = BE_DRBD_MAGIC + 0xbeef;
- p.blksize = cpu_to_be32(size);
-
- p.head.magic = BE_DRBD_MAGIC;
- p.head.command = cpu_to_be16(cmd);
- p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- mutex_lock(&mdev->data.mutex);
+ /* FIXME: Put the digest into the preallocated socket buffer. */
- ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
- ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
-
- mutex_unlock(&mdev->data.mutex);
-
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = ID_SYNCER /* unused */;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p),
+ digest, digest_size);
}
int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
- int ok;
- struct p_block_req p;
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- p.sector = cpu_to_be64(sector);
- p.block_id = BE_DRBD_MAGIC + 0xbabe;
- p.blksize = cpu_to_be32(size);
-
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = ID_SYNCER /* unused */;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
* returns false if we should retry,
* true if we think connection is dead
*/
-static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
+static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
int drop_it;
/* long elapsed = (long)(jiffies - mdev->last_received); */
- drop_it = mdev->meta.socket == sock
- || !mdev->asender.task
- || get_t_state(&mdev->asender) != Running
- || mdev->state.conn < C_CONNECTED;
+ drop_it = tconn->meta.socket == sock
+ || !tconn->asender.task
+ || get_t_state(&tconn->asender) != RUNNING
+ || tconn->cstate < C_WF_REPORT_PARAMS;
if (drop_it)
return true;
- drop_it = !--mdev->ko_count;
+ drop_it = !--tconn->ko_count;
if (!drop_it) {
- dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
- current->comm, current->pid, mdev->ko_count);
- request_ping(mdev);
+ conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
+ current->comm, current->pid, tconn->ko_count);
+ request_ping(tconn);
}
return drop_it; /* && (mdev->state == R_PRIMARY) */;
}
+static void drbd_update_congested(struct drbd_tconn *tconn)
+{
+ struct sock *sk = tconn->data.socket->sk;
+ if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
+ set_bit(NET_CONGESTED, &tconn->flags);
+}
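/* For context (note added for readability): NET_CONGESTED set here is cleared
 * again once the send loops in _drbd_send_page() and drbd_send() have drained
 * their data, and drbd_congested() further down reports it to the block layer
 * as BDI_async_congested. */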
+
/* The idea of sendpage seems to be to put some kind of reference
* to the page into the skb, and to hand it over to the NIC. In
* this process get_page() gets called.
@@ -2713,21 +1471,28 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
* with page_count == 0 or PageSlab.
*/
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
- int offset, size_t size, unsigned msg_flags)
+ int offset, size_t size, unsigned msg_flags)
{
- int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
+ struct socket *socket;
+ void *addr;
+ int err;
+
+ socket = mdev->tconn->data.socket;
+ addr = kmap(page) + offset;
+ err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
kunmap(page);
- if (sent == size)
- mdev->send_cnt += size>>9;
- return sent == size;
+ if (!err)
+ mdev->send_cnt += size >> 9;
+ return err;
}
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
+ struct socket *socket = mdev->tconn->data.socket;
mm_segment_t oldfs = get_fs();
- int sent, ok;
int len = size;
+ int err = -EIO;
/* e.g. XFS meta- & log-data is in slab pages, which have a
* page_count of 0 and/or have PageSlab() set.
@@ -2739,34 +1504,35 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
- drbd_update_congested(mdev);
+ drbd_update_congested(mdev->tconn);
set_fs(KERNEL_DS);
do {
- sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
- offset, len,
- msg_flags);
- if (sent == -EAGAIN) {
- if (we_should_drop_the_connection(mdev,
- mdev->data.socket))
- break;
- else
- continue;
- }
+ int sent;
+
+ sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
if (sent <= 0) {
+ if (sent == -EAGAIN) {
+ if (we_should_drop_the_connection(mdev->tconn, socket))
+ break;
+ continue;
+ }
dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
__func__, (int)size, len, sent);
+ if (sent < 0)
+ err = sent;
break;
}
len -= sent;
offset += sent;
} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
set_fs(oldfs);
- clear_bit(NET_CONGESTED, &mdev->flags);
+ clear_bit(NET_CONGESTED, &mdev->tconn->flags);
- ok = (len == 0);
- if (likely(ok))
- mdev->send_cnt += size>>9;
- return ok;
+ if (len == 0) {
+ err = 0;
+ mdev->send_cnt += size >> 9;
+ }
+ return err;
}
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
@@ -2775,12 +1541,15 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
int i;
/* hint all but last page with MSG_MORE */
bio_for_each_segment(bvec, bio, i) {
- if (!_drbd_no_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
- return 0;
+ int err;
+
+ err = _drbd_no_send_page(mdev, bvec->bv_page,
+ bvec->bv_offset, bvec->bv_len,
+ i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ if (err)
+ return err;
}
- return 1;
+ return 0;
}
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
@@ -2789,32 +1558,40 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
int i;
/* hint all but last page with MSG_MORE */
bio_for_each_segment(bvec, bio, i) {
- if (!_drbd_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
- return 0;
+ int err;
+
+ err = _drbd_send_page(mdev, bvec->bv_page,
+ bvec->bv_offset, bvec->bv_len,
+ i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ if (err)
+ return err;
}
- return 1;
+ return 0;
}
-static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
{
- struct page *page = e->pages;
- unsigned len = e->size;
+ struct page *page = peer_req->pages;
+ unsigned len = peer_req->i.size;
+ int err;
+
/* hint all but last page with MSG_MORE */
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- if (!_drbd_send_page(mdev, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0))
- return 0;
+
+ err = _drbd_send_page(mdev, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ if (err)
+ return err;
len -= l;
}
- return 1;
+ return 0;
}
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
- if (mdev->agreed_pro_version >= 95)
+ if (mdev->tconn->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
@@ -2828,50 +1605,36 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
*/
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
- int ok = 1;
- struct p_data p;
+ struct drbd_socket *sock;
+ struct p_data *p;
unsigned int dp_flags = 0;
- void *dgb;
int dgs;
+ int err;
- if (!drbd_get_data_sock(mdev))
- return 0;
-
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
- crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
-
- if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
- p.head.h80.magic = BE_DRBD_MAGIC;
- p.head.h80.command = cpu_to_be16(P_DATA);
- p.head.h80.length =
- cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
- } else {
- p.head.h95.magic = BE_DRBD_MAGIC_BIG;
- p.head.h95.command = cpu_to_be16(P_DATA);
- p.head.h95.length =
- cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
- }
-
- p.sector = cpu_to_be64(req->sector);
- p.block_id = (unsigned long)req;
- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(req->i.sector);
+ p->block_id = (unsigned long)req;
+ p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
-
if (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
-
- p.dp_flags = cpu_to_be32(dp_flags);
- set_bit(UNPLUG_REMOTE, &mdev->flags);
- ok = (sizeof(p) ==
- drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
- if (ok && dgs) {
- dgb = mdev->int_dig_out;
- drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
- ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
- }
- if (ok) {
+ if (mdev->tconn->agreed_pro_version >= 100) {
+ if (req->rq_state & RQ_EXP_RECEIVE_ACK)
+ dp_flags |= DP_SEND_RECEIVE_ACK;
+ if (req->rq_state & RQ_EXP_WRITE_ACK)
+ dp_flags |= DP_SEND_WRITE_ACK;
+ }
+ p->dp_flags = cpu_to_be32(dp_flags);
+ if (dgs)
+ drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+ if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
* as soon as we handed it over to tcp, at which point the data
@@ -2883,92 +1646,76 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* out ok after sending on this side, but does not fit on the
* receiving side, we sure have detected corruption elsewhere.
*/
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
- ok = _drbd_send_bio(mdev, req->master_bio);
+ if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
+ err = _drbd_send_bio(mdev, req->master_bio);
else
- ok = _drbd_send_zc_bio(mdev, req->master_bio);
+ err = _drbd_send_zc_bio(mdev, req->master_bio);
/* double check digest, sometimes buffers have been modified in flight. */
if (dgs > 0 && dgs <= 64) {
/* 64 byte, 512 bit, is the largest digest size
* currently supported in kernel crypto. */
unsigned char digest[64];
- drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
- if (memcmp(mdev->int_dig_out, digest, dgs)) {
+ drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
+ if (memcmp(p + 1, digest, dgs)) {
dev_warn(DEV,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
- (unsigned long long)req->sector, req->size);
+ (unsigned long long)req->i.sector, req->i.size);
}
} /* else if (dgs > 64) {
... Be noisy about digest too large ...
} */
}
+ mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
- drbd_put_data_sock(mdev);
-
- return ok;
+ return err;
}
/* answer packet, used to send data back for read requests:
* Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
* C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
-int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e)
+int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct drbd_peer_request *peer_req)
{
- int ok;
- struct p_data p;
- void *dgb;
+ struct drbd_socket *sock;
+ struct p_data *p;
+ int err;
int dgs;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
- crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
-
- if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
- p.head.h80.magic = BE_DRBD_MAGIC;
- p.head.h80.command = cpu_to_be16(cmd);
- p.head.h80.length =
- cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
- } else {
- p.head.h95.magic = BE_DRBD_MAGIC_BIG;
- p.head.h95.command = cpu_to_be16(cmd);
- p.head.h95.length =
- cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
- }
-
- p.sector = cpu_to_be64(e->sector);
- p.block_id = e->block_id;
- /* p.seq_num = 0; No sequence numbers here.. */
-
- /* Only called by our kernel thread.
- * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
- * in response to admin command or module unload.
- */
- if (!drbd_get_data_sock(mdev))
- return 0;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
- ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
- if (ok && dgs) {
- dgb = mdev->int_dig_out;
- drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
- ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
- }
- if (ok)
- ok = _drbd_send_zc_ee(mdev, e);
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
- drbd_put_data_sock(mdev);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(peer_req->i.sector);
+ p->block_id = peer_req->block_id;
+ p->seq_num = 0; /* unused */
+ p->dp_flags = 0;
+ if (dgs)
+ drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+ if (!err)
+ err = _drbd_send_zc_ee(mdev, peer_req);
+ mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
- return ok;
+ return err;
}
-int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
{
- struct p_block_desc p;
-
- p.sector = cpu_to_be64(req->sector);
- p.blksize = cpu_to_be32(req->size);
+ struct drbd_socket *sock;
+ struct p_block_desc *p;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(req->i.sector);
+ p->blksize = cpu_to_be32(req->i.size);
+ return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
@@ -2987,7 +1734,7 @@ int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
/*
* you must have down()ed the appropriate [m]sock_mutex elsewhere!
*/
-int drbd_send(struct drbd_conf *mdev, struct socket *sock,
+int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov;
@@ -2995,7 +1742,7 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
int rv, sent = 0;
if (!sock)
- return -1000;
+ return -EBADR;
/* THINK if (signal_pending) return ... ? */
@@ -3008,9 +1755,11 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
- if (sock == mdev->data.socket) {
- mdev->ko_count = mdev->net_conf->ko_count;
- drbd_update_congested(mdev);
+ if (sock == tconn->data.socket) {
+ rcu_read_lock();
+ tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
+ rcu_read_unlock();
+ drbd_update_congested(tconn);
}
do {
/* STRANGE
@@ -3024,12 +1773,11 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
*/
rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
if (rv == -EAGAIN) {
- if (we_should_drop_the_connection(mdev, sock))
+ if (we_should_drop_the_connection(tconn, sock))
break;
else
continue;
}
- D_ASSERT(rv != 0);
if (rv == -EINTR) {
flush_signals(current);
rv = 0;
@@ -3041,22 +1789,40 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
iov.iov_len -= rv;
} while (sent < size);
- if (sock == mdev->data.socket)
- clear_bit(NET_CONGESTED, &mdev->flags);
+ if (sock == tconn->data.socket)
+ clear_bit(NET_CONGESTED, &tconn->flags);
if (rv <= 0) {
if (rv != -EAGAIN) {
- dev_err(DEV, "%s_sendmsg returned %d\n",
- sock == mdev->meta.socket ? "msock" : "sock",
- rv);
- drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
+ conn_err(tconn, "%s_sendmsg returned %d\n",
+ sock == tconn->meta.socket ? "msock" : "sock",
+ rv);
+ conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
} else
- drbd_force_state(mdev, NS(conn, C_TIMEOUT));
+ conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
}
return sent;
}
+/**
+ * drbd_send_all - Send an entire buffer
+ *
+ * Returns 0 upon success and a negative error value otherwise.
+ */
+int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
+ size_t size, unsigned msg_flags)
+{
+ int err;
+
+ err = drbd_send(tconn, sock, buffer, size, msg_flags);
+ if (err < 0)
+ return err;
+ if (err != size)
+ return -EIO;
+ return 0;
+}
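/* Minimal usage sketch (hypothetical helper, not part of the patch): with
 * drbd_send_all() a caller gets "all or error" semantics and no longer has to
 * compare drbd_send()'s byte count against the requested size itself. */
static int example_send_buffer(struct drbd_tconn *tconn, void *buf, size_t size)
{
	/* 0 if everything went out, a negative error code otherwise */
	return drbd_send_all(tconn, tconn->data.socket, buf, size, 0);
}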
+
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
struct drbd_conf *mdev = bdev->bd_disk->private_data;
@@ -3064,7 +1830,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
int rv = 0;
mutex_lock(&drbd_main_mutex);
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
/* to have a stable mdev->state.role
* and no race with updating open_cnt */
@@ -3077,7 +1843,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
if (!rv)
mdev->open_cnt++;
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
mutex_unlock(&drbd_main_mutex);
return rv;
@@ -3094,35 +1860,14 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
static void drbd_set_defaults(struct drbd_conf *mdev)
{
- /* This way we get a compile error when sync_conf grows,
- and we forgot to initialize it here */
- mdev->sync_conf = (struct syncer_conf) {
- /* .rate = */ DRBD_RATE_DEF,
- /* .after = */ DRBD_AFTER_DEF,
- /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
- /* .verify_alg = */ {}, 0,
- /* .cpu_mask = */ {}, 0,
- /* .csums_alg = */ {}, 0,
- /* .use_rle = */ 0,
- /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
- /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
- /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
- /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
- /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
- /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
- };
-
- /* Have to use that way, because the layout differs between
- big endian and little endian */
- mdev->state = (union drbd_state) {
+ /* Beware! The actual layout differs
+ * between big endian and little endian */
+ mdev->state = (union drbd_dev_state) {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = C_STANDALONE,
.disk = D_DISKLESS,
.pdsk = D_UNKNOWN,
- .susp = 0,
- .susp_nod = 0,
- .susp_fen = 0
} };
}
@@ -3138,28 +1883,17 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
atomic_set(&mdev->unacked_cnt, 0);
atomic_set(&mdev->local_cnt, 0);
- atomic_set(&mdev->net_cnt, 0);
- atomic_set(&mdev->packet_seq, 0);
- atomic_set(&mdev->pp_in_use, 0);
atomic_set(&mdev->pp_in_use_by_net, 0);
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
atomic_set(&mdev->ap_in_flight, 0);
atomic_set(&mdev->md_io_in_use, 0);
- mutex_init(&mdev->data.mutex);
- mutex_init(&mdev->meta.mutex);
- sema_init(&mdev->data.work.s, 0);
- sema_init(&mdev->meta.work.s, 0);
- mutex_init(&mdev->state_mutex);
-
- spin_lock_init(&mdev->data.work.q_lock);
- spin_lock_init(&mdev->meta.work.q_lock);
+ mutex_init(&mdev->own_state_mutex);
+ mdev->state_mutex = &mdev->own_state_mutex;
spin_lock_init(&mdev->al_lock);
- spin_lock_init(&mdev->req_lock);
spin_lock_init(&mdev->peer_seq_lock);
- spin_lock_init(&mdev->epoch_lock);
INIT_LIST_HEAD(&mdev->active_ee);
INIT_LIST_HEAD(&mdev->sync_ee);
@@ -3167,8 +1901,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
INIT_LIST_HEAD(&mdev->read_ee);
INIT_LIST_HEAD(&mdev->net_ee);
INIT_LIST_HEAD(&mdev->resync_reads);
- INIT_LIST_HEAD(&mdev->data.work.q);
- INIT_LIST_HEAD(&mdev->meta.work.q);
INIT_LIST_HEAD(&mdev->resync_work.list);
INIT_LIST_HEAD(&mdev->unplug_work.list);
INIT_LIST_HEAD(&mdev->go_diskless.list);
@@ -3182,6 +1914,14 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
mdev->md_sync_work.cb = w_md_sync;
mdev->bm_io_work.w.cb = w_bitmap_io;
mdev->start_resync_work.cb = w_start_resync;
+
+ mdev->resync_work.mdev = mdev;
+ mdev->unplug_work.mdev = mdev;
+ mdev->go_diskless.mdev = mdev;
+ mdev->md_sync_work.mdev = mdev;
+ mdev->bm_io_work.w.mdev = mdev;
+ mdev->start_resync_work.mdev = mdev;
+
init_timer(&mdev->resync_timer);
init_timer(&mdev->md_sync_timer);
init_timer(&mdev->start_resync_timer);
@@ -3197,17 +1937,10 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
init_waitqueue_head(&mdev->misc_wait);
init_waitqueue_head(&mdev->state_wait);
- init_waitqueue_head(&mdev->net_cnt_wait);
init_waitqueue_head(&mdev->ee_wait);
init_waitqueue_head(&mdev->al_wait);
init_waitqueue_head(&mdev->seq_wait);
- drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
- drbd_thread_init(mdev, &mdev->worker, drbd_worker);
- drbd_thread_init(mdev, &mdev->asender, drbd_asender);
-
- mdev->agreed_pro_version = PRO_VERSION_MAX;
- mdev->write_ordering = WO_bdev_flush;
mdev->resync_wenr = LC_FREE;
mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
@@ -3216,13 +1949,10 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
int i;
- if (mdev->receiver.t_state != None)
+ if (mdev->tconn->receiver.t_state != NONE)
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
- mdev->receiver.t_state);
+ mdev->tconn->receiver.t_state);
- /* no need to lock it, I'm the only thread alive */
- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
mdev->al_writ_cnt =
mdev->bm_writ_cnt =
mdev->read_cnt =
@@ -3239,7 +1969,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
mdev->rs_mark_left[i] = 0;
mdev->rs_mark_time[i] = 0;
}
- D_ASSERT(mdev->net_conf == NULL);
+ D_ASSERT(mdev->tconn->net_conf == NULL);
drbd_set_my_capacity(mdev, 0);
if (mdev->bitmap) {
@@ -3248,21 +1978,18 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
drbd_bm_cleanup(mdev);
}
- drbd_free_resources(mdev);
+ drbd_free_bc(mdev->ldev);
+ mdev->ldev = NULL;
+
clear_bit(AL_SUSPENDED, &mdev->flags);
- /*
- * currently we drbd_init_ee only on module load, so
- * we may do drbd_release_ee only on module unload!
- */
D_ASSERT(list_empty(&mdev->active_ee));
D_ASSERT(list_empty(&mdev->sync_ee));
D_ASSERT(list_empty(&mdev->done_ee));
D_ASSERT(list_empty(&mdev->read_ee));
D_ASSERT(list_empty(&mdev->net_ee));
D_ASSERT(list_empty(&mdev->resync_reads));
- D_ASSERT(list_empty(&mdev->data.work.q));
- D_ASSERT(list_empty(&mdev->meta.work.q));
+ D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
D_ASSERT(list_empty(&mdev->resync_work.list));
D_ASSERT(list_empty(&mdev->unplug_work.list));
D_ASSERT(list_empty(&mdev->go_diskless.list));
@@ -3336,7 +2063,7 @@ static int drbd_create_mempools(void)
goto Enomem;
drbd_ee_cache = kmem_cache_create(
- "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
+ "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
if (drbd_ee_cache == NULL)
goto Enomem;
@@ -3351,11 +2078,9 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
-#ifdef COMPAT_HAVE_BIOSET_CREATE
drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
if (drbd_md_io_bio_set == NULL)
goto Enomem;
-#endif
drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
if (drbd_md_io_page_pool == NULL)
@@ -3404,73 +2129,53 @@ static struct notifier_block drbd_notifier = {
.notifier_call = drbd_notify_sys,
};
-static void drbd_release_ee_lists(struct drbd_conf *mdev)
+static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
{
int rr;
- rr = drbd_release_ee(mdev, &mdev->active_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
if (rr)
dev_err(DEV, "%d EEs in active list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->sync_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
if (rr)
dev_err(DEV, "%d EEs in sync list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->read_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
if (rr)
dev_err(DEV, "%d EEs in read list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->done_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
if (rr)
dev_err(DEV, "%d EEs in done list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->net_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
if (rr)
dev_err(DEV, "%d EEs in net list found!\n", rr);
}
-/* caution. no locking.
- * currently only used from module cleanup code. */
-static void drbd_delete_device(unsigned int minor)
+/* caution. no locking. */
+void drbd_minor_destroy(struct kref *kref)
{
- struct drbd_conf *mdev = minor_to_mdev(minor);
-
- if (!mdev)
- return;
+ struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
+ struct drbd_tconn *tconn = mdev->tconn;
del_timer_sync(&mdev->request_timer);
/* paranoia asserts */
- if (mdev->open_cnt != 0)
- dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
- __FILE__ , __LINE__);
-
- ERR_IF (!list_empty(&mdev->data.work.q)) {
- struct list_head *lp;
- list_for_each(lp, &mdev->data.work.q) {
- dev_err(DEV, "lp = %p\n", lp);
- }
- };
+ D_ASSERT(mdev->open_cnt == 0);
/* end paranoia asserts */
- del_gendisk(mdev->vdisk);
-
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
if (mdev->this_bdev)
bdput(mdev->this_bdev);
- drbd_free_resources(mdev);
+ drbd_free_bc(mdev->ldev);
+ mdev->ldev = NULL;
- drbd_release_ee_lists(mdev);
-
- /* should be freed on disconnect? */
- kfree(mdev->ee_hash);
- /*
- mdev->ee_hash_s = 0;
- mdev->ee_hash = NULL;
- */
+ drbd_release_all_peer_reqs(mdev);
lc_destroy(mdev->act_log);
lc_destroy(mdev->resync);
@@ -3478,19 +2183,101 @@ static void drbd_delete_device(unsigned int minor)
kfree(mdev->p_uuid);
/* mdev->p_uuid = NULL; */
- kfree(mdev->int_dig_out);
- kfree(mdev->int_dig_in);
- kfree(mdev->int_dig_vv);
+ if (mdev->bitmap) /* should no longer be there. */
+ drbd_bm_cleanup(mdev);
+ __free_page(mdev->md_io_page);
+ put_disk(mdev->vdisk);
+ blk_cleanup_queue(mdev->rq_queue);
+ kfree(mdev->rs_plan_s);
+ kfree(mdev);
- /* cleanup the rest that has been
- * allocated from drbd_new_device
- * and actually free the mdev itself */
- drbd_free_mdev(mdev);
+ kref_put(&tconn->kref, &conn_destroy);
}
+/* One global retry thread, if we need to push back some bio and have it
+ * reinserted through our make request function.
+ */
+static struct retry_worker {
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ spinlock_t lock;
+ struct list_head writes;
+} retry;
+
+static void do_retry(struct work_struct *ws)
+{
+ struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
+ LIST_HEAD(writes);
+ struct drbd_request *req, *tmp;
+
+ spin_lock_irq(&retry->lock);
+ list_splice_init(&retry->writes, &writes);
+ spin_unlock_irq(&retry->lock);
+
+ list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
+ struct drbd_conf *mdev = req->w.mdev;
+ struct bio *bio = req->master_bio;
+ unsigned long start_time = req->start_time;
+ bool expected;
+
+ expected =
+ expect(atomic_read(&req->completion_ref) == 0) &&
+ expect(req->rq_state & RQ_POSTPONED) &&
+ expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
+ (req->rq_state & RQ_LOCAL_ABORTED) != 0);
+
+ if (!expected)
+ dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
+ req, atomic_read(&req->completion_ref),
+ req->rq_state);
+
+ /* We still need to put one kref associated with the
+ * "completion_ref" going zero in the code path that queued it
+ * here. The request object may still be referenced by a
+ * frozen local req->private_bio, in case we force-detached.
+ */
+ kref_put(&req->kref, drbd_req_destroy);
+
+ /* A single suspended or otherwise blocking device may stall
+ * all others as well. Fortunately, this code path is to
+ * recover from a situation that "should not happen":
+ * concurrent writes in multi-primary setup.
+ * In a "normal" lifecycle, this workqueue is supposed to be
+ * destroyed without ever doing anything.
+ * If it turns out to be an issue anyways, we can do per
+ * resource (replication group) or per device (minor) retry
+ * workqueues instead.
+ */
+
+ /* We are not just doing generic_make_request(),
+ * as we want to keep the start_time information. */
+ inc_ap_bio(mdev);
+ __drbd_make_request(mdev, bio, start_time);
+ }
+}
+
+void drbd_restart_request(struct drbd_request *req)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&retry.lock, flags);
+ list_move_tail(&req->tl_requests, &retry.writes);
+ spin_unlock_irqrestore(&retry.lock, flags);
+
+ /* Drop the extra reference that would otherwise
+ * have been dropped by complete_master_bio.
+ * do_retry() needs to grab a new one. */
+ dec_ap_bio(req->w.mdev);
+
+ queue_work(retry.wq, &retry.worker);
+}
+
+
static void drbd_cleanup(void)
{
unsigned int i;
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn, *tmp;
unregister_reboot_notifier(&drbd_notifier);
@@ -3505,19 +2292,31 @@ static void drbd_cleanup(void)
if (drbd_proc)
remove_proc_entry("drbd", NULL);
- drbd_nl_cleanup();
+ if (retry.wq)
+ destroy_workqueue(retry.wq);
+
+ drbd_genl_unregister();
- if (minor_table) {
- i = minor_count;
- while (i--)
- drbd_delete_device(i);
- drbd_destroy_mempools();
+ idr_for_each_entry(&minors, mdev, i) {
+ idr_remove(&minors, mdev_to_minor(mdev));
+ idr_remove(&mdev->tconn->volumes, mdev->vnr);
+ del_gendisk(mdev->vdisk);
+ /* synchronize_rcu(); No other threads running at this point */
+ kref_put(&mdev->kref, &drbd_minor_destroy);
}
- kfree(minor_table);
+ /* not _rcu since there is no other updater anymore; genl is already unregistered */
+ list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
+ list_del(&tconn->all_tconn); /* not _rcu: no proc, no other threads */
+ /* synchronize_rcu(); */
+ kref_put(&tconn->kref, &conn_destroy);
+ }
+ drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
+ idr_destroy(&minors);
+
printk(KERN_INFO "drbd: module cleanup done.\n");
}
@@ -3542,7 +2341,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
goto out;
}
- if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+ if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
@@ -3566,7 +2365,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
reason = 'b';
}
- if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
+ if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}
@@ -3576,20 +2375,243 @@ out:
return r;
}
-struct drbd_conf *drbd_new_device(unsigned int minor)
+static void drbd_init_workqueue(struct drbd_work_queue* wq)
+{
+ spin_lock_init(&wq->q_lock);
+ INIT_LIST_HEAD(&wq->q);
+ init_waitqueue_head(&wq->q_wait);
+}
+
+struct drbd_tconn *conn_get_by_name(const char *name)
+{
+ struct drbd_tconn *tconn;
+
+ if (!name || !name[0])
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
+ if (!strcmp(tconn->name, name)) {
+ kref_get(&tconn->kref);
+ goto found;
+ }
+ }
+ tconn = NULL;
+found:
+ rcu_read_unlock();
+ return tconn;
+}
+
+struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+ void *peer_addr, int peer_addr_len)
+{
+ struct drbd_tconn *tconn;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
+ if (tconn->my_addr_len == my_addr_len &&
+ tconn->peer_addr_len == peer_addr_len &&
+ !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
+ !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
+ kref_get(&tconn->kref);
+ goto found;
+ }
+ }
+ tconn = NULL;
+found:
+ rcu_read_unlock();
+ return tconn;
+}
+
+static int drbd_alloc_socket(struct drbd_socket *socket)
+{
+ socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
+ if (!socket->rbuf)
+ return -ENOMEM;
+ socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
+ if (!socket->sbuf)
+ return -ENOMEM;
+ return 0;
+}
+
+static void drbd_free_socket(struct drbd_socket *socket)
+{
+ free_page((unsigned long) socket->sbuf);
+ free_page((unsigned long) socket->rbuf);
+}
+
+void conn_free_crypto(struct drbd_tconn *tconn)
+{
+ drbd_free_sock(tconn);
+
+ crypto_free_hash(tconn->csums_tfm);
+ crypto_free_hash(tconn->verify_tfm);
+ crypto_free_hash(tconn->cram_hmac_tfm);
+ crypto_free_hash(tconn->integrity_tfm);
+ crypto_free_hash(tconn->peer_integrity_tfm);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+
+ tconn->csums_tfm = NULL;
+ tconn->verify_tfm = NULL;
+ tconn->cram_hmac_tfm = NULL;
+ tconn->integrity_tfm = NULL;
+ tconn->peer_integrity_tfm = NULL;
+ tconn->int_dig_in = NULL;
+ tconn->int_dig_vv = NULL;
+}
+
+int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
+{
+ cpumask_var_t new_cpu_mask;
+ int err;
+
+ if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+ /*
+ retcode = ERR_NOMEM;
+ drbd_msg_put_info("unable to allocate cpumask");
+ */
+
+ /* silently ignore cpu mask on UP kernel */
+ if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
+ /* FIXME: Get rid of constant 32 here */
+ err = bitmap_parse(res_opts->cpu_mask, 32,
+ cpumask_bits(new_cpu_mask), nr_cpu_ids);
+ if (err) {
+ conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
+ /* retcode = ERR_CPU_MASK_PARSE; */
+ goto fail;
+ }
+ }
+ tconn->res_opts = *res_opts;
+ if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
+ cpumask_copy(tconn->cpu_mask, new_cpu_mask);
+ drbd_calc_cpu_mask(tconn);
+ tconn->receiver.reset_cpu_mask = 1;
+ tconn->asender.reset_cpu_mask = 1;
+ tconn->worker.reset_cpu_mask = 1;
+ }
+ err = 0;
+
+fail:
+ free_cpumask_var(new_cpu_mask);
+ return err;
+
+}
+
+/* caller must be under genl_lock() */
+struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
+{
+ struct drbd_tconn *tconn;
+
+ tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
+ if (!tconn)
+ return NULL;
+
+ tconn->name = kstrdup(name, GFP_KERNEL);
+ if (!tconn->name)
+ goto fail;
+
+ if (drbd_alloc_socket(&tconn->data))
+ goto fail;
+ if (drbd_alloc_socket(&tconn->meta))
+ goto fail;
+
+ if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
+ goto fail;
+
+ if (set_resource_options(tconn, res_opts))
+ goto fail;
+
+ tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!tconn->current_epoch)
+ goto fail;
+
+ INIT_LIST_HEAD(&tconn->transfer_log);
+
+ INIT_LIST_HEAD(&tconn->current_epoch->list);
+ tconn->epochs = 1;
+ spin_lock_init(&tconn->epoch_lock);
+ tconn->write_ordering = WO_bdev_flush;
+
+ tconn->send.seen_any_write_yet = false;
+ tconn->send.current_epoch_nr = 0;
+ tconn->send.current_epoch_writes = 0;
+
+ tconn->cstate = C_STANDALONE;
+ mutex_init(&tconn->cstate_mutex);
+ spin_lock_init(&tconn->req_lock);
+ mutex_init(&tconn->conf_update);
+ init_waitqueue_head(&tconn->ping_wait);
+ idr_init(&tconn->volumes);
+
+ drbd_init_workqueue(&tconn->sender_work);
+ mutex_init(&tconn->data.mutex);
+ mutex_init(&tconn->meta.mutex);
+
+ drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
+ drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
+ drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
+
+ kref_init(&tconn->kref);
+ list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
+
+ return tconn;
+
+fail:
+ kfree(tconn->current_epoch);
+ free_cpumask_var(tconn->cpu_mask);
+ drbd_free_socket(&tconn->meta);
+ drbd_free_socket(&tconn->data);
+ kfree(tconn->name);
+ kfree(tconn);
+
+ return NULL;
+}
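/* Usage sketch (hypothetical caller, for illustration only): a resource is
 * created first and volumes are then attached by minor and volume number.
 * Assumes genl_lock() is held, as conn_create() requires, and relies on
 * conn_new_minor() taking its own reference on the connection. */
static enum drbd_ret_code example_new_resource(const char *name,
					       struct res_opts *res_opts,
					       unsigned int minor, int vnr)
{
	struct drbd_tconn *tconn = conn_create(name, res_opts);

	if (!tconn)
		return ERR_NOMEM;
	return conn_new_minor(tconn, minor, vnr);
}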
+
+void conn_destroy(struct kref *kref)
+{
+ struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+
+ if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
+ conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+ kfree(tconn->current_epoch);
+
+ idr_destroy(&tconn->volumes);
+
+ free_cpumask_var(tconn->cpu_mask);
+ drbd_free_socket(&tconn->meta);
+ drbd_free_socket(&tconn->data);
+ kfree(tconn->name);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+ kfree(tconn);
+}
+
+enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
struct drbd_conf *mdev;
struct gendisk *disk;
struct request_queue *q;
+ int vnr_got = vnr;
+ int minor_got = minor;
+ enum drbd_ret_code err = ERR_NOMEM;
+
+ mdev = minor_to_mdev(minor);
+ if (mdev)
+ return ERR_MINOR_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
if (!mdev)
- return NULL;
- if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
- goto out_no_cpumask;
+ return ERR_NOMEM;
+
+ kref_get(&tconn->kref);
+ mdev->tconn = tconn;
mdev->minor = minor;
+ mdev->vnr = vnr;
drbd_init_set_defaults(mdev);
@@ -3627,7 +2649,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->req_lock;
+ q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
mdev->md_io_page = alloc_page(GFP_KERNEL);
if (!mdev->md_io_page)
@@ -3635,30 +2657,44 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
if (drbd_bm_init(mdev))
goto out_no_bitmap;
- /* no need to lock access, we are still initializing this minor device. */
- if (!tl_init(mdev))
- goto out_no_tl;
-
- mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
- if (!mdev->app_reads_hash)
- goto out_no_app_reads;
-
- mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!mdev->current_epoch)
- goto out_no_epoch;
-
- INIT_LIST_HEAD(&mdev->current_epoch->list);
- mdev->epochs = 1;
-
- return mdev;
-
-/* out_whatever_else:
- kfree(mdev->current_epoch); */
-out_no_epoch:
- kfree(mdev->app_reads_hash);
-out_no_app_reads:
- tl_cleanup(mdev);
-out_no_tl:
+ mdev->read_requests = RB_ROOT;
+ mdev->write_requests = RB_ROOT;
+
+ if (!idr_pre_get(&minors, GFP_KERNEL))
+ goto out_no_minor_idr;
+ if (idr_get_new_above(&minors, mdev, minor, &minor_got))
+ goto out_no_minor_idr;
+ if (minor_got != minor) {
+ err = ERR_MINOR_EXISTS;
+ drbd_msg_put_info("requested minor exists already");
+ goto out_idr_remove_minor;
+ }
+
+ if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
+ goto out_idr_remove_minor;
+ if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
+ goto out_idr_remove_minor;
+ if (vnr_got != vnr) {
+ err = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("requested volume exists already");
+ goto out_idr_remove_vol;
+ }
+ add_disk(disk);
+ kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
+
+ /* inherit the connection state */
+ mdev->state.conn = tconn->cstate;
+ if (mdev->state.conn == C_WF_REPORT_PARAMS)
+ drbd_connected(mdev);
+
+ return NO_ERROR;
+
+out_idr_remove_vol:
+ idr_remove(&tconn->volumes, vnr_got);
+out_idr_remove_minor:
+ idr_remove(&minors, minor_got);
+ synchronize_rcu();
+out_no_minor_idr:
drbd_bm_cleanup(mdev);
out_no_bitmap:
__free_page(mdev->md_io_page);
@@ -3667,55 +2703,25 @@ out_no_io_page:
out_no_disk:
blk_cleanup_queue(q);
out_no_q:
- free_cpumask_var(mdev->cpu_mask);
-out_no_cpumask:
- kfree(mdev);
- return NULL;
-}
-
-/* counterpart of drbd_new_device.
- * last part of drbd_delete_device. */
-void drbd_free_mdev(struct drbd_conf *mdev)
-{
- kfree(mdev->current_epoch);
- kfree(mdev->app_reads_hash);
- tl_cleanup(mdev);
- if (mdev->bitmap) /* should no longer be there. */
- drbd_bm_cleanup(mdev);
- __free_page(mdev->md_io_page);
- put_disk(mdev->vdisk);
- blk_cleanup_queue(mdev->rq_queue);
- free_cpumask_var(mdev->cpu_mask);
- drbd_free_tl_hash(mdev);
kfree(mdev);
+ kref_put(&tconn->kref, &conn_destroy);
+ return err;
}
-
int __init drbd_init(void)
{
int err;
- if (sizeof(struct p_handshake) != 80) {
- printk(KERN_ERR
- "drbd: never change the size or layout "
- "of the HandShake packet.\n");
- return -EINVAL;
- }
-
if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
printk(KERN_ERR
- "drbd: invalid minor_count (%d)\n", minor_count);
+ "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
return -EINVAL;
#else
- minor_count = 8;
+ minor_count = DRBD_MINOR_COUNT_DEF;
#endif
}
- err = drbd_nl_init();
- if (err)
- return err;
-
err = register_blkdev(DRBD_MAJOR, "drbd");
if (err) {
printk(KERN_ERR
@@ -3724,6 +2730,13 @@ int __init drbd_init(void)
return err;
}
+ err = drbd_genl_register();
+ if (err) {
+ printk(KERN_ERR "drbd: unable to register generic netlink family\n");
+ goto fail;
+ }
+
+
register_reboot_notifier(&drbd_notifier);
/*
@@ -3734,22 +2747,29 @@ int __init drbd_init(void)
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
- minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
- GFP_KERNEL);
- if (!minor_table)
- goto Enomem;
+ idr_init(&minors);
err = drbd_create_mempools();
if (err)
- goto Enomem;
+ goto fail;
drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
if (!drbd_proc) {
printk(KERN_ERR "drbd: unable to register proc file\n");
- goto Enomem;
+ goto fail;
}
rwlock_init(&global_state_lock);
+ INIT_LIST_HEAD(&drbd_tconns);
+
+ retry.wq = create_singlethread_workqueue("drbd-reissue");
+ if (!retry.wq) {
+ printk(KERN_ERR "drbd: unable to create retry workqueue\n");
+ goto fail;
+ }
+ INIT_WORK(&retry.worker, do_retry);
+ spin_lock_init(&retry.lock);
+ INIT_LIST_HEAD(&retry.writes);
printk(KERN_INFO "drbd: initialized. "
"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
@@ -3757,11 +2777,10 @@ int __init drbd_init(void)
printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
printk(KERN_INFO "drbd: registered as block device major %d\n",
DRBD_MAJOR);
- printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
return 0; /* Success! */
-Enomem:
+fail:
drbd_cleanup();
if (err == -ENOMEM)
/* currently always the case */
@@ -3782,47 +2801,42 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
kfree(ldev);
}
-void drbd_free_sock(struct drbd_conf *mdev)
+void drbd_free_sock(struct drbd_tconn *tconn)
{
- if (mdev->data.socket) {
- mutex_lock(&mdev->data.mutex);
- kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
- sock_release(mdev->data.socket);
- mdev->data.socket = NULL;
- mutex_unlock(&mdev->data.mutex);
+ if (tconn->data.socket) {
+ mutex_lock(&tconn->data.mutex);
+ kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
+ sock_release(tconn->data.socket);
+ tconn->data.socket = NULL;
+ mutex_unlock(&tconn->data.mutex);
}
- if (mdev->meta.socket) {
- mutex_lock(&mdev->meta.mutex);
- kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
- sock_release(mdev->meta.socket);
- mdev->meta.socket = NULL;
- mutex_unlock(&mdev->meta.mutex);
+ if (tconn->meta.socket) {
+ mutex_lock(&tconn->meta.mutex);
+ kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
+ sock_release(tconn->meta.socket);
+ tconn->meta.socket = NULL;
+ mutex_unlock(&tconn->meta.mutex);
}
}
+/* meta data management */
-void drbd_free_resources(struct drbd_conf *mdev)
+void conn_md_sync(struct drbd_tconn *tconn)
{
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = NULL;
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = NULL;
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = NULL;
- crypto_free_hash(mdev->integrity_w_tfm);
- mdev->integrity_w_tfm = NULL;
- crypto_free_hash(mdev->integrity_r_tfm);
- mdev->integrity_r_tfm = NULL;
-
- drbd_free_sock(mdev);
+ struct drbd_conf *mdev;
+ int vnr;
- __no_warn(local,
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_md_sync(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
}
-/* meta data management */
-
struct meta_data_on_disk {
u64 la_size; /* last agreed size. */
u64 uuid[UI_SIZE]; /* UUIDs. */
@@ -3833,7 +2847,7 @@ struct meta_data_on_disk {
u32 md_size_sect;
u32 al_offset; /* offset to this block */
u32 al_nr_extents; /* important for restoring the AL */
- /* `-- act_log->nr_elements <-- sync_conf.al_extents */
+ /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
u32 bm_offset; /* offset to the bitmap, from here */
u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
u32 la_peer_max_bio_size; /* last peer max_bio_size */
@@ -3871,7 +2885,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
- buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
+ buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
@@ -3885,7 +2899,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
sector = mdev->ldev->md.md_offset;
- if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
/* this was a try anyways ... */
dev_err(DEV, "meta data update failed!\n");
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
@@ -3906,11 +2920,12 @@ out:
* @bdev: Device from which the meta data should be read in.
*
* Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
- * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
+ * something goes wrong.
*/
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
+ u32 magic, flags;
int i, rv = NO_ERROR;
if (!get_ldev_if_state(mdev, D_ATTACHING))
@@ -3920,7 +2935,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!buffer)
goto out;
- if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
+ if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
dev_err(DEV, "Error while reading metadata.\n");
@@ -3928,8 +2943,20 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
goto err;
}
- if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
- dev_err(DEV, "Error while reading metadata, magic not found.\n");
+ magic = be32_to_cpu(buffer->magic);
+ flags = be32_to_cpu(buffer->flags);
+ if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
+ (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
+ /* btw: that's Activity Log clean, not "all" clean. */
+ dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
+ rv = ERR_MD_UNCLEAN;
+ goto err;
+ }
+ if (magic != DRBD_MD_MAGIC_08) {
+ if (magic == DRBD_MD_MAGIC_07)
+ dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
+ else
+ dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
rv = ERR_MD_INVALID;
goto err;
}
@@ -3963,20 +2990,16 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
for (i = UI_CURRENT; i < UI_SIZE; i++)
bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
bdev->md.flags = be32_to_cpu(buffer->flags);
- mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED) {
unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
mdev->peer_max_bio_size = peer;
}
- spin_unlock_irq(&mdev->req_lock);
-
- if (mdev->sync_conf.al_extents < 7)
- mdev->sync_conf.al_extents = 127;
+ spin_unlock_irq(&mdev->tconn->req_lock);
err:
drbd_md_put_buffer(mdev);
@@ -4011,7 +3034,7 @@ void drbd_md_mark_dirty(struct drbd_conf *mdev)
}
#endif
-static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
int i;
@@ -4019,7 +3042,7 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}
-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
if (idx == UI_CURRENT) {
if (mdev->state.role == R_PRIMARY)
@@ -4034,14 +3057,24 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
drbd_md_mark_dirty(mdev);
}
+void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(mdev, idx, val);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+}
void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
if (mdev->ldev->md.uuid[idx]) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
}
- _drbd_uuid_set(mdev, idx, val);
+ __drbd_uuid_set(mdev, idx, val);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
}
/**
@@ -4054,15 +3087,20 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
u64 val;
- unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ unsigned long long bm_uuid;
+
+ get_random_bytes(&val, sizeof(u64));
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
+ __drbd_uuid_set(mdev, UI_CURRENT, val);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
- get_random_bytes(&val, sizeof(u64));
- _drbd_uuid_set(mdev, UI_CURRENT, val);
drbd_print_uuids(mdev, "new current UUID");
/* get it to stable storage _now_ */
drbd_md_sync(mdev);
@@ -4070,9 +3108,11 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
+ unsigned long flags;
if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
@@ -4084,6 +3124,8 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+
drbd_md_mark_dirty(mdev);
}
@@ -4135,9 +3177,10 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
return rv;
}
-static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_bitmap_io(struct drbd_work *w, int unused)
{
struct bm_io_work *work = container_of(w, struct bm_io_work, w);
+ struct drbd_conf *mdev = w->mdev;
int rv = -EIO;
D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
@@ -4149,8 +3192,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
put_ldev(mdev);
}
- clear_bit(BITMAP_IO, &mdev->flags);
- smp_mb__after_clear_bit();
+ clear_bit_unlock(BITMAP_IO, &mdev->flags);
wake_up(&mdev->misc_wait);
if (work->done)
@@ -4160,7 +3202,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
work->why = NULL;
work->flags = 0;
- return 1;
+ return 0;
}
void drbd_ldev_destroy(struct drbd_conf *mdev)
@@ -4173,29 +3215,51 @@ void drbd_ldev_destroy(struct drbd_conf *mdev)
drbd_free_bc(mdev->ldev);
mdev->ldev = NULL;);
- if (mdev->md_io_tmpp) {
- __free_page(mdev->md_io_tmpp);
- mdev->md_io_tmpp = NULL;
- }
clear_bit(GO_DISKLESS, &mdev->flags);
}
-static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_go_diskless(struct drbd_work *w, int unused)
{
+ struct drbd_conf *mdev = w->mdev;
+
D_ASSERT(mdev->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
* the protected members anymore, though, so once put_ldev reaches zero
* again, it will be safe to free them. */
+
+ /* Try to write changed bitmap pages, read errors may have just
+ * set some bits outside the area covered by the activity log.
+ *
+ * If we have an IO error during the bitmap writeout,
+ * we will want a full sync next time, just in case.
+ * (Do we want a specific meta data flag for this?)
+ *
+ * If that does not make it to stable storage either,
+ * we cannot do anything about that anymore.
+ *
+ * We still need to check if both bitmap and ldev are present, we may
+ * end up here after a failed attach, before ldev was even assigned.
+ */
+ if (mdev->bitmap && mdev->ldev) {
+ if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
+ "detach", BM_LOCKED_MASK)) {
+ if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
+ drbd_md_set_flag(mdev, MDF_FULL_SYNC);
+ drbd_md_sync(mdev);
+ }
+ }
+ }
+
drbd_force_state(mdev, NS(disk, D_DISKLESS));
- return 1;
+ return 0;
}
void drbd_go_diskless(struct drbd_conf *mdev)
{
D_ASSERT(mdev->state.disk == D_FAILED);
if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}
/**
@@ -4215,7 +3279,7 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
void (*done)(struct drbd_conf *, int),
char *why, enum bm_flag flags)
{
- D_ASSERT(current == mdev->worker.task);
+ D_ASSERT(current == mdev->tconn->worker.task);
D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
@@ -4229,13 +3293,13 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
mdev->bm_io_work.why = why;
mdev->bm_io_work.flags = flags;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
set_bit(BITMAP_IO, &mdev->flags);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
/**
@@ -4252,7 +3316,7 @@ int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
{
int rv;
- D_ASSERT(current != mdev->worker.task);
+ D_ASSERT(current != mdev->tconn->worker.task);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
drbd_suspend_io(mdev);
@@ -4291,18 +3355,127 @@ static void md_sync_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
- drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
+ /* must not double-queue! */
+ if (list_empty(&mdev->md_sync_work.list))
+ drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}
-static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_md_sync(struct drbd_work *w, int unused)
{
+ struct drbd_conf *mdev = w->mdev;
+
dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
drbd_md_sync(mdev);
- return 1;
+ return 0;
+}
+
+const char *cmdname(enum drbd_packet cmd)
+{
+ /* THINK may need to become several global tables
+ * when we want to support more than
+ * one PRO_VERSION */
+ static const char *cmdnames[] = {
+ [P_DATA] = "Data",
+ [P_DATA_REPLY] = "DataReply",
+ [P_RS_DATA_REPLY] = "RSDataReply",
+ [P_BARRIER] = "Barrier",
+ [P_BITMAP] = "ReportBitMap",
+ [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
+ [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
+ [P_UNPLUG_REMOTE] = "UnplugRemote",
+ [P_DATA_REQUEST] = "DataRequest",
+ [P_RS_DATA_REQUEST] = "RSDataRequest",
+ [P_SYNC_PARAM] = "SyncParam",
+ [P_SYNC_PARAM89] = "SyncParam89",
+ [P_PROTOCOL] = "ReportProtocol",
+ [P_UUIDS] = "ReportUUIDs",
+ [P_SIZES] = "ReportSizes",
+ [P_STATE] = "ReportState",
+ [P_SYNC_UUID] = "ReportSyncUUID",
+ [P_AUTH_CHALLENGE] = "AuthChallenge",
+ [P_AUTH_RESPONSE] = "AuthResponse",
+ [P_PING] = "Ping",
+ [P_PING_ACK] = "PingAck",
+ [P_RECV_ACK] = "RecvAck",
+ [P_WRITE_ACK] = "WriteAck",
+ [P_RS_WRITE_ACK] = "RSWriteAck",
+ [P_SUPERSEDED] = "Superseded",
+ [P_NEG_ACK] = "NegAck",
+ [P_NEG_DREPLY] = "NegDReply",
+ [P_NEG_RS_DREPLY] = "NegRSDReply",
+ [P_BARRIER_ACK] = "BarrierAck",
+ [P_STATE_CHG_REQ] = "StateChgRequest",
+ [P_STATE_CHG_REPLY] = "StateChgReply",
+ [P_OV_REQUEST] = "OVRequest",
+ [P_OV_REPLY] = "OVReply",
+ [P_OV_RESULT] = "OVResult",
+ [P_CSUM_RS_REQUEST] = "CsumRSRequest",
+ [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
+ [P_COMPRESSED_BITMAP] = "CBitmap",
+ [P_DELAY_PROBE] = "DelayProbe",
+ [P_OUT_OF_SYNC] = "OutOfSync",
+ [P_RETRY_WRITE] = "RetryWrite",
+ [P_RS_CANCEL] = "RSCancel",
+ [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
+ [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
+ [P_RETRY_WRITE] = "retry_write",
+ [P_PROTOCOL_UPDATE] = "protocol_update",
+
+ /* enum drbd_packet, but not commands - obsoleted flags:
+ * P_MAY_IGNORE
+ * P_MAX_OPT_CMD
+ */
+ };
+
+ /* too big for the array: 0xfffX */
+ if (cmd == P_INITIAL_META)
+ return "InitialMeta";
+ if (cmd == P_INITIAL_DATA)
+ return "InitialData";
+ if (cmd == P_CONNECTION_FEATURES)
+ return "ConnectionFeatures";
+ if (cmd >= ARRAY_SIZE(cmdnames))
+ return "Unknown";
+ return cmdnames[cmd];
+}
+
+/**
+ * drbd_wait_misc - wait for a request to make progress
+ * @mdev: device associated with the request
+ * @i: the struct drbd_interval embedded in struct drbd_request or
+ * struct drbd_peer_request
+ */
+int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+ struct net_conf *nc;
+ DEFINE_WAIT(wait);
+ long timeout;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return -ETIMEDOUT;
+ }
+ timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
+ rcu_read_unlock();
+
+ /* Indicate to wake up mdev->misc_wait on progress. */
+ i->waiting = true;
+ prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ timeout = schedule_timeout(timeout);
+ finish_wait(&mdev->misc_wait, &wait);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (!timeout || mdev->state.conn < C_CONNECTED)
+ return -ETIMEDOUT;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ return 0;
}
#ifdef CONFIG_DRBD_FAULT_INJECTION
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index edb490aad8b4..2af26fc95280 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -29,159 +29,317 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
-#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
-#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
-#include <linux/compiler.h>
#include <linux/kthread.h>
-static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
-static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
-static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);
-
-/* see get_sb_bdev and bd_claim */
+#include <net/genetlink.h>
+
+/* .doit */
+// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
+// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
+/* .dumpit */
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
+
+#include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
+#include <linux/genl_magic_func.h>
+
+/* used with blkdev_get_by_path to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
-/* Generate the tag_list to struct functions */
-#define NL_PACKET(name, number, fields) \
-static int name ## _from_tags(struct drbd_conf *mdev, \
- unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
-static int name ## _from_tags(struct drbd_conf *mdev, \
- unsigned short *tags, struct name *arg) \
-{ \
- int tag; \
- int dlen; \
- \
- while ((tag = get_unaligned(tags++)) != TT_END) { \
- dlen = get_unaligned(tags++); \
- switch (tag_number(tag)) { \
- fields \
- default: \
- if (tag & T_MANDATORY) { \
- dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
- return 0; \
- } \
- } \
- tags = (unsigned short *)((char *)tags + dlen); \
- } \
- return 1; \
-}
-#define NL_INTEGER(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
- arg->member = get_unaligned((int *)(tags)); \
- break;
-#define NL_INT64(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
- arg->member = get_unaligned((u64 *)(tags)); \
+/* Configuration is strictly serialized, because generic netlink message
+ * processing is strictly serialized by the genl_lock().
+ * Which means we can use one static global drbd_config_context struct.
+ */
+static struct drbd_config_context {
+ /* assigned from drbd_genlmsghdr */
+ unsigned int minor;
+ /* assigned from request attributes, if present */
+ unsigned int volume;
+#define VOLUME_UNSPECIFIED (-1U)
+ /* pointer into the request skb,
+ * limited lifetime! */
+ char *resource_name;
+ struct nlattr *my_addr;
+ struct nlattr *peer_addr;
+
+ /* reply buffer */
+ struct sk_buff *reply_skb;
+ /* pointer into reply buffer */
+ struct drbd_genlmsghdr *reply_dh;
+ /* resolved from attributes, if possible */
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
+} adm_ctx;
+
+static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
+{
+ genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
+ if (genlmsg_reply(skb, info))
+ printk(KERN_ERR "drbd: error sending genl reply\n");
+}
+
+/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
+ * reason it could fail would be no space in the skb, and there are 4k available. */
+int drbd_msg_put_info(const char *info)
+{
+ struct sk_buff *skb = adm_ctx.reply_skb;
+ struct nlattr *nla;
+ int err = -EMSGSIZE;
+
+ if (!info || !info[0])
+ return 0;
+
+ nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ if (!nla)
+ return err;
+
+ err = nla_put_string(skb, T_info_text, info);
+ if (err) {
+ nla_nest_cancel(skb, nla);
+ return err;
+ } else
+ nla_nest_end(skb, nla);
+ return 0;
+}
+
+/* This would be a good candidate for a "pre_doit" hook,
+ * and per-family private info->pointers.
+ * But we need to stay compatible with older kernels.
+ * If it returns successfully, adm_ctx members are valid.
+ */
+#define DRBD_ADM_NEED_MINOR 1
+#define DRBD_ADM_NEED_RESOURCE 2
+#define DRBD_ADM_NEED_CONNECTION 4
+static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
+ unsigned flags)
+{
+ struct drbd_genlmsghdr *d_in = info->userhdr;
+ const u8 cmd = info->genlhdr->cmd;
+ int err;
+
+ memset(&adm_ctx, 0, sizeof(adm_ctx));
+
+ /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
+ if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!adm_ctx.reply_skb) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
+ info, &drbd_genl_family, 0, cmd);
+ /* putting a few bytes into a fresh skb of >= 4k will always succeed,
+ * but check anyway */
+ if (!adm_ctx.reply_dh) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ adm_ctx.reply_dh->minor = d_in->minor;
+ adm_ctx.reply_dh->ret_code = NO_ERROR;
+
+ adm_ctx.volume = VOLUME_UNSPECIFIED;
+ if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
+ struct nlattr *nla;
+ /* parse and validate only */
+ err = drbd_cfg_context_from_attrs(NULL, info);
+ if (err)
+ goto fail;
+
+ /* It was present, and valid,
+ * copy it over to the reply skb. */
+ err = nla_put_nohdr(adm_ctx.reply_skb,
+ info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
+ info->attrs[DRBD_NLA_CFG_CONTEXT]);
+ if (err)
+ goto fail;
+
+ /* and assign stuff to the global adm_ctx */
+ nla = nested_attr_tb[__nla_type(T_ctx_volume)];
+ if (nla)
+ adm_ctx.volume = nla_get_u32(nla);
+ nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
+ if (nla)
+ adm_ctx.resource_name = nla_data(nla);
+ adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
+ adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
+ if ((adm_ctx.my_addr &&
+ nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+ (adm_ctx.peer_addr &&
+ nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ adm_ctx.minor = d_in->minor;
+ adm_ctx.mdev = minor_to_mdev(d_in->minor);
+ adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
+
+ if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
+ drbd_msg_put_info("unknown minor");
+ return ERR_MINOR_INVALID;
+ }
+ if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info("unknown resource");
+ return ERR_INVALID_REQUEST;
+ }
+
+ if (flags & DRBD_ADM_NEED_CONNECTION) {
+ if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info("no resource name expected");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.mdev) {
+ drbd_msg_put_info("no minor number expected");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.my_addr && adm_ctx.peer_addr)
+ adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+ nla_len(adm_ctx.my_addr),
+ nla_data(adm_ctx.peer_addr),
+ nla_len(adm_ctx.peer_addr));
+ if (!adm_ctx.tconn) {
+ drbd_msg_put_info("unknown connection");
+ return ERR_INVALID_REQUEST;
+ }
+ }
+
+ /* some more paranoia, if the request was over-determined */
+ if (adm_ctx.mdev && adm_ctx.tconn &&
+ adm_ctx.mdev->tconn != adm_ctx.tconn) {
+ pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
+ adm_ctx.minor, adm_ctx.resource_name,
+ adm_ctx.mdev->tconn->name);
+ drbd_msg_put_info("minor exists in different resource");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.mdev &&
+ adm_ctx.volume != VOLUME_UNSPECIFIED &&
+ adm_ctx.volume != adm_ctx.mdev->vnr) {
+ pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
+ adm_ctx.minor, adm_ctx.volume,
+ adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
+ drbd_msg_put_info("minor exists as different volume");
+ return ERR_INVALID_REQUEST;
+ }
+
+ return NO_ERROR;
+
+fail:
+ nlmsg_free(adm_ctx.reply_skb);
+ adm_ctx.reply_skb = NULL;
+ return err;
+}
+
+static int drbd_adm_finish(struct genl_info *info, int retcode)
+{
+ if (adm_ctx.tconn) {
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+ adm_ctx.tconn = NULL;
+ }
+
+ if (!adm_ctx.reply_skb)
+ return -ENOMEM;
+
+ adm_ctx.reply_dh->ret_code = retcode;
+ drbd_adm_send_reply(adm_ctx.reply_skb, info);
+ return 0;
+}
+
+static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
+{
+ char *afs;
+
+ /* FIXME: A future version will not allow this case. */
+ if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+ return;
+
+ switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+ case AF_INET6:
+ afs = "ipv6";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
+ &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
break;
-#define NL_BIT(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
- arg->member = *(char *)(tags) ? 1 : 0; \
+ case AF_INET:
+ afs = "ipv4";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+ &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
break;
-#define NL_STRING(pn, pr, member, len) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
- if (dlen > len) { \
- dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
- #member, dlen, (unsigned int)len); \
- return 0; \
- } \
- arg->member ## _len = dlen; \
- memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
- break;
-#include <linux/drbd_nl.h>
-
-/* Generate the struct to tag_list functions */
-#define NL_PACKET(name, number, fields) \
-static unsigned short* \
-name ## _to_tags(struct drbd_conf *mdev, \
- struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
-static unsigned short* \
-name ## _to_tags(struct drbd_conf *mdev, \
- struct name *arg, unsigned short *tags) \
-{ \
- fields \
- return tags; \
-}
-
-#define NL_INTEGER(pn, pr, member) \
- put_unaligned(pn | pr | TT_INTEGER, tags++); \
- put_unaligned(sizeof(int), tags++); \
- put_unaligned(arg->member, (int *)tags); \
- tags = (unsigned short *)((char *)tags+sizeof(int));
-#define NL_INT64(pn, pr, member) \
- put_unaligned(pn | pr | TT_INT64, tags++); \
- put_unaligned(sizeof(u64), tags++); \
- put_unaligned(arg->member, (u64 *)tags); \
- tags = (unsigned short *)((char *)tags+sizeof(u64));
-#define NL_BIT(pn, pr, member) \
- put_unaligned(pn | pr | TT_BIT, tags++); \
- put_unaligned(sizeof(char), tags++); \
- *(char *)tags = arg->member; \
- tags = (unsigned short *)((char *)tags+sizeof(char));
-#define NL_STRING(pn, pr, member, len) \
- put_unaligned(pn | pr | TT_STRING, tags++); \
- put_unaligned(arg->member ## _len, tags++); \
- memcpy(tags, arg->member, arg->member ## _len); \
- tags = (unsigned short *)((char *)tags + arg->member ## _len);
-#include <linux/drbd_nl.h>
-
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
-void drbd_nl_send_reply(struct cn_msg *, int);
+ default:
+ afs = "ssocks";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+ &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+ }
+ snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
+}
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
- NULL, /* Will be set to address family */
- NULL, /* Will be set to address */
+ (char[20]) { }, /* address family */
+ (char[60]) { }, /* address */
NULL };
-
- char mb[12], af[20], ad[60], *afs;
+ char mb[12];
char *argv[] = {usermode_helper, cmd, mb, NULL };
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct sib_info sib;
int ret;
- if (current == mdev->worker.task)
- set_bit(CALLBACK_PENDING, &mdev->flags);
+ if (current == tconn->worker.task)
+ set_bit(CALLBACK_PENDING, &tconn->flags);
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
-
- if (get_net_conf(mdev)) {
- switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
- case AF_INET6:
- afs = "ipv6";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
- &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
- break;
- case AF_INET:
- afs = "ipv4";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
- break;
- default:
- afs = "ssocks";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
- }
- snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
- envp[3]=af;
- envp[4]=ad;
- put_net_conf(mdev);
- }
+ setup_khelper_env(tconn, envp);
/* The helper may take some time.
* write out any unsynced meta data changes now */
drbd_md_sync(mdev);
dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
-
- drbd_bcast_ev_helper(mdev, cmd);
+ sib.sib_reason = SIB_HELPER_PRE;
+ sib.helper_name = cmd;
+ drbd_bcast_event(mdev, &sib);
ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
@@ -191,9 +349,46 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
+ sib.sib_reason = SIB_HELPER_POST;
+ sib.helper_exit_code = ret;
+ drbd_bcast_event(mdev, &sib);
+
+ if (current == tconn->worker.task)
+ clear_bit(CALLBACK_PENDING, &tconn->flags);
+
+ if (ret < 0) /* Ignore any ERRNOs we got. */
+ ret = 0;
+
+ return ret;
+}
+
+int conn_khelper(struct drbd_tconn *tconn, char *cmd)
+{
+ char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ (char[20]) { }, /* address family */
+ (char[60]) { }, /* address */
+ NULL };
+ char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
+ int ret;
+
+ setup_khelper_env(tconn, envp);
+ conn_md_sync(tconn);
- if (current == mdev->worker.task)
- clear_bit(CALLBACK_PENDING, &mdev->flags);
+ conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
+ /* TODO: conn_bcast_event() ?? */
+
+ ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ if (ret)
+ conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, tconn->name,
+ (ret >> 8) & 0xff, ret);
+ else
+ conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, tconn->name,
+ (ret >> 8) & 0xff, ret);
+ /* TODO: conn_bcast_event() ?? */
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
@@ -201,116 +396,129 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
return ret;
}
-enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
+static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
+ enum drbd_fencing_p fp = FP_NOT_AVAIL;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (get_ldev_if_state(mdev, D_CONSISTENT)) {
+ fp = max_t(enum drbd_fencing_p, fp,
+ rcu_dereference(mdev->ldev->disk_conf)->fencing);
+ put_ldev(mdev);
+ }
+ }
+ rcu_read_unlock();
+
+ return fp;
+}
+
+bool conn_try_outdate_peer(struct drbd_tconn *tconn)
+{
+ union drbd_state mask = { };
+ union drbd_state val = { };
+ enum drbd_fencing_p fp;
char *ex_to_string;
int r;
- enum drbd_disk_state nps;
- enum drbd_fencing_p fp;
- D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
+ if (tconn->cstate >= C_WF_REPORT_PARAMS) {
+ conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
+ return false;
+ }
- if (get_ldev_if_state(mdev, D_CONSISTENT)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- } else {
- dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
- nps = mdev->state.pdsk;
+ fp = highest_fencing_policy(tconn);
+ switch (fp) {
+ case FP_NOT_AVAIL:
+ conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
goto out;
+ case FP_DONT_CARE:
+ return true;
+ default: ;
}
- r = drbd_khelper(mdev, "fence-peer");
+ r = conn_khelper(tconn, "fence-peer");
switch ((r>>8) & 0xff) {
case 3: /* peer is inconsistent */
ex_to_string = "peer is inconsistent or worse";
- nps = D_INCONSISTENT;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_INCONSISTENT;
break;
case 4: /* peer got outdated, or was already outdated */
ex_to_string = "peer was fenced";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
break;
case 5: /* peer was down */
- if (mdev->state.disk == D_UP_TO_DATE) {
+ if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
} else {
ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
- nps = mdev->state.pdsk;
}
break;
case 6: /* Peer is primary, voluntarily outdate myself.
* This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
- dev_warn(DEV, "Peer is primary, outdating myself.\n");
- nps = D_UNKNOWN;
- _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
+ conn_warn(tconn, "Peer is primary, outdating myself.\n");
+ mask.disk = D_MASK;
+ val.disk = D_OUTDATED;
break;
case 7:
if (fp != FP_STONITH)
- dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
+ conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
break;
default:
/* The script is broken ... */
- nps = D_UNKNOWN;
- dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
- return nps;
+ conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
+ return false; /* Eventually leave IO frozen */
}
- dev_info(DEV, "fence-peer helper returned %d (%s)\n",
- (r>>8) & 0xff, ex_to_string);
+ conn_info(tconn, "fence-peer helper returned %d (%s)\n",
+ (r>>8) & 0xff, ex_to_string);
-out:
- if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
- /* The handler was not successful... unfreeze here, the
- state engine can not unfreeze... */
- _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
- }
+ out:
- return nps;
+ /* Not using
+ conn_request_state(tconn, mask, val, CS_VERBOSE);
+ here, because we might have been able to re-establish the connection in the
+ meantime. */
+ spin_lock_irq(&tconn->req_lock);
+ if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
+ _conn_request_state(tconn, mask, val, CS_VERBOSE);
+ spin_unlock_irq(&tconn->req_lock);
+
+ return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
- struct drbd_conf *mdev = (struct drbd_conf *)data;
- enum drbd_disk_state nps;
- union drbd_state ns;
+ struct drbd_tconn *tconn = (struct drbd_tconn *)data;
- nps = drbd_try_outdate_peer(mdev);
-
- /* Not using
- drbd_request_state(mdev, NS(pdsk, nps));
- here, because we might were able to re-establish the connection
- in the meantime. This can only partially be solved in the state's
- engine is_valid_state() and is_valid_state_transition()
- functions.
-
- nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
- pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
- therefore we have to have the pre state change check here.
- */
- spin_lock_irq(&mdev->req_lock);
- ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
- ns.pdsk = nps;
- _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- }
- spin_unlock_irq(&mdev->req_lock);
+ conn_try_outdate_peer(tconn);
+ kref_put(&tconn->kref, &conn_destroy);
return 0;
}
-void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
+void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
struct task_struct *opa;
- opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
- if (IS_ERR(opa))
- dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
+ kref_get(&tconn->kref);
+ opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+ if (IS_ERR(opa)) {
+ conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
+ kref_put(&tconn->kref, &conn_destroy);
+ }
}
enum drbd_state_rv
@@ -318,15 +526,15 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
const int max_tries = 4;
enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
+ struct net_conf *nc;
int try = 0;
int forced = 0;
union drbd_state mask, val;
- enum drbd_disk_state nps;
if (new_role == R_PRIMARY)
- request_ping(mdev); /* Detect a dead peer ASAP */
+ request_ping(mdev->tconn); /* Detect a dead peer ASAP */
- mutex_lock(&mdev->state_mutex);
+ mutex_lock(mdev->state_mutex);
mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role;
@@ -354,38 +562,34 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
if (rv == SS_NO_UP_TO_DATE_DISK &&
mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
- nps = drbd_try_outdate_peer(mdev);
- if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
+ if (conn_try_outdate_peer(mdev->tconn)) {
val.disk = D_UP_TO_DATE;
mask.disk = D_MASK;
}
-
- val.pdsk = nps;
- mask.pdsk = D_MASK;
-
continue;
}
if (rv == SS_NOTHING_TO_DO)
- goto fail;
+ goto out;
if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
- nps = drbd_try_outdate_peer(mdev);
-
- if (force && nps > D_OUTDATED) {
+ if (!conn_try_outdate_peer(mdev->tconn) && force) {
dev_warn(DEV, "Forced into split brain situation!\n");
- nps = D_OUTDATED;
- }
-
- mask.pdsk = D_MASK;
- val.pdsk = nps;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
+ }
continue;
}
if (rv == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
- schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
+ int timeo;
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeo);
if (try < max_tries)
try = max_tries - 1;
continue;
@@ -394,13 +598,13 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
rv = _drbd_request_state(mdev, mask, val,
CS_VERBOSE + CS_WAIT_COMPLETE);
if (rv < SS_SUCCESS)
- goto fail;
+ goto out;
}
break;
}
if (rv < SS_SUCCESS)
- goto fail;
+ goto out;
if (forced)
dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
@@ -408,6 +612,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
/* Wait until nothing is on the fly :) */
wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
+ /* FIXME also wait for all pending P_BARRIER_ACK? */
+
if (new_role == R_SECONDARY) {
set_disk_ro(mdev->vdisk, true);
if (get_ldev(mdev)) {
@@ -415,10 +621,12 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
put_ldev(mdev);
}
} else {
- if (get_net_conf(mdev)) {
- mdev->net_conf->want_lose = 0;
- put_net_conf(mdev);
- }
+ mutex_lock(&mdev->tconn->conf_update);
+ nc = mdev->tconn->net_conf;
+ if (nc)
+ nc->discard_my_data = 0; /* without copy; single bit op is atomic */
+ mutex_unlock(&mdev->tconn->conf_update);
+
set_disk_ro(mdev->vdisk, false);
if (get_ldev(mdev)) {
if (((mdev->state.conn < C_CONNECTED ||
@@ -444,67 +652,47 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
drbd_md_sync(mdev);
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- fail:
- mutex_unlock(&mdev->state_mutex);
+out:
+ mutex_unlock(mdev->state_mutex);
return rv;
}
-static struct drbd_conf *ensure_mdev(int minor, int create)
+static const char *from_attrs_err_to_txt(int err)
{
- struct drbd_conf *mdev;
-
- if (minor >= minor_count)
- return NULL;
-
- mdev = minor_to_mdev(minor);
-
- if (!mdev && create) {
- struct gendisk *disk = NULL;
- mdev = drbd_new_device(minor);
-
- spin_lock_irq(&drbd_pp_lock);
- if (minor_table[minor] == NULL) {
- minor_table[minor] = mdev;
- disk = mdev->vdisk;
- mdev = NULL;
- } /* else: we lost the race */
- spin_unlock_irq(&drbd_pp_lock);
-
- if (disk) /* we won the race above */
- /* in case we ever add a drbd_delete_device(),
- * don't forget the del_gendisk! */
- add_disk(disk);
- else /* we lost the race above */
- drbd_free_mdev(mdev);
-
- mdev = minor_to_mdev(minor);
- }
-
- return mdev;
+ return err == -ENOMSG ? "required attribute missing" :
+ err == -EOPNOTSUPP ? "unknown mandatory attribute" :
+ err == -EEXIST ? "can not change invariant setting" :
+ "invalid attribute value";
}
-static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
- struct primary primary_args;
-
- memset(&primary_args, 0, sizeof(struct primary));
- if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
- }
-
- reply->ret_code =
- drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);
+ struct set_role_parms parms;
+ int err;
+ enum drbd_ret_code retcode;
- return 0;
-}
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
-static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
-{
- reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);
+ memset(&parms, 0, sizeof(parms));
+ if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
+ err = set_role_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
+ }
+ if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
+ retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
+ else
+ retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
@@ -514,7 +702,12 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+
+ switch (meta_dev_idx) {
default:
/* v07 style fixed size indexed meta data */
bdev->md.md_size_sect = MD_RESERVED_SECT;
@@ -533,7 +726,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
case DRBD_MD_INDEX_FLEX_INT:
bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
/* al size is still fixed */
- bdev->md.al_offset = -MD_AL_MAX_SIZE;
+ bdev->md.al_offset = -MD_AL_SECTORS;
/* we need (slightly less than) ~ this much bitmap sectors: */
md_size_sect = drbd_get_capacity(bdev->backing_bdev);
md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -549,6 +742,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
break;
}
+ rcu_read_unlock();
}
/* input size is expected to be in KB */
@@ -581,10 +775,16 @@ char *ppsize(char *buf, unsigned long long size)
* R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
* peer may not initiate a resize.
*/
+/* Note these are not to be confused with
+ * drbd_adm_suspend_io/drbd_adm_resume_io,
+ * which are (sub) state changes triggered by admin (drbdsetup),
+ * and can be long lived.
+ * This changes an mdev->flag; it is triggered by drbd internals
+ * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
set_bit(SUSPEND_IO, &mdev->flags);
- if (is_susp(mdev->state))
+ if (drbd_suspended(mdev))
return;
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}
@@ -605,7 +805,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
- sector_t la_size;
+ sector_t la_size, u_size;
sector_t size;
char ppb[10];
@@ -633,7 +833,10 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
- size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+ size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
@@ -696,12 +899,12 @@ out:
}
sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ sector_t u_size, int assume_peer_has_space)
{
sector_t p_size = mdev->p_size; /* partner's disk size. */
sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
sector_t m_size; /* my size */
- sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
sector_t size = 0;
m_size = drbd_get_max_capacity(bdev);
@@ -750,24 +953,21 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int ass
* failed, and 0 on success. You should call drbd_md_sync() after you called
* this function.
*/
-static int drbd_check_al_size(struct drbd_conf *mdev)
+static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
struct lru_cache *n, *t;
struct lc_element *e;
unsigned int in_use;
int i;
- ERR_IF(mdev->sync_conf.al_extents < 7)
- mdev->sync_conf.al_extents = 127;
-
if (mdev->act_log &&
- mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
+ mdev->act_log->nr_elements == dc->al_extents)
return 0;
in_use = 0;
t = mdev->act_log;
- n = lc_create("act_log", drbd_al_ext_cache,
- mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
+ n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
+ dc->al_extents, sizeof(struct lc_element), 0);
if (n == NULL) {
dev_err(DEV, "Cannot allocate act_log lru!\n");
@@ -808,7 +1008,9 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
- max_segments = mdev->ldev->dc.max_bio_bvecs;
+ rcu_read_lock();
+ max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
+ rcu_read_unlock();
put_ldev(mdev);
}
@@ -852,12 +1054,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
- if (mdev->agreed_pro_version < 94) {
- peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ if (mdev->tconn->agreed_pro_version < 94)
+ peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
- } else if (mdev->agreed_pro_version == 94)
+ else if (mdev->tconn->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
- else /* drbd 8.3.8 onwards */
+ else if (mdev->tconn->agreed_pro_version < 100)
+ peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
+ else
peer = DRBD_MAX_BIO_SIZE;
}
@@ -872,36 +1076,27 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
drbd_setup_queue_param(mdev, new);
}
-/* serialize deconfig (worker exiting, doing cleanup)
- * and reconfig (drbdsetup disk, drbdsetup net)
- *
- * Wait for a potentially exiting worker, then restart it,
- * or start a new one. Flush any pending work, there may still be an
- * after_state_change queued.
- */
-static void drbd_reconfig_start(struct drbd_conf *mdev)
+/* Starts the worker thread */
+static void conn_reconfig_start(struct drbd_tconn *tconn)
{
- wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
- wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
- drbd_thread_start(&mdev->worker);
- drbd_flush_workqueue(mdev);
+ drbd_thread_start(&tconn->worker);
+ conn_flush_workqueue(tconn);
}
-/* if still unconfigured, stops worker again.
- * if configured now, clears CONFIG_PENDING.
- * wakes potential waiters */
-static void drbd_reconfig_done(struct drbd_conf *mdev)
+/* if still unconfigured, stops worker again. */
+static void conn_reconfig_done(struct drbd_tconn *tconn)
{
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.disk == D_DISKLESS &&
- mdev->state.conn == C_STANDALONE &&
- mdev->state.role == R_SECONDARY) {
- set_bit(DEVICE_DYING, &mdev->flags);
- drbd_thread_stop_nowait(&mdev->worker);
- } else
- clear_bit(CONFIG_PENDING, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
- wake_up(&mdev->state_wait);
+ bool stop_threads;
+ spin_lock_irq(&tconn->req_lock);
+ stop_threads = conn_all_vols_unconf(tconn) &&
+ tconn->cstate == C_STANDALONE;
+ spin_unlock_irq(&tconn->req_lock);
+ if (stop_threads) {
+ /* asender is implicitly stopped by receiver
+ * in conn_disconnect() */
+ drbd_thread_stop(&tconn->receiver);
+ drbd_thread_stop(&tconn->worker);
+ }
}
/* Make sure IO is suspended before calling this function(). */
@@ -909,42 +1104,187 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
{
int s = 0;
- if (lc_try_lock(mdev->act_log)) {
- drbd_al_shrink(mdev);
- lc_unlock(mdev->act_log);
- } else {
+ if (!lc_try_lock(mdev->act_log)) {
dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
return;
}
- spin_lock_irq(&mdev->req_lock);
+ drbd_al_shrink(mdev);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED)
s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
-
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ lc_unlock(mdev->act_log);
if (s)
dev_info(DEV, "Suspended AL updates\n");
}
-/* does always return 0;
- * interesting return code is in reply->ret_code */
-static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+
+static bool should_set_defaults(struct genl_info *info)
+{
+ unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
+ return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
+}
+
+static void enforce_disk_conf_limits(struct disk_conf *dc)
+{
+ if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
+ dc->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
+ dc->al_extents = DRBD_AL_EXTENTS_MAX;
+
+ if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+ dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+}
+
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
+ struct drbd_conf *mdev;
+ struct disk_conf *new_disk_conf, *old_disk_conf;
+ struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
+ int err, fifo_size;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
+
+ /* we also need a disk
+ * to change the options on */
+ if (!get_ldev(mdev)) {
+ retcode = ERR_NO_DISK;
+ goto out;
+ }
+
+ new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ if (should_set_defaults(info))
+ set_disk_conf_defaults(new_disk_conf);
+
+ err = disk_conf_from_attrs_for_change(new_disk_conf, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ }
+
+ if (!expect(new_disk_conf->resync_rate >= 1))
+ new_disk_conf->resync_rate = 1;
+
+ enforce_disk_conf_limits(new_disk_conf);
+
+ fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+ if (fifo_size != mdev->rs_plan_s->size) {
+ new_plan = fifo_alloc(fifo_size);
+ if (!new_plan) {
+ dev_err(DEV, "kmalloc of fifo_buffer failed");
+ retcode = ERR_NOMEM;
+ goto fail_unlock;
+ }
+ }
+
+ drbd_suspend_io(mdev);
+ wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+ drbd_al_shrink(mdev);
+ err = drbd_check_al_size(mdev, new_disk_conf);
+ lc_unlock(mdev->act_log);
+ wake_up(&mdev->al_wait);
+ drbd_resume_io(mdev);
+
+ if (err) {
+ retcode = ERR_NOMEM;
+ goto fail_unlock;
+ }
+
+ write_lock_irq(&global_state_lock);
+ retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ if (retcode == NO_ERROR) {
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ drbd_resync_after_changed(mdev);
+ }
+ write_unlock_irq(&global_state_lock);
+
+ if (retcode != NO_ERROR)
+ goto fail_unlock;
+
+ if (new_plan) {
+ old_plan = mdev->rs_plan_s;
+ rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ }
+
+ mutex_unlock(&mdev->tconn->conf_update);
+
+ if (new_disk_conf->al_updates)
+ mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ else
+ mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+ if (new_disk_conf->md_flushes)
+ clear_bit(MD_NO_FUA, &mdev->flags);
+ else
+ set_bit(MD_NO_FUA, &mdev->flags);
+
+ drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+
+ drbd_md_sync(mdev);
+
+ if (mdev->state.conn >= C_CONNECTED)
+ drbd_send_sync_param(mdev);
+
+ synchronize_rcu();
+ kfree(old_disk_conf);
+ kfree(old_plan);
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+ goto success;
+
+fail_unlock:
+ mutex_unlock(&mdev->tconn->conf_update);
+ fail:
+ kfree(new_disk_conf);
+ kfree(new_plan);
+success:
+ put_ldev(mdev);
+ out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
+
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
+{
+ struct drbd_conf *mdev;
+ int err;
+ enum drbd_ret_code retcode;
enum determine_dev_size dd;
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
+ struct disk_conf *new_disk_conf = NULL;
struct block_device *bdev;
struct lru_cache *resync_lru = NULL;
+ struct fifo_buffer *new_plan = NULL;
union drbd_state ns, os;
enum drbd_state_rv rv;
- int cp_discovered = 0;
- int logical_block_size;
+ struct net_conf *nc;
- drbd_reconfig_start(mdev);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto finish;
+
+ mdev = adm_ctx.mdev;
+ conn_reconfig_start(mdev->tconn);
/* if you want to reconfigure, please tear down first */
if (mdev->state.disk > D_DISKLESS) {
@@ -959,47 +1299,65 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* make sure there is no leftover from previous force-detach attempts */
clear_bit(FORCE_DETACH, &mdev->flags);
+ clear_bit(WAS_IO_ERROR, &mdev->flags);
+ clear_bit(WAS_READ_ERROR, &mdev->flags);
/* and no leftover from previously aborted resync or verify, either */
mdev->rs_total = 0;
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);
- /* allocation not in the IO path, cqueue thread context */
+ /* allocation not in the IO path, drbdsetup context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
if (!nbc) {
retcode = ERR_NOMEM;
goto fail;
}
+ spin_lock_init(&nbc->md.uuid_lock);
- nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF;
- nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
- nbc->dc.fencing = DRBD_FENCING_DEF;
- nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+ nbc->disk_conf = new_disk_conf;
- if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
+ set_disk_conf_defaults(new_disk_conf);
+ err = disk_conf_from_attrs(new_disk_conf, info);
+ if (err) {
retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+ enforce_disk_conf_limits(new_disk_conf);
+
+ new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
+ if (!new_plan) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+
+ if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}
- if (get_net_conf(mdev)) {
- int prot = mdev->net_conf->wire_protocol;
- put_net_conf(mdev);
- if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc) {
+ if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
+ rcu_read_unlock();
retcode = ERR_STONITH_AND_PROT_A;
goto fail;
}
}
+ rcu_read_unlock();
- bdev = blkdev_get_by_path(nbc->dc.backing_dev,
+ bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
+ dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_DISK;
goto fail;
@@ -1014,12 +1372,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
- bdev = blkdev_get_by_path(nbc->dc.meta_dev,
+ bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL,
- (nbc->dc.meta_dev_idx < 0) ?
+ (new_disk_conf->meta_dev_idx < 0) ?
(void *)mdev : (void *)drbd_m_holder);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
+ dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_MD_DISK;
goto fail;
@@ -1027,14 +1385,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc->md_bdev = bdev;
if ((nbc->backing_bdev == nbc->md_bdev) !=
- (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
- nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+ (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+ new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}
resync_lru = lc_create("resync", drbd_bm_ext_cache,
- 61, sizeof(struct bm_extent),
+ 1, 61, sizeof(struct bm_extent),
offsetof(struct bm_extent, lce));
if (!resync_lru) {
retcode = ERR_NOMEM;
@@ -1044,21 +1402,21 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
drbd_md_set_sector_offsets(mdev, nbc);
- if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
+ if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
- (unsigned long long) nbc->dc.disk_size);
+ (unsigned long long) new_disk_conf->disk_size);
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
- if (nbc->dc.meta_dev_idx < 0) {
+ if (new_disk_conf->meta_dev_idx < 0) {
max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
/* at least one MB, otherwise it does not make sense */
min_md_device_sectors = (2<<10);
} else {
max_possible_sectors = DRBD_MAX_SECTORS;
- min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
+ min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
@@ -1083,14 +1441,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_warn(DEV, "==> truncating very big lower level device "
"to currently maximum possible %llu sectors <==\n",
(unsigned long long) max_possible_sectors);
- if (nbc->dc.meta_dev_idx >= 0)
+ if (new_disk_conf->meta_dev_idx >= 0)
dev_warn(DEV, "==>> using internal or flexible "
"meta data may help <<==\n");
}
drbd_suspend_io(mdev);
/* also wait for the last barrier ack. */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
+ /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
+ * We need a way to either ignore barrier acks for barriers sent before a device
+ * was attached, or a way to wait for all pending barrier acks to come in.
+ * As barriers are counted per resource,
+ * we'd need to suspend io on all devices of a resource.
+ */
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
/* and for any other previously queued work */
drbd_flush_workqueue(mdev);
@@ -1105,25 +1469,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
drbd_md_set_sector_offsets(mdev, nbc);
- /* allocate a second IO page if logical_block_size != 512 */
- logical_block_size = bdev_logical_block_size(nbc->md_bdev);
- if (logical_block_size == 0)
- logical_block_size = MD_SECTOR_SIZE;
-
- if (logical_block_size != MD_SECTOR_SIZE) {
- if (!mdev->md_io_tmpp) {
- struct page *page = alloc_page(GFP_NOIO);
- if (!page)
- goto force_diskless_dec;
-
- dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
- logical_block_size, MD_SECTOR_SIZE);
- dev_warn(DEV, "Workaround engaged (has performance impact).\n");
-
- mdev->md_io_tmpp = page;
- }
- }
-
if (!mdev->bitmap) {
if (drbd_bm_init(mdev)) {
retcode = ERR_NOMEM;
@@ -1145,30 +1490,25 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
/* Since we are diskless, fix the activity log first... */
- if (drbd_check_al_size(mdev)) {
+ if (drbd_check_al_size(mdev, new_disk_conf)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
/* Prevent shrinking of consistent devices ! */
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
- drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
+ drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
- if (!drbd_al_read_log(mdev, nbc)) {
- retcode = ERR_IO_MD_DISK;
- goto force_diskless_dec;
- }
-
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
- if (nbc->dc.no_md_flush)
- set_bit(MD_NO_FUA, &mdev->flags);
- else
+ if (new_disk_conf->md_flushes)
clear_bit(MD_NO_FUA, &mdev->flags);
+ else
+ set_bit(MD_NO_FUA, &mdev->flags);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
@@ -1177,11 +1517,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
D_ASSERT(mdev->ldev == NULL);
mdev->ldev = nbc;
mdev->resync = resync_lru;
+ mdev->rs_plan_s = new_plan;
nbc = NULL;
resync_lru = NULL;
+ new_disk_conf = NULL;
+ new_plan = NULL;
- mdev->write_ordering = WO_bdev_flush;
- drbd_bump_write_ordering(mdev, WO_bdev_flush);
+ drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1189,10 +1531,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
clear_bit(CRASHED_PRIMARY, &mdev->flags);
if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
- !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
+ !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
set_bit(CRASHED_PRIMARY, &mdev->flags);
- cp_discovered = 1;
- }
mdev->send_cnt = 0;
mdev->recv_cnt = 0;
@@ -1228,7 +1568,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
} else if (dd == grew)
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
+ if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
+ (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
+ drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
dev_info(DEV, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
@@ -1238,16 +1580,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
} else {
if (drbd_bitmap_io(mdev, &drbd_bm_read,
- "read from attaching", BM_LOCKED_MASK) < 0) {
- retcode = ERR_IO_MD_DISK;
- goto force_diskless_dec;
- }
- }
-
- if (cp_discovered) {
- drbd_al_apply_to_bm(mdev);
- if (drbd_bitmap_io(mdev, &drbd_bm_write,
- "crashed primary apply AL", BM_LOCKED_MASK)) {
+ "read from attaching", BM_LOCKED_MASK)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
@@ -1256,9 +1589,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
drbd_suspend_al(mdev); /* IO is still suspended here... */
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
- ns.i = os.i;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ os = drbd_read_state(mdev);
+ ns = os;
/* If MDF_CONSISTENT is not set go into inconsistent state,
otherwise investigate MDF_WasUpToDate...
If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
@@ -1276,8 +1609,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
ns.pdsk = D_OUTDATED;
- if ( ns.disk == D_CONSISTENT &&
- (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
+ rcu_read_lock();
+ if (ns.disk == D_CONSISTENT &&
+ (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
ns.disk = D_UP_TO_DATE;
/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
@@ -1285,6 +1619,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
this point, because drbd_request_state() modifies these
flags. */
+ if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
+ mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ else
+ mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+ rcu_read_unlock();
+
/* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
if (mdev->state.conn == C_CONNECTED) {
@@ -1300,12 +1641,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS)
goto force_diskless_dec;
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+
if (mdev->state.role == R_PRIMARY)
mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
else
@@ -1316,16 +1658,17 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(mdev);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ conn_reconfig_done(mdev->tconn);
+ drbd_adm_finish(info, retcode);
return 0;
force_diskless_dec:
put_ldev(mdev);
force_diskless:
- drbd_force_state(mdev, NS(disk, D_FAILED));
+ drbd_force_state(mdev, NS(disk, D_DISKLESS));
drbd_md_sync(mdev);
fail:
+ conn_reconfig_done(mdev->tconn);
if (nbc) {
if (nbc->backing_bdev)
blkdev_put(nbc->backing_bdev,
@@ -1335,34 +1678,24 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
FMODE_READ | FMODE_WRITE | FMODE_EXCL);
kfree(nbc);
}
+ kfree(new_disk_conf);
lc_destroy(resync_lru);
+ kfree(new_plan);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ finish:
+ drbd_adm_finish(info, retcode);
return 0;
}
-/* Detaching the disk is a process in multiple stages. First we need to lock
- * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
- * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
- * internal references as well.
- * Only then we have finally detached. */
-static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+static int adm_detach(struct drbd_conf *mdev, int force)
{
- enum drbd_ret_code retcode;
+ enum drbd_state_rv retcode;
int ret;
- struct detach dt = {};
- if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- goto out;
- }
-
- if (dt.detach_force) {
+ if (force) {
set_bit(FORCE_DETACH, &mdev->flags);
drbd_force_state(mdev, NS(disk, D_FAILED));
- reply->ret_code = SS_SUCCESS;
+ retcode = SS_SUCCESS;
goto out;
}
@@ -1374,326 +1707,529 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
ret = wait_event_interruptible(mdev->misc_wait,
mdev->state.disk != D_FAILED);
drbd_resume_io(mdev);
-
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
retcode = ERR_INTR;
- reply->ret_code = retcode;
out:
- return 0;
+ return retcode;
}
-static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+/* Detaching the disk is a process in multiple stages. First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then we have finally detached. */
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
- int i, ns;
enum drbd_ret_code retcode;
- struct net_conf *new_conf = NULL;
- struct crypto_hash *tfm = NULL;
- struct crypto_hash *integrity_w_tfm = NULL;
- struct crypto_hash *integrity_r_tfm = NULL;
- struct hlist_head *new_tl_hash = NULL;
- struct hlist_head *new_ee_hash = NULL;
- struct drbd_conf *odev;
- char hmac_name[CRYPTO_MAX_ALG_NAME];
- void *int_dig_out = NULL;
- void *int_dig_in = NULL;
- void *int_dig_vv = NULL;
- struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
+ struct detach_parms parms = { };
+ int err;
- drbd_reconfig_start(mdev);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- if (mdev->state.conn > C_STANDALONE) {
- retcode = ERR_NET_CONFIGURED;
- goto fail;
+ if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
+ err = detach_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
}
- /* allocation not in the IO path, cqueue thread context */
+ retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
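Every handler converted by this patch follows the same genetlink skeleton: drbd_adm_prepare() resolves the target object and allocates the reply skb, optional parameters are parsed from info->attrs, and drbd_adm_finish() sends the reply. A minimal sketch of that skeleton (the command name, parameter struct and from_attrs helper here are placeholders, not symbols introduced by the patch):

int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct example_parms parms = { };	/* hypothetical parameter struct */
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)			/* prepare failed before a reply skb existed */
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_EXAMPLE_PARMS]) {	/* hypothetical attribute */
		err = example_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	/* act on adm_ctx.mdev / adm_ctx.tconn using parms */
out:
	drbd_adm_finish(info, retcode);
	return 0;
}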
+
+static bool conn_resync_running(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = false;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_SYNC_SOURCE ||
+ mdev->state.conn == C_SYNC_TARGET ||
+ mdev->state.conn == C_PAUSED_SYNC_S ||
+ mdev->state.conn == C_PAUSED_SYNC_T) {
+ rv = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+static bool conn_ov_running(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = false;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_VERIFY_S ||
+ mdev->state.conn == C_VERIFY_T) {
+ rv = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+static enum drbd_ret_code
+_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
+{
+ struct drbd_conf *mdev;
+ int i;
+
+ if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
+ if (new_conf->wire_protocol != old_conf->wire_protocol)
+ return ERR_NEED_APV_100;
+
+ if (new_conf->two_primaries != old_conf->two_primaries)
+ return ERR_NEED_APV_100;
+
+ if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
+ return ERR_NEED_APV_100;
+ }
+
+ if (!new_conf->two_primaries &&
+ conn_highest_role(tconn) == R_PRIMARY &&
+ conn_highest_peer(tconn) == R_PRIMARY)
+ return ERR_NEED_ALLOW_TWO_PRI;
+
+ if (new_conf->two_primaries &&
+ (new_conf->wire_protocol != DRBD_PROT_C))
+ return ERR_NOT_PROTO_C;
+
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ if (get_ldev(mdev)) {
+ enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ put_ldev(mdev);
+ if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
+ return ERR_STONITH_AND_PROT_A;
+ }
+ if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
+ return ERR_DISCARD_IMPOSSIBLE;
+ }
+
+ if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
+ return ERR_CONG_NOT_PROTO_A;
+
+ return NO_ERROR;
+}
+
+static enum drbd_ret_code
+check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
+{
+ static enum drbd_ret_code rv;
+ struct drbd_conf *mdev;
+ int i;
+
+ rcu_read_lock();
+ rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
+ rcu_read_unlock();
+
+ /* tconn->volumes protected by genl_lock() here */
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ if (!mdev->bitmap) {
+ if(drbd_bm_init(mdev))
+ return ERR_NOMEM;
+ }
+ }
+
+ return rv;
+}
+
+struct crypto {
+ struct crypto_hash *verify_tfm;
+ struct crypto_hash *csums_tfm;
+ struct crypto_hash *cram_hmac_tfm;
+ struct crypto_hash *integrity_tfm;
+};
+
+static int
+alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
+{
+ if (!tfm_name[0])
+ return NO_ERROR;
+
+ *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(*tfm)) {
+ *tfm = NULL;
+ return err_alg;
+ }
+
+ return NO_ERROR;
+}
+
+static enum drbd_ret_code
+alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
+{
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ enum drbd_ret_code rv;
+
+ rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
+ ERR_CSUMS_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
+ ERR_VERIFY_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
+ ERR_INTEGRITY_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ if (new_conf->cram_hmac_alg[0] != 0) {
+ snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
+ new_conf->cram_hmac_alg);
+
+ rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
+ ERR_AUTH_ALG);
+ }
+
+ return rv;
+}
+
+static void free_crypto(struct crypto *crypto)
+{
+ crypto_free_hash(crypto->cram_hmac_tfm);
+ crypto_free_hash(crypto->integrity_tfm);
+ crypto_free_hash(crypto->csums_tfm);
+ crypto_free_hash(crypto->verify_tfm);
+}
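The struct crypto bundle acts as a staging area: alloc_crypto() fills the slots, the caller moves the transforms it wants to keep into the tconn and clears the moved pointers, and free_crypto() on the exit path releases only what was not adopted. Condensed from the net-options and connect handlers below, roughly:

	struct crypto crypto = { };

	retcode = alloc_crypto(&crypto, new_conf);	/* fills the tfm slots, or leaves them NULL */
	if (retcode != NO_ERROR)
		goto fail;

	/* adopt what we keep; clearing the slot keeps free_crypto() from releasing it */
	crypto_free_hash(tconn->verify_tfm);
	tconn->verify_tfm = crypto.verify_tfm;
	crypto.verify_tfm = NULL;
	/* same for the other transforms we want to take over */
fail:
	free_crypto(&crypto);	/* unadopted slots are released; adopted ones were set to NULL above */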
+
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct drbd_tconn *tconn;
+ struct net_conf *old_conf, *new_conf = NULL;
+ int err;
+ int ovr; /* online verify running */
+ int rsr; /* re-sync running */
+ struct crypto crypto = { };
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ tconn = adm_ctx.tconn;
+
new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_conf) {
retcode = ERR_NOMEM;
+ goto out;
+ }
+
+ conn_reconfig_start(tconn);
+
+ mutex_lock(&tconn->data.mutex);
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+
+ if (!old_conf) {
+ drbd_msg_put_info("net conf missing, try connect");
+ retcode = ERR_INVALID_REQUEST;
goto fail;
}
- new_conf->timeout = DRBD_TIMEOUT_DEF;
- new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
- new_conf->ping_int = DRBD_PING_INT_DEF;
- new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
- new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
- new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
- new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
- new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
- new_conf->ko_count = DRBD_KO_COUNT_DEF;
- new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
- new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
- new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
- new_conf->want_lose = 0;
- new_conf->two_primaries = 0;
- new_conf->wire_protocol = DRBD_PROT_C;
- new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
- new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;
- new_conf->on_congestion = DRBD_ON_CONGESTION_DEF;
- new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF;
-
- if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
+ *new_conf = *old_conf;
+ if (should_set_defaults(info))
+ set_net_conf_defaults(new_conf);
+
+ err = net_conf_from_attrs_for_change(new_conf, info);
+ if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- if (new_conf->two_primaries
- && (new_conf->wire_protocol != DRBD_PROT_C)) {
- retcode = ERR_NOT_PROTO_C;
+ retcode = check_net_options(tconn, new_conf);
+ if (retcode != NO_ERROR)
goto fail;
- }
- if (get_ldev(mdev)) {
- enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
- retcode = ERR_STONITH_AND_PROT_A;
- goto fail;
- }
+ /* re-sync running */
+ rsr = conn_resync_running(tconn);
+ if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
+ retcode = ERR_CSUMS_RESYNC_RUNNING;
+ goto fail;
}
- if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
- retcode = ERR_CONG_NOT_PROTO_A;
+ /* online verify running */
+ ovr = conn_ov_running(tconn);
+ if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
+ retcode = ERR_VERIFY_RUNNING;
goto fail;
}
- if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
- retcode = ERR_DISCARD;
+ retcode = alloc_crypto(&crypto, new_conf);
+ if (retcode != NO_ERROR)
goto fail;
- }
- retcode = NO_ERROR;
+ rcu_assign_pointer(tconn->net_conf, new_conf);
- new_my_addr = (struct sockaddr *)&new_conf->my_addr;
- new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev || odev == mdev)
- continue;
- if (get_net_conf(odev)) {
- taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
- if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
- !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
- retcode = ERR_LOCAL_ADDR;
-
- taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
- if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
- !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
- retcode = ERR_PEER_ADDR;
-
- put_net_conf(odev);
- if (retcode != NO_ERROR)
- goto fail;
- }
+ if (!rsr) {
+ crypto_free_hash(tconn->csums_tfm);
+ tconn->csums_tfm = crypto.csums_tfm;
+ crypto.csums_tfm = NULL;
+ }
+ if (!ovr) {
+ crypto_free_hash(tconn->verify_tfm);
+ tconn->verify_tfm = crypto.verify_tfm;
+ crypto.verify_tfm = NULL;
}
- if (new_conf->cram_hmac_alg[0] != 0) {
- snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- new_conf->cram_hmac_alg);
- tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- tfm = NULL;
- retcode = ERR_AUTH_ALG;
- goto fail;
- }
+ crypto_free_hash(tconn->integrity_tfm);
+ tconn->integrity_tfm = crypto.integrity_tfm;
+ if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
+ /* Do this without trying to take tconn->data.mutex again. */
+ __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
- if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
- retcode = ERR_AUTH_ALG_ND;
- goto fail;
- }
- }
+ crypto_free_hash(tconn->cram_hmac_tfm);
+ tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
- if (new_conf->integrity_alg[0]) {
- integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(integrity_w_tfm)) {
- integrity_w_tfm = NULL;
- retcode=ERR_INTEGRITY_ALG;
- goto fail;
- }
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+ synchronize_rcu();
+ kfree(old_conf);
- if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
- retcode=ERR_INTEGRITY_ALG_ND;
- goto fail;
- }
+ if (tconn->cstate >= C_WF_REPORT_PARAMS)
+ drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
- integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(integrity_r_tfm)) {
- integrity_r_tfm = NULL;
- retcode=ERR_INTEGRITY_ALG;
- goto fail;
- }
+ goto done;
+
+ fail:
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+ free_crypto(&crypto);
+ kfree(new_conf);
+ done:
+ conn_reconfig_done(tconn);
+ out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
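The update path above is the RCU pattern this patch applies to every configuration object (net_conf here, disk_conf in the attach and resize paths): copy the old struct into a freshly allocated one, modify the copy, publish it with rcu_assign_pointer() while holding conf_update, then synchronize_rcu() before kfree()ing the old copy so readers still inside rcu_read_lock() never see freed memory. Reduced to its core (new_conf already kmalloc'd):

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;			/* writers are serialized by conf_update */
	*new_conf = *old_conf;				/* start from the current settings */
	/* apply the requested changes to new_conf here */
	rcu_assign_pointer(tconn->net_conf, new_conf);	/* readers now see the new struct */
	mutex_unlock(&tconn->conf_update);

	synchronize_rcu();				/* wait out readers still under rcu_read_lock() */
	kfree(old_conf);				/* safe: no reader can still reference it */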
+
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
+{
+ struct drbd_conf *mdev;
+ struct net_conf *old_conf, *new_conf = NULL;
+ struct crypto crypto = { };
+ struct drbd_tconn *tconn;
+ enum drbd_ret_code retcode;
+ int i;
+ int err;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+ if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
+ drbd_msg_put_info("connection endpoint(s) missing");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
}
- ns = new_conf->max_epoch_size/8;
- if (mdev->tl_hash_s != ns) {
- new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
- if (!new_tl_hash) {
- retcode = ERR_NOMEM;
- goto fail;
+ /* No need for _rcu here. All reconfiguration is
+ * strictly serialized on genl_lock(). We are protected against
+ * concurrent reconfiguration/addition/deletion */
+ list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
+ if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
+ !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
+ retcode = ERR_LOCAL_ADDR;
+ goto out;
}
- }
- ns = new_conf->max_buffers/8;
- if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
- new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
- if (!new_ee_hash) {
- retcode = ERR_NOMEM;
- goto fail;
+ if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
+ !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
+ retcode = ERR_PEER_ADDR;
+ goto out;
}
}
- ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+ tconn = adm_ctx.tconn;
+ conn_reconfig_start(tconn);
- if (integrity_w_tfm) {
- i = crypto_hash_digestsize(integrity_w_tfm);
- int_dig_out = kmalloc(i, GFP_KERNEL);
- if (!int_dig_out) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- int_dig_in = kmalloc(i, GFP_KERNEL);
- if (!int_dig_in) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- int_dig_vv = kmalloc(i, GFP_KERNEL);
- if (!int_dig_vv) {
- retcode = ERR_NOMEM;
- goto fail;
- }
+ if (tconn->cstate > C_STANDALONE) {
+ retcode = ERR_NET_CONFIGURED;
+ goto fail;
}
- if (!mdev->bitmap) {
- if(drbd_bm_init(mdev)) {
- retcode = ERR_NOMEM;
- goto fail;
- }
+ /* allocation not in the IO path, drbdsetup / netlink process context */
+ new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
+ if (!new_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
}
- drbd_flush_workqueue(mdev);
- spin_lock_irq(&mdev->req_lock);
- if (mdev->net_conf != NULL) {
- retcode = ERR_NET_CONFIGURED;
- spin_unlock_irq(&mdev->req_lock);
+ set_net_conf_defaults(new_conf);
+
+ err = net_conf_from_attrs(new_conf, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- mdev->net_conf = new_conf;
- mdev->send_cnt = 0;
- mdev->recv_cnt = 0;
+ retcode = check_net_options(tconn, new_conf);
+ if (retcode != NO_ERROR)
+ goto fail;
- if (new_tl_hash) {
- kfree(mdev->tl_hash);
- mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
- mdev->tl_hash = new_tl_hash;
- }
+ retcode = alloc_crypto(&crypto, new_conf);
+ if (retcode != NO_ERROR)
+ goto fail;
+
+ ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+
+ conn_flush_workqueue(tconn);
- if (new_ee_hash) {
- kfree(mdev->ee_hash);
- mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
- mdev->ee_hash = new_ee_hash;
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+ if (old_conf) {
+ retcode = ERR_NET_CONFIGURED;
+ mutex_unlock(&tconn->conf_update);
+ goto fail;
}
+ rcu_assign_pointer(tconn->net_conf, new_conf);
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = tfm;
+ conn_free_crypto(tconn);
+ tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
+ tconn->integrity_tfm = crypto.integrity_tfm;
+ tconn->csums_tfm = crypto.csums_tfm;
+ tconn->verify_tfm = crypto.verify_tfm;
- crypto_free_hash(mdev->integrity_w_tfm);
- mdev->integrity_w_tfm = integrity_w_tfm;
+ tconn->my_addr_len = nla_len(adm_ctx.my_addr);
+ memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
+ tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
+ memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
- crypto_free_hash(mdev->integrity_r_tfm);
- mdev->integrity_r_tfm = integrity_r_tfm;
+ mutex_unlock(&tconn->conf_update);
- kfree(mdev->int_dig_out);
- kfree(mdev->int_dig_in);
- kfree(mdev->int_dig_vv);
- mdev->int_dig_out=int_dig_out;
- mdev->int_dig_in=int_dig_in;
- mdev->int_dig_vv=int_dig_vv;
- retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ mdev->send_cnt = 0;
+ mdev->recv_cnt = 0;
+ }
+ rcu_read_unlock();
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+
+ conn_reconfig_done(tconn);
+ drbd_adm_finish(info, retcode);
return 0;
fail:
- kfree(int_dig_out);
- kfree(int_dig_in);
- kfree(int_dig_vv);
- crypto_free_hash(tfm);
- crypto_free_hash(integrity_w_tfm);
- crypto_free_hash(integrity_r_tfm);
- kfree(new_tl_hash);
- kfree(new_ee_hash);
+ free_crypto(&crypto);
kfree(new_conf);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ conn_reconfig_done(tconn);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
- int retcode;
- struct disconnect dc;
-
- memset(&dc, 0, sizeof(struct disconnect));
- if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
- retcode = ERR_MANDATORY_TAG;
- goto fail;
- }
-
- if (dc.force) {
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.conn >= C_WF_CONNECTION)
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
- spin_unlock_irq(&mdev->req_lock);
- goto done;
- }
+ enum drbd_state_rv rv;
- retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);
+ rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ force ? CS_HARD : 0);
- if (retcode == SS_NOTHING_TO_DO)
- goto done;
- else if (retcode == SS_ALREADY_STANDALONE)
- goto done;
- else if (retcode == SS_PRIMARY_NOP) {
- /* Our statche checking code wants to see the peer outdated. */
- retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
- pdsk, D_OUTDATED));
- } else if (retcode == SS_CW_FAILED_BY_PEER) {
+ switch (rv) {
+ case SS_NOTHING_TO_DO:
+ break;
+ case SS_ALREADY_STANDALONE:
+ return SS_SUCCESS;
+ case SS_PRIMARY_NOP:
+ /* Our state checking code wants to see the peer outdated. */
+ rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+ pdsk, D_OUTDATED), CS_VERBOSE);
+ break;
+ case SS_CW_FAILED_BY_PEER:
/* The peer probably wants to see us outdated. */
- retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
- disk, D_OUTDATED),
- CS_ORDERED);
- if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- retcode = SS_SUCCESS;
+ rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+ disk, D_OUTDATED), 0);
+ if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
+ rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ CS_HARD);
}
+ break;
+ default:;
+ /* no special handling necessary */
+ }
+
+ if (rv >= SS_SUCCESS) {
+ enum drbd_state_rv rv2;
+ /* No one else can reconfigure the network while I am here.
+ * The state handling only uses drbd_thread_stop_nowait(),
+ * we want to really wait here until the receiver is no more.
+ */
+ drbd_thread_stop(&adm_ctx.tconn->receiver);
+
+ /* Race breaker. This additional state change request may be
+ * necessary, if this was a forced disconnect during a receiver
+ * restart. We may have "killed" the receiver thread just
+ * after drbdd_init() returned. Typically, we should be
+ * C_STANDALONE already, now, and this becomes a no-op.
+ */
+ rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
+ CS_VERBOSE | CS_HARD);
+ if (rv2 < SS_SUCCESS)
+ conn_err(tconn,
+ "unexpected rv2=%d in conn_try_disconnect()\n",
+ rv2);
}
+ return rv;
+}
- if (retcode < SS_SUCCESS)
- goto fail;
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
+{
+ struct disconnect_parms parms;
+ struct drbd_tconn *tconn;
+ enum drbd_state_rv rv;
+ enum drbd_ret_code retcode;
+ int err;
- if (wait_event_interruptible(mdev->state_wait,
- mdev->state.conn != C_DISCONNECTING)) {
- /* Do not test for mdev->state.conn == C_STANDALONE, since
- someone else might connect us in the mean time! */
- retcode = ERR_INTR;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
goto fail;
+
+ tconn = adm_ctx.tconn;
+ memset(&parms, 0, sizeof(parms));
+ if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
+ err = disconnect_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
+ }
}
- done:
- retcode = NO_ERROR;
+ rv = conn_try_disconnect(tconn, parms.force_disconnect);
+ if (rv < SS_SUCCESS)
+ retcode = rv; /* FIXME: Type mismatch. */
+ else
+ retcode = NO_ERROR;
fail:
- drbd_md_sync(mdev);
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
}
@@ -1705,7 +2241,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
if (mdev->state.role != mdev->state.peer)
iass = (mdev->state.role == R_PRIMARY);
else
- iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
if (iass)
drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1713,20 +2249,34 @@ void resync_after_online_grow(struct drbd_conf *mdev)
_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
-static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
- struct resize rs;
- int retcode = NO_ERROR;
+ struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
+ struct resize_parms rs;
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
enum determine_dev_size dd;
enum dds_flags ddsf;
+ sector_t u_size;
+ int err;
- memset(&rs, 0, sizeof(struct resize));
- if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
- retcode = ERR_MANDATORY_TAG;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
goto fail;
+
+ memset(&rs, 0, sizeof(struct resize_parms));
+ if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
+ err = resize_parms_from_attrs(&rs, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
+ }
}
+ mdev = adm_ctx.mdev;
if (mdev->state.conn > C_CONNECTED) {
retcode = ERR_RESIZE_RESYNC;
goto fail;
@@ -1743,15 +2293,36 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (rs.no_resync && mdev->agreed_pro_version < 93) {
+ if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
goto fail_ldev;
}
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+ if (u_size != (sector_t)rs.resize_size) {
+ new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail_ldev;
+ }
+ }
+
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
- mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
+ if (new_disk_conf) {
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ new_disk_conf->disk_size = (sector_t)rs.resize_size;
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ kfree(old_disk_conf);
+ }
+
ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
dd = drbd_determine_dev_size(mdev, ddsf);
drbd_md_sync(mdev);
@@ -1770,7 +2341,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
fail:
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
fail_ldev:
@@ -1778,204 +2349,55 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
-static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ enum drbd_ret_code retcode;
+ struct drbd_tconn *tconn;
+ struct res_opts res_opts;
int err;
- int ovr; /* online verify running */
- int rsr; /* re-sync running */
- struct crypto_hash *verify_tfm = NULL;
- struct crypto_hash *csums_tfm = NULL;
- struct syncer_conf sc;
- cpumask_var_t new_cpu_mask;
- int *rs_plan_s = NULL;
- int fifo_size;
-
- if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
- memset(&sc, 0, sizeof(struct syncer_conf));
- sc.rate = DRBD_RATE_DEF;
- sc.after = DRBD_AFTER_DEF;
- sc.al_extents = DRBD_AL_EXTENTS_DEF;
- sc.on_no_data = DRBD_ON_NO_DATA_DEF;
- sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
- sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
- sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
- sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
- sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
- } else
- memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
-
- if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
- retcode = ERR_MANDATORY_TAG;
- goto fail;
- }
-
- /* re-sync running */
- rsr = ( mdev->state.conn == C_SYNC_SOURCE ||
- mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_PAUSED_SYNC_S ||
- mdev->state.conn == C_PAUSED_SYNC_T );
-
- if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
- retcode = ERR_CSUMS_RESYNC_RUNNING;
- goto fail;
- }
-
- if (!rsr && sc.csums_alg[0]) {
- csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(csums_tfm)) {
- csums_tfm = NULL;
- retcode = ERR_CSUMS_ALG;
- goto fail;
- }
-
- if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
- retcode = ERR_CSUMS_ALG_ND;
- goto fail;
- }
- }
-
- /* online verify running */
- ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
-
- if (ovr) {
- if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
- retcode = ERR_VERIFY_RUNNING;
- goto fail;
- }
- }
-
- if (!ovr && sc.verify_alg[0]) {
- verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(verify_tfm)) {
- verify_tfm = NULL;
- retcode = ERR_VERIFY_ALG;
- goto fail;
- }
-
- if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
- retcode = ERR_VERIFY_ALG_ND;
- goto fail;
- }
- }
-
- /* silently ignore cpu mask on UP kernel */
- if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
- err = bitmap_parse(sc.cpu_mask, 32,
- cpumask_bits(new_cpu_mask), nr_cpu_ids);
- if (err) {
- dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
- retcode = ERR_CPU_MASK_PARSE;
- goto fail;
- }
- }
-
- ERR_IF (sc.rate < 1) sc.rate = 1;
- ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
-#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
- if (sc.al_extents > AL_MAX) {
- dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
- sc.al_extents = AL_MAX;
- }
-#undef AL_MAX
-
- /* to avoid spurious errors when configuring minors before configuring
- * the minors they depend on: if necessary, first create the minor we
- * depend on */
- if (sc.after >= 0)
- ensure_mdev(sc.after, 1);
-
- /* most sanity checks done, try to assign the new sync-after
- * dependency. need to hold the global lock in there,
- * to avoid a race in the dependency loop check. */
- retcode = drbd_alter_sa(mdev, sc.after);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
if (retcode != NO_ERROR)
goto fail;
+ tconn = adm_ctx.tconn;
- fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
- rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
- if (!rs_plan_s) {
- dev_err(DEV, "kmalloc of fifo_buffer failed");
- retcode = ERR_NOMEM;
- goto fail;
- }
- }
+ res_opts = tconn->res_opts;
+ if (should_set_defaults(info))
+ set_res_opts_defaults(&res_opts);
- /* ok, assign the rest of it as well.
- * lock against receive_SyncParam() */
- spin_lock(&mdev->peer_seq_lock);
- mdev->sync_conf = sc;
-
- if (!rsr) {
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = csums_tfm;
- csums_tfm = NULL;
- }
-
- if (!ovr) {
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = verify_tfm;
- verify_tfm = NULL;
- }
-
- if (fifo_size != mdev->rs_plan_s.size) {
- kfree(mdev->rs_plan_s.values);
- mdev->rs_plan_s.values = rs_plan_s;
- mdev->rs_plan_s.size = fifo_size;
- mdev->rs_planed = 0;
- rs_plan_s = NULL;
+ err = res_opts_from_attrs(&res_opts, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
}
- spin_unlock(&mdev->peer_seq_lock);
-
- if (get_ldev(mdev)) {
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
- drbd_al_shrink(mdev);
- err = drbd_check_al_size(mdev);
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
-
- put_ldev(mdev);
- drbd_md_sync(mdev);
-
- if (err) {
+ err = set_resource_options(tconn, &res_opts);
+ if (err) {
+ retcode = ERR_INVALID_REQUEST;
+ if (err == -ENOMEM)
retcode = ERR_NOMEM;
- goto fail;
- }
}
- if (mdev->state.conn >= C_CONNECTED)
- drbd_send_sync_param(mdev, &sc);
-
- if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
- cpumask_copy(mdev->cpu_mask, new_cpu_mask);
- drbd_calc_cpu_mask(mdev);
- mdev->receiver.reset_cpu_mask = 1;
- mdev->asender.reset_cpu_mask = 1;
- mdev->worker.reset_cpu_mask = 1;
- }
-
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
- kfree(rs_plan_s);
- free_cpumask_var(new_cpu_mask);
- crypto_free_hash(csums_tfm);
- crypto_free_hash(verify_tfm);
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
- int retcode;
+ struct drbd_conf *mdev;
+ int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
@@ -1990,10 +2412,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
while (retcode == SS_NEED_CONNECTION) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED)
retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (retcode != SS_NEED_CONNECTION)
break;
@@ -2002,7 +2424,25 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
}
drbd_resume_io(mdev);
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
+
+static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
+ union drbd_state mask, union drbd_state val)
+{
+ enum drbd_ret_code retcode;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
@@ -2015,10 +2455,18 @@ static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
return rv;
}
-static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
- int retcode;
+ int retcode; /* drbd_ret_code, drbd_state_rv */
+ struct drbd_conf *mdev;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
@@ -2028,16 +2476,15 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
-
if (retcode < SS_SUCCESS) {
if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
- /* The peer will get a resync upon connect anyways. Just make that
- into a full resync. */
+ /* The peer will get a resync upon connect anyways.
+ * Just make that into a full resync. */
retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
- "set_n_write from invalidate_peer",
- BM_LOCKED_SET_ALLOWED))
+ "set_n_write from invalidate_peer",
+ BM_LOCKED_SET_ALLOWED))
retcode = ERR_IO_MD_DISK;
}
} else
@@ -2045,30 +2492,41 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
}
drbd_resume_io(mdev);
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ enum drbd_ret_code retcode;
- if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
- retcode = ERR_PAUSE_IS_SET;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- reply->ret_code = retcode;
+ if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+ retcode = ERR_PAUSE_IS_SET;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
- union drbd_state s;
+ union drbd_dev_state s;
+ enum drbd_ret_code retcode;
- if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
- s = mdev->state;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+ s = adm_ctx.mdev->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
@@ -2077,172 +2535,482 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
}
}
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
- reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
-
- return 0;
+ return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
-static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_conf *mdev;
+ int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
}
drbd_suspend_io(mdev);
- reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
- if (reply->ret_code == SS_SUCCESS) {
+ retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
+ if (retcode == SS_SUCCESS) {
if (mdev->state.conn < C_CONNECTED)
- tl_clear(mdev);
+ tl_clear(mdev->tconn);
if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
- tl_restart(mdev, fail_frozen_disk_io);
+ tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
}
drbd_resume_io(mdev);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
- reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
- return 0;
+ return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
-static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
- unsigned short *tl;
+ struct nlattr *nla;
+ nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
+ if (!nla)
+ goto nla_put_failure;
+ if (vnr != VOLUME_UNSPECIFIED &&
+ nla_put_u32(skb, T_ctx_volume, vnr))
+ goto nla_put_failure;
+ if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
+ goto nla_put_failure;
+ if (tconn->my_addr_len &&
+ nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
+ goto nla_put_failure;
+ if (tconn->peer_addr_len &&
+ nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+ goto nla_put_failure;
+ nla_nest_end(skb, nla);
+ return 0;
- tl = reply->tag_list;
+nla_put_failure:
+ if (nla)
+ nla_nest_cancel(skb, nla);
+ return -EMSGSIZE;
+}
- if (get_ldev(mdev)) {
- tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
- put_ldev(mdev);
- }
+int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
+ const struct sib_info *sib)
+{
+ struct state_info *si = NULL; /* for sizeof(si->member); */
+ struct net_conf *nc;
+ struct nlattr *nla;
+ int got_ldev;
+ int err = 0;
+ int exclude_sensitive;
+
+ /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
+ * to. So we better exclude_sensitive information.
+ *
+ * If sib == NULL, this is drbd_adm_get_status, executed synchronously
+ * in the context of the requesting user process. Exclude sensitive
+ * information, unless current has superuser.
+ *
+ * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
+ * relies on the current implementation of netlink_dump(), which
+ * executes the dump callback successively from netlink_recvmsg(),
+ * always in the context of the receiving process */
+ exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
+
+ got_ldev = get_ldev(mdev);
+
+ /* We need to add connection name and volume number information still.
+ * Minor number is in drbd_genlmsghdr. */
+ if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
+ goto nla_put_failure;
+
+ if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
+ goto nla_put_failure;
+
+ rcu_read_lock();
+ if (got_ldev)
+ if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
+ goto nla_put_failure;
+
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc)
+ err = net_conf_to_skb(skb, nc, exclude_sensitive);
+ rcu_read_unlock();
+ if (err)
+ goto nla_put_failure;
+
+ nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
+ if (!nla)
+ goto nla_put_failure;
+ if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
+ nla_put_u32(skb, T_current_state, mdev->state.i) ||
+ nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
+ nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
+ nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
+ nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
+ nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
+ nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
+ nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
+ nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
+ nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
+ nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
+ nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+ goto nla_put_failure;
+
+ if (got_ldev) {
+ int err;
- if (get_net_conf(mdev)) {
- tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
- put_net_conf(mdev);
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+
+ if (err)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
+ nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
+ nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+ goto nla_put_failure;
+ if (C_SYNC_SOURCE <= mdev->state.conn &&
+ C_PAUSED_SYNC_T >= mdev->state.conn) {
+ if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
+ nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+ goto nla_put_failure;
+ }
}
- tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ if (sib) {
+ switch(sib->sib_reason) {
+ case SIB_SYNC_PROGRESS:
+ case SIB_GET_STATUS_REPLY:
+ break;
+ case SIB_STATE_CHANGE:
+ if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
+ nla_put_u32(skb, T_new_state, sib->ns.i))
+ goto nla_put_failure;
+ break;
+ case SIB_HELPER_POST:
+ if (nla_put_u32(skb, T_helper_exit_code,
+ sib->helper_exit_code))
+ goto nla_put_failure;
+ /* fall through */
+ case SIB_HELPER_PRE:
+ if (nla_put_string(skb, T_helper, sib->helper_name))
+ goto nla_put_failure;
+ break;
+ }
+ }
+ nla_nest_end(skb, nla);
- return (int)((char *)tl - (char *)reply->tag_list);
+ if (0)
+nla_put_failure:
+ err = -EMSGSIZE;
+ if (got_ldev)
+ put_ldev(mdev);
+ return err;
}
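The "if (0) nla_put_failure:" construct above is a compact way to share one cleanup tail between the success and error paths: on success the dead "if (0)" skips the assignment, while every "goto nla_put_failure" lands on the label, sets err, and falls through into the common put_ldev()/return. Schematically:

	err = 0;
	/* many nla_put() calls, each: if (it fails) goto nla_put_failure; */
	if (0)
nla_put_failure:
		err = -EMSGSIZE;	/* only reached via one of the gotos above */
	/* common cleanup for both paths follows */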
-static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
- unsigned short *tl = reply->tag_list;
- union drbd_state s = mdev->state;
- unsigned long rs_left;
- unsigned int res;
+ enum drbd_ret_code retcode;
+ int err;
- tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* no local ref, no bitmap, no syncer progress. */
- if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
- if (get_ldev(mdev)) {
- drbd_get_syncer_progress(mdev, &rs_left, &res);
- tl = tl_add_int(tl, T_sync_progress, &res);
- put_ldev(mdev);
- }
+ err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
+ if (err) {
+ nlmsg_free(adm_ctx.reply_skb);
+ return err;
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- return (int)((char *)tl - (char *)reply->tag_list);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
- unsigned short *tl;
-
- tl = reply->tag_list;
+ struct drbd_conf *mdev;
+ struct drbd_genlmsghdr *dh;
+ struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
+ struct drbd_tconn *tconn = NULL;
+ struct drbd_tconn *tmp;
+ unsigned volume = cb->args[1];
+
+ /* Open coded, deferred, iteration:
+ * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
+ * idr_for_each_entry(&tconn->volumes, mdev, i) {
+ * ...
+ * }
+ * }
+ * where tconn is cb->args[0];
+ * and i is cb->args[1];
+ *
+ * cb->args[2] indicates if we shall loop over all resources,
+ * or just dump all volumes of a single resource.
+ *
+ * This may miss entries inserted after this dump started,
+ * or entries deleted before they are reached.
+ *
+ * We need to make sure the mdev won't disappear while
+ * we are looking at it, and revalidate our iterators
+ * on each iteration.
+ */
- if (get_ldev(mdev)) {
- tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
- tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
- put_ldev(mdev);
+ /* synchronize with conn_create()/conn_destroy() */
+ rcu_read_lock();
+ /* revalidate iterator position */
+ list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
+ if (pos == NULL) {
+ /* first iteration */
+ pos = tmp;
+ tconn = pos;
+ break;
+ }
+ if (tmp == pos) {
+ tconn = pos;
+ break;
+ }
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ if (tconn) {
+next_tconn:
+ mdev = idr_get_next(&tconn->volumes, &volume);
+ if (!mdev) {
+ /* No more volumes to dump on this tconn.
+ * Advance tconn iterator. */
+ pos = list_entry_rcu(tconn->all_tconn.next,
+ struct drbd_tconn, all_tconn);
+ /* Did we dump any volume on this tconn yet? */
+ if (volume != 0) {
+ /* If we reached the end of the list,
+ * or only a single resource dump was requested,
+ * we are done. */
+ if (&pos->all_tconn == &drbd_tconns || cb->args[2])
+ goto out;
+ volume = 0;
+ tconn = pos;
+ goto next_tconn;
+ }
+ }
+
+ dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &drbd_genl_family,
+ NLM_F_MULTI, DRBD_ADM_GET_STATUS);
+ if (!dh)
+ goto out;
+
+ if (!mdev) {
+ /* This is a tconn without a single volume.
+ * Surprisingly enough, it may have a network
+ * configuration. */
+ struct net_conf *nc;
+ dh->minor = -1U;
+ dh->ret_code = NO_ERROR;
+ if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
+ goto cancel;
+ nc = rcu_dereference(tconn->net_conf);
+ if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+ goto cancel;
+ goto done;
+ }
+
+ D_ASSERT(mdev->vnr == volume);
+ D_ASSERT(mdev->tconn == tconn);
+
+ dh->minor = mdev_to_minor(mdev);
+ dh->ret_code = NO_ERROR;
- return (int)((char *)tl - (char *)reply->tag_list);
+ if (nla_put_status_info(skb, mdev, NULL)) {
+cancel:
+ genlmsg_cancel(skb, dh);
+ goto out;
+ }
+done:
+ genlmsg_end(skb, dh);
+ }
+
+out:
+ rcu_read_unlock();
+ /* where to start the next iteration */
+ cb->args[0] = (long)pos;
+ cb->args[1] = (pos == tconn) ? volume + 1 : 0;
+
+ /* No more tconns/volumes/minors found results in an empty skb.
+ * Which will terminate the dump. */
+ return skb->len;
}
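get_one_status() is a netlink dump callback, so it has to be restartable: each invocation fills at most one skb, and the position is carried between calls in cb->args[]. Here args[0] holds the last tconn, args[1] the next volume number, and args[2] an optional single-resource filter. Stripped of the DRBD specifics, the shape of such a callback is roughly:

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* resume where the previous pass stopped */
	struct resource *pos = (struct resource *)cb->args[0];	/* hypothetical type */
	unsigned volume = cb->args[1];

	rcu_read_lock();
	/* revalidate pos against the live list, emit as many entries as fit into skb */
	rcu_read_unlock();

	/* remember where to continue; an skb with no entries terminates the dump */
	cb->args[0] = (long)pos;
	cb->args[1] = volume;
	return skb->len;
}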
-/**
- * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
- * @mdev: DRBD device.
- * @nlp: Netlink/connector packet from drbdsetup
- * @reply: Reply packet for drbdsetup
+/*
+ * Request status of all resources, or of all volumes within a single resource.
+ *
+ * This is a dump, as the answer may not fit in a single reply skb otherwise.
+ * Which means we cannot use the family->attrbuf or other such members, because
+ * dump is NOT protected by the genl_lock(). During dump, we only have access
+ * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
+ *
+ * Once things are setup properly, we call into get_one_status().
*/
-static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
- unsigned short *tl;
- char rv;
+ const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
+ struct nlattr *nla;
+ const char *resource_name;
+ struct drbd_tconn *tconn;
+ int maxtype;
+
+ /* Is this a followup call? */
+ if (cb->args[0]) {
+ /* ... of a single resource dump,
+ * and the resource iterator has been advanced already? */
+ if (cb->args[2] && cb->args[2] != cb->args[0])
+ return 0; /* DONE. */
+ goto dump;
+ }
+
+ /* First call (from netlink_dump_start). We need to figure out
+ * which resource(s) the user wants us to dump. */
+ nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
+ nlmsg_attrlen(cb->nlh, hdrlen),
+ DRBD_NLA_CFG_CONTEXT);
+
+ /* No explicit context given. Dump all. */
+ if (!nla)
+ goto dump;
+ maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
+ nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
+ if (IS_ERR(nla))
+ return PTR_ERR(nla);
+ /* context given, but no name present? */
+ if (!nla)
+ return -EINVAL;
+ resource_name = nla_data(nla);
+ tconn = conn_get_by_name(resource_name);
+
+ if (!tconn)
+ return -ENODEV;
+
+ kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
+
+ /* prime iterators, and set "filter" mode mark:
+ * only dump this tconn. */
+ cb->args[0] = (long)tconn;
+ /* cb->args[1] = 0; passed in this way. */
+ cb->args[2] = (long)tconn;
+
+dump:
+ return get_one_status(skb, cb);
+}
- tl = reply->tag_list;
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct timeout_parms tp;
+ int err;
- rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
- test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ tp.timeout_type =
+ adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+ test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
+ UT_DEFAULT;
- return (int)((char *)tl - (char *)reply->tag_list);
+ err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
+ if (err) {
+ nlmsg_free(adm_ctx.reply_skb);
+ return err;
+ }
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
- /* default to resume from last known position, if possible */
- struct start_ov args =
- { .start_sector = mdev->ov_start_sector };
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
+ struct start_ov_parms parms;
- if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
+
+ /* resume from last known position, if possible */
+ parms.ov_start_sector = mdev->ov_start_sector;
+ parms.ov_stop_sector = ULLONG_MAX;
+ if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
+ int err = start_ov_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
}
+ /* w_make_ov_request expects position to be aligned */
+ mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+ mdev->ov_stop_sector = parms.ov_stop_sector;
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
-
- /* w_make_ov_request expects position to be aligned */
- mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
- reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+ retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
drbd_resume_io(mdev);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
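
The verify start sector above is rounded down to a bitmap-bit boundary with parms.ov_start_sector & ~(BM_SECT_PER_BIT-1); the mask has to be ~(BM_SECT_PER_BIT-1), since ~BM_SECT_PER_BIT would only clear a single bit when BM_SECT_PER_BIT is a power of two. A minimal standalone sketch of that rounding, using an illustrative value of 8 sectors per bitmap bit (not the real kernel constant):

/*
 * Sketch of the sector alignment done before starting online verify.
 * BM_SECT_PER_BIT is assumed to be a power of two; 8 is illustrative only.
 */
#include <stdio.h>

#define BM_SECT_PER_BIT 8ULL	/* illustrative value, one bit per 4 KiB */

int main(void)
{
	unsigned long long sector = 1234567;
	unsigned long long aligned = sector & ~(BM_SECT_PER_BIT - 1);

	printf("%llu -> %llu\n", sector, aligned);	/* 1234567 -> 1234560 */
	return 0;
}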
-static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
+ struct new_c_uuid_parms args;
- struct new_c_uuid args;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out_nolock;
- memset(&args, 0, sizeof(struct new_c_uuid));
- if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
+ mdev = adm_ctx.mdev;
+ memset(&args, 0, sizeof(args));
+ if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
+ err = new_c_uuid_parms_from_attrs(&args, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out_nolock;
+ }
}
- mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
+ mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
if (!get_ldev(mdev)) {
retcode = ERR_NO_DISK;
@@ -2250,7 +3018,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
}
/* this is "skip initial sync", assume to be clean */
- if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
+ if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
dev_info(DEV, "Preparing to skip initial sync\n");
skip_initial_sync = 1;
@@ -2273,10 +3041,10 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
drbd_send_uuids_skip_initial_sync(mdev);
_drbd_uuid_set(mdev, UI_BITMAP, 0);
drbd_print_uuids(mdev, "cleared bitmap UUID");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
}
@@ -2284,416 +3052,284 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
out_dec:
put_ldev(mdev);
out:
- mutex_unlock(&mdev->state_mutex);
-
- reply->ret_code = retcode;
+ mutex_unlock(mdev->state_mutex);
+out_nolock:
+ drbd_adm_finish(info, retcode);
return 0;
}
-struct cn_handler_struct {
- int (*function)(struct drbd_conf *,
- struct drbd_nl_cfg_req *,
- struct drbd_nl_cfg_reply *);
- int reply_body_size;
-};
-
-static struct cn_handler_struct cnd_table[] = {
- [ P_primary ] = { &drbd_nl_primary, 0 },
- [ P_secondary ] = { &drbd_nl_secondary, 0 },
- [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 },
- [ P_detach ] = { &drbd_nl_detach, 0 },
- [ P_net_conf ] = { &drbd_nl_net_conf, 0 },
- [ P_disconnect ] = { &drbd_nl_disconnect, 0 },
- [ P_resize ] = { &drbd_nl_resize, 0 },
- [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 },
- [ P_invalidate ] = { &drbd_nl_invalidate, 0 },
- [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 },
- [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 },
- [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 },
- [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 },
- [ P_resume_io ] = { &drbd_nl_resume_io, 0 },
- [ P_outdate ] = { &drbd_nl_outdate, 0 },
- [ P_get_config ] = { &drbd_nl_get_config,
- sizeof(struct syncer_conf_tag_len_struct) +
- sizeof(struct disk_conf_tag_len_struct) +
- sizeof(struct net_conf_tag_len_struct) },
- [ P_get_state ] = { &drbd_nl_get_state,
- sizeof(struct get_state_tag_len_struct) +
- sizeof(struct sync_progress_tag_len_struct) },
- [ P_get_uuids ] = { &drbd_nl_get_uuids,
- sizeof(struct get_uuids_tag_len_struct) },
- [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag,
- sizeof(struct get_timeout_flag_tag_len_struct)},
- [ P_start_ov ] = { &drbd_nl_start_ov, 0 },
- [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 },
-};
-
-static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
+static enum drbd_ret_code
+drbd_check_resource_name(const char *name)
{
- struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
- struct cn_handler_struct *cm;
- struct cn_msg *cn_reply;
- struct drbd_nl_cfg_reply *reply;
- struct drbd_conf *mdev;
- int retcode, rr;
- int reply_size = sizeof(struct cn_msg)
- + sizeof(struct drbd_nl_cfg_reply)
- + sizeof(short int);
-
- if (!try_module_get(THIS_MODULE)) {
- printk(KERN_ERR "drbd: try_module_get() failed!\n");
- return;
+ if (!name || !name[0]) {
+ drbd_msg_put_info("resource name missing");
+ return ERR_MANDATORY_TAG;
}
-
- if (!capable(CAP_SYS_ADMIN)) {
- retcode = ERR_PERM;
- goto fail;
- }
-
- mdev = ensure_mdev(nlp->drbd_minor,
- (nlp->flags & DRBD_NL_CREATE_DEVICE));
- if (!mdev) {
- retcode = ERR_MINOR_INVALID;
- goto fail;
+ /* if we want to use these in sysfs/configfs/debugfs some day,
+ * we must not allow slashes */
+ if (strchr(name, '/')) {
+ drbd_msg_put_info("invalid resource name");
+ return ERR_INVALID_REQUEST;
}
+ return NO_ERROR;
+}
- if (nlp->packet_type >= P_nl_after_last_packet ||
- nlp->packet_type == P_return_code_only) {
- retcode = ERR_PACKET_NR;
- goto fail;
- }
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct res_opts res_opts;
+ int err;
- cm = cnd_table + nlp->packet_type;
+ retcode = drbd_adm_prepare(skb, info, 0);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* This may happen if packet number is 0: */
- if (cm->function == NULL) {
- retcode = ERR_PACKET_NR;
- goto fail;
+ set_res_opts_defaults(&res_opts);
+ err = res_opts_from_attrs(&res_opts, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
}
- reply_size += cm->reply_body_size;
+ retcode = drbd_check_resource_name(adm_ctx.resource_name);
+ if (retcode != NO_ERROR)
+ goto out;
- /* allocation not in the IO path, cqueue thread context */
- cn_reply = kzalloc(reply_size, GFP_KERNEL);
- if (!cn_reply) {
- retcode = ERR_NOMEM;
- goto fail;
+ if (adm_ctx.tconn) {
+ if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
+ retcode = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("resource exists");
+ }
+ /* else: still NO_ERROR */
+ goto out;
}
- reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
-
- reply->packet_type =
- cm->reply_body_size ? nlp->packet_type : P_return_code_only;
- reply->minor = nlp->drbd_minor;
- reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */
- /* reply->tag_list; might be modified by cm->function. */
-
- rr = cm->function(mdev, nlp, reply);
-
- cn_reply->id = req->id;
- cn_reply->seq = req->seq;
- cn_reply->ack = req->ack + 1;
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
- cn_reply->flags = 0;
-
- rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
- if (rr && rr != -ESRCH)
- printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
- kfree(cn_reply);
- module_put(THIS_MODULE);
- return;
- fail:
- drbd_nl_send_reply(req, retcode);
- module_put(THIS_MODULE);
+ if (!conn_create(adm_ctx.resource_name, &res_opts))
+ retcode = ERR_NOMEM;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
-
-static unsigned short *
-__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
- unsigned short len, int nul_terminated)
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
- unsigned short l = tag_descriptions[tag_number(tag)].max_len;
- len = (len < l) ? len : l;
- put_unaligned(tag, tl++);
- put_unaligned(len, tl++);
- memcpy(tl, data, len);
- tl = (unsigned short*)((char*)tl + len);
- if (nul_terminated)
- *((char*)tl - 1) = 0;
- return tl;
-}
+ struct drbd_genlmsghdr *dh = info->userhdr;
+ enum drbd_ret_code retcode;
-static unsigned short *
-tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
-{
- return __tl_add_blob(tl, tag, data, len, 0);
-}
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
-static unsigned short *
-tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
-{
- return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
-}
+ if (dh->minor > MINORMASK) {
+ drbd_msg_put_info("requested minor out of range");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
+ }
+ if (adm_ctx.volume > DRBD_VOLUME_MAX) {
+ drbd_msg_put_info("requested volume id out of range");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
+ }
-static unsigned short *
-tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
-{
- put_unaligned(tag, tl++);
- switch(tag_type(tag)) {
- case TT_INTEGER:
- put_unaligned(sizeof(int), tl++);
- put_unaligned(*(int *)val, (int *)tl);
- tl = (unsigned short*)((char*)tl+sizeof(int));
- break;
- case TT_INT64:
- put_unaligned(sizeof(u64), tl++);
- put_unaligned(*(u64 *)val, (u64 *)tl);
- tl = (unsigned short*)((char*)tl+sizeof(u64));
- break;
- default:
- /* someone did something stupid. */
- ;
+ /* drbd_adm_prepare made sure already
+ * that mdev->tconn and mdev->vnr match the request. */
+ if (adm_ctx.mdev) {
+ if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+ retcode = ERR_MINOR_EXISTS;
+ /* else: still NO_ERROR */
+ goto out;
}
- return tl;
+
+ retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
+static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct get_state_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
-
- /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
-
- tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
-
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
-
- reply->packet_type = P_get_state;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
-
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ if (mdev->state.disk == D_DISKLESS &&
+ /* no need to be mdev->state.conn == C_STANDALONE &&
+ * we may want to delete a minor from a live replication group.
+ */
+ mdev->state.role == R_SECONDARY) {
+ _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+ CS_VERBOSE + CS_WAIT_COMPLETE);
+ idr_remove(&mdev->tconn->volumes, mdev->vnr);
+ idr_remove(&minors, mdev_to_minor(mdev));
+ del_gendisk(mdev->vdisk);
+ synchronize_rcu();
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ return NO_ERROR;
+ } else
+ return ERR_MINOR_CONFIGURED;
}
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct call_helper_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
-
- /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
-
- tl = tl_add_str(tl, T_helper, helper_name);
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
+ enum drbd_ret_code retcode;
- reply->packet_type = P_call_helper;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ retcode = adm_delete_minor(adm_ctx.mdev);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-void drbd_bcast_ee(struct drbd_conf *mdev,
- const char *reason, const int dgs,
- const char* seen_hash, const char* calc_hash,
- const struct drbd_epoch_entry* e)
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
- struct cn_msg *cn_reply;
- struct drbd_nl_cfg_reply *reply;
- unsigned short *tl;
- struct page *page;
- unsigned len;
+ int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+ struct drbd_conf *mdev;
+ unsigned i;
- if (!e)
- return;
- if (!reason || !reason[0])
- return;
+ retcode = drbd_adm_prepare(skb, info, 0);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* apparently we have to memcpy twice, first to prepare the data for the
- * struct cn_msg, then within cn_netlink_send from the cn_msg to the
- * netlink skb. */
- /* receiver thread context, which is not in the writeout path (of this node),
- * but may be in the writeout path of the _other_ node.
- * GFP_NOIO to avoid potential "distributed deadlock". */
- cn_reply = kzalloc(
- sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct dump_ee_tag_len_struct)+
- sizeof(short int),
- GFP_NOIO);
-
- if (!cn_reply) {
- dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
- (unsigned long long)e->sector, e->size);
- return;
+ if (!adm_ctx.tconn) {
+ retcode = ERR_RES_NOT_KNOWN;
+ goto out;
}
- reply = (struct drbd_nl_cfg_reply*)cn_reply->data;
- tl = reply->tag_list;
-
- tl = tl_add_str(tl, T_dump_ee_reason, reason);
- tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
- tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
- tl = tl_add_int(tl, T_ee_sector, &e->sector);
- tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
-
- /* dump the first 32k */
- len = min_t(unsigned, e->size, 32 << 10);
- put_unaligned(T_ee_data, tl++);
- put_unaligned(len, tl++);
-
- page = e->pages;
- page_chain_for_each(page) {
- void *d = kmap_atomic(page);
- unsigned l = min_t(unsigned, len, PAGE_SIZE);
- memcpy(tl, d, l);
- kunmap_atomic(d);
- tl = (unsigned short*)((char*)tl + l);
- len -= l;
- if (len == 0)
- break;
+ /* demote */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = drbd_set_role(mdev, R_SECONDARY, 0);
+ if (retcode < SS_SUCCESS) {
+ drbd_msg_put_info("failed to demote");
+ goto out;
+ }
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
- cn_reply->ack = 0; // not used here.
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char*)tl - (char*)reply->tag_list);
- cn_reply->flags = 0;
-
- reply->packet_type = P_dump_ee;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
- kfree(cn_reply);
-}
-
-void drbd_bcast_sync_progress(struct drbd_conf *mdev)
-{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct sync_progress_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
- unsigned long rs_left;
- unsigned int res;
+ retcode = conn_try_disconnect(adm_ctx.tconn, 0);
+ if (retcode < SS_SUCCESS) {
+ drbd_msg_put_info("failed to disconnect");
+ goto out;
+ }
- /* no local ref, no bitmap, no syncer progress, no broadcast. */
- if (!get_ldev(mdev))
- return;
- drbd_get_syncer_progress(mdev, &rs_left, &res);
- put_ldev(mdev);
+ /* detach */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = adm_detach(mdev, 0);
+ if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
+ drbd_msg_put_info("failed to detach");
+ goto out;
+ }
+ }
- tl = tl_add_int(tl, T_sync_progress, &res);
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ /* If we reach this, all volumes (of this tconn) are Secondary,
+ * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
+ * actually stopped, state handling only does drbd_thread_stop_nowait(). */
+ drbd_thread_stop(&adm_ctx.tconn->worker);
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
+ /* Now, nothing can fail anymore */
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
+ /* delete volumes */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = adm_delete_minor(mdev);
+ if (retcode != NO_ERROR) {
+ /* "can not happen" */
+ drbd_msg_put_info("failed to delete volume");
+ goto out;
+ }
+ }
- reply->packet_type = P_sync_progress;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
+ /* delete connection */
+ if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+ list_del_rcu(&adm_ctx.tconn->all_tconn);
+ synchronize_rcu();
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ retcode = NO_ERROR;
+ } else {
+ /* "can not happen" */
+ retcode = ERR_RES_IN_USE;
+ drbd_msg_put_info("failed to delete connection");
+ }
+ goto out;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-int __init drbd_nl_init(void)
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
- static struct cb_id cn_id_drbd;
- int err, try=10;
+ enum drbd_ret_code retcode;
- cn_id_drbd.val = CN_VAL_DRBD;
- do {
- cn_id_drbd.idx = cn_idx;
- err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
- if (!err)
- break;
- cn_idx = (cn_idx + CN_IDX_STEP);
- } while (try--);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- if (err) {
- printk(KERN_ERR "drbd: cn_drbd failed to register\n");
- return err;
+ if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+ list_del_rcu(&adm_ctx.tconn->all_tconn);
+ synchronize_rcu();
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+
+ retcode = NO_ERROR;
+ } else {
+ retcode = ERR_RES_IN_USE;
}
+ if (retcode == NO_ERROR)
+ drbd_thread_stop(&adm_ctx.tconn->worker);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-void drbd_nl_cleanup(void)
+void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
- static struct cb_id cn_id_drbd;
-
- cn_id_drbd.idx = cn_idx;
- cn_id_drbd.val = CN_VAL_DRBD;
-
- cn_del_callback(&cn_id_drbd);
-}
+ static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+ struct sk_buff *msg;
+ struct drbd_genlmsghdr *d_out;
+ unsigned seq;
+ int err = -ENOMEM;
+
+ if (sib->sib_reason == SIB_SYNC_PROGRESS) {
+ if (time_after(jiffies, mdev->rs_last_bcast + HZ))
+ mdev->rs_last_bcast = jiffies;
+ else
+ return;
+ }
-void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
-{
- char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- int rr;
+ seq = atomic_inc_return(&drbd_genl_seq);
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+ if (!msg)
+ goto failed;
- memset(buffer, 0, sizeof(buffer));
- cn_reply->id = req->id;
+ err = -EMSGSIZE;
+ d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
+ if (!d_out) /* cannot happen, but anyways. */
+ goto nla_put_failure;
+ d_out->minor = mdev_to_minor(mdev);
+ d_out->ret_code = NO_ERROR;
- cn_reply->seq = req->seq;
- cn_reply->ack = req->ack + 1;
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
- cn_reply->flags = 0;
+ if (nla_put_status_info(msg, mdev, sib))
+ goto nla_put_failure;
+ genlmsg_end(msg, d_out);
+ err = drbd_genl_multicast_events(msg, 0);
+ /* msg has been consumed or freed in netlink_broadcast() */
+ if (err && err != -ESRCH)
+ goto failed;
- reply->packet_type = P_return_code_only;
- reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
- reply->ret_code = ret_code;
+ return;
- rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
- if (rr && rr != -ESRCH)
- printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
+nla_put_failure:
+ nlmsg_free(msg);
+failed:
+ dev_err(DEV, "Error %d while broadcasting event. "
+ "Event seq:%u sib_reason:%u\n",
+ err, seq, sib->sib_reason);
}
-
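
drbd_adm_get_status_all() above implements status as a netlink dump because the answer may not fit into a single reply skb: netlink invokes the callback repeatedly, and the callback keeps its position in the opaque cb->args[] slots between calls (args[0] carries the optional single-resource filter, args[2] marks that mode so a later call can notice the iterator has moved past it). A minimal userspace sketch of that call-again-until-empty pattern, with made-up names and data rather than the DRBD or netlink API:

/*
 * Sketch of the dump-callback pattern: the callback is called repeatedly
 * and remembers its position in an opaque args[] array between calls.
 */
#include <stdio.h>

struct fake_cb {
	long args[3];	/* args[1]: current position in this sketch */
};

/* pretend there are 5 resources; emit at most 2 per invocation */
static int dump_some(struct fake_cb *cb)
{
	long pos = cb->args[1];
	int emitted = 0;

	while (pos < 5 && emitted < 2) {
		printf("resource %ld\n", pos);
		pos++;
		emitted++;
	}
	cb->args[1] = pos;
	return emitted;	/* 0 means: dump finished */
}

int main(void)
{
	struct fake_cb cb = { { 0, 0, 0 } };

	while (dump_some(&cb) > 0)
		;	/* the kernel would send one reply skb per non-zero return */
	return 0;
}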
diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c
new file mode 100644
index 000000000000..fa672b6df8d6
--- /dev/null
+++ b/drivers/block/drbd/drbd_nla.c
@@ -0,0 +1,55 @@
+#include "drbd_wrappers.h"
+#include <linux/kernel.h>
+#include <net/netlink.h>
+#include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
+
+static int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla)
+{
+ struct nlattr *head = nla_data(nla);
+ int len = nla_len(nla);
+ int rem;
+
+ /*
+ * validate_nla (called from nla_parse_nested) ignores attributes
+ * beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY flag.
+ * In order to have it validate attributes with the DRBD_GENLA_F_MANDATORY
+ * flag set also, check and remove that flag before calling
+ * nla_parse_nested.
+ */
+
+ nla_for_each_attr(nla, head, len, rem) {
+ if (nla->nla_type & DRBD_GENLA_F_MANDATORY) {
+ nla->nla_type &= ~DRBD_GENLA_F_MANDATORY;
+ if (nla_type(nla) > maxtype)
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
+int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+ const struct nla_policy *policy)
+{
+ int err;
+
+ err = drbd_nla_check_mandatory(maxtype, nla);
+ if (!err)
+ err = nla_parse_nested(tb, maxtype, nla, policy);
+
+ return err;
+}
+
+struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype)
+{
+ int err;
+ /*
+ * If any nested attribute has the DRBD_GENLA_F_MANDATORY flag set and
+ * we don't know about that attribute, reject all the nested
+ * attributes.
+ */
+ err = drbd_nla_check_mandatory(maxtype, nla);
+ if (err)
+ return ERR_PTR(err);
+ return nla_find_nested(nla, attrtype);
+}
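
drbd_nla_check_mandatory() above works around nla_parse_nested() not understanding the DRBD_GENLA_F_MANDATORY flag: the flag is masked out of each attribute type before parsing, and an unknown attribute that carried the flag causes the whole nested set to be rejected, while unknown optional attributes are simply ignored. A reduced userspace sketch of that flag handling, with an invented flag value and plain integers standing in for nlattr types:

/*
 * Sketch of mandatory-flag handling: strip the flag, reject unknown
 * attributes only if they were marked mandatory.  FAKE_F_MANDATORY is an
 * assumption for the demo, not the real DRBD_GENLA_F_MANDATORY value.
 */
#include <stdio.h>

#define FAKE_F_MANDATORY (1 << 14)

static int check_one(unsigned short *type, int maxtype)
{
	if (*type & FAKE_F_MANDATORY) {
		*type &= ~FAKE_F_MANDATORY;	/* strip the flag ... */
		if (*type > maxtype)
			return -1;		/* ... unknown + mandatory: reject */
	}
	return 0;	/* unknown but optional attributes are ignored */
}

int main(void)
{
	unsigned short known = 3 | FAKE_F_MANDATORY;
	unsigned short unknown = 9 | FAKE_F_MANDATORY;
	int maxtype = 5;

	printf("known:   %d\n", check_one(&known, maxtype));	/* 0 */
	printf("unknown: %d\n", check_one(&unknown, maxtype));	/* -1 */
	return 0;
}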
diff --git a/drivers/block/drbd/drbd_nla.h b/drivers/block/drbd/drbd_nla.h
new file mode 100644
index 000000000000..679c2d5b4535
--- /dev/null
+++ b/drivers/block/drbd/drbd_nla.h
@@ -0,0 +1,8 @@
+#ifndef __DRBD_NLA_H
+#define __DRBD_NLA_H
+
+extern int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+ const struct nla_policy *policy);
+extern struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype);
+
+#endif /* __DRBD_NLA_H */
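
The resource-name rules enforced earlier by drbd_check_resource_name() are simple: the name must be non-empty, and because it may one day become a sysfs/configfs/debugfs directory name it must not contain '/'. A standalone sketch of those two checks, returning plain ints instead of enum drbd_ret_code:

/*
 * Sketch of the resource-name validation; return codes are placeholders.
 */
#include <stdio.h>
#include <string.h>

static int check_resource_name(const char *name)
{
	if (!name || !name[0])
		return -1;		/* resource name missing */
	if (strchr(name, '/'))
		return -2;		/* would break a future sysfs layout */
	return 0;
}

int main(void)
{
	printf("%d\n", check_resource_name("r0"));	/* 0: ok */
	printf("%d\n", check_resource_name(""));	/* -1 */
	printf("%d\n", check_resource_name("a/b"));	/* -2 */
	return 0;
}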
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 5496104f90b9..56672a61eb94 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -167,18 +167,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(mdev);
unsigned long bit_pos;
+ unsigned long long stop_sector = 0;
if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T)
+ mdev->state.conn == C_VERIFY_T) {
bit_pos = bm_bits - mdev->ov_left;
- else
+ if (verify_can_do_stop_sector(mdev))
+ stop_sector = mdev->ov_stop_sector;
+ } else
bit_pos = mdev->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
- "\t%3d%% sector pos: %llu/%llu\n",
+ "\t%3d%% sector pos: %llu/%llu",
(int)(bit_pos / (bm_bits/100+1)),
(unsigned long long)bit_pos * BM_SECT_PER_BIT,
(unsigned long long)bm_bits * BM_SECT_PER_BIT);
+ if (stop_sector != 0 && stop_sector != ULLONG_MAX)
+ seq_printf(seq, " stop sector: %llu", stop_sector);
+ seq_printf(seq, "\n");
}
}
@@ -194,9 +200,11 @@ static void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
static int drbd_seq_show(struct seq_file *seq, void *v)
{
- int i, hole = 0;
+ int i, prev_i = -1;
const char *sn;
struct drbd_conf *mdev;
+ struct net_conf *nc;
+ char wp;
static char write_ordering_chars[] = {
[WO_none] = 'n',
@@ -227,16 +235,11 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
oos .. known out-of-sync kB
*/
- for (i = 0; i < minor_count; i++) {
- mdev = minor_to_mdev(i);
- if (!mdev) {
- hole = 1;
- continue;
- }
- if (hole) {
- hole = 0;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, mdev, i) {
+ if (prev_i != i - 1)
seq_printf(seq, "\n");
- }
+ prev_i = i;
sn = drbd_conn_str(mdev->state.conn);
@@ -248,6 +251,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
/* reset mdev->congestion_reason */
bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
@@ -257,9 +262,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
drbd_role_str(mdev->state.peer),
drbd_disk_str(mdev->state.disk),
drbd_disk_str(mdev->state.pdsk),
- (mdev->net_conf == NULL ? ' ' :
- (mdev->net_conf->wire_protocol - DRBD_PROT_A+'A')),
- is_susp(mdev->state) ? 's' : 'r',
+ wp,
+ drbd_suspended(mdev) ? 's' : 'r',
mdev->state.aftr_isp ? 'a' : '-',
mdev->state.peer_isp ? 'p' : '-',
mdev->state.user_isp ? 'u' : '-',
@@ -276,8 +280,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
atomic_read(&mdev->rs_pending_cnt),
atomic_read(&mdev->unacked_cnt),
atomic_read(&mdev->ap_bio_cnt),
- mdev->epochs,
- write_ordering_chars[mdev->write_ordering]
+ mdev->tconn->epochs,
+ write_ordering_chars[mdev->tconn->write_ordering]
);
seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long)
@@ -302,6 +306,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
}
}
}
+ rcu_read_unlock();
return 0;
}
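
The /proc progress line above computes its percentage as bit_pos / (bm_bits/100 + 1); the "+ 1" keeps the divisor non-zero for bitmaps smaller than 100 bits, at the cost of rounding the result slightly low. A standalone check of that expression:

/*
 * Sketch of the percentage computation used by drbd_syncer_progress().
 */
#include <stdio.h>

static int percent_done(unsigned long bit_pos, unsigned long bm_bits)
{
	return (int)(bit_pos / (bm_bits / 100 + 1));
}

int main(void)
{
	printf("%d%%\n", percent_done(0, 50));		/* tiny bitmap, no div-by-zero */
	printf("%d%%\n", percent_done(500000, 1000000));	/* just under 50%, rounds down */
	return 0;
}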
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c74ca2df7431..a9eccfc6079b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -48,17 +48,25 @@
#include "drbd_vli.h"
+struct packet_info {
+ enum drbd_packet cmd;
+ unsigned int size;
+ unsigned int vnr;
+ void *data;
+};
+
enum finish_epoch {
FE_STILL_LIVE,
FE_DESTROYED,
FE_RECYCLED,
};
-static int drbd_do_handshake(struct drbd_conf *mdev);
-static int drbd_do_auth(struct drbd_conf *mdev);
+static int drbd_do_features(struct drbd_tconn *tconn);
+static int drbd_do_auth(struct drbd_tconn *tconn);
+static int drbd_disconnected(struct drbd_conf *mdev);
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
-static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
+static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
@@ -142,11 +150,12 @@ static void page_chain_add(struct page **head,
*head = chain_first;
}
-static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
+static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
+ unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
- int i = 0;
+ unsigned int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
@@ -175,7 +184,7 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return page;
/* Not enough pages immediately available this time.
- * No need to jump around here, drbd_pp_alloc will retry this
+ * No need to jump around here, drbd_alloc_pages will retry this
* function "soon". */
if (page) {
tmp = page_chain_tail(page, NULL);
@@ -187,9 +196,10 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return NULL;
}
-static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
+static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
+ struct list_head *to_be_freed)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct list_head *le, *tle;
/* The EEs are always appended to the end of the list. Since
@@ -198,8 +208,8 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
stop to examine the list... */
list_for_each_safe(le, tle, &mdev->net_ee) {
- e = list_entry(le, struct drbd_epoch_entry, w.list);
- if (drbd_ee_has_active_page(e))
+ peer_req = list_entry(le, struct drbd_peer_request, w.list);
+ if (drbd_peer_req_has_active_page(peer_req))
break;
list_move(le, to_be_freed);
}
@@ -208,18 +218,18 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *peer_req, *t;
- spin_lock_irq(&mdev->req_lock);
- reclaim_net_ee(mdev, &reclaimed);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ reclaim_finished_net_peer_reqs(mdev, &reclaimed);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, e);
+ list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+ drbd_free_net_peer_req(mdev, peer_req);
}
/**
- * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
+ * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @mdev: DRBD device.
* @number: number of pages requested
* @retry: whether to retry, if not enough pages are available right now
@@ -230,23 +240,31 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
*
* Returns a page chain linked via page->private.
*/
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
+struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
+ bool retry)
{
struct page *page = NULL;
+ struct net_conf *nc;
DEFINE_WAIT(wait);
+ int mxb;
/* Yes, we may run up to @number over max_buffers. If we
* follow it strictly, the admin will get it wrong anyways. */
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
- page = drbd_pp_first_pages_or_try_alloc(mdev, number);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ mxb = nc ? nc->max_buffers : 1000000;
+ rcu_read_unlock();
+
+ if (atomic_read(&mdev->pp_in_use) < mxb)
+ page = __drbd_alloc_pages(mdev, number);
while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_kick_lo_and_reclaim_net(mdev);
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_pages_or_try_alloc(mdev, number);
+ if (atomic_read(&mdev->pp_in_use) < mxb) {
+ page = __drbd_alloc_pages(mdev, number);
if (page)
break;
}
@@ -255,7 +273,7 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
break;
if (signal_pending(current)) {
- dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
+ dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
break;
}
@@ -268,11 +286,11 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
return page;
}
-/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
+ * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
int i;
@@ -280,7 +298,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
+ if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
@@ -302,127 +320,130 @@ You need to hold the req_lock:
_drbd_wait_ee_list_empty()
You must not have the req_lock:
- drbd_free_ee()
- drbd_alloc_ee()
- drbd_init_ee()
- drbd_release_ee()
+ drbd_free_peer_req()
+ drbd_alloc_peer_req()
+ drbd_free_peer_reqs()
drbd_ee_fix_bhs()
- drbd_process_done_ee()
+ drbd_finish_peer_reqs()
drbd_clear_done_ee()
drbd_wait_ee_list_empty()
*/
-struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local)
+struct drbd_peer_request *
+drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
+ unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct page *page = NULL;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
return NULL;
- e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
- if (!e) {
+ peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
+ if (!peer_req) {
if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
+ dev_err(DEV, "%s: allocation failed\n", __func__);
return NULL;
}
if (data_size) {
- page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
if (!page)
goto fail;
}
- INIT_HLIST_NODE(&e->collision);
- e->epoch = NULL;
- e->mdev = mdev;
- e->pages = page;
- atomic_set(&e->pending_bios, 0);
- e->size = data_size;
- e->flags = 0;
- e->sector = sector;
- e->block_id = id;
+ drbd_clear_interval(&peer_req->i);
+ peer_req->i.size = data_size;
+ peer_req->i.sector = sector;
+ peer_req->i.local = false;
+ peer_req->i.waiting = false;
+
+ peer_req->epoch = NULL;
+ peer_req->w.mdev = mdev;
+ peer_req->pages = page;
+ atomic_set(&peer_req->pending_bios, 0);
+ peer_req->flags = 0;
+ /*
+ * The block_id is opaque to the receiver. It is not endianness
+ * converted, and sent back to the sender unchanged.
+ */
+ peer_req->block_id = id;
- return e;
+ return peer_req;
fail:
- mempool_free(e, drbd_ee_mempool);
+ mempool_free(peer_req, drbd_ee_mempool);
return NULL;
}
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
+void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
+ int is_net)
{
- if (e->flags & EE_HAS_DIGEST)
- kfree(e->digest);
- drbd_pp_free(mdev, e->pages, is_net);
- D_ASSERT(atomic_read(&e->pending_bios) == 0);
- D_ASSERT(hlist_unhashed(&e->collision));
- mempool_free(e, drbd_ee_mempool);
+ if (peer_req->flags & EE_HAS_DIGEST)
+ kfree(peer_req->digest);
+ drbd_free_pages(mdev, peer_req->pages, is_net);
+ D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
+ mempool_free(peer_req, drbd_ee_mempool);
}
-int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
+int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
LIST_HEAD(work_list);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *peer_req, *t;
int count = 0;
int is_net = list == &mdev->net_ee;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_splice_init(list, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &work_list, w.list) {
- drbd_free_some_ee(mdev, e, is_net);
+ list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+ __drbd_free_peer_req(mdev, peer_req, is_net);
count++;
}
return count;
}
-
/*
- * This function is called from _asender only_
- * but see also comments in _req_mod(,barrier_acked)
- * and receive_Barrier.
- *
- * Move entries from net_ee to done_ee, if ready.
- * Grab done_ee, call all callbacks, free the entries.
- * The callbacks typically send out ACKs.
+ * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
*/
-static int drbd_process_done_ee(struct drbd_conf *mdev)
+static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
- int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
+ struct drbd_peer_request *peer_req, *t;
+ int err = 0;
- spin_lock_irq(&mdev->req_lock);
- reclaim_net_ee(mdev, &reclaimed);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ reclaim_finished_net_peer_reqs(mdev, &reclaimed);
list_splice_init(&mdev->done_ee, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, e);
+ list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+ drbd_free_net_peer_req(mdev, peer_req);
/* possible callbacks here:
- * e_end_block, and e_end_resync_block, e_send_discard_ack.
+ * e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
*/
- list_for_each_entry_safe(e, t, &work_list, w.list) {
+ list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+ int err2;
+
/* list_del not necessary, next/prev members not touched */
- ok = e->w.cb(mdev, &e->w, !ok) && ok;
- drbd_free_ee(mdev, e);
+ err2 = peer_req->w.cb(&peer_req->w, !!err);
+ if (!err)
+ err = err2;
+ drbd_free_peer_req(mdev, peer_req);
}
wake_up(&mdev->ee_wait);
- return ok;
+ return err;
}
-void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
+static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+ struct list_head *head)
{
DEFINE_WAIT(wait);
@@ -430,55 +451,22 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
io_schedule();
finish_wait(&mdev->ee_wait, &wait);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
}
}
-void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
+static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+ struct list_head *head)
{
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, head);
- spin_unlock_irq(&mdev->req_lock);
-}
-
-/* see also kernel_accept; which is only present since 2.6.18.
- * also we want to log which part of it failed, exactly */
-static int drbd_accept(struct drbd_conf *mdev, const char **what,
- struct socket *sock, struct socket **newsock)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- *what = "listen";
- err = sock->ops->listen(sock, 5);
- if (err < 0)
- goto out;
-
- *what = "sock_create_lite";
- err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
- newsock);
- if (err < 0)
- goto out;
-
- *what = "accept";
- err = sock->ops->accept(sock, *newsock, 0);
- if (err < 0) {
- sock_release(*newsock);
- *newsock = NULL;
- goto out;
- }
- (*newsock)->ops = sock->ops;
- __module_get((*newsock)->ops->owner);
-
-out:
- return err;
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
-static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
- void *buf, size_t size, int flags)
+static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
mm_segment_t oldfs;
struct kvec iov = {
@@ -500,59 +488,62 @@ static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
return rv;
}
-static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
+static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
- mm_segment_t oldfs;
- struct kvec iov = {
- .iov_base = buf,
- .iov_len = size,
- };
- struct msghdr msg = {
- .msg_iovlen = 1,
- .msg_iov = (struct iovec *)&iov,
- .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
- };
int rv;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
+ rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
- for (;;) {
- rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
- if (rv == size)
- break;
+ if (rv < 0) {
+ if (rv == -ECONNRESET)
+ conn_info(tconn, "sock was reset by peer\n");
+ else if (rv != -ERESTARTSYS)
+ conn_err(tconn, "sock_recvmsg returned %d\n", rv);
+ } else if (rv == 0) {
+ if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ long t;
+ rcu_read_lock();
+ t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ rcu_read_unlock();
- /* Note:
- * ECONNRESET other side closed the connection
- * ERESTARTSYS (on sock) we got a signal
- */
+ t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
- if (rv < 0) {
- if (rv == -ECONNRESET)
- dev_info(DEV, "sock was reset by peer\n");
- else if (rv != -ERESTARTSYS)
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
- break;
- } else if (rv == 0) {
- dev_info(DEV, "sock was shut down by peer\n");
- break;
- } else {
- /* signal came in, or peer/link went down,
- * after we read a partial message
- */
- /* D_ASSERT(signal_pending(current)); */
- break;
+ if (t)
+ goto out;
}
- };
-
- set_fs(oldfs);
+ conn_info(tconn, "sock was shut down by peer\n");
+ }
if (rv != size)
- drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
+ conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
+out:
return rv;
}
+static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
+{
+ int err;
+
+ err = drbd_recv(tconn, buf, size);
+ if (err != size) {
+ if (err >= 0)
+ err = -EIO;
+ } else
+ err = 0;
+ return err;
+}
+
+static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
+{
+ int err;
+
+ err = drbd_recv_all(tconn, buf, size);
+ if (err && !signal_pending(current))
+ conn_warn(tconn, "short read (expected size %d)\n", (int)size);
+ return err;
+}
+
/* quoting tcp(7):
* On individual connections, the socket buffer size must be set prior to the
* listen(2) or connect(2) calls in order to have it take effect.
@@ -572,29 +563,50 @@ static void drbd_setbufsize(struct socket *sock, unsigned int snd,
}
}
-static struct socket *drbd_try_connect(struct drbd_conf *mdev)
+static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
const char *what;
struct socket *sock;
struct sockaddr_in6 src_in6;
- int err;
+ struct sockaddr_in6 peer_in6;
+ struct net_conf *nc;
+ int err, peer_addr_len, my_addr_len;
+ int sndbuf_size, rcvbuf_size, connect_int;
int disconnect_on_error = 1;
- if (!get_net_conf(mdev))
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
return NULL;
+ }
+ sndbuf_size = nc->sndbuf_size;
+ rcvbuf_size = nc->rcvbuf_size;
+ connect_int = nc->connect_int;
+ rcu_read_unlock();
+
+ my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
+ memcpy(&src_in6, &tconn->my_addr, my_addr_len);
+
+ if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
+ src_in6.sin6_port = 0;
+ else
+ ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
+
+ peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
+ memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
- SOCK_STREAM, IPPROTO_TCP, &sock);
+ err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (err < 0) {
sock = NULL;
goto out;
}
sock->sk->sk_rcvtimeo =
- sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
- drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ sock->sk->sk_sndtimeo = connect_int * HZ;
+ drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
/* explicitly bind to the configured IP as source IP
* for the outgoing connections.
@@ -603,17 +615,8 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
* Make sure to use 0 as port number, so linux selects
* a free one dynamically.
*/
- memcpy(&src_in6, mdev->net_conf->my_addr,
- min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
- if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
- src_in6.sin6_port = 0;
- else
- ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
-
what = "bind before connect";
- err = sock->ops->bind(sock,
- (struct sockaddr *) &src_in6,
- mdev->net_conf->my_addr_len);
+ err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
if (err < 0)
goto out;
@@ -621,9 +624,7 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
* stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
- err = sock->ops->connect(sock,
- (struct sockaddr *)mdev->net_conf->peer_addr,
- mdev->net_conf->peer_addr_len, 0);
+ err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
out:
if (err < 0) {
@@ -641,91 +642,174 @@ out:
disconnect_on_error = 0;
break;
default:
- dev_err(DEV, "%s failed, err = %d\n", what, err);
+ conn_err(tconn, "%s failed, err = %d\n", what, err);
}
if (disconnect_on_error)
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
- put_net_conf(mdev);
+
return sock;
}
-static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
+struct accept_wait_data {
+ struct drbd_tconn *tconn;
+ struct socket *s_listen;
+ struct completion door_bell;
+ void (*original_sk_state_change)(struct sock *sk);
+
+};
+
+static void drbd_incoming_connection(struct sock *sk)
{
- int timeo, err;
- struct socket *s_estab = NULL, *s_listen;
+ struct accept_wait_data *ad = sk->sk_user_data;
+ void (*state_change)(struct sock *sk);
+
+ state_change = ad->original_sk_state_change;
+ if (sk->sk_state == TCP_ESTABLISHED)
+ complete(&ad->door_bell);
+ state_change(sk);
+}
+
+static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+{
+ int err, sndbuf_size, rcvbuf_size, my_addr_len;
+ struct sockaddr_in6 my_addr;
+ struct socket *s_listen;
+ struct net_conf *nc;
const char *what;
- if (!get_net_conf(mdev))
- return NULL;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return -EIO;
+ }
+ sndbuf_size = nc->sndbuf_size;
+ rcvbuf_size = nc->rcvbuf_size;
+ rcu_read_unlock();
+
+ my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
+ memcpy(&my_addr, &tconn->my_addr, my_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
- SOCK_STREAM, IPPROTO_TCP, &s_listen);
+ err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, &s_listen);
if (err) {
s_listen = NULL;
goto out;
}
- timeo = mdev->net_conf->try_connect_int * HZ;
- timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
-
- s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- s_listen->sk->sk_rcvtimeo = timeo;
- s_listen->sk->sk_sndtimeo = timeo;
- drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
what = "bind before listen";
- err = s_listen->ops->bind(s_listen,
- (struct sockaddr *) mdev->net_conf->my_addr,
- mdev->net_conf->my_addr_len);
+ err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
if (err < 0)
goto out;
- err = drbd_accept(mdev, &what, s_listen, &s_estab);
+ ad->s_listen = s_listen;
+ write_lock_bh(&s_listen->sk->sk_callback_lock);
+ ad->original_sk_state_change = s_listen->sk->sk_state_change;
+ s_listen->sk->sk_state_change = drbd_incoming_connection;
+ s_listen->sk->sk_user_data = ad;
+ write_unlock_bh(&s_listen->sk->sk_callback_lock);
+
+ what = "listen";
+ err = s_listen->ops->listen(s_listen, 5);
+ if (err < 0)
+ goto out;
+ return 0;
out:
if (s_listen)
sock_release(s_listen);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
- dev_err(DEV, "%s failed, err = %d\n", what, err);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_err(tconn, "%s failed, err = %d\n", what, err);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
- put_net_conf(mdev);
- return s_estab;
+ return -EIO;
}
-static int drbd_send_fp(struct drbd_conf *mdev,
- struct socket *sock, enum drbd_packets cmd)
+static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
- struct p_header80 *h = &mdev->data.sbuf.header.h80;
-
- return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_state_change = ad->original_sk_state_change;
+ sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
}
-static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
+static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
- int rr;
+ int timeo, connect_int, err = 0;
+ struct socket *s_estab = NULL;
+ struct net_conf *nc;
+
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ connect_int = nc->connect_int;
+ rcu_read_unlock();
+
+ timeo = connect_int * HZ;
+ timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
+
+ err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
+ if (err <= 0)
+ return NULL;
+
+ err = kernel_accept(ad->s_listen, &s_estab, 0);
+ if (err < 0) {
+ if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
+ conn_err(tconn, "accept failed, err = %d\n", err);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ }
+ }
+
+ if (s_estab)
+ unregister_state_change(s_estab->sk, ad);
+
+ return s_estab;
+}
- rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
+static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
- if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
- return be16_to_cpu(h->command);
+static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd)
+{
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
+}
- return 0xffff;
+static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
+{
+ unsigned int header_size = drbd_header_size(tconn);
+ struct packet_info pi;
+ int err;
+
+ err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
+ if (err != header_size) {
+ if (err >= 0)
+ err = -EIO;
+ return err;
+ }
+ err = decode_header(tconn, tconn->data.rbuf, &pi);
+ if (err)
+ return err;
+ return pi.cmd;
}
/**
* drbd_socket_okay() - Free the socket if its connection is not okay
- * @mdev: DRBD device.
* @sock: pointer to the pointer to the socket.
*/
-static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
+static int drbd_socket_okay(struct socket **sock)
{
int rr;
char tb[4];
@@ -733,7 +817,7 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
if (!*sock)
return false;
- rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
+ rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
if (rr > 0 || rr == -EAGAIN) {
return true;
@@ -743,6 +827,31 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
return false;
}
}
+/* Gets called if a connection is established, or if a new minor gets created
+ in a connection */
+int drbd_connected(struct drbd_conf *mdev)
+{
+ int err;
+
+ atomic_set(&mdev->packet_seq, 0);
+ mdev->peer_seq = 0;
+
+ mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
+ &mdev->tconn->cstate_mutex :
+ &mdev->own_state_mutex;
+
+ err = drbd_send_sync_param(mdev);
+ if (!err)
+ err = drbd_send_sizes(mdev, 0, 0);
+ if (!err)
+ err = drbd_send_uuids(mdev);
+ if (!err)
+ err = drbd_send_current_state(mdev);
+ clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+ clear_bit(RESIZE_PENDING, &mdev->flags);
+ mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ return err;
+}
/*
* return values:
@@ -752,232 +861,315 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
* no point in trying again, please go standalone.
* -2 We do not have a network config...
*/
-static int drbd_connect(struct drbd_conf *mdev)
+static int conn_connect(struct drbd_tconn *tconn)
{
- struct socket *s, *sock, *msock;
- int try, h, ok;
+ struct drbd_socket sock, msock;
+ struct drbd_conf *mdev;
+ struct net_conf *nc;
+ int vnr, timeout, h, ok;
+ bool discard_my_data;
enum drbd_state_rv rv;
+ struct accept_wait_data ad = {
+ .tconn = tconn,
+ .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
+ };
- D_ASSERT(!mdev->data.socket);
-
- if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
+ clear_bit(DISCONNECT_SENT, &tconn->flags);
+ if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
return -2;
- clear_bit(DISCARD_CONCURRENT, &mdev->flags);
+ mutex_init(&sock.mutex);
+ sock.sbuf = tconn->data.sbuf;
+ sock.rbuf = tconn->data.rbuf;
+ sock.socket = NULL;
+ mutex_init(&msock.mutex);
+ msock.sbuf = tconn->meta.sbuf;
+ msock.rbuf = tconn->meta.rbuf;
+ msock.socket = NULL;
+
+ /* Assume that the peer only understands protocol 80 until we know better. */
+ tconn->agreed_pro_version = 80;
- sock = NULL;
- msock = NULL;
+ if (prepare_listen_socket(tconn, &ad))
+ return 0;
do {
- for (try = 0;;) {
- /* 3 tries, this should take less than a second! */
- s = drbd_try_connect(mdev);
- if (s || ++try >= 3)
- break;
- /* give the other side time to call bind() & listen() */
- schedule_timeout_interruptible(HZ / 10);
- }
+ struct socket *s;
+ s = drbd_try_connect(tconn);
if (s) {
- if (!sock) {
- drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
- sock = s;
- s = NULL;
- } else if (!msock) {
- drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
- msock = s;
- s = NULL;
+ if (!sock.socket) {
+ sock.socket = s;
+ send_first_packet(tconn, &sock, P_INITIAL_DATA);
+ } else if (!msock.socket) {
+ clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ msock.socket = s;
+ send_first_packet(tconn, &msock, P_INITIAL_META);
} else {
- dev_err(DEV, "Logic error in drbd_connect()\n");
+ conn_err(tconn, "Logic error in conn_connect()\n");
goto out_release_sockets;
}
}
- if (sock && msock) {
- schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
- ok = drbd_socket_okay(mdev, &sock);
- ok = drbd_socket_okay(mdev, &msock) && ok;
+ if (sock.socket && msock.socket) {
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ timeout = nc->ping_timeo * HZ / 10;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeout);
+ ok = drbd_socket_okay(&sock.socket);
+ ok = drbd_socket_okay(&msock.socket) && ok;
if (ok)
break;
}
retry:
- s = drbd_wait_for_connect(mdev);
+ s = drbd_wait_for_connect(tconn, &ad);
if (s) {
- try = drbd_recv_fp(mdev, s);
- drbd_socket_okay(mdev, &sock);
- drbd_socket_okay(mdev, &msock);
- switch (try) {
- case P_HAND_SHAKE_S:
- if (sock) {
- dev_warn(DEV, "initial packet S crossed\n");
- sock_release(sock);
+ int fp = receive_first_packet(tconn, s);
+ drbd_socket_okay(&sock.socket);
+ drbd_socket_okay(&msock.socket);
+ switch (fp) {
+ case P_INITIAL_DATA:
+ if (sock.socket) {
+ conn_warn(tconn, "initial packet S crossed\n");
+ sock_release(sock.socket);
+ sock.socket = s;
+ goto randomize;
}
- sock = s;
+ sock.socket = s;
break;
- case P_HAND_SHAKE_M:
- if (msock) {
- dev_warn(DEV, "initial packet M crossed\n");
- sock_release(msock);
+ case P_INITIAL_META:
+ set_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ if (msock.socket) {
+ conn_warn(tconn, "initial packet M crossed\n");
+ sock_release(msock.socket);
+ msock.socket = s;
+ goto randomize;
}
- msock = s;
- set_bit(DISCARD_CONCURRENT, &mdev->flags);
+ msock.socket = s;
break;
default:
- dev_warn(DEV, "Error receiving initial packet\n");
+ conn_warn(tconn, "Error receiving initial packet\n");
sock_release(s);
+randomize:
if (random32() & 1)
goto retry;
}
}
- if (mdev->state.conn <= C_DISCONNECTING)
+ if (tconn->cstate <= C_DISCONNECTING)
goto out_release_sockets;
if (signal_pending(current)) {
flush_signals(current);
smp_rmb();
- if (get_t_state(&mdev->receiver) == Exiting)
+ if (get_t_state(&tconn->receiver) == EXITING)
goto out_release_sockets;
}
- if (sock && msock) {
- ok = drbd_socket_okay(mdev, &sock);
- ok = drbd_socket_okay(mdev, &msock) && ok;
- if (ok)
- break;
- }
- } while (1);
+ ok = drbd_socket_okay(&sock.socket);
+ ok = drbd_socket_okay(&msock.socket) && ok;
+ } while (!ok);
+
+ if (ad.s_listen)
+ sock_release(ad.s_listen);
- msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- sock->sk->sk_allocation = GFP_NOIO;
- msock->sk->sk_allocation = GFP_NOIO;
+ sock.socket->sk->sk_allocation = GFP_NOIO;
+ msock.socket->sk->sk_allocation = GFP_NOIO;
- sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
- msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
+ sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+ msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
/* NOT YET ...
- * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- * first set it to the P_HAND_SHAKE timeout,
+ * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
+ * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ * first set it to the P_CONNECTION_FEATURES timeout,
* which we set to 4x the configured ping_timeout. */
- sock->sk->sk_sndtimeo =
- sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+ sock.socket->sk->sk_sndtimeo =
+ sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
+
+ msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
+ timeout = nc->timeout * HZ / 10;
+ discard_my_data = nc->discard_my_data;
+ rcu_read_unlock();
+
+ msock.socket->sk->sk_sndtimeo = timeout;
/* we don't want delays.
* we use TCP_CORK where appropriate, though */
- drbd_tcp_nodelay(sock);
- drbd_tcp_nodelay(msock);
-
- mdev->data.socket = sock;
- mdev->meta.socket = msock;
- mdev->last_received = jiffies;
+ drbd_tcp_nodelay(sock.socket);
+ drbd_tcp_nodelay(msock.socket);
- D_ASSERT(mdev->asender.task == NULL);
+ tconn->data.socket = sock.socket;
+ tconn->meta.socket = msock.socket;
+ tconn->last_received = jiffies;
- h = drbd_do_handshake(mdev);
+ h = drbd_do_features(tconn);
if (h <= 0)
return h;
- if (mdev->cram_hmac_tfm) {
+ if (tconn->cram_hmac_tfm) {
/* drbd_request_state(mdev, NS(conn, WFAuth)); */
- switch (drbd_do_auth(mdev)) {
+ switch (drbd_do_auth(tconn)) {
case -1:
- dev_err(DEV, "Authentication of peer failed\n");
+ conn_err(tconn, "Authentication of peer failed\n");
return -1;
case 0:
- dev_err(DEV, "Authentication of peer failed, trying again.\n");
+ conn_err(tconn, "Authentication of peer failed, trying again.\n");
return 0;
}
}
- sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ tconn->data.socket->sk->sk_sndtimeo = timeout;
+ tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- atomic_set(&mdev->packet_seq, 0);
- mdev->peer_seq = 0;
-
- if (drbd_send_protocol(mdev) == -1)
+ if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
return -1;
- set_bit(STATE_SENT, &mdev->flags);
- drbd_send_sync_param(mdev, &mdev->sync_conf);
- drbd_send_sizes(mdev, 0, 0);
- drbd_send_uuids(mdev);
- drbd_send_current_state(mdev);
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- clear_bit(RESIZE_PENDING, &mdev->flags);
- spin_lock_irq(&mdev->req_lock);
- rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
- if (mdev->state.conn != C_WF_REPORT_PARAMS)
- clear_bit(STATE_SENT, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
+ set_bit(STATE_SENT, &tconn->flags);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ /* Prevent a race between resync-handshake and
+ * being promoted to Primary.
+ *
+ * Grab and release the state mutex, so we know that any current
+ * drbd_set_role() is finished, and any incoming drbd_set_role
+ * will see the STATE_SENT flag, and wait for it to be cleared.
+ */
+ mutex_lock(mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
+
+ rcu_read_unlock();
+
+ if (discard_my_data)
+ set_bit(DISCARD_MY_DATA, &mdev->flags);
+ else
+ clear_bit(DISCARD_MY_DATA, &mdev->flags);
+
+ drbd_connected(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
- if (rv < SS_SUCCESS)
+ rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
+ if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &tconn->flags);
return 0;
+ }
- drbd_thread_start(&mdev->asender);
- mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ drbd_thread_start(&tconn->asender);
- return 1;
+ mutex_lock(&tconn->conf_update);
+ /* The discard_my_data flag is a single-shot modifier to the next
+ * connection attempt, the handshake of which is now well underway.
+ * No need for rcu style copying of the whole struct
+ * just to clear a single value. */
+ tconn->net_conf->discard_my_data = 0;
+ mutex_unlock(&tconn->conf_update);
+
+ return h;
out_release_sockets:
- if (sock)
- sock_release(sock);
- if (msock)
- sock_release(msock);
+ if (ad.s_listen)
+ sock_release(ad.s_listen);
+ if (sock.socket)
+ sock_release(sock.socket);
+ if (msock.socket)
+ sock_release(msock.socket);
return -1;
}
-static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
+static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
- union p_header *h = &mdev->data.rbuf.header;
- int r;
-
- r = drbd_recv(mdev, h, sizeof(*h));
- if (unlikely(r != sizeof(*h))) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
- return false;
- }
-
- if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
- *cmd = be16_to_cpu(h->h80.command);
- *packet_size = be16_to_cpu(h->h80.length);
- } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
- *cmd = be16_to_cpu(h->h95.command);
- *packet_size = be32_to_cpu(h->h95.length);
+ unsigned int header_size = drbd_header_size(tconn);
+
+ if (header_size == sizeof(struct p_header100) &&
+ *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
+ struct p_header100 *h = header;
+ if (h->pad != 0) {
+ conn_err(tconn, "Header padding is not zero\n");
+ return -EINVAL;
+ }
+ pi->vnr = be16_to_cpu(h->volume);
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be32_to_cpu(h->length);
+ } else if (header_size == sizeof(struct p_header95) &&
+ *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
+ struct p_header95 *h = header;
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be32_to_cpu(h->length);
+ pi->vnr = 0;
+ } else if (header_size == sizeof(struct p_header80) &&
+ *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
+ struct p_header80 *h = header;
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be16_to_cpu(h->length);
+ pi->vnr = 0;
} else {
- dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->h80.magic),
- be16_to_cpu(h->h80.command),
- be16_to_cpu(h->h80.length));
- return false;
+ conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
+ be32_to_cpu(*(__be32 *)header),
+ tconn->agreed_pro_version);
+ return -EINVAL;
}
- mdev->last_received = jiffies;
+ pi->data = header + header_size;
+ return 0;
+}
- return true;
+static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ void *buffer = tconn->data.rbuf;
+ int err;
+
+ err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
+ if (err)
+ return err;
+
+ err = decode_header(tconn, buffer, pi);
+ tconn->last_received = jiffies;
+
+ return err;
}
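
For orientation, the three on-the-wire header shapes that decode_header() dispatches on can be sketched as plain C structs. The field widths below are inferred from the be16/be32 conversions above; the real kernel definitions are packed, fixed-endian structures and may differ in member names and ordering, so treat this purely as an illustration.

	#include <stdint.h>

	struct hdr80 {			/* old format, magic DRBD_MAGIC              */
		uint32_t magic;
		uint16_t command;
		uint16_t length;	/* only 16 bits of payload length            */
	};

	struct hdr95 {			/* "big packet" format, magic DRBD_MAGIC_BIG */
		uint16_t magic;
		uint16_t command;
		uint32_t length;	/* 32-bit payload length                     */
	};

	struct hdr100 {			/* protocol 100+, magic DRBD_MAGIC_100       */
		uint32_t magic;
		uint16_t volume;	/* feeds pi->vnr, selects the volume         */
		uint16_t command;
		uint32_t length;
		uint32_t pad;		/* must be zero, rejected otherwise          */
	};
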
-static void drbd_flush(struct drbd_conf *mdev)
+static void drbd_flush(struct drbd_tconn *tconn)
{
int rv;
+ struct drbd_conf *mdev;
+ int vnr;
- if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
- NULL);
- if (rv) {
- dev_info(DEV, "local disk flush failed with status %d\n", rv);
- /* would rather check on EOPNOTSUPP, but that is not reliable.
- * don't try again for ANY return value != 0
- * if (rv == -EOPNOTSUPP) */
- drbd_bump_write_ordering(mdev, WO_drain_io);
+ if (tconn->write_ordering >= WO_bdev_flush) {
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (!get_ldev(mdev))
+ continue;
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
+ GFP_NOIO, NULL);
+ if (rv) {
+ dev_info(DEV, "local disk flush failed with status %d\n", rv);
+ /* would rather check on EOPNOTSUPP, but that is not reliable.
+ * don't try again for ANY return value != 0
+ * if (rv == -EOPNOTSUPP) */
+ drbd_bump_write_ordering(tconn, WO_drain_io);
+ }
+ put_ldev(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+
+ rcu_read_lock();
+ if (rv)
+ break;
}
- put_ldev(mdev);
+ rcu_read_unlock();
}
}
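
The loop above introduces a pattern that recurs throughout this patch (conn_wait_active_ee_empty(), drbd_connected(), drbd_bump_write_ordering()): walk the connection's volume idr under RCU, pin each device with a kref, and drop the RCU read lock around work that may sleep. A generic sketch of that shape, reusing the same kernel helpers, might look like the following; it is a reading aid, not code from the patch.

	static void for_each_volume_may_sleep(struct drbd_tconn *tconn,
					      void (*work)(struct drbd_conf *mdev))
	{
		struct drbd_conf *mdev;
		int vnr;

		rcu_read_lock();
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			kref_get(&mdev->kref);		/* keep mdev alive without RCU */
			rcu_read_unlock();		/* work() is allowed to sleep  */

			work(mdev);

			kref_put(&mdev->kref, &drbd_minor_destroy);
			rcu_read_lock();		/* resume the protected walk   */
		}
		rcu_read_unlock();
	}
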
@@ -987,7 +1179,7 @@ static void drbd_flush(struct drbd_conf *mdev)
* @epoch: Epoch object.
* @ev: Epoch event.
*/
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
@@ -995,7 +1187,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
struct drbd_epoch *next_epoch;
enum finish_epoch rv = FE_STILL_LIVE;
- spin_lock(&mdev->epoch_lock);
+ spin_lock(&tconn->epoch_lock);
do {
next_epoch = NULL;
@@ -1017,18 +1209,22 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
atomic_read(&epoch->active) == 0 &&
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
- spin_unlock(&mdev->epoch_lock);
- drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
- spin_lock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
+ drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
+ spin_lock(&tconn->epoch_lock);
}
+#if 0
+ /* FIXME: dec unacked on connection, once we have
+ * something to count pending connection packets in. */
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
- dec_unacked(mdev);
+ dec_unacked(epoch->tconn);
+#endif
- if (mdev->current_epoch != epoch) {
+ if (tconn->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
- mdev->epochs--;
+ tconn->epochs--;
kfree(epoch);
if (rv == FE_STILL_LIVE)
@@ -1039,7 +1235,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
/* atomic_set(&epoch->active, 0); is already zero */
if (rv == FE_STILL_LIVE)
rv = FE_RECYCLED;
- wake_up(&mdev->ee_wait);
}
}
@@ -1049,40 +1244,52 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
epoch = next_epoch;
} while (1);
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
return rv;
}
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
- * @mdev: DRBD device.
+ * @tconn: DRBD connection.
* @wo: Write ordering method to try.
*/
-void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
+void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
{
+ struct disk_conf *dc;
+ struct drbd_conf *mdev;
enum write_ordering_e pwo;
+ int vnr;
static char *write_ordering_str[] = {
[WO_none] = "none",
[WO_drain_io] = "drain",
[WO_bdev_flush] = "flush",
};
- pwo = mdev->write_ordering;
+ pwo = tconn->write_ordering;
wo = min(pwo, wo);
- if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
- wo = WO_drain_io;
- if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
- wo = WO_none;
- mdev->write_ordering = wo;
- if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
- dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (!get_ldev_if_state(mdev, D_ATTACHING))
+ continue;
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+
+ if (wo == WO_bdev_flush && !dc->disk_flushes)
+ wo = WO_drain_io;
+ if (wo == WO_drain_io && !dc->disk_drain)
+ wo = WO_none;
+ put_ldev(mdev);
+ }
+ rcu_read_unlock();
+ tconn->write_ordering = wo;
+ if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
+ conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
}
/**
- * drbd_submit_ee()
+ * drbd_submit_peer_request()
* @mdev: DRBD device.
- * @e: epoch entry
+ * @peer_req: peer request
* @rw: flag field, see bio->bi_rw
*
* May spread the pages to multiple bios,
@@ -1096,14 +1303,15 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
* on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
-int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type)
+int drbd_submit_peer_request(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req,
+ const unsigned rw, const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
- struct page *page = e->pages;
- sector_t sector = e->sector;
- unsigned ds = e->size;
+ struct page *page = peer_req->pages;
+ sector_t sector = peer_req->i.sector;
+ unsigned ds = peer_req->i.size;
unsigned n_bios = 0;
unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
int err = -ENOMEM;
@@ -1122,12 +1330,12 @@ next_bio:
dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
goto fail;
}
- /* > e->sector, unless this is the first bio */
+ /* > peer_req->i.sector, unless this is the first bio */
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
bio->bi_rw = rw;
- bio->bi_private = e;
- bio->bi_end_io = drbd_endio_sec;
+ bio->bi_private = peer_req;
+ bio->bi_end_io = drbd_peer_request_endio;
bio->bi_next = bios;
bios = bio;
@@ -1156,7 +1364,7 @@ next_bio:
D_ASSERT(page == NULL);
D_ASSERT(ds == 0);
- atomic_set(&e->pending_bios, n_bios);
+ atomic_set(&peer_req->pending_bios, n_bios);
do {
bio = bios;
bios = bios->bi_next;
@@ -1175,26 +1383,57 @@ fail:
return err;
}
-static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_interval *i = &peer_req->i;
+
+ drbd_remove_interval(&mdev->write_requests, i);
+ drbd_clear_interval(i);
+
+ /* Wake up any processes waiting for this peer request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
+void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+}
+
+static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
{
int rv;
- struct p_barrier *p = &mdev->data.rbuf.barrier;
+ struct p_barrier *p = pi->data;
struct drbd_epoch *epoch;
- inc_unacked(mdev);
-
- mdev->current_epoch->barrier_nr = p->barrier;
- rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
+ /* FIXME these are unacked on connection,
+ * not a specific (peer)device.
+ */
+ tconn->current_epoch->barrier_nr = p->barrier;
+ tconn->current_epoch->tconn = tconn;
+ rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
* the activity log, which means it would not be resynced in case the
* R_PRIMARY crashes now.
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
- switch (mdev->write_ordering) {
+ switch (tconn->write_ordering) {
case WO_none:
if (rv == FE_RECYCLED)
- return true;
+ return 0;
/* receiver context, in the writeout path of the other node.
* avoid potential distributed deadlock */
@@ -1202,81 +1441,75 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
if (epoch)
break;
else
- dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+ conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
/* Fall through */
case WO_bdev_flush:
case WO_drain_io:
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- drbd_flush(mdev);
+ conn_wait_active_ee_empty(tconn);
+ drbd_flush(tconn);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
}
- epoch = mdev->current_epoch;
- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
-
- D_ASSERT(atomic_read(&epoch->active) == 0);
- D_ASSERT(epoch->flags == 0);
-
- return true;
+ return 0;
default:
- dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
- return false;
+ conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
+ return -EIO;
}
epoch->flags = 0;
atomic_set(&epoch->epoch_size, 0);
atomic_set(&epoch->active, 0);
- spin_lock(&mdev->epoch_lock);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
- list_add(&epoch->list, &mdev->current_epoch->list);
- mdev->current_epoch = epoch;
- mdev->epochs++;
+ spin_lock(&tconn->epoch_lock);
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &tconn->current_epoch->list);
+ tconn->current_epoch = epoch;
+ tconn->epochs++;
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
}
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
- return true;
+ return 0;
}
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data */
-static struct drbd_epoch_entry *
-read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
+static struct drbd_peer_request *
+read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
+ int data_size) __must_hold(local)
{
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct page *page;
- int dgs, ds, rr;
- void *dig_in = mdev->int_dig_in;
- void *dig_vv = mdev->int_dig_vv;
+ int dgs, ds, err;
+ void *dig_in = mdev->tconn->int_dig_in;
+ void *dig_vv = mdev->tconn->int_dig_vv;
unsigned long *data;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
-
- if (dgs) {
- rr = drbd_recv(mdev, dig_in, dgs);
- if (rr != dgs) {
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data digest: read %d expected %d\n",
- rr, dgs);
+ dgs = 0;
+ if (mdev->tconn->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ /*
+ * FIXME: Receive the incoming digest into the receive buffer
+ * here, together with its struct p_data?
+ */
+ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (err)
return NULL;
- }
+ data_size -= dgs;
}
- data_size -= dgs;
-
- ERR_IF(data_size & 0x1ff) return NULL;
- ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
+ if (!expect(IS_ALIGNED(data_size, 512)))
+ return NULL;
+ if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
+ return NULL;
 /* even though we trust our peer,
* we sometimes have to double check. */
@@ -1291,47 +1524,42 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
- if (!e)
+ peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
+ if (!peer_req)
return NULL;
if (!data_size)
- return e;
+ return peer_req;
ds = data_size;
- page = e->pages;
+ page = peer_req->pages;
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
- rr = drbd_recv(mdev, data, len);
+ err = drbd_recv_all_warn(mdev->tconn, data, len);
if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
dev_err(DEV, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
kunmap(page);
- if (rr != len) {
- drbd_free_ee(mdev, e);
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, len);
+ if (err) {
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
- ds -= rr;
+ ds -= len;
}
if (dgs) {
- drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
+ drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
- drbd_bcast_ee(mdev, "digest failed",
- dgs, dig_in, dig_vv, e);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
}
mdev->recv_cnt += data_size>>9;
- return e;
+ return peer_req;
}
/* drbd_drain_block() just takes a data block
@@ -1340,30 +1568,26 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
struct page *page;
- int rr, rv = 1;
+ int err = 0;
void *data;
if (!data_size)
- return true;
+ return 0;
- page = drbd_pp_alloc(mdev, 1, 1);
+ page = drbd_alloc_pages(mdev, 1, 1);
data = kmap(page);
while (data_size) {
- rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
- if (rr != min_t(int, data_size, PAGE_SIZE)) {
- rv = 0;
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data: read %d expected %d\n",
- rr, min_t(int, data_size, PAGE_SIZE));
+ unsigned int len = min_t(int, data_size, PAGE_SIZE);
+
+ err = drbd_recv_all_warn(mdev->tconn, data, len);
+ if (err)
break;
- }
- data_size -= rr;
+ data_size -= len;
}
kunmap(page);
- drbd_pp_free(mdev, page, 0);
- return rv;
+ drbd_free_pages(mdev, page, 0);
+ return err;
}
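
The drain loop above exists only to keep the byte stream in sync when a payload cannot be used (for example, when there is no local disk). A userspace analogue, assuming a hypothetical read_full() helper in place of drbd_recv_all_warn(), shows the same chunked read-and-discard shape:

	#include <errno.h>
	#include <stddef.h>
	#include <unistd.h>

	#define CHUNK 4096

	/* hypothetical stand-in for drbd_recv_all_warn(): read exactly len bytes */
	static int read_full(int fd, void *buf, size_t len)
	{
		char *p = buf;

		while (len) {
			ssize_t n = read(fd, p, len);

			if (n <= 0)
				return n == 0 ? -EIO : -errno;
			p += n;
			len -= n;
		}
		return 0;
	}

	/* read and throw away `size` bytes so later packets stay aligned */
	static int drain(int fd, size_t size)
	{
		char scratch[CHUNK];

		while (size) {
			size_t len = size < CHUNK ? size : CHUNK;
			int err = read_full(fd, scratch, len);

			if (err)
				return err;
			size -= len;
		}
		return 0;
	}
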
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
@@ -1371,26 +1595,19 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
{
struct bio_vec *bvec;
struct bio *bio;
- int dgs, rr, i, expect;
- void *dig_in = mdev->int_dig_in;
- void *dig_vv = mdev->int_dig_vv;
-
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
+ int dgs, err, i, expect;
+ void *dig_in = mdev->tconn->int_dig_in;
+ void *dig_vv = mdev->tconn->int_dig_vv;
- if (dgs) {
- rr = drbd_recv(mdev, dig_in, dgs);
- if (rr != dgs) {
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data reply digest: read %d expected %d\n",
- rr, dgs);
- return 0;
- }
+ dgs = 0;
+ if (mdev->tconn->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (err)
+ return err;
+ data_size -= dgs;
}
- data_size -= dgs;
-
/* optimistically update recv_cnt. if receiving fails below,
* we disconnect anyways, and counters will be reset. */
mdev->recv_cnt += data_size>>9;
@@ -1399,63 +1616,61 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
D_ASSERT(sector == bio->bi_sector);
bio_for_each_segment(bvec, bio, i) {
+ void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
expect = min_t(int, data_size, bvec->bv_len);
- rr = drbd_recv(mdev,
- kmap(bvec->bv_page)+bvec->bv_offset,
- expect);
+ err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
kunmap(bvec->bv_page);
- if (rr != expect) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving data reply: "
- "read %d expected %d\n",
- rr, expect);
- return 0;
- }
- data_size -= rr;
+ if (err)
+ return err;
+ data_size -= expect;
}
if (dgs) {
- drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
- return 0;
+ return -EINVAL;
}
}
D_ASSERT(data_size == 0);
- return 1;
+ return 0;
}
-/* e_end_resync_block() is called via
- * drbd_process_done_ee() by asender only */
-static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+/*
+ * e_end_resync_block() is called in asender context via
+ * drbd_finish_peer_reqs().
+ */
+static int e_end_resync_block(struct drbd_work *w, int unused)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- sector_t sector = e->sector;
- int ok;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ int err;
- D_ASSERT(hlist_unhashed(&e->collision));
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- drbd_set_in_sync(mdev, sector, e->size);
- ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ drbd_set_in_sync(mdev, sector, peer_req->i.size);
+ err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
} else {
/* Record failure to sync */
- drbd_rs_failed_io(mdev, sector, e->size);
+ drbd_rs_failed_io(mdev, sector, peer_req->i.size);
- ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+ err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
}
dec_unacked(mdev);
- return ok;
+ return err;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
- e = read_in_block(mdev, ID_SYNCER, sector, data_size);
- if (!e)
+ peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
+ if (!peer_req)
goto fail;
dec_rs_pending(mdev);
@@ -1464,64 +1679,88 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
- e->w.cb = e_end_resync_block;
+ peer_req->w.cb = e_end_resync_block;
- spin_lock_irq(&mdev->req_lock);
- list_add(&e->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->sync_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(data_size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
- return true;
+ if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
fail:
put_ldev(mdev);
- return false;
+ return -EIO;
}
-static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static struct drbd_request *
+find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
+ sector_t sector, bool missing_ok, const char *func)
{
struct drbd_request *req;
+
+ /* Request object according to our peer */
+ req = (struct drbd_request *)(unsigned long)id;
+ if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
+ return req;
+ if (!missing_ok) {
+ dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
+ (unsigned long)id, (unsigned long long)sector);
+ }
+ return NULL;
+}
+
+static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct drbd_request *req;
sector_t sector;
- int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ int err;
+ struct p_data *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
- spin_lock_irq(&mdev->req_lock);
- req = _ar_id_to_req(mdev, p->block_id, sector);
- spin_unlock_irq(&mdev->req_lock);
- if (unlikely(!req)) {
- dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
- return false;
- }
+ spin_lock_irq(&mdev->tconn->req_lock);
+ req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (unlikely(!req))
+ return -EIO;
/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
* special casing it there for the various failure cases.
* still no race with drbd_fail_pending_reads */
- ok = recv_dless_read(mdev, req, sector, data_size);
-
- if (ok)
- req_mod(req, data_received);
+ err = recv_dless_read(mdev, req, sector, pi->size);
+ if (!err)
+ req_mod(req, DATA_RECEIVED);
/* else: nothing. handled from drbd_disconnect...
* I don't think we may complete this just yet
* in case we are "on-disconnect: freeze" */
- return ok;
+ return err;
}
-static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ int err;
+ struct p_data *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
D_ASSERT(p->block_id == ID_SYNCER);
@@ -1529,42 +1768,63 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, un
if (get_ldev(mdev)) {
/* data is submitted to disk within recv_resync_read.
* corresponding put_ldev done below on error,
- * or in drbd_endio_write_sec. */
- ok = recv_resync_read(mdev, sector, data_size);
+ * or in drbd_peer_request_endio. */
+ err = recv_resync_read(mdev, sector, pi->size);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not write resync data to local disk.\n");
- ok = drbd_drain_block(mdev, data_size);
+ err = drbd_drain_block(mdev, pi->size);
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
}
- atomic_add(data_size >> 9, &mdev->rs_sect_in);
+ atomic_add(pi->size >> 9, &mdev->rs_sect_in);
- return ok;
+ return err;
}
-/* e_end_block() is called via drbd_process_done_ee().
- * this means this function only runs in the asender thread
- */
-static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static void restart_conflicting_writes(struct drbd_conf *mdev,
+ sector_t sector, int size)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- sector_t sector = e->sector;
- int ok = 1, pcmd;
+ struct drbd_interval *i;
+ struct drbd_request *req;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ if (!i->local)
+ continue;
+ req = container_of(i, struct drbd_request, i);
+ if (req->rq_state & RQ_LOCAL_PENDING ||
+ !(req->rq_state & RQ_POSTPONED))
+ continue;
+ /* as it is RQ_POSTPONED, this will cause it to
+ * be queued on the retry workqueue. */
+ __req_mod(req, CONFLICT_RESOLVED, NULL);
+ }
+}
+
+/*
+ * e_end_block() is called in asender context via drbd_finish_peer_reqs().
+ */
+static int e_end_block(struct drbd_work *w, int cancel)
+{
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ int err = 0, pcmd;
+
+ if (peer_req->flags & EE_SEND_WRITE_ACK) {
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T &&
- e->flags & EE_MAY_SET_IN_SYNC) ?
+ peer_req->flags & EE_MAY_SET_IN_SYNC) ?
P_RS_WRITE_ACK : P_WRITE_ACK;
- ok &= drbd_send_ack(mdev, pcmd, e);
+ err = drbd_send_ack(mdev, pcmd, peer_req);
if (pcmd == P_RS_WRITE_ACK)
- drbd_set_in_sync(mdev, sector, e->size);
+ drbd_set_in_sync(mdev, sector, peer_req->i.size);
} else {
- ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+ err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
/* we expect it to be marked out of sync anyways...
* maybe assert this? */
}
@@ -1572,52 +1832,115 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
- if (mdev->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
- D_ASSERT(!hlist_unhashed(&e->collision));
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
- } else {
- D_ASSERT(hlist_unhashed(&e->collision));
- }
+ if (peer_req->flags & EE_IN_INTERVAL_TREE) {
+ spin_lock_irq(&mdev->tconn->req_lock);
+ D_ASSERT(!drbd_interval_empty(&peer_req->i));
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ if (peer_req->flags & EE_RESTART_REQUESTS)
+ restart_conflicting_writes(mdev, sector, peer_req->i.size);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ } else
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
- drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+ drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
- return ok;
+ return err;
}
-static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- int ok = 1;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ int err;
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
+ err = drbd_send_ack(mdev, ack, peer_req);
+ dec_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
- D_ASSERT(!hlist_unhashed(&e->collision));
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
+ return err;
+}
- dec_unacked(mdev);
+static int e_send_superseded(struct drbd_work *w, int unused)
+{
+ return e_send_ack(w, P_SUPERSEDED);
+}
+
+static int e_send_retry_write(struct drbd_work *w, int unused)
+{
+ struct drbd_tconn *tconn = w->mdev->tconn;
+
+ return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
+ P_RETRY_WRITE : P_SUPERSEDED);
+}
+
+static bool seq_greater(u32 a, u32 b)
+{
+ /*
+ * We assume 32-bit wrap-around here.
+ * For 24-bit wrap-around, we would have to shift:
+ * a <<= 8; b <<= 8;
+ */
+ return (s32)a - (s32)b > 0;
+}
+
+static u32 seq_max(u32 a, u32 b)
+{
+ return seq_greater(a, b) ? a : b;
+}
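
The signed-difference trick in seq_greater() deserves a worked example: sequence numbers are 32-bit and may wrap, so a plain `a > b` would misorder packets around the wrap point, while interpreting the difference as signed keeps the ordering correct as long as the two numbers are less than 2^31 apart. A standalone demo (seq_greater_demo() mirrors the helper above in a form that avoids signed overflow):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool seq_greater_demo(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;	/* same ordering as (s32)a - (s32)b > 0 */
	}

	int main(void)
	{
		printf("%d\n", seq_greater_demo(11, 10));		/* 1: plain case          */
		printf("%d\n", seq_greater_demo(5, 0xfffffffeU));	/* 1: 5 came after a wrap */
		printf("%d\n", seq_greater_demo(0xfffffffeU, 5));	/* 0: and not vice versa  */
		return 0;
	}
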
+
+static bool need_peer_seq(struct drbd_conf *mdev)
+{
+ struct drbd_tconn *tconn = mdev->tconn;
+ int tp;
- return ok;
+ /*
+ * We only need to keep track of the last packet_seq number of our peer
+ * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
+ * handle_write_conflicts().
+ */
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+
+ return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
}
-static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
{
+ unsigned int newest_peer_seq;
- struct drbd_epoch_entry *rs_e;
+ if (need_peer_seq(mdev)) {
+ spin_lock(&mdev->peer_seq_lock);
+ newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ mdev->peer_seq = newest_peer_seq;
+ spin_unlock(&mdev->peer_seq_lock);
+ /* wake up only if we actually changed mdev->peer_seq */
+ if (peer_seq == newest_peer_seq)
+ wake_up(&mdev->seq_wait);
+ }
+}
+
+static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
+{
+ return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
+}
+
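
Note that overlaps() mixes units: the start arguments are 512-byte sectors while the lengths are byte counts, hence the `>> 9` conversions. A tiny standalone check of the same expression, with invented values:

	#include <stdio.h>

	typedef unsigned long long sector_t;	/* stand-in for the kernel type */

	static int overlaps_demo(sector_t s1, int l1, sector_t s2, int l2)
	{
		return !((s1 + (l1 >> 9) <= s2) || (s1 >= s2 + (l2 >> 9)));
	}

	int main(void)
	{
		/* a 4 KiB write starting at sector 0 covers sectors 0..7 */
		printf("%d\n", overlaps_demo(0, 4096, 7, 512));	/* 1: sector 7 is inside   */
		printf("%d\n", overlaps_demo(0, 4096, 8, 512));	/* 0: the ranges only abut */
		return 0;
	}
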
+/* maybe change sync_ee into interval trees as well? */
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+{
+ struct drbd_peer_request *rs_req;
bool rv = 0;
- spin_lock_irq(&mdev->req_lock);
- list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
- if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
+ if (overlaps(peer_req->i.sector, peer_req->i.size,
+ rs_req->i.sector, rs_req->i.size)) {
rv = 1;
break;
}
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return rv;
}
@@ -1643,35 +1966,41 @@ static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_e
*
* returns 0 if we may process the packet,
* -ERESTARTSYS if we were interrupted (by disconnect signal). */
-static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
+static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
{
DEFINE_WAIT(wait);
- unsigned int p_seq;
long timeout;
- int ret = 0;
+ int ret;
+
+ if (!need_peer_seq(mdev))
+ return 0;
+
spin_lock(&mdev->peer_seq_lock);
for (;;) {
- prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
- if (seq_le(packet_seq, mdev->peer_seq+1))
+ if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
+ mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ ret = 0;
break;
+ }
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
- p_seq = mdev->peer_seq;
+ prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock(&mdev->peer_seq_lock);
- timeout = schedule_timeout(30*HZ);
+ rcu_read_lock();
+ timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
+ rcu_read_unlock();
+ timeout = schedule_timeout(timeout);
spin_lock(&mdev->peer_seq_lock);
- if (timeout == 0 && p_seq == mdev->peer_seq) {
+ if (!timeout) {
ret = -ETIMEDOUT;
- dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
+ dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
break;
}
}
- finish_wait(&mdev->seq_wait, &wait);
- if (mdev->peer_seq+1 == packet_seq)
- mdev->peer_seq++;
spin_unlock(&mdev->peer_seq_lock);
+ finish_wait(&mdev->seq_wait, &wait);
return ret;
}
@@ -1686,233 +2015,277 @@ static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
+static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
+ unsigned int size)
+{
+ struct drbd_interval *i;
+
+ repeat:
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ struct drbd_request *req;
+ struct bio_and_error m;
+
+ if (!i->local)
+ continue;
+ req = container_of(i, struct drbd_request, i);
+ if (!(req->rq_state & RQ_POSTPONED))
+ continue;
+ req->rq_state &= ~RQ_POSTPONED;
+ __req_mod(req, NEG_ACKED, &m);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (m.bio)
+ complete_master_bio(mdev, &m);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ goto repeat;
+ }
+}
+
+static int handle_write_conflicts(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_tconn *tconn = mdev->tconn;
+ bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ sector_t sector = peer_req->i.sector;
+ const unsigned int size = peer_req->i.size;
+ struct drbd_interval *i;
+ bool equal;
+ int err;
+
+ /*
+ * Inserting the peer request into the write_requests tree will prevent
+ * new conflicting local requests from being added.
+ */
+ drbd_insert_interval(&mdev->write_requests, &peer_req->i);
+
+ repeat:
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ if (i == &peer_req->i)
+ continue;
+
+ if (!i->local) {
+ /*
+ * Our peer has sent a conflicting remote request; this
+ * should not happen in a two-node setup. Wait for the
+ * earlier peer request to complete.
+ */
+ err = drbd_wait_misc(mdev, i);
+ if (err)
+ goto out;
+ goto repeat;
+ }
+
+ equal = i->sector == sector && i->size == size;
+ if (resolve_conflicts) {
+ /*
+ * If the peer request is fully contained within the
+ * overlapping request, it can be considered overwritten
+ * and thus superseded; otherwise, it will be retried
+ * once all overlapping requests have completed.
+ */
+ bool superseded = i->sector <= sector && i->sector +
+ (i->size >> 9) >= sector + (size >> 9);
+
+ if (!equal)
+ dev_alert(DEV, "Concurrent writes detected: "
+ "local=%llus +%u, remote=%llus +%u, "
+ "assuming %s came first\n",
+ (unsigned long long)i->sector, i->size,
+ (unsigned long long)sector, size,
+ superseded ? "local" : "remote");
+
+ inc_unacked(mdev);
+ peer_req->w.cb = superseded ? e_send_superseded :
+ e_send_retry_write;
+ list_add_tail(&peer_req->w.list, &mdev->done_ee);
+ wake_asender(mdev->tconn);
+
+ err = -ENOENT;
+ goto out;
+ } else {
+ struct drbd_request *req =
+ container_of(i, struct drbd_request, i);
+
+ if (!equal)
+ dev_alert(DEV, "Concurrent writes detected: "
+ "local=%llus +%u, remote=%llus +%u\n",
+ (unsigned long long)i->sector, i->size,
+ (unsigned long long)sector, size);
+
+ if (req->rq_state & RQ_LOCAL_PENDING ||
+ !(req->rq_state & RQ_POSTPONED)) {
+ /*
+ * Wait for the node with the discard flag to
+ * decide if this request has been superseded
+ * or needs to be retried.
+ * Requests that have been superseded will
+ * disappear from the write_requests tree.
+ *
+ * In addition, wait for the conflicting
+ * request to finish locally before submitting
+ * the conflicting peer request.
+ */
+ err = drbd_wait_misc(mdev, &req->i);
+ if (err) {
+ _conn_request_state(mdev->tconn,
+ NS(conn, C_TIMEOUT),
+ CS_HARD);
+ fail_postponed_requests(mdev, sector, size);
+ goto out;
+ }
+ goto repeat;
+ }
+ /*
+ * Remember to restart the conflicting requests after
+ * the new peer request has completed.
+ */
+ peer_req->flags |= EE_RESTART_REQUESTS;
+ }
+ }
+ err = 0;
+
+ out:
+ if (err)
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ return err;
+}
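
The `superseded` test above only fires when the existing local request fully contains the incoming peer write; a partially overlapping peer write is instead retried once the conflict has drained. A standalone demo of that containment check, using made-up sector numbers:

	#include <stdio.h>

	typedef unsigned long long sector_t;	/* stand-in for the kernel type */

	/* does the local request [lsec, lsec + lsize/512) fully contain the peer write? */
	static int fully_contains(sector_t lsec, unsigned lsize,
				  sector_t psec, unsigned psize)
	{
		return lsec <= psec &&
		       lsec + (lsize >> 9) >= psec + (psize >> 9);
	}

	int main(void)
	{
		/* local 8 KiB at sector 0 vs. peer 4 KiB at sector 8: contained */
		printf("%d\n", fully_contains(0, 8192, 8, 4096));	/* 1 -> P_SUPERSEDED */
		/* local 4 KiB at sector 0 vs. peer 8 KiB at sector 0: only partial */
		printf("%d\n", fully_contains(0, 4096, 0, 8192));	/* 0 -> retry later  */
		return 0;
	}
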
+
/* mirrored write */
-static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- struct drbd_epoch_entry *e;
- struct p_data *p = &mdev->data.rbuf.data;
+ struct drbd_peer_request *peer_req;
+ struct p_data *p = pi->data;
+ u32 peer_seq = be32_to_cpu(p->seq_num);
int rw = WRITE;
u32 dp_flags;
+ int err, tp;
- if (!get_ldev(mdev)) {
- spin_lock(&mdev->peer_seq_lock);
- if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
- mdev->peer_seq++;
- spin_unlock(&mdev->peer_seq_lock);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
- atomic_inc(&mdev->current_epoch->epoch_size);
- return drbd_drain_block(mdev, data_size);
+ if (!get_ldev(mdev)) {
+ int err2;
+
+ err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
+ atomic_inc(&tconn->current_epoch->epoch_size);
+ err2 = drbd_drain_block(mdev, pi->size);
+ if (!err)
+ err = err2;
+ return err;
}
- /* get_ldev(mdev) successful.
- * Corresponding put_ldev done either below (on various errors),
- * or in drbd_endio_write_sec, if we successfully submit the data at
- * the end of this function. */
+ /*
+ * Corresponding put_ldev done either below (on various errors), or in
+ * drbd_peer_request_endio, if we successfully submit the data at the
+ * end of this function.
+ */
sector = be64_to_cpu(p->sector);
- e = read_in_block(mdev, p->block_id, sector, data_size);
- if (!e) {
+ peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
+ if (!peer_req) {
put_ldev(mdev);
- return false;
+ return -EIO;
}
- e->w.cb = e_end_block;
+ peer_req->w.cb = e_end_block;
dp_flags = be32_to_cpu(p->dp_flags);
rw |= wire_flags_to_bio(mdev, dp_flags);
- if (e->pages == NULL) {
- D_ASSERT(e->size == 0);
+ if (peer_req->pages == NULL) {
+ D_ASSERT(peer_req->i.size == 0);
D_ASSERT(dp_flags & DP_FLUSH);
}
if (dp_flags & DP_MAY_SET_IN_SYNC)
- e->flags |= EE_MAY_SET_IN_SYNC;
-
- spin_lock(&mdev->epoch_lock);
- e->epoch = mdev->current_epoch;
- atomic_inc(&e->epoch->epoch_size);
- atomic_inc(&e->epoch->active);
- spin_unlock(&mdev->epoch_lock);
-
- /* I'm the receiver, I do hold a net_cnt reference. */
- if (!mdev->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
- } else {
- /* don't get the req_lock yet,
- * we may sleep in drbd_wait_peer_seq */
- const int size = e->size;
- const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
- DEFINE_WAIT(wait);
- struct drbd_request *i;
- struct hlist_node *n;
- struct hlist_head *slot;
- int first;
-
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- BUG_ON(mdev->ee_hash == NULL);
- BUG_ON(mdev->tl_hash == NULL);
-
- /* conflict detection and handling:
- * 1. wait on the sequence number,
- * in case this data packet overtook ACK packets.
- * 2. check our hash tables for conflicting requests.
- * we only need to walk the tl_hash, since an ee can not
- * have a conflict with an other ee: on the submitting
- * node, the corresponding req had already been conflicting,
- * and a conflicting req is never sent.
- *
- * Note: for two_primaries, we are protocol C,
- * so there cannot be any request that is DONE
- * but still on the transfer log.
- *
- * unconditionally add to the ee_hash.
- *
- * if no conflicting request is found:
- * submit.
- *
- * if any conflicting request is found
- * that has not yet been acked,
- * AND I have the "discard concurrent writes" flag:
- * queue (via done_ee) the P_DISCARD_ACK; OUT.
- *
- * if any conflicting request is found:
- * block the receiver, waiting on misc_wait
- * until no more conflicting requests are there,
- * or we get interrupted (disconnect).
- *
- * we do not just write after local io completion of those
- * requests, but only after req is done completely, i.e.
- * we wait for the P_DISCARD_ACK to arrive!
- *
- * then proceed normally, i.e. submit.
- */
- if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
+ peer_req->flags |= EE_MAY_SET_IN_SYNC;
+
+ spin_lock(&tconn->epoch_lock);
+ peer_req->epoch = tconn->current_epoch;
+ atomic_inc(&peer_req->epoch->epoch_size);
+ atomic_inc(&peer_req->epoch->active);
+ spin_unlock(&tconn->epoch_lock);
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+ if (tp) {
+ peer_req->flags |= EE_IN_INTERVAL_TREE;
+ err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ if (err)
goto out_interrupted;
-
- spin_lock_irq(&mdev->req_lock);
-
- hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
-
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
- slot = tl_hash_slot(mdev, sector);
- first = 1;
- for (;;) {
- int have_unacked = 0;
- int have_conflict = 0;
- prepare_to_wait(&mdev->misc_wait, &wait,
- TASK_INTERRUPTIBLE);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- /* only ALERT on first iteration,
- * we may be woken up early... */
- if (first)
- dev_alert(DEV, "%s[%u] Concurrent local write detected!"
- " new: %llus +%u; pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
- if (i->rq_state & RQ_NET_PENDING)
- ++have_unacked;
- ++have_conflict;
- }
- }
-#undef OVERLAPS
- if (!have_conflict)
- break;
-
- /* Discard Ack only for the _first_ iteration */
- if (first && discard && have_unacked) {
- dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
- (unsigned long long)sector);
- inc_unacked(mdev);
- e->w.cb = e_send_discard_ack;
- list_add_tail(&e->w.list, &mdev->done_ee);
-
- spin_unlock_irq(&mdev->req_lock);
-
- /* we could probably send that P_DISCARD_ACK ourselves,
- * but I don't like the receiver using the msock */
-
+ spin_lock_irq(&mdev->tconn->req_lock);
+ err = handle_write_conflicts(mdev, peer_req);
+ if (err) {
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (err == -ENOENT) {
put_ldev(mdev);
- wake_asender(mdev);
- finish_wait(&mdev->misc_wait, &wait);
- return true;
+ return 0;
}
+ goto out_interrupted;
+ }
+ } else
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->active_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- if (signal_pending(current)) {
- hlist_del_init(&e->collision);
-
- spin_unlock_irq(&mdev->req_lock);
-
- finish_wait(&mdev->misc_wait, &wait);
- goto out_interrupted;
- }
+ if (mdev->state.conn == C_SYNC_TARGET)
+ wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
- spin_unlock_irq(&mdev->req_lock);
- if (first) {
- first = 0;
- dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
- "sec=%llus\n", (unsigned long long)sector);
- } else if (discard) {
- /* we had none on the first iteration.
- * there must be none now. */
- D_ASSERT(have_unacked == 0);
- }
- schedule();
- spin_lock_irq(&mdev->req_lock);
+ if (mdev->tconn->agreed_pro_version < 100) {
+ rcu_read_lock();
+ switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
+ case DRBD_PROT_C:
+ dp_flags |= DP_SEND_WRITE_ACK;
+ break;
+ case DRBD_PROT_B:
+ dp_flags |= DP_SEND_RECEIVE_ACK;
+ break;
}
- finish_wait(&mdev->misc_wait, &wait);
+ rcu_read_unlock();
}
- list_add(&e->w.list, &mdev->active_ee);
- spin_unlock_irq(&mdev->req_lock);
-
- if (mdev->state.conn == C_SYNC_TARGET)
- wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
-
- switch (mdev->net_conf->wire_protocol) {
- case DRBD_PROT_C:
+ if (dp_flags & DP_SEND_WRITE_ACK) {
+ peer_req->flags |= EE_SEND_WRITE_ACK;
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_block()
* respective _drbd_clear_done_ee */
- break;
- case DRBD_PROT_B:
+ }
+
+ if (dp_flags & DP_SEND_RECEIVE_ACK) {
/* I really don't like it that the receiver thread
* sends on the msock, but anyways */
- drbd_send_ack(mdev, P_RECV_ACK, e);
- break;
- case DRBD_PROT_A:
- /* nothing to do */
- break;
+ drbd_send_ack(mdev, P_RECV_ACK, peer_req);
}
if (mdev->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
- drbd_set_out_of_sync(mdev, e->sector, e->size);
- e->flags |= EE_CALL_AL_COMPLETE_IO;
- e->flags &= ~EE_MAY_SET_IN_SYNC;
- drbd_al_begin_io(mdev, e->sector);
+ drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
+ peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
+ peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
+ drbd_al_begin_io(mdev, &peer_req->i);
}
- if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
- return true;
+ err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
+ if (!err)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
- if (e->flags & EE_CALL_AL_COMPLETE_IO)
- drbd_al_complete_io(mdev, e->sector);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
+ drbd_al_complete_io(mdev, &peer_req->i);
out_interrupted:
- drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
+ drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
put_ldev(mdev);
- drbd_free_ee(mdev, e);
- return false;
+ drbd_free_peer_req(mdev, peer_req);
+ return err;
}
/* We may throttle resync, if the lower device seems to be busy,
@@ -1933,9 +2306,14 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
struct lc_element *tmp;
int curr_events;
int throttle = 0;
+ unsigned int c_min_rate;
+
+ rcu_read_lock();
+ c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
+ rcu_read_unlock();
/* feature disabled? */
- if (mdev->sync_conf.c_min_rate == 0)
+ if (c_min_rate == 0)
return 0;
spin_lock_irq(&mdev->al_lock);
@@ -1975,40 +2353,46 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
db = mdev->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
- if (dbdt > mdev->sync_conf.c_min_rate)
+ if (dbdt > c_min_rate)
throttle = 1;
}
return throttle;
}
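
To make the rate check above concrete: db counts bitmap bits resynced since the last mark and dt is measured in seconds, so Bit2KB(db/dt) is the recent resync throughput in KiB/s, assuming the usual 4 KiB bitmap granularity (i.e. Bit2KB(x) == 4 * x). The backing-device-busy part of the decision lives outside this hunk; the throwaway calculation below, with invented numbers and an example c_min_rate, only illustrates the rate comparison:

	#include <stdio.h>

	int main(void)
	{
		unsigned long db = 2500;	 /* bitmap bits resynced since the mark */
		unsigned long dt = 1;		 /* seconds elapsed since that mark     */
		unsigned long c_min_rate = 4000; /* configured floor in KiB/s (example) */

		unsigned long dbdt = (db / dt) * 4;	/* ~ Bit2KB(db/dt) in KiB/s */

		printf("recent resync rate: %lu KiB/s -> %s\n", dbdt,
		       dbdt > c_min_rate ? "fast enough, resync may be throttled"
					 : "below c-min-rate, do not throttle");
		return 0;
	}
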
-static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
+static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ sector_t capacity;
+ struct drbd_peer_request *peer_req;
struct digest_info *di = NULL;
int size, verb;
unsigned int fault_type;
- struct p_block_req *p = &mdev->data.rbuf.block_req;
+ struct p_block_req *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+ capacity = drbd_get_capacity(mdev->this_bdev);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return false;
+ return -EINVAL;
}
if (sector + (size>>9) > capacity) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return false;
+ return -EINVAL;
}
if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
verb = 1;
- switch (cmd) {
+ switch (pi->cmd) {
case P_DATA_REQUEST:
drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
break;
@@ -2023,35 +2407,34 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
break;
default:
- dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
- cmdname(cmd));
+ BUG();
}
if (verb && __ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not satisfy peer's read request, "
"no local data.\n");
/* drain possibly payload */
- return drbd_drain_block(mdev, digest_size);
+ return drbd_drain_block(mdev, pi->size);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
- if (!e) {
+ peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
+ if (!peer_req) {
put_ldev(mdev);
- return false;
+ return -ENOMEM;
}
- switch (cmd) {
+ switch (pi->cmd) {
case P_DATA_REQUEST:
- e->w.cb = w_e_end_data_req;
+ peer_req->w.cb = w_e_end_data_req;
fault_type = DRBD_FAULT_DT_RD;
/* application IO, don't drbd_rs_begin_io */
goto submit;
case P_RS_DATA_REQUEST:
- e->w.cb = w_e_end_rsdata_req;
+ peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2060,28 +2443,28 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
case P_OV_REPLY:
case P_CSUM_RS_REQUEST:
fault_type = DRBD_FAULT_RS_RD;
- di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
+ di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
if (!di)
goto out_free_e;
- di->digest_size = digest_size;
+ di->digest_size = pi->size;
di->digest = (((char *)di)+sizeof(struct digest_info));
- e->digest = di;
- e->flags |= EE_HAS_DIGEST;
+ peer_req->digest = di;
+ peer_req->flags |= EE_HAS_DIGEST;
- if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
+ if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
goto out_free_e;
- if (cmd == P_CSUM_RS_REQUEST) {
- D_ASSERT(mdev->agreed_pro_version >= 89);
- e->w.cb = w_e_end_csum_rs_req;
+ if (pi->cmd == P_CSUM_RS_REQUEST) {
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
+ peer_req->w.cb = w_e_end_csum_rs_req;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
- } else if (cmd == P_OV_REPLY) {
+ } else if (pi->cmd == P_OV_REPLY) {
/* track progress, we may need to throttle */
atomic_add(size >> 9, &mdev->rs_sect_in);
- e->w.cb = w_e_end_ov_reply;
+ peer_req->w.cb = w_e_end_ov_reply;
dec_rs_pending(mdev);
/* drbd_rs_begin_io done when we sent this request,
* but accounting still needs to be done. */
@@ -2091,7 +2474,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
case P_OV_REQUEST:
if (mdev->ov_start_sector == ~(sector_t)0 &&
- mdev->agreed_pro_version >= 90) {
+ mdev->tconn->agreed_pro_version >= 90) {
unsigned long now = jiffies;
int i;
mdev->ov_start_sector = sector;
@@ -2105,15 +2488,12 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
dev_info(DEV, "Online Verify start sector: %llu\n",
(unsigned long long)sector);
}
- e->w.cb = w_e_end_ov_req;
+ peer_req->w.cb = w_e_end_ov_req;
fault_type = DRBD_FAULT_RS_RD;
break;
default:
- dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
- cmdname(cmd));
- fault_type = DRBD_FAULT_MAX;
- goto out_free_e;
+ BUG();
}
/* Throttle, drbd_rs_begin_io and submit should become asynchronous
@@ -2148,30 +2528,31 @@ submit_for_resync:
submit:
inc_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
- list_add_tail(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add_tail(&peer_req->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
- return true;
+ if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
out_free_e:
put_ldev(mdev);
- drbd_free_ee(mdev, e);
- return false;
+ drbd_free_peer_req(mdev, peer_req);
+ return -EIO;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
int self, peer, rv = -100;
unsigned long ch_self, ch_peer;
+ enum drbd_after_sb_p after_sb_0p;
self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
peer = mdev->p_uuid[UI_BITMAP] & 1;
@@ -2179,10 +2560,14 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
ch_peer = mdev->p_uuid[UI_SIZE];
ch_self = mdev->comm_bm_set;
- switch (mdev->net_conf->after_sb_0p) {
+ rcu_read_lock();
+ after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
+ rcu_read_unlock();
+ switch (after_sb_0p) {
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_CALL_HELPER:
+ case ASB_VIOLENTLY:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_DISCONNECT:
@@ -2211,14 +2596,14 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
"Using discard-least-changes instead\n");
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
? -1 : 1;
break;
} else {
if (ch_peer == 0) { rv = 1; break; }
if (ch_self == 0) { rv = -1; break; }
}
- if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
+ if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
@@ -2227,7 +2612,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
@@ -2243,13 +2628,18 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
int hg, rv = -100;
+ enum drbd_after_sb_p after_sb_1p;
- switch (mdev->net_conf->after_sb_1p) {
+ rcu_read_lock();
+ after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
+ rcu_read_unlock();
+ switch (after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
case ASB_DISCARD_LOCAL:
case ASB_DISCARD_REMOTE:
+ case ASB_DISCARD_ZERO_CHG:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_DISCONNECT:
@@ -2292,8 +2682,12 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
int hg, rv = -100;
+ enum drbd_after_sb_p after_sb_2p;
- switch (mdev->net_conf->after_sb_2p) {
+ rcu_read_lock();
+ after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
+ rcu_read_unlock();
+ switch (after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
@@ -2301,6 +2695,7 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
case ASB_DISCARD_REMOTE:
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
+ case ASB_DISCARD_ZERO_CHG:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_VIOLENTLY:
@@ -2386,13 +2781,15 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
- drbd_uuid_set_bm(mdev, 0UL);
+ drbd_uuid_move_history(mdev);
+ mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
+ mdev->ldev->md.uuid[UI_BITMAP] = 0;
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
@@ -2407,7 +2804,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
@@ -2440,7 +2837,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
- dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
return dc ? -1 : 1;
}
}
@@ -2453,14 +2850,14 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 51;
peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of the peer's UUIDs. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
@@ -2490,18 +2887,18 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 71;
self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of our UUIDs. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
- _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
- _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
+ __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
+ __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
@@ -2545,20 +2942,24 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
enum drbd_disk_state peer_disk) __must_hold(local)
{
- int hg, rule_nr;
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
+ struct net_conf *nc;
+ int hg, rule_nr, rr_conflict, tentative;
mydisk = mdev->state.disk;
if (mydisk == D_NEGOTIATING)
mydisk = mdev->new_state_tmp.disk;
dev_info(DEV, "drbd_sync_handshake:\n");
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
hg = drbd_uuid_compare(mdev, &rule_nr);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
@@ -2584,7 +2985,10 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (abs(hg) == 100)
drbd_khelper(mdev, "initial-split-brain");
- if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+
+ if (hg == 100 || (hg == -100 && nc->always_asbp)) {
int pcount = (mdev->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
@@ -2613,9 +3017,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
if (hg == -100) {
- if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
+ if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
hg = -1;
- if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
+ if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
@@ -2623,6 +3027,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
+ rr_conflict = nc->rr_conflict;
+ tentative = nc->tentative;
+ rcu_read_unlock();
if (hg == -100) {
/* FIXME this log message is not correct if we end up here
@@ -2641,7 +3048,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (hg < 0 && /* by intention we do not use mydisk here. */
mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
- switch (mdev->net_conf->rr_conflict) {
+ switch (rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(mdev, "pri-lost");
/* fall through */
@@ -2654,7 +3061,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
}
- if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+ if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
if (hg == 0)
dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
else
@@ -2686,33 +3093,29 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
return rv;
}
-/* returns 1 if invalid */
-static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
+static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{
/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
- if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
- (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
- return 0;
+ if (peer == ASB_DISCARD_REMOTE)
+ return ASB_DISCARD_LOCAL;
/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
- if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
- self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
- return 1;
+ if (peer == ASB_DISCARD_LOCAL)
+ return ASB_DISCARD_REMOTE;
/* everything else is valid if they are equal on both sides. */
- if (peer == self)
- return 0;
-
- /* everything es is invalid. */
- return 1;
+ return peer;
}
-static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_protocol *p = &mdev->data.rbuf.protocol;
- int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
- int p_want_lose, p_two_primaries, cf;
- char p_integrity_alg[SHARED_SECRET_MAX] = "";
+ struct p_protocol *p = pi->data;
+ enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
+ int p_proto, p_discard_my_data, p_two_primaries, cf;
+ struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
+ char integrity_alg[SHARED_SECRET_MAX] = "";
+ struct crypto_hash *peer_integrity_tfm = NULL;
+ void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
@@ -2720,63 +3123,138 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
p_two_primaries = be32_to_cpu(p->two_primaries);
cf = be32_to_cpu(p->conn_flags);
- p_want_lose = cf & CF_WANT_LOSE;
-
- clear_bit(CONN_DRY_RUN, &mdev->flags);
+ p_discard_my_data = cf & CF_DISCARD_MY_DATA;
- if (cf & CF_DRY_RUN)
- set_bit(CONN_DRY_RUN, &mdev->flags);
+ if (tconn->agreed_pro_version >= 87) {
+ int err;
- if (p_proto != mdev->net_conf->wire_protocol) {
- dev_err(DEV, "incompatible communication protocols\n");
- goto disconnect;
+ if (pi->size > sizeof(integrity_alg))
+ return -EIO;
+ err = drbd_recv_all(tconn, integrity_alg, pi->size);
+ if (err)
+ return err;
+ integrity_alg[SHARED_SECRET_MAX - 1] = 0;
}
- if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
- dev_err(DEV, "incompatible after-sb-0pri settings\n");
- goto disconnect;
- }
+ if (pi->cmd != P_PROTOCOL_UPDATE) {
+ clear_bit(CONN_DRY_RUN, &tconn->flags);
- if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
- dev_err(DEV, "incompatible after-sb-1pri settings\n");
- goto disconnect;
- }
+ if (cf & CF_DRY_RUN)
+ set_bit(CONN_DRY_RUN, &tconn->flags);
- if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
- dev_err(DEV, "incompatible after-sb-2pri settings\n");
- goto disconnect;
- }
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- if (p_want_lose && mdev->net_conf->want_lose) {
- dev_err(DEV, "both sides have the 'want_lose' flag set\n");
- goto disconnect;
- }
+ if (p_proto != nc->wire_protocol) {
+ conn_err(tconn, "incompatible %s settings\n", "protocol");
+ goto disconnect_rcu_unlock;
+ }
- if (p_two_primaries != mdev->net_conf->two_primaries) {
- dev_err(DEV, "incompatible setting of the two-primaries options\n");
- goto disconnect;
+ if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (p_discard_my_data && nc->discard_my_data) {
+ conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (p_two_primaries != nc->two_primaries) {
+ conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (strcmp(integrity_alg, nc->integrity_alg)) {
+ conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
+ goto disconnect_rcu_unlock;
+ }
+
+ rcu_read_unlock();
}
- if (mdev->agreed_pro_version >= 87) {
- unsigned char *my_alg = mdev->net_conf->integrity_alg;
+ if (integrity_alg[0]) {
+ int hash_size;
- if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
- return false;
+ /*
+ * We can only change the peer data integrity algorithm
+ * here. Changing our own data integrity algorithm
+ * requires that we send a P_PROTOCOL_UPDATE packet at
+ * the same time; otherwise, the peer has no way to
+ * tell between which packets the algorithm should
+ * change.
+ */
- p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
- if (strcmp(p_integrity_alg, my_alg)) {
- dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
+ peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+ if (!peer_integrity_tfm) {
+ conn_err(tconn, "peer data-integrity-alg %s not supported\n",
+ integrity_alg);
goto disconnect;
}
- dev_info(DEV, "data-integrity-alg: %s\n",
- my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
+
+ hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+ int_dig_in = kmalloc(hash_size, GFP_KERNEL);
+ int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
+ if (!(int_dig_in && int_dig_vv)) {
+ conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
+ goto disconnect;
+ }
+ }
+
+ new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
+ conn_err(tconn, "Allocation of new net_conf failed\n");
+ goto disconnect;
}
- return true;
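+	/* Publish the new net_conf under conf_update; RCU readers keep seeing
+	 * either the old or the new struct, and the old one is only freed
+	 * after the synchronize_rcu() below. */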
+ mutex_lock(&tconn->data.mutex);
+ mutex_lock(&tconn->conf_update);
+ old_net_conf = tconn->net_conf;
+ *new_net_conf = *old_net_conf;
+
+ new_net_conf->wire_protocol = p_proto;
+ new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
+ new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
+ new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
+ new_net_conf->two_primaries = p_two_primaries;
+ rcu_assign_pointer(tconn->net_conf, new_net_conf);
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+
+ crypto_free_hash(tconn->peer_integrity_tfm);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+ tconn->peer_integrity_tfm = peer_integrity_tfm;
+ tconn->int_dig_in = int_dig_in;
+ tconn->int_dig_vv = int_dig_vv;
+
+ if (strcmp(old_net_conf->integrity_alg, integrity_alg))
+ conn_info(tconn, "peer data-integrity-alg: %s\n",
+ integrity_alg[0] ? integrity_alg : "(none)");
+
+ synchronize_rcu();
+ kfree(old_net_conf);
+ return 0;
+
+disconnect_rcu_unlock:
+ rcu_read_unlock();
disconnect:
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ crypto_free_hash(peer_integrity_tfm);
+ kfree(int_dig_in);
+ kfree(int_dig_vv);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
/* helper function
@@ -2798,24 +3276,64 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
alg, name, PTR_ERR(tfm));
return tfm;
}
- if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
- crypto_free_hash(tfm);
- dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
- return ERR_PTR(-EINVAL);
- }
return tfm;
}
-static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
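+/* Read and discard the remaining pi->size bytes of payload, so the data
+ * stream stays aligned with the packet framing. */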
+static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ void *buffer = tconn->data.rbuf;
+ int size = pi->size;
+
+ while (size) {
+ int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
+ s = drbd_recv(tconn, buffer, s);
+ if (s <= 0) {
+ if (s < 0)
+ return s;
+ break;
+ }
+ size -= s;
+ }
+ if (size)
+ return -EIO;
+ return 0;
+}
+
+/*
+ * config_unknown_volume - device configuration command for unknown volume
+ *
+ * When a device is added to an existing connection, the node on which the
+ * device is added first will send configuration commands to its peer but the
+ * peer will not know about the device yet. It will warn and ignore these
+ * commands. Once the device is added on the second node, the second node will
+ * send the same device configuration commands, but in the other direction.
+ *
+ * (We can also end up here if drbd is misconfigured.)
+ */
+static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
{
- int ok = true;
- struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
+ conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
+ cmdname(pi->cmd), pi->vnr);
+ return ignore_remaining_packet(tconn, pi);
+}
+
+static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
struct crypto_hash *csums_tfm = NULL;
- const int apv = mdev->agreed_pro_version;
- int *rs_plan_s = NULL;
+ struct net_conf *old_net_conf, *new_net_conf = NULL;
+ struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
+ const int apv = tconn->agreed_pro_version;
+ struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int fifo_size = 0;
+ int err;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -2823,32 +3341,49 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
- if (packet_size > exp_max_sz) {
+ if (pi->size > exp_max_sz) {
dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
- packet_size, exp_max_sz);
- return false;
+ pi->size, exp_max_sz);
+ return -EIO;
}
if (apv <= 88) {
- header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param);
+ data_size = pi->size - header_size;
} else if (apv <= 94) {
- header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param_89);
+ data_size = pi->size - header_size;
D_ASSERT(data_size == 0);
} else {
- header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param_95);
+ data_size = pi->size - header_size;
D_ASSERT(data_size == 0);
}
/* initialize verify_alg and csums_alg */
+ p = pi->data;
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
- return false;
+ err = drbd_recv_all(mdev->tconn, p, header_size);
+ if (err)
+ return err;
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
+ mutex_lock(&mdev->tconn->conf_update);
+ old_net_conf = mdev->tconn->net_conf;
+ if (get_ldev(mdev)) {
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ put_ldev(mdev);
+ mutex_unlock(&mdev->tconn->conf_update);
+ dev_err(DEV, "Allocation of new disk_conf failed\n");
+ return -ENOMEM;
+ }
+
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+
+ new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
+ }
if (apv >= 88) {
if (apv == 88) {
@@ -2856,12 +3391,13 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
dev_err(DEV, "verify-alg of wrong size, "
"peer wants %u, accepting only up to %u byte\n",
data_size, SHARED_SECRET_MAX);
- return false;
+ err = -EIO;
+ goto reconnect;
}
- if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
- return false;
-
+ err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
+ if (err)
+ goto reconnect;
/* we expect NUL terminated string */
/* but just in case someone tries to be evil */
D_ASSERT(p->verify_alg[data_size-1] == 0);
@@ -2876,10 +3412,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
p->csums_alg[SHARED_SECRET_MAX-1] = 0;
}
- if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
+ if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
- mdev->sync_conf.verify_alg, p->verify_alg);
+ old_net_conf->verify_alg, p->verify_alg);
goto disconnect;
}
verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -2890,10 +3426,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
}
}
- if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
+ if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
- mdev->sync_conf.csums_alg, p->csums_alg);
+ old_net_conf->csums_alg, p->csums_alg);
goto disconnect;
}
csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -2904,57 +3440,91 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
}
}
- if (apv > 94) {
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
- mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
- mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
- mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
- mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
-
- fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
- rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
- if (!rs_plan_s) {
+ if (apv > 94 && new_disk_conf) {
+ new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
+ new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
+ new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
+ new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
+
+ fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+ if (fifo_size != mdev->rs_plan_s->size) {
+ new_plan = fifo_alloc(fifo_size);
+ if (!new_plan) {
dev_err(DEV, "kmalloc of fifo_buffer failed");
+ put_ldev(mdev);
goto disconnect;
}
}
}
- spin_lock(&mdev->peer_seq_lock);
- /* lock against drbd_nl_syncer_conf() */
- if (verify_tfm) {
- strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
- mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = verify_tfm;
- dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
- }
- if (csums_tfm) {
- strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
- mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = csums_tfm;
- dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
- }
- if (fifo_size != mdev->rs_plan_s.size) {
- kfree(mdev->rs_plan_s.values);
- mdev->rs_plan_s.values = rs_plan_s;
- mdev->rs_plan_s.size = fifo_size;
- mdev->rs_planed = 0;
+ if (verify_tfm || csums_tfm) {
+ new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
+ dev_err(DEV, "Allocation of new net_conf failed\n");
+ goto disconnect;
+ }
+
+ *new_net_conf = *old_net_conf;
+
+ if (verify_tfm) {
+ strcpy(new_net_conf->verify_alg, p->verify_alg);
+ new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
+ crypto_free_hash(mdev->tconn->verify_tfm);
+ mdev->tconn->verify_tfm = verify_tfm;
+ dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
+ }
+ if (csums_tfm) {
+ strcpy(new_net_conf->csums_alg, p->csums_alg);
+ new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
+ crypto_free_hash(mdev->tconn->csums_tfm);
+ mdev->tconn->csums_tfm = csums_tfm;
+ dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
+ }
+ rcu_assign_pointer(tconn->net_conf, new_net_conf);
}
- spin_unlock(&mdev->peer_seq_lock);
}
- return ok;
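+	/* Commit the new disk_conf and resync plan via RCU and free the old
+	 * structures once all readers are done with them. */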
+ if (new_disk_conf) {
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ put_ldev(mdev);
+ }
+
+ if (new_plan) {
+ old_plan = mdev->rs_plan_s;
+ rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ }
+
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ if (new_net_conf)
+ kfree(old_net_conf);
+ kfree(old_disk_conf);
+ kfree(old_plan);
+
+ return 0;
+
+reconnect:
+ if (new_disk_conf) {
+ put_ldev(mdev);
+ kfree(new_disk_conf);
+ }
+ mutex_unlock(&mdev->tconn->conf_update);
+ return -EIO;
+
disconnect:
+ kfree(new_plan);
+ if (new_disk_conf) {
+ put_ldev(mdev);
+ kfree(new_disk_conf);
+ }
+ mutex_unlock(&mdev->tconn->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
crypto_free_hash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_hash(verify_tfm);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
/* warn if the arguments differ by more than 12.5% */
@@ -2970,59 +3540,77 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev,
(unsigned long long)a, (unsigned long long)b);
}
-static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_sizes *p = &mdev->data.rbuf.sizes;
+ struct drbd_conf *mdev;
+ struct p_sizes *p = pi->data;
enum determine_dev_size dd = unchanged;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
- if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
- dev_err(DEV, "some backing storage is needed\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
- }
-
/* just store the peer's disk size for now.
* we still need to figure out whether we accept that. */
mdev->p_size = p_size;
if (get_ldev(mdev)) {
+ rcu_read_lock();
+ my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+
warn_if_differ_considerably(mdev, "lower level device sizes",
p_size, drbd_get_max_capacity(mdev->ldev));
warn_if_differ_considerably(mdev, "user requested size",
- p_usize, mdev->ldev->dc.disk_size);
+ p_usize, my_usize);
/* if this is the first connect, or an otherwise expected
* param exchange, choose the minimum */
if (mdev->state.conn == C_WF_REPORT_PARAMS)
- p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
- p_usize);
-
- my_usize = mdev->ldev->dc.disk_size;
-
- if (mdev->ldev->dc.disk_size != p_usize) {
- mdev->ldev->dc.disk_size = p_usize;
- dev_info(DEV, "Peer sets u_size to %lu sectors\n",
- (unsigned long)mdev->ldev->dc.disk_size);
- }
+ p_usize = min_not_zero(my_usize, p_usize);
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
- drbd_get_capacity(mdev->this_bdev) &&
- mdev->state.disk >= D_OUTDATED &&
- mdev->state.conn < C_CONNECTED) {
+ if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
+ drbd_get_capacity(mdev->this_bdev) &&
+ mdev->state.disk >= D_OUTDATED &&
+ mdev->state.conn < C_CONNECTED) {
dev_err(DEV, "The peer's disk size is too small!\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- mdev->ldev->dc.disk_size = my_usize;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
put_ldev(mdev);
- return false;
+ return -EIO;
+ }
+
+ if (my_usize != p_usize) {
+ struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
+
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ dev_err(DEV, "Allocation of new disk_conf failed\n");
+ put_ldev(mdev);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ new_disk_conf->disk_size = p_usize;
+
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ kfree(old_disk_conf);
+
+ dev_info(DEV, "Peer sets u_size to %lu sectors\n",
+ (unsigned long)my_usize);
}
+
put_ldev(mdev);
}
@@ -3031,7 +3619,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dd = drbd_determine_dev_size(mdev, ddsf);
put_ldev(mdev);
if (dd == dev_size_error)
- return false;
+ return -EIO;
drbd_md_sync(mdev);
} else {
/* I am diskless, need to accept the peer's size. */
@@ -3070,16 +3658,25 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
}
- return true;
+ return 0;
}
-static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_uuids *p = &mdev->data.rbuf.uuids;
+ struct drbd_conf *mdev;
+ struct p_uuids *p = pi->data;
u64 *p_uuid;
int i, updated_uuids = 0;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
+ if (!p_uuid) {
+ dev_err(DEV, "kmalloc of p_uuid failed\n");
+		return -ENOMEM;
+ }
for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
p_uuid[i] = be64_to_cpu(p->uuid[i]);
@@ -3093,14 +3690,14 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
(mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
(unsigned long long)mdev->ed_uuid);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
if (get_ldev(mdev)) {
int skip_initial_sync =
mdev->state.conn == C_CONNECTED &&
- mdev->agreed_pro_version >= 90 &&
+ mdev->tconn->agreed_pro_version >= 90 &&
mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
(p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
@@ -3127,14 +3724,15 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
ongoing cluster wide state change is finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
- wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+ mutex_lock(mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
if (updated_uuids)
drbd_print_uuids(mdev, "receiver updated UUIDs to");
- return true;
+ return 0;
}
/**
@@ -3146,6 +3744,7 @@ static union drbd_state convert_state(union drbd_state ps)
union drbd_state ms;
static enum drbd_conns c_tab[] = {
+ [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
[C_CONNECTED] = C_CONNECTED,
[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
@@ -3167,40 +3766,74 @@ static union drbd_state convert_state(union drbd_state ps)
return ms;
}
-static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_req_state *p = &mdev->data.rbuf.req_state;
+ struct drbd_conf *mdev;
+ struct p_req_state *p = pi->data;
union drbd_state mask, val;
enum drbd_state_rv rv;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
- test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
+ if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
+ mutex_is_locked(mdev->state_mutex)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
- return true;
+ return 0;
}
mask = convert_state(mask);
val = convert_state(val);
rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
-
drbd_send_sr_reply(mdev, rv);
+
drbd_md_sync(mdev);
- return true;
+ return 0;
}
-static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_state *p = &mdev->data.rbuf.state;
+ struct p_req_state *p = pi->data;
+ union drbd_state mask, val;
+ enum drbd_state_rv rv;
+
+ mask.i = be32_to_cpu(p->mask);
+ val.i = be32_to_cpu(p->val);
+
+ if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
+ mutex_is_locked(&tconn->cstate_mutex)) {
+ conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
+ return 0;
+ }
+
+ mask = convert_state(mask);
+ val = convert_state(val);
+
+ rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
+ conn_send_sr_reply(tconn, rv);
+
+ return 0;
+}
+
+static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_state *p = pi->data;
union drbd_state os, ns, peer_state;
enum drbd_disk_state real_peer_disk;
enum chg_state_flags cs_flags;
int rv;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
peer_state.i = be32_to_cpu(p->state);
real_peer_disk = peer_state.disk;
@@ -3209,16 +3842,16 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
retry:
- os = ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ os = ns = drbd_read_state(mdev);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* If some other part of the code (asender thread, timeout)
* already decided to close the connection again,
* we must not "re-establish" it here. */
if (os.conn <= C_TEAR_DOWN)
- return false;
+ return -ECONNRESET;
/* If this is the "end of sync" confirmation, usually the peer disk
* transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
@@ -3246,10 +3879,18 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
peer_state.conn == C_CONNECTED) {
if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
drbd_resync_finished(mdev);
- return true;
+ return 0;
}
}
+ /* explicit verify finished notification, stop sector reached. */
+ if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
+ peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
+ ov_out_of_sync_print(mdev);
+ drbd_resync_finished(mdev);
+ return 0;
+ }
+
/* peer says his disk is inconsistent, while we think it is uptodate,
* and this happens while the peer still thinks we have a sync going on,
* but we think we are already done with the sync.
@@ -3298,17 +3939,17 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
- if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
- return false;
+ if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
+ return -EIO;
D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
}
}
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.i != os.i)
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (os.i != drbd_read_state(mdev).i)
goto retry;
clear_bit(CONSIDER_RESYNC, &mdev->flags);
ns.peer = peer_state.role;
@@ -3317,25 +3958,25 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
ns.disk = mdev->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
- if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
+ if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
test_bit(NEW_CUR_UUID, &mdev->flags)) {
- /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
+ /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
for temporary network outages! */
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
- tl_clear(mdev);
+ tl_clear(mdev->tconn);
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
- drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
- return false;
+ conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
+ return -EIO;
}
rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
- ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ ns = drbd_read_state(mdev);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
if (os.conn > C_WF_REPORT_PARAMS) {
@@ -3349,16 +3990,21 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
}
- mdev->net_conf->want_lose = 0;
+ clear_bit(DISCARD_MY_DATA, &mdev->flags);
drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
- return true;
+ return 0;
}
-static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
+ struct drbd_conf *mdev;
+ struct p_rs_uuid *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
wait_event(mdev->misc_wait,
mdev->state.conn == C_WF_SYNC_UUID ||
@@ -3381,7 +4027,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
} else
dev_err(DEV, "Ignoring SyncUUID packet!\n");
- return true;
+ return 0;
}
/**
@@ -3391,27 +4037,27 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
* code upon failure.
*/
static int
-receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
- unsigned long *buffer, struct bm_xfer_ctx *c)
+receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
+ unsigned long *p, struct bm_xfer_ctx *c)
{
- unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
- unsigned want = num_words * sizeof(long);
+ unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
+ drbd_header_size(mdev->tconn);
+ unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
+ c->bm_words - c->word_offset);
+ unsigned int want = num_words * sizeof(*p);
int err;
- if (want != data_size) {
- dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
+ if (want != size) {
+ dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
return -EIO;
}
if (want == 0)
return 0;
- err = drbd_recv(mdev, buffer, want);
- if (err != want) {
- if (err >= 0)
- err = -EIO;
+ err = drbd_recv_all(mdev->tconn, p, want);
+ if (err)
return err;
- }
- drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
+ drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -3421,6 +4067,21 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
return 1;
}
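+/* The compressed-bitmap encoding byte packs three fields: bits 0-3 hold the
+ * bitmap code (e.g. RLE_VLI_Bits), bits 4-6 the number of pad bits at the
+ * end of the bit stream, and bit 7 the value of the first run ("toggle").
+ * The helpers below extract these fields. */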
+static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
+{
+ return (enum drbd_bitmap_code)(p->encoding & 0x0f);
+}
+
+static int dcbp_get_start(struct p_compressed_bm *p)
+{
+ return (p->encoding & 0x80) != 0;
+}
+
+static int dcbp_get_pad_bits(struct p_compressed_bm *p)
+{
+ return (p->encoding >> 4) & 0x7;
+}
+
/**
* recv_bm_rle_bits
*
@@ -3430,7 +4091,8 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct bm_xfer_ctx *c,
+ unsigned int len)
{
struct bitstream bs;
u64 look_ahead;
@@ -3438,12 +4100,11 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
u64 tmp;
unsigned long s = c->bit_offset;
unsigned long e;
- int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
- int toggle = DCBP_get_start(p);
+ int toggle = dcbp_get_start(p);
int have;
int bits;
- bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
+ bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
bits = bitstream_get_bits(&bs, &look_ahead, 64);
if (bits < 0)
@@ -3495,17 +4156,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
static int
decode_bitmap_c(struct drbd_conf *mdev,
struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct bm_xfer_ctx *c,
+ unsigned int len)
{
- if (DCBP_get_code(p) == RLE_VLI_Bits)
- return recv_bm_rle_bits(mdev, p, c);
+ if (dcbp_get_code(p) == RLE_VLI_Bits)
+ return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
/* other variants had been implemented for evaluation,
* but have been dropped as this one turned out to be "best"
* during all our tests. */
dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+ conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
return -EIO;
}
@@ -3513,11 +4175,13 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
- unsigned plain = sizeof(struct p_header80) *
- ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
- + c->bm_words * sizeof(long);
- unsigned total = c->bytes[0] + c->bytes[1];
- unsigned r;
+ unsigned int header_size = drbd_header_size(mdev->tconn);
+ unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
+ unsigned int plain =
+ header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
+ c->bm_words * sizeof(unsigned long);
+ unsigned int total = c->bytes[0] + c->bytes[1];
+ unsigned int r;
/* total can not be zero. but just in case: */
if (total == 0)
@@ -3551,67 +4215,63 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
in order to be agnostic to the 32 vs 64 bits issue.
returns 0 on failure, 1 if we successfully received it. */
-static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
struct bm_xfer_ctx c;
- void *buffer;
int err;
- int ok = false;
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
/* you are supposed to send additional out-of-sync information
* if you actually set bits during this phase */
- /* maybe we should use some per thread scratch page,
- * and allocate that during initial device creation? */
- buffer = (unsigned long *) __get_free_page(GFP_NOIO);
- if (!buffer) {
- dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
- goto out;
- }
-
c = (struct bm_xfer_ctx) {
.bm_bits = drbd_bm_bits(mdev),
.bm_words = drbd_bm_words(mdev),
};
for(;;) {
- if (cmd == P_BITMAP) {
- err = receive_bitmap_plain(mdev, data_size, buffer, &c);
- } else if (cmd == P_COMPRESSED_BITMAP) {
+ if (pi->cmd == P_BITMAP)
+ err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
+ else if (pi->cmd == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
- struct p_compressed_bm *p;
+ struct p_compressed_bm *p = pi->data;
- if (data_size > BM_PACKET_PAYLOAD_BYTES) {
+ if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
dev_err(DEV, "ReportCBitmap packet too large\n");
+ err = -EIO;
goto out;
}
- /* use the page buff */
- p = buffer;
- memcpy(p, h, sizeof(*h));
- if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
- goto out;
- if (data_size <= (sizeof(*p) - sizeof(p->head))) {
- dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
+ if (pi->size <= sizeof(*p)) {
+ dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
+ err = -EIO;
goto out;
}
- err = decode_bitmap_c(mdev, p, &c);
+ err = drbd_recv_all(mdev->tconn, p, pi->size);
+ if (err)
+ goto out;
+ err = decode_bitmap_c(mdev, p, &c, pi->size);
} else {
- dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
+ dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
+ err = -EIO;
goto out;
}
- c.packets[cmd == P_BITMAP]++;
- c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
+ c.packets[pi->cmd == P_BITMAP]++;
+ c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
if (err <= 0) {
if (err < 0)
goto out;
break;
}
- if (!drbd_recv_header(mdev, &cmd, &data_size))
+ err = drbd_recv_header(mdev->tconn, pi);
+ if (err)
goto out;
}
@@ -3620,8 +4280,8 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
if (mdev->state.conn == C_WF_BITMAP_T) {
enum drbd_state_rv rv;
- ok = !drbd_send_bitmap(mdev);
- if (!ok)
+ err = drbd_send_bitmap(mdev);
+ if (err)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
@@ -3632,47 +4292,40 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
drbd_conn_str(mdev->state.conn));
}
+ err = 0;
- ok = true;
out:
drbd_bm_unlock(mdev);
- if (ok && mdev->state.conn == C_WF_BITMAP_S)
+ if (!err && mdev->state.conn == C_WF_BITMAP_S)
drbd_start_resync(mdev, C_SYNC_SOURCE);
- free_page((unsigned long) buffer);
- return ok;
+ return err;
}
-static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
- /* TODO zero copy sink :) */
- static char sink[128];
- int size, want, r;
+ conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
+ pi->cmd, pi->size);
- dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
- cmd, data_size);
-
- size = data_size;
- while (size > 0) {
- want = min_t(int, size, sizeof(sink));
- r = drbd_recv(mdev, sink, want);
- ERR_IF(r <= 0) break;
- size -= r;
- }
- return size == 0;
+ return ignore_remaining_packet(tconn, pi);
}
-static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(mdev->data.socket);
+ drbd_tcp_quickack(tconn->data.socket);
- return true;
+ return 0;
}
-static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_desc *p = &mdev->data.rbuf.block_desc;
+ struct drbd_conf *mdev;
+ struct p_block_desc *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
switch (mdev->state.conn) {
case C_WF_SYNC_UUID:
@@ -3686,15 +4339,13 @@ static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, un
drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
- return true;
+ return 0;
}
-typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
-
struct data_cmd {
int expect_payload;
size_t pkt_size;
- drbd_cmd_handler_f function;
+ int (*fn)(struct drbd_tconn *, struct packet_info *);
};
static struct data_cmd drbd_cmd_handler[] = {
@@ -3702,13 +4353,13 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
[P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
[P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
- [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
- [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
- [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
+ [P_BITMAP] = { 1, 0, receive_bitmap } ,
+ [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
+ [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
[P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
- [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
- [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
+ [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
+ [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
[P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
[P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
[P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
@@ -3720,124 +4371,75 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
[P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
- /* anything missing from this table is in
- * the asender_tbl, see get_asender_cmd */
- [P_MAX_CMD] = { 0, 0, NULL },
+ [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
+ [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
};
-/* All handler functions that expect a sub-header get that sub-heder in
- mdev->data.rbuf.header.head.payload.
-
- Usually in mdev->data.rbuf.header.head the callback can find the usual
- p_header, but they may not rely on that. Since there is also p_header95 !
- */
-
-static void drbdd(struct drbd_conf *mdev)
+static void drbdd(struct drbd_tconn *tconn)
{
- union p_header *header = &mdev->data.rbuf.header;
- unsigned int packet_size;
- enum drbd_packets cmd;
+ struct packet_info pi;
size_t shs; /* sub header size */
- int rv;
+ int err;
+
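+	/* Receive loop: read a packet header, look up the handler for the
+	 * packet type, pull in the fixed-size sub-header if any, and dispatch. */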
+ while (get_t_state(&tconn->receiver) == RUNNING) {
+ struct data_cmd *cmd;
- while (get_t_state(&mdev->receiver) == Running) {
- drbd_thread_current_set_cpu(mdev);
- if (!drbd_recv_header(mdev, &cmd, &packet_size))
+ drbd_thread_current_set_cpu(&tconn->receiver);
+ if (drbd_recv_header(tconn, &pi))
goto err_out;
- if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
- dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
+ cmd = &drbd_cmd_handler[pi.cmd];
+ if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
+ conn_err(tconn, "Unexpected data packet %s (0x%04x)",
+ cmdname(pi.cmd), pi.cmd);
goto err_out;
}
- shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
- if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
- dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
+ shs = cmd->pkt_size;
+ if (pi.size > shs && !cmd->expect_payload) {
+ conn_err(tconn, "No payload expected %s l:%d\n",
+ cmdname(pi.cmd), pi.size);
goto err_out;
}
if (shs) {
- rv = drbd_recv(mdev, &header->h80.payload, shs);
- if (unlikely(rv != shs)) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
+ err = drbd_recv_all_warn(tconn, pi.data, shs);
+ if (err)
goto err_out;
- }
+ pi.size -= shs;
}
- rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
-
- if (unlikely(!rv)) {
- dev_err(DEV, "error receiving %s, l: %d!\n",
- cmdname(cmd), packet_size);
+ err = cmd->fn(tconn, &pi);
+ if (err) {
+ conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
+ cmdname(pi.cmd), err, pi.size);
goto err_out;
}
}
+ return;
- if (0) {
- err_out:
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
- }
- /* If we leave here, we probably want to update at least the
- * "Connected" indicator on stable storage. Do so explicitly here. */
- drbd_md_sync(mdev);
+ err_out:
+ conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
-void drbd_flush_workqueue(struct drbd_conf *mdev)
+void conn_flush_workqueue(struct drbd_tconn *tconn)
{
struct drbd_wq_barrier barr;
barr.w.cb = w_prev_work_done;
+ barr.w.tconn = tconn;
init_completion(&barr.done);
- drbd_queue_work(&mdev->data.work, &barr.w);
+ drbd_queue_work(&tconn->sender_work, &barr.w);
wait_for_completion(&barr.done);
}
-void drbd_free_tl_hash(struct drbd_conf *mdev)
-{
- struct hlist_head *h;
-
- spin_lock_irq(&mdev->req_lock);
-
- if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
- spin_unlock_irq(&mdev->req_lock);
- return;
- }
- /* paranoia code */
- for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
- if (h->first)
- dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
- (int)(h - mdev->ee_hash), h->first);
- kfree(mdev->ee_hash);
- mdev->ee_hash = NULL;
- mdev->ee_hash_s = 0;
-
- /* We may not have had the chance to wait for all locally pending
- * application requests. The hlist_add_fake() prevents access after
- * free on master bio completion. */
- for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
- struct drbd_request *req;
- struct hlist_node *pos, *n;
- hlist_for_each_entry_safe(req, pos, n, h, collision) {
- hlist_del_init(&req->collision);
- hlist_add_fake(&req->collision);
- }
- }
-
- kfree(mdev->tl_hash);
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
- spin_unlock_irq(&mdev->req_lock);
-}
-
-static void drbd_disconnect(struct drbd_conf *mdev)
+static void conn_disconnect(struct drbd_tconn *tconn)
{
- enum drbd_fencing_p fp;
- union drbd_state os, ns;
- int rv = SS_UNKNOWN_ERROR;
- unsigned int i;
+ struct drbd_conf *mdev;
+ enum drbd_conns oc;
+ int vnr;
- if (mdev->state.conn == C_STANDALONE)
+ if (tconn->cstate == C_STANDALONE)
return;
/* We are about to start the cleanup after connection loss.
@@ -3845,18 +4447,54 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* Usually we should be in some network failure state already,
* but just in case we are not, we fix it up here.
*/
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
/* asender does not clean up anything. it must not interfere, either */
- drbd_thread_stop(&mdev->asender);
- drbd_free_sock(mdev);
+ drbd_thread_stop(&tconn->asender);
+ drbd_free_sock(tconn);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_disconnected(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+
+ if (!list_empty(&tconn->current_epoch->list))
+ conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+ /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+ atomic_set(&tconn->current_epoch->epoch_size, 0);
+ tconn->send.seen_any_write_yet = false;
+
+ conn_info(tconn, "Connection closed\n");
+
+ if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
+ conn_try_outdate_peer_async(tconn);
+
+ spin_lock_irq(&tconn->req_lock);
+ oc = tconn->cstate;
+ if (oc >= C_UNCONNECTED)
+ _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+
+ spin_unlock_irq(&tconn->req_lock);
+
+ if (oc == C_DISCONNECTING)
+ conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
+}
+
+static int drbd_disconnected(struct drbd_conf *mdev)
+{
+ unsigned int i;
/* wait for current activity to cease. */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
@@ -3874,7 +4512,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
- /* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -3883,50 +4520,25 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* to be "canceled" */
drbd_flush_workqueue(mdev);
- /* This also does reclaim_net_ee(). If we do this too early, we might
- * miss some resync ee and pages.*/
- drbd_process_done_ee(mdev);
+ drbd_finish_peer_reqs(mdev);
+
+ /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
+ might have queued work again. The one before drbd_finish_peer_reqs() is
+ necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
+ drbd_flush_workqueue(mdev);
+
+ /* need to do it again, drbd_finish_peer_reqs() may have populated it
+ * again via drbd_try_clear_on_disk_bm(). */
+ drbd_rs_cancel_all(mdev);
kfree(mdev->p_uuid);
mdev->p_uuid = NULL;
- if (!is_susp(mdev->state))
- tl_clear(mdev);
-
- dev_info(DEV, "Connection closed\n");
+ if (!drbd_suspended(mdev))
+ tl_clear(mdev->tconn);
drbd_md_sync(mdev);
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
- drbd_try_outdate_peer_async(mdev);
-
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
- if (os.conn >= C_UNCONNECTED) {
- /* Do not restart in case we are C_DISCONNECTING */
- ns = os;
- ns.conn = C_UNCONNECTED;
- rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- }
- spin_unlock_irq(&mdev->req_lock);
-
- if (os.conn == C_DISCONNECTING) {
- wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
-
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = NULL;
-
- kfree(mdev->net_conf);
- mdev->net_conf = NULL;
- drbd_request_state(mdev, NS(conn, C_STANDALONE));
- }
-
/* serialize with bitmap writeout triggered by the state change,
* if any. */
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
@@ -3938,7 +4550,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* Actually we don't care for exactly when the network stack does its
* put_page(), but release our reference on these pages right here.
*/
- i = drbd_release_ee(mdev, &mdev->net_ee);
+ i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
if (i)
dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&mdev->pp_in_use_by_net);
@@ -3953,9 +4565,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
D_ASSERT(list_empty(&mdev->sync_ee));
D_ASSERT(list_empty(&mdev->done_ee));
- /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
- atomic_set(&mdev->current_epoch->epoch_size, 0);
- D_ASSERT(list_empty(&mdev->current_epoch->list));
+ return 0;
}
/*
@@ -3967,29 +4577,19 @@ static void drbd_disconnect(struct drbd_conf *mdev)
*
* for now, they are expected to be zero, but ignored.
*/
-static int drbd_send_handshake(struct drbd_conf *mdev)
+static int drbd_send_features(struct drbd_tconn *tconn)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.sbuf.handshake;
- int ok;
-
- if (mutex_lock_interruptible(&mdev->data.mutex)) {
- dev_err(DEV, "interrupted during initial handshake\n");
- return 0; /* interrupted. not ok. */
- }
-
- if (mdev->data.socket == NULL) {
- mutex_unlock(&mdev->data.mutex);
- return 0;
- }
+ struct drbd_socket *sock;
+ struct p_connection_features *p;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
- (struct p_header80 *)p, sizeof(*p), 0 );
- mutex_unlock(&mdev->data.mutex);
- return ok;
+ return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
@@ -3999,42 +4599,38 @@ static int drbd_send_handshake(struct drbd_conf *mdev)
* -1 peer talks different language,
* no point in trying again, please go standalone.
*/
-static int drbd_do_handshake(struct drbd_conf *mdev)
+static int drbd_do_features(struct drbd_tconn *tconn)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.rbuf.handshake;
- const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
- unsigned int length;
- enum drbd_packets cmd;
- int rv;
+ /* ASSERT current == tconn->receiver ... */
+ struct p_connection_features *p;
+ const int expect = sizeof(struct p_connection_features);
+ struct packet_info pi;
+ int err;
- rv = drbd_send_handshake(mdev);
- if (!rv)
+ err = drbd_send_features(tconn);
+ if (err)
return 0;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
+ err = drbd_recv_header(tconn, &pi);
+ if (err)
return 0;
- if (cmd != P_HAND_SHAKE) {
- dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ if (pi.cmd != P_CONNECTION_FEATURES) {
+ conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
return -1;
}
- if (length != expect) {
- dev_err(DEV, "expected HandShake length: %u, received: %u\n",
- expect, length);
+ if (pi.size != expect) {
+ conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
+ expect, pi.size);
return -1;
}
- rv = drbd_recv(mdev, &p->head.payload, expect);
-
- if (rv != expect) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
+ p = pi.data;
+ err = drbd_recv_all_warn(tconn, p, expect);
+ if (err)
return 0;
- }
p->protocol_min = be32_to_cpu(p->protocol_min);
p->protocol_max = be32_to_cpu(p->protocol_max);
@@ -4045,15 +4641,15 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
PRO_VERSION_MIN > p->protocol_max)
goto incompat;
- mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+ tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
- dev_info(DEV, "Handshake successful: "
- "Agreed network protocol version %d\n", mdev->agreed_pro_version);
+ conn_info(tconn, "Handshake successful: "
+ "Agreed network protocol version %d\n", tconn->agreed_pro_version);
return 1;
incompat:
- dev_err(DEV, "incompatible DRBD dialects: "
+ conn_err(tconn, "incompatible DRBD dialects: "
"I support %d-%d, peer supports %d-%d\n",
PRO_VERSION_MIN, PRO_VERSION_MAX,
p->protocol_min, p->protocol_max);
@@ -4061,7 +4657,7 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
}
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
-static int drbd_do_auth(struct drbd_conf *mdev)
+static int drbd_do_auth(struct drbd_tconn *tconn)
{
dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
@@ -4076,121 +4672,139 @@ static int drbd_do_auth(struct drbd_conf *mdev)
-1 - auth failed, don't try again.
*/
-static int drbd_do_auth(struct drbd_conf *mdev)
+static int drbd_do_auth(struct drbd_tconn *tconn)
{
+ struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
struct scatterlist sg;
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
- unsigned int key_len = strlen(mdev->net_conf->shared_secret);
+ unsigned int key_len;
+ char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
struct hash_desc desc;
- enum drbd_packets cmd;
- unsigned int length;
- int rv;
+ struct packet_info pi;
+ struct net_conf *nc;
+ int err, rv;
+
+ /* FIXME: Put the challenge/response into the preallocated socket buffer. */
- desc.tfm = mdev->cram_hmac_tfm;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ key_len = strlen(nc->shared_secret);
+ memcpy(secret, nc->shared_secret, key_len);
+ rcu_read_unlock();
+
+ desc.tfm = tconn->cram_hmac_tfm;
desc.flags = 0;
- rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
- (u8 *)mdev->net_conf->shared_secret, key_len);
+ rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
- dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
get_random_bytes(my_challenge, CHALLENGE_LEN);
- rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
+ sock = &tconn->data;
+ if (!conn_prepare_command(tconn, sock)) {
+ rv = 0;
+ goto fail;
+ }
+ rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
+ my_challenge, CHALLENGE_LEN);
if (!rv)
goto fail;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
+ err = drbd_recv_header(tconn, &pi);
+ if (err) {
+ rv = 0;
goto fail;
+ }
- if (cmd != P_AUTH_CHALLENGE) {
- dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ if (pi.cmd != P_AUTH_CHALLENGE) {
+ conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
- if (length > CHALLENGE_LEN * 2) {
- dev_err(DEV, "expected AuthChallenge payload too big.\n");
+ if (pi.size > CHALLENGE_LEN * 2) {
+ conn_err(tconn, "expected AuthChallenge payload too big.\n");
rv = -1;
goto fail;
}
- peers_ch = kmalloc(length, GFP_NOIO);
+ peers_ch = kmalloc(pi.size, GFP_NOIO);
if (peers_ch == NULL) {
- dev_err(DEV, "kmalloc of peers_ch failed\n");
+ conn_err(tconn, "kmalloc of peers_ch failed\n");
rv = -1;
goto fail;
}
- rv = drbd_recv(mdev, peers_ch, length);
-
- if (rv != length) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
+ err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
+ if (err) {
rv = 0;
goto fail;
}
- resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
+ resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
- dev_err(DEV, "kmalloc of response failed\n");
+ conn_err(tconn, "kmalloc of response failed\n");
rv = -1;
goto fail;
}
sg_init_table(&sg, 1);
- sg_set_buf(&sg, peers_ch, length);
+ sg_set_buf(&sg, peers_ch, pi.size);
rv = crypto_hash_digest(&desc, &sg, sg.length, response);
if (rv) {
- dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
- rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
- if (!rv)
+ if (!conn_prepare_command(tconn, sock)) {
+ rv = 0;
goto fail;
-
- rv = drbd_recv_header(mdev, &cmd, &length);
+ }
+ rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
+ response, resp_size);
if (!rv)
goto fail;
- if (cmd != P_AUTH_RESPONSE) {
- dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ err = drbd_recv_header(tconn, &pi);
+ if (err) {
rv = 0;
goto fail;
}
- if (length != resp_size) {
- dev_err(DEV, "expected AuthResponse payload of wrong size\n");
+ if (pi.cmd != P_AUTH_RESPONSE) {
+ conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
- rv = drbd_recv(mdev, response , resp_size);
+ if (pi.size != resp_size) {
+ conn_err(tconn, "expected AuthResponse payload of wrong size\n");
+ rv = 0;
+ goto fail;
+ }
- if (rv != resp_size) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
+ err = drbd_recv_all_warn(tconn, response , resp_size);
+ if (err) {
rv = 0;
goto fail;
}
right_response = kmalloc(resp_size, GFP_NOIO);
if (right_response == NULL) {
- dev_err(DEV, "kmalloc of right_response failed\n");
+ conn_err(tconn, "kmalloc of right_response failed\n");
rv = -1;
goto fail;
}
@@ -4199,7 +4813,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
if (rv) {
- dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
@@ -4207,8 +4821,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = !memcmp(response, right_response, resp_size);
if (rv)
- dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
- resp_size, mdev->net_conf->cram_hmac_alg);
+ conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
+ resp_size);
else
rv = -1;
@@ -4223,82 +4837,106 @@ static int drbd_do_auth(struct drbd_conf *mdev)
int drbdd_init(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
- unsigned int minor = mdev_to_minor(mdev);
+ struct drbd_tconn *tconn = thi->tconn;
int h;
- sprintf(current->comm, "drbd%d_receiver", minor);
-
- dev_info(DEV, "receiver (re)started\n");
+ conn_info(tconn, "receiver (re)started\n");
do {
- h = drbd_connect(mdev);
+ h = conn_connect(tconn);
if (h == 0) {
- drbd_disconnect(mdev);
+ conn_disconnect(tconn);
schedule_timeout_interruptible(HZ);
}
if (h == -1) {
- dev_warn(DEV, "Discarding network configuration.\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_warn(tconn, "Discarding network configuration.\n");
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
} while (h == 0);
- if (h > 0) {
- if (get_net_conf(mdev)) {
- drbdd(mdev);
- put_net_conf(mdev);
- }
- }
+ if (h > 0)
+ drbdd(tconn);
- drbd_disconnect(mdev);
+ conn_disconnect(tconn);
- dev_info(DEV, "receiver terminated\n");
+ conn_info(tconn, "receiver terminated\n");
return 0;
}
/* ********* acknowledge sender ******** */
-static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_req_state_reply *p = (struct p_req_state_reply *)h;
+ struct p_req_state_reply *p = pi->data;
+ int retcode = be32_to_cpu(p->retcode);
+
+ if (retcode >= SS_SUCCESS) {
+ set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
+ } else {
+ set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
+ conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
+ drbd_set_st_err_str(retcode), retcode);
+ }
+ wake_up(&tconn->ping_wait);
+
+ return 0;
+}
+static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
+ if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
+ D_ASSERT(tconn->agreed_pro_version < 100);
+ return got_conn_RqSReply(tconn, pi);
+ }
+
if (retcode >= SS_SUCCESS) {
set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
} else {
set_bit(CL_ST_CHG_FAIL, &mdev->flags);
dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
- drbd_set_st_err_str(retcode), retcode);
+ drbd_set_st_err_str(retcode), retcode);
}
wake_up(&mdev->state_wait);
- return true;
+ return 0;
}
-static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
- return drbd_send_ping_ack(mdev);
+ return drbd_send_ping_ack(tconn);
}
-static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
/* restore idle timeout */
- mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
- if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
- wake_up(&mdev->misc_wait);
+ tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
+ if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
+ wake_up(&tconn->ping_wait);
- return true;
+ return 0;
}
-static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
- D_ASSERT(mdev->agreed_pro_version >= 89);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
@@ -4312,162 +4950,139 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
dec_rs_pending(mdev);
atomic_add(blksize >> 9, &mdev->rs_sect_in);
- return true;
-}
-
-/* when we receive the ACK for a write request,
- * verify that we actually know about it */
-static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
- u64 id, sector_t sector)
-{
- struct hlist_head *slot = tl_hash_slot(mdev, sector);
- struct hlist_node *n;
- struct drbd_request *req;
-
- hlist_for_each_entry(req, n, slot, collision) {
- if ((unsigned long)req == (unsigned long)id) {
- if (req->sector != sector) {
- dev_err(DEV, "_ack_id_to_req: found req %p but it has "
- "wrong sector (%llus versus %llus)\n", req,
- (unsigned long long)req->sector,
- (unsigned long long)sector);
- break;
- }
- return req;
- }
- }
- return NULL;
+ return 0;
}
-typedef struct drbd_request *(req_validator_fn)
- (struct drbd_conf *mdev, u64 id, sector_t sector);
-
-static int validate_req_change_req_state(struct drbd_conf *mdev,
- u64 id, sector_t sector, req_validator_fn validator,
- const char *func, enum drbd_req_event what)
+static int
+validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
+ struct rb_root *root, const char *func,
+ enum drbd_req_event what, bool missing_ok)
{
struct drbd_request *req;
struct bio_and_error m;
- spin_lock_irq(&mdev->req_lock);
- req = validator(mdev, id, sector);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ req = find_request(mdev, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
- spin_unlock_irq(&mdev->req_lock);
-
- dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
- (void *)(unsigned long)id, (unsigned long long)sector);
- return false;
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ return -EIO;
}
__req_mod(req, what, &m);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (m.bio)
complete_master_bio(mdev, &m);
- return true;
+ return 0;
}
-static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
enum drbd_req_event what;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- if (is_syncer_block_id(p->block_id)) {
+ if (p->block_id == ID_SYNCER) {
drbd_set_in_sync(mdev, sector, blksize);
dec_rs_pending(mdev);
- return true;
+ return 0;
}
- switch (be16_to_cpu(h->command)) {
+ switch (pi->cmd) {
case P_RS_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer_and_sis;
+ what = WRITE_ACKED_BY_PEER_AND_SIS;
break;
case P_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer;
+ what = WRITE_ACKED_BY_PEER;
break;
case P_RECV_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
- what = recv_acked_by_peer;
+ what = RECV_ACKED_BY_PEER;
break;
- case P_DISCARD_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = conflict_discarded_by_peer;
+ case P_SUPERSEDED:
+ what = CONFLICT_RESOLVED;
+ break;
+ case P_RETRY_WRITE:
+ what = POSTPONE_WRITE;
break;
default:
- D_ASSERT(0);
- return false;
+ BUG();
}
return validate_req_change_req_state(mdev, p->block_id, sector,
- _ack_id_to_req, __func__ , what);
+ &mdev->write_requests, __func__,
+ what, false);
}
-static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int size = be32_to_cpu(p->blksize);
- struct drbd_request *req;
- struct bio_and_error m;
+ int err;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- if (is_syncer_block_id(p->block_id)) {
+ if (p->block_id == ID_SYNCER) {
dec_rs_pending(mdev);
drbd_rs_failed_io(mdev, sector, size);
- return true;
+ return 0;
}
- spin_lock_irq(&mdev->req_lock);
- req = _ack_id_to_req(mdev, p->block_id, sector);
- if (!req) {
- spin_unlock_irq(&mdev->req_lock);
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
- mdev->net_conf->wire_protocol == DRBD_PROT_B) {
- /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
- The master bio might already be completed, therefore the
- request is no longer in the collision hash.
- => Do not try to validate block_id as request. */
- /* In Protocol B we might already have got a P_RECV_ACK
- but then get a P_NEG_ACK after wards. */
- drbd_set_out_of_sync(mdev, sector, size);
- return true;
- } else {
- dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
- (void *)(unsigned long)p->block_id, (unsigned long long)sector);
- return false;
- }
+ err = validate_req_change_req_state(mdev, p->block_id, sector,
+ &mdev->write_requests, __func__,
+ NEG_ACKED, true);
+ if (err) {
+ /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
+ The master bio might already be completed, therefore the
+ request is no longer in the collision hash. */
+ /* In Protocol B we might already have got a P_RECV_ACK
+ but then get a P_NEG_ACK afterwards. */
+ drbd_set_out_of_sync(mdev, sector, size);
}
- __req_mod(req, neg_acked, &m);
- spin_unlock_irq(&mdev->req_lock);
-
- if (m.bio)
- complete_master_bio(mdev, &m);
- return true;
+ return 0;
}
-static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
+
+ dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
(unsigned long long)sector, be32_to_cpu(p->blksize));
return validate_req_change_req_state(mdev, p->block_id, sector,
- _ar_id_to_req, __func__ , neg_acked);
+ &mdev->read_requests, __func__,
+ NEG_ACKED, false);
}
-static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
int size;
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct p_block_ack *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
@@ -4478,57 +5093,66 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, sector);
- switch (be16_to_cpu(h->command)) {
+ switch (pi->cmd) {
case P_NEG_RS_DREPLY:
drbd_rs_failed_io(mdev, sector, size);
case P_RS_CANCEL:
break;
default:
- D_ASSERT(0);
- put_ldev(mdev);
- return false;
+ BUG();
}
put_ldev(mdev);
}
- return true;
+ return 0;
}
-static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_barrier_ack *p = (struct p_barrier_ack *)h;
-
- tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
-
- if (mdev->state.conn == C_AHEAD &&
- atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
- mdev->start_resync_timer.expires = jiffies + HZ;
- add_timer(&mdev->start_resync_timer);
+ struct p_barrier_ack *p = pi->data;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_AHEAD &&
+ atomic_read(&mdev->ap_in_flight) == 0 &&
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
+ mdev->start_resync_timer.expires = jiffies + HZ;
+ add_timer(&mdev->start_resync_timer);
+ }
}
+ rcu_read_unlock();
- return true;
+ return 0;
}
-static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
struct drbd_work *w;
sector_t sector;
int size;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
- drbd_ov_oos_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(mdev, sector, size);
else
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
if (!get_ldev(mdev))
- return true;
+ return 0;
drbd_rs_complete_io(mdev, sector);
dec_rs_pending(mdev);
@@ -4543,114 +5167,137 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
w = kmalloc(sizeof(*w), GFP_NOIO);
if (w) {
w->cb = w_ov_finished;
- drbd_queue_work_front(&mdev->data.work, w);
+ w->mdev = mdev;
+ drbd_queue_work(&mdev->tconn->sender_work, w);
} else {
dev_err(DEV, "kmalloc(w) failed.");
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
}
}
put_ldev(mdev);
- return true;
+ return 0;
+}
+
+static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ return 0;
}
-static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
+static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
- return true;
+ struct drbd_conf *mdev;
+ int vnr, not_empty = 0;
+
+ do {
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ flush_signals(current);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ if (drbd_finish_peer_reqs(mdev)) {
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ return 1;
+ }
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ set_bit(SIGNAL_ASENDER, &tconn->flags);
+
+ spin_lock_irq(&tconn->req_lock);
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ not_empty = !list_empty(&mdev->done_ee);
+ if (not_empty)
+ break;
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ rcu_read_unlock();
+ } while (not_empty);
+
+ return 0;
}
struct asender_cmd {
size_t pkt_size;
- int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
+ int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};
-static struct asender_cmd *get_asender_cmd(int cmd)
-{
- static struct asender_cmd asender_tbl[] = {
- /* anything missing from this table is in
- * the drbd_cmd_handler (drbd_default_handler) table,
- * see the beginning of drbdd() */
- [P_PING] = { sizeof(struct p_header80), got_Ping },
- [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
+static struct asender_cmd asender_tbl[] = {
+ [P_PING] = { 0, got_Ping },
+ [P_PING_ACK] = { 0, got_PingAck },
[P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
- [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
+ [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
[P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
[P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
- [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
+ [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
[P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
[P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
- [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
- [P_MAX_CMD] = { 0, NULL },
- };
- if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
- return NULL;
- return &asender_tbl[cmd];
-}
+ [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
+ [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
+ [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
+};
int drbd_asender(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
- struct p_header80 *h = &mdev->meta.rbuf.header.h80;
+ struct drbd_tconn *tconn = thi->tconn;
struct asender_cmd *cmd = NULL;
-
- int rv, len;
- void *buf = h;
+ struct packet_info pi;
+ int rv;
+ void *buf = tconn->meta.rbuf;
int received = 0;
- int expect = sizeof(struct p_header80);
- int empty;
- int ping_timeout_active = 0;
-
- sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
+ unsigned int header_size = drbd_header_size(tconn);
+ int expect = header_size;
+ bool ping_timeout_active = false;
+ struct net_conf *nc;
+ int ping_timeo, tcp_cork, ping_int;
current->policy = SCHED_RR; /* Make this a realtime task! */
current->rt_priority = 2; /* more important than all other tasks */
- while (get_t_state(thi) == Running) {
- drbd_thread_current_set_cpu(mdev);
- if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
- ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
- mdev->meta.socket->sk->sk_rcvtimeo =
- mdev->net_conf->ping_timeo*HZ/10;
- ping_timeout_active = 1;
- }
+ while (get_t_state(thi) == RUNNING) {
+ drbd_thread_current_set_cpu(thi);
- /* conditionally cork;
- * it may hurt latency if we cork without much to send */
- if (!mdev->net_conf->no_cork &&
- 3 < atomic_read(&mdev->unacked_cnt))
- drbd_tcp_cork(mdev->meta.socket);
- while (1) {
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
- flush_signals(current);
- if (!drbd_process_done_ee(mdev))
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ ping_timeo = nc->ping_timeo;
+ tcp_cork = nc->tcp_cork;
+ ping_int = nc->ping_int;
+ rcu_read_unlock();
+
+ if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
+ if (drbd_send_ping(tconn)) {
+ conn_err(tconn, "drbd_send_ping has failed\n");
goto reconnect;
- /* to avoid race with newly queued ACKs */
- set_bit(SIGNAL_ASENDER, &mdev->flags);
- spin_lock_irq(&mdev->req_lock);
- empty = list_empty(&mdev->done_ee);
- spin_unlock_irq(&mdev->req_lock);
- /* new ack may have been queued right here,
- * but then there is also a signal pending,
- * and we start over... */
- if (empty)
- break;
+ }
+ tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
+ ping_timeout_active = true;
+ }
+
+ /* TODO: conditionally cork; it may hurt latency if we cork without
+ much to send */
+ if (tcp_cork)
+ drbd_tcp_cork(tconn->meta.socket);
+ if (tconn_finish_peer_reqs(tconn)) {
+ conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
+ goto reconnect;
}
/* but unconditionally uncork unless disabled */
- if (!mdev->net_conf->no_cork)
- drbd_tcp_uncork(mdev->meta.socket);
+ if (tcp_cork)
+ drbd_tcp_uncork(tconn->meta.socket);
/* short circuit, recv_msg would return EINTR anyways. */
if (signal_pending(current))
continue;
- rv = drbd_recv_short(mdev, mdev->meta.socket,
- buf, expect-received, 0);
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
flush_signals(current);
@@ -4668,80 +5315,91 @@ int drbd_asender(struct drbd_thread *thi)
received += rv;
buf += rv;
} else if (rv == 0) {
- dev_err(DEV, "meta connection shut down by peer.\n");
+ if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ long t;
+ rcu_read_lock();
+ t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ rcu_read_unlock();
+
+ t = wait_event_timeout(tconn->ping_wait,
+ tconn->cstate < C_WF_REPORT_PARAMS,
+ t);
+ if (t)
+ break;
+ }
+ conn_err(tconn, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
/* If the data socket received something meanwhile,
* that is good enough: peer is still alive. */
- if (time_after(mdev->last_received,
- jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ if (time_after(tconn->last_received,
+ jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
continue;
if (ping_timeout_active) {
- dev_err(DEV, "PingAck did not arrive in time.\n");
+ conn_err(tconn, "PingAck did not arrive in time.\n");
goto reconnect;
}
- set_bit(SEND_PING, &mdev->flags);
+ set_bit(SEND_PING, &tconn->flags);
continue;
} else if (rv == -EINTR) {
continue;
} else {
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
+ conn_err(tconn, "sock_recvmsg returned %d\n", rv);
goto reconnect;
}
if (received == expect && cmd == NULL) {
- if (unlikely(h->magic != BE_DRBD_MAGIC)) {
- dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->magic),
- be16_to_cpu(h->command),
- be16_to_cpu(h->length));
+ if (decode_header(tconn, tconn->meta.rbuf, &pi))
goto reconnect;
- }
- cmd = get_asender_cmd(be16_to_cpu(h->command));
- len = be16_to_cpu(h->length);
- if (unlikely(cmd == NULL)) {
- dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->magic),
- be16_to_cpu(h->command),
- be16_to_cpu(h->length));
+ cmd = &asender_tbl[pi.cmd];
+ if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
+ conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
goto disconnect;
}
- expect = cmd->pkt_size;
- ERR_IF(len != expect-sizeof(struct p_header80))
+ expect = header_size + cmd->pkt_size;
+ if (pi.size != expect - header_size) {
+ conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
+ pi.cmd, pi.size);
goto reconnect;
+ }
}
if (received == expect) {
- mdev->last_received = jiffies;
- D_ASSERT(cmd != NULL);
- if (!cmd->process(mdev, h))
+ bool err;
+
+ err = cmd->fn(tconn, &pi);
+ if (err) {
+ conn_err(tconn, "%pf failed\n", cmd->fn);
goto reconnect;
+ }
+
+ tconn->last_received = jiffies;
- /* the idle_timeout (ping-int)
- * has been restored in got_PingAck() */
- if (cmd == get_asender_cmd(P_PING_ACK))
- ping_timeout_active = 0;
+ if (cmd == &asender_tbl[P_PING_ACK]) {
+ /* restore idle timeout */
+ tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
+ ping_timeout_active = false;
+ }
- buf = h;
+ buf = tconn->meta.rbuf;
received = 0;
- expect = sizeof(struct p_header80);
+ expect = header_size;
cmd = NULL;
}
}
if (0) {
reconnect:
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
- drbd_md_sync(mdev);
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ conn_md_sync(tconn);
}
if (0) {
disconnect:
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- drbd_md_sync(mdev);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
- D_ASSERT(mdev->state.conn < C_CONNECTED);
- dev_info(DEV, "asender terminated\n");
+ conn_info(tconn, "asender terminated\n");
return 0;
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 01b2ac641c7b..2b8303ad63c9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -31,6 +31,8 @@
#include "drbd_req.h"
+static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
+
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
@@ -40,6 +42,8 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
+ (void) cpu; /* The macro invocations above want the cpu argument, I do not like
+ the compiler warning about cpu only assigned but never used... */
part_inc_in_flight(&mdev->vdisk->part0, rw);
part_stat_unlock();
}
@@ -57,9 +61,51 @@ static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
part_stat_unlock();
}
-static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
+static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
+ struct bio *bio_src)
+{
+ struct drbd_request *req;
+
+ req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
+ if (!req)
+ return NULL;
+
+ drbd_req_make_private_bio(req, bio_src);
+ req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
+ req->w.mdev = mdev;
+ req->master_bio = bio_src;
+ req->epoch = 0;
+
+ drbd_clear_interval(&req->i);
+ req->i.sector = bio_src->bi_sector;
+ req->i.size = bio_src->bi_size;
+ req->i.local = true;
+ req->i.waiting = false;
+
+ INIT_LIST_HEAD(&req->tl_requests);
+ INIT_LIST_HEAD(&req->w.list);
+
+ /* one reference to be put by __drbd_make_request */
+ atomic_set(&req->completion_ref, 1);
+ /* one kref as long as completion_ref > 0 */
+ kref_init(&req->kref);
+ return req;
+}
+
+void drbd_req_destroy(struct kref *kref)
{
- const unsigned long s = req->rq_state;
+ struct drbd_request *req = container_of(kref, struct drbd_request, kref);
+ struct drbd_conf *mdev = req->w.mdev;
+ const unsigned s = req->rq_state;
+
+ if ((req->master_bio && !(s & RQ_POSTPONED)) ||
+ atomic_read(&req->completion_ref) ||
+ (s & RQ_LOCAL_PENDING) ||
+ ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
+ dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
+ s, atomic_read(&req->completion_ref));
+ return;
+ }
/* remove it from the transfer log.
* well, only if it had been there in the first
@@ -67,24 +113,33 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* and never sent), it should still be "empty" as
* initialized in drbd_req_new(), so we can list_del() it
* here unconditionally */
- list_del(&req->tl_requests);
+ list_del_init(&req->tl_requests);
/* if it was a write, we may have to set the corresponding
* bit(s) out-of-sync first. If it had a local part, we need to
* release the reference to the activity log. */
- if (rw == WRITE) {
+ if (s & RQ_WRITE) {
/* Set out-of-sync unless both OK flags are set
* (local only or remote failed).
* Other places where we set out-of-sync:
* READ with local io-error */
- if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
- drbd_set_out_of_sync(mdev, req->sector, req->size);
- if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
- drbd_set_in_sync(mdev, req->sector, req->size);
+ /* There is a special case:
+ * we may notice late that IO was suspended,
+ * and postpone, or schedule for retry, a write,
+ * before it even was submitted or sent.
+ * In that case we do not want to touch the bitmap at all.
+ */
+ if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
+ if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
+
+ if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
+ drbd_set_in_sync(mdev, req->i.sector, req->i.size);
+ }
/* one might be tempted to move the drbd_al_complete_io
- * to the local io completion callback drbd_endio_pri.
+ * to the local io completion callback drbd_request_endio.
* but, if this was a mirror write, we may only
* drbd_al_complete_io after this is RQ_NET_DONE,
* otherwise the extent could be dropped from the al
@@ -93,109 +148,35 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* but after the extent has been dropped from the al,
* we would forget to resync the corresponding extent.
*/
- if (s & RQ_LOCAL_MASK) {
+ if (s & RQ_IN_ACT_LOG) {
if (get_ldev_if_state(mdev, D_FAILED)) {
- if (s & RQ_IN_ACT_LOG)
- drbd_al_complete_io(mdev, req->sector);
+ drbd_al_complete_io(mdev, &req->i);
put_ldev(mdev);
} else if (__ratelimit(&drbd_ratelimit_state)) {
- dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
- "but my Disk seems to have failed :(\n",
- (unsigned long long) req->sector);
+ dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
+ "but my Disk seems to have failed :(\n",
+ (unsigned long long) req->i.sector, req->i.size);
}
}
}
- drbd_req_free(req);
+ mempool_free(req, drbd_request_mempool);
}
-static void queue_barrier(struct drbd_conf *mdev)
-{
- struct drbd_tl_epoch *b;
-
- /* We are within the req_lock. Once we queued the barrier for sending,
- * we set the CREATE_BARRIER bit. It is cleared as soon as a new
- * barrier/epoch object is added. This is the only place this bit is
- * set. It indicates that the barrier for this epoch is already queued,
- * and no new epoch has been created yet. */
- if (test_bit(CREATE_BARRIER, &mdev->flags))
- return;
-
- b = mdev->newest_tle;
- b->w.cb = w_send_barrier;
- /* inc_ap_pending done here, so we won't
- * get imbalanced on connection loss.
- * dec_ap_pending will be done in got_BarrierAck
- * or (on connection loss) in tl_clear. */
- inc_ap_pending(mdev);
- drbd_queue_work(&mdev->data.work, &b->w);
- set_bit(CREATE_BARRIER, &mdev->flags);
+static void wake_all_senders(struct drbd_tconn *tconn) {
+ wake_up(&tconn->sender_work.q_wait);
}
-static void _about_to_complete_local_write(struct drbd_conf *mdev,
- struct drbd_request *req)
+/* must hold resource->req_lock */
+void start_new_tl_epoch(struct drbd_tconn *tconn)
{
- const unsigned long s = req->rq_state;
- struct drbd_request *i;
- struct drbd_epoch_entry *e;
- struct hlist_node *n;
- struct hlist_head *slot;
-
- /* Before we can signal completion to the upper layers,
- * we may need to close the current epoch.
- * We can skip this, if this request has not even been sent, because we
- * did not have a fully established connection yet/anymore, during
- * bitmap exchange, or while we are C_AHEAD due to congestion policy.
- */
- if (mdev->state.conn >= C_CONNECTED &&
- (s & RQ_NET_SENT) != 0 &&
- req->epoch == mdev->newest_tle->br_number)
- queue_barrier(mdev);
-
- /* we need to do the conflict detection stuff,
- * if we have the ee_hash (two_primaries) and
- * this has been on the network */
- if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
- const sector_t sector = req->sector;
- const int size = req->size;
-
- /* ASSERT:
- * there must be no conflicting requests, since
- * they must have been failed on the spot */
-#define OVERLAPS overlaps(sector, size, i->sector, i->size)
- slot = tl_hash_slot(mdev, sector);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
- "other: %p %llus +%u\n",
- req, (unsigned long long)sector, size,
- i, (unsigned long long)i->sector, i->size);
- }
- }
+ /* no point closing an epoch, if it is empty, anyways. */
+ if (tconn->current_tle_writes == 0)
+ return;
- /* maybe "wake" those conflicting epoch entries
- * that wait for this request to finish.
- *
- * currently, there can be only _one_ such ee
- * (well, or some more, which would be pending
- * P_DISCARD_ACK not yet sent by the asender...),
- * since we block the receiver thread upon the
- * first conflict detection, which will wait on
- * misc_wait. maybe we want to assert that?
- *
- * anyways, if we found one,
- * we just have to do a wake_up. */
-#undef OVERLAPS
-#define OVERLAPS overlaps(sector, size, e->sector, e->size)
- slot = ee_hash_slot(mdev, req->sector);
- hlist_for_each_entry(e, n, slot, collision) {
- if (OVERLAPS) {
- wake_up(&mdev->misc_wait);
- break;
- }
- }
- }
-#undef OVERLAPS
+ tconn->current_tle_writes = 0;
+ atomic_inc(&tconn->current_tle_nr);
+ wake_all_senders(tconn);
}
void complete_master_bio(struct drbd_conf *mdev,
@@ -205,17 +186,33 @@ void complete_master_bio(struct drbd_conf *mdev,
dec_ap_bio(mdev);
}
+
+static void drbd_remove_request_interval(struct rb_root *root,
+ struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_interval *i = &req->i;
+
+ drbd_remove_interval(root, i);
+
+ /* Wake up any processes waiting for this request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
/* Helper for __req_mod().
* Set m->bio to the master bio, if it is fit to be completed,
* or leave it alone (it is initialized to NULL in __req_mod),
* if it has already been completed, or cannot be completed yet.
* If m->bio is set, the error status to be returned is placed in m->error.
*/
-void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
+static
+void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
- const unsigned long s = req->rq_state;
- struct drbd_conf *mdev = req->mdev;
- int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
+ const unsigned s = req->rq_state;
+ struct drbd_conf *mdev = req->w.mdev;
+ int rw;
+ int error, ok;
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -226,165 +223,220 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
* the receiver,
* the bio_endio completion callbacks.
*/
- if (s & RQ_NET_QUEUED)
- return;
- if (s & RQ_NET_PENDING)
+ if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
+ (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
+ (s & RQ_COMPLETION_SUSP)) {
+ dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
return;
- if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
+ }
+
+ if (!req->master_bio) {
+ dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
return;
+ }
- if (req->master_bio) {
- /* this is data_received (remote read)
- * or protocol C P_WRITE_ACK
- * or protocol B P_RECV_ACK
- * or protocol A "handed_over_to_network" (SendAck)
- * or canceled or failed,
- * or killed from the transfer log due to connection loss.
- */
+ rw = bio_rw(req->master_bio);
- /*
- * figure out whether to report success or failure.
- *
- * report success when at least one of the operations succeeded.
- * or, to put the other way,
- * only report failure, when both operations failed.
- *
- * what to do about the failures is handled elsewhere.
- * what we need to do here is just: complete the master_bio.
- *
- * local completion error, if any, has been stored as ERR_PTR
- * in private_bio within drbd_endio_pri.
- */
- int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
- int error = PTR_ERR(req->private_bio);
+ /*
+ * figure out whether to report success or failure.
+ *
+ * report success when at least one of the operations succeeded.
+ * or, to put the other way,
+ * only report failure, when both operations failed.
+ *
+ * what to do about the failures is handled elsewhere.
+ * what we need to do here is just: complete the master_bio.
+ *
+ * local completion error, if any, has been stored as ERR_PTR
+ * in private_bio within drbd_request_endio.
+ */
+ ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
+ error = PTR_ERR(req->private_bio);
- /* remove the request from the conflict detection
- * respective block_id verification hash */
- if (!hlist_unhashed(&req->collision))
- hlist_del(&req->collision);
- else
- D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
+ /* remove the request from the conflict detection
+ * respective block_id verification hash */
+ if (!drbd_interval_empty(&req->i)) {
+ struct rb_root *root;
- /* for writes we need to do some extra housekeeping */
if (rw == WRITE)
- _about_to_complete_local_write(mdev, req);
+ root = &mdev->write_requests;
+ else
+ root = &mdev->read_requests;
+ drbd_remove_request_interval(root, req);
+ } else if (!(s & RQ_POSTPONED))
+ D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
- /* Update disk stats */
- _drbd_end_io_acct(mdev, req);
+ /* Before we can signal completion to the upper layers,
+ * we may need to close the current transfer log epoch.
+ * We are within the request lock, so we can simply compare
+ * the request epoch number with the current transfer log
+ * epoch number. If they match, increase the current_tle_nr,
+ * and reset the transfer log epoch write_cnt.
+ */
+ if (rw == WRITE &&
+ req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
+ start_new_tl_epoch(mdev->tconn);
+
+ /* Update disk stats */
+ _drbd_end_io_acct(mdev, req);
+
+ /* If READ failed,
+ * have it be pushed back to the retry work queue,
+ * so it will re-enter __drbd_make_request(),
+ * and be re-assigned to a suitable local or remote path,
+ * or failed if we do not have access to good data anymore.
+ *
+ * Unless it was failed early by __drbd_make_request(),
+ * because no path was available, in which case
+ * it was not even added to the transfer_log.
+ *
+ * READA may fail, and will not be retried.
+ *
+ * WRITE should have used all available paths already.
+ */
+ if (!ok && rw == READ && !list_empty(&req->tl_requests))
+ req->rq_state |= RQ_POSTPONED;
+ if (!(req->rq_state & RQ_POSTPONED)) {
m->error = ok ? 0 : (error ?: -EIO);
m->bio = req->master_bio;
req->master_bio = NULL;
}
+}
- if (s & RQ_LOCAL_PENDING)
- return;
+static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
+
+ if (!atomic_sub_and_test(put, &req->completion_ref))
+ return 0;
- if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
- /* this is disconnected (local only) operation,
- * or protocol C P_WRITE_ACK,
- * or protocol A or B P_BARRIER_ACK,
- * or killed from the transfer log due to connection loss. */
- _req_is_done(mdev, req, rw);
+ drbd_req_complete(req, m);
+
+ if (req->rq_state & RQ_POSTPONED) {
+ /* don't destroy the req object just yet,
+ * but queue it for retry */
+ drbd_restart_request(req);
+ return 0;
}
- /* else: network part and not DONE yet. that is
- * protocol A or B, barrier ack still pending... */
+
+ return 1;
}
-static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
+/* I'd like this to be the only place that manipulates
+ * req->completion_ref and req->kref. */
+static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
+ int clear, int set)
{
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
+ unsigned s = req->rq_state;
+ int c_put = 0;
+ int k_put = 0;
- if (!is_susp(mdev->state))
- _req_may_be_done(req, m);
-}
+ if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP))
+ set |= RQ_COMPLETION_SUSP;
-/*
- * checks whether there was an overlapping request
- * or ee already registered.
- *
- * if so, return 1, in which case this request is completed on the spot,
- * without ever being submitted or send.
- *
- * return 0 if it is ok to submit this request.
- *
- * NOTE:
- * paranoia: assume something above us is broken, and issues different write
- * requests for the same block simultaneously...
- *
- * To ensure these won't be reordered differently on both nodes, resulting in
- * diverging data sets, we discard the later one(s). Not that this is supposed
- * to happen, but this is the rationale why we also have to check for
- * conflicting requests with local origin, and why we have to do so regardless
- * of whether we allowed multiple primaries.
- *
- * BTW, in case we only have one primary, the ee_hash is empty anyways, and the
- * second hlist_for_each_entry becomes a noop. This is even simpler than to
- * grab a reference on the net_conf, and check for the two_primaries flag...
- */
-static int _req_conflicts(struct drbd_request *req)
-{
- struct drbd_conf *mdev = req->mdev;
- const sector_t sector = req->sector;
- const int size = req->size;
- struct drbd_request *i;
- struct drbd_epoch_entry *e;
- struct hlist_node *n;
- struct hlist_head *slot;
+ /* apply */
- D_ASSERT(hlist_unhashed(&req->collision));
+ req->rq_state &= ~clear;
+ req->rq_state |= set;
- if (!get_net_conf(mdev))
- return 0;
+ /* no change? */
+ if (req->rq_state == s)
+ return;
- /* BUG_ON */
- ERR_IF (mdev->tl_hash_s == 0)
- goto out_no_conflict;
- BUG_ON(mdev->tl_hash == NULL);
-
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
- slot = tl_hash_slot(mdev, sector);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "%s[%u] Concurrent local write detected! "
- "[DISCARD L] new: %llus +%u; "
- "pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
- goto out_conflict;
- }
+ /* intent: get references */
+
+ if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
+ atomic_inc(&req->completion_ref);
+
+ if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
+ inc_ap_pending(mdev);
+ atomic_inc(&req->completion_ref);
}
- if (mdev->ee_hash_s) {
- /* now, check for overlapping requests with remote origin */
- BUG_ON(mdev->ee_hash == NULL);
-#undef OVERLAPS
-#define OVERLAPS overlaps(e->sector, e->size, sector, size)
- slot = ee_hash_slot(mdev, sector);
- hlist_for_each_entry(e, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
- " [DISCARD L] new: %llus +%u; "
- "pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)e->sector, e->size);
- goto out_conflict;
- }
- }
+ if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED))
+ atomic_inc(&req->completion_ref);
+
+ if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
+ kref_get(&req->kref); /* wait for the DONE */
+
+ if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
+ atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
+
+ if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
+ atomic_inc(&req->completion_ref);
+
+ /* progress: put references */
+
+ if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
+ ++c_put;
+
+ if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
+ D_ASSERT(req->rq_state & RQ_LOCAL_PENDING);
+ /* local completion may still come in later,
+ * we need to keep the req object around. */
+ kref_get(&req->kref);
+ ++c_put;
+ }
+
+ if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
+ if (req->rq_state & RQ_LOCAL_ABORTED)
+ ++k_put;
+ else
+ ++c_put;
}
-#undef OVERLAPS
-out_no_conflict:
- /* this is like it should be, and what we expected.
- * our users do behave after all... */
- put_net_conf(mdev);
- return 0;
+ if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
+ dec_ap_pending(mdev);
+ ++c_put;
+ }
-out_conflict:
- put_net_conf(mdev);
- return 1;
+ if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED))
+ ++c_put;
+
+ if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
+ if (req->rq_state & RQ_NET_SENT)
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
+ ++k_put;
+ }
+
+ /* potentially complete and destroy */
+
+ if (k_put || c_put) {
+ /* Completion does its own kref_put. If we are going to
+ * kref_sub below, we need req to be still around then. */
+ int at_least = k_put + !!c_put;
+ int refcount = atomic_read(&req->kref.refcount);
+ if (refcount < at_least)
+ dev_err(DEV,
+ "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
+ s, req->rq_state, refcount, at_least);
+ }
+
+ /* If we made progress, retry conflicting peer requests, if any. */
+ if (req->i.waiting)
+ wake_up(&mdev->misc_wait);
+
+ if (c_put)
+ k_put += drbd_req_put_completion_ref(req, m, c_put);
+ if (k_put)
+ kref_sub(&req->kref, k_put, drbd_req_destroy);
+}
+
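
Editorial note: mod_rq_state() above follows one rule throughout — every state bit that stands for outstanding work (local submission, network pending, queued on the sender, expected barrier ack) takes a reference when newly set and gives one back when cleared; the kref protects the request object itself, while completion_ref gates completion of the master bio. The standalone userspace sketch below shows only that idiom; names, bits and counters are illustrative and not the kernel API (the real function batches the puts into c_put/k_put and drops them after the state word is updated).

#include <assert.h>
#include <stdio.h>

#define RQ_LOCAL_PENDING (1u << 0)
#define RQ_NET_PENDING   (1u << 1)

struct req {
	unsigned int state;
	int completion_ref;	/* master bio may complete when this hits 0 */
	int kref;		/* object may be freed when this hits 0 */
};

static void mod_state(struct req *r, unsigned int clear, unsigned int set)
{
	unsigned int old = r->state;

	r->state = (r->state & ~clear) | set;

	/* take one completion reference per newly set "pending" bit ... */
	if (!(old & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		r->completion_ref++;
	if (!(old & RQ_NET_PENDING) && (set & RQ_NET_PENDING))
		r->completion_ref++;

	/* ... and drop one per bit that was set and is now cleared */
	if ((old & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING))
		r->completion_ref--;
	if ((old & RQ_NET_PENDING) && (clear & RQ_NET_PENDING))
		r->completion_ref--;
}

int main(void)
{
	struct req r = { .state = 0, .completion_ref = 0, .kref = 1 };

	mod_state(&r, 0, RQ_LOCAL_PENDING | RQ_NET_PENDING);	/* two refs taken */
	mod_state(&r, RQ_LOCAL_PENDING, 0);			/* local I/O done */
	mod_state(&r, RQ_NET_PENDING, 0);			/* network side done */
	assert(r.completion_ref == 0);	/* all outstanding work accounted for */
	printf("completion_ref is back to %d\n", r.completion_ref);
	return 0;
}
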
+static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req)
+{
+ char b[BDEVNAME_SIZE];
+
+ if (!__ratelimit(&drbd_ratelimit_state))
+ return;
+
+ dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n",
+ (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
+ (unsigned long long)req->i.sector,
+ req->i.size >> 9,
+ bdevname(mdev->ldev->backing_bdev, b));
}
/* obviously this could be coded as many single functions
@@ -402,9 +454,12 @@ out_conflict:
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m)
{
- struct drbd_conf *mdev = req->mdev;
- int rv = 0;
- m->bio = NULL;
+ struct drbd_conf *mdev = req->w.mdev;
+ struct net_conf *nc;
+ int p, rv = 0;
+
+ if (m)
+ m->bio = NULL;
switch (what) {
default:
@@ -413,116 +468,91 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* does not happen...
* initialization done in drbd_req_new
- case created:
+ case CREATED:
break;
*/
- case to_be_send: /* via network */
- /* reached via drbd_make_request_common
+ case TO_BE_SENT: /* via network */
+ /* reached via __drbd_make_request
* and from w_read_retry_remote */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
- req->rq_state |= RQ_NET_PENDING;
- inc_ap_pending(mdev);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ p = nc->wire_protocol;
+ rcu_read_unlock();
+ req->rq_state |=
+ p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
+ p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
+ mod_rq_state(req, m, 0, RQ_NET_PENDING);
break;
- case to_be_submitted: /* locally */
- /* reached via drbd_make_request_common */
+ case TO_BE_SUBMITTED: /* locally */
+ /* reached via __drbd_make_request */
D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
- req->rq_state |= RQ_LOCAL_PENDING;
+ mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
break;
- case completed_ok:
+ case COMPLETED_OK:
if (req->rq_state & RQ_WRITE)
- mdev->writ_cnt += req->size>>9;
+ mdev->writ_cnt += req->i.size >> 9;
else
- mdev->read_cnt += req->size>>9;
+ mdev->read_cnt += req->i.size >> 9;
- req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_LOCAL_PENDING,
+ RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
break;
- case abort_disk_io:
- req->rq_state |= RQ_LOCAL_ABORTED;
- if (req->rq_state & RQ_WRITE)
- _req_may_be_done_not_susp(req, m);
- else
- goto goto_queue_for_net_read;
+ case ABORT_DISK_IO:
+ mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
break;
- case write_completed_with_error:
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- _req_may_be_done_not_susp(req, m);
+ case WRITE_COMPLETED_WITH_ERROR:
+ drbd_report_io_error(mdev, req);
+ __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
+ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
- case read_ahead_completed_with_error:
- /* it is legal to fail READA */
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
- _req_may_be_done_not_susp(req, m);
+ case READ_COMPLETED_WITH_ERROR:
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
+ drbd_report_io_error(mdev, req);
+ __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
+ /* fall through. */
+ case READ_AHEAD_COMPLETED_WITH_ERROR:
+ /* it is legal to fail READA, no __drbd_chk_io_error in that case. */
+ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
- case read_completed_with_error:
- drbd_set_out_of_sync(mdev, req->sector, req->size);
-
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- if (req->rq_state & RQ_LOCAL_ABORTED) {
- _req_may_be_done(req, m);
- break;
- }
-
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
-
- goto_queue_for_net_read:
-
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
-
- /* no point in retrying if there is no good remote data,
- * or we have no connection. */
- if (mdev->state.pdsk != D_UP_TO_DATE) {
- _req_may_be_done_not_susp(req, m);
- break;
- }
-
- /* _req_mod(req,to_be_send); oops, recursion... */
- req->rq_state |= RQ_NET_PENDING;
- inc_ap_pending(mdev);
- /* fall through: _req_mod(req,queue_for_net_read); */
-
- case queue_for_net_read:
+ case QUEUE_FOR_NET_READ:
/* READ or READA, and
* no local disk,
* or target area marked as invalid,
* or just got an io-error. */
- /* from drbd_make_request_common
+ /* from __drbd_make_request
* or from bio_endio during read io-error recovery */
- /* so we can verify the handle in the answer packet
- * corresponding hlist_del is in _req_may_be_done() */
- hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
+ /* So we can verify the handle in the answer packet.
+ * Corresponding drbd_remove_request_interval is in
+ * drbd_req_complete() */
+ D_ASSERT(drbd_interval_empty(&req->i));
+ drbd_insert_interval(&mdev->read_requests, &req->i);
set_bit(UNPLUG_REMOTE, &mdev->flags);
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- req->rq_state |= RQ_NET_QUEUED;
- req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
- ? w_read_retry_remote
- : w_send_read_req;
- drbd_queue_work(&mdev->data.work, &req->w);
+ D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED);
+ req->w.cb = w_send_read_req;
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case queue_for_net_write:
+ case QUEUE_FOR_NET_WRITE:
/* assert something? */
- /* from drbd_make_request_common only */
+ /* from __drbd_make_request only */
- hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector));
- /* corresponding hlist_del is in _req_may_be_done() */
+ /* Corresponding drbd_remove_request_interval is in
+ * drbd_req_complete() */
+ D_ASSERT(drbd_interval_empty(&req->i));
+ drbd_insert_interval(&mdev->write_requests, &req->i);
/* NOTE
* In case the req ended up on the transfer log before being
@@ -533,7 +563,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
*
* _req_add_to_epoch(req); this has to be after the
* _maybe_start_new_epoch(req); which happened in
- * drbd_make_request_common, because we now may set the bit
+ * __drbd_make_request, because we now may set the bit
* again ourselves to close the current epoch.
*
* Add req to the (now) current epoch (barrier). */
@@ -543,202 +573,187 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* hurting performance. */
set_bit(UNPLUG_REMOTE, &mdev->flags);
- /* see drbd_make_request_common,
- * just after it grabs the req_lock */
- D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
-
- req->epoch = mdev->newest_tle->br_number;
-
- /* increment size of current epoch */
- mdev->newest_tle->n_writes++;
-
/* queue work item to send data */
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- req->rq_state |= RQ_NET_QUEUED;
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
/* close the epoch, in case it outgrew the limit */
- if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
- queue_barrier(mdev);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ p = nc->max_epoch_size;
+ rcu_read_unlock();
+ if (mdev->tconn->current_tle_writes >= p)
+ start_new_tl_epoch(mdev->tconn);
break;
- case queue_for_send_oos:
- req->rq_state |= RQ_NET_QUEUED;
- req->w.cb = w_send_oos;
- drbd_queue_work(&mdev->data.work, &req->w);
+ case QUEUE_FOR_SEND_OOS:
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED);
+ req->w.cb = w_send_out_of_sync;
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case read_retry_remote_canceled:
- case send_canceled:
- case send_failed:
+ case READ_RETRY_REMOTE_CANCELED:
+ case SEND_CANCELED:
+ case SEND_FAILED:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
- req->rq_state &= ~RQ_NET_QUEUED;
- /* if we did it right, tl_clear should be scheduled only after
- * this, so this should not be necessary! */
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, 0);
break;
- case handed_over_to_network:
+ case HANDED_OVER_TO_NETWORK:
/* assert something? */
- if (bio_data_dir(req->master_bio) == WRITE)
- atomic_add(req->size>>9, &mdev->ap_in_flight);
-
if (bio_data_dir(req->master_bio) == WRITE &&
- mdev->net_conf->wire_protocol == DRBD_PROT_A) {
+ !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
/* this is what is dangerous about protocol A:
* pretend it was successfully written on the peer. */
- if (req->rq_state & RQ_NET_PENDING) {
- dec_ap_pending(mdev);
- req->rq_state &= ~RQ_NET_PENDING;
- req->rq_state |= RQ_NET_OK;
- } /* else: neg-ack was faster... */
+ if (req->rq_state & RQ_NET_PENDING)
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
+ /* else: neg-ack was faster... */
/* it is still not yet RQ_NET_DONE until the
* corresponding epoch barrier got acked as well,
* so we know what to dirty on connection loss */
}
- req->rq_state &= ~RQ_NET_QUEUED;
- req->rq_state |= RQ_NET_SENT;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
break;
- case oos_handed_to_network:
+ case OOS_HANDED_TO_NETWORK:
/* Was not set PENDING, no longer QUEUED, so is now DONE
* as far as this connection is concerned. */
- req->rq_state &= ~RQ_NET_QUEUED;
- req->rq_state |= RQ_NET_DONE;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
break;
- case connection_lost_while_pending:
+ case CONNECTION_LOST_WHILE_PENDING:
/* transfer log cleanup after connection loss */
- /* assert something? */
- if (req->rq_state & RQ_NET_PENDING)
- dec_ap_pending(mdev);
- req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
- req->rq_state |= RQ_NET_DONE;
- if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
-
- /* if it is still queued, we may not complete it here.
- * it will be canceled soon. */
- if (!(req->rq_state & RQ_NET_QUEUED))
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ mod_rq_state(req, m,
+ RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
+ RQ_NET_DONE);
break;
- case conflict_discarded_by_peer:
- /* for discarded conflicting writes of multiple primaries,
+ case CONFLICT_RESOLVED:
+ /* for superseded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
- * node crashes are covered by the activity log. */
- if (what == conflict_discarded_by_peer)
- dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
- " DRBD is not a random data generator!\n",
- (unsigned long long)req->sector, req->size);
- req->rq_state |= RQ_NET_DONE;
- /* fall through */
- case write_acked_by_peer_and_sis:
- case write_acked_by_peer:
- if (what == write_acked_by_peer_and_sis)
- req->rq_state |= RQ_NET_SIS;
+ * node crashes are covered by the activity log.
+ *
+ * If this request had been marked as RQ_POSTPONED before,
+ * it will actually not be completed, but "restarted",
+ * resubmitted from the retry worker context. */
+ D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
+ break;
+
+ case WRITE_ACKED_BY_PEER_AND_SIS:
+ req->rq_state |= RQ_NET_SIS;
+ case WRITE_ACKED_BY_PEER:
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
/* protocol C; successfully written on peer.
* Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
* for volatile write-back caches on lower level devices. */
- case recv_acked_by_peer:
+ goto ack_common;
+ case RECV_ACKED_BY_PEER:
+ D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
/* protocol B; pretends to be successfully written on peer.
- * see also notes above in handed_over_to_network about
+ * see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
- req->rq_state |= RQ_NET_OK;
+ ack_common:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- req->rq_state &= ~RQ_NET_PENDING;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
break;
- case neg_acked:
- /* assert something? */
- if (req->rq_state & RQ_NET_PENDING) {
- dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- }
- req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
+ case POSTPONE_WRITE:
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ /* If this node has already detected the write conflict, the
+ * worker will be waiting on misc_wait. Wake it up once this
+ * request has completed locally.
+ */
+ D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ req->rq_state |= RQ_POSTPONED;
+ if (req->i.waiting)
+ wake_up(&mdev->misc_wait);
+ /* Do not clear RQ_NET_PENDING. This request will make further
+ * progress via restart_conflicting_writes() or
+ * fail_postponed_requests(). Hopefully. */
+ break;
- req->rq_state |= RQ_NET_DONE;
- _req_may_be_done_not_susp(req, m);
- /* else: done by handed_over_to_network */
+ case NEG_ACKED:
+ mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
break;
- case fail_frozen_disk_io:
+ case FAIL_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
-
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
break;
- case restart_frozen_disk_io:
+ case RESTART_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
- req->rq_state &= ~RQ_LOCAL_COMPLETED;
+ mod_rq_state(req, m,
+ RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
+ RQ_LOCAL_PENDING);
rv = MR_READ;
if (bio_data_dir(req->master_bio) == WRITE)
rv = MR_WRITE;
- get_ldev(mdev);
+ get_ldev(mdev); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case resend:
+ case RESEND:
/* Simply complete (local only) READs. */
if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
- _req_may_be_done(req, m);
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
break;
}
/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
- before the connection loss (B&C only); only P_BARRIER_ACK was missing.
- Trowing them out of the TL here by pretending we got a BARRIER_ACK
- We ensure that the peer was not rebooted */
+ before the connection loss (B&C only); only P_BARRIER_ACK
+ (or the local completion?) was missing when we suspended.
+ Throwing them out of the TL here by pretending we got a BARRIER_ACK.
+ During connection handshake, we ensure that the peer was not rebooted. */
if (!(req->rq_state & RQ_NET_OK)) {
+ /* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
+ * in that case we must not set RQ_NET_PENDING. */
+
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) {
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
- }
+ } /* else: FIXME can this happen? */
break;
}
- /* else, fall through to barrier_acked */
+ /* else, fall through to BARRIER_ACKED */
- case barrier_acked:
+ case BARRIER_ACKED:
+ /* barrier ack for READ requests does not make sense */
if (!(req->rq_state & RQ_WRITE))
break;
if (req->rq_state & RQ_NET_PENDING) {
- /* barrier came in before all requests have been acked.
+ /* barrier came in before all requests were acked.
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
- dev_err(DEV, "FIXME (barrier_acked but pending)\n");
- list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
+ dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
}
- if ((req->rq_state & RQ_NET_MASK) != 0) {
- req->rq_state |= RQ_NET_DONE;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- }
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ /* Allowed to complete requests, even while suspended.
+ * As this is called for all requests within a matching epoch,
+ * we need to filter, and only set RQ_NET_DONE for those that
+ * have actually been on the wire. */
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP,
+ (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
break;
- case data_received:
+ case DATA_RECEIVED:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- dec_ap_pending(mdev);
- req->rq_state &= ~RQ_NET_PENDING;
- req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
break;
};
@@ -752,75 +767,265 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* since size may be bigger than BM_BLOCK_SIZE,
* we may need to check several bits.
*/
-static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
+static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
if (mdev->state.disk == D_UP_TO_DATE)
- return 1;
- if (mdev->state.disk >= D_OUTDATED)
- return 0;
- if (mdev->state.disk < D_INCONSISTENT)
- return 0;
- /* state.disk == D_INCONSISTENT We will have a look at the BitMap */
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ return true;
+ if (mdev->state.disk != D_INCONSISTENT)
+ return false;
esector = sector + (size >> 9) - 1;
-
+ nr_sectors = drbd_get_capacity(mdev->this_bdev);
D_ASSERT(sector < nr_sectors);
D_ASSERT(esector < nr_sectors);
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
- return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
+ return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
+}
+
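
As a rough illustration of the bitmap range check above — assuming the usual 4 KiB-per-bit bitmap granularity, i.e. BM_SECT_TO_BIT being a shift by 3 — the following standalone sketch converts a sector range to a bit range and refuses the local read if any covered bit is still marked out of sync (the bitmap layout here is illustrative, not DRBD's on-disk format):

#include <stdbool.h>
#include <stdio.h>

#define SECT_TO_BIT(s)	((s) >> 3)	/* 512-byte sectors, assumed 4 KiB per bit */

static bool may_do_local_read(const unsigned char *bitmap,
			      unsigned long long sector, int size)
{
	unsigned long long bit;
	unsigned long long sbnr = SECT_TO_BIT(sector);
	unsigned long long ebnr = SECT_TO_BIT(sector + (size >> 9) - 1);

	for (bit = sbnr; bit <= ebnr; bit++)
		if (bitmap[bit / 8] & (1u << (bit % 8)))
			return false;	/* a covered block is still out of sync */
	return true;
}

int main(void)
{
	unsigned char bitmap[16] = { 0 };

	bitmap[0] = 0x02;	/* bit 1 set: sectors 8..15 are out of sync */
	printf("read at sector 0: %d\n", may_do_local_read(bitmap, 0, 4096));	/* 1 */
	printf("read at sector 8: %d\n", may_do_local_read(bitmap, 8, 4096));	/* 0 */
	return 0;
}
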
+static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector,
+ enum drbd_read_balancing rbm)
+{
+ struct backing_dev_info *bdi;
+ int stripe_shift;
+
+ switch (rbm) {
+ case RB_CONGESTED_REMOTE:
+ bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ return bdi_read_congested(bdi);
+ case RB_LEAST_PENDING:
+ return atomic_read(&mdev->local_cnt) >
+ atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
+ case RB_32K_STRIPING: /* stripe_shift = 15 */
+ case RB_64K_STRIPING:
+ case RB_128K_STRIPING:
+ case RB_256K_STRIPING:
+ case RB_512K_STRIPING:
+ case RB_1M_STRIPING: /* stripe_shift = 20 */
+ stripe_shift = (rbm - RB_32K_STRIPING + 15);
+ return (sector >> (stripe_shift - 9)) & 1;
+ case RB_ROUND_ROBIN:
+ return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
+ case RB_PREFER_REMOTE:
+ return true;
+ case RB_PREFER_LOCAL:
+ default:
+ return false;
+ }
+}
+
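
To see what the striping branches above compute: sectors are 512 bytes, so shifting the sector number right by (stripe_shift - 9) yields the stripe index, and its low bit alternates between local and remote every 2^stripe_shift bytes. A small standalone sketch (illustrative only) for the 64 KiB policy:

#include <stdio.h>

/* one stripe covers 2^stripe_shift bytes; sectors are 512 bytes,
 * so the stripe index is sector >> (stripe_shift - 9) */
static int read_from_remote(unsigned long long sector, int stripe_shift)
{
	return (sector >> (stripe_shift - 9)) & 1;
}

int main(void)
{
	unsigned long long s;

	/* RB_64K_STRIPING corresponds to stripe_shift = 16:
	 * the target alternates every 128 sectors (64 KiB) */
	for (s = 0; s < 512; s += 128)
		printf("sector %llu -> %s\n", s,
		       read_from_remote(s, 16) ? "remote" : "local");
	return 0;
}
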
+/*
+ * complete_conflicting_writes - wait for any conflicting write requests
+ *
+ * The write_requests tree contains all active write requests which we
+ * currently know about. Wait for any requests to complete which conflict with
+ * the new one.
+ *
+ * Only way out: remove the conflicting intervals from the tree.
+ */
+static void complete_conflicting_writes(struct drbd_request *req)
+{
+ DEFINE_WAIT(wait);
+ struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_interval *i;
+ sector_t sector = req->i.sector;
+ int size = req->i.size;
+
+ i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ if (!i)
+ return;
+
+ for (;;) {
+ prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
+ i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ if (!i)
+ break;
+ /* Indicate to wake up device->misc_wait on progress. */
+ i->waiting = true;
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ schedule();
+ spin_lock_irq(&mdev->tconn->req_lock);
+ }
+ finish_wait(&mdev->misc_wait, &wait);
}
+/* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_conf *mdev)
{
- int congested = 0;
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct net_conf *nc;
+ bool congested = false;
+ enum drbd_on_congestion on_congestion;
+
+ nc = rcu_dereference(tconn->net_conf);
+ on_congestion = nc ? nc->on_congestion : OC_BLOCK;
+ if (on_congestion == OC_BLOCK ||
+ tconn->agreed_pro_version < 96)
+ return;
/* If I don't even have good local storage, we can not reasonably try
* to pull ahead of the peer. We also need the local reference to make
* sure mdev->act_log is there.
- * Note: caller has to make sure that net_conf is there.
*/
if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
return;
- if (mdev->net_conf->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ if (nc->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
dev_info(DEV, "Congestion-fill threshold reached\n");
- congested = 1;
+ congested = true;
}
- if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ if (mdev->act_log->used >= nc->cong_extents) {
dev_info(DEV, "Congestion-extents threshold reached\n");
- congested = 1;
+ congested = true;
}
if (congested) {
- queue_barrier(mdev); /* last barrier, after mirrored writes */
+ /* start a new epoch for non-mirrored writes */
+ start_new_tl_epoch(mdev->tconn);
- if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ if (on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
- else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ else /*nc->on_congestion == OC_DISCONNECT */
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
}
put_ldev(mdev);
}
-static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+/* If this returns false, and req->private_bio is still set,
+ * this should be submitted locally.
+ *
+ * If it returns false, but req->private_bio is not set,
+ * we do not have access to good data :(
+ *
+ * Otherwise, this destroys req->private_bio, if any,
+ * and returns true.
+ */
+static bool do_remote_read(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ enum drbd_read_balancing rbm;
+
+ if (req->private_bio) {
+ if (!drbd_may_do_local_read(mdev,
+ req->i.sector, req->i.size)) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
+ }
+ }
+
+ if (mdev->state.pdsk != D_UP_TO_DATE)
+ return false;
+
+ if (req->private_bio == NULL)
+ return true;
+
+ /* TODO: improve read balancing decisions, take into account drbd
+ * protocol, pending requests etc. */
+
+ rcu_read_lock();
+ rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
+ rcu_read_unlock();
+
+ if (rbm == RB_PREFER_LOCAL && req->private_bio)
+ return false; /* submit locally */
+
+ if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) {
+ if (req->private_bio) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+/* returns number of connections (== 1, for drbd 8.4)
+ * expected to actually write this data,
+ * which does NOT include those that we are L_AHEAD for. */
+static int drbd_process_write_request(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ int remote, send_oos;
+
+ rcu_read_lock();
+ remote = drbd_should_do_remote(mdev->state);
+ if (remote) {
+ maybe_pull_ahead(mdev);
+ remote = drbd_should_do_remote(mdev->state);
+ }
+ send_oos = drbd_should_send_out_of_sync(mdev->state);
+ rcu_read_unlock();
+
+ /* Need to replicate writes. Unless it is an empty flush,
+ * which is better mapped to a DRBD P_BARRIER packet,
+ * also for drbd wire protocol compatibility reasons.
+ * If this was a flush, just start a new epoch.
+ * Unless the current epoch was empty anyways, or we are not currently
+ * replicating, in which case there is no point. */
+ if (unlikely(req->i.size == 0)) {
+ /* The only size==0 bios we expect are empty flushes. */
+ D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
+ if (remote)
+ start_new_tl_epoch(mdev->tconn);
+ return 0;
+ }
+
+ if (!remote && !send_oos)
+ return 0;
+
+ D_ASSERT(!(remote && send_oos));
+
+ if (remote) {
+ _req_mod(req, TO_BE_SENT);
+ _req_mod(req, QUEUE_FOR_NET_WRITE);
+ } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size))
+ _req_mod(req, QUEUE_FOR_SEND_OOS);
+
+ return remote;
+}
+
+static void
+drbd_submit_req_private_bio(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ struct bio *bio = req->private_bio;
+ const int rw = bio_rw(bio);
+
+ bio->bi_bdev = mdev->ldev->backing_bdev;
+
+ /* State may have changed since we grabbed our reference on the
+ * ->ldev member. Double check, and short-circuit to endio.
+ * In case the last activity log transaction failed to get on
+ * stable storage, and this is a WRITE, we may not even submit
+ * this bio. */
+ if (get_ldev(mdev)) {
+ if (drbd_insert_fault(mdev,
+ rw == WRITE ? DRBD_FAULT_DT_WR
+ : rw == READ ? DRBD_FAULT_DT_RD
+ : DRBD_FAULT_DT_RA))
+ bio_endio(bio, -EIO);
+ else
+ generic_make_request(bio);
+ put_ldev(mdev);
+ } else
+ bio_endio(bio, -EIO);
+}
+
+void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
- const int size = bio->bi_size;
- const sector_t sector = bio->bi_sector;
- struct drbd_tl_epoch *b = NULL;
+ struct bio_and_error m = { NULL, };
struct drbd_request *req;
- int local, remote, send_oos = 0;
- int err = -EIO;
- int ret = 0;
- union drbd_state s;
+ bool no_remote = false;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -830,55 +1035,14 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* if user cannot handle io errors, that's not our business. */
dev_err(DEV, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
- return 0;
+ return;
}
req->start_time = start_time;
- local = get_ldev(mdev);
- if (!local) {
- bio_put(req->private_bio); /* or we get a bio leak */
+ if (!get_ldev(mdev)) {
+ bio_put(req->private_bio);
req->private_bio = NULL;
}
- if (rw == WRITE) {
- /* Need to replicate writes. Unless it is an empty flush,
- * which is better mapped to a DRBD P_BARRIER packet,
- * also for drbd wire protocol compatibility reasons. */
- if (unlikely(size == 0)) {
- /* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(bio->bi_rw & REQ_FLUSH);
- remote = 0;
- } else
- remote = 1;
- } else {
- /* READ || READA */
- if (local) {
- if (!drbd_may_do_local_read(mdev, sector, size)) {
- /* we could kick the syncer to
- * sync this extent asap, wait for
- * it, then continue locally.
- * Or just issue the request remotely.
- */
- local = 0;
- bio_put(req->private_bio);
- req->private_bio = NULL;
- put_ldev(mdev);
- }
- }
- remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
- }
-
- /* If we have a disk, but a READA request is mapped to remote,
- * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
- * Just fail that READA request right here.
- *
- * THINK: maybe fail all READA when not local?
- * or make this configurable...
- * if network is slow, READA won't do any good.
- */
- if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
- err = -EWOULDBLOCK;
- goto fail_and_free_req;
- }
/* For WRITES going to the local disk, grab a reference on the target
* extent. This waits for any resync activity in the corresponding
@@ -887,348 +1051,131 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* of transactional on-disk meta data updates.
* Empty flushes don't need to go into the activity log, they can only
* flush data for pending writes which are already in there. */
- if (rw == WRITE && local && size
+ if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
req->rq_state |= RQ_IN_ACT_LOG;
- drbd_al_begin_io(mdev, sector);
- }
-
- s = mdev->state;
- remote = remote && drbd_should_do_remote(s);
- send_oos = rw == WRITE && drbd_should_send_oos(s);
- D_ASSERT(!(remote && send_oos));
-
- if (!(local || remote) && !is_susp(mdev->state)) {
- if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
- goto fail_free_complete;
+ drbd_al_begin_io(mdev, &req->i);
}
- /* For WRITE request, we have to make sure that we have an
- * unused_spare_tle, in case we need to start a new epoch.
- * I try to be smart and avoid to pre-allocate always "just in case",
- * but there is a race between testing the bit and pointer outside the
- * spinlock, and grabbing the spinlock.
- * if we lost that race, we retry. */
- if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
-allocate_barrier:
- b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
- if (!b) {
- dev_err(DEV, "Failed to alloc barrier.\n");
- err = -ENOMEM;
- goto fail_free_complete;
- }
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (rw == WRITE) {
+ /* This may temporarily give up the req_lock,
+ * but will re-acquire it before it returns here.
+ * Needs to be before the check on drbd_suspended() */
+ complete_conflicting_writes(req);
}
- /* GOOD, everything prepared, grab the spin_lock */
- spin_lock_irq(&mdev->req_lock);
-
- if (is_susp(mdev->state)) {
- /* If we got suspended, use the retry mechanism of
- drbd_make_request() to restart processing of this
- bio. In the next call to drbd_make_request
- we sleep in inc_ap_bio() */
- ret = 1;
- spin_unlock_irq(&mdev->req_lock);
- goto fail_free_complete;
- }
+ /* no more giving up req_lock from now on! */
- if (remote || send_oos) {
- remote = drbd_should_do_remote(mdev->state);
- send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
- D_ASSERT(!(remote && send_oos));
-
- if (!(remote || send_oos))
- dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
- if (!(local || remote)) {
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
- spin_unlock_irq(&mdev->req_lock);
- goto fail_free_complete;
+ if (drbd_suspended(mdev)) {
+ /* push back and retry: */
+ req->rq_state |= RQ_POSTPONED;
+ if (req->private_bio) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
}
+ goto out;
}
- if (b && mdev->unused_spare_tle == NULL) {
- mdev->unused_spare_tle = b;
- b = NULL;
- }
- if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
- /* someone closed the current epoch
- * while we were grabbing the spinlock */
- spin_unlock_irq(&mdev->req_lock);
- goto allocate_barrier;
- }
-
-
/* Update disk stats */
_drbd_start_io_acct(mdev, req, bio);
- /* _maybe_start_new_epoch(mdev);
- * If we need to generate a write barrier packet, we have to add the
- * new epoch (barrier) object, and queue the barrier packet for sending,
- * and queue the req's data after it _within the same lock_, otherwise
- * we have race conditions were the reorder domains could be mixed up.
- *
- * Even read requests may start a new epoch and queue the corresponding
- * barrier packet. To get the write ordering right, we only have to
- * make sure that, if this is a write request and it triggered a
- * barrier packet, this request is queued within the same spinlock. */
- if ((remote || send_oos) && mdev->unused_spare_tle &&
- test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
- } else {
- D_ASSERT(!(remote && rw == WRITE &&
- test_bit(CREATE_BARRIER, &mdev->flags)));
+ /* We fail READ/READA early, if we can not serve it.
+ * We must do this before req is registered on any lists.
+ * Otherwise, drbd_req_complete() will queue failed READ for retry. */
+ if (rw != WRITE) {
+ if (!do_remote_read(req) && !req->private_bio)
+ goto nodata;
}
- /* NOTE
- * Actually, 'local' may be wrong here already, since we may have failed
- * to write to the meta data, and may become wrong anytime because of
- * local io-error for some other request, which would lead to us
- * "detaching" the local disk.
- *
- * 'remote' may become wrong any time because the network could fail.
- *
- * This is a harmless race condition, though, since it is handled
- * correctly at the appropriate places; so it just defers the failure
- * of the respective operation.
- */
-
- /* mark them early for readability.
- * this just sets some state flags. */
- if (remote)
- _req_mod(req, to_be_send);
- if (local)
- _req_mod(req, to_be_submitted);
-
- /* check this request on the collision detection hash tables.
- * if we have a conflict, just complete it here.
- * THINK do we want to check reads, too? (I don't think so...) */
- if (rw == WRITE && _req_conflicts(req))
- goto fail_conflicting;
+ /* which transfer log epoch does this belong to? */
+ req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
/* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */
- if (likely(size!=0))
- list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+ if (likely(req->i.size!=0)) {
+ if (rw == WRITE)
+ mdev->tconn->current_tle_writes++;
- /* NOTE remote first: to get the concurrent write detection right,
- * we must register the request before start of local IO. */
- if (remote) {
- /* either WRITE and C_CONNECTED,
- * or READ, and no local disk,
- * or READ, but not in sync.
- */
- _req_mod(req, (rw == WRITE)
- ? queue_for_net_write
- : queue_for_net_read);
+ list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);
}
- if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
- _req_mod(req, queue_for_send_oos);
- if (remote &&
- mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
- maybe_pull_ahead(mdev);
-
- /* If this was a flush, queue a drbd barrier/start a new epoch.
- * Unless the current epoch was empty anyways, or we are not currently
- * replicating, in which case there is no point. */
- if (unlikely(bio->bi_rw & REQ_FLUSH)
- && mdev->newest_tle->n_writes
- && drbd_should_do_remote(mdev->state))
- queue_barrier(mdev);
-
- spin_unlock_irq(&mdev->req_lock);
- kfree(b); /* if someone else has beaten us to it... */
-
- if (local) {
- req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
-
- /* State may have changed since we grabbed our reference on the
- * mdev->ldev member. Double check, and short-circuit to endio.
- * In case the last activity log transaction failed to get on
- * stable storage, and this is a WRITE, we may not even submit
- * this bio. */
- if (get_ldev(mdev)) {
- if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
- : rw == READ ? DRBD_FAULT_DT_RD
- : DRBD_FAULT_DT_RA))
- bio_endio(req->private_bio, -EIO);
- else
- generic_make_request(req->private_bio);
- put_ldev(mdev);
+ if (rw == WRITE) {
+ if (!drbd_process_write_request(req))
+ no_remote = true;
+ } else {
+ /* We either have a private_bio, or we can read from remote.
+ * Otherwise we had done the goto nodata above. */
+ if (req->private_bio == NULL) {
+ _req_mod(req, TO_BE_SENT);
+ _req_mod(req, QUEUE_FOR_NET_READ);
} else
- bio_endio(req->private_bio, -EIO);
+ no_remote = true;
}
- return 0;
-
-fail_conflicting:
- /* this is a conflicting request.
- * even though it may have been only _partially_
- * overlapping with one of the currently pending requests,
- * without even submitting or sending it, we will
- * pretend that it was successfully served right now.
- */
- _drbd_end_io_acct(mdev, req);
- spin_unlock_irq(&mdev->req_lock);
- if (remote)
- dec_ap_pending(mdev);
- /* THINK: do we want to fail it (-EIO), or pretend success?
- * this pretends success. */
- err = 0;
-
-fail_free_complete:
- if (req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_complete_io(mdev, sector);
-fail_and_free_req:
- if (local) {
- bio_put(req->private_bio);
- req->private_bio = NULL;
- put_ldev(mdev);
+ if (req->private_bio) {
+ /* needs to be marked within the same spinlock */
+ _req_mod(req, TO_BE_SUBMITTED);
+ /* but we need to give up the spinlock to submit */
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ drbd_submit_req_private_bio(req);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ } else if (no_remote) {
+nodata:
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
+ (unsigned long long)req->i.sector, req->i.size >> 9);
+ /* A write may have been queued for send_oos, however.
+ * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
}
- if (!ret)
- bio_endio(bio, err);
-
- drbd_req_free(req);
- dec_ap_bio(mdev);
- kfree(b);
-
- return ret;
-}
-/* helper function for drbd_make_request
- * if we can determine just by the mdev (state) that this request will fail,
- * return 1
- * otherwise return 0
- */
-static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
-{
- if (mdev->state.role != R_PRIMARY &&
- (!allow_oos || is_write)) {
- if (__ratelimit(&drbd_ratelimit_state)) {
- dev_err(DEV, "Process %s[%u] tried to %s; "
- "since we are not in Primary state, "
- "we cannot allow this\n",
- current->comm, current->pid,
- is_write ? "WRITE" : "READ");
- }
- return 1;
- }
+out:
+ if (drbd_req_put_completion_ref(req, &m, 1))
+ kref_put(&req->kref, drbd_req_destroy);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- return 0;
+ if (m.bio)
+ complete_master_bio(mdev, &m);
+ return;
}
void drbd_make_request(struct request_queue *q, struct bio *bio)
{
- unsigned int s_enr, e_enr;
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
unsigned long start_time;
- if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
- bio_endio(bio, -EPERM);
- return;
- }
-
start_time = jiffies;
/*
* what we "blindly" assume:
*/
- D_ASSERT((bio->bi_size & 0x1ff) == 0);
-
- /* to make some things easier, force alignment of requests within the
- * granularity of our hash tables */
- s_enr = bio->bi_sector >> HT_SHIFT;
- e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
-
- if (likely(s_enr == e_enr)) {
- do {
- inc_ap_bio(mdev, 1);
- } while (drbd_make_request_common(mdev, bio, start_time));
- return;
- }
-
- /* can this bio be split generically?
- * Maybe add our own split-arbitrary-bios function. */
- if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
- /* rather error out here than BUG in bio_split */
- dev_err(DEV, "bio would need to, but cannot, be split: "
- "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
- bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- (unsigned long long)bio->bi_sector);
- bio_endio(bio, -EINVAL);
- } else {
- /* This bio crosses some boundary, so we have to split it. */
- struct bio_pair *bp;
- /* works for the "do not cross hash slot boundaries" case
- * e.g. sector 262269, size 4096
- * s_enr = 262269 >> 6 = 4097
- * e_enr = (262269+8-1) >> 6 = 4098
- * HT_SHIFT = 6
- * sps = 64, mask = 63
- * first_sectors = 64 - (262269 & 63) = 3
- */
- const sector_t sect = bio->bi_sector;
- const int sps = 1 << HT_SHIFT; /* sectors per slot */
- const int mask = sps - 1;
- const sector_t first_sectors = sps - (sect & mask);
- bp = bio_split(bio, first_sectors);
+ D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
- /* we need to get a "reference count" (ap_bio_cnt)
- * to avoid races with the disconnect/reconnect/suspend code.
- * In case we need to split the bio here, we need to get three references
- * atomically, otherwise we might deadlock when trying to submit the
- * second one! */
- inc_ap_bio(mdev, 3);
-
- D_ASSERT(e_enr == s_enr + 1);
-
- while (drbd_make_request_common(mdev, &bp->bio1, start_time))
- inc_ap_bio(mdev, 1);
-
- while (drbd_make_request_common(mdev, &bp->bio2, start_time))
- inc_ap_bio(mdev, 1);
-
- dec_ap_bio(mdev);
-
- bio_pair_release(bp);
- }
+ inc_ap_bio(mdev);
+ __drbd_make_request(mdev, bio, start_time);
}
-/* This is called by bio_add_page(). With this function we reduce
- * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
- * units (was AL_EXTENTs).
+/* This is called by bio_add_page().
+ *
+ * q->max_hw_sectors and other global limits are already enforced there.
*
- * we do the calculation within the lower 32bit of the byte offsets,
- * since we don't care for actual offset, but only check whether it
- * would cross "activity log extent" boundaries.
+ * We need to call down to our lower level device,
+ * in case it has special restrictions.
+ *
+ * We also may need to enforce configured max-bio-bvecs limits.
*
* As long as the BIO is empty we have to allow at least one bvec,
- * regardless of size and offset. so the resulting bio may still
- * cross extent boundaries. those are dealt with (bio_split) in
- * drbd_make_request.
+ * regardless of size and offset, so no need to ask lower levels.
*/
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
- unsigned int bio_offset =
- (unsigned int)bvm->bi_sector << 9; /* 32 bit */
unsigned int bio_size = bvm->bi_size;
- int limit, backing_limit;
-
- limit = DRBD_MAX_BIO_SIZE
- - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
- if (limit < 0)
- limit = 0;
- if (bio_size == 0) {
- if (limit <= bvec->bv_len)
- limit = bvec->bv_len;
- } else if (limit && get_ldev(mdev)) {
+ int limit = DRBD_MAX_BIO_SIZE;
+ int backing_limit;
+
+ if (bio_size && get_ldev(mdev)) {
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
@@ -1240,24 +1187,38 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
return limit;
}
+struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
+{
+ /* Walk the transfer log,
+ * and find the oldest not yet completed request */
+ struct drbd_request *r;
+ list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ if (atomic_read(&r->completion_ref))
+ return r;
+ }
+ return NULL;
+}
+
void request_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_tconn *tconn = mdev->tconn;
struct drbd_request *req; /* oldest request */
- struct list_head *le;
+ struct net_conf *nc;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
unsigned long now;
- if (get_net_conf(mdev)) {
- if (mdev->state.conn >= C_WF_REPORT_PARAMS)
- ent = mdev->net_conf->timeout*HZ/10
- * mdev->net_conf->ko_count;
- put_net_conf(mdev);
- }
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
+ ent = nc->timeout * HZ/10 * nc->ko_count;
+
if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
- dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+ dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
put_ldev(mdev);
}
+ rcu_read_unlock();
+
et = min_not_zero(dt, ent);
if (!et)
@@ -1265,17 +1226,14 @@ void request_timer_fn(unsigned long data)
now = jiffies;
- spin_lock_irq(&mdev->req_lock);
- le = &mdev->oldest_tle->requests;
- if (list_empty(le)) {
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&tconn->req_lock);
+ req = find_oldest_request(tconn);
+ if (!req) {
+ spin_unlock_irq(&tconn->req_lock);
mod_timer(&mdev->request_timer, now + et);
return;
}
- le = le->prev;
- req = list_entry(le, struct drbd_request, tl_requests);
-
/* The request is considered timed out, if
* - we have some effective timeout from the configuration,
* with above state restrictions applied,
@@ -1294,17 +1252,17 @@ void request_timer_fn(unsigned long data)
*/
if (ent && req->rq_state & RQ_NET_PENDING &&
time_after(now, req->start_time + ent) &&
- !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+ !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
}
- if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+ if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
time_after(now, req->start_time + dt) &&
!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
__drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
}
nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&tconn->req_lock);
mod_timer(&mdev->request_timer, nt);
}
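
The effective timeout used by request_timer_fn() above is the smaller non-zero value of the disk deadline (disk_timeout) and the network deadline (timeout * ko_count), both configured in units of 0.1 s and converted to jiffies. A standalone sketch of that arithmetic, with illustrative (not authoritative) configuration values:

#include <stdio.h>

#define HZ 250	/* illustrative; depends on the kernel configuration */

static unsigned long min_not_zero(unsigned long a, unsigned long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned long timeout = 60;	/* net timeout, units of 0.1 s (6 s) */
	unsigned long ko_count = 7;
	unsigned long disk_timeout = 0;	/* 0 = no local disk timeout configured */

	unsigned long ent = timeout * HZ / 10 * ko_count;	/* network deadline, jiffies */
	unsigned long dt = disk_timeout * HZ / 10;		/* disk deadline, jiffies */
	unsigned long et = min_not_zero(dt, ent);

	printf("effective timeout: %lu jiffies (%.1f s)\n", et, (double)et / HZ);
	return 0;
}
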
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 3d2111919486..c08d22964d06 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -77,40 +77,41 @@
*/
enum drbd_req_event {
- created,
- to_be_send,
- to_be_submitted,
+ CREATED,
+ TO_BE_SENT,
+ TO_BE_SUBMITTED,
/* XXX yes, now I am inconsistent...
* these are not "events" but "actions"
* oh, well... */
- queue_for_net_write,
- queue_for_net_read,
- queue_for_send_oos,
-
- send_canceled,
- send_failed,
- handed_over_to_network,
- oos_handed_to_network,
- connection_lost_while_pending,
- read_retry_remote_canceled,
- recv_acked_by_peer,
- write_acked_by_peer,
- write_acked_by_peer_and_sis, /* and set_in_sync */
- conflict_discarded_by_peer,
- neg_acked,
- barrier_acked, /* in protocol A and B */
- data_received, /* (remote read) */
-
- read_completed_with_error,
- read_ahead_completed_with_error,
- write_completed_with_error,
- abort_disk_io,
- completed_ok,
- resend,
- fail_frozen_disk_io,
- restart_frozen_disk_io,
- nothing, /* for tracing only */
+ QUEUE_FOR_NET_WRITE,
+ QUEUE_FOR_NET_READ,
+ QUEUE_FOR_SEND_OOS,
+
+ SEND_CANCELED,
+ SEND_FAILED,
+ HANDED_OVER_TO_NETWORK,
+ OOS_HANDED_TO_NETWORK,
+ CONNECTION_LOST_WHILE_PENDING,
+ READ_RETRY_REMOTE_CANCELED,
+ RECV_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
+ CONFLICT_RESOLVED,
+ POSTPONE_WRITE,
+ NEG_ACKED,
+ BARRIER_ACKED, /* in protocol A and B */
+ DATA_RECEIVED, /* (remote read) */
+
+ READ_COMPLETED_WITH_ERROR,
+ READ_AHEAD_COMPLETED_WITH_ERROR,
+ WRITE_COMPLETED_WITH_ERROR,
+ ABORT_DISK_IO,
+ COMPLETED_OK,
+ RESEND,
+ FAIL_FROZEN_DISK_IO,
+ RESTART_FROZEN_DISK_IO,
+ NOTHING,
};
/* encoding of request states for now. we don't actually need that many bits.
@@ -142,8 +143,8 @@ enum drbd_req_state_bits {
* recv_ack (B) or implicit "ack" (A),
* still waiting for the barrier ack.
* master_bio may already be completed and invalidated.
- * 11100: write_acked (C),
- * data_received (for remote read, any protocol)
+ * 11100: write acked (C),
+ * data received (for remote read, any protocol)
* or finally the barrier ack has arrived (B,A)...
* request can be freed
* 01100: neg-acked (write, protocol C)
@@ -198,6 +199,22 @@ enum drbd_req_state_bits {
/* Should call drbd_al_complete_io() for this request... */
__RQ_IN_ACT_LOG,
+
+ /* The peer has sent a retry ACK */
+ __RQ_POSTPONED,
+
+ /* would have been completed,
+ * but was not, because of drbd_suspended() */
+ __RQ_COMPLETION_SUSP,
+
+ /* We expect a receive ACK (wire proto B) */
+ __RQ_EXP_RECEIVE_ACK,
+
+ /* We expect a write ACK (write proto C) */
+ __RQ_EXP_WRITE_ACK,
+
+ /* waiting for a barrier ack, did an extra kref_get */
+ __RQ_EXP_BARR_ACK,
};
#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
@@ -219,56 +236,16 @@ enum drbd_req_state_bits {
#define RQ_WRITE (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
+#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
+#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
+#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
+#define RQ_EXP_WRITE_ACK (1UL << __RQ_EXP_WRITE_ACK)
+#define RQ_EXP_BARR_ACK (1UL << __RQ_EXP_BARR_ACK)
/* For waking up the frozen transfer log mod_req() has to return if the request
should be counted in the epoch object*/
-#define MR_WRITE_SHIFT 0
-#define MR_WRITE (1 << MR_WRITE_SHIFT)
-#define MR_READ_SHIFT 1
-#define MR_READ (1 << MR_READ_SHIFT)
-
-/* epoch entries */
-static inline
-struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- BUG_ON(mdev->ee_hash_s == 0);
- return mdev->ee_hash +
- ((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
-}
-
-/* transfer log (drbd_request objects) */
-static inline
-struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- BUG_ON(mdev->tl_hash_s == 0);
- return mdev->tl_hash +
- ((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
-}
-
-/* application reads (drbd_request objects) */
-static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- return mdev->app_reads_hash
- + ((unsigned int)(sector) % APP_R_HSIZE);
-}
-
-/* when we receive the answer for a read request,
- * verify that we actually know about it */
-static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
- u64 id, sector_t sector)
-{
- struct hlist_head *slot = ar_hash_slot(mdev, sector);
- struct hlist_node *n;
- struct drbd_request *req;
-
- hlist_for_each_entry(req, n, slot, collision) {
- if ((unsigned long)req == (unsigned long)id) {
- D_ASSERT(req->sector == sector);
- return req;
- }
- }
- return NULL;
-}
+#define MR_WRITE 1
+#define MR_READ 2
static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
@@ -278,41 +255,10 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
req->private_bio = bio;
bio->bi_private = req;
- bio->bi_end_io = drbd_endio_pri;
+ bio->bi_end_io = drbd_request_endio;
bio->bi_next = NULL;
}
-static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
- struct bio *bio_src)
-{
- struct drbd_request *req =
- mempool_alloc(drbd_request_mempool, GFP_NOIO);
- if (likely(req)) {
- drbd_req_make_private_bio(req, bio_src);
-
- req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
- req->mdev = mdev;
- req->master_bio = bio_src;
- req->epoch = 0;
- req->sector = bio_src->bi_sector;
- req->size = bio_src->bi_size;
- INIT_HLIST_NODE(&req->collision);
- INIT_LIST_HEAD(&req->tl_requests);
- INIT_LIST_HEAD(&req->w.list);
- }
- return req;
-}
-
-static inline void drbd_req_free(struct drbd_request *req)
-{
- mempool_free(req, drbd_request_mempool);
-}
-
-static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
-{
- return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
-}
-
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
* bio->bi_size, or similar. But that would be too ugly. */
@@ -321,6 +267,8 @@ struct bio_and_error {
int error;
};
+extern void start_new_tl_epoch(struct drbd_tconn *tconn);
+extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
@@ -328,13 +276,17 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
extern void complete_master_bio(struct drbd_conf *mdev,
struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
-extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
+extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+
+/* this is in drbd_main.c */
+extern void drbd_restart_request(struct drbd_request *req);
/* use this if you don't want to deal with calling complete_master_bio()
* outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
int rv;
@@ -354,13 +306,13 @@ static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
unsigned long flags;
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
int rv;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
rv = __req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
if (m.bio)
complete_master_bio(mdev, &m);
@@ -368,7 +320,7 @@ static inline int req_mod(struct drbd_request *req,
return rv;
}
-static inline bool drbd_should_do_remote(union drbd_state s)
+static inline bool drbd_should_do_remote(union drbd_dev_state s)
{
return s.pdsk == D_UP_TO_DATE ||
(s.pdsk >= D_INCONSISTENT &&
@@ -378,7 +330,7 @@ static inline bool drbd_should_do_remote(union drbd_state s)
That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
states. */
}
-static inline bool drbd_should_send_oos(union drbd_state s)
+static inline bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
new file mode 100644
index 000000000000..0fe220cfb9e9
--- /dev/null
+++ b/drivers/block/drbd/drbd_state.c
@@ -0,0 +1,1863 @@
+/*
+ drbd_state.c
+
+ This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+ Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
+ Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+ Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+ Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
+ from Logicworks, Inc. for making SDP replication support possible.
+
+ drbd is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ drbd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with drbd; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/drbd_limits.h>
+#include "drbd_int.h"
+#include "drbd_req.h"
+
+/* in drbd_main.c */
+extern void tl_abort_disk_io(struct drbd_conf *mdev);
+
+struct after_state_chg_work {
+ struct drbd_work w;
+ union drbd_state os;
+ union drbd_state ns;
+ enum chg_state_flags flags;
+ struct completion *done;
+};
+
+enum sanitize_state_warnings {
+ NO_WARNING,
+ ABORTED_ONLINE_VERIFY,
+ ABORTED_RESYNC,
+ CONNECTION_LOST_NEGOTIATING,
+ IMPLICITLY_UPGRADED_DISK,
+ IMPLICITLY_UPGRADED_PDSK,
+};
+
+static int w_after_state_ch(struct drbd_work *w, int unused);
+static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum chg_state_flags flags);
+static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
+static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *);
+static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
+static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum sanitize_state_warnings *warn);
+
+static inline bool is_susp(union drbd_state s)
+{
+ return s.susp || s.susp_nod || s.susp_fen;
+}
+
+bool conn_all_vols_unconf(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = true;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.disk != D_DISKLESS ||
+ mdev->state.conn != C_STANDALONE ||
+ mdev->state.role != R_SECONDARY) {
+ rv = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+/* Unfortunately the states were not correctly ordered when
+ they were defined. Therefore we cannot use max_t() here. */
+static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
+{
+ if (role1 == R_PRIMARY || role2 == R_PRIMARY)
+ return R_PRIMARY;
+ if (role1 == R_SECONDARY || role2 == R_SECONDARY)
+ return R_SECONDARY;
+ return R_UNKNOWN;
+}
+static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
+{
+ if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
+ return R_UNKNOWN;
+ if (role1 == R_SECONDARY || role2 == R_SECONDARY)
+ return R_SECONDARY;
+ return R_PRIMARY;
+}
+
+enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
+{
+ enum drbd_role role = R_UNKNOWN;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ role = max_role(role, mdev->state.role);
+ rcu_read_unlock();
+
+ return role;
+}
+
+enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
+{
+ enum drbd_role peer = R_UNKNOWN;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ peer = max_role(peer, mdev->state.peer);
+ rcu_read_unlock();
+
+ return peer;
+}
+
+enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_DISKLESS;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_MASK;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_DISKLESS;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
+{
+ enum drbd_conns conn = C_MASK;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ conn = min_t(enum drbd_conns, conn, mdev->state.conn);
+ rcu_read_unlock();
+
+ return conn;
+}
+
+static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr;
+ bool rv = true;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ if (mdev->state.conn == C_WF_REPORT_PARAMS) {
+ rv = false;
+ break;
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+
+/**
+ * cl_wide_st_chg() - true if the state change is a cluster wide one
+ * @mdev: DRBD device.
+ * @os: old (current) state.
+ * @ns: new (wanted) state.
+ */
+static int cl_wide_st_chg(struct drbd_conf *mdev,
+ union drbd_state os, union drbd_state ns)
+{
+ return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
+ ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
+ (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+ (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
+ (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
+ (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
+ (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
+ (os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
+}
+
+static union drbd_state
+apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
+{
+ union drbd_state ns;
+ ns.i = (os.i & ~mask.i) | val.i;
+ return ns;
+}
+
+enum drbd_state_rv
+drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+ union drbd_state mask, union drbd_state val)
+{
+ unsigned long flags;
+ union drbd_state ns;
+ enum drbd_state_rv rv;
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(mdev), mask, val);
+ rv = _drbd_set_state(mdev, ns, f, NULL);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ return rv;
+}
+
+/**
+ * drbd_force_state() - Impose a change which happens outside our control on our state
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ */
+void drbd_force_state(struct drbd_conf *mdev,
+ union drbd_state mask, union drbd_state val)
+{
+ drbd_change_state(mdev, CS_HARD, mask, val);
+}
+
+static enum drbd_state_rv
+_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val)
+{
+ union drbd_state os, ns;
+ unsigned long flags;
+ enum drbd_state_rv rv;
+
+ if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+ return SS_CW_SUCCESS;
+
+ if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+ return SS_CW_FAILED_BY_PEER;
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ rv = is_valid_transition(os, ns);
+ if (rv >= SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+
+ if (!cl_wide_st_chg(mdev, os, ns))
+ rv = SS_CW_NO_NEED;
+ if (rv == SS_UNKNOWN_ERROR) {
+ rv = is_valid_state(mdev, ns);
+ if (rv >= SS_SUCCESS) {
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ if (rv >= SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+ }
+ }
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ return rv;
+}
+
+/**
+ * drbd_req_state() - Perform a possibly cluster-wide state change
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ * @f: flags
+ *
+ * Should not be called directly, use drbd_request_state() or
+ * _drbd_request_state().
+ */
+static enum drbd_state_rv
+drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
+{
+ struct completion done;
+ unsigned long flags;
+ union drbd_state os, ns;
+ enum drbd_state_rv rv;
+
+ init_completion(&done);
+
+ if (f & CS_SERIALIZE)
+ mutex_lock(mdev->state_mutex);
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS) {
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ goto abort;
+ }
+
+ if (cl_wide_st_chg(mdev, os, ns)) {
+ rv = is_valid_state(mdev, ns);
+ if (rv == SS_SUCCESS)
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ if (rv < SS_SUCCESS) {
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+
+ if (drbd_send_state_req(mdev, mask, val)) {
+ rv = SS_CW_FAILED_BY_PEER;
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+
+ wait_event(mdev->state_wait,
+ (rv = _req_st_cond(mdev, mask, val)));
+
+ if (rv < SS_SUCCESS) {
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(mdev), mask, val);
+ rv = _drbd_set_state(mdev, ns, f, &done);
+ } else {
+ rv = _drbd_set_state(mdev, ns, f, &done);
+ }
+
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
+ D_ASSERT(current != mdev->tconn->worker.task);
+ wait_for_completion(&done);
+ }
+
+abort:
+ if (f & CS_SERIALIZE)
+ mutex_unlock(mdev->state_mutex);
+
+ return rv;
+}
+
+/**
+ * _drbd_request_state() - Request a state change (with flags)
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ * @f: flags
+ *
+ * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
+ * flag, or when logging of failed state change requests is not desired.
+ */
+enum drbd_state_rv
+_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
+{
+ enum drbd_state_rv rv;
+
+ wait_event(mdev->state_wait,
+ (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
+
+ return rv;
+}
+
+static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
+{
+ dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
+ name,
+ drbd_conn_str(ns.conn),
+ drbd_role_str(ns.role),
+ drbd_role_str(ns.peer),
+ drbd_disk_str(ns.disk),
+ drbd_disk_str(ns.pdsk),
+ is_susp(ns) ? 's' : 'r',
+ ns.aftr_isp ? 'a' : '-',
+ ns.peer_isp ? 'p' : '-',
+ ns.user_isp ? 'u' : '-',
+ ns.susp_fen ? 'F' : '-',
+ ns.susp_nod ? 'N' : '-'
+ );
+}
+
+void print_st_err(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum drbd_state_rv err)
+{
+ if (err == SS_IN_TRANSIENT_STATE)
+ return;
+ dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
+ print_st(mdev, " state", os);
+ print_st(mdev, "wanted", ns);
+}
+
+static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char *pbp;
+ pbp = pb;
+ *pbp = 0;
+
+ if (ns.role != os.role && flags & CS_DC_ROLE)
+ pbp += sprintf(pbp, "role( %s -> %s ) ",
+ drbd_role_str(os.role),
+ drbd_role_str(ns.role));
+ if (ns.peer != os.peer && flags & CS_DC_PEER)
+ pbp += sprintf(pbp, "peer( %s -> %s ) ",
+ drbd_role_str(os.peer),
+ drbd_role_str(ns.peer));
+ if (ns.conn != os.conn && flags & CS_DC_CONN)
+ pbp += sprintf(pbp, "conn( %s -> %s ) ",
+ drbd_conn_str(os.conn),
+ drbd_conn_str(ns.conn));
+ if (ns.disk != os.disk && flags & CS_DC_DISK)
+ pbp += sprintf(pbp, "disk( %s -> %s ) ",
+ drbd_disk_str(os.disk),
+ drbd_disk_str(ns.disk));
+ if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
+ pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
+ drbd_disk_str(os.pdsk),
+ drbd_disk_str(ns.pdsk));
+
+ return pbp - pb;
+}
+
+static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char pb[300];
+ char *pbp = pb;
+
+ pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);
+
+ if (ns.aftr_isp != os.aftr_isp)
+ pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
+ os.aftr_isp,
+ ns.aftr_isp);
+ if (ns.peer_isp != os.peer_isp)
+ pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
+ os.peer_isp,
+ ns.peer_isp);
+ if (ns.user_isp != os.user_isp)
+ pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
+ os.user_isp,
+ ns.user_isp);
+
+ if (pbp != pb)
+ dev_info(DEV, "%s\n", pb);
+}
+
+static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char pb[300];
+ char *pbp = pb;
+
+ pbp += print_state_change(pbp, os, ns, flags);
+
+ if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
+ pbp += sprintf(pbp, "susp( %d -> %d ) ",
+ is_susp(os),
+ is_susp(ns));
+
+ if (pbp != pb)
+ conn_info(tconn, "%s\n", pb);
+}
+
+
+/**
+ * is_valid_state() - Returns an SS_ error code if ns is not valid
+ * @mdev: DRBD device.
+ * @ns: State to consider.
+ */
+static enum drbd_state_rv
+is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+{
+ /* See drbd_state_sw_errors in drbd_strings.c */
+
+ enum drbd_fencing_p fp;
+ enum drbd_state_rv rv = SS_SUCCESS;
+ struct net_conf *nc;
+
+ rcu_read_lock();
+ fp = FP_DONT_CARE;
+ if (get_ldev(mdev)) {
+ fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ put_ldev(mdev);
+ }
+
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc) {
+ if (!nc->two_primaries && ns.role == R_PRIMARY) {
+ if (ns.peer == R_PRIMARY)
+ rv = SS_TWO_PRIMARIES;
+ else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
+ rv = SS_O_VOL_PEER_PRI;
+ }
+ }
+
+ if (rv <= 0)
+ /* already found a reason to abort */;
+ else if (ns.role == R_SECONDARY && mdev->open_cnt)
+ rv = SS_DEVICE_IN_USE;
+
+ else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if (fp >= FP_RESOURCE &&
+ ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
+ rv = SS_PRIMARY_NOP;
+
+ else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
+ rv = SS_NO_LOCAL_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
+ rv = SS_NO_REMOTE_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if ((ns.conn == C_CONNECTED ||
+ ns.conn == C_WF_BITMAP_S ||
+ ns.conn == C_SYNC_SOURCE ||
+ ns.conn == C_PAUSED_SYNC_S) &&
+ ns.disk == D_OUTDATED)
+ rv = SS_CONNECTED_OUTDATES;
+
+ else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ (nc->verify_alg[0] == 0))
+ rv = SS_NO_VERIFY_ALG;
+
+ else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ mdev->tconn->agreed_pro_version < 88)
+ rv = SS_NOT_SUPPORTED;
+
+ else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
+ rv = SS_CONNECTED_OUTDATES;
+
+ rcu_read_unlock();
+
+ return rv;
+}
+
+/**
+ * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
+ * This function limits state transitions that may be declined by DRBD, i.e.
+ * user requests (aka soft transitions).
+ * @os: old state.
+ * @ns: new state.
+ * @tconn: DRBD connection.
+ */
+static enum drbd_state_rv
+is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+
+ if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
+ os.conn > C_CONNECTED)
+ rv = SS_RESYNC_RUNNING;
+
+ if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
+ rv = SS_ALREADY_STANDALONE;
+
+ if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
+ rv = SS_IS_DISKLESS;
+
+ if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
+ rv = SS_NO_NET_CONFIG;
+
+ if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
+ rv = SS_LOWER_THAN_OUTDATED;
+
+ if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
+ rv = SS_IN_TRANSIENT_STATE;
+
+ /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
+ rv = SS_IN_TRANSIENT_STATE; */
+
+ /* While establishing a connection only allow cstate to change.
+ Delay/refuse role changes, detach/attach etc... */
+ if (test_bit(STATE_SENT, &tconn->flags) &&
+ !(os.conn == C_WF_REPORT_PARAMS ||
+ (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+ rv = SS_IN_TRANSIENT_STATE;
+
+ if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
+ rv = SS_NEED_CONNECTION;
+
+ if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ ns.conn != os.conn && os.conn > C_CONNECTED)
+ rv = SS_RESYNC_RUNNING;
+
+ if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
+ os.conn < C_CONNECTED)
+ rv = SS_NEED_CONNECTION;
+
+ if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
+ && os.conn < C_WF_REPORT_PARAMS)
+ rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
+
+ return rv;
+}
+
+static enum drbd_state_rv
+is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
+{
+ /* no change -> nothing to do, at least for the connection part */
+ if (oc == nc)
+ return SS_NOTHING_TO_DO;
+
+ /* disconnect of an unconfigured connection does not make sense */
+ if (oc == C_STANDALONE && nc == C_DISCONNECTING)
+ return SS_ALREADY_STANDALONE;
+
+ /* from C_STANDALONE, we start with C_UNCONNECTED */
+ if (oc == C_STANDALONE && nc != C_UNCONNECTED)
+ return SS_NEED_CONNECTION;
+
+ /* When establishing a connection we need to go through WF_REPORT_PARAMS!
+ Necessary to do the right thing upon invalidate-remote on a disconnected resource */
+ if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
+ return SS_NEED_CONNECTION;
+
+ /* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
+ if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
+ return SS_IN_TRANSIENT_STATE;
+
+ /* After C_DISCONNECTING only C_STANDALONE may follow */
+ if (oc == C_DISCONNECTING && nc != C_STANDALONE)
+ return SS_IN_TRANSIENT_STATE;
+
+ return SS_SUCCESS;
+}
+
+
+/**
+ * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
+ * This limits hard state transitions. Hard state transitions are facts that are
+ * imposed on DRBD by the environment, e.g. a disk broke or the network went down.
+ * But even those hard state transitions are not allowed to do everything.
+ * @ns: new state.
+ * @os: old state.
+ */
+static enum drbd_state_rv
+is_valid_transition(union drbd_state os, union drbd_state ns)
+{
+ enum drbd_state_rv rv;
+
+ rv = is_valid_conn_transition(os.conn, ns.conn);
+
+ /* we cannot fail (again) if we already detached */
+ if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+ rv = SS_IS_DISKLESS;
+
+ return rv;
+}
+
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+ static const char *msg_table[] = {
+ [NO_WARNING] = "",
+ [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+ [ABORTED_RESYNC] = "Resync aborted.",
+ [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+ [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+ [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+ };
+
+ if (warn != NO_WARNING)
+ dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
+/**
+ * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
+ * @mdev: DRBD device.
+ * @ns: new state.
+ * @warn: pointer used to return a warning about implicit changes, may be NULL.
+ *
+ * When we lose the connection, we have to set the state of the peer's disk
+ * (pdsk) to D_UNKNOWN. This rule and many more along those lines are in this
+ * function.
+ */
+static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum sanitize_state_warnings *warn)
+{
+ enum drbd_fencing_p fp;
+ enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
+
+ if (warn)
+ *warn = NO_WARNING;
+
+ fp = FP_DONT_CARE;
+ if (get_ldev(mdev)) {
+ rcu_read_lock();
+ fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ rcu_read_unlock();
+ put_ldev(mdev);
+ }
+
+ /* Implications from connection to peer and peer_isp */
+ if (ns.conn < C_CONNECTED) {
+ ns.peer_isp = 0;
+ ns.peer = R_UNKNOWN;
+ if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
+ ns.pdsk = D_UNKNOWN;
+ }
+
+ /* Clear the aftr_isp when becoming unconfigured */
+ if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
+ ns.aftr_isp = 0;
+
+ /* An implication of the disk states onto the connection state */
+ /* Abort resync if a disk fails/detaches */
+ if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
+ if (warn)
+ *warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
+ ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
+ ns.conn = C_CONNECTED;
+ }
+
+ /* Connection breaks down before we finished "Negotiating" */
+ if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
+ get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
+ ns.disk = mdev->new_state_tmp.disk;
+ ns.pdsk = mdev->new_state_tmp.pdsk;
+ } else {
+ if (warn)
+ *warn = CONNECTION_LOST_NEGOTIATING;
+ ns.disk = D_DISKLESS;
+ ns.pdsk = D_UNKNOWN;
+ }
+ put_ldev(mdev);
+ }
+
+ /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
+ if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
+ if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
+ ns.disk = D_UP_TO_DATE;
+ if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
+ ns.pdsk = D_UP_TO_DATE;
+ }
+
+ /* Implications of the connection state on the disk states */
+ disk_min = D_DISKLESS;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_UNKNOWN;
+ switch ((enum drbd_conns)ns.conn) {
+ case C_WF_BITMAP_T:
+ case C_PAUSED_SYNC_T:
+ case C_STARTING_SYNC_T:
+ case C_WF_SYNC_UUID:
+ case C_BEHIND:
+ disk_min = D_INCONSISTENT;
+ disk_max = D_OUTDATED;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_VERIFY_S:
+ case C_VERIFY_T:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_CONNECTED:
+ disk_min = D_DISKLESS;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_DISKLESS;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_WF_BITMAP_S:
+ case C_PAUSED_SYNC_S:
+ case C_STARTING_SYNC_S:
+ case C_AHEAD:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
+ break;
+ case C_SYNC_TARGET:
+ disk_min = D_INCONSISTENT;
+ disk_max = D_INCONSISTENT;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_SYNC_SOURCE:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_INCONSISTENT;
+ break;
+ case C_STANDALONE:
+ case C_DISCONNECTING:
+ case C_UNCONNECTED:
+ case C_TIMEOUT:
+ case C_BROKEN_PIPE:
+ case C_NETWORK_FAILURE:
+ case C_PROTOCOL_ERROR:
+ case C_TEAR_DOWN:
+ case C_WF_CONNECTION:
+ case C_WF_REPORT_PARAMS:
+ case C_MASK:
+ break;
+ }
+ if (ns.disk > disk_max)
+ ns.disk = disk_max;
+
+ if (ns.disk < disk_min) {
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_DISK;
+ ns.disk = disk_min;
+ }
+ if (ns.pdsk > pdsk_max)
+ ns.pdsk = pdsk_max;
+
+ if (ns.pdsk < pdsk_min) {
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_PDSK;
+ ns.pdsk = pdsk_min;
+ }
+
+ if (fp == FP_STONITH &&
+ (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
+ ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
+
+ if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
+ (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
+ ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible */
+
+ if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
+ if (ns.conn == C_SYNC_SOURCE)
+ ns.conn = C_PAUSED_SYNC_S;
+ if (ns.conn == C_SYNC_TARGET)
+ ns.conn = C_PAUSED_SYNC_T;
+ } else {
+ if (ns.conn == C_PAUSED_SYNC_S)
+ ns.conn = C_SYNC_SOURCE;
+ if (ns.conn == C_PAUSED_SYNC_T)
+ ns.conn = C_SYNC_TARGET;
+ }
+
+ return ns;
+}
+
+void drbd_resume_al(struct drbd_conf *mdev)
+{
+ if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
+ dev_info(DEV, "Resumed AL updates\n");
+}
+
+/* helper for __drbd_set_state */
+static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
+{
+ if (mdev->tconn->agreed_pro_version < 90)
+ mdev->ov_start_sector = 0;
+ mdev->rs_total = drbd_bm_bits(mdev);
+ mdev->ov_position = 0;
+ if (cs == C_VERIFY_T) {
+ /* starting online verify from an arbitrary position
+ * does not fit well into the existing protocol.
+ * on C_VERIFY_T, we initialize ov_left and friends
+ * implicitly in receive_DataRequest once the
+ * first P_OV_REQUEST is received */
+ mdev->ov_start_sector = ~(sector_t)0;
+ } else {
+ unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
+ if (bit >= mdev->rs_total) {
+ mdev->ov_start_sector =
+ BM_BIT_TO_SECT(mdev->rs_total - 1);
+ mdev->rs_total = 1;
+ } else
+ mdev->rs_total -= bit;
+ mdev->ov_position = mdev->ov_start_sector;
+ }
+ mdev->ov_left = mdev->rs_total;
+}
+
+/**
+ * __drbd_set_state() - Set a new DRBD state
+ * @mdev: DRBD device.
+ * @ns: new state.
+ * @flags: Flags
+ * @done: Optional completion; it will be completed after after_state_ch() has finished
+ *
+ * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
+ */
+enum drbd_state_rv
+__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum chg_state_flags flags, struct completion *done)
+{
+ union drbd_state os;
+ enum drbd_state_rv rv = SS_SUCCESS;
+ enum sanitize_state_warnings ssw;
+ struct after_state_chg_work *ascw;
+ bool did_remote, should_do_remote;
+
+ os = drbd_read_state(mdev);
+
+ ns = sanitize_state(mdev, ns, &ssw);
+ if (ns.i == os.i)
+ return SS_NOTHING_TO_DO;
+
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS)
+ return rv;
+
+ if (!(flags & CS_HARD)) {
+ /* pre-state-change checks ; only look at ns */
+ /* See drbd_state_sw_errors in drbd_strings.c */
+
+ rv = is_valid_state(mdev, ns);
+ if (rv < SS_SUCCESS) {
+ /* If the old state was illegal as well, then let
+ this happen...*/
+
+ if (is_valid_state(mdev, os) == rv)
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ } else
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ }
+
+ if (rv < SS_SUCCESS) {
+ if (flags & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ return rv;
+ }
+
+ print_sanitize_warnings(mdev, ssw);
+
+ drbd_pr_state_change(mdev, os, ns, flags);
+
+ /* Display changes to the susp* flags that were caused by the call to
+ sanitize_state(). Only display them here if we were not called from
+ _conn_request_state() */
+ if (!(flags & CS_DC_SUSP))
+ conn_pr_state_change(mdev->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
+
+ /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+ * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+ * drbd_ldev_destroy() won't happen before our corresponding
+ * after_state_ch() work has run, where we call put_ldev() again. */
+ if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+ (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+ atomic_inc(&mdev->local_cnt);
+
+ did_remote = drbd_should_do_remote(mdev->state);
+ mdev->state.i = ns.i;
+ should_do_remote = drbd_should_do_remote(mdev->state);
+ mdev->tconn->susp = ns.susp;
+ mdev->tconn->susp_nod = ns.susp_nod;
+ mdev->tconn->susp_fen = ns.susp_fen;
+
+ /* put replicated vs not-replicated requests in separate epochs */
+ if (did_remote != should_do_remote)
+ start_new_tl_epoch(mdev->tconn);
+
+ if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
+ drbd_print_uuids(mdev, "attached to UUIDs");
+
+ /* Wake up role changes, that were delayed because of connection establishing */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
+ no_peer_wf_report_params(mdev->tconn))
+ clear_bit(STATE_SENT, &mdev->tconn->flags);
+
+ wake_up(&mdev->misc_wait);
+ wake_up(&mdev->state_wait);
+ wake_up(&mdev->tconn->ping_wait);
+
+ /* Aborted verify run, or we reached the stop sector.
+ * Log the last position, unless end-of-device. */
+ if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
+ ns.conn <= C_CONNECTED) {
+ mdev->ov_start_sector =
+ BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
+ if (mdev->ov_left)
+ dev_info(DEV, "Online Verify reached sector %llu\n",
+ (unsigned long long)mdev->ov_start_sector);
+ }
+
+ if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
+ (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
+ dev_info(DEV, "Syncer continues.\n");
+ mdev->rs_paused += (long)jiffies
+ -(long)mdev->rs_mark_time[mdev->rs_last_mark];
+ if (ns.conn == C_SYNC_TARGET)
+ mod_timer(&mdev->resync_timer, jiffies);
+ }
+
+ if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
+ (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
+ dev_info(DEV, "Resync suspended\n");
+ mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
+ }
+
+ if (os.conn == C_CONNECTED &&
+ (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
+ unsigned long now = jiffies;
+ int i;
+
+ set_ov_position(mdev, ns.conn);
+ mdev->rs_start = now;
+ mdev->rs_last_events = 0;
+ mdev->rs_last_sect_ev = 0;
+ mdev->ov_last_oos_size = 0;
+ mdev->ov_last_oos_start = 0;
+
+ for (i = 0; i < DRBD_SYNC_MARKS; i++) {
+ mdev->rs_mark_left[i] = mdev->ov_left;
+ mdev->rs_mark_time[i] = now;
+ }
+
+ drbd_rs_controller_reset(mdev);
+
+ if (ns.conn == C_VERIFY_S) {
+ dev_info(DEV, "Starting Online Verify from sector %llu\n",
+ (unsigned long long)mdev->ov_position);
+ mod_timer(&mdev->resync_timer, jiffies);
+ }
+ }
+
+ if (get_ldev(mdev)) {
+ u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
+ MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
+ MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
+
+ mdf &= ~MDF_AL_CLEAN;
+ if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+ mdf |= MDF_CRASHED_PRIMARY;
+ if (mdev->state.role == R_PRIMARY ||
+ (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
+ mdf |= MDF_PRIMARY_IND;
+ if (mdev->state.conn > C_WF_REPORT_PARAMS)
+ mdf |= MDF_CONNECTED_IND;
+ if (mdev->state.disk > D_INCONSISTENT)
+ mdf |= MDF_CONSISTENT;
+ if (mdev->state.disk > D_OUTDATED)
+ mdf |= MDF_WAS_UP_TO_DATE;
+ if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
+ mdf |= MDF_PEER_OUT_DATED;
+ if (mdf != mdev->ldev->md.flags) {
+ mdev->ldev->md.flags = mdf;
+ drbd_md_mark_dirty(mdev);
+ }
+ if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
+ drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
+ put_ldev(mdev);
+ }
+
+ /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resyncing */
+ if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
+ os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
+ set_bit(CONSIDER_RESYNC, &mdev->flags);
+
+ /* Receiver should clean up itself */
+ if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
+ drbd_thread_stop_nowait(&mdev->tconn->receiver);
+
+ /* Now the receiver finished cleaning up itself, it should die */
+ if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
+ drbd_thread_stop_nowait(&mdev->tconn->receiver);
+
+ /* Upon network failure, we need to restart the receiver. */
+ if (os.conn > C_WF_CONNECTION &&
+ ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
+ drbd_thread_restart_nowait(&mdev->tconn->receiver);
+
+ /* Resume AL writing if we get a connection */
+ if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
+ drbd_resume_al(mdev);
+
+ /* remember last attach time so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
+ mdev->last_reattach_jif = jiffies;
+
+ ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
+ if (ascw) {
+ ascw->os = os;
+ ascw->ns = ns;
+ ascw->flags = flags;
+ ascw->w.cb = w_after_state_ch;
+ ascw->w.mdev = mdev;
+ ascw->done = done;
+ drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
+ } else {
+ dev_err(DEV, "Could not kmalloc an ascw\n");
+ }
+
+ return rv;
+}
+
+static int w_after_state_ch(struct drbd_work *w, int unused)
+{
+ struct after_state_chg_work *ascw =
+ container_of(w, struct after_state_chg_work, w);
+ struct drbd_conf *mdev = w->mdev;
+
+ after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
+ if (ascw->flags & CS_WAIT_COMPLETE) {
+ D_ASSERT(ascw->done != NULL);
+ complete(ascw->done);
+ }
+ kfree(ascw);
+
+ return 0;
+}
+
+static void abw_start_sync(struct drbd_conf *mdev, int rv)
+{
+ if (rv) {
+ dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
+ _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
+ return;
+ }
+
+ switch (mdev->state.conn) {
+ case C_STARTING_SYNC_T:
+ _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ break;
+ case C_STARTING_SYNC_S:
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
+ break;
+ }
+}
+
+int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
+ int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags)
+{
+ int rv;
+
+ D_ASSERT(current == mdev->tconn->worker.task);
+
+ /* open coded non-blocking drbd_suspend_io(mdev); */
+ set_bit(SUSPEND_IO, &mdev->flags);
+
+ drbd_bm_lock(mdev, why, flags);
+ rv = io_fn(mdev);
+ drbd_bm_unlock(mdev);
+
+ drbd_resume_io(mdev);
+
+ return rv;
+}
+
+/**
+ * after_state_ch() - Perform after state change actions that may sleep
+ * @mdev: DRBD device.
+ * @os: old state.
+ * @ns: new state.
+ * @flags: Flags
+ */
+static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum chg_state_flags flags)
+{
+ struct sib_info sib;
+
+ sib.sib_reason = SIB_STATE_CHANGE;
+ sib.os = os;
+ sib.ns = ns;
+
+ if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
+ clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ if (mdev->p_uuid)
+ mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
+ }
+
+ /* Inform userspace about the change... */
+ drbd_bcast_event(mdev, &sib);
+
+ if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
+ (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
+ drbd_khelper(mdev, "pri-on-incon-degr");
+
+ /* Here we have the actions that are performed after a
+ state change. This function might sleep */
+
+ if (ns.susp_nod) {
+ struct drbd_tconn *tconn = mdev->tconn;
+ enum drbd_req_event what = NOTHING;
+
+ spin_lock_irq(&tconn->req_lock);
+ if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED)
+ what = RESEND;
+
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ conn_lowest_disk(tconn) > D_NEGOTIATING)
+ what = RESTART_FROZEN_DISK_IO;
+
+ if (tconn->susp_nod && what != NOTHING) {
+ _tl_restart(tconn, what);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_nod = 1 } },
+ (union drbd_state) { { .susp_nod = 0 } },
+ CS_VERBOSE);
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ }
+
+ if (ns.susp_fen) {
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ spin_lock_irq(&tconn->req_lock);
+ if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
+ /* case2: The connection was established again: */
+ struct drbd_conf *odev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, odev, vnr)
+ clear_bit(NEW_CUR_UUID, &odev->flags);
+ rcu_read_unlock();
+ _tl_restart(tconn, RESEND);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_fen = 1 } },
+ (union drbd_state) { { .susp_fen = 0 } },
+ CS_VERBOSE);
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ }
+
+ /* Became sync source. With protocol >= 96, we still need to send out
+ * the sync uuid now. Need to do that before any drbd_send_state, or
+ * the other side may go "paused sync" before receiving the sync uuids,
+ * which is unexpected. */
+ if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
+ (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
+ mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
+ drbd_gen_and_send_sync_uuid(mdev);
+ put_ldev(mdev);
+ }
+
+ /* Do not change the order of the if above and the two below... */
+ if (os.pdsk == D_DISKLESS &&
+ ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) { /* attach on the peer */
+ /* we probably will start a resync soon.
+ * make sure those things are properly reset. */
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
+ drbd_rs_cancel_all(mdev);
+
+ drbd_send_uuids(mdev);
+ drbd_send_state(mdev, ns);
+ }
+ /* No point in queuing send_bitmap if we don't have a connection
+ * anymore, so check also the _current_ state, not only the new state
+ * at the time this work was queued. */
+ if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
+ mdev->state.conn == C_WF_BITMAP_S)
+ drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
+ "send_bitmap (WFBitMapS)",
+ BM_LOCKED_TEST_ALLOWED);
+
+ /* Lost contact to peer's copy of the data */
+ if ((os.pdsk >= D_INCONSISTENT &&
+ os.pdsk != D_UNKNOWN &&
+ os.pdsk != D_OUTDATED)
+ && (ns.pdsk < D_INCONSISTENT ||
+ ns.pdsk == D_UNKNOWN ||
+ ns.pdsk == D_OUTDATED)) {
+ if (get_ldev(mdev)) {
+ if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ if (drbd_suspended(mdev)) {
+ set_bit(NEW_CUR_UUID, &mdev->flags);
+ } else {
+ drbd_uuid_new_current(mdev);
+ drbd_send_uuids(mdev);
+ }
+ }
+ put_ldev(mdev);
+ }
+ }
+
+ if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
+ if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ drbd_uuid_new_current(mdev);
+ drbd_send_uuids(mdev);
+ }
+ /* D_DISKLESS Peer becomes secondary */
+ if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
+ /* We may still be Primary ourselves.
+ * No harm done if the bitmap still changes,
+ * redirtied pages will follow later. */
+ drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ "demote diskless peer", BM_LOCKED_SET_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ /* Write out all changed bits on demote.
+ * Though, no need to do that just yet
+ * if there is a resync going on still */
+ if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
+ mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
+ /* No changes to the bitmap expected this time, so assert that,
+ * even though no harm was done if it did change. */
+ drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ "demote", BM_LOCKED_TEST_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ /* Last part of the attaching process ... */
+ if (ns.conn >= C_CONNECTED &&
+ os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
+ drbd_send_sizes(mdev, 0, 0); /* to start sync... */
+ drbd_send_uuids(mdev);
+ drbd_send_state(mdev, ns);
+ }
+
+ /* We want to pause/continue resync, tell peer. */
+ if (ns.conn >= C_CONNECTED &&
+ ((os.aftr_isp != ns.aftr_isp) ||
+ (os.user_isp != ns.user_isp)))
+ drbd_send_state(mdev, ns);
+
+ /* In case one of the isp bits got set, suspend other devices. */
+ if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
+ (ns.aftr_isp || ns.peer_isp || ns.user_isp))
+ suspend_other_sg(mdev);
+
+ /* Make sure the peer gets informed about possible state
+ changes (ISP bits) while we were in WFReportParams. */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
+ drbd_send_state(mdev, ns);
+
+ /* We are in the process of starting a full sync... */
+ if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+ (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
+ /* no other bitmap changes expected during this phase */
+ drbd_queue_bitmap_io(mdev,
+ &drbd_bmio_set_n_write, &abw_start_sync,
+ "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
+
+ /* We are invalidating ourselves... */
+ if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
+ os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
+ /* other bitmap operation expected during this phase */
+ drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
+ "set_n_write from invalidate", BM_LOCKED_MASK);
+
+ /* first half of local IO error, failure to attach,
+ * or administrative detach */
+ if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+ enum drbd_io_error_p eh = EP_PASS_ON;
+ int was_io_error = 0;
+ /* corresponding get_ldev was in __drbd_set_state, to serialize
+ * our cleanup here with the transition to D_DISKLESS.
+ * But it is still not safe to dereference ldev here, since
+ * we might come from a failed Attach before ldev was set. */
+ if (mdev->ldev) {
+ rcu_read_lock();
+ eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ rcu_read_unlock();
+
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+ if (was_io_error && eh == EP_CALL_HELPER)
+ drbd_khelper(mdev, "local-io-error");
+
+ /* Immediately allow completion of all application IO,
+ * that waits for completion from the local disk,
+ * if this was a force-detach due to disk_timeout
+ * or administrator request (drbdsetup detach --force).
+ * Do NOT abort otherwise.
+ * Aborting local requests may cause serious problems,
+ * if requests are completed to upper layers already,
+ * and then later the already submitted local bio completes.
+ * This can cause DMA into former bio pages that meanwhile
+ * have been re-used for other things.
+ * So aborting local requests may cause crashes,
+ * or even worse, silent data corruption.
+ */
+ if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+ tl_abort_disk_io(mdev);
+
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ drbd_rs_cancel_all(mdev);
+
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ }
+ put_ldev(mdev);
+ }
+
+ /* second half of local IO error, failure to attach,
+ * or administrative detach,
+ * after local_cnt references have reached zero again */
+ if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+ /* We must still be diskless,
+ * re-attach has to be serialized with this! */
+ if (mdev->state.disk != D_DISKLESS)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s while going diskless\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+ /* corresponding get_ldev in __drbd_set_state
+ * this may finally trigger drbd_ldev_destroy. */
+ put_ldev(mdev);
+ }
+
+ /* Notify peer that I had a local IO error and did not detach. */
+ if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ /* Disks got bigger while they were detached */
+ if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
+ test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+ if (ns.conn == C_CONNECTED)
+ resync_after_online_grow(mdev);
+ }
+
+ /* A resync finished or aborted, wake paused devices... */
+ if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
+ (os.peer_isp && !ns.peer_isp) ||
+ (os.user_isp && !ns.user_isp))
+ resume_next_sg(mdev);
+
+ /* sync target done with resync. Explicitly notify peer, even though
+ * it should (at least for non-empty resyncs) already know itself. */
+ if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ /* Verify finished, or reached stop sector. Peer did not know about
+ * the stop sector, and we may even have changed the stop sector during
+ * verify to interrupt/stop early. Send the new state. */
+ if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
+ && verify_can_do_stop_sector(mdev))
+ drbd_send_state(mdev, ns);
+
+ /* This triggers bitmap writeout of potentially still unwritten pages
+ * if the resync finished cleanly, or aborted because of peer disk
+ * failure, or because of connection loss.
+ * For resync aborted because of local disk failure, we cannot do
+ * any bitmap writeout anymore.
+ * No harm done if some bits change during this phase.
+ */
+ if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
+ drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ if (ns.disk == D_DISKLESS &&
+ ns.conn == C_STANDALONE &&
+ ns.role == R_SECONDARY) {
+ if (os.aftr_isp != ns.aftr_isp)
+ resume_next_sg(mdev);
+ }
+
+ drbd_md_sync(mdev);
+}
+
+struct after_conn_state_chg_work {
+ struct drbd_work w;
+ enum drbd_conns oc;
+ union drbd_state ns_min;
+ union drbd_state ns_max; /* new, max state, over all mdevs */
+ enum chg_state_flags flags;
+};
+
+static int w_after_conn_state_ch(struct drbd_work *w, int unused)
+{
+ struct after_conn_state_chg_work *acscw =
+ container_of(w, struct after_conn_state_chg_work, w);
+ struct drbd_tconn *tconn = w->tconn;
+ enum drbd_conns oc = acscw->oc;
+ union drbd_state ns_max = acscw->ns_max;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ kfree(acscw);
+
+ /* Upon network configuration, we need to start the receiver */
+ if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
+ drbd_thread_start(&tconn->receiver);
+
+ if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
+ struct net_conf *old_conf;
+
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+ tconn->my_addr_len = 0;
+ tconn->peer_addr_len = 0;
+ rcu_assign_pointer(tconn->net_conf, NULL);
+ conn_free_crypto(tconn);
+ mutex_unlock(&tconn->conf_update);
+
+ synchronize_rcu();
+ kfree(old_conf);
+ }
+
+ if (ns_max.susp_fen) {
+ /* case1: The outdate peer handler is successful: */
+ if (ns_max.pdsk <= D_OUTDATED) {
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ drbd_uuid_new_current(mdev);
+ clear_bit(NEW_CUR_UUID, &mdev->flags);
+ }
+ }
+ rcu_read_unlock();
+ spin_lock_irq(&tconn->req_lock);
+ _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_fen = 1 } },
+ (union drbd_state) { { .susp_fen = 0 } },
+ CS_VERBOSE);
+ spin_unlock_irq(&tconn->req_lock);
+ }
+ }
+ kref_put(&tconn->kref, &conn_destroy);
+
+ conn_md_sync(tconn);
+
+ return 0;
+}
+
+void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
+{
+ enum chg_state_flags flags = ~0;
+ struct drbd_conf *mdev;
+ int vnr, first_vol = 1;
+ union drbd_dev_state os, cs = {
+ { .role = R_SECONDARY,
+ .peer = R_UNKNOWN,
+ .conn = tconn->cstate,
+ .disk = D_DISKLESS,
+ .pdsk = D_UNKNOWN,
+ } };
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ os = mdev->state;
+
+ if (first_vol) {
+ cs = os;
+ first_vol = 0;
+ continue;
+ }
+
+ if (cs.role != os.role)
+ flags &= ~CS_DC_ROLE;
+
+ if (cs.peer != os.peer)
+ flags &= ~CS_DC_PEER;
+
+ if (cs.conn != os.conn)
+ flags &= ~CS_DC_CONN;
+
+ if (cs.disk != os.disk)
+ flags &= ~CS_DC_DISK;
+
+ if (cs.pdsk != os.pdsk)
+ flags &= ~CS_DC_PDSK;
+ }
+ rcu_read_unlock();
+
+ *pf |= CS_DC_MASK;
+ *pf &= flags;
+ (*pcs).i = cs.i;
+}
+
+static enum drbd_state_rv
+conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+ union drbd_state ns, os;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+
+ if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
+ ns.disk = os.disk;
+
+ if (ns.i == os.i)
+ continue;
+
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS)
+ break;
+
+ if (!(flags & CS_HARD)) {
+ rv = is_valid_state(mdev, ns);
+ if (rv < SS_SUCCESS) {
+ if (is_valid_state(mdev, os) == rv)
+ rv = is_valid_soft_transition(os, ns, tconn);
+ } else
+ rv = is_valid_soft_transition(os, ns, tconn);
+ }
+ if (rv < SS_SUCCESS)
+ break;
+ }
+ rcu_read_unlock();
+
+ if (rv < SS_SUCCESS && flags & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+
+ return rv;
+}
+
+void
+conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
+{
+ union drbd_state ns, os, ns_max = { };
+ union drbd_state ns_min = {
+ { .role = R_MASK,
+ .peer = R_MASK,
+ .conn = val.conn,
+ .disk = D_MASK,
+ .pdsk = D_MASK
+ } };
+ struct drbd_conf *mdev;
+ enum drbd_state_rv rv;
+ int vnr, number_of_volumes = 0;
+
+ if (mask.conn == C_MASK) {
+ /* remember last connect time so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
+ tconn->last_reconnect_jif = jiffies;
+
+ tconn->cstate = val.conn;
+ }
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ number_of_volumes++;
+ os = drbd_read_state(mdev);
+ ns = apply_mask_val(os, mask, val);
+ ns = sanitize_state(mdev, ns, NULL);
+
+ if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
+ ns.disk = os.disk;
+
+ rv = __drbd_set_state(mdev, ns, flags, NULL);
+ if (rv < SS_SUCCESS)
+ BUG();
+
+ ns.i = mdev->state.i;
+ ns_max.role = max_role(ns.role, ns_max.role);
+ ns_max.peer = max_role(ns.peer, ns_max.peer);
+ ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
+ ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
+ ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);
+
+ ns_min.role = min_role(ns.role, ns_min.role);
+ ns_min.peer = min_role(ns.peer, ns_min.peer);
+ ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
+ ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
+ ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
+ }
+ rcu_read_unlock();
+
+ if (number_of_volumes == 0) {
+ ns_min = ns_max = (union drbd_state) { {
+ .role = R_SECONDARY,
+ .peer = R_UNKNOWN,
+ .conn = val.conn,
+ .disk = D_DISKLESS,
+ .pdsk = D_UNKNOWN
+ } };
+ }
+
+ ns_min.susp = ns_max.susp = tconn->susp;
+ ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
+ ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;
+
+ *pns_min = ns_min;
+ *pns_max = ns_max;
+}
+
+static enum drbd_state_rv
+_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+{
+ enum drbd_state_rv rv;
+
+ if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
+ return SS_CW_SUCCESS;
+
+ if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
+ return SS_CW_FAILED_BY_PEER;
+
+ rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;
+
+ if (rv == SS_UNKNOWN_ERROR)
+ rv = conn_is_valid_transition(tconn, mask, val, 0);
+
+ if (rv == SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+
+ return rv;
+}
+
+enum drbd_state_rv
+_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+ struct after_conn_state_chg_work *acscw;
+ enum drbd_conns oc = tconn->cstate;
+ union drbd_state ns_max, ns_min, os;
+ bool have_mutex = false;
+
+ if (mask.conn) {
+ rv = is_valid_conn_transition(oc, val.conn);
+ if (rv < SS_SUCCESS)
+ goto abort;
+ }
+
+ rv = conn_is_valid_transition(tconn, mask, val, flags);
+ if (rv < SS_SUCCESS)
+ goto abort;
+
+ if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
+ !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
+
+ /* This will be a cluster-wide state change.
+ * Need to give up the spinlock, grab the mutex,
+ * then send the state change request, ... */
+ spin_unlock_irq(&tconn->req_lock);
+ mutex_lock(&tconn->cstate_mutex);
+ have_mutex = true;
+
+ set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ if (conn_send_state_req(tconn, mask, val)) {
+ /* sending failed. */
+ clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ rv = SS_CW_FAILED_BY_PEER;
+ /* need to re-acquire the spin lock, though */
+ goto abort_unlocked;
+ }
+
+ if (val.conn == C_DISCONNECTING)
+ set_bit(DISCONNECT_SENT, &tconn->flags);
+
+ /* ... and re-acquire the spinlock.
+ * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
+ * conn_set_state() within the same spinlock. */
+ spin_lock_irq(&tconn->req_lock);
+ wait_event_lock_irq(tconn->ping_wait,
+ (rv = _conn_rq_cond(tconn, mask, val)),
+ tconn->req_lock);
+ clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ if (rv < SS_SUCCESS)
+ goto abort;
+ }
+
+ conn_old_common_state(tconn, &os, &flags);
+ flags |= CS_DC_SUSP;
+ conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
+ conn_pr_state_change(tconn, os, ns_max, flags);
+
+ acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
+ if (acscw) {
+ acscw->oc = os.conn;
+ acscw->ns_min = ns_min;
+ acscw->ns_max = ns_max;
+ acscw->flags = flags;
+ acscw->w.cb = w_after_conn_state_ch;
+ kref_get(&tconn->kref);
+ acscw->w.tconn = tconn;
+ drbd_queue_work(&tconn->sender_work, &acscw->w);
+ } else {
+ conn_err(tconn, "Could not kmalloc an acscw\n");
+ }
+
+ abort:
+ if (have_mutex) {
+ /* mutex_unlock() "... must not be used in interrupt context.",
+ * so give up the spinlock, then re-acquire it */
+ spin_unlock_irq(&tconn->req_lock);
+ abort_unlocked:
+ mutex_unlock(&tconn->cstate_mutex);
+ spin_lock_irq(&tconn->req_lock);
+ }
+ if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
+ conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv));
+ conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
+ conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
+ }
+ return rv;
+}
+
+enum drbd_state_rv
+conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv;
+
+ spin_lock_irq(&tconn->req_lock);
+ rv = _conn_request_state(tconn, mask, val, flags);
+ spin_unlock_irq(&tconn->req_lock);
+
+ return rv;
+}
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
new file mode 100644
index 000000000000..a3c361bbc4b6
--- /dev/null
+++ b/drivers/block/drbd/drbd_state.h
@@ -0,0 +1,161 @@
+#ifndef DRBD_STATE_H
+#define DRBD_STATE_H
+
+struct drbd_conf;
+struct drbd_tconn;
+
+/**
+ * DOC: DRBD State macros
+ *
+ * These macros are used to express state changes in easily readable form.
+ *
+ * The NS macros expand to a mask and a value that can be bit-ORed onto the
+ * current state once the spinlock (req_lock) has been taken.
+ *
+ * The _NS macros are used for state functions that get called with the
+ * spinlock held. These macros expand directly to the new state value.
+ *
+ * Besides the basic forms NS() and _NS() additional _?NS[23] are defined
+ * to express state changes that affect more than one aspect of the state.
+ *
+ * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
+ * means that the network connection was established and that the peer
+ * is in the secondary role.
+ */
+#define role_MASK R_MASK
+#define peer_MASK R_MASK
+#define disk_MASK D_MASK
+#define pdsk_MASK D_MASK
+#define conn_MASK C_MASK
+#define susp_MASK 1
+#define user_isp_MASK 1
+#define aftr_isp_MASK 1
+#define susp_nod_MASK 1
+#define susp_fen_MASK 1
+
+#define NS(T, S) \
+ ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T = (S); val; })
+#define NS2(T1, S1, T2, S2) \
+ ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+ mask.T2 = T2##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
+ val.T2 = (S2); val; })
+#define NS3(T1, S1, T2, S2, T3, S3) \
+ ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+ mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
+ val.T2 = (S2); val.T3 = (S3); val; })
+
+#define _NS(D, T, S) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T = (S); __ns; })
+#define _NS2(D, T1, S1, T2, S2) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
+ __ns.T2 = (S2); __ns; })
+#define _NS3(D, T1, S1, T2, S2, T3, S3) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
+ __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
+
+enum chg_state_flags {
+ CS_HARD = 1 << 0,
+ CS_VERBOSE = 1 << 1,
+ CS_WAIT_COMPLETE = 1 << 2,
+ CS_SERIALIZE = 1 << 3,
+ CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
+ CS_LOCAL_ONLY = 1 << 4, /* Do not consider a device pair wide state change */
+ CS_DC_ROLE = 1 << 5, /* DC = display as connection state change */
+ CS_DC_PEER = 1 << 6,
+ CS_DC_CONN = 1 << 7,
+ CS_DC_DISK = 1 << 8,
+ CS_DC_PDSK = 1 << 9,
+ CS_DC_SUSP = 1 << 10,
+ CS_DC_MASK = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK,
+ CS_IGN_OUTD_FAIL = 1 << 11,
+};
+
+/* drbd_dev_state and drbd_state are different types. This is to stress the
+ small difference. There is no suspended flag (.susp), and no
+ suspended-while-fence-handler-runs flag (susp_fen). */
+union drbd_dev_state {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned role:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned conn:5 ; /* 17/32 cstates */
+ unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned _unused:1 ;
+ unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+ unsigned peer_isp:1 ;
+ unsigned user_isp:1 ;
+ unsigned _pad:11; /* 0 unused */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned _pad:11;
+ unsigned user_isp:1 ;
+ unsigned peer_isp:1 ;
+ unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+ unsigned _unused:1 ;
+ unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned conn:5 ; /* 17/32 cstates */
+ unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned role:2 ; /* 3/4 primary/secondary/unknown */
+#else
+# error "this endianess is not supported"
+#endif
+ };
+ unsigned int i;
+};
+
+extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
+ enum chg_state_flags f,
+ union drbd_state mask,
+ union drbd_state val);
+extern void drbd_force_state(struct drbd_conf *, union drbd_state,
+ union drbd_state);
+extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
+ union drbd_state,
+ union drbd_state,
+ enum chg_state_flags);
+extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
+ enum chg_state_flags,
+ struct completion *done);
+extern void print_st_err(struct drbd_conf *, union drbd_state,
+ union drbd_state, int);
+
+enum drbd_state_rv
+_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags);
+
+enum drbd_state_rv
+conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags);
+
+extern void drbd_resume_al(struct drbd_conf *mdev);
+extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
+
+/**
+ * drbd_request_state() - Request a state change
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ *
+ * This is the most graceful way of requesting a state change. It is
+ * quite verbose in case the state change is not possible, and all such
+ * state changes are globally serialized.
+ */
+static inline int drbd_request_state(struct drbd_conf *mdev,
+ union drbd_state mask,
+ union drbd_state val)
+{
+ return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+}
+
+enum drbd_role conn_highest_role(struct drbd_tconn *tconn);
+enum drbd_role conn_highest_peer(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn);
+enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn);
+
+#endif
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index c44a2a602772..9a664bd27404 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -89,6 +89,7 @@ static const char *drbd_state_sw_errors[] = {
[-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated",
[-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change",
[-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted",
+ [-SS_O_VOL_PEER_PRI] = "Other vol primary on peer not allowed by config",
};
const char *drbd_conn_str(enum drbd_conns s)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 6bce2cc179d4..424dc7bdf9b7 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -38,16 +38,13 @@
#include "drbd_int.h"
#include "drbd_req.h"
-static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
-static int w_make_resync_request(struct drbd_conf *mdev,
- struct drbd_work *w, int cancel);
-
+static int w_make_ov_request(struct drbd_work *w, int cancel);
/* endio handlers:
* drbd_md_io_complete (defined here)
- * drbd_endio_pri (defined here)
- * drbd_endio_sec (defined here)
+ * drbd_request_endio (defined here)
+ * drbd_peer_request_endio (defined here)
* bm_async_io_complete (defined in drbd_bitmap.c)
*
* For all these callbacks, note the following:
@@ -60,7 +57,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
/* About the global_state_lock
 Each state transition on a device holds a read lock. In case we have
- to evaluate the sync after dependencies, we grab a write lock, because
+ to evaluate the resync after dependencies, we grab a write lock, because
we need stable states on all devices for that. */
rwlock_t global_state_lock;
@@ -98,97 +95,93 @@ void drbd_md_io_complete(struct bio *bio, int error)
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = e->mdev;
-
- D_ASSERT(e->block_id != ID_VACANT);
+ struct drbd_conf *mdev = peer_req->w.mdev;
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->read_cnt += e->size >> 9;
- list_del(&e->w.list);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ mdev->read_cnt += peer_req->i.size >> 9;
+ list_del(&peer_req->w.list);
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
- if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
+ __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- drbd_queue_work(&mdev->data.work, &e->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver, final stage. */
-static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
+static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = e->mdev;
- sector_t e_sector;
+ struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_interval i;
int do_wake;
- int is_syncer_req;
+ u64 block_id;
int do_al_complete_io;
- D_ASSERT(e->block_id != ID_VACANT);
-
- /* after we moved e to done_ee,
+ /* after we moved peer_req to done_ee,
* we may no longer access it,
* it may be freed/reused already!
* (as soon as we release the req_lock) */
- e_sector = e->sector;
- do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
- is_syncer_req = is_syncer_block_id(e->block_id);
+ i = peer_req->i;
+ do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
+ block_id = peer_req->block_id;
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->writ_cnt += e->size >> 9;
- list_del(&e->w.list); /* has been on active_ee or sync_ee */
- list_add_tail(&e->w.list, &mdev->done_ee);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ mdev->writ_cnt += peer_req->i.size >> 9;
+ list_move_tail(&peer_req->w.list, &mdev->done_ee);
- /* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
- * neither did we wake possibly waiting conflicting requests.
- * done from "drbd_process_done_ee" within the appropriate w.cb
- * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
+ /*
+ * Do not remove from the write_requests tree here: we did not send the
+ * Ack yet and did not wake possibly waiting conflicting requests.
+ * Removed from the tree from "drbd_process_done_ee" within the
+ * appropriate w.cb (e_end_block/e_end_resync_block) or from
+ * _drbd_clear_done_ee.
+ */
- do_wake = is_syncer_req
- ? list_empty(&mdev->sync_ee)
- : list_empty(&mdev->active_ee);
+ do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
- if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
+ __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- if (is_syncer_req)
- drbd_rs_complete_io(mdev, e_sector);
+ if (block_id == ID_SYNCER)
+ drbd_rs_complete_io(mdev, i.sector);
if (do_wake)
wake_up(&mdev->ee_wait);
if (do_al_complete_io)
- drbd_al_complete_io(mdev, e_sector);
+ drbd_al_complete_io(mdev, &i);
- wake_asender(mdev);
+ wake_asender(mdev->tconn);
put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver.
*/
-void drbd_endio_sec(struct bio *bio, int error)
+void drbd_peer_request_endio(struct bio *bio, int error)
{
- struct drbd_epoch_entry *e = bio->bi_private;
- struct drbd_conf *mdev = e->mdev;
+ struct drbd_peer_request *peer_req = bio->bi_private;
+ struct drbd_conf *mdev = peer_req->w.mdev;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
if (error && __ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "%s: error=%d s=%llus\n",
is_write ? "write" : "read", error,
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
if (!error && !uptodate) {
if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
is_write ? "write" : "read",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
/* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?! */
@@ -196,24 +189,24 @@ void drbd_endio_sec(struct bio *bio, int error)
}
if (error)
- set_bit(__EE_WAS_ERROR, &e->flags);
+ set_bit(__EE_WAS_ERROR, &peer_req->flags);
bio_put(bio); /* no need for the bio anymore */
- if (atomic_dec_and_test(&e->pending_bios)) {
+ if (atomic_dec_and_test(&peer_req->pending_bios)) {
if (is_write)
- drbd_endio_write_sec_final(e);
+ drbd_endio_write_sec_final(peer_req);
else
- drbd_endio_read_sec_final(e);
+ drbd_endio_read_sec_final(peer_req);
}
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
*/
-void drbd_endio_pri(struct bio *bio, int error)
+void drbd_request_endio(struct bio *bio, int error)
{
unsigned long flags;
struct drbd_request *req = bio->bi_private;
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -227,53 +220,72 @@ void drbd_endio_pri(struct bio *bio, int error)
error = -EIO;
}
+
+ /* If this request was aborted locally before,
+ * but now was completed "successfully",
+ * chances are that this caused arbitrary data corruption.
+ *
+ * "aborting" requests, or force-detaching the disk, is intended for
+ * completely blocked/hung local backing devices which no longer
+ * complete requests at all, not even error completions. In this
+ * situation, usually a hard-reset and failover is the only way out.
+ *
+ * By "aborting", basically faking a local error-completion,
+ * we allow for a more graceful switchover by cleanly migrating services.
+ * Still the affected node has to be rebooted "soon".
+ *
+ * By completing these requests, we allow the upper layers to re-use
+ * the associated data pages.
+ *
+ * If later the local backing device "recovers", and now DMAs some data
+ * from disk into the original request pages, in the best case it will
+ * just put random data into unused pages; but typically it will corrupt
+ * meanwhile completely unrelated data, causing all sorts of damage.
+ *
+ * Which means delayed successful completion,
+ * especially for READ requests,
+ * is a reason to panic().
+ *
+ * We assume that a delayed *error* completion is OK,
+ * though we still will complain noisily about it.
+ */
+ if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
+
+ if (!error)
+ panic("possible random memory corruption caused by delayed completion of aborted local request\n");
+ }
+
/* to avoid recursion in __req_mod */
if (unlikely(error)) {
what = (bio_data_dir(bio) == WRITE)
- ? write_completed_with_error
+ ? WRITE_COMPLETED_WITH_ERROR
: (bio_rw(bio) == READ)
- ? read_completed_with_error
- : read_ahead_completed_with_error;
+ ? READ_COMPLETED_WITH_ERROR
+ : READ_AHEAD_COMPLETED_WITH_ERROR;
} else
- what = completed_ok;
+ what = COMPLETED_OK;
bio_put(req->private_bio);
req->private_bio = ERR_PTR(error);
/* not req_mod(), we need irqsave here! */
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
put_ldev(mdev);
if (m.bio)
complete_master_bio(mdev, &m);
}
-int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- struct drbd_request *req = container_of(w, struct drbd_request, w);
-
- /* We should not detach for read io-error,
- * but try to WRITE the P_DATA_REPLY to the failed location,
- * to give the disk the chance to relocate that block */
-
- spin_lock_irq(&mdev->req_lock);
- if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
- _req_mod(req, read_retry_remote_canceled);
- spin_unlock_irq(&mdev->req_lock);
- return 1;
- }
- spin_unlock_irq(&mdev->req_lock);
-
- return w_send_read_req(mdev, w, 0);
-}
-
-void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
+void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
+ struct drbd_peer_request *peer_req, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
- struct page *page = e->pages;
+ struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
@@ -290,7 +302,7 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_e
page = tmp;
}
/* and now the last, possibly only partially used page */
- len = e->size & (PAGE_SIZE - 1);
+ len = peer_req->i.size & (PAGE_SIZE - 1);
sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
crypto_hash_update(&desc, &sg, sg.length);
crypto_hash_final(&desc, digest);
@@ -316,59 +328,58 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
crypto_hash_final(&desc, digest);
}
-/* TODO merge common code with w_e_end_ov_req */
-int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+/* MAYBE merge common code with w_e_end_ov_req */
+static int w_e_send_csum(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
int digest_size;
void *digest;
- int ok = 1;
-
- D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
+ int err = 0;
if (unlikely(cancel))
goto out;
- if (likely((e->flags & EE_WAS_ERROR) != 0))
+ if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_hash_digestsize(mdev->csums_tfm);
+ digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- sector_t sector = e->sector;
- unsigned int size = e->size;
- drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
- /* Free e and pages before send.
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
+ drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
+ /* Free peer_req and pages before send.
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
- e = NULL;
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
+ peer_req = NULL;
inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, sector, size,
- digest, digest_size,
- P_CSUM_RS_REQUEST);
+ err = drbd_send_drequest_csum(mdev, sector, size,
+ digest, digest_size,
+ P_CSUM_RS_REQUEST);
kfree(digest);
} else {
dev_err(DEV, "kmalloc() of digest failed.\n");
- ok = 0;
+ err = -ENOMEM;
}
out:
- if (e)
- drbd_free_ee(mdev, e);
+ if (peer_req)
+ drbd_free_peer_req(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
- return ok;
+ return err;
}
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
if (!get_ldev(mdev))
return -EIO;
@@ -378,45 +389,47 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
- e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
- if (!e)
+ peer_req = drbd_alloc_peer_req(mdev, ID_SYNCER /* unused */, sector,
+ size, GFP_TRY);
+ if (!peer_req)
goto defer;
- e->w.cb = w_e_send_csum;
- spin_lock_irq(&mdev->req_lock);
- list_add(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ peer_req->w.cb = w_e_send_csum;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+ if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
return 0;
/* If it failed because of ENOMEM, retry should help. If it failed
* because bio_add_page failed (probably broken lower level driver),
* retry may or may not help.
* If it does not, you may need to force disconnect. */
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
defer:
put_ldev(mdev);
return -EAGAIN;
}
-int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_resync_timer(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
switch (mdev->state.conn) {
case C_VERIFY_S:
- w_make_ov_request(mdev, w, cancel);
+ w_make_ov_request(w, cancel);
break;
case C_SYNC_TARGET:
- w_make_resync_request(mdev, w, cancel);
+ w_make_resync_request(w, cancel);
break;
}
- return 1;
+ return 0;
}
void resync_timer_fn(unsigned long data)
@@ -424,7 +437,7 @@ void resync_timer_fn(unsigned long data)
struct drbd_conf *mdev = (struct drbd_conf *) data;
if (list_empty(&mdev->resync_work.list))
- drbd_queue_work(&mdev->data.work, &mdev->resync_work);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
@@ -456,8 +469,24 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
fb->values[i] += value;
}
+struct fifo_buffer *fifo_alloc(int fifo_size)
+{
+ struct fifo_buffer *fb;
+
+ fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO);
+ if (!fb)
+ return NULL;
+
+ fb->head_index = 0;
+ fb->size = fifo_size;
+ fb->total = 0;
+
+ return fb;
+}
+
static int drbd_rs_controller(struct drbd_conf *mdev)
{
+ struct disk_conf *dc;
unsigned int sect_in; /* Number of sectors that came in since the last turn */
unsigned int want; /* The number of sectors we want in the proxy */
int req_sect; /* Number of sectors to request in this turn */
@@ -466,38 +495,39 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
int steps; /* Number of time steps to plan ahead */
int curr_corr;
int max_sect;
+ struct fifo_buffer *plan;
sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
mdev->rs_in_flight -= sect_in;
- spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+ plan = rcu_dereference(mdev->rs_plan_s);
- steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
+ steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
- want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
+ want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
} else { /* normal path */
- want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
- sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
+ want = dc->c_fill_target ? dc->c_fill_target :
+ sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
}
- correction = want - mdev->rs_in_flight - mdev->rs_planed;
+ correction = want - mdev->rs_in_flight - plan->total;
/* Plan ahead */
cps = correction / steps;
- fifo_add_val(&mdev->rs_plan_s, cps);
- mdev->rs_planed += cps * steps;
+ fifo_add_val(plan, cps);
+ plan->total += cps * steps;
/* What we do in this step */
- curr_corr = fifo_push(&mdev->rs_plan_s, 0);
- spin_unlock(&mdev->peer_seq_lock);
- mdev->rs_planed -= curr_corr;
+ curr_corr = fifo_push(plan, 0);
+ plan->total -= curr_corr;
req_sect = sect_in + curr_corr;
if (req_sect < 0)
req_sect = 0;
- max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
+ max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
if (req_sect > max_sect)
req_sect = max_sect;
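
The reworked drbd_rs_controller() above is a fill-target feedback loop: the correction needed to reach the wanted amount of in-flight resync data is spread evenly over a small plan FIFO, and each turn one planned correction is popped and applied. A stripped-down, self-contained sketch of that arithmetic is shown below; STEPS, fill_target and every other name are invented for illustration, and the real code additionally handles the start-of-resync case, clamps against c_max_rate, and reads its parameters from RCU-protected disk_conf.

#include <stdio.h>

#define STEPS 10

static int plan[STEPS];		/* planned corrections, one per time step */
static int plan_head;
static int plan_total;		/* sum of all planned corrections */

/* push a value into the ring, return the value that falls out */
static int fifo_push(int value)
{
	int old = plan[plan_head];

	plan[plan_head] = value;
	plan_head = (plan_head + 1) % STEPS;
	return old;
}

/* sect_in: sectors completed since the last turn,
 * in_flight: sectors currently requested but not yet completed */
static int controller(int sect_in, int in_flight, int fill_target)
{
	int correction = fill_target - in_flight - plan_total;
	int cps = correction / STEPS;	/* spread the correction over the plan */
	int curr_corr;
	int req_sect;
	int i;

	for (i = 0; i < STEPS; i++)	/* plan ahead */
		plan[i] += cps;
	plan_total += cps * STEPS;

	curr_corr = fifo_push(0);	/* what we actually do in this step */
	plan_total -= curr_corr;

	req_sect = sect_in + curr_corr;
	return req_sect > 0 ? req_sect : 0;
}

int main(void)
{
	int sect_in = 0;

	for (int turn = 0; turn < 5; turn++) {
		int req = controller(sect_in, 0, 1000);	/* assume all completed */
		printf("turn %d: request %d sectors\n", turn, req);
		sect_in = req;	/* next turn, that many sectors came back in */
	}
	return 0;
}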
@@ -513,22 +543,25 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
int number;
- if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
+
+ rcu_read_lock();
+ if (rcu_dereference(mdev->rs_plan_s)->size) {
number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
} else {
- mdev->c_sync_rate = mdev->sync_conf.rate;
+ mdev->c_sync_rate = rcu_dereference(mdev->ldev->disk_conf)->resync_rate;
number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
}
+ rcu_read_unlock();
/* ignore the amount of pending requests, the resync controller should
* throttle down to incoming reply rate soon enough anyways. */
return number;
}
-static int w_make_resync_request(struct drbd_conf *mdev,
- struct drbd_work *w, int cancel)
+int w_make_resync_request(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
@@ -538,12 +571,12 @@ static int w_make_resync_request(struct drbd_conf *mdev,
int i = 0;
if (unlikely(cancel))
- return 1;
+ return 0;
if (mdev->rs_total == 0) {
/* empty resync? */
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
if (!get_ldev(mdev)) {
@@ -552,7 +585,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
to continue resync with a broken disk makes no sense at
all */
dev_err(DEV, "Disk broke down during resync!\n");
- return 1;
+ return 0;
}
max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
@@ -562,15 +595,15 @@ static int w_make_resync_request(struct drbd_conf *mdev,
for (i = 0; i < number; i++) {
/* Stop generating RS requests, when half of the send buffer is filled */
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket) {
- queued = mdev->data.socket->sk->sk_wmem_queued;
- sndbuf = mdev->data.socket->sk->sk_sndbuf;
+ mutex_lock(&mdev->tconn->data.mutex);
+ if (mdev->tconn->data.socket) {
+ queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
+ sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
} else {
queued = 1;
sndbuf = 0;
}
- mutex_unlock(&mdev->data.mutex);
+ mutex_unlock(&mdev->tconn->data.mutex);
if (queued > sndbuf / 2)
goto requeue;
@@ -581,7 +614,7 @@ next_sector:
if (bit == DRBD_END_OF_BITMAP) {
mdev->bm_resync_fo = drbd_bm_bits(mdev);
put_ldev(mdev);
- return 1;
+ return 0;
}
sector = BM_BIT_TO_SECT(bit);
@@ -640,11 +673,11 @@ next_sector:
/* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
- if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
+ if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
switch (read_for_csum(mdev, sector, size)) {
case -EIO: /* Disk failure */
put_ldev(mdev);
- return 0;
+ return -EIO;
case -EAGAIN: /* allocation failed, or ldev busy */
drbd_rs_complete_io(mdev, sector);
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -657,13 +690,16 @@ next_sector:
BUG();
}
} else {
+ int err;
+
inc_rs_pending(mdev);
- if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
- sector, size, ID_SYNCER)) {
+ err = drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
+ sector, size, ID_SYNCER);
+ if (err) {
dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
dec_rs_pending(mdev);
put_ldev(mdev);
- return 0;
+ return err;
}
}
}
@@ -676,21 +712,23 @@ next_sector:
* until then resync "work" is "inactive" ...
*/
put_ldev(mdev);
- return 1;
+ return 0;
}
requeue:
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
put_ldev(mdev);
- return 1;
+ return 0;
}
-static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static int w_make_ov_request(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
int number, i, size;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ bool stop_sector_reached = false;
if (unlikely(cancel))
return 1;
@@ -699,9 +737,17 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
sector = mdev->ov_position;
for (i = 0; i < number; i++) {
- if (sector >= capacity) {
+ if (sector >= capacity)
return 1;
- }
+
+ /* We check for "finished" only in the reply path:
+ * w_e_end_ov_reply().
+ * We need to send at least one request out. */
+ stop_sector_reached = i > 0
+ && verify_can_do_stop_sector(mdev)
+ && sector >= mdev->ov_stop_sector;
+ if (stop_sector_reached)
+ break;
size = BM_BLOCK_SIZE;
@@ -715,7 +761,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
size = (capacity-sector)<<9;
inc_rs_pending(mdev);
- if (!drbd_send_ov_request(mdev, sector, size)) {
+ if (drbd_send_ov_request(mdev, sector, size)) {
dec_rs_pending(mdev);
return 0;
}
@@ -725,56 +771,39 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
requeue:
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
- mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
- return 1;
-}
-
-
-void start_resync_timer_fn(unsigned long data)
-{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
-
- drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
-}
-
-int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
- dev_warn(DEV, "w_start_resync later...\n");
- mdev->start_resync_timer.expires = jiffies + HZ/10;
- add_timer(&mdev->start_resync_timer);
- return 1;
- }
-
- drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ if (i == 0 || !stop_sector_reached)
+ mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
return 1;
}
-int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_ov_finished(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
kfree(w);
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
-static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static int w_resync_finished(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
kfree(w);
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
static void ping_peer(struct drbd_conf *mdev)
{
- clear_bit(GOT_PING_ACK, &mdev->flags);
- request_ping(mdev);
- wait_event(mdev->misc_wait,
- test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ clear_bit(GOT_PING_ACK, &tconn->flags);
+ request_ping(tconn);
+ wait_event(tconn->ping_wait,
+ test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
@@ -799,7 +828,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
if (w) {
w->cb = w_resync_finished;
- drbd_queue_work(&mdev->data.work, w);
+ w->mdev = mdev;
+ drbd_queue_work(&mdev->tconn->sender_work, w);
return 1;
}
dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -808,7 +838,12 @@ int drbd_resync_finished(struct drbd_conf *mdev)
dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
if (dt <= 0)
dt = 1;
+
db = mdev->rs_total;
+ /* adjust for verify start and stop sectors, respectively the reached position */
+ if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ db -= mdev->ov_left;
+
dbdt = Bit2KB(db/dt);
mdev->rs_paused /= HZ;
@@ -817,8 +852,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ping_peer(mdev);
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ os = drbd_read_state(mdev);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -831,7 +866,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns.conn = C_CONNECTED;
dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
- verify_done ? "Online verify " : "Resync",
+ verify_done ? "Online verify" : "Resync",
dt + mdev->rs_paused, mdev->rs_paused, dbdt);
n_oos = drbd_bm_total_weight(mdev);
@@ -848,7 +883,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target";
- if (mdev->csums_tfm && mdev->rs_total) {
+ if (mdev->tconn->csums_tfm && mdev->rs_total) {
const unsigned long s = mdev->rs_same_csum;
const unsigned long t = mdev->rs_total;
const int ratio =
@@ -906,13 +941,15 @@ int drbd_resync_finished(struct drbd_conf *mdev)
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
put_ldev(mdev);
out:
mdev->rs_total = 0;
mdev->rs_failed = 0;
mdev->rs_paused = 0;
- if (verify_done)
+
+ /* reset start sector, if we reached end of device */
+ if (verify_done && mdev->ov_left == 0)
mdev->ov_start_sector = 0;
drbd_md_sync(mdev);
@@ -924,19 +961,19 @@ out:
}
/* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
- if (drbd_ee_has_active_page(e)) {
+ if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
- int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
atomic_add(i, &mdev->pp_in_use_by_net);
atomic_sub(i, &mdev->pp_in_use);
- spin_lock_irq(&mdev->req_lock);
- list_add_tail(&e->w.list, &mdev->net_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add_tail(&peer_req->w.list, &mdev->net_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
wake_up(&drbd_pp_wait);
} else
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
}
/**
@@ -945,174 +982,177 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_ent
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_data_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- int ok;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- ok = drbd_send_block(mdev, P_DATA_REPLY, e);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ err = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
- ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block() failed\n");
- return ok;
+ return err;
}
/**
- * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUESTRS
+ * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
* @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- int ok;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
if (mdev->state.conn == C_AHEAD) {
- ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
- } else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
+ } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
- ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+ err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Not sending RSDataReply, "
"partner DISKLESS!\n");
- ok = 1;
+ err = 0;
}
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
- ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
/* update resync data with failure */
- drbd_rs_failed_io(mdev, e->sector, e->size);
+ drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block() failed\n");
- return ok;
+ return err;
}
-int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
struct digest_info *di;
int digest_size;
void *digest = NULL;
- int ok, eq = 0;
+ int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
- di = e->digest;
+ di = peer_req->digest;
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
- if (mdev->csums_tfm) {
- digest_size = crypto_hash_digestsize(mdev->csums_tfm);
+ if (mdev->tconn->csums_tfm) {
+ digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
D_ASSERT(digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
- drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
+ drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
if (eq) {
- drbd_set_in_sync(mdev, e->sector, e->size);
+ drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
- mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
- ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
+ mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
+ err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
} else {
inc_rs_pending(mdev);
- e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
- e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
+ peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
+ peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
kfree(di);
- ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+ err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
}
} else {
- ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block/ack() failed\n");
- return ok;
+ return err;
}
-/* TODO merge common code with w_e_send_csum */
-int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- sector_t sector = e->sector;
- unsigned int size = e->size;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
int digest_size;
void *digest;
- int ok = 1;
+ int err = 0;
if (unlikely(cancel))
goto out;
- digest_size = crypto_hash_digestsize(mdev->verify_tfm);
+ digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
- ok = 0; /* terminate the connection in case the allocation failed */
+ err = 1; /* terminate the connection in case the allocation failed */
goto out;
}
- if (likely(!(e->flags & EE_WAS_ERROR)))
- drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+ if (likely(!(peer_req->flags & EE_WAS_ERROR)))
+ drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
else
memset(digest, 0, digest_size);
@@ -1120,25 +1160,23 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
- e = NULL;
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
+ peer_req = NULL;
inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, sector, size,
- digest, digest_size,
- P_OV_REPLY);
- if (!ok)
+ err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
+ if (err)
dec_rs_pending(mdev);
kfree(digest);
out:
- if (e)
- drbd_free_ee(mdev, e);
+ if (peer_req)
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return ok;
+ return err;
}
-void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
+void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size)
{
if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
mdev->ov_last_oos_size += size>>9;
@@ -1149,36 +1187,38 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
drbd_set_out_of_sync(mdev, sector, size);
}
-int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
struct digest_info *di;
void *digest;
- sector_t sector = e->sector;
- unsigned int size = e->size;
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
int digest_size;
- int ok, eq = 0;
+ int err, eq = 0;
+ bool stop_sector_reached = false;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
* the resync lru has been cleaned up already */
if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
- di = e->digest;
+ di = peer_req->digest;
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_hash_digestsize(mdev->verify_tfm);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+ drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
D_ASSERT(digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
@@ -1186,19 +1226,19 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
}
- /* Free e and pages before send.
- * In case we block on congestion, we could otherwise run into
- * some distributed deadlock, if the other side blocks on
- * congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
+ /* Free peer_req and pages before send.
+ * In case we block on congestion, we could otherwise run into
+ * some distributed deadlock, if the other side blocks on
+ * congestion as well, because our receiver blocks in
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
if (!eq)
- drbd_ov_oos_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(mdev, sector, size);
else
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
- ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
- eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
+ err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
+ eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
dec_unacked(mdev);
@@ -1208,73 +1248,102 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if ((mdev->ov_left & 0x200) == 0x200)
drbd_advance_rs_marks(mdev, mdev->ov_left);
- if (mdev->ov_left == 0) {
- ov_oos_print(mdev);
+ stop_sector_reached = verify_can_do_stop_sector(mdev) &&
+ (sector + (size>>9)) >= mdev->ov_stop_sector;
+
+ if (mdev->ov_left == 0 || stop_sector_reached) {
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
}
- return ok;
+ return err;
}
-int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_prev_work_done(struct drbd_work *w, int cancel)
{
struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
+
complete(&b->done);
- return 1;
+ return 0;
}
-int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+/* FIXME
+ * We need to track the number of pending barrier acks,
+ * and to be able to wait for them.
+ * See also comment in drbd_adm_attach before drbd_suspend_io.
+ */
+int drbd_send_barrier(struct drbd_tconn *tconn)
{
- struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
- struct p_barrier *p = &mdev->data.sbuf.barrier;
- int ok = 1;
-
- /* really avoid racing with tl_clear. w.cb may have been referenced
- * just before it was reassigned and re-queued, so double check that.
- * actually, this race was harmless, since we only try to send the
- * barrier packet here, and otherwise do nothing with the object.
- * but compare with the head of w_clear_epoch */
- spin_lock_irq(&mdev->req_lock);
- if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
- cancel = 1;
- spin_unlock_irq(&mdev->req_lock);
- if (cancel)
- return 1;
+ struct p_barrier *p;
+ struct drbd_socket *sock;
- if (!drbd_get_data_sock(mdev))
- return 0;
- p->barrier = b->br_number;
- /* inc_ap_pending was done where this was queued.
- * dec_ap_pending will be done in got_BarrierAck
- * or (on connection loss) in w_clear_epoch. */
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
- (struct p_header80 *)p, sizeof(*p), 0);
- drbd_put_data_sock(mdev);
-
- return ok;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
+ p->barrier = tconn->send.current_epoch_nr;
+ p->pad = 0;
+ tconn->send.current_epoch_writes = 0;
+
+ return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
-int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_write_hint(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_socket *sock;
+
if (cancel)
- return 1;
- return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
+ return 0;
+ sock = &mdev->tconn->data;
+ if (!drbd_prepare_command(mdev, sock))
+ return -EIO;
+ return drbd_send_command(mdev, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
}
-int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch)
+{
+ if (!tconn->send.seen_any_write_yet) {
+ tconn->send.seen_any_write_yet = true;
+ tconn->send.current_epoch_nr = epoch;
+ tconn->send.current_epoch_writes = 0;
+ }
+}
+
+static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch)
+{
+ /* re-init if first write on this connection */
+ if (!tconn->send.seen_any_write_yet)
+ return;
+ if (tconn->send.current_epoch_nr != epoch) {
+ if (tconn->send.current_epoch_writes)
+ drbd_send_barrier(tconn);
+ tconn->send.current_epoch_nr = epoch;
+ }
+}
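
Taken together, the two helpers above keep per-connection bookkeeping of which replication epoch the sender is in and how many writes it has already sent for it; when a request from a newer epoch shows up, the previous epoch is closed with a barrier, but only if it actually contained writes (empty epochs, e.g. while in Ahead mode, are skipped). A self-contained sketch of that logic follows; the struct and function names are invented for the example, and re_init_if_first_write is folded into the helper for brevity.

#include <stdbool.h>
#include <stdio.h>

struct sender {
	bool seen_any_write_yet;
	unsigned int current_epoch_nr;
	unsigned int current_epoch_writes;
};

static void send_barrier(unsigned int epoch)
{
	printf("  BARRIER closes epoch %u\n", epoch);
}

static void maybe_send_barrier(struct sender *s, unsigned int epoch)
{
	if (!s->seen_any_write_yet) {		/* first write on this connection */
		s->seen_any_write_yet = true;
		s->current_epoch_nr = epoch;
		s->current_epoch_writes = 0;
		return;
	}
	if (s->current_epoch_nr != epoch) {	/* request from a newer epoch */
		if (s->current_epoch_writes)	/* only close non-empty epochs */
			send_barrier(s->current_epoch_nr);
		s->current_epoch_nr = epoch;
		s->current_epoch_writes = 0;
	}
}

int main(void)
{
	struct sender s = { false, 0, 0 };
	unsigned int epochs[] = { 1, 1, 2, 4 };	/* epoch of each queued write */

	for (unsigned int i = 0; i < 4; i++) {
		maybe_send_barrier(&s, epochs[i]);
		s.current_epoch_writes++;	/* "send" the write itself */
		printf("write in epoch %u\n", epochs[i]);
	}
	return 0;
}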
+
+int w_send_out_of_sync(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_oos(mdev, req);
- req_mod(req, oos_handed_to_network);
+ /* this time, no tconn->send.current_epoch_writes++;
+ * If it was sent, it was the closing barrier for the last
+ * replicated epoch, before we went into AHEAD mode.
+ * No more barriers will be sent, until we leave AHEAD mode again. */
+ maybe_send_barrier(tconn, req->epoch);
+
+ err = drbd_send_out_of_sync(mdev, req);
+ req_mod(req, OOS_HANDED_TO_NETWORK);
- return ok;
+ return err;
}
/**
@@ -1283,20 +1352,26 @@ int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_dblock(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_dblock(mdev, req);
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ re_init_if_first_write(tconn, req->epoch);
+ maybe_send_barrier(tconn, req->epoch);
+ tconn->send.current_epoch_writes++;
+
+ err = drbd_send_dblock(mdev, req);
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
- return ok;
+ return err;
}
/**
@@ -1305,57 +1380,61 @@ int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_read_req(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
- (unsigned long)req);
+ /* Even read requests may close a write epoch,
+ * if there was any yet. */
+ maybe_send_barrier(tconn, req->epoch);
- if (!ok) {
- /* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
- * so this is probably redundant */
- if (mdev->state.conn >= C_CONNECTED)
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
- }
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ err = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
+ (unsigned long)req);
+
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
- return ok;
+ return err;
}
-int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_restart_disk_io(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
+ struct drbd_conf *mdev = w->mdev;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_begin_io(mdev, req->sector);
- /* Calling drbd_al_begin_io() out of the worker might deadlocks
- theoretically. Practically it can not deadlock, since this is
- only used when unfreezing IOs. All the extents of the requests
- that made it into the TL are already active */
+ drbd_al_begin_io(mdev, &req->i);
drbd_req_make_private_bio(req, req->master_bio);
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
generic_make_request(req->private_bio);
- return 1;
+ return 0;
}
static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
struct drbd_conf *odev = mdev;
+ int resync_after;
while (1) {
- if (odev->sync_conf.after == -1)
+ if (!odev->ldev)
+ return 1;
+ rcu_read_lock();
+ resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
+ rcu_read_unlock();
+ if (resync_after == -1)
+ return 1;
+ odev = minor_to_mdev(resync_after);
+ if (!expect(odev))
return 1;
- odev = minor_to_mdev(odev->sync_conf.after);
- ERR_IF(!odev) return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
odev->state.conn <= C_PAUSED_SYNC_T) ||
odev->state.aftr_isp || odev->state.peer_isp ||
@@ -1375,16 +1454,15 @@ static int _drbd_pause_after(struct drbd_conf *mdev)
struct drbd_conf *odev;
int i, rv = 0;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev)
- continue;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (!_drbd_may_sync_now(odev))
rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
!= SS_NOTHING_TO_DO);
}
+ rcu_read_unlock();
return rv;
}
@@ -1400,10 +1478,8 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
struct drbd_conf *odev;
int i, rv = 0;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev)
- continue;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (odev->state.aftr_isp) {
@@ -1413,6 +1489,7 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
!= SS_NOTHING_TO_DO) ;
}
}
+ rcu_read_unlock();
return rv;
}
@@ -1430,57 +1507,86 @@ void suspend_other_sg(struct drbd_conf *mdev)
write_unlock_irq(&global_state_lock);
}
-static int sync_after_error(struct drbd_conf *mdev, int o_minor)
+/* caller must hold global_state_lock */
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
{
struct drbd_conf *odev;
+ int resync_after;
if (o_minor == -1)
return NO_ERROR;
if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
- return ERR_SYNC_AFTER;
+ return ERR_RESYNC_AFTER;
/* check for loops */
odev = minor_to_mdev(o_minor);
while (1) {
if (odev == mdev)
- return ERR_SYNC_AFTER_CYCLE;
+ return ERR_RESYNC_AFTER_CYCLE;
+ rcu_read_lock();
+ resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
+ rcu_read_unlock();
/* dependency chain ends here, no cycles. */
- if (odev->sync_conf.after == -1)
+ if (resync_after == -1)
return NO_ERROR;
/* follow the dependency chain */
- odev = minor_to_mdev(odev->sync_conf.after);
+ odev = minor_to_mdev(resync_after);
}
}
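
drbd_resync_after_valid() above simply walks the chain of resync-after dependencies and rejects any configuration that would loop back to the requesting device. A toy, array-based sketch of that cycle check is shown below; the minor numbers and the resync_after table are illustrative stand-ins for minor_to_mdev() and disk_conf->resync_after, and error handling is reduced to a boolean result.

#include <stdio.h>

#define NR_MINORS 4

/* resync_after[m] == -1 means device m has no dependency */
static int resync_after[NR_MINORS] = { -1, 0, 1, 2 };

/* return 0 if "self resyncs after o_minor" is acceptable, -1 on a cycle */
static int resync_after_valid(int self, int o_minor)
{
	if (o_minor == -1)
		return 0;
	while (1) {
		if (o_minor == self)
			return -1;			/* dependency chain loops back */
		if (resync_after[o_minor] == -1)
			return 0;			/* chain ends, no cycle */
		o_minor = resync_after[o_minor];	/* follow the chain */
	}
}

int main(void)
{
	printf("3 after 2: %s\n", resync_after_valid(3, 2) ? "cycle" : "ok");
	resync_after[0] = 3;	/* introduce a loop: 2 -> 1 -> 0 -> 3 */
	printf("3 after 2: %s\n", resync_after_valid(3, 2) ? "cycle" : "ok");
	return 0;
}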
-int drbd_alter_sa(struct drbd_conf *mdev, int na)
+/* caller must hold global_state_lock */
+void drbd_resync_after_changed(struct drbd_conf *mdev)
{
int changes;
- int retcode;
- write_lock_irq(&global_state_lock);
- retcode = sync_after_error(mdev, na);
- if (retcode == NO_ERROR) {
- mdev->sync_conf.after = na;
- do {
- changes = _drbd_pause_after(mdev);
- changes |= _drbd_resume_next(mdev);
- } while (changes);
- }
- write_unlock_irq(&global_state_lock);
- return retcode;
+ do {
+ changes = _drbd_pause_after(mdev);
+ changes |= _drbd_resume_next(mdev);
+ } while (changes);
}
void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
+ struct fifo_buffer *plan;
+
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
mdev->rs_in_flight = 0;
- mdev->rs_planed = 0;
- spin_lock(&mdev->peer_seq_lock);
- fifo_set(&mdev->rs_plan_s, 0);
- spin_unlock(&mdev->peer_seq_lock);
+
+ /* Updating the RCU protected object in place is necessary since
+ this function gets called from atomic context.
+ It is valid since all other updates also lead to a completely
+ empty fifo */
+ rcu_read_lock();
+ plan = rcu_dereference(mdev->rs_plan_s);
+ plan->total = 0;
+ fifo_set(plan, 0);
+ rcu_read_unlock();
+}
+
+void start_resync_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
+}
+
+int w_start_resync(struct drbd_work *w, int cancel)
+{
+ struct drbd_conf *mdev = w->mdev;
+
+ if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
+ dev_warn(DEV, "w_start_resync later...\n");
+ mdev->start_resync_timer.expires = jiffies + HZ/10;
+ add_timer(&mdev->start_resync_timer);
+ return 0;
+ }
+
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ return 0;
}
/**
@@ -1501,43 +1607,58 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
return;
}
- if (side == C_SYNC_TARGET) {
- /* Since application IO was locked out during C_WF_BITMAP_T and
- C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
- we check that we might make the data inconsistent. */
- r = drbd_khelper(mdev, "before-resync-target");
- r = (r >> 8) & 0xff;
- if (r > 0) {
- dev_info(DEV, "before-resync-target handler returned %d, "
- "dropping connection.\n", r);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return;
- }
- } else /* C_SYNC_SOURCE */ {
- r = drbd_khelper(mdev, "before-resync-source");
- r = (r >> 8) & 0xff;
- if (r > 0) {
- if (r == 3) {
- dev_info(DEV, "before-resync-source handler returned %d, "
- "ignoring. Old userland tools?", r);
- } else {
- dev_info(DEV, "before-resync-source handler returned %d, "
+ if (!test_bit(B_RS_H_DONE, &mdev->flags)) {
+ if (side == C_SYNC_TARGET) {
+ /* Since application IO was locked out during C_WF_BITMAP_T and
+ C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
+ we check that we might make the data inconsistent. */
+ r = drbd_khelper(mdev, "before-resync-target");
+ r = (r >> 8) & 0xff;
+ if (r > 0) {
+ dev_info(DEV, "before-resync-target handler returned %d, "
"dropping connection.\n", r);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
+ } else /* C_SYNC_SOURCE */ {
+ r = drbd_khelper(mdev, "before-resync-source");
+ r = (r >> 8) & 0xff;
+ if (r > 0) {
+ if (r == 3) {
+ dev_info(DEV, "before-resync-source handler returned %d, "
+ "ignoring. Old userland tools?", r);
+ } else {
+ dev_info(DEV, "before-resync-source handler returned %d, "
+ "dropping connection.\n", r);
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return;
+ }
+ }
}
}
- drbd_state_lock(mdev);
+ if (current == mdev->tconn->worker.task) {
+ /* The worker should not sleep waiting for state_mutex,
+ that can take long */
+ if (!mutex_trylock(mdev->state_mutex)) {
+ set_bit(B_RS_H_DONE, &mdev->flags);
+ mdev->start_resync_timer.expires = jiffies + HZ/5;
+ add_timer(&mdev->start_resync_timer);
+ return;
+ }
+ } else {
+ mutex_lock(mdev->state_mutex);
+ }
+ clear_bit(B_RS_H_DONE, &mdev->flags);
+
write_lock_irq(&global_state_lock);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
write_unlock_irq(&global_state_lock);
- drbd_state_unlock(mdev);
+ mutex_unlock(mdev->state_mutex);
return;
}
- ns.i = mdev->state.i;
+ ns = drbd_read_state(mdev);
ns.aftr_isp = !_drbd_may_sync_now(mdev);
@@ -1549,7 +1670,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
ns.pdsk = D_INCONSISTENT;
r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = mdev->state;
+ ns = drbd_read_state(mdev);
if (ns.conn < C_CONNECTED)
r = SS_UNKNOWN_ERROR;
@@ -1575,6 +1696,10 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
write_unlock_irq(&global_state_lock);
if (r == SS_SUCCESS) {
+ /* reset rs_last_bcast when a resync or verify is started,
+ * to deal with potential jiffies wrap. */
+ mdev->rs_last_bcast = jiffies - HZ;
+
dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
drbd_conn_str(ns.conn),
(unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
@@ -1589,10 +1714,10 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* drbd_resync_finished from here in that case.
* We drbd_gen_and_send_sync_uuid here for protocol < 96,
* and from after_state_ch otherwise. */
- if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
+ if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
drbd_gen_and_send_sync_uuid(mdev);
- if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
+ if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
/* This still has a race (about when exactly the peers
* detect connection loss) that can lead to a full sync
* on next handshake. In 8.3.9 we fixed this with explicit
@@ -1603,10 +1728,16 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* detect connection loss, then waiting for a ping
* response (implicit in drbd_resync_finished) reduces
* the race considerably, but does not solve it. */
- if (side == C_SYNC_SOURCE)
- schedule_timeout_interruptible(
- mdev->net_conf->ping_int * HZ +
- mdev->net_conf->ping_timeo*HZ/9);
+ if (side == C_SYNC_SOURCE) {
+ struct net_conf *nc;
+ int timeo;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeo);
+ }
drbd_resync_finished(mdev);
}
@@ -1621,114 +1752,180 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_md_sync(mdev);
}
put_ldev(mdev);
- drbd_state_unlock(mdev);
+ mutex_unlock(mdev->state_mutex);
}
-int drbd_worker(struct drbd_thread *thi)
+/* If the resource already closed the current epoch, but we did not
+ * (because we have not yet seen new requests), we should send the
+ * corresponding barrier now. Must be checked within the same spinlock
+ * that is used to check for new requests. */
+bool need_to_send_barrier(struct drbd_tconn *connection)
{
- struct drbd_conf *mdev = thi->mdev;
- struct drbd_work *w = NULL;
- LIST_HEAD(work_list);
- int intr = 0, i;
+ if (!connection->send.seen_any_write_yet)
+ return false;
+
+ /* Skip barriers that do not contain any writes.
+ * This may happen during AHEAD mode. */
+ if (!connection->send.current_epoch_writes)
+ return false;
+
+ /* ->req_lock is held when requests are queued on
+ * connection->sender_work, and put into ->transfer_log.
+ * It is also held when ->current_tle_nr is increased.
+ * So either there are already new requests queued,
+ * and corresponding barriers will be sent there.
+ * Or nothing new is queued yet, so the difference will be 1.
+ */
+ if (atomic_read(&connection->current_tle_nr) !=
+ connection->send.current_epoch_nr + 1)
+ return false;
+
+ return true;
+}
+
+bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
+{
+ spin_lock_irq(&queue->q_lock);
+ list_splice_init(&queue->q, work_list);
+ spin_unlock_irq(&queue->q_lock);
+ return !list_empty(work_list);
+}
- sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
+bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
+{
+ spin_lock_irq(&queue->q_lock);
+ if (!list_empty(&queue->q))
+ list_move(queue->q.next, work_list);
+ spin_unlock_irq(&queue->q_lock);
+ return !list_empty(work_list);
+}
- while (get_t_state(thi) == Running) {
- drbd_thread_current_set_cpu(mdev);
+void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
+{
+ DEFINE_WAIT(wait);
+ struct net_conf *nc;
+ int uncork, cork;
- if (down_trylock(&mdev->data.work.s)) {
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket && !mdev->net_conf->no_cork)
- drbd_tcp_uncork(mdev->data.socket);
- mutex_unlock(&mdev->data.mutex);
+ dequeue_work_item(&connection->sender_work, work_list);
+ if (!list_empty(work_list))
+ return;
- intr = down_interruptible(&mdev->data.work.s);
+ /* Still nothing to do?
+ * Maybe we still need to close the current epoch,
+ * even if no new requests are queued yet.
+ *
+ * Also, poke TCP, just in case.
+ * Then wait for new work (or signal). */
+ rcu_read_lock();
+ nc = rcu_dereference(connection->net_conf);
+ uncork = nc ? nc->tcp_cork : 0;
+ rcu_read_unlock();
+ if (uncork) {
+ mutex_lock(&connection->data.mutex);
+ if (connection->data.socket)
+ drbd_tcp_uncork(connection->data.socket);
+ mutex_unlock(&connection->data.mutex);
+ }
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket && !mdev->net_conf->no_cork)
- drbd_tcp_cork(mdev->data.socket);
- mutex_unlock(&mdev->data.mutex);
+ for (;;) {
+ int send_barrier;
+ prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_lock_irq(&connection->req_lock);
+ spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
+ /* dequeue single item only,
+ * we still use drbd_queue_work_front() in some places */
+ if (!list_empty(&connection->sender_work.q))
+ list_move(connection->sender_work.q.next, work_list);
+ spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
+ if (!list_empty(work_list) || signal_pending(current)) {
+ spin_unlock_irq(&connection->req_lock);
+ break;
}
+ send_barrier = need_to_send_barrier(connection);
+ spin_unlock_irq(&connection->req_lock);
+ if (send_barrier) {
+ drbd_send_barrier(connection);
+ connection->send.current_epoch_nr++;
+ }
+ schedule();
+ /* We may be woken up for things other than new work, too,
+ * e.g. if the current epoch got closed; in that case the
+ * code above sends the barrier on the next iteration. */
+ }
+ finish_wait(&connection->sender_work.q_wait, &wait);
+
+ /* someone may have changed the config while we have been waiting above. */
+ rcu_read_lock();
+ nc = rcu_dereference(connection->net_conf);
+ cork = nc ? nc->tcp_cork : 0;
+ rcu_read_unlock();
+ mutex_lock(&connection->data.mutex);
+ if (connection->data.socket) {
+ if (cork)
+ drbd_tcp_cork(connection->data.socket);
+ else if (!uncork)
+ drbd_tcp_uncork(connection->data.socket);
+ }
+ mutex_unlock(&connection->data.mutex);
+}
- if (intr) {
- D_ASSERT(intr == -EINTR);
+int drbd_worker(struct drbd_thread *thi)
+{
+ struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_work *w = NULL;
+ struct drbd_conf *mdev;
+ LIST_HEAD(work_list);
+ int vnr;
+
+ while (get_t_state(thi) == RUNNING) {
+ drbd_thread_current_set_cpu(thi);
+
+ /* as long as we use drbd_queue_work_front(),
+ * we may only dequeue single work items here, not batches. */
+ if (list_empty(&work_list))
+ wait_for_work(tconn, &work_list);
+
+ if (signal_pending(current)) {
flush_signals(current);
- ERR_IF (get_t_state(thi) == Running)
+ if (get_t_state(thi) == RUNNING) {
+ conn_warn(tconn, "Worker got an unexpected signal\n");
continue;
+ }
break;
}
- if (get_t_state(thi) != Running)
+ if (get_t_state(thi) != RUNNING)
break;
- /* With this break, we have done a down() but not consumed
- the entry from the list. The cleanup code takes care of
- this... */
-
- w = NULL;
- spin_lock_irq(&mdev->data.work.q_lock);
- ERR_IF(list_empty(&mdev->data.work.q)) {
- /* something terribly wrong in our logic.
- * we were able to down() the semaphore,
- * but the list is empty... doh.
- *
- * what is the best thing to do now?
- * try again from scratch, restarting the receiver,
- * asender, whatnot? could break even more ugly,
- * e.g. when we are primary, but no good local data.
- *
- * I'll try to get away just starting over this loop.
- */
- spin_unlock_irq(&mdev->data.work.q_lock);
- continue;
- }
- w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
- list_del_init(&w->list);
- spin_unlock_irq(&mdev->data.work.q_lock);
-
- if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
- /* dev_warn(DEV, "worker: a callback failed! \n"); */
- if (mdev->state.conn >= C_CONNECTED)
- drbd_force_state(mdev,
- NS(conn, C_NETWORK_FAILURE));
+
+ while (!list_empty(&work_list)) {
+ w = list_first_entry(&work_list, struct drbd_work, list);
+ list_del_init(&w->list);
+ if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0)
+ continue;
+ if (tconn->cstate >= C_WF_REPORT_PARAMS)
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
}
}
- D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
- D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
-
- spin_lock_irq(&mdev->data.work.q_lock);
- i = 0;
- while (!list_empty(&mdev->data.work.q)) {
- list_splice_init(&mdev->data.work.q, &work_list);
- spin_unlock_irq(&mdev->data.work.q_lock);
+ do {
while (!list_empty(&work_list)) {
- w = list_entry(work_list.next, struct drbd_work, list);
+ w = list_first_entry(&work_list, struct drbd_work, list);
list_del_init(&w->list);
- w->cb(mdev, w, 1);
- i++; /* dead debugging code */
+ w->cb(w, 1);
}
-
- spin_lock_irq(&mdev->data.work.q_lock);
+ dequeue_work_batch(&tconn->sender_work, &work_list);
+ } while (!list_empty(&work_list));
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_mdev_cleanup(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
}
- sema_init(&mdev->data.work.s, 0);
- /* DANGEROUS race: if someone did queue his work within the spinlock,
- * but up() ed outside the spinlock, we could get an up() on the
- * semaphore without corresponding list entry.
- * So don't do that.
- */
- spin_unlock_irq(&mdev->data.work.q_lock);
-
- D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
- /* _drbd_set_state only uses stop_nowait.
- * wait here for the Exiting receiver. */
- drbd_thread_stop(&mdev->receiver);
- drbd_mdev_cleanup(mdev);
-
- dev_info(DEV, "worker terminated\n");
-
- clear_bit(DEVICE_DYING, &mdev->flags);
- clear_bit(CONFIG_PENDING, &mdev->flags);
- wake_up(&mdev->state_wait);
+ rcu_read_unlock();
return 0;
}
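For readers skimming the patch, the core of the reworked worker is the splice-then-process pattern: dequeue_work_batch() moves everything queued on tconn->sender_work onto a private list in one short critical section, and drbd_worker() then runs the callbacks with the queue lock dropped. A minimal userspace sketch of that pattern follows; all names in it are hypothetical, with pthreads standing in for the kernel spinlock.

/* Illustrative sketch only: the splice-then-process pattern used by
 * dequeue_work_batch()/drbd_worker() above, re-expressed with pthreads.
 * None of these identifiers are part of the kernel patch. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	struct work *next;
	void (*cb)(struct work *w);
};

static struct work *queue_head;                 /* shared work queue */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_work(struct work *w)
{
	pthread_mutex_lock(&queue_lock);
	w->next = queue_head;                   /* LIFO is enough for a sketch */
	queue_head = w;
	pthread_mutex_unlock(&queue_lock);
}

/* Grab the whole queue in one short critical section: the "batch". */
static struct work *dequeue_batch(void)
{
	struct work *batch;

	pthread_mutex_lock(&queue_lock);
	batch = queue_head;
	queue_head = NULL;
	pthread_mutex_unlock(&queue_lock);
	return batch;
}

static void print_cb(struct work *w)
{
	printf("processing %p\n", (void *) w);
}

int main(void)
{
	struct work *w, *next;
	int i;

	for (i = 0; i < 3; i++) {
		w = calloc(1, sizeof(*w));
		w->cb = print_cb;
		queue_work(w);
	}

	/* Run the callbacks with the queue lock dropped, just as the
	 * per-connection worker drains its private work_list. */
	for (w = dequeue_batch(); w; w = next) {
		next = w->next;
		w->cb(w);
		free(w);
	}
	return 0;
}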
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index 151f1a37478f..328f18e4b4ee 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -3,6 +3,7 @@
#include <linux/ctype.h>
#include <linux/mm.h>
+#include "drbd_int.h"
/* see get_sb_bdev and bd_claim */
extern char *drbd_sec_holder;
@@ -20,8 +21,8 @@ static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
/* bi_end_io handlers */
extern void drbd_md_io_complete(struct bio *bio, int error);
-extern void drbd_endio_sec(struct bio *bio, int error);
-extern void drbd_endio_pri(struct bio *bio, int error);
+extern void drbd_peer_request_endio(struct bio *bio, int error);
+extern void drbd_request_endio(struct bio *bio, int error);
/*
* used to submit our private bio
@@ -45,12 +46,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
generic_make_request(bio);
}
-static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
-{
- return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
- == CRYPTO_ALG_TYPE_HASH;
-}
-
#ifndef __CHECKER__
# undef __cond_lock
# define __cond_lock(x,c) (c)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 54046e51160a..ae1251270624 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -463,6 +463,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
+ lo->lo_bio_count++;
bio_list_add(&lo->lo_bio_list, bio);
}
@@ -471,6 +472,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
+ lo->lo_bio_count--;
return bio_list_pop(&lo->lo_bio_list);
}
@@ -489,6 +491,10 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
goto out;
if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
goto out;
+ if (lo->lo_bio_count >= q->nr_congestion_on)
+ wait_event_lock_irq(lo->lo_req_wait,
+ lo->lo_bio_count < q->nr_congestion_off,
+ lo->lo_lock);
loop_add_bio(lo, old_bio);
wake_up(&lo->lo_event);
spin_unlock_irq(&lo->lo_lock);
@@ -546,6 +552,8 @@ static int loop_thread(void *data)
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
+ if (lo->lo_bio_count < lo->lo_queue->nr_congestion_off)
+ wake_up(&lo->lo_req_wait);
spin_unlock_irq(&lo->lo_lock);
BUG_ON(!bio);
@@ -873,6 +881,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->transfer = transfer_none;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
+ lo->lo_bio_count = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
@@ -1673,6 +1682,7 @@ static int loop_add(struct loop_device **l, int i)
lo->lo_number = i;
lo->lo_thread = NULL;
init_waitqueue_head(&lo->lo_event);
+ init_waitqueue_head(&lo->lo_req_wait);
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
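The loop.c hunk above adds simple backpressure: lo_bio_count is bumped in loop_add_bio(), submitters block in loop_make_request() once it reaches q->nr_congestion_on, and loop_thread() wakes them on lo_req_wait when the backlog drains below nr_congestion_off. A rough userspace sketch of the same two-watermark throttle follows; the names and watermark values are hypothetical, not the kernel implementation.

/* Illustrative sketch only: the nr_congestion_on/off style throttle,
 * re-expressed with a pthread condition variable. */
#include <pthread.h>
#include <stdio.h>

#define CONGESTION_ON  128   /* stop queueing above this backlog */
#define CONGESTION_OFF  96   /* resume once the backlog drains below this */

static int in_flight;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;

/* Submission side: once the high watermark is hit, wait until the
 * backlog falls below the low watermark (the loop_make_request() test). */
static void submit_one(void)
{
	pthread_mutex_lock(&lock);
	if (in_flight >= CONGESTION_ON)
		while (in_flight >= CONGESTION_OFF)
			pthread_cond_wait(&drained, &lock);
	in_flight++;                         /* loop_add_bio() counterpart */
	pthread_mutex_unlock(&lock);
}

/* Completion side: wake submitters when we drop below the low watermark
 * (the loop_thread() wake_up on lo_req_wait). */
static void complete_one(void)
{
	pthread_mutex_lock(&lock);
	in_flight--;                         /* loop_get_bio() counterpart */
	if (in_flight < CONGESTION_OFF)
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	submit_one();
	complete_one();
	printf("backlog now %d\n", in_flight);
	return 0;
}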
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 9694dd99bbbc..3fd100990453 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
}
}
- if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ if (cmdto_cnt) {
print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
-
- mtip_restart_port(port);
+ if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ mtip_restart_port(port);
+ wake_up_interruptible(&port->svc_wait);
+ }
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
}
if (port->ic_pause_timer) {
@@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
* Delete our gendisk structure. This also removes the device
* from /dev
*/
- del_gendisk(dd->disk);
+ if (dd->disk) {
+ if (dd->disk->queue)
+ del_gendisk(dd->disk);
+ else
+ put_disk(dd->disk);
+ }
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
@@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
"Shutting down %s ...\n", dd->disk->disk_name);
/* Delete our gendisk structure, and cleanup the blk queue. */
- del_gendisk(dd->disk);
+ if (dd->disk) {
+ if (dd->disk->queue)
+ del_gendisk(dd->disk);
+ else
+ put_disk(dd->disk);
+ }
+
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 931769e133e5..07fb2dfaae13 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -975,8 +975,8 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
-static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
- int qid, int cq_size, int vector)
+static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
+ int cq_size, int vector)
{
int result;
struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
@@ -1011,7 +1011,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
return ERR_PTR(result);
}
-static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result = 0;
u32 aqa;
@@ -1408,7 +1408,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
return min(result & 0xffff, result >> 16) + 1;
}
-static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+static int nvme_setup_io_queues(struct nvme_dev *dev)
{
int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
@@ -1481,7 +1481,7 @@ static void nvme_free_queues(struct nvme_dev *dev)
nvme_free_queue(dev, i);
}
-static int __devinit nvme_dev_add(struct nvme_dev *dev)
+static int nvme_dev_add(struct nvme_dev *dev)
{
int res, nn, i;
struct nvme_ns *ns, *next;
@@ -1619,8 +1619,7 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
-static int __devinit nvme_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int bars, result = -ENOMEM;
struct nvme_dev *dev;
@@ -1702,7 +1701,7 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
return result;
}
-static void __devexit nvme_remove(struct pci_dev *pdev)
+static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
nvme_dev_remove(dev);
@@ -1747,7 +1746,7 @@ static struct pci_driver nvme_driver = {
.name = "nvme",
.id_table = nvme_id_table,
.probe = nvme_probe,
- .remove = __devexit_p(nvme_remove),
+ .remove = nvme_remove,
.suspend = nvme_suspend,
.resume = nvme_resume,
.err_handler = &nvme_err_handler,
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 28cf3082d442..efefb5ac3004 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -205,8 +205,8 @@ config PARIDE_EPAT
support.
config PARIDE_EPATC8
- bool "Support c7/c8 chips (EXPERIMENTAL)"
- depends on PARIDE_EPAT && EXPERIMENTAL
+ bool "Support c7/c8 chips"
+ depends on PARIDE_EPAT
help
This option enables support for the newer Shuttle EP1284 (aka c7 and
c8) chip. You need this if you are using any recent Imation SuperDisk
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index da0abc1838c1..d754a88d7585 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -401,7 +401,7 @@ static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
-static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
+static int ps3disk_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3disk_private *priv;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index f58cdcfb305f..75e112d66006 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -536,7 +536,7 @@ static const struct file_operations ps3vram_proc_fops = {
.release = single_release,
};
-static void __devinit ps3vram_proc_init(struct ps3_system_bus_device *dev)
+static void ps3vram_proc_init(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct proc_dir_entry *pde;
@@ -618,7 +618,7 @@ static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
} while (bio);
}
-static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
+static int ps3vram_probe(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
int error, status;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index bb3d9be3b1b4..89576a0b3f2e 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -61,15 +61,29 @@
#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
-#define RBD_MAX_SNAP_NAME_LEN 32
+#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
+#define RBD_MAX_SNAP_NAME_LEN \
+ (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
+
#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
#define RBD_MAX_OPT_LEN 1024
#define RBD_SNAP_HEAD_NAME "-"
+/* This allows a single page to hold an image name sent by OSD */
+#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX 64
+
#define RBD_OBJ_PREFIX_LEN_MAX 64
+/* Feature bits */
+
+#define RBD_FEATURE_LAYERING 1
+
+/* Features supported by this (client software) implementation. */
+
+#define RBD_FEATURES_ALL (0)
+
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
* RBD_DRV_NAME above, and # is a unique integer identifier.
@@ -101,6 +115,27 @@ struct rbd_image_header {
u64 obj_version;
};
+/*
+ * An rbd image specification.
+ *
+ * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
+ * identify an image.
+ */
+struct rbd_spec {
+ u64 pool_id;
+ char *pool_name;
+
+ char *image_id;
+ size_t image_id_len;
+ char *image_name;
+ size_t image_name_len;
+
+ u64 snap_id;
+ char *snap_name;
+
+ struct kref kref;
+};
+
struct rbd_options {
bool read_only;
};
@@ -155,11 +190,8 @@ struct rbd_snap {
};
struct rbd_mapping {
- char *snap_name;
- u64 snap_id;
u64 size;
u64 features;
- bool snap_exists;
bool read_only;
};
@@ -173,7 +205,6 @@ struct rbd_device {
struct gendisk *disk; /* blkdev's gendisk and rq */
u32 image_format; /* Either 1 or 2 */
- struct rbd_options rbd_opts;
struct rbd_client *rbd_client;
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
@@ -181,17 +212,17 @@ struct rbd_device {
spinlock_t lock; /* queue lock */
struct rbd_image_header header;
- char *image_id;
- size_t image_id_len;
- char *image_name;
- size_t image_name_len;
+ bool exists;
+ struct rbd_spec *spec;
+
char *header_name;
- char *pool_name;
- int pool_id;
struct ceph_osd_event *watch_event;
struct ceph_osd_request *watch_request;
+ struct rbd_spec *parent_spec;
+ u64 parent_overlap;
+
/* protects updating the header */
struct rw_semaphore header_rwsem;
@@ -204,6 +235,7 @@ struct rbd_device {
/* sysfs related */
struct device dev;
+ unsigned long open_count;
};
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
@@ -218,7 +250,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static void __rbd_remove_snap_dev(struct rbd_snap *snap);
+static void rbd_remove_snap_dev(struct rbd_snap *snap);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
@@ -258,17 +290,8 @@ static struct device rbd_root_dev = {
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
-static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
-{
- return get_device(&rbd_dev->dev);
-}
-
-static void rbd_put_dev(struct rbd_device *rbd_dev)
-{
- put_device(&rbd_dev->dev);
-}
-
-static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver);
+static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -277,8 +300,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
return -EROFS;
- rbd_get_dev(rbd_dev);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ (void) get_device(&rbd_dev->dev);
set_device_ro(bdev, rbd_dev->mapping.read_only);
+ rbd_dev->open_count++;
+ mutex_unlock(&ctl_mutex);
return 0;
}
@@ -287,7 +313,11 @@ static int rbd_release(struct gendisk *disk, fmode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
- rbd_put_dev(rbd_dev);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ rbd_assert(rbd_dev->open_count > 0);
+ rbd_dev->open_count--;
+ put_device(&rbd_dev->dev);
+ mutex_unlock(&ctl_mutex);
return 0;
}
@@ -388,7 +418,7 @@ enum {
static match_table_t rbd_opts_tokens = {
/* int args above */
/* string args above */
- {Opt_read_only, "mapping.read_only"},
+ {Opt_read_only, "read_only"},
{Opt_read_only, "ro"}, /* Alternate spelling */
{Opt_read_write, "read_write"},
{Opt_read_write, "rw"}, /* Alternate spelling */
@@ -441,33 +471,17 @@ static int parse_rbd_opts_token(char *c, void *private)
* Get a ceph client with specific addr and configuration, if one does
* not exist create it.
*/
-static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
- size_t mon_addr_len, char *options)
+static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
- struct rbd_options *rbd_opts = &rbd_dev->rbd_opts;
- struct ceph_options *ceph_opts;
struct rbd_client *rbdc;
- rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
-
- ceph_opts = ceph_parse_options(options, mon_addr,
- mon_addr + mon_addr_len,
- parse_rbd_opts_token, rbd_opts);
- if (IS_ERR(ceph_opts))
- return PTR_ERR(ceph_opts);
-
rbdc = rbd_client_find(ceph_opts);
- if (rbdc) {
- /* using an existing client */
+ if (rbdc) /* using an existing client */
ceph_destroy_options(ceph_opts);
- } else {
+ else
rbdc = rbd_client_create(ceph_opts);
- if (IS_ERR(rbdc))
- return PTR_ERR(rbdc);
- }
- rbd_dev->rbd_client = rbdc;
- return 0;
+ return rbdc;
}
/*
@@ -492,10 +506,10 @@ static void rbd_client_release(struct kref *kref)
* Drop reference to ceph client node. If it's not referenced anymore, release
* it.
*/
-static void rbd_put_client(struct rbd_device *rbd_dev)
+static void rbd_put_client(struct rbd_client *rbdc)
{
- kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
- rbd_dev->rbd_client = NULL;
+ if (rbdc)
+ kref_put(&rbdc->kref, rbd_client_release);
}
/*
@@ -524,6 +538,16 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
return false;
+ /* The bio layer requires at least sector-sized I/O */
+
+ if (ondisk->options.order < SECTOR_SHIFT)
+ return false;
+
+ /* If we use u64 in a few spots we may be able to loosen this */
+
+ if (ondisk->options.order > 8 * sizeof (int) - 1)
+ return false;
+
/*
* The size of a snapshot header has to fit in a size_t, and
* that limits the number of snapshots.
@@ -635,6 +659,20 @@ out_err:
return -ENOMEM;
}
+static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
+{
+ struct rbd_snap *snap;
+
+ if (snap_id == CEPH_NOSNAP)
+ return RBD_SNAP_HEAD_NAME;
+
+ list_for_each_entry(snap, &rbd_dev->snaps, node)
+ if (snap_id == snap->id)
+ return snap->name;
+
+ return NULL;
+}
+
static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
@@ -642,7 +680,7 @@ static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
list_for_each_entry(snap, &rbd_dev->snaps, node) {
if (!strcmp(snap_name, snap->name)) {
- rbd_dev->mapping.snap_id = snap->id;
+ rbd_dev->spec->snap_id = snap->id;
rbd_dev->mapping.size = snap->size;
rbd_dev->mapping.features = snap->features;
@@ -653,26 +691,23 @@ static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
return -ENOENT;
}
-static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name)
+static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
int ret;
- if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME,
+ if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
sizeof (RBD_SNAP_HEAD_NAME))) {
- rbd_dev->mapping.snap_id = CEPH_NOSNAP;
+ rbd_dev->spec->snap_id = CEPH_NOSNAP;
rbd_dev->mapping.size = rbd_dev->header.image_size;
rbd_dev->mapping.features = rbd_dev->header.features;
- rbd_dev->mapping.snap_exists = false;
- rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only;
ret = 0;
} else {
- ret = snap_by_name(rbd_dev, snap_name);
+ ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
if (ret < 0)
goto done;
- rbd_dev->mapping.snap_exists = true;
rbd_dev->mapping.read_only = true;
}
- rbd_dev->mapping.snap_name = snap_name;
+ rbd_dev->exists = true;
done:
return ret;
}
@@ -695,13 +730,13 @@ static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
u64 segment;
int ret;
- name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+ name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
if (!name)
return NULL;
segment = offset >> rbd_dev->header.obj_order;
- ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
+ ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
rbd_dev->header.object_prefix, segment);
- if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
+ if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
pr_err("error formatting segment name for #%llu (%d)\n",
segment, ret);
kfree(name);
@@ -800,77 +835,144 @@ static void zero_bio_chain(struct bio *chain, int start_ofs)
}
/*
- * bio_chain_clone - clone a chain of bios up to a certain length.
- * might return a bio_pair that will need to be released.
+ * Clone a portion of a bio, starting at the given byte offset
+ * and continuing for the number of bytes indicated.
*/
-static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
- struct bio_pair **bp,
- int len, gfp_t gfpmask)
-{
- struct bio *old_chain = *old;
- struct bio *new_chain = NULL;
- struct bio *tail;
- int total = 0;
-
- if (*bp) {
- bio_pair_release(*bp);
- *bp = NULL;
- }
+static struct bio *bio_clone_range(struct bio *bio_src,
+ unsigned int offset,
+ unsigned int len,
+ gfp_t gfpmask)
+{
+ struct bio_vec *bv;
+ unsigned int resid;
+ unsigned short idx;
+ unsigned int voff;
+ unsigned short end_idx;
+ unsigned short vcnt;
+ struct bio *bio;
- while (old_chain && (total < len)) {
- struct bio *tmp;
+ /* Handle the easy case for the caller */
- tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
- if (!tmp)
- goto err_out;
- gfpmask &= ~__GFP_WAIT; /* can't wait after the first */
+ if (!offset && len == bio_src->bi_size)
+ return bio_clone(bio_src, gfpmask);
- if (total + old_chain->bi_size > len) {
- struct bio_pair *bp;
+ if (WARN_ON_ONCE(!len))
+ return NULL;
+ if (WARN_ON_ONCE(len > bio_src->bi_size))
+ return NULL;
+ if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
+ return NULL;
- /*
- * this split can only happen with a single paged bio,
- * split_bio will BUG_ON if this is not the case
- */
- dout("bio_chain_clone split! total=%d remaining=%d"
- "bi_size=%u\n",
- total, len - total, old_chain->bi_size);
+ /* Find first affected segment... */
- /* split the bio. We'll release it either in the next
- call, or it will have to be released outside */
- bp = bio_split(old_chain, (len - total) / SECTOR_SIZE);
- if (!bp)
- goto err_out;
+ resid = offset;
+ __bio_for_each_segment(bv, bio_src, idx, 0) {
+ if (resid < bv->bv_len)
+ break;
+ resid -= bv->bv_len;
+ }
+ voff = resid;
- __bio_clone(tmp, &bp->bio1);
+ /* ...and the last affected segment */
- *next = &bp->bio2;
- } else {
- __bio_clone(tmp, old_chain);
- *next = old_chain->bi_next;
- }
+ resid += len;
+ __bio_for_each_segment(bv, bio_src, end_idx, idx) {
+ if (resid <= bv->bv_len)
+ break;
+ resid -= bv->bv_len;
+ }
+ vcnt = end_idx - idx + 1;
+
+ /* Build the clone */
- tmp->bi_bdev = NULL;
- tmp->bi_next = NULL;
- if (new_chain)
- tail->bi_next = tmp;
- else
- new_chain = tmp;
- tail = tmp;
- old_chain = old_chain->bi_next;
+ bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+ if (!bio)
+ return NULL; /* ENOMEM */
- total += tmp->bi_size;
+ bio->bi_bdev = bio_src->bi_bdev;
+ bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
+ bio->bi_rw = bio_src->bi_rw;
+ bio->bi_flags |= 1 << BIO_CLONED;
+
+ /*
+ * Copy over our part of the bio_vec, then update the first
+ * and last (or only) entries.
+ */
+ memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
+ vcnt * sizeof (struct bio_vec));
+ bio->bi_io_vec[0].bv_offset += voff;
+ if (vcnt > 1) {
+ bio->bi_io_vec[0].bv_len -= voff;
+ bio->bi_io_vec[vcnt - 1].bv_len = resid;
+ } else {
+ bio->bi_io_vec[0].bv_len = len;
}
- rbd_assert(total == len);
+ bio->bi_vcnt = vcnt;
+ bio->bi_size = len;
+ bio->bi_idx = 0;
+
+ return bio;
+}
+
+/*
+ * Clone a portion of a bio chain, starting at the given byte offset
+ * into the first bio in the source chain and continuing for the
+ * number of bytes indicated. The result is another bio chain of
+ * exactly the given length, or a null pointer on error.
+ *
+ * The bio_src and offset parameters are both in-out. On entry they
+ * refer to the first source bio and the offset into that bio where
+ * the start of data to be cloned is located.
+ *
+ * On return, bio_src is updated to refer to the bio in the source
+ * chain that contains the first un-cloned byte, and *offset will
+ * contain the offset of that byte within that bio.
+ */
+static struct bio *bio_chain_clone_range(struct bio **bio_src,
+ unsigned int *offset,
+ unsigned int len,
+ gfp_t gfpmask)
+{
+ struct bio *bi = *bio_src;
+ unsigned int off = *offset;
+ struct bio *chain = NULL;
+ struct bio **end;
+
+ /* Build up a chain of clone bios up to the limit */
+
+ if (!bi || off >= bi->bi_size || !len)
+ return NULL; /* Nothing to clone */
- *old = old_chain;
+ end = &chain;
+ while (len) {
+ unsigned int bi_size;
+ struct bio *bio;
+
+ if (!bi)
+ goto out_err; /* EINVAL; ran out of bio's */
+ bi_size = min_t(unsigned int, bi->bi_size - off, len);
+ bio = bio_clone_range(bi, off, bi_size, gfpmask);
+ if (!bio)
+ goto out_err; /* ENOMEM */
+
+ *end = bio;
+ end = &bio->bi_next;
+
+ off += bi_size;
+ if (off == bi->bi_size) {
+ bi = bi->bi_next;
+ off = 0;
+ }
+ len -= bi_size;
+ }
+ *bio_src = bi;
+ *offset = off;
- return new_chain;
+ return chain;
+out_err:
+ bio_chain_put(chain);
-err_out:
- dout("bio_chain_clone with err\n");
- bio_chain_put(new_chain);
return NULL;
}
@@ -988,8 +1090,9 @@ static int rbd_do_request(struct request *rq,
req_data->coll_index = coll_index;
}
- dout("rbd_do_request object_name=%s ofs=%llu len=%llu\n", object_name,
- (unsigned long long) ofs, (unsigned long long) len);
+ dout("rbd_do_request object_name=%s ofs=%llu len=%llu coll=%p[%d]\n",
+ object_name, (unsigned long long) ofs,
+ (unsigned long long) len, coll, coll_index);
osdc = &rbd_dev->rbd_client->client->osdc;
req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
@@ -1019,7 +1122,7 @@ static int rbd_do_request(struct request *rq,
layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
+ layout->fl_pg_pool = cpu_to_le32((int) rbd_dev->spec->pool_id);
ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
req, ops);
rbd_assert(ret == 0);
@@ -1154,8 +1257,6 @@ done:
static int rbd_do_op(struct request *rq,
struct rbd_device *rbd_dev,
struct ceph_snap_context *snapc,
- u64 snapid,
- int opcode, int flags,
u64 ofs, u64 len,
struct bio *bio,
struct rbd_req_coll *coll,
@@ -1167,6 +1268,9 @@ static int rbd_do_op(struct request *rq,
int ret;
struct ceph_osd_req_op *ops;
u32 payload_len;
+ int opcode;
+ int flags;
+ u64 snapid;
seg_name = rbd_segment_name(rbd_dev, ofs);
if (!seg_name)
@@ -1174,7 +1278,18 @@ static int rbd_do_op(struct request *rq,
seg_len = rbd_segment_length(rbd_dev, ofs, len);
seg_ofs = rbd_segment_offset(rbd_dev, ofs);
- payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
+ if (rq_data_dir(rq) == WRITE) {
+ opcode = CEPH_OSD_OP_WRITE;
+ flags = CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK;
+ snapid = CEPH_NOSNAP;
+ payload_len = seg_len;
+ } else {
+ opcode = CEPH_OSD_OP_READ;
+ flags = CEPH_OSD_FLAG_READ;
+ snapc = NULL;
+ snapid = rbd_dev->spec->snap_id;
+ payload_len = 0;
+ }
ret = -ENOMEM;
ops = rbd_create_rw_ops(1, opcode, payload_len);
@@ -1202,41 +1317,6 @@ done:
}
/*
- * Request async osd write
- */
-static int rbd_req_write(struct request *rq,
- struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 ofs, u64 len,
- struct bio *bio,
- struct rbd_req_coll *coll,
- int coll_index)
-{
- return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
- CEPH_OSD_OP_WRITE,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ofs, len, bio, coll, coll_index);
-}
-
-/*
- * Request async osd read
- */
-static int rbd_req_read(struct request *rq,
- struct rbd_device *rbd_dev,
- u64 snapid,
- u64 ofs, u64 len,
- struct bio *bio,
- struct rbd_req_coll *coll,
- int coll_index)
-{
- return rbd_do_op(rq, rbd_dev, NULL,
- snapid,
- CEPH_OSD_OP_READ,
- CEPH_OSD_FLAG_READ,
- ofs, len, bio, coll, coll_index);
-}
-
-/*
* Request sync osd read
*/
static int rbd_req_sync_read(struct rbd_device *rbd_dev,
@@ -1304,7 +1384,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
rbd_dev->header_name, (unsigned long long) notify_id,
(unsigned int) opcode);
- rc = rbd_refresh_header(rbd_dev, &hver);
+ rc = rbd_dev_refresh(rbd_dev, &hver);
if (rc)
pr_warning(RBD_DRV_NAME "%d got notification but failed to "
" update snaps: %d\n", rbd_dev->major, rc);
@@ -1460,18 +1540,16 @@ static void rbd_rq_fn(struct request_queue *q)
{
struct rbd_device *rbd_dev = q->queuedata;
struct request *rq;
- struct bio_pair *bp = NULL;
while ((rq = blk_fetch_request(q))) {
struct bio *bio;
- struct bio *rq_bio, *next_bio = NULL;
bool do_write;
unsigned int size;
- u64 op_size = 0;
u64 ofs;
int num_segs, cur_seg = 0;
struct rbd_req_coll *coll;
struct ceph_snap_context *snapc;
+ unsigned int bio_offset;
dout("fetched request\n");
@@ -1483,10 +1561,6 @@ static void rbd_rq_fn(struct request_queue *q)
/* deduce our operation (read, write) */
do_write = (rq_data_dir(rq) == WRITE);
-
- size = blk_rq_bytes(rq);
- ofs = blk_rq_pos(rq) * SECTOR_SIZE;
- rq_bio = rq->bio;
if (do_write && rbd_dev->mapping.read_only) {
__blk_end_request_all(rq, -EROFS);
continue;
@@ -1496,8 +1570,8 @@ static void rbd_rq_fn(struct request_queue *q)
down_read(&rbd_dev->header_rwsem);
- if (rbd_dev->mapping.snap_id != CEPH_NOSNAP &&
- !rbd_dev->mapping.snap_exists) {
+ if (!rbd_dev->exists) {
+ rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
up_read(&rbd_dev->header_rwsem);
dout("request for non-existent snapshot");
spin_lock_irq(q->queue_lock);
@@ -1509,6 +1583,10 @@ static void rbd_rq_fn(struct request_queue *q)
up_read(&rbd_dev->header_rwsem);
+ size = blk_rq_bytes(rq);
+ ofs = blk_rq_pos(rq) * SECTOR_SIZE;
+ bio = rq->bio;
+
dout("%s 0x%x bytes at 0x%llx\n",
do_write ? "write" : "read",
size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
@@ -1528,45 +1606,37 @@ static void rbd_rq_fn(struct request_queue *q)
continue;
}
+ bio_offset = 0;
do {
- /* a bio clone to be passed down to OSD req */
+ u64 limit = rbd_segment_length(rbd_dev, ofs, size);
+ unsigned int chain_size;
+ struct bio *bio_chain;
+
+ BUG_ON(limit > (u64) UINT_MAX);
+ chain_size = (unsigned int) limit;
dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
- op_size = rbd_segment_length(rbd_dev, ofs, size);
+
kref_get(&coll->kref);
- bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
- op_size, GFP_ATOMIC);
- if (!bio) {
- rbd_coll_end_req_index(rq, coll, cur_seg,
- -ENOMEM, op_size);
- goto next_seg;
- }
+ /* Pass a cloned bio chain via an osd request */
- /* init OSD command: write or read */
- if (do_write)
- rbd_req_write(rq, rbd_dev,
- snapc,
- ofs,
- op_size, bio,
- coll, cur_seg);
+ bio_chain = bio_chain_clone_range(&bio,
+ &bio_offset, chain_size,
+ GFP_ATOMIC);
+ if (bio_chain)
+ (void) rbd_do_op(rq, rbd_dev, snapc,
+ ofs, chain_size,
+ bio_chain, coll, cur_seg);
else
- rbd_req_read(rq, rbd_dev,
- rbd_dev->mapping.snap_id,
- ofs,
- op_size, bio,
- coll, cur_seg);
-
-next_seg:
- size -= op_size;
- ofs += op_size;
+ rbd_coll_end_req_index(rq, coll, cur_seg,
+ -ENOMEM, chain_size);
+ size -= chain_size;
+ ofs += chain_size;
cur_seg++;
- rq_bio = next_bio;
} while (size > 0);
kref_put(&coll->kref, rbd_coll_release);
- if (bp)
- bio_pair_release(bp);
spin_lock_irq(q->queue_lock);
ceph_put_snap_context(snapc);
@@ -1576,28 +1646,47 @@ next_seg:
/*
* a queue callback. Makes sure that we don't create a bio that spans across
* multiple osd objects. One exception would be with a single page bios,
- * which we handle later at bio_chain_clone
+ * which we handle later at bio_chain_clone_range()
*/
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct rbd_device *rbd_dev = q->queuedata;
- unsigned int chunk_sectors;
- sector_t sector;
- unsigned int bio_sectors;
- int max;
+ sector_t sector_offset;
+ sector_t sectors_per_obj;
+ sector_t obj_sector_offset;
+ int ret;
- chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
- sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
- bio_sectors = bmd->bi_size >> SECTOR_SHIFT;
+ /*
+ * Convert the bio's partition-relative start sector to a sector
+ * offset relative to the enclosing device, then find how far
+ * into its rbd object that sector falls.
+ */
+ sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
+ sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
+ obj_sector_offset = sector_offset & (sectors_per_obj - 1);
+
+ /*
+ * Compute the number of bytes from that offset to the end
+ * of the object. Account for what's already used by the bio.
+ */
+ ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
+ if (ret > bmd->bi_size)
+ ret -= bmd->bi_size;
+ else
+ ret = 0;
- max = (chunk_sectors - ((sector & (chunk_sectors - 1))
- + bio_sectors)) << SECTOR_SHIFT;
- if (max < 0)
- max = 0; /* bio_add cannot handle a negative return */
- if (max <= bvec->bv_len && bio_sectors == 0)
- return bvec->bv_len;
- return max;
+ /*
+ * Don't send back more than was asked for. And if the bio
+ * was empty, let the whole thing through because: "Note
+ * that a block device *must* allow a single page to be
+ * added to an empty bio."
+ */
+ rbd_assert(bvec->bv_len <= PAGE_SIZE);
+ if (ret > (int) bvec->bv_len || !bmd->bi_size)
+ ret = (int) bvec->bv_len;
+
+ return ret;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
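To make the new rbd_merge_bvec() arithmetic concrete, here is a small worked example with hypothetical numbers (a 4 MiB object size, i.e. obj_order 22, SECTOR_SHIFT 9); none of these values come from the patch itself.

/* Illustrative only: the rbd_merge_bvec() computation with made-up inputs. */
#include <stdio.h>

int main(void)
{
	unsigned int obj_order = 22;               /* 4 MiB objects */
	unsigned long long sector_offset = 8000;   /* device-relative start sector */
	unsigned int bi_size = 64 * 1024;          /* bytes already in the bio */
	unsigned int bv_len = 4096;                /* bio_vec being added */

	unsigned long long sectors_per_obj = 1ULL << (obj_order - 9);
	unsigned long long obj_sector_offset = sector_offset & (sectors_per_obj - 1);
	long long ret = (long long)(sectors_per_obj - obj_sector_offset) << 9;

	if (ret > bi_size)                         /* room left in this object */
		ret -= bi_size;
	else
		ret = 0;
	if (ret > bv_len || !bi_size)              /* never more than asked for */
		ret = bv_len;

	/* 8192 sectors per object, 192 sectors to the boundary = 98304 bytes,
	 * minus the 65536 already queued leaves 32768, clamped to bv_len. */
	printf("can add %lld bytes\n", ret);       /* prints 4096 */
	return 0;
}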
@@ -1663,13 +1752,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
ret = -ENXIO;
pr_warning("short header read for image %s"
" (want %zd got %d)\n",
- rbd_dev->image_name, size, ret);
+ rbd_dev->spec->image_name, size, ret);
goto out_err;
}
if (!rbd_dev_ondisk_valid(ondisk)) {
ret = -ENXIO;
pr_warning("invalid header for image %s\n",
- rbd_dev->image_name);
+ rbd_dev->spec->image_name);
goto out_err;
}
@@ -1707,19 +1796,32 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
return ret;
}
-static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
+static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
{
struct rbd_snap *snap;
struct rbd_snap *next;
list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
- __rbd_remove_snap_dev(snap);
+ rbd_remove_snap_dev(snap);
+}
+
+static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
+{
+ sector_t size;
+
+ if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
+ return;
+
+ size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
+ dout("setting size to %llu sectors", (unsigned long long) size);
+ rbd_dev->mapping.size = (u64) size;
+ set_capacity(rbd_dev->disk, size);
}
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
int ret;
struct rbd_image_header h;
@@ -1730,17 +1832,9 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
down_write(&rbd_dev->header_rwsem);
- /* resized? */
- if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) {
- sector_t size = (sector_t) h.image_size / SECTOR_SIZE;
-
- if (size != (sector_t) rbd_dev->mapping.size) {
- dout("setting size to %llu sectors",
- (unsigned long long) size);
- rbd_dev->mapping.size = (u64) size;
- set_capacity(rbd_dev->disk, size);
- }
- }
+ /* Update image size, and check for resize of mapped image */
+ rbd_dev->header.image_size = h.image_size;
+ rbd_update_mapping_size(rbd_dev);
/* rbd_dev->header.object_prefix shouldn't change */
kfree(rbd_dev->header.snap_sizes);
@@ -1768,12 +1862,16 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
return ret;
}
-static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
int ret;
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- ret = __rbd_refresh_header(rbd_dev, hver);
+ if (rbd_dev->image_format == 1)
+ ret = rbd_dev_v1_refresh(rbd_dev, hver);
+ else
+ ret = rbd_dev_v2_refresh(rbd_dev, hver);
mutex_unlock(&ctl_mutex);
return ret;
@@ -1885,7 +1983,7 @@ static ssize_t rbd_pool_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->pool_name);
+ return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}
static ssize_t rbd_pool_id_show(struct device *dev,
@@ -1893,7 +1991,8 @@ static ssize_t rbd_pool_id_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%d\n", rbd_dev->pool_id);
+ return sprintf(buf, "%llu\n",
+ (unsigned long long) rbd_dev->spec->pool_id);
}
static ssize_t rbd_name_show(struct device *dev,
@@ -1901,7 +2000,10 @@ static ssize_t rbd_name_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->image_name);
+ if (rbd_dev->spec->image_name)
+ return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
+
+ return sprintf(buf, "(unknown)\n");
}
static ssize_t rbd_image_id_show(struct device *dev,
@@ -1909,7 +2011,7 @@ static ssize_t rbd_image_id_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->image_id);
+ return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
@@ -1922,7 +2024,50 @@ static ssize_t rbd_snap_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name);
+ return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
+}
+
+/*
+ * For an rbd v2 image, shows the pool id, image id, and snapshot id
+ * for the parent image. If there is no parent, simply shows
+ * "(no parent image)".
+ */
+static ssize_t rbd_parent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ struct rbd_spec *spec = rbd_dev->parent_spec;
+ int count;
+ char *bufp = buf;
+
+ if (!spec)
+ return sprintf(buf, "(no parent image)\n");
+
+ count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
+ (unsigned long long) spec->pool_id, spec->pool_name);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
+ spec->image_name ? spec->image_name : "(unknown)");
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
+ (unsigned long long) spec->snap_id, spec->snap_name);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ return (ssize_t) (bufp - buf);
}
static ssize_t rbd_image_refresh(struct device *dev,
@@ -1933,7 +2078,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
- ret = rbd_refresh_header(rbd_dev, NULL);
+ ret = rbd_dev_refresh(rbd_dev, NULL);
return ret < 0 ? ret : size;
}
@@ -1948,6 +2093,7 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
+static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@@ -1959,6 +2105,7 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_name.attr,
&dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
+ &dev_attr_parent.attr,
&dev_attr_refresh.attr,
NULL
};
@@ -2047,6 +2194,74 @@ static struct device_type rbd_snap_device_type = {
.release = rbd_snap_dev_release,
};
+static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
+{
+ kref_get(&spec->kref);
+
+ return spec;
+}
+
+static void rbd_spec_free(struct kref *kref);
+static void rbd_spec_put(struct rbd_spec *spec)
+{
+ if (spec)
+ kref_put(&spec->kref, rbd_spec_free);
+}
+
+static struct rbd_spec *rbd_spec_alloc(void)
+{
+ struct rbd_spec *spec;
+
+ spec = kzalloc(sizeof (*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+ kref_init(&spec->kref);
+
+ rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
+
+ return spec;
+}
+
+static void rbd_spec_free(struct kref *kref)
+{
+ struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
+
+ kfree(spec->pool_name);
+ kfree(spec->image_id);
+ kfree(spec->image_name);
+ kfree(spec->snap_name);
+ kfree(spec);
+}
+
+struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ struct rbd_spec *spec)
+{
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
+ if (!rbd_dev)
+ return NULL;
+
+ spin_lock_init(&rbd_dev->lock);
+ INIT_LIST_HEAD(&rbd_dev->node);
+ INIT_LIST_HEAD(&rbd_dev->snaps);
+ init_rwsem(&rbd_dev->header_rwsem);
+
+ rbd_dev->spec = spec;
+ rbd_dev->rbd_client = rbdc;
+
+ return rbd_dev;
+}
+
+static void rbd_dev_destroy(struct rbd_device *rbd_dev)
+{
+ rbd_spec_put(rbd_dev->parent_spec);
+ kfree(rbd_dev->header_name);
+ rbd_put_client(rbd_dev->rbd_client);
+ rbd_spec_put(rbd_dev->spec);
+ kfree(rbd_dev);
+}
+
static bool rbd_snap_registered(struct rbd_snap *snap)
{
bool ret = snap->dev.type == &rbd_snap_device_type;
@@ -2057,7 +2272,7 @@ static bool rbd_snap_registered(struct rbd_snap *snap)
return ret;
}
-static void __rbd_remove_snap_dev(struct rbd_snap *snap)
+static void rbd_remove_snap_dev(struct rbd_snap *snap)
{
list_del(&snap->node);
if (device_is_registered(&snap->dev))
@@ -2073,7 +2288,7 @@ static int rbd_register_snap_dev(struct rbd_snap *snap,
dev->type = &rbd_snap_device_type;
dev->parent = parent;
dev->release = rbd_snap_dev_release;
- dev_set_name(dev, "snap_%s", snap->name);
+ dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
dout("%s: registering device for snapshot %s\n", __func__, snap->name);
ret = device_register(dev);
@@ -2189,6 +2404,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
goto out;
+ ret = 0; /* rbd_req_sync_exec() can return positive */
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
@@ -2216,6 +2432,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
__le64 features;
__le64 incompat;
} features_buf = { 0 };
+ u64 incompat;
int ret;
ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
@@ -2226,6 +2443,11 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
return ret;
+
+ incompat = le64_to_cpu(features_buf.incompat);
+ if (incompat & ~RBD_FEATURES_ALL)
+ return -ENXIO;
+
*snap_features = le64_to_cpu(features_buf.features);
dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
@@ -2242,6 +2464,183 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
&rbd_dev->header.features);
}
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+{
+ struct rbd_spec *parent_spec;
+ size_t size;
+ void *reply_buf = NULL;
+ __le64 snapid;
+ void *p;
+ void *end;
+ char *image_id;
+ u64 overlap;
+ size_t len = 0;
+ int ret;
+
+ parent_spec = rbd_spec_alloc();
+ if (!parent_spec)
+ return -ENOMEM;
+
+ size = sizeof (__le64) + /* pool_id */
+ sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
+ sizeof (__le64) + /* snap_id */
+ sizeof (__le64); /* overlap */
+ reply_buf = kmalloc(size, GFP_KERNEL);
+ if (!reply_buf) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ snapid = cpu_to_le64(CEPH_NOSNAP);
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_parent",
+ (char *) &snapid, sizeof (snapid),
+ (char *) reply_buf, size,
+ CEPH_OSD_FLAG_READ, NULL);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ goto out_err;
+
+ ret = -ERANGE;
+ p = reply_buf;
+ end = (char *) reply_buf + size;
+ ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
+ if (parent_spec->pool_id == CEPH_NOPOOL)
+ goto out; /* No parent? No problem. */
+
+ image_id = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ if (IS_ERR(image_id)) {
+ ret = PTR_ERR(image_id);
+ goto out_err;
+ }
+ parent_spec->image_id = image_id;
+ parent_spec->image_id_len = len;
+ ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
+ ceph_decode_64_safe(&p, end, overlap, out_err);
+
+ rbd_dev->parent_overlap = overlap;
+ rbd_dev->parent_spec = parent_spec;
+ parent_spec = NULL; /* rbd_dev now owns this */
+out:
+ ret = 0;
+out_err:
+ kfree(reply_buf);
+ rbd_spec_put(parent_spec);
+
+ return ret;
+}
+
+static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
+{
+ size_t image_id_size;
+ char *image_id;
+ void *p;
+ void *end;
+ size_t size;
+ void *reply_buf = NULL;
+ size_t len = 0;
+ char *image_name = NULL;
+ int ret;
+
+ rbd_assert(!rbd_dev->spec->image_name);
+
+ image_id_size = sizeof (__le32) + rbd_dev->spec->image_id_len;
+ image_id = kmalloc(image_id_size, GFP_KERNEL);
+ if (!image_id)
+ return NULL;
+
+ p = image_id;
+ end = (char *) image_id + image_id_size;
+ ceph_encode_string(&p, end, rbd_dev->spec->image_id,
+ (u32) rbd_dev->spec->image_id_len);
+
+ size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
+ reply_buf = kmalloc(size, GFP_KERNEL);
+ if (!reply_buf)
+ goto out;
+
+ ret = rbd_req_sync_exec(rbd_dev, RBD_DIRECTORY,
+ "rbd", "dir_get_name",
+ image_id, image_id_size,
+ (char *) reply_buf, size,
+ CEPH_OSD_FLAG_READ, NULL);
+ if (ret < 0)
+ goto out;
+ p = reply_buf;
+ end = (char *) reply_buf + size;
+ image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ if (IS_ERR(image_name))
+ image_name = NULL;
+ else
+ dout("%s: name is %s len is %zd\n", __func__, image_name, len);
+out:
+ kfree(reply_buf);
+ kfree(image_id);
+
+ return image_name;
+}
+
+/*
+ * When a parent image gets probed, we only have the pool, image,
+ * and snapshot ids but not the names of any of them. This call
+ * is made later to fill in those names. It has to be done after
+ * rbd_dev_snaps_update() has completed because some of the
+ * information (in particular, snapshot name) is not available
+ * until then.
+ */
+static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc;
+ const char *name;
+ void *reply_buf = NULL;
+ int ret;
+
+ if (rbd_dev->spec->pool_name)
+ return 0; /* Already have the names */
+
+ /* Look up the pool name */
+
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
+ if (!name)
+ return -EIO; /* pool id too large (>= 2^31) */
+
+ rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
+ if (!rbd_dev->spec->pool_name)
+ return -ENOMEM;
+
+ /* Fetch the image name; tolerate failure here */
+
+ name = rbd_dev_image_name(rbd_dev);
+ if (name) {
+ rbd_dev->spec->image_name_len = strlen(name);
+ rbd_dev->spec->image_name = (char *) name;
+ } else {
+ pr_warning(RBD_DRV_NAME "%d "
+ "unable to get image name for image id %s\n",
+ rbd_dev->major, rbd_dev->spec->image_id);
+ }
+
+ /* Look up the snapshot name. */
+
+ name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
+ if (!name) {
+ ret = -EIO;
+ goto out_err;
+ }
+ rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
+ if (!rbd_dev->spec->snap_name) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ return 0;
+out_err:
+ kfree(reply_buf);
+ kfree(rbd_dev->spec->pool_name);
+ rbd_dev->spec->pool_name = NULL;
+
+ return ret;
+}
+
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
{
size_t size;
@@ -2328,7 +2727,6 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
int ret;
void *p;
void *end;
- size_t snap_name_len;
char *snap_name;
size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
@@ -2348,9 +2746,7 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
p = reply_buf;
end = (char *) reply_buf + size;
- snap_name_len = 0;
- snap_name = ceph_extract_encoded_string(&p, end, &snap_name_len,
- GFP_KERNEL);
+ snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(snap_name)) {
ret = PTR_ERR(snap_name);
goto out;
@@ -2397,6 +2793,41 @@ static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
return ERR_PTR(-EINVAL);
}
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
+{
+ int ret;
+ __u8 obj_order;
+
+ down_write(&rbd_dev->header_rwsem);
+
+ /* Grab old order first, to see if it changes */
+
+ obj_order = rbd_dev->header.obj_order;
+ ret = rbd_dev_v2_image_size(rbd_dev);
+ if (ret)
+ goto out;
+ if (rbd_dev->header.obj_order != obj_order) {
+ ret = -EIO;
+ goto out;
+ }
+ rbd_update_mapping_size(rbd_dev);
+
+ ret = rbd_dev_v2_snap_context(rbd_dev, hver);
+ dout("rbd_dev_v2_snap_context returned %d\n", ret);
+ if (ret)
+ goto out;
+ ret = rbd_dev_snaps_update(rbd_dev);
+ dout("rbd_dev_snaps_update returned %d\n", ret);
+ if (ret)
+ goto out;
+ ret = rbd_dev_snaps_register(rbd_dev);
+ dout("rbd_dev_snaps_register returned %d\n", ret);
+out:
+ up_write(&rbd_dev->header_rwsem);
+
+ return ret;
+}
+
/*
* Scan the rbd device's current snapshot list and compare it to the
* newly-received snapshot context. Remove any existing snapshots
@@ -2436,12 +2867,12 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
/* Existing snapshot not in the new snap context */
- if (rbd_dev->mapping.snap_id == snap->id)
- rbd_dev->mapping.snap_exists = false;
- __rbd_remove_snap_dev(snap);
+ if (rbd_dev->spec->snap_id == snap->id)
+ rbd_dev->exists = false;
+ rbd_remove_snap_dev(snap);
dout("%ssnap id %llu has been removed\n",
- rbd_dev->mapping.snap_id == snap->id ?
- "mapped " : "",
+ rbd_dev->spec->snap_id == snap->id ?
+ "mapped " : "",
(unsigned long long) snap->id);
/* Done with this list entry; advance */
@@ -2559,7 +2990,7 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
do {
ret = rbd_req_sync_watch(rbd_dev);
if (ret == -ERANGE) {
- rc = rbd_refresh_header(rbd_dev, NULL);
+ rc = rbd_dev_refresh(rbd_dev, NULL);
if (rc < 0)
return rc;
}
@@ -2621,8 +3052,8 @@ static void rbd_dev_id_put(struct rbd_device *rbd_dev)
struct rbd_device *rbd_dev;
rbd_dev = list_entry(tmp, struct rbd_device, node);
- if (rbd_id > max_id)
- max_id = rbd_id;
+ if (rbd_dev->dev_id > max_id)
+ max_id = rbd_dev->dev_id;
}
spin_unlock(&rbd_dev_list_lock);
@@ -2722,73 +3153,140 @@ static inline char *dup_token(const char **buf, size_t *lenp)
}
/*
- * This fills in the pool_name, image_name, image_name_len, rbd_dev,
- * rbd_md_name, and name fields of the given rbd_dev, based on the
- * list of monitor addresses and other options provided via
- * /sys/bus/rbd/add. Returns a pointer to a dynamically-allocated
- * copy of the snapshot name to map if successful, or a
- * pointer-coded error otherwise.
+ * Parse the options provided for an "rbd add" (i.e., rbd image
+ * mapping) request. These arrive via a write to /sys/bus/rbd/add,
+ * and the data written is passed here via a NUL-terminated buffer.
+ * Returns 0 if successful or an error code otherwise.
+ *
+ * The information extracted from these options is recorded in
+ * the other parameters which return dynamically-allocated
+ * structures:
+ * ceph_opts
+ * The address of a pointer that will refer to a ceph options
+ * structure. Caller must release the returned pointer using
+ * ceph_destroy_options() when it is no longer needed.
+ * rbd_opts
+ * Address of an rbd options pointer. Fully initialized by
+ * this function; caller must release with kfree().
+ * spec
+ * Address of an rbd image specification pointer. Fully
+ * initialized by this function based on parsed options.
+ * Caller must release with rbd_spec_put().
*
- * Note: rbd_dev is assumed to have been initially zero-filled.
+ * The options passed take this form:
+ * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
+ * where:
+ * <mon_addrs>
+ * A comma-separated list of one or more monitor addresses.
+ * A monitor address is an ip address, optionally followed
+ * by a port number (separated by a colon).
+ * I.e.: ip1[:port1][,ip2[:port2]...]
+ * <options>
+ * A comma-separated list of ceph and/or rbd options.
+ * <pool_name>
+ * The name of the rados pool containing the rbd image.
+ * <image_name>
+ * The name of the image in that pool to map.
+ * <snap_id>
+ * An optional snapshot id. If provided, the mapping will
+ * present data from the image at the time that snapshot was
+ * created. The image head is used if no snapshot id is
+ * provided. Snapshot mappings are always read-only.
*/
-static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
- const char *buf,
- const char **mon_addrs,
- size_t *mon_addrs_size,
- char *options,
- size_t options_size)
+static int rbd_add_parse_args(const char *buf,
+ struct ceph_options **ceph_opts,
+ struct rbd_options **opts,
+ struct rbd_spec **rbd_spec)
{
size_t len;
- char *err_ptr = ERR_PTR(-EINVAL);
- char *snap_name;
+ char *options;
+ const char *mon_addrs;
+ size_t mon_addrs_size;
+ struct rbd_spec *spec = NULL;
+ struct rbd_options *rbd_opts = NULL;
+ struct ceph_options *copts;
+ int ret;
/* The first four tokens are required */
len = next_token(&buf);
if (!len)
- return err_ptr;
- *mon_addrs_size = len + 1;
- *mon_addrs = buf;
-
+ return -EINVAL; /* Missing monitor address(es) */
+ mon_addrs = buf;
+ mon_addrs_size = len + 1;
buf += len;
- len = copy_token(&buf, options, options_size);
- if (!len || len >= options_size)
- return err_ptr;
+ ret = -EINVAL;
+ options = dup_token(&buf, NULL);
+ if (!options)
+ return -ENOMEM;
+ if (!*options)
+ goto out_err; /* Missing options */
- err_ptr = ERR_PTR(-ENOMEM);
- rbd_dev->pool_name = dup_token(&buf, NULL);
- if (!rbd_dev->pool_name)
- goto out_err;
+ spec = rbd_spec_alloc();
+ if (!spec)
+ goto out_mem;
- rbd_dev->image_name = dup_token(&buf, &rbd_dev->image_name_len);
- if (!rbd_dev->image_name)
- goto out_err;
+ spec->pool_name = dup_token(&buf, NULL);
+ if (!spec->pool_name)
+ goto out_mem;
+ if (!*spec->pool_name)
+ goto out_err; /* Missing pool name */
- /* Snapshot name is optional */
+ spec->image_name = dup_token(&buf, &spec->image_name_len);
+ if (!spec->image_name)
+ goto out_mem;
+ if (!*spec->image_name)
+ goto out_err; /* Missing image name */
+
+ /*
+ * Snapshot name is optional; default is to use "-"
+ * (indicating the head/no snapshot).
+ */
len = next_token(&buf);
if (!len) {
buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
- }
- snap_name = kmalloc(len + 1, GFP_KERNEL);
- if (!snap_name)
+ } else if (len > RBD_MAX_SNAP_NAME_LEN) {
+ ret = -ENAMETOOLONG;
goto out_err;
- memcpy(snap_name, buf, len);
- *(snap_name + len) = '\0';
+ }
+ spec->snap_name = kmalloc(len + 1, GFP_KERNEL);
+ if (!spec->snap_name)
+ goto out_mem;
+ memcpy(spec->snap_name, buf, len);
+ *(spec->snap_name + len) = '\0';
-dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len);
+ /* Initialize all rbd options to the defaults */
- return snap_name;
+ rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
+ if (!rbd_opts)
+ goto out_mem;
+
+ rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
+
+ copts = ceph_parse_options(options, mon_addrs,
+ mon_addrs + mon_addrs_size - 1,
+ parse_rbd_opts_token, rbd_opts);
+ if (IS_ERR(copts)) {
+ ret = PTR_ERR(copts);
+ goto out_err;
+ }
+ kfree(options);
+ *ceph_opts = copts;
+ *opts = rbd_opts;
+ *rbd_spec = spec;
+
+ return 0;
+out_mem:
+ ret = -ENOMEM;
out_err:
- kfree(rbd_dev->image_name);
- rbd_dev->image_name = NULL;
- rbd_dev->image_name_len = 0;
- kfree(rbd_dev->pool_name);
- rbd_dev->pool_name = NULL;
+ kfree(rbd_opts);
+ rbd_spec_put(spec);
+ kfree(options);
- return err_ptr;
+ return ret;
}
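As a concrete illustration of the token order documented above, the minimal user-space sketch below writes one mapping request to /sys/bus/rbd/add. This is not part of the patch; the monitor address, client name, pool, image and snapshot names are invented placeholders, and a real setup would supply its own credentials via the ceph options token.

/* Illustrative only: maps pool "rbd", image "foo", snapshot "snap1". */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* <mon_addrs> <options> <pool_name> <image_name> [<snap>] */
	const char *spec = "1.2.3.4:6789 name=admin rbd foo snap1";
	int fd = open("/sys/bus/rbd/add", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, spec, strlen(spec)) < 0)
		perror("write");	/* rbd_add_parse_args() rejects bad input */
	close(fd);
	return 0;
}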
/*
@@ -2814,14 +3312,22 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
void *p;
/*
+ * When probing a parent image, the image id is already
+ * known (and the image name likely is not). There's no
+ * need to fetch the image id again in this case.
+ */
+ if (rbd_dev->spec->image_id)
+ return 0;
+
+ /*
* First, see if the format 2 image id file exists, and if
* so, get the image's persistent id from it.
*/
- size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len;
+ size = sizeof (RBD_ID_PREFIX) + rbd_dev->spec->image_name_len;
object_name = kmalloc(size, GFP_NOIO);
if (!object_name)
return -ENOMEM;
- sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name);
+ sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
dout("rbd id object name is %s\n", object_name);
/* Response will be an encoded string, which includes a length */
@@ -2841,17 +3347,18 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
goto out;
+ ret = 0; /* rbd_req_sync_exec() can return positive */
p = response;
- rbd_dev->image_id = ceph_extract_encoded_string(&p,
+ rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
p + RBD_IMAGE_ID_LEN_MAX,
- &rbd_dev->image_id_len,
+ &rbd_dev->spec->image_id_len,
GFP_NOIO);
- if (IS_ERR(rbd_dev->image_id)) {
- ret = PTR_ERR(rbd_dev->image_id);
- rbd_dev->image_id = NULL;
+ if (IS_ERR(rbd_dev->spec->image_id)) {
+ ret = PTR_ERR(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
} else {
- dout("image_id is %s\n", rbd_dev->image_id);
+ dout("image_id is %s\n", rbd_dev->spec->image_id);
}
out:
kfree(response);
@@ -2867,26 +3374,33 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
/* Version 1 images have no id; empty string is used */
- rbd_dev->image_id = kstrdup("", GFP_KERNEL);
- if (!rbd_dev->image_id)
+ rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
+ if (!rbd_dev->spec->image_id)
return -ENOMEM;
- rbd_dev->image_id_len = 0;
+ rbd_dev->spec->image_id_len = 0;
/* Record the header object name for this rbd image. */
- size = rbd_dev->image_name_len + sizeof (RBD_SUFFIX);
+ size = rbd_dev->spec->image_name_len + sizeof (RBD_SUFFIX);
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name) {
ret = -ENOMEM;
goto out_err;
}
- sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);
+ sprintf(rbd_dev->header_name, "%s%s",
+ rbd_dev->spec->image_name, RBD_SUFFIX);
/* Populate rbd image metadata */
ret = rbd_read_header(rbd_dev, &rbd_dev->header);
if (ret < 0)
goto out_err;
+
+ /* Version 1 images have no parent (no layering) */
+
+ rbd_dev->parent_spec = NULL;
+ rbd_dev->parent_overlap = 0;
+
rbd_dev->image_format = 1;
dout("discovered version 1 image, header name is %s\n",
@@ -2897,8 +3411,8 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
out_err:
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
- kfree(rbd_dev->image_id);
- rbd_dev->image_id = NULL;
+ kfree(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
return ret;
}
@@ -2913,12 +3427,12 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
* Image id was filled in by the caller. Record the header
* object name for this rbd image.
*/
- size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->image_id_len;
+ size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->spec->image_id_len;
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name)
return -ENOMEM;
sprintf(rbd_dev->header_name, "%s%s",
- RBD_HEADER_PREFIX, rbd_dev->image_id);
+ RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
/* Get the size and object order for the image */
@@ -2932,12 +3446,20 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
if (ret < 0)
goto out_err;
- /* Get the features for the image */
+ /* Get and check the features for the image */
ret = rbd_dev_v2_features(rbd_dev);
if (ret < 0)
goto out_err;
+ /* If the image supports layering, get the parent info */
+
+ if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
+ ret = rbd_dev_v2_parent_info(rbd_dev);
+ if (ret < 0)
+ goto out_err;
+ }
+
/* crypto and compression type aren't (yet) supported for v2 images */
rbd_dev->header.crypt_type = 0;
@@ -2955,8 +3477,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
dout("discovered version 2 image, header name is %s\n",
rbd_dev->header_name);
- return -ENOTSUPP;
+ return 0;
out_err:
+ rbd_dev->parent_overlap = 0;
+ rbd_spec_put(rbd_dev->parent_spec);
+ rbd_dev->parent_spec = NULL;
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
kfree(rbd_dev->header.object_prefix);
@@ -2965,91 +3490,22 @@ out_err:
return ret;
}
-/*
- * Probe for the existence of the header object for the given rbd
- * device. For format 2 images this includes determining the image
- * id.
- */
-static int rbd_dev_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
{
int ret;
- /*
- * Get the id from the image id object. If it's not a
- * format 2 image, we'll get ENOENT back, and we'll assume
- * it's a format 1 image.
- */
- ret = rbd_dev_image_id(rbd_dev);
- if (ret)
- ret = rbd_dev_v1_probe(rbd_dev);
- else
- ret = rbd_dev_v2_probe(rbd_dev);
+ /* no need to lock here, as rbd_dev is not registered yet */
+ ret = rbd_dev_snaps_update(rbd_dev);
if (ret)
- dout("probe failed, returning %d\n", ret);
-
- return ret;
-}
-
-static ssize_t rbd_add(struct bus_type *bus,
- const char *buf,
- size_t count)
-{
- char *options;
- struct rbd_device *rbd_dev = NULL;
- const char *mon_addrs = NULL;
- size_t mon_addrs_size = 0;
- struct ceph_osd_client *osdc;
- int rc = -ENOMEM;
- char *snap_name;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- options = kmalloc(count, GFP_KERNEL);
- if (!options)
- goto err_out_mem;
- rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
- if (!rbd_dev)
- goto err_out_mem;
-
- /* static rbd_device initialization */
- spin_lock_init(&rbd_dev->lock);
- INIT_LIST_HEAD(&rbd_dev->node);
- INIT_LIST_HEAD(&rbd_dev->snaps);
- init_rwsem(&rbd_dev->header_rwsem);
-
- /* parse add command */
- snap_name = rbd_add_parse_args(rbd_dev, buf,
- &mon_addrs, &mon_addrs_size, options, count);
- if (IS_ERR(snap_name)) {
- rc = PTR_ERR(snap_name);
- goto err_out_mem;
- }
-
- rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options);
- if (rc < 0)
- goto err_out_args;
-
- /* pick the pool */
- osdc = &rbd_dev->rbd_client->client->osdc;
- rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
- if (rc < 0)
- goto err_out_client;
- rbd_dev->pool_id = rc;
-
- rc = rbd_dev_probe(rbd_dev);
- if (rc < 0)
- goto err_out_client;
- rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ return ret;
- /* no need to lock here, as rbd_dev is not registered yet */
- rc = rbd_dev_snaps_update(rbd_dev);
- if (rc)
- goto err_out_header;
+ ret = rbd_dev_probe_update_spec(rbd_dev);
+ if (ret)
+ goto err_out_snaps;
- rc = rbd_dev_set_mapping(rbd_dev, snap_name);
- if (rc)
- goto err_out_header;
+ ret = rbd_dev_set_mapping(rbd_dev);
+ if (ret)
+ goto err_out_snaps;
/* generate unique id: find highest unique id, add one */
rbd_dev_id_get(rbd_dev);
@@ -3061,34 +3517,33 @@ static ssize_t rbd_add(struct bus_type *bus,
/* Get our block major device number. */
- rc = register_blkdev(0, rbd_dev->name);
- if (rc < 0)
+ ret = register_blkdev(0, rbd_dev->name);
+ if (ret < 0)
goto err_out_id;
- rbd_dev->major = rc;
+ rbd_dev->major = ret;
/* Set up the blkdev mapping. */
- rc = rbd_init_disk(rbd_dev);
- if (rc)
+ ret = rbd_init_disk(rbd_dev);
+ if (ret)
goto err_out_blkdev;
- rc = rbd_bus_add_dev(rbd_dev);
- if (rc)
+ ret = rbd_bus_add_dev(rbd_dev);
+ if (ret)
goto err_out_disk;
/*
* At this point cleanup in the event of an error is the job
* of the sysfs code (initiated by rbd_bus_del_dev()).
*/
-
down_write(&rbd_dev->header_rwsem);
- rc = rbd_dev_snaps_register(rbd_dev);
+ ret = rbd_dev_snaps_register(rbd_dev);
up_write(&rbd_dev->header_rwsem);
- if (rc)
+ if (ret)
goto err_out_bus;
- rc = rbd_init_watch_dev(rbd_dev);
- if (rc)
+ ret = rbd_init_watch_dev(rbd_dev);
+ if (ret)
goto err_out_bus;
/* Everything's ready. Announce the disk to the world. */
@@ -3098,37 +3553,119 @@ static ssize_t rbd_add(struct bus_type *bus,
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
- return count;
-
+ return ret;
err_out_bus:
/* this will also clean up rest of rbd_dev stuff */
rbd_bus_del_dev(rbd_dev);
- kfree(options);
- return rc;
+ return ret;
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
-err_out_header:
- rbd_header_free(&rbd_dev->header);
+err_out_snaps:
+ rbd_remove_all_snaps(rbd_dev);
+
+ return ret;
+}
+
+/*
+ * Probe for the existence of the header object for the given rbd
+ * device. For format 2 images this includes determining the image
+ * id.
+ */
+static int rbd_dev_probe(struct rbd_device *rbd_dev)
+{
+ int ret;
+
+ /*
+ * Get the id from the image id object. If it's not a
+ * format 2 image, we'll get ENOENT back, and we'll assume
+ * it's a format 1 image.
+ */
+ ret = rbd_dev_image_id(rbd_dev);
+ if (ret)
+ ret = rbd_dev_v1_probe(rbd_dev);
+ else
+ ret = rbd_dev_v2_probe(rbd_dev);
+ if (ret) {
+ dout("probe failed, returning %d\n", ret);
+
+ return ret;
+ }
+
+ ret = rbd_dev_probe_finish(rbd_dev);
+ if (ret)
+ rbd_header_free(&rbd_dev->header);
+
+ return ret;
+}
+
+static ssize_t rbd_add(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ struct rbd_device *rbd_dev = NULL;
+ struct ceph_options *ceph_opts = NULL;
+ struct rbd_options *rbd_opts = NULL;
+ struct rbd_spec *spec = NULL;
+ struct rbd_client *rbdc;
+ struct ceph_osd_client *osdc;
+ int rc = -ENOMEM;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ /* parse add command */
+ rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
+ if (rc < 0)
+ goto err_out_module;
+
+ rbdc = rbd_get_client(ceph_opts);
+ if (IS_ERR(rbdc)) {
+ rc = PTR_ERR(rbdc);
+ goto err_out_args;
+ }
+ ceph_opts = NULL; /* rbd_dev client now owns this */
+
+ /* pick the pool */
+ osdc = &rbdc->client->osdc;
+ rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
+ if (rc < 0)
+ goto err_out_client;
+ spec->pool_id = (u64) rc;
+
+ rbd_dev = rbd_dev_create(rbdc, spec);
+ if (!rbd_dev)
+ goto err_out_client;
+ rbdc = NULL; /* rbd_dev now owns this */
+ spec = NULL; /* rbd_dev now owns this */
+
+ rbd_dev->mapping.read_only = rbd_opts->read_only;
+ kfree(rbd_opts);
+ rbd_opts = NULL; /* done with this */
+
+ rc = rbd_dev_probe(rbd_dev);
+ if (rc < 0)
+ goto err_out_rbd_dev;
+
+ return count;
+err_out_rbd_dev:
+ rbd_dev_destroy(rbd_dev);
err_out_client:
- kfree(rbd_dev->header_name);
- rbd_put_client(rbd_dev);
- kfree(rbd_dev->image_id);
+ rbd_put_client(rbdc);
err_out_args:
- kfree(rbd_dev->mapping.snap_name);
- kfree(rbd_dev->image_name);
- kfree(rbd_dev->pool_name);
-err_out_mem:
- kfree(rbd_dev);
- kfree(options);
+ if (ceph_opts)
+ ceph_destroy_options(ceph_opts);
+ kfree(rbd_opts);
+ rbd_spec_put(spec);
+err_out_module:
+ module_put(THIS_MODULE);
dout("Error adding device %s\n", buf);
- module_put(THIS_MODULE);
return (ssize_t) rc;
}
@@ -3163,7 +3700,6 @@ static void rbd_dev_release(struct device *dev)
if (rbd_dev->watch_event)
rbd_req_sync_unwatch(rbd_dev);
- rbd_put_client(rbd_dev);
/* clean up and free blkdev */
rbd_free_disk(rbd_dev);
@@ -3173,13 +3709,9 @@ static void rbd_dev_release(struct device *dev)
rbd_header_free(&rbd_dev->header);
/* done with the id, and with the rbd_dev */
- kfree(rbd_dev->mapping.snap_name);
- kfree(rbd_dev->image_id);
- kfree(rbd_dev->header_name);
- kfree(rbd_dev->pool_name);
- kfree(rbd_dev->image_name);
rbd_dev_id_put(rbd_dev);
- kfree(rbd_dev);
+ rbd_assert(rbd_dev->rbd_client != NULL);
+ rbd_dev_destroy(rbd_dev);
/* release module ref */
module_put(THIS_MODULE);
@@ -3211,7 +3743,12 @@ static ssize_t rbd_remove(struct bus_type *bus,
goto done;
}
- __rbd_remove_all_snaps(rbd_dev);
+ if (rbd_dev->open_count) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ rbd_remove_all_snaps(rbd_dev);
rbd_bus_del_dev(rbd_dev);
done:
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index cbe77fa105ba..49d77cbcf8bd 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -46,8 +46,6 @@
#define RBD_MIN_OBJ_ORDER 16
#define RBD_MAX_OBJ_ORDER 30
-#define RBD_MAX_SEG_NAME_LEN 128
-
#define RBD_COMP_NONE 0
#define RBD_CRYPT_NONE 0
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 9dcf76a10bb6..5814deb6963d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -25,7 +25,7 @@
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "June 25, 2007"
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
int op_len, err;
void *req_buf;
- if (!(((u64)1 << ((u64)op - 1)) & port->operations))
+ if (!(((u64)1 << (u64)op) & port->operations))
return -EOPNOTSUPP;
switch (op) {
@@ -592,7 +592,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
return err;
}
-static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
+static int vdc_alloc_tx_ring(struct vdc_port *port)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
unsigned long len, entry_size;
@@ -725,7 +725,7 @@ static struct vio_driver_ops vdc_vio_ops = {
.handshake_complete = vdc_handshake_complete,
};
-static void __devinit print_version(void)
+static void print_version(void)
{
static int version_printed;
@@ -733,8 +733,7 @@ static void __devinit print_version(void)
printk(KERN_INFO "%s", version);
}
-static int __devinit vdc_port_probe(struct vio_dev *vdev,
- const struct vio_device_id *id)
+static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
struct vdc_port *port;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 6d5a914b9619..8766a2257091 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -788,8 +788,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
return get_disk(swd->unit[drive].disk);
}
-static int __devinit swim_add_floppy(struct swim_priv *swd,
- enum drive_location location)
+static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
{
struct floppy_state *fs = &swd->unit[swd->floppy_count];
struct swim __iomem *base = swd->base;
@@ -812,7 +811,7 @@ static int __devinit swim_add_floppy(struct swim_priv *swd,
return 0;
}
-static int __devinit swim_floppy_init(struct swim_priv *swd)
+static int swim_floppy_init(struct swim_priv *swd)
{
int err;
int drive;
@@ -845,6 +844,7 @@ static int __devinit swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].swd = swd;
}
+ spin_lock_init(&swd->lock);
swd->queue = blk_init_queue(do_fd_request, &swd->lock);
if (!swd->queue) {
err = -ENOMEM;
@@ -875,7 +875,7 @@ exit_put_disks:
return err;
}
-static int __devinit swim_probe(struct platform_device *dev)
+static int swim_probe(struct platform_device *dev)
{
struct resource *res;
struct swim __iomem *swim_base;
@@ -936,7 +936,7 @@ out:
return ret;
}
-static int __devexit swim_remove(struct platform_device *dev)
+static int swim_remove(struct platform_device *dev)
{
struct swim_priv *swd = platform_get_drvdata(dev);
int drive;
@@ -972,7 +972,7 @@ static int __devexit swim_remove(struct platform_device *dev)
static struct platform_driver swim_driver = {
.probe = swim_probe,
- .remove = __devexit_p(swim_remove),
+ .remove = swim_remove,
.driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 89ddab127e33..57763c54363a 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1194,7 +1194,8 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
return rc;
}
-static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
+static int swim3_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
{
struct gendisk *disk;
int index, rc;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index eb0d8216f557..ad70868f8a96 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -789,8 +789,7 @@ static const struct block_device_operations mm_fops = {
.revalidate_disk = mm_revalidate,
};
-static int __devinit mm_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret = -ENODEV;
struct cardinfo *card = &cards[num_cards];
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 0bdde8fba397..8ad21a25bc0d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -696,7 +696,7 @@ static const struct device_attribute dev_attr_cache_type_rw =
__ATTR(cache_type, S_IRUGO|S_IWUSR,
virtblk_cache_type_show, virtblk_cache_type_store);
-static int __devinit virtblk_probe(struct virtio_device *vdev)
+static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
struct request_queue *q;
@@ -885,10 +885,11 @@ out:
return err;
}
-static void __devexit virtblk_remove(struct virtio_device *vdev)
+static void virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int index = vblk->index;
+ int refc;
/* Prevent config work handler from accessing the device. */
mutex_lock(&vblk->config_lock);
@@ -903,11 +904,15 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
+ refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
put_disk(vblk->disk);
mempool_destroy(vblk->pool);
vdev->config->del_vqs(vdev);
kfree(vblk);
- ida_simple_remove(&vd_index_ida, index);
+
+ /* Only free device id if we don't have any users */
+ if (refc == 1)
+ ida_simple_remove(&vd_index_ida, index);
}
#ifdef CONFIG_PM
@@ -961,19 +966,14 @@ static unsigned int features[] = {
VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE
};
-/*
- * virtio_blk causes spurious section mismatch warning by
- * simultaneously referring to a __devinit and a __devexit function.
- * Use __refdata to avoid this warning.
- */
-static struct virtio_driver __refdata virtio_blk = {
+static struct virtio_driver virtio_blk = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtblk_probe,
- .remove = __devexit_p(virtblk_remove),
+ .remove = virtblk_remove,
.config_changed = virtblk_config_changed,
#ifdef CONFIG_PM
.freeze = virtblk_freeze,
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 280a13846e6c..5ac841ff6cc7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,6 +39,7 @@
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
+#include <linux/bitmap.h>
#include <xen/events.h>
#include <xen/page.h>
@@ -79,6 +80,7 @@ struct pending_req {
unsigned short operation;
int status;
struct list_head free_list;
+ DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
};
#define BLKBACK_INVALID_HANDLE (~0)
@@ -99,6 +101,36 @@ struct xen_blkbk {
static struct xen_blkbk *blkbk;
/*
+ * Maximum number of grant pages that can be mapped in blkback.
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of
+ * pages that blkback will persistently map.
+ * Currently, this is:
+ * RING_SIZE = 32 (for all known ring types)
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
+ * sizeof(struct persistent_gnt) = 48
+ * So the maximum memory used to store the grants is:
+ * 32 * 11 * 48 = 16896 bytes
+ */
+static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol)
+{
+ switch (protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ return __CONST_RING_SIZE(blkif, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ case BLKIF_PROTOCOL_X86_32:
+ return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ case BLKIF_PROTOCOL_X86_64:
+ return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+
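The sizing stated in the comment above is easy to sanity-check. The stand-alone sketch below is illustrative only and simply restates the assumed constants (ring size 32, 11 segments per request, 48 bytes per struct persistent_gnt) rather than querying the kernel; the mapped pages themselves are accounted for separately.

#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 32;       /* assumed __CONST_RING_SIZE() result */
	unsigned int segs_per_req = 11;    /* BLKIF_MAX_SEGMENTS_PER_REQUEST */
	unsigned int gnt_struct_size = 48; /* assumed sizeof(struct persistent_gnt) */

	unsigned int max_pages = ring_size * segs_per_req;          /* 352 */
	unsigned int tracking_bytes = max_pages * gnt_struct_size;  /* 16896 */

	printf("max persistently mapped pages: %u\n", max_pages);
	printf("bytes of tracking structures:  %u\n", tracking_bytes);
	return 0;
}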
+/*
* Little helpful macro to figure out the index and virtual address of the
* pending_pages[..]. For each 'pending_req' we have up to
* BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
@@ -129,6 +161,94 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st);
+#define foreach_grant_safe(pos, n, rbtree, node) \
+ for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+ (n) = rb_next(&(pos)->node); \
+ &(pos)->node != NULL; \
+ (pos) = container_of(n, typeof(*(pos)), node), \
+ (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
+
+
+static void add_persistent_gnt(struct rb_root *root,
+ struct persistent_gnt *persistent_gnt)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct persistent_gnt *this;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ this = container_of(*new, struct persistent_gnt, node);
+
+ parent = *new;
+ if (persistent_gnt->gnt < this->gnt)
+ new = &((*new)->rb_left);
+ else if (persistent_gnt->gnt > this->gnt)
+ new = &((*new)->rb_right);
+ else {
+ pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n");
+ BUG();
+ }
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&(persistent_gnt->node), parent, new);
+ rb_insert_color(&(persistent_gnt->node), root);
+}
+
+static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
+ grant_ref_t gref)
+{
+ struct persistent_gnt *data;
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ data = container_of(node, struct persistent_gnt, node);
+
+ if (gref < data->gnt)
+ node = node->rb_left;
+ else if (gref > data->gnt)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void free_persistent_gnts(struct rb_root *root, unsigned int num)
+{
+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct persistent_gnt *persistent_gnt;
+ struct rb_node *n;
+ int ret = 0;
+ int segs_to_unmap = 0;
+
+ foreach_grant_safe(persistent_gnt, n, root, node) {
+ BUG_ON(persistent_gnt->handle ==
+ BLKBACK_INVALID_HANDLE);
+ gnttab_set_unmap_op(&unmap[segs_to_unmap],
+ (unsigned long) pfn_to_kaddr(page_to_pfn(
+ persistent_gnt->page)),
+ GNTMAP_host_map,
+ persistent_gnt->handle);
+
+ pages[segs_to_unmap] = persistent_gnt->page;
+
+ if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
+ !rb_next(&persistent_gnt->node)) {
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
+ BUG_ON(ret);
+ segs_to_unmap = 0;
+ }
+
+ rb_erase(&persistent_gnt->node, root);
+ kfree(persistent_gnt);
+ num--;
+ }
+ BUG_ON(num != 0);
+}
+
/*
* Retrieve from the 'pending_reqs' a free pending_req structure to be used.
*/
@@ -302,6 +422,14 @@ int xen_blkif_schedule(void *arg)
print_stats(blkif);
}
+ /* Free all persistent grant pages */
+ if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
+ free_persistent_gnts(&blkif->persistent_gnts,
+ blkif->persistent_gnt_c);
+
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+ blkif->persistent_gnt_c = 0;
+
if (log_stats)
print_stats(blkif);
@@ -328,6 +456,8 @@ static void xen_blkbk_unmap(struct pending_req *req)
int ret;
for (i = 0; i < req->nr_pages; i++) {
+ if (!test_bit(i, req->unmap_seg))
+ continue;
handle = pending_handle(req, i);
if (handle == BLKBACK_INVALID_HANDLE)
continue;
@@ -344,12 +474,26 @@ static void xen_blkbk_unmap(struct pending_req *req)
static int xen_blkbk_map(struct blkif_request *req,
struct pending_req *pending_req,
- struct seg_buf seg[])
+ struct seg_buf seg[],
+ struct page *pages[])
{
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- int i;
+ struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct persistent_gnt *persistent_gnt = NULL;
+ struct xen_blkif *blkif = pending_req->blkif;
+ phys_addr_t addr = 0;
+ int i, j;
+ bool new_map;
int nseg = req->u.rw.nr_segments;
+ int segs_to_map = 0;
int ret = 0;
+ int use_persistent_gnts;
+
+ use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
+
+ BUG_ON(blkif->persistent_gnt_c >
+ max_mapped_grant_pages(pending_req->blkif->blk_protocol));
/*
* Fill out preq.nr_sects with proper amount of sectors, and setup
@@ -359,36 +503,146 @@ static int xen_blkbk_map(struct blkif_request *req,
for (i = 0; i < nseg; i++) {
uint32_t flags;
- flags = GNTMAP_host_map;
- if (pending_req->operation != BLKIF_OP_READ)
- flags |= GNTMAP_readonly;
- gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
- req->u.rw.seg[i].gref,
- pending_req->blkif->domid);
+ if (use_persistent_gnts)
+ persistent_gnt = get_persistent_gnt(
+ &blkif->persistent_gnts,
+ req->u.rw.seg[i].gref);
+
+ if (persistent_gnt) {
+ /*
+ * We are using persistent grants and
+ * the grant is already mapped
+ */
+ new_map = false;
+ } else if (use_persistent_gnts &&
+ blkif->persistent_gnt_c <
+ max_mapped_grant_pages(blkif->blk_protocol)) {
+ /*
+ * We are using persistent grants, the grant is
+ * not mapped but we have room for it
+ */
+ new_map = true;
+ persistent_gnt = kmalloc(
+ sizeof(struct persistent_gnt),
+ GFP_KERNEL);
+ if (!persistent_gnt)
+ return -ENOMEM;
+ persistent_gnt->page = alloc_page(GFP_KERNEL);
+ if (!persistent_gnt->page) {
+ kfree(persistent_gnt);
+ return -ENOMEM;
+ }
+ persistent_gnt->gnt = req->u.rw.seg[i].gref;
+ persistent_gnt->handle = BLKBACK_INVALID_HANDLE;
+
+ pages_to_gnt[segs_to_map] =
+ persistent_gnt->page;
+ addr = (unsigned long) pfn_to_kaddr(
+ page_to_pfn(persistent_gnt->page));
+
+ add_persistent_gnt(&blkif->persistent_gnts,
+ persistent_gnt);
+ blkif->persistent_gnt_c++;
+ pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+ persistent_gnt->gnt, blkif->persistent_gnt_c,
+ max_mapped_grant_pages(blkif->blk_protocol));
+ } else {
+ /*
+ * We are either using persistent grants and
+ * hit the maximum limit of grants mapped,
+ * or we are not using persistent grants.
+ */
+ if (use_persistent_gnts &&
+ !blkif->vbd.overflow_max_grants) {
+ blkif->vbd.overflow_max_grants = 1;
+ pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+ blkif->domid, blkif->vbd.handle);
+ }
+ new_map = true;
+ pages[i] = blkbk->pending_page(pending_req, i);
+ addr = vaddr(pending_req, i);
+ pages_to_gnt[segs_to_map] =
+ blkbk->pending_page(pending_req, i);
+ }
+
+ if (persistent_gnt) {
+ pages[i] = persistent_gnt->page;
+ persistent_gnts[i] = persistent_gnt;
+ } else {
+ persistent_gnts[i] = NULL;
+ }
+
+ if (new_map) {
+ flags = GNTMAP_host_map;
+ if (!persistent_gnt &&
+ (pending_req->operation != BLKIF_OP_READ))
+ flags |= GNTMAP_readonly;
+ gnttab_set_map_op(&map[segs_to_map++], addr,
+ flags, req->u.rw.seg[i].gref,
+ blkif->domid);
+ }
}
- ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
- BUG_ON(ret);
+ if (segs_to_map) {
+ ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
+ BUG_ON(ret);
+ }
/*
* Now swizzle the MFN in our domain with the MFN from the other domain
* so that when we access vaddr(pending_req,i) it has the contents of
* the page from the other domain.
*/
- for (i = 0; i < nseg; i++) {
- if (unlikely(map[i].status != 0)) {
- pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
- map[i].handle = BLKBACK_INVALID_HANDLE;
- ret |= 1;
+ bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ for (i = 0, j = 0; i < nseg; i++) {
+ if (!persistent_gnts[i] ||
+ persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) {
+ /* This is a newly mapped grant */
+ BUG_ON(j >= segs_to_map);
+ if (unlikely(map[j].status != 0)) {
+ pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+ map[j].handle = BLKBACK_INVALID_HANDLE;
+ ret |= 1;
+ if (persistent_gnts[i]) {
+ rb_erase(&persistent_gnts[i]->node,
+ &blkif->persistent_gnts);
+ blkif->persistent_gnt_c--;
+ kfree(persistent_gnts[i]);
+ persistent_gnts[i] = NULL;
+ }
+ }
+ }
+ if (persistent_gnts[i]) {
+ if (persistent_gnts[i]->handle ==
+ BLKBACK_INVALID_HANDLE) {
+ /*
+ * If this is a new persistent grant
+ * save the handle
+ */
+ persistent_gnts[i]->handle = map[j].handle;
+ persistent_gnts[i]->dev_bus_addr =
+ map[j++].dev_bus_addr;
+ }
+ pending_handle(pending_req, i) =
+ persistent_gnts[i]->handle;
+
+ if (ret)
+ continue;
+
+ seg[i].buf = persistent_gnts[i]->dev_bus_addr |
+ (req->u.rw.seg[i].first_sect << 9);
+ } else {
+ pending_handle(pending_req, i) = map[j].handle;
+ bitmap_set(pending_req->unmap_seg, i, 1);
+
+ if (ret) {
+ j++;
+ continue;
+ }
+
+ seg[i].buf = map[j++].dev_bus_addr |
+ (req->u.rw.seg[i].first_sect << 9);
}
-
- pending_handle(pending_req, i) = map[i].handle;
-
- if (ret)
- continue;
-
- seg[i].buf = map[i].dev_bus_addr |
- (req->u.rw.seg[i].first_sect << 9);
}
return ret;
}
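The mapping path above keeps a per-request bitmap so that completion only unmaps freshly mapped segments, while segments backed by persistent grants stay mapped for reuse. The stand-alone sketch below shows that bookkeeping pattern with a plain bitmask instead of the kernel's DECLARE_BITMAP()/bitmap_set() helpers; all names, and the even/odd "already mapped" rule, are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SEGS 11	/* mirrors BLKIF_MAX_SEGMENTS_PER_REQUEST */

struct fake_req {
	uint32_t unmap_mask;	/* bit i set => segment i was newly mapped */
};

/* Pretend lookup: even grant refs are "already persistently mapped". */
static bool already_mapped(unsigned int gref)
{
	return (gref % 2) == 0;
}

static void map_segments(struct fake_req *req, const unsigned int *grefs,
			 unsigned int nseg)
{
	req->unmap_mask = 0;
	for (unsigned int i = 0; i < nseg; i++) {
		if (already_mapped(grefs[i]))
			continue;		/* reuse, nothing to undo later */
		/* ...a real implementation would map the grant here... */
		req->unmap_mask |= 1u << i;	/* remember to unmap at completion */
	}
}

static void unmap_segments(const struct fake_req *req, unsigned int nseg)
{
	for (unsigned int i = 0; i < nseg; i++) {
		if (!(req->unmap_mask & (1u << i)))
			continue;		/* persistent grant: keep it mapped */
		printf("unmapping segment %u\n", i);
	}
}

int main(void)
{
	unsigned int grefs[MAX_SEGS] = { 10, 11, 12, 13, 14 };
	struct fake_req req;

	map_segments(&req, grefs, 5);
	unmap_segments(&req, 5);
	return 0;
}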
@@ -591,6 +845,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
int operation;
struct blk_plug plug;
bool drain = false;
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
switch (req->operation) {
case BLKIF_OP_READ:
@@ -677,7 +932,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
- if (xen_blkbk_map(req, pending_req, seg))
+ if (xen_blkbk_map(req, pending_req, seg, pages))
goto fail_flush;
/*
@@ -689,7 +944,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
(bio_add_page(bio,
- blkbk->pending_page(pending_req, i),
+ pages[i],
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9a54623e52d7..6072390c7f57 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -34,6 +34,7 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
+#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
@@ -160,10 +161,21 @@ struct xen_vbd {
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
+ unsigned int feature_gnt_persistent:1;
+ unsigned int overflow_max_grants:1;
};
struct backend_info;
+
+struct persistent_gnt {
+ struct page *page;
+ grant_ref_t gnt;
+ grant_handle_t handle;
+ uint64_t dev_bus_addr;
+ struct rb_node node;
+};
+
struct xen_blkif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -190,6 +202,10 @@ struct xen_blkif {
struct task_struct *xenblkd;
unsigned int waiting_reqs;
+ /* tree to store persistent grants */
+ struct rb_root persistent_gnts;
+ unsigned int persistent_gnt_c;
+
/* statistics */
unsigned long st_print;
int st_rd_req;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f58434c2617c..63980722db41 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -117,6 +117,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
atomic_set(&blkif->drain, 0);
blkif->st_print = jiffies;
init_waitqueue_head(&blkif->waiting_to_free);
+ blkif->persistent_gnts.rb_node = NULL;
return blkif;
}
@@ -672,6 +673,13 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
+ dev->nodename);
+ goto abort;
+ }
+
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
if (err) {
@@ -720,6 +728,7 @@ static int connect_ring(struct backend_info *be)
struct xenbus_device *dev = be->dev;
unsigned long ring_ref;
unsigned int evtchn;
+ unsigned int pers_grants;
char protocol[64] = "";
int err;
@@ -749,8 +758,18 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -1;
}
- pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n",
- ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "feature-persistent", "%u",
+ &pers_grants, NULL);
+ if (err)
+ pers_grants = 0;
+
+ be->blkif->vbd.feature_gnt_persistent = pers_grants;
+ be->blkif->vbd.overflow_max_grants = 0;
+
+ pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
+ ring_ref, evtchn, be->blkif->blk_protocol, protocol,
+ pers_grants ? "persistent grants" : "");
/* Map the shared frame, irq etc. */
err = xen_blkif_map(be->blkif, ring_ref, evtchn);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 007db8986e84..11043c18ac5a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -44,6 +44,7 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
+#include <linux/llist.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -64,10 +65,17 @@ enum blkif_state {
BLKIF_STATE_SUSPENDED,
};
+struct grant {
+ grant_ref_t gref;
+ unsigned long pfn;
+ struct llist_node node;
+};
+
struct blk_shadow {
struct blkif_request req;
struct request *request;
unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
static DEFINE_MUTEX(blkfront_mutex);
@@ -97,6 +105,8 @@ struct blkfront_info
struct work_struct work;
struct gnttab_free_callback callback;
struct blk_shadow shadow[BLK_RING_SIZE];
+ struct llist_head persistent_gnts;
+ unsigned int persistent_gnts_c;
unsigned long shadow_free;
unsigned int feature_flush;
unsigned int flush_op;
@@ -104,6 +114,7 @@ struct blkfront_info
unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int feature_persistent:1;
int is_ready;
};
@@ -287,21 +298,36 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref;
+
+ /*
+ * Used to store if we are able to queue the request by just using
+ * existing persistent grants, or if we have to get new grants,
+ * as there are not sufficiently many free.
+ */
+ bool new_persistent_gnts;
grant_ref_t gref_head;
+ struct page *granted_page;
+ struct grant *gnt_list_entry = NULL;
struct scatterlist *sg;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
- if (gnttab_alloc_grant_references(
- BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
- gnttab_request_free_callback(
- &info->callback,
- blkif_restart_queue_callback,
- info,
- BLKIF_MAX_SEGMENTS_PER_REQUEST);
- return 1;
- }
+ /* Check if we have enough grants to allocate a request */
+ if (info->persistent_gnts_c < BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ new_persistent_gnts = 1;
+ if (gnttab_alloc_grant_references(
+ BLKIF_MAX_SEGMENTS_PER_REQUEST - info->persistent_gnts_c,
+ &gref_head) < 0) {
+ gnttab_request_free_callback(
+ &info->callback,
+ blkif_restart_queue_callback,
+ info,
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ return 1;
+ }
+ } else
+ new_persistent_gnts = 0;
/* Fill out a communications ring structure. */
ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
@@ -341,18 +367,73 @@ static int blkif_queue_request(struct request *req)
BLKIF_MAX_SEGMENTS_PER_REQUEST);
for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
- buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
- /* install a grant reference. */
- ref = gnttab_claim_grant_reference(&gref_head);
- BUG_ON(ref == -ENOSPC);
- gnttab_grant_foreign_access_ref(
- ref,
+ if (info->persistent_gnts_c) {
+ BUG_ON(llist_empty(&info->persistent_gnts));
+ gnt_list_entry = llist_entry(
+ llist_del_first(&info->persistent_gnts),
+ struct grant, node);
+
+ ref = gnt_list_entry->gref;
+ buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+ info->persistent_gnts_c--;
+ } else {
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ gnt_list_entry =
+ kmalloc(sizeof(struct grant),
+ GFP_ATOMIC);
+ if (!gnt_list_entry)
+ return -ENOMEM;
+
+ granted_page = alloc_page(GFP_ATOMIC);
+ if (!granted_page) {
+ kfree(gnt_list_entry);
+ return -ENOMEM;
+ }
+
+ gnt_list_entry->pfn =
+ page_to_pfn(granted_page);
+ gnt_list_entry->gref = ref;
+
+ buffer_mfn = pfn_to_mfn(page_to_pfn(
+ granted_page));
+ gnttab_grant_foreign_access_ref(ref,
info->xbdev->otherend_id,
- buffer_mfn,
- rq_data_dir(req));
+ buffer_mfn, 0);
+ }
+
+ info->shadow[id].grants_used[i] = gnt_list_entry;
+
+ if (rq_data_dir(req)) {
+ char *bvec_data;
+ void *shared_data;
+
+ BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+
+ shared_data = kmap_atomic(
+ pfn_to_page(gnt_list_entry->pfn));
+ bvec_data = kmap_atomic(sg_page(sg));
+
+ /*
+ * this does not wipe data stored outside the
+ * range sg->offset..sg->offset+sg->length.
+ * Therefore, blkback *could* see data from
+ * previous requests. This is OK as long as
+ * persistent grants are shared with just one
+ * domain. It may need refactoring if this
+ * changes
+ */
+ memcpy(shared_data + sg->offset,
+ bvec_data + sg->offset,
+ sg->length);
+
+ kunmap_atomic(bvec_data);
+ kunmap_atomic(shared_data);
+ }
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
ring_req->u.rw.seg[i] =
@@ -368,7 +449,8 @@ static int blkif_queue_request(struct request *req)
/* Keep a private copy so we can reissue requests when recovering. */
info->shadow[id].req = *ring_req;
- gnttab_free_grant_references(gref_head);
+ if (new_persistent_gnts)
+ gnttab_free_grant_references(gref_head);
return 0;
}
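On the frontend side, the comment near the top of blkif_queue_request() explains that new grant references are reserved only when the cache of persistent grants cannot cover a full request. A minimal sketch of that decision, with invented names and no Xen APIs, might look like this:

#include <stdio.h>

#define SEGS_PER_REQ 11	/* mirrors BLKIF_MAX_SEGMENTS_PER_REQUEST */

struct frontend {
	unsigned int cached_grants;	/* analogue of persistent_gnts_c */
};

/*
 * How many new grant references does the next request need?  Zero means
 * it can be built entirely from cached persistent grants; otherwise only
 * the shortfall is reserved (the real driver defers the request if that
 * reservation fails).
 */
static unsigned int grants_to_reserve(const struct frontend *fe)
{
	if (fe->cached_grants >= SEGS_PER_REQ)
		return 0;
	return SEGS_PER_REQ - fe->cached_grants;
}

int main(void)
{
	struct frontend fe = { .cached_grants = 4 };

	printf("need %u new grant references\n", grants_to_reserve(&fe));
	return 0;
}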
@@ -480,12 +562,13 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_flush(info->rq, info->feature_flush);
- printk(KERN_INFO "blkfront: %s: %s: %s\n",
+ printk(KERN_INFO "blkfront: %s: %s: %s %s\n",
info->gd->disk_name,
info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
"flush diskcache" : "barrier or flush"),
- info->feature_flush ? "enabled" : "disabled");
+ info->feature_flush ? "enabled" : "disabled",
+ info->feature_persistent ? "using persistent grants" : "");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -707,6 +790,10 @@ static void blkif_restart_queue(struct work_struct *work)
static void blkif_free(struct blkfront_info *info, int suspend)
{
+ struct llist_node *all_gnts;
+ struct grant *persistent_gnt;
+ struct llist_node *n;
+
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock);
info->connected = suspend ?
@@ -714,6 +801,18 @@ static void blkif_free(struct blkfront_info *info, int suspend)
/* No more blkif_request(). */
if (info->rq)
blk_stop_queue(info->rq);
+
+ /* Remove all persistent grants */
+ if (info->persistent_gnts_c) {
+ all_gnts = llist_del_all(&info->persistent_gnts);
+ llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
+ gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+ __free_page(pfn_to_page(persistent_gnt->pfn));
+ kfree(persistent_gnt);
+ }
+ info->persistent_gnts_c = 0;
+ }
+
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irq(&info->io_lock);
@@ -734,13 +833,44 @@ static void blkif_free(struct blkfront_info *info, int suspend)
}
-static void blkif_completion(struct blk_shadow *s)
+static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+ struct blkif_response *bret)
{
- int i;
- /* Do not let BLKIF_OP_DISCARD as nr_segment is in the same place
- * flag. */
- for (i = 0; i < s->req.u.rw.nr_segments; i++)
- gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
+ int i = 0;
+ struct bio_vec *bvec;
+ struct req_iterator iter;
+ unsigned long flags;
+ char *bvec_data;
+ void *shared_data;
+ unsigned int offset = 0;
+
+ if (bret->operation == BLKIF_OP_READ) {
+ /*
+ * Copy the data received from the backend into the bvec.
+ * Since bv_offset can be different from 0, and bv_len different
+ * from PAGE_SIZE, we have to keep track of the current offset,
+ * to be sure we are copying the data from the right shared page.
+ */
+ rq_for_each_segment(bvec, s->request, iter) {
+ BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
+ if (bvec->bv_offset < offset)
+ i++;
+ BUG_ON(i >= s->req.u.rw.nr_segments);
+ shared_data = kmap_atomic(
+ pfn_to_page(s->grants_used[i]->pfn));
+ bvec_data = bvec_kmap_irq(bvec, &flags);
+ memcpy(bvec_data, shared_data + bvec->bv_offset,
+ bvec->bv_len);
+ bvec_kunmap_irq(bvec_data, &flags);
+ kunmap_atomic(shared_data);
+ offset = bvec->bv_offset + bvec->bv_len;
+ }
+ }
+ /* Add the persistent grant into the list of free grants */
+ for (i = 0; i < s->req.u.rw.nr_segments; i++) {
+ llist_add(&s->grants_used[i]->node, &info->persistent_gnts);
+ info->persistent_gnts_c++;
+ }
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -783,7 +913,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
req = info->shadow[id].request;
if (bret->operation != BLKIF_OP_DISCARD)
- blkif_completion(&info->shadow[id]);
+ blkif_completion(&info->shadow[id], info, bret);
if (add_id_to_freelist(info, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
@@ -942,6 +1072,11 @@ again:
message = "writing protocol";
goto abort_transaction;
}
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-persistent", "%u", 1);
+ if (err)
+ dev_warn(&dev->dev,
+ "writing persistent grants feature to xenbus");
err = xenbus_transaction_end(xbt, 0);
if (err) {
@@ -1029,6 +1164,8 @@ static int blkfront_probe(struct xenbus_device *dev,
spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
+ init_llist_head(&info->persistent_gnts);
+ info->persistent_gnts_c = 0;
info->connected = BLKIF_STATE_DISCONNECTED;
INIT_WORK(&info->work, blkif_restart_queue);
@@ -1093,7 +1230,7 @@ static int blkif_recover(struct blkfront_info *info)
req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
- rq_data_dir(info->shadow[req->u.rw.id].request));
+ 0);
}
info->shadow[req->u.rw.id].req = *req;
@@ -1225,7 +1362,7 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned long sector_size;
unsigned int binfo;
int err;
- int barrier, flush, discard;
+ int barrier, flush, discard, persistent;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -1303,6 +1440,14 @@ static void blkfront_connect(struct blkfront_info *info)
if (!err && discard)
blkfront_setup_discard(info);
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%u", &persistent,
+ NULL);
+ if (err)
+ info->feature_persistent = 0;
+ else
+ info->feature_persistent = persistent;
+
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 1a17e338735e..1f38643173ca 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -961,7 +961,7 @@ static const struct block_device_operations ace_fops = {
/* --------------------------------------------------------------------
* SystemACE device setup/teardown code
*/
-static int __devinit ace_setup(struct ace_device *ace)
+static int ace_setup(struct ace_device *ace)
{
u16 version;
u16 val;
@@ -1074,7 +1074,7 @@ err_ioremap:
return -ENOMEM;
}
-static void __devexit ace_teardown(struct ace_device *ace)
+static void ace_teardown(struct ace_device *ace)
{
if (ace->gd) {
del_gendisk(ace->gd);
@@ -1092,9 +1092,8 @@ static void __devexit ace_teardown(struct ace_device *ace)
iounmap(ace->baseaddr);
}
-static int __devinit
-ace_alloc(struct device *dev, int id, resource_size_t physaddr,
- int irq, int bus_width)
+static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
+ int irq, int bus_width)
{
struct ace_device *ace;
int rc;
@@ -1135,7 +1134,7 @@ err_noreg:
return rc;
}
-static void __devexit ace_free(struct device *dev)
+static void ace_free(struct device *dev)
{
struct ace_device *ace = dev_get_drvdata(dev);
dev_dbg(dev, "ace_free(%p)\n", dev);
@@ -1151,7 +1150,7 @@ static void __devexit ace_free(struct device *dev)
* Platform Bus Support
*/
-static int __devinit ace_probe(struct platform_device *dev)
+static int ace_probe(struct platform_device *dev)
{
resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
@@ -1182,7 +1181,7 @@ static int __devinit ace_probe(struct platform_device *dev)
/*
* Platform bus remove() method
*/
-static int __devexit ace_remove(struct platform_device *dev)
+static int ace_remove(struct platform_device *dev)
{
ace_free(&dev->dev);
return 0;
@@ -1190,7 +1189,7 @@ static int __devexit ace_remove(struct platform_device *dev)
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
-static const struct of_device_id ace_of_match[] __devinitconst = {
+static const struct of_device_id ace_of_match[] = {
{ .compatible = "xlnx,opb-sysace-1.00.b", },
{ .compatible = "xlnx,opb-sysace-1.00.c", },
{ .compatible = "xlnx,xps-sysace-1.00.a", },
@@ -1204,7 +1203,7 @@ MODULE_DEVICE_TABLE(of, ace_of_match);
static struct platform_driver ace_platform_driver = {
.probe = ace_probe,
- .remove = __devexit_p(ace_remove),
+ .remove = ace_remove,
.driver = {
.owner = THIS_MODULE,
.name = "xsysace",
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index e9f203eadb1f..fdfd61a2d523 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -26,6 +26,7 @@ config BT_HCIBTSDIO
config BT_HCIUART
tristate "HCI UART driver"
+ depends on TTY
help
Bluetooth HCI UART driver.
This driver is required if you want to use Bluetooth devices with
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b00000e8aef6..a8a41e07a221 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -77,10 +77,15 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x04CA, 0x3006) },
+ { USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0489, 0xe057) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x0489, 0xe04e) },
+ { USB_DEVICE(0x0489, 0xe056) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -104,10 +109,15 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@@ -339,7 +349,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
ret = ath3k_get_state(udev, &fw_state);
if (ret < 0) {
- BT_ERR("Can't get state to change to load configration err");
+ BT_ERR("Can't get state to change to load configuration err");
return -EBUSY;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a1d4ede5b892..7e351e345476 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -135,10 +135,15 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index bbec35d21fe5..0f51ed687dc8 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -6,6 +6,7 @@ menu "Bus devices"
config OMAP_OCP2SCP
tristate "OMAP OCP2SCP DRIVER"
+ depends on ARCH_OMAP2PLUS
help
Driver to enable ocp2scp module which transforms ocp interface
protocol to scp protocol. In OMAP4, USB PHY is connected via
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 0c48b0e05ed6..fe7191663bbd 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -52,7 +52,7 @@ static int ocp2scp_remove_devices(struct device *dev, void *c)
return 0;
}
-static int __devinit omap_ocp2scp_probe(struct platform_device *pdev)
+static int omap_ocp2scp_probe(struct platform_device *pdev)
{
int ret;
unsigned res_cnt, i;
@@ -116,7 +116,7 @@ err0:
return ret;
}
-static int __devexit omap_ocp2scp_remove(struct platform_device *pdev)
+static int omap_ocp2scp_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
@@ -134,7 +134,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
static struct platform_driver omap_ocp2scp_driver = {
.probe = omap_ocp2scp_probe,
- .remove = __devexit_p(omap_ocp2scp_remove),
+ .remove = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
.owner = THIS_MODULE,
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index ab911a33f8a8..feeecae623f6 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -128,7 +128,7 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
return IRQ_HANDLED;
}
-static int __devinit omap4_l3_probe(struct platform_device *pdev)
+static int omap4_l3_probe(struct platform_device *pdev)
{
static struct omap4_l3 *l3;
struct resource *res;
@@ -219,7 +219,7 @@ err0:
return ret;
}
-static int __devexit omap4_l3_remove(struct platform_device *pdev)
+static int omap4_l3_remove(struct platform_device *pdev)
{
struct omap4_l3 *l3 = platform_get_drvdata(pdev);
@@ -245,7 +245,7 @@ MODULE_DEVICE_TABLE(of, l3_noc_match);
static struct platform_driver omap4_l3_driver = {
.probe = omap4_l3_probe,
- .remove = __devexit_p(omap4_l3_remove),
+ .remove = omap4_l3_remove,
.driver = {
.name = "omap_l3_noc",
.owner = THIS_MODULE,
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 75d485afe56c..d59cdcb8fe39 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -557,7 +557,7 @@ static irqreturn_t gdrom_dma_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit gdrom_set_interrupt_handlers(void)
+static int gdrom_set_interrupt_handlers(void)
{
int err;
@@ -681,7 +681,7 @@ static void gdrom_request(struct request_queue *rq)
}
/* Print string identifying GD ROM device */
-static int __devinit gdrom_outputversion(void)
+static int gdrom_outputversion(void)
{
struct gdrom_id *id;
char *model_name, *manuf_name, *firmw_ver;
@@ -715,7 +715,7 @@ free_id:
}
/* set the default mode for DMA transfer */
-static int __devinit gdrom_init_dma_mode(void)
+static int gdrom_init_dma_mode(void)
{
__raw_writeb(0x13, GDROM_ERROR_REG);
__raw_writeb(0x22, GDROM_INTSEC_REG);
@@ -736,7 +736,7 @@ static int __devinit gdrom_init_dma_mode(void)
return 0;
}
-static void __devinit probe_gdrom_setupcd(void)
+static void probe_gdrom_setupcd(void)
{
gd.cd_info->ops = &gdrom_ops;
gd.cd_info->capacity = 1;
@@ -745,7 +745,7 @@ static void __devinit probe_gdrom_setupcd(void)
CDC_SELECT_DISC;
}
-static void __devinit probe_gdrom_setupdisk(void)
+static void probe_gdrom_setupdisk(void)
{
gd.disk->major = gdrom_major;
gd.disk->first_minor = 1;
@@ -753,7 +753,7 @@ static void __devinit probe_gdrom_setupdisk(void)
strcpy(gd.disk->disk_name, GDROM_DEV_NAME);
}
-static int __devinit probe_gdrom_setupqueue(void)
+static int probe_gdrom_setupqueue(void)
{
blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
/* using DMA so memory will need to be contiguous */
@@ -768,7 +768,7 @@ static int __devinit probe_gdrom_setupqueue(void)
* register this as a block device and as compliant with the
* universal CD Rom driver interface
*/
-static int __devinit probe_gdrom(struct platform_device *devptr)
+static int probe_gdrom(struct platform_device *devptr)
{
int err;
/* Start the device */
@@ -838,7 +838,7 @@ probe_fail_no_mem:
return err;
}
-static int __devexit remove_gdrom(struct platform_device *devptr)
+static int remove_gdrom(struct platform_device *devptr)
{
flush_work(&work);
blk_cleanup_queue(gd.gdrom_rq);
@@ -854,7 +854,7 @@ static int __devexit remove_gdrom(struct platform_device *devptr)
static struct platform_driver gdrom_driver = {
.probe = probe_gdrom,
- .remove = __devexit_p(remove_gdrom),
+ .remove = remove_gdrom,
.driver = {
.name = GDROM_DEV_NAME,
},
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72bedad6bf8c..3bb6fa3930be 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -53,7 +53,7 @@ source "drivers/tty/serial/Kconfig"
config TTY_PRINTK
bool "TTY driver to output user messages via printk"
- depends on EXPERT
+ depends on EXPERT && TTY
default n
---help---
If you say Y here, the support for writing user messages (i.e.
@@ -159,7 +159,7 @@ source "drivers/tty/hvc/Kconfig"
config VIRTIO_CONSOLE
tristate "Virtio console"
- depends on VIRTIO
+ depends on VIRTIO && TTY
select HVC_DRIVER
help
Virtio console for use with lguest and other hypervisors.
@@ -392,6 +392,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
+ depends on TTY
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special
@@ -439,7 +440,7 @@ source "drivers/char/pcmcia/Kconfig"
config MWAVE
tristate "ACP Modem (Mwave) support"
- depends on X86
+ depends on X86 && TTY
select SERIAL_8250
---help---
The ACP modem (Mwave) for Linux is a WinModem. It is composed of a
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 478493543b32..443cd6751ca2 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -299,8 +299,7 @@ static struct agp_device_ids ali_agp_device_ids[] =
{ }, /* dummy final entry, always present */
};
-static int __devinit agp_ali_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = ali_agp_device_ids;
struct agp_bridge_data *bridge;
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 1b2101160e98..779f0ab845a9 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -405,8 +405,8 @@ static struct agp_device_ids amd_agp_device_ids[] =
{ }, /* dummy final entry, always present */
};
-static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_amdk7_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 061d46209b1a..d79d692d05b8 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -240,7 +240,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
};
/* Some basic sanity checks for the aperture. */
-static int __devinit agp_aperture_valid(u64 aper, u32 size)
+static int agp_aperture_valid(u64 aper, u32 size)
{
if (!aperture_valid(aper, size, 32*1024*1024))
return 0;
@@ -267,8 +267,7 @@ static int __devinit agp_aperture_valid(u64 aper, u32 size)
* to allocate that much memory. But at least error out cleanly instead of
* crashing.
*/
-static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
- u16 cap)
+static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
{
u32 aper_low, aper_hi;
u64 aper, nb_aper;
@@ -326,7 +325,7 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
return 0;
}
-static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
+static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
int i;
@@ -352,7 +351,7 @@ static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
}
/* Handle AMD 8151 quirks */
-static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
+static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
{
char *revstring;
@@ -390,7 +389,7 @@ static const struct aper_size_info_32 uli_sizes[7] =
{8, 2048, 1, 4},
{4, 1024, 0, 3}
};
-static int __devinit uli_agp_init(struct pci_dev *pdev)
+static int uli_agp_init(struct pci_dev *pdev)
{
u32 httfea,baseaddr,enuscr;
struct pci_dev *dev1;
@@ -513,8 +512,8 @@ put:
return ret;
}
-static int __devinit agp_amd64_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_amd64_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index ed0433576e74..0628d7b65c71 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -490,8 +490,7 @@ static struct agp_device_ids ati_agp_device_ids[] =
{ }, /* dummy final entry, always present */
};
-static int __devinit agp_ati_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = ati_agp_device_ids;
struct agp_bridge_data *bridge;
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 55f3e33a309f..6974d5032053 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -343,8 +343,8 @@ static const struct agp_bridge_driver efficeon_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_efficeon_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index d328b662e50d..15b240ea4848 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -587,8 +587,8 @@ const struct agp_bridge_driver intel_i460_driver = {
.cant_use_aperture = true,
};
-static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_intel_i460_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
@@ -637,7 +637,7 @@ static struct pci_driver agp_intel_i460_pci_driver = {
.name = "agpgart-intel-i460",
.id_table = agp_intel_i460_pci_table,
.probe = agp_intel_i460_probe,
- .remove = __devexit_p(agp_intel_i460_remove),
+ .remove = agp_intel_i460_remove,
};
static int __init agp_intel_i460_init(void)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index f3a8f52b5a00..a426ee1f57a6 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -732,8 +732,8 @@ static const struct intel_agp_driver_description {
{ 0, NULL, NULL }
};
-static int __devinit agp_intel_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_intel_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr = 0;
@@ -912,7 +912,7 @@ static struct pci_driver agp_intel_pci_driver = {
.name = "agpgart-intel",
.id_table = agp_intel_pci_table,
.probe = agp_intel_probe,
- .remove = __devexit_p(agp_intel_remove),
+ .remove = agp_intel_remove,
#ifdef CONFIG_PM
.resume = agp_intel_resume,
#endif
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6ec0fff79bc2..1042c1b90376 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -62,12 +62,6 @@
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED 0x00000002
-#define HSW_PTE_UNCACHED 0x00000000
-#define GEN6_PTE_LLC 0x00000004
-#define GEN6_PTE_LLC_MLC 0x00000006
-#define GEN6_PTE_GFDT 0x00000008
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
@@ -97,7 +91,6 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -148,29 +141,6 @@
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
-#define SNB_GTT_SIZE_0M (0 << 8)
-#define SNB_GTT_SIZE_1M (1 << 8)
-#define SNB_GTT_SIZE_2M (2 << 8)
-#define SNB_GTT_SIZE_MASK (3 << 8)
-
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
@@ -219,66 +189,5 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 38390f7c6ab6..dbd901e94ea6 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
stolen_size = 0;
break;
}
- } else if (INTEL_GTT_GEN == 6) {
- /*
- * SandyBridge has new memory control reg at 0x50.w
- */
- u16 snb_gmch_ctl;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- case SNB_GMCH_GMS_STOLEN_32M:
- stolen_size = MB(32);
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- stolen_size = MB(64);
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- stolen_size = MB(96);
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- stolen_size = MB(128);
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- stolen_size = MB(160);
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- stolen_size = MB(192);
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- stolen_size = MB(224);
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- stolen_size = MB(256);
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- stolen_size = MB(288);
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- stolen_size = MB(320);
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- stolen_size = MB(352);
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- stolen_size = MB(384);
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- stolen_size = MB(416);
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- stolen_size = MB(448);
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- stolen_size = MB(480);
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- stolen_size = MB(512);
- break;
- }
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
static unsigned int intel_gtt_total_entries(void)
{
- int size;
-
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
return i965_gtt_total_entries();
- else if (INTEL_GTT_GEN == 6) {
- u16 snb_gmch_ctl;
-
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- size = MB(0);
- break;
- case SNB_GTT_SIZE_1M:
- size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- size = MB(2);
- break;
- }
- return size/4;
- } else {
+ else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
{
u8 __iomem *reg;
- if (INTEL_GTT_GEN >= 6)
- return true;
-
if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static bool gen6_check_flags(unsigned int flags)
-{
- return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else {
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-
- writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
- u32 reg_addr;
+ u32 reg_addr, gtt_addr;
int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
- if (INTEL_GTT_GEN >= 7)
- size = MB(2);
-
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
- if (INTEL_GTT_GEN == 3) {
- u32 gtt_addr;
-
+ switch (INTEL_GTT_GEN) {
+ case 3:
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
- } else {
- u32 gtt_offset;
-
- switch (INTEL_GTT_GEN) {
- case 5:
- case 6:
- case 7:
- gtt_offset = MB(2);
- break;
- case 4:
- default:
- gtt_offset = KB(512);
- break;
- }
- intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ break;
+ case 5:
+ intel_private.gtt_bus_addr = reg_addr + MB(2);
+ break;
+ default:
+ intel_private.gtt_bus_addr = reg_addr + KB(512);
+ break;
}
if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = gen6_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = haswell_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
- .gen = 7,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = valleyview_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
-};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", &ironlake_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
- "ValleyView", &valleyview_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 66e0868000f4..62be3ec0da4b 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -332,8 +332,8 @@ static const struct agp_bridge_driver nvidia_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_nvidia_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index a18791d7718a..05b8d0241bde 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -270,7 +270,7 @@ const struct agp_bridge_driver sgi_tioca_driver = {
.num_aperture_sizes = 1,
};
-static int __devinit agp_sgi_init(void)
+static int agp_sgi_init(void)
{
unsigned int j;
struct tioca_kernel *info;
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 93d1d31f9d0c..79c838c434bc 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -154,7 +154,7 @@ static int sis_broken_chipsets[] = {
0 // terminator
};
-static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
+static void sis_get_driver(struct agp_bridge_data *bridge)
{
int i;
@@ -180,8 +180,7 @@ static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
}
-static int __devinit agp_sis_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 26020fb8d7a9..9b163b49d976 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -445,8 +445,8 @@ static const struct agp_bridge_driver sworks_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_serverworks_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
struct pci_dev *bridge_dev;
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 011967ad3eed..a56ee9bedd11 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -592,8 +592,8 @@ static struct agp_device_ids uninorth_agp_device_ids[] = {
},
};
-static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_uninorth_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_device_ids *devs = uninorth_agp_device_ids;
struct agp_bridge_data *bridge;
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 6818595bb863..74d3aa3773bf 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -438,8 +438,7 @@ static void check_via_agp3 (struct agp_bridge_data *bridge)
}
-static int __devinit agp_via_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = via_agp_device_ids;
struct agp_bridge_data *bridge;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index fe6d4be48296..e3f9a99b8522 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -1041,7 +1041,7 @@ static int hpet_acpi_add(struct acpi_device *device)
return hpet_alloc(&data);
}
-static int hpet_acpi_remove(struct acpi_device *device, int type)
+static int hpet_acpi_remove(struct acpi_device *device)
{
/* XXX need to unregister clocksource, dealloc mem, etc */
return -EINVAL;
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 5a4a6e70478b..7c73d4aca36b 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -138,7 +138,7 @@ static const struct dev_pm_ops atmel_trng_pm_ops = {
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
- .remove = __devexit_p(atmel_trng_remove),
+ .remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
.owner = THIS_MODULE,
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index ae95bcb18d4a..f343b7d0dfa1 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -61,7 +61,7 @@ static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
return 4;
}
-static int __devinit bcm63xx_rng_probe(struct platform_device *pdev)
+static int bcm63xx_rng_probe(struct platform_device *pdev)
{
struct resource *r;
struct clk *clk;
@@ -161,7 +161,7 @@ static int bcm63xx_rng_remove(struct platform_device *pdev)
static struct platform_driver bcm63xx_rng_driver = {
.probe = bcm63xx_rng_probe,
- .remove = __devexit_p(bcm63xx_rng_remove),
+ .remove = bcm63xx_rng_remove,
.driver = {
.name = "bcm63xx-rng",
.owner = THIS_MODULE,
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index bdc852ea7632..ac47631ab34f 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -101,9 +101,10 @@ static int exynos_read(struct hwrng *rng, void *buf,
return 4;
}
-static int __devinit exynos_rng_probe(struct platform_device *pdev)
+static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
+ struct resource *res;
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
@@ -120,10 +121,10 @@ static int __devinit exynos_rng_probe(struct platform_device *pdev)
return -ENOENT;
}
- exynos_rng->mem = devm_request_and_ioremap(&pdev->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
- if (!exynos_rng->mem)
- return -EBUSY;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ exynos_rng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(exynos_rng->mem))
+ return PTR_ERR(exynos_rng->mem);
platform_set_drvdata(pdev, exynos_rng);
@@ -162,7 +163,7 @@ static int exynos_rng_runtime_resume(struct device *dev)
}
-UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
+static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
exynos_rng_runtime_resume, NULL);
static struct platform_driver exynos_rng_driver = {
@@ -172,7 +173,7 @@ static struct platform_driver exynos_rng_driver = {
.pm = &exynos_rng_pm_ops,
},
.probe = exynos_rng_probe,
- .remove = __devexit_p(exynos_rng_remove),
+ .remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
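
The hwrng conversions above also move from devm_request_and_ioremap() to devm_ioremap_resource(); the newer helper returns an ERR_PTR on failure, so probes propagate PTR_ERR() instead of hard-coding -EBUSY or -ENOMEM. A minimal sketch of that idiom, with hypothetical names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* -EINVAL, -EBUSY or -ENOMEM from the helper */

	/* ... set up and register the device using 'base' ... */
	return 0;
}
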
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index d68a72a08b51..20b962e1d832 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -611,7 +611,7 @@ static void n2rng_work(struct work_struct *work)
schedule_delayed_work(&np->work, HZ * 2);
}
-static void __devinit n2rng_driver_version(void)
+static void n2rng_driver_version(void)
{
static int n2rng_version_printed;
@@ -620,7 +620,7 @@ static void __devinit n2rng_driver_version(void)
}
static const struct of_device_id n2rng_match[];
-static int __devinit n2rng_probe(struct platform_device *op)
+static int n2rng_probe(struct platform_device *op)
{
const struct of_device_id *match;
int multi_capable;
@@ -767,7 +767,7 @@ static struct platform_driver n2rng_driver = {
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
- .remove = __devexit_p(n2rng_remove),
+ .remove = n2rng_remove,
};
module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 5c34c092af71..1eada566ca70 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -56,7 +56,7 @@ static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
return sizeof(u32);
}
-static int __devinit octeon_rng_probe(struct platform_device *pdev)
+static int octeon_rng_probe(struct platform_device *pdev)
{
struct resource *res_ports;
struct resource *res_result;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 45e467dcc8c8..749dc16ca2cc 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -104,7 +104,7 @@ static struct hwrng omap_rng_ops = {
.data_read = omap_rng_data_read,
};
-static int __devinit omap_rng_probe(struct platform_device *pdev)
+static int omap_rng_probe(struct platform_device *pdev)
{
struct omap_rng_private_data *priv;
int ret;
@@ -124,9 +124,9 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
goto err_ioremap;
}
- priv->base = devm_request_and_ioremap(&pdev->dev, priv->mem_res);
- if (!priv->base) {
- ret = -ENOMEM;
+ priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
goto err_ioremap;
}
dev_set_drvdata(&pdev->dev, priv);
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index a1f70407cc9e..c6df5b29af08 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -94,7 +94,7 @@ static struct hwrng pasemi_rng = {
.data_read = pasemi_rng_data_read,
};
-static int __devinit rng_probe(struct platform_device *ofdev)
+static int rng_probe(struct platform_device *ofdev)
{
void __iomem *rng_regs;
struct device_node *rng_np = ofdev->dev.of_node;
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
index d4b24c1dd48e..973b95113edf 100644
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -181,7 +181,7 @@ static const struct dev_pm_ops picoxcell_trng_pm_ops = {
static struct platform_driver picoxcell_trng_driver = {
.probe = picoxcell_trng_probe,
- .remove = __devexit_p(picoxcell_trng_remove),
+ .remove = picoxcell_trng_remove,
.driver = {
.name = "picoxcell-trng",
.owner = THIS_MODULE,
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index af6506a69cd9..732c330805fd 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -90,7 +90,7 @@ static struct hwrng ppc4xx_rng = {
.data_read = ppc4xx_rng_data_read,
};
-static int __devinit ppc4xx_rng_probe(struct platform_device *dev)
+static int ppc4xx_rng_probe(struct platform_device *dev)
{
void __iomem *rng_regs;
int err = 0;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 3a1abc9417e4..849db199c02c 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -88,7 +88,7 @@ static struct hwrng timeriomem_rng_ops = {
.priv = 0,
};
-static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
+static int timeriomem_rng_probe(struct platform_device *pdev)
{
struct resource *res;
int ret;
@@ -146,7 +146,7 @@ static struct platform_driver timeriomem_rng_driver = {
.owner = THIS_MODULE,
},
.probe = timeriomem_rng_probe,
- .remove = __devexit_p(timeriomem_rng_remove),
+ .remove = timeriomem_rng_remove,
};
module_platform_driver(timeriomem_rng_driver);
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index de473ef3882b..30991989d65b 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -7,6 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -115,9 +116,9 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
if (!rngdev)
return -ENOMEM;
- rngdev->base = devm_request_and_ioremap(&dev->dev, r);
- if (!rngdev->base)
- return -EBUSY;
+ rngdev->base = devm_ioremap_resource(&dev->dev, r);
+ if (IS_ERR(rngdev->base))
+ return PTR_ERR(rngdev->base);
rngdev->rng.name = dev_name(&dev->dev);
rngdev->rng.data_present = tx4939_rng_data_present;
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 621f595f1a98..b65c10395959 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -147,7 +147,7 @@ static struct virtio_driver virtio_rng_driver = {
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtrng_probe,
- .remove = __devexit_p(virtrng_remove),
+ .remove = virtrng_remove,
#ifdef CONFIG_PM
.freeze = virtrng_freeze,
.restore = virtrng_restore,
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index cfdfecd5bc76..1c7fdcd22a98 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2243,7 +2243,7 @@ static const struct pnp_device_id pnp_dev_table[] = {
static struct pnp_driver ipmi_pnp_driver = {
.name = DEVICE_NAME,
.probe = ipmi_pnp_probe,
- .remove = __devexit_p(ipmi_pnp_remove),
+ .remove = ipmi_pnp_remove,
.id_table = pnp_dev_table,
};
#endif
@@ -2546,7 +2546,7 @@ static struct pci_driver ipmi_pci_driver = {
.name = DEVICE_NAME,
.id_table = ipmi_pci_devices,
.probe = ipmi_pci_probe,
- .remove = __devexit_p(ipmi_pci_remove),
+ .remove = ipmi_pci_remove,
};
#endif /* CONFIG_PCI */
@@ -2661,7 +2661,7 @@ static struct platform_driver ipmi_driver = {
.of_match_table = ipmi_match,
},
.probe = ipmi_probe,
- .remove = __devexit_p(ipmi_remove),
+ .remove = ipmi_remove,
};
static int wait_for_msg_done(struct smi_info *smi_info)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c6fa3bc2baa8..6f6e92a3102d 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -399,7 +399,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
{
unsigned long p = *ppos;
ssize_t low_count, read, sz;
- char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+ char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
read = 0;
@@ -527,7 +527,7 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
unsigned long p = *ppos;
ssize_t wrote = 0;
ssize_t virtr = 0;
- char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+ char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
if (p < (unsigned long) high_memory) {
@@ -595,7 +595,7 @@ static ssize_t write_port(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long i = *ppos;
- const char __user * tmp = buf;
+ const char __user *tmp = buf;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
@@ -729,7 +729,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
-static int open_port(struct inode * inode, struct file * filp)
+static int open_port(struct inode *inode, struct file *filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
@@ -898,7 +898,7 @@ static int __init chr_dev_init(void)
continue;
/*
- * Create /dev/port?
+ * Create /dev/port?
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 6614416a8623..2a166d56738a 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -7,7 +7,7 @@ menu "PCMCIA character devices"
config SYNCLINK_CS
tristate "SyncLink PC Card support"
- depends on PCMCIA
+ depends on PCMCIA && TTY
help
Enable support for the SyncLink PC Card serial adapter, running
asynchronous and HDLC communications up to 512Kbps. The port is
@@ -45,7 +45,7 @@ config CARDMAN_4040
config IPWIRELESS
tristate "IPWireless 3G UMTS PCMCIA card support"
- depends on PCMCIA && NETDEVICES
+ depends on PCMCIA && NETDEVICES && TTY
select PPP
help
This is a driver for 3G UMTS PCMCIA card from IPWireless company. In
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index b66eaa04f8cb..5c5cc00ebb07 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -102,8 +102,7 @@ static MGSL_PARAMS default_params = {
ASYNC_PARITY_NONE /* unsigned char parity; */
};
-typedef struct
-{
+typedef struct {
int count;
unsigned char status;
char data[1];
@@ -210,7 +209,7 @@ typedef struct _mgslpc_info {
char testing_irq;
unsigned int init_error; /* startup error (DIAGS) */
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -326,10 +325,10 @@ typedef struct _mgslpc_info {
#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
#define set_reg_bits(info, reg, mask) \
- write_reg(info, (reg), \
+ write_reg(info, (reg), \
(unsigned char) (read_reg(info, (reg)) | (mask)))
#define clear_reg_bits(info, reg, mask) \
- write_reg(info, (reg), \
+ write_reg(info, (reg), \
(unsigned char) (read_reg(info, (reg)) & ~(mask)))
/*
* interrupt enable/disable routines
@@ -356,10 +355,10 @@ static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short
}
#define port_irq_disable(info, mask) \
- { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
+ { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
#define port_irq_enable(info, mask) \
- { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
+ { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
static void rx_start(MGSLPC_INFO *info);
static void rx_stop(MGSLPC_INFO *info);
@@ -397,7 +396,7 @@ static int adapter_test(MGSLPC_INFO *info);
static int claim_resources(MGSLPC_INFO *info);
static void release_resources(MGSLPC_INFO *info);
-static void mgslpc_add_device(MGSLPC_INFO *info);
+static int mgslpc_add_device(MGSLPC_INFO *info);
static void mgslpc_remove_device(MGSLPC_INFO *info);
static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
@@ -514,49 +513,56 @@ static const struct tty_port_operations mgslpc_port_ops = {
static int mgslpc_probe(struct pcmcia_device *link)
{
- MGSLPC_INFO *info;
- int ret;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_attach\n");
-
- info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
- if (!info) {
- printk("Error can't allocate device instance data\n");
- return -ENOMEM;
- }
-
- info->magic = MGSLPC_MAGIC;
- tty_port_init(&info->port);
- info->port.ops = &mgslpc_port_ops;
- INIT_WORK(&info->task, bh_handler);
- info->max_frame_size = 4096;
- info->port.close_delay = 5*HZ/10;
- info->port.closing_wait = 30*HZ;
- init_waitqueue_head(&info->status_event_wait_q);
- init_waitqueue_head(&info->event_wait_q);
- spin_lock_init(&info->lock);
- spin_lock_init(&info->netlock);
- memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
- info->idle_mode = HDLC_TXIDLE_FLAGS;
- info->imra_value = 0xffff;
- info->imrb_value = 0xffff;
- info->pim_value = 0xff;
-
- info->p_dev = link;
- link->priv = info;
-
- /* Initialize the struct pcmcia_device structure */
-
- ret = mgslpc_config(link);
- if (ret) {
- tty_port_destroy(&info->port);
- return ret;
- }
-
- mgslpc_add_device(info);
-
- return 0;
+ MGSLPC_INFO *info;
+ int ret;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_attach\n");
+
+ info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+ if (!info) {
+ printk("Error can't allocate device instance data\n");
+ return -ENOMEM;
+ }
+
+ info->magic = MGSLPC_MAGIC;
+ tty_port_init(&info->port);
+ info->port.ops = &mgslpc_port_ops;
+ INIT_WORK(&info->task, bh_handler);
+ info->max_frame_size = 4096;
+ info->port.close_delay = 5*HZ/10;
+ info->port.closing_wait = 30*HZ;
+ init_waitqueue_head(&info->status_event_wait_q);
+ init_waitqueue_head(&info->event_wait_q);
+ spin_lock_init(&info->lock);
+ spin_lock_init(&info->netlock);
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->imra_value = 0xffff;
+ info->imrb_value = 0xffff;
+ info->pim_value = 0xff;
+
+ info->p_dev = link;
+ link->priv = info;
+
+ /* Initialize the struct pcmcia_device structure */
+
+ ret = mgslpc_config(link);
+ if (ret != 0)
+ goto failed;
+
+ ret = mgslpc_add_device(info);
+ if (ret != 0)
+ goto failed_release;
+
+ return 0;
+
+failed_release:
+ mgslpc_release((u_long)link);
+failed:
+ tty_port_destroy(&info->port);
+ kfree(info);
+ return ret;
}
/* Card has been inserted.
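
The reworked mgslpc_probe() above unwinds with goto labels, so a failure in mgslpc_add_device() releases the configured card and frees the instance data. A self-contained sketch of that unwind pattern, with hypothetical helpers standing in for the real setup steps:

#include <linux/errno.h>

static int acquire_a(void) { return 0; }	/* stand-in for e.g. the config step */
static void release_a(void) { }
static int acquire_b(void) { return -ENODEV; }	/* stand-in for e.g. device registration */

static int foo_probe_sketch(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto fail;

	ret = acquire_b();
	if (ret)
		goto fail_release_a;

	return 0;

fail_release_a:
	release_a();	/* undo acquire_a() only; nothing else was taken */
fail:
	return ret;
}
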
@@ -569,35 +575,35 @@ static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
static int mgslpc_config(struct pcmcia_device *link)
{
- MGSLPC_INFO *info = link->priv;
- int ret;
+ MGSLPC_INFO *info = link->priv;
+ int ret;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_config(0x%p)\n", link);
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_config(0x%p)\n", link);
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
- ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
- if (ret != 0)
- goto failed;
+ ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
+ if (ret != 0)
+ goto failed;
- link->config_index = 8;
- link->config_regs = PRESENT_OPTION;
+ link->config_index = 8;
+ link->config_regs = PRESENT_OPTION;
- ret = pcmcia_request_irq(link, mgslpc_isr);
- if (ret)
- goto failed;
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
+ ret = pcmcia_request_irq(link, mgslpc_isr);
+ if (ret)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
- info->io_base = link->resource[0]->start;
- info->irq_level = link->irq;
- return 0;
+ info->io_base = link->resource[0]->start;
+ info->irq_level = link->irq;
+ return 0;
failed:
- mgslpc_release((u_long)link);
- return -ENODEV;
+ mgslpc_release((u_long)link);
+ return -ENODEV;
}
/* Card has been removed.
@@ -703,12 +709,12 @@ static void tx_pause(struct tty_struct *tty)
if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_pause(%s)\n",info->device_name);
+ printk("tx_pause(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->tx_enabled)
- tx_stop(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
static void tx_release(struct tty_struct *tty)
@@ -719,12 +725,12 @@ static void tx_release(struct tty_struct *tty)
if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_release(%s)\n",info->device_name);
+ printk("tx_release(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_enabled)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Return next bottom half action to perform.
@@ -735,7 +741,7 @@ static int bh_action(MGSLPC_INFO *info)
unsigned long flags;
int rc = 0;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->pending_bh & BH_RECEIVE) {
info->pending_bh &= ~BH_RECEIVE;
@@ -754,7 +760,7 @@ static int bh_action(MGSLPC_INFO *info)
info->bh_requested = false;
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return rc;
}
@@ -765,11 +771,8 @@ static void bh_handler(struct work_struct *work)
struct tty_struct *tty;
int action;
- if (!info)
- return;
-
if (debug_level >= DEBUG_LEVEL_BH)
- printk( "%s(%d):bh_handler(%s) entry\n",
+ printk("%s(%d):bh_handler(%s) entry\n",
__FILE__,__LINE__,info->device_name);
info->bh_running = true;
@@ -778,8 +781,8 @@ static void bh_handler(struct work_struct *work)
while((action = bh_action(info)) != 0) {
/* Process work item */
- if ( debug_level >= DEBUG_LEVEL_BH )
- printk( "%s(%d):bh_handler() work item action=%d\n",
+ if (debug_level >= DEBUG_LEVEL_BH)
+ printk("%s(%d):bh_handler() work item action=%d\n",
__FILE__,__LINE__,action);
switch (action) {
@@ -802,7 +805,7 @@ static void bh_handler(struct work_struct *work)
tty_kref_put(tty);
if (debug_level >= DEBUG_LEVEL_BH)
- printk( "%s(%d):bh_handler(%s) exit\n",
+ printk("%s(%d):bh_handler(%s) exit\n",
__FILE__,__LINE__,info->device_name);
}
@@ -831,7 +834,7 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom);
+ printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom);
if (!info->rx_enabled)
return;
@@ -847,7 +850,8 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
if (eom) {
/* end of frame, get FIFO count from RBCL register */
- if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f)))
+ fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+ if (fifo_count == 0)
fifo_count = 32;
} else
fifo_count = 32;
@@ -886,20 +890,13 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
issue_command(info, CHA, CMD_RXFIFO);
}
-static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
+static void rx_ready_async(MGSLPC_INFO *info, int tcd)
{
+ struct tty_port *port = &info->port;
unsigned char data, status, flag;
int fifo_count;
int work = 0;
- struct mgsl_icount *icount = &info->icount;
-
- if (!tty) {
- /* tty is not available anymore */
- issue_command(info, CHA, CMD_RXRESET);
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_ready_async(tty=NULL)\n",__FILE__,__LINE__);
- return;
- }
+ struct mgsl_icount *icount = &info->icount;
if (tcd) {
/* early termination, get FIFO count from RBCL register */
@@ -913,7 +910,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
} else
fifo_count = 32;
- tty_buffer_request_room(tty, fifo_count);
+ tty_buffer_request_room(port, fifo_count);
/* Flush received async data to receive data buffer. */
while (fifo_count) {
data = read_reg(info, CHA + RXFIFO);
@@ -944,7 +941,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
else if (status & BIT6)
flag = TTY_FRAME;
}
- work += tty_insert_flip_char(tty, data, flag);
+ work += tty_insert_flip_char(port, data, flag);
}
issue_command(info, CHA, CMD_RXFIFO);
@@ -957,7 +954,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
}
if (work)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
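
The rx_ready_async() change above is part of the switch to the tty_port-based flip buffer API: received data is queued against the struct tty_port, so the interrupt path no longer needs a struct tty_struct reference (or the NULL check it used to carry). A minimal sketch, with illustrative names:

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void foo_push_rx_byte(struct tty_port *port, unsigned char data, char flag)
{
	tty_insert_flip_char(port, data, flag);	/* flag: TTY_NORMAL, TTY_PARITY, TTY_FRAME, ... */
	tty_flip_buffer_push(port);		/* hand queued data to the line discipline */
}
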
@@ -1004,7 +1001,7 @@ static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
int c;
if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name);
+ printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
if (info->params.mode == MGSL_MODE_HDLC) {
if (!info->tx_active)
@@ -1217,7 +1214,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
if (info->params.mode == MGSL_MODE_HDLC)
rx_ready_hdlc(info, isr & IRQ_RXEOM);
else
- rx_ready_async(info, isr & IRQ_RXEOM, tty);
+ rx_ready_async(info, isr & IRQ_RXEOM);
}
/* transmit IRQs */
@@ -1249,7 +1246,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
*/
if (info->pending_bh && !info->bh_running && !info->bh_requested) {
- if ( debug_level >= DEBUG_LEVEL_ISR )
+ if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,info->device_name);
schedule_work(&info->task);
@@ -1273,7 +1270,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
int retval = 0;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name);
+ printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
if (info->port.flags & ASYNC_INITIALIZED)
return 0;
@@ -1283,7 +1280,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
if (!info->tx_buf) {
printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
- __FILE__,__LINE__,info->device_name);
+ __FILE__, __LINE__, info->device_name);
return -ENOMEM;
}
}
@@ -1298,15 +1295,15 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
retval = claim_resources(info);
/* perform existence check and diagnostics */
- if ( !retval )
+ if (!retval)
retval = adapter_test(info);
- if ( retval ) {
- if (capable(CAP_SYS_ADMIN) && tty)
+ if (retval) {
+ if (capable(CAP_SYS_ADMIN) && tty)
set_bit(TTY_IO_ERROR, &tty->flags);
release_resources(info);
- return retval;
- }
+ return retval;
+ }
/* program hardware for current parameters */
mgslpc_change_params(info, tty);
@@ -1330,7 +1327,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_shutdown(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
/* clear status wait queue because status changes */
/* can't happen after shutting down the hardware */
@@ -1344,7 +1341,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
info->tx_buf = NULL;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
rx_stop(info);
tx_stop(info);
@@ -1352,12 +1349,12 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
/* TODO:disable interrupts instead of reset to preserve signal states */
reset_device(info);
- if (!tty || tty->termios.c_cflag & HUPCL) {
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ if (!tty || tty->termios.c_cflag & HUPCL) {
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
release_resources(info);
@@ -1371,7 +1368,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
rx_stop(info);
tx_stop(info);
@@ -1396,7 +1393,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
rx_start(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Reconfigure adapter based on new parameters
@@ -1411,16 +1408,16 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_change_params(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
cflag = tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
- if (cflag & CBAUD)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
+ if (cflag & CBAUD)
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -1463,7 +1460,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->params.data_rate = tty_get_baud_rate(tty);
}
- if ( info->params.data_rate ) {
+ if (info->params.data_rate) {
info->timeout = (32*HZ*bits_per_char) /
info->params.data_rate;
}
@@ -1498,8 +1495,8 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO) {
- printk( "%s(%d):mgslpc_put_char(%d) on %s\n",
- __FILE__,__LINE__,ch,info->device_name);
+ printk("%s(%d):mgslpc_put_char(%d) on %s\n",
+ __FILE__, __LINE__, ch, info->device_name);
}
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
@@ -1508,7 +1505,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
if (!info->tx_buf)
return 0;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
if (info->tx_count < TXBUFSIZE - 1) {
@@ -1518,7 +1515,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
}
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 1;
}
@@ -1531,8 +1528,8 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
- __FILE__,__LINE__,info->device_name,info->tx_count);
+ printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
+ __FILE__, __LINE__, info->device_name, info->tx_count);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
return;
@@ -1542,13 +1539,13 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
- __FILE__,__LINE__,info->device_name);
+ printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
+ __FILE__, __LINE__, info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Send a block of data
@@ -1569,8 +1566,8 @@ static int mgslpc_write(struct tty_struct * tty,
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_write(%s) count=%d\n",
- __FILE__,__LINE__,info->device_name,count);
+ printk("%s(%d):mgslpc_write(%s) count=%d\n",
+ __FILE__, __LINE__, info->device_name, count);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
!info->tx_buf)
@@ -1596,26 +1593,26 @@ static int mgslpc_write(struct tty_struct * tty,
memcpy(info->tx_buf + info->tx_put, buf, c);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
info->tx_count += c;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
buf += c;
count -= c;
ret += c;
}
start:
- if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
- spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
- }
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
+ }
cleanup:
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_write(%s) returning=%d\n",
- __FILE__,__LINE__,info->device_name,ret);
+ printk("%s(%d):mgslpc_write(%s) returning=%d\n",
+ __FILE__, __LINE__, info->device_name, ret);
return ret;
}
@@ -1643,7 +1640,7 @@ static int mgslpc_write_room(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_write_room(%s)=%d\n",
- __FILE__,__LINE__, info->device_name, ret);
+ __FILE__, __LINE__, info->device_name, ret);
return ret;
}
@@ -1656,7 +1653,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
return 0;
@@ -1668,7 +1665,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
- __FILE__,__LINE__, info->device_name, rc);
+ __FILE__, __LINE__, info->device_name, rc);
return rc;
}
@@ -1682,15 +1679,15 @@ static void mgslpc_flush_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
return;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_count = info->tx_put = info->tx_get = 0;
del_timer(&info->tx_timer);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
wake_up_interruptible(&tty->write_wait);
tty_wakeup(tty);
@@ -1705,17 +1702,17 @@ static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
- __FILE__,__LINE__, info->device_name, ch );
+ __FILE__, __LINE__, info->device_name, ch);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
return;
info->x_char = ch;
if (ch) {
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_enabled)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1728,7 +1725,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_throttle(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
return;
@@ -1736,11 +1733,11 @@ static void mgslpc_throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
mgslpc_send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS) {
- spin_lock_irqsave(&info->lock,flags);
+ if (tty->termios.c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock, flags);
info->serial_signals &= ~SerialSignal_RTS;
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1753,7 +1750,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
return;
@@ -1765,11 +1762,11 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
mgslpc_send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
- spin_lock_irqsave(&info->lock,flags);
+ if (tty->termios.c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock, flags);
info->serial_signals |= SerialSignal_RTS;
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1807,33 +1804,33 @@ static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
*
* Arguments:
*
- * info pointer to device instance data
- * new_params user buffer containing new serial params
+ * info pointer to device instance data
+ * new_params user buffer containing new serial params
*
* Returns: 0 if success, otherwise error code
*/
static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
{
- unsigned long flags;
+ unsigned long flags;
MGSL_PARAMS tmp_params;
int err;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
- info->device_name );
+ info->device_name);
COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
if (err) {
- if ( debug_level >= DEBUG_LEVEL_INFO )
- printk( "%s(%d):set_params(%s) user buffer copy failed\n",
- __FILE__,__LINE__,info->device_name);
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):set_params(%s) user buffer copy failed\n",
+ __FILE__, __LINE__, info->device_name);
return -EFAULT;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
- mgslpc_change_params(info, tty);
+ mgslpc_change_params(info, tty);
return 0;
}
@@ -1851,13 +1848,13 @@ static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode)
static int set_txidle(MGSLPC_INFO * info, int idle_mode)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->idle_mode = idle_mode;
tx_set_idle(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -1874,11 +1871,11 @@ static int get_interface(MGSLPC_INFO * info, int __user *if_mode)
static int set_interface(MGSLPC_INFO * info, int if_mode)
{
- unsigned long flags;
+ unsigned long flags;
unsigned char val;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_interface(%s,%d)\n", info->device_name, if_mode);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->if_mode = if_mode;
val = read_reg(info, PVR) & 0x0f;
@@ -1890,18 +1887,18 @@ static int set_interface(MGSLPC_INFO * info, int if_mode)
}
write_reg(info, PVR, val);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_txenable(%s,%d)\n", info->device_name, enable);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (enable) {
if (!info->tx_enabled)
tx_start(info, tty);
@@ -1909,18 +1906,18 @@ static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
if (info->tx_enabled)
tx_stop(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int tx_abort(MGSLPC_INFO * info)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("tx_abort(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->tx_active && info->tx_count &&
info->params.mode == MGSL_MODE_HDLC) {
/* clear data count so FIFO is not filled on next IRQ.
@@ -1929,18 +1926,18 @@ static int tx_abort(MGSLPC_INFO * info)
info->tx_count = info->tx_put = info->tx_get = 0;
info->tx_aborting = true;
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int set_rxenable(MGSLPC_INFO * info, int enable)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_rxenable(%s,%d)\n", info->device_name, enable);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (enable) {
if (!info->rx_enabled)
rx_start(info);
@@ -1948,21 +1945,21 @@ static int set_rxenable(MGSLPC_INFO * info, int enable)
if (info->rx_enabled)
rx_stop(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
/* wait for specified event to occur
*
- * Arguments: info pointer to device instance data
- * mask pointer to bitmask of events to wait for
- * Return Value: 0 if successful and bit mask updated with
+ * Arguments: info pointer to device instance data
+ * mask pointer to bitmask of events to wait for
+ * Return Value: 0 if successful and bit mask updated with
* of events triggerred,
- * otherwise error code
+ * otherwise error code
*/
static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
{
- unsigned long flags;
+ unsigned long flags;
int s;
int rc=0;
struct mgsl_icount cprev, cnow;
@@ -1978,18 +1975,18 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("wait_events(%s,%d)\n", info->device_name, mask);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
/* return immediately if state matches requested events */
get_signals(info);
s = info->serial_signals;
events = mask &
( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
- ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
if (events) {
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
goto exit;
}
@@ -2004,7 +2001,7 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&info->event_wait_q, &wait);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
for(;;) {
@@ -2015,11 +2012,11 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
}
/* get current irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
newsigs = info->input_signal_events;
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
/* if no change, wait aborted for some reason */
if (newsigs.dsr_up == oldsigs.dsr_up &&
@@ -2058,10 +2055,10 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
set_current_state(TASK_RUNNING);
if (mask & MgslEvent_ExitHuntMode) {
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!waitqueue_active(&info->event_wait_q))
irq_disable(info, CHA, IRQ_EXITHUNT);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
exit:
if (rc == 0)
@@ -2071,17 +2068,17 @@ exit:
static int modem_input_wait(MGSLPC_INFO *info,int arg)
{
- unsigned long flags;
+ unsigned long flags;
int rc;
struct mgsl_icount cprev, cnow;
DECLARE_WAITQUEUE(wait, current);
/* save current irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cprev = info->icount;
add_wait_queue(&info->status_event_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
for(;;) {
schedule();
@@ -2091,10 +2088,10 @@ static int modem_input_wait(MGSLPC_INFO *info,int arg)
}
/* get new irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
/* if no change, wait aborted for some reason */
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
@@ -2125,11 +2122,11 @@ static int tiocmget(struct tty_struct *tty)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
unsigned int result;
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
@@ -2140,7 +2137,7 @@ static int tiocmget(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmget() value=%08X\n",
- __FILE__,__LINE__, info->device_name, result );
+ __FILE__, __LINE__, info->device_name, result);
return result;
}
@@ -2150,11 +2147,11 @@ static int tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmset(%x,%x)\n",
- __FILE__,__LINE__,info->device_name, set, clear);
+ __FILE__, __LINE__, info->device_name, set, clear);
if (set & TIOCM_RTS)
info->serial_signals |= SerialSignal_RTS;
@@ -2165,9 +2162,9 @@ static int tiocmset(struct tty_struct *tty,
if (clear & TIOCM_DTR)
info->serial_signals &= ~SerialSignal_DTR;
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -2184,17 +2181,17 @@ static int mgslpc_break(struct tty_struct *tty, int break_state)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_break(%s,%d)\n",
- __FILE__,__LINE__, info->device_name, break_state);
+ __FILE__, __LINE__, info->device_name, break_state);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
return -EINVAL;
- spin_lock_irqsave(&info->lock,flags);
- if (break_state == -1)
+ spin_lock_irqsave(&info->lock, flags);
+ if (break_state == -1)
set_reg_bits(info, CHA+DAFO, BIT6);
else
clear_reg_bits(info, CHA+DAFO, BIT6);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -2205,9 +2202,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
struct mgsl_icount cnow; /* kernel counter temps */
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
@@ -2228,9 +2225,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
*
* Arguments:
*
- * tty pointer to tty instance data
- * cmd IOCTL command code
- * arg command argument/context
+ * tty pointer to tty instance data
+ * cmd IOCTL command code
+ * arg command argument/context
*
* Return Value: 0 if success, otherwise error code
*/
@@ -2241,8 +2238,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
void __user *argp = (void __user *)arg;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
- info->device_name, cmd );
+ printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__,
+ info->device_name, cmd);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
return -ENODEV;
@@ -2288,8 +2285,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
*
* Arguments:
*
- * tty pointer to tty structure
- * termios pointer to buffer to hold returned old termios
+ * tty pointer to tty structure
+ * termios pointer to buffer to hold returned old termios
*/
static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
@@ -2297,8 +2294,8 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__,
- tty->driver->name );
+ printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__,
+ tty->driver->name);
/* just return if nothing has changed */
if ((tty->termios.c_cflag == old_termios->c_cflag)
@@ -2311,23 +2308,23 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) &&
tty->termios.c_cflag & CBAUD) {
info->serial_signals |= SerialSignal_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!(tty->termios.c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
info->serial_signals |= SerialSignal_RTS;
- }
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ }
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Handle turning off CRTSCTS */
@@ -2348,15 +2345,15 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
- __FILE__,__LINE__, info->device_name, port->count);
+ __FILE__, __LINE__, info->device_name, port->count);
WARN_ON(!port->count);
if (tty_port_close_start(port, tty, filp) == 0)
goto cleanup;
- if (port->flags & ASYNC_INITIALIZED)
- mgslpc_wait_until_sent(tty, info->timeout);
+ if (port->flags & ASYNC_INITIALIZED)
+ mgslpc_wait_until_sent(tty, info->timeout);
mgslpc_flush_buffer(tty);
@@ -2367,7 +2364,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
tty_port_tty_set(port, NULL);
cleanup:
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+ printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
tty->driver->name, port->count);
}
@@ -2378,12 +2375,12 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
unsigned long orig_jiffies, char_time;
- if (!info )
+ if (!info)
return;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
return;
@@ -2399,8 +2396,8 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
* Note: use tight timings here to satisfy the NIST-PCTS.
*/
- if ( info->params.data_rate ) {
- char_time = info->timeout/(32 * 5);
+ if (info->params.data_rate) {
+ char_time = info->timeout/(32 * 5);
if (!char_time)
char_time++;
} else
@@ -2431,7 +2428,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
exit:
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
}
/* Called by tty_hangup() when a hangup is signaled.
@@ -2443,7 +2440,7 @@ static void mgslpc_hangup(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_hangup(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
return;
@@ -2458,9 +2455,9 @@ static int carrier_raised(struct tty_port *port)
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
if (info->serial_signals & SerialSignal_DCD)
return 1;
@@ -2472,13 +2469,13 @@ static void dtr_rts(struct tty_port *port, int onoff)
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (onoff)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
@@ -2486,14 +2483,14 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
{
MGSLPC_INFO *info;
struct tty_port *port;
- int retval, line;
- unsigned long flags;
+ int retval, line;
+ unsigned long flags;
/* verify range of specified line number */
line = tty->index;
if (line >= mgslpc_device_count) {
printk("%s(%d):mgslpc_open with invalid line #%d.\n",
- __FILE__,__LINE__,line);
+ __FILE__, __LINE__, line);
return -ENODEV;
}
@@ -2510,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
- __FILE__,__LINE__,tty->driver->name, port->count);
+ __FILE__, __LINE__, tty->driver->name, port->count);
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
@@ -2521,7 +2518,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
goto cleanup;
}
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -2545,13 +2542,13 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
if (retval) {
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):block_til_ready(%s) returned %d\n",
- __FILE__,__LINE__, info->device_name, retval);
+ __FILE__, __LINE__, info->device_name, retval);
goto cleanup;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_open(%s) success\n",
- __FILE__,__LINE__, info->device_name);
+ __FILE__, __LINE__, info->device_name);
retval = 0;
cleanup:
@@ -2571,9 +2568,9 @@ static inline void line_info(struct seq_file *m, MGSLPC_INFO *info)
info->device_name, info->io_base, info->irq_level);
/* output current serial signal states */
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
stat_buf[0] = 0;
stat_buf[1] = 0;
@@ -2635,7 +2632,7 @@ static int mgslpc_proc_show(struct seq_file *m, void *v)
seq_printf(m, "synclink driver:%s\n", driver_version);
info = mgslpc_device_list;
- while( info ) {
+ while (info) {
line_info(m, info);
info = info->next_device;
}
@@ -2674,6 +2671,14 @@ static int rx_alloc_buffers(MGSLPC_INFO *info)
if (info->rx_buf == NULL)
return -ENOMEM;
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->rx_buf);
+ info->rx_buf = NULL;
+ return -ENOMEM;
+ }
+
rx_reset_buffers(info);
return 0;
}
@@ -2682,12 +2687,14 @@ static void rx_free_buffers(MGSLPC_INFO *info)
{
kfree(info->rx_buf);
info->rx_buf = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
}
static int claim_resources(MGSLPC_INFO *info)
{
- if (rx_alloc_buffers(info) < 0 ) {
- printk( "Can't allocate rx buffer %s\n", info->device_name);
+ if (rx_alloc_buffers(info) < 0) {
+ printk("Can't allocate rx buffer %s\n", info->device_name);
release_resources(info);
return -ENODEV;
}
@@ -2706,8 +2713,12 @@ static void release_resources(MGSLPC_INFO *info)
*
* Arguments: info pointer to device instance data
*/
-static void mgslpc_add_device(MGSLPC_INFO *info)
+static int mgslpc_add_device(MGSLPC_INFO *info)
{
+ MGSLPC_INFO *current_dev = NULL;
+ struct device *tty_dev;
+ int ret;
+
info->next_device = NULL;
info->line = mgslpc_device_count;
sprintf(info->device_name,"ttySLP%d",info->line);
@@ -2722,8 +2733,8 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
if (!mgslpc_device_list)
mgslpc_device_list = info;
else {
- MGSLPC_INFO *current_dev = mgslpc_device_list;
- while( current_dev->next_device )
+ current_dev = mgslpc_device_list;
+ while (current_dev->next_device)
current_dev = current_dev->next_device;
current_dev->next_device = info;
}
@@ -2733,14 +2744,34 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
else if (info->max_frame_size > 65535)
info->max_frame_size = 65535;
- printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
+ printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n",
info->device_name, info->io_base, info->irq_level);
#if SYNCLINK_GENERIC_HDLC
- hdlcdev_init(info);
+ ret = hdlcdev_init(info);
+ if (ret != 0)
+ goto failed;
#endif
- tty_port_register_device(&info->port, serial_driver, info->line,
+
+ tty_dev = tty_port_register_device(&info->port, serial_driver, info->line,
&info->p_dev->dev);
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_exit(info);
+#endif
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ if (current_dev)
+ current_dev->next_device = NULL;
+ else
+ mgslpc_device_list = NULL;
+ mgslpc_device_count--;
+ return ret;
}
static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
@@ -3262,7 +3293,7 @@ static void rx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_stop(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
/* MODE:03 RAC Receiver Active, 0=inactive */
clear_reg_bits(info, CHA + MODE, BIT3);
@@ -3275,7 +3306,7 @@ static void rx_start(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_start(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
rx_reset_buffers(info);
info->rx_enabled = false;
@@ -3291,7 +3322,7 @@ static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_start(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (info->tx_count) {
/* If auto RTS enabled and RTS is inactive, then assert */
@@ -3329,7 +3360,7 @@ static void tx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_stop(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
del_timer(&info->tx_timer);
@@ -3575,8 +3606,8 @@ static void get_signals(MGSLPC_INFO *info)
{
unsigned char status = 0;
- /* preserve DTR and RTS */
- info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* preserve RTS and DTR */
+ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
if (read_reg(info, CHB + VSTR) & BIT7)
info->serial_signals |= SerialSignal_DCD;
@@ -3590,7 +3621,7 @@ static void get_signals(MGSLPC_INFO *info)
info->serial_signals |= SerialSignal_DSR;
}
-/* Set the state of DTR and RTS based on contents of
+/* Set the state of RTS and DTR based on contents of
* serial_signals member of device extension.
*/
static void set_signals(MGSLPC_INFO *info)
@@ -3681,7 +3712,7 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_BH)
printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
- __FILE__,__LINE__,info->device_name,status,framesize);
+ __FILE__, __LINE__, info->device_name, status, framesize);
if (debug_level >= DEBUG_LEVEL_DATA)
trace_block(info, buf->data, framesize, 0);
@@ -3709,13 +3740,13 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
}
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
buf->status = buf->count = 0;
info->rx_frame_count--;
info->rx_get++;
if (info->rx_get >= info->rx_buf_count)
info->rx_get = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return true;
}
@@ -3729,7 +3760,7 @@ static bool register_test(MGSLPC_INFO *info)
bool rc = true;
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
for (i = 0; i < count; i++) {
@@ -3742,7 +3773,7 @@ static bool register_test(MGSLPC_INFO *info)
}
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return rc;
}
@@ -3751,7 +3782,7 @@ static bool irq_test(MGSLPC_INFO *info)
unsigned long end_time;
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
info->testing_irq = true;
@@ -3765,7 +3796,7 @@ static bool irq_test(MGSLPC_INFO *info)
write_reg(info, CHA + TIMR, 0); /* 512 cycles */
issue_command(info, CHA, CMD_START_TIMER);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
end_time=100;
while(end_time-- && !info->irq_occurred) {
@@ -3774,9 +3805,9 @@ static bool irq_test(MGSLPC_INFO *info)
info->testing_irq = false;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return info->irq_occurred;
}
@@ -3785,21 +3816,21 @@ static int adapter_test(MGSLPC_INFO *info)
{
if (!register_test(info)) {
info->init_error = DiagStatus_AddressFailure;
- printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
- __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
+ printk("%s(%d):Register test failure for device %s Addr=%04X\n",
+ __FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base));
return -ENODEV;
}
if (!irq_test(info)) {
info->init_error = DiagStatus_IrqFailure;
- printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
- __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
+ printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+ __FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level));
return -ENODEV;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):device %s passed diagnostics\n",
- __FILE__,__LINE__,info->device_name);
+ __FILE__, __LINE__, info->device_name);
return 0;
}
@@ -3808,9 +3839,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
int i;
int linecount;
if (xmit)
- printk("%s tx data:\n",info->device_name);
+ printk("%s tx data:\n", info->device_name);
else
- printk("%s rx data:\n",info->device_name);
+ printk("%s rx data:\n", info->device_name);
while(count) {
if (count > 16)
@@ -3819,12 +3850,12 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
linecount = count;
for(i=0;i<linecount;i++)
- printk("%02X ",(unsigned char)data[i]);
+ printk("%02X ", (unsigned char)data[i]);
for(;i<17;i++)
printk(" ");
for(i=0;i<linecount;i++) {
if (data[i]>=040 && data[i]<=0176)
- printk("%c",data[i]);
+ printk("%c", data[i]);
else
printk(".");
}
@@ -3843,18 +3874,18 @@ static void tx_timeout(unsigned long context)
MGSLPC_INFO *info = (MGSLPC_INFO*)context;
unsigned long flags;
- if ( debug_level >= DEBUG_LEVEL_INFO )
- printk( "%s(%d):tx_timeout(%s)\n",
- __FILE__,__LINE__,info->device_name);
- if(info->tx_active &&
- info->params.mode == MGSL_MODE_HDLC) {
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):tx_timeout(%s)\n",
+ __FILE__, __LINE__, info->device_name);
+ if (info->tx_active &&
+ info->params.mode == MGSL_MODE_HDLC) {
info->icount.txtimeout++;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
@@ -3936,7 +3967,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
+ printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name);
/* stop sending until this frame completes */
netif_stop_queue(dev);
@@ -3957,13 +3988,13 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
dev->trans_start = jiffies;
/* start hardware transmitter if necessary */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active) {
struct tty_struct *tty = tty_port_tty_get(&info->port);
- tx_start(info, tty);
- tty_kref_put(tty);
+ tx_start(info, tty);
+ tty_kref_put(tty);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return NETDEV_TX_OK;
}
@@ -3984,10 +4015,11 @@ static int hdlcdev_open(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name);
/* generic HDLC layer open processing */
- if ((rc = hdlc_open(dev)))
+ rc = hdlc_open(dev);
+ if (rc != 0)
return rc;
/* arbitrate between network and tty opens */
@@ -4002,15 +4034,16 @@ static int hdlcdev_open(struct net_device *dev)
tty = tty_port_tty_get(&info->port);
/* claim resources and init adapter */
- if ((rc = startup(info, tty)) != 0) {
+ rc = startup(info, tty);
+ if (rc != 0) {
tty_kref_put(tty);
spin_lock_irqsave(&info->netlock, flags);
info->netcount=0;
spin_unlock_irqrestore(&info->netlock, flags);
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
mgslpc_program_hw(info, tty);
tty_kref_put(tty);
@@ -4044,7 +4077,7 @@ static int hdlcdev_close(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name);
netif_stop_queue(dev);
@@ -4078,7 +4111,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
unsigned int flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
/* return error if TTY interface open */
if (info->port.count)
@@ -4179,14 +4212,14 @@ static void hdlcdev_tx_timeout(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_tx_timeout(%s)\n",dev->name);
+ printk("hdlcdev_tx_timeout(%s)\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
tx_stop(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
netif_wake_queue(dev);
}
@@ -4217,7 +4250,7 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
struct net_device *dev = info->netdev;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_rx(%s)\n",dev->name);
+ printk("hdlcdev_rx(%s)\n", dev->name);
if (skb == NULL) {
printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
@@ -4260,8 +4293,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
/* allocate and initialize network and HDLC layer objects */
- if (!(dev = alloc_hdlcdev(info))) {
- printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+ dev = alloc_hdlcdev(info);
+ if (dev == NULL) {
+ printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
return -ENOMEM;
}
@@ -4280,8 +4314,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
hdlc->xmit = hdlcdev_xmit;
/* register objects with HDLC layer */
- if ((rc = register_hdlc_device(dev))) {
- printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+ rc = register_hdlc_device(dev);
+ if (rc) {
+ printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
free_netdev(dev);
return rc;
}
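
Aside from the whitespace cleanup, the synclink_cs hunks above also change how the modem-control flags are combined: SerialSignal_RTS and SerialSignal_DTR are now OR'ed together, and the complement in dtr_rts() is parenthesized. The old arithmetic form relied on operator precedence ('~' binds tighter than '+'), so depending on the flag values the mask it produced could differ from the intended one. A minimal userspace sketch with hypothetical flag values (not the driver's real SerialSignal_* definitions) shows the divergence:

/*
 * Illustration only: FLAG_A and FLAG_B are hypothetical values.
 * With '+', the expression ~FLAG_A + FLAG_B parses as (~FLAG_A) + FLAG_B,
 * and the addition can carry into unrelated bits, so the computed mask
 * may differ from the intended ~(FLAG_A | FLAG_B).
 */
#include <stdio.h>

#define FLAG_A 0x80u
#define FLAG_B 0x20u

int main(void)
{
	unsigned int arithmetic = ~FLAG_A + FLAG_B;	/* carry corrupts bit 7 */
	unsigned int bitwise    = ~(FLAG_A | FLAG_B);	/* clears exactly A and B */

	printf("mask via '+': 0x%08x\n", arithmetic);
	printf("mask via '|': 0x%08x\n", bitwise);
	return 0;
}
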
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b86eae9b77df..594bda9dcfc8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -399,7 +399,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
-#if 0
static bool debug;
module_param(debug, bool, 0644);
#define DEBUG_ENT(fmt, arg...) do { \
@@ -410,9 +409,6 @@ module_param(debug, bool, 0644);
blocking_pool.entropy_count,\
nonblocking_pool.entropy_count,\
## arg); } while (0)
-#else
-#define DEBUG_ENT(fmt, arg...) do {} while (0)
-#endif
/**********************************************************************
*
@@ -437,6 +433,7 @@ struct entropy_store {
int entropy_count;
int entropy_total;
unsigned int initialized:1;
+ bool last_data_init;
__u8 last_data[EXTRACT_SIZE];
};
@@ -448,7 +445,7 @@ static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
.name = "input",
.limit = 1,
- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
.pool = input_pool_data
};
@@ -457,7 +454,7 @@ static struct entropy_store blocking_pool = {
.name = "blocking",
.limit = 1,
.pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
.pool = blocking_pool_data
};
@@ -465,7 +462,7 @@ static struct entropy_store nonblocking_pool = {
.poolinfo = &poolinfo_table[1],
.name = "nonblocking",
.pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
.pool = nonblocking_pool_data
};
@@ -829,7 +826,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
bytes = min_t(int, bytes, sizeof(tmp));
DEBUG_ENT("going to reseed %s with %d bits "
- "(%d of %d requested)\n",
+ "(%zu of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
bytes = extract_entropy(r->pull, tmp, bytes,
@@ -860,7 +857,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
spin_lock_irqsave(&r->lock, flags);
BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
- DEBUG_ENT("trying to extract %d bits from %s\n",
+ DEBUG_ENT("trying to extract %zu bits from %s\n",
nbytes * 8, r->name);
/* Can we pull enough? */
@@ -882,7 +879,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
}
}
- DEBUG_ENT("debiting %d entropy credits from %s%s\n",
+ DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
spin_unlock_irqrestore(&r->lock, flags);
@@ -957,6 +954,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
+ if (fips_enabled && !r->last_data_init)
+ nbytes += EXTRACT_SIZE;
+
trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
@@ -967,6 +968,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
if (fips_enabled) {
unsigned long flags;
+
+ /* prime last_data value if need be, per fips 140-2 */
+ if (!r->last_data_init) {
+ spin_lock_irqsave(&r->lock, flags);
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ r->last_data_init = true;
+ nbytes -= EXTRACT_SIZE;
+ spin_unlock_irqrestore(&r->lock, flags);
+ extract_buf(r, tmp);
+ }
+
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -1086,6 +1098,7 @@ static void init_std_data(struct entropy_store *r)
r->entropy_count = 0;
r->entropy_total = 0;
+ r->last_data_init = false;
mix_pool_bytes(r, &now, sizeof(now), NULL);
for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_long(&rv))
@@ -1142,11 +1155,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (n > SEC_XFER_SIZE)
n = SEC_XFER_SIZE;
- DEBUG_ENT("reading %d bits\n", n*8);
+ DEBUG_ENT("reading %zu bits\n", n*8);
n = extract_entropy_user(&blocking_pool, buf, n);
- DEBUG_ENT("read got %d bits (%d still needed)\n",
+ if (n < 0) {
+ retval = n;
+ break;
+ }
+
+ DEBUG_ENT("read got %zd bits (%zd still needed)\n",
n*8, (nbytes-n)*8);
if (n == 0) {
@@ -1171,10 +1189,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
continue;
}
- if (n < 0) {
- retval = n;
- break;
- }
count += n;
buf += n;
nbytes -= n;
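
The random.c hunks above add a last_data_init flag so that, when fips_enabled is set, the first extracted block is used only to prime last_data for the FIPS 140-2 continuous test and is never handed to the caller; the request is grown by EXTRACT_SIZE up front to compensate. A simplified userspace sketch of that priming logic follows (get_checked_block and extract_block are hypothetical stand-ins, not kernel functions):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define EXTRACT_SIZE 10	/* the driver's extraction block is 10 bytes */

static unsigned char last_data[EXTRACT_SIZE];
static bool last_data_init;

/* stand-in for the pool extraction step (extract_buf in the driver) */
static void extract_block(unsigned char buf[EXTRACT_SIZE])
{
	for (int i = 0; i < EXTRACT_SIZE; i++)
		buf[i] = (unsigned char)rand();
}

/* returns 0 and fills 'out', or -1 if the source looks stuck */
int get_checked_block(unsigned char out[EXTRACT_SIZE])
{
	unsigned char tmp[EXTRACT_SIZE];

	if (!last_data_init) {
		/* the first block only primes last_data and is never
		 * returned, which is why the hunk above requests
		 * EXTRACT_SIZE extra bytes when last_data_init is false */
		extract_block(tmp);
		memcpy(last_data, tmp, EXTRACT_SIZE);
		last_data_init = true;
	}

	extract_block(tmp);
	if (!memcmp(tmp, last_data, EXTRACT_SIZE))
		return -1;	/* repeated output: treat the source as stuck */

	memcpy(last_data, tmp, EXTRACT_SIZE);
	memcpy(out, tmp, EXTRACT_SIZE);
	return 0;
}
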
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index d780295a1473..6386a98e43c1 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1142,7 +1142,7 @@ static int sonypi_acpi_add(struct acpi_device *device)
return 0;
}
-static int sonypi_acpi_remove(struct acpi_device *device, int type)
+static int sonypi_acpi_remove(struct acpi_device *device)
{
sonypi_acpi_device = NULL;
return 0;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 915875e431d2..dbfd56446c31 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -75,10 +75,20 @@ config TCG_INFINEON
config TCG_IBMVTPM
tristate "IBM VTPM Interface"
- depends on PPC64
+ depends on PPC_PSERIES
---help---
If you have IBM virtual TPM (VTPM) support say Yes and it
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
+config TCG_ST33_I2C
+ tristate "STMicroelectronics ST33 I2C TPM"
+ depends on I2C
+ depends on GPIOLIB
+ ---help---
+ If you have a TPM security chip from STMicroelectronics working with
+ an I2C bus say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_stm_st33_i2c.
+
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 5b3fc8bc6c13..a3736c97c65a 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
+obj-$(CONFIG_TCG_ST33_I2C) += tpm_i2c_stm_st33.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 93211df52aab..0d2e82f95577 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -40,8 +40,9 @@ enum tpm_duration {
};
#define TPM_MAX_ORDINAL 243
-#define TPM_MAX_PROTECTED_ORDINAL 12
-#define TPM_PROTECTED_ORDINAL_MASK 0xFF
+#define TSC_MAX_ORDINAL 12
+#define TPM_PROTECTED_COMMAND 0x00
+#define TPM_CONNECTION_COMMAND 0x40
/*
* Bug workaround - some TPM's don't flush the most
@@ -65,21 +66,6 @@ static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
* values of the SHORT, MEDIUM, and LONG durations are retrieved
* from the chip during initialization with a call to tpm_get_timeouts.
*/
-static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = {
- TPM_UNDEFINED, /* 0 */
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED, /* 5 */
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_SHORT, /* 10 */
- TPM_SHORT,
-};
-
static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
TPM_UNDEFINED, /* 0 */
TPM_UNDEFINED,
@@ -351,14 +337,11 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
{
int duration_idx = TPM_UNDEFINED;
int duration = 0;
+ u8 category = (ordinal >> 24) & 0xFF;
- if (ordinal < TPM_MAX_ORDINAL)
+ if ((category == TPM_PROTECTED_COMMAND && ordinal < TPM_MAX_ORDINAL) ||
+ (category == TPM_CONNECTION_COMMAND && ordinal < TSC_MAX_ORDINAL))
duration_idx = tpm_ordinal_duration[ordinal];
- else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) <
- TPM_MAX_PROTECTED_ORDINAL)
- duration_idx =
- tpm_protected_ordinal_duration[ordinal &
- TPM_PROTECTED_ORDINAL_MASK];
if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
@@ -410,7 +393,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
chip->vendor.req_complete_val)
goto out_recv;
- if ((status == chip->vendor.req_canceled)) {
+ if (chip->vendor.req_canceled(chip, status)) {
dev_err(chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
@@ -468,7 +451,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
return -EFAULT;
err = be32_to_cpu(cmd->header.out.return_code);
- if (err != 0)
+ if (err != 0 && desc)
dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
return err;
@@ -528,6 +511,25 @@ void tpm_gen_interrupt(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
+#define TPM_ORD_STARTUP cpu_to_be32(153)
+#define TPM_ST_CLEAR cpu_to_be16(1)
+#define TPM_ST_STATE cpu_to_be16(2)
+#define TPM_ST_DEACTIVATED cpu_to_be16(3)
+static const struct tpm_input_header tpm_startup_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(12),
+ .ordinal = TPM_ORD_STARTUP
+};
+
+static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
+{
+ struct tpm_cmd_t start_cmd;
+ start_cmd.header.in = tpm_startup_header;
+ start_cmd.params.startup_in.startup_type = startup_type;
+ return transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to start the TPM");
+}
+
int tpm_get_timeouts(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
@@ -541,11 +543,28 @@ int tpm_get_timeouts(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL);
- rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- "attempting to determine the timeouts");
- if (rc)
+ if (rc == TPM_ERR_INVALID_POSTINIT) {
+ /* The TPM is not started, we are the first to talk to it.
+ Execute a startup command. */
+ dev_info(chip->dev, "Issuing TPM_STARTUP");
+ if (tpm_startup(chip, TPM_ST_CLEAR))
+ return rc;
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ NULL);
+ }
+ if (rc) {
+ dev_err(chip->dev,
+ "A TPM error (%zd) occurred attempting to determine the timeouts\n",
+ rc);
goto duration;
+ }
if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
be32_to_cpu(tpm_cmd.header.out.length)
@@ -824,7 +843,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
{
int rc;
unsigned int loops;
- unsigned int delay_msec = 1000;
+ unsigned int delay_msec = 100;
unsigned long duration;
struct tpm_cmd_t cmd;
@@ -845,6 +864,14 @@ int tpm_do_selftest(struct tpm_chip *chip)
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
+ /* Some buggy TPMs will not respond to tpm_tis_ready() for
+ * around 300ms while the self test is ongoing, keep trying
+ * until the self test duration expires. */
+ if (rc == -ETIME) {
+ dev_info(chip->dev, HW_ERR "TPM command timed out during continue self test");
+ msleep(delay_msec);
+ continue;
+ }
if (rc < TPM_HEADER_SIZE)
return -EFAULT;
@@ -1075,12 +1102,28 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel,
+ bool *canceled)
+{
+ u8 status = chip->vendor.status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->vendor.req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue)
+ wait_queue_head_t *queue, bool check_cancel)
{
unsigned long stop;
long rc;
u8 status;
+ bool canceled = false;
/* check current status */
status = chip->vendor.status(chip);
@@ -1095,11 +1138,14 @@ again:
if ((long)timeout <= 0)
return -ETIME;
rc = wait_event_interruptible_timeout(*queue,
- ((chip->vendor.status(chip)
- & mask) == mask),
- timeout);
- if (rc > 0)
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
return 0;
+ }
if (rc == -ERESTARTSYS && freezing(current)) {
clear_thread_flag(TIF_SIGPENDING);
goto again;
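
Two behavioural changes in the tpm.c hunks above are easy to miss among the cleanups: req_canceled becomes a per-vendor callback (the driver-side implementations appear further down in this diff), and tpm_get_timeouts() now reacts to TPM_ERR_INVALID_POSTINIT by issuing TPM_Startup(ST_CLEAR) itself and retrying, instead of failing on a TPM the firmware left unstarted. For reference, the 12-byte command the new tpm_startup() helper marshals should look roughly like this on the wire (big-endian; the 0x00C1 tag is assumed to be the usual TPM_TAG_RQU_COMMAND value, which is defined outside this hunk):

/* sketch of the TPM_Startup(ST_CLEAR) blob built from the constants above */
static const unsigned char tpm_startup_clear[12] = {
	0x00, 0xc1,             /* TPM_TAG_RQU_COMMAND (assumed value) */
	0x00, 0x00, 0x00, 0x0c, /* length = 12 */
	0x00, 0x00, 0x00, 0x99, /* TPM_ORD_STARTUP (153) */
	0x00, 0x01,             /* TPM_ST_CLEAR */
};
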
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 8ef7649a50aa..81b52015f669 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -47,6 +47,7 @@ enum tpm_addr {
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_HEADER_SIZE 10
extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
@@ -77,7 +78,7 @@ struct tpm_chip;
struct tpm_vendor_specific {
const u8 req_complete_mask;
const u8 req_complete_val;
- const u8 req_canceled;
+ bool (*req_canceled)(struct tpm_chip *chip, u8 status);
void __iomem *iobase; /* ioremapped address */
unsigned long base; /* TPM base address */
@@ -100,13 +101,19 @@ struct tpm_vendor_specific {
bool timeout_adjusted;
unsigned long duration[3]; /* jiffies */
bool duration_adjusted;
- void *data;
+ void *priv;
wait_queue_head_t read_queue;
wait_queue_head_t int_queue;
+
+ u16 manufacturer_id;
};
+#define TPM_VPRIV(c) (c)->vendor.priv
+
#define TPM_VID_INTEL 0x8086
+#define TPM_VID_WINBOND 0x1050
+#define TPM_VID_STM 0x104A
struct tpm_chip {
struct device *dev; /* Device stuff */
@@ -154,13 +161,13 @@ struct tpm_input_header {
__be16 tag;
__be32 length;
__be32 ordinal;
-}__attribute__((packed));
+} __packed;
struct tpm_output_header {
__be16 tag;
__be32 length;
__be32 return_code;
-}__attribute__((packed));
+} __packed;
struct stclear_flags_t {
__be16 tag;
@@ -169,14 +176,14 @@ struct stclear_flags_t {
u8 physicalPresence;
u8 physicalPresenceLock;
u8 bGlobalLock;
-}__attribute__((packed));
+} __packed;
struct tpm_version_t {
u8 Major;
u8 Minor;
u8 revMajor;
u8 revMinor;
-}__attribute__((packed));
+} __packed;
struct tpm_version_1_2_t {
__be16 tag;
@@ -184,20 +191,20 @@ struct tpm_version_1_2_t {
u8 Minor;
u8 revMajor;
u8 revMinor;
-}__attribute__((packed));
+} __packed;
struct timeout_t {
__be32 a;
__be32 b;
__be32 c;
__be32 d;
-}__attribute__((packed));
+} __packed;
struct duration_t {
__be32 tpm_short;
__be32 tpm_medium;
__be32 tpm_long;
-}__attribute__((packed));
+} __packed;
struct permanent_flags_t {
__be16 tag;
@@ -221,7 +228,7 @@ struct permanent_flags_t {
u8 tpmEstablished;
u8 maintenanceDone;
u8 disableFullDALogicInfo;
-}__attribute__((packed));
+} __packed;
typedef union {
struct permanent_flags_t perm_flags;
@@ -239,12 +246,12 @@ struct tpm_getcap_params_in {
__be32 cap;
__be32 subcap_size;
__be32 subcap;
-}__attribute__((packed));
+} __packed;
struct tpm_getcap_params_out {
__be32 cap_size;
cap_t cap;
-}__attribute__((packed));
+} __packed;
struct tpm_readpubek_params_out {
u8 algorithm[4];
@@ -255,7 +262,7 @@ struct tpm_readpubek_params_out {
__be32 keysize;
u8 modulus[256];
u8 checksum[20];
-}__attribute__((packed));
+} __packed;
typedef union {
struct tpm_input_header in;
@@ -265,16 +272,16 @@ typedef union {
#define TPM_DIGEST_SIZE 20
struct tpm_pcrread_out {
u8 pcr_result[TPM_DIGEST_SIZE];
-}__attribute__((packed));
+} __packed;
struct tpm_pcrread_in {
__be32 pcr_idx;
-}__attribute__((packed));
+} __packed;
struct tpm_pcrextend_in {
__be32 pcr_idx;
u8 hash[TPM_DIGEST_SIZE];
-}__attribute__((packed));
+} __packed;
/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
* bytes, but 128 is still a relatively large number of random bytes and
@@ -285,11 +292,15 @@ struct tpm_pcrextend_in {
struct tpm_getrandom_out {
__be32 rng_data_len;
u8 rng_data[TPM_MAX_RNG_DATA];
-}__attribute__((packed));
+} __packed;
struct tpm_getrandom_in {
__be32 num_bytes;
-}__attribute__((packed));
+} __packed;
+
+struct tpm_startup_in {
+ __be16 startup_type;
+} __packed;
typedef union {
struct tpm_getcap_params_out getcap_out;
@@ -301,12 +312,13 @@ typedef union {
struct tpm_pcrextend_in pcrextend_in;
struct tpm_getrandom_in getrandom_in;
struct tpm_getrandom_out getrandom_out;
+ struct tpm_startup_in startup_in;
} tpm_cmd_params;
struct tpm_cmd_t {
tpm_cmd_header header;
tpm_cmd_params params;
-}__attribute__((packed));
+} __packed;
ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
@@ -326,7 +338,7 @@ extern void tpm_remove_hardware(struct device *);
extern int tpm_pm_suspend(struct device *);
extern int tpm_pm_resume(struct device *);
extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
- wait_queue_head_t *);
+ wait_queue_head_t *, bool);
#ifdef CONFIG_ACPI
extern int tpm_add_ppi(struct kobject *);
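
The tpm.h hunks above switch the wire structures from open-coded __attribute__((packed)) to the kernel's __packed shorthand; the semantics are unchanged. Packing is what keeps these structs byte-exact with the TPM protocol. A hypothetical userspace equivalent of tpm_input_header illustrates why: without packing, most ABIs pad the 16-bit tag to a 32-bit boundary and sizeof() no longer matches TPM_HEADER_SIZE (10).

#include <stdint.h>
#include <stdio.h>

/* userspace illustration only, not the kernel's definitions */
struct hdr_padded {
	uint16_t tag;
	uint32_t length;
	uint32_t ordinal;
};

struct hdr_packed {
	uint16_t tag;
	uint32_t length;
	uint32_t ordinal;
} __attribute__((packed));

int main(void)
{
	printf("padded: %zu bytes\n", sizeof(struct hdr_padded));	/* typically 12 */
	printf("packed: %zu bytes\n", sizeof(struct hdr_packed));	/* 10 */
	return 0;
}
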
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 56051d0c97a2..64420b3396a2 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -33,13 +33,13 @@ struct acpi_tcpa {
u16 platform_class;
union {
struct client_hdr {
- u32 log_max_len __attribute__ ((packed));
- u64 log_start_addr __attribute__ ((packed));
+ u32 log_max_len __packed;
+ u64 log_start_addr __packed;
} client;
struct server_hdr {
u16 reserved;
- u64 log_max_len __attribute__ ((packed));
- u64 log_start_addr __attribute__ ((packed));
+ u64 log_max_len __packed;
+ u64 log_start_addr __packed;
} server;
};
};
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 678d57019dc4..99d6820c611d 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -116,6 +116,11 @@ static u8 tpm_atml_status(struct tpm_chip *chip)
return ioread8(chip->vendor.iobase + 1);
}
+static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == ATML_STATUS_READY);
+}
+
static const struct file_operations atmel_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -147,7 +152,7 @@ static const struct tpm_vendor_specific tpm_atmel = {
.status = tpm_atml_status,
.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
.req_complete_val = ATML_STATUS_DATA_AVAIL,
- .req_canceled = ATML_STATUS_READY,
+ .req_canceled = tpm_atml_req_canceled,
.attr_group = &atmel_attr_grp,
.miscdev = { .fops = &atmel_ops, },
};
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index fb447bd0cb61..8fe7ac3d095b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -505,6 +505,11 @@ out_err:
return rc;
}
+static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
static const struct file_operations tis_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -550,7 +555,7 @@ static struct tpm_vendor_specific tpm_tis_i2c = {
.cancel = tpm_tis_i2c_ready,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_canceled = TPM_STS_COMMAND_READY,
+ .req_canceled = tpm_tis_i2c_req_canceled,
.attr_group = &tis_attr_grp,
.miscdev.fops = &tis_ops,
};
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
new file mode 100644
index 000000000000..1f5f71e14abe
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -0,0 +1,887 @@
+/*
+ * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009, 2010 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * STMicroelectronics version 1.2.0, Copyright (C) 2010
+ * STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
+ * This is free software, and you are welcome to redistribute it
+ * under certain conditions.
+ *
+ * @Author: Christophe RICARD tpmsupport@st.com
+ *
+ * @File: tpm_i2c_stm_st33.c
+ *
+ * @Synopsis:
+ * 09/15/2010: First shot of the driver; the tpm_tis driver for
+ *	       LPC is used as a model.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "tpm.h"
+#include "tpm_i2c_stm_st33.h"
+
+enum stm33zp24_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum stm33zp24_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum stm33zp24_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_FIFO_AVALAIBLE_INT = 0x040,
+ TPM_INTF_WAKE_UP_READY_INT = 0x020,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_SHORT_TIMEOUT = 750,
+ TIS_LONG_TIMEOUT = 2000,
+};
+
+/*
+ * write8_reg
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: Returns negative errno, or else the number of bytes written.
+ */
+static int write8_reg(struct i2c_client *client, u8 tpm_register,
+ u8 *tpm_data, u16 tpm_size)
+{
+ struct st33zp24_platform_data *pin_infos;
+
+ pin_infos = client->dev.platform_data;
+
+ pin_infos->tpm_i2c_buffer[0][0] = tpm_register;
+ memcpy(&pin_infos->tpm_i2c_buffer[0][1], tpm_data, tpm_size);
+ return i2c_master_send(client, pin_infos->tpm_i2c_buffer[0],
+ tpm_size + 1);
+} /* write8_reg() */
+
+/*
+ * read8_reg
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: the number of bytes read on success, or a negative value on error.
+ */
+static int read8_reg(struct i2c_client *client, u8 tpm_register,
+ u8 *tpm_data, int tpm_size)
+{
+ u8 status = 0;
+ u8 data;
+
+ data = TPM_DUMMY_BYTE;
+ status = write8_reg(client, tpm_register, &data, 1);
+ if (status == 2)
+ status = i2c_master_recv(client, tpm_data, tpm_size);
+ return status;
+} /* read8_reg() */
+
+/*
+ * I2C_WRITE_DATA
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: client, the chip description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: the number of bytes written on success, or a negative value on error.
+ */
+#define I2C_WRITE_DATA(client, tpm_register, tpm_data, tpm_size) \
+ (write8_reg(client, tpm_register | \
+ TPM_WRITE_DIRECTION, tpm_data, tpm_size))
+
+/*
+ * I2C_READ_DATA
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: client, the chip description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: the number of bytes read on success, or a negative value on error.
+ */
+#define I2C_READ_DATA(client, tpm_register, tpm_data, tpm_size) \
+ (read8_reg(client, tpm_register, tpm_data, tpm_size))
+
+/*
+ * clear_interruption
+ * clear the TPM interrupt register.
+ * @param: tpm, the chip description
+ */
+static void clear_interruption(struct i2c_client *client)
+{
+ u8 interrupt;
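+
+	/* Read the pending interrupt flags, write them back to acknowledge
+	 * them, then read again to confirm the register has been cleared. */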
+ I2C_READ_DATA(client, TPM_INT_STATUS, &interrupt, 1);
+ I2C_WRITE_DATA(client, TPM_INT_STATUS, &interrupt, 1);
+ I2C_READ_DATA(client, TPM_INT_STATUS, &interrupt, 1);
+} /* clear_interruption() */
+
+/*
+ * _wait_for_interrupt_serirq_timeout
+ * @param: chip, the chip description
+ * @param: timeout, the timeout of the interrupt
+ * @return: the status of the interruption.
+ */
+static long _wait_for_interrupt_serirq_timeout(struct tpm_chip *chip,
+ unsigned long timeout)
+{
+ long status;
+ struct i2c_client *client;
+ struct st33zp24_platform_data *pin_infos;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+ pin_infos = client->dev.platform_data;
+
+ status = wait_for_completion_interruptible_timeout(
+ &pin_infos->irq_detection,
+ timeout);
+ if (status > 0)
+ enable_irq(gpio_to_irq(pin_infos->io_serirq));
+ gpio_direction_input(pin_infos->io_serirq);
+
+ return status;
+} /* _wait_for_interrupt_serirq_timeout() */
+
+static int wait_for_serirq_timeout(struct tpm_chip *chip, bool condition,
+ unsigned long timeout)
+{
+ int status = 2;
+ struct i2c_client *client;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ status = _wait_for_interrupt_serirq_timeout(chip, timeout);
+ if (!status) {
+ status = -EBUSY;
+	} else {
+ clear_interruption(client);
+ if (condition)
+ status = 1;
+ }
+ return status;
+}
+
+/*
+ * tpm_stm_i2c_cancel, cancel the current command by requesting the
+ * command-ready state.
+ * @param: chip, the tpm_chip description as specified in drivers/char/tpm/tpm.h
+ */
+static void tpm_stm_i2c_cancel(struct tpm_chip *chip)
+{
+ struct i2c_client *client;
+ u8 data;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ data = TPM_STS_COMMAND_READY;
+ I2C_WRITE_DATA(client, TPM_STS, &data, 1);
+ if (chip->vendor.irq)
+ wait_for_serirq_timeout(chip, 1, chip->vendor.timeout_a);
+} /* tpm_stm_i2c_cancel() */
+
+/*
+ * tpm_stm_i2c_status returns the TPM_STS register
+ * @param: chip, the tpm chip description
+ * @return: the TPM_STS register value.
+ */
+static u8 tpm_stm_i2c_status(struct tpm_chip *chip)
+{
+ struct i2c_client *client;
+ u8 data;
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ I2C_READ_DATA(client, TPM_STS, &data, 1);
+ return data;
+} /* tpm_stm_i2c_status() */
+
+
+/*
+ * check_locality checks if the locality is active
+ * @param: chip, the tpm chip description
+ * @return: the active locality or -EACCES.
+ */
+static int check_locality(struct tpm_chip *chip)
+{
+ struct i2c_client *client;
+ u8 data;
+ u8 status;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ status = I2C_READ_DATA(client, TPM_ACCESS, &data, 1);
+ if (status && (data &
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
+ return chip->vendor.locality;
+
+ return -EACCES;
+
+} /* check_locality() */
+
+/*
+ * request_locality requests the TPM locality
+ * @param: chip, the chip description
+ * @return: the active locality or -EACCES.
+ */
+static int request_locality(struct tpm_chip *chip)
+{
+ unsigned long stop;
+ long rc;
+ struct i2c_client *client;
+ u8 data;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ if (check_locality(chip) == chip->vendor.locality)
+ return chip->vendor.locality;
+
+ data = TPM_ACCESS_REQUEST_USE;
+ rc = I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1);
+ if (rc < 0)
+ goto end;
+
+ if (chip->vendor.irq) {
+ rc = wait_for_serirq_timeout(chip, (check_locality
+ (chip) >= 0),
+ chip->vendor.timeout_a);
+ if (rc > 0)
+ return chip->vendor.locality;
+	} else {
+ stop = jiffies + chip->vendor.timeout_a;
+ do {
+ if (check_locality(chip) >= 0)
+ return chip->vendor.locality;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ }
+ rc = -EACCES;
+end:
+ return rc;
+} /* request_locality() */
+
+/*
+ * release_locality releases the active locality
+ * @param: chip, the tpm chip description.
+ */
+static void release_locality(struct tpm_chip *chip)
+{
+ struct i2c_client *client;
+ u8 data;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+ data = TPM_ACCESS_ACTIVE_LOCALITY;
+
+ I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1);
+}
+
+/*
+ * get_burstcount returns the burst count read from registers 0x19 and 0x1A
+ * @param: chip, the chip description
+ * @return: the burst count, or -EBUSY on timeout.
+ */
+static int get_burstcount(struct tpm_chip *chip)
+{
+ unsigned long stop;
+ int burstcnt, status;
+ u8 tpm_reg, temp;
+
+ struct i2c_client *client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ stop = jiffies + chip->vendor.timeout_d;
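+	/* The 16-bit burst count is split across two consecutive registers
+	 * (TPM_STS + 1 and TPM_STS + 2); poll until it becomes non-zero. */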
+ do {
+ tpm_reg = TPM_STS + 1;
+ status = I2C_READ_DATA(client, tpm_reg, &temp, 1);
+ if (status < 0)
+ goto end;
+
+ tpm_reg = tpm_reg + 1;
+ burstcnt = temp;
+ status = I2C_READ_DATA(client, tpm_reg, &temp, 1);
+ if (status < 0)
+ goto end;
+
+ burstcnt |= temp << 8;
+ if (burstcnt)
+ return burstcnt;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+
+end:
+ return -EBUSY;
+} /* get_burstcount() */
+
+/*
+ * wait_for_stat waits for a TPM_STS value
+ * @param: chip, the tpm chip description
+ * @param: mask, the status mask to wait for
+ * @param: timeout, the timeout
+ * @param: queue, the wait queue.
+ * @return: 0 on success, -ETIME if the timeout is reached.
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+
+ if (chip->vendor.irq) {
+ rc = wait_for_serirq_timeout(chip, ((tpm_stm_i2c_status
+ (chip) & mask) ==
+ mask), timeout);
+ if (rc > 0)
+ return 0;
+	} else {
+ stop = jiffies + timeout;
+ do {
+ msleep(TPM_TIMEOUT);
+ status = tpm_stm_i2c_status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+} /* wait_for_stat() */
+
+/*
+ * recv_data receives data from the TPM FIFO
+ * @param: chip, the tpm chip description
+ * @param: buf, the buffer where the received data is stored
+ * @param: count, the number of bytes to receive
+ * @return: the number of bytes read from the TPM FIFO.
+ */
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int size = 0, burstcnt, len;
+ struct i2c_client *client;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ while (size < count &&
+ wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue)
+ == 0) {
+ burstcnt = get_burstcount(chip);
+ len = min_t(int, burstcnt, count - size);
+ I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
+ size += len;
+ }
+ return size;
+}
+
+/*
+ * tpm_ioserirq_handler the serirq interrupt handler
+ * @param: irq, the interrupt number
+ * @param: dev_id, the tpm chip description
+ * @return: the status of the handler.
+ */
+static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct i2c_client *client;
+ struct st33zp24_platform_data *pin_infos;
+
+ disable_irq_nosync(irq);
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+ pin_infos = client->dev.platform_data;
+
+ complete(&pin_infos->irq_detection);
+ return IRQ_HANDLED;
+} /* tpm_ioserirq_handler() */
+
+
+/*
+ * tpm_stm_i2c_send sends TPM commands through the I2C bus.
+ *
+ * @param: chip, the tpm_chip description as specified in drivers/char/tpm/tpm.h
+ * @param: buf, the buffer to send.
+ * @param: len, the number of bytes to send.
+ * @return: In case of success, the number of bytes sent.
+ *	    Otherwise, a negative value describing the error.
+ */
+static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
+ size_t len)
+{
+ u32 status,
+ burstcnt = 0, i, size;
+ int ret;
+ u8 data;
+ struct i2c_client *client;
+
+ if (chip == NULL)
+ return -EBUSY;
+ if (len < TPM_HEADER_SIZE)
+ return -EBUSY;
+
+ client = (struct i2c_client *)TPM_VPRIV(chip);
+
+ client->flags = 0;
+
+ ret = request_locality(chip);
+ if (ret < 0)
+ return ret;
+
+ status = tpm_stm_i2c_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_stm_i2c_cancel(chip);
+ if (wait_for_stat
+ (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
+ &chip->vendor.int_queue) < 0) {
+ ret = -ETIME;
+ goto out_err;
+ }
+ }
+
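+	/* Send all but the last byte, then check that the TPM still expects
+	 * data before writing the final byte. */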
+ for (i = 0 ; i < len - 1 ;) {
+ burstcnt = get_burstcount(chip);
+ size = min_t(int, len - i - 1, burstcnt);
+ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
+ if (ret < 0)
+ goto out_err;
+
+ i += size;
+ }
+
+ status = tpm_stm_i2c_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) == 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + len - 1, 1);
+ if (ret < 0)
+ goto out_err;
+
+ status = tpm_stm_i2c_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) != 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ data = TPM_STS_GO;
+ I2C_WRITE_DATA(client, TPM_STS, &data, 1);
+
+ return len;
+out_err:
+ tpm_stm_i2c_cancel(chip);
+ release_locality(chip);
+ return ret;
+}
+
+/*
+ * tpm_stm_i2c_recv receives the TPM response through the I2C bus.
+ * @param: chip, the tpm_chip description as specified in drivers/char/tpm/tpm.h.
+ * @param: buf, the buffer to store the received data.
+ * @param: count, the maximum number of bytes to receive.
+ * @return: In case of success, the number of bytes received.
+ *	    Otherwise, a negative value describing the error.
+ */
+static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf,
+ size_t count)
+{
+ int size = 0;
+ int expected;
+
+ if (chip == NULL)
+ return -EBUSY;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
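+	/* Read the fixed-size header first; it carries the total response
+	 * length used to fetch the remaining bytes. */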
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count) {
+ size = -EIO;
+ goto out;
+ }
+
+ size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (size < expected) {
+ dev_err(chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ goto out;
+ }
+
+out:
+ chip->vendor.cancel(chip);
+ release_locality(chip);
+ return size;
+}
+
+static bool tpm_st33_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct file_operations tpm_st33_i2c_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = tpm_read,
+ .write = tpm_write,
+ .open = tpm_open,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+
+static struct attribute *stm_tpm_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr, NULL,
+};
+
+static struct attribute_group stm_tpm_attr_grp = {
+ .attrs = stm_tpm_attrs
+};
+
+static struct tpm_vendor_specific st_i2c_tpm = {
+ .send = tpm_stm_i2c_send,
+ .recv = tpm_stm_i2c_recv,
+ .cancel = tpm_stm_i2c_cancel,
+ .status = tpm_stm_i2c_status,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_st33_i2c_req_canceled,
+ .attr_group = &stm_tpm_attr_grp,
+ .miscdev = {.fops = &tpm_st33_i2c_fops,},
+};
+
+static int interrupts;
+module_param(interrupts, int, 0444);
+MODULE_PARM_DESC(interrupts, "Enable interrupts");
+
+static int power_mgt = 1;
+module_param(power_mgt, int, 0444);
+MODULE_PARM_DESC(power_mgt, "Power Management");
+
+/*
+ * tpm_st33_i2c_probe initializes the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @param: id, the i2c_device_id struct.
+ * @return: 0 in case of success.
+ *	 a negative value otherwise.
+ */
+static int
+tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ int err;
+ u8 intmask;
+ struct tpm_chip *chip;
+ struct st33zp24_platform_data *platform_data;
+
+ if (client == NULL) {
+ pr_info("%s: i2c client is NULL. Device not accessible.\n",
+ __func__);
+ err = -ENODEV;
+ goto end;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_info(&client->dev, "client not i2c capable\n");
+ err = -ENODEV;
+ goto end;
+ }
+
+ chip = tpm_register_hardware(&client->dev, &st_i2c_tpm);
+ if (!chip) {
+ dev_info(&client->dev, "fail chip\n");
+ err = -ENODEV;
+ goto end;
+ }
+
+ platform_data = client->dev.platform_data;
+
+ if (!platform_data) {
+ dev_info(&client->dev, "chip not available\n");
+ err = -ENODEV;
+ goto _tpm_clean_answer;
+ }
+
+ platform_data->tpm_i2c_buffer[0] =
+ kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
+ if (platform_data->tpm_i2c_buffer[0] == NULL) {
+ err = -ENOMEM;
+ goto _tpm_clean_answer;
+ }
+ platform_data->tpm_i2c_buffer[1] =
+ kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
+ if (platform_data->tpm_i2c_buffer[1] == NULL) {
+ err = -ENOMEM;
+ goto _tpm_clean_response1;
+ }
+
+ TPM_VPRIV(chip) = client;
+
+ chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ chip->vendor.locality = LOCALITY0;
+
+ if (power_mgt) {
+ err = gpio_request(platform_data->io_lpcpd, "TPM IO_LPCPD");
+ if (err)
+ goto _gpio_init1;
+ gpio_set_value(platform_data->io_lpcpd, 1);
+ }
+
+ if (interrupts) {
+ init_completion(&platform_data->irq_detection);
+ if (request_locality(chip) != LOCALITY0) {
+ err = -ENODEV;
+ goto _tpm_clean_response2;
+ }
+ err = gpio_request(platform_data->io_serirq, "TPM IO_SERIRQ");
+ if (err)
+ goto _gpio_init2;
+
+ clear_interruption(client);
+ err = request_irq(gpio_to_irq(platform_data->io_serirq),
+ &tpm_ioserirq_handler,
+ IRQF_TRIGGER_HIGH,
+ "TPM SERIRQ management", chip);
+ if (err < 0) {
+ dev_err(chip->dev , "TPM SERIRQ signals %d not available\n",
+ gpio_to_irq(platform_data->io_serirq));
+ goto _irq_set;
+ }
+
+ err = I2C_READ_DATA(client, TPM_INT_ENABLE, &intmask, 1);
+ if (err < 0)
+ goto _irq_set;
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_FIFO_AVALAIBLE_INT
+ | TPM_INTF_WAKE_UP_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT
+ | TPM_INTF_STS_VALID_INT
+ | TPM_INTF_DATA_AVAIL_INT;
+
+ err = I2C_WRITE_DATA(client, TPM_INT_ENABLE, &intmask, 1);
+ if (err < 0)
+ goto _irq_set;
+
+ intmask = TPM_GLOBAL_INT_ENABLE;
+ err = I2C_WRITE_DATA(client, (TPM_INT_ENABLE + 3), &intmask, 1);
+ if (err < 0)
+ goto _irq_set;
+
+ err = I2C_READ_DATA(client, TPM_INT_STATUS, &intmask, 1);
+ if (err < 0)
+ goto _irq_set;
+
+ chip->vendor.irq = interrupts;
+
+ tpm_gen_interrupt(chip);
+ }
+
+ tpm_get_timeouts(chip);
+
+ i2c_set_clientdata(client, chip);
+
+ dev_info(chip->dev, "TPM I2C Initialized\n");
+ return 0;
+_irq_set:
+ free_irq(gpio_to_irq(platform_data->io_serirq), (void *) chip);
+_gpio_init2:
+ if (interrupts)
+ gpio_free(platform_data->io_serirq);
+_gpio_init1:
+ if (power_mgt)
+ gpio_free(platform_data->io_lpcpd);
+_tpm_clean_response2:
+ kzfree(platform_data->tpm_i2c_buffer[1]);
+ platform_data->tpm_i2c_buffer[1] = NULL;
+_tpm_clean_response1:
+ kzfree(platform_data->tpm_i2c_buffer[0]);
+ platform_data->tpm_i2c_buffer[0] = NULL;
+_tpm_clean_answer:
+ tpm_remove_hardware(chip->dev);
+end:
+ pr_info("TPM I2C initialisation fail\n");
+ return err;
+}
+
+/*
+ * tpm_st33_i2c_remove removes the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @return: 0 in case of success.
+ */
+static int tpm_st33_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = (struct tpm_chip *)i2c_get_clientdata(client);
+ struct st33zp24_platform_data *pin_infos =
+ ((struct i2c_client *) TPM_VPRIV(chip))->dev.platform_data;
+
+ if (pin_infos != NULL) {
+ free_irq(pin_infos->io_serirq, chip);
+
+ gpio_free(pin_infos->io_serirq);
+ gpio_free(pin_infos->io_lpcpd);
+
+ tpm_remove_hardware(chip->dev);
+
+ if (pin_infos->tpm_i2c_buffer[1] != NULL) {
+ kzfree(pin_infos->tpm_i2c_buffer[1]);
+ pin_infos->tpm_i2c_buffer[1] = NULL;
+ }
+ if (pin_infos->tpm_i2c_buffer[0] != NULL) {
+ kzfree(pin_infos->tpm_i2c_buffer[0]);
+ pin_infos->tpm_i2c_buffer[0] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * tpm_st33_i2c_pm_suspend suspends the TPM device
+ * Workaround: when suspending while no TPM application is running, suspend may
+ * fail because chip->data_buffer is not set (it is only set in tpm_open in the
+ * Linux TPM core).
+ * @param: dev, the device to suspend.
+ * @return: 0 in case of success.
+ */
+static int tpm_st33_i2c_pm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_platform_data *pin_infos = dev->platform_data;
+ int ret = 0;
+
+ if (power_mgt)
+ gpio_set_value(pin_infos->io_lpcpd, 0);
+	else {
+ if (chip->data_buffer == NULL)
+ chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
+ ret = tpm_pm_suspend(dev);
+ }
+ return ret;
+} /* tpm_st33_i2c_suspend() */
+
+/*
+ * tpm_st33_i2c_pm_resume resumes the TPM device
+ * @param: dev, the device to resume.
+ * @return: 0 in case of success.
+ */
+static int tpm_st33_i2c_pm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_platform_data *pin_infos = dev->platform_data;
+
+ int ret = 0;
+
+ if (power_mgt) {
+ gpio_set_value(pin_infos->io_lpcpd, 1);
+ ret = wait_for_serirq_timeout(chip,
+ (chip->vendor.status(chip) &
+ TPM_STS_VALID) == TPM_STS_VALID,
+ chip->vendor.timeout_b);
+	} else {
+ if (chip->data_buffer == NULL)
+ chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
+ ret = tpm_pm_resume(dev);
+ if (!ret)
+ tpm_do_selftest(chip);
+ }
+ return ret;
+} /* tpm_st33_i2c_pm_resume() */
+#endif
+
+static const struct i2c_device_id tpm_st33_i2c_id[] = {
+ {TPM_ST33_I2C, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_st33_i2c_id);
+static SIMPLE_DEV_PM_OPS(tpm_st33_i2c_ops, tpm_st33_i2c_pm_suspend, tpm_st33_i2c_pm_resume);
+static struct i2c_driver tpm_st33_i2c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = TPM_ST33_I2C,
+ .pm = &tpm_st33_i2c_ops,
+ },
+ .probe = tpm_st33_i2c_probe,
+ .remove = tpm_st33_i2c_remove,
+ .id_table = tpm_st33_i2c_id
+};
+
+module_i2c_driver(tpm_st33_i2c_driver);
+
+MODULE_AUTHOR("Christophe Ricard (tpmsupport@st.com)");
+MODULE_DESCRIPTION("STM TPM I2C ST33 Driver");
+MODULE_VERSION("1.2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.h b/drivers/char/tpm/tpm_i2c_stm_st33.h
new file mode 100644
index 000000000000..439a43249aa6
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.h
@@ -0,0 +1,61 @@
+/*
+ * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009, 2010 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * STMicroelectronics version 1.2.0, Copyright (C) 2010
+ * STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
+ * This is free software, and you are welcome to redistribute it
+ * under certain conditions.
+ *
+ * @Author: Christophe RICARD tpmsupport@st.com
+ *
+ * @File: tpm_i2c_stm_st33.h
+ *
+ * @Date: 09/15/2010
+ */
+#ifndef __STM_ST33_TPM_I2C_MAIN_H__
+#define __STM_ST33_TPM_I2C_MAIN_H__
+
+#define TPM_ACCESS (0x0)
+#define TPM_STS (0x18)
+#define TPM_HASH_END (0x20)
+#define TPM_DATA_FIFO (0x24)
+#define TPM_HASH_DATA (0x24)
+#define TPM_HASH_START (0x28)
+#define TPM_INTF_CAPABILITY (0x14)
+#define TPM_INT_STATUS (0x10)
+#define TPM_INT_ENABLE (0x08)
+
+#define TPM_DUMMY_BYTE 0xAA
+#define TPM_WRITE_DIRECTION 0x80
+#define TPM_HEADER_SIZE 10
+#define TPM_BUFSIZE 2048
+
+#define LOCALITY0 0
+
+#define TPM_ST33_I2C "st33zp24_i2c"
+
+struct st33zp24_platform_data {
+ int io_serirq;
+ int io_lpcpd;
+ struct i2c_client *client;
+ u8 *tpm_i2c_buffer[2]; /* 0 Request 1 Response */
+ struct completion irq_detection;
+ struct mutex lock;
+};
+
+#endif /* __STM_ST33_TPM_I2C_MAIN_H__ */
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 7da840d487d2..56b07c35a13e 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -38,8 +38,6 @@ static struct vio_device_id tpm_ibmvtpm_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
-DECLARE_WAIT_QUEUE_HEAD(wq);
-
/**
* ibmvtpm_send_crq - Send a CRQ request
* @vdev: vio device struct
@@ -66,7 +64,7 @@ static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip)
- return (struct ibmvtpm_dev *)chip->vendor.data;
+ return (struct ibmvtpm_dev *)TPM_VPRIV(chip);
return NULL;
}
@@ -83,30 +81,32 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm;
u16 len;
+ int sig;
- ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
+ ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
if (!ibmvtpm->rtce_buf) {
dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
return 0;
}
- wait_event_interruptible(wq, ibmvtpm->crq_res.len != 0);
+ sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
+ if (sig)
+ return -EINTR;
+
+ len = ibmvtpm->res_len;
- if (count < ibmvtpm->crq_res.len) {
+ if (count < len) {
dev_err(ibmvtpm->dev,
"Invalid size in recv: count=%ld, crq_size=%d\n",
- count, ibmvtpm->crq_res.len);
+ count, len);
return -EIO;
}
spin_lock(&ibmvtpm->rtce_lock);
- memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, ibmvtpm->crq_res.len);
- memset(ibmvtpm->rtce_buf, 0, ibmvtpm->crq_res.len);
- ibmvtpm->crq_res.valid = 0;
- ibmvtpm->crq_res.msg = 0;
- len = ibmvtpm->crq_res.len;
- ibmvtpm->crq_res.len = 0;
+ memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+ memset(ibmvtpm->rtce_buf, 0, len);
+ ibmvtpm->res_len = 0;
spin_unlock(&ibmvtpm->rtce_lock);
return len;
}
@@ -127,7 +127,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
u64 *word = (u64 *) &crq;
int rc;
- ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
+ ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
if (!ibmvtpm->rtce_buf) {
dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
@@ -273,7 +273,6 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
int rc = 0;
free_irq(vdev->irq, ibmvtpm);
- tasklet_kill(&ibmvtpm->tasklet);
do {
if (rc)
@@ -372,7 +371,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
static int tpm_ibmvtpm_resume(struct device *dev)
{
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
- unsigned long flags;
int rc = 0;
do {
@@ -387,10 +385,11 @@ static int tpm_ibmvtpm_resume(struct device *dev)
return rc;
}
- spin_lock_irqsave(&ibmvtpm->lock, flags);
- vio_disable_interrupts(ibmvtpm->vdev);
- tasklet_schedule(&ibmvtpm->tasklet);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+ rc = vio_enable_interrupts(ibmvtpm->vdev);
+ if (rc) {
+ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+ return rc;
+ }
rc = ibmvtpm_crq_send_init(ibmvtpm);
if (rc)
@@ -399,6 +398,11 @@ static int tpm_ibmvtpm_resume(struct device *dev)
return rc;
}
+static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == 0);
+}
+
static const struct file_operations ibmvtpm_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -442,7 +446,7 @@ static const struct tpm_vendor_specific tpm_ibmvtpm = {
.status = tpm_ibmvtpm_status,
.req_complete_mask = 0,
.req_complete_val = 0,
- .req_canceled = 0,
+ .req_canceled = tpm_ibmvtpm_req_canceled,
.attr_group = &ibmvtpm_attr_grp,
.miscdev = { .fops = &ibmvtpm_ops, },
};
@@ -467,7 +471,7 @@ static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
if (crq->valid & VTPM_MSG_RES) {
if (++crq_q->index == crq_q->num_entry)
crq_q->index = 0;
- rmb();
+ smp_rmb();
} else
crq = NULL;
return crq;
@@ -535,11 +539,9 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
ibmvtpm->vtpm_version = crq->data;
return;
case VTPM_TPM_COMMAND_RES:
- ibmvtpm->crq_res.valid = crq->valid;
- ibmvtpm->crq_res.msg = crq->msg;
- ibmvtpm->crq_res.len = crq->len;
- ibmvtpm->crq_res.data = crq->data;
- wake_up_interruptible(&wq);
+ /* len of the data in rtce buffer */
+ ibmvtpm->res_len = crq->len;
+ wake_up_interruptible(&ibmvtpm->wq);
return;
default:
return;
@@ -559,38 +561,19 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
- unsigned long flags;
-
- spin_lock_irqsave(&ibmvtpm->lock, flags);
- vio_disable_interrupts(ibmvtpm->vdev);
- tasklet_schedule(&ibmvtpm->tasklet);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ibmvtpm_tasklet - Interrupt handler tasklet
- * @data: ibm vtpm device struct
- *
- * Returns:
- * Nothing
- **/
-static void ibmvtpm_tasklet(void *data)
-{
- struct ibmvtpm_dev *ibmvtpm = data;
struct ibmvtpm_crq *crq;
- unsigned long flags;
- spin_lock_irqsave(&ibmvtpm->lock, flags);
+ /* while loop is needed for initial setup (get version and
+ * get rtce_size). There should be only one tpm request at any
+ * given time.
+ */
while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
ibmvtpm_crq_process(crq, ibmvtpm);
crq->valid = 0;
- wmb();
+ smp_wmb();
}
- vio_enable_interrupts(ibmvtpm->vdev);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+ return IRQ_HANDLED;
}
/**
@@ -650,9 +633,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto reg_crq_cleanup;
}
- tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
- (unsigned long)ibmvtpm);
-
rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
tpm_ibmvtpm_driver_name, ibmvtpm);
if (rc) {
@@ -666,13 +646,14 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto init_irq_cleanup;
}
+ init_waitqueue_head(&ibmvtpm->wq);
+
crq_q->index = 0;
ibmvtpm->dev = dev;
ibmvtpm->vdev = vio_dev;
- chip->vendor.data = (void *)ibmvtpm;
+ TPM_VPRIV(chip) = (void *)ibmvtpm;
- spin_lock_init(&ibmvtpm->lock);
spin_lock_init(&ibmvtpm->rtce_lock);
rc = ibmvtpm_crq_send_init(ibmvtpm);
@@ -689,7 +670,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
return rc;
init_irq_cleanup:
- tasklet_kill(&ibmvtpm->tasklet);
do {
rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index 4296eb4b4d82..bd82a791f995 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -38,13 +38,12 @@ struct ibmvtpm_dev {
struct vio_dev *vdev;
struct ibmvtpm_crq_queue crq_queue;
dma_addr_t crq_dma_handle;
- spinlock_t lock;
- struct tasklet_struct tasklet;
u32 rtce_size;
void __iomem *rtce_buf;
dma_addr_t rtce_dma_handle;
spinlock_t rtce_lock;
- struct ibmvtpm_crq crq_res;
+ wait_queue_head_t wq;
+ u16 res_len;
u32 vtpm_version;
};
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 640c9a427b59..770c46f8eb30 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -227,6 +227,11 @@ static u8 tpm_nsc_status(struct tpm_chip *chip)
return inb(chip->vendor.base + NSC_STATUS);
}
+static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == NSC_STATUS_RDY);
+}
+
static const struct file_operations nsc_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -258,7 +263,7 @@ static const struct tpm_vendor_specific tpm_nsc = {
.status = tpm_nsc_status,
.req_complete_mask = NSC_STATUS_OBF,
.req_complete_val = NSC_STATUS_OBF,
- .req_canceled = NSC_STATUS_RDY,
+ .req_canceled = tpm_nsc_req_canceled,
.attr_group = &nsc_attr_grp,
.miscdev = { .fops = &nsc_ops, },
};
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index ea31dafbcac2..8a41b6be23a0 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -84,6 +84,9 @@ static int is_itpm(struct pnp_dev *dev)
struct acpi_device *acpi = pnp_acpi_device(dev);
struct acpi_hardware_id *id;
+ if (!acpi)
+ return 0;
+
list_for_each_entry(id, &acpi->pnp.ids, list) {
if (!strcmp("INTC0102", id->id))
return 1;
@@ -98,6 +101,22 @@ static inline int is_itpm(struct pnp_dev *dev)
}
#endif
+/* Before we attempt to access the TPM we must see that the valid bit is set.
+ * The specification says that this bit is 0 at reset and remains 0 until the
+ * 'TPM has gone through its self test and initialization and has established
+ * correct values in the other bits.' */
+static int wait_startup(struct tpm_chip *chip, int l)
+{
+ unsigned long stop = jiffies + chip->vendor.timeout_a;
+ do {
+ if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
+ TPM_ACCESS_VALID)
+ return 0;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -1;
+}
+
static int check_locality(struct tpm_chip *chip, int l)
{
if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
@@ -198,7 +217,7 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
wait_for_tpm_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
chip->vendor.timeout_c,
- &chip->vendor.read_queue)
+ &chip->vendor.read_queue, true)
== 0) {
burstcnt = get_burstcount(chip);
for (; burstcnt > 0 && size < count; burstcnt--)
@@ -241,7 +260,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ &chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->dev, "Error left over data\n");
@@ -277,7 +296,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
tpm_tis_ready(chip);
if (wait_for_tpm_stat
(chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
- &chip->vendor.int_queue) < 0) {
+ &chip->vendor.int_queue, false) < 0) {
rc = -ETIME;
goto out_err;
}
@@ -292,7 +311,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
}
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ &chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -304,7 +323,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
iowrite8(buf[count],
chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ &chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if ((status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
@@ -342,7 +361,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
if (wait_for_tpm_stat
(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
tpm_calc_ordinal_duration(chip, ordinal),
- &chip->vendor.read_queue) < 0) {
+ &chip->vendor.read_queue, false) < 0) {
rc = -ETIME;
goto out_err;
}
@@ -374,7 +393,7 @@ static int probe_itpm(struct tpm_chip *chip)
if (vendor != TPM_VID_INTEL)
return 0;
- itpm = 0;
+ itpm = false;
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0)
@@ -383,7 +402,7 @@ static int probe_itpm(struct tpm_chip *chip)
tpm_tis_ready(chip);
release_locality(chip, chip->vendor.locality, 0);
- itpm = 1;
+ itpm = true;
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
@@ -400,6 +419,19 @@ out:
return rc;
}
+static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+{
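+	/* The status left behind by a cancelled command differs between TPM
+	 * vendors, so key off the manufacturer ID captured at init time. */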
+ switch (chip->vendor.manufacturer_id) {
+ case TPM_VID_WINBOND:
+ return ((status == TPM_STS_VALID) ||
+ (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+ case TPM_VID_STM:
+ return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+ default:
+ return (status == TPM_STS_COMMAND_READY);
+ }
+}
+
static const struct file_operations tis_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -445,7 +477,7 @@ static struct tpm_vendor_specific tpm_tis = {
.cancel = tpm_tis_ready,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_canceled = TPM_STS_COMMAND_READY,
+ .req_canceled = tpm_tis_req_canceled,
.attr_group = &tis_attr_grp,
.miscdev = {
.fops = &tis_ops,},
@@ -502,7 +534,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static bool interrupts = 1;
+static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
@@ -528,12 +560,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ if (wait_startup(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
if (request_locality(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
}
vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
+ chip->vendor.manufacturer_id = vendor;
dev_info(dev,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
@@ -545,7 +583,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
rc = -ENODEV;
goto out_err;
}
- itpm = (probe == 0) ? 0 : 1;
+ itpm = !!probe;
}
if (itpm)
@@ -741,10 +779,10 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
if (pnp_irq_valid(pnp_dev, 0))
irq = pnp_irq(pnp_dev, 0);
else
- interrupts = 0;
+ interrupts = false;
if (is_itpm(pnp_dev))
- itpm = 1;
+ itpm = true;
return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 90493d4ead1f..ee4dbeafb377 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -37,8 +37,12 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/kconfig.h>
#include "../tty/hvc/hvc_console.h"
+#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+
/*
* This is a global struct for storing common data for all the devices
* this driver handles.
@@ -111,6 +115,21 @@ struct port_buffer {
size_t len;
/* offset in the buf from which to consume data */
size_t offset;
+
+ /* DMA address of buffer */
+ dma_addr_t dma;
+
+ /* Device we got DMA memory from */
+ struct device *dev;
+
+ /* List of pending dma buffers to free */
+ struct list_head list;
+
+ /* If sgpages == 0 then buf is used */
+ unsigned int sgpages;
+
+	/* sg is used if sgpages > 0. sg must be the last field in this struct */
+ struct scatterlist sg[0];
};
/*
@@ -325,6 +344,11 @@ static bool is_console_port(struct port *port)
return false;
}
+static bool is_rproc_serial(const struct virtio_device *vdev)
+{
+ return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
+}
+
static inline bool use_multiport(struct ports_device *portdev)
{
/*
@@ -336,20 +360,110 @@ static inline bool use_multiport(struct ports_device *portdev)
return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
}
-static void free_buf(struct port_buffer *buf)
+static DEFINE_SPINLOCK(dma_bufs_lock);
+static LIST_HEAD(pending_free_dma_bufs);
+
+static void free_buf(struct port_buffer *buf, bool can_sleep)
{
- kfree(buf->buf);
+ unsigned int i;
+
+ for (i = 0; i < buf->sgpages; i++) {
+ struct page *page = sg_page(&buf->sg[i]);
+ if (!page)
+ break;
+ put_page(page);
+ }
+
+ if (!buf->dev) {
+ kfree(buf->buf);
+ } else if (is_rproc_enabled) {
+ unsigned long flags;
+
+ /* dma_free_coherent requires interrupts to be enabled. */
+ if (!can_sleep) {
+ /* queue up dma-buffers to be freed later */
+ spin_lock_irqsave(&dma_bufs_lock, flags);
+ list_add_tail(&buf->list, &pending_free_dma_bufs);
+ spin_unlock_irqrestore(&dma_bufs_lock, flags);
+ return;
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
+
+ /* Release device refcnt and allow it to be freed */
+ put_device(buf->dev);
+ }
+
kfree(buf);
}
-static struct port_buffer *alloc_buf(size_t buf_size)
+static void reclaim_dma_bufs(void)
+{
+ unsigned long flags;
+ struct port_buffer *buf, *tmp;
+ LIST_HEAD(tmp_list);
+
+ if (list_empty(&pending_free_dma_bufs))
+ return;
+
+ /* Create a copy of the pending_free_dma_bufs while holding the lock */
+ spin_lock_irqsave(&dma_bufs_lock, flags);
+ list_cut_position(&tmp_list, &pending_free_dma_bufs,
+ pending_free_dma_bufs.prev);
+ spin_unlock_irqrestore(&dma_bufs_lock, flags);
+
+ /* Release the dma buffers, without irqs enabled */
+ list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
+ list_del(&buf->list);
+ free_buf(buf, true);
+ }
+}
+
+static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+ int pages)
{
struct port_buffer *buf;
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ reclaim_dma_bufs();
+
+ /*
+ * Allocate buffer and the sg list. The sg list array is allocated
+ * directly after the port_buffer struct.
+ */
+ buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
+ GFP_KERNEL);
if (!buf)
goto fail;
- buf->buf = kzalloc(buf_size, GFP_KERNEL);
+
+ buf->sgpages = pages;
+ if (pages > 0) {
+ buf->dev = NULL;
+ buf->buf = NULL;
+ return buf;
+ }
+
+ if (is_rproc_serial(vq->vdev)) {
+ /*
+ * Allocate DMA memory from ancestor. When a virtio
+ * device is created by remoteproc, the DMA memory is
+ * associated with the grandparent device:
+ * vdev => rproc => platform-dev.
+ * The code here would have been less quirky if
+ * DMA_MEMORY_INCLUDES_CHILDREN had been supported
+ * in dma-coherent.c
+ */
+ if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+ goto free_buf;
+ buf->dev = vq->vdev->dev.parent->parent;
+
+ /* Increase device refcnt to avoid freeing it */
+ get_device(buf->dev);
+ buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
+ GFP_KERNEL);
+ } else {
+ buf->dev = NULL;
+ buf->buf = kmalloc(buf_size, GFP_KERNEL);
+ }
+
if (!buf->buf)
goto free_buf;
buf->len = 0;
@@ -396,6 +510,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
virtqueue_kick(vq);
+ if (!ret)
+ ret = vq->num_free;
return ret;
}
@@ -416,7 +532,7 @@ static void discard_port_data(struct port *port)
port->stats.bytes_discarded += buf->len - buf->offset;
if (add_inbuf(port->in_vq, buf) < 0) {
err++;
- free_buf(buf);
+ free_buf(buf, false);
}
port->inbuf = NULL;
buf = get_inbuf(port);
@@ -459,7 +575,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
@@ -476,55 +592,29 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
return 0;
}
-struct buffer_token {
- union {
- void *buf;
- struct scatterlist *sg;
- } u;
- /* If sgpages == 0 then buf is used, else sg is used */
- unsigned int sgpages;
-};
-
-static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
-{
- int i;
- struct page *page;
-
- for (i = 0; i < nrpages; i++) {
- page = sg_page(&sg[i]);
- if (!page)
- break;
- put_page(page);
- }
- kfree(sg);
-}
/* Callers must take the port->outvq_lock */
static void reclaim_consumed_buffers(struct port *port)
{
- struct buffer_token *tok;
+ struct port_buffer *buf;
unsigned int len;
if (!port->portdev) {
/* Device has been unplugged. vqs are already gone. */
return;
}
- while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
- if (tok->sgpages)
- reclaim_sg_pages(tok->u.sg, tok->sgpages);
- else
- kfree(tok->u.buf);
- kfree(tok);
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ free_buf(buf, false);
port->outvq_full = false;
}
}
static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
int nents, size_t in_count,
- struct buffer_token *tok, bool nonblock)
+ void *data, bool nonblock)
{
struct virtqueue *out_vq;
- ssize_t ret;
+ int err;
unsigned long flags;
unsigned int len;
@@ -534,17 +624,17 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
reclaim_consumed_buffers(port);
- ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
+ err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
- if (ret < 0) {
+ if (err) {
in_count = 0;
goto done;
}
- if (ret == 0)
+ if (out_vq->num_free == 0)
port->outvq_full = true;
if (nonblock)
@@ -572,37 +662,6 @@ done:
return in_count;
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
- bool nonblock)
-{
- struct scatterlist sg[1];
- struct buffer_token *tok;
-
- tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
- if (!tok)
- return -ENOMEM;
- tok->sgpages = 0;
- tok->u.buf = in_buf;
-
- sg_init_one(sg, in_buf, in_count);
-
- return __send_to_port(port, sg, 1, in_count, tok, nonblock);
-}
-
-static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
- size_t in_count, bool nonblock)
-{
- struct buffer_token *tok;
-
- tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
- if (!tok)
- return -ENOMEM;
- tok->sgpages = nents;
- tok->u.sg = sg;
-
- return __send_to_port(port, sg, nents, in_count, tok, nonblock);
-}
-
/*
* Give out the data that's requested from the buffer that we have
* queued up.
@@ -748,9 +807,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *offp)
{
struct port *port;
- char *buf;
+ struct port_buffer *buf;
ssize_t ret;
bool nonblock;
+ struct scatterlist sg[1];
/* Userspace could be out to fool us */
if (!count)
@@ -766,11 +826,11 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
count = min((size_t)(32 * 1024), count);
- buf = kmalloc(count, GFP_KERNEL);
+ buf = alloc_buf(port->out_vq, count, 0);
if (!buf)
return -ENOMEM;
- ret = copy_from_user(buf, ubuf, count);
+ ret = copy_from_user(buf->buf, ubuf, count);
if (ret) {
ret = -EFAULT;
goto free_buf;
@@ -784,13 +844,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
* through to the host.
*/
nonblock = true;
- ret = send_buf(port, buf, count, nonblock);
+ sg_init_one(sg, buf->buf, count);
+ ret = __send_to_port(port, sg, 1, count, buf, nonblock);
if (nonblock && ret > 0)
goto out;
free_buf:
- kfree(buf);
+ free_buf(buf, true);
out:
return ret;
}
@@ -856,6 +917,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
struct port *port = filp->private_data;
struct sg_list sgl;
ssize_t ret;
+ struct port_buffer *buf;
struct splice_desc sd = {
.total_len = len,
.flags = flags,
@@ -863,22 +925,34 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
.u.data = &sgl,
};
+ /*
+ * Rproc_serial does not yet support splice. To support splice
+ * pipe_to_sg() must allocate dma-buffers and copy content from
+ * regular pages to dma pages. And alloc_buf and free_buf must
+ * support allocating and freeing such a list of dma-buffers.
+ */
+ if (is_rproc_serial(port->out_vq->vdev))
+ return -EINVAL;
+
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
return ret;
+ buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+ if (!buf)
+ return -ENOMEM;
+
sgl.n = 0;
sgl.len = 0;
sgl.size = pipe->nrbufs;
- sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
- if (unlikely(!sgl.sg))
- return -ENOMEM;
-
+ sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
if (likely(ret > 0))
- ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+ ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
+ if (unlikely(ret <= 0))
+ free_buf(buf, true);
return ret;
}
@@ -927,6 +1001,7 @@ static int port_fops_release(struct inode *inode, struct file *filp)
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
+ reclaim_dma_bufs();
/*
* Locks aren't necessary here as a port can't be opened after
* unplug, and if a port isn't unplugged, a kref would already
@@ -1031,6 +1106,7 @@ static const struct file_operations port_fops = {
static int put_chars(u32 vtermno, const char *buf, int count)
{
struct port *port;
+ struct scatterlist sg[1];
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
@@ -1039,7 +1115,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
if (!port)
return -EPIPE;
- return send_buf(port, (void *)buf, count, false);
+ sg_init_one(sg, buf, count);
+ return __send_to_port(port, sg, 1, count, (void *)buf, false);
}
/*
@@ -1076,7 +1153,10 @@ static void resize_console(struct port *port)
return;
vdev = port->portdev->vdev;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+
+ /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
+ if (!is_rproc_serial(vdev) &&
+ virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
hvc_resize(port->cons.hvc, port->cons.ws);
}
@@ -1260,7 +1340,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
nr_added_bufs = 0;
do {
- buf = alloc_buf(PAGE_SIZE);
+ buf = alloc_buf(vq, PAGE_SIZE, 0);
if (!buf)
break;
@@ -1268,7 +1348,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
ret = add_inbuf(vq, buf);
if (ret < 0) {
spin_unlock_irq(lock);
- free_buf(buf);
+ free_buf(buf, true);
break;
}
nr_added_bufs++;
@@ -1356,10 +1436,18 @@ static int add_port(struct ports_device *portdev, u32 id)
goto free_device;
}
- /*
- * If we're not using multiport support, this has to be a console port
- */
- if (!use_multiport(port->portdev)) {
+ if (is_rproc_serial(port->portdev->vdev))
+ /*
+ * For rproc_serial assume remote processor is connected.
+ * rproc_serial does not want the console port, only
+ * the generic port implementation.
+ */
+ port->host_connected = true;
+ else if (!use_multiport(port->portdev)) {
+ /*
+ * If we're not using multiport support,
+ * this has to be a console port.
+ */
err = init_port_console(port);
if (err)
goto free_inbufs;
@@ -1392,7 +1480,7 @@ static int add_port(struct ports_device *portdev, u32 id)
free_inbufs:
while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf);
+ free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1434,7 +1522,11 @@ static void remove_port_data(struct port *port)
/* Remove buffers we queued up for the Host to send us data in. */
while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf);
+ free_buf(buf, true);
+
+ /* Free pending buffers from the out-queue. */
+ while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+ free_buf(buf, true);
}
/*
@@ -1636,7 +1728,7 @@ static void control_work_handler(struct work_struct *work)
if (add_inbuf(portdev->c_ivq, buf) < 0) {
dev_warn(&portdev->vdev->dev,
"Error adding buffer to queue\n");
- free_buf(buf);
+ free_buf(buf, false);
}
}
spin_unlock(&portdev->cvq_lock);
@@ -1832,10 +1924,10 @@ static void remove_controlq_data(struct ports_device *portdev)
return;
while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
- free_buf(buf);
+ free_buf(buf, true);
while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
- free_buf(buf);
+ free_buf(buf, true);
}
/*
@@ -1882,11 +1974,15 @@ static int virtcons_probe(struct virtio_device *vdev)
multiport = false;
portdev->config.max_nr_ports = 1;
- if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
- offsetof(struct virtio_console_config,
- max_nr_ports),
- &portdev->config.max_nr_ports) == 0)
+
+ /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
+ if (!is_rproc_serial(vdev) &&
+ virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+ offsetof(struct virtio_console_config,
+ max_nr_ports),
+ &portdev->config.max_nr_ports) == 0) {
multiport = true;
+ }
err = init_vqs(portdev);
if (err < 0) {
@@ -1966,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
/* Disable interrupts for vqs */
vdev->config->reset(vdev);
/* Finish up work that's lined up */
- cancel_work_sync(&portdev->control_work);
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
unplug_port(port);
@@ -1996,6 +2093,16 @@ static unsigned int features[] = {
VIRTIO_CONSOLE_F_MULTIPORT,
};
+static struct virtio_device_id rproc_serial_id_table[] = {
+#if IS_ENABLED(CONFIG_REMOTEPROC)
+ { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
+#endif
+ { 0 },
+};
+
+static unsigned int rproc_serial_features[] = {
+};
+
#ifdef CONFIG_PM
static int virtcons_freeze(struct virtio_device *vdev)
{
@@ -2080,6 +2187,16 @@ static struct virtio_driver virtio_console = {
#endif
};
+static struct virtio_driver virtio_rproc_serial = {
+ .feature_table = rproc_serial_features,
+ .feature_table_size = ARRAY_SIZE(rproc_serial_features),
+ .driver.name = "virtio_rproc_serial",
+ .driver.owner = THIS_MODULE,
+ .id_table = rproc_serial_id_table,
+ .probe = virtcons_probe,
+ .remove = virtcons_remove,
+};
+
static int __init init(void)
{
int err;
@@ -2104,7 +2221,15 @@ static int __init init(void)
pr_err("Error %d registering virtio driver\n", err);
goto free;
}
+ err = register_virtio_driver(&virtio_rproc_serial);
+ if (err < 0) {
+ pr_err("Error %d registering virtio rproc serial driver\n",
+ err);
+ goto unregister;
+ }
return 0;
+unregister:
+ unregister_virtio_driver(&virtio_console);
free:
if (pdrvdata.debugfs_dir)
debugfs_remove_recursive(pdrvdata.debugfs_dir);
@@ -2114,7 +2239,10 @@ free:
static void __exit fini(void)
{
+ reclaim_dma_bufs();
+
unregister_virtio_driver(&virtio_console);
+ unregister_virtio_driver(&virtio_rproc_serial);
class_destroy(pdrvdata.class);
if (pdrvdata.debugfs_dir)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 823f62d900ba..a47e6ee98b8c 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -64,3 +64,5 @@ config CLK_TWL6040
as functional clock.
endmenu
+
+source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 4e1ccb1e6614..300d4775d926 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,8 +1,13 @@
# common clock types
obj-$(CONFIG_HAVE_CLK) += clk-devres.o
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
-obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
- clk-mux.o clk-divider.o clk-fixed-factor.o
+obj-$(CONFIG_COMMON_CLK) += clk.o
+obj-$(CONFIG_COMMON_CLK) += clk-divider.o
+obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
+obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
+obj-$(CONFIG_COMMON_CLK) += clk-gate.o
+obj-$(CONFIG_COMMON_CLK) += clk-mux.o
+
# SoCs specific
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
@@ -13,14 +18,17 @@ obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_ARCH_PRIMA2) += clk-prima2.o
+obj-$(CONFIG_PLAT_ORION) += mvebu/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
-obj-$(CONFIG_ARCH_SUNXI) += clk-sunxi.o
obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
+
+obj-$(CONFIG_X86) += x86/
# Chip specific
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index e69991aab43a..792bc57a9db7 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -20,6 +20,13 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/bcm2835.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+static const __initconst struct of_device_id clk_match[] = {
+ { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
+ { }
+};
/*
* These are fixed clocks. They're probably not all root clocks and it may
@@ -56,4 +63,6 @@ void __init bcm2835_init_clocks(void)
ret = clk_register_clkdev(clk, NULL, "20215000.uart");
if (ret)
pr_err("uart1_pclk alias not registered\n");
+
+ of_clk_init(clk_match);
}
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index a9204c69148d..68b402101170 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/log2.h>
/*
* DOC: basic adjustable divider clock that cannot gate
@@ -29,8 +30,7 @@
#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-#define div_mask(d) ((1 << (d->width)) - 1)
-#define is_power_of_two(i) !(i & ~i)
+#define div_mask(d) ((1 << ((d)->width)) - 1)
static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
{
@@ -137,7 +137,7 @@ static bool _is_valid_table_div(const struct clk_div_table *table,
static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
{
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
- return is_power_of_two(div);
+ return is_power_of_2(div);
if (divider->table)
return _is_valid_table_div(divider->table, div);
return true;
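The replaced open-coded macro was the real bug here: i & ~i is always 0, so !(i & ~i) accepted every divisor. The is_power_of_2() helper from <linux/log2.h> reduces to the usual bit trick, roughly:

	/* Equivalent of the helper used above: non-zero with exactly one bit set. */
	static inline bool example_is_power_of_2(unsigned long n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}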
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index a4899855c0f6..1ef271e47594 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -28,8 +28,11 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+ unsigned long long int rate;
- return parent_rate * fix->mult / fix->div;
+ rate = (unsigned long long int)parent_rate * fix->mult;
+ do_div(rate, fix->div);
+ return (unsigned long)rate;
}
static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
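The 64-bit intermediate matters on 32-bit targets, where parent_rate * fix->mult is computed in 32 bits and can wrap. A worked sketch with assumed numbers (do_div() comes from <asm/div64.h>):

	/* Sketch only: 1.2 GHz parent, mult = 5, div = 8 are made-up values. */
	static unsigned long example_factor_rate(void)
	{
		unsigned long parent_rate = 1200000000UL;
		unsigned long long rate;

		/* In 32-bit math, 1200000000 * 5 = 6000000000 wraps to 1705032704,
		 * giving ~213 MHz after the divide instead of 750 MHz. */
		rate = (unsigned long long)parent_rate * 5;
		do_div(rate, 8);	/* 750000000 */
		return (unsigned long)rate;
	}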
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index af78ed6b67ef..dc58fbd8516f 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -85,7 +85,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
/**
* of_fixed_clk_setup() - Setup function for simple fixed rate clock
*/
-void __init of_fixed_clk_setup(struct device_node *node)
+void of_fixed_clk_setup(struct device_node *node)
{
struct clk *clk;
const char *clk_name = node->name;
@@ -101,4 +101,5 @@ void __init of_fixed_clk_setup(struct device_node *node)
of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
EXPORT_SYMBOL_GPL(of_fixed_clk_setup);
+CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
#endif
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 52fecadf004a..2e08cb001936 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -182,8 +182,10 @@ static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
reg |= HB_PLL_EXT_ENA;
reg &= ~HB_PLL_EXT_BYPASS;
} else {
+ writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
reg &= ~HB_PLL_DIVQ_MASK;
reg |= divq << HB_PLL_DIVQ_SHIFT;
+ writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
}
writel(reg, hbclk->reg);
@@ -314,33 +316,23 @@ static void __init hb_pll_init(struct device_node *node)
{
hb_clk_init(node, &clk_pll_ops);
}
+CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);
static void __init hb_a9periph_init(struct device_node *node)
{
hb_clk_init(node, &a9periphclk_ops);
}
+CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);
static void __init hb_a9bus_init(struct device_node *node)
{
struct clk *clk = hb_clk_init(node, &a9bclk_ops);
clk_prepare_enable(clk);
}
+CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);
static void __init hb_emmc_init(struct device_node *node)
{
hb_clk_init(node, &periclk_ops);
}
-
-static const __initconst struct of_device_id clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "calxeda,hb-pll-clock", .data = hb_pll_init, },
- { .compatible = "calxeda,hb-a9periph-clock", .data = hb_a9periph_init, },
- { .compatible = "calxeda,hb-a9bus-clock", .data = hb_a9bus_init, },
- { .compatible = "calxeda,hb-emmc-clock", .data = hb_emmc_init, },
- {}
-};
-
-void __init highbank_clocks_init(void)
-{
- of_clk_init(clk_match);
-}
+CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index d098f72e1d5f..9f57bc37cd60 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -44,33 +44,23 @@ struct max77686_clk {
struct clk_lookup *lookup;
};
-static struct max77686_clk *get_max77686_clk(struct clk_hw *hw)
+static struct max77686_clk *to_max77686_clk(struct clk_hw *hw)
{
return container_of(hw, struct max77686_clk, hw);
}
static int max77686_clk_prepare(struct clk_hw *hw)
{
- struct max77686_clk *max77686;
- int ret;
-
- max77686 = get_max77686_clk(hw);
- if (!max77686)
- return -ENOMEM;
-
- ret = regmap_update_bits(max77686->iodev->regmap,
- MAX77686_REG_32KHZ, max77686->mask, max77686->mask);
+ struct max77686_clk *max77686 = to_max77686_clk(hw);
- return ret;
+ return regmap_update_bits(max77686->iodev->regmap,
+ MAX77686_REG_32KHZ, max77686->mask,
+ max77686->mask);
}
static void max77686_clk_unprepare(struct clk_hw *hw)
{
- struct max77686_clk *max77686;
-
- max77686 = get_max77686_clk(hw);
- if (!max77686)
- return;
+ struct max77686_clk *max77686 = to_max77686_clk(hw);
regmap_update_bits(max77686->iodev->regmap,
MAX77686_REG_32KHZ, max77686->mask, ~max77686->mask);
@@ -78,14 +68,10 @@ static void max77686_clk_unprepare(struct clk_hw *hw)
static int max77686_clk_is_enabled(struct clk_hw *hw)
{
- struct max77686_clk *max77686;
+ struct max77686_clk *max77686 = to_max77686_clk(hw);
int ret;
u32 val;
- max77686 = get_max77686_clk(hw);
- if (!max77686)
- return -ENOMEM;
-
ret = regmap_read(max77686->iodev->regmap,
MAX77686_REG_32KHZ, &val);
@@ -130,9 +116,8 @@ static int max77686_clk_register(struct device *dev,
if (IS_ERR(clk))
return -ENOMEM;
- max77686->lookup = devm_kzalloc(dev, sizeof(struct clk_lookup),
- GFP_KERNEL);
- if (IS_ERR(max77686->lookup))
+ max77686->lookup = kzalloc(sizeof(struct clk_lookup), GFP_KERNEL);
+ if (!max77686->lookup)
return -ENOMEM;
max77686->lookup->con_id = hw->init->name;
@@ -151,13 +136,13 @@ static int max77686_clk_probe(struct platform_device *pdev)
max77686_clks = devm_kzalloc(&pdev->dev, sizeof(struct max77686_clk *)
* MAX77686_CLKS_NUM, GFP_KERNEL);
- if (IS_ERR(max77686_clks))
+ if (!max77686_clks)
return -ENOMEM;
for (i = 0; i < MAX77686_CLKS_NUM; i++) {
max77686_clks[i] = devm_kzalloc(&pdev->dev,
sizeof(struct max77686_clk), GFP_KERNEL);
- if (IS_ERR(max77686_clks[i]))
+ if (!max77686_clks[i])
return -ENOMEM;
}
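The dropped IS_ERR() tests could never fire: kzalloc()/devm_kzalloc() report failure with NULL, not with an ERR_PTR() value, which is why the patch switches to plain NULL checks (the lookup also moves from devm_kzalloc() to kzalloc(), presumably because clkdev keeps a reference beyond the device's lifetime). A short sketch of the two conventions, with hypothetical names:

	/* Sketch with hypothetical names: allocators return NULL on failure,
	 * clk_register() returns an ERR_PTR()-encoded errno. */
	static int example_alloc_and_register(struct device *dev, struct clk_hw *hw)
	{
		struct clk_lookup *lookup;
		struct clk *clk;

		lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
		if (!lookup)			/* never IS_ERR() for kzalloc */
			return -ENOMEM;

		clk = clk_register(dev, hw);
		if (IS_ERR(clk)) {		/* here IS_ERR() is the right test */
			kfree(lookup);
			return PTR_ERR(clk);
		}
		return 0;
	}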
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 517a8ff7121e..6b4c70f7d23d 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -20,6 +20,7 @@ void __init nomadik_clk_init(void)
clk_register_clkdev(clk, NULL, "gpio.2");
clk_register_clkdev(clk, NULL, "gpio.3");
clk_register_clkdev(clk, NULL, "rng");
+ clk_register_clkdev(clk, NULL, "fsmc-nand");
/*
* The 2.4 MHz TIMCLK reference clock is active at boot time, this is
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index a203ecccdc4f..f8e9d0c27be2 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1025,20 +1025,67 @@ static struct of_device_id rsc_ids[] = {
{},
};
+enum prima2_clk_index {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+ rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
+ mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
+ spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
+ usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
+ usb0, usb1, maxclk,
+};
+
+static __initdata struct clk_hw *prima2_clk_hw_array[maxclk] = {
+ NULL, /* dummy */
+ NULL,
+ &clk_pll1.hw,
+ &clk_pll2.hw,
+ &clk_pll3.hw,
+ &clk_mem.hw,
+ &clk_sys.hw,
+ &clk_security.hw,
+ &clk_dsp.hw,
+ &clk_gps.hw,
+ &clk_mf.hw,
+ &clk_io.hw,
+ &clk_cpu.hw,
+ &clk_uart0.hw,
+ &clk_uart1.hw,
+ &clk_uart2.hw,
+ &clk_tsc.hw,
+ &clk_i2c0.hw,
+ &clk_i2c1.hw,
+ &clk_spi0.hw,
+ &clk_spi1.hw,
+ &clk_pwmc.hw,
+ &clk_efuse.hw,
+ &clk_pulse.hw,
+ &clk_dmac0.hw,
+ &clk_dmac1.hw,
+ &clk_nand.hw,
+ &clk_audio.hw,
+ &clk_usp0.hw,
+ &clk_usp1.hw,
+ &clk_usp2.hw,
+ &clk_vip.hw,
+ &clk_gfx.hw,
+ &clk_mm.hw,
+ &clk_lcd.hw,
+ &clk_vpp.hw,
+ &clk_mmc01.hw,
+ &clk_mmc23.hw,
+ &clk_mmc45.hw,
+ &usb_pll_clk_hw,
+ &clk_usb0.hw,
+ &clk_usb1.hw,
+};
+
+static struct clk *prima2_clks[maxclk];
+static struct clk_onecell_data clk_data;
+
void __init sirfsoc_of_clk_init(void)
{
- struct clk *clk;
struct device_node *np;
-
- np = of_find_matching_node(NULL, clkc_ids);
- if (!np)
- panic("unable to find compatible clkc node in dtb\n");
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
- of_node_put(np);
+ int i;
np = of_find_matching_node(NULL, rsc_ids);
if (!np)
@@ -1050,122 +1097,30 @@ void __init sirfsoc_of_clk_init(void)
of_node_put(np);
+ np = of_find_matching_node(NULL, clkc_ids);
+ if (!np)
+ return;
+
+ sirfsoc_clk_vbase = of_iomap(np, 0);
+ if (!sirfsoc_clk_vbase)
+ panic("unable to map clkc registers\n");
/* These are always available (RTC and 26MHz OSC)*/
- clk = clk_register_fixed_rate(NULL, "rtc", NULL,
+ prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
CLK_IS_ROOT, 32768);
- BUG_ON(IS_ERR(clk));
- clk = clk_register_fixed_rate(NULL, "osc", NULL,
+	prima2_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL,
CLK_IS_ROOT, 26000000);
- BUG_ON(IS_ERR(clk));
-
- clk = clk_register(NULL, &clk_pll1.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_pll2.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_pll3.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_mem.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_sys.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_security.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b8030000.security");
- clk = clk_register(NULL, &clk_dsp.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_gps.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "a8010000.gps");
- clk = clk_register(NULL, &clk_mf.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_io.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "io");
- clk = clk_register(NULL, &clk_cpu.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "cpu");
- clk = clk_register(NULL, &clk_uart0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0050000.uart");
- clk = clk_register(NULL, &clk_uart1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0060000.uart");
- clk = clk_register(NULL, &clk_uart2.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0070000.uart");
- clk = clk_register(NULL, &clk_tsc.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0110000.tsc");
- clk = clk_register(NULL, &clk_i2c0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00e0000.i2c");
- clk = clk_register(NULL, &clk_i2c1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00f0000.i2c");
- clk = clk_register(NULL, &clk_spi0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00d0000.spi");
- clk = clk_register(NULL, &clk_spi1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0170000.spi");
- clk = clk_register(NULL, &clk_pwmc.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0130000.pwm");
- clk = clk_register(NULL, &clk_efuse.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0140000.efusesys");
- clk = clk_register(NULL, &clk_pulse.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0150000.pulsec");
- clk = clk_register(NULL, &clk_dmac0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00b0000.dma-controller");
- clk = clk_register(NULL, &clk_dmac1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0160000.dma-controller");
- clk = clk_register(NULL, &clk_nand.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0030000.nand");
- clk = clk_register(NULL, &clk_audio.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0040000.audio");
- clk = clk_register(NULL, &clk_usp0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0080000.usp");
- clk = clk_register(NULL, &clk_usp1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0090000.usp");
- clk = clk_register(NULL, &clk_usp2.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00a0000.usp");
- clk = clk_register(NULL, &clk_vip.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00c0000.vip");
- clk = clk_register(NULL, &clk_gfx.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "98000000.graphics");
- clk = clk_register(NULL, &clk_mm.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "a0000000.multimedia");
- clk = clk_register(NULL, &clk_lcd.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "90010000.display");
- clk = clk_register(NULL, &clk_vpp.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "90020000.vpp");
- clk = clk_register(NULL, &clk_mmc01.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_mmc23.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_mmc45.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &usb_pll_clk_hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_usb0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00e0000.usb");
- clk = clk_register(NULL, &clk_usb1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00f0000.usb");
+
+ for (i = pll1; i < maxclk; i++) {
+ prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
+		BUG_ON(IS_ERR(prima2_clks[i]));
+ }
+ clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
+ clk_register_clkdev(prima2_clks[io], NULL, "io");
+ clk_register_clkdev(prima2_clks[mem], NULL, "mem");
+
+ clk_data.clks = prima2_clks;
+ clk_data.clk_num = maxclk;
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
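Registering the array through of_clk_add_provider() with of_clk_src_onecell_get means a device-tree consumer selects a clock by index, e.g. clocks = <&clks 13>; picks prima2_clks[uart0] given the enum above. The generic resolver is roughly:

	/* Roughly what the framework's of_clk_src_onecell_get() does. */
	struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
	{
		struct clk_onecell_data *clk_data = data;
		unsigned int idx = clkspec->args[0];

		if (idx >= clk_data->clk_num)
			return ERR_PTR(-EINVAL);

		return clk_data->clks[idx];
	}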
diff --git a/drivers/clk/clk-sunxi.c b/drivers/clk/clk-sunxi.c
deleted file mode 100644
index 0e831b584ba7..000000000000
--- a/drivers/clk/clk-sunxi.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-#include <linux/clk/sunxi.h>
-#include <linux/of.h>
-
-static const __initconst struct of_device_id clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- {}
-};
-
-void __init sunxi_init_clocks(void)
-{
- of_clk_init(clk_match);
-}
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index bc1e713e7b9c..3af729b1b89d 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -78,7 +78,7 @@ static struct clk_init_data wm831x_clkout_init = {
.flags = CLK_IS_ROOT,
};
-static int __devinit twl6040_clk_probe(struct platform_device *pdev)
+static int twl6040_clk_probe(struct platform_device *pdev)
{
struct twl6040 *twl6040 = dev_get_drvdata(pdev->dev.parent);
struct twl6040_clk *clkdata;
@@ -100,7 +100,7 @@ static int __devinit twl6040_clk_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit twl6040_clk_remove(struct platform_device *pdev)
+static int twl6040_clk_remove(struct platform_device *pdev)
{
struct twl6040_clk *clkdata = dev_get_drvdata(&pdev->dev);
@@ -115,7 +115,7 @@ static struct platform_driver twl6040_clk_driver = {
.owner = THIS_MODULE,
},
.probe = twl6040_clk_probe,
- .remove = __devexit_p(twl6040_clk_remove),
+ .remove = twl6040_clk_remove,
};
module_platform_driver(twl6040_clk_driver);
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index fe25570874d6..b5538bba7a10 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -41,6 +41,7 @@ struct clk_device {
#define PLL_TYPE_VT8500 0
#define PLL_TYPE_WM8650 1
+#define PLL_TYPE_WM8750 2
struct clk_pll {
struct clk_hw hw;
@@ -121,7 +122,16 @@ static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_device *cdev = to_clk_device(hw);
- u32 divisor = *prate / rate;
+ u32 divisor;
+
+ if (rate == 0)
+ return 0;
+
+ divisor = *prate / rate;
+
+	/* If *prate / rate leaves a remainder, round the divisor up */
+ if (rate * divisor < *prate)
+ divisor++;
/*
* If this is a request for SDMMC we have to adjust the divisor
@@ -138,9 +148,18 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_device *cdev = to_clk_device(hw);
- u32 divisor = parent_rate / rate;
+ u32 divisor;
unsigned long flags = 0;
+ if (rate == 0)
+ return 0;
+
+ divisor = parent_rate / rate;
+
+	/* If parent_rate / rate leaves a remainder, round the divisor up */
+	if (rate * divisor < parent_rate)
+ divisor++;
+
if (divisor == cdev->div_mask + 1)
divisor = 0;
@@ -272,7 +291,7 @@ static __init void vtwm_device_clk_init(struct device_node *node)
rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
-
+CLK_OF_DECLARE(vt8500_device, "via,vt8500-device-clock", vtwm_device_clk_init);
/* PLL clock related functions */
@@ -298,6 +317,16 @@ static __init void vtwm_device_clk_init(struct device_node *node)
#define WM8650_BITS_TO_VAL(m, d1, d2) \
((d2 << 13) | (d1 << 10) | (m & 0x3FF))
+/* Helper macros for PLL_WM8750 */
+#define WM8750_PLL_MUL(x) (((x >> 16) & 0xFF) + 1)
+#define WM8750_PLL_DIV(x) ((((x >> 8) & 1) + 1) * (1 << (x & 7)))
+
+#define WM8750_BITS_TO_FREQ(r, m, d1, d2) \
+ (r * (m+1) / ((d1+1) * (1 << d2)))
+
+#define WM8750_BITS_TO_VAL(f, m, d1, d2) \
+ ((f << 24) | ((m - 1) << 16) | ((d1 - 1) << 8) | d2)
+
static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *prediv)
@@ -361,16 +390,87 @@ static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
- *multiplier = mul;
- *divisor1 = div1;
- *divisor2 = div2;
+ *multiplier = best_mul;
+ *divisor1 = best_div1;
+ *divisor2 = best_div2;
+}
+
+static u32 wm8750_get_filter(u32 parent_rate, u32 divisor1)
+{
+ /* calculate frequency (MHz) after pre-divisor */
+ u32 freq = (parent_rate / 1000000) / (divisor1 + 1);
+
+ if ((freq < 10) || (freq > 200))
+		pr_warn("%s: PLL recommended input frequency 10..200 MHz (got %d MHz)\n",
+ __func__, freq);
+
+ if (freq >= 166)
+ return 7;
+ else if (freq >= 104)
+ return 6;
+ else if (freq >= 65)
+ return 5;
+ else if (freq >= 42)
+ return 4;
+ else if (freq >= 26)
+ return 3;
+ else if (freq >= 16)
+ return 2;
+ else if (freq >= 10)
+ return 1;
+
+ return 0;
+}
+
+static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+ u32 *filter, u32 *multiplier, u32 *divisor1, u32 *divisor2)
+{
+	u32 mul;
+	int div1, div2;
+ u32 best_mul, best_div1, best_div2;
+ unsigned long tclk, rate_err, best_err;
+
+ best_err = (unsigned long)-1;
+
+ /* Find the closest match (lower or equal to requested) */
+ for (div1 = 1; div1 >= 0; div1--)
+ for (div2 = 7; div2 >= 0; div2--)
+ for (mul = 0; mul <= 255; mul++) {
+ tclk = parent_rate * (mul + 1) / ((div1 + 1) * (1 << div2));
+ if (tclk > rate)
+ continue;
+ /* error will always be +ve */
+ rate_err = rate - tclk;
+ if (rate_err == 0) {
+ *filter = wm8750_get_filter(parent_rate, div1);
+ *multiplier = mul;
+ *divisor1 = div1;
+ *divisor2 = div2;
+ return;
+ }
+
+ if (rate_err < best_err) {
+ best_err = rate_err;
+ best_mul = mul;
+ best_div1 = div1;
+ best_div2 = div2;
+ }
+ }
+
+ /* if we got here, it wasn't an exact match */
+ pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
+ rate - best_err);
+
+ *filter = wm8750_get_filter(parent_rate, best_div1);
+ *multiplier = best_mul;
+ *divisor1 = best_div1;
+ *divisor2 = best_div2;
}
static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pll *pll = to_clk_pll(hw);
- u32 mul, div1, div2;
+ u32 filter, mul, div1, div2;
u32 pll_val;
unsigned long flags = 0;
@@ -385,6 +485,9 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
pll_val = WM8650_BITS_TO_VAL(mul, div1, div2);
break;
+	case PLL_TYPE_WM8750:
+		wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
+		pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
+		break;
default:
pr_err("%s: invalid pll type\n", __func__);
return 0;
@@ -405,7 +508,7 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_pll *pll = to_clk_pll(hw);
- u32 mul, div1, div2;
+ u32 filter, mul, div1, div2;
long round_rate;
switch (pll->type) {
@@ -417,6 +520,9 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
+	case PLL_TYPE_WM8750:
+		wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+		round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+		break;
default:
round_rate = 0;
}
@@ -440,6 +546,10 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
pll_freq = parent_rate * WM8650_PLL_MUL(pll_val);
pll_freq /= WM8650_PLL_DIV(pll_val);
break;
+ case PLL_TYPE_WM8750:
+ pll_freq = parent_rate * WM8750_PLL_MUL(pll_val);
+ pll_freq /= WM8750_PLL_DIV(pll_val);
+ break;
default:
pll_freq = 0;
}
@@ -502,20 +612,19 @@ static void __init vt8500_pll_init(struct device_node *node)
{
vtwm_pll_clk_init(node, PLL_TYPE_VT8500);
}
+CLK_OF_DECLARE(vt8500_pll, "via,vt8500-pll-clock", vt8500_pll_init);
static void __init wm8650_pll_init(struct device_node *node)
{
vtwm_pll_clk_init(node, PLL_TYPE_WM8650);
}
+CLK_OF_DECLARE(wm8650_pll, "wm,wm8650-pll-clock", wm8650_pll_init);
-static const __initconst struct of_device_id clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "via,vt8500-pll-clock", .data = vt8500_pll_init, },
- { .compatible = "wm,wm8650-pll-clock", .data = wm8650_pll_init, },
- { .compatible = "via,vt8500-device-clock",
- .data = vtwm_device_clk_init, },
- { /* sentinel */ }
-};
+static void __init wm8750_pll_init(struct device_node *node)
+{
+ vtwm_pll_clk_init(node, PLL_TYPE_WM8750);
+}
+CLK_OF_DECLARE(wm8750_pll, "wm,wm8750-pll-clock", wm8750_pll_init);
void __init vtwm_clk_init(void __iomem *base)
{
@@ -524,5 +633,5 @@ void __init vtwm_clk_init(void __iomem *base)
pmc_base = base;
- of_clk_init(clk_match);
+ of_clk_init(NULL);
}
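The divisor adjustment in the dclk hunks above always rounds the divisor up when the division is inexact, so the produced rate never exceeds the requested one. A small sketch of the rule with assumed rates (the rate == 0 guard from the patch is elided):

	static unsigned long example_round_down_rate(unsigned long prate,
						     unsigned long rate)
	{
		unsigned long divisor = prate / rate;

		/* e.g. prate = 200 MHz, rate = 48 MHz: divisor 4 would give
		 * 50 MHz, above the request, so bump to 5 and return 40 MHz. */
		if (rate * divisor < prate)
			divisor++;

		return prate / divisor;
	}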
diff --git a/drivers/clk/clk-zynq.c b/drivers/clk/clk-zynq.c
index 37a30514fd66..b14a25f39255 100644
--- a/drivers/clk/clk-zynq.c
+++ b/drivers/clk/clk-zynq.c
@@ -81,6 +81,7 @@ static void __init zynq_pll_clk_setup(struct device_node *np)
if (WARN_ON(ret))
return;
}
+CLK_OF_DECLARE(zynq_pll, "xlnx,zynq-pll", zynq_pll_clk_setup);
struct zynq_periph_clk {
struct clk_hw hw;
@@ -187,6 +188,7 @@ static void __init zynq_periph_clk_setup(struct device_node *np)
if (WARN_ON(err))
return;
}
+CLK_OF_DECLARE(zynq_periph, "xlnx,zynq-periph-clock", zynq_periph_clk_setup);
/* CPU Clock domain is modelled as a mux with 4 children subclks, whose
* derivative rates depend on CLK_621_TRUE
@@ -366,18 +368,10 @@ static void __init zynq_cpu_clk_setup(struct device_node *np)
if (WARN_ON(err))
return;
}
-
-static const __initconst struct of_device_id zynq_clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "xlnx,zynq-pll", .data = zynq_pll_clk_setup, },
- { .compatible = "xlnx,zynq-periph-clock",
- .data = zynq_periph_clk_setup, },
- { .compatible = "xlnx,zynq-cpu-clock", .data = zynq_cpu_clk_setup, },
- {}
-};
+CLK_OF_DECLARE(zynq_cpu, "xlnx,zynq-cpu-clock", zynq_cpu_clk_setup);
void __init xilinx_zynq_clocks_init(void __iomem *slcr)
{
slcr_base = slcr;
- of_clk_init(zynq_clk_match);
+ of_clk_init(NULL);
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 251e45d6024d..fabbfe1a9253 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
+#include <linux/init.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
@@ -35,6 +36,137 @@ static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;
+static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
+{
+ if (!c)
+ return;
+
+ seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
+ level * 3 + 1, "",
+ 30 - level * 3, c->name,
+ c->enable_count, c->prepare_count, c->rate);
+ seq_printf(s, "\n");
+}
+
+static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
+ int level)
+{
+ struct clk *child;
+ struct hlist_node *tmp;
+
+ if (!c)
+ return;
+
+ clk_summary_show_one(s, c, level);
+
+ hlist_for_each_entry(child, tmp, &c->children, child_node)
+ clk_summary_show_subtree(s, child, level + 1);
+}
+
+static int clk_summary_show(struct seq_file *s, void *data)
+{
+ struct clk *c;
+ struct hlist_node *tmp;
+
+ seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
+ seq_printf(s, "---------------------------------------------------------------------\n");
+
+ mutex_lock(&prepare_lock);
+
+ hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
+ clk_summary_show_subtree(s, c, 0);
+
+ hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
+ clk_summary_show_subtree(s, c, 0);
+
+ mutex_unlock(&prepare_lock);
+
+ return 0;
+}
+
+
+static int clk_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_summary_show, inode->i_private);
+}
+
+static const struct file_operations clk_summary_fops = {
+ .open = clk_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
+{
+ if (!c)
+ return;
+
+ seq_printf(s, "\"%s\": { ", c->name);
+ seq_printf(s, "\"enable_count\": %d,", c->enable_count);
+ seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
+ seq_printf(s, "\"rate\": %lu", c->rate);
+}
+
+static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
+{
+ struct clk *child;
+ struct hlist_node *tmp;
+
+ if (!c)
+ return;
+
+ clk_dump_one(s, c, level);
+
+ hlist_for_each_entry(child, tmp, &c->children, child_node) {
+ seq_printf(s, ",");
+ clk_dump_subtree(s, child, level + 1);
+ }
+
+ seq_printf(s, "}");
+}
+
+static int clk_dump(struct seq_file *s, void *data)
+{
+ struct clk *c;
+ struct hlist_node *tmp;
+ bool first_node = true;
+
+ seq_printf(s, "{");
+
+ mutex_lock(&prepare_lock);
+
+ hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
+ if (!first_node)
+ seq_printf(s, ",");
+ first_node = false;
+ clk_dump_subtree(s, c, 0);
+ }
+
+ hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
+ seq_printf(s, ",");
+ clk_dump_subtree(s, c, 0);
+ }
+
+ mutex_unlock(&prepare_lock);
+
+ seq_printf(s, "}");
+ return 0;
+}
+
+
+static int clk_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_dump, inode->i_private);
+}
+
+static const struct file_operations clk_dump_fops = {
+ .open = clk_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
@@ -168,12 +300,23 @@ static int __init clk_debug_init(void)
{
struct clk *clk;
struct hlist_node *tmp;
+ struct dentry *d;
rootdir = debugfs_create_dir("clk", NULL);
if (!rootdir)
return -ENOMEM;
+ d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
+ &clk_summary_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
+ &clk_dump_fops);
+ if (!d)
+ return -ENOMEM;
+
orphandir = debugfs_create_dir("orphans", rootdir);
if (!orphandir)
@@ -259,32 +402,33 @@ late_initcall(clk_disable_unused);
/*** helper functions ***/
-inline const char *__clk_get_name(struct clk *clk)
+const char *__clk_get_name(struct clk *clk)
{
return !clk ? NULL : clk->name;
}
+EXPORT_SYMBOL_GPL(__clk_get_name);
-inline struct clk_hw *__clk_get_hw(struct clk *clk)
+struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->hw;
}
-inline u8 __clk_get_num_parents(struct clk *clk)
+u8 __clk_get_num_parents(struct clk *clk)
{
return !clk ? 0 : clk->num_parents;
}
-inline struct clk *__clk_get_parent(struct clk *clk)
+struct clk *__clk_get_parent(struct clk *clk)
{
return !clk ? NULL : clk->parent;
}
-inline unsigned int __clk_get_enable_count(struct clk *clk)
+unsigned int __clk_get_enable_count(struct clk *clk)
{
return !clk ? 0 : clk->enable_count;
}
-inline unsigned int __clk_get_prepare_count(struct clk *clk)
+unsigned int __clk_get_prepare_count(struct clk *clk)
{
return !clk ? 0 : clk->prepare_count;
}
@@ -310,7 +454,7 @@ out:
return ret;
}
-inline unsigned long __clk_get_flags(struct clk *clk)
+unsigned long __clk_get_flags(struct clk *clk)
{
return !clk ? 0 : clk->flags;
}
@@ -950,9 +1094,6 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
/* change the rates */
clk_change_rate(top);
- mutex_unlock(&prepare_lock);
-
- return 0;
out:
mutex_unlock(&prepare_lock);
@@ -1663,6 +1804,11 @@ struct of_clk_provider {
void *data;
};
+extern struct of_device_id __clk_of_table[];
+
+static const struct of_device_id __clk_of_table_sentinel
+ __used __section(__clk_of_table_end);
+
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);
@@ -1791,6 +1937,9 @@ void __init of_clk_init(const struct of_device_id *matches)
{
struct device_node *np;
+ if (!matches)
+ matches = __clk_of_table;
+
for_each_matching_node(np, matches) {
const struct of_device_id *match = of_match_node(matches, np);
of_clk_init_cb_t clk_init_cb = match->data;
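The __clk_of_table symbol and the sentinel added above are what make the CLK_OF_DECLARE() conversions elsewhere in this series work: each macro use emits an of_device_id into a linker-collected section, and of_clk_init(NULL) walks that table instead of a per-driver match array. The macro is essentially (sketch; the real definition lives in <linux/clk-provider.h>):

	#define CLK_OF_DECLARE(name, compat, fn)			\
		static const struct of_device_id __clk_of_table_##name	\
			__used __section(__clk_of_table)		\
			= { .compatible = compat, .data = fn };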
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
new file mode 100644
index 000000000000..57323fd15ec9
--- /dev/null
+++ b/drivers/clk/mvebu/Kconfig
@@ -0,0 +1,8 @@
+config MVEBU_CLK_CORE
+ bool
+
+config MVEBU_CLK_CPU
+ bool
+
+config MVEBU_CLK_GATING
+ bool
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
new file mode 100644
index 000000000000..58df3dc49363
--- /dev/null
+++ b/drivers/clk/mvebu/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MVEBU_CLK_CORE) += clk.o clk-core.o
+obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
+obj-$(CONFIG_MVEBU_CLK_GATING) += clk-gating-ctrl.o
diff --git a/drivers/clk/mvebu/clk-core.c b/drivers/clk/mvebu/clk-core.c
new file mode 100644
index 000000000000..69056a7479e8
--- /dev/null
+++ b/drivers/clk/mvebu/clk-core.c
@@ -0,0 +1,675 @@
+/*
+ * Marvell EBU clock core handling defined at reset
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include "clk-core.h"
+
+struct core_ratio {
+ int id;
+ const char *name;
+};
+
+struct core_clocks {
+ u32 (*get_tclk_freq)(void __iomem *sar);
+ u32 (*get_cpu_freq)(void __iomem *sar);
+ void (*get_clk_ratio)(void __iomem *sar, int id, int *mult, int *div);
+ const struct core_ratio *ratios;
+ int num_ratios;
+};
+
+static struct clk_onecell_data clk_data;
+
+static void __init mvebu_clk_core_setup(struct device_node *np,
+ struct core_clocks *coreclk)
+{
+ const char *tclk_name = "tclk";
+ const char *cpuclk_name = "cpuclk";
+ void __iomem *base;
+ unsigned long rate;
+ int n;
+
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return;
+
+ /*
+ * Allocate struct for TCLK, cpu clk, and core ratio clocks
+ */
+ clk_data.clk_num = 2 + coreclk->num_ratios;
+ clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
+ GFP_KERNEL);
+ if (WARN_ON(!clk_data.clks))
+ return;
+
+ /*
+ * Register TCLK
+ */
+ of_property_read_string_index(np, "clock-output-names", 0,
+ &tclk_name);
+ rate = coreclk->get_tclk_freq(base);
+ clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
+ CLK_IS_ROOT, rate);
+ WARN_ON(IS_ERR(clk_data.clks[0]));
+
+ /*
+ * Register CPU clock
+ */
+ of_property_read_string_index(np, "clock-output-names", 1,
+ &cpuclk_name);
+ rate = coreclk->get_cpu_freq(base);
+ clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
+ CLK_IS_ROOT, rate);
+ WARN_ON(IS_ERR(clk_data.clks[1]));
+
+ /*
+ * Register fixed-factor clocks derived from CPU clock
+ */
+ for (n = 0; n < coreclk->num_ratios; n++) {
+ const char *rclk_name = coreclk->ratios[n].name;
+ int mult, div;
+
+ of_property_read_string_index(np, "clock-output-names",
+ 2+n, &rclk_name);
+ coreclk->get_clk_ratio(base, coreclk->ratios[n].id,
+ &mult, &div);
+ clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
+ cpuclk_name, 0, mult, div);
+ WARN_ON(IS_ERR(clk_data.clks[2+n]));
+	}
+
+ /*
+ * SAR register isn't needed anymore
+ */
+ iounmap(base);
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+#ifdef CONFIG_MACH_ARMADA_370_XP
+/*
+ * The Armada 370/XP Sample At Reset value is a 64-bit bitfield split
+ * across two 32-bit registers.
+ */
+
+#define SARL 0 /* Low part [0:31] */
+#define SARL_AXP_PCLK_FREQ_OPT 21
+#define SARL_AXP_PCLK_FREQ_OPT_MASK 0x7
+#define SARL_A370_PCLK_FREQ_OPT 11
+#define SARL_A370_PCLK_FREQ_OPT_MASK 0xF
+#define SARL_AXP_FAB_FREQ_OPT 24
+#define SARL_AXP_FAB_FREQ_OPT_MASK 0xF
+#define SARL_A370_FAB_FREQ_OPT 15
+#define SARL_A370_FAB_FREQ_OPT_MASK 0x1F
+#define SARL_A370_TCLK_FREQ_OPT 20
+#define SARL_A370_TCLK_FREQ_OPT_MASK 0x1
+#define SARH 4 /* High part [32:63] */
+#define SARH_AXP_PCLK_FREQ_OPT (52-32)
+#define SARH_AXP_PCLK_FREQ_OPT_MASK 0x1
+#define SARH_AXP_PCLK_FREQ_OPT_SHIFT 3
+#define SARH_AXP_FAB_FREQ_OPT (51-32)
+#define SARH_AXP_FAB_FREQ_OPT_MASK 0x1
+#define SARH_AXP_FAB_FREQ_OPT_SHIFT 4
+
+static const u32 __initconst armada_370_tclk_frequencies[] = {
+	166000000,
+	200000000,
+};
+
+static u32 __init armada_370_get_tclk_freq(void __iomem *sar)
+{
+ u8 tclk_freq_select = 0;
+
+ tclk_freq_select = ((readl(sar) >> SARL_A370_TCLK_FREQ_OPT) &
+ SARL_A370_TCLK_FREQ_OPT_MASK);
+ return armada_370_tclk_frequencies[tclk_freq_select];
+}
+
+static const u32 __initconst armada_370_cpu_frequencies[] = {
+ 400000000,
+ 533000000,
+ 667000000,
+ 800000000,
+ 1000000000,
+ 1067000000,
+ 1200000000,
+};
+
+static u32 __init armada_370_get_cpu_freq(void __iomem *sar)
+{
+ u32 cpu_freq;
+ u8 cpu_freq_select = 0;
+
+ cpu_freq_select = ((readl(sar) >> SARL_A370_PCLK_FREQ_OPT) &
+ SARL_A370_PCLK_FREQ_OPT_MASK);
+	if (cpu_freq_select >= ARRAY_SIZE(armada_370_cpu_frequencies)) {
+		pr_err("CPU freq select unsupported %d\n", cpu_freq_select);
+ cpu_freq = 0;
+ } else
+ cpu_freq = armada_370_cpu_frequencies[cpu_freq_select];
+
+ return cpu_freq;
+}
+
+enum { A370_XP_NBCLK, A370_XP_HCLK, A370_XP_DRAMCLK };
+
+static const struct core_ratio __initconst armada_370_xp_core_ratios[] = {
+ { .id = A370_XP_NBCLK, .name = "nbclk" },
+ { .id = A370_XP_HCLK, .name = "hclk" },
+ { .id = A370_XP_DRAMCLK, .name = "dramclk" },
+};
+
+static const int __initconst armada_370_xp_nbclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 2}, {2, 2},
+ {1, 2}, {1, 2}, {1, 1}, {2, 3},
+ {0, 1}, {1, 2}, {2, 4}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {2, 2},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {2, 3}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int __initconst armada_370_xp_hclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 6}, {2, 3},
+ {1, 3}, {1, 4}, {1, 2}, {2, 6},
+ {0, 1}, {1, 6}, {2, 10}, {0, 1},
+ {1, 4}, {0, 1}, {0, 1}, {2, 5},
+ {0, 1}, {0, 1}, {0, 1}, {1, 2},
+ {2, 6}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int __initconst armada_370_xp_dramclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 3}, {2, 3},
+ {1, 3}, {1, 2}, {1, 2}, {2, 6},
+ {0, 1}, {1, 3}, {2, 5}, {0, 1},
+ {1, 4}, {0, 1}, {0, 1}, {2, 5},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {2, 3}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static void __init armada_370_xp_get_clk_ratio(u32 opt,
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case A370_XP_NBCLK:
+ *mult = armada_370_xp_nbclk_ratios[opt][0];
+ *div = armada_370_xp_nbclk_ratios[opt][1];
+ break;
+ case A370_XP_HCLK:
+ *mult = armada_370_xp_hclk_ratios[opt][0];
+ *div = armada_370_xp_hclk_ratios[opt][1];
+ break;
+ case A370_XP_DRAMCLK:
+ *mult = armada_370_xp_dramclk_ratios[opt][0];
+ *div = armada_370_xp_dramclk_ratios[opt][1];
+ break;
+ }
+}
+
+static void __init armada_370_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ u32 opt = ((readl(sar) >> SARL_A370_FAB_FREQ_OPT) &
+ SARL_A370_FAB_FREQ_OPT_MASK);
+
+ armada_370_xp_get_clk_ratio(opt, sar, id, mult, div);
+}
+
+
+static const struct core_clocks armada_370_core_clocks = {
+ .get_tclk_freq = armada_370_get_tclk_freq,
+ .get_cpu_freq = armada_370_get_cpu_freq,
+ .get_clk_ratio = armada_370_get_clk_ratio,
+ .ratios = armada_370_xp_core_ratios,
+ .num_ratios = ARRAY_SIZE(armada_370_xp_core_ratios),
+};
+
+static const u32 __initconst armada_xp_cpu_frequencies[] = {
+ 1000000000,
+ 1066000000,
+ 1200000000,
+ 1333000000,
+ 1500000000,
+ 1666000000,
+ 1800000000,
+ 2000000000,
+ 667000000,
+ 0,
+ 800000000,
+ 1600000000,
+};
+
+/* For Armada XP the TCLK frequency is fixed at 250 MHz */
+static u32 __init armada_xp_get_tclk_freq(void __iomem *sar)
+{
+ return 250 * 1000 * 1000;
+}
+
+static u32 __init armada_xp_get_cpu_freq(void __iomem *sar)
+{
+ u32 cpu_freq;
+ u8 cpu_freq_select = 0;
+
+ cpu_freq_select = ((readl(sar) >> SARL_AXP_PCLK_FREQ_OPT) &
+ SARL_AXP_PCLK_FREQ_OPT_MASK);
+ /*
+	 * The upper bit is not contiguous with the others; it lives in
+	 * the high half of the SAR register pair.
+ */
+ cpu_freq_select |= (((readl(sar+4) >> SARH_AXP_PCLK_FREQ_OPT) &
+ SARH_AXP_PCLK_FREQ_OPT_MASK)
+ << SARH_AXP_PCLK_FREQ_OPT_SHIFT);
+	if (cpu_freq_select >= ARRAY_SIZE(armada_xp_cpu_frequencies)) {
+		pr_err("CPU freq select unsupported: %d\n", cpu_freq_select);
+ cpu_freq = 0;
+ } else
+ cpu_freq = armada_xp_cpu_frequencies[cpu_freq_select];
+
+ return cpu_freq;
+}
+
+static void __init armada_xp_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+
+ u32 opt = ((readl(sar) >> SARL_AXP_FAB_FREQ_OPT) &
+ SARL_AXP_FAB_FREQ_OPT_MASK);
+ /*
+	 * The upper bit is not contiguous with the others; it lives in
+	 * the high half of the SAR register pair.
+ */
+ opt |= (((readl(sar+4) >> SARH_AXP_FAB_FREQ_OPT) &
+ SARH_AXP_FAB_FREQ_OPT_MASK)
+ << SARH_AXP_FAB_FREQ_OPT_SHIFT);
+
+ armada_370_xp_get_clk_ratio(opt, sar, id, mult, div);
+}
+
+static const struct core_clocks armada_xp_core_clocks = {
+ .get_tclk_freq = armada_xp_get_tclk_freq,
+ .get_cpu_freq = armada_xp_get_cpu_freq,
+ .get_clk_ratio = armada_xp_get_clk_ratio,
+ .ratios = armada_370_xp_core_ratios,
+ .num_ratios = ARRAY_SIZE(armada_370_xp_core_ratios),
+};
+
+#endif /* CONFIG_MACH_ARMADA_370_XP */
+
+/*
+ * Dove PLL sample-at-reset configuration
+ *
+ * SAR0[8:5] : CPU frequency
+ * 5 = 1000 MHz
+ * 6 = 933 MHz
+ * 7 = 933 MHz
+ * 8 = 800 MHz
+ * 9 = 800 MHz
+ * 10 = 800 MHz
+ * 11 = 1067 MHz
+ * 12 = 667 MHz
+ * 13 = 533 MHz
+ * 14 = 400 MHz
+ * 15 = 333 MHz
+ * others reserved.
+ *
+ * SAR0[11:9] : CPU to L2 Clock divider ratio
+ * 0 = (1/1) * CPU
+ * 2 = (1/2) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * others reserved.
+ *
+ * SAR0[15:12] : CPU to DDR DRAM Clock divider ratio
+ * 0 = (1/1) * CPU
+ * 2 = (1/2) * CPU
+ * 3 = (2/5) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * 8 = (1/5) * CPU
+ * 10 = (1/6) * CPU
+ * 12 = (1/7) * CPU
+ * 14 = (1/8) * CPU
+ * 15 = (1/10) * CPU
+ * others reserved.
+ *
+ * SAR0[24:23] : TCLK frequency
+ * 0 = 166 MHz
+ * 1 = 125 MHz
+ * others reserved.
+ */
+#ifdef CONFIG_ARCH_DOVE
+#define SAR_DOVE_CPU_FREQ 5
+#define SAR_DOVE_CPU_FREQ_MASK 0xf
+#define SAR_DOVE_L2_RATIO 9
+#define SAR_DOVE_L2_RATIO_MASK 0x7
+#define SAR_DOVE_DDR_RATIO 12
+#define SAR_DOVE_DDR_RATIO_MASK 0xf
+#define SAR_DOVE_TCLK_FREQ 23
+#define SAR_DOVE_TCLK_FREQ_MASK 0x3
+
+static const u32 __initconst dove_tclk_frequencies[] = {
+ 166666667,
+ 125000000,
+ 0, 0
+};
+
+static u32 __init dove_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_DOVE_TCLK_FREQ) &
+ SAR_DOVE_TCLK_FREQ_MASK;
+ return dove_tclk_frequencies[opt];
+}
+
+static const u32 __initconst dove_cpu_frequencies[] = {
+ 0, 0, 0, 0, 0,
+ 1000000000,
+ 933333333, 933333333,
+ 800000000, 800000000, 800000000,
+ 1066666667,
+ 666666667,
+ 533333333,
+ 400000000,
+ 333333333
+};
+
+static u32 __init dove_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_DOVE_CPU_FREQ) &
+ SAR_DOVE_CPU_FREQ_MASK;
+ return dove_cpu_frequencies[opt];
+}
+
+enum { DOVE_CPU_TO_L2, DOVE_CPU_TO_DDR };
+
+static const struct core_ratio __initconst dove_core_ratios[] = {
+ { .id = DOVE_CPU_TO_L2, .name = "l2clk", },
+ { .id = DOVE_CPU_TO_DDR, .name = "ddrclk", }
+};
+
+static const int __initconst dove_cpu_l2_ratios[8][2] = {
+ { 1, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 }
+};
+
+static const int __initconst dove_cpu_ddr_ratios[16][2] = {
+ { 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 },
+ { 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 },
+ { 1, 7 }, { 0, 1 }, { 1, 8 }, { 1, 10 }
+};
+
+static void __init dove_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case DOVE_CPU_TO_L2:
+ {
+ u32 opt = (readl(sar) >> SAR_DOVE_L2_RATIO) &
+ SAR_DOVE_L2_RATIO_MASK;
+ *mult = dove_cpu_l2_ratios[opt][0];
+ *div = dove_cpu_l2_ratios[opt][1];
+ break;
+ }
+ case DOVE_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_DOVE_DDR_RATIO) &
+ SAR_DOVE_DDR_RATIO_MASK;
+ *mult = dove_cpu_ddr_ratios[opt][0];
+ *div = dove_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks dove_core_clocks = {
+ .get_tclk_freq = dove_get_tclk_freq,
+ .get_cpu_freq = dove_get_cpu_freq,
+ .get_clk_ratio = dove_get_clk_ratio,
+ .ratios = dove_core_ratios,
+ .num_ratios = ARRAY_SIZE(dove_core_ratios),
+};
+#endif /* CONFIG_ARCH_DOVE */
+
+/*
+ * Kirkwood PLL sample-at-reset configuration
+ * (6180 has different SAR layout than other Kirkwood SoCs)
+ *
+ * SAR0[4:3,22,1] : CPU frequency (6281,6292,6282)
+ * 4 = 600 MHz
+ * 6 = 800 MHz
+ * 7 = 1000 MHz
+ * 9 = 1200 MHz
+ * 12 = 1500 MHz
+ * 13 = 1600 MHz
+ * 14 = 1800 MHz
+ * 15 = 2000 MHz
+ * others reserved.
+ *
+ * SAR0[19,10:9] : CPU to L2 Clock divider ratio (6281,6292,6282)
+ * 1 = (1/2) * CPU
+ * 3 = (1/3) * CPU
+ * 5 = (1/4) * CPU
+ * others reserved.
+ *
+ * SAR0[8:5] : CPU to DDR DRAM Clock divider ratio (6281,6292,6282)
+ * 2 = (1/2) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * 7 = (2/9) * CPU
+ * 8 = (1/5) * CPU
+ * 9 = (1/6) * CPU
+ * others reserved.
+ *
+ * SAR0[4:2] : Kirkwood 6180 cpu/l2/ddr clock configuration (6180 only)
+ * 5 = [CPU = 600 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/3) * CPU]
+ * 6 = [CPU = 800 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/4) * CPU]
+ * 7 = [CPU = 1000 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/5) * CPU]
+ * others reserved.
+ *
+ * SAR0[21] : TCLK frequency
+ * 0 = 200 MHz
+ * 1 = 166 MHz
+ * others reserved.
+ */
+#ifdef CONFIG_ARCH_KIRKWOOD
+#define SAR_KIRKWOOD_CPU_FREQ(x) \
+ (((x & (1 << 1)) >> 1) | \
+ ((x & (1 << 22)) >> 21) | \
+ ((x & (3 << 3)) >> 1))
+#define SAR_KIRKWOOD_L2_RATIO(x) \
+ (((x & (3 << 9)) >> 9) | \
+ (((x & (1 << 19)) >> 17)))
+#define SAR_KIRKWOOD_DDR_RATIO 5
+#define SAR_KIRKWOOD_DDR_RATIO_MASK 0xf
+#define SAR_MV88F6180_CLK 2
+#define SAR_MV88F6180_CLK_MASK 0x7
+#define SAR_KIRKWOOD_TCLK_FREQ 21
+#define SAR_KIRKWOOD_TCLK_FREQ_MASK 0x1
+
+enum { KIRKWOOD_CPU_TO_L2, KIRKWOOD_CPU_TO_DDR };
+
+static const struct core_ratio __initconst kirkwood_core_ratios[] = {
+ { .id = KIRKWOOD_CPU_TO_L2, .name = "l2clk", },
+ { .id = KIRKWOOD_CPU_TO_DDR, .name = "ddrclk", }
+};
+
+static u32 __init kirkwood_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_KIRKWOOD_TCLK_FREQ) &
+ SAR_KIRKWOOD_TCLK_FREQ_MASK;
+ return (opt) ? 166666667 : 200000000;
+}
+
+static const u32 __initconst kirkwood_cpu_frequencies[] = {
+ 0, 0, 0, 0,
+ 600000000,
+ 0,
+ 800000000,
+ 1000000000,
+ 0,
+ 1200000000,
+ 0, 0,
+ 1500000000,
+ 1600000000,
+ 1800000000,
+ 2000000000
+};
+
+static u32 __init kirkwood_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = SAR_KIRKWOOD_CPU_FREQ(readl(sar));
+ return kirkwood_cpu_frequencies[opt];
+}
+
+static const int __initconst kirkwood_cpu_l2_ratios[8][2] = {
+ { 0, 1 }, { 1, 2 }, { 0, 1 }, { 1, 3 },
+ { 0, 1 }, { 1, 4 }, { 0, 1 }, { 0, 1 }
+};
+
+static const int __initconst kirkwood_cpu_ddr_ratios[16][2] = {
+ { 0, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 2, 9 },
+ { 1, 5 }, { 1, 6 }, { 0, 1 }, { 0, 1 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }
+};
+
+static void __init kirkwood_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case KIRKWOOD_CPU_TO_L2:
+ {
+ u32 opt = SAR_KIRKWOOD_L2_RATIO(readl(sar));
+ *mult = kirkwood_cpu_l2_ratios[opt][0];
+ *div = kirkwood_cpu_l2_ratios[opt][1];
+ break;
+ }
+ case KIRKWOOD_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_KIRKWOOD_DDR_RATIO) &
+ SAR_KIRKWOOD_DDR_RATIO_MASK;
+ *mult = kirkwood_cpu_ddr_ratios[opt][0];
+ *div = kirkwood_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks kirkwood_core_clocks = {
+ .get_tclk_freq = kirkwood_get_tclk_freq,
+ .get_cpu_freq = kirkwood_get_cpu_freq,
+ .get_clk_ratio = kirkwood_get_clk_ratio,
+ .ratios = kirkwood_core_ratios,
+ .num_ratios = ARRAY_SIZE(kirkwood_core_ratios),
+};
+
+static const u32 __initconst mv88f6180_cpu_frequencies[] = {
+ 0, 0, 0, 0, 0,
+ 600000000,
+ 800000000,
+ 1000000000
+};
+
+static u32 __init mv88f6180_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) & SAR_MV88F6180_CLK_MASK;
+ return mv88f6180_cpu_frequencies[opt];
+}
+
+static const int __initconst mv88f6180_cpu_ddr_ratios[8][2] = {
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
+ { 0, 1 }, { 1, 3 }, { 1, 4 }, { 1, 5 }
+};
+
+static void __init mv88f6180_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case KIRKWOOD_CPU_TO_L2:
+ {
+ /* mv88f6180 has a fixed 1:2 CPU-to-L2 ratio */
+ *mult = 1;
+ *div = 2;
+ break;
+ }
+ case KIRKWOOD_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) &
+ SAR_MV88F6180_CLK_MASK;
+ *mult = mv88f6180_cpu_ddr_ratios[opt][0];
+ *div = mv88f6180_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks mv88f6180_core_clocks = {
+ .get_tclk_freq = kirkwood_get_tclk_freq,
+ .get_cpu_freq = mv88f6180_get_cpu_freq,
+ .get_clk_ratio = mv88f6180_get_clk_ratio,
+ .ratios = kirkwood_core_ratios,
+ .num_ratios = ARRAY_SIZE(kirkwood_core_ratios),
+};
+#endif /* CONFIG_ARCH_KIRKWOOD */
+
+static const __initdata struct of_device_id clk_core_match[] = {
+#ifdef CONFIG_MACH_ARMADA_370_XP
+ {
+ .compatible = "marvell,armada-370-core-clock",
+ .data = &armada_370_core_clocks,
+ },
+ {
+ .compatible = "marvell,armada-xp-core-clock",
+ .data = &armada_xp_core_clocks,
+ },
+#endif
+#ifdef CONFIG_ARCH_DOVE
+ {
+ .compatible = "marvell,dove-core-clock",
+ .data = &dove_core_clocks,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+ {
+ .compatible = "marvell,kirkwood-core-clock",
+ .data = &kirkwood_core_clocks,
+ },
+ {
+ .compatible = "marvell,mv88f6180-core-clock",
+ .data = &mv88f6180_core_clocks,
+ },
+#endif
+
+ { }
+};
+
+void __init mvebu_core_clk_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, clk_core_match) {
+ const struct of_device_id *match =
+ of_match_node(clk_core_match, np);
+ mvebu_clk_core_setup(np, (struct core_clocks *)match->data);
+ }
+}
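Since the Armada XP frequency-select field straddles the two 32-bit SAR words, the decode above rebuilds a 4-bit index from SARL bits 23:21 plus SARH bit 20 shifted into position 3. A worked sketch with assumed register contents:

	/* Assumed values: sar_low has bits 23:21 = 2, sar_high has bit 20 set,
	 * so the select becomes (1 << 3) | 2 = 10 and
	 * armada_xp_cpu_frequencies[10] = 800 MHz. */
	static u32 example_axp_cpu_select(u32 sar_low, u32 sar_high)
	{
		u32 sel = (sar_low >> 21) & 0x7;	/* SARL_AXP_PCLK_FREQ_OPT */

		sel |= ((sar_high >> 20) & 0x1) << 3;	/* SARH bit on top */
		return sel;
	}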
diff --git a/drivers/clk/mvebu/clk-core.h b/drivers/clk/mvebu/clk-core.h
new file mode 100644
index 000000000000..28b5e02e9885
--- /dev/null
+++ b/drivers/clk/mvebu/clk-core.h
@@ -0,0 +1,18 @@
+/*
+ * Marvell EBU clock core handling defined at reset
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_CORE_H
+#define __MVEBU_CLK_CORE_H
+
+void __init mvebu_core_clk_init(void);
+
+#endif
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
new file mode 100644
index 000000000000..9dd2551a0a41
--- /dev/null
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -0,0 +1,189 @@
+/*
+ * Marvell MVEBU CPU clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include "clk-cpu.h"
+
+#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
+#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
+#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
+
+#define MAX_CPU 4
+struct cpu_clk {
+ struct clk_hw hw;
+ int cpu;
+ const char *clk_name;
+ const char *parent_name;
+ void __iomem *reg_base;
+};
+
+static struct clk **clks;
+
+static struct clk_onecell_data clk_data;
+
+#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
+
+static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+ u32 reg, div;
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
+ div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
+ return parent_rate / div;
+}
+
+static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long *parent_rate)
+{
+	/* Valid ratios are 1:1, 1:2 and 1:3 */
+ u32 div;
+
+ div = *parent_rate / rate;
+ if (div == 0)
+ div = 1;
+ else if (div > 3)
+ div = 3;
+
+ return *parent_rate / div;
+}
+
+static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+ u32 reg, div;
+ u32 reload_mask;
+
+ div = parent_rate / rate;
+ reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
+ & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
+ | (div << (cpuclk->cpu * 8));
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
+ /* Set clock divider reload smooth bit mask */
+ reload_mask = 1 << (20 + cpuclk->cpu);
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ | reload_mask;
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+
+ /* Now trigger the clock update */
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ | 1 << 24;
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+
+ /* Wait for clocks to settle down then clear reload request */
+ udelay(1000);
+ reg &= ~(reload_mask | 1 << 24);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ udelay(1000);
+
+ return 0;
+}
+
+static const struct clk_ops cpu_ops = {
+ .recalc_rate = clk_cpu_recalc_rate,
+ .round_rate = clk_cpu_round_rate,
+ .set_rate = clk_cpu_set_rate,
+};
+
+void __init of_cpu_clk_setup(struct device_node *node)
+{
+ struct cpu_clk *cpuclk;
+ void __iomem *clock_complex_base = of_iomap(node, 0);
+ int ncpus = 0;
+ struct device_node *dn;
+
+ if (clock_complex_base == NULL) {
+ pr_err("%s: clock-complex base register not set\n",
+ __func__);
+ return;
+ }
+
+ for_each_node_by_type(dn, "cpu")
+ ncpus++;
+
+ cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
+ if (WARN_ON(!cpuclk))
+ return;
+
+ clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
+ if (WARN_ON(!clks))
+ goto clks_out;
+
+ for_each_node_by_type(dn, "cpu") {
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk *parent_clk;
+ char *clk_name = kzalloc(5, GFP_KERNEL);
+ int cpu, err;
+
+ if (WARN_ON(!clk_name))
+ goto bail_out;
+
+ err = of_property_read_u32(dn, "reg", &cpu);
+ if (WARN_ON(err))
+ goto bail_out;
+
+ sprintf(clk_name, "cpu%d", cpu);
+ parent_clk = of_clk_get(node, 0);
+
+ cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
+ cpuclk[cpu].clk_name = clk_name;
+ cpuclk[cpu].cpu = cpu;
+ cpuclk[cpu].reg_base = clock_complex_base;
+ cpuclk[cpu].hw.init = &init;
+
+ init.name = cpuclk[cpu].clk_name;
+ init.ops = &cpu_ops;
+ init.flags = 0;
+ init.parent_names = &cpuclk[cpu].parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &cpuclk[cpu].hw);
+ if (WARN_ON(IS_ERR(clk)))
+ goto bail_out;
+ clks[cpu] = clk;
+ }
+ clk_data.clk_num = MAX_CPU;
+ clk_data.clks = clks;
+ of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
+
+ return;
+bail_out:
+ kfree(clks);
+	while (ncpus--)
+ kfree(cpuclk[ncpus].clk_name);
+clks_out:
+ kfree(cpuclk);
+}
+
+static const __initconst struct of_device_id clk_cpu_match[] = {
+ {
+ .compatible = "marvell,armada-xp-cpu-clock",
+ .data = of_cpu_clk_setup,
+ },
+ {
+ /* sentinel */
+ },
+};
+
+void __init mvebu_cpu_clk_init(void)
+{
+ of_clk_init(clk_cpu_match);
+}
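The CPU clock code treats the divider register as one byte per CPU, each holding a 6-bit divider, so reading a rate is just a shift, mask and divide. A worked sketch with assumed values:

	/* Assumed: parent at 1600 MHz, div_reg = 0x00020201, so CPU0 runs at
	 * 1600 MHz (div 1) and CPU1/CPU2 at 800 MHz (div 2). */
	static unsigned long example_cpu_rate(unsigned long parent_rate,
					      u32 div_reg, int cpu)
	{
		u32 div = (div_reg >> (cpu * 8)) & 0x3F;  /* SYS_CTRL_CLK_DIVIDER_MASK */

		return parent_rate / div;
	}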
diff --git a/drivers/clk/mvebu/clk-cpu.h b/drivers/clk/mvebu/clk-cpu.h
new file mode 100644
index 000000000000..08e2affba4e6
--- /dev/null
+++ b/drivers/clk/mvebu/clk-cpu.h
@@ -0,0 +1,22 @@
+/*
+ * Marvell MVEBU CPU clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_CPU_H
+#define __MVEBU_CLK_CPU_H
+
+#ifdef CONFIG_MVEBU_CLK_CPU
+void __init mvebu_cpu_clk_init(void);
+#else
+static inline void mvebu_cpu_clk_init(void) {}
+#endif
+
+#endif
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.c b/drivers/clk/mvebu/clk-gating-ctrl.c
new file mode 100644
index 000000000000..ebf141d4374b
--- /dev/null
+++ b/drivers/clk/mvebu/clk-gating-ctrl.c
@@ -0,0 +1,250 @@
+/*
+ * Marvell MVEBU clock gating control.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Andrew Lunn <andrew@lunn.ch>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/mvebu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+struct mvebu_gating_ctrl {
+ spinlock_t lock;
+ struct clk **gates;
+ int num_gates;
+};
+
+struct mvebu_soc_descr {
+ const char *name;
+ const char *parent;
+ int bit_idx;
+};
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
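+/*
+ * The single cell of a consumer's clock specifier is the bit index of the
+ * requested gate; look it up among the gates registered for this node.
+ */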
+static struct clk *mvebu_clk_gating_get_src(
+ struct of_phandle_args *clkspec, void *data)
+{
+ struct mvebu_gating_ctrl *ctrl = (struct mvebu_gating_ctrl *)data;
+ int n;
+
+ if (clkspec->args_count < 1)
+ return ERR_PTR(-EINVAL);
+
+ for (n = 0; n < ctrl->num_gates; n++) {
+ struct clk_gate *gate =
+ to_clk_gate(__clk_get_hw(ctrl->gates[n]));
+ if (clkspec->args[0] == gate->bit_idx)
+ return ctrl->gates[n];
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+static void __init mvebu_clk_gating_setup(
+ struct device_node *np, const struct mvebu_soc_descr *descr)
+{
+ struct mvebu_gating_ctrl *ctrl;
+ struct clk *clk;
+ void __iomem *base;
+ const char *default_parent = NULL;
+ int n;
+
+ base = of_iomap(np, 0);
+
+ clk = of_clk_get(np, 0);
+ if (!IS_ERR(clk)) {
+ default_parent = __clk_get_name(clk);
+ clk_put(clk);
+ }
+
+ ctrl = kzalloc(sizeof(struct mvebu_gating_ctrl), GFP_KERNEL);
+ if (WARN_ON(!ctrl))
+ return;
+
+ spin_lock_init(&ctrl->lock);
+
+ /*
+ * Count, allocate, and register clock gates
+ */
+ for (n = 0; descr[n].name;)
+ n++;
+
+ ctrl->num_gates = n;
+ ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
+ GFP_KERNEL);
+ if (WARN_ON(!ctrl->gates)) {
+ kfree(ctrl);
+ return;
+ }
+
+ for (n = 0; n < ctrl->num_gates; n++) {
+ u8 flags = 0;
+ const char *parent =
+ (descr[n].parent) ? descr[n].parent : default_parent;
+
+ /*
+ * On Armada 370, the DDR clock is a special case: it
+ * isn't claimed by any driver but must nevertheless stay
+ * enabled, so mark it CLK_IGNORE_UNUSED for now.
+ */
+ if (!strcmp(descr[n].name, "ddr"))
+ flags |= CLK_IGNORE_UNUSED;
+
+ ctrl->gates[n] = clk_register_gate(NULL, descr[n].name, parent,
+ flags, base, descr[n].bit_idx, 0, &ctrl->lock);
+ WARN_ON(IS_ERR(ctrl->gates[n]));
+ }
+ of_clk_add_provider(np, mvebu_clk_gating_get_src, ctrl);
+}
+
+/*
+ * SoC specific clock gating control
+ */
+
+#ifdef CONFIG_MACH_ARMADA_370
+static const struct mvebu_soc_descr __initconst armada_370_gating_descr[] = {
+ { "audio", NULL, 0 },
+ { "pex0_en", NULL, 1 },
+ { "pex1_en", NULL, 2 },
+ { "ge1", NULL, 3 },
+ { "ge0", NULL, 4 },
+ { "pex0", NULL, 5 },
+ { "pex1", NULL, 9 },
+ { "sata0", NULL, 15 },
+ { "sdio", NULL, 17 },
+ { "tdm", NULL, 25 },
+ { "ddr", NULL, 28 },
+ { "sata1", NULL, 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_MACH_ARMADA_XP
+static const struct mvebu_soc_descr __initconst armada_xp_gating_descr[] = {
+ { "audio", NULL, 0 },
+ { "ge3", NULL, 1 },
+ { "ge2", NULL, 2 },
+ { "ge1", NULL, 3 },
+ { "ge0", NULL, 4 },
+ { "pex0", NULL, 5 },
+ { "pex1", NULL, 6 },
+ { "pex2", NULL, 7 },
+ { "pex3", NULL, 8 },
+ { "bp", NULL, 13 },
+ { "sata0lnk", NULL, 14 },
+ { "sata0", "sata0lnk", 15 },
+ { "lcd", NULL, 16 },
+ { "sdio", NULL, 17 },
+ { "usb0", NULL, 18 },
+ { "usb1", NULL, 19 },
+ { "usb2", NULL, 20 },
+ { "xor0", NULL, 22 },
+ { "crypto", NULL, 23 },
+ { "tdm", NULL, 25 },
+ { "xor1", NULL, 28 },
+ { "sata1lnk", NULL, 29 },
+ { "sata1", "sata1lnk", 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_ARCH_DOVE
+static const struct mvebu_soc_descr __initconst dove_gating_descr[] = {
+ { "usb0", NULL, 0 },
+ { "usb1", NULL, 1 },
+ { "ge", "gephy", 2 },
+ { "sata", NULL, 3 },
+ { "pex0", NULL, 4 },
+ { "pex1", NULL, 5 },
+ { "sdio0", NULL, 8 },
+ { "sdio1", NULL, 9 },
+ { "nand", NULL, 10 },
+ { "camera", NULL, 11 },
+ { "i2s0", NULL, 12 },
+ { "i2s1", NULL, 13 },
+ { "crypto", NULL, 15 },
+ { "ac97", NULL, 21 },
+ { "pdma", NULL, 22 },
+ { "xor0", NULL, 23 },
+ { "xor1", NULL, 24 },
+ { "gephy", NULL, 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+static const struct mvebu_soc_descr __initconst kirkwood_gating_descr[] = {
+ { "ge0", NULL, 0 },
+ { "pex0", NULL, 2 },
+ { "usb0", NULL, 3 },
+ { "sdio", NULL, 4 },
+ { "tsu", NULL, 5 },
+ { "runit", NULL, 7 },
+ { "xor0", NULL, 8 },
+ { "audio", NULL, 9 },
+ { "powersave", "cpuclk", 11 },
+ { "sata0", NULL, 14 },
+ { "sata1", NULL, 15 },
+ { "xor1", NULL, 16 },
+ { "crypto", NULL, 17 },
+ { "pex1", NULL, 18 },
+ { "ge1", NULL, 19 },
+ { "tdm", NULL, 20 },
+ { }
+};
+#endif
+
+static const __initconst struct of_device_id clk_gating_match[] = {
+#ifdef CONFIG_MACH_ARMADA_370
+ {
+ .compatible = "marvell,armada-370-gating-clock",
+ .data = armada_370_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_MACH_ARMADA_XP
+ {
+ .compatible = "marvell,armada-xp-gating-clock",
+ .data = armada_xp_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_DOVE
+ {
+ .compatible = "marvell,dove-gating-clock",
+ .data = dove_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+ {
+ .compatible = "marvell,kirkwood-gating-clock",
+ .data = kirkwood_gating_descr,
+ },
+#endif
+
+ { }
+};
+
+void __init mvebu_gating_clk_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, clk_gating_match) {
+ const struct of_device_id *match =
+ of_match_node(clk_gating_match, np);
+ mvebu_clk_gating_setup(np,
+ (const struct mvebu_soc_descr *)match->data);
+ }
+}
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.h b/drivers/clk/mvebu/clk-gating-ctrl.h
new file mode 100644
index 000000000000..9275d1e51f1b
--- /dev/null
+++ b/drivers/clk/mvebu/clk-gating-ctrl.h
@@ -0,0 +1,22 @@
+/*
+ * Marvell EBU gating clock handling
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_GATING_H
+#define __MVEBU_CLK_GATING_H
+
+#ifdef CONFIG_MVEBU_CLK_GATING
+void __init mvebu_gating_clk_init(void);
+#else
+static inline void mvebu_gating_clk_init(void) {}
+#endif
+
+#endif
diff --git a/drivers/clk/mvebu/clk.c b/drivers/clk/mvebu/clk.c
new file mode 100644
index 000000000000..855681b8a9dc
--- /dev/null
+++ b/drivers/clk/mvebu/clk.c
@@ -0,0 +1,27 @@
+/*
+ * Marvell EBU SoC clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/clk/mvebu.h>
+#include <linux/of.h>
+#include "clk-core.h"
+#include "clk-cpu.h"
+#include "clk-gating-ctrl.h"
+
+void __init mvebu_clocks_init(void)
+{
+ mvebu_core_clk_init();
+ mvebu_gating_clk_init();
+ mvebu_cpu_clk_init();
+}
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index 8dd476e2a9c5..b5c06f9766f6 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -99,7 +99,7 @@ static enum imx23_clk clks_init_on[] __initdata = {
int __init mx23_clocks_init(void)
{
struct device_node *np;
- int i;
+ u32 i;
clk_misc_init();
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index db3af0874121..76ce6c6d1113 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -154,7 +154,7 @@ static enum imx28_clk clks_init_on[] __initdata = {
int __init mx28_clocks_init(void)
{
struct device_node *np;
- int i;
+ u32 i;
clk_misc_init();
@@ -238,7 +238,7 @@ int __init mx28_clocks_init(void)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
- clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdev(clks[xbus], NULL, "timrot");
clk_register_clkdev(clks[enet_out], NULL, "enet_out");
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 147e25f00405..ed9af4278619 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -20,6 +20,7 @@
#include <mach/spear.h>
#include "clk.h"
+#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
/* PLL related registers and bit values */
#define SPEAR1310_PLL_CFG (VA_MISC_BASE + 0x210)
/* PLL_CFG bit values */
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
new file mode 100644
index 000000000000..2b41b0f4f731
--- /dev/null
+++ b/drivers/clk/tegra/Makefile
@@ -0,0 +1,11 @@
+obj-y += clk.o
+obj-y += clk-audio-sync.o
+obj-y += clk-divider.o
+obj-y += clk-periph.o
+obj-y += clk-periph-gate.o
+obj-y += clk-pll.o
+obj-y += clk-pll-out.o
+obj-y += clk-super.o
+
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
new file mode 100644
index 000000000000..c0f7843e80e6
--- /dev/null
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
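+/*
+ * An audio sync clock is a rate-only root clock: round_rate rejects
+ * anything above max_rate, set_rate simply records the requested rate
+ * and recalc_rate returns it.
+ */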
+static unsigned long clk_sync_source_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ return sync->rate;
+}
+
+static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ if (rate > sync->max_rate)
+ return -EINVAL;
+ else
+ return rate;
+}
+
+static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ sync->rate = rate;
+ return 0;
+}
+
+const struct clk_ops tegra_clk_sync_source_ops = {
+ .round_rate = clk_sync_source_round_rate,
+ .set_rate = clk_sync_source_set_rate,
+ .recalc_rate = clk_sync_source_recalc_rate,
+};
+
+struct clk *tegra_clk_register_sync_source(const char *name,
+ unsigned long rate, unsigned long max_rate)
+{
+ struct tegra_clk_sync_source *sync;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ sync = kzalloc(sizeof(*sync), GFP_KERNEL);
+ if (!sync) {
+ pr_err("%s: could not allocate sync source clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sync->rate = rate;
+ sync->max_rate = max_rate;
+
+ init.ops = &tegra_clk_sync_source_ops;
+ init.name = name;
+ init.flags = CLK_IS_ROOT;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ sync->hw.init = &init;
+
+ clk = clk_register(NULL, &sync->hw);
+ if (IS_ERR(clk))
+ kfree(sync);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
new file mode 100644
index 000000000000..4d75b1f37e3a
--- /dev/null
+++ b/drivers/clk/tegra/clk-divider.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define pll_out_override(p) (BIT((p->shift - 6)))
+#define div_mask(d) ((1 << (d->width)) - 1)
+#define get_mul(d) (1 << d->frac_width)
+#define get_max_div(d) div_mask(d)
+
+#define PERIPH_CLK_UART_DIV_ENB BIT(24)
+
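+/*
+ * The divider field is an offset-by-one fixed-point value with frac_width
+ * fractional bits: rate = parent_rate * mul / (reg + mul), where
+ * mul = 1 << frac_width. get_div() therefore computes
+ * parent_rate * mul / rate - mul, rejecting values that do not fit in
+ * the field.
+ */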
+static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
+ unsigned long parent_rate)
+{
+ s64 divider_ux1 = parent_rate;
+ u8 flags = divider->flags;
+ int mul;
+
+ if (!rate)
+ return 0;
+
+ mul = get_mul(divider);
+
+ if (!(flags & TEGRA_DIVIDER_INT))
+ divider_ux1 *= mul;
+
+ if (flags & TEGRA_DIVIDER_ROUND_UP)
+ divider_ux1 += rate - 1;
+
+ do_div(divider_ux1, rate);
+
+ if (flags & TEGRA_DIVIDER_INT)
+ divider_ux1 *= mul;
+
+ divider_ux1 -= mul;
+
+ if (divider_ux1 < 0)
+ return 0;
+
+ if (divider_ux1 > get_max_div(divider))
+ return -EINVAL;
+
+ return divider_ux1;
+}
+
+static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ u32 reg;
+ int div, mul;
+ u64 rate = parent_rate;
+
+ reg = readl_relaxed(divider->reg) >> divider->shift;
+ div = reg & div_mask(divider);
+
+ mul = get_mul(divider);
+ div += mul;
+
+ rate *= mul;
+ rate += div - 1;
+ do_div(rate, div);
+
+ return rate;
+}
+
+static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ int div, mul;
+ unsigned long output_rate = *prate;
+
+ if (!rate)
+ return output_rate;
+
+ div = get_div(divider, rate, output_rate);
+ if (div < 0)
+ return *prate;
+
+ mul = get_mul(divider);
+
+ return DIV_ROUND_UP(output_rate * mul, div + mul);
+}
+
+static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ int div;
+ unsigned long flags = 0;
+ u32 val;
+
+ div = get_div(divider, rate, parent_rate);
+ if (div < 0)
+ return div;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
+ val = readl_relaxed(divider->reg);
+ val &= ~(div_mask(divider) << divider->shift);
+ val |= div << divider->shift;
+
+ if (divider->flags & TEGRA_DIVIDER_UART) {
+ if (div)
+ val |= PERIPH_CLK_UART_DIV_ENB;
+ else
+ val &= ~PERIPH_CLK_UART_DIV_ENB;
+ }
+
+ if (divider->flags & TEGRA_DIVIDER_FIXED)
+ val |= pll_out_override(divider);
+
+ writel_relaxed(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops tegra_clk_frac_div_ops = {
+ .recalc_rate = clk_frac_div_recalc_rate,
+ .set_rate = clk_frac_div_set_rate,
+ .round_rate = clk_frac_div_round_rate,
+};
+
+struct clk *tegra_clk_register_divider(const char *name,
+ const char *parent_name, void __iomem *reg,
+ unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
+ u8 frac_width, spinlock_t *lock)
+{
+ struct tegra_clk_frac_div *divider;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ divider = kzalloc(sizeof(*divider), GFP_KERNEL);
+ if (!divider) {
+ pr_err("%s: could not allocate fractional divider clk\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &tegra_clk_frac_div_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ divider->reg = reg;
+ divider->shift = shift;
+ divider->width = width;
+ divider->frac_width = frac_width;
+ divider->lock = lock;
+ divider->flags = clk_divider_flags;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ divider->hw.init = &init;
+
+ clk = clk_register(NULL, &divider->hw);
+ if (IS_ERR(clk))
+ kfree(divider);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
new file mode 100644
index 000000000000..6dd533251e7b
--- /dev/null
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/tegra-soc.h>
+
+#include "clk.h"
+
+static DEFINE_SPINLOCK(periph_ref_lock);
+
+/* Macros to assist peripheral gate clock */
+#define read_enb(gate) \
+ readl_relaxed(gate->clk_base + (gate->regs->enb_reg))
+#define write_enb_set(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->enb_set_reg))
+#define write_enb_clr(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->enb_clr_reg))
+
+#define read_rst(gate) \
+ readl_relaxed(gate->clk_base + (gate->regs->rst_reg))
+#define write_rst_set(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->rst_set_reg))
+#define write_rst_clr(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->rst_clr_reg))
+
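+/* Each enable/reset register bank covers 32 clocks, hence the modulo */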
+#define periph_clk_to_bit(periph) (1 << (gate->clk_num % 32))
+
+/* Peripheral gate clock ops */
+static int clk_periph_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ int state = 1;
+
+ if (!(read_enb(gate) & periph_clk_to_bit(gate)))
+ state = 0;
+
+ if (!(gate->flags & TEGRA_PERIPH_NO_RESET))
+ if (read_rst(gate) & periph_clk_to_bit(gate))
+ state = 0;
+
+ return state;
+}
+
+static int clk_periph_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&periph_ref_lock, flags);
+
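+ /* Only touch the hardware on the 0 -> 1 transition for this clock number */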
+ gate->enable_refcnt[gate->clk_num]++;
+ if (gate->enable_refcnt[gate->clk_num] > 1) {
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+ return 0;
+ }
+
+ write_enb_set(periph_clk_to_bit(gate), gate);
+ udelay(2);
+
+ if (!(gate->flags & TEGRA_PERIPH_NO_RESET) &&
+ !(gate->flags & TEGRA_PERIPH_MANUAL_RESET)) {
+ if (read_rst(gate) & periph_clk_to_bit(gate)) {
+ udelay(5); /* reset propagation delay */
+ write_rst_clr(periph_clk_to_bit(gate), gate);
+ }
+ }
+
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+
+ return 0;
+}
+
+static void clk_periph_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&periph_ref_lock, flags);
+
+ gate->enable_refcnt[gate->clk_num]--;
+ if (gate->enable_refcnt[gate->clk_num] > 0) {
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+ return;
+ }
+
+ /*
+ * If the peripheral sits on the APB bus, read from the bus to
+ * flush the preceding write. This avoids any access to the
+ * peripheral after its clock has been disabled.
+ */
+ if (gate->flags & TEGRA_PERIPH_ON_APB)
+ tegra_read_chipid();
+
+ write_enb_clr(periph_clk_to_bit(gate), gate);
+
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+}
+
+void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert)
+{
+ if (gate->flags & TEGRA_PERIPH_NO_RESET)
+ return;
+
+ if (assert) {
+ /*
+ * If the peripheral sits on the APB bus, read from the bus to
+ * flush the preceding write. This avoids any access to the
+ * peripheral after its clock has been disabled.
+ */
+ if (gate->flags & TEGRA_PERIPH_ON_APB)
+ tegra_read_chipid();
+
+ write_rst_set(periph_clk_to_bit(gate), gate);
+ } else {
+ write_rst_clr(periph_clk_to_bit(gate), gate);
+ }
+}
+
+const struct clk_ops tegra_clk_periph_gate_ops = {
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+struct clk *tegra_clk_register_periph_gate(const char *name,
+ const char *parent_name, u8 gate_flags, void __iomem *clk_base,
+ unsigned long flags, int clk_num,
+ struct tegra_clk_periph_regs *pregs, int *enable_refcnt)
+{
+ struct tegra_clk_periph_gate *gate;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ pr_err("%s: could not allocate periph gate clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.ops = &tegra_clk_periph_gate_ops;
+
+ gate->magic = TEGRA_CLK_PERIPH_GATE_MAGIC;
+ gate->clk_base = clk_base;
+ gate->clk_num = clk_num;
+ gate->flags = gate_flags;
+ gate->enable_refcnt = enable_refcnt;
+ gate->regs = pregs;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ gate->hw.init = &init;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
new file mode 100644
index 000000000000..788486e6331a
--- /dev/null
+++ b/drivers/clk/tegra/clk-periph.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
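+/*
+ * A peripheral clock is a composite of a mux and an optional fractional
+ * divider sharing one clock-source register, plus a gate in the
+ * enable/reset banks; each callback below simply forwards to the ops of
+ * the corresponding sub-clock.
+ */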
+static u8 clk_periph_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *mux_ops = periph->mux_ops;
+ struct clk_hw *mux_hw = &periph->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->get_parent(mux_hw);
+}
+
+static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *mux_ops = periph->mux_ops;
+ struct clk_hw *mux_hw = &periph->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->set_parent(mux_hw, index);
+}
+
+static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->recalc_rate(div_hw, parent_rate);
+}
+
+static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->round_rate(div_hw, rate, prate);
+}
+
+static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->set_rate(div_hw, rate, parent_rate);
+}
+
+static int clk_periph_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->is_enabled(gate_hw);
+}
+
+static int clk_periph_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->enable(gate_hw);
+}
+
+static void clk_periph_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_ops->disable(gate_hw);
+}
+
+void tegra_periph_reset_deassert(struct clk *c)
+{
+ struct clk_hw *hw = __clk_get_hw(c);
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ struct tegra_clk_periph_gate *gate;
+
+ if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
+ gate = to_clk_periph_gate(hw);
+ if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
+ WARN_ON(1);
+ return;
+ }
+ } else {
+ gate = &periph->gate;
+ }
+
+ tegra_periph_reset(gate, 0);
+}
+
+void tegra_periph_reset_assert(struct clk *c)
+{
+ struct clk_hw *hw = __clk_get_hw(c);
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ struct tegra_clk_periph_gate *gate;
+
+ if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
+ gate = to_clk_periph_gate(hw);
+ if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
+ WARN_ON(1);
+ return;
+ }
+ } else {
+ gate = &periph->gate;
+ }
+
+ tegra_periph_reset(gate, 1);
+}
+
+const struct clk_ops tegra_clk_periph_ops = {
+ .get_parent = clk_periph_get_parent,
+ .set_parent = clk_periph_set_parent,
+ .recalc_rate = clk_periph_recalc_rate,
+ .round_rate = clk_periph_round_rate,
+ .set_rate = clk_periph_set_rate,
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+const struct clk_ops tegra_clk_periph_nodiv_ops = {
+ .get_parent = clk_periph_get_parent,
+ .set_parent = clk_periph_set_parent,
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+static struct clk *_tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph,
+ void __iomem *clk_base, u32 offset, bool div)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = name;
+ init.ops = div ? &tegra_clk_periph_ops : &tegra_clk_periph_nodiv_ops;
+ init.flags = div ? 0 : CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ periph->hw.init = &init;
+ periph->magic = TEGRA_CLK_PERIPH_MAGIC;
+ periph->mux.reg = clk_base + offset;
+ periph->divider.reg = div ? (clk_base + offset) : NULL;
+ periph->gate.clk_base = clk_base;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ return clk;
+
+ periph->mux.hw.clk = clk;
+ periph->divider.hw.clk = div ? clk : NULL;
+ periph->gate.hw.clk = clk;
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset)
+{
+ return _tegra_clk_register_periph(name, parent_names, num_parents,
+ periph, clk_base, offset, true);
+}
+
+struct clk *tegra_clk_register_periph_nodiv(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset)
+{
+ return _tegra_clk_register_periph(name, parent_names, num_parents,
+ periph, clk_base, offset, false);
+}
diff --git a/drivers/clk/tegra/clk-pll-out.c b/drivers/clk/tegra/clk-pll-out.c
new file mode 100644
index 000000000000..3598987a451d
--- /dev/null
+++ b/drivers/clk/tegra/clk-pll-out.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define pll_out_enb(p) (BIT(p->enb_bit_idx))
+#define pll_out_rst(p) (BIT(p->rst_bit_idx))
+
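+/*
+ * A PLL output is reported as enabled only when both its clock-enable bit
+ * and its reset bit are set (i.e. reset is released).
+ */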
+static int clk_pll_out_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ u32 val = readl_relaxed(pll_out->reg);
+ int state;
+
+ state = (val & pll_out_enb(pll_out)) ? 1 : 0;
+ if (!(val & (pll_out_rst(pll_out))))
+ state = 0;
+ return state;
+}
+
+static int clk_pll_out_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pll_out->lock)
+ spin_lock_irqsave(pll_out->lock, flags);
+
+ val = readl_relaxed(pll_out->reg);
+
+ val |= (pll_out_enb(pll_out) | pll_out_rst(pll_out));
+
+ writel_relaxed(val, pll_out->reg);
+ udelay(2);
+
+ if (pll_out->lock)
+ spin_unlock_irqrestore(pll_out->lock, flags);
+
+ return 0;
+}
+
+static void clk_pll_out_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pll_out->lock)
+ spin_lock_irqsave(pll_out->lock, flags);
+
+ val = readl_relaxed(pll_out->reg);
+
+ val &= ~(pll_out_enb(pll_out) | pll_out_rst(pll_out));
+
+ writel_relaxed(val, pll_out->reg);
+ udelay(2);
+
+ if (pll_out->lock)
+ spin_unlock_irqrestore(pll_out->lock, flags);
+}
+
+const struct clk_ops tegra_clk_pll_out_ops = {
+ .is_enabled = clk_pll_out_is_enabled,
+ .enable = clk_pll_out_enable,
+ .disable = clk_pll_out_disable,
+};
+
+struct clk *tegra_clk_register_pll_out(const char *name,
+ const char *parent_name, void __iomem *reg, u8 enb_bit_idx,
+ u8 rst_bit_idx, unsigned long flags, u8 pll_out_flags,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll_out *pll_out;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll_out = kzalloc(sizeof(*pll_out), GFP_KERNEL);
+ if (!pll_out)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &tegra_clk_pll_out_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = flags;
+
+ pll_out->reg = reg;
+ pll_out->enb_bit_idx = enb_bit_idx;
+ pll_out->rst_bit_idx = rst_bit_idx;
+ pll_out->flags = pll_out_flags;
+ pll_out->lock = lock;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ pll_out->hw.init = &init;
+
+ clk = clk_register(NULL, &pll_out->hw);
+ if (IS_ERR(clk))
+ kfree(pll_out);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
new file mode 100644
index 000000000000..165f24734c1b
--- /dev/null
+++ b/drivers/clk/tegra/clk-pll.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define PLL_BASE_BYPASS BIT(31)
+#define PLL_BASE_ENABLE BIT(30)
+#define PLL_BASE_REF_ENABLE BIT(29)
+#define PLL_BASE_OVERRIDE BIT(28)
+
+#define PLL_BASE_DIVP_SHIFT 20
+#define PLL_BASE_DIVP_WIDTH 3
+#define PLL_BASE_DIVN_SHIFT 8
+#define PLL_BASE_DIVN_WIDTH 10
+#define PLL_BASE_DIVM_SHIFT 0
+#define PLL_BASE_DIVM_WIDTH 5
+#define PLLU_POST_DIVP_MASK 0x1
+
+#define PLL_MISC_DCCON_SHIFT 20
+#define PLL_MISC_CPCON_SHIFT 8
+#define PLL_MISC_CPCON_WIDTH 4
+#define PLL_MISC_CPCON_MASK ((1 << PLL_MISC_CPCON_WIDTH) - 1)
+#define PLL_MISC_LFCON_SHIFT 4
+#define PLL_MISC_LFCON_WIDTH 4
+#define PLL_MISC_LFCON_MASK ((1 << PLL_MISC_LFCON_WIDTH) - 1)
+#define PLL_MISC_VCOCON_SHIFT 0
+#define PLL_MISC_VCOCON_WIDTH 4
+#define PLL_MISC_VCOCON_MASK ((1 << PLL_MISC_VCOCON_WIDTH) - 1)
+
+#define OUT_OF_TABLE_CPCON 8
+
+#define PMC_PLLP_WB0_OVERRIDE 0xf8
+#define PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE BIT(12)
+#define PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE BIT(11)
+
+#define PLL_POST_LOCK_DELAY 50
+
+#define PLLDU_LFCON_SET_DIVN 600
+
+#define PLLE_BASE_DIVCML_SHIFT 24
+#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVP_SHIFT 16
+#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVN_SHIFT 8
+#define PLLE_BASE_DIVN_WIDTH 8
+#define PLLE_BASE_DIVM_SHIFT 0
+#define PLLE_BASE_DIVM_WIDTH 8
+
+#define PLLE_MISC_SETUP_BASE_SHIFT 16
+#define PLLE_MISC_SETUP_BASE_MASK (0xffff << PLLE_MISC_SETUP_BASE_SHIFT)
+#define PLLE_MISC_LOCK_ENABLE BIT(9)
+#define PLLE_MISC_READY BIT(15)
+#define PLLE_MISC_SETUP_EX_SHIFT 2
+#define PLLE_MISC_SETUP_EX_MASK (3 << PLLE_MISC_SETUP_EX_SHIFT)
+#define PLLE_MISC_SETUP_MASK (PLLE_MISC_SETUP_BASE_MASK | \
+ PLLE_MISC_SETUP_EX_MASK)
+#define PLLE_MISC_SETUP_VALUE (7 << PLLE_MISC_SETUP_BASE_SHIFT)
+
+#define PLLE_SS_CTRL 0x68
+#define PLLE_SS_DISABLE (7 << 10)
+
+#define PMC_SATA_PWRGT 0x1ac
+#define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE BIT(5)
+#define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL BIT(4)
+
+#define pll_readl(offset, p) readl_relaxed(p->clk_base + offset)
+#define pll_readl_base(p) pll_readl(p->params->base_reg, p)
+#define pll_readl_misc(p) pll_readl(p->params->misc_reg, p)
+
+#define pll_writel(val, offset, p) writel_relaxed(val, p->clk_base + offset)
+#define pll_writel_base(val, p) pll_writel(val, p->params->base_reg, p)
+#define pll_writel_misc(val, p) pll_writel(val, p->params->misc_reg, p)
+
+#define mask(w) ((1 << (w)) - 1)
+#define divm_mask(p) mask(p->divm_width)
+#define divn_mask(p) mask(p->divn_width)
+#define divp_mask(p) (p->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK : \
+ mask(p->divp_width))
+
+#define divm_max(p) (divm_mask(p))
+#define divn_max(p) (divn_mask(p))
+#define divp_max(p) (1 << (divp_mask(p)))
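+/* The P field stores log2 of the post divider (see __ffs() in _program_pll) */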
+
+static void clk_pll_enable_lock(struct tegra_clk_pll *pll)
+{
+ u32 val;
+
+ if (!(pll->flags & TEGRA_PLL_USE_LOCK))
+ return;
+
+ val = pll_readl_misc(pll);
+ val |= BIT(pll->params->lock_enable_bit_idx);
+ pll_writel_misc(val, pll);
+}
+
+static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll,
+ void __iomem *lock_addr, u32 lock_bit_idx)
+{
+ int i;
+ u32 val;
+
+ if (!(pll->flags & TEGRA_PLL_USE_LOCK)) {
+ udelay(pll->params->lock_delay);
+ return 0;
+ }
+
+ for (i = 0; i < pll->params->lock_delay; i++) {
+ val = readl_relaxed(lock_addr);
+ if (val & BIT(lock_bit_idx)) {
+ udelay(PLL_POST_LOCK_DELAY);
+ return 0;
+ }
+ udelay(2); /* timeout = 2 * lock time */
+ }
+
+ pr_err("%s: Timed out waiting for pll %s lock\n", __func__,
+ __clk_get_name(pll->hw.clk));
+
+ return -1;
+}
+
+static int clk_pll_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)
+ return val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE ? 1 : 0;
+ }
+
+ val = pll_readl_base(pll);
+
+ return val & PLL_BASE_ENABLE ? 1 : 0;
+}
+
+static int _clk_pll_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ clk_pll_enable_lock(pll);
+
+ val = pll_readl_base(pll);
+ val &= ~PLL_BASE_BYPASS;
+ val |= PLL_BASE_ENABLE;
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
+ writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ }
+
+ clk_pll_wait_for_lock(pll, pll->clk_base + pll->params->base_reg,
+ pll->params->lock_bit_idx);
+
+ return 0;
+}
+
+static void _clk_pll_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ val = pll_readl_base(pll);
+ val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
+ writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ }
+}
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ int ret;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ ret = _clk_pll_enable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_disable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static int _get_table_rate(struct clk_hw *hw,
+ struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table *sel;
+
+ for (sel = pll->freq_table; sel->input_rate != 0; sel++)
+ if (sel->input_rate == parent_rate &&
+ sel->output_rate == rate)
+ break;
+
+ if (sel->input_rate == 0)
+ return -EINVAL;
+
+ BUG_ON(sel->p < 1);
+
+ cfg->input_rate = sel->input_rate;
+ cfg->output_rate = sel->output_rate;
+ cfg->m = sel->m;
+ cfg->n = sel->n;
+ cfg->p = sel->p;
+ cfg->cpcon = sel->cpcon;
+
+ return 0;
+}
+
+static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long cfreq;
+ u32 p_div = 0;
+
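+ /* Pick the comparison frequency (reference rate / M) for this oscillator */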
+ switch (parent_rate) {
+ case 12000000:
+ case 26000000:
+ cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2000000;
+ break;
+ case 13000000:
+ cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2600000;
+ break;
+ case 16800000:
+ case 19200000:
+ cfreq = (rate <= 1200000 * 1000) ? 1200000 : 2400000;
+ break;
+ case 9600000:
+ case 28800000:
+ /*
+ * PLL_P_OUT1 rate is not listed in PLLA table
+ */
+ cfreq = parent_rate/(parent_rate/1000000);
+ break;
+ default:
+ pr_err("%s Unexpected reference rate %lu\n",
+ __func__, parent_rate);
+ BUG();
+ }
+
+ /* Raise VCO to guarantee 0.5% accuracy */
+ for (cfg->output_rate = rate; cfg->output_rate < 200 * cfreq;
+ cfg->output_rate <<= 1)
+ p_div++;
+
+ cfg->p = 1 << p_div;
+ cfg->m = parent_rate / cfreq;
+ cfg->n = cfg->output_rate / cfreq;
+ cfg->cpcon = OUT_OF_TABLE_CPCON;
+
+ if (cfg->m > divm_max(pll) || cfg->n > divn_max(pll) ||
+ cfg->p > divp_max(pll) || cfg->output_rate > pll->params->vco_max) {
+ pr_err("%s: Failed to set %s rate %lu\n",
+ __func__, __clk_get_name(hw->clk), rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ u32 divp, val, old_base;
+ int state;
+
+ divp = __ffs(cfg->p);
+
+ if (pll->flags & TEGRA_PLLU)
+ divp ^= 1;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ old_base = val = pll_readl_base(pll);
+ val &= ~((divm_mask(pll) << pll->divm_shift) |
+ (divn_mask(pll) << pll->divn_shift) |
+ (divp_mask(pll) << pll->divp_shift));
+ val |= ((cfg->m << pll->divm_shift) |
+ (cfg->n << pll->divn_shift) |
+ (divp << pll->divp_shift));
+ if (val == old_base) {
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+ return 0;
+ }
+
+ state = clk_pll_is_enabled(hw);
+
+ if (state) {
+ _clk_pll_disable(hw);
+ val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ }
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLL_HAS_CPCON) {
+ val = pll_readl_misc(pll);
+ val &= ~(PLL_MISC_CPCON_MASK << PLL_MISC_CPCON_SHIFT);
+ val |= cfg->cpcon << PLL_MISC_CPCON_SHIFT;
+ if (pll->flags & TEGRA_PLL_SET_LFCON) {
+ val &= ~(PLL_MISC_LFCON_MASK << PLL_MISC_LFCON_SHIFT);
+ if (cfg->n >= PLLDU_LFCON_SET_DIVN)
+ val |= 0x1 << PLL_MISC_LFCON_SHIFT;
+ } else if (pll->flags & TEGRA_PLL_SET_DCCON) {
+ val &= ~(0x1 << PLL_MISC_DCCON_SHIFT);
+ if (rate >= (pll->params->vco_max >> 1))
+ val |= 0x1 << PLL_MISC_DCCON_SHIFT;
+ }
+ pll_writel_misc(val, pll);
+ }
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ if (state)
+ clk_pll_enable(hw);
+
+ return 0;
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table cfg;
+
+ if (pll->flags & TEGRA_PLL_FIXED) {
+ if (rate != pll->fixed_rate) {
+ pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
+ __func__, __clk_get_name(hw->clk),
+ pll->fixed_rate, rate);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if (_get_table_rate(hw, &cfg, rate, parent_rate) &&
+ _calc_rate(hw, &cfg, rate, parent_rate))
+ return -EINVAL;
+
+ return _program_pll(hw, &cfg, rate);
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table cfg;
+ u64 output_rate = *prate;
+
+ if (pll->flags & TEGRA_PLL_FIXED)
+ return pll->fixed_rate;
+
+ /* PLLM is used for memory; we do not change rate */
+ if (pll->flags & TEGRA_PLLM)
+ return __clk_get_rate(hw->clk);
+
+ if (_get_table_rate(hw, &cfg, rate, *prate) &&
+ _calc_rate(hw, &cfg, rate, *prate))
+ return -EINVAL;
+
+ output_rate *= cfg.n;
+ do_div(output_rate, cfg.m * cfg.p);
+
+ return output_rate;
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val = pll_readl_base(pll);
+ u32 divn = 0, divm = 0, divp = 0;
+ u64 rate = parent_rate;
+
+ if (val & PLL_BASE_BYPASS)
+ return parent_rate;
+
+ if ((pll->flags & TEGRA_PLL_FIXED) && !(val & PLL_BASE_OVERRIDE)) {
+ struct tegra_clk_pll_freq_table sel;
+ if (_get_table_rate(hw, &sel, pll->fixed_rate, parent_rate)) {
+ pr_err("Clock %s has unknown fixed frequency\n",
+ __clk_get_name(hw->clk));
+ BUG();
+ }
+ return pll->fixed_rate;
+ }
+
+ divp = (val >> pll->divp_shift) & (divp_mask(pll));
+ if (pll->flags & TEGRA_PLLU)
+ divp ^= 1;
+
+ divn = (val >> pll->divn_shift) & (divn_mask(pll));
+ divm = (val >> pll->divm_shift) & (divm_mask(pll));
+ divm *= (1 << divp);
+
+ rate *= divn;
+ do_div(rate, divm);
+ return rate;
+}
+
+static int clk_plle_training(struct tegra_clk_pll *pll)
+{
+ u32 val;
+ unsigned long timeout;
+
+ if (!pll->pmc)
+ return -ENOSYS;
+
+ /*
+ * PLLE is already disabled, and setup cleared;
+ * create falling edge on PLLE IDDQ input.
+ */
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val |= PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val |= PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val &= ~PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = pll_readl_misc(pll);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (1) {
+ val = pll_readl_misc(pll);
+ if (val & PLLE_MISC_READY)
+ break;
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: timeout waiting for PLLE\n", __func__);
+ return -EBUSY;
+ }
+ udelay(300);
+ }
+
+ return 0;
+}
+
+static int clk_plle_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+ struct tegra_clk_pll_freq_table sel;
+ u32 val;
+ int err;
+
+ if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
+ return -EINVAL;
+
+ clk_pll_disable(hw);
+
+ val = pll_readl_misc(pll);
+ val &= ~(PLLE_MISC_LOCK_ENABLE | PLLE_MISC_SETUP_MASK);
+ pll_writel_misc(val, pll);
+
+ val = pll_readl_misc(pll);
+ if (!(val & PLLE_MISC_READY)) {
+ err = clk_plle_training(pll);
+ if (err)
+ return err;
+ }
+
+ if (pll->flags & TEGRA_PLLE_CONFIGURE) {
+ /* configure dividers */
+ val = pll_readl_base(pll);
+ val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
+ val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
+ val |= sel.m << pll->divm_shift;
+ val |= sel.n << pll->divn_shift;
+ val |= sel.p << pll->divp_shift;
+ val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
+ pll_writel_base(val, pll);
+ }
+
+ val = pll_readl_misc(pll);
+ val |= PLLE_MISC_SETUP_VALUE;
+ val |= PLLE_MISC_LOCK_ENABLE;
+ pll_writel_misc(val, pll);
+
+ val = readl(pll->clk_base + PLLE_SS_CTRL);
+ val |= PLLE_SS_DISABLE;
+ writel(val, pll->clk_base + PLLE_SS_CTRL);
+
+ val |= pll_readl_base(pll);
+ val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ pll_writel_base(val, pll);
+
+ clk_pll_wait_for_lock(pll, pll->clk_base + pll->params->misc_reg,
+ pll->params->lock_bit_idx);
+ return 0;
+}
+
+static unsigned long clk_plle_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val = pll_readl_base(pll);
+ u32 divn = 0, divm = 0, divp = 0;
+ u64 rate = parent_rate;
+
+ divp = (val >> pll->divp_shift) & (divp_mask(pll));
+ divn = (val >> pll->divn_shift) & (divn_mask(pll));
+ divm = (val >> pll->divm_shift) & (divm_mask(pll));
+ divm *= divp;
+
+ rate *= divn;
+ do_div(rate, divm);
+ return rate;
+}
+
+const struct clk_ops tegra_clk_pll_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+const struct clk_ops tegra_clk_plle_ops = {
+ .recalc_rate = clk_plle_recalc_rate,
+ .is_enabled = clk_pll_is_enabled,
+ .disable = clk_pll_disable,
+ .enable = clk_plle_enable,
+};
+
+static struct clk *_tegra_clk_register_pll(const char *name,
+ const char *parent_name, void __iomem *clk_base,
+ void __iomem *pmc, unsigned long flags,
+ unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock,
+ const struct clk_ops *ops)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ pll->clk_base = clk_base;
+ pll->pmc = pmc;
+
+ pll->freq_table = freq_table;
+ pll->params = pll_params;
+ pll->fixed_rate = fixed_rate;
+ pll->flags = pll_flags;
+ pll->lock = lock;
+
+ pll->divp_shift = PLL_BASE_DIVP_SHIFT;
+ pll->divp_width = PLL_BASE_DIVP_WIDTH;
+ pll->divn_shift = PLL_BASE_DIVN_SHIFT;
+ pll->divn_width = PLL_BASE_DIVN_WIDTH;
+ pll->divm_shift = PLL_BASE_DIVM_SHIFT;
+ pll->divm_width = PLL_BASE_DIVM_WIDTH;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+{
+ return _tegra_clk_register_pll(name, parent_name, clk_base, pmc,
+ flags, fixed_rate, pll_params, pll_flags, freq_table,
+ lock, &tegra_clk_pll_ops);
+}
+
+struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+{
+ return _tegra_clk_register_pll(name, parent_name, clk_base, pmc,
+ flags, fixed_rate, pll_params, pll_flags, freq_table,
+ lock, &tegra_clk_plle_ops);
+}
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
new file mode 100644
index 000000000000..2fd924d38606
--- /dev/null
+++ b/drivers/clk/tegra/clk-super.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define SUPER_STATE_IDLE 0
+#define SUPER_STATE_RUN 1
+#define SUPER_STATE_IRQ 2
+#define SUPER_STATE_FIQ 3
+
+#define SUPER_STATE_SHIFT 28
+#define SUPER_STATE_MASK ((BIT(SUPER_STATE_IDLE) | BIT(SUPER_STATE_RUN) | \
+ BIT(SUPER_STATE_IRQ) | BIT(SUPER_STATE_FIQ)) \
+ << SUPER_STATE_SHIFT)
+
+#define SUPER_LP_DIV2_BYPASS (1 << 16)
+
+#define super_state(s) (BIT(s) << SUPER_STATE_SHIFT)
+#define super_state_to_src_shift(m, s) ((m->width * s))
+#define super_state_to_src_mask(m) (((1 << m->width) - 1))
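+/* Each state (IDLE/RUN/IRQ/FIQ) has its own source field of 'width' bits */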
+
+static u8 clk_super_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
+ u32 val, state;
+ u8 source, shift;
+
+ val = readl_relaxed(mux->reg);
+
+ state = val & SUPER_STATE_MASK;
+
+ BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
+ (state != super_state(SUPER_STATE_IDLE)));
+ shift = (state == super_state(SUPER_STATE_IDLE)) ?
+ super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
+ super_state_to_src_shift(mux, SUPER_STATE_RUN);
+
+ source = (val >> shift) & super_state_to_src_mask(mux);
+
+ /*
+ * If LP_DIV2_BYPASS is not set and PLLX is the current parent, then
+ * PLLX/2 is the input source to CCLKLP.
+ */
+ if ((mux->flags & TEGRA_DIVIDER_2) && !(val & SUPER_LP_DIV2_BYPASS) &&
+ (source == mux->pllx_index))
+ source = mux->div2_index;
+
+ return source;
+}
+
+static int clk_super_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
+ u32 val, state;
+ int err = 0;
+ u8 parent_index, shift;
+ unsigned long flags = 0;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ val = readl_relaxed(mux->reg);
+ state = val & SUPER_STATE_MASK;
+ BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
+ (state != super_state(SUPER_STATE_IDLE)));
+ shift = (state == super_state(SUPER_STATE_IDLE)) ?
+ super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
+ super_state_to_src_shift(mux, SUPER_STATE_RUN);
+
+ /*
+ * For the LP-mode super clock, switching between the direct PLLX
+ * output and its divided-by-2 output is only allowed while a source
+ * other than PLLX is the current parent.
+ */
+ if ((mux->flags & TEGRA_DIVIDER_2) && ((index == mux->div2_index) ||
+ (index == mux->pllx_index))) {
+ parent_index = clk_super_get_parent(hw);
+ if ((parent_index == mux->div2_index) ||
+ (parent_index == mux->pllx_index)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ val ^= SUPER_LP_DIV2_BYPASS;
+ writel_relaxed(val, mux->reg);
+ udelay(2);
+
+ if (index == mux->div2_index)
+ index = mux->pllx_index;
+ }
+ val &= ~((super_state_to_src_mask(mux)) << shift);
+ val |= (index & (super_state_to_src_mask(mux))) << shift;
+
+ writel_relaxed(val, mux->reg);
+ udelay(2);
+
+out:
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return err;
+}
+
+const struct clk_ops tegra_clk_super_ops = {
+ .get_parent = clk_super_get_parent,
+ .set_parent = clk_super_set_parent,
+};
+
+struct clk *tegra_clk_register_super_mux(const char *name,
+ const char **parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock)
+{
+ struct tegra_clk_super_mux *super;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ super = kzalloc(sizeof(*super), GFP_KERNEL);
+ if (!super) {
+ pr_err("%s: could not allocate super clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &tegra_clk_super_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ super->reg = reg;
+ super->pllx_index = pllx_index;
+ super->div2_index = div2_index;
+ super->lock = lock;
+ super->width = width;
+ super->flags = clk_super_flags;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ super->hw.init = &init;
+
+ clk = clk_register(NULL, &super->hw);
+ if (IS_ERR(clk))
+ kfree(super);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
new file mode 100644
index 000000000000..143ce1f899ad
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -0,0 +1,1355 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/tegra.h>
+#include <linux/delay.h>
+
+#include "clk.h"
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00c
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_NUM 3
+
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_NUM 3
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
+#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
+#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
+#define OSC_CTRL_PLL_REF_DIV_1 (0<<28)
+#define OSC_CTRL_PLL_REF_DIV_2 (1<<28)
+#define OSC_CTRL_PLL_REF_DIV_4 (2<<28)
+
+#define OSC_FREQ_DET 0x58
+#define OSC_FREQ_DET_TRIG (1<<31)
+
+#define OSC_FREQ_DET_STATUS 0x5c
+#define OSC_FREQ_DET_BUSY (1<<31)
+#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+
+#define PLLS_BASE 0xf0
+#define PLLS_MISC 0xf4
+#define PLLC_BASE 0x80
+#define PLLC_MISC 0x8c
+#define PLLM_BASE 0x90
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+
+#define PLL_BASE_LOCK 27
+#define PLLE_MISC_LOCK 11
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+
+#define PLLC_OUT 0x84
+#define PLLM_OUT 0x94
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLA_OUT 0xb4
+
+#define CCLK_BURST_POLICY 0x20
+#define SUPER_CCLK_DIVIDER 0x24
+#define SCLK_BURST_POLICY 0x28
+#define SUPER_SCLK_DIVIDER 0x2c
+#define CLK_SYSTEM_RATE 0x30
+
+#define CCLK_BURST_POLICY_SHIFT 28
+#define CCLK_RUN_POLICY_SHIFT 4
+#define CCLK_IDLE_POLICY_SHIFT 0
+#define CCLK_IDLE_POLICY 1
+#define CCLK_RUN_POLICY 2
+#define CCLK_BURST_POLICY_PLLX 8
+
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_SPI 0x114
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_XIO 0x120
+#define CLK_SOURCE_TWC 0x12c
+#define CLK_SOURCE_IDE 0x144
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_CVE 0x140
+#define CLK_SOURCE_TVO 0x188
+#define CLK_SOURCE_TVDAC 0x194
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_DVC 0x128
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_MPE 0x170
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_EMC 0x19c
+
+#define AUDIO_SYNC_CLK 0x38
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_BLINK_ENB 7
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_BLINK_TIMER 0x40
+
+/* Tegra CPU clock and reset control regs */
+#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR 0x344
+
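+/* Per-core clock-stop bit and reset mask (one bit per reset field) */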
+#define CPU_CLOCK(cpu) (0x1 << (8 + cpu))
+#define CPU_RESET(cpu) (0x1111ul << (cpu))
+
+#ifdef CONFIG_PM_SLEEP
+static struct cpu_clk_suspend_context {
+ u32 pllx_misc;
+ u32 pllx_base;
+
+ u32 cpu_burst;
+ u32 clk_csite_src;
+ u32 cclk_divider;
+} tegra20_cpu_clk_sctx;
+#endif
+
+static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+
+static DEFINE_SPINLOCK(pll_div_lock);
+static DEFINE_SPINLOCK(sysrate_lock);
+
+#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _regs, _clk_num, periph_clk_enb_refcnt, \
+ _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, _regs, \
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
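+
+/*
+ * The wrappers above pass a fixed register layout to TEGRA_INIT_DATA:
+ * a 2-bit source mux in bits [31:30] plus an 8-bit U7.1 divider (_MUX),
+ * the same divider restricted to integer values (_INT), a 16-bit integer
+ * divider (_DIV16), or no divider at all (_NODIV).
+ */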
+
+/*
+ * IDs assigned here must be in sync with the DT bindings definition
+ * for Tegra20 clocks.
+ */
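+/* A DT consumer selects one of these by index, e.g. "clocks = <&tegra_car 28>" for host1x. */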
+enum tegra20_clk {
+ cpu, ac97 = 3, rtc, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1,
+ ndflash, sdmmc1, sdmmc4, twc, pwm, i2s2, epp, gr2d = 21, usbd, isp,
+ gr3d, ide, disp2, disp1, host1x, vcp, cache2 = 31, mem, ahbdma, apbdma,
+ kbc = 36, stat_mon, pmc, fuse, kfuse, sbc1, nor, spi, sbc2, xio, sbc3,
+ dvc, dsi, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
+ usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
+ pex, owr, afi, csite, pcie_xclk, avpucq = 75, la, irama = 84, iramb,
+ iramc, iramd, cram2, audio_2x, clk_d, csus = 92, cdev1, cdev2,
+ uartb = 96, vfir, spdif_in, spdif_out, vi, vi_sensor, tvo, cve,
+ osc, clk_32k, clk_m, sclk, cclk, hclk, pclk, blink, pll_a, pll_a_out0,
+ pll_c, pll_c_out1, pll_d, pll_d_out0, pll_e, pll_m, pll_m_out1,
+ pll_p, pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_s, pll_u,
+ pll_x, cop, audio, pll_ref, twd, clk_max,
+};
+
+static struct clk *clks[clk_max];
+static struct clk_onecell_data clk_data;
+
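+/* Frequency table entries: { input rate, output rate, N, M, P, CPCON } */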
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 600000000, 600, 12, 1, 8 },
+ { 13000000, 600000000, 600, 13, 1, 8 },
+ { 19200000, 600000000, 500, 16, 1, 6 },
+ { 26000000, 600000000, 600, 26, 1, 8 },
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ { 12000000, 216000000, 432, 12, 2, 8},
+ { 13000000, 216000000, 432, 13, 2, 8},
+ { 19200000, 216000000, 90, 4, 2, 1},
+ { 26000000, 216000000, 432, 26, 2, 8},
+ { 12000000, 432000000, 432, 12, 1, 8},
+ { 13000000, 432000000, 432, 13, 1, 8},
+ { 19200000, 432000000, 90, 4, 1, 1},
+ { 26000000, 432000000, 432, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ { 28800000, 56448000, 49, 25, 1, 1},
+ { 28800000, 73728000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 19200000, 216000000, 135, 12, 1, 3},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ { 12000000, 480000000, 960, 12, 2, 0},
+ { 13000000, 480000000, 960, 13, 2, 0},
+ { 19200000, 480000000, 200, 4, 2, 0},
+ { 26000000, 480000000, 960, 26, 2, 0},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1 GHz */
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ /* 912 MHz */
+ { 12000000, 912000000, 912, 12, 1, 12},
+ { 13000000, 912000000, 912, 13, 1, 12},
+ { 19200000, 912000000, 760, 16, 1, 8},
+ { 26000000, 912000000, 912, 26, 1, 12},
+
+ /* 816 MHz */
+ { 12000000, 816000000, 816, 12, 1, 12},
+ { 13000000, 816000000, 816, 13, 1, 12},
+ { 19200000, 816000000, 680, 16, 1, 8},
+ { 26000000, 816000000, 816, 26, 1, 12},
+
+ /* 760 MHz */
+ { 12000000, 760000000, 760, 12, 1, 12},
+ { 13000000, 760000000, 760, 13, 1, 12},
+ { 19200000, 760000000, 950, 24, 1, 8},
+ { 26000000, 760000000, 760, 26, 1, 12},
+
+ /* 750 MHz */
+ { 12000000, 750000000, 750, 12, 1, 12},
+ { 13000000, 750000000, 750, 13, 1, 12},
+ { 19200000, 750000000, 625, 16, 1, 8},
+ { 26000000, 750000000, 750, 26, 1, 12},
+
+ /* 608 MHz */
+ { 12000000, 608000000, 608, 12, 1, 12},
+ { 13000000, 608000000, 608, 13, 1, 12},
+ { 19200000, 608000000, 380, 12, 1, 8},
+ { 26000000, 608000000, 608, 26, 1, 12},
+
+ /* 456 MHz */
+ { 12000000, 456000000, 456, 12, 1, 12},
+ { 13000000, 456000000, 456, 13, 1, 12},
+ { 19200000, 456000000, 380, 16, 1, 8},
+ { 26000000, 456000000, 456, 26, 1, 12},
+
+ /* 312 MHz */
+ { 12000000, 312000000, 312, 12, 1, 12},
+ { 13000000, 312000000, 312, 13, 1, 12},
+ { 19200000, 312000000, 260, 16, 1, 8},
+ { 26000000, 312000000, 312, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ { 12000000, 100000000, 200, 24, 1, 0 },
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+/* PLL parameters */
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 48000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 12000000,
+ .cf_min = 0,
+ .cf_max = 0,
+ .vco_min = 0,
+ .vco_max = 0,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .lock_bit_idx = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 0,
+};
+
+/* Peripheral clock registers */
+static struct tegra_clk_periph_regs periph_l_regs = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+};
+
+static struct tegra_clk_periph_regs periph_h_regs = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+};
+
+static struct tegra_clk_periph_regs periph_u_regs = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+};
+
+static unsigned long tegra20_clk_measure_input_freq(void)
+{
+ u32 osc_ctrl = readl_relaxed(clk_base + OSC_CTRL);
+ u32 auto_clk_control = osc_ctrl & OSC_CTRL_OSC_FREQ_MASK;
+ u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
+ unsigned long input_freq;
+
+ switch (auto_clk_control) {
+ case OSC_CTRL_OSC_FREQ_12MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 12000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_13MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 13000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_19_2MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 19200000;
+ break;
+ case OSC_CTRL_OSC_FREQ_26MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 26000000;
+ break;
+ default:
+ pr_err("Unexpected clock autodetect value %d\n",
+ auto_clk_control);
+ BUG();
+ return 0;
+ }
+
+ return input_freq;
+}
+
+static unsigned int tegra20_get_pll_ref_div(void)
+{
+ u32 pll_ref_div = readl_relaxed(clk_base + OSC_CTRL) &
+ OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (pll_ref_div) {
+ case OSC_CTRL_PLL_REF_DIV_1:
+ return 1;
+ case OSC_CTRL_PLL_REF_DIV_2:
+ return 2;
+ case OSC_CTRL_PLL_REF_DIV_4:
+ return 4;
+ default:
+ pr_err("Invalid pll ref divider %d\n", pll_ref_div);
+ BUG();
+ }
+ return 0;
+}
+
+static void tegra20_pll_init(void)
+{
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_c_params, TEGRA_PLL_HAS_CPCON,
+ pll_c_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[pll_c] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
+ 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[pll_c_out1] = clk;
+
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, NULL, 0,
+ 216000000, &pll_p_params, TEGRA_PLL_FIXED |
+ TEGRA_PLL_HAS_CPCON, pll_p_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ clks[pll_p] = clk;
+
+ /* PLLP_OUT1 */
+ clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
+ clk_base + PLLP_OUTA, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
+ clk_base + PLLP_OUTA, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out1", NULL);
+ clks[pll_p_out1] = clk;
+
+ /* PLLP_OUT2 */
+ clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
+ clk_base + PLLP_OUTA, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 24, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
+ clk_base + PLLP_OUTA, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out2", NULL);
+ clks[pll_p_out2] = clk;
+
+ /* PLLP_OUT3 */
+ clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
+ clk_base + PLLP_OUTB, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
+ clk_base + PLLP_OUTB, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out3", NULL);
+ clks[pll_p_out3] = clk;
+
+ /* PLLP_OUT4 */
+ clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
+ clk_base + PLLP_OUTB, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 24, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
+ clk_base + PLLP_OUTB, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out4", NULL);
+ clks[pll_p_out4] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
+ &pll_m_params, TEGRA_PLL_HAS_CPCON,
+ pll_m_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[pll_m] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[pll_m_out1] = clk;
+
+ /* PLLX */
+ clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_x_params, TEGRA_PLL_HAS_CPCON,
+ pll_x_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_x", NULL);
+ clks[pll_x] = clk;
+
+ /* PLLU */
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON,
+ pll_u_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[pll_u] = clk;
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_d_params, TEGRA_PLL_HAS_CPCON,
+ pll_d_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[pll_d] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[pll_d_out0] = clk;
+
+ /* PLLA */
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, NULL, 0,
+ 0, &pll_a_params, TEGRA_PLL_HAS_CPCON,
+ pll_a_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_a", NULL);
+ clks[pll_a] = clk;
+
+ /* PLLA_OUT0 */
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_a_out0", NULL);
+ clks[pll_a_out0] = clk;
+
+ /* PLLE */
+ clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL,
+ 0, 100000000, &pll_e_params,
+ 0, pll_e_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_e", NULL);
+ clks[pll_e] = clk;
+}
+
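+/*
+ * Parent names are listed in the order of the CCLK/SCLK burst policy source
+ * field encoding used by the super mux.
+ */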
+static const char *cclk_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclk", "pll_p_out4_cclk",
+ "pll_p_out3_cclk", "clk_d", "pll_x" };
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p_out3", "pll_p_out2", "clk_d",
+ "clk_32k", "pll_m_out1" };
+
+static void tegra20_super_clk_init(void)
+{
+ struct clk *clk;
+
+ /*
+ * DIV_U71 dividers for CCLK; these dividers are used only
+ * if the parent clock runs at a fixed rate.
+ */
+
+ /*
+ * Clock input to cclk, divided down from pll_p using the
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclk", "pll_p",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclk", NULL);
+
+ /*
+ * Clock input to cclk, divided down from pll_p_out3 using the
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclk", "pll_p_out3",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclk", NULL);
+
+ /*
+ * Clock input to cclk, divided down from pll_p_out4 using the
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclk", "pll_p_out4",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclk", NULL);
+
+ /* CCLK */
+ clk = tegra_clk_register_super_mux("cclk", cclk_parents,
+ ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT,
+ clk_base + CCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "cclk", NULL);
+ clks[cclk] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "sclk", NULL);
+ clks[sclk] = clk;
+
+ /* HCLK */
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + CLK_SYSTEM_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
+ clk_base + CLK_SYSTEM_RATE, 7,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "hclk", NULL);
+ clks[hclk] = clk;
+
+ /* PCLK */
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + CLK_SYSTEM_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
+ clk_base + CLK_SYSTEM_RATE, 3,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "pclk", NULL);
+ clks[pclk] = clk;
+
+ /* twd */
+ clk = clk_register_fixed_factor(NULL, "twd", "cclk", 0, 1, 4);
+ clk_register_clkdev(clk, "twd", NULL);
+ clks[twd] = clk;
+}
+
+static const char *audio_parents[] = {"spdif_in", "i2s1", "i2s2", "unused",
+ "pll_a_out0", "unused", "unused",
+ "unused"};
+
+static void __init tegra20_audio_clk_init(void)
+{
+ struct clk *clk;
+
+ /* audio */
+ clk = clk_register_mux(NULL, "audio_mux", audio_parents,
+ ARRAY_SIZE(audio_parents), 0,
+ clk_base + AUDIO_SYNC_CLK, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio", "audio_mux", 0,
+ clk_base + AUDIO_SYNC_CLK, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio", NULL);
+ clks[audio] = clk;
+
+ /* audio_2x */
+ clk = clk_register_fixed_factor(NULL, "audio_doubler", "audio",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_periph_gate("audio_2x", "audio_doubler",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 89, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio_2x", NULL);
+ clks[audio_2x] = clk;
+}
+
+static const char *i2s1_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *i2s2_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *spdif_out_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *spdif_in_parents[] = {"pll_p", "pll_c", "pll_m"};
+static const char *pwm_parents[] = {"pll_p", "pll_c", "audio", "clk_m",
+ "clk_32k"};
+static const char *mux_pllpcm_clkm[] = {"pll_p", "pll_c", "pll_m", "clk_m"};
+static const char *mux_pllmcpa[] = {"pll_m", "pll_c", "pll_c", "pll_a"};
+static const char *mux_pllpdc_clkm[] = {"pll_p", "pll_d_out0", "pll_c",
+ "clk_m"};
+static const char *mux_pllmcp_clkm[] = {"pll_m", "pll_c", "pll_p", "clk_m"};
+
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+ TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra20-i2s.0", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
+ TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra20-i2s.1", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
+ TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra20-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
+ TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra20-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
+ TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
+ TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
+ TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
+ TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
+ TEGRA_INIT_DATA_MUX("spi", NULL, "spi", mux_pllpcm_clkm, CLK_SOURCE_SPI, 43, &periph_h_regs, TEGRA_PERIPH_ON_APB, spi),
+ TEGRA_INIT_DATA_MUX("xio", NULL, "xio", mux_pllpcm_clkm, CLK_SOURCE_XIO, 45, &periph_h_regs, 0, xio),
+ TEGRA_INIT_DATA_MUX("twc", NULL, "twc", mux_pllpcm_clkm, CLK_SOURCE_TWC, 16, &periph_l_regs, TEGRA_PERIPH_ON_APB, twc),
+ TEGRA_INIT_DATA_MUX("ide", NULL, "ide", mux_pllpcm_clkm, CLK_SOURCE_IDE, 25, &periph_l_regs, 0, ide),
+ TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, 0, ndflash),
+ TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
+ TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, 0, csite),
+ TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, 0, la),
+ TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
+ TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
+ TEGRA_INIT_DATA_MUX("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
+ TEGRA_INIT_DATA_MUX("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
+ TEGRA_INIT_DATA_MUX("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
+ TEGRA_INIT_DATA_MUX("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
+ TEGRA_INIT_DATA_MUX("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
+ TEGRA_INIT_DATA_MUX("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
+ TEGRA_INIT_DATA_MUX("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
+ TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
+ TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
+ TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
+ TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
+ TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
+ TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
+ TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
+ TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
+ TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
+ TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllpcm_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
+ TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllpcm_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
+ TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllpcm_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
+ TEGRA_INIT_DATA_DIV16("dvc", "div-clk", "tegra-i2c.3", mux_pllpcm_clkm, CLK_SOURCE_DVC, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, dvc),
+ TEGRA_INIT_DATA_MUX("hdmi", NULL, "hdmi", mux_pllpdc_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
+ TEGRA_INIT_DATA("pwm", NULL, "tegra-pwm", pwm_parents, CLK_SOURCE_PWM, 28, 3, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, TEGRA_PERIPH_ON_APB, pwm),
+};
+
+static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
+ TEGRA_INIT_DATA_NODIV("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 30, 2, 6, &periph_l_regs, TEGRA_PERIPH_ON_APB, uarta),
+ TEGRA_INIT_DATA_NODIV("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 30, 2, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, uartb),
+ TEGRA_INIT_DATA_NODIV("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 30, 2, 55, &periph_h_regs, TEGRA_PERIPH_ON_APB, uartc),
+ TEGRA_INIT_DATA_NODIV("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 30, 2, 65, &periph_u_regs, TEGRA_PERIPH_ON_APB, uartd),
+ TEGRA_INIT_DATA_NODIV("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 30, 2, 66, &periph_u_regs, TEGRA_PERIPH_ON_APB, uarte),
+ TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpdc_clkm, CLK_SOURCE_DISP1, 30, 2, 27, &periph_l_regs, 0, disp1),
+ TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, &periph_l_regs, 0, disp2),
+};
+
+static void __init tegra20_periph_clk_init(void)
+{
+ struct tegra_periph_init_data *data;
+ struct clk *clk;
+ int i;
+
+ /* apbdma */
+ clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
+ 0, 34, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-apbdma");
+ clks[apbdma] = clk;
+
+ /* rtc */
+ clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET,
+ clk_base, 0, 4, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "rtc-tegra");
+ clks[rtc] = clk;
+
+ /* timer */
+ clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base,
+ 0, 5, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "timer");
+ clks[timer] = clk;
+
+ /* kbc */
+ clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 36, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-kbc");
+ clks[kbc] = clk;
+
+ /* csus */
+ clk = tegra_clk_register_periph_gate("csus", "clk_m",
+ TEGRA_PERIPH_NO_RESET,
+ clk_base, 0, 92, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csus", "tegra_camera");
+ clks[csus] = clk;
+
+ /* vcp */
+ clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0,
+ clk_base, 0, 29, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "vcp", "tegra-avp");
+ clks[vcp] = clk;
+
+ /* bsea */
+ clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0,
+ clk_base, 0, 62, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsea", "tegra-avp");
+ clks[bsea] = clk;
+
+ /* bsev */
+ clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0,
+ clk_base, 0, 63, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsev", "tegra-aes");
+ clks[bsev] = clk;
+
+ /* emc */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 30, 2, 0, NULL);
+ clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+ 57, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "emc", NULL);
+ clks[emc] = clk;
+
+ /* usbd */
+ clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
+ 22, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
+ clks[usbd] = clk;
+
+ /* usb2 */
+ clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
+ 58, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.1");
+ clks[usb2] = clk;
+
+ /* usb3 */
+ clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
+ 59, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.2");
+ clks[usb3] = clk;
+
+ /* dsi */
+ clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
+ 48, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "dsi");
+ clks[dsi] = clk;
+
+ /* csi */
+ clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
+ 0, 52, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csi", "tegra_camera");
+ clks[csi] = clk;
+
+ /* isp */
+ clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "isp", "tegra_camera");
+ clks[isp] = clk;
+
+ /* pex */
+ clk = tegra_clk_register_periph_gate("pex", "clk_m", 0, clk_base, 0, 70,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pex", NULL);
+ clks[pex] = clk;
+
+ /* afi */
+ clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "afi", NULL);
+ clks[afi] = clk;
+
+ /* pcie_xclk */
+ clk = tegra_clk_register_periph_gate("pcie_xclk", "clk_m", 0, clk_base,
+ 0, 74, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pcie_xclk", NULL);
+ clks[pcie_xclk] = clk;
+
+ /* cdev1 */
+ clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, CLK_IS_ROOT,
+ 26000000);
+ clk = tegra_clk_register_periph_gate("cdev1", "cdev1_fixed", 0,
+ clk_base, 0, 94, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "cdev1", NULL);
+ clks[cdev1] = clk;
+
+ /* cdev2 */
+ clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, CLK_IS_ROOT,
+ 26000000);
+ clk = tegra_clk_register_periph_gate("cdev2", "cdev2_fixed", 0,
+ clk_base, 0, 93, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "cdev2", NULL);
+ clks[cdev2] = clk;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+ data = &tegra_periph_clk_list[i];
+ clk = tegra_clk_register_periph(data->name, data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
+ data = &tegra_periph_nodiv_clk_list[i];
+ clk = tegra_clk_register_periph_nodiv(data->name,
+ data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+}
+
+static void __init tegra20_fixed_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_32k */
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
+ 32768);
+ clk_register_clkdev(clk, "clk_32k", NULL);
+ clks[clk_32k] = clk;
+}
+
+static void __init tegra20_pmc_clk_init(void)
+{
+ struct clk *clk;
+
+ /* blink */
+ writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ clks[blink] = clk;
+}
+
+static void __init tegra20_osc_clk_init(void)
+{
+ struct clk *clk;
+ unsigned long input_freq;
+ unsigned int pll_ref_div;
+
+ input_freq = tegra20_clk_measure_input_freq();
+
+ /* clk_m */
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT |
+ CLK_IGNORE_UNUSED, input_freq);
+ clk_register_clkdev(clk, "clk_m", NULL);
+ clks[clk_m] = clk;
+
+ /* pll_ref */
+ pll_ref_div = tegra20_get_pll_ref_div();
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ CLK_SET_RATE_PARENT, 1, pll_ref_div);
+ clk_register_clkdev(clk, "pll_ref", NULL);
+ clks[pll_ref] = clk;
+}
+
+/* Tegra20 CPU clock and reset control functions */
+static void tegra20_wait_cpu_in_reset(u32 cpu)
+{
+ unsigned int reg;
+
+ do {
+ reg = readl(clk_base +
+ TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ cpu_relax();
+ } while (!(reg & (1 << cpu))); /* wait until the CPU is held in reset */
+}
+
+static void tegra20_put_cpu_in_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ dmb();
+}
+
+static void tegra20_cpu_out_of_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+ wmb();
+}
+
+static void tegra20_enable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg & ~CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ barrier();
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+static void tegra20_disable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg | CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static bool tegra20_cpu_rail_off_ready(void)
+{
+ unsigned int cpu_rst_status;
+
+ cpu_rst_status = readl(clk_base +
+ TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+
+ return !!(cpu_rst_status & 0x2);
+}
+
+static void tegra20_cpu_clock_suspend(void)
+{
+ /* switch CoreSight (csite) to clk_m, save off the original source */
+ tegra20_cpu_clk_sctx.clk_csite_src =
+ readl(clk_base + CLK_SOURCE_CSITE);
+ writel(3<<30, clk_base + CLK_SOURCE_CSITE);
+
+ tegra20_cpu_clk_sctx.cpu_burst =
+ readl(clk_base + CCLK_BURST_POLICY);
+ tegra20_cpu_clk_sctx.pllx_base =
+ readl(clk_base + PLLX_BASE);
+ tegra20_cpu_clk_sctx.pllx_misc =
+ readl(clk_base + PLLX_MISC);
+ tegra20_cpu_clk_sctx.cclk_divider =
+ readl(clk_base + SUPER_CCLK_DIVIDER);
+}
+
+static void tegra20_cpu_clock_resume(void)
+{
+ unsigned int reg, policy;
+
+ /* Is CPU complex already running on PLLX? */
+ reg = readl(clk_base + CCLK_BURST_POLICY);
+ policy = (reg >> CCLK_BURST_POLICY_SHIFT) & 0xF;
+
+ if (policy == CCLK_IDLE_POLICY)
+ reg = (reg >> CCLK_IDLE_POLICY_SHIFT) & 0xF;
+ else if (policy == CCLK_RUN_POLICY)
+ reg = (reg >> CCLK_RUN_POLICY_SHIFT) & 0xF;
+ else
+ BUG();
+
+ if (reg != CCLK_BURST_POLICY_PLLX) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra20_cpu_clk_sctx.pllx_misc,
+ clk_base + PLLX_MISC);
+ writel(tegra20_cpu_clk_sctx.pllx_base,
+ clk_base + PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra20_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
+
+ /*
+ * Restore original burst policy setting for calls resulting from CPU
+ * LP2 in idle or system suspend.
+ */
+ writel(tegra20_cpu_clk_sctx.cclk_divider,
+ clk_base + SUPER_CCLK_DIVIDER);
+ writel(tegra20_cpu_clk_sctx.cpu_burst,
+ clk_base + CCLK_BURST_POLICY);
+
+ writel(tegra20_cpu_clk_sctx.clk_csite_src,
+ clk_base + CLK_SOURCE_CSITE);
+}
+#endif
+
+static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
+ .wait_for_reset = tegra20_wait_cpu_in_reset,
+ .put_in_reset = tegra20_put_cpu_in_reset,
+ .out_of_reset = tegra20_cpu_out_of_reset,
+ .enable_clock = tegra20_enable_cpu_clock,
+ .disable_clock = tegra20_disable_cpu_clock,
+#ifdef CONFIG_PM_SLEEP
+ .rail_off_ready = tegra20_cpu_rail_off_ready,
+ .suspend = tegra20_cpu_clock_suspend,
+ .resume = tegra20_cpu_clock_resume,
+#endif
+};
+
+static __initdata struct tegra_clk_init_table init_table[] = {
+ {pll_p, clk_max, 216000000, 1},
+ {pll_p_out1, clk_max, 28800000, 1},
+ {pll_p_out2, clk_max, 48000000, 1},
+ {pll_p_out3, clk_max, 72000000, 1},
+ {pll_p_out4, clk_max, 24000000, 1},
+ {pll_c, clk_max, 600000000, 1},
+ {pll_c_out1, clk_max, 120000000, 1},
+ {sclk, pll_c_out1, 0, 1},
+ {hclk, clk_max, 0, 1},
+ {pclk, clk_max, 60000000, 1},
+ {csite, clk_max, 0, 1},
+ {emc, clk_max, 0, 1},
+ {cclk, clk_max, 0, 1},
+ {uarta, pll_p, 0, 0},
+ {uartb, pll_p, 0, 0},
+ {uartc, pll_p, 0, 0},
+ {uartd, pll_p, 0, 0},
+ {uarte, pll_p, 0, 0},
+ {usbd, clk_max, 12000000, 0},
+ {usb2, clk_max, 12000000, 0},
+ {usb3, clk_max, 12000000, 0},
+ {pll_a, clk_max, 56448000, 1},
+ {pll_a_out0, clk_max, 11289600, 1},
+ {cdev1, clk_max, 0, 1},
+ {blink, clk_max, 32768, 1},
+ {i2s1, pll_a_out0, 11289600, 0},
+ {i2s2, pll_a_out0, 11289600, 0},
+ {sdmmc1, pll_p, 48000000, 0},
+ {sdmmc3, pll_p, 48000000, 0},
+ {sdmmc4, pll_p, 48000000, 0},
+ {spi, pll_p, 20000000, 0},
+ {sbc1, pll_p, 100000000, 0},
+ {sbc2, pll_p, 100000000, 0},
+ {sbc3, pll_p, 100000000, 0},
+ {sbc4, pll_p, 100000000, 0},
+ {host1x, pll_c, 150000000, 0},
+ {disp1, pll_p, 600000000, 0},
+ {disp2, pll_p, 600000000, 0},
+ {clk_max, clk_max, 0, 0}, /* This MUST be the last entry */
+};
+
+/*
+ * Some clocks may be used by different drivers depending on the board
+ * configuration. List those here to register them twice in the clock lookup
+ * table under two names.
+ */
+static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
+ TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(cclk, NULL, "cpu"),
+ TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL),
+ TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* Must be the last entry */
+};
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-pmc" },
+ {},
+};
+
+void __init tegra20_clock_init(struct device_node *np)
+{
+ int i;
+ struct device_node *node;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("Can't map CAR registers\n");
+ BUG();
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ BUG();
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ BUG();
+ }
+
+ tegra20_osc_clk_init();
+ tegra20_pmc_clk_init();
+ tegra20_fixed_clk_init();
+ tegra20_pll_init();
+ tegra20_super_clk_init();
+ tegra20_periph_clk_init();
+ tegra20_audio_clk_init();
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ if (IS_ERR(clks[i])) {
+ pr_err("Tegra20 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ BUG();
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ tegra_init_from_table(init_table, clks, clk_max);
+
+ tegra_cpu_car_ops = &tegra20_cpu_car_ops;
+}
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
new file mode 100644
index 000000000000..32c61cb6d0bb
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -0,0 +1,1994 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/tegra.h>
+
+#include <mach/powergate.h>
+
+#include "clk.h"
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00c
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35c
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_SET_W 0x438
+#define RST_DEVICES_CLR_W 0x43c
+#define RST_DEVICES_NUM 5
+
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_SET_W 0x448
+#define CLK_OUT_ENB_CLR_W 0x44c
+#define CLK_OUT_ENB_NUM 5
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_MASK (0xF<<28)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0x0<<28)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (0x4<<28)
+#define OSC_CTRL_OSC_FREQ_12MHZ (0x8<<28)
+#define OSC_CTRL_OSC_FREQ_26MHZ (0xC<<28)
+#define OSC_CTRL_OSC_FREQ_16_8MHZ (0x1<<28)
+#define OSC_CTRL_OSC_FREQ_38_4MHZ (0x5<<28)
+#define OSC_CTRL_OSC_FREQ_48MHZ (0x9<<28)
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3<<26)
+#define OSC_CTRL_PLL_REF_DIV_1 (0<<26)
+#define OSC_CTRL_PLL_REF_DIV_2 (1<<26)
+#define OSC_CTRL_PLL_REF_DIV_4 (2<<26)
+
+#define OSC_FREQ_DET 0x58
+#define OSC_FREQ_DET_TRIG BIT(31)
+
+#define OSC_FREQ_DET_STATUS 0x5c
+#define OSC_FREQ_DET_BUSY BIT(31)
+#define OSC_FREQ_DET_CNT_MASK 0xffff
+
+#define CCLKG_BURST_POLICY 0x368
+#define SUPER_CCLKG_DIVIDER 0x36c
+#define CCLKLP_BURST_POLICY 0x370
+#define SUPER_CCLKLP_DIVIDER 0x374
+#define SCLK_BURST_POLICY 0x028
+#define SUPER_SCLK_DIVIDER 0x02c
+
+#define SYSTEM_CLK_RATE 0x030
+
+#define PLLC_BASE 0x80
+#define PLLC_MISC 0x8c
+#define PLLM_BASE 0x90
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLD2_BASE 0x4b8
+#define PLLD2_MISC 0x4bc
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+
+#define PLL_BASE_LOCK 27
+#define PLLE_MISC_LOCK 11
+
+#define PLLE_AUX 0x48c
+#define PLLC_OUT 0x84
+#define PLLM_OUT 0x94
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLA_OUT 0xb4
+
+#define AUDIO_SYNC_CLK_I2S0 0x4a0
+#define AUDIO_SYNC_CLK_I2S1 0x4a4
+#define AUDIO_SYNC_CLK_I2S2 0x4a8
+#define AUDIO_SYNC_CLK_I2S3 0x4ac
+#define AUDIO_SYNC_CLK_I2S4 0x4b0
+#define AUDIO_SYNC_CLK_SPDIF 0x4b4
+
+#define PMC_CLK_OUT_CNTRL 0x1a8
+
+#define CLK_SOURCE_I2S0 0x1d8
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_I2S3 0x3bc
+#define CLK_SOURCE_I2S4 0x3c0
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_D_AUDIO 0x3d0
+#define CLK_SOURCE_DAM0 0x3d8
+#define CLK_SOURCE_DAM1 0x3dc
+#define CLK_SOURCE_DAM2 0x3e0
+#define CLK_SOURCE_HDA 0x428
+#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_SBC5 0x3c8
+#define CLK_SOURCE_SBC6 0x3cc
+#define CLK_SOURCE_SATA_OOB 0x420
+#define CLK_SOURCE_SATA 0x424
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_NDSPEED 0x3f8
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_I2C4 0x3c4
+#define CLK_SOURCE_I2C5 0x128
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_3D2 0x3b0
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_MPE 0x170
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_CVE 0x140
+#define CLK_SOURCE_TVO 0x188
+#define CLK_SOURCE_DTV 0x1dc
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_TVDAC 0x194
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_DSIB 0xd0
+#define CLK_SOURCE_TSENSOR 0x3b8
+#define CLK_SOURCE_ACTMON 0x3e8
+#define CLK_SOURCE_EXTERN1 0x3ec
+#define CLK_SOURCE_EXTERN2 0x3f0
+#define CLK_SOURCE_EXTERN3 0x3f4
+#define CLK_SOURCE_I2CSLOW 0x3fc
+#define CLK_SOURCE_SE 0x42c
+#define CLK_SOURCE_MSELECT 0x3b4
+#define CLK_SOURCE_EMC 0x19c
+
+#define AUDIO_SYNC_DOUBLER 0x49c
+
+#define PMC_CTRL 0
+#define PMC_CTRL_BLINK_ENB 7
+
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_BLINK_TIMER 0x40
+
+#define UTMIP_PLL_CFG2 0x488
+#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
+#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
+
+#define UTMIP_PLL_CFG1 0x484
+#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
+#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
+
+/* Tegra CPU clock and reset control regs */
+#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR 0x344
+#define TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR 0x34c
+#define TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
+
+#define CPU_CLOCK(cpu) (0x1 << (8 + cpu))
+#define CPU_RESET(cpu) (0x1111ul << (cpu))
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+#define CLK_RESET_PLLX_BASE 0xe0
+#define CLK_RESET_PLLX_MISC 0xe4
+
+#define CLK_RESET_SOURCE_CSITE 0x1d4
+
+#define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
+#define CLK_RESET_CCLK_RUN_POLICY_SHIFT 4
+#define CLK_RESET_CCLK_IDLE_POLICY_SHIFT 0
+#define CLK_RESET_CCLK_IDLE_POLICY 1
+#define CLK_RESET_CCLK_RUN_POLICY 2
+#define CLK_RESET_CCLK_BURST_POLICY_PLLX 8
+
+#ifdef CONFIG_PM_SLEEP
+static struct cpu_clk_suspend_context {
+ u32 pllx_misc;
+ u32 pllx_base;
+
+ u32 cpu_burst;
+ u32 clk_csite_src;
+ u32 cclk_divider;
+} tegra30_cpu_clk_sctx;
+#endif
+
+static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+static unsigned long input_freq;
+
+static DEFINE_SPINLOCK(clk_doubler_lock);
+static DEFINE_SPINLOCK(clk_out_lock);
+static DEFINE_SPINLOCK(pll_div_lock);
+static DEFINE_SPINLOCK(cml_lock);
+static DEFINE_SPINLOCK(pll_d_lock);
+static DEFINE_SPINLOCK(sysrate_lock);
+
+#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, \
+ _regs, _clk_num, periph_clk_enb_refcnt, \
+ _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_MUX8(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 29, 3, 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_UART(_name, _con_id, _dev_id, _parents, _offset,\
+ _clk_num, _regs, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 1, TEGRA_DIVIDER_UART, _regs, \
+ _clk_num, periph_clk_enb_refcnt, 0, _clk_id)
+
+#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, _regs, \
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+/*
+ * IDs assigned here must be in sync with DT bindings definition
+ * for Tegra30 clocks.
+ */
+enum tegra30_clk {
+ cpu, rtc = 4, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1, ndflash,
+ sdmmc1, sdmmc4, pwm = 17, i2s2, epp, gr2d = 21, usbd, isp, gr3d,
+ disp2 = 26, disp1, host1x, vcp, i2s0, cop_cache, mc, ahbdma, apbdma,
+ kbc = 36, statmon, pmc, kfuse = 40, sbc1, nor, sbc2 = 44, sbc3 = 46,
+ i2c5, dsia, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
+ usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
+ pcie, owr, afi, csite, pciex, avpucq, la, dtv = 79, ndspeed, i2cslow,
+ dsib, irama = 84, iramb, iramc, iramd, cram2, audio_2x = 90, csus = 92,
+ cdev1, cdev2, cpu_g = 96, cpu_lp, gr3d2, mselect, tsensor, i2s3, i2s4,
+ i2c4, sbc5, sbc6, d_audio, apbif, dam0, dam1, dam2, hda2codec_2x,
+ atomics, audio0_2x, audio1_2x, audio2_2x, audio3_2x, audio4_2x,
+ spdif_2x, actmon, extern1, extern2, extern3, sata_oob, sata, hda,
+ se = 127, hda2hdmi, sata_cold, uartb = 160, vfir, spdif_in, spdif_out,
+ vi, vi_sensor, fuse, fuse_burn, cve, tvo, clk_32k, clk_m, clk_m_div2,
+ clk_m_div4, pll_ref, pll_c, pll_c_out1, pll_m, pll_m_out1, pll_p,
+ pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_a, pll_a_out0,
+ pll_d, pll_d_out0, pll_d2, pll_d2_out0, pll_u, pll_x, pll_x_out0, pll_e,
+ spdif_in_sync, i2s0_sync, i2s1_sync, i2s2_sync, i2s3_sync, i2s4_sync,
+ vimclk_sync, audio0, audio1, audio2, audio3, audio4, spdif, clk_out_1,
+ clk_out_2, clk_out_3, sclk, blink, cclk_g, cclk_lp, twd, cml0, cml1,
+ hclk, pclk, clk_out_1_mux = 300, clk_max
+};
+
+static struct clk *clks[clk_max];
+static struct clk_onecell_data clk_data;
+
+/*
+ * Structure defining the fields for USB UTMI clock parameters.
+ */
+struct utmi_clk_param {
+ /* Oscillator Frequency in Hz */
+ u32 osc_frequency;
+ /* UTMIP PLL Enable Delay Count */
+ u8 enable_delay_count;
+ /* UTMIP PLL Stable count */
+ u8 stable_count;
+ /* UTMIP PLL Active delay count */
+ u8 active_delay_count;
+ /* UTMIP PLL Xtal frequency count */
+ u8 xtal_freq_count;
+};
+
+static const struct utmi_clk_param utmi_parameters[] = {
+/* OSC_FREQUENCY, ENABLE_DLY, STABLE_CNT, ACTIVE_DLY, XTAL_FREQ_CNT */
+ {13000000, 0x02, 0x33, 0x05, 0x7F},
+ {19200000, 0x03, 0x4B, 0x06, 0xBB},
+ {12000000, 0x02, 0x2F, 0x04, 0x76},
+ {26000000, 0x04, 0x66, 0x09, 0xFE},
+ {16800000, 0x03, 0x41, 0x0A, 0xA4},
+};
+
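+/* Frequency table entries: { input rate, output rate, N, M, P, CPCON } */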
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 1040000000, 520, 6, 1, 8},
+ { 13000000, 1040000000, 480, 6, 1, 8},
+ { 16800000, 1040000000, 495, 8, 1, 8}, /* actual: 1039.5 MHz */
+ { 19200000, 1040000000, 325, 6, 1, 6},
+ { 26000000, 1040000000, 520, 13, 1, 8},
+
+ { 12000000, 832000000, 416, 6, 1, 8},
+ { 13000000, 832000000, 832, 13, 1, 8},
+ { 16800000, 832000000, 396, 8, 1, 8}, /* actual: 831.6 MHz */
+ { 19200000, 832000000, 260, 6, 1, 8},
+ { 26000000, 832000000, 416, 13, 1, 8},
+
+ { 12000000, 624000000, 624, 12, 1, 8},
+ { 13000000, 624000000, 624, 13, 1, 8},
+ { 16800000, 600000000, 520, 14, 1, 8},
+ { 19200000, 624000000, 520, 16, 1, 8},
+ { 26000000, 624000000, 624, 26, 1, 8},
+
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 16800000, 600000000, 500, 14, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+
+ { 12000000, 520000000, 520, 12, 1, 8},
+ { 13000000, 520000000, 520, 13, 1, 8},
+ { 16800000, 520000000, 495, 16, 1, 8}, /* actual: 519.75 MHz */
+ { 19200000, 520000000, 325, 12, 1, 6},
+ { 26000000, 520000000, 520, 26, 1, 8},
+
+ { 12000000, 416000000, 416, 12, 1, 8},
+ { 13000000, 416000000, 416, 13, 1, 8},
+ { 16800000, 416000000, 396, 16, 1, 8}, /* actual: 415.8 MHz */
+ { 19200000, 416000000, 260, 12, 1, 6},
+ { 26000000, 416000000, 416, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 16800000, 666000000, 555, 14, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 16800000, 600000000, 500, 14, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ { 12000000, 216000000, 432, 12, 2, 8},
+ { 13000000, 216000000, 432, 13, 2, 8},
+ { 16800000, 216000000, 360, 14, 2, 8},
+ { 19200000, 216000000, 360, 16, 2, 8},
+ { 26000000, 216000000, 432, 26, 2, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ { 9600000, 564480000, 294, 5, 1, 4},
+ { 9600000, 552960000, 288, 5, 1, 4},
+ { 9600000, 24000000, 5, 2, 1, 1},
+
+ { 28800000, 56448000, 49, 25, 1, 1},
+ { 28800000, 73728000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 16800000, 216000000, 180, 14, 1, 4},
+ { 19200000, 216000000, 180, 16, 1, 4},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 16800000, 594000000, 495, 14, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ { 12000000, 480000000, 960, 12, 2, 12},
+ { 13000000, 480000000, 960, 13, 2, 12},
+ { 16800000, 480000000, 400, 7, 2, 5},
+ { 19200000, 480000000, 200, 4, 2, 3},
+ { 26000000, 480000000, 960, 26, 2, 12},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1.7 GHz */
+ { 12000000, 1700000000, 850, 6, 1, 8},
+ { 13000000, 1700000000, 915, 7, 1, 8}, /* actual: 1699.2 MHz */
+ { 16800000, 1700000000, 708, 7, 1, 8}, /* actual: 1699.2 MHz */
+ { 19200000, 1700000000, 885, 10, 1, 8}, /* actual: 1699.2 MHz */
+ { 26000000, 1700000000, 850, 13, 1, 8},
+
+ /* 1.6 GHz */
+ { 12000000, 1600000000, 800, 6, 1, 8},
+ { 13000000, 1600000000, 738, 6, 1, 8}, /* actual: 1599.0 MHz */
+ { 16800000, 1600000000, 857, 9, 1, 8}, /* actual: 1599.7 MHz */
+ { 19200000, 1600000000, 500, 6, 1, 8},
+ { 26000000, 1600000000, 800, 13, 1, 8},
+
+ /* 1.5 GHz */
+ { 12000000, 1500000000, 750, 6, 1, 8},
+ { 13000000, 1500000000, 923, 8, 1, 8}, /* actual: 1499.8 MHz */
+ { 16800000, 1500000000, 625, 7, 1, 8},
+ { 19200000, 1500000000, 625, 8, 1, 8},
+ { 26000000, 1500000000, 750, 13, 1, 8},
+
+ /* 1.4 GHz */
+ { 12000000, 1400000000, 700, 6, 1, 8},
+ { 13000000, 1400000000, 969, 9, 1, 8}, /* actual: 1399.7 MHz */
+ { 16800000, 1400000000, 1000, 12, 1, 8},
+ { 19200000, 1400000000, 875, 12, 1, 8},
+ { 26000000, 1400000000, 700, 13, 1, 8},
+
+ /* 1.3 GHz */
+ { 12000000, 1300000000, 975, 9, 1, 8},
+ { 13000000, 1300000000, 1000, 10, 1, 8},
+ { 16800000, 1300000000, 928, 12, 1, 8}, /* actual: 1299.2 MHz */
+ { 19200000, 1300000000, 812, 12, 1, 8}, /* actual: 1299.2 MHz */
+ { 26000000, 1300000000, 650, 13, 1, 8},
+
+ /* 1.2 GHz */
+ { 12000000, 1200000000, 1000, 10, 1, 8},
+ { 13000000, 1200000000, 923, 10, 1, 8}, /* actual: 1199.9 MHz */
+ { 16800000, 1200000000, 1000, 14, 1, 8},
+ { 19200000, 1200000000, 1000, 16, 1, 8},
+ { 26000000, 1200000000, 600, 13, 1, 8},
+
+ /* 1.1 GHz */
+ { 12000000, 1100000000, 825, 9, 1, 8},
+ { 13000000, 1100000000, 846, 10, 1, 8}, /* actual: 1099.8 MHz */
+ { 16800000, 1100000000, 982, 15, 1, 8}, /* actual: 1099.8 MHz */
+ { 19200000, 1100000000, 859, 15, 1, 8}, /* actual: 1099.5 MHz */
+ { 26000000, 1100000000, 550, 13, 1, 8},
+
+ /* 1 GHz */
+ { 12000000, 1000000000, 1000, 12, 1, 8},
+ { 13000000, 1000000000, 1000, 13, 1, 8},
+ { 16800000, 1000000000, 833, 14, 1, 8}, /* actual: 999.6 MHz */
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 8},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ /* PLLE special case: use cpcon field to store cml divider value */
+ { 12000000, 100000000, 150, 1, 18, 11},
+ { 216000000, 100000000, 200, 18, 24, 13},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+/* PLL parameters */
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_d2_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD2_BASE,
+ .misc_reg = PLLD2_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 48000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1700000000,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 216000000,
+ .cf_min = 12000000,
+ .cf_max = 12000000,
+ .vco_min = 1200000000,
+ .vco_max = 2400000000U,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .lock_bit_idx = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+/* Peripheral clock registers */
+static struct tegra_clk_periph_regs periph_l_regs = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+};
+
+static struct tegra_clk_periph_regs periph_h_regs = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+};
+
+static struct tegra_clk_periph_regs periph_u_regs = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+};
+
+static struct tegra_clk_periph_regs periph_v_regs = {
+ .enb_reg = CLK_OUT_ENB_V,
+ .enb_set_reg = CLK_OUT_ENB_SET_V,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_V,
+ .rst_reg = RST_DEVICES_V,
+ .rst_set_reg = RST_DEVICES_SET_V,
+ .rst_clr_reg = RST_DEVICES_CLR_V,
+};
+
+static struct tegra_clk_periph_regs periph_w_regs = {
+ .enb_reg = CLK_OUT_ENB_W,
+ .enb_set_reg = CLK_OUT_ENB_SET_W,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_W,
+ .rst_reg = RST_DEVICES_W,
+ .rst_set_reg = RST_DEVICES_SET_W,
+ .rst_clr_reg = RST_DEVICES_CLR_W,
+};
+
+static void tegra30_clk_measure_input_freq(void)
+{
+ u32 osc_ctrl = readl_relaxed(clk_base + OSC_CTRL);
+ u32 auto_clk_control = osc_ctrl & OSC_CTRL_OSC_FREQ_MASK;
+ u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (auto_clk_control) {
+ case OSC_CTRL_OSC_FREQ_12MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 12000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_13MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 13000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_19_2MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 19200000;
+ break;
+ case OSC_CTRL_OSC_FREQ_26MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 26000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_16_8MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 16800000;
+ break;
+ case OSC_CTRL_OSC_FREQ_38_4MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_2);
+ input_freq = 38400000;
+ break;
+ case OSC_CTRL_OSC_FREQ_48MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_4);
+ input_freq = 48000000;
+ break;
+ default:
+ pr_err("Unexpected auto clock control value %d",
+ auto_clk_control);
+ BUG();
+ return;
+ }
+}
+
+static unsigned int tegra30_get_pll_ref_div(void)
+{
+ u32 pll_ref_div = readl_relaxed(clk_base + OSC_CTRL) &
+ OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (pll_ref_div) {
+ case OSC_CTRL_PLL_REF_DIV_1:
+ return 1;
+ case OSC_CTRL_PLL_REF_DIV_2:
+ return 2;
+ case OSC_CTRL_PLL_REF_DIV_4:
+ return 4;
+ default:
+ pr_err("Invalid pll ref divider %d", pll_ref_div);
+ BUG();
+ }
+ return 0;
+}
+
+static void tegra30_utmi_param_configure(void)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (input_freq == utmi_parameters[i].osc_frequency)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(utmi_parameters)) {
+ pr_err("%s: Unexpected input rate %lu\n", __func__, input_freq);
+ return;
+ }
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL stable and active counts */
+ reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ reg |= UTMIP_PLL_CFG2_STABLE_COUNT(
+ utmi_parameters[i].stable_count);
+
+ reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(
+ utmi_parameters[i].active_delay_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(
+ utmi_parameters[i].enable_delay_count);
+
+ reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(
+ utmi_parameters[i].xtal_freq_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+}
+
+static const char *pll_e_parents[] = {"pll_ref", "pll_p"};
+
+static void __init tegra30_pll_init(void)
+{
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_c_params,
+ TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
+ pll_c_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[pll_c] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
+ 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[pll_c_out1] = clk;
+
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, pmc_base, 0,
+ 408000000, &pll_p_params,
+ TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_USE_LOCK, pll_p_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ clks[pll_p] = clk;
+
+ /* PLLP_OUT1 */
+ clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
+ clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
+ clk_base + PLLP_OUTA, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out1", NULL);
+ clks[pll_p_out1] = clk;
+
+ /* PLLP_OUT2 */
+ clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
+ clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
+ clk_base + PLLP_OUTA, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out2", NULL);
+ clks[pll_p_out2] = clk;
+
+ /* PLLP_OUT3 */
+ clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
+ clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
+ clk_base + PLLP_OUTB, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out3", NULL);
+ clks[pll_p_out3] = clk;
+
+ /* PLLP_OUT4 */
+ clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
+ clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
+ clk_base + PLLP_OUTB, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out4", NULL);
+ clks[pll_p_out4] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
+ &pll_m_params, TEGRA_PLLM | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
+ pll_m_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[pll_m] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[pll_m_out1] = clk;
+
+ /* PLLX */
+ clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_x_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
+ pll_x_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_x", NULL);
+ clks[pll_x] = clk;
+
+ /* PLLX_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_x_out0", NULL);
+ clks[pll_x_out0] = clk;
+
+ /* PLLU */
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_u_freq_table,
+ NULL);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[pll_u] = clk;
+
+ tegra30_utmi_param_configure();
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_d_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_d_freq_table, &pll_d_lock);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[pll_d] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[pll_d_out0] = clk;
+
+ /* PLLD2 */
+ clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_d2_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_d_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_d2", NULL);
+ clks[pll_d2] = clk;
+
+ /* PLLD2_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d2_out0", NULL);
+ clks[pll_d2_out0] = clk;
+
+ /* PLLA */
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, pmc_base,
+ 0, 0, &pll_a_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_USE_LOCK, pll_a_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_a", NULL);
+ clks[pll_a] = clk;
+
+ /* PLLA_OUT0 */
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_a_out0", NULL);
+ clks[pll_a_out0] = clk;
+
+ /* PLLE */
+ clk = clk_register_mux(NULL, "pll_e_mux", pll_e_parents,
+ ARRAY_SIZE(pll_e_parents), 0,
+ clk_base + PLLE_AUX, 2, 1, 0, NULL);
+ clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
+ CLK_GET_RATE_NOCACHE, 100000000, &pll_e_params,
+ TEGRA_PLLE_CONFIGURE, pll_e_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_e", NULL);
+ clks[pll_e] = clk;
+}
+
+static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",};
+static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern1", };
+static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern2", };
+static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern3", };
+
+static void __init tegra30_audio_clk_init(void)
+{
+ struct clk *clk;
+
+ /* spdif_in_sync */
+ clk = tegra_clk_register_sync_source("spdif_in_sync", 24000000,
+ 24000000);
+ clk_register_clkdev(clk, "spdif_in_sync", NULL);
+ clks[spdif_in_sync] = clk;
+
+ /* i2s0_sync */
+ clk = tegra_clk_register_sync_source("i2s0_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s0_sync", NULL);
+ clks[i2s0_sync] = clk;
+
+ /* i2s1_sync */
+ clk = tegra_clk_register_sync_source("i2s1_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s1_sync", NULL);
+ clks[i2s1_sync] = clk;
+
+ /* i2s2_sync */
+ clk = tegra_clk_register_sync_source("i2s2_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s2_sync", NULL);
+ clks[i2s2_sync] = clk;
+
+ /* i2s3_sync */
+ clk = tegra_clk_register_sync_source("i2s3_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s3_sync", NULL);
+ clks[i2s3_sync] = clk;
+
+ /* i2s4_sync */
+ clk = tegra_clk_register_sync_source("i2s4_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s4_sync", NULL);
+ clks[i2s4_sync] = clk;
+
+ /* vimclk_sync */
+ clk = tegra_clk_register_sync_source("vimclk_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "vimclk_sync", NULL);
+ clks[vimclk_sync] = clk;
+
+ /* audio0 */
+ clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S0, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio0", NULL);
+ clks[audio0] = clk;
+
+ /* audio1 */
+ clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S1, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio1", NULL);
+ clks[audio1] = clk;
+
+ /* audio2 */
+ clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S2, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio2", NULL);
+ clks[audio2] = clk;
+
+ /* audio3 */
+ clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S3, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio3", NULL);
+ clks[audio3] = clk;
+
+ /* audio4 */
+ clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S4, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio4", NULL);
+ clks[audio4] = clk;
+
+ /* spdif */
+ clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "spdif", NULL);
+ clks[spdif] = clk;
+
+ /* audio0_2x */
+ clk = clk_register_fixed_factor(NULL, "audio0_doubler", "audio0",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio0_div", "audio0_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 24, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio0_2x", "audio0_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 113, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio0_2x", NULL);
+ clks[audio0_2x] = clk;
+
+ /* audio1_2x */
+ clk = clk_register_fixed_factor(NULL, "audio1_doubler", "audio1",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio1_div", "audio1_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 25, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio1_2x", "audio1_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 114, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio1_2x", NULL);
+ clks[audio1_2x] = clk;
+
+ /* audio2_2x */
+ clk = clk_register_fixed_factor(NULL, "audio2_doubler", "audio2",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio2_div", "audio2_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 26, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio2_2x", "audio2_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 115, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio2_2x", NULL);
+ clks[audio2_2x] = clk;
+
+ /* audio3_2x */
+ clk = clk_register_fixed_factor(NULL, "audio3_doubler", "audio3",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio3_div", "audio3_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 27, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio3_2x", "audio3_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 116, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio3_2x", NULL);
+ clks[audio3_2x] = clk;
+
+ /* audio4_2x */
+ clk = clk_register_fixed_factor(NULL, "audio4_doubler", "audio4",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio4_div", "audio4_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 28, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio4_2x", "audio4_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 117, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio4_2x", NULL);
+ clks[audio4_2x] = clk;
+
+ /* spdif_2x */
+ clk = clk_register_fixed_factor(NULL, "spdif_doubler", "spdif",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("spdif_div", "spdif_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 29, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("spdif_2x", "spdif_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 118, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "spdif_2x", NULL);
+ clks[spdif_2x] = clk;
+}
+
+static void __init tegra30_pmc_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_out_1 */
+ clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
+ ARRAY_SIZE(clk_out1_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
+ &clk_out_lock);
+ clks[clk_out_1_mux] = clk;
+ clk = clk_register_gate(NULL, "clk_out_1", "clk_out_1_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 2, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern1", "clk_out_1");
+ clks[clk_out_1] = clk;
+
+ /* clk_out_2 */
+ clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
+ ARRAY_SIZE(clk_out2_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
+ &clk_out_lock);
+ clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 10, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern2", "clk_out_2");
+ clks[clk_out_2] = clk;
+
+ /* clk_out_3 */
+ clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
+ ARRAY_SIZE(clk_out3_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
+ &clk_out_lock);
+ clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 18, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern3", "clk_out_3");
+ clks[clk_out_3] = clk;
+
+ /* blink */
+ writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ clks[blink] = clk;
+
+}
+
+static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclkg", "pll_p_out4_cclkg",
+ "pll_p_out3_cclkg", "unused", "pll_x" };
+static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclklp", "pll_p_out4_cclklp",
+ "pll_p_out3_cclklp", "unused", "pll_x",
+ "pll_x_out0" };
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p_out3", "pll_p_out2", "unused",
+ "clk_32k", "pll_m_out1" };
+
+static void __init tegra30_super_clk_init(void)
+{
+ struct clk *clk;
+
+ /*
+ * Clock input to cclk_g divided from pll_p using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclkg", "pll_p",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclkg", NULL);
+
+ /*
+ * Clock input to cclk_g divided from pll_p_out3 using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclkg", "pll_p_out3",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclkg", NULL);
+
+ /*
+ * Clock input to cclk_g divided from pll_p_out4 using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclkg", "pll_p_out4",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclkg", NULL);
+
+ /* CCLKG */
+ clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
+ ARRAY_SIZE(cclk_g_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKG_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "cclk_g", NULL);
+ clks[cclk_g] = clk;
+
+ /*
+ * Clock input to cclk_lp divided from pll_p using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclklp", "pll_p",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclklp", NULL);
+
+ /*
+ * Clock input to cclk_lp divided from pll_p_out3 using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
+
+ /*
+ * Clock input to cclk_lp divided from pll_p_out4 using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclklp", "pll_p_out4",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclklp", NULL);
+
+ /* CCLKLP */
+ clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
+ ARRAY_SIZE(cclk_lp_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKLP_BURST_POLICY,
+ TEGRA_DIVIDER_2, 4, 8, 9,
+ NULL);
+ clk_register_clkdev(clk, "cclk_lp", NULL);
+ clks[cclk_lp] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "sclk", NULL);
+ clks[sclk] = clk;
+
+ /* HCLK */
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
+ clk_base + SYSTEM_CLK_RATE, 7,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "hclk", NULL);
+ clks[hclk] = clk;
+
+ /* PCLK */
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
+ clk_base + SYSTEM_CLK_RATE, 3,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "pclk", NULL);
+ clks[pclk] = clk;
+
+ /* twd */
+ clk = clk_register_fixed_factor(NULL, "twd", "cclk_g",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "twd", NULL);
+ clks[twd] = clk;
+}
+
+static const char *mux_pllacp_clkm[] = { "pll_a_out0", "unused", "pll_p",
+ "clk_m" };
+static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
+static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
+static const char *i2s0_parents[] = { "pll_a_out0", "audio0_2x", "pll_p",
+ "clk_m" };
+static const char *i2s1_parents[] = { "pll_a_out0", "audio1_2x", "pll_p",
+ "clk_m" };
+static const char *i2s2_parents[] = { "pll_a_out0", "audio2_2x", "pll_p",
+ "clk_m" };
+static const char *i2s3_parents[] = { "pll_a_out0", "audio3_2x", "pll_p",
+ "clk_m" };
+static const char *i2s4_parents[] = { "pll_a_out0", "audio4_2x", "pll_p",
+ "clk_m" };
+static const char *spdif_out_parents[] = { "pll_a_out0", "spdif_2x", "pll_p",
+ "clk_m" };
+static const char *spdif_in_parents[] = { "pll_p", "pll_c", "pll_m" };
+static const char *mux_pllpc_clk32k_clkm[] = { "pll_p", "pll_c", "clk_32k",
+ "clk_m" };
+static const char *mux_pllpc_clkm_clk32k[] = { "pll_p", "pll_c", "clk_m",
+ "clk_32k" };
+static const char *mux_pllmcpa[] = { "pll_m", "pll_c", "pll_p", "pll_a_out0" };
+static const char *mux_pllpdc_clkm[] = { "pll_p", "pll_d_out0", "pll_c",
+ "clk_m" };
+static const char *mux_pllp_clkm[] = { "pll_p", "unused", "unused", "clk_m" };
+static const char *mux_pllpmdacd2_clkm[] = { "pll_p", "pll_m", "pll_d_out0",
+ "pll_a_out0", "pll_c",
+ "pll_d2_out0", "clk_m" };
+static const char *mux_plla_clk32k_pllp_clkm_plle[] = { "pll_a_out0",
+ "clk_32k", "pll_p",
+ "clk_m", "pll_e" };
+static const char *mux_plld_out0_plld2_out0[] = { "pll_d_out0",
+ "pll_d2_out0" };
+
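+/*
+ * Each entry below describes one peripheral clock; the common macro
+ * arguments appear to be (name, con_id, dev_id, parent list, source
+ * register, clock-enable bit, peripheral register bank, flags, clk
+ * index). For example, "i2s0" is sourced via CLK_SOURCE_I2S0, gated
+ * by bit 30 of the "L" bank and exported as clks[i2s0].
+ */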
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+ TEGRA_INIT_DATA_MUX("i2s0", NULL, "tegra30-i2s.0", i2s0_parents, CLK_SOURCE_I2S0, 30, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s0),
+ TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra30-i2s.1", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
+ TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra30-i2s.2", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
+ TEGRA_INIT_DATA_MUX("i2s3", NULL, "tegra30-i2s.3", i2s3_parents, CLK_SOURCE_I2S3, 101, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s3),
+ TEGRA_INIT_DATA_MUX("i2s4", NULL, "tegra30-i2s.4", i2s4_parents, CLK_SOURCE_I2S4, 102, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s4),
+ TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra30-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
+ TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra30-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
+ TEGRA_INIT_DATA_MUX("d_audio", "d_audio", "tegra30-ahub", mux_pllacp_clkm, CLK_SOURCE_D_AUDIO, 106, &periph_v_regs, 0, d_audio),
+ TEGRA_INIT_DATA_MUX("dam0", NULL, "tegra30-dam.0", mux_pllacp_clkm, CLK_SOURCE_DAM0, 108, &periph_v_regs, 0, dam0),
+ TEGRA_INIT_DATA_MUX("dam1", NULL, "tegra30-dam.1", mux_pllacp_clkm, CLK_SOURCE_DAM1, 109, &periph_v_regs, 0, dam1),
+ TEGRA_INIT_DATA_MUX("dam2", NULL, "tegra30-dam.2", mux_pllacp_clkm, CLK_SOURCE_DAM2, 110, &periph_v_regs, 0, dam2),
+ TEGRA_INIT_DATA_MUX("hda", "hda", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA, 125, &periph_v_regs, 0, hda),
+ TEGRA_INIT_DATA_MUX("hda2codec_2x", "hda2codec", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, &periph_v_regs, 0, hda2codec_2x),
+ TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
+ TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
+ TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
+ TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
+ TEGRA_INIT_DATA_MUX("sbc5", NULL, "spi_tegra.4", mux_pllpcm_clkm, CLK_SOURCE_SBC5, 104, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc5),
+ TEGRA_INIT_DATA_MUX("sbc6", NULL, "spi_tegra.5", mux_pllpcm_clkm, CLK_SOURCE_SBC6, 105, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc6),
+ TEGRA_INIT_DATA_MUX("sata_oob", NULL, "tegra_sata_oob", mux_pllpcm_clkm, CLK_SOURCE_SATA_OOB, 123, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata_oob),
+ TEGRA_INIT_DATA_MUX("sata", NULL, "tegra_sata", mux_pllpcm_clkm, CLK_SOURCE_SATA, 124, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata),
+ TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, TEGRA_PERIPH_ON_APB, ndflash),
+ TEGRA_INIT_DATA_MUX("ndspeed", NULL, "tegra_nand_speed", mux_pllpcm_clkm, CLK_SOURCE_NDSPEED, 80, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
+ TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
+ TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, TEGRA_PERIPH_ON_APB, csite),
+ TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, TEGRA_PERIPH_ON_APB, la),
+ TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
+ TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
+ TEGRA_INIT_DATA_MUX("tsensor", NULL, "tegra-tsensor", mux_pllpc_clkm_clk32k, CLK_SOURCE_TSENSOR, 100, &periph_v_regs, TEGRA_PERIPH_ON_APB, tsensor),
+ TEGRA_INIT_DATA_MUX("i2cslow", NULL, "i2cslow", mux_pllpc_clk32k_clkm, CLK_SOURCE_I2CSLOW, 81, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2cslow),
+ TEGRA_INIT_DATA_INT("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
+ TEGRA_INIT_DATA_INT("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
+ TEGRA_INIT_DATA_INT("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
+ TEGRA_INIT_DATA_INT("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
+ TEGRA_INIT_DATA_INT("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
+ TEGRA_INIT_DATA_INT("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
+ TEGRA_INIT_DATA_INT("3d2", NULL, "3d2", mux_pllmcpa, CLK_SOURCE_3D2, 98, &periph_v_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d2),
+ TEGRA_INIT_DATA_INT("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
+ TEGRA_INIT_DATA_INT("se", NULL, "se", mux_pllpcm_clkm, CLK_SOURCE_SE, 127, &periph_v_regs, 0, se),
+ TEGRA_INIT_DATA_MUX("mselect", NULL, "mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, &periph_v_regs, 0, mselect),
+ TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
+ TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
+ TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
+ TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
+ TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
+ TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
+ TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
+ TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
+ TEGRA_INIT_DATA_MUX("actmon", NULL, "actmon", mux_pllpc_clk32k_clkm, CLK_SOURCE_ACTMON, 119, &periph_v_regs, 0, actmon),
+ TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
+ TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
+ TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
+ TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
+ TEGRA_INIT_DATA_DIV16("i2c4", "div-clk", "tegra-i2c.3", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2c4),
+ TEGRA_INIT_DATA_DIV16("i2c5", "div-clk", "tegra-i2c.4", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c5),
+ TEGRA_INIT_DATA_UART("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 6, &periph_l_regs, uarta),
+ TEGRA_INIT_DATA_UART("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 7, &periph_l_regs, uartb),
+ TEGRA_INIT_DATA_UART("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 55, &periph_h_regs, uartc),
+ TEGRA_INIT_DATA_UART("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 65, &periph_u_regs, uartd),
+ TEGRA_INIT_DATA_UART("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 66, &periph_u_regs, uarte),
+ TEGRA_INIT_DATA_MUX8("hdmi", NULL, "hdmi", mux_pllpmdacd2_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
+ TEGRA_INIT_DATA_MUX8("extern1", NULL, "extern1", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, &periph_v_regs, 0, extern1),
+ TEGRA_INIT_DATA_MUX8("extern2", NULL, "extern2", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, &periph_v_regs, 0, extern2),
+ TEGRA_INIT_DATA_MUX8("extern3", NULL, "extern3", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, &periph_v_regs, 0, extern3),
+ TEGRA_INIT_DATA("pwm", NULL, "pwm", mux_pllpc_clk32k_clkm, CLK_SOURCE_PWM, 28, 2, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, 0, pwm),
+};
+
+static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
+ TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP1, 29, 3, 27, &periph_l_regs, 0, disp1),
+ TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP2, 29, 3, 26, &periph_l_regs, 0, disp2),
+ TEGRA_INIT_DATA_NODIV("dsib", NULL, "tegradc.1", mux_plld_out0_plld2_out0, CLK_SOURCE_DSIB, 25, 1, 82, &periph_u_regs, 0, dsib),
+};
+
+static void __init tegra30_periph_clk_init(void)
+{
+ struct tegra_periph_init_data *data;
+ struct clk *clk;
+ int i;
+
+ /* apbdma */
+ clk = tegra_clk_register_periph_gate("apbdma", "clk_m", 0, clk_base, 0, 34,
+ &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-apbdma");
+ clks[apbdma] = clk;
+
+ /* rtc */
+ clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 4, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "rtc-tegra");
+ clks[rtc] = clk;
+
+ /* timer */
+ clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base, 0,
+ 5, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "timer");
+ clks[timer] = clk;
+
+ /* kbc */
+ clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 36, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-kbc");
+ clks[kbc] = clk;
+
+ /* csus */
+ clk = tegra_clk_register_periph_gate("csus", "clk_m",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 92, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csus", "tengra_camera");
+ clks[csus] = clk;
+
+ /* vcp */
+ clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0, clk_base, 0, 29,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "vcp", "tegra-avp");
+ clks[vcp] = clk;
+
+ /* bsea */
+ clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0, clk_base, 0,
+ 62, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsea", "tegra-avp");
+ clks[bsea] = clk;
+
+ /* bsev */
+ clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0, clk_base, 0,
+ 63, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsev", "tegra-aes");
+ clks[bsev] = clk;
+
+ /* usbd */
+ clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
+ 22, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
+ clks[usbd] = clk;
+
+ /* usb2 */
+ clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
+ 58, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.1");
+ clks[usb2] = clk;
+
+ /* usb3 */
+ clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
+ 59, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.2");
+ clks[usb3] = clk;
+
+ /* dsia */
+ clk = tegra_clk_register_periph_gate("dsia", "pll_d_out0", 0, clk_base,
+ 0, 48, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "dsia", "tegradc.0");
+ clks[dsia] = clk;
+
+ /* csi */
+ clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
+ 0, 52, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csi", "tegra_camera");
+ clks[csi] = clk;
+
+ /* isp */
+ clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "isp", "tegra_camera");
+ clks[isp] = clk;
+
+ /* pcie */
+ clk = tegra_clk_register_periph_gate("pcie", "clk_m", 0, clk_base, 0,
+ 70, &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pcie", "tegra-pcie");
+ clks[pcie] = clk;
+
+ /* afi */
+ clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "afi", "tegra-pcie");
+ clks[afi] = clk;
+
+ /* kfuse */
+ clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 40, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "kfuse-tegra");
+ clks[kfuse] = clk;
+
+ /* fuse */
+ clk = tegra_clk_register_periph_gate("fuse", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 39, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "fuse", "fuse-tegra");
+ clks[fuse] = clk;
+
+ /* fuse_burn */
+ clk = tegra_clk_register_periph_gate("fuse_burn", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 39, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "fuse_burn", "fuse-tegra");
+ clks[fuse_burn] = clk;
+
+ /* apbif */
+ clk = tegra_clk_register_periph_gate("apbif", "clk_m", 0,
+ clk_base, 0, 107, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "apbif", "tegra30-ahub");
+ clks[apbif] = clk;
+
+ /* hda2hdmi */
+ clk = tegra_clk_register_periph_gate("hda2hdmi", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 128, &periph_w_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "hda2hdmi", "tegra30-hda");
+ clks[hda2hdmi] = clk;
+
+ /* sata_cold */
+ clk = tegra_clk_register_periph_gate("sata_cold", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 129, &periph_w_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra_sata_cold");
+ clks[sata_cold] = clk;
+
+ /* dtv */
+ clk = tegra_clk_register_periph_gate("dtv", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 79, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "dtv");
+ clks[dtv] = clk;
+
+ /* emc */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 30, 2, 0, NULL);
+ clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+ 57, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "emc", NULL);
+ clks[emc] = clk;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+ data = &tegra_periph_clk_list[i];
+ clk = tegra_clk_register_periph(data->name, data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
+ data = &tegra_periph_nodiv_clk_list[i];
+ clk = tegra_clk_register_periph_nodiv(data->name,
+ data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+}
+
+static void __init tegra30_fixed_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_32k */
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
+ 32768);
+ clk_register_clkdev(clk, "clk_32k", NULL);
+ clks[clk_32k] = clk;
+
+ /* clk_m_div2 */
+ clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "clk_m_div2", NULL);
+ clks[clk_m_div2] = clk;
+
+ /* clk_m_div4 */
+ clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 4);
+ clk_register_clkdev(clk, "clk_m_div4", NULL);
+ clks[clk_m_div4] = clk;
+
+ /* cml0 */
+ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
+ 0, 0, &cml_lock);
+ clk_register_clkdev(clk, "cml0", NULL);
+ clks[cml0] = clk;
+
+ /* cml1 */
+ clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
+ 1, 0, &cml_lock);
+ clk_register_clkdev(clk, "cml1", NULL);
+ clks[cml1] = clk;
+
+ /* pciex */
+ clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
+ clk_register_clkdev(clk, "pciex", NULL);
+ clks[pciex] = clk;
+}
+
+static void __init tegra30_osc_clk_init(void)
+{
+ struct clk *clk;
+ unsigned int pll_ref_div;
+
+ tegra30_clk_measure_input_freq();
+
+ /* clk_m */
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
+ input_freq);
+ clk_register_clkdev(clk, "clk_m", NULL);
+ clks[clk_m] = clk;
+
+ /* pll_ref */
+ pll_ref_div = tegra30_get_pll_ref_div();
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ CLK_SET_RATE_PARENT, 1, pll_ref_div);
+ clk_register_clkdev(clk, "pll_ref", NULL);
+ clks[pll_ref] = clk;
+}
+
+/* Tegra30 CPU clock and reset control functions */
+static void tegra30_wait_cpu_in_reset(u32 cpu)
+{
+ unsigned int reg;
+
+ do {
+ reg = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_relax();
+ } while (!(reg & (1 << cpu))); /* wait until the CPU is held in reset */
+}
+
+static void tegra30_put_cpu_in_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ dmb();
+}
+
+static void tegra30_cpu_out_of_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+ wmb();
+}
+
+static void tegra30_enable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ writel(CPU_CLOCK(cpu),
+ clk_base + TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+ reg = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+}
+
+static void tegra30_disable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg | CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static bool tegra30_cpu_rail_off_ready(void)
+{
+ unsigned int cpu_rst_status;
+ int cpu_pwr_status;
+
+ cpu_rst_status = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_pwr_status = tegra_powergate_is_powered(TEGRA_POWERGATE_CPU1) ||
+ tegra_powergate_is_powered(TEGRA_POWERGATE_CPU2) ||
+ tegra_powergate_is_powered(TEGRA_POWERGATE_CPU3);
+
+ if (((cpu_rst_status & 0xE) != 0xE) || cpu_pwr_status)
+ return false;
+
+ return true;
+}
+
+static void tegra30_cpu_clock_suspend(void)
+{
+ /* switch CoreSight (csite) to clk_m, saving the original source */
+ tegra30_cpu_clk_sctx.clk_csite_src =
+ readl(clk_base + CLK_RESET_SOURCE_CSITE);
+ writel(3<<30, clk_base + CLK_RESET_SOURCE_CSITE);
+
+ tegra30_cpu_clk_sctx.cpu_burst =
+ readl(clk_base + CLK_RESET_CCLK_BURST);
+ tegra30_cpu_clk_sctx.pllx_base =
+ readl(clk_base + CLK_RESET_PLLX_BASE);
+ tegra30_cpu_clk_sctx.pllx_misc =
+ readl(clk_base + CLK_RESET_PLLX_MISC);
+ tegra30_cpu_clk_sctx.cclk_divider =
+ readl(clk_base + CLK_RESET_CCLK_DIVIDER);
+}
+
+static void tegra30_cpu_clock_resume(void)
+{
+ unsigned int reg, policy;
+
+ /* Is CPU complex already running on PLLX? */
+ reg = readl(clk_base + CLK_RESET_CCLK_BURST);
+ policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;
+
+ if (policy == CLK_RESET_CCLK_IDLE_POLICY)
+ reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
+ else if (policy == CLK_RESET_CCLK_RUN_POLICY)
+ reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
+ else
+ BUG();
+
+ if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra30_cpu_clk_sctx.pllx_misc,
+ clk_base + CLK_RESET_PLLX_MISC);
+ writel(tegra30_cpu_clk_sctx.pllx_base,
+ clk_base + CLK_RESET_PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra30_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
+
+ /*
+ * Restore original burst policy setting for calls resulting from CPU
+ * LP2 in idle or system suspend.
+ */
+ writel(tegra30_cpu_clk_sctx.cclk_divider,
+ clk_base + CLK_RESET_CCLK_DIVIDER);
+ writel(tegra30_cpu_clk_sctx.cpu_burst,
+ clk_base + CLK_RESET_CCLK_BURST);
+
+ writel(tegra30_cpu_clk_sctx.clk_csite_src,
+ clk_base + CLK_RESET_SOURCE_CSITE);
+}
+#endif
+
+static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
+ .wait_for_reset = tegra30_wait_cpu_in_reset,
+ .put_in_reset = tegra30_put_cpu_in_reset,
+ .out_of_reset = tegra30_cpu_out_of_reset,
+ .enable_clock = tegra30_enable_cpu_clock,
+ .disable_clock = tegra30_disable_cpu_clock,
+#ifdef CONFIG_PM_SLEEP
+ .rail_off_ready = tegra30_cpu_rail_off_ready,
+ .suspend = tegra30_cpu_clock_suspend,
+ .resume = tegra30_cpu_clock_resume,
+#endif
+};
+
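+/*
+ * Each entry is assumed to be { clk_id, parent_id, rate, enable }, as
+ * consumed by tegra_init_from_table(): the clock is reparented when
+ * parent_id names a valid clock, the rate is set when non-zero, and
+ * the clock is prepared and enabled when the last field is non-zero.
+ */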
+static __initdata struct tegra_clk_init_table init_table[] = {
+ {uarta, pll_p, 408000000, 0},
+ {uartb, pll_p, 408000000, 0},
+ {uartc, pll_p, 408000000, 0},
+ {uartd, pll_p, 408000000, 0},
+ {uarte, pll_p, 408000000, 0},
+ {pll_a, clk_max, 564480000, 1},
+ {pll_a_out0, clk_max, 11289600, 1},
+ {extern1, pll_a_out0, 0, 1},
+ {clk_out_1_mux, extern1, 0, 0},
+ {clk_out_1, clk_max, 0, 1},
+ {blink, clk_max, 0, 1},
+ {i2s0, pll_a_out0, 11289600, 0},
+ {i2s1, pll_a_out0, 11289600, 0},
+ {i2s2, pll_a_out0, 11289600, 0},
+ {i2s3, pll_a_out0, 11289600, 0},
+ {i2s4, pll_a_out0, 11289600, 0},
+ {sdmmc1, pll_p, 48000000, 0},
+ {sdmmc2, pll_p, 48000000, 0},
+ {sdmmc3, pll_p, 48000000, 0},
+ {pll_m, clk_max, 0, 1},
+ {pclk, clk_max, 0, 1},
+ {csite, clk_max, 0, 1},
+ {emc, clk_max, 0, 1},
+ {mselect, clk_max, 0, 1},
+ {sbc1, pll_p, 100000000, 0},
+ {sbc2, pll_p, 100000000, 0},
+ {sbc3, pll_p, 100000000, 0},
+ {sbc4, pll_p, 100000000, 0},
+ {sbc5, pll_p, 100000000, 0},
+ {sbc6, pll_p, 100000000, 0},
+ {host1x, pll_c, 150000000, 0},
+ {disp1, pll_p, 600000000, 0},
+ {disp2, pll_p, 600000000, 0},
+ {twd, clk_max, 0, 1},
+ {clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
+};
+
+/*
+ * Some clocks may be used by different drivers depending on the board
+ * configuration. List those here to register them twice in the clock lookup
+ * table under two names.
+ */
+static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
+ TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(bsev, "tegra-avp", "bsev"),
+ TEGRA_CLK_DUPLICATE(bsev, "nvavp", "bsev"),
+ TEGRA_CLK_DUPLICATE(vde, "tegra-aes", "vde"),
+ TEGRA_CLK_DUPLICATE(bsea, "tegra-aes", "bsea"),
+ TEGRA_CLK_DUPLICATE(bsea, "nvavp", "bsea"),
+ TEGRA_CLK_DUPLICATE(cml1, "tegra_sata_cml", NULL),
+ TEGRA_CLK_DUPLICATE(cml0, "tegra_pcie", "cml"),
+ TEGRA_CLK_DUPLICATE(pciex, "tegra_pcie", "pciex"),
+ TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL),
+ TEGRA_CLK_DUPLICATE(vcp, "nvavp", "vcp"),
+ TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* MUST be the last entry */
+};
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra30-pmc" },
+ {},
+};
+
+void __init tegra30_clock_init(struct device_node *np)
+{
+ struct device_node *node;
+ int i;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("ioremap tegra30 CAR failed\n");
+ return;
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ BUG();
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ BUG();
+ }
+
+ tegra30_osc_clk_init();
+ tegra30_fixed_clk_init();
+ tegra30_pll_init();
+ tegra30_super_clk_init();
+ tegra30_periph_clk_init();
+ tegra30_audio_clk_init();
+ tegra30_pmc_clk_init();
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ if (IS_ERR(clks[i])) {
+ pr_err("Tegra30 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ BUG();
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ tegra_init_from_table(init_table, clks, clk_max);
+
+ tegra_cpu_car_ops = &tegra30_cpu_car_ops;
+}
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
new file mode 100644
index 000000000000..a603b9af0ad3
--- /dev/null
+++ b/drivers/clk/tegra/clk.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+
+/* Global data of Tegra CPU CAR ops */
+struct tegra_cpu_car_ops *tegra_cpu_car_ops;
+
+void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
+ struct clk *clks[], int clk_max)
+{
+ struct clk *clk;
+
+ for (; dup_list->clk_id < clk_max; dup_list++) {
+ clk = clks[dup_list->clk_id];
+ dup_list->lookup.clk = clk;
+ clkdev_add(&dup_list->lookup);
+ }
+}
+
+void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
+ struct clk *clks[], int clk_max)
+{
+ struct clk *clk;
+
+ for (; tbl->clk_id < clk_max; tbl++) {
+ clk = clks[tbl->clk_id];
+ if (IS_ERR_OR_NULL(clk))
+ return;
+
+ if (tbl->parent_id < clk_max) {
+ struct clk *parent = clks[tbl->parent_id];
+ if (clk_set_parent(clk, parent)) {
+ pr_err("%s: Failed to set parent %s of %s\n",
+ __func__, __clk_get_name(parent),
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+ }
+
+ if (tbl->rate)
+ if (clk_set_rate(clk, tbl->rate)) {
+ pr_err("%s: Failed to set rate %lu of %s\n",
+ __func__, tbl->rate,
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+
+ if (tbl->state)
+ if (clk_prepare_enable(clk)) {
+ pr_err("%s: Failed to enable %s\n", __func__,
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+ }
+}
+
+static const struct of_device_id tegra_dt_clk_match[] = {
+ { .compatible = "nvidia,tegra20-car", .data = tegra20_clock_init },
+ { .compatible = "nvidia,tegra30-car", .data = tegra30_clock_init },
+ { }
+};
+
+void __init tegra_clocks_init(void)
+{
+ of_clk_init(tegra_dt_clk_match);
+}
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
new file mode 100644
index 000000000000..0744731c6229
--- /dev/null
+++ b/drivers/clk/tegra/clk.h
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TEGRA_CLK_H
+#define __TEGRA_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+/**
+ * struct tegra_clk_sync_source - external clock source from codec
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @rate: input frequency from source
+ * @max_rate: max rate allowed
+ */
+struct tegra_clk_sync_source {
+ struct clk_hw hw;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+#define to_clk_sync_source(_hw) \
+ container_of(_hw, struct tegra_clk_sync_source, hw)
+
+extern const struct clk_ops tegra_clk_sync_source_ops;
+struct clk *tegra_clk_register_sync_source(const char *name,
+ unsigned long fixed_rate, unsigned long max_rate);
+
+/**
+ * struct tegra_clk_frac_div - fractional divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing divider
+ * @flags: hardware-specific flags
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
+ * @frac_width: width of the fractional bit field
+ * @lock: register lock
+ *
+ * Flags:
+ * TEGRA_DIVIDER_ROUND_UP - This flag indicates to round up the divider value.
+ * TEGRA_DIVIDER_FIXED - Fixed rate PLL dividers have an additional override
+ * bit; this flag indicates that this divider is for a fixed rate PLL.
+ * TEGRA_DIVIDER_INT - Some modules cannot cope with the duty cycle when the
+ * fraction bit is set. This flag indicates to calculate a divider for which
+ * the fraction bit will be zero.
+ * TEGRA_DIVIDER_UART - The UART module divider has an additional enable bit
+ * which is set when the divider value is not 0. This flag indicates that the
+ * divider is for the UART module.
+ */
+struct tegra_clk_frac_div {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 flags;
+ u8 shift;
+ u8 width;
+ u8 frac_width;
+ spinlock_t *lock;
+};
+
+#define to_clk_frac_div(_hw) container_of(_hw, struct tegra_clk_frac_div, hw)
+
+#define TEGRA_DIVIDER_ROUND_UP BIT(0)
+#define TEGRA_DIVIDER_FIXED BIT(1)
+#define TEGRA_DIVIDER_INT BIT(2)
+#define TEGRA_DIVIDER_UART BIT(3)
+
+extern const struct clk_ops tegra_clk_frac_div_ops;
+struct clk *tegra_clk_register_divider(const char *name,
+ const char *parent_name, void __iomem *reg,
+ unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
+ u8 frac_width, spinlock_t *lock);
+
+/*
+ * Tegra PLL:
+ *
+ * In general, there are 3 requirements for each PLL
+ * that SW needs to comply with.
+ * (1) Input frequency range (REF).
+ * (2) Comparison frequency range (CF). CF = REF/DIVM.
+ * (3) VCO frequency range (VCO). VCO = CF * DIVN.
+ *
+ * The final PLL output frequency (FO) = VCO >> DIVP.
+ */
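To make the divider arithmetic above concrete, here is a minimal standalone
sketch (a user-space illustration only; the divider values are hypothetical and
not taken from any Tegra frequency table):

/* Illustration of REF -> CF -> VCO -> FO for the divider chain described above. */
#include <stdio.h>

int main(void)
{
	unsigned long ref = 12000000;	/* input reference (REF), 12 MHz */
	unsigned long divm = 1;		/* input divider */
	unsigned long divn = 50;	/* feedback divider */
	unsigned long divp = 1;		/* post divider: FO = VCO >> DIVP */

	unsigned long cf = ref / divm;	/* comparison frequency */
	unsigned long vco = cf * divn;	/* VCO frequency */
	unsigned long fo = vco >> divp;	/* final output frequency */

	printf("CF = %lu Hz, VCO = %lu Hz, FO = %lu Hz\n", cf, vco, fo);
	return 0;	/* prints CF = 12000000, VCO = 600000000, FO = 300000000 */
}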
+
+/**
+ * struct tegra_clk_pll_freq_table - PLL frequency table
+ *
+ * @input_rate: input rate from source
+ * @output_rate: output rate from PLL for the input rate
+ * @n: feedback divider
+ * @m: input divider
+ * @p: post divider
+ * @cpcon: charge pump current
+ */
+struct tegra_clk_pll_freq_table {
+ unsigned long input_rate;
+ unsigned long output_rate;
+ u16 n;
+ u16 m;
+ u8 p;
+ u8 cpcon;
+};
+
+/**
+ * struct clk_pll_params - PLL parameters
+ *
+ * @input_min: Minimum input frequency
+ * @input_max: Maximum input frequency
+ * @cf_min: Minimum comparison frequency
+ * @cf_max: Maximum comparison frequency
+ * @vco_min: Minimum VCO frequency
+ * @vco_max: Maximum VCO frequency
+ * @base_reg: PLL base reg offset
+ * @misc_reg: PLL misc reg offset
+ * @lock_reg: PLL lock reg offset
+ * @lock_bit_idx: Bit index for PLL lock status
+ * @lock_enable_bit_idx: Bit index to enable PLL lock
+ * @lock_delay: Delay in us if PLL lock is not used
+ */
+struct tegra_clk_pll_params {
+ unsigned long input_min;
+ unsigned long input_max;
+ unsigned long cf_min;
+ unsigned long cf_max;
+ unsigned long vco_min;
+ unsigned long vco_max;
+
+ u32 base_reg;
+ u32 misc_reg;
+ u32 lock_reg;
+ u32 lock_bit_idx;
+ u32 lock_enable_bit_idx;
+ int lock_delay;
+};
+
+/**
+ * struct tegra_clk_pll - Tegra PLL clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @clk_base: address of CAR controller
+ * @pmc: address of PMC, required to read override bits
+ * @freq_table: array of frequencies supported by PLL
+ * @params: PLL parameters
+ * @flags: PLL flags
+ * @fixed_rate: PLL rate if it is fixed
+ * @lock: register lock
+ * @divn_shift: shift to the feedback divider bit field
+ * @divn_width: width of the feedback divider bit field
+ * @divm_shift: shift to the input divider bit field
+ * @divm_width: width of the input divider bit field
+ * @divp_shift: shift to the post divider bit field
+ * @divp_width: width of the post divider bit field
+ *
+ * Flags:
+ * TEGRA_PLL_USE_LOCK - This flag indicates to use the lock bits for
+ * PLL locking. If not set, the lock_delay value is used to wait.
+ * TEGRA_PLL_HAS_CPCON - This flag indicates that the CPCON value needs
+ * to be programmed to change the output frequency of the PLL.
+ * TEGRA_PLL_SET_LFCON - This flag indicates that the LFCON value needs
+ * to be programmed to change the output frequency of the PLL.
+ * TEGRA_PLL_SET_DCCON - This flag indicates that the DCCON value needs
+ * to be programmed to change the output frequency of the PLL.
+ * TEGRA_PLLU - PLLU has an inverted post divider. This flag indicates
+ * that it is PLLU and that the post divider value must be inverted.
+ * TEGRA_PLLM - PLLM has additional override settings in the PMC. This
+ * flag indicates that it is PLLM and that the override settings are used.
+ * TEGRA_PLL_FIXED - The output frequency of some PLLs must not be
+ * changed.
+ * TEGRA_PLLE_CONFIGURE - Configure PLLE when enabling.
+ */
+struct tegra_clk_pll {
+ struct clk_hw hw;
+ void __iomem *clk_base;
+ void __iomem *pmc;
+ u8 flags;
+ unsigned long fixed_rate;
+ spinlock_t *lock;
+ u8 divn_shift;
+ u8 divn_width;
+ u8 divm_shift;
+ u8 divm_width;
+ u8 divp_shift;
+ u8 divp_width;
+ struct tegra_clk_pll_freq_table *freq_table;
+ struct tegra_clk_pll_params *params;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct tegra_clk_pll, hw)
+
+#define TEGRA_PLL_USE_LOCK BIT(0)
+#define TEGRA_PLL_HAS_CPCON BIT(1)
+#define TEGRA_PLL_SET_LFCON BIT(2)
+#define TEGRA_PLL_SET_DCCON BIT(3)
+#define TEGRA_PLLU BIT(4)
+#define TEGRA_PLLM BIT(5)
+#define TEGRA_PLL_FIXED BIT(6)
+#define TEGRA_PLLE_CONFIGURE BIT(7)
+
+extern const struct clk_ops tegra_clk_pll_ops;
+extern const struct clk_ops tegra_clk_plle_ops;
+struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+
+/**
+ * struct tegra_clk_pll_out - PLL divider down clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the PLL divider
+ * @enb_bit_idx: bit to enable/disable PLL divider
+ * @rst_bit_idx: bit to reset PLL divider
+ * @lock: register lock
+ * @flags: hardware-specific flags
+ */
+struct tegra_clk_pll_out {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 enb_bit_idx;
+ u8 rst_bit_idx;
+ spinlock_t *lock;
+ u8 flags;
+};
+
+#define to_clk_pll_out(_hw) container_of(_hw, struct tegra_clk_pll_out, hw)
+
+extern const struct clk_ops tegra_clk_pll_out_ops;
+struct clk *tegra_clk_register_pll_out(const char *name,
+ const char *parent_name, void __iomem *reg, u8 enb_bit_idx,
+ u8 rst_bit_idx, unsigned long flags, u8 pll_div_flags,
+ spinlock_t *lock);
+
+/**
+ * struct tegra_clk_periph_regs - Registers controlling peripheral clock
+ *
+ * @enb_reg: read the enable status
+ * @enb_set_reg: write 1 to enable clock
+ * @enb_clr_reg: write 1 to disable clock
+ * @rst_reg: read the reset status
+ * @rst_set_reg: write 1 to assert the reset of peripheral
+ * @rst_clr_reg: write 1 to deassert the reset of peripheral
+ */
+struct tegra_clk_periph_regs {
+ u32 enb_reg;
+ u32 enb_set_reg;
+ u32 enb_clr_reg;
+ u32 rst_reg;
+ u32 rst_set_reg;
+ u32 rst_clr_reg;
+};
+
+/**
+ * struct tegra_clk_periph_gate - peripheral gate clock
+ *
+ * @magic: magic number to validate type
+ * @hw: handle between common and hardware-specific interfaces
+ * @clk_base: address of CAR controller
+ * @regs: Registers to control the peripheral
+ * @flags: hardware-specific flags
+ * @clk_num: Clock number
+ * @enable_refcnt: array to maintain reference count of the clock
+ *
+ * Flags:
+ * TEGRA_PERIPH_NO_RESET - This flag indicates that reset is not allowed
+ * for this module.
+ * TEGRA_PERIPH_MANUAL_RESET - This flag indicates that the module is not
+ * reset after the clock is enabled; the driver for the module is
+ * responsible for doing the reset.
+ * TEGRA_PERIPH_ON_APB - If the peripheral is on the APB bus, the bus is
+ * read back to flush the write operation. This flag indicates that this
+ * peripheral is on the APB bus.
+ */
+struct tegra_clk_periph_gate {
+ u32 magic;
+ struct clk_hw hw;
+ void __iomem *clk_base;
+ u8 flags;
+ int clk_num;
+ int *enable_refcnt;
+ struct tegra_clk_periph_regs *regs;
+};
+
+#define to_clk_periph_gate(_hw) \
+ container_of(_hw, struct tegra_clk_periph_gate, hw)
+
+#define TEGRA_CLK_PERIPH_GATE_MAGIC 0x17760309
+
+#define TEGRA_PERIPH_NO_RESET BIT(0)
+#define TEGRA_PERIPH_MANUAL_RESET BIT(1)
+#define TEGRA_PERIPH_ON_APB BIT(2)
+
+void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert);
+extern const struct clk_ops tegra_clk_periph_gate_ops;
+struct clk *tegra_clk_register_periph_gate(const char *name,
+ const char *parent_name, u8 gate_flags, void __iomem *clk_base,
+ unsigned long flags, int clk_num,
+ struct tegra_clk_periph_regs *pregs, int *enable_refcnt);
+
+/**
+ * struct tegra_clk_periph - peripheral clock
+ *
+ * @magic: magic number to validate type
+ * @hw: handle between common and hardware-specific interfaces
+ * @mux: mux clock
+ * @divider: divider clock
+ * @gate: gate clock
+ * @mux_ops: mux clock ops
+ * @div_ops: divider clock ops
+ * @gate_ops: gate clock ops
+ */
+struct tegra_clk_periph {
+ u32 magic;
+ struct clk_hw hw;
+ struct clk_mux mux;
+ struct tegra_clk_frac_div divider;
+ struct tegra_clk_periph_gate gate;
+
+ const struct clk_ops *mux_ops;
+ const struct clk_ops *div_ops;
+ const struct clk_ops *gate_ops;
+};
+
+#define to_clk_periph(_hw) container_of(_hw, struct tegra_clk_periph, hw)
+
+#define TEGRA_CLK_PERIPH_MAGIC 0x18221223
+
+extern const struct clk_ops tegra_clk_periph_ops;
+struct clk *tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset);
+struct clk *tegra_clk_register_periph_nodiv(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset);
+
+#define TEGRA_CLK_PERIPH(_mux_shift, _mux_width, _mux_flags, \
+ _div_shift, _div_width, _div_frac_width, \
+ _div_flags, _clk_num, _enb_refcnt, _regs, \
+ _gate_flags) \
+ { \
+ .mux = { \
+ .flags = _mux_flags, \
+ .shift = _mux_shift, \
+ .width = _mux_width, \
+ }, \
+ .divider = { \
+ .flags = _div_flags, \
+ .shift = _div_shift, \
+ .width = _div_width, \
+ .frac_width = _div_frac_width, \
+ }, \
+ .gate = { \
+ .flags = _gate_flags, \
+ .clk_num = _clk_num, \
+ .enable_refcnt = _enb_refcnt, \
+ .regs = _regs, \
+ }, \
+ .mux_ops = &clk_mux_ops, \
+ .div_ops = &tegra_clk_frac_div_ops, \
+ .gate_ops = &tegra_clk_periph_gate_ops, \
+ }
+
+struct tegra_periph_init_data {
+ const char *name;
+ int clk_id;
+ const char **parent_names;
+ int num_parents;
+ struct tegra_clk_periph periph;
+ u32 offset;
+ const char *con_id;
+ const char *dev_id;
+};
+
+#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset, \
+ _mux_shift, _mux_width, _mux_flags, _div_shift, \
+ _div_width, _div_frac_width, _div_flags, _regs, \
+ _clk_num, _enb_refcnt, _gate_flags, _clk_id) \
+ { \
+ .name = _name, \
+ .clk_id = _clk_id, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_width, \
+ _mux_flags, _div_shift, \
+ _div_width, _div_frac_width, \
+ _div_flags, _clk_num, \
+ _enb_refcnt, _regs, \
+ _gate_flags), \
+ .offset = _offset, \
+ .con_id = _con_id, \
+ .dev_id = _dev_id, \
+ }
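A hypothetical use of TEGRA_INIT_DATA, only to show how the arguments map onto
the mux/divider/gate fields; every name, offset and number below is made up and
does not describe a real Tegra peripheral:

/* Hypothetical example: register bank, refcount array and IDs are placeholders. */
static struct tegra_clk_periph_regs example_regs;
static int example_refcnt[32];

static const char *example_parents[] = { "pll_p", "pll_c", "clk_m" };

static struct tegra_periph_init_data example_periph[] = {
	TEGRA_INIT_DATA("example-uart", NULL, "example-uart.0",
			example_parents, 0x178,
			30, 2, 0,			/* mux shift/width/flags */
			0, 8, 1, TEGRA_DIVIDER_UART,	/* divider shift/width/frac_width/flags */
			&example_regs, 6, example_refcnt,
			TEGRA_PERIPH_ON_APB, 0 /* clk_id */),
};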
+
+/**
+ * struct tegra_clk_super_mux - super clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling multiplexer
+ * @width: width of the multiplexer bit field
+ * @flags: hardware-specific flags
+ * @div2_index: bit controlling divide-by-2
+ * @pllx_index: PLLX index in the parent list
+ * @lock: register lock
+ *
+ * Flags:
+ * TEGRA_DIVIDER_2 - The LP cluster has an additional divider. This flag
+ * indicates that this is the LP cluster clock.
+ */
+struct tegra_clk_super_mux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 width;
+ u8 flags;
+ u8 div2_index;
+ u8 pllx_index;
+ spinlock_t *lock;
+};
+
+#define to_clk_super_mux(_hw) container_of(_hw, struct tegra_clk_super_mux, hw)
+
+#define TEGRA_DIVIDER_2 BIT(0)
+
+extern const struct clk_ops tegra_clk_super_ops;
+struct clk *tegra_clk_register_super_mux(const char *name,
+ const char **parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock);
+
+/**
+ * struct tegra_clk_init_table - clock initialization table
+ * @clk_id: clock id as mentioned in device tree bindings
+ * @parent_id: parent clock id as mentioned in device tree bindings
+ * @rate: rate to set
+ * @state: enable/disable
+ */
+struct tegra_clk_init_table {
+ unsigned int clk_id;
+ unsigned int parent_id;
+ unsigned long rate;
+ int state;
+};
+
+/**
+ * struct tegra_clk_duplicate - duplicate clocks
+ * @clk_id: clock id as mentioned in device tree bindings
+ * @lookup: duplicate lookup entry for the clock
+ */
+struct tegra_clk_duplicate {
+ int clk_id;
+ struct clk_lookup lookup;
+};
+
+#define TEGRA_CLK_DUPLICATE(_clk_id, _dev, _con) \
+ { \
+ .clk_id = _clk_id, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ }
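For reference, this mirrors how the macro is used in clk-tegra30.c earlier in
this diff: an existing clock ID gains an extra clkdev lookup, and a clk_max
entry terminates the list (usbd and clk_max are clock IDs from the Tegra
clock binding enum):

static struct tegra_clk_duplicate example_dups[] = {
	TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
	TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL),	/* MUST be the last entry */
};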
+
+void tegra_init_from_table(struct tegra_clk_init_table *tbl,
+ struct clk *clks[], int clk_max);
+
+void tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
+ struct clk *clks[], int clk_max);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void tegra20_clock_init(struct device_node *np);
+#else
+static inline void tegra20_clock_init(struct device_node *np) {}
+#endif /* CONFIG_ARCH_TEGRA_2x_SOC */
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void tegra30_clock_init(struct device_node *np);
+#else
+static inline void tegra30_clock_init(struct device_node *np) {}
+#endif /* CONFIG_ARCH_TEGRA_3x_SOC */
+
+#endif /* __TEGRA_CLK_H */
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index e27c52317ffe..9f7400d74fa7 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -34,7 +34,7 @@ static int ab9540_reg_clks(struct device *dev)
return 0;
}
-static int __devinit abx500_clk_probe(struct platform_device *pdev)
+static int abx500_clk_probe(struct platform_device *pdev)
{
struct ab8500 *parent = dev_get_drvdata(pdev->dev.parent);
int ret;
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index dcb6ae0a0425..256c8be74df8 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -144,3 +144,4 @@ error:
vexpress_config_func_put(osc->func);
kfree(osc);
}
+CLK_OF_DECLARE(vexpress_soc, "arm,vexpress-osc", vexpress_osc_of_setup);
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
index c742ac7c60bb..82b45aad8ccf 100644
--- a/drivers/clk/versatile/clk-vexpress.c
+++ b/drivers/clk/versatile/clk-vexpress.c
@@ -11,6 +11,7 @@
* Copyright (C) 2012 ARM Limited
*/
+#include <linux/amba/sp810.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
@@ -18,8 +19,6 @@
#include <linux/of_address.h>
#include <linux/vexpress.h>
-#include <asm/hardware/sp810.h>
-
static struct clk *vexpress_sp810_timerclken[4];
static DEFINE_SPINLOCK(vexpress_sp810_lock);
@@ -99,19 +98,13 @@ struct clk *vexpress_sp810_of_get(struct of_phandle_args *clkspec, void *data)
return vexpress_sp810_timerclken[clkspec->args[0]];
}
-static const __initconst struct of_device_id vexpress_fixed_clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "arm,vexpress-osc", .data = vexpress_osc_of_setup, },
- {}
-};
-
void __init vexpress_clk_of_init(void)
{
struct device_node *node;
struct clk *clk;
struct clk *refclk, *timclk;
- of_clk_init(vexpress_fixed_clk_match);
+ of_clk_init(NULL);
node = of_find_compatible_node(NULL, NULL, "arm,sp810");
vexpress_sp810_init(of_iomap(node, 0));
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
new file mode 100644
index 000000000000..f9ba4fab0ddc
--- /dev/null
+++ b/drivers/clk/x86/Makefile
@@ -0,0 +1,2 @@
+clk-x86-lpss-objs := clk-lpss.o clk-lpt.o
+obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
diff --git a/drivers/clk/x86/clk-lpss.c b/drivers/clk/x86/clk-lpss.c
new file mode 100644
index 000000000000..b5e229f3c3d9
--- /dev/null
+++ b/drivers/clk/x86/clk-lpss.c
@@ -0,0 +1,99 @@
+/*
+ * Intel Low Power Subsystem clocks.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+static int clk_lpss_is_mmio_resource(struct acpi_resource *res, void *data)
+{
+ struct resource r;
+ return !acpi_dev_resource_memory(res, &r);
+}
+
+static acpi_status clk_lpss_find_mmio(acpi_handle handle, u32 level,
+ void *data, void **retval)
+{
+ struct resource_list_entry *rentry;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ const char *uid = data;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (uid) {
+ if (!adev->pnp.unique_id)
+ return AE_OK;
+ if (strcmp(uid, adev->pnp.unique_id))
+ return AE_OK;
+ }
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ clk_lpss_is_mmio_resource, NULL);
+ if (ret < 0)
+ return AE_NO_MEMORY;
+
+ list_for_each_entry(rentry, &resource_list, node)
+ if (resource_type(&rentry->res) == IORESOURCE_MEM) {
+ *(struct resource *)retval = rentry->res;
+ break;
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+ return AE_OK;
+}
+
+/**
+ * clk_register_lpss_gate - register LPSS clock gate
+ * @name: name of this clock gate
+ * @parent_name: parent clock name
+ * @hid: ACPI _HID of the device
+ * @uid: ACPI _UID of the device (optional)
+ * @offset: LPSS PRV_CLOCK_PARAMS offset
+ *
+ * Creates and registers LPSS clock gate.
+ */
+struct clk *clk_register_lpss_gate(const char *name, const char *parent_name,
+ const char *hid, const char *uid,
+ unsigned offset)
+{
+ struct resource res = { };
+ void __iomem *mmio_base;
+ acpi_status status;
+ struct clk *clk;
+
+ /*
+ * First try to look up the device and its MMIO resource in the
+ * ACPI namespace.
+ */
+ status = acpi_get_devices(hid, clk_lpss_find_mmio, (void *)uid,
+ (void **)&res);
+ if (ACPI_FAILURE(status) || !res.start)
+ return ERR_PTR(-ENODEV);
+
+ mmio_base = ioremap(res.start, resource_size(&res));
+ if (!mmio_base)
+ return ERR_PTR(-ENOMEM);
+
+ clk = clk_register_gate(NULL, name, parent_name, 0, mmio_base + offset,
+ 0, 0, NULL);
+ if (IS_ERR(clk))
+ iounmap(mmio_base);
+
+ return clk;
+}
diff --git a/drivers/clk/x86/clk-lpss.h b/drivers/clk/x86/clk-lpss.h
new file mode 100644
index 000000000000..e9460f442297
--- /dev/null
+++ b/drivers/clk/x86/clk-lpss.h
@@ -0,0 +1,36 @@
+/*
+ * Intel Low Power Subsystem clock.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CLK_LPSS_H
+#define __CLK_LPSS_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+
+#ifdef CONFIG_ACPI
+extern struct clk *clk_register_lpss_gate(const char *name,
+ const char *parent_name,
+ const char *hid, const char *uid,
+ unsigned offset);
+#else
+static inline struct clk *clk_register_lpss_gate(const char *name,
+ const char *parent_name,
+ const char *hid,
+ const char *uid,
+ unsigned offset)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#endif /* __CLK_LPSS_H */
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
new file mode 100644
index 000000000000..81298aeef7e3
--- /dev/null
+++ b/drivers/clk/x86/clk-lpt.c
@@ -0,0 +1,86 @@
+/*
+ * Intel Lynxpoint LPSS clocks.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "clk-lpss.h"
+
+#define PRV_CLOCK_PARAMS 0x800
+
+static int lpt_clk_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ /* LPSS free running clock */
+ clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT,
+ 100000000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ /* Shared DMA clock */
+ clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
+
+ /* SPI clocks */
+ clk = clk_register_lpss_gate("spi0_clk", "lpss_clk", "INT33C0", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C0:00");
+
+ clk = clk_register_lpss_gate("spi1_clk", "lpss_clk", "INT33C1", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C1:00");
+
+ /* I2C clocks */
+ clk = clk_register_lpss_gate("i2c0_clk", "lpss_clk", "INT33C2", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C2:00");
+
+ clk = clk_register_lpss_gate("i2c1_clk", "lpss_clk", "INT33C3", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C3:00");
+
+ /* UART clocks */
+ clk = clk_register_lpss_gate("uart0_clk", "lpss_clk", "INT33C4", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C4:00");
+
+ clk = clk_register_lpss_gate("uart1_clk", "lpss_clk", "INT33C5", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C5:00");
+
+ return 0;
+}
+
+static struct platform_driver lpt_clk_driver = {
+ .driver = {
+ .name = "clk-lpt",
+ .owner = THIS_MODULE,
+ },
+ .probe = lpt_clk_probe,
+};
+
+static int __init lpt_clk_init(void)
+{
+ return platform_driver_register(&lpt_clk_driver);
+}
+arch_initcall(lpt_clk_init);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 7fdcbd3f4da5..e920cbe519fa 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,3 +1,6 @@
+config CLKSRC_OF
+ bool
+
config CLKSRC_I8253
bool
@@ -25,6 +28,9 @@ config ARMADA_370_XP_TIMER
config SUNXI_TIMER
bool
+config VT8500_TIMER
+ bool
+
config CLKSRC_NOMADIK_MTU
bool
depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -54,7 +60,5 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
help
Use the always on PRCMU Timer as sched_clock
-config CLKSRC_ARM_GENERIC
- def_bool y if ARM64
- help
- This option enables support for the ARM generic timer.
+config ARM_ARCH_TIMER
+ bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f93453d01673..7d671b85a98e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_CLKSRC_OF) += clksrc-of.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
@@ -16,5 +17,7 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
obj-$(CONFIG_SUNXI_TIMER) += sunxi_timer.o
+obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
+obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_CLKSRC_ARM_GENERIC) += arm_generic.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 5d1b9268bcaf..6efe4d1ab3aa 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -73,7 +73,7 @@ static struct clocksource clocksource_acpi_pm = {
#ifdef CONFIG_PCI
-static int __devinitdata acpi_pm_good;
+static int acpi_pm_good;
static int __init acpi_pm_good_setup(char *__str)
{
acpi_pm_good = 1;
@@ -102,7 +102,7 @@ static inline void acpi_pm_need_workaround(void)
* incorrect when read). As a result, the ACPI free running count up
* timer specification is violated due to erroneous reads.
*/
-static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)
+static void acpi_pm_check_blacklist(struct pci_dev *dev)
{
if (acpi_pm_good)
return;
@@ -120,7 +120,7 @@ static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
acpi_pm_check_blacklist);
-static void __devinit acpi_pm_check_graylist(struct pci_dev *dev)
+static void acpi_pm_check_graylist(struct pci_dev *dev)
{
if (acpi_pm_good)
return;
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
new file mode 100644
index 000000000000..d7ad425ab9b3
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -0,0 +1,391 @@
+/*
+ * linux/drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+
+#include <asm/arch_timer.h>
+#include <asm/virt.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+static u32 arch_timer_rate;
+
+enum ppi_nr {
+ PHYS_SECURE_PPI,
+ PHYS_NONSECURE_PPI,
+ VIRT_PPI,
+ HYP_PPI,
+ MAX_TIMER_PPI
+};
+
+static int arch_timer_ppi[MAX_TIMER_PPI];
+
+static struct clock_event_device __percpu *arch_timer_evt;
+
+static bool arch_timer_use_virtual = true;
+
+/*
+ * Architected system timer support.
+ */
+
+static inline irqreturn_t timer_handler(const int access,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl;
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+ ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
+}
+
+static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
+}
+
+static inline void timer_set_mode(const int access, int mode)
+{
+ unsigned long ctrl;
+ switch (mode) {
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void arch_timer_set_mode_virt(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+}
+
+static void arch_timer_set_mode_phys(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+}
+
+static inline void set_next_event(const int access, unsigned long evt)
+{
+ unsigned long ctrl;
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+}
+
+static int arch_timer_set_next_event_virt(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+ return 0;
+}
+
+static int arch_timer_set_next_event_phys(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
+ return 0;
+}
+
+static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
+{
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ if (arch_timer_use_virtual) {
+ clk->irq = arch_timer_ppi[VIRT_PPI];
+ clk->set_mode = arch_timer_set_mode_virt;
+ clk->set_next_event = arch_timer_set_next_event_virt;
+ } else {
+ clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+ clk->set_mode = arch_timer_set_mode_phys;
+ clk->set_next_event = arch_timer_set_next_event_phys;
+ }
+
+ clk->cpumask = cpumask_of(smp_processor_id());
+
+ clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
+
+ clockevents_config_and_register(clk, arch_timer_rate,
+ 0xf, 0x7fffffff);
+
+ if (arch_timer_use_virtual)
+ enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
+ else {
+ enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+ }
+
+ arch_counter_set_user_access();
+
+ return 0;
+}
+
+static int arch_timer_available(void)
+{
+ u32 freq;
+
+ if (arch_timer_rate == 0) {
+ freq = arch_timer_get_cntfrq();
+
+ /* Check the timer frequency. */
+ if (freq == 0) {
+ pr_warn("Architected timer frequency not available\n");
+ return -EINVAL;
+ }
+
+ arch_timer_rate = freq;
+ }
+
+ pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
+ (unsigned long)arch_timer_rate / 1000000,
+ (unsigned long)(arch_timer_rate / 10000) % 100,
+ arch_timer_use_virtual ? "virt" : "phys");
+ return 0;
+}
+
+u32 arch_timer_get_rate(void)
+{
+ return arch_timer_rate;
+}
+
+/*
+ * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
+ * call it before it has been initialised. Rather than incur a performance
+ * penalty checking for initialisation, provide a default implementation that
+ * won't lead to time appearing to jump backwards.
+ */
+static u64 arch_timer_read_zero(void)
+{
+ return 0;
+}
+
+u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
+
+static cycle_t arch_counter_read(struct clocksource *cs)
+{
+ return arch_timer_read_counter();
+}
+
+static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+{
+ return arch_timer_read_counter();
+}
+
+static struct clocksource clocksource_counter = {
+ .name = "arch_sys_counter",
+ .rating = 400,
+ .read = arch_counter_read,
+ .mask = CLOCKSOURCE_MASK(56),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct cyclecounter cyclecounter = {
+ .read = arch_counter_read_cc,
+ .mask = CLOCKSOURCE_MASK(56),
+};
+
+static struct timecounter timecounter;
+
+struct timecounter *arch_timer_get_timecounter(void)
+{
+ return &timecounter;
+}
+
+static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
+{
+ pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
+ clk->irq, smp_processor_id());
+
+ if (arch_timer_use_virtual)
+ disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
+ else {
+ disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+ }
+
+ clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+}
+
+static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ arch_timer_setup(evt);
+ break;
+ case CPU_DYING:
+ arch_timer_stop(evt);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
+ .notifier_call = arch_timer_cpu_notify,
+};
+
+static int __init arch_timer_register(void)
+{
+ int err;
+ int ppi;
+
+ err = arch_timer_available();
+ if (err)
+ goto out;
+
+ arch_timer_evt = alloc_percpu(struct clock_event_device);
+ if (!arch_timer_evt) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+ cyclecounter.mult = clocksource_counter.mult;
+ cyclecounter.shift = clocksource_counter.shift;
+ timecounter_init(&timecounter, &cyclecounter,
+ arch_counter_get_cntpct());
+
+ if (arch_timer_use_virtual) {
+ ppi = arch_timer_ppi[VIRT_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_virt,
+ "arch_timer", arch_timer_evt);
+ } else {
+ ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ if (err)
+ free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ arch_timer_evt);
+ }
+ }
+
+ if (err) {
+ pr_err("arch_timer: can't register interrupt %d (%d)\n",
+ ppi, err);
+ goto out_free;
+ }
+
+ err = register_cpu_notifier(&arch_timer_cpu_nb);
+ if (err)
+ goto out_free_irq;
+
+ /* Immediately configure the timer on the boot CPU */
+ arch_timer_setup(this_cpu_ptr(arch_timer_evt));
+
+ return 0;
+
+out_free_irq:
+ if (arch_timer_use_virtual)
+ free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
+ else {
+ free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ arch_timer_evt);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
+ arch_timer_evt);
+ }
+
+out_free:
+ free_percpu(arch_timer_evt);
+out:
+ return err;
+}
+
+static const struct of_device_id arch_timer_of_match[] __initconst = {
+ { .compatible = "arm,armv7-timer", },
+ { .compatible = "arm,armv8-timer", },
+ {},
+};
+
+int __init arch_timer_init(void)
+{
+ struct device_node *np;
+ u32 freq;
+ int i;
+
+ np = of_find_matching_node(NULL, arch_timer_of_match);
+ if (!np) {
+ pr_err("arch_timer: can't find DT node\n");
+ return -ENODEV;
+ }
+
+ /* Try to determine the frequency from the device tree or CNTFRQ */
+ if (!of_property_read_u32(np, "clock-frequency", &freq))
+ arch_timer_rate = freq;
+
+ for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+ arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+ of_node_put(np);
+
+ /*
+ * If HYP mode is available, we know that the physical timer
+ * has been configured to be accessible from PL1. Use it, so
+ * that a guest can use the virtual timer instead.
+ *
+ * If no interrupt is provided for the virtual timer, we'll have to
+ * stick to the physical timer. It had better be accessible...
+ */
+ if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
+ arch_timer_use_virtual = false;
+
+ if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
+ !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ pr_warn("arch_timer: No interrupt available, giving up\n");
+ return -EINVAL;
+ }
+ }
+
+ if (arch_timer_use_virtual)
+ arch_timer_read_counter = arch_counter_get_cntvct;
+ else
+ arch_timer_read_counter = arch_counter_get_cntpct;
+
+ return arch_timer_register();
+}
diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
deleted file mode 100644
index 8ae1a61523ff..000000000000
--- a/drivers/clocksource/arm_generic.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Generic timers support
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-
-#include <clocksource/arm_generic.h>
-
-#include <asm/arm_generic.h>
-
-static u32 arch_timer_rate;
-static u64 sched_clock_mult __read_mostly;
-static DEFINE_PER_CPU(struct clock_event_device, arch_timer_evt);
-static int arch_timer_ppi;
-
-static irqreturn_t arch_timer_handle_irq(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- if (ctrl & ARCH_TIMER_CTRL_ISTATUS) {
- ctrl |= ARCH_TIMER_CTRL_IMASK;
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
- evt->event_handler(evt);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static void arch_timer_stop(void)
-{
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
-}
-
-static void arch_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- arch_timer_stop();
- break;
- default:
- break;
- }
-}
-
-static int arch_timer_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- ctrl |= ARCH_TIMER_CTRL_ENABLE;
- ctrl &= ~ARCH_TIMER_CTRL_IMASK;
-
- arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
-
- return 0;
-}
-
-static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
-{
- /* Let's make sure the timer is off before doing anything else */
- arch_timer_stop();
-
- clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 400;
- clk->set_mode = arch_timer_set_mode;
- clk->set_next_event = arch_timer_set_next_event;
- clk->irq = arch_timer_ppi;
- clk->cpumask = cpumask_of(smp_processor_id());
-
- clockevents_config_and_register(clk, arch_timer_rate,
- 0xf, 0x7fffffff);
-
- enable_percpu_irq(clk->irq, 0);
-
- /* Ensure the virtual counter is visible to userspace for the vDSO. */
- arch_counter_enable_user_access();
-}
-
-static void __init arch_timer_calibrate(void)
-{
- if (arch_timer_rate == 0) {
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
- arch_timer_rate = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
-
- /* Check the timer frequency. */
- if (arch_timer_rate == 0)
- panic("Architected timer frequency is set to zero.\n"
- "You must set this in your .dts file\n");
- }
-
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
-
- sched_clock_mult = DIV_ROUND_CLOSEST(NSEC_PER_SEC, arch_timer_rate);
-
- pr_info("Architected local timer running at %u.%02uMHz.\n",
- arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
-}
-
-static cycle_t arch_counter_read(struct clocksource *cs)
-{
- return arch_counter_get_cntpct();
-}
-
-static struct clocksource clocksource_counter = {
- .name = "arch_sys_counter",
- .rating = 400,
- .read = arch_counter_read,
- .mask = CLOCKSOURCE_MASK(56),
- .flags = (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
-};
-
-int read_current_timer(unsigned long *timer_value)
-{
- *timer_value = arch_counter_get_cntpct();
- return 0;
-}
-
-unsigned long long notrace sched_clock(void)
-{
- return arch_counter_get_cntvct() * sched_clock_mult;
-}
-
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
- struct clock_event_device *clk = per_cpu_ptr(&arch_timer_evt, cpu);
-
- switch(action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- arch_timer_setup(clk);
- break;
-
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
- clk->irq, cpu);
- disable_percpu_irq(clk->irq);
- arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
- .notifier_call = arch_timer_cpu_notify,
-};
-
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv8-timer" },
- {},
-};
-
-int __init arm_generic_timer_init(void)
-{
- struct device_node *np;
- int err;
- u32 freq;
-
- np = of_find_matching_node(NULL, arch_timer_of_match);
- if (!np) {
- pr_err("arch_timer: can't find DT node\n");
- return -ENODEV;
- }
-
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (!of_property_read_u32(np, "clock-frequency", &freq))
- arch_timer_rate = freq;
- arch_timer_calibrate();
-
- arch_timer_ppi = irq_of_parse_and_map(np, 0);
- pr_info("arch_timer: found %s irq %d\n", np->name, arch_timer_ppi);
-
- err = request_percpu_irq(arch_timer_ppi, arch_timer_handle_irq,
- np->name, &arch_timer_evt);
- if (err) {
- pr_err("arch_timer: can't register interrupt %d (%d)\n",
- arch_timer_ppi, err);
- return err;
- }
-
- clocksource_register_hz(&clocksource_counter, arch_timer_rate);
-
- /* Calibrate the delay loop directly */
- lpj_fine = DIV_ROUND_CLOSEST(arch_timer_rate, HZ);
-
- /* Immediately configure the timer on the boot CPU */
- arch_timer_setup(this_cpu_ptr(&arch_timer_evt));
-
- register_cpu_notifier(&arch_timer_cpu_nb);
-
- return 0;
-}
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index bc19f12c20ce..50c68fef944b 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/bcm2835_timer.h>
#include <linux/bitops.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
@@ -101,7 +100,7 @@ static struct of_device_id bcm2835_time_match[] __initconst = {
{}
};
-static void __init bcm2835_time_init(void)
+static void __init bcm2835_timer_init(void)
{
struct device_node *node;
void __iomem *base;
@@ -155,7 +154,5 @@ static void __init bcm2835_time_init(void)
pr_info("bcm2835: system timer (irq = %d)\n", irq);
}
-
-struct sys_timer bcm2835_timer = {
- .init = bcm2835_time_init,
-};
+CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
+ bcm2835_timer_init);
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
new file mode 100644
index 000000000000..bdabdaa8d00f
--- /dev/null
+++ b/drivers/clocksource/clksrc-of.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+
+extern struct of_device_id __clksrc_of_table[];
+
+static const struct of_device_id __clksrc_of_table_sentinel
+ __used __section(__clksrc_of_table_end);
+
+void __init clocksource_of_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ void (*init_func)(void);
+
+ for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+ init_func = match->data;
+ init_func();
+ }
+}
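The __clksrc_of_table walked by this loop is populated by the
CLOCKSOURCE_OF_DECLARE() entries added elsewhere in this series (for example
the bcm2835 timer above). A sketch of what such a macro plausibly expands to,
inferred from the sentinel and section usage here rather than copied from the
kernel headers:

/* Sketch only: places one of_device_id into the __clksrc_of_table section. */
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn)			\
	static const struct of_device_id __clksrc_of_table_##name	\
		__used __section(__clksrc_of_table)			\
		= { .compatible = compat, .data = fn };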
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index d9279385304d..ea210482dd20 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -100,7 +100,6 @@ static struct clock_event_device cs5535_clockevent = {
.set_mode = mfgpt_set_mode,
.set_next_event = mfgpt_next_event,
.rating = 250,
- .shift = 32
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
@@ -169,17 +168,11 @@ static int __init cs5535_mfgpt_init(void)
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
/* Set up the clock event */
- cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
- cs5535_clockevent.shift);
- cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
- &cs5535_clockevent);
- cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
- &cs5535_clockevent);
-
printk(KERN_INFO DRV_NAME
": Registering MFGPT timer as a clock event, using IRQ %d\n",
timer_irq);
- clockevents_register_device(&cs5535_clockevent);
+ clockevents_config_and_register(&cs5535_clockevent, MFGPT_HZ,
+ 0xF, 0xFFFE);
return 0;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index f7dba5b79b44..ab09ed3742ee 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -107,7 +107,7 @@ static const struct of_device_id osctimer_ids[] __initconst = {
{},
};
-static void __init timer_init(void)
+void __init dw_apb_timer_init(void)
{
struct device_node *event_timer, *source_timer;
@@ -125,7 +125,3 @@ static void __init timer_init(void)
init_sched_clock();
}
-
-struct sys_timer dw_apb_timer = {
- .init = timer_init,
-};
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 372051d1bba8..e6a553cb73e8 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -311,7 +311,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
clockevents_config_and_register(ced, 1, 2, 0xffffffff);
}
-static int __devinit em_sti_probe(struct platform_device *pdev)
+static int em_sti_probe(struct platform_device *pdev)
{
struct em_sti_priv *p;
struct resource *res;
@@ -379,12 +379,12 @@ err0:
return ret;
}
-static int __devexit em_sti_remove(struct platform_device *pdev)
+static int em_sti_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
-static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+static const struct of_device_id em_sti_dt_ids[] = {
{ .compatible = "renesas,em-sti", },
{},
};
@@ -392,7 +392,7 @@ MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
static struct platform_driver em_sti_device_driver = {
.probe = em_sti_probe,
- .remove = __devexit_p(em_sti_remove),
+ .remove = em_sti_remove,
.driver = {
.name = "em_sti",
.of_match_table = em_sti_dt_ids,
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 8914c3c1c88b..435e54d55bbd 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -15,6 +15,7 @@
#include <linux/clocksource.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_data/clocksource-nomadik-mtu.h>
#include <asm/mach/time.h>
@@ -64,6 +65,7 @@ static void __iomem *mtu_base;
static bool clkevt_periodic;
static u32 clk_prescale;
static u32 nmdk_cycle; /* write-once */
+static struct delay_timer mtu_delay_timer;
#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
/*
@@ -80,6 +82,11 @@ static u32 notrace nomadik_read_sched_clock(void)
}
#endif
+static unsigned long nmdk_timer_read_current_timer(void)
+{
+ return ~readl_relaxed(mtu_base + MTU_VAL(0));
+}
+
/* Clockevent device: use one-shot mode */
static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
{
@@ -134,12 +141,32 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
}
}
+void nmdk_clksrc_reset(void)
+{
+ /* Disable */
+ writel(0, mtu_base + MTU_CR(0));
+
+ /* ClockSource: configure load and background-load, and fire it up */
+ writel(nmdk_cycle, mtu_base + MTU_LR(0));
+ writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
+
+ writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
+ mtu_base + MTU_CR(0));
+}
+
+static void nmdk_clkevt_resume(struct clock_event_device *cedev)
+{
+ nmdk_clkevt_reset();
+ nmdk_clksrc_reset();
+}
+
static struct clock_event_device nmdk_clkevt = {
.name = "mtu_1",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.rating = 200,
.set_mode = nmdk_clkevt_mode,
.set_next_event = nmdk_clkevt_next,
+ .resume = nmdk_clkevt_resume,
};
/*
@@ -161,19 +188,6 @@ static struct irqaction nmdk_timer_irq = {
.dev_id = &nmdk_clkevt,
};
-void nmdk_clksrc_reset(void)
-{
- /* Disable */
- writel(0, mtu_base + MTU_CR(0));
-
- /* ClockSource: configure load and background-load, and fire it up */
- writel(nmdk_cycle, mtu_base + MTU_LR(0));
- writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
-
- writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
- mtu_base + MTU_CR(0));
-}
-
void __init nmdk_timer_init(void __iomem *base, int irq)
{
unsigned long rate;
@@ -227,4 +241,8 @@ void __init nmdk_timer_init(void __iomem *base, int irq)
setup_irq(irq, &nmdk_timer_irq);
nmdk_clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
+
+ mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
+ mtu_delay_timer.freq = rate;
+ register_current_timer_delay(&mtu_delay_timer);
}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index a5f7829f2799..488c14cc8dbf 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -726,7 +726,7 @@ err0:
return ret;
}
-static int __devinit sh_cmt_probe(struct platform_device *pdev)
+static int sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
struct sh_timer_config *cfg = pdev->dev.platform_data;
@@ -767,14 +767,14 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit sh_cmt_remove(struct platform_device *pdev)
+static int sh_cmt_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
- .remove = __devexit_p(sh_cmt_remove),
+ .remove = sh_cmt_remove,
.driver = {
.name = "sh_cmt",
}
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index c5eea858054a..83943e27cfac 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -321,7 +321,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
return ret;
}
-static int __devinit sh_mtu2_probe(struct platform_device *pdev)
+static int sh_mtu2_probe(struct platform_device *pdev)
{
struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
struct sh_timer_config *cfg = pdev->dev.platform_data;
@@ -362,14 +362,14 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit sh_mtu2_remove(struct platform_device *pdev)
+static int sh_mtu2_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent */
}
static struct platform_driver sh_mtu2_device_driver = {
.probe = sh_mtu2_probe,
- .remove = __devexit_p(sh_mtu2_remove),
+ .remove = sh_mtu2_remove,
.driver = {
.name = "sh_mtu2",
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 0cc4add88279..b4502edce2a1 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -484,7 +484,7 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
return ret;
}
-static int __devinit sh_tmu_probe(struct platform_device *pdev)
+static int sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_priv *p = platform_get_drvdata(pdev);
struct sh_timer_config *cfg = pdev->dev.platform_data;
@@ -525,14 +525,14 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit sh_tmu_remove(struct platform_device *pdev)
+static int sh_tmu_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static struct platform_driver sh_tmu_device_driver = {
.probe = sh_tmu_probe,
- .remove = __devexit_p(sh_tmu_remove),
+ .remove = sh_tmu_remove,
.driver = {
.name = "sh_tmu",
}
diff --git a/drivers/clocksource/sunxi_timer.c b/drivers/clocksource/sunxi_timer.c
index 3cd1bd3d7aee..4086b9167159 100644
--- a/drivers/clocksource/sunxi_timer.c
+++ b/drivers/clocksource/sunxi_timer.c
@@ -23,7 +23,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sunxi_timer.h>
-#include <linux/clk/sunxi.h>
+#include <linux/clk-provider.h>
#define TIMER_CTL_REG 0x00
#define TIMER_CTL_ENABLE (1 << 0)
@@ -74,7 +74,6 @@ static int sunxi_clkevt_next_event(unsigned long evt,
static struct clock_event_device sunxi_clockevent = {
.name = "sunxi_tick",
- .shift = 32,
.rating = 300,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = sunxi_clkevt_mode,
@@ -104,7 +103,7 @@ static struct of_device_id sunxi_timer_dt_ids[] = {
{ }
};
-static void __init sunxi_timer_init(void)
+void __init sunxi_timer_init(void)
{
struct device_node *node;
unsigned long rate = 0;
@@ -124,7 +123,7 @@ static void __init sunxi_timer_init(void)
if (irq <= 0)
panic("Can't parse IRQ");
- sunxi_init_clocks();
+ of_clk_init(NULL);
clk = of_clk_get(node, 0);
if (IS_ERR(clk))
@@ -154,18 +153,8 @@ static void __init sunxi_timer_init(void)
val = readl(timer_base + TIMER_CTL_REG);
writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);
- sunxi_clockevent.mult = div_sc(rate / TIMER_SCAL,
- NSEC_PER_SEC,
- sunxi_clockevent.shift);
- sunxi_clockevent.max_delta_ns = clockevent_delta2ns(0xff,
- &sunxi_clockevent);
- sunxi_clockevent.min_delta_ns = clockevent_delta2ns(0x1,
- &sunxi_clockevent);
sunxi_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&sunxi_clockevent);
+ clockevents_config_and_register(&sunxi_clockevent, rate / TIMER_SCAL,
+ 0x1, 0xff);
}
-
-struct sys_timer sunxi_timer = {
- .init = sunxi_timer_init,
-};
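
The sunxi hunk above, like the tcb_clksrc, tegra and vt8500 changes that follow, applies one conversion: instead of filling in mult/shift and the delta2ns bounds by hand, the driver hands the tick rate and the minimum/maximum delta in timer cycles to clockevents_config_and_register() and lets the clockevents core derive the rest. A minimal sketch of the two styles, where evt, rate and the helper names old_style_register/new_style_register are illustrative placeholders rather than code from this patch:

    #include <linux/init.h>
    #include <linux/time.h>
    #include <linux/clockchips.h>

    static struct clock_event_device evt;      /* placeholder device */

    static void __init old_style_register(unsigned long rate)
    {
            /* manual mult/shift and ns-bound computation */
            evt.shift = 32;
            evt.mult = div_sc(rate, NSEC_PER_SEC, evt.shift);
            evt.max_delta_ns = clockevent_delta2ns(0xff, &evt);
            evt.min_delta_ns = clockevent_delta2ns(0x1, &evt);
            clockevents_register_device(&evt);
    }

    static void __init new_style_register(unsigned long rate)
    {
            /* equivalent single call: min delta 0x1, max delta 0xff cycles,
             * as in the sunxi hunk; the core derives mult/shift/bounds */
            clockevents_config_and_register(&evt, rate, 0x1, 0xff);
    }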
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 32cb929b8eb6..8a6187225dd0 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -157,7 +157,6 @@ static struct tc_clkevt_device clkevt = {
.name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
/* Should be lower than at91rm9200's system timer */
.rating = 125,
.set_next_event = tc_next_event,
@@ -196,13 +195,9 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
timer_clock = clk32k_divisor_idx;
- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
- clkevt.clkevt.max_delta_ns
- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
- clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
clkevt.clkevt.cpumask = cpumask_of(0);
- clockevents_register_device(&clkevt.clkevt);
+ clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
setup_irq(irq, &tc_irqaction);
}
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
new file mode 100644
index 000000000000..0bde03feb095
--- /dev/null
+++ b/drivers/clocksource/tegra20_timer.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/time.h>
+#include <asm/smp_twd.h>
+#include <asm/sched_clock.h>
+
+#define RTC_SECONDS 0x08
+#define RTC_SHADOW_SECONDS 0x0c
+#define RTC_MILLISECONDS 0x10
+
+#define TIMERUS_CNTR_1US 0x10
+#define TIMERUS_USEC_CFG 0x14
+#define TIMERUS_CNTR_FREEZE 0x4c
+
+#define TIMER1_BASE 0x0
+#define TIMER2_BASE 0x8
+#define TIMER3_BASE 0x50
+#define TIMER4_BASE 0x58
+
+#define TIMER_PTV 0x0
+#define TIMER_PCR 0x4
+
+static void __iomem *timer_reg_base;
+static void __iomem *rtc_base;
+
+static struct timespec persistent_ts;
+static u64 persistent_ms, last_persistent_ms;
+
+#define timer_writel(value, reg) \
+ __raw_writel(value, timer_reg_base + (reg))
+#define timer_readl(reg) \
+ __raw_readl(timer_reg_base + (reg))
+
+static int tegra_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ u32 reg;
+
+ reg = 0x80000000 | ((cycles > 1) ? (cycles-1) : 0);
+ timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+
+ return 0;
+}
+
+static void tegra_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u32 reg;
+
+ timer_writel(0, TIMER3_BASE + TIMER_PTV);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ reg = 0xC0000000 | ((1000000/HZ)-1);
+ timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device tegra_clockevent = {
+ .name = "timer0",
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_mode = tegra_timer_set_mode,
+};
+
+static u32 notrace tegra_read_sched_clock(void)
+{
+ return timer_readl(TIMERUS_CNTR_1US);
+}
+
+/*
+ * tegra_rtc_read - Reads the Tegra RTC registers
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing, to avoid race conditions
+ * on the RTC shadow register.
+ */
+static u64 tegra_rtc_read_ms(void)
+{
+ u32 ms = readl(rtc_base + RTC_MILLISECONDS);
+ u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+ return (u64)s * MSEC_PER_SEC + ms;
+}
+
+/*
+ * tegra_read_persistent_clock - Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32k sync timer. Converts the cycles elapsed since the last read into
+ * nsecs and adds them to a monotonically increasing timespec.
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing, to avoid race conditions
+ * on the RTC shadow register.
+ */
+static void tegra_read_persistent_clock(struct timespec *ts)
+{
+ u64 delta;
+ struct timespec *tsp = &persistent_ts;
+
+ last_persistent_ms = persistent_ms;
+ persistent_ms = tegra_rtc_read_ms();
+ delta = persistent_ms - last_persistent_ms;
+
+ timespec_add_ns(tsp, delta * NSEC_PER_MSEC);
+ *ts = *tsp;
+}
+
+static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ timer_writel(1<<30, TIMER3_BASE + TIMER_PCR);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction tegra_timer_irq = {
+ .name = "timer0",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH,
+ .handler = tegra_timer_interrupt,
+ .dev_id = &tegra_clockevent,
+};
+
+static const struct of_device_id timer_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-timer" },
+ {}
+};
+
+static const struct of_device_id rtc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-rtc" },
+ {}
+};
+
+static void __init tegra20_init_timer(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+ unsigned long rate;
+ int ret;
+
+ np = of_find_matching_node(NULL, timer_match);
+ if (!np) {
+ pr_err("Failed to find timer DT node\n");
+ BUG();
+ }
+
+ timer_reg_base = of_iomap(np, 0);
+ if (!timer_reg_base) {
+ pr_err("Can't map timer registers\n");
+ BUG();
+ }
+
+ tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
+ if (tegra_timer_irq.irq <= 0) {
+ pr_err("Failed to map timer IRQ\n");
+ BUG();
+ }
+
+ clk = clk_get_sys("timer", NULL);
+ if (IS_ERR(clk)) {
+ pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
+ rate = 12000000;
+ } else {
+ clk_prepare_enable(clk);
+ rate = clk_get_rate(clk);
+ }
+
+ of_node_put(np);
+
+ np = of_find_matching_node(NULL, rtc_match);
+ if (!np) {
+ pr_err("Failed to find RTC DT node\n");
+ BUG();
+ }
+
+ rtc_base = of_iomap(np, 0);
+ if (!rtc_base) {
+ pr_err("Can't map RTC registers");
+ BUG();
+ }
+
+ /*
+ * rtc registers are used by read_persistent_clock, keep the rtc clock
+ * enabled
+ */
+ clk = clk_get_sys("rtc-tegra", NULL);
+ if (IS_ERR(clk))
+ pr_warn("Unable to get rtc-tegra clock\n");
+ else
+ clk_prepare_enable(clk);
+
+ of_node_put(np);
+
+ switch (rate) {
+ case 12000000:
+ timer_writel(0x000b, TIMERUS_USEC_CFG);
+ break;
+ case 13000000:
+ timer_writel(0x000c, TIMERUS_USEC_CFG);
+ break;
+ case 19200000:
+ timer_writel(0x045f, TIMERUS_USEC_CFG);
+ break;
+ case 26000000:
+ timer_writel(0x0019, TIMERUS_USEC_CFG);
+ break;
+ default:
+ WARN(1, "Unknown clock rate");
+ }
+
+ setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
+
+ if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+ "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+ pr_err("Failed to register clocksource\n");
+ BUG();
+ }
+
+ ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
+ if (ret) {
+ pr_err("Failed to register timer IRQ: %d\n", ret);
+ BUG();
+ }
+
+ tegra_clockevent.cpumask = cpu_all_mask;
+ tegra_clockevent.irq = tegra_timer_irq.irq;
+ clockevents_config_and_register(&tegra_clockevent, 1000000,
+ 0x1, 0x1fffffff);
+#ifdef CONFIG_HAVE_ARM_TWD
+ twd_local_timer_of_register();
+#endif
+ register_persistent_clock(NULL, tegra_read_persistent_clock);
+}
+CLOCKSOURCE_OF_DECLARE(tegra20, "nvidia,tegra20-timer", tegra20_init_timer);
+
+#ifdef CONFIG_PM
+static u32 usec_config;
+
+void tegra_timer_suspend(void)
+{
+ usec_config = timer_readl(TIMERUS_USEC_CFG);
+}
+
+void tegra_timer_resume(void)
+{
+ timer_writel(usec_config, TIMERUS_USEC_CFG);
+}
+#endif
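
Both new drivers added in this series (tegra20_timer above and vt8500_timer below) register their init routines with CLOCKSOURCE_OF_DECLARE, which ties a device-tree compatible string to an init function that the generic clocksource-of code can invoke during early boot. A hedged sketch of that shape, with every name except the macro itself invented for illustration:

    #include <linux/init.h>
    #include <linux/of.h>
    #include <linux/clocksource.h>

    static void __init my_timer_init(void)
    {
            struct device_node *np;

            np = of_find_compatible_node(NULL, NULL, "vendor,my-timer");
            if (!np)
                    return;
            /* map registers, parse the IRQ, register clocksource + clockevent */
    }
    CLOCKSOURCE_OF_DECLARE(my_timer, "vendor,my-timer", my_timer_init);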
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 4674f94957cd..a4605fd7e303 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -167,7 +168,6 @@ void __init armada_370_xp_timer_init(void)
u32 u;
struct device_node *np;
unsigned int timer_clk;
- int ret;
np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
timer_base = of_iomap(np, 0);
WARN_ON(!timer_base);
@@ -179,13 +179,14 @@ void __init armada_370_xp_timer_init(void)
timer_base + TIMER_CTRL_OFF);
timer_clk = 25000000;
} else {
- u32 clk = 0;
- ret = of_property_read_u32(np, "clock-frequency", &clk);
- WARN_ON(!clk || ret < 0);
+ unsigned long rate = 0;
+ struct clk *clk = of_clk_get(np, 0);
+ WARN_ON(IS_ERR(clk));
+ rate = clk_get_rate(clk);
u = readl(timer_base + TIMER_CTRL_OFF);
writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
timer_base + TIMER_CTRL_OFF);
- timer_clk = clk / TIMER_DIVIDER;
+ timer_clk = rate / TIMER_DIVIDER;
}
/* We use timer 0 as clocksource, and timer 1 for
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
new file mode 100644
index 000000000000..8efc86b5b5dd
--- /dev/null
+++ b/drivers/clocksource/vt8500_timer.c
@@ -0,0 +1,180 @@
+/*
+ * arch/arm/mach-vt8500/timer.c
+ *
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This file is copied and modified from the original timer.c provided by
+ * Alexey Charkov. Minor changes have been made for Device Tree Support.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <asm/mach/time.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define VT8500_TIMER_OFFSET 0x0100
+#define VT8500_TIMER_HZ 3000000
+#define TIMER_MATCH_VAL 0x0000
+#define TIMER_COUNT_VAL 0x0010
+#define TIMER_STATUS_VAL 0x0014
+#define TIMER_IER_VAL 0x001c /* interrupt enable */
+#define TIMER_CTRL_VAL 0x0020
+#define TIMER_AS_VAL 0x0024 /* access status */
+#define TIMER_COUNT_R_ACTIVE (1 << 5) /* not ready for read */
+#define TIMER_COUNT_W_ACTIVE (1 << 4) /* not ready for write */
+#define TIMER_MATCH_W_ACTIVE (1 << 0) /* not ready for write */
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+static void __iomem *regbase;
+
+static cycle_t vt8500_timer_read(struct clocksource *cs)
+{
+ int loops = msecs_to_loops(10);
+ writel(3, regbase + TIMER_CTRL_VAL);
+ while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE)
+ && --loops)
+ cpu_relax();
+ return readl(regbase + TIMER_COUNT_VAL);
+}
+
+static struct clocksource clocksource = {
+ .name = "vt8500_timer",
+ .rating = 200,
+ .read = vt8500_timer_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int vt8500_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ int loops = msecs_to_loops(10);
+ cycle_t alarm = clocksource.read(&clocksource) + cycles;
+ while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
+ && --loops)
+ cpu_relax();
+ writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+
+ if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+ return -ETIME;
+
+ writel(1, regbase + TIMER_IER_VAL);
+
+ return 0;
+}
+
+static void vt8500_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel(readl(regbase + TIMER_CTRL_VAL) | 1,
+ regbase + TIMER_CTRL_VAL);
+ writel(0, regbase + TIMER_IER_VAL);
+ break;
+ }
+}
+
+static struct clock_event_device clockevent = {
+ .name = "vt8500_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = vt8500_timer_set_next_event,
+ .set_mode = vt8500_timer_set_mode,
+};
+
+static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ writel(0xf, regbase + TIMER_STATUS_VAL);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq = {
+ .name = "vt8500_timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = vt8500_timer_interrupt,
+ .dev_id = &clockevent,
+};
+
+static struct of_device_id vt8500_timer_ids[] = {
+ { .compatible = "via,vt8500-timer" },
+ { }
+};
+
+static void __init vt8500_timer_init(void)
+{
+ struct device_node *np;
+ int timer_irq;
+
+ np = of_find_matching_node(NULL, vt8500_timer_ids);
+ if (!np) {
+ pr_err("%s: Timer description missing from Device Tree\n",
+ __func__);
+ return;
+ }
+ regbase = of_iomap(np, 0);
+ if (!regbase) {
+ pr_err("%s: Missing iobase description in Device Tree\n",
+ __func__);
+ of_node_put(np);
+ return;
+ }
+ timer_irq = irq_of_parse_and_map(np, 0);
+ if (!timer_irq) {
+ pr_err("%s: Missing irq description in Device Tree\n",
+ __func__);
+ of_node_put(np);
+ return;
+ }
+
+ writel(1, regbase + TIMER_CTRL_VAL);
+ writel(0xf, regbase + TIMER_STATUS_VAL);
+ writel(~0, regbase + TIMER_MATCH_VAL);
+
+ if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+ pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
+ __func__, clocksource.name);
+
+ clockevent.cpumask = cpumask_of(0);
+
+ if (setup_irq(timer_irq, &irq))
+ pr_err("%s: setup_irq failed for %s\n", __func__,
+ clockevent.name);
+ clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+ 4, 0xf0000000);
+}
+
+CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 965b7811e04f..f1b7e244bfc1 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -256,7 +256,7 @@ static struct cn_dev cdev = {
.input = cn_rx_skb,
};
-static int __devinit cn_init(void)
+static int cn_init(void)
{
struct cn_dev *dev = &cdev;
struct netlink_kernel_cfg cfg = {
@@ -276,18 +276,18 @@ static int __devinit cn_init(void)
cn_already_initialized = 1;
- proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
+ proc_create("connector", S_IRUGO, init_net.proc_net, &cn_file_ops);
return 0;
}
-static void __devexit cn_fini(void)
+static void cn_fini(void)
{
struct cn_dev *dev = &cdev;
cn_already_initialized = 0;
- proc_net_remove(&init_net, "connector");
+ remove_proc_entry("connector", init_net.proc_net);
cn_queue_free_dev(dev->cbdev);
netlink_kernel_release(dev->nls);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index ea512f47b789..cbcb21e32771 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -20,6 +20,9 @@ if CPU_FREQ
config CPU_FREQ_TABLE
tristate
+config CPU_FREQ_GOV_COMMON
+ bool
+
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
select CPU_FREQ_TABLE
@@ -141,6 +144,7 @@ config CPU_FREQ_GOV_USERSPACE
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
select CPU_FREQ_TABLE
+ select CPU_FREQ_GOV_COMMON
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
The governor does a periodic polling and
@@ -159,6 +163,7 @@ config CPU_FREQ_GOV_ONDEMAND
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
+ select CPU_FREQ_GOV_COMMON
help
'conservative' - this driver is rather similar to the 'ondemand'
governor both in its source code and its purpose, the difference is
@@ -180,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
config GENERIC_CPUFREQ_CPU0
- bool "Generic CPU0 cpufreq driver"
+ tristate "Generic CPU0 cpufreq driver"
depends on HAVE_CLK && REGULATOR && PM_OPP && OF
select CPU_FREQ_TABLE
help
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0b3661d90b0..030ddf6dd3f1 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -21,8 +21,8 @@ config ARM_S3C2416_CPUFREQ
If in doubt, say N.
config ARM_S3C2416_CPUFREQ_VCORESCALE
- bool "Allow voltage scaling for S3C2416 arm core (EXPERIMENTAL)"
- depends on ARM_S3C2416_CPUFREQ && REGULATOR && EXPERIMENTAL
+ bool "Allow voltage scaling for S3C2416 arm core"
+ depends on ARM_S3C2416_CPUFREQ && REGULATOR
help
Enable CPU voltage scaling when entering the dvs mode.
It uses information gathered through existing hardware and
@@ -77,9 +77,39 @@ config ARM_EXYNOS5250_CPUFREQ
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
+config ARM_KIRKWOOD_CPUFREQ
+ def_bool ARCH_KIRKWOOD && OF
+ help
+ This adds the CPUFreq driver for Marvell Kirkwood
+ SoCs.
+
+config ARM_IMX6Q_CPUFREQ
+ tristate "Freescale i.MX6Q cpufreq support"
+ depends on SOC_IMX6Q
+ depends on REGULATOR_ANATOP
+ help
+ This adds cpufreq driver support for Freescale i.MX6Q SOC.
+
+ If in doubt, say N.
+
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
+
+config ARM_HIGHBANK_CPUFREQ
+ tristate "Calxeda Highbank-based"
+ depends on ARCH_HIGHBANK
+ select CPU_FREQ_TABLE
+ select GENERIC_CPUFREQ_CPU0
+ select PM_OPP
+ select REGULATOR
+
+ default m
+ help
+ This adds the CPUFreq driver for Calxeda Highbank SoC
+ based boards.
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 934854ae5eb4..d7dc0ed6adb0 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -2,6 +2,19 @@
# x86 CPU Frequency scaling drivers
#
+config X86_INTEL_PSTATE
+ bool "Intel P state control"
+ depends on X86
+ help
+ This driver provides P state control for Intel Core processors.
+ The driver implements an internal governor and will become
+ the scaling driver and governor for Sandy Bridge processors.
+
+ When this driver is enabled it will become the preferred
+ scaling driver for Sandy Bridge processors.
+
+ If in doubt, say N.
+
config X86_PCC_CPUFREQ
tristate "Processor Clocking Control interface driver"
depends on ACPI && ACPI_PROCESSOR
@@ -106,7 +119,7 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
select CPU_FREQ_TABLE
- depends on ACPI && ACPI_PROCESSOR
+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
Support for K10 and newer processors is now in acpi-cpufreq.
@@ -174,7 +187,7 @@ config X86_SPEEDSTEP_ICH
config X86_SPEEDSTEP_SMI
tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
select CPU_FREQ_TABLE
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
(Coppermine), all mobile Intel Pentium III-M (Tualatin)
@@ -206,7 +219,7 @@ config X86_P4_CLOCKMOD
config X86_CPUFREQ_NFORCE2
tristate "nVidia nForce2 FSB changing"
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for FSB changing on nVidia nForce2
platforms.
@@ -242,7 +255,7 @@ config X86_LONGHAUL
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
select CPU_FREQ_TABLE
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
does not have any safeguards to prevent operating the CPU out of spec
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 1f254ec087c1..863fd1865d45 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -7,8 +7,9 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
-obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o cpufreq_governor.o
-obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
+obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
@@ -18,11 +19,12 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
##################################################################################
# x86 drivers.
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
-# K8 systems. ACPI is preferred to all other hardware-specific drivers.
+# K8 systems. This is still the case, but acpi-cpufreq errors out so that
+# powernow-k8 can load instead. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
-obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
@@ -38,10 +40,11 @@ obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
+obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
@@ -49,8 +52,11 @@ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
-obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
+obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0d048f6a2b23..937bc286591f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -734,7 +734,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
- if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
+ if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->cpus, cpu_core_mask(cpu));
}
@@ -762,6 +762,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 0xf) {
+ pr_debug("AMD K8 systems must use native drivers.\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
break;
@@ -1030,4 +1036,11 @@ MODULE_PARM_DESC(acpi_pstate_strict,
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
+static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_ACPI),
+ X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+
MODULE_ALIAS("acpi");
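
The x86_cpu_id table added above is consumed mainly through the MODULE_DEVICE_TABLE entry, so udev can autoload acpi-cpufreq on CPUs advertising the ACPI or HW_PSTATE feature flags. Drivers that also want to refuse to initialize on non-matching CPUs typically pair such a table with x86_match_cpu(); a hedged sketch of that pattern, where the init function is illustrative and not this driver's actual code:

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <asm/cpu_device_id.h>

    static int __init example_init(void)
    {
            if (!x86_match_cpu(acpi_cpufreq_ids))
                    return -ENODEV;
            /* ... proceed with cpufreq_register_driver() ... */
            return 0;
    }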
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 52bf36d599f5..4e5b7fb8927c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -12,12 +12,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
-#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -71,12 +71,15 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
}
if (cpu_reg) {
+ rcu_read_lock();
opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
return PTR_ERR(opp);
}
volt = opp_get_voltage(opp);
+ rcu_read_unlock();
tol = volt * voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
@@ -143,7 +146,6 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
* share the clock and voltage. Use cpufreq affected_cpus
* interface to have all CPUs scaled together.
*/
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
@@ -174,34 +176,32 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.attr = cpu0_cpufreq_attr,
};
-static int cpu0_cpufreq_driver_init(void)
+static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
int ret;
- np = of_find_node_by_path("/cpus/cpu@0");
+ for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
+ if (of_get_property(np, "operating-points", NULL))
+ break;
+ }
+
if (!np) {
pr_err("failed to find cpu0 node\n");
return -ENOENT;
}
- cpu_dev = get_cpu_device(0);
- if (!cpu_dev) {
- pr_err("failed to get cpu0 device\n");
- ret = -ENODEV;
- goto out_put_node;
- }
-
+ cpu_dev = &pdev->dev;
cpu_dev->of_node = np;
- cpu_clk = clk_get(cpu_dev, NULL);
+ cpu_clk = devm_clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
goto out_put_node;
}
- cpu_reg = regulator_get(cpu_dev, "cpu0");
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
pr_warn("failed to get cpu0 regulator\n");
cpu_reg = NULL;
@@ -236,12 +236,14 @@ static int cpu0_cpufreq_driver_init(void)
*/
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
+ rcu_read_lock();
opp = opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_uV = opp_get_voltage(opp);
opp = opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
max_uV = opp_get_voltage(opp);
+ rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
transition_latency += ret * 1000;
@@ -262,7 +264,24 @@ out_put_node:
of_node_put(np);
return ret;
}
-late_initcall(cpu0_cpufreq_driver_init);
+
+static int cpu0_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&cpu0_cpufreq_driver);
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+
+ return 0;
+}
+
+static struct platform_driver cpu0_cpufreq_platdrv = {
+ .driver = {
+ .name = "cpufreq-cpu0",
+ .owner = THIS_MODULE,
+ },
+ .probe = cpu0_cpufreq_probe,
+ .remove = cpu0_cpufreq_remove,
+};
+module_platform_driver(cpu0_cpufreq_platdrv);
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
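
The rcu_read_lock()/rcu_read_unlock() pairs added in this file reflect the OPP library's locking rule in this kernel: opp_find_freq_ceil()/opp_find_freq_exact() return RCU-protected pointers, so the lookup and any opp_get_voltage() call on the result must sit inside a single RCU read-side critical section, and the opp pointer must not be dereferenced after the unlock. A condensed sketch of the pattern used in the hunks above; the variable names are illustrative and the fragment is assumed to live inside a set_target-style function:

    rcu_read_lock();
    opp = opp_find_freq_ceil(cpu_dev, &freq_hz);   /* freq_hz rounded up in place */
    if (IS_ERR(opp)) {
            rcu_read_unlock();
            return PTR_ERR(opp);
    }
    volt = opp_get_voltage(opp);                   /* use opp only before unlock */
    rcu_read_unlock();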
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1f93dbd72355..b02824d092e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* mode before doing so.
*
* Additional rules:
- * - All holders of the lock should check to make sure that the CPU they
- * are concerned with are online after they get the lock.
* - Governor routines that can be called in cpufreq hotplug path should not
* take this sem as top level hotplug notifier handler takes this.
* - Lock should not be held across
@@ -70,38 +68,28 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode \
-(int cpu) \
+static int lock_policy_rwsem_##mode(int cpu) \
{ \
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
BUG_ON(policy_cpu == -1); \
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- if (unlikely(!cpu_online(cpu))) { \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- return -1; \
- } \
\
return 0; \
}
lock_policy_rwsem(read, cpu);
-
lock_policy_rwsem(write, cpu);
-static void unlock_policy_rwsem_read(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
-}
-
-static void unlock_policy_rwsem_write(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+#define unlock_policy_rwsem(mode, cpu) \
+static void unlock_policy_rwsem_##mode(int cpu) \
+{ \
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
+ BUG_ON(policy_cpu == -1); \
+ up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
}
+unlock_policy_rwsem(read, cpu);
+unlock_policy_rwsem(write, cpu);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
@@ -180,6 +168,9 @@ err_out:
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
+ if (cpufreq_disabled())
+ return NULL;
+
return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -198,6 +189,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
+ if (cpufreq_disabled())
+ return;
+
__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
@@ -261,14 +255,21 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
struct cpufreq_policy *policy;
+ unsigned long flags;
BUG_ON(irqs_disabled());
+ if (cpufreq_disabled())
+ return;
+
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
switch (state) {
case CPUFREQ_PRECHANGE:
@@ -294,7 +295,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
(unsigned long)freqs->cpu);
- trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
@@ -543,8 +543,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
*/
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
- if (cpumask_empty(policy->related_cpus))
- return show_cpus(policy->cpus, buf);
return show_cpus(policy->related_cpus, buf);
}
@@ -700,87 +698,6 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-/*
- * Returns:
- * Negative: Failure
- * 0: Success
- * Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
- struct cpufreq_policy *policy,
- struct device *dev)
-{
- int ret = 0;
-#ifdef CONFIG_SMP
- unsigned long flags;
- unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
- struct cpufreq_governor *gov;
-
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
- if (gov) {
- policy->governor = gov;
- pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, cpu);
- }
-#endif
-
- for_each_cpu(j, policy->cpus) {
- struct cpufreq_policy *managed_policy;
-
- if (cpu == j)
- continue;
-
- /* Check for existing affected CPUs.
- * They may not be aware of it due to CPU Hotplug.
- * cpufreq_cpu_put is called when the device is removed
- * in __cpufreq_remove_dev()
- */
- managed_policy = cpufreq_cpu_get(j);
- if (unlikely(managed_policy)) {
-
- /* Set proper policy_cpu */
- unlock_policy_rwsem_write(cpu);
- per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
- if (lock_policy_rwsem_write(cpu) < 0) {
- /* Should not go through policy unlock path */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
- cpufreq_cpu_put(managed_policy);
- return -EBUSY;
- }
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpumask_copy(managed_policy->cpus, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&dev->kobj,
- &managed_policy->kobj,
- "cpufreq");
- if (ret)
- cpufreq_cpu_put(managed_policy);
- /*
- * Success. We only needed to be added to the mask.
- * Call driver->exit() because only the cpu parent of
- * the kobj needed to call init().
- */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- if (!ret)
- return 1;
- else
- return ret;
- }
- }
-#endif
- return ret;
-}
-
-
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
struct cpufreq_policy *policy)
@@ -794,8 +711,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
if (j == cpu)
continue;
- if (!cpu_online(j))
- continue;
pr_debug("CPU %u already managed, adding link\n", j);
managed_policy = cpufreq_cpu_get(cpu);
@@ -852,8 +767,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
@@ -885,6 +798,42 @@ err_out_kobj_put:
return ret;
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
+ struct device *dev)
+{
+ struct cpufreq_policy *policy;
+ int ret = 0;
+ unsigned long flags;
+
+ policy = cpufreq_cpu_get(sibling);
+ WARN_ON(!policy);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
+ lock_policy_rwsem_write(sibling);
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpumask_set_cpu(cpu, policy->cpus);
+ per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
+ per_cpu(cpufreq_cpu_data, cpu) = policy;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ unlock_policy_rwsem_write(sibling);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
/**
* cpufreq_add_dev - add a CPU device
@@ -897,12 +846,12 @@ err_out_kobj_put:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
- int ret = 0, found = 0;
+ unsigned int j, cpu = dev->id;
+ int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
- unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
+ struct cpufreq_governor *gov;
int sibling;
#endif
@@ -919,6 +868,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
cpufreq_cpu_put(policy);
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Check if this cpu was hot-unplugged earlier and has siblings */
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_online_cpu(sibling) {
+ struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+ if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return cpufreq_add_policy_cpu(cpu, sibling, dev);
+ }
+ }
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
#endif
if (!try_module_get(cpufreq_driver->owner)) {
@@ -926,7 +888,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto module_out;
}
- ret = -ENOMEM;
policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!policy)
goto nomem_out;
@@ -938,66 +899,58 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto err_free_cpumask;
policy->cpu = cpu;
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* Initially set CPU itself as the policy_cpu */
per_cpu(cpufreq_policy_cpu, cpu) = cpu;
- ret = (lock_policy_rwsem_write(cpu) < 0);
- WARN_ON(ret);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
- /* Set governor before ->init, so that driver could check it */
-#ifdef CONFIG_HOTPLUG_CPU
- for_each_online_cpu(sibling) {
- struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
- if (cp && cp->governor &&
- (cpumask_test_cpu(cpu, cp->related_cpus))) {
- policy->governor = cp->governor;
- found = 1;
- break;
- }
- }
-#endif
- if (!found)
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
ret = cpufreq_driver->init(policy);
if (ret) {
pr_debug("initialization failed\n");
- goto err_unlock_policy;
+ goto err_set_policy_cpu;
}
+
+ /* related cpus should at least have policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+ * affected cpus must always be the ones which are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, dev);
- if (ret) {
- if (ret > 0)
- /* This is a managed cpu, symlink created,
- exit with 0 */
- ret = 0;
- goto err_unlock_policy;
+#ifdef CONFIG_HOTPLUG_CPU
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+ if (gov) {
+ policy->governor = gov;
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, cpu);
}
+#endif
ret = cpufreq_add_dev_interface(cpu, policy, dev);
if (ret)
goto err_out_unregister;
- unlock_policy_rwsem_write(cpu);
-
kobject_uevent(&policy->kobj, KOBJ_ADD);
module_put(cpufreq_driver->owner);
pr_debug("initialization complete\n");
return 0;
-
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
@@ -1007,8 +960,8 @@ err_out_unregister:
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
-err_unlock_policy:
- unlock_policy_rwsem_write(cpu);
+err_set_policy_cpu:
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
free_cpumask_var(policy->cpus);
@@ -1020,6 +973,22 @@ module_out:
return ret;
}
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ int j;
+
+ policy->last_cpu = policy->cpu;
+ policy->cpu = cpu;
+
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_policy_cpu, j) = cpu;
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+ cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
/**
* __cpufreq_remove_dev - remove a CPU device
@@ -1030,129 +999,103 @@ module_out:
*/
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
+ unsigned int cpu = dev->id, ret, cpus;
unsigned long flags;
struct cpufreq_policy *data;
struct kobject *kobj;
struct completion *cmp;
-#ifdef CONFIG_SMP
struct device *cpu_dev;
- unsigned int j;
-#endif
- pr_debug("unregistering CPU %u\n", cpu);
+ pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
data = per_cpu(cpufreq_cpu_data, cpu);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!data) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(cpu);
+ pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
}
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ if (cpufreq_driver->target)
+ __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-#ifdef CONFIG_SMP
- /* if this isn't the CPU which is the parent of the kobj, we
- * only need to unlink, put and exit
- */
- if (unlikely(cpu != data->cpu)) {
- pr_debug("removing link\n");
- cpumask_clear_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &dev->kobj;
- cpufreq_cpu_put(data);
- unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- return 0;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!cpufreq_driver->setpolicy)
+ strncpy(per_cpu(cpufreq_cpu_governor, cpu),
+ data->governor->name, CPUFREQ_NAME_LEN);
#endif
-#ifdef CONFIG_SMP
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ cpus = cpumask_weight(data->cpus);
+ cpumask_clear_cpu(cpu, data->cpus);
+ unlock_policy_rwsem_write(cpu);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
- CPUFREQ_NAME_LEN);
-#endif
+ if (cpu != data->cpu) {
+ sysfs_remove_link(&dev->kobj, "cpufreq");
+ } else if (cpus > 1) {
+ /* first sibling now owns the new sysfs dir */
+ cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d", __func__, ret);
- /* if we have other CPUs still registered, we need to unlink them,
- * or else wait_for_completion below will lock up. Clean the
- * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
- * the sysfs links afterwards.
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- per_cpu(cpufreq_cpu_data, j) = NULL;
- }
- }
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ cpumask_set_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, cpu) = data;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, j),
- data->governor->name, CPUFREQ_NAME_LEN);
-#endif
- cpu_dev = get_cpu_device(j);
- kobj = &cpu_dev->kobj;
unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- lock_policy_rwsem_write(cpu);
- cpufreq_cpu_put(data);
- }
- }
-#else
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+ "cpufreq");
+ return -EINVAL;
+ }
- kobj = &data->kobj;
- cmp = &data->kobj_unregister;
- unlock_policy_rwsem_write(cpu);
- kobject_put(kobj);
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ update_policy_cpu(data, cpu_dev->id);
+ unlock_policy_rwsem_write(cpu);
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, cpu_dev->id, cpu);
+ }
- /* we need to make sure that the underlying kobj is actually
- * not referenced anymore by anybody before we proceed with
- * unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
+ pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+ cpufreq_cpu_put(data);
- lock_policy_rwsem_write(cpu);
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(data);
- unlock_policy_rwsem_write(cpu);
+ /* If cpu is last user of policy, free policy */
+ if (cpus == 1) {
+ lock_policy_rwsem_read(cpu);
+ kobj = &data->kobj;
+ cmp = &data->kobj_unregister;
+ unlock_policy_rwsem_read(cpu);
+ kobject_put(kobj);
+
+ /* we need to make sure that the underlying kobj is actually
+ * not referenced anymore by anybody before we proceed with
+ * unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
-#ifdef CONFIG_HOTPLUG_CPU
- /* when the CPU which is the parent of the kobj is hotplugged
- * offline, check for siblings, and create cpufreq sysfs interface
- * and symlinks
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- /* first sibling now owns the new sysfs dir */
- cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(data);
- /* finally remove our own symlink */
- lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(dev, sif);
+ free_cpumask_var(data->related_cpus);
+ free_cpumask_var(data->cpus);
+ kfree(data);
+ } else if (cpufreq_driver->target) {
+ __cpufreq_governor(data, CPUFREQ_GOV_START);
+ __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}
-#endif
-
- free_cpumask_var(data->related_cpus);
- free_cpumask_var(data->cpus);
- kfree(data);
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
return 0;
}
@@ -1165,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1216,9 +1156,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+ return cpufreq_driver->get(cpu);
+
+ policy = cpufreq_cpu_get(cpu);
if (policy) {
ret_freq = policy->cur;
cpufreq_cpu_put(policy);
@@ -1386,6 +1330,20 @@ static struct syscore_ops cpufreq_syscore_ops = {
.resume = cpufreq_bp_resume,
};
+/**
+ * cpufreq_get_current_driver - return current driver's name
+ *
+ * Return the name string of the currently loaded cpufreq driver
+ * or NULL, if none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->name;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/*********************************************************************
* NOTIFIER LISTS INTERFACE *
@@ -1408,6 +1366,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
WARN_ON(!init_cpufreq_transition_notifier_list_called);
switch (list) {
@@ -1442,6 +1403,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
ret = srcu_notifier_chain_unregister(
@@ -1487,7 +1451,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (target_freq == policy->cur)
return 0;
- if (cpu_online(policy->cpu) && cpufreq_driver->target)
+ if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
return retval;
@@ -1522,7 +1486,10 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
- if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+ if (cpufreq_disabled())
+ return ret;
+
+ if (!cpufreq_driver->getavg)
return 0;
policy = cpufreq_cpu_get(policy->cpu);
@@ -1577,6 +1544,11 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->cpu, event);
ret = policy->governor->governor(policy, event);
+ if (event == CPUFREQ_GOV_START)
+ policy->governor->initialized++;
+ else if (event == CPUFREQ_GOV_STOP)
+ policy->governor->initialized--;
+
/* we keep one module reference alive for
each CPU governed by this CPU */
if ((event != CPUFREQ_GOV_START) || ret)
@@ -1600,6 +1572,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
+ governor->initialized = 0;
err = -EBUSY;
if (__find_governor(governor->name) == NULL) {
err = 0;
@@ -1797,7 +1770,7 @@ int cpufreq_update_policy(unsigned int cpu)
pr_debug("Driver did not initialize current freq");
data->cur = policy.cur;
} else {
- if (data->cur != policy.cur)
+ if (data->cur != policy.cur && cpufreq_driver->target)
cpufreq_out_of_sync(cpu, data->cur,
policy.cur);
}
@@ -1829,9 +1802,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
__cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 64ef737e7e72..4fd0006b1291 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -25,7 +25,7 @@
#include "cpufreq_governor.h"
-/* Conservative governor macors */
+/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -113,17 +113,20 @@ static void cs_check_cpu(int cpu, unsigned int load)
static void cs_dbs_timer(struct work_struct *work)
{
+ struct delayed_work *dw = to_delayed_work(work);
struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
struct cs_cpu_dbs_info_s, cdbs.work.work);
- unsigned int cpu = dbs_info->cdbs.cpu;
+ unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+ cpu);
int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
- mutex_lock(&dbs_info->cdbs.timer_mutex);
+ mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+ if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
+ dbs_check_cpu(&cs_dbs_data, cpu);
- dbs_check_cpu(&cs_dbs_data, cpu);
-
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ schedule_delayed_work_on(smp_processor_id(), dw, delay);
+ mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -141,7 +144,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
/*
* we only care if our internally tracked freq moves outside the 'valid'
- * ranges of freqency available to us otherwise we do not change it
+ * ranges of frequency available to us otherwise we do not change it
*/
if (dbs_info->requested_freq > policy->max
|| dbs_info->requested_freq < policy->min)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 6c5f1d383cdc..5a76086ff09b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -161,25 +161,48 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
-static inline void dbs_timer_init(struct dbs_data *dbs_data,
- struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
+static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
+ unsigned int sampling_rate)
{
int delay = delay_for_sampling_rate(sampling_rate);
+ struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
- INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
- schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
+ schedule_delayed_work_on(cpu, &cdbs->work, delay);
}
-static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
+static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
{
+ struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+
cancel_delayed_work_sync(&cdbs->work);
}
+/* Will return if we need to evaluate cpu load again or not */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate)
+{
+ if (policy_is_shared(cdbs->cur_policy)) {
+ ktime_t time_now = ktime_get();
+ s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+ /* Do nothing if we recently have sampled */
+ if (delta_us < (s64)(sampling_rate / 2))
+ return false;
+ else
+ cdbs->time_stamp = time_now;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(need_load_eval);
+
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event)
{
struct od_cpu_dbs_info_s *od_dbs_info = NULL;
struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
+ struct cs_ops *cs_ops = NULL;
+ struct od_ops *od_ops = NULL;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
struct cpu_dbs_common_info *cpu_cdbs;
@@ -192,109 +215,111 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
sampling_rate = &cs_tuners->sampling_rate;
ignore_nice = cs_tuners->ignore_nice;
+ cs_ops = dbs_data->gov_ops;
} else {
od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
sampling_rate = &od_tuners->sampling_rate;
ignore_nice = od_tuners->ignore_nice;
+ od_ops = dbs_data->gov_ops;
}
switch (event) {
case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) || (!policy->cur))
+ if (!policy->cur)
return -EINVAL;
mutex_lock(&dbs_data->mutex);
- dbs_data->enable++;
- cpu_cdbs->cpu = cpu;
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_common_info *j_cdbs;
- j_cdbs = dbs_data->get_cpu_cdbs(j);
+ struct cpu_dbs_common_info *j_cdbs =
+ dbs_data->get_cpu_cdbs(j);
+ j_cdbs->cpu = j;
j_cdbs->cur_policy = policy;
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
&j_cdbs->prev_cpu_wall);
if (ignore_nice)
j_cdbs->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
- /*
- * Start the timerschedule work, when this governor is used for
- * first time
- */
- if (dbs_data->enable != 1)
- goto second_time;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- dbs_data->attr_group);
- if (rc) {
- mutex_unlock(&dbs_data->mutex);
- return rc;
+ mutex_init(&j_cdbs->timer_mutex);
+ INIT_DEFERRABLE_WORK(&j_cdbs->work,
+ dbs_data->gov_dbs_timer);
}
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
+ if (!policy->governor->initialized) {
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ dbs_data->attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_data->mutex);
+ return rc;
+ }
+ }
/*
* conservative does not implement micro like ondemand
* governor, thus we are bound to jiffies/HZ
*/
if (dbs_data->governor == GOV_CONSERVATIVE) {
- struct cs_ops *ops = dbs_data->gov_ops;
+ cs_dbs_info->down_skip = 0;
+ cs_dbs_info->enable = 1;
+ cs_dbs_info->requested_freq = policy->cur;
- cpufreq_register_notifier(ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
+ if (!policy->governor->initialized) {
+ cpufreq_register_notifier(cs_ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
- dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
+ dbs_data->min_sampling_rate =
+ MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ }
} else {
- struct od_ops *ops = dbs_data->gov_ops;
+ od_dbs_info->rate_mult = 1;
+ od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ od_ops->powersave_bias_init_cpu(cpu);
- od_tuners->io_is_busy = ops->io_busy();
+ if (!policy->governor->initialized)
+ od_tuners->io_is_busy = od_ops->io_busy();
}
+ if (policy->governor->initialized)
+ goto unlock;
+
+ /* policy latency is in nS. Convert it to uS first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
+
/* Bring kernel and HW constraints together */
dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
MIN_LATENCY_MULTIPLIER * latency);
*sampling_rate = max(dbs_data->min_sampling_rate, latency *
LATENCY_MULTIPLIER);
-
-second_time:
- if (dbs_data->governor == GOV_CONSERVATIVE) {
- cs_dbs_info->down_skip = 0;
- cs_dbs_info->enable = 1;
- cs_dbs_info->requested_freq = policy->cur;
- } else {
- struct od_ops *ops = dbs_data->gov_ops;
- od_dbs_info->rate_mult = 1;
- od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
- ops->powersave_bias_init_cpu(cpu);
- }
+unlock:
mutex_unlock(&dbs_data->mutex);
- mutex_init(&cpu_cdbs->timer_mutex);
- dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
+	/* Initialize the timer time stamp */
+ cpu_cdbs->time_stamp = ktime_get();
+
+ for_each_cpu(j, policy->cpus)
+ dbs_timer_init(dbs_data, j, *sampling_rate);
break;
case CPUFREQ_GOV_STOP:
if (dbs_data->governor == GOV_CONSERVATIVE)
cs_dbs_info->enable = 0;
- dbs_timer_exit(cpu_cdbs);
+ for_each_cpu(j, policy->cpus)
+ dbs_timer_exit(dbs_data, j);
mutex_lock(&dbs_data->mutex);
mutex_destroy(&cpu_cdbs->timer_mutex);
- dbs_data->enable--;
- if (!dbs_data->enable) {
- struct cs_ops *ops = dbs_data->gov_ops;
+ if (policy->governor->initialized == 1) {
sysfs_remove_group(cpufreq_global_kobject,
dbs_data->attr_group);
if (dbs_data->governor == GOV_CONSERVATIVE)
- cpufreq_unregister_notifier(ops->notifier_block,
+ cpufreq_unregister_notifier(cs_ops->notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
mutex_unlock(&dbs_data->mutex);
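The GOV_START path above derives the governor's effective sampling period from the driver's reported transition latency. A standalone sketch of that arithmetic, assuming the usual cpufreq.h multipliers of this era (LATENCY_MULTIPLIER 1000, MIN_LATENCY_MULTIPLIER 100) and a purely illustrative 20000 ns latency:

#include <linux/kernel.h>	/* max() */

#define LATENCY_MULTIPLIER	(1000)	/* assumed, see include/linux/cpufreq.h */
#define MIN_LATENCY_MULTIPLIER	(100)	/* assumed */

static unsigned int effective_sampling_rate(unsigned int transition_latency_ns,
					    unsigned int min_sampling_rate)
{
	unsigned int latency = transition_latency_ns / 1000;	/* ns -> us */

	if (latency == 0)
		latency = 1;

	/* bring kernel and HW constraints together */
	min_sampling_rate = max(min_sampling_rate,
				MIN_LATENCY_MULTIPLIER * latency);

	return max(min_sampling_rate, latency * LATENCY_MULTIPLIER);
}

/* e.g. effective_sampling_rate(20000, 10000) == 20000 us */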
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index f6616540c53d..d2ac91150600 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -82,6 +82,7 @@ struct cpu_dbs_common_info {
* the governor or limits.
*/
struct mutex timer_mutex;
+ ktime_t time_stamp;
};
struct od_cpu_dbs_info_s {
@@ -108,7 +109,7 @@ struct od_dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
- unsigned int down_differential;
+ unsigned int adj_up_threshold;
unsigned int powersave_bias;
unsigned int io_is_busy;
};
@@ -129,7 +130,6 @@ struct dbs_data {
#define GOV_CONSERVATIVE 1
int governor;
unsigned int min_sampling_rate;
- unsigned int enable; /* number of CPUs using this policy */
struct attribute_group *attr_group;
void *tuners;
@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event);
#endif /* _CPUFREQ_GOVERNER_H */
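cpufreq_governor.h only declares need_load_eval() and adds the time_stamp field it works on; the helper's body is not part of the hunks shown here. A plausible sketch, assuming it simply rate-limits load evaluation on shared policies by comparing the per-policy time stamp against the sampling period:

#include "cpufreq_governor.h"

/* Sketch only -- the real helper lives in cpufreq_governor.c */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		    unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* another CPU of this policy sampled very recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;

		cdbs->time_stamp = time_now;
	}

	return true;
}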
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7731f7c7e79a..f3eb26cd848f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -26,7 +26,7 @@
#include "cpufreq_governor.h"
-/* On-demand governor macors */
+/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -47,7 +47,8 @@ static struct cpufreq_governor cpufreq_gov_ondemand;
static struct od_dbs_tuners od_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
- .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
+ .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
+ DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
};
@@ -65,7 +66,7 @@ static void ondemand_powersave_bias_init_cpu(int cpu)
* efficient idling at a higher frequency/voltage is.
* Pavel Machek says this is not so for various generations of AMD and old
* Intel systems.
- * Mike Chan (androidlcom) calis this is also not true for ARM.
+ * Mike Chan (android.com) claims this is also not true for ARM.
* Because of this, whitelist specific known (series) of CPUs by default, and
* leave all others up to the user.
*/
@@ -73,7 +74,7 @@ static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
/*
- * For Intel, Core 2 (model 15) andl later have an efficient idle.
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6 &&
@@ -158,8 +159,8 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
/*
* Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency Every sampling_rate, we look for
- * a the lowest frequency which can sustain the load while keeping idle time
+ * (default), then we try to increase frequency. Every sampling_rate, we look
+ * for the lowest frequency which can sustain the load while keeping idle time
* over 30%. If such a frequency exist, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency. Frequency reduction
@@ -192,11 +193,9 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
* support the current CPU usage without triggering the up policy. To be
* safe, we focus 10 points under the threshold.
*/
- if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
- policy->cur) {
+ if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
unsigned int freq_next;
- freq_next = load_freq / (od_tuners.up_threshold -
- od_tuners.down_differential);
+ freq_next = load_freq / od_tuners.adj_up_threshold;
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
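With the defaults in this file (up_threshold 80, down_differential 10), adj_up_threshold starts at 70, so the rewritten test is numerically identical to the old two-term expression. A worked example with illustrative numbers:

/*
 * adj_up_threshold = up_threshold - down_differential = 80 - 10 = 70
 *
 * Say policy->cur = 1000000 kHz (1 GHz) and the sampled load is 60%,
 * so load_freq ~= 60 * 1000000.  Then
 *
 *   load_freq < adj_up_threshold * policy->cur
 *   60000000  < 70 * 1000000                      -> scale down
 *
 *   freq_next = load_freq / adj_up_threshold
 *             = 60000000 / 70 ~= 857142 kHz (~857 MHz)
 *
 * which the governor then clamps against policy->min before asking
 * the driver for it, exactly as before this patch.
 */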
@@ -218,33 +217,42 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
static void od_dbs_timer(struct work_struct *work)
{
+ struct delayed_work *dw = to_delayed_work(work);
struct od_cpu_dbs_info_s *dbs_info =
container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
- unsigned int cpu = dbs_info->cdbs.cpu;
- int delay, sample_type = dbs_info->sample_type;
+ unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+ cpu);
+ int delay, sample_type = core_dbs_info->sample_type;
+ bool eval_load;
- mutex_lock(&dbs_info->cdbs.timer_mutex);
+ mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+ eval_load = need_load_eval(&core_dbs_info->cdbs,
+ od_tuners.sampling_rate);
/* Common NORMAL_SAMPLE setup */
- dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
if (sample_type == OD_SUB_SAMPLE) {
- delay = dbs_info->freq_lo_jiffies;
- __cpufreq_driver_target(dbs_info->cdbs.cur_policy,
- dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = core_dbs_info->freq_lo_jiffies;
+ if (eval_load)
+ __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+ core_dbs_info->freq_lo,
+ CPUFREQ_RELATION_H);
} else {
- dbs_check_cpu(&od_dbs_data, cpu);
- if (dbs_info->freq_lo) {
+ if (eval_load)
+ dbs_check_cpu(&od_dbs_data, cpu);
+ if (core_dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
- dbs_info->sample_type = OD_SUB_SAMPLE;
- delay = dbs_info->freq_hi_jiffies;
+ core_dbs_info->sample_type = OD_SUB_SAMPLE;
+ delay = core_dbs_info->freq_hi_jiffies;
} else {
delay = delay_for_sampling_rate(od_tuners.sampling_rate
- * dbs_info->rate_mult);
+ * core_dbs_info->rate_mult);
}
}
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ schedule_delayed_work_on(smp_processor_id(), dw, delay);
+ mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
/************************** sysfs interface ************************/
@@ -259,7 +267,7 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
* update_sampling_rate - update sampling rate effective immediately if needed.
* @new_rate: new sampling rate
*
- * If new rate is smaller than the old, simply updaing
+ * If new rate is smaller than the old, simply updating
* dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
* original sampling_rate was 1 second and the requested new sampling rate is 10
* ms because the user needs immediate reaction from ondemand governor, but not
@@ -287,7 +295,7 @@ static void update_sampling_rate(unsigned int new_rate)
cpufreq_cpu_put(policy);
continue;
}
- dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
+ dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
cpufreq_cpu_put(policy);
mutex_lock(&dbs_info->cdbs.timer_mutex);
@@ -306,8 +314,7 @@ static void update_sampling_rate(unsigned int new_rate)
cancel_delayed_work_sync(&dbs_info->cdbs.work);
mutex_lock(&dbs_info->cdbs.timer_mutex);
- schedule_delayed_work_on(dbs_info->cdbs.cpu,
- &dbs_info->cdbs.work,
+ schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
usecs_to_jiffies(new_rate));
}
@@ -351,6 +358,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
+ /* Calculate the new adj_up_threshold */
+ od_tuners.adj_up_threshold += input;
+ od_tuners.adj_up_threshold -= od_tuners.up_threshold;
+
od_tuners.up_threshold = input;
return count;
}
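store_up_threshold() keeps adj_up_threshold consistent without ever storing down_differential again; the two-step update is just the invariant written incrementally:

/*
 * new_adj = old_adj + input - old_up
 *         = (old_up - down_differential) + input - old_up
 *         = input - down_differential
 *
 * e.g. with the 80/10 defaults adj starts at 70; writing 95 to
 * up_threshold gives adj = 70 + 95 - 80 = 85, i.e. still 95 - 10.
 */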
@@ -507,7 +518,8 @@ static int __init cpufreq_gov_dbs_init(void)
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
+ MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index e40e50809644..2fd779eb1ed1 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -24,12 +24,6 @@
static spinlock_t cpufreq_stats_lock;
-#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
-static struct freq_attr _attr_##_name = {\
- .attr = {.name = __stringify(_name), .mode = _mode, }, \
- .show = _show,\
-};
-
struct cpufreq_stats {
unsigned int cpu;
unsigned int total_trans;
@@ -136,17 +130,17 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
return PAGE_SIZE;
return len;
}
-CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table);
+cpufreq_freq_attr_ro(trans_table);
#endif
-CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans);
-CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state);
+cpufreq_freq_attr_ro(total_trans);
+cpufreq_freq_attr_ro(time_in_state);
static struct attribute *default_attrs[] = {
- &_attr_total_trans.attr,
- &_attr_time_in_state.attr,
+ &total_trans.attr,
+ &time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- &_attr_trans_table.attr,
+ &trans_table.attr,
#endif
NULL
};
@@ -170,11 +164,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
static void cpufreq_stats_free_table(unsigned int cpu)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+
if (stat) {
+ pr_debug("%s: Free stat table\n", __func__);
kfree(stat->time_in_state);
kfree(stat);
+ per_cpu(cpufreq_stats_table, cpu) = NULL;
}
- per_cpu(cpufreq_stats_table, cpu) = NULL;
}
/* must be called early in the CPU removal sequence (before
@@ -183,8 +179,14 @@ static void cpufreq_stats_free_table(unsigned int cpu)
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- if (policy && policy->cpu == cpu)
+
+ if (!cpufreq_frequency_get_table(cpu))
+ return;
+
+ if (policy && !policy_is_shared(policy)) {
+ pr_debug("%s: Free sysfs stat\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ }
if (policy)
cpufreq_cpu_put(policy);
}
@@ -262,6 +264,19 @@ error_get_fail:
return ret;
}
+static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
+{
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
+ policy->last_cpu);
+
+ pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
+ per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
+ policy->last_cpu);
+ per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
+ stat->cpu = policy->cpu;
+}
+
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
@@ -269,6 +284,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu = policy->cpu;
+
+ if (val == CPUFREQ_UPDATE_POLICY_CPU) {
+ cpufreq_stats_update_policy_cpu(policy);
+ return 0;
+ }
+
if (val != CPUFREQ_NOTIFY)
return 0;
table = cpufreq_frequency_get_table(cpu);
@@ -364,18 +385,21 @@ static int __init cpufreq_stats_init(void)
if (ret)
return ret;
+ register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ for_each_online_cpu(cpu)
+ cpufreq_update_policy(cpu);
+
ret = cpufreq_register_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
cpufreq_unregister_notifier(&notifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
+ unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ for_each_online_cpu(cpu)
+ cpufreq_stats_free_table(cpu);
return ret;
}
- register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
- for_each_online_cpu(cpu) {
- cpufreq_update_policy(cpu);
- }
return 0;
}
static void __exit cpufreq_stats_exit(void)
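The new CPUFREQ_UPDATE_POLICY_CPU handling is easiest to read as a hand-off sequence. The core-side behaviour sketched below is an assumption about how the notification is raised (it is not part of this patch): when the CPU that owns a shared policy goes offline, the core picks a surviving CPU, records the old one in policy->last_cpu and announces the change:

/* Sketch of the core-side hand-off this notifier reacts to (assumed) */
policy->last_cpu = policy->cpu;		/* owner about to go offline */
policy->cpu = new_cpu;			/* surviving CPU of the same policy */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			     CPUFREQ_UPDATE_POLICY_CPU, policy);

/*
 * cpufreq_stat_notifier_policy() then calls
 * cpufreq_stats_update_policy_cpu(), which moves the per-cpu stats
 * table from last_cpu to cpu and clears the old slot, so no history
 * is lost across the hotplug.
 */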
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index c8c3d293cc57..bbeb9c0720a6 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -118,8 +118,6 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
- if (!cpu_online(cpu))
- return -EINVAL;
BUG_ON(!policy->cur);
mutex_lock(&userspace_mutex);
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 4f154bc0ebe4..72f0c3efa76e 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -1,13 +1,13 @@
/*
* Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2010-2012
*
* License Terms: GNU General Public License v2
* Author: Sundar Iyer <sundar.iyer@stericsson.com>
* Author: Martin Persson <martin.persson@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
*/
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
@@ -15,27 +15,27 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <mach/id.h>
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
-static struct freq_attr *db8500_cpufreq_attr[] = {
+static struct freq_attr *dbx500_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
-static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
+static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
-static int db8500_cpufreq_target(struct cpufreq_policy *policy,
+static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpufreq_freqs freqs;
unsigned int idx;
+ int ret;
/* scale the target frequency to one of the extremes supported */
if (target_freq < policy->cpuinfo.min_freq)
@@ -44,10 +44,9 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
target_freq = policy->cpuinfo.max_freq;
/* Lookup the next frequency */
- if (cpufreq_frequency_table_target
- (policy, freq_table, target_freq, relation, &idx)) {
+ if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &idx))
return -EINVAL;
- }
freqs.old = policy->cur;
freqs.new = freq_table[idx].frequency;
@@ -60,9 +59,12 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* update armss clk frequency */
- if (clk_set_rate(armss_clk, freq_table[idx].frequency * 1000)) {
- pr_err("db8500-cpufreq: Failed to update armss clk\n");
- return -EINVAL;
+ ret = clk_set_rate(armss_clk, freqs.new * 1000);
+
+ if (ret) {
+ pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
+ freqs.new * 1000, ret);
+ return ret;
}
/* post change notification */
@@ -72,7 +74,7 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
return 0;
}
-static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
+static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
{
int i = 0;
unsigned long freq = clk_get_rate(armss_clk) / 1000;
@@ -84,40 +86,26 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
}
/* We could not find a corresponding frequency. */
- pr_err("db8500-cpufreq: Failed to find cpufreq speed\n");
+ pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n");
return 0;
}
-static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
+static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
- int i = 0;
int res;
- armss_clk = clk_get(NULL, "armss");
- if (IS_ERR(armss_clk)) {
- pr_err("db8500-cpufreq : Failed to get armss clk\n");
- return PTR_ERR(armss_clk);
- }
-
- pr_info("db8500-cpufreq : Available frequencies:\n");
- while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
- pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
- i++;
- }
-
/* get policy fields based on the table */
res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (!res)
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
else {
- pr_err("db8500-cpufreq : Failed to read policy table\n");
- clk_put(armss_clk);
+ pr_err("dbx500-cpufreq: Failed to read policy table\n");
return res;
}
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
- policy->cur = db8500_cpufreq_getspeed(policy->cpu);
+ policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/*
@@ -128,52 +116,59 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
/* policy sharing between dual CPUs */
- cpumask_copy(policy->cpus, cpu_present_mask);
-
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ cpumask_setall(policy->cpus);
return 0;
}
-static struct cpufreq_driver db8500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = db8500_cpufreq_verify_speed,
- .target = db8500_cpufreq_target,
- .get = db8500_cpufreq_getspeed,
- .init = db8500_cpufreq_init,
- .name = "DB8500",
- .attr = db8500_cpufreq_attr,
+static struct cpufreq_driver dbx500_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+ .verify = dbx500_cpufreq_verify_speed,
+ .target = dbx500_cpufreq_target,
+ .get = dbx500_cpufreq_getspeed,
+ .init = dbx500_cpufreq_init,
+ .name = "DBX500",
+ .attr = dbx500_cpufreq_attr,
};
-static int db8500_cpufreq_probe(struct platform_device *pdev)
+static int dbx500_cpufreq_probe(struct platform_device *pdev)
{
- freq_table = dev_get_platdata(&pdev->dev);
+ int i = 0;
+ freq_table = dev_get_platdata(&pdev->dev);
if (!freq_table) {
- pr_err("db8500-cpufreq: Failed to fetch cpufreq table\n");
+ pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n");
return -ENODEV;
}
- return cpufreq_register_driver(&db8500_cpufreq_driver);
+ armss_clk = clk_get(&pdev->dev, "armss");
+ if (IS_ERR(armss_clk)) {
+ pr_err("dbx500-cpufreq: Failed to get armss clk\n");
+ return PTR_ERR(armss_clk);
+ }
+
+ pr_info("dbx500-cpufreq: Available frequencies:\n");
+ while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
+ pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
+ i++;
+ }
+
+ return cpufreq_register_driver(&dbx500_cpufreq_driver);
}
-static struct platform_driver db8500_cpufreq_plat_driver = {
+static struct platform_driver dbx500_cpufreq_plat_driver = {
.driver = {
- .name = "cpufreq-u8500",
+ .name = "cpufreq-ux500",
.owner = THIS_MODULE,
},
- .probe = db8500_cpufreq_probe,
+ .probe = dbx500_cpufreq_probe,
};
-static int __init db8500_cpufreq_register(void)
+static int __init dbx500_cpufreq_register(void)
{
- if (!cpu_is_u8500_family())
- return -ENODEV;
-
- pr_info("cpufreq for DB8500 started\n");
- return platform_driver_register(&db8500_cpufreq_plat_driver);
+ return platform_driver_register(&dbx500_cpufreq_plat_driver);
}
-device_initcall(db8500_cpufreq_register);
+device_initcall(dbx500_cpufreq_register);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("cpufreq driver for DB8500");
+MODULE_DESCRIPTION("cpufreq driver for DBX500");
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 7012ea8bf1e7..78057a357ddb 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -18,10 +18,10 @@
#include <linux/cpufreq.h>
#include <linux/suspend.h>
-#include <mach/cpufreq.h>
-
#include <plat/cpu.h>
+#include "exynos-cpufreq.h"
+
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
@@ -42,51 +42,56 @@ static unsigned int exynos_getspeed(unsigned int cpu)
return clk_get_rate(exynos_info->cpu_clk) / 1000;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_cpufreq_get_index(unsigned int freq)
+{
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ int index;
+
+ for (index = 0;
+ freq_table[index].frequency != CPUFREQ_TABLE_END; index++)
+ if (freq_table[index].frequency == freq)
+ break;
+
+ if (freq_table[index].frequency == CPUFREQ_TABLE_END)
+ return -EINVAL;
+
+ return index;
+}
+
+static int exynos_cpufreq_scale(unsigned int target_freq)
{
- unsigned int index, old_index;
- unsigned int arm_volt, safe_arm_volt = 0;
- int ret = 0;
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
unsigned int *volt_table = exynos_info->volt_table;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+ unsigned int arm_volt, safe_arm_volt = 0;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
-
- mutex_lock(&cpufreq_lock);
+ int index, old_index;
+ int ret = 0;
freqs.old = policy->cur;
+ freqs.new = target_freq;
+ freqs.cpu = policy->cpu;
- if (frequency_locked && target_freq != locking_frequency) {
- ret = -EAGAIN;
+ if (freqs.new == freqs.old)
goto out;
- }
/*
	 * The policy max may have been changed so that we cannot get a proper
	 * old_index with cpufreq_frequency_table_target(). Thus, ignore the
	 * policy and get the index from the raw frequency table.
*/
- for (old_index = 0;
- freq_table[old_index].frequency != CPUFREQ_TABLE_END;
- old_index++)
- if (freq_table[old_index].frequency == freqs.old)
- break;
-
- if (freq_table[old_index].frequency == CPUFREQ_TABLE_END) {
- ret = -EINVAL;
+ old_index = exynos_cpufreq_get_index(freqs.old);
+ if (old_index < 0) {
+ ret = old_index;
goto out;
}
- if (cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
+ index = exynos_cpufreq_get_index(target_freq);
+ if (index < 0) {
+ ret = index;
goto out;
}
- freqs.new = freq_table[index].frequency;
- freqs.cpu = policy->cpu;
-
/*
* ARM clock source will be changed APLL to MPLL temporary
* To support this level, need to control regulator for
@@ -106,15 +111,25 @@ static int exynos_target(struct cpufreq_policy *policy,
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
/* Firstly, voltage up to increase frequency */
- regulator_set_voltage(arm_regulator, arm_volt,
- arm_volt);
+ ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, arm_volt);
+ goto out;
+ }
}
- if (safe_arm_volt)
- regulator_set_voltage(arm_regulator, safe_arm_volt,
+ if (safe_arm_volt) {
+ ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
safe_arm_volt);
- if (freqs.new != freqs.old)
- exynos_info->set_freq(old_index, index);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, safe_arm_volt);
+ goto out;
+ }
+ }
+
+ exynos_info->set_freq(old_index, index);
for_each_cpu(freqs.cpu, policy->cpus)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
@@ -125,8 +140,44 @@ static int exynos_target(struct cpufreq_policy *policy,
/* down the voltage after frequency change */
regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, arm_volt);
+ goto out;
+ }
+ }
+
+out:
+
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
+
+static int exynos_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ unsigned int index;
+ unsigned int new_freq;
+ int ret = 0;
+
+ mutex_lock(&cpufreq_lock);
+
+ if (frequency_locked)
+ goto out;
+
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index)) {
+ ret = -EINVAL;
+ goto out;
}
+ new_freq = freq_table[index].frequency;
+
+ ret = exynos_cpufreq_scale(new_freq);
+
out:
mutex_unlock(&cpufreq_lock);
@@ -163,51 +214,26 @@ static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *v)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
- static unsigned int saved_frequency;
- unsigned int temp;
+ int ret;
- mutex_lock(&cpufreq_lock);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
- if (frequency_locked)
- goto out;
-
+ mutex_lock(&cpufreq_lock);
frequency_locked = true;
+ mutex_unlock(&cpufreq_lock);
- if (locking_frequency) {
- saved_frequency = exynos_getspeed(0);
+ ret = exynos_cpufreq_scale(locking_frequency);
+ if (ret < 0)
+ return NOTIFY_BAD;
- mutex_unlock(&cpufreq_lock);
- exynos_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
- }
break;
case PM_POST_SUSPEND:
- if (saved_frequency) {
- /*
- * While frequency_locked, only locking_frequency
- * is valid for target(). In order to use
- * saved_frequency while keeping frequency_locked,
- * we temporarly overwrite locking_frequency.
- */
- temp = locking_frequency;
- locking_frequency = saved_frequency;
-
- mutex_unlock(&cpufreq_lock);
- exynos_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
-
- locking_frequency = temp;
- }
+ mutex_lock(&cpufreq_lock);
frequency_locked = false;
+ mutex_unlock(&cpufreq_lock);
break;
}
-out:
- mutex_unlock(&cpufreq_lock);
return NOTIFY_OK;
}
@@ -222,35 +248,34 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
- locking_frequency = exynos_getspeed(0);
-
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
- /*
- * EXYNOS4 multi-core processors has 2 cores
- * that the frequency cannot be set independently.
- * Each cpu is bound to the same speed.
- * So the affected cpu is all of the cpus.
- */
- if (num_online_cpus() == 1) {
- cpumask_copy(policy->related_cpus, cpu_possible_mask);
- cpumask_copy(policy->cpus, cpu_online_mask);
- } else {
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
- cpumask_setall(policy->cpus);
- }
+ cpumask_setall(policy->cpus);
return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
}
+static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *exynos_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos_verify_speed,
.target = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
+ .exit = exynos_cpufreq_cpu_exit,
.name = "exynos_cpufreq",
+ .attr = exynos_cpufreq_attr,
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
@@ -288,6 +313,8 @@ static int __init exynos_cpufreq_init(void)
goto err_vdd_arm;
}
+ locking_frequency = exynos_getspeed(0);
+
register_pm_notifier(&exynos_cpufreq_nb);
if (cpufreq_register_driver(&exynos_driver)) {
@@ -299,8 +326,7 @@ static int __init exynos_cpufreq_init(void)
err_cpufreq:
unregister_pm_notifier(&exynos_cpufreq_nb);
- if (!IS_ERR(arm_regulator))
- regulator_put(arm_regulator);
+ regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
pr_debug("%s: failed initialization\n", __func__);
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
new file mode 100644
index 000000000000..92b852ee5ddc
--- /dev/null
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - CPUFreq support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+enum cpufreq_level_index {
+ L0, L1, L2, L3, L4,
+ L5, L6, L7, L8, L9,
+ L10, L11, L12, L13, L14,
+ L15, L16, L17, L18, L19,
+ L20,
+};
+
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+ { \
+ .freq = (f) * 1000, \
+ .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+ (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+ .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+ .mps = ((m) << 16 | (p) << 8 | (s)), \
+ }
+
+struct apll_freq {
+ unsigned int freq;
+ u32 clk_div_cpu0;
+ u32 clk_div_cpu1;
+ u32 mps;
+};
+
+struct exynos_dvfs_info {
+ unsigned long mpll_freq_khz;
+ unsigned int pll_safe_idx;
+ struct clk *cpu_clk;
+ unsigned int *volt_table;
+ struct cpufreq_frequency_table *freq_table;
+ void (*set_freq)(unsigned int, unsigned int);
+ bool (*need_apll_change)(unsigned int, unsigned int);
+};
+
+extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
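APLL_FREQ() packs one operating point into the three register images the SoC-specific code later writes verbatim. Expanding one entry by hand (the 800 MHz row of the 4x12 table elsewhere in this patch) shows the layout; the 24 MHz reference clock and the FOUT relation are the usual Exynos APLL assumptions, not something this header states:

/*
 * APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0,  3, 2, 0,  100, 3, 0) expands to
 *
 *   .freq         = 800 * 1000 = 800000 (kHz)
 *   .clk_div_cpu0 = 0 | 2 << 4 | 5 << 8 | 0 << 12 |
 *                   3 << 16 | 1 << 20 | 1 << 24 | 0 << 28  = 0x01130520
 *   .clk_div_cpu1 = 3 << 0 | 2 << 4 | 0 << 8               = 0x00000023
 *   .mps          = 100 << 16 | 3 << 8 | 0                 = 0x00640300
 *
 * With M = 100, P = 3, S = 0 and a 24 MHz reference,
 *   FOUT = M * FIN / (P * 2^S) = 100 * 24 MHz / 3 = 800 MHz,
 * which matches the .freq field.
 */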
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index fb148fa27678..add7fbec4fc9 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -18,99 +18,40 @@
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END L5
-
-static int max_support_idx = L0;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
-};
-
-static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
+static unsigned int exynos4210_volt_table[] = {
1250000, 1150000, 1050000, 975000, 950000,
};
-
-static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos4210_freq_table[] = {
- {L0, 1200*1000},
- {L1, 1000*1000},
- {L2, 800*1000},
- {L3, 500*1000},
- {L4, 200*1000},
+ {L0, 1200 * 1000},
+ {L1, 1000 * 1000},
+ {L2, 800 * 1000},
+ {L3, 500 * 1000},
+ {L4, 200 * 1000},
{0, CPUFREQ_TABLE_END},
};
-static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
+static struct apll_freq apll_freq_4210[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, RESERVED
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
-
- /* ARM L0: 1200MHz */
- { 0, 3, 7, 3, 4, 1, 7 },
-
- /* ARM L1: 1000MHz */
- { 0, 3, 7, 3, 4, 1, 7 },
-
- /* ARM L2: 800MHz */
- { 0, 3, 7, 3, 3, 1, 7 },
-
- /* ARM L3: 500MHz */
- { 0, 3, 7, 3, 3, 1, 7 },
-
- /* ARM L4: 200MHz */
- { 0, 1, 3, 1, 3, 1, 0 },
-};
-
-static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
- /*
- * Clock divider value for following
- * { DIVCOPY, DIVHPM }
- */
-
- /* ARM L0: 1200MHz */
- { 5, 0 },
-
- /* ARM L1: 1000MHz */
- { 4, 0 },
-
- /* ARM L2: 800MHz */
- { 3, 0 },
-
- /* ARM L3: 500MHz */
- { 3, 0 },
-
- /* ARM L4: 200MHz */
- { 3, 0 },
-};
-
-static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1200MHz */
- ((150 << 16) | (3 << 8) | 1),
-
- /* APLL FOUT L1: 1000MHz */
- ((250 << 16) | (6 << 8) | 1),
-
- /* APLL FOUT L2: 800MHz */
- ((200 << 16) | (6 << 8) | 1),
-
- /* APLL FOUT L3: 500MHz */
- ((250 << 16) | (6 << 8) | 2),
-
- /* APLL FOUT L4: 200MHz */
- ((200 << 16) | (6 << 8) | 3),
+ APLL_FREQ(1200, 0, 3, 7, 3, 4, 1, 7, 0, 5, 0, 0, 150, 3, 1),
+ APLL_FREQ(1000, 0, 3, 7, 3, 4, 1, 7, 0, 4, 0, 0, 250, 6, 1),
+ APLL_FREQ(800, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 200, 6, 1),
+ APLL_FREQ(500, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 250, 6, 2),
+ APLL_FREQ(200, 0, 1, 3, 1, 3, 1, 0, 0, 3, 0, 0, 200, 6, 3),
};
static void exynos4210_set_clkdiv(unsigned int div_index)
@@ -119,7 +60,7 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos4210_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_4210[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
@@ -129,12 +70,7 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU1 */
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1);
-
- tmp &= ~((0x7 << 4) | 0x7);
-
- tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
- (clkdiv_cpu1[div_index][1] << 0));
+ tmp = apll_freq_4210[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
@@ -162,7 +98,7 @@ static void exynos4210_set_apll(unsigned int index)
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4210_apll_pms_table[index];
+ tmp |= apll_freq_4210[index].mps;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 4. wait_lock_time */
@@ -179,10 +115,10 @@ static void exynos4210_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8);
- unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8);
+ unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -200,7 +136,7 @@ static void exynos4210_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4210[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
} else {
/* Clock Configuration Procedure */
@@ -214,7 +150,7 @@ static void exynos4210_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4210[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 2. Change the system clock divider values */
@@ -231,8 +167,6 @@ static void exynos4210_set_frequency(unsigned int old_index,
int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
cpu_clk = clk_get(NULL, "armclk");
@@ -253,33 +187,9 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);
-
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
- EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
- EXYNOS4_CLKDIV_CPU0_ATB_MASK |
- EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
- EXYNOS4_CLKDIV_CPU0_APLL_MASK);
-
- tmp |= ((clkdiv_cpu0[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));
-
- exynos4210_clkdiv_table[i].clkdiv = tmp;
- }
-
info->mpll_freq_khz = rate;
- info->pm_lock_idx = L2;
+ /* 800Mhz */
info->pll_safe_idx = L2;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos4210_volt_table;
info->freq_table = exynos4210_freq_table;
@@ -289,14 +199,11 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
return 0;
err_mout_apll:
- if (!IS_ERR(mout_mpll))
- clk_put(mout_mpll);
+ clk_put(mout_mpll);
err_mout_mpll:
- if (!IS_ERR(moutcore))
- clk_put(moutcore);
+ clk_put(moutcore);
err_moutcore:
- if (!IS_ERR(cpu_clk))
- clk_put(cpu_clk);
+ clk_put(cpu_clk);
pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 8c5a7afa5b0b..08b7477b0aa2 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -18,28 +18,21 @@
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END (L13 + 1)
-
-static int max_support_idx;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
- unsigned int clkdiv1;
+static unsigned int exynos4x12_volt_table[] = {
+ 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
+ 1000000, 987500, 975000, 950000, 925000, 900000, 900000
};
-static unsigned int exynos4x12_volt_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
- {L0, 1500 * 1000},
+ {L0, CPUFREQ_ENTRY_INVALID},
{L1, 1400 * 1000},
{L2, 1300 * 1000},
{L3, 1200 * 1000},
@@ -56,247 +49,54 @@ static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct cpufreq_clkdiv exynos4x12_clkdiv_table[CPUFREQ_LEVEL_END];
+static struct apll_freq *apll_freq_4x12;
-static unsigned int clkdiv_cpu0_4212[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_4212[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL, DIVCORE2 }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
- /* ARM L0: 1500 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L1: 1400 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L2: 1300 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L3: 1200 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L4: 1100 MHz */
- { 0, 3, 6, 0, 4, 1, 2, 0 },
-
- /* ARM L5: 1000 MHz */
- { 0, 2, 5, 0, 4, 1, 1, 0 },
-
- /* ARM L6: 900 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L7: 800 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L8: 700 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L9: 600 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L10: 500 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L11: 400 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L12: 300 MHz */
- { 0, 2, 4, 0, 2, 1, 1, 0 },
-
- /* ARM L13: 200 MHz */
- { 0, 1, 3, 0, 1, 1, 1, 0 },
+ APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
+ APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
+ APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
+ APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
+ APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
+ APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
+ APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
+ APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
+ APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
+ APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
+ APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
+ APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
};
-static unsigned int clkdiv_cpu0_4412[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_4412[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL, DIVCORE2 }
- */
- /* ARM L0: 1500 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L1: 1400 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L2: 1300 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L3: 1200 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L4: 1100 MHz */
- { 0, 3, 6, 0, 4, 1, 2, 0 },
-
- /* ARM L5: 1000 MHz */
- { 0, 2, 5, 0, 4, 1, 1, 0 },
-
- /* ARM L6: 900 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L7: 800 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L8: 700 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L9: 600 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L10: 500 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L11: 400 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L12: 300 MHz */
- { 0, 2, 4, 0, 2, 1, 1, 0 },
-
- /* ARM L13: 200 MHz */
- { 0, 1, 3, 0, 1, 1, 1, 0 },
-};
-
-static unsigned int clkdiv_cpu1_4212[CPUFREQ_LEVEL_END][2] = {
- /* Clock divider value for following
- * { DIVCOPY, DIVHPM }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
+ * clock divider for COPY, HPM, CORES
+ * PLL M, P, S
*/
- /* ARM L0: 1500 MHz */
- { 6, 0 },
-
- /* ARM L1: 1400 MHz */
- { 6, 0 },
-
- /* ARM L2: 1300 MHz */
- { 5, 0 },
-
- /* ARM L3: 1200 MHz */
- { 5, 0 },
-
- /* ARM L4: 1100 MHz */
- { 4, 0 },
-
- /* ARM L5: 1000 MHz */
- { 4, 0 },
-
- /* ARM L6: 900 MHz */
- { 3, 0 },
-
- /* ARM L7: 800 MHz */
- { 3, 0 },
-
- /* ARM L8: 700 MHz */
- { 3, 0 },
-
- /* ARM L9: 600 MHz */
- { 3, 0 },
-
- /* ARM L10: 500 MHz */
- { 3, 0 },
-
- /* ARM L11: 400 MHz */
- { 3, 0 },
-
- /* ARM L12: 300 MHz */
- { 3, 0 },
-
- /* ARM L13: 200 MHz */
- { 3, 0 },
-};
-
-static unsigned int clkdiv_cpu1_4412[CPUFREQ_LEVEL_END][3] = {
- /* Clock divider value for following
- * { DIVCOPY, DIVHPM, DIVCORES }
- */
- /* ARM L0: 1500 MHz */
- { 6, 0, 7 },
-
- /* ARM L1: 1400 MHz */
- { 6, 0, 6 },
-
- /* ARM L2: 1300 MHz */
- { 5, 0, 6 },
-
- /* ARM L3: 1200 MHz */
- { 5, 0, 5 },
-
- /* ARM L4: 1100 MHz */
- { 4, 0, 5 },
-
- /* ARM L5: 1000 MHz */
- { 4, 0, 4 },
-
- /* ARM L6: 900 MHz */
- { 3, 0, 4 },
-
- /* ARM L7: 800 MHz */
- { 3, 0, 3 },
-
- /* ARM L8: 700 MHz */
- { 3, 0, 3 },
-
- /* ARM L9: 600 MHz */
- { 3, 0, 2 },
-
- /* ARM L10: 500 MHz */
- { 3, 0, 2 },
-
- /* ARM L11: 400 MHz */
- { 3, 0, 1 },
-
- /* ARM L12: 300 MHz */
- { 3, 0, 1 },
-
- /* ARM L13: 200 MHz */
- { 3, 0, 0 },
-};
-
-static unsigned int exynos4x12_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1500 MHz */
- ((250 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L1: 1400 MHz */
- ((175 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L2: 1300 MHz */
- ((325 << 16) | (6 << 8) | (0x0)),
-
- /* APLL FOUT L3: 1200 MHz */
- ((200 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L4: 1100 MHz */
- ((275 << 16) | (6 << 8) | (0x0)),
-
- /* APLL FOUT L5: 1000 MHz */
- ((125 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L6: 900 MHz */
- ((150 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L7: 800 MHz */
- ((100 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L8: 700 MHz */
- ((175 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L9: 600 MHz */
- ((200 << 16) | (4 << 8) | (0x1)),
-
- /* APLL FOUT L10: 500 MHz */
- ((125 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L11 400 MHz */
- ((100 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L12: 300 MHz */
- ((200 << 16) | (4 << 8) | (0x2)),
-
- /* APLL FOUT L13: 200 MHz */
- ((100 << 16) | (3 << 8) | (0x2)),
-};
-
-static const unsigned int asv_voltage_4x12[CPUFREQ_LEVEL_END] = {
- 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
- 1000000, 987500, 975000, 950000, 925000, 900000, 900000
+ APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
+ APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
+ APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
+ APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
+ APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
+ APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
+ APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
+ APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
+ APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
+ APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
+ APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
+ APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
};
static void exynos4x12_set_clkdiv(unsigned int div_index)
@@ -306,7 +106,7 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos4x12_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_4x12[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
@@ -314,7 +114,7 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
cpu_relax();
/* Change Divider - CPU1 */
- tmp = exynos4x12_clkdiv_table[div_index].clkdiv1;
+ tmp = apll_freq_4x12[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
if (soc_is_exynos4212())
@@ -341,14 +141,14 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != 0x2);
/* 2. Set APLL Lock time */
- pdiv = ((exynos4x12_apll_pms_table[index] >> 8) & 0x3f);
+ pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
__raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4x12_apll_pms_table[index];
+ tmp |= apll_freq_4x12[index].mps;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 4. wait_lock_time */
@@ -367,10 +167,10 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = exynos4x12_apll_pms_table[old_index] >> 8;
- unsigned int new_pm = exynos4x12_apll_pms_table[new_index] >> 8;
+ unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -387,7 +187,7 @@ static void exynos4x12_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4x12_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4x12[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
} else {
@@ -402,7 +202,7 @@ static void exynos4x12_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4x12_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4x12[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 2. Change the system clock divider values */
exynos4x12_set_clkdiv(new_index);
@@ -416,27 +216,10 @@ static void exynos4x12_set_frequency(unsigned int old_index,
}
}
-static void __init set_volt_table(void)
-{
- unsigned int i;
-
- max_support_idx = L1;
-
- /* Not supported */
- exynos4x12_freq_table[L0].frequency = CPUFREQ_ENTRY_INVALID;
-
- for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
- exynos4x12_volt_table[i] = asv_voltage_4x12[i];
-}
-
int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
- set_volt_table();
-
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -455,66 +238,14 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
-
- exynos4x12_clkdiv_table[i].index = i;
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);
-
- tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
- EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
- EXYNOS4_CLKDIV_CPU0_ATB_MASK |
- EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
- EXYNOS4_CLKDIV_CPU0_APLL_MASK);
-
- if (soc_is_exynos4212()) {
- tmp |= ((clkdiv_cpu0_4212[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0_4212[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0_4212[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0_4212[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0_4212[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0_4212[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0_4212[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));
- } else {
- tmp &= ~EXYNOS4_CLKDIV_CPU0_CORE2_MASK;
-
- tmp |= ((clkdiv_cpu0_4412[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0_4412[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0_4412[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0_4412[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0_4412[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0_4412[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0_4412[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT) |
- (clkdiv_cpu0_4412[i][7] << EXYNOS4_CLKDIV_CPU0_CORE2_SHIFT));
- }
-
- exynos4x12_clkdiv_table[i].clkdiv = tmp;
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1);
-
- if (soc_is_exynos4212()) {
- tmp &= ~(EXYNOS4_CLKDIV_CPU1_COPY_MASK |
- EXYNOS4_CLKDIV_CPU1_HPM_MASK);
- tmp |= ((clkdiv_cpu1_4212[i][0] << EXYNOS4_CLKDIV_CPU1_COPY_SHIFT) |
- (clkdiv_cpu1_4212[i][1] << EXYNOS4_CLKDIV_CPU1_HPM_SHIFT));
- } else {
- tmp &= ~(EXYNOS4_CLKDIV_CPU1_COPY_MASK |
- EXYNOS4_CLKDIV_CPU1_HPM_MASK |
- EXYNOS4_CLKDIV_CPU1_CORES_MASK);
- tmp |= ((clkdiv_cpu1_4412[i][0] << EXYNOS4_CLKDIV_CPU1_COPY_SHIFT) |
- (clkdiv_cpu1_4412[i][1] << EXYNOS4_CLKDIV_CPU1_HPM_SHIFT) |
- (clkdiv_cpu1_4412[i][2] << EXYNOS4_CLKDIV_CPU1_CORES_SHIFT));
- }
- exynos4x12_clkdiv_table[i].clkdiv1 = tmp;
- }
+ if (soc_is_exynos4212())
+ apll_freq_4x12 = apll_freq_4212;
+ else
+ apll_freq_4x12 = apll_freq_4412;
info->mpll_freq_khz = rate;
- info->pm_lock_idx = L5;
+ /* 800Mhz */
info->pll_safe_idx = L7;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos4x12_volt_table;
info->freq_table = exynos4x12_freq_table;
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index e64c253cb169..9fae466d7746 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -19,25 +19,21 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END (L15 + 1)
+#include "exynos-cpufreq.h"
-static int max_support_idx;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
- unsigned int clkdiv1;
+static unsigned int exynos5250_volt_table[] = {
+ 1300000, 1250000, 1225000, 1200000, 1150000,
+ 1125000, 1100000, 1075000, 1050000, 1025000,
+ 1012500, 1000000, 975000, 950000, 937500,
+ 925000
};
-static unsigned int exynos5250_volt_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{L0, 1700 * 1000},
{L1, 1600 * 1000},
@@ -47,8 +43,8 @@ static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{L5, 1200 * 1000},
{L6, 1100 * 1000},
{L7, 1000 * 1000},
- {L8, 900 * 1000},
- {L9, 800 * 1000},
+ {L8, 900 * 1000},
+ {L9, 800 * 1000},
{L10, 700 * 1000},
{L11, 600 * 1000},
{L12, 500 * 1000},
@@ -58,78 +54,30 @@ static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct cpufreq_clkdiv exynos5250_clkdiv_table[CPUFREQ_LEVEL_END];
-
-static unsigned int clkdiv_cpu0_5250[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_5250[] = {
/*
- * Clock divider value for following
- * { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 }
- */
- { 0, 3, 7, 7, 7, 3, 5, 0 }, /* 1700 MHz */
- { 0, 3, 7, 7, 7, 1, 4, 0 }, /* 1600 MHz */
- { 0, 2, 7, 7, 7, 1, 4, 0 }, /* 1500 MHz */
- { 0, 2, 7, 7, 6, 1, 4, 0 }, /* 1400 MHz */
- { 0, 2, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */
- { 0, 2, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */
- { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1100 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 800 MHz */
- { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 700 MHz */
- { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 600 MHz */
- { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 500 MHz */
- { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 400 MHz */
- { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 300 MHz */
- { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 200 MHz */
-};
-
-static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
- /* Clock divider value for following
- * { COPY, HPM }
+ * values:
+ * freq
+ * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
- { 0, 2 }, /* 1700 MHz */
- { 0, 2 }, /* 1600 MHz */
- { 0, 2 }, /* 1500 MHz */
- { 0, 2 }, /* 1400 MHz */
- { 0, 2 }, /* 1300 MHz */
- { 0, 2 }, /* 1200 MHz */
- { 0, 2 }, /* 1100 MHz */
- { 0, 2 }, /* 1000 MHz */
- { 0, 2 }, /* 900 MHz */
- { 0, 2 }, /* 800 MHz */
- { 0, 2 }, /* 700 MHz */
- { 0, 2 }, /* 600 MHz */
- { 0, 2 }, /* 500 MHz */
- { 0, 2 }, /* 400 MHz */
- { 0, 2 }, /* 300 MHz */
- { 0, 2 }, /* 200 MHz */
-};
-
-static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
- ((425 << 16) | (6 << 8) | 0), /* 1700 MHz */
- ((200 << 16) | (3 << 8) | 0), /* 1600 MHz */
- ((250 << 16) | (4 << 8) | 0), /* 1500 MHz */
- ((175 << 16) | (3 << 8) | 0), /* 1400 MHz */
- ((325 << 16) | (6 << 8) | 0), /* 1300 MHz */
- ((200 << 16) | (4 << 8) | 0), /* 1200 MHz */
- ((275 << 16) | (6 << 8) | 0), /* 1100 MHz */
- ((125 << 16) | (3 << 8) | 0), /* 1000 MHz */
- ((150 << 16) | (4 << 8) | 0), /* 900 MHz */
- ((100 << 16) | (3 << 8) | 0), /* 800 MHz */
- ((175 << 16) | (3 << 8) | 1), /* 700 MHz */
- ((200 << 16) | (4 << 8) | 1), /* 600 MHz */
- ((125 << 16) | (3 << 8) | 1), /* 500 MHz */
- ((100 << 16) | (3 << 8) | 1), /* 400 MHz */
- ((200 << 16) | (4 << 8) | 2), /* 300 MHz */
- ((100 << 16) | (3 << 8) | 2), /* 200 MHz */
-};
-
-/* ASV group voltage table */
-static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = {
- 1300000, 1250000, 1225000, 1200000, 1150000,
- 1125000, 1100000, 1075000, 1050000, 1025000,
- 1012500, 1000000, 975000, 950000, 937500,
- 925000
+ APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0),
+ APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0),
+ APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0),
+ APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0),
+ APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0),
+ APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0),
+ APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0),
+ APLL_FREQ(900, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0),
+ APLL_FREQ(800, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0),
+ APLL_FREQ(700, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1),
+ APLL_FREQ(600, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1),
+ APLL_FREQ(500, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1),
+ APLL_FREQ(400, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1),
+ APLL_FREQ(300, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2),
};
static void set_clkdiv(unsigned int div_index)
@@ -138,7 +86,7 @@ static void set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos5250_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_5250[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS5_CLKDIV_CPU0);
@@ -146,7 +94,7 @@ static void set_clkdiv(unsigned int div_index)
cpu_relax();
/* Change Divider - CPU1 */
- tmp = exynos5250_clkdiv_table[div_index].clkdiv1;
+ tmp = apll_freq_5250[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS5_CLKDIV_CPU1);
@@ -169,14 +117,14 @@ static void set_apll(unsigned int new_index,
} while (tmp != 0x2);
/* 2. Set APLL Lock time */
- pdiv = ((exynos5_apll_pms_table[new_index] >> 8) & 0x3f);
+ pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f);
__raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK);
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos5_apll_pms_table[new_index];
+ tmp |= apll_freq_5250[new_index].mps;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
/* 4. wait_lock_time */
@@ -196,10 +144,10 @@ static void set_apll(unsigned int new_index,
}
-bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = (exynos5_apll_pms_table[old_index] >> 8);
- unsigned int new_pm = (exynos5_apll_pms_table[new_index] >> 8);
+ unsigned int old_pm = apll_freq_5250[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_5250[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -216,7 +164,7 @@ static void exynos5250_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos5_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_5250[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
} else {
@@ -231,7 +179,7 @@ static void exynos5250_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos5_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_5250[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
/* 2. Change the system clock divider values */
set_clkdiv(new_index);
@@ -245,24 +193,10 @@ static void exynos5250_set_frequency(unsigned int old_index,
}
}
-static void __init set_volt_table(void)
-{
- unsigned int i;
-
- max_support_idx = L0;
-
- for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
- exynos5250_volt_table[i] = asv_voltage_5250[i];
-}
-
int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
- set_volt_table();
-
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -281,44 +215,9 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
-
- exynos5250_clkdiv_table[i].index = i;
-
- tmp = __raw_readl(EXYNOS5_CLKDIV_CPU0);
-
- tmp &= ~((0x7 << 0) | (0x7 << 4) | (0x7 << 8) |
- (0x7 << 12) | (0x7 << 16) | (0x7 << 20) |
- (0x7 << 24) | (0x7 << 28));
-
- tmp |= ((clkdiv_cpu0_5250[i][0] << 0) |
- (clkdiv_cpu0_5250[i][1] << 4) |
- (clkdiv_cpu0_5250[i][2] << 8) |
- (clkdiv_cpu0_5250[i][3] << 12) |
- (clkdiv_cpu0_5250[i][4] << 16) |
- (clkdiv_cpu0_5250[i][5] << 20) |
- (clkdiv_cpu0_5250[i][6] << 24) |
- (clkdiv_cpu0_5250[i][7] << 28));
-
- exynos5250_clkdiv_table[i].clkdiv = tmp;
-
- tmp = __raw_readl(EXYNOS5_CLKDIV_CPU1);
-
- tmp &= ~((0x7 << 0) | (0x7 << 4));
-
- tmp |= ((clkdiv_cpu1_5250[i][0] << 0) |
- (clkdiv_cpu1_5250[i][1] << 4));
-
- exynos5250_clkdiv_table[i].clkdiv1 = tmp;
- }
-
info->mpll_freq_khz = rate;
- /* 1000Mhz */
- info->pm_lock_idx = L7;
/* 800Mhz */
info->pll_safe_idx = L9;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos5250_volt_table;
info->freq_table = exynos5250_freq_table;
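
Editor's note: the apll_freq_5250 table above packs the PLL M, P and S values into a single word, (M << 16) | (P << 8) | S, which the code then masks and shifts when reprogramming APLL_CON0. As a minimal illustration (not part of the driver), a helper could decode that word with the same masks; the 24 MHz input clock and the usual Samsung PLL relation Fout = M * Fin / (P * 2^S) are assumptions here.

#include <stdio.h>
#include <stdint.h>

/* Illustrative decode of the packed (M << 16) | (P << 8) | S word. */
struct apll_pms { unsigned int m, p, s; };

static struct apll_pms decode_mps(uint32_t mps)
{
	struct apll_pms pms;

	pms.m = (mps >> 16) & 0x3ff;	/* same mask as the APLL_CON0 M field */
	pms.p = (mps >> 8) & 0x3f;	/* same mask as the APLL_CON0 P field */
	pms.s = mps & 0x7;		/* post-divider exponent */
	return pms;
}

int main(void)
{
	/* 800 MHz entry from the table above: M=100, P=3, S=0 */
	struct apll_pms pms = decode_mps((100 << 16) | (3 << 8) | 0);

	/* assumed relation: Fout = M * Fin / (P * 2^S), Fin = 24 MHz */
	printf("%u MHz\n", pms.m * 24 / (pms.p * (1u << pms.s)));
	return 0;
}
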
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 49cda256efb2..d7a79662e24c 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -63,9 +63,6 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
- if (!cpu_online(policy->cpu))
- return -EINVAL;
-
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
@@ -121,9 +118,6 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
break;
}
- if (!cpu_online(policy->cpu))
- return -EINVAL;
-
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
@@ -227,6 +221,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
+{
+ pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
+ per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
+ policy->last_cpu);
+ per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
+}
+
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
return per_cpu(cpufreq_show_table, cpu);
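
Editor's note: the new cpufreq_frequency_table_update_policy_cpu() simply hands the per-CPU show_table pointer from policy->last_cpu to policy->cpu when the CPU managing a policy changes. A toy model of that handoff, using a plain array instead of per_cpu variables, looks like this:

#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 4

/* Stand-in for the per-CPU cpufreq_show_table pointers. */
static const char *show_table[NR_CPUS];

/* Move ownership of the table from last_cpu to new_cpu, as above. */
static void update_policy_cpu(unsigned int new_cpu, unsigned int last_cpu)
{
	show_table[new_cpu] = show_table[last_cpu];
	show_table[last_cpu] = NULL;
}

int main(void)
{
	show_table[0] = "freq-table-A";
	update_policy_cpu(2, 0);
	printf("cpu0=%s cpu2=%s\n",
	       show_table[0] ? show_table[0] : "(null)", show_table[2]);
	return 0;
}
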
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
new file mode 100644
index 000000000000..66e3a71b81a3
--- /dev/null
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver provides the clk notifier callbacks that are used when
+ * the cpufreq-cpu0 driver changes the frequency, to alert the highbank
+ * EnergyCore Management Engine (ECME) about the need to change
+ * voltage. The ECME interfaces with the actual voltage regulators.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/mailbox.h>
+#include <linux/platform_device.h>
+
+#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
+#define HB_CPUFREQ_IPC_LEN 7
+#define HB_CPUFREQ_VOLT_RETRIES 15
+
+static int hb_voltage_change(unsigned int freq)
+{
+ int i;
+ u32 msg[HB_CPUFREQ_IPC_LEN];
+
+ msg[0] = HB_CPUFREQ_CHANGE_NOTE;
+ msg[1] = freq / 1000000;
+ for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
+ msg[i] = 0;
+
+ return pl320_ipc_transmit(msg);
+}
+
+static int hb_cpufreq_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *hclk)
+{
+ struct clk_notifier_data *clk_data = hclk;
+ int i = 0;
+
+ if (action == PRE_RATE_CHANGE) {
+ if (clk_data->new_rate > clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ } else if (action == POST_RATE_CHANGE) {
+ if (clk_data->new_rate < clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hb_cpufreq_clk_nb = {
+ .notifier_call = hb_cpufreq_clk_notify,
+};
+
+static int hb_cpufreq_driver_init(void)
+{
+ struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
+ struct device *cpu_dev;
+ struct clk *cpu_clk;
+ struct device_node *np;
+ int ret;
+
+ if (!of_machine_is_compatible("calxeda,highbank"))
+ return -ENODEV;
+
+ for_each_child_of_node(of_find_node_by_path("/cpus"), np)
+ if (of_get_property(np, "operating-points", NULL))
+ break;
+
+ if (!np) {
+ pr_err("failed to find highbank cpufreq node\n");
+ return -ENOENT;
+ }
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get highbank cpufreq device\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ cpu_dev->of_node = np;
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ pr_err("failed to get cpu0 clock: %d\n", ret);
+ goto out_put_node;
+ }
+
+ ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb);
+ if (ret) {
+ pr_err("failed to register clk notifier: %d\n", ret);
+ goto out_put_node;
+ }
+
+ /* Instantiate cpufreq-cpu0 */
+ platform_device_register_full(&devinfo);
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+module_init(hb_cpufreq_driver_init);
+
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
+MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
new file mode 100644
index 000000000000..d6b6ef350cb6
--- /dev/null
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define PU_SOC_VOLTAGE_NORMAL 1250000
+#define PU_SOC_VOLTAGE_HIGH 1275000
+#define FREQ_1P2_GHZ 1200000000
+
+static struct regulator *arm_reg;
+static struct regulator *pu_reg;
+static struct regulator *soc_reg;
+
+static struct clk *arm_clk;
+static struct clk *pll1_sys_clk;
+static struct clk *pll1_sw_clk;
+static struct clk *step_clk;
+static struct clk *pll2_pfd2_396m_clk;
+
+static struct device *cpu_dev;
+static struct cpufreq_frequency_table *freq_table;
+static unsigned int transition_latency;
+
+static int imx6q_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int imx6q_get_speed(unsigned int cpu)
+{
+ return clk_get_rate(arm_clk) / 1000;
+}
+
+static int imx6q_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ struct opp *opp;
+ unsigned long freq_hz, volt, volt_old;
+ unsigned int index, cpu;
+ int ret;
+
+ ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &index);
+ if (ret) {
+ dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
+ target_freq, ret);
+ return ret;
+ }
+
+ freqs.new = freq_table[index].frequency;
+ freq_hz = freqs.new * 1000;
+ freqs.old = clk_get_rate(arm_clk) / 1000;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ for_each_online_cpu(cpu) {
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+ rcu_read_lock();
+ opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
+ return PTR_ERR(opp);
+ }
+
+ volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ volt_old = regulator_get_voltage(arm_reg);
+
+ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+ freqs.old / 1000, volt_old / 1000,
+ freqs.new / 1000, volt / 1000);
+
+ /* scaling up? scale voltage before frequency */
+ if (freqs.new > freqs.old) {
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret) {
+ dev_err(cpu_dev,
+ "failed to scale vddarm up: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Need to increase vddpu and vddsoc for safety
+ * if we are about to run at 1.2 GHz.
+ */
+ if (freqs.new == FREQ_1P2_GHZ / 1000) {
+ regulator_set_voltage_tol(pu_reg,
+ PU_SOC_VOLTAGE_HIGH, 0);
+ regulator_set_voltage_tol(soc_reg,
+ PU_SOC_VOLTAGE_HIGH, 0);
+ }
+ }
+
+ /*
+ * The setpoints are selected per PLL/PFD frequencies, so we need to
+ * reprogram PLL for frequency scaling. The procedure of reprogramming
+ * PLL1 is as below.
+ *
+ * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
+ * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
+ * - Disable pll2_pfd2_396m_clk
+ */
+ clk_prepare_enable(pll2_pfd2_396m_clk);
+ clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+ clk_set_parent(pll1_sw_clk, step_clk);
+ if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+ clk_set_rate(pll1_sys_clk, freqs.new * 1000);
+ /*
+ * If we are leaving 396 MHz set-point, we need to enable
+ * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep
+ * their use count correct.
+ */
+ if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) {
+ clk_prepare_enable(pll1_sys_clk);
+ clk_disable_unprepare(pll2_pfd2_396m_clk);
+ }
+ clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ clk_disable_unprepare(pll2_pfd2_396m_clk);
+ } else {
+ /*
+ * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient
+ * to provide the frequency.
+ */
+ clk_disable_unprepare(pll1_sys_clk);
+ }
+
+ /* Ensure the arm clock divider is what we expect */
+ ret = clk_set_rate(arm_clk, freqs.new * 1000);
+ if (ret) {
+ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+ regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ return ret;
+ }
+
+ /* scaling down? scale voltage after frequency */
+ if (freqs.new < freqs.old) {
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret)
+ dev_warn(cpu_dev,
+ "failed to scale vddarm down: %d\n", ret);
+
+ if (freqs.old == FREQ_1P2_GHZ / 1000) {
+ regulator_set_voltage_tol(pu_reg,
+ PU_SOC_VOLTAGE_NORMAL, 0);
+ regulator_set_voltage_tol(soc_reg,
+ PU_SOC_VOLTAGE_NORMAL, 0);
+ }
+ }
+
+ for_each_online_cpu(cpu) {
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+
+ return 0;
+}
+
+static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
+ return ret;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->cur = clk_get_rate(arm_clk) / 1000;
+ cpumask_setall(policy->cpus);
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+ return 0;
+}
+
+static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *imx6q_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver imx6q_cpufreq_driver = {
+ .verify = imx6q_verify_speed,
+ .target = imx6q_set_target,
+ .get = imx6q_get_speed,
+ .init = imx6q_cpufreq_init,
+ .exit = imx6q_cpufreq_exit,
+ .name = "imx6q-cpufreq",
+ .attr = imx6q_cpufreq_attr,
+};
+
+static int imx6q_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct opp *opp;
+ unsigned long min_volt, max_volt;
+ int num, ret;
+
+ cpu_dev = &pdev->dev;
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np) {
+ dev_err(cpu_dev, "failed to find cpu0 node\n");
+ return -ENOENT;
+ }
+
+ cpu_dev->of_node = np;
+
+ arm_clk = devm_clk_get(cpu_dev, "arm");
+ pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
+ pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
+ step_clk = devm_clk_get(cpu_dev, "step");
+ pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
+ if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
+ IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
+ dev_err(cpu_dev, "failed to get clocks\n");
+ ret = -ENOENT;
+ goto put_node;
+ }
+
+ arm_reg = devm_regulator_get(cpu_dev, "arm");
+ pu_reg = devm_regulator_get(cpu_dev, "pu");
+ soc_reg = devm_regulator_get(cpu_dev, "soc");
+ if (!arm_reg || !pu_reg || !soc_reg) {
+ dev_err(cpu_dev, "failed to get regulators\n");
+ ret = -ENOENT;
+ goto put_node;
+ }
+
+ /* We expect an OPP table supplied by platform */
+ num = opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto put_node;
+ }
+
+ ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto put_node;
+ }
+
+ if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ transition_latency = CPUFREQ_ETERNAL;
+
+ /*
+ * OPP is maintained in order of increasing frequency, and
+ * freq_table initialised from OPP is therefore sorted in the
+ * same order.
+ */
+ rcu_read_lock();
+ opp = opp_find_freq_exact(cpu_dev,
+ freq_table[0].frequency * 1000, true);
+ min_volt = opp_get_voltage(opp);
+ opp = opp_find_freq_exact(cpu_dev,
+ freq_table[--num].frequency * 1000, true);
+ max_volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+
+ /* Count vddpu and vddsoc latency in for 1.2 GHz support */
+ if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) {
+ ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL,
+ PU_SOC_VOLTAGE_HIGH);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL,
+ PU_SOC_VOLTAGE_HIGH);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
+
+ ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
+ if (ret) {
+ dev_err(cpu_dev, "failed register driver: %d\n", ret);
+ goto free_freq_table;
+ }
+
+ of_node_put(np);
+ return 0;
+
+free_freq_table:
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static int imx6q_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&imx6q_cpufreq_driver);
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+
+ return 0;
+}
+
+static struct platform_driver imx6q_cpufreq_platdrv = {
+ .driver = {
+ .name = "imx6q-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = imx6q_cpufreq_probe,
+ .remove = imx6q_cpufreq_remove,
+};
+module_platform_driver(imx6q_cpufreq_platdrv);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
new file mode 100644
index 000000000000..096fde0ebcb5
--- /dev/null
+++ b/drivers/cpufreq/intel_pstate.c
@@ -0,0 +1,823 @@
+/*
+ * intel_pstate.c: Native P state management for Intel processors
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <trace/events/power.h>
+
+#include <asm/div64.h>
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+
+#define SAMPLE_COUNT 3
+
+#define FRAC_BITS 8
+#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
+#define fp_toint(X) ((X) >> FRAC_BITS)
+
+static inline int32_t mul_fp(int32_t x, int32_t y)
+{
+ return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
+}
+
+static inline int32_t div_fp(int32_t x, int32_t y)
+{
+ return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
+}
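
Editor's note: FRAC_BITS = 8 gives a fixed-point format with an 8-bit fraction; int_tofp(1) is 256, mul_fp() multiplies and shifts back down, div_fp() pre-shifts the numerator. A standalone worked example (div_s64() replaced by plain 64-bit division for user space):

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;	/* div_s64() in the kernel */
}

int main(void)
{
	int32_t pct = div_fp(int_tofp(17), int_tofp(100));	/* 0.17 -> 43/256 */
	int32_t result = mul_fp(pct, int_tofp(200));		/* ~33.6 in fixed point */

	/* the 8-bit fraction truncates 0.17 * 200 = 34 down to 33 */
	printf("p_gain raw = %d, 0.17 * 200 ~= %d\n", (int)pct, (int)fp_toint(result));
	return 0;
}
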
+
+struct sample {
+ ktime_t start_time;
+ ktime_t end_time;
+ int core_pct_busy;
+ int pstate_pct_busy;
+ u64 duration_us;
+ u64 idletime_us;
+ u64 aperf;
+ u64 mperf;
+ int freq;
+};
+
+struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
+ int turbo_pstate;
+};
+
+struct _pid {
+ int setpoint;
+ int32_t integral;
+ int32_t p_gain;
+ int32_t i_gain;
+ int32_t d_gain;
+ int deadband;
+ int last_err;
+};
+
+struct cpudata {
+ int cpu;
+
+ char name[64];
+
+ struct timer_list timer;
+
+ struct pstate_adjust_policy *pstate_policy;
+ struct pstate_data pstate;
+ struct _pid pid;
+ struct _pid idle_pid;
+
+ int min_pstate_count;
+ int idle_mode;
+
+ ktime_t prev_sample;
+ u64 prev_idle_time_us;
+ u64 prev_aperf;
+ u64 prev_mperf;
+ int sample_ptr;
+ struct sample samples[SAMPLE_COUNT];
+};
+
+static struct cpudata **all_cpu_data;
+struct pstate_adjust_policy {
+ int sample_rate_ms;
+ int deadband;
+ int setpoint;
+ int p_gain_pct;
+ int d_gain_pct;
+ int i_gain_pct;
+};
+
+static struct pstate_adjust_policy default_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 109,
+ .p_gain_pct = 17,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+};
+
+struct perf_limits {
+ int no_turbo;
+ int max_perf_pct;
+ int min_perf_pct;
+ int32_t max_perf;
+ int32_t min_perf;
+};
+
+static struct perf_limits limits = {
+ .no_turbo = 0,
+ .max_perf_pct = 100,
+ .max_perf = int_tofp(1),
+ .min_perf_pct = 0,
+ .min_perf = 0,
+};
+
+static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
+ int deadband, int integral) {
+ pid->setpoint = setpoint;
+ pid->deadband = deadband;
+ pid->integral = int_tofp(integral);
+ pid->last_err = setpoint - busy;
+}
+
+static inline void pid_p_gain_set(struct _pid *pid, int percent)
+{
+ pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static inline void pid_i_gain_set(struct _pid *pid, int percent)
+{
+ pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static inline void pid_d_gain_set(struct _pid *pid, int percent)
+{
+
+ pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static signed int pid_calc(struct _pid *pid, int busy)
+{
+ signed int err, result;
+ int32_t pterm, dterm, fp_error;
+ int32_t integral_limit;
+
+ err = pid->setpoint - busy;
+ fp_error = int_tofp(err);
+
+ if (abs(err) <= pid->deadband)
+ return 0;
+
+ pterm = mul_fp(pid->p_gain, fp_error);
+
+ pid->integral += fp_error;
+
+ /* limit the integral term */
+ integral_limit = int_tofp(30);
+ if (pid->integral > integral_limit)
+ pid->integral = integral_limit;
+ if (pid->integral < -integral_limit)
+ pid->integral = -integral_limit;
+
+ dterm = mul_fp(pid->d_gain, (err - pid->last_err));
+ pid->last_err = err;
+
+ result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
+
+ return (signed int)fp_toint(result);
+}
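
Editor's note: pid_calc() is a textbook discrete PID on the scaled-busy signal; a positive result asks for fewer P-state steps and a negative result for more (see intel_pstate_adjust_busy_pstate() below). The following floating-point restatement of one iteration with the default gains (setpoint 109, P 17%, I 4%, D 0%) is only a sketch, not the fixed-point kernel code:

#include <stdio.h>

static double integral, last_err;

static int pid_calc_float(double setpoint, double busy)
{
	double err = setpoint - busy;
	double p = 0.17 * err;
	double d = 0.00 * (err - last_err);

	integral += err;
	if (integral > 30)
		integral = 30;		/* same +/-30 clamp as above */
	if (integral < -30)
		integral = -30;
	last_err = err;

	return (int)(p + 0.04 * integral + d);
}

int main(void)
{
	/* busy well below the setpoint -> positive ctl -> step the
	 * P-state down, mirroring intel_pstate_adjust_busy_pstate() */
	printf("ctl = %d\n", pid_calc_float(109, 80));
	return 0;
}
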
+
+static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
+{
+ pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
+ pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
+ pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);
+
+ pid_reset(&cpu->pid,
+ cpu->pstate_policy->setpoint,
+ 100,
+ cpu->pstate_policy->deadband,
+ 0);
+}
+
+static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
+{
+ pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
+ pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
+ pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
+
+ pid_reset(&cpu->idle_pid,
+ 75,
+ 50,
+ cpu->pstate_policy->deadband,
+ 0);
+}
+
+static inline void intel_pstate_reset_all_pid(void)
+{
+ unsigned int cpu;
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu])
+ intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
+ }
+}
+
+/************************** debugfs begin ************************/
+static int pid_param_set(void *data, u64 val)
+{
+ *(u32 *)data = val;
+ intel_pstate_reset_all_pid();
+ return 0;
+}
+static int pid_param_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
+ pid_param_set, "%llu\n");
+
+struct pid_param {
+ char *name;
+ void *value;
+};
+
+static struct pid_param pid_files[] = {
+ {"sample_rate_ms", &default_policy.sample_rate_ms},
+ {"d_gain_pct", &default_policy.d_gain_pct},
+ {"i_gain_pct", &default_policy.i_gain_pct},
+ {"deadband", &default_policy.deadband},
+ {"setpoint", &default_policy.setpoint},
+ {"p_gain_pct", &default_policy.p_gain_pct},
+ {NULL, NULL}
+};
+
+static struct dentry *debugfs_parent;
+static void intel_pstate_debug_expose_params(void)
+{
+ int i = 0;
+
+ debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
+ if (IS_ERR_OR_NULL(debugfs_parent))
+ return;
+ while (pid_files[i].name) {
+ debugfs_create_file(pid_files[i].name, 0660,
+ debugfs_parent, pid_files[i].value,
+ &fops_pid_param);
+ i++;
+ }
+}
+
+/************************** debugfs end ************************/
+
+/************************** sysfs begin ************************/
+#define show_one(file_name, object) \
+ static ssize_t show_##file_name \
+ (struct kobject *kobj, struct attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%u\n", limits.object); \
+ }
+
+static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ limits.no_turbo = clamp_t(int, input, 0 , 1);
+
+ return count;
+}
+
+static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+ return count;
+}
+
+static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ limits.min_perf_pct = clamp_t(int, input, 0 , 100);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ return count;
+}
+
+show_one(no_turbo, no_turbo);
+show_one(max_perf_pct, max_perf_pct);
+show_one(min_perf_pct, min_perf_pct);
+
+define_one_global_rw(no_turbo);
+define_one_global_rw(max_perf_pct);
+define_one_global_rw(min_perf_pct);
+
+static struct attribute *intel_pstate_attributes[] = {
+ &no_turbo.attr,
+ &max_perf_pct.attr,
+ &min_perf_pct.attr,
+ NULL
+};
+
+static struct attribute_group intel_pstate_attr_group = {
+ .attrs = intel_pstate_attributes,
+};
+static struct kobject *intel_pstate_kobject;
+
+static void intel_pstate_sysfs_expose_params(void)
+{
+ int rc;
+
+ intel_pstate_kobject = kobject_create_and_add("intel_pstate",
+ &cpu_subsys.dev_root->kobj);
+ BUG_ON(!intel_pstate_kobject);
+ rc = sysfs_create_group(intel_pstate_kobject,
+ &intel_pstate_attr_group);
+ BUG_ON(rc);
+}
+
+/************************** sysfs end ************************/
+
+static int intel_pstate_min_pstate(void)
+{
+ u64 value;
+ rdmsrl(0xCE, value);
+ return (value >> 40) & 0xFF;
+}
+
+static int intel_pstate_max_pstate(void)
+{
+ u64 value;
+ rdmsrl(0xCE, value);
+ return (value >> 8) & 0xFF;
+}
+
+static int intel_pstate_turbo_pstate(void)
+{
+ u64 value;
+ int nont, ret;
+ rdmsrl(0x1AD, value);
+ nont = intel_pstate_max_pstate();
+ ret = ((value) & 255);
+ if (ret <= nont)
+ ret = nont;
+ return ret;
+}
+
+static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
+{
+ int max_perf = cpu->pstate.turbo_pstate;
+ int min_perf;
+ if (limits.no_turbo)
+ max_perf = cpu->pstate.max_pstate;
+
+ max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+ *max = clamp_t(int, max_perf,
+ cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
+
+ min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
+ *min = clamp_t(int, min_perf,
+ cpu->pstate.min_pstate, max_perf);
+}
+
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+{
+ int max_perf, min_perf;
+
+ intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+
+ pstate = clamp_t(int, pstate, min_perf, max_perf);
+
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+#ifndef MODULE
+ trace_cpu_frequency(pstate * 100000, cpu->cpu);
+#endif
+ cpu->pstate.current_pstate = pstate;
+ wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
+
+}
+
+static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
+{
+ int target;
+ target = cpu->pstate.current_pstate + steps;
+
+ intel_pstate_set_pstate(cpu, target);
+}
+
+static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
+{
+ int target;
+ target = cpu->pstate.current_pstate - steps;
+ intel_pstate_set_pstate(cpu, target);
+}
+
+static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+{
+ sprintf(cpu->name, "Intel 2nd generation core");
+
+ cpu->pstate.min_pstate = intel_pstate_min_pstate();
+ cpu->pstate.max_pstate = intel_pstate_max_pstate();
+ cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();
+
+ /*
+	 * Go to max pstate so we don't slow up boot if we are built-in.
+	 * If we are a module, we will take care of it during normal operation.
+ */
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+}
+
+static inline void intel_pstate_calc_busy(struct cpudata *cpu,
+ struct sample *sample)
+{
+ u64 core_pct;
+ sample->pstate_pct_busy = 100 - div64_u64(
+ sample->idletime_us * 100,
+ sample->duration_us);
+ core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+ sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000;
+
+ sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
+ 100);
+}
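
Editor's note: intel_pstate_calc_busy() derives the non-idle percentage of the sample window, the APERF/MPERF ratio as the delivered core clock percentage, an estimated frequency scaled off the turbo P-state, and their product as the busy figure fed to the PID. A worked example with illustrative numbers (a 10 ms window, 2 ms idle, APERF/MPERF = 0.9, turbo P-state 38):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t duration_us = 10000, idletime_us = 2000;
	uint64_t aperf = 900000, mperf = 1000000;
	int turbo_pstate = 38;			/* assumed 3.8 GHz turbo */

	int pstate_pct_busy = 100 - (int)(idletime_us * 100 / duration_us);
	int core_pct = (int)(aperf * 100 / mperf);
	int freq_khz = turbo_pstate * core_pct * 1000;
	int core_pct_busy = pstate_pct_busy * core_pct / 100;

	/* prints: non-idle 80%, core 90%, freq 3420000 kHz, busy 72% */
	printf("non-idle %d%%, core %d%%, freq %d kHz, busy %d%%\n",
	       pstate_pct_busy, core_pct, freq_khz, core_pct_busy);
	return 0;
}
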
+
+static inline void intel_pstate_sample(struct cpudata *cpu)
+{
+ ktime_t now;
+ u64 idle_time_us;
+ u64 aperf, mperf;
+
+ now = ktime_get();
+ idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
+
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+ /* for the first sample, don't actually record a sample, just
+ * set the baseline */
+ if (cpu->prev_idle_time_us > 0) {
+ cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+ cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
+ cpu->samples[cpu->sample_ptr].end_time = now;
+ cpu->samples[cpu->sample_ptr].duration_us =
+ ktime_us_delta(now, cpu->prev_sample);
+ cpu->samples[cpu->sample_ptr].idletime_us =
+ idle_time_us - cpu->prev_idle_time_us;
+
+ cpu->samples[cpu->sample_ptr].aperf = aperf;
+ cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+ cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+ intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+ }
+
+ cpu->prev_sample = now;
+ cpu->prev_idle_time_us = idle_time_us;
+ cpu->prev_aperf = aperf;
+ cpu->prev_mperf = mperf;
+}
+
+static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
+{
+ int sample_time, delay;
+
+ sample_time = cpu->pstate_policy->sample_rate_ms;
+ delay = msecs_to_jiffies(sample_time);
+ delay -= jiffies % delay;
+ mod_timer_pinned(&cpu->timer, jiffies + delay);
+}
+
+static inline void intel_pstate_idle_mode(struct cpudata *cpu)
+{
+ cpu->idle_mode = 1;
+}
+
+static inline void intel_pstate_normal_mode(struct cpudata *cpu)
+{
+ cpu->idle_mode = 0;
+}
+
+static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+{
+ int32_t busy_scaled;
+ int32_t core_busy, turbo_pstate, current_pstate;
+
+ core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+ turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
+ current_pstate = int_tofp(cpu->pstate.current_pstate);
+ busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
+
+ return fp_toint(busy_scaled);
+}
+
+static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+{
+ int busy_scaled;
+ struct _pid *pid;
+ signed int ctl = 0;
+ int steps;
+
+ pid = &cpu->pid;
+ busy_scaled = intel_pstate_get_scaled_busy(cpu);
+
+ ctl = pid_calc(pid, busy_scaled);
+
+ steps = abs(ctl);
+ if (ctl < 0)
+ intel_pstate_pstate_increase(cpu, steps);
+ else
+ intel_pstate_pstate_decrease(cpu, steps);
+}
+
+static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
+{
+ int busy_scaled;
+ struct _pid *pid;
+ int ctl = 0;
+ int steps;
+
+ pid = &cpu->idle_pid;
+
+ busy_scaled = intel_pstate_get_scaled_busy(cpu);
+
+ ctl = pid_calc(pid, 100 - busy_scaled);
+
+ steps = abs(ctl);
+ if (ctl < 0)
+ intel_pstate_pstate_decrease(cpu, steps);
+ else
+ intel_pstate_pstate_increase(cpu, steps);
+
+ if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
+ intel_pstate_normal_mode(cpu);
+}
+
+static void intel_pstate_timer_func(unsigned long __data)
+{
+ struct cpudata *cpu = (struct cpudata *) __data;
+
+ intel_pstate_sample(cpu);
+
+ if (!cpu->idle_mode)
+ intel_pstate_adjust_busy_pstate(cpu);
+ else
+ intel_pstate_adjust_idle_pstate(cpu);
+
+#if defined(XPERF_FIX)
+ if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
+ cpu->min_pstate_count++;
+ if (!(cpu->min_pstate_count % 5)) {
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+ intel_pstate_idle_mode(cpu);
+ }
+ } else
+ cpu->min_pstate_count = 0;
+#endif
+ intel_pstate_set_sample_time(cpu);
+}
+
+#define ICPU(model, policy) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
+
+static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ ICPU(0x2a, default_policy),
+ ICPU(0x2d, default_policy),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+
+static int intel_pstate_init_cpu(unsigned int cpunum)
+{
+
+ const struct x86_cpu_id *id;
+ struct cpudata *cpu;
+
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
+ if (!all_cpu_data[cpunum])
+ return -ENOMEM;
+
+ cpu = all_cpu_data[cpunum];
+
+ intel_pstate_get_cpu_pstates(cpu);
+
+ cpu->cpu = cpunum;
+ cpu->pstate_policy =
+ (struct pstate_adjust_policy *)id->driver_data;
+ init_timer_deferrable(&cpu->timer);
+ cpu->timer.function = intel_pstate_timer_func;
+ cpu->timer.data =
+ (unsigned long)cpu;
+ cpu->timer.expires = jiffies + HZ/100;
+ intel_pstate_busy_pid_reset(cpu);
+ intel_pstate_idle_pid_reset(cpu);
+ intel_pstate_sample(cpu);
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+
+ add_timer_on(&cpu->timer, cpunum);
+
+ pr_info("Intel pstate controlling: cpu %d\n", cpunum);
+
+ return 0;
+}
+
+static unsigned int intel_pstate_get(unsigned int cpu_num)
+{
+ struct sample *sample;
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[cpu_num];
+ if (!cpu)
+ return 0;
+ sample = &cpu->samples[cpu->sample_ptr];
+ return sample->freq;
+}
+
+static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+ int min, max;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ intel_pstate_get_min_max(cpu, &min, &max);
+
+ limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+ limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ limits.min_perf_pct = 100;
+ limits.min_perf = int_tofp(1);
+ limits.max_perf_pct = 100;
+ limits.max_perf = int_tofp(1);
+ limits.no_turbo = 0;
+ }
+
+ return 0;
+}
+
+static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_limits(policy,
+ policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
+ (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ int cpu = policy->cpu;
+
+ del_timer(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ all_cpu_data[cpu] = NULL;
+ return 0;
+}
+
+static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
+{
+ int rc, min_pstate, max_pstate;
+ struct cpudata *cpu;
+
+ rc = intel_pstate_init_cpu(policy->cpu);
+ if (rc)
+ return rc;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ if (!limits.no_turbo &&
+ limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ policy->min = min_pstate * 100000;
+ policy->max = max_pstate * 100000;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
+ policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+
+ return 0;
+}
+
+static struct cpufreq_driver intel_pstate_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = intel_pstate_verify_policy,
+ .setpolicy = intel_pstate_set_policy,
+ .get = intel_pstate_get,
+ .init = intel_pstate_cpu_init,
+ .exit = intel_pstate_cpu_exit,
+ .name = "intel_pstate",
+ .owner = THIS_MODULE,
+};
+
+static void intel_pstate_exit(void)
+{
+ int cpu;
+
+ sysfs_remove_group(intel_pstate_kobject,
+ &intel_pstate_attr_group);
+ debugfs_remove_recursive(debugfs_parent);
+
+ cpufreq_unregister_driver(&intel_pstate_driver);
+
+ if (!all_cpu_data)
+ return;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ del_timer_sync(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ }
+ }
+
+ put_online_cpus();
+ vfree(all_cpu_data);
+}
+module_exit(intel_pstate_exit);
+
+static int __initdata no_load;
+
+static int __init intel_pstate_init(void)
+{
+ int rc = 0;
+ const struct x86_cpu_id *id;
+
+ if (no_load)
+ return -ENODEV;
+
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ pr_info("Intel P-state driver initializing.\n");
+
+ all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+ if (!all_cpu_data)
+ return -ENOMEM;
+ memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
+
+ rc = cpufreq_register_driver(&intel_pstate_driver);
+ if (rc)
+ goto out;
+
+ intel_pstate_debug_expose_params();
+ intel_pstate_sysfs_expose_params();
+ return rc;
+out:
+ intel_pstate_exit();
+ return -ENODEV;
+}
+device_initcall(intel_pstate_init);
+
+static int __init intel_pstate_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "disable"))
+ no_load = 1;
+ return 0;
+}
+early_param("intel_pstate", intel_pstate_setup);
+
+MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
+MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
new file mode 100644
index 000000000000..0e83e3c24f5b
--- /dev/null
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -0,0 +1,259 @@
+/*
+ * kirkwood_freq.c: cpufreq driver for the Marvell kirkwood
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <asm/proc-fns.h>
+
+#define CPU_SW_INT_BLK BIT(28)
+
+static struct priv
+{
+ struct clk *cpu_clk;
+ struct clk *ddr_clk;
+ struct clk *powersave_clk;
+ struct device *dev;
+ void __iomem *base;
+} priv;
+
+#define STATE_CPU_FREQ 0x01
+#define STATE_DDR_FREQ 0x02
+
+/*
+ * Kirkwood can swap the clock to the CPU between two clocks:
+ *
+ * - cpu clk
+ * - ddr clk
+ *
+ * The frequencies are set at runtime before registering this
+ * table.
+ */
+static struct cpufreq_frequency_table kirkwood_freq_table[] = {
+ {STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
+ {STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
+ {0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
+{
+ if (__clk_is_enabled(priv.powersave_clk))
+ return kirkwood_freq_table[1].frequency;
+ return kirkwood_freq_table[0].frequency;
+}
+
+static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
+{
+ struct cpufreq_freqs freqs;
+ unsigned int state = kirkwood_freq_table[index].index;
+ unsigned long reg;
+
+ freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
+ freqs.new = kirkwood_freq_table[index].frequency;
+ freqs.cpu = 0; /* Kirkwood is UP */
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
+ kirkwood_freq_table[index].frequency);
+ dev_dbg(priv.dev, "old frequency was %i KHz\n",
+ kirkwood_cpufreq_get_cpu_frequency(0));
+
+ if (freqs.old != freqs.new) {
+ local_irq_disable();
+
+ /* Disable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg |= CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ switch (state) {
+ case STATE_CPU_FREQ:
+ clk_disable(priv.powersave_clk);
+ break;
+ case STATE_DDR_FREQ:
+ clk_enable(priv.powersave_clk);
+ break;
+ }
+
+ /* Wait-for-Interrupt, while the hardware changes frequency */
+ cpu_do_idle();
+
+ /* Enable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg &= ~CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ local_irq_enable();
+ }
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+};
+
+static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
+}
+
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int index = 0;
+
+ if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ kirkwood_cpufreq_set_cpu_state(index);
+
+ return 0;
+}
+
+/* Module init and exit code */
+static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 5000; /* 5uS */
+ policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
+
+ result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
+ if (result)
+ return result;
+
+ cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
+
+ return 0;
+}
+
+static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *kirkwood_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver kirkwood_cpufreq_driver = {
+ .get = kirkwood_cpufreq_get_cpu_frequency,
+ .verify = kirkwood_cpufreq_verify,
+ .target = kirkwood_cpufreq_target,
+ .init = kirkwood_cpufreq_cpu_init,
+ .exit = kirkwood_cpufreq_cpu_exit,
+ .name = "kirkwood-cpufreq",
+ .owner = THIS_MODULE,
+ .attr = kirkwood_cpufreq_attr,
+};
+
+static int kirkwood_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct resource *res;
+ int err;
+
+ priv.dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get memory resource\n");
+ return -ENODEV;
+ }
+ priv.base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv.base) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np)
+ return -ENODEV;
+
+ priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
+ if (IS_ERR(priv.cpu_clk)) {
+ dev_err(priv.dev, "Unable to get cpuclk");
+ return PTR_ERR(priv.cpu_clk);
+ }
+
+ clk_prepare_enable(priv.cpu_clk);
+ kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
+
+ priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
+ if (IS_ERR(priv.ddr_clk)) {
+ dev_err(priv.dev, "Unable to get ddrclk");
+ err = PTR_ERR(priv.ddr_clk);
+ goto out_cpu;
+ }
+
+ clk_prepare_enable(priv.ddr_clk);
+ kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
+
+ priv.powersave_clk = of_clk_get_by_name(np, "powersave");
+ if (IS_ERR(priv.powersave_clk)) {
+ dev_err(priv.dev, "Unable to get powersave");
+ err = PTR_ERR(priv.powersave_clk);
+ goto out_ddr;
+ }
+ clk_prepare(priv.powersave_clk);
+
+ of_node_put(np);
+ np = NULL;
+
+ err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
+ if (!err)
+ return 0;
+
+ dev_err(priv.dev, "Failed to register cpufreq driver");
+
+ clk_disable_unprepare(priv.powersave_clk);
+out_ddr:
+ clk_disable_unprepare(priv.ddr_clk);
+out_cpu:
+ clk_disable_unprepare(priv.cpu_clk);
+ of_node_put(np);
+
+ return err;
+}
+
+static int kirkwood_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
+
+ clk_disable_unprepare(priv.powersave_clk);
+ clk_disable_unprepare(priv.ddr_clk);
+ clk_disable_unprepare(priv.cpu_clk);
+
+ return 0;
+}
+
+static struct platform_driver kirkwood_cpufreq_platform_driver = {
+ .probe = kirkwood_cpufreq_probe,
+ .remove = kirkwood_cpufreq_remove,
+ .driver = {
+ .name = "kirkwood-cpufreq",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(kirkwood_cpufreq_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
+MODULE_DESCRIPTION("cpufreq driver for Marvell's kirkwood CPU");
+MODULE_ALIAS("platform:kirkwood-cpufreq");
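
Editor's note: the Kirkwood driver never reprograms a PLL; it toggles the "powersave" clock gate so the CPU runs either from cpu_clk or from ddr_clk, with interrupts masked and a WFI while the hardware swaps the source. A toy model of that two-state switch:

#include <stdio.h>
#include <stdbool.h>

/* Toy model: when the "powersave" gate is enabled the CPU runs from
 * ddr_clk, otherwise from cpu_clk (rates are illustrative). */
static bool powersave_enabled;
static unsigned long cpu_khz = 1200000, ddr_khz = 400000;

static unsigned long current_khz(void)
{
	return powersave_enabled ? ddr_khz : cpu_khz;	/* mirrors ..._get_cpu_frequency() */
}

static void set_state(bool use_ddr_clk)
{
	/* In the driver this is bracketed by masking CPU interrupts and a
	 * wait-for-interrupt while the hardware changes the clock source. */
	powersave_enabled = use_ddr_clk;
}

int main(void)
{
	set_state(true);
	printf("%lu kHz\n", current_khz());
	set_state(false);
	printf("%lu kHz\n", current_khz());
	return 0;
}
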
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index f1fa500ac105..1180d536d1eb 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -77,7 +77,7 @@ static unsigned int longhaul_index;
static int scale_voltage;
static int disable_acpi_c3;
static int revid_errata;
-
+static int enable;
/* Clock ratios multiplied by 10 */
static int mults[32];
@@ -965,6 +965,10 @@ static int __init longhaul_init(void)
if (!x86_match_cpu(longhaul_id))
return -ENODEV;
+ if (!enable) {
+ printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+ return -ENODEV;
+ }
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
printk(KERN_ERR PFX "More than 1 CPU detected, "
@@ -1021,6 +1025,10 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
* such. */
module_param(revid_errata, int, 0644);
MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
+/* By default the driver is disabled to prevent freezes on
+ * incompatible systems. */
+module_param(enable, int, 0644);
+MODULE_PARM_DESC(enable, "Enable driver");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 89b178a3f849..d4c4989823dc 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -181,7 +181,7 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
- cpumask_copy(policy->cpus, cpu_online_mask);
+ cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 1f3417a8322d..9128c07bafba 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -110,13 +110,16 @@ static int omap_target(struct cpufreq_policy *policy,
freq = ret;
if (mpu_reg) {
+ rcu_read_lock();
opp = opp_find_freq_ceil(mpu_dev, &freq);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
__func__, freqs.new);
return -EINVAL;
}
volt = opp_get_voltage(opp);
+ rcu_read_unlock();
tol = volt * OPP_TOLERANCE / 100;
volt_old = regulator_get_voltage(mpu_reg);
}
@@ -211,10 +214,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
* interface to handle this scenario. Additional is_smp() check
* is to keep SMP_ON_UP build working.
*/
- if (is_smp()) {
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+ if (is_smp())
cpumask_setall(policy->cpus);
- }
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 056faf6af1a9..d13a13678b5f 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1249,39 +1249,59 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.attr = powernow_k8_attr,
};
+static void __request_acpi_cpufreq(void)
+{
+ const char *cur_drv, *drv = "acpi-cpufreq";
+
+ cur_drv = cpufreq_get_current_driver();
+ if (!cur_drv)
+ goto request;
+
+ if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
+ pr_warn(PFX "WTF driver: %s\n", cur_drv);
+
+ return;
+
+ request:
+ pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n");
+ request_module(drv);
+}
+
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0;
- int rv;
+ int ret;
if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
- pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
- request_module("acpi-cpufreq");
+ __request_acpi_cpufreq();
return -ENODEV;
}
if (!x86_match_cpu(powernow_k8_ids))
return -ENODEV;
+ get_online_cpus();
for_each_online_cpu(i) {
- int rc;
- smp_call_function_single(i, check_supported_cpu, &rc, 1);
- if (rc == 0)
+ smp_call_function_single(i, check_supported_cpu, &ret, 1);
+ if (!ret)
supported_cpus++;
}
- if (supported_cpus != num_online_cpus())
+ if (supported_cpus != num_online_cpus()) {
+ put_online_cpus();
return -ENODEV;
+ }
+ put_online_cpus();
- rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+ ret = cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (ret)
+ return ret;
- if (!rv)
- pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
- num_online_nodes(), boot_cpu_data.x86_model_id,
- supported_cpus);
+ pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
- return rv;
+ return ret;
}
/* driver entry point for term */
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 4575cfe41755..7e4d77327957 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,7 +30,7 @@ static struct {
u32 cnt;
} spear_cpufreq;
-int spear_cpufreq_verify(struct cpufreq_policy *policy)
+static int spear_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
}
@@ -157,7 +157,9 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = newfreq / 1000;
freqs.new /= mult;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
@@ -170,7 +172,8 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
}
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
}
@@ -188,8 +191,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
policy->cur = spear_cpufreq_get(0);
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
- cpumask_copy(policy->related_cpus, policy->cpus);
+ cpumask_setall(policy->cpus);
return 0;
}
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index c4cc27e5c8a5..071e2c3eec4f 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -39,4 +39,10 @@ config CPU_IDLE_CALXEDA
help
Select this to enable cpuidle on Calxeda processors.
+config CPU_IDLE_KIRKWOOD
+ bool "CPU Idle Driver for Kirkwood processors"
+ depends on ARCH_KIRKWOOD
+ help
+ Select this to enable cpuidle on Kirkwood processors.
+
endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 03ee87482c71..24c6e7d945ed 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -6,3 +6,4 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
+obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 3265844839bf..2a297f86dbad 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -209,7 +209,7 @@ inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
int all;
int ret;
- all = coupled->online_count || (coupled->online_count << WAITING_BITS);
+ all = coupled->online_count | (coupled->online_count << WAITING_BITS);
ret = atomic_add_unless(&coupled->ready_waiting_counts,
-MAX_WAITING_CPUS, all);
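
Editor's note: the coupled.c fix replaces a logical OR with a bitwise OR. The intended value packs the online count into both the ready field (low bits) and the waiting field (shifted by WAITING_BITS); "||" collapsed that to 0 or 1. A two-line demonstration of the difference:

#include <stdio.h>

#define WAITING_BITS 16

int main(void)
{
	int online_count = 3;

	int wrong = online_count || (online_count << WAITING_BITS);	/* 0x1 */
	int right = online_count | (online_count << WAITING_BITS);	/* 0x30003 */

	printf("wrong=0x%x right=0x%x\n", wrong, right);
	return 0;
}
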
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
new file mode 100644
index 000000000000..670aa1e55cd6
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -0,0 +1,106 @@
+/*
+ * drivers/cpuidle/cpuidle-kirkwood.c
+ *
+ * CPU idle driver for Marvell Kirkwood SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The CPU idle driver uses wait-for-interrupt and DDR self refresh in
+ * order to implement two idle states:
+ * #1 wait-for-interrupt
+ * #2 wait-for-interrupt and DDR self refresh
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
+
+#define KIRKWOOD_MAX_STATES 2
+
+static void __iomem *ddr_operation_base;
+
+/* Actual code that puts the SoC in different idle states */
+static int kirkwood_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ writel(0x7, ddr_operation_base);
+ cpu_do_idle();
+
+ return index;
+}
+
+static struct cpuidle_driver kirkwood_idle_driver = {
+ .name = "kirkwood_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = kirkwood_enter_idle,
+ .exit_latency = 10,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "DDR SR",
+ .desc = "WFI and DDR Self Refresh",
+ },
+ .state_count = KIRKWOOD_MAX_STATES,
+};
+static struct cpuidle_device *device;
+
+static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
+
+/* Initialize CPU idle by registering the idle states */
+static int kirkwood_cpuidle_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
+ ddr_operation_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!ddr_operation_base)
+ return -EADDRNOTAVAIL;
+
+ device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
+ device->state_count = KIRKWOOD_MAX_STATES;
+
+ cpuidle_register_driver(&kirkwood_idle_driver);
+ if (cpuidle_register_device(device)) {
+ pr_err("kirkwood_init_cpuidle: Failed registering\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+int kirkwood_cpuidle_remove(struct platform_device *pdev)
+{
+ cpuidle_unregister_device(device);
+ cpuidle_unregister_driver(&kirkwood_idle_driver);
+
+ return 0;
+}
+
+static struct platform_driver kirkwood_cpuidle_driver = {
+ .probe = kirkwood_cpuidle_probe,
+ .remove = kirkwood_cpuidle_remove,
+ .driver = {
+ .name = "kirkwood_cpuidle",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(kirkwood_cpuidle_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Kirkwood cpu idle driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kirkwood-cpuidle");
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 8df53dd8dbe1..eba69290e074 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -69,24 +69,15 @@ int cpuidle_play_dead(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
- int i, dead_state = -1;
- int power_usage = -1;
+ int i;
if (!drv)
return -ENODEV;
/* Find lowest-power state that supports long-term idle */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
- struct cpuidle_state *s = &drv->states[i];
-
- if (s->power_usage < power_usage && s->enter_dead) {
- power_usage = s->power_usage;
- dead_state = i;
- }
- }
-
- if (dead_state != -1)
- return drv->states[dead_state].enter_dead(dev, dead_state);
+ for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+ if (drv->states[i].enter_dead)
+ return drv->states[i].enter_dead(dev, i);
return -ENODEV;
}
@@ -153,7 +144,6 @@ int cpuidle_idle_call(void)
return 0;
}
- trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle_rcuidle(next_state, dev->cpu);
if (cpuidle_state_is_coupled(dev, drv, next_state))
@@ -162,7 +152,6 @@ int cpuidle_idle_call(void)
else
entered_state = cpuidle_enter_state(dev, drv, next_state);
- trace_power_end_rcuidle(dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* give the governor an opportunity to reflect on the outcome */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 3af841fb397a..422c7b69ba7c 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -19,34 +19,9 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
-static void set_power_states(struct cpuidle_driver *drv)
-{
- int i;
-
- /*
- * cpuidle driver should set the drv->power_specified bit
- * before registering if the driver provides
- * power_usage numbers.
- *
- * If power_specified is not set,
- * we fill in power_usage with decreasing values as the
- * cpuidle code has an implicit assumption that state Cn
- * uses less power than C(n-1).
- *
- * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
- * an power value of -1. So we use -2, -3, etc, for other
- * c-states.
- */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
- drv->states[i].power_usage = -1 - i;
-}
-
static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
drv->refcnt = 0;
-
- if (!drv->power_specified)
- set_power_states(drv);
}
static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
@@ -235,16 +210,10 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver);
*/
struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
{
- struct cpuidle_driver *drv;
-
if (!dev)
return NULL;
- spin_lock(&cpuidle_driver_lock);
- drv = __cpuidle_get_cpu_driver(dev->cpu);
- spin_unlock(&cpuidle_driver_lock);
-
- return drv;
+ return __cpuidle_get_cpu_driver(dev->cpu);
}
EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index bd40b943b6db..fe343a06b7da 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -312,7 +312,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- int power_usage = -1;
int i;
int multiplier;
struct timespec t;
@@ -383,11 +382,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->exit_latency * multiplier > data->predicted_us)
continue;
- if (s->power_usage < power_usage) {
- power_usage = s->power_usage;
- data->last_state_idx = i;
- data->exit_us = s->exit_latency;
- }
+ data->last_state_idx = i;
+ data->exit_us = s->exit_latency;
}
/* not deepest C-state chosen for low predicted residency */
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 340942946106..428754af6236 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -374,7 +374,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
- for (i = 0; i < drv->state_count; i++) {
+ for (i = 0; i < device->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f6644f59fd9d..87ec4d027c25 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -254,6 +254,7 @@ config CRYPTO_DEV_OMAP_AES
tristate "Support for OMAP AES hw engine"
depends on ARCH_OMAP2 || ARCH_OMAP3
select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
help
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 8061336e07e7..c9d9d5c16f94 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1036,7 +1036,7 @@ err_aes_algs:
return err;
}
-static int __devinit atmel_aes_probe(struct platform_device *pdev)
+static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
struct aes_platform_data *pdata;
@@ -1152,7 +1152,7 @@ aes_dd_err:
return err;
}
-static int __devexit atmel_aes_remove(struct platform_device *pdev)
+static int atmel_aes_remove(struct platform_device *pdev)
{
static struct atmel_aes_dev *aes_dd;
@@ -1185,7 +1185,7 @@ static int __devexit atmel_aes_remove(struct platform_device *pdev)
static struct platform_driver atmel_aes_driver = {
.probe = atmel_aes_probe,
- .remove = __devexit_p(atmel_aes_remove),
+ .remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.owner = THIS_MODULE,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index bcdf55fdc623..4918e9424d31 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -964,7 +964,7 @@ err_sha_algs:
return err;
}
-static int __devinit atmel_sha_probe(struct platform_device *pdev)
+static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
struct device *dev = &pdev->dev;
@@ -1063,7 +1063,7 @@ sha_dd_err:
return err;
}
-static int __devexit atmel_sha_remove(struct platform_device *pdev)
+static int atmel_sha_remove(struct platform_device *pdev)
{
static struct atmel_sha_dev *sha_dd;
@@ -1093,7 +1093,7 @@ static int __devexit atmel_sha_remove(struct platform_device *pdev)
static struct platform_driver atmel_sha_driver = {
.probe = atmel_sha_probe,
- .remove = __devexit_p(atmel_sha_remove),
+ .remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.owner = THIS_MODULE,
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 7495f98c7221..7c73fbb17538 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1053,7 +1053,7 @@ err_tdes_algs:
return err;
}
-static int __devinit atmel_tdes_probe(struct platform_device *pdev)
+static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
struct device *dev = &pdev->dev;
@@ -1162,7 +1162,7 @@ tdes_dd_err:
return err;
}
-static int __devexit atmel_tdes_remove(struct platform_device *pdev)
+static int atmel_tdes_remove(struct platform_device *pdev)
{
static struct atmel_tdes_dev *tdes_dd;
@@ -1195,7 +1195,7 @@ static int __devexit atmel_tdes_remove(struct platform_device *pdev)
static struct platform_driver atmel_tdes_driver = {
.probe = atmel_tdes_probe,
- .remove = __devexit_p(atmel_tdes_remove),
+ .remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.owner = THIS_MODULE,
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 5398580b4313..a22f1a9f895f 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -586,7 +586,7 @@ static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t st
* bfin_crypto_crc_probe - Initialize module
*
*/
-static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
+static int bfin_crypto_crc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -707,7 +707,7 @@ out_error_free_mem:
* bfin_crypto_crc_remove - Initialize module
*
*/
-static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
+static int bfin_crypto_crc_remove(struct platform_device *pdev)
{
struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
@@ -731,7 +731,7 @@ static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
static struct platform_driver bfin_crypto_crc_driver = {
.probe = bfin_crypto_crc_probe,
- .remove = __devexit_p(bfin_crypto_crc_remove),
+ .remove = bfin_crypto_crc_remove,
.suspend = bfin_crypto_crc_suspend,
.resume = bfin_crypto_crc_resume,
.driver = {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bf20dd891705..1c56f63524f2 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -420,7 +420,7 @@ static struct platform_driver caam_driver = {
.of_match_table = caam_match,
},
.probe = caam_probe,
- .remove = __devexit_p(caam_remove),
+ .remove = caam_remove,
};
module_platform_driver(caam_driver);
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 51f196d77f21..0c9ff4971724 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -498,8 +498,7 @@ static struct crypto_alg geode_ecb_alg = {
}
};
-static void __devexit
-geode_aes_remove(struct pci_dev *dev)
+static void geode_aes_remove(struct pci_dev *dev)
{
crypto_unregister_alg(&geode_alg);
crypto_unregister_alg(&geode_ecb_alg);
@@ -513,8 +512,7 @@ geode_aes_remove(struct pci_dev *dev)
}
-static int __devinit
-geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
ret = pci_enable_device(dev);
@@ -582,7 +580,7 @@ static struct pci_driver geode_aes_driver = {
.name = "Geode LX AES",
.id_table = geode_aes_tbl,
.probe = geode_aes_probe,
- .remove = __devexit_p(geode_aes_remove)
+ .remove = geode_aes_remove,
};
module_pci_driver(geode_aes_driver);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index fda32968a66b..ebf130e894b5 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2561,7 +2561,7 @@ static void hifn_tasklet_callback(unsigned long data)
hifn_process_queue(dev);
}
-static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err, i;
struct hifn_device *dev;
@@ -2696,7 +2696,7 @@ err_out_disable_pci_device:
return err;
}
-static void __devexit hifn_remove(struct pci_dev *pdev)
+static void hifn_remove(struct pci_dev *pdev)
{
int i;
struct hifn_device *dev;
@@ -2740,7 +2740,7 @@ static struct pci_driver hifn_pci_driver = {
.name = "hifn795x",
.id_table = hifn_pci_tbl,
.probe = hifn_probe,
- .remove = __devexit_p(hifn_remove),
+ .remove = hifn_remove,
};
static int __init hifn_init(void)
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 24ccae453e79..ce6290e5471a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1184,7 +1184,7 @@ MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
static struct platform_driver marvell_crypto = {
.probe = mv_probe,
- .remove = __devexit_p(mv_remove),
+ .remove = mv_remove,
.driver = {
.owner = THIS_MODULE,
.name = "mv_crypto",
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index aab257403b4a..e1f0ab413c3b 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -34,7 +34,7 @@
#define DRV_MODULE_VERSION "0.2"
#define DRV_MODULE_RELDATE "July 28, 2011"
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
@@ -1388,7 +1388,7 @@ static int n2_cipher_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
{
struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
struct crypto_alg *alg;
@@ -1424,7 +1424,7 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
return err;
}
-static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
+static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
struct ahash_alg *ahash;
@@ -1462,7 +1462,7 @@ static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
return err;
}
-static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
struct hash_alg_common *halg;
@@ -1517,7 +1517,7 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
return err;
}
-static int __devinit n2_register_algs(void)
+static int n2_register_algs(void)
{
int i, err = 0;
@@ -1545,7 +1545,7 @@ out:
return err;
}
-static void __devexit n2_unregister_algs(void)
+static void n2_unregister_algs(void)
{
mutex_lock(&spu_lock);
if (!--algs_registered)
@@ -1822,8 +1822,8 @@ static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *de
return err;
}
-static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
- struct spu_mdesc_info *ip)
+static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
+ struct spu_mdesc_info *ip)
{
const u64 *ino;
int ino_len;
@@ -1851,10 +1851,10 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
return 0;
}
-static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- struct spu_mdesc_info *ip,
- const char *node_name)
+static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+ struct platform_device *dev,
+ struct spu_mdesc_info *ip,
+ const char *node_name)
{
const unsigned int *reg;
u64 node;
@@ -1883,7 +1883,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;
-static int __devinit n2_spu_hvapi_register(void)
+static int n2_spu_hvapi_register(void)
{
int err;
@@ -1909,7 +1909,7 @@ static void n2_spu_hvapi_unregister(void)
static int global_ref;
-static int __devinit grab_global_resources(void)
+static int grab_global_resources(void)
{
int err = 0;
@@ -1973,7 +1973,7 @@ static void release_global_resources(void)
mutex_unlock(&spu_lock);
}
-static struct n2_crypto * __devinit alloc_n2cp(void)
+static struct n2_crypto *alloc_n2cp(void)
{
struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
@@ -1993,7 +1993,7 @@ static void free_n2cp(struct n2_crypto *np)
kfree(np);
}
-static void __devinit n2_spu_driver_version(void)
+static void n2_spu_driver_version(void)
{
static int n2_spu_version_printed;
@@ -2001,7 +2001,7 @@ static void __devinit n2_spu_driver_version(void)
pr_info("%s", version);
}
-static int __devinit n2_crypto_probe(struct platform_device *dev)
+static int n2_crypto_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
const char *full_name;
@@ -2077,7 +2077,7 @@ out_free_n2cp:
return err;
}
-static int __devexit n2_crypto_remove(struct platform_device *dev)
+static int n2_crypto_remove(struct platform_device *dev)
{
struct n2_crypto *np = dev_get_drvdata(&dev->dev);
@@ -2092,7 +2092,7 @@ static int __devexit n2_crypto_remove(struct platform_device *dev)
return 0;
}
-static struct n2_mau * __devinit alloc_ncp(void)
+static struct n2_mau *alloc_ncp(void)
{
struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
@@ -2112,7 +2112,7 @@ static void free_ncp(struct n2_mau *mp)
kfree(mp);
}
-static int __devinit n2_mau_probe(struct platform_device *dev)
+static int n2_mau_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
const char *full_name;
@@ -2179,7 +2179,7 @@ out_free_ncp:
return err;
}
-static int __devexit n2_mau_remove(struct platform_device *dev)
+static int n2_mau_remove(struct platform_device *dev)
{
struct n2_mau *mp = dev_get_drvdata(&dev->dev);
@@ -2217,7 +2217,7 @@ static struct platform_driver n2_crypto_driver = {
.of_match_table = n2_crypto_match,
},
.probe = n2_crypto_probe,
- .remove = __devexit_p(n2_crypto_remove),
+ .remove = n2_crypto_remove,
};
static struct of_device_id n2_mau_match[] = {
@@ -2245,7 +2245,7 @@ static struct platform_driver n2_mau_driver = {
.of_match_table = n2_mau_match,
},
.probe = n2_mau_probe,
- .remove = __devexit_p(n2_mau_remove),
+ .remove = n2_mau_remove,
};
static int __init n2_init(void)
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 0ce625738677..6c4c000671c5 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <asm/page.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/vio.h>
#include "nx_csbcpb.h" /* struct nx_csbcpb */
@@ -1014,26 +1013,23 @@ error_out:
* NOTIFY_BAD encoded with error number on failure, use
* notifier_to_errno() to decode this value
*/
-static int nx842_OF_notifier(struct notifier_block *np,
- unsigned long action,
- void *update)
+static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
+ void *update)
{
- struct pSeries_reconfig_prop_update *upd;
+ struct of_prop_reconfig *upd = update;
struct nx842_devdata *local_devdata;
struct device_node *node = NULL;
- upd = (struct pSeries_reconfig_prop_update *)update;
-
rcu_read_lock();
local_devdata = rcu_dereference(devdata);
if (local_devdata)
node = local_devdata->dev->of_node;
if (local_devdata &&
- action == PSERIES_UPDATE_PROPERTY &&
- !strcmp(upd->node->name, node->name)) {
+ action == OF_RECONFIG_UPDATE_PROPERTY &&
+ !strcmp(upd->dn->name, node->name)) {
rcu_read_unlock();
- nx842_OF_upd(upd->property);
+ nx842_OF_upd(upd->prop);
} else
rcu_read_unlock();
@@ -1182,7 +1178,7 @@ static int __init nx842_probe(struct vio_dev *viodev,
synchronize_rcu();
kfree(old_devdata);
- pSeries_reconfig_notifier_register(&nx842_of_nb);
+ of_reconfig_notifier_register(&nx842_of_nb);
ret = nx842_OF_upd(NULL);
if (ret && ret != -ENODEV) {
@@ -1228,7 +1224,7 @@ static int __exit nx842_remove(struct vio_dev *viodev)
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
- pSeries_reconfig_notifier_unregister(&nx842_of_nb);
+ of_reconfig_notifier_unregister(&nx842_of_nb);
rcu_assign_pointer(devdata, NULL);
spin_unlock_irqrestore(&devdata_mutex, flags);
synchronize_rcu();
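For reference, the generic OF reconfig interface this driver is converted to follows the usual notifier pattern. The sketch below (with hypothetical example_* names) shows the registration and dispatch shape assumed by the hunks above.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/of.h>

static int example_of_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct of_prop_reconfig *pr = data;

	if (action != OF_RECONFIG_UPDATE_PROPERTY)
		return NOTIFY_DONE;

	pr_info("property %s of node %s updated\n",
		pr->prop->name, pr->dn->name);
	return NOTIFY_OK;
}

static struct notifier_block example_of_nb = {
	.notifier_call = example_of_notifier,
};

/* register, e.g. from probe():   of_reconfig_notifier_register(&example_of_nb);   */
/* unregister, e.g. from remove(): of_reconfig_notifier_unregister(&example_of_nb); */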
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 638110efae9b..c767f232e693 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -33,7 +33,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
@@ -635,8 +634,7 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
nx_ctx->out_sg = NULL;
}
-static int __devinit nx_probe(struct vio_dev *viodev,
- const struct vio_device_id *id)
+static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
viodev->name, viodev->resource_id);
@@ -654,7 +652,7 @@ static int __devinit nx_probe(struct vio_dev *viodev,
return nx_register_algs();
}
-static int __devexit nx_remove(struct vio_dev *viodev)
+static int nx_remove(struct vio_dev *viodev)
{
dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
viodev->unit_address);
@@ -690,7 +688,7 @@ static void __exit nx_fini(void)
vio_unregister_driver(&nx_driver.viodriver);
}
-static struct vio_device_id nx_crypto_driver_ids[] __devinitdata = {
+static struct vio_device_id nx_crypto_driver_ids[] = {
{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
{ "", "" }
};
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 1d75e6f95a58..9e6947bc296f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -38,7 +38,10 @@
#include <crypto/internal/hash.h>
#include <linux/omap-dma.h>
+
+#ifdef CONFIG_ARCH_OMAP1
#include <mach/irqs.h>
+#endif
#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
@@ -1137,7 +1140,7 @@ static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
}
}
-static int __devinit omap_sham_probe(struct platform_device *pdev)
+static int omap_sham_probe(struct platform_device *pdev)
{
struct omap_sham_dev *dd;
struct device *dev = &pdev->dev;
@@ -1250,7 +1253,7 @@ data_err:
return err;
}
-static int __devexit omap_sham_remove(struct platform_device *pdev)
+static int omap_sham_remove(struct platform_device *pdev)
{
static struct omap_sham_dev *dd;
int i;
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 410a03c01ca4..2096d4685a9e 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1708,7 +1708,7 @@ static bool spacc_is_compatible(struct platform_device *pdev,
return false;
}
-static int __devinit spacc_probe(struct platform_device *pdev)
+static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret = -EINVAL;
struct resource *mem, *irq;
@@ -1841,7 +1841,7 @@ static int __devinit spacc_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit spacc_remove(struct platform_device *pdev)
+static int spacc_remove(struct platform_device *pdev)
{
struct spacc_alg *alg, *next;
struct spacc_engine *engine = platform_get_drvdata(pdev);
@@ -1863,11 +1863,12 @@ static int __devexit spacc_remove(struct platform_device *pdev)
static const struct platform_device_id spacc_id_table[] = {
{ "picochip,spacc-ipsec", },
{ "picochip,spacc-l2", },
+ { }
};
static struct platform_driver spacc_driver = {
.probe = spacc_probe,
- .remove = __devexit_p(spacc_remove),
+ .remove = spacc_remove,
.driver = {
.name = "picochip,spacc",
#ifdef CONFIG_PM
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index a22714412cda..49ad8cbade69 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -30,7 +30,7 @@
#include <crypto/ctr.h>
#include <plat/cpu.h>
-#include <plat/dma.h>
+#include <mach/dma.h>
#define _SBF(s, v) ((v) << (s))
#define _BIT(b) _SBF(b, 1)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index da1112765a44..09b184adf31b 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -936,8 +936,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
sg_count--;
link_tbl_ptr--;
}
- link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
- + cryptlen);
+ be16_add_cpu(&link_tbl_ptr->len, cryptlen);
/* tag end of link table */
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index e69f3bc473be..85ea7525fa36 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -672,8 +672,10 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
mutex_lock(&aes_lock);
ret = clk_prepare_enable(dd->aes_clk);
- if (ret)
+ if (ret) {
+ mutex_unlock(&aes_lock);
return ret;
+ }
ctx->dd = dd;
dd->ctx = ctx;
@@ -757,8 +759,10 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
ret = clk_prepare_enable(dd->aes_clk);
- if (ret)
+ if (ret) {
+ mutex_unlock(&aes_lock);
return ret;
+ }
aes_set_key(dd);
@@ -1029,7 +1033,7 @@ out:
if (dd->buf_out)
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- if (IS_ERR(dd->aes_clk))
+ if (!IS_ERR(dd->aes_clk))
clk_put(dd->aes_clk);
if (aes_wq)
destroy_workqueue(aes_wq);
@@ -1043,7 +1047,7 @@ out:
return err;
}
-static int __devexit tegra_aes_remove(struct platform_device *pdev)
+static int tegra_aes_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
@@ -1070,7 +1074,7 @@ static int __devexit tegra_aes_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tegra_aes_of_match[] __devinitdata = {
+static struct of_device_id tegra_aes_of_match[] = {
{ .compatible = "nvidia,tegra20-aes", },
{ .compatible = "nvidia,tegra30-aes", },
{ },
@@ -1078,7 +1082,7 @@ static struct of_device_id tegra_aes_of_match[] __devinitdata = {
static struct platform_driver tegra_aes_driver = {
.probe = tegra_aes_probe,
- .remove = __devexit_p(tegra_aes_remove),
+ .remove = tegra_aes_remove,
.driver = {
.name = "tegra-aes",
.owner = THIS_MODULE,
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 53766f39aadd..3b367973a802 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -994,6 +994,11 @@ module_exit(devfreq_exit);
* @freq: The frequency given to target function
* @flags: Flags handed from devfreq framework.
*
+ * Locking: This function must be called under rcu_read_lock(). The returned
+ * opp is an RCU-protected pointer, so it is only guaranteed to remain valid
+ * for use with opp_get_{voltage, freq} while the read-side lock is held.
+ * Copy out any values you need from the opp before calling rcu_read_unlock();
+ * the pointer must not be dereferenced after the lock is dropped.
*/
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
u32 flags)
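The locking rule documented above implies a specific calling pattern, which the exynos4 bus driver below is converted to: resolve the OPP and copy out its frequency and voltage entirely inside the RCU read-side critical section. A minimal sketch of that pattern, with a hypothetical caller, is:

#include <linux/devfreq.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct opp *opp;
	unsigned long rate, volt;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	rate = opp_get_freq(opp);	/* safe: still under rcu_read_lock() */
	volt = opp_get_voltage(opp);
	rcu_read_unlock();		/* opp must not be used past this point */

	dev_dbg(dev, "selected %lu Hz at %lu uV\n", rate, volt);
	/* ... program clocks/regulators using rate and volt ... */
	return 0;
}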
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 741837208716..3f37f3b3f268 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -73,6 +73,16 @@ enum busclk_level_idx {
#define EX4210_LV_NUM (LV_2 + 1)
#define EX4x12_LV_NUM (LV_4 + 1)
+/**
+ * struct busfreq_opp_info - opp information for bus
+ * @rate: Frequency in hertz
+ * @volt: Voltage in microvolts corresponding to this OPP
+ */
+struct busfreq_opp_info {
+ unsigned long rate;
+ unsigned long volt;
+};
+
struct busfreq_data {
enum exynos4_busf_type type;
struct device *dev;
@@ -80,7 +90,7 @@ struct busfreq_data {
bool disabled;
struct regulator *vdd_int;
struct regulator *vdd_mif; /* Exynos4412/4212 only */
- struct opp *curr_opp;
+ struct busfreq_opp_info curr_oppinfo;
struct exynos4_ppmu dmc[2];
struct notifier_block pm_notifier;
@@ -296,13 +306,14 @@ static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
};
-static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4210_set_busclk(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi)
{
unsigned int index;
unsigned int tmp;
for (index = LV_0; index < EX4210_LV_NUM; index++)
- if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ if (oppi->rate == exynos4210_busclk_table[index].clk)
break;
if (index == EX4210_LV_NUM)
@@ -361,13 +372,14 @@ static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
return 0;
}
-static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4x12_set_busclk(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi)
{
unsigned int index;
unsigned int tmp;
for (index = LV_0; index < EX4x12_LV_NUM; index++)
- if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ if (oppi->rate == exynos4x12_mifclk_table[index].clk)
break;
if (index == EX4x12_LV_NUM)
@@ -576,11 +588,12 @@ static int exynos4x12_get_intspec(unsigned long mifclk)
return -EINVAL;
}
-static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
- struct opp *oldopp)
+static int exynos4_bus_setvolt(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi,
+ struct busfreq_opp_info *oldoppi)
{
int err = 0, tmp;
- unsigned long volt = opp_get_voltage(opp);
+ unsigned long volt = oppi->volt;
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
@@ -595,11 +608,11 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
if (err)
break;
- tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ tmp = exynos4x12_get_intspec(oppi->rate);
if (tmp < 0) {
err = tmp;
regulator_set_voltage(data->vdd_mif,
- opp_get_voltage(oldopp),
+ oldoppi->volt,
MAX_SAFEVOLT);
break;
}
@@ -609,7 +622,7 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
/* Try to recover */
if (err)
regulator_set_voltage(data->vdd_mif,
- opp_get_voltage(oldopp),
+ oldoppi->volt,
MAX_SAFEVOLT);
break;
default:
@@ -626,17 +639,26 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data *data = platform_get_drvdata(pdev);
- struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
- unsigned long freq = opp_get_freq(opp);
- unsigned long old_freq = opp_get_freq(data->curr_opp);
+ struct opp *opp;
+ unsigned long freq;
+ unsigned long old_freq = data->curr_oppinfo.rate;
+ struct busfreq_opp_info new_oppinfo;
- if (IS_ERR(opp))
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, _freq, flags);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
return PTR_ERR(opp);
+ }
+ new_oppinfo.rate = opp_get_freq(opp);
+ new_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ freq = new_oppinfo.rate;
if (old_freq == freq)
return 0;
- dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+ dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
mutex_lock(&data->lock);
@@ -644,17 +666,18 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
goto out;
if (old_freq < freq)
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto out;
if (old_freq != freq) {
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, opp);
+ err = exynos4210_set_busclk(data, &new_oppinfo);
break;
case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, opp);
+ err = exynos4x12_set_busclk(data, &new_oppinfo);
break;
default:
err = -EINVAL;
@@ -664,11 +687,12 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
goto out;
if (old_freq > freq)
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto out;
- data->curr_opp = opp;
+ data->curr_oppinfo = new_oppinfo;
out:
mutex_unlock(&data->lock);
return err;
@@ -702,7 +726,7 @@ static int exynos4_bus_get_dev_status(struct device *dev,
exynos4_read_ppmu(data);
busier_dmc = exynos4_get_busier_dmc(data);
- stat->current_frequency = opp_get_freq(data->curr_opp);
+ stat->current_frequency = data->curr_oppinfo.rate;
if (busier_dmc)
addr = S5P_VA_DMC1;
@@ -933,6 +957,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
struct busfreq_data *data = container_of(this, struct busfreq_data,
pm_notifier);
struct opp *opp;
+ struct busfreq_opp_info new_oppinfo;
unsigned long maxfreq = ULONG_MAX;
int err = 0;
@@ -943,18 +968,29 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
+ rcu_read_lock();
opp = opp_find_freq_floor(data->dev, &maxfreq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(data->dev, "%s: unable to find a min freq\n",
+ __func__);
+ return PTR_ERR(opp);
+ }
+ new_oppinfo.rate = opp_get_freq(opp);
+ new_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto unlock;
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, opp);
+ err = exynos4210_set_busclk(data, &new_oppinfo);
break;
case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, opp);
+ err = exynos4x12_set_busclk(data, &new_oppinfo);
break;
default:
err = -EINVAL;
@@ -962,7 +998,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
if (err)
goto unlock;
- data->curr_opp = opp;
+ data->curr_oppinfo = new_oppinfo;
unlock:
mutex_unlock(&data->lock);
if (err)
@@ -980,7 +1016,7 @@ unlock:
return NOTIFY_DONE;
}
-static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+static int exynos4_busfreq_probe(struct platform_device *pdev)
{
struct busfreq_data *data;
struct opp *opp;
@@ -1027,13 +1063,17 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
}
}
+ rcu_read_lock();
opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
exynos4_devfreq_profile.initial_freq);
return PTR_ERR(opp);
}
- data->curr_opp = opp;
+ data->curr_oppinfo.rate = opp_get_freq(opp);
+ data->curr_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
platform_set_drvdata(pdev, data);
@@ -1056,7 +1096,7 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
return 0;
}
-static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+static int exynos4_busfreq_remove(struct platform_device *pdev)
{
struct busfreq_data *data = platform_get_drvdata(pdev);
@@ -1087,7 +1127,7 @@ static const struct platform_device_id exynos4_busfreq_id[] = {
static struct platform_driver exynos4_busfreq_driver = {
.probe = exynos4_busfreq_probe,
- .remove = __devexit_p(exynos4_busfreq_remove),
+ .remove = exynos4_busfreq_remove,
.id_table = exynos4_busfreq_id,
.driver = {
.name = "exynos4-busfreq",
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d4c12180c654..40179e749f08 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -125,6 +125,8 @@ config MPC512X_DMA
---help---
Enable support for the Freescale MPC512x built-in DMA engine.
+source "drivers/dma/bestcomm/Kconfig"
+
config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7428feaa8705..642d96736cf5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
diff --git a/drivers/dma/bestcomm/Kconfig b/drivers/dma/bestcomm/Kconfig
new file mode 100644
index 000000000000..29e427085efb
--- /dev/null
+++ b/drivers/dma/bestcomm/Kconfig
@@ -0,0 +1,36 @@
+#
+# Kconfig options for Bestcomm
+#
+
+config PPC_BESTCOMM
+ tristate "Bestcomm DMA engine support"
+ depends on PPC_MPC52xx
+ default n
+ select PPC_LIB_RHEAP
+ help
+ BestComm is the name of the communication coprocessor found
+ on the Freescale MPC5200 family of processors. Its usage is
+ optional for some drivers (like ATA), but required for
+ others (like FEC).
+
+ If you want to use drivers that require DMA operations,
+ answer Y or M. Otherwise say N.
+
+config PPC_BESTCOMM_ATA
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables support for the ATA task.
+
+config PPC_BESTCOMM_FEC
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables support for the FEC tasks.
+
+config PPC_BESTCOMM_GEN_BD
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables support for the GenBD tasks.
+
diff --git a/drivers/dma/bestcomm/Makefile b/drivers/dma/bestcomm/Makefile
new file mode 100644
index 000000000000..aed2df2a6580
--- /dev/null
+++ b/drivers/dma/bestcomm/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for BestComm & co
+#
+
+bestcomm-core-objs := bestcomm.o sram.o
+bestcomm-ata-objs := ata.o bcom_ata_task.o
+bestcomm-fec-objs := fec.o bcom_fec_rx_task.o bcom_fec_tx_task.o
+bestcomm-gen-bd-objs := gen_bd.o bcom_gen_bd_rx_task.o bcom_gen_bd_tx_task.o
+
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm-core.o
+obj-$(CONFIG_PPC_BESTCOMM_ATA) += bestcomm-ata.o
+obj-$(CONFIG_PPC_BESTCOMM_FEC) += bestcomm-fec.o
+obj-$(CONFIG_PPC_BESTCOMM_GEN_BD) += bestcomm-gen-bd.o
+
diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c
new file mode 100644
index 000000000000..2fd87f83cf90
--- /dev/null
+++ b/drivers/dma/bestcomm/ata.c
@@ -0,0 +1,157 @@
+/*
+ * Bestcomm ATA task driver
+ *
+ *
+ * Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com>
+ * 2003-2004 (c) MontaVista, Software, Inc.
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 Freescale - John Rigby
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* ata task image */
+extern u32 bcom_ata_task[];
+
+/* ata task vars that need to be set before enabling the task */
+struct bcom_ata_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* ata task incs that need to be set before enabling the task */
+struct bcom_ata_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_src;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_ata_init(int queue_len, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_ata_var *var;
+ struct bcom_ata_inc *inc;
+
+ /* Prefetch breaks ATA DMA. Turn it off for ATA DMA */
+ bcom_disable_prefetch();
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ bcom_ata_reset_bd(tsk);
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_ata_task)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = maxbufsize;
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX);
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_init);
+
+void bcom_ata_rx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = 0;
+ inc->incr_dst = sizeof(u32);
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare);
+
+void bcom_ata_tx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_dst = 0;
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare);
+
+void bcom_ata_reset_bd(struct bcom_task *tsk)
+{
+ struct bcom_ata_var *var;
+
+ /* Reset all BD */
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ var->bd_start = var->bd_base;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_reset_bd);
+
+void bcom_ata_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the ATA tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_release);
+
+
+MODULE_DESCRIPTION("BestComm ATA task driver");
+MODULE_AUTHOR("John Rigby");
+MODULE_LICENSE("GPL v2");
+
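A hedged sketch of the calling sequence an MPC5200 ATA host driver would use with the task API above; queue length and buffer size are illustrative, and only functions exported by this file and by bestcomm.c are used.

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/ata.h>

static struct bcom_task *example_ata_dma_setup(void)
{
	struct bcom_task *tsk;

	tsk = bcom_ata_init(16, 512);	/* 16 descriptors, 512-byte buffers */
	if (!tsk)
		return NULL;

	/* Pick the transfer direction before enabling the task */
	bcom_ata_rx_prepare(tsk);	/* or bcom_ata_tx_prepare(tsk) */
	bcom_enable(tsk);
	return tsk;
}

static void example_ata_dma_teardown(struct bcom_task *tsk)
{
	bcom_disable(tsk);
	bcom_ata_release(tsk);
}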
diff --git a/drivers/dma/bestcomm/bcom_ata_task.c b/drivers/dma/bestcomm/bcom_ata_task.c
new file mode 100644
index 000000000000..cc6049a4e469
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_ata_task.c
@@ -0,0 +1,67 @@
+/*
+ * Bestcomm ATA task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Created based on bestcom/code_dma/image_rtos1/dma_image.hex
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_ata_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0e060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */
+ 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */
+ 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */
+ 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */
+ 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */
+ 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */
+ 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */
+ 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */
+ 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */
+ 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa000000c,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+};
+
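The image header layout described in the comment above is what bcom_load_image() (in bestcomm.c, later in this series of files) parses. As a C struct it looks like the sketch below; the kernel's own definition is struct bcom_task_header in bestcomm_priv.h, so this copy is only an illustration.

#include <linux/types.h>

struct example_bcom_task_header {
	u32 magic;		/* BCOM_TASK_MAGIC, checked by bcom_load_image() */
	u8  desc_size;		/* number of 32-bit descriptor words */
	u8  var_size;		/* number of 32-bit variable words */
	u8  inc_size;		/* number of 32-bit increment words */
	u8  first_var;		/* index of the first variable to load */
	u8  reserved[8];
};				/* descriptors, vars and incs follow in that order */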
diff --git a/drivers/dma/bestcomm/bcom_fec_rx_task.c b/drivers/dma/bestcomm/bcom_fec_rx_task.c
new file mode 100644
index 000000000000..a1ad6a02fcef
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_fec_rx_task.c
@@ -0,0 +1,78 @@
+/*
+ * Bestcomm FEC RX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:38 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x18060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */
+ 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */
+ 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */
+ 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */
+ 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */
+ 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */
+ 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */
+ 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */
+ 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */
+ 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */
+ 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */
+ 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */
+ 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000008,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_fec_tx_task.c b/drivers/dma/bestcomm/bcom_fec_tx_task.c
new file mode 100644
index 000000000000..b1c495c3a65a
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_fec_tx_task.c
@@ -0,0 +1,91 @@
+/*
+ * Bestcomm FEC TX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:29 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x2407070d,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */
+ 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */
+ 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */
+ 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */
+ 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */
+ 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */
+ 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */
+ 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */
+ 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */
+ 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */
+ 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */
+ 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */
+ 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */
+ 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */
+ 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */
+ 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */
+ 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */
+ 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */
+ 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[13]-VAR[19] */
+ 0x0c000000,
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000004,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c b/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
new file mode 100644
index 000000000000..efee022b0256
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
@@ -0,0 +1,63 @@
+/*
+ * Bestcomm GenBD RX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0d020409,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */
+ 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[10] */
+ 0x40000000,
+ 0x7fff7fff,
+
+ /* INC[0]-INC[3] */
+ 0x40000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c b/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
new file mode 100644
index 000000000000..c605aa42ecbb
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
@@ -0,0 +1,69 @@
+/*
+ * Bestcomm GenBD TX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0f040609,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */
+ 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */
+ 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */
+ 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[12] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x40000004,
+
+ /* INC[0]-INC[5] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
new file mode 100644
index 000000000000..a8c2e2994d2e
--- /dev/null
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -0,0 +1,531 @@
+/*
+ * Driver for MPC52xx processor BestComm peripheral controller
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2005 Varma Electronics Oy,
+ * ( by Andrey Volkov <avolkov@varma-el.com> )
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mpc52xx.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include "linux/fsl/bestcomm/bestcomm.h"
+
+#define DRIVER_NAME "bestcomm-core"
+
+/* MPC5200 device tree match tables */
+static struct of_device_id mpc52xx_sram_ids[] = {
+ { .compatible = "fsl,mpc5200-sram", },
+ { .compatible = "mpc5200-sram", },
+ {}
+};
+
+
+struct bcom_engine *bcom_eng = NULL;
+EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */
+
+/* ======================================================================== */
+/* Public and private API */
+/* ======================================================================== */
+
+/* Private API */
+
+struct bcom_task *
+bcom_task_alloc(int bd_count, int bd_size, int priv_size)
+{
+ int i, tasknum = -1;
+ struct bcom_task *tsk;
+
+ /* Don't try to do anything if bestcomm init failed */
+ if (!bcom_eng)
+ return NULL;
+
+ /* Get and reserve a task num */
+ spin_lock(&bcom_eng->lock);
+
+ for (i=0; i<BCOM_MAX_TASKS; i++)
+ if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */
+ bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */
+ tasknum = i;
+ break;
+ }
+
+ spin_unlock(&bcom_eng->lock);
+
+ if (tasknum < 0)
+ return NULL;
+
+ /* Allocate our structure */
+ tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
+ if (!tsk)
+ goto error;
+
+ tsk->tasknum = tasknum;
+ if (priv_size)
+ tsk->priv = (void*)tsk + sizeof(struct bcom_task);
+
+ /* Get IRQ of that task */
+ tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
+ if (tsk->irq == NO_IRQ)
+ goto error;
+
+ /* Init the BDs, if needed */
+ if (bd_count) {
+ tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL);
+ if (!tsk->cookie)
+ goto error;
+
+ tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
+ if (!tsk->bd)
+ goto error;
+ memset(tsk->bd, 0x00, bd_count * bd_size);
+
+ tsk->num_bd = bd_count;
+ tsk->bd_size = bd_size;
+ }
+
+ return tsk;
+
+error:
+ if (tsk) {
+ if (tsk->irq != NO_IRQ)
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+ }
+
+ bcom_eng->tdt[tasknum].stop = 0;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(bcom_task_alloc);
+
+void
+bcom_task_free(struct bcom_task *tsk)
+{
+ /* Stop the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Clear TDT */
+ bcom_eng->tdt[tsk->tasknum].start = 0;
+ bcom_eng->tdt[tsk->tasknum].stop = 0;
+
+ /* Free everything */
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_task_free);
+
+int
+bcom_load_image(int task, u32 *task_image)
+{
+ struct bcom_task_header *hdr = (struct bcom_task_header *)task_image;
+ struct bcom_tdt *tdt;
+ u32 *desc, *var, *inc;
+ u32 *desc_src, *var_src, *inc_src;
+
+ /* Safety checks */
+ if (hdr->magic != BCOM_TASK_MAGIC) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid microcode\n");
+ return -EINVAL;
+ }
+
+ if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid task %d\n", task);
+ return -EINVAL;
+ }
+
+ /* Initial load or reload */
+ tdt = &bcom_eng->tdt[task];
+
+ if (tdt->start) {
+ desc = bcom_task_desc(task);
+ if (hdr->desc_size != bcom_task_num_descs(task)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to reload wrong task image "
+ "(%d size %d/%d)!\n",
+ task,
+ hdr->desc_size,
+ bcom_task_num_descs(task));
+ return -EINVAL;
+ }
+ } else {
+ phys_addr_t start_pa;
+
+ desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa);
+ if (!desc)
+ return -ENOMEM;
+
+ tdt->start = start_pa;
+ tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32));
+ }
+
+ var = bcom_task_var(task);
+ inc = bcom_task_inc(task);
+
+ /* Clear & copy */
+ memset(var, 0x00, BCOM_VAR_SIZE);
+ memset(inc, 0x00, BCOM_INC_SIZE);
+
+ desc_src = (u32 *)(hdr + 1);
+ var_src = desc_src + hdr->desc_size;
+ inc_src = var_src + hdr->var_size;
+
+ memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
+ memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
+ memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_load_image);
+
+void
+bcom_set_initiator(int task, int initiator)
+{
+ int i;
+ int num_descs;
+ u32 *desc;
+ int next_drd_has_initiator;
+
+ bcom_set_tcr_initiator(task, initiator);
+
+ /* Just setting the TCR is apparently not enough due to some problem */
+ /* with it, so we go through all of the microcode and patch the */
+ /* initiator directly in the DRDs. */
+
+ desc = bcom_task_desc(task);
+ next_drd_has_initiator = 1;
+ num_descs = bcom_task_num_descs(task);
+
+ for (i=0; i<num_descs; i++, desc++) {
+ if (!bcom_desc_is_drd(*desc))
+ continue;
+ if (next_drd_has_initiator)
+ if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS)
+ bcom_set_desc_initiator(desc, initiator);
+ next_drd_has_initiator = !bcom_drd_is_extended(*desc);
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_set_initiator);
+
+
+/* Public API */
+
+void
+bcom_enable(struct bcom_task *tsk)
+{
+ bcom_enable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_enable);
+
+void
+bcom_disable(struct bcom_task *tsk)
+{
+ bcom_disable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_disable);
+
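+/*
+ * Illustrative sketch only, not part of the driver: how a client would
+ * typically combine the calls exported above. "my_task_image" stands for
+ * one of the microcode arrays (e.g. bcom_fec_rx_task) and the BD count and
+ * size are arbitrary placeholders; real users normally go through the
+ * task-specific wrappers in fec.c / gen_bd.c instead of calling these
+ * directly.
+ */
+#if 0
+static struct bcom_task *my_task_setup(u32 *my_task_image)
+{
+ struct bcom_task *tsk;
+
+ /* 32 BDs of 8 bytes each, no private data */
+ tsk = bcom_task_alloc(32, 8, 0);
+ if (!tsk)
+ return NULL;
+
+ /* Load the task microcode, then start the task */
+ if (bcom_load_image(tsk->tasknum, my_task_image)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+ bcom_enable(tsk);
+
+ return tsk;
+}
+#endif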
+
+/* ======================================================================== */
+/* Engine init/cleanup */
+/* ======================================================================== */
+
+/* Function Descriptor table */
+/* this will need to be updated if Freescale changes their task code FDT */
+static u32 fdt_ops[] = {
+ 0xa0045670, /* FDT[48] - load_acc() */
+ 0x80045670, /* FDT[49] - unload_acc() */
+ 0x21800000, /* FDT[50] - and() */
+ 0x21e00000, /* FDT[51] - or() */
+ 0x21500000, /* FDT[52] - xor() */
+ 0x21400000, /* FDT[53] - andn() */
+ 0x21500000, /* FDT[54] - not() */
+ 0x20400000, /* FDT[55] - add() */
+ 0x20500000, /* FDT[56] - sub() */
+ 0x20800000, /* FDT[57] - lsh() */
+ 0x20a00000, /* FDT[58] - rsh() */
+ 0xc0170000, /* FDT[59] - crc8() */
+ 0xc0145670, /* FDT[60] - crc16() */
+ 0xc0345670, /* FDT[61] - crc32() */
+ 0xa0076540, /* FDT[62] - endian32() */
+ 0xa0000760, /* FDT[63] - endian16() */
+};
+
+
+static int bcom_engine_init(void)
+{
+ int task;
+ phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
+ unsigned int tdt_size, ctx_size, var_size, fdt_size;
+
+ /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
+ tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
+ ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
+ var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
+ fdt_size = BCOM_FDT_SIZE;
+
+ bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
+ bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
+ bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
+ bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);
+
+ if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
+ printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");
+
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+
+ return -ENOMEM;
+ }
+
+ memset(bcom_eng->tdt, 0x00, tdt_size);
+ memset(bcom_eng->ctx, 0x00, ctx_size);
+ memset(bcom_eng->var, 0x00, var_size);
+ memset(bcom_eng->fdt, 0x00, fdt_size);
+
+ /* Copy the FDT for the EU#3 */
+ memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
+
+ /* Initialize Task base structure */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+
+ bcom_eng->tdt[task].context = ctx_pa;
+ bcom_eng->tdt[task].var = var_pa;
+ bcom_eng->tdt[task].fdt = fdt_pa;
+
+ var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
+ ctx_pa += BCOM_CTX_SIZE;
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, tdt_pa);
+
+ /* Init 'always' initiator */
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
+
+ /* Disable COMM Bus Prefetch on the original 5200; it's broken */
+ if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
+ bcom_disable_prefetch();
+
+ /* Init lock */
+ spin_lock_init(&bcom_eng->lock);
+
+ return 0;
+}
+
+static void
+bcom_engine_cleanup(void)
+{
+ int task;
+
+ /* Stop all tasks */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, 0ul);
+
+ /* Release the SRAM zones */
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+}
+
+
+/* ======================================================================== */
+/* OF platform driver */
+/* ======================================================================== */
+
+static int mpc52xx_bcom_probe(struct platform_device *op)
+{
+ struct device_node *ofn_sram;
+ struct resource res_bcom;
+
+ int rv;
+
+ /* Inform user we're ok so far */
+ printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
+
+ /* Get the bestcomm node */
+ of_node_get(op->dev.of_node);
+
+ /* Prepare SRAM */
+ ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
+ if (!ofn_sram) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "No SRAM found in device tree\n");
+ rv = -ENODEV;
+ goto error_ofput;
+ }
+ rv = bcom_sram_init(ofn_sram, DRIVER_NAME);
+ of_node_put(ofn_sram);
+
+ if (rv) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Error in SRAM init\n");
+ goto error_ofput;
+ }
+
+ /* Get a clean struct */
+ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
+ if (!bcom_eng) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't allocate state structure\n");
+ rv = -ENOMEM;
+ goto error_sramclean;
+ }
+
+ /* Save the node */
+ bcom_eng->ofnode = op->dev.of_node;
+
+ /* Get, reserve & map io */
+ if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't get resource\n");
+ rv = -EINVAL;
+ goto error_sramclean;
+ }
+
+ if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
+ DRIVER_NAME)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't request registers region\n");
+ rv = -EBUSY;
+ goto error_sramclean;
+ }
+
+ bcom_eng->regs_base = res_bcom.start;
+ bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma));
+ if (!bcom_eng->regs) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't map registers\n");
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Now, do the real init */
+ rv = bcom_engine_init();
+ if (rv)
+ goto error_unmap;
+
+ /* Done ! */
+ printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n",
+ (long)bcom_eng->regs_base);
+
+ return 0;
+
+ /* Error path */
+error_unmap:
+ iounmap(bcom_eng->regs);
+error_release:
+ release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma));
+error_sramclean:
+ kfree(bcom_eng);
+ bcom_sram_cleanup();
+error_ofput:
+ of_node_put(op->dev.of_node);
+
+ printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
+
+ return rv;
+}
+
+
+static int mpc52xx_bcom_remove(struct platform_device *op)
+{
+ /* Clean up the engine */
+ bcom_engine_cleanup();
+
+ /* Cleanup SRAM */
+ bcom_sram_cleanup();
+
+ /* Release regs */
+ iounmap(bcom_eng->regs);
+ release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma));
+
+ /* Release the node */
+ of_node_put(bcom_eng->ofnode);
+
+ /* Release memory */
+ kfree(bcom_eng);
+ bcom_eng = NULL;
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_bcom_of_match[] = {
+ { .compatible = "fsl,mpc5200-bestcomm", },
+ { .compatible = "mpc5200-bestcomm", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
+
+
+static struct platform_driver mpc52xx_bcom_of_platform_driver = {
+ .probe = mpc52xx_bcom_probe,
+ .remove = mpc52xx_bcom_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_bcom_of_match,
+ },
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_bcom_init(void)
+{
+ return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
+}
+
+static void __exit
+mpc52xx_bcom_exit(void)
+{
+ platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
+}
+
+/* If we're not a module, we must make sure everything is set up before */
+/* anyone tries to use us ... that's why we use subsys_initcall instead */
+/* of module_init. */
+subsys_initcall(mpc52xx_bcom_init);
+module_exit(mpc52xx_bcom_exit);
+
+MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA");
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/bestcomm/fec.c b/drivers/dma/bestcomm/fec.c
new file mode 100644
index 000000000000..7f1fb1c999e4
--- /dev/null
+++ b/drivers/dma/bestcomm/fec.c
@@ -0,0 +1,270 @@
+/*
+ * Bestcomm FEC tasks driver
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/fec.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* fec tasks images */
+extern u32 bcom_fec_rx_task[];
+extern u32 bcom_fec_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_fec_rx_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 fifo; /* (u32*) address of fec's fifo */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_fec_rx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_dst_ma;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_fec_tx_var {
+ u32 DRD; /* (u32*) address of self-modified DRD */
+ u32 fifo; /* (u32*) address of fec's fifo */
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_fec_tx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_src;
+ u16 pad2;
+ s16 incr_src_ma;
+};
+
+/* private structure in the task */
+struct bcom_fec_priv {
+ phys_addr_t fifo;
+ int maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_fec_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+ sizeof(struct bcom_fec_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->maxbufsize = maxbufsize;
+
+ if (bcom_fec_rx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_init);
+
+int
+bcom_fec_rx_reset(struct bcom_task *tsk)
+{
+ struct bcom_fec_priv *priv = tsk->priv;
+ struct bcom_fec_rx_var *var;
+ struct bcom_fec_rx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = priv->maxbufsize;
+
+ inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
+ inc->incr_dst = sizeof(u32); /* task image, but we stick */
+ inc->incr_dst_ma= sizeof(u8); /* to the official ones */
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_reset);
+
+void
+bcom_fec_rx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the FEC tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_release);
+
+
+
+/* Return the 2nd to last DRD.
+ * This is an ugly hack, but at least it's only done once, at
+ * initialization. */
+static u32 *self_modified_drd(int tasknum)
+{
+ u32 *desc;
+ int num_descs;
+ int drd_count;
+ int i;
+
+ num_descs = bcom_task_num_descs(tasknum);
+ desc = bcom_task_desc(tasknum) + num_descs - 1;
+ drd_count = 0;
+ for (i=0; i<num_descs; i++, desc--)
+ if (bcom_desc_is_drd(*desc) && ++drd_count == 3)
+ break;
+ return desc;
+}
+
+struct bcom_task *
+bcom_fec_tx_init(int queue_len, phys_addr_t fifo)
+{
+ struct bcom_task *tsk;
+ struct bcom_fec_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+ sizeof(struct bcom_fec_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_ENABLE_TASK;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+
+ if (bcom_fec_tx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_init);
+
+int
+bcom_fec_tx_reset(struct bcom_task *tsk)
+{
+ struct bcom_fec_priv *priv = tsk->priv;
+ struct bcom_fec_tx_var *var;
+ struct bcom_fec_tx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->DRD = bcom_sram_va2pa(self_modified_drd(tsk->tasknum));
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+
+ inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
+ inc->incr_src = sizeof(u32); /* task image, but we stick */
+ inc->incr_src_ma= sizeof(u8); /* to the official ones */
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_reset);
+
+void
+bcom_fec_tx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the FEC tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_release);
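+
+/*
+ * Illustrative sketch only, not part of the driver: how the MPC52xx FEC
+ * ethernet driver would typically bring up its two tasks. The queue length,
+ * FEC_RECV_BUFFER_SIZE and the fifo arguments are placeholders; the fifo
+ * arguments must be the physical addresses of the FEC FIFO data registers.
+ */
+#if 0
+static int my_fec_dma_setup(phys_addr_t rx_fifo, phys_addr_t tx_fifo,
+ struct bcom_task **rx, struct bcom_task **tx)
+{
+ *rx = bcom_fec_rx_init(64, rx_fifo, FEC_RECV_BUFFER_SIZE);
+ *tx = bcom_fec_tx_init(64, tx_fifo);
+ if (!*rx || !*tx) {
+ if (*rx)
+ bcom_fec_rx_release(*rx);
+ if (*tx)
+ bcom_fec_tx_release(*tx);
+ return -ENOMEM;
+ }
+ return 0;
+}
+#endif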
+
+
+MODULE_DESCRIPTION("BestComm FEC tasks driver");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c
new file mode 100644
index 000000000000..1a5b22d88127
--- /dev/null
+++ b/drivers/dma/bestcomm/gen_bd.c
@@ -0,0 +1,354 @@
+/*
+ * Driver for MPC52xx processor BestComm General Buffer Descriptor
+ *
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/gen_bd.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* gen_bd tasks images */
+extern u32 bcom_gen_bd_rx_task[];
+extern u32 bcom_gen_bd_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_rx_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 fifo; /* (u32*) address of gen_bd's fifo */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_rx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_tx_var {
+ u32 fifo; /* (u32*) address of gen_bd's fifo */
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_tx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_src;
+ u16 pad2;
+ s16 incr_src_ma;
+};
+
+/* private structure */
+struct bcom_gen_bd_priv {
+ phys_addr_t fifo;
+ int initiator;
+ int ipr;
+ int maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_gen_bd_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+ sizeof(struct bcom_gen_bd_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->initiator = initiator;
+ priv->ipr = ipr;
+ priv->maxbufsize = maxbufsize;
+
+ if (bcom_gen_bd_rx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init);
+
+int
+bcom_gen_bd_rx_reset(struct bcom_task *tsk)
+{
+ struct bcom_gen_bd_priv *priv = tsk->priv;
+ struct bcom_gen_bd_rx_var *var;
+ struct bcom_gen_bd_rx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = priv->maxbufsize;
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_dst = sizeof(u32);
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+ bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset);
+
+void
+bcom_gen_bd_rx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the GenBD tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release);
+
+
+struct bcom_task *
+bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr)
+{
+ struct bcom_task *tsk;
+ struct bcom_gen_bd_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+ sizeof(struct bcom_gen_bd_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->initiator = initiator;
+ priv->ipr = ipr;
+
+ if (bcom_gen_bd_tx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init);
+
+int
+bcom_gen_bd_tx_reset(struct bcom_task *tsk)
+{
+ struct bcom_gen_bd_priv *priv = tsk->priv;
+ struct bcom_gen_bd_tx_var *var;
+ struct bcom_gen_bd_tx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_src_ma = sizeof(u8);
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+ bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset);
+
+void
+bcom_gen_bd_tx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the GenBD tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release);
+
+/* ---------------------------------------------------------------------
+ * PSC support code
+ */
+
+/**
+ * struct bcom_psc_params - Bestcomm initialization value table for PSC devices
+ *
+ * This structure is only used internally. It is a lookup table for PSC
+ * specific parameters to bestcomm tasks.
+ */
+static struct bcom_psc_params {
+ int rx_initiator;
+ int rx_ipr;
+ int tx_initiator;
+ int tx_ipr;
+} bcom_psc_params[] = {
+ [0] = {
+ .rx_initiator = BCOM_INITIATOR_PSC1_RX,
+ .rx_ipr = BCOM_IPR_PSC1_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC1_TX,
+ .tx_ipr = BCOM_IPR_PSC1_TX,
+ },
+ [1] = {
+ .rx_initiator = BCOM_INITIATOR_PSC2_RX,
+ .rx_ipr = BCOM_IPR_PSC2_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC2_TX,
+ .tx_ipr = BCOM_IPR_PSC2_TX,
+ },
+ [2] = {
+ .rx_initiator = BCOM_INITIATOR_PSC3_RX,
+ .rx_ipr = BCOM_IPR_PSC3_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC3_TX,
+ .tx_ipr = BCOM_IPR_PSC3_TX,
+ },
+ [3] = {
+ .rx_initiator = BCOM_INITIATOR_PSC4_RX,
+ .rx_ipr = BCOM_IPR_PSC4_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC4_TX,
+ .tx_ipr = BCOM_IPR_PSC4_TX,
+ },
+ [4] = {
+ .rx_initiator = BCOM_INITIATOR_PSC5_RX,
+ .rx_ipr = BCOM_IPR_PSC5_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC5_TX,
+ .tx_ipr = BCOM_IPR_PSC5_TX,
+ },
+ [5] = {
+ .rx_initiator = BCOM_INITIATOR_PSC6_RX,
+ .rx_ipr = BCOM_IPR_PSC6_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC6_TX,
+ .tx_ipr = BCOM_IPR_PSC6_TX,
+ },
+};
+
+/**
+ * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ * @maxbufsize: Maximum receive data size in bytes.
+ *
+ * Allocate a bestcomm task structure for receiving data from a PSC.
+ */
+struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
+ phys_addr_t fifo, int maxbufsize)
+{
+ if (psc_num >= MPC52xx_PSC_MAXNUM)
+ return NULL;
+
+ return bcom_gen_bd_rx_init(queue_len, fifo,
+ bcom_psc_params[psc_num].rx_initiator,
+ bcom_psc_params[psc_num].rx_ipr,
+ maxbufsize);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init);
+
+/**
+ * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ *
+ * Allocate a bestcomm task structure for transmitting data to a PSC.
+ */
+struct bcom_task *
+bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo)
+{
+ return bcom_gen_bd_tx_init(queue_len, fifo,
+ bcom_psc_params[psc_num].tx_initiator,
+ bcom_psc_params[psc_num].tx_ipr);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init);
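+
+/*
+ * Illustrative sketch only, not part of the driver: a PSC (UART/SPI/I2S)
+ * driver would pass the physical address of its PSC FIFO data register when
+ * allocating its rx/tx tasks. psc_fifo_pa and MY_MAX_RX_SIZE are
+ * hypothetical placeholders.
+ */
+#if 0
+ struct bcom_task *rx, *tx;
+
+ /* psc_num 0 selects PSC1, see bcom_psc_params[] above */
+ rx = bcom_psc_gen_bd_rx_init(0, 16, psc_fifo_pa, MY_MAX_RX_SIZE);
+ tx = bcom_psc_gen_bd_tx_init(0, 16, psc_fifo_pa);
+#endif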
+
+
+MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver");
+MODULE_AUTHOR("Jeff Gibbons <jeff.gibbons@appspec.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
new file mode 100644
index 000000000000..5e2ed30ba2c4
--- /dev/null
+++ b/drivers/dma/bestcomm/sram.c
@@ -0,0 +1,178 @@
+/*
+ * Simple memory allocator for on-board SRAM
+ *
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+#include <asm/io.h>
+#include <asm/mmu.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+
+
+/* Struct keeping our 'state' */
+struct bcom_sram *bcom_sram = NULL;
+EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */
+
+
+/* ======================================================================== */
+/* Public API */
+/* ======================================================================== */
+/* DO NOT USE in interrupt context; if this is ever needed from an IRQ
+ handler, the _irqsave variants of the spin locks must be used. */
+
+int bcom_sram_init(struct device_node *sram_node, char *owner)
+{
+ int rv;
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+ unsigned int psize;
+
+ /* Create our state struct */
+ if (bcom_sram) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Already initialized !\n", owner);
+ return -EBUSY;
+ }
+
+ bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
+ if (!bcom_sram) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Couldn't allocate internal state !\n", owner);
+ return -ENOMEM;
+ }
+
+ /* Get address and size of the sram */
+ regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Invalid device node !\n", owner);
+ rv = -EINVAL;
+ goto error_free;
+ }
+
+ regaddr64 = of_translate_address(sram_node, regaddr_p);
+
+ bcom_sram->base_phys = (phys_addr_t) regaddr64;
+ bcom_sram->size = (unsigned int) size64;
+
+ /* Request region */
+ if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Couldn't request region !\n", owner);
+ rv = -EBUSY;
+ goto error_free;
+ }
+
+ /* Map SRAM */
+ /* sram is not really __iomem */
+ bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
+
+ if (!bcom_sram->base_virt) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Map error SRAM zone 0x%08lx (0x%0x)!\n",
+ owner, (long)bcom_sram->base_phys, bcom_sram->size );
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Create an rheap (defaults to 32 bits word alignment) */
+ bcom_sram->rh = rh_create(4);
+
+ /* Attach the free zones */
+#if 0
+ /* Currently disabled ... for future use only */
+ regaddr_p = of_get_property(sram_node, "available", &psize);
+#else
+ regaddr_p = NULL;
+ psize = 0;
+#endif
+
+ if (!regaddr_p || !psize) {
+ /* Attach the whole zone */
+ rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
+ } else {
+ /* Attach each zone independently */
+ while (psize >= 2 * sizeof(u32)) {
+ phys_addr_t zbase = of_translate_address(sram_node, regaddr_p);
+ rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]);
+ regaddr_p += 2;
+ psize -= 2 * sizeof(u32);
+ }
+ }
+
+ /* Init our spinlock */
+ spin_lock_init(&bcom_sram->lock);
+
+ return 0;
+
+error_release:
+ release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+error_free:
+ kfree(bcom_sram);
+ bcom_sram = NULL;
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_init);
+
+void bcom_sram_cleanup(void)
+{
+ /* Free resources */
+ if (bcom_sram) {
+ rh_destroy(bcom_sram->rh);
+ iounmap((void __iomem *)bcom_sram->base_virt);
+ release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+ kfree(bcom_sram);
+ bcom_sram = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_sram_cleanup);
+
+void* bcom_sram_alloc(int size, int align, phys_addr_t *phys)
+{
+ unsigned long offset;
+
+ spin_lock(&bcom_sram->lock);
+ offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
+ spin_unlock(&bcom_sram->lock);
+
+ if (IS_ERR_VALUE(offset))
+ return NULL;
+
+ *phys = bcom_sram->base_phys + offset;
+ return bcom_sram->base_virt + offset;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_alloc);
+
+void bcom_sram_free(void *ptr)
+{
+ unsigned long offset;
+
+ if (!ptr)
+ return;
+
+ offset = ptr - bcom_sram->base_virt;
+
+ spin_lock(&bcom_sram->lock);
+ rh_free(bcom_sram->rh, offset);
+ spin_unlock(&bcom_sram->lock);
+}
+EXPORT_SYMBOL_GPL(bcom_sram_free);
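+
+/*
+ * Illustrative sketch only, not part of the driver: bcom_sram_alloc()
+ * returns the kernel virtual address of the allocation and reports the
+ * matching physical address through its third argument, which is the value
+ * the DMA engine has to be programmed with. The sizes below are arbitrary.
+ */
+#if 0
+ phys_addr_t ring_pa;
+ void *ring;
+
+ ring = bcom_sram_alloc(64 * 8, 4, &ring_pa); /* 64 BDs of 8 bytes, 4-byte aligned */
+ if (ring) {
+ memset(ring, 0x00, 64 * 8);
+ /* ... hand ring_pa to the task, use ring on the CPU side ... */
+ bcom_sram_free(ring);
+ }
+#endif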
+
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index aa384e53b7ac..a2f079aca550 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -21,11 +21,1241 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
-#include <mach/coh901318.h>
+#include <linux/platform_data/dma-coh901318.h>
-#include "coh901318_lli.h"
+#include "coh901318.h"
#include "dmaengine.h"
+#define COH901318_MOD32_MASK (0x1F)
+#define COH901318_WORD_MASK (0xFFFFFFFF)
+/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
+#define COH901318_INT_STATUS1 (0x0000)
+#define COH901318_INT_STATUS2 (0x0004)
+/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_TC_INT_STATUS1 (0x0008)
+#define COH901318_TC_INT_STATUS2 (0x000C)
+/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_TC_INT_CLEAR1 (0x0010)
+#define COH901318_TC_INT_CLEAR2 (0x0014)
+/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
+#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
+/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
+#define COH901318_BE_INT_STATUS1 (0x0020)
+#define COH901318_BE_INT_STATUS2 (0x0024)
+/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_BE_INT_CLEAR1 (0x0028)
+#define COH901318_BE_INT_CLEAR2 (0x002C)
+/* RAW_BE_INT_STATUS - Raw Bus Error Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
+#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
+
+/*
+ * CX_CFG - Channel Configuration Registers 32bit (R/W)
+ */
+#define COH901318_CX_CFG (0x0100)
+#define COH901318_CX_CFG_SPACING (0x04)
+/* Channel enable activates the dma job */
+#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
+#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
+/* Request Mode */
+#define COH901318_CX_CFG_RM_MASK (0x00000006)
+#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
+#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
+#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
+/* Linked channel request field. Only valid when RM is 0x3 (binary 11) */
+#define COH901318_CX_CFG_LCRF_SHIFT 3
+#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
+#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
+/* Terminal Counter Interrupt Request Mask */
+#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
+#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
+/* Bus Error interrupt Mask */
+#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
+#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
+
+/*
+ * CX_STAT - Channel Status Registers 32bit (R/-)
+ */
+#define COH901318_CX_STAT (0x0200)
+#define COH901318_CX_STAT_SPACING (0x04)
+#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
+#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
+#define COH901318_CX_STAT_ACTIVE (0x00000002)
+#define COH901318_CX_STAT_ENABLED (0x00000001)
+
+/*
+ * CX_CTRL - Channel Control Registers 32bit (R/W)
+ */
+#define COH901318_CX_CTRL (0x0400)
+#define COH901318_CX_CTRL_SPACING (0x10)
+/* Transfer Count Enable */
+#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
+#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
+/* Transfer Count Value 0 - 4095 */
+#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
+/* Burst count */
+#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
+#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
+/* Source bus size */
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
+/* Source address increment */
+#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
+#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
+/* Destination Bus Size */
+#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
+/* Destination address increment */
+#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
+#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
+/* Master Mode (Master2 is only connected to MSL) */
+#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
+#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
+/* Terminal Count flag to PER enable */
+#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
+#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
+/* Terminal Count flags to CPU enable */
+#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
+#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
+/* Handshake to peripheral */
+#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
+#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
+#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
+#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
+/* DMA mode */
+#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
+#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
+/* Primary Request Data Destination */
+#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
+#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
+#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
+
+/*
+ * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_SRC_ADDR (0x0404)
+#define COH901318_CX_SRC_ADDR_SPACING (0x10)
+
+/*
+ * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
+ */
+#define COH901318_CX_DST_ADDR (0x0408)
+#define COH901318_CX_DST_ADDR_SPACING (0x10)
+
+/*
+ * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_LNK_ADDR (0x040C)
+#define COH901318_CX_LNK_ADDR_SPACING (0x10)
+#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
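+
+/*
+ * Illustrative sketch only: the CX_CTRL bit fields above are OR'ed into one
+ * 32bit control word per linked list item; the flags_memcpy_* defines
+ * further down build exactly such words for the memcpy channels. The value
+ * below (a 32bit-bus, 32-byte-burst, incrementing memory-to-memory
+ * descriptor) is hypothetical and not used by the driver; "nbr_of_elements"
+ * is a placeholder for the element count of this lli.
+ */
+#if 0
+ u32 ctrl = COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ (nbr_of_elements & COH901318_CX_CTRL_TC_VALUE_MASK);
+#endif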
+
+/**
+ * struct coh901318_params - parameters for DMAC configuration
+ * @config: DMA config register
+ * @ctrl_lli_last: DMA control register for the last lli in the list
+ * @ctrl_lli: DMA control register for an lli
+ * @ctrl_lli_chained: DMA control register for a chained lli
+ */
+struct coh901318_params {
+ u32 config;
+ u32 ctrl_lli_last;
+ u32 ctrl_lli;
+ u32 ctrl_lli_chained;
+};
+
+/**
+ * struct coh_dma_channel - dma channel base
+ * @name: ascii name of dma channel
+ * @number: channel id number
+ * @desc_nbr_max: number of preallocated descriptors
+ * @priority_high: channel priority; 0 means low priority, any other value high
+ * @param: configuration parameters
+ */
+struct coh_dma_channel {
+ const char name[32];
+ const int number;
+ const int desc_nbr_max;
+ const int priority_high;
+ const struct coh901318_params param;
+};
+
+/**
+ * struct powersave - DMA power save structure
+ * @lock: lock protecting data in this struct
+ * @started_channels: bit mask indicating active dma channels
+ */
+struct powersave {
+ spinlock_t lock;
+ u64 started_channels;
+};
+
+/* Lists all dma slave channels.
+ * Syntax is [A1, B1, A2, B2, ..., -1, -1]: each (A, B) pair selects all
+ * channels from A to B inclusive; the end of the list is marked by -1, -1.
+ * (A membership-check sketch follows the channel tables below.)
+ */
+static int dma_slave_channels[] = {
+ U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
+ U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
+
+/* Lists all dma memcpy channels, using the same pair syntax. */
+static int dma_memcpy_channels[] = {
+ U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
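+
+/*
+ * Illustrative sketch only (the driver's own lookup may differ): how the
+ * (start, end, ..., -1, -1) pair syntax above can be interpreted.
+ * chan_in_ranges() is a hypothetical helper, not part of this file.
+ */
+#if 0
+static bool chan_in_ranges(const int *ranges, int chan)
+{
+ int i;
+
+ for (i = 0; ranges[i] != -1; i += 2)
+ if (chan >= ranges[i] && chan <= ranges[i + 1])
+ return true;
+ return false;
+}
+/* e.g. chan_in_ranges(dma_memcpy_channels, U300_DMA_GENERAL_PURPOSE_0) is true */
+#endif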
+
+#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
+ COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
+ COH901318_CX_CFG_LCR_DISABLE | \
+ COH901318_CX_CFG_TC_IRQ_ENABLE | \
+ COH901318_CX_CFG_BE_IRQ_ENABLE)
+#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_DISABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_DISABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_ENABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+
+const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
+ {
+ .number = U300_DMA_MSL_TX_0,
+ .name = "MSL TX 0",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_TX_1,
+ .name = "MSL TX 1",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_2,
+ .name = "MSL TX 2",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .desc_nbr_max = 10,
+ },
+ {
+ .number = U300_DMA_MSL_TX_3,
+ .name = "MSL TX 3",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_4,
+ .name = "MSL TX 4",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_5,
+ .name = "MSL TX 5",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_TX_6,
+ .name = "MSL TX 6",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_RX_0,
+ .name = "MSL RX 0",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_RX_1,
+ .name = "MSL RX 1",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_2,
+ .name = "MSL RX 2",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_3,
+ .name = "MSL RX 3",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_4,
+ .name = "MSL RX 4",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_5,
+ .name = "MSL RX 5",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_6,
+ .name = "MSL RX 6",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_MMCSD_RX_TX,
+ .name = "MMCSD RX TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
+ },
+ {
+ .number = U300_DMA_MSPRO_TX,
+ .name = "MSPRO TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSPRO_RX,
+ .name = "MSPRO RX",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_UART0_TX,
+ .name = "UART0 TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_UART0_RX,
+ .name = "UART0 RX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_APEX_TX,
+ .name = "APEX TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_APEX_RX,
+ .name = "APEX RX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_PCM_I2S0_TX,
+ .name = "PCM I2S0 TX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_PCM_I2S0_RX,
+ .name = "PCM I2S0 RX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_PCM_I2S1_TX,
+ .name = "PCM I2S1 TX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_PCM_I2S1_RX,
+ .name = "PCM I2S1 RX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_XGAM_CDI,
+ .name = "XGAM CDI",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_XGAM_PDI,
+ .name = "XGAM PDI",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_SPI_TX,
+ .name = "SPI TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_SPI_RX,
+ .name = "SPI RX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_0,
+ .name = "GENERAL 00",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_1,
+ .name = "GENERAL 01",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_2,
+ .name = "GENERAL 02",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_3,
+ .name = "GENERAL 03",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_4,
+ .name = "GENERAL 04",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_5,
+ .name = "GENERAL 05",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_6,
+ .name = "GENERAL 06",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_7,
+ .name = "GENERAL 07",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_8,
+ .name = "GENERAL 08",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_UART1_TX,
+ .name = "UART1 TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_UART1_RX,
+ .name = "UART1 RX",
+ .priority_high = 0,
+ }
+};
+
#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
#ifdef VERBOSE_DEBUG
@@ -54,7 +1284,6 @@ struct coh901318_base {
struct dma_device dma_slave;
struct dma_device dma_memcpy;
struct coh901318_chan *chans;
- struct coh901318_platform *platform;
};
struct coh901318_chan {
@@ -75,8 +1304,8 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
- u32 runtime_addr;
- u32 runtime_ctrl;
+ u32 addr;
+ u32 ctrl;
struct coh901318_base *base;
};
@@ -122,7 +1351,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
- for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
+ for (i = 0; i < U300_DMA_CHANNELS; i++)
if (started_channels & (1 << i))
tmp += sprintf(tmp, "channel %d\n", i);
@@ -187,25 +1416,16 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan);
}
-static inline dma_addr_t
-cohc_dev_addr(struct coh901318_chan *cohc)
-{
- /* Runtime supplied address will take precedence */
- if (cohc->runtime_addr)
- return cohc->runtime_addr;
- return cohc->base->platform->chan_conf[cohc->id].dev_addr;
-}
-
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
- return &cohc->base->platform->chan_conf[cohc->id].param;
+ return &chan_config[cohc->id].param;
}
static inline const struct coh_dma_channel *
cohc_chan_conf(struct coh901318_chan *cohc)
{
- return &cohc->base->platform->chan_conf[cohc->id];
+ return &chan_config[cohc->id];
}
static void enable_powersave(struct coh901318_chan *cohc)
@@ -217,12 +1437,6 @@ static void enable_powersave(struct coh901318_chan *cohc)
pm->started_channels &= ~(1ULL << cohc->id);
- if (!pm->started_channels) {
- /* DMA no longer intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- false);
- }
-
spin_unlock_irqrestore(&pm->lock, flags);
}
static void disable_powersave(struct coh901318_chan *cohc)
@@ -232,12 +1446,6 @@ static void disable_powersave(struct coh901318_chan *cohc)
spin_lock_irqsave(&pm->lock, flags);
- if (!pm->started_channels) {
- /* DMA intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- true);
- }
-
pm->started_channels |= (1ULL << cohc->id);
spin_unlock_irqrestore(&pm->lock, flags);
@@ -596,7 +1804,7 @@ static int coh901318_config(struct coh901318_chan *cohc,
if (param)
p = param;
else
- p = &cohc->base->platform->chan_conf[channel].param;
+ p = cohc_chan_param(cohc);
/* Clear any pending BE or TC interrupt */
if (channel < 32) {
@@ -1052,9 +2260,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
* sure the bits you set per peripheral channel are
* cleared in the default config from the platform.
*/
- ctrl_chained |= cohc->runtime_ctrl;
- ctrl_last |= cohc->runtime_ctrl;
- ctrl |= cohc->runtime_ctrl;
+ ctrl_chained |= cohc->ctrl;
+ ctrl_last |= cohc->ctrl;
+ ctrl |= cohc->ctrl;
if (direction == DMA_MEM_TO_DEV) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
@@ -1103,7 +2311,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
- cohc_dev_addr(cohc),
+ cohc->addr,
ctrl_chained,
ctrl,
ctrl_last,
@@ -1244,7 +2452,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
dma_addr_t addr;
enum dma_slave_buswidth addr_width;
u32 maxburst;
- u32 runtime_ctrl = 0;
+ u32 ctrl = 0;
int i = 0;
/* We only support mem to per or per to mem transfers */
@@ -1265,7 +2473,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
addr_width);
switch (addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
@@ -1277,7 +2485,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
@@ -1290,7 +2498,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
/* Direction doesn't matter here, it's 32/32 bits */
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
@@ -1307,13 +2515,13 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return;
}
- runtime_ctrl |= burst_sizes[i].reg;
+ ctrl |= burst_sizes[i].reg;
dev_dbg(COHC_2_DEV(cohc),
"selected burst size %d bytes for address width %d bytes, maxburst %d\n",
burst_sizes[i].burst_8bit, addr_width, maxburst);
- cohc->runtime_addr = addr;
- cohc->runtime_ctrl = runtime_ctrl;
+ cohc->addr = addr;
+ cohc->ctrl = ctrl;
}
static int
@@ -1431,7 +2639,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
static int __init coh901318_probe(struct platform_device *pdev)
{
int err = 0;
- struct coh901318_platform *pdata;
struct coh901318_base *base;
int irq;
struct resource *io;
@@ -1447,13 +2654,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
pdev->dev.driver->name) == NULL)
return -ENOMEM;
- pdata = pdev->dev.platform_data;
- if (!pdata)
- return -ENODEV;
-
base = devm_kzalloc(&pdev->dev,
ALIGN(sizeof(struct coh901318_base), 4) +
- pdata->max_channels *
+ U300_DMA_CHANNELS *
sizeof(struct coh901318_chan),
GFP_KERNEL);
if (!base)
@@ -1466,7 +2669,6 @@ static int __init coh901318_probe(struct platform_device *pdev)
return -ENOMEM;
base->dev = &pdev->dev;
- base->platform = pdata;
spin_lock_init(&base->pm.lock);
base->pm.started_channels = 0;
@@ -1488,7 +2690,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
return err;
/* init channels for device transfers */
- coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
+ coh901318_base_init(&base->dma_slave, dma_slave_channels,
base);
dma_cap_zero(base->dma_slave.cap_mask);
@@ -1508,7 +2710,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
goto err_register_slave;
/* init channels for memcpy */
- coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
+ coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels,
base);
dma_cap_zero(base->dma_memcpy.cap_mask);
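
The coh901318.c hunks above replace the old platform-data lookups with the built-in chan_config[] table and rename runtime_addr/runtime_ctrl to addr/ctrl. A minimal sketch of the resulting per-channel lookup; cohc, chan_config and the param fields come from the hunks, the local variable names are illustrative only:

	/* not part of the patch: parameters now come from the static
	 * chan_config[] table, indexed by channel id, with any runtime
	 * control bits from the dmaengine slave config OR:ed on top */
	const struct coh901318_params *p = &chan_config[cohc->id].param;
	u32 ctrl_chained = p->ctrl_lli_chained | cohc->ctrl;
	u32 ctrl         = p->ctrl_lli         | cohc->ctrl;
	u32 ctrl_last    = p->ctrl_lli_last    | cohc->ctrl;
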
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318.h
index abff3714fdda..95ce1e2123ec 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318.h
@@ -1,16 +1,15 @@
/*
- * driver/dma/coh901318_lli.h
- *
- * Copyright (C) 2007-2009 ST-Ericsson
+ * Copyright (C) 2007-2013 ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Support functions for handling lli for coh901318
+ * DMA driver for COH 901 318
* Author: Per Friden <per.friden@stericsson.com>
*/
-#ifndef COH901318_LLI_H
-#define COH901318_LLI_H
+#ifndef COH901318_H
+#define COH901318_H
-#include <mach/coh901318.h>
+#define MAX_DMA_PACKET_SIZE_SHIFT 11
+#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
struct device;
@@ -24,7 +23,25 @@ struct coh901318_pool {
#endif
};
-struct device;
+/**
+ * struct coh901318_lli - linked list item for DMAC
+ * @control: control settings for DMAC
+ * @src_addr: transfer source address
+ * @dst_addr: transfer destination address
+ * @link_addr: physical address to next lli
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
+ * @phy_this: physical address of current lli (only used by pool_free)
+ */
+struct coh901318_lli {
+ u32 control;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ dma_addr_t link_addr;
+
+ void *virt_link_addr;
+ dma_addr_t phy_this;
+};
+
/**
* coh901318_pool_create() - Creates an dma pool for lli:s
* @pool: pool handle
@@ -121,4 +138,4 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
u32 ctrl, u32 ctrl_last,
enum dma_transfer_direction dir, u32 ctrl_irq_mask);
-#endif /* COH901318_LLI_H */
+#endif /* COH901318_H */
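
MAX_DMA_PACKET_SIZE in the renamed header works out to 2048 bytes (1 << 11). A hypothetical caller splitting a long transfer would size its lli chain from it, for example:

	/* illustrative only: number of linked list items needed when each
	 * item can move at most MAX_DMA_PACKET_SIZE bytes */
	unsigned int nbr_items = DIV_ROUND_UP(total_len, MAX_DMA_PACKET_SIZE);
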
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 780e0429b38c..3e96610e18e2 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -11,9 +11,9 @@
#include <linux/memory.h>
#include <linux/gfp.h>
#include <linux/dmapool.h>
-#include <mach/coh901318.h>
+#include <linux/dmaengine.h>
-#include "coh901318_lli.h"
+#include "coh901318.h"
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 24225f0fdcd8..64b048d7fba7 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -228,6 +228,20 @@ static void dmatest_callback(void *arg)
wake_up_all(done->wait);
}
+static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
+ unsigned int count)
+{
+ while (count--)
+ dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
+}
+
+static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
+ unsigned int count)
+{
+ while (count--)
+ dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -353,15 +367,35 @@ static int dmatest_func(void *data)
dma_srcs[i] = dma_map_single(dev->dev, buf, len,
DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev->dev, dma_srcs[i]);
+ if (ret) {
+ unmap_src(dev->dev, dma_srcs, len, i);
+ pr_warn("%s: #%u: mapping error %d with "
+ "src_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1, ret,
+ src_off, len);
+ failed_tests++;
+ continue;
+ }
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
for (i = 0; i < dst_cnt; i++) {
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
test_buf_size,
DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev->dev, dma_dsts[i]);
+ if (ret) {
+ unmap_src(dev->dev, dma_srcs, len, src_cnt);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
+ pr_warn("%s: #%u: mapping error %d with "
+ "dst_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1, ret,
+ dst_off, test_buf_size);
+ failed_tests++;
+ continue;
+ }
}
-
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dma_dsts[0] + dst_off,
@@ -383,13 +417,8 @@ static int dmatest_func(void *data)
}
if (!tx) {
- for (i = 0; i < src_cnt; i++)
- dma_unmap_single(dev->dev, dma_srcs[i], len,
- DMA_TO_DEVICE);
- for (i = 0; i < dst_cnt; i++)
- dma_unmap_single(dev->dev, dma_dsts[i],
- test_buf_size,
- DMA_BIDIRECTIONAL);
+ unmap_src(dev->dev, dma_srcs, len, src_cnt);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
pr_warning("%s: #%u: prep error with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
@@ -443,9 +472,7 @@ static int dmatest_func(void *data)
}
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
- for (i = 0; i < dst_cnt; i++)
- dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
- DMA_BIDIRECTIONAL);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
error_count = 0;
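
The dmatest.c hunks add dma_mapping_error() checks after every dma_map_single() and factor the unwind paths into the new unmap_src()/unmap_dst() helpers. The pattern being enforced, as a generic sketch with names taken from the hunks:

	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		/* unwind everything mapped so far, record the failure */
		unmap_src(dev, dma_srcs, len, i);
		failed_tests++;
		continue;
	}
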
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 8f0b111af4de..b33d1f6e1333 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -1489,9 +1490,9 @@ static int dw_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- regs = devm_request_and_ioremap(&pdev->dev, io);
- if (!regs)
- return -EBUSY;
+ regs = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
dw_params = dma_read_byaddr(regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
@@ -1634,7 +1635,7 @@ static int dw_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit dw_remove(struct platform_device *pdev)
+static int dw_remove(struct platform_device *pdev)
{
struct dw_dma *dw = platform_get_drvdata(pdev);
struct dw_dma_chan *dwc, *_dwc;
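
dw_dmac.c, and the imx-dma, mmp_pdma and mmp_tdma hunks further down, convert from devm_request_and_ioremap() to devm_ioremap_resource(). The new idiom, sketched:

	/* devm_ioremap_resource() returns an ERR_PTR() on failure, so the
	 * NULL check and hand-picked error code give way to
	 * IS_ERR()/PTR_ERR(); <linux/err.h> provides both, hence the new
	 * includes in those files */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(regs))
		return PTR_ERR(regs);
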
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 232b4583ae93..f424298f1ac5 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -585,7 +585,7 @@ err_reg1:
return ret;
}
-static int __devexit edma_remove(struct platform_device *pdev)
+static int edma_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct edma_cc *ecc = dev_get_drvdata(dev);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index dbf0e6f8de8a..70b8975d107e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -684,9 +685,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
break;
}
- imxdmac->hw_chaining = 1;
- if (!imxdma_hw_chain(imxdmac))
- return -EINVAL;
+ imxdmac->hw_chaining = 0;
+
imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
CCR_REN;
@@ -1011,9 +1011,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdma->devtype = pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imxdma->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!imxdma->base)
- return -EADDRNOTAVAIL;
+ imxdma->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(imxdma->base))
+ return PTR_ERR(imxdma->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index bc764afacd9b..a0de82e21a7c 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1308,7 +1308,7 @@ err_enable_device:
* Free up all resources and data
* Call shutdown_dma to complete contoller and chan cleanup
*/
-static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
+static void intel_mid_dma_remove(struct pci_dev *pdev)
{
struct middma_device *device = pci_get_drvdata(pdev);
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index d6668071bd0d..9b041858d10d 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -242,8 +242,7 @@ static struct dca_ops ioat_dca_ops = {
};
-struct dca_provider * __devinit
-ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -408,8 +407,7 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
return slots;
}
-struct dca_provider * __devinit
-ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -621,8 +619,7 @@ static inline int dca3_tag_map_invalid(u8 *tag_map)
(tag_map[4] == DCA_TAG_MAP_VALID));
}
-struct dca_provider * __devinit
-ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
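
The ioat, iop-adma, mmp, mpc512x and intel_mid_dma hunks below all drop __devinit/__devexit from probe/remove paths. With CONFIG_HOTPLUG now always enabled these annotations no longer move code into discardable sections, so the change is purely mechanical (foo_remove is a placeholder name):

	/* before */
	static int __devexit foo_remove(struct platform_device *pdev);
	/* after: identical semantics, only the section annotation is gone */
	static int foo_remove(struct platform_device *pdev);
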
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 73b2b65cb1de..1a68a8ba87e6 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -782,7 +782,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
*/
#define IOAT_TEST_SIZE 2000
-static void __devinit ioat_dma_test_callback(void *dma_async_param)
+static void ioat_dma_test_callback(void *dma_async_param)
{
struct completion *cmp = dma_async_param;
@@ -793,7 +793,7 @@ static void __devinit ioat_dma_test_callback(void *dma_async_param)
* ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
* @device: device to be tested
*/
-int __devinit ioat_dma_self_test(struct ioatdma_device *device)
+int ioat_dma_self_test(struct ioatdma_device *device)
{
int i;
u8 *src;
@@ -994,7 +994,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *device)
writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
-int __devinit ioat_probe(struct ioatdma_device *device)
+int ioat_probe(struct ioatdma_device *device)
{
int err = -ENODEV;
struct dma_device *dma = &device->common;
@@ -1049,7 +1049,7 @@ err_dma_pool:
return err;
}
-int __devinit ioat_register(struct ioatdma_device *device)
+int ioat_register(struct ioatdma_device *device)
{
int err = dma_async_device_register(&device->common);
@@ -1183,7 +1183,7 @@ void ioat_kobject_del(struct ioatdma_device *device)
}
}
-int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
+int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
struct dma_device *dma;
@@ -1216,7 +1216,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
return err;
}
-void __devexit ioat_dma_remove(struct ioatdma_device *device)
+void ioat_dma_remove(struct ioatdma_device *device)
{
struct dma_device *dma = &device->common;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5e8fe01ba69d..087935f1565f 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -303,13 +303,12 @@ static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
pci_unmap_page(pdev, addr, len, direction);
}
-int __devinit ioat_probe(struct ioatdma_device *device);
-int __devinit ioat_register(struct ioatdma_device *device);
-int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
-int __devinit ioat_dma_self_test(struct ioatdma_device *device);
-void __devexit ioat_dma_remove(struct ioatdma_device *device);
-struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
- void __iomem *iobase);
+int ioat_probe(struct ioatdma_device *device);
+int ioat_register(struct ioatdma_device *device);
+int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
+int ioat_dma_self_test(struct ioatdma_device *device);
+void ioat_dma_remove(struct ioatdma_device *device);
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b9d667851445..82d4e306c32e 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -862,7 +862,7 @@ struct kobj_type ioat2_ktype = {
.default_attrs = ioat2_attrs,
};
-int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
+int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
struct dma_device *dma;
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index be2a55b95c23..e100f644e344 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -155,10 +155,10 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
-int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
-int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
+int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);
struct dma_async_tx_descriptor *
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f7f1dc62c15c..3e9d66920eb3 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -836,7 +836,7 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
return &desc->txd;
}
-static void __devinit ioat3_dma_test_callback(void *dma_async_param)
+static void ioat3_dma_test_callback(void *dma_async_param)
{
struct completion *cmp = dma_async_param;
@@ -844,7 +844,7 @@ static void __devinit ioat3_dma_test_callback(void *dma_async_param)
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
-static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
+static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
int i, src_idx;
struct page *dest;
@@ -951,7 +951,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
}
}
- dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
/* skip validate if the capability is not present */
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
@@ -1096,7 +1096,7 @@ out:
return err;
}
-static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
+static int ioat3_dma_self_test(struct ioatdma_device *device)
{
int rc = ioat_dma_self_test(device);
@@ -1187,7 +1187,7 @@ static bool is_snb_ioat(struct pci_dev *pdev)
}
}
-int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
int dca_en = system_has_dca_enabled(pdev);
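
Besides the annotation cleanup, the dma_v3.c hunk also corrects the sync direction for the xor self-test destination: the direction passed to dma_sync_single_for_device() has to match the direction the buffer was mapped with, and the destination page is written by the device, hence DMA_FROM_DEVICE. A generic sketch of the rule, names hypothetical:

	dma_addr_t dst = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... device writes into dst ... */
	dma_sync_single_for_cpu(dev, dst, PAGE_SIZE, DMA_FROM_DEVICE);
	/* CPU inspects the result, then hands the buffer back */
	dma_sync_single_for_device(dev, dst, PAGE_SIZE, DMA_FROM_DEVICE);
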
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index bfa9a3536e09..4f686c527ab6 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -109,9 +109,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
-static int __devinit ioat_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
-static void __devexit ioat_remove(struct pci_dev *pdev);
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void ioat_remove(struct pci_dev *pdev);
static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
@@ -141,7 +140,7 @@ alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
return d;
}
-static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
void __iomem * const *iomap;
struct device *dev = &pdev->dev;
@@ -195,7 +194,7 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
return 0;
}
-static void __devexit ioat_remove(struct pci_dev *pdev)
+static void ioat_remove(struct pci_dev *pdev)
{
struct ioatdma_device *device = pci_get_drvdata(pdev);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 9072e173b860..eacb8be99812 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1406,7 +1406,7 @@ out:
}
#endif
-static int __devexit iop_adma_remove(struct platform_device *dev)
+static int iop_adma_remove(struct platform_device *dev)
{
struct iop_adma_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 13bdf4a7e1ec..dc7466563507 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -712,7 +713,7 @@ static void dma_do_tasklet(unsigned long data)
}
}
-static int __devexit mmp_pdma_remove(struct platform_device *op)
+static int mmp_pdma_remove(struct platform_device *op)
{
struct mmp_pdma_device *pdev = platform_get_drvdata(op);
@@ -782,9 +783,9 @@ static int mmp_pdma_probe(struct platform_device *op)
if (!iores)
return -EINVAL;
- pdev->base = devm_request_and_ioremap(pdev->dev, iores);
- if (!pdev->base)
- return -EADDRNOTAVAIL;
+ pdev->base = devm_ioremap_resource(pdev->dev, iores);
+ if (IS_ERR(pdev->base))
+ return PTR_ERR(pdev->base);
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
if (of_id)
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 323821c0c095..43d5a6c33297 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -467,7 +468,7 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
mmp_tdma_enable_chan(tdmac);
}
-static int __devexit mmp_tdma_remove(struct platform_device *pdev)
+static int mmp_tdma_remove(struct platform_device *pdev)
{
struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
@@ -547,9 +548,9 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (!iores)
return -EINVAL;
- tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
- if (!tdev->base)
- return -EADDRNOTAVAIL;
+ tdev->base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(tdev->base))
+ return PTR_ERR(tdev->base);
INIT_LIST_HEAD(&tdev->device.channels);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2cd024a91d40..2d956732aa3d 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -799,7 +799,7 @@ static int mpc_dma_probe(struct platform_device *op)
return retval;
}
-static int __devexit mpc_dma_remove(struct platform_device *op)
+static int mpc_dma_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mpc_dma *mdma = dev_get_drvdata(dev);
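
The mv_xor.c diff that follows turns each XOR channel into its own struct dma_device and routes all diagnostics through the channel instead of the old shared mv_xor_device; the small helper it introduces carries most of that conversion:

	/* from the hunks below: resolve a channel to its struct device, so
	 * dev_dbg()/dev_err() replace the dev_printk() calls on the old
	 * chan->device->common.dev pointer */
	#define mv_chan_to_devp(chan)	((chan)->dmadev.dev)

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
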
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d12ad00da4cb..e17fad03cb80 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,9 @@
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>
#include "dmaengine.h"
@@ -34,14 +37,14 @@
static void mv_xor_issue_pending(struct dma_chan *chan);
#define to_mv_xor_chan(chan) \
- container_of(chan, struct mv_xor_chan, common)
-
-#define to_mv_xor_device(dev) \
- container_of(dev, struct mv_xor_device, common)
+ container_of(chan, struct mv_xor_chan, dmachan)
#define to_mv_xor_slot(tx) \
container_of(tx, struct mv_xor_desc_slot, async_tx)
+#define mv_chan_to_devp(chan) \
+ ((chan)->dmadev.dev)
+
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -166,7 +169,7 @@ static int mv_is_err_intr(u32 intr_cause)
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = ~(1 << (chan->idx * 16));
- dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
+ dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
__raw_writel(val, XOR_INTR_CAUSE(chan));
}
@@ -206,9 +209,9 @@ static void mv_set_mode(struct mv_xor_chan *chan,
op_mode = XOR_OPERATION_MODE_MEMSET;
break;
default:
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error: unsupported operation %d.\n",
- type);
+ dev_err(mv_chan_to_devp(chan),
+ "error: unsupported operation %d.\n",
+ type);
BUG();
return;
}
@@ -223,7 +226,7 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
{
u32 activation;
- dev_dbg(chan->device->common.dev, " activate chan.\n");
+ dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
activation = __raw_readl(XOR_ACTIVATION(chan));
activation |= 0x1;
__raw_writel(activation, XOR_ACTIVATION(chan));
@@ -251,7 +254,7 @@ static int mv_chan_xor_slot_count(size_t len, int src_cnt)
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *slot)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
__func__, __LINE__, slot);
slot->slots_per_op = 0;
@@ -266,7 +269,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *sw_desc)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
__func__, __LINE__, sw_desc);
if (sw_desc->type != mv_chan->current_type)
mv_set_mode(mv_chan, sw_desc->type);
@@ -284,7 +287,7 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
}
mv_chan->pending += sw_desc->slot_cnt;
- mv_xor_issue_pending(&mv_chan->common);
+ mv_xor_issue_pending(&mv_chan->dmachan);
}
static dma_cookie_t
@@ -308,8 +311,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
*/
if (desc->group_head && desc->unmap_len) {
struct mv_xor_desc_slot *unmap = desc->group_head;
- struct device *dev =
- &mv_chan->device->pdev->dev;
+ struct device *dev = mv_chan_to_devp(mv_chan);
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
@@ -353,7 +355,7 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter, *_iter;
- dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
completed_node) {
@@ -369,7 +371,7 @@ static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
struct mv_xor_chan *mv_chan)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
__func__, __LINE__, desc, desc->async_tx.flags);
list_del(&desc->chain_node);
/* the client is allowed to attach dependent operations
@@ -393,8 +395,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
u32 current_desc = mv_chan_get_current_desc(mv_chan);
int seen_current = 0;
- dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
- dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
mv_xor_clean_completed_slots(mv_chan);
/* free completed slots from the chain starting with
@@ -438,7 +440,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
}
if (cookie > 0)
- mv_chan->common.completed_cookie = cookie;
+ mv_chan->dmachan.completed_cookie = cookie;
}
static void
@@ -547,7 +549,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_t cookie;
int new_hw_chain = 1;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
@@ -570,7 +572,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
@@ -604,9 +606,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
- struct mv_xor_platform_data *plat_data =
- mv_chan->device->pdev->dev.platform_data;
- int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
+ int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
/* Allocate descriptor slots */
idx = mv_chan->slots_allocated;
@@ -617,7 +617,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
+ hw_desc = (char *) mv_chan->dma_desc_pool_virt;
slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
dma_async_tx_descriptor_init(&slot->async_tx, chan);
@@ -625,7 +625,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->device->dma_desc_pool;
+ hw_desc = (char *) mv_chan->dma_desc_pool;
slot->async_tx.phys =
(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
slot->idx = idx++;
@@ -641,7 +641,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
struct mv_xor_desc_slot,
slot_node);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"allocated %d descriptor slots last_used: %p\n",
mv_chan->slots_allocated, mv_chan->last_used);
@@ -656,7 +656,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct mv_xor_desc_slot *sw_desc, *grp_start;
int slot_cnt;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s dest: %x src %x len: %u flags: %ld\n",
__func__, dest, src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -680,7 +680,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p\n",
__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
@@ -695,7 +695,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
struct mv_xor_desc_slot *sw_desc, *grp_start;
int slot_cnt;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s dest: %x len: %u flags: %ld\n",
__func__, dest, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -718,7 +718,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
sw_desc->unmap_len = len;
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p \n",
__func__, sw_desc, &sw_desc->async_tx);
return sw_desc ? &sw_desc->async_tx : NULL;
@@ -737,7 +737,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s src_cnt: %d len: dest %x %u flags: %ld\n",
__func__, src_cnt, len, dest, flags);
@@ -758,7 +758,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p \n",
__func__, sw_desc, &sw_desc->async_tx);
return sw_desc ? &sw_desc->async_tx : NULL;
@@ -791,12 +791,12 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
}
mv_chan->last_used = NULL;
- dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
__func__, mv_chan->slots_allocated);
spin_unlock_bh(&mv_chan->lock);
if (in_use_descs)
- dev_err(mv_chan->device->common.dev,
+ dev_err(mv_chan_to_devp(mv_chan),
"freeing %d in use descriptors!\n", in_use_descs);
}
@@ -828,42 +828,42 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
u32 val;
val = __raw_readl(XOR_CONFIG(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "config 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "config 0x%08x.\n", val);
val = __raw_readl(XOR_ACTIVATION(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "activation 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "activation 0x%08x.\n", val);
val = __raw_readl(XOR_INTR_CAUSE(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "intr cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "intr cause 0x%08x.\n", val);
val = __raw_readl(XOR_INTR_MASK(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "intr mask 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "intr mask 0x%08x.\n", val);
val = __raw_readl(XOR_ERROR_CAUSE(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "error cause 0x%08x.\n", val);
val = __raw_readl(XOR_ERROR_ADDR(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error addr 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "error addr 0x%08x.\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
u32 intr_cause)
{
if (intr_cause & (1 << 4)) {
- dev_dbg(chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(chan),
"ignore this error\n");
return;
}
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error on chan %d. intr cause 0x%08x.\n",
- chan->idx, intr_cause);
+ dev_err(mv_chan_to_devp(chan),
+ "error on chan %d. intr cause 0x%08x.\n",
+ chan->idx, intr_cause);
mv_dump_xor_regs(chan);
BUG();
@@ -874,7 +874,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
struct mv_xor_chan *chan = data;
u32 intr_cause = mv_chan_get_intr_cause(chan);
- dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
+ dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
if (mv_is_err_intr(intr_cause))
mv_xor_err_interrupt_handler(chan, intr_cause);
@@ -901,7 +901,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
*/
#define MV_XOR_TEST_SIZE 2000
-static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
+static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
int i;
void *src, *dest;
@@ -910,7 +910,6 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
int err = 0;
- struct mv_xor_chan *mv_chan;
src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
if (!src)
@@ -926,10 +925,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
for (i = 0; i < MV_XOR_TEST_SIZE; i++)
((u8 *) src)[i] = (u8)i;
- /* Start copy, using first DMA channel */
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
+ dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
@@ -950,18 +946,17 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test copy timed out, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
- mv_chan = to_mv_xor_chan(dma_chan);
- dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test copy failed compare, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
}
@@ -976,7 +971,7 @@ out:
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
-mv_xor_xor_self_test(struct mv_xor_device *device)
+mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
int i, src_idx;
struct page *dest;
@@ -989,7 +984,6 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
- struct mv_xor_chan *mv_chan;
for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1022,9 +1016,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
memset(page_address(dest), 0, PAGE_SIZE);
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
+ dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
@@ -1048,22 +1040,21 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test xor timed out, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test xor timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
- mv_chan = to_mv_xor_chan(dma_chan);
- dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test xor failed compare, disabling."
- " index %d, data %x, expected %x\n", i,
- ptr[i], cmp_word);
+ dev_err(dma_chan->device->dev,
+ "Self-test xor failed compare, disabling."
+ " index %d, data %x, expected %x\n", i,
+ ptr[i], cmp_word);
err = -ENODEV;
goto free_resources;
}
@@ -1079,62 +1070,66 @@ out:
return err;
}
-static int __devexit mv_xor_remove(struct platform_device *dev)
+/* This driver does not implement any of the optional DMA operations. */
+static int
+mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ return -ENOSYS;
+}
+
+static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
- struct mv_xor_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
- struct mv_xor_chan *mv_chan;
- struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
+ struct device *dev = mv_chan->dmadev.dev;
- dma_async_device_unregister(&device->common);
+ dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(&dev->dev, plat_data->pool_size,
- device->dma_desc_pool_virt, device->dma_desc_pool);
+ dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
- list_for_each_entry_safe(chan, _chan, &device->common.channels,
- device_node) {
- mv_chan = to_mv_xor_chan(chan);
+ list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
+ device_node) {
list_del(&chan->device_node);
}
+ free_irq(mv_chan->irq, mv_chan);
+
return 0;
}
-static int mv_xor_probe(struct platform_device *pdev)
+static struct mv_xor_chan *
+mv_xor_channel_add(struct mv_xor_device *xordev,
+ struct platform_device *pdev,
+ int idx, dma_cap_mask_t cap_mask, int irq)
{
int ret = 0;
- int irq;
- struct mv_xor_device *adev;
struct mv_xor_chan *mv_chan;
struct dma_device *dma_dev;
- struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
+ mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+ if (!mv_chan) {
+ ret = -ENOMEM;
+ goto err_free_dma;
+ }
- adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
- if (!adev)
- return -ENOMEM;
+ mv_chan->idx = idx;
+ mv_chan->irq = irq;
- dma_dev = &adev->common;
+ dma_dev = &mv_chan->dmadev;
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL);
- if (!adev->dma_desc_pool_virt)
- return -ENOMEM;
-
- adev->id = plat_data->hw_id;
+ mv_chan->dma_desc_pool_virt =
+ dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
+ &mv_chan->dma_desc_pool, GFP_KERNEL);
+ if (!mv_chan->dma_desc_pool_virt)
+ return ERR_PTR(-ENOMEM);
/* discover transaction capabilities from the platform data */
- dma_dev->cap_mask = plat_data->cap_mask;
- adev->pdev = pdev;
- platform_set_drvdata(pdev, adev);
-
- adev->shared = platform_get_drvdata(plat_data->shared);
+ dma_dev->cap_mask = cap_mask;
INIT_LIST_HEAD(&dma_dev->channels);
@@ -1143,6 +1138,7 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
+ dma_dev->device_control = mv_xor_control;
dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
@@ -1155,15 +1151,7 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
}
- mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
- if (!mv_chan) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
- mv_chan->device = adev;
- mv_chan->idx = plat_data->hw_id;
- mv_chan->mmr_base = adev->shared->xor_base;
-
+ mv_chan->mmr_base = xordev->xor_base;
if (!mv_chan->mmr_base) {
ret = -ENOMEM;
goto err_free_dma;
@@ -1174,14 +1162,8 @@ static int mv_xor_probe(struct platform_device *pdev)
/* clear errors before enabling interrupts */
mv_xor_device_clear_err_status(mv_chan);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_free_dma;
- }
- ret = devm_request_irq(&pdev->dev, irq,
- mv_xor_interrupt_handler,
- 0, dev_name(&pdev->dev), mv_chan);
+ ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
+ 0, dev_name(&pdev->dev), mv_chan);
if (ret)
goto err_free_dma;
@@ -1193,26 +1175,26 @@ static int mv_xor_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mv_chan->chain);
INIT_LIST_HEAD(&mv_chan->completed_slots);
INIT_LIST_HEAD(&mv_chan->all_slots);
- mv_chan->common.device = dma_dev;
- dma_cookie_init(&mv_chan->common);
+ mv_chan->dmachan.device = dma_dev;
+ dma_cookie_init(&mv_chan->dmachan);
- list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+ list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
- ret = mv_xor_memcpy_self_test(adev);
+ ret = mv_xor_memcpy_self_test(mv_chan);
dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
if (ret)
- goto err_free_dma;
+ goto err_free_irq;
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
- ret = mv_xor_xor_self_test(adev);
+ ret = mv_xor_xor_self_test(mv_chan);
dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
if (ret)
- goto err_free_dma;
+ goto err_free_irq;
}
- dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
+ dev_info(&pdev->dev, "Marvell XOR: "
"( %s%s%s%s)\n",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
@@ -1220,20 +1202,21 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
- goto out;
+ return mv_chan;
+err_free_irq:
+ free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
- adev->dma_desc_pool_virt, adev->dma_desc_pool);
- out:
- return ret;
+ dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+ return ERR_PTR(ret);
}
static void
-mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
const struct mbus_dram_target_info *dram)
{
- void __iomem *base = msp->xor_base;
+ void __iomem *base = xordev->xor_base;
u32 win_enable = 0;
int i;
@@ -1258,99 +1241,179 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
writel(win_enable, base + WINDOW_BAR_ENABLE(0));
writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
-static struct platform_driver mv_xor_driver = {
- .probe = mv_xor_probe,
- .remove = mv_xor_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = MV_XOR_NAME,
- },
-};
-
-static int mv_xor_shared_probe(struct platform_device *pdev)
+static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
- struct mv_xor_shared_private *msp;
+ struct mv_xor_device *xordev;
+ struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
+ int i, ret;
- dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+ dev_notice(&pdev->dev, "Marvell XOR driver\n");
- msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
- if (!msp)
+ xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
+ if (!xordev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- msp->xor_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!msp->xor_base)
+ xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xordev->xor_base)
return -EBUSY;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -ENODEV;
- msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!msp->xor_high_base)
+ xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xordev->xor_high_base)
return -EBUSY;
- platform_set_drvdata(pdev, msp);
+ platform_set_drvdata(pdev, xordev);
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
- mv_xor_conf_mbus_windows(msp, dram);
+ mv_xor_conf_mbus_windows(xordev, dram);
/* Not all platforms can gate the clock, so it is not
* an error if the clock does not exist.
*/
- msp->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(msp->clk))
- clk_prepare_enable(msp->clk);
+ xordev->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(xordev->clk))
+ clk_prepare_enable(xordev->clk);
+
+ if (pdev->dev.of_node) {
+ struct device_node *np;
+ int i = 0;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ dma_cap_mask_t cap_mask;
+ int irq;
+
+ dma_cap_zero(cap_mask);
+ if (of_property_read_bool(np, "dmacap,memcpy"))
+ dma_cap_set(DMA_MEMCPY, cap_mask);
+ if (of_property_read_bool(np, "dmacap,xor"))
+ dma_cap_set(DMA_XOR, cap_mask);
+ if (of_property_read_bool(np, "dmacap,memset"))
+ dma_cap_set(DMA_MEMSET, cap_mask);
+ if (of_property_read_bool(np, "dmacap,interrupt"))
+ dma_cap_set(DMA_INTERRUPT, cap_mask);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENODEV;
+ goto err_channel_add;
+ }
+
+ xordev->channels[i] =
+ mv_xor_channel_add(xordev, pdev, i,
+ cap_mask, irq);
+ if (IS_ERR(xordev->channels[i])) {
+ ret = PTR_ERR(xordev->channels[i]);
+ xordev->channels[i] = NULL;
+ irq_dispose_mapping(irq);
+ goto err_channel_add;
+ }
+
+ i++;
+ }
+ } else if (pdata && pdata->channels) {
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ struct mv_xor_channel_data *cd;
+ int irq;
+
+ cd = &pdata->channels[i];
+ if (!cd) {
+ ret = -ENODEV;
+ goto err_channel_add;
+ }
+
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = irq;
+ goto err_channel_add;
+ }
+
+ xordev->channels[i] =
+ mv_xor_channel_add(xordev, pdev, i,
+ cd->cap_mask, irq);
+ if (IS_ERR(xordev->channels[i])) {
+ ret = PTR_ERR(xordev->channels[i]);
+ goto err_channel_add;
+ }
+ }
+ }
return 0;
+
+err_channel_add:
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
+ if (xordev->channels[i]) {
+ mv_xor_channel_remove(xordev->channels[i]);
+ if (pdev->dev.of_node)
+ irq_dispose_mapping(xordev->channels[i]->irq);
+ }
+
+ if (!IS_ERR(xordev->clk)) {
+ clk_disable_unprepare(xordev->clk);
+ clk_put(xordev->clk);
+ }
+
+ return ret;
}
-static int mv_xor_shared_remove(struct platform_device *pdev)
+static int mv_xor_remove(struct platform_device *pdev)
{
- struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+ struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ if (xordev->channels[i])
+ mv_xor_channel_remove(xordev->channels[i]);
+ }
- if (!IS_ERR(msp->clk)) {
- clk_disable_unprepare(msp->clk);
- clk_put(msp->clk);
+ if (!IS_ERR(xordev->clk)) {
+ clk_disable_unprepare(xordev->clk);
+ clk_put(xordev->clk);
}
return 0;
}
-static struct platform_driver mv_xor_shared_driver = {
- .probe = mv_xor_shared_probe,
- .remove = mv_xor_shared_remove,
+#ifdef CONFIG_OF
+static struct of_device_id mv_xor_dt_ids[] = {
+ { .compatible = "marvell,orion-xor", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_driver = {
+ .probe = mv_xor_probe,
+ .remove = mv_xor_remove,
.driver = {
- .owner = THIS_MODULE,
- .name = MV_XOR_SHARED_NAME,
+ .owner = THIS_MODULE,
+ .name = MV_XOR_NAME,
+ .of_match_table = of_match_ptr(mv_xor_dt_ids),
},
};
static int __init mv_xor_init(void)
{
- int rc;
-
- rc = platform_driver_register(&mv_xor_shared_driver);
- if (!rc) {
- rc = platform_driver_register(&mv_xor_driver);
- if (rc)
- platform_driver_unregister(&mv_xor_shared_driver);
- }
- return rc;
+ return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);
@@ -1359,7 +1422,6 @@ module_init(mv_xor_init);
static void __exit mv_xor_exit(void)
{
platform_driver_unregister(&mv_xor_driver);
- platform_driver_unregister(&mv_xor_shared_driver);
return;
}
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index a5b422f5a8ab..c632a4761fcf 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -24,8 +24,10 @@
#include <linux/interrupt.h>
#define USE_TIMER
+#define MV_XOR_POOL_SIZE PAGE_SIZE
#define MV_XOR_SLOT_SIZE 64
#define MV_XOR_THRESHOLD 1
+#define MV_XOR_MAX_CHANNELS 2
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
@@ -51,29 +53,13 @@
#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
+#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2))
-struct mv_xor_shared_private {
- void __iomem *xor_base;
- void __iomem *xor_high_base;
- struct clk *clk;
-};
-
-
-/**
- * struct mv_xor_device - internal representation of a XOR device
- * @pdev: Platform device
- * @id: HW XOR Device selector
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @common: embedded struct dma_device
- */
struct mv_xor_device {
- struct platform_device *pdev;
- int id;
- dma_addr_t dma_desc_pool;
- void *dma_desc_pool_virt;
- struct dma_device common;
- struct mv_xor_shared_private *shared;
+ void __iomem *xor_base;
+ void __iomem *xor_high_base;
+ struct clk *clk;
+ struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
};
/**
@@ -96,11 +82,15 @@ struct mv_xor_chan {
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
unsigned int idx;
+ int irq;
enum dma_transaction_type current_type;
struct list_head chain;
struct list_head completed_slots;
- struct mv_xor_device *device;
- struct dma_chan common;
+ dma_addr_t dma_desc_pool;
+ void *dma_desc_pool_virt;
+ size_t pool_size;
+ struct dma_device dmadev;
+ struct dma_chan dmachan;
struct mv_xor_desc_slot *last_used;
struct list_head all_slots;
int slots_allocated;
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 5a31264f2bd1..c4b4fd2acc42 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -661,32 +661,14 @@ bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
-static struct platform_device *pdev;
-
-static const struct platform_device_info omap_dma_dev_info = {
- .name = "omap-dma-engine",
- .id = -1,
- .dma_mask = DMA_BIT_MASK(32),
-};
-
static int omap_dma_init(void)
{
- int rc = platform_driver_register(&omap_dma_driver);
-
- if (rc == 0) {
- pdev = platform_device_register_full(&omap_dma_dev_info);
- if (IS_ERR(pdev)) {
- platform_driver_unregister(&omap_dma_driver);
- rc = PTR_ERR(pdev);
- }
- }
- return rc;
+ return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);
static void __exit omap_dma_exit(void)
{
- platform_device_unregister(pdev);
platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index eca1c4ddf039..3f2617255ef2 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -961,7 +961,7 @@ err_free_mem:
return err;
}
-static void __devexit pch_dma_remove(struct pci_dev *pdev)
+static void pch_dma_remove(struct pci_dev *pdev)
{
struct pch_dma *pd = pci_get_drvdata(pdev);
struct pch_dma_chan *pd_chan;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 95555f37ea6d..80680eee0171 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2988,7 +2988,7 @@ probe_err1:
return ret;
}
-static int __devexit pl330_remove(struct amba_device *adev)
+static int pl330_remove(struct amba_device *adev)
{
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index b94afc339e7f..5d3d95569a1e 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4592,7 +4592,7 @@ out:
/**
* ppc440spe_adma_remove - remove the asynch device
*/
-static int __devexit ppc440spe_adma_remove(struct platform_device *ofdev)
+static int ppc440spe_adma_remove(struct platform_device *ofdev)
{
struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
struct device_node *np = ofdev->dev.of_node;
@@ -4905,7 +4905,7 @@ out_free:
return ret;
}
-static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
+static const struct of_device_id ppc440spe_adma_of_match[] = {
{ .compatible = "ibm,dma-440spe", },
{ .compatible = "amcc,xor-accelerator", },
{},
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 2ad628df8223..461a91ab70bb 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -967,7 +967,7 @@ static int sa11x0_dma_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
+static int sa11x0_dma_remove(struct platform_device *pdev)
{
struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
unsigned pch;
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 8201bb4e0cd7..3315e4be9b85 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -880,7 +880,7 @@ ermrdmars:
return err;
}
-static int __devexit sh_dmae_remove(struct platform_device *pdev)
+static int sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index c3de6edb9651..94674a96c646 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -655,7 +655,7 @@ irq_dispose:
return ret;
}
-static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+static int sirfsoc_dma_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index efdfffa13349..f6c018f1b453 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -31,8 +32,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/clk/tegra.h>
-#include <mach/clk.h>
#include "dmaengine.h"
#define TEGRA_APBDMA_GENERAL 0x0
@@ -266,6 +267,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
if (async_tx_test_ack(&dma_desc->txd)) {
list_del(&dma_desc->node);
spin_unlock_irqrestore(&tdc->lock, flags);
+ dma_desc->txd.flags = 0;
return dma_desc;
}
}
@@ -1050,7 +1052,9 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_APBDMA_CSR_IE_EOC;
csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1095,7 +1099,8 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
mem += len;
}
sg_req->last_sg = true;
- dma_desc->txd.flags = 0;
+ if (flags & DMA_CTRL_ACK)
+ dma_desc->txd.flags = DMA_CTRL_ACK;
/*
* Make sure that the mode does not conflict with the currently
@@ -1184,7 +1189,7 @@ static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.max_dma_count = 1024UL * 64,
};
-static const struct of_device_id tegra_dma_of_match[] __devinitconst = {
+static const struct of_device_id tegra_dma_of_match[] = {
{
.compatible = "nvidia,tegra30-apbdma",
.data = &tegra30_dma_chip_data,
@@ -1236,12 +1241,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
- tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
- if (!tdma->base_addr) {
- dev_err(&pdev->dev,
- "Cannot request memregion/iomap dma address\n");
- return -EADDRNOTAVAIL;
- }
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tdma->dma_clk)) {
@@ -1360,7 +1362,7 @@ err_pm_disable:
return ret;
}
-static int __devexit tegra_dma_remove(struct platform_device *pdev)
+static int tegra_dma_remove(struct platform_device *pdev)
{
struct tegra_dma *tdma = platform_get_drvdata(pdev);
int i;
@@ -1403,7 +1405,7 @@ static int tegra_dma_runtime_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops tegra_dma_dev_pm_ops __devinitconst = {
+static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
.runtime_suspend = tegra_dma_runtime_suspend,
.runtime_resume = tegra_dma_runtime_resume,
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 98cf51e1544c..952f823901a6 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -798,7 +798,7 @@ err_release_region:
}
-static int __devexit td_remove(struct platform_device *pdev)
+static int td_remove(struct platform_device *pdev)
{
struct timb_dma *td = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index bb82d6be793c..acb709bfac0f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -4,10 +4,13 @@
# Licensed and distributed under the GPL
#
+config EDAC_SUPPORT
+ bool
+
menuconfig EDAC
bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
- depends on X86 || PPC || TILE || ARM
+ depends on X86 || PPC || TILE || ARM || EDAC_SUPPORT
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
@@ -29,8 +32,6 @@ menuconfig EDAC
if EDAC
-comment "Reporting subsystems"
-
config EDAC_LEGACY_SYSFS
bool "EDAC legacy sysfs"
default y
@@ -157,7 +158,7 @@ config EDAC_I3000
config EDAC_I3200
tristate "Intel 3200"
- depends on EDAC_MM_EDAC && PCI && X86 && EXPERIMENTAL
+ depends on EDAC_MM_EDAC && PCI && X86
help
Support for error detection and correction on the Intel
3200 and 3210 server chipsets.
@@ -223,7 +224,7 @@ config EDAC_I7300
config EDAC_SBRIDGE
tristate "Intel Sandy-Bridge Integrated MC"
depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
- depends on PCI_MMCONFIG && EXPERIMENTAL
+ depends on PCI_MMCONFIG
help
Support for error detection and correction on the Intel
Sandy Bridge Integrated Memory Controller.
@@ -316,4 +317,32 @@ config EDAC_HIGHBANK_L2
Support for error detection and correction on the
Calxeda Highbank memory controller.
+config EDAC_OCTEON_PC
+ tristate "Cavium Octeon Primary Caches"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the primary caches of
+ the cnMIPS cores of Cavium Octeon family SOCs.
+
+config EDAC_OCTEON_L2C
+ tristate "Cavium Octeon Secondary Caches (L2C)"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
+config EDAC_OCTEON_LMC
+ tristate "Cavium Octeon DRAM Memory Controller (LMC)"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
+config EDAC_OCTEON_PCI
+ tristate "Cavium Octeon PCI Controller"
+ depends on EDAC_MM_EDAC && PCI && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 7e5129a733f8..5608a9ba61b7 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -58,3 +58,8 @@ obj-$(CONFIG_EDAC_TILE) += tile_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
+
+obj-$(CONFIG_EDAC_OCTEON_PC) += octeon_edac-pc.o
+obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o
+obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o
+obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index f74a684269ff..910b0116c128 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -31,7 +31,7 @@ static struct ecc_settings **ecc_stngs;
*
*FIXME: Produce a better mapping/linearisation.
*/
-struct scrubrate {
+static const struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
@@ -239,7 +239,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
* DRAM base/limit associated with node_id
*/
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
- unsigned nid)
+ u8 nid)
{
u64 addr;
@@ -265,7 +265,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
- unsigned node_id;
+ u8 node_id;
u32 intlv_en, bits;
/*
@@ -602,111 +602,6 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
return input_addr;
}
-
-/*
- * @input_addr is an InputAddr associated with the node represented by mci.
- * Translate @input_addr to a DramAddr and return the result.
- */
-static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
-{
- struct amd64_pvt *pvt;
- unsigned node_id, intlv_shift;
- u64 bits, dram_addr;
- u32 intlv_sel;
-
- /*
- * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
- * shows how to translate a DramAddr to an InputAddr. Here we reverse
- * this procedure. When translating from a DramAddr to an InputAddr, the
- * bits used for node interleaving are discarded. Here we recover these
- * bits from the IntlvSel field of the DRAM Limit register (section
- * 3.4.4.2) for the node that input_addr is associated with.
- */
- pvt = mci->pvt_info;
- node_id = pvt->mc_node_id;
-
- BUG_ON(node_id > 7);
-
- intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- if (intlv_shift == 0) {
- edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
- (unsigned long)input_addr);
-
- return input_addr;
- }
-
- bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
- (input_addr & 0xfff);
-
- intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
- dram_addr = bits + (intlv_sel << 12);
-
- edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
- (unsigned long)input_addr,
- (unsigned long)dram_addr, intlv_shift);
-
- return dram_addr;
-}
-
-/*
- * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
- * @dram_addr to a SysAddr.
- */
-static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, sys_addr;
- int ret = 0;
-
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
- if (!ret) {
- if ((dram_addr >= hole_base) &&
- (dram_addr < (hole_base + hole_size))) {
- sys_addr = dram_addr + hole_offset;
-
- edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
- (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
- }
- }
-
- base = get_dram_base(pvt, pvt->mc_node_id);
- sys_addr = dram_addr + base;
-
- /*
- * The sys_addr we have computed up to this point is a 40-bit value
- * because the k8 deals with 40-bit values. However, the value we are
- * supposed to return is a full 64-bit physical address. The AMD
- * x86-64 architecture specifies that the most significant implemented
- * address bit through bit 63 of a physical address must be either all
- * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
- * 64-bit value below. See section 3.4.2 of AMD publication 24592:
- * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
- * Programming.
- */
- sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
-
- edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
- pvt->mc_node_id, (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
-}
-
-/*
- * @input_addr is an InputAddr associated with the node given by mci. Translate
- * @input_addr to a SysAddr.
- */
-static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
- u64 input_addr)
-{
- return dram_addr_to_sys_addr(mci,
- input_addr_to_dram_addr(mci, input_addr));
-}
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
struct err_info *err)
@@ -939,7 +834,8 @@ static u64 get_error_address(struct mce *m)
struct amd64_pvt *pvt;
u64 cc6_base, tmp_addr;
u32 tmp;
- u8 mce_nid, intlv_en;
+ u16 mce_nid;
+ u8 intlv_en;
if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
return addr;
@@ -979,10 +875,29 @@ static u64 get_error_address(struct mce *m)
return addr;
}
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *related)
+{
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_device(vendor, device, dev))) {
+ if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
+ (dev->bus->number == related->bus->number) &&
+ (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+ break;
+ }
+
+ return dev;
+}
+
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
+ struct amd_northbridge *nb;
+ struct pci_dev *misc, *f1 = NULL;
struct cpuinfo_x86 *c = &boot_cpu_data;
int off = range << 3;
+ u32 llim;
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
@@ -996,30 +911,32 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
- /* Factor in CC6 save area by reading dst node's limit reg */
- if (c->x86 == 0x15) {
- struct pci_dev *f1 = NULL;
- u8 nid = dram_dst_node(pvt, range);
- u32 llim;
+ /* F15h: factor in CC6 save area by reading dst node's limit reg */
+ if (c->x86 != 0x15)
+ return;
- f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
- if (WARN_ON(!f1))
- return;
+ nb = node_to_amd_nb(dram_dst_node(pvt, range));
+ if (WARN_ON(!nb))
+ return;
+
+ misc = nb->misc;
+ f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+ if (WARN_ON(!f1))
+ return;
- amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
+ amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
- pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+ pvt->ranges[range].lim.lo &= GENMASK(0, 15);
- /* {[39:27],111b} */
- pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
+ /* {[39:27],111b} */
+ pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
- pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+ pvt->ranges[range].lim.hi &= GENMASK(0, 7);
- /* [47:40] */
- pvt->ranges[range].lim.hi |= llim >> 13;
+ /* [47:40] */
+ pvt->ranges[range].lim.hi |= llim >> 13;
- pci_dev_put(f1);
- }
+ pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
@@ -1305,7 +1222,7 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
}
/* Convert the sys_addr to the normalized DCT address */
-static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 sys_addr, bool hi_rng,
u32 dct_sel_base_addr)
{
@@ -1381,7 +1298,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
@@ -1672,23 +1589,6 @@ static struct amd64_family_type amd64_family_types[] = {
},
};
-static struct pci_dev *pci_get_related_function(unsigned int vendor,
- unsigned int device,
- struct pci_dev *related)
-{
- struct pci_dev *dev = NULL;
-
- dev = pci_get_device(vendor, device, dev);
- while (dev) {
- if ((dev->bus->number == related->bus->number) &&
- (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
- break;
- dev = pci_get_device(vendor, device, dev);
- }
-
- return dev;
-}
-
/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
@@ -1696,7 +1596,7 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
*
* Algorithm courtesy of Ross LaFetra from AMD.
*/
-static u16 x4_vectors[] = {
+static const u16 x4_vectors[] = {
0x2f57, 0x1afe, 0x66cc, 0xdd88,
0x11eb, 0x3396, 0x7f4c, 0xeac8,
0x0001, 0x0002, 0x0004, 0x0008,
@@ -1735,7 +1635,7 @@ static u16 x4_vectors[] = {
0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
-static u16 x8_vectors[] = {
+static const u16 x8_vectors[] = {
0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
@@ -1757,7 +1657,7 @@ static u16 x8_vectors[] = {
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
-static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
unsigned v_dim)
{
unsigned int i, err_sym;
@@ -2181,7 +2081,7 @@ static int init_csrows(struct mem_ctl_info *mci)
}
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
int cpu;
@@ -2191,7 +2091,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
+static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2224,7 +2124,7 @@ out:
return ret;
}
-static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
cpumask_var_t cmask;
int cpu;
@@ -2262,7 +2162,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
return 0;
}
-static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
bool ret = true;
@@ -2314,7 +2214,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
return ret;
}
-static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
u32 value, mask = 0x3; /* UECC/CECC enable */
@@ -2353,7 +2253,7 @@ static const char *ecc_msg =
"'ecc_enable_override'.\n"
" (Note that use of the override may cause unknown side effects.)\n";
-static bool ecc_enabled(struct pci_dev *F3, u8 nid)
+static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
u32 value;
u8 ecc_en = 0;
@@ -2474,7 +2374,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
int err = 0, ret;
- u8 nid = get_node_id(F2);
+ u16 nid = amd_get_node_id(F2);
ret = -ENOMEM;
pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2563,10 +2463,10 @@ err_ret:
return ret;
}
-static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
- const struct pci_device_id *mc_type)
+static int amd64_probe_one_instance(struct pci_dev *pdev,
+ const struct pci_device_id *mc_type)
{
- u8 nid = get_node_id(pdev);
+ u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s;
int ret = 0;
@@ -2612,11 +2512,11 @@ err_out:
return ret;
}
-static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+static void amd64_remove_one_instance(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u8 nid = get_node_id(pdev);
+ u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid];
@@ -2686,7 +2586,7 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table);
static struct pci_driver amd64_pci_driver = {
.name = EDAC_MOD_STR,
.probe = amd64_probe_one_instance,
- .remove = __devexit_p(amd64_remove_one_instance),
+ .remove = amd64_remove_one_instance,
.id_table = amd64_pci_table,
};
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index e864f407806c..35637d83f235 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -292,12 +292,6 @@
/* MSRs */
#define MSR_MCGCTL_NBE BIT(4)
-/* AMD sets the first MC device at device ID 0x18. */
-static inline u8 get_node_id(struct pci_dev *pdev)
-{
- return PCI_SLOT(pdev->devfn) - 0x18;
-}
-
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
@@ -340,7 +334,7 @@ struct amd64_pvt {
/* pci_device handles which we utilize */
struct pci_dev *F1, *F2, *F3;
- unsigned mc_node_id; /* MC index of this MC node */
+ u16 mc_node_id; /* MC index of this MC node */
int ext_model; /* extended model value of this node */
int channel_count;
@@ -393,7 +387,7 @@ struct err_info {
u32 offset;
};
-static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
{
u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
@@ -403,7 +397,7 @@ static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
}
-static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
{
u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 29eeb68a200c..96e3ee3460a5 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -301,8 +301,8 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit amd76x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int amd76x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
@@ -318,7 +318,7 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev,
* structure for the device then delete the mci and free the
* resources.
*/
-static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+static void amd76x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -350,7 +350,7 @@ MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
static struct pci_driver amd76x_driver = {
.name = EDAC_MOD_STR,
.probe = amd76x_init_one,
- .remove = __devexit_p(amd76x_remove_one),
+ .remove = amd76x_remove_one,
.id_table = amd76x_pci_tbl,
};
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index a1bbd8edd257..c2eaf334b90b 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -124,7 +124,7 @@ static void cell_edac_check(struct mem_ctl_info *mci)
}
}
-static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
+static void cell_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = mci->csrows[0];
struct dimm_info *dimm;
@@ -164,7 +164,7 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit cell_edac_probe(struct platform_device *pdev)
+static int cell_edac_probe(struct platform_device *pdev)
{
struct cbe_mic_tm_regs __iomem *regs;
struct mem_ctl_info *mci;
@@ -233,7 +233,7 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit cell_edac_remove(struct platform_device *pdev)
+static int cell_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
if (mci)
@@ -247,7 +247,7 @@ static struct platform_driver cell_edac_driver = {
.owner = THIS_MODULE,
},
.probe = cell_edac_probe,
- .remove = __devexit_p(cell_edac_remove),
+ .remove = cell_edac_remove,
};
static int __init cell_edac_init(void)
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index c2ef13495873..7f3c57113ba1 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -932,7 +932,7 @@ static int cpc925_mc_get_channels(void __iomem *vbase)
return dual;
}
-static int __devinit cpc925_probe(struct platform_device *pdev)
+static int cpc925_probe(struct platform_device *pdev)
{
static int edac_mc_idx;
struct mem_ctl_info *mci;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index a5ed6b795fd4..644fec54681f 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1390,8 +1390,7 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit e752x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
@@ -1402,7 +1401,7 @@ static int __devinit e752x_init_one(struct pci_dev *pdev,
return e752x_probe1(pdev, ent->driver_data);
}
-static void __devexit e752x_remove_one(struct pci_dev *pdev)
+static void e752x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct e752x_pvt *pvt;
@@ -1445,7 +1444,7 @@ MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
static struct pci_driver e752x_driver = {
.name = EDAC_MOD_STR,
.probe = e752x_init_one,
- .remove = __devexit_p(e752x_remove_one),
+ .remove = e752x_remove_one,
.id_table = e752x_pci_tbl,
};
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 9ff57f361a43..1c4056a50383 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -528,8 +528,7 @@ fail0:
}
/* returns count (>= 0), or negative on error */
-static int __devinit e7xxx_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
@@ -538,7 +537,7 @@ static int __devinit e7xxx_init_one(struct pci_dev *pdev,
-EIO : e7xxx_probe1(pdev, ent->driver_data);
}
-static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+static void e7xxx_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct e7xxx_pvt *pvt;
@@ -579,7 +578,7 @@ MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
static struct pci_driver e7xxx_driver = {
.name = EDAC_MOD_STR,
.probe = e7xxx_init_one,
- .remove = __devexit_p(e7xxx_remove_one),
+ .remove = e7xxx_remove_one,
.id_table = e7xxx_pci_tbl,
};
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 281f566a5513..d1e9eb191f2b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/*
* Allocate and fill the csrow/channels structs
*/
- mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
+ mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
goto error;
for (row = 0; row < tot_csrows; row++) {
@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
csr->csrow_idx = row;
csr->mci = mci;
csr->nr_channels = tot_channels;
- csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
+ csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
GFP_KERNEL);
if (!csr->channels)
goto error;
@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/*
* Allocate and fill the dimm structs
*/
- mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
+ mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
if (!mci->dimms)
goto error;
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index de2df92f9c77..0ca1ca71157f 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -472,8 +472,7 @@ static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
device_remove_file(&csrow->dev,
dynamic_csrow_ce_count_attr[chan]);
}
- put_device(&mci->csrows[i]->dev);
- device_del(&mci->csrows[i]->dev);
+ device_unregister(&mci->csrows[i]->dev);
}
}
#endif
@@ -1055,11 +1054,9 @@ fail:
struct dimm_info *dimm = mci->dimms[i];
if (dimm->nr_pages == 0)
continue;
- put_device(&dimm->dev);
- device_del(&dimm->dev);
+ device_unregister(&dimm->dev);
}
- put_device(&mci->dev);
- device_del(&mci->dev);
+ device_unregister(&mci->dev);
bus_unregister(&mci->bus);
kfree(mci->bus.name);
return err;
@@ -1086,16 +1083,14 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
if (dimm->nr_pages == 0)
continue;
edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
- put_device(&dimm->dev);
- device_del(&dimm->dev);
+ device_unregister(&dimm->dev);
}
}
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
- put_device(&mci->dev);
- device_del(&mci->dev);
+ device_unregister(&mci->dev);
bus_unregister(&mci->bus);
kfree(mci->bus.name);
}
@@ -1159,8 +1154,6 @@ int __init edac_mc_sysfs_init(void)
void __exit edac_mc_sysfs_exit(void)
{
- put_device(mci_pdev);
- device_del(mci_pdev);
+ device_unregister(mci_pdev);
edac_put_sysfs_subsys();
- kfree(mci_pdev);
}
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index dc6e905ee1a5..0056c4dae9d5 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
- if (edac_pci_dev->show)
+ if (edac_pci_dev->store)
return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
return -EIO;
}
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index e599b00c05a8..c2bd8c6a4349 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -50,7 +50,7 @@ static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit highbank_l2_err_probe(struct platform_device *pdev)
+static int highbank_l2_err_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci;
struct hb_l2_drvdata *drvdata;
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 7ea4cc2e8bd2..4695dd2d71fd 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -119,7 +119,7 @@ static const struct file_operations highbank_mc_debug_inject_fops = {
.llseek = generic_file_llseek,
};
-static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
+static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
{
if (mci->debugfs)
debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
@@ -127,11 +127,11 @@ static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
;
}
#else
-static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
+static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
{}
#endif
-static int __devinit highbank_mc_probe(struct platform_device *pdev)
+static int highbank_mc_probe(struct platform_device *pdev)
{
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index d3d19cc4e9a1..694efcbf19c0 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -455,8 +455,7 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit i3000_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i3000_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
@@ -472,7 +471,7 @@ static int __devinit i3000_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i3000_remove_one(struct pci_dev *pdev)
+static void i3000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -502,7 +501,7 @@ MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
static struct pci_driver i3000_driver = {
.name = EDAC_MOD_STR,
.probe = i3000_init_one,
- .remove = __devexit_p(i3000_remove_one),
+ .remove = i3000_remove_one,
.id_table = i3000_pci_tbl,
};
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index b6653a6fc5d5..4e8337602e78 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -419,8 +419,7 @@ fail:
return rc;
}
-static int __devinit i3200_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
@@ -436,7 +435,7 @@ static int __devinit i3200_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i3200_remove_one(struct pci_dev *pdev)
+static void i3200_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i3200_priv *priv;
@@ -467,7 +466,7 @@ MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);
static struct pci_driver i3200_driver = {
.name = EDAC_MOD_STR,
.probe = i3200_init_one,
- .remove = __devexit_p(i3200_remove_one),
+ .remove = i3200_remove_one,
.id_table = i3200_pci_tbl,
};
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 6a49dd00b81b..63b2194e8c20 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1489,8 +1489,7 @@ fail0:
* negative on error
* count (>= 0)
*/
-static int __devinit i5000_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i5000_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
@@ -1509,7 +1508,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
* i5000_remove_one destructor for one instance of device
*
*/
-static void __devexit i5000_remove_one(struct pci_dev *pdev)
+static void i5000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -1547,7 +1546,7 @@ MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
static struct pci_driver i5000_driver = {
.name = KBUILD_BASENAME,
.probe = i5000_init_one,
- .remove = __devexit_p(i5000_remove_one),
+ .remove = i5000_remove_one,
.id_table = i5000_pci_tbl,
};
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index c4b5e5f868e8..d6955b2cc99f 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -638,8 +638,7 @@ static struct pci_dev *pci_get_device_func(unsigned vendor,
return ret;
}
-static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
- int csrow)
+static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
@@ -660,7 +659,7 @@ static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
}
-static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
+static void i5100_init_mtr(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
@@ -732,7 +731,7 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
* o not the only way to map chip selects to dimm slots
* o investigate if there is some way to obtain this map from the bios
*/
-static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
+static void i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
int i;
@@ -762,8 +761,8 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
}
}
-static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
- struct mem_ctl_info *mci)
+static void i5100_init_dimm_layout(struct pci_dev *pdev,
+ struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
int i;
@@ -784,8 +783,8 @@ static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
i5100_init_dimm_csmap(mci);
}
-static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
- struct mem_ctl_info *mci)
+static void i5100_init_interleaving(struct pci_dev *pdev,
+ struct mem_ctl_info *mci)
{
u16 w;
u32 dw;
@@ -830,7 +829,7 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
i5100_init_mtr(mci);
}
-static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
+static void i5100_init_csrows(struct mem_ctl_info *mci)
{
int i;
struct i5100_priv *priv = mci->pvt_info;
@@ -864,8 +863,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit i5100_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
struct mem_ctl_info *mci;
@@ -1020,7 +1018,7 @@ bail:
return ret;
}
-static void __devexit i5100_remove_one(struct pci_dev *pdev)
+static void i5100_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i5100_priv *priv;
@@ -1054,7 +1052,7 @@ MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
static struct pci_driver i5100_driver = {
.name = KBUILD_BASENAME,
.probe = i5100_init_one,
- .remove = __devexit_p(i5100_remove_one),
+ .remove = i5100_remove_one,
.id_table = i5100_pci_tbl,
};
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 277246998b80..0a05bbceb08f 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1373,8 +1373,7 @@ fail0:
* negative on error
* count (>= 0)
*/
-static int __devinit i5400_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i5400_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
@@ -1393,7 +1392,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
* i5400_remove_one destructor for one instance of device
*
*/
-static void __devexit i5400_remove_one(struct pci_dev *pdev)
+static void i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -1431,7 +1430,7 @@ MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);
static struct pci_driver i5400_driver = {
.name = "i5400_edac",
.probe = i5400_init_one,
- .remove = __devexit_p(i5400_remove_one),
+ .remove = i5400_remove_one,
.id_table = i5400_pci_tbl,
};
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 9d669cd43618..087c27bc5d42 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -923,7 +923,7 @@ static void i7300_put_devices(struct mem_ctl_info *mci)
* Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
* Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
*/
-static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
+static int i7300_get_devices(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
struct pci_dev *pdev;
@@ -1008,8 +1008,7 @@ error:
* @pdev: struct pci_dev pointer
* @id: struct pci_device_id pointer - currently unused
*/
-static int __devinit i7300_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[3];
@@ -1122,7 +1121,7 @@ fail0:
* i7300_remove_one() - Remove the driver
* @pdev: struct pci_dev pointer
*/
-static void __devexit i7300_remove_one(struct pci_dev *pdev)
+static void i7300_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
char *tmp;
@@ -1163,7 +1162,7 @@ MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
static struct pci_driver i7300_driver = {
.name = "i7300_edac",
.probe = i7300_init_one,
- .remove = __devexit_p(i7300_remove_one),
+ .remove = i7300_remove_one,
.id_table = i7300_pci_tbl,
};
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 10c8c00d6469..e213d030b0dd 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2305,8 +2305,7 @@ fail0:
* < 0 for error code
*/
-static int __devinit i7core_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc, count = 0;
struct i7core_dev *i7core_dev;
@@ -2368,7 +2367,7 @@ fail0:
* i7core_remove destructor for one instance of device
*
*/
-static void __devexit i7core_remove(struct pci_dev *pdev)
+static void i7core_remove(struct pci_dev *pdev)
{
struct i7core_dev *i7core_dev;
@@ -2409,7 +2408,7 @@ MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
static struct pci_driver i7core_driver = {
.name = "i7core_edac",
.probe = i7core_probe,
- .remove = __devexit_p(i7core_remove),
+ .remove = i7core_remove,
.id_table = i7core_pci_tbl,
};
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 90f303db5d1d..57fdb77903ba 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -353,8 +353,8 @@ fail:
EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
/* returns count (>= 0), or negative on error */
-static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc;
@@ -369,7 +369,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
+static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -399,7 +399,7 @@ MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
static struct pci_driver i82443bxgx_edacmc_driver = {
.name = EDAC_MOD_STR,
.probe = i82443bxgx_edacmc_init_one,
- .remove = __devexit_p(i82443bxgx_edacmc_remove_one),
+ .remove = i82443bxgx_edacmc_remove_one,
.id_table = i82443bxgx_pci_tbl,
};
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 1faa74971513..3e3e431c8301 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -254,8 +254,8 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit i82860_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i82860_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc;
@@ -273,7 +273,7 @@ static int __devinit i82860_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i82860_remove_one(struct pci_dev *pdev)
+static void i82860_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -302,7 +302,7 @@ MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
static struct pci_driver i82860_driver = {
.name = EDAC_MOD_STR,
.probe = i82860_init_one,
- .remove = __devexit_p(i82860_remove_one),
+ .remove = i82860_remove_one,
.id_table = i82860_pci_tbl,
};
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 3e416b1a6b53..2f8535fc451e 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -479,8 +479,8 @@ fail0:
}
/* returns count (>= 0), or negative on error */
-static int __devinit i82875p_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i82875p_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc;
@@ -498,7 +498,7 @@ static int __devinit i82875p_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+static void i82875p_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i82875p_pvt *pvt = NULL;
@@ -541,7 +541,7 @@ MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
static struct pci_driver i82875p_driver = {
.name = EDAC_MOD_STR,
.probe = i82875p_init_one,
- .remove = __devexit_p(i82875p_remove_one),
+ .remove = i82875p_remove_one,
.id_table = i82875p_pci_tbl,
};
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index a98020409fa9..0c8d4b0eaa32 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -592,8 +592,8 @@ fail0:
}
/* returns count (>= 0), or negative on error */
-static int __devinit i82975x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i82975x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc;
@@ -610,7 +610,7 @@ static int __devinit i82975x_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i82975x_remove_one(struct pci_dev *pdev)
+static void i82975x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i82975x_pvt *pvt;
@@ -643,7 +643,7 @@ MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
static struct pci_driver i82975x_driver = {
.name = EDAC_MOD_STR,
.probe = i82975x_init_one,
- .remove = __devexit_p(i82975x_remove_one),
+ .remove = i82975x_remove_one,
.id_table = i82975x_pci_tbl,
};
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ad637572d8c7..f3f0c930d550 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -39,30 +39,28 @@ EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
*/
/* transaction type */
-const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
-EXPORT_SYMBOL_GPL(tt_msgs);
+static const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
/* cache level */
-const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
-EXPORT_SYMBOL_GPL(ll_msgs);
+static const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
/* memory transaction type */
-const char * const rrrr_msgs[] = {
+static const char * const rrrr_msgs[] = {
"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
-EXPORT_SYMBOL_GPL(rrrr_msgs);
/* participating processor */
const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);
/* request timeout */
-const char * const to_msgs[] = { "no timeout", "timed out" };
-EXPORT_SYMBOL_GPL(to_msgs);
+static const char * const to_msgs[] = { "no timeout", "timed out" };
/* memory or i/o */
-const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
-EXPORT_SYMBOL_GPL(ii_msgs);
+static const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
+
+/* internal error type */
+static const char * const uu_msgs[] = { "RESV", "RESV", "HWA", "RESV" };
static const char * const f15h_mc1_mce_desc[] = {
"UC during a demand linefill from L2",
@@ -176,7 +174,7 @@ static bool k8_mc0_mce(u16 ec, u8 xec)
return f10h_mc0_mce(ec, xec);
}
-static bool f14h_mc0_mce(u16 ec, u8 xec)
+static bool cat_mc0_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
@@ -330,22 +328,28 @@ static bool k8_mc1_mce(u16 ec, u8 xec)
return ret;
}
-static bool f14h_mc1_mce(u16 ec, u8 xec)
+static bool cat_mc1_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
- if (MEM_ERROR(ec)) {
- if (TT(ec) != 0 || LL(ec) != 1)
- ret = false;
+ if (!MEM_ERROR(ec))
+ return false;
+
+ if (TT(ec) != TT_INSTR)
+ return false;
+
+ if (r4 == R4_IRD)
+ pr_cont("Data/tag array parity error for a tag hit.\n");
+ else if (r4 == R4_SNOOP)
+ pr_cont("Tag error during snoop/victimization.\n");
+ else if (xec == 0x0)
+ pr_cont("Tag parity error from victim castout.\n");
+ else if (xec == 0x2)
+ pr_cont("Microcode patch RAM parity error.\n");
+ else
+ ret = false;
- if (r4 == R4_IRD)
- pr_cont("Data/tag array parity error for a tag hit.\n");
- else if (r4 == R4_SNOOP)
- pr_cont("Tag error during snoop/victimization.\n");
- else
- ret = false;
- }
return ret;
}
@@ -399,12 +403,9 @@ static void decode_mc1_mce(struct mce *m)
pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
}
-static void decode_mc2_mce(struct mce *m)
+static bool k8_mc2_mce(u16 ec, u8 xec)
{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "MC2 Error");
+ bool ret = true;
if (xec == 0x1)
pr_cont(" in the write data buffers.\n");
@@ -429,24 +430,18 @@ static void decode_mc2_mce(struct mce *m)
pr_cont(": %s parity/ECC error during data "
"access from L2.\n", R4_MSG(ec));
else
- goto wrong_mc2_mce;
+ ret = false;
} else
- goto wrong_mc2_mce;
+ ret = false;
} else
- goto wrong_mc2_mce;
-
- return;
+ ret = false;
- wrong_mc2_mce:
- pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
+ return ret;
}
-static void decode_f15_mc2_mce(struct mce *m)
+static bool f15h_mc2_mce(u16 ec, u8 xec)
{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "MC2 Error: ");
+ bool ret = true;
if (TLB_ERROR(ec)) {
if (xec == 0x0)
@@ -454,10 +449,10 @@ static void decode_f15_mc2_mce(struct mce *m)
else if (xec == 0x1)
pr_cont("Poison data provided for TLB fill.\n");
else
- goto wrong_f15_mc2_mce;
+ ret = false;
} else if (BUS_ERROR(ec)) {
if (xec > 2)
- goto wrong_f15_mc2_mce;
+ ret = false;
pr_cont("Error during attempted NB data read.\n");
} else if (MEM_ERROR(ec)) {
@@ -471,14 +466,63 @@ static void decode_f15_mc2_mce(struct mce *m)
break;
default:
- goto wrong_f15_mc2_mce;
+ ret = false;
}
}
- return;
+ return ret;
+}
- wrong_f15_mc2_mce:
- pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
+static bool f16h_mc2_mce(u16 ec, u8 xec)
+{
+ u8 r4 = R4(ec);
+
+ if (!MEM_ERROR(ec))
+ return false;
+
+ switch (xec) {
+ case 0x04 ... 0x05:
+ pr_cont("%cBUFF parity error.\n", (r4 == R4_RD) ? 'I' : 'O');
+ break;
+
+ case 0x09 ... 0x0b:
+ case 0x0d ... 0x0f:
+ pr_cont("ECC error in L2 tag (%s).\n",
+ ((r4 == R4_GEN) ? "BankReq" :
+ ((r4 == R4_SNOOP) ? "Prb" : "Fill")));
+ break;
+
+ case 0x10 ... 0x19:
+ case 0x1b:
+ pr_cont("ECC error in L2 data array (%s).\n",
+ (((r4 == R4_RD) && !(xec & 0x3)) ? "Hit" :
+ ((r4 == R4_GEN) ? "Attr" :
+ ((r4 == R4_EVICT) ? "Vict" : "Fill"))));
+ break;
+
+ case 0x1c ... 0x1d:
+ case 0x1f:
+ pr_cont("Parity error in L2 attribute bits (%s).\n",
+ ((r4 == R4_RD) ? "Hit" :
+ ((r4 == R4_GEN) ? "Attr" : "Fill")));
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static void decode_mc2_mce(struct mce *m)
+{
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, xec_mask);
+
+ pr_emerg(HW_ERR "MC2 Error: ");
+
+ if (!fam_ops->mc2_mce(ec, xec))
+ pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
static void decode_mc3_mce(struct mce *m)
@@ -547,7 +591,7 @@ static void decode_mc4_mce(struct mce *m)
return;
case 0x19:
- if (boot_cpu_data.x86 == 0x15)
+ if (boot_cpu_data.x86 == 0x15 || boot_cpu_data.x86 == 0x16)
pr_cont("Compute Unit Data Error.\n");
else
goto wrong_mc4_mce;
@@ -633,6 +677,10 @@ static void decode_mc6_mce(struct mce *m)
static inline void amd_decode_err_code(u16 ec)
{
+ if (INT_ERROR(ec)) {
+ pr_emerg(HW_ERR "internal: %s\n", UU_MSG(ec));
+ return;
+ }
pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
@@ -702,10 +750,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
break;
case 2:
- if (c->x86 == 0x15)
- decode_f15_mc2_mce(m);
- else
- decode_mc2_mce(m);
+ decode_mc2_mce(m);
break;
case 3:
@@ -740,7 +785,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 == 0x15)
+ if (c->x86 == 0x15 || c->x86 == 0x16)
pr_cont("|%s|%s",
((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
@@ -772,7 +817,7 @@ static int __init mce_amd_init(void)
if (c->x86_vendor != X86_VENDOR_AMD)
return 0;
- if (c->x86 < 0xf || c->x86 > 0x15)
+ if (c->x86 < 0xf || c->x86 > 0x16)
return 0;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -783,33 +828,46 @@ static int __init mce_amd_init(void)
case 0xf:
fam_ops->mc0_mce = k8_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x10:
fam_ops->mc0_mce = f10h_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x11:
fam_ops->mc0_mce = k8_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x12:
fam_ops->mc0_mce = f12h_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x14:
nb_err_cpumask = 0x3;
- fam_ops->mc0_mce = f14h_mc0_mce;
- fam_ops->mc1_mce = f14h_mc1_mce;
+ fam_ops->mc0_mce = cat_mc0_mce;
+ fam_ops->mc1_mce = cat_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = 0x1f;
fam_ops->mc0_mce = f15h_mc0_mce;
fam_ops->mc1_mce = f15h_mc1_mce;
+ fam_ops->mc2_mce = f15h_mc2_mce;
+ break;
+
+ case 0x16:
+ xec_mask = 0x1f;
+ fam_ops->mc0_mce = cat_mc0_mce;
+ fam_ops->mc1_mce = cat_mc1_mce;
+ fam_ops->mc2_mce = f16h_mc2_mce;
break;
default:
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 679679951e23..51b7e3a36e37 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -14,6 +14,7 @@
#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
#define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
#define BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
+#define INT_ERROR(x) (((x) & 0xF4FF) == 0x0400)
#define TT(x) (((x) >> 2) & 0x3)
#define TT_MSG(x) tt_msgs[TT(x)]
@@ -25,6 +26,8 @@
#define TO_MSG(x) to_msgs[TO(x)]
#define PP(x) (((x) >> 9) & 0x3)
#define PP_MSG(x) pp_msgs[PP(x)]
+#define UU(x) (((x) >> 8) & 0x3)
+#define UU_MSG(x) uu_msgs[UU(x)]
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
@@ -32,6 +35,8 @@
#define MCI_STATUS_DEFERRED BIT_64(44)
#define MCI_STATUS_POISON BIT_64(43)
+extern const char * const pp_msgs[];
+
enum tt_ids {
TT_INSTR = 0,
TT_DATA,
@@ -65,19 +70,13 @@ enum rrrr_ids {
R4_SNOOP,
};
-extern const char * const tt_msgs[];
-extern const char * const ll_msgs[];
-extern const char * const rrrr_msgs[];
-extern const char * const pp_msgs[];
-extern const char * const to_msgs[];
-extern const char * const ii_msgs[];
-
/*
* per-family decoder ops
*/
struct amd_decoder_ops {
bool (*mc0_mce)(u16, u8);
bool (*mc1_mce)(u16, u8);
+ bool (*mc2_mce)(u16, u8);
};
void amd_report_gart_errors(bool);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4fe66fa183ec..3eb32f62d72a 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -212,7 +212,7 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
+int mpc85xx_pci_err_probe(struct platform_device *op)
{
struct edac_pci_ctl_info *pci;
struct mpc85xx_pci_pdata *pdata;
@@ -301,7 +301,7 @@ int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
"[EDAC] PCI err", pci);
if (res < 0) {
printk(KERN_ERR
- "%s: Unable to requiest irq %d for "
+ "%s: Unable to request irq %d for "
"MPC85xx PCI err\n", __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
@@ -504,7 +504,7 @@ static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
+static int mpc85xx_l2_err_probe(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev;
struct mpc85xx_l2_pdata *pdata;
@@ -583,7 +583,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
"[EDAC] L2 err", edac_dev);
if (res < 0) {
printk(KERN_ERR
- "%s: Unable to requiest irq %d for "
+ "%s: Unable to request irq %d for "
"MPC85xx L2 err\n", __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
@@ -885,7 +885,7 @@ static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
+static void mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
@@ -964,7 +964,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
+static int mpc85xx_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 2b315c2edc3c..542fad70e360 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -100,7 +100,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev)
return 0;
}
-static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
+static int mv64x60_pci_err_probe(struct platform_device *pdev)
{
struct edac_pci_ctl_info *pci;
struct mv64x60_pci_pdata *pdata;
@@ -221,7 +221,7 @@ static int mv64x60_pci_err_remove(struct platform_device *pdev)
static struct platform_driver mv64x60_pci_err_driver = {
.probe = mv64x60_pci_err_probe,
- .remove = __devexit_p(mv64x60_pci_err_remove),
+ .remove = mv64x60_pci_err_remove,
.driver = {
.name = "mv64x60_pci_err",
}
@@ -271,7 +271,7 @@ static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
+static int mv64x60_sram_err_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *edac_dev;
struct mv64x60_sram_pdata *pdata;
@@ -439,7 +439,7 @@ static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
+static int mv64x60_cpu_err_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *edac_dev;
struct resource *r;
@@ -697,7 +697,7 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
dimm->edac_mode = EDAC_SECDED;
}
-static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
+static int mv64x60_mc_err_probe(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
new file mode 100644
index 000000000000..7e98084d3645
--- /dev/null
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -0,0 +1,208 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/cvmx.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_MOD_STR "octeon-l2c"
+
+static void octeon_l2c_poll_oct1(struct edac_device_ctl_info *l2c)
+{
+ union cvmx_l2t_err l2t_err, l2t_err_reset;
+ union cvmx_l2d_err l2d_err, l2d_err_reset;
+
+ l2t_err_reset.u64 = 0;
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.sec_err) {
+ edac_device_handle_ce(l2c, 0, 0,
+ "Tag Single bit error (corrected)");
+ l2t_err_reset.s.sec_err = 1;
+ }
+ if (l2t_err.s.ded_err) {
+ edac_device_handle_ue(l2c, 0, 0,
+ "Tag Double bit error (detected)");
+ l2t_err_reset.s.ded_err = 1;
+ }
+ if (l2t_err_reset.u64)
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err_reset.u64);
+
+ l2d_err_reset.u64 = 0;
+ l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
+ if (l2d_err.s.sec_err) {
+ edac_device_handle_ce(l2c, 0, 1,
+ "Data Single bit error (corrected)");
+ l2d_err_reset.s.sec_err = 1;
+ }
+ if (l2d_err.s.ded_err) {
+ edac_device_handle_ue(l2c, 0, 1,
+ "Data Double bit error (detected)");
+ l2d_err_reset.s.ded_err = 1;
+ }
+ if (l2d_err_reset.u64)
+ cvmx_write_csr(CVMX_L2D_ERR, l2d_err_reset.u64);
+
+}
+
+static void _octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c, int tad)
+{
+ union cvmx_l2c_err_tdtx err_tdtx, err_tdtx_reset;
+ union cvmx_l2c_err_ttgx err_ttgx, err_ttgx_reset;
+ char buf1[64];
+ char buf2[80];
+
+ err_tdtx_reset.u64 = 0;
+ err_tdtx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TDTX(tad));
+ if (err_tdtx.s.dbe || err_tdtx.s.sbe ||
+ err_tdtx.s.vdbe || err_tdtx.s.vsbe)
+ snprintf(buf1, sizeof(buf1),
+ "type:%d, syn:0x%x, way:%d",
+ err_tdtx.s.type, err_tdtx.s.syn, err_tdtx.s.wayidx);
+
+ if (err_tdtx.s.dbe) {
+ snprintf(buf2, sizeof(buf2),
+ "L2D Double bit error (detected):%s", buf1);
+ err_tdtx_reset.s.dbe = 1;
+ edac_device_handle_ue(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.sbe) {
+ snprintf(buf2, sizeof(buf2),
+ "L2D Single bit error (corrected):%s", buf1);
+ err_tdtx_reset.s.sbe = 1;
+ edac_device_handle_ce(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.vdbe) {
+ snprintf(buf2, sizeof(buf2),
+ "VBF Double bit error (detected):%s", buf1);
+ err_tdtx_reset.s.vdbe = 1;
+ edac_device_handle_ue(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.vsbe) {
+ snprintf(buf2, sizeof(buf2),
+ "VBF Single bit error (corrected):%s", buf1);
+ err_tdtx_reset.s.vsbe = 1;
+ edac_device_handle_ce(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx_reset.u64)
+ cvmx_write_csr(CVMX_L2C_ERR_TDTX(tad), err_tdtx_reset.u64);
+
+ err_ttgx_reset.u64 = 0;
+ err_ttgx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TTGX(tad));
+
+ if (err_ttgx.s.dbe || err_ttgx.s.sbe)
+ snprintf(buf1, sizeof(buf1),
+ "type:%d, syn:0x%x, way:%d",
+ err_ttgx.s.type, err_ttgx.s.syn, err_ttgx.s.wayidx);
+
+ if (err_ttgx.s.dbe) {
+ snprintf(buf2, sizeof(buf2),
+ "Tag Double bit error (detected):%s", buf1);
+ err_ttgx_reset.s.dbe = 1;
+ edac_device_handle_ue(l2c, tad, 0, buf2);
+ }
+ if (err_ttgx.s.sbe) {
+ snprintf(buf2, sizeof(buf2),
+ "Tag Single bit error (corrected):%s", buf1);
+ err_ttgx_reset.s.sbe = 1;
+ edac_device_handle_ce(l2c, tad, 0, buf2);
+ }
+ if (err_ttgx_reset.u64)
+ cvmx_write_csr(CVMX_L2C_ERR_TTGX(tad), err_ttgx_reset.u64);
+}
+
+static void octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c)
+{
+ int i;
+ for (i = 0; i < l2c->nr_instances; i++)
+ _octeon_l2c_poll_oct2(l2c, i);
+}
+
+static int octeon_l2c_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *l2c;
+
+ int num_tads = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : 1;
+
+ /* 'Tags' are block 0, 'Data' is block 1 */
+ l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0,
+ NULL, 0, edac_device_alloc_index());
+ if (!l2c)
+ return -ENOMEM;
+
+ l2c->dev = &pdev->dev;
+ platform_set_drvdata(pdev, l2c);
+ l2c->dev_name = dev_name(&pdev->dev);
+
+ l2c->mod_name = "octeon-l2c";
+ l2c->ctl_name = "octeon_l2c_err";
+
+
+ if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+ union cvmx_l2t_err l2t_err;
+ union cvmx_l2d_err l2d_err;
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.sec_intena = 0; /* We poll */
+ l2t_err.s.ded_intena = 0;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+ l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
+ l2d_err.s.sec_intena = 0; /* We poll */
+ l2d_err.s.ded_intena = 0;
+ cvmx_write_csr(CVMX_L2D_ERR, l2d_err.u64);
+
+ l2c->edac_check = octeon_l2c_poll_oct1;
+ } else {
+ /* OCTEON II */
+ l2c->edac_check = octeon_l2c_poll_oct2;
+ }
+
+ if (edac_device_add_device(l2c) > 0) {
+ pr_err("%s: edac_device_add_device() failed\n", __func__);
+ goto err;
+ }
+
+
+ return 0;
+
+err:
+ edac_device_free_ctl_info(l2c);
+
+ return -ENXIO;
+}
+
+static int octeon_l2c_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(l2c);
+
+ return 0;
+}
+
+static struct platform_driver octeon_l2c_driver = {
+ .probe = octeon_l2c_probe,
+ .remove = octeon_l2c_remove,
+ .driver = {
+ .name = "octeon_l2c_edac",
+ }
+};
+module_platform_driver(octeon_l2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
new file mode 100644
index 000000000000..93412d6b3af1
--- /dev/null
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -0,0 +1,186 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-lmcx-defs.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define OCTEON_MAX_MC 4
+
+static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
+{
+ union cvmx_lmcx_mem_cfg0 cfg0;
+ bool do_clear = false;
+ char msg[64];
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx));
+ if (cfg0.s.sec_err || cfg0.s.ded_err) {
+ union cvmx_lmcx_fadr fadr;
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ snprintf(msg, sizeof(msg),
+ "DIMM %d rank %d bank %d row %d col %d",
+ fadr.cn30xx.fdimm, fadr.cn30xx.fbunk,
+ fadr.cn30xx.fbank, fadr.cn30xx.frow, fadr.cn30xx.fcol);
+ }
+
+ if (cfg0.s.sec_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ cfg0.s.sec_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+
+ if (cfg0.s.ded_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ cfg0.s.ded_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+ if (do_clear)
+ cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx), cfg0.u64);
+}
+
+static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
+{
+ union cvmx_lmcx_int int_reg;
+ bool do_clear = false;
+ char msg[64];
+
+ int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ if (int_reg.s.sec_err || int_reg.s.ded_err) {
+ union cvmx_lmcx_fadr fadr;
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ snprintf(msg, sizeof(msg),
+ "DIMM %d rank %d bank %d row %d col %d",
+ fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
+ fadr.cn61xx.fbank, fadr.cn61xx.frow, fadr.cn61xx.fcol);
+ }
+
+ if (int_reg.s.sec_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ int_reg.s.sec_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+
+ if (int_reg.s.ded_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ int_reg.s.ded_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+ if (do_clear)
+ cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+}
+
+static int octeon_lmc_edac_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ int mc = pdev->id;
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+
+ if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+ union cvmx_lmcx_mem_cfg0 cfg0;
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
+ if (!cfg0.s.ecc_ena) {
+ dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
+ return 0;
+ }
+
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ if (!mci)
+ return -ENXIO;
+
+ mci->pdev = &pdev->dev;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ mci->mod_name = "octeon-lmc";
+ mci->ctl_name = "octeon-lmc-err";
+ mci->edac_check = octeon_lmc_edac_poll;
+
+ if (edac_mc_add_mc(mci)) {
+ dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
+ edac_mc_free(mci);
+ return -ENXIO;
+ }
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
+ cfg0.s.intr_ded_ena = 0; /* We poll */
+ cfg0.s.intr_sec_ena = 0;
+ cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64);
+ } else {
+ /* OCTEON II */
+ union cvmx_lmcx_int_en en;
+ union cvmx_lmcx_config config;
+
+ config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0));
+ if (!config.s.ecc_ena) {
+ dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
+ return 0;
+ }
+
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ if (!mci)
+ return -ENXIO;
+
+ mci->pdev = &pdev->dev;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ mci->mod_name = "octeon-lmc";
+ mci->ctl_name = "co_lmc_err";
+ mci->edac_check = octeon_lmc_edac_poll_o2;
+
+ if (edac_mc_add_mc(mci)) {
+ dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
+ edac_mc_free(mci);
+ return -ENXIO;
+ }
+
+ en.u64 = cvmx_read_csr(CVMX_LMCX_INT_EN(mc));
+ en.s.intr_ded_ena = 0; /* We poll */
+ en.s.intr_sec_ena = 0;
+ cvmx_write_csr(CVMX_LMCX_INT_EN(mc), en.u64);
+ }
+ platform_set_drvdata(pdev, mci);
+
+ return 0;
+}
+
+static int octeon_lmc_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver octeon_lmc_edac_driver = {
+ .probe = octeon_lmc_edac_probe,
+ .remove = octeon_lmc_edac_remove,
+ .driver = {
+ .name = "octeon_lmc_edac",
+ }
+};
+module_platform_driver(octeon_lmc_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
new file mode 100644
index 000000000000..0f83c33a7d1f
--- /dev/null
+++ b/drivers/edac/octeon_edac-pc.c
@@ -0,0 +1,143 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#include <asm/octeon/cvmx.h>
+#include <asm/mipsregs.h>
+
+extern int register_co_cache_error_notifier(struct notifier_block *nb);
+extern int unregister_co_cache_error_notifier(struct notifier_block *nb);
+
+extern unsigned long long cache_err_dcache[NR_CPUS];
+
+struct co_cache_error {
+ struct notifier_block notifier;
+ struct edac_device_ctl_info *ed;
+};
+
+/**
+ * EDAC CPU cache error callback
+ *
+ * @event: non-zero if unrecoverable.
+ */
+static int co_cache_error_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct co_cache_error *p = container_of(this, struct co_cache_error,
+ notifier);
+
+ unsigned int core = cvmx_get_core_num();
+ unsigned int cpu = smp_processor_id();
+ u64 icache_err = read_octeon_c0_icacheerr();
+ u64 dcache_err;
+
+ if (event) {
+ dcache_err = cache_err_dcache[core];
+ cache_err_dcache[core] = 0;
+ } else {
+ dcache_err = read_octeon_c0_dcacheerr();
+ }
+
+ if (icache_err & 1) {
+ edac_device_printk(p->ed, KERN_ERR,
+ "CacheErr (Icache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n",
+ (unsigned long long)icache_err, core, cpu,
+ read_c0_errorepc());
+ write_octeon_c0_icacheerr(0);
+ edac_device_handle_ce(p->ed, cpu, 1, "icache");
+ }
+ if (dcache_err & 1) {
+ edac_device_printk(p->ed, KERN_ERR,
+ "CacheErr (Dcache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n",
+ (unsigned long long)dcache_err, core, cpu,
+ read_c0_errorepc());
+ if (event)
+ edac_device_handle_ue(p->ed, cpu, 0, "dcache");
+ else
+ edac_device_handle_ce(p->ed, cpu, 0, "dcache");
+
+ /* Clear the error indication */
+ if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+ write_octeon_c0_dcacheerr(1);
+ else
+ write_octeon_c0_dcacheerr(0);
+ }
+
+ return NOTIFY_STOP;
+}
+
+static int co_cache_error_probe(struct platform_device *pdev)
+{
+ struct co_cache_error *p = devm_kzalloc(&pdev->dev, sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->notifier.notifier_call = co_cache_error_event;
+ platform_set_drvdata(pdev, p);
+
+ p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(),
+ "cache", 2, 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!p->ed)
+ goto err;
+
+ p->ed->dev = &pdev->dev;
+
+ p->ed->dev_name = dev_name(&pdev->dev);
+
+ p->ed->mod_name = "octeon-cpu";
+ p->ed->ctl_name = "cache";
+
+ if (edac_device_add_device(p->ed)) {
+ pr_err("%s: edac_device_add_device() failed\n", __func__);
+ goto err1;
+ }
+
+ register_co_cache_error_notifier(&p->notifier);
+
+ return 0;
+
+err1:
+ edac_device_free_ctl_info(p->ed);
+err:
+ return -ENXIO;
+}
+
+static int co_cache_error_remove(struct platform_device *pdev)
+{
+ struct co_cache_error *p = platform_get_drvdata(pdev);
+
+ unregister_co_cache_error_notifier(&p->notifier);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(p->ed);
+ return 0;
+}
+
+static struct platform_driver co_cache_error_driver = {
+ .probe = co_cache_error_probe,
+ .remove = co_cache_error_remove,
+ .driver = {
+ .name = "octeon_pc_edac",
+ }
+};
+module_platform_driver(co_cache_error_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
new file mode 100644
index 000000000000..9ca73cec74e7
--- /dev/null
+++ b/drivers/edac/octeon_edac-pci.c
@@ -0,0 +1,111 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/octeon.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+static void octeon_pci_poll(struct edac_pci_ctl_info *pci)
+{
+ union cvmx_pci_cfg01 cfg01;
+
+ cfg01.u32 = octeon_npi_read32(CVMX_NPI_PCI_CFG01);
+ if (cfg01.s.dpe) { /* Detected parity error */
+ edac_pci_handle_pe(pci, pci->ctl_name);
+ cfg01.s.dpe = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.sse) {
+ edac_pci_handle_npe(pci, "Signaled System Error");
+ cfg01.s.sse = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.rma) {
+ edac_pci_handle_npe(pci, "Received Master Abort");
+ cfg01.s.rma = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.rta) {
+ edac_pci_handle_npe(pci, "Received Target Abort");
+ cfg01.s.rta = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.sta) {
+ edac_pci_handle_npe(pci, "Signaled Target Abort");
+ cfg01.s.sta = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.mdpe) {
+ edac_pci_handle_npe(pci, "Master Data Parity Error");
+ cfg01.s.mdpe = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+}
+
+static int octeon_pci_probe(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci;
+ int res = 0;
+
+ pci = edac_pci_alloc_ctl_info(0, "octeon_pci_err");
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pci);
+ pci->dev_name = dev_name(&pdev->dev);
+
+ pci->mod_name = "octeon-pci";
+ pci->ctl_name = "octeon_pci_err";
+ pci->edac_check = octeon_pci_poll;
+
+ if (edac_pci_add_device(pci, 0) > 0) {
+ pr_err("%s: edac_pci_add_device() failed\n", __func__);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ edac_pci_free_ctl_info(pci);
+
+ return res;
+}
+
+static int octeon_pci_remove(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+
+ edac_pci_del_device(&pdev->dev);
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct platform_driver octeon_pci_driver = {
+ .probe = octeon_pci_probe,
+ .remove = octeon_pci_remove,
+ .driver = {
+ .name = "octeon_pci_edac",
+ }
+};
+module_platform_driver(octeon_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 2d35b78ada3c..9c971b575530 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -188,8 +188,8 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
return 0;
}
-static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pasemi_edac_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
@@ -266,7 +266,7 @@ fail:
return -ENODEV;
}
-static void __devexit pasemi_edac_remove(struct pci_dev *pdev)
+static void pasemi_edac_remove(struct pci_dev *pdev)
{
struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
@@ -287,7 +287,7 @@ MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
static struct pci_driver pasemi_edac_driver = {
.name = MODULE_NAME,
.probe = pasemi_edac_probe,
- .remove = __devexit_p(pasemi_edac_remove),
+ .remove = pasemi_edac_remove,
.id_table = pasemi_edac_pci_tbl,
};
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index bf0957635991..ef6b7e08f485 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -838,8 +838,7 @@ ppc4xx_edac_isr(int irq, void *dev_id)
*
* Returns a device type width enumeration.
*/
-static enum dev_type __devinit
-ppc4xx_edac_get_dtype(u32 mcopt1)
+static enum dev_type ppc4xx_edac_get_dtype(u32 mcopt1)
{
switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) {
case SDRAM_MCOPT1_WDTH_16:
@@ -862,8 +861,7 @@ ppc4xx_edac_get_dtype(u32 mcopt1)
*
* Returns a memory type enumeration.
*/
-static enum mem_type __devinit
-ppc4xx_edac_get_mtype(u32 mcopt1)
+static enum mem_type ppc4xx_edac_get_mtype(u32 mcopt1)
{
bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN);
@@ -893,8 +891,7 @@ ppc4xx_edac_get_mtype(u32 mcopt1)
* Returns 0 if OK; otherwise, -EINVAL if the memory bank size
* configuration cannot be determined.
*/
-static int __devinit
-ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
+static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
{
const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
int status = 0;
@@ -1011,11 +1008,9 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*
* Returns 0 if OK; otherwise, < 0 on error.
*/
-static int __devinit
-ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
- struct platform_device *op,
- const dcr_host_t *dcr_host,
- u32 mcopt1)
+static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
+ struct platform_device *op,
+ const dcr_host_t *dcr_host, u32 mcopt1)
{
int status = 0;
const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
@@ -1105,8 +1100,8 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
* Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be
* mapped and assigned.
*/
-static int __devinit
-ppc4xx_edac_register_irq(struct platform_device *op, struct mem_ctl_info *mci)
+static int ppc4xx_edac_register_irq(struct platform_device *op,
+ struct mem_ctl_info *mci)
{
int status = 0;
int ded_irq, sec_irq;
@@ -1183,8 +1178,8 @@ ppc4xx_edac_register_irq(struct platform_device *op, struct mem_ctl_info *mci)
* Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on
* error.
*/
-static int __devinit
-ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
+static int ppc4xx_edac_map_dcrs(const struct device_node *np,
+ dcr_host_t *dcr_host)
{
unsigned int dcr_base, dcr_len;
@@ -1232,7 +1227,7 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
* Returns 0 if the controller instance was successfully bound to the
* driver; otherwise, < 0 on error.
*/
-static int __devinit ppc4xx_edac_probe(struct platform_device *op)
+static int ppc4xx_edac_probe(struct platform_device *op)
{
int status = 0;
u32 mcopt1, memcheck;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index f854debd5533..2fd6a5490905 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -359,8 +359,8 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit r82600_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int r82600_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
@@ -368,7 +368,7 @@ static int __devinit r82600_init_one(struct pci_dev *pdev,
return r82600_probe1(pdev, ent->driver_data);
}
-static void __devexit r82600_remove_one(struct pci_dev *pdev)
+static void r82600_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -397,7 +397,7 @@ MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
static struct pci_driver r82600_driver = {
.name = EDAC_MOD_STR,
.probe = r82600_init_one,
- .remove = __devexit_p(r82600_remove_one),
+ .remove = r82600_remove_one,
.id_table = r82600_pci_tbl,
};
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 5715b7c2c517..da7e2986e3d5 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1692,8 +1692,7 @@ fail0:
* < 0 for error code
*/
-static int __devinit sbridge_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
u8 mc, num_mc = 0;
@@ -1744,7 +1743,7 @@ fail0:
* sbridge_remove destructor for one instance of device
*
*/
-static void __devexit sbridge_remove(struct pci_dev *pdev)
+static void sbridge_remove(struct pci_dev *pdev)
{
struct sbridge_dev *sbridge_dev;
@@ -1785,7 +1784,7 @@ MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
static struct pci_driver sbridge_driver = {
.name = "sbridge_edac",
.probe = sbridge_probe,
- .remove = __devexit_p(sbridge_remove),
+ .remove = sbridge_remove,
.id_table = sbridge_pci_tbl,
};
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 1e904b7b79a0..a0820536b7d9 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -82,7 +82,7 @@ static void tile_edac_check(struct mem_ctl_info *mci)
* Initialize the 'csrows' table within the mci control structure with the
* addressing of memory.
*/
-static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
+static int tile_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = mci->csrows[0];
struct tile_edac_priv *priv = mci->pvt_info;
@@ -120,7 +120,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
return 0;
}
-static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
+static int tile_edac_mc_probe(struct platform_device *pdev)
{
char hv_file[32];
int hv_devhdl;
@@ -186,7 +186,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tile_edac_mc_remove(struct platform_device *pdev)
+static int tile_edac_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
@@ -202,7 +202,7 @@ static struct platform_driver tile_edac_mc_driver = {
.owner = THIS_MODULE,
},
.probe = tile_edac_mc_probe,
- .remove = __devexit_p(tile_edac_mc_remove),
+ .remove = tile_edac_mc_remove,
};
/*
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 08a992693e62..c9db24d95caa 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -418,8 +418,7 @@ fail:
return rc;
}
-static int __devinit x38_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
@@ -435,7 +434,7 @@ static int __devinit x38_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit x38_remove_one(struct pci_dev *pdev)
+static void x38_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -464,7 +463,7 @@ MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
static struct pci_driver x38_driver = {
.name = EDAC_MOD_STR,
.probe = x38_init_one,
- .remove = __devexit_p(x38_remove_one),
+ .remove = x38_remove_one,
.id_table = x38_pci_tbl,
};
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 07122a9ef36e..5168a1324a65 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -29,7 +29,7 @@ config EXTCON_ADC_JACK
config EXTCON_MAX77693
tristate "MAX77693 EXTCON Support"
- depends on MFD_MAX77693
+ depends on MFD_MAX77693 && INPUT
select IRQ_DOMAIN
select REGMAP_I2C
help
@@ -47,7 +47,7 @@ config EXTCON_MAX8997
config EXTCON_ARIZONA
tristate "Wolfson Arizona EXTCON support"
- depends on MFD_ARIZONA && INPUT
+ depends on MFD_ARIZONA && INPUT && SND_SOC
help
Say Y here to enable support for external accessory detection
with Wolfson Arizona devices. These are audio CODECs with
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index eda2a1aa4adb..d0233cd18ffa 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -135,8 +135,7 @@ static int adc_jack_probe(struct platform_device *pdev)
;
data->num_conditions = i;
- data->chan = iio_channel_get(dev_name(&pdev->dev),
- pdata->consumer_channel);
+ data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel);
if (IS_ERR(data->chan)) {
err = PTR_ERR(data->chan);
goto out;
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index f10f05d4ee9c..dc357a4051f6 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -27,12 +27,18 @@
#include <linux/regulator/consumer.h>
#include <linux/extcon.h>
+#include <sound/soc.h>
+
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
#define ARIZONA_NUM_BUTTONS 6
+#define ARIZONA_ACCDET_MODE_MIC 0
+#define ARIZONA_ACCDET_MODE_HPL 1
+#define ARIZONA_ACCDET_MODE_HPR 2
+
struct arizona_extcon_info {
struct device *dev;
struct arizona *arizona;
@@ -45,17 +51,28 @@ struct arizona_extcon_info {
int micd_num_modes;
bool micd_reva;
+ bool micd_clamp;
+
+ struct delayed_work hpdet_work;
+
+ bool hpdet_active;
+ bool hpdet_done;
+
+ int num_hpdet_res;
+ unsigned int hpdet_res[3];
bool mic;
bool detecting;
int jack_flips;
+ int hpdet_ip;
+
struct extcon_dev edev;
};
static const struct arizona_micd_config micd_default_modes[] = {
- { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
{ 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 },
+ { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
};
static struct {
@@ -73,11 +90,13 @@ static struct {
#define ARIZONA_CABLE_MECHANICAL 0
#define ARIZONA_CABLE_MICROPHONE 1
#define ARIZONA_CABLE_HEADPHONE 2
+#define ARIZONA_CABLE_LINEOUT 3
static const char *arizona_cable[] = {
"Mechanical",
"Microphone",
"Headphone",
+ "Line-out",
NULL,
};
@@ -85,8 +104,9 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
{
struct arizona *arizona = info->arizona;
- gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
- info->micd_modes[mode].gpio);
+ if (arizona->pdata.micd_pol_gpio > 0)
+ gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
+ info->micd_modes[mode].gpio);
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_BIAS_SRC_MASK,
info->micd_modes[mode].bias);
@@ -98,19 +118,70 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
dev_dbg(arizona->dev, "Set jack polarity to %d\n", mode);
}
+static const char *arizona_extcon_get_micbias(struct arizona_extcon_info *info)
+{
+ switch (info->micd_modes[0].bias >> ARIZONA_MICD_BIAS_SRC_SHIFT) {
+ case 1:
+ return "MICBIAS1";
+ case 2:
+ return "MICBIAS2";
+ case 3:
+ return "MICBIAS3";
+ default:
+ return "MICVDD";
+ }
+}
+
+static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ const char *widget = arizona_extcon_get_micbias(info);
+ struct snd_soc_dapm_context *dapm = arizona->dapm;
+ int ret;
+
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to enable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+
+ if (!arizona->pdata.micd_force_micbias) {
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to disable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+ }
+}
+
static void arizona_start_mic(struct arizona_extcon_info *info)
{
struct arizona *arizona = info->arizona;
bool change;
int ret;
- info->detecting = true;
- info->mic = false;
- info->jack_flips = 0;
-
/* Microphone detection can't use idle mode */
pm_runtime_get(info->dev);
+ if (info->detecting) {
+ ret = regulator_allow_bypass(info->micvdd, false);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to regulate MICVDD: %d\n",
+ ret);
+ }
+ }
+
ret = regulator_enable(info->micvdd);
if (ret != 0) {
dev_err(arizona->dev, "Failed to enable MICVDD: %d\n",
@@ -123,6 +194,12 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
regmap_write(arizona->regmap, 0x80, 0x0);
}
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ arizona_extcon_pulse_micbias(info);
+
regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
&change);
@@ -135,18 +212,39 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
static void arizona_stop_mic(struct arizona_extcon_info *info)
{
struct arizona *arizona = info->arizona;
+ const char *widget = arizona_extcon_get_micbias(info);
+ struct snd_soc_dapm_context *dapm = arizona->dapm;
bool change;
+ int ret;
regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_ENA, 0,
&change);
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_disable_pin(dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev,
+ "Failed to disable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+
if (info->micd_reva) {
regmap_write(arizona->regmap, 0x80, 0x3);
regmap_write(arizona->regmap, 0x294, 2);
regmap_write(arizona->regmap, 0x80, 0x0);
}
+ ret = regulator_allow_bypass(info->micvdd, true);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
+ ret);
+ }
+
if (change) {
regulator_disable(info->micvdd);
pm_runtime_mark_last_busy(info->dev);
@@ -154,6 +252,478 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
}
}
+static struct {
+ unsigned int factor_a;
+ unsigned int factor_b;
+} arizona_hpdet_b_ranges[] = {
+ { 5528, 362464 },
+ { 11084, 6186851 },
+ { 11065, 65460395 },
+};
+
+static struct {
+ int min;
+ int max;
+} arizona_hpdet_c_ranges[] = {
+ { 0, 30 },
+ { 8, 100 },
+ { 100, 1000 },
+ { 1000, 10000 },
+};
+
+static int arizona_hpdet_read(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val, range;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_2, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read HPDET status: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (info->hpdet_ip) {
+ case 0:
+ if (!(val & ARIZONA_HP_DONE)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ val &= ARIZONA_HP_LVL_MASK;
+ break;
+
+ case 1:
+ if (!(val & ARIZONA_HP_DONE_B)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ ret = regmap_read(arizona->regmap, ARIZONA_HP_DACVAL, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read HP value: %d\n",
+ ret);
+ return -EAGAIN;
+ }
+
+ regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ &range);
+ range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
+ >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
+
+ if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
+ (val < 100 || val > 0x3fb)) {
+ range++;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d\n",
+ range);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+
+ /* If we go out of range report top of range */
+ if (val < 100 || val > 0x3fb) {
+ dev_dbg(arizona->dev, "Measurement out of range\n");
+ return 10000;
+ }
+
+ dev_dbg(arizona->dev, "HPDET read %d in range %d\n",
+ val, range);
+
+ val = arizona_hpdet_b_ranges[range].factor_b
+ / ((val * 100) -
+ arizona_hpdet_b_ranges[range].factor_a);
+ break;
+
+ default:
+ dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
+ info->hpdet_ip);
+ case 2:
+ if (!(val & ARIZONA_HP_DONE_B)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ val &= ARIZONA_HP_LVL_B_MASK;
+
+ regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ &range);
+ range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
+ >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
+
+ /* Skip up or down a range? */
+ if (range && (val < arizona_hpdet_c_ranges[range].min)) {
+ range--;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
+ arizona_hpdet_c_ranges[range].min,
+ arizona_hpdet_c_ranges[range].max);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+
+ if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
+ (val >= arizona_hpdet_c_ranges[range].max)) {
+ range++;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
+ arizona_hpdet_c_ranges[range].min,
+ arizona_hpdet_c_ranges[range].max);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+ }
+
+ dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
+ return val;
+}
+
+static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading)
+{
+ struct arizona *arizona = info->arizona;
+ int id_gpio = arizona->pdata.hpdet_id_gpio;
+
+ /*
+ * If we're using HPDET for accessory identification we need
+ * to take multiple measurements, stepping through them in sequence.
+ */
+ if (arizona->pdata.hpdet_acc_id) {
+ info->hpdet_res[info->num_hpdet_res++] = *reading;
+
+ /*
+ * If the impedance is too high, don't measure the
+ * second ground.
+ */
+ if (info->num_hpdet_res == 1 && *reading >= 45) {
+ dev_dbg(arizona->dev, "Skipping ground flip\n");
+ info->hpdet_res[info->num_hpdet_res++] = *reading;
+ }
+
+ if (info->num_hpdet_res == 1) {
+ dev_dbg(arizona->dev, "Flipping ground\n");
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC,
+ ~info->micd_modes[0].src);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ return -EAGAIN;
+ }
+
+ /* Only check the mic directly if we didn't already ID it */
+ if (id_gpio && info->num_hpdet_res == 2 &&
+ !((info->hpdet_res[0] > info->hpdet_res[1] * 2))) {
+ dev_dbg(arizona->dev, "Measuring mic\n");
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK |
+ ARIZONA_ACCDET_SRC,
+ ARIZONA_ACCDET_MODE_HPR |
+ info->micd_modes[0].src);
+
+ gpio_set_value_cansleep(id_gpio, 1);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ return -EAGAIN;
+ }
+
+ /* OK, got both. Now, compare... */
+ dev_dbg(arizona->dev, "HPDET measured %d %d %d\n",
+ info->hpdet_res[0], info->hpdet_res[1],
+ info->hpdet_res[2]);
+
+
+ /* Take the headphone impedance for the main report */
+ *reading = info->hpdet_res[0];
+
+ /*
+ * Either the two grounds measure differently or we
+ * measure the mic as high impedance.
+ */
+ if ((info->hpdet_res[0] > info->hpdet_res[1] * 2) ||
+ (id_gpio && info->hpdet_res[2] > 10)) {
+ dev_dbg(arizona->dev, "Detected mic\n");
+ info->mic = true;
+ info->detecting = true;
+ } else {
+ dev_dbg(arizona->dev, "Detected headphone\n");
+ }
+
+ /* Make sure everything is reset back to the real polarity */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC,
+ info->micd_modes[0].src);
+ }
+
+ return 0;
+}
+
+static irqreturn_t arizona_hpdet_irq(int irq, void *data)
+{
+ struct arizona_extcon_info *info = data;
+ struct arizona *arizona = info->arizona;
+ int id_gpio = arizona->pdata.hpdet_id_gpio;
+ int report = ARIZONA_CABLE_HEADPHONE;
+ unsigned int val;
+ int ret, reading;
+
+ mutex_lock(&info->lock);
+
+ /* If we got a spurious IRQ for some reason then ignore it */
+ if (!info->hpdet_active) {
+ dev_warn(arizona->dev, "Spurious HPDET IRQ\n");
+ mutex_unlock(&info->lock);
+ return IRQ_NONE;
+ }
+
+ /* If the cable was removed while measuring ignore the result */
+ ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to check cable state: %d\n",
+ ret);
+ goto out;
+ } else if (!ret) {
+ dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
+ goto done;
+ }
+
+ ret = arizona_hpdet_read(info);
+ if (ret == -EAGAIN) {
+ goto out;
+ } else if (ret < 0) {
+ goto done;
+ }
+ reading = ret;
+
+ /* Reset back to starting range */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
+ 0);
+
+ ret = arizona_hpdet_do_id(info, &reading);
+ if (ret == -EAGAIN) {
+ goto out;
+ } else if (ret < 0) {
+ goto done;
+ }
+
+ /* Report high impedance cables as line outputs */
+ if (reading >= 5000)
+ report = ARIZONA_CABLE_LINEOUT;
+ else
+ report = ARIZONA_CABLE_HEADPHONE;
+
+ ret = extcon_set_cable_state_(&info->edev, report, true);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report HP/line: %d\n",
+ ret);
+
+ mutex_lock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read output enables: %d\n",
+ ret);
+ val = 0;
+ }
+
+ if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to undo magic: %d\n",
+ ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to undo magic: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&arizona->dapm->card->dapm_mutex);
+
+done:
+ if (id_gpio)
+ gpio_set_value_cansleep(id_gpio, 0);
+
+ /* Revert back to MICDET mode */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* If we have a mic then reenable MICDET */
+ if (info->mic)
+ arizona_start_mic(info);
+
+ if (info->hpdet_active) {
+ pm_runtime_put_autosuspend(info->dev);
+ info->hpdet_active = false;
+ }
+
+ info->hpdet_done = true;
+
+out:
+ mutex_unlock(&info->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void arizona_identify_headphone(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ int ret;
+
+ if (info->hpdet_done)
+ return;
+
+ dev_dbg(arizona->dev, "Starting HPDET\n");
+
+ /* Make sure we keep the device enabled during the measurement */
+ pm_runtime_get(info->dev);
+
+ info->hpdet_active = true;
+
+ if (info->mic)
+ arizona_stop_mic(info);
+
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
+
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK,
+ ARIZONA_ACCDET_MODE_HPL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
+ ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* Just report headphone */
+ ret = extcon_update_state(&info->edev,
+ 1 << ARIZONA_CABLE_HEADPHONE,
+ 1 << ARIZONA_CABLE_HEADPHONE);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
+
+ if (info->mic)
+ arizona_start_mic(info);
+
+ info->hpdet_active = false;
+}
+
+static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val;
+ int ret;
+
+ dev_dbg(arizona->dev, "Starting identification via HPDET\n");
+
+ /* Make sure we keep the device enabled during the measurement */
+ pm_runtime_get_sync(info->dev);
+
+ info->hpdet_active = true;
+
+ arizona_extcon_pulse_micbias(info);
+
+ mutex_lock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read output enables: %d\n",
+ ret);
+ val = 0;
+ }
+
+ if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000,
+ 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n",
+ ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000,
+ 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC | ARIZONA_ACCDET_MODE_MASK,
+ info->micd_modes[0].src |
+ ARIZONA_ACCDET_MODE_HPL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
+ ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* Just report headphone */
+ ret = extcon_update_state(&info->edev,
+ 1 << ARIZONA_CABLE_HEADPHONE,
+ 1 << ARIZONA_CABLE_HEADPHONE);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
+
+ info->hpdet_active = false;
+}
+
static irqreturn_t arizona_micdet(int irq, void *data)
{
struct arizona_extcon_info *info = data;
@@ -166,6 +736,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
dev_err(arizona->dev, "Failed to read MICDET: %d\n", ret);
+ mutex_unlock(&info->lock);
return IRQ_NONE;
}
@@ -186,16 +757,23 @@ static irqreturn_t arizona_micdet(int irq, void *data)
/* If we got a high impedence we should have a headset, report it. */
if (info->detecting && (val & 0x400)) {
+ arizona_identify_headphone(info);
+
ret = extcon_update_state(&info->edev,
- 1 << ARIZONA_CABLE_MICROPHONE |
- 1 << ARIZONA_CABLE_HEADPHONE,
- 1 << ARIZONA_CABLE_MICROPHONE |
- 1 << ARIZONA_CABLE_HEADPHONE);
+ 1 << ARIZONA_CABLE_MICROPHONE,
+ 1 << ARIZONA_CABLE_MICROPHONE);
if (ret != 0)
dev_err(arizona->dev, "Headset report failed: %d\n",
ret);
+ /* Don't need to regulate for button detection */
+ ret = regulator_allow_bypass(info->micvdd, false);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
+ ret);
+ }
+
info->mic = true;
info->detecting = false;
goto handled;
@@ -208,20 +786,13 @@ static irqreturn_t arizona_micdet(int irq, void *data)
* impedence then give up and report headphones.
*/
if (info->detecting && (val & 0x3f8)) {
- info->jack_flips++;
-
if (info->jack_flips >= info->micd_num_modes) {
- dev_dbg(arizona->dev, "Detected headphone\n");
+ dev_dbg(arizona->dev, "Detected HP/line\n");
+ arizona_identify_headphone(info);
+
info->detecting = false;
- arizona_stop_mic(info);
- ret = extcon_set_cable_state_(&info->edev,
- ARIZONA_CABLE_HEADPHONE,
- true);
- if (ret != 0)
- dev_err(arizona->dev,
- "Headphone report failed: %d\n",
- ret);
+ arizona_stop_mic(info);
} else {
info->micd_mode++;
if (info->micd_mode == info->micd_num_modes)
@@ -257,13 +828,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
info->detecting = false;
arizona_stop_mic(info);
- ret = extcon_set_cable_state_(&info->edev,
- ARIZONA_CABLE_HEADPHONE,
- true);
- if (ret != 0)
- dev_err(arizona->dev,
- "Headphone report failed: %d\n",
- ret);
+ arizona_identify_headphone(info);
} else {
dev_warn(arizona->dev, "Button with no mic: %x\n",
val);
@@ -274,6 +839,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
input_report_key(info->input,
arizona_lvl_to_key[i].report, 0);
input_sync(info->input);
+ arizona_extcon_pulse_micbias(info);
}
handled:
@@ -283,17 +849,38 @@ handled:
return IRQ_HANDLED;
}
+static void arizona_hpdet_work(struct work_struct *work)
+{
+ struct arizona_extcon_info *info = container_of(work,
+ struct arizona_extcon_info,
+ hpdet_work.work);
+
+ mutex_lock(&info->lock);
+ arizona_start_hpdet_acc_id(info);
+ mutex_unlock(&info->lock);
+}
+
static irqreturn_t arizona_jackdet(int irq, void *data)
{
struct arizona_extcon_info *info = data;
struct arizona *arizona = info->arizona;
- unsigned int val;
+ unsigned int val, present, mask;
int ret, i;
pm_runtime_get_sync(info->dev);
+ cancel_delayed_work_sync(&info->hpdet_work);
+
mutex_lock(&info->lock);
+ if (arizona->pdata.jd_gpio5) {
+ mask = ARIZONA_MICD_CLAMP_STS;
+ present = 0;
+ } else {
+ mask = ARIZONA_JD1_STS;
+ present = ARIZONA_JD1_STS;
+ }
+
ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val);
if (ret != 0) {
dev_err(arizona->dev, "Failed to read jackdet status: %d\n",
@@ -303,7 +890,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
return IRQ_NONE;
}
- if (val & ARIZONA_JD1_STS) {
+ if ((val & mask) == present) {
dev_dbg(arizona->dev, "Detected jack\n");
ret = extcon_set_cable_state_(&info->edev,
ARIZONA_CABLE_MECHANICAL, true);
@@ -312,12 +899,31 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
dev_err(arizona->dev, "Mechanical report failed: %d\n",
ret);
- arizona_start_mic(info);
+ if (!arizona->pdata.hpdet_acc_id) {
+ info->detecting = true;
+ info->mic = false;
+ info->jack_flips = 0;
+
+ arizona_start_mic(info);
+ } else {
+ schedule_delayed_work(&info->hpdet_work,
+ msecs_to_jiffies(250));
+ }
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB, 0);
} else {
dev_dbg(arizona->dev, "Detected jack removal\n");
arizona_stop_mic(info);
+ info->num_hpdet_res = 0;
+ for (i = 0; i < ARRAY_SIZE(info->hpdet_res); i++)
+ info->hpdet_res[i] = 0;
+ info->mic = false;
+ info->hpdet_done = false;
+
for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
input_report_key(info->input,
arizona_lvl_to_key[i].report, 0);
@@ -327,8 +933,20 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
if (ret != 0)
dev_err(arizona->dev, "Removal report failed: %d\n",
ret);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB);
}
+ /* Clear trig_sts to make sure DCVDD is not forced up */
+ regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
+ ARIZONA_MICD_CLAMP_FALL_TRIG_STS |
+ ARIZONA_MICD_CLAMP_RISE_TRIG_STS |
+ ARIZONA_JD1_FALL_TRIG_STS |
+ ARIZONA_JD1_RISE_TRIG_STS);
+
mutex_unlock(&info->lock);
pm_runtime_mark_last_busy(info->dev);
@@ -342,8 +960,12 @@ static int arizona_extcon_probe(struct platform_device *pdev)
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct arizona_pdata *pdata;
struct arizona_extcon_info *info;
+ int jack_irq_fall, jack_irq_rise;
int ret, mode, i;
+ if (!arizona->dapm || !arizona->dapm->card)
+ return -EPROBE_DEFER;
+
pdata = dev_get_platdata(arizona->dev);
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -363,7 +985,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
mutex_init(&info->lock);
info->arizona = arizona;
info->dev = &pdev->dev;
- info->detecting = true;
+ INIT_DELAYED_WORK(&info->hpdet_work, arizona_hpdet_work);
platform_set_drvdata(pdev, info);
switch (arizona->type) {
@@ -373,6 +995,8 @@ static int arizona_extcon_probe(struct platform_device *pdev)
info->micd_reva = true;
break;
default:
+ info->micd_clamp = true;
+ info->hpdet_ip = 1;
break;
}
break;
@@ -415,9 +1039,64 @@ static int arizona_extcon_probe(struct platform_device *pdev)
}
}
+ if (arizona->pdata.hpdet_id_gpio > 0) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ arizona->pdata.hpdet_id_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "HPDET");
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
+ arizona->pdata.hpdet_id_gpio, ret);
+ goto err_register;
+ }
+ }
+
+ if (arizona->pdata.micd_bias_start_time)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_BIAS_STARTTIME_MASK,
+ arizona->pdata.micd_bias_start_time
+ << ARIZONA_MICD_BIAS_STARTTIME_SHIFT);
+
+ if (arizona->pdata.micd_rate)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_RATE_MASK,
+ arizona->pdata.micd_rate
+ << ARIZONA_MICD_RATE_SHIFT);
+
+ if (arizona->pdata.micd_dbtime)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_DBTIME_MASK,
+ arizona->pdata.micd_dbtime
+ << ARIZONA_MICD_DBTIME_SHIFT);
+
+ /*
+ * If we have a MICD clamp, use it, activating it in conjunction
+ * with GPIO5 if that is connected for jack detect operation.
+ */
+ if (info->micd_clamp) {
+ if (arizona->pdata.jd_gpio5) {
+ /* Put the GPIO into input mode */
+ regmap_write(arizona->regmap, ARIZONA_GPIO5_CTRL,
+ 0xc101);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0x9);
+ } else {
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0x4);
+ }
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB,
+ ARIZONA_MICD_CLAMP_DB);
+ }
+
arizona_extcon_set_mode(info, 0);
- info->input = input_allocate_device();
+ info->input = devm_input_allocate_device(&pdev->dev);
if (!info->input) {
dev_err(arizona->dev, "Can't allocate input dev\n");
ret = -ENOMEM;
@@ -435,7 +1114,15 @@ static int arizona_extcon_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_RISE,
+ if (arizona->pdata.jd_gpio5) {
+ jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+ jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+ } else {
+ jack_irq_rise = ARIZONA_IRQ_JD_RISE;
+ jack_irq_fall = ARIZONA_IRQ_JD_FALL;
+ }
+
+ ret = arizona_request_irq(arizona, jack_irq_rise,
"JACKDET rise", arizona_jackdet, info);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
@@ -443,21 +1130,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
goto err_input;
}
- ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 1);
+ ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set JD rise IRQ wake: %d\n",
ret);
goto err_rise;
}
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_FALL,
+ ret = arizona_request_irq(arizona, jack_irq_fall,
"JACKDET fall", arizona_jackdet, info);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JD fall IRQ: %d\n", ret);
goto err_rise_wake;
}
- ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 1);
+ ret = arizona_set_irq_wake(arizona, jack_irq_fall, 1);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set JD fall IRQ wake: %d\n",
ret);
@@ -471,11 +1158,12 @@ static int arizona_extcon_probe(struct platform_device *pdev)
goto err_fall_wake;
}
- regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_BIAS_STARTTIME_MASK |
- ARIZONA_MICD_RATE_MASK,
- 7 << ARIZONA_MICD_BIAS_STARTTIME_SHIFT |
- 8 << ARIZONA_MICD_RATE_SHIFT);
+ ret = arizona_request_irq(arizona, ARIZONA_IRQ_HPDET,
+ "HPDET", arizona_hpdet_irq, info);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to get HPDET IRQ: %d\n", ret);
+ goto err_micdet;
+ }
arizona_clk32k_enable(arizona);
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_DEBOUNCE,
@@ -493,23 +1181,24 @@ static int arizona_extcon_probe(struct platform_device *pdev)
ret = input_register_device(info->input);
if (ret) {
dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
- goto err_micdet;
+ goto err_hpdet;
}
return 0;
+err_hpdet:
+ arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
err_micdet:
arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
err_fall_wake:
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
+ arizona_set_irq_wake(arizona, jack_irq_fall, 0);
err_fall:
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
+ arizona_free_irq(arizona, jack_irq_fall, info);
err_rise_wake:
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
+ arizona_set_irq_wake(arizona, jack_irq_rise, 0);
err_rise:
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
+ arizona_free_irq(arizona, jack_irq_rise, info);
err_input:
- input_free_device(info->input);
err_register:
pm_runtime_disable(&pdev->dev);
extcon_dev_unregister(&info->edev);
@@ -521,18 +1210,32 @@ static int arizona_extcon_remove(struct platform_device *pdev)
{
struct arizona_extcon_info *info = platform_get_drvdata(pdev);
struct arizona *arizona = info->arizona;
+ int jack_irq_rise, jack_irq_fall;
pm_runtime_disable(&pdev->dev);
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0);
+
+ if (arizona->pdata.jd_gpio5) {
+ jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+ jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+ } else {
+ jack_irq_rise = ARIZONA_IRQ_JD_RISE;
+ jack_irq_fall = ARIZONA_IRQ_JD_FALL;
+ }
+
+ arizona_set_irq_wake(arizona, jack_irq_rise, 0);
+ arizona_set_irq_wake(arizona, jack_irq_fall, 0);
+ arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
+ arizona_free_irq(arizona, jack_irq_rise, info);
+ arizona_free_irq(arizona, jack_irq_fall, info);
+ cancel_delayed_work_sync(&info->hpdet_work);
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
ARIZONA_JD1_ENA, 0);
arizona_clk32k_disable(arizona);
- input_unregister_device(info->input);
extcon_dev_unregister(&info->edev);
return 0;
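
(Editor's note, not part of the patch: a minimal sketch of how a board might
consume the new arizona_pdata options used in the hunks above — jd_gpio5,
hpdet_acc_id, hpdet_id_gpio and the MICD timing fields. The field names come
from the diff; the values and the existence of such a board file are
assumptions for illustration only.)

#include <linux/mfd/arizona/pdata.h>

static struct arizona_pdata board_arizona_pdata = {
	/* Jack detection via the MICD clamp routed to GPIO5 */
	.jd_gpio5 = true,
	/* Identify accessories by HPDET impedance measurement */
	.hpdet_acc_id = true,
	/* GPIO toggled while the HPDET-based identification runs */
	.hpdet_id_gpio = 123,		/* hypothetical GPIO number */
	/* Optional MICD timing overrides (raw register field values) */
	.micd_bias_start_time = 7,
	.micd_rate = 8,
};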
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index d398821097f3..60adc04b0561 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -472,7 +472,7 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
if (obj->cable_index < 0)
- return -ENODEV;
+ return obj->cable_index;
obj->user_nb = nb;
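
(Editor's note, not part of the patch: with the change above,
extcon_register_interest() now propagates the error from
extcon_find_cable_index() instead of collapsing everything to -ENODEV. A
hedged consumer sketch — the extcon device and cable names are illustrative:)

#include <linux/extcon.h>
#include <linux/notifier.h>

static struct extcon_specific_cable_nb cable_nb;
static struct notifier_block cable_notifier;	/* .notifier_call set elsewhere */

static int example_register_mhl_interest(void)
{
	int ret;

	ret = extcon_register_interest(&cable_nb, "max77693-muic", "MHL",
				       &cable_notifier);
	if (ret < 0)
		return ret;	/* the real reason, no longer always -ENODEV */

	return 0;
}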
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 1b14bfcdc176..02bec32adde4 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -29,7 +29,7 @@
#include <linux/workqueue.h>
#include <linux/gpio.h>
#include <linux/extcon.h>
-#include <linux/extcon/extcon_gpio.h>
+#include <linux/extcon/extcon-gpio.h>
struct gpio_extcon_data {
struct extcon_dev edev;
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index b656dfa401a6..b70e3815c459 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -29,92 +30,7 @@
#include <linux/irqdomain.h>
#define DEV_NAME "max77693-muic"
-
-/* MAX77693 MUIC - STATUS1~3 Register */
-#define STATUS1_ADC_SHIFT (0)
-#define STATUS1_ADCLOW_SHIFT (5)
-#define STATUS1_ADCERR_SHIFT (6)
-#define STATUS1_ADC1K_SHIFT (7)
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
-
-#define STATUS2_CHGTYP_SHIFT (0)
-#define STATUS2_CHGDETRUN_SHIFT (3)
-#define STATUS2_DCDTMR_SHIFT (4)
-#define STATUS2_DXOVP_SHIFT (5)
-#define STATUS2_VBVOLT_SHIFT (6)
-#define STATUS2_VIDRM_SHIFT (7)
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
-
-#define STATUS3_OVP_SHIFT (2)
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
-
-/* MAX77693 CDETCTRL1~2 register */
-#define CDETCTRL1_CHGDETEN_SHIFT (0)
-#define CDETCTRL1_CHGTYPMAN_SHIFT (1)
-#define CDETCTRL1_DCDEN_SHIFT (2)
-#define CDETCTRL1_DCD2SCT_SHIFT (3)
-#define CDETCTRL1_CDDELAY_SHIFT (4)
-#define CDETCTRL1_DCDCPL_SHIFT (5)
-#define CDETCTRL1_CDPDET_SHIFT (7)
-#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT)
-#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
-#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT)
-#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT)
-#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT)
-#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT)
-#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT)
-
-#define CDETCTRL2_VIDRMEN_SHIFT (1)
-#define CDETCTRL2_DXOVPEN_SHIFT (3)
-#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT)
-#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT)
-
-/* MAX77693 MUIC - CONTROL1~3 register */
-#define COMN1SW_SHIFT (0)
-#define COMP2SW_SHIFT (3)
-#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
-#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
-#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
- | (1 << COMN1SW_SHIFT))
-#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
- | (2 << COMN1SW_SHIFT))
-#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
- | (3 << COMN1SW_SHIFT))
-#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
- | (0 << COMN1SW_SHIFT))
-
-#define CONTROL2_LOWPWR_SHIFT (0)
-#define CONTROL2_ADCEN_SHIFT (1)
-#define CONTROL2_CPEN_SHIFT (2)
-#define CONTROL2_SFOUTASRT_SHIFT (3)
-#define CONTROL2_SFOUTORD_SHIFT (4)
-#define CONTROL2_ACCDET_SHIFT (5)
-#define CONTROL2_USBCPINT_SHIFT (6)
-#define CONTROL2_RCPS_SHIFT (7)
-#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
-#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
-#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
-#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
-#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
-#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
-#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
-#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
-
-#define CONTROL3_JIGSET_SHIFT (0)
-#define CONTROL3_BTLDSET_SHIFT (2)
-#define CONTROL3_ADCDBSET_SHIFT (4)
-#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
-#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
+#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
enum max77693_muic_adc_debounce_time {
ADC_DEBOUNCE_TIME_5MS = 0,
@@ -127,14 +43,40 @@ struct max77693_muic_info {
struct device *dev;
struct max77693_dev *max77693;
struct extcon_dev *edev;
- int prev_adc;
- int prev_adc_gnd;
+ int prev_cable_type;
+ int prev_cable_type_gnd;
int prev_chg_type;
+ int prev_button_type;
u8 status[2];
int irq;
struct work_struct irq_work;
struct mutex mutex;
+
+ /*
+ * Use a delayed workqueue to detect the cable state and then
+ * notify it to the notifiee/platform through a uevent. The extcon
+ * provider driver should only notify the cable state to the upper
+ * layer after the platform has finished booting.
+ */
+ struct delayed_work wq_detcable;
+
+ /* Button of dock device */
+ struct input_dev *dock;
+
+ /*
+ * Default USB/UART path: either UART/USB or AUX_UART/AUX_USB,
+ * i.e. the h/w path of COMP2/COMN1 in the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
+
+enum max77693_muic_cable_group {
+ MAX77693_CABLE_GROUP_ADC = 0,
+ MAX77693_CABLE_GROUP_ADC_GND,
+ MAX77693_CABLE_GROUP_CHG,
+ MAX77693_CABLE_GROUP_VBVOLT,
};
enum max77693_muic_charger_type {
@@ -215,27 +157,59 @@ enum max77693_muic_acc_type {
/* The below accessories have same ADC value so ADCLow and
ADC1K bit is used to separate specific accessory */
- MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, ADCLow:0, ADC1K:0 */
- MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, ADCLow:1, ADC1K:0 */
- MAX77693_MUIC_GND_MHL_CABLE = 0x103, /* ADC:0x0, ADCLow:1, ADC1K:1 */
+ MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, VBVolt:0, ADCLow:0, ADC1K:0 */
+ MAX77693_MUIC_GND_USB_OTG_VB = 0x104, /* ADC:0x0, VBVolt:1, ADCLow:0, ADC1K:0 */
+ MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:0 */
+ MAX77693_MUIC_GND_MHL = 0x103, /* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:1 */
+ MAX77693_MUIC_GND_MHL_VB = 0x107, /* ADC:0x0, VBVolt:1, ADCLow:1, ADC1K:1 */
};
/* MAX77693 MUIC device support below list of accessories(external connector) */
-const char *max77693_extcon_cable[] = {
- [0] = "USB",
- [1] = "USB-Host",
- [2] = "TA",
- [3] = "Fast-charger",
- [4] = "Slow-charger",
- [5] = "Charge-downstream",
- [6] = "MHL",
- [7] = "Audio-video-load",
- [8] = "Audio-video-noload",
- [9] = "JIG",
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_USB_HOST,
+ EXTCON_CABLE_TA,
+ EXTCON_CABLE_FAST_CHARGER,
+ EXTCON_CABLE_SLOW_CHARGER,
+ EXTCON_CABLE_CHARGE_DOWNSTREAM,
+ EXTCON_CABLE_MHL,
+ EXTCON_CABLE_MHL_TA,
+ EXTCON_CABLE_JIG_USB_ON,
+ EXTCON_CABLE_JIG_USB_OFF,
+ EXTCON_CABLE_JIG_UART_OFF,
+ EXTCON_CABLE_JIG_UART_ON,
+ EXTCON_CABLE_DOCK_SMART,
+ EXTCON_CABLE_DOCK_DESK,
+ EXTCON_CABLE_DOCK_AUDIO,
+
+ _EXTCON_CABLE_NUM,
+};
+
+static const char *max77693_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_USB_HOST] = "USB-Host",
+ [EXTCON_CABLE_TA] = "TA",
+ [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
+ [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
+ [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_CABLE_MHL] = "MHL",
+ [EXTCON_CABLE_MHL_TA] = "MHL_TA",
+ [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
+ [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
+ [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
+ [EXTCON_CABLE_JIG_UART_ON] = "Dock-Car",
+ [EXTCON_CABLE_DOCK_SMART] = "Dock-Smart",
+ [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
+ [EXTCON_CABLE_DOCK_AUDIO] = "Dock-Audio",
NULL,
};
+/*
+ * max77693_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max77693 MUIC
+ * @time: the debounce time of ADC
+ */
static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
enum max77693_muic_adc_debounce_time time)
{
@@ -250,18 +224,29 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
MAX77693_MUIC_REG_CTRL3,
time << CONTROL3_ADCDBSET_SHIFT,
CONTROL3_ADCDBSET_MASK);
- if (ret)
+ if (ret) {
dev_err(info->dev, "failed to set ADC debounce time\n");
+ return -EAGAIN;
+ }
break;
default:
dev_err(info->dev, "invalid ADC debounce time\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- return ret;
+ return 0;
};
+/*
+ * max77693_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max77693 MUIC
+ * @val: the path according to the attached cable
+ * @attached: the state of cable (true:attached, false:detached)
+ *
+ * The max77693 MUIC device shares its external H/W lines among a variety of
+ * cables, so this function sets the internal path of the H/W line according
+ * to the type of the attached cable.
+ */
static int max77693_muic_set_path(struct max77693_muic_info *info,
u8 val, bool attached)
{
@@ -277,7 +262,7 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
- goto out;
+ return -EAGAIN;
}
if (attached)
@@ -290,141 +275,457 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
- goto out;
+ return -EAGAIN;
}
dev_info(info->dev,
"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
ctrl1, ctrl2, attached ? "attached" : "detached");
-out:
- return ret;
+
+ return 0;
}
-static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info,
- bool attached)
+/*
+ * max77693_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max77693 MUIC
+ * @group: the cable group of the attached cable
+ * @attached: used to return whether the cable is attached or detached
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group:
+ * - MAX77693_CABLE_GROUP_ADC
+ * - MAX77693_CABLE_GROUP_ADC_GND
+ * - MAX77693_CABLE_GROUP_CHG
+ * - MAX77693_CABLE_GROUP_VBVOLT
+ */
+static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
+ enum max77693_muic_cable_group group, bool *attached)
{
- int ret = 0;
- int type;
- int adc, adc1k, adclow;
+ int cable_type = 0;
+ int adc;
+ int adc1k;
+ int adclow;
+ int vbvolt;
+ int chg_type;
+
+ switch (group) {
+ case MAX77693_CABLE_GROUP_ADC:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type) for handling cable when cable is
+ * detached.
+ */
+ if (adc == MAX77693_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX77693_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
- if (attached) {
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX77693_CABLE_GROUP_ADC_GND:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
adc = info->status[0] & STATUS1_ADC_MASK;
- adclow = info->status[0] & STATUS1_ADCLOW_MASK;
- adclow >>= STATUS1_ADCLOW_SHIFT;
- adc1k = info->status[0] & STATUS1_ADC1K_MASK;
- adc1k >>= STATUS1_ADC1K_SHIFT;
-
- /**
- * [0x1][ADCLow][ADC1K]
- * [0x1 0 0 ] : USB_OTG
- * [0x1 1 0 ] : Audio Video Cable with load
- * [0x1 1 1 ] : MHL
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type/_gnd) for handling cable when cable
+ * is detached.
+ */
+ if (adc == MAX77693_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type_gnd;
+ info->prev_cable_type_gnd = MAX77693_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ adclow = info->status[0] & STATUS1_ADCLOW_MASK;
+ adclow >>= STATUS1_ADCLOW_SHIFT;
+ adc1k = info->status[0] & STATUS1_ADC1K_MASK;
+ adc1k >>= STATUS1_ADC1K_SHIFT;
+
+ vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
+ vbvolt >>= STATUS2_VBVOLT_SHIFT;
+
+ /**
+ * [0x1][VBVolt][ADCLow][ADC1K]
+ * [0x1 0 0 0 ] : USB_OTG
+ * [0x1 1 0 0 ] : USB_OTG_VB
+ * [0x1 0 1 0 ] : Audio Video Cable with load
+ * [0x1 0 1 1 ] : MHL without charging connector
+ * [0x1 1 1 1 ] : MHL with charging connector
+ */
+ cable_type = ((0x1 << 8)
+ | (vbvolt << 2)
+ | (adclow << 1)
+ | adc1k);
+
+ info->prev_cable_type = adc;
+ info->prev_cable_type_gnd = cable_type;
+ }
+
+ break;
+ case MAX77693_CABLE_GROUP_CHG:
+ /*
+ * Read charger type to check cable type and decide cable state
+ * according to type of charger cable.
+ */
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
+ *attached = false;
+
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX77693_CHARGER_TYPE_NONE;
+ } else {
+ *attached = true;
+
+ /*
+ * Check current cable state/cable type and store cable
+ * type(info->prev_chg_type) for handling cable when
+ * charger cable is detached.
+ */
+ cable_type = info->prev_chg_type = chg_type;
+ }
+
+ break;
+ case MAX77693_CABLE_GROUP_VBVOLT:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (adc == MAX77693_MUIC_ADC_OPEN
+ && chg_type == MAX77693_CHARGER_TYPE_NONE)
+ *attached = false;
+ else
+ *attached = true;
+
+ /*
+ * Read vbvolt field, if vbvolt is 1,
+ * this cable is used for charging.
+ */
+ vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
+ vbvolt >>= STATUS2_VBVOLT_SHIFT;
+
+ cable_type = vbvolt;
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
+
+static int max77693_muic_dock_handler(struct max77693_muic_info *info,
+ int cable_type, bool attached)
+{
+ int ret = 0;
+ int vbvolt;
+ bool cable_attached;
+ char dock_name[CABLE_NAME_MAX];
+
+ dev_info(info->dev,
+ "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ /*
+ * Check whether the power cable is attached or detached.
+ * The Dock-Smart device requires an external power supply;
+ * if a power cable (USB/TA) isn't connected to the dock,
+ * the user can't use Dock-Smart in desktop mode.
*/
- type = ((0x1 << 8) | (adclow << 1) | adc1k);
+ vbvolt = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_VBVOLT, &cable_attached);
+ if (attached && !vbvolt) {
+ dev_warn(info->dev,
+ "Cannot detect external power supply\n");
+ return 0;
+ }
+
+ /*
+ * Notify Dock-Smart/MHL state.
+ * - The Dock-Smart device provides three kinds of connector:
+ * HDMI, USB for mouse/keyboard, and a micro-usb port for a
+ * USB/TA cable. The Dock-Smart device always needs an external
+ * power supply (USB/TA cable through the micro-usb port) and
+ * supports screen output from the target to a separate monitor
+ * plus mouse/keyboard for desktop mode.
+ *
+ * Features of 'USB/TA cable with Dock-Smart device'
+ * - Supports MHL
+ * - Supports external audio output
+ * - Supports charging through the micro-usb port without a data
+ * connection if a TA cable is connected to the target.
+ * - Supports charging and a data connection through the micro-usb
+ * port if a USB cable is connected between the target and a host
+ * device.
+ * - Supports OTG devices (mouse/keyboard)
+ */
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+ extcon_set_cable_state(info->edev, "MHL", attached);
+ goto out;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
+ strcpy(dock_name, "Dock-Car");
+ break;
+ case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
+ strcpy(dock_name, "Dock-Desk");
+ break;
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ strcpy(dock_name, "Dock-Audio");
+ if (!attached)
+ extcon_set_cable_state(info->edev, "USB", false);
+ break;
+ default:
+ dev_err(info->dev, "failed to detect %s dock device\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
+ }
- /* Store previous ADC value to handle accessory
- when accessory will be detached */
- info->prev_adc = adc;
- info->prev_adc_gnd = type;
- } else
- type = info->prev_adc_gnd;
+ /* Dock-Car/Desk/Audio, PATH:AUDIO */
+ ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+ if (ret < 0)
+ return ret;
+ extcon_set_cable_state(info->edev, dock_name, attached);
- switch (type) {
+out:
+ return 0;
+}
+
+static int max77693_muic_dock_button_handler(struct max77693_muic_info *info,
+ int button_type, bool attached)
+{
+ struct input_dev *dock = info->dock;
+ unsigned int code;
+
+ switch (button_type) {
+ case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S3_BUTTON+1:
+ /* DOCK_KEY_PREV */
+ code = KEY_PREVIOUSSONG;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S7_BUTTON+1:
+ /* DOCK_KEY_NEXT */
+ code = KEY_NEXTSONG;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
+ /* DOCK_VOL_DOWN */
+ code = KEY_VOLUMEDOWN;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
+ /* DOCK_VOL_UP */
+ code = KEY_VOLUMEUP;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S12_BUTTON+1:
+ /* DOCK_KEY_PLAY_PAUSE */
+ code = KEY_PLAYPAUSE;
+ break;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s key (adc:0x%x)\n",
+ attached ? "pressed" : "released", button_type);
+ return -EINVAL;
+ }
+
+ input_event(dock, EV_KEY, code, attached);
+ input_sync(dock);
+
+ return 0;
+}
+
+static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
+{
+ int cable_type_gnd;
+ int ret = 0;
+ bool attached;
+
+ cable_type_gnd = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC_GND, &attached);
+
+ switch (cable_type_gnd) {
case MAX77693_MUIC_GND_USB_OTG:
- /* USB_OTG */
+ case MAX77693_MUIC_GND_USB_OTG_VB:
+ /* USB_OTG, PATH: AP_USB */
ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
if (ret < 0)
- goto out;
+ return ret;
extcon_set_cable_state(info->edev, "USB-Host", attached);
break;
case MAX77693_MUIC_GND_AV_CABLE_LOAD:
- /* Audio Video Cable with load */
+ /* Audio Video Cable with load, PATH:AUDIO */
ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret < 0)
- goto out;
+ return ret;
extcon_set_cable_state(info->edev,
"Audio-video-load", attached);
break;
- case MAX77693_MUIC_GND_MHL_CABLE:
- /* MHL */
+ case MAX77693_MUIC_GND_MHL:
+ case MAX77693_MUIC_GND_MHL_VB:
+ /* MHL or MHL with USB/TA cable */
extcon_set_cable_state(info->edev, "MHL", attached);
break;
default:
- dev_err(info->dev, "failed to detect %s accessory\n",
+ dev_err(info->dev, "failed to detect %s cable of gnd type\n",
attached ? "attached" : "detached");
- dev_err(info->dev, "- adc:0x%x, adclow:0x%x, adc1k:0x%x\n",
- adc, adclow, adc1k);
- ret = -EINVAL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max77693_muic_jig_handler(struct max77693_muic_info *info,
+ int cable_type, bool attached)
+{
+ char cable_name[32];
+ int ret = 0;
+ u8 path = CONTROL1_SW_OPEN;
+
+ dev_info(info->dev,
+ "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-OFF");
+ path = CONTROL1_SW_USB;
+ break;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-ON");
+ path = CONTROL1_SW_USB;
+ break;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
+ /* PATH:AP_UART */
+ strcpy(cable_name, "JIG-UART-OFF");
+ path = CONTROL1_SW_UART;
break;
+ default:
+ dev_err(info->dev, "failed to detect %s jig cable\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+ ret = max77693_muic_set_path(info, path, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, cable_name, attached);
+
+ return 0;
}
-static int max77693_muic_adc_handler(struct max77693_muic_info *info,
- int curr_adc, bool attached)
+static int max77693_muic_adc_handler(struct max77693_muic_info *info)
{
+ int cable_type;
+ int button_type;
+ bool attached;
int ret = 0;
- int adc;
- if (attached) {
- /* Store ADC value to handle accessory
- when accessory will be detached */
- info->prev_adc = curr_adc;
- adc = curr_adc;
- } else
- adc = info->prev_adc;
+ /* Check accessory state which is either detached or attached */
+ cable_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC, &attached);
dev_info(info->dev,
"external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
- attached ? "attached" : "detached", curr_adc, info->prev_adc);
+ attached ? "attached" : "detached", cable_type,
+ info->prev_cable_type);
- switch (adc) {
+ switch (cable_type) {
case MAX77693_MUIC_ADC_GROUND:
/* USB_OTG/MHL/Audio */
- max77693_muic_adc_ground_handler(info, attached);
+ max77693_muic_adc_ground_handler(info);
break;
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
- /* USB */
- ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
- if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "USB", attached);
- break;
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
- case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
/* JIG */
- ret = max77693_muic_set_path(info, CONTROL1_SW_UART, attached);
+ ret = max77693_muic_jig_handler(info, cable_type, attached);
if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "JIG", attached);
+ return ret;
break;
- case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE:
- /* Audio Video cable with no-load */
- ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
+ case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ /*
+ * DOCK device
+ *
+ * The MAX77693 MUIC device can detect 34 cable types in total
+ * (excluding charger cables), but does not define a specific role
+ * for cables whose ADC value is in the range 0x01 to 0x12, so such
+ * cables can be used/defined according to the schema of the
+ * hardware board.
+ */
+ ret = max77693_muic_dock_handler(info, cable_type, attached);
if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev,
- "Audio-video-noload", attached);
+ return ret;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON: /* DOCK_KEY_PREV */
+ case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON: /* DOCK_KEY_NEXT */
+ case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON: /* DOCK_VOL_DOWN */
+ case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON: /* DOCK_VOL_UP */
+ case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON: /* DOCK_KEY_PLAY_PAUSE */
+ /*
+ * Button of DOCK device
+ * - the Prev/Next/Volume Up/Volume Down/Play-Pause button
+ *
+ * The MAX77693 MUIC device can detect 34 cable types in total
+ * (excluding charger cables), but does not define a specific role
+ * for cables whose ADC value is in the range 0x01 to 0x12, so such
+ * cables can be used/defined according to the schema of the
+ * hardware board.
+ */
+ if (attached)
+ button_type = info->prev_button_type = cable_type;
+ else
+ button_type = info->prev_button_type;
+
+ ret = max77693_muic_dock_button_handler(info, button_type,
+ attached);
+ if (ret < 0)
+ return ret;
break;
case MAX77693_MUIC_ADC_SEND_END_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S1_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S2_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S4_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S5_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S6_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S8_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S11_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX77693_MUIC_ADC_RESERVED_ACC_1:
case MAX77693_MUIC_ADC_RESERVED_ACC_2:
- case MAX77693_MUIC_ADC_RESERVED_ACC_3:
case MAX77693_MUIC_ADC_RESERVED_ACC_4:
case MAX77693_MUIC_ADC_RESERVED_ACC_5:
case MAX77693_MUIC_ADC_CEA936_AUDIO:
@@ -432,60 +733,164 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info,
case MAX77693_MUIC_ADC_TTY_CONVERTER:
case MAX77693_MUIC_ADC_UART_CABLE:
case MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG:
- case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG:
- /* This accessory isn't used in general case if it is specially
- needed to detect additional accessory, should implement
- proper operation when this accessory is attached/detached. */
+ /*
+ * This accessory isn't used in the general case. If it is
+ * specifically needed, proper handling should be implemented
+ * for when this accessory is attached/detached.
+ */
dev_info(info->dev,
"accessory is %s but it isn't used (adc:0x%x)\n",
- attached ? "attached" : "detached", adc);
- goto out;
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
default:
dev_err(info->dev,
"failed to detect %s accessory (adc:0x%x)\n",
- attached ? "attached" : "detached", adc);
- ret = -EINVAL;
- goto out;
+ attached ? "attached" : "detached", cable_type);
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
-static int max77693_muic_chg_handler(struct max77693_muic_info *info,
- int curr_chg_type, bool attached)
+static int max77693_muic_chg_handler(struct max77693_muic_info *info)
{
- int ret = 0;
int chg_type;
+ int cable_type_gnd;
+ int cable_type;
+ bool attached;
+ bool cable_attached;
+ int ret = 0;
- if (attached) {
- /* Store previous charger type to control
- when charger accessory will be detached */
- info->prev_chg_type = curr_chg_type;
- chg_type = curr_chg_type;
- } else
- chg_type = info->prev_chg_type;
+ chg_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_CHG, &attached);
dev_info(info->dev,
"external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
attached ? "attached" : "detached",
- curr_chg_type, info->prev_chg_type);
+ chg_type, info->prev_chg_type);
switch (chg_type) {
case MAX77693_CHARGER_TYPE_USB:
- ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
- if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "USB", attached);
+ case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
+ case MAX77693_CHARGER_TYPE_NONE:
+ /* Check MAX77693_CABLE_GROUP_ADC_GND type */
+ cable_type_gnd = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC_GND,
+ &cable_attached);
+ switch (cable_type_gnd) {
+ case MAX77693_MUIC_GND_MHL:
+ case MAX77693_MUIC_GND_MHL_VB:
+ /*
+ * MHL cable with MHL_TA(USB/TA) cable
+ * - An MHL cable provides two ports (an HDMI line and a separate
+ * micro-usb port). When the target connects an MHL cable, the
+ * extcon driver checks whether an MHL_TA (USB/TA) cable is also
+ * connected; if so, it notifies the notifiee so the battery can
+ * be charged.
+ *
+ * Features of 'MHL_TA(USB/TA) with MHL cable'
+ * - Supports MHL
+ * - Supports charging through the micro-usb port without a data
+ * connection
+ */
+ extcon_set_cable_state(info->edev, "MHL_TA", attached);
+ if (!cable_attached)
+ extcon_set_cable_state(info->edev, "MHL", cable_attached);
+ break;
+ }
+
+ /* Check MAX77693_CABLE_GROUP_ADC type */
+ cable_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC,
+ &cable_attached);
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ /*
+ * Dock-Audio device with USB/TA cable
+ * - The dock device provides two ports (Dock-Audio and a micro-usb
+ * port). When the target connects a Dock-Audio device, the extcon
+ * driver checks whether a USB/TA cable is also connected; if so,
+ * it notifies the notifiee so the battery can be charged.
+ *
+ * Features of 'USB/TA cable with Dock-Audio device'
+ * - Supports external audio output.
+ * - Supports charging through the micro-usb port without a data
+ * connection.
+ */
+ extcon_set_cable_state(info->edev, "USB", attached);
+
+ if (!cable_attached)
+ extcon_set_cable_state(info->edev, "Dock-Audio", cable_attached);
+ break;
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ /*
+ * Dock-Smart device with USB/TA cable
+ * - The Dock-Smart device provides three kinds of connector:
+ * HDMI, USB for mouse/keyboard, and a micro-usb port for a
+ * USB/TA cable. The Dock-Smart device always needs an external
+ * power supply (USB/TA cable through the micro-usb port) and
+ * supports screen output from the target to a separate monitor
+ * plus mouse/keyboard for desktop mode.
+ *
+ * Features of 'USB/TA cable with Dock-Smart device'
+ * - Supports MHL
+ * - Supports external audio output
+ * - Supports charging through the micro-usb port without a data
+ * connection if a TA cable is connected to the target.
+ * - Supports charging and a data connection through the micro-usb
+ * port if a USB cable is connected between the target and a host
+ * device.
+ * - Supports OTG devices (mouse/keyboard)
+ */
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+ extcon_set_cable_state(info->edev, "MHL", attached);
+
+ break;
+ }
+
+ /* Check MAX77693_CABLE_GROUP_CHG type */
+ switch (chg_type) {
+ case MAX77693_CHARGER_TYPE_NONE:
+ /*
+ * When MHL (with a USB/TA cable) or Dock-Audio with a USB/TA
+ * cable is attached, the MUIC device raises two interrupts:
+ * - 'MAX77693_MUIC_IRQ_INT1_ADC' for detecting MHL/Dock-Audio.
+ * - 'MAX77693_MUIC_IRQ_INT2_CHGTYP' for detecting the USB/TA
+ * cable connected to MHL or Dock-Audio.
+ * The MAX77693_MUIC_IRQ_INT1_ADC interrupt always fires before
+ * the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt.
+ *
+ * If the user attaches MHL (with a USB/TA cable) and immediately
+ * detaches it before the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt
+ * has fired, the USB/TA cable would be left reported as connected
+ * even though it isn't, which confuses the user. So the driver
+ * should handle this case even though the previous charger type
+ * is N/A.
+ */
+ break;
+ case MAX77693_CHARGER_TYPE_USB:
+ /* Only USB cable, PATH:AP_USB */
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "USB", attached);
+ break;
+ case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
+ /* Only TA cable */
+ extcon_set_cable_state(info->edev, "TA", attached);
+ break;
+ }
break;
case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
extcon_set_cable_state(info->edev,
"Charge-downstream", attached);
break;
- case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", attached);
- break;
case MAX77693_CHARGER_TYPE_APPLE_500MA:
extcon_set_cable_state(info->edev, "Slow-charger", attached);
break;
@@ -498,22 +903,18 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info,
dev_err(info->dev,
"failed to detect %s accessory (chg_type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static void max77693_muic_irq_work(struct work_struct *work)
{
struct max77693_muic_info *info = container_of(work,
struct max77693_muic_info, irq_work);
- int curr_adc, curr_chg_type;
int irq_type = -1;
int i, ret = 0;
- bool attached = true;
if (!info->edev)
return;
@@ -539,14 +940,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
case MAX77693_MUIC_IRQ_INT1_ADC1K:
/* Handle all of accessory except for
type of charger accessory */
- curr_adc = info->status[0] & STATUS1_ADC_MASK;
- curr_adc >>= STATUS1_ADC_SHIFT;
-
- /* Check accessory state which is either detached or attached */
- if (curr_adc == MAX77693_MUIC_ADC_OPEN)
- attached = false;
-
- ret = max77693_muic_adc_handler(info, curr_adc, attached);
+ ret = max77693_muic_adc_handler(info);
break;
case MAX77693_MUIC_IRQ_INT2_CHGTYP:
case MAX77693_MUIC_IRQ_INT2_CHGDETREUN:
@@ -555,15 +949,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
case MAX77693_MUIC_IRQ_INT2_VBVOLT:
case MAX77693_MUIC_IRQ_INT2_VIDRM:
/* Handle charger accessory */
- curr_chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
- curr_chg_type >>= STATUS2_CHGTYP_SHIFT;
-
- /* Check charger accessory state which
- is either detached or attached */
- if (curr_chg_type == MAX77693_CHARGER_TYPE_NONE)
- attached = false;
-
- ret = max77693_muic_chg_handler(info, curr_chg_type, attached);
+ ret = max77693_muic_chg_handler(info);
break;
case MAX77693_MUIC_IRQ_INT3_EOC:
case MAX77693_MUIC_IRQ_INT3_CGMBC:
@@ -575,7 +961,8 @@ static void max77693_muic_irq_work(struct work_struct *work)
default:
dev_err(info->dev, "muic interrupt: irq %d occurred\n",
irq_type);
- break;
+ mutex_unlock(&info->mutex);
+ return;
}
if (ret < 0)
@@ -604,7 +991,9 @@ static struct regmap_config max77693_muic_regmap_config = {
static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
{
int ret = 0;
- int adc, chg_type;
+ int adc;
+ int chg_type;
+ bool attached;
mutex_lock(&info->mutex);
@@ -617,35 +1006,39 @@ static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
return -EINVAL;
}
- adc = info->status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
-
- if (adc != MAX77693_MUIC_ADC_OPEN) {
- dev_info(info->dev,
- "external connector is attached (adc:0x%02x)\n", adc);
+ adc = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_ADC,
+ &attached);
+ if (attached && adc != MAX77693_MUIC_ADC_OPEN) {
+ ret = max77693_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
- ret = max77693_muic_adc_handler(info, adc, true);
- if (ret < 0)
- dev_err(info->dev, "failed to detect accessory\n");
- goto out;
+ chg_type = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_CHG,
+ &attached);
+ if (attached && chg_type != MAX77693_CHARGER_TYPE_NONE) {
+ ret = max77693_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
}
- chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
+ mutex_unlock(&info->mutex);
- if (chg_type != MAX77693_CHARGER_TYPE_NONE) {
- dev_info(info->dev,
- "external connector is attached (chg_type:0x%x)\n",
- chg_type);
+ return 0;
+}
- max77693_muic_chg_handler(info, chg_type, true);
- if (ret < 0)
- dev_err(info->dev, "failed to detect charger accessory\n");
- }
+static void max77693_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max77693_muic_info *info = container_of(to_delayed_work(work),
+ struct max77693_muic_info, wq_detcable);
-out:
- mutex_unlock(&info->mutex);
- return ret;
+ max77693_muic_detect_accessory(info);
}
static int max77693_muic_probe(struct platform_device *pdev)
@@ -654,20 +1047,22 @@ static int max77693_muic_probe(struct platform_device *pdev)
struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
struct max77693_muic_info *info;
- int ret, i;
+ int delay_jiffies;
+ int ret;
+ int i;
u8 id;
- info = kzalloc(sizeof(struct max77693_muic_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
+ GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_kfree;
+ return -ENOMEM;
}
info->dev = &pdev->dev;
info->max77693 = max77693;
- if (info->max77693->regmap_muic)
+ if (info->max77693->regmap_muic) {
dev_dbg(&pdev->dev, "allocate register map\n");
- else {
+ } else {
info->max77693->regmap_muic = devm_regmap_init_i2c(
info->max77693->muic,
&max77693_muic_regmap_config);
@@ -675,9 +1070,35 @@ static int max77693_muic_probe(struct platform_device *pdev)
ret = PTR_ERR(info->max77693->regmap_muic);
dev_err(max77693->dev,
"failed to allocate register map: %d\n", ret);
- goto err_regmap;
+ return ret;
}
}
+
+ /* Register input device for button of dock device */
+ info->dock = devm_input_allocate_device(&pdev->dev);
+ if (!info->dock) {
+ dev_err(&pdev->dev, "%s: failed to allocate input\n", __func__);
+ return -ENOMEM;
+ }
+ info->dock->name = "max77693-muic/dock";
+ info->dock->phys = "max77693-muic/extcon";
+ info->dock->dev.parent = &pdev->dev;
+
+ __set_bit(EV_REP, info->dock->evbit);
+
+ input_set_capability(info->dock, EV_KEY, KEY_VOLUMEUP);
+ input_set_capability(info->dock, EV_KEY, KEY_VOLUMEDOWN);
+ input_set_capability(info->dock, EV_KEY, KEY_PLAYPAUSE);
+ input_set_capability(info->dock, EV_KEY, KEY_PREVIOUSSONG);
+ input_set_capability(info->dock, EV_KEY, KEY_NEXTSONG);
+
+ ret = input_register_device(info->dock);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register input device error(%d)\n",
+ ret);
+ return ret;
+ }
+
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
@@ -686,30 +1107,31 @@ static int max77693_muic_probe(struct platform_device *pdev)
/* Support irq domain for MAX77693 MUIC device */
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max77693_muic_irq *muic_irq = &muic_irqs[i];
- int virq = 0;
+ unsigned int virq = 0;
virq = irq_create_mapping(max77693->irq_domain, muic_irq->irq);
- if (!virq)
+ if (!virq) {
+ ret = -EINVAL;
goto err_irq;
+ }
muic_irq->virq = virq;
ret = request_threaded_irq(virq, NULL,
max77693_muic_irq_handler,
- IRQF_ONESHOT, muic_irq->name, info);
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d,"
" error :%d)\n",
muic_irq->irq, ret);
-
- for (i = i - 1; i >= 0; i--)
- free_irq(muic_irq->virq, info);
goto err_irq;
}
}
/* Initialize extcon device */
- info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+ GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
@@ -720,7 +1142,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
ret = extcon_dev_register(info->edev, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_extcon;
+ goto err_irq;
}
/* Initialize MUIC register by using platform data */
@@ -748,6 +1170,23 @@ static int max77693_muic_probe(struct platform_device *pdev)
= muic_pdata->init_data[i].data;
}
+ /*
+ * Default USB/UART path: either UART/USB or AUX_UART/AUX_USB,
+ * i.e. the h/w path of COMP2/COMN1 in the CONTROL1 register.
+ */
+ if (muic_pdata->path_uart)
+ info->path_uart = muic_pdata->path_uart;
+ else
+ info->path_uart = CONTROL1_SW_UART;
+
+ if (muic_pdata->path_usb)
+ info->path_usb = muic_pdata->path_usb;
+ else
+ info->path_usb = CONTROL1_SW_USB;
+
+ /* Set initial path for UART */
+ max77693_muic_set_path(info, info->path_uart, true);
+
/* Check revision number of MUIC device*/
ret = max77693_read_reg(info->max77693->regmap_muic,
MAX77693_MUIC_REG_ID, &id);
@@ -760,17 +1199,28 @@ static int max77693_muic_probe(struct platform_device *pdev)
/* Set ADC debounce time */
max77693_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
- /* Detect accessory on boot */
- max77693_muic_detect_accessory(info);
+ /*
+ * Detect the accessory after platform initialization is complete.
+ *
+ * - Use a delayed workqueue to detect the cable state and then
+ * notify it to the notifiee/platform through a uevent. The extcon
+ * provider driver should only notify the cable state to the upper
+ * layer after the platform has finished booting.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
+ if (muic_pdata->detcable_delay_ms)
+ delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
+ else
+ delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+ schedule_delayed_work(&info->wq_detcable, delay_jiffies);
return ret;
err_extcon:
- kfree(info->edev);
+ extcon_dev_unregister(info->edev);
err_irq:
-err_regmap:
- kfree(info);
-err_kfree:
+ while (--i >= 0)
+ free_irq(muic_irqs[i].virq, info);
return ret;
}
@@ -782,9 +1232,8 @@ static int max77693_muic_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
+ input_unregister_device(info->dock);
extcon_dev_unregister(info->edev);
- kfree(info->edev);
- kfree(info);
return 0;
}
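
(Editor's note, not part of the patch: the ADC_GND cable group packs its status
bits as documented in the [0x1][VBVolt][ADCLow][ADC1K] table above. A
standalone sketch, with a hypothetical helper name, showing how the
MAX77693_MUIC_GND_* values fall out of that packing:)

#include <stdio.h>

/* Same packing as the MAX77693_CABLE_GROUP_ADC_GND case above */
static int pack_gnd_cable_type(int vbvolt, int adclow, int adc1k)
{
	return (0x1 << 8) | (vbvolt << 2) | (adclow << 1) | adc1k;
}

int main(void)
{
	printf("USB_OTG       = 0x%03x\n", pack_gnd_cable_type(0, 0, 0)); /* 0x100 */
	printf("USB_OTG_VB    = 0x%03x\n", pack_gnd_cable_type(1, 0, 0)); /* 0x104 */
	printf("AV_CABLE_LOAD = 0x%03x\n", pack_gnd_cable_type(0, 1, 0)); /* 0x102 */
	printf("MHL           = 0x%03x\n", pack_gnd_cable_type(0, 1, 1)); /* 0x103 */
	printf("MHL_VB        = 0x%03x\n", pack_gnd_cable_type(1, 1, 1)); /* 0x107 */
	return 0;
}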
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index bad76f51161b..e636d950ad6c 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -1,7 +1,7 @@
/*
* extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Donggeun Kim <dg77.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -29,51 +29,14 @@
#include <linux/irqdomain.h>
#define DEV_NAME "max8997-muic"
+#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
-/* MAX8997-MUIC STATUS1 register */
-#define STATUS1_ADC_SHIFT 0
-#define STATUS1_ADCLOW_SHIFT 5
-#define STATUS1_ADCERR_SHIFT 6
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-
-/* MAX8997-MUIC STATUS2 register */
-#define STATUS2_CHGTYP_SHIFT 0
-#define STATUS2_CHGDETRUN_SHIFT 3
-#define STATUS2_DCDTMR_SHIFT 4
-#define STATUS2_DBCHG_SHIFT 5
-#define STATUS2_VBVOLT_SHIFT 6
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-
-/* MAX8997-MUIC STATUS3 register */
-#define STATUS3_OVP_SHIFT 2
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
-
-/* MAX8997-MUIC CONTROL1 register */
-#define COMN1SW_SHIFT 0
-#define COMP2SW_SHIFT 3
-#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
-#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
-#define SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-
-#define MAX8997_SW_USB ((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
-#define MAX8997_SW_AUDIO ((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
-#define MAX8997_SW_UART ((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
-#define MAX8997_SW_OPEN ((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
-
-#define MAX8997_ADC_GROUND 0x00
-#define MAX8997_ADC_MHL 0x01
-#define MAX8997_ADC_JIG_USB_1 0x18
-#define MAX8997_ADC_JIG_USB_2 0x19
-#define MAX8997_ADC_DESKDOCK 0x1a
-#define MAX8997_ADC_JIG_UART 0x1c
-#define MAX8997_ADC_CARDOCK 0x1d
-#define MAX8997_ADC_OPEN 0x1f
+enum max8997_muic_adc_debounce_time {
+ ADC_DEBOUNCE_TIME_0_5MS = 0, /* 0.5ms */
+ ADC_DEBOUNCE_TIME_10MS, /* 10ms */
+ ADC_DEBOUNCE_TIME_25MS, /* 25ms */
+ ADC_DEBOUNCE_TIME_38_62MS, /* 38.62ms */
+};
struct max8997_muic_irq {
unsigned int irq;
@@ -82,61 +45,303 @@ struct max8997_muic_irq {
};
static struct max8997_muic_irq muic_irqs[] = {
- { MAX8997_MUICIRQ_ADCError, "muic-ADC_error" },
- { MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" },
- { MAX8997_MUICIRQ_ADC, "muic-ADC" },
- { MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" },
- { MAX8997_MUICIRQ_DBChg, "muic-DB_charger" },
- { MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" },
- { MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" },
- { MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" },
- { MAX8997_MUICIRQ_OVP, "muic-over_voltage" },
+ { MAX8997_MUICIRQ_ADCError, "muic-ADCERROR" },
+ { MAX8997_MUICIRQ_ADCLow, "muic-ADCLOW" },
+ { MAX8997_MUICIRQ_ADC, "muic-ADC" },
+ { MAX8997_MUICIRQ_VBVolt, "muic-VBVOLT" },
+ { MAX8997_MUICIRQ_DBChg, "muic-DBCHG" },
+ { MAX8997_MUICIRQ_DCDTmr, "muic-DCDTMR" },
+ { MAX8997_MUICIRQ_ChgDetRun, "muic-CHGDETRUN" },
+ { MAX8997_MUICIRQ_ChgTyp, "muic-CHGTYP" },
+ { MAX8997_MUICIRQ_OVP, "muic-OVP" },
+};
+
+/* Define supported cable type */
+enum max8997_muic_acc_type {
+ MAX8997_MUIC_ADC_GROUND = 0x0,
+ MAX8997_MUIC_ADC_MHL, /* MHL */
+ MAX8997_MUIC_ADC_REMOTE_S1_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S2_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S3_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S4_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S5_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S6_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S7_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S8_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S9_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S10_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S11_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S12_BUTTON,
+ MAX8997_MUIC_ADC_RESERVED_ACC_1,
+ MAX8997_MUIC_ADC_RESERVED_ACC_2,
+ MAX8997_MUIC_ADC_RESERVED_ACC_3,
+ MAX8997_MUIC_ADC_RESERVED_ACC_4,
+ MAX8997_MUIC_ADC_RESERVED_ACC_5,
+ MAX8997_MUIC_ADC_CEA936_AUDIO,
+ MAX8997_MUIC_ADC_PHONE_POWERED_DEV,
+ MAX8997_MUIC_ADC_TTY_CONVERTER,
+ MAX8997_MUIC_ADC_UART_CABLE,
+ MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG,
+ MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF, /* JIG-USB-OFF */
+ MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON, /* JIG-USB-ON */
+ MAX8997_MUIC_ADC_AV_CABLE_NOLOAD, /* DESKDOCK */
+ MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG,
+ MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF, /* JIG-UART */
+ MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON, /* CARDOCK */
+ MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE,
+ MAX8997_MUIC_ADC_OPEN, /* OPEN */
+};
+
+enum max8997_muic_cable_group {
+ MAX8997_CABLE_GROUP_ADC = 0,
+ MAX8997_CABLE_GROUP_ADC_GND,
+ MAX8997_CABLE_GROUP_CHG,
+ MAX8997_CABLE_GROUP_VBVOLT,
+};
+
+enum max8997_muic_usb_type {
+ MAX8997_USB_HOST,
+ MAX8997_USB_DEVICE,
+};
+
+enum max8997_muic_charger_type {
+ MAX8997_CHARGER_TYPE_NONE = 0,
+ MAX8997_CHARGER_TYPE_USB,
+ MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
+ MAX8997_CHARGER_TYPE_DEDICATED_CHG,
+ MAX8997_CHARGER_TYPE_500MA,
+ MAX8997_CHARGER_TYPE_1A,
+ MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
};
struct max8997_muic_info {
struct device *dev;
struct i2c_client *muic;
- struct max8997_muic_platform_data *muic_pdata;
+ struct extcon_dev *edev;
+ int prev_cable_type;
+ int prev_chg_type;
+ u8 status[2];
int irq;
struct work_struct irq_work;
+ struct mutex mutex;
+ struct max8997_muic_platform_data *muic_pdata;
enum max8997_muic_charger_type pre_charger_type;
- int pre_adc;
- struct mutex mutex;
+ /*
+ * Use a delayed workqueue to detect the cable state and then
+ * notify the cable state to the notifiee/platform through a uevent.
+ * Once the platform has finished booting, the extcon provider
+ * driver should notify the cable state to the upper layer.
+ */
+ struct delayed_work wq_detcable;
+
+ /*
+ * Default USB/UART path: either the UART/USB or the AUX_UART/AUX_USB
+ * h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
- struct extcon_dev *edev;
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_USB_HOST,
+ EXTCON_CABLE_TA,
+ EXTCON_CABLE_FAST_CHARGER,
+ EXTCON_CABLE_SLOW_CHARGER,
+ EXTCON_CABLE_CHARGE_DOWNSTREAM,
+ EXTCON_CABLE_MHL,
+ EXTCON_CABLE_DOCK_DESK,
+ EXTCON_CABLE_DOCK_CARD,
+ EXTCON_CABLE_JIG,
+
+ _EXTCON_CABLE_NUM,
};
-const char *max8997_extcon_cable[] = {
- [0] = "USB",
- [1] = "USB-Host",
- [2] = "TA",
- [3] = "Fast-charger",
- [4] = "Slow-charger",
- [5] = "Charge-downstream",
- [6] = "MHL",
- [7] = "Dock-desk",
- [8] = "Dock-card",
- [9] = "JIG",
+static const char *max8997_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_USB_HOST] = "USB-Host",
+ [EXTCON_CABLE_TA] = "TA",
+ [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
+ [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
+ [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_CABLE_MHL] = "MHL",
+ [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
+ [EXTCON_CABLE_DOCK_CARD] = "Dock-Card",
+ [EXTCON_CABLE_JIG] = "JIG",
NULL,
};
+/*
+ * max8997_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max8997 MUIC
+ * @time: the debounce time of ADC
+ */
+static int max8997_muic_set_debounce_time(struct max8997_muic_info *info,
+ enum max8997_muic_adc_debounce_time time)
+{
+ int ret;
+
+ switch (time) {
+ case ADC_DEBOUNCE_TIME_0_5MS:
+ case ADC_DEBOUNCE_TIME_10MS:
+ case ADC_DEBOUNCE_TIME_25MS:
+ case ADC_DEBOUNCE_TIME_38_62MS:
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL3,
+ time << CONTROL3_ADCDBSET_SHIFT,
+ CONTROL3_ADCDBSET_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to set ADC debounce time\n");
+ return -EAGAIN;
+ }
+ break;
+ default:
+ dev_err(info->dev, "invalid ADC debounce time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+};
+
+/*
+ * max8997_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max8997 MUIC
+ * @val: the path according to attached cable
+ * @attached: the state of cable (true: attached, false: detached)
+ *
+ * The max8997 MUIC device shares an external H/W line among a variety of
+ * cables, so this function sets the internal path of the H/W line according
+ * to the type of the attached cable.
+ */
+static int max8997_muic_set_path(struct max8997_muic_info *info,
+ u8 val, bool attached)
+{
+ int ret = 0;
+ u8 ctrl1, ctrl2 = 0;
+
+ if (attached)
+ ctrl1 = val;
+ else
+ ctrl1 = CONTROL1_SW_OPEN;
+
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL1, ctrl1, COMP_SW_MASK);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return -EAGAIN;
+ }
+
+ if (attached)
+ ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
+ else
+ ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
+
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL2, ctrl2,
+ CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return -EAGAIN;
+ }
+
+ dev_info(info->dev,
+ "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
+ ctrl1, ctrl2, attached ? "attached" : "detached");
+
+ return 0;
+}
+
+/*
+ * max8997_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max8997 MUIC
+ * @group: the cable group (ADC or CHG) to check
+ * @attached: on return, whether the cable is attached
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group:
+ * - MAX8997_CABLE_GROUP_ADC
+ * - MAX8997_CABLE_GROUP_CHG
+ */
+static int max8997_muic_get_cable_type(struct max8997_muic_info *info,
+ enum max8997_muic_cable_group group, bool *attached)
+{
+ int cable_type = 0;
+ int adc;
+ int chg_type;
+
+ switch (group) {
+ case MAX8997_CABLE_GROUP_ADC:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type) for handling cable when cable is
+ * detached.
+ */
+ if (adc == MAX8997_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX8997_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX8997_CABLE_GROUP_CHG:
+ /*
+ * Read charger type to check cable type and decide cable state
+ * according to type of charger cable.
+ */
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (chg_type == MAX8997_CHARGER_TYPE_NONE) {
+ *attached = false;
+
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX8997_CHARGER_TYPE_NONE;
+ } else {
+ *attached = true;
+
+ /*
+ * Check current cable state/cable type and store cable
+ * type(info->prev_chg_type) for handling cable when
+ * charger cable is detached.
+ */
+ cable_type = info->prev_chg_type = chg_type;
+ }
+
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
+
static int max8997_muic_handle_usb(struct max8997_muic_info *info,
enum max8997_muic_usb_type usb_type, bool attached)
{
int ret = 0;
if (usb_type == MAX8997_USB_HOST) {
- /* switch to USB */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
- SW_MASK);
- if (ret) {
+ ret = max8997_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return ret;
}
}
@@ -148,41 +353,39 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
extcon_set_cable_state(info->edev, "USB", attached);
break;
default:
- ret = -EINVAL;
- break;
+ dev_err(info->dev, "failed to detect %s usb cable\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static int max8997_muic_handle_dock(struct max8997_muic_info *info,
- int adc, bool attached)
+ int cable_type, bool attached)
{
int ret = 0;
- /* switch to AUDIO */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
- SW_MASK);
+ ret = max8997_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return ret;
}
- switch (adc) {
- case MAX8997_ADC_DESKDOCK:
+ switch (cable_type) {
+ case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
extcon_set_cable_state(info->edev, "Dock-desk", attached);
break;
- case MAX8997_ADC_CARDOCK:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
extcon_set_cable_state(info->edev, "Dock-card", attached);
break;
default:
- ret = -EINVAL;
- break;
+ dev_err(info->dev, "failed to detect %s dock device\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+
+ return 0;
}
static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
@@ -191,199 +394,188 @@ static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
int ret = 0;
/* switch to UART */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
- SW_MASK);
+ ret = max8997_muic_set_path(info, info->path_uart, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return -EINVAL;
}
extcon_set_cable_state(info->edev, "JIG", attached);
-out:
- return ret;
-}
-
-static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
-{
- int ret = 0;
- switch (info->pre_adc) {
- case MAX8997_ADC_GROUND:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
- break;
- case MAX8997_ADC_MHL:
- extcon_set_cable_state(info->edev, "MHL", false);
- break;
- case MAX8997_ADC_JIG_USB_1:
- case MAX8997_ADC_JIG_USB_2:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
- break;
- case MAX8997_ADC_DESKDOCK:
- case MAX8997_ADC_CARDOCK:
- ret = max8997_muic_handle_dock(info, info->pre_adc, false);
- break;
- case MAX8997_ADC_JIG_UART:
- ret = max8997_muic_handle_jig_uart(info, false);
- break;
- default:
- break;
- }
-
- return ret;
+ return 0;
}
-static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc)
+static int max8997_muic_adc_handler(struct max8997_muic_info *info)
{
+ int cable_type;
+ bool attached;
int ret = 0;
- switch (adc) {
- case MAX8997_ADC_GROUND:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true);
- break;
- case MAX8997_ADC_MHL:
- extcon_set_cable_state(info->edev, "MHL", true);
- break;
- case MAX8997_ADC_JIG_USB_1:
- case MAX8997_ADC_JIG_USB_2:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true);
- break;
- case MAX8997_ADC_DESKDOCK:
- case MAX8997_ADC_CARDOCK:
- ret = max8997_muic_handle_dock(info, adc, true);
- break;
- case MAX8997_ADC_JIG_UART:
- ret = max8997_muic_handle_jig_uart(info, true);
- break;
- case MAX8997_ADC_OPEN:
- ret = max8997_muic_handle_adc_detach(info);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- info->pre_adc = adc;
-out:
- return ret;
-}
-
-static int max8997_muic_handle_charger_type_detach(
- struct max8997_muic_info *info)
-{
- switch (info->pre_charger_type) {
- case MAX8997_CHARGER_TYPE_USB:
- extcon_set_cable_state(info->edev, "USB", false);
- break;
- case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev, "Charge-downstream", false);
- break;
- case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", false);
- break;
- case MAX8997_CHARGER_TYPE_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", false);
- break;
- case MAX8997_CHARGER_TYPE_1A:
- extcon_set_cable_state(info->edev, "Fast-charger", false);
- break;
+ /* Check cable state which is either detached or attached */
+ cable_type = max8997_muic_get_cable_type(info,
+ MAX8997_CABLE_GROUP_ADC, &attached);
+
+ switch (cable_type) {
+ case MAX8997_MUIC_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_MHL:
+ extcon_set_cable_state(info->edev, "MHL", attached);
+ break;
+ case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
+ ret = max8997_muic_handle_dock(info, cable_type, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF:
+ ret = max8997_muic_handle_jig_uart(info, attached);
+ break;
+ case MAX8997_MUIC_ADC_REMOTE_S1_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S2_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S3_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S4_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S5_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S6_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S7_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S8_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S9_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S10_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S11_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S12_BUTTON:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_1:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_2:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_3:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_4:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_5:
+ case MAX8997_MUIC_ADC_CEA936_AUDIO:
+ case MAX8997_MUIC_ADC_PHONE_POWERED_DEV:
+ case MAX8997_MUIC_ADC_TTY_CONVERTER:
+ case MAX8997_MUIC_ADC_UART_CABLE:
+ case MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG:
+ case MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG:
+ case MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE:
+ /*
+ * This cable isn't used in the general case. If it is specifically
+ * needed to detect an additional cable, implement the proper
+ * operation for when this cable is attached/detached.
+ */
+ dev_info(info->dev,
+ "cable is %s but it isn't used (type:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
default:
+ dev_err(info->dev,
+ "failed to detect %s unknown cable (type:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
return -EINVAL;
- break;
}
return 0;
}
-static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
- enum max8997_muic_charger_type charger_type)
+static int max8997_muic_chg_handler(struct max8997_muic_info *info)
{
- u8 adc;
- int ret;
+ int chg_type;
+ bool attached;
+ int adc;
- ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc);
- if (ret) {
- dev_err(info->dev, "failed to read muic register\n");
- goto out;
- }
+ chg_type = max8997_muic_get_cable_type(info,
+ MAX8997_CABLE_GROUP_CHG, &attached);
- switch (charger_type) {
+ switch (chg_type) {
case MAX8997_CHARGER_TYPE_NONE:
- ret = max8997_muic_handle_charger_type_detach(info);
break;
case MAX8997_CHARGER_TYPE_USB:
- if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) {
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ if ((adc & STATUS1_ADC_MASK) == MAX8997_MUIC_ADC_OPEN) {
max8997_muic_handle_usb(info,
- MAX8997_USB_DEVICE, true);
+ MAX8997_USB_DEVICE, attached);
}
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev, "Charge-downstream", true);
+ extcon_set_cable_state(info->edev, "Charge-downstream", attached);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", true);
+ extcon_set_cable_state(info->edev, "TA", attached);
break;
case MAX8997_CHARGER_TYPE_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", true);
+ extcon_set_cable_state(info->edev, "Slow-charger", attached);
break;
case MAX8997_CHARGER_TYPE_1A:
- extcon_set_cable_state(info->edev, "Fast-charger", true);
+ extcon_set_cable_state(info->edev, "Fast-charger", attached);
break;
default:
- ret = -EINVAL;
- goto out;
+ dev_err(info->dev,
+ "failed to detect %s unknown chg cable (type:0x%x)\n",
+ attached ? "attached" : "detached", chg_type);
+ return -EINVAL;
}
- info->pre_charger_type = charger_type;
-out:
- return ret;
+ return 0;
}
static void max8997_muic_irq_work(struct work_struct *work)
{
struct max8997_muic_info *info = container_of(work,
struct max8997_muic_info, irq_work);
- u8 status[2];
- u8 adc, chg_type;
int irq_type = 0;
int i, ret;
+ if (!info->edev)
+ return;
+
mutex_lock(&info->mutex);
+ for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
+ if (info->irq == muic_irqs[i].virq)
+ irq_type = muic_irqs[i].irq;
+
ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
- 2, status);
+ 2, info->status);
if (ret) {
dev_err(info->dev, "failed to read muic register\n");
mutex_unlock(&info->mutex);
return;
}
- dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
- status[0], status[1]);
-
- for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
- if (info->irq == muic_irqs[i].virq)
- irq_type = muic_irqs[i].irq;
-
switch (irq_type) {
+ case MAX8997_MUICIRQ_ADCError:
+ case MAX8997_MUICIRQ_ADCLow:
case MAX8997_MUICIRQ_ADC:
- adc = status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
-
- max8997_muic_handle_adc(info, adc);
+ /* Handle all of cable except for charger cable */
+ ret = max8997_muic_adc_handler(info);
break;
+ case MAX8997_MUICIRQ_VBVolt:
+ case MAX8997_MUICIRQ_DBChg:
+ case MAX8997_MUICIRQ_DCDTmr:
+ case MAX8997_MUICIRQ_ChgDetRun:
case MAX8997_MUICIRQ_ChgTyp:
- chg_type = status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
-
- max8997_muic_handle_charger_type(info, chg_type);
+ /* Handle charger cable */
+ ret = max8997_muic_chg_handler(info);
+ break;
+ case MAX8997_MUICIRQ_OVP:
break;
default:
dev_info(info->dev, "misc interrupt: irq %d occurred\n",
irq_type);
- break;
+ mutex_unlock(&info->mutex);
+ return;
}
+ if (ret < 0)
+ dev_err(info->dev, "failed to handle MUIC interrupt\n");
+
mutex_unlock(&info->mutex);
return;
@@ -401,29 +593,60 @@ static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void max8997_muic_detect_dev(struct max8997_muic_info *info)
+static int max8997_muic_detect_dev(struct max8997_muic_info *info)
{
- int ret;
- u8 status[2], adc, chg_type;
+ int ret = 0;
+ int adc;
+ int chg_type;
+ bool attached;
- ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
- 2, status);
+ mutex_lock(&info->mutex);
+
+ /* Read STATUSx register to detect accessory */
+ ret = max8997_bulk_read(info->muic,
+ MAX8997_MUIC_REG_STATUS1, 2, info->status);
if (ret) {
- dev_err(info->dev, "failed to read muic register\n");
- return;
+ dev_err(info->dev, "failed to read MUIC register\n");
+ mutex_unlock(&info->mutex);
+ return -EINVAL;
}
- dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n",
- status[0], status[1]);
+ adc = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_ADC,
+ &attached);
+ if (attached && adc != MAX8997_MUIC_ADC_OPEN) {
+ ret = max8997_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect ADC cable\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
- adc = status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
+ chg_type = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_CHG,
+ &attached);
+ if (attached && chg_type != MAX8997_CHARGER_TYPE_NONE) {
+ ret = max8997_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger cable\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
- chg_type = status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
+ mutex_unlock(&info->mutex);
- max8997_muic_handle_adc(info, adc);
- max8997_muic_handle_charger_type(info, chg_type);
+ return 0;
+}
+
+static void max8997_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max8997_muic_info *info = container_of(to_delayed_work(work),
+ struct max8997_muic_info, wq_detcable);
+ int ret;
+
+ ret = max8997_muic_detect_dev(info);
+ if (ret < 0)
+ pr_err("failed to detect cable type\n");
}
static int max8997_muic_probe(struct platform_device *pdev)
@@ -431,13 +654,14 @@ static int max8997_muic_probe(struct platform_device *pdev)
struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
struct max8997_muic_info *info;
+ int delay_jiffies;
int ret, i;
- info = kzalloc(sizeof(struct max8997_muic_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
+ GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_kfree;
+ return -ENOMEM;
}
info->dev = &pdev->dev;
@@ -450,15 +674,19 @@ static int max8997_muic_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max8997_muic_irq *muic_irq = &muic_irqs[i];
- int virq = 0;
+ unsigned int virq = 0;
virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
- if (!virq)
+ if (!virq) {
+ ret = -EINVAL;
goto err_irq;
+ }
muic_irq->virq = virq;
- ret = request_threaded_irq(virq, NULL,max8997_muic_irq_handler,
- 0, muic_irq->name, info);
+ ret = request_threaded_irq(virq, NULL,
+ max8997_muic_irq_handler,
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d,"
@@ -469,7 +697,8 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
/* External connector */
- info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+ GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
@@ -480,7 +709,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
ret = extcon_dev_register(info->edev, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_extcon;
+ goto err_irq;
}
/* Initialize registers according to platform data */
@@ -493,18 +722,46 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
}
- /* Initial device detection */
- max8997_muic_detect_dev(info);
+ /*
+ * Default USB/UART path: either the UART/USB or the AUX_UART/AUX_USB
+ * h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ if (pdata->muic_pdata->path_uart)
+ info->path_uart = pdata->muic_pdata->path_uart;
+ else
+ info->path_uart = CONTROL1_SW_UART;
+
+ if (pdata->muic_pdata->path_usb)
+ info->path_usb = pdata->muic_pdata->path_usb;
+ else
+ info->path_usb = CONTROL1_SW_USB;
+
+ /* Set initial path for UART */
+ max8997_muic_set_path(info, info->path_uart, true);
+
+ /* Set ADC debounce time */
+ max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
+
+ /*
+ * Detect the accessory after the platform initialization has completed.
+ *
+ * - Use a delayed workqueue to detect the cable state and then
+ * notify the cable state to the notifiee/platform through a uevent.
+ * Once the platform has finished booting, the extcon provider
+ * driver should notify the cable state to the upper layer.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
+ if (pdata->muic_pdata->detcable_delay_ms)
+ delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
+ else
+ delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+ schedule_delayed_work(&info->wq_detcable, delay_jiffies);
- return ret;
+ return 0;
-err_extcon:
- kfree(info->edev);
err_irq:
while (--i >= 0)
free_irq(muic_irqs[i].virq, info);
- kfree(info);
-err_kfree:
return ret;
}
@@ -519,9 +776,6 @@ static int max8997_muic_remove(struct platform_device *pdev)
extcon_dev_unregister(info->edev);
- kfree(info->edev);
- kfree(info);
-
return 0;
}
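
For reference, the CONTROL1 switch values replaced in this file (the removed MAX8997_SW_* macros above) put the same 3-bit code into both the COMN1 and COMP2 switch fields. A small sketch of that encoding; the helper name muic_control1_sw is illustrative:

#include <linux/types.h>

#define COMN1SW_SHIFT	0
#define COMP2SW_SHIFT	3

/* Compose a CONTROL1 switch value: the same code drives COMN1 and COMP2 */
static inline u8 muic_control1_sw(u8 code)
{
	return (code << COMP2SW_SHIFT) | (code << COMN1SW_SHIFT);
}

/* muic_control1_sw(1) == USB, (2) == AUDIO, (3) == UART, (0) == OPEN */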
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index e7a711f53a6f..2b27bff2591a 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -270,7 +270,7 @@ static int fwnet_header_cache(const struct neighbour *neigh,
if (type == cpu_to_be16(ETH_P_802_3))
return -1;
net = neigh->dev;
- h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
+ h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
hh->hh_len = FWNET_HLEN;
@@ -282,7 +282,7 @@ static int fwnet_header_cache(const struct neighbour *neigh,
static void fwnet_header_cache_update(struct hh_cache *hh,
const struct net_device *net, const unsigned char *haddr)
{
- memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len);
+ memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len);
}
static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
@@ -398,11 +398,11 @@ static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
new->datagram_label = datagram_label;
new->datagram_size = dg_size;
- new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
+ new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net));
if (new->skb == NULL)
goto fail_w_fi;
- skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
+ skb_reserve(new->skb, LL_RESERVED_SPACE(net));
new->pbuf = skb_put(new->skb, dg_size);
memcpy(new->pbuf + frag_off, frag_buf, frag_len);
list_add_tail(&new->pd_link, &peer->pd_list);
@@ -520,7 +520,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
dev = netdev_priv(net);
/* Write metadata, and then pass to the receive level */
skb->dev = net;
- skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
+ skb->ip_summed = CHECKSUM_NONE;
/*
* Parse the encapsulation header. This actually does the job of
@@ -690,14 +690,14 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
buf++;
len -= RFC2374_UNFRAG_HDR_SIZE;
- skb = dev_alloc_skb(len + net->hard_header_len + 15);
+ skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
if (unlikely(!skb)) {
dev_err(&net->dev, "out of memory\n");
net->stats.rx_dropped++;
return -ENOMEM;
}
- skb_reserve(skb, (net->hard_header_len + 15) & ~15);
+ skb_reserve(skb, LL_RESERVED_SPACE(net));
memcpy(skb_put(skb, len), buf, len);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
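
The firewire/net.c hunks swap the open-coded 16-byte rounding of hard_header_len for LL_RESERVED_SPACE(). A hedged sketch of the receive-buffer allocation pattern the new code follows; alloc_rx_skb is an illustrative name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *alloc_rx_skb(struct net_device *net, unsigned int len)
{
	struct sk_buff *skb;

	/* reserve the (aligned) link-layer header room up front */
	skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
	if (!skb)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(net));
	return skb;
}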
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6ce6e07c38c1..45912e6e0ac2 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -329,7 +329,7 @@ module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
- ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ", AR/selfID endianness = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index ea5ac2dc1233..8e77c02edb24 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -537,7 +537,7 @@ static struct attribute_group dcdbas_attr_group = {
.attrs = dcdbas_dev_attrs,
};
-static int __devinit dcdbas_probe(struct platform_device *dev)
+static int dcdbas_probe(struct platform_device *dev)
{
int i, error;
@@ -575,7 +575,7 @@ static int __devinit dcdbas_probe(struct platform_device *dev)
return 0;
}
-static int __devexit dcdbas_remove(struct platform_device *dev)
+static int dcdbas_remove(struct platform_device *dev)
{
int i;
@@ -593,7 +593,7 @@ static struct platform_driver dcdbas_driver = {
.owner = THIS_MODULE,
},
.probe = dcdbas_probe,
- .remove = __devexit_p(dcdbas_remove),
+ .remove = dcdbas_remove,
};
/**
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index b298158cb922..982f1f5f5742 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -16,6 +16,7 @@
*/
static char dmi_empty_string[] = " ";
+static u16 __initdata dmi_ver;
/*
* Catch too early calls to dmi_check_system():
*/
@@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
return 0;
}
-static int __init dmi_checksum(const u8 *buf)
+static int __init dmi_checksum(const u8 *buf, u8 len)
{
u8 sum = 0;
int a;
- for (a = 0; a < 15; a++)
+ for (a = 0; a < len; a++)
sum += buf[a];
return sum == 0;
@@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
return;
for (i = 0; i < 16 && (is_ff || is_00); i++) {
- if(d[i] != 0x00) is_ff = 0;
- if(d[i] != 0xFF) is_00 = 0;
+ if (d[i] != 0x00)
+ is_00 = 0;
+ if (d[i] != 0xFF)
+ is_ff = 0;
}
if (is_ff || is_00)
@@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
if (!s)
return;
- sprintf(s, "%pUB", d);
+ /*
+ * As of version 2.6 of the SMBIOS specification, the first 3 fields of
+ * the UUID are supposed to be little-endian encoded. The specification
+ * says that this is the de facto standard.
+ */
+ if (dmi_ver >= 0x0206)
+ sprintf(s, "%pUL", d);
+ else
+ sprintf(s, "%pUB", d);
dmi_ident[slot] = s;
}
@@ -404,35 +415,63 @@ static int __init dmi_present(const char __iomem *p)
u8 buf[15];
memcpy_fromio(buf, p, 15);
- if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
+ if (dmi_checksum(buf, 15)) {
dmi_num = (buf[13] << 8) | buf[12];
dmi_len = (buf[7] << 8) | buf[6];
dmi_base = (buf[11] << 24) | (buf[10] << 16) |
(buf[9] << 8) | buf[8];
- /*
- * DMI version 0.0 means that the real version is taken from
- * the SMBIOS version, which we don't know at this point.
- */
- if (buf[14] != 0)
- printk(KERN_INFO "DMI %d.%d present.\n",
- buf[14] >> 4, buf[14] & 0xF);
- else
- printk(KERN_INFO "DMI present.\n");
if (dmi_walk_early(dmi_decode) == 0) {
+ if (dmi_ver)
+ pr_info("SMBIOS %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
+ else {
+ dmi_ver = (buf[14] & 0xF0) << 4 |
+ (buf[14] & 0x0F);
+ pr_info("Legacy DMI %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
+ }
dmi_dump_ids();
return 0;
}
}
+ dmi_ver = 0;
return 1;
}
+static int __init smbios_present(const char __iomem *p)
+{
+ u8 buf[32];
+ int offset = 0;
+
+ memcpy_fromio(buf, p, 32);
+ if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
+ dmi_ver = (buf[6] << 8) + buf[7];
+
+ /* Some BIOSes report a weird SMBIOS version, fix that up */
+ switch (dmi_ver) {
+ case 0x021F:
+ case 0x0221:
+ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
+ dmi_ver & 0xFF, 3);
+ dmi_ver = 0x0203;
+ break;
+ case 0x0233:
+ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
+ dmi_ver = 0x0206;
+ break;
+ }
+ offset = 16;
+ }
+ return dmi_present(buf + offset);
+}
+
void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
int rc;
- if (efi_enabled) {
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.smbios == EFI_INVALID_TABLE_ADDR)
goto error;
@@ -444,7 +483,7 @@ void __init dmi_scan_machine(void)
if (p == NULL)
goto error;
- rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
+ rc = smbios_present(p);
dmi_iounmap(p, 32);
if (!rc) {
dmi_available = 1;
@@ -462,7 +501,12 @@ void __init dmi_scan_machine(void)
goto error;
for (q = p; q < p + 0x10000; q += 16) {
- rc = dmi_present(q);
+ if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0)
+ rc = smbios_present(q);
+ else if (memcmp(q, "_DMI_", 5) == 0)
+ rc = dmi_present(q);
+ else
+ continue;
if (!rc) {
dmi_available = 1;
dmi_iounmap(p, 0x10000);
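
The dmi_scan.c change makes the entry-point checksum length-aware and packs the SMBIOS major/minor version into one 16-bit word (e.g. 2.6 becomes 0x0206). A minimal sketch of both ideas, with illustrative function names:

#include <linux/types.h>

/* An entry point is valid when its bytes sum to zero modulo 256 */
static int entry_point_checksum_ok(const u8 *buf, u8 len)
{
	u8 sum = 0;

	while (len--)
		sum += *buf++;
	return sum == 0;
}

/* Pack major/minor as dmi_ver does, e.g. pack_smbios_ver(2, 6) == 0x0206 */
static u16 pack_smbios_ver(u8 major, u8 minor)
{
	return (major << 8) | minor;
}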
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 6e51c1e81f14..fed08b661711 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -80,6 +80,10 @@
#include <linux/slab.h>
#include <linux/pstore.h>
+#include <linux/fs.h>
+#include <linux/ramfs.h>
+#include <linux/pagemap.h>
+
#include <asm/uaccess.h>
#define EFIVARS_VERSION "0.08"
@@ -93,6 +97,12 @@ MODULE_VERSION(EFIVARS_VERSION);
#define DUMP_NAME_LEN 52
/*
+ * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"))
+ * not including trailing NUL
+ */
+#define GUID_LEN 36
+
+/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
* space in each part of the structure,
@@ -108,7 +118,6 @@ struct efi_variable {
__u32 Attributes;
} __attribute__((packed));
-
struct efivar_entry {
struct efivars *efivars;
struct efi_variable var;
@@ -122,6 +131,9 @@ struct efivar_attribute {
ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
};
+static struct efivars __efivars;
+static struct efivar_operations ops;
+
#define PSTORE_EFI_ATTRIBUTES \
(EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
@@ -146,6 +158,13 @@ efivar_create_sysfs_entry(struct efivars *efivars,
efi_char16_t *variable_name,
efi_guid_t *vendor_guid);
+/*
+ * Prototype for workqueue functions updating sysfs entry
+ */
+
+static void efivar_update_sysfs_entries(struct work_struct *);
+static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
+
/* Return the number of unicode characters in data */
static unsigned long
utf16_strnlen(efi_char16_t *s, size_t maxlength)
@@ -393,10 +412,11 @@ static efi_status_t
get_var_data(struct efivars *efivars, struct efi_variable *var)
{
efi_status_t status;
+ unsigned long flags;
- spin_lock(&efivars->lock);
+ spin_lock_irqsave(&efivars->lock, flags);
status = get_var_data_locked(efivars, var);
- spin_unlock(&efivars->lock);
+ spin_unlock_irqrestore(&efivars->lock, flags);
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
@@ -525,14 +545,14 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
return -EINVAL;
}
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
new_var->Attributes,
new_var->DataSize,
new_var->Data);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
@@ -629,21 +649,489 @@ static struct kobj_type efivar_ktype = {
.default_attrs = def_attrs,
};
-static struct pstore_info efi_pstore_info;
-
static inline void
efivar_unregister(struct efivar_entry *var)
{
kobject_put(&var->kobj);
}
+static int efivarfs_file_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int efi_status_to_err(efi_status_t status)
+{
+ int err;
+
+ switch (status) {
+ case EFI_INVALID_PARAMETER:
+ err = -EINVAL;
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ err = -ENOSPC;
+ break;
+ case EFI_DEVICE_ERROR:
+ err = -EIO;
+ break;
+ case EFI_WRITE_PROTECTED:
+ err = -EROFS;
+ break;
+ case EFI_SECURITY_VIOLATION:
+ err = -EACCES;
+ break;
+ case EFI_NOT_FOUND:
+ err = -EIO;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static ssize_t efivarfs_file_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct efivar_entry *var = file->private_data;
+ struct efivars *efivars;
+ efi_status_t status;
+ void *data;
+ u32 attributes;
+ struct inode *inode = file->f_mapping->host;
+ unsigned long datasize = count - sizeof(attributes);
+ unsigned long newdatasize;
+ u64 storage_size, remaining_size, max_size;
+ ssize_t bytes = 0;
+
+ if (count < sizeof(attributes))
+ return -EINVAL;
+
+ if (copy_from_user(&attributes, userbuf, sizeof(attributes)))
+ return -EFAULT;
+
+ if (attributes & ~(EFI_VARIABLE_MASK))
+ return -EINVAL;
+
+ efivars = var->efivars;
+
+ /*
+ * Ensure that the user can't allocate arbitrarily large
+ * amounts of memory. Pick a default size of 64K if
+ * QueryVariableInfo() isn't supported by the firmware.
+ */
+ spin_lock_irq(&efivars->lock);
+
+ if (!efivars->ops->query_variable_info)
+ status = EFI_UNSUPPORTED;
+ else {
+ const struct efivar_operations *fops = efivars->ops;
+ status = fops->query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+ }
+
+ spin_unlock_irq(&efivars->lock);
+
+ if (status != EFI_SUCCESS) {
+ if (status != EFI_UNSUPPORTED)
+ return efi_status_to_err(status);
+
+ remaining_size = 65536;
+ }
+
+ if (datasize > remaining_size)
+ return -ENOSPC;
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (copy_from_user(data, userbuf + sizeof(attributes), datasize)) {
+ bytes = -EFAULT;
+ goto out;
+ }
+
+ if (validate_var(&var->var, data, datasize) == false) {
+ bytes = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * The lock here protects the get_variable call, the conditional
+ * set_variable call, and removal of the variable from the efivars
+ * list (in the case of an authenticated delete).
+ */
+ spin_lock_irq(&efivars->lock);
+
+ status = efivars->ops->set_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ attributes, datasize,
+ data);
+
+ if (status != EFI_SUCCESS) {
+ spin_unlock_irq(&efivars->lock);
+ kfree(data);
+
+ return efi_status_to_err(status);
+ }
+
+ bytes = count;
+
+ /*
+ * Writing to the variable may have caused a change in size (which
+ * could either be an append or an overwrite), or the variable to be
+ * deleted. Perform a GetVariable() so we can tell what actually
+ * happened.
+ */
+ newdatasize = 0;
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ NULL, &newdatasize,
+ NULL);
+
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ spin_unlock_irq(&efivars->lock);
+ mutex_lock(&inode->i_mutex);
+ i_size_write(inode, newdatasize + sizeof(attributes));
+ mutex_unlock(&inode->i_mutex);
+
+ } else if (status == EFI_NOT_FOUND) {
+ list_del(&var->list);
+ spin_unlock_irq(&efivars->lock);
+ efivar_unregister(var);
+ drop_nlink(inode);
+ d_delete(file->f_dentry);
+ dput(file->f_dentry);
+
+ } else {
+ spin_unlock_irq(&efivars->lock);
+ pr_warn("efivarfs: inconsistent EFI variable implementation? "
+ "status = %lx\n", status);
+ }
+
+out:
+ kfree(data);
+
+ return bytes;
+}
+
+static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct efivar_entry *var = file->private_data;
+ struct efivars *efivars = var->efivars;
+ efi_status_t status;
+ unsigned long datasize = 0;
+ u32 attributes;
+ void *data;
+ ssize_t size = 0;
+
+ spin_lock_irq(&efivars->lock);
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ &attributes, &datasize, NULL);
+ spin_unlock_irq(&efivars->lock);
+
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_irq(&efivars->lock);
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ &attributes, &datasize,
+ (data + sizeof(attributes)));
+ spin_unlock_irq(&efivars->lock);
+
+ if (status != EFI_SUCCESS) {
+ size = efi_status_to_err(status);
+ goto out_free;
+ }
+
+ memcpy(data, &attributes, sizeof(attributes));
+ size = simple_read_from_buffer(userbuf, count, ppos,
+ data, datasize + sizeof(attributes));
+out_free:
+ kfree(data);
+
+ return size;
+}
+
+static void efivarfs_evict_inode(struct inode *inode)
+{
+ clear_inode(inode);
+}
+
+static const struct super_operations efivarfs_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .evict_inode = efivarfs_evict_inode,
+ .show_options = generic_show_options,
+};
+
+static struct super_block *efivarfs_sb;
+
+static const struct inode_operations efivarfs_dir_inode_operations;
+
+static const struct file_operations efivarfs_file_operations = {
+ .open = efivarfs_file_open,
+ .read = efivarfs_file_read,
+ .write = efivarfs_file_write,
+ .llseek = no_llseek,
+};
+
+static struct inode *efivarfs_get_inode(struct super_block *sb,
+ const struct inode *dir, int mode, dev_t dev)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (inode) {
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_fop = &efivarfs_file_operations;
+ break;
+ case S_IFDIR:
+ inode->i_op = &efivarfs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inc_nlink(inode);
+ break;
+ }
+ }
+ return inode;
+}
+
+static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
+{
+ guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
+ guid->b[1] = hex_to_bin(str[4]) << 4 | hex_to_bin(str[5]);
+ guid->b[2] = hex_to_bin(str[2]) << 4 | hex_to_bin(str[3]);
+ guid->b[3] = hex_to_bin(str[0]) << 4 | hex_to_bin(str[1]);
+ guid->b[4] = hex_to_bin(str[11]) << 4 | hex_to_bin(str[12]);
+ guid->b[5] = hex_to_bin(str[9]) << 4 | hex_to_bin(str[10]);
+ guid->b[6] = hex_to_bin(str[16]) << 4 | hex_to_bin(str[17]);
+ guid->b[7] = hex_to_bin(str[14]) << 4 | hex_to_bin(str[15]);
+ guid->b[8] = hex_to_bin(str[19]) << 4 | hex_to_bin(str[20]);
+ guid->b[9] = hex_to_bin(str[21]) << 4 | hex_to_bin(str[22]);
+ guid->b[10] = hex_to_bin(str[24]) << 4 | hex_to_bin(str[25]);
+ guid->b[11] = hex_to_bin(str[26]) << 4 | hex_to_bin(str[27]);
+ guid->b[12] = hex_to_bin(str[28]) << 4 | hex_to_bin(str[29]);
+ guid->b[13] = hex_to_bin(str[30]) << 4 | hex_to_bin(str[31]);
+ guid->b[14] = hex_to_bin(str[32]) << 4 | hex_to_bin(str[33]);
+ guid->b[15] = hex_to_bin(str[34]) << 4 | hex_to_bin(str[35]);
+}
+
+static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
+{
+ struct inode *inode;
+ struct efivars *efivars = &__efivars;
+ struct efivar_entry *var;
+ int namelen, i = 0, err = 0;
+
+ /*
+ * We need a GUID, plus at least one letter for the variable name,
+ * plus the '-' separator
+ */
+ if (dentry->d_name.len < GUID_LEN + 2)
+ return -EINVAL;
+
+ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
+ if (!inode)
+ return -ENOMEM;
+
+ var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
+ if (!var) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* length of the variable name itself: remove GUID and separator */
+ namelen = dentry->d_name.len - GUID_LEN - 1;
+
+ efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
+ &var->var.VendorGuid);
+
+ for (i = 0; i < namelen; i++)
+ var->var.VariableName[i] = dentry->d_name.name[i];
+
+ var->var.VariableName[i] = '\0';
+
+ inode->i_private = var;
+ var->efivars = efivars;
+ var->kobj.kset = efivars->kset;
+
+ err = kobject_init_and_add(&var->kobj, &efivar_ktype, NULL, "%s",
+ dentry->d_name.name);
+ if (err)
+ goto out;
+
+ kobject_uevent(&var->kobj, KOBJ_ADD);
+ spin_lock_irq(&efivars->lock);
+ list_add(&var->list, &efivars->list);
+ spin_unlock_irq(&efivars->lock);
+ d_instantiate(dentry, inode);
+ dget(dentry);
+out:
+ if (err) {
+ kfree(var);
+ iput(inode);
+ }
+ return err;
+}
+
+static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct efivar_entry *var = dentry->d_inode->i_private;
+ struct efivars *efivars = var->efivars;
+ efi_status_t status;
+
+ spin_lock_irq(&efivars->lock);
+
+ status = efivars->ops->set_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ 0, 0, NULL);
+
+ if (status == EFI_SUCCESS || status == EFI_NOT_FOUND) {
+ list_del(&var->list);
+ spin_unlock_irq(&efivars->lock);
+ efivar_unregister(var);
+ drop_nlink(dentry->d_inode);
+ dput(dentry);
+ return 0;
+ }
+
+ spin_unlock_irq(&efivars->lock);
+ return -EINVAL;
+};
+
+static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *inode = NULL;
+ struct dentry *root;
+ struct efivar_entry *entry, *n;
+ struct efivars *efivars = &__efivars;
+ char *name;
+
+ efivarfs_sb = sb;
+
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = EFIVARFS_MAGIC;
+ sb->s_op = &efivarfs_ops;
+ sb->s_time_gran = 1;
+
+ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+ if (!inode)
+ return -ENOMEM;
+ inode->i_op = &efivarfs_dir_inode_operations;
+
+ root = d_make_root(inode);
+ sb->s_root = root;
+ if (!root)
+ return -ENOMEM;
+
+ list_for_each_entry_safe(entry, n, &efivars->list, list) {
+ struct dentry *dentry, *root = efivarfs_sb->s_root;
+ unsigned long size = 0;
+ int len, i;
+
+ inode = NULL;
+
+ len = utf16_strlen(entry->var.VariableName);
+
+ /* name, plus '-', plus GUID, plus NUL */
+ name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
+ if (!name)
+ goto fail;
+
+ for (i = 0; i < len; i++)
+ name[i] = entry->var.VariableName[i] & 0xFF;
+
+ name[len] = '-';
+
+ efi_guid_unparse(&entry->var.VendorGuid, name + len + 1);
+
+ name[len+GUID_LEN+1] = '\0';
+
+ inode = efivarfs_get_inode(efivarfs_sb, root->d_inode,
+ S_IFREG | 0644, 0);
+ if (!inode)
+ goto fail_name;
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ goto fail_inode;
+
+ /* copied by the above to local storage in the dentry. */
+ kfree(name);
+
+ spin_lock_irq(&efivars->lock);
+ efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ &entry->var.Attributes,
+ &size,
+ NULL);
+ spin_unlock_irq(&efivars->lock);
+
+ mutex_lock(&inode->i_mutex);
+ inode->i_private = entry;
+ i_size_write(inode, size+4);
+ mutex_unlock(&inode->i_mutex);
+ d_add(dentry, inode);
+ }
+
+ return 0;
+
+fail_inode:
+ iput(inode);
+fail_name:
+ kfree(name);
+fail:
+ return -ENOMEM;
+}
+
+static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_single(fs_type, flags, data, efivarfs_fill_super);
+}
+
+static void efivarfs_kill_sb(struct super_block *sb)
+{
+ kill_litter_super(sb);
+ efivarfs_sb = NULL;
+}
+
+static struct file_system_type efivarfs_type = {
+ .name = "efivarfs",
+ .mount = efivarfs_mount,
+ .kill_sb = efivarfs_kill_sb,
+};
+
+static const struct inode_operations efivarfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .unlink = efivarfs_unlink,
+ .create = efivarfs_create,
+};
+
+static struct pstore_info efi_pstore_info;
+
#ifdef CONFIG_PSTORE
static int efi_pstore_open(struct pstore_info *psi)
{
struct efivars *efivars = psi->data;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
efivars->walk_entry = list_first_entry(&efivars->list,
struct efivar_entry, list);
return 0;
@@ -653,7 +1141,7 @@ static int efi_pstore_close(struct pstore_info *psi)
{
struct efivars *efivars = psi->data;
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return 0;
}
@@ -729,8 +1217,18 @@ static int efi_pstore_write(enum pstore_type_id type,
int i, ret = 0;
u64 storage_space, remaining_space, max_variable_size;
efi_status_t status = EFI_NOT_FOUND;
-
- spin_lock(&efivars->lock);
+ unsigned long flags;
+
+ if (pstore_cannot_block_path(reason)) {
+ /*
+ * If the lock is held by another CPU in a non-blocking path,
+ * this driver returns without entering the firmware to avoid
+ * hanging up.
+ */
+ if (!spin_trylock_irqsave(&efivars->lock, flags))
+ return -EBUSY;
+ } else
+ spin_lock_irqsave(&efivars->lock, flags);
/*
* Check if there is enough space to log.
@@ -742,7 +1240,7 @@ static int efi_pstore_write(enum pstore_type_id type,
&remaining_space,
&max_variable_size);
if (status || remaining_space < size + DUMP_NAME_LEN * 2) {
- spin_unlock(&efivars->lock);
+ spin_unlock_irqrestore(&efivars->lock, flags);
*id = part;
return -ENOSPC;
}
@@ -756,13 +1254,10 @@ static int efi_pstore_write(enum pstore_type_id type,
efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
size, psi->buf);
- spin_unlock(&efivars->lock);
+ spin_unlock_irqrestore(&efivars->lock, flags);
- if (size)
- ret = efivar_create_sysfs_entry(efivars,
- utf16_strsize(efi_name,
- DUMP_NAME_LEN * 2),
- efi_name, &vendor);
+ if (reason == KMSG_DUMP_OOPS)
+ schedule_work(&efivar_work);
*id = part;
return ret;
@@ -783,7 +1278,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
time.tv_sec);
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name[i] = name[i];
@@ -827,7 +1322,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
if (found)
list_del(&found->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
if (found)
efivar_unregister(found);
@@ -897,7 +1392,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
return -EINVAL;
}
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
/*
* Does this variable already exist?
@@ -915,7 +1410,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
}
}
if (found) {
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EINVAL;
}
@@ -929,10 +1424,10 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
status);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EIO;
}
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
/* Create the entry in sysfs. Locking is not required here */
status = efivar_create_sysfs_entry(efivars,
@@ -960,7 +1455,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
/*
* Does this variable already exist?
@@ -978,7 +1473,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
}
}
if (!found) {
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EINVAL;
}
/* force the Attributes/DataSize to 0 to ensure deletion */
@@ -994,18 +1489,87 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
status);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EIO;
}
list_del(&search_efivar->list);
/* We need to release this lock before unregistering. */
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
efivar_unregister(search_efivar);
/* It's dead Jim.... */
return count;
}
+static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
+{
+ struct efivar_entry *entry, *n;
+ struct efivars *efivars = &__efivars;
+ unsigned long strsize1, strsize2;
+ bool found = false;
+
+ strsize1 = utf16_strsize(variable_name, 1024);
+ list_for_each_entry_safe(entry, n, &efivars->list, list) {
+ strsize2 = utf16_strsize(entry->var.VariableName, 1024);
+ if (strsize1 == strsize2 &&
+ !memcmp(variable_name, &(entry->var.VariableName),
+ strsize2) &&
+ !efi_guidcmp(entry->var.VendorGuid,
+ *vendor)) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
+
+static void efivar_update_sysfs_entries(struct work_struct *work)
+{
+ struct efivars *efivars = &__efivars;
+ efi_guid_t vendor;
+ efi_char16_t *variable_name;
+ unsigned long variable_name_size = 1024;
+ efi_status_t status = EFI_NOT_FOUND;
+ bool found;
+
+ /* Add new sysfs entries */
+ while (1) {
+ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
+ if (!variable_name) {
+ pr_err("efivars: Memory allocation failed.\n");
+ return;
+ }
+
+ spin_lock_irq(&efivars->lock);
+ found = false;
+ while (1) {
+ variable_name_size = 1024;
+ status = efivars->ops->get_next_variable(
+ &variable_name_size,
+ variable_name,
+ &vendor);
+ if (status != EFI_SUCCESS) {
+ break;
+ } else {
+ if (!variable_is_present(variable_name,
+ &vendor)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ spin_unlock_irq(&efivars->lock);
+
+ if (!found) {
+ kfree(variable_name);
+ break;
+ } else
+ efivar_create_sysfs_entry(efivars,
+ variable_name_size,
+ variable_name, &vendor);
+ }
+}
+
/*
* Let's not leave out systab information that snuck into
* the efivars driver
@@ -1065,11 +1629,18 @@ efivar_create_sysfs_entry(struct efivars *efivars,
efi_char16_t *variable_name,
efi_guid_t *vendor_guid)
{
- int i, short_name_size = variable_name_size / sizeof(efi_char16_t) + 38;
+ int i, short_name_size;
char *short_name;
struct efivar_entry *new_efivar;
- short_name = kzalloc(short_name_size + 1, GFP_KERNEL);
+ /*
+ * Length of the variable bytes in ASCII, plus the '-' separator,
+ * plus the GUID, plus trailing NUL
+ */
+ short_name_size = variable_name_size / sizeof(efi_char16_t)
+ + 1 + GUID_LEN + 1;
+
+ short_name = kzalloc(short_name_size, GFP_KERNEL);
new_efivar = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
if (!short_name || !new_efivar) {
@@ -1107,9 +1678,9 @@ efivar_create_sysfs_entry(struct efivars *efivars,
kfree(short_name);
short_name = NULL;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
list_add(&new_efivar->list, &efivars->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return 0;
}
@@ -1178,9 +1749,9 @@ void unregister_efivars(struct efivars *efivars)
struct efivar_entry *entry, *n;
list_for_each_entry_safe(entry, n, &efivars->list, list) {
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
list_del(&entry->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
efivar_unregister(entry);
}
if (efivars->new_var)
@@ -1189,6 +1760,7 @@ void unregister_efivars(struct efivars *efivars)
sysfs_remove_bin_file(&efivars->kset->kobj, efivars->del_var);
kfree(efivars->new_var);
kfree(efivars->del_var);
+ kobject_put(efivars->kobject);
kset_unregister(efivars->kset);
}
EXPORT_SYMBOL_GPL(unregister_efivars);
@@ -1220,6 +1792,14 @@ int register_efivars(struct efivars *efivars,
goto out;
}
+ efivars->kobject = kobject_create_and_add("efivars", parent_kobj);
+ if (!efivars->kobject) {
+ pr_err("efivars: Subsystem registration failed.\n");
+ error = -ENOMEM;
+ kset_unregister(efivars->kset);
+ goto out;
+ }
+
/*
* Per EFI spec, the maximum storage allocated for both
* the variable name and variable data is 1024 bytes.
@@ -1262,6 +1842,8 @@ int register_efivars(struct efivars *efivars,
pstore_register(&efivars->efi_pstore_info);
}
+ register_filesystem(&efivarfs_type);
+
out:
kfree(variable_name);
@@ -1269,9 +1851,6 @@ out:
}
EXPORT_SYMBOL_GPL(register_efivars);
-static struct efivars __efivars;
-static struct efivar_operations ops;
-
/*
* For now we register the efi subsystem with the firmware subsystem
* and the vars subsystem with the efi subsystem. In the future, it
@@ -1288,7 +1867,7 @@ efivars_init(void)
printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
EFIVARS_DATE);
- if (!efi_enabled)
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;
/* For now we'll register the efi directory at /sys/firmware/efi */
@@ -1302,6 +1881,7 @@ efivars_init(void)
ops.set_variable = efi.set_variable;
ops.get_next_variable = efi.get_next_variable;
ops.query_variable_info = efi.query_variable_info;
+
error = register_efivars(&__efivars, &ops, efi_kobj);
if (error)
goto err_put;
@@ -1327,7 +1907,9 @@ err_put:
static void __exit
efivars_exit(void)
{
- if (efi_enabled) {
+ cancel_work_sync(&efivar_work);
+
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
unregister_efivars(&__efivars);
kobject_put(efi_kobj);
}
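The efi_enabled conversion visible here (and in the iBFT hunk below) turns a single boolean into a per-facility test, so code can distinguish "booted via EFI" from "EFI runtime services are callable". A hedged sketch of the resulting checks:

        if (!efi_enabled(EFI_BOOT))             /* not an EFI boot at all */
                return 0;
        if (!efi_enabled(EFI_RUNTIME_SERVICES)) /* no GetVariable/SetVariable at runtime */
                return 0;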
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 4da4eb9ae926..2224f1dc074b 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!efi_enabled)
+ if (!efi_enabled(EFI_BOOT))
find_ibft_in_mem();
if (ibft_addr) {
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 90723e65b081..0b5b5f619c75 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
+#include <linux/mm.h>
/*
* Data types ------------------------------------------------------------------
@@ -52,6 +53,9 @@ static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type);
+
/*
* Static data -----------------------------------------------------------------
*/
@@ -79,7 +83,52 @@ static const struct sysfs_ops memmap_attr_ops = {
.show = memmap_attr_show,
};
-static struct kobj_type memmap_ktype = {
+/* Firmware memory map entries. */
+static LIST_HEAD(map_entries);
+static DEFINE_SPINLOCK(map_entries_lock);
+
+/*
+ * For memory hotplug, there is no way to free memory map entries allocated
+ * by bootmem after the system is up. So when we hot-remove memory whose
+ * map entry is allocated by bootmem, we need to remember the storage and
+ * reuse it when the memory is hot-added again.
+ */
+static LIST_HEAD(map_entries_bootmem);
+static DEFINE_SPINLOCK(map_entries_bootmem_lock);
+
+
+static inline struct firmware_map_entry *
+to_memmap_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct firmware_map_entry, kobj);
+}
+
+static void __meminit release_firmware_map_entry(struct kobject *kobj)
+{
+ struct firmware_map_entry *entry = to_memmap_entry(kobj);
+
+ if (PageReserved(virt_to_page(entry))) {
+ /*
+ * Remember the storage allocated by bootmem, and reuse it when
+ * the memory is hot-added again. The entry will be added to
+ * map_entries_bootmem here, and deleted from &map_entries in
+ * firmware_map_remove_entry().
+ */
+ if (firmware_map_find_entry(entry->start, entry->end,
+ entry->type)) {
+ spin_lock(&map_entries_bootmem_lock);
+ list_add(&entry->list, &map_entries_bootmem);
+ spin_unlock(&map_entries_bootmem_lock);
+ }
+
+ return;
+ }
+
+ kfree(entry);
+}
+
+static struct kobj_type __refdata memmap_ktype = {
+ .release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
.default_attrs = def_attrs,
};
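The PageReserved() test in the release callback works because the two allocation paths for map entries differ; a sketch of both (firmware_map_add_early() using a bootmem allocation is an assumption about the unchanged part of this file, while firmware_map_add_hotplug()'s kzalloc is visible below):

        /* early boot: bootmem storage, its pages stay PG_reserved forever */
        entry = alloc_bootmem(sizeof(struct firmware_map_entry));

        /* memory hotplug: ordinary slab memory, safe to kfree() on release */
        entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);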
@@ -88,13 +137,6 @@ static struct kobj_type memmap_ktype = {
* Registration functions ------------------------------------------------------
*/
-/*
- * Firmware memory map entries. No locking is needed because the
- * firmware_map_add() and firmware_map_add_early() functions are called
- * in firmware initialisation code in one single thread of execution.
- */
-static LIST_HEAD(map_entries);
-
/**
* firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
@@ -118,11 +160,25 @@ static int firmware_map_add_entry(u64 start, u64 end,
INIT_LIST_HEAD(&entry->list);
kobject_init(&entry->kobj, &memmap_ktype);
+ spin_lock(&map_entries_lock);
list_add_tail(&entry->list, &map_entries);
+ spin_unlock(&map_entries_lock);
return 0;
}
+/**
+ * firmware_map_remove_entry() - Does the real work to remove a firmware
+ * memmap entry.
+ * @entry: removed entry.
+ *
+ * The caller must hold map_entries_lock, and release it properly.
+ **/
+static inline void firmware_map_remove_entry(struct firmware_map_entry *entry)
+{
+ list_del(&entry->list);
+}
+
/*
* Add memmap entry on sysfs
*/
@@ -144,6 +200,78 @@ static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
return 0;
}
+/*
+ * Remove memmap entry on sysfs
+ */
+static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+ kobject_put(&entry->kobj);
+}
+
+/*
+ * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ * @list: In which to find the entry.
+ *
+ * This function finds the memmap entry of a given memory range in a
+ * given list. The caller must hold map_entries_lock, and must not release
+ * the lock until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_in_list(u64 start, u64 end, const char *type,
+ struct list_head *list)
+{
+ struct firmware_map_entry *entry;
+
+ list_for_each_entry(entry, list, list)
+ if ((entry->start == start) && (entry->end == end) &&
+ (!strcmp(entry->type, type))) {
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * firmware_map_find_entry() - Search memmap entry in map_entries.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function finds the memmap entry of a given memory range.
+ * The caller must hold map_entries_lock, and must not release the lock
+ * until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type, &map_entries);
+}
+
+/*
+ * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function is similar to firmware_map_find_entry except that it finds the
+ * given entry in map_entries_bootmem.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type,
+ &map_entries_bootmem);
+}
+
/**
* firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
* memory hotplug.
@@ -161,9 +289,19 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
- if (!entry)
- return -ENOMEM;
+ entry = firmware_map_find_entry_bootmem(start, end, type);
+ if (!entry) {
+ entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+ } else {
+ /* Reuse storage allocated by bootmem. */
+ spin_lock(&map_entries_bootmem_lock);
+ list_del(&entry->list);
+ spin_unlock(&map_entries_bootmem_lock);
+
+ memset(entry, 0, sizeof(*entry));
+ }
firmware_map_add_entry(start, end, type, entry);
/* create the memmap entry */
@@ -196,6 +334,36 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
return firmware_map_add_entry(start, end, type, entry);
}
+/**
+ * firmware_map_remove() - remove a firmware mapping entry
+ * @start: Start of the memory range.
+ * @end: End of the memory range.
+ * @type: Type of the memory range.
+ *
+ * removes a firmware mapping entry.
+ *
+ * Returns 0 on success, or -EINVAL if no entry.
+ **/
+int __meminit firmware_map_remove(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ spin_lock(&map_entries_lock);
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (!entry) {
+ spin_unlock(&map_entries_lock);
+ return -EINVAL;
+ }
+
+ firmware_map_remove_entry(entry);
+ spin_unlock(&map_entries_lock);
+
+ /* remove the memmap entry */
+ remove_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
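firmware_map_remove() is the tear-down counterpart of firmware_map_add_hotplug(): a hot-remove path would pass the same exclusive end and type string it used when adding the range. Hypothetical caller sketch (the "System RAM" type and the start/size variables are illustrative):

        firmware_map_add_hotplug(start, start + size, "System RAM");   /* on hot-add */
        /* ... memory is used, then offlined ... */
        firmware_map_remove(start, start + size, "System RAM");        /* on hot-remove */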
+
/*
* Sysfs functions -------------------------------------------------------------
*/
@@ -217,8 +385,10 @@ static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
}
-#define to_memmap_attr(_attr) container_of(_attr, struct memmap_attribute, attr)
-#define to_memmap_entry(obj) container_of(obj, struct firmware_map_entry, kobj)
+static inline struct memmap_attribute *to_memmap_attr(struct attribute *attr)
+{
+ return container_of(attr, struct memmap_attribute, attr);
+}
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index bf892bd68c17..b89d250f56e7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -66,7 +66,7 @@ config DEBUG_GPIO
config GPIO_SYSFS
bool "/sys/class/gpio/... (sysfs interface)"
- depends on SYSFS && EXPERIMENTAL
+ depends on SYSFS
help
Say Y here to add a sysfs interface for GPIOs.
@@ -172,6 +172,7 @@ config GPIO_MSM_V2
config GPIO_MVEBU
def_bool y
depends on PLAT_ORION
+ depends on OF
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
@@ -276,7 +277,7 @@ config GPIO_ICH
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
select MFD_VX855
help
@@ -598,7 +599,7 @@ config GPIO_TIMBERDALE
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
select MFD_RDC321X
help
@@ -656,12 +657,6 @@ config GPIO_JANZ_TTL
This driver provides support for driving the pins in output
mode only. Input mode is not supported.
-config GPIO_AB8500
- bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
- depends on AB8500_CORE && BROKEN
- help
- Select this to enable the AB8500 IC GPIO driver
-
config GPIO_TPS6586X
bool "TPS6586X GPIO"
depends on MFD_TPS6586X
@@ -683,4 +678,17 @@ config GPIO_MSIC
Enable support for GPIO on intel MSIC controllers found in
intel MID devices
+comment "USB GPIO expanders:"
+
+config GPIO_VIPERBOARD
+ tristate "Viperboard GPIO a & b support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the GPIO signals of Nano River
+ Technologies Viperboard. There are two GPIO chips on the
+ board: gpioa and gpiob.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 76b344683251..45a388c21d04 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
-obj-$(CONFIG_GPIO_AB8500) += gpio-ab8500.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
@@ -76,6 +75,7 @@ obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VT8500) += gpio-vt8500.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
deleted file mode 100644
index 983ad425f0ac..000000000000
--- a/drivers/gpio/gpio-ab8500.c
+++ /dev/null
@@ -1,520 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2011
- *
- * Author: BIBEK BASU <bibek.basu@stericsson.com>
- * License terms: GNU General Public License (GPL) version 2
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/ab8500.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/gpio.h>
-
-/*
- * GPIO registers offset
- * Bank: 0x10
- */
-#define AB8500_GPIO_SEL1_REG 0x00
-#define AB8500_GPIO_SEL2_REG 0x01
-#define AB8500_GPIO_SEL3_REG 0x02
-#define AB8500_GPIO_SEL4_REG 0x03
-#define AB8500_GPIO_SEL5_REG 0x04
-#define AB8500_GPIO_SEL6_REG 0x05
-
-#define AB8500_GPIO_DIR1_REG 0x10
-#define AB8500_GPIO_DIR2_REG 0x11
-#define AB8500_GPIO_DIR3_REG 0x12
-#define AB8500_GPIO_DIR4_REG 0x13
-#define AB8500_GPIO_DIR5_REG 0x14
-#define AB8500_GPIO_DIR6_REG 0x15
-
-#define AB8500_GPIO_OUT1_REG 0x20
-#define AB8500_GPIO_OUT2_REG 0x21
-#define AB8500_GPIO_OUT3_REG 0x22
-#define AB8500_GPIO_OUT4_REG 0x23
-#define AB8500_GPIO_OUT5_REG 0x24
-#define AB8500_GPIO_OUT6_REG 0x25
-
-#define AB8500_GPIO_PUD1_REG 0x30
-#define AB8500_GPIO_PUD2_REG 0x31
-#define AB8500_GPIO_PUD3_REG 0x32
-#define AB8500_GPIO_PUD4_REG 0x33
-#define AB8500_GPIO_PUD5_REG 0x34
-#define AB8500_GPIO_PUD6_REG 0x35
-
-#define AB8500_GPIO_IN1_REG 0x40
-#define AB8500_GPIO_IN2_REG 0x41
-#define AB8500_GPIO_IN3_REG 0x42
-#define AB8500_GPIO_IN4_REG 0x43
-#define AB8500_GPIO_IN5_REG 0x44
-#define AB8500_GPIO_IN6_REG 0x45
-#define AB8500_GPIO_ALTFUN_REG 0x45
-#define ALTFUN_REG_INDEX 6
-#define AB8500_NUM_GPIO 42
-#define AB8500_NUM_VIR_GPIO_IRQ 16
-
-enum ab8500_gpio_action {
- NONE,
- STARTUP,
- SHUTDOWN,
- MASK,
- UNMASK
-};
-
-struct ab8500_gpio {
- struct gpio_chip chip;
- struct ab8500 *parent;
- struct device *dev;
- struct mutex lock;
- u32 irq_base;
- enum ab8500_gpio_action irq_action;
- u16 rising;
- u16 falling;
-};
-/**
- * to_ab8500_gpio() - get the pointer to ab8500_gpio
- * @chip: Member of the structure ab8500_gpio
- */
-static inline struct ab8500_gpio *to_ab8500_gpio(struct gpio_chip *chip)
-{
- return container_of(chip, struct ab8500_gpio, chip);
-}
-
-static int ab8500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
- unsigned offset, int val)
-{
- struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
- u8 pos = offset % 8;
- int ret;
-
- reg = reg + (offset / 8);
- ret = abx500_mask_and_set_register_interruptible(ab8500_gpio->dev,
- AB8500_MISC, reg, 1 << pos, val << pos);
- if (ret < 0)
- dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
- return ret;
-}
-/**
- * ab8500_gpio_get() - Get the particular GPIO value
- * @chip: Gpio device
- * @offset: GPIO number to read
- */
-static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
- u8 mask = 1 << (offset % 8);
- u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
- int ret;
- u8 data;
- ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
- reg, &data);
- if (ret < 0) {
- dev_err(ab8500_gpio->dev, "%s read failed\n", __func__);
- return ret;
- }
- return (data & mask) >> (offset % 8);
-}
-
-static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
-{
- struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
- int ret;
- /* Write the data */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1);
- if (ret < 0)
- dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
-}
-
-static int ab8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int val)
-{
- int ret;
- /* set direction as output */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
- if (ret < 0)
- return ret;
- /* disable pull down */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
- if (ret < 0)
- return ret;
- /* set the output as 1 or 0 */
- return ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
-
-}
-
-static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- /* set the register as input */
- return ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
-}
-
-static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- /*
- * Only some GPIOs are interrupt capable, and they are
- * organized in discontiguous clusters:
- *
- * GPIO6 to GPIO13
- * GPIO24 and GPIO25
- * GPIO36 to GPIO41
- */
- static struct ab8500_gpio_irq_cluster {
- int start;
- int end;
- } clusters[] = {
- {.start = 6, .end = 13},
- {.start = 24, .end = 25},
- {.start = 36, .end = 41},
- };
- struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
- int base = ab8500_gpio->irq_base;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(clusters); i++) {
- struct ab8500_gpio_irq_cluster *cluster = &clusters[i];
-
- if (offset >= cluster->start && offset <= cluster->end)
- return base + offset - cluster->start;
-
- /* Advance by the number of gpios in this cluster */
- base += cluster->end - cluster->start + 1;
- }
-
- return -EINVAL;
-}
-
-static struct gpio_chip ab8500gpio_chip = {
- .label = "ab8500_gpio",
- .owner = THIS_MODULE,
- .direction_input = ab8500_gpio_direction_input,
- .get = ab8500_gpio_get,
- .direction_output = ab8500_gpio_direction_output,
- .set = ab8500_gpio_set,
- .to_irq = ab8500_gpio_to_irq,
-};
-
-static unsigned int irq_to_rising(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- int offset = irq - ab8500_gpio->irq_base;
- int new_irq = offset + AB8500_INT_GPIO6R
- + ab8500_gpio->parent->irq_base;
- return new_irq;
-}
-
-static unsigned int irq_to_falling(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- int offset = irq - ab8500_gpio->irq_base;
- int new_irq = offset + AB8500_INT_GPIO6F
- + ab8500_gpio->parent->irq_base;
- return new_irq;
-
-}
-
-static unsigned int rising_to_irq(unsigned int irq, void *dev)
-{
- struct ab8500_gpio *ab8500_gpio = dev;
- int offset = irq - AB8500_INT_GPIO6R
- - ab8500_gpio->parent->irq_base ;
- int new_irq = offset + ab8500_gpio->irq_base;
- return new_irq;
-}
-
-static unsigned int falling_to_irq(unsigned int irq, void *dev)
-{
- struct ab8500_gpio *ab8500_gpio = dev;
- int offset = irq - AB8500_INT_GPIO6F
- - ab8500_gpio->parent->irq_base ;
- int new_irq = offset + ab8500_gpio->irq_base;
- return new_irq;
-
-}
-
-/*
- * IRQ handler
- */
-
-static irqreturn_t handle_rising(int irq, void *dev)
-{
-
- handle_nested_irq(rising_to_irq(irq , dev));
- return IRQ_HANDLED;
-}
-
-static irqreturn_t handle_falling(int irq, void *dev)
-{
-
- handle_nested_irq(falling_to_irq(irq, dev));
- return IRQ_HANDLED;
-}
-
-static void ab8500_gpio_irq_lock(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- mutex_lock(&ab8500_gpio->lock);
-}
-
-static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- int offset = irq - ab8500_gpio->irq_base;
- bool rising = ab8500_gpio->rising & BIT(offset);
- bool falling = ab8500_gpio->falling & BIT(offset);
- int ret;
-
- switch (ab8500_gpio->irq_action) {
- case STARTUP:
- if (rising)
- ret = request_threaded_irq(irq_to_rising(irq),
- NULL, handle_rising,
- IRQF_TRIGGER_RISING,
- "ab8500-gpio-r", ab8500_gpio);
- if (falling)
- ret = request_threaded_irq(irq_to_falling(irq),
- NULL, handle_falling,
- IRQF_TRIGGER_FALLING,
- "ab8500-gpio-f", ab8500_gpio);
- break;
- case SHUTDOWN:
- if (rising)
- free_irq(irq_to_rising(irq), ab8500_gpio);
- if (falling)
- free_irq(irq_to_falling(irq), ab8500_gpio);
- break;
- case MASK:
- if (rising)
- disable_irq(irq_to_rising(irq));
- if (falling)
- disable_irq(irq_to_falling(irq));
- break;
- case UNMASK:
- if (rising)
- enable_irq(irq_to_rising(irq));
- if (falling)
- enable_irq(irq_to_falling(irq));
- break;
- case NONE:
- break;
- }
- ab8500_gpio->irq_action = NONE;
- ab8500_gpio->rising &= ~(BIT(offset));
- ab8500_gpio->falling &= ~(BIT(offset));
- mutex_unlock(&ab8500_gpio->lock);
-}
-
-
-static void ab8500_gpio_irq_mask(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- ab8500_gpio->irq_action = MASK;
-}
-
-static void ab8500_gpio_irq_unmask(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- ab8500_gpio->irq_action = UNMASK;
-}
-
-static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- int offset = irq - ab8500_gpio->irq_base;
-
- if (type == IRQ_TYPE_EDGE_BOTH) {
- ab8500_gpio->rising = BIT(offset);
- ab8500_gpio->falling = BIT(offset);
- } else if (type == IRQ_TYPE_EDGE_RISING) {
- ab8500_gpio->rising = BIT(offset);
- } else {
- ab8500_gpio->falling = BIT(offset);
- }
- return 0;
-}
-
-unsigned int ab8500_gpio_irq_startup(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- ab8500_gpio->irq_action = STARTUP;
- return 0;
-}
-
-void ab8500_gpio_irq_shutdown(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- ab8500_gpio->irq_action = SHUTDOWN;
-}
-
-static struct irq_chip ab8500_gpio_irq_chip = {
- .name = "ab8500-gpio",
- .startup = ab8500_gpio_irq_startup,
- .shutdown = ab8500_gpio_irq_shutdown,
- .bus_lock = ab8500_gpio_irq_lock,
- .bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
- .mask = ab8500_gpio_irq_mask,
- .unmask = ab8500_gpio_irq_unmask,
- .set_type = ab8500_gpio_irq_set_type,
-};
-
-static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
-{
- u32 base = ab8500_gpio->irq_base;
- int irq;
-
- for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
- set_irq_chip_data(irq, ab8500_gpio);
- set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
- handle_simple_irq);
- set_irq_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- set_irq_noprobe(irq);
-#endif
- }
-
- return 0;
-}
-
-static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
-{
- int base = ab8500_gpio->irq_base;
- int irq;
-
- for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ; irq++) {
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
- }
-}
-
-static int ab8500_gpio_probe(struct platform_device *pdev)
-{
- struct ab8500_platform_data *ab8500_pdata =
- dev_get_platdata(pdev->dev.parent);
- struct ab8500_gpio_platform_data *pdata;
- struct ab8500_gpio *ab8500_gpio;
- int ret;
- int i;
-
- pdata = ab8500_pdata->gpio;
- if (!pdata) {
- dev_err(&pdev->dev, "gpio platform data missing\n");
- return -ENODEV;
- }
-
- ab8500_gpio = kzalloc(sizeof(struct ab8500_gpio), GFP_KERNEL);
- if (ab8500_gpio == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- return -ENOMEM;
- }
- ab8500_gpio->dev = &pdev->dev;
- ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent);
- ab8500_gpio->chip = ab8500gpio_chip;
- ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO;
- ab8500_gpio->chip.dev = &pdev->dev;
- ab8500_gpio->chip.base = pdata->gpio_base;
- ab8500_gpio->irq_base = pdata->irq_base;
- /* initialize the lock */
- mutex_init(&ab8500_gpio->lock);
- /*
- * AB8500 core will handle and clear the IRQ
- * configre GPIO based on config-reg value.
- * These values are for selecting the PINs as
- * GPIO or alternate function
- */
- for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) {
- ret = abx500_set_register_interruptible(ab8500_gpio->dev,
- AB8500_MISC, i,
- pdata->config_reg[i]);
- if (ret < 0)
- goto out_free;
- }
- ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
- AB8500_GPIO_ALTFUN_REG,
- pdata->config_reg[ALTFUN_REG_INDEX]);
- if (ret < 0)
- goto out_free;
-
- ret = ab8500_gpio_irq_init(ab8500_gpio);
- if (ret)
- goto out_free;
- ret = gpiochip_add(&ab8500_gpio->chip);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpiochip: %d\n",
- ret);
- goto out_rem_irq;
- }
- platform_set_drvdata(pdev, ab8500_gpio);
- return 0;
-
-out_rem_irq:
- ab8500_gpio_irq_remove(ab8500_gpio);
-out_free:
- mutex_destroy(&ab8500_gpio->lock);
- kfree(ab8500_gpio);
- return ret;
-}
-
-/*
- * ab8500_gpio_remove() - remove Ab8500-gpio driver
- * @pdev : Platform device registered
- */
-static int ab8500_gpio_remove(struct platform_device *pdev)
-{
- struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev);
- int ret;
-
- ret = gpiochip_remove(&ab8500_gpio->chip);
- if (ret < 0) {
- dev_err(ab8500_gpio->dev, "unable to remove gpiochip: %d\n",
- ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, NULL);
- mutex_destroy(&ab8500_gpio->lock);
- kfree(ab8500_gpio);
-
- return 0;
-}
-
-static struct platform_driver ab8500_gpio_driver = {
- .driver = {
- .name = "ab8500-gpio",
- .owner = THIS_MODULE,
- },
- .probe = ab8500_gpio_probe,
- .remove = ab8500_gpio_remove,
-};
-
-static int __init ab8500_gpio_init(void)
-{
- return platform_driver_register(&ab8500_gpio_driver);
-}
-arch_initcall(ab8500_gpio_init);
-
-static void __exit ab8500_gpio_exit(void)
-{
- platform_driver_unregister(&ab8500_gpio_driver);
-}
-module_exit(ab8500_gpio_exit);
-
-MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
-MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO");
-MODULE_ALIAS("platform:ab8500-gpio");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index a05aacd2777a..29b11e9b6a78 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -185,7 +185,11 @@ static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
struct da9052_gpio *gpio = to_da9052_gpio(gc);
struct da9052 *da9052 = gpio->da9052;
- return da9052->irq_base + DA9052_IRQ_GPI0 + offset;
+ int irq;
+
+ irq = regmap_irq_get_virq(da9052->irq_data, DA9052_IRQ_GPI0 + offset);
+
+ return irq;
}
static struct gpio_chip reference_gp = {
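Because the MFD core now maps its interrupts through a regmap irq domain, virq numbers are allocated dynamically and the old irq_base arithmetic no longer works; consumers simply request whatever gpio_to_irq() returns. A hypothetical consumer sketch (my_handler, gpio_num and dev are illustrative):

        int irq = gpio_to_irq(gpio_num);

        if (irq > 0)
                ret = request_irq(irq, my_handler, IRQF_TRIGGER_RISING,
                                  "da9052-gpi", dev);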
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 55d83c7d9c7f..fd6dfe382f13 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -126,7 +126,7 @@ static int da9055_gpio_to_irq(struct gpio_chip *gc, u32 offset)
DA9055_IRQ_GPI0 + offset);
}
-static struct gpio_chip reference_gp __devinitdata = {
+static struct gpio_chip reference_gp = {
.label = "da9055-gpio",
.owner = THIS_MODULE,
.get = da9055_gpio_get,
@@ -139,7 +139,7 @@ static struct gpio_chip reference_gp __devinitdata = {
.base = -1,
};
-static int __devinit da9055_gpio_probe(struct platform_device *pdev)
+static int da9055_gpio_probe(struct platform_device *pdev)
{
struct da9055_gpio *gpio;
struct da9055_pdata *pdata;
@@ -170,7 +170,7 @@ err_mem:
return ret;
}
-static int __devexit da9055_gpio_remove(struct platform_device *pdev)
+static int da9055_gpio_remove(struct platform_device *pdev)
{
struct da9055_gpio *gpio = platform_get_drvdata(pdev);
@@ -179,7 +179,7 @@ static int __devexit da9055_gpio_remove(struct platform_device *pdev)
static struct platform_driver da9055_gpio_driver = {
.probe = da9055_gpio_probe,
- .remove = __devexit_p(da9055_gpio_remove),
+ .remove = da9055_gpio_remove,
.driver = {
.name = "da9055-gpio",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 6cc87ac8e019..6f2306db8591 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -390,6 +390,7 @@ static int ichx_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
+ spin_lock_init(&ichx_priv.lock);
res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
ichx_priv.use_gpio = ich_info->use_gpio;
err = ichx_gpio_request_regions(res_base, pdev->name,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index d767b534c4af..7472182967ce 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -33,6 +33,7 @@
* interrupts.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/irq.h>
@@ -41,7 +42,6 @@
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
-#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
/*
@@ -469,19 +469,6 @@ static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
-static struct platform_device_id mvebu_gpio_ids[] = {
- {
- .name = "orion-gpio",
- }, {
- .name = "mv78200-gpio",
- }, {
- .name = "armadaxp-gpio",
- }, {
- /* sentinel */
- },
-};
-MODULE_DEVICE_TABLE(platform, mvebu_gpio_ids);
-
static struct of_device_id mvebu_gpio_of_match[] = {
{
.compatible = "marvell,orion-gpio",
@@ -555,17 +542,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = 0;
-#ifdef CONFIG_OF
mvchip->chip.of_node = np;
-#endif
spin_lock_init(&mvchip->lock);
- mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
- if (! mvchip->membase) {
- dev_err(&pdev->dev, "Cannot ioremap\n");
- kfree(mvchip->chip.label);
- return -ENOMEM;
- }
+ mvchip->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mvchip->membase))
+ return PTR_ERR(mvchip->membase);
/* The Armada XP has a second range of registers for the
* per-CPU registers */
@@ -573,16 +555,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (! res) {
dev_err(&pdev->dev, "Cannot get memory resource\n");
- kfree(mvchip->chip.label);
return -ENODEV;
}
- mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);
- if (! mvchip->percpu_membase) {
- dev_err(&pdev->dev, "Cannot ioremap\n");
- kfree(mvchip->chip.label);
- return -ENOMEM;
- }
+ mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(mvchip->percpu_membase))
+ return PTR_ERR(mvchip->percpu_membase);
}
/*
@@ -641,7 +620,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
if (mvchip->irqbase < 0) {
dev_err(&pdev->dev, "no irqs\n");
- kfree(mvchip->chip.label);
return -ENOMEM;
}
@@ -649,7 +627,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->membase, handle_level_irq);
if (! gc) {
dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- kfree(mvchip->chip.label);
return -ENOMEM;
}
@@ -684,7 +661,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
kfree(gc);
- kfree(mvchip->chip.label);
return -ENODEV;
}
@@ -698,7 +674,6 @@ static struct platform_driver mvebu_gpio_driver = {
.of_match_table = mvebu_gpio_of_match,
},
.probe = mvebu_gpio_probe,
- .id_table = mvebu_gpio_ids,
};
static int __init mvebu_gpio_init(void)
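Several probe functions in this patch make the same conversion from devm_request_and_ioremap() to devm_ioremap_resource(); the new helper returns an ERR_PTR and logs its own diagnostics, which is why the per-driver NULL checks and error messages disappear. The resulting idiom, as a sketch (res/base named as in the drivers here):

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* -EINVAL, -EBUSY or -ENOMEM, already logged */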
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index fa2a63cad32e..45d97c46831a 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -20,6 +20,7 @@
* MA 02110-1301, USA.
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -253,12 +254,14 @@ static int mxs_gpio_probe(struct platform_device *pdev)
parent = of_get_parent(np);
base = of_iomap(parent, 0);
of_node_put(parent);
+ if (!base)
+ return -EADDRNOTAVAIL;
} else {
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_request_and_ioremap(&pdev->dev, iores);
+ base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
}
- if (!base)
- return -EADDRNOTAVAIL;
}
port->base = base;
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 01f7fe955590..b3643ff007e4 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -32,14 +32,12 @@
#include <mach/hardware.h>
#include <mach/map.h>
-#include <mach/regs-clock.h>
#include <mach/regs-gpio.h>
#include <plat/cpu.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
-#include <plat/gpio-fns.h>
#include <plat/pm.h>
int samsung_gpio_setpull_updown(struct samsung_gpio_chip *chip,
@@ -446,7 +444,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
};
#endif
-#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250)
static struct samsung_gpio_cfg exynos_gpio_cfg = {
.set_pull = exynos_gpio_setpull,
.get_pull = exynos_gpio_getpull,
@@ -2446,7 +2444,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_1[] = {
{
.chip = {
@@ -2614,7 +2612,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_2[] = {
{
.chip = {
@@ -2675,7 +2673,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_3[] = {
{
.chip = {
@@ -2711,7 +2709,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_4[] = {
{
.chip = {
@@ -3010,7 +3008,7 @@ static __init int samsung_gpiolib_init(void)
int i, nr_chips;
int group = 0;
-#ifdef CONFIG_PINCTRL_SAMSUNG
+#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)
/*
* This gpio driver includes support for device tree support and there
* are platforms using it. In order to maintain compatibility with those
@@ -3024,8 +3022,9 @@ static __init int samsung_gpiolib_init(void)
*/
struct device_node *pctrl_np;
static const struct of_device_id exynos_pinctrl_ids[] = {
- { .compatible = "samsung,pinctrl-exynos4210", },
- { .compatible = "samsung,pinctrl-exynos4x12", },
+ { .compatible = "samsung,exynos4210-pinctrl", },
+ { .compatible = "samsung,exynos4x12-pinctrl", },
+ { .compatible = "samsung,exynos5440-pinctrl", },
};
for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
if (pctrl_np && of_device_is_available(pctrl_np))
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 5f45fc4ed5d1..7a4bf7c0d98f 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -140,11 +140,9 @@ static int spics_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- spics->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!spics->base) {
- dev_err(&pdev->dev, "request and ioremap fail\n");
- return -ENOMEM;
- }
+ spics->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spics->base))
+ return PTR_ERR(spics->base);
if (of_property_read_u32(np, "st-spics,peripcfg-reg",
&spics->perip_cfg))
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 85841ee70b17..c20e05151212 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -214,11 +214,10 @@ static int xway_stp_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- chip->virt = devm_request_and_ioremap(&pdev->dev, res);
- if (!chip->virt) {
- dev_err(&pdev->dev, "failed to remap STP memory\n");
- return -ENOMEM;
- }
+ chip->virt = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chip->virt))
+ return PTR_ERR(chip->virt);
+
chip->gc.dev = &pdev->dev;
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 63cb643d4b5a..414ad912232f 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -450,11 +451,9 @@ static int tegra_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!regs) {
- dev_err(&pdev->dev, "Couldn't ioremap regs\n");
- return -ENODEV;
- }
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
for (i = 0; i < tegra_gpio_bank_count; i++) {
for (j = 0; j < 4; j++) {
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index c1b82da56504..29e8e750bd49 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -80,6 +80,14 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
val, mask);
}
+static int tps6586x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
+
+ return tps6586x_irq_get_virq(tps6586x_gpio->parent,
+ TPS6586X_INT_PLDO_0 + offset);
+}
+
static int tps6586x_gpio_probe(struct platform_device *pdev)
{
struct tps6586x_platform_data *pdata;
@@ -106,6 +114,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
+ tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
#ifdef CONFIG_OF_GPIO
tps6586x_gpio->gpio_chip.of_node = pdev->dev.parent->of_node;
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 0634ceea3c24..cc53cab8df2a 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -319,7 +319,7 @@ static void ts5500_disable_irq(struct ts5500_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int __devinit ts5500_dio_probe(struct platform_device *pdev)
+static int ts5500_dio_probe(struct platform_device *pdev)
{
enum ts5500_blocks block = platform_get_device_id(pdev)->driver_data;
struct ts5500_dio_platform_data *pdata = pdev->dev.platform_data;
@@ -432,7 +432,7 @@ cleanup:
return ret;
}
-static int __devexit ts5500_dio_remove(struct platform_device *pdev)
+static int ts5500_dio_remove(struct platform_device *pdev)
{
struct ts5500_priv *priv = platform_get_drvdata(pdev);
@@ -455,7 +455,7 @@ static struct platform_driver ts5500_dio_driver = {
.owner = THIS_MODULE,
},
.probe = ts5500_dio_probe,
- .remove = __devexit_p(ts5500_dio_remove),
+ .remove = ts5500_dio_remove,
.id_table = ts5500_dio_ids,
};
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 00329f2fc05b..9572aa137e6f 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -355,13 +355,13 @@ static struct gpio_chip twl_gpiochip = {
static int gpio_twl4030_pulls(u32 ups, u32 downs)
{
- u8 message[6];
+ u8 message[5];
unsigned i, gpio_bit;
/* For most pins, a pulldown was enabled by default.
* We should have data that's specific to this board.
*/
- for (gpio_bit = 1, i = 1; i < 6; i++) {
+ for (gpio_bit = 1, i = 0; i < 5; i++) {
u8 bit_mask;
unsigned j;
@@ -380,16 +380,16 @@ static int gpio_twl4030_pulls(u32 ups, u32 downs)
static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
{
- u8 message[4];
+ u8 message[3];
/* 30 msec of debouncing is always used for MMC card detect,
* and is optional for everything else.
*/
- message[1] = (debounce & 0xff) | (mmc_cd & 0x03);
+ message[0] = (debounce & 0xff) | (mmc_cd & 0x03);
debounce >>= 8;
- message[2] = (debounce & 0xff);
+ message[1] = (debounce & 0xff);
debounce >>= 8;
- message[3] = (debounce & 0x03);
+ message[2] = (debounce & 0x03);
return twl_i2c_write(TWL4030_MODULE_GPIO, message,
REG_GPIO_DEBEN1, 3);
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
new file mode 100644
index 000000000000..59d72391de26
--- /dev/null
+++ b/drivers/gpio/gpio-viperboard.c
@@ -0,0 +1,517 @@
+/*
+ * Nano River Technologies viperboard GPIO lib driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/gpio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_GPIOA_CLK_1MHZ 0
+#define VPRBRD_GPIOA_CLK_100KHZ 1
+#define VPRBRD_GPIOA_CLK_10KHZ 2
+#define VPRBRD_GPIOA_CLK_1KHZ 3
+#define VPRBRD_GPIOA_CLK_100HZ 4
+#define VPRBRD_GPIOA_CLK_10HZ 5
+
+#define VPRBRD_GPIOA_FREQ_DEFAULT 1000
+
+#define VPRBRD_GPIOA_CMD_CONT 0x00
+#define VPRBRD_GPIOA_CMD_PULSE 0x01
+#define VPRBRD_GPIOA_CMD_PWM 0x02
+#define VPRBRD_GPIOA_CMD_SETOUT 0x03
+#define VPRBRD_GPIOA_CMD_SETIN 0x04
+#define VPRBRD_GPIOA_CMD_SETINT 0x05
+#define VPRBRD_GPIOA_CMD_GETIN 0x06
+
+#define VPRBRD_GPIOB_CMD_SETDIR 0x00
+#define VPRBRD_GPIOB_CMD_SETVAL 0x01
+
+struct vprbrd_gpioa_msg {
+ u8 cmd;
+ u8 clk;
+ u8 offset;
+ u8 t1;
+ u8 t2;
+ u8 invert;
+ u8 pwmlevel;
+ u8 outval;
+ u8 risefall;
+ u8 answer;
+ u8 __fill;
+} __packed;
+
+struct vprbrd_gpiob_msg {
+ u8 cmd;
+ u16 val;
+ u16 mask;
+} __packed;
+
+struct vprbrd_gpio {
+ struct gpio_chip gpioa; /* gpio a related things */
+ u32 gpioa_out;
+ u32 gpioa_val;
+ struct gpio_chip gpiob; /* gpio b related things */
+ u32 gpiob_out;
+ u32 gpiob_val;
+ struct vprbrd *vb;
+};
+
+/* gpioa sampling clock module parameter */
+static unsigned char gpioa_clk;
+static unsigned int gpioa_freq = VPRBRD_GPIOA_FREQ_DEFAULT;
+module_param(gpioa_freq, uint, 0);
+MODULE_PARM_DESC(gpioa_freq,
+ "gpio-a sampling freq in Hz (default is 1000Hz) valid values: 10, 100, 1000, 10000, 100000, 1000000");
+
+/* ----- begin of gipo a chip -------------------------------------------- */
+
+static int vprbrd_gpioa_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret, answer, error = 0;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ /* if io is set to output, just return the saved value */
+ if (gpio->gpioa_out & (1 << offset))
+ return gpio->gpioa_val & (1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_GETIN;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = 0x00;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ error = -EREMOTEIO;
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_IN, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ answer = gamsg->answer & 0x01;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ error = -EREMOTEIO;
+
+ if (error)
+ return error;
+
+ return answer;
+}
+
+static void vprbrd_gpioa_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ if (gpio->gpioa_out & (1 << offset)) {
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gamsg,
+ sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ dev_err(chip->dev, "usb error setting pin value\n");
+ }
+}
+
+static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ gpio->gpioa_out &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETIN;
+ gamsg->clk = gpioa_clk;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = 0x00;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ gpio->gpioa_out |= (1 << offset);
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+/* ----- end of gpio a chip ---------------------------------------------- */
+
+/* ----- begin of gipo b chip -------------------------------------------- */
+
+static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
+ unsigned dir)
+{
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+ int ret;
+
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETDIR;
+ gbmsg->val = cpu_to_be16(dir << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int vprbrd_gpiob_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ u16 val;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+ /* if io is set to output, just return the saved value */
+ if (gpio->gpiob_out & (1 << offset))
+ return gpio->gpiob_val & (1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_IN, 0x0000,
+ 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ val = gbmsg->val;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ return ret;
+
+ /* cache the read values */
+ gpio->gpiob_val = be16_to_cpu(val);
+
+ return (gpio->gpiob_val >> offset) & 0x1;
+}
+
+static void vprbrd_gpiob_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+ if (gpio->gpiob_out & (1 << offset)) {
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
+ gbmsg->val = cpu_to_be16(value << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gbmsg,
+ sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ dev_err(chip->dev, "usb error setting pin value\n");
+ }
+}
+
+static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+
+ gpio->gpiob_out &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = vprbrd_gpiob_setdir(vb, offset, 0);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret)
+ dev_err(chip->dev, "usb error setting pin to input\n");
+
+ return ret;
+}
+
+static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+
+ gpio->gpiob_out |= (1 << offset);
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = vprbrd_gpiob_setdir(vb, offset, 1);
+ if (ret)
+ dev_err(chip->dev, "usb error setting pin to output\n");
+
+ mutex_unlock(&vb->lock);
+
+ vprbrd_gpiob_set(chip, offset, value);
+
+ return ret;
+}
+
+/* ----- end of gpio b chip ---------------------------------------------- */
+
+static int vprbrd_gpio_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_gpio *vb_gpio;
+ int ret;
+
+ vb_gpio = devm_kzalloc(&pdev->dev, sizeof(*vb_gpio), GFP_KERNEL);
+ if (vb_gpio == NULL)
+ return -ENOMEM;
+
+ vb_gpio->vb = vb;
+ /* registering gpio a */
+ vb_gpio->gpioa.label = "viperboard gpio a";
+ vb_gpio->gpioa.dev = &pdev->dev;
+ vb_gpio->gpioa.owner = THIS_MODULE;
+ vb_gpio->gpioa.base = -1;
+ vb_gpio->gpioa.ngpio = 16;
+ vb_gpio->gpioa.can_sleep = 1;
+ vb_gpio->gpioa.set = vprbrd_gpioa_set;
+ vb_gpio->gpioa.get = vprbrd_gpioa_get;
+ vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
+ vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
+ ret = gpiochip_add(&vb_gpio->gpioa);
+ if (ret < 0) {
+ dev_err(vb_gpio->gpioa.dev, "could not add gpio a");
+ goto err_gpioa;
+ }
+
+ /* registering gpio b */
+ vb_gpio->gpiob.label = "viperboard gpio b";
+ vb_gpio->gpiob.dev = &pdev->dev;
+ vb_gpio->gpiob.owner = THIS_MODULE;
+ vb_gpio->gpiob.base = -1;
+ vb_gpio->gpiob.ngpio = 16;
+ vb_gpio->gpiob.can_sleep = 1;
+ vb_gpio->gpiob.set = vprbrd_gpiob_set;
+ vb_gpio->gpiob.get = vprbrd_gpiob_get;
+ vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
+ vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
+ ret = gpiochip_add(&vb_gpio->gpiob);
+ if (ret < 0) {
+ dev_err(vb_gpio->gpiob.dev, "could not add gpio b");
+ goto err_gpiob;
+ }
+
+ platform_set_drvdata(pdev, vb_gpio);
+
+ return ret;
+
+err_gpiob:
+ ret = gpiochip_remove(&vb_gpio->gpioa);
+
+err_gpioa:
+ return ret;
+}
+
+static int vprbrd_gpio_remove(struct platform_device *pdev)
+{
+ struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&vb_gpio->gpiob);
+ if (ret == 0)
+ ret = gpiochip_remove(&vb_gpio->gpioa);
+
+ return ret;
+}
+
+static struct platform_driver vprbrd_gpio_driver = {
+ .driver.name = "viperboard-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = vprbrd_gpio_probe,
+ .remove = vprbrd_gpio_remove,
+};
+
+static int __init vprbrd_gpio_init(void)
+{
+ switch (gpioa_freq) {
+ case 1000000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_1MHZ;
+ break;
+ case 100000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_100KHZ;
+ break;
+ case 10000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_10KHZ;
+ break;
+ case 1000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+ break;
+ case 100:
+ gpioa_clk = VPRBRD_GPIOA_CLK_100HZ;
+ break;
+ case 10:
+ gpioa_clk = VPRBRD_GPIOA_CLK_10HZ;
+ break;
+ default:
+ pr_warn("invalid gpioa_freq (%d)\n", gpioa_freq);
+ gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+ }
+
+ return platform_driver_register(&vprbrd_gpio_driver);
+}
+subsys_initcall(vprbrd_gpio_init);
+
+static void __exit vprbrd_gpio_exit(void)
+{
+ platform_driver_unregister(&vprbrd_gpio_driver);
+}
+module_exit(vprbrd_gpio_exit);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("GPIO driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-gpio");
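Each viperboard GPIO operation above is a single USB control transfer carrying one of the small message structures defined at the top of the file; for the b-bank, value and mask travel as 16-bit big-endian words, so a single-pin update is framed like this (sketch, pin number illustrative):

        struct vprbrd_gpiob_msg msg = {
                .cmd  = VPRBRD_GPIOB_CMD_SETVAL,
                .val  = cpu_to_be16(1 << 5),            /* drive pin 5 high ...     */
                .mask = cpu_to_be16(0x0001 << 5),       /* ... and touch only pin 5 */
        };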
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d542a141811a..a71a54a3e3f7 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -89,41 +89,6 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
EXPORT_SYMBOL(of_get_named_gpio_flags);
/**
- * of_gpio_named_count - Count GPIOs for a device
- * @np: device node to count GPIOs for
- * @propname: property name containing gpio specifier(s)
- *
- * The function returns the count of GPIOs specified for a node.
- *
- * Note that the empty GPIO specifiers counts too. For example,
- *
- * gpios = <0
- * &pio1 1 2
- * 0
- * &pio2 3 4>;
- *
- * defines four GPIOs (so this function will return 4), two of which
- * are not specified.
- */
-unsigned int of_gpio_named_count(struct device_node *np, const char* propname)
-{
- unsigned int cnt = 0;
-
- do {
- int ret;
-
- ret = of_parse_phandle_with_args(np, propname, "#gpio-cells",
- cnt, NULL);
- /* A hole in the gpios = <> counts anyway. */
- if (ret < 0 && ret != -EEXIST)
- break;
- } while (++cnt);
-
- return cnt;
-}
-EXPORT_SYMBOL(of_gpio_named_count);
-
-/**
* of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
* @np: device node of the GPIO chip
@@ -250,7 +215,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
* on the same GPIO chip.
*/
ret = gpiochip_add_pin_range(chip,
- pinctrl_dev_get_name(pctldev),
+ pinctrl_dev_get_devname(pctldev),
0, /* offset in gpiochip */
pinspec.args[0],
pinspec.args[1]);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 199fca15f270..5359ca78130f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -806,7 +806,7 @@ fail_unlock:
}
EXPORT_SYMBOL_GPL(gpio_export);
-static int match_export(struct device *dev, void *data)
+static int match_export(struct device *dev, const void *data)
{
return dev_get_drvdata(dev) == data;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 18321b68b880..983201b450f1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2ff5cefe9ead..6f58c81cfcbc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index a277b1257888..da4a51eae824 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -1,6 +1,6 @@
config DRM_AST
tristate "AST server chips"
- depends on DRM && PCI && EXPERIMENTAL
+ depends on DRM && PCI
select DRM_TTM
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 31123b6a0be5..2d2c2f8d6dc6 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,7 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
MODULE_DEVICE_TABLE(pci, pciidlist);
-static int __devinit
-ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 1a026ac2dfb4..3602731a6112 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
static int ast_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
ttm_bo_type_device, &astbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, ast_bo_ttm_destroy);
if (ret)
return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index fc154dd75296..bf67b22723f9 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -1,6 +1,6 @@
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && EXPERIMENTAL
+ depends on DRM && PCI
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 101e423c8991..8ecb601152ef 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
};
-static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
kfree(ap);
+
+ return 0;
}
-static int __devinit
-cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int cirrus_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- cirrus_kick_out_firmware_fb(pdev);
+ int ret;
+
+ ret = cirrus_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bc83f835c830..1413a26e4905 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static int cirrus_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
ttm_bo_type_device, &cirrusbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, cirrus_bo_ttm_destroy);
if (ret)
return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ef1b22144d37..f2d667b8bee2 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.edid_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
- if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return -EINVAL;
+ }
+
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-void drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
/* Delete edid, when there is none. */
if (!edid) {
connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
return ret;
}
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
+ if (!connector->edid_blob_ptr)
+ return -EINVAL;
- ret = drm_connector_property_set_value(connector,
+ ret = drm_object_property_set_value(&connector->base,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ /* Only the driver knows */
+ return true;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* store the property value if successful */
if (!ret)
- drm_connector_property_set_value(connector, property, value);
+ drm_object_property_set_value(&connector->base, property, value);
return ret;
}
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ connector->status = connector_status_unknown;
+
if (connector->funcs->reset)
connector->funcs->reset(connector);
+ }
}
EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1227adf74dbc..7b2d378b2576 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,35 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front of the
+ * connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list);
+ }
+
+ list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
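For illustration only (not part of this patch), a minimal sketch of where a driver would call the new helper; the foo_modeset_init name is hypothetical:

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

static int foo_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	/* ... register CRTCs, encoders and connectors here ... */

	/*
	 * Connectors end up on the list in probe order; move any eDP/LVDS
	 * panels to the head so userspace treats a panel as the primary
	 * display.
	 */
	drm_helper_move_panel_connectors_to_head(dev);

	return 0;
}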
+
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them. Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
*
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
- *
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
*
* RETURNS:
* Number of modes found on @connector.
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
- drm_kms_helper_poll_enable(dev);
}
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
}
/**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
- * @x: width of mode
- * @y: height of mode
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on an HDMI link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/**
* drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Set up a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
*
* RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
*
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
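For context, a sketch (not from this patch) of the other half of the contract: a driver's ->output_poll_changed() callback usually forwards the event to the fbdev emulation, so the uevent sent by drm_kms_helper_hotplug_event() and the fbdev console stay in sync. The foo_* names are hypothetical:

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

struct foo_drm_private {			/* hypothetical driver private */
	struct drm_fb_helper fb_helper;
};

static void foo_output_poll_changed(struct drm_device *dev)
{
	struct foo_drm_private *priv = dev->dev_private;

	/* Re-probe outputs and update the emulated fbdev console. */
	drm_fb_helper_hotplug_event(&priv->fb_helper);
}

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	/* .fb_create omitted for brevity */
	.output_poll_changed = foo_output_poll_changed,
};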
+
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- /* if this is HPD or polled don't check it -
- TV out for instance */
- if (!connector->polled)
+ /* Ignore forced connectors. */
+ if (connector->force)
continue;
- else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
- repoll = true;
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
- !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);
- if (changed) {
- /* send a uevent + call fbdev */
- drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
- }
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
return;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled)
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool changed = false;
+
if (!dev->mode_config.poll_enabled)
return;
- /* kill timer and schedule immediate execution, this doesn't block */
- cancel_delayed_work(&dev->mode_config.output_poll_work);
- if (drm_kms_helper_poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
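A sketch of the intended caller (illustrative only): a driver's hot-plug interrupt, handled in thread context because the helper takes the mode config mutex. The foo_hpd_irq_thread name is hypothetical:

#include <linux/interrupt.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
{
	struct drm_device *dev = arg;

	/*
	 * Re-detects only the DRM_CONNECTOR_POLL_HPD connectors and sends
	 * a hotplug uevent if any connector status changed.
	 */
	drm_helper_hpd_irq_event(dev);

	return IRQ_HANDLED;
}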
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7f246f212457..89e196627160 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
-
+
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
-
}
static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
return 0;
}
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
-
+
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
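A sketch of the registration sequence the comment describes. The foo_* names are hypothetical, and the i2c_algo_dp_aux_data field names (running, address, aux_ch) are an assumption based on the structure referenced above:

#include <linux/module.h>
#include <linux/string.h>
#include <linux/i2c.h>
#include <drm/drm_dp_helper.h>

struct foo_dp {				/* hypothetical driver state */
	struct i2c_adapter aux_adapter;
	struct i2c_algo_dp_aux_data aux_algo;
};

static int foo_aux_ch(struct i2c_adapter *adapter, int mode,
		      uint8_t write_byte, uint8_t *read_byte)
{
	/* A real driver performs the hardware AUX transaction here. */
	return -EIO;
}

static int foo_register_dp_i2c_bus(struct foo_dp *dp, struct device *parent)
{
	dp->aux_algo.running = false;
	dp->aux_algo.address = 0;
	dp->aux_algo.aux_ch = foo_aux_ch;

	dp->aux_adapter.owner = THIS_MODULE;
	dp->aux_adapter.class = I2C_CLASS_DDC;
	dp->aux_adapter.dev.parent = parent;
	dp->aux_adapter.algo_data = &dp->aux_algo;
	strlcpy(dp->aux_adapter.name, "foo DP aux", sizeof(dp->aux_adapter.name));

	/* Registers the adapter and wakes the sink's AUX channel. */
	return i2c_dp_aux_add_bus(&dp->aux_adapter);
}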
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
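Illustrative use of the two status checks during link training (a sketch; the caller is assumed to have read DP_LANE0_1_STATUS onwards from the sink's DPCD into link_status):

#include <drm/drm_dp_helper.h>

static bool foo_link_training_done(u8 link_status[DP_LINK_STATUS_SIZE],
				   int lane_count)
{
	/* Clock recovery must have completed on every active lane first ... */
	if (!drm_dp_clock_recovery_ok(link_status, lane_count))
		return false;

	/* ... then symbol lock, channel EQ and inter-lane alignment. */
	return drm_dp_channel_eq_ok(link_status, lane_count);
}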
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(100);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(400);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
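The two conversion helpers are inverses of each other; a short sketch (illustrative, foo_pick_link_bw is hypothetical) of how a driver might use them when choosing the DPCD bandwidth code to program:

#include <drm/drm_dp_helper.h>

/* Pick the DPCD bandwidth code for a requested link rate (in kHz). */
static u8 foo_pick_link_bw(int requested_khz, int *actual_khz)
{
	u8 bw_code = drm_dp_link_rate_to_bw_code(requested_khz);

	/* e.g. 270000 -> DP_LINK_BW_2_7 -> 270000 */
	*actual_khz = drm_dp_bw_code_to_link_rate(bw_code);

	return bw_code;	/* value the driver writes to DP_LINK_BW_SET */
}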
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fadcd44ff196..5a3770fbd770 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
- int i;
- u32 *raw_edid = (u32 *)in_edid;
+ if (memchr_inv(in_edid, 0, length))
+ return false;
- for (i = 0; i < length / 4; i++)
- if (*(raw_edid + i) != 0)
- return false;
return true;
}
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+/*
+ * Looks for a CEA mode matching a given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+ struct drm_display_mode *cea_mode;
+ u8 mode;
+
+ for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+
+ if (drm_mode_equal(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
+
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_LOG_KMS("HDMI: DVI dual %d, "
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, or 0 if the mode is not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+ uint8_t i;
+
+ for (i = 0; i < drm_num_cea_modes; i++)
+ if (drm_mode_equal(mode, &edid_cea_modes[i]))
+ return i + 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
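A sketch of the intended use: an HDMI encoder can look up the VIC when it builds its AVI infoframe. The foo_avi_infoframe structure is hypothetical:

#include <drm/drm_crtc.h>

struct foo_avi_infoframe {		/* hypothetical, driver-local */
	u8 video_code;
};

static void foo_set_avi_vic(struct foo_avi_infoframe *frame,
			    const struct drm_display_mode *mode)
{
	/* 0 means @mode is not a CEA-861 mode; VIC 0 is still valid to send. */
	frame->video_code = drm_mode_cea_vic(mode);
}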
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4d58d7e6af3f..954d175bd7fa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,8 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently of the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
if (mode->force) {
const char *s;
switch (mode->force) {
- case DRM_FORCE_OFF: s = "OFF"; break;
- case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
default:
- case DRM_FORCE_ON: s = "ON"; break;
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
}
DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
if (panic_timeout < 0)
return 0;
- printk(KERN_ERR "panic occurred, switching back to text console\n");
+ pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: unregistered panic notifier\n");
+ pr_info("drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != sizes.surface_bpp) {
+ if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- }
+
/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
/* set the fb pointer */
- for (i = 0; i < fb_helper->crtc_count; i++) {
+ for (i = 0; i < fb_helper->crtc_count; i++)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- }
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0) {
+ if (register_framebuffer(info) < 0)
return -EINVAL;
- }
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
} else {
drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: registered panic notifier\n");
+ dev_info(fb_helper->dev->dev, "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
- if (strict) {
+ if (strict)
enable = connector->status == connector_status_connected;
- } else {
+ else
enable = connector->status != connector_status_disconnected;
- }
+
return enable;
}
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
continue;
- }
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
+ if (!crtcs || !modes || !enabled) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto out;
+ }
+
drm_enable_connectors(fb_helper, enabled);
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
}
}
+out:
kfree(crtcs);
kfree(modes);
kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/**
* drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
*
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
/*
* we shouldn't end up with no modes here.
*/
- if (count == 0) {
- printk(KERN_INFO "No connectors reported connected with modes\n");
- }
+ if (count == 0)
+ dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
drm_setup_crtcs(fb_helper);
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
- * probing all the outputs attached to the fb.
+ * probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
* LOCKING:
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c3745c4d46d8..80254547a3f8 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return list;
if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+ unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ if (entry->key == key)
+ return list;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = list;
}
if (parent) {
- hlist_add_after(parent, &item->head);
+ hlist_add_after_rcu(parent, &item->head);
} else {
- hlist_add_head(&item->head, h_list);
+ hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;
- list = drm_ht_find_key(ht, key);
+ list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
- hlist_del_init(list);
+ hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
- hlist_del_init(&item->head);
+ hlist_del_init_rcu(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 23dd97506f28..e77bd8b57df2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
break;
+ case DRM_CAP_TIMESTAMP_MONOTONIC:
+ req->value = drm_timestamp_monotonic;
+ break;
default:
return -EINVAL;
}
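From userspace the new capability can be queried before deciding how to interpret vblank and page-flip timestamps; a sketch using libdrm's drmGetCap(), assuming a libdrm new enough to define DRM_CAP_TIMESTAMP_MONOTONIC:

#include <stdint.h>
#include <xf86drm.h>

static int timestamps_are_monotonic(int drm_fd)
{
	uint64_t value = 0;

	/* Older kernels reject the cap; treat that as CLOCK_REALTIME. */
	if (drmGetCap(drm_fd, DRM_CAP_TIMESTAMP_MONOTONIC, &value) < 0)
		return 0;

	return value == 1;
}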
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3a3d0ce891b9..19c01ca3cc76 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
s64 diff_ns;
int vblrc;
struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
do {
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
unsigned flags,
struct drm_crtc *refcrtc)
{
- struct timeval stime, raw_time;
+ ktime_t stime, etime, mono_time_offset;
+ struct timeval tv_etime;
struct drm_display_mode *mode;
int vbl_status, vtotal, vdisplay;
int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
preempt_disable();
/* Get system timestamp before query. */
- do_gettimeofday(&stime);
+ stime = ktime_get();
/* Get vertical and horizontal scanout pos. vpos, hpos. */
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
/* Get system timestamp after query. */
- do_gettimeofday(&raw_time);
+ etime = ktime_get();
+ if (!drm_timestamp_monotonic)
+ mono_time_offset = ktime_get_monotonic_offset();
preempt_enable();
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
- duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status |= 0x8;
}
+ if (!drm_timestamp_monotonic)
+ etime = ktime_sub(etime, mono_time_offset);
+
+ /* save this only for debugging purposes */
+ tv_etime = ktime_to_timeval(etime);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+ etime = ktime_sub_ns(etime, delta_ns);
+ *vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
crtc, (int)vbl_status, hpos, vpos,
- (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+ (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
(int)duration_ns/1000, i);
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+static struct timeval get_drm_timestamp(void)
+{
+ ktime_t now;
+
+ now = ktime_get();
+ if (!drm_timestamp_monotonic)
+ now = ktime_sub(now, ktime_get_monotonic_offset());
+
+ return ktime_to_timeval(now);
+}
+
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
}
/* GPU high precision timestamp query unsupported or failed.
- * Return gettimeofday timestamp as best estimate.
+ * Return current monotonic/gettimeofday timestamp as best estimate.
*/
- do_gettimeofday(tvblank);
+ *tvblank = get_drm_timestamp();
return 0;
}
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ e->event.sequence = seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+ unsigned int seq;
+ if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ now = get_drm_timestamp();
+ }
+ send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
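A sketch of the typical caller: a page-flip completion handler stamping and delivering the pending event under the event lock (the foo_finish_page_flip name is hypothetical):

#include <drm/drmP.h>

static void foo_finish_page_flip(struct drm_device *dev, int crtc,
				 struct drm_pending_vblank_event *event)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (event)
		/* Fills in sequence/timestamp and queues it for the file. */
		drm_send_vblank_event(dev, crtc, event);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_vblank_put(dev, crtc);	/* the flip consumed one vblank ref */
}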
+
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
-
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
+ spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ send_vblank_event(dev, e, seq, &now);
vblwait->reply.sequence = seq;
- trace_drm_vblank_event_delivered(current->pid, pipe,
- vblwait->request.sequence);
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
DRM_DEBUG("vblank event on %d, current %d\n",
e->event.sequence, seq);
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 0761a03cdbb2..2aa331499f81 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
* -ENOSPC if no suitable free area is available. The preallocated memory node
* must be cleared.
*/
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color)
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free(mm, size, alignment, false);
+ hole_node = drm_mm_search_free_generic(mm, size, alignment,
+ color, 0);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment, 0);
-
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
EXPORT_SYMBOL(drm_mm_insert_node);
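A sketch of the preallocate-then-insert pattern these entry points expect; color 0 keeps the old behaviour and the foo_alloc_range name is hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_mm.h>

static struct drm_mm_node *foo_alloc_range(struct drm_mm *mm,
					   unsigned long size)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);	/* must be cleared */
	if (!node)
		return ERR_PTR(-ENOMEM);

	/* Page-aligned, no special color constraints. */
	ret = drm_mm_insert_node_generic(mm, node, size, PAGE_SIZE, 0);
	if (ret) {		/* -ENOSPC when no hole is large enough */
		kfree(node);
		return ERR_PTR(ret);
	}

	return node;
}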
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -213,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (mm->color_adjust)
- mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
if (adj_start < start)
adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
unsigned tmp = adj_start % alignment;
@@ -275,22 +285,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
* -ENOSPC if no suitable free area is available. This is for range
* restricted allocations. The preallocated memory node must be cleared.
*/
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment, unsigned long color,
+ unsigned long start, unsigned long end)
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free_in_range(mm, size, alignment,
- start, end, false);
+ hole_node = drm_mm_search_free_in_range_generic(mm,
+ size, alignment, color,
+ start, end, 0);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
+ drm_mm_insert_helper_range(hole_node, node,
+ size, alignment, color,
start, end);
-
return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
/**
@@ -489,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
+ mm->scan_hit_end = 0;
mm->scan_check_range = 0;
mm->prev_scanned_node = NULL;
}
@@ -516,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
+ mm->scan_hit_end = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
@@ -535,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
- unsigned long adj_start;
- unsigned long adj_end;
+ unsigned long adj_start, adj_end;
mm->scanned_blocks++;
@@ -553,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->node_list.next = &mm->prev_scanned_node->node_list;
mm->prev_scanned_node = node;
- hole_start = drm_mm_hole_node_start(prev_node);
- hole_end = drm_mm_hole_node_end(prev_node);
-
- adj_start = hole_start;
- adj_end = hole_end;
-
- if (mm->color_adjust)
- mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+ adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+ adj_end = hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
if (adj_start < mm->scan_start)
@@ -569,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
adj_end = mm->scan_end;
}
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color,
+ &adj_start, &adj_end);
+
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
- mm->scan_hit_size = hole_end;
-
+ mm->scan_hit_end = hole_end;
return 1;
}
@@ -609,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
node_list);
prev_node->hole_follows = node->scanned_preceeds_hole;
- INIT_LIST_HEAD(&node->node_list);
list_add(&node->node_list, &prev_node->node_list);
- /* Only need to check for containement because start&size for the
- * complete resulting free block (not just the desired part) is
- * stored. */
- if (node->start >= mm->scan_hit_start &&
- node->start + node->size
- <= mm->scan_hit_start + mm->scan_hit_size) {
- return 1;
- }
-
- return 0;
+ return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+ node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 59450f39bf96..d8da30e90db5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
*
* Describe @mode using DRM_DEBUG.
*/
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
"0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
* RETURNS:
* @mode->hdisplay
*/
-int drm_mode_width(struct drm_display_mode *mode)
+int drm_mode_width(const struct drm_display_mode *mode)
{
return mode->hdisplay;
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
* RETURNS:
* @mode->vdisplay
*/
-int drm_mode_height(struct drm_display_mode *mode)
+int drm_mode_height(const struct drm_display_mode *mode)
{
return mode->vdisplay;
}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* RETURNS:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
* the same */
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index ba33144257e5..754bc96e10c7 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
- u32 lnkcap, lnkcap2;
+ u32 lnkcap = 0, lnkcap2 = 0;
*mask = 0;
if (!dev->pdev)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c236fd27eba6..200e104f1fa0 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
struct idr drm_minors_idr;
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
if (!file_priv->master)
return -EINVAL;
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
+ if (file_priv->minor->master)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
}
- mutex_unlock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
+ kfree(dev->devname);
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062af..02296653a058 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;
- ret = drm_connector_property_get_value(connector,
+ ret = drm_object_property_get_value(&connector->base,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..046bcda36abe 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_IOMMU
+ bool "EXYNOS DRM IOMMU Support"
+ depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ help
+	  Choose this option if you want to use the IOMMU feature for DRM.
+
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
@@ -18,7 +24,7 @@ config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
- depends on DRM_EXYNOS && !FB_S3C
+ depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos FIMD for DRM.
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+ bool "Exynos DRM IPP"
+ depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+ help
+	  Choose this option if you want to use the IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+ bool "Exynos DRM FIMC"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+ bool "Exynos DRM Rotator"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+ bool "Exynos DRM GSC"
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ help
+ Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..639b49e1ec05 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1d..4e9b5ba8edff 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -55,15 +56,16 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
- .of_match_table = hdmiddc_match_types,
+ .of_match_table = of_match_ptr(hdmiddc_match_types),
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
- .remove = __devexit_p(s5p_ddc_remove),
+ .remove = s5p_ddc_remove,
.command = NULL,
};
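The exynos_ddc.c change pairs the new #ifdef CONFIG_OF guard around the match table with of_match_ptr() in the driver structure. The macro discards the pointer on !CONFIG_OF kernels, so guarding the table avoids a defined-but-unused warning while keeping a single driver definition; paraphrased from include/linux/of.h:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif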
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..57affae9568b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -29,93 +15,103 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
- dma_addr_t start_addr;
- unsigned int npages, i = 0;
- struct scatterlist *sgl;
int ret = 0;
+ enum dma_attr attr;
+ unsigned int nr_pages;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return -EINVAL;
- }
-
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
- if (buf->size >= SZ_1M) {
- npages = buf->size >> SECTION_SHIFT;
- buf->page_size = SECTION_SIZE;
- } else if (buf->size >= SZ_64K) {
- npages = buf->size >> 16;
- buf->page_size = SZ_64K;
- } else {
- npages = buf->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
- }
+ init_dma_attrs(&buf->dma_attrs);
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- return -ENOMEM;
- }
+ /*
+	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory region
+	 * will be allocated; otherwise memory that is as physically
+	 * contiguous as possible is allocated.
+ */
+ if (!(flags & EXYNOS_BO_NONCONTIG))
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- kfree(buf->sgt);
- buf->sgt = NULL;
- return -ENOMEM;
- }
+ /*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
+	 * mapping; otherwise use a cacheable mapping.
+ */
+ if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
+
+ dma_set_attr(attr, &buf->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
+
+ nr_pages = buf->size >> PAGE_SHIFT;
+
+ if (!is_drm_iommu_supported(dev)) {
+ dma_addr_t start_addr;
+ unsigned int i = 0;
+
+ buf->pages = kzalloc(sizeof(struct page) * nr_pages,
+ GFP_KERNEL);
+ if (!buf->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ return -ENOMEM;
+ }
+
+ buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL,
+ &buf->dma_attrs);
+ if (!buf->kvaddr) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ kfree(buf->pages);
+ return -ENOMEM;
+ }
+
+ start_addr = buf->dma_addr;
+ while (i < nr_pages) {
+ buf->pages[i] = phys_to_page(start_addr);
+ start_addr += PAGE_SIZE;
+ i++;
+ }
+ } else {
- buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
- if (!buf->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err1;
+ buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL,
+ &buf->dma_attrs);
+ if (!buf->pages) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
+ }
}
- buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
- if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
+ buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to get sg table.\n");
ret = -ENOMEM;
- goto err2;
- }
-
- sgl = buf->sgt->sgl;
- start_addr = buf->dma_addr;
-
- while (i < npages) {
- buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
- sg_dma_address(sgl) = start_addr;
- start_addr += buf->page_size;
- sgl = sg_next(sgl);
- i++;
+ goto err_free_attrs;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
return ret;
-err2:
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
-err1:
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
+
+ if (!is_drm_iommu_supported(dev))
+ kfree(buf->pages);
return ret;
}
@@ -125,23 +121,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- /*
- * release only physically continuous memory and
- * non-continuous memory would be released by exynos
- * gem framework.
- */
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return;
- }
-
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
@@ -150,11 +135,14 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
- kfree(buf->pages);
- buf->pages = NULL;
+ if (!is_drm_iommu_supported(dev)) {
+ dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+ kfree(buf->pages);
+ } else
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
buf->dma_addr = (dma_addr_t)NULL;
}
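The rewritten lowlevel_buffer_allocate() switches from dma_alloc_writecombine() to the attribute-based DMA API so the same call serves both the contiguous and the IOMMU-backed case. A minimal usage sketch of that 3.x-era struct dma_attrs interface, with a hypothetical helper name and no error handling:

#include <linux/device.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_wc(struct device *dev, size_t size,
			      dma_addr_t *dma_addr)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	/* write-combined buffer without a kernel virtual mapping */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/*
	 * with DMA_ATTR_NO_KERNEL_MAPPING the returned cookie is not a
	 * kernel virtual address; on the ARM IOMMU mapping implementation
	 * it is the page array, which is why the code above stores it in
	 * buf->pages.
	 */
	return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, &attrs);
}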
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..a6412f19673c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_BUF_H_
@@ -34,12 +20,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 0f68a2872673..4c5b6859c9ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -32,7 +18,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
-#define MAX_EDID 256
#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
drm_connector)
@@ -110,7 +95,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
to_exynos_connector(connector);
struct exynos_drm_manager *manager = exynos_connector->manager;
struct exynos_drm_display_ops *display_ops = manager->display_ops;
- unsigned int count;
+ struct edid *edid = NULL;
+ unsigned int count = 0;
+ int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -128,27 +115,21 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* because lcd panel has only one mode.
*/
if (display_ops->get_edid) {
- int ret;
- void *edid;
-
- edid = kzalloc(MAX_EDID, GFP_KERNEL);
- if (!edid) {
- DRM_ERROR("failed to allocate edid\n");
- return 0;
+ edid = display_ops->get_edid(manager->dev, connector);
+ if (IS_ERR_OR_NULL(edid)) {
+ ret = PTR_ERR(edid);
+ edid = NULL;
+ DRM_ERROR("Panel operation get_edid failed %d\n", ret);
+ goto out;
}
- ret = display_ops->get_edid(manager->dev, connector,
- edid, MAX_EDID);
- if (ret < 0) {
- DRM_ERROR("failed to get edid data.\n");
- kfree(edid);
- edid = NULL;
- return 0;
+ count = drm_add_edid_modes(connector, edid);
+ if (count < 0) {
+ DRM_ERROR("Add edid modes failed %d\n", count);
+ goto out;
}
drm_mode_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
} else {
struct exynos_drm_panel_info *panel;
struct drm_display_mode *mode = drm_mode_create(connector->dev);
@@ -175,6 +156,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
count = 1;
}
+out:
+ kfree(edid);
return count;
}
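exynos_drm_connector_get_modes() now expects display_ops->get_edid to return an allocated struct edid rather than copy into a fixed 256-byte buffer (MAX_EDID is gone). A sketch of what an implementation looks like under the new prototype; the ddc adapter lookup is an assumption, not taken from a real exynos driver:

#include <linux/err.h>
#include <linux/i2c.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

static struct edid *example_get_edid(struct device *dev,
				     struct drm_connector *connector)
{
	struct i2c_adapter *ddc = dev_get_drvdata(dev);	/* assumption */
	struct edid *edid;

	edid = drm_get_edid(connector, ddc);
	if (!edid)
		return ERR_PTR(-ENODEV);

	/* the connector code above kfree()s it after adding the modes */
	return edid;
}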
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
index 22f6cc442c3d..547c6b590357 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_CONNECTOR_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 94026ad76a77..4667c9f67acd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..e8894bc9e6d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -236,16 +222,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
+ spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
+ spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
crtc->fb = old_fb;
+
+ spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ spin_unlock_irq(&dev->event_lock);
goto out;
}
@@ -402,3 +393,33 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_disable_vblank);
}
+
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
+{
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+ base.link) {
+		/* if the event's pipe isn't the same as crtc then ignore it. */
+ if (crtc != e->pipe)
+ continue;
+
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ drm_vblank_put(dev, crtc);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
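exynos_drm_crtc_finish_pageflip() is meant to be called from a display controller's vblank interrupt path to retire the events queued under dev->event_lock in the page-flip hunk above. A hypothetical caller (not a real exynos IRQ routine; the pipe number is assumed):

#include <linux/interrupt.h>
#include <drm/drmP.h>
#include "exynos_drm_crtc.h"

static irqreturn_t example_vblank_irq(int irq, void *arg)
{
	struct drm_device *drm_dev = arg;
	int pipe = 0;		/* assumed: this controller drives CRTC 0 */

	drm_handle_vblank(drm_dev, pipe);
	exynos_drm_crtc_finish_pageflip(drm_dev, pipe);

	return IRQ_HANDLED;
}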
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6bae8d8c250e..3e197e6ae7d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_CRTC_H_
@@ -32,5 +18,6 @@
int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..ba0a3aa78547 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -3,24 +3,10 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -30,70 +16,106 @@
#include <linux/dma-buf.h>
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
- unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+ bool is_mapped;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
{
- struct sg_table *sgt = NULL;
- struct scatterlist *sgl;
- int i, ret;
+ struct exynos_drm_dmabuf_attachment *exynos_attach;
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- goto out;
+ exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+ if (!exynos_attach)
+ return -ENOMEM;
- ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
- if (ret)
- goto err_free_sgt;
+ exynos_attach->dir = DMA_NONE;
+ attach->priv = exynos_attach;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
+ return 0;
+}
- for_each_sg(sgt->sgl, sgl, nr_pages, i)
- sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+ struct sg_table *sgt;
- return sgt;
+ if (!exynos_attach)
+ return;
-err_free_sgt:
- kfree(sgt);
- sgt = NULL;
-out:
- return NULL;
+ sgt = &exynos_attach->sgt;
+
+ if (exynos_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ exynos_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(exynos_attach);
+ attach->priv = NULL;
}
static struct sg_table *
exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
+ struct scatterlist *rd, *wr;
struct sg_table *sgt = NULL;
- unsigned int npages;
- int nents;
+ unsigned int i;
+ int nents, ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
- mutex_lock(&dev->struct_mutex);
+ /* just return current sgt if already requested. */
+ if (exynos_attach->dir == dir && exynos_attach->is_mapped)
+ return &exynos_attach->sgt;
buf = gem_obj->buffer;
+ if (!buf) {
+ DRM_ERROR("buffer is null.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sgt = &exynos_attach->sgt;
- /* there should always be pages allocated. */
- if (!buf->pages) {
- DRM_ERROR("pages is null.\n");
- goto err_unlock;
+ ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
}
- npages = buf->size / buf->page_size;
+ mutex_lock(&dev->struct_mutex);
- sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
- if (!sgt) {
- DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
- goto err_unlock;
+ rd = buf->sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
}
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
- npages, buf->size, buf->page_size);
+ if (dir != DMA_NONE) {
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sg_free_table(sgt);
+ sgt = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+ }
+
+ exynos_attach->is_mapped = true;
+ exynos_attach->dir = dir;
+ attach->priv = exynos_attach;
+
+ DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -104,10 +126,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
- sgt = NULL;
+ /* Nothing to do. */
}
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +188,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
}
static struct dma_buf_ops exynos_dmabuf_ops = {
+ .attach = exynos_gem_attach_dma_buf,
+ .detach = exynos_gem_detach_dma_buf,
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
.kmap = exynos_gem_dmabuf_kmap,
@@ -185,7 +206,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
- exynos_gem_obj->base.size, 0600);
+ exynos_gem_obj->base.size, flags);
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
@@ -196,7 +217,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
- struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -210,7 +230,12 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
/* is it from our device? */
if (obj->dev == drm_dev) {
+ /*
+		 * Importing a dmabuf exported from our own gem increases the
+		 * refcount on the gem itself instead of the f_count of the dmabuf.
+ */
drm_gem_object_reference(obj);
+ dma_buf_put(dma_buf);
return obj;
}
}
@@ -233,38 +258,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
- buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
- if (!buffer->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err_free_pages;
+ goto err_free_buffer;
}
sgl = sgt->sgl;
- if (sgt->nents == 1) {
- buffer->dma_addr = sg_dma_address(sgt->sgl);
- buffer->size = sg_dma_len(sgt->sgl);
+ buffer->size = dma_buf->size;
+ buffer->dma_addr = sg_dma_address(sgl);
+ if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
- unsigned int i = 0;
-
- buffer->dma_addr = sg_dma_address(sgl);
- while (i < sgt->nents) {
- buffer->pages[i] = sg_page(sgl);
- buffer->size += sg_dma_len(sgl);
- sgl = sg_next(sgl);
- i++;
- }
-
+ /*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * we set NONCONTIG.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of its own buffer type.
+ */
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
@@ -277,9 +291,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
-err_free_pages:
- kfree(buffer->pages);
- buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
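The new attach/detach callbacks cache one sg_table per attachment so repeated map calls in the same direction are cheap. For reference, the importer-side flow this serves, with placeholder device and dma_buf arguments and no real hardware programming:

#include <linux/dma-buf.h>
#include <linux/err.h>

static int example_import(struct device *dev, struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/*
	 * the first map builds and dma-maps the table; later maps in the
	 * same direction return the copy cached in
	 * exynos_drm_dmabuf_attachment instead of remapping.
	 */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program sg_dma_address(sgt->sgl) into the device here ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	return 0;
}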
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
index 662a8f98ccdb..49acfafb4fdb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -3,24 +3,10 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_DMABUF_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..3da5c2d214d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -40,6 +26,8 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -49,6 +37,9 @@
#define VBLANK_OFF_DELAY 50000
+/* platform device pointer for the exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -66,6 +57,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
+ /*
+	 * create a mapping to manage the iommu table and store a pointer to
+	 * the iommu mapping structure in iommu_mapping of the private data.
+	 * this iommu_mapping can also be used to check whether iommu is
+	 * supported or not.
+ */
+ ret = drm_create_iommu_mapping(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to create iommu mapping.\n");
+ goto err_crtc;
+ }
+
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
@@ -80,7 +83,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +92,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +129,8 @@ err_drm_device:
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+ drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
@@ -142,6 +147,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
+
+ drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -229,6 +236,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+ exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+ exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+ exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+ exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +294,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
@@ -295,7 +311,7 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
- .remove = __devexit_p(exynos_drm_platform_remove),
+ .remove = exynos_drm_platform_remove,
.driver = {
.owner = THIS_MODULE,
.name = "exynos-drm",
@@ -324,6 +340,10 @@ static int __init exynos_drm_init(void)
ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
if (ret < 0)
goto out_common_hdmi;
+
+ ret = exynos_platform_device_hdmi_register();
+ if (ret < 0)
+ goto out_common_hdmi_dev;
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +358,80 @@ static int __init exynos_drm_init(void)
goto out_g2d;
#endif
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ ret = platform_driver_register(&fimc_driver);
+ if (ret < 0)
+ goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ ret = platform_driver_register(&rotator_driver);
+ if (ret < 0)
+ goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ ret = platform_driver_register(&gsc_driver);
+ if (ret < 0)
+ goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = platform_driver_register(&ipp_driver);
+ if (ret < 0)
+ goto out_ipp;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
+ goto out_drm;
+
+ exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+ NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+ ret = PTR_ERR(exynos_drm_pdev);
goto out;
+ }
return 0;
out:
+ platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
out_g2d:
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
platform_driver_unregister(&vidi_driver);
+out_vidi:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
out_common_hdmi:
platform_driver_unregister(&mixer_driver);
@@ -375,13 +451,32 @@ static void __exit exynos_drm_exit(void)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ platform_device_unregister(exynos_drm_pdev);
+
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
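exynos_drm_init() now registers several optional sub-drivers before the core platform driver and device, so the error path has to unwind them in reverse. A contrived two-driver example of that register/unwind shape (driver structures and names are placeholders):

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_driver example_first_driver = {
	.driver = { .name = "example-first" },
};

static struct platform_driver example_second_driver = {
	.driver = { .name = "example-second" },
};

static int __init example_init(void)
{
	int ret;

	ret = platform_driver_register(&example_first_driver);
	if (ret < 0)
		return ret;

	ret = platform_driver_register(&example_second_driver);
	if (ret < 0)
		goto out_first;

	return 0;

out_first:
	/* unregister in reverse order of registration */
	platform_driver_unregister(&example_first_driver);
	return ret;
}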
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..4606fac7241a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_DRV_H_
@@ -74,8 +60,6 @@ enum exynos_drm_output_type {
* @commit: apply hardware specific overlay data to registers.
* @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- * hardware overlay is disabled.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +67,6 @@ struct exynos_drm_overlay_ops {
void (*commit)(struct device *subdrv_dev, int zpos);
void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
- void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -110,7 +93,6 @@ struct exynos_drm_overlay_ops {
* @pixel_format: fourcc pixel format of this overlay
* @dma_addr: array of bus(accessed by dma) address to the memory region
* allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
* @zpos: order of overlay layer(z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -142,7 +124,6 @@ struct exynos_drm_overlay {
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- void __iomem *vaddr[MAX_FB_BUFFER];
int zpos;
bool default_win;
@@ -167,8 +148,8 @@ struct exynos_drm_overlay {
struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
- int (*get_edid)(struct device *dev, struct drm_connector *connector,
- u8 *edid, int len);
+ struct edid *(*get_edid)(struct device *dev,
+ struct drm_connector *connector);
void *(*get_panel)(struct device *dev);
int (*check_timing)(struct device *dev, void *timing);
int (*power_on)(struct device *dev, int mode);
@@ -186,6 +167,8 @@ struct exynos_drm_display_ops {
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ * hardware overlay is updated.
*/
struct exynos_drm_manager_ops {
void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +183,7 @@ struct exynos_drm_manager_ops {
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
+ void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -231,16 +215,28 @@ struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
- struct list_head gem_list;
- unsigned int gem_nr;
+ struct list_head userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+ struct device *dev;
+ struct list_head event_list;
};
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
+ struct exynos_drm_ipp_private *ipp_priv;
};
/*
* Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *	with iommu, the device address space starts from this address;
+ *	otherwise a default one is used.
+ * @da_space_size: size of the device address space.
+ *	if 0, a default value is used.
+ * @da_space_order: order of the device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -255,6 +251,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
+
+ unsigned long da_start;
+ unsigned long da_space_size;
+ unsigned long da_space_order;
};
/*
@@ -318,10 +318,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+/*
+ * this function registers the exynos drm hdmi platform device. It ensures
+ * that only one instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index f2df06c603f7..c63721f64aec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -234,6 +220,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_encoder *exynos_encoder;
+ struct exynos_drm_manager_ops *ops;
+ struct drm_device *dev = fb->dev;
+ struct drm_encoder *encoder;
+
+ /*
+ * make sure that overlay data are updated to real hardware
+ * for all encoders.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ exynos_encoder = to_exynos_encoder(encoder);
+ ops = exynos_encoder->manager->ops;
+
+ /*
+ * wait for vblank interrupt
+ * - this makes sure that overlay data are updated to
+ * real hardware.
+ */
+ if (ops->wait_for_vblank)
+ ops->wait_for_vblank(exynos_encoder->manager->dev);
+ }
+}
+
+
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
@@ -505,14 +517,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
-
- /*
- * wait for vblank interrupt
- * - this makes sure that hardware overlay is disabled to avoid
- * for the dma accesses to memory after gem buffer was released
- * because the setting for disabling the overlay will be updated
- * at vsync.
- */
- if (overlay_ops && overlay_ops->wait_for_vblank)
- overlay_ops->wait_for_vblank(manager->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..89e2fb0770af 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_ENCODER_H_
@@ -46,5 +32,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..294c0513f587 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -6,34 +6,23 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +39,32 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ unsigned int flags;
+
+ /*
+ * if exynos drm driver supports iommu then framebuffer can use
+ * all the buffer types.
+ */
+ if (is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ flags = exynos_gem_obj->flags;
+
+ /*
+ * without iommu support, physically non-contiguous memory cannot be
+ * used for a framebuffer.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +72,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /* make sure that overlay data are updated before releasing fb. */
+ exynos_drm_encoder_complete_scanout(fb);
+
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +146,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
return &exynos_fb->fb;
}
@@ -190,9 +217,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
struct exynos_drm_fb *exynos_fb;
- int i;
+ int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -202,30 +228,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb)) {
- drm_gem_object_unreference_unlocked(obj);
- return fb;
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb) {
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ return ERR_PTR(-ENOMEM);
}
- exynos_fb = to_exynos_fb(fb);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- exynos_drm_fb_destroy(fb);
+ kfree(exynos_fb);
return ERR_PTR(-ENOENT);
}
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
- return fb;
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ if (ret) {
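+ /* drop the gem references taken by the lookups above before bailing out */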
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *gem_obj;
+
+ gem_obj = exynos_fb->exynos_gem_obj[i];
+ drm_gem_object_unreference_unlocked(&gem_obj->base);
+ }
+
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
+ return &exynos_fb->fb;
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +295,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
- (unsigned long)buffer->kvaddr,
- (unsigned long)buffer->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
return buffer;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 96262e54f76d..517471b37566 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_FB_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7466c4414cb..71f867340a88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -34,6 +20,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
@@ -46,8 +33,38 @@ struct exynos_drm_fbdev {
struct exynos_drm_gem_obj *exynos_gem_obj;
};
+static int exynos_drm_fb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ unsigned long vm_size;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > buffer->size)
+ return -EINVAL;
+
+ ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ .fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -79,6 +96,26 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
+ /* map the buffer pages into kernel virtual address space. */
+ if (!buffer->kvaddr) {
+ if (is_drm_iommu_supported(dev)) {
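+ /* with iommu the pages may be physically non-contiguous, so vmap them */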
+ unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+
+ buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ } else {
+ phys_addr_t dma_addr = buffer->dma_addr;
+ if (dma_addr)
+ buffer->kvaddr = phys_to_virt(dma_addr);
+ else
+ buffer->kvaddr = (void __iomem *)NULL;
+ }
+ if (!buffer->kvaddr) {
+ DRM_ERROR("failed to map pages to kernel space.\n");
+ return -EIO;
+ }
+ }
+
/* buffer count to framebuffer always is 1 at booting time. */
exynos_drm_fb_set_buf_cnt(fb, 1);
@@ -87,8 +124,12 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
- offset);
+ if (is_drm_iommu_supported(dev))
+ fbi->fix.smem_start = (unsigned long)
+ (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
+ else
+ fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
+
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
- goto out;
+ goto err_release_framebuffer;
}
exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
- goto out;
+ goto err_destroy_gem;
}
helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret) {
DRM_ERROR("failed to allocate cmap.\n");
- goto out;
+ goto err_destroy_framebuffer;
}
ret = exynos_drm_fbdev_update(helper, helper->fb);
- if (ret < 0) {
- fb_dealloc_cmap(&fbi->cmap);
- goto out;
- }
+ if (ret < 0)
+ goto err_dealloc_cmap;
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+ drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+ exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+ framebuffer_release(fbi);
/*
* if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
+ if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
+ vunmap(exynos_gem_obj->buffer->kvaddr);
+
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index ccfce8a1a451..e16d7f0ae192 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_FBDEV_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 000000000000..411f69b76e84
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,1953 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size, width and height.
+ * 4. add check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_SRC 2
+#define FIMC_MAX_DST 32
+#define FIMC_SHFACTOR 10
+#define FIMC_BUF_STOP 1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ 32
+#define FIMC_WIDTH_ITU_709 1280
+#define FIMC_REFRESH_MAX 60
+#define FIMC_REFRESH_MIN 12
+#define FIMC_CROP_MAX 8192
+#define FIMC_CROP_MIN 32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct fimc_context, ippdrv);
+#define fimc_read(offset) readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+enum fimc_wb {
+ FIMC_WB_NONE,
+ FIMC_WB_A,
+ FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: scaler bypass mode.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+ bool range;
+ bool bypass;
+ bool up_h;
+ bool up_v;
+ u32 hratio;
+ u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+ /* scaler */
+ u32 in_hori;
+ u32 bypass;
+ /* output rotator */
+ u32 dst_h_wo_rot;
+ u32 dst_h_rot;
+ /* input rotator */
+ u32 rl_w_wo_rot;
+ u32 rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+ char *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @odr: ordering of YUV.
+ * @ver: fimc version.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: suspended state.
+ */
+struct fimc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *sclk_fimc_clk;
+ struct clk *fimc_clk;
+ struct clk *wb_clk;
+ struct clk *wb_b_clk;
+ struct fimc_scaler sc;
+ struct fimc_driverdata *ddata;
+ struct exynos_drm_ipp_pol pol;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* stop dma operation */
+ cfg = fimc_read(EXYNOS_CISTATUS);
+ if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg, EXYNOS_MSCTRL);
+ }
+
+ cfg = fimc_read(EXYNOS_CISRCFMT);
+ cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* disable image capture */
+ cfg = fimc_read(EXYNOS_CIIMGCPT);
+ cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+ fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+ /* s/w reset */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= (EXYNOS_CIGCTRL_SWRST);
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* s/w reset complete */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_SWRST;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+ u32 camblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ camblk_cfg = readl(SYSREG_CAMERA_BLK);
+ camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+ camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+ writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+ EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+ EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+ switch (wb) {
+ case FIMC_WB_A:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_B:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_NONE:
+ default:
+ cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+ EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+ break;
+ }
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+ struct exynos_drm_ipp_pol *pol)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+ __func__, pol->inv_pclk, pol->inv_vsync);
+ DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+ __func__, pol->inv_href, pol->inv_hsync);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+ EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+ if (pol->inv_pclk)
+ cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+ if (pol->inv_vsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+ if (pol->inv_href)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+ if (pol->inv_hsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+ else
+ cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+ bool overflow, bool level)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, level);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable) {
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+ if (overflow)
+ cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+ if (level)
+ cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+ } else
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, status, flag;
+
+ status = fimc_read(EXYNOS_CISTATUS);
+ flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+ EXYNOS_CISTATUS_OVFICR;
+
+ DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+ if (status & flag) {
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+ return true;
+ }
+
+ return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ cfg = fimc_read(EXYNOS_CISTATUS);
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+ if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+ return false;
+
+ cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+ fimc_write(cfg, EXYNOS_CISTATUS);
+
+ return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+ u32 cfg;
+ int frame_cnt, buf_id;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CISTATUS2);
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+ if (frame_cnt == 0)
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+ DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+ if (frame_cnt == 0) {
+ DRM_ERROR("failed to get frame count.\n");
+ return -EIO;
+ }
+
+ buf_id = frame_cnt - 1;
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+ else
+ cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+ EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12MT:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg1, cfg2;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg1 = fimc_read(EXYNOS_MSCTRL);
+ cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+ cfg2 = fimc_read(EXYNOS_CITRGFMT);
+ cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg1, EXYNOS_MSCTRL);
+ fimc_write(cfg2, EXYNOS_CITRGFMT);
+ *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ u32 cfg, h1, h2, v1, v2;
+
+ /* cropped image */
+ h1 = pos->x;
+ h2 = sz->hsize - pos->w - pos->x;
+ v1 = pos->y;
+ v2 = sz->vsize - pos->h - pos->y;
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+ DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+ h1, h2, v1, v2);
+
+ /*
+ * set window offset 1, 2 size
+ * check figure 43-21 in user manual
+ */
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+ EXYNOS_CIWDOFST_WINVEROFST_MASK);
+ cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+ EXYNOS_CIWDOFST_WINVEROFST(v1));
+ cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+ EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+ fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+ return 0;
+}
+
+static int fimc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGISIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+ pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* set input DMA image size */
+ cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+ cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+ EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+ cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+ EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+ fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+ /*
+ * set input FIFO image size
+ * for now, we support only ITU601 8 bit mode
+ */
+ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+ EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIIYOFF);
+ cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICBOFF);
+ cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICROFF);
+
+ return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_SRC) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_SRC];
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIIYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+ .set_fmt = fimc_src_set_fmt,
+ .set_transf = fimc_src_set_transf,
+ .set_size = fimc_src_set_size,
+ .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+ EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+ EXYNOS_CIOCTRL_ORDER422_MASK |
+ EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+
+ return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_CIEXTEN);
+
+ if (fmt == DRM_FORMAT_AYUV) {
+ cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+ } else {
+ cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV21:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
+ fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ }
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+ cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+ EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
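+/*
+ * Select the power-of-two prescaler ratio (1..32) that brings the
+ * prescaled source size to no more than twice the destination size;
+ * the returned shift is log2 of that ratio and is later used to derive
+ * the main scaler ratio and the shift factor.
+ */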
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 64) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 32) {
+ *ratio = 32;
+ *shift = 5;
+ } else if (src >= dst * 16) {
+ *ratio = 16;
+ *shift = 4;
+ } else if (src >= dst * 8) {
+ *ratio = 8;
+ *shift = 3;
+ } else if (src >= dst * 4) {
+ *ratio = 4;
+ *shift = 2;
+ } else if (src >= dst * 2) {
+ *ratio = 2;
+ *shift = 1;
+ } else {
+ *ratio = 1;
+ *shift = 0;
+ }
+
+ return 0;
+}
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, cfg_ext, shfactor;
+ u32 pre_dst_width, pre_dst_height;
+ u32 pre_hratio, hfactor, pre_vratio, vfactor;
+ int ret = 0;
+ u32 src_w, src_h, dst_w, dst_h;
+
+ cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+ if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+ src_w = src->h;
+ src_h = src->w;
+ } else {
+ src_w = src->w;
+ src_h = src->h;
+ }
+
+ if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ pre_dst_width = src_w / pre_hratio;
+ pre_dst_height = src_h / pre_vratio;
+ DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+ pre_dst_width, pre_dst_height);
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+ __func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
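+ /*
+ * main scaler ratio in fixed point (scaled by 2^14), with the
+ * prescaler shift computed above already folded out.
+ */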
+ sc->hratio = (src_w << 14) / (dst_w << hfactor);
+ sc->vratio = (src_h << 14) / (dst_h << vfactor);
+ sc->up_h = (dst_w >= src_w) ? true : false;
+ sc->up_v = (dst_h >= src_h) ? true : false;
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
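+ /* remaining shift factor after the horizontal and vertical prescale shifts */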
+ shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+ DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+ cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+ EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+ EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+ fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+ cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+ EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+ fimc_write(cfg, EXYNOS_CISCPREDST);
+
+ return ret;
+}
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+ u32 cfg, cfg_ext;
+
+ DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+ __func__, sc->hratio, sc->vratio);
+
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+ EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+ EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+ EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+ EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+ if (sc->range)
+ cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+ if (sc->bypass)
+ cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+ if (sc->up_h)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+ if (sc->up_v)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+ cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+ EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+ cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+ EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+ fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h);
+
+ /* CSC ITU */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+ if (sz->hsize >= FIMC_WIDTH_ITU_709)
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+ else
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* target image size */
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+ EXYNOS_CITRGFMT_TARGETV_MASK);
+ cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+ EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+
+ /* target area */
+ cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+ fimc_write(cfg, EXYNOS_CITAREA);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOYOFF);
+ cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCBOFF);
+ cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCROFF);
+
+ return 0;
+}
+
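+/* count the destination buffers currently enabled in the CIFCNTSEQ mask */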
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+ u32 cfg, i, buf_num = 0;
+ u32 mask = 0x00000001;
+
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ for (i = 0; i < FIMC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num++;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool enable;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ enable = true;
+ break;
+ case IPP_BUF_DEQUEUE:
+ enable = false;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= (enable << buf_id);
+ fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+ fimc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+ fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int fimc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_DST) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_DST];
+
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIOYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+ .set_fmt = fimc_dst_set_fmt,
+ .set_transf = fimc_dst_set_transf,
+ .set_size = fimc_dst_set_size,
+ .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->sclk_fimc_clk);
+ clk_enable(ctx->fimc_clk);
+ clk_enable(ctx->wb_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_disable(ctx->fimc_clk);
+ clk_disable(ctx->wb_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+ struct fimc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ int buf_id;
+
+ DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+ fimc_clear_irq(ctx);
+ if (fimc_check_ovf(ctx))
+ return IRQ_NONE;
+
+ if (!fimc_check_frame_end(ctx))
+ return IRQ_NONE;
+
+ buf_id = fimc_get_buf_id(ctx);
+ if (buf_id < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return IRQ_HANDLED;
+ }
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+ queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+ return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = FIMC_REFRESH_MIN;
+ prop_list->refresh_max = FIMC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+ (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = FIMC_CROP_MAX;
+ prop_list->crop_max.vsize = FIMC_CROP_MAX;
+ prop_list->crop_min.hsize = FIMC_CROP_MIN;
+ prop_list->crop_min.vsize = FIMC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+ prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+ prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+ prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!fimc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:\n", __func__);
+
+ for (i = 0; i < FIMC_MAX_SRC; i++) {
+ fimc_write(0, EXYNOS_CIIYSA(i));
+ fimc_write(0, EXYNOS_CIICBSA(i));
+ fimc_write(0, EXYNOS_CIICRSA(i));
+ }
+
+ for (i = 0; i < FIMC_MAX_DST; i++) {
+ fimc_write(0, EXYNOS_CIOYSA(i));
+ fimc_write(0, EXYNOS_CIOCBSA(i));
+ fimc_write(0, EXYNOS_CIOCRSA(i));
+ }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ fimc_sw_reset(ctx);
+
+ /* reset scaler capability */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+ fimc_clear_addr(ctx);
+
+ return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ int ret, i;
+ u32 cfg0, cfg1;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ fimc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ ret = fimc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set prescaler.\n");
+ return ret;
+ }
+
+ /* If set to true, the screen can be saved as a jpeg */
+ fimc_handle_jpeg(ctx, false);
+ fimc_set_scaler(ctx, &ctx->sc);
+ fimc_set_polarity(ctx, &ctx->pol);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+ fimc_handle_lastend(ctx, false);
+
+ /* setup dma */
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ fimc_set_type_ctrl(ctx, FIMC_WB_A);
+ fimc_handle_lastend(ctx, true);
+
+ /* setup FIMD */
+ fimc_set_camblk_fimd0_wb(ctx);
+
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ /* Reset status */
+ fimc_write(0x0, EXYNOS_CISTATUS);
+
+ cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+ cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+ /* Scaler */
+ cfg1 = fimc_read(EXYNOS_CISCCTRL);
+ cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+ cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+ EXYNOS_CISCCTRL_SCALERSTART);
+
+ fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+ /* Enable image capture*/
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+ fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+ /* Disable frame end irq */
+ cfg0 = fimc_read(EXYNOS_CIGCTRL);
+ cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+ cfg0 = fimc_read(EXYNOS_CIOCTRL);
+ cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+ fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+ if (cmd == IPP_CMD_M2M) {
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ }
+
+ return 0;
+}
+
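fimc_ippdrv_start above configures the block almost entirely through read-modify-write sequences: read a register, clear the field of interest, OR in the new bits, write the value back. A minimal stand-alone sketch of that pattern in plain C (the register array and helper names are hypothetical, not the driver's fimc_read/fimc_write):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register file standing in for the memory-mapped FIMC block. */
static uint32_t regs[16];

static uint32_t reg_read(unsigned int off)            { return regs[off]; }
static void reg_write(uint32_t val, unsigned int off) { regs[off] = val; }

/* Read-modify-write: clear the whole field mask first, then OR in the new
 * bits, so stale bits from a previous configuration cannot survive. */
static void reg_update(unsigned int off, uint32_t mask, uint32_t bits)
{
        uint32_t cfg = reg_read(off);

        cfg &= ~mask;
        cfg |= bits;
        reg_write(cfg, off);
}

int main(void)
{
        reg_write(0xffffffff, 3);
        reg_update(3, 0x000000f0, 0x00000030);  /* replace one nibble only */
        printf("0x%08x\n", reg_read(3));        /* prints 0xffffff3f */
        return 0;
}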
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* Source clear */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ fimc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+ /* Scaler disable */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ /* Disable image capture */
+ cfg = fimc_read(EXYNOS_CIIMGCPT);
+ cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+ fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+ /* Disable frame end irq (set the disable bit) */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static int fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx;
+ struct clk *parent_clk;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct exynos_drm_fimc_pdata *pdata;
+ struct fimc_driverdata *ddata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data specified.\n");
+ return -EINVAL;
+ }
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ddata = (struct fimc_driverdata *)
+ platform_get_device_id(pdev)->driver_data;
+
+ /* clock control */
+ ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
+ if (IS_ERR(ctx->sclk_fimc_clk)) {
+ dev_err(dev, "failed to get src fimc clock.\n");
+ return PTR_ERR(ctx->sclk_fimc_clk);
+ }
+ clk_enable(ctx->sclk_fimc_clk);
+
+ ctx->fimc_clk = devm_clk_get(dev, "fimc");
+ if (IS_ERR(ctx->fimc_clk)) {
+ dev_err(dev, "failed to get fimc clock.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return PTR_ERR(ctx->fimc_clk);
+ }
+
+ ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
+ if (IS_ERR(ctx->wb_clk)) {
+ dev_err(dev, "failed to get writeback a clock.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return PTR_ERR(ctx->wb_clk);
+ }
+
+ ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
+ if (IS_ERR(ctx->wb_b_clk)) {
+ dev_err(dev, "failed to get writeback b clock.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return PTR_ERR(ctx->wb_b_clk);
+ }
+
+ parent_clk = devm_clk_get(dev, ddata->parent_clk);
+
+ if (IS_ERR(parent_clk)) {
+ dev_err(dev, "failed to get parent clock.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return PTR_ERR(parent_clk);
+ }
+
+ if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+ dev_err(dev, "failed to set parent.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return -EINVAL;
+ }
+
+ devm_clk_put(dev, parent_clk);
+ clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ if (IS_ERR(ctx->regs))
+ return PTR_ERR(ctx->regs);
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ return -ENOENT;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+ IRQF_ONESHOT, "drm_fimc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ return ret;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+ ctx->pol = pdata->pol;
+ ctx->ddata = ddata;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+ ippdrv->check_property = fimc_ippdrv_check_property;
+ ippdrv->reset = fimc_ippdrv_reset;
+ ippdrv->start = fimc_ippdrv_start;
+ ippdrv->stop = fimc_ippdrv_stop;
+ ret = fimc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm fimc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+
+ return ret;
+}
+
+static int fimc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return fimc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+ .parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4410_fimc_data = {
+ .parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+ {
+ .name = "exynos4210-fimc",
+ .driver_data = (unsigned long)&exynos4210_fimc_data,
+ }, {
+ .name = "exynos4412-fimc",
+ .driver_data = (unsigned long)&exynos4410_fimc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = fimc_remove,
+ .id_table = fimc_driver_ids,
+ .driver = {
+ .name = "exynos-drm-fimc",
+ .owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 000000000000..127a424c5fdf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e08478f19f1a..36493ce71f9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
/*
* FIMD is stand for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
+ bool resume;
};
struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
u32 vidcon1;
bool suspended;
struct mutex lock;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
struct exynos_drm_panel_info *panel;
};
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+ { .compatible = "samsung,exynos4-fimd",
+ .data = &exynos4_fimd_driver_data },
+ { .compatible = "samsung,exynos5-fimd",
+ .data = &exynos5_fimd_driver_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
+#ifdef CONFIG_OF
+ const struct of_device_id *of_id =
+ of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+ if (of_id)
+ return (struct fimd_driver_data *)of_id->data;
+#endif
+
return (struct fimd_driver_data *)
platform_get_device_id(pdev)->driver_data;
}
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(timing->yres - 1) |
- VIDTCON2_HOZVAL(timing->xres - 1);
+ VIDTCON2_HOZVAL(timing->xres - 1) |
+ VIDTCON2_LINEVAL_E(timing->yres - 1) |
+ VIDTCON2_HOZVAL_E(timing->xres - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
/* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
}
}
+static void fimd_wait_for_vblank(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ if (ctx->suspended)
+ return;
+
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for FIMD to signal the VSYNC interrupt, or return after a
+ * 50ms timeout (i.e. a 20Hz refresh rate).
+ */
+ if (!wait_event_timeout(ctx->wait_vsync_queue,
+ !atomic_read(&ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
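fimd_wait_for_vblank above arms an atomic flag and sleeps on a wait queue until the interrupt handler (later in this patch) clears the flag and wakes it, or until the 50ms timeout expires. A small userspace model of that handshake, using a condition variable in place of the kernel wait queue (all names hypothetical, assumes POSIX threads):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool wait_vsync_event;

/* Stands in for the interrupt handler: clear the flag and wake the waiter. */
static void *fake_vsync_irq(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        wait_vsync_event = false;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t irq;
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;                   /* analogue of the 50ms timeout */

        pthread_mutex_lock(&lock);
        wait_vsync_event = true;                /* arm the wait, as the driver does */
        pthread_create(&irq, NULL, fake_vsync_irq, NULL);

        while (wait_vsync_event)
                if (pthread_cond_timedwait(&cond, &lock, &deadline))
                        break;                  /* timed out */
        pthread_mutex_unlock(&lock);

        pthread_join(irq, NULL);
        printf("%s\n", wait_vsync_event ? "vblank wait timed out" : "vsync seen");
        return 0;
}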
static struct exynos_drm_manager_ops fimd_manager_ops = {
.dpms = fimd_dpms,
.apply = fimd_apply,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
+ .wait_for_vblank = fimd_wait_for_vblank,
};
static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
+ unsigned int last_x;
+ unsigned int last_y;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
/* buffer size */
val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+ VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+ VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+ VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+ VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
writel(val, ctx->regs + VIDOSD_A(win));
- val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
- win_data->ovl_width - 1) |
- VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
- win_data->ovl_height - 1);
+ last_x = win_data->offset_x + win_data->ovl_width;
+ if (last_x)
+ last_x--;
+ last_y = win_data->offset_y + win_data->ovl_height;
+ if (last_y)
+ last_y--;
+
+ val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+ VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y,
- win_data->offset_x + win_data->ovl_width - 1,
- win_data->offset_y + win_data->ovl_height - 1);
+ win_data->offset_x, win_data->offset_y, last_x, last_y);
/* hardware window 0 doesn't support alpha channel. */
if (win != 0) {
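The hunk above replaces the unguarded offset + size - 1 arithmetic with a guarded decrement, so a zero-sized overlay cannot underflow the VIDOSDxB bottom-right fields. The same clamp in isolation, as a minimal sketch with hypothetical names:

#include <stdio.h>

/* Mirror of the bottom-right computation: decrement only when the sum is
 * non-zero, so an empty overlay cannot wrap around to a huge register value. */
static unsigned int last_coord(unsigned int offset, unsigned int size)
{
        unsigned int last = offset + size;

        if (last)
                last--;
        return last;
}

int main(void)
{
        printf("%u\n", last_coord(16, 640));    /* 655 */
        printf("%u\n", last_coord(0, 0));       /* 0, no underflow */
        return 0;
}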
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data = &ctx->win_data[win];
+ if (ctx->suspended) {
+ /* do not resume this window */
+ win_data->resume = false;
+ return;
+ }
+
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data->enabled = false;
}
-static void fimd_wait_for_vblank(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- int ret;
-
- ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
- VIDCON1_VSTATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
.mode_set = fimd_win_mode_set,
.commit = fimd_win_commit,
.disable = fimd_win_disable,
- .wait_for_vblank = fimd_wait_for_vblank,
};
static struct exynos_drm_manager fimd_manager = {
@@ -617,52 +663,6 @@ static struct exynos_drm_manager fimd_manager = {
.display_ops = &fimd_display_ops,
};
-static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
-{
- struct exynos_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- bool is_checked = false;
-
- spin_lock_irqsave(&drm_dev->event_lock, flags);
-
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (crtc != e->pipe)
- continue;
-
- is_checked = true;
-
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
-
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
- }
-
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-}
-
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
@@ -682,8 +682,13 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
goto out;
drm_handle_vblank(drm_dev, manager->pipe);
- fimd_finish_pageflip(drm_dev, manager->pipe);
+ exynos_drm_crtc_finish_pageflip(drm_dev, manager->pipe);
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
out:
return IRQ_HANDLED;
}
@@ -709,6 +714,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->vblank_disable_allowed = 1;
+ /* attach this sub driver to iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_attach_device(drm_dev, dev);
+
return 0;
}
@@ -716,7 +725,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* TODO. */
+ /* detach this sub driver from iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, dev);
}
static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +816,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
return 0;
}
+static void fimd_window_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ fimd_win_disable(dev, i);
+ }
+ fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
static int fimd_activate(struct fimd_context *ctx, bool enable)
{
+ struct device *dev = ctx->subdrv.dev;
if (enable) {
int ret;
- struct device *dev = ctx->subdrv.dev;
ret = fimd_clock(ctx, true);
if (ret < 0)
@@ -820,7 +858,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
fimd_enable_vblank(dev);
+
+ fimd_window_resume(dev);
} else {
+ fimd_window_suspend(dev);
+
fimd_clock(ctx, false);
ctx->suspended = true;
}
@@ -828,7 +870,7 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
return 0;
}
-static int __devinit fimd_probe(struct platform_device *pdev)
+static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
@@ -857,33 +899,28 @@ static int __devinit fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->bus_clk = clk_get(dev, "fimd");
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
- ret = PTR_ERR(ctx->bus_clk);
- goto err_clk_get;
+ return PTR_ERR(ctx->bus_clk);
}
- ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
- ret = PTR_ERR(ctx->lcd_clk);
- goto err_bus_clk;
+ return PTR_ERR(ctx->lcd_clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!ctx->regs) {
- dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
- goto err_clk;
- }
+ ctx->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->regs))
+ return PTR_ERR(ctx->regs);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return -ENXIO;
}
ctx->irq = res->start;
@@ -892,13 +929,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return ret;
}
ctx->vidcon0 = pdata->vidcon0;
ctx->vidcon1 = pdata->vidcon1;
ctx->default_win = pdata->default_win;
ctx->panel = panel;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
subdrv = &ctx->subdrv;
@@ -926,20 +965,9 @@ static int __devinit fimd_probe(struct platform_device *pdev)
exynos_drm_subdrv_register(subdrv);
return 0;
-
-err_clk:
- clk_disable(ctx->lcd_clk);
- clk_put(ctx->lcd_clk);
-
-err_bus_clk:
- clk_disable(ctx->bus_clk);
- clk_put(ctx->bus_clk);
-
-err_clk_get:
- return ret;
}
-static int __devexit fimd_remove(struct platform_device *pdev)
+static int fimd_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx = platform_get_drvdata(pdev);
@@ -960,9 +988,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
out:
pm_runtime_disable(dev);
- clk_put(ctx->lcd_clk);
- clk_put(ctx->bus_clk);
-
return 0;
}
@@ -991,7 +1016,7 @@ static int fimd_resume(struct device *dev)
* of pm runtime would still be 1 so in this case, fimd driver
* should be on directly not drawing on pm runtime interface.
*/
- if (pm_runtime_suspended(dev)) {
+ if (!pm_runtime_suspended(dev)) {
int ret;
ret = fimd_activate(ctx, true);
@@ -1050,11 +1075,12 @@ static const struct dev_pm_ops fimd_pm_ops = {
struct platform_driver fimd_driver = {
.probe = fimd_probe,
- .remove = __devexit_p(fimd_remove),
+ .remove = fimd_remove,
.id_table = fimd_driver_ids,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
.pm = &fimd_pm_ops,
+ .of_match_table = of_match_ptr(fimd_driver_dt_match),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..fb2f81b8063d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
+#define MAX_BUF_ADDR_NR 6
+
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL (64 * 1024 * 1024)
+
+enum {
+ BUF_TYPE_GEM = 1,
+ BUF_TYPE_USERPTR,
+};
+
/* cmdlist data structure */
struct g2d_cmdlist {
- u32 head;
- u32 data[G2D_CMDLIST_DATA_NUM];
- u32 last; /* last data offset */
+ u32 head;
+ unsigned long data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
};
struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
struct drm_exynos_g2d_event event;
};
-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
struct list_head list;
- unsigned int handle;
+ dma_addr_t dma_addr;
+ unsigned long userptr;
+ unsigned long size;
+ struct page **pages;
+ unsigned int npages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ atomic_t refcount;
+ bool in_pool;
+ bool out_of_list;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
- unsigned int gem_nr;
+ unsigned int map_nr;
+ unsigned long handles[MAX_BUF_ADDR_NR];
+ unsigned int obj_type[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
+ struct drm_file *filp;
pid_t pid;
struct completion complete;
int async;
@@ -143,23 +168,33 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
+ struct dma_attrs cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
+
+ unsigned long current_pool;
+ unsigned long max_pool;
};
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
- g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
- &g2d->cmdlist_pool, GFP_KERNEL);
+ init_dma_attrs(&g2d->cmdlist_dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+ G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL,
+ &g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct device *dev = g2d->dev;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
kfree(g2d->cmdlist_node);
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+ unsigned long obj,
+ bool force)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr =
+ (struct g2d_cmdlist_userptr *)obj;
+
+ if (!obj)
+ return;
+
+ if (force)
+ goto out;
+
+ atomic_dec(&g2d_userptr->refcount);
+
+ if (atomic_read(&g2d_userptr->refcount) > 0)
+ return;
+
+ if (g2d_userptr->in_pool)
+ return;
+
+out:
+ exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+ if (!g2d_userptr->out_of_list)
+ list_del_init(&g2d_userptr->list);
+
+ sg_free_table(g2d_userptr->sgt);
+ kfree(g2d_userptr->sgt);
+ g2d_userptr->sgt = NULL;
+
+ kfree(g2d_userptr->pages);
+ g2d_userptr->pages = NULL;
+ kfree(g2d_userptr);
+ g2d_userptr = NULL;
+}
+
+static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+ unsigned long userptr,
+ unsigned long size,
+ struct drm_file *filp,
+ unsigned long *obj)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr;
+ struct g2d_data *g2d;
+ struct page **pages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ unsigned int npages, offset;
+ int ret;
+
+ if (!size) {
+ DRM_ERROR("invalid userptr size.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ g2d = dev_get_drvdata(g2d_priv->dev);
+
+ /* check if userptr already exists in userptr_list. */
+ list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ if (g2d_userptr->userptr == userptr) {
+ /*
+ * also check the size, because the same address could be
+ * registered again with a different size.
+ */
+ if (g2d_userptr->size == size) {
+ atomic_inc(&g2d_userptr->refcount);
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+ }
+
+ /*
+ * at this point the g2d dma engine may still be accessing
+ * this g2d_userptr memory region, so remove the g2d_userptr
+ * object from userptr_list so that it is not looked up again,
+ * and exclude it from the userptr pool so that it is released
+ * only after the dma access has completed.
+ */
+ g2d_userptr->out_of_list = true;
+ g2d_userptr->in_pool = false;
+ list_del_init(&g2d_userptr->list);
+
+ break;
+ }
+ }
+
+ g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+ if (!g2d_userptr) {
+ DRM_ERROR("failed to allocate g2d_userptr.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&g2d_userptr->refcount, 1);
+
+ start = userptr & PAGE_MASK;
+ offset = userptr & ~PAGE_MASK;
+ end = PAGE_ALIGN(userptr + size);
+ npages = (end - start) >> PAGE_SHIFT;
+ g2d_userptr->npages = npages;
+
+ pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ kfree(g2d_userptr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vma = find_vma(current->mm, userptr);
+ if (!vma) {
+ DRM_ERROR("failed to get vm region.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ if (vma->vm_end < userptr + size) {
+ DRM_ERROR("vma is too small.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->vma = exynos_gem_get_vma(vma);
+ if (!g2d_userptr->vma) {
+ DRM_ERROR("failed to copy vma.\n");
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->size = size;
+
+ ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+ npages, pages, vma);
+ if (ret < 0) {
+ DRM_ERROR("failed to get user pages from userptr.\n");
+ goto err_put_vma;
+ }
+
+ g2d_userptr->pages = pages;
+
+ sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+ if (!sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_userptr;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+ size, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to get sgt from pages.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->sgt = sgt;
+
+ ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+ if (ret < 0) {
+ DRM_ERROR("failed to map sgt with dma region.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+ g2d_userptr->userptr = userptr;
+
+ list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+ if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+ g2d->current_pool += npages << PAGE_SHIFT;
+ g2d_userptr->in_pool = true;
+ }
+
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = NULL;
+
+err_free_userptr:
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+err_put_vma:
+ exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+ kfree(pages);
+ kfree(g2d_userptr);
+ pages = NULL;
+ g2d_userptr = NULL;
+
+ return ERR_PTR(ret);
+}
+
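Userptr mappings whose cumulative size stays under the 64MB MAX_POOL budget are flagged in_pool and kept cached until the file closes; anything beyond the budget is torn down as soon as its refcount drops. A toy model of that admission accounting (hypothetical names, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

#define MAX_POOL (64UL * 1024 * 1024)   /* same 64MB default as above */

static unsigned long current_pool;

/* Decide whether a freshly pinned mapping of 'size' bytes may stay cached. */
static bool pool_admit(unsigned long size)
{
        if (current_pool + size < MAX_POOL) {
                current_pool += size;
                return true;            /* kept in the pool, freed lazily */
        }
        return false;                   /* released as soon as it is unused */
}

int main(void)
{
        printf("%d\n", pool_admit(32UL * 1024 * 1024)); /* 1: fits the budget */
        printf("%d\n", pool_admit(40UL * 1024 * 1024)); /* 0: over the budget */
        printf("pool = %lu bytes\n", current_pool);
        return 0;
}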
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+ struct g2d_data *g2d,
+ struct drm_file *filp)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+ list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ if (g2d_userptr->in_pool)
+ g2d_userptr_put_dma_addr(drm_dev,
+ (unsigned long)g2d_userptr,
+ true);
+
+ g2d->current_pool = 0;
+}
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_device *drm_dev,
+ struct drm_file *file)
+{
struct g2d_cmdlist *cmdlist = node->cmdlist;
- dma_addr_t *addr;
int offset;
int i;
- for (i = 0; i < node->gem_nr; i++) {
- struct g2d_gem_node *gem_node;
-
- gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
- if (!gem_node) {
- dev_err(g2d_priv->dev, "failed to allocate gem node\n");
- return -ENOMEM;
- }
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle;
+ dma_addr_t *addr;
offset = cmdlist->last - (i * 2 + 1);
- gem_node->handle = cmdlist->data[offset];
-
- addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
- file);
- if (IS_ERR(addr)) {
- node->gem_nr = i;
- kfree(gem_node);
- return PTR_ERR(addr);
+ handle = cmdlist->data[offset];
+
+ if (node->obj_type[i] == BUF_TYPE_GEM) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+ } else {
+ struct drm_exynos_g2d_userptr g2d_userptr;
+
+ if (copy_from_user(&g2d_userptr, (void __user *)handle,
+ sizeof(struct drm_exynos_g2d_userptr))) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+
+ addr = g2d_userptr_get_dma_addr(drm_dev,
+ g2d_userptr.userptr,
+ g2d_userptr.size,
+ file,
+ &handle);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
}
cmdlist->data[offset] = *addr;
- list_add_tail(&gem_node->list, &g2d_priv->gem_list);
- g2d_priv->gem_nr++;
+ node->handles[i] = handle;
}
return 0;
}
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_file *filp)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct g2d_gem_node *node, *n;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ int i;
- list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
- if (!nr)
- break;
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle = node->handles[i];
- exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
- list_del_init(&node->list);
- kfree(node);
- nr--;
+ if (node->obj_type[i] == BUF_TYPE_GEM)
+ exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+ filp);
+ else
+ g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+ false);
+
+ node->handles[i] = 0;
}
+
+ node->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
+ struct g2d_cmdlist_node *node;
+
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
+ /*
+ * commands in run_cmdlist have completed, so unmap all gem
+ * objects in each command node to drop their references.
+ */
+ list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+ g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+ struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
index = cmdlist->last - 2 * (i + 1);
+
+ if (for_addr) {
+ /* check userptr buffer type. */
+ reg_offset = (cmdlist->data[index] &
+ ~0x7fffffff) >> 31;
+ if (reg_offset) {
+ node->obj_type[i] = BUF_TYPE_USERPTR;
+ cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+ }
+ }
+
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
+
+ if (node->obj_type[i] != BUF_TYPE_USERPTR)
+ node->obj_type[i] = BUF_TYPE_GEM;
break;
default:
if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+ dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
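g2d_check_reg_offset now tags userptr buffers through the top bit of the command value: when bit 31 (G2D_BUF_USERPTR) is set, the node is marked BUF_TYPE_USERPTR and the flag is stripped before the value is interpreted. A minimal sketch of that tag-and-strip pattern (the flag name and helper are hypothetical):

#include <stdio.h>

#define BUF_USERPTR_FLAG (1UL << 31)    /* stand-in for G2D_BUF_USERPTR */

/* Use the top bit of a value as a type tag: report it, then strip it so the
 * remaining bits can be interpreted as a plain register offset or handle. */
static unsigned long untag(unsigned long val, int *is_userptr)
{
        *is_userptr = !!(val & BUF_USERPTR_FLAG);
        return val & ~BUF_USERPTR_FLAG;
}

int main(void)
{
        int userptr;
        unsigned long val = untag(BUF_USERPTR_FLAG | 0x304, &userptr);

        printf("userptr=%d offset=0x%lx\n", userptr, val);      /* 1, 0x304 */
        return 0;
}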
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
- size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
- node->gem_nr = req->cmd_gem_nr;
- if (req->cmd_gem_nr) {
- struct drm_exynos_g2d_cmd *cmd_gem;
+ node->map_nr = req->cmd_buf_nr;
+ if (req->cmd_buf_nr) {
+ struct drm_exynos_g2d_cmd *cmd_buf;
- cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+ cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
- (void __user *)cmd_gem,
- sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+ (void __user *)cmd_buf,
+ sizeof(*cmd_buf) * req->cmd_buf_nr)) {
ret = -EFAULT;
goto err_free_event;
}
- cmdlist->last += req->cmd_gem_nr * 2;
+ cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
- ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+ ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
if (ret < 0)
goto err_unmap;
}
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
return 0;
err_unmap:
- g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+ g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
+ runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct g2d_data *g2d;
+ int ret;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ if (!is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ }
+
+ return ret;
+
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ if (!is_drm_iommu_supported(drm_dev))
+ return;
+
+ drm_iommu_detach_device(drm_dev, dev);
+}
+
static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->gem_list);
+ INIT_LIST_HEAD(&g2d_priv->userptr_list);
return 0;
}
@@ -734,16 +1071,26 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
return;
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ /*
+ * unmap all gem objects that have not completed.
+ *
+ * P.S. if the current process was terminated forcibly, there
+ * may still be some commands left in inuse_cmdlist, so unmap
+ * them here.
+ */
+ g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
+ }
mutex_unlock(&g2d->cmdlist_mutex);
- g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+ /* release all g2d_userptr in pool. */
+ g2d_userptr_free_all(drm_dev, g2d, file);
kfree(file_priv->g2d_priv);
}
-static int __devinit g2d_probe(struct platform_device *pdev)
+static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -778,25 +1125,20 @@ static int __devinit g2d_probe(struct platform_device *pdev)
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0)
- goto err_destroy_workqueue;
-
- g2d->gate_clk = clk_get(dev, "fimg2d");
+ g2d->gate_clk = devm_clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
- goto err_fini_cmdlist;
+ goto err_destroy_workqueue;
}
pm_runtime_enable(dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g2d->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!g2d->regs) {
- dev_err(dev, "failed to remap I/O memory\n");
- ret = -ENXIO;
+ g2d->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(g2d->regs)) {
+ ret = PTR_ERR(g2d->regs);
goto err_put_clk;
}
@@ -814,10 +1156,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ g2d->max_pool = MAX_POOL;
+
platform_set_drvdata(pdev, g2d);
subdrv = &g2d->subdrv;
subdrv->dev = dev;
+ subdrv->probe = g2d_subdrv_probe;
+ subdrv->remove = g2d_subdrv_remove;
subdrv->open = g2d_open;
subdrv->close = g2d_close;
@@ -834,9 +1180,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
err_put_clk:
pm_runtime_disable(dev);
- clk_put(g2d->gate_clk);
-err_fini_cmdlist:
- g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
@@ -844,7 +1187,7 @@ err_destroy_slab:
return ret;
}
-static int __devexit g2d_remove(struct platform_device *pdev)
+static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
@@ -857,7 +1200,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
- clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
@@ -899,7 +1241,7 @@ static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
- .remove = __devexit_p(g2d_remove),
+ .remove = g2d_remove,
.driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..473180776528 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -83,157 +69,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
- if (!IS_NONCONTIG_BUFFER(flags)) {
- if (size >= SZ_1M)
- return roundup(size, SECTION_SIZE);
- else if (size >= SZ_64K)
- return roundup(size, SZ_64K);
- else
- goto out;
- }
-out:
- return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct page *p, **pages;
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < npages; i++) {
- p = alloc_page(gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
- }
-
- return pages;
-
-fail:
- while (--i)
- __free_page(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages)
-{
- int npages;
+ /* TODO */
- npages = obj->size >> PAGE_SHIFT;
-
- while (--npages >= 0)
- __free_page(pages[npages]);
-
- drm_free_large(pages);
+ return roundup(size, PAGE_SIZE);
}
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
unsigned long pfn;
+ int i;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- if (!buf->pages)
- return -EINTR;
-
- pfn = page_to_pfn(buf->pages[page_offset++]);
- } else
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
- return vm_insert_mixed(vma, f_vaddr, pfn);
-}
-
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- struct page **pages;
- unsigned int npages, i = 0;
- int ret;
+ if (!buf->sgt)
+ return -EINTR;
- if (buf->pages) {
- DRM_DEBUG_KMS("already allocated.\n");
+ if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
- if (IS_ERR(pages)) {
- DRM_ERROR("failed to get pages.\n");
- return PTR_ERR(pages);
- }
-
- npages = obj->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
-
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- ret = -EFAULT;
- goto err1;
- }
-
sgl = buf->sgt->sgl;
-
- /* set all pages to sg list. */
- while (i < npages) {
- sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
- sg_dma_address(sgl) = page_to_phys(pages[i]);
- i++;
- sgl = sg_next(sgl);
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
}
- /* add some codes for UNCACHED type here. TODO */
-
- buf->pages = pages;
- return ret;
-err1:
- kfree(buf->sgt);
- buf->sgt = NULL;
-err:
- exynos_gem_put_pages(obj, pages);
- return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
- /*
- * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
- * allocated at gem fault handler.
- */
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- exynos_gem_put_pages(obj, buf->pages);
- buf->pages = NULL;
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
- /* add some codes for UNCACHED type here. TODO */
+ return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
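With the buffer now described only by its sg_table, the fault path above resolves a faulting page offset by walking the scatter-gather segments and subtracting each segment's page count until the offset lands inside one. A plain-C sketch of that walk over an array of segment lengths (hypothetical data, not the sg_table API):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Walk segment lengths (in bytes) until page_offset falls inside one;
 * returns the segment index, or -1 when the offset is out of range. */
static int find_segment(const unsigned long *lengths, size_t nents,
                        unsigned long *page_offset)
{
        size_t i;

        for (i = 0; i < nents; i++) {
                unsigned long seg_pages = lengths[i] >> PAGE_SHIFT;

                if (*page_offset < seg_pages)
                        return (int)i;
                *page_offset -= seg_pages;
        }
        return -1;
}

int main(void)
{
        unsigned long lengths[] = { 16UL << PAGE_SHIFT, 8UL << PAGE_SHIFT };
        unsigned long off = 20;         /* fault on the 20th page of the buffer */
        int seg = find_segment(lengths, 2, &off);

        printf("segment %d, page %lu within it\n", seg, off);   /* 1, 4 */
        return 0;
}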
@@ -270,9 +139,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if (!buf->pages)
- return;
-
/*
* do not release memory region from exporter.
*
@@ -282,10 +148,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
- exynos_drm_gem_put_pages(obj);
- else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +227,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- /*
- * allocate all pages as desired size if user wants to allocate
- * physically non-continuous memory.
- */
- if (flags & EXYNOS_BO_NONCONTIG) {
- ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
- } else {
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
}
return exynos_gem_obj;
@@ -412,14 +263,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
@@ -427,25 +278,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
@@ -453,14 +296,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return;
- }
-
drm_gem_object_unreference_unlocked(obj);
/*
@@ -489,22 +324,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+ struct file *filp)
+{
+ struct drm_file *file_priv;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ /* find current process's drm_file from filelist. */
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+ if (file_priv->filp == filp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return file_priv;
+ }
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ WARN_ON(1);
+
+ return ERR_PTR(-EFAULT);
+}
+
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+ struct drm_file *file_priv;
+ unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = obj;
+ vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+ /* restore it to driver's fops. */
+ filp->f_op = fops_get(drm_dev->driver->fops);
+
+ file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+ if (IS_ERR(file_priv))
+ return PTR_ERR(file_priv);
+
+ /* restore it to drm_file. */
+ filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
- vm_size = usize = vma->vm_end - vma->vm_start;
+ vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer contains information to physically continuous memory
@@ -516,40 +386,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- int i = 0;
-
- if (!buffer->pages)
- return -EINVAL;
+ ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size,
+ &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
- vma->vm_flags |= VM_MIXEDMAP;
+ /*
+ * take a reference to this mapping of the object; the reference
+ * is dropped by the corresponding vm_close call.
+ */
+ drm_gem_object_reference(obj);
- do {
- ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
- if (ret) {
- DRM_ERROR("failed to remap user space.\n");
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- } else {
- /*
- * get page frame number to physical memory to be mapped
- * to user space.
- */
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
- PAGE_SHIFT;
-
- DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
- vma->vm_page_prot)) {
- DRM_ERROR("failed to remap pfn range.\n");
- return -EAGAIN;
- }
- }
+ mutex_lock(&drm_dev->struct_mutex);
+ drm_vm_open_locked(drm_dev, vma);
+ mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -578,16 +431,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- obj->filp->f_op = &exynos_drm_gem_fops;
- obj->filp->private_data = obj;
+ /*
+ * Set the specific mmaper's fops; it will be restored to
+ * dev->driver->fops by exynos_drm_gem_mmap_buffer.
+ * This is used to call the specific mapper temporarily.
+ */
+ file_priv->filp->f_op = &exynos_drm_gem_fops;
- addr = vm_mmap(obj->filp, 0, args->size,
+ /*
+ * Store the gem object in private_data so that the specific
+ * mmaper can get at it; it will be restored to the drm_file
+ * by exynos_drm_gem_mmap_buffer.
+ */
+ file_priv->filp->private_data = obj;
+
+ addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference_unlocked(obj);
- if (IS_ERR((void *)addr))
+ if (IS_ERR((void *)addr)) {
+ file_priv->filp->private_data = file_priv;
return PTR_ERR((void *)addr);
+ }
args->mapped = addr;
@@ -622,6 +488,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (!vma_copy)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma)
+{
+ int get_npages;
+
+ /* the memory region was mmapped with VM_PFNMAP. */
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ pages[i] = pfn_to_page(pfn);
+ }
+
+ if (i != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ get_npages = get_user_pages(current, current->mm, start,
+ npages, 1, 1, pages, NULL);
+ get_npages = max(get_npages, 0);
+ if (get_npages != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ while (get_npages)
+ put_page(pages[--get_npages]);
+ return -EFAULT;
+ }
+
+ return 0;
+}
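exynos_gem_get_pages_from_userptr above releases the page references it already took whenever get_user_pages() pins fewer pages than requested. The same acquire-all-or-roll-back pattern, reduced to a self-contained sketch (the resource helpers are hypothetical):

#include <stdio.h>

static int acquired[8];

/* Pretend acquisition that fails on index 5, to exercise the rollback. */
static int acquire(int i)
{
        if (i == 5)
                return -1;
        acquired[i] = 1;
        return 0;
}

static void release(int i)
{
        acquired[i] = 0;
}

/* Acquire n resources; on any failure, release the ones already held. */
static int acquire_all(int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (acquire(i) < 0) {
                        while (i)
                                release(--i);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        printf("acquire_all(4) = %d\n", acquire_all(4));        /*  0 */
        printf("acquire_all(8) = %d\n", acquire_all(8));        /* -1, rolled back */
        return 0;
}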
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma)
+{
+ if (!vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(pages[i]);
+
+ /*
+ * undo the reference we took when populating
+ * the table.
+ */
+ put_page(pages[i]);
+ }
+ }
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ int nents;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with dma.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+ return nents;
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
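As a rough, hedged sketch of how a client driver might combine the helpers above to pin a user pointer and make it visible to the device; the function name, the use of find_vma()/sg_alloc_table_from_pages() and the error-path ordering are illustrative assumptions, not code from this patch:

    /* hedged sketch: pin a userptr range and map it for device DMA */
    static int example_map_userptr(struct drm_device *drm_dev,
                                   unsigned long userptr, unsigned int npages,
                                   struct page **pages, struct sg_table *sgt)
    {
            struct vm_area_struct *vma;
            int ret;

            /* caller is assumed to hold current->mm->mmap_sem for read */
            vma = find_vma(current->mm, userptr);
            if (!vma)
                    return -EFAULT;

            /* keep a stable copy of the VMA while the pages are in use */
            vma = exynos_gem_get_vma(vma);
            if (!vma)
                    return -ENOMEM;

            ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
            if (ret)
                    goto err_put_vma;

            /* build an sg table over the pages and hand it to the DMA API */
            ret = sg_alloc_table_from_pages(sgt, pages, npages, 0,
                                            (unsigned long)npages << PAGE_SHIFT,
                                            GFP_KERNEL);
            if (ret)
                    goto err_put_pages;

            ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
            if (ret)
                    goto err_free_sgt;

            return 0;

    err_free_sgt:
            sg_free_table(sgt);
    err_put_pages:
            exynos_gem_put_pages_to_userptr(pages, npages, vma);
    err_put_vma:
            exynos_gem_put_vma(vma);
            return ret;
    }

Tear-down would run the same steps in reverse: exynos_gem_unmap_sgt_from_dma(), sg_free_table(), exynos_gem_put_pages_to_userptr() and finally exynos_gem_put_vma().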
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +742,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+ ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
- DRM_ERROR("failed to map pages.\n");
+ DRM_ERROR("failed to map a buffer to user space.\n");
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..35ebac47dc2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_GEM_H_
@@ -35,21 +21,27 @@
* exynos drm gem buffer structure.
*
* @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
* @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
+ * @pfnmap: indicates whether the memory region from userptr is mapped
+ * with VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
+ unsigned long userptr;
dma_addr_t dma_addr;
- struct sg_table *sgt;
+ struct dma_attrs dma_attrs;
+ unsigned int write;
struct page **pages;
- unsigned long page_size;
+ struct sg_table *sgt;
unsigned long size;
+ bool pfnmap;
};
/*
@@ -65,6 +57,7 @@ struct exynos_drm_gem_buf {
* or at framebuffer creation.
 * @size: size requested from user, in bytes; this size is aligned
 * in page units.
+ * @vma: a pointer to vm_area.
 * @flags: indicates the memory type of the allocated buffer and its cache attribute.
*
* P.S. this object would be transfered to user as kms_bo.handle so
@@ -74,6 +67,7 @@ struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
unsigned long size;
+ struct vm_area_struct *vma;
unsigned int flags;
};
@@ -104,9 +98,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/*
* put dma address from gem handle and this function could be used for
@@ -115,7 +109,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
*/
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +122,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* map user space memory allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -163,4 +161,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags; the vm attributes can be changed to other ones here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 000000000000..7841c3b8a20e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1836 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaler/rotator and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width and height.
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS 4
+#define GSC_MAX_SRC 4
+#define GSC_MAX_DST 16
+#define GSC_RESET_TIMEOUT 50
+#define GSC_BUF_STOP 1
+#define GSC_BUF_START 2
+#define GSC_REG_SZ 16
+#define GSC_WIDTH_ITU_709 1280
+#define GSC_SC_UP_MAX_RATIO 65536
+#define GSC_SC_DOWN_RATIO_7_8 74898
+#define GSC_SC_DOWN_RATIO_6_8 87381
+#define GSC_SC_DOWN_RATIO_5_8 104857
+#define GSC_SC_DOWN_RATIO_4_8 131072
+#define GSC_SC_DOWN_RATIO_3_8 174762
+#define GSC_SC_DOWN_RATIO_2_8 262144
+#define GSC_REFRESH_MIN 12
+#define GSC_REFRESH_MAX 60
+#define GSC_CROP_MAX 8192
+#define GSC_CROP_MIN 32
+#define GSC_SCALE_MAX 4224
+#define GSC_SCALE_MIN 32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR 16
+#define GSC_COEF_H_8T 8
+#define GSC_COEF_V_4T 4
+#define GSC_COEF_DEPTH 3
+
+#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct gsc_context, ippdrv);
+#define gsc_read(offset) readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+ bool range;
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ unsigned long main_hratio;
+ unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * Refer to the user manual, section 49.2 (features).
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+ /* tile or rotation */
+ u32 tile_w;
+ u32 tile_h;
+ /* other cases */
+ u32 w;
+ u32 h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: indicates whether the src image is rotated by 90 or 270 degrees.
+ * @suspended: indicates whether the device is suspended (used for qos operations).
+ */
+struct gsc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *gsc_clk;
+ struct gsc_scaler sc;
+ int id;
+ int irq;
+ bool rotation;
+ bool suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { -1, 2, -6, 127, 7, -2, 1, 0 },
+ { -1, 4, -12, 125, 16, -5, 1, 0 },
+ { -1, 5, -15, 120, 25, -8, 2, 0 },
+ { -1, 6, -18, 114, 35, -10, 3, -1 },
+ { -1, 6, -20, 107, 46, -13, 4, -1 },
+ { -2, 7, -21, 99, 57, -16, 5, -1 },
+ { -1, 6, -20, 89, 68, -18, 5, -1 },
+ { -1, 6, -20, 79, 79, -20, 6, -1 },
+ { -1, 5, -18, 68, 89, -20, 6, -1 },
+ { -1, 5, -16, 57, 99, -21, 7, -2 },
+ { -1, 4, -13, 46, 107, -20, 6, -1 },
+ { -1, 3, -10, 35, 114, -18, 6, -1 },
+ { 0, 2, -8, 25, 120, -15, 5, -1 },
+ { 0, 1, -5, 16, 125, -12, 4, -1 },
+ { 0, 1, -2, 7, 127, -6, 2, -1 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 3, -8, 14, 111, 13, -8, 3, 0 },
+ { 2, -6, 7, 112, 21, -10, 3, -1 },
+ { 2, -4, 1, 110, 28, -12, 4, -1 },
+ { 1, -2, -3, 106, 36, -13, 4, -1 },
+ { 1, -1, -7, 103, 44, -15, 4, -1 },
+ { 1, 1, -11, 97, 53, -16, 4, -1 },
+ { 0, 2, -13, 91, 61, -16, 4, -1 },
+ { 0, 3, -15, 85, 69, -17, 4, -1 },
+ { 0, 3, -16, 77, 77, -16, 3, 0 },
+ { -1, 4, -17, 69, 85, -15, 3, 0 },
+ { -1, 4, -16, 61, 91, -13, 2, 0 },
+ { -1, 4, -16, 53, 97, -11, 1, 1 },
+ { -1, 4, -15, 44, 103, -7, -1, 1 },
+ { -1, 4, -13, 36, 106, -3, -2, 1 },
+ { -1, 4, -12, 28, 110, 1, -4, 2 },
+ { -1, 3, -10, 21, 112, 7, -6, 2 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 2, -11, 25, 96, 25, -11, 2, 0 },
+ { 2, -10, 19, 96, 31, -12, 2, 0 },
+ { 2, -9, 14, 94, 37, -12, 2, 0 },
+ { 2, -8, 10, 92, 43, -12, 1, 0 },
+ { 2, -7, 5, 90, 49, -12, 1, 0 },
+ { 2, -5, 1, 86, 55, -12, 0, 1 },
+ { 2, -4, -2, 82, 61, -11, -1, 1 },
+ { 1, -3, -5, 77, 67, -9, -1, 1 },
+ { 1, -2, -7, 72, 72, -7, -2, 1 },
+ { 1, -1, -9, 67, 77, -5, -3, 1 },
+ { 1, -1, -11, 61, 82, -2, -4, 2 },
+ { 1, 0, -12, 55, 86, 1, -5, 2 },
+ { 0, 1, -12, 49, 90, 5, -7, 2 },
+ { 0, 1, -12, 43, 92, 10, -8, 2 },
+ { 0, 2, -12, 37, 94, 14, -9, 2 },
+ { 0, 2, -12, 31, 96, 19, -10, 2 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { -1, -8, 33, 80, 33, -8, -1, 0 },
+ { -1, -8, 28, 80, 37, -7, -2, 1 },
+ { 0, -8, 24, 79, 41, -7, -2, 1 },
+ { 0, -8, 20, 78, 46, -6, -3, 1 },
+ { 0, -8, 16, 76, 50, -4, -3, 1 },
+ { 0, -7, 13, 74, 54, -3, -4, 1 },
+ { 1, -7, 10, 71, 58, -1, -5, 1 },
+ { 1, -6, 6, 68, 62, 1, -5, 1 },
+ { 1, -6, 4, 65, 65, 4, -6, 1 },
+ { 1, -5, 1, 62, 68, 6, -6, 1 },
+ { 1, -5, -1, 58, 71, 10, -7, 1 },
+ { 1, -4, -3, 54, 74, 13, -7, 0 },
+ { 1, -3, -4, 50, 76, 16, -8, 0 },
+ { 1, -3, -6, 46, 78, 20, -8, 0 },
+ { 1, -2, -7, 41, 79, 24, -8, 0 },
+ { 1, -2, -7, 37, 80, 28, -8, -1 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { -3, 0, 35, 64, 35, 0, -3, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 },
+ { -2, -3, 27, 63, 43, 4, -4, 0 },
+ { -2, -3, 24, 61, 46, 6, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 },
+ { -1, -4, 19, 59, 51, 9, -4, -1 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 },
+ { -1, -4, 9, 51, 59, 19, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 },
+ { 0, -4, 6, 46, 61, 24, -3, -2 },
+ { 0, -4, 4, 43, 63, 27, -3, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { -1, 8, 33, 48, 33, 8, -1, 0 },
+ { -1, 7, 31, 49, 35, 9, -1, -1 },
+ { -1, 6, 30, 49, 36, 10, -1, -1 },
+ { -1, 5, 28, 48, 38, 12, -1, -1 },
+ { -1, 4, 26, 48, 39, 13, 0, -1 },
+ { -1, 3, 24, 47, 41, 15, 0, -1 },
+ { -1, 2, 23, 47, 42, 16, 0, -1 },
+ { -1, 2, 21, 45, 43, 18, 1, -1 },
+ { -1, 1, 19, 45, 45, 19, 1, -1 },
+ { -1, 1, 18, 43, 45, 21, 2, -1 },
+ { -1, 0, 16, 42, 47, 23, 2, -1 },
+ { -1, 0, 15, 41, 47, 24, 3, -1 },
+ { -1, 0, 13, 39, 48, 26, 4, -1 },
+ { -1, -1, 12, 38, 48, 28, 5, -1 },
+ { -1, -1, 10, 36, 49, 30, 6, -1 },
+ { -1, -1, 9, 35, 49, 31, 7, -1 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 2, 13, 30, 38, 30, 13, 2, 0 },
+ { 2, 12, 29, 38, 30, 14, 3, 0 },
+ { 2, 11, 28, 38, 31, 15, 3, 0 },
+ { 2, 10, 26, 38, 32, 16, 4, 0 },
+ { 1, 10, 26, 37, 33, 17, 4, 0 },
+ { 1, 9, 24, 37, 34, 18, 5, 0 },
+ { 1, 8, 24, 37, 34, 19, 5, 0 },
+ { 1, 7, 22, 36, 35, 20, 6, 1 },
+ { 1, 6, 21, 36, 36, 21, 6, 1 },
+ { 1, 6, 20, 35, 36, 22, 7, 1 },
+ { 0, 5, 19, 34, 37, 24, 8, 1 },
+ { 0, 5, 18, 34, 37, 24, 9, 1 },
+ { 0, 4, 17, 33, 37, 26, 10, 1 },
+ { 0, 4, 16, 32, 38, 26, 10, 2 },
+ { 0, 3, 15, 31, 38, 28, 11, 2 },
+ { 0, 3, 14, 30, 38, 29, 12, 2 }
+ }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 128, 0, 0 },
+ { -4, 127, 5, 0 },
+ { -6, 124, 11, -1 },
+ { -8, 118, 19, -1 },
+ { -8, 111, 27, -2 },
+ { -8, 102, 37, -3 },
+ { -8, 92, 48, -4 },
+ { -7, 81, 59, -5 },
+ { -6, 70, 70, -6 },
+ { -5, 59, 81, -7 },
+ { -4, 48, 92, -8 },
+ { -3, 37, 102, -8 },
+ { -2, 27, 111, -8 },
+ { -1, 19, 118, -8 },
+ { -1, 11, 124, -6 },
+ { 0, 5, 127, -4 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 8, 112, 8, 0 },
+ { 4, 111, 14, -1 },
+ { 1, 109, 20, -2 },
+ { -2, 105, 27, -2 },
+ { -3, 100, 34, -3 },
+ { -5, 93, 43, -3 },
+ { -5, 86, 51, -4 },
+ { -5, 77, 60, -4 },
+ { -5, 69, 69, -5 },
+ { -4, 60, 77, -5 },
+ { -4, 51, 86, -5 },
+ { -3, 43, 93, -5 },
+ { -3, 34, 100, -3 },
+ { -2, 27, 105, -2 },
+ { -2, 20, 109, 1 },
+ { -1, 14, 111, 4 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 16, 96, 16, 0 },
+ { 12, 97, 21, -2 },
+ { 8, 96, 26, -2 },
+ { 5, 93, 32, -2 },
+ { 2, 89, 39, -2 },
+ { 0, 84, 46, -2 },
+ { -1, 79, 53, -3 },
+ { -2, 73, 59, -2 },
+ { -2, 66, 66, -2 },
+ { -2, 59, 73, -2 },
+ { -3, 53, 79, -1 },
+ { -2, 46, 84, 0 },
+ { -2, 39, 89, 2 },
+ { -2, 32, 93, 5 },
+ { -2, 26, 96, 8 },
+ { -2, 21, 97, 12 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { 22, 84, 22, 0 },
+ { 18, 85, 26, -1 },
+ { 14, 84, 31, -1 },
+ { 11, 82, 36, -1 },
+ { 8, 79, 42, -1 },
+ { 6, 76, 47, -1 },
+ { 4, 72, 52, 0 },
+ { 2, 68, 58, 0 },
+ { 1, 63, 63, 1 },
+ { 0, 58, 68, 2 },
+ { 0, 52, 72, 4 },
+ { -1, 47, 76, 6 },
+ { -1, 42, 79, 8 },
+ { -1, 36, 82, 11 },
+ { -1, 31, 84, 14 },
+ { -1, 26, 85, 18 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { 26, 76, 26, 0 },
+ { 22, 76, 30, 0 },
+ { 19, 75, 34, 0 },
+ { 16, 73, 38, 1 },
+ { 13, 71, 43, 1 },
+ { 10, 69, 47, 2 },
+ { 8, 66, 51, 3 },
+ { 6, 63, 55, 4 },
+ { 5, 59, 59, 5 },
+ { 4, 55, 63, 6 },
+ { 3, 51, 66, 8 },
+ { 2, 47, 69, 10 },
+ { 1, 43, 71, 13 },
+ { 1, 38, 73, 16 },
+ { 0, 34, 75, 19 },
+ { 0, 30, 76, 22 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { 29, 70, 29, 0 },
+ { 26, 68, 32, 2 },
+ { 23, 67, 36, 2 },
+ { 20, 66, 39, 3 },
+ { 17, 65, 43, 3 },
+ { 15, 63, 46, 4 },
+ { 12, 61, 50, 5 },
+ { 10, 58, 53, 7 },
+ { 8, 56, 56, 8 },
+ { 7, 53, 58, 10 },
+ { 5, 50, 61, 12 },
+ { 4, 46, 63, 15 },
+ { 3, 43, 65, 17 },
+ { 3, 39, 66, 20 },
+ { 2, 36, 67, 23 },
+ { 2, 32, 68, 26 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 32, 64, 32, 0 },
+ { 28, 63, 34, 3 },
+ { 25, 62, 37, 4 },
+ { 22, 62, 40, 4 },
+ { 19, 61, 43, 5 },
+ { 17, 59, 46, 6 },
+ { 15, 58, 48, 7 },
+ { 13, 55, 51, 9 },
+ { 11, 53, 53, 11 },
+ { 9, 51, 55, 13 },
+ { 7, 48, 58, 15 },
+ { 6, 46, 59, 17 },
+ { 5, 43, 61, 19 },
+ { 4, 40, 62, 22 },
+ { 4, 37, 62, 25 },
+ { 3, 34, 63, 28 }
+ }
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+ u32 cfg;
+ int count = GSC_RESET_TIMEOUT;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* s/w reset */
+ cfg = (GSC_SW_RESET_SRESET);
+ gsc_write(cfg, GSC_SW_RESET);
+
+ /* wait s/w reset complete */
+ while (count--) {
+ cfg = gsc_read(GSC_SW_RESET);
+ if (!cfg)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cfg) {
+ DRM_ERROR("failed to reset gsc h/w.\n");
+ return -EBUSY;
+ }
+
+ /* reset sequence */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_IN_BASE_ADDR_MASK |
+ GSC_IN_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_OUT_BASE_ADDR_MASK |
+ GSC_OUT_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+ u32 gscblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+ if (enable)
+ gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+ GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+ GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+ else
+ gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+ writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+ bool overflow, bool done)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]done[%d]\n", __func__,
+ enable, overflow, done);
+
+ cfg = gsc_read(GSC_IRQ);
+ cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+ if (enable)
+ cfg |= GSC_IRQ_ENABLE;
+ else
+ cfg &= ~GSC_IRQ_ENABLE;
+
+ if (overflow)
+ cfg &= ~GSC_IRQ_OR_MASK;
+ else
+ cfg |= GSC_IRQ_OR_MASK;
+
+ if (done)
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ else
+ cfg |= GSC_IRQ_FRMDONE_MASK;
+
+ gsc_write(cfg, GSC_IRQ);
+}
+
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+ GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_IN_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_IN_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_IN_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+ GSC_SRCIMG_OFFSET_Y(img_pos.y));
+ gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+ /* cropped size */
+ cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+ GSC_CROPPED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_CROPPED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_SRCIMG_SIZE);
+ cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+ GSC_SRCIMG_WIDTH_MASK);
+
+ cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+ GSC_SRCIMG_HEIGHT(sz->vsize));
+
+ gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709)
+ if (sc->range)
+ cfg |= GSC_IN_RGB_HD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_HD_NARROW;
+ else
+ if (sc->range)
+ cfg |= GSC_IN_RGB_SD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_SD_NARROW;
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ return -EINVAL;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static int gsc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_SRC) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+ .set_fmt = gsc_src_set_fmt,
+ .set_transf = gsc_src_set_transf,
+ .set_size = gsc_src_set_size,
+ .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+ GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+ GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+ GSC_OUT_GLOBAL_ALPHA_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_OUT_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_OUT_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_OUT_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+ GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 8) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 4)
+ *ratio = 4;
+ else if (src >= dst * 2)
+ *ratio = 2;
+ else
+ *ratio = 1;
+
+ return 0;
+}
+
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+ if (hratio == 4 && vratio == 4)
+ *shfactor = 4;
+ else if ((hratio == 4 && vratio == 2) ||
+ (hratio == 2 && vratio == 4))
+ *shfactor = 3;
+ else if ((hratio == 4 && vratio == 1) ||
+ (hratio == 1 && vratio == 4) ||
+ (hratio == 2 && vratio == 2))
+ *shfactor = 2;
+ else if (hratio == 1 && vratio == 1)
+ *shfactor = 0;
+ else
+ *shfactor = 1;
+}
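The branches above amount to a small lookup; a few worked values, derived directly from the code (no additional behaviour is implied):

    /*
     * pre_hratio = 4, pre_vratio = 4          ->  shfactor = 4
     * pre_hratio = 4, pre_vratio = 2 (or 2,4) ->  shfactor = 3
     * pre_hratio = 2, pre_vratio = 2 (or 4,1) ->  shfactor = 2
     * pre_hratio = 1, pre_vratio = 1          ->  shfactor = 0
     * any other combination (e.g. 2 and 1)    ->  shfactor = 1
     */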
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+ u32 src_w, src_h, dst_w, dst_h;
+ int ret = 0;
+
+ src_w = src->w;
+ src_h = src->h;
+
+ if (ctx->rotation) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+ __func__, sc->pre_hratio, sc->pre_vratio);
+
+ sc->main_hratio = (src_w << 16) / dst_w;
+ sc->main_vratio = (src_h << 16) / dst_h;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+ &sc->pre_shfactor);
+
+ DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+ sc->pre_shfactor);
+
+ cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+ GSC_PRESC_H_RATIO(sc->pre_hratio) |
+ GSC_PRESC_V_RATIO(sc->pre_vratio));
+ gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+ return ret;
+}
+
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_H_8T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(h_coef_8t[sc_ratio][i][j],
+ GSC_HCOEF(i, j, k));
+}
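As a worked example of the band selection above: the main ratio is 16.16 fixed point, computed in gsc_set_prescaler() as (src_w << 16) / dst_w, so a 1920 to 1280 horizontal downscale gives 1920 * 65536 / 1280 = 98304. That value is above GSC_SC_DOWN_RATIO_6_8 (87381) but not above GSC_SC_DOWN_RATIO_5_8 (104857), so sc_ratio becomes 3 and the ~8:5 bank of h_coef_8t is programmed. (The example figures are illustrative; the constants are the ones defined at the top of this file.)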
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_V_4T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(v_coef_4t[sc_ratio][i][j],
+ GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_set_h_coef(ctx, sc->main_hratio);
+ cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+ gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+ gsc_set_v_coef(ctx, sc->main_vratio);
+ cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+ gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+ GSC_DSTIMG_OFFSET_Y(pos->y));
+ gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+ /* scaled size */
+ cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_SCALED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_DSTIMG_SIZE);
+ cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+ GSC_DSTIMG_WIDTH_MASK);
+ cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+ GSC_DSTIMG_HEIGHT(sz->vsize));
+ gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709)
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_HD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_HD_NARROW;
+ else
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_SD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_SD_NARROW;
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+ u32 cfg, i, buf_num = GSC_REG_SZ;
+ u32 mask = 0x00000001;
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ for (i = 0; i < GSC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num--;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+ gsc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+ gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_DST) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+ .set_fmt = gsc_dst_set_fmt,
+ .set_transf = gsc_dst_set_transf,
+ .set_size = gsc_dst_set_size,
+ .set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->gsc_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->gsc_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_SRC;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_SRC; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_SRC) {
+ DRM_ERROR("failed to get in buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_DST;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_DST; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_DST) {
+ DRM_ERROR("failed to get out buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+ struct gsc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ u32 status;
+ int buf_id[EXYNOS_DRM_OPS_MAX];
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ status = gsc_read(GSC_IRQ);
+ if (status & GSC_IRQ_STATUS_OR_IRQ) {
+ dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+ return IRQ_NONE;
+ }
+
+ if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+ dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+
+ buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+ return IRQ_HANDLED;
+
+ buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+ buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+ buf_id[EXYNOS_DRM_OPS_SRC];
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = GSC_REFRESH_MIN;
+ prop_list->refresh_max = GSC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = GSC_CROP_MAX;
+ prop_list->crop_max.vsize = GSC_CROP_MAX;
+ prop_list->crop_min.hsize = GSC_CROP_MIN;
+ prop_list->crop_min.vsize = GSC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = GSC_SCALE_MAX;
+ prop_list->scale_max.vsize = GSC_SCALE_MAX;
+ prop_list->scale_min.hsize = GSC_SCALE_MIN;
+ prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!gsc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct gsc_scaler *sc = &ctx->sc;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ ret = gsc_sw_reset(ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to reset hardware.\n");
+ return ret;
+ }
+
+ /* scaler setting */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+ sc->range = true;
+
+ return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ u32 cfg;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ gsc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* enable one shot */
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+ GSC_ENABLE_CLK_GATE_MODE_MASK);
+ cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+ gsc_write(cfg, GSC_ENABLE);
+
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_WB:
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+ /* src local path */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_OUTPUT:
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst local path */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ ret = gsc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set prescaler.\n");
+ return ret;
+ }
+
+ gsc_set_scaler(ctx, &ctx->sc);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg |= GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+
+ return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* bypass */
+ break;
+ case IPP_CMD_WB:
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ gsc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+}
+
+static int gsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clock control */
+ ctx->gsc_clk = devm_clk_get(dev, "gscl");
+ if (IS_ERR(ctx->gsc_clk)) {
+ dev_err(dev, "failed to get gsc clock.\n");
+ return PTR_ERR(ctx->gsc_clk);
+ }
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ if (IS_ERR(ctx->regs))
+ return PTR_ERR(ctx->regs);
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ return -ENOENT;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+ IRQF_ONESHOT, "drm_gsc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ return ret;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+ ippdrv->check_property = gsc_ippdrv_check_property;
+ ippdrv->reset = gsc_ippdrv_reset;
+ ippdrv->start = gsc_ippdrv_start;
+ ippdrv->stop = gsc_ippdrv_stop;
+ ret = gsc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm gsc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+ return ret;
+}
+
+static int gsc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return gsc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+ .probe = gsc_probe,
+ .remove = gsc_remove,
+ .driver = {
+ .name = "exynos-drm-gsc",
+ .owner = THIS_MODULE,
+ .pm = &gsc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 000000000000..29ec1c5efcf2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b45185..28644539b305 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
struct drm_hdmi_context, subdrv);
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
/* Common hdmi subdrv needs to access the hdmi and mixer through context.
 * These should be initialized by the respective drivers */
static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
bool enabled[MIXER_WIN_NR];
};
+int exynos_platform_device_hdmi_register(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ return -EEXIST;
+
+ exynos_drm_hdmi_pdev = platform_device_register_simple(
+ "exynos-drm-hdmi", -1, NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
+ return PTR_ERR(exynos_drm_hdmi_pdev);
+
+ return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
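A hedged sketch of the intended pairing; the init/exit functions below are hypothetical, since the actual call sites for these helpers are not part of this hunk:

    /* hedged sketch: create the common hdmi device around driver registration */
    static int __init example_init(void)
    {
            int ret;

            ret = exynos_platform_device_hdmi_register();
            if (ret && ret != -EEXIST)
                    return ret;

            ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
            if (ret)
                    exynos_platform_device_hdmi_unregister();

            return ret;
    }

    static void __exit example_exit(void)
    {
            platform_driver_unregister(&exynos_drm_common_hdmi_driver);
            exynos_platform_device_hdmi_unregister();
    }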
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
{
if (ctx)
@@ -86,18 +108,17 @@ static bool drm_hdmi_is_connected(struct device *dev)
return false;
}
-static int drm_hdmi_get_edid(struct device *dev,
- struct drm_connector *connector, u8 *edid, int len)
+static struct edid *drm_hdmi_get_edid(struct device *dev,
+ struct drm_connector *connector)
{
struct drm_hdmi_context *ctx = to_context(dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->get_edid)
- return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
- len);
+ return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
- return 0;
+ return NULL;
}
static int drm_hdmi_check_timing(struct device *dev, void *timing)
@@ -157,6 +178,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
}
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (mixer_ops && mixer_ops->wait_for_vblank)
+ mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -238,6 +269,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
+ .wait_for_vblank = drm_hdmi_wait_for_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
.mode_set = drm_hdmi_mode_set,
.get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +323,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
ctx->enabled[win] = false;
}
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (mixer_ops && mixer_ops->wait_for_vblank)
- mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
.mode_set = drm_mixer_mode_set,
.commit = drm_mixer_commit,
.disable = drm_mixer_disable,
- .wait_for_vblank = drm_mixer_wait_for_vblank,
};
static struct exynos_drm_manager hdmi_manager = {
@@ -346,10 +367,24 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx->drm_dev = drm_dev;
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
return 0;
}
-static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct drm_hdmi_context *ctx;
+ struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+ ctx = get_ctx_from_subdrv(subdrv);
+
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
+static int exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_subdrv *subdrv;
@@ -368,6 +403,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
subdrv->dev = dev;
subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
+ subdrv->remove = hdmi_subdrv_remove;
platform_set_drvdata(pdev, subdrv);
@@ -376,7 +412,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
+static int exynos_drm_hdmi_remove(struct platform_device *pdev)
{
struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -389,7 +425,7 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
struct platform_driver exynos_drm_common_hdmi_driver = {
.probe = exynos_drm_hdmi_probe,
- .remove = __devexit_p(exynos_drm_hdmi_remove),
+ .remove = exynos_drm_hdmi_remove,
.driver = {
.name = "exynos-drm-hdmi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a059..d80516fc9ed7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_HDMI_H_
@@ -44,8 +30,8 @@ struct exynos_drm_hdmi_context {
struct exynos_hdmi_ops {
/* display */
bool (*is_connected)(void *ctx);
- int (*get_edid)(void *ctx, struct drm_connector *connector,
- u8 *edid, int len);
+ struct edid *(*get_edid)(void *ctx,
+ struct drm_connector *connector);
int (*check_timing)(void *ctx, void *timing);
int (*power_on)(void *ctx, int mode);
@@ -62,12 +48,13 @@ struct exynos_hdmi_ops {
struct exynos_mixer_ops {
/* manager */
+ int (*iommu_on)(void *ctx, bool enable);
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+ void (*wait_for_vblank)(void *ctx);
void (*dpms)(void *ctx, int mode);
/* overlay */
- void (*wait_for_vblank)(void *ctx);
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..3799d5c2b5df
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,136 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct dma_iommu_mapping *mapping = NULL;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct device *dev = drm_dev->dev;
+
+ if (!priv->da_start)
+ priv->da_start = EXYNOS_DEV_ADDR_START;
+ if (!priv->da_space_size)
+ priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+ if (!priv->da_space_order)
+ priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+ priv->da_space_size,
+ priv->da_space_order);
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(dev, 0xffffffffu);
+ dev->archdata.mapping = mapping;
+
+ return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * Once mapping->kref drops to zero, everything related to the iommu
+ * mapping is released.
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device- attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to
+ * the iommu mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ if (!dev->archdata.mapping) {
+ DRM_ERROR("iommu_mapping is null.\n");
+ return -EFAULT;
+ }
+
+ subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+ sizeof(*subdrv_dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+ ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed iommu attach.\n");
+ return ret;
+ }
+
+ /*
+ * Set dma_ops on the drm_device just once.
+ *
+ * The dma mapping api needs a device object; it is used to allocate
+ * physical memory and map it through the iommu table.  Once the iommu
+ * attach has succeeded, the sub driver carries the iommu-aware
+ * dma_ops, and all sub drivers end up with the same dma_ops.
+ */
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+ return 0;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from
+ * the iommu mapping.
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ if (!mapping || !mapping->domain)
+ return;
+
+ iommu_detach_device(mapping->domain, subdrv_dev);
+ drm_release_iommu_mapping(drm_dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..53b7deea8ab7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,71 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+#define EXYNOS_DEV_ADDR_ORDER 0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *dev_dev,
+ struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct device *dev = drm_dev->dev;
+
+ return dev->archdata.mapping ? true : false;
+#else
+ return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ return false;
+}
+
+#endif /* CONFIG_DRM_EXYNOS_IOMMU */
+#endif /* _EXYNOS_DRM_IOMMU_H_ */
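/*
 * A minimal sketch of how a sub driver is expected to use this header:
 * test is_drm_iommu_supported() first, then attach/detach around its own
 * lifetime (ipp_subdrv_probe() below follows the same pattern).  The
 * example_* names are placeholders, not part of this patch.
 */
static int example_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	/* without CONFIG_DRM_EXYNOS_IOMMU both calls compile to no-ops */
	if (is_drm_iommu_supported(drm_dev))
		return drm_iommu_attach_device(drm_dev, dev);

	return 0;
}

static void example_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	if (is_drm_iommu_supported(drm_dev))
		drm_iommu_detach_device(drm_dev, dev);
}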
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..1a556354e92f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2050 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports image scaler/rotator
+ * and input/output DMA operations using FIMC, GSC, Rotator and so on.
+ * IPP is an integration driver for hardware blocks with the same attributes.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove the send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free the subdrv_remove notifier callback list if needed.
+ * 6. check subdrv_open for multi-open handling.
+ * 7. implement power and sysmmu control in power_on.
+ */
+
+#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+ struct drm_pending_event base;
+ struct drm_exynos_ipp_event event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+ struct list_head list;
+ enum drm_exynos_ops_id ops_id;
+ u32 prop_id;
+ u32 buf_id;
+ struct drm_exynos_ipp_buf_info buf_info;
+ struct drm_file *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+ struct exynos_drm_subdrv subdrv;
+ struct mutex ipp_lock;
+ struct mutex prop_lock;
+ struct idr ipp_idr;
+ struct idr prop_idr;
+ struct workqueue_struct *event_workq;
+ struct workqueue_struct *cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_del(&ippdrv->drv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
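/*
 * A minimal registration sketch for a hardware driver (FIMC, GSC, Rotator)
 * that wants to be managed by the IPP core.  Only the fields consumed by
 * this file are filled in; example_ipp_hw_probe() and the example_*_ops
 * tables are placeholder names, and a real driver would populate the ops
 * with its set_fmt/set_transf/set_size/set_addr callbacks.
 */
static struct exynos_drm_ipp_ops example_src_ops;
static struct exynos_drm_ipp_ops example_dst_ops;

static int example_ipp_hw_probe(struct platform_device *pdev)
{
	struct exynos_drm_ippdrv *ippdrv;

	ippdrv = devm_kzalloc(&pdev->dev, sizeof(*ippdrv), GFP_KERNEL);
	if (!ippdrv)
		return -ENOMEM;

	ippdrv->dev = &pdev->dev;
	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &example_src_ops;
	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &example_dst_ops;

	/* ipp_id, drm_dev and the work queues are assigned by ipp_subdrv_probe() */
	return exynos_drm_ippdrv_register(ippdrv);
}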
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+ u32 *idp)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("failed to get idr.\n");
+ return -ENOMEM;
+ }
+
+ /* do the allocation under our mutex lock */
+ mutex_lock(lock);
+ ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+ mutex_unlock(lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ return ret;
+}
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+ void *obj;
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+ mutex_lock(lock);
+
+ /* find object using handle */
+ obj = idr_find(id_idr, id);
+ if (!obj) {
+ DRM_ERROR("failed to find object.\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_unlock(lock);
+
+ return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+ enum drm_exynos_ipp_cmd cmd)
+{
+ /*
+ * Treat the driver as in use if its dedicated flag is set, or if
+ * this is a WB/OUTPUT command and the device is already powered on.
+ */
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return true;
+
+ return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 ipp_id = property->ipp_id;
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+ if (ipp_id) {
+ /* find ipp driver using idr */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ ipp_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("not found ipp%d driver.\n", ipp_id);
+ return ippdrv;
+ }
+
+ /*
+ * WB and OUTPUT operations do not support multi-operation, so the
+ * driver is marked dedicated by the set property ioctl, and the
+ * dedicated flag is cleared again when the ipp driver finishes its
+ * operations.
+ */
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_ERROR("already used choose device.\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+ * This is necessary to find the correct device among the ipp
+ * drivers: they have different abilities, so the requested
+ * property has to be checked against this driver.
+ */
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_ERROR("not support property.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ippdrv;
+ } else {
+ /*
+ * The user application did not set an ipp_id, so search the
+ * whole driver list for a suitable ipp driver.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_DEBUG_KMS("%s:used device.\n", __func__);
+ continue;
+ }
+
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_DEBUG_KMS("%s:not support property.\n",
+ __func__);
+ continue;
+ }
+
+ return ippdrv;
+ }
+
+ DRM_ERROR("not support ipp driver operations.\n");
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /*
+ * Search for the ipp driver by prop_id handle.  The ipp subsystem
+ * needs this lookup in several paths, e.g. the PAUSE state, queue
+ * buf and command control.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+ count++, (int)ippdrv);
+
+ if (!list_empty(&ippdrv->cmd_list)) {
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+ if (c_node->property.prop_id == prop_id)
+ return ippdrv;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_prop_list *prop_list = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!prop_list) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+ if (!prop_list->ipp_id) {
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ count++;
+ /*
+ * Report the ippdrv count to the user application: in the first
+ * step the application gets the driver count, and in the second
+ * step it gets each driver's capability using its ipp_id.
+ */
+ prop_list->count = count;
+ } else {
+ /*
+ * Get the ippdrv capability for this ipp_id.  Some devices do not
+ * support the wb or output interfaces, so the user application
+ * uses this ioctl to detect the right ipp driver.
+ */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ prop_list->ipp_id);
+ if (!ippdrv) {
+ DRM_ERROR("not found ipp%d driver.\n",
+ prop_list->ipp_id);
+ return -EINVAL;
+ }
+
+ prop_list = ippdrv->prop_list;
+ }
+
+ return 0;
+}
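/*
 * A minimal user-space sketch of the two-step query described above: call
 * once with ipp_id == 0 to learn the driver count, then once per ipp_id
 * (ids start at 1) for that driver's capabilities.  drmIoctl() comes from
 * libdrm; the DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY macro and the uapi header
 * path are assumed here.
 */
#include <errno.h>
#include <xf86drm.h>
#include <drm/exynos_drm.h>

static int query_ipp_drivers(int fd)
{
	struct drm_exynos_ipp_prop_list prop_list = { .ipp_id = 0 };
	unsigned int id;

	/* step 1: ipp_id == 0 only reports how many ipp drivers exist */
	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &prop_list))
		return -errno;

	/* step 2: query each driver's capability by its ipp_id */
	for (id = 1; id <= prop_list.count; id++) {
		struct drm_exynos_ipp_prop_list caps = { .ipp_id = id };

		if (drmIoctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &caps))
			return -errno;
	}

	return 0;
}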
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+ int idx)
+{
+ struct drm_exynos_ipp_config *config = &property->config[idx];
+ struct drm_exynos_pos *pos = &config->pos;
+ struct drm_exynos_sz *sz = &config->sz;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+ __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+ DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ u32 prop_id = property->prop_id;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Find the command node in the ippdrv command list using prop_id,
+ * and update the property information stored in that command node.
+ */
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ if ((c_node->property.prop_id == prop_id) &&
+ (c_node->state == IPP_STATE_STOP)) {
+ DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->cmd, (int)ippdrv);
+
+ c_node->property = *property;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("failed to search property.\n");
+
+ return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+ if (!cmd_work) {
+ DRM_ERROR("failed to alloc cmd_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+ return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+ struct drm_exynos_ipp_event_work *event_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+ if (!event_work) {
+ DRM_ERROR("failed to alloc event_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+ return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_property *property = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Log the property that the user application requested; applications
+ * can set various property combinations.
+ */
+ for_each_ipp_ops(i)
+ ipp_print_property(property, i);
+
+ /*
+ * The set property ioctl normally generates a new prop_id, but a
+ * prop_id may already have been assigned by an earlier set property
+ * call (e.g. the PAUSE state).  In that case find the current
+ * prop_id and reuse it instead of allocating a new one.
+ */
+ if (property->prop_id) {
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+ return ipp_find_and_set_property(property);
+ }
+
+ /* find ipp driver using ipp id */
+ ippdrv = ipp_find_driver(ctx, property);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /* allocate command node */
+ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+ if (!c_node) {
+ DRM_ERROR("failed to allocate map node.\n");
+ return -ENOMEM;
+ }
+
+ /* create property id */
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+ &property->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_clear;
+ }
+
+ DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->prop_id, property->cmd, (int)ippdrv);
+
+ /* stored property information and ippdrv in private data */
+ c_node->priv = priv;
+ c_node->property = *property;
+ c_node->state = IPP_STATE_IDLE;
+
+ c_node->start_work = ipp_create_cmd_work();
+ if (IS_ERR_OR_NULL(c_node->start_work)) {
+ DRM_ERROR("failed to create start work.\n");
+ goto err_clear;
+ }
+
+ c_node->stop_work = ipp_create_cmd_work();
+ if (IS_ERR_OR_NULL(c_node->stop_work)) {
+ DRM_ERROR("failed to create stop work.\n");
+ goto err_free_start;
+ }
+
+ c_node->event_work = ipp_create_event_work();
+ if (IS_ERR_OR_NULL(c_node->event_work)) {
+ DRM_ERROR("failed to create event work.\n");
+ goto err_free_stop;
+ }
+
+ mutex_init(&c_node->cmd_lock);
+ mutex_init(&c_node->mem_lock);
+ mutex_init(&c_node->event_lock);
+
+ init_completion(&c_node->start_complete);
+ init_completion(&c_node->stop_complete);
+
+ for_each_ipp_ops(i)
+ INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+ INIT_LIST_HEAD(&c_node->event_list);
+ list_splice_init(&priv->event_list, &c_node->event_list);
+ list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+ /* make dedicated state without m2m */
+ if (!ipp_is_m2m_cmd(property->cmd))
+ ippdrv->dedicated = true;
+
+ return 0;
+
+err_free_stop:
+ kfree(c_node->stop_work);
+err_free_start:
+ kfree(c_node->start_work);
+err_clear:
+ kfree(c_node);
+ return ret;
+}
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* delete list */
+ list_del(&c_node->list);
+
+ /* destroy mutex */
+ mutex_destroy(&c_node->cmd_lock);
+ mutex_destroy(&c_node->mem_lock);
+ mutex_destroy(&c_node->event_lock);
+
+ /* free command node */
+ kfree(c_node->start_work);
+ kfree(c_node->stop_work);
+ kfree(c_node->event_work);
+ kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+ i ? "dst" : "src");
+ continue;
+ }
+
+ /* find memory node entry */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+ i ? "dst" : "src", count[i], (int)m_node);
+ count[i]++;
+ }
+ }
+
+ DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+ min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+ max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+ /*
+ * M2M operations need paired src/dst buffers, so use the minimum of
+ * the src and dst counts.  Other commands do not use paired memory,
+ * so the maximum count is used instead.
+ */
+ if (ipp_is_m2m_cmd(property->cmd))
+ ret = min(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+ else
+ ret = max(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+ /* source/destination memory list */
+ head = &c_node->mem_list[qbuf->ops_id];
+
+ /* find memory node from memory list */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+ __func__, count++, (int)m_node);
+
+ /* compare buffer id */
+ if (m_node->buf_id == qbuf->buf_id)
+ return m_node;
+ }
+
+ return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid queue node.\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* get operations callback */
+ ops = ippdrv->ops[m_node->ops_id];
+ if (!ops) {
+ DRM_ERROR("not support ops.\n");
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+ /* set address and enable irq */
+ if (ops->set_addr) {
+ ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+ m_node->buf_id, IPP_BUF_ENQUEUE);
+ if (ret) {
+ DRM_ERROR("failed to set addr.\n");
+ goto err_unlock;
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_get_mem_node(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_buf_info buf_info;
+ void *addr;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+ if (!m_node) {
+ DRM_ERROR("failed to allocate queue node.\n");
+ goto err_unlock;
+ }
+
+ /* clear base address for error handling */
+ memset(&buf_info, 0x0, sizeof(buf_info));
+
+ /* operations, buffer id */
+ m_node->ops_id = qbuf->ops_id;
+ m_node->prop_id = qbuf->prop_id;
+ m_node->buf_id = qbuf->buf_id;
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+ (int)m_node, qbuf->ops_id);
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+ qbuf->prop_id, m_node->buf_id);
+
+ for_each_ipp_planar(i) {
+ DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+ i, qbuf->handle[i]);
+
+ /* get dma address by handle */
+ if (qbuf->handle[i]) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev,
+ qbuf->handle[i], file);
+ if (IS_ERR(addr)) {
+ DRM_ERROR("failed to get addr.\n");
+ goto err_clear;
+ }
+
+ buf_info.handles[i] = qbuf->handle[i];
+ buf_info.base[i] = *(dma_addr_t *) addr;
+ DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+ __func__, i, buf_info.base[i],
+ (int)buf_info.handles[i]);
+ }
+ }
+
+ m_node->filp = file;
+ m_node->buf_info = buf_info;
+ list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+ mutex_unlock(&c_node->mem_lock);
+ return m_node;
+
+err_clear:
+ kfree(m_node);
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid dequeue node.\n");
+ return -EFAULT;
+ }
+
+ if (list_empty(&m_node->list)) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* put gem buffer */
+ for_each_ipp_planar(i) {
+ unsigned long handle = m_node->buf_info.handles[i];
+ if (handle)
+ exynos_drm_gem_put_dma_addr(drm_dev, handle,
+ m_node->filp);
+ }
+
+ /* delete list in queue */
+ list_del(&m_node->list);
+ kfree(m_node);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+ kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+ qbuf->ops_id, qbuf->buf_id);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+ if (!e) {
+ DRM_ERROR("failed to allocate event.\n");
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ return -ENOMEM;
+ }
+
+ /* make event */
+ e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = qbuf->user_data;
+ e->event.prop_id = qbuf->prop_id;
+ e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = ipp_free_event;
+ list_add_tail(&e->base.link, &c_node->event_list);
+
+ return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e, *te;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+ return;
+ }
+
+ list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+ DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+ __func__, count++, (int)e);
+
+ /*
+ * qbuf == NULL means delete all events: the stop path wants the
+ * whole event list cleared.  Otherwise only the event with the
+ * matching buf_id is deleted.
+ */
+ if (!qbuf) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ }
+
+ /* compare buffer id */
+ if (qbuf && (qbuf->buf_id ==
+ e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ return;
+ }
+ }
+}
+
+static void ipp_handle_cmd_work(struct device *dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_work *cmd_work,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ cmd_work->ippdrv = ippdrv;
+ cmd_work->c_node = c_node;
+ queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_property *property;
+ struct exynos_drm_ipp_ops *ops;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EFAULT;
+ }
+
+ ops = ippdrv->ops[qbuf->ops_id];
+ if (!ops) {
+ DRM_ERROR("failed to get ops.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /*
+ * If the destination buffer is set and the clock is enabled,
+ * m2m operations have to be started from queue_buf.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+ cmd_work->ctrl = IPP_CTRL_PLAY;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ } else {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+ /* delete list */
+ list_for_each_entry_safe(m_node, tm_node,
+ &c_node->mem_list[qbuf->ops_id], list) {
+ if (m_node->buf_id == qbuf->buf_id &&
+ m_node->ops_id == qbuf->ops_id)
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ }
+ }
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_queue_buf *qbuf = data;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!qbuf) {
+ DRM_ERROR("invalid buf parameter.\n");
+ return -EINVAL;
+ }
+
+ if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+ DRM_ERROR("invalid ops parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+ __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+ qbuf->buf_id, qbuf->buf_type);
+
+ /* find command node */
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ qbuf->prop_id);
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ return -EFAULT;
+ }
+
+ /* buffer control */
+ switch (qbuf->buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* get memory node */
+ m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+ if (IS_ERR(m_node)) {
+ DRM_ERROR("failed to get m_node.\n");
+ return PTR_ERR(m_node);
+ }
+
+ /*
+ * First get an event for the destination buffer, then, in the
+ * M2M case, run the operation with that destination buffer if
+ * needed.
+ */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+ /* get event for destination buffer */
+ ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to get event.\n");
+ goto err_clean_node;
+ }
+
+ /*
+ * In the M2M case run play control for streaming; in the
+ * other cases just set the address and wait.
+ */
+ ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to run command.\n");
+ goto err_clean_node;
+ }
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ mutex_lock(&c_node->cmd_lock);
+
+ /* put event for destination buffer */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+ ipp_put_event(c_node, qbuf);
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+ mutex_unlock(&c_node->cmd_lock);
+ break;
+ default:
+ DRM_ERROR("invalid buffer control.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_clean_node:
+ DRM_ERROR("clean memory nodes.\n");
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+ return ret;
+}
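/*
 * A minimal user-space sketch of the enqueue path handled above: enqueue a
 * source and a destination GEM buffer for an already configured property.
 * The destination enqueue is what arms the completion event and, once the
 * property has been started with IPP_CTRL_PLAY, kicks the M2M run.  The
 * DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF macro, header path and handle values are
 * assumed for illustration.
 */
#include <errno.h>
#include <xf86drm.h>
#include <drm/exynos_drm.h>

static int queue_one_frame(int fd, unsigned int prop_id,
			   unsigned int src_handle, unsigned int dst_handle)
{
	struct drm_exynos_ipp_queue_buf qbuf = {
		.prop_id = prop_id,
		.buf_type = IPP_BUF_ENQUEUE,
		.buf_id = 0,
	};

	qbuf.ops_id = EXYNOS_DRM_OPS_SRC;
	qbuf.handle[0] = src_handle;
	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf))
		return -errno;

	qbuf.ops_id = EXYNOS_DRM_OPS_DST;
	qbuf.handle[0] = dst_handle;
	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf))
		return -errno;

	return 0;
}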
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+ enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (ctrl != IPP_CTRL_PLAY) {
+ if (pm_runtime_suspended(dev)) {
+ DRM_ERROR("pm:runtime_suspended.\n");
+ goto err_status;
+ }
+ }
+
+ switch (ctrl) {
+ case IPP_CTRL_PLAY:
+ if (state != IPP_STATE_IDLE)
+ goto err_status;
+ break;
+ case IPP_CTRL_STOP:
+ if (state == IPP_STATE_STOP)
+ goto err_status;
+ break;
+ case IPP_CTRL_PAUSE:
+ if (state != IPP_STATE_START)
+ goto err_status;
+ break;
+ case IPP_CTRL_RESUME:
+ if (state != IPP_STATE_STOP)
+ goto err_status;
+ break;
+ default:
+ DRM_ERROR("invalid state.\n");
+ goto err_status;
+ break;
+ }
+
+ return true;
+
+err_status:
+ DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+ return false;
+}
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+ struct drm_exynos_ipp_cmd_node *c_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!cmd_ctrl) {
+ DRM_ERROR("invalid control parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return PTR_ERR(ippdrv);
+ }
+
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ cmd_ctrl->prop_id);
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return -EINVAL;
+ }
+
+ if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+ c_node->state)) {
+ DRM_ERROR("invalid state.\n");
+ return -EINVAL;
+ }
+
+ switch (cmd_ctrl->ctrl) {
+ case IPP_CTRL_PLAY:
+ if (pm_runtime_suspended(ippdrv->dev))
+ pm_runtime_get_sync(ippdrv->dev);
+ c_node->state = IPP_STATE_START;
+
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ c_node->state = IPP_STATE_START;
+ break;
+ case IPP_CTRL_STOP:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(300))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ break;
+ case IPP_CTRL_PAUSE:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ break;
+ case IPP_CTRL_RESUME:
+ c_node->state = IPP_STATE_START;
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ default:
+ DRM_ERROR("could not support this state currently.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ return 0;
+}
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(
+ &exynos_drm_ippnb_list, val, v);
+}
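/*
 * A minimal sketch of the notifier interface above: a consumer fills a
 * notifier_block and registers it to receive whatever value/pointer a
 * sender passes to exynos_drm_ippnb_send_event().  The example_* names
 * are placeholders; <linux/notifier.h> provides NOTIFY_DONE.
 */
static int example_ippnb_callback(struct notifier_block *nb,
				  unsigned long val, void *data)
{
	/* val and data are exactly what the sender handed to send_event() */
	return NOTIFY_DONE;
}

static struct notifier_block example_ippnb = {
	.notifier_call = example_ippnb_callback,
};

static int example_register_ippnb(void)
{
	/* typically paired with exynos_drm_ippnb_unregister() on teardown */
	return exynos_drm_ippnb_register(&example_ippnb);
}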
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ bool swap = false;
+ int ret, i;
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* reset h/w block */
+ if (ippdrv->reset &&
+ ippdrv->reset(ippdrv->dev)) {
+ DRM_ERROR("failed to reset.\n");
+ return -EINVAL;
+ }
+
+ /* set source,destination operations */
+ for_each_ipp_ops(i) {
+ struct drm_exynos_ipp_config *config =
+ &property->config[i];
+
+ ops = ippdrv->ops[i];
+ if (!ops || !config) {
+ DRM_ERROR("not support ops and config.\n");
+ return -EINVAL;
+ }
+
+ /* set format */
+ if (ops->set_fmt) {
+ ret = ops->set_fmt(ippdrv->dev, config->fmt);
+ if (ret) {
+ DRM_ERROR("not support format.\n");
+ return ret;
+ }
+ }
+
+ /* set transform for rotation, flip */
+ if (ops->set_transf) {
+ ret = ops->set_transf(ippdrv->dev, config->degree,
+ config->flip, &swap);
+ if (ret) {
+ DRM_ERROR("not support tranf.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* set size */
+ if (ops->set_size) {
+ ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+ &config->sz);
+ if (ret) {
+ DRM_ERROR("not support size.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* store command info in ippdrv */
+ ippdrv->c_node = c_node;
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* set current property in ippdrv */
+ ret = ipp_set_property(ippdrv, property);
+ if (ret) {
+ DRM_ERROR("failed to set property.\n");
+ ippdrv->c_node = NULL;
+ return ret;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("failed to get node.\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+ __func__, (int)m_node);
+
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+ /* start operations */
+ if (ippdrv->start) {
+ ret = ippdrv->start(ippdrv->dev, property->cmd);
+ if (ret) {
+ DRM_ERROR("failed to start ops.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret = 0, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* put event */
+ ipp_put_event(c_node, NULL);
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+ __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node,
+ head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node,
+ m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+err_clear:
+ /* stop operations */
+ if (ippdrv->stop)
+ ippdrv->stop(ippdrv->dev, property->cmd);
+
+ return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work =
+ (struct drm_exynos_ipp_cmd_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_property *property;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = cmd_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("invalid ippdrv list.\n");
+ return;
+ }
+
+ c_node = cmd_work->c_node;
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return;
+ }
+
+ mutex_lock(&c_node->cmd_lock);
+
+ property = &c_node->property;
+
+ switch (cmd_work->ctrl) {
+ case IPP_CTRL_PLAY:
+ case IPP_CTRL_RESUME:
+ ret = ipp_start_property(ippdrv, c_node);
+ if (ret) {
+ DRM_ERROR("failed to start property:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+
+ /*
+ * The M2M case is a single-unit operation driven from multiple
+ * queues, so it has to wait here for completion of the data
+ * transfer.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ if (!wait_for_completion_timeout
+ (&c_node->start_complete, msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout event:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+ }
+ break;
+ case IPP_CTRL_STOP:
+ case IPP_CTRL_PAUSE:
+ ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+ c_node);
+ if (ret) {
+ DRM_ERROR("failed to stop property.\n");
+ goto err_unlock;
+ }
+
+ complete(&c_node->stop_complete);
+ break;
+ default:
+ DRM_ERROR("unknown control type\n");
+ break;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+ mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_queue_buf qbuf;
+ struct drm_exynos_ipp_send_event *e;
+ struct list_head *head;
+ struct timeval now;
+ unsigned long flags;
+ u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+ int ret, i;
+
+ for_each_ipp_ops(i)
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", buf_id[i]);
+
+ if (!drm_dev) {
+ DRM_ERROR("failed to get drm_dev.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[i] = m_node->buf_id;
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", tbuf_id[i]);
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ }
+ break;
+ case IPP_CMD_WB:
+ /* clear buf for finding */
+ memset(&qbuf, 0x0, sizeof(qbuf));
+ qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+ qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+ /* get memory node entry */
+ m_node = ipp_find_mem_node(c_node, &qbuf);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+ DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+ tbuf_id[1], buf_id[1], property->prop_id);
+
+ /*
+ * The command node keeps an event list for destination buffers:
+ * when a destination buffer is enqueued, an event is created and
+ * linked to the tail, so the first event corresponds to the first
+ * enqueued buffer.
+ */
+ e = list_first_entry(&c_node->event_list,
+ struct drm_exynos_ipp_send_event, base.link);
+
+ if (!e) {
+ DRM_ERROR("empty event.\n");
+ return -EINVAL;
+ }
+
+ do_gettimeofday(&now);
+ DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
+ , __func__, now.tv_sec, now.tv_usec);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.prop_id = property->prop_id;
+
+ /* set buffer id about source destination */
+ for_each_ipp_ops(i)
+ e->event.buf_id[i] = tbuf_id[i];
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+ property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+ return 0;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+ struct drm_exynos_ipp_event_work *event_work =
+ (struct drm_exynos_ipp_event_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret;
+
+ if (!event_work) {
+ DRM_ERROR("failed to get event_work.\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+ event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+ ippdrv = event_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return;
+ }
+
+ c_node = ippdrv->c_node;
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ return;
+ }
+
+ /*
+ * IPP synchronizes the command thread and the event thread.  If the
+ * device is closed abruptly from user land, synchronize with the
+ * command thread by completing the start event instead of carrying
+ * on with the operation.
+ */
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+ __func__, c_node->state, c_node->property.prop_id);
+ goto err_completion;
+ }
+
+ mutex_lock(&c_node->event_lock);
+
+ ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+ if (ret) {
+ DRM_ERROR("failed to send event.\n");
+ goto err_completion;
+ }
+
+err_completion:
+ if (ipp_is_m2m_cmd(c_node->property.cmd))
+ complete(&c_node->start_complete);
+
+ mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret, count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ ippdrv->drm_dev = drm_dev;
+
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+ &ippdrv->ipp_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_idr;
+ }
+
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+ count++, (int)ippdrv, ippdrv->ipp_id);
+
+ if (ippdrv->ipp_id == 0) {
+ DRM_ERROR("failed to get ipp_id[%d]\n",
+ ippdrv->ipp_id);
+ goto err_idr;
+ }
+
+ /* store parent device for node */
+ ippdrv->parent_dev = dev;
+
+ /* store event work queue and handler */
+ ippdrv->event_workq = ctx->event_workq;
+ ippdrv->sched_event = ipp_sched_event;
+ INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+ if (is_drm_iommu_supported(drm_dev)) {
+ ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+ if (ret) {
+ DRM_ERROR("failed to activate iommu\n");
+ goto err_iommu;
+ }
+ }
+ }
+
+ return 0;
+
+err_iommu:
+ /* get ipp driver entry */
+ list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+ return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+ ippdrv->drm_dev = NULL;
+ exynos_drm_ippdrv_unregister(ippdrv);
+ }
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("failed to allocate priv.\n");
+ return -ENOMEM;
+ }
+ priv->dev = dev;
+ file_priv->ipp_priv = priv;
+
+ INIT_LIST_HEAD(&priv->event_list);
+
+ DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+ return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ goto err_clear;
+ }
+
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (list_empty(&ippdrv->cmd_list))
+ continue;
+
+ list_for_each_entry_safe(c_node, tc_node,
+ &ippdrv->cmd_list, list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+ __func__, count++, (int)ippdrv);
+
+ if (c_node->priv == priv) {
+ /*
+ * Userland went into an abnormal state (the process was
+ * killed and the file closed), so IPP never received a stop
+ * cmd ctrl.  Perform the stop operation here instead.
+ */
+ if (c_node->state == IPP_STATE_START) {
+ ipp_stop_property(drm_dev, ippdrv,
+ c_node);
+ c_node->state = IPP_STATE_STOP;
+ }
+
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ }
+ }
+ }
+
+err_clear:
+ kfree(priv);
+ return;
+}
+
+static int ipp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipp_context *ctx;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_init(&ctx->ipp_lock);
+ mutex_init(&ctx->prop_lock);
+
+ idr_init(&ctx->ipp_idr);
+ idr_init(&ctx->prop_idr);
+
+ /*
+	 * create single thread workqueue for ipp events.
+	 * IPP drivers queue their event_work on this workqueue,
+	 * and the event thread delivers the resulting events to
+	 * the user process.
+ */
+ ctx->event_workq = create_singlethread_workqueue("ipp_event");
+ if (!ctx->event_workq) {
+ dev_err(dev, "failed to create event workqueue\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * create single thread workqueue for ipp commands.
+	 * A user process creates a command node with the set property
+	 * ioctl and queues start_work on this workqueue; the command
+	 * thread then starts the property.
+ */
+ ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+ if (!ctx->cmd_workq) {
+ dev_err(dev, "failed to create cmd workqueue\n");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+	/* set sub driver information */
+ subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->probe = ipp_subdrv_probe;
+ subdrv->remove = ipp_subdrv_remove;
+ subdrv->open = ipp_subdrv_open;
+ subdrv->close = ipp_subdrv_close;
+
+ platform_set_drvdata(pdev, ctx);
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ DRM_ERROR("failed to register drm ipp device.\n");
+ goto err_cmd_workq;
+ }
+
+ dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+ return 0;
+
+err_cmd_workq:
+ destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+ destroy_workqueue(ctx->event_workq);
+ return ret;
+}
+
+static int ipp_remove(struct platform_device *pdev)
+{
+ struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* unregister sub driver */
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	/* remove and destroy ipp idrs */
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+
+ mutex_destroy(&ctx->ipp_lock);
+ mutex_destroy(&ctx->prop_lock);
+
+ /* destroy command, event work queue */
+ destroy_workqueue(ctx->cmd_workq);
+ destroy_workqueue(ctx->event_workq);
+
+ return 0;
+}
+
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
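+	/* no ipp-level power control is implemented yet */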
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return ipp_power_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+ SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+ .probe = ipp_probe,
+ .remove = ipp_remove,
+ .driver = {
+ .name = "exynos-drm-ipp",
+ .owner = THIS_MODULE,
+ .pm = &ipp_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..4cadbea7dbde
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
+
+#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+ IPP_STATE_IDLE,
+ IPP_STATE_START,
+ IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ enum drm_exynos_ipp_ctrl ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head of source/destination memory queue information.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+ struct exynos_drm_ipp_private *priv;
+ struct list_head list;
+ struct list_head event_list;
+ struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
+ struct mutex cmd_lock;
+ struct mutex mem_lock;
+ struct mutex event_lock;
+ struct completion start_complete;
+ struct completion stop_complete;
+ struct drm_exynos_ipp_property property;
+ struct drm_exynos_ipp_cmd_work *start_work;
+ struct drm_exynos_ipp_cmd_work *stop_work;
+ struct drm_exynos_ipp_event_work *event_work;
+ enum drm_exynos_ipp_state state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: gem object handle of each Y, Cb, Cr plane.
+ * @base: dma address of each Y, Cb, Cr plane.
+ */
+struct drm_exynos_ipp_buf_info {
+ unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
+ dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of wb setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+ __u32 enable;
+ __u32 refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source/destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform (rotation, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+ int (*set_fmt)(struct device *dev, u32 fmt);
+ int (*set_transf)(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap);
+ int (*set_size)(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+ int (*set_addr)(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type);
+};
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @c_node: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of the current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+ struct list_head drv_list;
+ struct device *parent_dev;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ u32 ipp_id;
+ bool dedicated;
+ struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
+ struct workqueue_struct *event_workq;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct list_head cmd_list;
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ int (*check_property)(struct device *dev,
+ struct drm_exynos_ipp_property *property);
+ int (*reset)(struct device *dev);
+ int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*sched_event)(struct work_struct *work);
+};
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 862ca1eb2102..83efc662d65a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
* CRTC ----------------
* ^ start ^ end
*
- * There are six cases from a to b.
+ * There are six cases from a to f.
*
* <----- SCREEN ----->
* 0 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
}
overlay->dma_addr[i] = buffer->dma_addr;
- overlay->vaddr[i] = buffer->kvaddr;
- DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
- i, (unsigned long)overlay->vaddr[i],
- (unsigned long)overlay->dma_addr[i]);
+ DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->dma_addr[i]);
}
actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_x < 0) {
if (actual_w)
src_x -= crtc_x;
- else
- src_x += crtc_w;
crtc_x = 0;
}
if (crtc_y < 0) {
if (actual_h)
src_y -= crtc_y;
- else
- src_y += crtc_h;
crtc_y = 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 000000000000..a40b9fb60240
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * Rotator supports image crop/rotation and input/output DMA operations.
+ * input DMA reads image data from memory.
+ * output DMA writes image data to memory.
+ *
+ * M2M operation : supports crop/rotation/flip and so on.
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case of platform_device_id.
+ * 3. check src/dst size (width, height).
+ * 4. need to add supported format list to prop_list.
+ */
+
+#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct rot_context, ippdrv);
+#define rot_read(offset) readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
+
+enum rot_irq_status {
+ ROT_IRQ_STATUS_COMPLETE = 8,
+ ROT_IRQ_STATUS_ILLEGAL = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+ u32 min_w;
+ u32 min_h;
+ u32 max_w;
+ u32 max_h;
+ u32 align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+ struct rot_limit ycbcr420_2p;
+ struct rot_limit rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ * @ippdrv: ipp driver structure embedded in this context.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct clk *clock;
+ struct rot_limit_table *limit_tbl;
+ int irq;
+ int cur_buf_id[EXYNOS_DRM_OPS_MAX];
+ bool suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+ u32 val = rot_read(ROT_CONFIG);
+
+	if (enable)
+ val |= ROT_CONFIG_IRQ;
+ else
+ val &= ~ROT_CONFIG_IRQ;
+
+ rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_CONTROL);
+
+ val &= ROT_CONTROL_FMT_MASK;
+
+ return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_STATUS);
+
+ val = ROT_STATUS_IRQ(val);
+
+ if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+ return ROT_IRQ_STATUS_COMPLETE;
+
+ return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+ struct rot_context *rot = arg;
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+ enum rot_irq_status irq_status;
+ u32 val;
+
+ /* Get execution result */
+ irq_status = rotator_reg_get_irq_status(rot);
+
+ /* clear status */
+ val = rot_read(ROT_STATUS);
+ val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+ rot_write(val, ROT_STATUS);
+
+ if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ } else
+ DRM_ERROR("the SFR is set illegally\n");
+
+ return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+ u32 *vsize)
+{
+ struct rot_limit_table *limit_tbl = rot->limit_tbl;
+ struct rot_limit *limit;
+ u32 mask, val;
+
+ /* Get size limit */
+ if (fmt == ROT_CONTROL_FMT_RGB888)
+ limit = &limit_tbl->rgb888;
+ else
+ limit = &limit_tbl->ycbcr420_2p;
+
+ /* Get mask for rounding to nearest aligned val */
+ mask = ~((1 << limit->align) - 1);
+
+ /* Set aligned width */
+ val = ROT_ALIGN(*hsize, limit->align, mask);
+ if (val < limit->min_w)
+ *hsize = ROT_MIN(limit->min_w, mask);
+ else if (val > limit->max_w)
+ *hsize = ROT_MAX(limit->max_w, mask);
+ else
+ *hsize = val;
+
+ /* Set aligned height */
+ val = ROT_ALIGN(*vsize, limit->align, mask);
+ if (val < limit->min_h)
+ *vsize = ROT_MIN(limit->min_h, mask);
+ else if (val > limit->max_h)
+ *vsize = ROT_MAX(limit->max_h, mask);
+ else
+ *vsize = val;
+}
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FMT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ val |= ROT_CONTROL_FMT_YCBCR420_2P;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= ROT_CONTROL_FMT_RGB888;
+ break;
+ default:
+ DRM_ERROR("invalid image format\n");
+ return -EINVAL;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+ (fmt == ROT_CONTROL_FMT_RGB888))
+ return true;
+
+ return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 fmt, hsize, vsize;
+ u32 val;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_SRC_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_SRC_CROP_POS);
+ val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+ rot_write(val, ROT_SRC_CROP_SIZE);
+
+ return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+
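+			/* NV12: the interleaved CbCr plane follows the hsize * vsize Y plane */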
+ val = rot_read(ROT_SRC_BUF_SIZE);
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ /* Set transform configuration */
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FLIP_MASK;
+
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ val |= ROT_CONTROL_FLIP_VERTICAL;
+ break;
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
+ break;
+ default:
+ /* Flip None */
+ break;
+ }
+
+ val &= ~ROT_CONTROL_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ val |= ROT_CONTROL_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ val |= ROT_CONTROL_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ val |= ROT_CONTROL_ROT_270;
+ break;
+ default:
+ /* Rotation 0 Degree */
+ break;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ /* Check degree for setting buffer size swap */
+ if ((degree == EXYNOS_DRM_DEGREE_90) ||
+ (degree == EXYNOS_DRM_DEGREE_270))
+ *swap = true;
+ else
+ *swap = false;
+
+ return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val, fmt, hsize, vsize;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_DST_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_DST_CROP_POS);
+
+ return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+ /* Get buf size */
+ val = rot_read(ROT_DST_BUF_SIZE);
+
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_DST_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+ .set_fmt = rotator_src_set_fmt,
+ .set_size = rotator_src_set_size,
+ .set_addr = rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+ .set_transf = rotator_dst_set_transf,
+ .set_size = rotator_dst_set_size,
+ .set_addr = rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 0;
+ prop_list->crop = 0;
+ prop_list->scale = 0;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:not support format\n", __func__);
+ return false;
+ }
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct drm_exynos_ipp_config *src_config =
+ &property->config[EXYNOS_DRM_OPS_SRC];
+ struct drm_exynos_ipp_config *dst_config =
+ &property->config[EXYNOS_DRM_OPS_DST];
+ struct drm_exynos_pos *src_pos = &src_config->pos;
+ struct drm_exynos_pos *dst_pos = &dst_config->pos;
+ struct drm_exynos_sz *src_sz = &src_config->sz;
+ struct drm_exynos_sz *dst_sz = &dst_config->sz;
+ bool swap = false;
+
+ /* Check format configuration */
+ if (src_config->fmt != dst_config->fmt) {
+ DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_fmt(dst_config->fmt)) {
+ DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check transform configuration */
+ if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+ DRM_DEBUG_KMS("%s:not support source-side rotation\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (dst_config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
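+		/* fall through */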
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+ return -EINVAL;
+ }
+
+ if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+ DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_flip(dst_config->flip)) {
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check size configuration */
+ if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+ (src_pos->y + src_pos->h > src_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+ return -EINVAL;
+ }
+
+ if (swap) {
+ if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+ (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+ (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ if (rot->suspended) {
+ DRM_ERROR("suspended state\n");
+ return -EPERM;
+ }
+
+ if (cmd != IPP_CMD_M2M) {
+ DRM_ERROR("not support cmd: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set interrupt enable */
+ rotator_reg_set_irq(rot, true);
+
+ val = rot_read(ROT_CONTROL);
+ val |= ROT_CONTROL_START;
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static int rotator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+ if (!rot) {
+ dev_err(dev, "failed to allocate rot\n");
+ return -ENOMEM;
+ }
+
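+	/* size limits for this variant come from the platform device id table */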
+ rot->limit_tbl = (struct rot_limit_table *)
+ platform_get_device_id(pdev)->driver_data;
+
+ rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rot->regs = devm_ioremap_resource(dev, rot->regs_res);
+ if (IS_ERR(rot->regs))
+ return PTR_ERR(rot->regs);
+
+ rot->irq = platform_get_irq(pdev, 0);
+ if (rot->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ return rot->irq;
+ }
+
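+	/* no hardirq handler: the interrupt is handled only in the threaded handler */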
+ ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+ IRQF_ONESHOT, "drm_rotator", rot);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ rot->clock = devm_clk_get(dev, "rotator");
+ if (IS_ERR_OR_NULL(rot->clock)) {
+ dev_err(dev, "failed to get clock\n");
+ ret = PTR_ERR(rot->clock);
+ goto err_clk_get;
+ }
+
+ pm_runtime_enable(dev);
+
+ ippdrv = &rot->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ ippdrv->check_property = rotator_ippdrv_check_property;
+ ippdrv->start = rotator_ippdrv_start;
+ ret = rotator_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_ippdrv_register;
+ }
+
+ DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+ platform_set_drvdata(pdev, rot);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm rotator device\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(dev, "The exynos rotator is probed successfully\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_clk_get:
+ free_irq(rot->irq, rot);
+ return ret;
+}
+
+static int rotator_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot = dev_get_drvdata(dev);
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+
+ pm_runtime_disable(dev);
+
+ free_irq(rot->irq, rot);
+
+ return 0;
+}
+
+static struct rot_limit_table rot_limit_tbl = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 2,
+ },
+};
+
+static struct platform_device_id rotator_driver_ids[] = {
+ {
+ .name = "exynos-rot",
+ .driver_data = (unsigned long)&rot_limit_tbl,
+ },
+ {},
+};
+
+static int rotator_clk_crtl(struct rot_context *rot, bool enable)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (enable) {
+ clk_enable(rot->clock);
+ rot->suspended = false;
+ } else {
+ clk_disable(rot->clock);
+ rot->suspended = true;
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return rotator_clk_crtl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return rotator_clk_crtl(rot, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return rotator_clk_crtl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return rotator_clk_crtl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+ NULL)
+};
+
+struct platform_driver rotator_driver = {
+ .probe = rotator_probe,
+ .remove = rotator_remove,
+ .id_table = rotator_driver_ids,
+ .driver = {
+ .name = "exynos-rot",
+ .owner = THIS_MODULE,
+ .pm = &rotator_pm_ops,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 000000000000..71a0b4c0c1e8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_ROTATOR_H_
+#define _EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f7..13ccbd4bcfaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
@@ -99,10 +98,12 @@ static bool vidi_display_is_connected(struct device *dev)
return ctx->connected ? true : false;
}
-static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
- u8 *edid, int len)
+static struct edid *vidi_get_edid(struct device *dev,
+ struct drm_connector *connector)
{
struct vidi_context *ctx = get_vidi_context(dev);
+ struct edid *edid;
+ int edid_len;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -112,13 +113,18 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
*/
if (!ctx->raw_edid) {
DRM_DEBUG_KMS("raw_edid is null.\n");
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
}
- memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
- * EDID_LENGTH, len));
+ edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+ edid = kzalloc(edid_len, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEBUG_KMS("failed to allocate edid\n");
+ return ERR_PTR(-ENOMEM);
+ }
- return 0;
+ memcpy(edid, ctx->raw_edid, edid_len);
+ return edid;
}
static void *vidi_get_panel(struct device *dev)
@@ -294,7 +300,6 @@ static void vidi_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -309,9 +314,7 @@ static void vidi_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -376,52 +379,6 @@ static struct exynos_drm_manager vidi_manager = {
.display_ops = &vidi_display_ops,
};
-static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
-{
- struct exynos_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- bool is_checked = false;
-
- spin_lock_irqsave(&drm_dev->event_lock, flags);
-
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (crtc != e->pipe)
- continue;
-
- is_checked = true;
-
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
-
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
- }
-
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-}
-
static void vidi_fake_vblank_handler(struct work_struct *work)
{
struct vidi_context *ctx = container_of(work, struct vidi_context,
@@ -446,7 +403,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
mutex_unlock(&ctx->lock);
- vidi_finish_pageflip(subdrv->drm_dev, manager->pipe);
+ exynos_drm_crtc_finish_pageflip(subdrv->drm_dev, manager->pipe);
}
static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
@@ -564,7 +521,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct exynos_drm_manager *manager;
struct exynos_drm_display_ops *display_ops;
struct drm_exynos_vidi_connection *vidi = data;
- struct edid *raw_edid;
int edid_len;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -601,11 +557,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
}
if (vidi->connection) {
- if (!vidi->edid) {
- DRM_DEBUG_KMS("edid data is null.\n");
+ struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid;
+ if (!drm_edid_is_valid(raw_edid)) {
+ DRM_DEBUG_KMS("edid data is invalid.\n");
return -EINVAL;
}
- raw_edid = (struct edid *)(uint32_t)vidi->edid;
edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
if (!ctx->raw_edid) {
@@ -631,7 +587,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return 0;
}
-static int __devinit vidi_probe(struct platform_device *pdev)
+static int vidi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vidi_context *ctx;
@@ -667,7 +623,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit vidi_remove(struct platform_device *pdev)
+static int vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
@@ -705,7 +661,7 @@ static const struct dev_pm_ops vidi_pm_ops = {
struct platform_driver vidi_driver = {
.probe = vidi_probe,
- .remove = __devexit_p(vidi_remove),
+ .remove = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
index a4babe4e65d7..1e5fdaa36ccc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
@@ -3,24 +3,10 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_VIDI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a3..233247505ff8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,7 +34,6 @@
#include <linux/regulator/consumer.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
-#include <plat/gpio-cfg.h>
#include <drm/exynos_drm.h>
@@ -50,6 +49,29 @@
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION 0x02
+#define HDMI_AVI_LENGTH 0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION 0x01
+#define HDMI_AUI_LENGTH 0x0A
+
+/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
+enum HDMI_PACKET_TYPE {
+ /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
+ /* InfoFrame packet type */
+ HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+ /* Vendor-Specific InfoFrame */
+ HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+ /* Auxiliary Video information InfoFrame */
+ HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+ /* Audio information InfoFrame */
+ HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -74,8 +96,8 @@ struct hdmi_context {
struct mutex hdmi_mutex;
void __iomem *regs;
- int external_irq;
- int internal_irq;
+ void *parent_ctx;
+ int irq;
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
@@ -84,7 +106,6 @@ struct hdmi_context {
int cur_conf;
struct hdmi_resources res;
- void *parent_ctx;
int hpd_gpio;
@@ -182,6 +203,7 @@ struct hdmi_v13_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_v13_preset_conf *conf;
};
@@ -353,15 +375,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
};
static const struct hdmi_v13_conf hdmi_v13_confs[] = {
- { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
- { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
- { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p50 },
- { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
- { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p60 },
+ { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+ &hdmi_v13_conf_480p },
+ { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i50 },
+ { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i60 },
+ { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p60 },
};
/* HDMI Version 1.4 */
@@ -479,6 +506,7 @@ struct hdmi_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_preset_conf *conf;
};
@@ -934,16 +962,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
};
static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+ { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+ { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+ { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
+struct hdmi_infoframe {
+ enum HDMI_PACKET_TYPE type;
+ u8 ver;
+ u8 len;
+};
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
@@ -1267,6 +1300,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
return hdmi_v14_conf_index(mode);
}
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+ u32 start, u8 len, u32 hdr_sum)
+{
+ int i;
+
+ /* hdr_sum : header0 + header1 + header2
+ * start : start address of packet byte1
+ * len : packet bytes - 1 */
+ for (i = 0; i < len; ++i)
+ hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+ /* return 2's complement of 8 bit hdr_sum */
+ return (u8)(~(hdr_sum & 0xff) + 1);
+}
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+ struct hdmi_infoframe *infoframe)
+{
+ u32 hdr_sum;
+ u8 chksum;
+ u32 aspect_ratio;
+ u32 mod;
+ u32 vic;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+ if (hdata->dvi_mode) {
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+ HDMI_VSI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+ HDMI_AVI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+ return;
+ }
+
+ switch (infoframe->type) {
+ case HDMI_PACKET_TYPE_AVI:
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+		/* Output format (RGB/YCbCr selection) hardcoded to zero */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+ AVI_ACTIVE_FORMAT_VALID |
+ AVI_UNDERSCANNED_DISPLAY_VALID);
+
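+		/* picture aspect ratio is hard-coded to 16:9 */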
+ aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+ AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+ if (hdata->type == HDMI_TYPE13)
+ vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
+ else
+ vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+ chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+ break;
+ case HDMI_PACKET_TYPE_AUI:
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+ break;
+ default:
+ break;
+ }
+}
+
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
@@ -1274,8 +1389,7 @@ static bool hdmi_is_connected(void *ctx)
return hdata->hpd;
}
-static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
- u8 *edid, int len)
+static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
{
struct edid *raw_edid;
struct hdmi_context *hdata = ctx;
@@ -1283,21 +1397,18 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
if (!hdata->ddc_port)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
- if (raw_edid) {
- hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
- memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
- * EDID_LENGTH, len));
- DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
- (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
- raw_edid->width_cm, raw_edid->height_cm);
- } else {
- return -ENODEV;
- }
+ if (!raw_edid)
+ return ERR_PTR(-ENODEV);
- return 0;
+ hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
+ DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ raw_edid->width_cm, raw_edid->height_cm);
+
+ return raw_edid;
}
static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
@@ -1534,14 +1645,16 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
/* resetting HDMI core */
hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
}
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- /* disable HPD interrupts */
+ struct hdmi_infoframe infoframe;
+
+ /* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1688,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
+ infoframe.type = HDMI_PACKET_TYPE_AVI;
+ infoframe.ver = HDMI_AVI_VERSION;
+ infoframe.len = HDMI_AVI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
+ infoframe.type = HDMI_PACKET_TYPE_AUI;
+ infoframe.ver = HDMI_AUI_VERSION;
+ infoframe.len = HDMI_AUI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
/* enable AVI packet every vsync, fixes purple line problem */
- hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
}
@@ -1651,7 +1772,7 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
if (val & HDMI_PHY_STATUS_READY)
break;
- mdelay(1);
+ usleep_range(1000, 2000);
}
/* steady state not achieved */
if (tries == 0) {
@@ -1818,7 +1939,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
if (val & HDMI_PHY_STATUS_READY)
break;
- mdelay(1);
+ usleep_range(1000, 2000);
}
/* steady state not achieved */
if (tries == 0) {
@@ -1870,9 +1991,27 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
/* reset hdmiphy */
hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
+}
+
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+ HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+ HDMI_PHY_POWER_OFF_EN);
}
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
@@ -1902,7 +2041,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
return;
}
- mdelay(10);
+ usleep_range(10000, 12000);
/* operation mode */
operation[0] = 0x1f;
@@ -1978,9 +2117,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
index = hdmi_v14_conf_index(m);
if (index >= 0) {
+ struct drm_mode_object base;
+ struct list_head head;
+
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
+
+ /* preserve display mode header while copying. */
+ head = adjusted_mode->head;
+ base = adjusted_mode->base;
memcpy(adjusted_mode, m, sizeof(*m));
+ adjusted_mode->head = head;
+ adjusted_mode->base = base;
break;
}
}
@@ -2015,6 +2163,13 @@ static void hdmi_commit(void *ctx)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ mutex_lock(&hdata->hdmi_mutex);
+ if (!hdata->powered) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+ mutex_unlock(&hdata->hdmi_mutex);
+
hdmi_conf_apply(hdata);
}
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
mutex_unlock(&hdata->hdmi_mutex);
- pm_runtime_get_sync(hdata->dev);
-
regulator_bulk_enable(res->regul_count, res->regul_bulk);
clk_enable(res->hdmiphy);
clk_enable(res->hdmi);
clk_enable(res->sclk_hdmi);
+
+ hdmiphy_poweron(hdata);
}
static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
* its reset state seems to meet the condition.
*/
hdmiphy_conf_reset(hdata);
+ hdmiphy_poweroff(hdata);
clk_disable(res->sclk_hdmi);
clk_disable(res->hdmi);
clk_disable(res->hdmiphy);
regulator_bulk_disable(res->regul_count, res->regul_bulk);
- pm_runtime_put_sync(hdata->dev);
-
mutex_lock(&hdata->hdmi_mutex);
hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
switch (mode) {
case DRM_MODE_DPMS_ON:
- hdmi_poweron(hdata);
+ if (pm_runtime_suspended(hdata->dev))
+ pm_runtime_get_sync(hdata->dev);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- hdmi_poweroff(hdata);
+ if (!pm_runtime_suspended(hdata->dev))
+ pm_runtime_put_sync(hdata->dev);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2109,7 +2265,7 @@ static struct exynos_hdmi_ops hdmi_ops = {
.dpms = hdmi_dpms,
};
-static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
@@ -2124,32 +2280,7 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
-{
- struct exynos_drm_hdmi_context *ctx = arg;
- struct hdmi_context *hdata = ctx->ctx;
- u32 intc_flag;
-
- intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
- /* clearing flags for HPD plug/unplug */
- if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
- DRM_DEBUG_KMS("unplugged\n");
- hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
- HDMI_INTC_FLAG_HPD_UNPLUG);
- }
- if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
- DRM_DEBUG_KMS("plugged\n");
- hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
- HDMI_INTC_FLAG_HPD_PLUG);
- }
-
- if (ctx->drm_dev)
- drm_helper_hpd_irq_event(ctx->drm_dev);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+static int hdmi_resources_init(struct hdmi_context *hdata)
{
struct device *dev = hdata->dev;
struct hdmi_resources *res = &hdata->res;
@@ -2166,27 +2297,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
memset(res, 0, sizeof(*res));
/* get clocks, power */
- res->hdmi = clk_get(dev, "hdmi");
+ res->hdmi = devm_clk_get(dev, "hdmi");
if (IS_ERR_OR_NULL(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
goto fail;
}
- res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
goto fail;
}
- res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
if (IS_ERR_OR_NULL(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
goto fail;
}
- res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
- res->hdmiphy = clk_get(dev, "hdmiphy");
+ res->hdmiphy = devm_clk_get(dev, "hdmiphy");
if (IS_ERR_OR_NULL(res->hdmiphy)) {
DRM_ERROR("failed to get clock 'hdmiphy'\n");
goto fail;
@@ -2194,7 +2325,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
- res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2335,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
}
- ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
if (ret) {
DRM_ERROR("failed to get regulators\n");
goto fail;
@@ -2217,28 +2348,6 @@ fail:
return -ENODEV;
}
-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- regulator_bulk_free(res->regul_count, res->regul_bulk);
- /* kfree is NULL-safe */
- kfree(res->regul_bulk);
- if (!IS_ERR_OR_NULL(res->hdmiphy))
- clk_put(res->hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
- clk_put(res->sclk_hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_pixel))
- clk_put(res->sclk_pixel);
- if (!IS_ERR_OR_NULL(res->sclk_hdmi))
- clk_put(res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(res->hdmi))
- clk_put(res->hdmi);
- memset(res, 0, sizeof(*res));
-
- return 0;
-}
-
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2415,7 @@ static struct platform_device_id hdmi_driver_types[] = {
}
};
+#ifdef CONFIG_OF
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -2314,8 +2424,9 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
+#endif
-static int __devinit hdmi_probe(struct platform_device *pdev)
+static int hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -2366,6 +2477,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(of_match_ptr(hdmi_match_types),
pdev->dev.of_node);
+ if (match == NULL)
+ return -ENODEV;
hdata->type = (enum hdmi_type)match->data;
} else {
hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2491,30 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
- ret = -EINVAL;
DRM_ERROR("hdmi_resources_init failed\n");
- goto err_data;
+ return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("failed to find registers\n");
- ret = -ENOENT;
- goto err_resource;
+ return -ENOENT;
}
- hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!hdata->regs) {
- DRM_ERROR("failed to map registers\n");
- ret = -ENXIO;
- goto err_resource;
- }
+ hdata->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hdata->regs))
+ return PTR_ERR(hdata->regs);
- ret = gpio_request(hdata->hpd_gpio, "HPD");
+ ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
- goto err_resource;
+ return ret;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
- ret = -ENOENT;
- goto err_gpio;
+ return -ENOENT;
}
hdata->ddc_port = hdmi_ddc;
@@ -2421,39 +2528,24 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
- hdata->external_irq = gpio_to_irq(hdata->hpd_gpio);
- if (hdata->external_irq < 0) {
- DRM_ERROR("failed to get GPIO external irq\n");
- ret = hdata->external_irq;
- goto err_hdmiphy;
- }
-
- hdata->internal_irq = platform_get_irq(pdev, 0);
- if (hdata->internal_irq < 0) {
- DRM_ERROR("failed to get platform internal irq\n");
- ret = hdata->internal_irq;
+ hdata->irq = gpio_to_irq(hdata->hpd_gpio);
+ if (hdata->irq < 0) {
+ DRM_ERROR("failed to get GPIO irq\n");
+ ret = hdata->irq;
goto err_hdmiphy;
}
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
- ret = request_threaded_irq(hdata->external_irq, NULL,
- hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
+ ret = request_threaded_irq(hdata->irq, NULL,
+ hdmi_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "hdmi_external", drm_hdmi_ctx);
+ "hdmi", drm_hdmi_ctx);
if (ret) {
- DRM_ERROR("failed to register hdmi external interrupt\n");
+ DRM_ERROR("failed to register hdmi interrupt\n");
goto err_hdmiphy;
}
- ret = request_threaded_irq(hdata->internal_irq, NULL,
- hdmi_internal_irq_thread, IRQF_ONESHOT,
- "hdmi_internal", drm_hdmi_ctx);
- if (ret) {
- DRM_ERROR("failed to register hdmi internal interrupt\n");
- goto err_free_irq;
- }
-
/* Attach HDMI Driver to common hdmi. */
exynos_hdmi_drv_attach(drm_hdmi_ctx);
@@ -2464,21 +2556,14 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
return 0;
-err_free_irq:
- free_irq(hdata->external_irq, drm_hdmi_ctx);
err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
-err_gpio:
- gpio_free(hdata->hpd_gpio);
-err_resource:
- hdmi_resources_cleanup(hdata);
-err_data:
return ret;
}
-static int __devexit hdmi_remove(struct platform_device *pdev)
+static int hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -2488,12 +2573,8 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
- free_irq(hdata->internal_irq, hdata);
- free_irq(hdata->external_irq, hdata);
+ free_irq(hdata->irq, hdata);
- gpio_free(hdata->hpd_gpio);
-
- hdmi_resources_cleanup(hdata);
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
@@ -2509,13 +2590,19 @@ static int hdmi_suspend(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
- disable_irq(hdata->internal_irq);
- disable_irq(hdata->external_irq);
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ disable_irq(hdata->irq);
hdata->hpd = false;
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
hdmi_poweroff(hdata);
return 0;
@@ -2526,22 +2613,60 @@ static int hdmi_resume(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
- enable_irq(hdata->external_irq);
- enable_irq(hdata->internal_irq);
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
+ enable_irq(hdata->irq);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ hdmi_poweron(hdata);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweroff(hdata);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweron(hdata);
+
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+static const struct dev_pm_ops hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+ SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
- .remove = __devexit_p(hdmi_remove),
+ .remove = hdmi_remove,
.id_table = hdmi_driver_types,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
- .of_match_table = hdmi_match_types,
+ .of_match_table = of_match_ptr(hdmi_match_types),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
index 1c3b6d8f1fe7..0ddf3957de15 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -5,24 +5,10 @@
* Inki Dae <inki.dae@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_HDMI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bbd..ea49d132ecf6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
@@ -53,16 +54,17 @@ static struct of_device_id hdmiphy_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
- .of_match_table = hdmiphy_match_types,
+ .of_match_table = of_match_ptr(hdmiphy_match_types),
},
.id_table = hdmiphy_id,
.probe = hdmiphy_probe,
- .remove = __devexit_p(hdmiphy_remove),
+ .remove = hdmiphy_remove,
.command = NULL,
};
EXPORT_SYMBOL(hdmiphy_driver);
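
The CONFIG_OF guards added around hdmi_match_types and hdmiphy_match_types follow a common pattern: the of_device_id table is only compiled when OF support is enabled, and of_match_ptr() hides the reference otherwise. A minimal sketch of that pattern for a hypothetical "foo" platform driver (names are illustrative only, not part of this patch):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_match);
#endif

static struct platform_driver foo_driver = {
	/* .probe/.remove omitted for brevity */
	.driver = {
		.name = "foo",
		/* of_match_ptr() evaluates to NULL when CONFIG_OF is not set,
		 * so no further #ifdefs are needed at the use site. */
		.of_match_table = of_match_ptr(foo_match),
	},
};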
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8e..c414584bfbae 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -35,15 +35,15 @@
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
dma_addr_t dma_addr;
- void __iomem *vaddr;
dma_addr_t chroma_dma_addr;
- void __iomem *chroma_vaddr;
uint32_t pixel_format;
unsigned int bpp;
unsigned int crtc_x;
@@ -59,6 +59,8 @@ struct hdmi_win_data {
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
+ bool enabled;
+ bool resume;
};
struct mixer_resources {
@@ -80,6 +82,7 @@ enum mixer_version_id {
struct mixer_context {
struct device *dev;
+ struct drm_device *drm_dev;
int pipe;
bool interlace;
bool powered;
@@ -90,6 +93,9 @@ struct mixer_context {
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
+ void *parent_ctx;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
};
struct mixer_drv_data {
@@ -594,7 +600,7 @@ static void vp_win_reset(struct mixer_context *ctx)
/* waiting until VP_SRESET_PROCESSING is 0 */
if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
break;
- mdelay(10);
+ usleep_range(10000, 12000);
}
WARN(tries == 0, "failed to reset Video Processor\n");
}
@@ -665,58 +671,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
spin_unlock_irqrestore(&res->reg_slock, flags);
}
-static void mixer_poweron(struct mixer_context *ctx)
+static int mixer_iommu_on(void *ctx, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct mixer_context *mdata = ctx;
+ struct drm_device *drm_dev;
- mutex_lock(&ctx->mixer_mutex);
- if (ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
- return;
- }
- ctx->powered = true;
- mutex_unlock(&ctx->mixer_mutex);
+ drm_hdmi_ctx = mdata->parent_ctx;
+ drm_dev = drm_hdmi_ctx->drm_dev;
- pm_runtime_get_sync(ctx->dev);
+ if (is_drm_iommu_supported(drm_dev)) {
+ if (enable)
+ return drm_iommu_attach_device(drm_dev, mdata->dev);
- clk_enable(res->mixer);
- if (ctx->vp_enabled) {
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
+ drm_iommu_detach_device(drm_dev, mdata->dev);
}
-
- mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
- mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered)
- goto out;
- mutex_unlock(&ctx->mixer_mutex);
-
- ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
-
- clk_disable(res->mixer);
- if (ctx->vp_enabled) {
- clk_disable(res->vp);
- clk_disable(res->sclk_mixer);
- }
-
- pm_runtime_put_sync(ctx->dev);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
-
-out:
- mutex_unlock(&ctx->mixer_mutex);
+ return 0;
}
static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +716,6 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_dpms(void *ctx, int mode)
-{
- struct mixer_context *mixer_ctx = ctx;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- mixer_poweron(mixer_ctx);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- mixer_poweroff(mixer_ctx);
- break;
- default:
- DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
- break;
- }
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
- struct mixer_context *mixer_ctx = ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
- int ret;
-
- ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
- MXR_INT_STATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
@@ -811,9 +748,7 @@ static void mixer_win_mode_set(void *ctx,
win_data = &mixer_ctx->win_data[win];
win_data->dma_addr = overlay->dma_addr[0];
- win_data->vaddr = overlay->vaddr[0];
win_data->chroma_dma_addr = overlay->dma_addr[1];
- win_data->chroma_vaddr = overlay->vaddr[1];
win_data->pixel_format = overlay->pixel_format;
win_data->bpp = overlay->bpp;
@@ -841,10 +776,19 @@ static void mixer_win_commit(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
if (win > 1 && mixer_ctx->vp_enabled)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
+
+ mixer_ctx->win_data[win].enabled = true;
}
static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +799,14 @@ static void mixer_win_disable(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ mixer_ctx->win_data[win].resume = false;
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
@@ -862,59 +814,149 @@ static void mixer_win_disable(void *ctx, int win)
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ mixer_ctx->win_data[win].enabled = false;
}
-static struct exynos_mixer_ops mixer_ops = {
- /* manager */
- .enable_vblank = mixer_enable_vblank,
- .disable_vblank = mixer_disable_vblank,
- .dpms = mixer_dpms,
+static void mixer_wait_for_vblank(void *ctx)
+{
+ struct mixer_context *mixer_ctx = ctx;
- /* overlay */
- .wait_for_vblank = mixer_wait_for_vblank,
- .win_mode_set = mixer_win_mode_set,
- .win_commit = mixer_win_commit,
- .win_disable = mixer_win_disable,
-};
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
+ atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for MIXER to signal VSYNC interrupt or return after
+	 * timeout which is set to 50ms (i.e. a 20 Hz refresh rate).
+ */
+ if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+ !atomic_read(&mixer_ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
-/* for pageflip event */
-static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
+static void mixer_window_suspend(struct mixer_context *ctx)
{
- struct exynos_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- bool is_checked = false;
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ mixer_win_disable(ctx, i);
+ }
+ mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ clk_enable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+ }
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+
+ mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- spin_lock_irqsave(&drm_dev->event_lock, flags);
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered)
+ goto out;
+ mutex_unlock(&ctx->mixer_mutex);
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (crtc != e->pipe)
- continue;
+ mixer_window_suspend(ctx);
- is_checked = true;
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ clk_disable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
}
- if (is_checked)
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+out:
+ mutex_unlock(&ctx->mixer_mutex);
}
+static void mixer_dpms(void *ctx, int mode)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_get_sync(mixer_ctx->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (!pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_put_sync(mixer_ctx->dev);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_mixer_ops mixer_ops = {
+ /* manager */
+ .iommu_on = mixer_iommu_on,
+ .enable_vblank = mixer_enable_vblank,
+ .disable_vblank = mixer_disable_vblank,
+ .wait_for_vblank = mixer_wait_for_vblank,
+ .dpms = mixer_dpms,
+
+ /* overlay */
+ .win_mode_set = mixer_win_mode_set,
+ .win_commit = mixer_win_commit,
+ .win_disable = mixer_win_disable,
+};
+
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
@@ -943,7 +985,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
}
drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
- mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(drm_hdmi_ctx->drm_dev,
+ ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
}
out:
@@ -960,8 +1009,8 @@ out:
return IRQ_HANDLED;
}
-static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
- struct platform_device *pdev)
+static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
{
struct mixer_context *mixer_ctx = ctx->ctx;
struct device *dev = &pdev->dev;
@@ -971,85 +1020,69 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
spin_lock_init(&mixer_res->reg_slock);
- mixer_res->mixer = clk_get(dev, "mixer");
+ mixer_res->mixer = devm_clk_get(dev, "mixer");
if (IS_ERR_OR_NULL(mixer_res->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
- goto fail;
+ return ret;
}
mixer_res->irq = res->start;
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
- clk_put(mixer_res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(mixer_res->mixer))
- clk_put(mixer_res->mixer);
- return ret;
}
-static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
- struct platform_device *pdev)
+static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
{
struct mixer_context *mixer_ctx = ctx->ctx;
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
- int ret;
- mixer_res->vp = clk_get(dev, "vp");
+ mixer_res->vp = devm_clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+ mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
if (mixer_res->sclk_hdmi)
@@ -1058,28 +1091,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
- clk_put(mixer_res->sclk_dac);
- if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
- clk_put(mixer_res->sclk_mixer);
- if (!IS_ERR_OR_NULL(mixer_res->vp))
- clk_put(mixer_res->vp);
- return ret;
}
static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1113,7 +1135,7 @@ static struct of_device_id mixer_match_types[] = {
}
};
-static int __devinit mixer_probe(struct platform_device *pdev)
+static int mixer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -1149,9 +1171,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
}
ctx->dev = &pdev->dev;
+ ctx->parent_ctx = (void *)drm_hdmi_ctx;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1202,13 +1227,66 @@ static int mixer_suspend(struct device *dev)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
+ mixer_poweroff(ctx);
+
+ return 0;
+}
+
+static int mixer_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
mixer_poweroff(ctx);
return 0;
}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
#endif
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+static const struct dev_pm_ops mixer_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+ SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
struct platform_driver mixer_driver = {
.driver = {
@@ -1218,6 +1296,6 @@ struct platform_driver mixer_driver = {
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
- .remove = __devexit_p(mixer_remove),
+ .remove = mixer_remove,
.id_table = mixer_driver_types,
};
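
Both the HDMI and mixer DPMS paths above now only adjust the device's runtime-PM state; the actual power sequencing is left to the runtime_suspend/runtime_resume callbacks. A minimal sketch of that flow for a hypothetical device with foo_poweron()/foo_poweroff() helpers (illustration only, not part of this patch):

#include <drm/drmP.h>		/* DRM_MODE_DPMS_* */
#include <linux/pm_runtime.h>

static void foo_dpms(struct device *dev, int mode)
{
	switch (mode) {
	case DRM_MODE_DPMS_ON:
		/* get_sync() resumes the device, invoking the driver's
		 * runtime_resume callback, where foo_poweron() would run. */
		if (pm_runtime_suspended(dev))
			pm_runtime_get_sync(dev);
		break;
	default:
		/* put_sync() drops the reference; runtime_suspend then runs
		 * foo_poweroff() once the device is otherwise idle. */
		if (!pm_runtime_suspended(dev))
			pm_runtime_put_sync(dev);
		break;
	}
}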
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 000000000000..b4f9ca1fd851
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2 (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1 (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2 (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3 (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4 (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1 (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2 (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3 (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4 (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1 (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2 (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3 (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4 (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL (0x58)
+/* Target area */
+#define EXYNOS_CITAREA (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2 (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0 (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0 (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0 (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1 (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1 (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1 (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5 (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6 (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7 (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8 (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9 (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10 (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11 (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12 (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13 (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14 (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15 (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16 (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17 (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18 (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19 (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20 (0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21 (0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22 (0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23 (0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24 (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25 (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26 (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27 (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28 (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29 (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30 (0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31 (0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32 (0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5 (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6 (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7 (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8 (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9 (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10 (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11 (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12 (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13 (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14 (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15 (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16 (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17 (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18 (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19 (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20 (0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21 (0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22 (0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23 (0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24 (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25 (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26 (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27 (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28 (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29 (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30 (0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31 (0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32 (0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5 (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6 (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7 (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8 (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9 (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10 (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11 (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12 (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13 (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14 (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15 (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16 (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17 (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18 (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19 (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20 (0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21 (0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22 (0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23 (0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24 (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25 (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26 (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27 (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28 (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29 (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30 (0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31 (0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32 (0x34c)
+
+/*
+ * Macro part
+*/
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory */
+#define DEF_PP 4
+#define EXYNOS_CIOYSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOYSA1 + (__x) * 4) : \
+ (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
+/* Number of Default PingPong Memory */
+#define DEF_IPP 1
+#define EXYNOS_CIIYSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x) ((x & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
+
+/*
+ * Bit definition part
+*/
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK (0xfff << 16)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
+#define EXYNOS_MSCTRL_ENVID (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
+#define EXYNOS_CLKSRC_SCLK (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT 23
+
+#endif /* EXYNOS_REGS_FIMC_H */
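
The EXYNOS_CIOYSA()/EXYNOS_CIOCBSA()/EXYNOS_CIOCRSA() macros above split the 32 ping-pong buffer slots between the original registers (indices 0-3) and the extended bank added for FIMC v5.1 (indices 4-31). A minimal usage sketch, assuming a hypothetical helper that already has the ioremapped register base (illustration only, not part of this patch):

#include <linux/io.h>
#include <linux/types.h>

/* Program the Y/Cb/Cr output-DMA start addresses for one buffer slot. */
static void fimc_set_output_addr(void __iomem *regs, int index,
				 u32 y, u32 cb, u32 cr)
{
	/* index 0..3 maps to EXYNOS_CIOYSA1..4, index 4..31 to CIOYSA5..32 */
	writel(y,  regs + EXYNOS_CIOYSA(index));
	writel(cb, regs + EXYNOS_CIOCBSA(index));
	writel(cr, regs + EXYNOS_CIOCRSA(index));
}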
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 000000000000..9ad592707aaf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
+#define GSC_ENABLE_NORM_MODE (0 << 7)
+#define GSC_ENABLE_IPC_MODE (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE (1 << 3)
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
+#define GSC_IRQ_OR_MASK (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON 0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
+#define GSC_IN_RB_SWAP_MASK (1 << 19)
+#define GSC_IN_RB_SWAP (1 << 19)
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (2 << 14)
+#define GSC_IN_RGB_SD_WIDE (1 << 14)
+#define GSC_IN_RGB_SD_NARROW (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_C_16x16 (1 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_LOCAL_CAM3 (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
+#define GSC_IN_LOCAL_CAM1 (1 << 1)
+#define GSC_IN_LOCAL_CAM0 (0 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_LOCAL (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE 0x1C
+#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK (1 << 12)
+#define GSC_OUT_RB_SWAP (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_C_16x16 (1 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK (7 << 16)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK (7 << 0)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO 0x2C
+#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE 0x3C
+#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE 0x48
+#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON 0xA78
+#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
+#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION 0xA7C
+#define GSC_VPOS_F(x) ((x) << 0)
+
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT 0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT 0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x) (x << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x))
+#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718c..ef1b3eb3ba6e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN (1 << 0)
+
/* Video related registers */
#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1))
#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1))
#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN (0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
+
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 000000000000..a09ac6e180da
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG 0x00
+#define ROT_CONFIG_IRQ (3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL 0x10
+#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
+#define ROT_CONTROL_FMT_RGB888 (6 << 8)
+#define ROT_CONTROL_FMT_MASK (7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
+#define ROT_CONTROL_FLIP_MASK (3 << 6)
+#define ROT_CONTROL_ROT_90 (1 << 4)
+#define ROT_CONTROL_ROT_180 (2 << 4)
+#define ROT_CONTROL_ROT_270 (3 << 4)
+#define ROT_CONTROL_ROT_MASK (3 << 4)
+#define ROT_CONTROL_START (1 << 0)
+
+/* Status */
+#define ROT_STATUS 0x20
+#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
+#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE 1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE 0x3c
+#define ROT_DST_BUF_SIZE 0x5c
+#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS 0x40
+#define ROT_DST_CROP_POS 0x60
+#define ROT_CROP_POS_Y(x) ((x) << 16)
+#define ROT_CROP_POS_X(x) ((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE 0x44
+#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask) ((max) & (mask))
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
+
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 42e665c7e90a..1188f0fe7e4f 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,6 +1,6 @@
config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
- depends on DRM && PCI && X86 && EXPERIMENTAL
+ depends on DRM && PCI && X86
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1ceca3d13b65..23e14e93991f 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
/* Cedarview */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index e3a3978cf320..88d9ef6b5b4a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -1650,7 +1651,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 7272a461edfe..e223b500022e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property, &curValue))
return -1;
if (curValue == value)
return 0;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property, value))
return -1;
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index b362dd39bf5a..d81dbc3368f0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property,
&curValue))
return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
if (curValue == value)
return 0;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 32dba2ab53e1..2d4ab48f07a2 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_connector_property_get_value(connector, property, &val))
+ if (drm_object_property_get_value(&connector->base, property, &val))
goto set_prop_error;
if (val == value)
goto set_prop_done;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property, value))
goto set_prop_error;
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
}
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_connector_property_set_value(connector, property,
+ if (drm_object_property_set_value(&connector->base, property,
value))
goto set_prop_error;
else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
- if (!dev || ((pipe != 0) && (pipe != 2))) {
+ if (pipe != 0 && pipe != 2) {
DRM_ERROR("Invalid parameter\n");
return;
}
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*attach properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index dec6a9aea3c6..74485dc43945 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
- drm_connector_property_get_value(connector,
+ drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index f2f9f38a5362..30adbbe23024 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -249,3 +249,9 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
extern void oaktrail_hdmi_save(struct drm_device *dev);
extern void oaktrail_hdmi_restore(struct drm_device *dev);
extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
+
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index cdafd2acc72f..3071526bc3c1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -168,6 +168,11 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
+ if (pipe == 1) {
+ oaktrail_crtc_hdmi_dpms(crtc, mode);
+ return;
+ }
+
if (!gma_power_begin(dev, true))
return;
@@ -302,6 +307,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
+ if (pipe == 1)
+ return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
if (!gma_power_begin(dev, true))
return 0;
@@ -343,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
(mode->crtc_vdisplay - 1));
if (psb_intel_encoder)
- drm_connector_property_get_value(connector,
+ drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 010b820744a5..08747fd7105c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
- .hdmi_mask = (1 << 0),
+ .hdmi_mask = (1 << 1),
.lvds_mask = (1 << 0),
.cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 69e51e903f35..f036f1fc161e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,6 +155,345 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
HDMI_READ(HDMI_HCR);
}
+static void wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20ms, i.e. one cycle at 50Hz. */
+ mdelay(20);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+ u32 htotal, new_crtc_htotal;
+
+ htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+ /*
+ * 1024 x 768 new_crtc_htotal = 0x1024;
+ * 1280 x 1024 new_crtc_htotal = 0x0c34;
+ */
+ new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+ DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
+ return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+ int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+ int np_min, np_max, nr_min, nr_max;
+ int np, nr, nf;
+
+ np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+ np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+ if (np_min < oaktrail_hdmi_limit.np.min)
+ np_min = oaktrail_hdmi_limit.np.min;
+ if (np_max > oaktrail_hdmi_limit.np.max)
+ np_max = oaktrail_hdmi_limit.np.max;
+
+ nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+ nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+ if (nr_min < oaktrail_hdmi_limit.nr.min)
+ nr_min = oaktrail_hdmi_limit.nr.min;
+ if (nr_max > oaktrail_hdmi_limit.nr.max)
+ nr_max = oaktrail_hdmi_limit.nr.max;
+
+ np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+ nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+ nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+ DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+ /*
+ * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
+ * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+ */
+ best_clock->np = np;
+ best_clock->nr = nr - 1;
+ best_clock->nf = (nf << 14);
+}
+
+static void scu_busy_loop(void __iomem *scu_base)
+{
+ u32 status = 0;
+ u32 loop_count = 0;
+
+ status = readl(scu_base + 0x04);
+ while (status & 1) {
+		udelay(1); /* SCU processing time is a few microseconds */
+ status = readl(scu_base + 0x04);
+ loop_count++;
+		/* break if the SCU doesn't clear the busy bit after many retries */
+ if (loop_count > 1000) {
+ DRM_DEBUG_KMS("SCU IPC timed out");
+ return;
+ }
+ }
+}
+
+/*
+ * You don't want to know, you really really don't want to know....
+ *
+ * This is magic. However it's safe magic because of the way the platform
+ * works and it is necessary magic.
+ */
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+ void __iomem *base;
+ unsigned long scu_ipc_mmio = 0xff11c000UL;
+ int scu_len = 1024;
+
+ base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+ if (base == NULL) {
+ DRM_ERROR("failed to map scu mmio\n");
+ return;
+ }
+
+ /* scu ipc: assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffdf, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ /* scu ipc: de-assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffff, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ iounmap(base);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int pipe = 1;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int refclk;
+ struct oaktrail_hdmi_clock clock;
+ u32 dspcntr, pipeconf, dpll, temp;
+ int dspcntr_reg = DSPBCNTR;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable dpll if necessary */
+ dpll = REG_READ(DPLL_CTRL);
+ if ((dpll & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ udelay(150);
+
+ /* Reset controller */
+ oaktrail_hdmi_reset(dev);
+
+ /* program and enable dpll */
+ refclk = 25000;
+ oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+ /* Set the DPLL */
+ dpll = REG_READ(DPLL_CTRL);
+ dpll &= ~DPLL_PDIV_MASK;
+ dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+ REG_WRITE(DPLL_CTRL, 0x00000008);
+ REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+ REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+ REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+ REG_WRITE(DPLL_UPDATE, 0x80000000);
+ REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+ udelay(150);
+
+ /* configure HDMI */
+ HDMI_WRITE(0x1004, 0x1fd);
+ HDMI_WRITE(0x2000, 0x1);
+ HDMI_WRITE(0x2008, 0x0);
+ HDMI_WRITE(0x3130, 0x8);
+ HDMI_WRITE(0x101c, 0x1800810);
+
+ temp = htotal_calculate(adjusted_mode);
+ REG_WRITE(htot_reg, temp);
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
+
+ REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ REG_WRITE(PCH_PIPEBCONF, pipeconf);
+ REG_READ(PCH_PIPEBCONF);
+ wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ wait_for_vblank(dev);
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ u32 temp;
+
+ DRM_DEBUG_KMS("%s %d\n", __func__, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_OFF:
+ REG_WRITE(VGACNTRL, 0x80000000);
+
+ /* Disable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+ REG_READ(DSPBCNTR);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ /* Disable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Disable LNW Pipes, etc */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+
+ /* wait for pipe off */
+ udelay(150);
+
+ /* Disable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+
+ /* wait for dpll off */
+ udelay(150);
+
+ break;
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) != 0) {
+ REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+ temp = REG_READ(DPLL_CLK_ENABLE);
+ REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+ REG_READ(DPLL_CLK_ENABLE);
+ }
+ /* wait for dpll warm up */
+ udelay(150);
+
+ /* Enable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Enable LNW Pipe B */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+
+ wait_for_vblank(dev);
+
+ /* Enable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+ }
+
+ /* DSPARB */
+ REG_WRITE(DSPARB, 0x00003fbf);
+
+ /* FW1 */
+ REG_WRITE(0x70034, 0x3f880a0a);
+
+ /* FW2 */
+ REG_WRITE(0x70038, 0x0b060808);
+
+ /* FW4 */
+ REG_WRITE(0x70050, 0x08030404);
+
+ /* FW5 */
+ REG_WRITE(0x70054, 0x04040404);
+
+ /* LNC Chicken Bits - Squawk! */
+ REG_WRITE(0x70400, 0x4000);
+
+ return;
+}
+
static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
static int dpms_mode = -1;
@@ -233,13 +572,15 @@ static const unsigned char raw_edid[] = {
static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
struct i2c_adapter *i2c_adap;
struct edid *edid;
- struct drm_display_mode *mode, *t;
- int i = 0, ret = 0;
+ int ret = 0;
+ /*
+ * FIXME: We need to figure this lot out. In theory we can
+ * read the EDID somehow but I've yet to find working reference
+ * code.
+ */
i2c_adap = i2c_get_adapter(3);
if (i2c_adap == NULL) {
DRM_ERROR("No ddc adapter available!\n");
@@ -253,17 +594,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
}
-
- /*
- * prune modes that require frame buffer bigger than stolen mem
- */
- list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
- if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
- i++;
- drm_mode_remove(connector, mode);
- }
- }
- return ret - i;
+ return ret;
}
static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -349,6 +680,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_sysfs_connector_add(connector);
+ dev_info(dev->dev, "HDMI initialised.\n");
return;
@@ -403,6 +735,9 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
dev_priv->hdmi_priv = hdmi_dev;
oaktrail_hdmi_audio_disable(dev);
+
+ dev_info(dev->dev, "HDMI hardware present.\n");
+
return;
free:
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 558c77fb55ec..325013a9c48c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
return;
}
- drm_connector_property_get_value(
- connector,
+ drm_object_property_get_value(
+ &connector->base,
dev->mode_config.scaling_mode_property,
&v);
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 2a4c3a9e33e3..9fa5fa2e6192 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property,
&curval))
goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
if (curval == value)
goto set_prop_done;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
} else if (!strcmp(property->name, "backlight")) {
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index fc9292705dbf..a4cc777ab7a6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
temp_value = val;
if (psb_intel_sdvo_connector->left == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->right, val);
if (psb_intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->right == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->left, val);
if (psb_intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->top == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->bottom, val);
if (psb_intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (psb_intel_sdvo_connector->bottom == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->top, val);
if (psb_intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+ drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
psb_intel_sdvo_connector->tv_format, 0);
return true;
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
psb_intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!psb_intel_sdvo_connector->name) return false; \
- drm_connector_attach_property(connector, \
+ drm_object_attach_property(&connector->base, \
psb_intel_sdvo_connector->name, \
psb_intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->left)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->left,
psb_intel_sdvo_connector->left_margin);
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->right)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->right,
psb_intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->top)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->top,
psb_intel_sdvo_connector->top_margin);
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->bottom)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->bottom,
psb_intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->dot_crawl)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->dot_crawl,
psb_intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 4a07ab596174..771ff66711af 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -700,7 +700,7 @@ static struct i2c_driver tc35876x_bridge_i2c_driver = {
},
.id_table = tc35876x_bridge_id,
.probe = tc35876x_bridge_probe,
- .remove = __devexit_p(tc35876x_bridge_remove),
+ .remove = tc35876x_bridge_remove,
};
/* LCD panel I2C */
@@ -741,7 +741,7 @@ static struct i2c_driver cmi_lcd_i2c_driver = {
},
.id_table = cmi_lcd_i2c_id,
.probe = cmi_lcd_i2c_probe,
- .remove = __devexit_p(cmi_lcd_i2c_remove),
+ .remove = cmi_lcd_i2c_remove,
};
/* HACK to create I2C device while it's not created by platform code */
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 599099fe76e3..b865d0728e28 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
else
priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
encoder->dev->mode_config.tv_subconnector_property,
priv->subconnector);
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
- drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
priv->select_subconnector);
- drm_connector_attach_property(connector, conf->tv_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
priv->subconnector);
- drm_connector_attach_property(connector, conf->tv_left_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
priv->hmargin);
- drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_connector_attach_property(connector, conf->tv_mode_property,
+ drm_object_attach_property(&connector->base, conf->tv_mode_property,
priv->norm);
- drm_connector_attach_property(connector, conf->tv_brightness_property,
+ drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
- drm_connector_attach_property(connector, conf->tv_contrast_property,
+ drm_object_attach_property(&connector->base, conf->tv_contrast_property,
priv->contrast);
- drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+ drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
priv->flicker);
- drm_connector_attach_property(connector, priv->scale_property,
+ drm_object_attach_property(&connector->base, priv->scale_property,
priv->scale);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dde8b505bf7f..32158d21c632 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <generated/utsrelease.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@@ -317,7 +318,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
- if (!work->pending) {
+ if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
@@ -328,7 +329,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Stall check enabled, ");
else
seq_printf(m, "Stall check waiting for page flip ioctl, ");
- seq_printf(m, "%d prepares\n", work->pending);
+ seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -641,6 +642,7 @@ static void i915_ring_error_state(struct seq_file *m,
seq_printf(m, "%s command stream:\n", ring_str(ring));
seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
@@ -655,10 +657,12 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- seq_printf(m, " SYNC_0: 0x%08x\n",
- error->semaphore_mboxes[ring][0]);
- seq_printf(m, " SYNC_1: 0x%08x\n",
- error->semaphore_mboxes[ring][1]);
+ seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][0],
+ error->semaphore_seqno[ring][0]);
+ seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][1],
+ error->semaphore_seqno[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
@@ -687,10 +691,13 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
+ seq_printf(m, "Kernel: " UTS_RELEASE);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+ seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
seq_printf(m, "CCID: 0x%08x\n", error->ccid);
for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -1068,7 +1075,7 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rpmodectl1, gt_core_status, rcctl1;
+ u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
int count=0, ret;
@@ -1097,6 +1104,9 @@ static int gen6_drpc_info(struct seq_file *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ mutex_unlock(&dev_priv->rps.hw_lock);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1158,12 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "RC6++ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6pp));
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
}
@@ -1273,7 +1289,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1282,19 +1298,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
- I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_READ_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode read of freq table timed out\n");
- continue;
- }
- ia_freq = I915_READ(GEN6_PCODE_DATA);
+ ia_freq = gpu_freq;
+ sandybridge_pcode_read(dev_priv,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq);
seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
}
@@ -1398,15 +1409,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- if (dev_priv->pwrctx) {
+ if (dev_priv->ips.pwrctx) {
seq_printf(m, "power context ");
- describe_obj(m, dev_priv->pwrctx);
+ describe_obj(m, dev_priv->ips.pwrctx);
seq_printf(m, "\n");
}
- if (dev_priv->renderctx) {
+ if (dev_priv->ips.renderctx) {
seq_printf(m, "render context ");
- describe_obj(m, dev_priv->renderctx);
+ describe_obj(m, dev_priv->ips.renderctx);
seq_printf(m, "\n");
}
@@ -1449,7 +1460,7 @@ static const char *swizzle_string(unsigned swizzle)
case I915_BIT_6_SWIZZLE_9_10_17:
return "bit9/bit10/bit17";
case I915_BIT_6_SWIZZLE_UNKNOWN:
- return "unkown";
+ return "unknown";
}
return "bug";
@@ -1711,13 +1722,13 @@ i915_max_freq_read(struct file *filp,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1752,7 +1763,7 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1762,7 +1773,7 @@ i915_max_freq_write(struct file *filp,
dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}
@@ -1787,13 +1798,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1826,7 +1837,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1836,7 +1847,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 61ae104dca8c..99daa896105d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -104,32 +104,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
}
/**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-static int i915_init_phys_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- /* Program Hardware Status Page */
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
-
- if (!dev_priv->status_page_dmah) {
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
-
- memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
- 0, PAGE_SIZE);
-
- i915_write_hws_pga(dev);
-
- DRM_DEBUG_DRIVER("Enabled hardware status page\n");
- return 0;
-}
-
-/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
@@ -167,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
+ ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 0;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
- master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -618,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
static int i915_quiescent(struct drm_device *dev)
{
- struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
i915_kernel_lost_context(dev);
- return intel_wait_ring_idle(ring);
+ return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -775,21 +747,21 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG_DRIVER("\n");
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
- return dev_priv->counter;
+ return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +792,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
return ret;
@@ -1014,6 +986,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1068,7 +1046,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_ring_buffer *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -1088,6 +1066,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
+ ring = LP_RING(dev_priv);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1326,6 +1305,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
+ INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
+
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -1491,19 +1472,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- ret = -EIO;
+ ret = i915_gem_gtt_init(dev);
+ if (ret)
goto put_bridge;
- }
-
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto put_gmch;
- }
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@@ -1590,18 +1561,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
- /* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
i915_gem_load(dev);
- /* Init HWS */
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_phys_hws(dev);
- if (ret)
- goto out_gem_unload;
- }
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1621,6 +1584,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1678,7 +1643,7 @@ out_mtrrfree:
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
- intel_gmch_remove();
+ i915_gem_gtt_fini(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1721,6 +1686,7 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
+ cancel_work_sync(&dev_priv->console_resume_work);
/*
* free the memory space allocated for the child device
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6770ee6084b4..117265840b1f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
- "Override lid status (0=autodetect [default], 1=lid open, "
- "-1=lid closed)");
+ "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+ "-1=force lid closed, -2=force lid open)");
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,26 +410,36 @@ void intel_detect_pch(struct drm_device *dev)
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- int id;
+ unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+
intel_modeset_disable(dev);
drm_irq_uninstall(dev);
@@ -526,24 +532,29 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
}
-static int i915_drm_thaw(struct drm_device *dev)
+void intel_console_resume(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ console_resume_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ console_lock();
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
+}
+
+static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
i915_restore_state(dev);
intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ironlake_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
@@ -552,8 +563,7 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
- intel_modeset_setup_hw_state(dev);
- drm_mode_config_reset(dev);
+ intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
}
@@ -561,14 +571,41 @@ static int i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
- console_lock();
- intel_fbdev_set_suspend(dev, 0);
- console_unlock();
+ /*
+ * The console lock can be pretty contended on resume due
+ * to all the printk activity. Try to keep it out of the hot
+ * path of resume if possible.
+ */
+ if (console_trylock()) {
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ } else {
+ schedule_work(&dev_priv->console_resume_work);
+ }
+
+ return error;
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ intel_gt_reset(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ __i915_drm_thaw(dev);
+
return error;
}
int i915_resume(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -579,7 +616,20 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- ret = i915_drm_thaw(dev);
+ intel_gt_reset(dev);
+
+ /*
+ * Platforms with opregion should have a sane BIOS; older ones (gen3 and
+ * earlier) need this, since the BIOS might clear all our scratch PTEs.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ !dev_priv->opregion.header) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ ret = __i915_drm_thaw(dev);
if (ret)
return ret;
@@ -827,13 +877,12 @@ int i915_reset(struct drm_device *dev)
return 0;
}
-static int __devinit
-i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (intel_info->is_haswell || intel_info->is_valleyview)
+ if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
@@ -1140,12 +1189,40 @@ static bool IS_DISPLAYREG(u32 reg)
if (reg == GEN6_GDRST)
return false;
+ switch (reg) {
+ case _3D_CHICKEN3:
+ case IVB_CHICKEN3:
+ case GEN7_COMMON_SLICE_CHICKEN1:
+ case GEN7_L3CNTLREG1:
+ case GEN7_L3_CHICKEN_MODE_REGISTER:
+ case GEN7_ROW_CHICKEN2:
+ case GEN7_L3SQCREG4:
+ case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
+ case GEN7_HALF_SLICE_CHICKEN1:
+ case GEN6_MBCTL:
+ case GEN6_UCGCTL2:
+ return false;
+ default:
+ break;
+ }
+
return true;
}
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+ /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+ * chip from rc6 before touching it for real. MI_MODE is masked, hence
+ * harmless to write 0 into. */
+ I915_WRITE_NOTRACE(MI_MODE, 0);
+}
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1254,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+ I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+ } \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f511fa2f4168..12ab3bdea54d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,6 +58,14 @@ enum pipe {
};
#define pipe_name(p) ((p) + 'A')
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -93,6 +101,12 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
+struct intel_ddi_plls {
+ int spll_refcount;
+ int wrpll1_refcount;
+ int wrpll2_refcount;
+};
+
/* Interface history:
*
* 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
struct drm_i915_gem_object *cur_obj;
};
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- int start;
- int size;
- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -182,15 +188,19 @@ struct drm_i915_error_state {
u32 pgtbl_er;
u32 ier;
u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
+ u32 ctl[I915_NUM_RINGS];
u32 ipeir[I915_NUM_RINGS];
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
@@ -251,6 +261,7 @@ struct drm_i915_display_funcs {
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+ void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -263,7 +274,6 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
- void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
@@ -338,6 +348,7 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
+ struct drm_device *dev;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
@@ -374,6 +385,11 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint PCH */
};
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -383,154 +399,18 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
- bool force_bit;
+ u32 force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
-typedef struct drm_i915_private {
- struct drm_device *dev;
-
- const struct intel_device_info *info;
-
- int relative_constants_mode;
-
- void __iomem *regs;
-
- struct drm_i915_gt_funcs gt;
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- struct spinlock gt_lock;
-
- struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
-
- /** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct mutex gmbus_mutex;
-
- /**
- * Base address of the gmbus and gpio block.
- */
- uint32_t gpio_mmio_base;
-
- struct pci_dev *bridge_dev;
- struct intel_ring_buffer ring[I915_NUM_RINGS];
- uint32_t next_seqno;
-
- drm_dma_handle_t *status_page_dmah;
- uint32_t counter;
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
-
- struct resource mch_res;
-
- atomic_t irq_received;
-
- /* protects the irq masks */
- spinlock_t irq_lock;
-
- /* DPIO indirect register protection */
- spinlock_t dpio_lock;
-
- /** Cached value of IMR to avoid reads in updating the bitfield */
- u32 pipestat[2];
- u32 irq_mask;
- u32 gt_irq_mask;
- u32 pch_irq_mask;
-
- u32 hotplug_supported_mask;
- struct work_struct hotplug_work;
-
- int num_pipe;
- int num_pch_pll;
-
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
- struct timer_list hangcheck_timer;
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
- unsigned int stop_rings;
-
- unsigned long cfb_size;
- unsigned int cfb_fb;
- enum plane cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
- struct intel_opregion opregion;
-
- /* overlay */
- struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
-
- /* LVDS info */
- int backlight_level; /* restore backlight to this value */
- bool backlight_enabled;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
- /* Feature bits from the VBIOS */
- unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int display_clock_mode:1;
- int lvds_ssc_freq;
- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
-
- bool initialized;
- bool support;
- int bpp;
- struct edp_power_seq pps;
- } edp;
- bool no_aux_handshake;
-
- struct notifier_block lid_notifier;
-
- int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
- spinlock_t error_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct work_struct error_work;
- struct completion error_completion;
- struct workqueue_struct *wq;
-
- /* Display functions */
- struct drm_i915_display_funcs display;
-
- /* PCH chipset type */
- enum intel_pch pch_type;
-
- unsigned long quirks;
-
- /* Register state */
- bool modeset_on_lid;
+struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -676,10 +556,206 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+ struct work_struct work;
+ u32 pm_iir;
+ /* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ spinlock_t lock;
+
+ /* The variables below and all the rps hw state are protected by
+ * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+
+ struct delayed_work delayed_resume_work;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested.
+ */
+ struct mutex hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ uint32_t counter;
+};
+
+struct intel_l3_parity {
+ u32 *remap_info;
+ struct work_struct error_work;
+};
+
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ const struct intel_device_info *info;
+
+ int relative_constants_mode;
+
+ void __iomem *regs;
+
+ struct drm_i915_gt_funcs gt;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct spinlock gt_lock;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct mutex gmbus_mutex;
+
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
+ struct pci_dev *bridge_dev;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
+ uint32_t next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ struct resource mch_res;
+
+ atomic_t irq_received;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ u32 pipestat[2];
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
+
+ u32 hotplug_supported_mask;
+ struct work_struct hotplug_work;
+
+ int num_pipe;
+ int num_pch_pll;
+
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ unsigned int stop_rings;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+
+ struct intel_opregion opregion;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
+
+ /* LVDS info */
+ int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
+
+ int crt_ddc_pin;
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ spinlock_t error_lock;
+ /* Protected by dev->error_lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct error_work;
+ struct completion error_completion;
+ struct workqueue_struct *wq;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+ unsigned short pch_id;
+
+ unsigned long quirks;
+
+ /* Register state */
+ bool modeset_on_lid;
struct {
/** Bridge to intel-gtt-ko */
- const struct intel_gtt *gtt;
+ struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
@@ -706,9 +782,8 @@ typedef struct drm_i915_private {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
- u32 *l3_remap_info;
-
struct shrinker inactive_shrinker;
+ bool shrinker_no_lock_stealing;
/**
* List of objects currently involved in rendering.
@@ -785,19 +860,6 @@ typedef struct drm_i915_private {
u32 object_count;
} mm;
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct {
- unsigned allow_batchbuffer : 1;
- u32 __iomem *gfx_hws_cpu_addr;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
- } dri1;
-
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +873,7 @@ typedef struct drm_i915_private {
wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
@@ -820,46 +883,17 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
- struct drm_connector *int_lvds_connector;
- struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
+ struct intel_l3_parity l3_parity;
+
/* gen6+ rps state */
- struct {
- struct work_struct work;
- u32 pm_iir;
- /* lock - irqsave spinlock that protectects the work_struct and
- * pm_iir. */
- spinlock_t lock;
-
- /* The below variables an all the rps hw state are protected by
- * dev->struct mutext. */
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- } rps;
+ struct intel_gen6_power_mgmt rps;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
- struct {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- u8 corr;
-
- int c_m;
- int r_t;
- } ips;
+ struct intel_ilk_power_mgmt ips;
enum no_fbc_reason no_fbc_reason;
@@ -871,14 +905,27 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+ /*
+ * The console may be contended at resume, but we don't
+ * want resume to block on it.
+ */
+ struct work_struct console_resume_work;
+
struct backlight_device *backlight;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- struct work_struct parity_error_work;
bool hw_contexts_disabled;
uint32_t hw_context_size;
+
+ bool fdi_rx_polarity_reversed;
+
+ struct i915_suspend_saved_registers regfile;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1057,6 +1104,7 @@ struct drm_i915_gem_object {
*/
atomic_t pending_flip;
};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1120,9 +1168,17 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
+ (dev)->pci_device == 0x0152 || \
+ (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0A00)
/*
* The genX designation typically refers to the render engine, so render
@@ -1148,6 +1204,9 @@ struct drm_i915_file_private {
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1168,6 +1227,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1250,6 +1316,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1324,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1368,8 +1436,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno);
+ struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1387,7 +1454,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1499,6 +1566,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gtt_chipset_flush();
+}
+
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1595,11 +1670,12 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1628,6 +1704,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
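+/* Sketch of the expected calling convention (an assumption drawn from the
+ * new rps.hw_lock above; mbox/val are placeholders, not values from this
+ * patch):
+ *
+ *	mutex_lock(&dev_priv->rps.hw_lock);
+ *	ret = sandybridge_pcode_write(dev_priv, mbox, val);
+ *	mutex_unlock(&dev_priv->rps.hw_lock);
+ *
+ * i.e. PCU mailbox traffic is serialized by rps.hw_lock, taken after
+ * struct_mutex when both are needed.
+ */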
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b285da4449b..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -845,12 +845,12 @@ out:
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
}
}
if (needs_clflush_after)
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
return ret;
}
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Now bind it into the GTT if needed */
- if (!obj->map_and_fenceable) {
- ret = i915_gem_object_unbind(obj);
- if (ret)
- goto unlock;
- }
- if (!obj->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
- if (ret)
- goto unlock;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
- }
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret)
+ goto unlock;
- if (!obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unpin;
ret = i915_gem_object_get_fence(obj);
if (ret)
- goto unlock;
-
- if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ goto unpin;
obj->fault_mappable = true;
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+ i915_gem_object_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -1528,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
if (obj->base.map_list.map)
return 0;
+ dev_priv->mm.shrinker_no_lock_stealing = true;
+
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
- return ret;
+ goto out;
/* Badly fragmented mmap space? The only way we can recover
* space is by destroying unwanted objects. We can't randomly release
@@ -1542,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
- return ret;
+ goto out;
i915_gem_shrink_all(dev_priv);
- return drm_gem_create_mmap_offset(&obj->base);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+ dev_priv->mm.shrinker_no_lock_stealing = false;
+
+ return ret;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1707,10 +1702,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages_pin_count)
return -EBUSY;
+ /* ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect the pages from being reaped by removing the
+ * object from the gtt lists early. */
+ list_del(&obj->gtt_list);
+
ops->put_pages(obj);
obj->pages = NULL;
- list_del(&obj->gtt_list);
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -1718,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
}
static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+ bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
long count = 0;
@@ -1726,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
gtt_list) {
- if (i915_gem_object_is_purgeable(obj) &&
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
@@ -1737,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list,
mm_list) {
- if (i915_gem_object_is_purgeable(obj) &&
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_unbind(obj) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
@@ -1749,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return count;
}
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+ return __i915_gem_shrink(dev_priv, target, true);
+}
+
static void
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
@@ -1868,11 +1874,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
obj->ring = ring;
@@ -1933,26 +1939,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
}
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno = dev_priv->next_seqno;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i, j;
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
+ /* The hardware uses various monotonic 32-bit counters; if we
+ * detect that they will wrap around, we need to idle the GPU
+ * and reset those counters.
+ */
+ ret = 0;
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ret |= ring->sync_seqno[j] != 0;
+ }
+ if (ret == 0)
+ return ret;
+
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ring->sync_seqno[j] = 0;
+ }
- return seqno;
+ return 0;
}
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
- if (ring->outstanding_lazy_request == 0)
- ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_handle_seqno_wrap(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = 1;
+ }
- return ring->outstanding_lazy_request;
+ *seqno = dev_priv->next_seqno++;
+ return 0;
}
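+/* Hypothetical caller sketch (not taken from this patch): a ring that used
+ * to call i915_gem_next_request_seqno() would now reserve its lazy seqno
+ * through the device, e.g.
+ *
+ *	if (ring->outstanding_lazy_request == 0) {
+ *		u32 seqno;
+ *
+ *		ret = i915_gem_get_seqno(ring->dev, &seqno);
+ *		if (ret)
+ *			return ret;
+ *		ring->outstanding_lazy_request = seqno;
+ *	}
+ *
+ * so that a failure to idle the GPU on wrap is propagated to the caller.
+ */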
int
@@ -1963,7 +1997,6 @@ i915_add_request(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
- u32 seqno;
int was_empty;
int ret;
@@ -1982,7 +2015,6 @@ i915_add_request(struct intel_ring_buffer *ring,
if (request == NULL)
return -ENOMEM;
- seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
@@ -1991,15 +2023,13 @@ i915_add_request(struct intel_ring_buffer *ring,
*/
request_ring_position = intel_ring_get_tail(ring);
- ret = ring->add_request(ring, &seqno);
+ ret = ring->add_request(ring);
if (ret) {
kfree(request);
return ret;
}
- trace_i915_gem_request_add(ring, seqno);
-
- request->seqno = seqno;
+ request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
@@ -2017,23 +2047,24 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
+ trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
mod_timer(&dev_priv->hangcheck_timer,
- jiffies +
- msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work, HZ);
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
if (out_seqno)
- *out_seqno = seqno;
+ *out_seqno = request->seqno;
return 0;
}
@@ -2131,7 +2162,6 @@ void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
- int i;
if (list_empty(&ring->request_list))
return;
@@ -2140,10 +2170,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
- if (seqno >= ring->sync_seqno[i])
- ring->sync_seqno[i] = 0;
-
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2218,7 +2244,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
return;
}
@@ -2236,7 +2263,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
if (!dev_priv->mm.suspended && !idle)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);
@@ -2386,7 +2414,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
ret = to->sync_to(to, from, seqno);
if (!ret)
- from->sync_seqno[idx] = seqno;
+ /* We use last_read_seqno because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->sync_seqno[idx] = obj->last_read_seqno;
return ret;
}
@@ -2469,14 +2501,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
- if (list_empty(&ring->active_list))
- return 0;
-
- return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2489,7 +2513,7 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;
- ret = i915_ring_idle(ring);
+ ret = intel_ring_idle(ring);
if (ret)
return ret;
}
@@ -2879,7 +2903,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *free_space;
+ struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2923,74 +2947,63 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ i915_gem_object_pin_pages(obj);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return -ENOMEM;
+ }
+
search_free:
if (map_and_fenceable)
- free_space =
- drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
+ 0, dev_priv->mm.gtt_mappable_end);
else
- free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
- size, alignment, obj->cache_level,
- false);
-
- if (free_space != NULL) {
- if (map_and_fenceable)
- obj->gtt_space =
- drm_mm_get_block_range_generic(free_space,
- size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
- else
- obj->gtt_space =
- drm_mm_get_block_generic(free_space,
- size, alignment, obj->cache_level,
- false);
- }
- if (obj->gtt_space == NULL) {
+ ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment, obj->cache_level);
+ if (ret) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
- if (ret)
- return ret;
+ if (ret == 0)
+ goto search_free;
- goto search_free;
+ i915_gem_object_unpin_pages(obj);
+ kfree(node);
+ return ret;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev,
- obj->gtt_space,
- obj->cache_level))) {
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
return -EINVAL;
}
-
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
return ret;
}
- if (!dev_priv->mm.aliasing_ppgtt)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
-
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- obj->gtt_offset = obj->gtt_space->start;
+ obj->gtt_space = node;
+ obj->gtt_offset = node->start;
fenceable =
- obj->gtt_space->size == fence_size &&
- (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+ node->size == fence_size &&
+ (node->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable;
+ i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
@@ -3059,7 +3072,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3454,11 +3467,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (obj->gtt_space == NULL) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3511,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
- obj->user_pin_count++;
- obj->pin_filp = file;
- if (obj->user_pin_count == 1) {
+ if (obj->user_pin_count == 0) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
@@ -3832,7 +3851,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
if (!IS_IVYBRIDGE(dev))
return;
- if (!dev_priv->mm.l3_remap_info)
+ if (!dev_priv->l3_parity.remap_info)
return;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3860,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
- if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+ if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
- if (remap && !dev_priv->mm.l3_remap_info[i/4])
+ if (remap && !dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
- I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+ I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
}
/* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3895,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
-void i915_gem_init_ppgtt(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t pd_offset;
- struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- uint32_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
-
- if (!dev_priv->mm.aliasing_ppgtt)
- return;
-
-
- pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
-
- if (dev_priv->mm.gtt->needs_dmar)
- pt_addr = ppgtt->pt_dma_addr[i];
- else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
-
- pd_offset = ppgtt->pd_offset;
- pd_offset /= 64; /* in cachelines, */
- pd_offset <<= 16;
-
- if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk, gab_ctl, ecobits;
-
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
- ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- } else if (INTEL_INFO(dev)->gen >= 7) {
- I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
- /* GFX_MODE is per-ring on gen7+ */
- }
-
- for_each_ring(ring, dev_priv, i) {
- if (INTEL_INFO(dev)->gen >= 7)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
- }
-}
-
static bool
intel_enable_blt(struct drm_device *dev)
{
@@ -3960,7 +3917,7 @@ i915_gem_init_hw(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- if (!intel_enable_gtt())
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4295,7 +4252,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_cache_release(page);
}
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
@@ -4382,7 +4339,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
return -EFAULT;
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
return 0;
}
@@ -4407,6 +4364,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
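+/* The shrinker below may be entered from an allocation made while this
+ * process already holds struct_mutex; mutex_is_locked_by() lets it detect
+ * that case and scan without re-acquiring the lock, except when
+ * shrinker_no_lock_stealing asks it not to (as during mmap offset
+ * creation earlier in this patch). */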
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -4417,14 +4387,25 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
+ bool unlock = true;
int cnt;
- if (!mutex_trylock(&dev->struct_mutex))
- return 0;
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return 0;
+
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return 0;
+
+ unlock = false;
+ }
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
+ nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (nr_to_scan > 0)
i915_gem_shrink_all(dev_priv);
}
@@ -4432,10 +4413,11 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 05ed42f203d7..a3f06bcad551 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
struct i915_hw_context *ctx;
int ret, id;
- ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
- u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_gem_object_move_to_active(from_obj, ring, seqno);
+ i915_gem_object_move_to_active(from_obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 773ef77b6c22..abeaafef6d7e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -266,7 +266,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
obj = dma_buf->priv;
/* is it from our device? */
if (obj->base.dev == dev) {
+ /*
+ * Importing a dmabuf exported from our own gem increases the
+ * refcount on the gem object itself instead of the dmabuf's f_count.
+ */
drm_gem_object_reference(&obj->base);
+ dma_buf_put(dma_buf);
return &obj->base;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3eea143749f6..26d08bb58218 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj->cache_level);
}
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (unlikely(target_offset == 0)) {
- DRM_DEBUG("No GTT space found for object %d\n",
- reloc->target_handle);
- return ret;
- }
-
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
@@ -548,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
total = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
+ u64 invalid_offset = (u64)-1;
+ int j;
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
@@ -558,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
}
+ /* As we do not update the known relocation offsets after
+ * relocating (due to the complexities in lock handling),
+ * we need to mark them as invalid now so that we force the
+ * relocation processing next time. Just in case the target
+ * object is evicted and then rebound into its old
+ * presumed_offset before the next execbuffer - if that
+ * happened we would make the mistake of assuming that the
+ * relocations were valid.
+ */
+ for (j = 0; j < exec[i].relocation_count; j++) {
+ if (copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+ }
+
reloc_offset[i] = total;
total += exec[i].relocation_count;
}
@@ -672,7 +684,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -722,8 +734,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -735,10 +746,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- i915_gem_object_move_to_active(obj, ring, seqno);
+ i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = seqno;
+ obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj);
}
@@ -798,8 +809,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
- u32 seqno;
u32 mask;
+ u32 flags;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@@ -811,6 +822,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
+ flags = 0;
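+ /* Secure batches are restricted to the DRM master holding
+ * CAP_SYS_ADMIN; they are later bound into the global GTT (see the
+ * has_global_gtt_mapping check further down) before being dispatched. */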
+ if (args->flags & I915_EXEC_SECURE) {
+ if (!file->is_master || !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ flags |= I915_DISPATCH_SECURE;
+ }
+ if (args->flags & I915_EXEC_IS_PINNED)
+ flags |= I915_DISPATCH_PINNED;
+
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@@ -983,26 +1004,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+ /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+ * batch" bit. Hence we need to pin secure batches into the global gtt.
+ * hsw should have this fixed, but let's be paranoid and do it
+ * unconditionally for now. */
+ if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
- seqno = i915_gem_next_request_seqno(ring);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
- if (seqno < ring->sync_seqno[i]) {
- /* The GPU can not handle its semaphore value wrapping,
- * so every billion or so execbuffers, we need to stall
- * the GPU in order to reset the counters.
- */
- ret = i915_gpu_idle(dev);
- if (ret)
- goto err;
- i915_gem_retire_requests(dev);
-
- BUG_ON(ring->sync_seqno[i]);
- }
- }
-
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1028,8 +1040,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- trace_i915_gem_ring_dispatch(ring, seqno);
-
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@@ -1040,17 +1050,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len);
+ exec_start, exec_len,
+ flags);
if (ret)
goto err;
}
} else {
- ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
if (ret)
goto err;
}
- i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index df470b5e8d36..2c150dee78a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,19 +28,67 @@
#include "i915_trace.h"
#include "intel_drv.h"
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define HSW_PTE_UNCACHED (0)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gtt_pte_t pte_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_LLC_MLC:
+ /* Haswell doesn't set L3 this way */
+ if (IS_HASWELL(dev))
+ pte |= GEN6_PTE_CACHE_LLC;
+ else
+ pte |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ if (IS_HASWELL(dev))
+ pte |= HSW_PTE_UNCACHED;
+ else
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ BUG();
+ }
+
+
+ return pte;
+}
+
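
The address folding in GEN6_GTT_ADDR_ENCODE() is the only non-obvious part of the encoding above: bits 31:12 of the physical address stay where they are and bits 39:32 drop into PTE bits 11:4. A stand-alone sketch of that arithmetic (illustrative only; cacheability bits omitted, page-aligned address assumed):

#include <stdint.h>
#include <stdio.h>

/* Illustration of the gen6 GTT address folding used by pte_encode():
 * bits 31:12 of the page-aligned address stay in place, bits 39:32 are
 * folded down into PTE bits 11:4, and bit 0 is the valid bit. */
static uint32_t demo_pte_encode(uint64_t addr)
{
	uint32_t pte = (uint32_t)addr;			/* addr[31:12], page aligned */
	pte |= (uint32_t)((addr >> 28) & 0xff0);	/* addr[39:32] -> pte[11:4] */
	return pte | 1;					/* GEN6_PTE_VALID */
}

int main(void)
{
	/* 0x1234567000 is a hypothetical 40-bit, 4 KiB-aligned address. */
	printf("pte = 0x%08x\n", (unsigned)demo_pte_encode(0x1234567000ULL));
	/* prints 0x34567121: addr[39:32] = 0x12 lands in bits 11:4 as 0x120 */
	return 0;
}
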
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
- uint32_t *pt_vaddr;
- uint32_t scratch_pte;
+ gtt_pte_t *pt_vaddr;
+ gtt_pte_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
- scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+ scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
+ I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return ret;
+ ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
- ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+ ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct sg_table *pages,
unsigned first_entry,
- uint32_t pte_flags)
+ enum i915_cache_level cache_level)
{
- uint32_t *pt_vaddr, pte;
+ gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[j] = pte | pte_flags;
+ pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
+ cache_level);
/* grab the next page */
if (++m == segment_len) {
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- uint32_t pte_flags = GEN6_PTE_VALID;
-
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
- break;
- case I915_CACHE_LLC:
- pte_flags |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- if (IS_HASWELL(obj->base.dev))
- pte_flags |= HSW_PTE_UNCACHED;
- else
- pte_flags |= GEN6_PTE_UNCACHED;
- break;
- default:
- BUG();
- }
-
i915_ppgtt_insert_sg_entries(ppgtt,
obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT);
}
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
- enum i915_cache_level cache_level)
+void i915_gem_init_ppgtt(struct drm_device *dev)
{
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- if (INTEL_INFO(dev)->gen >= 6)
- return AGP_USER_CACHED_MEMORY_LLC_MLC;
- /* Older chipsets do not have this extra level of CPU
- * cacheing, so fallthrough and request the PTE simply
- * as cached.
- */
- case I915_CACHE_LLC:
- return AGP_USER_CACHED_MEMORY;
- default:
- case I915_CACHE_NONE:
- return AGP_USER_MEMORY;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t pd_offset;
+ struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ uint32_t __iomem *pd_addr;
+ uint32_t pd_entry;
+ int i;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ return;
+
+
+ pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ if (dev_priv->mm.gtt->needs_dmar)
+ pt_addr = ppgtt->pt_dma_addr[i];
+ else
+ pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ pd_offset = ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines, */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for_each_ring(ring, dev_priv, i) {
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
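
PP_DIR_BASE wants the page-directory offset expressed in 64-byte cachelines, stored in bits 31:16, which is what the divide-and-shift above computes. A tiny stand-alone illustration with a made-up offset:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: turn a page-directory byte offset within the GTT
 * into the PP_DIR_BASE encoding used above (offset in 64-byte
 * cachelines, stored in bits 31:16). */
static uint32_t demo_pp_dir_base(uint32_t pd_offset_bytes)
{
	return (pd_offset_bytes / 64) << 16;
}

int main(void)
{
	/* e.g. a page directory placed 2 MiB into the GTT */
	printf("PP_DIR_BASE = 0x%08x\n", (unsigned)demo_pp_dir_base(2 << 20));
	/* 2 MiB / 64 = 0x8000 cachelines -> 0x80000000 */
	return 0;
}
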
@@ -288,13 +360,40 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+ unsigned first_entry,
+ unsigned num_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gtt_pte_t scratch_pte;
+ gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+ const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+ int i;
+
+ if (INTEL_INFO(dev)->gen < 6) {
+ intel_gtt_clear_range(first_entry, num_entries);
+ return;
+ }
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+ readl(gtt_base);
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+ i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +401,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -318,21 +417,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sg_table *st = obj->pages;
+ struct scatterlist *sg = st->sgl;
+ const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+ const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+ gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+ int unused, i = 0;
+ unsigned int len, m = 0;
+ dma_addr_t addr;
+
+ for_each_sg(st->sgl, sg, st->nents, unused) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (m = 0; m < len; m++) {
+ addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+ i++;
+ }
+ }
+
+ BUG_ON(i > max_entries);
+ BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+ /* XXX: This serves as a posting read to make sure that the PTE has
+ * actually been updated. There is some concern that even though
+ * registers and PTEs are within the same BAR, they may be subject to
+ * different NUMA access patterns. Therefore, even with the way we
+ * assume the hardware should work, we keep this posting read for
+ * paranoia.
+ */
+ if (i != 0)
+ WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+
+ /* This next bit makes the above posting read even more important. We
+ * want to flush the TLBs only after we're certain all the PTE updates
+ * have finished.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+ if (INTEL_INFO(dev)->gen < 6) {
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ flags);
+ } else {
+ gen6_ggtt_bind_object(obj, cache_level);
+ }
- intel_gtt_insert_sg_entries(obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ i915_ggtt_clear_range(obj->base.dev,
+ obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
@@ -390,5 +544,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* ... but ensure that we clear the entire range. */
- intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ get_page(page);
+ set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+ dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dma_addr))
+ return -EINVAL;
+#else
+ dma_addr = page_to_phys(page);
+#endif
+ dev_priv->mm.gtt->scratch_page = page;
+ dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+ return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ put_page(dev_priv->mm.gtt->scratch_page);
+ __free_page(dev_priv->mm.gtt->scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+ return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+{
+ static const int stolen_decoder[] = {
+ 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
+ snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
+ return stolen_decoder[snb_gmch_ctl] << 20;
+}
+
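
The helpers above just pull fixed-width fields out of the GMCH control word (GGMS in 1 MB units and GMS in 32 MB units on gen6; gen7 instead indexes the small decoder table). A stand-alone worked example with an invented register value, using the field positions added to i915_reg.h by this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented control word: GGMS (bits 9:8) = 2, GMS (bits 7:3) = 5. */
	uint16_t snb_gmch_ctl = (2 << 8) | (5 << 3);

	unsigned int gtt_bytes    = ((snb_gmch_ctl >> 8) & 0x3) << 20;	/* 1 MB units */
	unsigned int stolen_bytes = ((snb_gmch_ctl >> 3) & 0x1f) << 25;	/* 32 MB units */

	printf("GTT table: %u MB, stolen memory: %u MB\n",
	       gtt_bytes >> 20, stolen_bytes >> 20);
	/* GTT table: 2 MB of PTEs, stolen memory: 160 MB */
	return 0;
}
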
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ phys_addr_t gtt_bus_addr;
+ u16 snb_gmch_ctl;
+ int ret;
+
+ /* On modern platforms we need not worry ourselves with the legacy
+ * hostbridge query stuff. Skip it entirely.
+ */
+ if (INTEL_INFO(dev)->gen < 6) {
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
+ }
+
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ intel_gmch_remove();
+ return -ENODEV;
+ }
+ return 0;
+ }
+
+ dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+ if (!dev_priv->mm.gtt)
+ return -ENOMEM;
+
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+
+#ifdef CONFIG_INTEL_IOMMU
+ dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
+ /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+ gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
+ dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
+
+ /* i9xx_setup */
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ dev_priv->mm.gtt->gtt_total_entries =
+ gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+ if (INTEL_INFO(dev)->gen < 7)
+ dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+ else
+ dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
+
+ dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+ /* 64/512MB is the current min/max we actually know of, but this is just a
+ * coarse sanity check.
+ */
+ if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+ dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+ DRM_ERROR("Unknown GMADR entries (%d)\n",
+ dev_priv->mm.gtt->gtt_mappable_entries);
+ ret = -ENXIO;
+ goto err_out;
+ }
+
+ ret = setup_scratch_page(dev);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ goto err_out;
+ }
+
+ dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
+ dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+ if (!dev_priv->mm.gtt->gtt) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ teardown_scratch_page(dev);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+ DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
+ DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+ DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+ return 0;
+
+err_out:
+ kfree(dev_priv->mm.gtt);
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gmch_remove();
+ return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ iounmap(dev_priv->mm.gtt->gtt);
+ teardown_scratch_page(dev);
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gmch_remove();
+ kfree(dev_priv->mm.gtt);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 32e1bda865b8..fe843389c7b4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -122,7 +122,10 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
}
/* Get vtotal. */
- vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+ vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
if (INTEL_INFO(dev)->gen >= 4) {
/* No obvious pixelcount register. Only query vertical
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
- htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
}
/* Query vblank area. */
- vbl = I915_READ(VBLANK(pipe));
+ vbl = I915_READ(VBLANK(cpu_transcoder));
/* Test position against vblank region. */
vbl_start = vbl & 0x1fff;
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer,
- jiffies +
- msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
}
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
- mutex_lock(&dev_priv->dev->struct_mutex);
+ mutex_lock(&dev_priv->rps.hw_lock);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1;
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
gen6_set_rps(dev_priv->dev, new_delay);
}
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- parity_error_work);
+ l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
uint32_t misccpctl;
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->parity_error_work);
+ queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
-static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ if (pch_iir & SDE_HOTPLUG_MASK)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe)));
}
-static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[VCS]);
}
-static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- u32 hotplug_mask;
atomic_inc(&dev_priv->irq_received);
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
(!IS_GEN6(dev) || pm_iir == 0))
goto done;
- if (HAS_PCH_CPT(dev))
- hotplug_mask = SDE_HOTPLUG_MASK_CPT;
- else
- hotplug_mask = SDE_HOTPLUG_MASK;
-
ret = IRQ_HANDLED;
if (IS_GEN5(dev))
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- if (pch_iir & hotplug_mask)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
@@ -1120,6 +1132,8 @@ static void i915_record_ring_state(struct drm_device *dev,
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1143,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
@@ -1237,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev)
else
error->ier = I915_READ(IER);
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
@@ -1464,7 +1489,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- if (work == NULL || work->pending || !work->enable_stall_check) {
+ if (work == NULL ||
+ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+ !work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
@@ -1751,7 +1778,7 @@ void i915_hangcheck_elapsed(unsigned long data)
repeat:
/* Reset timer case chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
/* drm_dma.h hooks
@@ -1956,6 +1983,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ u32 render_irqs;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1995,21 +2023,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- dev_priv->gt_irq_mask = ~0;
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
- GT_GEN6_BLT_CS_ERROR_INTERRUPT |
- GT_GEN6_BLT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_GEN6_BSD_CS_ERROR_INTERRUPT |
- GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
- GT_PIPE_NOTIFY |
- GT_RENDER_CS_ERROR_INTERRUPT |
- GT_SYNC_STATUS |
- GT_USER_INTERRUPT);
+
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
/* ack & enable invalid PTE error interrupts */
@@ -2019,7 +2038,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-#if 0 /* FIXME: check register definitions; some have moved */
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2027,15 +2045,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
-#endif
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
@@ -2129,7 +2146,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2307,7 +2324,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2545,7 +2562,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2691,7 +2708,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
- INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a4162ddff6c5..59afb7eb6db6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -40,6 +41,14 @@
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define IVB_GMCH_GMS_SHIFT 4
+#define IVB_GMCH_GMS_MASK 0xf
+
/* PCI config space */
@@ -105,23 +114,6 @@
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID (1 << 0)
-#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define HSW_PTE_UNCACHED (0)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_CACHE_BITS (3 << 1)
-#define GEN6_PTE_GFDT (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -241,11 +233,18 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
-#define MI_INVALIDATE_TLB (1<<18)
-#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
-#define MI_BATCH_NON_SECURE (1)
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@@ -369,6 +368,7 @@
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
@@ -384,6 +384,9 @@
#define DPIO_FASTCLK_DISABLE 0x8100
+#define DPIO_DATA_CHANNEL1 0x8220
+#define DPIO_DATA_CHANNEL2 0x8420
+
/*
* Fence registers
*/
@@ -509,11 +512,14 @@
#define GEN7_ERR_INT 0x44040
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define DERRMR 0x44050
+
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
*/
#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 0x0208c
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
@@ -521,14 +527,17 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
#define GEN6_GT_MODE 0x20d0
-#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
@@ -547,6 +556,8 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
+#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
@@ -661,6 +672,7 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -670,6 +682,8 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL_EN (1<<0)
#define ECOSKPD 0x021d0
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
@@ -1559,14 +1573,14 @@
#define _VSYNCSHIFT_B 0x61028
-#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
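
_TRANSCODER() interpolates linearly between the A and B register offsets, exactly like _PIPE(), so the transcoder-indexed lookups above keep working for the per-transcoder timing registers. A stand-alone illustration with made-up offsets (the real _HTOTAL_A/_HTOTAL_B values live elsewhere in this header):

#include <stdio.h>

/* Local restatement of the macro added above, with invented A/B offsets
 * standing in for the real register addresses. */
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define DEMO_HTOTAL_A 0x60000
#define DEMO_HTOTAL_B 0x61000
#define DEMO_HTOTAL(trans) _TRANSCODER(trans, DEMO_HTOTAL_A, DEMO_HTOTAL_B)

int main(void)
{
	printf("HTOTAL(0) = 0x%x\n", DEMO_HTOTAL(0)); /* 0x60000: transcoder A */
	printf("HTOTAL(2) = 0x%x\n", DEMO_HTOTAL(2)); /* 0x62000: A + 2 * 0x1000 */
	return 0;
}
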
/* VGA port control */
#define ADPA 0x61100
@@ -2641,6 +2655,7 @@
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
+#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2711,7 +2726,7 @@
#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -2998,12 +3013,19 @@
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_YUV422 (0x0<<26)
#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
+#define DISPPLANE_BGRA555 (0x3<<26)
+#define DISPPLANE_BGRX555 (0x4<<26)
+#define DISPPLANE_BGRX565 (0x5<<26)
+#define DISPPLANE_BGRX888 (0x6<<26)
+#define DISPPLANE_BGRA888 (0x7<<26)
+#define DISPPLANE_RGBX101010 (0x8<<26)
+#define DISPPLANE_RGBA101010 (0x9<<26)
+#define DISPPLANE_BGRX101010 (0xa<<26)
+#define DISPPLANE_RGBX161616 (0xc<<26)
+#define DISPPLANE_RGBX888 (0xe<<26)
+#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -3024,6 +3046,8 @@
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3033,6 +3057,8 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3078,6 +3104,8 @@
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
+#define _DSPBOFFSET 0x711A4
+#define _DSPBSURFLIVE 0x711AC
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -3143,6 +3171,7 @@
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
@@ -3177,6 +3206,8 @@
#define _SPRA_SURF 0x7029c
#define _SPRA_KEYMAX 0x702a0
#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
#define SPRITE_SCALE_ENABLE (1<<31)
#define SPRITE_FILTER_MASK (3<<29)
@@ -3197,6 +3228,8 @@
#define _SPRB_SURF 0x7129c
#define _SPRB_KEYMAX 0x712a0
#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
@@ -3210,8 +3243,10 @@
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -3246,12 +3281,6 @@
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
-#define PCH_DSPCLK_GATE_D 0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
-
#define PCH_3DCGDIS0 0x46020
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3301,20 +3330,22 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
@@ -3423,15 +3454,13 @@
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
-#define ILK_DSPCLK_GATE 0x42020
-#define IVB_VRHUNIT_CLK_GATE (1<<28)
-#define ILK_DPARB_CLK_GATE (1<<5)
-#define ILK_DPFD_CLK_GATE (1<<7)
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define ILK_CLK_FBC (1<<7)
-#define ILK_DPFC_DIS1 (1<<8)
-#define ILK_DPFC_DIS2 (1<<9)
+#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
#define IVB_CHICKEN3 0x4200c
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@@ -3447,14 +3476,21 @@
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+#define GEN7_L3SQCREG4 0xb034
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define HSW_FUSE_STRAP 0x42014
+#define HSW_CDCLK_LIMIT (1 << 24)
+
/* PCH */
/* south display engine interrupt: IBX */
@@ -3686,7 +3722,7 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
@@ -3795,18 +3831,26 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
+
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SOUTH_CHICKEN2 0xc2004
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
@@ -3816,6 +3860,7 @@
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
@@ -3877,6 +3922,7 @@
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
@@ -3901,16 +3947,21 @@
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
-#define _FDI_RXA_MISC 0xf0010
-#define _FDI_RXB_MISC 0xf1010
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
+#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -4003,6 +4054,11 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
+#define PANEL_POWER_PORT_LVDS (0 << 30)
+#define PANEL_POWER_PORT_DP_A (1 << 30)
+#define PANEL_POWER_PORT_DP_C (2 << 30)
+#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4050,7 +4106,7 @@
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4108,6 +4164,8 @@
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_KERNEL 0x1
+#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -4220,6 +4278,10 @@
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
@@ -4251,6 +4313,15 @@
#define GEN7_L3LOG_BASE 0xB070
#define GEN7_L3LOG_SIZE 0x80
+#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
+#define GEN7_MAX_PS_THREAD_DEP (8<<12)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN7_ROW_CHICKEN2 0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+#define DOP_CLOCK_GATING_DISABLE (1<<0)
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4380,33 +4451,39 @@
#define HSW_PWR_WELL_CTL6 0x45414
/* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A 0x60400
-#define PIPE_DDI_FUNC_CTL_B 0x61400
-#define PIPE_DDI_FUNC_CTL_C 0x62400
-#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
- PIPE_DDI_FUNC_CTL_B)
-#define PIPE_DDI_FUNC_ENABLE (1<<31)
+#define TRANS_DDI_FUNC_CTL_A 0x60400
+#define TRANS_DDI_FUNC_CTL_B 0x61400
+#define TRANS_DDI_FUNC_CTL_C 0x62400
+#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+ TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (7<<28)
-#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
-#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
-#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
-#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
-#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
-#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
-#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
-#define PIPE_DDI_BPC_MASK (7<<20)
-#define PIPE_DDI_BPC_8 (0<<20)
-#define PIPE_DDI_BPC_10 (1<<20)
-#define PIPE_DDI_BPC_6 (2<<20)
-#define PIPE_DDI_BPC_12 (3<<20)
-#define PIPE_DDI_PVSYNC (1<<17)
-#define PIPE_DDI_PHSYNC (1<<16)
-#define PIPE_DDI_BFI_ENABLE (1<<4)
-#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
-#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
-#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
+#define TRANS_DDI_PORT_NONE (0<<28)
+#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
+#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
+#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
+#define TRANS_DDI_BPC_MASK (7<<20)
+#define TRANS_DDI_BPC_8 (0<<20)
+#define TRANS_DDI_BPC_10 (1<<20)
+#define TRANS_DDI_BPC_6 (2<<20)
+#define TRANS_DDI_BPC_12 (3<<20)
+#define TRANS_DDI_PVSYNC (1<<17)
+#define TRANS_DDI_PHSYNC (1<<16)
+#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
+#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
+#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
@@ -4420,12 +4497,16 @@
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
@@ -4444,6 +4525,7 @@
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
@@ -4460,6 +4542,10 @@
#define SBI_ADDR 0xC6000
#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
@@ -4477,10 +4563,12 @@
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
@@ -4490,8 +4578,8 @@
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SCC (1<<28)
-#define SPLL_PLL_NON_SCC (2<<28)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
@@ -4500,7 +4588,7 @@
#define WRPLL_CTL2 0x46060
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@@ -4517,21 +4605,36 @@
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
-
-/* Pipe clock selection */
-#define PIPE_CLK_SEL_A 0x46140
-#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
-/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+#define PORT_CLK_SEL_NONE (7<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A 0x46140
+#define TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0<<29)
+#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+ _TRANSB_MSA_MISC)
+#define TRANS_MSA_SYNC_CLK (1<<0)
+#define TRANS_MSA_6_BPC (0<<5)
+#define TRANS_MSA_8_BPC (1<<5)
+#define TRANS_MSA_10_BPC (2<<5)
+#define TRANS_MSA_12_BPC (3<<5)
+#define TRANS_MSA_16_BPC (4<<5)
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CLK_FREQ_MASK (3<<26)
+#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_CD_SOURCE_FCLK (1<<21)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5854bddb1e9f..63d4d30c39de 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
+ array = dev_priv->regfile.save_palette_a;
else
- array = dev_priv->save_palette_b;
+ array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
+ array = dev_priv->regfile.save_palette_a;
else
- array = dev_priv->save_palette_b;
+ array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
- dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+ dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
/* MSR bits */
- dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev)
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
- dev_priv->saveCR[i] =
+ dev_priv->regfile.saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
- dev_priv->saveCR[0x11] &= ~0x80;
+ dev_priv->regfile.saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
- dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
- dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
- dev_priv->saveGR[i] =
+ dev_priv->regfile.saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
- dev_priv->saveGR[0x10] =
+ dev_priv->regfile.saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->saveGR[0x11] =
+ dev_priv->regfile.saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->saveGR[0x18] =
+ dev_priv->regfile.saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
- dev_priv->saveSR[i] =
+ dev_priv->regfile.saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev)
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->saveSR[i]);
+ dev_priv->regfile.saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->saveGR[i]);
+ dev_priv->regfile.saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->saveGR[0x10]);
+ dev_priv->regfile.saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->saveGR[0x11]);
+ dev_priv->regfile.saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->saveGR[0x18]);
+ dev_priv->regfile.saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+ i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+ I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,156 +244,162 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->saveCURABASE = I915_READ(_CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
- dev_priv->saveFPA0 = I915_READ(_FPA0);
- dev_priv->saveFPA1 = I915_READ(_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
- dev_priv->saveFPB0 = I915_READ(_FPB0);
- dev_priv->saveFPB1 = I915_READ(_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
return;
}
@@ -412,20 +418,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
case 7:
case 6:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
break;
}
@@ -447,158 +453,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
}
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
- I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
/* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
- I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
- I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
- I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
}
- I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
- I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
/* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
- I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
- I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
- I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
}
- I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
return;
}
@@ -608,89 +620,84 @@ static void i915_save_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->saveDSPARB = I915_READ(DSPARB);
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
- /* Don't save them in KMS mode */
+ /* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
- /* CRT state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveADPA = I915_READ(PCH_ADPA);
- } else {
- dev_priv->saveADPA = I915_READ(ADPA);
- }
-
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else {
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->saveLVDS = I915_READ(LVDS);
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else {
- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
- }
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->saveDP_B = I915_READ(DP_B);
- dev_priv->saveDP_C = I915_READ(DP_C);
- dev_priv->saveDP_D = I915_READ(DP_D);
- dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
- dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
- dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
- dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
- dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
- dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
- dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
- dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
- }
- /* FIXME: save TV & SDVO state */
-
- /* Only save FBC state on the platform that supports FBC */
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+ }
+
+ /* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
}
/* VGA state */
- dev_priv->saveVGA0 = I915_READ(VGA0);
- dev_priv->saveVGA1 = I915_READ(VGA1);
- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
if (HAS_PCH_SPLIT(dev))
- dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
}
@@ -700,97 +707,95 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
}
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->saveADPA);
-
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+ I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
- I915_WRITE(LVDS, dev_priv->saveLVDS);
+ I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
* otherwise we get blank eDP screen after S3 on some machines
*/
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
- I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
- dev_priv->saveMCHBAR_RENDER_STANDBY);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
- }
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->saveDP_B);
- I915_WRITE(DP_C, dev_priv->saveDP_C);
- I915_WRITE(DP_D, dev_priv->saveDP_D);
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
}
- /* FIXME: restore TV & SDVO state */
/* Only restore FBC info on the platform that supports FBC */
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else {
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
}
}
/* VGA state */
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
else
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->saveVGA0);
- I915_WRITE(VGA1, dev_priv->saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
@@ -802,46 +807,45 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
-
i915_save_display(dev);
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDEIER = I915_READ(DEIER);
- dev_priv->saveDEIMR = I915_READ(DEIMR);
- dev_priv->saveGTIER = I915_READ(GTIER);
- dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->saveMCHBAR_RENDER_STANDBY =
- I915_READ(RSTDBYCTL);
- dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+ dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+ dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+ dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+ dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->regfile.saveIER = I915_READ(IER);
+ dev_priv->regfile.saveIMR = I915_READ(IMR);
+ }
}
intel_disable_gt_powersave(dev);
/* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+ dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
mutex_unlock(&dev->struct_mutex);
@@ -853,41 +857,40 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
i915_restore_display(dev);
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->saveDEIER);
- I915_WRITE(DEIMR, dev_priv->saveDEIMR);
- I915_WRITE(GTIER, dev_priv->saveGTIER);
- I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
- } else {
- I915_WRITE(IER, dev_priv->saveIER);
- I915_WRITE(IMR, dev_priv->saveIMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->regfile.saveIER);
+ I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+ }
}
/* Cache mode state */
- I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
- I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+ I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+ I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
mutex_unlock(&dev->struct_mutex);
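The i915_suspend.c hunks above are a mechanical rename: every register shadow used across suspend/resume moves from a bare dev_priv->saveFOO field into a dev_priv->regfile.saveFOO sub-structure, so the legacy save/restore state is grouped in one place. A minimal sketch of the resulting layout follows; the struct name and the field selection are illustrative (the full definition presumably lives next to drm_i915_private in i915_drv.h and carries one member per register saved above):

    /* Sketch only: suspend/resume register shadows grouped into one struct.
     * Names and field list abbreviated for illustration. */
    struct i915_suspend_saved_registers {
    	u32 saveDSPARB;
    	u32 saveADPA;
    	u32 saveLVDS;
    	u64 saveFENCE[16];
    	/* ... one member per register shadowed in the hunks above ... */
    };

    struct drm_i915_private {
    	/* ... */
    	struct i915_suspend_saved_registers regfile;
    	/* ... */
    };

    /* Call sites then read and write through the sub-struct: */
    dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
    I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);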
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 903eebd2117a..9462081b1e60 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return -EPERM;
if (offset % 4 != 0)
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
- if (!dev_priv->mm.l3_remap_info) {
+ if (!dev_priv->l3_parity.remap_info) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
- dev_priv->mm.l3_remap_info = temp;
+ dev_priv->l3_parity.remap_info = temp;
- memcpy(dev_priv->mm.l3_remap_info + (offset/4),
+ memcpy(dev_priv->l3_parity.remap_info + (offset/4),
buf + (offset/4),
count);
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
dev_priv->rps.max_delay = val;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return count;
}
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_delay = val;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return count;
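The i915_sysfs.c hunks above replace dev->struct_mutex with the dedicated dev_priv->rps.hw_lock around the RPS frequency fields, so reading or storing cur/min/max_delay no longer takes the interruptible GEM lock; since mutex_lock() cannot fail, the early-return error path disappears as well. A condensed sketch of the new pattern, with an illustrative helper name and the sysfs attribute plumbing omitted:

    /* Sketch only: rps.hw_lock now serializes access to the rps.*_delay fields. */
    static ssize_t show_cur_freq_mhz(struct drm_i915_private *dev_priv, char *buf)
    {
    	int mhz;

    	mutex_lock(&dev_priv->rps.hw_lock);
    	mhz = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
    	mutex_unlock(&dev_priv->rps.hw_lock);

    	return snprintf(buf, PAGE_SIZE, "%d", mhz);
    }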
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8134421b89a6..3db4a6817713 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+ TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
+ __field(u32, flags)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
+ __entry->flags = flags;
i915_trace_irq_get(ring, seqno);
),
- TP_printk("dev=%u, ring=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+ __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);
TRACE_EVENT(i915_gem_ring_flush,
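The i915_trace.h hunk above adds a flags word to the i915_gem_ring_dispatch tracepoint, so its call sites must pass the dispatch flags through; the actual flag values come from the execbuffer path and are not shown in this diff. An illustrative invocation:

    /* Hypothetical call site: flags is whatever dispatch flags the
     * execbuffer code resolved; it is printed as flags=%x in the trace. */
    trace_i915_gem_ring_dispatch(ring, seqno, flags);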
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 56846ed5ee55..55ffba1f5818 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -755,7 +755,8 @@ void intel_setup_bios(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Set the Panel Power On/Off timings if uninitialized. */
- if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+ if (!HAS_PCH_SPLIT(dev) &&
+ I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6345878ae1e7..9293878ec7eb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev) &&
+ (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -221,14 +226,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
- adpa = ADPA_HOTPLUG_BITS;
+ if (HAS_PCH_SPLIT(dev))
+ adpa = ADPA_HOTPLUG_BITS;
+ else
+ adpa = 0;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_LPT(dev))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
@@ -401,12 +412,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
+ int ret;
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
- return intel_connector_update_modes(connector, edid);
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
@@ -644,10 +659,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
static void intel_crt_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
+ }
+
}
/*
@@ -729,7 +756,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
- if (IS_HASWELL(dev) || IS_I830(dev))
+ if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -749,7 +776,10 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- crt->base.get_hw_state = intel_crt_get_hw_state;
+ if (IS_HASWELL(dev))
+ crt->base.get_hw_state = intel_ddi_get_hw_state;
+ else
+ crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -766,18 +796,14 @@ void intel_crt_init(struct drm_device *dev)
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = 0;
- if (HAS_PCH_SPLIT(dev)) {
- u32 adpa;
-
- adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(PCH_ADPA, adpa);
- POSTING_READ(PCH_ADPA);
-
- DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
- crt->force_hotplug_required = 1;
- }
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+ /*
+ * TODO: find a proper way to discover whether we need to set the
+ * polarity reversal bit or not, instead of relying on the BIOS.
+ */
+ if (HAS_PCH_LPT(dev))
+ dev_priv->fdi_rx_polarity_reversed =
+ !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index bfe375466a0e..4bad0f724019 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -58,6 +58,26 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
+static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+ return intel_dig_port->port;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ return PORT_E;
+
+ } else {
+ DRM_ERROR("Invalid DDI encoder type %d\n", type);
+ BUG();
+ }
+}
+
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -118,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_800MV_3_5DB_HSW
};
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ uint32_t reg = DDI_BUF_CTL(port);
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ udelay(1);
+ if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ return;
+ }
+ DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
@@ -133,25 +166,36 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
-
- /* Configure CPU PLL, wait for warmup */
- I915_WRITE(SPLL_CTL,
- SPLL_PLL_ENABLE |
- SPLL_PLL_FREQ_1350MHz |
- SPLL_PLL_SCC);
+ u32 temp, i, rx_ctl_val;
- /* Use SPLL to drive the output when in FDI mode */
- I915_WRITE(PORT_CLK_SEL(PORT_E),
- PORT_CLK_SEL_SPLL);
- I915_WRITE(PIPE_CLK_SEL(pipe),
- PIPE_CLK_SEL_PORT(PORT_E));
-
- udelay(20);
-
- /* Start the training iterating through available voltages and emphasis */
- for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+ /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
+ * mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ */
+ I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+ FDI_RX_PWRDN_LANE0_VAL(2) |
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
+ ((intel_crtc->fdi_lanes - 1) << 19);
+ if (dev_priv->fdi_rx_polarity_reversed)
+ rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+ udelay(220);
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+ /* Start the training iterating through available voltages and emphasis,
+ * testing each value twice. */
+ for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
@@ -160,103 +204,75 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
- temp = I915_READ(DDI_BUF_CTL(PORT_E));
- temp = (temp & ~DDI_BUF_EMP_MASK);
I915_WRITE(DDI_BUF_CTL(PORT_E),
- temp |
- DDI_BUF_CTL_ENABLE |
- DDI_PORT_WIDTH_X2 |
- hsw_ddi_buf_ctl_values[i]);
+ DDI_BUF_CTL_ENABLE |
+ ((intel_crtc->fdi_lanes - 1) << 1) |
+ hsw_ddi_buf_ctl_values[i / 2]);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
udelay(600);
- /* We need to program FDI_RX_MISC with the default TP1 to TP2
- * values before enabling the receiver, and configure the delay
- * for the FDI timing generator to 90h. Luckily, all the other
- * bits are supposed to be zeroed, so we can write those values
- * directly.
- */
- I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
- FDI_RX_FDI_DELAY_90);
-
- /* Enable CPU FDI Receiver with auto-training */
- reg = FDI_RX_CTL(pipe);
- I915_WRITE(reg,
- I915_READ(reg) |
- FDI_LINK_TRAIN_AUTO |
- FDI_RX_ENABLE |
- FDI_LINK_TRAIN_PATTERN_1_CPT |
- FDI_RX_ENHANCE_FRAME_ENABLE |
- FDI_PORT_WIDTH_2X_LPT |
- FDI_RX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
+ /* Program PCH FDI Receiver TU */
+ I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+
+ /* Wait for FDI auto training time */
+ udelay(5);
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+ DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
-
- /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
- temp = I915_READ(DDI_FUNC_CTL(pipe));
- temp &= ~PIPE_DDI_PORT_MASK;
- temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
- PIPE_DDI_MODE_SELECT_FDI |
- PIPE_DDI_FUNC_ENABLE |
- PIPE_DDI_PORT_WIDTH_X2;
- I915_WRITE(DDI_FUNC_CTL(pipe),
- temp);
- break;
- } else {
- DRM_ERROR("Error training BUF_CTL %d\n", i);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
- /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
- I915_WRITE(DP_TP_CTL(PORT_E),
- I915_READ(DP_TP_CTL(PORT_E)) &
- ~DP_TP_CTL_ENABLE);
- I915_WRITE(FDI_RX_CTL(pipe),
- I915_READ(FDI_RX_CTL(pipe)) &
- ~FDI_RX_PLL_ENABLE);
- continue;
+ return;
}
- }
- DRM_DEBUG_KMS("FDI train done.\n");
-}
-
-/* For DDI connections, it is possible to support different outputs over the
- * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
- * the time the output is detected what exactly is on the other end of it. This
- * function aims at providing support for this detection and proper output
- * configuration.
- */
-void intel_ddi_init(struct drm_device *dev, enum port port)
-{
- /* For now, we don't do any proper output detection and assume that we
- * handle HDMI only */
-
- switch(port){
- case PORT_A:
- /* We don't handle eDP and DP yet */
- DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
- break;
- /* Assume that the ports B, C and D are working in HDMI mode for now */
- case PORT_B:
- case PORT_C:
- case PORT_D:
- intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
- break;
- default:
- DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
- port);
- break;
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(PORT_E), temp);
+ POSTING_READ(DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
}
+
+ DRM_ERROR("FDI link training failed!\n");
}
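The rewritten training loop above runs ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2 iterations and indexes the table with i / 2, so every voltage/emphasis entry gets two consecutive attempts before the driver gives up with "FDI link training failed!". A minimal user-space sketch of just that indexing, with a placeholder table instead of the real DDI_BUF_CTL register values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Placeholder entries standing in for hsw_ddi_buf_ctl_values[]. */
static const unsigned int buf_ctl_values[] = { 0x00, 0x04, 0x08 };

int main(void)
{
	unsigned int i;

	/* Each table entry is used on two consecutive attempts: 0,0,1,1,2,2. */
	for (i = 0; i < ARRAY_SIZE(buf_ctl_values) * 2; i++)
		printf("attempt %u uses entry %u (0x%02x)\n",
		       i, i / 2, buf_ctl_values[i / 2]);

	return 0;
}

Between attempts the real code also has to tear down DDI_BUF_CTL, DP_TP_CTL and the FDI receiver, as the hunk above does, before retrying with the next entry.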
/* WRPLL clock dividers */
@@ -645,116 +661,435 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{298000, 2, 21, 19},
};
-void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- int port = intel_hdmi->ddi_port;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int port = intel_ddi_get_encoder_port(intel_encoder);
int pipe = intel_crtc->pipe;
- int p, n2, r2;
- u32 temp, i;
+ int type = intel_encoder->type;
- /* On Haswell, we need to enable the clocks and prepare DDI function to
- * work in HDMI mode for this pipe.
- */
- DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+ DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ intel_dp->DP |= DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ WARN(1, "Unexpected DP lane count %d\n",
+ intel_dp->lane_count);
+ break;
+ }
+
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_dp_init_link_config(intel_dp);
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs new logic and a
+ * new set of registers, so we leave it for future
+ * patch bombing.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ }
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+
+ if (num_encoders != 1)
+ WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
+ intel_crtc->pipe);
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t val;
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ plls->spll_refcount--;
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ plls->wrpll1_refcount--;
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+ val = I915_READ(WRPLL_CTL1);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL1);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ plls->wrpll2_refcount--;
+ if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+ val = I915_READ(WRPLL_CTL2);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL2);
+ }
+ break;
+ }
+
+ WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+ WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+ WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+{
+ u32 i;
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
- if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+ if (clock <= wrpll_tmds_clock_table[i].clock)
break;
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
- p = wrpll_tmds_clock_table[i].p;
- n2 = wrpll_tmds_clock_table[i].n2;
- r2 = wrpll_tmds_clock_table[i].r2;
+ *p = wrpll_tmds_clock_table[i].p;
+ *n2 = wrpll_tmds_clock_table[i].n2;
+ *r2 = wrpll_tmds_clock_table[i].r2;
- if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
- DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
- wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+ if (wrpll_tmds_clock_table[i].clock != clock)
+ DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
+ wrpll_tmds_clock_table[i].clock, clock);
- DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
- crtc->mode.clock, p, n2, r2);
+ DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ clock, *p, *n2, *r2);
+}
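intel_ddi_calculate_wrpll() above is a ceiling lookup into wrpll_tmds_clock_table[]: take the first entry whose clock is at least the requested clock, or fall back to the last entry (the driver then logs that it is reusing the nearest setting). A self-contained sketch of the same lookup; the three table entries here are made up for illustration, not the driver's real divider values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct tmds_clock {
	int clock;		/* in kHz */
	int p, n2, r2;		/* divider values */
};

/* Made-up entries; the real table is much longer. */
static const struct tmds_clock table[] = {
	{  25175, 64,  900, 15 },
	{  74250, 36, 1485, 20 },
	{ 148500, 18, 1485, 20 },
};

static const struct tmds_clock *pick_dividers(int clock)
{
	unsigned int i;

	/* First entry that can reach the requested clock... */
	for (i = 0; i < ARRAY_SIZE(table); i++)
		if (clock <= table[i].clock)
			return &table[i];

	/* ...or the fastest one available, as a best effort. */
	return &table[ARRAY_SIZE(table) - 1];
}

int main(void)
{
	const struct tmds_clock *c = pick_dividers(100000);

	printf("using %d kHz settings: p=%d n2=%d r2=%d\n",
	       c->clock, c->p, c->n2, c->r2);
	return 0;
}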
- /* Enable LCPLL if disabled */
- temp = I915_READ(LCPLL_CTL);
- if (temp & LCPLL_PLL_DISABLE)
- I915_WRITE(LCPLL_CTL,
- temp & ~LCPLL_PLL_DISABLE);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int type = intel_encoder->type;
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t reg, val;
- /* Configure WR PLL 1, program the correct divider values for
- * the desired frequency and wait for warmup */
- I915_WRITE(WRPLL_CTL1,
- WRPLL_PLL_ENABLE |
- WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) |
- WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p));
+ /* TODO: reuse PLLs when possible (compare values) */
- udelay(20);
+ intel_ddi_put_crtc_pll(crtc);
- /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
- * this port for connection.
- */
- I915_WRITE(PORT_CLK_SEL(port),
- PORT_CLK_SEL_WRPLL1);
- I915_WRITE(PIPE_CLK_SEL(pipe),
- PIPE_CLK_SEL_PORT(port));
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ default:
+ DRM_ERROR("Link bandwidth %d unsupported\n",
+ intel_dp->link_bw);
+ return false;
+ }
+
+ /* We don't need to turn any PLL on because we'll use LCPLL. */
+ return true;
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ int p, n2, r2;
+
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll1_refcount++;
+ reg = WRPLL_CTL1;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll2_refcount++;
+ reg = WRPLL_CTL2;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ } else {
+ DRM_ERROR("No WRPLLs available!\n");
+ return false;
+ }
+ WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
+ "WRPLL already enabled\n");
+
+ intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+ pipe_name(pipe));
+ plls->spll_refcount++;
+ reg = SPLL_CTL;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ }
+
+ WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
+ "SPLL already enabled\n");
+
+ val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+ } else {
+ WARN(1, "Invalid DDI encoder type %d\n", type);
+ return false;
+ }
+
+ I915_WRITE(reg, val);
udelay(20);
- if (intel_hdmi->has_audio) {
- /* Proper support for digital audio needs a new logic and a new set
- * of registers, so we leave it for future patch bombing.
- */
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
+ return true;
+}
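The PLL selection above is a small refcounted allocator: WRPLL 1 and WRPLL 2 are each handed out only while their refcount is zero (sharing is left as a TODO), and intel_ddi_put_crtc_pll() drops the count and disables the PLL once the last user is gone. A stripped-down sketch of that bookkeeping, using simplified stand-in types rather than the driver's intel_ddi_plls structure:

#include <stdio.h>

/* Simplified stand-ins for the two shared WRPLLs. */
struct pll {
	const char *name;
	int refcount;
	int enabled;
};

static struct pll wrplls[2] = { { "WRPLL1" }, { "WRPLL2" } };

/* Grab a free PLL, mirroring the "first refcount == 0 wins" policy. */
static struct pll *get_pll(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (wrplls[i].refcount == 0) {
			wrplls[i].refcount++;
			wrplls[i].enabled = 1;
			return &wrplls[i];
		}
	}
	return NULL;	/* both busy: the driver bails out with an error */
}

/* Release a PLL and power it down once the last user is gone. */
static void put_pll(struct pll *pll)
{
	if (pll && --pll->refcount == 0)
		pll->enabled = 0;
}

int main(void)
{
	struct pll *a = get_pll();
	struct pll *b = get_pll();

	printf("%s and %s in use, third request: %s\n",
	       a->name, b->name, get_pll() ? "granted" : "denied");
	put_pll(a);
	put_pll(b);
	return 0;
}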
- /* write eld */
- DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ temp |= TRANS_MSA_8_BPC;
+ WARN(1, "%d bpp unsupported by DDI function\n",
+ intel_crtc->bpp);
+ }
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
+}
- /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
- temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = TRANS_DDI_FUNC_ENABLE;
+ temp |= TRANS_DDI_SELECT_PORT(port);
switch (intel_crtc->bpp) {
case 18:
- temp |= PIPE_DDI_BPC_6;
+ temp |= TRANS_DDI_BPC_6;
break;
case 24:
- temp |= PIPE_DDI_BPC_8;
+ temp |= TRANS_DDI_BPC_8;
break;
case 30:
- temp |= PIPE_DDI_BPC_10;
+ temp |= TRANS_DDI_BPC_10;
break;
case 36:
- temp |= PIPE_DDI_BPC_12;
+ temp |= TRANS_DDI_BPC_12;
break;
default:
- WARN(1, "%d bpp unsupported by pipe DDI function\n",
+ WARN(1, "%d bpp unsupported by transcoder DDI function\n",
intel_crtc->bpp);
}
- if (intel_hdmi->has_hdmi_sink)
- temp |= PIPE_DDI_MODE_SELECT_HDMI;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DDI_PVSYNC;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DDI_PHSYNC;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ switch (pipe) {
+ case PIPE_A:
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ break;
+ case PIPE_B:
+ temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= TRANS_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= (intel_crtc->fdi_lanes - 1) << 1;
+
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ temp |= TRANS_DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ temp |= TRANS_DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ WARN(1, "Unsupported lane count %d\n",
+ intel_dp->lane_count);
+ }
+
+ } else {
+ WARN(1, "Invalid encoder type %d for pipe %d\n",
+ intel_encoder->type, pipe);
+ }
+
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ uint32_t val = I915_READ(reg);
+
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val |= TRANS_DDI_PORT_NONE;
+ I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ int type = intel_connector->base.connector_type;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum pipe pipe = 0;
+ enum transcoder cpu_transcoder;
+ uint32_t tmp;
+
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+ return false;
+
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_EDP;
else
- temp |= PIPE_DDI_MODE_SELECT_DVI;
+ cpu_transcoder = pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- temp |= PIPE_DDI_PVSYNC;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- temp |= PIPE_DDI_PHSYNC;
+ switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ return true;
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ return (type == DRM_MODE_CONNECTOR_DisplayPort);
- I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+ case TRANS_DDI_MODE_SELECT_FDI:
+ return (type == DRM_MODE_CONNECTOR_VGA);
- intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ default:
+ return false;
+ }
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -762,58 +1097,418 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ enum port port = intel_ddi_get_encoder_port(encoder);
u32 tmp;
int i;
- tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+ tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
return false;
- for_each_pipe(i) {
- tmp = I915_READ(DDI_FUNC_CTL(i));
+ if (port == PORT_A) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if ((tmp & PIPE_DDI_PORT_MASK)
- == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
- *pipe = i;
- return true;
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ }
+
+ return true;
+ } else {
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK)
+ == TRANS_DDI_SELECT_PORT(port)) {
+ *pipe = i;
+ return true;
+ }
}
}
- DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
return true;
}
-void intel_enable_ddi(struct intel_encoder *encoder)
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t temp, ret;
+ enum port port;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int i;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ port = PORT_A;
+ } else {
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp &= TRANS_DDI_PORT_MASK;
+
+ for (i = PORT_B; i <= PORT_E; i++)
+ if (temp == TRANS_DDI_SELECT_PORT(i))
+ port = i;
+ }
+
+ ret = I915_READ(PORT_CLK_SEL(port));
+
+ DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
+ pipe_name(pipe), port_name(port), ret);
+
+ return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
{
- struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- int port = intel_hdmi->ddi_port;
- u32 temp;
+ enum pipe pipe;
+ struct intel_crtc *intel_crtc;
- temp = I915_READ(DDI_BUF_CTL(port));
- temp |= DDI_BUF_CTL_ENABLE;
+ for_each_pipe(pipe) {
+ intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
- * and swing/emphasis values are ignored so nothing special needs
- * to be done besides enabling the port.
- */
- I915_WRITE(DDI_BUF_CTL(port), temp);
+ if (!intel_crtc->active)
+ continue;
+
+ intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+ pipe);
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ dev_priv->ddi_plls.spll_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ dev_priv->ddi_plls.wrpll1_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ dev_priv->ddi_plls.wrpll2_refcount++;
+ break;
+ }
+ }
}
-void intel_disable_ddi(struct intel_encoder *encoder)
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ }
+
+ WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t val;
+ bool wait = false;
+
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+ }
+
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- int port = intel_hdmi->ddi_port;
- u32 temp;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ /* In HDMI/DVI mode, the port width and swing/emphasis values
+ * are ignored, so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
+ } else if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_on(intel_dp);
+ }
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_off(intel_dp);
+ }
+}
+
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450;
+ else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+ LCPLL_CLK_FREQ_450)
+ return 450;
+ else if (IS_ULT(dev_priv->dev))
+ return 338;
+ else
+ return 540;
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ /* The LCPLL register should be turned on by the BIOS. For now let's
+ * just check its state and print errors in case something is wrong.
+ * Don't even try to turn it on.
+ */
+
+ DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_dig_port->port;
+ bool wait = false;
+ uint32_t val;
+
+ if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+ }
+
+ val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+ POSTING_READ(DDI_BUF_CTL(port));
+
+ udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ uint32_t val;
+
+ intel_ddi_post_disable(intel_encoder);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_MISC);
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+ intel_dp_check_link_status(intel_dp);
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+ /* HDMI has nothing special to destroy, so we can go with this. */
+ intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int type = intel_encoder->type;
+
+ WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+
+ if (type == INTEL_OUTPUT_HDMI)
+ return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+ else
+ return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+ .destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+ .mode_fixup = intel_ddi_mode_fixup,
+ .mode_set = intel_ddi_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *hdmi_connector = NULL;
+ struct intel_connector *dp_connector = NULL;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!dp_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ if (port != PORT_A) {
+ hdmi_connector = kzalloc(sizeof(struct intel_connector),
+ GFP_KERNEL);
+ if (!hdmi_connector) {
+ kfree(dp_connector);
+ kfree(intel_dig_port);
+ return;
+ }
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->pre_enable = intel_ddi_pre_enable;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+ intel_dig_port->port = port;
+ if (hdmi_connector)
+ intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
+ else
+ intel_dig_port->hdmi.sdvox_reg = 0;
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
- temp = I915_READ(DDI_BUF_CTL(port));
- temp &= ~DDI_BUF_CTL_ENABLE;
+ intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_ddi_hot_plug;
- I915_WRITE(DDI_BUF_CTL(port), temp);
+ if (hdmi_connector)
+ intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+ intel_dp_init_connector(intel_dig_port, dp_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b426d44a2b05..da1ad9c80bb5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,8 +41,6 @@
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>
-#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -80,6 +78,16 @@ struct intel_limit {
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_PCH_SPLIT(dev));
+
+ return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
@@ -380,7 +388,7 @@ static const intel_limit_t intel_limits_vlv_dac = {
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 20000, .max = 165000 },
- .vco = { .min = 5994000, .max = 4000000 },
+ .vco = { .min = 4000000, .max = 5994000},
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
@@ -393,10 +401,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
};
static const intel_limit_t intel_limits_vlv_dp = {
- .dot = { .min = 162000, .max = 270000 },
- .vco = { .min = 5994000, .max = 4000000 },
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
- .m = { .min = 60, .max = 300 }, /* guess */
+ .m = { .min = 22, .max = 450 },
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
@@ -531,7 +539,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- HAS_eDP)
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_dac;
@@ -927,6 +935,15 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return intel_crtc->cpu_transcoder;
+}
+
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -999,9 +1016,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (INTEL_INFO(dev)->gen >= 4) {
- int reg = PIPECONF(pipe);
+ int reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1103,12 +1122,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (IS_HASWELL(dev_priv->dev)) {
/* On Haswell, DDI is used instead of FDI_TX_CTL */
- reg = DDI_FUNC_CTL(pipe);
+ reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
- cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+ cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
@@ -1128,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
- return;
- } else {
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
- }
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -1168,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
- return;
- }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1212,12 +1224,14 @@ void assert_pipe(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
/* if we need the pipe A quirk it must be always on */
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
WARN(cur_state != state,
@@ -1492,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* SBI access */
static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
{
unsigned long flags;
+ u32 tmp;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
- I915_WRITE(SBI_ADDR,
- (reg << 16));
- I915_WRITE(SBI_DATA,
- value);
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRWR);
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+
+ if (destination == SBI_ICLK)
+ tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ else
+ tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
@@ -1522,23 +1538,25 @@ out_unlock:
}
static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination)
{
unsigned long flags;
u32 value = 0;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
- I915_WRITE(SBI_ADDR,
- (reg << 16));
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRRD);
+ I915_WRITE(SBI_ADDR, (reg << 16));
+
+ if (destination == SBI_ICLK)
+ value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
@@ -1554,14 +1572,14 @@ out_unlock:
}
/**
- * intel_enable_pch_pll - enable PCH PLL
+ * ironlake_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
+static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
@@ -1645,12 +1663,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
pll->on = false;
}
-static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- int reg;
- u32 val, pipeconf_val;
+ struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ uint32_t reg, val, pipeconf_val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1664,10 +1682,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
- return;
+ if (HAS_PCH_CPT(dev)) {
+ /* Workaround: Set the timing override bit before enabling the
+ * pch transcoder. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
}
+
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1696,11 +1719,42 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
-static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- int reg;
- u32 val;
+ u32 val, pipeconf_val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+ /* Workaround: set timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+ PIPECONF_INTERLACED_ILK)
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(TRANSCONF(TRANSCODER_A), val);
+ if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t reg, val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1716,6 +1770,31 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %d\n", pipe);
+
+ if (!HAS_PCH_IBX(dev)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
+ }
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(_TRANSACONF);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(_TRANSACONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
}
/**
@@ -1735,9 +1814,17 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ enum transcoder pch_transcoder;
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev))
+ pch_transcoder = TRANSCODER_A;
+ else
+ pch_transcoder = pipe;
+
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1748,13 +1835,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pipe);
- assert_fdi_tx_pll_enabled(dev_priv, pipe);
+ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE)
return;
@@ -1778,6 +1865,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
int reg;
u32 val;
@@ -1791,7 +1880,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
@@ -1807,8 +1896,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
- I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
- I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ if (dev_priv->info->gen >= 4)
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ else
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
}
/**
@@ -1926,9 +2017,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch)
+unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch)
{
int tile_rows, tiles;
@@ -1969,24 +2060,38 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
+
if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
@@ -2000,9 +2105,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- gen4_compute_dspaddr_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
@@ -2053,27 +2158,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (fb->depth != 16)
- return -EINVAL;
-
- dspcntr |= DISPPLANE_16BPP;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
break;
- case 24:
- case 32:
- if (fb->depth == 24)
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- else if (fb->depth == 30)
- dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
- else
- return -EINVAL;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
@@ -2089,9 +2198,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
- gen4_compute_dspaddr_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2099,8 +2208,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
- I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPLINOFF(plane), linear_offset);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+ } else {
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ }
POSTING_READ(reg);
return 0;
@@ -2148,13 +2261,39 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}
+static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (intel_crtc->pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ break;
+ default:
+ break;
+ }
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_framebuffer *old_fb;
int ret;
@@ -2206,20 +2345,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
- if (!dev->primary->master)
- return 0;
-
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return 0;
-
- if (intel_crtc->pipe) {
- master_priv->sarea_priv->pipeB_x = x;
- master_priv->sarea_priv->pipeB_y = y;
- } else {
- master_priv->sarea_priv->pipeA_x = x;
- master_priv->sarea_priv->pipeA_y = y;
- }
+ intel_crtc_update_sarea_pos(crtc, x, y);
return 0;
}
@@ -2302,16 +2428,27 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+ struct intel_crtc *pipe_C_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+ uint32_t temp;
- flags |= FDI_PHASE_SYNC_OVR(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
- flags |= FDI_PHASE_SYNC_EN(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
- POSTING_READ(SOUTH_CHICKEN1);
+ /* When everything is off, disable fdi C so that we can enable fdi B
+ * with all lanes. XXX: This misses the case where a pipe is not using
+ * any pch resources and so doesn't need any fdi lanes. */
+ if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("disabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ }
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -2357,11 +2494,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- if (HAS_PCH_IBX(dev)) {
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
- FDI_RX_PHASE_SYNC_POINTER_EN);
- }
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -2450,6 +2585,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
@@ -2464,9 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
-
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2570,6 +2705,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+ I915_READ(FDI_RX_IIR(pipe)));
+
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2582,6 +2720,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2593,9 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
-
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2613,7 +2751,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
break;
}
}
@@ -2654,7 +2792,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
break;
}
}
@@ -2671,9 +2809,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
int pipe = intel_crtc->pipe;
u32 reg, temp;
- /* Write the TU size bits so error detection works */
- I915_WRITE(FDI_RX_TUSIZE1(pipe),
- I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
@@ -2737,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
udelay(100);
}
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
-
- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
- POSTING_READ(SOUTH_CHICKEN1);
-}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2774,11 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_EN));
- } else if (HAS_PCH_CPT(dev)) {
- cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -2839,7 +2958,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
@@ -2849,23 +2968,6 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
* must be driven by its own crtc; no sharing is possible.
*/
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
- /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
- * CPU handles all others */
- if (IS_HASWELL(dev)) {
- /* It is still unclear how this will work on PPT, so throw up a warning */
- WARN_ON(!HAS_PCH_LPT(dev));
-
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
- DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
- return true;
- } else {
- DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
- intel_encoder->type);
- return false;
- }
- }
-
switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&intel_encoder->base))
@@ -2877,6 +2979,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
+static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
+}
+
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
@@ -2892,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
/* Disable SSCCTL */
intel_sbi_write(dev_priv, SBI_SSCCTL6,
- intel_sbi_read(dev_priv, SBI_SSCCTL6) |
- SBI_SSCCTL_DISABLE);
+ intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+ SBI_SSCCTL_DISABLE,
+ SBI_ICLK);
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (crtc->mode.clock == 20000) {
@@ -2934,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
phaseinc);
/* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
- intel_sbi_write(dev_priv,
- SBI_SSCDIVINTPHASE6,
- temp);
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
- intel_sbi_write(dev_priv,
- SBI_SSCAUXDIV6,
- temp);
-
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv,
- SBI_SSCCTL6,
- temp);
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
/* Wait for initialization time */
udelay(24);
@@ -2986,15 +3086,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_transcoder_disabled(dev_priv, pipe);
+ /* Write the TU size bits before fdi link training, so that error
+ * detection works. */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- intel_enable_pch_pll(intel_crtc);
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+ * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+ * unconditionally resets the pll - we need that to have the right LVDS
+ * enable sequence. */
+ ironlake_enable_pch_pll(intel_crtc);
- if (HAS_PCH_LPT(dev)) {
- DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
- lpt_program_iclkip(crtc);
- } else if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -3031,8 +3140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- if (!IS_HASWELL(dev))
- intel_fdi_normal_train(crtc);
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -3064,15 +3172,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
- DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
- temp |= TRANS_DP_PORT_SEL_B;
- break;
+ BUG();
}
I915_WRITE(reg, temp);
}
- intel_enable_transcoder(dev_priv, pipe);
+ ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+ lpt_program_iclkip(crtc);
+
+ /* Set transcoder timing. */
+ I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
+
+ I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+ lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -3165,16 +3295,12 @@ prepare: /* separate function? */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+ int dslreg = PIPEDSL(pipe);
u32 temp;
temp = I915_READ(dslreg);
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
- /* Without this, mode sets may fail silently on FDI */
- I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
- udelay(250);
- I915_WRITE(tc2reg, 0);
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
}
@@ -3205,9 +3331,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- is_pch_port = intel_crtc_driving_pch(crtc);
+ is_pch_port = ironlake_crtc_driving_pch(crtc);
if (is_pch_port) {
+ /* Note: FDI PLL enabling _must_ be done before we enable the
+ * cpu pipes, hence this is separate from all the other fdi/pch
+ * enabling. */
ironlake_fdi_pll_enable(intel_crtc);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -3220,12 +3349,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ else
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
@@ -3265,6 +3399,83 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ bool is_pch_port;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ dev_priv->display.fdi_link_train(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ intel_ddi_enable_pipe_clock(intel_crtc);
+
+ /* Enable panel fitting for eDP */
+ if (dev_priv->pch_pf_size &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
+ intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_enable_pipe_func(crtc);
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ lpt_pch_enable(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+ * as the first vblank happened, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
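/*
 * Editor's note, not part of the patch: the Haswell enable sequence above
 * runs, in order, FDI link training (PCH outputs only), encoder ->pre_enable,
 * DDI pipe clock, panel fitter, LUT load, DDI transcoder function, pipe,
 * plane, LPT PCH transcoder (PCH outputs only) and finally encoder ->enable.
 */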
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3303,7 +3514,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_fdi_disable(crtc);
- intel_disable_transcoder(dev_priv, pipe);
+ ironlake_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
@@ -3345,12 +3556,78 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ bool is_pch_port;
+
+ if (!intel_crtc->active)
+ return;
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+ /* Disable PF */
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+ intel_ddi_disable_pipe_clock(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (is_pch_port) {
+ lpt_disable_pch_transcoder(dev_priv);
+ intel_ddi_fdi_disable(crtc);
+ }
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_put_pch_pll(intel_crtc);
}
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+ * start using it. */
+ intel_crtc->cpu_transcoder = intel_crtc->pipe;
+
+ intel_ddi_put_crtc_pll(crtc);
+}
+
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -4061,7 +4338,7 @@ static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock, intel_clock_t *reduced_clock,
- int refclk, int num_connectors)
+ int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4069,9 +4346,19 @@ static void vlv_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
- bool is_hdmi;
+ bool is_sdvo;
+ u32 temp;
- is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+ dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+ dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+ dpll |= DPLL_INTEGRATED_CLOCK_VLV;
+
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
bestn = clock->n;
bestm1 = clock->m1;
@@ -4079,12 +4366,10 @@ static void vlv_update_pll(struct drm_crtc *crtc,
bestp1 = clock->p1;
bestp2 = clock->p2;
- /* Enable DPIO clock input */
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
-
+ /*
+ * On Valleyview the PLL and lane counter programming registers are
+ * exposed through the DPIO interface.
+ */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
mdiv |= ((bestn << DPIO_N_SHIFT));
@@ -4095,12 +4380,13 @@ static void vlv_update_pll(struct drm_crtc *crtc,
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
- pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
+ pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
- (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+ (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+ (5 << DPIO_CLK_BIAS_CTL_SHIFT);
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
- intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
+ intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll);
@@ -4108,19 +4394,44 @@ static void vlv_update_pll(struct drm_crtc *crtc,
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
- if (is_hdmi) {
- u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
-
- I915_WRITE(DPLL_MD(pipe), temp);
- POSTING_READ(DPLL_MD(pipe));
}
+ I915_WRITE(DPLL_MD(pipe), temp);
+ POSTING_READ(DPLL_MD(pipe));
- intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
+ /* Now program lane control registers */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+     intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ temp = 0x1000C4;
+ if (pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+ }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ temp = 0x1000C4;
+ if (pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+ }
}
static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4136,6 +4447,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
u32 dpll;
bool is_sdvo;
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
@@ -4236,7 +4549,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
@@ -4245,6 +4558,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll;
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
dpll = DPLL_VGA_MODE_DIS;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4294,6 +4609,64 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t vsyncshift;
+
+ if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal / 2;
+ } else {
+ vsyncshift = 0;
+ }
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+ I915_WRITE(HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+ * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+ * documented on the DDI_FUNC_CTL register description, EDP Input Select
+ * bits. */
+ if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ (pipe == PIPE_B || pipe == PIPE_C))
+ I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
+
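/*
 * Editor's illustrative sketch, not part of the patch: each timing register
 * written above packs (active - 1) into bits 15:0 and (total - 1) into bits
 * 31:16 of the same register.
 */
#include <stdint.h>

static uint32_t sketch_pack_timing(uint32_t active, uint32_t total)
{
	/* e.g. sketch_pack_timing(1920, 2200) == 0x0897077f */
	return (active - 1) | ((total - 1) << 16);
}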
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4307,7 +4680,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dspcntr, pipeconf, vsyncshift;
+ u32 dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder;
@@ -4371,14 +4744,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
- i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
- &reduced_clock : NULL);
-
if (IS_GEN2(dev))
- i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+ i8xx_update_pll(crtc, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
else if (IS_VALLEYVIEW(dev))
- vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
- refclk, num_connectors);
+ vlv_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
@@ -4419,6 +4792,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_ENABLE |
+ I965_PIPECONF_ACTIVE;
+ }
+ }
+
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -4434,40 +4815,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- vsyncshift = adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2;
- } else {
+ else
pipeconf |= PIPECONF_PROGRESSIVE;
- vsyncshift = 0;
- }
- if (!IS_GEN3(dev))
- I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
-
- I915_WRITE(HTOTAL(pipe),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(pipe),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(pipe),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(pipe),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(pipe),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(pipe),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
@@ -4476,8 +4829,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -4495,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4612,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
}
}
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+ bool is_sdv = false;
+ u32 tmp;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
+ }
+
+ if (!has_vga)
+ return;
+
+ /* XXX: Rip out SDV support once Haswell ships for real. */
+ if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+ is_sdv = true;
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ udelay(24);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (!is_sdv) {
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+ 100))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+ tmp &= ~(0x3 << 6);
+ tmp |= (1 << 6) | (1 << 0);
+ intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+ }
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+ tmp |= 0x7FFF;
+ intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ }
+
+ /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+ tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+ tmp |= SBI_DBUFF0_ENABLE;
+ intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ironlake_init_pch_refclk(dev);
+ else if (HAS_PCH_LPT(dev))
+ lpt_init_pch_refclk(dev);
+}
+
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4668,8 +5192,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
val |= PIPE_12BPC;
break;
default:
- val |= PIPE_8BPC;
- break;
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
}
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
@@ -4686,6 +5210,31 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
POSTING_READ(PIPECONF(pipe));
}
+static void haswell_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(cpu_transcoder));
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK_HSW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(cpu_transcoder), val);
+ POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
@@ -4749,74 +5298,126 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
return true;
}
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *fb)
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t temp;
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ if (temp & FDI_BC_BIFURCATION_SELECT)
+ return;
+
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp |= FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("enabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+ DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ if (intel_crtc->fdi_lanes > 4) {
+ DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 4;
+
+ return false;
+ }
+
+ if (dev_priv->num_pipe == 2)
+ return true;
+
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ return true;
+ case PIPE_B:
+ if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+ intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+
+ if (intel_crtc->fdi_lanes > 2)
+ WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ else
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ case PIPE_C:
+ if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+ if (intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+ } else {
+ DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ return false;
+ }
+
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ default:
+ BUG();
+ }
+}
+
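/*
 * Editor's note, not part of the patch: the check above encodes the shared
 * FDI lane budget on CPT, where pipes B and C draw from the same four lanes:
 *
 *   pipe A                      up to 4 lanes, always allowed
 *   pipe B, pipe C disabled     up to 4 lanes
 *   pipe B, pipe C enabled      at most 2 lanes, B/C bifurcation enabled
 *   pipe C                      at most 2 lanes, and only if pipe B also
 *                               uses at most 2 lanes (bifurcation enabled)
 */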
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ return bps / (link_bw * 8) + 1;
+}
+
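/*
 * Editor's illustrative sketch, not part of the patch: worked example of the
 * computation above for an assumed 1920x1080@60 mode (148500 kHz pixel
 * clock) at 24 bpp on a 270000 kHz FDI link:
 *
 *   bps  = 148500 * 24 * 21 / 20      = 3742200
 *   lane = 3742200 / (270000 * 8) + 1 = 1 + 1 = 2 lanes
 */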
+static void ironlake_set_m_n(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false, is_sdvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *encoder, *edp_encoder = NULL;
- int ret;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ struct intel_encoder *intel_encoder, *edp_encoder = NULL;
struct fdi_m_n m_n = {0};
- u32 temp;
- int target_clock, pixel_multiplier, lane, link_bw, factor;
- unsigned int pipe_bpp;
- bool dither;
- bool is_cpu_edp = false, is_pch_edp = false;
+ int target_clock, pixel_multiplier, lane, link_bw;
+ bool is_dp = false, is_cpu_edp = false;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_SDVO:
- case INTEL_OUTPUT_HDMI:
- is_sdvo = true;
- if (encoder->needs_tv_clock)
- is_tv = true;
- break;
- case INTEL_OUTPUT_TVOUT:
- is_tv = true;
- break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_dp = true;
- if (intel_encoder_is_pch_edp(&encoder->base))
- is_pch_edp = true;
- else
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
is_cpu_edp = true;
- edp_encoder = encoder;
+ edp_encoder = intel_encoder;
break;
}
-
- num_connectors++;
}
- ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
- &has_reduced_clock, &reduced_clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
@@ -4843,29 +5444,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
target_clock = adjusted_mode->clock;
- /* determine panel color depth */
- dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
- adjusted_mode);
- if (is_lvds && dev_priv->lvds_dither)
- dither = true;
-
- if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
- pipe_bpp != 36) {
- WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- pipe_bpp = 24;
- }
- intel_crtc->bpp = pipe_bpp;
-
- if (!lane) {
- /*
- * Account for spread spectrum to avoid
- * oversubscribing the link. Max center spread
- * is 2.5%; use 5% for safety's sake.
- */
- u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
- lane = bps / (link_bw * 8) + 1;
- }
+ if (!lane)
+ lane = ironlake_get_lanes_required(target_clock, link_bw,
+ intel_crtc->bpp);
intel_crtc->fdi_lanes = lane;
@@ -4874,10 +5455,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+}
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, u32 fp)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ uint32_t dpll;
+ int factor, pixel_multiplier, num_connectors = 0;
+ bool is_lvds = false, is_sdvo = false, is_tv = false;
+ bool is_dp = false, is_cpu_edp = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
@@ -4889,7 +5511,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else if (is_sdvo && is_tv)
factor = 20;
- if (clock.m < factor * clock.n)
+ if (clock->m < factor * clock->n)
fp |= FP_CB_TUNE;
dpll = 0;
@@ -4899,7 +5521,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
if (pixel_multiplier > 1) {
dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
@@ -4909,11 +5531,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (clock.p2) {
+ switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -4939,15 +5561,79 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
+ return dpll;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither, fdi_config_ok;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+ "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
- * pre-Haswell/LPT generation */
- if (HAS_PCH_LPT(dev)) {
- DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
- pipe);
- } else if (!is_cpu_edp) {
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!is_cpu_edp) {
struct intel_pch_pll *pll;
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5033,47 +5719,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- I915_WRITE(VSYNCSHIFT(pipe),
- adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2);
- } else {
- I915_WRITE(VSYNCSHIFT(pipe), 0);
- }
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
- I915_WRITE(HTOTAL(pipe),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(pipe),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(pipe),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ /* Note, this also computes intel_crtc->fdi_lanes which is used below in
+ * ironlake_check_fdi_lanes. */
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
- I915_WRITE(VTOTAL(pipe),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(pipe),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(pipe),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
-
- /* pipesrc controls the size that is scaled from, which should
- * always be the user's requested size.
- */
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-
- I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+ fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
@@ -5092,6 +5744,217 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+ return fdi_config_ok ? ret : -EINVAL;
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll = 0, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ if (is_cpu_edp)
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
+ /* We are not sure yet this won't happen. */
+ WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+ INTEL_PCH_TYPE(dev));
+
+ WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+ num_connectors, pipe_name(pipe));
+
+ WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+ (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+ WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+ if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+ return -EINVAL;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock,
+ &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+ drm_mode_debug_printmodeline(mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
+ fp);
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its
+ * own on pre-Haswell/LPT generation */
+ if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
+ return -EINVAL;
+ }
+ } else
+ intel_put_pch_pll(intel_crtc);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are
+ * enabled. This is an exception to the general rule that
+ * mode_set doesn't turn things on.
+ */
+ if (is_lvds) {
+ temp = I915_READ(PCH_LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether
+ * we're going to set the DPLLs for dual-channel mode or
+ * not.
+ */
+ if (clock.p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP |
+ LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode
+ * (LVDS_A3_POWER_UP) appropriately here, but we need to
+ * look more thoroughly into how panels behave in the
+ * two modes.
+ */
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(PCH_LVDS, temp);
+ }
+ }
+
+ if (is_dp && !is_cpu_edp) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery
+ * setting.*/
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+ }
+ }
+
+ intel_crtc->lowfreq_avail = false;
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+ }
+
+ if (intel_crtc->pch_pll) {
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+ }
+ }
+ }
+
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+ if (!is_dp || is_cpu_edp)
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (is_cpu_edp)
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+ haswell_set_pipeconf(crtc, adjusted_mode, dither);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
return ret;
}
@@ -5103,6 +5966,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int ret;
@@ -5113,7 +5978,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
x, y, fb);
drm_vblank_post_modeset(dev, pipe);
- return ret;
+ if (ret != 0)
+ return ret;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->base.helper_private;
+ encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+ }
+
+ return 0;
}
static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -5749,7 +6626,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
@@ -5879,24 +6756,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- goto fail;
+ return false;
}
if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- goto fail;
+ return false;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
-
return true;
-fail:
- connector->encoder = NULL;
- encoder->crtc = NULL;
- return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6021,12 +6893,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct drm_display_mode *mode;
- int htot = I915_READ(HTOTAL(pipe));
- int hsync = I915_READ(HSYNC(pipe));
- int vtot = I915_READ(VTOTAL(pipe));
- int vsync = I915_READ(VSYNC(pipe));
+ int htot = I915_READ(HTOTAL(cpu_transcoder));
+ int hsync = I915_READ(HSYNC(cpu_transcoder));
+ int vtot = I915_READ(VTOTAL(cpu_transcoder));
+ int vsync = I915_READ(VSYNC(cpu_transcoder));
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -6183,14 +7055,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
+ struct drm_device *dev = work->crtc->dev;
- mutex_lock(&work->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
- intel_update_fbc(work->dev);
- mutex_unlock(&work->dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+ atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
kfree(work);
}
@@ -6201,8 +7078,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
- struct drm_pending_vblank_event *e;
- struct timeval tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
@@ -6211,24 +7086,22 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- if (work == NULL || !work->pending) {
+
+ /* Ensure we don't miss a work->pending update ... */
+ smp_rmb();
+
+ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
- intel_crtc->unpin_work = NULL;
-
- if (work->event) {
- e = work->event;
- e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+ /* and that the unpin work is consistent wrt ->pending. */
+ smp_rmb();
- e->event.tv_sec = tvbl.tv_sec;
- e->event.tv_usec = tvbl.tv_usec;
+ intel_crtc->unpin_work = NULL;
- list_add_tail(&e->base.link,
- &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
+ if (work->event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
drm_vblank_put(dev, intel_crtc->pipe);
@@ -6238,9 +7111,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
-
wake_up(&dev_priv->pending_flip_queue);
- schedule_work(&work->work);
+
+ queue_work(dev_priv->wq, &work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
@@ -6268,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
+ /* NB: An MMIO update of the plane base pointer will also
+ * generate a page-flip completion irq, i.e. every modeset
+ * is also accompanied by a spurious intel_prepare_page_flip().
+ */
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work) {
- if ((++intel_crtc->unpin_work->pending) > 1)
- DRM_ERROR("Prepared flip multiple times\n");
- } else {
- DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
- }
+ if (intel_crtc->unpin_work)
+ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+ /* Ensure that the work item is consistent when activating it ... */
+ smp_wmb();
+ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+ /* and that it is marked active as soon as the irq could fire. */
+ smp_wmb();
+}
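/*
 * Editor's illustrative sketch, not part of the patch: the smp_wmb()/smp_rmb()
 * pairs above follow the usual publish/observe pattern. A stripped-down
 * version with hypothetical names, under the same assumptions:
 */
#include <linux/atomic.h>
#include <linux/types.h>

struct flip_sketch {
	atomic_t pending;	/* 0 = idle, 1 = queued, 2 = armed */
	void *payload;
};

static void sketch_arm_flip(struct flip_sketch *w, void *payload)
{
	w->payload = payload;		/* fill in the work item first ...  */
	smp_wmb();			/* ... publish it ...               */
	atomic_set(&w->pending, 2);	/* ... then mark it armed ...       */
	smp_wmb();			/* ... before the irq can see it    */
}

static bool sketch_flip_done(struct flip_sketch *w)
{
	if (atomic_read(&w->pending) < 2)
		return false;		/* irq fired before flip was armed  */
	smp_rmb();			/* armed implies payload is stable  */
	return w->payload != NULL;
}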
+
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -6311,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6351,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6397,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6439,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6493,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6541,7 +7432,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -ENOMEM;
work->event = event;
- work->dev = crtc->dev;
+ work->crtc = crtc;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -6566,6 +7457,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
+ if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+ flush_workqueue(dev_priv->wq);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
@@ -6584,6 +7478,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ atomic_inc(&intel_crtc->unpin_work_count);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
@@ -6598,6 +7493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
+ atomic_dec(&intel_crtc->unpin_work_count);
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
@@ -6893,7 +7789,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
dev->mode_config.dpms_property;
connector->dpms = DRM_MODE_DPMS_ON;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dpms_property,
DRM_MODE_DPMS_ON);
@@ -7015,8 +7911,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_encoder *encoder;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
bool ret = true;
@@ -7061,6 +7955,9 @@ bool intel_set_mode(struct drm_crtc *crtc,
* update the the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
@@ -7070,18 +7967,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
x, y, fb);
if (!ret)
goto done;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
- if (encoder->crtc != &intel_crtc->base)
- continue;
-
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.id, drm_get_encoder_name(encoder),
- mode->base.id, mode->name);
- encoder_funcs = encoder->helper_private;
- encoder_funcs->mode_set(encoder, mode, adjusted_mode);
- }
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7259,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
config->mode_changed = true;
}
-
- /* Disable all disconnected encoders. */
- if (connector->base.status == connector_status_disconnected)
- connector->new_encoder = NULL;
}
/* connector->new_encoder is now updated for all connectors. */
@@ -7420,6 +8301,12 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+ if (IS_HASWELL(dev))
+ intel_ddi_pll_init(dev);
+}
+
static void intel_pch_pll_init(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7459,6 +8346,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
+ intel_crtc->cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
@@ -7551,17 +8439,9 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (HAS_PCH_SPLIT(dev)) {
- dpd_is_edp = intel_dpd_is_edp(dev);
-
- if (has_edp_a(dev))
- intel_dp_init(dev, DP_A, PORT_A);
-
- if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D, PORT_D);
- }
-
- intel_crt_init(dev);
+ if (!(IS_HASWELL(dev) &&
+ (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ intel_crt_init(dev);
if (IS_HASWELL(dev)) {
int found;
@@ -7584,6 +8464,10 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
+ dpd_is_edp = intel_dpd_is_edp(dev);
+
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
@@ -7603,11 +8487,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C, PORT_C);
- if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+ if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
+ /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+ if (I915_READ(DP_C) & DP_DETECTED)
+ intel_dp_init(dev, DP_C, PORT_C);
+
if (I915_READ(SDVOB) & PORT_DETECTED) {
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
@@ -7620,9 +8508,6 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, SDVOC, PORT_C);
- /* Shares lanes with HDMI on SDVOC */
- if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -7676,8 +8561,9 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_encoder_clones(encoder);
}
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ironlake_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev);
+
+ drm_helper_move_panel_connectors_to_head(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7712,33 +8598,74 @@ int intel_framebuffer_init(struct drm_device *dev,
{
int ret;
- if (obj->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
+ }
- if (mode_cmd->pitches[0] & 63)
+ if (mode_cmd->pitches[0] & 63) {
+ DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+ mode_cmd->pitches[0]);
return -EINVAL;
+ }
+ /* FIXME <= Gen4 stride limits are a bit unclear */
+ if (mode_cmd->pitches[0] > 32768) {
+ DRM_DEBUG("pitch (%d) must be at less than 32768\n",
+ mode_cmd->pitches[0]);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ mode_cmd->pitches[0] != obj->stride) {
+ DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], obj->stride);
+ return -EINVAL;
+ }
+
+ /* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ if (INTEL_INFO(dev)->gen > 3) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- /* RGB formats are common across chipsets */
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
+ if (INTEL_INFO(dev)->gen < 5) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format %u\n",
- mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0)
+ return -EINVAL;
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -7776,7 +8703,13 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_HASWELL(dev)) {
+ dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -7827,6 +8760,8 @@ static void intel_init_display(struct drm_device *dev)
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
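intel_init_display() fills dev_priv->display with per-platform callbacks, and the Haswell branch above simply selects a different set of hooks. A toy sketch of that function-pointer dispatch; the platform names and callbacks are invented:

/* Sketch of the dev_priv->display dispatch pattern; everything here is
 * made up for illustration. */
#include <stdio.h>

struct display_funcs {
	void (*crtc_enable)(int pipe);
};

static void ironlake_enable(int pipe) { printf("ironlake enable pipe %d\n", pipe); }
static void haswell_enable(int pipe)  { printf("haswell enable pipe %d\n", pipe); }

enum platform { PLAT_ILK, PLAT_HSW };

static void init_display(struct display_funcs *f, enum platform p)
{
	if (p == PLAT_HSW)
		f->crtc_enable = haswell_enable;
	else
		f->crtc_enable = ironlake_enable;
}

int main(void)
{
	struct display_funcs funcs;

	init_display(&funcs, PLAT_HSW);
	funcs.crtc_enable(0);	/* dispatches to the platform hook */
	return 0;
}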
@@ -8058,6 +8993,7 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
+ intel_cpu_pll_init(dev);
intel_pch_pll_init(dev);
/* Just disable it once at startup */
@@ -8127,7 +9063,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
- reg = PIPECONF(crtc->pipe);
+ reg = PIPECONF(crtc->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* We need to sanitize the plane -> pipe mapping first because this will
@@ -8244,9 +9180,27 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
+static void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+ }
+}
+
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
* and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev)
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
@@ -8255,10 +9209,35 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
+ if (IS_HASWELL(dev)) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc->cpu_transcoder = TRANSCODER_EDP;
+
+ DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+ pipe_name(pipe));
+ }
+ }
+
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- tmp = I915_READ(PIPECONF(pipe));
+ tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
if (tmp & PIPECONF_ENABLE)
crtc->active = true;
else
@@ -8271,6 +9250,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
+ if (IS_HASWELL(dev))
+ intel_ddi_setup_hw_pll_state(dev);
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
pipe = 0;
@@ -8317,9 +9299,21 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
intel_sanitize_crtc(crtc);
}
- intel_modeset_update_staged_output_state(dev);
+ if (force_restore) {
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_set_mode(&crtc->base, &crtc->base.mode,
+ crtc->base.x, crtc->base.y, crtc->base.fb);
+ }
+
+ i915_redisable_vga(dev);
+ } else {
+ intel_modeset_update_staged_output_state(dev);
+ }
intel_modeset_check_state(dev);
+
+ drm_mode_config_reset(dev);
}
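On Haswell the eDP transcoder can be fed by pipe A, B or C, so the hardware read-out above decodes the TRANS_DDI_EDP_INPUT_* field to find the driving pipe and retags that pipe's crtc with cpu_transcoder = TRANSCODER_EDP. A compact sketch of that decode; the bit values below are placeholders, not the real register layout:

/* Decode sketch only; these *_ONOFF values are placeholders. */
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_INVALID };

#define EDP_INPUT_MASK     0x3
#define EDP_INPUT_A_ONOFF  0x0
#define EDP_INPUT_B_ONOFF  0x1
#define EDP_INPUT_C_ONOFF  0x2

static enum pipe edp_input_to_pipe(unsigned int func_ctl)
{
	switch (func_ctl & EDP_INPUT_MASK) {
	case EDP_INPUT_A_ONOFF: return PIPE_A;
	case EDP_INPUT_B_ONOFF: return PIPE_B;
	case EDP_INPUT_C_ONOFF: return PIPE_C;
	default:                return PIPE_INVALID;
	}
}

int main(void)
{
	printf("eDP driven by pipe %c\n", 'A' + edp_input_to_pipe(0x1));
	return 0;
}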
void intel_modeset_gem_init(struct drm_device *dev)
@@ -8328,7 +9322,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
- intel_modeset_setup_hw_state(dev);
+ intel_modeset_setup_hw_state(dev, false);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -8447,6 +9441,7 @@ intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
+ enum transcoder cpu_transcoder;
int i;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
@@ -8454,6 +9449,8 @@ intel_display_capture_error_state(struct drm_device *dev)
return NULL;
for_each_pipe(i) {
+ cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
@@ -8468,14 +9465,14 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(i));
- error->pipe[i].hblank = I915_READ(HBLANK(i));
- error->pipe[i].hsync = I915_READ(HSYNC(i));
- error->pipe[i].vtotal = I915_READ(VTOTAL(i));
- error->pipe[i].vblank = I915_READ(VBLANK(i));
- error->pipe[i].vsync = I915_READ(VSYNC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
return error;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 368ed8ef1600..fb3715b4b09d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,8 +36,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
/**
@@ -49,7 +47,9 @@
*/
static bool is_edp(struct intel_dp *intel_dp)
{
- return intel_dp->base.type == INTEL_OUTPUT_EDP;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
/**
@@ -76,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
- return container_of(encoder, struct intel_dp, base.base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.base.dev;
}
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_dp, base);
+ return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
/**
@@ -106,49 +107,32 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
return is_pch_edp(intel_dp);
}
-static void intel_dp_start_link_train(struct intel_dp *intel_dp);
-static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
*lane_num = intel_dp->lane_count;
- if (intel_dp->link_bw == DP_LINK_BW_1_62)
- *link_bw = 162000;
- else if (intel_dp->link_bw == DP_LINK_BW_2_7)
- *link_bw = 270000;
+ *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
- if (intel_dp->panel_fixed_mode)
- return intel_dp->panel_fixed_mode->clock;
+ if (intel_connector->panel.fixed_mode)
+ return intel_connector->panel.fixed_mode->clock;
else
return mode->clock;
}
static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
-{
- int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
- switch (max_lane_count) {
- case 1: case 2: case 4:
- break;
- default:
- max_lane_count = 4;
- }
- return max_lane_count;
-}
-
-static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -208,7 +192,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -234,12 +218,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ if (is_edp(intel_dp) && fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
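The mode_valid and dithering paths reduce to a bandwidth comparison: the data rate a mode needs (pixel clock times bpp, rounded up to bytes) must fit within what the link provides after 8b/10b coding (link clock times lane count times 8/10). A minimal sketch of that arithmetic; the helper names are made up:

/* Bandwidth sketch mirroring the mode_rate vs. max_rate comparison. */
#include <stdio.h>

/* Bytes of pixel data per second (kHz units), rounded up. */
static int mode_rate(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 7) / 8;
}

/* Usable payload after 8b/10b coding, per link clock and lane count. */
static int max_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;
}

int main(void)
{
	int need = mode_rate(148500, 24);	/* 1080p60 at 24 bpp */
	int have = max_rate(270000, 4);		/* 2.7 GHz link x 4 lanes */

	printf("%d <= %d : %s\n", need, have, need <= have ? "fits" : "too fast");
	return 0;
}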
@@ -285,6 +271,10 @@ intel_hrawclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
+ /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+ if (IS_VALLEYVIEW(dev))
+ return 200;
+
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
@@ -310,7 +300,7 @@ intel_hrawclk(struct drm_device *dev)
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -318,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -327,7 +317,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
if (!is_edp(intel_dp))
@@ -346,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -356,6 +347,29 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint32_t aux_clock_divider;
int try, precharge;
+ if (IS_HASWELL(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ ch_ctl = DPA_AUX_CH_CTL;
+ ch_data = DPA_AUX_CH_DATA1;
+ break;
+ case PORT_B:
+ ch_ctl = PCH_DPB_AUX_CH_CTL;
+ ch_data = PCH_DPB_AUX_CH_DATA1;
+ break;
+ case PORT_C:
+ ch_ctl = PCH_DPC_AUX_CH_CTL;
+ ch_data = PCH_DPC_AUX_CH_DATA1;
+ break;
+ case PORT_D:
+ ch_ctl = PCH_DPD_AUX_CH_CTL;
+ ch_data = PCH_DPD_AUX_CH_DATA1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
@@ -365,12 +379,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_HASWELL(dev))
+ aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+ else if (IS_VALLEYVIEW(dev))
+ aux_clock_divider = 100;
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
- aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+ aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
else
aux_clock_divider = intel_hrawclk(dev) / 2;
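The AUX channel wants to run near 2 MHz, so the divider above is taken from whichever reference clock feeds the AUX block on that platform: half the CDCLK on Haswell, a fixed 100 on Valleyview, 200 or 225 for CPU eDP, half the PCH raw clock for PCH ports, and half of hrawclk otherwise. A sketch of that selection; the platform enum and clock parameters are invented:

/* Divider-selection sketch; platform names and clock inputs are invented. */
#include <stdio.h>

enum plat { HSW, VLV, SNB_IVB_EDP, PCH_PORT, GMCH };

static int aux_clock_divider(enum plat p, int cdclk_mhz, int rawclk_mhz,
			     int hrawclk_mhz)
{
	switch (p) {
	case HSW:         return cdclk_mhz / 2;		/* half the CDCLK */
	case VLV:         return 100;
	case SNB_IVB_EDP: return 200;			/* 400 MHz eDP input clock */
	case PCH_PORT:    return (rawclk_mhz + 1) / 2;	/* DIV_ROUND_UP(rawclk, 2) */
	default:          return hrawclk_mhz / 2;
	}
}

int main(void)
{
	/* A 125 MHz raw clock gives the old fixed divider of 63. */
	printf("PCH divider: %d\n", aux_clock_divider(PCH_PORT, 0, 125, 0));
	return 0;
}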
@@ -642,9 +660,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return -EREMOTEIO;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
@@ -670,22 +685,25 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
return ret;
}
-static bool
+bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
- intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
mode, adjusted_mode);
}
@@ -762,21 +780,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
/*
* Find the lane count in the intel_encoder private
*/
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
{
lane_count = intel_dp->lane_count;
break;
@@ -791,23 +811,46 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(TRANSDATA_M1(pipe),
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+ } else if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+ } else if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
} else {
I915_WRITE(PIPE_GMCH_DATA_M(pipe),
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+}
+
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
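intel_dp_init_link_config() assembles the byte block later written to the sink's DP_LINK_BW_SET area over AUX: byte 0 carries the link rate code, byte 1 the lane count plus the enhanced-framing bit when a DPCD 1.1+ sink advertises it, and byte 8 selects ANSI 8b/10b coding. A sketch building the same buffer from plain inputs; only the DP_* constants mirror the values used above:

/* Link-configuration sketch; everything beyond the DP_* constants is
 * illustrative. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DP_LINK_CONFIGURATION_SIZE	9
#define DP_LANE_COUNT_ENHANCED_FRAME_EN	0x80
#define DP_SET_ANSI_8B10B		0x01

static void build_link_config(uint8_t cfg[DP_LINK_CONFIGURATION_SIZE],
			      uint8_t link_bw, uint8_t lane_count,
			      int enhanced_framing)
{
	memset(cfg, 0, DP_LINK_CONFIGURATION_SIZE);
	cfg[0] = link_bw;		/* e.g. 0x0a for the 2.7 GHz rate */
	cfg[1] = lane_count;
	if (enhanced_framing)
		cfg[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	cfg[8] = DP_SET_ANSI_8B10B;
}

int main(void)
{
	uint8_t cfg[DP_LINK_CONFIGURATION_SIZE];

	build_link_config(cfg, 0x0a, 4, 1);
	printf("bw=%#x lanes=%#x coding=%#x\n", cfg[0], cfg[1], cfg[8]);
	return 0;
}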
@@ -815,7 +858,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/*
@@ -860,21 +903,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
- intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
- /*
- * Check for DPCD version > 1.1 and enhanced framing support
- */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- }
+
+ intel_dp_init_link_config(intel_dp);
/* Split out the IBX/CPU vs CPT settings */
- if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -931,7 +965,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -978,9 +1012,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
return control;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1019,7 +1053,7 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1041,14 +1075,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
@@ -1071,9 +1105,9 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
-static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1113,9 +1147,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1138,10 +1172,12 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
ironlake_wait_panel_off(intel_dp);
}
-static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
if (!is_edp(intel_dp))
@@ -1159,17 +1195,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ intel_panel_enable_backlight(dev, pipe);
}
-static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
if (!is_edp(intel_dp))
return;
+ intel_panel_disable_backlight(dev);
+
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
@@ -1180,8 +1220,9 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
@@ -1205,8 +1246,9 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
@@ -1228,7 +1270,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
}
/* If the sink supports it, try to set the power state appropriately */
-static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
int ret, i;
@@ -1298,9 +1340,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
return true;
}
}
- }
- DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+ intel_dp->output_reg);
+ }
return true;
}
@@ -1396,38 +1439,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE);
}
-static uint8_t
-intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int r)
-{
- return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static uint8_t
-intel_get_adjust_request_voltage(uint8_t adjust_request[2],
- int lane)
-{
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
-
- return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
- int lane)
-{
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
-
- return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
-
#if 0
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@@ -1448,7 +1459,7 @@ static char *link_train_names[] = {
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1461,9 +1472,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1494,13 +1517,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
uint8_t v = 0;
uint8_t p = 0;
int lane;
- uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
uint8_t voltage_max;
uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+ uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
@@ -1617,52 +1639,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
}
}
-static uint8_t
-intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int s = (lane & 1) * 4;
- uint8_t l = link_status[lane>>1];
-
- return (l >> s) & 0xf;
-}
-
-/* Check for clock recovery is done on all channels */
-static bool
-intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_dp_signal_levels_hsw(uint8_t train_set)
{
- int lane;
- uint8_t lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
- }
- return true;
-}
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+ return DDI_BUF_EMP_400MV_9_5DB_HSW;
-/* Check to see if channel eq is done on all channels */
-#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
- DP_LANE_CHANNEL_EQ_DONE|\
- DP_LANE_SYMBOL_LOCKED)
-static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
-{
- uint8_t lane_align;
- uint8_t lane_status;
- int lane;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_HSW;
- lane_align = intel_dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < intel_dp->lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
- return false;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_HSW;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_HSW;
}
- return true;
}
static bool
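The train_set byte fed to intel_dp_signal_levels_hsw() packs the requested voltage swing and pre-emphasis into one value, and the switch above maps each supported combination to a DDI buffer translation, falling back to the lowest setting for anything else. A sketch of the packing and lookup structure; the masks, shifts and the swing+preemph limit below are placeholders, not the DP_TRAIN_* definitions:

/* Packing sketch only; SWING_/PREEMPH_ values are placeholders. */
#include <stdint.h>
#include <stdio.h>

#define SWING_SHIFT	0
#define SWING_MASK	0x3
#define PREEMPH_SHIFT	3
#define PREEMPH_MASK	(0x3 << PREEMPH_SHIFT)

static uint8_t pack_train_set(unsigned int swing, unsigned int preemph)
{
	return (swing << SWING_SHIFT) | (preemph << PREEMPH_SHIFT);
}

/* Pick a buffer-translation index from the packed value, lowest setting as
 * the fallback, mirroring the structure of the switch above. */
static int ddi_buf_trans_index(uint8_t train_set)
{
	unsigned int swing = (train_set & SWING_MASK) >> SWING_SHIFT;
	unsigned int preemph = (train_set & PREEMPH_MASK) >> PREEMPH_SHIFT;

	if (swing + preemph > 3)	/* combination the hardware can't do */
		return 0;
	return swing * 4 + preemph;
}

int main(void)
{
	uint8_t ts = pack_train_set(1, 2);	/* 600 mV, 6 dB in this analogy */

	printf("buf trans entry %d\n", ddi_buf_trans_index(ts));
	return 0;
}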
@@ -1670,11 +1678,49 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
int ret;
+ uint32_t temp;
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ if (IS_HASWELL(dev)) {
+ temp = I915_READ(DP_TP_CTL(port));
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+ DP_TP_STATUS_IDLE_DONE), 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ }
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ } else if (HAS_PCH_CPT(dev) &&
+ (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1734,16 +1780,20 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
}
/* Enable corresponding port and start training pattern 1 */
-static void
+void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+ struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
+ if (IS_HASWELL(dev))
+ intel_ddi_prepare_link_retrain(encoder);
+
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1761,8 +1811,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
-
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(
+ intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1770,23 +1823,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
+ signal_levels);
+ /* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
- /* Set training pattern 1 */
- udelay(100);
+ drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
@@ -1825,10 +1879,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-static void
+void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
@@ -1848,7 +1902,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1865,18 +1922,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_LINK_SCRAMBLING_DISABLE))
break;
- udelay(400);
+ drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
/* Make sure clock is still ok */
- if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
- if (intel_channel_eq_ok(intel_dp, link_status)) {
+ if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
@@ -1895,16 +1952,38 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
+ if (channel_eq)
+ DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
+
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
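Both training phases follow the same loop: program the pattern and drive levels, wait the DPCD-specified delay, read the link status over AUX, then either bump the drive levels per the sink's adjust request or give up after a bounded number of tries; channel equalization additionally restarts clock recovery if CR is lost. A compressed sketch of that control flow with stubbed hardware accessors; none of the *_hw() helpers exist in the driver:

/* Control-flow sketch of the two training phases; every *_hw() helper is
 * a stub, not a driver function. */
#include <stdbool.h>
#include <stdio.h>

static bool set_pattern_hw(int pat, int levels) { (void)pat; (void)levels; return true; }
static bool clock_recovery_ok_hw(int levels)    { return levels >= 2; }
static bool channel_eq_ok_hw(int levels)        { return levels >= 3; }

static bool train_link(void)
{
	int levels = 0;

	/* Phase 1: clock recovery, bumping drive levels until CR locks. */
	for (int attempt = 0; attempt < 5; attempt++) {
		set_pattern_hw(1, levels);
		if (clock_recovery_ok_hw(levels))
			break;
		levels++;	/* follow the sink's adjust request */
	}
	if (!clock_recovery_ok_hw(levels))
		return false;

	/* Phase 2: channel equalization with pattern 2. */
	for (int attempt = 0; attempt < 5; attempt++) {
		set_pattern_hw(2, levels);
		if (channel_eq_ok_hw(levels))
			return true;
		levels++;
	}
	return false;
}

int main(void)
{
	printf("training %s\n", train_link() ? "succeeded" : "failed");
	return 0;
}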
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
+ /*
+ * The DDI code has a strict mode set sequence and we should try to
+ * respect it; otherwise we might hang the machine in many different
+ * ways. So we really should only be disabling the port on a complete
+ * crtc_disable sequence. This function is called under only two
+ * conditions in the DDI code:
+ * - Link training failed while doing crtc_enable, in which case we
+ * really should respect the mode set sequence and wait for a
+ * crtc_disable.
+ * - Someone turned the monitor off and intel_dp_check_link_status
+ * called us. We don't need to disable the whole port in this case, so
+ * when someone turns the monitor on again,
+ * intel_ddi_prepare_link_retrain will take care of redoing the link
+ * training.
+ */
+ if (IS_HASWELL(dev))
+ return;
+
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
@@ -1923,7 +2002,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
@@ -2024,7 +2103,7 @@ static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
- intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
/*
@@ -2036,16 +2115,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 4. Check link status on receipt of hot-plug interrupt
*/
-static void
+void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->base.connectors_active)
+ if (!intel_encoder->connectors_active)
return;
- if (WARN_ON(!intel_dp->base.base.crtc))
+ if (WARN_ON(!intel_encoder->base.crtc))
return;
/* Try to read receiver status if the link appears to be up */
@@ -2074,9 +2154,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- if (!intel_channel_eq_ok(intel_dp, link_status)) {
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- drm_get_encoder_name(&intel_dp->base.base));
+ drm_get_encoder_name(&intel_encoder->base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
@@ -2125,11 +2205,12 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
- status = intel_panel_detect(intel_dp->base.base.dev);
+ status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
@@ -2141,7 +2222,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@@ -2168,44 +2249,45 @@ g4x_dp_detect(struct intel_dp *intel_dp)
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct edid *edid;
- int size;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp)) {
- if (!intel_dp->edid)
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ struct edid *edid;
+ int size;
+
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
return NULL;
- size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+ size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
edid = kmalloc(size, GFP_KERNEL);
if (!edid)
return NULL;
- memcpy(edid, intel_dp->edid, size);
+ memcpy(edid, intel_connector->edid, size);
return edid;
}
- edid = drm_get_edid(connector, adapter);
- return edid;
+ return drm_get_edid(connector, adapter);
}
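The connector now caches its EDID (or an ERR_PTR marker for a known-bad one), and intel_dp_get_edid() hands out a copy sized from the extension count: one 128-byte base block plus 128 bytes per extension. A userspace sketch of that duplication; the struct layout is simplified:

/* EDID copy sketch; EDID_LENGTH matches the spec's 128-byte block size,
 * the struct layout here is a simplification. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define EDID_LENGTH 128

struct fake_edid {
	uint8_t header[126];
	uint8_t extensions;	/* number of 128-byte extension blocks */
	uint8_t checksum;
};

static void *dup_edid(const struct fake_edid *cached)
{
	size_t size = (cached->extensions + 1) * EDID_LENGTH;
	void *copy = malloc(size);

	if (copy)
		memcpy(copy, cached, size);
	return copy;
}

int main(void)
{
	struct fake_edid *e = calloc(1, 2 * EDID_LENGTH);
	void *copy;

	if (!e)
		return 1;
	e->extensions = 1;	/* base block + one CEA extension */
	copy = dup_edid(e);
	printf("copied %d bytes\n", (e->extensions + 1) * EDID_LENGTH);
	free(copy);
	free(e);
	return 0;
}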
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- int ret;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp)) {
- drm_mode_connector_update_edid_property(connector,
- intel_dp->edid);
- ret = drm_add_edid_modes(connector, intel_dp->edid);
- drm_edid_to_eld(connector,
- intel_dp->edid);
- return intel_dp->edid_mode_count;
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return 0;
+
+ return intel_connector_update_modes(connector,
+ intel_connector->edid);
}
- ret = intel_ddc_get_modes(connector, adapter);
- return ret;
+ return intel_ddc_get_modes(connector, adapter);
}
@@ -2219,9 +2301,12 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false;
@@ -2230,10 +2315,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
else
status = g4x_dp_detect(intel_dp);
- DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
- intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
- intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
- intel_dp->dpcd[6], intel_dp->dpcd[7]);
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
if (status != connector_status_connected)
return status;
@@ -2250,49 +2334,31 @@ intel_dp_detect(struct drm_connector *connector, bool force)
}
}
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_connected;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
- if (ret) {
- if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
- struct drm_display_mode *newmode;
- list_for_each_entry(newmode, &connector->probed_modes,
- head) {
- if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, newmode);
- break;
- }
- }
- }
+ if (ret)
return ret;
- }
- /* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (is_edp(intel_dp)) {
- /* initialize panel mode from VBT if available for eDP */
- if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_dp->panel_fixed_mode) {
- intel_dp->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
- }
- }
- if (intel_dp->panel_fixed_mode) {
- struct drm_display_mode *mode;
- mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ /* if eDP has no EDID, fall back to fixed mode */
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev,
+ intel_connector->panel.fixed_mode);
+ if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
@@ -2322,10 +2388,12 @@ intel_dp_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -2358,11 +2426,27 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
+ if (is_edp(intel_dp) &&
+ property == connector->dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val) {
+ /* the eDP scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = val;
+
+ goto done;
+ }
+
return -EINVAL;
done:
- if (intel_dp->base.base.crtc) {
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ if (intel_encoder->base.crtc) {
+ struct drm_crtc *crtc = intel_encoder->base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@@ -2375,27 +2459,33 @@ intel_dp_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp))
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ if (is_edp(intel_dp)) {
intel_panel_destroy_backlight(dev);
+ intel_panel_fini(&intel_connector->panel);
+ }
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
-static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
- kfree(intel_dp->edid);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
- kfree(intel_dp);
+ kfree(intel_dig_port);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -2425,7 +2515,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
intel_dp_check_link_status(intel_dp);
}
@@ -2435,13 +2525,14 @@ int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
@@ -2471,78 +2562,204 @@ bool intel_dpd_is_edp(struct drm_device *dev)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+
+ if (is_edp(intel_dp)) {
+ drm_mode_create_scaling_mode_property(connector->dev);
+ drm_object_attach_property(
+ &connector->base,
+ connector->dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_power_seq cur, vbt, spec, final;
+ u32 pp_on, pp_off, pp_div, pp;
+
+ /* Workaround: Need to write PP_CONTROL with the unlock key as
+ * the very first thing. */
+ pp = ironlake_get_pp_control(dev_priv);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->edp.pps;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100 ms, but zero-based
+ * in the hw (so we need to add 100 ms). But the sw vbt table
+ * multiplies it by 1000 to make it in units of 100 usec, too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ if (out)
+ *out = final;
+}
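The merge policy above is: for each power-sequencer delay, take the larger of the current register value and the VBT value, and only when both are zero fall back to the eDP spec limit; the result is then rounded up from the hardware's 100 us units to milliseconds. A sketch of that assign_final()/get_delay() logic on a single field:

/* Delay-merge sketch; units follow the comments above (hardware values in
 * 100 us steps, final delays in ms). */
#include <stdio.h>

static int max_u(int a, int b) { return a > b ? a : b; }

/* max(cur, vbt), falling back to the spec limit when both are unset. */
static int merge_delay(int cur, int vbt, int spec)
{
	int m = max_u(cur, vbt);

	return m ? m : spec;
}

/* Convert from 100 us hardware units to milliseconds, rounding up. */
static int to_ms(int hw_units)
{
	return (hw_units + 9) / 10;
}

int main(void)
{
	int t1_t3 = merge_delay(0, 0, 210 * 10);	/* both unset -> spec */

	printf("panel power up delay: %d ms\n", to_ms(t1_t3));	/* 210 ms */
	return 0;
}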
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_on, pp_off, pp_div;
+
+ /* And finally store the new values in the power sequencer. */
+ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+ /* Compute the divisor for the pp clock, simply match the Bspec
+ * formula. */
+ pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
+ << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (is_cpu_edp(intel_dp))
+ pp_on |= PANEL_POWER_PORT_DP_A;
+ else
+ pp_on |= PANEL_POWER_PORT_DP_D;
+ }
+
+ I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
+ I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
+ I915_WRITE(PCH_PP_DIVISOR, pp_div);
+
+ DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ I915_READ(PCH_PP_ON_DELAYS),
+ I915_READ(PCH_PP_OFF_DELAYS),
+ I915_READ(PCH_PP_DIVISOR));
}
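
The divisor register written above packs two fields: a reference divider derived from the PCH raw clock (following the Bspec formula) and the power-cycle delay, which the hardware counts in 100 ms steps, so the 100 us-based t11_t12 is divided by 1000 with a round-up. Below is a small userspace sketch of that packing; the shift values and the assumption that the raw clock is passed in MHz are inferred from context here, not taken from the register spec.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Illustrative field positions; the authoritative ones live in i915_reg.h. */
#define PP_REFERENCE_DIVIDER_SHIFT	8
#define PANEL_POWER_CYCLE_DELAY_SHIFT	0

/* rawclk assumed to be in MHz; t11_t12 is in 100 us units. */
static uint32_t pack_pp_divisor(int rawclk, int t11_t12)
{
	uint32_t pp_div;

	pp_div = ((100 * rawclk) / 2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= DIV_ROUND_UP(t11_t12, 1000) << PANEL_POWER_CYCLE_DELAY_SHIFT;

	return pp_div;
}

int main(void)
{
	/* e.g. a 125 MHz raw clock and a 610 ms power cycle delay */
	printf("PP_DIVISOR = 0x%08x\n", pack_pp_divisor(125, 6100));
	return 0;
}
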
void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_dp *intel_dp;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edp_power_seq power_seq = { 0 };
+ enum port port = intel_dig_port->port;
const char *name = NULL;
int type;
- intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
- if (!intel_dp)
- return;
-
- intel_dp->output_reg = output_reg;
- intel_dp->port = port;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->attached_connector = intel_connector;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_dp);
- return;
- }
- intel_encoder = &intel_dp->base;
-
- if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+ if (HAS_PCH_SPLIT(dev) && port == PORT_D)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
- if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+ /*
+ * FIXME: We need to initialize built-in panels before external panels.
+ * For X0, DP_C is fixed as eDP. Revisit this as part of the VLV eDP cleanup.
+ */
+ if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else if (port == PORT_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
+ /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+ * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+ * rewrite it.
+ */
type = DRM_MODE_CONNECTOR_DisplayPort;
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
- connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- intel_encoder->cloneable = false;
-
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
-
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
- intel_encoder->enable = intel_enable_dp;
- intel_encoder->pre_enable = intel_pre_enable_dp;
- intel_encoder->disable = intel_disable_dp;
- intel_encoder->post_disable = intel_post_disable_dp;
- intel_encoder->get_hw_state = intel_dp_get_hw_state;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
/* Set up the DDC bus. */
switch (port) {
@@ -2566,66 +2783,15 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
break;
}
- /* Cache some DPCD data in the eDP case */
- if (is_edp(intel_dp)) {
- struct edp_power_seq cur, vbt;
- u32 pp_on, pp_off, pp_div;
-
- pp_on = I915_READ(PCH_PP_ON_DELAYS);
- pp_off = I915_READ(PCH_PP_OFF_DELAYS);
- pp_div = I915_READ(PCH_PP_DIVISOR);
-
- if (!pp_on || !pp_off || !pp_div) {
- DRM_INFO("bad panel power sequencing delays, disabling panel\n");
- intel_dp_encoder_destroy(&intel_dp->base.base);
- intel_dp_destroy(&intel_connector->base);
- return;
- }
-
- /* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
-
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
-
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
-
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
-
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
- vbt = dev_priv->edp.pps;
-
- DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
-
-#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
-
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
- }
+ if (is_edp(intel_dp))
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_i2c_init(intel_dp, intel_connector, name);
+ /* Cache DPCD and EDID for edp. */
if (is_edp(intel_dp)) {
bool ret;
+ struct drm_display_mode *scan;
struct edid *edid;
ironlake_edp_panel_vdd_on(intel_dp);
@@ -2640,29 +2806,51 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- intel_dp_encoder_destroy(&intel_dp->base.base);
- intel_dp_destroy(&intel_connector->base);
+ intel_dp_encoder_destroy(&intel_encoder->base);
+ intel_dp_destroy(connector);
return;
}
+ /* We now know it's not a ghost, init power sequence regs. */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector,
- edid);
- intel_dp->edid_mode_count =
- drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- intel_dp->edid = edid;
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ } else {
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ intel_connector->edid = edid;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
}
+
+ /* fall back to VBT if available for eDP */
+ if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode)
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+
ironlake_edp_panel_vdd_off(intel_dp, false);
}
- intel_encoder->hot_plug = intel_dp_hot_plug;
-
if (is_edp(intel_dp)) {
- dev_priv->int_edp_connector = connector;
- intel_panel_setup_backlight(dev);
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
}
intel_dp_add_properties(intel_dp, connector);
@@ -2676,3 +2864,45 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+
+ intel_dig_port->port = port;
+ intel_dig_port->dp.output_reg = output_reg;
+
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+ intel_dp_init_connector(intel_dig_port, intel_connector);
+}
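
After the eDP probe above, intel_connector->edid holds either a usable EDID, ERR_PTR(-EINVAL) when an EDID was read but produced no modes, or ERR_PTR(-ENOENT) when nothing was read at all; consumers (here and in the LVDS code later in this patch) check IS_ERR_OR_NULL() before dereferencing it. The sketch below models that sentinel convention in userspace with minimal stand-ins for the kernel's ERR_PTR helpers.

#include <stdio.h>
#include <errno.h>

/* Minimal stand-ins for the kernel's ERR_PTR helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-4095;
}

struct edid { unsigned char data[128]; };

/* Cache either a usable EDID or a reason why it is unusable. */
static struct edid *cache_edid(struct edid *probed, int has_modes)
{
	if (!probed)
		return ERR_PTR(-ENOENT);	/* nothing on the DDC bus */
	if (!has_modes)
		return ERR_PTR(-EINVAL);	/* EDID read but useless */
	return probed;
}

int main(void)
{
	struct edid *cached = cache_edid(NULL, 0);

	if (IS_ERR_OR_NULL(cached))
		printf("no cached EDID (%ld)\n", PTR_ERR(cached));
	return 0;
}
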
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fe7142502f43..8a1bd4a3ad0d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -94,6 +94,7 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -163,6 +164,11 @@ struct intel_encoder {
int crtc_mask;
};
+struct intel_panel {
+ struct drm_display_mode *fixed_mode;
+ int fitting_mode;
+};
+
struct intel_connector {
struct drm_connector base;
/*
@@ -179,12 +185,19 @@ struct intel_connector {
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
+
+ /* Panel info for eDP and LVDS */
+ struct intel_panel panel;
+
+ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ struct edid *edid;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
+ enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256];
/*
* Whether the crtc and the connected output pipeline is active. Implies
@@ -198,6 +211,8 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
+ atomic_t unpin_work_count;
+
/* Display surface base address adjustment for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
@@ -212,12 +227,14 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
+ uint32_t ddi_pll_sel;
};
struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
+ bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
@@ -317,10 +334,8 @@ struct dip_infoframe {
} __attribute__((packed));
struct intel_hdmi {
- struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
- int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
@@ -331,18 +346,15 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
-#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
- struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
- enum port port;
uint32_t color_range;
uint8_t link_bw;
uint8_t lane_count;
@@ -357,11 +369,16 @@ struct intel_dp {
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
- struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
- struct edid *edid; /* cached EDID for eDP */
- int edid_mode_count;
+ struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+ struct intel_encoder base;
+ enum port port;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
};
static inline struct drm_crtc *
@@ -380,11 +397,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
struct work_struct work;
- struct drm_device *dev;
+ struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
- int pending;
+ atomic_t pending;
+#define INTEL_FLIP_INACTIVE 0
+#define INTEL_FLIP_PENDING 1
+#define INTEL_FLIP_COMPLETE 2
bool enable_stall_check;
};
@@ -395,6 +415,8 @@ struct intel_fbc_work {
int interval;
};
+int intel_pch_rawclk(struct drm_device *dev);
+
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -405,7 +427,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
@@ -418,10 +445,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern int intel_edp_target_clock(struct intel_encoder *,
struct drm_display_mode *mode);
@@ -431,6 +475,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
/* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -439,7 +487,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -473,6 +521,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
return to_intel_connector(connector)->encoder;
}
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+ return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+ return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -481,8 +554,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
@@ -550,6 +627,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch);
+
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -573,12 +654,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
-extern void intel_enable_ddi(struct intel_encoder *encoder);
-extern void intel_disable_ddi(struct intel_encoder *encoder);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
-extern void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
#endif /* __INTEL_DRV_H__ */
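
The header now funnels DP and HDMI state into a single struct intel_digital_port that embeds the encoder, and the new inline helpers recover the containing port from any embedded member via container_of. A stripped-down, compilable illustration of that layout and pointer arithmetic; the struct members are trimmed to what the example needs.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct intel_encoder { int type; };
struct intel_dp { int output_reg; };
struct intel_hdmi { int sdvox_reg; };

struct intel_digital_port {
	struct intel_encoder base;
	struct intel_dp dp;
	struct intel_hdmi hdmi;
};

static struct intel_digital_port *dp_to_dig_port(struct intel_dp *dp)
{
	return container_of(dp, struct intel_digital_port, dp);
}

int main(void)
{
	struct intel_digital_port port = { .dp = { .output_reg = 0x64000 } };

	/* Starting from the embedded DP state we get back to the port. */
	printf("%d\n", dp_to_dig_port(&port.dp)->dp.output_reg == 0x64000);
	return 0;
}
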
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9ba0aaed7ee8..2ee9821b9d93 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -36,10 +36,15 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+ return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_hdmi, base.base);
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->hdmi;
}
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_hdmi, base);
+ return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+ avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+
intel_set_infoframe(encoder, &avi_if);
}
@@ -754,16 +762,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@@ -786,6 +794,9 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
@@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
}
return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -898,8 +912,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
return -EINVAL;
done:
- if (intel_hdmi->base.base.crtc) {
- struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+ if (intel_dig_port->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@@ -914,12 +928,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
- .mode_fixup = intel_hdmi_mode_fixup,
- .mode_set = intel_ddi_mode_set,
- .disable = intel_encoder_noop,
-};
-
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
@@ -951,43 +959,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
- struct intel_hdmi *intel_hdmi;
-
- intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
- if (!intel_hdmi)
- return;
-
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_hdmi);
- return;
- }
-
- intel_encoder = &intel_hdmi->base;
- drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
+ enum port port = intel_dig_port->port;
- connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- intel_encoder->type = INTEL_OUTPUT_HDMI;
-
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
-
- intel_hdmi->ddi_port = port;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1007,8 +996,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
BUG();
}
- intel_hdmi->sdvox_reg = sdvox_reg;
-
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1026,21 +1013,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev)) {
- intel_encoder->enable = intel_enable_ddi;
- intel_encoder->disable = intel_disable_ddi;
- intel_encoder->get_hw_state = intel_ddi_get_hw_state;
- drm_encoder_helper_add(&intel_encoder->base,
- &intel_hdmi_helper_funcs_hsw);
- } else {
- intel_encoder->enable = intel_enable_hdmi;
- intel_encoder->disable = intel_disable_hdmi;
- intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
- drm_encoder_helper_add(&intel_encoder->base,
- &intel_hdmi_helper_funcs);
- }
- intel_connector->get_hw_state = intel_connector_get_hw_state;
-
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_hdmi_add_properties(intel_hdmi, connector);
@@ -1056,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
+
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+
+ intel_dig_port->port = port;
+ intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+ intel_dig_port->dp.output_reg = 0;
+
+ intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2c6dbc0971c..3ef5af15b812 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -432,7 +432,7 @@ timeout:
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- bus->force_bit = true;
+ bus->force_bit = 1;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
- bus->force_bit = true;
+ bus->force_bit = 1;
intel_gpio_setup(bus, port);
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- bus->force_bit = force_bit;
+ bus->force_bit += force_bit ? 1 : -1;
+ DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
}
void intel_teardown_gmbus(struct drm_device *dev)
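
force_bit becomes a counter rather than a bool: intel_gmbus_force_bit() increments or decrements it, so GPIO bit-banging stays enabled until the last requester releases it and one caller's disable cannot undo another's enable (the GMBUS-timeout path above simply sets it to 1). A tiny sketch of the counting behaviour, with the structure reduced to the one field that matters.

#include <stdio.h>

struct intel_gmbus {
	int force_bit;	/* > 0: use GPIO bit-banging instead of GMBUS */
};

/* Reference-count style toggle; a plain bool would let the second
 * "disable" request turn bit-banging off while another user still
 * depends on it. */
static void gmbus_force_bit(struct intel_gmbus *bus, int enable)
{
	bus->force_bit += enable ? 1 : -1;
	printf("force_bit now %d\n", bus->force_bit);
}

int main(void)
{
	struct intel_gmbus bus = { 0 };

	gmbus_force_bit(&bus, 1);	/* first user */
	gmbus_force_bit(&bus, 1);	/* second user */
	gmbus_force_bit(&bus, 0);	/* first user done, still forced */
	return 0;
}
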
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index edba93b3474b..17aee74258ad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -40,28 +40,30 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds {
- struct intel_encoder base;
+struct intel_lvds_connector {
+ struct intel_connector base;
- struct edid *edid;
+ struct notifier_block lid_notifier;
+};
+
+struct intel_lvds_encoder {
+ struct intel_encoder base;
- int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool pfit_dirty;
- struct drm_display_mode *fixed_mode;
+ struct intel_lvds_connector *attached_connector;
};
-static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_lvds, base.base);
+ return container_of(encoder, struct intel_lvds_encoder, base.base);
}
-static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_lvds, base);
+ return container_of(connector, struct intel_lvds_connector, base.base);
}
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (intel_lvds->pfit_dirty) {
+ if (lvds_encoder->pfit_dirty) {
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- intel_lvds->pfit_control,
- intel_lvds->pfit_pgm_ratios);
+ lvds_encoder->pfit_control,
+ lvds_encoder->pfit_pgm_ratios);
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+ lvds_encoder->pfit_dirty = false;
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
- if (intel_lvds->pfit_control) {
+ if (lvds_encoder->pfit_control) {
I915_WRITE(PFIT_CONTROL, 0);
- intel_lvds->pfit_dirty = true;
+ lvds_encoder->pfit_dirty = true;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
- struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+ struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
- if (intel_encoder_check_is_cloned(&intel_lvds->base))
+ if (intel_encoder_check_is_cloned(&lvds_encoder->base))
return false;
/*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
- intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
mode, adjusted_mode);
return true;
}
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
- switch (intel_lvds->fitting_mode) {
+ switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- if (pfit_control != intel_lvds->pfit_control ||
- pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
- intel_lvds->pfit_dirty = true;
+ if (pfit_control != lvds_encoder->pfit_control ||
+ pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+ lvds_encoder->pfit_control = pfit_control;
+ lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+ lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- if (intel_lvds->edid)
- return drm_add_edid_modes(connector, intel_lvds->edid);
+ /* use cached edid if we have one */
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ return drm_add_edid_modes(connector, lvds_connector->base.edid);
- mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+ mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
if (mode == NULL)
return 0;
@@ -496,10 +503,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, lid_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector = dev_priv->int_lvds_connector;
+ struct intel_lvds_connector *lvds_connector =
+ container_of(nb, struct intel_lvds_connector, lid_notifier);
+ struct drm_connector *connector = &lvds_connector->base.base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
@@ -508,9 +516,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* check and update the status of LVDS connector after receiving
* the LID notification event.
*/
- if (connector)
- connector->status = connector->funcs->detect(connector,
- false);
+ connector->status = connector->funcs->detect(connector, false);
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
@@ -526,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
dev_priv->modeset_on_lid = 0;
mutex_lock(&dev->mode_config.mutex);
- intel_modeset_check_state(dev);
+ intel_modeset_setup_hw_state(dev, true);
mutex_unlock(&dev->mode_config.mutex);
return NOTIFY_OK;
@@ -541,13 +547,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds_connector *lvds_connector =
+ to_lvds_connector(connector);
+
+ if (lvds_connector->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ kfree(lvds_connector->base.edid);
- intel_panel_destroy_backlight(dev);
+ intel_panel_destroy_backlight(connector->dev);
+ intel_panel_fini(&lvds_connector->base.panel);
- if (dev_priv->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -557,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.scaling_mode_property) {
- struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+ struct drm_crtc *crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (intel_lvds->fitting_mode == value) {
+ if (intel_connector->panel.fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- intel_lvds->fitting_mode = value;
+ intel_connector->panel.fitting_mode = value;
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@@ -763,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "ZOTAC ZBOXSD-ID12/ID13",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
- DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
- },
- },
- {
- .callback = intel_no_lvds_dmi_callback,
.ident = "Gigabyte GA-D525TUD",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
@@ -912,12 +917,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds;
+ struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
+ struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edid *edid;
struct drm_crtc *crtc;
u32 lvds;
int pipe;
@@ -945,23 +953,25 @@ bool intel_lvds_init(struct drm_device *dev)
}
}
- intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
- if (!intel_lvds) {
+ lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+ if (!lvds_encoder)
return false;
- }
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_lvds);
+ lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+ if (!lvds_connector) {
+ kfree(lvds_encoder);
return false;
}
+ lvds_encoder->attached_connector = lvds_connector;
+
if (!HAS_PCH_SPLIT(dev)) {
- intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+ lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
}
- intel_encoder = &intel_lvds->base;
+ intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
+ intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -993,14 +1003,10 @@ bool intel_lvds_init(struct drm_device *dev)
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
- /*
- * the initial panel fitting mode will be FULL_SCREEN.
- */
-
- drm_connector_attach_property(&intel_connector->base,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1015,20 +1021,21 @@ bool intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- intel_lvds->edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv,
- pin));
- if (intel_lvds->edid) {
- if (drm_add_edid_modes(connector,
- intel_lvds->edid)) {
+ edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
- intel_lvds->edid);
+ edid);
} else {
- kfree(intel_lvds->edid);
- intel_lvds->edid = NULL;
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
}
+ } else {
+ edid = ERR_PTR(-ENOENT);
}
- if (!intel_lvds->edid) {
+ lvds_connector->base.edid = edid;
+
+ if (IS_ERR_OR_NULL(edid)) {
/* Didn't get an EDID, so
* Set wide sync ranges so we get all modes
* handed to valid_mode for checking
@@ -1041,22 +1048,26 @@ bool intel_lvds_init(struct drm_device *dev)
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- intel_lvds->fixed_mode =
- drm_mode_duplicate(dev, scan);
- intel_find_lvds_downclock(dev,
- intel_lvds->fixed_mode,
- connector);
- goto out;
+ DRM_DEBUG_KMS("using preferred mode from EDID: ");
+ drm_mode_debug_printmodeline(scan);
+
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ if (fixed_mode) {
+ intel_find_lvds_downclock(dev, fixed_mode,
+ connector);
+ goto out;
+ }
}
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
- intel_lvds->fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_lvds->fixed_mode) {
- intel_lvds->fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("using mode from VBT: ");
+ drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode) {
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
@@ -1076,16 +1087,17 @@ bool intel_lvds_init(struct drm_device *dev)
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (intel_lvds->fixed_mode) {
- intel_lvds->fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!intel_lvds->fixed_mode)
+ if (!fixed_mode)
goto failed;
out:
@@ -1100,16 +1112,15 @@ out:
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
- dev_priv->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+ lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
- dev_priv->lid_notifier.notifier_call = NULL;
+ lvds_connector->lid_notifier.notifier_call = NULL;
}
- /* keep the LVDS connector */
- dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- intel_panel_setup_backlight(dev);
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
return true;
@@ -1117,7 +1128,9 @@ failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_lvds);
- kfree(intel_connector);
+ if (fixed_mode)
+ drm_mode_destroy(dev, fixed_mode);
+ kfree(lvds_encoder);
+ kfree(lvds_connector);
return false;
}
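
The LVDS fixed-mode discovery keeps its previous priority order, now feeding a local fixed_mode into intel_panel_init(): the preferred EDID mode wins, then the VBT LFP mode, then whatever mode the BIOS left running on the pipe, and if all three fail the connector is torn down. The sketch below condenses that fallback chain; the probe helpers are placeholders, not driver functions.

#include <stdio.h>

struct mode { int w, h; };

/* Placeholder probes; each returns 1 and fills *m when a mode exists. */
static int probe_edid_preferred(struct mode *m) { (void)m; return 0; }
static int probe_vbt_lfp(struct mode *m) { m->w = 1366; m->h = 768; return 1; }
static int probe_bios_current(struct mode *m) { (void)m; return 0; }

static int find_fixed_mode(struct mode *m)
{
	if (probe_edid_preferred(m))
		return 1;		/* 1) preferred mode from EDID */
	if (probe_vbt_lfp(m))
		return 1;		/* 2) mode from the VBT LFP table */
	if (probe_bios_current(m))
		return 1;		/* 3) mode the BIOS left enabled */
	return 0;			/* give up, no panel mode */
}

int main(void)
{
	struct mode m;

	if (find_fixed_mode(&m))
		printf("fixed mode %dx%d\n", m.w, m.h);
	return 0;
}
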
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cabd84bf66eb..b00f1c83adce 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
- kfree(edid);
return ret;
}
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
+ int ret;
edid = drm_get_edid(connector, adapter);
if (!edid)
return 0;
- return intel_connector_update_modes(connector, edid);
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
}
static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -121,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5530413213d8..7741c22c934c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
+ DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e2aacd329545..bee8cb6108a7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -130,32 +130,34 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
-static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
/* Restore the CTL value if it got lost, e.g. after a GPU reset */
if (HAS_PCH_SPLIT(dev_priv->dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2);
- if (dev_priv->saveBLC_PWM_CTL2 == 0) {
- dev_priv->saveBLC_PWM_CTL2 = val;
+ if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
- I915_WRITE(BLC_PWM_PCH_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL2;
+ val = dev_priv->regfile.saveBLC_PWM_CTL2;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val);
}
} else {
val = I915_READ(BLC_PWM_CTL);
- if (dev_priv->saveBLC_PWM_CTL == 0) {
- dev_priv->saveBLC_PWM_CTL = val;
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL = val;
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 =
+ I915_READ(BLC_PWM_CTL2);
} else if (val == 0) {
- I915_WRITE(BLC_PWM_CTL,
- dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL;
+ val = dev_priv->regfile.saveBLC_PWM_CTL;
+ I915_WRITE(BLC_PWM_CTL, val);
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->regfile.saveBLC_PWM_CTL2);
}
}
@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
- max = i915_read_blc_pwm_ctl(dev_priv);
+ max = i915_read_blc_pwm_ctl(dev);
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
@@ -275,7 +276,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,26 +375,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
-#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
-
- if (i915_panel_ignore_lid)
- return i915_panel_ignore_lid > 0 ?
- connector_status_connected :
- connector_status_disconnected;
- /* opregion lid state on HP 2540p is wrong at boot up,
- * appears to be either the BIOS or Linux ACPI fault */
-#if 0
/* Assume that the BIOS does not lie through the OpRegion... */
- if (dev_priv->opregion.lid_state)
+ if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
-#endif
+ }
- return connector_status_unknown;
+ switch (i915_panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
}
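
The rewritten intel_panel_detect() trusts the OpRegion lid state whenever i915_panel_ignore_lid is 0, and otherwise maps the module parameter to a forced result (-2 connected, -1 disconnected, anything else unknown). A standalone sketch of that precedence, with lid_open < 0 standing in for "no OpRegion lid information".

#include <stdio.h>

enum status { DISCONNECTED, CONNECTED, UNKNOWN };

/* ignore_lid: 0 = trust the ACPI lid state, -1/-2 = force a result.
 * lid_open < 0 means no OpRegion lid information is available. */
static enum status panel_detect(int ignore_lid, int lid_open)
{
	if (!ignore_lid && lid_open >= 0)
		return lid_open ? CONNECTED : DISCONNECTED;

	switch (ignore_lid) {
	case -2:
		return CONNECTED;
	case -1:
		return DISCONNECTED;
	default:
		return UNKNOWN;
	}
}

int main(void)
{
	printf("%d\n", panel_detect(0, 1));	/* lid open -> connected */
	printf("%d\n", panel_detect(-1, 1));	/* forced disconnected */
	return 0;
}
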
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -416,21 +414,14 @@ static const struct backlight_ops intel_panel_bl_ops = {
.get_brightness = intel_panel_get_brightness,
};
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
- struct drm_connector *connector;
intel_panel_init_backlight(dev);
- if (dev_priv->int_lvds_connector)
- connector = dev_priv->int_lvds_connector;
- else if (dev_priv->int_edp_connector)
- connector = dev_priv->int_edp_connector;
- else
- return -ENODEV;
-
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -460,9 +451,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
backlight_device_unregister(dev_priv->backlight);
}
#else
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
{
- intel_panel_init_backlight(dev);
+ intel_panel_init_backlight(connector->dev);
return 0;
}
@@ -471,3 +462,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
return;
}
#endif
+
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode)
+{
+ panel->fixed_mode = fixed_mode;
+
+ return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+ struct intel_connector *intel_connector =
+ container_of(panel, struct intel_connector, panel);
+
+ if (panel->fixed_mode)
+ drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 442968f8b201..3280cffe50f4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,6 +44,14 @@
* i915.i915_enable_fbc parameter
*/
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ */
+ return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled &&
- !to_intel_crtc(tmp_crtc)->primary_disabled &&
- tmp_crtc->fb) {
+ if (intel_crtc_active(tmp_crtc) &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
int entries;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled)
+ if (!intel_crtc_active(crtc))
return false;
clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1286,6 +1293,7 @@ static void valleyview_update_wm(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
vlv_update_drain_latency(dev);
@@ -1302,17 +1310,23 @@ static void valleyview_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF_VLV,
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1325,10 +1339,11 @@ static void valleyview_update_wm(struct drm_device *dev)
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+ (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_device *dev)
@@ -1351,17 +1366,18 @@ static void g4x_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1374,11 +1390,11 @@ static void g4x_update_wm(struct drm_device *dev)
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
@@ -1467,10 +1483,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
+ wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
} else
@@ -1478,10 +1497,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
+ wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
enabled = crtc;
@@ -1571,8 +1593,7 @@ static void i830_update_wm(struct drm_device *dev)
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
+ 4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -1805,8 +1826,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
enabled |= 2;
}
- if ((dev_priv->num_pipe == 3) &&
- g4x_compute_wm0(dev, 2,
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermarks.
+ *
+ * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
+ * and disabled in descending order.
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
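For reference, a minimal sketch (hypothetical helper, not part of the patch) of the latency conversion used by the WM1-WM3 writes above; the assumption, suggested by the "* 500" factor, is that the raw WM1-WM3 latency fields are in 0.5 us units so the product is in nanoseconds:

static inline unsigned int snb_srwm_latency_ns(unsigned int raw)
{
	/* e.g. a raw field value of 4 -> 4 * 500 = 2000 ns (2 us) */
	return raw * 500;
}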
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if (g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
@@ -1869,12 +1992,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
- /* WM3 */
+ /* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
+ &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+ !ironlake_compute_srwm(dev, 3, enabled,
+ 2 * SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
return;
I915_WRITE(WM3_LP_ILK,
@@ -1923,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (!intel_crtc_active(crtc)) {
*sprite_wm = display->guard_size;
return false;
}
@@ -2323,7 +2451,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits = gen6_rps_limits(dev_priv, &val);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
@@ -2398,12 +2526,12 @@ static void gen6_enable_rps(struct drm_device *dev)
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
- u32 pcu_mbox, rc6_mask = 0;
+ u32 rc6vids, pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
- int i;
+ int i, ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
@@ -2497,30 +2625,16 @@ static void gen6_enable_rps(struct drm_device *dev)
GEN6_RP_UP_BUSY_AVG |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
- /* Check for overclock support */
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
- pcu_mbox = I915_READ(GEN6_PCODE_DATA);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- if (pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->rps.max_delay = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+ if (!ret) {
+ pcu_mbox = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+ if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
@@ -2534,6 +2648,20 @@ static void gen6_enable_rps(struct drm_device *dev)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ if (IS_GEN6(dev) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+
gen6_gt_force_wake_put(dev_priv);
}
@@ -2541,10 +2669,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
+ int gpu_freq;
+ unsigned int ia_freq, max_ia_freq;
int scaling_factor = 180;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
max_ia_freq = cpufreq_quick_get_max(0);
/*
@@ -2575,17 +2704,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
+ sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq | gpu_freq);
}
}
@@ -2593,16 +2716,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
+ if (dev_priv->ips.renderctx) {
+ i915_gem_object_unpin(dev_priv->ips.renderctx);
+ drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+ dev_priv->ips.renderctx = NULL;
}
- if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
+ if (dev_priv->ips.pwrctx) {
+ i915_gem_object_unpin(dev_priv->ips.pwrctx);
+ drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+ dev_priv->ips.pwrctx = NULL;
}
}
@@ -2628,14 +2751,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
+ if (dev_priv->ips.renderctx == NULL)
+ dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.renderctx)
return -ENOMEM;
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
+ if (dev_priv->ips.pwrctx == NULL)
+ dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
@@ -2647,6 +2770,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ bool was_interruptible;
int ret;
/* rc6 disabled by default due to repeated reports of hanging during
@@ -2661,6 +2785,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
if (ret)
return;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
/*
* GPU can automatically power down the render unit if given a page
* to save state.
@@ -2668,12 +2795,13 @@ static void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
+ dev_priv->mm.interruptible = was_interruptible;
return;
}
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+ intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -2688,14 +2816,15 @@ static void ironlake_enable_rc6(struct drm_device *dev)
* does an implicit flush, combined with MI_FLUSH above, it should be
* safe to assume that renderctx is valid
*/
- ret = intel_wait_ring_idle(ring);
+ ret = intel_ring_idle(ring);
+ dev_priv->mm.interruptible = was_interruptible;
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
return;
}
- I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
@@ -3304,37 +3433,72 @@ static void intel_init_emon(struct drm_device *dev)
void intel_disable_gt_powersave(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+ mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
}
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ rps.delayed_resume_work.work);
+ struct drm_device *dev = dev_priv->dev;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
void intel_enable_gt_powersave(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
- gen6_enable_rps(dev);
- gen6_update_ring_freq(dev);
+ /*
+ * PCU communication is slow and this doesn't need to be
+ * done at any specific time, so do this out of our fast path
+ * to make resume and init faster.
+ */
+ schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+ round_jiffies_up_relative(HZ));
}
}
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/* Required for FBC */
- dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
- DPFCRUNIT_CLOCK_GATE_DISABLE |
- DPFDUNIT_CLOCK_GATE_DISABLE;
- /* Required for CxSR */
- dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3342,8 +3506,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
@@ -3354,9 +3516,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- I915_WRITE(ILK_DSPCLK_GATE,
- (I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE));
+ dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
@@ -3378,33 +3538,70 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPFC_DIS1 |
- ILK_DPFC_DIS2 |
- ILK_CLK_FBC);
}
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* The below fixes the weird display corruption, a few pixels shifted
+ * downward, on (only) LVDS of some HP laptops with IVY.
+ */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+ /* WADP0ClockGatingDisable */
+ for_each_pipe(pipe) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+ }
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled */
+ I915_WRITE(_3D_CHICKEN,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+ /* WaSetupGtModeTdRowDispatch */
+ if (IS_SNB_GT1(dev))
+ I915_WRITE(GEN6_GT_MODE,
+ _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
@@ -3454,11 +3651,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE |
- ILK_DPFD_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) |
+ ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3473,6 +3671,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+ cpt_init_clock_gating(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3487,13 +3687,24 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * TODO: this bit should only be enabled when really needed, then
+ * disabled when not needed anymore in order to save power.
+ */
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ I915_WRITE(SOUTH_DSPCLK_GATE_D,
+ I915_READ(SOUTH_DSPCLK_GATE_D) |
+ PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@@ -3504,12 +3715,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
- I915_WRITE(IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3538,6 +3743,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
@@ -3547,27 +3756,38 @@ static void haswell_init_clock_gating(struct drm_device *dev)
WM_DBG_DISALLOW_SPRITE |
WM_DBG_DISALLOW_MAXFIFO);
+ lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t snpcr;
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ /* WaDisablePSDDualDispatchEnable */
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ else
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3576,7 +3796,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
+ GEN7_WA_L3_CHICKEN_MODE);
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ else
+ I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
@@ -3607,6 +3838,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
intel_flush_display_plane(dev_priv, pipe);
}
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3620,39 +3852,59 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ cpt_init_clock_gating(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* WaDisableDopClockGating */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3704,6 +3956,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
PLANEA_FLIPDONE_INT_EN);
+
+ /*
+ * WaDisableVLVClockGating_VBIIssue
+ * Disable clock gating on the GCFG unit to prevent a delay
+ * in the reporting of vblank events.
+ */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3722,6 +3981,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
}
static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3777,44 +4040,11 @@ static void i830_init_clock_gating(struct drm_device *dev)
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->display.init_clock_gating(dev);
-
- if (dev_priv->display.init_pch_clock_gating)
- dev_priv->display.init_pch_clock_gating(dev);
}
/* Starting with Haswell, we have different power wells for
@@ -3840,7 +4070,7 @@ void intel_init_power_wells(struct drm_device *dev)
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
- if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
+ if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
@@ -3878,11 +4108,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- if (HAS_PCH_IBX(dev))
- dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
- else if (HAS_PCH_CPT(dev))
- dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
@@ -3905,7 +4130,7 @@ void intel_init_pm(struct drm_device *dev)
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
@@ -3993,6 +4218,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
DRM_ERROR("GT thread status wait timed out\n");
}
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@@ -4006,7 +4237,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4016,6 +4247,13 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
__gen6_gt_wait_for_thread_c0(dev_priv);
}
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
+}
+
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@@ -4029,8 +4267,9 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4067,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+ /* something from same cacheline, but !FORCEWAKE */
+ POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
@@ -4111,13 +4352,20 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
+}
+
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4128,49 +4376,90 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
- /* The below doubles as a POSTING_READ */
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
gen6_gt_check_fifodbg(dev_priv);
}
+void intel_gt_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+}
+
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->gt_lock);
+ intel_gt_reset(dev);
+
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
+}
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured
- * MT forcewake, and if the device is in RC6, then
- * force_wake_mt_get will not wake the device and the
- * ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT
- * forcewake being disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->gt.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+ return -EAGAIN;
}
+
+ I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ *val = I915_READ(GEN6_PCODE_DATA);
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
}
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
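A minimal usage sketch of the two new mailbox helpers (example_query_oc_params is hypothetical and not part of the patch; the helpers return 0 on success and expect rps.hw_lock to be held, as their WARN_ONs assert):

static void example_query_oc_params(struct drm_i915_private *dev_priv)
{
	u32 pcu_mbox = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox) == 0 &&
	    (pcu_mbox & (1 << 31)))	/* OC supported */
		DRM_DEBUG_DRIVER("max overclock delay %d (%d MHz)\n",
				 pcu_mbox & 0xff, (pcu_mbox & 0xff) * 50);
	mutex_unlock(&dev_priv->rps.hw_lock);
}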
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ecbc5c5dbbbc..42ff97d667d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -45,7 +45,7 @@ struct pipe_control {
static inline int ring_space(struct intel_ring_buffer *ring)
{
- int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
return space;
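A worked example (hypothetical numbers, not part of the patch) of the free-space rule this constant enforces -- see also the I915_RING_FREE_SPACE comment added to intel_ringbuffer.h further below:

static int ring_space_example(void)
{
	int head = 0x1000, tail = 0x2000, size = 128 * 1024;
	int space = head - (tail + I915_RING_FREE_SPACE);	/* 4096 - 8256 = -4160 */

	if (space < 0)
		space += size;		/* 126912 bytes usable, never the full ring */
	return space;
}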
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
/*
* TLB invalidate requires a post-sync write.
*/
- flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
ret = intel_ring_begin(ring, 4);
@@ -505,13 +505,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
- if (INTEL_INFO(dev)->gen > 3) {
+ if (INTEL_INFO(dev)->gen > 3)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
- if (IS_GEN7(dev))
- I915_WRITE(GFX_MODE_GEN7,
- _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- }
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ */
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ /* Required for the hardware to program scanline values for waiting */
+ if (INTEL_INFO(dev)->gen == 6)
+ I915_WRITE(GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
@@ -547,23 +559,24 @@ static int init_render_ring(struct intel_ring_buffer *ring)
static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
+
if (!ring->private)
return;
+ if (HAS_BROKEN_CS_TLB(dev))
+ drm_gem_object_unreference(to_gem_object(ring->private));
+
cleanup_pipe_control(ring);
}
static void
update_mboxes(struct intel_ring_buffer *ring,
- u32 seqno,
- u32 mmio_offset)
+ u32 mmio_offset)
{
- intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_REGISTER |
- MI_SEMAPHORE_UPDATE);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
}
/**
@@ -576,8 +589,7 @@ update_mboxes(struct intel_ring_buffer *ring,
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_ring_buffer *ring,
- u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
{
u32 mbox1_reg;
u32 mbox2_reg;
@@ -590,13 +602,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
- *seqno = i915_gem_next_request_seqno(ring);
-
- update_mboxes(ring, *seqno, mbox1_reg);
- update_mboxes(ring, *seqno, mbox2_reg);
+ update_mboxes(ring, mbox1_reg);
+ update_mboxes(ring, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, *seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
@@ -653,10 +663,8 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_ring_buffer *ring,
- u32 *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
{
- u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
@@ -677,7 +685,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
@@ -696,11 +704,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
@@ -888,25 +895,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-i9xx_add_request(struct intel_ring_buffer *ring,
- u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
{
- u32 seqno;
int ret;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- seqno = i915_gem_next_request_seqno(ring);
-
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
@@ -964,7 +966,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
}
static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 length,
+ unsigned flags)
{
int ret;
@@ -975,35 +979,71 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
- MI_BATCH_NON_SECURE_I965);
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ struct drm_i915_gem_object *obj = ring->private;
+ u32 cs_offset = obj->gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+ /* Blit the batch (which now has all relocs applied) to the stable batch
+ * scratch bo area (so that the CS never stumbles over its tlb
+ * invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
return 0;
}
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
@@ -1012,7 +1052,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
@@ -1075,6 +1115,29 @@ err:
return ret;
}
+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ if (!dev_priv->status_page_dmah) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+ }
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
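A hedged illustration (hypothetical numbers, not part of the patch) of the address packing done above for gen4+, where the shift appears to fold bits 35:32 of the DMA address into bits 7:4 of the HWS_PGA value:

static u32 hws_pga_pack_example(void)
{
	u64 busaddr = 0x123450000ULL;	/* example bus address with bit 32 set */
	u32 addr = (u32)busaddr;	/* low 32 bits: 0x23450000 */

	addr |= (busaddr >> 28) & 0xf0;	/* bits 35:32 -> bits 7:4, here 0x10 */
	return addr;			/* HWS_PGA would be written with 0x23450010 */
}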
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
@@ -1086,6 +1149,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = 32 * PAGE_SIZE;
+ memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
init_waitqueue_head(&ring->irq_queue);
@@ -1093,6 +1157,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ret = init_status_page(ring);
if (ret)
return ret;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
}
obj = i915_gem_alloc_object(dev, ring->size);
@@ -1157,7 +1226,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
- ret = intel_wait_ring_idle(ring);
+ ret = intel_ring_idle(ring);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
@@ -1176,28 +1245,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
cleanup_status_page(ring);
}
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
- uint32_t __iomem *virt;
- int rem = ring->size - ring->tail;
-
- if (ring->space < rem) {
- int ret = intel_wait_ring_buffer(ring, rem);
- if (ret)
- return ret;
- }
-
- virt = ring->virtual_start + ring->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ring->tail = 0;
- ring->space = ring_space(ring);
-
- return 0;
-}
-
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
int ret;
@@ -1231,7 +1278,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (request->tail == -1)
continue;
- space = request->tail - (ring->tail + 8);
+ space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
if (space >= n) {
@@ -1266,7 +1313,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
return 0;
}
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,6 +1356,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return -EBUSY;
}
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+ uint32_t __iomem *virt;
+ int rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = ring_wait_for_space(ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ virt = ring->virtual_start + ring->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
+
+ ring->tail = 0;
+ ring->space = ring_space(ring);
+
+ return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ int ret;
+
+ /* We need to add any requests required to flush the objects and ring */
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring, NULL, NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait upon the last request to be completed */
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ seqno = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list)->seqno;
+
+ return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
@@ -1320,6 +1421,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
+
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
@@ -1327,7 +1433,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
}
if (unlikely(ring->space < n)) {
- ret = intel_wait_ring_buffer(ring, n);
+ ret = ring_wait_for_space(ring, n);
if (unlikely(ret))
return ret;
}
@@ -1391,10 +1497,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
if (invalidate & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1402,8 +1515,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
}
static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
@@ -1411,7 +1546,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -1432,10 +1569,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
if (invalidate & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB;
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1490,7 +1634,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (IS_HASWELL(dev))
+ ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1501,10 +1647,25 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
+ /* Workaround batchbuffer to combat CS tlb bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
- if (!I915_NEED_GFX_HWS(dev)) {
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to ping batch bo\n");
+ return ret;
+ }
+
+ ring->private = obj;
}
return intel_init_ring_buffer(dev, ring);
@@ -1514,6 +1675,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
ring->name = "render ring";
ring->id = RCS;
@@ -1551,16 +1713,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
- if (!I915_NEED_GFX_HWS(dev))
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
ring->effective_size = ring->size;
- if (IS_I830(ring->dev))
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
ring->virtual_start = ioremap_wc(start, size);
@@ -1570,6 +1729,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
return -ENOMEM;
}
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1618,7 +1783,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->init = init_ring_common;
-
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ea7a311a1f0..6af87cd05725 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,17 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
struct intel_hw_status_page {
u32 *page_addr;
unsigned int gfx_addr;
@@ -70,8 +81,7 @@ struct intel_ring_buffer {
int __must_check (*flush)(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_ring_buffer *ring,
- u32 *seqno);
+ int (*add_request)(struct intel_ring_buffer *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -81,7 +91,10 @@ struct intel_ring_buffer {
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
- u32 offset, u32 length);
+ u32 offset, u32 length,
+ unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
@@ -181,27 +194,21 @@ intel_read_status_page(struct intel_ring_buffer *ring,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
+#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
-int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
-{
- return intel_wait_ring_buffer(ring, ring->size - 8);
-}
-
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-
void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
@@ -217,6 +224,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
return ring->tail;
}
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ BUG_ON(ring->outstanding_lazy_request == 0);
+ return ring->outstanding_lazy_request;
+}
+
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
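The two dispatch flags introduced above replace the flag-less dispatch_execbuffer() signature. A hedged example of how a caller might combine them (the predicates are hypothetical; only the flag names come from this patch):

	unsigned flags = 0;

	if (secure_batch)		/* hypothetical: privileged batch */
		flags |= I915_DISPATCH_SECURE;
	if (batch_is_prepinned)		/* hypothetical: skip the CS TLB copy */
		flags |= I915_DISPATCH_PINNED;

	ret = ring->dispatch_execbuffer(ring, exec_start, exec_len, flags);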
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a6ac0b416964..c275bf0fa36d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -509,7 +509,7 @@ out:
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
- u8 retry = 5;
+ u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i;
@@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
* command to be complete.
*
* Check 5 times in case the hardware failed to read the docs.
+ *
+ * Also beware that the first response by many devices is to
+ * reply PENDING and stall for time. TVs are notorious for
+ * requiring longer than specified to complete their replies.
+ * Originally (in the DDX long ago), the delay was only ever 15ms
+ * with an additional delay of 30ms applied for TVs added later after
+ * many experiments. To accommodate both sets of delays, we do a
+ * sequence of slow checks if the device is falling behind and fails
+ * to reply within 5*15µs.
*/
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && retry--) {
- udelay(15);
+ while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+ if (retry < 10)
+ msleep(15);
+ else
+ udelay(15);
+
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
@@ -1228,6 +1241,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
+ /* HW workaround for IBX, we need to move the port to
+ * transcoder A before disabling it. */
+ if (HAS_PCH_IBX(encoder->base.dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(encoder->base.dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
@@ -1244,8 +1281,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
u8 status;
temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0)
+ if ((temp & SDVO_ENABLE) == 0) {
+ /* HW workaround for IBX: the disable path moves the port to
+ * transcoder A, so restore the transcoder selection here. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ temp |= SDVO_PIPE_B_SELECT;
+ }
+
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ }
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1499,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
- if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
- return connector_status_unknown;
-
- /* add 30ms delay when the output type might be TV */
- if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
- msleep(30);
-
- if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS,
+ &response, 2))
return connector_status_unknown;
DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1796,7 +1839,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(intel_sdvo_connector);
}
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1828,7 +1871,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -1883,7 +1926,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
if (intel_sdvo_connector->left == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1895,7 +1938,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->right == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1907,7 +1950,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->top == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1919,7 +1962,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (intel_sdvo_connector->bottom == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2072,17 +2115,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
else
mapping = &dev_priv->sdvo_mappings[1];
- pin = GMBUS_PORT_DPB;
- if (mapping->initialized)
+ if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
pin = mapping->i2c_pin;
+ else
+ pin = GMBUS_PORT_DPB;
- if (intel_gmbus_is_port_valid(pin)) {
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
- intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
- intel_gmbus_force_bit(sdvo->i2c, true);
- } else {
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
- }
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+ /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ * our code totally fails once we start using gmbus. Hence fall back to
+ * bit banging for now. */
+ intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+ intel_gmbus_force_bit(sdvo->i2c, false);
}
static bool
@@ -2427,7 +2477,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&intel_sdvo_connector->base.base,
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
@@ -2443,7 +2493,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
- drm_connector_attach_property(connector, \
+ drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2480,7 +2530,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->left)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
@@ -2489,7 +2539,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->right)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2517,7 +2567,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->top)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
@@ -2527,7 +2577,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->bottom)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2559,7 +2609,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->dot_crawl)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2663,10 +2713,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
- if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
- kfree(intel_sdvo);
- return false;
- }
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ goto err_i2c_bus;
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
@@ -2765,6 +2813,8 @@ err_output:
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+ intel_sdvo_unselect_i2c_bus(intel_sdvo);
kfree(intel_sdvo);
return false;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 82f5e5c7009d..d7b060e0a231 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
- int pixel_size;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
sprctl = I915_READ(SPRCTL(pipe));
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
- pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
- pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
- pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
- pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
- pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
- pixel_size = 2;
break;
default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- sprctl |= SPRITE_FORMAT_RGBX888;
- pixel_size = 4;
- break;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -127,18 +119,27 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset =
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+ * register */
+ if (IS_HASWELL(dev))
+ I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+ else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
+ else
+ I915_WRITE(SPRLINOFF(pipe), linear_offset);
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(SPRLINOFF(pipe), offset);
- }
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE(SPRSCALE(pipe), sprscale);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
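A worked example may help clarify the linear/tiled offset split above. Assuming the usual gen4+ X-tile geometry of 512-byte-wide, 8-row tiles (an assumption about intel_gen4_compute_offset_xtiled(), not spelled out in this hunk), with pixel_size = 4, pitches[0] = 8192, x = 100, y = 50:

	linear_offset  = 50 * 8192 + 100 * 4	= 410000
	sprsurf_offset = (50 / 8) * 8192 * 8	= 393216   (x stays within tile 0)
	linear_offset -= sprsurf_offset		=  16784
						= (50 % 8) * 8192 + 100 * 4

so SPRSURF gets a tile-aligned base while SPRLINOFF (or the adjusted x/y for tiled surfaces) carries only the residue within that region. The DVS path below does the same dance.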
@@ -152,7 +153,8 @@ ivb_disable_plane(struct drm_plane *plane)
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
- I915_WRITE(SPRSCALE(pipe), 0);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
@@ -225,8 +227,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe, pixel_size;
+ int pipe = intel_plane->pipe;
+ unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -239,33 +243,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
- pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
- pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
- pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
- pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
- pixel_size = 2;
break;
default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
- break;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -289,18 +284,22 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ dvssurf_offset =
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= dvssurf_offset;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
+ else
+ I915_WRITE(DVSLINOFF(pipe), linear_offset);
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(DVSLINOFF(pipe), offset);
- }
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
@@ -422,6 +421,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -436,7 +437,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_h = src_h >> 16;
/* Pipe must be running... */
- if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+ if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -446,6 +447,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL;
+ /* Sprite planes can be linear or x-tiled surfaces */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ case I915_TILING_X:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
@@ -473,6 +483,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
goto out;
/*
+ * We may not have a scaler, eg. HSW does not have it any more
+ */
+ if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
+ return -EINVAL;
+
+ /*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
*/
@@ -665,6 +681,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
+ intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
@@ -681,6 +698,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
break;
case 7:
+ if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
+ intel_plane->can_scale = false;
+ else
+ intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 62bb048c135e..ea93520c1278 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
/* Wait for vblank for the disable to take effect */
if (IS_GEN2(dev))
@@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
}
j = 0;
@@ -1292,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
}
intel_tv->tv_format = tv_mode->name;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1446,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
int ret = 0;
bool changed = false;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret < 0)
goto out;
@@ -1658,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
ARRAY_SIZE(tv_modes),
tv_format_names);
- drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+ drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
initial_mode);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
intel_tv->margin[TV_MARGIN_LEFT]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
intel_tv->margin[TV_MARGIN_TOP]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
intel_tv->margin[TV_MARGIN_RIGHT]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index d63013497f66..b487cdec5ee7 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,6 +1,6 @@
config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
- depends on DRM && PCI && EXPERIMENTAL
+ depends on DRM && PCI
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1e910117b0a2..122b571ccc7c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -60,8 +60,7 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
}
-static int __devinit
-mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
mgag200_kick_out_firmware_fb(pdev);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index d6a1aae33701..70dd3c5529d4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -133,6 +133,8 @@ static int mga_vram_init(struct mga_device *mdev)
{
void __iomem *mem;
struct apertures_struct *aper = alloc_apertures(1);
+ if (!aper)
+ return -ENOMEM;
/* BAR 0 is VRAM */
mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
@@ -140,9 +142,9 @@ static int mga_vram_init(struct mga_device *mdev)
aper->ranges[0].base = mdev->mc.vram_base;
aper->ranges[0].size = mdev->mc.vram_window;
- aper->count = 1;
remove_conflicting_framebuffers(aper, "mgafb", true);
+ kfree(aper);
if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
"mgadrmfb_vram")) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 1504699666c4..8fc9d9201945 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
static int mgag200_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
ttm_bo_type_device, &mgabo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, mgag200_bo_ttm_destroy);
if (ret)
return ret;
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
mgag200_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index a990df4d6c04..ab25752a0b1e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
+nouveau-y += core/core/falcon.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
nouveau-y += core/subdev/bios/conn.o
nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
nouveau-y += core/subdev/bios/dp.o
nouveau-y += core/subdev/bios/extdev.o
nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
nouveau-y += core/subdev/fb/nv50.o
nouveau-y += core/subdev/fb/nvc0.o
nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
nouveau-y += core/engine/dmaobj/nv04.o
nouveau-y += core/engine/dmaobj/nv50.o
nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
nouveau-y += core/engine/copy/nva3.o
nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
nouveau-y += core/engine/disp/vga.o
nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv50.o
nouveau-y += core/engine/mpeg/nv84.o
nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
nouveau-y += core/engine/software/nv04.o
nouveau-y += core/engine/software/nv10.o
nouveau-y += core/engine/software/nv50.o
nouveau-y += core/engine/software/nvc0.o
nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms
nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
# drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
# drm/kms/nv50-
-nouveau-y += nv50_display.o nvd0_display.o
-nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
-nouveau-y += nv50_evo.o
+nouveau-y += nv50_display.o
# drm/pm
nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index c617f0480071..8bbb58f94a19 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
ret = nouveau_handle_create(nv_object(client), ~0, ~0,
nv_object(client), &client->root);
- if (ret) {
- nouveau_namedb_destroy(&client->base);
+ if (ret)
return ret;
- }
/* prevent init/fini being called, the OS is in charge of this */
atomic_set(&nv_object(client)->usecount, 2);
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
index e41b10d5eb59..84c71fad2b6c 100644
--- a/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
return nouveau_gpuobj_fini(&engctx->base, suspend);
}
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_engctx *engctx;
+ int ret;
+
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+ NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+ *pobject = nv_object(engctx);
+ return ret;
+}
+
void
_nouveau_engctx_dtor(struct nouveau_object *object)
{
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 000000000000..e05c15777588
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+
+u32
+_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+ return nv_rd32(falcon, falcon->addr + addr);
+}
+
+void
+_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+ nv_wr32(falcon, falcon->addr + addr, data);
+}
+
+int
+_nouveau_falcon_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nouveau_falcon *falcon = (void *)object;
+ const struct firmware *fw;
+ char name[32] = "internal";
+ int ret, i;
+ u32 caps;
+
+ /* enable engine, and determine its capabilities */
+ ret = nouveau_engine_init(&falcon->base);
+ if (ret)
+ return ret;
+
+ if (device->chipset < 0xa3 ||
+ device->chipset == 0xaa || device->chipset == 0xac) {
+ falcon->version = 0;
+ falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
+ } else {
+ caps = nv_ro32(falcon, 0x12c);
+ falcon->version = (caps & 0x0000000f);
+ falcon->secret = (caps & 0x00000030) >> 4;
+ }
+
+ caps = nv_ro32(falcon, 0x108);
+ falcon->code.limit = (caps & 0x000001ff) << 8;
+ falcon->data.limit = (caps & 0x0003fe00) >> 1;
+
+ nv_debug(falcon, "falcon version: %d\n", falcon->version);
+ nv_debug(falcon, "secret level: %d\n", falcon->secret);
+ nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
+ nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+
+ /* wait for 'uc halted' to be signalled before continuing */
+ if (falcon->secret && falcon->version < 4) {
+ if (!falcon->version)
+ nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+ else
+ nv_wait(falcon, 0x180, 0x80000000, 0);
+ nv_wo32(falcon, 0x004, 0x00000010);
+ }
+
+ /* disable all interrupts */
+ nv_wo32(falcon, 0x014, 0xffffffff);
+
+ /* no default ucode provided by the engine implementation, try and
+ * locate a "self-bootstrapping" firmware image for the engine
+ */
+ if (!falcon->code.data) {
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret == 0) {
+ falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.size = fw->size;
+ falcon->data.data = NULL;
+ falcon->data.size = 0;
+ release_firmware(fw);
+ }
+
+ falcon->external = true;
+ }
+
+ /* next step is to try and load "static code/data segment" firmware
+ * images for the engine
+ */
+ if (!falcon->code.data) {
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret) {
+ nv_error(falcon, "unable to load firmware data\n");
+ return ret;
+ }
+
+ falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->data.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->data.data)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret) {
+ nv_error(falcon, "unable to load firmware code\n");
+ return ret;
+ }
+
+ falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->code.data)
+ return -ENOMEM;
+ }
+
+ nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
+ "static code/data segments" : "self-bootstrapping");
+
+ /* ensure any "self-bootstrapping" firmware image is in vram */
+ if (!falcon->data.data && !falcon->core) {
+ ret = nouveau_gpuobj_new(object->parent, NULL,
+ falcon->code.size, 256, 0,
+ &falcon->core);
+ if (ret) {
+ nv_error(falcon, "core allocation failed, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < falcon->code.size; i += 4)
+ nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+ }
+
+ /* upload firmware bootloader (or the full code segments) */
+ if (falcon->core) {
+ if (device->card_type < NV_C0)
+ nv_wo32(falcon, 0x618, 0x04000000);
+ else
+ nv_wo32(falcon, 0x618, 0x00000114);
+ nv_wo32(falcon, 0x11c, 0);
+ nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
+ nv_wo32(falcon, 0x114, 0);
+ nv_wo32(falcon, 0x118, 0x00006610);
+ } else {
+ if (falcon->code.size > falcon->code.limit ||
+ falcon->data.size > falcon->data.limit) {
+ nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+ return -EINVAL;
+ }
+
+ if (falcon->version < 3) {
+ nv_wo32(falcon, 0xff8, 0x00100000);
+ for (i = 0; i < falcon->code.size / 4; i++)
+ nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+ } else {
+ nv_wo32(falcon, 0x180, 0x01000000);
+ for (i = 0; i < falcon->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wo32(falcon, 0x188, i >> 6);
+ nv_wo32(falcon, 0x184, falcon->code.data[i]);
+ }
+ }
+ }
+
+ /* upload data segment (if necessary), zeroing the remainder */
+ if (falcon->version < 3) {
+ nv_wo32(falcon, 0xff8, 0x00000000);
+ for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+ nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+ for (; i < falcon->data.limit; i += 4)
+ nv_wo32(falcon, 0xff4, 0x00000000);
+ } else {
+ nv_wo32(falcon, 0x1c0, 0x01000000);
+ for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+ nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+ for (; i < falcon->data.limit / 4; i++)
+ nv_wo32(falcon, 0x1c4, 0x00000000);
+ }
+
+ /* start it running */
+ nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+ nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
+ nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
+ nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+ return 0;
+}
+
+int
+_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+
+ if (!suspend) {
+ nouveau_gpuobj_ref(NULL, &falcon->core);
+ if (falcon->external) {
+ kfree(falcon->data.data);
+ kfree(falcon->code.data);
+ falcon->code.data = NULL;
+ }
+ }
+
+ nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
+ nv_wo32(falcon, 0x014, 0xffffffff);
+
+ return nouveau_engine_fini(&falcon->base, suspend);
+}
+
+int
+nouveau_falcon_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 addr, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
+{
+ struct nouveau_falcon *falcon;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
+ fname, length, pobject);
+ falcon = *pobject;
+ if (ret)
+ return ret;
+
+ falcon->addr = addr;
+ return 0;
+}
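The firmware lookup above derives the image names from the chipset and the falcon's base address shifted down by 12 bits. As a worked example, for the PBSP falcon at 0x084000 on an nvc0 chipset, the init path would request (filenames follow directly from the snprintf() formats above):

	nouveau/nvc0_fuc084	/* self-bootstrapping image, tried first */
	nouveau/nvc0_fuc084d	/* static data segment, fallback         */
	nouveau/nvc0_fuc084c	/* static code segment, fallback         */

All falcon register accesses in this file are likewise relative to that base, so a read of offset 0x108 through the falcon's rd32 hook hits register 0x084108 on this engine.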
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 70586fde69cf..560b2214cf1c 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
}
u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
}
void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index b8d2cbf8a7a7..264c2b338ac3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
namedb = namedb->parent;
- handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
}
hprintk(handle, TRACE, "created\n");
+
+ *phandle = handle;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index a6d3cd6490f7..0261a11b2ae0 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
int
nouveau_mm_fini(struct nouveau_mm *mm)
{
- struct nouveau_mm_node *node, *heap =
- list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
- int nodes = 0;
+ if (nouveau_mm_initialised(mm)) {
+ struct nouveau_mm_node *node, *heap =
+ list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+ int nodes = 0;
+
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (WARN_ON(nodes++ == mm->heap_nodes))
+ return -EBUSY;
+ }
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (WARN_ON(nodes++ == mm->heap_nodes))
- return -EBUSY;
+ kfree(heap);
}
- kfree(heap);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index f74c30aa33a0..48f06378d3f9 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
if (ret)
return ret;
- mutex_init(&subdev->mutex);
+ __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
subdev->name = subname;
if (parent) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 66f7dfd907ee..1d9f614cb97d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -22,18 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/bsp.h>
struct nv84_bsp_priv {
- struct nouveau_bsp base;
-};
-
-struct nv84_bsp_chan {
- struct nouveau_bsp_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
* BSP context
******************************************************************************/
-static int
-nv84_bsp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_bsp_chan *priv;
- int ret;
-
- ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv84_bsp_context_dtor(struct nouveau_object *object)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- nouveau_bsp_context_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_context_init(struct nouveau_object *object)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_bsp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- return nouveau_bsp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv84_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_bsp_context_ctor,
- .dtor = nv84_bsp_context_dtor,
- .init = nv84_bsp_context_init,
- .fini = nv84_bsp_context_fini,
- .rd32 = _nouveau_bsp_context_rd32,
- .wr32 = _nouveau_bsp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
* BSP engine/subdev functions
******************************************************************************/
-static void
-nv84_bsp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_bsp_priv *priv;
int ret;
- ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PBSP", "bsp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x04008000;
- nv_subdev(priv)->intr = nv84_bsp_intr;
nv_engine(priv)->cclass = &nv84_bsp_cclass;
nv_engine(priv)->sclass = nv84_bsp_sclass;
return 0;
}
-static void
-nv84_bsp_dtor(struct nouveau_object *object)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- nouveau_bsp_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_init(struct nouveau_object *object)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_bsp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_bsp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- return nouveau_bsp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv84_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_bsp_ctor,
- .dtor = nv84_bsp_dtor,
- .init = nv84_bsp_init,
- .fini = nv84_bsp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 000000000000..0a5aa6bb0870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+ { 0x90b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+ struct nvc0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+ nv_engine(priv)->sclass = nvc0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 000000000000..d4f23bbd75b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+ { 0x95b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+ struct nve0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_engine(priv)->cclass = &nve0_bsp_cclass;
+ nv_engine(priv)->sclass = nve0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 4df6da0af740..283248c7b050 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,10 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
#include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
@@ -36,11 +35,7 @@
#include "fuc/nva3.fuc.h"
struct nva3_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nva3_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nva3_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nva3_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nva3_copy_cclass = {
.handle = NV_ENGCTX(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
{}
};
-static void
+void
nva3_copy_intr(struct nouveau_subdev *subdev)
{
struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_falcon *falcon = (void *)subdev;
struct nouveau_object *engctx;
- struct nva3_copy_priv *priv = (void *)subdev;
- u32 dispatch = nv_rd32(priv, 0x10401c);
- u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
- u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
- u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x104040) >> 16;
+ u32 dispatch = nv_ro32(falcon, 0x01c);
+ u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+ u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+ u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+ u32 addr = nv_ro32(falcon, 0x040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x104044);
+ u32 data = nv_ro32(falcon, 0x044);
int chid;
engctx = nouveau_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
+ nv_error(falcon, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
chid, inst << 12, subc, mthd, data);
- nv_wr32(priv, 0x104004, 0x00000040);
+ nv_wo32(falcon, 0x004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104004, stat);
+ nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+ nv_wo32(falcon, 0x004, stat);
}
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_copy_priv *priv;
int ret;
- ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nva3_copy_cclass;
nv_engine(priv)->sclass = nva3_copy_sclass;
nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+ nv_falcon(priv)->code.data = nva3_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+ nv_falcon(priv)->data.data = nva3_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
return 0;
}
-static int
-nva3_copy_init(struct nouveau_object *object)
-{
- struct nva3_copy_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_copy_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x104014, 0xffffffff);
-
- /* upload ucode */
- nv_wr32(priv, 0x1041c0, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
- nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
-
- nv_wr32(priv, 0x104180, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x104188, i >> 6);
- nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(priv, 0x10410c, 0x00000000);
- nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nva3_copy_fini(struct nouveau_object *object, bool suspend)
-{
- struct nva3_copy_priv *priv = (void *)object;
-
- nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
- nv_wr32(priv, 0x104014, 0xffffffff);
-
- return nouveau_copy_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nva3_copy_oclass = {
.handle = NV_ENGINE(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva3_copy_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = nva3_copy_init,
- .fini = nva3_copy_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 06d4a8791055..b3ed2737e21f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -22,10 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
#include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
#include <engine/fifo.h>
#include <engine/copy.h>
@@ -33,11 +32,7 @@
#include "fuc/nvc0.fuc.h"
struct nvc0_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nvc0_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nvc0_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_ofuncs
nvc0_copy_context_ofuncs = {
- .ctor = nvc0_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
};
static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
* PCOPY engine/subdev functions
******************************************************************************/
-static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
- { 0x0001, "ILLEGAL_MTHD" },
- { 0x0002, "INVALID_ENUM" },
- { 0x0003, "INVALID_BITFIELD" },
- {}
-};
-
-static void
-nvc0_copy_intr(struct nouveau_subdev *subdev)
+static int
+nvc0_copy_init(struct nouveau_object *object)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)subdev;
- u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
- u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
- u32 stat = intr & disp & ~(disp >> 16);
- u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
- u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
- u32 mthd = (addr & 0x07ff) << 2;
- u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
- int chid;
-
- engctx = nouveau_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
- if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
- nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, mthd, data);
- nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
- stat &= ~0x00000040;
- }
+ struct nvc0_copy_priv *priv = (void *)object;
+ int ret;
- if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
- }
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
- nouveau_engctx_put(engctx);
+ nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+ return 0;
}
static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy0_cclass;
nv_engine(priv)->sclass = nvc0_copy0_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+ "PCE1", "copy1", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy1_cclass;
nv_engine(priv)->sclass = nvc0_copy1_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
-static int
-nvc0_copy_init(struct nouveau_object *object)
-{
- int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_copy_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
- /* upload ucode */
- nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
- nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
-
- nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
- nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
- nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
- nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nvc0_copy_fini(struct nouveau_object *object, bool suspend)
-{
- int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)object;
-
- nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
- nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
- return nouveau_copy_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nvc0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy0_ctor,
- .dtor = _nouveau_copy_dtor,
+ .dtor = _nouveau_falcon_dtor,
.init = nvc0_copy_init,
- .fini = nvc0_copy_fini,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy1_ctor,
- .dtor = _nouveau_copy_dtor,
+ .dtor = _nouveau_falcon_dtor,
.init = nvc0_copy_init,
- .fini = nvc0_copy_fini,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 2017c1579ac5..dbbe9e8998fe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -30,11 +30,7 @@
#include <engine/copy.h>
struct nve0_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nve0_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nve0_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_ofuncs
nve0_copy_context_ofuncs = {
- .ctor = nve0_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
};
static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE1", "copy1", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy0_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = _nouveau_copy_init,
- .fini = _nouveau_copy_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy1_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = _nouveau_copy_init,
- .fini = _nouveau_copy_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 1d85e5b66ca0..b97490512723 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -34,11 +34,7 @@
#include <engine/crypt.h>
struct nv84_crypt_priv {
- struct nouveau_crypt base;
-};
-
-struct nv84_crypt_chan {
- struct nouveau_crypt_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
-static int
-nv84_crypt_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_crypt_chan *priv;
- int ret;
-
- ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
- 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nv84_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_crypt_context_ctor,
- .dtor = _nouveau_crypt_context_dtor,
- .init = _nouveau_crypt_context_init,
- .fini = _nouveau_crypt_context_fini,
- .rd32 = _nouveau_crypt_context_rd32,
- .wr32 = _nouveau_crypt_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x102130, stat);
nv_wr32(priv, 0x10200c, 0x10);
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_crypt_priv *priv;
int ret;
- ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCRYPT", "crypt", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
struct nv84_crypt_priv *priv = (void *)object;
int ret;
- ret = nouveau_crypt_init(&priv->base);
+ ret = nouveau_engine_init(&priv->base);
if (ret)
return ret;
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_crypt_ctor,
- .dtor = _nouveau_crypt_dtor,
+ .dtor = _nouveau_engine_dtor,
.init = nv84_crypt_init,
- .fini = _nouveau_crypt_fini,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 9e3876c89b96..21986f3bf0c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,6 +26,7 @@
#include <core/enum.h>
#include <core/class.h>
#include <core/engctx.h>
+#include <core/falcon.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
@@ -36,11 +37,7 @@
#include "fuc/nv98.fuc.h"
struct nv98_crypt_priv {
- struct nouveau_crypt base;
-};
-
-struct nv98_crypt_chan {
- struct nouveau_crypt_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
-static int
-nv98_crypt_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_crypt_chan *priv;
- int ret;
-
- ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nv98_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_crypt_context_ctor,
- .dtor = _nouveau_crypt_context_dtor,
- .init = _nouveau_crypt_context_init,
- .fini = _nouveau_crypt_context_fini,
- .rd32 = _nouveau_crypt_context_rd32,
- .wr32 = _nouveau_crypt_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x087004, stat);
}
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_crypt_priv *priv;
int ret;
- ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+ "PCRYPT", "crypt", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nv98_crypt_cclass;
nv_engine(priv)->sclass = nv98_crypt_sclass;
nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
- return 0;
-}
-
-static int
-nv98_crypt_init(struct nouveau_object *object)
-{
- struct nv98_crypt_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_crypt_init(&priv->base);
- if (ret)
- return ret;
-
- /* wait for exit interrupt to signal */
- nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
- nv_wr32(priv, 0x087004, 0x00000010);
-
- /* upload microcode code and data segments */
- nv_wr32(priv, 0x087ff8, 0x00100000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
- nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
-
- nv_wr32(priv, 0x087ff8, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
- nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
-
- /* start it running */
- nv_wr32(priv, 0x08710c, 0x00000000);
- nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+ nv_falcon(priv)->code.data = nv98_pcrypt_code;
+ nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+ nv_falcon(priv)->data.data = nv98_pcrypt_data;
+ nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
return 0;
}
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_crypt_ctor,
- .dtor = _nouveau_crypt_dtor,
- .init = nv98_crypt_init,
- .fini = _nouveau_crypt_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 000000000000..d0817d94454c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+ (data & NV50_DISP_DAC_PWR_VSYNC) |
+ (data & NV50_DISP_DAC_PWR_DATA) |
+ (data & NV50_DISP_DAC_PWR_STATE);
+ const u32 doff = (or * 0x800);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+ const u32 doff = (or * 0x800);
+ int load = -EINVAL;
+ nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+ udelay(9500);
+ nv_wr32(priv, 0x61a00c + doff, 0x80000000);
+ load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
+ nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ return load;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+ u32 *data = args;
+ int ret;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_DAC_PWR:
+ ret = priv->dac.power(priv, or, data[0]);
+ break;
+ case NV50_DISP_DAC_LOAD:
+ ret = priv->dac.sense(priv, or, data[0]);
+ if (ret >= 0) {
+ data[0] = ret;
+ ret = 0;
+ }
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 000000000000..373dbcc523b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x800);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 000000000000..dc57e24fc1df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x030);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 000000000000..0d36bdc51417
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+ nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+ nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+ nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 000000000000..f065fc248adf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 soff = (or * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+ nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+ nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+ nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+ nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 000000000000..5151bb261832
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+ nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+ nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? InfoFrame? */
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+ /* NFI, audio doesn't work without it though.. */
+ nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 15b182c84ce8..ca1a7d76a95b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,20 +22,740 @@
* Authors: Ben Skeggs
*/
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
#include <engine/software.h>
#include <engine/disp.h>
-struct nv50_disp_priv {
- struct nouveau_disp base;
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_base *base = (void *)parent;
+ struct nv50_disp_chan *chan;
+ int ret;
+
+ if (base->chan & (1 << chid))
+ return -EBUSY;
+ base->chan |= (1 << chid);
+
+ ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+ (1ULL << NVDEV_ENGINE_DMAOBJ),
+ length, pobject);
+ chan = *pobject;
+ if (ret)
+ return ret;
+
+ chan->chid = chid;
+ return 0;
+}
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+ base->chan &= ~(1 << chan->chid);
+ nouveau_namedb_destroy(&chan->base);
+}
+
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 chid = chan->chid;
+ u32 data = (chid << 28) | (addr << 10) | chid;
+ return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+ dmac = *pobject;
+ if (ret)
+ return ret;
+
+ dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ if (!dmac->pushdma)
+ return -ENOENT;
+
+ switch (nv_mclass(dmac->pushdma)) {
+ case 0x0002:
+ case 0x003d:
+ if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+ return -EINVAL;
+
+ switch (dmac->pushdma->target) {
+ case NV_MEM_TARGET_VRAM:
+ dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_dmac *dmac = (void *)object;
+ nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+ nv50_disp_chan_destroy(&dmac->base);
+}
+
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+ /* attempt to unstick channel from some unknown state */
+ if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+ nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+ if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+ nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204, mast->push);
+ nv_wr32(priv, 0x610208, 0x00010000);
+ nv_wr32(priv, 0x61020c, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610200, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+ .ctor = nv50_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_mast_init,
+ .fini = nv50_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+ .ctor = nv50_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 3 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+ .ctor = nv50_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+ nv_error(pioc, "timeout0: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "timeout1: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+ .ctor = nv50_disp_oimm_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+ .ctor = nv50_disp_curs_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The below segments of code copying values from one register to
+ * another appear to inform EVO of the display capabilities or
+ * something similar. NFI what the 0x614004 caps are for..
+ */
+ tmp = nv_rd32(priv, 0x614004);
+ nv_wr32(priv, 0x610184, tmp);
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+ nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+ }
+
+ /* ... EXT caps */
+ for (i = 0; i < 3; i++) {
+ tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+ nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x610024) & 0x00000100) {
+ nv_wr32(priv, 0x610024, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x61002c, 0x00000370);
+ nv_wr32(priv, 0x610028, 0x00000000);
+ return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x610024, 0x00000000);
+ nv_wr32(priv, 0x610020, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+ .ctor = nv50_disp_base_ctor,
+ .dtor = nv50_disp_base_dtor,
+ .init = nv50_disp_base_init,
+ .fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+ { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+ {}
};
static struct nouveau_oclass
nv50_disp_sclass[] = {
- {},
+ { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * Display context, tracks instmem allocation and prevents more than one
+ * client using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nouveau_engctx *ectx;
+ int ret = -EBUSY;
+
+ /* no context needed for channel objects... */
+ if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+ atomic_inc(&parent->refcount);
+ *pobject = parent;
+ return 0;
+ }
+
+ /* allocate display hardware to client */
+ mutex_lock(&nv_subdev(priv)->mutex);
+ if (list_empty(&nv_engine(priv)->contexts)) {
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+ 0x10000, 0x10000,
+ NVOBJ_FLAG_HEAP, &ectx);
+ *pobject = nv_object(ectx);
+ }
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+ .handle = NV_ENGCTX(DISP, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_disp_data_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
+ },
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv)
+{
+ u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
+ u32 addr, data;
+ int chid;
+
+ for (chid = 0; chid < 5; chid++) {
+ if (!(channels & (1 << chid)))
+ continue;
+
+ nv_wr32(priv, 0x610020, 0x00010000 << chid);
+ addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+ data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+ nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
+ chid, addr & 0xffc, data, addr);
+ }
+}
+
static void
nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
@@ -80,30 +800,428 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
disp->vblank.notify(disp->vblank.data, crtc);
}
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+ struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ u16 mask, type, data;
+
+ if (outp < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ outp -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << outp;
+ mask |= 0x0100 << head;
+
+ data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+ if (!data)
+ return 0x0000;
+
+ return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
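The mask handed to dcb_outp_match() above packs the candidate link, output index and head into a single DCB match value. A minimal illustrative sketch (not taken from the patch) of that packing, using hypothetical inputs:

/* Illustrative only: mirrors the mask construction in exec_lookup().
 * For SOR1 driving head 1 over TMDS link B (SOR mc value 0x00000200):
 *   link bits 7:6 = (2 << 6) & 0xc0  -> 0x0080
 *   output bit    = 1 << 1           -> 0x0002
 *   head bit      = 0x0100 << 1      -> 0x0200
 * giving a DCB match mask of 0x0282.
 */
static u16 exec_lookup_mask(int head, int outp, u16 link)
{
	u16 mask = 0x00c0 & (link << 6);
	mask |= 0x0001 << outp;
	mask |= 0x0100 << head;
	return mask;
}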
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+ u32 ctrl = 0x00000000;
+ int i;
+
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+ i += 4;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+ i += 4;
+ }
+ }
+
+ if (!(ctrl & (1 << head)))
+ return false;
+ i--;
+
+ data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ return nvbios_exec(&init) == 0;
+ }
+
+ return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
+ struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ u8 ver, hdr, cnt, len;
+ u16 data, conf;
+ u32 ctrl = 0x00000000;
+ int i;
+
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+ i += 4;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+ i += 4;
+ }
+ }
+
+ if (!(ctrl & (1 << head)))
+ return 0x0000;
+ i--;
+
+ data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
+ if (!data)
+ return 0x0000;
+
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = outp,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (nvbios_exec(&init))
+ return 0x0000;
+ return conf;
+ }
+ }
+
+ return 0x0000;
+}
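The conf value matched against the output configuration table comes straight from the SOR control word for TMDS and DP outputs; for TMDS an extra 0x0100 bit is set once the pixel clock reaches 165000 (kHz), which appears to be the usual single-link TMDS limit and presumably selects a dual-link script variant. Analog outputs always fall back to 0x00ff.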
+
+static void
+nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+{
+ int head = ffs((super & 0x00000060) >> 5) - 1;
+ if (head >= 0) {
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0)
+ exec_script(priv, head, 1);
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000010);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
static void
+nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
+ struct dcb_output *outp, u32 pclk)
+{
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+ const u32 symbol = 100000;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(priv, 0x614300 + soff);
+ int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+ int TU, VTUi, VTUf, VTUa;
+ u64 link_data_rate, link_ratio, unk;
+ u32 best_diff = 64 * symbol;
+ u32 link_nr, link_bw, bits, r;
+
+ /* calculate packed data rate for each lane */
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ if (clksor & 0x000c0000)
+ link_bw = 270000;
+ else
+ link_bw = 162000;
+
+ if ((ctrl & 0xf0000) == 0x60000) bits = 30;
+ else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+ else bits = 18;
+
+ link_data_rate = (pclk * bits / 8) / link_nr;
+
+ /* calculate ratio of packed data rate to link symbol rate */
+ link_ratio = link_data_rate * symbol;
+ r = do_div(link_ratio, link_bw);
+
+ for (TU = 64; TU >= 32; TU--) {
+ /* calculate average number of valid symbols in each TU */
+ u32 tu_valid = link_ratio * TU;
+ u32 calc, diff;
+
+ /* find a hw representation for the fraction.. */
+ VTUi = tu_valid / symbol;
+ calc = VTUi * symbol;
+ diff = tu_valid - calc;
+ if (diff) {
+ if (diff >= (symbol / 2)) {
+ VTUf = symbol / (symbol - diff);
+ if (symbol - (VTUf * diff))
+ VTUf++;
+
+ if (VTUf <= 15) {
+ VTUa = 1;
+ calc += symbol - (symbol / VTUf);
+ } else {
+ VTUa = 0;
+ VTUf = 1;
+ calc += symbol;
+ }
+ } else {
+ VTUa = 0;
+ VTUf = min((int)(symbol / diff), 15);
+ calc += symbol / VTUf;
+ }
+
+ diff = calc - tu_valid;
+ } else {
+ /* no remainder, but the hw doesn't like the fractional
+ * part to be zero. decrement the integer part and
+ * have the fraction add a whole symbol back
+ */
+ VTUa = 0;
+ VTUf = 1;
+ VTUi--;
+ }
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ bestTU = TU;
+ bestVTUa = VTUa;
+ bestVTUf = VTUf;
+ bestVTUi = VTUi;
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (!bestTU) {
+ nv_error(priv, "unable to find suitable dp config\n");
+ return;
+ }
+
+ /* XXX close to vbios numbers, but not right */
+ unk = (symbol - link_ratio) * bestTU;
+ unk *= link_ratio;
+ r = do_div(unk, symbol);
+ r = do_div(unk, symbol);
+ unk += 6;
+
+ nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+ nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+ bestVTUf << 16 |
+ bestVTUi << 8 | unk);
+}
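A rough standalone sketch (not part of the patch) of the ratio the loop above is searching a TU configuration for; it assumes, as the code does, that pclk and link_bw are in kHz and that symbol is just a fixed-point scale factor:

/* Illustrative only: the average number of valid data symbols per
 * transfer unit is TU * (per-lane data rate / per-lane link rate).
 * e.g. a 148500 kHz mode at 24bpp over 4 lanes of 2.7Gbps:
 *   data rate = 148500 * 24 / 8 / 4 = 111375
 *   ratio     = 111375 * 100000 / 270000 ~= 41250 (0.4125 symbols)
 * so a 64-symbol TU carries ~26.4 valid symbols, which the loop then
 * approximates with the VTUi/VTUf/VTUa fields.
 */
static u64 dp_tu_valid_symbols(u32 pclk, u32 bpp, u32 link_nr, u32 link_bw,
			       u32 TU)
{
	const u32 symbol = 100000;
	u64 ratio = (u64)(pclk * bpp / 8 / link_nr) * symbol;

	ratio /= link_bw;	/* plain division instead of do_div() */
	return ratio * TU;	/* still scaled by 'symbol' */
}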
+
+static void
+nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+{
+ struct dcb_output outp;
+ u32 addr, mask, data;
+ int head;
+
+ /* finish detaching encoder? */
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0)
+ exec_script(priv, head, 2);
+
+ /* check whether a vpll change is required */
+ head = ffs((super & 0x00000600) >> 9) - 1;
+ if (head >= 0) {
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk) {
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ }
+
+ nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
+ }
+
+ /* (re)attach the relevant OR to the head */
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0) {
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
+ if (conf) {
+ if (outp.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+ mask = 0xffffffff;
+ data = 0x00000000;
+ } else {
+ if (outp.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_dp(priv, &outp, pclk);
+ addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+ mask = 0x00000707;
+ data = (conf & 0x0100) ? 0x0101 : 0x0000;
+ }
+
+ nv_mask(priv, addr, mask, data);
+ }
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000020);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u16 mask = (outp->sorconf.link << 6) | outp->or;
+ u8 ver, hdr;
+
+ if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+ nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
+static void
+nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+{
+ int head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0) {
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
+ if (outp.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_tmds(priv, &outp);
+ }
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000040);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+{
+ u32 super = nv_rd32(priv, 0x610030);
+
+ nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+
+ if (intr1 & 0x00000010)
+ nv50_disp_intr_unk10(priv, super);
+ if (intr1 & 0x00000020)
+ nv50_disp_intr_unk20(priv, super);
+ if (intr1 & 0x00000040)
+ nv50_disp_intr_unk40(priv, super);
+}
+
+void
nv50_disp_intr(struct nouveau_subdev *subdev)
{
struct nv50_disp_priv *priv = (void *)subdev;
- u32 stat1 = nv_rd32(priv, 0x610024);
+ u32 intr0 = nv_rd32(priv, 0x610020);
+ u32 intr1 = nv_rd32(priv, 0x610024);
- if (stat1 & 0x00000004) {
+ if (intr0 & 0x001f0000) {
+ nv50_disp_intr_error(priv);
+ intr0 &= ~0x001f0000;
+ }
+
+ if (intr1 & 0x00000004) {
nv50_disp_intr_vblank(priv, 0);
nv_wr32(priv, 0x610024, 0x00000004);
- stat1 &= ~0x00000004;
+ intr1 &= ~0x00000004;
}
- if (stat1 & 0x00000008) {
+ if (intr1 & 0x00000008) {
nv50_disp_intr_vblank(priv, 1);
nv_wr32(priv, 0x610024, 0x00000008);
- stat1 &= ~0x00000008;
+ intr1 &= ~0x00000008;
}
+ if (intr1 & 0x00000070) {
+ nv50_disp_intr_super(priv, intr1);
+ intr1 &= ~0x00000070;
+ }
}
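Taken together, the rewritten handler splits PDISP interrupts into three groups: bits 16-20 of 0x610020 flag per-channel errors (decoded by nv50_disp_intr_error), bits 2 and 3 of 0x610024 are the vblank interrupts for heads 0 and 1, and bits 4-6 of 0x610024 are the three supervisor stages dispatched through nv50_disp_intr_super.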
static int
nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
@@ -114,8 +1232,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv50_disp_sclass;
+ nv_engine(priv)->sclass = nv50_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv50_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
INIT_LIST_HEAD(&priv->base.vblank.list);
spin_lock_init(&priv->base.vblank.lock);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 000000000000..a6bb931450f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,142 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/ramht.h>
+
+#include <engine/dmaobj.h>
+#include <engine/disp.h>
+
+struct dcb_output;
+
+struct nv50_disp_priv {
+ struct nouveau_disp base;
+ struct nouveau_oclass *sclass;
+ struct {
+ int nr;
+ } head;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+ int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+ } dac;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+ int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+ int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+ int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
+ u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
+ int lane, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ u32 lvdsconf;
+ } sor;
+};
+
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
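The pair of values each of these macros expands to appears to define an inclusive method range routed to one handler in the omthds tables: 0x04 methods per DAC and 0x40 per SOR, so a single SOR_MTHD(NV50_DISP_SOR_PWR) entry covers that method for every SOR (and, where relevant, link/head) index encoded in the low bits.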
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+struct nv50_disp_base {
+ struct nouveau_parent base;
+ struct nouveau_ramht *ramht;
+ u32 chan;
+};
+
+struct nv50_disp_chan {
+ struct nouveau_namedb base;
+ int chid;
+};
+
+int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a) \
+ nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b) \
+ nouveau_namedb_fini(&(a)->base, (b))
+
+int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+ struct nv50_disp_chan base;
+ struct nouveau_dmaobj *pushdma;
+ u32 push;
+};
+
+struct nv50_disp_pioc {
+ struct nv50_disp_chan base;
+};
+
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern struct nouveau_omthds nva3_disp_base_omthds[];
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 000000000000..fc84eacdfbec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+ { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+ { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv84_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv84_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x82),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 000000000000..ba9dfd4669a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+ { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+ { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+ {}
+};
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv94_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv94_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->sor.dp_train = nv94_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv94_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x88),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 000000000000..5d63902cdeda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+ { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+ { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nva0_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nva0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x83),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 000000000000..e9192ca389fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+ { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+ { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva3_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nva3_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nva3_hda_eld;
+ priv->sor.hdmi = nva3_hdmi_ctrl;
+ priv->sor.dp_train = nv94_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nva3_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x85),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index d93efbcf75b8..9e38ebff5fb3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -22,22 +22,808 @@
* Authors: Ben Skeggs
*/
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
#include <engine/software.h>
#include <engine/disp.h>
-struct nvd0_disp_priv {
- struct nouveau_disp base;
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+ return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
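A small sketch (not part of the patch) of the RAMHT entry layout implied by the attach function above; the meaning of each field is an assumption drawn only from the shifts used here:

/* Illustrative only: the data written into the display RAMHT appears to be
 *   bits 31:27  owning channel id
 *   bits 26:9   object offset within the instmem block
 *   bit  0      valid
 */
static u32 nvd0_disp_ramht_data(int chid, u32 offset)
{
	return (chid << 27) | (offset << 9) | 0x00000001;
}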
+
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494, mast->push);
+ nv_wr32(priv, 0x610498, 0x00010000);
+ nv_wr32(priv, 0x61049c, 0x00000001);
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+ .ctor = nvd0_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_mast_init,
+ .fini = nvd0_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+ .ctor = nvd0_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 5 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+ .ctor = nvd0_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* activate channel */
+ nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+ .ctor = nvd0_disp_oimm_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+ .ctor = nvd0_disp_curs_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The below segments of code copying values from one register to
+ * another appear to inform EVO of the display capabilities or
+ * something similar.
+ */
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+ nv_wr32(priv, 0x6100ac, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x610090, 0x00000000);
+ nv_wr32(priv, 0x6100a0, 0x00000000);
+ nv_wr32(priv, 0x6100b0, 0x00000307);
+
+ return 0;
+}
+
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x6100b0, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+ .ctor = nvd0_disp_base_ctor,
+ .dtor = nvd0_disp_base_dtor,
+ .init = nvd0_disp_base_init,
+ .fini = nvd0_disp_base_fini,
+};
+
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+ { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
};
static struct nouveau_oclass
nvd0_disp_sclass[] = {
- {},
+ { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+ struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ u16 mask, type, data;
+
+ if (outp < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ outp -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
+ dcb->sorconf.link = mask;
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << outp;
+ mask |= 0x0100 << head;
+
+ data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+ if (!data)
+ return 0x0000;
+
+ return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+
+ data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ return nvbios_exec(&init) == 0;
+ }
+
+ return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
+ u32 ctrl, int id, u32 pclk)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data, conf;
+
+ data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
+ if (data == 0x0000)
+ return false;
+
+ switch (dcb.type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (nvbios_exec(&init))
+ return 0x0000;
+ return conf;
+ }
+ }
+
+ return 0x0000;
+}
+
+static void
+nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+ if (mcc & (1 << head))
+ exec_script(priv, head, i, mcc, 1);
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
static void
-nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+{
+ const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
+ const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+ const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+ const u32 hoff = (head * 0x800);
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 symbol = 100000;
+ const u32 TU = 64;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(priv, 0x612300 + soff);
+ u32 datarate, link_nr, link_bw, bits;
+ u64 ratio, value;
+
+ if ((conf & 0x3c0) == 0x180) bits = 30;
+ else if ((conf & 0x3c0) == 0x140) bits = 24;
+ else bits = 18;
+ datarate = (pclk * bits) / 8;
+
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ link_bw = (clksor & 0x007c0000) >> 18;
+ link_bw *= 27000;
+
+ ratio = datarate;
+ ratio *= symbol;
+ do_div(ratio, link_nr * link_bw);
+
+ value = (symbol - ratio) * TU;
+ value *= ratio;
+ do_div(value, symbol);
+ do_div(value, symbol);
+
+ value += 5;
+ value |= 0x08000000;
+
+ nv_wr32(priv, 0x616610 + hoff, value);
+}
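For the DisplayPort case the link rate above is decoded from the SOR clock control register rather than hardcoded; a hedged sketch (not part of the patch) of that decode:

/* Illustrative only: bits 22:18 of the SOR clock control register look
 * like the DP link symbol rate in 27MHz units, e.g. 0x06 -> 162000 and
 * 0x0a -> 270000 in the same kHz scale used for pclk.
 */
static u32 nvd0_sor_dp_link_bw(u32 clksor)
{
	return ((clksor & 0x007c0000) >> 18) * 27000;
}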
+
+static void
+nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ u32 pclk;
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+ if (mcc & (1 << head))
+ exec_script(priv, head, i, mcc, 2);
+ }
+
+ pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
+ if (pclk && (mask & 0x00010000)) {
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ }
+
+ nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
+ if (mcp & (1 << head)) {
+ if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
+ u32 addr, mask, data = 0x00000000;
+ if (i < 4) {
+ addr = 0x612280 + ((i - 0) * 0x800);
+ mask = 0xffffffff;
+ } else {
+ switch (mcp & 0x00000f00) {
+ case 0x00000800:
+ case 0x00000900:
+ nvd0_display_unk2_calc_tu(priv, head, i - 4);
+ break;
+ default:
+ break;
+ }
+
+ addr = 0x612300 + ((i - 4) * 0x800);
+ mask = 0x00000707;
+ if (cfg & 0x00000100)
+ data = 0x00000101;
+ }
+ nv_mask(priv, addr, mask, data);
+ }
+ break;
+ }
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ int pclk, i;
+
+ pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
+ if (mcp & (1 << head))
+ exec_clkcmp(priv, head, i, mcp, 1, pclk);
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
disp->vblank.notify(disp->vblank.data, crtc);
}
-static void
+void
nvd0_disp_intr(struct nouveau_subdev *subdev)
{
- struct nvd0_disp_priv *priv = (void *)subdev;
+ struct nv50_disp_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x610088);
int i;
- for (i = 0; i < 4; i++) {
+ if (intr & 0x00000001) {
+ u32 stat = nv_rd32(priv, 0x61008c);
+ nv_wr32(priv, 0x61008c, stat);
+ intr &= ~0x00000001;
+ }
+
+ if (intr & 0x00000002) {
+ u32 stat = nv_rd32(priv, 0x61009c);
+ int chid = ffs(stat) - 1;
+ if (chid >= 0) {
+ u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+ u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+ u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+ "0x%08x 0x%08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+ nv_wr32(priv, 0x61009c, (1 << chid));
+ nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+ }
+
+ intr &= ~0x00000002;
+ }
+
+ if (intr & 0x00100000) {
+ u32 stat = nv_rd32(priv, 0x6100ac);
+ u32 mask = 0, crtc = ~0;
+
+ while (!mask && ++crtc < priv->head.nr)
+ mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
+
+ if (stat & 0x00000001) {
+ nv_wr32(priv, 0x6100ac, 0x00000001);
+ nvd0_display_unk1_handler(priv, crtc, mask);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ nv_wr32(priv, 0x6100ac, 0x00000002);
+ nvd0_display_unk2_handler(priv, crtc, mask);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000004) {
+ nv_wr32(priv, 0x6100ac, 0x00000004);
+ nvd0_display_unk4_handler(priv, crtc, mask);
+ stat &= ~0x00000004;
+ }
+
+ if (stat) {
+ nv_info(priv, "unknown intr24 0x%08x\n", stat);
+ nv_wr32(priv, 0x6100ac, stat);
+ }
+
+ intr &= ~0x00100000;
+ }
+
+ for (i = 0; i < priv->head.nr; i++) {
u32 mask = 0x01000000 << i;
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
static int
nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nvd0_disp_priv *priv;
+ struct nv50_disp_priv *priv;
int ret;
ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nvd0_disp_sclass;
+ nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ priv->sclass = nvd0_disp_sclass;
+ priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp_train = nvd0_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
INIT_LIST_HEAD(&priv->base.vblank.list);
spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass
nvd0_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0xd0),
+ .handle = NV_ENGINE(DISP, 0x90),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvd0_disp_ctor,
.dtor = _nouveau_disp_dtor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 000000000000..259537c4587e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+ { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+ { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nve0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ priv->sclass = nve0_disp_sclass;
+ priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp_train = nvd0_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x91),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644
index 000000000000..39b6b67732d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
+ const u32 soff = (or * 0x800);
+ nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+ nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+ nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
+ const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+ const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
+ const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
+ const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+ struct dcb_output outp;
+ u8 ver, hdr;
+ u32 data;
+ int ret = -EINVAL;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+ data = *(u32 *)args;
+
+ if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
+ return -ENODEV;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_SOR_PWR:
+ ret = priv->sor.power(priv, or, data);
+ break;
+ case NVA3_DISP_SOR_HDA_ELD:
+ ret = priv->sor.hda_eld(priv, or, args, size);
+ break;
+ case NV84_DISP_SOR_HDMI_PWR:
+ ret = priv->sor.hdmi(priv, head, or, data);
+ break;
+ case NV50_DISP_SOR_LVDS_SCRIPT:
+ priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
+ ret = 0;
+ break;
+ case NV94_DISP_SOR_DP_TRAIN:
+ switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
+ case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
+ ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
+ ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
+ ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
+ break;
+ default:
+ break;
+ }
+ break;
+ case NV94_DISP_SOR_DP_LNKCTL:
+ ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_DRVCTL(0):
+ case NV94_DISP_SOR_DP_DRVCTL(1):
+ case NV94_DISP_SOR_DP_DRVCTL(2):
+ case NV94_DISP_SOR_DP_DRVCTL(3):
+ ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
+ type, mask, data, &outp);
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
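
nv50_sor_mthd() above is the shared dispatcher for all SOR methods: it unpacks the method offset into output type, head, link and OR index, builds a DCB match mask from them, and then calls whichever priv->sor.* hook the chipset constructor installed. A minimal, self-contained sketch of just that decode step follows; the mask values here are assumptions that mirror the shifts above, not the driver's real class.h definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout, inferred from the shifts in nv50_sor_mthd(). */
#define SOR_MTHD_TYPE 0x0000f000u  /* output type, >> 12 */
#define SOR_MTHD_HEAD 0x00000018u  /* head index,  >> 3  */
#define SOR_MTHD_LINK 0x00000004u  /* link A/B,    >> 2  */
#define SOR_MTHD_OR   0x00000003u  /* SOR index          */

int main(void)
{
	uint32_t mthd = 0x00000016;  /* hypothetical method offset */
	uint16_t type = (mthd & SOR_MTHD_TYPE) >> 12;
	uint8_t  head = (mthd & SOR_MTHD_HEAD) >> 3;
	uint8_t  link = (mthd & SOR_MTHD_LINK) >> 2;
	uint8_t  sor  = (mthd & SOR_MTHD_OR);
	/* DCB match mask, built exactly as in the function above */
	uint16_t mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << sor);

	printf("type %u head %u link %u or %u mask 0x%04x\n",
	       type, head, link, sor, mask);
	return 0;
}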
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644
index 000000000000..f6edd009762e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 nv94[] = { 16, 8, 0, 24 };
+ if (nv_device(priv)->chipset == 0xaf)
+ return nvaf[lane];
+ return nv94[lane];
+}
+
+int
+nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_dpout info;
+ u8 ver, hdr, cnt, len;
+ u16 outp;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
+ init.offset = info.script[2];
+ else
+ init.offset = info.script[3];
+ nvbios_exec(&init);
+
+ init.offset = info.script[0];
+ nvbios_exec(&init);
+ }
+
+ return 0;
+}
+
+int
+nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_dpout info;
+ u8 ver, hdr, cnt, len;
+ u16 outp;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[1],
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+
+ return 0;
+}
+
+int
+nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+ u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+ nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+ return 0;
+}
+
+int
+nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+ u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+ u32 outp, lane = 0;
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ int i;
+
+ /* -> 10 kHz units */
+ link_bw *= 2700;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp && info.lnkcmp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ while (link_bw < nv_ro16(bios, info.lnkcmp))
+ info.lnkcmp += 4;
+ init.offset = nv_ro16(bios, info.lnkcmp + 2);
+
+ nvbios_exec(&init);
+ }
+
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ dpctrl |= 0x00004000;
+ if (link_bw > 16200)
+ clksor |= 0x00040000;
+
+ for (i = 0; i < link_nr; i++)
+ lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
+ nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+ return 0;
+}
+
+int
+nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+ const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout outp;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+ nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+ nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ return 0;
+}
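
Both nv94 DP paths above rely on the same lane swizzle: nv94_sor_dp_lane_map() returns the bit offset of a physical lane inside the 8-bits-per-lane drive/preemphasis registers, and nv94_sor_dp_lnkctl() folds that offset back into a 4-bit lane-enable field with 1 << (offset >> 3). A small worked example follows; the chipset and lane count are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Mirrors nv94_sor_dp_lane_map() above. */
static uint32_t lane_map(int chipset, uint8_t lane)
{
	static const uint8_t nvaf[] = { 24, 16, 8, 0 };
	static const uint8_t nv94[] = { 16, 8, 0, 24 };
	return chipset == 0xaf ? nvaf[lane] : nv94[lane];
}

int main(void)
{
	int link_nr = 2;            /* hypothetical 2-lane link */
	uint32_t lanes = 0;
	int i;

	for (i = 0; i < link_nr; i++) {
		uint32_t shift = lane_map(0x94, i);
		lanes |= 1 << (shift >> 3);  /* 8 bits per lane -> lane index */
		printf("lane %d drives bits [%u:%u]\n", i, shift + 7, shift);
	}
	printf("0x61c130 lane enable = 0x%x\n", lanes); /* 0x6 in this case */
	return 0;
}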
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644
index 000000000000..c37ce7e29f5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ static const u8 nvd0[] = { 16, 8, 0, 24 };
+ return nvd0[lane];
+}
+
+int
+nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+ u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+ nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+ return 0;
+}
+
+int
+nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+ const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+ u32 outp, lane = 0;
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ int i;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp && info.lnkcmp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ while (nv_ro08(bios, info.lnkcmp) < link_bw)
+ info.lnkcmp += 3;
+ init.offset = nv_ro16(bios, info.lnkcmp + 1);
+
+ nvbios_exec(&init);
+ }
+
+ clksor |= link_bw << 18;
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ dpctrl |= 0x00004000;
+
+ for (i = 0; i < link_nr; i++)
+ lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
+ nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+ return 0;
+}
+
+int
+nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+ const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout outp;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+ nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+ nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index e1f013d39768..5103e88d1877 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -28,37 +28,39 @@
#include <subdev/fb.h>
#include <engine/dmaobj.h>
-int
-nouveau_dmaobj_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- void *data, u32 size, int len, void **pobject)
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nouveau_dmaobj *dmaobj;
+ struct nouveau_gpuobj *gpuobj;
struct nv_dma_class *args = data;
- struct nouveau_dmaobj *object;
int ret;
if (size < sizeof(*args))
return -EINVAL;
- ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
- object = *pobject;
+ ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+ *pobject = nv_object(dmaobj);
if (ret)
return ret;
switch (args->flags & NV_DMA_TARGET_MASK) {
case NV_DMA_TARGET_VM:
- object->target = NV_MEM_TARGET_VM;
+ dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_TARGET_VRAM:
- object->target = NV_MEM_TARGET_VRAM;
+ dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_TARGET_PCI:
- object->target = NV_MEM_TARGET_PCI;
+ dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_TARGET_PCI_US:
case NV_DMA_TARGET_AGP:
- object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+ dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
switch (args->flags & NV_DMA_ACCESS_MASK) {
case NV_DMA_ACCESS_VM:
- object->access = NV_MEM_ACCESS_VM;
+ dmaobj->access = NV_MEM_ACCESS_VM;
break;
case NV_DMA_ACCESS_RD:
- object->access = NV_MEM_ACCESS_RO;
+ dmaobj->access = NV_MEM_ACCESS_RO;
break;
case NV_DMA_ACCESS_WR:
- object->access = NV_MEM_ACCESS_WO;
+ dmaobj->access = NV_MEM_ACCESS_WO;
break;
case NV_DMA_ACCESS_RDWR:
- object->access = NV_MEM_ACCESS_RW;
+ dmaobj->access = NV_MEM_ACCESS_RW;
break;
default:
return -EINVAL;
}
- object->start = args->start;
- object->limit = args->limit;
- return 0;
+ dmaobj->start = args->start;
+ dmaobj->limit = args->limit;
+ dmaobj->conf0 = args->conf0;
+
+ switch (nv_mclass(parent)) {
+ case NV_DEVICE_CLASS:
+ /* delayed, or no, binding */
+ break;
+ default:
+ ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+ if (ret == 0) {
+ nouveau_object_ref(NULL, pobject);
+ *pobject = nv_object(gpuobj);
+ }
+ break;
+ }
+
+ return ret;
}
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+ .ctor = nouveau_dmaobj_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+ { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ {}
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 9f4cc2f31994..027d8217c0fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nv04_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
u32 length = dmaobj->limit - dmaobj->start;
int ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NV03_CHANNEL_DMA_CLASS:
+ case NV10_CHANNEL_DMA_CLASS:
+ case NV17_CHANNEL_DMA_CLASS:
+ case NV40_CHANNEL_DMA_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
if (dmaobj->target == NV_MEM_TARGET_VM) {
if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -106,56 +114,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
}
static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv04_dmaobj_priv *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_dmaobj_create(parent, engine, oclass,
- data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- break;
- case NV03_CHANNEL_DMA_CLASS:
- case NV10_CHANNEL_DMA_CLASS:
- case NV17_CHANNEL_DMA_CLASS:
- case NV40_CHANNEL_DMA_CLASS:
- ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct nouveau_ofuncs
-nv04_dmaobj_ofuncs = {
- .ctor = nv04_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv04_dmaobj_sclass[] = {
- { 0x0002, &nv04_dmaobj_ofuncs },
- { 0x0003, &nv04_dmaobj_ofuncs },
- { 0x003d, &nv04_dmaobj_ofuncs },
- {}
-};
-
-static int
nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nv04_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
priv->base.bind = nv04_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 045d2565e289..750183f7c057 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nv50_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
struct nouveau_object *parent,
struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags = nv_mclass(dmaobj);
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags5 = 0x00000000;
int ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NV50_CHANNEL_DMA_CLASS:
+ case NV84_CHANNEL_DMA_CLASS:
+ case NV50_CHANNEL_IND_CLASS:
+ case NV84_CHANNEL_IND_CLASS:
+ case NV50_DISP_MAST_CLASS:
+ case NV84_DISP_MAST_CLASS:
+ case NV94_DISP_MAST_CLASS:
+ case NVA0_DISP_MAST_CLASS:
+ case NVA3_DISP_MAST_CLASS:
+ case NV50_DISP_SYNC_CLASS:
+ case NV84_DISP_SYNC_CLASS:
+ case NV94_DISP_SYNC_CLASS:
+ case NVA0_DISP_SYNC_CLASS:
+ case NVA3_DISP_SYNC_CLASS:
+ case NV50_DISP_OVLY_CLASS:
+ case NV84_DISP_OVLY_CLASS:
+ case NV94_DISP_OVLY_CLASS:
+ case NVA0_DISP_OVLY_CLASS:
+ case NVA3_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+ } else {
+ dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US;
+ dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+ dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+ dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+ }
+ }
+
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+ flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
switch (dmaobj->target) {
case NV_MEM_TARGET_VM:
- flags |= 0x00000000;
- flags |= 0x60000000; /* COMPRESSION_USEVM */
- flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+ flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
- flags |= 0x00010000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
- flags |= 0x00020000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags |= 0x00030000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00030000;
break;
default:
return -EINVAL;
@@ -71,79 +109,29 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
- flags |= 0x00040000;
+ flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
- flags |= 0x00080000;
+ flags0 |= 0x00080000;
break;
}
ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags);
+ nv_wo32(*pgpuobj, 0x00, flags0);
nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
upper_32_bits(dmaobj->start));
nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, flags5);
}
return ret;
}
static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv50_dmaobj_priv *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_dmaobj_create(parent, engine, oclass,
- data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- break;
- case NV50_CHANNEL_DMA_CLASS:
- case NV84_CHANNEL_DMA_CLASS:
- case NV50_CHANNEL_IND_CLASS:
- case NV84_CHANNEL_IND_CLASS:
- ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct nouveau_ofuncs
-nv50_dmaobj_ofuncs = {
- .ctor = nv50_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv50_dmaobj_sclass[] = {
- { 0x0002, &nv50_dmaobj_ofuncs },
- { 0x0003, &nv50_dmaobj_ofuncs },
- { 0x003d, &nv50_dmaobj_ofuncs },
- {}
-};
-
-static int
nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nv50_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
priv->base.bind = nv50_dmaobj_bind;
return 0;
}
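
The bind() hooks above all end by writing a six-word DMA object through nv_wo32(): flags0, the low halves of limit and start, the packed high bits, a zero word and flags5. The sketch below assembles the same layout in plain C for one illustrative case (VRAM target, read/write access, class 0x003d); the address window is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x0000000010000000ULL;  /* hypothetical window */
	uint64_t limit = 0x000000001fffffffULL;
	uint32_t flags0 = 0x0000003d             /* object class        */
			| 0x00010000             /* target: VRAM        */
			| 0x00080000;            /* access: read/write  */
	uint32_t flags5 = 0x00000000;            /* partition bits etc. */
	uint32_t obj[6] = {
		flags0,
		(uint32_t)limit,
		(uint32_t)start,
		(uint32_t)(((limit >> 32) << 24) | (start >> 32)),
		0x00000000,
		flags5,
	};
	int i;

	for (i = 0; i < 6; i++)
		printf("0x%02x: 0x%08x\n", i * 4, obj[i]);
	return 0;
}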
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index 5baa08695535..cd3970d03b80 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,7 +22,9 @@
* Authors: Ben Skeggs
*/
+#include <core/device.h>
#include <core/gpuobj.h>
+#include <core/class.h>
#include <subdev/fb.h>
#include <engine/dmaobj.h>
@@ -31,44 +33,85 @@ struct nvc0_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nvc0_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
{
- struct nvc0_dmaobj_priv *dmaobj;
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags5 = 0x00000000;
int ret;
- ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NVA3_DISP_MAST_CLASS:
+ case NVA3_DISP_SYNC_CLASS:
+ case NVA3_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return 0;
+
+ if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM;
+ dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+ } else {
+ dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US;
+ dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+ dmaobj->conf0 |= 0x00020000;
+ }
+ }
- if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+ flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+ flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+ flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VM:
+ flags0 |= 0x00000000;
+ break;
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
+ break;
+ default:
return -EINVAL;
+ }
- return 0;
-}
+ switch (dmaobj->access) {
+ case NV_MEM_ACCESS_VM:
+ break;
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00040000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ case NV_MEM_ACCESS_RW:
+ flags0 |= 0x00080000;
+ break;
+ }
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
- .ctor = nvc0_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0);
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+ upper_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, flags5);
+ }
-static struct nouveau_oclass
-nvc0_dmaobj_sclass[] = {
- { 0x0002, &nvc0_dmaobj_ofuncs },
- { 0x0003, &nvc0_dmaobj_ofuncs },
- { 0x003d, &nvc0_dmaobj_ofuncs },
- {}
-};
+ return ret;
+}
static int
nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nvc0_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+ priv->base.bind = nvc0_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644
index 000000000000..d1528752980c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvd0_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+static int
+nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ u32 flags0 = 0x00000000;
+ int ret;
+
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NVD0_DISP_MAST_CLASS:
+ case NVD0_DISP_SYNC_CLASS:
+ case NVD0_DISP_OVLY_CLASS:
+ case NVE0_DISP_MAST_CLASS:
+ case NVE0_DISP_SYNC_CLASS:
+ case NVE0_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return 0;
+
+ if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
+ dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+ } else {
+ dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
+ dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+ }
+ }
+
+ flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
+ flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00000009;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0);
+ nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
+ nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+ nv_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ }
+
+ return ret;
+}
+
+static int
+nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+ priv->base.bind = nvd0_dmaobj_bind;
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index bbb43c67c2ae..c2b9db335816 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -24,6 +24,7 @@
#include <core/object.h>
#include <core/handle.h>
+#include <core/class.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
int bar, u32 addr, u32 size, u32 pushbuf,
- u32 engmask, int len, void **ptr)
+ u64 engmask, int len, void **ptr)
{
struct nouveau_device *device = nv_device(engine);
struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
dmaeng = (void *)chan->pushdma->base.engine;
switch (chan->pushdma->base.oclass->handle) {
- case 0x0002:
- case 0x003d:
+ case NV_DMA_FROM_MEMORY_CLASS:
+ case NV_DMA_IN_MEMORY_CLASS:
break;
default:
return -EINVAL;
}
- if (dmaeng->bind) {
- ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
- if (ret)
- return ret;
- }
+ ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+ if (ret)
+ return ret;
/* find a free fifo channel */
spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
}
u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_fifo_chan *chan = (void *)object;
return ioread32_native(chan->user + addr);
}
void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_fifo_chan *chan = (void *)object;
iowrite32_native(data, chan->user + addr);
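
nouveau_fifo_channel_create_() now takes the allowed-engine mask as a u64, and the per-chipset constructors below switch their (1 << NVDEV_ENGINE_*) terms to 1ULL to match. The point is that a plain int shift is only defined for counts below the width of int, so once engine indices can reach 32 the mask has to be built in 64 bits. A trivial illustration, with a hypothetical engine number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int engine = 40;                     /* hypothetical engine index    */
	uint64_t mask = 1ULL << engine;      /* well-defined: bit 40 is set  */
	/* uint32_t bad = 1 << engine;         undefined: shift >= int width */
	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}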
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index ea76e3e8c9c2..a47a8548f9e0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
- nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+ nv_error(priv, "CACHE_ERROR - Ch %d/%d "
"Mthd 0x%04x Data 0x%08x\n",
chid, (mthd >> 13) & 7, mthd & 0x1ffc,
data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
u32 ib_get = nv_rd32(priv, 0x003334);
u32 ib_put = nv_rd32(priv, 0x003330);
- nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x003334, ib_put);
}
} else {
- nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+ nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
if (device->card_type == NV_50) {
if (status & 0x00000010) {
- nv50_fb_trap(nouveau_fb(priv), 1);
status &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
}
if (status) {
- nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+ nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
status, chid);
nv_wr32(priv, NV03_PFIFO_INTR_0, status);
status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (status) {
- nv_info(priv, "still angry after %d spins, halt\n", cnt);
+ nv_error(priv, "still angry after %d spins, halt\n", cnt);
nv_wr32(priv, 0x002140, 0);
nv_wr32(priv, 0x000140, 0);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 4ba75422b89d..2c927c1d173b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index b96e6b0ae2b1..a9cb51d38c57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
&chan);
*pobject = nv_object(chan);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 559c3b4e1b86..2b1f91721225 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x1000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 536e7634a00d..bd096364f680 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EINVAL;
}
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
-
/* HW bug workaround:
*
* PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
if (suspend)
ret = -EBUSY;
}
-
nv_wr32(priv, 0x00b860, me);
+
+ if (ret == 0) {
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+ }
+
return ret;
}
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index b4fd26d8f166..1eb1c512f503 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EINVAL;
}
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
-
save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
if (suspend)
return -EBUSY;
}
+
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
return 0;
}
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG) |
- (1 << NVDEV_ENGINE_ME) |
- (1 << NVDEV_ENGINE_VP) |
- (1 << NVDEV_ENGINE_CRYPT) |
- (1 << NVDEV_ENGINE_BSP) |
- (1 << NVDEV_ENGINE_PPP) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CRYPT) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_PPP) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG) |
- (1 << NVDEV_ENGINE_ME) |
- (1 << NVDEV_ENGINE_VP) |
- (1 << NVDEV_ENGINE_CRYPT) |
- (1 << NVDEV_ENGINE_BSP) |
- (1 << NVDEV_ENGINE_PPP) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CRYPT) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_PPP) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 6f21be600557..b4365dde1859 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
-
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EBUSY;
}
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
return 0;
}
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x1000,
args->pushbuf,
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_COPY1), &chan);
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_COPY1) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_PPP), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000100) {
- nv_info(priv, "unknown status 0x00000100\n");
+ nv_warn(priv, "unknown status 0x00000100\n");
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 36e81b6fafbc..c930da99c2c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -38,12 +38,12 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#define _(a,b) { (a), ((1 << (a)) | (b)) }
+#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
- int subdev;
- u32 mask;
+ u64 subdev;
+ u64 mask;
} fifo_engine[] = {
- _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)),
+ _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)),
_(NVDEV_ENGINE_VP , 0),
_(NVDEV_ENGINE_PPP , 0),
_(NVDEV_ENGINE_BSP , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
-
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EBUSY;
}
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
index 7b715fda2763..62ab231cd6b6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -57,6 +57,11 @@ chipsets:
.b16 #nve4_gpc_mmio_tail
.b16 #nve4_tpc_mmio_head
.b16 #nve4_tpc_mmio_tail
+.b8 0xe6 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 26c2165bad0f..09ee4702c8b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -34,13 +34,16 @@ uint32_t nve0_grgpc_data[] = {
0x00000000,
/* 0x0064: chipsets */
0x000000e4,
- 0x01040080,
- 0x014c0104,
+ 0x0110008c,
+ 0x01580110,
0x000000e7,
- 0x01040080,
- 0x014c0104,
+ 0x0110008c,
+ 0x01580110,
+ 0x000000e6,
+ 0x0110008c,
+ 0x01580110,
0x00000000,
-/* 0x0080: nve4_gpc_mmio_head */
+/* 0x008c: nve4_gpc_mmio_head */
0x00000380,
0x04000400,
0x0800040c,
@@ -74,8 +77,8 @@ uint32_t nve0_grgpc_data[] = {
0x14003100,
0x000031d0,
0x040031e0,
-/* 0x0104: nve4_gpc_mmio_tail */
-/* 0x0104: nve4_tpc_mmio_head */
+/* 0x0110: nve4_gpc_mmio_tail */
+/* 0x0110: nve4_tpc_mmio_head */
0x00000048,
0x00000064,
0x00000088,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index acfc457654bd..0bcfa4d447e5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -754,6 +754,16 @@ ctx_mmio_exec:
// on load it means: "a save preceded this load"
//
ctx_xfer:
+ // according to mwk, some kind of wait for idle
+ mov $r15 0xc00
+ shl b32 $r15 6
+ mov $r14 4
+ iowr I[$r15 + 0x200] $r14
+ ctx_xfer_idle:
+ iord $r14 I[$r15 + 0x000]
+ and $r14 0x2000
+ bra ne #ctx_xfer_idle
+
bra not $p1 #ctx_xfer_pre
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
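
The new ctx_xfer prologue (mirrored in hubnve0.fuc further down) pokes a request register and then spins until a busy bit clears; per the in-code comment, it is some kind of wait for idle. Below is a rough C rendering of that loop, with invented helper names and stubbed I/O so it stands alone.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the falcon I/O space; purely illustrative stubs. */
static uint32_t io_rd(uint32_t addr) { (void)addr; return 0; }
static void io_wr(uint32_t addr, uint32_t data)
{
	printf("iowr [0x%06x] <- 0x%x\n", addr, data);
}

static void ctx_xfer_wait_idle(void)
{
	const uint32_t base = 0xc00 << 6;      /* mov $r15 0xc00; shl b32 6 */
	io_wr(base + 0x200, 4);                /* iowr I[$r15 + 0x200], 4   */
	while (io_rd(base + 0x000) & 0x2000)   /* spin while bit 13 is set  */
		;
}

int main(void)
{
	ctx_xfer_wait_idle();
	return 0;
}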
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index 85a8d556f484..bb03d2a1d57b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -799,79 +799,80 @@ uint32_t nvc0_grhub_code[] = {
0x01fa0613,
0xf803f806,
/* 0x0829: ctx_xfer */
- 0x0611f400,
-/* 0x082f: ctx_xfer_pre */
- 0xf01102f4,
- 0x21f510f7,
- 0x21f50698,
- 0x11f40631,
-/* 0x083d: ctx_xfer_pre_load */
- 0x02f7f01c,
- 0x065721f5,
- 0x066621f5,
- 0x067821f5,
- 0x21f5f4bd,
- 0x21f50657,
-/* 0x0856: ctx_xfer_exec */
- 0x019806b8,
- 0x1427f116,
- 0x0624b604,
- 0xf10020d0,
- 0xf0a500e7,
- 0x1fb941e3,
- 0x8d21f402,
- 0xf004e0b6,
- 0x2cf001fc,
- 0x0124b602,
- 0xf405f2fd,
- 0x17f18d21,
- 0x13f04afc,
- 0x0c27f002,
- 0xf50012d0,
- 0xf1020721,
- 0xf047fc27,
- 0x20d00223,
- 0x012cf000,
- 0xd00320b6,
- 0xacf00012,
- 0x06a5f001,
- 0x9800b7f0,
- 0x0d98140c,
- 0x00e7f015,
- 0x015c21f5,
- 0xf508a7f0,
- 0xf5010321,
- 0xf4020721,
- 0xa7f02201,
- 0xc921f40c,
- 0x0a1017f1,
- 0xf00614b6,
- 0x12d00527,
-/* 0x08dd: ctx_xfer_post_save_wait */
- 0x0012cf00,
- 0xf40522fd,
- 0x02f4fa1b,
-/* 0x08e9: ctx_xfer_post */
- 0x02f7f032,
- 0x065721f5,
- 0x21f5f4bd,
- 0x21f50698,
- 0x21f50226,
- 0xf4bd0666,
- 0x065721f5,
- 0x981011f4,
- 0x11fd8001,
- 0x070bf405,
- 0x07df21f5,
-/* 0x0914: ctx_xfer_no_post_mmio */
- 0x064921f5,
-/* 0x0918: ctx_xfer_done */
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x00f7f100,
+ 0x06f4b60c,
+ 0xd004e7f0,
+/* 0x0836: ctx_xfer_idle */
+ 0xfecf80fe,
+ 0x00e4f100,
+ 0xf91bf420,
+ 0xf40611f4,
+/* 0x0846: ctx_xfer_pre */
+ 0xf7f01102,
+ 0x9821f510,
+ 0x3121f506,
+ 0x1c11f406,
+/* 0x0854: ctx_xfer_pre_load */
+ 0xf502f7f0,
+ 0xf5065721,
+ 0xf5066621,
+ 0xbd067821,
+ 0x5721f5f4,
+ 0xb821f506,
+/* 0x086d: ctx_xfer_exec */
+ 0x16019806,
+ 0x041427f1,
+ 0xd00624b6,
+ 0xe7f10020,
+ 0xe3f0a500,
+ 0x021fb941,
+ 0xb68d21f4,
+ 0xfcf004e0,
+ 0x022cf001,
+ 0xfd0124b6,
+ 0x21f405f2,
+ 0xfc17f18d,
+ 0x0213f04a,
+ 0xd00c27f0,
+ 0x21f50012,
+ 0x27f10207,
+ 0x23f047fc,
+ 0x0020d002,
+ 0xb6012cf0,
+ 0x12d00320,
+ 0x01acf000,
+ 0xf006a5f0,
+ 0x0c9800b7,
+ 0x150d9814,
+ 0xf500e7f0,
+ 0xf0015c21,
+ 0x21f508a7,
+ 0x21f50103,
+ 0x01f40207,
+ 0x0ca7f022,
+ 0xf1c921f4,
+ 0xb60a1017,
+ 0x27f00614,
+ 0x0012d005,
+/* 0x08f4: ctx_xfer_post_save_wait */
+ 0xfd0012cf,
+ 0x1bf40522,
+ 0x3202f4fa,
+/* 0x0900: ctx_xfer_post */
+ 0xf502f7f0,
+ 0xbd065721,
+ 0x9821f5f4,
+ 0x2621f506,
+ 0x6621f502,
+ 0xf5f4bd06,
+ 0xf4065721,
+ 0x01981011,
+ 0x0511fd80,
+ 0xf5070bf4,
+/* 0x092b: ctx_xfer_no_post_mmio */
+ 0xf507df21,
+/* 0x092f: ctx_xfer_done */
+ 0xf8064921,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
index 138eeaa28665..7fe9d7cf486b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -44,6 +44,9 @@ chipsets:
.b8 0xe7 0 0 0
.b16 #nve4_hub_mmio_head
.b16 #nve4_hub_mmio_tail
+.b8 0xe6 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
.b8 0 0 0 0
nve4_hub_mmio_head:
@@ -680,6 +683,16 @@ ctx_mmio_exec:
// on load it means: "a save preceeded this load"
//
ctx_xfer:
+ // according to mwk, some kind of wait for idle
+ mov $r15 0xc00
+ shl b32 $r15 6
+ mov $r14 4
+ iowr I[$r15 + 0x200] $r14
+ ctx_xfer_idle:
+ iord $r14 I[$r15 + 0x000]
+ and $r14 0x2000
+ bra ne #ctx_xfer_idle
+
bra not $p1 #ctx_xfer_pre
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index decf0c60ca3b..e3421af68ab9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -30,11 +30,13 @@ uint32_t nve0_grhub_data[] = {
0x00000000,
/* 0x005c: chipsets */
0x000000e4,
- 0x013c0070,
+ 0x01440078,
0x000000e7,
- 0x013c0070,
+ 0x01440078,
+ 0x000000e6,
+ 0x01440078,
0x00000000,
-/* 0x0070: nve4_hub_mmio_head */
+/* 0x0078: nve4_hub_mmio_head */
0x0417e91c,
0x04400204,
0x18404010,
@@ -86,9 +88,7 @@ uint32_t nve0_grhub_data[] = {
0x00408840,
0x08408900,
0x00408980,
-/* 0x013c: nve4_hub_mmio_tail */
- 0x00000000,
- 0x00000000,
+/* 0x0144: nve4_hub_mmio_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -781,77 +781,78 @@ uint32_t nve0_grhub_code[] = {
0x0613f002,
0xf80601fa,
/* 0x07fb: ctx_xfer */
- 0xf400f803,
- 0x02f40611,
-/* 0x0801: ctx_xfer_pre */
- 0x10f7f00d,
- 0x067221f5,
-/* 0x080b: ctx_xfer_pre_load */
- 0xf01c11f4,
- 0x21f502f7,
- 0x21f50631,
- 0x21f50640,
- 0xf4bd0652,
- 0x063121f5,
- 0x069221f5,
-/* 0x0824: ctx_xfer_exec */
- 0xf1160198,
- 0xb6041427,
- 0x20d00624,
- 0x00e7f100,
- 0x41e3f0a5,
- 0xf4021fb9,
- 0xe0b68d21,
- 0x01fcf004,
- 0xb6022cf0,
- 0xf2fd0124,
- 0x8d21f405,
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x0721f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f006a5,
- 0x140c9800,
- 0xf0150d98,
- 0x21f500e7,
- 0xa7f0015c,
- 0x0321f508,
- 0x0721f501,
- 0x2201f402,
- 0xf40ca7f0,
- 0x17f1c921,
- 0x14b60a10,
- 0x0527f006,
-/* 0x08ab: ctx_xfer_post_save_wait */
- 0xcf0012d0,
- 0x22fd0012,
- 0xfa1bf405,
-/* 0x08b7: ctx_xfer_post */
- 0xf02e02f4,
- 0x21f502f7,
- 0xf4bd0631,
- 0x067221f5,
- 0x022621f5,
- 0x064021f5,
- 0x21f5f4bd,
- 0x11f40631,
- 0x80019810,
- 0xf40511fd,
- 0x21f5070b,
-/* 0x08e2: ctx_xfer_no_post_mmio */
-/* 0x08e2: ctx_xfer_done */
- 0x00f807b1,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0xf100f803,
+ 0xb60c00f7,
+ 0xe7f006f4,
+ 0x80fed004,
+/* 0x0808: ctx_xfer_idle */
+ 0xf100fecf,
+ 0xf42000e4,
+ 0x11f4f91b,
+ 0x0d02f406,
+/* 0x0818: ctx_xfer_pre */
+ 0xf510f7f0,
+ 0xf4067221,
+/* 0x0822: ctx_xfer_pre_load */
+ 0xf7f01c11,
+ 0x3121f502,
+ 0x4021f506,
+ 0x5221f506,
+ 0xf5f4bd06,
+ 0xf5063121,
+/* 0x083b: ctx_xfer_exec */
+ 0x98069221,
+ 0x27f11601,
+ 0x24b60414,
+ 0x0020d006,
+ 0xa500e7f1,
+ 0xb941e3f0,
+ 0x21f4021f,
+ 0x04e0b68d,
+ 0xf001fcf0,
+ 0x24b6022c,
+ 0x05f2fd01,
+ 0xf18d21f4,
+ 0xf04afc17,
+ 0x27f00213,
+ 0x0012d00c,
+ 0x020721f5,
+ 0x47fc27f1,
+ 0xd00223f0,
+ 0x2cf00020,
+ 0x0320b601,
+ 0xf00012d0,
+ 0xa5f001ac,
+ 0x00b7f006,
+ 0x98140c98,
+ 0xe7f0150d,
+ 0x5c21f500,
+ 0x08a7f001,
+ 0x010321f5,
+ 0x020721f5,
+ 0xf02201f4,
+ 0x21f40ca7,
+ 0x1017f1c9,
+ 0x0614b60a,
+ 0xd00527f0,
+/* 0x08c2: ctx_xfer_post_save_wait */
+ 0x12cf0012,
+ 0x0522fd00,
+ 0xf4fa1bf4,
+/* 0x08ce: ctx_xfer_post */
+ 0xf7f02e02,
+ 0x3121f502,
+ 0xf5f4bd06,
+ 0xf5067221,
+ 0xf5022621,
+ 0xbd064021,
+ 0x3121f5f4,
+ 0x1011f406,
+ 0xfd800198,
+ 0x0bf40511,
+ 0xb121f507,
+/* 0x08f9: ctx_xfer_no_post_mmio */
+/* 0x08f9: ctx_xfer_done */
+ 0x0000f807,
0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 618528248457..e30a9c5ff1fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv03_graph_gdi_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_patt },
- { 0x0188, nv04_graph_mthd_bind_rop },
- { 0x018c, nv04_graph_mthd_bind_beta1 },
- { 0x0190, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_gdi_omthds[] = {
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv01_graph_blit_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x019c, nv04_graph_mthd_bind_surf_src },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_blit_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_iifc_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_chroma },
- { 0x018c, nv01_graph_mthd_bind_clip },
- { 0x0190, nv04_graph_mthd_bind_patt },
- { 0x0194, nv04_graph_mthd_bind_rop },
- { 0x0198, nv04_graph_mthd_bind_beta1 },
- { 0x019c, nv04_graph_mthd_bind_beta4 },
- { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x03e4, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv01_graph_ifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_ifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv03_graph_sifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_sifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv03_graph_sifm_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x0304, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_sifm_omthds[] = {
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x0304, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_surf3d_omthds[] = {
- { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
- { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
{}
};
static struct nouveau_omthds
nv03_graph_ttri_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_surf_color },
- { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
{}
};
static struct nouveau_omthds
nv01_graph_prim_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_prim_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
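
Every omthds table in this file (and in the nv10/nv31/software files below) gains a second column: entries now carry a first and a last method offset instead of a single one, with start == end here because each handler still serves exactly one method. That implies the nouveau_omthds entry grew a start/end range so one handler can cover a span of methods. A sketch of the likely shape and lookup — the struct layout and helper name are assumptions, not taken from a header in this diff:

#include <linux/types.h>

struct nouveau_object;	/* opaque for this sketch */

/* assumed layout: { start, end, handler }, terminated by an empty entry */
struct nouveau_omthds {
	u32 start;
	u32 end;
	int (*call)(struct nouveau_object *, u32 mthd, void *data, u32 size);
};

/* range lookup replacing an exact match on a single method offset */
static const struct nouveau_omthds *
omthds_lookup(const struct nouveau_omthds *m, u32 mthd)
{
	for (; m->call; m++)
		if (mthd >= m->start && mthd <= m->end)
			return m;
	return NULL;
}
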
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 92521c89e77f..5c0f843ea249 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv17_celcius_omthds[] = {
- { 0x1638, nv17_graph_mthd_lma_window },
- { 0x163c, nv17_graph_mthd_lma_window },
- { 0x1640, nv17_graph_mthd_lma_window },
- { 0x1644, nv17_graph_mthd_lma_window },
- { 0x1658, nv17_graph_mthd_lma_enable },
+ { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
+ { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
+ { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
+ { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
+ { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 8f3f619c4a78..5b20401bf911 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
- if (nv_device(engine)->card_type == NV_20) {
+ if (nv_device(engine)->chipset != 0x34) {
nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
printk(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
printk("\n");
- nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, subc, class, mthd, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index cc6574eeb80e..0b36dd3deebd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
switch (nv_device(priv)->chipset) {
case 0x40:
- case 0x41: /* guess */
+ case 0x41:
case 0x42:
case 0x43:
- case 0x45: /* guess */
+ case 0x45:
case 0x4e:
nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (nv_device(priv)->chipset) {
+ case 0x40:
+ case 0x45:
+ nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ default:
+ break;
+ }
break;
case 0x44:
case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
break;
case 0x46:
+ case 0x4c:
case 0x47:
case 0x49:
case 0x4b:
- case 0x4c:
+ case 0x63:
case 0x67:
- default:
+ case 0x68:
nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (nv_device(priv)->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
break;
}
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index ab3b9dcaf478..b1c3d835b4c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
return 0;
}
+static const struct nouveau_bitfield nv50_pgraph_status[] = {
+ { 0x00000001, "BUSY" }, /* set when any bit is set */
+ { 0x00000002, "DISPATCH" },
+ { 0x00000004, "UNK2" },
+ { 0x00000008, "UNK3" },
+ { 0x00000010, "UNK4" },
+ { 0x00000020, "UNK5" },
+ { 0x00000040, "M2MF" },
+ { 0x00000080, "UNK7" },
+ { 0x00000100, "CTXPROG" },
+ { 0x00000200, "VFETCH" },
+ { 0x00000400, "CCACHE_UNK4" },
+ { 0x00000800, "STRMOUT_GSCHED_UNK5" },
+ { 0x00001000, "UNK14XX" },
+ { 0x00002000, "UNK24XX_CSCHED" },
+ { 0x00004000, "UNK1CXX" },
+ { 0x00008000, "CLIPID" },
+ { 0x00010000, "ZCULL" },
+ { 0x00020000, "ENG2D" },
+ { 0x00040000, "UNK34XX" },
+ { 0x00080000, "TPRAST" },
+ { 0x00100000, "TPROP" },
+ { 0x00200000, "TEX" },
+ { 0x00400000, "TPVP" },
+ { 0x00800000, "MP" },
+ { 0x01000000, "ROP" },
+ {}
+};
+
+static const char *const nv50_pgraph_vstatus_0[] = {
+ "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_1[] = {
+ "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_2[] = {
+ "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+ "ROP", NULL
+};
+
+static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
+ const char *const units[], u32 status)
+{
+ int i;
+
+ nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+
+ for (i = 0; units[i] && status; i++) {
+ if ((status & 7) == 1)
+ pr_cont(" %s", units[i]);
+ status >>= 3;
+ }
+ if (status)
+ pr_cont(" (invalid: 0x%x)", status);
+ pr_cont("\n");
+}
+
static int
nv84_graph_tlb_flush(struct nouveau_engine *engine)
{
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
!(timeout = ptimer->read(ptimer) - start > 2000000000));
if (timeout) {
- nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
- "0x%08x 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
- nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
+ nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
+
+ tmp = nv_rd32(priv, 0x400700);
+ nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
+ nouveau_bitfield_print(nv50_pgraph_status, tmp);
+ pr_cont("\n");
+
+ nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
+ nv_rd32(priv, 0x400380));
+ nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
+ nv_rd32(priv, 0x400384));
+ nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
+ nv_rd32(priv, 0x400388));
}
nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
}
if (ustatus) {
if (display)
- nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
}
nv_wr32(priv, ustatus_addr, 0xc0000000);
}
if (!tps && display)
- nv_info(priv, "%s - No TPs claiming errors?\n", name);
+ nv_warn(priv, "%s - No TPs claiming errors?\n", name);
}
static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv50_graph_intr_name, show);
printk("\n");
nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
"mthd 0x%04x data 0x%08x\n",
chid, (u64)inst << 12, subc, class, mthd, data);
- nv50_fb_trap(nouveau_fb(priv), 1);
}
if (nv_rd32(priv, 0x400824) & (1 << 31))
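
nouveau_pgraph_vstatus_print() above treats each PGRAPH_VSTATUS register as a packed array of 3-bit fields, one per name in the units[] table, and reports a unit when its field decodes to 1; anything left over after the named fields is dumped as invalid. A small standalone illustration of the same decode, using a made-up status value:

#include <stdio.h>
#include <stdint.h>

static const char *const vstatus0[] = {
	"VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
};

int main(void)
{
	uint32_t status = 0x00000009;	/* fields 0 and 1 both decode to 1 */
	int i;

	printf("PGRAPH_VSTATUS0: 0x%08x", status);
	for (i = 0; vstatus0[i] && status; i++) {
		if ((status & 7) == 1)
			printf(" %s", vstatus0[i]);
		status >>= 3;
	}
	printf("\n");	/* prints: PGRAPH_VSTATUS0: 0x00000009 VFETCH CCACHE */
	return 0;
}
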
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index c62f2d0f5f0a..45aff5f5085a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -516,18 +516,9 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
{
struct nouveau_device *device = nv_device(parent);
struct nvc0_graph_priv *priv;
- bool enable = true;
int ret, i;
- switch (device->chipset) {
- case 0xd9: /* known broken without binary driver firmware */
- enable = false;
- break;
- default:
- break;
- }
-
- ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -814,7 +805,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x41a100, 0x00000002);
nv_wr32(priv, 0x409100, 0x00000002);
if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
- nv_info(priv, "0x409800 wait failed\n");
+ nv_warn(priv, "0x409800 wait failed\n");
nv_wr32(priv, 0x409840, 0xffffffff);
nv_wr32(priv, 0x409500, 0x7fffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 18d2210e12eb..a1e78de46456 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -121,6 +121,7 @@ nvc0_graph_class(void *obj)
return 0x9297;
case 0xe4:
case 0xe7:
+ case 0xe6:
return 0xa097;
default:
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 539d4c72f192..9f82e9702b46 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -203,7 +203,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_graph_priv *priv;
int ret, i;
- ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -252,6 +252,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->magic_not_rop_nr = 1;
break;
case 0xe7:
+ case 0xe6:
priv->magic_not_rop_nr = 1;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
index 9c715a25cecb..fde8e24415e4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -205,6 +205,7 @@
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -216,6 +217,7 @@
#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i))
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
@@ -261,9 +263,12 @@
#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
+#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i))
#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
+#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i))
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 1f394a2629e7..9fd86375f4c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
static struct nouveau_omthds
nv31_mpeg_omthds[] = {
- { 0x0190, nv31_mpeg_mthd_dma },
- { 0x01a0, nv31_mpeg_mthd_dma },
- { 0x01b0, nv31_mpeg_mthd_dma },
+ { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
+ { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
+ { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 8678a9996d57..bc7d12b30fc1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x00b100, stat);
nv_wr32(priv, 0x00b230, 0x00000001);
- nv50_fb_trap(nouveau_fb(priv), 1);
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 50e7e0da1981..5a5b2a773ed7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -22,18 +22,18 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/engine.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/ppp.h>
struct nv98_ppp_priv {
- struct nouveau_ppp base;
+ struct nouveau_engine base;
};
struct nv98_ppp_chan {
- struct nouveau_ppp_chan base;
+ struct nouveau_engctx base;
};
/*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
* PPPP context
******************************************************************************/
-static int
-nv98_ppp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_ppp_chan *priv;
- int ret;
-
- ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv98_ppp_context_dtor(struct nouveau_object *object)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- nouveau_ppp_context_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_context_init(struct nouveau_object *object)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_ppp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- return nouveau_ppp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv98_ppp_cclass = {
.handle = NV_ENGCTX(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_ppp_context_ctor,
- .dtor = nv98_ppp_context_dtor,
- .init = nv98_ppp_context_init,
- .fini = nv98_ppp_context_fini,
- .rd32 = _nouveau_ppp_context_rd32,
- .wr32 = _nouveau_ppp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
* PPPP engine/subdev functions
******************************************************************************/
-static void
-nv98_ppp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_ppp_priv *priv;
int ret;
- ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PPPP", "ppp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00400002;
- nv_subdev(priv)->intr = nv98_ppp_intr;
nv_engine(priv)->cclass = &nv98_ppp_cclass;
nv_engine(priv)->sclass = nv98_ppp_sclass;
return 0;
}
-static void
-nv98_ppp_dtor(struct nouveau_object *object)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- nouveau_ppp_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_init(struct nouveau_object *object)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_ppp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv98_ppp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- return nouveau_ppp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv98_ppp_oclass = {
.handle = NV_ENGINE(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_ppp_ctor,
- .dtor = nv98_ppp_dtor,
- .init = nv98_ppp_init,
- .fini = nv98_ppp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644
index 000000000000..ebf0d860e2dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/ppp.h>
+
+struct nvc0_ppp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_sclass[] = {
+ { 0x90b3, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_cclass = {
+ .handle = NV_ENGCTX(PPP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_ppp_init(struct nouveau_object *object)
+{
+ struct nvc0_ppp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x086010, 0x0000fff2);
+ nv_wr32(priv, 0x08601c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_ppp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
+ "PPPP", "ppp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000002;
+ nv_engine(priv)->cclass = &nvc0_ppp_cclass;
+ nv_engine(priv)->sclass = nvc0_ppp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_ppp_oclass = {
+ .handle = NV_ENGINE(PPP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_ppp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_ppp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 3ca4c3aa90b7..2a859a31c30d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv04_software_omthds[] = {
- { 0x0150, nv04_software_set_ref },
- { 0x0500, nv04_software_flip },
+ { 0x0150, 0x0150, nv04_software_set_ref },
+ { 0x0500, 0x0500, nv04_software_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index 6e699afbfdb7..a019364b1e13 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv10_software_omthds[] = {
- { 0x0500, nv10_software_flip },
+ { 0x0500, 0x0500, nv10_software_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index a2edcd38544a..b0e7e1c01ce6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv50_software_omthds[] = {
- { 0x018c, nv50_software_mthd_dma_vblsem },
- { 0x0400, nv50_software_mthd_vblsem_offset },
- { 0x0404, nv50_software_mthd_vblsem_value },
- { 0x0408, nv50_software_mthd_vblsem_release },
- { 0x0500, nv50_software_mthd_flip },
+ { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
+ { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
+ { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nv50_software_mthd_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index b7b0d7e330d6..282a1cd1bc2f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nvc0_software_omthds[] = {
- { 0x0400, nvc0_software_mthd_vblsem_offset },
- { 0x0404, nvc0_software_mthd_vblsem_offset },
- { 0x0408, nvc0_software_mthd_vblsem_value },
- { 0x040c, nvc0_software_mthd_vblsem_release },
- { 0x0500, nvc0_software_mthd_flip },
+ { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
+ { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
+ { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nvc0_software_mthd_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index dd23c80e5405..261cd96e6951 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -22,18 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/vp.h>
struct nv84_vp_priv {
- struct nouveau_vp base;
-};
-
-struct nv84_vp_chan {
- struct nouveau_vp_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
* PVP context
******************************************************************************/
-static int
-nv84_vp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_vp_chan *priv;
- int ret;
-
- ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv84_vp_context_dtor(struct nouveau_object *object)
-{
- struct nv84_vp_chan *priv = (void *)object;
- nouveau_vp_context_destroy(&priv->base);
-}
-
-static int
-nv84_vp_context_init(struct nouveau_object *object)
-{
- struct nv84_vp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_vp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_vp_chan *priv = (void *)object;
- return nouveau_vp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv84_vp_cclass = {
.handle = NV_ENGCTX(VP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_vp_context_ctor,
- .dtor = nv84_vp_context_dtor,
- .init = nv84_vp_context_init,
- .fini = nv84_vp_context_fini,
- .rd32 = _nouveau_vp_context_rd32,
- .wr32 = _nouveau_vp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
* PVP engine/subdev functions
******************************************************************************/
-static void
-nv84_vp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_vp_priv *priv;
int ret;
- ret = nouveau_vp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PVP", "vp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x01020000;
- nv_subdev(priv)->intr = nv84_vp_intr;
nv_engine(priv)->cclass = &nv84_vp_cclass;
nv_engine(priv)->sclass = nv84_vp_sclass;
return 0;
}
-static void
-nv84_vp_dtor(struct nouveau_object *object)
-{
- struct nv84_vp_priv *priv = (void *)object;
- nouveau_vp_destroy(&priv->base);
-}
-
-static int
-nv84_vp_init(struct nouveau_object *object)
-{
- struct nv84_vp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_vp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_vp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_vp_priv *priv = (void *)object;
- return nouveau_vp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv84_vp_oclass = {
.handle = NV_ENGINE(VP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_vp_ctor,
- .dtor = nv84_vp_dtor,
- .init = nv84_vp_init,
- .fini = nv84_vp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644
index 000000000000..f761949d7039
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nvc0_vp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_sclass[] = {
+ { 0x90b2, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_vp_init(struct nouveau_object *object)
+{
+ struct nvc0_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000fff2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PVP", "vp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00020000;
+ nv_engine(priv)->cclass = &nvc0_vp_cclass;
+ nv_engine(priv)->sclass = nvc0_vp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_vp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644
index 000000000000..2384ce5dbe16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nve0_vp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_sclass[] = {
+ { 0x95b2, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_vp_init(struct nouveau_object *object)
+{
+ struct nve0_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000fff2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PVP", "vp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00020000;
+ nv_engine(priv)->cclass = &nve0_vp_cclass;
+ nv_engine(priv)->sclass = nve0_vp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_vp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 6180ae9800fc..47c4b3a5bd3a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -23,6 +23,7 @@
#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
struct nv_device_class {
u64 device; /* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
#define NV_DMA_ACCESS_WR 0x00000200
#define NV_DMA_ACCESS_RDWR 0x00000300
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE 0x80000000
+#define NV50_DMA_CONF0_PRIV 0x00300000
+#define NV50_DMA_CONF0_PRIV_VM 0x00000000
+#define NV50_DMA_CONF0_PRIV_US 0x00100000
+#define NV50_DMA_CONF0_PRIV__S 0x00200000
+#define NV50_DMA_CONF0_PART 0x00030000
+#define NV50_DMA_CONF0_PART_VM 0x00000000
+#define NV50_DMA_CONF0_PART_256 0x00010000
+#define NV50_DMA_CONF0_PART_1KB 0x00020000
+#define NV50_DMA_CONF0_COMP 0x00000180
+#define NV50_DMA_CONF0_COMP_NONE 0x00000000
+#define NV50_DMA_CONF0_COMP_VM 0x00000180
+#define NV50_DMA_CONF0_TYPE 0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE 0x80000000
+#define NVC0_DMA_CONF0_PRIV 0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
+#define NVC0_DMA_CONF0_PRIV_US 0x00100000
+#define NVC0_DMA_CONF0_PRIV__S 0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
+#define NVC0_DMA_CONF0_TYPE 0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE 0x80000000
+#define NVD0_DMA_CONF0_PAGE 0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
+#define NVD0_DMA_CONF0_TYPE 0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
+
struct nv_dma_class {
u32 flags;
u32 pad0;
u64 start;
u64 limit;
+ u32 conf0;
};
/* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
u32 engine;
};
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ */
+
+#define NV50_DISP_CLASS 0x00005070
+#define NV84_DISP_CLASS 0x00008270
+#define NVA0_DISP_CLASS 0x00008370
+#define NV94_DISP_CLASS 0x00008870
+#define NVA3_DISP_CLASS 0x00008570
+#define NVD0_DISP_CLASS 0x00009070
+#define NVE0_DISP_CLASS 0x00009170
+
+#define NV50_DISP_SOR_MTHD 0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
+#define NV50_DISP_SOR_MTHD_LINK 0x00000004
+#define NV50_DISP_SOR_MTHD_OR 0x00000003
+
+#define NV50_DISP_SOR_PWR 0x00010000
+#define NV50_DISP_SOR_PWR_STATE 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
+#define NVA3_DISP_SOR_HDA_ELD 0x00010100
+#define NV84_DISP_SOR_HDMI_PWR 0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
+#define NV94_DISP_SOR_DP_TRAIN 0x00016000
+#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
+#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
+#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
+#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
+#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
+
+#define NV50_DISP_DAC_MTHD 0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
+#define NV50_DISP_DAC_MTHD_OR 0x00000003
+
+#define NV50_DISP_DAC_PWR 0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
+#define NV50_DISP_DAC_PWR_DATA 0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
+#define NV50_DISP_DAC_PWR_STATE 0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
+#define NV50_DISP_DAC_LOAD 0x0002000c
+#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS 0x0000507a
+#define NV84_DISP_CURS_CLASS 0x0000827a
+#define NVA0_DISP_CURS_CLASS 0x0000837a
+#define NV94_DISP_CURS_CLASS 0x0000887a
+#define NVA3_DISP_CURS_CLASS 0x0000857a
+#define NVD0_DISP_CURS_CLASS 0x0000907a
+#define NVE0_DISP_CURS_CLASS 0x0000917a
+
+struct nv50_display_curs_class {
+ u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS 0x0000507b
+#define NV84_DISP_OIMM_CLASS 0x0000827b
+#define NVA0_DISP_OIMM_CLASS 0x0000837b
+#define NV94_DISP_OIMM_CLASS 0x0000887b
+#define NVA3_DISP_OIMM_CLASS 0x0000857b
+#define NVD0_DISP_OIMM_CLASS 0x0000907b
+#define NVE0_DISP_OIMM_CLASS 0x0000917b
+
+struct nv50_display_oimm_class {
+ u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS 0x0000507c
+#define NV84_DISP_SYNC_CLASS 0x0000827c
+#define NVA0_DISP_SYNC_CLASS 0x0000837c
+#define NV94_DISP_SYNC_CLASS 0x0000887c
+#define NVA3_DISP_SYNC_CLASS 0x0000857c
+#define NVD0_DISP_SYNC_CLASS 0x0000907c
+#define NVE0_DISP_SYNC_CLASS 0x0000917c
+
+struct nv50_display_sync_class {
+ u32 pushbuf;
+ u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS 0x0000507d
+#define NV84_DISP_MAST_CLASS 0x0000827d
+#define NVA0_DISP_MAST_CLASS 0x0000837d
+#define NV94_DISP_MAST_CLASS 0x0000887d
+#define NVA3_DISP_MAST_CLASS 0x0000857d
+#define NVD0_DISP_MAST_CLASS 0x0000907d
+#define NVE0_DISP_MAST_CLASS 0x0000917d
+
+struct nv50_display_mast_class {
+ u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS 0x0000507e
+#define NV84_DISP_OVLY_CLASS 0x0000827e
+#define NVA0_DISP_OVLY_CLASS 0x0000837e
+#define NV94_DISP_OVLY_CLASS 0x0000887e
+#define NVA3_DISP_OVLY_CLASS 0x0000857e
+#define NVD0_DISP_OVLY_CLASS 0x0000907e
+#define NVE0_DISP_OVLY_CLASS 0x0000917e
+
+struct nv50_display_ovly_class {
+ u32 pushbuf;
+ u32 head;
+};
+
#endif
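
Aside, not part of the patch: a minimal standalone sketch of how the DAC power-control bits defined in this header might compose into a method and data word. The assumption that the low bits of the method select the OR, and the chosen values, are illustrative only; just the bit layout comes from the defines above.

#include <stdint.h>
#include <stdio.h>

#define NV50_DISP_DAC_PWR            0x00020000
#define NV50_DISP_DAC_MTHD_OR        0x00000003
#define NV50_DISP_DAC_PWR_HSYNC_LO   0x00000001
#define NV50_DISP_DAC_PWR_VSYNC_LO   0x00000004
#define NV50_DISP_DAC_PWR_DATA_LO    0x00000010
#define NV50_DISP_DAC_PWR_STATE_OFF  0x00000040

int main(void)
{
	int or = 1;	/* hypothetical output resource index */

	/* assumption: the low bits of the method address the OR */
	uint32_t mthd = NV50_DISP_DAC_PWR + (or & NV50_DISP_DAC_MTHD_OR);

	/* drive syncs and data low, leave the DAC powered off */
	uint32_t data = NV50_DISP_DAC_PWR_HSYNC_LO |
			NV50_DISP_DAC_PWR_VSYNC_LO |
			NV50_DISP_DAC_PWR_DATA_LO |
			NV50_DISP_DAC_PWR_STATE_OFF;

	printf("mthd 0x%08x data 0x%08x\n", mthd, data);
	return 0;
}
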
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 0193532ceac9..63acc0346ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
int nouveau_client_create_(const char *name, u64 device, const char *cfg,
const char *dbg, int, void **);
+#define nouveau_client_destroy(p) \
+ nouveau_namedb_destroy(&(p)->base)
+
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
index 8a947b6872eb..2fd48b564c7d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
int nouveau_engctx_init(struct nouveau_engctx *);
int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
+int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
void _nouveau_engctx_dtor(struct nouveau_object *);
int _nouveau_engctx_init(struct nouveau_object *);
int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644
index 000000000000..1edec386ab36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
@@ -0,0 +1,81 @@
+#ifndef __NOUVEAU_FALCON_H__
+#define __NOUVEAU_FALCON_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+struct nouveau_falcon_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_falcon_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_falcon_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_falcon_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
+#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
+#define _nouveau_falcon_context_init _nouveau_engctx_init
+#define _nouveau_falcon_context_fini _nouveau_engctx_fini
+#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_falcon_data {
+ bool external;
+};
+
+struct nouveau_falcon {
+ struct nouveau_engine base;
+
+ u32 addr;
+ u8 version;
+ u8 secret;
+
+ struct nouveau_gpuobj *core;
+ bool external;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } code;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \
+ nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
+ sizeof(**r),(void **)r)
+#define nouveau_falcon_destroy(p) \
+ nouveau_engine_destroy(&(p)->base)
+#define nouveau_falcon_init(p) ({ \
+ struct nouveau_falcon *falcon = (p); \
+ _nouveau_falcon_init(nv_object(falcon)); \
+})
+#define nouveau_falcon_fini(p,s) ({ \
+ struct nouveau_falcon *falcon = (p); \
+ _nouveau_falcon_fini(nv_object(falcon), (s)); \
+})
+
+int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, bool, const char *,
+ const char *, int, void **);
+
+#define _nouveau_falcon_dtor _nouveau_engine_dtor
+int _nouveau_falcon_init(struct nouveau_object *);
+int _nouveau_falcon_fini(struct nouveau_object *, bool);
+u32 _nouveau_falcon_rd32(struct nouveau_object *, u64);
+void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
+
+#endif
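
Aside, not part of the patch: the nouveau_falcon_init/fini wrappers above use GCC statement expressions so the macro argument is type-checked before being downcast to a generic object. A minimal standalone sketch of the same pattern, with made-up types (requires GCC/Clang for the extension):

#include <stdio.h>

struct object { const char *name; };
struct falcon { struct object base; unsigned addr; };

static int object_init(struct object *obj)
{
	printf("init %s\n", obj->name);
	return 0;
}

/* assigning (p) to a typed local means passing anything other than a
 * struct falcon * fails to compile, yet the callee sees the base object */
#define falcon_init(p) ({                     \
	struct falcon *_falcon = (p);         \
	object_init(&_falcon->base);          \
})

int main(void)
{
	struct falcon f = { .base = { .name = "falcon" }, .addr = 0x084000 };
	return falcon_init(&f);
}
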
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
index 6eaff79377ae..b3b9ce4e9d38 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
void _nouveau_gpuobj_dtor(struct nouveau_object *);
int _nouveau_gpuobj_init(struct nouveau_object *);
int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 975137ba34a6..2514e81ade02 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -21,6 +21,12 @@ struct nouveau_mm {
int heap_nodes;
};
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+ return mm->block_size != 0;
+}
+
int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
int nouveau_mm_fini(struct nouveau_mm *);
int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
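
Aside, not part of the patch: the nouveau_mm_initialised() helper added above treats a non-zero block_size as proof that nouveau_mm_init() already ran. A tiny standalone sketch of the same guard, over a simplified struct:

#include <stdbool.h>
#include <stdio.h>

struct mm { unsigned block_size; };

static bool mm_initialised(struct mm *mm)
{
	return mm->block_size != 0;	/* set only by a successful init */
}

static void mm_init(struct mm *mm, unsigned block)
{
	if (mm_initialised(mm))		/* idempotent: skip a second init */
		return;
	mm->block_size = block;
}

int main(void)
{
	struct mm mm = { 0 };
	mm_init(&mm, 0x1000);
	mm_init(&mm, 0x2000);		/* ignored, already initialised */
	printf("block_size 0x%x\n", mm.block_size);
	return 0;
}
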
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 486f1a9217fd..106bb19fdd9a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend);
extern struct nouveau_ofuncs nouveau_object_ofuncs;
+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
struct nouveau_oclass {
u32 handle;
- struct nouveau_ofuncs *ofuncs;
- struct nouveau_omthds *omthds;
+ struct nouveau_ofuncs * const ofuncs;
+ struct nouveau_omthds * const omthds;
+ struct lock_class_key lock_class_key;
};
#define nv_oclass(o) nv_object(o)->oclass
@@ -70,7 +73,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
}
struct nouveau_omthds {
- u32 method;
+ u32 start;
+ u32 limit;
int (*call)(struct nouveau_object *, u32, void *, u32);
};
@@ -81,12 +85,12 @@ struct nouveau_ofuncs {
void (*dtor)(struct nouveau_object *);
int (*init)(struct nouveau_object *);
int (*fini)(struct nouveau_object *, bool suspend);
- u8 (*rd08)(struct nouveau_object *, u32 offset);
- u16 (*rd16)(struct nouveau_object *, u32 offset);
- u32 (*rd32)(struct nouveau_object *, u32 offset);
- void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
- void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
- void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+ u8 (*rd08)(struct nouveau_object *, u64 offset);
+ u16 (*rd16)(struct nouveau_object *, u64 offset);
+ u32 (*rd32)(struct nouveau_object *, u64 offset);
+ void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+ void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+ void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
};
static inline struct nouveau_ofuncs *
@@ -109,21 +113,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
void nouveau_object_debug(void);
static inline int
-nv_call(void *obj, u32 mthd, u32 data)
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
{
struct nouveau_omthds *method = nv_oclass(obj)->omthds;
while (method && method->call) {
- if (method->method == mthd)
- return method->call(obj, mthd, &data, sizeof(data));
+ if (mthd >= method->start && mthd <= method->limit)
+ return method->call(obj, mthd, data, size);
method++;
}
return -EINVAL;
}
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+ return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
static inline u8
-nv_ro08(void *obj, u32 addr)
+nv_ro08(void *obj, u64 addr)
{
u8 data = nv_ofuncs(obj)->rd08(obj, addr);
nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +141,7 @@ nv_ro08(void *obj, u32 addr)
}
static inline u16
-nv_ro16(void *obj, u32 addr)
+nv_ro16(void *obj, u64 addr)
{
u16 data = nv_ofuncs(obj)->rd16(obj, addr);
nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +149,7 @@ nv_ro16(void *obj, u32 addr)
}
static inline u32
-nv_ro32(void *obj, u32 addr)
+nv_ro32(void *obj, u64 addr)
{
u32 data = nv_ofuncs(obj)->rd32(obj, addr);
nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,28 +157,28 @@ nv_ro32(void *obj, u32 addr)
}
static inline void
-nv_wo08(void *obj, u32 addr, u8 data)
+nv_wo08(void *obj, u64 addr, u8 data)
{
nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
nv_ofuncs(obj)->wr08(obj, addr, data);
}
static inline void
-nv_wo16(void *obj, u32 addr, u16 data)
+nv_wo16(void *obj, u64 addr, u16 data)
{
nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
nv_ofuncs(obj)->wr16(obj, addr, data);
}
static inline void
-nv_wo32(void *obj, u32 addr, u32 data)
+nv_wo32(void *obj, u64 addr, u32 data)
{
nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
nv_ofuncs(obj)->wr32(obj, addr, data);
}
static inline u32
-nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
{
u32 temp = nv_ro32(obj, addr);
nv_wo32(obj, addr, (temp & ~mask) | data);
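
Aside, not part of the patch: the nv_exec() change above switches method lookup from exact matches to [start, limit] ranges, letting a single handler service a whole block of methods. A standalone sketch of that dispatch loop, with a hypothetical handler and method numbers:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

struct omthds {
	uint32_t start, limit;
	int (*call)(uint32_t mthd, void *data, uint32_t size);
};

static int dac_pwr(uint32_t mthd, void *data, uint32_t size)
{
	printf("DAC %u: pwr 0x%08x\n", mthd & 3, *(uint32_t *)data);
	return 0;
}

static struct omthds methods[] = {
	{ 0x00020000, 0x00020003, dac_pwr },	/* one entry covers all ORs */
	{ 0 }
};

static int exec(uint32_t mthd, void *data, uint32_t size)
{
	struct omthds *m = methods;
	while (m->call) {
		if (mthd >= m->start && mthd <= m->limit)
			return m->call(mthd, data, size);
		m++;
	}
	return -EINVAL;
}

int main(void)
{
	uint32_t arg = 0x45;
	return exec(0x00020001, &arg, sizeof(arg));	/* lands on dac_pwr, OR 1 */
}
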
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 3c2e940eb0f8..31cd852c96df 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -14,7 +14,7 @@ struct nouveau_parent {
struct nouveau_object base;
struct nouveau_sclass *sclass;
- u32 engine;
+ u64 engine;
int (*context_attach)(struct nouveau_object *,
struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
index 75d1ed5f85fd..13ccdf54dfad 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -1,45 +1,8 @@
#ifndef __NOUVEAU_BSP_H__
#define __NOUVEAU_BSP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_bsp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_bsp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_bsp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_bsp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_bsp_context_init _nouveau_engctx_init
-#define _nouveau_bsp_context_fini _nouveau_engctx_fini
-#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_bsp {
- struct nouveau_engine base;
-};
-
-#define nouveau_bsp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
-#define nouveau_bsp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_bsp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_bsp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_dtor _nouveau_engine_dtor
-#define _nouveau_bsp_init _nouveau_engine_init
-#define _nouveau_bsp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
index 70b9d8c5fcf5..8cad2cf28cef 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/copy.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -1,44 +1,7 @@
#ifndef __NOUVEAU_COPY_H__
#define __NOUVEAU_COPY_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_copy_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_copy_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_copy_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_copy_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
-#define _nouveau_copy_context_init _nouveau_engctx_init
-#define _nouveau_copy_context_fini _nouveau_engctx_fini
-#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_copy {
- struct nouveau_engine base;
-};
-
-#define nouveau_copy_create(p,e,c,y,i,d) \
- nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
-#define nouveau_copy_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_copy_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_copy_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_copy_dtor _nouveau_engine_dtor
-#define _nouveau_copy_init _nouveau_engine_init
-#define _nouveau_copy_fini _nouveau_engine_fini
+void nva3_copy_intr(struct nouveau_subdev *);
extern struct nouveau_oclass nva3_copy_oclass;
extern struct nouveau_oclass nvc0_copy0_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
index e3674743baaa..db975618e937 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -1,45 +1,6 @@
#ifndef __NOUVEAU_CRYPT_H__
#define __NOUVEAU_CRYPT_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_crypt_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_crypt_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_crypt_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_crypt_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
-#define _nouveau_crypt_context_init _nouveau_engctx_init
-#define _nouveau_crypt_context_fini _nouveau_engctx_fini
-#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_crypt {
- struct nouveau_engine base;
-};
-
-#define nouveau_crypt_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
-#define nouveau_crypt_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_crypt_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_crypt_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_dtor _nouveau_engine_dtor
-#define _nouveau_crypt_init _nouveau_engine_init
-#define _nouveau_crypt_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_crypt_oclass;
extern struct nouveau_oclass nv98_crypt_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 38ec1252cbaa..46948285f3e7 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
extern struct nouveau_oclass nv04_disp_oclass;
extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index 700ccbb1941f..b28914ed1752 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
u32 access;
u64 start;
u64 limit;
+ u32 conf0;
};
-#define nouveau_dmaobj_create(p,e,c,a,s,d) \
- nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
-#define nouveau_dmaobj_destroy(p) \
- nouveau_object_destroy(&(p)->base)
-#define nouveau_dmaobj_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_dmaobj_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *data, u32 size,
- int length, void **);
-
-#define _nouveau_dmaobj_dtor nouveau_object_destroy
-#define _nouveau_dmaobj_init nouveau_object_init
-#define _nouveau_dmaobj_fini nouveau_object_fini
-
struct nouveau_dmaeng {
struct nouveau_engine base;
- int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
- struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+
+ /* creates a "physical" dma object from a struct nouveau_dmaobj */
+ int (*bind)(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **);
};
#define nouveau_dmaeng_create(p,e,c,d) \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
extern struct nouveau_oclass nv04_dmaeng_oclass;
extern struct nouveau_oclass nv50_dmaeng_oclass;
extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index d67fed1e3970..f18846c8c6fe 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -33,15 +33,15 @@ int nouveau_fifo_channel_create_(struct nouveau_object *,
struct nouveau_object *,
struct nouveau_oclass *,
int bar, u32 addr, u32 size, u32 push,
- u32 engmask, int len, void **);
+ u64 engmask, int len, void **);
void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
#define _nouveau_fifo_channel_init _nouveau_namedb_init
#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
struct nouveau_fifo_base {
struct nouveau_gpuobj base;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
index 74d554fb3281..0a66781e8cf1 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -1,45 +1,7 @@
#ifndef __NOUVEAU_PPP_H__
#define __NOUVEAU_PPP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_ppp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_ppp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_ppp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_ppp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_ppp_context_init _nouveau_engctx_init
-#define _nouveau_ppp_context_fini _nouveau_engctx_fini
-#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_ppp {
- struct nouveau_engine base;
-};
-
-#define nouveau_ppp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
-#define nouveau_ppp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_ppp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_ppp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_dtor _nouveau_engine_dtor
-#define _nouveau_ppp_init _nouveau_engine_init
-#define _nouveau_ppp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
index 05cd08fba377..d7b287b115bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -1,45 +1,8 @@
#ifndef __NOUVEAU_VP_H__
#define __NOUVEAU_VP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_vp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_vp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_vp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_vp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_vp_context_init _nouveau_engctx_init
-#define _nouveau_vp_context_fini _nouveau_engctx_fini
-#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_vp {
- struct nouveau_engine base;
-};
-
-#define nouveau_vp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
-#define nouveau_vp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_vp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_vp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_vp_dtor _nouveau_engine_dtor
-#define _nouveau_vp_init _nouveau_engine_init
-#define _nouveau_vp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
index d145b25e6be4..5bd1ca8cd20d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -17,6 +17,7 @@ struct nouveau_bios {
u8 chip;
u8 minor;
u8 micro;
+ u8 patch;
} version;
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index d682fb625833..b79025da581e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -23,6 +23,7 @@ struct dcb_output {
uint8_t bus;
uint8_t location;
uint8_t or;
+ uint8_t link;
bool duallink_possible;
union {
struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+ struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+ struct dcb_output *);
int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
(struct nouveau_bios *, void *, int index, u16 entry));
-
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-static inline bool
-dcb_hash_match(struct dcb_output *dcb, u32 hash)
-{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
-
- switch (dcb->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
- default:
- return true;
- }
-}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 000000000000..c35937e2f6a4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+
+u16 nvbios_disp_table(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+ u16 data;
+};
+
+u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr__, u8 *sub);
+u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr__, u8 *sub,
+ struct nvbios_disp *);
+
+struct nvbios_outp {
+ u16 type;
+ u16 mask;
+ u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *);
+u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *);
+
+
+struct nvbios_ocfg {
+ u16 match;
+ u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 73b5e5d3e75a..6e54218b55fc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -1,8 +1,34 @@
#ifndef __NVBIOS_DP_H__
#define __NVBIOS_DP_H__
-u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+struct nvbios_dpout {
+ u16 type;
+ u16 mask;
+ u8 flags;
+ u32 script[5];
+ u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+ u8 drv;
+ u8 pre;
+ u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 2bf178082a36..e6563b5cb08e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -25,9 +25,11 @@ struct dcb_gpio_func {
u8 param;
};
-u16 dcb_gpio_table(struct nouveau_bios *);
-u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver);
-int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line,
+u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
+u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *);
+u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
+ u8 *ver, u8 *len, struct dcb_gpio_func *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
index e69a8bdc6e97..ca2f6bf37f46 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -13,6 +13,7 @@ struct nvbios_init {
u32 nested;
u16 repeat;
u16 repend;
+ u32 ramcfg;
};
int nvbios_exec(struct nvbios_init *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
index c345097592f2..b2f3d4d0aa49 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
PLL_UNK42 = 0x42,
PLL_VPLL0 = 0x80,
PLL_VPLL1 = 0x81,
+ PLL_VPLL2 = 0x82,
+ PLL_VPLL3 = 0x83,
PLL_MAX = 0xff
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 5c1b5e1904f9..da470e6851b1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -69,8 +69,11 @@ struct nouveau_fb {
} type;
u64 stolen;
u64 size;
+
int ranks;
+ int parts;
+ int (*init)(struct nouveau_fb *);
int (*get)(struct nouveau_fb *, u64 size, u32 align,
u32 size_nc, u32 type, struct nouveau_mem **);
void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
int regions;
void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
+ void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
void (*fini)(struct nouveau_fb *, int i,
struct nouveau_fb_tile *);
void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
#define nouveau_fb_create(p,e,c,d) \
nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
-int nouveau_fb_created(struct nouveau_fb *);
+int nouveau_fb_preinit(struct nouveau_fb *);
void nouveau_fb_destroy(struct nouveau_fb *);
int nouveau_fb_init(struct nouveau_fb *);
#define nouveau_fb_fini(p,s) \
@@ -111,9 +116,19 @@ int _nouveau_fb_init(struct nouveau_object *);
extern struct nouveau_oclass nv04_fb_oclass;
extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
extern struct nouveau_oclass nv50_fb_oclass;
extern struct nouveau_oclass nvc0_fb_oclass;
@@ -122,13 +137,35 @@ int nouveau_fb_bios_memtype(struct nouveau_bios *);
bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+int nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv30_fb_init(struct nouveau_object *);
void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
+
+int nv41_fb_vram_init(struct nouveau_fb *);
+int nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv44_fb_vram_init(struct nouveau_fb *);
+int nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
-void nv50_fb_trap(struct nouveau_fb *, int display);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 9ea2b12cc15d..b75e8f18e52c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -11,7 +11,7 @@ struct nouveau_gpio {
struct nouveau_subdev base;
/* hardware interfaces */
- void (*reset)(struct nouveau_gpio *);
+ void (*reset)(struct nouveau_gpio *, u8 func);
int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
int (*sense)(struct nouveau_gpio *, int line);
void (*irq_enable)(struct nouveau_gpio *, int line, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index cd01c533007a..d70ba342aa2e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
}
static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_barobj *barobj = (void *)object;
return ioread32_native(barobj->iomem + addr);
}
static void
-nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_barobj *barobj = (void *)object;
iowrite32_native(data, barobj->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 70ca7d5a1aa1..f621f69fa1a2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
struct pci_dev *pdev = nv_device(bios)->pdev;
struct device_node *dn;
const u32 *data;
- int size, i;
+ int size;
dn = pci_device_to_OF_node(pdev);
if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
return;
bios->data = kmalloc(bios->size, GFP_KERNEL);
- for (i = 0; bios->data && i < bios->size; i += cnt) {
- cnt = min((bios->size - i), (u32)4096);
- ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
- if (ret != cnt)
- break;
+ if (bios->data) {
+ /* disobey the acpi spec - much faster on at least w530 ... */
+ ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+ if (ret != bios->size ||
+ nvbios_checksum(bios->data, bios->size)) {
+ /* ... that didn't work, ok, i'll be good now */
+ for (i = 0; i < bios->size; i += cnt) {
+ cnt = min((bios->size - i), (u32)4096);
+ ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+ if (ret != cnt)
+ break;
+ }
+ }
}
}
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
}
static u8
-nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return bios->data[addr];
}
static u16
-nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return get_unaligned_le16(&bios->data[addr]);
}
static u32
-nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return get_unaligned_le32(&bios->data[addr]);
}
static void
-nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
struct nouveau_bios *bios = (void *)object;
bios->data[addr] = data;
}
static void
-nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
struct nouveau_bios *bios = (void *)object;
put_unaligned_le16(data, &bios->data[addr]);
}
static void
-nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_bios *bios = (void *)object;
put_unaligned_le32(data, &bios->data[addr]);
@@ -439,6 +447,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
+ bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
} else
if (bmp_version(bios)) {
bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
@@ -447,9 +456,9 @@ nouveau_bios_ctor(struct nouveau_object *parent,
bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
}
- nv_info(bios, "version %02x.%02x.%02x.%02x\n",
+ nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
bios->version.major, bios->version.chip,
- bios->version.minor, bios->version.micro);
+ bios->version.minor, bios->version.micro, bios->version.patch);
return 0;
}
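
Aside, not part of the patch: the ACPI shadowing change above tries one bulk fetch and only falls back to the spec-mandated 4 KiB chunks if the result fails the checksum. A standalone sketch of that fetch-then-verify-then-fall-back shape, with stand-ins for nouveau_acpi_get_bios_chunk() and nvbios_checksum():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t rom[16384];	/* stand-in for the ACPI-exposed VBIOS image */

/* stand-in for nouveau_acpi_get_bios_chunk(): copy out of the fake ROM */
static int fetch(uint8_t *dst, uint32_t off, uint32_t len)
{
	memcpy(dst, rom + off, len);
	return (int)len;
}

static int checksum(const uint8_t *data, uint32_t size)
{
	uint8_t sum = 0;
	while (size--)
		sum += *data++;
	return sum;		/* a valid image sums to zero */
}

static int shadow(uint8_t *image, uint32_t size)
{
	uint32_t i, cnt;

	/* fast path: one bulk transfer, trusted only if the checksum holds */
	if (fetch(image, 0, size) == (int)size && !checksum(image, size))
		return 0;

	/* slow path: the 4 KiB-at-a-time loop the interface guarantees */
	for (i = 0; i < size; i += cnt) {
		cnt = size - i < 4096 ? size - i : 4096;
		if (fetch(image + i, i, cnt) != (int)cnt)
			return -1;
	}
	return checksum(image, size) ? -1 : 0;
}

int main(void)
{
	static uint8_t image[sizeof(rom)];
	memset(rom, 0x55, sizeof(rom));
	rom[sizeof(rom) - 1] -= checksum(rom, sizeof(rom));	/* force sum == 0 */
	printf("shadow: %d\n", shadow(image, sizeof(image)));
	return 0;
}
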
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index c51197157749..0fd87df99dd6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
return 0x0000;
}
+u16
+dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+ struct dcb_output *outp)
+{
+ u16 dcb = dcb_outp(bios, idx, ver, len);
+ if (dcb) {
+ if (*ver >= 0x20) {
+ u32 conn = nv_ro32(bios, dcb + 0x00);
+ outp->or = (conn & 0x0f000000) >> 24;
+ outp->location = (conn & 0x00300000) >> 20;
+ outp->bus = (conn & 0x000f0000) >> 16;
+ outp->connector = (conn & 0x0000f000) >> 12;
+ outp->heads = (conn & 0x00000f00) >> 8;
+ outp->i2c_index = (conn & 0x000000f0) >> 4;
+ outp->type = (conn & 0x0000000f);
+ outp->link = 0;
+ } else {
+ dcb = 0x0000;
+ }
+
+ if (*ver >= 0x40) {
+ u32 conf = nv_ro32(bios, dcb + 0x04);
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ outp->link = (conf & 0x00000030) >> 4;
+ outp->sorconf.link = outp->link; /*XXX*/
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return dcb;
+}
+
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+ return outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+ return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
+u16
+dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *len, struct dcb_output *outp)
+{
+ u16 dcb, idx = 0;
+ while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
+ if (dcb_outp_hasht(outp) == type) {
+ if ((dcb_outp_hashm(outp) & mask) == mask)
+ break;
+ }
+ }
+ return dcb;
+}
+
int
dcb_outp_foreach(struct nouveau_bios *bios, void *data,
int (*exec)(struct nouveau_bios *, void *, int, u16))
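
Aside, not part of the patch: dcb_outp_match() above compares a type hash against the requested type and a mask hash built as (heads << 8) | (link << 6) | or, accepting an entry only when it covers every bit the caller asked for. A standalone sketch of the encoding and the comparison, over a hypothetical DCB:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct outp { uint8_t type, heads, link, or; };

static uint16_t hashm(const struct outp *o)
{
	return (o->heads << 8) | (o->link << 6) | o->or;
}

/* same test as dcb_outp_match: right type, and the entry's hash must
 * contain every bit present in the caller's mask */
static const struct outp *match(const struct outp *tbl, int n,
				uint16_t type, uint16_t mask)
{
	int i;
	for (i = 0; i < n; i++) {
		if (tbl[i].type == type && (hashm(&tbl[i]) & mask) == mask)
			return &tbl[i];
	}
	return NULL;
}

int main(void)
{
	/* hypothetical table: two DP outputs (type 6) on different ORs/links */
	static const struct outp dcb[] = {
		{ .type = 6, .heads = 3, .link = 1, .or = 1 },
		{ .type = 6, .heads = 3, .link = 2, .or = 2 },
	};

	/* look for a DP output on OR 2, link B */
	uint16_t mask = (2 << 0) | (2 << 6);
	const struct outp *o = match(dcb, 2, 6, mask);
	printf("matched OR %d\n", o ? o->or : -1);
	return 0;
}
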
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 000000000000..7f16e52d9bea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/disp.h>
+
+u16
+nvbios_disp_table(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
+{
+ struct bit_entry U;
+
+ if (!bit_entry(bios, 'U', &U)) {
+ if (U.version == 1) {
+ u16 data = nv_ro16(bios, U.offset);
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x20:
+ case 0x21:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ *sub = nv_ro08(bios, data + 0x04);
+ return data;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *len, u8 *sub)
+{
+ u8 hdr, cnt;
+ u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
+ if (data && idx < cnt)
+ return data + hdr + (idx * *len);
+ *ver = 0x00;
+ return 0x0000;
+}
+
+u16
+nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *len, u8 *sub,
+ struct nvbios_disp *info)
+{
+ u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
+ if (data && *len >= 2) {
+ info->data = nv_ro16(bios, data + 0);
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct nvbios_disp info;
+ u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
+ if (data) {
+ *cnt = nv_ro08(bios, info.data + 0x05);
+ *len = 0x06;
+ data = info.data;
+ }
+ return data;
+}
+
+u16
+nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
+ if (data && *hdr >= 0x0a) {
+ info->type = nv_ro16(bios, data + 0x00);
+ info->mask = nv_ro32(bios, data + 0x02);
+ if (*ver <= 0x20) /* match any link */
+ info->mask |= 0x00c0;
+ info->script[0] = nv_ro16(bios, data + 0x06);
+ info->script[1] = nv_ro16(bios, data + 0x08);
+ info->script[2] = 0x0000;
+ if (*hdr >= 0x0c)
+ info->script[2] = nv_ro16(bios, data + 0x0a);
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ u16 data, idx = 0;
+ while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+ if (data && info->type == type) {
+ if ((info->mask & mask) == mask)
+ break;
+ }
+ }
+ return data;
+}
+
+u16
+nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ if (idx < *cnt)
+ return outp + *hdr + (idx * *len);
+ return 0x0000;
+}
+
+u16
+nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *info)
+{
+ u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ if (data) {
+ info->match = nv_ro16(bios, data + 0x00);
+ info->clkcmp[0] = nv_ro16(bios, data + 0x02);
+ info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+ }
+ return data;
+}
+
+u16
+nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *info)
+{
+ u16 data, idx = 0;
+ while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+ if (info->match == type)
+ break;
+ }
+ return data;
+}
+
+u16
+nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+{
+ while (cmp) {
+ if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
+ return nv_ro16(bios, cmp + 0x02);
+ cmp += 0x04;
+ }
+ return 0x0000;
+}
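
Aside, not part of the patch: nvbios_oclk_match() above walks a list of (clock threshold, value) pairs, with thresholds stored in 10 kHz units, and returns the value of the first entry the requested clock reaches. A standalone sketch of the same walk over an in-memory table with made-up entries:

#include <stdint.h>
#include <stdio.h>

struct clkcmp { uint16_t thresh10khz, value; };

/* entries ordered fastest-first; pick the first one the clock reaches */
static uint16_t oclk_match(const struct clkcmp *cmp, int n, uint32_t khz)
{
	int i;
	for (i = 0; i < n; i++) {
		if (khz / 10 >= cmp[i].thresh10khz)
			return cmp[i].value;
	}
	return 0x0000;
}

int main(void)
{
	/* hypothetical table: >= 165 MHz selects config 2, otherwise config 1 */
	static const struct clkcmp cmp[] = {
		{ 16500, 0x0002 },
		{     0, 0x0001 },
	};
	printf("0x%04x\n", oclk_match(cmp, 2, 148500));	/* prints 0x0001 */
	return 0;
}
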
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 3cbc0f3e8d5e..663853bcca82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -25,23 +25,29 @@
#include "subdev/bios.h"
#include "subdev/bios/bit.h"
-#include "subdev/bios/dcb.h"
#include "subdev/bios/dp.h"
-u16
-dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+static u16
+nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- struct bit_entry bit_d;
+ struct bit_entry d;
- if (!bit_entry(bios, 'd', &bit_d)) {
- if (bit_d.version == 1) {
- u16 data = nv_ro16(bios, bit_d.offset);
+ if (!bit_entry(bios, 'd', &d)) {
+ if (d.version == 1 && d.length >= 2) {
+ u16 data = nv_ro16(bios, d.offset);
if (data) {
- *ver = nv_ro08(bios, data + 0);
- *hdr = nv_ro08(bios, data + 1);
- *len = nv_ro08(bios, data + 2);
- *cnt = nv_ro08(bios, data + 3);
- return data;
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x21:
+ case 0x30:
+ case 0x40:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ return data;
+ default:
+ break;
+ }
}
}
}
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
return 0x0000;
}
+static u16
+nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
+ if (data && idx < *cnt) {
+ u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+ switch (*ver * !!outp) {
+ case 0x21:
+ case 0x30:
+ *hdr = nv_ro08(bios, data + 0x04);
+ *len = nv_ro08(bios, data + 0x05);
+ *cnt = nv_ro08(bios, outp + 0x04);
+ break;
+ case 0x40:
+ *hdr = nv_ro08(bios, data + 0x04);
+ *cnt = 0;
+ *len = 0;
+ break;
+ default:
+ break;
+ }
+ return outp;
+ }
+ *ver = 0x00;
+ return 0x0000;
+}
+
u16
-dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *info)
{
- u8 hdr, cnt;
- u16 table = dp_table(bios, ver, &hdr, &cnt, len);
- if (table && idx < cnt)
- return nv_ro16(bios, table + hdr + (idx * *len));
- return 0xffff;
+ u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+ if (data && *ver) {
+ info->type = nv_ro16(bios, data + 0x00);
+ info->mask = nv_ro16(bios, data + 0x02);
+ switch (*ver) {
+ case 0x21:
+ case 0x30:
+ info->flags = nv_ro08(bios, data + 0x05);
+ info->script[0] = nv_ro16(bios, data + 0x06);
+ info->script[1] = nv_ro16(bios, data + 0x08);
+ info->lnkcmp = nv_ro16(bios, data + 0x0a);
+ info->script[2] = nv_ro16(bios, data + 0x0c);
+ info->script[3] = nv_ro16(bios, data + 0x0e);
+ info->script[4] = nv_ro16(bios, data + 0x10);
+ break;
+ case 0x40:
+ info->flags = nv_ro08(bios, data + 0x04);
+ info->script[0] = nv_ro16(bios, data + 0x05);
+ info->script[1] = nv_ro16(bios, data + 0x07);
+ info->lnkcmp = nv_ro16(bios, data + 0x09);
+ info->script[2] = nv_ro16(bios, data + 0x0b);
+ info->script[3] = nv_ro16(bios, data + 0x0d);
+ info->script[4] = nv_ro16(bios, data + 0x0f);
+ break;
+ default:
+ data = 0x0000;
+ break;
+ }
+ }
+ return data;
}
u16
-dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
- u8 *ver, u8 *len)
+nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *info)
{
- u8 idx = 0;
- u16 data;
- while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
- if (data) {
- u32 hash = nv_ro32(bios, data);
- if (dcb_hash_match(outp, hash))
- return data;
+ u16 data, idx = 0;
+ while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+ if (data && info->type == type) {
+ if ((info->mask & mask) == mask)
+ break;
}
}
+ return data;
+}
+
+static u16
+nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ if (*ver >= 0x40) {
+ outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
+ *hdr = *hdr + (*len * * cnt);
+ *len = nv_ro08(bios, outp + 0x06);
+ *cnt = nv_ro08(bios, outp + 0x07);
+ }
+
+ if (idx < *cnt)
+ return outp + *hdr + (idx * *len);
+
return 0x0000;
}
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *info)
+{
+ u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ if (data) {
+ switch (*ver) {
+ case 0x21:
+ info->drv = nv_ro08(bios, data + 0x02);
+ info->pre = nv_ro08(bios, data + 0x03);
+ info->unk = nv_ro08(bios, data + 0x04);
+ break;
+ case 0x30:
+ case 0x40:
+ info->drv = nv_ro08(bios, data + 0x01);
+ info->pre = nv_ro08(bios, data + 0x02);
+ info->unk = nv_ro08(bios, data + 0x03);
+ break;
+ default:
+ data = 0x0000;
+ break;
+ }
+ }
+ return data;
+}
+
+u16
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *info)
+{
+ u8 idx = 0xff;
+ u16 data;
+
+ if (*ver >= 0x30) {
+ const u8 vsoff[] = { 0, 4, 7, 9 };
+ idx = (un * 10) + vsoff[vs] + pe;
+ } else {
+ while ((data = nvbios_dpcfg_entry(bios, outp, idx,
+ ver, hdr, cnt, len))) {
+ if (nv_ro08(bios, data + 0x00) == vs &&
+ nv_ro08(bios, data + 0x01) == pe)
+ break;
+ idx++;
+ }
+ }
+
+ return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index 4c9f1e508165..c84e93fa6d95 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -27,84 +27,105 @@
#include <subdev/bios/gpio.h>
u16
-dcb_gpio_table(struct nouveau_bios *bios)
+dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- u8 ver, hdr, cnt, len;
- u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
+ u16 data = 0x0000;
+ u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb) {
- if (ver >= 0x30 && hdr >= 0x0c)
- return nv_ro16(bios, dcb + 0x0a);
- if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
- return nv_ro16(bios, dcb - 0x0f);
+ if (*ver >= 0x30 && *hdr >= 0x0c)
+ data = nv_ro16(bios, dcb + 0x0a);
+ else
+ if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
+ data = nv_ro16(bios, dcb - 0x0f);
+
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ if (*ver < 0x30) {
+ *hdr = 3;
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x01);
+ } else
+ if (*ver <= 0x41) {
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ } else {
+ data = 0x0000;
+ }
+ }
}
- return 0x0000;
+ return data;
}
u16
-dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver)
+dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
- u16 gpio = dcb_gpio_table(bios);
- if (gpio) {
- *ver = nv_ro08(bios, gpio);
- if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2))
- return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
- else if (ent < nv_ro08(bios, gpio + 2))
- return gpio + nv_ro08(bios, gpio + 1) +
- (ent * nv_ro08(bios, gpio + 3));
- }
+ u8 hdr, cnt;
+ u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
+ if (gpio && ent < cnt)
+ return gpio + hdr + (ent * *len);
return 0x0000;
}
-int
-dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+u16
+dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *gpio)
{
- u8 ver, hdr, cnt, len;
- u16 entry;
- int i = -1;
-
- while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
- if (ver < 0x40) {
- u16 data = nv_ro16(bios, entry);
+ u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
+ if (data) {
+ if (*ver < 0x40) {
+ u16 info = nv_ro16(bios, data);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x001f) >> 0,
- .func = (data & 0x07e0) >> 5,
- .log[0] = (data & 0x1800) >> 11,
- .log[1] = (data & 0x6000) >> 13,
- .param = !!(data & 0x8000),
+ .line = (info & 0x001f) >> 0,
+ .func = (info & 0x07e0) >> 5,
+ .log[0] = (info & 0x1800) >> 11,
+ .log[1] = (info & 0x6000) >> 13,
+ .param = !!(info & 0x8000),
};
} else
- if (ver < 0x41) {
- u32 data = nv_ro32(bios, entry);
+ if (*ver < 0x41) {
+ u32 info = nv_ro32(bios, data);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x0000001f) >> 0,
- .func = (data & 0x0000ff00) >> 8,
- .log[0] = (data & 0x18000000) >> 27,
- .log[1] = (data & 0x60000000) >> 29,
- .param = !!(data & 0x80000000),
+ .line = (info & 0x0000001f) >> 0,
+ .func = (info & 0x0000ff00) >> 8,
+ .log[0] = (info & 0x18000000) >> 27,
+ .log[1] = (info & 0x60000000) >> 29,
+ .param = !!(info & 0x80000000),
};
} else {
- u32 data = nv_ro32(bios, entry + 0);
- u8 data1 = nv_ro32(bios, entry + 4);
+ u32 info = nv_ro32(bios, data + 0);
+ u8 info1 = nv_ro32(bios, data + 4);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x0000003f) >> 0,
- .func = (data & 0x0000ff00) >> 8,
- .log[0] = (data1 & 0x30) >> 4,
- .log[1] = (data1 & 0xc0) >> 6,
- .param = !!(data & 0x80000000),
+ .line = (info & 0x0000003f) >> 0,
+ .func = (info & 0x0000ff00) >> 8,
+ .log[0] = (info1 & 0x30) >> 4,
+ .log[1] = (info1 & 0xc0) >> 6,
+ .param = !!(info & 0x80000000),
};
}
+ }
+
+ return data;
+}
+u16
+dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+ u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
+{
+ u8 hdr, cnt, i = 0;
+ u16 data;
+
+ while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
if ((line == 0xff || line == gpio->line) &&
(func == 0xff || func == gpio->func))
- return 0;
+ return data;
}
/* DCB 2.2, fixed TVDAC GPIO data */
- if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
- if (func == DCB_GPIO_TVDAC0) {
- u8 conf = nv_ro08(bios, entry - 5);
- u8 addr = nv_ro08(bios, entry - 4);
+ if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
+ if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
+ u8 conf = nv_ro08(bios, data - 5);
+ u8 addr = nv_ro08(bios, data - 4);
if (conf & 0x01) {
*gpio = (struct dcb_gpio_func) {
.func = DCB_GPIO_TVDAC0,
@@ -112,10 +133,11 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
.log[0] = !!(conf & 0x02),
.log[1] = !(conf & 0x02),
};
- return 0;
+ *ver = 0x00;
+ return data;
}
}
}
- return -EINVAL;
+ return 0x0000;
}
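
Aside, not part of the patch: dcb_gpio_match() above scans every parsed entry and treats 0xff as a wildcard for either the function or the line. A standalone sketch of that wildcard comparison over a mock table; the function numbers are placeholders:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct gpio_func { uint8_t func, line; };

static const struct gpio_func *gpio_match(const struct gpio_func *tbl, int n,
					  uint8_t func, uint8_t line)
{
	int i;
	for (i = 0; i < n; i++) {
		/* 0xff means "don't care" for that field */
		if ((line == 0xff || line == tbl[i].line) &&
		    (func == 0xff || func == tbl[i].func))
			return &tbl[i];
	}
	return NULL;
}

int main(void)
{
	static const struct gpio_func tbl[] = {
		{ .func = 0x21, .line = 4 },	/* hypothetical entries */
		{ .func = 0x0c, .line = 9 },
	};

	/* find function 0x0c on any line */
	const struct gpio_func *f = gpio_match(tbl, 2, 0x0c, 0xff);
	printf("line %d\n", f ? f->line : -1);
	return 0;
}
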
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 6be8c32f6e4c..690ed438b2ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2,11 +2,12 @@
#include <core/device.h>
#include <subdev/bios.h>
-#include <subdev/bios/conn.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
+#include <subdev/bios/gpio.h>
#include <subdev/bios/init.h>
#include <subdev/devinit.h>
#include <subdev/clock.h>
@@ -410,9 +411,25 @@ init_ram_restrict_group_count(struct nvbios_init *init)
}
static u8
+init_ram_restrict_strap(struct nvbios_init *init)
+{
+ /* This appears to be the behaviour of the VBIOS parser, and *is*
+ * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
+ * avoid fucking up the memory controller (somehow) by reading it
+ * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
+ *
+ * Preserving the non-caching behaviour on earlier chipsets just
+ * in case *not* re-reading the strap causes similar breakage.
+ */
+ if (!init->ramcfg || init->bios->version.major < 0x70)
+ init->ramcfg = init_rd32(init, 0x101000);
+ return (init->ramcfg & 0x00000003c) >> 2;
+}
+
+static u8
init_ram_restrict(struct nvbios_init *init)
{
- u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2;
+ u8 strap = init_ram_restrict_strap(init);
u16 table = init_ram_restrict_table(init);
if (table)
return nv_ro08(init->bios, table + strap);
@@ -743,9 +760,10 @@ static void
init_dp_condition(struct nvbios_init *init)
{
struct nouveau_bios *bios = init->bios;
+ struct nvbios_dpout info;
u8 cond = nv_ro08(bios, init->offset + 1);
u8 unkn = nv_ro08(bios, init->offset + 2);
- u8 ver, len;
+ u8 ver, hdr, cnt, len;
u16 data;
trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +777,12 @@ init_dp_condition(struct nvbios_init *init)
case 1:
case 2:
if ( init->outp &&
- (data = dp_outp_match(bios, init->outp, &ver, &len))) {
- if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
- init_exec_set(init, false);
- if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
+ (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
+ (init->outp->or << 0) |
+ (init->outp->sorconf.link << 6),
+ &ver, &hdr, &cnt, &len, &info)))
+ {
+ if (!(info.flags & cond))
init_exec_set(init, false);
break;
}
@@ -1514,7 +1534,6 @@ init_io(struct nvbios_init *init)
mdelay(10);
init_wr32(init, 0x614100, 0x10000018);
init_wr32(init, 0x614900, 0x10000018);
- return;
}
value = init_rdport(init, port) & mask;
@@ -1778,7 +1797,7 @@ init_gpio(struct nvbios_init *init)
init->offset += 1;
if (init_exec(init) && gpio && gpio->reset)
- gpio->reset(gpio);
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
}
/**
@@ -1992,6 +2011,47 @@ init_i2c_long_if(struct nvbios_init *init)
init_exec_set(init, false);
}
+/**
+ * INIT_GPIO_NE - opcode 0xa9
+ *
+ */
+static void
+init_gpio_ne(struct nvbios_init *init)
+{
+ struct nouveau_bios *bios = init->bios;
+ struct nouveau_gpio *gpio = nouveau_gpio(bios);
+ struct dcb_gpio_func func;
+ u8 count = nv_ro08(bios, init->offset + 1);
+ u8 idx = 0, ver, len;
+ u16 data, i;
+
+ trace("GPIO_NE\t");
+ init->offset += 2;
+
+ for (i = init->offset; i < init->offset + count; i++)
+ cont("0x%02x ", nv_ro08(bios, i));
+ cont("\n");
+
+ while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
+ if (func.func != DCB_GPIO_UNUSED) {
+ for (i = init->offset; i < init->offset + count; i++) {
+ if (func.func == nv_ro08(bios, i))
+ break;
+ }
+
+ trace("\tFUNC[0x%02x]", func.func);
+ if (i == (init->offset + count)) {
+ cont(" *");
+ if (init_exec(init) && gpio && gpio->reset)
+ gpio->reset(gpio, func.func);
+ }
+ cont("\n");
+ }
+ }
+
+ init->offset += count;
+}
+
static struct nvbios_init_opcode {
void (*exec)(struct nvbios_init *);
} init_opcode[] = {
@@ -2056,6 +2116,7 @@ static struct nvbios_init_opcode {
[0x98] = { init_auxch },
[0x99] = { init_zm_auxch },
[0x9a] = { init_i2c_long_if },
+ [0xa9] = { init_gpio_ne },
};
#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
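INIT_GPIO_NE ("not equal") resets every GPIO function that is *not* listed in the opcode payload; the payload bytes name the functions to leave alone. A standalone sketch of that selection logic, with invented function numbers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t exclude[] = { 0x01, 0x0c };		/* opcode payload */
	const uint8_t funcs[]   = { 0x01, 0x07, 0x0c, 0x21 };	/* parsed DCB GPIO functions */

	for (size_t i = 0; i < sizeof(funcs); i++) {
		if (memchr(exclude, funcs[i], sizeof(exclude)))
			continue;		/* listed, so left untouched */
		printf("would reset GPIO function 0x%02x\n", funcs[i]);
	}
	return 0;
}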
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index f6962c9b6c36..7c9626258a46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
nv_wr32(priv, info.reg + 0x10, fN << 16);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index ca9a4648bd8a..f8a7ed4166cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -25,7 +25,6 @@
#include <core/object.h>
#include <core/device.h>
#include <core/client.h>
-#include <core/device.h>
#include <core/option.h>
#include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
static const u64 disable_map[] = {
[NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
+ [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
[NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
[NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
[NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
[NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
[NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
[NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
- [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
[NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
[NVDEV_SUBDEV_NR] = 0,
};
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
/* determine frequency of timing crystal */
if ( device->chipset < 0x17 ||
- (device->chipset >= 0x20 && device->chipset <= 0x25))
+ (device->chipset >= 0x20 && device->chipset < 0x25))
strap &= 0x00000040;
else
strap &= 0x00400040;
@@ -356,37 +360,37 @@ fail:
}
static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
{
return nv_rd08(object->engine, addr);
}
static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
{
return nv_rd16(object->engine, addr);
}
static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
{
return nv_rd32(object->engine, addr);
}
static void
-nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
nv_wr08(object->engine, addr, data);
}
static void
-nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
nv_wr16(object->engine, addr, data);
}
static void
-nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
nv_wr32(object->engine, addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index f09accfd0e31..9c40b0fb23f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 5fa58b7369b5..74f88f48e1c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 7f4b8fe6cccc..0ac1b2c4f61d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 42deadca0f0a..41d59689a021 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index fec3bcc9a6fc..6ccfd8585ba2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x86:
device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x92:
device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x94:
device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0x96:
device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0x98:
device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xa0:
device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
break;
case 0xaa:
device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xac:
device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xa3:
device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xa5:
device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xa8:
device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xaf:
device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
default:
nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 6697f0f9c293..f0461685a422 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc4:
device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc3:
device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xce:
device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xcf:
device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc1:
device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc8:
device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xd9:
device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 4a280b7ab853..03a652876e73 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -45,6 +45,9 @@
#include <engine/graph.h>
#include <engine/disp.h>
#include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
int
nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
case 0xe7:
device->cname = "GK107";
@@ -92,13 +98,44 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
+ case 0xe6:
+ device->cname = "GK106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 61becfa732e9..ae7249b09797 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -22,6 +22,10 @@
* Authors: Ben Skeggs
*/
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
#include <subdev/devinit.h>
#include <subdev/vga.h>
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
static int
nv50_devinit_init(struct nouveau_object *object)
{
+ struct nouveau_bios *bios = nouveau_bios(object);
struct nv50_devinit_priv *priv = (void *)object;
+ struct nvbios_outp info;
+ struct dcb_output outp;
+ u8 ver = 0xff, hdr, cnt, len;
+ int ret, i = 0;
if (!priv->base.post) {
if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
}
}
- return nouveau_devinit_init(&priv->base);
+ ret = nouveau_devinit_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* if we ran the init tables, execute the first script pointer for
+ * each display table output entry that has a matching dcb entry.
+ */
+ while (priv->base.post && ver) {
+ u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
+ if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[0],
+ .outp = &outp,
+ .crtc = -1,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+ };
+
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f0086de8af31..d62045f454b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -57,25 +57,45 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
}
int
-nouveau_fb_init(struct nouveau_fb *pfb)
+nouveau_fb_preinit(struct nouveau_fb *pfb)
{
- int ret, i;
+ static const char *name[] = {
+ [NV_MEM_TYPE_UNKNOWN] = "unknown",
+ [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+ [NV_MEM_TYPE_SGRAM ] = "SGRAM",
+ [NV_MEM_TYPE_SDRAM ] = "SDRAM",
+ [NV_MEM_TYPE_DDR1 ] = "DDR1",
+ [NV_MEM_TYPE_DDR2 ] = "DDR2",
+ [NV_MEM_TYPE_DDR3 ] = "DDR3",
+ [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
+ [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
+ [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
+ [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
+ };
+ int ret, tags;
- ret = nouveau_subdev_init(&pfb->base);
- if (ret)
- return ret;
+ tags = pfb->ram.init(pfb);
+ if (tags < 0 || !pfb->ram.size) {
+ nv_fatal(pfb, "error detecting memory configuration!!\n");
+ return (tags < 0) ? tags : -ERANGE;
+ }
- for (i = 0; i < pfb->tile.regions; i++)
- pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+ if (!nouveau_mm_initialised(&pfb->vram)) {
+ ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+ if (ret)
+ return ret;
+ }
- return 0;
-}
+ if (!nouveau_mm_initialised(&pfb->tags)) {
+ ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
+ if (ret)
+ return ret;
+ }
-int
-_nouveau_fb_init(struct nouveau_object *object)
-{
- struct nouveau_fb *pfb = (void *)object;
- return nouveau_fb_init(pfb);
+ nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+ nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+ nv_info(pfb, " ZCOMP: %d tags\n", tags);
+ return 0;
}
void
@@ -85,12 +105,8 @@ nouveau_fb_destroy(struct nouveau_fb *pfb)
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-
- if (pfb->tags.block_size)
- nouveau_mm_fini(&pfb->tags);
-
- if (pfb->vram.block_size)
- nouveau_mm_fini(&pfb->vram);
+ nouveau_mm_fini(&pfb->tags);
+ nouveau_mm_fini(&pfb->vram);
nouveau_subdev_destroy(&pfb->base);
}
@@ -101,30 +117,24 @@ _nouveau_fb_dtor(struct nouveau_object *object)
struct nouveau_fb *pfb = (void *)object;
nouveau_fb_destroy(pfb);
}
-
int
-nouveau_fb_created(struct nouveau_fb *pfb)
+nouveau_fb_init(struct nouveau_fb *pfb)
{
- static const char *name[] = {
- [NV_MEM_TYPE_UNKNOWN] = "unknown",
- [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
- [NV_MEM_TYPE_SGRAM ] = "SGRAM",
- [NV_MEM_TYPE_SDRAM ] = "SDRAM",
- [NV_MEM_TYPE_DDR1 ] = "DDR1",
- [NV_MEM_TYPE_DDR2 ] = "DDR2",
- [NV_MEM_TYPE_DDR3 ] = "DDR3",
- [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
- [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
- [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
- [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
- };
+ int ret, i;
- if (pfb->ram.size == 0) {
- nv_fatal(pfb, "no vram detected!!\n");
- return -ERANGE;
- }
+ ret = nouveau_subdev_init(&pfb->base);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pfb->tile.regions; i++)
+ pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
- nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
- nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
return 0;
}
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = (void *)object;
+ return nouveau_fb_init(pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index eb06836b69f7..6e369f85361e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -56,6 +56,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
+ if (boot0 & 0x00000100) {
+ pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
+ pfb->ram.size *= 1024 * 1024;
+ } else {
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+ pfb->ram.size = 32 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+ pfb->ram.size = 16 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+ pfb->ram.size = 8 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+ pfb->ram.size = 4 * 1024 * 1024;
+ break;
+ }
+ }
+
+ if ((boot0 & 0x00000038) <= 0x10)
+ pfb->ram.type = NV_MEM_TYPE_SGRAM;
+ else
+ pfb->ram.type = NV_MEM_TYPE_SDRAM;
+ return 0;
+}
+
+static int
nv04_fb_init(struct nouveau_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv04_fb_priv *priv;
- u32 boot0;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
- if (boot0 & 0x00000100) {
- priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
- priv->base.ram.size *= 1024 * 1024;
- } else {
- switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
- case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
- priv->base.ram.size = 32 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
- priv->base.ram.size = 16 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
- priv->base.ram.size = 8 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
- priv->base.ram.size = 4 * 1024 * 1024;
- break;
- }
- }
-
- if ((boot0 & 0x00000038) <= 0x10)
- priv->base.ram.type = NV_MEM_TYPE_SGRAM;
- else
- priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
- return nouveau_fb_created(&priv->base);
+ priv->base.ram.init = nv04_fb_vram_init;
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
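The detection moved into nv04_fb_vram_init() keeps the original BOOT_0 decode; a standalone sketch of the "extended" size encoding, with a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t boot0 = 0x00003100;	/* hypothetical NV04_PFB_BOOT_0 */

	if (boot0 & 0x00000100) {
		uint32_t mib = ((boot0 >> 12) & 0xf) * 2 + 2;	/* -> 8 MiB */
		printf("%u MiB of vram\n", (unsigned)mib);
	}
	return 0;
}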
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index f037a422d2f4..edbbe26e858d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
struct nouveau_fb base;
};
-static void
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 cfg0 = nv_rd32(pfb, 0x100200);
+ if (cfg0 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+ else
+ pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+void
nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
tile->pitch = pitch;
}
-static void
+void
nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
tile->addr = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100240 + (i * 0x10));
}
static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv10_fb_priv *priv;
int ret;
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- if (device->chipset == 0x1a || device->chipset == 0x1f) {
- struct pci_dev *bridge;
- u32 mem, mib;
-
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
- if (!bridge) {
- nv_fatal(device, "no bridge device\n");
- return 0;
- }
-
- if (device->chipset == 0x1a) {
- pci_read_config_dword(bridge, 0x7c, &mem);
- mib = ((mem >> 6) & 31) + 1;
- } else {
- pci_read_config_dword(bridge, 0x84, &mem);
- mib = ((mem >> 4) & 127) + 1;
- }
-
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- priv->base.ram.size = mib * 1024 * 1024;
- } else {
- u32 cfg0 = nv_rd32(priv, 0x100200);
- if (cfg0 & 0x00000001)
- priv->base.ram.type = NV_MEM_TYPE_DDR1;
- else
- priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
- }
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv10_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv10_fb_tile_init;
priv->base.tile.fini = nv10_fb_tile_fini;
priv->base.tile.prog = nv10_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644
index 000000000000..48366841db4a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct pci_dev *bridge;
+ u32 mem, mib;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ nv_fatal(pfb, "no bridge device\n");
+ return -ENODEV;
+ }
+
+ if (nv_device(pfb)->chipset == 0x1a) {
+ pci_read_config_dword(bridge, 0x7c, &mem);
+ mib = ((mem >> 6) & 31) + 1;
+ } else {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ mib = ((mem >> 4) & 127) + 1;
+ }
+
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ pfb->ram.size = mib * 1024 * 1024;
+ return 0;
+}
+
+static int
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv1a_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv1a_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv10_fb_tile_init;
+ priv->base.tile.fini = nv10_fb_tile_fini;
+ priv->base.tile.prog = nv10_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x1a),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv1a_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index 4b3578fcb7fb..5d14612a2c8e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
struct nouveau_fb base;
};
-static void
+int
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pbus1218 = nv_rd32(pfb, 0x001218);
+
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+ }
+ pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+
+ return nv_rd32(pfb, 0x100320);
+}
+
+void
nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
- struct nouveau_device *device = nv_device(pfb);
- int bpp = (flags & 2) ? 32 : 16;
-
tile->addr = 0x00000001 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
-
- /* Allocate some of the on-die tag memory, used to store Z
- * compression meta-data (most likely just a bitmap determining
- * if a given tile is compressed or not).
- */
- size /= 256;
if (flags & 4) {
- if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
- /* Enable Z compression */
- tile->zcomp = tile->tag->offset;
- if (device->chipset >= 0x25) {
- if (bpp == 16)
- tile->zcomp |= 0x00100000;
- else
- tile->zcomp |= 0x00200000;
- } else {
- tile->zcomp |= 0x80000000;
- if (bpp != 16)
- tile->zcomp |= 0x04000000;
- }
- }
-
+ pfb->tile.comp(pfb, i, size, flags, tile);
tile->addr |= 2;
}
}
static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+ else tile->zcomp = 0x04000000; /* Z24S8 */
+ tile->zcomp |= tile->tag->offset;
+ tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x08000000;
+#endif
+ }
+}
+
+void
nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
tile->addr = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nouveau_mm_free(&pfb->tags, &tile->tag);
}
-static void
+void
nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100240 + (i * 0x10));
nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
}
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv20_fb_priv *priv;
- u32 pbus1218;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
- }
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
- if (device->chipset >= 0x25)
- ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
- else
- ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
- if (ret)
- return ret;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv20_fb_tile_init;
+ priv->base.tile.comp = nv20_fb_tile_comp;
priv->base.tile.fini = nv20_fb_tile_fini;
priv->base.tile.prog = nv20_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
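The new comp hooks all size their tag allocation the same way: one tile per 0x40 bytes, divided across the memory partitions and rounded up to a 0x40-tag granularity. A standalone check of that arithmetic with made-up numbers (the kernel's round_up() is approximated by a generic macro here):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define round_up(x, y)		(DIV_ROUND_UP(x, y) * (y))

int main(void)
{
	uint32_t size  = 0x00100000;			/* 1 MiB tile region */
	uint32_t parts = 2;				/* pfb->ram.parts */
	uint32_t tiles = DIV_ROUND_UP(size, 0x40);	/* 0x4000 */
	uint32_t tags  = round_up(tiles / parts, 0x40);	/* 0x2000 */

	printf("tiles=0x%x tags=0x%x\n", (unsigned)tiles, (unsigned)tags);
	return 0;
}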
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644
index 000000000000..0042ace6bef9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+ else tile->zcomp = 0x00200000; /* Z24S8 */
+ tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x01000000;
+#endif
+ }
+}
+
+static int
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv25_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv20_fb_tile_init;
+ priv->base.tile.comp = nv25_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x25),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv25_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index cba67bc91390..a7ba0d048aec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -34,17 +34,36 @@ void
nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
- tile->addr = addr | 1;
+ /* for performance, select alternate bank offset for zeta */
+ if (!(flags & 4)) {
+ tile->addr = (0 << 4);
+ } else {
+ if (pfb->tile.comp) /* z compression */
+ pfb->tile.comp(pfb, i, size, flags, tile);
+ tile->addr = (1 << 4);
+ }
+
+ tile->addr |= 0x00000001; /* enable */
+ tile->addr |= addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
}
-void
-nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
{
- tile->addr = 0;
- tile->limit = 0;
- tile->pitch = 0;
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
+ else tile->zcomp |= 0x02000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x10000000;
+#endif
+ }
}
static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
return x;
}
-static int
+int
nv30_fb_init(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv30_fb_priv *priv;
- u32 pbus1218;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
- }
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.fini = nv30_fb_tile_fini;
- priv->base.tile.prog = nv10_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ priv->base.tile.comp = nv30_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
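
The reworked nv30_fb_tile_init() above packs three fields into tile->addr: the enable bit
(bit 0), an alternate-bank select used for zeta surfaces (bit 4), and the region base
address.  A tiny sketch of that packing, outside the driver and with an invented helper name:

#include <stdint.h>
#include <stdio.h>

static uint32_t
nv30_tile_addr(uint32_t base, int is_zeta)
{
	uint32_t v = is_zeta ? (1 << 4) : (0 << 4);	/* alternate bank for zeta */

	v |= 0x00000001;	/* enable */
	v |= base;		/* region base, assumed suitably aligned */
	return v;
}

int main(void)
{
	printf("0x%08x\n", nv30_tile_addr(0x00400000, 1));	/* -> 0x00400011 */
	return 0;
}
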
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644
index 000000000000..092f6f4f3521
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+ else tile->zcomp |= 0x08000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x40000000;
+#endif
+ }
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv35_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv35_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x35),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv35_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv30_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644
index 000000000000..797ab3b821b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+ else tile->zcomp |= 0x20000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x80000000;
+#endif
+ }
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv36_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv36_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x36),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv36_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv30_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 347a496fcad8..65e131b90f37 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
struct nouveau_fb base;
};
-static inline int
-nv44_graph_class(struct nouveau_device *device)
-{
- if ((device->chipset & 0xf0) == 0x60)
- return 1;
-
- return !(0x0baf & (1 << (device->chipset & 0x0f)));
-}
-
-static void
-nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
{
- nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
- nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
- nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-}
+ u32 pbus1218 = nv_rd32(pfb, 0x001218);
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+ }
-static void
-nv40_fb_init_gart(struct nv40_fb_priv *priv)
-{
- nv_wr32(priv, 0x100800, 0x00000001);
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
}
-static void
-nv44_fb_init_gart(struct nv40_fb_priv *priv)
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
{
- nv_wr32(priv, 0x100850, 0x80000000);
- nv_wr32(priv, 0x100800, 0x00000001);
+ u32 tiles = DIV_ROUND_UP(size, 0x80);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x100);
+ if ( (flags & 2) &&
+ !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
+ tile->zcomp |= ((tile->tag->offset ) >> 8);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x40000000;
+#endif
+ }
}
static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
if (ret)
return ret;
- switch (nv_device(priv)->chipset) {
- case 0x40:
- case 0x45:
- nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
- break;
- default:
- if (nv44_graph_class(nv_device(priv)))
- nv44_fb_init_gart(priv);
- else
- nv40_fb_init_gart(priv);
- break;
- }
-
+ nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
return 0;
}
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv40_fb_priv *priv;
int ret;
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- /* 0x001218 is actually present on a few other NV4X I looked at,
- * and even contains sane values matching 0x100474. From looking
- * at various vbios images however, this isn't the case everywhere.
- * So, I chose to use the same regs I've seen NVIDIA reading around
- * the memory detection, hopefully that'll get us the right numbers
- */
- if (device->chipset == 0x40) {
- u32 pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
- }
- } else
- if (device->chipset == 0x49 || device->chipset == 0x4b) {
- u32 pfb914 = nv_rd32(priv, 0x100914);
- switch (pfb914 & 0x00000003) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000003: break;
- }
- } else
- if (device->chipset != 0x4e) {
- u32 pfb474 = nv_rd32(priv, 0x100474);
- if (pfb474 & 0x00000004)
- priv->base.ram.type = NV_MEM_TYPE_GDDR3;
- if (pfb474 & 0x00000002)
- priv->base.ram.type = NV_MEM_TYPE_DDR2;
- if (pfb474 & 0x00000001)
- priv->base.ram.type = NV_MEM_TYPE_DDR1;
- } else {
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- }
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
- switch (device->chipset) {
- case 0x40:
- case 0x45:
- priv->base.tile.regions = 8;
- break;
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- case 0x4c:
- priv->base.tile.regions = 15;
- break;
- default:
- priv->base.tile.regions = 12;
- break;
- }
+ priv->base.ram.init = nv40_fb_vram_init;
+ priv->base.tile.regions = 8;
priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.fini = nv30_fb_tile_fini;
- if (device->chipset == 0x40)
- priv->base.tile.prog = nv10_fb_tile_prog;
- else
- priv->base.tile.prog = nv40_fb_tile_prog;
-
- return nouveau_fb_created(&priv->base);
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
}
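
nv40_fb_vram_init() above classifies the memory type from bits 8-9 of PBUS register
0x001218, derives the partition count from 0x100200, and returns the 0x100320 readout --
presumably the number of compression tags made available to the common preinit code.  A
small standalone decode of the two bitfields, with made-up register values:

#include <stdint.h>
#include <stdio.h>

static const char *
ram_type(uint32_t pbus1218)
{
	switch (pbus1218 & 0x00000300) {
	case 0x00000000: return "SDRAM";
	case 0x00000100: return "DDR1";
	case 0x00000200: return "GDDR3";
	default:         return "DDR2";		/* 0x00000300 */
	}
}

int main(void)
{
	uint32_t pbus1218 = 0x00000200;		/* pretend 0x001218 readout */
	uint32_t pfb200   = 0x00000001;		/* pretend 0x100200 readout */

	printf("type  = %s\n", ram_type(pbus1218));
	printf("parts = %u\n", (unsigned)((pfb200 & 0x00000003) + 1));
	return 0;
}
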
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644
index 000000000000..e9e5a08c41a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+ struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb474 = nv_rd32(pfb, 0x100474);
+ if (pfb474 & 0x00000004)
+ pfb->ram.type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+ nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+ nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+ nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100600 + (i * 0x10));
+ nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+ struct nv41_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x100800, 0x00000001);
+ return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv41_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv41_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x41),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv41_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644
index 000000000000..ae89b5006f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+ struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb474 = nv_rd32(pfb, 0x100474);
+ if (pfb474 & 0x00000004)
+ pfb->ram.type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nouveau_fb_tile *tile)
+{
+ tile->addr = 0x00000001; /* mode = vram */
+ tile->addr |= addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+ nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+ nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+ nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+ struct nv44_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x100850, 0x80000000);
+ nv_wr32(priv, 0x100800, 0x00000001);
+ return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv44_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv44_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv44_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x44),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv44_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644
index 000000000000..589b93ea2994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv46_fb_priv {
+ struct nouveau_fb base;
+};
+
+void
+nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nouveau_fb_tile *tile)
+{
+ /* for performance, select alternate bank offset for zeta */
+ if (!(flags & 4)) tile->addr = (0 << 3);
+ else tile->addr = (1 << 3);
+
+ tile->addr |= 0x00000001; /* mode = vram */
+ tile->addr |= addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+static int
+nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv46_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv44_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv46_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv46_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x46),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv46_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644
index 000000000000..818bba35b368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv47_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv47_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv41_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv47_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x47),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv47_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644
index 000000000000..84a31af16ab4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv49_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv49_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb914 = nv_rd32(pfb, 0x100914);
+
+ switch (pfb914 & 0x00000003) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000003: break;
+ }
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
+}
+
+static int
+nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv49_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv49_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv49_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x49),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv49_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644
index 000000000000..797fd558170b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv4e_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv4e_fb_vram_init(struct nouveau_fb *pfb)
+{
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ return 0;
+}
+
+static int
+nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv4e_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv4e_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv46_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv4e_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x4e),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 5f570806143a..eac236ed19b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -51,6 +51,102 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
return types[(memtype & 0xff00) >> 8] != 0;
}
+static u32
+nv50_fb_vram_rblock(struct nouveau_fb *pfb)
+{
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru, rblock_size;
+
+ r0 = nv_rd32(pfb, 0x100200);
+ r4 = nv_rd32(pfb, 0x100204);
+ rt = nv_rd32(pfb, 0x100250);
+ ru = nv_rd32(pfb, 0x001540);
+ nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != pfb->ram.size) {
+ nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
+ (u32)(pfb->ram.size >> 20));
+ }
+
+ rblock_size = rowsize;
+ if (rt & 1)
+ rblock_size *= 3;
+
+ nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+ return rblock_size;
+}
+
+static int
+nv50_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct nouveau_device *device = nv_device(pfb);
+ struct nouveau_bios *bios = nouveau_bios(device);
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 size, tags = 0;
+ int ret;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c);
+ pfb->ram.size = (pfb->ram.size & 0xffffff00) |
+ ((pfb->ram.size & 0x000000ff) << 32);
+
+ size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+ switch (device->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf: /* IGPs, no reordering, no real VRAM */
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
+ if (ret)
+ return ret;
+
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ break;
+ default:
+ switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+ case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 1:
+ if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+ pfb->ram.type = NV_MEM_TYPE_DDR3;
+ else
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ break;
+ case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
+ case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
+ default:
+ break;
+ }
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
+ nv50_fb_vram_rblock(pfb) >> 12);
+ if (ret)
+ return ret;
+
+ pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+ tags = nv_rd32(pfb, 0x100320);
+ break;
+ }
+
+ return tags;
+}
+
static int
nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +236,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
kfree(mem);
}
-static u32
-nv50_vram_rblock(struct nv50_fb_priv *priv)
-{
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru, rblock_size;
-
- r0 = nv_rd32(priv, 0x100200);
- r4 = nv_rd32(priv, 0x100204);
- rt = nv_rd32(priv, 0x100250);
- ru = nv_rd32(priv, 0x001540);
- nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != priv->base.ram.size) {
- nv_warn(priv, "memory controller reports %d MiB VRAM\n",
- (u32)(priv->base.ram.size >> 20));
- }
-
- rblock_size = rowsize;
- if (rt & 1)
- rblock_size *= 3;
-
- nv_debug(priv, "rblock %d bytes\n", rblock_size);
- return rblock_size;
-}
-
-static int
-nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_bios *bios = nouveau_bios(device);
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- struct nv50_fb_priv *priv;
- u32 tags;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- switch (nv_rd32(priv, 0x100714) & 0x00000007) {
- case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 1:
- if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
- priv->base.ram.type = NV_MEM_TYPE_DDR3;
- else
- priv->base.ram.type = NV_MEM_TYPE_DDR2;
- break;
- case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
- case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
- default:
- break;
- }
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c);
- priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
- ((priv->base.ram.size & 0x000000ff) << 32);
-
- tags = nv_rd32(priv, 0x100320);
- ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
- if (ret)
- return ret;
-
- nv_debug(priv, "%d compression tags\n", tags);
-
- size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
- switch (device->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf: /* IGPs, no reordering, no real VRAM */
- ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
- if (ret)
- return ret;
-
- priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- break;
- default:
- ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
- nv50_vram_rblock(priv) >> 12);
- if (ret)
- return ret;
-
- priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
- break;
- }
-
- priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (priv->r100c08_page) {
- priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, priv->r100c08))
- nv_warn(priv, "failed 0x100c08 page map\n");
- } else {
- nv_warn(priv, "failed 0x100c08 page alloc\n");
- }
-
- priv->base.memtype_valid = nv50_fb_memtype_valid;
- priv->base.ram.get = nv50_fb_vram_new;
- priv->base.ram.put = nv50_fb_vram_del;
- return nouveau_fb_created(&priv->base);
-}
-
-static void
-nv50_fb_dtor(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
-
- if (priv->r100c08_page) {
- pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- }
-
- nouveau_fb_destroy(&priv->base);
-}
-
-static int
-nv50_fb_init(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_fb_init(&priv->base);
- if (ret)
- return ret;
-
- /* Not a clue what this is exactly. Without pointing it at a
- * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
- * cause IOMMU "read from address 0" errors (rh#561267)
- */
- nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
-
- /* This is needed to get meaningful information from 100c90
- * on traps. No idea what these values mean exactly. */
- switch (device->chipset) {
- case 0x50:
- nv_wr32(priv, 0x100c90, 0x000707ff);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- nv_wr32(priv, 0x100c90, 0x000d0fff);
- break;
- case 0xaf:
- nv_wr32(priv, 0x100c90, 0x089d1fff);
- break;
- default:
- nv_wr32(priv, 0x100c90, 0x001d07ff);
- break;
- }
-
- return 0;
-}
-
-struct nouveau_oclass
-nv50_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
- },
-};
-
static const struct nouveau_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX", NULL },
{ 0x00000001, "NOTIFY", NULL },
@@ -424,11 +331,11 @@ static const struct nouveau_enum vm_fault[] = {
{}
};
-void
-nv50_fb_trap(struct nouveau_fb *pfb, int display)
+static void
+nv50_fb_intr(struct nouveau_subdev *subdev)
{
- struct nouveau_device *device = nv_device(pfb);
- struct nv50_fb_priv *priv = (void *)pfb;
+ struct nouveau_device *device = nv_device(subdev);
+ struct nv50_fb_priv *priv = (void *)subdev;
const struct nouveau_enum *en, *cl;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
@@ -445,9 +352,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
}
nv_wr32(priv, 0x100c90, idx | 0x80000000);
- if (!display)
- return;
-
/* decode status bits into something more useful */
if (device->chipset < 0xa3 ||
device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +398,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
else
printk("0x%08x\n", st1);
}
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_device *device = nv_device(parent);
+ struct nv50_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (priv->r100c08_page) {
+ priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+ nv_warn(priv, "failed 0x100c08 page map\n");
+ } else {
+ nv_warn(priv, "failed 0x100c08 page alloc\n");
+ }
+
+ priv->base.memtype_valid = nv50_fb_memtype_valid;
+ priv->base.ram.init = nv50_fb_vram_init;
+ priv->base.ram.get = nv50_fb_vram_new;
+ priv->base.ram.put = nv50_fb_vram_del;
+ nv_subdev(priv)->intr = nv50_fb_intr;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_priv *priv = (void *)object;
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267)
+ */
+ nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
+ switch (device->chipset) {
+ case 0x50:
+ nv_wr32(priv, 0x100c90, 0x000707ff);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ nv_wr32(priv, 0x100c90, 0x000d0fff);
+ break;
+ case 0xaf:
+ nv_wr32(priv, 0x100c90, 0x089d1fff);
+ break;
+ default:
+ nv_wr32(priv, 0x100c90, 0x001d07ff);
+ break;
+ }
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fb_ctor,
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
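
nv50_fb_vram_rblock(), moved into the new ram.init path above, derives the memory row size
from the partition mask in 0x001540 and the column/row/bank geometry in 0x100204, then uses
it (times three when 0x100250 bit 0 flags the mixed-density case) as the block size for the
VRAM allocator.  The same arithmetic rerun standalone, with register values invented for the
example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t r0 = 0x00000004;	/* pretend 0x100200 */
	uint32_t r4 = 0x00559000;	/* pretend 0x100204 */
	uint32_t rt = 0x00000000;	/* pretend 0x100250 */
	uint32_t ru = 0x00030000;	/* pretend 0x001540 */
	int i, parts = 0;

	for (i = 0; i < 8; i++)		/* one bit per enabled partition */
		if (ru & (0x00010000 << i))
			parts++;

	int colbits  = (r4 & 0x0000f000) >> 12;
	int rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	int rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	int banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);

	uint64_t rowsize   = (uint64_t)parts * banks * (1 << colbits) * 8;
	uint64_t predicted = rowsize << rowbitsa;
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	uint64_t rblock = rowsize;
	if (rt & 1)
		rblock *= 3;

	printf("parts=%d banks=%d rowsize=%llu predicted=%lluMiB rblock=%llu\n",
	       parts, banks, (unsigned long long)rowsize,
	       (unsigned long long)(predicted >> 20),
	       (unsigned long long)rblock);
	return 0;
}
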
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 9f59f2bf0079..7606ed15b6fa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -62,6 +62,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
+nvc0_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 parts = nv_rd32(pfb, 0x022438);
+ u32 pmask = nv_rd32(pfb, 0x022554);
+ u32 bsize = nv_rd32(pfb, 0x10f20c);
+ u32 offset, length;
+ bool uniform = true;
+ int ret, part;
+
+ nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
+ nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+ pfb->ram.type = nouveau_fb_bios_memtype(bios);
+ pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+
+ /* read amount of vram attached to each memory controller */
+ for (part = 0; part < parts; part++) {
+ if (!(pmask & (1 << part))) {
+ u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
+ if (psize != bsize) {
+ if (psize < bsize)
+ bsize = psize;
+ uniform = false;
+ }
+
+ nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
+ pfb->ram.size += (u64)psize << 20;
+ }
+ }
+
+ /* if all controllers have the same amount attached, there are no holes */
+ if (uniform) {
+ offset = rsvd_head;
+ length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+ return nouveau_mm_init(&pfb->vram, offset, length, 1);
+ }
+
+ /* otherwise, address lowest common amount from 0GiB */
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+ if (ret)
+ return ret;
+
+ /* and the rest starting from (8GiB + common_size) */
+ offset = (0x0200000000ULL >> 12) + (bsize << 8);
+ length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+ ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+ if (ret) {
+ nouveau_mm_fini(&pfb->vram);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
{
@@ -86,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mem->memtype = type;
mem->size = size;
- mutex_lock(&mm->mutex);
+ mutex_lock(&pfb->base.mutex);
do {
if (back)
ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
else
ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
if (ret) {
- mutex_unlock(&mm->mutex);
+ mutex_unlock(&pfb->base.mutex);
pfb->ram.put(pfb, &mem);
return ret;
}
@@ -101,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
- mutex_unlock(&mm->mutex);
+ mutex_unlock(&pfb->base.mutex);
r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;
@@ -139,66 +198,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
}
static int
-nvc0_vram_detect(struct nvc0_fb_priv *priv)
-{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nouveau_fb *pfb = &priv->base;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 parts = nv_rd32(priv, 0x022438);
- u32 pmask = nv_rd32(priv, 0x022554);
- u32 bsize = nv_rd32(priv, 0x10f20c);
- u32 offset, length;
- bool uniform = true;
- int ret, part;
-
- nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
- nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
- priv->base.ram.type = nouveau_fb_bios_memtype(bios);
- priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
-
- /* read amount of vram attached to each memory controller */
- for (part = 0; part < parts; part++) {
- if (!(pmask & (1 << part))) {
- u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
- }
-
- nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
- priv->base.ram.size += (u64)psize << 20;
- }
- }
-
- /* if all controllers have the same amount attached, there's no holes */
- if (uniform) {
- offset = rsvd_head;
- length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
- return nouveau_mm_init(&pfb->vram, offset, length, 1);
- }
-
- /* otherwise, address lowest common amount from 0GiB */
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
- if (ret)
- return ret;
-
- /* and the rest starting from (8GiB + common_size) */
- offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
-
- ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
- if (ret) {
- nouveau_mm_fini(&pfb->vram);
- return ret;
- }
-
- return 0;
-}
-
-static int
nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.memtype_valid = nvc0_fb_memtype_valid;
+ priv->base.ram.init = nvc0_fb_vram_init;
priv->base.ram.get = nvc0_fb_vram_new;
priv->base.ram.put = nv50_fb_vram_del;
- ret = nvc0_vram_detect(priv);
- if (ret)
- return ret;
-
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c10_page)
return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (pci_dma_mapping_error(device->pdev, priv->r100c10))
return -EFAULT;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
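
For boards whose memory partitions carry unequal amounts of VRAM, nvc0_fb_vram_init() above
hands the allocator two ranges: the common per-partition amount (summed over all partitions)
addressed linearly from zero, and the remainder re-based above 8 GiB.  The page-unit
arithmetic repeated standalone, with invented sizes (bsize is the smallest per-partition
amount in MiB, as read from 0x10f20c):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t rsvd_head = ( 256 * 1024) >> 12;	/* vga memory */
	const uint32_t rsvd_tail = (1024 * 1024) >> 12;	/* vbios etc */
	uint32_t parts = 3;
	uint32_t bsize = 512;					/* MiB */
	uint64_t total = (uint64_t)(512 + 512 + 1024) << 20;	/* bytes */

	/* low range: the common amount across all partitions */
	uint64_t lo_off = rsvd_head;
	uint64_t lo_len = (uint64_t)(bsize << 8) * parts;

	/* high range: the rest, starting at 8GiB plus the common size */
	uint64_t hi_off = (0x0200000000ULL >> 12) + (bsize << 8);
	uint64_t hi_len = (total >> 12) - (bsize << 8) - rsvd_tail;

	printf("lo: offset 0x%llx, 0x%llx pages\n",
	       (unsigned long long)lo_off, (unsigned long long)lo_len);
	printf("hi: offset 0x%llx, 0x%llx pages\n",
	       (unsigned long long)hi_off, (unsigned long long)hi_len);
	return 0;
}
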
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index acf818c58bf0..9fb0f9b92d49 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -43,10 +43,15 @@ static int
nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
struct dcb_gpio_func *func)
{
+ struct nouveau_bios *bios = nouveau_bios(gpio);
+ u8 ver, len;
+ u16 data;
+
if (line == 0xff && tag == 0xff)
return -EINVAL;
- if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
+ data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
+ if (data)
return 0;
/* Apple iMac G4 NV18 */
@@ -265,7 +270,7 @@ nouveau_gpio_init(struct nouveau_gpio *gpio)
int ret = nouveau_subdev_init(&gpio->base);
if (ret == 0 && gpio->reset) {
if (dmi_check_system(gpio_reset_ids))
- gpio->reset(gpio);
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index f3502c961cd9..bf13a1200f26 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -29,15 +29,15 @@ struct nv50_gpio_priv {
};
static void
-nv50_gpio_reset(struct nouveau_gpio *gpio)
+nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
struct nv50_gpio_priv *priv = (void *)gpio;
+ u8 ver, len;
u16 entry;
- u8 ver;
int ent = -1;
- while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
static const u32 regs[] = { 0xe100, 0xe28c };
u32 data = nv_ro32(bios, entry);
u8 line = (data & 0x0000001f);
@@ -48,7 +48,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio)
u32 val = (unk1 << 16) | unk0;
u32 reg = regs[line >> 4]; line &= 0x0f;
- if (func == 0xff)
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
continue;
gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 8d18fcad26e0..83e8b8f16e6a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -29,15 +29,15 @@ struct nvd0_gpio_priv {
};
static void
-nvd0_gpio_reset(struct nouveau_gpio *gpio)
+nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
struct nvd0_gpio_priv *priv = (void *)gpio;
+ u8 ver, len;
u16 entry;
- u8 ver;
int ent = -1;
- while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
u32 data = nv_ro32(bios, entry);
u8 line = (data & 0x0000003f);
u8 defs = !!(data & 0x00000080);
@@ -45,7 +45,8 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio)
u8 unk0 = (data & 0x00ff0000) >> 16;
u8 unk1 = (data & 0x1f000000) >> 24;
- if (func == 0xff)
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
continue;
gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index fe1ebf199ba9..dc27e794a851 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
udelay(1);
if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x", ctrl);
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
return -EBUSY;
}
} while (ctrl & 0x03010000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 1188227ca6aa..6565f3dbbe04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
if (ret)
return ret;
+ mutex_lock(&imem->base.mutex);
list_add(&iobj->head, &imem->list);
+ mutex_unlock(&imem->base.mutex);
return 0;
}
void
nouveau_instobj_destroy(struct nouveau_instobj *iobj)
{
- if (iobj->head.prev)
- list_del(&iobj->head);
+ struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+
+ mutex_lock(&subdev->mutex);
+ list_del(&iobj->head);
+ mutex_unlock(&subdev->mutex);
+
return nouveau_object_destroy(&iobj->base);
}
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
if (ret)
return ret;
+ mutex_lock(&imem->base.mutex);
+
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
}
}
+ mutex_unlock(&imem->base.mutex);
+
return 0;
}
@@ -104,17 +114,26 @@ int
nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
{
struct nouveau_instobj *iobj;
- int i;
+ int i, ret = 0;
if (suspend) {
+ mutex_lock(&imem->base.mutex);
+
list_for_each_entry(iobj, &imem->list, head) {
iobj->suspend = vmalloc(iobj->size);
- if (iobj->suspend) {
- for (i = 0; i < iobj->size; i += 4)
- iobj->suspend[i / 4] = nv_ro32(iobj, i);
- } else
- return -ENOMEM;
+ if (!iobj->suspend) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < iobj->size; i += 4)
+ iobj->suspend[i / 4] = nv_ro32(iobj, i);
}
+
+ mutex_unlock(&imem->base.mutex);
+
+ if (ret)
+ return ret;
}
return nouveau_subdev_fini(&imem->base, suspend);
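
The instmem suspend path above now snapshots each object's words into a vmalloc'd buffer
under the subdev mutex and unwinds with -ENOMEM if any allocation fails; init writes the
snapshots back.  A toy, userspace-only model of that snapshot/restore idea (the real code
walks a list of instobjs and uses the nv_ro32/nv_wo32 accessors):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct obj {
	uint32_t *backing;	/* stands in for the object's on-GPU words */
	uint32_t *suspend;	/* snapshot held across suspend */
	size_t    size;		/* bytes, multiple of 4 */
};

static int
obj_suspend(struct obj *o)
{
	o->suspend = malloc(o->size);
	if (!o->suspend)
		return -1;	/* caller aborts the suspend, as in the hunk */
	memcpy(o->suspend, o->backing, o->size);
	return 0;
}

static void
obj_resume(struct obj *o)
{
	if (!o->suspend)
		return;
	memcpy(o->backing, o->suspend, o->size);
	free(o->suspend);
	o->suspend = NULL;
}

int main(void)
{
	uint32_t words[4] = { 1, 2, 3, 4 };
	struct obj o = { .backing = words, .suspend = NULL, .size = sizeof(words) };

	if (obj_suspend(&o) == 0) {
		words[0] = 0;		/* "lost" across suspend */
		obj_resume(&o);		/* restored from the snapshot */
	}
	return (int)words[0] - 1;	/* 0 if the restore worked */
}
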
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index ba4d28b50368..f5bbd3834116 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
}
static u32
-nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nv04_instobj_priv *node = (void *)object;
return nv_ro32(object->engine, node->mem->offset + addr);
}
static void
-nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nv04_instobj_priv *node = (void *)object;
nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
}
static u32
-nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
{
return nv_rd32(object, 0x700000 + addr);
}
static void
-nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
return nv_wr32(object, 0x700000 + addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 73c52ebd5932..da64253201ef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static u32
-nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
{
struct nv04_instmem_priv *priv = (void *)object;
return ioread32_native(priv->iomem + addr);
}
static void
-nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nv04_instmem_priv *priv = (void *)object;
iowrite32_native(data, priv->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 27ef0891d10b..cfc7e31461de 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
}
static u32
-nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
+nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
{
struct nv50_instmem_priv *priv = (void *)object->engine;
struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
}
static void
-nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
+nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
{
struct nv50_instmem_priv *priv = (void *)object->engine;
struct nv50_instobj_priv *node = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index de5721cfc4c2..8379aafa6e1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
struct nouveau_mc *pmc = nouveau_mc(subdev);
const struct nouveau_mc_intr *map = pmc->intr_map;
struct nouveau_subdev *unit;
- u32 stat;
+ u32 stat, intr;
- stat = nv_rd32(pmc, 0x000100);
+ intr = stat = nv_rd32(pmc, 0x000100);
while (stat && map->stat) {
if (stat & map->stat) {
unit = nouveau_subdev(subdev, map->unit);
if (unit && unit->intr)
unit->intr(unit);
- stat &= ~map->stat;
+ intr &= ~map->stat;
}
map++;
}
- if (stat) {
+ if (intr) {
nv_error(pmc, "unknown intr 0x%08x\n", stat);
}
}
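
In the interrupt handler above, stat keeps the full status word read from 0x000100 for the final diagnostic, while intr has each recognised unit's bits cleared as it is dispatched, so only genuinely unknown bits reach the error path. A standalone sketch of that split, with a toy table instead of the real NVDEV unit map, might be:

    #include <stdint.h>
    #include <stdio.h>

    struct intr_map { uint32_t stat; const char *unit; };

    static const struct intr_map map[] = {
        { 0x00000100, "FIFO" },
        { 0x00001000, "GR"   },
        { 0, 0 },
    };

    static void dispatch(uint32_t status)
    {
        uint32_t stat = status, intr = status;   /* intr tracks unhandled bits */
        const struct intr_map *m = map;

        while (stat && m->stat) {
            if (stat & m->stat) {
                printf("dispatch %s\n", m->unit);
                intr &= ~m->stat;                /* mark these bits as handled */
            }
            m++;
        }
        if (intr)
            printf("unknown intr 0x%08x\n", stat);
    }

    int main(void) { dispatch(0x00001104); return 0; }
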
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index cedf33b02977..8d759f830323 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0000d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index a001e4c4d38d..ceb5c83f9459 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
{ 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0040d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c2b81e30a17d..92796682722d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 93e3ddf7303a..e286e132c7e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -260,7 +260,7 @@ nouveau_mxm_create_(struct nouveau_object *parent,
data = mxm_table(bios, &ver, &len);
if (!data || !(ver = nv_ro08(bios, data))) {
- nv_info(mxm, "no VBIOS data, nothing to do\n");
+ nv_debug(mxm, "no VBIOS data, nothing to do\n");
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 082c11b75acb..77c67fc970e6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_length = (offset + length) - mm_offset;
int ret;
- vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
return ret;
}
+ *pvm = vm;
+
return 0;
}
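
The vm/base.c change defers writing the caller's *pvm until every setup step has succeeded, so an error return can free the half-built vm without leaving the caller holding a stale pointer. The general shape of that pattern, sketched against a made-up object type rather than nouveau_vm, is:

    #include <stdlib.h>

    struct obj { int ready; };

    /* returns 0 and publishes *pobj only on full success */
    static int obj_create(struct obj **pobj)
    {
        struct obj *o = malloc(sizeof(*o));
        if (!o)
            return -1;

        o->ready = 1;          /* further setup steps would go here...        */
        if (!o->ready) {       /* ...and a failure frees before publishing    */
            free(o);
            return -1;
        }

        *pobj = o;             /* caller sees the object only once it is valid */
        return 0;
    }
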
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index cbf1fc60a386..41241922263f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -246,14 +246,26 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, -ENODEV);
client = nv_client(abi16->client);
-
- if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
- return nouveau_abi16_put(abi16, -EINVAL);
-
device = nv_device(abi16->device);
imem = nouveau_instmem(device);
pfb = nouveau_fb(device);
+ /* hack to allow channel engine type specification on kepler */
+ if (device->card_type >= NV_E0) {
+ if (init->fb_ctxdma_handle != ~0)
+ init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+ else
+ init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+
+ /* allow flips to be executed if this is a graphics channel */
+ init->tt_ctxdma_handle = 0;
+ if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+ init->tt_ctxdma_handle = 1;
+ }
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return nouveau_abi16_put(abi16, -EINVAL);
+
/* allocate "abi16 channel" data and make up a handle for it */
init->channel = ffsll(~abi16->handles);
if (!init->channel--)
@@ -268,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
abi16->handles |= (1 << init->channel);
/* create channel object and initialise dma and fence management */
- if (device->card_type >= NV_E0) {
- init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
- init->tt_ctxdma_handle = 0;
- }
-
ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
init->channel, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
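
With the hunks above, on NV_E0 (Kepler) the ioctl's fb_ctxdma_handle is reused to carry the channel's engine class (defaulting to GR when a handle was supplied), and tt_ctxdma_handle is repurposed as a flag meaning "this is a graphics channel, so allow flips"; the ~0 validity check now runs after that translation rather than before it. A compressed sketch of just the translation step, treating NVE0_CHANNEL_IND_ENGINE_GR as an opaque stand-in constant, might read:

    #include <stdint.h>

    #define ENGINE_GR 0x01          /* stand-in for NVE0_CHANNEL_IND_ENGINE_GR */

    struct chan_req { uint32_t fb_handle, tt_handle; };

    /* returns 0 on success, -1 if the request is invalid (kepler path only) */
    static int kepler_translate(struct chan_req *req)
    {
        /* a supplied fb handle means "graphics"; otherwise the tt handle
         * already names the requested engine class */
        if (req->fb_handle != ~0u)
            req->fb_handle = ENGINE_GR;
        else
            req->fb_handle = req->tt_handle;

        /* tt handle becomes a want-software-channel flag for flip completion */
        req->tt_handle = (req->fb_handle == ENGINE_GR) ? 1 : 0;

        if (req->fb_handle == ~0u || req->tt_handle == ~0u)
            return -1;
        return 0;
    }
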
@@ -382,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16_chan *chan, *temp;
struct nouveau_abi16_ntfy *ntfy;
struct nouveau_object *object;
- struct nv_dma_class args;
+ struct nv_dma_class args = {};
int ret;
if (unlikely(!abi16))
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48783e14114c..d97f20069d3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -35,6 +35,14 @@ static struct nouveau_dsm_priv {
acpi_handle rom_handle;
} nouveau_dsm_priv;
+bool nouveau_is_optimus(void) {
+ return nouveau_dsm_priv.optimus_detected;
+}
+
+bool nouveau_is_v1_dsm(void) {
+ return nouveau_dsm_priv.dsm_detected;
+}
+
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
- /* perhaps the _DSM functions are mutually exclusive, but prepare for
- * the future */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.dsm_detected)
return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
/* Optimus laptops have the card already disabled in
* nouveau_switcheroo_set_state */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.dsm_detected)
return 0;
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,24 +289,24 @@ static bool nouveau_dsm_detect(void)
has_optimus = 1;
}
- if (vga_count == 2 && has_dsm && guid_valid) {
+ /* find the optimus DSM or the old v1 DSM */
+ if (has_optimus == 1) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
- printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+ printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
acpi_method_name);
- nouveau_dsm_priv.dsm_detected = true;
+ nouveau_dsm_priv.optimus_detected = true;
ret = true;
- }
-
- if (has_optimus == 1) {
+ } else if (vga_count == 2 && has_dsm && guid_valid) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
- printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+ printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
- nouveau_dsm_priv.optimus_detected = true;
+ nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
+
return ret;
}
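
The detection rewrite above prefers the Optimus _DSM whenever it is present and only falls back to the older v1 switching _DSM (two VGA devices plus a valid GUID) otherwise; the new nouveau_is_optimus()/nouveau_is_v1_dsm() helpers simply expose those cached flags. A minimal sketch of that either/or selection, with booleans standing in for the ACPI probing, is:

    #include <stdbool.h>

    struct dsm_priv { bool optimus_detected, dsm_detected; };

    /* pick exactly one switching scheme, preferring Optimus */
    static bool dsm_detect(struct dsm_priv *p, bool has_optimus,
                           int vga_count, bool has_dsm, bool guid_valid)
    {
        if (has_optimus) {
            p->optimus_detected = true;
            return true;
        }
        if (vga_count == 2 && has_dsm && guid_valid) {
            p->dsm_detected = true;
            return true;
        }
        return false;
    }

    static bool is_optimus(const struct dsm_priv *p) { return p->optimus_detected; }
    static bool is_v1_dsm(const struct dsm_priv *p)  { return p->dsm_detected; }
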
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 08af67722b57..d0da230d7706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -4,6 +4,8 @@
#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
+bool nouveau_is_optimus(void);
+bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
+static inline bool nouveau_is_optimus(void) { return false; };
+static inline bool nouveau_is_v1_dsm(void) { return false; };
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 09fdef235882..865eddfa30a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
return 0;
}
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-bool
-bios_encoder_match(struct dcb_output *dcb, u32 hash)
-{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
-
- switch (dcb->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
- default:
- return true;
- }
-}
-
-int
-nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
- struct dcb_output *dcbent, int crtc)
-{
- /*
- * The display script table is located by the BIT 'U' table.
- *
- * It contains an array of pointers to various tables describing
- * a particular output type. The first 32-bits of the output
- * tables contains similar information to a DCB entry, and is
- * used to decide whether that particular table is suitable for
- * the output you want to access.
- *
- * The "record header length" field here seems to indicate the
- * offset of the first configuration entry in the output tables.
- * This is 10 on most cards I've seen, but 12 has been witnessed
- * on DP cards, and there's another script pointer within the
- * header.
- *
- * offset + 0 ( 8 bits): version
- * offset + 1 ( 8 bits): header length
- * offset + 2 ( 8 bits): record length
- * offset + 3 ( 8 bits): number of records
- * offset + 4 ( 8 bits): record header length
- * offset + 5 (16 bits): pointer to first output script table
- */
-
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- uint8_t *table = &bios->data[bios->display.script_table_ptr];
- uint8_t *otable = NULL;
- uint16_t script;
- int i;
-
- if (!bios->display.script_table_ptr) {
- NV_ERROR(drm, "No pointer to output script table\n");
- return 1;
- }
-
- /*
- * Nothing useful has been in any of the pre-2.0 tables I've seen,
- * so until they are, we really don't need to care.
- */
- if (table[0] < 0x20)
- return 1;
-
- if (table[0] != 0x20 && table[0] != 0x21) {
- NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
- table[0]);
- return 1;
- }
-
- /*
- * The output script tables describing a particular output type
- * look as follows:
- *
- * offset + 0 (32 bits): output this table matches (hash of DCB)
- * offset + 4 ( 8 bits): unknown
- * offset + 5 ( 8 bits): number of configurations
- * offset + 6 (16 bits): pointer to some script
- * offset + 8 (16 bits): pointer to some script
- *
- * headerlen == 10
- * offset + 10 : configuration 0
- *
- * headerlen == 12
- * offset + 10 : pointer to some script
- * offset + 12 : configuration 0
- *
- * Each config entry is as follows:
- *
- * offset + 0 (16 bits): unknown, assumed to be a match value
- * offset + 2 (16 bits): pointer to script table (clock set?)
- * offset + 4 (16 bits): pointer to script table (reset?)
- *
- * There doesn't appear to be a count value to say how many
- * entries exist in each script table, instead, a 0 value in
- * the first 16-bit word seems to indicate both the end of the
- * list and the default entry. The second 16-bit word in the
- * script tables is a pointer to the script to execute.
- */
-
- NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
- dcbent->type, dcbent->location, dcbent->or);
- for (i = 0; i < table[3]; i++) {
- otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
- break;
- }
-
- if (!otable) {
- NV_DEBUG(drm, "failed to match any output table\n");
- return 1;
- }
-
- if (pclk < -2 || pclk > 0) {
- /* Try to find matching script table entry */
- for (i = 0; i < otable[5]; i++) {
- if (ROM16(otable[table[4] + i*6]) == type)
- break;
- }
-
- if (i == otable[5]) {
- NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
- "using first\n",
- type, dcbent->type, dcbent->or);
- i = 0;
- }
- }
-
- if (pclk == 0) {
- script = ROM16(otable[6]);
- if (!script) {
- NV_DEBUG(drm, "output script 0 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk == -1) {
- script = ROM16(otable[8]);
- if (!script) {
- NV_DEBUG(drm, "output script 1 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk == -2) {
- if (table[4] >= 12)
- script = ROM16(otable[10]);
- else
- script = 0;
- if (!script) {
- NV_DEBUG(drm, "output script 2 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk > 0) {
- script = ROM16(otable[table[4] + i*6 + 2]);
- if (script)
- script = clkcmptable(bios, script, pclk);
- if (!script) {
- NV_DEBUG(drm, "clock script 0 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk < 0) {
- script = ROM16(otable[table[4] + i*6 + 4]);
- if (script)
- script = clkcmptable(bios, script, -pclk);
- if (!script) {
- NV_DEBUG(drm, "clock script 1 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- }
-
- return 0;
-}
-
-
int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
{
/*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
return 0;
}
-static int
-parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
- struct bit_entry *bitentry)
-{
- /*
- * Parses the pointer to the G80 output script tables
- *
- * Starting at bitentry->offset:
- *
- * offset + 0 (16 bits): output script table pointer
- */
-
- struct nouveau_drm *drm = nouveau_drm(dev);
- uint16_t outputscripttableptr;
-
- if (bitentry->length != 3) {
- NV_ERROR(drm, "Do not understand BIT U table\n");
- return -EINVAL;
- }
-
- outputscripttableptr = ROM16(bios->data[bitentry->offset]);
- bios->display.script_table_ptr = outputscripttableptr;
- return 0;
-}
-
struct bit_table {
const char id;
int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
- parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
return 0;
}
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
- int i, ret = 0;
+ int ret = 0;
/* Reset the BIOS head to 0. */
bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
bios->fp.lvds_init_run = false;
}
- if (nv_device(drm->device)->card_type >= NV_50) {
- for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
- nouveau_bios_run_display_table(dev, 0, 0,
- &bios->dcb.entry[i], -1);
- }
- }
-
return ret;
}
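
The block removed from nouveau_bios.c (bios_encoder_match and the BIT 'U' display-script walker) implemented the hash layout its own comments describe: bits 0-3 hold the DCB output type, bits 4-7 the location, bit (16 + or) selects the output resource, and for TMDS/LVDS/DP outputs bits 22-23 optionally encode the SOR link. Purely as a restatement of that removed matching rule, on simplified stand-in fields, a standalone sketch could be:

    #include <stdint.h>
    #include <stdbool.h>

    /* stand-in for the DCB output fields consulted by the hash match */
    struct dcb_entry {
        uint8_t type;       /* DCB_OUTPUT_* */
        uint8_t location;
        uint8_t or;         /* output resource mask */
        uint8_t link;       /* sorconf.link */
        bool    is_sor;     /* TMDS/LVDS/DP */
    };

    static bool encoder_match(const struct dcb_entry *dcb, uint32_t hash)
    {
        if ((hash & 0x000000f0) != (uint32_t)(dcb->location << 4))
            return false;
        if ((hash & 0x0000000f) != dcb->type)
            return false;
        if (!(hash & ((uint32_t)dcb->or << 16)))
            return false;
        if (dcb->is_sor && (hash & 0x00c00000) &&
            !(hash & ((uint32_t)dcb->link << 22)))
            return false;
        return true;
    }
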
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 3befbb821a56..f68c54ca422f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -128,12 +128,6 @@ struct nvbios {
} state;
struct {
- struct dcb_output *output;
- int crtc;
- uint16_t script_table_ptr;
- } display;
-
- struct {
uint16_t fptablepointer; /* also used by tmds */
uint16_t fpxlatetableptr;
int xlatwidth;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
int nouveau_run_vbios_init(struct drm_device *);
struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
-int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
- struct dcb_output *, int crtc);
bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
int head, int pxclk);
int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_output *, u32 hash);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 35ac57f0aab6..1699a9083a2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
*/
#include <core/engine.h>
+#include <linux/swiotlb.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
@@ -225,7 +226,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
type, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
+ align >> PAGE_SHIFT, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -315,7 +316,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -351,7 +352,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -392,12 +393,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
int ret;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+ interruptible, no_wait_gpu);
if (ret)
return ret;
@@ -556,8 +557,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_bo *nvbo, bool evict,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
int ret;
@@ -566,8 +566,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
if (ret)
return ret;
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
- no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
+ no_wait_gpu, new_mem);
nouveau_fence_unref(&fence);
return ret;
}
@@ -965,8 +965,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = chan = drm->channel;
@@ -995,7 +994,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
- no_wait_reserve,
no_wait_gpu, new_mem);
}
@@ -1064,8 +1062,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1078,7 +1075,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
@@ -1086,11 +1083,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -1098,8 +1095,7 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1112,15 +1108,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -1195,8 +1191,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1215,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* CPU copy if we have no accelerated method available */
if (!drm->ttm.move) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
goto out;
}
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr,
+ no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr,
+ no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr,
+ no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
if (nv_device(drm->device)->card_type < NV_50) {
@@ -1279,7 +1277,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
if (drm->agp.stat == ENABLED) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = drm->agp.base;
- mem->bus.is_iomem = true;
+ mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
break;
@@ -1343,7 +1341,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = mappable;
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
- return nouveau_bo_validate(nvbo, false, true, false);
+ return nouveau_bo_validate(nvbo, false, false);
}
static int
@@ -1472,19 +1470,19 @@ nouveau_bo_fence_ref(void *sync_obj)
}
static bool
-nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_signalled(void *sync_obj)
{
return nouveau_fence_done(sync_obj);
}
static int
-nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
return nouveau_fence_wait(sync_obj, lazy, intr);
}
static int
-nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_flush(void *sync_obj)
{
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index dec51b1098fe..25ca37989d2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -76,7 +76,7 @@ u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu);
+ bool no_wait_gpu);
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
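
The bo changes track a TTM interface cleanup that drops the no_wait_reserve argument throughout, so nouveau_bo_validate and the move helpers now take only (interruptible, no_wait_gpu). A hedged caller-side illustration, with a dummy validate standing in for ttm_bo_validate and a fake placement field, is:

    #include <stdbool.h>

    struct bo { int placement; };

    /* stand-in for the trimmed validate: reservation waiting is no longer
     * a separate knob, only interruptible and no_wait_gpu remain */
    static int bo_validate(struct bo *bo, bool interruptible, bool no_wait_gpu)
    {
        (void)bo; (void)interruptible; (void)no_wait_gpu;
        return 0;
    }

    static int pin_vram(struct bo *bo)
    {
        bo->placement = 1;                       /* request VRAM placement     */
        return bo_validate(bo, false, false);    /* was (false, false, false)  */
    }
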
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index c1d7301c0e9c..174300b6a02e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
+ if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+ nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
}
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_fb *pfb = nouveau_fb(device);
struct nouveau_software_chan *swch;
struct nouveau_object *object;
- struct nv_dma_class args;
+ struct nv_dma_class args = {};
int ret, i;
/* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
/* allocate software object class (used for fences on <= nv05, and
* to signal flip completion), bind it to a subchannel.
*/
- if (chan != chan->drm->cechan) {
+ if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
ret = nouveau_object_new(nv_object(client), chan->handle,
NvSw, nouveau_abi16_swclass(chan->drm),
NULL, 0, &object);
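
nouveau_channel_del now also drops the push buffer's pin before releasing the last reference, but only if a pin is still outstanding; otherwise the buffer would stay pinned after the channel is gone. The same file also zero-initialises the nv_dma_class argument block so unused fields are not passed as stack garbage. A compressed sketch of the teardown order, with counters standing in for the real bo refcounts, is:

    struct bo { int pin_refcnt; int refs; };

    static void bo_unmap(struct bo *bo)  { (void)bo; }
    static void bo_unpin(struct bo *bo)  { bo->pin_refcnt--; }
    static void bo_unref(struct bo **pbo)
    {
        if (--(*pbo)->refs == 0)        /* last reference: forget the buffer */
            *pbo = 0;
    }

    /* mirrors the hunk: unmap, unpin only if still pinned, then drop the ref */
    static void channel_del_push(struct bo **push)
    {
        if (*push) {
            bo_unmap(*push);
            if ((*push)->pin_refcnt)
                bo_unpin(*push);
            bo_unref(push);
        }
    }
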
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d3595b23434a..e620ba8271b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
dev = nv_connector->base.dev;
drm = nouveau_drm(dev);
gpio = nouveau_gpio(drm->device);
- NV_DEBUG(drm, "\n");
if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -128,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_encoder **pnv_encoder)
{
struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- int i;
+ struct nouveau_i2c_port *port = NULL;
+ int i, panel = -ENODEV;
+
+ /* eDP panels need powering on by us (if the VBIOS doesn't default it
+ * to on) before doing any AUX channel transactions. LVDS panel power
+ * is handled by the SOR itself, and not required for LVDS DDC.
+ */
+ if (nv_connector->type == DCB_CONNECTOR_eDP) {
+ panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+ if (panel == 0) {
+ gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+ msleep(300);
+ }
+ }
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_i2c_port *port = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@@ -151,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
- return port;
+ break;
}
+
+ port = NULL;
}
- return NULL;
+ /* eDP panel not detected, restore panel power GPIO to previous
+ * state to avoid confusing the SOR for other output types.
+ */
+ if (!port && panel == 0)
+ gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+
+ return port;
}
static struct nouveau_encoder *
@@ -221,7 +242,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
}
if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
DRM_MODE_SUBCONNECTOR_DVID :
@@ -929,8 +950,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
int type, ret = 0;
bool dummy;
- NV_DEBUG(drm, "\n");
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
nv_connector = nouveau_connector(connector);
if (nv_connector->index == index)
@@ -1043,7 +1062,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
/* Init DVI-I specific properties */
if (nv_connector->type == DCB_CONNECTOR_DVI_I)
- drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
/* Add overscan compensation options to digital outputs */
if (disp->underscan_property &&
@@ -1051,31 +1070,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
type == DRM_MODE_CONNECTOR_DVII ||
type == DRM_MODE_CONNECTOR_HDMIA ||
type == DRM_MODE_CONNECTOR_DisplayPort)) {
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_hborder_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_vborder_property,
0);
}
/* Add hue and saturation options */
if (disp->vibrant_hue_property)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->vibrant_hue_property,
90);
if (disp->color_vibrance_property)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->color_vibrance_property,
150);
switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (nv_device(drm->device)->card_type >= NV_50) {
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
@@ -1088,18 +1107,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
if (disp->dithering_mode) {
nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->dithering_mode,
nv_connector->dithering_mode);
}
if (disp->dithering_depth) {
nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->dithering_depth,
nv_connector->dithering_depth);
}
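
The ddc_detect rework powers an eDP panel up via the PANEL_POWER GPIO (with a settle delay) before attempting AUX/DDC probing, and restores the GPIO to its previous off state if no port responded, replacing the unconditional power-on that used to live in nouveau_display_init. A standalone sketch of that bracketing, with trivial stand-ins for the gpio ops and the probe itself, is:

    #include <stdbool.h>

    struct gpio { bool panel_power; };

    static int  gpio_get(struct gpio *g)          { return g->panel_power; }
    static void gpio_set(struct gpio *g, bool on) { g->panel_power = on; }
    static bool probe_ddc(void)                   { return false; } /* nothing answered */

    /* power the panel only for the duration of a failed probe */
    static bool edp_detect(struct gpio *g)
    {
        int prev = gpio_get(g);
        bool found;

        if (prev == 0)
            gpio_set(g, true);       /* panel must be on for AUX transactions */

        found = probe_ddc();

        if (!found && prev == 0)
            gpio_set(g, false);      /* restore the previous state on failure */
        return found;
    }
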
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index ebdb87670a8f..20eb84cce9e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,6 +28,7 @@
#define __NOUVEAU_CONNECTOR_H__
#include <drm/drm_edid.h>
+#include "nouveau_crtc.h"
struct nouveau_i2c_port;
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
+static inline struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
struct drm_connector *
nouveau_connector_create(struct drm_device *, int index);
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index e6d0d1eb0133..d1e5890784d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
return &crtc->base;
}
-int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
- uint32_t buffer_handle, uint32_t width,
- uint32_t height);
-int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
-
int nv04_cursor_init(struct nouveau_crtc *);
-int nv50_cursor_init(struct nouveau_crtc *);
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 86124b131f4f..508b00a2ce0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
nv_fb->r_dma = NvEvoVRAM_LP;
switch (fb->depth) {
- case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
- case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
- case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+ case 8: nv_fb->r_format = 0x1e00; break;
+ case 15: nv_fb->r_format = 0xe900; break;
+ case 16: nv_fb->r_format = 0xe800; break;
case 24:
- case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
- case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+ case 32: nv_fb->r_format = 0xcf00; break;
+ case 30: nv_fb->r_format = 0xd100; break;
default:
NV_ERROR(drm, "unknown depth %d\n", fb->depth);
return -EINVAL;
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
- /* power on internal panel if it's not already. the init tables of
- * some vbios default this to off for some reason, causing the
- * panel to not work after resume
- */
- if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
- gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
- msleep(300);
- }
-
/* enable polling for external displays */
drm_kms_helper_poll_enable(dev);
@@ -324,7 +315,7 @@ nouveau_display_create(struct drm_device *dev)
disp->underscan_vborder_property =
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
- if (gen == 1) {
+ if (gen >= 1) {
disp->vibrant_hue_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"vibrant hue", 2);
@@ -366,10 +357,7 @@ nouveau_display_create(struct drm_device *dev)
if (nv_device(drm->device)->card_type < NV_50)
ret = nv04_display_create(dev);
else
- if (nv_device(drm->device)->card_type < NV_D0)
ret = nv50_display_create(dev);
- else
- ret = nvd0_display_create(dev);
if (ret)
goto disp_create_err;
@@ -400,11 +388,12 @@ nouveau_display_destroy(struct drm_device *dev)
nouveau_backlight_exit(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+
if (disp->dtor)
disp->dtor(dev);
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
nouveau_drm(dev)->display = NULL;
kfree(disp);
}
@@ -659,10 +648,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
- if (nv_device(drm->device)->card_type >= NV_D0)
- ret = nvd0_display_flip_next(crtc, fb, chan, 0);
- else
- ret = nv50_display_flip_next(crtc, fb, chan);
+ ret = nv50_display_flip_next(crtc, fb, chan, 0);
if (ret) {
mutex_unlock(&chan->cli->mutex);
goto fail_unreserve;
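
nouveau_framebuffer_init now encodes the per-depth EVO surface formats directly (0x1e00 for 8 bpp, 0xe900 for 15, 0xe800 for 16, 0xcf00 for 24/32, 0xd100 for 30) instead of going through the removed NV50_EVO_CRTC_FB_DEPTH_* symbols. Restated as a standalone helper, with the same mapping as the hunk above:

    #include <stdint.h>

    /* depth -> EVO surface format word; 0 means unsupported */
    static uint32_t evo_fb_format(int depth)
    {
        switch (depth) {
        case  8: return 0x1e00;
        case 15: return 0xe900;
        case 16: return 0xe800;
        case 24:
        case 32: return 0xcf00;
        case 30: return 0xd100;
        default: return 0;
        }
    }
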
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 978a108ba7a1..59838651ee8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,60 +30,17 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include <core/class.h>
+
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct bit_entry d;
- u8 *table;
- int i;
-
- if (bit_table(dev, 'd', &d)) {
- NV_ERROR(drm, "BIT 'd' table not found\n");
- return NULL;
- }
-
- if (d.version != 1) {
- NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
- return NULL;
- }
-
- table = ROMPTR(dev, d.data[0]);
- if (!table) {
- NV_ERROR(drm, "displayport table pointer invalid\n");
- return NULL;
- }
-
- switch (table[0]) {
- case 0x20:
- case 0x21:
- case 0x30:
- case 0x40:
- break;
- default:
- NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
- return NULL;
- }
-
- for (i = 0; i < table[3]; i++) {
- *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
- return table;
- }
-
- NV_ERROR(drm, "displayport encoder table not found\n");
- return NULL;
-}
-
/******************************************************************************
* link training
*****************************************************************************/
struct dp_state {
struct nouveau_i2c_port *auxch;
- struct dp_train_func *func;
+ struct nouveau_object *core;
struct dcb_output *dcb;
int crtc;
u8 *dpcd;
@@ -97,13 +54,20 @@ static void
dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
u8 sink[2];
+ u32 data;
NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
/* set desired link configuration on the source */
- dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
- dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+ data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
+ if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+ data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
/* inform the sink of the new configuration */
sink[0] = dp->link_bw / 27000;
@@ -118,11 +82,14 @@ static void
dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
u8 sink_tp;
NV_DEBUG(drm, "training pattern %d\n", pattern);
- dp->func->train_set(dev, dp->dcb, pattern);
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
int i;
for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
- dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
}
return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
}
static void
-dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
+dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30) {
- if (enable) script = ROM16(entry[12]);
- else script = ROM16(entry[14]);
- } else
- if (table[0] == 0x40) {
- if (enable) script = ROM16(entry[11]);
- else script = ROM16(entry[13]);
- }
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
-{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[6]);
- else
- if (table[0] == 0x40)
- script = ROM16(entry[5]);
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
+ NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
+ NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
+ NV94_DISP_SOR_DP_TRAIN_OP_INIT);
}
static void
dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[8]);
- else
- if (table[0] == 0x40)
- script = ROM16(entry[7]);
- }
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
+ NV94_DISP_SOR_DP_TRAIN_OP_FINI);
}
static bool
nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
- struct dp_train_func *func)
+ struct nouveau_object *core)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
if (!dp.auxch)
return false;
- dp.func = func;
+ dp.core = core;
dp.dcb = nv_encoder->dcb;
dp.crtc = nv_crtc->index;
dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
*/
gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
- /* enable down-spreading, if possible */
- dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
- /* execute pre-train script from vbios */
- dp_link_train_init(dev, &dp);
+ /* enable down-spreading and execute pre-train script from vbios */
+ dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
/* start off at highest link rate supported by encoder and display */
while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
void
nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
- struct dp_train_func *func)
+ struct nouveau_object *core)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
nv_wraux(auxch, DP_SET_POWER, &status, 1);
if (mode == DRM_MODE_DPMS_ON)
- nouveau_dp_link_train(encoder, datarate, func);
+ nouveau_dp_link_train(encoder, datarate, core);
}
static void
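
With the dp.c rewrite the VBIOS-script hooks (dp_train_func) are gone; link configuration, training patterns and per-lane drive settings are all issued as display-class methods through nv_call on the core object, addressed by a method offset built from the CRTC index, SOR link and output resource. A small sketch of just that offset arithmetic and the link-control word, with the NV94_DISP_* method numbers and flag bit treated as opaque stand-ins, is:

    #include <stdint.h>

    /* method offset: (crtc << 3) | (link << 2) | or, as in the hunks above */
    static uint32_t dp_moff(int crtc, int link, int or)
    {
        return ((uint32_t)crtc << 3) | ((uint32_t)link << 2) | (uint32_t)or;
    }

    /* link-control payload: bandwidth in 27 MB/s units above the lane count,
     * plus an enhanced-framing flag (the bit position here is a stand-in) */
    static uint32_t dp_lnkctl(uint32_t link_bw, uint32_t link_nr, int enh_frame)
    {
        uint32_t data = ((link_bw / 27000) << 8) | link_nr;
        if (enh_frame)
            data |= 0x80000000u;     /* stand-in for NV94_..._LNKCTL_FRAME_ENH */
        return data;
    }
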
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8503b2ea570a..5e7aef23825a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -49,8 +49,6 @@
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
-#include "nouveau_ttm.h"
-
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
@@ -86,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
struct nouveau_cli *cli;
int ret;
+ *pcli = NULL;
ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
nouveau_debug, size, pcli);
cli = *pcli;
- if (ret)
+ if (ret) {
+ if (cli)
+ nouveau_client_destroy(&cli->base);
+ *pcli = NULL;
return ret;
+ }
mutex_init(&cli->mutex);
return 0;
@@ -149,7 +152,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
- arg1 = 0;
+ arg1 = 1;
} else {
arg0 = NvDmaFB;
arg1 = NvDmaTT;
@@ -191,8 +194,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
nouveau_bo_move_init(drm);
}
-static int __devinit
-nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
+static int nouveau_drm_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pent)
{
struct nouveau_device *device;
struct apertures_struct *aper;
@@ -224,6 +227,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+ kfree(aper);
ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
nouveau_config, nouveau_debug, &device);
@@ -241,6 +245,8 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
return 0;
}
+static struct lock_class_key drm_client_lock_class_key;
+
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
@@ -252,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
if (ret)
return ret;
+ lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
dev->dev_private = drm;
drm->dev = dev;
@@ -395,17 +402,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
}
int
-nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+nouveau_do_suspend(struct drm_device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- pm_state.event == PM_EVENT_PRETHAW)
- return 0;
-
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending fbcon...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -436,13 +438,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
goto fail_client;
nouveau_agp_fini(drm);
-
- pci_save_state(pdev);
- if (pm_state.event == PM_EVENT_SUSPEND) {
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
return 0;
fail_client:
@@ -457,24 +452,33 @@ fail_client:
return ret;
}
-int
-nouveau_drm_resume(struct pci_dev *pdev)
+int nouveau_pmops_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_cli *cli;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- NV_INFO(drm, "re-enabling device...\n");
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
+ ret = nouveau_do_suspend(drm_dev);
if (ret)
return ret;
- pci_set_master(pdev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+int
+nouveau_do_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_cli *cli;
+
+ NV_INFO(drm, "re-enabling device...\n");
nouveau_agp_reset(drm);
@@ -500,6 +504,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
return 0;
}
+int nouveau_pmops_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ return nouveau_do_resume(drm_dev);
+}
+
+static int nouveau_pmops_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return nouveau_do_suspend(drm_dev);
+}
+
+static int nouveau_pmops_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return nouveau_do_resume(drm_dev);
+}
+
+
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
@@ -652,14 +692,22 @@ nouveau_drm_pci_table[] = {
{}
};
+static const struct dev_pm_ops nouveau_pm_ops = {
+ .suspend = nouveau_pmops_suspend,
+ .resume = nouveau_pmops_resume,
+ .freeze = nouveau_pmops_freeze,
+ .thaw = nouveau_pmops_thaw,
+ .poweroff = nouveau_pmops_freeze,
+ .restore = nouveau_pmops_resume,
+};
+
static struct pci_driver
nouveau_drm_pci_driver = {
.name = "nouveau",
.id_table = nouveau_drm_pci_table,
.probe = nouveau_drm_probe,
.remove = nouveau_drm_remove,
- .suspend = nouveau_drm_suspend,
- .resume = nouveau_drm_resume,
+ .driver.pm = &nouveau_pm_ops,
};
static int __init
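
The driver's legacy pci_driver .suspend/.resume pair is replaced by a dev_pm_ops table: the shared nouveau_do_suspend()/nouveau_do_resume() helpers carry the device-specific work, only the suspend/resume callbacks touch PCI power state, and freeze/thaw (hibernation) reuse the same helpers without it. A rough sketch of that split, using made-up helper names rather than the real PCI API, is:

    /* stand-ins for the driver and PCI helpers used by the new pm callbacks */
    static int  do_suspend(void)    { return 0; }   /* nouveau_do_suspend()     */
    static void pci_powerdown(void) { }             /* save state, go to D3hot  */

    /* system suspend: driver teardown plus PCI power-down */
    static int pmops_suspend(void)
    {
        int ret = do_suspend();
        if (ret)
            return ret;
        pci_powerdown();
        return 0;
    }

    /* hibernation freeze: the device stays powered, so only teardown runs */
    static int pmops_freeze(void)
    {
        return do_suspend();
    }
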
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index a10169927086..aa89eb938b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
return nv_device(nouveau_drm(dev)->device);
}
-int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
-int nouveau_drm_resume(struct pci_dev *);
+int nouveau_pmops_suspend(struct device *);
+int nouveau_pmops_resume(struct device *);
#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 6a17bf2ba9a4..d0d95bd511ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
/* nouveau_dp.c */
bool nouveau_dp_detect(struct drm_encoder *);
void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
- struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
+ struct nouveau_object *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_output *);
-void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_output *);
-
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index bedafd1c9539..cdb83acdffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
void nv10_fence_destroy(struct nouveau_drm *);
int nv10_fence_create(struct nouveau_drm *);
+void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f19..8bf695c52f95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- ret = nouveau_bo_validate(nvbo, true, false, false);
+ ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644
index 2c672cebc889..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-
-static bool
-hdmi_sor(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- if (nv_device(drm->device)->chipset < 0xa3 ||
- nv_device(drm->device)->chipset == 0xaa ||
- nv_device(drm->device)->chipset == 0xac)
- return false;
- return true;
-}
-
-static inline u32
-hdmi_base(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- if (!hdmi_sor(encoder))
- return 0x616500 + (nv_crtc->index * 0x800);
- return 0x61c500 + (nv_encoder->or * 0x800);
-}
-
-static void
-hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- nv_wr32(device, hdmi_base(encoder) + reg, val);
-}
-
-static u32
-hdmi_rd32(struct drm_encoder *encoder, u32 reg)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- return nv_rd32(device, hdmi_base(encoder) + reg);
-}
-
-static u32
-hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
-{
- u32 tmp = hdmi_rd32(encoder, reg);
- hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
- return tmp;
-}
-
-static void
-nouveau_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- u32 or = nv_encoder->or * 0x800;
-
- if (hdmi_sor(encoder))
- nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
-}
-
-static void
-nouveau_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_connector *nv_connector;
- u32 or = nv_encoder->or * 0x800;
- int i;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid)) {
- nouveau_audio_disconnect(encoder);
- return;
- }
-
- if (hdmi_sor(encoder)) {
- nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
- nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
- }
- }
-}
-
-static void
-nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
-{
- /* calculate checksum for the infoframe */
- u8 sum = 0, i;
- for (i = 0; i < frame[2]; i++)
- sum += frame[i];
- frame[3] = 256 - sum;
-
- /* disable infoframe, and write header */
- hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
- hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
-
- /* register scans tell me the audio infoframe has only one set of
- * subpack regs, according to tegra (gee nvidia, it'd be nice if we
- * could get those docs too!), the hdmi block pads out the rest of
- * the packet on its own.
- */
- if (ctrl == 0x020)
- frame[2] = 6;
-
- /* write out checksum and data, weird weird 7 byte register pairs */
- for (i = 0; i < frame[2] + 1; i += 7) {
- u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
- u32 *subpack = (u32 *)&frame[3 + i];
- hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
- hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
- }
-
- /* enable the infoframe */
- hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
-}
-
-static void
-nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
- const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
- const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
- u8 frame[20];
-
- frame[0x00] = 0x82; /* AVI infoframe */
- frame[0x01] = 0x02; /* version */
- frame[0x02] = 0x0d; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
- frame[0x05] = (C << 6) | (M << 4) | R;
- frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
- frame[0x07] = VIC;
- frame[0x08] = PR;
- frame[0x09] = bar_top & 0xff;
- frame[0x0a] = bar_top >> 8;
- frame[0x0b] = bar_bottom & 0xff;
- frame[0x0c] = bar_bottom >> 8;
- frame[0x0d] = bar_left & 0xff;
- frame[0x0e] = bar_left >> 8;
- frame[0x0f] = bar_right & 0xff;
- frame[0x10] = bar_right >> 8;
- frame[0x11] = 0x00;
- frame[0x12] = 0x00;
- frame[0x13] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x020, frame);
-}
-
-static void
-nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
- const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
- u8 frame[12];
-
- frame[0x00] = 0x84; /* Audio infoframe */
- frame[0x01] = 0x01; /* version */
- frame[0x02] = 0x0a; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (CT << 4) | CC;
- frame[0x05] = (SF << 2) | ceaSS;
- frame[0x06] = FMT;
- frame[0x07] = CA;
- frame[0x08] = (DM_INH << 7) | (LSV << 3);
- frame[0x09] = 0x00;
- frame[0x0a] = 0x00;
- frame[0x0b] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x000, frame);
-}
-
-static void
-nouveau_hdmi_disconnect(struct drm_encoder *encoder)
-{
- nouveau_audio_disconnect(encoder);
-
- /* disable audio and avi infoframes */
- hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
- hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
-
- /* disable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
-}
-
-void
-nouveau_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- u32 max_ac_packet, rekey;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!mode || !nv_connector || !nv_connector->edid ||
- !drm_detect_hdmi_monitor(nv_connector->edid)) {
- nouveau_hdmi_disconnect(encoder);
- return;
- }
-
- nouveau_hdmi_video_infoframe(encoder, mode);
- nouveau_hdmi_audio_infoframe(encoder, mode);
-
- hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
- hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
- hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
-
- nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
- /* value matches nvidia binary driver, and tegra constant */
- rekey = 56;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* enable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
- 0x1f000000 | /* unknown */
- max_ac_packet << 16 |
- rekey);
-
- nouveau_audio_mode_set(encoder, mode);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1d8cb506a28a..1303680affd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
nv_subdev(pmc)->intr(nv_subdev(pmc));
-
- if (dev->mode_config.num_crtc) {
- if (device->card_type >= NV_D0) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nvd0_display_intr(dev);
- } else
- if (device->card_type >= NV_50) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nv50_display_intr(dev);
- }
- }
-
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 5566172774df..a701ff5ffa5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -698,10 +698,10 @@ static int
nouveau_hwmon_init(struct drm_device *dev)
{
struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
struct device *hwmon_dev;
int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 366462cf8a2c..b8e05ae38212 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
return ret;
nvbo = *pnvbo;
- /* we restrict allowed domains on nv50+ to only the types
- * that were requested at creation time. not possibly on
- * earlier chips without busting the ABI.
- */
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
@@ -197,6 +193,7 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
if (nvbo->gem) {
if (nvbo->gem->dev == dev) {
drm_gem_object_reference(nvbo->gem);
+ dma_buf_put(dma_buf);
return nvbo->gem;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 6f0ac64873df..25d3495725eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- nouveau_drm_resume(pdev);
+ nouveau_pmops_resume(&pdev->dev);
drm_kms_helper_poll_enable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
nouveau_switcheroo_optimus_dsm();
- nouveau_drm_suspend(pdev, pmm);
+ nouveau_pmops_suspend(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 82a0d9c6cda3..6578cd28c556 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
kfree(nv_crtc);
}
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
if (ret)
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 184cdf806761..39ffc07f906b 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
static inline bool is_powersaving_dpms(int mode)
{
- return (mode != DRM_MODE_DPMS_ON);
+ return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
}
static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 846050f04c23..2cd6fb8c548e 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
struct nv04_display *disp;
int i, ret;
- NV_DEBUG(drm, "\n");
-
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
void
nv04_display_destroy(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_display *disp = nv04_display(dev);
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- NV_DEBUG(drm, "\n");
-
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_mode_set modeset = {
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index ce752bf5cc4e..03017f24d593 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -155,11 +155,20 @@ nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
}
+void nv17_fence_resume(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv = drm->fence;
+
+ nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
int
nv10_fence_create(struct nouveau_drm *drm)
{
@@ -183,8 +192,11 @@ nv10_fence_create(struct nouveau_drm *drm)
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
@@ -192,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
+ priv->base.resume = nv17_fence_resume;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 897b63621e2d..2ca276ada507 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
break;
}
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_select_subconnector_property,
tv_enc->select_subconnector);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_mode_property,
tv_enc->tv_norm);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_flicker_reduction_property,
tv_enc->flicker);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_saturation_property,
tv_enc->saturation);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_hue_property,
tv_enc->hue);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_overscan_property,
tv_enc->overscan);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644
index 222de77d6269..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_hw.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-#include <subdev/clock.h>
-
-static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- NV_DEBUG(drm, "\n");
-
- for (i = 0; i < 256; i++) {
- writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
- writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
- writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
- }
-
- if (nv_crtc->lut.depth == 30) {
- writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
- writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
- writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
- }
-}
-
-int
-nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int index = nv_crtc->index, ret;
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
- NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
-
- if (blanked) {
- nv_crtc->cursor.hide(nv_crtc, false);
-
- ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
- if (ret) {
- NV_ERROR(drm, "no space while blanking crtc\n");
- return ret;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
- OUT_RING(evo, 0);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
- OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
- }
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
- OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
- } else {
- if (nv_crtc->cursor.visible)
- nv_crtc->cursor.show(nv_crtc, false);
- else
- nv_crtc->cursor.hide(nv_crtc, false);
-
- ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
- if (ret) {
- NV_ERROR(drm, "no space while unblanking crtc\n");
- return ret;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
- OUT_RING(evo, nv_crtc->lut.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF :
- NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
- OUT_RING(evo, NvEvoVRAM);
- }
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
- OUT_RING(evo, nv_crtc->fb.offset >> 8);
- OUT_RING(evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
- if (nv_device(drm->device)->chipset != 0x50)
- if (nv_crtc->fb.tile_flags == 0x7a00 ||
- nv_crtc->fb.tile_flags == 0xfe00)
- OUT_RING(evo, NvEvoFB32);
- else
- if (nv_crtc->fb.tile_flags == 0x7000)
- OUT_RING(evo, NvEvoFB16);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
- }
-
- nv_crtc->fb.blanked = blanked;
- return 0;
-}
-
-static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- int head = nv_crtc->index, ret;
- u32 mode = 0x00;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret == 0) {
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
- OUT_RING (evo, mode);
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
- }
-
- return ret;
-}
-
-static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
- int adj;
- u32 hue, vib;
-
- NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
- nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret) {
- NV_ERROR(drm, "no space while setting color vibrance\n");
- return ret;
- }
-
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
-
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING (evo, (hue << 20) | (vib << 8));
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
-
- return 0;
-}
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_connector *connector;
- struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
-
- /* The safest approach is to find an encoder with the right crtc, that
- * is also linked to a connector. */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == crtc)
- return nouveau_connector(connector);
- }
-
- return NULL;
-}
-
-static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_connector *nv_connector;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_display_mode *umode = &crtc->mode;
- struct drm_display_mode *omode;
- int scaling_mode, ret;
- u32 ctrl = 0, oX, oY;
-
- NV_DEBUG(drm, "\n");
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (!nv_connector || !nv_connector->native_mode) {
- NV_ERROR(drm, "no native mode, forcing panel scaling\n");
- scaling_mode = DRM_MODE_SCALE_NONE;
- } else {
- scaling_mode = nv_connector->scaling_mode;
- }
-
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- if (scaling_mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (scaling_mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- if (umode->hdisplay != oX || umode->vdisplay != oY ||
- umode->flags & DRM_MODE_FLAG_INTERLACE ||
- umode->flags & DRM_MODE_FLAG_DBLSCAN)
- ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
-
- ret = RING_SPACE(evo, 5);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
- OUT_RING (evo, ctrl);
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
- OUT_RING (evo, oY << 16 | oX);
- OUT_RING (evo, oY << 16 | oX);
-
- if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
- }
-
- return 0;
-}
-
-int
-nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_clock *clk = nouveau_clock(device);
-
- return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
-}
-
-static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-
- NV_DEBUG(drm, "\n");
-
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- drm_crtc_cleanup(&nv_crtc->base);
- kfree(nv_crtc);
-}
-
-int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t buffer_handle, uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_bo *cursor = NULL;
- struct drm_gem_object *gem;
- int ret = 0, i;
-
- if (!buffer_handle) {
- nv_crtc->cursor.hide(nv_crtc, true);
- return 0;
- }
-
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
- if (!gem)
- return -ENOENT;
- cursor = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(cursor);
- if (ret)
- goto out;
-
- /* The simple will do for now. */
- for (i = 0; i < 64 * 64; i++)
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
-
- nouveau_bo_unmap(cursor);
-
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
- nv_crtc->cursor.show(nv_crtc, true);
-
-out:
- drm_gem_object_unreference_unlocked(gem);
- return ret;
-}
-
-int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- nv_crtc->cursor.set_pos(nv_crtc, x, y);
- return 0;
-}
-
-static void
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- int end = (start + size > 256) ? 256 : start + size, i;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- /* We need to know the depth before we upload, but it's possible to
- * get called before a framebuffer is bound. If this is the case,
- * mark the lut values as dirty by setting depth==0, and it'll be
- * uploaded on the first mode_set_base()
- */
- if (!nv_crtc->base.fb) {
- nv_crtc->lut.depth = 0;
- return;
- }
-
- nv50_crtc_lut_load(crtc);
-}
-
-static void
-nv50_crtc_save(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_crtc_restore(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static const struct drm_crtc_funcs nv50_crtc_funcs = {
- .save = nv50_crtc_save,
- .restore = nv50_crtc_restore,
- .cursor_set = nv50_crtc_cursor_set,
- .cursor_move = nv50_crtc_cursor_move,
- .gamma_set = nv50_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = nouveau_crtc_page_flip,
- .destroy = nv50_crtc_destroy,
-};
-
-static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- nv50_display_flip_stop(crtc);
- drm_vblank_pre_modeset(dev, nv_crtc->index);
- nv50_crtc_blank(nv_crtc, true);
-}
-
-static void
-nv50_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- nv50_crtc_blank(nv_crtc, false);
- drm_vblank_post_modeset(dev, nv_crtc->index);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *passed_fb,
- int x, int y, bool atomic)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
- int ret;
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- /* no fb bound */
- if (!atomic && !crtc->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
- }
-
- /* If atomic, we want to switch to the fb we were passed, so
- * now we update pointers to do that. (We don't pin; just
- * assume we're already pinned and update the base address.)
- */
- if (atomic) {
- drm_fb = passed_fb;
- fb = nouveau_framebuffer(passed_fb);
- } else {
- drm_fb = crtc->fb;
- fb = nouveau_framebuffer(crtc->fb);
- /* If not atomic, we can go ahead and pin, and unpin the
- * old fb we were passed.
- */
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (passed_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
- }
-
- nv_crtc->fb.offset = fb->nvbo->bo.offset;
- nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
- nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
- if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
- ret = RING_SPACE(evo, 2);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- OUT_RING (evo, fb->r_dma);
- }
-
- ret = RING_SPACE(evo, 12);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
- OUT_RING (evo, nv_crtc->fb.offset >> 8);
- OUT_RING (evo, 0);
- OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
- OUT_RING (evo, fb->r_pitch);
- OUT_RING (evo, fb->r_format);
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
- OUT_RING (evo, fb->base.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
- OUT_RING (evo, (y << 16) | x);
-
- if (nv_crtc->lut.depth != fb->base.depth) {
- nv_crtc->lut.depth = fb->base.depth;
- nv50_crtc_lut_load(crtc);
- }
-
- return 0;
-}
-
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 head = nv_crtc->index * 0x400;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- int ret;
-
- /* hw timing description looks like this:
- *
- * <sync> <back porch> <---------display---------> <front porch>
- * ______
- * |____________|---------------------------|____________|
- *
- * ^ synce ^ blanke ^ blanks ^ active
- *
- * interlaced modes also have 2 additional values pointing at the end
- * and start of the next field's blanking period.
- */
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = RING_SPACE(evo, 18);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, 0x0804 + head, 2);
- OUT_RING (evo, 0x00800000 | mode->clock);
- OUT_RING (evo, (ilace == 2) ? 2 : 0);
- BEGIN_NV04(evo, 0, 0x0810 + head, 6);
- OUT_RING (evo, 0x00000000); /* border colour */
- OUT_RING (evo, (vactive << 16) | hactive);
- OUT_RING (evo, ( vsynce << 16) | hsynce);
- OUT_RING (evo, (vblanke << 16) | hblanke);
- OUT_RING (evo, (vblanks << 16) | hblanks);
- OUT_RING (evo, (vblan2e << 16) | vblan2s);
- BEGIN_NV04(evo, 0, 0x082c + head, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0900 + head, 1);
- OUT_RING (evo, 0x00000311); /* makes sync channel work */
- BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
- OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
- BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
- OUT_RING (evo, 0x00000000); /* screen position */
- }
-
- nv_crtc->set_dither(nv_crtc, false);
- nv_crtc->set_scale(nv_crtc, false);
- nv_crtc->set_color_vibrance(nv_crtc, false);
-
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-}
-
-static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- int ret;
-
- nv50_display_flip_stop(crtc);
- ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
- if (ret)
- return ret;
-
- ret = nv50_display_sync(crtc->dev);
- if (ret)
- return ret;
-
- return nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- int ret;
-
- nv50_display_flip_stop(crtc);
- ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
- if (ret)
- return ret;
-
- return nv50_display_sync(crtc->dev);
-}
-
-static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
- .dpms = nv50_crtc_dpms,
- .prepare = nv50_crtc_prepare,
- .commit = nv50_crtc_commit,
- .mode_fixup = nv50_crtc_mode_fixup,
- .mode_set = nv50_crtc_mode_set,
- .mode_set_base = nv50_crtc_mode_set_base,
- .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
- .load_lut = nv50_crtc_lut_load,
-};
-
-int
-nv50_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_crtc *nv_crtc = NULL;
- int ret, i;
-
- NV_DEBUG(drm, "\n");
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nv50_crtc_set_dither;
- nv_crtc->set_scale = nv50_crtc_set_scale;
- nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
- nv_crtc->color_vibrance = 50;
- nv_crtc->vibrant_hue = 0;
- nv_crtc->lut.depth = 0;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
- drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
- drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
-
- ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
-
- ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- nv50_cursor_init(nv_crtc);
-out:
- if (ret)
- nv50_crtc_destroy(&nv_crtc->base);
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644
index 223da113ceee..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-static void
-nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG(drm, "\n");
-
- if (update && nv_crtc->cursor.visible)
- return;
-
- ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
- if (ret) {
- NV_ERROR(drm, "no space while unhiding cursor\n");
- return;
- }
-
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
- OUT_RING(evo, NvEvoVRAM);
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
- OUT_RING(evo, nv_crtc->cursor.offset >> 8);
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- nv_crtc->cursor.visible = true;
- }
-}
-
-static void
-nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG(drm, "\n");
-
- if (update && !nv_crtc->cursor.visible)
- return;
-
- ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
- if (ret) {
- NV_ERROR(drm, "no space while hiding cursor\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
- OUT_RING(evo, 0);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
- OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
- }
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- nv_crtc->cursor.visible = false;
- }
-}
-
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
- struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
-
- nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
- nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
- ((y & 0xFFFF) << 16) | (x & 0xFFFF));
- /* Needed to make the cursor move. */
- nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
- if (offset == nv_crtc->cursor.offset)
- return;
-
- nv_crtc->cursor.offset = offset;
- if (nv_crtc->cursor.visible) {
- nv_crtc->cursor.visible = false;
- nv_crtc->cursor.show(nv_crtc, true);
- }
-}
-
-int
-nv50_cursor_init(struct nouveau_crtc *nv_crtc)
-{
- nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
- nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
- nv_crtc->cursor.hide = nv50_cursor_hide;
- nv_crtc->cursor.show = nv50_cursor_show;
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644
index 6a30a1748573..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- if (!nv_encoder->crtc)
- return;
- nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
- NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
-
- ret = RING_SPACE(evo, 4);
- if (ret) {
- NV_ERROR(drm, "no space while disconnecting DAC\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
-
- nv_encoder->crtc = NULL;
-}
-
-static enum drm_connector_status
-nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- enum drm_connector_status status = connector_status_disconnected;
- uint32_t dpms_state, load_pattern, load_state;
- int or = nv_encoder->or;
-
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
- dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
-
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
- return status;
- }
-
- /* Use bios provided value if possible. */
- if (drm->vbios.dactestval) {
- load_pattern = drm->vbios.dactestval;
- NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
- load_pattern);
- } else {
- load_pattern = 340;
- NV_DEBUG(drm, "Using default load_pattern of %d\n",
- load_pattern);
- }
-
- nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
- NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
- mdelay(45); /* give it some time to process */
- load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
-
- nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-
- if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
- NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
- status = connector_status_connected;
-
- if (status == connector_status_connected)
- NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
- else
- NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
-
- return status;
-}
-
-static void
-nv50_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint32_t val;
- int or = nv_encoder->or;
-
- NV_DEBUG(drm, "or %d mode %d\n", or, mode);
-
- /* wait for it to be done */
- if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
- return;
- }
-
- val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
-
- if (mode != DRM_MODE_DPMS_ON)
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
-
- switch (mode) {
- case DRM_MODE_DPMS_STANDBY:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
- break;
- case DRM_MODE_DPMS_OFF:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
- break;
- default:
- break;
- }
-
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-}
-
-static void
-nv50_dac_save(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_dac_restore(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *connector;
-
- NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
- connector = nouveau_encoder_connector_get(nv_encoder);
- if (!connector) {
- NV_ERROR(drm, "Encoder has no connector\n");
- return false;
- }
-
- if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
-
- return true;
-}
-
-static void
-nv50_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- uint32_t mode_ctl = 0, mode_ctl2 = 0;
- int ret;
-
- NV_DEBUG(drm, "or %d type %d crtc %d\n",
- nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-
- nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (crtc->index == 1)
- mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
- else
- mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
-
- /* Lacking a working tv-out, this is not a 100% sure. */
- if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
- mode_ctl |= 0x40;
- else
- if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
- mode_ctl |= 0x100;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
-
- ret = RING_SPACE(evo, 3);
- if (ret) {
- NV_ERROR(drm, "no space while connecting DAC\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
- OUT_RING(evo, mode_ctl);
- OUT_RING(evo, mode_ctl2);
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static struct drm_crtc *
-nv50_dac_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
- .dpms = nv50_dac_dpms,
- .save = nv50_dac_save,
- .restore = nv50_dac_restore,
- .mode_fixup = nv50_dac_mode_fixup,
- .prepare = nv50_dac_disconnect,
- .commit = nv50_dac_commit,
- .mode_set = nv50_dac_mode_set,
- .get_crtc = nv50_dac_crtc_get,
- .detect = nv50_dac_detect,
- .disable = nv50_dac_disconnect
-};
-
-static void
-nv50_dac_destroy(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
- if (!encoder)
- return;
-
- NV_DEBUG(drm, "\n");
-
- drm_encoder_cleanup(encoder);
- kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
- .destroy = nv50_dac_destroy,
-};
-
-int
-nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
-{
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- encoder = to_drm_encoder(nv_encoder);
-
- nv_encoder->dcb = entry;
- nv_encoder->or = ffs(entry->or) - 1;
-
- drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
-
- encoder->possible_crtcs = entry->heads;
- encoder->possible_clones = 0;
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f97b42cbb6bb..35874085a61e 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,969 +1,2058 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
+ /*
+ * Copyright 2011 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-
-#include "nv50_display.h"
-#include "nouveau_crtc.h"
-#include "nouveau_encoder.h"
+#include "nouveau_gem.h"
#include "nouveau_connector.h"
-#include "nouveau_fbcon.h"
-#include <drm/drm_crtc_helper.h>
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
#include "nouveau_fence.h"
+#include "nv50_display.h"
+#include <core/client.h>
#include <core/gpuobj.h>
-#include <subdev/timer.h>
+#include <core/class.h>
-static void nv50_display_bh(unsigned long);
-
-static inline int
-nv50_sor_nr(struct drm_device *dev)
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+
+#define EVO_CORE_HANDLE (0xd1500000)
+#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
+#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
+#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
+ (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
+
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+struct nv50_chan {
+ struct nouveau_object *user;
+ u32 handle;
+};
+
+static int
+nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, struct nv50_chan *chan)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
+ const u32 handle = EVO_CHAN_HANDLE(bclass, head);
+ int ret;
- if (device->chipset < 0x90 ||
- device->chipset == 0x92 ||
- device->chipset == 0xa0)
- return 2;
+ ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
+ oclass, data, size, &chan->user);
+ if (ret)
+ return ret;
- return 4;
+ chan->handle = handle;
+ return 0;
}
-u32
-nv50_display_active_crtcs(struct drm_device *dev)
+static void
+nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 mask = 0;
- int i;
-
- if (device->chipset < 0x90 ||
- device->chipset == 0x92 ||
- device->chipset == 0xa0) {
- for (i = 0; i < 2; i++)
- mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- } else {
- for (i = 0; i < 4; i++)
- mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
- }
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ if (chan->handle)
+ nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+}
- for (i = 0; i < 3; i++)
- mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+/******************************************************************************
+ * PIO EVO channel
+ *****************************************************************************/
- return mask & 3;
-}
+struct nv50_pioc {
+ struct nv50_chan base;
+};
-int
-nv50_display_early_init(struct drm_device *dev)
+static void
+nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
{
- return 0;
+ nv50_chan_destroy(core, &pioc->base);
}
-void
-nv50_display_late_takedown(struct drm_device *dev)
+static int
+nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, struct nv50_pioc *pioc)
{
+ return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
}
-int
-nv50_display_sync(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- int ret;
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
- ret = RING_SPACE(evo, 6);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x80000000);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
+struct nv50_dmac {
+ struct nv50_chan base;
+ dma_addr_t handle;
+ u32 *ptr;
+};
- nv_wo32(disp->ramin, 0x2000, 0x00000000);
- FIRE_RING (evo);
-
- if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
- return 0;
+static void
+nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+{
+ if (dmac->ptr) {
+ struct pci_dev *pdev = nv_device(core)->pdev;
+ pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
- return 0;
+ nv50_chan_destroy(core, &dmac->base);
}
-int
-nv50_display_init(struct drm_device *dev)
+static int
+nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_channel *evo;
- int ret, i;
- u32 val;
-
- NV_DEBUG(drm, "\n");
-
- nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
-
- /*
- * I think the 0x006101XX range is some kind of main control area
- * that enables things.
- */
- /* CRTC? */
- for (i = 0; i < 2; i++) {
- val = nv_rd32(device, 0x00616100 + (i * 0x800));
- nv_wr32(device, 0x00610190 + (i * 0x10), val);
- val = nv_rd32(device, 0x00616104 + (i * 0x800));
- nv_wr32(device, 0x00610194 + (i * 0x10), val);
- val = nv_rd32(device, 0x00616108 + (i * 0x800));
- nv_wr32(device, 0x00610198 + (i * 0x10), val);
- val = nv_rd32(device, 0x0061610c + (i * 0x800));
- nv_wr32(device, 0x0061019c + (i * 0x10), val);
- }
-
- /* DAC */
- for (i = 0; i < 3; i++) {
- val = nv_rd32(device, 0x0061a000 + (i * 0x800));
- nv_wr32(device, 0x006101d0 + (i * 0x04), val);
- }
-
- /* SOR */
- for (i = 0; i < nv50_sor_nr(dev); i++) {
- val = nv_rd32(device, 0x0061c000 + (i * 0x800));
- nv_wr32(device, 0x006101e0 + (i * 0x04), val);
- }
-
- /* EXT */
- for (i = 0; i < 3; i++) {
- val = nv_rd32(device, 0x0061e000 + (i * 0x800));
- nv_wr32(device, 0x006101f0 + (i * 0x04), val);
- }
-
- for (i = 0; i < 3; i++) {
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
- }
-
- /* The precise purpose is unknown, i suspect it has something to do
- * with text mode.
- */
- if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
- nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
- nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
- if (!nv_wait(device, 0x006194e8, 2, 0)) {
- NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
- NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
- nv_rd32(device, 0x6194e8));
- return -EBUSY;
- }
- }
-
- for (i = 0; i < 2; i++) {
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- return -EBUSY;
- }
-
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
- NV_ERROR(drm, "timeout: "
- "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
- NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- return -EBUSY;
- }
- }
-
- nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
-
- ret = nv50_evo_init(dev);
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
if (ret)
return ret;
- evo = nv50_display(dev)->master;
-
- nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
- ret = RING_SPACE(evo, 3);
+ ret = nouveau_object_new(client, parent, NvEvoFB16,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
if (ret)
return ret;
- BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
- OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING (evo, NvEvoSync);
- return nv50_display_sync(dev);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
}
-void
-nv50_display_fini(struct drm_device *dev)
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- struct drm_crtc *drm_crtc;
- int ret, i;
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- NV_DEBUG(drm, "\n");
+ ret = nouveau_object_new(client, parent, NvEvoFB16,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
+}
- nv50_crtc_blank(crtc, true);
- }
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVD0_DMA_CONF0_ENABLE |
+ NVD0_DMA_CONF0_PAGE_LP,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- ret = RING_SPACE(evo, 2);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- }
- FIRE_RING(evo);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+ NVD0_DMA_CONF0_PAGE_LP,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
+}
- /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
- * cleaning up?
- */
- list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
- uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, u64 syncbuf,
+ struct nv50_dmac *dmac)
+{
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ u32 pushbuf = *(u32 *)data;
+ int ret;
- if (!crtc->base.enabled)
- continue;
+ dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+ &dmac->handle);
+ if (!dmac->ptr)
+ return -ENOMEM;
- nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
- if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
- NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
- "0x%08x\n", mask, mask);
- NV_ERROR(drm, "0x610024 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_INTR_1));
- }
- }
+ ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+ NV_DMA_FROM_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_PCI_US |
+ NV_DMA_ACCESS_RD,
+ .start = dmac->handle + 0x0000,
+ .limit = dmac->handle + 0x0fff,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- for (i = 0; i < 2; i++) {
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- }
- }
+ ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+ if (ret)
+ return ret;
- nv50_evo_fini(dev);
+ ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = syncbuf + 0x0000,
+ .limit = syncbuf + 0x0fff,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- for (i = 0; i < 3; i++) {
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
- NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
- NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
- }
- }
+ ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- /* disable interrupts. */
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+ if (nv_device(core)->card_type < NV_C0)
+ ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+ else
+ if (nv_device(core)->card_type < NV_D0)
+ ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+ else
+ ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+ return ret;
}
-int
-nv50_display_create(struct drm_device *dev)
+struct nv50_mast {
+ struct nv50_dmac base;
+};
+
+struct nv50_curs {
+ struct nv50_pioc base;
+};
+
+struct nv50_sync {
+ struct nv50_dmac base;
+ struct {
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
+struct nv50_ovly {
+ struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+ struct nv50_pioc base;
+};
+
+struct nv50_head {
+ struct nouveau_crtc base;
+ struct nv50_curs curs;
+ struct nv50_sync sync;
+ struct nv50_ovly ovly;
+ struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+
+struct nv50_disp {
+ struct nouveau_object *core;
+ struct nv50_mast mast;
+
+ u32 modeset;
+
+ struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_table *dcb = &drm->vbios.dcb;
- struct drm_connector *connector, *ct;
- struct nv50_display *priv;
- int ret, i;
+ return nouveau_display(dev)->priv;
+}
- NV_DEBUG(drm, "\n");
+#define nv50_mast(d) (&nv50_disp(d)->mast)
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_display(dev)->priv = priv;
- nouveau_display(dev)->dtor = nv50_display_destroy;
- nouveau_display(dev)->init = nv50_display_init;
- nouveau_display(dev)->fini = nv50_display_fini;
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
+}
- /* Create CRTC objects */
- for (i = 0; i < 2; i++) {
- ret = nv50_crtc_create(dev, i);
- if (ret)
- return ret;
- }
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+ struct nv50_dmac *dmac = evoc;
+ u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
- /* We setup the encoders from the BIOS table */
- for (i = 0 ; i < dcb->entries; i++) {
- struct dcb_output *entry = &dcb->entry[i];
+ if (put + nr >= (PAGE_SIZE / 4) - 8) {
+ dmac->ptr[put] = 0x20000000;
- if (entry->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
- entry->type, ffs(entry->or) - 1);
- continue;
+ nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+ if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ NV_ERROR(dmac->base.user, "channel stalled\n");
+ return NULL;
}
- connector = nouveau_connector_create(dev, entry->connector);
- if (IS_ERR(connector))
- continue;
-
- switch (entry->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nv50_sor_create(connector, entry);
- break;
- case DCB_OUTPUT_ANALOG:
- nv50_dac_create(connector, entry);
- break;
- default:
- NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
- continue;
- }
+ put = 0;
}
- list_for_each_entry_safe(connector, ct,
- &dev->mode_config.connector_list, head) {
- if (!connector->encoder_ids[0]) {
- NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
- }
+ return dmac->ptr + put;
+}
- tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
+static void
+evo_kick(u32 *push, void *evoc)
+{
+ struct nv50_dmac *dmac = evoc;
+ nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+}
- ret = nv50_evo_create(dev);
- if (ret) {
- nv50_display_destroy(dev);
- return ret;
- }
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d) *((p)++) = (d)
- return 0;
+static bool
+evo_sync_wait(void *data)
+{
+ return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
}
-void
-nv50_display_destroy(struct drm_device *dev)
+static int
+evo_sync(struct drm_device *dev)
{
- struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_device *device = nouveau_dev(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_mast *mast = nv50_mast(dev);
+ u32 *push = evo_wait(mast, 8);
+ if (push) {
+ nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+ return 0;
+ }
- nv50_evo_destroy(dev);
- kfree(disp);
+ return -EBUSY;
}
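
The evo_wait()/evo_mthd()/evo_data()/evo_kick() helpers above replace the old RING_SPACE/BEGIN_NV04/OUT_RING/FIRE_RING path: evo_wait() reserves space in the page-sized push buffer (emitting a 0x20000000 jump and restarting at offset 0 when it runs out of room), evo_mthd() encodes a header word as (count << 18) | method, evo_data() appends the parameters, and evo_kick() publishes the new put offset. A minimal, self-contained sketch of that encoding, using a plain array instead of the real DMA push buffer; the demo_* names are placeholders, not driver symbols:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone model of the EVO push-buffer helpers: a method header is
 * (count << 18) | method, followed by 'count' data words, exactly as
 * evo_mthd()/evo_data() build them above.
 */
static uint32_t pushbuf[64];

static uint32_t *demo_wait(int nr) { (void)nr; return pushbuf; }   /* evo_wait()  */
static void demo_kick(uint32_t *p) { (void)p; /* would write PUT */ } /* evo_kick() */
#define demo_mthd(p, m, s) (*((p)++) = (((s) << 18) | (m)))          /* evo_mthd()  */
#define demo_data(p, d)    (*((p)++) = (d))                          /* evo_data()  */

int main(void)
{
	uint32_t *push = demo_wait(8);

	/* same sequence evo_sync() submits: notifier request, then update;
	 * the real code also ORs the notifier offset into the first word */
	demo_mthd(push, 0x0084, 1);
	demo_data(push, 0x80000000);
	demo_mthd(push, 0x0080, 2);
	demo_data(push, 0x00000000);
	demo_data(push, 0x00000000);
	demo_kick(push);

	printf("first header word: 0x%08x\n", pushbuf[0]); /* 0x00040084 */
	return 0;
}
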
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
struct nouveau_bo *
nv50_display_crtc_sema(struct drm_device *dev, int crtc)
{
- return nv50_display(dev)->crtc[crtc].sem.bo;
+ return nv50_disp(dev)->sync;
}
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
- struct nv50_display *disp = nv50_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
- struct nouveau_channel *evo = dispc->sync;
- int ret;
-
- ret = RING_SPACE(evo, 8);
- if (ret) {
- WARN_ON(1);
- return;
+ struct nv50_sync *sync = nv50_sync(crtc);
+ u32 *push;
+
+ push = evo_wait(sync, 8);
+ if (push) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, sync);
}
-
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0094, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x00c0, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0x00000000);
- FIRE_RING (evo);
}
int
nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan)
+ struct nouveau_channel *chan, u32 swap_interval)
{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
- struct nouveau_channel *evo = dispc->sync;
+ struct nv50_sync *sync = nv50_sync(crtc);
+ u32 *push;
int ret;
- ret = RING_SPACE(evo, chan ? 25 : 27);
- if (unlikely(ret))
- return ret;
+ swap_interval <<= 4;
+ if (swap_interval == 0)
+ swap_interval |= 0x100;
+
+ push = evo_wait(sync, 128);
+ if (unlikely(push == NULL))
+ return -EBUSY;
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
ret = RING_SPACE(chan, 10);
- if (ret) {
- WIND_RING(evo);
+ if (ret)
return ret;
- }
- if (nv_device(drm->device)->chipset < 0xc0) {
- BEGIN_NV04(chan, 0, 0x0060, 2);
+ if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, dispc->sem.offset);
- BEGIN_NV04(chan, 0, 0x006c, 1);
- OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
- BEGIN_NV04(chan, 0, 0x0064, 2);
- OUT_RING (chan, dispc->sem.offset ^ 0x10);
+ OUT_RING (chan, sync->sem.offset);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sync->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
- BEGIN_NV04(chan, 0, 0x0060, 1);
- if (nv_device(drm->device)->chipset < 0x84)
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram);
} else {
u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
- offset += dispc->sem.offset;
- BEGIN_NVC0(chan, 0, 0x0010, 4);
+ offset += sync->sem.offset;
+
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
OUT_RING (chan, 0x1001);
}
+
FIRE_RING (chan);
} else {
- nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
- 0xf00d0000 | dispc->sem.value);
+ nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
+ 0xf00d0000 | sync->sem.value);
+ evo_sync(crtc->dev);
}
- /* queue the flip on the crtc's "display sync" channel */
- BEGIN_NV04(evo, 0, 0x0100, 1);
- OUT_RING (evo, 0xfffe0000);
- if (chan) {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000100);
+ /* queue the flip */
+ evo_mthd(push, 0x0100, 1);
+ evo_data(push, 0xfffe0000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, swap_interval);
+ if (!(swap_interval & 0x00000100)) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, 0x40000000);
+ }
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, sync->sem.offset);
+ evo_data(push, 0xf00d0000 | sync->sem.value);
+ evo_data(push, 0x74b1e000);
+ evo_data(push, NvEvoSync);
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, nv_fb->r_dma);
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
} else {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000010);
- /* allows gamma somehow, PDISP will bitch at you if
- * you don't wait for vblank before changing this..
- */
- BEGIN_NV04(evo, 0, 0x00e0, 1);
- OUT_RING (evo, 0x40000000);
- }
- BEGIN_NV04(evo, 0, 0x0088, 4);
- OUT_RING (evo, dispc->sem.offset);
- OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
- OUT_RING (evo, 0x74b1e000);
- OUT_RING (evo, NvEvoSync);
- BEGIN_NV04(evo, 0, 0x00a0, 2);
- OUT_RING (evo, 0x00000000);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x00c0, 1);
- OUT_RING (evo, nv_fb->r_dma);
- BEGIN_NV04(evo, 0, 0x0110, 2);
- OUT_RING (evo, 0x00000000);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0800, 5);
- OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
- OUT_RING (evo, 0);
- OUT_RING (evo, (fb->height << 16) | fb->width);
- OUT_RING (evo, nv_fb->r_pitch);
- OUT_RING (evo, nv_fb->r_format);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0x00000000);
- FIRE_RING (evo);
-
- dispc->sem.offset ^= 0x10;
- dispc->sem.value++;
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
+ }
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, sync);
+
+ sync->sem.offset ^= 0x10;
+ sync->sem.value++;
return 0;
}
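
The flip itself is ordered by a small per-CRTC semaphore protocol: the rendering channel (or the CPU fallback) releases 0xf00d0000 | sem.value at sem.offset in the shared sync buffer, the same offset/value pair is handed to the sync channel through the 0x0088 method group so the flip only latches once that value appears, and after queuing the flip the driver toggles sem.offset by 0x10 and increments sem.value so consecutive flips alternate between two slots. A toy model of just that bookkeeping (illustrative only, no hardware access):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the flip-semaphore bookkeeping at the end of
 * nv50_display_flip_next(): each CRTC alternates between two 16-byte
 * slots in the shared sync buffer; the "go" pattern is
 * 0xf00d0000 | value, and offset/value advance after every flip.
 */
struct flip_sem {
	uint32_t offset;	/* byte offset of the slot for this flip */
	uint16_t value;		/* low bits of the expected 0xf00dxxxx word */
};

static void queue_flip(struct flip_sem *sem, uint32_t *sembuf)
{
	/* release written by the render channel (or CPU fallback) */
	sembuf[sem->offset / 4] = 0xf00d0000 | sem->value;

	printf("flip waits for 0x%08x at slot 0x%02x\n",
	       0xf00d0000 | sem->value, sem->offset);

	/* advance to the other slot for the next flip, as the driver does */
	sem->offset ^= 0x10;
	sem->value++;
}

int main(void)
{
	uint32_t sembuf[8] = { 0 };
	struct flip_sem sem = { .offset = 0x00, .value = 0 };

	queue_flip(&sem, sembuf);	/* slot 0x00, value 0 */
	queue_flip(&sem, sembuf);	/* slot 0x10, value 1 */
	return 0;
}
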
-static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
- u32 mc, int pxclk)
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_connector *nv_connector = NULL;
- struct drm_encoder *encoder;
- struct nvbios *bios = &drm->vbios;
- u32 script = 0, or;
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ u32 *push, mode = 0x00;
+
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
+ }
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
+ }
- if (nv_encoder->dcb != dcb)
- continue;
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+ evo_data(push, mode);
+ } else
+ if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+ evo_data(push, mode);
+ } else {
+ evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+ evo_data(push, mode);
+ }
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- break;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
}
- or = ffs(dcb->or) - 1;
- switch (dcb->type) {
- case DCB_OUTPUT_LVDS:
- script = (mc >> 8) & 0xf;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- script |= 0x0100;
- if (bios->fp.if_is_24bit)
- script |= 0x0200;
- } else {
- /* determine number of lvds links */
- if (nv_connector && nv_connector->edid &&
- nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- /* http://www.spwg.org */
- if (((u8 *)nv_connector->edid)[121] == 2)
- script |= 0x0100;
- } else
- if (pxclk >= bios->fp.duallink_transition_clk) {
- script |= 0x0100;
- }
+ return 0;
+}
- /* determine panel depth */
- if (script & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- script |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- script |= 0x0200;
- }
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+ struct drm_crtc *crtc = &nv_crtc->base;
+ struct nouveau_connector *nv_connector;
+ int mode = DRM_MODE_SCALE_NONE;
+ u32 oX, oY, *push;
+
+	/* start off at the resolution we programmed the crtc for; this
+	 * effectively handles NONE/FULL scaling
+	 */
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ if (nv_connector && nv_connector->native_mode)
+ mode = nv_connector->scaling_mode;
+
+ if (mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+	/* add overscan compensation if necessary; this will keep the aspect
+	 * ratio the same as the backend mode unless overridden by the
+	 * user setting both hborder and vborder properties.
+	 */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ } else {
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ }
+ }
- if (nv_connector && nv_connector->edid &&
- (nv_connector->edid->revision >= 4) &&
- (nv_connector->edid->input & 0x70) >= 0x20)
- script |= 0x0200;
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ } else {
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
break;
- case DCB_OUTPUT_TMDS:
- script = (mc >> 8) & 0xf;
- if (pxclk >= 165000)
- script |= 0x0100;
- break;
- case DCB_OUTPUT_DP:
- script = (mc >> 8) & 0xf;
- break;
- case DCB_OUTPUT_ANALOG:
- script = 0xff;
- break;
default:
- NV_ERROR(drm, "modeset on unsupported output type!\n");
break;
}
- return script;
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ /*XXX: SCALE_CTRL_ACTIVE??? */
+ evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ } else {
+ evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ }
+
+ evo_kick(push, mast);
+
+ if (update) {
+ nv50_display_flip_stop(crtc);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ }
+ }
+
+ return 0;
}
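
The underscan/aspect arithmetic above works in 19-bit fixed point: aspect = (oY << 19) / oX, and the rounded rescale is ((oX * aspect) + (aspect / 2)) >> 19. A small worked example, using a hypothetical 1920x1200 output with a 32-pixel horizontal underscan border per side (the numbers are chosen for illustration only):

#include <stdint.h>
#include <stdio.h>

/* Same 19-bit fixed-point rounding used by nv50_crtc_set_scale() for
 * underscan handling; 1920x1200 with a 32-pixel hborder is just an
 * example, nothing the driver hard-codes.
 */
int main(void)
{
	uint32_t oX = 1920, oY = 1200, bX = 32;
	uint32_t aspect = (oY << 19) / oX;		/* 327680 */

	oX -= bX * 2;					/* 1856 */
	oY = ((oX * aspect) + (aspect / 2)) >> 19;	/* 1160 */

	printf("%ux%u\n", oX, oY);			/* 1856x1160 */
	return 0;
}
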
-static void
-nv50_display_unk10_handler(struct drm_device *dev)
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 unk30 = nv_rd32(device, 0x610030), mc;
- int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push, hue, vib;
+ int adj;
+
+ adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+ vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+ hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+ push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, (hue << 20) | (vib << 8));
+ } else {
+ evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (hue << 20) | (vib << 8));
+ }
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- disp->irq.dcb = NULL;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
- nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
+ return 0;
+}
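
nv50_crtc_set_color_vibrance() maps the two percentage properties into 12-bit fields packed as (hue << 20) | (vib << 8). A quick stand-alone check with the defaults programmed later in nv50_crtc_create() (color_vibrance = 50, vibrant_hue = 0), shown only as a worked example of the conversion:

#include <stdio.h>

/* Packing from nv50_crtc_set_color_vibrance(), evaluated for the
 * defaults that nv50_crtc_create() installs.
 */
int main(void)
{
	int color_vibrance = 50, vibrant_hue = 0;
	int adj = (color_vibrance > 0) ? 50 : 0;
	unsigned int vib = ((color_vibrance * 2047 + adj) / 100) & 0xfff; /* 1024 */
	unsigned int hue = ((vibrant_hue * 2047) / 100) & 0xfff;          /* 0 */

	printf("0x%08x\n", (hue << 20) | (vib << 8)); /* 0x00040000 */
	return 0;
}
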
- /* Determine which CRTC we're dealing with, only 1 ever will be
- * signalled at the same time with the current nouveau code.
- */
- crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
- if (crtc < 0)
- goto ack;
-
- /* Nothing needs to be done for the encoder */
- crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
- if (crtc < 0)
- goto ack;
-
- /* Find which encoder was connected to the CRTC */
- for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
- mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
- NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+ int x, int y, bool update)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push;
+
+ push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, (y << 16) | x);
+ if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nvfb->r_dma);
+ }
+ } else {
+ evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_data(push, nvfb->r_dma);
+ evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (y << 16) | x);
+ }
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_ANALOG; break;
- case 1: type = DCB_OUTPUT_TV; break;
- default:
- NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
- goto ack;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
}
+ evo_kick(push, mast);
+ }
- or = i;
+ nv_crtc->fb.tile_flags = nvfb->r_dma;
+ return 0;
+}
+
+static void
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ }
+ evo_kick(push, mast);
}
+}
- for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
- if (nv_device(drm->device)->chipset < 0x90 ||
- nv_device(drm->device)->chipset == 0x92 ||
- nv_device(drm->device)->chipset == 0xa0)
- mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- else
- mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+static void
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x05000000);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+}
- NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+static void
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+ if (show)
+ nv50_crtc_cursor_show(nv_crtc);
+ else
+ nv50_crtc_cursor_hide(nv_crtc);
+
+ if (update) {
+ u32 *push = evo_wait(mast, 2);
+ if (push) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
+ }
+}
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_LVDS; break;
- case 1: type = DCB_OUTPUT_TMDS; break;
- case 2: type = DCB_OUTPUT_TMDS; break;
- case 5: type = DCB_OUTPUT_TMDS; break;
- case 8: type = DCB_OUTPUT_DP; break;
- case 9: type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
- goto ack;
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ u32 *push;
+
+ nv50_display_flip_stop(crtc);
+
+ push = evo_wait(mast, 2);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x40000000);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x40000000);
+ evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x03000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv50_crtc_cursor_show_hide(nv_crtc, false, false);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ u32 *push;
+
+ push = evo_wait(mast, 32);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM_LP);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0xc0000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0xc0000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, 0x83000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0xffffff00);
}
- or = i;
+ evo_kick(push, mast);
+ }
+
+ nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ int ret;
+
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ nvfb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(nvfb->nvbo);
}
- /* There was no encoder to disable */
- if (type == DCB_OUTPUT_ANY)
- goto ack;
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ struct drm_display_mode *mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_connector *nv_connector;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
+ u32 *push;
+ int ret;
+
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
+ }
- /* Disable the encoder */
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
+ ret = nv50_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
- if (dcb->type == type && (dcb->or & (1 << or))) {
- nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
- disp->irq.dcb = dcb;
- goto ack;
+ push = evo_wait(mast, 64);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x00800000 | mode->clock);
+ evo_data(push, (ilace == 2) ? 2 : 0);
+ evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
+ evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
+ } else {
+ evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
+ evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000); /* ??? */
+ evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, mode->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, mode->clock * 1000);
+ evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
}
+
+ evo_kick(push, mast);
}
- NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
- nv_wr32(device, 0x610030, 0x80000000);
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ nv50_crtc_set_dither(nv_crtc, false);
+ nv50_crtc_set_scale(nv_crtc, false);
+ nv50_crtc_set_color_vibrance(nv_crtc, false);
+ nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+ return 0;
+}
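
nv50_crtc_mode_set() turns a drm_display_mode into the active/sync/blank words the core channel expects; the derivation is easiest to see with concrete numbers. A stand-alone evaluation for the standard CEA 1920x1080@60 timing (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200; vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125), with the mode numbers being purely illustrative:

#include <stdio.h>

/* The h/v timing derivation from nv50_crtc_mode_set(), evaluated for a
 * progressive, non-doublescan 1920x1080@60 mode.
 */
int main(void)
{
	int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;
	int ilace = 1, vscan = 1;

	int hactive = htotal;                            /* 2200 */
	int hsynce  = hsync_end - hsync_start - 1;       /* 43 */
	int hbackp  = htotal - hsync_end;                /* 148 */
	int hblanke = hsynce + hbackp;                   /* 191 */
	int hfrontp = hsync_start - hdisplay;            /* 88 */
	int hblanks = htotal - hfrontp - 1;              /* 2111 */

	int vactive = vtotal * vscan / ilace;                          /* 1125 */
	int vsynce  = ((vsync_end - vsync_start) * vscan / ilace) - 1; /* 4 */
	int vbackp  = (vtotal - vsync_end) * vscan / ilace;            /* 36 */
	int vblanke = vsynce + vbackp;                                 /* 40 */
	int vfrontp = (vsync_start - vdisplay) * vscan / ilace;        /* 4 */
	int vblanks = vactive - vfrontp - 1;                           /* 1120 */

	printf("h: %d %d %d %d %d %d\n", hactive, hsynce, hbackp, hblanke, hfrontp, hblanks);
	printf("v: %d %d %d %d %d %d\n", vactive, vsynce, vbackp, vblanke, vfrontp, vblanks);
	return 0;
}
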
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ if (!crtc->fb) {
+ NV_DEBUG(drm, "No FB bound\n");
+ return 0;
+ }
+
+ ret = nv50_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
+ nv50_display_flip_stop(crtc);
+ nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nv50_display_flip_stop(crtc);
+ nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+ return 0;
}
static void
-nv50_display_unk20_handler(struct drm_device *dev)
+nv50_crtc_lut_load(struct drm_crtc *crtc)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
- struct dcb_output *dcb;
- int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- dcb = disp->irq.dcb;
- if (dcb) {
- nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
- disp->irq.dcb = NULL;
- }
-
- /* CRTC clock change requested? */
- crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
- if (crtc >= 0) {
- pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
- pclk &= 0x003fffff;
- if (pclk)
- nv50_crtc_set_clock(dev, crtc, pclk);
-
- tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
- tmp &= ~0x000000f;
- nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
- }
-
- /* Nothing needs to be done for the encoder */
- crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
- if (crtc < 0)
- goto ack;
- pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
-
- /* Find which encoder is connected to the CRTC */
- for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
- mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
- NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+ int i;
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_ANALOG; break;
- case 1: type = DCB_OUTPUT_TV; break;
- default:
- NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
- goto ack;
+ for (i = 0; i < 256; i++) {
+ u16 r = nv_crtc->lut.r[i] >> 2;
+ u16 g = nv_crtc->lut.g[i] >> 2;
+ u16 b = nv_crtc->lut.b[i] >> 2;
+
+ if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+ writew(r + 0x0000, lut + (i * 0x08) + 0);
+ writew(g + 0x0000, lut + (i * 0x08) + 2);
+ writew(b + 0x0000, lut + (i * 0x08) + 4);
+ } else {
+ writew(r + 0x6000, lut + (i * 0x20) + 0);
+ writew(g + 0x6000, lut + (i * 0x20) + 2);
+ writew(b + 0x6000, lut + (i * 0x20) + 4);
+ }
+ }
+}
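
nv50_crtc_lut_load() drops the 16-bit gamma values to 14 bits and uses two layouts: 8-byte entries on pre-NVD0 display classes, and 32-byte entries with a 0x6000 bias on NVD0 and later. A small worked example for the last entry of the identity ramp that nv50_crtc_create() installs (lut.r[255] = 255 << 8); the printed offsets are just the arithmetic from the two branches above:

#include <stdint.h>
#include <stdio.h>

/* LUT entry conversion from nv50_crtc_lut_load(), shown for entry 255
 * of the default identity ramp.
 */
int main(void)
{
	uint16_t in  = 255 << 8;	/* 0xff00 */
	uint16_t val = in >> 2;		/* 0x3fc0 */

	printf("pre-NVD0: write 0x%04x at +0x%04x\n", val, 255 * 0x08);          /* 0x3fc0 @ 0x07f8 */
	printf("NVD0+:    write 0x%04x at +0x%04x\n", val + 0x6000, 255 * 0x20); /* 0x9fc0 @ 0x1fe0 */
	return 0;
}
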
+
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ bool visible = (handle != 0);
+ int i, ret = 0;
+
+ if (visible) {
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (unlikely(!gem))
+ return -ENOENT;
+ nvbo = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret == 0) {
+ for (i = 0; i < 64 * 64; i++) {
+ u32 v = nouveau_bo_rd32(nvbo, i);
+ nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+ }
+ nouveau_bo_unmap(nvbo);
}
- or = i;
+ drm_gem_object_unreference_unlocked(gem);
}
- for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
- if (nv_device(drm->device)->chipset < 0x90 ||
- nv_device(drm->device)->chipset == 0x92 ||
- nv_device(drm->device)->chipset == 0xa0)
- mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
- else
- mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
+ if (visible != nv_crtc->cursor.visible) {
+ nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+ nv_crtc->cursor.visible = visible;
+ }
- NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+ return ret;
+}
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_LVDS; break;
- case 1: type = DCB_OUTPUT_TMDS; break;
- case 2: type = DCB_OUTPUT_TMDS; break;
- case 5: type = DCB_OUTPUT_TMDS; break;
- case 8: type = DCB_OUTPUT_DP; break;
- case 9: type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
- goto ack;
- }
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nv50_curs *curs = nv50_curs(crtc);
+ struct nv50_chan *chan = nv50_chan(curs);
+ nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+ nv_wo32(chan->user, 0x0080, 0x00000000);
+ return 0;
+}
- or = i;
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 end = max(start + size, (u32)256);
+ u32 i;
+
+ for (i = start; i < end; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
}
- if (type == DCB_OUTPUT_ANY)
- goto ack;
+ nv50_crtc_lut_load(crtc);
+}
- /* Enable the encoder */
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- dcb = &drm->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)))
- break;
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ nv50_dmac_destroy(disp->core, &head->ovly.base);
+ nv50_pioc_destroy(disp->core, &head->oimm.base);
+ nv50_dmac_destroy(disp->core, &head->sync.base);
+ nv50_pioc_destroy(disp->core, &head->curs.base);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ if (nv_crtc->cursor.nvbo)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_unmap(nv_crtc->lut.nvbo);
+ if (nv_crtc->lut.nvbo)
+ nouveau_bo_unpin(nv_crtc->lut.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+ .dpms = nv50_crtc_dpms,
+ .prepare = nv50_crtc_prepare,
+ .commit = nv50_crtc_commit,
+ .mode_fixup = nv50_crtc_mode_fixup,
+ .mode_set = nv50_crtc_mode_set,
+ .mode_set_base = nv50_crtc_mode_set_base,
+ .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+ .load_lut = nv50_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nv50_crtc_func = {
+ .cursor_set = nv50_crtc_cursor_set,
+ .cursor_move = nv50_crtc_cursor_move,
+ .gamma_set = nv50_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nv50_crtc_destroy,
+ .page_flip = nouveau_crtc_page_flip,
+};
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_head *head;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ head->base.index = index;
+ head->base.set_dither = nv50_crtc_set_dither;
+ head->base.set_scale = nv50_crtc_set_scale;
+ head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+ head->base.color_vibrance = 50;
+ head->base.vibrant_hue = 0;
+ head->base.cursor.set_offset = nv50_cursor_set_offset;
+ head->base.cursor.set_pos = nv50_cursor_set_pos;
+ for (i = 0; i < 256; i++) {
+ head->base.lut.r[i] = i << 8;
+ head->base.lut.g[i] = i << 8;
+ head->base.lut.b[i] = i << 8;
}
- if (i == drm->vbios.dcb.entries) {
- NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
- goto ack;
+ crtc = &head->base.base;
+ drm_crtc_init(dev, crtc, &nv50_crtc_func);
+ drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &head->base.lut.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(head->base.lut.nvbo);
+ if (ret)
+ nouveau_bo_unpin(head->base.lut.nvbo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &head->base.lut.nvbo);
}
- script = nv50_display_script_select(dev, dcb, mc, pclk);
- nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
+ if (ret)
+ goto out;
- if (type == DCB_OUTPUT_DP) {
- int link = !(dcb->dpconf.sor.link & 1);
- if ((mc & 0x000f0000) == 0x00020000)
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
- else
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
+ nv50_crtc_lut_load(crtc);
+
+ /* allocate cursor resources */
+ ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+ &(struct nv50_display_curs_class) {
+ .head = index,
+ }, sizeof(struct nv50_display_curs_class),
+ &head->curs.base);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &head->base.cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(head->base.cursor.nvbo);
+ if (ret)
+ nouveau_bo_unpin(head->base.lut.nvbo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
}
- if (dcb->type != DCB_OUTPUT_ANALOG) {
- tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
- tmp &= ~0x00000f0f;
- if (script & 0x0100)
- tmp |= 0x00000101;
- nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
- } else {
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+ if (ret)
+ goto out;
+
+ /* allocate page flip / sync resources */
+ ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+ &(struct nv50_display_sync_class) {
+ .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+ .head = index,
+ }, sizeof(struct nv50_display_sync_class),
+ disp->sync->bo.offset, &head->sync.base);
+ if (ret)
+ goto out;
+
+ head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+
+ /* allocate overlay resources */
+ ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+ &(struct nv50_display_oimm_class) {
+ .head = index,
+ }, sizeof(struct nv50_display_oimm_class),
+ &head->oimm.base);
+ if (ret)
+ goto out;
+
+ ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+ &(struct nv50_display_ovly_class) {
+ .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+ .head = index,
+ }, sizeof(struct nv50_display_ovly_class),
+ disp->sync->bo.offset, &head->ovly.base);
+ if (ret)
+ goto out;
+
+out:
+ if (ret)
+ nv50_crtc_destroy(crtc);
+ return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ int or = nv_encoder->or;
+ u32 dpms_ctrl;
+
+ dpms_ctrl = 0x00000000;
+ if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000001;
+ if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000004;
+
+ nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
}
- disp->irq.dcb = dcb;
- disp->irq.pclk = pclk;
- disp->irq.script = script;
+ return true;
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ u32 *push;
+
+ nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ u32 syncs = 0x00000000;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000002;
+
+ evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
+ evo_data(push, 1 << nv_crtc->index);
+ evo_data(push, syncs);
+ } else {
+ u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+ u32 syncs = 0x00000001;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs);
+ evo_data(push, magic);
+ evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
+ evo_data(push, 1 << nv_crtc->index);
+ }
+
+ evo_kick(push, mast);
+ }
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
- nv_wr32(device, 0x610030, 0x80000000);
+ nv_encoder->crtc = encoder->crtc;
}
-/* If programming a TMDS output on a SOR that can also be configured for
- * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
- *
- * It looks like the VBIOS TMDS scripts make an attempt at this, however,
- * the VBIOS scripts on at least one board I have only switch it off on
- * link 0, causing a blank display if the output has previously been
- * programmed for DisplayPort.
- */
static void
-nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
+nv50_dac_disconnect(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0400 + (or * 0x080), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0180 + (or * 0x020), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ int ret, or = nouveau_encoder(encoder)->or;
+ u32 load = 0;
+
+ ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
+ if (ret || load != 7)
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
+ .dpms = nv50_dac_dpms,
+ .mode_fixup = nv50_dac_mode_fixup,
+ .prepare = nv50_dac_disconnect,
+ .commit = nv50_dac_commit,
+ .mode_set = nv50_dac_mode_set,
+ .disable = nv50_dac_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+ .detect = nv50_dac_detect
+};
+
+static const struct drm_encoder_funcs nv50_dac_func = {
+ .destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- u32 tmp;
- if (dcb->type != DCB_OUTPUT_TMDS)
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid))
+ return;
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+
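+	/* upload the ELD; byte 2 of the ELD header holds its baseline size in 4-byte units */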
+ nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
+ nv_connector->base.eld,
+ nv_connector->base.eld[2] * 4);
+}
+
+static void
+nv50_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+ u32 rekey = 56; /* binary driver, and tegra constant */
+ u32 max_ac_packet;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
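+	/* max_ac_packet: audio/auxiliary packets that fit in the horizontal
+	 * blanking period, in units of 32 pixel clocks
+	 */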
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
+ NV84_DISP_SOR_HDMI_PWR_STATE_ON |
+ (max_ac_packet << 16) | rekey);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
- nv_encoder->dcb->or & (1 << or)) {
- tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
- tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
- nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
+ nv50_audio_mode_set(encoder, mode);
+}
+
+static void
+nv50_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+
+ nv50_audio_disconnect(encoder);
+
+ nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_encoder *partner;
+ int or = nv_encoder->or;
+
+ nv_encoder->last_dpms = mode;
+
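+	/* don't touch the SOR if another encoder sharing the same OR is still powered on */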
+ list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+ if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+ continue;
+
+ if (nv_partner != nv_encoder &&
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
+ if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+ return;
break;
}
}
+
+ nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
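+	/* when scaling is in use, program the panel's native mode instead, keeping the original mode id */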
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ return true;
}
static void
-nv50_display_unk40_handler(struct drm_device *dev)
+nv50_sor_disconnect(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct dcb_output *dcb = disp->irq.dcb;
- u16 script = disp->irq.script;
- u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0600 + (or * 0x40), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0200 + (or * 0x20), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- disp->irq.dcb = NULL;
- if (!dcb)
- goto ack;
+ nv50_hdmi_disconnect(encoder);
+ }
- nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
- nv50_display_unk40_dp_set_tmds(dev, dcb);
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ nv_encoder->crtc = NULL;
+}
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
- nv_wr32(device, 0x610030, 0x80000000);
- nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
+static void
+nv50_sor_prepare(struct drm_encoder *encoder)
+{
+ nv50_sor_disconnect(encoder);
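+	/* for DP, sync the core channel to avoid a race between link training and the supervisor interrupt */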
+ if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
+ evo_sync(encoder->dev);
}
static void
-nv50_display_bh(unsigned long data)
+nv50_sor_commit(struct drm_encoder *encoder)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct nouveau_device *device = nouveau_dev(dev);
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ struct drm_display_mode *mode)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nvbios *bios = &drm->vbios;
+ u32 *push, lvds = 0;
+ u8 owner = 1 << nv_crtc->index;
+ u8 proto = 0xf;
+ u8 depth = 0x0;
- for (;;) {
- uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
- uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ switch (nv_encoder->dcb->type) {
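+	/* proto: 0 LVDS, 1/2 single-link TMDS (link A/B), 5 dual-link TMDS, 8/9 DisplayPort (link A/B) */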
+ case DCB_OUTPUT_TMDS:
+ if (nv_encoder->dcb->sorconf.link & 1) {
+ if (mode->clock < 165000)
+ proto = 0x1;
+ else
+ proto = 0x5;
+ } else {
+ proto = 0x2;
+ }
- NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+ nv50_hdmi_mode_set(encoder, mode);
+ break;
+ case DCB_OUTPUT_LVDS:
+ proto = 0x0;
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
- nv50_display_unk10_handler(dev);
- else
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
- nv50_display_unk20_handler(dev);
- else
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
- nv50_display_unk40_handler(dev);
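+		/* build the LVDS script argument: 0x0100 selects dual-link, 0x0200 a 24-bit panel */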
+ if (bios->fp_no_ddc) {
+ if (bios->fp.dual_link)
+ lvds |= 0x0100;
+ if (bios->fp.if_is_24bit)
+ lvds |= 0x0200;
+ } else {
+ if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (((u8 *)nv_connector->edid)[121] == 2)
+ lvds |= 0x0100;
+ } else
+ if (mode->clock >= bios->fp.duallink_transition_clk) {
+ lvds |= 0x0100;
+ }
+
+ if (lvds & 0x0100) {
+ if (bios->fp.strapless_is_24bit & 2)
+ lvds |= 0x0200;
+ } else {
+ if (bios->fp.strapless_is_24bit & 1)
+ lvds |= 0x0200;
+ }
+
+ if (nv_connector->base.display_info.bpc == 8)
+ lvds |= 0x0200;
+ }
+
+ nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+ break;
+ case DCB_OUTPUT_DP:
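+		/* dp.datarate: required link bandwidth in kB/s (pixel clock in kHz * bpp / 8) */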
+ if (nv_connector->base.display_info.bpc == 6) {
+ nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ depth = 0x2;
+ } else
+ if (nv_connector->base.display_info.bpc == 8) {
+ nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ depth = 0x5;
+ } else {
+ nv_encoder->dp.datarate = mode->clock * 30 / 8;
+ depth = 0x6;
+ }
+
+ if (nv_encoder->dcb->sorconf.link & 1)
+ proto = 0x8;
else
- break;
+ proto = 0x9;
+ break;
+ default:
+ BUG_ON(1);
+ break;
}
- nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
+ nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(nv50_mast(dev), 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_CLASS) {
+ evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
+ evo_data(push, (depth << 16) | (proto << 8) | owner);
+ } else {
+ u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+ u32 syncs = 0x00000001;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs | (depth << 6));
+ evo_data(push, magic);
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
+ evo_data(push, owner | (proto << 8));
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
}
static void
-nv50_display_error_handler(struct drm_device *dev)
+nv50_sor_destroy(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
- u32 addr, data;
- int chid;
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
- for (chid = 0; chid < 5; chid++) {
- if (!(channels & (1 << chid)))
- continue;
+static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
+ .dpms = nv50_sor_dpms,
+ .mode_fixup = nv50_sor_mode_fixup,
+ .prepare = nv50_sor_prepare,
+ .commit = nv50_sor_commit,
+ .mode_set = nv50_sor_mode_set,
+ .disable = nv50_sor_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_sor_func = {
+ .destroy = nv50_sor_destroy,
+};
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
- addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
- data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
- NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
- "(0x%04x 0x%02x)\n", chid,
- addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+void
+nv50_display_fini(struct drm_device *dev)
+{
+}
- nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
+int
+nv50_display_init(struct drm_device *dev)
+{
+ u32 *push = evo_wait(nv50_mast(dev), 32);
+ if (push) {
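+		/* 0x0088 is the core channel's DMA_NOTIFY method; point it at the shared sync object */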
+ evo_mthd(push, 0x0088, 1);
+ evo_data(push, NvEvoSync);
+ evo_kick(push, nv50_mast(dev));
+ return evo_sync(dev);
}
+
+ return -EBUSY;
}
void
-nv50_display_intr(struct drm_device *dev)
+nv50_display_destroy(struct drm_device *dev)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+
+ nv50_dmac_destroy(disp->core, &disp->mast.base);
+
+ nouveau_bo_unmap(disp->sync);
+ if (disp->sync)
+ nouveau_bo_unpin(disp->sync);
+ nouveau_bo_ref(NULL, &disp->sync);
+
+ nouveau_display(dev)->priv = NULL;
+ kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
{
+ static const u16 oclass[] = {
+ NVE0_DISP_CLASS,
+ NVD0_DISP_CLASS,
+ NVA3_DISP_CLASS,
+ NV94_DISP_CLASS,
+ NVA0_DISP_CLASS,
+ NV84_DISP_CLASS,
+ NV50_DISP_CLASS,
+ };
struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- uint32_t delayed = 0;
-
- while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
- uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
- uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
- uint32_t clock;
+ struct dcb_table *dcb = &drm->vbios.dcb;
+ struct drm_connector *connector, *tmp;
+ struct nv50_disp *disp;
+ struct dcb_output *dcbe;
+ int crtcs, ret, i;
- NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
- if (!intr0 && !(intr1 & ~delayed))
- break;
+ nouveau_display(dev)->priv = disp;
+ nouveau_display(dev)->dtor = nv50_display_destroy;
+ nouveau_display(dev)->init = nv50_display_init;
+ nouveau_display(dev)->fini = nv50_display_fini;
- if (intr0 & 0x001f0000) {
- nv50_display_error_handler(dev);
- intr0 &= ~0x001f0000;
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &disp->sync);
+ if (!ret) {
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(disp->sync);
+ if (ret)
+ nouveau_bo_unpin(disp->sync);
}
+ if (ret)
+ nouveau_bo_ref(NULL, &disp->sync);
+ }
- if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
- intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
- delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
- }
+ if (ret)
+ goto out;
+
+ /* attempt to allocate a supported evo display class */
+ ret = -ENODEV;
+ for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+ 0xd1500000, oclass[i], NULL, 0,
+ &disp->core);
+ }
- clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
- NV50_PDISPLAY_INTR_1_CLK_UNK20 |
- NV50_PDISPLAY_INTR_1_CLK_UNK40));
- if (clock) {
- nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
- tasklet_schedule(&disp->tasklet);
- delayed |= clock;
- intr1 &= ~clock;
- }
+ if (ret)
+ goto out;
+
+ /* allocate master evo channel */
+ ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
+ &(struct nv50_display_mast_class) {
+ .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
+ }, sizeof(struct nv50_display_mast_class),
+ disp->sync->bo.offset, &disp->mast.base);
+ if (ret)
+ goto out;
+
+ /* create crtc objects to represent the hw heads */
+ if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
+ crtcs = nv_rd32(device, 0x022448);
+ else
+ crtcs = 2;
+
+ for (i = 0; i < crtcs; i++) {
+ ret = nv50_crtc_create(dev, disp->core, i);
+ if (ret)
+ goto out;
+ }
+
+ /* create encoder/connector objects based on VBIOS DCB table */
+ for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+ connector = nouveau_connector_create(dev, dcbe->connector);
+ if (IS_ERR(connector))
+ continue;
- if (intr0) {
- NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
- nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
+ if (dcbe->location != DCB_LOC_ON_CHIP) {
+ NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
}
- if (intr1) {
- NV_ERROR(drm,
- "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
- nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ nv50_sor_create(connector, dcbe);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ nv50_dac_create(connector, dcbe);
+ break;
+ default:
+ NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
}
}
+
+ /* cull any connectors we created that don't have an encoder */
+ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+ if (connector->encoder_ids[0])
+ continue;
+
+ NV_WARN(drm, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
+
+out:
+ if (ret)
+ nv50_display_destroy(dev);
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 973554d8a7a6..70da347aa8c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -30,77 +30,16 @@
#include "nouveau_display.h"
#include "nouveau_crtc.h"
#include "nouveau_reg.h"
-#include "nv50_evo.h"
-struct nv50_display_crtc {
- struct nouveau_channel *sync;
- struct {
- struct nouveau_bo *bo;
- u32 offset;
- u16 value;
- } sem;
-};
+int nv50_display_create(struct drm_device *);
+void nv50_display_destroy(struct drm_device *);
+int nv50_display_init(struct drm_device *);
+void nv50_display_fini(struct drm_device *);
-struct nv50_display {
- struct nouveau_channel *master;
-
- struct nouveau_gpuobj *ramin;
- u32 dmao;
- u32 hash;
-
- struct nv50_display_crtc crtc[2];
-
- struct tasklet_struct tasklet;
- struct {
- struct dcb_output *dcb;
- u16 script;
- u32 pclk;
- } irq;
-};
-
-static inline struct nv50_display *
-nv50_display(struct drm_device *dev)
-{
- return nouveau_display(dev)->priv;
-}
-
-int nv50_display_early_init(struct drm_device *dev);
-void nv50_display_late_takedown(struct drm_device *dev);
-int nv50_display_create(struct drm_device *dev);
-int nv50_display_init(struct drm_device *dev);
-void nv50_display_fini(struct drm_device *dev);
-void nv50_display_destroy(struct drm_device *dev);
-void nv50_display_intr(struct drm_device *);
-int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
-int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
-
-u32 nv50_display_active_crtcs(struct drm_device *);
-
-int nv50_display_sync(struct drm_device *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
-
-int nv50_evo_create(struct drm_device *dev);
-void nv50_evo_destroy(struct drm_device *dev);
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
- u64 size);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **);
-
-int nvd0_display_create(struct drm_device *);
-void nvd0_display_destroy(struct drm_device *);
-int nvd0_display_init(struct drm_device *);
-void nvd0_display_fini(struct drm_device *);
-void nvd0_display_intr(struct drm_device *);
-
-void nvd0_display_flip_stop(struct drm_crtc *);
-int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *, u32 swap_interval);
struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644
index 9f6f55cdfa77..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32
-nv50_evo_rd32(struct nouveau_object *object, u32 addr)
-{
- void __iomem *iomem = object->oclass->ofuncs->rd08;
- return ioread32_native(iomem + addr);
-}
-
-static void
-nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
-{
- void __iomem *iomem = object->oclass->ofuncs->rd08;
- iowrite32_native(data, iomem + addr);
-}
-
-static void
-nv50_evo_channel_del(struct nouveau_channel **pevo)
-{
- struct nouveau_channel *evo = *pevo;
-
- if (!evo)
- return;
- *pevo = NULL;
-
- nouveau_bo_unmap(evo->push.buffer);
- nouveau_bo_ref(NULL, &evo->push.buffer);
-
- if (evo->object)
- iounmap(evo->object->oclass->ofuncs);
-
- kfree(evo);
-}
-
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
- struct drm_device *dev = evo->fence;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 dmao = disp->dmao;
- u32 hash = disp->hash;
- u32 flags5;
-
- if (nv_device(drm->device)->chipset < 0xc0) {
- /* not supported on 0x50, specified in format mthd */
- if (nv_device(drm->device)->chipset == 0x50)
- memtype = 0;
- flags5 = 0x00010000;
- } else {
- if (memtype & 0x80000000)
- flags5 = 0x00000000; /* large pages */
- else
- flags5 = 0x00020000;
- }
-
- nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
- nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
- nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
- nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
- upper_32_bits(base));
- nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
- nv_wo32(disp->ramin, dmao + 0x14, flags5);
-
- nv_wo32(disp->ramin, hash + 0x00, handle);
- nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
- evo->handle);
-
- disp->dmao += 0x20;
- disp->hash += 0x08;
- return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, int chid,
- struct nouveau_channel **pevo)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo;
- int ret;
-
- evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
- if (!evo)
- return -ENOMEM;
- *pevo = evo;
-
- evo->drm = drm;
- evo->handle = chid;
- evo->fence = dev;
- evo->user_get = 4;
- evo->user_put = 0;
-
- ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
- &evo->push.buffer);
- if (ret == 0)
- ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pevo);
- return ret;
- }
-
- ret = nouveau_bo_map(evo->push.buffer);
- if (ret) {
- NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pevo);
- return ret;
- }
-
- evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
-#ifdef NOUVEAU_OBJECT_MAGIC
- evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
-#endif
- evo->object->parent = nv_object(disp->ramin)->parent;
- evo->object->engine = nv_object(disp->ramin)->engine;
- evo->object->oclass =
- kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
- evo->object->oclass->ofuncs =
- kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
- evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
- evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
- evo->object->oclass->ofuncs->rd08 =
- ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
- return 0;
-}
-
-static int
-nv50_evo_channel_init(struct nouveau_channel *evo)
-{
- struct nouveau_drm *drm = evo->drm;
- struct nouveau_device *device = nv_device(drm->device);
- int id = evo->handle, ret, i;
- u64 pushbuf = evo->push.buffer->bo.offset;
- u32 tmp;
-
- tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
- if ((tmp & 0x009f0000) == 0x00020000)
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
-
- tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
- if ((tmp & 0x003f0000) == 0x00030000)
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
-
- /* initialise fifo */
- nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
- NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
- NV50_PDISPLAY_EVO_DMA_CB_VALID);
- nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
- nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
- NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-
- nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
- NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
- if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
- NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
- nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
- return -EBUSY;
- }
-
- /* enable error reporting on the channel */
- nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
-
- evo->dma.max = (4096/4) - 2;
- evo->dma.max &= ~7;
- evo->dma.put = 0;
- evo->dma.cur = evo->dma.put;
- evo->dma.free = evo->dma.max - evo->dma.cur;
-
- ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
- if (ret)
- return ret;
-
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING(evo, 0);
-
- return 0;
-}
-
-static void
-nv50_evo_channel_fini(struct nouveau_channel *evo)
-{
- struct nouveau_drm *drm = evo->drm;
- struct nouveau_device *device = nv_device(drm->device);
- int id = evo->handle;
-
- nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
- nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
- if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
- NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
- nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
- }
-}
-
-void
-nv50_evo_destroy(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int i;
-
- for (i = 0; i < 2; i++) {
- if (disp->crtc[i].sem.bo) {
- nouveau_bo_unmap(disp->crtc[i].sem.bo);
- nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
- }
- nv50_evo_channel_del(&disp->crtc[i].sync);
- }
- nv50_evo_channel_del(&disp->master);
- nouveau_gpuobj_ref(NULL, &disp->ramin);
-}
-
-int
-nv50_evo_create(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo;
- int ret, i, j;
-
- /* setup object management on it, any other evo channel will
- * use this also as there's no per-channel support on the
- * hardware
- */
- ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
- if (ret) {
- NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
- goto err;
- }
-
- disp->hash = 0x0000;
- disp->dmao = 0x1000;
-
- /* create primary evo channel, the one we use for modesetting
- * purposes
- */
- ret = nv50_evo_channel_new(dev, 0, &disp->master);
- if (ret)
- return ret;
- evo = disp->master;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
- disp->ramin->addr + 0x2000, 0x1000, NULL);
- if (ret)
- goto err;
-
- /* create some default objects for the scanout memtypes we support */
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- /* create "display sync" channels and other structures we need
- * to implement page flipping
- */
- for (i = 0; i < 2; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- u64 offset;
-
- ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
- if (ret)
- goto err;
-
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &dispc->sem.bo);
- if (!ret) {
- ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(dispc->sem.bo);
- if (ret)
- nouveau_bo_ref(NULL, &dispc->sem.bo);
- offset = dispc->sem.bo->bo.offset;
- }
-
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
- offset, 4096, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ?
- 0x7a : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ?
- 0x70 : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- for (j = 0; j < 4096; j += 4)
- nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
- dispc->sem.offset = 0;
- }
-
- return 0;
-
-err:
- nv50_evo_destroy(dev);
- return ret;
-}
-
-int
-nv50_evo_init(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int ret, i;
-
- ret = nv50_evo_channel_init(disp->master);
- if (ret)
- return ret;
-
- for (i = 0; i < 2; i++) {
- ret = nv50_evo_channel_init(disp->crtc[i].sync);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-void
-nv50_evo_fini(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int i;
-
- for (i = 0; i < 2; i++) {
- if (disp->crtc[i].sync)
- nv50_evo_channel_fini(disp->crtc[i].sync);
- }
-
- if (disp->master)
- nv50_evo_channel_fini(disp->master);
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644
index 771d879bc834..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NV50_EVO_H__
-#define __NV50_EVO_H__
-
-#define NV50_EVO_UPDATE 0x00000080
-#define NV50_EVO_UNK84 0x00000084
-#define NV50_EVO_UNK84_NOTIFY 0x40000000
-#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
-#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
-#define NV50_EVO_DMA_NOTIFY 0x00000088
-#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
-#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
-#define NV50_EVO_UNK8C 0x0000008C
-
-#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
-#define NV50_EVO_DAC_MODE_CTRL 0x00000400
-#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
-#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
-#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
-
-#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
-#define NV50_EVO_SOR_MODE_CTRL 0x00000600
-#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
-#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
-#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
-#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
-#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
-#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
-
-#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
-#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
-#define NV50_EVO_CRTC_UNK0800 0x00000800
-#define NV50_EVO_CRTC_CLOCK 0x00000804
-#define NV50_EVO_CRTC_INTERLACE 0x00000808
-#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
-#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
-#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
-#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
-#define NV50_EVO_CRTC_UNK0820 0x00000820
-#define NV50_EVO_CRTC_UNK0824 0x00000824
-#define NV50_EVO_CRTC_UNK082C 0x0000082c
-#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
-/* You can't have a palette in 8 bit mode (=OFF) */
-#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
-#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
-#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
-#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
-#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
-#define NV50_EVO_CRTC_FB_SIZE 0x00000868
-#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
-#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
-#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
-#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
-#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
-#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
-#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
-#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
-#define NV50_EVO_CRTC_FB_DMA 0x00000874
-#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
-#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
-#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
-#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
-#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
-#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
-#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
-#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
-#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
-#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
-#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00
-#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000
-#define NV50_EVO_CRTC_FB_POS 0x000008c0
-#define NV50_EVO_CRTC_REAL_RES 0x000008c8
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
- ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
-/* Both of these are needed, otherwise nothing happens. */
-#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
-#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
-#define NV50_EVO_CRTC_UNK900 0x00000900
-#define NV50_EVO_CRTC_UNK904 0x00000904
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index e0763ea88ee2..d889f3ac0d41 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
@@ -119,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
+ priv->base.resume = nv17_fence_resume;
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index c4a65039b1ca..8bd5d2781baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nouveau_dev(dev);
- u32 crtc_mask = nv50_display_active_crtcs(dev);
+ u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
struct nouveau_mem_exec_func exec = {
.dev = dev,
.precharge = mclk_precharge,
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644
index b562b59e1326..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv50[] = { 16, 8, 0, 24 };
- if (nv_device(drm->device)->chipset == 0xaf)
- return nvaf[lane];
- return nv50[lane];
-}
-
-static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
-}
-
-static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- config = entry + table[4];
- while (config[0] != swing || config[1] != preem) {
- config += table[5];
- if (config >= entry + table[4] + entry[4] * table[5])
- return;
- }
-
- nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
- nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
- nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
-}
-
-static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
- u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
- u8 *table, *entry, mask;
- int i;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- entry = ROMPTR(dev, entry[10]);
- if (entry) {
- while (link_bw < ROM16(entry[0]) * 10)
- entry += 4;
-
- nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
- }
-
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- if (link_bw > 162000)
- clksor |= 0x00040000;
-
- nv_wr32(device, 0x614300 + (or * 0x800), clksor);
- nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
-
- mask = 0;
- for (i = 0; i < link_nr; i++)
- mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
- nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
-}
-
-static void
-nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
- u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
- if (clksor & 0x000c0000)
- *bw = 270000;
- else
- *bw = 162000;
-
- if (dpctrl > 0x00030000) *nr = 4;
- else if (dpctrl > 0x00010000) *nr = 2;
- else *nr = 1;
-}
-
-void
-nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 symbol = 100000;
- int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
- int TU, VTUi, VTUf, VTUa;
- u64 link_data_rate, link_ratio, unk;
- u32 best_diff = 64 * symbol;
- u32 link_nr, link_bw, r;
-
- /* calculate packed data rate for each lane */
- nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
- link_data_rate = (clk * bpp / 8) / link_nr;
-
- /* calculate ratio of packed data rate to link symbol rate */
- link_ratio = link_data_rate * symbol;
- r = do_div(link_ratio, link_bw);
-
- for (TU = 64; TU >= 32; TU--) {
- /* calculate average number of valid symbols in each TU */
- u32 tu_valid = link_ratio * TU;
- u32 calc, diff;
-
- /* find a hw representation for the fraction.. */
- VTUi = tu_valid / symbol;
- calc = VTUi * symbol;
- diff = tu_valid - calc;
- if (diff) {
- if (diff >= (symbol / 2)) {
- VTUf = symbol / (symbol - diff);
- if (symbol - (VTUf * diff))
- VTUf++;
-
- if (VTUf <= 15) {
- VTUa = 1;
- calc += symbol - (symbol / VTUf);
- } else {
- VTUa = 0;
- VTUf = 1;
- calc += symbol;
- }
- } else {
- VTUa = 0;
- VTUf = min((int)(symbol / diff), 15);
- calc += symbol / VTUf;
- }
-
- diff = calc - tu_valid;
- } else {
- /* no remainder, but the hw doesn't like the fractional
- * part to be zero. decrement the integer part and
- * have the fraction add a whole symbol back
- */
- VTUa = 0;
- VTUf = 1;
- VTUi--;
- }
-
- if (diff < best_diff) {
- best_diff = diff;
- bestTU = TU;
- bestVTUa = VTUa;
- bestVTUf = VTUf;
- bestVTUi = VTUi;
- if (diff == 0)
- break;
- }
- }
-
- if (!bestTU) {
- NV_ERROR(drm, "DP: unable to find suitable config\n");
- return;
- }
-
- /* XXX close to vbios numbers, but not right */
- unk = (symbol - link_ratio) * bestTU;
- unk *= link_ratio;
- r = do_div(unk, symbol);
- r = do_div(unk, symbol);
- unk += 6;
-
- nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
- nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
- bestVTUf << 16 |
- bestVTUi << 8 |
- unk);
-}
-static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- if (!nv_encoder->crtc)
- return;
- nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
- NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
-
- ret = RING_SPACE(evo, 4);
- if (ret) {
- NV_ERROR(drm, "no space while disconnecting SOR\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
-
- nouveau_hdmi_mode_set(encoder, NULL);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-}
-
-static void
-nv50_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_encoder *enc;
- uint32_t val;
- int or = nv_encoder->or;
-
- NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
-
- nv_encoder->last_dpms = mode;
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nvenc = nouveau_encoder(enc);
-
- if (nvenc == nv_encoder ||
- (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
- nvenc->dcb->type != DCB_OUTPUT_LVDS &&
- nvenc->dcb->type != DCB_OUTPUT_DP) ||
- nvenc->dcb->or != nv_encoder->dcb->or)
- continue;
-
- if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
- return;
- }
-
- /* wait for it to be done */
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
- NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
- }
-
- val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
-
- if (mode == DRM_MODE_DPMS_ON)
- val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
- else
- val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-
- nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
- NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
- NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
- NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
- }
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nv50_sor_dp_link_set,
- .train_set = nv50_sor_dp_train_set,
- .train_adj = nv50_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static void
-nv50_sor_save(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_sor_restore(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *connector;
-
- NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
- connector = nouveau_encoder_connector_get(nv_encoder);
- if (!connector) {
- NV_ERROR(drm, "Encoder has no connector\n");
- return false;
- }
-
- if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
-
- return true;
-}
-
-static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- nv50_sor_disconnect(encoder);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- /* avoid race between link training and supervisor intr */
- nv50_display_sync(encoder->dev);
- }
-}
-
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- uint32_t mode_ctl = 0;
- int ret;
-
- NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
- nv_encoder->or, nv_encoder->dcb->type, crtc->index);
- nv_encoder->crtc = encoder->crtc;
-
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctl = 0x0100;
- else
- mode_ctl = 0x0500;
- } else
- mode_ctl = 0x0200;
-
- nouveau_hdmi_mode_set(encoder, mode);
- break;
- case DCB_OUTPUT_DP:
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- mode_ctl |= 0x00020000;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- mode_ctl |= 0x00050000;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctl |= 0x00000800;
- else
- mode_ctl |= 0x00000900;
- break;
- default:
- break;
- }
-
- if (crtc->index == 1)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
- else
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
-
- nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- ret = RING_SPACE(evo, 2);
- if (ret) {
- NV_ERROR(drm, "no space while connecting SOR\n");
- nv_encoder->crtc = NULL;
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING(evo, mode_ctl);
-}
-
-static struct drm_crtc *
-nv50_sor_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
- .dpms = nv50_sor_dpms,
- .save = nv50_sor_save,
- .restore = nv50_sor_restore,
- .mode_fixup = nv50_sor_mode_fixup,
- .prepare = nv50_sor_prepare,
- .commit = nv50_sor_commit,
- .mode_set = nv50_sor_mode_set,
- .get_crtc = nv50_sor_crtc_get,
- .detect = NULL,
- .disable = nv50_sor_disconnect
-};
-
-static void
-nv50_sor_destroy(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
- NV_DEBUG(drm, "\n");
-
- drm_encoder_cleanup(encoder);
-
- kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
- .destroy = nv50_sor_destroy,
-};
-
-int
-nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
-{
- struct nouveau_encoder *nv_encoder = NULL;
- struct drm_device *dev = connector->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder *encoder;
- int type;
-
- NV_DEBUG(drm, "\n");
-
- switch (entry->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_DP:
- type = DRM_MODE_ENCODER_TMDS;
- break;
- case DCB_OUTPUT_LVDS:
- type = DRM_MODE_ENCODER_LVDS;
- break;
- default:
- return -EINVAL;
- }
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- encoder = to_drm_encoder(nv_encoder);
-
- nv_encoder->dcb = entry;
- nv_encoder->or = ffs(entry->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
- drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
-
- encoder->possible_crtcs = entry->heads;
- encoder->possible_clones = 0;
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 53299eac9676..2a56b1b551cb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
struct nvc0_fence_chan *fctx = chan->fence;
int i;
- if (nv_device(chan->drm->device)->card_type >= NV_D0) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
- } else
- if (nv_device(chan->drm->device)->card_type >= NV_50) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
}
nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo;
- if (nv_device(chan->drm->device)->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(chan->drm->dev, i);
- else
- bo = nv50_display_crtc_sema(chan->drm->dev, i);
-
+ struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
{
struct nvc0_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
if (ret == 0) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (ret == 0)
+ if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index c402fca2b2b8..000000000000
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ /dev/null
@@ -1,2141 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_fence.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
-#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
-
-struct evo {
- int idx;
- dma_addr_t handle;
- u32 *ptr;
- struct {
- u32 offset;
- u16 value;
- } sem;
-};
-
-struct nvd0_display {
- struct nouveau_gpuobj *mem;
- struct nouveau_bo *sync;
- struct evo evo[9];
-
- struct tasklet_struct tasklet;
- u32 modeset;
-};
-
-static struct nvd0_display *
-nvd0_display(struct drm_device *dev)
-{
- return nouveau_display(dev)->priv;
-}
-
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static inline int
-evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- int ret = 0;
- nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
- nv_wr32(device, 0x610704 + (id * 0x10), data);
- nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
- if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
- ret = -EBUSY;
- nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
- return ret;
-}
-
-static u32 *
-evo_wait(struct drm_device *dev, int id, int nr)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
-
- if (put + nr >= (PAGE_SIZE / 4)) {
- disp->evo[id].ptr[put] = 0x20000000;
-
- nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
- if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
- NV_ERROR(drm, "evo %d dma stalled\n", id);
- return NULL;
- }
-
- put = 0;
- }
-
- return disp->evo[id].ptr + put;
-}
-
-static void
-evo_kick(u32 *push, struct drm_device *dev, int id)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvd0_display *disp = nvd0_display(dev);
-
- nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
-}
-
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
-
-static int
-evo_init_dma(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 flags;
-
- flags = 0x00000000;
- if (ch == EVO_MASTER)
- flags |= 0x01000000;
-
- nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
- nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
- nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
- nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
- if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
- NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(device, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(device, 0x610090, (1 << ch), (1 << ch));
- nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_dma(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
- return;
-
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
- nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
- nv_mask(device, 0x610090, (1 << ch), 0x00000000);
- nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static inline void
-evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
-}
-
-static int
-evo_init_pio(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
- if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
- NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(device, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(device, 0x610090, (1 << ch), (1 << ch));
- nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_pio(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
- return;
-
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
- nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
- nv_mask(device, 0x610090, (1 << ch), 0x00000000);
- nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static bool
-evo_sync_wait(void *data)
-{
- return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
-}
-
-static int
-evo_sync(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 *push = evo_wait(dev, ch, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, ch);
- if (nv_wait_cb(device, evo_sync_wait, disp->sync))
- return 0;
- }
-
- return -EBUSY;
-}
-
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
-{
- return nvd0_display(dev)->sync;
-}
-
-void
-nvd0_display_flip_stop(struct drm_crtc *crtc)
-{
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u32 *push;
-
- push = evo_wait(crtc->dev, evo->idx, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
- }
-}
-
-int
-nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u64 offset;
- u32 *push;
- int ret;
-
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
-
- push = evo_wait(crtc->dev, evo->idx, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
-
- /* synchronise with the rendering channel, if necessary */
- if (likely(chan)) {
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
-
-
- offset = nvc0_fence_crtc(chan, nv_crtc->index);
- offset += evo->sem.offset;
-
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | evo->sem.value);
- OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x1001);
- FIRE_RING (chan);
- } else {
- nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
- 0xf00d0000 | evo->sem.value);
- evo_sync(crtc->dev, EVO_MASTER);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, evo->sem.offset);
- evo_data(push, 0xf00d0000 | evo->sem.value);
- evo_data(push, 0x74b1e000);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_dma);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
-
- evo->sem.offset ^= 0x10;
- evo->sem.value++;
- return 0;
-}
-
-/******************************************************************************
- * CRTC
- *****************************************************************************/
-static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
- u32 mthd;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- if (nv_device(drm->device)->card_type < NV_E0)
- mthd = 0x0490 + (nv_crtc->index * 0x0300);
- else
- mthd = 0x04a0 + (nv_crtc->index * 0x0300);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, mthd, 1);
- evo_data(push, mode);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, dev, EVO_MASTER);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
-
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode)
- mode = nv_connector->scaling_mode;
-
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
- evo_kick(push, dev, EVO_MASTER);
- if (update) {
- nvd0_display_flip_stop(crtc);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- }
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- u32 *push;
-
- push = evo_wait(fb->dev, EVO_MASTER, 16);
- if (push) {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_dma);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, fb->dev, EVO_MASTER);
- }
-
- nv_crtc->fb.tile_flags = nvfb->r_dma;
- return 0;
-}
-
-static void
-nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- u32 *push = evo_wait(dev, EVO_MASTER, 16);
- if (push) {
- if (show) {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- }
-
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
-
- evo_kick(push, dev, EVO_MASTER);
- }
-}
-
-static void
-nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nvd0_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- nvd0_display_flip_stop(crtc);
-
- push = evo_wait(crtc->dev, EVO_MASTER, 2);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, false, false);
-}
-
-static void
-nvd0_crtc_commit(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 32);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
- evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0xffffff00);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-}
-
-static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
- int ret;
-
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- nvfb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(nvfb->nvbo);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 64);
- if (push) {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nvd0_crtc_set_dither(nv_crtc, false);
- nvd0_crtc_set_scale(nv_crtc, false);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
-
- if (!crtc->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
- return 0;
-}
-
-static void
-nvd0_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- for (i = 0; i < 256; i++) {
- writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
- writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
- writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
- }
-}
-
-static int
-nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- bool visible = (handle != 0);
- int i, ret = 0;
-
- if (visible) {
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(nvbo);
- if (ret == 0) {
- for (i = 0; i < 64 * 64; i++) {
- u32 v = nouveau_bo_rd32(nvbo, i);
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
- }
- nouveau_bo_unmap(nvbo);
- }
-
- drm_gem_object_unreference_unlocked(gem);
- }
-
- if (visible != nv_crtc->cursor.visible) {
- nvd0_crtc_cursor_show(nv_crtc, visible, true);
- nv_crtc->cursor.visible = visible;
- }
-
- return ret;
-}
-
-static int
-nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ch = EVO_CURS(nv_crtc->index);
-
- evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
- evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
- return 0;
-}
-
-static void
-nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = max(start + size, (u32)256);
- u32 i;
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- nvd0_crtc_lut_load(crtc);
-}
-
-static void
-nvd0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- drm_crtc_cleanup(crtc);
- kfree(crtc);
-}
-
-static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
- .dpms = nvd0_crtc_dpms,
- .prepare = nvd0_crtc_prepare,
- .commit = nvd0_crtc_commit,
- .mode_fixup = nvd0_crtc_mode_fixup,
- .mode_set = nvd0_crtc_mode_set,
- .mode_set_base = nvd0_crtc_mode_set_base,
- .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
- .load_lut = nvd0_crtc_lut_load,
-};
-
-static const struct drm_crtc_funcs nvd0_crtc_func = {
- .cursor_set = nvd0_crtc_cursor_set,
- .cursor_move = nvd0_crtc_cursor_move,
- .gamma_set = nvd0_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = nvd0_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
-};
-
-static void
-nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
-static int
-nvd0_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_crtc *nv_crtc;
- struct drm_crtc *crtc;
- int ret, i;
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nvd0_crtc_set_dither;
- nv_crtc->set_scale = nvd0_crtc_set_scale;
- nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
- nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- crtc = &nv_crtc->base;
- drm_crtc_init(dev, crtc, &nvd0_crtc_func);
- drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
- drm_mode_crtc_set_gamma_size(crtc, 256);
-
- ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
- nvd0_crtc_lut_load(crtc);
-
-out:
- if (ret)
- nvd0_crtc_destroy(crtc);
- return ret;
-}
-
-/******************************************************************************
- * DAC
- *****************************************************************************/
-static void
-nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- dpms_ctrl = 0x80000000;
- if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000001;
- if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000004;
-
- nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
- nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-}
-
-static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- u32 syncs, magic, *push;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- push = evo_wait(encoder->dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
- evo_data(push, 1 << nv_crtc->index);
- evo_data(push, 0x00ff);
- evo_kick(push, encoder->dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = NULL;
- }
-}
-
-static enum drm_connector_status
-nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- enum drm_connector_status status = connector_status_disconnected;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or;
- u32 load;
-
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
- udelay(9500);
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
-
- load = nv_rd32(device, 0x61a00c + (or * 0x800));
- if ((load & 0x38000000) == 0x38000000)
- status = connector_status_connected;
-
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
- return status;
-}
-
-static void
-nvd0_dac_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
- .dpms = nvd0_dac_dpms,
- .mode_fixup = nvd0_dac_mode_fixup,
- .prepare = nvd0_dac_disconnect,
- .commit = nvd0_dac_commit,
- .mode_set = nvd0_dac_mode_set,
- .disable = nvd0_dac_disconnect,
- .get_crtc = nvd0_display_crtc_get,
- .detect = nvd0_dac_detect
-};
-
-static const struct drm_encoder_funcs nvd0_dac_func = {
- .destroy = nvd0_dac_destroy,
-};
-
-static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * Audio
- *****************************************************************************/
-static void
-nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int i, or = nv_encoder->or * 0x30;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid))
- return;
-
- nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
-
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
-
- nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
- }
-}
-
-static void
-nvd0_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or * 0x30;
-
- nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
-}
-
-/******************************************************************************
- * HDMI
- *****************************************************************************/
-static void
-nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int head = nv_crtc->index * 0x800;
- u32 rekey = 56; /* binary driver, and tegra constant */
- u32 max_ac_packet;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_hdmi_monitor(nv_connector->edid))
- return;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* AVI InfoFrame */
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
- nv_wr32(device, 0x61671c + head, 0x000d0282);
- nv_wr32(device, 0x616720 + head, 0x0000006f);
- nv_wr32(device, 0x616724 + head, 0x00000000);
- nv_wr32(device, 0x616728 + head, 0x00000000);
- nv_wr32(device, 0x61672c + head, 0x00000000);
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
-
- /* ??? InfoFrame? */
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_wr32(device, 0x6167ac + head, 0x00000010);
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
-
- /* HDMI_CTRL */
- nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
- max_ac_packet << 16);
-
- /* NFI, audio doesn't work without it though.. */
- nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
-
- nvd0_audio_mode_set(encoder, mode);
-}
-
-static void
-nvd0_hdmi_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int head = nv_crtc->index * 0x800;
-
- nvd0_audio_disconnect(encoder);
-
- nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-}
-
-/******************************************************************************
- * SOR
- *****************************************************************************/
-static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
- static const u8 nvd0[] = { 16, 8, 0, 24 };
- return nvd0[lane];
-}
-
-static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-}
-
-static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config = NULL;
-
- switch (swing) {
- case 0: preem += 0; break;
- case 1: preem += 4; break;
- case 2: preem += 7; break;
- case 3: preem += 9; break;
- }
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) {
- config = entry + table[4];
- config += table[5] * preem;
- } else
- if (table[0] == 0x40) {
- config = table + table[1];
- config += table[2] * table[3];
- config += table[6] * preem;
- }
- }
-
- if (!config) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
- nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
- nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
- nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
-}
-
-static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
- u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
- u32 script = 0x0000, lane_mask = 0;
- u8 *table, *entry;
- int i;
-
- link_bw /= 27000;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
- else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
- else entry = NULL;
-
- while (entry) {
- if (entry[0] >= link_bw)
- break;
- entry += 3;
- }
-
- nouveau_bios_run_init_table(dev, script, dcb, crtc);
- }
-
- clksor |= link_bw << 18;
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- for (i = 0; i < link_nr; i++)
- lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
-
- nv_wr32(device, 0x612300 + soff, clksor);
- nv_wr32(device, 0x61c10c + loff, dpctrl);
- nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
-}
-
-static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
- u32 *link_nr, u32 *link_bw)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
- u32 clksor = nv_rd32(device, 0x612300 + soff);
-
- if (dpctrl > 0x00030000) *link_nr = 4;
- else if (dpctrl > 0x00010000) *link_nr = 2;
- else *link_nr = 1;
-
- *link_bw = (clksor & 0x007c0000) >> 18;
- *link_bw *= 27000;
-}
-
-static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
- u32 crtc, u32 datarate)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 symbol = 100000;
- const u32 TU = 64;
- u32 link_nr, link_bw;
- u64 ratio, value;
-
- nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
-
- ratio = datarate;
- ratio *= symbol;
- do_div(ratio, link_nr * link_bw);
-
- value = (symbol - ratio) * TU;
- value *= ratio;
- do_div(value, symbol);
- do_div(value, symbol);
-
- value += 5;
- value |= 0x08000000;
-
- nv_wr32(device, 0x616610 + (crtc * 0x800), value);
-}
-
-static void
-nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- struct drm_encoder *partner;
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
-
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
-
- dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
- dpms_ctrl |= 0x80000000;
-
- nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
- nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nvd0_sor_dp_link_set,
- .train_set = nvd0_sor_dp_train_set,
- .train_adj = nvd0_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nvd0_hdmi_disconnect(encoder);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- }
-}
-
-static void
-nvd0_sor_prepare(struct drm_encoder *encoder)
-{
- nvd0_sor_disconnect(encoder);
- if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
- evo_sync(encoder->dev, EVO_MASTER);
-}
-
-static void
-nvd0_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct nvbios *bios = &drm->vbios;
- u32 mode_ctrl = (1 << nv_crtc->index);
- u32 syncs, magic, *push;
- u32 or_config;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctrl |= 0x00000100;
- else
- mode_ctrl |= 0x00000500;
- } else {
- mode_ctrl |= 0x00000200;
- }
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (mode->clock >= 165000)
- or_config |= 0x0100;
-
- nvd0_hdmi_mode_set(encoder, mode);
- break;
- case DCB_OUTPUT_LVDS:
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- or_config |= 0x0100;
- if (bios->fp.if_is_24bit)
- or_config |= 0x0200;
- } else {
- if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- if (((u8 *)nv_connector->edid)[121] == 2)
- or_config |= 0x0100;
- } else
- if (mode->clock >= bios->fp.duallink_transition_clk) {
- or_config |= 0x0100;
- }
-
- if (or_config & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- or_config |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- or_config |= 0x0200;
- }
-
- if (nv_connector->base.display_info.bpc == 8)
- or_config |= 0x0200;
-
- }
- break;
- case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- syncs |= 0x00000002 << 6;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- syncs |= 0x00000005 << 6;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctrl |= 0x00000800;
- else
- mode_ctrl |= 0x00000900;
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
- nv_encoder->dp.datarate);
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
- evo_data(push, mode_ctrl);
- evo_data(push, or_config);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_sor_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
- .dpms = nvd0_sor_dpms,
- .mode_fixup = nvd0_sor_mode_fixup,
- .prepare = nvd0_sor_prepare,
- .commit = nvd0_sor_commit,
- .mode_set = nvd0_sor_mode_set,
- .disable = nvd0_sor_disconnect,
- .get_crtc = nvd0_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nvd0_sor_func = {
- .destroy = nvd0_sor_destroy,
-};
-
-static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * IRQ
- *****************************************************************************/
-static struct dcb_output *
-lookup_dcb(struct drm_device *dev, int id, u32 mc)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- int type, or, i, link = -1;
-
- if (id < 4) {
- type = DCB_OUTPUT_ANALOG;
- or = id;
- } else {
- switch (mc & 0x00000f00) {
- case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
- case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
- case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
- case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
- case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
- case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
- return NULL;
- }
-
- or = id - 4;
- }
-
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)) &&
- (link < 0 || link == !(dcb->sorconf.link & 1)))
- return dcb;
- }
-
- NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
- return NULL;
-}
-
-static void
-nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct dcb_output *dcb;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb;
- u32 or, tmp, pclk;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
- }
-
- pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
- NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
- crtc, pclk, mask);
- if (pclk && (mask & 0x00010000)) {
- nv50_crtc_set_clock(dev, crtc, pclk);
- }
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
- or = ffs(dcb->or) - 1;
-
- nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
-
- nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
- switch (dcb->type) {
- case DCB_OUTPUT_ANALOG:
- nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
- break;
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (cfg & 0x00000100)
- tmp = 0x00000101;
- else
- tmp = 0x00000000;
-
- nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
- break;
- default:
- break;
- }
-
- break;
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct dcb_output *dcb;
- int pclk, i;
-
- pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_bh(unsigned long data)
-{
- struct drm_device *dev = (struct drm_device *)data;
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 mask = 0, crtc = ~0;
- int i;
-
- if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
- NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
- NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(device, 0x6101d0),
- nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
- for (i = 0; i < 8; i++) {
- NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
- i < 4 ? "DAC" : "SOR", i,
- nv_rd32(device, 0x640180 + (i * 0x20)),
- nv_rd32(device, 0x660180 + (i * 0x20)));
- }
- }
-
- while (!mask && ++crtc < dev->mode_config.num_crtc)
- mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
-
- if (disp->modeset & 0x00000001)
- nvd0_display_unk1_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000002)
- nvd0_display_unk2_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000004)
- nvd0_display_unk4_handler(dev, crtc, mask);
-}
-
-void
-nvd0_display_intr(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 intr = nv_rd32(device, 0x610088);
-
- if (intr & 0x00000001) {
- u32 stat = nv_rd32(device, 0x61008c);
- nv_wr32(device, 0x61008c, stat);
- intr &= ~0x00000001;
- }
-
- if (intr & 0x00000002) {
- u32 stat = nv_rd32(device, 0x61009c);
- int chid = ffs(stat) - 1;
- if (chid >= 0) {
- u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
- u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
- u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
-
- NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
- "0x%08x 0x%08x\n",
- chid, (mthd & 0x0000ffc), data, mthd, unkn);
- nv_wr32(device, 0x61009c, (1 << chid));
- nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
- }
-
- intr &= ~0x00000002;
- }
-
- if (intr & 0x00100000) {
- u32 stat = nv_rd32(device, 0x6100ac);
-
- if (stat & 0x00000007) {
- disp->modeset = stat;
- tasklet_schedule(&disp->tasklet);
-
- nv_wr32(device, 0x6100ac, (stat & 0x00000007));
- stat &= ~0x00000007;
- }
-
- if (stat) {
- NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
- nv_wr32(device, 0x6100ac, stat);
- }
-
- intr &= ~0x00100000;
- }
-
- intr &= ~0x0f000000; /* vblank, handled in core */
- if (intr)
- NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
-}
-
-/******************************************************************************
- * Init
- *****************************************************************************/
-void
-nvd0_display_fini(struct drm_device *dev)
-{
- int i;
-
- /* fini cursors + overlays + flips */
- for (i = 1; i >= 0; i--) {
- evo_fini_pio(dev, EVO_CURS(i));
- evo_fini_pio(dev, EVO_OIMM(i));
- evo_fini_dma(dev, EVO_OVLY(i));
- evo_fini_dma(dev, EVO_FLIP(i));
- }
-
- /* fini master */
- evo_fini_dma(dev, EVO_MASTER);
-}
-
-int
-nvd0_display_init(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- int ret, i;
- u32 *push;
-
- if (nv_rd32(device, 0x6100ac) & 0x00000100) {
- nv_wr32(device, 0x6100ac, 0x00000100);
- nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
- if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
- NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
- nv_rd32(device, 0x6194e8));
- return -EBUSY;
- }
- }
-
- /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
- * work at all unless you do the SOR part below.
- */
- for (i = 0; i < 3; i++) {
- u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
- nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
- }
-
- for (i = 0; i < 4; i++) {
- u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
- nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
- }
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
- u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
- u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
- nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
- nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
- nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
- }
-
- /* point at our hash table / objects, enable interrupts */
- nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
- nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
-
- /* init master */
- ret = evo_init_dma(dev, EVO_MASTER);
- if (ret)
- goto error;
-
- /* init flips + overlays + cursors */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
- (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
- (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
- (ret = evo_init_pio(dev, EVO_CURS(i))))
- goto error;
- }
-
- push = evo_wait(dev, EVO_MASTER, 32);
- if (!push) {
- ret = -EBUSY;
- goto error;
- }
- evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000);
- evo_mthd(push, 0x008c, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
-
-error:
- if (ret)
- nvd0_display_fini(dev);
- return ret;
-}
-
-void
-nvd0_display_destroy(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct pci_dev *pdev = dev->pdev;
- int i;
-
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
- }
-
- nouveau_gpuobj_ref(NULL, &disp->mem);
- nouveau_bo_unmap(disp->sync);
- nouveau_bo_ref(NULL, &disp->sync);
-
- nouveau_display(dev)->priv = NULL;
- kfree(disp);
-}
-
-int
-nvd0_display_create(struct drm_device *dev)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bar *bar = nouveau_bar(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct dcb_table *dcb = &drm->vbios.dcb;
- struct drm_connector *connector, *tmp;
- struct pci_dev *pdev = dev->pdev;
- struct nvd0_display *disp;
- struct dcb_output *dcbe;
- int crtcs, ret, i;
-
- disp = kzalloc(sizeof(*disp), GFP_KERNEL);
- if (!disp)
- return -ENOMEM;
-
- nouveau_display(dev)->priv = disp;
- nouveau_display(dev)->dtor = nvd0_display_destroy;
- nouveau_display(dev)->init = nvd0_display_init;
- nouveau_display(dev)->fini = nvd0_display_fini;
-
- /* create crtc objects to represent the hw heads */
- crtcs = nv_rd32(device, 0x022448);
- for (i = 0; i < crtcs; i++) {
- ret = nvd0_crtc_create(dev, i);
- if (ret)
- goto out;
- }
-
- /* create encoder/connector objects based on VBIOS DCB table */
- for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
- connector = nouveau_connector_create(dev, dcbe->connector);
- if (IS_ERR(connector))
- continue;
-
- if (dcbe->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
-
- switch (dcbe->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nvd0_sor_create(connector, dcbe);
- break;
- case DCB_OUTPUT_ANALOG:
- nvd0_dac_create(connector, dcbe);
- break;
- default:
- NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
- }
-
- /* cull any connectors we created that don't have an encoder */
- list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
- continue;
-
- NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
-
- /* setup interrupt handling */
- tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
-
- /* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_ref(NULL, &disp->sync);
- }
-
- if (ret)
- goto out;
-
- /* hash table and dma objects for the memory areas we care about */
- ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
- if (ret)
- goto out;
-
- /* create evo dma channels */
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- u64 offset = disp->sync->bo.offset;
- u32 dmao = 0x1000 + (i * 0x100);
- u32 hash = 0x0000 + (i * 0x040);
-
- evo->idx = i;
- evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
- evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
- if (!evo->ptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
- nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
- nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
- nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
- nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
- ((dmao + 0x00) << 9));
-
- nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
- nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
- nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
- ((dmao + 0x20) << 9));
-
- nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
- nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
- nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
- nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
- ((dmao + 0x40) << 9));
-
- nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
- nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
- nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
- nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
- ((dmao + 0x60) << 9));
- }
-
- bar->flush(bar);
-
-out:
- if (ret)
- nvd0_display_destroy(dev);
- return ret;
-}
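
For reference, a minimal sketch (not part of the patch) of how the per-channel hash-table words written with nv_wo32() in the removed nvd0_display_create() above are encoded: the object handle goes in the even word, and the odd word packs a valid bit, the channel index and the DMA-object offset inside disp->mem.

#include <stdint.h>

static inline uint32_t evo_hash_entry(int chan, uint32_t dmao)
{
	/* bit 0 = valid, DMA-object offset at bit 9, channel index at bit 27 */
	return 0x00000001 | ((uint32_t)chan << 27) | (dmao << 9);
}

/* e.g. the NvEvoVRAM entry for channel i and the object at dmao + 0x20:
 *	nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
 *	nv_wo32(disp->mem, hash + 0x0c, evo_hash_entry(i, dmao + 0x20));
 */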
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 24d932f53203..9175615bbd8a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index d5699fe4f1e8..064023bed480 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -34,8 +34,7 @@
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
-#define DP_LINK_STATUS_SIZE 6
-#define DP_DPCD_SIZE 8
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
/***** general DP utility functions *****/
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
-{
- return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_LANE0_1_STATUS + (lane >> 1);
- int s = (lane & 1) * 4;
- u8 l = dp_link_status(link_status, i);
- return (l >> s) & 0xf;
-}
-
-static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count)
-{
- int lane;
- u8 lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
- }
- return true;
-}
-
-static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count)
-{
- u8 lane_align;
- u8 lane_status;
- int lane;
-
- lane_align = dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
- return false;
- }
- return true;
-}
-
-static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-
-{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- u8 l = dp_link_status(link_status, i);
-
- return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- u8 l = dp_link_status(link_status, i);
-
- return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
@@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane;
for (lane = 0; lane < lane_count; lane++) {
- u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
- u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+ u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
@@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate,
return (link_rate * lane_num * 8) / bpp;
}
-static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
-{
- switch (dpcd[DP_MAX_LINK_RATE]) {
- case DP_LINK_BW_1_62:
- default:
- return 162000;
- case DP_LINK_BW_2_7:
- return 270000;
- case DP_LINK_BW_5_4:
- return 540000;
- }
-}
-
-static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
-{
- return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-}
-
-static u8 dp_get_dp_link_rate_coded(int link_rate)
-{
- switch (link_rate) {
- case 162000:
- default:
- return DP_LINK_BW_1_62;
- case 270000:
- return DP_LINK_BW_2_7;
- case 540000:
- return DP_LINK_BW_5_4;
- }
-}
-
/***** radeon specific DP functions *****/
/* First get the min lane# when low rate is used according to pixel clock
@@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = dp_get_max_link_rate(dpcd);
- int max_lane_num = dp_get_max_lane_number(dpcd);
+ int max_link_rate = drm_dp_max_link_rate(dpcd);
+ int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
@@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
return 540000;
}
- return dp_get_max_link_rate(dpcd);
+ return drm_dp_max_link_rate(dpcd);
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- u8 msg[25];
+ u8 msg[DP_DPCD_SIZE];
int ret, i;
- ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
+ DP_DPCD_SIZE, 0);
if (ret > 0) {
- memcpy(dig_connector->dpcd, msg, 8);
+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: ");
- for (i = 0; i < 8; i++)
+ for (i = 0; i < DP_DPCD_SIZE; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
@@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
if (!radeon_dp_get_link_status(radeon_connector, link_status))
return false;
- if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
+ if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
return true;
}
@@ -677,9 +574,8 @@ struct radeon_dp_link_train_info {
int enc_id;
int dp_clock;
int dp_lane_count;
- int rd_interval;
bool tp3_supported;
- u8 dpcd[8];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 train_set[4];
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
@@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
/* set the link rate on the sink */
- tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
+ tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
/* start training on the source */
@@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
voltage = 0xff;
while (1) {
- if (dp_info->rd_interval == 0)
- udelay(100);
- else
- mdelay(dp_info->rd_interval * 4);
+ drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
- if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
break;
}
@@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
channel_eq = false;
while (1) {
- if (dp_info->rd_interval == 0)
- udelay(400);
- else
- mdelay(dp_info->rd_interval * 4);
+ drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
- if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
break;
}
@@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
- dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
dp_info.tp3_supported = true;
else
dp_info.tp3_supported = false;
- memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
+ memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
dp_info.rdev = rdev;
dp_info.encoder = encoder;
dp_info.connector = connector;
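
For reference, a minimal sketch (not part of the patch) of the mapping that the removed dp_get_max_link_rate()/dp_get_dp_link_rate_coded() helpers implemented and that drm_dp_max_link_rate() and drm_dp_link_rate_to_bw_code() now provide: the DPCD bandwidth code is the link rate in units of 0.27 GHz, so 162000 kHz <-> 0x06 (DP_LINK_BW_1_62), 270000 kHz <-> 0x0a (DP_LINK_BW_2_7), 540000 kHz <-> 0x14 (DP_LINK_BW_5_4).

/* illustrative only; assumes the standard DPCD encoding described above */
static int dp_bw_code_to_link_rate(unsigned char bw_code)
{
	return bw_code * 27000;		/* kHz: 0x06 -> 162000, 0x0a -> 270000, 0x14 -> 540000 */
}

static unsigned char dp_link_rate_to_bw_code(int link_rate)
{
	return link_rate / 27000;	/* inverse mapping */
}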
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 010bae19554a..4552d4aff317 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- radeon_dp_set_link_config(connector, mode);
+ radeon_dp_set_link_config(connector, adjusted_mode);
}
return true;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5d1d21a6dcdd..a2d478e8692a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
}
/* wait for the next frame */
@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
blackout &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
+ /* wait for the MC to settle */
+ udelay(100);
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -1821,7 +1831,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
- rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 4;
if (rdev->pdev->device == 0x9648)
rdev->config.evergreen.max_simds = 3;
else if ((rdev->pdev->device == 0x9647) ||
@@ -1844,7 +1854,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
@@ -1866,7 +1876,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
@@ -1914,7 +1924,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
- rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_pipes = 2;
rdev->config.evergreen.max_tile_pipes = 2;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2034,10 +2044,22 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG, gb_addr_config);
- tmp = gb_addr_config & NUM_PIPES_MASK;
- tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
- EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ if ((rdev->config.evergreen.max_backends == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ if ((disabled_rb_mask & 3) == 1) {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ }
WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
@@ -2305,22 +2327,20 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return radeon_ring_test_lockup(rdev, ring);
}
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -2330,10 +2350,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
@@ -2357,15 +2374,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -2375,13 +2391,71 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+}
+
+static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ evergreen_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ evergreen_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
- return evergreen_gpu_soft_reset(rdev);
+ return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
/* Interrupts */
@@ -2403,8 +2477,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cayman_cp_int_cntl_setup(rdev, 1, 0);
cayman_cp_int_cntl_setup(rdev, 2, 0);
+ tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ WREG32(CAYMAN_DMA1_CNTL, tmp);
} else
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2457,6 +2535,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+ u32 dma_cntl, dma_cntl1 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2484,6 +2563,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2506,6 +2587,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
}
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
+ }
+
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2591,6 +2685,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
} else
WREG32(CP_INT_CNTL, cp_int_cntl);
+
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_CAYMAN)
+ WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3093,6 +3193,16 @@ restart_ih:
break;
}
break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -3116,9 +3226,19 @@ restart_ih:
} else
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
+ case 244: /* DMA trap event */
+ if (rdev->family >= CHIP_CAYMAN) {
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -3144,6 +3264,143 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3207,6 +3464,12 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3221,12 +3484,23 @@ static int evergreen_startup(struct radeon_device *rdev)
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
r = evergreen_cp_resume(rdev);
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -3273,11 +3547,9 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
r600_audio_fini(rdev);
r700_cp_stop(rdev);
- ring->ready = false;
+ r600_dma_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -3354,6 +3626,9 @@ int evergreen_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3366,6 +3641,7 @@ int evergreen_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3393,6 +3669,7 @@ void evergreen_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
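
For reference, a worked example (not part of the patch) of the ring-space arithmetic in evergreen_copy_dma() above: each DMA_PACKET_COPY moves at most 0xfffff dwords and occupies 5 ring dwords, with 11 more dwords reserved for the semaphore sync and fence packets. This sketch assumes 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12).

#include <stdio.h>

int main(void)
{
	unsigned int num_gpu_pages = 1024;				/* 4 MiB copy */
	unsigned int size_in_dw = (num_gpu_pages << 12) / 4;		/* 1048576 dwords */
	unsigned int num_loops = (size_in_dw + 0xfffff - 1) / 0xfffff;	/* DIV_ROUND_UP -> 2 */
	unsigned int ring_dw = num_loops * 5 + 11;			/* 21 dwords reserved */

	printf("%u dwords, %u copy packets, %u ring dwords\n",
	       size_in_dw, num_loops, ring_dw);
	return 0;
}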
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c042e497e450..ee4cff534f10 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -34,6 +34,8 @@
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
/* height is npipes htiles aligned == npipes * 8 pixel aligned */
nby = round_up(nby, track->npipes * 8);
} else {
+ /* always assume 8x8 htile */
+ /* align is htile align * 8, htile align vary according to
+ * number of pipe and tile width and nby
+ */
switch (track->npipes) {
case 8:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 64 * 8);
nby = round_up(nby, 64 * 8);
break;
case 4:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 64 * 8);
nby = round_up(nby, 32 * 8);
break;
case 2:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 32 * 8);
nby = round_up(nby, 32 * 8);
break;
case 1:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 32 * 8);
nby = round_up(nby, 16 * 8);
break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
}
}
/* compute number of htile */
- nbx = nbx / 8;
- nby = nby / 8;
- size = nbx * nby * 4;
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_SURFACE:
/* 8x8 only */
track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
track->db_dirty = true;
break;
case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size, info;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
+ size = command & 0x1fffff;
+ info = radeon_get_ib_value(p, idx+1);
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+ /* non mem to mem copies requires dw aligned count */
+ if (size % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ /* GDS is ok */
+ if (((info & 0x60000000) >> 29) != 1) {
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ if (((info & 0x60000000) >> 29) == 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ } else if (((info & 0x60000000) >> 29) != 2) {
+ DRM_ERROR("bad CP DMA SRC_SEL\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ /* GDS is ok */
+ if (((info & 0x00300000) >> 20) != 1) {
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ if (((info & 0x00300000) >> 20) == 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ } else {
+ DRM_ERROR("bad CP DMA DST_SEL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ }
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2540,6 +2654,35 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
case PACKET3_COPY_DW:
if (pkt->count != 4) {
DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2715,6 +2858,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+/*
+ * DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x00700000) >> 20)
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset, dst2_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 7;
+ } else {
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+3);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ }
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
+
/* vm parser */
static bool evergreen_vm_reg_valid(u32 reg)
{
@@ -2724,6 +3316,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
/* check config regs */
switch (reg) {
+ case WAIT_UNTIL:
case GRBM_GFX_INDEX:
case CP_STRMOUT_CNTL:
case CP_COHER_CNTL:
@@ -2843,6 +3436,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
+ u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -2917,6 +3511,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+ /* non mem to mem copies requires dw aligned count */
+ if ((command & 0x1fffff) % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
default:
return -EINVAL;
}
@@ -2958,3 +3610,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
return ret;
}
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib: radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl
+ * checks for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ u32 idx = 0;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+
+ do {
+ header = ib->ptr[idx];
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ if (tiled)
+ idx += count + 7;
+ else
+ idx += count + 3;
+ break;
+ case DMA_PACKET_COPY:
+ if (tiled) {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ idx += 5;
+ }
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (idx < ib->length_dw);
+
+ return 0;
+}
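A note on the header decode the parser above depends on: the DMA_PACKET() encoding added later in this patch puts the command in bits 31:28, the tiled flag in bit 23 and the count in bits 19:0. The GET_DMA_NEW()/GET_DMA_MISC() sub-fields of copy packets are not defined in this excerpt, so only the fields that are visible here are restated in this hedged sketch (names are illustrative, not the driver's):

/* Sketch only: decode an async DMA packet header as the parser does. */
static inline u32 sketch_dma_cmd(u32 header)   { return (header >> 28) & 0xf; }   /* DMA_PACKET_* */
static inline u32 sketch_dma_tiled(u32 header) { return (header >> 23) & 0x1; }   /* tiled flag */
static inline u32 sketch_dma_count(u32 header) { return header & 0xfffff; }       /* count field */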
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 2bc0f6a1b428..0bfd0e9e469b 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -45,6 +45,8 @@
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
/* Registers */
@@ -355,6 +357,54 @@
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7138
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x5e78
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -651,6 +701,7 @@
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT1_CNTL2 0x1434
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
@@ -672,6 +723,8 @@
#define CACHE_UPDATE_MODE(x) ((x) << 6)
#define VM_L2_STATUS 0x140C
#define L2_BUSY (1 << 0)
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
#define WAIT_UNTIL 0x8040
@@ -689,8 +742,9 @@
#define SOFT_RESET_ROM (1 << 14)
#define SOFT_RESET_SEM (1 << 15)
#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
/* display watermarks */
@@ -854,6 +908,37 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* ASYNC DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_TILING_CONFIG 0xD0B8
+
+#define CAYMAN_DMA1_CNTL 0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
@@ -951,6 +1036,53 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
@@ -1896,4 +2028,15 @@
/* cayman packet3 addition */
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+
#endif
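For reference, the new PACKET3_CP_DMA defines map onto the six-dword packet documented in the comment block above. A hedged sketch of how a plain memory-to-memory CP_DMA might be encoded; PACKET3() is the usual radeon type-3 header macro, which is not part of this hunk, so treat its use here as an assumption:

/* Sketch only: encode a mem-to-mem CP_DMA of 'bytes' bytes into an IB. */
static void sketch_emit_cp_dma(u32 *ib, u32 *idx, u64 src, u64 dst, u32 bytes)
{
	ib[(*idx)++] = PACKET3(PACKET3_CP_DMA, 4);          /* header + 5 payload dwords */
	ib[(*idx)++] = lower_32_bits(src);                  /* SRC_ADDR_LO */
	ib[(*idx)++] = PACKET3_CP_DMA_SRC_SEL(0) |          /* src = memory */
		       PACKET3_CP_DMA_DST_SEL(0) |          /* dst = memory */
		       (upper_32_bits(src) & 0xff);         /* SRC_ADDR_HI */
	ib[(*idx)++] = lower_32_bits(dst);                  /* DST_ADDR_LO */
	ib[(*idx)++] = upper_32_bits(dst) & 0xff;           /* DST_ADDR_HI */
	ib[(*idx)++] = bytes & 0x1fffff;                    /* COMMAND = 0, BYTE_COUNT */
}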
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cda01f808f12..835992d8d067 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
/* enable context1-7 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 0);
- WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
}
@@ -1118,22 +1131,199 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
-static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
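The padding loop above is easier to read with the packet sizes in mind: the indirect-buffer packet emitted at the end is exactly 3 dwords and has to end on an 8-dword boundary, so it must start at wptr % 8 == 5. The same computation, restated as a minimal sketch (name is illustrative only):

/* Sketch: NOPs needed so a 3-dword IB packet ends on an 8-dword boundary. */
static u32 sketch_dma_ib_pad(u32 wptr)
{
	return (5 - (wptr & 7)) & 7;	/* wptr % 8 == 0 -> 5 NOPs, == 5 -> none */
}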
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
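One detail worth calling out in the resume path: DMA_RB_CNTL takes the ring size as a log2 of the dword count (the DMA_RB_SIZE(x) field), which is why the code shifts drm_order(ring->ring_size / 4) into bit 1. A hedged arithmetic sketch for the 64 KB rings allocated in cayman_init(); order_base_2() stands in for drm_order() here and is an assumption:

/* Sketch: 64 KB ring -> 16384 dwords -> log2 = 14 -> DMA_RB_SIZE(14). */
static u32 sketch_dma_rb_size_bits(u32 ring_size_bytes)
{
	u32 rb_bufsz = order_base_2(ring_size_bytes / 4);	/* size in dwords, log2 */
	return DMA_RB_SIZE(rb_bufsz);
}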
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1143,19 +1333,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14F8));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14D8));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14FC));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14DC));
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
@@ -1180,16 +1358,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1199,13 +1375,113 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+
+}
+
+static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+}
+
+static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14F8));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14D8));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14FC));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14DC));
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ cayman_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ cayman_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int cayman_asic_reset(struct radeon_device *rdev)
{
- return cayman_gpu_soft_reset(rdev);
+ return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (cayman-SI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 dma_status_reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ else
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
static int cayman_startup(struct radeon_device *rdev)
@@ -1289,6 +1565,18 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1303,6 +1591,23 @@ static int cayman_startup(struct radeon_device *rdev)
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = cayman_cp_load_microcode(rdev);
if (r)
return r;
@@ -1310,6 +1615,10 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1663,7 @@ int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
cayman_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ cayman_dma_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1730,14 @@ int cayman_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1433,6 +1750,7 @@ int cayman_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
@@ -1463,6 +1781,7 @@ void cayman_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
@@ -1538,30 +1857,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 1 + count * 2;
- if (ndw > 0x3FFF)
- ndw = 0x3FFF;
-
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- for (; ndw > 1; ndw -= 2, --count, pe += 8) {
- uint64_t value = 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
-
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
}
-
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
}
}
}
@@ -1596,3 +1942,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
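cayman_dma_vm_flush() above repeats one three-dword pattern: a DMA_PACKET_SRBM_WRITE header, then (0xf << 16) | (register offset >> 2) (the 0xf appears to be a byte-enable mask for all four bytes), then the value to write. Expressed as a hedged helper, purely for illustration:

/* Sketch: write one SRBM register from the DMA ring (3 dwords). */
static void sketch_dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (reg >> 2));	/* byte enables + dword offset */
	radeon_ring_write(ring, val);
}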
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index cbef6815907a..48e5022ee921 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -50,6 +50,24 @@
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -80,7 +98,18 @@
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -588,5 +617,61 @@
#define PACKET3_SET_APPEND_CNT 0x75
#define PACKET3_ME_WRITE 0x7A
-#endif
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+# define CMD_VMID_FORCE (1 << 31)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_TILING_CONFIG 0xd0b8
+#define DMA_MODE 0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+#endif
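The DMA_IB_PACKET() encoding above pairs with the submission path added in ni.c: the indirect-buffer packet is three dwords — a header carrying the packet type and VM id, the 32-byte-aligned IB base address, and a dword packing the IB length above the high address bits. Restated as a sketch that simply follows the writes in cayman_dma_ring_ib_execute():

/* Sketch: the 3-dword DMA indirect-buffer packet as emitted on cayman. */
static void sketch_dma_emit_ib(struct radeon_ring *ring, struct radeon_ib *ib)
{
	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER,
					      ib->vm ? ib->vm->id : 0, 0));
	radeon_ring_write(ring, ib->gpu_addr & 0xFFFFFFE0);		/* 32-byte aligned base */
	radeon_ring_write(ring, (ib->length_dw << 12) |
				(upper_32_bits(ib->gpu_addr) & 0xFF));	/* length + addr[39:32] */
}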
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 376884f1bcd2..8ff7cac222dc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
return 0;
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
{
- if (reg < rdev->rmmio_size)
+ if (reg < rdev->rmmio_size && !always_indirect)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
}
}
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
{
- if (reg < rdev->rmmio_size)
+ if (reg < rdev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
}
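The new always_indirect parameter lets callers force the MM_INDEX/MM_DATA path, now serialized by mmio_idx_lock, even for registers inside the directly mapped aperture. The radeon.h side of this change is not part of this excerpt; the wrappers below are only an assumption about how the flag is consumed (they rely on 'rdev' being in scope, as radeon's register macros do):

/* Assumed wrappers (not shown in this diff): normal vs. forced-indirect access. */
#define SKETCH_RREG32(reg)		r100_mm_rreg(rdev, (reg), false)
#define SKETCH_RREG32_IDX(reg)		r100_mm_rreg(rdev, (reg), true)
#define SKETCH_WREG32(reg, v)		r100_mm_wreg(rdev, (reg), (v), false)
#define SKETCH_WREG32_IDX(reg, v)	r100_mm_wreg(rdev, (reg), (v), true)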
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cda280d157da..becb03e8b32f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1258,9 +1258,8 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
* reset, it's up to the caller to determine if the GPU needs one. We
* might add an helper function to check that.
*/
-static int r600_gpu_soft_reset(struct radeon_device *rdev)
+static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct rv515_mc_save save;
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -1280,14 +1279,13 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
u32 tmp;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1297,12 +1295,10 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- rv515_mc_stop(rdev, &save);
- if (r600_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -1332,13 +1328,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- /* Wait a little for things to settle down */
- mdelay(1);
- dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1348,6 +1343,66 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+
+}
+
+static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct rv515_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ r600_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ r600_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ mdelay(1);
+
rv515_mc_resume(rdev, &save);
return 0;
}
@@ -1370,9 +1425,34 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 dma_status_reg;
+
+ dma_status_reg = RREG32(DMA_STATUS_REG);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
int r600_asic_reset(struct radeon_device *rdev)
{
- return r600_gpu_soft_reset(rdev);
+ return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
@@ -1382,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 disabled_rb_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
- u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 pipe_rb_ratio, pipe_rb_remain, tmp;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
/* mask out the RBs that don't exist on that asic */
- disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+ tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+ /* make sure at least one RB is available */
+ if ((tmp & 0xff) != 0xff)
+ disabled_rb_mask = tmp;
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
@@ -1424,13 +1507,7 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
int r600_count_pipe_bits(uint32_t val)
{
- int i, ret = 0;
-
- for (i = 0; i < 32; i++) {
- ret += val & 1;
- val >>= 1;
- }
- return ret;
+ return hweight32(val);
}
static void r600_gpu_init(struct radeon_device *rdev)
@@ -1594,6 +1671,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+ WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1871,6 +1949,7 @@ void r600_cp_stop(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
int r600_init_microcode(struct radeon_device *rdev)
@@ -2196,6 +2275,132 @@ void r600_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
/*
* GPU scratch registers helpers function.
@@ -2252,6 +2457,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
@@ -2315,6 +2578,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -2334,6 +2650,80 @@ int r600_copy_blit(struct radeon_device *rdev,
return 0;
}
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2349,7 +2739,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
static int r600_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -2394,6 +2784,12 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2403,12 +2799,20 @@ static int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
+
r = r600_cp_load_microcode(rdev);
if (r)
return r;
@@ -2416,6 +2820,10 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2471,7 +2879,7 @@ int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r600_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -2544,6 +2952,9 @@ int r600_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2556,6 +2967,7 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2572,6 +2984,7 @@ void r600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2674,6 +3087,104 @@ free_scratch:
return r;
}
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
/*
* Interrupts
*
@@ -2865,6 +3376,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
u32 tmp;
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3006,6 +3519,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
+ u32 dma_cntl;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3040,12 +3554,19 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
+
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3090,6 +3611,7 @@ int r600_irq_set(struct radeon_device *rdev)
}
WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3469,6 +3991,10 @@ restart_ih:
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
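The sizing logic in r600_copy_dma() above deserves a quick worked example: each DMA_PACKET_COPY can move at most 0xFFFE dwords, so the transfer is split into DIV_ROUND_UP(size_in_dw, 0xFFFE) packets of 4 dwords each, and the ring lock reserves 8 extra dwords for the semaphore sync and fence. A hedged arithmetic sketch, assuming the usual 4 KB GPU page size:

/* Sketch: ring dwords needed to DMA-copy n GPU pages (4 KB pages assumed). */
static unsigned sketch_copy_dma_ring_dw(unsigned num_gpu_pages)
{
	unsigned size_in_dw = num_gpu_pages * (4096 / 4);	/* dwords to copy */
	unsigned num_loops  = DIV_ROUND_UP(size_in_dw, 0xFFFE);	/* copy packets */
	return num_loops * 4 + 8;				/* 4 dwords/packet + sync/fence */
}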
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 2514123d2d00..be85f75aedda 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -721,12 +721,7 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static int r600_count_pipe_bits(uint32_t val)
{
- int i, ret = 0;
- for (i = 0; i < 32; i++) {
- ret += val & 1;
- val >>= 1;
- }
- return ret;
+ return hweight32(val);
}
static void r600_gfx_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 211c40252fe0..9b2512bf1a46 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
nby = round_up(nby, track->npipes * 8);
} else {
- /* htile widht & nby (8 or 4) make 2 bits number */
- tmp = track->htile_surface & 3;
+ /* always assume 8x8 htile */
/* align is htile align * 8, htile align vary according to
* number of pipe and tile width and nby
*/
switch (track->npipes) {
case 8:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 64 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
break;
case 4:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
break;
case 2:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
break;
case 1:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 8 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
break;
default:
dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
}
}
/* compute number of htile */
- nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
- nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
- size = nbx * nby * 4;
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_HTILE_SURFACE:
track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
track->db_dirty = true;
break;
case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
+ size = command & 0x1fffff;
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ }
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2276,6 +2294,35 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
case PACKET3_COPY_DW:
if (pkt->count != 4) {
DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2429,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
+ if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
}
kfree(parser->chunks);
kfree(parser->chunks_array);
@@ -2496,3 +2545,209 @@ void r600_cs_legacy_init(void)
{
r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}
+
+/*
+ * DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: reloc informations
+ *
+ * Return the next reloc, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ unsigned idx;
+
+ *cs_reloc = NULL;
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ idx = p->dma_reloc_idx;
+ if (idx >= p->nrelocs) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, p->nrelocs);
+ return -EINVAL;
+ }
+ *cs_reloc = p->relocs_ptr[idx];
+ p->dma_reloc_idx++;
+ return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc;
+ u32 header, cmd, count, tiled;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 5;
+ } else {
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+5);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+5);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 7;
+ } else {
+ if (p->family >= CHIP_RV770) {
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ } else {
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+ p->idx += 4;
+ }
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ if (p->family < CHIP_RV770) {
+ DRM_ERROR("Constant Fill is 7xx only !\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
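The new r600_dma_cs_parse() walks the DMA IB one packet at a time, pulling the command, dword count and tiled flag out of each header with the GET_DMA_* macros defined above. A standalone sketch (illustration only; the header value is hypothetical) of how one header dword decodes under that layout ([31:28] command, [23] tiled flag, [15:0] dword count):

	#include <stdint.h>
	#include <stdio.h>

	#define GET_DMA_CMD(h)   (((h) & 0xf0000000) >> 28)
	#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
	#define GET_DMA_T(h)     (((h) & 0x00800000) >> 23)

	int main(void)
	{
		/* hypothetical header: write packet (0x2), linear, 16 dwords */
		uint32_t header = (0x2u << 28) | (0u << 23) | 16u;

		printf("cmd=0x%x tiled=%u count=%u\n",
		       GET_DMA_CMD(header), GET_DMA_T(header),
		       GET_DMA_COUNT(header));
		return 0;
	}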
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 2b960cb5c18a..909219b1bf80 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -96,6 +96,15 @@
#define R600_CONFIG_F0_BASE 0x542C
#define R600_CONFIG_APER_SIZE 0x5430
+#define R600_BIF_FB_EN 0x5490
+#define R600_FB_READ_EN (1 << 0)
+#define R600_FB_WRITE_EN (1 << 1)
+
+#define R600_CITF_CNTL 0x200c
+#define R600_BLACKOUT_MASK 0x00000003
+
+#define R700_MC_CITF_CNTL 0x25c0
+
#define R600_ROM_CNTL 0x1600
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fa6f37099ba9..4a53402b1852 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -590,9 +590,59 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17)
+/* async DMA */
+#define DMA_TILING_CONFIG 0x3ec4
+#define DMA_CONFIG 0x3e4c
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_MODE 0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */
+#define DMA_PACKET_NOP 0xf
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
-# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
@@ -637,7 +687,9 @@
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
+# define RV770_SOFT_RESET_DMA (1 << 20)
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
@@ -1134,6 +1186,38 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23)
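r600d.h now documents the 6-dword PACKET3_CP_DMA layout and its COMMAND flags, which the CS checker earlier in this patch validates (rejecting SAS/DAS register transfers and clamping the byte count to bits [20:0]). A hedged sketch (illustration only, not part of the patch) of composing that final COMMAND/BYTE_COUNT dword for a plain memory-to-memory copy:

	#include <stdint.h>

	#define PACKET3_CP_DMA_CMD_SAS (1 << 26)	/* src address space is a register */
	#define PACKET3_CP_DMA_CMD_DAS (1 << 27)	/* dst address space is a register */

	static uint32_t cp_dma_command(uint32_t byte_count)
	{
		/* mem-to-mem copy: leave SAS/DAS (and SAIC/DAIC) clear and put
		 * the byte count in bits [20:0], matching the checker's mask */
		return byte_count & 0x1fffff;
	}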
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8c42d54c2e26..a08f657329a0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 3
+#define RADEON_NUM_RINGS 5
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
@@ -122,11 +122,21 @@ extern int radeon_lockup_timeout;
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX 3
+/* cayman adds a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX 4
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+/* reset flags */
+#define RADEON_RESET_GFX (1 << 0)
+#define RADEON_RESET_COMPUTE (1 << 1)
+#define RADEON_RESET_DMA (1 << 2)
+
/*
* Errata workarounds.
*/
@@ -220,12 +230,13 @@ struct radeon_fence {
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
@@ -642,6 +653,8 @@ struct radeon_ring {
u32 ptr_reg_mask;
u32 nop;
u32 idx;
+ u64 last_semaphore_signal_addr;
+ u64 last_semaphore_wait_addr;
};
/*
@@ -787,6 +800,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
/*
* CS.
*/
@@ -824,6 +846,7 @@ struct radeon_cs_parser {
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
+ unsigned dma_reloc_idx;
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
@@ -883,7 +906,9 @@ struct radeon_wb {
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
#define R600_WB_EVENT_OFFSET 3072
/**
@@ -1539,6 +1564,8 @@ struct radeon_device {
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
+ /* protects concurrent MM_INDEX/DATA based register access */
+ spinlock_t mmio_idx_lock;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
@@ -1614,8 +1641,10 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
@@ -1631,9 +1660,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
-#define RREG32(reg) r100_mm_rreg(rdev, (reg))
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
-#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1689,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
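radeon.h now threads an always_indirect flag through r100_mm_rreg()/r100_mm_wreg() and exposes it as RREG32_IDX/WREG32_IDX, with the new mmio_idx_lock protecting the shared MM_INDEX/MM_DATA window. The helper bodies are not part of this hunk, so the following is only a sketch of the access pattern that lock is assumed to serialize:

	static uint32_t mm_rreg_indirect_sketch(struct radeon_device *rdev,
						uint32_t reg)
	{
		unsigned long flags;
		uint32_t val;

		/* a single index/data pair is shared by all indirect
		 * accesses, hence the spinlock added in this patch */
		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		writel(reg, rdev->rmmio + RADEON_MM_INDEX);
		val = readl(rdev->rmmio + RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
		return val;
	}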
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 654520b95ab7..0b202c07fe50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &rv770_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &rv770_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
},
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
}
},
.irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
}
},
.irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
}
},
.irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
.copy = {
.blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = NULL,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &si_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &si_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5e3a0e5c6be1..15d70e613076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
struct rv515_mc_save {
u32 vga_render_control;
u32 vga_hdp_control;
+ bool crtc_enabled[2];
};
int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -388,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/*
* evergreen
@@ -416,6 +435,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
int evergreen_irq_set(struct radeon_device *rdev);
int evergreen_irq_process(struct radeon_device *rdev);
extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +448,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/*
* cayman
@@ -449,6 +477,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +509,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 45b660b27cfc..3e403bdda58f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
of_machine_is_compatible("PowerBook6,7")) {
/* ibook */
rdev->mode_info.connector_table = CT_IBOOK;
+ } else if (of_machine_is_compatible("PowerMac3,5")) {
+ /* PowerMac G4 Silver radeon 7500 */
+ rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
} else if (of_machine_is_compatible("PowerMac4,4")) {
/* emac */
rdev->mode_info.connector_table = CT_EMAC;
@@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
+ case CT_MAC_G4_SILVER:
+ DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
@@ -2419,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
}
+ /* RV100 board with external TMDS bit mis-set.
+ * Actually uses internal TMDS, clear the bit.
+ */
+ if (dev->pdev->device == 0x5159 &&
+ dev->pdev->subsystem_vendor == 0x1014 &&
+ dev->pdev->subsystem_device == 0x029A) {
+ tmp &= ~(1 << 4);
+ }
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
@@ -3246,11 +3305,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
while (ram--) {
addr = ram * 1024 * 1024;
/* write to each page */
- WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
- WREG32(RADEON_MM_DATA, 0xdeadbeef);
+ WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
/* read back and verify */
- WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
- if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+ if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b884c362a8c2..2399f25ec037 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
return connector->status;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (encoder) {
/* setup ddc on the bridge */
radeon_atom_ext_encoder_setup_ddc(encoder);
- if (radeon_ddc_probe(radeon_connector)) /* try DDC */
+ /* bridge chips are always aux */
+ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector))
+ /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+ if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
}
@@ -1599,7 +1601,7 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
break;
@@ -1608,13 +1610,13 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
subpixel_order = SubPixelHorizontalRGB;
@@ -1625,14 +1627,14 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = false;
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1651,7 +1653,7 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1669,7 +1671,7 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1692,23 +1694,23 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
@@ -1732,17 +1734,17 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
@@ -1771,17 +1773,17 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
@@ -1806,7 +1808,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1819,10 +1821,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.tv_std_property,
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
@@ -1843,7 +1845,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1922,7 +1924,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1940,7 +1942,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1959,7 +1961,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
@@ -1983,10 +1985,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
*/
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
radeon_connector->dac_load_detect = false;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
radeon_connector->dac_load_detect);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
@@ -2002,7 +2004,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 8b2797dc7b64..9143fc45e35b 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
}
}
-u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
-{
- u32 ret;
-
- if (addr < 0x10000)
- ret = DRM_READ32(dev_priv->mmio, addr);
- else {
- DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
- ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
- }
-
- return ret;
-}
-
static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41672cc563fb..5407459e56d2 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return 0;
}
chunk = &p->chunks[p->chunk_relocs_idx];
+ p->dma_reloc_idx = 0;
/* FIXME: we assume that each relocs use 4 dwords */
p->nrelocs = chunk->length_dw / 4;
p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
} else
p->ring = RADEON_RING_TYPE_GFX_INDEX;
break;
+ case RADEON_CS_RING_DMA:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (p->priority > 0)
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ else
+ p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+ } else if (p->rdev->family >= CHIP_R600) {
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ } else {
+ return -EINVAL;
+ }
+ break;
}
return 0;
}
@@ -266,13 +279,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
- if ((p->rdev->flags & RADEON_IS_AGP)) {
+ if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
- kfree(p->chunks[i].kpage[0]);
- kfree(p->chunks[i].kpage[1]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+ p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
return -ENOMEM;
}
}
@@ -570,7 +585,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
- bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
+ bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+ false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0fe56c9f64bd..0d67674b64b1 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
- WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
- EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
- WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
+ u32 reg;
switch (radeon_crtc->crtc_id) {
case 0:
- WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+ reg = RADEON_CRTC_GEN_CNTL;
break;
case 1:
- WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+ reg = RADEON_CRTC2_GEN_CNTL;
break;
default:
return;
}
- WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+ WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
}
}
@@ -240,7 +241,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
y = 0;
}
- if (ASIC_IS_AVIVO(rdev)) {
+ /* fixed on DCE6 and newer */
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
int i = 0;
struct drm_crtc *crtc_p;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e2f5f888c374..0d6562bb0c93 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -429,7 +429,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ if (efi_enabled(EFI_BOOT) &&
+ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
return false;
/* first check CRTCs */
@@ -897,6 +898,25 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
/**
+ * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
+ * needed for waking up.
+ *
+ * @pdev: pci dev pointer
+ */
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
+
+ /* 6600m in a macbook pro */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ pdev->subsystem_device == 0x00e2) {
+ printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
@@ -910,10 +930,19 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
+ unsigned d3_delay = dev->pdev->d3_delay;
+
printk(KERN_INFO "radeon: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ dev->pdev->d3_delay = 20;
+
radeon_resume_kms(dev);
+
+ dev->pdev->d3_delay = d3_delay;
+
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
@@ -1059,6 +1088,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
+ spin_lock_init(&rdev->mmio_idx_lock);
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
@@ -1163,6 +1193,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
+ bool force_completion = false;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -1205,8 +1236,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_empty_locked(rdev, i);
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* defer the GPU reset until resume */
+ force_completion = true;
+ }
+ }
+ if (force_completion) {
+ radeon_fence_driver_force_completion(rdev);
+ }
mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -1337,7 +1376,6 @@ retry:
}
radeon_restore_bios_scratch_regs(rdev);
- drm_helper_resume_force_mode(rdev->ddev);
if (!r) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1357,11 +1395,14 @@ retry:
}
}
} else {
+ radeon_fence_driver_force_completion(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
kfree(ring_data[i]);
}
}
+ drm_helper_resume_force_mode(rdev->ddev);
+
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
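The spin_lock_init(&rdev->mmio_idx_lock) added above backs the WREG32_IDX()/RREG32_IDX() accessors used in the radeon_cursor.c hunks. A hedged sketch of what such an indexed MMIO helper might look like, assuming the RADEON_MM_INDEX/RADEON_MM_DATA pair and the new lock; the real helpers live in radeon.h/radeon_device.c, outside this excerpt:

/* hedged sketch only; the real WREG32_IDX() helper is defined elsewhere */
#include "radeon.h"

static void example_wreg32_idx(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	/* keep the INDEX/DATA pair atomic so concurrent users cannot
	 * clobber each other's selected index */
	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	writel(reg, rdev->rmmio + RADEON_MM_INDEX);
	writel(v, rdev->rmmio + RADEON_MM_DATA);
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}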
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bfa2a6015727..05c96fa0b051 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
work->old_rbo = rbo;
obj = new_radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&rbo->tbo.bdev->fence_lock);
if (rbo->tbo.sync_obj)
work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+ spin_unlock(&rbo->tbo.bdev->fence_lock);
+
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
@@ -695,10 +699,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
- if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
- (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE)) {
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+ ENCODER_OBJECT_ID_NONE) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
@@ -1106,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,
}
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL)
+ if (radeon_fb == NULL) {
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(-ENOMEM);
+ }
ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {
kfree(radeon_fb);
drm_gem_object_unreference_unlocked(obj);
- return NULL;
+ return ERR_PTR(ret);
}
return &radeon_fb->base;
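The radeon_user_framebuffer_create() fix above stops leaking the GEM reference on the kzalloc failure path and returns ERR_PTR(ret) instead of NULL, which is the encoding the DRM core's callers decode with IS_ERR()/PTR_ERR(). A minimal, self-contained sketch of that convention (hypothetical names, not from the patch):

/* minimal sketch of the ERR_PTR()/IS_ERR() convention, hypothetical names */
#include <linux/err.h>
#include <linux/slab.h>

struct foo { int payload; };

static struct foo *example_create(void)
{
	struct foo *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* encode the errno, never return NULL */
	return obj;
}

static int example_caller(void)
{
	struct foo *obj = example_create();

	if (IS_ERR(obj))
		return PTR_ERR(obj);		/* recover the negative errno */
	kfree(obj);
	return 0;
}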
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 07eb84e8a8a4..d9bf96ee299a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -65,9 +65,14 @@
* 2.22.0 - r600 only: RESOLVE_BOX allowed
* 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
* 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ * 2.25.0 - eg+: new info request for num SE and num SH
+ * 2.26.0 - r600-eg: fix htile size computation
+ * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ * 2.28.0 - r600-eg: Add MEM_WRITE packet support
+ * 2.29.0 - R500 FP16 color clear registers
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 24
+#define KMS_DRIVER_MINOR 29
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -281,12 +286,15 @@ static struct drm_driver driver_old = {
static struct drm_driver kms_driver;
-static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -295,13 +303,19 @@ static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
kfree(ap);
+
+ return 0;
}
-static int __devinit
-radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int radeon_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
+ int ret;
+
/* Get rid of things like offb */
- radeon_kick_out_firmware_fb(pdev);
+ ret = radeon_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
return drm_get_pci_dev(pdev, ent, &kms_driver);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index a1b59ca96d01..e7fdf163a8ca 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
-extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 22bd6c2c2740..34356252567a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+ int r;
- while(1) {
- int r;
- r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ if (r) {
if (r == -EDEADLK) {
- mutex_unlock(&rdev->ring_lock);
- r = radeon_gpu_reset(rdev);
- mutex_lock(&rdev->ring_lock);
- if (!r)
- continue;
- }
- if (r) {
- dev_err(rdev->dev, "error waiting for ring to become"
- " idle (%d)\n", r);
+ return -EDEADLK;
}
- return;
+ dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+ ring, r);
}
+ return 0;
}
/**
@@ -772,7 +766,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
int r;
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- if (rdev->wb.use_event) {
+ if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
rdev->fence_drv[ring].scratch_reg = 0;
index = R600_WB_EVENT_OFFSET + ring * 4;
} else {
@@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
*/
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
- int ring;
+ int ring, r;
mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- radeon_fence_wait_empty_locked(rdev, ring);
+ r = radeon_fence_wait_empty_locked(rdev, ring);
+ if (r) {
+ /* no need to trigger GPU reset as we are unloading */
+ radeon_fence_driver_force_completion(rdev);
+ }
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
@@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
mutex_unlock(&rdev->ring_lock);
}
+/**
+ * radeon_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure make sure no process keeps waiting on a
+ * fence that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+ }
+}
+
/*
* Fence debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4debd60e5aa6..6e24f84755b5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
{
struct radeon_bo_va *bo_va;
- BUG_ON(!atomic_read(&bo->tbo.reserved));
list_for_each_entry(bo_va, &bo->va, bo_list) {
bo_va->valid = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index c5bddd630eb9..fc60b74ee304 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
{
u8 out = 0x0;
u8 buf[8];
@@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
- ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ if (use_aux) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+ ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
+ } else {
+ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ }
+
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dc781c49b96b..9c312f9afb68 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_MAX_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_shader_engines;
+ else if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.num_ses;
+ else
+ value = 1;
+ break;
+ case RADEON_INFO_MAX_SH_PER_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_sh_per_se;
+ else
+ return -EINVAL;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
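The new RADEON_INFO_MAX_SE / RADEON_INFO_MAX_SH_PER_SE requests above correspond to the 2.25.0 interface bump noted in radeon_drv.c. A hedged userspace sketch of how such a request might be issued, assuming libdrm's drmCommandWriteRead() and the defines from radeon_drm.h (the function name is illustrative):

/* hypothetical userspace sketch, assuming libdrm and radeon_drm.h */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>

static int query_num_shader_engines(int fd, uint32_t *num_se)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_MAX_SE;
	info.value = (uintptr_t)num_se;	/* kernel copies the result here */

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}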
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index f5ba2241dacc..62cd512f5c8d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
enum drm_connector_status found = connector_status_disconnected;
bool color = true;
+ /* Don't bother on RN50; those chips are often connected to remote
+ * console hardware and load detection frequently fails on them. To make
+ * everyone happy, report the encoder as always connected.
+ */
+ if (ASIC_IS_RN50(rdev)) {
+ return connector_status_connected;
+ }
+
/* save the regs we need */
vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92c5f473cf08..4003f5a68c09 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,7 +209,8 @@ enum radeon_connector_table {
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
- CT_SAM440EP
+ CT_SAM440EP,
+ CT_MAC_G4_SILVER
};
enum radeon_dvo_chip {
@@ -427,7 +428,7 @@ struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
- u8 dpcd[8];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
@@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b91118ccef86..d3aface2d12d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -88,10 +88,20 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
if (domain & RADEON_GEM_DOMAIN_VRAM)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
- if (domain & RADEON_GEM_DOMAIN_GTT)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- if (domain & RADEON_GEM_DOMAIN_CPU)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (domain & RADEON_GEM_DOMAIN_GTT) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ }
+ }
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+ }
+ }
if (!c)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
@@ -140,7 +150,7 @@ int radeon_bo_create(struct radeon_device *rdev,
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, 0, !kernel, NULL,
+ &bo->placement, page_align, !kernel, NULL,
acc_size, sg, &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
@@ -240,7 +250,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -269,7 +279,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -355,7 +365,7 @@ int radeon_bo_list_validate(struct list_head *head)
retry:
radeon_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false, false);
+ true, false);
if (unlikely(r)) {
if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
domain |= RADEON_GEM_DOMAIN_GTT;
@@ -384,7 +394,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
int steal;
int i;
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo));
if (!bo->tiling_flags)
return 0;
@@ -510,7 +520,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
uint32_t *tiling_flags,
uint32_t *pitch)
{
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo));
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
if (pitch)
@@ -520,7 +530,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop)
{
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
@@ -575,7 +585,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
if (unlikely(r != 0))
return r;
offset = bo->mem.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 93cd491fff2e..5fc86b03043b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
{
- return !!atomic_read(&bo->tbo.reserved);
+ return ttm_bo_is_reserved(&bo->tbo);
}
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aa14dbb7e4fb..0bfa656aa87d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
- int i;
+ int i, r;
/* no need to take locks, etc. if nothing's going to change */
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
@@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
- if (ring->ready)
- radeon_fence_wait_empty_locked(rdev, i);
+ if (!ring->ready) {
+ continue;
+ }
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* needs a GPU reset; don't reset here */
+ mutex_unlock(&rdev->ring_lock);
+ up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ return;
+ }
}
radeon_unmap_vram_bos(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index e09521858f64..26c23bb651c6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -194,6 +194,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
bo = dma_buf->priv;
if (bo->gem_base.dev == dev) {
drm_gem_object_reference(&bo->gem_base);
+ dma_buf_put(dma_buf);
return &bo->gem_base;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index ebd69562ef6c..cd72062d5a91 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
{
int r;
+ /* make sure we aren't trying to allocate more space than there is on the ring */
+ if (ndw > (ring->ring_size / 4))
+ return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
@@ -770,22 +773,30 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
unsigned count, i, j;
+ u32 tmp;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
- seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
- seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+ tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
if (ring->rptr_save_reg) {
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
RREG32(ring->rptr_save_reg));
}
- seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
- seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+ seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+ seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
- i = ring->rptr;
- for (j = 0; j <= count; j++) {
- seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ /* print 32 dw before the current rptr, as the last executed
+ * packet is often the root of the issue
+ */
+ i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
i = (i + 1) & ring->ptr_mask;
}
return 0;
@@ -794,11 +805,15 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+ {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
+ {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
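The debugfs dump above now starts 32 dwords before the current read pointer; because the ring size is a power of two, the (rptr + ptr_mask + 1 - 32) & ptr_mask expression wraps cleanly past the start of the ring. A standalone sketch of that index arithmetic with an invented ring size:

/* standalone sketch of the wrap-around index math used above */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t ring_dw = 4096;		/* invented ring size, power of two */
	const uint32_t ptr_mask = ring_dw - 1;
	uint32_t rptr = 5;			/* invented read pointer near the start */

	/* start 32 dwords behind rptr, wrapping past the beginning of the ring */
	uint32_t i = (rptr + ptr_mask + 1 - 32) & ptr_mask;

	assert(i == 4069);			/* 5 - 32 modulo 4096 */

	i = (i + 1) & ptr_mask;			/* stepping forward uses the same mask */
	assert(i == 4070);
	return 0;
}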
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 97f3ece81cd2..8dcc20f53d73 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
/* we assume caller has already allocated space on waiters ring */
radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+ /* for debugging lockup only, used by debugfs ring info files */
+ rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+ rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 587c09a00ba2..fda09c9ea689 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -26,16 +26,31 @@
#include "radeon_reg.h"
#include "radeon.h"
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA 0
+
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-void radeon_test_moves(struct radeon_device *rdev)
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
struct radeon_bo *vram_obj = NULL;
struct radeon_bo **gtt_obj = NULL;
struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned i, n, size;
- int r;
+ int r, ring;
+
+ switch (flag) {
+ case RADEON_TEST_COPY_DMA:
+ ring = radeon_copy_dma_ring_index(rdev);
+ break;
+ case RADEON_TEST_COPY_BLIT:
+ ring = radeon_copy_blit_ring_index(rdev);
+ break;
+ default:
+ DRM_ERROR("Unknown copy method\n");
+ return;
+ }
size = 1024 * 1024;
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
}
}
+void radeon_test_moves(struct radeon_device *rdev)
+{
+ if (rdev->asic->copy.dma)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+ if (rdev->asic->copy.blit)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *ringA,
struct radeon_ring *ringB)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5ebe1b3e5db2..93f760e27a92 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -38,6 +38,7 @@
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/swiotlb.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -216,7 +217,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait_reserve, bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
@@ -265,15 +266,15 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
&fence);
/* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+ evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -294,7 +295,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_reserve, no_wait_gpu);
+ interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -308,11 +309,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -320,7 +321,7 @@ out_cleanup:
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -340,15 +341,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+ interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -359,7 +361,7 @@ out_cleanup:
static int radeon_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -388,18 +390,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
return r;
}
@@ -471,13 +473,12 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
-static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
- bool lazy, bool interruptible)
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}
-static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int radeon_sync_obj_flush(void *sync_obj)
{
return 0;
}
@@ -492,7 +493,7 @@ static void *radeon_sync_obj_ref(void *sync_obj)
return radeon_fence_ref((struct radeon_fence *)sync_obj);
}
-static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool radeon_sync_obj_signaled(void *sync_obj)
{
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0f656b111c15..a072fa8c46b0 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,5 +1,6 @@
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
0x000084FC CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 911a8fbd32bb..78d5e99d759d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -324,6 +324,8 @@ rv515 0x6d40
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 785d09590b24..435ed3551364 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
void rv515_debugfs(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,116 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
+
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
- /* Stop all video */
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+ /* disable VGA render */
WREG32(R_000300_VGA_RENDER_CONTROL, 0);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
- WREG32(R_006080_D1CRTC_CONTROL, 0);
- WREG32(R_006880_D2CRTC_CONTROL, 0);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
- WREG32(R_000330_D1VGA_CONTROL, 0);
- WREG32(R_000338_D2VGA_CONTROL, 0);
+ /* blank the display controllers */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+
+ radeon_mc_wait_for_idle(rdev);
+
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->family >= CHIP_RV770)
+ blackout = RREG32(R700_MC_CITF_CNTL);
+ else
+ blackout = RREG32(R600_CITF_CNTL);
+ if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+ /* Block CPU access */
+ WREG32(R600_BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout |= R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, blackout);
+ else
+ WREG32(R600_CITF_CNTL, blackout);
+ }
+ }
+ /* wait for the MC to settle */
+ udelay(100);
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
{
- WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
- /* Unlock host access */
+ u32 tmp, frame_count;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->family >= CHIP_RV770) {
+ if (i == 1) {
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ } else {
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ }
+ }
+ WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+ WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+ if (rdev->family >= CHIP_R600) {
+ /* unblackout the MC */
+ if (rdev->family >= CHIP_RV770)
+ tmp = RREG32(R700_MC_CITF_CNTL);
+ else
+ tmp = RREG32(R600_CITF_CNTL);
+ tmp &= ~R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, tmp);
+ else
+ WREG32(R600_CITF_CNTL, tmp);
+ /* allow CPU access */
+ WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+ }
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ /* Unlock vga access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 79814a08c8e5..1b2444f4d8f4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -884,9 +887,83 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
static int rv770_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -932,6 +1009,12 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -941,11 +1024,20 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = rv770_cp_load_microcode(rdev);
if (r)
return r;
@@ -953,6 +1045,10 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1091,7 @@ int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r700_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1162,9 @@ int rv770_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1078,6 +1177,7 @@ int rv770_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -1093,6 +1193,7 @@ void rv770_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b0adfc595d75..20e29d23d348 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -109,6 +109,9 @@
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
+#define DMA_TILING_CONFIG 0x3ec8
+#define DMA_TILING_CONFIG2 0xd0b8
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -358,6 +361,26 @@
#define WAIT_UNTIL 0x8040
+/* async DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
+
#define SRBM_STATUS 0x0E50
/* DCE 3.2 HDMI */
@@ -551,6 +574,54 @@
#define HDMI_OFFSET0 (0x7400 - 0x7400)
#define HDMI_OFFSET1 (0x7800 - 0x7400)
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x7300
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
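The DMA_PACKET() header macro added above packs the command, tiling, semaphore and dword-count fields into a single dword, which rv770_copy_dma() uses to emit copy packets of up to 0xFFFF dwords per loop. A standalone sketch checking the encoding, using only the macros from this hunk:

/* standalone sketch; macros copied from the rv770d.h hunk above */
#include <assert.h>
#include <stdint.h>

#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) |	\
				  (((t) & 0x1) << 23) |		\
				  (((s) & 0x1) << 22) |		\
				  (((n) & 0xFFFF) << 0))
#define DMA_PACKET_COPY 0x3

int main(void)
{
	/* header for a maximum-size (0xFFFF dword) linear copy */
	uint32_t hdr = DMA_PACKET(DMA_PACKET_COPY, 0, 0, 0xFFFF);

	assert(hdr == 0x3000FFFFu);
	assert((hdr >> 28) == DMA_PACKET_COPY);	/* command lives in bits 31:28 */
	assert((hdr & 0xFFFF) == 0xFFFF);	/* dword count lives in bits 15:0 */
	return 0;
}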
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 010156dd949f..ae8b48205a6c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
si_tiling_mode_table_init(rdev);
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
udelay(50);
}
@@ -2121,15 +2126,13 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-static int si_gpu_soft_reset(struct radeon_device *rdev)
+static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2140,10 +2143,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
- evergreen_mc_stop(rdev, &save);
- if (radeon_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
@@ -2168,8 +2168,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
+
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2180,13 +2179,81 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+}
+
+static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+ evergreen_mc_stop(rdev, &save);
+ if (radeon_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ si_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ si_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int si_asic_reset(struct radeon_device *rdev)
{
- return si_gpu_soft_reset(rdev);
+ return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
/* MC */
@@ -2426,9 +2493,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
/* enable context1-15 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 0);
+ WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
si_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2534,6 +2612,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
+ u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -2633,6 +2712,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
@@ -2809,30 +2934,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 2 + count * 2;
- if (ndw > 0x3FFE)
- ndw = 0x3FFE;
-
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
- for (; ndw > 2; ndw -= 2, --count, pe += 8) {
- uint64_t value;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- addr += incr;
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 2 + count * 2;
+ if (ndw > 0x3FFE)
+ ndw = 0x3FFE;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1)));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe));
+ for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ /* DMA */
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+ radeon_ring_write(ring, pe); /* dst addr */
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ radeon_ring_write(ring, r600_flags); /* mask */
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, value); /* value */
+ radeon_ring_write(ring, upper_32_bits(value));
+ radeon_ring_write(ring, incr); /* increment size */
+ radeon_ring_write(ring, 0);
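+				/* each PTE_PDE packet programs ndw / 2 page
+				 * table entries, so advance pe, addr and
+				 * count accordingly
+				 */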
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
}
}
@@ -2880,6 +3061,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0x0);
}
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
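+	/* update the page directory base for this VM through an SRBM
+	 * register write packet
+	 */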
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ } else {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts 0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
/*
* RLC
*/
@@ -3048,6 +3255,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
+ tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3167,6 +3378,7 @@ int si_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 dma_cntl, dma_cntl1;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3187,6 +3399,9 @@ int si_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
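+	/* read back the DMA control registers with the trap interrupt
+	 * masked; it is re-enabled below when a ring has waiters
+	 */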
+ dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3200,6 +3415,15 @@ int si_irq_set(struct radeon_device *rdev)
DRM_DEBUG("si_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3259,6 +3483,9 @@ int si_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3684,6 +3911,16 @@ restart_ih:
break;
}
break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
@@ -3707,9 +3944,17 @@ restart_ih:
break;
}
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
+	case 244: /* DMA1 trap event */
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -3733,6 +3978,80 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
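+	/* a single DMA COPY packet can transfer at most 0xFFFFF bytes, so
+	 * larger copies are split across multiple packets
+	 */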
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
/*
* startup/shutdown callbacks
*/
@@ -3804,6 +4123,18 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = si_irq_init(rdev);
if (r) {
@@ -3834,6 +4165,22 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
r = si_cp_load_microcode(rdev);
if (r)
return r;
@@ -3841,6 +4188,10 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3882,9 +4233,7 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
si_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ cayman_dma_stop(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -3962,6 +4311,14 @@ int si_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3974,6 +4331,7 @@ int si_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
@@ -4002,6 +4360,7 @@ void si_fini(struct radeon_device *rdev)
r600_blit_fini(rdev);
#endif
si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index a8871afc5b4e..c056aae814f0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -62,6 +62,22 @@
#define SRBM_STATUS 0xE50
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
@@ -91,7 +107,18 @@
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -104,6 +131,9 @@
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
@@ -835,6 +865,54 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+	/* 0 - DST_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_DEST_BASE_0_ENA (1 << 0)
@@ -922,4 +1000,63 @@
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
#define PACKET3_SWITCH_BUFFER 0x8B
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_TILING_CONFIG 0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((b) & 0x1) << 26) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
#endif
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0e7a9306bd0c..d917a411ca85 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
connector->encoder = encoder;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
return 0;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 1c350fc4e449..d1d5306ebf24 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -33,7 +33,7 @@
* Hardware initialization
*/
-static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
+static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
{
static const u32 ldmt1r[] = {
[SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
@@ -67,7 +67,7 @@ static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
return 0;
}
-static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
+static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
enum shmob_drm_clk_source clksrc)
{
struct clk *clk;
@@ -330,12 +330,12 @@ static const struct dev_pm_ops shmob_drm_pm_ops = {
* Platform driver
*/
-static int __devinit shmob_drm_probe(struct platform_device *pdev)
+static int shmob_drm_probe(struct platform_device *pdev)
{
return drm_platform_init(&shmob_drm_driver, pdev);
}
-static int __devexit shmob_drm_remove(struct platform_device *pdev)
+static int shmob_drm_remove(struct platform_device *pdev)
{
drm_platform_exit(&shmob_drm_driver, pdev);
@@ -344,7 +344,7 @@ static int __devexit shmob_drm_remove(struct platform_device *pdev)
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
- .remove = __devexit_p(shmob_drm_remove),
+ .remove = shmob_drm_remove,
.driver = {
.owner = THIS_MODULE,
.name = "shmob-drm",
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
new file mode 100644
index 000000000000..be1daf7344d3
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -0,0 +1,23 @@
+config DRM_TEGRA
+ tristate "NVIDIA Tegra DRM"
+ depends on DRM && OF && ARCH_TEGRA
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Choose this option if you have an NVIDIA Tegra SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tegra-drm.
+
+if DRM_TEGRA
+
+config DRM_TEGRA_DEBUG
+ bool "NVIDIA Tegra DRM debug support"
+ help
+ Say yes here to enable debugging support.
+
+endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000000..80f73d1315d0
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := drm.o fb.o dc.o host1x.o
+tegra-drm-y += output.o rgb.o hdmi.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
new file mode 100644
index 000000000000..b6679b36700f
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk/tegra.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_dc_window {
+ fixed20_12 x;
+ fixed20_12 y;
+ fixed20_12 w;
+ fixed20_12 h;
+ unsigned int outx;
+ unsigned int outy;
+ unsigned int outw;
+ unsigned int outh;
+ unsigned int stride;
+ unsigned int fmt;
+};
+
+static const struct drm_crtc_funcs tegra_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+};
+
+static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
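+/*
+ * Compute the DDA (digital differential analyzer) increment used by the
+ * window scaler: the source-to-destination size ratio in fixed point,
+ * clamped to the hardware maximum for the given direction and bytes per
+ * pixel.
+ */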
+static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
+ unsigned int bpp)
+{
+ fixed20_12 outf = dfixed_init(out);
+ u32 dda_inc;
+ int max;
+
+ if (v)
+ max = 15;
+ else {
+ switch (bpp) {
+ case 2:
+ max = 8;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ /* fallthrough */
+ case 4:
+ max = 4;
+ break;
+ }
+ }
+
+ outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+ inf.full -= dfixed_const(1);
+
+ dda_inc = dfixed_div(inf, outf);
+ dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+ return dda_inc;
+}
+
+static inline u32 compute_initial_dda(fixed20_12 in)
+{
+ return dfixed_frac(in);
+}
+
+static int tegra_dc_set_timings(struct tegra_dc *dc,
+ struct drm_display_mode *mode)
+{
+ /* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
+ unsigned int h_ref_to_sync = 0;
+ unsigned int v_ref_to_sync = 0;
+ unsigned long value;
+
+ tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+
+ value = (v_ref_to_sync << 16) | h_ref_to_sync;
+ tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+
+ value = ((mode->vsync_end - mode->vsync_start) << 16) |
+ ((mode->hsync_end - mode->hsync_start) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
+
+ value = ((mode->vtotal - mode->vsync_end) << 16) |
+ ((mode->htotal - mode->hsync_end) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+ value = ((mode->vsync_start - mode->vdisplay) << 16) |
+ ((mode->hsync_start - mode->hdisplay) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
+
+ value = (mode->vdisplay << 16) | mode->hdisplay;
+ tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
+
+ return 0;
+}
+
+static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ unsigned long *div)
+{
+ unsigned long pclk = mode->clock * 1000, rate;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_output *output = NULL;
+ struct drm_encoder *encoder;
+ long err;
+
+ list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc) {
+ output = encoder_to_output(encoder);
+ break;
+ }
+
+ if (!output)
+ return -ENODEV;
+
+ /*
+ * This assumes that the display controller will divide its parent
+ * clock by 2 to generate the pixel clock.
+ */
+ err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to setup clock: %ld\n", err);
+ return err;
+ }
+
+ rate = clk_get_rate(dc->clk);
+ *div = (rate * 2 / pclk) - 2;
+
+ DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
+
+ return 0;
+}
+
+static int tegra_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned int h_dda, v_dda, bpp;
+ struct tegra_dc_window win;
+ unsigned long div, value;
+ int err;
+
+ err = tegra_crtc_setup_clk(crtc, mode, &div);
+ if (err) {
+ dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
+ return err;
+ }
+
+ /* program display mode */
+ tegra_dc_set_timings(dc, mode);
+
+ value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+ tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+ value &= ~LVS_OUTPUT_POLARITY_LOW;
+ value &= ~LHS_OUTPUT_POLARITY_LOW;
+ tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+ value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+ DISP_ORDER_RED_BLUE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+ value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+
+ /* setup window parameters */
+ memset(&win, 0, sizeof(win));
+ win.x.full = dfixed_const(0);
+ win.y.full = dfixed_const(0);
+ win.w.full = dfixed_const(mode->hdisplay);
+ win.h.full = dfixed_const(mode->vdisplay);
+ win.outx = 0;
+ win.outy = 0;
+ win.outw = mode->hdisplay;
+ win.outh = mode->vdisplay;
+
+ switch (crtc->fb->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+ break;
+
+ case DRM_FORMAT_RGB565:
+ win.fmt = WIN_COLOR_DEPTH_B5G6R5;
+ break;
+
+ default:
+ win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+ WARN_ON(1);
+ break;
+ }
+
+ bpp = crtc->fb->bits_per_pixel / 8;
+ win.stride = crtc->fb->pitches[0];
+
+ /* program window registers */
+ value = WINDOW_A_SELECT;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ value = V_POSITION(win.outy) | H_POSITION(win.outx);
+ tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+ value = V_SIZE(win.outh) | H_SIZE(win.outw);
+ tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+ value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
+ H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
+ tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+ h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
+ v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
+
+ value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+ tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+ h_dda = compute_initial_dda(win.x);
+ v_dda = compute_initial_dda(win.y);
+
+ tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+ tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
+ tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
+ DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
+
+ value = WIN_ENABLE;
+
+ if (bpp < 24)
+ value |= COLOR_EXPAND;
+
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
+
+ return 0;
+}
+
+static void tegra_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned int syncpt;
+ unsigned long value;
+
+ /* hardware initialization */
+ tegra_periph_reset_deassert(dc->clk);
+ usleep_range(10000, 20000);
+
+ if (dc->pipe)
+ syncpt = SYNCPT_VBLANK1;
+ else
+ syncpt = SYNCPT_VBLANK0;
+
+ /* initialize display controller */
+ tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ /* initialize timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+}
+
+static void tegra_crtc_commit(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned long update_mask;
+ unsigned long value;
+
+ update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+
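+	/* the *_UPDATE bits sit 8 bits above the corresponding *_ACT_REQ
+	 * bits; writing them first latches the newly programmed state, the
+	 * ACT_REQ write below then requests activation
+	 */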
+ tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ value |= FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value |= FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+ .dpms = tegra_crtc_dpms,
+ .mode_fixup = tegra_crtc_mode_fixup,
+ .mode_set = tegra_crtc_mode_set,
+ .prepare = tegra_crtc_prepare,
+ .commit = tegra_crtc_commit,
+ .load_lut = tegra_crtc_load_lut,
+};
+
+static irqreturn_t tegra_drm_irq(int irq, void *data)
+{
+ struct tegra_dc *dc = data;
+ unsigned long status;
+
+ status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+ tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+ if (status & FRAME_END_INT) {
+ /*
+ dev_dbg(dc->dev, "%s(): frame end\n", __func__);
+ */
+ }
+
+ if (status & VBLANK_INT) {
+ /*
+ dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
+ */
+ drm_handle_vblank(dc->base.dev, dc->pipe);
+ }
+
+ if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
+ /*
+ dev_dbg(dc->dev, "%s(): underflow\n", __func__);
+ */
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_dc_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \
+ tegra_dc_readl(dc, name))
+
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE);
+ DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
+ DUMP_REG(DC_CMD_INT_STATUS);
+ DUMP_REG(DC_CMD_INT_MASK);
+ DUMP_REG(DC_CMD_INT_ENABLE);
+ DUMP_REG(DC_CMD_INT_TYPE);
+ DUMP_REG(DC_CMD_INT_POLARITY);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+ DUMP_REG(DC_CMD_STATE_ACCESS);
+ DUMP_REG(DC_CMD_STATE_CONTROL);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+ DUMP_REG(DC_COM_CRC_CONTROL);
+ DUMP_REG(DC_COM_CRC_CHECKSUM);
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
+ DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
+ DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
+ DUMP_REG(DC_COM_PIN_MISC_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM0_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
+ DUMP_REG(DC_COM_PIN_PM1_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
+ DUMP_REG(DC_COM_SPI_CONTROL);
+ DUMP_REG(DC_COM_SPI_START_BYTE);
+ DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
+ DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
+ DUMP_REG(DC_COM_HSPI_CS_DC);
+ DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
+ DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
+ DUMP_REG(DC_COM_GPIO_CTRL);
+ DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
+ DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+ DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+ DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
+ DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+ DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+ DUMP_REG(DC_DISP_REF_TO_SYNC);
+ DUMP_REG(DC_DISP_SYNC_WIDTH);
+ DUMP_REG(DC_DISP_BACK_PORCH);
+ DUMP_REG(DC_DISP_ACTIVE);
+ DUMP_REG(DC_DISP_FRONT_PORCH);
+ DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+ DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+ DUMP_REG(DC_DISP_M0_CONTROL);
+ DUMP_REG(DC_DISP_M1_CONTROL);
+ DUMP_REG(DC_DISP_DI_CONTROL);
+ DUMP_REG(DC_DISP_PP_CONTROL);
+ DUMP_REG(DC_DISP_PP_SELECT_A);
+ DUMP_REG(DC_DISP_PP_SELECT_B);
+ DUMP_REG(DC_DISP_PP_SELECT_C);
+ DUMP_REG(DC_DISP_PP_SELECT_D);
+ DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+ DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+ DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+ DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+ DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+ DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+ DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+ DUMP_REG(DC_DISP_BORDER_COLOR);
+ DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+ DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+ DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+ DUMP_REG(DC_DISP_CURSOR_POSITION);
+ DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+ DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+ DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+ DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+ DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+ DUMP_REG(DC_DISP_SD_CONTROL);
+ DUMP_REG(DC_DISP_SD_CSC_COEFF);
+ DUMP_REG(DC_DISP_SD_LUT(0));
+ DUMP_REG(DC_DISP_SD_LUT(1));
+ DUMP_REG(DC_DISP_SD_LUT(2));
+ DUMP_REG(DC_DISP_SD_LUT(3));
+ DUMP_REG(DC_DISP_SD_LUT(4));
+ DUMP_REG(DC_DISP_SD_LUT(5));
+ DUMP_REG(DC_DISP_SD_LUT(6));
+ DUMP_REG(DC_DISP_SD_LUT(7));
+ DUMP_REG(DC_DISP_SD_LUT(8));
+ DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
+ DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
+ DUMP_REG(DC_DISP_SD_BL_TF(0));
+ DUMP_REG(DC_DISP_SD_BL_TF(1));
+ DUMP_REG(DC_DISP_SD_BL_TF(2));
+ DUMP_REG(DC_DISP_SD_BL_TF(3));
+ DUMP_REG(DC_DISP_SD_BL_CONTROL);
+ DUMP_REG(DC_DISP_SD_HW_K_VALUES);
+ DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
+ DUMP_REG(DC_WIN_WIN_OPTIONS);
+ DUMP_REG(DC_WIN_BYTE_SWAP);
+ DUMP_REG(DC_WIN_BUFFER_CONTROL);
+ DUMP_REG(DC_WIN_COLOR_DEPTH);
+ DUMP_REG(DC_WIN_POSITION);
+ DUMP_REG(DC_WIN_SIZE);
+ DUMP_REG(DC_WIN_PRESCALED_SIZE);
+ DUMP_REG(DC_WIN_H_INITIAL_DDA);
+ DUMP_REG(DC_WIN_V_INITIAL_DDA);
+ DUMP_REG(DC_WIN_DDA_INC);
+ DUMP_REG(DC_WIN_LINE_STRIDE);
+ DUMP_REG(DC_WIN_BUF_STRIDE);
+ DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+ DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
+ DUMP_REG(DC_WIN_DV_CONTROL);
+ DUMP_REG(DC_WIN_BLEND_NOKEY);
+ DUMP_REG(DC_WIN_BLEND_1WIN);
+ DUMP_REG(DC_WIN_BLEND_2WIN_X);
+ DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+ DUMP_REG(DC_WIN_BLEND32WIN_XY);
+ DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
+ DUMP_REG(DC_WINBUF_START_ADDR);
+ DUMP_REG(DC_WINBUF_START_ADDR_NS);
+ DUMP_REG(DC_WINBUF_START_ADDR_U);
+ DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
+ DUMP_REG(DC_WINBUF_START_ADDR_V);
+ DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
+ DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_dc_show_regs, 0, NULL },
+};
+
+static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
+{
+ unsigned int i;
+ char *name;
+ int err;
+
+ name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
+ dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+
+ if (!dc->debugfs)
+ return -ENOMEM;
+
+ dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!dc->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ dc->debugfs_files[i].data = dc;
+
+ err = drm_debugfs_create_files(dc->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ dc->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ dc->minor = minor;
+
+ return 0;
+
+free:
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+remove:
+ debugfs_remove(dc->debugfs);
+ dc->debugfs = NULL;
+
+ return err;
+}
+
+static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
+{
+ drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
+ dc->minor);
+ dc->minor = NULL;
+
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+
+ debugfs_remove(dc->debugfs);
+ dc->debugfs = NULL;
+
+ return 0;
+}
+
+static int tegra_dc_drm_init(struct host1x_client *client,
+ struct drm_device *drm)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ int err;
+
+ dc->pipe = drm->mode_config.num_crtc;
+
+ drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&dc->base, 256);
+ drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+
+ err = tegra_dc_rgb_init(drm, dc);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dc_debugfs_init(dc, drm->primary);
+ if (err < 0)
+ dev_err(dc->dev, "debugfs setup failed: %d\n", err);
+ }
+
+ err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
+ dev_name(dc->dev), dc);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_dc_drm_exit(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ int err;
+
+ devm_free_irq(dc->dev, dc->irq, dc);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dc_debugfs_exit(dc);
+ if (err < 0)
+ dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
+ }
+
+ err = tegra_dc_rgb_exit(dc);
+ if (err) {
+ dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops dc_client_ops = {
+ .drm_init = tegra_dc_drm_init,
+ .drm_exit = tegra_dc_drm_exit,
+};
+
+static int tegra_dc_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct resource *regs;
+ struct tegra_dc *dc;
+ int err;
+
+ dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dc->list);
+ dc->dev = &pdev->dev;
+
+ dc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dc->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(dc->clk);
+ }
+
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get registers\n");
+ return -ENXIO;
+ }
+
+ dc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(dc->regs))
+ return PTR_ERR(dc->regs);
+
+ dc->irq = platform_get_irq(pdev, 0);
+ if (dc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ return -ENXIO;
+ }
+
+ INIT_LIST_HEAD(&dc->client.list);
+ dc->client.ops = &dc_client_ops;
+ dc->client.dev = &pdev->dev;
+
+ err = tegra_dc_rgb_probe(dc);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+ return err;
+ }
+
+ err = host1x_register_client(host1x, &dc->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, dc);
+
+ return 0;
+}
+
+static int tegra_dc_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_dc *dc = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_unregister_client(host1x, &dc->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_disable_unprepare(dc->clk);
+
+ return 0;
+}
+
+static struct of_device_id tegra_dc_of_match[] = {
+ { .compatible = "nvidia,tegra30-dc", },
+ { .compatible = "nvidia,tegra20-dc", },
+ { },
+};
+
+struct platform_driver tegra_dc_driver = {
+ .driver = {
+ .name = "tegra-dc",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_dc_of_match,
+ },
+ .probe = tegra_dc_probe,
+ .remove = tegra_dc_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
new file mode 100644
index 000000000000..99977b5d5c36
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DC_H
+#define TEGRA_DC_H 1
+
+#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
+#define DC_CMD_DISPLAY_COMMAND 0x032
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DC_CMD_SIGNAL_RAISE 0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
+#define PW0_ENABLE (1 << 0)
+#define PW1_ENABLE (1 << 2)
+#define PW2_ENABLE (1 << 4)
+#define PW3_ENABLE (1 << 6)
+#define PW4_ENABLE (1 << 8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+
+#define DC_CMD_INT_STATUS 0x037
+#define DC_CMD_INT_MASK 0x038
+#define DC_CMD_INT_ENABLE 0x039
+#define DC_CMD_INT_TYPE 0x03a
+#define DC_CMD_INT_POLARITY 0x03b
+#define CTXSW_INT (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define VBLANK_INT (1 << 2)
+#define WIN_A_UF_INT (1 << 8)
+#define WIN_B_UF_INT (1 << 9)
+#define WIN_C_UF_INT (1 << 10)
+#define WIN_A_OF_INT (1 << 14)
+#define WIN_B_OF_INT (1 << 15)
+#define WIN_C_OF_INT (1 << 16)
+
+#define DC_CMD_SIGNAL_RAISE1 0x03c
+#define DC_CMD_SIGNAL_RAISE2 0x03d
+#define DC_CMD_SIGNAL_RAISE3 0x03e
+
+#define DC_CMD_STATE_ACCESS 0x040
+
+#define DC_CMD_STATE_CONTROL 0x041
+#define GENERAL_ACT_REQ (1 << 0)
+#define WIN_A_ACT_REQ (1 << 1)
+#define WIN_B_ACT_REQ (1 << 2)
+#define WIN_C_ACT_REQ (1 << 3)
+#define GENERAL_UPDATE (1 << 8)
+#define WIN_A_UPDATE (1 << 9)
+#define WIN_B_UPDATE (1 << 10)
+#define WIN_C_UPDATE (1 << 11)
+#define NC_HOST_TRIG (1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL 0x043
+
+#define DC_COM_CRC_CONTROL 0x300
+#define DC_COM_CRC_CHECKSUM 0x301
+#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
+#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
+#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
+#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
+#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
+#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
+#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
+#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
+
+#define DC_COM_PIN_MISC_CONTROL 0x31b
+#define DC_COM_PIN_PM0_CONTROL 0x31c
+#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d
+#define DC_COM_PIN_PM1_CONTROL 0x31e
+#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f
+
+#define DC_COM_SPI_CONTROL 0x320
+#define DC_COM_SPI_START_BYTE 0x321
+#define DC_COM_HSPI_WRITE_DATA_AB 0x322
+#define DC_COM_HSPI_WRITE_DATA_CD 0x323
+#define DC_COM_HSPI_CS_DC 0x324
+#define DC_COM_SCRATCH_REGISTER_A 0x325
+#define DC_COM_SCRATCH_REGISTER_B 0x326
+#define DC_COM_GPIO_CTRL 0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
+#define H_PULSE_0_ENABLE (1 << 8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
+
+#define DC_DISP_DISP_WIN_OPTIONS 0x402
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
+#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
+#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
+#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8)
+#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
+#define CURSOR_DELAY(x) (((x) & 0x3f) << 24)
+#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
+#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8)
+#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0)
+
+#define DC_DISP_DISP_TIMING_OPTIONS 0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC 0x406
+#define DC_DISP_SYNC_WIDTH 0x407
+#define DC_DISP_BACK_PORCH 0x408
+#define DC_DISP_ACTIVE 0x409
+#define DC_DISP_FRONT_PORCH 0x40a
+#define DC_DISP_H_PULSE0_CONTROL 0x40b
+#define DC_DISP_H_PULSE0_POSITION_A 0x40c
+#define DC_DISP_H_PULSE0_POSITION_B 0x40d
+#define DC_DISP_H_PULSE0_POSITION_C 0x40e
+#define DC_DISP_H_PULSE0_POSITION_D 0x40f
+#define DC_DISP_H_PULSE1_CONTROL 0x410
+#define DC_DISP_H_PULSE1_POSITION_A 0x411
+#define DC_DISP_H_PULSE1_POSITION_B 0x412
+#define DC_DISP_H_PULSE1_POSITION_C 0x413
+#define DC_DISP_H_PULSE1_POSITION_D 0x414
+#define DC_DISP_H_PULSE2_CONTROL 0x415
+#define DC_DISP_H_PULSE2_POSITION_A 0x416
+#define DC_DISP_H_PULSE2_POSITION_B 0x417
+#define DC_DISP_H_PULSE2_POSITION_C 0x418
+#define DC_DISP_H_PULSE2_POSITION_D 0x419
+#define DC_DISP_V_PULSE0_CONTROL 0x41a
+#define DC_DISP_V_PULSE0_POSITION_A 0x41b
+#define DC_DISP_V_PULSE0_POSITION_B 0x41c
+#define DC_DISP_V_PULSE0_POSITION_C 0x41d
+#define DC_DISP_V_PULSE1_CONTROL 0x41e
+#define DC_DISP_V_PULSE1_POSITION_A 0x41f
+#define DC_DISP_V_PULSE1_POSITION_B 0x420
+#define DC_DISP_V_PULSE1_POSITION_C 0x421
+#define DC_DISP_V_PULSE2_CONTROL 0x422
+#define DC_DISP_V_PULSE2_POSITION_A 0x423
+#define DC_DISP_V_PULSE3_CONTROL 0x424
+#define DC_DISP_V_PULSE3_POSITION_A 0x425
+#define DC_DISP_M0_CONTROL 0x426
+#define DC_DISP_M1_CONTROL 0x427
+#define DC_DISP_DI_CONTROL 0x428
+#define DC_DISP_PP_CONTROL 0x429
+#define DC_DISP_PP_SELECT_A 0x42a
+#define DC_DISP_PP_SELECT_B 0x42b
+#define DC_DISP_PP_SELECT_C 0x42c
+#define DC_DISP_PP_SELECT_D 0x42d
+
+#define PULSE_MODE_NORMAL (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH (0 << 4)
+#define PULSE_POLARITY_LOW (1 << 4)
+#define PULSE_QUAL_ALWAYS (0 << 6)
+#define PULSE_QUAL_VACTIVE (2 << 6)
+#define PULSE_QUAL_VACTIVE1 (3 << 6)
+#define PULSE_LAST_START_A (0 << 8)
+#define PULSE_LAST_END_A (1 << 8)
+#define PULSE_LAST_START_B (2 << 8)
+#define PULSE_LAST_END_B (3 << 8)
+#define PULSE_LAST_START_C (4 << 8)
+#define PULSE_LAST_END_C (5 << 8)
+#define PULSE_LAST_START_D (6 << 8)
+#define PULSE_LAST_END_D (7 << 8)
+
+#define PULSE_START(x) (((x) & 0xfff) << 0)
+#define PULSE_END(x) (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
+#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
+#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S (4 << 0)
+#define DISP_DATA_FORMAT_DF3S (5 << 0)
+#define DISP_DATA_FORMAT_DFSPI (6 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
+#define DISP_ALIGNMENT_MSB (0 << 8)
+#define DISP_ALIGNMENT_LSB (1 << 8)
+#define DISP_ORDER_RED_BLUE (0 << 9)
+#define DISP_ORDER_BLUE_RED (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL 0x430
+#define BASE_COLOR_SIZE666 (0 << 0)
+#define BASE_COLOR_SIZE111 (1 << 0)
+#define BASE_COLOR_SIZE222 (2 << 0)
+#define BASE_COLOR_SIZE333 (3 << 0)
+#define BASE_COLOR_SIZE444 (4 << 0)
+#define BASE_COLOR_SIZE555 (5 << 0)
+#define BASE_COLOR_SIZE565 (6 << 0)
+#define BASE_COLOR_SIZE332 (7 << 0)
+#define BASE_COLOR_SIZE888 (8 << 0)
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+
+#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
+#define DE_SELECT_ACTIVE_BLANK (0 << 0)
+#define DE_SELECT_ACTIVE (1 << 0)
+#define DE_SELECT_ACTIVE_IS (2 << 0)
+#define DE_CONTROL_ONECLK (0 << 2)
+#define DE_CONTROL_NORMAL (1 << 2)
+#define DE_CONTROL_EARLY_EXT (2 << 2)
+#define DE_CONTROL_EARLY (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
+#define DC_DISP_LCD_SPI_OPTIONS 0x434
+#define DC_DISP_BORDER_COLOR 0x435
+#define DC_DISP_COLOR_KEY0_LOWER 0x436
+#define DC_DISP_COLOR_KEY0_UPPER 0x437
+#define DC_DISP_COLOR_KEY1_LOWER 0x438
+#define DC_DISP_COLOR_KEY1_UPPER 0x439
+
+#define DC_DISP_CURSOR_FOREGROUND 0x43c
+#define DC_DISP_CURSOR_BACKGROUND 0x43d
+
+#define DC_DISP_CURSOR_START_ADDR 0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
+
+#define DC_DISP_CURSOR_POSITION 0x440
+#define DC_DISP_CURSOR_POSITION_NS 0x441
+
+#define DC_DISP_INIT_SEQ_CONTROL 0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
+
+#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
+#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
+
+#define DC_DISP_DAC_CRT_CTRL 0x4c0
+#define DC_DISP_DISP_MISC_CONTROL 0x4c1
+#define DC_DISP_SD_CONTROL 0x4c2
+#define DC_DISP_SD_CSC_COEFF 0x4c3
+#define DC_DISP_SD_LUT(x) (0x4c4 + (x))
+#define DC_DISP_SD_FLICKER_CONTROL 0x4cd
+#define DC_DISP_DC_PIXEL_COUNT 0x4ce
+#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x))
+#define DC_DISP_SD_BL_PARAMETERS 0x4d7
+#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x))
+#define DC_DISP_SD_BL_CONTROL 0x4dc
+#define DC_DISP_SD_HW_K_VALUES 0x4dd
+#define DC_DISP_SD_MAN_K_VALUES 0x4de
+
+#define DC_WIN_WIN_OPTIONS 0x700
+#define COLOR_EXPAND (1 << 6)
+#define WIN_ENABLE (1 << 30)
+
+#define DC_WIN_BYTE_SWAP 0x701
+#define BYTE_SWAP_NOSWAP (0 << 0)
+#define BYTE_SWAP_SWAP2 (1 << 0)
+#define BYTE_SWAP_SWAP4 (2 << 0)
+#define BYTE_SWAP_SWAP4HW (3 << 0)
+
+#define DC_WIN_BUFFER_CONTROL 0x702
+#define BUFFER_CONTROL_HOST (0 << 0)
+#define BUFFER_CONTROL_VI (1 << 0)
+#define BUFFER_CONTROL_EPP (2 << 0)
+#define BUFFER_CONTROL_MPEGE (3 << 0)
+#define BUFFER_CONTROL_SB2D (4 << 0)
+
+#define DC_WIN_COLOR_DEPTH 0x703
+#define WIN_COLOR_DEPTH_P1 0
+#define WIN_COLOR_DEPTH_P2 1
+#define WIN_COLOR_DEPTH_P4 2
+#define WIN_COLOR_DEPTH_P8 3
+#define WIN_COLOR_DEPTH_B4G4R4A4 4
+#define WIN_COLOR_DEPTH_B5G5R5A 5
+#define WIN_COLOR_DEPTH_B5G6R5 6
+#define WIN_COLOR_DEPTH_AB5G5R5 7
+#define WIN_COLOR_DEPTH_B8G8R8A8 12
+#define WIN_COLOR_DEPTH_R8G8B8A8 13
+#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
+#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
+#define WIN_COLOR_DEPTH_YCbCr422 16
+#define WIN_COLOR_DEPTH_YUV422 17
+#define WIN_COLOR_DEPTH_YCbCr420P 18
+#define WIN_COLOR_DEPTH_YUV420P 19
+#define WIN_COLOR_DEPTH_YCbCr422P 20
+#define WIN_COLOR_DEPTH_YUV422P 21
+#define WIN_COLOR_DEPTH_YCbCr422R 22
+#define WIN_COLOR_DEPTH_YUV422R 23
+#define WIN_COLOR_DEPTH_YCbCr422RA 24
+#define WIN_COLOR_DEPTH_YUV422RA 25
+
+#define DC_WIN_POSITION 0x704
+#define H_POSITION(x) (((x) & 0x1fff) << 0)
+#define V_POSITION(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_SIZE 0x705
+#define H_SIZE(x) (((x) & 0x1fff) << 0)
+#define V_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE 0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0)
+#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA 0x707
+#define DC_WIN_V_INITIAL_DDA 0x708
+#define DC_WIN_DDA_INC 0x709
+#define H_DDA_INC(x) (((x) & 0xffff) << 0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE 0x70a
+#define DC_WIN_BUF_STRIDE 0x70b
+#define DC_WIN_UV_BUF_STRIDE 0x70c
+#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_DV_CONTROL 0x70e
+
+#define DC_WIN_BLEND_NOKEY 0x70f
+#define DC_WIN_BLEND_1WIN 0x710
+#define DC_WIN_BLEND_2WIN_X 0x711
+#define DC_WIN_BLEND_2WIN_Y 0x712
+#define DC_WIN_BLEND32WIN_XY 0x713
+
+#define DC_WIN_HP_FETCH_CONTROL 0x714
+
+#define DC_WINBUF_START_ADDR 0x800
+#define DC_WINBUF_START_ADDR_NS 0x801
+#define DC_WINBUF_START_ADDR_U 0x802
+#define DC_WINBUF_START_ADDR_U_NS 0x803
+#define DC_WINBUF_START_ADDR_V 0x804
+#define DC_WINBUF_START_ADDR_V_NS 0x805
+
+#define DC_WINBUF_ADDR_H_OFFSET 0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
+#define DC_WINBUF_ADDR_V_OFFSET 0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
+
+#define DC_WINBUF_UFLOW_STATUS 0x80a
+
+#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
+
+/* synchronization points */
+#define SYNCPT_VBLANK0 26
+#define SYNCPT_VBLANK1 27
+
+#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 000000000000..d980dc75788c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+
+#include "drm.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct device *dev = drm->dev;
+ struct host1x *host1x;
+ int err;
+
+ host1x = dev_get_drvdata(dev);
+ drm->dev_private = host1x;
+ host1x->drm = drm;
+
+ drm_mode_config_init(drm);
+
+ err = host1x_drm_init(host1x, drm);
+ if (err < 0)
+ return err;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ return err;
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+
+ drm_mode_config_cleanup(drm);
+
+ return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+ return 0;
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(host1x->fbdev);
+}
+
+static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+};
+
+static const struct file_operations tegra_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+struct drm_driver tegra_drm_driver = {
+ .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+ .load = tegra_drm_load,
+ .unload = tegra_drm_unload,
+ .open = tegra_drm_open,
+ .lastclose = tegra_drm_lastclose,
+
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_cma_dumb_destroy,
+
+ .ioctls = tegra_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+ .fops = &tegra_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
new file mode 100644
index 000000000000..741b5dc2742c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DRM_H
+#define TEGRA_DRM_H 1
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fixed.h>
+
+struct tegra_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_cma_object *obj;
+};
+
+static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct tegra_framebuffer, base);
+}
+
+struct host1x {
+ struct drm_device *drm;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ int syncpt;
+ int irq;
+
+ struct mutex drm_clients_lock;
+ struct list_head drm_clients;
+ struct list_head drm_active;
+
+ struct mutex clients_lock;
+ struct list_head clients;
+
+ struct drm_fbdev_cma *fbdev;
+ struct tegra_framebuffer fb;
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+ int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
+ int (*drm_exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+ struct host1x *host1x;
+ struct device *dev;
+
+ const struct host1x_client_ops *ops;
+
+ struct list_head list;
+};
+
+extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x *host1x);
+
+extern int host1x_register_client(struct host1x *host1x,
+ struct host1x_client *client);
+extern int host1x_unregister_client(struct host1x *host1x,
+ struct host1x_client *client);
+
+struct tegra_output;
+
+struct tegra_dc {
+ struct host1x_client client;
+
+ struct host1x *host1x;
+ struct device *dev;
+
+ struct drm_crtc base;
+ int pipe;
+
+ struct clk *clk;
+
+ void __iomem *regs;
+ int irq;
+
+ struct tegra_output *rgb;
+
+ struct list_head list;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+};
+
+static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct tegra_dc, base);
+}
+
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
+ unsigned long reg)
+{
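+ /* registers are indexed in 32-bit words; convert to a byte offset */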
+ writel(value, dc->regs + (reg << 2));
+}
+
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+ unsigned long reg)
+{
+ return readl(dc->regs + (reg << 2));
+}
+
+struct tegra_output_ops {
+ int (*enable)(struct tegra_output *output);
+ int (*disable)(struct tegra_output *output);
+ int (*setup_clock)(struct tegra_output *output, struct clk *clk,
+ unsigned long pclk);
+ int (*check_mode)(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status);
+};
+
+enum tegra_output_type {
+ TEGRA_OUTPUT_RGB,
+ TEGRA_OUTPUT_HDMI,
+};
+
+struct tegra_output {
+ struct device_node *of_node;
+ struct device *dev;
+
+ const struct tegra_output_ops *ops;
+ enum tegra_output_type type;
+
+ struct i2c_adapter *ddc;
+ const struct edid *edid;
+ unsigned int hpd_irq;
+ int hpd_gpio;
+
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
+{
+ return container_of(e, struct tegra_output, encoder);
+}
+
+static inline struct tegra_output *connector_to_output(struct drm_connector *c)
+{
+ return container_of(c, struct tegra_output, connector);
+}
+
+static inline int tegra_output_enable(struct tegra_output *output)
+{
+ if (output && output->ops && output->ops->enable)
+ return output->ops->enable(output);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_disable(struct tegra_output *output)
+{
+ if (output && output->ops && output->ops->disable)
+ return output->ops->disable(output);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ if (output && output->ops && output->ops->setup_clock)
+ return output->ops->setup_clock(output, clk, pclk);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ if (output && output->ops && output->ops->check_mode)
+ return output->ops->check_mode(output, mode, status);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+/* from rgb.c */
+extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
+/* from output.c */
+extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+extern int tegra_output_exit(struct tegra_output *output);
+
+/* from fb.c */
+extern int tegra_drm_fb_init(struct drm_device *drm);
+extern void tegra_drm_fb_exit(struct drm_device *drm);
+
+extern struct platform_driver tegra_host1x_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct drm_driver tegra_drm_driver;
+
+#endif /* TEGRA_DRM_H */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
new file mode 100644
index 000000000000..97993c6835fd
--- /dev/null
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(host1x->fbdev);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = tegra_drm_fb_output_poll_changed,
+};
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+ struct drm_fbdev_cma *fbdev;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
+ fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(fbdev))
+ return PTR_ERR(fbdev);
+
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+ drm_fbdev_cma_restore_mode(fbdev);
+#endif
+
+ host1x->fbdev = fbdev;
+
+ return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_fini(host1x->fbdev);
+}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
new file mode 100644
index 000000000000..d4f3fb9f0c29
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -0,0 +1,1320 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk/tegra.h>
+
+#include "hdmi.h"
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_hdmi {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ struct regulator *vdd;
+ struct regulator *pll;
+
+ void __iomem *regs;
+ unsigned int irq;
+
+ struct clk *clk_parent;
+ struct clk *clk;
+
+ unsigned int audio_source;
+ unsigned int audio_freq;
+ bool stereo;
+ bool dvi;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+};
+
+static inline struct tegra_hdmi *
+host1x_client_to_hdmi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_hdmi, client);
+}
+
+static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_hdmi, output);
+}
+
+#define HDMI_AUDIOCLK_FREQ 216000000
+#define HDMI_REKEY_DEFAULT 56
+
+enum {
+ AUTO = 0,
+ SPDIF,
+ HDA,
+};
+
+static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
+ unsigned long reg)
+{
+ return readl(hdmi->regs + (reg << 2));
+}
+
+static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
+ unsigned long reg)
+{
+ writel(val, hdmi->regs + (reg << 2));
+}
+
+struct tegra_hdmi_audio_config {
+ unsigned int pclk;
+ unsigned int n;
+ unsigned int cts;
+ unsigned int aval;
+};
+
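+/*
+ * Audio Clock Regeneration parameters: for every supported sample rate
+ * there is one table of { pclk, N, CTS, AVAL } entries, chosen so that
+ * 128 * sample rate == pixel clock * N / CTS. An all-zero entry
+ * terminates each table.
+ */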
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+ { 25200000, 4096, 25200, 24000 },
+ { 27000000, 4096, 27000, 24000 },
+ { 74250000, 4096, 74250, 24000 },
+ { 148500000, 4096, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+ { 25200000, 5880, 26250, 25000 },
+ { 27000000, 5880, 28125, 25000 },
+ { 74250000, 4704, 61875, 20000 },
+ { 148500000, 4704, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+ { 25200000, 6144, 25200, 24000 },
+ { 27000000, 6144, 27000, 24000 },
+ { 74250000, 6144, 74250, 24000 },
+ { 148500000, 6144, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
+ { 25200000, 11760, 26250, 25000 },
+ { 27000000, 11760, 28125, 25000 },
+ { 74250000, 9408, 61875, 20000 },
+ { 148500000, 9408, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
+ { 25200000, 12288, 25200, 24000 },
+ { 27000000, 12288, 27000, 24000 },
+ { 74250000, 12288, 74250, 24000 },
+ { 148500000, 12288, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
+ { 25200000, 23520, 26250, 25000 },
+ { 27000000, 23520, 28125, 25000 },
+ { 74250000, 18816, 61875, 20000 },
+ { 148500000, 18816, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
+ { 25200000, 24576, 25200, 24000 },
+ { 27000000, 24576, 27000, 24000 },
+ { 74250000, 24576, 74250, 24000 },
+ { 148500000, 24576, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+struct tmds_config {
+ unsigned int pclk;
+ u32 pll0;
+ u32 pll1;
+ u32 pe_current;
+ u32 drive_current;
+};
+
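+/*
+ * TMDS driver settings, ordered by increasing pixel clock; the first
+ * entry whose pclk is greater than or equal to the mode's pixel clock
+ * gets programmed.
+ */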
+static const struct tmds_config tegra2_tmds_config[] = {
+ { /* slow pixel clock modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+ SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+ { /* high pixel clock modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+ SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+ PE_CURRENT1(PE_CURRENT_6_0_mA) |
+ PE_CURRENT2(PE_CURRENT_6_0_mA) |
+ PE_CURRENT3(PE_CURRENT_6_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+};
+
+static const struct tmds_config tegra3_tmds_config[] = {
+ { /* 480p modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ }, { /* 720p modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ }, { /* 1080p modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ },
+};
+
+static const struct tegra_hdmi_audio_config *
+tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+{
+ const struct tegra_hdmi_audio_config *table;
+
+ switch (audio_freq) {
+ case 32000:
+ table = tegra_hdmi_audio_32k;
+ break;
+
+ case 44100:
+ table = tegra_hdmi_audio_44_1k;
+ break;
+
+ case 48000:
+ table = tegra_hdmi_audio_48k;
+ break;
+
+ case 88200:
+ table = tegra_hdmi_audio_88_2k;
+ break;
+
+ case 96000:
+ table = tegra_hdmi_audio_96k;
+ break;
+
+ case 176400:
+ table = tegra_hdmi_audio_176_4k;
+ break;
+
+ case 192000:
+ table = tegra_hdmi_audio_192k;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ while (table->pclk) {
+ if (table->pclk == pclk)
+ return table;
+
+ table++;
+ }
+
+ return NULL;
+}
+
+static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
+{
+ const unsigned int freqs[] = {
+ 32000, 44100, 48000, 88200, 96000, 176400, 192000
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ unsigned int f = freqs[i];
+ unsigned int eight_half;
+ unsigned long value;
+ unsigned int delta;
+
+ if (f > 96000)
+ delta = 2;
+ else if (f > 48000)
+ delta = 6;
+ else
+ delta = 9;
+
+ eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+ value = AUDIO_FS_LOW(eight_half - delta) |
+ AUDIO_FS_HIGH(eight_half + delta);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
+ }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+{
+ struct device_node *node = hdmi->dev->of_node;
+ const struct tegra_hdmi_audio_config *config;
+ unsigned int offset = 0;
+ unsigned long value;
+
+ switch (hdmi->audio_source) {
+ case HDA:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ break;
+
+ case SPDIF:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ break;
+
+ default:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ break;
+ }
+
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ } else {
+ value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+
+ value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ }
+
+ config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+ if (!config) {
+ dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
+ hdmi->audio_freq, pclk);
+ return -EINVAL;
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+ value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
+ AUDIO_N_VALUE(config->n - 1);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ value = ACR_SUBPACK_CTS(config->cts);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+ value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
+ value &= ~AUDIO_N_RESETF;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ switch (hdmi->audio_freq) {
+ case 32000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
+ break;
+
+ case 44100:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
+ break;
+
+ case 48000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
+ break;
+
+ case 88200:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
+ break;
+
+ case 96000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
+ break;
+
+ case 176400:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
+ break;
+
+ case 192000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
+ break;
+ }
+
+ tegra_hdmi_writel(hdmi, config->aval, offset);
+ }
+
+ tegra_hdmi_setup_audio_fs_tables(hdmi);
+
+ return 0;
+}
+
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
+ unsigned int offset, u8 type,
+ u8 version, void *data, size_t size)
+{
+ unsigned long value;
+ u8 *ptr = data;
+ u32 subpack[2];
+ size_t i;
+ u8 csum;
+
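+ /*
+ * The infoframe checksum is chosen such that the header bytes (type,
+ * version, length) plus all payload bytes sum to zero modulo 256.
+ */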
+ /* first byte of data is the checksum */
+ csum = type + version + size - 1;
+
+ for (i = 1; i < size; i++)
+ csum += ptr[i];
+
+ ptr[0] = 0x100 - csum;
+
+ value = INFOFRAME_HEADER_TYPE(type) |
+ INFOFRAME_HEADER_VERSION(version) |
+ INFOFRAME_HEADER_LEN(size - 1);
+ tegra_hdmi_writel(hdmi, value, offset);
+
+ /* The audio infoframe only has one set of subpack registers. The HDMI
+ * block pads the rest of the data as per the spec so we have to fix up
+ * the length before filling in the subpacks.
+ */
+ if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+ size = 6;
+
+ /* each subpack is 7 bytes, divided into:
+ * subpack_low - bytes 0 - 3
+ * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 0; i < size; i++) {
+ size_t index = i % 7;
+
+ if (index == 0)
+ memset(subpack, 0x0, sizeof(subpack));
+
+ ((u8 *)subpack)[index] = ptr[i];
+
+ if (index == 6 || (i + 1 == size)) {
+ unsigned int reg = offset + 1 + (i / 7) * 2;
+
+ tegra_hdmi_writel(hdmi, subpack[0], reg);
+ tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+ }
+ }
+}
+
+static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_avi_infoframe frame;
+ unsigned int h_front_porch;
+ unsigned int hsize = 16;
+ unsigned int vsize = 9;
+
+ if (hdmi->dvi) {
+ tegra_hdmi_writel(hdmi, 0,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ return;
+ }
+
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+ memset(&frame, 0, sizeof(frame));
+ frame.r = HDMI_AVI_R_SAME;
+
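+ /*
+ * Derive the CEA-861 video identification code (VIC) and picture
+ * aspect ratio from the vertical resolution, using the horizontal
+ * front porch to tell the 60 Hz, 50 Hz and 24 Hz variants apart.
+ */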
+ switch (mode->vdisplay) {
+ case 480:
+ if (mode->hdisplay == 640) {
+ frame.m = HDMI_AVI_M_4_3;
+ frame.vic = 1;
+ } else {
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 3;
+ }
+ break;
+
+ case 576:
+ if (((hsize * 10) / vsize) > 14) {
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 18;
+ } else {
+ frame.m = HDMI_AVI_M_4_3;
+ frame.vic = 17;
+ }
+ break;
+
+ case 720:
+ case 1470: /* stereo mode */
+ frame.m = HDMI_AVI_M_16_9;
+
+ if (h_front_porch == 110)
+ frame.vic = 4;
+ else
+ frame.vic = 19;
+ break;
+
+ case 1080:
+ case 2205: /* stereo mode */
+ frame.m = HDMI_AVI_M_16_9;
+
+ switch (h_front_porch) {
+ case 88:
+ frame.vic = 16;
+ break;
+
+ case 528:
+ frame.vic = 31;
+ break;
+
+ default:
+ frame.vic = 32;
+ break;
+ }
+ break;
+
+ default:
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 0;
+ break;
+ }
+
+ tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
+ &frame, sizeof(frame));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_audio_infoframe frame;
+
+ if (hdmi->dvi) {
+ tegra_hdmi_writel(hdmi, 0,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ return;
+ }
+
+ memset(&frame, 0, sizeof(frame));
+ frame.cc = HDMI_AUDIO_CC_2;
+
+ tegra_hdmi_write_infopack(hdmi,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AUDIO,
+ HDMI_AUDIO_VERSION,
+ &frame, sizeof(frame));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_stereo_infoframe frame;
+ unsigned long value;
+
+ if (!hdmi->stereo) {
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ return;
+ }
+
+ memset(&frame, 0, sizeof(frame));
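+ /* the three register ID bytes hold the HDMI IEEE OUI (0x000c03) */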
+ frame.regid0 = 0x03;
+ frame.regid1 = 0x0c;
+ frame.regid2 = 0x00;
+ frame.hdmi_video_format = 2;
+
+ /* TODO: 74 MHz limit? */
+ if (1) {
+ frame._3d_structure = 0;
+ } else {
+ frame._3d_structure = 8;
+ frame._3d_ext_data = 0;
+ }
+
+ tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ HDMI_VENDOR_VERSION, &frame, 6);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value |= GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
+ const struct tmds_config *tmds)
+{
+ unsigned long value;
+
+ tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
+ tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
+ tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
+ value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+}
+
+static int tegra_output_hdmi_enable(struct tegra_output *output)
+{
+ unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct drm_display_mode *mode = &dc->base.mode;
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ struct device_node *node = hdmi->dev->of_node;
+ unsigned int pulse_start, div82, pclk;
+ const struct tmds_config *tmds;
+ unsigned int num_tmds;
+ unsigned long value;
+ int retries = 1000;
+ int err;
+
+ pclk = mode->clock * 1000;
+ h_sync_width = mode->hsync_end - mode->hsync_start;
+ h_back_porch = mode->htotal - mode->hsync_end;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+
+ err = regulator_enable(hdmi->vdd);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(hdmi->pll);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
+ return err;
+ }
+
+ /*
+ * This assumes that the display controller will divide its parent
+ * clock by 2 to generate the pixel clock.
+ */
+ err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
+ return err;
+ }
+
+ err = clk_set_rate(hdmi->clk, pclk);
+ if (err < 0)
+ return err;
+
+ err = clk_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ tegra_periph_reset_assert(hdmi->clk);
+ usleep_range(1000, 2000);
+ tegra_periph_reset_deassert(hdmi->clk);
+
+ tegra_dc_writel(dc, VSYNC_H_POSITION(1),
+ DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+ DC_DISP_DISP_COLOR_CONTROL);
+
+ /* video_preamble uses h_pulse2 */
+ pulse_start = 1 + h_sync_width + h_back_porch - 10;
+
+ tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+ value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
+ PULSE_LAST_END_A;
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+ value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+ value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
+ VSYNC_WINDOW_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+ if (dc->pipe)
+ value = HDMI_SRC_DISPLAYB;
+ else
+ value = HDMI_SRC_DISPLAYA;
+
+ if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
+ (mode->vdisplay == 576)))
+ tegra_hdmi_writel(hdmi,
+ value | ARM_VIDEO_RANGE_FULL,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+ else
+ tegra_hdmi_writel(hdmi,
+ value | ARM_VIDEO_RANGE_LIMITED,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+
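+ /*
+ * The SOR reference clock divider takes the clock rate in MHz as a
+ * fixed-point value: div82 counts quarter-MHz steps, with the integer
+ * part going into the DIV_INT field and the remainder into DIV_FRAC.
+ */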
+ div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+ value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+
+ if (!hdmi->dvi) {
+ err = tegra_hdmi_setup_audio(hdmi, pclk);
+ if (err < 0)
+ hdmi->dvi = true;
+ }
+
+ if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
+ /*
+ * TODO: add ELD support
+ */
+ }
+
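+ /*
+ * Limit the number of auxiliary packets per horizontal blanking period
+ * to what fits after the rekey window and fixed overhead; each packet
+ * occupies 32 pixel clocks.
+ */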
+ rekey = HDMI_REKEY_DEFAULT;
+ value = HDMI_CTRL_REKEY(rekey);
+ value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
+ h_front_porch - rekey - 18) / 32);
+
+ if (!hdmi->dvi)
+ value |= HDMI_CTRL_ENABLE;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
+
+ if (hdmi->dvi)
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ else
+ tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+ tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_setup_stereo_infoframe(hdmi);
+
+ /* TMDS CONFIG */
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ num_tmds = ARRAY_SIZE(tegra3_tmds_config);
+ tmds = tegra3_tmds_config;
+ } else {
+ num_tmds = ARRAY_SIZE(tegra2_tmds_config);
+ tmds = tegra2_tmds_config;
+ }
+
+ for (i = 0; i < num_tmds; i++) {
+ if (pclk <= tmds[i].pclk) {
+ tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+ break;
+ }
+ }
+
+ tegra_hdmi_writel(hdmi,
+ SOR_SEQ_CTL_PU_PC(0) |
+ SOR_SEQ_PU_PC_ALT(0) |
+ SOR_SEQ_PD_PC(8) |
+ SOR_SEQ_PD_PC_ALT(8),
+ HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+ value = SOR_SEQ_INST_WAIT_TIME(1) |
+ SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+ SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_PIN_A_LOW |
+ SOR_SEQ_INST_PIN_B_LOW |
+ SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
+
+ value = 0x1c800;
+ value &= ~SOR_CSTM_ROTCLK(~0);
+ value |= SOR_CSTM_ROTCLK(2);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* start SOR */
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_TRIGGER,
+ HDMI_NV_PDISP_SOR_PWR);
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_DONE,
+ HDMI_NV_PDISP_SOR_PWR);
+
+ do {
+ BUG_ON(--retries < 0);
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+ } while (value & SOR_PWR_SETTING_NEW_PENDING);
+
+ value = SOR_STATE_ASY_CRCMODE_COMPLETE |
+ SOR_STATE_ASY_OWNER_HEAD0 |
+ SOR_STATE_ASY_SUBOWNER_BOTH |
+ SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+ SOR_STATE_ASY_DEPOL_POS;
+
+ /* setup sync polarities */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL_NEG;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL_NEG;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
+
+ value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
+ HDMI_NV_PDISP_SOR_STATE1);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+ tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* TODO: add HDCP support */
+
+ return 0;
+}
+
+static int tegra_output_hdmi_disable(struct tegra_output *output)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+
+ tegra_periph_reset_assert(hdmi->clk);
+ clk_disable(hdmi->clk);
+ regulator_disable(hdmi->pll);
+ regulator_disable(hdmi->vdd);
+
+ return 0;
+}
+
+static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ struct clk *base;
+ int err;
+
+ err = clk_set_parent(clk, hdmi->clk_parent);
+ if (err < 0) {
+ dev_err(output->dev, "failed to set parent: %d\n", err);
+ return err;
+ }
+
+ base = clk_get_parent(hdmi->clk_parent);
+
+ /*
+ * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+ * respectively, each of which divides the base pll_d by 2.
+ */
+ err = clk_set_rate(base, pclk * 2);
+ if (err < 0)
+ dev_err(output->dev,
+ "failed to set base clock rate to %lu Hz\n",
+ pclk * 2);
+
+ return 0;
+}
+
+static int tegra_output_hdmi_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ unsigned long pclk = mode->clock * 1000;
+ struct clk *parent;
+ long err;
+
+ parent = clk_get_parent(hdmi->clk_parent);
+
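+ /*
+ * The mode can only be supported if the base PLL can be programmed to
+ * four times the pixel clock: pll_d_out0 divides it by two and the
+ * display controller divides by two again.
+ */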
+ err = clk_round_rate(parent, pclk * 4);
+ if (err < 0)
+ *status = MODE_NOCLOCK;
+ else
+ *status = MODE_OK;
+
+ return 0;
+}
+
+static const struct tegra_output_ops hdmi_ops = {
+ .enable = tegra_output_hdmi_enable,
+ .disable = tegra_output_hdmi_disable,
+ .setup_clock = tegra_output_hdmi_setup_clock,
+ .check_mode = tegra_output_hdmi_check_mode,
+};
+
+static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_hdmi *hdmi = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
+ tegra_hdmi_readl(hdmi, name))
+
+ DUMP_REG(HDMI_CTXSW);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+ DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+ DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+ DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_hdmi_show_regs, 0, NULL },
+};
+
+static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
+ struct drm_minor *minor)
+{
+ unsigned int i;
+ int err;
+
+ hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
+ if (!hdmi->debugfs)
+ return -ENOMEM;
+
+ hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!hdmi->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ hdmi->debugfs_files[i].data = hdmi;
+
+ err = drm_debugfs_create_files(hdmi->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ hdmi->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ hdmi->minor = minor;
+
+ return 0;
+
+free:
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+remove:
+ debugfs_remove(hdmi->debugfs);
+ hdmi->debugfs = NULL;
+
+ return err;
+}
+
+static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
+{
+ drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
+ hdmi->minor);
+ hdmi->minor = NULL;
+
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+
+ debugfs_remove(hdmi->debugfs);
+ hdmi->debugfs = NULL;
+
+ return 0;
+}
+
+static int tegra_hdmi_drm_init(struct host1x_client *client,
+ struct drm_device *drm)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ int err;
+
+ hdmi->output.type = TEGRA_OUTPUT_HDMI;
+ hdmi->output.dev = client->dev;
+ hdmi->output.ops = &hdmi_ops;
+
+ err = tegra_output_init(drm, &hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+ if (err < 0)
+ dev_err(client->dev, "debugfs setup failed: %d\n", err);
+ }
+
+ return 0;
+}
+
+static int tegra_hdmi_drm_exit(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ int err;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_hdmi_debugfs_exit(hdmi);
+ if (err < 0)
+ dev_err(client->dev, "debugfs cleanup failed: %d\n",
+ err);
+ }
+
+ err = tegra_output_disable(&hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ err = tegra_output_exit(&hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops hdmi_client_ops = {
+ .drm_init = tegra_hdmi_drm_init,
+ .drm_exit = tegra_hdmi_drm_exit,
+};
+
+static int tegra_hdmi_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_hdmi *hdmi;
+ struct resource *regs;
+ int err;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = &pdev->dev;
+ hdmi->audio_source = AUTO;
+ hdmi->audio_freq = 44100;
+ hdmi->stereo = false;
+ hdmi->dvi = false;
+
+ hdmi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hdmi->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(hdmi->clk);
+ }
+
+ err = clk_prepare(hdmi->clk);
+ if (err < 0)
+ return err;
+
+ hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(hdmi->clk_parent))
+ return PTR_ERR(hdmi->clk_parent);
+
+ err = clk_prepare(hdmi->clk_parent);
+ if (err < 0)
+ return err;
+
+ err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
+ return err;
+ }
+
+ hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(hdmi->vdd)) {
+ dev_err(&pdev->dev, "failed to get VDD regulator\n");
+ return PTR_ERR(hdmi->vdd);
+ }
+
+ hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
+ if (IS_ERR(hdmi->pll)) {
+ dev_err(&pdev->dev, "failed to get PLL regulator\n");
+ return PTR_ERR(hdmi->pll);
+ }
+
+ hdmi->output.dev = &pdev->dev;
+
+ err = tegra_output_parse_dt(&hdmi->output);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+
+ hdmi->irq = err;
+
+ hdmi->client.ops = &hdmi_client_ops;
+ INIT_LIST_HEAD(&hdmi->client.list);
+ hdmi->client.dev = &pdev->dev;
+
+ err = host1x_register_client(host1x, &hdmi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+}
+
+static int tegra_hdmi_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_unregister_client(host1x, &hdmi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_unprepare(hdmi->clk_parent);
+ clk_unprepare(hdmi->clk);
+
+ return 0;
+}
+
+static struct of_device_id tegra_hdmi_of_match[] = {
+ { .compatible = "nvidia,tegra30-hdmi", },
+ { .compatible = "nvidia,tegra20-hdmi", },
+ { },
+};
+
+struct platform_driver tegra_hdmi_driver = {
+ .driver = {
+ .name = "tegra-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_hdmi_of_match,
+ },
+ .probe = tegra_hdmi_probe,
+ .remove = tegra_hdmi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
new file mode 100644
index 000000000000..1477f36eb45a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_HDMI_H
+#define TEGRA_HDMI_H 1
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HDMI_INFOFRAME_TYPE_AVI 0x82
+#define HDMI_INFOFRAME_TYPE_SPD 0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
+
+/* all fields little endian */
+struct hdmi_avi_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned s:2; /* scan information */
+ unsigned b:2; /* bar info data valid */
+ unsigned a:1; /* active info present */
+ unsigned y:2; /* RGB or YCbCr */
+ unsigned res1:1;
+
+ /* PB2 */
+ unsigned r:4; /* active format aspect ratio */
+ unsigned m:2; /* picture aspect ratio */
+ unsigned c:2; /* colorimetry */
+
+ /* PB3 */
+ unsigned sc:2; /* scan information */
+ unsigned q:2; /* quantization range */
+ unsigned ec:3; /* extended colorimetry */
+ unsigned itc:1; /* IT content */
+
+ /* PB4 */
+ unsigned vic:7; /* video format id code */
+ unsigned res4:1;
+
+ /* PB5 */
+ unsigned pr:4; /* pixel repetition factor */
+ unsigned cn:2; /* IT content type */
+ unsigned yq:2; /* ycc quantization range */
+
+ /* PB6-7 */
+ u16 top_bar_end_line;
+
+ /* PB8-9 */
+ u16 bot_bar_start_line;
+
+ /* PB10-11 */
+ u16 left_bar_end_pixel;
+
+ /* PB12-13 */
+ u16 right_bar_start_pixel;
+} __packed;
+
+#define HDMI_AVI_VERSION 0x02
+
+#define HDMI_AVI_Y_RGB 0x0
+#define HDMI_AVI_Y_YCBCR_422 0x1
+#define HDMI_AVI_Y_YCBCR_444 0x2
+
+#define HDMI_AVI_B_VERT 0x1
+#define HDMI_AVI_B_HORIZ 0x2
+
+#define HDMI_AVI_S_NONE 0x0
+#define HDMI_AVI_S_OVERSCAN 0x1
+#define HDMI_AVI_S_UNDERSCAN 0x2
+
+#define HDMI_AVI_C_NONE 0x0
+#define HDMI_AVI_C_SMPTE 0x1
+#define HDMI_AVI_C_ITU_R 0x2
+#define HDMI_AVI_C_EXTENDED 0x4
+
+#define HDMI_AVI_M_4_3 0x1
+#define HDMI_AVI_M_16_9 0x2
+
+#define HDMI_AVI_R_SAME 0x8
+#define HDMI_AVI_R_4_3_CENTER 0x9
+#define HDMI_AVI_R_16_9_CENTER 0xa
+#define HDMI_AVI_R_14_9_CENTER 0xb
+
+/* all fields little endian */
+struct hdmi_audio_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned cc:3; /* channel count */
+ unsigned res1:1;
+ unsigned ct:4; /* coding type */
+
+ /* PB2 */
+ unsigned ss:2; /* sample size */
+ unsigned sf:3; /* sample frequency */
+ unsigned res2:3;
+
+ /* PB3 */
+ unsigned cxt:5; /* coding extension type */
+ unsigned res3:3;
+
+ /* PB4 */
+ u8 ca; /* channel/speaker allocation */
+
+ /* PB5 */
+ unsigned res5:3;
+ unsigned lsv:4; /* level shift value */
+ unsigned dm_inh:1; /* downmix inhibit */
+
+ /* PB6-10 reserved */
+ u8 res6;
+ u8 res7;
+ u8 res8;
+ u8 res9;
+ u8 res10;
+} __packed;
+
+#define HDMI_AUDIO_VERSION 0x01
+
+#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2 0x1
+#define HDMI_AUDIO_CC_3 0x2
+#define HDMI_AUDIO_CC_4 0x3
+#define HDMI_AUDIO_CC_5 0x4
+#define HDMI_AUDIO_CC_6 0x5
+#define HDMI_AUDIO_CC_7 0x6
+#define HDMI_AUDIO_CC_8 0x7
+
+#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM 0x1
+#define HDMI_AUDIO_CT_AC3 0x2
+#define HDMI_AUDIO_CT_MPEG1 0x3
+#define HDMI_AUDIO_CT_MP3 0x4
+#define HDMI_AUDIO_CT_MPEG2 0x5
+#define HDMI_AUDIO_CT_AAC_LC 0x6
+#define HDMI_AUDIO_CT_DTS 0x7
+#define HDMI_AUDIO_CT_ATRAC 0x8
+#define HDMI_AUDIO_CT_DSD 0x9
+#define HDMI_AUDIO_CT_E_AC3 0xa
+#define HDMI_AUDIO_CT_DTS_HD 0xb
+#define HDMI_AUDIO_CT_MLP 0xc
+#define HDMI_AUDIO_CT_DST 0xd
+#define HDMI_AUDIO_CT_WMA_PRO 0xe
+#define HDMI_AUDIO_CT_CXT 0xf
+
+#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SF_32K 0x1
+#define HDMI_AUDIO_SF_44_1K 0x2
+#define HDMI_AUDIO_SF_48K 0x3
+#define HDMI_AUDIO_SF_88_2K 0x4
+#define HDMI_AUDIO_SF_96K 0x5
+#define HDMI_AUDIO_SF_176_4K 0x6
+#define HDMI_AUDIO_SF_192K 0x7
+
+#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT 0x1
+#define HDMI_AUDIO_SS_20BIT 0x2
+#define HDMI_AUDIO_SS_24BIT 0x3
+
+#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC 0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
+
+/* all fields little endian */
+struct hdmi_stereo_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ u8 regid0;
+
+ /* PB2 */
+ u8 regid1;
+
+ /* PB3 */
+ u8 regid2;
+
+ /* PB4 */
+ unsigned res1:5;
+ unsigned hdmi_video_format:3;
+
+ /* PB5 */
+ unsigned res2:4;
+ unsigned _3d_structure:4;
+
+ /* PB6*/
+ unsigned res3:4;
+ unsigned _3d_ext_data:4;
+} __packed;
+
+#define HDMI_VENDOR_VERSION 0x01
+
+/* register definitions */
+#define HDMI_CTXSW 0x00
+
+#define HDMI_NV_PDISP_SOR_STATE0 0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1 0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
+#define SOR_STATE_ATTACHED (1 << 3)
+
+#define HDMI_NV_PDISP_SOR_STATE2 0x03
+#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
+#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
+#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
+#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
+
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
+
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
+#define GENERIC_CTRL_ENABLE (1 << 0)
+#define GENERIC_CTRL_OTHER (1 << 4)
+#define GENERIC_CTRL_SINGLE (1 << 8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
+
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
+
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0)
+#define ACR_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_CTRL 0x44
+#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
+#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
+
+#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
+#define SPARE_HW_CTS (1 << 0)
+#define SPARE_FORCE_SW_CTS (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
+#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53
+#define HDMI_NV_PDISP_SOR_CAP 0x54
+#define HDMI_NV_PDISP_SOR_PWR 0x55
+#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
+#define SOR_PWR_NORMAL_START_ALT (1 << 1)
+#define SOR_PWR_SAFE_STATE_PD (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU (1 << 16)
+#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST 0x56
+#define HDMI_NV_PDISP_SOR_PLL0 0x57
+#define SOR_PLL_PWR (1 << 0)
+#define SOR_PLL_PDBG (1 << 1)
+#define SOR_PLL_VCAPD (1 << 2)
+#define SOR_PLL_PDPORT (1 << 3)
+#define SOR_PLL_RESISTORSEL (1 << 4)
+#define SOR_PLL_PULLDOWN (1 << 5)
+#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1 0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN (1 << 28)
+#define SOR_PLL_HALF_FULL_PE (1 << 29)
+#define SOR_PLL_S_D_PIN_PE (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2 0x59
+
+#define HDMI_NV_PDISP_SOR_CSTM 0x5a
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+
+#define HDMI_NV_PDISP_SOR_LVDS 0x5b
+#define HDMI_NV_PDISP_SOR_CRCA 0x5c
+#define HDMI_NV_PDISP_SOR_CRCB 0x5d
+#define HDMI_NV_PDISP_SOR_BLANK 0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS (1 << 28)
+#define SOR_SEQ_SWITCH (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x))
+
+#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
+#define HDMI_NV_PDISP_SOR_TRIG 0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
+
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
+#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
+#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
+#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+
+#define DRIVE_CURRENT_1_500_mA 0x00
+#define DRIVE_CURRENT_1_875_mA 0x01
+#define DRIVE_CURRENT_2_250_mA 0x02
+#define DRIVE_CURRENT_2_625_mA 0x03
+#define DRIVE_CURRENT_3_000_mA 0x04
+#define DRIVE_CURRENT_3_375_mA 0x05
+#define DRIVE_CURRENT_3_750_mA 0x06
+#define DRIVE_CURRENT_4_125_mA 0x07
+#define DRIVE_CURRENT_4_500_mA 0x08
+#define DRIVE_CURRENT_4_875_mA 0x09
+#define DRIVE_CURRENT_5_250_mA 0x0a
+#define DRIVE_CURRENT_5_625_mA 0x0b
+#define DRIVE_CURRENT_6_000_mA 0x0c
+#define DRIVE_CURRENT_6_375_mA 0x0d
+#define DRIVE_CURRENT_6_750_mA 0x0e
+#define DRIVE_CURRENT_7_125_mA 0x0f
+#define DRIVE_CURRENT_7_500_mA 0x10
+#define DRIVE_CURRENT_7_875_mA 0x11
+#define DRIVE_CURRENT_8_250_mA 0x12
+#define DRIVE_CURRENT_8_625_mA 0x13
+#define DRIVE_CURRENT_9_000_mA 0x14
+#define DRIVE_CURRENT_9_375_mA 0x15
+#define DRIVE_CURRENT_9_750_mA 0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
+
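/*
 * Sketch of a per-lane drive current setting using the encodings above;
 * the level chosen here is purely illustrative. DRIVE_CURRENT_FUSE_OVERRIDE,
 * going by its name, lets the programmed values override the fuse defaults.
 */
static inline u32 lane_drive_current_example(void)
{
	return DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
	       DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
	       DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
	       DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA) |
	       DRIVE_CURRENT_FUSE_OVERRIDE;
}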
+#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
+
+#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
+#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N 0x8c
+#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
+#define HDMI_NV_PDISP_SOR_REFCLK 0x95
+#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL 0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
+#define HDMI_SRC_DISPLAYA (0 << 0)
+#define HDMI_SRC_DISPLAYB (1 << 0)
+#define ARM_VIDEO_RANGE_FULL (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH 0x98
+#define HDMI_NV_PDISP_PE_CURRENT 0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
+#define PE_CURRENT_0_0_mA 0x0
+#define PE_CURRENT_0_5_mA 0x1
+#define PE_CURRENT_1_0_mA 0x2
+#define PE_CURRENT_1_5_mA 0x3
+#define PE_CURRENT_2_0_mA 0x4
+#define PE_CURRENT_2_5_mA 0x5
+#define PE_CURRENT_3_0_mA 0x6
+#define PE_CURRENT_3_5_mA 0x7
+#define PE_CURRENT_4_0_mA 0x8
+#define PE_CURRENT_4_5_mA 0x9
+#define PE_CURRENT_5_0_mA 0xa
+#define PE_CURRENT_5_5_mA 0xb
+#define PE_CURRENT_6_0_mA 0xc
+#define PE_CURRENT_6_5_mA 0xd
+#define PE_CURRENT_7_0_mA 0xe
+#define PE_CURRENT_7_5_mA 0xf
+
+#define HDMI_NV_PDISP_KEY_CTRL 0x9a
+#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
+
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
+#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
+
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+
+#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
new file mode 100644
index 000000000000..92e25a7e00ea
--- /dev/null
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+
+struct host1x_drm_client {
+ struct host1x_client *client;
+ struct device_node *np;
+ struct list_head list;
+};
+
+static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
+{
+ struct host1x_drm_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&client->list);
+ client->np = of_node_get(np);
+
+ list_add_tail(&client->list, &host1x->drm_clients);
+
+ return 0;
+}
+
+static int host1x_activate_drm_client(struct host1x *host1x,
+ struct host1x_drm_client *drm,
+ struct host1x_client *client)
+{
+ mutex_lock(&host1x->drm_clients_lock);
+ list_del_init(&drm->list);
+ list_add_tail(&drm->list, &host1x->drm_active);
+ drm->client = client;
+ mutex_unlock(&host1x->drm_clients_lock);
+
+ return 0;
+}
+
+static int host1x_remove_drm_client(struct host1x *host1x,
+ struct host1x_drm_client *client)
+{
+ mutex_lock(&host1x->drm_clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&host1x->drm_clients_lock);
+
+ of_node_put(client->np);
+ kfree(client);
+
+ return 0;
+}
+
+static int host1x_parse_dt(struct host1x *host1x)
+{
+ static const char * const compat[] = {
+ "nvidia,tegra20-dc",
+ "nvidia,tegra20-hdmi",
+ "nvidia,tegra30-dc",
+ "nvidia,tegra30-hdmi",
+ };
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(compat); i++) {
+ struct device_node *np;
+
+ for_each_child_of_node(host1x->dev->of_node, np) {
+ if (of_device_is_compatible(np, compat[i]) &&
+ of_device_is_available(np)) {
+ err = host1x_add_drm_client(host1x, np);
+ if (err < 0)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_host1x_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x;
+ struct resource *regs;
+ int err;
+
+ host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
+ if (!host1x)
+ return -ENOMEM;
+
+ mutex_init(&host1x->drm_clients_lock);
+ INIT_LIST_HEAD(&host1x->drm_clients);
+ INIT_LIST_HEAD(&host1x->drm_active);
+ mutex_init(&host1x->clients_lock);
+ INIT_LIST_HEAD(&host1x->clients);
+ host1x->dev = &pdev->dev;
+
+ err = host1x_parse_dt(host1x);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
+ return err;
+ }
+
+ host1x->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host1x->clk))
+ return PTR_ERR(host1x->clk);
+
+ err = clk_prepare_enable(host1x->clk);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ err = -ENXIO;
+ goto err;
+ }
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ goto err;
+
+ host1x->syncpt = err;
+
+ err = platform_get_irq(pdev, 1);
+ if (err < 0)
+ goto err;
+
+ host1x->irq = err;
+
+ host1x->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host1x->regs)) {
+ err = PTR_ERR(host1x->regs);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, host1x);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(host1x->clk);
+ return err;
+}
+
+static int tegra_host1x_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(host1x->clk);
+
+ return 0;
+}
+
+int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
+{
+ struct host1x_client *client;
+
+ mutex_lock(&host1x->clients_lock);
+
+ list_for_each_entry(client, &host1x->clients, list) {
+ if (client->ops && client->ops->drm_init) {
+ int err = client->ops->drm_init(client, drm);
+ if (err < 0) {
+ dev_err(host1x->dev,
+ "DRM setup failed for %s: %d\n",
+ dev_name(client->dev), err);
+				mutex_unlock(&host1x->clients_lock);
+				return err;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->clients_lock);
+
+ return 0;
+}
+
+int host1x_drm_exit(struct host1x *host1x)
+{
+ struct platform_device *pdev = to_platform_device(host1x->dev);
+ struct host1x_client *client;
+
+ if (!host1x->drm)
+ return 0;
+
+ mutex_lock(&host1x->clients_lock);
+
+ list_for_each_entry_reverse(client, &host1x->clients, list) {
+ if (client->ops && client->ops->drm_exit) {
+ int err = client->ops->drm_exit(client);
+ if (err < 0) {
+ dev_err(host1x->dev,
+ "DRM cleanup failed for %s: %d\n",
+ dev_name(client->dev), err);
+				mutex_unlock(&host1x->clients_lock);
+				return err;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->clients_lock);
+
+ drm_platform_exit(&tegra_drm_driver, pdev);
+ host1x->drm = NULL;
+
+ return 0;
+}
+
+int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
+{
+ struct host1x_drm_client *drm, *tmp;
+ int err;
+
+ mutex_lock(&host1x->clients_lock);
+ list_add_tail(&client->list, &host1x->clients);
+ mutex_unlock(&host1x->clients_lock);
+
+ list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
+ if (drm->np == client->dev->of_node)
+ host1x_activate_drm_client(host1x, drm, client);
+
+ if (list_empty(&host1x->drm_clients)) {
+ struct platform_device *pdev = to_platform_device(host1x->dev);
+
+ err = drm_platform_init(&tegra_drm_driver, pdev);
+ if (err < 0) {
+ dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
+ return err;
+ }
+ }
+
+ client->host1x = host1x;
+
+ return 0;
+}
+
+int host1x_unregister_client(struct host1x *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_drm_client *drm, *tmp;
+ int err;
+
+ list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
+ if (drm->client == client) {
+ err = host1x_drm_exit(host1x);
+ if (err < 0) {
+ dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
+ err);
+ return err;
+ }
+
+ host1x_remove_drm_client(host1x, drm);
+ break;
+ }
+ }
+
+ mutex_lock(&host1x->clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&host1x->clients_lock);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_host1x_of_match[] = {
+ { .compatible = "nvidia,tegra30-host1x", },
+ { .compatible = "nvidia,tegra20-host1x", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
+
+struct platform_driver tegra_host1x_driver = {
+ .driver = {
+ .name = "tegra-host1x",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_host1x_of_match,
+ },
+ .probe = tegra_host1x_probe,
+ .remove = tegra_host1x_remove,
+};
+
+static int __init tegra_host1x_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&tegra_host1x_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_driver_register(&tegra_dc_driver);
+ if (err < 0)
+ goto unregister_host1x;
+
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_dc;
+
+ return 0;
+
+unregister_dc:
+ platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+ platform_driver_unregister(&tegra_host1x_driver);
+ return err;
+}
+module_init(tegra_host1x_init);
+
+static void __exit tegra_host1x_exit(void)
+{
+ platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_dc_driver);
+ platform_driver_unregister(&tegra_host1x_driver);
+}
+module_exit(tegra_host1x_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL");
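/*
 * Minimal sketch (not part of this patch) of how a display client hooks
 * into the glue above: the client's probe obtains the host1x instance from
 * its parent platform device, fills in a struct host1x_client with an ops
 * table providing drm_init()/drm_exit(), and registers it. Once the last
 * client named in the device tree has registered, host1x_register_client()
 * calls drm_platform_init(). The ops table name example_client_ops and the
 * use of dev_get_drvdata() on the parent device are assumptions based on
 * the calls made by host1x_register_client() and host1x_drm_init().
 */
static int example_client_probe(struct platform_device *pdev)
{
	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
	struct host1x_client *client;

	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	INIT_LIST_HEAD(&client->list);
	client->ops = &example_client_ops;	/* provides .drm_init/.drm_exit */
	client->dev = &pdev->dev;

	return host1x_register_client(host1x, client);
}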
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
new file mode 100644
index 000000000000..8140fc6c34d8
--- /dev/null
+++ b/drivers/gpu/drm/tegra/output.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
+
+#include "drm.h"
+
+static int tegra_connector_get_modes(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct edid *edid = NULL;
+ int err = 0;
+
+ if (output->edid)
+ edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+ else if (output->ddc)
+ edid = drm_get_edid(connector, output->ddc);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ err = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return err;
+}
+
+static int tegra_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ enum drm_mode_status status = MODE_OK;
+ int err;
+
+ err = tegra_output_check_mode(output, mode, &status);
+ if (err < 0)
+ return MODE_ERROR;
+
+ return status;
+}
+
+static struct drm_encoder *
+tegra_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+
+ return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+ .get_modes = tegra_connector_get_modes,
+ .mode_valid = tegra_connector_mode_valid,
+ .best_encoder = tegra_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tegra_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ enum drm_connector_status status = connector_status_unknown;
+
+ if (gpio_is_valid(output->hpd_gpio)) {
+ if (gpio_get_value(output->hpd_gpio) == 0)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ } else {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ status = connector_status_connected;
+ }
+
+ return status;
+}
+
+static void tegra_connector_destroy(struct drm_connector *connector)
+{
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = tegra_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_connector_destroy,
+};
+
+static void tegra_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = tegra_encoder_destroy,
+};
+
+static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+static void tegra_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ int err;
+
+ err = tegra_output_enable(output);
+ if (err < 0)
+ dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .dpms = tegra_encoder_dpms,
+ .mode_fixup = tegra_encoder_mode_fixup,
+ .prepare = tegra_encoder_prepare,
+ .commit = tegra_encoder_commit,
+ .mode_set = tegra_encoder_mode_set,
+};
+
+static irqreturn_t hpd_irq(int irq, void *data)
+{
+ struct tegra_output *output = data;
+
+ drm_helper_hpd_irq_event(output->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+int tegra_output_parse_dt(struct tegra_output *output)
+{
+ enum of_gpio_flags flags;
+ struct device_node *ddc;
+ size_t size;
+ int err;
+
+ if (!output->of_node)
+ output->of_node = output->dev->of_node;
+
+ output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
+
+ ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
+ if (ddc) {
+ output->ddc = of_find_i2c_adapter_by_node(ddc);
+ if (!output->ddc) {
+ err = -EPROBE_DEFER;
+ of_node_put(ddc);
+ return err;
+ }
+
+ of_node_put(ddc);
+ }
+
+ if (!output->edid && !output->ddc)
+ return -ENODEV;
+
+ output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
+ "nvidia,hpd-gpio", 0,
+ &flags);
+
+ return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+ int connector, encoder, err;
+
+ if (gpio_is_valid(output->hpd_gpio)) {
+ unsigned long flags;
+
+ err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN,
+ "HDMI hotplug detect");
+ if (err < 0) {
+ dev_err(output->dev, "gpio_request_one(): %d\n", err);
+ return err;
+ }
+
+ err = gpio_to_irq(output->hpd_gpio);
+ if (err < 0) {
+ dev_err(output->dev, "gpio_to_irq(): %d\n", err);
+ goto free_hpd;
+ }
+
+ output->hpd_irq = err;
+
+ flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT;
+
+ err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
+ flags, "hpd", output);
+ if (err < 0) {
+ dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+ output->hpd_irq, err);
+ goto free_hpd;
+ }
+
+ output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ }
+
+ switch (output->type) {
+ case TEGRA_OUTPUT_RGB:
+ connector = DRM_MODE_CONNECTOR_LVDS;
+ encoder = DRM_MODE_ENCODER_LVDS;
+ break;
+
+ case TEGRA_OUTPUT_HDMI:
+ connector = DRM_MODE_CONNECTOR_HDMIA;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ break;
+
+ default:
+ connector = DRM_MODE_CONNECTOR_Unknown;
+ encoder = DRM_MODE_ENCODER_NONE;
+ break;
+ }
+
+ drm_connector_init(drm, &output->connector, &connector_funcs,
+ connector);
+ drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+
+ drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
+ drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
+ drm_sysfs_connector_add(&output->connector);
+
+ output->encoder.possible_crtcs = 0x3;
+
+ return 0;
+
+free_hpd:
+ gpio_free(output->hpd_gpio);
+
+ return err;
+}
+
+int tegra_output_exit(struct tegra_output *output)
+{
+ if (gpio_is_valid(output->hpd_gpio)) {
+ free_irq(output->hpd_irq, output);
+ gpio_free(output->hpd_gpio);
+ }
+
+ if (output->ddc)
+ put_device(&output->ddc->dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
new file mode 100644
index 000000000000..ed4416f20260
--- /dev/null
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_rgb {
+ struct tegra_output output;
+ struct clk *clk_parent;
+ struct clk *clk;
+};
+
+static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_rgb, output);
+}
+
+struct reg_entry {
+ unsigned long offset;
+ unsigned long value;
+};
+
+static const struct reg_entry rgb_enable[] = {
+ { DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 },
+ { DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 },
+ { DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 },
+};
+
+static const struct reg_entry rgb_disable[] = {
+ { DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 },
+ { DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 },
+ { DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 },
+ { DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 },
+};
+
+static void tegra_dc_write_regs(struct tegra_dc *dc,
+ const struct reg_entry *table,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ tegra_dc_writel(dc, table[i].value, table[i].offset);
+}
+
+static int tegra_output_rgb_enable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+ tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+
+ return 0;
+}
+
+static int tegra_output_rgb_disable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+ tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+
+ return 0;
+}
+
+static int tegra_output_rgb_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_rgb *rgb = to_rgb(output);
+
+ return clk_set_parent(clk, rgb->clk_parent);
+}
+
+static int tegra_output_rgb_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ /*
+ * FIXME: For now, always assume that the mode is okay. There are
+ * unresolved issues with clk_round_rate(), which doesn't always
+ * reliably report whether a frequency can be set or not.
+ */
+
+ *status = MODE_OK;
+
+ return 0;
+}
+
+static const struct tegra_output_ops rgb_ops = {
+ .enable = tegra_output_rgb_enable,
+ .disable = tegra_output_rgb_disable,
+ .setup_clock = tegra_output_rgb_setup_clock,
+ .check_mode = tegra_output_rgb_check_mode,
+};
+
+int tegra_dc_rgb_probe(struct tegra_dc *dc)
+{
+ struct device_node *np;
+ struct tegra_rgb *rgb;
+ int err;
+
+ np = of_get_child_by_name(dc->dev->of_node, "rgb");
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+ rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
+ if (!rgb)
+ return -ENOMEM;
+
+ rgb->clk = devm_clk_get(dc->dev, NULL);
+ if (IS_ERR(rgb->clk)) {
+ dev_err(dc->dev, "failed to get clock\n");
+ return PTR_ERR(rgb->clk);
+ }
+
+ rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+ if (IS_ERR(rgb->clk_parent)) {
+ dev_err(dc->dev, "failed to get parent clock\n");
+ return PTR_ERR(rgb->clk_parent);
+ }
+
+ err = clk_set_parent(rgb->clk, rgb->clk_parent);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+ return err;
+ }
+
+ rgb->output.dev = dc->dev;
+ rgb->output.of_node = np;
+
+ err = tegra_output_parse_dt(&rgb->output);
+ if (err < 0)
+ return err;
+
+ dc->rgb = &rgb->output;
+
+ return 0;
+}
+
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
+{
+ struct tegra_rgb *rgb = to_rgb(dc->rgb);
+ int err;
+
+ if (!dc->rgb)
+ return -ENODEV;
+
+ rgb->output.type = TEGRA_OUTPUT_RGB;
+ rgb->output.ops = &rgb_ops;
+
+ err = tegra_output_init(dc->base.dev, &rgb->output);
+ if (err < 0) {
+ dev_err(dc->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ /*
+	 * By default, outputs can be attached to either display controller.
+	 * RGB outputs are an exception: make sure they can only be attached
+	 * to their parent display controller.
+ */
+ rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+
+ return 0;
+}
+
+int tegra_dc_rgb_exit(struct tegra_dc *dc)
+{
+ if (dc->rgb) {
+ int err;
+
+ err = tegra_output_disable(dc->rgb);
+ if (err < 0) {
+ dev_err(dc->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ err = tegra_output_exit(dc->rgb);
+ if (err < 0) {
+ dev_err(dc->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ dc->rgb = NULL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bf6e4b5a73b5..52b20b12c83a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
if (interruptible) {
return wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->reserved) == 0);
+ !ttm_bo_is_reserved(bo));
} else {
- wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
return 0;
}
}
@@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
struct ttm_bo_global *glob = bo->glob;
int ret;
- while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+ while (unlikely(atomic_read(&bo->reserved) != 0)) {
/**
* Deadlock avoidance for multi-bo reserving.
*/
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
return ret;
}
+ atomic_set(&bo->reserved, 1);
if (use_sequence) {
/**
* Wake up waiters that may need to recheck for deadlock,
@@ -365,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -419,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, mem);
+ no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -433,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
bo->mem = tmp_mem;
bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
+ *mem = tmp_mem;
}
goto out_err;
@@ -487,40 +489,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
/*
- * Make processes trying to reserve really pick it up.
+ * Since the final reference to this bo may not be dropped by
+ * the current task we have to put a memory barrier here to make
+ * sure the changes done in this function are always visible.
+ *
+ * This function only needs protection against the final kref_put.
*/
- smp_mb__after_atomic_dec();
- wake_up_all(&bo->event_queue);
+ smp_mb__before_atomic_dec();
}
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver;
+ struct ttm_bo_driver *driver = bdev->driver;
void *sync_obj = NULL;
- void *sync_obj_arg;
int put_count;
int ret;
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
- if (!bo->sync_obj) {
-
- spin_lock(&glob->lru_lock);
-
- /**
- * Lock inversion between bo:reserve and bdev::fence_lock here,
- * but that's OK, since we're only trylocking.
- */
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY))
- goto queue;
-
+ if (!ret && !bo->sync_obj) {
spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
@@ -530,22 +525,22 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_list_ref_sub(bo, put_count, true);
return;
- } else {
- spin_lock(&glob->lru_lock);
}
-queue:
- driver = bdev->driver;
if (bo->sync_obj)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- sync_obj_arg = bo->sync_obj_arg;
+ spin_unlock(&bdev->fence_lock);
+
+ if (!ret) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ }
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bdev->fence_lock);
if (sync_obj) {
- driver->sync_obj_flush(sync_obj, sync_obj_arg);
+ driver->sync_obj_flush(sync_obj);
driver->sync_obj_unref(&sync_obj);
}
schedule_delayed_work(&bdev->wq,
@@ -553,68 +548,84 @@ queue:
}
/**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
* If bo idle, remove from delayed- and lru lists, and unref.
* If not idle, do nothing.
*
+ * Must be called with lru_lock and reservation held; this function
+ * will drop both before returning.
+ *
* @interruptible Any sleeps should occur interruptibly.
- * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
* @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
*/
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait_reserve,
- bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
struct ttm_bo_global *glob = bo->glob;
int put_count;
- int ret = 0;
+ int ret;
-retry:
spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
- if (unlikely(ret != 0))
- return ret;
+ if (ret && !no_wait_gpu) {
+ void *sync_obj;
-retry_reserve:
- spin_lock(&glob->lru_lock);
+ /*
+ * Take a reference to the fence and unreserve,
+ * at this point the buffer should be dead, so
+ * no new sync objects can be attached.
+ */
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_unlock(&bdev->fence_lock);
- if (unlikely(list_empty(&bo->ddestroy))) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- return 0;
- }
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_reserve))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
- if (unlikely(ret != 0))
+ ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+ driver->sync_obj_unref(&sync_obj);
+ if (ret)
return ret;
- goto retry_reserve;
- }
+ /*
+ * remove sync_obj with ttm_bo_wait, the wait should be
+ * finished, and no new wait object should have been added.
+ */
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+ WARN_ON(ret);
+ spin_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
- BUG_ON(ret != 0);
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- /**
- * We can re-check for sync object without taking
- * the bo::lock since setting the sync object requires
- * also bo::reserved. A busy object at this point may
- * be caused by another thread recently starting an accelerated
- * eviction.
- */
+ /*
+	 * We raced and lost: someone else holds the reservation now and
+	 * is probably busy in ttm_bo_cleanup_memtype_use.
+	 *
+	 * Even if that is not the case, delayed destruction would succeed
+	 * anyway because we finished waiting, so just return success
+	 * here.
+ */
+ if (ret) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ } else
+ spin_unlock(&bdev->fence_lock);
- if (unlikely(bo->sync_obj)) {
+ if (ret || unlikely(list_empty(&bo->ddestroy))) {
atomic_set(&bo->reserved, 0);
wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- goto retry;
+ return ret;
}
put_count = ttm_bo_del_from_lru(bo);
@@ -657,9 +668,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
- !remove_all);
+ ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ if (!ret)
+ ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+ !remove_all);
+ else
+ spin_unlock(&glob->lru_lock);
+
kref_put(&entry->list_kref, ttm_bo_release_list);
entry = nentry;
@@ -697,6 +712,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ write_lock(&bdev->vm_lock);
if (likely(bo->vm_node != NULL)) {
rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
drm_mm_put_block(bo->vm_node);
@@ -708,18 +724,14 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
- write_lock(&bdev->vm_lock);
}
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
- struct ttm_bo_device *bdev = bo->bdev;
*p_bo = NULL;
- write_lock(&bdev->vm_lock);
kref_put(&bo->kref, ttm_bo_release);
- write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
@@ -738,7 +750,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg evict_mem;
@@ -756,7 +768,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -769,7 +781,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait_reserve, no_wait_gpu);
+ no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -780,7 +792,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait_reserve, no_wait_gpu);
+ no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
@@ -794,49 +806,33 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
- int ret, put_count = 0;
+ int ret = -EBUSY, put_count;
-retry:
spin_lock(&glob->lru_lock);
- if (list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
+ list_for_each_entry(bo, &man->lru, lru) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
}
- bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (ret) {
spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(bo, interruptible,
- no_wait_reserve, no_wait_gpu);
- kref_put(&bo->list_kref, ttm_bo_release_list);
-
return ret;
}
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_reserve))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
+ kref_get(&bo->list_kref);
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+ no_wait_gpu);
kref_put(&bo->list_kref, ttm_bo_release_list);
-
- /**
- * We *need* to retry after releasing the lru lock.
- */
-
- if (unlikely(ret != 0))
- return ret;
- goto retry;
+ return ret;
}
put_count = ttm_bo_del_from_lru(bo);
@@ -846,7 +842,7 @@ retry:
ttm_bo_list_ref_sub(bo, put_count, true);
- ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -871,7 +867,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
- bool no_wait_reserve,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -884,8 +879,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_mem_evict_first(bdev, mem_type,
+ interruptible, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -950,7 +945,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1041,7 +1036,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait_reserve, no_wait_gpu);
+ interruptible, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
@@ -1054,26 +1049,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_mem_space);
-int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
-{
- if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
- return -EBUSY;
-
- return wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->cpu_writers) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_wait_cpu);
-
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
int ret = 0;
struct ttm_mem_reg mem;
struct ttm_bo_device *bdev = bo->bdev;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
/*
* FIXME: It's possible to pipeline buffer moves.
@@ -1093,10 +1078,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, placement, &mem,
+ interruptible, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false,
+ interruptible, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node)
ttm_bo_mem_put(bo, &mem);
@@ -1125,12 +1112,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
int ret;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
/* Check that range is valid */
if (placement->lpfn || placement->fpfn)
if (placement->fpfn > placement->lpfn ||
@@ -1141,7 +1128,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible,
+ no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1179,7 +1167,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
@@ -1200,7 +1187,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
return -ENOMEM;
}
- size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
pr_err("Illegal buffer object size\n");
@@ -1233,7 +1219,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.page_alignment = page_alignment;
bo->mem.bus.io_reserved_vm = false;
bo->mem.bus.io_reserved_count = 0;
- bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->seq_valid = false;
@@ -1257,7 +1242,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (ret)
goto out_err;
@@ -1306,7 +1291,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo)
@@ -1321,8 +1305,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
- buffer_start, interruptible,
- persistent_swap_storage, acc_size, NULL, NULL);
+ interruptible, persistent_swap_storage, acc_size,
+ NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
@@ -1344,7 +1328,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1577,7 +1561,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
goto out_no_addr_mm;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
- bdev->nice_mode = true;
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = NULL;
bdev->glob = glob;
@@ -1721,7 +1704,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
struct ttm_bo_driver *driver = bo->bdev->driver;
struct ttm_bo_device *bdev = bo->bdev;
void *sync_obj;
- void *sync_obj_arg;
int ret = 0;
if (likely(bo->sync_obj == NULL))
@@ -1729,7 +1711,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
while (bo->sync_obj) {
- if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+ if (driver->sync_obj_signaled(bo->sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
@@ -1743,9 +1725,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
return -EBUSY;
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- sync_obj_arg = bo->sync_obj_arg;
spin_unlock(&bdev->fence_lock);
- ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
+ ret = driver->sync_obj_wait(sync_obj,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
@@ -1753,8 +1734,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
return ret;
}
spin_lock(&bdev->fence_lock);
- if (likely(bo->sync_obj == sync_obj &&
- bo->sync_obj_arg == sync_obj_arg)) {
+ if (likely(bo->sync_obj == sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
@@ -1797,8 +1777,7 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
- if (atomic_dec_and_test(&bo->cpu_writers))
- wake_up_all(&bo->event_queue);
+ atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
@@ -1817,40 +1796,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&glob->lru_lock);
- while (ret == -EBUSY) {
- if (unlikely(list_empty(&glob->swap_lru))) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
- }
-
- bo = list_first_entry(&glob->swap_lru,
- struct ttm_buffer_object, swap);
- kref_get(&bo->list_kref);
+ list_for_each_entry(bo, &glob->swap_lru, swap) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
+ }
- if (!list_empty(&bo->ddestroy)) {
- spin_unlock(&glob->lru_lock);
- (void) ttm_bo_cleanup_refs(bo, false, false, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- continue;
- }
+ if (ret) {
+ spin_unlock(&glob->lru_lock);
+ return ret;
+ }
- /**
- * Reserve buffer. Since we unlock while sleeping, we need
- * to re-check that nobody removed us from the swap-list while
- * we slept.
- */
+ kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- ttm_bo_wait_unreserved(bo, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- }
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ return ret;
}
- BUG_ON(ret != 0);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
@@ -1876,7 +1840,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false, false);
+ false, false);
if (unlikely(ret != 0))
goto out;
}
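/*
 * Sketch of the calling convention introduced above: the caller takes the
 * LRU lock, trylocks the reservation under it, and then lets
 * ttm_bo_cleanup_refs_and_unlock() drop both before it returns. This
 * mirrors what ttm_bo_delayed_delete() and ttm_bo_swapout() now do; the
 * surrounding retry and error handling is elided.
 */
static int example_try_cleanup(struct ttm_bo_global *glob,
			       struct ttm_buffer_object *bo)
{
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
	if (!ret)
		/* drops both the reservation and glob->lru_lock */
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
	else
		spin_unlock(&glob->lru_lock);

	return ret;
}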
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2026060f03e0..8be35c809c7b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve,
+ bool evict,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret)
+ if (ret) {
+ /* if we fail here don't nuke the mm node
+ * as the bo still owns it */
+ old_copy.mm_node = NULL;
goto out1;
+ }
}
add = 0;
@@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
- if (ret)
+ if (ret) {
+ /* failing here, means keep old copy as-is */
+ old_copy.mm_node = NULL;
goto out1;
+ }
}
mb();
out2:
@@ -422,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
- fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+ fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
@@ -441,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
- fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_lock(&bdev->fence_lock);
+ if (bo->sync_obj)
+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ else
+ fbo->sync_obj = NULL;
+ spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
@@ -611,8 +623,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
- void *sync_obj_arg,
- bool evict, bool no_wait_reserve,
+ bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
@@ -630,7 +641,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->sync_obj = NULL;
}
bo->sync_obj = driver->sync_obj_ref(sync_obj);
- bo->sync_obj_arg = sync_obj_arg;
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
spin_unlock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ba72dbdc4bd..74705f329d99 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
(vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
+ if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
+ bo = NULL;
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1937069432c5..cd9e4523dc56 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -185,10 +185,7 @@ retry_this_bo:
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
- ret = ttm_bo_wait_cpu(bo, false);
- if (ret)
- return ret;
- goto retry;
+ return -EBUSY;
}
}
@@ -216,19 +213,18 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
driver = bdev->driver;
glob = bo->glob;
- spin_lock(&bdev->fence_lock);
spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
- bo->sync_obj_arg = entry->new_sync_obj_arg;
ttm_bo_unreserve_locked(bo);
entry->reserved = false;
}
- spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 479c6b0467ca..dbc2def887cd 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -367,7 +367,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
spin_lock_init(&glob->lock);
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
INIT_WORK(&glob->work, ttm_shrink_work);
- init_waitqueue_head(&glob->queue);
ret = kobject_init_and_add(
&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index c7857874956a..58a5f3261c0b 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -80,7 +80,7 @@ struct ttm_object_file {
*/
struct ttm_object_device {
- rwlock_t object_lock;
+ spinlock_t object_lock;
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
@@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->refcount_release = refcount_release;
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
- write_lock(&tdev->object_lock);
kref_init(&base->refcount);
- ret = drm_ht_just_insert_please(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
- write_unlock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
+ ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ spin_unlock(&tdev->object_lock);
if (unlikely(ret != 0))
goto out_err0;
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0;
out_err1:
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ spin_lock(&tdev->object_lock);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ spin_unlock(&tdev->object_lock);
out_err0:
return ret;
}
@@ -186,30 +188,29 @@ static void ttm_release_base(struct kref *kref)
container_of(kref, struct ttm_base_object, refcount);
struct ttm_object_device *tdev = base->tfile->tdev;
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
- write_unlock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ spin_unlock(&tdev->object_lock);
+
+ /*
+ * Note: We don't use synchronize_rcu() here because it's far
+ * too slow. It's up to the user to free the object using
+ * call_rcu() or ttm_base_object_kfree().
+ */
+
if (base->refcount_release) {
ttm_object_file_unref(&base->tfile);
base->refcount_release(&base);
}
- write_lock(&tdev->object_lock);
}
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
- struct ttm_object_device *tdev = base->tfile->tdev;
*p_base = NULL;
- /*
- * Need to take the lock here to avoid racing with
- * users trying to look up the object.
- */
-
- write_lock(&tdev->object_lock);
kref_put(&base->refcount, ttm_release_base);
- write_unlock(&tdev->object_lock);
}
EXPORT_SYMBOL(ttm_base_object_unref);
@@ -221,14 +222,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
struct drm_hash_item *hash;
int ret;
- read_lock(&tdev->object_lock);
- ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
- kref_get(&base->refcount);
+ ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
}
- read_unlock(&tdev->object_lock);
+ rcu_read_unlock();
if (unlikely(ret != 0))
return NULL;
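The lookup above relies on kref_get_unless_zero(): under rcu_read_lock() a reader may still find an object whose last reference is concurrently being dropped, and the get must then fail instead of resurrecting a dying object. A hedged sketch of the same pattern with a toy object type (not TTM code):

#include <linux/kref.h>
#include <linux/rcupdate.h>

struct toy_obj {
	struct kref refcount;
	struct rcu_head rcu;
};

/* Returns a referenced object, or NULL if the object was already dying. */
static struct toy_obj *toy_tryget(struct toy_obj *candidate)
{
	struct toy_obj *obj = NULL;

	rcu_read_lock();
	if (candidate && kref_get_unless_zero(&candidate->refcount))
		obj = candidate;
	rcu_read_unlock();
	return obj;
}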
@@ -426,7 +427,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
return NULL;
tdev->mem_glob = mem_glob;
- rwlock_init(&tdev->object_lock);
+ spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
@@ -444,9 +445,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL;
- write_lock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
- write_unlock(&tdev->object_lock);
+ spin_unlock(&tdev->object_lock);
kfree(tdev);
}
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 56e0bf31d425..6222af19f456 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -1,6 +1,6 @@
config DRM_UDL
tristate "DisplayLink"
- depends on DRM && EXPERIMENTAL
+ depends on DRM
depends on USB_ARCH_HAS_HCD
select DRM_USB
select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b3b2cedf6745..fe5cdbcf2636 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -22,13 +22,17 @@
static u8 *udl_get_edid(struct udl_device *udl)
{
u8 *block;
- char rbuf[3];
+ char *rbuf;
int ret, i;
block = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (block == NULL)
return NULL;
+ rbuf = kmalloc(2, GFP_KERNEL);
+ if (rbuf == NULL)
+ goto error;
+
for (i = 0; i < EDID_LENGTH; i++) {
ret = usb_control_msg(udl->ddev->usbdev,
usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
HZ);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
- i--;
goto error;
}
block[i] = rbuf[1];
}
+ kfree(rbuf);
return block;
error:
kfree(block);
+ kfree(rbuf);
return NULL;
}
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
edid = (struct edid *)udl_get_edid(udl);
+ /*
+ * We only read the main block, but if the monitor reports extension
+ * blocks then the drm edid code expects them to be present, so patch
+ * the extension count to 0.
+ */
+ edid->checksum += edid->extensions;
+ edid->extensions = 0;
+
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
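The checksum adjustment above follows from the EDID rule that the 128 bytes of the base block sum to 0 modulo 256: zeroing the extension-count byte removes its old value from that sum, and adding the same value to the checksum byte restores it. A hedged helper sketch (not part of the driver) that re-checks the invariant:

/* Hedged sketch, not in the driver: verify the EDID base-block rule
 * that all EDID_LENGTH (128) bytes sum to 0 modulo 256. */
static bool edid_block_sum_is_zero(const u8 *block)
{
	u8 sum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH; i++)
		sum += block[i];
	return sum == 0;
}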
@@ -84,7 +97,8 @@ udl_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
+static struct drm_encoder*
+udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_mode_object *obj;
@@ -97,8 +111,9 @@ struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
return encoder;
}
-int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property,
- uint64_t val)
+static int udl_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
{
return 0;
}
@@ -110,13 +125,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
.best_encoder = udl_best_single_encoder,
};
-struct drm_connector_funcs udl_connector_funcs = {
+static struct drm_connector_funcs udl_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -138,7 +153,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_sysfs_connector_add(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c8c11f..2cc6cd91ac11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_surface.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..8369c3ba10fe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -0,0 +1,909 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at-most four active channels in a block:
+ * 1. Red, bump W, luminance and depth are stored in the first channel.
+ * 2. Green, bump V and stencil are stored in the second channel.
+ * 3. Blue and bump U are stored in the third channel.
+ * 4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ * 1. For compressed formats, only the data channel is used and its size
+ * is equal to that of a singular block in the compression scheme.
+ * 2. For buffer formats, only the data channel is used and its size is
+ * exactly one byte in length.
+ * 3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+ SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel
+ data */
+ SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
+ data */
+ SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
+ U and V */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
+ data */
+ SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
+ data */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
+ channel */
+ SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel
+ data */
+ SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
+ data */
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
+ data */
+ SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
+ data */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
+ channel */
+ SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
+ data */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
+ data */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
+ data depending on the
+ compression method used */
+ SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
+ floating point
+ representation in
+ all channels */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
+ data. */
+ SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
+ SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
+ SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
+ SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
+ e.g., NV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
+ Y, U, V, e.g., YV12. */
+
+ SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q,
+ SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
+ SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_Y |
+ SVGA3DBLOCKDESC_U_VIDEO |
+ SVGA3DBLOCKDESC_V_VIDEO,
+ SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_EXP,
+ SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ * 1. Block description.
+ * 2. Dimensions of a block in the surface.
+ * 3. Size of block in bytes.
+ * 4. Bit depth of the pixel data.
+ * 5. Channel bit depths and masks (if applicable).
+ */
+#define SVGA3D_CHANNEL_DEF(type) \
+ struct { \
+ union { \
+ type blue; \
+ type u; \
+ type uv_video; \
+ type u_video; \
+ }; \
+ union { \
+ type green; \
+ type v; \
+ type stencil; \
+ type v_video; \
+ }; \
+ union { \
+ type red; \
+ type w; \
+ type luminance; \
+ type y; \
+ type depth; \
+ type data; \
+ }; \
+ union { \
+ type alpha; \
+ type q; \
+ type exp; \
+ }; \
+ }
+
+struct svga3d_surface_desc {
+ enum svga3d_block_desc block_desc;
+ surf_size_struct block_size;
+ u32 bytes_per_block;
+ u32 pitch_bytes_per_block;
+
+ struct {
+ u32 total;
+ SVGA3D_CHANNEL_DEF(uint8);
+ } bit_depth;
+
+ struct {
+ SVGA3D_CHANNEL_DEF(uint8);
+ } bit_offset;
+};
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+ {SVGA3DBLOCKDESC_NONE,
+ {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
+ {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
+ {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
+ {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
+ {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
+ {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
+
+ {SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
+
+ {SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
+ {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
+
+ {SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
+
+ {SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
+ {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
+ {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
+ {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
+
+ {SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
+
+ {SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
+
+ {SVGA3DBLOCKDESC_UVWA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
+
+ {SVGA3DBLOCKDESC_ALPHA,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
+
+ {SVGA3DBLOCKDESC_BUFFER,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
+ {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
+
+ {SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
+ {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
+
+ {SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
+
+ {SVGA3DBLOCKDESC_NV12,
+ {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
+
+ {SVGA3DBLOCKDESC_AYUV,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
+
+ {SVGA3DBLOCKDESC_UVW,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
+
+ {SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
+
+ {SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
+ {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBE,
+ {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
+ {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGB_SRGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+ uint64_t tmp = (uint64_t) a*b;
+ return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
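
A quick worked example of the clamp above (values are hypothetical): 0x10000 * 0x10000 is 2^32, which does not fit in a u32, so the helper returns the saturated maximum instead of silently wrapping to 0.

/* Hypothetical values illustrating the saturation behaviour. */
u32 a = 0x10000, b = 0x10000;
u32 clamped = clamped_umul32(a, b);	/* 0xffffffff, not 0 */
u32 small   = clamped_umul32(3, 4);	/* 12, unchanged */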
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+ if (format < ARRAY_SIZE(svga3d_surface_descs))
+ return &svga3d_surface_descs[format];
+
+ return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
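
As the comment block for enum svga3d_block_desc notes, composite descriptors such as SVGA3DBLOCKDESC_RGBA are plain ORs of the channel bits, so a caller can test a format for a given channel with a bitwise AND on the table entry returned above. A minimal sketch (not part of the header):

static inline bool svga3d_format_has_alpha_example(SVGA3dSurfaceFormat format)
{
	return (svga3dsurface_get_desc(format)->block_desc &
		SVGA3DBLOCKDESC_ALPHA) != 0;
}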
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ * Given a base level size and the mip level, compute the size of
+ * the mip level.
+ *
+ * Results:
+ * See above.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+ surf_size_struct size;
+
+ size.width = max_t(u32, base_level.width >> mip_level, 1);
+ size.height = max_t(u32, base_level.height >> mip_level, 1);
+ size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+ return size;
+}
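
A worked example with hypothetical sizes: a 256x128x4 base level queried at mip level 3 becomes max(256 >> 3, 1) x max(128 >> 3, 1) x max(4 >> 3, 1) = 32x16x1.

surf_size_struct base = { 256, 128, 4 };
surf_size_struct mip3 = svga3dsurface_get_mip_size(base, 3);	/* 32x16x1 */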
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *pixel_size,
+ surf_size_struct *block_size)
+{
+ block_size->width = DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+ return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size)
+{
+ u32 pitch;
+ surf_size_struct blocks;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+ pitch = blocks.width * desc->pitch_bytes_per_block;
+
+ return pitch;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ * Return the number of bytes of buffer space required to store
+ * one image of a surface, optionally using the specified pitch.
+ *
+ * If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ * This function is overflow-safe. If the result would have
+ * overflowed, instead we return MAX_UINT32.
+ *
+ * Results:
+ * Byte count.
+ *
+ * Side effects:
+ * None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size,
+ u32 pitch)
+{
+ surf_size_struct image_blocks;
+ u32 slice_size, total_size;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+ if (svga3dsurface_is_planar_surface(desc)) {
+ total_size = clamped_umul32(image_blocks.width,
+ image_blocks.height);
+ total_size = clamped_umul32(total_size, image_blocks.depth);
+ total_size = clamped_umul32(total_size, desc->bytes_per_block);
+ return total_size;
+ }
+
+ if (pitch == 0)
+ pitch = svga3dsurface_calculate_pitch(desc, size);
+
+ slice_size = clamped_umul32(image_blocks.height, pitch);
+ total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+ return total_size;
+}
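
Two worked examples with hypothetical sizes, using format names from svga3d_reg.h. SVGA3D_A8R8G8B8 at 64x64x1 with pitch 0 uses 1x1 blocks of 4 bytes, so the computed pitch is 64 * 4 = 256 and the image takes 64 * 256 = 16384 bytes. SVGA3D_NV12 at 64x64x1 takes the planar branch with 2x2 blocks of 6 bytes, giving 32 * 32 * 1 * 6 = 6144 bytes (1.5 bytes per pixel).

surf_size_struct sz = { 64, 64, 1 };
u32 argb_bytes = svga3dsurface_get_image_buffer_size(
	svga3dsurface_get_desc(SVGA3D_A8R8G8B8), &sz, 0);	/* 16384 */
u32 nv12_bytes = svga3dsurface_get_image_buffer_size(
	svga3dsurface_get_desc(SVGA3D_NV12), &sz, 0);		/* 6144 */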
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ bool cubemap)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ u32 total_size = 0;
+ u32 mip;
+
+ for (mip = 0; mip < num_mip_levels; mip++) {
+ surf_size_struct size =
+ svga3dsurface_get_mip_size(base_level_size, mip);
+ total_size += svga3dsurface_get_image_buffer_size(desc,
+ &size, 0);
+ }
+
+ if (cubemap)
+ total_size *= SVGA3D_MAX_SURFACE_FACES;
+
+ return total_size;
+}
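
Continuing the example above (hypothetical sizes): a 64x64 SVGA3D_A8R8G8B8 texture with 3 mip levels serializes to 16384 + 4096 + 1024 = 21504 bytes, and a cubemap multiplies that by SVGA3D_MAX_SURFACE_FACES (6), giving 129024 bytes.

surf_size_struct base64 = { 64, 64, 1 };
u32 tex_bytes  = svga3dsurface_get_serialized_size(SVGA3D_A8R8G8B8,
						   base64, 3, false);
u32 cube_bytes = svga3dsurface_get_serialized_size(SVGA3D_A8R8G8B8,
						   base64, 3, true);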
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @width: The image width in pixels.
+ * @height: The image height in pixels
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+ u32 width, u32 height,
+ u32 x, u32 y, u32 z)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+ const u32 bd = desc->block_size.depth;
+ const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+ const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 offset = (z / bd * imgstride +
+ y / bh * rowstride +
+ x / bw * desc->bytes_per_block);
+ return offset;
+}
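
A worked example with hypothetical coordinates: for a 64x64 SVGA3D_A8R8G8B8 image the block size is 1x1x1 and rowstride = 64 * 4 = 256, so pixel (x=3, y=2, z=0) lives at 2 * 256 + 3 * 4 = 524 bytes from the start of the image.

u32 off = svga3dsurface_get_pixel_offset(SVGA3D_A8R8G8B8,
					 64, 64, 3, 2, 0);	/* 524 */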
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+ surf_size_struct baseLevelSize,
+ u32 numMipLevels,
+ u32 face,
+ u32 mip)
+
+{
+ u32 offset;
+ u32 mipChainBytes;
+ u32 mipChainBytesToLevel;
+ u32 i;
+ const struct svga3d_surface_desc *desc;
+ surf_size_struct mipSize;
+ u32 bytes;
+
+ desc = svga3dsurface_get_desc(format);
+
+ mipChainBytes = 0;
+ mipChainBytesToLevel = 0;
+ for (i = 0; i < numMipLevels; i++) {
+ mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+ bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+ mipChainBytes += bytes;
+ if (i < mip)
+ mipChainBytesToLevel += bytes;
+ }
+
+ offset = mipChainBytes * face + mipChainBytesToLevel;
+
+ return offset;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 9826fbc88154..96dc84dc34d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
*placement = vmw_sys_placement;
}
-/**
- * FIXME: Proper access checks on buffers.
- */
-
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct ttm_object_file *tfile =
+ vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+ return vmw_user_dmabuf_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -310,27 +309,23 @@ static void vmw_sync_obj_unref(void **sync_obj)
vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}
-static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int vmw_sync_obj_flush(void *sync_obj)
{
vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
return 0;
}
-static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool vmw_sync_obj_signaled(void *sync_obj)
{
- unsigned long flags = (unsigned long) sync_arg;
return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
- (uint32_t) flags);
+ DRM_VMW_FENCE_FLAG_EXEC);
}
-static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
- bool lazy, bool interruptible)
+static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
- unsigned long flags = (unsigned long) sync_arg;
-
return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
- (uint32_t) flags,
+ DRM_VMW_FENCE_FLAG_EXEC,
lazy, interruptible,
VMW_FENCE_WAIT_TIMEOUT);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 000000000000..00ae0925aca8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+ struct ttm_base_object base;
+ struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+ .object_type = VMW_RES_CONTEXT,
+ .base_obj_to_res = vmw_user_context_base_to_res,
+ .res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+ &user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+ .res_type = vmw_res_context,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "legacy contexts",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyContext body;
+ } *cmd;
+
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineContext body;
+ } *cmd;
+
+ ret = vmw_resource_init(dev_priv, res, false,
+ res_free, &vmw_legacy_context_func);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a resource id.\n");
+ goto out_early;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+ DRM_ERROR("Out of hw context ids.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+
+out_early:
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ ret = vmw_context_init(dev_priv, res, NULL);
+
+ return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ ttm_base_object_kfree(ctx, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_context *ctx =
+ container_of(base, struct vmw_user_context, base);
+ struct vmw_resource *res = &ctx->res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_context *ctx;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of contexts anyway.
+ */
+
+ if (unlikely(vmw_user_context_size == 0))
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_context_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for context"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (unlikely(ctx == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ res = &ctx->res;
+ ctx->base.shareable = false;
+ ctx->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ tmp = vmw_resource_reference(&ctx->res);
+ ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+ &vmw_user_context_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->cid = ctx->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d1498bfd7873..5fae06ad7e25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
ttm_bo_unreserve(bo);
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
return ret;
if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
else
placement = &vmw_vram_gmr_placement;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto err_unreserve;
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
else
placement = &vmw_vram_placement;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
err_unreserve:
ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
return ret;
if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
- false, false);
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
- ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, &placement, interruptible, false);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
@@ -304,7 +302,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
BUG_ON(old_mem_type != TTM_PL_VRAM &&
old_mem_type != VMW_PL_GMR);
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
placement.num_placement = 1;
placement.placement = &pl_flags;
- ret = ttm_bo_validate(bo, &placement, false, true, true);
+ ret = ttm_bo_validate(bo, &placement, false, true);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2dd185e42f21..161f8b2549aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
PAGE_SIZE,
ttm_bo_type_device,
&vmw_vram_sys_placement,
- 0, 0, false, NULL,
+ 0, false, NULL,
&dev_priv->dummy_query_bo);
}
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
struct vmw_private *dev_priv;
int ret;
uint32_t svga_id;
+ enum vmw_res_type i;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
rwlock_init(&dev_priv->resource_lock);
- idr_init(&dev_priv->context_idr);
- idr_init(&dev_priv->surface_idr);
- idr_init(&dev_priv->stream_idr);
+
+ for (i = vmw_res_context; i < vmw_res_max; ++i) {
+ idr_init(&dev_priv->res_idr[i]);
+ INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+ }
+
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
atomic_set(&dev_priv->fifo_queue_waiters, 0);
- INIT_LIST_HEAD(&dev_priv->surface_lru);
+
dev_priv->used_memory_size = 0;
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -609,14 +613,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
}
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+ ret = drm_irq_install(dev);
+ if (ret != 0) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ goto out_no_irq;
+ }
+ }
+
dev_priv->fman = vmw_fence_manager_init(dev_priv);
if (unlikely(dev_priv->fman == NULL))
goto out_no_fman;
- /* Need to start the fifo to check if we can do screen objects */
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- goto out_no_fifo;
vmw_kms_save_vga(dev_priv);
/* Start kms and overlay systems, needs fifo. */
@@ -625,25 +633,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_kms;
vmw_overlay_init(dev_priv);
- /* 3D Depends on Screen Objects being used. */
- DRM_INFO("Detected %sdevice 3D availability.\n",
- vmw_fifo_have_3d(dev_priv) ?
- "" : "no ");
-
- /* We might be done with the fifo now */
if (dev_priv->enable_fb) {
+ ret = vmw_3d_resource_inc(dev_priv, true);
+ if (unlikely(ret != 0))
+ goto out_no_fifo;
vmw_fb_init(dev_priv);
- } else {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- }
-
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed installing irq: %d\n", ret);
- goto out_no_irq;
- }
}
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
@@ -651,20 +645,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
return 0;
-out_no_irq:
- if (dev_priv->enable_fb)
- vmw_fb_close(dev_priv);
+out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- /* We still have a 3D resource reference held */
- if (dev_priv->enable_fb) {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, false);
- }
-out_no_fifo:
+ vmw_kms_restore_vga(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+out_no_irq:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -684,9 +674,9 @@ out_err2:
out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
- idr_destroy(&dev_priv->surface_idr);
- idr_destroy(&dev_priv->context_idr);
- idr_destroy(&dev_priv->stream_idr);
+ for (i = vmw_res_context; i < vmw_res_max; ++i)
+ idr_destroy(&dev_priv->res_idr[i]);
+
kfree(dev_priv);
return ret;
}
@@ -694,13 +684,14 @@ out_err0:
static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
+ if (dev_priv->ctx.res_ht_initialized)
+ drm_ht_remove(&dev_priv->ctx.res_ht);
if (dev_priv->ctx.cmd_bounce)
vfree(dev_priv->ctx.cmd_bounce);
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
@@ -709,6 +700,8 @@ static int vmw_driver_unload(struct drm_device *dev)
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -723,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
vmw_ttm_global_release(dev_priv);
- idr_destroy(&dev_priv->surface_idr);
- idr_destroy(&dev_priv->context_idr);
- idr_destroy(&dev_priv->stream_idr);
+
+ for (i = vmw_res_context; i < vmw_res_max; ++i)
+ idr_destroy(&dev_priv->res_idr[i]);
kfree(dev_priv);
@@ -924,11 +917,11 @@ static int vmw_master_set(struct drm_device *dev,
out_no_active_lock:
if (!dev_priv->enable_fb) {
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
}
return ret;
}
@@ -949,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -962,11 +955,11 @@ static void vmw_master_drop(struct drm_device *dev,
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
if (unlikely(ret != 0))
DRM_ERROR("Unable to clean VRAM on master drop.\n");
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1001,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_resource_evict_all(dev_priv);
ttm_bo_swapout_all(&dev_priv->bdev);
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 88a179e26de9..13aeda71280e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
struct ttm_buffer_object base;
- struct list_head validate_list;
- bool gmr_bound;
- uint32_t cur_validate_node;
- bool on_validate_list;
+ struct list_head res_list;
};
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure contains also driver private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+ struct ttm_validate_buffer base;
+ struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
- struct idr *idr;
int id;
- enum ttm_object_type res_type;
bool avail;
- void (*remove_from_lists) (struct vmw_resource *res);
- void (*hw_destroy) (struct vmw_resource *res);
+ unsigned long backup_size;
+ bool res_dirty; /* Protected by backup buffer reserved */
+ bool backup_dirty; /* Protected by backup buffer reserved */
+ struct vmw_dma_buffer *backup;
+ unsigned long backup_offset;
+ const struct vmw_res_func *func;
+ struct list_head lru_head; /* Protected by the resource lock */
+ struct list_head mob_head; /* Protected by @backup reserved */
void (*res_free) (struct vmw_resource *res);
- struct list_head validate_head;
- struct list_head query_head; /* Protected by the cmdbuf mutex */
- /* TODO is a generic snooper needed? */
-#if 0
- void (*snoop)(struct vmw_resource *res,
- struct ttm_object_file *tfile,
- SVGA3dCmdHeader *header);
- void *snoop_priv;
-#endif
+ void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+ vmw_res_context,
+ vmw_res_surface,
+ vmw_res_stream,
+ vmw_res_max
};
struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
struct vmw_surface {
struct vmw_resource res;
- struct list_head lru_head; /* Protected by the resource lock */
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ struct drm_vmw_size base_size;
struct drm_vmw_size *sizes;
uint32_t num_sizes;
-
bool scanout;
-
/* TODO so far just a extra pointer */
struct vmw_cursor_snooper snooper;
- struct ttm_buffer_object *backup;
struct vmw_surface_offset *offsets;
- uint32_t backup_size;
+ SVGA3dTextureFilter autogen_filter;
+ uint32_t multisample_count;
};
struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
uint32_t index;
};
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+ bool valid;
+ uint32_t handle;
+ struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+};
+
struct vmw_sw_context{
- struct ida bo_list;
- uint32_t last_cid;
- bool cid_valid;
+ struct drm_open_hash res_ht;
+ bool res_ht_initialized;
bool kernel; /**< is the called made from the kernel */
- struct vmw_resource *cur_ctx;
- uint32_t last_sid;
- uint32_t sid_translation;
- bool sid_valid;
struct ttm_object_file *tfile;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
- struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+ struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
uint32_t fence_flags;
- struct list_head query_list;
struct ttm_buffer_object *cur_query_bo;
- uint32_t cur_query_cid;
- bool query_cid_valid;
+ struct list_head res_relocations;
+ uint32_t *buf_start;
+ struct vmw_res_cache_entry res_cache[vmw_res_max];
+ struct vmw_resource *last_query_ctx;
+ bool needs_post_query_barrier;
+ struct vmw_resource *error_resource;
};
struct vmw_legacy_display;
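
The res_cache entries documented above exist purely to short-circuit repeated user-space handle lookups within a single submission batch. A minimal, self-contained sketch of that fast-path idea follows; the types and names here are hypothetical stand-ins for illustration, not the driver's code:

#include <stdint.h>

/* Hypothetical stand-ins for illustration only. */
struct res { int id; };
struct res_cache_entry { int valid; uint32_t handle; struct res *res; };

/* Return the cached resource when the same handle repeats,
 * otherwise signal that the caller must do the full lookup. */
static struct res *cache_lookup(struct res_cache_entry *c, uint32_t handle)
{
	if (c->valid && c->handle == handle)
		return c->res;	/* fast path: no handle lookup */
	return NULL;		/* slow path: full lookup, then fill cache */
}

static void cache_fill(struct res_cache_entry *c, uint32_t handle, struct res *r)
{
	c->valid = 1;
	c->handle = handle;
	c->res = r;
}
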
@@ -242,10 +272,7 @@ struct vmw_private {
*/
rwlock_t resource_lock;
- struct idr context_idr;
- struct idr surface_idr;
- struct idr stream_idr;
-
+ struct idr res_idr[vmw_res_max];
/*
* Block lastclose from racing with firstopen.
*/
@@ -320,6 +347,7 @@ struct vmw_private {
struct ttm_buffer_object *dummy_query_bo;
struct ttm_buffer_object *pinned_bo;
uint32_t query_cid;
+ uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
/*
@@ -329,10 +357,15 @@ struct vmw_private {
* protected by the cmdbuf mutex for simplicity.
*/
- struct list_head surface_lru;
+ struct list_head res_lru[vmw_res_max];
uint32_t used_memory_size;
};
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_surface, res);
+}
+
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
* Resource utilities - vmwgfx_resource.c
*/
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+ struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv *converter,
+ struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
size_t size, struct ttm_placement *placement,
bool interuptable,
void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
@@ -699,10 +745,13 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
struct vmw_dma_buffer *tmp_buf = *buf;
- struct ttm_buffer_object *bo = &tmp_buf->base;
+
*buf = NULL;
+ if (tmp_buf != NULL) {
+ struct ttm_buffer_object *bo = &tmp_buf->base;
- ttm_bo_unref(&bo);
+ ttm_bo_unref(&bo);
+ }
}
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
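
The hunk above makes vmw_dmabuf_unreference() tolerate a NULL buffer pointer and clear the caller's pointer before dropping the underlying TTM reference. Below is a stand-alone sketch of that "take, clear, then release" pattern with generic names; it is illustrative only and not the driver code:

#include <stdlib.h>

struct object { int refcount; };

static void object_put(struct object *obj)
{
	if (--obj->refcount == 0)
		free(obj);
}

/* Clear the caller's pointer first, then drop the reference only if
 * there was anything to drop.  Safe to call with *pobj == NULL. */
static void object_unreference(struct object **pobj)
{
	struct object *tmp = *pobj;

	*pobj = NULL;
	if (tmp != NULL)
		object_put(tmp);
}
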
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 30654b4cc972..394e6476105b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset, in units of 4-byte entries, into the command buffer where the
+ * id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+ struct list_head head;
+ const struct vmw_resource *res;
+ unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @switch_backup: Boolean whether to switch backup buffer on unreserve.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: Resources do not need to allocate a backup buffer on
+ * reservation. The command stream will provide one.
+ */
+struct vmw_resource_val_node {
+ struct list_head head;
+ struct drm_hash_item hash;
+ struct vmw_resource *res;
+ struct vmw_dma_buffer *new_backup;
+ unsigned long new_backup_offset;
+ bool first_usage;
+ bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: List of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+ bool backoff)
+{
+ struct vmw_resource_val_node *val;
+
+ list_for_each_entry(val, list, head) {
+ struct vmw_resource *res = val->res;
+ struct vmw_dma_buffer *new_backup =
+ backoff ? NULL : val->new_backup;
+
+ vmw_resource_unreserve(res, new_backup,
+ val->new_backup_offset);
+ vmw_dmabuf_unreference(&val->new_backup);
+ }
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return, points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res,
+ struct vmw_resource_val_node **p_node)
+{
+ struct vmw_resource_val_node *node;
+ struct drm_hash_item *hash;
+ int ret;
+
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+ &hash) == 0)) {
+ node = container_of(hash, struct vmw_resource_val_node, hash);
+ node->first_usage = false;
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+ return 0;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(node == NULL)) {
+ DRM_ERROR("Failed to allocate a resource validation "
+ "entry.\n");
+ return -ENOMEM;
+ }
+
+ node->hash.key = (unsigned long) res;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a resource validation "
+ "entry.\n");
+ kfree(node);
+ return ret;
+ }
+ list_add_tail(&node->head, &sw_context->resource_list);
+ node->res = vmw_resource_reference(res);
+ node->first_usage = true;
+
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+
+ return 0;
+}
+
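vmw_resource_val_add() above relies on the sw_context hash table so that each resource ends up with exactly one validation node no matter how many commands reference it. The driver uses drm_open_hash for the lookup; the sketch below shows the same find-or-insert idea with a plain array, purely for illustration and with invented names:

#include <stddef.h>

struct node { unsigned long key; int first_usage; };

struct set { struct node *nodes; size_t count, cap; };

/* Return the existing node for key, or append a new one.  NULL when full. */
static struct node *find_or_add(struct set *s, unsigned long key)
{
	size_t i;

	for (i = 0; i < s->count; ++i)
		if (s->nodes[i].key == key) {
			s->nodes[i].first_usage = 0;	/* seen before */
			return &s->nodes[i];
		}

	if (s->count == s->cap)
		return NULL;				/* keep the sketch simple */

	s->nodes[s->count].key = key;
	s->nodes[s->count].first_usage = 1;		/* first reference */
	return &s->nodes[s->count++];
}
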
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+ const struct vmw_resource *res,
+ unsigned long offset)
+{
+ struct vmw_resource_relocation *rel;
+
+ rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+ if (unlikely(rel == NULL)) {
+ DRM_ERROR("Failed to allocate a resource relocation.\n");
+ return -ENOMEM;
+ }
+
+ rel->res = res;
+ rel->offset = offset;
+ list_add_tail(&rel->head, list);
+
+ return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+ struct vmw_resource_relocation *rel, *n;
+
+ list_for_each_entry_safe(rel, n, list, head) {
+ list_del(&rel->head);
+ kfree(rel);
+ }
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+ struct list_head *list)
+{
+ struct vmw_resource_relocation *rel;
+
+ list_for_each_entry(rel, list, head)
+ cb[rel->offset] = rel->res->id;
+}
+
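The relocation helpers above defer id patching: while a command is parsed, only a word offset from sw_context->buf_start is recorded, and the device-visible id is written into the copied command stream just before it is committed. A self-contained sketch of patching by word offset follows; the names are hypothetical and this is not the driver's code:

#include <stdint.h>
#include <stdio.h>

struct reloc { unsigned long offset; uint32_t dev_id; };

/* Patch each recorded 32-bit slot of the copied command buffer with the
 * device-visible id; offsets are in 4-byte units, as in the driver. */
static void apply_relocs(uint32_t *cmd_copy, const struct reloc *r, size_t n)
{
	size_t i;

	for (i = 0; i < n; ++i)
		cmd_copy[r[i].offset] = r[i].dev_id;
}

int main(void)
{
	uint32_t cmds[4] = { 0x40, 0xdeadbeef /* user handle */, 0, 0 };
	struct reloc r = { .offset = 1, .dev_id = 7 };

	apply_relocs(cmds, &r, 1);
	printf("%u\n", (unsigned int)cmds[1]);	/* prints 7 */
	return 0;
}
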
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res = *p_res;
-
- if (list_empty(&res->validate_head)) {
- list_add_tail(&res->validate_head, &sw_context->resource_list);
- *p_res = NULL;
- } else
- vmw_resource_unreference(p_res);
-}
-
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
@@ -71,31 +232,43 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
- uint32_t fence_flags,
uint32_t *p_val_node)
{
uint32_t val_node;
+ struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
+ struct drm_hash_item *hash;
+ int ret;
- val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission"
- " exceeded.\n");
- return -EINVAL;
- }
-
- val_buf = &sw_context->val_bufs[val_node];
- if (unlikely(val_node == sw_context->cur_val_buf)) {
- val_buf->new_sync_obj_arg = NULL;
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+ &hash) == 0)) {
+ vval_buf = container_of(hash, struct vmw_validate_buffer,
+ hash);
+ val_buf = &vval_buf->base;
+ val_node = vval_buf - sw_context->val_bufs;
+ } else {
+ val_node = sw_context->cur_val_buf;
+ if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+ DRM_ERROR("Max number of DMA buffers per submission "
+ "exceeded.\n");
+ return -EINVAL;
+ }
+ vval_buf = &sw_context->val_bufs[val_node];
+ vval_buf->hash.key = (unsigned long) bo;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a buffer validation "
+ "entry.\n");
+ return ret;
+ }
+ ++sw_context->cur_val_buf;
+ val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
+ val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- ++sw_context->cur_val_buf;
}
- val_buf->new_sync_obj_arg = (void *)
- ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
- sw_context->fence_flags |= fence_flags;
+ sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
if (p_val_node)
*p_val_node = val_node;
@@ -103,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
return 0;
}
-static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since vmware's command submission currently is protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
+ * since only a single thread at once will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
- struct vmw_resource *ctx;
-
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- __le32 cid;
- } *cmd;
+ struct vmw_resource_val_node *val;
int ret;
- cmd = container_of(header, struct vmw_cid_cmd, header);
- if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
- return 0;
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
- ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
- &ctx);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use context %u\n",
- (unsigned) cmd->cid);
- return ret;
+ ret = vmw_resource_reserve(res, val->no_buffer_needed);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ret = vmw_bo_to_validate_list
+ (sw_context, bo, NULL);
+
+ if (unlikely(ret != 0))
+ return ret;
+ }
}
+ return 0;
+}
- sw_context->last_cid = cmd->cid;
- sw_context->cid_valid = true;
- sw_context->cur_ctx = ctx;
- vmw_resource_to_validate_list(sw_context, &ctx);
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
+ ret = vmw_resource_validate(res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to validate resource.\n");
+ return ret;
+ }
+ }
return 0;
}
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type-specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- uint32_t *sid)
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id,
+ struct vmw_resource_val_node **p_val)
{
- struct vmw_surface *srf;
- int ret;
+ struct vmw_res_cache_entry *rcache =
+ &sw_context->res_cache[res_type];
struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+ int ret;
- if (*sid == SVGA3D_INVALID_ID)
+ if (*id == SVGA3D_INVALID_ID)
return 0;
- if (likely((sw_context->sid_valid &&
- *sid == sw_context->last_sid))) {
- *sid = sw_context->sid_translation;
- return 0;
- }
+ /*
+ * Fastpath in case of repeated commands referencing the same
+ * resource
+ */
- ret = vmw_user_surface_lookup_handle(dev_priv,
- sw_context->tfile,
- *sid, &srf);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface 0x%08x "
- "address 0x%08lx\n",
- (unsigned int) *sid,
- (unsigned long) sid);
- return ret;
+ if (likely(rcache->valid && *id == rcache->handle)) {
+ const struct vmw_resource *res = rcache->res;
+
+ rcache->node->first_usage = false;
+ if (p_val)
+ *p_val = rcache->node;
+
+ return vmw_resource_relocation_add
+ (&sw_context->res_relocations, res,
+ id - sw_context->buf_start);
}
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_user_resource_lookup_handle(dev_priv,
+ sw_context->tfile,
+ *id,
+ converter,
+ &res);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Could not validate surface.\n");
- vmw_surface_unreference(&srf);
+ DRM_ERROR("Could not find or use resource 0x%08x.\n",
+ (unsigned) *id);
+ dump_stack();
return ret;
}
- sw_context->last_sid = *sid;
- sw_context->sid_valid = true;
- sw_context->sid_translation = srf->res.id;
- *sid = sw_context->sid_translation;
+ rcache->valid = true;
+ rcache->res = res;
+ rcache->handle = *id;
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+ res,
+ id - sw_context->buf_start);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+
+ ret = vmw_resource_val_add(sw_context, res, &node);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+ rcache->node = node;
+ if (p_val)
+ *p_val = node;
+ vmw_resource_unreference(&res);
return 0;
+
+out_no_reloc:
+ BUG_ON(sw_context->error_resource != NULL);
+ sw_context->error_resource = res;
+
+ return ret;
}
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_cid_cmd {
+ SVGA3dCmdHeader header;
+ __le32 cid;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_cid_cmd, header);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->cid, NULL);
+}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -198,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.target.sid, NULL);
return ret;
}
@@ -213,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -230,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -252,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -272,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->body.sid,
+ NULL);
}
/**
* vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
*
* @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
@@ -287,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* query results, and if another buffer currently is pinned for query
* results. If so, the function prepares the state of @sw_context for
* switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- uint32_t cid,
struct ttm_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
int ret;
- bool add_cid = false;
- uint32_t cid_to_add;
+
+ BUG_ON(!ctx_entry->valid);
+ sw_context->last_query_ctx = ctx_entry->res;
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
@@ -308,12 +583,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
- BUG_ON(!sw_context->query_cid_valid);
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
+ sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
NULL);
if (unlikely(ret != 0))
return ret;
@@ -322,35 +594,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
NULL);
if (unlikely(ret != 0))
return ret;
}
- if (unlikely(cid != sw_context->cur_query_cid &&
- sw_context->query_cid_valid)) {
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
- }
-
- sw_context->cur_query_cid = cid;
- sw_context->query_cid_valid = true;
-
- if (add_cid) {
- struct vmw_resource *ctx = sw_context->cur_ctx;
-
- if (list_empty(&ctx->query_head))
- list_add_tail(&ctx->query_head,
- &sw_context->query_list);
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
- NULL);
- if (unlikely(ret != 0))
- return ret;
- }
return 0;
}
@@ -362,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command submission batch.
*
* This function will check if we're switching query buffers, and will then,
- * if no other query waits are issued this command submission batch,
* issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
* However, since both the new query buffer and the old one are fenced with
* that fence, we can do an asynchronus unpin now, and be sure that the
* old query buffer won't be moved until the fence has signaled.
@@ -376,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
-
- struct vmw_resource *ctx, *next_ctx;
- int ret;
-
/*
* The validate list should still hold references to all
* contexts here.
*/
- list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
- query_head) {
- list_del_init(&ctx->query_head);
+ if (sw_context->needs_post_query_barrier) {
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
+ struct vmw_resource *ctx;
+ int ret;
- BUG_ON(list_empty(&ctx->validate_head));
+ BUG_ON(!ctx_entry->valid);
+ ctx = ctx_entry->res;
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
@@ -403,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
ttm_bo_unref(&dev_priv->pinned_bo);
}
- vmw_bo_pin(sw_context->cur_query_bo, true);
+ if (!sw_context->needs_post_query_barrier) {
+ vmw_bo_pin(sw_context->cur_query_bo, true);
- /*
- * We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
- */
+ /*
+ * We pin also the dummy_query_bo buffer so that we
+ * don't need to validate it when emitting
+ * dummy queries in context destroy paths.
+ */
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
+ vmw_bo_pin(dev_priv->dummy_query_bo, true);
+ dev_priv->dummy_query_bo_pinned = true;
- dev_priv->query_cid = sw_context->cur_query_cid;
- dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
+ BUG_ON(sw_context->last_query_ctx == NULL);
+ dev_priv->query_cid = sw_context->last_query_ctx->id;
+ dev_priv->query_cid_valid = true;
+ dev_priv->pinned_bo =
+ ttm_bo_reference(sw_context->cur_query_bo);
+ }
}
}
/**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
*
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
*
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations().
*/
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
- struct list_head *list, *next;
-
- list_for_each_safe(list, next, &sw_context->query_list) {
- list_del_init(list);
- }
-}
-
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
@@ -465,8 +718,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
- &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -479,6 +731,37 @@ out_no_reloc:
return ret;
}
+/**
+ * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_begin_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginQuery q;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_begin_query_cmd,
+ header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->q.cid,
+ NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -501,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
- &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
+/**
+ * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -518,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
- struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
vmw_dmabuf_unreference(&vmw_bo);
-
- /*
- * This wait will act as a barrier for previous waits for this
- * context.
- */
-
- ctx = sw_context->cur_ctx;
- if (!list_empty(&ctx->query_head))
- list_del_init(&ctx->query_head);
-
return 0;
}
@@ -550,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
- struct vmw_resource *res;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -566,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- bo = &vmw_bo->base;
- ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
- cmd->dma.host.sid, &srf);
- if (ret) {
- DRM_ERROR("could not find surface\n");
- goto out_no_reloc;
- }
-
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->dma.host.sid,
+ NULL);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Culd not validate surface.\n");
- goto out_no_validate;
+ if (unlikely(ret != -ERESTARTSYS))
+ DRM_ERROR("could not find surface for DMA.\n");
+ goto out_no_surface;
}
- /*
- * Patch command stream with device SID.
- */
- cmd->dma.host.sid = srf->res.id;
- vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
- vmw_dmabuf_unreference(&vmw_bo);
-
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- return 0;
+ vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
-out_no_validate:
- vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
@@ -629,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &decl->array.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -644,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &range->indexArray.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -676,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &cur_state->value);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cur_state->value, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -708,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return ret;
}
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_cmd,
+ header);
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return 0;
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -781,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check)
+ &vmw_cmd_blt_surf_screen_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -837,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
int32_t cur_size = size;
int ret;
+ sw_context->buf_start = buf;
+
while (cur_size > 0) {
size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -868,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index];
+ validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
- } else
+ break;
+ case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
+ break;
+ default:
+ BUG();
+ }
}
vmw_free_relocations(sw_context);
}
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+ struct vmw_resource_val_node *val, *val_next;
+
+ /*
+ * Drop references to resources held during command submission.
+ */
+
+ list_for_each_entry_safe(val, val_next, list, head) {
+ list_del_init(&val->head);
+ vmw_resource_unreference(&val->res);
+ kfree(val);
+ }
+}
+
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry, *next;
- struct vmw_resource *res, *res_next;
+ struct vmw_validate_buffer *entry, *next;
+ struct vmw_resource_val_node *val;
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- head) {
- list_del(&entry->head);
- vmw_dmabuf_validate_clear(entry->bo);
- ttm_bo_unref(&entry->bo);
+ base.head) {
+ list_del(&entry->base.head);
+ ttm_bo_unref(&entry->base.bo);
+ (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
- /*
- * Drop references to resources held during command submission.
- */
- vmw_resource_unreserve(&sw_context->resource_list);
- list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
- validate_head) {
- list_del_init(&res->validate_head);
- vmw_resource_unreference(&res);
- }
+ list_for_each_entry(val, &sw_context->resource_list, head)
+ (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -929,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -939,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
*/
DRM_INFO("Falling through to VRAM.\n");
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
return ret;
}
@@ -947,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry;
+ struct vmw_validate_buffer *entry;
int ret;
- list_for_each_entry(entry, &sw_context->validate_nodes, head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+ list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+ ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
if (unlikely(ret != 0))
return ret;
}
@@ -1114,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
+ struct vmw_resource *error_resource;
+ struct list_head resource_list;
uint32_t handle;
void *cmd;
int ret;
@@ -1143,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
- sw_context->cid_valid = false;
- sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
- INIT_LIST_HEAD(&sw_context->query_list);
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
- sw_context->cur_query_cid = dev_priv->query_cid;
- sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+ sw_context->last_query_ctx = NULL;
+ sw_context->needs_post_query_barrier = false;
+ memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
+ INIT_LIST_HEAD(&sw_context->res_relocations);
+ if (!sw_context->res_ht_initialized) {
+ ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ sw_context->res_ht_initialized = true;
+ }
+ INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err;
+ ret = vmw_resources_reserve(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
@@ -1169,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
- vmw_apply_relocations(sw_context);
+ ret = vmw_resources_validate(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (unlikely(ret != 0))
- goto out_throttle;
+ goto out_err;
}
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
- goto out_throttle;
+ goto out_err;
}
+ vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
+
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+
vmw_fifo_commit(dev_priv, command_size);
vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1202,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
+ vmw_resource_list_unreserve(&sw_context->resource_list, false);
ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
(void *) fence);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
vmw_clear_validations(sw_context);
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
@@ -1217,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence);
}
+ list_splice_init(&sw_context->resource_list, &resource_list);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+
return 0;
out_err:
+ vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
-out_throttle:
- vmw_query_switch_backoff(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+ vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
+ list_splice_init(&sw_context->resource_list, &resource_list);
+ error_resource = sw_context->error_resource;
+ sw_context->error_resource = NULL;
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+ if (unlikely(error_resource != NULL))
+ vmw_resource_unreference(&error_resource);
+
return ret;
}
@@ -1252,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
+ * @fence: If non-NULL should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
@@ -1271,31 +1633,26 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
*/
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid)
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence)
{
int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
- struct vmw_fence_obj *fence;
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
+ struct vmw_fence_obj *lfence = NULL;
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- if (only_on_cid_match && cid != dev_priv->query_cid)
- goto out_unlock;
-
INIT_LIST_HEAD(&validate_list);
- pinned_val.new_sync_obj_arg = (void *)(unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC;
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
list_add_tail(&pinned_val.head, &validate_list);
- query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
list_add_tail(&query_val.head, &validate_list);
@@ -1308,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
goto out_no_reserve;
}
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_emit;
+ if (dev_priv->query_cid_valid) {
+ BUG_ON(fence != NULL);
+ ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_emit;
+ }
+ dev_priv->query_cid_valid = false;
}
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
- (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (fence == NULL) {
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+ NULL);
+ fence = lfence;
+ }
ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+ if (lfence != NULL)
+ vmw_fence_obj_unreference(&lfence);
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
out_unlock:
- mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
out_no_emit:
@@ -1335,6 +1701,31 @@ out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ if (dev_priv->query_cid_valid)
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index bc187fafd58c..c62d20e8a6f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
container_of(fence, struct vmw_user_fence, fence);
struct vmw_fence_manager *fman = fence->fman;
- kfree(ufence);
+ ttm_base_object_kfree(ufence, base);
/*
* Free kernel space accounting.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 7290811f89be..d9fbbe191071 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -133,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_rect *clips = NULL;
struct drm_mode_object *obj;
struct vmw_framebuffer *vfb;
+ struct vmw_resource *res;
uint32_t num_clips;
int ret;
@@ -180,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_no_ttm_lock;
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
- &surface);
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+ user_surface_converter,
+ &res);
if (ret)
goto out_no_surface;
+ surface = vmw_res_to_srf(res);
ret = vmw_kms_present(dev_priv, file_priv,
vfb, surface, arg->sid,
arg->dest_x, arg->dest_y,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 070fb239c5af..79f7e8e60529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cb55b7b66377..87e39f68e9d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -35,6 +35,7 @@
#include "svga_escape.h"
#define VMW_MAX_NUM_STREAMS 1
+#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
struct vmw_dma_buffer *buf;
@@ -449,6 +450,14 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
return 0;
}
+
+static bool vmw_overlay_available(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->overlay_priv != NULL &&
+ ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+ VMW_OVERLAY_CAP_MASK));
+}
+
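vmw_overlay_available() above replaces the earlier, inverted capability test (removed later in this patch): overlays are usable only when both the VIDEO and ESCAPE FIFO capabilities are present. A tiny stand-alone illustration of the "mask requires all bits" check follows; the bit values are placeholders, not the real SVGA definitions:

#include <stdbool.h>

#define CAP_VIDEO  (1u << 0)	/* placeholder values for illustration */
#define CAP_ESCAPE (1u << 1)
#define CAP_MASK   (CAP_VIDEO | CAP_ESCAPE)

/* True only when every bit in CAP_MASK is set in caps. */
static bool overlay_available(unsigned int caps)
{
	return (caps & CAP_MASK) == CAP_MASK;
}
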
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -461,7 +470,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res;
int ret;
- if (!overlay)
+ if (!vmw_overlay_available(dev_priv))
return -ENOSYS;
ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
@@ -492,7 +501,7 @@ out_unlock:
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
- if (!dev_priv->overlay_priv)
+ if (!vmw_overlay_available(dev_priv))
return 0;
return VMW_MAX_NUM_STREAMS;
@@ -503,7 +512,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, k;
- if (!overlay)
+ if (!vmw_overlay_available(dev_priv))
return 0;
mutex_lock(&overlay->mutex);
@@ -569,12 +578,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
if (dev_priv->overlay_priv)
return -EINVAL;
- if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
- (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
- DRM_INFO("hardware doesn't support overlays\n");
- return -ENOSYS;
- }
-
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index da3c6b5b98a1..e01a17b407b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,17 +30,7 @@
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
-
-struct vmw_user_context {
- struct ttm_base_object base;
- struct vmw_resource res;
-};
-
-struct vmw_user_surface {
- struct ttm_base_object base;
- struct vmw_surface srf;
- uint32_t size;
-};
+#include "vmwgfx_resource_priv.h"
struct vmw_user_dma_buffer {
struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
struct vmw_stream stream;
};
-struct vmw_surface_offset {
- uint32_t face;
- uint32_t mip;
- uint32_t bo_offset;
-};
-
-static uint64_t vmw_user_context_size;
-static uint64_t vmw_user_surface_size;
static uint64_t vmw_user_stream_size;
+static const struct vmw_res_func vmw_stream_func = {
+ .res_type = vmw_res_stream,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "video streams",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
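The vmw_res_func table above lets generic resource code dispatch per-type behaviour and skip operations a type does not implement (streams, for instance, need no backup and define no bind/unbind, so those entries are NULL). Below is a stand-alone sketch of that function-table dispatch pattern with invented names; it is not the driver's code:

#include <stddef.h>

struct obj;

struct obj_ops {
	const char *type_name;
	int  (*create)(struct obj *o);
	void (*destroy)(struct obj *o);
};

struct obj {
	const struct obj_ops *ops;
};

/* Generic teardown: only call into the type if it provides the hook. */
static void obj_release(struct obj *o)
{
	if (o->ops->destroy != NULL)
		o->ops->destroy(o);
}
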
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
*
* Release the resource id to the resource id manager and set it to -1
*/
-static void vmw_resource_release_id(struct vmw_resource *res)
+void vmw_resource_release_id(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
write_lock(&dev_priv->resource_lock);
if (res->id != -1)
- idr_remove(res->idr, res->id);
+ idr_remove(idr, res->id);
res->id = -1;
write_unlock(&dev_priv->resource_lock);
}
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
- int id = res->id;
- struct idr *idr = res->idr;
+ int id;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
res->avail = false;
- if (res->remove_from_lists != NULL)
- res->remove_from_lists(res);
+ list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ttm_bo_reserve(bo, false, false, false, 0);
+ if (!list_empty(&res->mob_head) &&
+ res->func->unbind != NULL) {
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+ res->func->unbind(res, false, &val_buf);
+ }
+ res->backup_dirty = false;
+ list_del_init(&res->mob_head);
+ ttm_bo_unreserve(bo);
+ vmw_dmabuf_unreference(&res->backup);
+ }
if (likely(res->hw_destroy != NULL))
res->hw_destroy(res);
+ id = res->id;
if (res->res_free != NULL)
res->res_free(res);
else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
/**
* vmw_resource_alloc_id - release a resource id to the id manager.
*
- * @dev_priv: Pointer to the device private structure.
* @res: Pointer to the resource.
*
* Allocate the lowest free resource from the resource manager, and set
* @res->id to that id. Returns 0 on success and -ENOMEM on failure.
*/
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
- struct vmw_resource *res)
+int vmw_resource_alloc_id(struct vmw_resource *res)
{
+ struct vmw_private *dev_priv = res->dev_priv;
int ret;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
BUG_ON(res->id != -1);
do {
- if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
return -ENOMEM;
write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(res->idr, res, 1, &res->id);
+ ret = idr_get_new_above(idr, res, 1, &res->id);
write_unlock(&dev_priv->resource_lock);
} while (ret == -EAGAIN);
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
return ret;
}
-
-static int vmw_resource_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- struct idr *idr,
- enum ttm_object_type obj_type,
- bool delay_id,
- void (*res_free) (struct vmw_resource *res),
- void (*remove_from_lists)
- (struct vmw_resource *res))
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @res: The struct vmw_resource to initialize.
+ * @delay_id: Boolean whether to defer device id allocation until
+ * the first validation.
+ * @res_free: Resource destructor.
+ * @func: Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func)
{
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
- res->remove_from_lists = remove_from_lists;
- res->res_type = obj_type;
- res->idr = idr;
res->avail = false;
res->dev_priv = dev_priv;
- INIT_LIST_HEAD(&res->query_head);
- INIT_LIST_HEAD(&res->validate_head);
+ res->func = func;
+ INIT_LIST_HEAD(&res->lru_head);
+ INIT_LIST_HEAD(&res->mob_head);
res->id = -1;
+ res->backup = NULL;
+ res->backup_offset = 0;
+ res->backup_dirty = false;
+ res->res_dirty = false;
if (delay_id)
return 0;
else
- return vmw_resource_alloc_id(dev_priv, res);
+ return vmw_resource_alloc_id(res);
}
/**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
* Activate basically means that the function vmw_resource_lookup will
* find it.
*/
-
-static void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *))
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -250,994 +268,41 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
}
/**
- * Context management:
- */
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
-{
-
- struct vmw_private *dev_priv = res->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyContext body;
- } *cmd;
-
-
- vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineContext body;
- } *cmd;
-
- ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
- VMW_RES_CONTEXT, false, res_free, NULL);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a resource id.\n");
- goto out_early;
- }
-
- if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
- DRM_ERROR("Out of hw context ids.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
- vmw_resource_activate(res, vmw_hw_context_destroy);
- return 0;
-
-out_early:
- if (res_free == NULL)
- kfree(res);
- else
- res_free(res);
- return ret;
-}
-
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
- return (ret == 0) ? res : NULL;
-}
-
-/**
- * User-space context management:
- */
-
-static void vmw_user_context_free(struct vmw_resource *res)
-{
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- struct vmw_private *dev_priv = res->dev_priv;
-
- kfree(ctx);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
-}
-
-/**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
- */
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_context *ctx =
- container_of(base, struct vmw_user_context, base);
- struct vmw_resource *res = &ctx->res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_resource *res;
- struct vmw_user_context *ctx;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret = 0;
-
- res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
- if (unlikely(res == NULL))
- return -EINVAL;
-
- if (res->res_free != &vmw_user_context_free) {
- ret = -EINVAL;
- goto out;
- }
-
- ctx = container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable) {
- ret = -EPERM;
- goto out;
- }
-
- ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
- vmw_resource_unreference(&res);
- return ret;
-}
-
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_context *ctx;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret;
-
-
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of contexts anyway.
- */
-
- if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_context_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for context"
- " creation.\n");
- goto out_unlock;
- }
-
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- res = &ctx->res;
- ctx->base.shareable = false;
- ctx->base.tfile = NULL;
-
- /*
- * From here on, the destructor takes over resource freeing.
- */
-
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- tmp = vmw_resource_reference(&ctx->res);
- ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
- &vmw_user_context_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- goto out_err;
- }
-
- arg->cid = res->id;
-out_err:
- vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
- return ret;
-
-}
-
-int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res;
- int ret = 0;
-
- read_lock(&dev_priv->resource_lock);
- res = idr_find(&dev_priv->context_idr, id);
- if (res && res->avail) {
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable)
- ret = -EPERM;
- if (p_res)
- *p_res = vmw_resource_reference(res);
- } else
- ret = -EINVAL;
- read_unlock(&dev_priv->resource_lock);
-
- return ret;
-}
-
-struct vmw_bpp {
- uint8_t bpp;
- uint8_t s_bpp;
-};
-
-/*
- * Size table for the supported SVGA3D surface formats. It consists of
- * two values. The bpp value and the s_bpp value which is short for
- * "stride bits per pixel" The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- * min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- * h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
- [SVGA3D_FORMAT_INVALID] = {0, 0},
- [SVGA3D_X8R8G8B8] = {32, 32},
- [SVGA3D_A8R8G8B8] = {32, 32},
- [SVGA3D_R5G6B5] = {16, 16},
- [SVGA3D_X1R5G5B5] = {16, 16},
- [SVGA3D_A1R5G5B5] = {16, 16},
- [SVGA3D_A4R4G4B4] = {16, 16},
- [SVGA3D_Z_D32] = {32, 32},
- [SVGA3D_Z_D16] = {16, 16},
- [SVGA3D_Z_D24S8] = {32, 32},
- [SVGA3D_Z_D15S1] = {16, 16},
- [SVGA3D_LUMINANCE8] = {8, 8},
- [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
- [SVGA3D_LUMINANCE16] = {16, 16},
- [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
- [SVGA3D_DXT1] = {4, 16},
- [SVGA3D_DXT2] = {8, 32},
- [SVGA3D_DXT3] = {8, 32},
- [SVGA3D_DXT4] = {8, 32},
- [SVGA3D_DXT5] = {8, 32},
- [SVGA3D_BUMPU8V8] = {16, 16},
- [SVGA3D_BUMPL6V5U5] = {16, 16},
- [SVGA3D_BUMPX8L8V8U8] = {32, 32},
- [SVGA3D_ARGB_S10E5] = {16, 16},
- [SVGA3D_ARGB_S23E8] = {32, 32},
- [SVGA3D_A2R10G10B10] = {32, 32},
- [SVGA3D_V8U8] = {16, 16},
- [SVGA3D_Q8W8V8U8] = {32, 32},
- [SVGA3D_CxV8U8] = {16, 16},
- [SVGA3D_X8L8V8U8] = {32, 32},
- [SVGA3D_A2W10V10U10] = {32, 32},
- [SVGA3D_ALPHA8] = {8, 8},
- [SVGA3D_R_S10E5] = {16, 16},
- [SVGA3D_R_S23E8] = {32, 32},
- [SVGA3D_RG_S10E5] = {16, 16},
- [SVGA3D_RG_S23E8] = {32, 32},
- [SVGA3D_BUFFER] = {8, 8},
- [SVGA3D_Z_D24X8] = {32, 32},
- [SVGA3D_V16U16] = {32, 32},
- [SVGA3D_G16R16] = {32, 32},
- [SVGA3D_A16B16G16R16] = {64, 64},
- [SVGA3D_UYVY] = {12, 12},
- [SVGA3D_YUY2] = {12, 12},
- [SVGA3D_NV12] = {12, 8},
- [SVGA3D_AYUV] = {32, 32},
- [SVGA3D_BC4_UNORM] = {4, 16},
- [SVGA3D_BC5_UNORM] = {8, 32},
- [SVGA3D_Z_DF16] = {16, 16},
- [SVGA3D_Z_DF24] = {24, 24},
- [SVGA3D_Z_D24S8_INT] = {32, 32}
-};
-
-
-/**
- * Surface management.
- */
-
-struct vmw_surface_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
- SVGA3dCopyBox cb;
- SVGA3dCmdSurfaceDMASuffix suffix;
-};
-
-struct vmw_surface_define {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineSurface body;
-};
-
-struct vmw_surface_destroy {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroySurface body;
-};
-
-
-/**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
-{
- return srf->num_sizes * sizeof(struct vmw_surface_dma);
-}
-
-
-/**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
-{
- return sizeof(struct vmw_surface_define) + srf->num_sizes *
- sizeof(SVGA3dSize);
-}
-
-
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
*
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
-{
- return sizeof(struct vmw_surface_destroy);
-}
-
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
- void *cmd_space)
-{
- struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
- cmd_space;
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.sid = id;
-}
-
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ * @p_res: On successful return the location pointed to will contain
+ * a pointer to a refcounted struct vmw_resource.
*
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
*/
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
- void *cmd_space)
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv
+ *converter,
+ struct vmw_resource **p_res)
{
- struct vmw_surface_define *cmd = (struct vmw_surface_define *)
- cmd_space;
- struct drm_vmw_size *src_size;
- SVGA3dSize *cmd_size;
- uint32_t cmd_len;
- int i;
-
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
- cmd->header.size = cmd_len;
- cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
- cmd += 1;
- cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
-
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
- cmd_size->width = src_size->width;
- cmd_size->height = src_size->height;
- cmd_size->depth = src_size->depth;
- }
-}
-
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
- void *cmd_space,
- const SVGAGuestPtr *ptr,
- bool to_surface)
-{
- uint32_t i;
- uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
- uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
- struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
- for (i = 0; i < srf->num_sizes; ++i) {
- SVGA3dCmdHeader *header = &cmd->header;
- SVGA3dCmdSurfaceDMA *body = &cmd->body;
- SVGA3dCopyBox *cb = &cmd->cb;
- SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
- const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
- const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
- header->id = SVGA_3D_CMD_SURFACE_DMA;
- header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
- body->guest.ptr = *ptr;
- body->guest.ptr.offset += cur_offset->bo_offset;
- body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
- body->host.sid = srf->res.id;
- body->host.face = cur_offset->face;
- body->host.mipmap = cur_offset->mip;
- body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
- SVGA3D_READ_HOST_VRAM);
- cb->x = 0;
- cb->y = 0;
- cb->z = 0;
- cb->srcx = 0;
- cb->srcy = 0;
- cb->srcz = 0;
- cb->w = cur_size->width;
- cb->h = cur_size->height;
- cb->d = cur_size->depth;
-
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = body->guest.pitch*cur_size->height*
- cur_size->depth*bpp / stride_bpp;
- suffix->flags.discard = 0;
- suffix->flags.unsynchronized = 0;
- suffix->flags.reserved = 0;
- ++cmd;
- }
-};
-
-
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
-{
-
- struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
- void *cmd;
-
- if (res->id != -1) {
-
- cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
-
- /*
- * used_memory_size_atomic, or separate lock
- * to avoid taking dev_priv::cmdbuf_mutex in
- * the destroy path.
- */
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = container_of(res, struct vmw_surface, res);
- dev_priv->used_memory_size -= srf->backup_size;
- mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- }
- vmw_3d_resource_dec(dev_priv, false);
-}
-
-void vmw_surface_res_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(srf);
-}
-
-
-/**
- * vmw_surface_do_validate - make a surface available to the device.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
- *
- * Returns -EBUSY if there wasn't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
-
- if (likely(res->id != -1))
- return 0;
-
- if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
- dev_priv->memory_size))
- return -EBUSY;
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- if (srf->backup) {
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- val_buf.new_sync_obj_arg = (void *)((unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC);
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
- }
-
- /*
- * Alloc id for the resource.
- */
-
- ret = vmw_resource_alloc_id(dev_priv, res);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a surface id.\n");
- goto out_no_id;
- }
- if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
- ret = -EBUSY;
- goto out_no_fifo;
- }
-
-
- /*
- * Encode surface define- and dma commands.
- */
-
- submit_size = vmw_surface_define_size(srf);
- if (srf->backup)
- submit_size += vmw_surface_dma_size(srf);
-
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "validation.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_surface_define_encode(srf, cmd);
- if (srf->backup) {
- SVGAGuestPtr ptr;
-
- cmd += vmw_surface_define_size(srf);
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, true);
- }
-
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Create a fence object and fence the backup buffer.
- */
-
- if (srf->backup) {
- struct vmw_fence_obj *fence;
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- }
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size += srf->backup_size;
-
- return 0;
-
-out_no_fifo:
- vmw_resource_release_id(res);
-out_no_id:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- if (srf->backup)
- ttm_bo_unref(&val_buf.bo);
- return ret;
-}
-
-/**
- * vmw_surface_evict - Evict a hw surface.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
- *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
- */
-int vmw_surface_evict(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
- struct vmw_fence_obj *fence;
- SVGAGuestPtr ptr;
-
- BUG_ON(res->id == -1);
-
- /*
- * Create a surface backup buffer object.
- */
-
- if (!srf->backup) {
- ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
- ttm_bo_type_device,
- &vmw_srf_placement, 0, 0, true,
- NULL, &srf->backup);
- if (unlikely(ret != 0))
- return ret;
- }
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- val_buf.new_sync_obj_arg = (void *)(unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC;
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
-
-
- /*
- * Encode the dma- and surface destroy commands.
- */
-
- submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "eviction.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, false);
- cmd += vmw_surface_dma_size(srf);
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size -= srf->backup_size;
-
- /*
- * Create a fence object and fence the DMA buffer.
- */
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
-
- /*
- * Release the surface ID.
- */
-
- vmw_resource_release_id(res);
-
- return 0;
-
-out_no_fifo:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- return ret;
-}
-
-
-/**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * acommodated.
- *
- * May return errors if out of resources.
- */
-int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- int ret;
- struct vmw_surface *evict_srf;
-
- do {
- write_lock(&dev_priv->resource_lock);
- list_del_init(&srf->lru_head);
- write_unlock(&dev_priv->resource_lock);
-
- ret = vmw_surface_do_validate(dev_priv, srf);
- if (likely(ret != -EBUSY))
- break;
-
- write_lock(&dev_priv->resource_lock);
- if (list_empty(&dev_priv->surface_lru)) {
- DRM_ERROR("Out of device memory for surfaces.\n");
- ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
- break;
- }
-
- evict_srf = vmw_surface_reference
- (list_first_entry(&dev_priv->surface_lru,
- struct vmw_surface,
- lru_head));
- list_del_init(&evict_srf->lru_head);
-
- write_unlock(&dev_priv->resource_lock);
- (void) vmw_surface_evict(dev_priv, evict_srf);
-
- vmw_surface_unreference(&evict_srf);
-
- } while (1);
-
- if (unlikely(ret != 0 && srf->res.id != -1)) {
- write_lock(&dev_priv->resource_lock);
- list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
- write_unlock(&dev_priv->resource_lock);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
- *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
- */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
- struct vmw_resource *res = &srf->res;
-
- BUG_ON(res_free == NULL);
- INIT_LIST_HEAD(&srf->lru_head);
- ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
- VMW_RES_SURFACE, true, res_free,
- vmw_surface_remove_from_lists);
-
- if (unlikely(ret != 0))
- res_free(res);
-
- /*
- * The surface won't be visible to hardware until a
- * surface validate.
- */
-
- (void) vmw_3d_resource_inc(dev_priv, false);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
- return ret;
-}
-
-static void vmw_user_surface_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
- struct vmw_user_surface *user_srf =
- container_of(srf, struct vmw_user_surface, srf);
- struct vmw_private *dev_priv = srf->res.dev_priv;
- uint32_t size = user_srf->size;
-
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(user_srf);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list_head: list of resources to unreserve.
- *
- * Currently only surfaces are considered, and unreserving a surface
- * means putting it back on the device's surface lru list,
- * so that it can be evicted if necessary.
- * This function traverses the resource list and
- * checks whether resources are surfaces, and in that case puts them back
- * on the device's surface LRU list.
- */
-void vmw_resource_unreserve(struct list_head *list)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- rwlock_t *lock = NULL;
-
- list_for_each_entry(res, list, validate_head) {
-
- if (res->res_free != &vmw_surface_res_free &&
- res->res_free != &vmw_user_surface_free)
- continue;
-
- if (unlikely(lock == NULL)) {
- lock = &res->dev_priv->resource_lock;
- write_lock(lock);
- }
-
- srf = container_of(res, struct vmw_surface, res);
- list_del_init(&srf->lru_head);
- list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
- }
-
- if (lock != NULL)
- write_unlock(lock);
-}
-
-/**
- * Helper function that looks either a surface or dmabuf.
- *
- * The pointer this pointed at by out_surf and out_buf needs to be null.
- */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf)
-{
- int ret;
-
- BUG_ON(*out_surf || *out_buf);
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
- if (!ret)
- return 0;
-
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
- return ret;
-}
-
-
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_surface **out)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
struct ttm_base_object *base;
+ struct vmw_resource *res;
int ret = -EINVAL;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL))
return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
+ if (unlikely(base->object_type != converter->object_type))
goto out_bad_resource;
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
- res = &srf->res;
+ res = converter->base_obj_to_res(base);
read_lock(&dev_priv->resource_lock);
-
- if (!res->avail || res->res_free != &vmw_user_surface_free) {
+ if (!res->avail || res->res_free != converter->res_free) {
read_unlock(&dev_priv->resource_lock);
goto out_bad_resource;
}
@@ -1245,7 +310,7 @@ int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
kref_get(&res->kref);
read_unlock(&dev_priv->resource_lock);
- *out = srf;
+ *p_res = res;
ret = 0;
out_bad_resource:
@@ -1254,286 +319,32 @@ out_bad_resource:
return ret;
}
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
- struct vmw_resource *res = &user_srf->srf.res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
- return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
-}
-
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by out_surf and out_buf must be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct vmw_surface *srf;
struct vmw_resource *res;
- struct vmw_resource *tmp;
- union drm_vmw_surface_create_arg *arg =
- (union drm_vmw_surface_create_arg *)data;
- struct drm_vmw_surface_create_req *req = &arg->req;
- struct drm_vmw_surface_arg *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
- int i, j;
- uint32_t cur_bo_offset;
- struct drm_vmw_size *cur_size;
- struct vmw_surface_offset *cur_offset;
- uint32_t stride_bpp;
- uint32_t bpp;
- uint32_t num_sizes;
- uint32_t size;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
-
- num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- num_sizes += req->mip_levels[i];
-
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
- return -EINVAL;
-
- size = vmw_user_surface_size + 128 +
- ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
- ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
-
- user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
-
- srf = &user_srf->srf;
- res = &srf->res;
-
- srf->flags = req->flags;
- srf->format = req->format;
- srf->scanout = req->scanout;
- srf->backup = NULL;
-
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = num_sizes;
- user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_sizes;
- }
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_offsets;
- }
-
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
- cur_bo_offset = 0;
- cur_offset = srf->offsets;
- cur_size = srf->sizes;
-
- bpp = vmw_sf_bpp[srf->format].bpp;
- stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- for (j = 0; j < srf->mip_levels[i]; ++j) {
- uint32_t stride =
- (cur_size->width * stride_bpp + 7) >> 3;
-
- cur_offset->face = i;
- cur_offset->mip = j;
- cur_offset->bo_offset = cur_bo_offset;
- cur_bo_offset += stride * cur_size->height *
- cur_size->depth * bpp / stride_bpp;
- ++cur_offset;
- ++cur_size;
- }
- }
- srf->backup_size = cur_bo_offset;
-
- if (srf->scanout &&
- srf->num_sizes == 1 &&
- srf->sizes[0].width == 64 &&
- srf->sizes[0].height == 64 &&
- srf->format == SVGA3D_A8R8G8B8) {
-
- /* allocate image area and clear it */
- srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
- }
- srf->snooper.crtc = NULL;
-
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
-
- /**
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
-
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- rep->sid = user_srf->base.hash.key;
- if (rep->sid == SVGA3D_INVALID_ID)
- DRM_ERROR("Created bad Surface ID.\n");
-
- vmw_resource_unreference(&res);
-
- ttm_read_unlock(&vmaster->lock);
- return 0;
-out_no_copy:
- kfree(srf->offsets);
-out_no_offsets:
- kfree(srf->sizes);
-out_no_sizes:
- kfree(user_srf);
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
- return ret;
-}
-
-int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- union drm_vmw_surface_reference_arg *arg =
- (union drm_vmw_surface_reference_arg *)data;
- struct drm_vmw_surface_arg *req = &arg->req;
- struct drm_vmw_surface_create_req *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct drm_vmw_size __user *user_sizes;
- struct ttm_base_object *base;
- int ret = -EINVAL;
-
- base = ttm_base_object_lookup(tfile, req->sid);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Could not find surface to reference.\n");
- return -EINVAL;
- }
-
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_resource;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
-
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a surface.\n");
- goto out_no_reference;
- }
-
- rep->flags = srf->flags;
- rep->format = srf->format;
- memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- rep->size_addr;
+ BUG_ON(*out_surf || *out_buf);
- if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- DRM_ERROR("copy_to_user failed %p %u\n",
- user_sizes, srf->num_sizes);
- ret = -EFAULT;
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+ user_surface_converter,
+ &res);
+ if (!ret) {
+ *out_surf = vmw_res_to_srf(res);
+ return 0;
}
-out_bad_resource:
-out_no_reference:
- ttm_base_object_unref(&base);
-
- return ret;
-}
-
-int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id)
-{
- struct ttm_base_object *base;
- struct vmw_user_surface *user_srf;
-
- int ret = -EPERM;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
- return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_surface;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- *id = user_srf->srf.res.id;
- ret = 0;
-
-out_bad_surface:
- /**
- * FIXME: May deadlock here when called from the
- * command parsing code.
- */
-
- ttm_base_object_unref(&base);
+ *out_surf = NULL;
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
return ret;
}
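/*
 * Illustrative sketch (not part of the patch above): the converter argument
 * used by vmw_user_resource_lookup_handle(), e.g. user_surface_converter, is
 * expected to supply the TTM object type, a base-object-to-resource
 * translation and the matching destructor. The names and the object type
 * below are hypothetical; the real converter tables live with the individual
 * resource implementations.
 */
struct vmw_user_example {
	struct ttm_base_object base;
	struct vmw_resource res;
};

static struct vmw_resource *
vmw_example_base_to_res(struct ttm_base_object *base)
{
	return &container_of(base, struct vmw_user_example, base)->res;
}

static void vmw_example_res_free(struct vmw_resource *res)
{
	kfree(container_of(res, struct vmw_user_example, res));
}

static const struct vmw_user_resource_conv example_converter = {
	.object_type     = VMW_RES_SURFACE, /* placeholder, for illustration only */
	.base_obj_to_res = vmw_example_base_to_res,
	.res_free        = vmw_example_res_free,
};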
@@ -1562,11 +373,11 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
memset(vmw_bo, 0, sizeof(*vmw_bo));
- INIT_LIST_HEAD(&vmw_bo->validate_list);
+ INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
- 0, 0, interruptible,
+ 0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
}
@@ -1575,7 +386,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- kfree(vmw_user_bo);
+ ttm_base_object_kfree(vmw_user_bo, base);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -1594,6 +405,79 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
ttm_bo_unref(&bo);
}
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_dma_buffer **p_dma_buf)
+{
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_buffer_object *tmp;
+ int ret;
+
+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+ if (unlikely(user_bo == NULL)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ &vmw_vram_sys_placement, true,
+ &vmw_user_dmabuf_destroy);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = ttm_bo_reference(&user_bo->dma.base);
+ ret = ttm_base_object_init(tfile,
+ &user_bo->base,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ goto out_no_base_object;
+ }
+
+ *p_dma_buf = &user_bo->dma;
+ *handle = user_bo->base.hash.key;
+
+out_no_base_object:
+ return ret;
+}
+
+/**
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+
+ if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+ return -EPERM;
+
+ vmw_user_bo = vmw_user_dma_buffer(bo);
+ return (vmw_user_bo->base.tfile == tfile ||
+ vmw_user_bo->base.shareable) ? 0 : -EPERM;
+}
+
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1602,44 +486,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_buffer_object *tmp;
+ struct vmw_dma_buffer *dma_buf;
+ uint32_t handle;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
- if (unlikely(vmw_user_bo == NULL))
- return -ENOMEM;
-
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0)) {
- kfree(vmw_user_bo);
+ if (unlikely(ret != 0))
return ret;
- }
- ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ req->size, false, &handle, &dma_buf);
if (unlikely(ret != 0))
goto out_no_dmabuf;
- tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
- if (unlikely(ret != 0))
- goto out_no_base_object;
- else {
- rep->handle = vmw_user_bo->base.hash.key;
- rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
- rep->cur_gmr_id = vmw_user_bo->base.hash.key;
- rep->cur_gmr_offset = 0;
- }
+ rep->handle = handle;
+ rep->map_handle = dma_buf->base.addr_space_offset;
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+
+ vmw_dmabuf_unreference(&dma_buf);
-out_no_base_object:
- ttm_bo_unref(&tmp);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
@@ -1657,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
- uint32_t cur_validate_node)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- if (likely(vmw_bo->on_validate_list))
- return vmw_bo->cur_validate_node;
-
- vmw_bo->cur_validate_node = cur_validate_node;
- vmw_bo->on_validate_list = true;
-
- return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- vmw_bo->on_validate_list = false;
-}
-
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
{
@@ -1706,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return 0;
}
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ struct vmw_dma_buffer *dma_buf)
+{
+ struct vmw_user_dma_buffer *user_bo;
+
+ if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+ return -EINVAL;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+ return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
+
/*
* Stream management
*/
@@ -1730,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &stream->res;
int ret;
- ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
- VMW_RES_STREAM, false, res_free, NULL);
+ ret = vmw_resource_init(dev_priv, res, false, res_free,
+ &vmw_stream_func);
if (unlikely(ret != 0)) {
if (res_free == NULL)
@@ -1753,17 +611,13 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
return 0;
}
-/**
- * User-space context management:
- */
-
static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
struct vmw_private *dev_priv = res->dev_priv;
- kfree(stream);
+ ttm_base_object_kfree(stream, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
}
@@ -1792,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct vmw_user_stream *stream;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
int ret = 0;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+
+ res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1895,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct vmw_resource *res;
int ret;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+ res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+ *inout_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1990,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
}
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+ bool interruptible)
+{
+ unsigned long size =
+ (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ struct vmw_dma_buffer *backup;
+ int ret;
+
+ if (likely(res->backup)) {
+ BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+ return 0;
+ }
+
+ backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+ if (unlikely(backup == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+ res->func->backup_placement,
+ interruptible,
+ &vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto out_no_dmabuf;
+
+ res->backup = backup;
+
+out_no_dmabuf:
+ return ret;
+}
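/*
 * Worked example of the page-align idiom used above, assuming a 4 KiB page
 * size (PAGE_SIZE == 4096, PAGE_MASK == ~4095UL):
 *
 *   backup_size = 10000
 *   (10000 + 4096 - 1) & ~4095UL == 12288, i.e. three whole pages,
 *
 * which is the same result PAGE_ALIGN(res->backup_size) would give.
 */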
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ * @val_buf: Information about a buffer possibly
+ * containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ int ret = 0;
+ const struct vmw_res_func *func = res->func;
+
+ if (unlikely(res->id == -1)) {
+ ret = func->create(res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ if (func->bind &&
+ ((func->needs_backup && list_empty(&res->mob_head) &&
+ val_buf->bo != NULL) ||
+ (!func->needs_backup && val_buf->bo != NULL))) {
+ ret = func->bind(res, val_buf);
+ if (unlikely(ret != 0))
+ goto out_bind_failed;
+ if (func->needs_backup)
+ list_add_tail(&res->mob_head, &res->backup->res_list);
+ }
+
+ /*
+ * Only do this on write operations, and move to
+ * vmw_resource_unreserve if it can be called after
+ * backup buffers have been unreserved. Otherwise
+ * sort out locking.
+ */
+ res->res_dirty = true;
+
+ return 0;
+
+out_bind_failed:
+ func->destroy(res);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res: Pointer to the struct vmw_resource to unreserve.
+ * @new_backup: Pointer to new backup buffer if command submission
+ * switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ if (!list_empty(&res->lru_head))
+ return;
+
+ if (new_backup && new_backup != res->backup) {
+
+ if (res->backup) {
+ BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ res->backup = vmw_dmabuf_reference(new_backup);
+ BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+ list_add_tail(&res->mob_head, &new_backup->res_list);
+ }
+ if (new_backup)
+ res->backup_offset = new_backup_offset;
+
+ if (!res->func->may_evict)
+ return;
+
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&res->lru_head,
+ &res->dev_priv->res_lru[res->func->res_type]);
+ write_unlock(&dev_priv->resource_lock);
+}
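/*
 * Illustrative sketch (not part of the patch): together with
 * vmw_resource_reserve(), vmw_resource_validate() and vmw_fence_single_bo()
 * introduced further down, a caller is expected to drive a resource roughly
 * as follows. Error unwinding and reservation of the backup buffer itself
 * are omitted for brevity; treat this as an assumption-based outline only.
 */
static int vmw_example_use_resource(struct vmw_resource *res,
				    struct vmw_fence_obj *fence)
{
	int ret;

	ret = vmw_resource_reserve(res, false);	/* off the LRU, backup ensured */
	if (ret)
		return ret;

	ret = vmw_resource_validate(res);	/* create/bind, evicting others if needed */
	if (ret)
		return ret;

	if (res->backup)
		vmw_fence_single_bo(&res->backup->base, fence);

	/* Back on the LRU list; keep the current backup buffer and offset. */
	vmw_resource_unreserve(res, NULL, 0);
	return 0;
}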
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ * for a resource and in that case, allocate
+ * one, reserve and validate it.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ * @val_buf: On successful return contains data about the
+ * reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+ bool interruptible,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+ bool backup_dirty = false;
+ int ret;
+
+ if (unlikely(res->backup == NULL)) {
+ ret = vmw_resource_buf_alloc(res, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&val_list);
+ val_buf->bo = ttm_bo_reference(&res->backup->base);
+ list_add_tail(&val_buf->head, &val_list);
+ ret = ttm_eu_reserve_buffers(&val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ if (res->func->needs_backup && list_empty(&res->mob_head))
+ return 0;
+
+ backup_dirty = res->backup_dirty;
+ ret = ttm_bo_validate(&res->backup->base,
+ res->func->backup_placement,
+ true, false);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+
+ return 0;
+
+out_no_validate:
+ ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+ ttm_bo_unref(&val_buf->bo);
+ if (backup_dirty)
+ vmw_dmabuf_unreference(&res->backup);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res: The resource to reserve.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ write_lock(&dev_priv->resource_lock);
+ list_del_init(&res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+
+ if (res->func->needs_backup && res->backup == NULL &&
+ !no_backup) {
+ ret = vmw_resource_buf_alloc(res, true);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ * backup buffer
+ *
+ * @val_buf: Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+
+ if (likely(val_buf->bo == NULL))
+ return;
+
+ INIT_LIST_HEAD(&val_list);
+ list_add_tail(&val_buf->head, &val_list);
+ ttm_eu_backoff_reservation(&val_list);
+ ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ * to a backup buffer.
+ *
+ * @res: The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+ struct ttm_validate_buffer val_buf;
+ const struct vmw_res_func *func = res->func;
+ int ret;
+
+ BUG_ON(!func->may_evict);
+
+ val_buf.bo = NULL;
+ ret = vmw_resource_check_buffer(res, true, &val_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(func->unbind != NULL &&
+ (!func->needs_backup || !list_empty(&res->mob_head)))) {
+ ret = func->unbind(res, res->res_dirty, &val_buf);
+ if (unlikely(ret != 0))
+ goto out_no_unbind;
+ list_del_init(&res->mob_head);
+ }
+ ret = func->destroy(res);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+out_no_unbind:
+ vmw_resource_backoff_reservation(&val_buf);
+
+ return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+ int ret;
+ struct vmw_resource *evict_res;
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+ struct ttm_validate_buffer val_buf;
+
+ if (likely(!res->func->may_evict))
+ return 0;
+
+ val_buf.bo = NULL;
+ if (res->backup)
+ val_buf.bo = &res->backup->base;
+ do {
+ ret = vmw_resource_do_validate(res, &val_buf);
+ if (likely(ret != -EBUSY))
+ break;
+
+ write_lock(&dev_priv->resource_lock);
+ if (list_empty(lru_list) || !res->func->may_evict) {
+			DRM_ERROR("Out of device id entries "
+ "for %s.\n", res->func->type_name);
+ ret = -EBUSY;
+ write_unlock(&dev_priv->resource_lock);
+ break;
+ }
+
+ evict_res = vmw_resource_reference
+ (list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+
+ write_unlock(&dev_priv->resource_lock);
+ vmw_resource_do_evict(evict_res);
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+ else if (!res->func->needs_backup && res->backup) {
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ return 0;
+
+out_no_validate:
+ return ret;
+}
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct vmw_fence_obj *old_fence_obj;
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ if (fence == NULL)
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ else
+ driver->sync_obj_ref(fence);
+
+ spin_lock(&bdev->fence_lock);
+
+ old_fence_obj = bo->sync_obj;
+ bo->sync_obj = fence;
+
+ spin_unlock(&bdev->fence_lock);
+
+ if (old_fence_obj)
+ vmw_fence_obj_unreference(&old_fence_obj);
+}
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * For now does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res: The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+ return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @type: The resource type to evict
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+ enum vmw_res_type type)
+{
+ struct list_head *lru_list = &dev_priv->res_lru[type];
+ struct vmw_resource *evict_res;
+
+ do {
+ write_lock(&dev_priv->resource_lock);
+
+ if (list_empty(lru_list))
+ goto out_unlock;
+
+ evict_res = vmw_resource_reference(
+ list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+ vmw_resource_do_evict(evict_res);
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+out_unlock:
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv: Pointer to a device private struct
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+ enum vmw_res_type type;
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ for (type = 0; type < vmw_res_max; ++type)
+ vmw_resource_evict_type(dev_priv, type);
+
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
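/*
 * Hypothetical caller sketch (the real call sites are outside this hunk):
 * a suspend or hibernation path would be expected to flush all evictable
 * resources to their backup buffers before saving device state.
 */
static void vmw_example_prepare_hibernation(struct vmw_private *dev_priv)
{
	vmw_resource_evict_all(dev_priv);
}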
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 000000000000..f3adeed2854c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+ enum ttm_object_type object_type;
+ struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+ void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type: Enum that identifies the lru list to use for eviction.
+ * @needs_backup: Whether the resource is guest-backed and needs
+ * persistent buffer storage.
+ * @type_name: String that identifies the resource type.
+ * @backup_placement: TTM placement for backup buffers.
+ * @may_evict: Whether the resource may be evicted.
+ * @create: Create a hardware resource.
+ * @destroy: Destroy a hardware resource.
+ * @bind: Bind a hardware resource to persistent buffer storage.
+ * @unbind: Unbind a hardware resource from persistent
+ * buffer storage.
+ */
+
+struct vmw_res_func {
+ enum vmw_res_type res_type;
+ bool needs_backup;
+ const char *type_name;
+ struct ttm_placement *backup_placement;
+ bool may_evict;
+
+ int (*create) (struct vmw_resource *res);
+ int (*destroy) (struct vmw_resource *res);
+ int (*bind) (struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+ int (*unbind) (struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+};
+
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func);
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *));
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 6deaf2f8bab1..26387c3d5a21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
@@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
return -EINVAL;
}
- if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
+ if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
DRM_INFO("Not using screen objects,"
" missing cap SCREEN_OBJECT_2\n");
return -ENOSYS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 000000000000..582814339748
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,893 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "svga3d_surfacedefs.h"
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base: The TTM base object handling user-space visibility.
+ * @srf: The surface metadata.
+ * @size: TTM accounting size for the surface.
+ */
+struct vmw_user_surface {
+ struct ttm_base_object base;
+ struct vmw_surface srf;
+ uint32_t size;
+ uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face: Surface face.
+ * @mip: Mip level.
+ * @bo_offset: Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+ uint32_t face;
+ uint32_t mip;
+ uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+ .object_type = VMW_RES_SURFACE,
+ .base_obj_to_res = vmw_user_surface_base_to_res,
+ .res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+ &user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+ .res_type = vmw_res_surface,
+ .needs_backup = false,
+ .may_evict = true,
+ .type_name = "legacy surfaces",
+ .backup_placement = &vmw_srf_placement,
+ .create = &vmw_legacy_srf_create,
+ .destroy = &vmw_legacy_srf_destroy,
+ .bind = &vmw_legacy_srf_bind,
+ .unbind = &vmw_legacy_srf_unbind
+};
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+ SVGA3dCopyBox cb;
+ SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+ return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+ return sizeof(struct vmw_surface_define) + srf->num_sizes *
+ sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+ return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+ void *cmd_space)
+{
+ struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+ cmd_space;
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+ void *cmd_space)
+{
+ struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+ cmd_space;
+ struct drm_vmw_size *src_size;
+ SVGA3dSize *cmd_size;
+ uint32_t cmd_len;
+ int i;
+
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = src_size->width;
+ cmd_size->height = src_size->height;
+ cmd_size->depth = src_size->depth;
+ }
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+ void *cmd_space,
+ const SVGAGuestPtr *ptr,
+ bool to_surface)
+{
+ uint32_t i;
+ struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+ const struct svga3d_surface_desc *desc =
+ svga3dsurface_get_desc(srf->format);
+
+ for (i = 0; i < srf->num_sizes; ++i) {
+ SVGA3dCmdHeader *header = &cmd->header;
+ SVGA3dCmdSurfaceDMA *body = &cmd->body;
+ SVGA3dCopyBox *cb = &cmd->cb;
+ SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+ const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+ const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+ header->id = SVGA_3D_CMD_SURFACE_DMA;
+ header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+ body->guest.ptr = *ptr;
+ body->guest.ptr.offset += cur_offset->bo_offset;
+ body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+ cur_size);
+ body->host.sid = srf->res.id;
+ body->host.face = cur_offset->face;
+ body->host.mipmap = cur_offset->mip;
+ body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM);
+ cb->x = 0;
+ cb->y = 0;
+ cb->z = 0;
+ cb->srcx = 0;
+ cb->srcy = 0;
+ cb->srcz = 0;
+ cb->w = cur_size->width;
+ cb->h = cur_size->height;
+ cb->d = cur_size->depth;
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset =
+ svga3dsurface_get_image_buffer_size(desc, cur_size,
+ body->guest.pitch);
+ suffix->flags.discard = 0;
+ suffix->flags.unsynchronized = 0;
+ suffix->flags.reserved = 0;
+ ++cmd;
+ }
+}
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface, if
+ * any, and adjusts accounting and resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf;
+ void *cmd;
+
+ if (res->id != -1) {
+
+ cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+ /*
+ * TODO: Consider using a used_memory_size_atomic, or a separate
+ * lock, to avoid taking dev_priv::cmdbuf_mutex in
+ * the destroy path.
+ */
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ srf = vmw_res_to_srf(res);
+ dev_priv->used_memory_size -= res->backup_size;
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ }
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_surface.
+ *
+ * Creates the device surface if it doesn't already have a hw id.
+ *
+ * Returns -EBUSY if there wasn't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf;
+ uint32_t submit_size;
+ uint8_t *cmd;
+ int ret;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ srf = vmw_res_to_srf(res);
+ if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+ dev_priv->memory_size))
+ return -EBUSY;
+
+ /*
+ * Alloc id for the resource.
+ */
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ /*
+ * Encode the surface define commands.
+ */
+
+ submit_size = vmw_surface_define_size(srf);
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ vmw_surface_define_encode(srf, cmd);
+ vmw_fifo_commit(dev_priv, submit_size);
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size += res->backup_size;
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ * @bind: Boolean whether to DMA to the surface.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf,
+ bool bind)
+{
+ SVGAGuestPtr ptr;
+ struct vmw_fence_obj *fence;
+ uint32_t submit_size;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ uint8_t *cmd;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ BUG_ON(val_buf->bo == NULL);
+
+ submit_size = vmw_surface_dma_size(srf);
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "DMA.\n");
+ return -ENOMEM;
+ }
+ vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+ vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ * surface validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ if (!res->backup_dirty)
+ return 0;
+
+ return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ * surface eviction process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @readback: Whether the surface contents should be copied back to the
+ * backup buffer before unbinding.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ *
+ * This function will copy backup data from the surface to the backup
+ * buffer if @readback is true.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ if (unlikely(readback))
+ return vmw_legacy_srf_dma(res, val_buf, false);
+ return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ * resource eviction process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+ BUG_ON(res->id == -1);
+
+ /*
+ * Encode the surface destroy command.
+ */
+
+ submit_size = vmw_surface_destroy_size();
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "eviction.\n");
+ return -ENOMEM;
+ }
+
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size -= res->backup_size;
+
+ /*
+ * Release the surface ID.
+ */
+
+ vmw_resource_release_id(res);
+
+ return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to the struct vmw_surface to initialize.
+ * @res_free: Pointer to a resource destructor used to free
+ * the object.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_resource *res = &srf->res;
+
+ BUG_ON(res_free == NULL);
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ ret = vmw_resource_init(dev_priv, res, true, res_free,
+ &vmw_legacy_surface_func);
+
+ if (unlikely(ret != 0)) {
+ vmw_3d_resource_dec(dev_priv, false);
+ res_free(res);
+ return ret;
+ }
+
+ /*
+ * The surface won't be visible to hardware until a
+ * surface validate.
+ */
+
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ * user visible surfaces
+ *
+ * @base: Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+}
+
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res: A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_user_surface *user_srf =
+ container_of(srf, struct vmw_user_surface, srf);
+ struct vmw_private *dev_priv = srf->res.dev_priv;
+ uint32_t size = user_srf->size;
+
+ kfree(srf->offsets);
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ ttm_base_object_kfree(user_srf, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
+ *
+ * @p_base: Pointer to a pointer to a TTM base object
+ * embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference on its resource, and the
+ * pointer pointed to by *p_base is set to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_surface *user_srf =
+ container_of(base, struct vmw_user_surface, base);
+ struct vmw_resource *res = &user_srf->srf.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ * the user surface destroy functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_surface_create_arg *arg =
+ (union drm_vmw_surface_create_arg *)data;
+ struct drm_vmw_surface_create_req *req = &arg->req;
+ struct drm_vmw_surface_arg *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct drm_vmw_size __user *user_sizes;
+ int ret;
+ int i, j;
+ uint32_t cur_bo_offset;
+ struct drm_vmw_size *cur_size;
+ struct vmw_surface_offset *cur_offset;
+ uint32_t num_sizes;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ const struct svga3d_surface_desc *desc;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ num_sizes += req->mip_levels[i];
+
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ size = vmw_user_surface_size + 128 +
+ ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+ ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+ desc = svga3dsurface_get_desc(req->format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format for surface creation.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->flags;
+ srf->format = req->format;
+ srf->scanout = req->scanout;
+
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = num_sizes;
+ user_srf->size = size;
+
+ srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+ if (unlikely(srf->sizes == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_sizes;
+ }
+ srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(srf->offsets == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_offsets;
+ }
+
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr;
+
+ ret = copy_from_user(srf->sizes, user_sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ goto out_no_copy;
+ }
+
+ srf->base_size = *srf->sizes;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->multisample_count = 1;
+
+ cur_bo_offset = 0;
+ cur_offset = srf->offsets;
+ cur_size = srf->sizes;
+
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ for (j = 0; j < srf->mip_levels[i]; ++j) {
+ uint32_t stride = svga3dsurface_calculate_pitch
+ (desc, cur_size);
+
+ cur_offset->face = i;
+ cur_offset->mip = j;
+ cur_offset->bo_offset = cur_bo_offset;
+ cur_bo_offset += svga3dsurface_get_image_buffer_size
+ (desc, cur_size, stride);
+ ++cur_offset;
+ ++cur_size;
+ }
+ }
+ res->backup_size = cur_bo_offset;
+ if (srf->scanout &&
+ srf->num_sizes == 1 &&
+ srf->sizes[0].width == 64 &&
+ srf->sizes[0].height == 64 &&
+ srf->format == SVGA3D_A8R8G8B8) {
+
+ srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+ /* clear the image */
+ if (srf->snooper.image) {
+ memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+ } else {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ ret = -ENOMEM;
+ goto out_no_copy;
+ }
+ } else {
+ srf->snooper.image = NULL;
+ }
+ srf->snooper.crtc = NULL;
+
+ user_srf->base.shareable = false;
+ user_srf->base.tfile = NULL;
+
+ /**
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_base_object_init(tfile, &user_srf->base,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->sid = user_srf->base.hash.key;
+ vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_no_copy:
+ kfree(srf->offsets);
+out_no_offsets:
+ kfree(srf->sizes);
+out_no_sizes:
+ ttm_base_object_kfree(user_srf, base);
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_surface_reference_arg *arg =
+ (union drm_vmw_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_surface_create_req *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct drm_vmw_size __user *user_sizes;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+
+ ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_no_reference;
+ }
+
+ rep->flags = srf->flags;
+ rep->format = srf->format;
+ memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ rep->size_addr;
+
+ if (user_sizes)
+ ret = copy_to_user(user_sizes, srf->sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("copy_to_user failed %p %u\n",
+ user_sizes, srf->num_sizes);
+ ret = -EFAULT;
+ }
+out_bad_resource:
+out_no_reference:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index e25cf31faab2..fa60add0ff63 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/dmi.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
@@ -376,7 +375,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char usercmd[64];
- const char *pdev_name;
int ret;
bool delay = false, can_switch;
bool just_mux = false;
@@ -468,7 +466,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
goto out;
if (can_switch) {
- pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage1(client);
if (ret)
printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
@@ -540,7 +537,6 @@ fail:
int vga_switcheroo_process_delayed_switch(void)
{
struct vga_switcheroo_client *client;
- const char *pdev_name;
int ret;
int err = -EINVAL;
@@ -555,7 +551,6 @@ int vga_switcheroo_process_delayed_switch(void)
if (!client || !check_can_switch())
goto err;
- pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage2(client);
if (ret)
printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
@@ -567,4 +562,3 @@ err:
return err;
}
EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
-
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index e7d6a13ec6a6..5f07d85c4189 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -320,7 +320,7 @@ config HID_LOGITECH_DJ
Say Y if you want support for Logitech Unifying receivers and devices.
Unifying receivers are capable of pairing up to 6 Logitech compliant
devices to the same receiver. Without this driver it will be handled by
- generic USB_HID driver and all incomming events will be multiplexed
+ generic USB_HID driver and all incoming events will be multiplexed
into a single mouse and a single keyboard device.
config LOGITECH_FF
@@ -596,6 +596,12 @@ config HID_SPEEDLINK
---help---
Support for Speedlink Vicious and Divine Cezanne mouse.
+config HID_STEELSERIES
+ tristate "Steelseries SRW-S1 steering wheel support"
+ depends on USB_HID
+ ---help---
+ Support for Steelseries SRW-S1 steering wheel.
+
config HID_SUNPLUS
tristate "Sunplus wireless desktop"
depends on USB_HID
@@ -655,6 +661,16 @@ config HID_TOPSEED
Say Y if you have a TopSeed Cyberlink or BTC Emprex or Conceptronic
CLLRCMCE remote control.
+config HID_THINGM
+ tristate "ThingM blink(1) USB RGB LED"
+ depends on USB_HID
+ depends on LEDS_CLASS
+ ---help---
+ Support for the ThingM blink(1) USB RGB LED. This driver registers a
+ Linux LED class instance, plus additional sysfs attributes to control
+ RGB colors, fade time and playing. The device is exposed through hidraw
+ to access other functions.
+
config HID_THRUSTMASTER
tristate "ThrustMaster devices support"
depends on USB_HID
@@ -719,7 +735,7 @@ config HID_ZYDACRON
config HID_SENSOR_HUB
tristate "HID Sensors framework support"
- depends on USB_HID
+ depends on USB_HID && GENERIC_HARDIRQS
select MFD_CORE
default n
---help---
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index b62215716b2f..72d1b0bc0a97 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -101,8 +101,10 @@ obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
+obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
+obj-$(CONFIG_HID_THINGM) += hid-thingm.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 0a239885e67c..7c5507e94820 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -146,17 +146,6 @@ static struct hid_driver a4_driver = {
.probe = a4_probe,
.remove = a4_remove,
};
+module_hid_driver(a4_driver);
-static int __init a4_init(void)
-{
- return hid_register_driver(&a4_driver);
-}
-
-static void __exit a4_exit(void)
-{
- hid_unregister_driver(&a4_driver);
-}
-
-module_init(a4_init);
-module_exit(a4_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d0f7662aacca..320a958d4139 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -555,23 +555,6 @@ static struct hid_driver apple_driver = {
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
};
+module_hid_driver(apple_driver);
-static int __init apple_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&apple_driver);
- if (ret)
- pr_err("can't register apple driver\n");
-
- return ret;
-}
-
-static void __exit apple_exit(void)
-{
- hid_unregister_driver(&apple_driver);
-}
-
-module_init(apple_init);
-module_exit(apple_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-aureal.c b/drivers/hid/hid-aureal.c
index 7968187ddf7b..340ba9d394a0 100644
--- a/drivers/hid/hid-aureal.c
+++ b/drivers/hid/hid-aureal.c
@@ -37,17 +37,6 @@ static struct hid_driver aureal_driver = {
.id_table = aureal_devices,
.report_fixup = aureal_report_fixup,
};
+module_hid_driver(aureal_driver);
-static int __init aureal_init(void)
-{
- return hid_register_driver(&aureal_driver);
-}
-
-static void __exit aureal_exit(void)
-{
- hid_unregister_driver(&aureal_driver);
-}
-
-module_init(aureal_init);
-module_exit(aureal_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 5be858dd9a15..62f0cee032ba 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -192,19 +192,7 @@ static struct hid_driver ax_driver = {
.probe = ax_probe,
.remove = ax_remove,
};
-
-static int __init ax_init(void)
-{
- return hid_register_driver(&ax_driver);
-}
-
-static void __exit ax_exit(void)
-{
- hid_unregister_driver(&ax_driver);
-}
-
-module_init(ax_init);
-module_exit(ax_exit);
+module_hid_driver(ax_driver);
MODULE_AUTHOR("Sergei Kolzun");
MODULE_DESCRIPTION("Force feedback support for ACRUX game controllers");
diff --git a/drivers/hid/hid-belkin.c b/drivers/hid/hid-belkin.c
index a1a5a12c3a6b..cc4cf138bef5 100644
--- a/drivers/hid/hid-belkin.c
+++ b/drivers/hid/hid-belkin.c
@@ -86,17 +86,6 @@ static struct hid_driver belkin_driver = {
.input_mapping = belkin_input_mapping,
.probe = belkin_probe,
};
+module_hid_driver(belkin_driver);
-static int __init belkin_init(void)
-{
- return hid_register_driver(&belkin_driver);
-}
-
-static void __exit belkin_exit(void)
-{
- hid_unregister_driver(&belkin_driver);
-}
-
-module_init(belkin_init);
-module_exit(belkin_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index af034d3d9256..1bdcccc54a1d 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -69,17 +69,6 @@ static struct hid_driver ch_driver = {
.report_fixup = ch_report_fixup,
.input_mapping = ch_input_mapping,
};
+module_hid_driver(ch_driver);
-static int __init ch_init(void)
-{
- return hid_register_driver(&ch_driver);
-}
-
-static void __exit ch_exit(void)
-{
- hid_unregister_driver(&ch_driver);
-}
-
-module_init(ch_init);
-module_exit(ch_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index a2abb8e15727..b613d5a79684 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -70,17 +70,6 @@ static struct hid_driver ch_driver = {
.id_table = ch_devices,
.input_mapping = ch_input_mapping,
};
+module_hid_driver(ch_driver);
-static int __init ch_init(void)
-{
- return hid_register_driver(&ch_driver);
-}
-
-static void __exit ch_exit(void)
-{
- hid_unregister_driver(&ch_driver);
-}
-
-module_init(ch_init);
-module_exit(ch_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index eb2ee11b6412..ff75cabf7393 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -729,7 +729,7 @@ static int hid_scan_report(struct hid_device *hid)
item.type == HID_ITEM_TYPE_MAIN &&
item.tag == HID_MAIN_ITEM_TAG_BEGIN_COLLECTION &&
(item_udata(&item) & 0xff) == HID_COLLECTION_PHYSICAL &&
- hid->bus == BUS_USB)
+ (hid->bus == BUS_USB || hid->bus == BUS_I2C))
hid->group = HID_GROUP_SENSOR_HUB;
}
@@ -1195,6 +1195,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
+ struct hid_driver *hdrv;
unsigned int a;
int rsize, csize = size;
u8 *cdata = data;
@@ -1231,6 +1232,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
if (hid->claimed != HID_CLAIMED_HIDRAW) {
for (a = 0; a < report->maxfield; a++)
hid_input_field(hid, report->field[a], cdata, interrupt);
+ hdrv = hid->driver;
+ if (hdrv && hdrv->report)
+ hdrv->report(hid, report);
}
if (hid->claimed & HID_CLAIMED_INPUT)
@@ -1599,6 +1603,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
@@ -1697,7 +1702,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2228,6 +2235,14 @@ bool hid_ignore(struct hid_device *hdev)
hdev->type != HID_TYPE_USBMOUSE)
return true;
break;
+ case USB_VENDOR_ID_VELLEMAN:
+ /* These are not HID devices. They are handled by comedi. */
+ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
+ (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 3e159a50dac7..c4ef3bc726e3 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -144,17 +144,6 @@ static struct hid_driver cp_driver = {
.event = cp_event,
.probe = cp_probe,
};
+module_hid_driver(cp_driver);
-static int __init cp_init(void)
-{
- return hid_register_driver(&cp_driver);
-}
-
-static void __exit cp_exit(void)
-{
- hid_unregister_driver(&cp_driver);
-}
-
-module_init(cp_init);
-module_exit(cp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index e832f44ae383..0fe8f65ef01a 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -297,17 +297,6 @@ static struct hid_driver dr_driver = {
.report_fixup = dr_report_fixup,
.probe = dr_probe,
};
+module_hid_driver(dr_driver);
-static int __init dr_init(void)
-{
- return hid_register_driver(&dr_driver);
-}
-
-static void __exit dr_exit(void)
-{
- hid_unregister_driver(&dr_driver);
-}
-
-module_init(dr_init);
-module_exit(dr_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 79d0c61e7214..d0bd13b62dc2 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -41,17 +41,6 @@ static struct hid_driver elecom_driver = {
.id_table = elecom_devices,
.report_fixup = elecom_report_fixup
};
+module_hid_driver(elecom_driver);
-static int __init elecom_init(void)
-{
- return hid_register_driver(&elecom_driver);
-}
-
-static void __exit elecom_exit(void)
-{
- hid_unregister_driver(&elecom_driver);
-}
-
-module_init(elecom_init);
-module_exit(elecom_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 2630d483d262..2e093ab99b43 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -150,18 +150,7 @@ static struct hid_driver ems_driver = {
.id_table = ems_devices,
.probe = ems_probe,
};
+module_hid_driver(ems_driver);
-static int ems_init(void)
-{
- return hid_register_driver(&ems_driver);
-}
-
-static void ems_exit(void)
-{
- hid_unregister_driver(&ems_driver);
-}
-
-module_init(ems_init);
-module_exit(ems_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ezkey.c b/drivers/hid/hid-ezkey.c
index 6540af2871a7..212ac6be2451 100644
--- a/drivers/hid/hid-ezkey.c
+++ b/drivers/hid/hid-ezkey.c
@@ -76,17 +76,6 @@ static struct hid_driver ez_driver = {
.input_mapping = ez_input_mapping,
.event = ez_event,
};
+module_hid_driver(ez_driver);
-static int __init ez_init(void)
-{
- return hid_register_driver(&ez_driver);
-}
-
-static void __exit ez_exit(void)
-{
- hid_unregister_driver(&ez_driver);
-}
-
-module_init(ez_init);
-module_exit(ez_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index f1e1bcf67427..04d2e6aca778 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -176,17 +176,6 @@ static struct hid_driver ga_driver = {
.id_table = ga_devices,
.probe = ga_probe,
};
+module_hid_driver(ga_driver);
-static int __init ga_init(void)
-{
- return hid_register_driver(&ga_driver);
-}
-
-static void __exit ga_exit(void)
-{
- hid_unregister_driver(&ga_driver);
-}
-
-module_init(ga_init);
-module_exit(ga_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index a8b3148e03a2..e288a4a06fe8 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -34,19 +34,7 @@ static struct hid_driver hid_generic = {
.name = "hid-generic",
.id_table = hid_table,
};
-
-static int __init hid_init(void)
-{
- return hid_register_driver(&hid_generic);
-}
-
-static void __exit hid_exit(void)
-{
- hid_unregister_driver(&hid_generic);
-}
-
-module_init(hid_init);
-module_exit(hid_exit);
+module_hid_driver(hid_generic);
MODULE_AUTHOR("Henrik Rydberg");
MODULE_DESCRIPTION("HID generic driver");
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 4442c30ef531..288d61c9748e 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -88,17 +88,6 @@ static struct hid_driver gyration_driver = {
.input_mapping = gyration_input_mapping,
.event = gyration_event,
};
+module_hid_driver(gyration_driver);
-static int __init gyration_init(void)
-{
- return hid_register_driver(&gyration_driver);
-}
-
-static void __exit gyration_exit(void)
-{
- hid_unregister_driver(&gyration_driver);
-}
-
-module_init(gyration_init);
-module_exit(gyration_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index e0a5d1739fc3..6e1a4a4fc0c1 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -167,17 +167,6 @@ static struct hid_driver holtek_kbd_driver = {
.report_fixup = holtek_kbd_report_fixup,
.probe = holtek_kbd_probe
};
+module_hid_driver(holtek_kbd_driver);
-static int __init holtek_kbd_init(void)
-{
- return hid_register_driver(&holtek_kbd_driver);
-}
-
-static void __exit holtek_kbd_exit(void)
-{
- hid_unregister_driver(&holtek_kbd_driver);
-}
-
-module_exit(holtek_kbd_exit);
-module_init(holtek_kbd_init);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index ff295e60059b..f34d1186a3e1 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -224,17 +224,4 @@ static struct hid_driver holtek_driver = {
.id_table = holtek_devices,
.probe = holtek_probe,
};
-
-static int __init holtek_init(void)
-{
- return hid_register_driver(&holtek_driver);
-}
-
-static void __exit holtek_exit(void)
-{
- hid_unregister_driver(&holtek_driver);
-}
-
-module_init(holtek_init);
-module_exit(holtek_exit);
-
+module_hid_driver(holtek_driver);
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 3d62781b8993..aa3fec0d9dc6 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -568,8 +568,7 @@ static int mousevsc_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Mouse guid */
- { VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
- 0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
+ { HV_MOUSE_GUID, },
{ },
};
diff --git a/drivers/hid/hid-icade.c b/drivers/hid/hid-icade.c
index 1d6565e37ba3..09dcc04595f3 100644
--- a/drivers/hid/hid-icade.c
+++ b/drivers/hid/hid-icade.c
@@ -235,25 +235,8 @@ static struct hid_driver icade_driver = {
.input_mapped = icade_input_mapped,
.input_mapping = icade_input_mapping,
};
+module_hid_driver(icade_driver);
-static int __init icade_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&icade_driver);
- if (ret)
- pr_err("can't register icade driver\n");
-
- return ret;
-}
-
-static void __exit icade_exit(void)
-{
- hid_unregister_driver(&icade_driver);
-}
-
-module_init(icade_init);
-module_exit(icade_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_DESCRIPTION("ION iCade input driver");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4dfa605e2d14..6e5c2ffa8d96 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -306,6 +306,9 @@
#define USB_VENDOR_ID_EZKEY 0x0518
#define USB_DEVICE_ID_BTC_8193 0x0002
+#define USB_VENDOR_ID_FORMOSA 0x147a
+#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
+
#define USB_VENDOR_ID_FREESCALE 0x15A2
#define USB_DEVICE_ID_FREESCALE_MX28 0x004F
@@ -442,6 +445,9 @@
#define USB_VENDOR_ID_JESS 0x0c45
#define USB_DEVICE_ID_JESS_YUREX 0x1010
+#define USB_VENDOR_ID_JESS2 0x0f30
+#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
+
#define USB_VENDOR_ID_KBGEAR 0x084e
#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001
@@ -522,8 +528,8 @@
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
#define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286
#define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287
-#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293
+#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
@@ -594,6 +600,9 @@
#define USB_VENDOR_ID_NEC 0x073e
#define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
+#define USB_VENDOR_ID_NEXIO 0x1870
+#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
+
#define USB_VENDOR_ID_NEXTWINDOW 0x1926
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
@@ -706,6 +715,7 @@
#define USB_VENDOR_ID_SONY 0x054c
#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
+#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
@@ -723,6 +733,9 @@
#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
+#define USB_VENDOR_ID_STEELSERIES 0x1038
+#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
+
#define USB_VENDOR_ID_SUN 0x0430
#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
@@ -744,6 +757,9 @@
#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
+#define USB_VENDOR_ID_THINGM 0x27b8
+#define USB_DEVICE_ID_BLINK1 0x01ed
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TIVO 0x150a
@@ -791,6 +807,12 @@
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
+#define USB_VENDOR_ID_VELLEMAN 0x10cf
+#define USB_DEVICE_ID_VELLEMAN_K8055_FIRST 0x5500
+#define USB_DEVICE_ID_VELLEMAN_K8055_LAST 0x5503
+#define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061
+#define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068
+
#define USB_VENDOR_ID_VERNIER 0x08f7
#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
diff --git a/drivers/hid/hid-kensington.c b/drivers/hid/hid-kensington.c
index a5b4016e9bd7..fe9a99dd8d08 100644
--- a/drivers/hid/hid-kensington.c
+++ b/drivers/hid/hid-kensington.c
@@ -47,17 +47,6 @@ static struct hid_driver ks_driver = {
.id_table = ks_devices,
.input_mapping = ks_input_mapping,
};
+module_hid_driver(ks_driver);
-static int __init ks_init(void)
-{
- return hid_register_driver(&ks_driver);
-}
-
-static void __exit ks_exit(void)
-{
- hid_unregister_driver(&ks_driver);
-}
-
-module_init(ks_init);
-module_exit(ks_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c
index 07cd825f6f01..3074671b7d6a 100644
--- a/drivers/hid/hid-keytouch.c
+++ b/drivers/hid/hid-keytouch.c
@@ -49,18 +49,7 @@ static struct hid_driver keytouch_driver = {
.id_table = keytouch_devices,
.report_fixup = keytouch_report_fixup,
};
+module_hid_driver(keytouch_driver);
-static int __init keytouch_init(void)
-{
- return hid_register_driver(&keytouch_driver);
-}
-
-static void __exit keytouch_exit(void)
-{
- hid_unregister_driver(&keytouch_driver);
-}
-
-module_init(keytouch_init);
-module_exit(keytouch_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jiri Kosina");
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b4f0d8216fd0..ef72daecfa16 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -419,17 +419,6 @@ static struct hid_driver kye_driver = {
.probe = kye_probe,
.report_fixup = kye_report_fixup,
};
+module_hid_driver(kye_driver);
-static int __init kye_init(void)
-{
- return hid_register_driver(&kye_driver);
-}
-
-static void __exit kye_exit(void)
-{
- hid_unregister_driver(&kye_driver);
-}
-
-module_init(kye_init);
-module_exit(kye_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lcpower.c b/drivers/hid/hid-lcpower.c
index 22bc14abdfa3..6424cfdb7737 100644
--- a/drivers/hid/hid-lcpower.c
+++ b/drivers/hid/hid-lcpower.c
@@ -54,17 +54,6 @@ static struct hid_driver ts_driver = {
.id_table = ts_devices,
.input_mapping = ts_input_mapping,
};
+module_hid_driver(ts_driver);
-static int __init ts_init(void)
-{
- return hid_register_driver(&ts_driver);
-}
-
-static void __exit ts_exit(void)
-{
- hid_unregister_driver(&ts_driver);
-}
-
-module_init(ts_init);
-module_exit(ts_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index cea016e94f43..956c3b135f64 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -468,18 +468,6 @@ static struct hid_driver tpkbd_driver = {
.probe = tpkbd_probe,
.remove = tpkbd_remove,
};
-
-static int __init tpkbd_init(void)
-{
- return hid_register_driver(&tpkbd_driver);
-}
-
-static void __exit tpkbd_exit(void)
-{
- hid_unregister_driver(&tpkbd_driver);
-}
-
-module_init(tpkbd_init);
-module_exit(tpkbd_exit);
+module_hid_driver(tpkbd_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index a2f8e88b9fa2..6f12ecd36c88 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -21,8 +21,10 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/usb.h>
#include <linux/wait.h>
+#include "usbhid/usbhid.h"
#include "hid-ids.h"
#include "hid-lg.h"
@@ -40,17 +42,86 @@
#define LG_FF3 0x1000
#define LG_FF4 0x2000
-/* Size of the original descriptor of the Driving Force Pro wheel */
+/* Size of the original descriptors of the Driving Force (and Pro) wheels */
+#define DF_RDESC_ORIG_SIZE 130
#define DFP_RDESC_ORIG_SIZE 97
+#define MOMO_RDESC_ORIG_SIZE 87
-/* Fixed report descriptor for Logitech Driving Force Pro wheel controller
+/* Fixed report descriptors for Logitech Driving Force (and Pro)
+ * wheel controllers
*
- * The original descriptor hides the separate throttle and brake axes in
+ * The original descriptors hide the separate throttle and brake axes in
* a custom vendor usage page, providing only a combined value as
* GenericDesktop.Y.
- * This descriptor removes the combined Y axis and instead reports
+ * These descriptors remove the combined Y axis and instead report
* separate throttle (Y) and brake (RZ).
*/
+static __u8 df_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x14, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x34, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0C, /* Report Count (12), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage (Buttons), */
+0x19, 0x01, /* Usage Minimum (1), */
+0x29, 0x0c, /* Usage Maximum (12), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x02, /* Report Count (2), */
+0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */
+0x09, 0x01, /* Usage (?: 1), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x81, 0x02, /* Input (Variable), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x75, 0x04, /* Report Size (4), */
+0x65, 0x14, /* Unit (Degrees), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x81, 0x42, /* Input (Variable, Null State), */
+0x75, 0x01, /* Report Size (1), */
+0x95, 0x04, /* Report Count (4), */
+0x65, 0x00, /* Unit (none), */
+0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */
+0x09, 0x01, /* Usage (?: 1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x35, /* Usage (Rz), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x07, /* Report Count (7), */
+0x75, 0x08, /* Report Size (8), */
+0x09, 0x03, /* Usage (?: 3), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
static __u8 dfp_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystik), */
@@ -99,6 +170,51 @@ static __u8 dfp_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 momo_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x08, /* Report Count (8), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x08, /* Usage Maximum (08h), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x75, 0x0E, /* Report Size (14), */
+0x95, 0x01, /* Report Count (1), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x00, /* Usage (00h), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x75, 0x08, /* Report Size (8), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Usage (Z), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x95, 0x07, /* Report Count (7), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
/*
* Certain Logitech keyboards send in report #3 keys which are far
@@ -109,6 +225,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
+ struct usb_device_descriptor *udesc;
+ __u16 bcdDevice, rev_maj, rev_min;
if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
@@ -124,17 +242,39 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
"fixing up rel/abs in Logitech report descriptor\n");
rdesc[33] = rdesc[50] = 0x02;
}
- if ((drv_data->quirks & LG_FF4) && *rsize >= 101 &&
- rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
- rdesc[47] == 0x05 && rdesc[48] == 0x09) {
- hid_info(hdev, "fixing up Logitech Speed Force Wireless button descriptor\n");
- rdesc[41] = 0x05;
- rdesc[42] = 0x09;
- rdesc[47] = 0x95;
- rdesc[48] = 0x0B;
- }
switch (hdev->product) {
+
+ /* Several wheels report as this id when operating in emulation mode. */
+ case USB_DEVICE_ID_LOGITECH_WHEEL:
+ udesc = &(hid_to_usb_dev(hdev)->descriptor);
+ if (!udesc) {
+ hid_err(hdev, "NULL USB device descriptor\n");
+ break;
+ }
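+ /* bcdDevice is the BCD-coded device release: high byte is the major revision, low byte the minor */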
+ bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ rev_maj = bcdDevice >> 8;
+ rev_min = bcdDevice & 0xff;
+
+ /* Update the report descriptor for only the Driving Force wheel */
+ if (rev_maj == 1 && rev_min == 2 &&
+ *rsize == DF_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Driving Force report descriptor\n");
+ rdesc = df_rdesc_fixed;
+ *rsize = sizeof(df_rdesc_fixed);
+ }
+ break;
+
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+ if (*rsize == MOMO_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Momo Force (Red) report descriptor\n");
+ rdesc = momo_rdesc_fixed;
+ *rsize = sizeof(momo_rdesc_fixed);
+ }
+ break;
+
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
if (*rsize == DFP_RDESC_ORIG_SIZE) {
hid_info(hdev,
@@ -143,6 +283,17 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(dfp_rdesc_fixed);
}
break;
+
+ case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
+ if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
+ rdesc[47] == 0x05 && rdesc[48] == 0x09) {
+ hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n");
+ rdesc[41] = 0x05;
+ rdesc[42] = 0x09;
+ rdesc[47] = 0x95;
+ rdesc[48] = 0x0B;
+ }
+ break;
}
return rdesc;
@@ -328,6 +479,26 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
usage->type == EV_REL || usage->type == EV_ABS))
clear_bit(usage->code, *bit);
+ /* Ensure that Logitech wheels are not given a default fuzz/flat value */
+ if (usage->type == EV_ABS && (usage->code == ABS_X ||
+ usage->code == ABS_Y || usage->code == ABS_Z ||
+ usage->code == ABS_RZ)) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LOGITECH_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ field->application = HID_GD_MULTIAXIS;
+ break;
+ default:
+ break;
+ }
+ }
+
return 0;
}
@@ -465,7 +636,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL),
- .driver_data = LG_FF4 },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
@@ -503,17 +674,6 @@ static struct hid_driver lg_driver = {
.probe = lg_probe,
.remove = lg_remove,
};
+module_hid_driver(lg_driver);
-static int __init lg_init(void)
-{
- return hid_register_driver(&lg_driver);
-}
-
-static void __exit lg_exit(void)
-{
- hid_unregister_driver(&lg_driver);
-}
-
-module_init(lg_init);
-module_exit(lg_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index d7947c701f30..65a6ec8d3742 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -43,11 +43,6 @@
#define G27_REV_MAJ 0x12
#define G27_REV_MIN 0x38
-#define DFP_X_MIN 0
-#define DFP_X_MAX 16383
-#define DFP_PEDAL_MIN 0
-#define DFP_PEDAL_MAX 255
-
#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
@@ -598,18 +593,6 @@ int lg4ff_init(struct hid_device *hid)
return error;
dbg_hid("sysfs interface created\n");
- /* Set default axes parameters */
- switch (lg4ff_devices[i].product_id) {
- case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
- dbg_hid("Setting axes parameters for Driving Force Pro\n");
- input_set_abs_params(dev, ABS_X, DFP_X_MIN, DFP_X_MAX, 0, 0);
- input_set_abs_params(dev, ABS_Y, DFP_PEDAL_MIN, DFP_PEDAL_MAX, 0, 0);
- input_set_abs_params(dev, ABS_RZ, DFP_PEDAL_MIN, DFP_PEDAL_MAX, 0, 0);
- break;
- default:
- break;
- }
-
/* Set the maximum range to start with */
entry->range = entry->max_range;
if (entry->set_range != NULL)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 25ddf3e3aec6..f7f113ba083e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -569,23 +569,6 @@ static struct hid_driver magicmouse_driver = {
.raw_event = magicmouse_raw_event,
.input_mapping = magicmouse_input_mapping,
};
+module_hid_driver(magicmouse_driver);
-static int __init magicmouse_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&magicmouse_driver);
- if (ret)
- pr_err("can't register magicmouse driver\n");
-
- return ret;
-}
-
-static void __exit magicmouse_exit(void)
-{
- hid_unregister_driver(&magicmouse_driver);
-}
-
-module_init(magicmouse_init);
-module_exit(magicmouse_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 6fcd466d0825..29d27f65a118 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -221,17 +221,6 @@ static struct hid_driver ms_driver = {
.event = ms_event,
.probe = ms_probe,
};
+module_hid_driver(ms_driver);
-static int __init ms_init(void)
-{
- return hid_register_driver(&ms_driver);
-}
-
-static void __exit ms_exit(void)
-{
- hid_unregister_driver(&ms_driver);
-}
-
-module_init(ms_init);
-module_exit(ms_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index cd3643e06fa6..9e14c00eb1b6 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -63,17 +63,6 @@ static struct hid_driver mr_driver = {
.report_fixup = mr_report_fixup,
.input_mapping = mr_input_mapping,
};
+module_hid_driver(mr_driver);
-static int __init mr_init(void)
-{
- return hid_register_driver(&mr_driver);
-}
-
-static void __exit mr_exit(void)
-{
- hid_unregister_driver(&mr_driver);
-}
-
-module_init(mr_init);
-module_exit(mr_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 61543c02ea0b..7a1ebb867cf4 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -54,6 +54,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_NO_AREA (1 << 9)
#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
#define MT_QUIRK_HOVERING (1 << 11)
+#define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12)
struct mt_slot {
__s32 x, y, cx, cy, p, w, h;
@@ -83,8 +84,11 @@ struct mt_device {
struct mt_class mtclass; /* our mt device class */
struct mt_fields *fields; /* temporary placeholder for storing the
multitouch fields */
+ int cc_index; /* contact count field index in the report */
+ int cc_value_index; /* contact count value index in the field */
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
+ unsigned mt_report_id; /* the report ID of the multitouch device */
__s8 inputmode; /* InputMode HID feature, -1 if non-existent */
__s8 inputmode_index; /* InputMode HID feature index in the report */
__s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -111,6 +115,9 @@ struct mt_device {
#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007
#define MT_CLS_DUAL_NSMU_CONTACTID 0x0008
#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
+#define MT_CLS_NSMU 0x000a
+#define MT_CLS_DUAL_CONTACT_NUMBER 0x0010
+#define MT_CLS_DUAL_CONTACT_ID 0x0011
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -144,6 +151,9 @@ static int cypress_compute_slot(struct mt_device *td)
static struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE },
+ { .name = MT_CLS_NSMU,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
{ .name = MT_CLS_SERIAL,
.quirks = MT_QUIRK_ALWAYS_VALID},
@@ -170,6 +180,16 @@ static struct mt_class mt_classes[] = {
{ .name = MT_CLS_INRANGE_CONTACTNUMBER,
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTNUMBER },
+ { .name = MT_CLS_DUAL_CONTACT_NUMBER,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+ .maxcontacts = 2 },
+ { .name = MT_CLS_DUAL_CONTACT_ID,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_SLOT_IS_CONTACTID,
+ .maxcontacts = 2 },
/*
* vendor specific classes
@@ -250,6 +270,9 @@ static ssize_t mt_set_quirks(struct device *dev,
td->mtclass.quirks = val;
+ if (td->cc_index < 0)
+ td->mtclass.quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+
return count;
}
@@ -301,6 +324,7 @@ static void mt_feature_mapping(struct hid_device *hdev,
*quirks |= MT_QUIRK_ALWAYS_VALID;
*quirks |= MT_QUIRK_IGNORE_DUPLICATES;
*quirks |= MT_QUIRK_HOVERING;
+ *quirks |= MT_QUIRK_CONTACT_CNT_ACCURATE;
*quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
*quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
*quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
@@ -428,6 +452,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
td->touches_by_report++;
+ td->mt_report_id = field->report->id;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
@@ -459,6 +484,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_field_index = field->index;
return 1;
case HID_DG_CONTACTCOUNT:
+ td->cc_index = field->index;
+ td->cc_value_index = usage->usage_index;
td->last_field_index = field->index;
return 1;
case HID_DG_CONTACTMAX:
@@ -523,6 +550,10 @@ static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
{
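+ /* ignore any contacts beyond the expected contact count when that count is known to be accurate */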
+ if ((td->mtclass.quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) &&
+ td->num_received >= td->num_expected)
+ return;
+
if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
int slotnum = mt_compute_slot(td, input);
struct mt_slot *s = &td->curdata;
@@ -578,6 +609,16 @@ static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
static int mt_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
+ /* the hidinput part is handled later in mt_report(); only hiddev is handled here */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
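+ /* a non-zero return tells hid-core to skip its generic hidinput handling for this usage */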
+ return 1;
+}
+
+static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
struct mt_device *td = hid_get_drvdata(hid);
__s32 quirks = td->mtclass.quirks;
@@ -623,20 +664,13 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
td->curdata.h = value;
break;
case HID_DG_CONTACTCOUNT:
- /*
- * Includes multi-packet support where subsequent
- * packets are sent with zero contactcount.
- */
- if (value)
- td->num_expected = value;
break;
case HID_DG_TOUCH:
/* do nothing */
break;
default:
- /* fallback to the generic hidinput handling */
- return 0;
+ return;
}
if (usage->usage_index + 1 == field->report_count) {
@@ -650,12 +684,43 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
}
}
+}
- /* we have handled the hidinput part, now remains hiddev */
- if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
- hid->hiddev_hid_event(hid, field, usage, value);
+static void mt_report(struct hid_device *hid, struct hid_report *report)
+{
+ struct mt_device *td = hid_get_drvdata(hid);
+ struct hid_field *field;
+ unsigned count;
+ int r, n;
- return 1;
+ if (report->id != td->mt_report_id)
+ return;
+
+ if (!(hid->claimed & HID_CLAIMED_INPUT))
+ return;
+
+ /*
+ * Includes multi-packet support where subsequent
+ * packets are sent with zero contactcount.
+ */
+ if (td->cc_index >= 0) {
+ struct hid_field *field = report->field[td->cc_index];
+ int value = field->value[td->cc_value_index];
+ if (value)
+ td->num_expected = value;
+ }
+
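+ /* feed every value of each variable field through the multitouch state machine */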
+ for (r = 0; r < report->maxfield; r++) {
+ field = report->field[r];
+ count = field->report_count;
+
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ continue;
+
+ for (n = 0; n < count; n++)
+ mt_process_mt_event(hid, field, &field->usage[n],
+ field->value[n]);
+ }
}
static void mt_set_input_mode(struct hid_device *hdev)
@@ -711,6 +776,7 @@ static void mt_post_parse_default_settings(struct mt_device *td)
quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
+ quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
td->mtclass.quirks = quirks;
@@ -719,11 +785,15 @@ static void mt_post_parse_default_settings(struct mt_device *td)
static void mt_post_parse(struct mt_device *td)
{
struct mt_fields *f = td->fields;
+ struct mt_class *cls = &td->mtclass;
if (td->touches_by_report > 0) {
int field_count_per_touch = f->length / td->touches_by_report;
td->last_slot_field = f->usages[field_count_per_touch - 1];
}
+
+ if (td->cc_index < 0)
+ cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
@@ -781,6 +851,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->mtclass = *mtclass;
td->inputmode = -1;
td->maxcontact_report_id = -1;
+ td->cc_index = -1;
hid_set_drvdata(hdev, td);
td->fields = kzalloc(sizeof(struct mt_fields), GFP_KERNEL);
@@ -875,7 +946,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_3M3266) },
/* ActionStar panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
USB_DEVICE_ID_ACTIONSTAR_1011) },
@@ -888,14 +959,14 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_ATMEL_MXT_DIGITIZER) },
/* Baanto multitouch devices */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_BAANTO,
USB_DEVICE_ID_BAANTO_MT_190W2) },
/* Cando panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
- { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
+ { .driver_data = MT_CLS_DUAL_CONTACT_NUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
@@ -906,12 +977,12 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
/* Chunghwa Telecom touch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
/* CVTouch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
USB_DEVICE_ID_CVTOUCH_SCREEN) },
@@ -982,7 +1053,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4) },
/* Elo TouchSystems IntelliTouch Plus panel */
- { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
+ { .driver_data = MT_CLS_DUAL_CONTACT_ID,
MT_USB_DEVICE(USB_VENDOR_ID_ELO,
USB_DEVICE_ID_ELO_TS2515) },
@@ -1000,12 +1071,12 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
/* Gametel game controller */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL,
USB_DEVICE_ID_GAMETEL_MT_MODE) },
/* GoodTouch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
@@ -1023,7 +1094,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_IDEACOM_IDC6651) },
/* Ilitek dual touch panel */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
USB_DEVICE_ID_ILITEK_MULTITOUCH) },
@@ -1056,6 +1127,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
+ /* Nexio panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ MT_USB_DEVICE(USB_VENDOR_ID_NEXIO,
+ USB_DEVICE_ID_NEXIO_MULTITOUCH_420)},
+
/* Panasonic panels */
{ .driver_data = MT_CLS_PANASONIC,
MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
@@ -1065,7 +1141,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_PANABOARD_UBT880) },
/* Novatek Panel */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
USB_DEVICE_ID_NOVATEK_PCT) },
@@ -1111,7 +1187,7 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_CONFIDENCE,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
- { .driver_data = MT_CLS_CONFIDENCE,
+ { .driver_data = MT_CLS_DEFAULT,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
USB_DEVICE_ID_MTP_SITRONIX)},
@@ -1121,48 +1197,48 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_TOPSEED2_PERIPAD_701) },
/* Touch International panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_TOUCH_INTL,
USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH) },
/* Unitec panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
/* XAT */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
/* Xiroku */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX2) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX2) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR2) },
@@ -1193,21 +1269,10 @@ static struct hid_driver mt_driver = {
.feature_mapping = mt_feature_mapping,
.usage_table = mt_grabbed_usages,
.event = mt_event,
+ .report = mt_report,
#ifdef CONFIG_PM
.reset_resume = mt_reset_resume,
.resume = mt_resume,
#endif
};
-
-static int __init mt_init(void)
-{
- return hid_register_driver(&mt_driver);
-}
-
-static void __exit mt_exit(void)
-{
- hid_unregister_driver(&mt_driver);
-}
-
-module_init(mt_init);
-module_exit(mt_exit);
+module_hid_driver(mt_driver);
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 86a969f63292..7757e82416e7 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -858,12 +858,43 @@ not_claimed_input:
return 1;
}
+static void ntrig_input_configured(struct hid_device *hid,
+ struct hid_input *hidinput)
+
+{
+ struct input_dev *input = hidinput->input;
+
+ if (hidinput->report->maxfield < 1)
+ return;
+
+ switch (hidinput->report->field[0]->application) {
+ case HID_DG_PEN:
+ input->name = "N-Trig Pen";
+ break;
+ case HID_DG_TOUCHSCREEN:
+ /* These keys are redundant for fingers, clear them
+ * to prevent incorrect identification */
+ __clear_bit(BTN_TOOL_PEN, input->keybit);
+ __clear_bit(BTN_TOOL_FINGER, input->keybit);
+ __clear_bit(BTN_0, input->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+ /*
+ * The physical touchscreen (single touch)
+ * input has a value for physical, whereas
+ * the multitouch only has logical input
+ * fields.
+ */
+ input->name = (hidinput->report->field[0]->physical) ?
+ "N-Trig Touchscreen" :
+ "N-Trig MultiTouch";
+ break;
+ }
+}
+
static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
struct ntrig_data *nd;
- struct hid_input *hidinput;
- struct input_dev *input;
struct hid_report *report;
if (id->driver_data)
@@ -901,38 +932,6 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
-
- list_for_each_entry(hidinput, &hdev->inputs, list) {
- if (hidinput->report->maxfield < 1)
- continue;
-
- input = hidinput->input;
- switch (hidinput->report->field[0]->application) {
- case HID_DG_PEN:
- input->name = "N-Trig Pen";
- break;
- case HID_DG_TOUCHSCREEN:
- /* These keys are redundant for fingers, clear them
- * to prevent incorrect identification */
- __clear_bit(BTN_TOOL_PEN, input->keybit);
- __clear_bit(BTN_TOOL_FINGER, input->keybit);
- __clear_bit(BTN_0, input->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
- /*
- * The physical touchscreen (single touch)
- * input has a value for physical, whereas
- * the multitouch only has logical input
- * fields.
- */
- input->name =
- (hidinput->report->field[0]
- ->physical) ?
- "N-Trig Touchscreen" :
- "N-Trig MultiTouch";
- break;
- }
- }
-
/* This is needed for devices with more recent firmware versions */
report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a];
if (report) {
@@ -1023,20 +1022,10 @@ static struct hid_driver ntrig_driver = {
.remove = ntrig_remove,
.input_mapping = ntrig_input_mapping,
.input_mapped = ntrig_input_mapped,
+ .input_configured = ntrig_input_configured,
.usage_table = ntrig_grabbed_usages,
.event = ntrig_event,
};
+module_hid_driver(ntrig_driver);
-static int __init ntrig_init(void)
-{
- return hid_register_driver(&ntrig_driver);
-}
-
-static void __exit ntrig_exit(void)
-{
- hid_unregister_driver(&ntrig_driver);
-}
-
-module_init(ntrig_init);
-module_exit(ntrig_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 0ffa1d2d64f0..6620f15fec22 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -50,17 +50,6 @@ static struct hid_driver ortek_driver = {
.id_table = ortek_devices,
.report_fixup = ortek_report_fixup
};
+module_hid_driver(ortek_driver);
-static int __init ortek_init(void)
-{
- return hid_register_driver(&ortek_driver);
-}
-
-static void __exit ortek_exit(void)
-{
- hid_unregister_driver(&ortek_driver);
-}
-
-module_init(ortek_init);
-module_exit(ortek_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 4c521de4e7e6..736b2502df4f 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -103,17 +103,6 @@ static struct hid_driver pl_driver = {
.input_mapping = pl_input_mapping,
.probe = pl_probe,
};
+module_hid_driver(pl_driver);
-static int __init pl_init(void)
-{
- return hid_register_driver(&pl_driver);
-}
-
-static void __exit pl_exit(void)
-{
- hid_unregister_driver(&pl_driver);
-}
-
-module_init(pl_init);
-module_exit(pl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 13ca9191b630..a79e95bb9fb6 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -116,7 +116,7 @@ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
rdev->priv = data;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = picolcd_cir_open;
rdev->close = picolcd_cir_close;
rdev->input_name = data->hdev->name;
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index 86df26e58aba..31cd93fc3d4b 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -672,18 +672,7 @@ static struct hid_driver picolcd_driver = {
.reset_resume = picolcd_reset_resume,
#endif
};
+module_hid_driver(picolcd_driver);
-static int __init picolcd_init(void)
-{
- return hid_register_driver(&picolcd_driver);
-}
-
-static void __exit picolcd_exit(void)
-{
- hid_unregister_driver(&picolcd_driver);
-}
-
-module_init(picolcd_init);
-module_exit(picolcd_exit);
MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 47ed74c46b6b..b0199d27787b 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -14,6 +14,8 @@
* 0e8f:0003 "GASIA USB Gamepad"
* - another version of the König gamepad
*
+ * 0f30:0111 "Saitek Color Rumble Pad"
+ *
* Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
*/
@@ -51,6 +53,7 @@
struct plff_device {
struct hid_report *report;
+ s32 maxval;
s32 *strong;
s32 *weak;
};
@@ -66,8 +69,8 @@ static int hid_plff_play(struct input_dev *dev, void *data,
right = effect->u.rumble.weak_magnitude;
debug("called with 0x%04x 0x%04x", left, right);
- left = left * 0x7f / 0xffff;
- right = right * 0x7f / 0xffff;
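+ /* scale the 16-bit rumble magnitudes down to the device's 0..maxval range */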
+ left = left * plff->maxval / 0xffff;
+ right = right * plff->maxval / 0xffff;
*plff->strong = left;
*plff->weak = right;
@@ -87,6 +90,7 @@ static int plff_init(struct hid_device *hid)
struct list_head *report_ptr = report_list;
struct input_dev *dev;
int error;
+ s32 maxval;
s32 *strong;
s32 *weak;
@@ -123,6 +127,7 @@ static int plff_init(struct hid_device *hid)
return -ENODEV;
}
+ maxval = 0x7f;
if (report->field[0]->report_count >= 4) {
report->field[0]->value[0] = 0x00;
report->field[0]->value[1] = 0x00;
@@ -135,6 +140,8 @@ static int plff_init(struct hid_device *hid)
report->field[1]->value[0] = 0x00;
strong = &report->field[2]->value[0];
weak = &report->field[3]->value[0];
+ if (hid->vendor == USB_VENDOR_ID_JESS2)
+ maxval = 0xff;
debug("detected 4-field device");
} else {
hid_err(hid, "not enough fields or values\n");
@@ -158,6 +165,7 @@ static int plff_init(struct hid_device *hid)
plff->report = report;
plff->strong = strong;
plff->weak = weak;
+ plff->maxval = maxval;
*strong = 0x00;
*weak = 0x00;
@@ -207,6 +215,7 @@ static const struct hid_device_id pl_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR),
.driver_data = 1 }, /* Twin USB Joystick */
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD), },
{ }
};
MODULE_DEVICE_TABLE(hid, pl_devices);
@@ -216,17 +225,6 @@ static struct hid_driver pl_driver = {
.id_table = pl_devices,
.probe = pl_probe,
};
+module_hid_driver(pl_driver);
-static int __init pl_init(void)
-{
- return hid_register_driver(&pl_driver);
-}
-
-static void __exit pl_exit(void)
-{
- hid_unregister_driver(&pl_driver);
-}
-
-module_init(pl_init);
-module_exit(pl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-primax.c b/drivers/hid/hid-primax.c
index c15adb0c98a1..3a1c3c4c50dc 100644
--- a/drivers/hid/hid-primax.c
+++ b/drivers/hid/hid-primax.c
@@ -75,18 +75,7 @@ static struct hid_driver px_driver = {
.id_table = px_devices,
.raw_event = px_raw_event,
};
+module_hid_driver(px_driver);
-static int __init px_init(void)
-{
- return hid_register_driver(&px_driver);
-}
-
-static void __exit px_exit(void)
-{
- hid_unregister_driver(&px_driver);
-}
-
-module_init(px_init);
-module_exit(px_exit);
MODULE_AUTHOR("Terry Lambert <tlambert@google.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index ec8ca3336315..4e1c4bcbdc03 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -889,23 +889,6 @@ static struct hid_driver pk_driver = {
.probe = pk_probe,
.remove = pk_remove,
};
+module_hid_driver(pk_driver);
-static int pk_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&pk_driver);
- if (ret)
- pr_err("can't register prodikeys driver\n");
-
- return ret;
-}
-
-static void pk_exit(void)
-{
- hid_unregister_driver(&pk_driver);
-}
-
-module_init(pk_init);
-module_exit(pk_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ps3remote.c b/drivers/hid/hid-ps3remote.c
index 03811e539d71..f1239d3c5b14 100644
--- a/drivers/hid/hid-ps3remote.c
+++ b/drivers/hid/hid-ps3remote.c
@@ -198,18 +198,7 @@ static struct hid_driver ps3remote_driver = {
.report_fixup = ps3remote_fixup,
.input_mapping = ps3remote_mapping,
};
+module_hid_driver(ps3remote_driver);
-static int __init ps3remote_init(void)
-{
- return hid_register_driver(&ps3remote_driver);
-}
-
-static void __exit ps3remote_exit(void)
-{
- hid_unregister_driver(&ps3remote_driver);
-}
-
-module_init(ps3remote_init);
-module_exit(ps3remote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Dillow <dave@thedillows.org>, Antonio Ospite <ospite@studenti.unina.it>");
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index 5084fb4b7e91..6adc0fa08d96 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -208,19 +208,7 @@ static struct hid_driver lua_driver = {
.probe = lua_probe,
.remove = lua_remove
};
-
-static int __init lua_init(void)
-{
- return hid_register_driver(&lua_driver);
-}
-
-static void __exit lua_exit(void)
-{
- hid_unregister_driver(&lua_driver);
-}
-
-module_init(lua_init);
-module_exit(lua_exit);
+module_hid_driver(lua_driver);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Lua driver");
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 45aea77bb611..37961c7e397d 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -54,17 +54,6 @@ static struct hid_driver saitek_driver = {
.id_table = saitek_devices,
.report_fixup = saitek_report_fixup
};
+module_hid_driver(saitek_driver);
-static int __init saitek_init(void)
-{
- return hid_register_driver(&saitek_driver);
-}
-
-static void __exit saitek_exit(void)
-{
- hid_unregister_driver(&saitek_driver);
-}
-
-module_init(saitek_init);
-module_exit(saitek_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index a5821d317229..7cbb067d4a9e 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -196,17 +196,6 @@ static struct hid_driver samsung_driver = {
.input_mapping = samsung_input_mapping,
.probe = samsung_probe,
};
+module_hid_driver(samsung_driver);
-static int __init samsung_init(void)
-{
- return hid_register_driver(&samsung_driver);
-}
-
-static void __exit samsung_exit(void)
-{
- hid_unregister_driver(&samsung_driver);
-}
-
-module_init(samsung_init);
-module_exit(samsung_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 0bc58bd8d4f5..6679788bf75a 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -605,16 +605,12 @@ static void sensor_hub_remove(struct hid_device *hdev)
}
static const struct hid_device_id sensor_hub_devices[] = {
- { HID_DEVICE(BUS_USB, HID_GROUP_SENSOR_HUB, HID_ANY_ID, HID_ANY_ID) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
+ HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, sensor_hub_devices);
-static const struct hid_usage_id sensor_hub_grabbed_usages[] = {
- { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 }
-};
-
static struct hid_driver sensor_hub_driver = {
.name = "hid-sensor-hub",
.id_table = sensor_hub_devices,
@@ -627,19 +623,7 @@ static struct hid_driver sensor_hub_driver = {
.reset_resume = sensor_hub_reset_resume,
#endif
};
-
-static int __init sensor_hub_init(void)
-{
- return hid_register_driver(&sensor_hub_driver);
-}
-
-static void __exit sensor_hub_exit(void)
-{
- hid_unregister_driver(&sensor_hub_driver);
-}
-
-module_init(sensor_hub_init);
-module_exit(sensor_hub_exit);
+module_hid_driver(sensor_hub_driver);
MODULE_DESCRIPTION("HID Sensor Hub driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index 42257acfeb73..28f774003f03 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -177,19 +177,8 @@ static struct hid_driver sjoy_driver = {
.id_table = sjoy_devices,
.probe = sjoy_probe,
};
+module_hid_driver(sjoy_driver);
-static int __init sjoy_init(void)
-{
- return hid_register_driver(&sjoy_driver);
-}
-
-static void __exit sjoy_exit(void)
-{
- hid_unregister_driver(&sjoy_driver);
-}
-
-module_init(sjoy_init);
-module_exit(sjoy_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jussi Kivilinna");
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7f33ebf299c2..312098e4af4f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -33,6 +33,28 @@ static const u8 sixaxis_rdesc_fixup[] = {
0x03, 0x46, 0xFF, 0x03, 0x09, 0x01, 0x81, 0x02
};
+static const u8 sixaxis_rdesc_fixup2[] = {
+ 0x05, 0x01, 0x09, 0x04, 0xa1, 0x01, 0xa1, 0x02,
+ 0x85, 0x01, 0x75, 0x08, 0x95, 0x01, 0x15, 0x00,
+ 0x26, 0xff, 0x00, 0x81, 0x03, 0x75, 0x01, 0x95,
+ 0x13, 0x15, 0x00, 0x25, 0x01, 0x35, 0x00, 0x45,
+ 0x01, 0x05, 0x09, 0x19, 0x01, 0x29, 0x13, 0x81,
+ 0x02, 0x75, 0x01, 0x95, 0x0d, 0x06, 0x00, 0xff,
+ 0x81, 0x03, 0x15, 0x00, 0x26, 0xff, 0x00, 0x05,
+ 0x01, 0x09, 0x01, 0xa1, 0x00, 0x75, 0x08, 0x95,
+ 0x04, 0x35, 0x00, 0x46, 0xff, 0x00, 0x09, 0x30,
+ 0x09, 0x31, 0x09, 0x32, 0x09, 0x35, 0x81, 0x02,
+ 0xc0, 0x05, 0x01, 0x95, 0x13, 0x09, 0x01, 0x81,
+ 0x02, 0x95, 0x0c, 0x81, 0x01, 0x75, 0x10, 0x95,
+ 0x04, 0x26, 0xff, 0x03, 0x46, 0xff, 0x03, 0x09,
+ 0x01, 0x81, 0x02, 0xc0, 0xa1, 0x02, 0x85, 0x02,
+ 0x75, 0x08, 0x95, 0x30, 0x09, 0x01, 0xb1, 0x02,
+ 0xc0, 0xa1, 0x02, 0x85, 0xee, 0x75, 0x08, 0x95,
+ 0x30, 0x09, 0x01, 0xb1, 0x02, 0xc0, 0xa1, 0x02,
+ 0x85, 0xef, 0x75, 0x08, 0x95, 0x30, 0x09, 0x01,
+ 0xb1, 0x02, 0xc0, 0xc0,
+};
+
struct sony_sc {
unsigned long quirks;
};
@@ -43,9 +65,19 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
- *rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
- hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
+ /*
+ * Some Sony RF receivers wrongly declare the mouse pointer as a
+ * constant non-data variable.
+ */
+ if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 &&
+ /* usage page: generic desktop controls */
+ /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */
+ /* usage: mouse */
+ rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ /* input (usage page for x,y axes): constant, variable, relative */
+ rdesc[54] == 0x81 && rdesc[55] == 0x07) {
+ hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n");
+ /* input: data, variable, relative */
rdesc[55] = 0x06;
}
@@ -56,6 +88,12 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Sony Sixaxis report descriptor\n");
memcpy((void *)&rdesc[83], (void *)&sixaxis_rdesc_fixup,
sizeof(sixaxis_rdesc_fixup));
+ } else if (sc->quirks & SIXAXIS_CONTROLLER_USB &&
+ *rsize > sizeof(sixaxis_rdesc_fixup2)) {
+ hid_info(hdev, "Sony Sixaxis clone detected. Using original report descriptor (size: %d clone; %d new)\n",
+ *rsize, (int)sizeof(sixaxis_rdesc_fixup2));
+ *rsize = sizeof(sixaxis_rdesc_fixup2);
+ memcpy(rdesc, &sixaxis_rdesc_fixup2, *rsize);
}
return rdesc;
}
@@ -217,6 +255,8 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = SIXAXIS_CONTROLLER_BT },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
.driver_data = VAIO_RDESC_CONSTANT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
+ .driver_data = VAIO_RDESC_CONSTANT },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
@@ -229,17 +269,6 @@ static struct hid_driver sony_driver = {
.report_fixup = sony_report_fixup,
.raw_event = sony_raw_event
};
+module_hid_driver(sony_driver);
-static int __init sony_init(void)
-{
- return hid_register_driver(&sony_driver);
-}
-
-static void __exit sony_exit(void)
-{
- hid_unregister_driver(&sony_driver);
-}
-
-module_init(sony_init);
-module_exit(sony_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
index 602013741718..e94371a059cb 100644
--- a/drivers/hid/hid-speedlink.c
+++ b/drivers/hid/hid-speedlink.c
@@ -73,17 +73,6 @@ static struct hid_driver speedlink_driver = {
.input_mapping = speedlink_input_mapping,
.event = speedlink_event,
};
+module_hid_driver(speedlink_driver);
-static int __init speedlink_init(void)
-{
- return hid_register_driver(&speedlink_driver);
-}
-
-static void __exit speedlink_exit(void)
-{
- hid_unregister_driver(&speedlink_driver);
-}
-
-module_init(speedlink_init);
-module_exit(speedlink_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
new file mode 100644
index 000000000000..2ed995cda44a
--- /dev/null
+++ b/drivers/hid/hid-steelseries.c
@@ -0,0 +1,393 @@
+/*
+ * HID driver for Steelseries SRW-S1
+ *
+ * Copyright (c) 2013 Simon Wood
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#define SRWS1_NUMBER_LEDS 15
+struct steelseries_srws1_data {
+ __u16 led_state;
+ /* the last element is used for setting all leds simultaneously */
+ struct led_classdev *led[SRWS1_NUMBER_LEDS + 1];
+};
+#endif
+
+/* Fixed report descriptor for Steelseries SRW-S1 wheel controller
+ *
+ * The original descriptor hides the sensitivity and assists dials
+ * behind a custom vendor usage page. This inserts a patch to make them
+ * appear in the 'Generic Desktop' usage.
+ */
+
+static __u8 steelseries_srws1_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop) */
+0x09, 0x08, /* Usage (MultiAxis), Changed */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x05, 0x01, /* Changed Usage Page (Desktop), */
+0x09, 0x30, /* Changed Usage (X), */
+0x16, 0xF8, 0xF8, /* Logical Minimum (-1800), */
+0x26, 0x08, 0x07, /* Logical Maximum (1800), */
+0x65, 0x14, /* Unit (Degrees), */
+0x55, 0x0F, /* Unit Exponent (15), */
+0x75, 0x10, /* Report Size (16), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x31, /* Changed Usage (Y), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x75, 0x0C, /* Report Size (12), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Changed Usage (Z), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x75, 0x0C, /* Report Size (12), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x65, 0x14, /* Unit (Degrees), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x01, /* Report Count (1), */
+0x81, 0x02, /* Input (Variable), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x65, 0x00, /* Unit, */
+0x75, 0x01, /* Report Size (1), */
+0x95, 0x03, /* Report Count (3), */
+0x81, 0x01, /* Input (Constant), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x11, /* Usage Maximum (11h), */
+0x95, 0x11, /* Report Count (17), */
+0x81, 0x02, /* Input (Variable), */
+ /* ---- Dial patch starts here ---- */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x33, /* Usage (RX), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x02, /* Report Count (2), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x25, 0x0b, /* Logical Maximum (11), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x35, /* Usage (RZ), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x01, /* Report Count (1), */
+0x25, 0x03, /* Logical Maximum (3), */
+0x81, 0x02, /* Input (Variable), */
+ /* ---- Dial patch ends here ---- */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x75, 0x04, /* Changed Report Size (4), */
+0x95, 0x0D, /* Changed Report Count (13), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x75, 0x08, /* Report Size (8), */
+0x95, 0x10, /* Report Count (16), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
+{
+ struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ __s32 *value = report->field[0]->value;
+
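+ /* output report: 0x40, then the LED bitmask low byte and high byte; the remaining bytes are left at zero */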
+ value[0] = 0x40;
+ value[1] = leds & 0xFF;
+ value[2] = leds >> 8;
+ value[3] = 0x00;
+ value[4] = 0x00;
+ value[5] = 0x00;
+ value[6] = 0x00;
+ value[7] = 0x00;
+ value[8] = 0x00;
+ value[9] = 0x00;
+ value[10] = 0x00;
+ value[11] = 0x00;
+ value[12] = 0x00;
+ value[13] = 0x00;
+ value[14] = 0x00;
+ value[15] = 0x00;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+
+ /* Note: LED change does not show on device until the device is read/polled */
+}
+
+static void steelseries_srws1_led_all_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return;
+ }
+
+ if (value == LED_OFF)
+ drv_data->led_state = 0;
+ else
+ drv_data->led_state = (1 << (SRWS1_NUMBER_LEDS + 1)) - 1;
+
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+}
+
+static enum led_brightness steelseries_srws1_led_all_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data;
+
+ drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return LED_OFF;
+ }
+
+ return (drv_data->led_state >> SRWS1_NUMBER_LEDS) ? LED_FULL : LED_OFF;
+}
+
+static void steelseries_srws1_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid);
+ int i, state = 0;
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return;
+ }
+
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
+ if (led_cdev != drv_data->led[i])
+ continue;
+
+ state = (drv_data->led_state >> i) & 1;
+ if (value == LED_OFF && state) {
+ drv_data->led_state &= ~(1 << i);
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+ } else if (value != LED_OFF && !state) {
+ drv_data->led_state |= 1 << i;
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+ }
+ break;
+ }
+}
+
+static enum led_brightness steelseries_srws1_led_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data;
+ int i, value = 0;
+
+ drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return LED_OFF;
+ }
+
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++)
+ if (led_cdev == drv_data->led[i]) {
+ value = (drv_data->led_state >> i) & 1;
+ break;
+ }
+
+ return value ? LED_FULL : LED_OFF;
+}
+
+static int steelseries_srws1_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret, i;
+ struct led_classdev *led;
+ size_t name_sz;
+ char *name;
+
+ struct steelseries_srws1_data *drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+
+ if (drv_data == NULL) {
+ hid_err(hdev, "can't alloc SRW-S1 memory\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, drv_data);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto err_free;
+ }
+
+ /* register led subsystem */
+ drv_data->led_state = 0;
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++)
+ drv_data->led[i] = NULL;
+
+ steelseries_srws1_set_leds(hdev, 0);
+
+ name_sz = strlen(hdev->uniq) + 16;
+
+ /* 'ALL', for setting all LEDs simultaneously */
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ hid_err(hdev, "can't allocate memory for LED ALL\n");
+ goto err_led;
+ }
+
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "SRWS1::%s::RPMALL", hdev->uniq);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = steelseries_srws1_led_all_get_brightness;
+ led->brightness_set = steelseries_srws1_led_all_set_brightness;
+
+ drv_data->led[SRWS1_NUMBER_LEDS] = led;
+ ret = led_classdev_register(&hdev->dev, led);
+ if (ret)
+ goto err_led;
+
+ /* Each individual LED */
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ hid_err(hdev, "can't allocate memory for LED %d\n", i);
+ goto err_led;
+ }
+
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "SRWS1::%s::RPM%d", hdev->uniq, i+1);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = steelseries_srws1_led_get_brightness;
+ led->brightness_set = steelseries_srws1_led_set_brightness;
+
+ drv_data->led[i] = led;
+ ret = led_classdev_register(&hdev->dev, led);
+
+ if (ret) {
+ hid_err(hdev, "failed to register LED %d. Aborting.\n", i);
+err_led:
+ /* Deregister all LEDs (if any) */
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
+ led = drv_data->led[i];
+ drv_data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+ goto out; /* but let the driver continue without LEDs */
+ }
+ }
+out:
+ return 0;
+err_free:
+ kfree(drv_data);
+ return ret;
+}
+
+static void steelseries_srws1_remove(struct hid_device *hdev)
+{
+ int i;
+ struct led_classdev *led;
+
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data) {
+ /* Deregister LEDs (if any) */
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
+ led = drv_data->led[i];
+ drv_data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+
+ }
+
+ hid_hw_stop(hdev);
+ kfree(drv_data);
+ return;
+}
+#endif
+
+static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 115 && rdesc[11] == 0x02 && rdesc[13] == 0xc8
+ && rdesc[29] == 0xbb && rdesc[40] == 0xc5) {
+ hid_info(hdev, "Fixing up Steelseries SRW-S1 report descriptor\n");
+ rdesc = steelseries_srws1_rdesc_fixed;
+ *rsize = sizeof(steelseries_srws1_rdesc_fixed);
+ }
+ return rdesc;
+}
+
+static const struct hid_device_id steelseries_srws1_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
+
+static struct hid_driver steelseries_srws1_driver = {
+ .name = "steelseries_srws1",
+ .id_table = steelseries_srws1_devices,
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+ .probe = steelseries_srws1_probe,
+ .remove = steelseries_srws1_remove,
+#endif
+ .report_fixup = steelseries_srws1_report_fixup
+};
+
+static int __init steelseries_srws1_init(void)
+{
+ return hid_register_driver(&steelseries_srws1_driver);
+}
+
+static void __exit steelseries_srws1_exit(void)
+{
+ hid_unregister_driver(&steelseries_srws1_driver);
+}
+
+module_init(steelseries_srws1_init);
+module_exit(steelseries_srws1_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index 45b4b066a262..87fc91e1c8de 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -63,17 +63,6 @@ static struct hid_driver sp_driver = {
.report_fixup = sp_report_fixup,
.input_mapping = sp_input_mapping,
};
+module_hid_driver(sp_driver);
-static int __init sp_init(void)
-{
- return hid_register_driver(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
- hid_unregister_driver(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
new file mode 100644
index 000000000000..2055a52e9a20
--- /dev/null
+++ b/drivers/hid/hid-thingm.c
@@ -0,0 +1,272 @@
+/*
+ * ThingM blink(1) USB RGB LED driver
+ *
+ * Copyright 2013 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/hid.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+
+#define BLINK1_CMD_SIZE 9
+
+#define blink1_rgb_to_r(rgb) ((rgb & 0xFF0000) >> 16)
+#define blink1_rgb_to_g(rgb) ((rgb & 0x00FF00) >> 8)
+#define blink1_rgb_to_b(rgb) ((rgb & 0x0000FF) >> 0)
+
+/**
+ * struct blink1_data - blink(1) device specific data
+ * @hdev: HID device.
+ * @led_cdev: LED class instance.
+ * @rgb: 8-bit per channel RGB notation.
+ * @fade: fade time in hundredths of a second.
+ * @brightness: brightness coefficient.
+ * @play: play/pause in-memory patterns.
+ */
+struct blink1_data {
+ struct hid_device *hdev;
+ struct led_classdev led_cdev;
+ u32 rgb;
+ u16 fade;
+ u8 brightness;
+ bool play;
+};
+
+static int blink1_send_command(struct blink1_data *data,
+ u8 buf[BLINK1_CMD_SIZE])
+{
+ int ret;
+
+ hid_dbg(data->hdev, "command: %d%c%.2x%.2x%.2x%.2x%.2x%.2x%.2x\n",
+ buf[0], buf[1], buf[2], buf[3], buf[4],
+ buf[5], buf[6], buf[7], buf[8]);
+
+ ret = data->hdev->hid_output_raw_report(data->hdev, buf,
+ BLINK1_CMD_SIZE, HID_FEATURE_REPORT);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int blink1_update_color(struct blink1_data *data)
+{
+ u8 buf[BLINK1_CMD_SIZE] = { 1, 'n', 0, 0, 0, 0, 0, 0, 0 };
+
+ if (data->brightness) {
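+ /* 'n' appears to set the color immediately; 'c' (fade to color) is used below when a fade time is set */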
+ unsigned int coef = DIV_ROUND_CLOSEST(255, data->brightness);
+
+ buf[2] = DIV_ROUND_CLOSEST(blink1_rgb_to_r(data->rgb), coef);
+ buf[3] = DIV_ROUND_CLOSEST(blink1_rgb_to_g(data->rgb), coef);
+ buf[4] = DIV_ROUND_CLOSEST(blink1_rgb_to_b(data->rgb), coef);
+ }
+
+ if (data->fade) {
+ buf[1] = 'c';
+ buf[5] = (data->fade & 0xFF00) >> 8;
+ buf[6] = (data->fade & 0x00FF);
+ }
+
+ return blink1_send_command(data, buf);
+}
+
+static void blink1_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct blink1_data *data = dev_get_drvdata(led_cdev->dev->parent);
+
+ data->brightness = brightness;
+ if (blink1_update_color(data))
+ hid_err(data->hdev, "failed to update color\n");
+}
+
+static enum led_brightness blink1_led_get(struct led_classdev *led_cdev)
+{
+ struct blink1_data *data = dev_get_drvdata(led_cdev->dev->parent);
+
+ return data->brightness;
+}
+
+static ssize_t blink1_show_rgb(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%.6X\n", data->rgb);
+}
+
+static ssize_t blink1_store_rgb(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ long unsigned int rgb;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &rgb);
+ if (ret)
+ return ret;
+
+ /* RGB triplet notation is 24-bit hexadecimal */
+ if (rgb > 0xFFFFFF)
+ return -EINVAL;
+
+ data->rgb = rgb;
+ ret = blink1_update_color(data);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR(rgb, S_IRUGO | S_IWUSR, blink1_show_rgb, blink1_store_rgb);
+
+static ssize_t blink1_show_fade(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", data->fade * 10);
+}
+
+static ssize_t blink1_store_fade(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ long unsigned int fade;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &fade);
+ if (ret)
+ return ret;
+
+ /* blink(1) accepts 16-bit fade time, number of 10ms ticks */
+ fade = DIV_ROUND_CLOSEST(fade, 10);
+ if (fade > 65535)
+ return -EINVAL;
+
+ data->fade = fade;
+
+ return count;
+}
+
+static DEVICE_ATTR(fade, S_IRUGO | S_IWUSR,
+ blink1_show_fade, blink1_store_fade);
+
+static ssize_t blink1_show_play(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", data->play);
+}
+
+static ssize_t blink1_store_play(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ u8 cmd[BLINK1_CMD_SIZE] = { 1, 'p', 0, 0, 0, 0, 0, 0, 0 };
+ long unsigned int play;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &play);
+ if (ret)
+ return ret;
+
+ data->play = !!play;
+ cmd[2] = data->play;
+ ret = blink1_send_command(data, cmd);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR(play, S_IRUGO | S_IWUSR,
+ blink1_show_play, blink1_store_play);
+
+static const struct attribute_group blink1_sysfs_group = {
+ .attrs = (struct attribute *[]) {
+ &dev_attr_rgb.attr,
+ &dev_attr_fade.attr,
+ &dev_attr_play.attr,
+ NULL
+ },
+};
+
+static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct blink1_data *data;
+ struct led_classdev *led;
+ char led_name[13];
+ int ret;
+
+ data = devm_kzalloc(&hdev->dev, sizeof(struct blink1_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ hid_set_drvdata(hdev, data);
+ data->hdev = hdev;
+ data->rgb = 0xFFFFFF; /* set a default white color */
+
+ ret = hid_parse(hdev);
+ if (ret)
+ goto error;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret)
+ goto error;
+
+ /* blink(1) serial numbers range from 0x1A001000 to 0x1A002FFF */
+ led = &data->led_cdev;
+ snprintf(led_name, sizeof(led_name), "blink1::%s", hdev->uniq + 4);
+ led->name = led_name;
+ led->brightness_set = blink1_led_set;
+ led->brightness_get = blink1_led_get;
+ ret = led_classdev_register(&hdev->dev, led);
+ if (ret)
+ goto stop;
+
+ ret = sysfs_create_group(&led->dev->kobj, &blink1_sysfs_group);
+ if (ret)
+ goto remove_led;
+
+ return 0;
+
+remove_led:
+ led_classdev_unregister(led);
+stop:
+ hid_hw_stop(hdev);
+error:
+ return ret;
+}
+
+static void thingm_remove(struct hid_device *hdev)
+{
+ struct blink1_data *data = hid_get_drvdata(hdev);
+ struct led_classdev *led = &data->led_cdev;
+
+ sysfs_remove_group(&led->dev->kobj, &blink1_sysfs_group);
+ led_classdev_unregister(led);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id thingm_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, thingm_table);
+
+static struct hid_driver thingm_driver = {
+ .name = "thingm",
+ .probe = thingm_probe,
+ .remove = thingm_remove,
+ .id_table = thingm_table,
+};
+
+module_hid_driver(thingm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vivien Didelot <vivien.didelot@savoirfairelinux.com>");
+MODULE_DESCRIPTION("ThingM blink(1) USB RGB LED driver");
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index 9f85f827607f..d790d8d71f7f 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -73,18 +73,7 @@ static struct hid_driver tivo_driver = {
.id_table = tivo_devices,
.input_mapping = tivo_input_mapping,
};
+module_hid_driver(tivo_driver);
-static int __init tivo_init(void)
-{
- return hid_register_driver(&tivo_driver);
-}
-
-static void __exit tivo_exit(void)
-{
- hid_unregister_driver(&tivo_driver);
-}
-
-module_init(tivo_init);
-module_exit(tivo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 83a933b9c2e9..e4fcf3f702a5 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -261,17 +261,6 @@ static struct hid_driver tm_driver = {
.id_table = tm_devices,
.probe = tm_probe,
};
+module_hid_driver(tm_driver);
-static int __init tm_init(void)
-{
- return hid_register_driver(&tm_driver);
-}
-
-static void __exit tm_exit(void)
-{
- hid_unregister_driver(&tm_driver);
-}
-
-module_init(tm_init);
-module_exit(tm_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 613ff7b1d746..8a5b843e9dd6 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -76,17 +76,6 @@ static struct hid_driver ts_driver = {
.id_table = ts_devices,
.input_mapping = ts_input_mapping,
};
+module_hid_driver(ts_driver);
-static int __init ts_init(void)
-{
- return hid_register_driver(&ts_driver);
-}
-
-static void __exit ts_exit(void)
-{
- hid_unregister_driver(&ts_driver);
-}
-
-module_init(ts_init);
-module_exit(ts_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index f23456b1fd4b..c08c36443f83 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -131,17 +131,6 @@ static struct hid_driver twinhan_driver = {
.id_table = twinhan_devices,
.input_mapping = twinhan_input_mapping,
};
+module_hid_driver(twinhan_driver);
-static int __init twinhan_init(void)
-{
- return hid_register_driver(&twinhan_driver);
-}
-
-static void __exit twinhan_exit(void)
-{
- hid_unregister_driver(&twinhan_driver);
-}
-
-module_init(twinhan_init);
-module_exit(twinhan_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 2e56a1fd2375..fb8b516ff0ed 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -650,17 +650,6 @@ static struct hid_driver uclogic_driver = {
.id_table = uclogic_devices,
.report_fixup = uclogic_report_fixup,
};
+module_hid_driver(uclogic_driver);
-static int __init uclogic_init(void)
-{
- return hid_register_driver(&uclogic_driver);
-}
-
-static void __exit uclogic_exit(void)
-{
- hid_unregister_driver(&uclogic_driver);
-}
-
-module_init(uclogic_init);
-module_exit(uclogic_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 2f60da9ed066..a4a8bb0da688 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -953,23 +953,7 @@ static struct hid_driver wacom_driver = {
.raw_event = wacom_raw_event,
.input_mapped = wacom_input_mapped,
};
+module_hid_driver(wacom_driver);
-static int __init wacom_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&wacom_driver);
- if (ret)
- pr_err("can't register wacom driver\n");
- return ret;
-}
-
-static void __exit wacom_exit(void)
-{
- hid_unregister_driver(&wacom_driver);
-}
-
-module_init(wacom_init);
-module_exit(wacom_exit);
MODULE_DESCRIPTION("Driver for Wacom Graphire Bluetooth and Wacom Intuos4 WL");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-waltop.c b/drivers/hid/hid-waltop.c
index bb536ab5941e..059931d7b392 100644
--- a/drivers/hid/hid-waltop.c
+++ b/drivers/hid/hid-waltop.c
@@ -779,17 +779,6 @@ static struct hid_driver waltop_driver = {
.report_fixup = waltop_report_fixup,
.raw_event = waltop_raw_event,
};
+module_hid_driver(waltop_driver);
-static int __init waltop_init(void)
-{
- return hid_register_driver(&waltop_driver);
-}
-
-static void __exit waltop_exit(void)
-{
- hid_unregister_driver(&waltop_driver);
-}
-
-module_init(waltop_init);
-module_exit(waltop_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 84e2fbec5fbb..0fb8ab93db68 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1294,25 +1294,8 @@ static struct hid_driver wiimote_hid_driver = {
.remove = wiimote_hid_remove,
.raw_event = wiimote_hid_event,
};
+module_hid_driver(wiimote_hid_driver);
-static int __init wiimote_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&wiimote_hid_driver);
- if (ret)
- pr_err("Can't register wiimote hid driver\n");
-
- return ret;
-}
-
-static void __exit wiimote_exit(void)
-{
- hid_unregister_driver(&wiimote_hid_driver);
-}
-
-module_init(wiimote_init);
-module_exit(wiimote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
index eec329197c16..90124ffaa2a5 100644
--- a/drivers/hid/hid-wiimote-debug.c
+++ b/drivers/hid/hid-wiimote-debug.c
@@ -31,7 +31,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
unsigned long flags;
ssize_t ret;
char buf[16];
- __u16 size;
+ __u16 size = 0;
if (s == 0)
return -EINVAL;
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index 38ae87772e96..0472191d4a72 100644
--- a/drivers/hid/hid-wiimote-ext.c
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -403,14 +403,14 @@ static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
if (ext->motionp) {
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
+ wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x04));
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
+ wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x08));
} else {
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
+ wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x01));
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
+ wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x02));
}
input_sync(ext->input);
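The switch from !! to ! above implies the nunchuck button bits are active-low: a cleared bit means the button is held. A small sketch of the corrected decode for the non-MotionPlus layout (byte value invented):

/* Sketch: bit 0 is Z, bit 1 is C; a clear bit means "pressed". */
static inline void nunchuck_decode_buttons(__u8 byte5, int *z, int *c)
{
	*z = !(byte5 & 0x01);	/* byte5 = 0x02 -> Z reported pressed  */
	*c = !(byte5 & 0x02);	/* byte5 = 0x02 -> C reported released */
}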
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index f6ba81df71bd..af66452592e9 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -152,17 +152,6 @@ static struct hid_driver zp_driver = {
.id_table = zp_devices,
.probe = zp_probe,
};
+module_hid_driver(zp_driver);
-static int __init zp_init(void)
-{
- return hid_register_driver(&zp_driver);
-}
-
-static void __exit zp_exit(void)
-{
- hid_unregister_driver(&zp_driver);
-}
-
-module_init(zp_init);
-module_exit(zp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index 1ad85f2257b4..e4cddeccd6b5 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -219,17 +219,6 @@ static struct hid_driver zc_driver = {
.probe = zc_probe,
.remove = zc_remove,
};
+module_hid_driver(zc_driver);
-static int __init zc_init(void)
-{
- return hid_register_driver(&zc_driver);
-}
-
-static void __exit zc_exit(void)
-{
- hid_unregister_driver(&zc_driver);
-}
-
-module_init(zc_init);
-module_exit(zc_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 413a73187d33..f3bbbce8353b 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -581,6 +581,7 @@ int __init hidraw_init(void)
if (result < 0)
goto error_class;
+ printk(KERN_INFO "hidraw: raw HID events driver (C) Jiri Kosina\n");
out:
return result;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 9ef222442ca0..ec7930217a6d 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -34,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/hid.h>
#include <linux/mutex.h>
+#include <linux/acpi.h>
#include <linux/i2c/i2c-hid.h>
@@ -139,6 +140,8 @@ struct i2c_hid {
unsigned long flags; /* device flags */
wait_queue_head_t wait; /* For waiting the interrupt */
+
+ struct i2c_hid_platform_data pdata;
};
static int __i2c_hid_command(struct i2c_client *client,
@@ -540,13 +543,24 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
{
struct i2c_client *client = hid->driver_data;
int report_id = buf[0];
+ int ret;
if (report_type == HID_INPUT_REPORT)
return -EINVAL;
- return i2c_hid_set_report(client,
+ if (report_id) {
+ buf++;
+ count--;
+ }
+
+ ret = i2c_hid_set_report(client,
report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
report_id, buf, count);
+
+ if (report_id && ret >= 0)
+ ret++; /* add report_id to the number of transferred bytes */
+
+ return ret;
}
static int i2c_hid_parse(struct hid_device *hid)
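A hypothetical walk-through of the numbered-report handling added above: for report ID 2 with three payload bytes, the buffer handed down and the count handed back differ by the ID byte.

/* Caller passes: buf = { 0x02, 0xaa, 0xbb, 0xcc }, count = 4.
 * report_id = 2, so i2c_hid_set_report() is called with buf + 1
 * and count = 3; if it returns 3, the function reports 3 + 1 = 4
 * bytes back to the HID core, matching what the caller wrote. */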
@@ -731,7 +745,7 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
.hidinput_input_event = i2c_hid_hidinput_input_event,
};
-static int __devinit i2c_hid_init_irq(struct i2c_client *client)
+static int i2c_hid_init_irq(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
int ret;
@@ -753,7 +767,7 @@ static int __devinit i2c_hid_init_irq(struct i2c_client *client)
return 0;
}
-static int __devinit i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
+static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
{
struct i2c_client *client = ihid->client;
struct i2c_hid_desc *hdesc = &ihid->hdesc;
@@ -810,8 +824,72 @@ static int __devinit i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
return 0;
}
-static int __devinit i2c_hid_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
+#ifdef CONFIG_ACPI
+static int i2c_hid_acpi_pdata(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ static u8 i2c_hid_guid[] = {
+ 0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
+ };
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object params[4], *obj;
+ struct acpi_object_list input;
+ struct acpi_device *adev;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return -ENODEV;
+
+ input.count = ARRAY_SIZE(params);
+ input.pointer = params;
+
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(i2c_hid_guid);
+ params[0].buffer.pointer = i2c_hid_guid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = 1;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = 1; /* HID function */
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = 0;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+ dev_err(&client->dev, "device _DSM execution failed\n");
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)buf.pointer;
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
+ obj->type);
+ kfree(buf.pointer);
+ return -EINVAL;
+ }
+
+ pdata->hid_descriptor_address = obj->integer.value;
+
+ kfree(buf.pointer);
+ return 0;
+}
+
+static const struct acpi_device_id i2c_hid_acpi_match[] = {
+ {"ACPI0C50", 0 },
+ {"PNP0C50", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
+#else
+static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int i2c_hid_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
{
int ret;
struct i2c_hid *ihid;
@@ -821,11 +899,6 @@ static int __devinit i2c_hid_probe(struct i2c_client *client,
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
- if (!platform_data) {
- dev_err(&client->dev, "HID register address not provided\n");
- return -EINVAL;
- }
-
if (!client->irq) {
dev_err(&client->dev,
"HID over i2c has not been provided an Int IRQ\n");
@@ -836,11 +909,22 @@ static int __devinit i2c_hid_probe(struct i2c_client *client,
if (!ihid)
return -ENOMEM;
+ if (!platform_data) {
+ ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
+ if (ret) {
+ dev_err(&client->dev,
+ "HID register address not provided\n");
+ goto err;
+ }
+ } else {
+ ihid->pdata = *platform_data;
+ }
+
i2c_set_clientdata(client, ihid);
ihid->client = client;
- hidRegister = platform_data->hid_descriptor_address;
+ hidRegister = ihid->pdata.hid_descriptor_address;
ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
init_waitqueue_head(&ihid->wait);
@@ -873,6 +957,7 @@ static int __devinit i2c_hid_probe(struct i2c_client *client,
hid->hid_get_raw_report = i2c_hid_get_raw_report;
hid->hid_output_raw_report = i2c_hid_output_raw_report;
hid->dev.parent = &client->dev;
+ ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev));
hid->bus = BUS_I2C;
hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
@@ -902,7 +987,7 @@ err:
return ret;
}
-static int __devexit i2c_hid_remove(struct i2c_client *client)
+static int i2c_hid_remove(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
@@ -964,10 +1049,11 @@ static struct i2c_driver i2c_hid_driver = {
.name = "i2c_hid",
.owner = THIS_MODULE,
.pm = &i2c_hid_pm,
+ .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
},
.probe = i2c_hid_probe,
- .remove = __devexit_p(i2c_hid_remove),
+ .remove = i2c_hid_remove,
.id_table = i2c_hid_id_table,
};
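With the _DSM path in place, platform data becomes an optional fallback for non-ACPI boards. A hedged board-file fragment illustrating that fallback, using an invented bus address, register address and IRQ:

/* Sketch only: board-file registration passing the HID descriptor
 * register address directly instead of discovering it via _DSM. */
static struct i2c_hid_platform_data example_hid_pdata = {
	.hid_descriptor_address = 0x0001,	/* hypothetical register */
};

static struct i2c_board_info example_hid_devices[] __initdata = {
	{
		I2C_BOARD_INFO("i2c_hid", 0x2c),	/* hypothetical address */
		.platform_data = &example_hid_pdata,
		.irq = 42,				/* hypothetical IRQ */
	},
};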
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 714cd8cc9579..fc307e0422af 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -11,6 +11,7 @@
*/
#include <linux/atomic.h>
+#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
@@ -276,6 +277,94 @@ static struct hid_ll_driver uhid_hid_driver = {
.parse = uhid_hid_parse,
};
+#ifdef CONFIG_COMPAT
+
+/* Apparently we haven't stepped on these rakes enough times yet. */
+struct uhid_create_req_compat {
+ __u8 name[128];
+ __u8 phys[64];
+ __u8 uniq[64];
+
+ compat_uptr_t rd_data;
+ __u16 rd_size;
+
+ __u16 bus;
+ __u32 vendor;
+ __u32 product;
+ __u32 version;
+ __u32 country;
+} __attribute__((__packed__));
+
+static int uhid_event_from_user(const char __user *buffer, size_t len,
+ struct uhid_event *event)
+{
+ if (is_compat_task()) {
+ u32 type;
+
+ if (get_user(type, buffer))
+ return -EFAULT;
+
+ if (type == UHID_CREATE) {
+ /*
+ * This is our messed up request with compat pointer.
+ * It is largish (more than 256 bytes), so we'd better
+ * allocate it from the heap.
+ */
+ struct uhid_create_req_compat *compat;
+
+ compat = kmalloc(sizeof(*compat), GFP_KERNEL);
+ if (!compat)
+ return -ENOMEM;
+
+ buffer += sizeof(type);
+ len -= sizeof(type);
+ if (copy_from_user(compat, buffer,
+ min(len, sizeof(*compat)))) {
+ kfree(compat);
+ return -EFAULT;
+ }
+
+ /* Shuffle the data over to proper structure */
+ event->type = type;
+
+ memcpy(event->u.create.name, compat->name,
+ sizeof(compat->name));
+ memcpy(event->u.create.phys, compat->phys,
+ sizeof(compat->phys));
+ memcpy(event->u.create.uniq, compat->uniq,
+ sizeof(compat->uniq));
+
+ event->u.create.rd_data = compat_ptr(compat->rd_data);
+ event->u.create.rd_size = compat->rd_size;
+
+ event->u.create.bus = compat->bus;
+ event->u.create.vendor = compat->vendor;
+ event->u.create.product = compat->product;
+ event->u.create.version = compat->version;
+ event->u.create.country = compat->country;
+
+ kfree(compat);
+ return 0;
+ }
+ /* All others can be copied directly */
+ }
+
+ if (copy_from_user(event, buffer, min(len, sizeof(*event))))
+ return -EFAULT;
+
+ return 0;
+}
+#else
+static int uhid_event_from_user(const char __user *buffer, size_t len,
+ struct uhid_event *event)
+{
+ if (copy_from_user(event, buffer, min(len, sizeof(*event))))
+ return -EFAULT;
+
+ return 0;
+}
+#endif
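The compat shim above exists because UHID_CREATE carries a raw userspace pointer, so the 32-bit and 64-bit layouts diverge at rd_data. A rough offset illustration, assuming the native request is packed in the same way as the compat struct shown above:

/* name[128] + phys[64] + uniq[64] = 256 bytes in both ABIs, then:
 *
 *   32-bit request: rd_data is a compat_uptr_t (4 bytes) -> rd_size at 260
 *   64-bit request: rd_data is a real pointer  (8 bytes) -> rd_size at 264
 *
 * A plain copy_from_user() into the native struct would therefore
 * misread rd_size and every field after it, which is what
 * uhid_event_from_user() avoids by translating explicitly. */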
+
static int uhid_dev_create(struct uhid_device *uhid,
const struct uhid_event *ev)
{
@@ -498,10 +587,10 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
len = min(count, sizeof(uhid->input_buf));
- if (copy_from_user(&uhid->input_buf, buffer, len)) {
- ret = -EFAULT;
+
+ ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
+ if (ret)
goto unlock;
- }
switch (uhid->input_buf.type) {
case UHID_CREATE:
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ac9e35228254..e0e6abf1cd3b 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
index 3ad91f6447d8..e61e5f991aa5 100644
--- a/drivers/hsi/clients/hsi_char.c
+++ b/drivers/hsi/clients/hsi_char.c
@@ -675,7 +675,7 @@ static const struct file_operations hsc_fops = {
.release = hsc_release,
};
-static void __devinit hsc_channel_init(struct hsc_channel *channel)
+static void hsc_channel_init(struct hsc_channel *channel)
{
init_waitqueue_head(&channel->rx_wait);
init_waitqueue_head(&channel->tx_wait);
@@ -685,7 +685,7 @@ static void __devinit hsc_channel_init(struct hsc_channel *channel)
INIT_LIST_HEAD(&channel->tx_msgs_queue);
}
-static int __devinit hsc_probe(struct device *dev)
+static int hsc_probe(struct device *dev)
{
const char devname[] = "hsi_char";
struct hsc_client_data *cl_data;
@@ -744,7 +744,7 @@ out1:
return ret;
}
-static int __devexit hsc_remove(struct device *dev)
+static int hsc_remove(struct device *dev)
{
struct hsi_client *cl = to_hsi_client(dev);
struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
@@ -763,7 +763,7 @@ static struct hsi_client_driver hsc_driver = {
.name = "hsi_char",
.owner = THIS_MODULE,
.probe = hsc_probe,
- .remove = __devexit_p(hsc_remove),
+ .remove = hsc_remove,
},
};
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index b38ef6d8d049..64630f15f181 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -2,7 +2,7 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
- depends on X86 && ACPI && PCI
+ depends on X86 && ACPI && PCI && X86_LOCAL_APIC
help
Select this option to run Linux as a Hyper-V client operating
system.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f25a8f0..0b122f8c7005 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -55,7 +55,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
[channel->monitor_grp].pending);
} else {
- vmbus_set_event(channel->offermsg.child_relid);
+ vmbus_set_event(channel);
}
}
@@ -181,7 +181,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
PAGE_SHIFT;
- open_msg->server_contextarea_gpadlhandle = 0;
+ open_msg->target_vp = newchannel->target_vp;
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
int ret;
+ bool signal = false;
/* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -732,6 +735,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 packetlen;
u32 userlen;
int ret;
+ bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -758,8 +762,10 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
/* Copy over the packet to the user buffer */
ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
- (desc.offset8 << 3));
+ (desc.offset8 << 3), &signal);
+ if (signal)
+ vmbus_setevent(channel);
return 0;
}
@@ -774,8 +780,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
{
struct vmpacket_descriptor desc;
u32 packetlen;
- u32 userlen;
int ret;
+ bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -788,7 +794,6 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
packetlen = desc.len8 << 3;
- userlen = packetlen - (desc.offset8 << 3);
*buffer_actual_len = packetlen;
@@ -802,7 +807,11 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
*requestid = desc.trans_id;
/* Copy over the entire packet to the user buffer */
- ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
+ ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
+ &signal);
+
+ if (signal)
+ vmbus_setevent(channel);
return 0;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f84c5cff8d4..53a8600162a5 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -257,6 +257,70 @@ static void vmbus_process_offer(struct work_struct *work)
}
}
+enum {
+ IDE = 0,
+ SCSI,
+ NIC,
+ MAX_PERF_CHN,
+};
+
+/*
+ * This is an array of device_ids (device types) that are performance critical.
+ * We attempt to distribute the interrupt load for these devices across
+ * all available CPUs.
+ */
+static const struct hv_vmbus_device_id hp_devs[] = {
+ /* IDE */
+ { HV_IDE_GUID, },
+ /* Storage - SCSI */
+ { HV_SCSI_GUID, },
+ /* Network */
+ { HV_NIC_GUID, },
+};
+
+
+/*
+ * We use this state to statically distribute the channel interrupt load.
+ */
+static u32 next_vp;
+
+/*
+ * Starting with Win8, we can statically distribute the incoming
+ * channel interrupt load by binding a channel to VCPU. We
+ * implement here a simple round robin scheme for distributing
+ * the interrupt load.
+ * We will bind channels that are not performance critical to cpu 0 and
+ * performance critical channels (IDE, SCSI and Network) will be uniformly
+ * distributed across all available CPUs.
+ */
+static u32 get_vp_index(uuid_le *type_guid)
+{
+ u32 cur_cpu;
+ int i;
+ bool perf_chn = false;
+ u32 max_cpus = num_online_cpus();
+
+ for (i = IDE; i < MAX_PERF_CHN; i++) {
+ if (!memcmp(type_guid->b, hp_devs[i].guid,
+ sizeof(uuid_le))) {
+ perf_chn = true;
+ break;
+ }
+ }
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+ /*
+ * Prior to win8, all channel interrupts are
+ * delivered on cpu 0.
+ * Also if the channel is not a performance critical
+ * channel, bind it to cpu 0.
+ */
+ return 0;
+ }
+ cur_cpu = (++next_vp % max_cpus);
+ return 0;
+}
+
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
@@ -275,6 +339,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
return;
}
+ /*
+ * By default we set up state to enable batched
+ * reading. A specific service can choose to
+ * disable this prior to opening the channel.
+ */
+ newchannel->batched_reading = true;
+
+ /*
+ * Setup state for signalling the host.
+ */
+ newchannel->sig_event = (struct hv_input_signal_event *)
+ (ALIGN((unsigned long)
+ &newchannel->sig_buf,
+ HV_HYPERCALL_PARAM_ALIGN));
+
+ newchannel->sig_event->connectionid.asu32 = 0;
+ newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
+ newchannel->sig_event->flag_number = 0;
+ newchannel->sig_event->rsvdz = 0;
+
+ if (vmbus_proto_version != VERSION_WS2008) {
+ newchannel->is_dedicated_interrupt =
+ (offer->is_dedicated_interrupt != 0);
+ newchannel->sig_event->connectionid.u.id =
+ offer->connection_id;
+ }
+
+ newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+
memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
newchannel->monitor_grp = (u8)offer->monitorid / 32;
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 650c9f0b6642..253a74ba245c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -40,15 +41,99 @@ struct vmbus_connection vmbus_connection = {
};
/*
+ * Negotiated protocol version with the host.
+ */
+__u32 vmbus_proto_version;
+EXPORT_SYMBOL_GPL(vmbus_proto_version);
+
+static __u32 vmbus_get_next_version(__u32 current_version)
+{
+ switch (current_version) {
+ case (VERSION_WIN7):
+ return VERSION_WS2008;
+
+ case (VERSION_WIN8):
+ return VERSION_WIN7;
+
+ case (VERSION_WS2008):
+ default:
+ return VERSION_INVAL;
+ }
+}
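The helper above defines the downgrade order used by the negotiation loop later in vmbus_connect(); assuming VERSION_CURRENT maps to VERSION_WIN8 in this series, the attempted sequence is:

/* VERSION_WIN8 -> VERSION_WIN7 -> VERSION_WS2008 -> VERSION_INVAL
 * (VERSION_INVAL terminates the loop and the connect attempt fails). */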
+
+static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ __u32 version)
+{
+ int ret = 0;
+ struct vmbus_channel_initiate_contact *msg;
+ unsigned long flags;
+ int t;
+
+ init_completion(&msginfo->waitevent);
+
+ msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
+
+ msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+ msg->vmbus_version_requested = version;
+ msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
+ msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
+ msg->monitor_page2 = virt_to_phys(
+ (void *)((unsigned long)vmbus_connection.monitor_pages +
+ PAGE_SIZE));
+
+ /*
+ * Add to list before we send the request since we may
+ * receive the response before returning from this routine
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&msginfo->msglistentry,
+ &vmbus_connection.chn_msg_list);
+
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_initiate_contact));
+ if (ret != 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+ flags);
+ return ret;
+ }
+
+ /* Wait for the connection response */
+ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+ if (t == 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
+ flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+ flags);
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ /* Check if successful */
+ if (msginfo->response.version_response.version_supported) {
+ vmbus_connection.conn_state = CONNECTED;
+ } else {
+ return -ECONNREFUSED;
+ }
+
+ return ret;
+}
+
+/*
* vmbus_connect - Sends a connect request on the partition service connection
*/
int vmbus_connect(void)
{
int ret = 0;
- int t;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_initiate_contact *msg;
- unsigned long flags;
+ __u32 version;
/* Initialize the vmbus connection */
vmbus_connection.conn_state = CONNECTING;
@@ -99,69 +184,38 @@ int vmbus_connect(void)
goto cleanup;
}
- init_completion(&msginfo->waitevent);
-
- msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
-
- msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
- msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
- msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
- msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
- msg->monitor_page2 = virt_to_phys(
- (void *)((unsigned long)vmbus_connection.monitor_pages +
- PAGE_SIZE));
-
/*
- * Add to list before we send the request since we may
- * receive the response before returning from this routine
+ * Negotiate a compatible VMBUS version number with the
+ * host. We start with the highest number we can support
+ * and work our way down until we negotiate a compatible
+ * version.
*/
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&msginfo->msglistentry,
- &vmbus_connection.chn_msg_list);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ version = VERSION_CURRENT;
- ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_initiate_contact));
- if (ret != 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- goto cleanup;
- }
+ do {
+ ret = vmbus_negotiate_version(msginfo, version);
+ if (ret == 0)
+ break;
- /* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ version = vmbus_get_next_version(version);
+ } while (version != VERSION_INVAL);
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- /* Check if successful */
- if (msginfo->response.version_response.version_supported) {
- vmbus_connection.conn_state = CONNECTED;
- } else {
- pr_err("Unable to connect, "
- "Version %d not supported by Hyper-V\n",
- VMBUS_REVISION_NUMBER);
- ret = -ECONNREFUSED;
+ if (version == VERSION_INVAL)
goto cleanup;
- }
+
+ vmbus_proto_version = version;
+ pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
+ host_info_eax, host_info_ebx >> 16,
+ host_info_ebx & 0xFFFF, host_info_ecx,
+ host_info_edx >> 24, host_info_edx & 0xFFFFFF,
+ version >> 16, version & 0xFFFF);
kfree(msginfo);
return 0;
cleanup:
+ pr_err("Unable to connect to host\n");
vmbus_connection.conn_state = DISCONNECTED;
if (vmbus_connection.work_queue)
@@ -212,6 +266,9 @@ static void process_chn_event(u32 relid)
{
struct vmbus_channel *channel;
unsigned long flags;
+ void *arg;
+ bool read_state;
+ u32 bytes_to_read;
/*
* Find the channel based on this relid and invokes the
@@ -234,10 +291,29 @@ static void process_chn_event(u32 relid)
*/
spin_lock_irqsave(&channel->inbound_lock, flags);
- if (channel->onchannel_callback != NULL)
- channel->onchannel_callback(channel->channel_callback_context);
- else
+ if (channel->onchannel_callback != NULL) {
+ arg = channel->channel_callback_context;
+ read_state = channel->batched_reading;
+ /*
+ * This callback reads the messages sent by the host.
+ * We can optimize host to guest signaling by ensuring:
+ * 1. While reading the channel, we disable interrupts from
+ * the host.
+ * 2. Ensure that we process all posted messages from the host
+ * before returning from this callback.
+ * 3. Once we return, re-enable signaling from the host. After that,
+ * check whether additional packets are available to read; if so,
+ * repeat the process.
+ */
+
+ do {
+ hv_begin_read(&channel->inbound);
+ channel->onchannel_callback(arg);
+ bytes_to_read = hv_end_read(&channel->inbound);
+ } while (read_state && (bytes_to_read != 0));
+ } else {
pr_err("no channel callback for relid - %u\n", relid);
+ }
spin_unlock_irqrestore(&channel->inbound_lock, flags);
}
@@ -248,10 +324,32 @@ static void process_chn_event(u32 relid)
void vmbus_on_event(unsigned long data)
{
u32 dword;
- u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+ u32 maxdword;
int bit;
u32 relid;
- u32 *recv_int_page = vmbus_connection.recv_int_page;
+ u32 *recv_int_page = NULL;
+ void *page_addr;
+ int cpu = smp_processor_id();
+ union hv_synic_event_flags *event;
+
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7)) {
+ maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+ recv_int_page = vmbus_connection.recv_int_page;
+ } else {
+ /*
+ * When the host is win8 and beyond, the event page
+ * can be directly checked to get the id of the channel
+ * that has the interrupt pending.
+ */
+ maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
+ page_addr = hv_context.synic_event_page[cpu];
+ event = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ recv_int_page = event->flags32;
+ }
+
+
/* Check events */
if (!recv_int_page)
@@ -307,12 +405,16 @@ int vmbus_post_msg(void *buffer, size_t buflen)
/*
* vmbus_set_event - Send an event notification to the parent
*/
-int vmbus_set_event(u32 child_relid)
+int vmbus_set_event(struct vmbus_channel *channel)
{
- /* Each u32 represents 32 channels */
- sync_set_bit(child_relid & 31,
- (unsigned long *)vmbus_connection.send_int_page +
- (child_relid >> 5));
+ u32 child_relid = channel->offermsg.child_relid;
+
+ if (!channel->is_dedicated_interrupt) {
+ /* Each u32 represents 32 channels */
+ sync_set_bit(child_relid & 31,
+ (unsigned long *)vmbus_connection.send_int_page +
+ (child_relid >> 5));
+ }
- return hv_signal_event();
+ return hv_signal_event(channel->sig_event);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 3648f8f0f368..1c5481da6e4a 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -27,6 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
+#include <linux/interrupt.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -34,13 +35,16 @@
struct hv_context hv_context = {
.synic_initialized = false,
.hypercall_page = NULL,
- .signal_event_param = NULL,
- .signal_event_buffer = NULL,
};
/*
* query_hypervisor_info - Get version info of the windows hypervisor
*/
+unsigned int host_info_eax;
+unsigned int host_info_ebx;
+unsigned int host_info_ecx;
+unsigned int host_info_edx;
+
static int query_hypervisor_info(void)
{
unsigned int eax;
@@ -70,13 +74,10 @@ static int query_hypervisor_info(void)
edx = 0;
op = HVCPUID_VERSION;
cpuid(op, &eax, &ebx, &ecx, &edx);
- pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
- eax,
- ebx >> 16,
- ebx & 0xFFFF,
- ecx,
- edx >> 24,
- edx & 0xFFFFFF);
+ host_info_eax = eax;
+ host_info_ebx = ebx;
+ host_info_ecx = ecx;
+ host_info_edx = edx;
}
return max_leaf;
}
@@ -137,6 +138,10 @@ int hv_init(void)
memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
memset(hv_context.synic_message_page, 0,
sizeof(void *) * NR_CPUS);
+ memset(hv_context.vp_index, 0,
+ sizeof(int) * NR_CPUS);
+ memset(hv_context.event_dpc, 0,
+ sizeof(void *) * NR_CPUS);
max_leaf = query_hypervisor_info();
@@ -168,24 +173,6 @@ int hv_init(void)
hv_context.hypercall_page = virtaddr;
- /* Setup the global signal event param for the signal event hypercall */
- hv_context.signal_event_buffer =
- kmalloc(sizeof(struct hv_input_signal_event_buffer),
- GFP_KERNEL);
- if (!hv_context.signal_event_buffer)
- goto cleanup;
-
- hv_context.signal_event_param =
- (struct hv_input_signal_event *)
- (ALIGN((unsigned long)
- hv_context.signal_event_buffer,
- HV_HYPERCALL_PARAM_ALIGN));
- hv_context.signal_event_param->connectionid.asu32 = 0;
- hv_context.signal_event_param->connectionid.u.id =
- VMBUS_EVENT_CONNECTION_ID;
- hv_context.signal_event_param->flag_number = 0;
- hv_context.signal_event_param->rsvdz = 0;
-
return 0;
cleanup:
@@ -213,10 +200,6 @@ void hv_cleanup(void)
/* Reset our OS id */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
- kfree(hv_context.signal_event_buffer);
- hv_context.signal_event_buffer = NULL;
- hv_context.signal_event_param = NULL;
-
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,13 +256,12 @@ int hv_post_message(union hv_connection_id connection_id,
*
* This involves a hypercall.
*/
-u16 hv_signal_event(void)
+u16 hv_signal_event(void *con_id)
{
u16 status;
- status = do_hypercall(HVCALL_SIGNAL_EVENT,
- hv_context.signal_event_param,
- NULL) & 0xFFFF;
+ status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
+
return status;
}
@@ -297,6 +279,7 @@ void hv_synic_init(void *irqarg)
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
+ u64 vp_index;
u32 irq_vector = *((u32 *)(irqarg));
int cpu = smp_processor_id();
@@ -307,6 +290,15 @@ void hv_synic_init(void *irqarg)
/* Check the version */
rdmsrl(HV_X64_MSR_SVERSION, version);
+ hv_context.event_dpc[cpu] = (struct tasklet_struct *)
+ kmalloc(sizeof(struct tasklet_struct),
+ GFP_ATOMIC);
+ if (hv_context.event_dpc[cpu] == NULL) {
+ pr_err("Unable to allocate event dpc\n");
+ goto cleanup;
+ }
+ tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+
hv_context.synic_message_page[cpu] =
(void *)get_zeroed_page(GFP_ATOMIC);
@@ -345,7 +337,7 @@ void hv_synic_init(void *irqarg)
shared_sint.as_uint64 = 0;
shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
shared_sint.masked = false;
- shared_sint.auto_eoi = false;
+ shared_sint.auto_eoi = true;
wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
@@ -356,6 +348,14 @@ void hv_synic_init(void *irqarg)
wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
hv_context.synic_initialized = true;
+
+ /*
+ * Setup the mapping between Hyper-V's notion
+ * of cpuid and Linux' notion of cpuid.
+ * This array will be indexed using Linux cpuid.
+ */
+ rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+ hv_context.vp_index[cpu] = (u32)vp_index;
return;
cleanup:
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index f6c0011a0337..37873213e24f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -29,7 +29,6 @@
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
-#include <linux/mman.h>
#include <linux/percpu_counter.h>
#include <linux/hyperv.h>
@@ -403,7 +402,7 @@ struct dm_info_header {
*/
struct dm_info_msg {
- struct dm_info_header header;
+ struct dm_header hdr;
__u32 reserved;
__u32 info_size;
__u8 info[];
@@ -415,10 +414,17 @@ struct dm_info_msg {
static bool hot_add;
static bool do_hot_add;
+/*
+ * Delay reporting memory pressure by
+ * the specified number of seconds.
+ */
+static uint pressure_report_delay = 30;
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
static int dm_ring_size = (5 * PAGE_SIZE);
@@ -503,16 +509,48 @@ static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
- switch (msg->header.type) {
+ struct dm_info_header *info_hdr;
+
+ info_hdr = (struct dm_info_header *)msg->info;
+
+ switch (info_hdr->type) {
case INFO_TYPE_MAX_PAGE_CNT:
pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
- pr_info("Data Size is %d\n", msg->header.data_size);
+ pr_info("Data Size is %d\n", info_hdr->data_size);
break;
default:
- pr_info("Received Unknown type: %d\n", msg->header.type);
+ pr_info("Received Unknown type: %d\n", info_hdr->type);
}
}
+unsigned long compute_balloon_floor(void)
+{
+ unsigned long min_pages;
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+ /* Simple continuous piecewise linear function:
+ * max MiB -> min MiB gradient
+ * 0 0
+ * 16 16
+ * 32 24
+ * 128 72 (1/2)
+ * 512 168 (1/4)
+ * 2048 360 (1/8)
+ * 8192 552 (1/32)
+ * 32768 1320
+ * 131072 4392
+ */
+ if (totalram_pages < MB2PAGES(128))
+ min_pages = MB2PAGES(8) + (totalram_pages >> 1);
+ else if (totalram_pages < MB2PAGES(512))
+ min_pages = MB2PAGES(40) + (totalram_pages >> 2);
+ else if (totalram_pages < MB2PAGES(2048))
+ min_pages = MB2PAGES(104) + (totalram_pages >> 3);
+ else
+ min_pages = MB2PAGES(296) + (totalram_pages >> 5);
+#undef MB2PAGES
+ return min_pages;
+}
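As a worked example of the floor above, a guest managing 1024 MiB falls in the 512-2048 MiB bracket:

/*   min_pages = MB2PAGES(104) + (totalram_pages >> 3)
 *             = MB2PAGES(104) + MB2PAGES(1024) / 8
 *             = MB2PAGES(104 + 128)
 *             = MB2PAGES(232)
 *
 * i.e. the balloon is never allowed to shrink such a guest below
 * roughly 232 MiB, consistent with the table above (2048 MiB -> 360). */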
+
/*
* Post our status as it relates memory pressure to the
* host. Host expects the guests to post this status
@@ -526,15 +564,30 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
static void post_status(struct hv_dynmem_device *dm)
{
struct dm_status status;
+ struct sysinfo val;
-
+ if (pressure_report_delay > 0) {
+ --pressure_report_delay;
+ return;
+ }
+ si_meminfo(&val);
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
status.hdr.trans_id = atomic_inc_return(&trans_id);
-
- status.num_committed = vm_memory_committed();
+ /*
+ * The host expects the guest to report free memory.
+ * Further, the host expects the pressure information to
+ * include the ballooned out pages.
+ * For a given amount of memory that we are managing, we
+ * need to compute a floor below which we should not balloon.
+ * Compute this and add it to the pressure report.
+ */
+ status.num_avail = val.freeram;
+ status.num_committed = vm_memory_committed() +
+ dm->num_pages_ballooned +
+ compute_balloon_floor();
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
@@ -543,8 +596,6 @@ static void post_status(struct hv_dynmem_device *dm)
}
-
-
static void free_balloon_pages(struct hv_dynmem_device *dm,
union dm_mem_page_range *range_array)
{
@@ -879,7 +930,7 @@ static int balloon_probe(struct hv_device *dev,
balloon_onchannelcallback, dev);
if (ret)
- return ret;
+ goto probe_error0;
dm_device.dev = dev;
dm_device.state = DM_INITIALIZING;
@@ -891,7 +942,7 @@ static int balloon_probe(struct hv_device *dev,
kthread_run(dm_thread_func, &dm_device, "hv_balloon");
if (IS_ERR(dm_device.thread)) {
ret = PTR_ERR(dm_device.thread);
- goto probe_error0;
+ goto probe_error1;
}
hv_set_drvdata(dev, &dm_device);
@@ -914,12 +965,12 @@ static int balloon_probe(struct hv_device *dev,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
- goto probe_error1;
+ goto probe_error2;
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
- goto probe_error1;
+ goto probe_error2;
}
/*
@@ -928,7 +979,7 @@ static int balloon_probe(struct hv_device *dev,
*/
if (dm_device.state == DM_INIT_ERROR) {
ret = -ETIMEDOUT;
- goto probe_error1;
+ goto probe_error2;
}
/*
* Now submit our capabilities to the host.
@@ -961,12 +1012,12 @@ static int balloon_probe(struct hv_device *dev,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
- goto probe_error1;
+ goto probe_error2;
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
- goto probe_error1;
+ goto probe_error2;
}
/*
@@ -975,18 +1026,20 @@ static int balloon_probe(struct hv_device *dev,
*/
if (dm_device.state == DM_INIT_ERROR) {
ret = -ETIMEDOUT;
- goto probe_error1;
+ goto probe_error2;
}
dm_device.state = DM_INITIALIZED;
return 0;
-probe_error1:
+probe_error2:
kthread_stop(dm_device.thread);
-probe_error0:
+probe_error1:
vmbus_close(dev->channel);
+probe_error0:
+ kfree(send_buffer);
return ret;
}
@@ -999,6 +1052,7 @@ static int balloon_remove(struct hv_device *dev)
vmbus_close(dev->channel);
kthread_stop(dm->thread);
+ kfree(send_buffer);
return 0;
}
@@ -1006,9 +1060,7 @@ static int balloon_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
- { VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
- 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
- },
+ { HV_DM_GUID, },
{ },
};
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index a0667de7a04c..1d4cbd8e8261 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -49,6 +49,16 @@ static struct hv_util_service util_kvp = {
.util_deinit = hv_kvp_deinit,
};
+static void perform_shutdown(struct work_struct *dummy)
+{
+ orderly_poweroff(true);
+}
+
+/*
+ * Perform the shutdown operation in a thread context.
+ */
+static DECLARE_WORK(shutdown_work, perform_shutdown);
+
static void shutdown_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
@@ -106,7 +116,7 @@ static void shutdown_onchannelcallback(void *context)
}
if (execute_shutdown == true)
- orderly_poweroff(true);
+ schedule_work(&shutdown_work);
}
/*
@@ -274,6 +284,16 @@ static int util_probe(struct hv_device *dev,
}
}
+ /*
+ * The services managed by the util driver are not performance
+ * critical and do not need batched reading. Furthermore, some services
+ * such as KVP can only handle one message from the host at a time.
+ * Turn off batched reading for all util drivers before we open the
+ * channel.
+ */
+
+ set_channel_read_state(dev->channel, false);
+
ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
srv->util_cb, dev->channel);
if (ret)
@@ -304,21 +324,21 @@ static int util_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Shutdown guid */
- { VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
- 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
- .driver_data = (unsigned long)&util_shutdown },
+ { HV_SHUTDOWN_GUID,
+ .driver_data = (unsigned long)&util_shutdown
+ },
/* Time synch guid */
- { VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
- 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
- .driver_data = (unsigned long)&util_timesynch },
+ { HV_TS_GUID,
+ .driver_data = (unsigned long)&util_timesynch
+ },
/* Heartbeat guid */
- { VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
- 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
- .driver_data = (unsigned long)&util_heartbeat },
+ { HV_HEART_BEAT_GUID,
+ .driver_data = (unsigned long)&util_heartbeat
+ },
/* KVP guid */
- { VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
- 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6)
- .driver_data = (unsigned long)&util_kvp },
+ { HV_KVP_GUID,
+ .driver_data = (unsigned long)&util_kvp
+ },
{ },
};
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d8d1fadb398a..12f2f9e989f7 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -101,15 +101,6 @@ enum hv_message_type {
/* Define invalid partition identifier. */
#define HV_PARTITION_ID_INVALID ((u64)0x0)
-/* Define connection identifier type. */
-union hv_connection_id {
- u32 asu32;
- struct {
- u32 id:24;
- u32 reserved:8;
- } u;
-};
-
/* Define port identifier type. */
union hv_port_id {
u32 asu32;
@@ -338,13 +329,6 @@ struct hv_input_post_message {
u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
- union hv_connection_id connectionid;
- u16 flag_number;
- u16 rsvdz;
-};
-
/*
* Versioning definitions used for guests reporting themselves to the
 * hypervisor, and vice versa.
@@ -498,11 +482,6 @@ static const uuid_le VMBUS_SERVICE_ID = {
-struct hv_input_signal_event_buffer {
- u64 align8;
- struct hv_input_signal_event event;
-};
-
struct hv_context {
/* We only support running on top of Hyper-V
* So at this point this really can only contain the Hyper-V ID
@@ -513,16 +492,24 @@ struct hv_context {
bool synic_initialized;
- /*
- * This is used as an input param to HvCallSignalEvent hypercall. The
- * input param is immutable in our usage and must be dynamic mem (vs
- * stack or global). */
- struct hv_input_signal_event_buffer *signal_event_buffer;
- /* 8-bytes aligned of the buffer above */
- struct hv_input_signal_event *signal_event_param;
-
void *synic_message_page[NR_CPUS];
void *synic_event_page[NR_CPUS];
+ /*
+ * Hypervisor's notion of virtual processor ID is different from
+ * Linux' notion of CPU ID. This information can only be retrieved
+ * in the context of the calling CPU. Set up a map for easy access
+ * to this information:
+ *
+ * vp_index[a] is Hyper-V's processor ID corresponding to
+ * Linux cpuid 'a'.
+ */
+ u32 vp_index[NR_CPUS];
+ /*
+ * Starting with win8, we can take channel interrupts on any CPU;
+ * we will manage the tasklet that handles events on a per CPU
+ * basis.
+ */
+ struct tasklet_struct *event_dpc[NR_CPUS];
};
extern struct hv_context hv_context;
@@ -538,12 +525,19 @@ extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size);
-extern u16 hv_signal_event(void);
+extern u16 hv_signal_event(void *con_id);
extern void hv_synic_init(void *irqarg);
extern void hv_synic_cleanup(void *arg);
+/*
+ * Host version information.
+ */
+extern unsigned int host_info_eax;
+extern unsigned int host_info_ebx;
+extern unsigned int host_info_ecx;
+extern unsigned int host_info_edx;
/* Interface */
@@ -555,7 +549,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct scatterlist *sglist,
- u32 sgcount);
+ u32 sgcount, bool *signal);
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
u32 buflen);
@@ -563,13 +557,16 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
void *buffer,
u32 buflen,
- u32 offset);
+ u32 offset, bool *signal);
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
+void hv_begin_read(struct hv_ring_buffer_info *rbi);
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+
/*
* Maximum channels is determined by the size of the interrupt page
* which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -657,7 +654,7 @@ int vmbus_connect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
-int vmbus_set_event(u32 child_relid);
+int vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 7233c88f01b8..cafa72ffdc30 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,105 @@
#include "hyperv_vmbus.h"
+void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+ rbi->ring_buffer->interrupt_mask = 1;
+ smp_mb();
+}
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+ u32 read;
+ u32 write;
+
+ rbi->ring_buffer->interrupt_mask = 0;
+ smp_mb();
+
+ /*
+ * Now check to see if the ring buffer is still empty.
+ * If it is not, we raced and we need to process new
+ * incoming messages.
+ */
+ hv_get_ringbuffer_availbytes(rbi, &read, &write);
+
+ return read;
+}
+
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ * 1. The host guarantees that while it is draining the
+ * ring buffer, it will set the interrupt_mask to
+ * indicate it does not need to be interrupted when
+ * new data is placed.
+ *
+ * 2. The host guarantees that it will completely drain
+ * the ring buffer before exiting the read loop. Further,
+ * once the ring buffer is empty, it will clear the
+ * interrupt_mask and re-check to see if new data has
+ * arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+ if (rbi->ring_buffer->interrupt_mask)
+ return false;
+
+ /*
+ * The only case in which we need to signal is when the
+ * ring transitions from being empty to non-empty.
+ */
+ if (old_write == rbi->ring_buffer->read_index)
+ return true;
+
+ return false;
+}
+
+/*
+ * To optimize the flow management on the send-side,
+ * when the sender is blocked because of lack of
+ * sufficient space in the ring buffer, the consumer
+ * of the ring buffer can potentially signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: This is the size in bytes that the
+ * producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz is set to indicate if
+ * the consumer of the ring will signal when the ring
+ * state transitions from being full to a state where
+ * there is room for the producer to send the pending packet.
+ */
+
+static bool hv_need_to_signal_on_read(u32 old_rd,
+ struct hv_ring_buffer_info *rbi)
+{
+ u32 prev_write_sz;
+ u32 cur_write_sz;
+ u32 r_size;
+ u32 write_loc = rbi->ring_buffer->write_index;
+ u32 read_loc = rbi->ring_buffer->read_index;
+ u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+
+ /*
+ * If the other end is not blocked on write, don't bother.
+ */
+ if (pending_sz == 0)
+ return false;
+
+ r_size = rbi->ring_datasize;
+ cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ read_loc - write_loc;
+
+ prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+ old_rd - write_loc;
+
+
+ if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+ return true;
+
+ return false;
+}
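A hypothetical walk-through of the check above, with every number invented for illustration:

/* Suppose ring_datasize = 4096, write_index = 4000, pending_send_sz = 3000,
 * and the function is called with old_rd = 500 while read_index = 3500:
 *
 *   prev_write_sz = 4096 - (4000 - 500)  = 596   (< 3000: producer was blocked)
 *   cur_write_sz  = 4096 - (4000 - 3500) = 3596  (>= 3000: room is now free)
 *
 * prev_write_sz < pending_sz <= cur_write_sz, so the function returns
 * true and the consumer signals the blocked producer. */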
/*
* hv_get_next_write_location()
@@ -239,19 +338,6 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
}
}
-
-/*
- *
- * hv_get_ringbuffer_interrupt_mask()
- *
- * Get the interrupt mask for the specified ring buffer
- *
- */
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->interrupt_mask;
-}
-
/*
*
* hv_ringbuffer_init()
@@ -298,7 +384,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct scatterlist *sglist, u32 sgcount)
+ struct scatterlist *sglist, u32 sgcount, bool *signal)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -307,6 +393,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct scatterlist *sg;
u32 next_write_location;
+ u32 old_write;
u64 prev_indices = 0;
unsigned long flags;
@@ -335,6 +422,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
+ old_write = next_write_location;
+
for_each_sg(sglist, sg, sgcount, i)
{
next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -351,14 +440,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
&prev_indices,
sizeof(u64));
- /* Make sure we flush all writes before updating the writeIndex */
- smp_wmb();
+ /* Issue a full memory barrier before updating the write index */
+ smp_mb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+ *signal = hv_need_to_signal(old_write, outring_info);
return 0;
}
@@ -414,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
*
*/
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
- u32 buflen, u32 offset)
+ u32 buflen, u32 offset, bool *signal)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
unsigned long flags;
+ u32 old_read;
if (buflen <= 0)
return -EINVAL;
@@ -431,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
&bytes_avail_toread,
&bytes_avail_towrite);
+ old_read = bytes_avail_toread;
+
/* Make sure there is something to read */
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -461,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+ *signal = hv_need_to_signal_on_read(old_read, inring_info);
+
return 0;
}
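The two signaling helpers added above reduce to threshold-crossing tests: the writer signals the reader only on the empty-to-non-empty transition, and the reader signals the writer only when the free space it has just created crosses pending_send_sz. The stand-alone sketch below models the read-side test; ring_model, free_space() and need_signal_on_read() are illustrative names for this example only and mirror the arithmetic of hv_need_to_signal_on_read() rather than reproducing driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the ring geometry consulted by the signaling test. */
struct ring_model {
	uint32_t size;			/* ring_datasize */
	uint32_t write_index;
	uint32_t read_index;
	uint32_t pending_send_sz;	/* producer's pending write size, 0 if not blocked */
};

/* Free bytes available to the writer when the reader sits at read_index. */
static uint32_t free_space(const struct ring_model *r, uint32_t read_index)
{
	if (r->write_index >= read_index)
		return r->size - (r->write_index - read_index);
	return read_index - r->write_index;
}

/* Signal only when this read moves free space from below to at/above pending_send_sz. */
static bool need_signal_on_read(const struct ring_model *r, uint32_t old_read_index)
{
	uint32_t before, after;

	if (r->pending_send_sz == 0)	/* producer is not blocked on write */
		return false;

	before = free_space(r, old_read_index);
	after = free_space(r, r->read_index);

	return before < r->pending_send_sz && after >= r->pending_send_sz;
}

int main(void)
{
	struct ring_model r = {
		.size = 4096, .write_index = 100,
		.read_index = 3000, .pending_send_sz = 2048,
	};

	/* Reader advanced from 500 to 3000: free space grows 400 -> 2900, crossing 2048. */
	printf("signal: %d\n", need_signal_on_read(&r, 500));
	return 0;
}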
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e1a9ec53003..cf19dfa5ead1 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
#include <acpi/acpi_bus.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
+#include <linux/kernel_stat.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include "hyperv_vmbus.h"
@@ -41,7 +42,6 @@
static struct acpi_device *hv_acpi_dev;
static struct tasklet_struct msg_dpc;
-static struct tasklet_struct event_dpc;
static struct completion probe_event;
static int irq;
@@ -454,21 +454,40 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
union hv_synic_event_flags *event;
bool handled = false;
+ page_addr = hv_context.synic_event_page[cpu];
+ if (page_addr == NULL)
+ return IRQ_NONE;
+
+ event = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
/*
* Check for events before checking for messages. This is the order
* in which events and messages are checked in Windows guests on
* Hyper-V, and the Windows team suggested we do the same.
*/
- page_addr = hv_context.synic_event_page[cpu];
- event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7)) {
- /* Since we are a child, we only need to check bit 0 */
- if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+ /* Since we are a child, we only need to check bit 0 */
+ if (sync_test_and_clear_bit(0,
+ (unsigned long *) &event->flags32[0])) {
+ handled = true;
+ }
+ } else {
+ /*
+ * Our host is win8 or above. The signaling mechanism
+ * has changed and we can directly look at the event page.
+ * If bit n is set then we have an interrupt on the channel
+ * whose id is n.
+ */
handled = true;
- tasklet_schedule(&event_dpc);
}
+ if (handled)
+ tasklet_schedule(hv_context.event_dpc[cpu]);
+
+
page_addr = hv_context.synic_message_page[cpu];
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
@@ -485,6 +504,19 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
}
/*
+ * vmbus interrupt flow handler:
+ * vmbus interrupts can occur concurrently on multiple CPUs and
+ * can be handled concurrently.
+ */
+
+static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
+{
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ desc->action->handler(irq, desc->action->dev_id);
+}
+
+/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
* Here, we
@@ -506,7 +538,6 @@ static int vmbus_bus_init(int irq)
}
tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
- tasklet_init(&event_dpc, vmbus_on_event, 0);
ret = bus_register(&hv_bus);
if (ret)
@@ -520,6 +551,13 @@ static int vmbus_bus_init(int irq)
goto err_unregister;
}
+ /*
+ * Vmbus interrupts can be handled concurrently on
+ * different CPUs. Establish an appropriate interrupt flow
+ * handler that can support this model.
+ */
+ irq_set_handler(irq, vmbus_flow_handler);
+
vector = IRQ0_VECTOR + irq;
/*
@@ -575,8 +613,6 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
ret = driver_register(&hv_driver->driver);
- vmbus_request_offers();
-
return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
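The reworked ISR above distinguishes two host signaling models: pre-win8 hosts set only bit 0 of the event flags, while win8 and later hosts set bit n of the event page when channel n has a pending interrupt, with the work deferred to a per-CPU tasklet. The small model below illustrates the win8+ scan only; scan_event_page() and handle_channel() are hypothetical stand-ins for the event-page walk and per-channel dispatch, and a single 64-bit bitmap is assumed for brevity.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-channel handler; the real driver schedules channel work instead. */
static void handle_channel(unsigned int id)
{
	printf("servicing channel %u\n", id);
}

/*
 * Bit n set in the event bitmap means channel n has a pending interrupt.
 * The flags are cleared atomically before scanning, modelling the
 * test-and-clear semantics used on the shared event page.
 */
static void scan_event_page(uint64_t *event_flags)
{
	uint64_t pending = __atomic_exchange_n(event_flags, 0, __ATOMIC_ACQ_REL);

	while (pending) {
		unsigned int id = __builtin_ctzll(pending);

		handle_channel(id);
		pending &= pending - 1;		/* clear lowest set bit */
	}
}

int main(void)
{
	uint64_t flags = (1ULL << 3) | (1ULL << 17);	/* channels 3 and 17 signaled */

	scan_event_page(&flags);
	return 0;
}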
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 32f238f3caea..89ac1cb26f24 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -180,11 +180,11 @@ config SENSORS_ADM9240
will be called adm9240.
config SENSORS_ADT7410
- tristate "Analog Devices ADT7410"
+ tristate "Analog Devices ADT7410/ADT7420"
depends on I2C
help
If you say yes here you get support for the Analog Devices
- ADT7410 temperature monitoring chip.
+ ADT7410 and ADT7420 temperature monitoring chips.
This driver can also be built as a module. If so, the module
will be called adt7410.
@@ -506,7 +506,8 @@ config SENSORS_IT87
help
If you say yes here you get support for ITE IT8705F, IT8712F,
IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
- IT8782F, and IT8783E/F sensor chips, and the SiS950 clone.
+ IT8771E, IT8772E, IT8782F, and IT8783E/F sensor chips, and the
+ SiS950 clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -529,8 +530,8 @@ config SENSORS_JC42
temperature sensors, which are used on many DDR3 memory modules for
mobile devices and servers. Support will include, but not be limited
to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
- MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002,
- STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
+ MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, STTS424(E),
+ STTS2002, STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -854,6 +855,17 @@ config SENSORS_MAX6650
This driver can also be built as a module. If so, the module
will be called max6650.
+config SENSORS_MAX6697
+ tristate "Maxim MAX6697 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for MAX6581, MAX6602, MAX6622,
+ MAX6636, MAX6689, MAX6693, MAX6694, MAX6697, MAX6698, and MAX6699
+ temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6697.
+
config SENSORS_MCP3021
tristate "Microchip MCP3021 and compatibles"
depends on I2C
@@ -1145,6 +1157,16 @@ config SENSORS_AMC6821
 This driver can also be built as a module. If so, the module
will be called amc6821.
+config SENSORS_INA209
+ tristate "TI / Burr Brown INA209"
+ depends on I2C
+ help
+ If you say yes here you get support for the TI / Burr Brown INA209
+ voltage / current / power monitor I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ina209.
+
config SENSORS_INA2XX
tristate "Texas Instruments INA219 and compatibles"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 5da287443f6c..8d6d97ea7c1e 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
+obj-$(CONFIG_SENSORS_INA209) += ina209.o
obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
@@ -99,6 +100,7 @@ obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
+obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 1672e2a5db46..6351aba8819c 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -911,7 +911,7 @@ exit:
return res;
}
-static int acpi_power_meter_remove(struct acpi_device *device, int type)
+static int acpi_power_meter_remove(struct acpi_device *device)
{
struct acpi_power_meter_resource *resource;
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index f3a5d4764eb9..5d501adc3e54 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -137,7 +137,7 @@ static ssize_t set_max_min(struct device *dev,
if (ret < 0)
return ret;
- temp = SENSORS_LIMIT(temp, -40000, 85000);
+ temp = clamp_val(temp, -40000, 85000);
temp = (temp + (temp < 0 ? -500 : 500)) / 1000;
mutex_lock(&data->lock);
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index fd1d1b15854e..71bcba8abfc0 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -193,7 +193,7 @@ static ssize_t set_temp_max(struct device *dev,
temp /= 1000;
mutex_lock(&data->update_lock);
- data->temp_max[index] = SENSORS_LIMIT(temp, -128, 127);
+ data->temp_max[index] = clamp_val(temp, -128, 127);
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
data->temp_max[index]);
@@ -218,7 +218,7 @@ static ssize_t set_temp_min(struct device *dev,
temp /= 1000;
mutex_lock(&data->update_lock);
- data->temp_min[index] = SENSORS_LIMIT(temp, -128, 127);
+ data->temp_min[index] = clamp_val(temp, -128, 127);
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
data->temp_min[index]);
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 0f068e7297ee..ea09046e651d 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -197,7 +197,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
};
#define NEG12_OFFSET 16000
#define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
-#define INS_TO_REG(n, val) (SENSORS_LIMIT(SCALE(val, adm1026_scaling[n], 192),\
+#define INS_TO_REG(n, val) (clamp_val(SCALE(val, adm1026_scaling[n], 192),\
0, 255))
#define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
@@ -207,7 +207,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
* 22500 kHz * 60 (sec/min) * 2 (pulse) / 2 (pulse/rev) == 1350000
*/
#define FAN_TO_REG(val, div) ((val) <= 0 ? 0xff : \
- SENSORS_LIMIT(1350000 / ((val) * (div)), \
+ clamp_val(1350000 / ((val) * (div)), \
1, 254))
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 0xff ? 0 : \
1350000 / ((val) * (div)))
@@ -215,14 +215,14 @@ static int adm1026_scaling[] = { /* .001 Volts */
#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
/* Temperature is reported in 1 degC increments */
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) + ((val) < 0 ? -500 : 500)) \
+#define TEMP_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
/ 1000, -127, 127))
#define TEMP_FROM_REG(val) ((val) * 1000)
-#define OFFSET_TO_REG(val) (SENSORS_LIMIT(((val) + ((val) < 0 ? -500 : 500)) \
+#define OFFSET_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
/ 1000, -127, 127))
#define OFFSET_FROM_REG(val) ((val) * 1000)
-#define PWM_TO_REG(val) (SENSORS_LIMIT(val, 0, 255))
+#define PWM_TO_REG(val) (clamp_val(val, 0, 255))
#define PWM_FROM_REG(val) (val)
#define PWM_MIN_TO_REG(val) ((val) & 0xf0)
@@ -233,7 +233,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
* indicates that the DAC could be used to drive the fans, but in our
* example board (Arima HDAMA) it isn't connected to the fans at all.
*/
-#define DAC_TO_REG(val) (SENSORS_LIMIT(((((val) * 255) + 500) / 2500), 0, 255))
+#define DAC_TO_REG(val) (clamp_val(((((val) * 255) + 500) / 2500), 0, 255))
#define DAC_FROM_REG(val) (((val) * 2500) / 255)
/*
@@ -933,7 +933,7 @@ static void fixup_fan_min(struct device *dev, int fan, int old_div)
return;
new_min = data->fan_min[fan] * old_div / new_div;
- new_min = SENSORS_LIMIT(new_min, 1, 254);
+ new_min = clamp_val(new_min, 1, 254);
data->fan_min[fan] = new_min;
adm1026_write_value(client, ADM1026_REG_FAN_MIN(fan), new_min);
}
@@ -1527,7 +1527,7 @@ static ssize_t set_auto_pwm_min(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- data->pwm1.auto_pwm_min = SENSORS_LIMIT(val, 0, 255);
+ data->pwm1.auto_pwm_min = clamp_val(val, 0, 255);
if (data->pwm1.enable == 2) { /* apply immediately */
data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) |
PWM_MIN_TO_REG(data->pwm1.auto_pwm_min));
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index c6a4631e833f..253ea396106d 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -162,13 +162,13 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value)
static int FAN_TO_REG(int reg, int div)
{
int tmp;
- tmp = FAN_FROM_REG(SENSORS_LIMIT(reg, 0, 65535), div);
+ tmp = FAN_FROM_REG(clamp_val(reg, 0, 65535), div);
return tmp > 255 ? 255 : tmp;
}
#define FAN_DIV_FROM_REG(reg) (1<<(((reg)&0xc0)>>6))
-#define PWM_TO_REG(val) (SENSORS_LIMIT((val), 0, 255) >> 4)
+#define PWM_TO_REG(val) (clamp_val((val), 0, 255) >> 4)
#define PWM_FROM_REG(val) ((val) << 4)
#define FAN_CHAN_FROM_REG(reg) (((reg) >> 5) & 7)
@@ -675,7 +675,7 @@ static ssize_t set_temp_offset(struct device *dev,
if (ret)
return ret;
- val = SENSORS_LIMIT(val, -15000, 15000);
+ val = clamp_val(val, -15000, 15000);
mutex_lock(&data->update_lock);
data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_OFFSET(nr),
@@ -696,7 +696,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_min[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +717,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +738,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_crit[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index dafa477715e3..2416628e0ab1 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -98,13 +98,13 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255);
+ return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
}
/* temperature range: -40..125, 127 disables temperature alarm */
static inline s8 TEMP_TO_REG(long val)
{
- return SENSORS_LIMIT(SCALE(val, 1, 1000), -40, 127);
+ return clamp_val(SCALE(val, 1, 1000), -40, 127);
}
/* two fans, each with low fan speed limit */
@@ -122,7 +122,7 @@ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div)
/* analog out 0..1250mV */
static inline u8 AOUT_TO_REG(unsigned long val)
{
- return SENSORS_LIMIT(SCALE(val, 255, 1250), 0, 255);
+ return clamp_val(SCALE(val, 255, 1250), 0, 255);
}
static inline unsigned int AOUT_FROM_REG(u8 reg)
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 409b5c16defb..ba962ac4b81f 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -163,9 +163,9 @@ static int ads7828_probe(struct i2c_client *client,
/* Bound Vref with min/max values if it was provided */
if (data->vref_mv)
- data->vref_mv = SENSORS_LIMIT(data->vref_mv,
- ADS7828_EXT_VREF_MV_MIN,
- ADS7828_EXT_VREF_MV_MAX);
+ data->vref_mv = clamp_val(data->vref_mv,
+ ADS7828_EXT_VREF_MV_MIN,
+ ADS7828_EXT_VREF_MV_MAX);
else
data->vref_mv = ADS7828_INT_VREF_MV;
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 030c8d7c33a5..99a7290da0a3 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -78,10 +78,6 @@ enum adt7410_type { /* keep sorted in alphabetical order */
adt7410,
};
-/* Addresses scanned */
-static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
- I2C_CLIENT_END };
-
static const u8 ADT7410_REG_TEMP[4] = {
ADT7410_TEMPERATURE, /* input */
ADT7410_T_ALARM_HIGH, /* high */
@@ -173,8 +169,8 @@ abort:
static s16 ADT7410_TEMP_TO_REG(long temp)
{
- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, ADT7410_TEMP_MIN,
- ADT7410_TEMP_MAX) * 128, 1000);
+ return DIV_ROUND_CLOSEST(clamp_val(temp, ADT7410_TEMP_MIN,
+ ADT7410_TEMP_MAX) * 128, 1000);
}
static int ADT7410_REG_TO_TEMP(struct adt7410_data *data, s16 reg)
@@ -269,9 +265,9 @@ static ssize_t adt7410_set_t_hyst(struct device *dev,
return ret;
/* convert absolute hysteresis value to a 4 bit delta value */
limit = ADT7410_REG_TO_TEMP(data, data->temp[1]);
- hyst = SENSORS_LIMIT(hyst, ADT7410_TEMP_MIN, ADT7410_TEMP_MAX);
- data->hyst = SENSORS_LIMIT(DIV_ROUND_CLOSEST(limit - hyst, 1000),
- 0, ADT7410_T_HYST_MASK);
+ hyst = clamp_val(hyst, ADT7410_TEMP_MIN, ADT7410_TEMP_MAX);
+ data->hyst = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000), 0,
+ ADT7410_T_HYST_MASK);
ret = i2c_smbus_write_byte_data(client, ADT7410_T_HYST, data->hyst);
if (ret)
return ret;
@@ -364,6 +360,7 @@ static int adt7410_probe(struct i2c_client *client,
/*
 * Set to 16 bit resolution, continuous conversion and comparator mode.
*/
+ ret &= ~ADT7410_MODE_MASK;
data->config = ret | ADT7410_FULL | ADT7410_RESOLUTION |
ADT7410_EVENT_MODE;
if (data->config != data->oldconfig) {
@@ -410,11 +407,12 @@ static int adt7410_remove(struct i2c_client *client)
static const struct i2c_device_id adt7410_ids[] = {
{ "adt7410", adt7410, },
+ { "adt7420", adt7410, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, adt7410_ids);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int adt7410_suspend(struct device *dev)
{
int ret;
@@ -436,10 +434,8 @@ static int adt7410_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops adt7410_dev_pm_ops = {
- .suspend = adt7410_suspend,
- .resume = adt7410_resume,
-};
+static SIMPLE_DEV_PM_OPS(adt7410_dev_pm_ops, adt7410_suspend, adt7410_resume);
+
#define ADT7410_DEV_PM_OPS (&adt7410_dev_pm_ops)
#else
#define ADT7410_DEV_PM_OPS NULL
@@ -454,11 +450,11 @@ static struct i2c_driver adt7410_driver = {
.probe = adt7410_probe,
.remove = adt7410_remove,
.id_table = adt7410_ids,
- .address_list = normal_i2c,
+ .address_list = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
};
module_i2c_driver(adt7410_driver);
MODULE_AUTHOR("Hartmut Knaack");
-MODULE_DESCRIPTION("ADT7410 driver");
+MODULE_DESCRIPTION("ADT7410/ADT7420 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 98a7d81e25c5..69481d3a3d23 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -836,7 +836,7 @@ static ssize_t set_temp_min(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -874,7 +874,7 @@ static ssize_t set_temp_max(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -939,7 +939,7 @@ static ssize_t set_volt_max(struct device *dev,
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_max[attr->index] = temp;
@@ -981,7 +981,7 @@ static ssize_t set_volt_min(struct device *dev,
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_min[attr->index] = temp;
@@ -1071,7 +1071,7 @@ static ssize_t set_fan_min(struct device *dev,
temp = FAN_RPM_TO_PERIOD(temp);
temp >>= 8;
- temp = SENSORS_LIMIT(temp, 1, 255);
+ temp = clamp_val(temp, 1, 255);
mutex_lock(&data->lock);
data->fan_min[attr->index] = temp;
@@ -1149,7 +1149,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm[attr->index] = temp;
@@ -1179,7 +1179,7 @@ static ssize_t set_pwm_max(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_max = temp;
@@ -1211,7 +1211,7 @@ static ssize_t set_pwm_min(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_min[attr->index] = temp;
@@ -1246,7 +1246,7 @@ static ssize_t set_pwm_hyst(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = SENSORS_LIMIT(temp, 0, 15);
+ temp = clamp_val(temp, 0, 15);
/* package things up */
temp &= ADT7462_PWM_HYST_MASK;
@@ -1333,7 +1333,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 39ecb1a3b9ef..b83bf4bb95eb 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -452,7 +452,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 60000);
+ temp = clamp_val(temp, 0, 60000);
mutex_lock(&data->lock);
data->auto_update_interval = temp;
@@ -481,7 +481,7 @@ static ssize_t set_num_temp_sensors(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, -1, 10);
+ temp = clamp_val(temp, -1, 10);
mutex_lock(&data->lock);
data->num_temp_sensors = temp;
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -604,7 +604,7 @@ static ssize_t set_fan_max(struct device *dev,
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
- temp = SENSORS_LIMIT(temp, 1, 65534);
+ temp = clamp_val(temp, 1, 65534);
mutex_lock(&data->lock);
data->fan_max[attr->index] = temp;
@@ -641,7 +641,7 @@ static ssize_t set_fan_min(struct device *dev,
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
- temp = SENSORS_LIMIT(temp, 1, 65534);
+ temp = clamp_val(temp, 1, 65534);
mutex_lock(&data->lock);
data->fan_min[attr->index] = temp;
@@ -717,7 +717,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm[attr->index] = temp;
@@ -749,7 +749,7 @@ static ssize_t set_pwm_max(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_max[attr->index] = temp;
@@ -782,7 +782,7 @@ static ssize_t set_pwm_min(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_min[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 989e54c39252..22d008bbdc10 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -201,10 +201,10 @@ static inline u16 temp2reg(struct adt7475_data *data, long val)
u16 ret;
if (!(data->config5 & CONFIG5_TWOSCOMP)) {
- val = SENSORS_LIMIT(val, -64000, 191000);
+ val = clamp_val(val, -64000, 191000);
ret = (val + 64500) / 1000;
} else {
- val = SENSORS_LIMIT(val, -128000, 127000);
+ val = clamp_val(val, -128000, 127000);
if (val < -500)
ret = (256500 + val) / 1000;
else
@@ -240,7 +240,7 @@ static inline u16 rpm2tach(unsigned long rpm)
if (rpm == 0)
return 0;
- return SENSORS_LIMIT((90000 * 60) / rpm, 1, 0xFFFF);
+ return clamp_val((90000 * 60) / rpm, 1, 0xFFFF);
}
/* Scaling factors for voltage inputs, taken from the ADT7490 datasheet */
@@ -271,7 +271,7 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
reg = (volt * 1024) / 2250;
else
reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
- return SENSORS_LIMIT(reg, 0, 1023) & (0xff << 2);
+ return clamp_val(reg, 0, 1023) & (0xff << 2);
}
static u16 adt7475_read_word(struct i2c_client *client, int reg)
@@ -451,10 +451,10 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
switch (sattr->nr) {
case OFFSET:
if (data->config5 & CONFIG5_TEMPOFFSET) {
- val = SENSORS_LIMIT(val, -63000, 127000);
+ val = clamp_val(val, -63000, 127000);
out = data->temp[OFFSET][sattr->index] = val / 1000;
} else {
- val = SENSORS_LIMIT(val, -63000, 64000);
+ val = clamp_val(val, -63000, 64000);
out = data->temp[OFFSET][sattr->index] = val / 500;
}
break;
@@ -471,7 +471,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
adt7475_read_hystersis(client);
temp = reg2temp(data, data->temp[THERM][sattr->index]);
- val = SENSORS_LIMIT(val, temp - 15000, temp);
+ val = clamp_val(val, temp - 15000, temp);
val = (temp - val) / 1000;
if (sattr->index != 1) {
@@ -577,7 +577,7 @@ static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
* to figure the range
*/
temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
- val = SENSORS_LIMIT(val, temp + autorange_table[0],
+ val = clamp_val(val, temp + autorange_table[0],
temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]);
val -= temp;
@@ -701,7 +701,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
break;
}
- data->pwm[sattr->nr][sattr->index] = SENSORS_LIMIT(val, 0, 0xFF);
+ data->pwm[sattr->nr][sattr->index] = clamp_val(val, 0, 0xFF);
i2c_smbus_write_byte_data(client, reg,
data->pwm[sattr->nr][sattr->index]);
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index ae482e3afdac..4fe49d2bfe1d 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -241,7 +241,7 @@ static ssize_t set_temp(
int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
- val = SENSORS_LIMIT(val / 1000, -128, 127);
+ val = clamp_val(val / 1000, -128, 127);
mutex_lock(&data->update_lock);
data->temp[ix] = val;
@@ -332,7 +332,7 @@ static ssize_t set_pwm1(
return ret;
mutex_lock(&data->update_lock);
- data->pwm1 = SENSORS_LIMIT(val , 0, 255);
+ data->pwm1 = clamp_val(val, 0, 255);
i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1);
mutex_unlock(&data->update_lock);
return count;
@@ -499,11 +499,11 @@ static ssize_t set_temp_auto_point_temp(
mutex_lock(&data->update_lock);
switch (ix) {
case 0:
- ptemp[0] = SENSORS_LIMIT(val / 1000, 0,
- data->temp1_auto_point_temp[1]);
- ptemp[0] = SENSORS_LIMIT(ptemp[0], 0,
- data->temp2_auto_point_temp[1]);
- ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 63);
+ ptemp[0] = clamp_val(val / 1000, 0,
+ data->temp1_auto_point_temp[1]);
+ ptemp[0] = clamp_val(ptemp[0], 0,
+ data->temp2_auto_point_temp[1]);
+ ptemp[0] = clamp_val(ptemp[0], 0, 63);
if (i2c_smbus_write_byte_data(
client,
AMC6821_REG_PSV_TEMP,
@@ -515,20 +515,12 @@ static ssize_t set_temp_auto_point_temp(
goto EXIT;
break;
case 1:
- ptemp[1] = SENSORS_LIMIT(
- val / 1000,
- (ptemp[0] & 0x7C) + 4,
- 124);
+ ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124);
ptemp[1] &= 0x7C;
- ptemp[2] = SENSORS_LIMIT(
- ptemp[2], ptemp[1] + 1,
- 255);
+ ptemp[2] = clamp_val(ptemp[2], ptemp[1] + 1, 255);
break;
case 2:
- ptemp[2] = SENSORS_LIMIT(
- val / 1000,
- ptemp[1]+1,
- 255);
+ ptemp[2] = clamp_val(val / 1000, ptemp[1]+1, 255);
break;
default:
dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
@@ -561,7 +553,7 @@ static ssize_t set_pwm1_auto_point_pwm(
return ret;
mutex_lock(&data->update_lock);
- data->pwm1_auto_point_pwm[1] = SENSORS_LIMIT(val, 0, 254);
+ data->pwm1_auto_point_pwm[1] = clamp_val(val, 0, 254);
if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP,
data->pwm1_auto_point_pwm[1])) {
dev_err(&client->dev, "Register write error, aborting.\n");
@@ -629,7 +621,7 @@ static ssize_t set_fan(
val = 1 > val ? 0xFFFF : 6000000/val;
mutex_lock(&data->update_lock);
- data->fan[ix] = (u16) SENSORS_LIMIT(val, 1, 0xFFFF);
+ data->fan[ix] = (u16) clamp_val(val, 1, 0xFFFF);
if (i2c_smbus_write_byte_data(client, fan_reg_low[ix],
data->fan[ix] & 0xFF)) {
dev_err(&client->dev, "Register write error, aborting.\n");
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 520e5bf4f76d..6ac612cabda1 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -114,7 +114,7 @@ static const u16 asb100_reg_temp_hyst[] = {0, 0x3a, 0x153, 0x253, 0x19};
*/
static u8 IN_TO_REG(unsigned val)
{
- unsigned nval = SENSORS_LIMIT(val, ASB100_IN_MIN, ASB100_IN_MAX);
+ unsigned nval = clamp_val(val, ASB100_IN_MIN, ASB100_IN_MAX);
return (nval + 8) / 16;
}
@@ -129,8 +129,8 @@ static u8 FAN_TO_REG(long rpm, int div)
return 0;
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static int FAN_FROM_REG(u8 val, int div)
@@ -148,7 +148,7 @@ static int FAN_FROM_REG(u8 val, int div)
*/
static u8 TEMP_TO_REG(long temp)
{
- int ntemp = SENSORS_LIMIT(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX);
+ int ntemp = clamp_val(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX);
ntemp += (ntemp < 0 ? -500 : 500);
return (u8)(ntemp / 1000);
}
@@ -164,7 +164,7 @@ static int TEMP_FROM_REG(u8 reg)
*/
static u8 ASB100_PWM_TO_REG(int pwm)
{
- pwm = SENSORS_LIMIT(pwm, 0, 255);
+ pwm = clamp_val(pwm, 0, 255);
return (u8)(pwm / 16);
}
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index b867aab78049..da7f5b5d5db5 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -191,7 +191,7 @@ static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, 0, 255);
+ reqval = clamp_val(reqval, 0, 255);
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = reqval;
@@ -224,7 +224,7 @@ static ssize_t store_bitmask(struct device *dev,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]);
+ reqval = clamp_val(reqval, 0, param->mask[0]);
reqval = (reqval & param->mask[0]) << param->shift[0];
@@ -274,7 +274,7 @@ static ssize_t store_fan16(struct device *dev,
* generating an alarm.
*/
reqval =
- (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe));
+ (reqval <= 0 ? 0xffff : clamp_val(5400000 / reqval, 0, 0xfffe));
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
@@ -343,11 +343,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
+ reqval = clamp_val(reqval, 0, 0xffff);
reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
- reqval = SENSORS_LIMIT(reqval, 0, 0xff);
+ reqval = clamp_val(reqval, 0, 0xff);
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = reqval;
@@ -376,7 +376,7 @@ static ssize_t store_temp8(struct device *dev,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, -127000, 127000);
+ reqval = clamp_val(reqval, -127000, 127000);
temp = reqval / 1000;
@@ -432,7 +432,7 @@ static ssize_t store_temp62(struct device *dev,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, -32000, 31750);
+ reqval = clamp_val(reqval, -32000, 31750);
i = reqval / 1000;
f = reqval - (i * 1000);
temp = i << 2;
@@ -468,7 +468,7 @@ static ssize_t show_ap2_temp(struct device *dev,
auto_point1 = ((s8) data->reg[param->msb[1]]) * 1000;
regval =
((data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]);
- temp = auto_point1 + asc7621_range_map[SENSORS_LIMIT(regval, 0, 15)];
+ temp = auto_point1 + asc7621_range_map[clamp_val(regval, 0, 15)];
mutex_unlock(&data->update_lock);
return sprintf(buf, "%d\n", temp);
@@ -489,7 +489,7 @@ static ssize_t store_ap2_temp(struct device *dev,
mutex_lock(&data->update_lock);
auto_point1 = data->reg[param->msb[1]] * 1000;
- reqval = SENSORS_LIMIT(reqval, auto_point1 + 2000, auto_point1 + 80000);
+ reqval = clamp_val(reqval, auto_point1 + 2000, auto_point1 + 80000);
for (i = ARRAY_SIZE(asc7621_range_map) - 1; i >= 0; i--) {
if (reqval >= auto_point1 + asc7621_range_map[i]) {
@@ -523,7 +523,7 @@ static ssize_t show_pwm_ac(struct device *dev,
regval = config | (altbit << 3);
mutex_unlock(&data->update_lock);
- return sprintf(buf, "%u\n", map[SENSORS_LIMIT(regval, 0, 15)]);
+ return sprintf(buf, "%u\n", map[clamp_val(regval, 0, 15)]);
}
static ssize_t store_pwm_ac(struct device *dev,
@@ -663,7 +663,7 @@ static ssize_t show_pwm_freq(struct device *dev,
u8 regval =
(data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
- regval = SENSORS_LIMIT(regval, 0, 15);
+ regval = clamp_val(regval, 0, 15);
return sprintf(buf, "%u\n", asc7621_pwm_freq_map[regval]);
}
@@ -711,7 +711,7 @@ static ssize_t show_pwm_ast(struct device *dev,
u8 regval =
(data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
- regval = SENSORS_LIMIT(regval, 0, 7);
+ regval = clamp_val(regval, 0, 7);
return sprintf(buf, "%u\n", asc7621_pwm_auto_spinup_map[regval]);
@@ -759,7 +759,7 @@ static ssize_t show_temp_st(struct device *dev,
SETUP_SHOW_data_param(dev, attr);
u8 regval =
(data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
- regval = SENSORS_LIMIT(regval, 0, 7);
+ regval = clamp_val(regval, 0, 7);
return sprintf(buf, "%u\n", asc7621_temp_smoothing_time_map[regval]);
}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 56dbcfb3e301..b25c64302cbc 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -190,7 +190,7 @@ struct atk_acpi_input_buf {
};
static int atk_add(struct acpi_device *device);
-static int atk_remove(struct acpi_device *device, int type);
+static int atk_remove(struct acpi_device *device);
static void atk_print_sensor(struct atk_data *data, union acpi_object *obj);
static int atk_read_value(struct atk_sensor_data *sensor, u64 *value);
static void atk_free_sensors(struct atk_data *data);
@@ -1416,7 +1416,7 @@ out:
return err;
}
-static int atk_remove(struct acpi_device *device, int type)
+static int atk_remove(struct acpi_device *device)
{
struct atk_data *data = device->driver_data;
dev_dbg(&device->dev, "removing...\n");
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index d64923d63537..3f1e297663ad 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -198,7 +198,7 @@ struct tjmax {
static const struct tjmax __cpuinitconst tjmax_table[] = {
{ "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
{ "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
- { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 */
+ { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */
{ "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
{ "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
};
@@ -212,7 +212,7 @@ struct tjmax_model {
#define ANY 0xff
static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
- { 0x1c, 10, 100000 }, /* D4xx, N4xx, D5xx, N5xx */
+ { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
{ 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others
* Note: Also matches 230 and 330,
* which are covered by tjmax_table
@@ -222,6 +222,7 @@ static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
* is undetectable by software
*/
{ 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
+ { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z2760) */
{ 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
};
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 7430f70ae452..c347c94f2f73 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
static inline int IN_TO_REG(int val, int nominal)
{
- return SENSORS_LIMIT((val * 192 + nominal / 2) / nominal, 0, 255);
+ return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
}
/*
@@ -293,8 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res)
static inline int TEMP_TO_REG(int val)
{
- return SENSORS_LIMIT((val < 0 ? val - 500 : val + 500) / 1000,
- -128, 127);
+ return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
}
/* Temperature range */
@@ -332,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
{
- int hyst = SENSORS_LIMIT((val + 500) / 1000, 0, 15);
+ int hyst = clamp_val((val + 500) / 1000, 0, 15);
return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4);
}
@@ -349,10 +348,10 @@ static inline int FAN_FROM_REG(int reg, int tpc)
static inline int FAN_TO_REG(int val, int tpc)
{
if (tpc) {
- return SENSORS_LIMIT(val / tpc, 0, 0xffff);
+ return clamp_val(val / tpc, 0, 0xffff);
} else {
return (val <= 0) ? 0xffff :
- SENSORS_LIMIT(90000 * 60 / val, 0, 0xfffe);
+ clamp_val(90000 * 60 / val, 0, 0xfffe);
}
}
@@ -1282,7 +1281,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
switch (fn) {
case SYS_PWM:
- data->pwm[ix] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm[ix] = clamp_val(val, 0, 255);
dme1737_write(data, DME1737_REG_PWM(ix), data->pwm[ix]);
break;
case SYS_PWM_FREQ:
@@ -1450,7 +1449,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
break;
case SYS_PWM_AUTO_POINT1_PWM:
/* Only valid for pwm[1-3] */
- data->pwm_min[ix] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm_min[ix] = clamp_val(val, 0, 255);
dme1737_write(data, DME1737_REG_PWM_MIN(ix),
data->pwm_min[ix]);
break;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 77f434c58236..b07305622087 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -405,7 +405,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
if (rpm_target == 0)
data->fan_target = 0x1fff;
else
- data->fan_target = SENSORS_LIMIT(
+ data->fan_target = clamp_val(
(FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target,
0, 0x1fff);
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index a98c917b5888..936898f82f94 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -187,7 +187,7 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
* Sysfs callback functions
*/
-static const u16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
+static const s16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
char *buf)
@@ -220,7 +220,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
: EMC6W201_REG_IN_HIGH(nr);
mutex_lock(&data->update_lock);
- data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
+ data->in[sf][nr] = clamp_val(val, 0, 255);
err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
@@ -257,7 +257,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
: EMC6W201_REG_TEMP_HIGH(nr);
mutex_lock(&data->update_lock);
- data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
+ data->temp[sf][nr] = clamp_val(val, -127, 128);
err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
@@ -298,7 +298,7 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *devattr,
val = 0xFFFF;
} else {
val = DIV_ROUND_CLOSEST(5400000U, val);
- val = SENSORS_LIMIT(val, 0, 0xFFFE);
+ val = clamp_val(val, 0, 0xFFFE);
}
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index bb7275cc47f3..cfb02dd91aad 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1350,7 +1350,7 @@ static ssize_t store_fan_full_speed(struct device *dev,
if (err)
return err;
- val = SENSORS_LIMIT(val, 23, 1500000);
+ val = clamp_val(val, 23, 1500000);
val = fan_to_reg(val);
mutex_lock(&data->update_lock);
@@ -1438,7 +1438,7 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
return err;
val /= 8;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
@@ -1542,7 +1542,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
return err;
val /= 1000;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
@@ -1589,8 +1589,7 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
/* convert abs to relative and check */
data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
- val = SENSORS_LIMIT(val, data->temp_high[nr] - 15,
- data->temp_high[nr]);
+ val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]);
val = data->temp_high[nr] - val;
/* convert value to register contents */
@@ -1627,7 +1626,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
return err;
val /= 1000;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
@@ -1754,7 +1753,7 @@ static ssize_t store_pwm(struct device *dev,
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -1805,7 +1804,7 @@ static ssize_t store_simple_pwm(struct device *dev,
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
@@ -1932,7 +1931,7 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -1991,8 +1990,8 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
mutex_lock(&data->update_lock);
data->pwm_auto_point_temp[nr][point] =
f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
- val = SENSORS_LIMIT(val, data->pwm_auto_point_temp[nr][point] - 15,
- data->pwm_auto_point_temp[nr][point]);
+ val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15,
+ data->pwm_auto_point_temp[nr][point]);
val = data->pwm_auto_point_temp[nr][point] - val;
reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
@@ -2126,9 +2125,9 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
val /= 1000;
if (data->auto_point_temp_signed)
- val = SENSORS_LIMIT(val, -128, 127);
+ val = clamp_val(val, -128, 127);
else
- val = SENSORS_LIMIT(val, 0, 127);
+ val = clamp_val(val, 0, 127);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index f7dba229395f..9e300e567f15 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -359,7 +359,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm[nr] = clamp_val(val, 0, 255);
f75375_write_pwm(client, nr);
mutex_unlock(&data->update_lock);
return count;
@@ -556,7 +556,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
+ val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_max[nr] = val;
f75375_write8(client, F75375_REG_VOLT_HIGH(nr), data->in_max[nr]);
@@ -577,7 +577,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
+ val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_min[nr] = val;
f75375_write8(client, F75375_REG_VOLT_LOW(nr), data->in_min[nr]);
@@ -625,7 +625,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
+ val = clamp_val(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_high[nr] = val;
f75375_write8(client, F75375_REG_TEMP_HIGH(nr), data->temp_high[nr]);
@@ -646,7 +646,7 @@ static ssize_t set_temp_max_hyst(struct device *dev,
if (err < 0)
return err;
- val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
+ val = clamp_val(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_max_hyst[nr] = val;
f75375_write8(client, F75375_REG_TEMP_HYST(nr),
@@ -822,7 +822,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
if (auto_mode_enabled(f75375s_pdata->pwm_enable[nr]) ||
!duty_mode_enabled(f75375s_pdata->pwm_enable[nr]))
continue;
- data->pwm[nr] = SENSORS_LIMIT(f75375s_pdata->pwm[nr], 0, 255);
+ data->pwm[nr] = clamp_val(f75375s_pdata->pwm[nr], 0, 255);
f75375_write_pwm(client, nr);
}
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 519ce8b9c142..8af2755cdb87 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -379,7 +379,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
if (err)
return err;
- v = SENSORS_LIMIT(v / 1000, -128, 127) + 128;
+ v = clamp_val(v / 1000, -128, 127) + 128;
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(to_i2c_client(dev),
@@ -540,7 +540,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev,
/* reg: 0 = allow turning off (except on the syl), 1-255 = 50-100% */
if (v || data->kind == fscsyl) {
- v = SENSORS_LIMIT(v, 128, 255);
+ v = clamp_val(v, 128, 255);
v = (v - 128) * 2 + 1;
}
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 8b2106f60eda..ea6480b80e7f 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -171,7 +171,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->set_cnt = PWM_TO_CNT(SENSORS_LIMIT(val, 0, 255));
+ data->set_cnt = PWM_TO_CNT(clamp_val(val, 0, 255));
g760a_write_value(client, G760A_REG_SET_CNT, data->set_cnt);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 2c74673f48e5..e2e5909a34df 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -86,7 +86,7 @@ enum chips { gl518sm_r00, gl518sm_r80 };
#define BOOL_FROM_REG(val) ((val) ? 0 : 1)
#define BOOL_TO_REG(val) ((val) ? 0 : 1)
-#define TEMP_TO_REG(val) SENSORS_LIMIT(((((val) < 0 ? \
+#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
(val) - 500 : \
(val) + 500) / 1000) + 119), 0, 255)
#define TEMP_FROM_REG(val) (((val) - 119) * 1000)
@@ -96,15 +96,15 @@ static inline u8 FAN_TO_REG(long rpm, int div)
long rpmdiv;
if (rpm == 0)
return 0;
- rpmdiv = SENSORS_LIMIT(rpm, 1, 960000) * div;
- return SENSORS_LIMIT((480000 + rpmdiv / 2) / rpmdiv, 1, 255);
+ rpmdiv = clamp_val(rpm, 1, 960000) * div;
+ return clamp_val((480000 + rpmdiv / 2) / rpmdiv, 1, 255);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) * (div))))
-#define IN_TO_REG(val) SENSORS_LIMIT((((val) + 9) / 19), 0, 255)
+#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
#define IN_FROM_REG(val) ((val) * 19)
-#define VDD_TO_REG(val) SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
#define DIV_FROM_REG(val) (1 << (val))
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index a21ff252f2f1..ed56e09c3dd7 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -144,10 +144,10 @@ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
-#define VDD_TO_REG(val) SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
#define IN_FROM_REG(val) ((val) * 19)
-#define IN_TO_REG(val) SENSORS_LIMIT((((val) + 9) / 19), 0, 255)
+#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -285,8 +285,7 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
- SENSORS_LIMIT((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, \
- 255))
+ clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -450,7 +449,7 @@ static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
get_fan_off, set_fan_off);
#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
-#define TEMP_TO_REG(val) SENSORS_LIMIT(((((val) < 0 ? \
+#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
(val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 4e04c1228e51..39781945a5d2 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -422,7 +422,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
/* Fill GPIO pin array */
pdata->num_ctrl = of_gpio_count(node);
- if (!pdata->num_ctrl) {
+ if (pdata->num_ctrl <= 0) {
dev_err(dev, "gpios DT property empty / missing");
return -ENODEV;
}
@@ -477,7 +477,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
pdata->speed = speed;
/* Alarm GPIO if one exists */
- if (of_gpio_named_count(node, "alarm-gpios")) {
+ if (of_gpio_named_count(node, "alarm-gpios") > 0) {
struct gpio_fan_alarm *alarm;
int val;
enum of_gpio_flags flags;
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 9f26400713f0..89cfd64b3373 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -115,6 +115,12 @@ int vid_from_reg(int val, u8 vrm)
return (val < 32) ? 1550 - 25 * val
: 775 - (25 * (val - 31)) / 2;
+ case 26: /* AMD family 10h to 15h, serial VID */
+ val &= 0x7f;
+ if (val >= 0x7c)
+ return 0;
+ return DIV_ROUND_CLOSEST(15500 - 125 * val, 10);
+
case 91: /* VRM 9.1 */
case 90: /* VRM 9.0 */
val &= 0x1f;
@@ -195,6 +201,10 @@ static struct vrm_model vrm_models[] = {
{X86_VENDOR_AMD, 0xF, 0x40, 0x7F, ANY, 24}, /* NPT family 0Fh */
{X86_VENDOR_AMD, 0xF, 0x80, ANY, ANY, 25}, /* future fam. 0Fh */
{X86_VENDOR_AMD, 0x10, 0x0, ANY, ANY, 25}, /* NPT family 10h */
+ {X86_VENDOR_AMD, 0x11, 0x0, ANY, ANY, 26}, /* family 11h */
+ {X86_VENDOR_AMD, 0x12, 0x0, ANY, ANY, 26}, /* family 12h */
+ {X86_VENDOR_AMD, 0x14, 0x0, ANY, ANY, 26}, /* family 14h */
+ {X86_VENDOR_AMD, 0x15, 0x0, ANY, ANY, 26}, /* family 15h */
{X86_VENDOR_INTEL, 0x6, 0x0, 0x6, ANY, 82}, /* Pentium Pro,
* Pentium II, Xeon,
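The new VRM 26 entry above decodes the AMD serial VID format used by families 10h to 15h: codes of 0x7c or above decode to 0, and any other code maps to 1550 mV minus 12.5 mV per step, which is what DIV_ROUND_CLOSEST(15500 - 125 * val, 10) computes. A minimal check of that arithmetic is sketched below; svid_to_mv() is an illustrative helper, not part of hwmon-vid.c.

#include <stdio.h>

/* Mirrors the case 26 decode added above: 1550 mV at code 0, 12.5 mV per step. */
static int svid_to_mv(int val)
{
	val &= 0x7f;
	if (val >= 0x7c)			/* codes 0x7c-0x7f decode to 0 */
		return 0;
	return (15500 - 125 * val + 5) / 10;	/* DIV_ROUND_CLOSEST(15500 - 125 * val, 10) */
}

int main(void)
{
	printf("%d mV\n", svid_to_mv(0x20));	/* 0x20 -> 1150 mV */
	printf("%d mV\n", svid_to_mv(0x7d));	/* high code -> 0 mV */
	return 0;
}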
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c3c471ca202f..646314f7c839 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -84,19 +84,21 @@ static void __init hwmon_pci_quirks(void)
/* Open access to 0x295-0x296 on MSI MS-7031 */
sb = pci_get_device(PCI_VENDOR_ID_ATI, 0x436c, NULL);
- if (sb &&
- (sb->subsystem_vendor == 0x1462 && /* MSI */
- sb->subsystem_device == 0x0031)) { /* MS-7031 */
-
- pci_read_config_byte(sb, 0x48, &enable);
- pci_read_config_word(sb, 0x64, &base);
-
- if (base == 0 && !(enable & BIT(2))) {
- dev_info(&sb->dev,
- "Opening wide generic port at 0x295\n");
- pci_write_config_word(sb, 0x64, 0x295);
- pci_write_config_byte(sb, 0x48, enable | BIT(2));
+ if (sb) {
+ if (sb->subsystem_vendor == 0x1462 && /* MSI */
+ sb->subsystem_device == 0x0031) { /* MS-7031 */
+ pci_read_config_byte(sb, 0x48, &enable);
+ pci_read_config_word(sb, 0x64, &base);
+
+ if (base == 0 && !(enable & BIT(2))) {
+ dev_info(&sb->dev,
+ "Opening wide generic port at 0x295\n");
+ pci_write_config_word(sb, 0x64, 0x295);
+ pci_write_config_byte(sb, 0x48,
+ enable | BIT(2));
+ }
}
+ pci_dev_put(sb);
}
#endif
}
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
new file mode 100644
index 000000000000..c6fdd5bd395e
--- /dev/null
+++ b/drivers/hwmon/ina209.c
@@ -0,0 +1,636 @@
+/*
+ * Driver for the Texas Instruments / Burr Brown INA209
+ * Bidirectional Current/Power Monitor
+ *
+ * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Derived from Ira W. Snyder's original driver submission
+ * Copyright (C) 2008 Paul Hays <Paul.Hays@cattail.ca>
+ * Copyright (C) 2008-2009 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * Aligned with ina2xx driver
+ * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Thanks to Jan Volkering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Datasheet:
+ * http://www.ti.com/lit/gpn/ina209
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <linux/platform_data/ina2xx.h>
+
+/* register definitions */
+#define INA209_CONFIGURATION 0x00
+#define INA209_STATUS 0x01
+#define INA209_STATUS_MASK 0x02
+#define INA209_SHUNT_VOLTAGE 0x03
+#define INA209_BUS_VOLTAGE 0x04
+#define INA209_POWER 0x05
+#define INA209_CURRENT 0x06
+#define INA209_SHUNT_VOLTAGE_POS_PEAK 0x07
+#define INA209_SHUNT_VOLTAGE_NEG_PEAK 0x08
+#define INA209_BUS_VOLTAGE_MAX_PEAK 0x09
+#define INA209_BUS_VOLTAGE_MIN_PEAK 0x0a
+#define INA209_POWER_PEAK 0x0b
+#define INA209_SHUNT_VOLTAGE_POS_WARN 0x0c
+#define INA209_SHUNT_VOLTAGE_NEG_WARN 0x0d
+#define INA209_POWER_WARN 0x0e
+#define INA209_BUS_VOLTAGE_OVER_WARN 0x0f
+#define INA209_BUS_VOLTAGE_UNDER_WARN 0x10
+#define INA209_POWER_OVER_LIMIT 0x11
+#define INA209_BUS_VOLTAGE_OVER_LIMIT 0x12
+#define INA209_BUS_VOLTAGE_UNDER_LIMIT 0x13
+#define INA209_CRITICAL_DAC_POS 0x14
+#define INA209_CRITICAL_DAC_NEG 0x15
+#define INA209_CALIBRATION 0x16
+
+#define INA209_REGISTERS 0x17
+
+#define INA209_CONFIG_DEFAULT 0x3c47 /* PGA=8, full range */
+#define INA209_SHUNT_DEFAULT 10000 /* uOhm */
+
+struct ina209_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ u16 regs[INA209_REGISTERS]; /* All chip registers */
+
+ u16 config_orig; /* Original configuration */
+ u16 calibration_orig; /* Original calibration */
+ u16 update_interval;
+};
+
+static struct ina209_data *ina209_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = i2c_get_clientdata(client);
+ struct ina209_data *ret = data;
+ s32 val;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (!data->valid ||
+ time_after(jiffies, data->last_updated + data->update_interval)) {
+ for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
+ val = i2c_smbus_read_word_swapped(client, i);
+ if (val < 0) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->regs[i] = val;
+ }
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/*
+ * Read a value from a device register and convert it to the
+ * appropriate sysfs units
+ */
+static long ina209_from_reg(const u8 reg, const u16 val)
+{
+ switch (reg) {
+ case INA209_SHUNT_VOLTAGE:
+ case INA209_SHUNT_VOLTAGE_POS_PEAK:
+ case INA209_SHUNT_VOLTAGE_NEG_PEAK:
+ case INA209_SHUNT_VOLTAGE_POS_WARN:
+ case INA209_SHUNT_VOLTAGE_NEG_WARN:
+ /* LSB=10 uV. Convert to mV. */
+ return DIV_ROUND_CLOSEST(val, 100);
+
+ case INA209_BUS_VOLTAGE:
+ case INA209_BUS_VOLTAGE_MAX_PEAK:
+ case INA209_BUS_VOLTAGE_MIN_PEAK:
+ case INA209_BUS_VOLTAGE_OVER_WARN:
+ case INA209_BUS_VOLTAGE_UNDER_WARN:
+ case INA209_BUS_VOLTAGE_OVER_LIMIT:
+ case INA209_BUS_VOLTAGE_UNDER_LIMIT:
+ /* LSB=4 mV, last 3 bits unused */
+ return (val >> 3) * 4;
+
+ case INA209_CRITICAL_DAC_POS:
+ /* LSB=1 mV, in the upper 8 bits */
+ return val >> 8;
+
+ case INA209_CRITICAL_DAC_NEG:
+ /* LSB=1 mV, in the upper 8 bits */
+ return -1 * (val >> 8);
+
+ case INA209_POWER:
+ case INA209_POWER_PEAK:
+ case INA209_POWER_WARN:
+ case INA209_POWER_OVER_LIMIT:
+ /* LSB=20 mW. Convert to uW */
+ return val * 20 * 1000L;
+
+ case INA209_CURRENT:
+ /* LSB=1 mA (selected via calibration); value is already in mA */
+ return val;
+ }
+
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+/*
+ * Take a value and convert it to register format, clamping the value
+ * to the appropriate range.
+ */
+static int ina209_to_reg(u8 reg, u16 old, long val)
+{
+ switch (reg) {
+ case INA209_SHUNT_VOLTAGE_POS_WARN:
+ case INA209_SHUNT_VOLTAGE_NEG_WARN:
+ /* Limit to +- 320 mV, 10 uV LSB */
+ return clamp_val(val, -320, 320) * 100;
+
+ case INA209_BUS_VOLTAGE_OVER_WARN:
+ case INA209_BUS_VOLTAGE_UNDER_WARN:
+ case INA209_BUS_VOLTAGE_OVER_LIMIT:
+ case INA209_BUS_VOLTAGE_UNDER_LIMIT:
+ /*
+ * Limit to 0-32000 mV, 4 mV LSB
+ *
+ * The last three bits aren't part of the value, but we'll
+ * preserve them in their original state.
+ */
+ return (DIV_ROUND_CLOSEST(clamp_val(val, 0, 32000), 4) << 3)
+ | (old & 0x7);
+
+ case INA209_CRITICAL_DAC_NEG:
+ /*
+ * Limit to -255..0 mV, 1 mV LSB
+ * Convert the value to a positive value for the register
+ *
+ * The value lives in the top 8 bits only; preserve the
+ * original value of the remaining bits.
+ */
+ return (clamp_val(-val, 0, 255) << 8) | (old & 0xff);
+
+ case INA209_CRITICAL_DAC_POS:
+ /*
+ * Limit to 0-255 mV, 1 mV LSB
+ *
+ * The value lives in the top 8 bits only; preserve the
+ * original value of the remaining bits.
+ */
+ return (clamp_val(val, 0, 255) << 8) | (old & 0xff);
+
+ case INA209_POWER_WARN:
+ case INA209_POWER_OVER_LIMIT:
+ /* 20 mW LSB */
+ return DIV_ROUND_CLOSEST(val, 20 * 1000);
+ }
+
+ /* Other registers are read-only, return access error */
+ return -EACCES;
+}
+
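+/*
+ * The update interval is encoded in the bus/shunt ADC conversion time
+ * fields of the configuration register (bits 3..6 and 7..10). Per the
+ * datasheet the slowest setting (128-sample averaging) takes roughly
+ * 68 ms, and each step down halves the conversion time.
+ */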
+static int ina209_interval_from_reg(u16 reg)
+{
+ return 68 >> (15 - ((reg >> 3) & 0x0f));
+}
+
+static u16 ina209_reg_from_interval(u16 config, long interval)
+{
+ int i, adc;
+
+ if (interval <= 0) {
+ adc = 8;
+ } else {
+ adc = 15;
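+ /*
+ * Pick the conversion time closest to the requested interval:
+ * thresholds start at 51 ms (midway between the 68 ms and 34 ms
+ * settings) and halve together with the candidate intervals.
+ */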
+ for (i = 34 + 34 / 2; i; i >>= 1) {
+ if (i < interval)
+ break;
+ adc--;
+ }
+ }
+ return (config & 0xf807) | (adc << 3) | (adc << 7);
+}
+
+static ssize_t ina209_set_interval(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = ina209_update_device(dev);
+ long val;
+ u16 regval;
+ int ret;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
+ val);
+ i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
+ data->regs[INA209_CONFIGURATION] = regval;
+ data->update_interval = ina209_interval_from_reg(regval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t ina209_show_interval(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = i2c_get_clientdata(client);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
+}
+
+/*
+ * History is reset by writing 1 into bit 0 of the respective peak register.
+ * Since more than one peak register may be affected by the scope of a
+ * reset_history attribute write, use a bit mask in attr->index to identify
+ * which registers are affected.
+ */
+static u16 ina209_reset_history_regs[] = {
+ INA209_SHUNT_VOLTAGE_POS_PEAK,
+ INA209_SHUNT_VOLTAGE_NEG_PEAK,
+ INA209_BUS_VOLTAGE_MAX_PEAK,
+ INA209_BUS_VOLTAGE_MIN_PEAK,
+ INA209_POWER_PEAK
+};
+
+static ssize_t ina209_reset_history(struct device *dev,
+ struct device_attribute *da,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ u32 mask = attr->index;
+ long val;
+ int i, ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ for (i = 0; i < ARRAY_SIZE(ina209_reset_history_regs); i++) {
+ if (mask & (1 << i))
+ i2c_smbus_write_word_swapped(client,
+ ina209_reset_history_regs[i], 1);
+ }
+ data->valid = false;
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t ina209_set_value(struct device *dev,
+ struct device_attribute *da,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = ina209_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int reg = attr->index;
+ long val;
+ int ret;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ ret = ina209_to_reg(reg, data->regs[reg], val);
+ if (ret < 0) {
+ count = ret;
+ goto abort;
+ }
+ i2c_smbus_write_word_swapped(client, reg, ret);
+ data->regs[reg] = ret;
+abort:
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t ina209_show_value(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina209_data *data = ina209_update_device(dev);
+ long val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = ina209_from_reg(attr->index, data->regs[attr->index]);
+ return snprintf(buf, PAGE_SIZE, "%ld\n", val);
+}
+
+static ssize_t ina209_show_alarm(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina209_data *data = ina209_update_device(dev);
+ const unsigned int mask = attr->index;
+ u16 status;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ status = data->regs[INA209_STATUS];
+
+ /*
+ * All alarms are in the INA209_STATUS register. To avoid a long
+ * switch statement, the mask is passed in attr->index
+ */
+ return snprintf(buf, PAGE_SIZE, "%u\n", !!(status & mask));
+}
+
+/* Shunt voltage, history, limits, alarms */
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina209_show_value, NULL,
+ INA209_SHUNT_VOLTAGE);
+static SENSOR_DEVICE_ATTR(in0_input_highest, S_IRUGO, ina209_show_value, NULL,
+ INA209_SHUNT_VOLTAGE_POS_PEAK);
+static SENSOR_DEVICE_ATTR(in0_input_lowest, S_IRUGO, ina209_show_value, NULL,
+ INA209_SHUNT_VOLTAGE_NEG_PEAK);
+static SENSOR_DEVICE_ATTR(in0_reset_history, S_IWUSR, NULL,
+ ina209_reset_history, (1 << 0) | (1 << 1));
+static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_SHUNT_VOLTAGE_POS_WARN);
+static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_SHUNT_VOLTAGE_NEG_WARN);
+static SENSOR_DEVICE_ATTR(in0_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_CRITICAL_DAC_POS);
+static SENSOR_DEVICE_ATTR(in0_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_CRITICAL_DAC_NEG);
+
+static SENSOR_DEVICE_ATTR(in0_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 11);
+static SENSOR_DEVICE_ATTR(in0_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 12);
+static SENSOR_DEVICE_ATTR(in0_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 6);
+static SENSOR_DEVICE_ATTR(in0_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 7);
+
+/* Bus voltage, history, limits, alarms */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ina209_show_value, NULL,
+ INA209_BUS_VOLTAGE);
+static SENSOR_DEVICE_ATTR(in1_input_highest, S_IRUGO, ina209_show_value, NULL,
+ INA209_BUS_VOLTAGE_MAX_PEAK);
+static SENSOR_DEVICE_ATTR(in1_input_lowest, S_IRUGO, ina209_show_value, NULL,
+ INA209_BUS_VOLTAGE_MIN_PEAK);
+static SENSOR_DEVICE_ATTR(in1_reset_history, S_IWUSR, NULL,
+ ina209_reset_history, (1 << 2) | (1 << 3));
+static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_BUS_VOLTAGE_OVER_WARN);
+static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_BUS_VOLTAGE_UNDER_WARN);
+static SENSOR_DEVICE_ATTR(in1_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_BUS_VOLTAGE_OVER_LIMIT);
+static SENSOR_DEVICE_ATTR(in1_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_BUS_VOLTAGE_UNDER_LIMIT);
+
+static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 14);
+static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 15);
+static SENSOR_DEVICE_ATTR(in1_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 9);
+static SENSOR_DEVICE_ATTR(in1_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 10);
+
+/* Power */
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina209_show_value, NULL,
+ INA209_POWER);
+static SENSOR_DEVICE_ATTR(power1_input_highest, S_IRUGO, ina209_show_value,
+ NULL, INA209_POWER_PEAK);
+static SENSOR_DEVICE_ATTR(power1_reset_history, S_IWUSR, NULL,
+ ina209_reset_history, 1 << 4);
+static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_POWER_WARN);
+static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_POWER_OVER_LIMIT);
+
+static SENSOR_DEVICE_ATTR(power1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 13);
+static SENSOR_DEVICE_ATTR(power1_crit_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 8);
+
+/* Current */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina209_show_value, NULL,
+ INA209_CURRENT);
+
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+ ina209_show_interval, ina209_set_interval, 0);
+
+/*
+ * Finally, construct an array of pointers to members of the above objects,
+ * as required for sysfs_create_group()
+ */
+static struct attribute *ina209_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_input_highest.dev_attr.attr,
+ &sensor_dev_attr_in0_input_lowest.dev_attr.attr,
+ &sensor_dev_attr_in0_reset_history.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_max.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in0_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_min_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input_highest.dev_attr.attr,
+ &sensor_dev_attr_in1_input_lowest.dev_attr.attr,
+ &sensor_dev_attr_in1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_max.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_min_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input_highest.dev_attr.attr,
+ &sensor_dev_attr_power1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_power1_max.dev_attr.attr,
+ &sensor_dev_attr_power1_crit.dev_attr.attr,
+ &sensor_dev_attr_power1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_power1_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group ina209_group = {
+ .attrs = ina209_attributes,
+};
+
+static void ina209_restore_conf(struct i2c_client *client,
+ struct ina209_data *data)
+{
+ /* Restore initial configuration */
+ i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION,
+ data->config_orig);
+ i2c_smbus_write_word_swapped(client, INA209_CALIBRATION,
+ data->calibration_orig);
+}
+
+static int ina209_init_client(struct i2c_client *client,
+ struct ina209_data *data)
+{
+ struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
+ u32 shunt;
+ int reg;
+
+ reg = i2c_smbus_read_word_swapped(client, INA209_CALIBRATION);
+ if (reg < 0)
+ return reg;
+ data->calibration_orig = reg;
+
+ reg = i2c_smbus_read_word_swapped(client, INA209_CONFIGURATION);
+ if (reg < 0)
+ return reg;
+ data->config_orig = reg;
+
+ if (pdata) {
+ if (pdata->shunt_uohms <= 0)
+ return -EINVAL;
+ shunt = pdata->shunt_uohms;
+ } else if (!of_property_read_u32(client->dev.of_node, "shunt-resistor",
+ &shunt)) {
+ if (shunt == 0)
+ return -EINVAL;
+ } else {
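+ /*
+ * Neither platform data nor a DT property: derive the shunt
+ * value from the chip's existing calibration (which assumes a
+ * 1 mA current LSB), or fall back to the 10 mOhm default if
+ * the chip is uncalibrated.
+ */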
+ shunt = data->calibration_orig ?
+ 40960000 / data->calibration_orig : INA209_SHUNT_DEFAULT;
+ }
+
+ i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION,
+ INA209_CONFIG_DEFAULT);
+ data->update_interval = ina209_interval_from_reg(INA209_CONFIG_DEFAULT);
+
+ /*
+ * Calibrate current LSB to 1mA. Shunt is in uOhms.
+ * See equation 13 in datasheet.
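+ * With a current LSB of 1 mA and the shunt resistance in uOhm,
+ * equation 13 reduces to calibration = 40960000 / shunt.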
+ */
+ i2c_smbus_write_word_swapped(client, INA209_CALIBRATION,
+ clamp_val(40960000 / shunt, 1, 65535));
+
+ /* Clear status register */
+ i2c_smbus_read_word_swapped(client, INA209_STATUS);
+
+ return 0;
+}
+
+static int ina209_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct ina209_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ ret = ina209_init_client(client, data);
+ if (ret)
+ return ret;
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &ina209_group);
+ if (ret)
+ goto out_restore_conf;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_hwmon_device_register;
+ }
+
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &ina209_group);
+out_restore_conf:
+ ina209_restore_conf(client, data);
+ return ret;
+}
+
+static int ina209_remove(struct i2c_client *client)
+{
+ struct ina209_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &ina209_group);
+ ina209_restore_conf(client, data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ina209_id[] = {
+ { "ina209", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ina209_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ina209_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "ina209",
+ },
+ .probe = ina209_probe,
+ .remove = ina209_remove,
+ .id_table = ina209_id,
+};
+
+module_i2c_driver(ina209_driver);
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>, Paul Hays <Paul.Hays@cattail.ca>, Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("INA209 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index d32aa354cbdf..37fc980fde24 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -19,6 +19,8 @@
* IT8726F Super I/O chip w/LPC interface
* IT8728F Super I/O chip w/LPC interface
* IT8758E Super I/O chip w/LPC interface
+ * IT8771E Super I/O chip w/LPC interface
+ * IT8772E Super I/O chip w/LPC interface
* IT8782F Super I/O chip w/LPC interface
* IT8783E/F Super I/O chip w/LPC interface
* Sis950 A clone of the IT8705F
@@ -61,8 +63,8 @@
#define DRVNAME "it87"
-enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8782,
- it8783 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
+ it8772, it8782, it8783 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -140,6 +142,8 @@ static inline void superio_exit(void)
#define IT8721F_DEVID 0x8721
#define IT8726F_DEVID 0x8726
#define IT8728F_DEVID 0x8728
+#define IT8771E_DEVID 0x8771
+#define IT8772E_DEVID 0x8772
#define IT8782F_DEVID 0x8782
#define IT8783E_DEVID 0x8783
#define IT87_ACT_REG 0x30
@@ -203,6 +207,8 @@ static const u8 IT87_REG_FAN[] = { 0x0d, 0x0e, 0x0f, 0x80, 0x82 };
static const u8 IT87_REG_FAN_MIN[] = { 0x10, 0x11, 0x12, 0x84, 0x86 };
static const u8 IT87_REG_FANX[] = { 0x18, 0x19, 0x1a, 0x81, 0x83 };
static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
+static const u8 IT87_REG_TEMP_OFFSET[] = { 0x56, 0x57, 0x59 };
+
#define IT87_REG_FAN_MAIN_CTRL 0x13
#define IT87_REG_FAN_CTL 0x14
#define IT87_REG_PWM(nr) (0x15 + (nr))
@@ -226,6 +232,101 @@ static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
#define IT87_REG_AUTO_TEMP(nr, i) (0x60 + (nr) * 8 + (i))
#define IT87_REG_AUTO_PWM(nr, i) (0x65 + (nr) * 8 + (i))
+struct it87_devices {
+ const char *name;
+ u16 features;
+ u8 peci_mask;
+ u8 old_peci_mask;
+};
+
+#define FEAT_12MV_ADC (1 << 0)
+#define FEAT_NEWER_AUTOPWM (1 << 1)
+#define FEAT_OLD_AUTOPWM (1 << 2)
+#define FEAT_16BIT_FANS (1 << 3)
+#define FEAT_TEMP_OFFSET (1 << 4)
+#define FEAT_TEMP_PECI (1 << 5)
+#define FEAT_TEMP_OLD_PECI (1 << 6)
+
+static const struct it87_devices it87_devices[] = {
+ [it87] = {
+ .name = "it87",
+ .features = FEAT_OLD_AUTOPWM, /* may need to overwrite */
+ },
+ [it8712] = {
+ .name = "it8712",
+ .features = FEAT_OLD_AUTOPWM, /* may need to overwrite */
+ },
+ [it8716] = {
+ .name = "it8716",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET,
+ },
+ [it8718] = {
+ .name = "it8718",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8720] = {
+ .name = "it8720",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8721] = {
+ .name = "it8721",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI,
+ .peci_mask = 0x05,
+ .old_peci_mask = 0x02, /* Actually reports PCH */
+ },
+ [it8728] = {
+ .name = "it8728",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ .peci_mask = 0x07,
+ },
+ [it8771] = {
+ .name = "it8771",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ /* PECI: guesswork */
+ /* 12mV ADC (OHM) */
+ /* 16 bit fans (OHM) */
+ .peci_mask = 0x07,
+ },
+ [it8772] = {
+ .name = "it8772",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ /* PECI (coreboot) */
+ /* 12mV ADC (HWSensors4, OHM) */
+ /* 16 bit fans (HWSensors4, OHM) */
+ .peci_mask = 0x07,
+ },
+ [it8782] = {
+ .name = "it8782",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8783] = {
+ .name = "it8783",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+};
+
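+/*
+ * has_temp_peci() and has_temp_old_peci() take a 0-based temperature
+ * channel; peci_mask and old_peci_mask list the channels on which an
+ * Intel PECI reading can be selected as the sensor type.
+ */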
+#define has_16bit_fans(data) ((data)->features & FEAT_16BIT_FANS)
+#define has_12mv_adc(data) ((data)->features & FEAT_12MV_ADC)
+#define has_newer_autopwm(data) ((data)->features & FEAT_NEWER_AUTOPWM)
+#define has_old_autopwm(data) ((data)->features & FEAT_OLD_AUTOPWM)
+#define has_temp_offset(data) ((data)->features & FEAT_TEMP_OFFSET)
+#define has_temp_peci(data, nr) (((data)->features & FEAT_TEMP_PECI) && \
+ ((data)->peci_mask & (1 << nr)))
+#define has_temp_old_peci(data, nr) \
+ (((data)->features & FEAT_TEMP_OLD_PECI) && \
+ ((data)->old_peci_mask & (1 << nr)))
struct it87_sio_data {
enum chips type;
@@ -249,7 +350,9 @@ struct it87_sio_data {
struct it87_data {
struct device *hwmon_dev;
enum chips type;
- u8 revision;
+ u16 features;
+ u8 peci_mask;
+ u8 old_peci_mask;
unsigned short addr;
const char *name;
@@ -258,17 +361,13 @@ struct it87_data {
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
- u8 in[9]; /* Register value */
- u8 in_max[8]; /* Register value */
- u8 in_min[8]; /* Register value */
+ u8 in[9][3]; /* [nr][0]=in, [1]=min, [2]=max */
u8 has_fan; /* Bitfield, fans enabled */
- u16 fan[5]; /* Register values, possibly combined */
- u16 fan_min[5]; /* Register values, possibly combined */
+ u16 fan[5][2]; /* Register values, [nr][0]=fan, [1]=min */
u8 has_temp; /* Bitfield, temp sensors enabled */
- s8 temp[3]; /* Register value */
- s8 temp_high[3]; /* Register value */
- s8 temp_low[3]; /* Register value */
- u8 sensor; /* Register value */
+ s8 temp[3][4]; /* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
+ u8 sensor; /* Register value (IT87_REG_TEMP_ENABLE) */
+ u8 extra; /* Register value (IT87_REG_TEMP_EXTRA) */
u8 fan_div[3]; /* Register encoding, shifted right */
u8 vid; /* Register encoding, combined */
u8 vrm;
@@ -296,26 +395,6 @@ struct it87_data {
s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
};
-static inline int has_12mv_adc(const struct it87_data *data)
-{
- /*
- * IT8721F and later have a 12 mV ADC, also with internal scaling
- * on selected inputs.
- */
- return data->type == it8721
- || data->type == it8728;
-}
-
-static inline int has_newer_autopwm(const struct it87_data *data)
-{
- /*
- * IT8721F and later have separate registers for the temperature
- * mapping and the manual duty cycle.
- */
- return data->type == it8721
- || data->type == it8728;
-}
-
static int adc_lsb(const struct it87_data *data, int nr)
{
int lsb = has_12mv_adc(data) ? 12 : 16;
@@ -327,7 +406,7 @@ static int adc_lsb(const struct it87_data *data, int nr)
static u8 in_to_reg(const struct it87_data *data, int nr, long val)
{
val = DIV_ROUND_CLOSEST(val, adc_lsb(data, nr));
- return SENSORS_LIMIT(val, 0, 255);
+ return clamp_val(val, 0, 255);
}
static int in_from_reg(const struct it87_data *data, int nr, int val)
@@ -339,16 +418,15 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
- 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static inline u16 FAN16_TO_REG(long rpm)
{
if (rpm == 0)
return 0xffff;
- return SENSORS_LIMIT((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
+ return clamp_val((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 255 ? 0 : \
@@ -357,8 +435,8 @@ static inline u16 FAN16_TO_REG(long rpm)
#define FAN16_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
1350000 / ((val) * 2))
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (((val) - 500) / 1000) : \
- ((val) + 500) / 1000), -128, 127))
+#define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (((val) - 500) / 1000) : \
+ ((val) + 500) / 1000), -128, 127))
#define TEMP_FROM_REG(val) ((val) * 1000)
static u8 pwm_to_reg(const struct it87_data *data, long val)
@@ -398,35 +476,6 @@ static const unsigned int pwm_freq[8] = {
750000 / 128,
};
-static inline int has_16bit_fans(const struct it87_data *data)
-{
- /*
- * IT8705F Datasheet 0.4.1, 3h == Version G.
- * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
- * These are the first revisions with 16-bit tachometer support.
- */
- return (data->type == it87 && data->revision >= 0x03)
- || (data->type == it8712 && data->revision >= 0x08)
- || data->type == it8716
- || data->type == it8718
- || data->type == it8720
- || data->type == it8721
- || data->type == it8728
- || data->type == it8782
- || data->type == it8783;
-}
-
-static inline int has_old_autopwm(const struct it87_data *data)
-{
- /*
- * The old automatic fan speed control interface is implemented
- * by IT8705F chips up to revision F and IT8712F chips up to
- * revision G.
- */
- return (data->type == it87 && data->revision < 0x03)
- || (data->type == it8712 && data->revision < 0x08);
-}
-
static int it87_probe(struct platform_device *pdev);
static int it87_remove(struct platform_device *pdev);
@@ -447,59 +496,22 @@ static struct platform_driver it87_driver = {
};
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr]));
+ return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr][index]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in_min[nr]));
-}
-
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in_max[nr]));
-}
-
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = dev_get_drvdata(dev);
- unsigned long val;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->in_min[nr] = in_to_reg(data, nr, val);
- it87_write_value(data, IT87_REG_VIN_MIN(nr),
- data->in_min[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_in(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -508,140 +520,167 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->in_max[nr] = in_to_reg(data, nr, val);
- it87_write_value(data, IT87_REG_VIN_MAX(nr),
- data->in_max[nr]);
+ data->in[nr][index] = in_to_reg(data, nr, val);
+ it87_write_value(data,
+ index == 1 ? IT87_REG_VIN_MIN(nr)
+ : IT87_REG_VIN_MAX(nr),
+ data->in[nr][index]);
mutex_unlock(&data->update_lock);
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset);
-
-#define limit_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-limit_in_offset(0);
-show_in_offset(1);
-limit_in_offset(1);
-show_in_offset(2);
-limit_in_offset(2);
-show_in_offset(3);
-limit_in_offset(3);
-show_in_offset(4);
-limit_in_offset(4);
-show_in_offset(5);
-limit_in_offset(5);
-show_in_offset(6);
-limit_in_offset(6);
-show_in_offset(7);
-limit_in_offset(7);
-show_in_offset(8);
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 0, 1);
+static SENSOR_DEVICE_ATTR_2(in0_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 0, 2);
+
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 1, 1);
+static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 1, 2);
+
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 2, 1);
+static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 2, 2);
+
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 3, 1);
+static SENSOR_DEVICE_ATTR_2(in3_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 3, 2);
+
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 4, 1);
+static SENSOR_DEVICE_ATTR_2(in4_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 4, 2);
+
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 5, 1);
+static SENSOR_DEVICE_ATTR_2(in5_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 5, 2);
+
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 6, 1);
+static SENSOR_DEVICE_ATTR_2(in6_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 6, 2);
+
+static SENSOR_DEVICE_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 7, 0);
+static SENSOR_DEVICE_ATTR_2(in7_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 7, 1);
+static SENSOR_DEVICE_ATTR_2(in7_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 7, 2);
+
+static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
/* 3 temperatures */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
-}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_high[nr]));
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr][index]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_low[nr]));
-}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
+ u8 reg, regval;
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
- data->temp_high[nr] = TEMP_TO_REG(val);
- it87_write_value(data, IT87_REG_TEMP_HIGH(nr), data->temp_high[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = dev_get_drvdata(dev);
- long val;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
+ switch (index) {
+ default:
+ case 1:
+ reg = IT87_REG_TEMP_LOW(nr);
+ break;
+ case 2:
+ reg = IT87_REG_TEMP_HIGH(nr);
+ break;
+ case 3:
+ regval = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+ if (!(regval & 0x80)) {
+ regval |= 0x80;
+ it87_write_value(data, IT87_REG_BEEP_ENABLE, regval);
+ }
+ data->valid = 0;
+ reg = IT87_REG_TEMP_OFFSET[nr];
+ break;
+ }
- mutex_lock(&data->update_lock);
- data->temp_low[nr] = TEMP_TO_REG(val);
- it87_write_value(data, IT87_REG_TEMP_LOW(nr), data->temp_low[nr]);
+ data->temp[nr][index] = TEMP_TO_REG(val);
+ it87_write_value(data, reg, data->temp[nr][index]);
mutex_unlock(&data->update_lock);
return count;
}
-#define show_temp_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1);
-
-show_temp_offset(1);
-show_temp_offset(2);
-show_temp_offset(3);
-
-static ssize_t show_sensor(struct device *dev, struct device_attribute *attr,
- char *buf)
+
+static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, 1);
+static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, 2);
+static SENSOR_DEVICE_ATTR_2(temp1_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 0, 3);
+static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, 2);
+static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 1, 3);
+static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, 1);
+static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 2, 3);
+
+static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
u8 reg = data->sensor; /* In case value is updated while used */
+ u8 extra = data->extra;
+ if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1))
+ || (has_temp_old_peci(data, nr) && (extra & 0x80)))
+ return sprintf(buf, "6\n"); /* Intel PECI */
if (reg & (1 << nr))
return sprintf(buf, "3\n"); /* thermal diode */
if (reg & (8 << nr))
return sprintf(buf, "4\n"); /* thermistor */
return sprintf(buf, "0\n"); /* disabled */
}
-static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+
+static ssize_t set_temp_type(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
- u8 reg;
+ u8 reg, extra;
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
@@ -649,33 +688,45 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
reg &= ~(1 << nr);
reg &= ~(8 << nr);
+ if (has_temp_peci(data, nr) && (reg >> 6 == nr + 1 || val == 6))
+ reg &= 0x3f;
+ extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
+ if (has_temp_old_peci(data, nr) && ((extra & 0x80) || val == 6))
+ extra &= 0x7f;
if (val == 2) { /* backwards compatibility */
- dev_warn(dev, "Sensor type 2 is deprecated, please use 4 "
- "instead\n");
+ dev_warn(dev,
+ "Sensor type 2 is deprecated, please use 4 instead\n");
val = 4;
}
- /* 3 = thermal diode; 4 = thermistor; 0 = disabled */
+ /* 3 = thermal diode; 4 = thermistor; 6 = Intel PECI; 0 = disabled */
if (val == 3)
reg |= 1 << nr;
else if (val == 4)
reg |= 8 << nr;
+ else if (has_temp_peci(data, nr) && val == 6)
+ reg |= (nr + 1) << 6;
+ else if (has_temp_old_peci(data, nr) && val == 6)
+ extra |= 0x80;
else if (val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
data->sensor = reg;
+ data->extra = extra;
it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
+ if (has_temp_old_peci(data, nr))
+ it87_write_value(data, IT87_REG_TEMP_EXTRA, data->extra);
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
-#define show_sensor_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \
- show_sensor, set_sensor, offset - 1);
-show_sensor_offset(1);
-show_sensor_offset(2);
-show_sensor_offset(3);
+static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 0);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 1);
+static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 2);
/* 3 Fans */
@@ -692,25 +743,21 @@ static int pwm_mode(const struct it87_data *data, int nr)
}
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
+ int speed;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
- DIV_FROM_REG(data->fan_div[nr])));
-}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
- DIV_FROM_REG(data->fan_div[nr])));
+ speed = has_16bit_fans(data) ?
+ FAN16_FROM_REG(data->fan[nr][index]) :
+ FAN_FROM_REG(data->fan[nr][index],
+ DIV_FROM_REG(data->fan_div[nr]));
+ return sprintf(buf, "%d\n", speed);
}
+
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -747,11 +794,13 @@ static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", pwm_freq[index]);
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+
+static ssize_t set_fan(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
@@ -761,24 +810,36 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- reg = it87_read_value(data, IT87_REG_FAN_DIV);
- switch (nr) {
- case 0:
- data->fan_div[nr] = reg & 0x07;
- break;
- case 1:
- data->fan_div[nr] = (reg >> 3) & 0x07;
- break;
- case 2:
- data->fan_div[nr] = (reg & 0x40) ? 3 : 1;
- break;
+
+ if (has_16bit_fans(data)) {
+ data->fan[nr][index] = FAN16_TO_REG(val);
+ it87_write_value(data, IT87_REG_FAN_MIN[nr],
+ data->fan[nr][index] & 0xff);
+ it87_write_value(data, IT87_REG_FANX_MIN[nr],
+ data->fan[nr][index] >> 8);
+ } else {
+ reg = it87_read_value(data, IT87_REG_FAN_DIV);
+ switch (nr) {
+ case 0:
+ data->fan_div[nr] = reg & 0x07;
+ break;
+ case 1:
+ data->fan_div[nr] = (reg >> 3) & 0x07;
+ break;
+ case 2:
+ data->fan_div[nr] = (reg & 0x40) ? 3 : 1;
+ break;
+ }
+ data->fan[nr][index] =
+ FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
+ it87_write_value(data, IT87_REG_FAN_MIN[nr],
+ data->fan[nr][index]);
}
- data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
+
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -797,7 +858,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
old = it87_read_value(data, IT87_REG_FAN_DIV);
/* Save fan min limit */
- min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
+ min = FAN_FROM_REG(data->fan[nr][1], DIV_FROM_REG(data->fan_div[nr]));
switch (nr) {
case 0:
@@ -818,8 +879,8 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
it87_write_value(data, IT87_REG_FAN_DIV, val);
/* Restore fan min limit */
- data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
- it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan_min[nr]);
+ data->fan[nr][1] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
+ it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan[nr][1]);
mutex_unlock(&data->update_lock);
return count;
@@ -843,8 +904,8 @@ static int check_trip_points(struct device *dev, int nr)
}
if (err) {
- dev_err(dev, "Inconsistent trip points, not switching to "
- "automatic mode\n");
+ dev_err(dev,
+ "Inconsistent trip points, not switching to automatic mode\n");
dev_err(dev, "Adjust the trip points and try again\n");
}
return err;
@@ -1092,118 +1153,106 @@ static ssize_t set_auto_temp(struct device *dev,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
-
-#define show_pwm_offset(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- show_pwm_enable, set_pwm_enable, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- show_pwm, set_pwm, offset - 1); \
-static DEVICE_ATTR(pwm##offset##_freq, \
- (offset == 1 ? S_IRUGO | S_IWUSR : S_IRUGO), \
- show_pwm_freq, (offset == 1 ? set_pwm_freq : NULL)); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_channels_temp, \
- S_IRUGO | S_IWUSR, show_pwm_temp_map, set_pwm_temp_map, \
- offset - 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 0); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point2_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point3_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 2); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point4_pwm, \
- S_IRUGO, show_auto_pwm, NULL, offset - 1, 3); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_temp_hyst, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 0); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point2_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 2); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point3_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 3); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point4_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 4);
-
-show_pwm_offset(1);
-show_pwm_offset(2);
-show_pwm_offset(3);
-
-/* A different set of callbacks for 16-bit fans */
-static ssize_t show_fan16(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN16_FROM_REG(data->fan[nr]));
-}
-
-static ssize_t show_fan16_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN16_FROM_REG(data->fan_min[nr]));
-}
-
-static ssize_t set_fan16_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = dev_get_drvdata(dev);
- long val;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->fan_min[nr] = FAN16_TO_REG(val);
- it87_write_value(data, IT87_REG_FAN_MIN[nr],
- data->fan_min[nr] & 0xff);
- it87_write_value(data, IT87_REG_FANX_MIN[nr],
- data->fan_min[nr] >> 8);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-/*
- * We want to use the same sysfs file names as 8-bit fans, but we need
- * different variable names, so we have to use SENSOR_ATTR instead of
- * SENSOR_DEVICE_ATTR.
- */
-#define show_fan16_offset(offset) \
-static struct sensor_device_attribute sensor_dev_attr_fan##offset##_input16 \
- = SENSOR_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan16, NULL, offset - 1); \
-static struct sensor_device_attribute sensor_dev_attr_fan##offset##_min16 \
- = SENSOR_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan16_min, set_fan16_min, offset - 1)
-
-show_fan16_offset(1);
-show_fan16_offset(2);
-show_fan16_offset(3);
-show_fan16_offset(4);
-show_fan16_offset(5);
+static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 0, 1);
+static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 0);
+
+static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 1, 1);
+static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 1);
+
+static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 2, 1);
+static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 2);
+
+static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 3, 1);
+
+static SENSOR_DEVICE_ATTR_2(fan5_input, S_IRUGO, show_fan, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(fan5_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 4, 1);
+
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
+static DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq);
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 0, 3);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 3);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 4);
+
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
+static DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, NULL);
+static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 2);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 1, 3);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 2);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 3);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 4);
+
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 2);
+static DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL);
+static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 4);
/* Alarms */
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
@@ -1471,6 +1520,12 @@ static const struct attribute_group it87_group_temp[3] = {
{ .attrs = it87_attributes_temp[2] },
};
+static struct attribute *it87_attributes_temp_offset[] = {
+ &sensor_dev_attr_temp1_offset.dev_attr.attr,
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
+ &sensor_dev_attr_temp3_offset.dev_attr.attr,
+};
+
static struct attribute *it87_attributes[] = {
&dev_attr_alarms.attr,
&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
@@ -1500,73 +1555,47 @@ static struct attribute *it87_attributes_temp_beep[] = {
&sensor_dev_attr_temp3_beep.dev_attr.attr,
};
-static struct attribute *it87_attributes_fan16[5][3+1] = { {
- &sensor_dev_attr_fan1_input16.dev_attr.attr,
- &sensor_dev_attr_fan1_min16.dev_attr.attr,
+static struct attribute *it87_attributes_fan[5][3+1] = { {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan2_input16.dev_attr.attr,
- &sensor_dev_attr_fan2_min16.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan3_input16.dev_attr.attr,
- &sensor_dev_attr_fan3_min16.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan4_input16.dev_attr.attr,
- &sensor_dev_attr_fan4_min16.dev_attr.attr,
+ &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan5_input16.dev_attr.attr,
- &sensor_dev_attr_fan5_min16.dev_attr.attr,
+ &sensor_dev_attr_fan5_input.dev_attr.attr,
+ &sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
NULL
} };
-static const struct attribute_group it87_group_fan16[5] = {
- { .attrs = it87_attributes_fan16[0] },
- { .attrs = it87_attributes_fan16[1] },
- { .attrs = it87_attributes_fan16[2] },
- { .attrs = it87_attributes_fan16[3] },
- { .attrs = it87_attributes_fan16[4] },
+static const struct attribute_group it87_group_fan[5] = {
+ { .attrs = it87_attributes_fan[0] },
+ { .attrs = it87_attributes_fan[1] },
+ { .attrs = it87_attributes_fan[2] },
+ { .attrs = it87_attributes_fan[3] },
+ { .attrs = it87_attributes_fan[4] },
};
-static struct attribute *it87_attributes_fan[3][4+1] = { {
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_min.dev_attr.attr,
+static const struct attribute *it87_attributes_fan_div[] = {
&sensor_dev_attr_fan1_div.dev_attr.attr,
- &sensor_dev_attr_fan1_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
- &sensor_dev_attr_fan2_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan3_input.dev_attr.attr,
- &sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_div.dev_attr.attr,
- &sensor_dev_attr_fan3_alarm.dev_attr.attr,
- NULL
-} };
-
-static const struct attribute_group it87_group_fan[3] = {
- { .attrs = it87_attributes_fan[0] },
- { .attrs = it87_attributes_fan[1] },
- { .attrs = it87_attributes_fan[2] },
};
-static const struct attribute_group *
-it87_get_fan_group(const struct it87_data *data)
-{
- return has_16bit_fans(data) ? it87_group_fan16 : it87_group_fan;
-}
-
static struct attribute *it87_attributes_pwm[3][4+1] = { {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
@@ -1701,6 +1730,12 @@ static int __init it87_find(unsigned short *address,
case IT8728F_DEVID:
sio_data->type = it8728;
break;
+ case IT8771E_DEVID:
+ sio_data->type = it8771;
+ break;
+ case IT8772E_DEVID:
+ sio_data->type = it8772;
+ break;
case IT8782F_DEVID:
sio_data->type = it8782;
break;
@@ -1818,10 +1853,11 @@ static int __init it87_find(unsigned short *address,
reg = superio_inb(IT87_SIO_GPIO3_REG);
if (sio_data->type == it8721 || sio_data->type == it8728 ||
+ sio_data->type == it8771 || sio_data->type == it8772 ||
sio_data->type == it8782) {
/*
* IT8721F/IT8758E, and IT8782F don't have VID pins
- * at all, not sure about the IT8728F.
+ * at all, not sure about the IT8728F and compatibles.
*/
sio_data->skip_vid = 1;
} else {
@@ -1875,7 +1911,9 @@ static int __init it87_find(unsigned short *address,
if (reg & (1 << 0))
sio_data->internal |= (1 << 0);
if ((reg & (1 << 1)) || sio_data->type == it8721 ||
- sio_data->type == it8728)
+ sio_data->type == it8728 ||
+ sio_data->type == it8771 ||
+ sio_data->type == it8772)
sio_data->internal |= (1 << 1);
/*
@@ -1925,7 +1963,6 @@ static void it87_remove_files(struct device *dev)
{
struct it87_data *data = platform_get_drvdata(pdev);
struct it87_sio_data *sio_data = dev->platform_data;
- const struct attribute_group *fan_group = it87_get_fan_group(data);
int i;
sysfs_remove_group(&dev->kobj, &it87_group);
@@ -1941,6 +1978,9 @@ static void it87_remove_files(struct device *dev)
if (!(data->has_temp & (1 << i)))
continue;
sysfs_remove_group(&dev->kobj, &it87_group_temp[i]);
+ if (has_temp_offset(data))
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_temp_offset[i]);
if (sio_data->beep_pin)
sysfs_remove_file(&dev->kobj,
it87_attributes_temp_beep[i]);
@@ -1948,10 +1988,13 @@ static void it87_remove_files(struct device *dev)
for (i = 0; i < 5; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- sysfs_remove_group(&dev->kobj, &fan_group[i]);
+ sysfs_remove_group(&dev->kobj, &it87_group_fan[i]);
if (sio_data->beep_pin)
sysfs_remove_file(&dev->kobj,
it87_attributes_fan_beep[i]);
+ if (i < 3 && !has_16bit_fans(data))
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_fan_div[i]);
}
for (i = 0; i < 3; i++) {
if (sio_data->skip_pwm & (1 << 0))
@@ -1972,21 +2015,9 @@ static int it87_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
struct it87_sio_data *sio_data = dev->platform_data;
- const struct attribute_group *fan_group;
int err = 0, i;
int enable_pwm_interface;
int fan_beep_need_rw;
- static const char * const names[] = {
- "it87",
- "it8712",
- "it8716",
- "it8718",
- "it8720",
- "it8721",
- "it8728",
- "it8782",
- "it8783",
- };
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
@@ -2003,8 +2034,31 @@ static int it87_probe(struct platform_device *pdev)
data->addr = res->start;
data->type = sio_data->type;
- data->revision = sio_data->revision;
- data->name = names[sio_data->type];
+ data->features = it87_devices[sio_data->type].features;
+ data->peci_mask = it87_devices[sio_data->type].peci_mask;
+ data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
+ data->name = it87_devices[sio_data->type].name;
+ /*
+ * IT8705F Datasheet 0.4.1, 3h == Version G.
+ * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
+ * These are the first revisions with 16-bit tachometer support.
+ */
+ switch (data->type) {
+ case it87:
+ if (sio_data->revision >= 0x03) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_16BIT_FANS;
+ }
+ break;
+ case it8712:
+ if (sio_data->revision >= 0x08) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_16BIT_FANS;
+ }
+ break;
+ default:
+ break;
+ }
/* Now, we do the remaining detection. */
if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
@@ -2068,6 +2122,12 @@ static int it87_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj, &it87_group_temp[i]);
if (err)
goto error;
+ if (has_temp_offset(data)) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_temp_offset[i]);
+ if (err)
+ goto error;
+ }
if (sio_data->beep_pin) {
err = sysfs_create_file(&dev->kobj,
it87_attributes_temp_beep[i]);
@@ -2077,15 +2137,21 @@ static int it87_probe(struct platform_device *pdev)
}
/* Do not create fan files for disabled fans */
- fan_group = it87_get_fan_group(data);
fan_beep_need_rw = 1;
for (i = 0; i < 5; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- err = sysfs_create_group(&dev->kobj, &fan_group[i]);
+ err = sysfs_create_group(&dev->kobj, &it87_group_fan[i]);
if (err)
goto error;
+ if (i < 3 && !has_16bit_fans(data)) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_fan_div[i]);
+ if (err)
+ goto error;
+ }
+
if (sio_data->beep_pin) {
err = sysfs_create_file(&dev->kobj,
it87_attributes_fan_beep[i]);
@@ -2221,8 +2287,8 @@ static int it87_check_pwm(struct device *dev)
* PWM interface).
*/
if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
- dev_info(dev, "Reconfiguring PWM to "
- "active high polarity\n");
+ dev_info(dev,
+ "Reconfiguring PWM to active high polarity\n");
it87_write_value(data, IT87_REG_FAN_CTL,
tmp | 0x87);
for (i = 0; i < 3; i++)
@@ -2232,16 +2298,16 @@ static int it87_check_pwm(struct device *dev)
return 1;
}
- dev_info(dev, "PWM configuration is "
- "too broken to be fixed\n");
+ dev_info(dev,
+ "PWM configuration is too broken to be fixed\n");
}
- dev_info(dev, "Detected broken BIOS "
- "defaults, disabling PWM interface\n");
+ dev_info(dev,
+ "Detected broken BIOS defaults, disabling PWM interface\n");
return 0;
} else if (fix_pwm_polarity) {
- dev_info(dev, "PWM configuration looks "
- "sane, won't touch\n");
+ dev_info(dev,
+ "PWM configuration looks sane, won't touch\n");
}
return 1;
@@ -2389,42 +2455,46 @@ static struct it87_data *it87_update_device(struct device *dev)
it87_read_value(data, IT87_REG_CONFIG) | 0x40);
}
for (i = 0; i <= 7; i++) {
- data->in[i] =
+ data->in[i][0] =
it87_read_value(data, IT87_REG_VIN(i));
- data->in_min[i] =
+ data->in[i][1] =
it87_read_value(data, IT87_REG_VIN_MIN(i));
- data->in_max[i] =
+ data->in[i][2] =
it87_read_value(data, IT87_REG_VIN_MAX(i));
}
/* in8 (battery) has no limit registers */
- data->in[8] = it87_read_value(data, IT87_REG_VIN(8));
+ data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
for (i = 0; i < 5; i++) {
/* Skip disabled fans */
if (!(data->has_fan & (1 << i)))
continue;
- data->fan_min[i] =
+ data->fan[i][1] =
it87_read_value(data, IT87_REG_FAN_MIN[i]);
- data->fan[i] = it87_read_value(data,
+ data->fan[i][0] = it87_read_value(data,
IT87_REG_FAN[i]);
/* Add high byte if in 16-bit mode */
if (has_16bit_fans(data)) {
- data->fan[i] |= it87_read_value(data,
+ data->fan[i][0] |= it87_read_value(data,
IT87_REG_FANX[i]) << 8;
- data->fan_min[i] |= it87_read_value(data,
+ data->fan[i][1] |= it87_read_value(data,
IT87_REG_FANX_MIN[i]) << 8;
}
}
for (i = 0; i < 3; i++) {
if (!(data->has_temp & (1 << i)))
continue;
- data->temp[i] =
+ data->temp[i][0] =
it87_read_value(data, IT87_REG_TEMP(i));
- data->temp_high[i] =
- it87_read_value(data, IT87_REG_TEMP_HIGH(i));
- data->temp_low[i] =
+ data->temp[i][1] =
it87_read_value(data, IT87_REG_TEMP_LOW(i));
+ data->temp[i][2] =
+ it87_read_value(data, IT87_REG_TEMP_HIGH(i));
+ if (has_temp_offset(data))
+ data->temp[i][3] =
+ it87_read_value(data,
+ IT87_REG_TEMP_OFFSET[i]);
}
/* Newer chips don't have clock dividers */
@@ -2448,6 +2518,7 @@ static struct it87_data *it87_update_device(struct device *dev)
it87_update_pwm_ctrl(data, i);
data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+ data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
/*
* The IT8705F does not have VID capability.
* The IT8718F and later don't use IT87_REG_VID for the
@@ -2549,8 +2620,7 @@ static void __exit sm_it87_exit(void)
}
-MODULE_AUTHOR("Chris Gauthron, "
- "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Chris Gauthron, Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
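For reference, the it87_update_device() hunks above assemble the 16-bit tachometer count by OR-ing the FANX high-byte register into the low byte when has_16bit_fans() is true. A minimal stand-alone sketch of that step (the helper name is illustrative, not part of the driver):

#include <linux/types.h>

/* Combine the low and high tachometer bytes read in 16-bit fan mode. */
static inline u16 it87_fan_count16(u8 lsb, u8 msb)
{
	return lsb | ((u16)msb << 8);	/* e.g. lsb 0x34, msb 0x12 -> 0x1234 */
}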
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index e21e43c13156..4a58f130fd4e 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -103,6 +103,9 @@ static const unsigned short normal_i2c[] = {
#define MCP98243_DEVID 0x2100
#define MCP98243_DEVID_MASK 0xfffc
+#define MCP98244_DEVID 0x2200
+#define MCP98244_DEVID_MASK 0xfffc
+
#define MCP9843_DEVID 0x0000 /* Also matches mcp9805 */
#define MCP9843_DEVID_MASK 0xfffe
@@ -147,6 +150,7 @@ static struct jc42_chips jc42_chips[] = {
{ MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
{ MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
{ MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
+ { MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK },
{ MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
{ NXP_MANID, SE97_DEVID, SE97_DEVID_MASK },
{ ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK },
@@ -237,9 +241,9 @@ static struct i2c_driver jc42_driver = {
static u16 jc42_temp_to_reg(int temp, bool extended)
{
- int ntemp = SENSORS_LIMIT(temp,
- extended ? JC42_TEMP_MIN_EXTENDED :
- JC42_TEMP_MIN, JC42_TEMP_MAX);
+ int ntemp = clamp_val(temp,
+ extended ? JC42_TEMP_MIN_EXTENDED :
+ JC42_TEMP_MIN, JC42_TEMP_MAX);
/* convert from 0.001 to 0.0625 resolution */
return (ntemp * 2 / 125) & 0x1fff;
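Most hunks in this series simply replace SENSORS_LIMIT() with clamp_val() from <linux/kernel.h>; both confine a value to [lo, hi] before it is scaled into register units. A hedged sketch of the equivalent open-coded clamp, with the jc42 scaling above worked through for a mid-range value:

/* Open-coded equivalent of clamp_val(), for illustration only. */
#define CLAMP_SKETCH(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

/*
 * 26500 mC is inside the jc42 limits, so it passes through the clamp
 * unchanged, and 26500 * 2 / 125 = 424 (0x1a8), i.e. 26.5 degrees C in
 * the register's 0.0625 degree steps.
 */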
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index eed4d9401788..f644a2e57599 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -209,9 +209,9 @@ static inline int lut_temp_to_reg(struct lm63_data *data, long val)
{
val -= data->temp2_offset;
if (data->lut_temp_highres)
- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127500), 500);
+ return DIV_ROUND_CLOSEST(clamp_val(val, 0, 127500), 500);
else
- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127000), 1000);
+ return DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
}
/*
@@ -415,7 +415,7 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *devattr,
return err;
reg = nr ? LM63_REG_LUT_PWM(nr - 1) : LM63_REG_PWM_VALUE;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm1[nr] = data->pwm_highres ? val :
@@ -700,7 +700,7 @@ static ssize_t set_update_interval(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+ lm63_set_convrate(client, data, clamp_val(val, 0, 100000));
mutex_unlock(&data->update_lock);
return count;
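The lut_temp_to_reg() change above keeps the existing encoding and only swaps in clamp_val(): 0.5 degree register steps in high-resolution mode, 1 degree steps otherwise (the driver's temp2_offset subtraction is omitted here). A small sketch using the same kernel helpers:

static int lm63_lut_temp_sketch(long mdegc, bool highres)
{
	/* 61250 mC -> 123 (61.5 C) in high-res mode, 61 in low-res mode */
	mdegc = clamp_val(mdegc, 0, highres ? 127500 : 127000);
	return DIV_ROUND_CLOSEST(mdegc, highres ? 500 : 1000);
}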
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 8fa2632cbbaf..9bde9644b102 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -8,6 +8,7 @@
* Guillaume Ligneul <guillaume.ligneul@gmail.com>
* Adrien Demarez <adrien.demarez@bolloretelecom.eu>
* Jeremy Laine <jeremy.laine@bolloretelecom.eu>
+ * Chris Verges <kg4ysn@gmail.com>
*
* This software program is licensed subject to the GNU General Public License
* (GPL).Version 2,June 1991, available at
@@ -36,11 +37,30 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
#define LM73_ID 0x9001 /* 0x0190, byte-swapped */
#define DRVNAME "lm73"
-#define LM73_TEMP_MIN (-40)
-#define LM73_TEMP_MAX 150
+#define LM73_TEMP_MIN (-256000 / 250)
+#define LM73_TEMP_MAX (255750 / 250)
-/*-----------------------------------------------------------------------*/
+#define LM73_CTRL_RES_SHIFT 5
+#define LM73_CTRL_RES_MASK (BIT(5) | BIT(6))
+#define LM73_CTRL_TO_MASK BIT(7)
+
+#define LM73_CTRL_HI_SHIFT 2
+#define LM73_CTRL_LO_SHIFT 1
+
+static const unsigned short lm73_convrates[] = {
+ 14, /* 11-bits (0.25000 C/LSB): RES1 Bit = 0, RES0 Bit = 0 */
+ 28, /* 12-bits (0.12500 C/LSB): RES1 Bit = 0, RES0 Bit = 1 */
+ 56, /* 13-bits (0.06250 C/LSB): RES1 Bit = 1, RES0 Bit = 0 */
+ 112, /* 14-bits (0.03125 C/LSB): RES1 Bit = 1, RES0 Bit = 1 */
+};
+struct lm73_data {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ u8 ctrl; /* control register value */
+};
+
+/*-----------------------------------------------------------------------*/
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
@@ -49,16 +69,16 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
long temp;
short value;
+ s32 err;
int status = kstrtol(buf, 10, &temp);
if (status < 0)
return status;
/* Write value */
- value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
- (LM73_TEMP_MAX*4)) << 5;
- i2c_smbus_write_word_swapped(client, attr->index, value);
- return count;
+ value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
+ err = i2c_smbus_write_word_swapped(client, attr->index, value);
+ return (err < 0) ? err : count;
}
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
@@ -66,13 +86,85 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct i2c_client *client = to_i2c_client(dev);
+ int temp;
+
+ s32 err = i2c_smbus_read_word_swapped(client, attr->index);
+ if (err < 0)
+ return err;
+
/* use integer division instead of equivalent right shift to
guarantee arithmetic shift and preserve the sign */
- int temp = ((s16) (i2c_smbus_read_word_swapped(client,
- attr->index))*250) / 32;
- return sprintf(buf, "%d\n", temp);
+ temp = (((s16) err) * 250) / 32;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
}
+static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm73_data *data = i2c_get_clientdata(client);
+ unsigned long convrate;
+ s32 err;
+ int res = 0;
+
+ err = kstrtoul(buf, 10, &convrate);
+ if (err < 0)
+ return err;
+
+ /*
+ * Convert the desired conversion rate into register bits.
+ * res is already initialized, and everything past the second-to-last
+ * value in the array is treated as belonging to the last value
+ * in the array.
+ */
+ while (res < (ARRAY_SIZE(lm73_convrates) - 1) &&
+ convrate > lm73_convrates[res])
+ res++;
+
+ mutex_lock(&data->lock);
+ data->ctrl &= LM73_CTRL_TO_MASK;
+ data->ctrl |= res << LM73_CTRL_RES_SHIFT;
+ err = i2c_smbus_write_byte_data(client, LM73_REG_CTRL, data->ctrl);
+ mutex_unlock(&data->lock);
+
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm73_data *data = i2c_get_clientdata(client);
+ int res;
+
+ res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
+ return scnprintf(buf, PAGE_SIZE, "%hu\n", lm73_convrates[res]);
+}
+
+static ssize_t show_maxmin_alarm(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct lm73_data *data = i2c_get_clientdata(client);
+ s32 ctrl;
+
+ mutex_lock(&data->lock);
+ ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+ if (ctrl < 0)
+ goto abort;
+ data->ctrl = ctrl;
+ mutex_unlock(&data->lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (ctrl >> attr->index) & 1);
+
+abort:
+ mutex_unlock(&data->lock);
+ return ctrl;
+}
/*-----------------------------------------------------------------------*/
@@ -84,13 +176,20 @@ static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
show_temp, set_temp, LM73_REG_MIN);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
show_temp, NULL, LM73_REG_INPUT);
-
+static SENSOR_DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO,
+ show_convrate, set_convrate, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+ show_maxmin_alarm, NULL, LM73_CTRL_HI_SHIFT);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
+ show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
static struct attribute *lm73_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
-
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
NULL
};
@@ -105,23 +204,36 @@ static const struct attribute_group lm73_group = {
static int
lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct device *hwmon_dev;
int status;
+ struct lm73_data *data;
+ int ctrl;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct lm73_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->lock);
+
+ ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+ if (ctrl < 0)
+ return ctrl;
+ data->ctrl = ctrl;
/* Register sysfs hooks */
status = sysfs_create_group(&client->dev.kobj, &lm73_group);
if (status)
return status;
- hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(hwmon_dev)) {
- status = PTR_ERR(hwmon_dev);
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ status = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
- i2c_set_clientdata(client, hwmon_dev);
dev_info(&client->dev, "%s: sensor '%s'\n",
- dev_name(hwmon_dev), client->name);
+ dev_name(data->hwmon_dev), client->name);
return 0;
@@ -132,9 +244,9 @@ exit_remove:
static int lm73_remove(struct i2c_client *client)
{
- struct device *hwmon_dev = i2c_get_clientdata(client);
+ struct lm73_data *data = i2c_get_clientdata(client);
- hwmon_device_unregister(hwmon_dev);
+ hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm73_group);
return 0;
}
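set_convrate() above walks lm73_convrates[] until it finds the first entry at least as large as the requested interval; anything beyond the last entry falls through to the 14-bit setting. The selection loop on its own, for clarity (helper name is illustrative):

/* A request of 30 ms selects index 2 (56 ms, 13-bit resolution). */
static int lm73_pick_res(unsigned long requested_ms)
{
	int res = 0;

	while (res < ARRAY_SIZE(lm73_convrates) - 1 &&
	       requested_ms > lm73_convrates[res])
		res++;
	return res;		/* value written into the RES field */
}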
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index 89aa9098ba5b..668ff4721323 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -36,7 +36,7 @@
REG: (0.5C/bit, two's complement) << 7 */
static inline u16 LM75_TEMP_TO_REG(long temp)
{
- int ntemp = SENSORS_LIMIT(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
+ int ntemp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
ntemp += (ntemp < 0 ? -250 : 250);
return (u16)((ntemp / 500) << 7);
}
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index f82acf67acf5..f17beb5e6dd6 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -101,7 +101,7 @@ static struct i2c_driver lm77_driver = {
*/
static inline s16 LM77_TEMP_TO_REG(int temp)
{
- int ntemp = SENSORS_LIMIT(temp, LM77_TEMP_MIN, LM77_TEMP_MAX);
+ int ntemp = clamp_val(temp, LM77_TEMP_MIN, LM77_TEMP_MAX);
return (ntemp / 500) * 8;
}
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 53d6ee8ffa33..483538fa1bd5 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -85,7 +85,7 @@ enum chips { lm78, lm79 };
*/
static inline u8 IN_TO_REG(unsigned long val)
{
- unsigned long nval = SENSORS_LIMIT(val, 0, 4080);
+ unsigned long nval = clamp_val(val, 0, 4080);
return (nval + 8) / 16;
}
#define IN_FROM_REG(val) ((val) * 16)
@@ -94,7 +94,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm <= 0)
return 255;
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static inline int FAN_FROM_REG(u8 val, int div)
@@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div)
*/
static inline s8 TEMP_TO_REG(int val)
{
- int nval = SENSORS_LIMIT(val, -128000, 127000) ;
+ int nval = clamp_val(val, -128000, 127000) ;
return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
}
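The FAN_TO_REG() conversions touched in lm78.c (and in lm80.c below) store a rounded 1350000 / (rpm * div) count, clamped to the valid register range. Worked through once, assuming div is one of the usual 1/2/4/8 divisors:

/*
 * rpm = 3000, div = 2:
 *   (1350000 + 3000 * 2 / 2) / (3000 * 2) = 1353000 / 6000 = 225,
 * which sits comfortably inside the clamp range [1, 254].
 */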
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 28a8b71f4571..357fbb998728 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -72,15 +72,15 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
* Fixing this is just not worth it.
*/
-#define IN_TO_REG(val) (SENSORS_LIMIT(((val) + 5) / 10, 0, 255))
+#define IN_TO_REG(val) (clamp_val(((val) + 5) / 10, 0, 255))
#define IN_FROM_REG(val) ((val) * 10)
static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -102,7 +102,7 @@ static inline long TEMP_FROM_REG(u16 temp)
#define TEMP_LIMIT_FROM_REG(val) (((val) > 0x80 ? \
(val) - 0x100 : (val)) * 1000)
-#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) < 0 ? \
+#define TEMP_LIMIT_TO_REG(val) clamp_val((val) < 0 ? \
((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
#define DIV_FROM_REG(val) (1 << (val))
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 9f2dd77e1e0e..47ade8ba152d 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -139,7 +139,7 @@ static const int lm85_scaling[] = { /* .001 Volts */
#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
#define INS_TO_REG(n, val) \
- SENSORS_LIMIT(SCALE(val, lm85_scaling[n], 192), 0, 255)
+ clamp_val(SCALE(val, lm85_scaling[n], 192), 0, 255)
#define INSEXT_FROM_REG(n, val, ext) \
SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
@@ -151,19 +151,19 @@ static inline u16 FAN_TO_REG(unsigned long val)
{
if (!val)
return 0xffff;
- return SENSORS_LIMIT(5400000 / val, 1, 0xfffe);
+ return clamp_val(5400000 / val, 1, 0xfffe);
}
#define FAN_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
5400000 / (val))
/* Temperature is reported in .001 degC increments */
#define TEMP_TO_REG(val) \
- SENSORS_LIMIT(SCALE(val, 1000, 1), -127, 127)
+ clamp_val(SCALE(val, 1000, 1), -127, 127)
#define TEMPEXT_FROM_REG(val, ext) \
SCALE(((val) << 4) + (ext), 16, 1000)
#define TEMP_FROM_REG(val) ((val) * 1000)
-#define PWM_TO_REG(val) SENSORS_LIMIT(val, 0, 255)
+#define PWM_TO_REG(val) clamp_val(val, 0, 255)
#define PWM_FROM_REG(val) (val)
@@ -258,7 +258,7 @@ static int ZONE_TO_REG(int zone)
return i << 5;
}
-#define HYST_TO_REG(val) SENSORS_LIMIT(((val) + 500) / 1000, 0, 15)
+#define HYST_TO_REG(val) clamp_val(((val) + 500) / 1000, 0, 15)
#define HYST_FROM_REG(val) ((val) * 1000)
/*
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 863412a02bdd..8eeb141c85ac 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -931,7 +931,7 @@ static ssize_t set_update_interval(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- lm90_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+ lm90_set_convrate(client, data, clamp_val(val, 0, 100000));
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 1a003f73e4e4..b40f34cdb3ca 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -371,8 +371,8 @@ static unsigned LM93_IN_FROM_REG(int nr, u8 reg)
static u8 LM93_IN_TO_REG(int nr, unsigned val)
{
/* range limit */
- const long mV = SENSORS_LIMIT(val,
- lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
+ const long mV = clamp_val(val,
+ lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
/* try not to lose too much precision here */
const long uV = mV * 1000;
@@ -385,8 +385,8 @@ static u8 LM93_IN_TO_REG(int nr, unsigned val)
const long intercept = uV_min - slope * lm93_vin_reg_min[nr];
u8 result = ((uV - intercept + (slope/2)) / slope);
- result = SENSORS_LIMIT(result,
- lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
+ result = clamp_val(result,
+ lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
return result;
}
@@ -411,10 +411,10 @@ static u8 LM93_IN_REL_TO_REG(unsigned val, int upper, int vid)
{
long uV_offset = vid * 1000 - val * 10000;
if (upper) {
- uV_offset = SENSORS_LIMIT(uV_offset, 12500, 200000);
+ uV_offset = clamp_val(uV_offset, 12500, 200000);
return (u8)((uV_offset / 12500 - 1) << 4);
} else {
- uV_offset = SENSORS_LIMIT(uV_offset, -400000, -25000);
+ uV_offset = clamp_val(uV_offset, -400000, -25000);
return (u8)((uV_offset / -25000 - 1) << 0);
}
}
@@ -437,7 +437,7 @@ static int LM93_TEMP_FROM_REG(u8 reg)
*/
static u8 LM93_TEMP_TO_REG(long temp)
{
- int ntemp = SENSORS_LIMIT(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
+ int ntemp = clamp_val(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
ntemp += (ntemp < 0 ? -500 : 500);
return (u8)(ntemp / 1000);
}
@@ -472,7 +472,7 @@ static u8 LM93_TEMP_OFFSET_TO_REG(int off, int mode)
{
int factor = mode ? 5 : 10;
- off = SENSORS_LIMIT(off, LM93_TEMP_OFFSET_MIN,
+ off = clamp_val(off, LM93_TEMP_OFFSET_MIN,
mode ? LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0);
return (u8)((off + factor/2) / factor);
}
@@ -620,8 +620,8 @@ static u16 LM93_FAN_TO_REG(long rpm)
if (rpm == 0) {
count = 0x3fff;
} else {
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- count = SENSORS_LIMIT((1350000 + rpm) / rpm, 1, 0x3ffe);
+ rpm = clamp_val(rpm, 1, 1000000);
+ count = clamp_val((1350000 + rpm) / rpm, 1, 0x3ffe);
}
regs = count << 2;
@@ -692,7 +692,7 @@ static int LM93_RAMP_FROM_REG(u8 reg)
*/
static u8 LM93_RAMP_TO_REG(int ramp)
{
- ramp = SENSORS_LIMIT(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
+ ramp = clamp_val(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
return (u8)((ramp + 2) / 5);
}
@@ -702,7 +702,7 @@ static u8 LM93_RAMP_TO_REG(int ramp)
*/
static u8 LM93_PROCHOT_TO_REG(long prochot)
{
- prochot = SENSORS_LIMIT(prochot, 0, 255);
+ prochot = clamp_val(prochot, 0, 255);
return (u8)prochot;
}
@@ -2052,7 +2052,7 @@ static ssize_t store_pwm_auto_channels(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- data->block9[nr][LM93_PWM_CTL1] = SENSORS_LIMIT(val, 0, 255);
+ data->block9[nr][LM93_PWM_CTL1] = clamp_val(val, 0, 255);
lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL1),
data->block9[nr][LM93_PWM_CTL1]);
mutex_unlock(&data->update_lock);
@@ -2397,7 +2397,7 @@ static ssize_t store_prochot_override_duty_cycle(struct device *dev,
mutex_lock(&data->update_lock);
data->prochot_override = (data->prochot_override & 0xf0) |
- SENSORS_LIMIT(val, 0, 15);
+ clamp_val(val, 0, 15);
lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
data->prochot_override);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 2915fd908364..a6c85f0ff8f3 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -259,7 +259,7 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
val /= 1000;
- val = SENSORS_LIMIT(val, 0, (index == 6 ? 127 : 255));
+ val = clamp_val(val, 0, (index == 6 ? 127 : 255));
mutex_lock(&data->update_lock);
@@ -284,7 +284,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
val /= 1000;
- val = SENSORS_LIMIT(val, 0, 31);
+ val = clamp_val(val, 0, 31);
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index e0019c69d1bb..2fa2c02f5569 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -118,7 +118,7 @@ static inline int LIMIT_TO_MV(int limit, int range)
static inline int MV_TO_LIMIT(int mv, int range)
{
- return SENSORS_LIMIT(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
+ return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
}
static inline int ADC_TO_CURR(int adc, int gain)
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 666d9f6263eb..a7626358c95d 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -215,7 +215,7 @@ static ssize_t set_temp_max(struct device *dev,
return ret;
mutex_lock(&data->update_lock);
- data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ data->temp_max[index] = clamp_val(temp/1000, -128, 127);
if (i2c_smbus_write_byte_data(client,
MAX1668_REG_LIMH_WR(index),
data->temp_max[index]))
@@ -240,7 +240,7 @@ static ssize_t set_temp_min(struct device *dev,
return ret;
mutex_lock(&data->update_lock);
- data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ data->temp_min[index] = clamp_val(temp/1000, -128, 127);
if (i2c_smbus_write_byte_data(client,
MAX1668_REG_LIML_WR(index),
data->temp_max[index]))
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 6e60036abfa7..3e7b4269f5b9 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -74,7 +74,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
0 : (rpm_ranges[rpm_range] * 30) / (val))
-#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
+#define TEMP_LIMIT_TO_REG(val) clamp_val((val) / 1000, 0, 255)
/*
* Client data (each client gets its own)
@@ -312,7 +312,7 @@ static ssize_t set_pwm(struct device *dev,
if (res)
return res;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[attr->index] = (u8)(val * 120 / 255);
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 223461a6d70f..57d58cd32206 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -239,7 +239,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_high[attr2->nr] = SENSORS_LIMIT(temp_to_reg(val), 0, 255);
+ data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
i2c_smbus_write_byte_data(client, attr2->index,
data->temp_high[attr2->nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index f739f83bafb9..3c16cbd4c002 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -245,7 +245,7 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
if (err)
return err;
- rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
+ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
/*
* Divide the required speed by 60 to get from rpm to rps, then
@@ -313,7 +313,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
if (err)
return err;
- pwm = SENSORS_LIMIT(pwm, 0, 255);
+ pwm = clamp_val(pwm, 0, 255);
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
new file mode 100644
index 000000000000..bf4aa3777fc1
--- /dev/null
+++ b/drivers/hwmon/max6697.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * based on max1668.c
+ * Copyright (c) 2011 David George <david.george@ska.ac.za>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include <linux/platform_data/max6697.h>
+
+enum chips { max6581, max6602, max6622, max6636, max6689, max6693, max6694,
+ max6697, max6698, max6699 };
+
+/* Report local sensor as temp1 */
+
+static const u8 MAX6697_REG_TEMP[] = {
+ 0x07, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08 };
+static const u8 MAX6697_REG_TEMP_EXT[] = {
+ 0x57, 0x09, 0x52, 0x53, 0x54, 0x55, 0x56, 0 };
+static const u8 MAX6697_REG_MAX[] = {
+ 0x17, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x18 };
+static const u8 MAX6697_REG_CRIT[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27 };
+
+/*
+ * Map device tree / platform data register bit map to chip bit map.
+ * Applies to alert register and over-temperature register.
+ */
+#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
+ (((reg) & 0x01) << 6) | ((reg) & 0x80))
+
+#define MAX6697_REG_STAT(n) (0x44 + (n))
+
+#define MAX6697_REG_CONFIG 0x41
+#define MAX6581_CONF_EXTENDED (1 << 1)
+#define MAX6693_CONF_BETA (1 << 2)
+#define MAX6697_CONF_RESISTANCE (1 << 3)
+#define MAX6697_CONF_TIMEOUT (1 << 5)
+#define MAX6697_REG_ALERT_MASK 0x42
+#define MAX6697_REG_OVERT_MASK 0x43
+
+#define MAX6581_REG_RESISTANCE 0x4a
+#define MAX6581_REG_IDEALITY 0x4b
+#define MAX6581_REG_IDEALITY_SELECT 0x4c
+#define MAX6581_REG_OFFSET 0x4d
+#define MAX6581_REG_OFFSET_SELECT 0x4e
+
+#define MAX6697_CONV_TIME 156 /* ms per channel, worst case */
+
+struct max6697_chip_data {
+ int channels;
+ u32 have_ext;
+ u32 have_crit;
+ u32 have_fault;
+ u8 valid_conf;
+ const u8 *alarm_map;
+};
+
+struct max6697_data {
+ struct device *hwmon_dev;
+
+ enum chips type;
+ const struct max6697_chip_data *chip;
+
+ int update_interval; /* in milli-seconds */
+ int temp_offset; /* in degrees C */
+
+ struct mutex update_lock;
+ unsigned long last_updated; /* In jiffies */
+ bool valid; /* true if following fields are valid */
+
+ /* 1x local and up to 7x remote */
+ u8 temp[8][4]; /* [nr][0]=temp [1]=ext [2]=max [3]=crit */
+#define MAX6697_TEMP_INPUT 0
+#define MAX6697_TEMP_EXT 1
+#define MAX6697_TEMP_MAX 2
+#define MAX6697_TEMP_CRIT 3
+ u32 alarms;
+};
+
+/* Diode fault status bits on MAX6581 are right shifted by one bit */
+static const u8 max6581_alarm_map[] = {
+ 0, 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23 };
+
+static const struct max6697_chip_data max6697_chip_data[] = {
+ [max6581] = {
+ .channels = 8,
+ .have_crit = 0xff,
+ .have_ext = 0x7f,
+ .have_fault = 0xfe,
+ .valid_conf = MAX6581_CONF_EXTENDED | MAX6697_CONF_TIMEOUT,
+ .alarm_map = max6581_alarm_map,
+ },
+ [max6602] = {
+ .channels = 5,
+ .have_crit = 0x12,
+ .have_ext = 0x02,
+ .have_fault = 0x1e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6622] = {
+ .channels = 5,
+ .have_crit = 0x12,
+ .have_ext = 0x02,
+ .have_fault = 0x1e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6636] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x7e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6689] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x7e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6693] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x7e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6693_CONF_BETA |
+ MAX6697_CONF_TIMEOUT,
+ },
+ [max6694] = {
+ .channels = 5,
+ .have_crit = 0x12,
+ .have_ext = 0x02,
+ .have_fault = 0x1e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6693_CONF_BETA |
+ MAX6697_CONF_TIMEOUT,
+ },
+ [max6697] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x7e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6698] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x0e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6699] = {
+ .channels = 5,
+ .have_crit = 0x12,
+ .have_ext = 0x02,
+ .have_fault = 0x1e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+};
+
+static struct max6697_data *max6697_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_data *ret = data;
+ int val;
+ int i;
+ u32 alarms;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->valid &&
+ !time_after(jiffies, data->last_updated
+ + msecs_to_jiffies(data->update_interval)))
+ goto abort;
+
+ for (i = 0; i < data->chip->channels; i++) {
+ if (data->chip->have_ext & (1 << i)) {
+ val = i2c_smbus_read_byte_data(client,
+ MAX6697_REG_TEMP_EXT[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i][MAX6697_TEMP_EXT] = val;
+ }
+
+ val = i2c_smbus_read_byte_data(client, MAX6697_REG_TEMP[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i][MAX6697_TEMP_INPUT] = val;
+
+ val = i2c_smbus_read_byte_data(client, MAX6697_REG_MAX[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i][MAX6697_TEMP_MAX] = val;
+
+ if (data->chip->have_crit & (1 << i)) {
+ val = i2c_smbus_read_byte_data(client,
+ MAX6697_REG_CRIT[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i][MAX6697_TEMP_CRIT] = val;
+ }
+ }
+
+ alarms = 0;
+ for (i = 0; i < 3; i++) {
+ val = i2c_smbus_read_byte_data(client, MAX6697_REG_STAT(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ alarms = (alarms << 8) | val;
+ }
+ data->alarms = alarms;
+ data->last_updated = jiffies;
+ data->valid = true;
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp_input(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max6697_data *data = max6697_update_device(dev);
+ int temp;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = (data->temp[index][MAX6697_TEMP_INPUT] - data->temp_offset) << 3;
+ temp |= data->temp[index][MAX6697_TEMP_EXT] >> 5;
+
+ return sprintf(buf, "%d\n", temp * 125);
+}
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr_2(devattr)->nr;
+ int index = to_sensor_dev_attr_2(devattr)->index;
+ struct max6697_data *data = max6697_update_device(dev);
+ int temp;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = data->temp[nr][index];
+ temp -= data->temp_offset;
+
+ return sprintf(buf, "%d\n", temp * 1000);
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int index = to_sensor_dev_attr(attr)->index;
+ struct max6697_data *data = max6697_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->chip->alarm_map)
+ index = data->chip->alarm_map[index];
+
+ return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
+}
+
+static ssize_t set_temp(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr_2(devattr)->nr;
+ int index = to_sensor_dev_attr_2(devattr)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6697_data *data = i2c_get_clientdata(client);
+ long temp;
+ int ret;
+
+ ret = kstrtol(buf, 10, &temp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
+ temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
+ data->temp[nr][index] = temp;
+ ret = i2c_smbus_write_byte_data(client,
+ index == 2 ? MAX6697_REG_MAX[nr]
+ : MAX6697_REG_CRIT[nr],
+ temp);
+ mutex_unlock(&data->update_lock);
+
+ return ret < 0 ? ret : count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3);
+static SENSOR_DEVICE_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 3, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp4_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 3, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 4);
+static SENSOR_DEVICE_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 4, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp5_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 4, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5);
+static SENSOR_DEVICE_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 5, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp6_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 5, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp_input, NULL, 6);
+static SENSOR_DEVICE_ATTR_2(temp7_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 6, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp7_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 6, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp_input, NULL, 7);
+static SENSOR_DEVICE_ATTR_2(temp8_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 7, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp8_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 7, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 22);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 16);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 17);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 18);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 19);
+static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO, show_alarm, NULL, 20);
+static SENSOR_DEVICE_ATTR(temp7_max_alarm, S_IRUGO, show_alarm, NULL, 21);
+static SENSOR_DEVICE_ATTR(temp8_max_alarm, S_IRUGO, show_alarm, NULL, 23);
+
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, show_alarm, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp7_crit_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp8_crit_alarm, S_IRUGO, show_alarm, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_alarm, NULL, 7);
+
+static struct attribute *max6697_attributes[8][7] = {
+ {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp6_input.dev_attr.attr,
+ &sensor_dev_attr_temp6_max.dev_attr.attr,
+ &sensor_dev_attr_temp6_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_crit.dev_attr.attr,
+ &sensor_dev_attr_temp6_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp7_input.dev_attr.attr,
+ &sensor_dev_attr_temp7_max.dev_attr.attr,
+ &sensor_dev_attr_temp7_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp7_crit.dev_attr.attr,
+ &sensor_dev_attr_temp7_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp7_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp8_input.dev_attr.attr,
+ &sensor_dev_attr_temp8_max.dev_attr.attr,
+ &sensor_dev_attr_temp8_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp8_crit.dev_attr.attr,
+ &sensor_dev_attr_temp8_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp8_fault.dev_attr.attr,
+ NULL
+ }
+};
+
+static const struct attribute_group max6697_group[8] = {
+ { .attrs = max6697_attributes[0] },
+ { .attrs = max6697_attributes[1] },
+ { .attrs = max6697_attributes[2] },
+ { .attrs = max6697_attributes[3] },
+ { .attrs = max6697_attributes[4] },
+ { .attrs = max6697_attributes[5] },
+ { .attrs = max6697_attributes[6] },
+ { .attrs = max6697_attributes[7] },
+};
+
+static void max6697_get_config_of(struct device_node *node,
+ struct max6697_platform_data *pdata)
+{
+ int len;
+ const __be32 *prop;
+
+ prop = of_get_property(node, "smbus-timeout-disable", &len);
+ if (prop)
+ pdata->smbus_timeout_disable = true;
+ prop = of_get_property(node, "extended-range-enable", &len);
+ if (prop)
+ pdata->extended_range_enable = true;
+ prop = of_get_property(node, "beta-compensation-enable", &len);
+ if (prop)
+ pdata->beta_compensation = true;
+ prop = of_get_property(node, "alert-mask", &len);
+ if (prop && len == sizeof(u32))
+ pdata->alert_mask = be32_to_cpu(prop[0]);
+ prop = of_get_property(node, "over-temperature-mask", &len);
+ if (prop && len == sizeof(u32))
+ pdata->over_temperature_mask = be32_to_cpu(prop[0]);
+ prop = of_get_property(node, "resistance-cancellation", &len);
+ if (prop) {
+ if (len == sizeof(u32))
+ pdata->resistance_cancellation = be32_to_cpu(prop[0]);
+ else
+ pdata->resistance_cancellation = 0xfe;
+ }
+ prop = of_get_property(node, "transistor-ideality", &len);
+ if (prop && len == 2 * sizeof(u32)) {
+ pdata->ideality_mask = be32_to_cpu(prop[0]);
+ pdata->ideality_value = be32_to_cpu(prop[1]);
+ }
+}
+
+static int max6697_init_chip(struct i2c_client *client)
+{
+ struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct max6697_platform_data p;
+ const struct max6697_chip_data *chip = data->chip;
+ int factor = chip->channels;
+ int ret, reg;
+
+ /*
+ * Don't touch configuration if neither platform data nor OF
+ * configuration was specified. If that is the case, use the
+ * current chip configuration.
+ */
+ if (!pdata && !client->dev.of_node) {
+ reg = i2c_smbus_read_byte_data(client, MAX6697_REG_CONFIG);
+ if (reg < 0)
+ return reg;
+ if (data->type == max6581) {
+ if (reg & MAX6581_CONF_EXTENDED)
+ data->temp_offset = 64;
+ reg = i2c_smbus_read_byte_data(client,
+ MAX6581_REG_RESISTANCE);
+ if (reg < 0)
+ return reg;
+ factor += hweight8(reg);
+ } else {
+ if (reg & MAX6697_CONF_RESISTANCE)
+ factor++;
+ }
+ goto done;
+ }
+
+ if (client->dev.of_node) {
+ memset(&p, 0, sizeof(p));
+ max6697_get_config_of(client->dev.of_node, &p);
+ pdata = &p;
+ }
+
+ reg = 0;
+ if (pdata->smbus_timeout_disable &&
+ (chip->valid_conf & MAX6697_CONF_TIMEOUT)) {
+ reg |= MAX6697_CONF_TIMEOUT;
+ }
+ if (pdata->extended_range_enable &&
+ (chip->valid_conf & MAX6581_CONF_EXTENDED)) {
+ reg |= MAX6581_CONF_EXTENDED;
+ data->temp_offset = 64;
+ }
+ if (pdata->resistance_cancellation &&
+ (chip->valid_conf & MAX6697_CONF_RESISTANCE)) {
+ reg |= MAX6697_CONF_RESISTANCE;
+ factor++;
+ }
+ if (pdata->beta_compensation &&
+ (chip->valid_conf & MAX6693_CONF_BETA)) {
+ reg |= MAX6693_CONF_BETA;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, MAX6697_REG_CONFIG, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
+ MAX6697_MAP_BITS(pdata->alert_mask));
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
+ MAX6697_MAP_BITS(pdata->over_temperature_mask));
+ if (ret < 0)
+ return ret;
+
+ if (data->type == max6581) {
+ factor += hweight8(pdata->resistance_cancellation >> 1);
+ ret = i2c_smbus_write_byte_data(client, MAX6581_REG_RESISTANCE,
+ pdata->resistance_cancellation >> 1);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
+ pdata->ideality_mask >> 1);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(client,
+ MAX6581_REG_IDEALITY_SELECT,
+ pdata->ideality_value);
+ if (ret < 0)
+ return ret;
+ }
+done:
+ data->update_interval = factor * MAX6697_CONV_TIME;
+ return 0;
+}
+
+static void max6697_remove_files(struct i2c_client *client)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max6697_group); i++)
+ sysfs_remove_group(&client->dev.kobj, &max6697_group[i]);
+}
+
+static int max6697_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
+ struct max6697_data *data;
+ int i, err;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(struct max6697_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->type = id->driver_data;
+ data->chip = &max6697_chip_data[data->type];
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ err = max6697_init_chip(client);
+ if (err)
+ return err;
+
+ for (i = 0; i < data->chip->channels; i++) {
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][0]);
+ if (err)
+ goto error;
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][1]);
+ if (err)
+ goto error;
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][2]);
+ if (err)
+ goto error;
+
+ if (data->chip->have_crit & (1 << i)) {
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][3]);
+ if (err)
+ goto error;
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][4]);
+ if (err)
+ goto error;
+ }
+ if (data->chip->have_fault & (1 << i)) {
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][5]);
+ if (err)
+ goto error;
+ }
+ }
+
+ data->hwmon_dev = hwmon_device_register(dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ max6697_remove_files(client);
+ return err;
+}
+
+static int max6697_remove(struct i2c_client *client)
+{
+ struct max6697_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ max6697_remove_files(client);
+
+ return 0;
+}
+
+static const struct i2c_device_id max6697_id[] = {
+ { "max6581", max6581 },
+ { "max6602", max6602 },
+ { "max6622", max6622 },
+ { "max6636", max6636 },
+ { "max6689", max6689 },
+ { "max6693", max6693 },
+ { "max6694", max6694 },
+ { "max6697", max6697 },
+ { "max6698", max6698 },
+ { "max6699", max6699 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6697_id);
+
+static struct i2c_driver max6697_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max6697",
+ },
+ .probe = max6697_probe,
+ .remove = max6697_remove,
+ .id_table = max6697_id,
+};
+
+module_i2c_driver(max6697_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX6697 temperature sensor driver");
+MODULE_LICENSE("GPL");
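show_temp_input() in the new max6697.c combines the whole-degree register with the top three bits of the extension register and reports millidegrees. A stand-alone sketch of that fixed-point assembly (the function name is illustrative, not the driver's own):

/* reg = 50, ext = 0xa0, offset = 0: (50 << 3 | 5) * 125 = 50625 mC. */
static int max6697_mdegc_sketch(u8 reg, u8 ext, int offset)
{
	int temp = ((reg - offset) << 3) | (ext >> 5);

	return temp * 125;	/* 0.125 C per LSB -> millidegrees C */
}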
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index a87eb8986e36..b5f63f9c0ce1 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -43,7 +43,7 @@ struct ntc_compensation {
* The following compensation tables are from the specification of Murata NTC
* Thermistors Datasheet
*/
-const struct ntc_compensation ncpXXwb473[] = {
+static const struct ntc_compensation ncpXXwb473[] = {
{ .temp_C = -40, .ohm = 1747920 },
{ .temp_C = -35, .ohm = 1245428 },
{ .temp_C = -30, .ohm = 898485 },
@@ -79,7 +79,7 @@ const struct ntc_compensation ncpXXwb473[] = {
{ .temp_C = 120, .ohm = 1615 },
{ .temp_C = 125, .ohm = 1406 },
};
-const struct ntc_compensation ncpXXwl333[] = {
+static const struct ntc_compensation ncpXXwl333[] = {
{ .temp_C = -40, .ohm = 1610154 },
{ .temp_C = -35, .ohm = 1130850 },
{ .temp_C = -30, .ohm = 802609 },
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 60745a535821..4f9eb0af5229 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -72,7 +72,7 @@ config SENSORS_MAX34440
default n
help
If you say yes here you get hardware monitoring support for Maxim
- MAX34440, MAX34441, and MAX34446.
+ MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
This driver can also be built as a module. If so, the module will
be called max34440.
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 2ada7b021fbe..7e930c3ce1ab 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -2,6 +2,7 @@
* Hardware monitoring driver for Maxim MAX34440/MAX34441
*
* Copyright (c) 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,7 +26,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446 };
+enum chips { max34440, max34441, max34446, max34460, max34461 };
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -87,7 +88,8 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
MAX34446_MFR_POUT_PEAK);
break;
case PMBUS_VIRT_READ_TEMP_AVG:
- if (data->id != max34446)
+ if (data->id != max34446 && data->id != max34460 &&
+ data->id != max34461)
return -ENXIO;
ret = pmbus_read_word_data(client, page,
MAX34446_MFR_TEMPERATURE_AVG);
@@ -322,6 +324,73 @@ static struct pmbus_driver_info max34440_info[] = {
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
+ [max34460] = {
+ .pages = 18,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[6] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[7] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[8] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[9] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[10] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[11] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[14] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
+ [max34461] = {
+ .pages = 23,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[6] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[7] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[8] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[9] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[10] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[11] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[12] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[13] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[14] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[15] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ /* page 16 is reserved */
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
};
static int max34440_probe(struct i2c_client *client,
@@ -343,6 +412,8 @@ static const struct i2c_device_id max34440_id[] = {
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
+ {"max34460", max34460},
+ {"max34461", max34461},
{}
};
MODULE_DEVICE_TABLE(i2c, max34440_id);
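The max34460/max34461 entries above use the PMBus "direct" data format with m = 1, b = 0 and R = 3 (voltages) or R = 2 (temperatures). Per the PMBus specification, a direct-format word decodes as real = (reg * 10^-R - b) / m; the sketch below (an illustration, not code from pmbus_core.c) shows that decode in milli-units, so a VOUT word of 3300 becomes 3300 mV and a temperature word of 2550 becomes 25500 mC:

static long pmbus_direct_to_milli(long reg, long m, long b, int R)
{
	long val = reg * 1000;			/* report in milli-units */

	while (R > 0) {				/* apply the 10^-R factor */
		val /= 10;
		R--;
	}
	while (R < 0) {
		val *= 10;
		R++;
	}
	return (val - b * 1000) / m;
}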
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 3fe03dc47eb7..fa9beb3eb60c 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -2,6 +2,7 @@
* pmbus.h - Common defines and structures for PMBus devices
*
* Copyright (c) 2010, 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -177,6 +178,13 @@
#define PMBUS_VIRT_READ_TEMP2_MAX (PMBUS_VIRT_BASE + 28)
#define PMBUS_VIRT_RESET_TEMP2_HISTORY (PMBUS_VIRT_BASE + 29)
+#define PMBUS_VIRT_READ_VMON (PMBUS_VIRT_BASE + 30)
+#define PMBUS_VIRT_VMON_UV_WARN_LIMIT (PMBUS_VIRT_BASE + 31)
+#define PMBUS_VIRT_VMON_OV_WARN_LIMIT (PMBUS_VIRT_BASE + 32)
+#define PMBUS_VIRT_VMON_UV_FAULT_LIMIT (PMBUS_VIRT_BASE + 33)
+#define PMBUS_VIRT_VMON_OV_FAULT_LIMIT (PMBUS_VIRT_BASE + 34)
+#define PMBUS_VIRT_STATUS_VMON (PMBUS_VIRT_BASE + 35)
+
/*
* CAPABILITY
*/
@@ -317,6 +325,8 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_TEMP (1 << 15)
#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
+#define PMBUS_HAVE_VMON (1 << 18)
+#define PMBUS_HAVE_STATUS_VMON (1 << 19)
enum pmbus_data_format { linear = 0, direct, vid };
@@ -359,6 +369,7 @@ struct pmbus_driver_info {
/* Function declarations */
+void pmbus_clear_cache(struct i2c_client *client);
int pmbus_set_page(struct i2c_client *client, u8 page);
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 7d19b1bb9ce6..80eef50c50fd 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -2,6 +2,7 @@
* Hardware monitoring driver for PMBus devices
*
* Copyright (c) 2010, 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,45 +32,10 @@
#include "pmbus.h"
/*
- * Constants needed to determine number of sensors, booleans, and labels.
+ * Number of additional attribute pointers to allocate
+ * with each call to krealloc
*/
-#define PMBUS_MAX_INPUT_SENSORS 22 /* 10*volt, 7*curr, 5*power */
-#define PMBUS_VOUT_SENSORS_PER_PAGE 9 /* input, min, max, lcrit,
- crit, lowest, highest, avg,
- reset */
-#define PMBUS_IOUT_SENSORS_PER_PAGE 8 /* input, min, max, crit,
- lowest, highest, avg,
- reset */
-#define PMBUS_POUT_SENSORS_PER_PAGE 7 /* input, cap, max, crit,
- * highest, avg, reset
- */
-#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
-#define PMBUS_MAX_SENSORS_PER_TEMP 9 /* input, min, max, lcrit,
- * crit, lowest, highest, avg,
- * reset
- */
-
-#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
- lcrit_alarm, crit_alarm;
- c: alarm, crit_alarm;
- p: crit_alarm */
-#define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm,
- lcrit_alarm, crit_alarm */
-#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
- crit_alarm */
-#define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm
- */
-#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
-#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
- lcrit_alarm, crit_alarm */
-
-#define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */
-
-/*
- * status, status_vout, status_iout, status_fans, status_fan34, and status_temp
- * are paged. status_input is unpaged.
- */
-#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1)
+#define PMBUS_ATTR_ALLOC_SIZE 32
/*
* Index into status register array, per status register group
@@ -79,14 +45,18 @@
#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
-#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
-#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
+#define PB_STATUS_TEMP_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
+#define PB_STATUS_INPUT_BASE (PB_STATUS_TEMP_BASE + PMBUS_PAGES)
+#define PB_STATUS_VMON_BASE (PB_STATUS_INPUT_BASE + 1)
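+/*
+ * The input and VMON status registers are unpaged;
+ * all other status registers are paged.
+ */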
+
+#define PB_NUM_STATUS_REG (PB_STATUS_VMON_BASE + 1)
#define PMBUS_NAME_SIZE 24
struct pmbus_sensor {
+ struct pmbus_sensor *next;
char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
- struct sensor_device_attribute attribute;
+ struct device_attribute attribute;
u8 page; /* page number */
u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
@@ -94,19 +64,28 @@ struct pmbus_sensor {
int data; /* Sensor data.
Negative if there was a read error */
};
+#define to_pmbus_sensor(_attr) \
+ container_of(_attr, struct pmbus_sensor, attribute)
struct pmbus_boolean {
char name[PMBUS_NAME_SIZE]; /* sysfs boolean name */
struct sensor_device_attribute attribute;
+ struct pmbus_sensor *s1;
+ struct pmbus_sensor *s2;
};
+#define to_pmbus_boolean(_attr) \
+ container_of(_attr, struct pmbus_boolean, attribute)
struct pmbus_label {
char name[PMBUS_NAME_SIZE]; /* sysfs label name */
- struct sensor_device_attribute attribute;
+ struct device_attribute attribute;
char label[PMBUS_NAME_SIZE]; /* label */
};
+#define to_pmbus_label(_attr) \
+ container_of(_attr, struct pmbus_label, attribute)
struct pmbus_data {
+ struct device *dev;
struct device *hwmon_dev;
u32 flags; /* from platform data */
@@ -117,29 +96,9 @@ struct pmbus_data {
int max_attributes;
int num_attributes;
- struct attribute **attributes;
struct attribute_group group;
- /*
- * Sensors cover both sensor and limit registers.
- */
- int max_sensors;
- int num_sensors;
struct pmbus_sensor *sensors;
- /*
- * Booleans are used for alarms.
- * Values are determined from status registers.
- */
- int max_booleans;
- int num_booleans;
- struct pmbus_boolean *booleans;
- /*
- * Labels are used to map generic names (e.g., "in1")
- * to PMBus specific names (e.g., "vin" or "vout1").
- */
- int max_labels;
- int num_labels;
- struct pmbus_label *labels;
struct mutex update_lock;
bool valid;
@@ -150,10 +109,19 @@ struct pmbus_data {
* so we keep them all together.
*/
u8 status[PB_NUM_STATUS_REG];
+ u8 status_register;
u8 currpage;
};
+void pmbus_clear_cache(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ data->valid = false;
+}
+EXPORT_SYMBOL_GPL(pmbus_clear_cache);
+
int pmbus_set_page(struct i2c_client *client, u8 page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -318,9 +286,10 @@ EXPORT_SYMBOL_GPL(pmbus_clear_faults);
static int pmbus_check_status_cml(struct i2c_client *client)
{
+ struct pmbus_data *data = i2c_get_clientdata(client);
int status, status2;
- status = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
+ status = _pmbus_read_byte_data(client, -1, data->status_register);
if (status < 0 || (status & PB_STATUS_CML)) {
status2 = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
@@ -329,29 +298,30 @@ static int pmbus_check_status_cml(struct i2c_client *client)
return 0;
}
-bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+static bool pmbus_check_register(struct i2c_client *client,
+ int (*func)(struct i2c_client *client,
+ int page, int reg),
+ int page, int reg)
{
int rv;
struct pmbus_data *data = i2c_get_clientdata(client);
- rv = _pmbus_read_byte_data(client, page, reg);
+ rv = func(client, page, reg);
if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
rv = pmbus_check_status_cml(client);
pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
+
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+{
+ return pmbus_check_register(client, _pmbus_read_byte_data, page, reg);
+}
EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
{
- int rv;
- struct pmbus_data *data = i2c_get_clientdata(client);
-
- rv = _pmbus_read_word_data(client, page, reg);
- if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
- rv = pmbus_check_status_cml(client);
- pmbus_clear_fault_page(client, -1);
- return rv >= 0;
+ return pmbus_check_register(client, _pmbus_read_word_data, page, reg);
}
EXPORT_SYMBOL_GPL(pmbus_check_word_register);
@@ -363,53 +333,43 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
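+/*
+ * Map each paged status register group to its functionality flag, its base
+ * index into the status cache, and the PMBus status register to read.
+ */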
+static struct _pmbus_status {
+ u32 func;
+ u16 base;
+ u16 reg;
+} pmbus_status[] = {
+ { PMBUS_HAVE_STATUS_VOUT, PB_STATUS_VOUT_BASE, PMBUS_STATUS_VOUT },
+ { PMBUS_HAVE_STATUS_IOUT, PB_STATUS_IOUT_BASE, PMBUS_STATUS_IOUT },
+ { PMBUS_HAVE_STATUS_TEMP, PB_STATUS_TEMP_BASE,
+ PMBUS_STATUS_TEMPERATURE },
+ { PMBUS_HAVE_STATUS_FAN12, PB_STATUS_FAN_BASE, PMBUS_STATUS_FAN_12 },
+ { PMBUS_HAVE_STATUS_FAN34, PB_STATUS_FAN34_BASE, PMBUS_STATUS_FAN_34 },
+};
+
static struct pmbus_data *pmbus_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
+ struct pmbus_sensor *sensor;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- int i;
+ int i, j;
- for (i = 0; i < info->pages; i++)
+ for (i = 0; i < info->pages; i++) {
data->status[PB_STATUS_BASE + i]
= _pmbus_read_byte_data(client, i,
- PMBUS_STATUS_BYTE);
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT))
- continue;
- data->status[PB_STATUS_VOUT_BASE + i]
- = _pmbus_read_byte_data(client, i, PMBUS_STATUS_VOUT);
- }
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT))
- continue;
- data->status[PB_STATUS_IOUT_BASE + i]
- = _pmbus_read_byte_data(client, i, PMBUS_STATUS_IOUT);
- }
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP))
- continue;
- data->status[PB_STATUS_TEMP_BASE + i]
- = _pmbus_read_byte_data(client, i,
- PMBUS_STATUS_TEMPERATURE);
- }
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12))
- continue;
- data->status[PB_STATUS_FAN_BASE + i]
- = _pmbus_read_byte_data(client, i,
- PMBUS_STATUS_FAN_12);
- }
-
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34))
- continue;
- data->status[PB_STATUS_FAN34_BASE + i]
- = _pmbus_read_byte_data(client, i,
- PMBUS_STATUS_FAN_34);
+ data->status_register);
+ for (j = 0; j < ARRAY_SIZE(pmbus_status); j++) {
+ struct _pmbus_status *s = &pmbus_status[j];
+
+ if (!(info->func[i] & s->func))
+ continue;
+ data->status[s->base + i]
+ = _pmbus_read_byte_data(client, i,
+ s->reg);
+ }
}
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
@@ -417,9 +377,12 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
= _pmbus_read_byte_data(client, 0,
PMBUS_STATUS_INPUT);
- for (i = 0; i < data->num_sensors; i++) {
- struct pmbus_sensor *sensor = &data->sensors[i];
+ if (info->func[0] & PMBUS_HAVE_STATUS_VMON)
+ data->status[PB_STATUS_VMON_BASE]
+ = _pmbus_read_byte_data(client, 0,
+ PMBUS_VIRT_STATUS_VMON);
+ for (sensor = data->sensors; sensor; sensor = sensor->next) {
if (!data->valid || sensor->update)
sensor->data
= _pmbus_read_word_data(client,
@@ -657,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
static u16 pmbus_data2reg_vid(struct pmbus_data *data,
enum pmbus_sensor_classes class, long val)
{
- val = SENSORS_LIMIT(val, 500, 1600);
+ val = clamp_val(val, 500, 1600);
return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625);
}
@@ -684,25 +647,20 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
/*
* Return boolean calculated from converted data.
- * <index> defines a status register index and mask, and optionally
- * two sensor indices.
- * The upper half-word references the two optional sensors,
- * the lower half word references status register and mask.
- * The function returns true if (status[reg] & mask) is true and,
- * if specified, if v1 >= v2.
- * To determine if an object exceeds upper limits, specify <v, limit>.
- * To determine if an object exceeds lower limits, specify <limit, v>.
+ * <index> defines a status register index and mask.
+ * The mask is in the lower 8 bits, the register index is in bits 8..23.
*
- * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of
- * index are set. s1 and s2 (the sensor index values) are zero in this case.
- * The function returns true if (status[reg] & mask) is true.
+ * The associated pmbus_boolean structure contains optional pointers to two
+ * sensor attributes. If specified, those attributes are compared against each
+ * other to determine if a limit has been exceeded.
*
- * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against
- * a specified limit has to be performed to determine the boolean result.
+ * If the sensor attribute pointers are NULL, the function returns true if
+ * (status[reg] & mask) is true.
+ *
+ * If sensor attribute pointers are provided, a comparison against a specified
+ * limit has to be performed to determine the boolean result.
* In this case, the function returns true if v1 >= v2 (where v1 and v2 are
- * sensor values referenced by sensor indices s1 and s2).
+ * sensor values referenced by sensor attribute pointers s1 and s2).
*
* To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
* To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
@@ -710,11 +668,12 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
* If a negative value is stored in any of the referenced registers, this value
* reflects an error code which will be returned.
*/
-static int pmbus_get_boolean(struct pmbus_data *data, int index)
+static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
+ int index)
{
- u8 s1 = (index >> 24) & 0xff;
- u8 s2 = (index >> 16) & 0xff;
- u8 reg = (index >> 8) & 0xff;
+ struct pmbus_sensor *s1 = b->s1;
+ struct pmbus_sensor *s2 = b->s2;
+ u16 reg = (index >> 8) & 0xffff;
u8 mask = index & 0xff;
int ret, status;
u8 regval;
@@ -724,21 +683,21 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index)
return status;
regval = status & mask;
- if (!s1 && !s2)
+ if (!s1 && !s2) {
ret = !!regval;
- else {
+ } else if (!s1 || !s2) {
+ BUG();
+ return 0;
+ } else {
long v1, v2;
- struct pmbus_sensor *sensor1, *sensor2;
- sensor1 = &data->sensors[s1];
- if (sensor1->data < 0)
- return sensor1->data;
- sensor2 = &data->sensors[s2];
- if (sensor2->data < 0)
- return sensor2->data;
+ if (s1->data < 0)
+ return s1->data;
+ if (s2->data < 0)
+ return s2->data;
- v1 = pmbus_reg2data(data, sensor1);
- v2 = pmbus_reg2data(data, sensor2);
+ v1 = pmbus_reg2data(data, s1);
+ v2 = pmbus_reg2data(data, s2);
ret = !!(regval && v1 >= v2);
}
return ret;
@@ -748,23 +707,22 @@ static ssize_t pmbus_show_boolean(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_boolean *boolean = to_pmbus_boolean(attr);
struct pmbus_data *data = pmbus_update_device(dev);
int val;
- val = pmbus_get_boolean(data, attr->index);
+ val = pmbus_get_boolean(data, boolean, attr->index);
if (val < 0)
return val;
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t pmbus_show_sensor(struct device *dev,
- struct device_attribute *da, char *buf)
+ struct device_attribute *devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pmbus_data *data = pmbus_update_device(dev);
- struct pmbus_sensor *sensor;
+ struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
- sensor = &data->sensors[attr->index];
if (sensor->data < 0)
return sensor->data;
@@ -775,10 +733,9 @@ static ssize_t pmbus_set_sensor(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct pmbus_data *data = i2c_get_clientdata(client);
- struct pmbus_sensor *sensor = &data->sensors[attr->index];
+ struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
ssize_t rv = count;
long val = 0;
int ret;
@@ -793,7 +750,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
if (ret < 0)
rv = ret;
else
- data->sensors[attr->index].data = regval;
+ sensor->data = regval;
mutex_unlock(&data->update_lock);
return rv;
}
@@ -801,102 +758,130 @@ static ssize_t pmbus_set_sensor(struct device *dev,
static ssize_t pmbus_show_label(struct device *dev,
struct device_attribute *da, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct pmbus_data *data = i2c_get_clientdata(client);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_label *label = to_pmbus_label(da);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- data->labels[attr->index].label);
+ return snprintf(buf, PAGE_SIZE, "%s\n", label->label);
}
-#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
-do { \
- struct sensor_device_attribute *a \
- = &data->_type##s[data->num_##_type##s].attribute; \
- BUG_ON(data->num_attributes >= data->max_attributes); \
- sysfs_attr_init(&a->dev_attr.attr); \
- a->dev_attr.attr.name = _name; \
- a->dev_attr.attr.mode = _mode; \
- a->dev_attr.show = _show; \
- a->dev_attr.store = _set; \
- a->index = _idx; \
- data->attributes[data->num_attributes] = &a->dev_attr.attr; \
- data->num_attributes++; \
-} while (0)
-
-#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \
- PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \
- pmbus_show_##_type, NULL)
-
-#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \
- PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type, \
- pmbus_show_##_type, pmbus_set_##_type)
-
-static void pmbus_add_boolean(struct pmbus_data *data,
- const char *name, const char *type, int seq,
- int idx)
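+/*
+ * Add an attribute to the sysfs attribute group, growing the attribute
+ * array in chunks of PMBUS_ATTR_ALLOC_SIZE and keeping it NULL terminated.
+ */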
+static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
{
- struct pmbus_boolean *boolean;
-
- BUG_ON(data->num_booleans >= data->max_booleans);
-
- boolean = &data->booleans[data->num_booleans];
+ if (data->num_attributes >= data->max_attributes - 1) {
+ data->max_attributes += PMBUS_ATTR_ALLOC_SIZE;
+ data->group.attrs = krealloc(data->group.attrs,
+ sizeof(struct attribute *) *
+ data->max_attributes, GFP_KERNEL);
+ if (data->group.attrs == NULL)
+ return -ENOMEM;
+ }
- snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
- name, seq, type);
- PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx);
- data->num_booleans++;
+ data->group.attrs[data->num_attributes++] = attr;
+ data->group.attrs[data->num_attributes] = NULL;
+ return 0;
}
-static void pmbus_add_boolean_reg(struct pmbus_data *data,
- const char *name, const char *type,
- int seq, int reg, int bit)
+static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
+ const char *name,
+ umode_t mode,
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
{
- pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit);
+ sysfs_attr_init(&dev_attr->attr);
+ dev_attr->attr.name = name;
+ dev_attr->attr.mode = mode;
+ dev_attr->show = show;
+ dev_attr->store = store;
}
-static void pmbus_add_boolean_cmp(struct pmbus_data *data,
- const char *name, const char *type,
- int seq, int i1, int i2, int reg, int mask)
+static void pmbus_attr_init(struct sensor_device_attribute *a,
+ const char *name,
+ umode_t mode,
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count),
+ int idx)
{
- pmbus_add_boolean(data, name, type, seq,
- (i1 << 24) | (i2 << 16) | (reg << 8) | mask);
+ pmbus_dev_attr_init(&a->dev_attr, name, mode, show, store);
+ a->index = idx;
}
-static void pmbus_add_sensor(struct pmbus_data *data,
+static int pmbus_add_boolean(struct pmbus_data *data,
const char *name, const char *type, int seq,
- int page, int reg, enum pmbus_sensor_classes class,
- bool update, bool readonly)
+ struct pmbus_sensor *s1,
+ struct pmbus_sensor *s2,
+ u16 reg, u8 mask)
+{
+ struct pmbus_boolean *boolean;
+ struct sensor_device_attribute *a;
+
+ boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
+ if (!boolean)
+ return -ENOMEM;
+
+ a = &boolean->attribute;
+
+ snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
+ name, seq, type);
+ boolean->s1 = s1;
+ boolean->s2 = s2;
+ pmbus_attr_init(a, boolean->name, S_IRUGO, pmbus_show_boolean, NULL,
+ (reg << 8) | mask);
+
+ return pmbus_add_attribute(data, &a->dev_attr.attr);
+}
+
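+/*
+ * Allocate a sensor, create its sysfs attribute, and link the sensor into
+ * the driver's sensor list. Returns NULL on allocation or attribute errors.
+ */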
+static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int page, int reg,
+ enum pmbus_sensor_classes class,
+ bool update, bool readonly)
{
struct pmbus_sensor *sensor;
+ struct device_attribute *a;
- BUG_ON(data->num_sensors >= data->max_sensors);
+ sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return NULL;
+ a = &sensor->attribute;
- sensor = &data->sensors[data->num_sensors];
snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
name, seq, type);
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
- if (readonly)
- PMBUS_ADD_GET_ATTR(data, sensor->name, sensor,
- data->num_sensors);
- else
- PMBUS_ADD_SET_ATTR(data, sensor->name, sensor,
- data->num_sensors);
- data->num_sensors++;
+ pmbus_dev_attr_init(a, sensor->name,
+ readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
+ pmbus_show_sensor, pmbus_set_sensor);
+
+ if (pmbus_add_attribute(data, &a->attr))
+ return NULL;
+
+ sensor->next = data->sensors;
+ data->sensors = sensor;
+
+ return sensor;
}
-static void pmbus_add_label(struct pmbus_data *data,
- const char *name, int seq,
- const char *lstring, int index)
+static int pmbus_add_label(struct pmbus_data *data,
+ const char *name, int seq,
+ const char *lstring, int index)
{
struct pmbus_label *label;
+ struct device_attribute *a;
- BUG_ON(data->num_labels >= data->max_labels);
+ label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
+ if (!label)
+ return -ENOMEM;
+
+ a = &label->attribute;
- label = &data->labels[data->num_labels];
snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
if (!index)
strncpy(label->label, lstring, sizeof(label->label) - 1);
@@ -904,65 +889,8 @@ static void pmbus_add_label(struct pmbus_data *data,
snprintf(label->label, sizeof(label->label), "%s%d", lstring,
index);
- PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels);
- data->num_labels++;
-}
-
-/*
- * Determine maximum number of sensors, booleans, and labels.
- * To keep things simple, only make a rough high estimate.
- */
-static void pmbus_find_max_attr(struct i2c_client *client,
- struct pmbus_data *data)
-{
- const struct pmbus_driver_info *info = data->info;
- int page, max_sensors, max_booleans, max_labels;
-
- max_sensors = PMBUS_MAX_INPUT_SENSORS;
- max_booleans = PMBUS_MAX_INPUT_BOOLEANS;
- max_labels = PMBUS_MAX_INPUT_LABELS;
-
- for (page = 0; page < info->pages; page++) {
- if (info->func[page] & PMBUS_HAVE_VOUT) {
- max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE;
- max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE;
- max_labels++;
- }
- if (info->func[page] & PMBUS_HAVE_IOUT) {
- max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE;
- max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE;
- max_labels++;
- }
- if (info->func[page] & PMBUS_HAVE_POUT) {
- max_sensors += PMBUS_POUT_SENSORS_PER_PAGE;
- max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE;
- max_labels++;
- }
- if (info->func[page] & PMBUS_HAVE_FAN12) {
- max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
- max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
- }
- if (info->func[page] & PMBUS_HAVE_FAN34) {
- max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
- max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
- }
- if (info->func[page] & PMBUS_HAVE_TEMP) {
- max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
- max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
- }
- if (info->func[page] & PMBUS_HAVE_TEMP2) {
- max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
- max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
- }
- if (info->func[page] & PMBUS_HAVE_TEMP3) {
- max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
- max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
- }
- }
- data->max_sensors = max_sensors;
- data->max_booleans = max_booleans;
- data->max_labels = max_labels;
- data->max_attributes = max_sensors + max_booleans + max_labels;
+ pmbus_dev_attr_init(a, label->name, S_IRUGO, pmbus_show_label, NULL);
+ return pmbus_add_attribute(data, &a->attr);
}
/*
@@ -975,12 +903,12 @@ static void pmbus_find_max_attr(struct i2c_client *client,
*/
struct pmbus_limit_attr {
u16 reg; /* Limit register */
+ u16 sbit; /* Alarm attribute status bit */
bool update; /* True if register needs updates */
bool low; /* True if low limit; for limits with compare
functions only */
const char *attr; /* Attribute name */
const char *alarm; /* Alarm attribute name */
- u32 sbit; /* Alarm attribute status bit */
};
/*
@@ -988,7 +916,9 @@ struct pmbus_limit_attr {
* description includes a reference to the associated limit attributes.
*/
struct pmbus_sensor_attr {
- u8 reg; /* sensor register */
+ u16 reg; /* sensor register */
+ u8 gbit; /* generic status bit */
+ u8 nlimit; /* # of limit registers */
enum pmbus_sensor_classes class;/* sensor class */
const char *label; /* sensor label */
bool paged; /* true if paged sensor */
@@ -997,47 +927,47 @@ struct pmbus_sensor_attr {
u32 func; /* sensor mask */
u32 sfunc; /* sensor status mask */
int sbase; /* status base register */
- u32 gbit; /* generic status bit */
const struct pmbus_limit_attr *limit;/* limit registers */
- int nlimit; /* # of limit registers */
};
/*
* Add a set of limit attributes and, if supported, the associated
* alarm attributes.
+ * returns 0 if no alarm register found, 1 if an alarm register was found,
+ * < 0 on errors.
*/
-static bool pmbus_add_limit_attrs(struct i2c_client *client,
- struct pmbus_data *data,
- const struct pmbus_driver_info *info,
- const char *name, int index, int page,
- int cbase,
- const struct pmbus_sensor_attr *attr)
+static int pmbus_add_limit_attrs(struct i2c_client *client,
+ struct pmbus_data *data,
+ const struct pmbus_driver_info *info,
+ const char *name, int index, int page,
+ struct pmbus_sensor *base,
+ const struct pmbus_sensor_attr *attr)
{
const struct pmbus_limit_attr *l = attr->limit;
int nlimit = attr->nlimit;
- bool have_alarm = false;
- int i, cindex;
+ int have_alarm = 0;
+ int i, ret;
+ struct pmbus_sensor *curr;
for (i = 0; i < nlimit; i++) {
if (pmbus_check_word_register(client, page, l->reg)) {
- cindex = data->num_sensors;
- pmbus_add_sensor(data, name, l->attr, index, page,
- l->reg, attr->class,
- attr->update || l->update,
- false);
+ curr = pmbus_add_sensor(data, name, l->attr, index,
+ page, l->reg, attr->class,
+ attr->update || l->update,
+ false);
+ if (!curr)
+ return -ENOMEM;
if (l->sbit && (info->func[page] & attr->sfunc)) {
- if (attr->compare) {
- pmbus_add_boolean_cmp(data, name,
- l->alarm, index,
- l->low ? cindex : cbase,
- l->low ? cbase : cindex,
- attr->sbase + page, l->sbit);
- } else {
- pmbus_add_boolean_reg(data, name,
- l->alarm, index,
- attr->sbase + page, l->sbit);
- }
- have_alarm = true;
+ ret = pmbus_add_boolean(data, name,
+ l->alarm, index,
+ attr->compare ? l->low ? curr : base
+ : NULL,
+ attr->compare ? l->low ? base : curr
+ : NULL,
+ attr->sbase + page, l->sbit);
+ if (ret)
+ return ret;
+ have_alarm = 1;
}
}
l++;
@@ -1045,45 +975,59 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
return have_alarm;
}
-static void pmbus_add_sensor_attrs_one(struct i2c_client *client,
- struct pmbus_data *data,
- const struct pmbus_driver_info *info,
- const char *name,
- int index, int page,
- const struct pmbus_sensor_attr *attr)
+static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
+ struct pmbus_data *data,
+ const struct pmbus_driver_info *info,
+ const char *name,
+ int index, int page,
+ const struct pmbus_sensor_attr *attr)
{
- bool have_alarm;
- int cbase = data->num_sensors;
-
- if (attr->label)
- pmbus_add_label(data, name, index, attr->label,
- attr->paged ? page + 1 : 0);
- pmbus_add_sensor(data, name, "input", index, page, attr->reg,
- attr->class, true, true);
+ struct pmbus_sensor *base;
+ int ret;
+
+ if (attr->label) {
+ ret = pmbus_add_label(data, name, index, attr->label,
+ attr->paged ? page + 1 : 0);
+ if (ret)
+ return ret;
+ }
+ base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
+ attr->class, true, true);
+ if (!base)
+ return -ENOMEM;
if (attr->sfunc) {
- have_alarm = pmbus_add_limit_attrs(client, data, info, name,
- index, page, cbase, attr);
+ ret = pmbus_add_limit_attrs(client, data, info, name,
+ index, page, base, attr);
+ if (ret < 0)
+ return ret;
/*
* Add generic alarm attribute only if there are no individual
* alarm attributes, if there is a global alarm bit, and if
* the generic status register for this page is accessible.
*/
- if (!have_alarm && attr->gbit &&
- pmbus_check_byte_register(client, page, PMBUS_STATUS_BYTE))
- pmbus_add_boolean_reg(data, name, "alarm", index,
- PB_STATUS_BASE + page,
- attr->gbit);
+ if (!ret && attr->gbit &&
+ pmbus_check_byte_register(client, page,
+ data->status_register)) {
+ ret = pmbus_add_boolean(data, name, "alarm", index,
+ NULL, NULL,
+ PB_STATUS_BASE + page,
+ attr->gbit);
+ if (ret)
+ return ret;
+ }
}
+ return 0;
}
-static void pmbus_add_sensor_attrs(struct i2c_client *client,
- struct pmbus_data *data,
- const char *name,
- const struct pmbus_sensor_attr *attrs,
- int nattrs)
+static int pmbus_add_sensor_attrs(struct i2c_client *client,
+ struct pmbus_data *data,
+ const char *name,
+ const struct pmbus_sensor_attr *attrs,
+ int nattrs)
{
const struct pmbus_driver_info *info = data->info;
int index, i;
+ int ret;
index = 1;
for (i = 0; i < nattrs; i++) {
@@ -1093,12 +1037,16 @@ static void pmbus_add_sensor_attrs(struct i2c_client *client,
for (page = 0; page < pages; page++) {
if (!(info->func[page] & attrs->func))
continue;
- pmbus_add_sensor_attrs_one(client, data, info, name,
- index, page, attrs);
+ ret = pmbus_add_sensor_attrs_one(client, data, info,
+ name, index, page,
+ attrs);
+ if (ret)
+ return ret;
index++;
}
attrs++;
}
+ return 0;
}
static const struct pmbus_limit_attr vin_limit_attrs[] = {
@@ -1140,6 +1088,30 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
},
};
+static const struct pmbus_limit_attr vmon_limit_attrs[] = {
+ {
+ .reg = PMBUS_VIRT_VMON_UV_WARN_LIMIT,
+ .attr = "min",
+ .alarm = "min_alarm",
+ .sbit = PB_VOLTAGE_UV_WARNING,
+ }, {
+ .reg = PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
+ .attr = "lcrit",
+ .alarm = "lcrit_alarm",
+ .sbit = PB_VOLTAGE_UV_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_VMON_OV_WARN_LIMIT,
+ .attr = "max",
+ .alarm = "max_alarm",
+ .sbit = PB_VOLTAGE_OV_WARNING,
+ }, {
+ .reg = PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
+ .attr = "crit",
+ .alarm = "crit_alarm",
+ .sbit = PB_VOLTAGE_OV_FAULT,
+ }
+};
+
static const struct pmbus_limit_attr vout_limit_attrs[] = {
{
.reg = PMBUS_VOUT_UV_WARN_LIMIT,
@@ -1191,6 +1163,15 @@ static const struct pmbus_sensor_attr voltage_attributes[] = {
.limit = vin_limit_attrs,
.nlimit = ARRAY_SIZE(vin_limit_attrs),
}, {
+ .reg = PMBUS_VIRT_READ_VMON,
+ .class = PSC_VOLTAGE_IN,
+ .label = "vmon",
+ .func = PMBUS_HAVE_VMON,
+ .sfunc = PMBUS_HAVE_STATUS_VMON,
+ .sbase = PB_STATUS_VMON_BASE,
+ .limit = vmon_limit_attrs,
+ .nlimit = ARRAY_SIZE(vmon_limit_attrs),
+ }, {
.reg = PMBUS_READ_VCAP,
.class = PSC_VOLTAGE_IN,
.label = "vcap",
@@ -1553,12 +1534,13 @@ static const u32 pmbus_fan_status_flags[] = {
};
/* Fans */
-static void pmbus_add_fan_attributes(struct i2c_client *client,
- struct pmbus_data *data)
+static int pmbus_add_fan_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
{
const struct pmbus_driver_info *info = data->info;
int index = 1;
int page;
+ int ret;
for (page = 0; page < info->pages; page++) {
int f;
@@ -1584,9 +1566,10 @@ static void pmbus_add_fan_attributes(struct i2c_client *client,
(!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4)))))
continue;
- pmbus_add_sensor(data, "fan", "input", index, page,
- pmbus_fan_registers[f], PSC_FAN, true,
- true);
+ if (pmbus_add_sensor(data, "fan", "input", index,
+ page, pmbus_fan_registers[f],
+ PSC_FAN, true, true) == NULL)
+ return -ENOMEM;
/*
* Each fan status register covers multiple fans,
@@ -1601,39 +1584,55 @@ static void pmbus_add_fan_attributes(struct i2c_client *client,
base = PB_STATUS_FAN34_BASE + page;
else
base = PB_STATUS_FAN_BASE + page;
- pmbus_add_boolean_reg(data, "fan", "alarm",
- index, base,
+ ret = pmbus_add_boolean(data, "fan",
+ "alarm", index, NULL, NULL, base,
PB_FAN_FAN1_WARNING >> (f & 1));
- pmbus_add_boolean_reg(data, "fan", "fault",
- index, base,
+ if (ret)
+ return ret;
+ ret = pmbus_add_boolean(data, "fan",
+ "fault", index, NULL, NULL, base,
PB_FAN_FAN1_FAULT >> (f & 1));
+ if (ret)
+ return ret;
}
index++;
}
}
+ return 0;
}
-static void pmbus_find_attributes(struct i2c_client *client,
- struct pmbus_data *data)
+static int pmbus_find_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
{
+ int ret;
+
/* Voltage sensors */
- pmbus_add_sensor_attrs(client, data, "in", voltage_attributes,
- ARRAY_SIZE(voltage_attributes));
+ ret = pmbus_add_sensor_attrs(client, data, "in", voltage_attributes,
+ ARRAY_SIZE(voltage_attributes));
+ if (ret)
+ return ret;
/* Current sensors */
- pmbus_add_sensor_attrs(client, data, "curr", current_attributes,
- ARRAY_SIZE(current_attributes));
+ ret = pmbus_add_sensor_attrs(client, data, "curr", current_attributes,
+ ARRAY_SIZE(current_attributes));
+ if (ret)
+ return ret;
/* Power sensors */
- pmbus_add_sensor_attrs(client, data, "power", power_attributes,
- ARRAY_SIZE(power_attributes));
+ ret = pmbus_add_sensor_attrs(client, data, "power", power_attributes,
+ ARRAY_SIZE(power_attributes));
+ if (ret)
+ return ret;
/* Temperature sensors */
- pmbus_add_sensor_attrs(client, data, "temp", temp_attributes,
- ARRAY_SIZE(temp_attributes));
+ ret = pmbus_add_sensor_attrs(client, data, "temp", temp_attributes,
+ ARRAY_SIZE(temp_attributes));
+ if (ret)
+ return ret;
/* Fans */
- pmbus_add_fan_attributes(client, data);
+ ret = pmbus_add_fan_attributes(client, data);
+ return ret;
}
/*
@@ -1672,127 +1671,119 @@ static int pmbus_identify_common(struct i2c_client *client,
}
}
- /* Determine maximum number of sensors, booleans, and labels */
- pmbus_find_max_attr(client, data);
pmbus_clear_fault_page(client, 0);
return 0;
}
-int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
- struct pmbus_driver_info *info)
+static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
+ struct pmbus_driver_info *info)
{
- const struct pmbus_platform_data *pdata = client->dev.platform_data;
- struct pmbus_data *data;
+ struct device *dev = &client->dev;
int ret;
- if (!info) {
- dev_err(&client->dev, "Missing chip information");
- return -ENODEV;
- }
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
- | I2C_FUNC_SMBUS_BYTE_DATA
- | I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "No memory to allocate driver data\n");
- return -ENOMEM;
- }
-
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
-
- /* Bail out if PMBus status register does not exist. */
- if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
- dev_err(&client->dev, "PMBus status register not found\n");
- return -ENODEV;
+ /*
+ * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
+ * to use PMBUS_STATUS_WORD instead if that is the case.
+ * Bail out if neither register is supported.
+ */
+ data->status_register = PMBUS_STATUS_BYTE;
+ ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
+ if (ret < 0 || ret == 0xff) {
+ data->status_register = PMBUS_STATUS_WORD;
+ ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
+ if (ret < 0 || ret == 0xffff) {
+ dev_err(dev, "PMBus status register not found\n");
+ return -ENODEV;
+ }
}
- if (pdata)
- data->flags = pdata->flags;
- data->info = info;
-
pmbus_clear_faults(client);
if (info->identify) {
ret = (*info->identify)(client, info);
if (ret < 0) {
- dev_err(&client->dev, "Chip identification failed\n");
+ dev_err(dev, "Chip identification failed\n");
return ret;
}
}
if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
- dev_err(&client->dev, "Bad number of PMBus pages: %d\n",
- info->pages);
+ dev_err(dev, "Bad number of PMBus pages: %d\n", info->pages);
return -ENODEV;
}
ret = pmbus_identify_common(client, data);
if (ret < 0) {
- dev_err(&client->dev, "Failed to identify chip capabilities\n");
+ dev_err(dev, "Failed to identify chip capabilities\n");
return ret;
}
+ return 0;
+}
- ret = -ENOMEM;
- data->sensors = devm_kzalloc(&client->dev, sizeof(struct pmbus_sensor)
- * data->max_sensors, GFP_KERNEL);
- if (!data->sensors) {
- dev_err(&client->dev, "No memory to allocate sensor data\n");
- return -ENOMEM;
- }
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info)
+{
+ struct device *dev = &client->dev;
+ const struct pmbus_platform_data *pdata = dev->platform_data;
+ struct pmbus_data *data;
+ int ret;
- data->booleans = devm_kzalloc(&client->dev, sizeof(struct pmbus_boolean)
- * data->max_booleans, GFP_KERNEL);
- if (!data->booleans) {
- dev_err(&client->dev, "No memory to allocate boolean data\n");
- return -ENOMEM;
- }
+ if (!info)
+ return -ENODEV;
- data->labels = devm_kzalloc(&client->dev, sizeof(struct pmbus_label)
- * data->max_labels, GFP_KERNEL);
- if (!data->labels) {
- dev_err(&client->dev, "No memory to allocate label data\n");
- return -ENOMEM;
- }
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
- data->attributes = devm_kzalloc(&client->dev, sizeof(struct attribute *)
- * data->max_attributes, GFP_KERNEL);
- if (!data->attributes) {
- dev_err(&client->dev, "No memory to allocate attribute data\n");
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- }
- pmbus_find_attributes(client, data);
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+ data->dev = dev;
+
+ if (pdata)
+ data->flags = pdata->flags;
+ data->info = info;
+
+ ret = pmbus_init_common(client, data, info);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_find_attributes(client, data);
+ if (ret)
+ goto out_kfree;
/*
* If there are no attributes, something is wrong.
* Bail out instead of trying to register nothing.
*/
if (!data->num_attributes) {
- dev_err(&client->dev, "No attributes found\n");
- return -ENODEV;
+ dev_err(dev, "No attributes found\n");
+ ret = -ENODEV;
+ goto out_kfree;
}
/* Register sysfs hooks */
- data->group.attrs = data->attributes;
- ret = sysfs_create_group(&client->dev.kobj, &data->group);
+ ret = sysfs_create_group(&dev->kobj, &data->group);
if (ret) {
- dev_err(&client->dev, "Failed to create sysfs entries\n");
- return ret;
+ dev_err(dev, "Failed to create sysfs entries\n");
+ goto out_kfree;
}
- data->hwmon_dev = hwmon_device_register(&client->dev);
+ data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
ret = PTR_ERR(data->hwmon_dev);
- dev_err(&client->dev, "Failed to register hwmon device\n");
+ dev_err(dev, "Failed to register hwmon device\n");
goto out_hwmon_device_register;
}
return 0;
out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &data->group);
+ sysfs_remove_group(&dev->kobj, &data->group);
+out_kfree:
+ kfree(data->group.attrs);
return ret;
}
EXPORT_SYMBOL_GPL(pmbus_do_probe);
@@ -1802,6 +1793,7 @@ int pmbus_do_remove(struct i2c_client *client)
struct pmbus_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &data->group);
+ kfree(data->group.attrs);
return 0;
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index fc5eed8e85bb..819644121259 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -2,6 +2,7 @@
* Hardware monitoring driver for ZL6100 and compatibles
*
* Copyright (c) 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -45,12 +46,87 @@ struct zl6100_data {
#define ZL6100_MFR_XTEMP_ENABLE (1 << 7)
+#define MFR_VMON_OV_FAULT_LIMIT 0xf5
+#define MFR_VMON_UV_FAULT_LIMIT 0xf6
+#define MFR_READ_VMON 0xf7
+
+#define VMON_UV_WARNING (1 << 5)
+#define VMON_OV_WARNING (1 << 4)
+#define VMON_UV_FAULT (1 << 1)
+#define VMON_OV_FAULT (1 << 0)
+
#define ZL6100_WAIT_TIME 1000 /* uS */
static ushort delay = ZL6100_WAIT_TIME;
module_param(delay, ushort, 0644);
MODULE_PARM_DESC(delay, "Delay between chip accesses in uS");
+/* Convert linear sensor value to milli-units */
+static long zl6100_l2d(s16 l)
+{
+ s16 exponent;
+ s32 mantissa;
+ long val;
+
+ exponent = l >> 11;
+ mantissa = ((s16)((l & 0x7ff) << 5)) >> 5;
+
+ val = mantissa;
+
+ /* scale result to milli-units */
+ val = val * 1000L;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return val;
+}
+
+#define MAX_MANTISSA (1023 * 1000)
+#define MIN_MANTISSA (511 * 1000)
+
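+/* Convert milli-units to linear sensor value */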
+static u16 zl6100_d2l(long val)
+{
+ s16 exponent = 0, mantissa;
+ bool negative = false;
+
+ /* simple case */
+ if (val == 0)
+ return 0;
+
+ if (val < 0) {
+ negative = true;
+ val = -val;
+ }
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val >= MAX_MANTISSA && exponent < 15) {
+ exponent++;
+ val >>= 1;
+ }
+ /* Increase small mantissa to improve precision */
+ while (val < MIN_MANTISSA && exponent > -15) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+ /* Ensure that resulting number is within range */
+ if (mantissa > 0x3ff)
+ mantissa = 0x3ff;
+
+ /* restore sign */
+ if (negative)
+ mantissa = -mantissa;
+
+ /* Convert to 5 bit exponent, 11 bit mantissa */
+ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
/* Some chips need a delay between accesses */
static inline void zl6100_wait(const struct zl6100_data *data)
{
@@ -65,9 +141,9 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct zl6100_data *data = to_zl6100_data(info);
- int ret;
+ int ret, vreg;
- if (page || reg >= PMBUS_VIRT_BASE)
+ if (page > 0)
return -ENXIO;
if (data->id == zl2005) {
@@ -83,9 +159,39 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
}
}
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ vreg = MFR_READ_VMON;
+ break;
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
+ vreg = MFR_VMON_OV_FAULT_LIMIT;
+ break;
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
+ vreg = MFR_VMON_UV_FAULT_LIMIT;
+ break;
+ default:
+ if (reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+ vreg = reg;
+ break;
+ }
+
zl6100_wait(data);
- ret = pmbus_read_word_data(client, page, reg);
+ ret = pmbus_read_word_data(client, page, vreg);
data->access = ktime_get();
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
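+ /*
+ * The chip only provides VMON fault limits; report the warning
+ * limits as 90% of the overvoltage fault limit and 110% of the
+ * undervoltage fault limit.
+ */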
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 9, 10));
+ break;
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 11, 10));
+ break;
+ }
return ret;
}
@@ -94,13 +200,35 @@ static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct zl6100_data *data = to_zl6100_data(info);
- int ret;
+ int ret, status;
if (page > 0)
return -ENXIO;
zl6100_wait(data);
- ret = pmbus_read_byte_data(client, page, reg);
+
+ switch (reg) {
+ case PMBUS_VIRT_STATUS_VMON:
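+ /*
+ * Translate the manufacturer specific VMON status bits into
+ * generic PMBus voltage status bits.
+ */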
+ ret = pmbus_read_byte_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (ret < 0)
+ break;
+
+ status = 0;
+ if (ret & VMON_UV_WARNING)
+ status |= PB_VOLTAGE_UV_WARNING;
+ if (ret & VMON_OV_WARNING)
+ status |= PB_VOLTAGE_OV_WARNING;
+ if (ret & VMON_UV_FAULT)
+ status |= PB_VOLTAGE_UV_FAULT;
+ if (ret & VMON_OV_FAULT)
+ status |= PB_VOLTAGE_OV_FAULT;
+ ret = status;
+ break;
+ default:
+ ret = pmbus_read_byte_data(client, page, reg);
+ break;
+ }
data->access = ktime_get();
return ret;
@@ -111,13 +239,38 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct zl6100_data *data = to_zl6100_data(info);
- int ret;
+ int ret, vreg;
- if (page || reg >= PMBUS_VIRT_BASE)
+ if (page > 0)
return -ENXIO;
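+ /*
+ * Writes to the virtual VMON warning limits are rescaled and stored
+ * in the corresponding fault limit register; clear the cache so the
+ * derived values are re-read.
+ */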
+ switch (reg) {
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 9));
+ vreg = MFR_VMON_OV_FAULT_LIMIT;
+ pmbus_clear_cache(client);
+ break;
+ case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
+ vreg = MFR_VMON_OV_FAULT_LIMIT;
+ pmbus_clear_cache(client);
+ break;
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 11));
+ vreg = MFR_VMON_UV_FAULT_LIMIT;
+ pmbus_clear_cache(client);
+ break;
+ case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
+ vreg = MFR_VMON_UV_FAULT_LIMIT;
+ pmbus_clear_cache(client);
+ break;
+ default:
+ if (reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+ vreg = reg;
+ }
+
zl6100_wait(data);
- ret = pmbus_write_word_data(client, page, reg, word);
+ ret = pmbus_write_word_data(client, page, vreg, word);
data->access = ktime_get();
return ret;
@@ -225,6 +378,13 @@ static int zl6100_probe(struct i2c_client *client,
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ /*
+ * ZL2004, ZL9101M, and ZL9117M support monitoring an extra voltage
+ * (VMON for ZL2004, VDRV for ZL9101M and ZL9117M). Report it as vmon.
+ */
+ if (data->id == zl2004 || data->id == zl9101 || data->id == zl9117)
+ info->func[0] |= PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;
+
ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 1c85d39df171..bfe326e896df 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -139,12 +139,12 @@ static const u8 sht15_crc8_table[] = {
* @reg: associated regulator (if specified).
* @nb: notifier block to handle notifications of voltage
* changes.
- * @supply_uV: local copy of supply voltage used to allow use of
+ * @supply_uv: local copy of supply voltage used to allow use of
* regulator consumer if available.
- * @supply_uV_valid: indicates that an updated value has not yet been
+ * @supply_uv_valid: indicates that an updated value has not yet been
* obtained from the regulator and so any calculations
* based upon it will be invalid.
- * @update_supply_work: work struct that is used to update the supply_uV.
+ * @update_supply_work: work struct that is used to update the supply_uv.
* @interrupt_handled: flag used to indicate a handler has been scheduled.
*/
struct sht15_data {
@@ -166,8 +166,8 @@ struct sht15_data {
struct device *hwmon_dev;
struct regulator *reg;
struct notifier_block nb;
- int supply_uV;
- bool supply_uV_valid;
+ int supply_uv;
+ bool supply_uv_valid;
struct work_struct update_supply_work;
atomic_t interrupt_handled;
};
@@ -212,11 +212,13 @@ static u8 sht15_crc8(struct sht15_data *data,
*
* This implements section 3.4 of the data sheet
*/
-static void sht15_connection_reset(struct sht15_data *data)
+static int sht15_connection_reset(struct sht15_data *data)
{
- int i;
+ int i, err;
- gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpio_direction_output(data->pdata->gpio_data, 1);
+ if (err)
+ return err;
ndelay(SHT15_TSCKL);
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
@@ -226,6 +228,7 @@ static void sht15_connection_reset(struct sht15_data *data)
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
}
+ return 0;
}
/**
@@ -251,10 +254,14 @@ static inline void sht15_send_bit(struct sht15_data *data, int val)
* conservative ones used in implementation. This implements
* figure 12 on the data sheet.
*/
-static void sht15_transmission_start(struct sht15_data *data)
+static int sht15_transmission_start(struct sht15_data *data)
{
+ int err;
+
/* ensure data is high and output */
- gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpio_direction_output(data->pdata->gpio_data, 1);
+ if (err)
+ return err;
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
@@ -270,6 +277,7 @@ static void sht15_transmission_start(struct sht15_data *data)
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
+ return 0;
}
/**
@@ -293,13 +301,19 @@ static void sht15_send_byte(struct sht15_data *data, u8 byte)
*/
static int sht15_wait_for_response(struct sht15_data *data)
{
- gpio_direction_input(data->pdata->gpio_data);
+ int err;
+
+ err = gpio_direction_input(data->pdata->gpio_data);
+ if (err)
+ return err;
gpio_set_value(data->pdata->gpio_sck, 1);
ndelay(SHT15_TSCKH);
if (gpio_get_value(data->pdata->gpio_data)) {
gpio_set_value(data->pdata->gpio_sck, 0);
dev_err(data->dev, "Command not acknowledged\n");
- sht15_connection_reset(data);
+ err = sht15_connection_reset(data);
+ if (err)
+ return err;
return -EIO;
}
gpio_set_value(data->pdata->gpio_sck, 0);
@@ -317,12 +331,13 @@ static int sht15_wait_for_response(struct sht15_data *data)
*/
static int sht15_send_cmd(struct sht15_data *data, u8 cmd)
{
- int ret = 0;
+ int err;
- sht15_transmission_start(data);
+ err = sht15_transmission_start(data);
+ if (err)
+ return err;
sht15_send_byte(data, cmd);
- ret = sht15_wait_for_response(data);
- return ret;
+ return sht15_wait_for_response(data);
}
/**
@@ -352,9 +367,13 @@ static int sht15_soft_reset(struct sht15_data *data)
* Each byte of data is acknowledged by pulling the data line
* low for one clock pulse.
*/
-static void sht15_ack(struct sht15_data *data)
+static int sht15_ack(struct sht15_data *data)
{
- gpio_direction_output(data->pdata->gpio_data, 0);
+ int err;
+
+ err = gpio_direction_output(data->pdata->gpio_data, 0);
+ if (err)
+ return err;
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_sck, 1);
ndelay(SHT15_TSU);
@@ -362,7 +381,7 @@ static void sht15_ack(struct sht15_data *data)
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_data, 1);
- gpio_direction_input(data->pdata->gpio_data);
+ return gpio_direction_input(data->pdata->gpio_data);
}
/**
@@ -371,14 +390,19 @@ static void sht15_ack(struct sht15_data *data)
*
* This is basically a NAK (single clock pulse, data high).
*/
-static void sht15_end_transmission(struct sht15_data *data)
+static int sht15_end_transmission(struct sht15_data *data)
{
- gpio_direction_output(data->pdata->gpio_data, 1);
+ int err;
+
+ err = gpio_direction_output(data->pdata->gpio_data, 1);
+ if (err)
+ return err;
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_sck, 1);
ndelay(SHT15_TSCKH);
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
+ return 0;
}
/**
@@ -410,17 +434,19 @@ static u8 sht15_read_byte(struct sht15_data *data)
*/
static int sht15_send_status(struct sht15_data *data, u8 status)
{
- int ret;
-
- ret = sht15_send_cmd(data, SHT15_WRITE_STATUS);
- if (ret)
- return ret;
- gpio_direction_output(data->pdata->gpio_data, 1);
+ int err;
+
+ err = sht15_send_cmd(data, SHT15_WRITE_STATUS);
+ if (err)
+ return err;
+ err = gpio_direction_output(data->pdata->gpio_data, 1);
+ if (err)
+ return err;
ndelay(SHT15_TSU);
sht15_send_byte(data, status);
- ret = sht15_wait_for_response(data);
- if (ret)
- return ret;
+ err = sht15_wait_for_response(data);
+ if (err)
+ return err;
data->val_status = status;
return 0;
@@ -446,7 +472,7 @@ static int sht15_update_status(struct sht15_data *data)
|| !data->status_valid) {
ret = sht15_send_cmd(data, SHT15_READ_STATUS);
if (ret)
- goto error_ret;
+ goto unlock;
status = sht15_read_byte(data);
if (data->checksumming) {
@@ -458,7 +484,9 @@ static int sht15_update_status(struct sht15_data *data)
== dev_checksum);
}
- sht15_end_transmission(data);
+ ret = sht15_end_transmission(data);
+ if (ret)
+ goto unlock;
/*
* Perform checksum validation on the received data.
@@ -469,27 +497,27 @@ static int sht15_update_status(struct sht15_data *data)
previous_config = data->val_status & 0x07;
ret = sht15_soft_reset(data);
if (ret)
- goto error_ret;
+ goto unlock;
if (previous_config) {
ret = sht15_send_status(data, previous_config);
if (ret) {
dev_err(data->dev,
"CRC validation failed, unable "
"to restore device settings\n");
- goto error_ret;
+ goto unlock;
}
}
ret = -EAGAIN;
- goto error_ret;
+ goto unlock;
}
data->val_status = status;
data->status_valid = true;
data->last_status = jiffies;
}
-error_ret:
- mutex_unlock(&data->read_lock);
+unlock:
+ mutex_unlock(&data->read_lock);
return ret;
}
@@ -511,7 +539,9 @@ static int sht15_measurement(struct sht15_data *data,
if (ret)
return ret;
- gpio_direction_input(data->pdata->gpio_data);
+ ret = gpio_direction_input(data->pdata->gpio_data);
+ if (ret)
+ return ret;
atomic_set(&data->interrupt_handled, 0);
enable_irq(gpio_to_irq(data->pdata->gpio_data));
@@ -524,9 +554,14 @@ static int sht15_measurement(struct sht15_data *data,
ret = wait_event_timeout(data->wait_queue,
(data->state == SHT15_READING_NOTHING),
msecs_to_jiffies(timeout_msecs));
- if (ret == 0) {/* timeout occurred */
+ if (data->state != SHT15_READING_NOTHING) { /* I/O error occurred */
+ data->state = SHT15_READING_NOTHING;
+ return -EIO;
+ } else if (ret == 0) { /* timeout occurred */
disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
- sht15_connection_reset(data);
+ ret = sht15_connection_reset(data);
+ if (ret)
+ return ret;
return -ETIME;
}
@@ -570,17 +605,17 @@ static int sht15_update_measurements(struct sht15_data *data)
data->state = SHT15_READING_HUMID;
ret = sht15_measurement(data, SHT15_MEASURE_RH, 160);
if (ret)
- goto error_ret;
+ goto unlock;
data->state = SHT15_READING_TEMP;
ret = sht15_measurement(data, SHT15_MEASURE_TEMP, 400);
if (ret)
- goto error_ret;
+ goto unlock;
data->measurements_valid = true;
data->last_measurement = jiffies;
}
-error_ret:
- mutex_unlock(&data->read_lock);
+unlock:
+ mutex_unlock(&data->read_lock);
return ret;
}
@@ -598,8 +633,8 @@ static inline int sht15_calc_temp(struct sht15_data *data)
for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
/* Find pointer to interpolate */
- if (data->supply_uV > temppoints[i - 1].vdd) {
- d1 = (data->supply_uV - temppoints[i - 1].vdd)
+ if (data->supply_uv > temppoints[i - 1].vdd) {
+ d1 = (data->supply_uv - temppoints[i - 1].vdd)
* (temppoints[i].d1 - temppoints[i - 1].d1)
/ (temppoints[i].vdd - temppoints[i - 1].vdd)
+ temppoints[i - 1].d1;
@@ -818,7 +853,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
/* Read the data back from the device */
val = sht15_read_byte(data);
val <<= 8;
- sht15_ack(data);
+ if (sht15_ack(data))
+ goto wakeup;
val |= sht15_read_byte(data);
if (data->checksumming) {
@@ -826,7 +862,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
* Ask the device for a checksum and read it back.
* Note: the device sends the checksum byte reversed.
*/
- sht15_ack(data);
+ if (sht15_ack(data))
+ goto wakeup;
dev_checksum = sht15_reverse(sht15_read_byte(data));
checksum_vals[0] = (data->state == SHT15_READING_TEMP) ?
SHT15_MEASURE_TEMP : SHT15_MEASURE_RH;
@@ -837,7 +874,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
}
/* Tell the device we are done */
- sht15_end_transmission(data);
+ if (sht15_end_transmission(data))
+ goto wakeup;
switch (data->state) {
case SHT15_READING_TEMP:
@@ -851,6 +889,7 @@ static void sht15_bh_read_data(struct work_struct *work_s)
}
data->state = SHT15_READING_NOTHING;
+wakeup:
wake_up(&data->wait_queue);
}
@@ -859,7 +898,7 @@ static void sht15_update_voltage(struct work_struct *work_s)
struct sht15_data *data
= container_of(work_s, struct sht15_data,
update_supply_work);
- data->supply_uV = regulator_get_voltage(data->reg);
+ data->supply_uv = regulator_get_voltage(data->reg);
}
/**
@@ -878,7 +917,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
struct sht15_data *data = container_of(nb, struct sht15_data, nb);
if (event == REGULATOR_EVENT_VOLTAGE_CHANGE)
- data->supply_uV_valid = false;
+ data->supply_uv_valid = false;
schedule_work(&data->update_supply_work);
return NOTIFY_OK;
@@ -906,7 +945,7 @@ static int sht15_probe(struct platform_device *pdev)
return -EINVAL;
}
data->pdata = pdev->dev.platform_data;
- data->supply_uV = data->pdata->supply_mv * 1000;
+ data->supply_uv = data->pdata->supply_mv * 1000;
if (data->pdata->checksum)
data->checksumming = true;
if (data->pdata->no_otp_reload)
@@ -924,7 +963,7 @@ static int sht15_probe(struct platform_device *pdev)
voltage = regulator_get_voltage(data->reg);
if (voltage)
- data->supply_uV = voltage;
+ data->supply_uv = voltage;
regulator_enable(data->reg);
/*
@@ -942,17 +981,17 @@ static int sht15_probe(struct platform_device *pdev)
}
/* Try requesting the GPIOs */
- ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_sck, "SHT15 sck");
+ ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck,
+ GPIOF_OUT_INIT_LOW, "SHT15 sck");
if (ret) {
- dev_err(&pdev->dev, "gpio request failed\n");
+ dev_err(&pdev->dev, "clock line GPIO request failed\n");
goto err_release_reg;
}
- gpio_direction_output(data->pdata->gpio_sck, 0);
ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data,
"SHT15 data");
if (ret) {
- dev_err(&pdev->dev, "gpio request failed\n");
+ dev_err(&pdev->dev, "data line GPIO request failed\n");
goto err_release_reg;
}
@@ -966,7 +1005,9 @@ static int sht15_probe(struct platform_device *pdev)
goto err_release_reg;
}
disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
- sht15_connection_reset(data);
+ ret = sht15_connection_reset(data);
+ if (ret)
+ goto err_release_reg;
ret = sht15_soft_reset(data);
if (ret)
goto err_release_reg;
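
The sht15 hunks above convert sht15_ack(), sht15_end_transmission() and sht15_connection_reset() from void helpers into functions that report bus timeouts, and make the read work handler bail out to a single wakeup label so waiters are always released. A minimal userspace sketch of that error-propagation shape (all names here are hypothetical, not the driver's own):

#include <stdio.h>

/* stand-ins for the bus helpers; non-zero means the line timed out */
static int bus_ack(void)              { return 0; }
static int bus_end_transmission(void) { return -1; /* pretend the data line is stuck */ }

static void read_worker(void)
{
	if (bus_ack())
		goto wakeup;		/* don't keep clocking a dead bus */
	if (bus_end_transmission())
		goto wakeup;
	puts("measurement stored");
wakeup:
	puts("waiters woken on every exit path");	/* mirrors wake_up(&data->wait_queue) */
}

int main(void)
{
	read_worker();
	return 0;
}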
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 06ce3c911db9..c35847a1a0a3 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -132,7 +132,7 @@ static struct platform_device *pdev;
*/
static inline u8 IN_TO_REG(unsigned long val)
{
- unsigned long nval = SENSORS_LIMIT(val, 0, 4080);
+ unsigned long nval = clamp_val(val, 0, 4080);
return (nval + 8) / 16;
}
#define IN_FROM_REG(val) ((val) * 16)
@@ -141,7 +141,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm <= 0)
return 255;
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static inline int FAN_FROM_REG(u8 val, int div)
@@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val)
}
static inline s8 TEMP_TO_REG(int val)
{
- int nval = SENSORS_LIMIT(val, -54120, 157530) ;
+ int nval = clamp_val(val, -54120, 157530) ;
return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
}
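
The hwmon hunks in this patch replace the driver-local SENSORS_LIMIT() macro with the generic clamp_val() helper, which clamps a value to [lo, hi] using the value's own type. The userspace sketch below uses a simplified stand-in macro purely to illustrate the arithmetic; it is not the kernel implementation:

#include <stdio.h>

/* simplified illustration of clamp_val(); the real helper is typed via clamp_t() */
#define clamp_val(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

/* the sis5595 IN_TO_REG() conversion from the hunk above */
static unsigned char in_to_reg(unsigned long val)
{
	unsigned long nval = clamp_val(val, 0, 4080);
	return (nval + 8) / 16;
}

int main(void)
{
	printf("%d\n", in_to_reg(5000));	/* clamped to 4080 -> register value 255 */
	return 0;
}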
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index dba0c567e7a1..6d8255ccf07a 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -326,7 +326,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute
/* Preserve fan min */
tmp = 192 - (old_div * (192 - data->fan_preload[nr])
+ new_div / 2) / new_div;
- data->fan_preload[nr] = SENSORS_LIMIT(tmp, 0, 191);
+ data->fan_preload[nr] = clamp_val(tmp, 0, 191);
smsc47m1_write_value(data, SMSC47M1_REG_FAN_PRELOAD[nr],
data->fan_preload[nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 36a3478d0799..efee4c59239f 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -77,7 +77,7 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255);
+ return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
}
/*
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
*/
static inline s8 TEMP_TO_REG(int val)
{
- return SENSORS_LIMIT(SCALE(val, 1, 1000), -128000, 127000);
+ return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
}
static inline int TEMP_FROM_REG(s8 val)
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 3c2c48d904e6..4b59eb53b18a 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -134,7 +134,7 @@ static ssize_t set_analog_out(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- data->analog_out = SENSORS_LIMIT(tmp, 0, 255);
+ data->analog_out = clamp_val(tmp, 0, 255);
i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT,
data->analog_out);
@@ -187,7 +187,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_min[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
+ data->temp_min[nr] = clamp_val(val / 1000, -128, 127);
i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MIN[nr],
data->temp_min[nr]);
mutex_unlock(&data->update_lock);
@@ -216,7 +216,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_max[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
+ data->temp_max[nr] = clamp_val(val / 1000, -128, 127);
i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MAX[nr],
data->temp_max[nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index b10c3d36ccbc..523dd89ba498 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -115,7 +115,7 @@ static ssize_t tmp102_set_temp(struct device *dev,
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
- val = SENSORS_LIMIT(val, -256000, 255000);
+ val = clamp_val(val, -256000, 255000);
mutex_lock(&tmp102->lock);
tmp102->temp[sda->index] = val;
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index e62054875164..c85f6967ccc3 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -142,10 +142,10 @@ static int tmp401_register_to_temp(u16 reg, u8 config)
static u16 tmp401_temp_to_register(long temp, u8 config)
{
if (config & TMP401_CONFIG_RANGE) {
- temp = SENSORS_LIMIT(temp, -64000, 191000);
+ temp = clamp_val(temp, -64000, 191000);
temp += 64000;
} else
- temp = SENSORS_LIMIT(temp, 0, 127000);
+ temp = clamp_val(temp, 0, 127000);
return (temp * 160 + 312) / 625;
}
@@ -163,10 +163,10 @@ static int tmp401_crit_register_to_temp(u8 reg, u8 config)
static u8 tmp401_crit_temp_to_register(long temp, u8 config)
{
if (config & TMP401_CONFIG_RANGE) {
- temp = SENSORS_LIMIT(temp, -64000, 191000);
+ temp = clamp_val(temp, -64000, 191000);
temp += 64000;
} else
- temp = SENSORS_LIMIT(temp, 0, 127000);
+ temp = clamp_val(temp, 0, 127000);
return (temp + 500) / 1000;
}
@@ -417,14 +417,14 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
return -EINVAL;
if (data->config & TMP401_CONFIG_RANGE)
- val = SENSORS_LIMIT(val, -64000, 191000);
+ val = clamp_val(val, -64000, 191000);
else
- val = SENSORS_LIMIT(val, 0, 127000);
+ val = clamp_val(val, 0, 127000);
mutex_lock(&data->update_lock);
temp = tmp401_crit_register_to_temp(data->temp_crit[index],
data->config);
- val = SENSORS_LIMIT(val, temp - 255000, temp);
+ val = clamp_val(val, temp - 255000, temp);
reg = ((temp - val) + 500) / 1000;
i2c_smbus_write_byte_data(to_i2c_client(dev),
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 149d44a7c584..6c6d440bb2dd 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -130,7 +130,7 @@ static int twl4030_madc_hwmon_remove(struct platform_device *pdev)
static struct platform_driver twl4030_madc_hwmon_driver = {
.probe = twl4030_madc_hwmon_probe,
- .remove = __exit_p(twl4030_madc_hwmon_remove),
+ .remove = twl4030_madc_hwmon_remove,
.driver = {
.name = "twl4030_madc_hwmon",
.owner = THIS_MODULE,
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index 59fd1268e58a..d867e6bb2be1 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -19,6 +19,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/vexpress.h>
@@ -196,7 +197,7 @@ error:
return err;
}
-static int __devexit vexpress_hwmon_remove(struct platform_device *pdev)
+static int vexpress_hwmon_remove(struct platform_device *pdev)
{
struct vexpress_hwmon_data *data = platform_get_drvdata(pdev);
const struct of_device_id *match;
@@ -213,7 +214,7 @@ static int __devexit vexpress_hwmon_remove(struct platform_device *pdev)
static struct platform_driver vexpress_hwmon_driver = {
.probe = vexpress_hwmon_probe,
- .remove = __devexit_p(vexpress_hwmon_remove),
+ .remove = vexpress_hwmon_remove,
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
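
The twl4030-madc and vexpress hunks above are part of the tree-wide removal of the __devinit/__devexit section annotations: probe and remove become ordinary functions and .remove is assigned directly instead of going through __exit_p()/__devexit_p(). A hypothetical minimal platform driver in that style (sketch only, not taken from either driver):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,		/* no __devexit_p() wrapper needed */
	.driver	= {
		.name	= "demo",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");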
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index e0e14a9f1658..3123b30208c5 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -135,17 +135,14 @@ static inline u8 IN_TO_REG(long val, int inNum)
* for the constants.
*/
if (inNum <= 1)
- return (u8)
- SENSORS_LIMIT((val * 21024 - 1205000) / 250000, 0, 255);
+ return (u8) clamp_val((val * 21024 - 1205000) / 250000, 0, 255);
else if (inNum == 2)
- return (u8)
- SENSORS_LIMIT((val * 15737 - 1205000) / 250000, 0, 255);
+ return (u8) clamp_val((val * 15737 - 1205000) / 250000, 0, 255);
else if (inNum == 3)
- return (u8)
- SENSORS_LIMIT((val * 10108 - 1205000) / 250000, 0, 255);
+ return (u8) clamp_val((val * 10108 - 1205000) / 250000, 0, 255);
else
- return (u8)
- SENSORS_LIMIT((val * 41714 - 12050000) / 2500000, 0, 255);
+ return (u8) clamp_val((val * 41714 - 12050000) / 2500000, 0,
+ 255);
}
static inline long IN_FROM_REG(u8 val, int inNum)
@@ -175,8 +172,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 0;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 255);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 255);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (val) == 255 ? 0 : 1350000 / \
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 751703059fae..dcc62f80f67b 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -158,7 +158,7 @@ struct vt1211_data {
#define IN_FROM_REG(ix, reg) ((reg) < 3 ? 0 : (ix) == 5 ? \
(((reg) - 3) * 15882 + 479) / 958 : \
(((reg) - 3) * 10000 + 479) / 958)
-#define IN_TO_REG(ix, val) (SENSORS_LIMIT((ix) == 5 ? \
+#define IN_TO_REG(ix, val) (clamp_val((ix) == 5 ? \
((val) * 958 + 7941) / 15882 + 3 : \
((val) * 958 + 5000) / 10000 + 3, 0, 255))
@@ -173,7 +173,7 @@ struct vt1211_data {
(ix) == 1 ? (reg) < 51 ? 0 : \
((reg) - 51) * 1000 : \
((253 - (reg)) * 2200 + 105) / 210)
-#define TEMP_TO_REG(ix, val) SENSORS_LIMIT( \
+#define TEMP_TO_REG(ix, val) clamp_val( \
((ix) == 0 ? ((val) + 500) / 1000 : \
(ix) == 1 ? ((val) + 500) / 1000 + 51 : \
253 - ((val) * 210 + 1100) / 2200), 0, 255)
@@ -183,7 +183,7 @@ struct vt1211_data {
#define RPM_FROM_REG(reg, div) (((reg) == 0) || ((reg) == 255) ? 0 : \
1310720 / (reg) / DIV_FROM_REG(div))
#define RPM_TO_REG(val, div) ((val) == 0 ? 255 : \
- SENSORS_LIMIT((1310720 / (val) / \
+ clamp_val((1310720 / (val) / \
DIV_FROM_REG(div)), 1, 254))
/* ---------------------------------------------------------------------
@@ -687,7 +687,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
data->fan_ctl));
break;
case SHOW_SET_PWM_FREQ:
- val = 135000 / SENSORS_LIMIT(val, 135000 >> 7, 135000);
+ val = 135000 / clamp_val(val, 135000 >> 7, 135000);
/* calculate tmp = log2(val) */
tmp = 0;
for (val >>= 1; val > 0; val >>= 1)
@@ -845,7 +845,7 @@ static ssize_t set_pwm_auto_point_pwm(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- data->pwm_auto_pwm[ix][ap] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm_auto_pwm[ix][ap] = clamp_val(val, 0, 255);
vt1211_write8(data, VT1211_REG_PWM_AUTO_PWM(ix, ap),
data->pwm_auto_pwm[ix][ap]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index a56355cef184..988a2a796764 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -147,7 +147,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 0;
- return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255);
+ return clamp_val(1310720 / (rpm * div), 1, 255);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
@@ -236,7 +236,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
+ data->in_min[nr] = clamp_val(((val * 958) / 10000) + 3, 0, 255);
vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -256,7 +256,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
+ data->in_max[nr] = clamp_val(((val * 958) / 10000) + 3, 0, 255);
vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -302,8 +302,8 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
- 0, 255);
+ data->in_min[5] = clamp_val(((val * 958 * 34) / (10000 * 54)) + 3,
+ 0, 255);
vt8231_write_value(data, regvoltmin[5], data->in_min[5]);
mutex_unlock(&data->update_lock);
return count;
@@ -321,8 +321,8 @@ static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
- 0, 255);
+ data->in_max[5] = clamp_val(((val * 958 * 34) / (10000 * 54)) + 3,
+ 0, 255);
vt8231_write_value(data, regvoltmax[5], data->in_max[5]);
mutex_unlock(&data->update_lock);
return count;
@@ -380,7 +380,7 @@ static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
+ data->temp_max[0] = clamp_val((val + 500) / 1000, 0, 255);
vt8231_write_value(data, regtempmax[0], data->temp_max[0]);
mutex_unlock(&data->update_lock);
return count;
@@ -397,7 +397,7 @@ static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
+ data->temp_min[0] = clamp_val((val + 500) / 1000, 0, 255);
vt8231_write_value(data, regtempmin[0], data->temp_min[0]);
mutex_unlock(&data->update_lock);
return count;
@@ -444,7 +444,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
+ data->temp_max[nr] = clamp_val(TEMP_MAXMIN_TO_REG(val), 0, 255);
vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -463,7 +463,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
+ data->temp_min[nr] = clamp_val(TEMP_MAXMIN_TO_REG(val), 0, 255);
vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 55ac41c05561..0a89211c25f6 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,7 +1,7 @@
/*
* w83627ehf - Driver for the hardware monitoring functionality of
* the Winbond W83627EHF Super-I/O chip
- * Copyright (C) 2005-2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012 Jean Delvare <khali@linux-fr.org>
* Copyright (C) 2006 Yuan Mu (Winbond),
* Rudolf Marek <r.marek@assembler.cz>
* David Hubbard <david.c.hubbard@gmail.com>
@@ -354,8 +354,8 @@ static inline unsigned int step_time_from_reg(u8 reg, u8 mode)
static inline u8 step_time_to_reg(unsigned int msec, u8 mode)
{
- return SENSORS_LIMIT((mode ? (msec + 50) / 100 :
- (msec + 200) / 400), 1, 255);
+ return clamp_val((mode ? (msec + 50) / 100 : (msec + 200) / 400),
+ 1, 255);
}
static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
@@ -414,8 +414,7 @@ static inline long in_from_reg(u8 reg, u8 nr, const u16 *scale_in)
static inline u8 in_to_reg(u32 val, u8 nr, const u16 *scale_in)
{
- return SENSORS_LIMIT(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0,
- 255);
+ return clamp_val(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 255);
}
/*
@@ -502,6 +501,13 @@ struct w83627ehf_data {
u16 have_temp_offset;
u8 in6_skip:1;
u8 temp3_val_only:1;
+
+#ifdef CONFIG_PM
+ /* Remember extra register values over suspend/resume */
+ u8 vbat;
+ u8 fandiv1;
+ u8 fandiv2;
+#endif
};
struct w83627ehf_sio_data {
@@ -898,6 +904,8 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->temp_max_hyst[i]
= w83627ehf_read_temp(data,
data->reg_temp_hyst[i]);
+ if (i > 2)
+ continue;
if (data->have_temp_offset & (1 << i))
data->temp_offset[i]
= w83627ehf_read_value(data,
@@ -1258,7 +1266,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
mutex_lock(&data->update_lock);
data->temp_offset[nr] = val;
@@ -1426,7 +1434,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[nr] = val;
@@ -1505,7 +1513,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
@@ -1531,7 +1539,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
return err;
/* Limit the temp to 0C - 15C */
- val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
@@ -1630,7 +1638,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
- val = SENSORS_LIMIT(val, 1, 255); \
+ val = clamp_val(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
w83627ehf_write_value(data, data->REG_##REG[nr], val); \
@@ -2608,10 +2616,98 @@ static int w83627ehf_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int w83627ehf_suspend(struct device *dev)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ mutex_lock(&data->update_lock);
+ data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
+ if (sio_data->kind == nct6775) {
+ data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
+ data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
+ }
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static int w83627ehf_resume(struct device *dev)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ int i;
+
+ mutex_lock(&data->update_lock);
+ data->bank = 0xff; /* Force initial bank selection */
+
+ /* Restore limits */
+ for (i = 0; i < data->in_num; i++) {
+ if ((i == 6) && data->in6_skip)
+ continue;
+
+ w83627ehf_write_value(data, W83627EHF_REG_IN_MIN(i),
+ data->in_min[i]);
+ w83627ehf_write_value(data, W83627EHF_REG_IN_MAX(i),
+ data->in_max[i]);
+ }
+
+ for (i = 0; i < 5; i++) {
+ if (!(data->has_fan_min & (1 << i)))
+ continue;
+
+ w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+ data->fan_min[i]);
+ }
+
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+
+ if (data->reg_temp_over[i])
+ w83627ehf_write_temp(data, data->reg_temp_over[i],
+ data->temp_max[i]);
+ if (data->reg_temp_hyst[i])
+ w83627ehf_write_temp(data, data->reg_temp_hyst[i],
+ data->temp_max_hyst[i]);
+ if (i > 2)
+ continue;
+ if (data->have_temp_offset & (1 << i))
+ w83627ehf_write_value(data,
+ W83627EHF_REG_TEMP_OFFSET[i],
+ data->temp_offset[i]);
+ }
+
+ /* Restore other settings */
+ w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
+ if (sio_data->kind == nct6775) {
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
+ }
+
+ /* Force re-reading all values */
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops w83627ehf_dev_pm_ops = {
+ .suspend = w83627ehf_suspend,
+ .resume = w83627ehf_resume,
+};
+
+#define W83627EHF_DEV_PM_OPS (&w83627ehf_dev_pm_ops)
+#else
+#define W83627EHF_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static struct platform_driver w83627ehf_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
+ .pm = W83627EHF_DEV_PM_OPS,
},
.probe = w83627ehf_probe,
.remove = w83627ehf_remove,
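
The w83627ehf section above gains suspend/resume support guarded by CONFIG_PM; a macro resolving to either the dev_pm_ops pointer or NULL keeps the .pm assignment in the platform_driver unconditional while letting the PM code compile away. A condensed, hypothetical sketch of that wiring pattern (stub bodies, assumed names):

#include <linux/module.h>
#include <linux/platform_device.h>

#ifdef CONFIG_PM
static int demo_suspend(struct device *dev)
{
	/* latch registers the chip loses across suspend */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* rewrite cached limits, then force a re-read of all values */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.suspend = demo_suspend,
	.resume	 = demo_resume,
};
#define DEMO_PM_OPS (&demo_pm_ops)
#else
#define DEMO_PM_OPS NULL		/* .pm stays valid when PM is compiled out */
#endif

static struct platform_driver demo_pm_driver = {
	.driver = {
		.name	= "demo_pm",
		.owner	= THIS_MODULE,
		.pm	= DEMO_PM_OPS,
	},
};
module_platform_driver(demo_pm_driver);

MODULE_LICENSE("GPL");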
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 7f68b8309d10..3b9ef2d23452 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -5,7 +5,7 @@
* Philip Edelbrock <phil@netroedge.com>,
* and Mark Studebaker <mdsxyz123@yahoo.com>
* Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org>
- * Copyright (c) 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 2012 Jean Delvare <khali@linux-fr.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -254,16 +254,15 @@ static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
* these macros are called: arguments may be evaluated more than once.
* Fixing this is just not worth it.
*/
-#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8)/16),0,255))
+#define IN_TO_REG(val) (clamp_val((((val) + 8) / 16), 0, 255))
#define IN_FROM_REG(val) ((val) * 16)
static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
- 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define TEMP_MIN (-128000)
@@ -275,9 +274,9 @@ static inline u8 FAN_TO_REG(long rpm, int div)
*/
static u8 TEMP_TO_REG(long temp)
{
- int ntemp = SENSORS_LIMIT(temp, TEMP_MIN, TEMP_MAX);
- ntemp += (ntemp<0 ? -500 : 500);
- return (u8)(ntemp / 1000);
+ int ntemp = clamp_val(temp, TEMP_MIN, TEMP_MAX);
+ ntemp += (ntemp < 0 ? -500 : 500);
+ return (u8)(ntemp / 1000);
}
static int TEMP_FROM_REG(u8 reg)
@@ -287,7 +286,7 @@ static int TEMP_FROM_REG(u8 reg)
#define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div)))
-#define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255))
+#define PWM_TO_REG(val) (clamp_val((val), 0, 255))
static inline unsigned long pwm_freq_from_reg_627hf(u8 reg)
{
@@ -342,7 +341,7 @@ static inline u8 pwm_freq_to_reg(unsigned long val)
static inline u8 DIV_TO_REG(long val)
{
int i;
- val = SENSORS_LIMIT(val, 1, 128) >> 1;
+ val = clamp_val(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -389,6 +388,12 @@ struct w83627hf_data {
*/
u8 vrm;
u8 vrm_ovt; /* Register value, 627THF/637HF/687THF only */
+
+#ifdef CONFIG_PM
+ /* Remember extra register values over suspend/resume */
+ u8 scfg1;
+ u8 scfg2;
+#endif
};
@@ -401,10 +406,77 @@ static void w83627hf_update_fan_div(struct w83627hf_data *data);
static struct w83627hf_data *w83627hf_update_device(struct device *dev);
static void w83627hf_init_device(struct platform_device *pdev);
+#ifdef CONFIG_PM
+static int w83627hf_suspend(struct device *dev)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+
+ mutex_lock(&data->update_lock);
+ data->scfg1 = w83627hf_read_value(data, W83781D_REG_SCFG1);
+ data->scfg2 = w83627hf_read_value(data, W83781D_REG_SCFG2);
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static int w83627hf_resume(struct device *dev)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ int i, num_temps = (data->type == w83697hf) ? 2 : 3;
+
+ /* Restore limits */
+ mutex_lock(&data->update_lock);
+ for (i = 0; i <= 8; i++) {
+ /* skip missing sensors */
+ if (((data->type == w83697hf) && (i == 1)) ||
+ ((data->type != w83627hf && data->type != w83697hf)
+ && (i == 5 || i == 6)))
+ continue;
+ w83627hf_write_value(data, W83781D_REG_IN_MAX(i),
+ data->in_max[i]);
+ w83627hf_write_value(data, W83781D_REG_IN_MIN(i),
+ data->in_min[i]);
+ }
+ for (i = 0; i <= 2; i++)
+ w83627hf_write_value(data, W83627HF_REG_FAN_MIN(i),
+ data->fan_min[i]);
+ for (i = 0; i < num_temps; i++) {
+ w83627hf_write_value(data, w83627hf_reg_temp_over[i],
+ data->temp_max[i]);
+ w83627hf_write_value(data, w83627hf_reg_temp_hyst[i],
+ data->temp_max_hyst[i]);
+ }
+
+ /* Fixup BIOS bugs */
+ if (data->type == w83627thf || data->type == w83637hf ||
+ data->type == w83687thf)
+ w83627hf_write_value(data, W83627THF_REG_VRM_OVT_CFG,
+ data->vrm_ovt);
+ w83627hf_write_value(data, W83781D_REG_SCFG1, data->scfg1);
+ w83627hf_write_value(data, W83781D_REG_SCFG2, data->scfg2);
+
+ /* Force re-reading all values */
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops w83627hf_dev_pm_ops = {
+ .suspend = w83627hf_suspend,
+ .resume = w83627hf_resume,
+};
+
+#define W83627HF_DEV_PM_OPS (&w83627hf_dev_pm_ops)
+#else
+#define W83627HF_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static struct platform_driver w83627hf_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
+ .pm = W83627HF_DEV_PM_OPS,
},
.probe = w83627hf_probe,
.remove = w83627hf_remove,
@@ -541,8 +613,7 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a
/* use VRM9 calculation */
data->in_min[0] =
- SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
- 255);
+ clamp_val(((val * 100) - 70000 + 244) / 488, 0, 255);
else
/* use VRM8 (standard) calculation */
data->in_min[0] = IN_TO_REG(val);
@@ -571,8 +642,7 @@ static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *a
/* use VRM9 calculation */
data->in_max[0] =
- SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
- 255);
+ clamp_val(((val * 100) - 70000 + 244) / 488, 0, 255);
else
/* use VRM8 (standard) calculation */
data->in_max[0] = IN_TO_REG(val);
@@ -1659,8 +1729,10 @@ static void w83627hf_init_device(struct platform_device *pdev)
/* Minimize conflicts with other winbond i2c-only clients... */
/* disable i2c subclients... how to disable main i2c client?? */
/* force i2c address to relatively uncommon address */
- w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
- w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
+ if (type == w83627hf) {
+ w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
+ w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
+ }
/* Read VID only once */
if (type == w83627hf || type == w83637hf) {
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 93bd28639595..aeec5b1d81c9 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -159,7 +159,7 @@ static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
#define W83781D_DEFAULT_BETA 3435
/* Conversions */
-#define IN_TO_REG(val) SENSORS_LIMIT(((val) + 8) / 16, 0, 255)
+#define IN_TO_REG(val) clamp_val(((val) + 8) / 16, 0, 255)
#define IN_FROM_REG(val) ((val) * 16)
static inline u8
@@ -167,8 +167,8 @@ FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static inline long
@@ -181,7 +181,7 @@ FAN_FROM_REG(u8 val, int div)
return 1350000 / (val * div);
}
-#define TEMP_TO_REG(val) SENSORS_LIMIT((val) / 1000, -127, 128)
+#define TEMP_TO_REG(val) clamp_val((val) / 1000, -127, 128)
#define TEMP_FROM_REG(val) ((val) * 1000)
#define BEEP_MASK_FROM_REG(val, type) ((type) == as99127f ? \
@@ -195,9 +195,8 @@ static inline u8
DIV_TO_REG(long val, enum chips type)
{
int i;
- val = SENSORS_LIMIT(val, 1,
- ((type == w83781d
- || type == as99127f) ? 8 : 128)) >> 1;
+ val = clamp_val(val, 1,
+ ((type == w83781d || type == as99127f) ? 8 : 128)) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -443,7 +442,7 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
- data->vrm = SENSORS_LIMIT(val, 0, 255);
+ data->vrm = clamp_val(val, 0, 255);
return count;
}
@@ -730,7 +729,7 @@ store_pwm(struct device *dev, struct device_attribute *da, const char *buf,
return err;
mutex_lock(&data->update_lock);
- data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm[nr] = clamp_val(val, 0, 255);
w83781d_write_value(data, W83781D_REG_PWM[nr], data->pwm[nr]);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index ed397c645198..38dddddf8875 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -220,15 +220,15 @@ static inline int w83791d_write(struct i2c_client *client, u8 reg, u8 value)
* in mV as would be measured on the chip input pin, need to just
* multiply/divide by 16 to translate from/to register values.
*/
-#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8) / 16), 0, 255))
+#define IN_TO_REG(val) (clamp_val((((val) + 8) / 16), 0, 255))
#define IN_FROM_REG(val) ((val) * 16)
static u8 fan_to_reg(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -273,7 +273,7 @@ static u8 div_to_reg(int nr, long val)
int i;
/* fan divisors max out at 128 */
- val = SENSORS_LIMIT(val, 1, 128) >> 1;
+ val = clamp_val(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -747,7 +747,7 @@ static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm[nr] = clamp_val(val, 0, 255);
w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 301942d08453..5cb83ddf2cc6 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -235,8 +235,8 @@ FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -244,16 +244,15 @@ FAN_TO_REG(long rpm, int div)
1350000 / ((val) * (div))))
/* for temp1 */
-#define TEMP1_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
- : (val)) / 1000, 0, 0xff))
+#define TEMP1_TO_REG(val) (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
+ : (val)) / 1000, 0, 0xff))
#define TEMP1_FROM_REG(val) (((val) & 0x80 ? (val)-0x100 : (val)) * 1000)
/* for temp2 and temp3, because they need additional resolution */
#define TEMP_ADD_FROM_REG(val1, val2) \
((((val1) & 0x80 ? (val1)-0x100 \
: (val1)) * 1000) + ((val2 & 0x80) ? 500 : 0))
#define TEMP_ADD_TO_REG_HIGH(val) \
- (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
- : (val)) / 1000, 0, 0xff))
+ (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 : (val)) / 1000, 0, 0xff))
#define TEMP_ADD_TO_REG_LOW(val) ((val%1000) ? 0x80 : 0x00)
#define DIV_FROM_REG(val) (1 << (val))
@@ -262,7 +261,7 @@ static inline u8
DIV_TO_REG(long val)
{
int i;
- val = SENSORS_LIMIT(val, 1, 128) >> 1;
+ val = clamp_val(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -397,7 +396,7 @@ static ssize_t store_in_##reg(struct device *dev, \
if (err) \
return err; \
mutex_lock(&data->update_lock); \
- data->in_##reg[nr] = SENSORS_LIMIT(IN_TO_REG(nr, val) / 4, 0, 255); \
+ data->in_##reg[nr] = clamp_val(IN_TO_REG(nr, val) / 4, 0, 255); \
w83792d_write_value(client, W83792D_REG_IN_##REG[nr], \
data->in_##reg[nr]); \
mutex_unlock(&data->update_lock); \
@@ -645,7 +644,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255) >> 4;
+ val = clamp_val(val, 0, 255) >> 4;
mutex_lock(&data->update_lock);
val |= w83792d_read_value(client, W83792D_REG_PWM[nr]) & 0xf0;
@@ -799,7 +798,7 @@ store_thermal_cruise(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
target_mask = w83792d_read_value(client,
W83792D_REG_THERMAL[nr]) & 0x80;
- data->thermal_cruise[nr] = SENSORS_LIMIT(target_tmp, 0, 255);
+ data->thermal_cruise[nr] = clamp_val(target_tmp, 0, 255);
w83792d_write_value(client, W83792D_REG_THERMAL[nr],
(data->thermal_cruise[nr]) | target_mask);
mutex_unlock(&data->update_lock);
@@ -837,7 +836,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
tol_mask = w83792d_read_value(client,
W83792D_REG_TOLERANCE[nr]) & ((nr == 1) ? 0x0f : 0xf0);
- tol_tmp = SENSORS_LIMIT(val, 0, 15);
+ tol_tmp = clamp_val(val, 0, 15);
tol_tmp &= 0x0f;
data->tolerance[nr] = tol_tmp;
if (nr == 1)
@@ -881,7 +880,7 @@ store_sf2_point(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->sf2_points[index][nr] = SENSORS_LIMIT(val, 0, 127);
+ data->sf2_points[index][nr] = clamp_val(val, 0, 127);
mask_tmp = w83792d_read_value(client,
W83792D_REG_POINTS[index][nr]) & 0x80;
w83792d_write_value(client, W83792D_REG_POINTS[index][nr],
@@ -923,7 +922,7 @@ store_sf2_level(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->sf2_levels[index][nr] = SENSORS_LIMIT((val * 15) / 100, 0, 15);
+ data->sf2_levels[index][nr] = clamp_val((val * 15) / 100, 0, 15);
mask_tmp = w83792d_read_value(client, W83792D_REG_LEVELS[index][nr])
& ((nr == 3) ? 0xf0 : 0x0f);
if (nr == 3)
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 99799fd1d917..660427520c53 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -191,7 +191,7 @@ static inline u16 FAN_TO_REG(long rpm)
{
if (rpm <= 0)
return 0x0fff;
- return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
+ return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}
static inline unsigned long TIME_FROM_REG(u8 reg)
@@ -201,7 +201,7 @@ static inline unsigned long TIME_FROM_REG(u8 reg)
static inline u8 TIME_TO_REG(unsigned long val)
{
- return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
+ return clamp_val((val + 50) / 100, 0, 0xff);
}
static inline long TEMP_FROM_REG(s8 reg)
@@ -211,7 +211,7 @@ static inline long TEMP_FROM_REG(s8 reg)
static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
{
- return SENSORS_LIMIT((val + (val < 0 ? -500 : 500)) / 1000, min, max);
+ return clamp_val((val + (val < 0 ? -500 : 500)) / 1000, min, max);
}
struct w83793_data {
@@ -558,7 +558,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index),
val);
} else {
- val = SENSORS_LIMIT(val, 0, 0xff) >> 2;
+ val = clamp_val(val, 0, 0xff) >> 2;
data->pwm[index][nr] =
w83793_read_value(client, W83793_REG_PWM(index, nr)) & 0xc0;
data->pwm[index][nr] |= val;
@@ -739,7 +739,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
if (nr == SETUP_PWM_DEFAULT) {
data->pwm_default =
w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0;
- data->pwm_default |= SENSORS_LIMIT(val, 0, 0xff) >> 2;
+ data->pwm_default |= clamp_val(val, 0, 0xff) >> 2;
w83793_write_value(client, W83793_REG_PWM_DEFAULT,
data->pwm_default);
} else if (nr == SETUP_PWM_UPTIME) {
@@ -838,7 +838,7 @@ store_sf_ctrl(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
if (nr == TEMP_FAN_MAP) {
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val);
data->temp_fan_map[index] = val;
} else if (nr == TEMP_PWM_ENABLE) {
@@ -907,7 +907,7 @@ store_sf2_pwm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 0xff) >> 2;
+ val = clamp_val(val, 0, 0xff) >> 2;
mutex_lock(&data->update_lock);
data->sf2_pwm[index][nr] =
@@ -1003,9 +1003,9 @@ store_in(struct device *dev, struct device_attribute *attr,
/* fix the limit values of 5VDD and 5VSB to ALARM mechanism */
if (nr == 1 || nr == 2)
val -= scale_in_add[index] / scale_in[index];
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
} else {
- val = SENSORS_LIMIT(val, 0, 0x3FF);
+ val = clamp_val(val, 0, 0x3FF);
data->in_low_bits[nr] =
w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]);
data->in_low_bits[nr] &= ~(0x03 << (2 * index));
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 55a4f4894531..e226096148eb 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -262,7 +262,7 @@ static inline u16 fan_to_reg(long rpm)
{
if (rpm <= 0)
return 0x0fff;
- return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
+ return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}
static inline unsigned long time_from_reg(u8 reg)
@@ -272,7 +272,7 @@ static inline unsigned long time_from_reg(u8 reg)
static inline u8 time_to_reg(unsigned long val)
{
- return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
+ return clamp_val((val + 50) / 100, 0, 0xff);
}
static inline long temp_from_reg(s8 reg)
@@ -282,7 +282,7 @@ static inline long temp_from_reg(s8 reg)
static inline s8 temp_to_reg(long val, s8 min, s8 max)
{
- return SENSORS_LIMIT(val / 1000, min, max);
+ return clamp_val(val / 1000, min, max);
}
static const u16 pwm_freq_cksel0[16] = {
@@ -319,7 +319,7 @@ static u8 pwm_freq_to_reg(unsigned long val, u16 clkin)
/* Best fit for cksel = 1 */
base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256);
- reg1 = SENSORS_LIMIT(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
+ reg1 = clamp_val(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
best1 = base_clock / reg1;
reg1 = 0x80 | (reg1 - 1);
@@ -889,7 +889,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
val = pwm_freq_to_reg(val, data->clkin);
break;
default:
- val = SENSORS_LIMIT(val, 0, 0xff);
+ val = clamp_val(val, 0, 0xff);
break;
}
w83795_write(client, W83795_REG_PWM(index, nr), val);
@@ -1126,7 +1126,7 @@ store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
break;
case TEMP_PWM_FAN_MAP:
mutex_lock(&data->update_lock);
- tmp = SENSORS_LIMIT(tmp, 0, 0xff);
+ tmp = clamp_val(tmp, 0, 0xff);
w83795_write(client, W83795_REG_TFMR(index), tmp);
data->pwm_tfmr[index] = tmp;
mutex_unlock(&data->update_lock);
@@ -1177,13 +1177,13 @@ store_fanin(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
switch (nr) {
case FANIN_TARGET:
- val = fan_to_reg(SENSORS_LIMIT(val, 0, 0xfff));
+ val = fan_to_reg(clamp_val(val, 0, 0xfff));
w83795_write(client, W83795_REG_FTSH(index), val >> 4);
w83795_write(client, W83795_REG_FTSL(index), (val << 4) & 0xf0);
data->target_speed[index] = val;
break;
case FANIN_TOL:
- val = SENSORS_LIMIT(val, 0, 0x3f);
+ val = clamp_val(val, 0, 0x3f);
w83795_write(client, W83795_REG_TFTS, val);
data->tol_speed = val;
break;
@@ -1227,22 +1227,22 @@ store_temp_pwm(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
switch (nr) {
case TEMP_PWM_TTTI:
- val = SENSORS_LIMIT(val, 0, 0x7f);
+ val = clamp_val(val, 0, 0x7f);
w83795_write(client, W83795_REG_TTTI(index), val);
break;
case TEMP_PWM_CTFS:
- val = SENSORS_LIMIT(val, 0, 0x7f);
+ val = clamp_val(val, 0, 0x7f);
w83795_write(client, W83795_REG_CTFS(index), val);
break;
case TEMP_PWM_HCT:
- val = SENSORS_LIMIT(val, 0, 0x0f);
+ val = clamp_val(val, 0, 0x0f);
tmp = w83795_read(client, W83795_REG_HT(index));
tmp &= 0x0f;
tmp |= (val << 4) & 0xf0;
w83795_write(client, W83795_REG_HT(index), tmp);
break;
case TEMP_PWM_HOT:
- val = SENSORS_LIMIT(val, 0, 0x0f);
+ val = clamp_val(val, 0, 0x0f);
tmp = w83795_read(client, W83795_REG_HT(index));
tmp &= 0xf0;
tmp |= val & 0x0f;
@@ -1541,7 +1541,7 @@ store_in(struct device *dev, struct device_attribute *attr,
if ((index >= 17) &&
!((data->has_gain >> (index - 17)) & 1))
val /= 8;
- val = SENSORS_LIMIT(val, 0, 0x3FF);
+ val = clamp_val(val, 0, 0x3FF);
mutex_lock(&data->update_lock);
lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX];
@@ -1596,7 +1596,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
switch (nr) {
case SETUP_PWM_DEFAULT:
- val = SENSORS_LIMIT(val, 0, 0xff);
+ val = clamp_val(val, 0, 0xff);
break;
case SETUP_PWM_UPTIME:
case SETUP_PWM_DOWNTIME:
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 79710bcac2f7..edb06cda5a68 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -86,8 +86,8 @@ FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -95,9 +95,8 @@ FAN_TO_REG(long rpm, int div)
1350000 / ((val) * (div))))
/* for temp */
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? \
- (val) + 0x100 * 1000 \
- : (val)) / 1000, 0, 0xff))
+#define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
+ : (val)) / 1000, 0, 0xff))
#define TEMP_FROM_REG(val) (((val) & 0x80 ? \
(val) - 0x100 : (val)) * 1000)
@@ -106,7 +105,7 @@ FAN_TO_REG(long rpm, int div)
* in mV as would be measured on the chip input pin, need to just
* multiply/divide by 8 to translate from/to register values.
*/
-#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 4) / 8), 0, 255))
+#define IN_TO_REG(val) (clamp_val((((val) + 4) / 8), 0, 255))
#define IN_FROM_REG(val) ((val) * 8)
#define DIV_FROM_REG(val) (1 << (val))
@@ -115,7 +114,7 @@ static inline u8
DIV_TO_REG(long val)
{
int i;
- val = SENSORS_LIMIT(val, 1, 128) >> 1;
+ val = clamp_val(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -481,7 +480,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[nr] = val;
@@ -564,7 +563,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
tol_mask = w83l786ng_read_value(client,
W83L786NG_REG_TOLERANCE) & ((nr == 1) ? 0x0f : 0xf0);
- tol_tmp = SENSORS_LIMIT(val, 0, 15);
+ tol_tmp = clamp_val(val, 0, 15);
tol_tmp &= 0x0f;
data->tolerance[nr] = tol_tmp;
if (nr == 1)
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 2f8c76becc6b..46cde098c11c 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -89,7 +89,7 @@ source drivers/i2c/busses/Kconfig
config I2C_STUB
tristate "I2C/SMBus Test Stub"
- depends on EXPERIMENTAL && m
+ depends on m
default 'n'
help
This module may be useful to developers of SMBus client drivers,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e9df4612b7eb..8bb810e1900b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -22,7 +22,7 @@ config I2C_ALI1535
config I2C_ALI1563
tristate "ALI 1563"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
If you say yes to this option, support will be included for the SMB
Host controller on Acer Labs Inc. (ALI) M1563 South Bridges. The SMB
@@ -56,7 +56,7 @@ config I2C_AMD756
config I2C_AMD756_S4882
tristate "SMBus multiplexing on the Tyan S4882"
- depends on I2C_AMD756 && X86 && EXPERIMENTAL
+ depends on I2C_AMD756 && X86
help
Enabling this option will add specific SMBus support for the Tyan
S4882 motherboard. On this 4-CPU board, the SMBus is multiplexed
@@ -164,7 +164,7 @@ config I2C_NFORCE2
config I2C_NFORCE2_S4985
tristate "SMBus multiplexing on the Tyan S4985"
- depends on I2C_NFORCE2 && X86 && EXPERIMENTAL
+ depends on I2C_NFORCE2 && X86
help
Enabling this option will add specific SMBus support for the Tyan
S4985 motherboard. On this 4-CPU board, the SMBus is multiplexed
@@ -215,7 +215,7 @@ config I2C_SIS96X
config I2C_VIA
tristate "VIA VT82C586B"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
select I2C_ALGOBIT
help
If you say yes to this option, support will be included for the VIA
@@ -267,7 +267,7 @@ comment "Mac SMBus host controller drivers"
config I2C_HYDRA
tristate "CHRP Apple Hydra Mac I/O I2C interface"
- depends on PCI && PPC_CHRP && EXPERIMENTAL
+ depends on PCI && PPC_CHRP
select I2C_ALGOBIT
help
This supports the use of the I2C interface in the Apple Hydra Mac
@@ -293,7 +293,7 @@ comment "I2C system bus drivers (mostly embedded / system-on-chip)"
config I2C_AT91
tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
- depends on ARCH_AT91 && EXPERIMENTAL
+ depends on ARCH_AT91
help
This supports the use of the I2C interface on Atmel AT91
processors.
@@ -337,6 +337,16 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
help
The unit of the TWI clock is kHz.
+config I2C_CBUS_GPIO
+ tristate "CBUS I2C driver"
+ depends on GENERIC_GPIO
+ help
+ Support for CBUS access using I2C API. Mostly relevant for Nokia
+ Internet Tablets (770, N800 and N810).
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-cbus-gpio.
+
config I2C_CPM
tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
depends on (CPM1 || CPM2) && OF_I2C
@@ -509,7 +519,6 @@ config I2C_NUC900
config I2C_OCORES
tristate "OpenCores I2C Controller"
- depends on EXPERIMENTAL
help
If you say yes to this option, support will be included for the
OpenCores I2C controller. For details see
@@ -702,7 +711,7 @@ config I2C_OCTEON
config I2C_XILINX
tristate "Xilinx I2C Controller"
- depends on EXPERIMENTAL && HAS_IOMEM
+ depends on HAS_IOMEM
help
If you say yes to this option, support will be included for the
Xilinx I2C controller.
@@ -793,7 +802,7 @@ config I2C_PARPORT_LIGHT
config I2C_TAOS_EVM
tristate "TAOS evaluation module"
- depends on EXPERIMENTAL
+ depends on TTY
select SERIO
select SERIO_SERPORT
default n
@@ -818,6 +827,16 @@ config I2C_TINY_USB
This driver can also be built as a module. If so, the module
will be called i2c-tiny-usb.
+config I2C_VIPERBOARD
+ tristate "Viperboard I2C master support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the I2C part of the Nano River
+ Technologies Viperboard as I2C master.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
+
comment "Other I2C/SMBus bus drivers"
config I2C_ACORN
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 395b516ffa08..6181f3ff263f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
+obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o
@@ -79,6 +80,7 @@ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
+obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o
# Other I2C/SMBus bus drivers
obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 125cd8e0ad25..3f491815e2c4 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -139,7 +139,7 @@ static unsigned short ali1535_offset;
Note the differences between kernels with the old PCI BIOS interface and
newer kernels with the real PCI interface. In compat.h some things are
defined to make the transition easier. */
-static int __devinit ali1535_setup(struct pci_dev *dev)
+static int ali1535_setup(struct pci_dev *dev)
{
int retval;
unsigned char temp;
@@ -502,7 +502,7 @@ static DEFINE_PCI_DEVICE_TABLE(ali1535_ids) = {
MODULE_DEVICE_TABLE(pci, ali1535_ids);
-static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (ali1535_setup(dev)) {
dev_warn(&dev->dev,
@@ -518,7 +518,7 @@ static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_
return i2c_add_adapter(&ali1535_adapter);
}
-static void __devexit ali1535_remove(struct pci_dev *dev)
+static void ali1535_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali1535_adapter);
release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
@@ -528,7 +528,7 @@ static struct pci_driver ali1535_driver = {
.name = "ali1535_smbus",
.id_table = ali1535_ids,
.probe = ali1535_probe,
- .remove = __devexit_p(ali1535_remove),
+ .remove = ali1535_remove,
};
module_pci_driver(ali1535_driver);
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index e02d9f86c6a0..84ccd9496a5e 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -326,7 +326,7 @@ static u32 ali1563_func(struct i2c_adapter * a)
}
-static int __devinit ali1563_setup(struct pci_dev * dev)
+static int ali1563_setup(struct pci_dev *dev)
{
u16 ctrl;
@@ -390,8 +390,8 @@ static struct i2c_adapter ali1563_adapter = {
.algo = &ali1563_algorithm,
};
-static int __devinit ali1563_probe(struct pci_dev * dev,
- const struct pci_device_id * id_table)
+static int ali1563_probe(struct pci_dev *dev,
+ const struct pci_device_id *id_table)
{
int error;
@@ -411,7 +411,7 @@ exit:
return error;
}
-static void __devexit ali1563_remove(struct pci_dev * dev)
+static void ali1563_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali1563_adapter);
ali1563_shutdown(dev);
@@ -428,7 +428,7 @@ static struct pci_driver ali1563_pci_driver = {
.name = "ali1563_smbus",
.id_table = ali1563_id_table,
.probe = ali1563_probe,
- .remove = __devexit_p(ali1563_remove),
+ .remove = ali1563_remove,
};
module_pci_driver(ali1563_pci_driver);
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index ce8d26d053a5..26bcc6127cee 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -131,7 +131,7 @@ MODULE_PARM_DESC(force_addr,
static struct pci_driver ali15x3_driver;
static unsigned short ali15x3_smba;
-static int __devinit ali15x3_setup(struct pci_dev *ALI15X3_dev)
+static int ali15x3_setup(struct pci_dev *ALI15X3_dev)
{
u16 a;
unsigned char temp;
@@ -484,7 +484,7 @@ static DEFINE_PCI_DEVICE_TABLE(ali15x3_ids) = {
MODULE_DEVICE_TABLE (pci, ali15x3_ids);
-static int __devinit ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (ali15x3_setup(dev)) {
dev_err(&dev->dev,
@@ -500,7 +500,7 @@ static int __devinit ali15x3_probe(struct pci_dev *dev, const struct pci_device_
return i2c_add_adapter(&ali15x3_adapter);
}
-static void __devexit ali15x3_remove(struct pci_dev *dev)
+static void ali15x3_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali15x3_adapter);
release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
@@ -510,7 +510,7 @@ static struct pci_driver ali15x3_driver = {
.name = "ali15x3_smbus",
.id_table = ali15x3_ids,
.probe = ali15x3_probe,
- .remove = __devexit_p(ali15x3_remove),
+ .remove = ali15x3_remove,
};
module_pci_driver(ali15x3_driver);
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 304aa03b57b2..e13e2aa2d05d 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -324,8 +324,7 @@ static DEFINE_PCI_DEVICE_TABLE(amd756_ids) = {
MODULE_DEVICE_TABLE (pci, amd756_ids);
-static int __devinit amd756_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int amd756_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int nforce = (id->driver_data == NFORCE);
int error;
@@ -397,7 +396,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
return error;
}
-static void __devexit amd756_remove(struct pci_dev *dev)
+static void amd756_remove(struct pci_dev *dev)
{
i2c_del_adapter(&amd756_smbus);
release_region(amd756_ioport, SMB_IOSIZE);
@@ -407,7 +406,7 @@ static struct pci_driver amd756_driver = {
.name = "amd756_smbus",
.id_table = amd756_ids,
.probe = amd756_probe,
- .remove = __devexit_p(amd756_remove),
+ .remove = amd756_remove,
};
module_pci_driver(amd756_driver);
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 0919ac1d99aa..a44e6e77c5a1 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -422,8 +422,7 @@ static DEFINE_PCI_DEVICE_TABLE(amd8111_ids) = {
MODULE_DEVICE_TABLE (pci, amd8111_ids);
-static int __devinit amd8111_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int amd8111_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct amd_smbus *smbus;
int error;
@@ -475,7 +474,7 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
return error;
}
-static void __devexit amd8111_remove(struct pci_dev *dev)
+static void amd8111_remove(struct pci_dev *dev)
{
struct amd_smbus *smbus = pci_get_drvdata(dev);
@@ -488,7 +487,7 @@ static struct pci_driver amd8111_driver = {
.name = "amd8111_smbus2",
.id_table = amd8111_ids,
.probe = amd8111_probe,
- .remove = __devexit_p(amd8111_remove),
+ .remove = amd8111_remove,
};
module_pci_driver(amd8111_driver);
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index c02bf208084f..ebc224154695 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -19,6 +19,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -29,9 +31,11 @@
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/platform_data/dma-atmel.h>
#define TWI_CLK_HZ 100000 /* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
+#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
/* AT91 TWI register definitions */
#define AT91_TWI_CR 0x0000 /* Control Register */
@@ -66,24 +70,39 @@
#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
struct at91_twi_pdata {
- unsigned clk_max_div;
- unsigned clk_offset;
- bool has_unre_flag;
+ unsigned clk_max_div;
+ unsigned clk_offset;
+ bool has_unre_flag;
+ bool has_dma_support;
+ struct at_dma_slave dma_slave;
+};
+
+struct at91_twi_dma {
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct scatterlist sg;
+ struct dma_async_tx_descriptor *data_desc;
+ enum dma_data_direction direction;
+ bool buf_mapped;
+ bool xfer_in_progress;
};
struct at91_twi_dev {
- struct device *dev;
- void __iomem *base;
- struct completion cmd_complete;
- struct clk *clk;
- u8 *buf;
- size_t buf_len;
- struct i2c_msg *msg;
- int irq;
- unsigned transfer_status;
- struct i2c_adapter adapter;
- unsigned twi_cwgr_reg;
- struct at91_twi_pdata *pdata;
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct clk *clk;
+ u8 *buf;
+ size_t buf_len;
+ struct i2c_msg *msg;
+ int irq;
+ unsigned imr;
+ unsigned transfer_status;
+ struct i2c_adapter adapter;
+ unsigned twi_cwgr_reg;
+ struct at91_twi_pdata *pdata;
+ bool use_dma;
+ struct at91_twi_dma dma;
};
static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
@@ -102,6 +121,17 @@ static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
}
+static void at91_twi_irq_save(struct at91_twi_dev *dev)
+{
+ dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
+ at91_disable_twi_interrupts(dev);
+}
+
+static void at91_twi_irq_restore(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_IER, dev->imr);
+}
+
static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
at91_disable_twi_interrupts(dev);
@@ -115,7 +145,7 @@ static void at91_init_twi_bus(struct at91_twi_dev *dev)
* Calculate symmetric clock as stated in datasheet:
* twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
*/
-static void __devinit at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
+static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
int ckdiv, cdiv, div;
struct at91_twi_pdata *pdata = dev->pdata;
@@ -138,6 +168,28 @@ static void __devinit at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}
+static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
+{
+ struct at91_twi_dma *dma = &dev->dma;
+
+ at91_twi_irq_save(dev);
+
+ if (dma->xfer_in_progress) {
+ if (dma->direction == DMA_FROM_DEVICE)
+ dmaengine_terminate_all(dma->chan_rx);
+ else
+ dmaengine_terminate_all(dma->chan_tx);
+ dma->xfer_in_progress = false;
+ }
+ if (dma->buf_mapped) {
+ dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
+ dev->buf_len, dma->direction);
+ dma->buf_mapped = false;
+ }
+
+ at91_twi_irq_restore(dev);
+}
+
static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
if (dev->buf_len <= 0)
@@ -154,6 +206,60 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
++dev->buf;
}
+static void at91_twi_write_data_dma_callback(void *data)
+{
+ struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+
+ dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+ dev->buf_len, DMA_MEM_TO_DEV);
+
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+}
+
+static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
+{
+ dma_addr_t dma_addr;
+ struct dma_async_tx_descriptor *txdesc;
+ struct at91_twi_dma *dma = &dev->dma;
+ struct dma_chan *chan_tx = dma->chan_tx;
+
+ if (dev->buf_len <= 0)
+ return;
+
+ dma->direction = DMA_TO_DEVICE;
+
+ at91_twi_irq_save(dev);
+ dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, dma_addr)) {
+ dev_err(dev->dev, "dma map failed\n");
+ return;
+ }
+ dma->buf_mapped = true;
+ at91_twi_irq_restore(dev);
+ sg_dma_len(&dma->sg) = dev->buf_len;
+ sg_dma_address(&dma->sg) = dma_addr;
+
+ txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc) {
+ dev_err(dev->dev, "dma prep slave sg failed\n");
+ goto error;
+ }
+
+ txdesc->callback = at91_twi_write_data_dma_callback;
+ txdesc->callback_param = dev;
+
+ dma->xfer_in_progress = true;
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(chan_tx);
+
+ return;
+
+error:
+ at91_twi_dma_cleanup(dev);
+}
+
static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
if (dev->buf_len <= 0)
@@ -179,6 +285,61 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
++dev->buf;
}
+static void at91_twi_read_data_dma_callback(void *data)
+{
+ struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+
+ dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+ dev->buf_len, DMA_DEV_TO_MEM);
+
+ /* The last two bytes have to be read without using dma */
+ dev->buf += dev->buf_len - 2;
+ dev->buf_len = 2;
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
+}
+
+static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
+{
+ dma_addr_t dma_addr;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct at91_twi_dma *dma = &dev->dma;
+ struct dma_chan *chan_rx = dma->chan_rx;
+
+ dma->direction = DMA_FROM_DEVICE;
+
+ /* Keep in mind that we won't use dma to read the last two bytes */
+ at91_twi_irq_save(dev);
+ dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev->dev, dma_addr)) {
+ dev_err(dev->dev, "dma map failed\n");
+ return;
+ }
+ dma->buf_mapped = true;
+ at91_twi_irq_restore(dev);
+ dma->sg.dma_address = dma_addr;
+ sg_dma_len(&dma->sg) = dev->buf_len - 2;
+
+ rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc) {
+ dev_err(dev->dev, "dma prep slave sg failed\n");
+ goto error;
+ }
+
+ rxdesc->callback = at91_twi_read_data_dma_callback;
+ rxdesc->callback_param = dev;
+
+ dma->xfer_in_progress = true;
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(dma->chan_rx);
+
+ return;
+
+error:
+ at91_twi_dma_cleanup(dev);
+}
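A small, self-contained illustration of the byte accounting above: for an n-byte read the DMA engine fills n - 2 bytes, and the completion callback then hands the tail of the buffer to the RXRDY interrupt path. The 10-byte message length is invented for the example.

#include <stdio.h>

int main(void)
{
	unsigned char msg[10];			/* hypothetical 10-byte read */
	unsigned char *buf = msg;
	unsigned buf_len = sizeof(msg);

	printf("DMA fills %u bytes\n", buf_len - 2);	/* read_data_dma */

	buf += buf_len - 2;		/* ...dma_callback: skip to the tail */
	buf_len = 2;
	printf("RXRDY path reads the last %u bytes starting at offset %td\n",
	       buf_len, buf - msg);
	return 0;
}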
+
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
struct at91_twi_dev *dev = dev_id;
@@ -229,12 +390,36 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
start_flags |= AT91_TWI_STOP;
at91_twi_write(dev, AT91_TWI_CR, start_flags);
- at91_twi_write(dev, AT91_TWI_IER,
+ /*
+ * When using DMA, the last byte has to be read manually so that the
+ * STOP command is not sent too late, which would otherwise cause
+ * extra data to be received. In practice, reading n-1 bytes with DMA
+ * runs into latency issues, so reading n-2 bytes with DMA and the
+ * last two bytes manually seems to be the best solution.
+ */
+ if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+ at91_twi_read_data_dma(dev);
+ /*
+ * It is important to enable TXCOMP irq here because
+ * doing it only when transferring the last two bytes
+ * will mask NACK errors since TXCOMP is set when a
+ * NACK occurs.
+ */
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_TXCOMP);
+ } else
+ at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
} else {
- at91_twi_write_next_byte(dev);
- at91_twi_write(dev, AT91_TWI_IER,
- AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
+ if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+ at91_twi_write_data_dma(dev);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ } else {
+ at91_twi_write_next_byte(dev);
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
+ }
}
ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
@@ -242,23 +427,31 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
if (ret == 0) {
dev_err(dev->dev, "controller timed out\n");
at91_init_twi_bus(dev);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto error;
}
if (dev->transfer_status & AT91_TWI_NACK) {
dev_dbg(dev->dev, "received nack\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto error;
}
if (dev->transfer_status & AT91_TWI_OVRE) {
dev_err(dev->dev, "overrun while reading\n");
- return -EIO;
+ ret = -EIO;
+ goto error;
}
if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
dev_err(dev->dev, "underrun while writing\n");
- return -EIO;
+ ret = -EIO;
+ goto error;
}
dev_dbg(dev->dev, "transfer complete\n");
return 0;
+
+error:
+ at91_twi_dma_cleanup(dev);
+ return ret;
}
static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
@@ -329,36 +522,42 @@ static struct at91_twi_pdata at91rm9200_config = {
.clk_max_div = 5,
.clk_offset = 3,
.has_unre_flag = true,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9261_config = {
.clk_max_div = 5,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9260_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9g20_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9g10_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9x5_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = true,
};
static const struct platform_device_id at91_twi_devtypes[] = {
@@ -405,7 +604,91 @@ MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#define atmel_twi_dt_ids NULL
#endif
-static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
+{
+ int ret = 0;
+ struct at_dma_slave *sdata;
+ struct dma_slave_config slave_config;
+ struct at91_twi_dma *dma = &dev->dma;
+
+ sdata = &dev->pdata->dma_slave;
+
+ memset(&slave_config, 0, sizeof(slave_config));
+ slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_maxburst = 1;
+ slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_maxburst = 1;
+ slave_config.device_fc = false;
+
+ if (sdata && sdata->dma_dev) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma->chan_tx = dma_request_channel(mask, filter, sdata);
+ if (!dma->chan_tx) {
+ dev_err(dev->dev, "no DMA channel available for tx\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ dma->chan_rx = dma_request_channel(mask, filter, sdata);
+ if (!dma->chan_rx) {
+ dev_err(dev->dev, "no DMA channel available for rx\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
+ dev_err(dev->dev, "failed to configure tx channel\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
+ dev_err(dev->dev, "failed to configure rx channel\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ sg_init_table(&dma->sg, 1);
+ dma->buf_mapped = false;
+ dma->xfer_in_progress = false;
+
+ dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
+
+ return ret;
+
+error:
+ dev_info(dev->dev, "can't use DMA\n");
+ if (dma->chan_rx)
+ dma_release_channel(dma->chan_rx);
+ if (dma->chan_tx)
+ dma_release_channel(dma->chan_tx);
+ return ret;
+}
+
+static struct at91_twi_pdata *at91_twi_get_driver_data(
struct platform_device *pdev)
{
if (pdev->dev.of_node) {
@@ -413,16 +696,17 @@ static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
if (!match)
return NULL;
- return match->data;
+ return (struct at91_twi_pdata *)match->data;
}
return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
}
-static int __devinit at91_twi_probe(struct platform_device *pdev)
+static int at91_twi_probe(struct platform_device *pdev)
{
struct at91_twi_dev *dev;
struct resource *mem;
int rc;
+ u32 phy_addr;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -433,14 +717,15 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -ENODEV;
+ phy_addr = mem->start;
dev->pdata = at91_twi_get_driver_data(pdev);
if (!dev->pdata)
return -ENODEV;
- dev->base = devm_request_and_ioremap(&pdev->dev, mem);
- if (!dev->base)
- return -EBUSY;
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0)
@@ -462,6 +747,11 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
}
clk_prepare_enable(dev->clk);
+ if (dev->pdata->has_dma_support) {
+ if (at91_twi_configure_dma(dev, phy_addr) == 0)
+ dev->use_dma = true;
+ }
+
at91_calc_twi_clock(dev, TWI_CLK_HZ);
at91_init_twi_bus(dev);
@@ -489,7 +779,7 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit at91_twi_remove(struct platform_device *pdev)
+static int at91_twi_remove(struct platform_device *pdev)
{
struct at91_twi_dev *dev = platform_get_drvdata(pdev);
int rc;
@@ -530,7 +820,7 @@ static const struct dev_pm_ops at91_twi_pm = {
static struct platform_driver at91_twi_driver = {
.probe = at91_twi_probe,
- .remove = __devexit_p(at91_twi_remove),
+ .remove = at91_twi_remove,
.id_table = at91_twi_devtypes,
.driver = {
.name = "at91_i2c",
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 582d616db346..b278298787d7 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -313,7 +313,7 @@ static void i2c_au1550_disable(struct i2c_au1550_data *priv)
* Prior to calling us, the 50MHz clock frequency and routing
* must have been set up for the PSC indicated by the adapter.
*/
-static int __devinit
+static int
i2c_au1550_probe(struct platform_device *pdev)
{
struct i2c_au1550_data *priv;
@@ -372,7 +372,7 @@ out:
return ret;
}
-static int __devexit i2c_au1550_remove(struct platform_device *pdev)
+static int i2c_au1550_remove(struct platform_device *pdev)
{
struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
@@ -423,7 +423,7 @@ static struct platform_driver au1xpsc_smbus_driver = {
.pm = AU1XPSC_SMBUS_PMOPS,
},
.probe = i2c_au1550_probe,
- .remove = __devexit_p(i2c_au1550_remove),
+ .remove = i2c_au1550_remove,
};
module_platform_driver(au1xpsc_smbus_driver);
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
new file mode 100644
index 000000000000..98386d659318
--- /dev/null
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -0,0 +1,300 @@
+/*
+ * CBUS I2C driver for Nokia Internet Tablets.
+ *
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Based on code written by Juha Yrjölä, David Weinehall, Mikko Ylinen and
+ * Felipe Balbi. Converted to I2C driver by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/i2c-cbus-gpio.h>
+
+/*
+ * Bit counts are derived from the Nokia implementation. They should be
+ * re-checked if other CBUS implementations appear.
+ */
+#define CBUS_ADDR_BITS 3
+#define CBUS_REG_BITS 5
+
+struct cbus_host {
+ spinlock_t lock; /* host lock */
+ struct device *dev;
+ int clk_gpio;
+ int dat_gpio;
+ int sel_gpio;
+};
+
+/**
+ * cbus_send_bit - sends one bit over the bus
+ * @host: the host we're using
+ * @bit: one bit of information to send
+ */
+static void cbus_send_bit(struct cbus_host *host, unsigned bit)
+{
+ gpio_set_value(host->dat_gpio, bit ? 1 : 0);
+ gpio_set_value(host->clk_gpio, 1);
+ gpio_set_value(host->clk_gpio, 0);
+}
+
+/**
+ * cbus_send_data - sends @len bits of data over the bus
+ * @host: the host we're using
+ * @data: the data to send
+ * @len: number of bits to send
+ */
+static void cbus_send_data(struct cbus_host *host, unsigned data, unsigned len)
+{
+ int i;
+
+ for (i = len; i > 0; i--)
+ cbus_send_bit(host, data & (1 << (i - 1)));
+}
+
+/**
+ * cbus_receive_bit - receives one bit from the bus
+ * @host: the host we're using
+ */
+static int cbus_receive_bit(struct cbus_host *host)
+{
+ int ret;
+
+ gpio_set_value(host->clk_gpio, 1);
+ ret = gpio_get_value(host->dat_gpio);
+ gpio_set_value(host->clk_gpio, 0);
+ return ret;
+}
+
+/**
+ * cbus_receive_word - receives a 16-bit word from the bus
+ * @host: the host we're using
+ */
+static int cbus_receive_word(struct cbus_host *host)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 16; i > 0; i--) {
+ int bit = cbus_receive_bit(host);
+
+ if (bit < 0)
+ return bit;
+
+ if (bit)
+ ret |= 1 << (i - 1);
+ }
+ return ret;
+}
+
+/**
+ * cbus_transfer - transfers data over the bus
+ * @host: the host we're using
+ * @rw: read/write flag
+ * @dev: device address
+ * @reg: register address
+ * @data: if @rw == I2C_SMBUS_WRITE, the data to send; otherwise 0
+ */
+static int cbus_transfer(struct cbus_host *host, char rw, unsigned dev,
+ unsigned reg, unsigned data)
+{
+ unsigned long flags;
+ int ret;
+
+ /* We don't want interrupts disturbing our transfer */
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Reset state and start of transfer, SEL stays down during transfer */
+ gpio_set_value(host->sel_gpio, 0);
+
+ /* Set the DAT pin to output */
+ gpio_direction_output(host->dat_gpio, 1);
+
+ /* Send the device address */
+ cbus_send_data(host, dev, CBUS_ADDR_BITS);
+
+ /* Send the rw flag */
+ cbus_send_bit(host, rw == I2C_SMBUS_READ);
+
+ /* Send the register address */
+ cbus_send_data(host, reg, CBUS_REG_BITS);
+
+ if (rw == I2C_SMBUS_WRITE) {
+ cbus_send_data(host, data, 16);
+ ret = 0;
+ } else {
+ ret = gpio_direction_input(host->dat_gpio);
+ if (ret) {
+ dev_dbg(host->dev, "failed setting direction\n");
+ goto out;
+ }
+ gpio_set_value(host->clk_gpio, 1);
+
+ ret = cbus_receive_word(host);
+ if (ret < 0) {
+ dev_dbg(host->dev, "failed receiving data\n");
+ goto out;
+ }
+ }
+
+ /* Indicate end of transfer, SEL goes up until next transfer */
+ gpio_set_value(host->sel_gpio, 1);
+ gpio_set_value(host->clk_gpio, 1);
+ gpio_set_value(host->clk_gpio, 0);
+
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
+}
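To make the framing in cbus_transfer() above concrete, here is a rough, host-independent sketch of the bit stream a write generates: 3 address bits, 1 read/write bit (1 = read), 5 register bits, then 16 data bits, all MSB first. The device, register and data values are invented for the example.

#include <stdio.h>

static void emit_bits(unsigned value, int len)
{
	int i;

	for (i = len; i > 0; i--)	/* MSB first, as in cbus_send_data() */
		putchar(value & (1u << (i - 1)) ? '1' : '0');
}

int main(void)
{
	unsigned dev = 5, reg = 0x12, data = 0xABCD;	/* made-up values */

	emit_bits(dev, 3);	/* CBUS_ADDR_BITS */
	emit_bits(0, 1);	/* rw bit: 0 = write (1 would mean read) */
	emit_bits(reg, 5);	/* CBUS_REG_BITS */
	emit_bits(data, 16);	/* payload, only sent for a write */
	putchar('\n');
	return 0;
}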
+
+static int cbus_i2c_smbus_xfer(struct i2c_adapter *adapter,
+ u16 addr,
+ unsigned short flags,
+ char read_write,
+ u8 command,
+ int size,
+ union i2c_smbus_data *data)
+{
+ struct cbus_host *chost = i2c_get_adapdata(adapter);
+ int ret;
+
+ if (size != I2C_SMBUS_WORD_DATA)
+ return -EINVAL;
+
+ ret = cbus_transfer(chost, read_write == I2C_SMBUS_READ, addr,
+ command, data->word);
+ if (ret < 0)
+ return ret;
+
+ if (read_write == I2C_SMBUS_READ)
+ data->word = ret;
+
+ return 0;
+}
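A hedged sketch of how a client driver bound to this adapter could use it: since cbus_i2c_smbus_xfer() only accepts I2C_SMBUS_WORD_DATA, the standard word-data helpers are the natural entry points. The register number and value below are invented, and this function is not part of the patch.

#include <linux/i2c.h>

/* Illustrative only. */
static int cbus_client_example(struct i2c_client *client)
{
	s32 val;

	val = i2c_smbus_read_word_data(client, 0x05);	/* hypothetical register */
	if (val < 0)
		return val;

	return i2c_smbus_write_word_data(client, 0x05, 0x1234);
}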
+
+static u32 cbus_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
+}
+
+static const struct i2c_algorithm cbus_i2c_algo = {
+ .smbus_xfer = cbus_i2c_smbus_xfer,
+ .functionality = cbus_i2c_func,
+};
+
+static int cbus_i2c_remove(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+
+ return i2c_del_adapter(adapter);
+}
+
+static int cbus_i2c_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter;
+ struct cbus_host *chost;
+ int ret;
+
+ adapter = devm_kzalloc(&pdev->dev, sizeof(struct i2c_adapter),
+ GFP_KERNEL);
+ if (!adapter)
+ return -ENOMEM;
+
+ chost = devm_kzalloc(&pdev->dev, sizeof(*chost), GFP_KERNEL);
+ if (!chost)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ struct device_node *dnode = pdev->dev.of_node;
+ if (of_gpio_count(dnode) != 3)
+ return -ENODEV;
+ chost->clk_gpio = of_get_gpio(dnode, 0);
+ chost->dat_gpio = of_get_gpio(dnode, 1);
+ chost->sel_gpio = of_get_gpio(dnode, 2);
+ } else if (pdev->dev.platform_data) {
+ struct i2c_cbus_platform_data *pdata = pdev->dev.platform_data;
+ chost->clk_gpio = pdata->clk_gpio;
+ chost->dat_gpio = pdata->dat_gpio;
+ chost->sel_gpio = pdata->sel_gpio;
+ } else {
+ return -ENODEV;
+ }
+
+ adapter->owner = THIS_MODULE;
+ adapter->class = I2C_CLASS_HWMON;
+ adapter->dev.parent = &pdev->dev;
+ adapter->nr = pdev->id;
+ adapter->timeout = HZ;
+ adapter->algo = &cbus_i2c_algo;
+ strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
+
+ spin_lock_init(&chost->lock);
+ chost->dev = &pdev->dev;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->clk_gpio,
+ GPIOF_OUT_INIT_LOW, "CBUS clk");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->dat_gpio, GPIOF_IN,
+ "CBUS data");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->sel_gpio,
+ GPIOF_OUT_INIT_HIGH, "CBUS sel");
+ if (ret)
+ return ret;
+
+ i2c_set_adapdata(adapter, chost);
+ platform_set_drvdata(pdev, adapter);
+
+ return i2c_add_numbered_adapter(adapter);
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id i2c_cbus_dt_ids[] = {
+ { .compatible = "i2c-cbus-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids);
+#endif
+
+static struct platform_driver cbus_i2c_driver = {
+ .probe = cbus_i2c_probe,
+ .remove = cbus_i2c_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "i2c-cbus-gpio",
+ },
+};
+module_platform_driver(cbus_i2c_driver);
+
+MODULE_ALIAS("platform:i2c-cbus-gpio");
+MODULE_DESCRIPTION("CBUS I2C driver");
+MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("David Weinehall");
+MODULE_AUTHOR("Mikko Ylinen");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index c1e1096ba069..2e79c1024191 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -426,7 +426,7 @@ static const struct i2c_adapter cpm_ops = {
.algo = &cpm_i2c_algo,
};
-static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
+static int cpm_i2c_setup(struct cpm_i2c *cpm)
{
struct platform_device *ofdev = cpm->ofdev;
const u32 *data;
@@ -634,7 +634,7 @@ static void cpm_i2c_shutdown(struct cpm_i2c *cpm)
cpm_muram_free(cpm->i2c_addr);
}
-static int __devinit cpm_i2c_probe(struct platform_device *ofdev)
+static int cpm_i2c_probe(struct platform_device *ofdev)
{
int result, len;
struct cpm_i2c *cpm;
@@ -688,7 +688,7 @@ out_free:
return result;
}
-static int __devexit cpm_i2c_remove(struct platform_device *ofdev)
+static int cpm_i2c_remove(struct platform_device *ofdev)
{
struct cpm_i2c *cpm = dev_get_drvdata(&ofdev->dev);
@@ -716,7 +716,7 @@ MODULE_DEVICE_TABLE(of, cpm_i2c_match);
static struct platform_driver cpm_i2c_driver = {
.probe = cpm_i2c_probe,
- .remove = __devexit_p(cpm_i2c_remove),
+ .remove = cpm_i2c_remove,
.driver = {
.name = "fsl-i2c-cpm",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index cbba7db9ad59..f5258c205de5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include "i2c-designware-core.h"
/*
@@ -725,3 +726,6 @@ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
return dw_readl(dev, DW_IC_COMP_PARAM_1);
}
EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 92a1e2c15baa..6add851e9dee 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -207,7 +207,7 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return dev->controller->clk_khz;
}
-static int __devinit i2c_dw_pci_probe(struct pci_dev *pdev,
+static int i2c_dw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct dw_i2c_dev *dev;
@@ -328,7 +328,7 @@ exit:
return r;
}
-static void __devexit i2c_dw_pci_remove(struct pci_dev *pdev)
+static void i2c_dw_pci_remove(struct pci_dev *pdev)
{
struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
@@ -368,7 +368,7 @@ static struct pci_driver dw_i2c_driver = {
.name = DRIVER_NAME,
.id_table = i2_designware_pci_ids,
.probe = i2c_dw_pci_probe,
- .remove = __devexit_p(i2c_dw_pci_remove),
+ .remove = i2c_dw_pci_remove,
.driver = {
.pm = &i2c_dw_pm_ops,
},
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0506fef8dc00..343357a2b5b4 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -50,7 +50,7 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return clk_get_rate(dev->clk)/1000;
}
-static int __devinit dw_i2c_probe(struct platform_device *pdev)
+static int dw_i2c_probe(struct platform_device *pdev)
{
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
@@ -169,7 +169,7 @@ err_release_region:
return r;
}
-static int __devexit dw_i2c_remove(struct platform_device *pdev)
+static int dw_i2c_remove(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
struct resource *mem;
@@ -228,7 +228,7 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
MODULE_ALIAS("platform:i2c_designware");
static struct platform_driver dw_i2c_driver = {
- .remove = __devexit_p(dw_i2c_remove),
+ .remove = dw_i2c_remove,
.driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 259f7697bf25..5e7886e7136e 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -758,7 +758,7 @@ static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK);
}
-static int __devinit pch_i2c_probe(struct pci_dev *pdev,
+static int pch_i2c_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void __iomem *base_addr;
@@ -851,7 +851,7 @@ err_pci_enable:
return ret;
}
-static void __devexit pch_i2c_remove(struct pci_dev *pdev)
+static void pch_i2c_remove(struct pci_dev *pdev)
{
int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
@@ -948,7 +948,7 @@ static struct pci_driver pch_pcidriver = {
.name = KBUILD_MODNAME,
.id_table = pch_pcidev_id,
.probe = pch_i2c_probe,
- .remove = __devexit_p(pch_i2c_remove),
+ .remove = pch_i2c_remove,
.suspend = pch_i2c_suspend,
.resume = pch_i2c_resume
};
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 37e2e82a9c88..485497066ed7 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -205,7 +205,7 @@ static struct i2c_adapter pcf_isa_ops = {
.name = "i2c-elektor",
};
-static int __devinit elektor_match(struct device *dev, unsigned int id)
+static int elektor_match(struct device *dev, unsigned int id)
{
#ifdef __alpha__
/* check to see we have memory mapped PCF8584 connected to the
@@ -264,7 +264,7 @@ static int __devinit elektor_match(struct device *dev, unsigned int id)
return 1;
}
-static int __devinit elektor_probe(struct device *dev, unsigned int id)
+static int elektor_probe(struct device *dev, unsigned int id)
{
init_waitqueue_head(&pcf_wait);
if (pcf_isa_init())
@@ -293,7 +293,7 @@ static int __devinit elektor_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int __devexit elektor_remove(struct device *dev, unsigned int id)
+static int elektor_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pcf_isa_ops);
@@ -316,7 +316,7 @@ static int __devexit elektor_remove(struct device *dev, unsigned int id)
static struct isa_driver i2c_elektor_driver = {
.match = elektor_match,
.probe = elektor_probe,
- .remove = __devexit_p(elektor_remove),
+ .remove = elektor_remove,
.driver = {
.owner = THIS_MODULE,
.name = "i2c-elektor",
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index e62d2d938628..f3fa4332bbdf 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -85,7 +85,7 @@ static int i2c_gpio_getscl(void *data)
return gpio_get_value(pdata->scl_pin);
}
-static int __devinit of_i2c_gpio_probe(struct device_node *np,
+static int of_i2c_gpio_probe(struct device_node *np,
struct i2c_gpio_platform_data *pdata)
{
u32 reg;
@@ -117,7 +117,7 @@ static int __devinit of_i2c_gpio_probe(struct device_node *np,
return 0;
}
-static int __devinit i2c_gpio_probe(struct platform_device *pdev)
+static int i2c_gpio_probe(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
@@ -184,7 +184,11 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
bit_data->data = pdata;
adap->owner = THIS_MODULE;
- snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
+ if (pdev->dev.of_node)
+ strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ else
+ snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
+
adap->algo_data = bit_data;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->dev.parent = &pdev->dev;
@@ -214,7 +218,7 @@ err_request_sda:
return ret;
}
-static int __devexit i2c_gpio_remove(struct platform_device *pdev)
+static int i2c_gpio_remove(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
@@ -247,7 +251,7 @@ static struct platform_driver i2c_gpio_driver = {
.of_match_table = of_match_ptr(i2c_gpio_dt_ids),
},
.probe = i2c_gpio_probe,
- .remove = __devexit_p(i2c_gpio_remove),
+ .remove = i2c_gpio_remove,
};
static int __init i2c_gpio_init(void)
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 19515df61021..3351cc7ed11f 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -356,7 +356,7 @@ static const struct i2c_algorithm highlander_i2c_algo = {
.functionality = highlander_i2c_func,
};
-static int __devinit highlander_i2c_probe(struct platform_device *pdev)
+static int highlander_i2c_probe(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev;
struct i2c_adapter *adap;
@@ -441,7 +441,7 @@ err:
return ret;
}
-static int __devexit highlander_i2c_remove(struct platform_device *pdev)
+static int highlander_i2c_remove(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev = platform_get_drvdata(pdev);
@@ -465,7 +465,7 @@ static struct platform_driver highlander_i2c_driver = {
},
.probe = highlander_i2c_probe,
- .remove = __devexit_p(highlander_i2c_remove),
+ .remove = highlander_i2c_remove,
};
module_platform_driver(highlander_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c9f95e1666a8..79c3d9069a48 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -112,7 +112,7 @@ static DEFINE_PCI_DEVICE_TABLE(hydra_ids) = {
MODULE_DEVICE_TABLE (pci, hydra_ids);
-static int __devinit hydra_probe(struct pci_dev *dev,
+static int hydra_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
unsigned long base = pci_resource_start(dev, 0);
@@ -139,7 +139,7 @@ static int __devinit hydra_probe(struct pci_dev *dev,
return 0;
}
-static void __devexit hydra_remove(struct pci_dev *dev)
+static void hydra_remove(struct pci_dev *dev)
{
pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */
i2c_del_adapter(&hydra_adap);
@@ -153,7 +153,7 @@ static struct pci_driver hydra_driver = {
.name = "hydra_smbus",
.id_table = hydra_ids,
.probe = hydra_probe,
- .remove = __devexit_p(hydra_remove),
+ .remove = hydra_remove,
};
module_pci_driver(hydra_driver);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 6abc00d59881..3092387f6ef4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -81,6 +81,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/err.h>
+#include <linux/of_i2c.h>
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
defined CONFIG_DMI
@@ -840,14 +841,14 @@ struct dmi_onboard_device_info {
const char *i2c_type;
};
-static struct dmi_onboard_device_info __devinitdata dmi_devices[] = {
+static const struct dmi_onboard_device_info dmi_devices[] = {
{ "Syleus", DMI_DEV_TYPE_OTHER, 0x73, "fscsyl" },
{ "Hermes", DMI_DEV_TYPE_OTHER, 0x73, "fscher" },
{ "Hades", DMI_DEV_TYPE_OTHER, 0x73, "fschds" },
};
-static void __devinit dmi_check_onboard_device(u8 type, const char *name,
- struct i2c_adapter *adap)
+static void dmi_check_onboard_device(u8 type, const char *name,
+ struct i2c_adapter *adap)
{
int i;
struct i2c_board_info info;
@@ -870,8 +871,7 @@ static void __devinit dmi_check_onboard_device(u8 type, const char *name,
/* We use our own function to check for onboard devices instead of
dmi_find_device() as some buggy BIOS's have the devices we are interested
in marked as disabled */
-static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
- void *adap)
+static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap)
{
int i, count;
@@ -900,7 +900,7 @@ static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
}
/* Register optional slaves */
-static void __devinit i801_probe_optional_slaves(struct i801_priv *priv)
+static void i801_probe_optional_slaves(struct i801_priv *priv)
{
/* Only register slaves on main SMBus channel */
if (priv->features & FEATURE_IDF)
@@ -920,7 +920,7 @@ static void __devinit i801_probe_optional_slaves(struct i801_priv *priv)
}
#else
static void __init input_apanel_init(void) {}
-static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {}
+static void i801_probe_optional_slaves(struct i801_priv *priv) {}
#endif /* CONFIG_X86 && CONFIG_DMI */
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
@@ -943,7 +943,7 @@ static struct i801_mux_config i801_mux_config_asus_z8_d18 = {
.n_gpios = 2,
};
-static struct dmi_system_id __devinitdata mux_dmi_table[] = {
+static const struct dmi_system_id mux_dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
@@ -1011,7 +1011,7 @@ static struct dmi_system_id __devinitdata mux_dmi_table[] = {
};
/* Setup multiplexing if needed */
-static int __devinit i801_add_mux(struct i801_priv *priv)
+static int i801_add_mux(struct i801_priv *priv)
{
struct device *dev = &priv->adapter.dev;
const struct i801_mux_config *mux_config;
@@ -1047,13 +1047,13 @@ static int __devinit i801_add_mux(struct i801_priv *priv)
return 0;
}
-static void __devexit i801_del_mux(struct i801_priv *priv)
+static void i801_del_mux(struct i801_priv *priv)
{
if (priv->mux_pdev)
platform_device_unregister(priv->mux_pdev);
}
-static unsigned int __devinit i801_get_adapter_class(struct i801_priv *priv)
+static unsigned int i801_get_adapter_class(struct i801_priv *priv)
{
const struct dmi_system_id *id;
const struct i801_mux_config *mux_config;
@@ -1083,8 +1083,7 @@ static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
}
#endif
-static int __devinit i801_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
int err, i;
@@ -1108,6 +1107,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
/* fall through */
default:
priv->features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_IRQ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
priv->features |= FEATURE_SMBUS_PEC;
@@ -1120,16 +1120,6 @@ static int __devinit i801_probe(struct pci_dev *dev,
break;
}
- /* IRQ processing tested on CougarPoint PCH, ICH5, ICH7-M and ICH10 */
- if (dev->device == PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS ||
- dev->device == PCI_DEVICE_ID_INTEL_82801EB_3 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH7_17 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH8_5 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH9_6 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH10_4 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH10_5)
- priv->features |= FEATURE_IRQ;
-
/* Disable features on user request */
for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
if (priv->features & disable_features & (1 << i))
@@ -1215,6 +1205,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
goto exit_free_irq;
}
+ of_i2c_register_devices(&priv->adapter);
i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
i801_add_mux(priv);
@@ -1233,7 +1224,7 @@ exit:
return err;
}
-static void __devexit i801_remove(struct pci_dev *dev)
+static void i801_remove(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
@@ -1279,7 +1270,7 @@ static struct pci_driver i801_driver = {
.name = "i801_smbus",
.id_table = i801_ids,
.probe = i801_probe,
- .remove = __devexit_p(i801_remove),
+ .remove = i801_remove,
.suspend = i801_suspend,
.resume = i801_resume,
};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 806e225f3de7..33a2abb6c063 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -660,7 +660,7 @@ static inline u8 iic_clckdiv(unsigned int opb)
return (u8)((opb + 9) / 10 - 1);
}
-static int __devinit iic_request_irq(struct platform_device *ofdev,
+static int iic_request_irq(struct platform_device *ofdev,
struct ibm_iic_private *dev)
{
struct device_node *np = ofdev->dev.of_node;
@@ -691,7 +691,7 @@ static int __devinit iic_request_irq(struct platform_device *ofdev,
/*
* Register single IIC interface
*/
-static int __devinit iic_probe(struct platform_device *ofdev)
+static int iic_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct ibm_iic_private *dev;
@@ -781,7 +781,7 @@ error_cleanup:
/*
* Cleanup initialized IIC interface
*/
-static int __devexit iic_remove(struct platform_device *ofdev)
+static int iic_remove(struct platform_device *ofdev)
{
struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev);
@@ -812,7 +812,7 @@ static struct platform_driver ibm_iic_driver = {
.of_match_table = ibm_iic_match,
},
.probe = iic_probe,
- .remove = __devexit_p(iic_remove),
+ .remove = iic_remove,
};
module_platform_driver(ibm_iic_driver);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b9734747d610..a71ece63e917 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -511,9 +511,9 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
return -ENOENT;
}
- base = devm_request_and_ioremap(&pdev->dev, res);
- if (!base)
- return -EBUSY;
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct imx_i2c_struct),
GFP_KERNEL);
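The devm_request_and_ioremap() to devm_ioremap_resource() conversions above (here and in the at91 probe earlier in this patch) follow one idiom: the helper claims the region and maps it in a single call and returns ERR_PTR() on failure, so error handling collapses to IS_ERR()/PTR_ERR(). A minimal sketch of a probe using it, with all names illustrative:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))		/* request or ioremap failed */
		return PTR_ERR(base);

	/* ... program the mapped registers through base ... */
	return 0;
}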
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index 7c28f10f95ca..de3736bf6465 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -947,7 +947,7 @@ static const struct dev_pm_ops intel_mid_i2c_pm_ops = {
* 5. Call intel_mid_i2c_hwinit() for hardware initialization
* 6. Register I2C adapter in i2c-core
*/
-static int __devinit intel_mid_i2c_probe(struct pci_dev *dev,
+static int intel_mid_i2c_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct intel_mid_i2c_private *mrst;
@@ -1079,7 +1079,7 @@ exit:
return err;
}
-static void __devexit intel_mid_i2c_remove(struct pci_dev *dev)
+static void intel_mid_i2c_remove(struct pci_dev *dev)
{
struct intel_mid_i2c_private *mrst = pci_get_drvdata(dev);
intel_mid_i2c_disable(&mrst->adap);
@@ -1113,7 +1113,7 @@ static struct pci_driver intel_mid_i2c_driver = {
.name = DRIVER_NAME,
.id_table = intel_mid_i2c_ids,
.probe = intel_mid_i2c_probe,
- .remove = __devexit_p(intel_mid_i2c_remove),
+ .remove = intel_mid_i2c_remove,
};
module_pci_driver(intel_mid_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index f90a6057508d..4099f79c2280 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -249,7 +249,7 @@ static struct i2c_adapter sch_adapter = {
.algo = &smbus_algorithm,
};
-static int __devinit smbus_sch_probe(struct platform_device *dev)
+static int smbus_sch_probe(struct platform_device *dev)
{
struct resource *res;
int retval;
@@ -284,7 +284,7 @@ static int __devinit smbus_sch_probe(struct platform_device *dev)
return retval;
}
-static int __devexit smbus_sch_remove(struct platform_device *pdev)
+static int smbus_sch_remove(struct platform_device *pdev)
{
struct resource *res;
if (sch_smba) {
@@ -303,7 +303,7 @@ static struct platform_driver smbus_sch_driver = {
.owner = THIS_MODULE,
},
.probe = smbus_sch_probe,
- .remove = __devexit_p(smbus_sch_remove),
+ .remove = smbus_sch_remove,
};
module_platform_driver(smbus_sch_driver);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index ca86430cb4a2..a69459e5c3f3 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -175,7 +175,7 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
}
#if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
-static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] __devinitconst = {
+static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
{28, 0x24}, {30, 0x01}, {32, 0x25}, {34, 0x02},
{36, 0x26}, {40, 0x27}, {44, 0x04}, {48, 0x28},
@@ -196,7 +196,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] __devinitconst = {
{10240, 0x9d}, {12288, 0x9e}, {15360, 0x9f}
};
-static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
+static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
int prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
@@ -230,7 +230,7 @@ static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
return (int)div->fdr;
}
-static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
+static void mpc_i2c_setup_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -252,7 +252,7 @@ static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
fdr);
}
#else /* !(CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x) */
-static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
+static void mpc_i2c_setup_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -260,7 +260,7 @@ static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
#endif /* CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x */
#ifdef CONFIG_PPC_MPC512x
-static void __devinit mpc_i2c_setup_512x(struct device_node *node,
+static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -288,7 +288,7 @@ static void __devinit mpc_i2c_setup_512x(struct device_node *node,
mpc_i2c_setup_52xx(node, i2c, clock, prescaler);
}
#else /* CONFIG_PPC_MPC512x */
-static void __devinit mpc_i2c_setup_512x(struct device_node *node,
+static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -296,7 +296,7 @@ static void __devinit mpc_i2c_setup_512x(struct device_node *node,
#endif /* CONFIG_PPC_MPC512x */
#ifdef CONFIG_FSL_SOC
-static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] __devinitconst = {
+static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = {
{160, 0x0120}, {192, 0x0121}, {224, 0x0122}, {256, 0x0123},
{288, 0x0100}, {320, 0x0101}, {352, 0x0601}, {384, 0x0102},
{416, 0x0602}, {448, 0x0126}, {480, 0x0103}, {512, 0x0127},
@@ -316,7 +316,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] __devinitconst = {
{49152, 0x011e}, {61440, 0x011f}
};
-static u32 __devinit mpc_i2c_get_sec_cfg_8xxx(void)
+static u32 mpc_i2c_get_sec_cfg_8xxx(void)
{
struct device_node *node = NULL;
u32 __iomem *reg;
@@ -345,7 +345,7 @@ static u32 __devinit mpc_i2c_get_sec_cfg_8xxx(void)
return val;
}
-static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
+static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
u32 prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
@@ -383,7 +383,7 @@ static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
return div ? (int)div->fdr : -EINVAL;
}
-static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
+static void mpc_i2c_setup_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -408,7 +408,7 @@ static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
}
#else /* !CONFIG_FSL_SOC */
-static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
+static void mpc_i2c_setup_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -615,7 +615,7 @@ static struct i2c_adapter mpc_ops = {
};
static const struct of_device_id mpc_i2c_of_match[];
-static int __devinit fsl_i2c_probe(struct platform_device *op)
+static int fsl_i2c_probe(struct platform_device *op)
{
const struct of_device_id *match;
struct mpc_i2c *i2c;
@@ -706,7 +706,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op)
return result;
};
-static int __devexit fsl_i2c_remove(struct platform_device *op)
+static int fsl_i2c_remove(struct platform_device *op)
{
struct mpc_i2c *i2c = dev_get_drvdata(&op->dev);
@@ -746,24 +746,24 @@ static int mpc_i2c_resume(struct device *dev)
SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
#endif
-static const struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_512x = {
.setup = mpc_i2c_setup_512x,
};
-static const struct mpc_i2c_data mpc_i2c_data_52xx __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_52xx = {
.setup = mpc_i2c_setup_52xx,
};
-static const struct mpc_i2c_data mpc_i2c_data_8313 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8313 = {
.setup = mpc_i2c_setup_8xxx,
};
-static const struct mpc_i2c_data mpc_i2c_data_8543 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8543 = {
.setup = mpc_i2c_setup_8xxx,
.prescaler = 2,
};
-static const struct mpc_i2c_data mpc_i2c_data_8544 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8544 = {
.setup = mpc_i2c_setup_8xxx,
.prescaler = 3,
};
@@ -785,7 +785,7 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
- .remove = __devexit_p(fsl_i2c_remove),
+ .remove = fsl_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 2e9d56719e99..8b20ef8524ac 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -495,7 +495,7 @@ static const struct i2c_algorithm mv64xxx_i2c_algo = {
*
*****************************************************************************
*/
-static int __devinit
+static int
mv64xxx_i2c_map_regs(struct platform_device *pd,
struct mv64xxx_i2c_data *drv_data)
{
@@ -530,13 +530,13 @@ mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
}
#ifdef CONFIG_OF
-static int __devinit
+static int
mv64xxx_calc_freq(const int tclk, const int n, const int m)
{
return tclk / (10 * (m + 1) * (2 << n));
}
-static bool __devinit
+static bool
mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
u32 *best_m)
{
@@ -560,7 +560,7 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
return true;
}
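The factors searched for above feed the formula in mv64xxx_calc_freq(), tclk / (10 * (m + 1) * (2 << n)). A rough, runnable sketch of that search, assuming 0-7 and 0-15 as the n and m ranges; the driver's own iteration order and tie-breaking may differ, and the clock values are examples only.

#include <stdio.h>
#include <limits.h>

static int calc_freq(int tclk, int n, int m)
{
	return tclk / (10 * (m + 1) * (2 << n));
}

int main(void)
{
	const int tclk = 200000000, req_freq = 100000;	/* example values */
	int n, m, best_n = 0, best_m = 0, best_delta = INT_MAX;

	for (n = 0; n <= 7; n++)		/* assumed field widths */
		for (m = 0; m <= 15; m++) {
			int delta = req_freq - calc_freq(tclk, n, m);

			if (delta >= 0 && delta < best_delta) {
				best_delta = delta;
				best_n = n;
				best_m = m;
			}
		}
	printf("n=%d m=%d -> %d Hz\n", best_n, best_m,
	       calc_freq(tclk, best_n, best_m));
	return 0;
}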
-static int __devinit
+static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device_node *np)
{
@@ -597,7 +597,7 @@ out:
#endif
}
#else /* CONFIG_OF */
-static int __devinit
+static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device_node *np)
{
@@ -605,7 +605,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
}
#endif /* CONFIG_OF */
-static int __devinit
+static int
mv64xxx_i2c_probe(struct platform_device *pd)
{
struct mv64xxx_i2c_data *drv_data;
@@ -697,7 +697,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
return rc;
}
-static int __devexit
+static int
mv64xxx_i2c_remove(struct platform_device *dev)
{
struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(dev);
@@ -718,7 +718,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
return rc;
}
-static const struct of_device_id mv64xxx_i2c_of_match_table[] __devinitdata = {
+static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
{ .compatible = "marvell,mv64xxx-i2c", },
{}
};
@@ -726,7 +726,7 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
- .remove = __devexit_p(mv64xxx_i2c_remove),
+ .remove = mv64xxx_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = MV64XXX_I2C_CTLR_NAME,
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 0670da79ee5e..d6abaf2cf2e3 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -127,7 +127,7 @@ struct mxs_i2c_dev {
struct device *dev;
void __iomem *regs;
struct completion cmd_complete;
- u32 cmd_err;
+ int cmd_err;
struct i2c_adapter adapter;
const struct mxs_i2c_speed_config *speed;
@@ -316,7 +316,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
if (msg->len == 0)
return -EINVAL;
- init_completion(&i2c->cmd_complete);
+ INIT_COMPLETION(i2c->cmd_complete);
i2c->cmd_err = 0;
ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -359,7 +359,7 @@ static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 mxs_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
@@ -432,7 +432,7 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
return 0;
}
-static int __devinit mxs_i2c_probe(struct platform_device *pdev)
+static int mxs_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mxs_i2c_dev *i2c;
@@ -473,6 +473,8 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
i2c->dev = dev;
i2c->speed = &mxs_i2c_95kHz_config;
+ init_completion(&i2c->cmd_complete);
+
if (dev->of_node) {
err = mxs_i2c_get_ofdata(i2c);
if (err)
@@ -515,7 +517,7 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit mxs_i2c_remove(struct platform_device *pdev)
+static int mxs_i2c_remove(struct platform_device *pdev)
{
struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
int ret;
@@ -546,7 +548,7 @@ static struct platform_driver mxs_i2c_driver = {
.owner = THIS_MODULE,
.of_match_table = mxs_i2c_dt_ids,
},
- .remove = __devexit_p(mxs_i2c_remove),
+ .remove = mxs_i2c_remove,
};
static int __init mxs_i2c_init(void)
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 392303b4be07..adac8542771d 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -117,7 +117,7 @@ struct nforce2_smbus {
#define MAX_TIMEOUT 100
/* We disable the second SMBus channel on these boards */
-static struct dmi_system_id __devinitdata nforce2_dmi_blacklist2[] = {
+static const struct dmi_system_id nforce2_dmi_blacklist2[] = {
{
.ident = "DFI Lanparty NF4 Expert",
.matches = {
@@ -330,8 +330,8 @@ static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
MODULE_DEVICE_TABLE (pci, nforce2_ids);
-static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
- int alt_reg, struct nforce2_smbus *smbus, const char *name)
+static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
+ struct nforce2_smbus *smbus, const char *name)
{
int error;
@@ -382,7 +382,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
}
-static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct nforce2_smbus *smbuses;
int res1, res2;
@@ -430,7 +430,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
}
-static void __devexit nforce2_remove(struct pci_dev *dev)
+static void nforce2_remove(struct pci_dev *dev)
{
struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
@@ -450,7 +450,7 @@ static struct pci_driver nforce2_driver = {
.name = "nForce2_smbus",
.id_table = nforce2_ids,
.probe = nforce2_probe,
- .remove = __devexit_p(nforce2_remove),
+ .remove = nforce2_remove,
};
module_pci_driver(nforce2_driver);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 02c3115a2dfa..8b2ffcf45322 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -435,13 +435,6 @@ static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
timeout = wait_for_completion_timeout(
&dev->xfer_complete, dev->adap.timeout);
- if (timeout < 0) {
- dev_err(&dev->adev->dev,
- "wait_for_completion_timeout "
- "returned %d waiting for event\n", timeout);
- status = timeout;
- }
-
if (timeout == 0) {
/* Controller timed out */
dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n",
@@ -523,13 +516,6 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
timeout = wait_for_completion_timeout(
&dev->xfer_complete, dev->adap.timeout);
- if (timeout < 0) {
- dev_err(&dev->adev->dev,
- "wait_for_completion_timeout "
- "returned %d waiting for event\n", timeout);
- status = timeout;
- }
-
if (timeout == 0) {
/* Controller timed out */
dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n",
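For context: wait_for_completion_timeout() returns the remaining jiffies as an unsigned long (0 on timeout), so the negative-timeout branches removed above could never trigger. A minimal sketch of the surviving pattern, reusing this driver's field names (illustrative, not part of the patch):

#include <linux/completion.h>
#include <linux/errno.h>

/* Fragment for illustration; "dev" is the driver's nmk_i2c_dev. */
static int example_wait(struct nmk_i2c_dev *dev)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&dev->xfer_complete,
						dev->adap.timeout);
	return time_left ? 0 : -ETIMEDOUT;	/* 0 means the controller timed out */
}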
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index a23b91b0b738..865ee350adb3 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -518,7 +518,7 @@ static const struct i2c_algorithm nuc900_i2c_algorithm = {
* called by the bus driver when a suitable device is found
*/
-static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
+static int nuc900_i2c_probe(struct platform_device *pdev)
{
struct nuc900_i2c *i2c;
struct nuc900_platform_i2c *pdata;
@@ -663,7 +663,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
* called when device is removed from the bus
*/
-static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
+static int nuc900_i2c_remove(struct platform_device *pdev)
{
struct nuc900_i2c *i2c = platform_get_drvdata(pdev);
@@ -684,7 +684,7 @@ static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
static struct platform_driver nuc900_i2c_driver = {
.probe = nuc900_i2c_probe,
- .remove = __devexit_p(nuc900_i2c_remove),
+ .remove = nuc900_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = "nuc900-i2c0",
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 15da1ac7cf9e..a337d08a392d 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -4,11 +4,15 @@
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
+ * Support for the GRLIB port of the controller by
+ * Andreas Larsson <andreas@gaisler.com>
+ *
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -34,6 +38,8 @@ struct ocores_i2c {
int nmsgs;
int state; /* see STATE_ */
int clock_khz;
+ void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
+ u8 (*getreg)(struct ocores_i2c *i2c, int reg);
};
/* registers */
@@ -67,24 +73,47 @@ struct ocores_i2c {
#define STATE_READ 3
#define STATE_ERROR 4
+#define TYPE_OCORES 0
+#define TYPE_GRLIB 1
+
+static void oc_setreg_8(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite8(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static void oc_setreg_16(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite16(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static void oc_setreg_32(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite32(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_8(struct ocores_i2c *i2c, int reg)
+{
+ return ioread8(i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_16(struct ocores_i2c *i2c, int reg)
+{
+ return ioread16(i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_32(struct ocores_i2c *i2c, int reg)
+{
+ return ioread32(i2c->base + (reg << i2c->reg_shift));
+}
+
static inline void oc_setreg(struct ocores_i2c *i2c, int reg, u8 value)
{
- if (i2c->reg_io_width == 4)
- iowrite32(value, i2c->base + (reg << i2c->reg_shift));
- else if (i2c->reg_io_width == 2)
- iowrite16(value, i2c->base + (reg << i2c->reg_shift));
- else
- iowrite8(value, i2c->base + (reg << i2c->reg_shift));
+ i2c->setreg(i2c, reg, value);
}
static inline u8 oc_getreg(struct ocores_i2c *i2c, int reg)
{
- if (i2c->reg_io_width == 4)
- return ioread32(i2c->base + (reg << i2c->reg_shift));
- else if (i2c->reg_io_width == 2)
- return ioread16(i2c->base + (reg << i2c->reg_shift));
- else
- return ioread8(i2c->base + (reg << i2c->reg_shift));
+ return i2c->getreg(i2c, reg);
}
static void ocores_process(struct ocores_i2c *i2c)
@@ -223,11 +252,59 @@ static struct i2c_adapter ocores_adapter = {
.algo = &ocores_algorithm,
};
+static struct of_device_id ocores_i2c_match[] = {
+ {
+ .compatible = "opencores,i2c-ocores",
+ .data = (void *)TYPE_OCORES,
+ },
+ {
+ .compatible = "aeroflexgaisler,i2cmst",
+ .data = (void *)TYPE_GRLIB,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ocores_i2c_match);
+
#ifdef CONFIG_OF
+/* Read and write functions for the GRLIB port of the controller. Registers are
+ * 32-bit big endian and the PRELOW and PREHIGH registers are merged into one
+ * register. The subsequent registers have their offsets decreased accordingly. */
+static u8 oc_getreg_grlib(struct ocores_i2c *i2c, int reg)
+{
+ u32 rd;
+ int rreg = reg;
+ if (reg != OCI2C_PRELOW)
+ rreg--;
+ rd = ioread32be(i2c->base + (rreg << i2c->reg_shift));
+ if (reg == OCI2C_PREHIGH)
+ return (u8)(rd >> 8);
+ else
+ return (u8)rd;
+}
+
+static void oc_setreg_grlib(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ u32 curr, wr;
+ int rreg = reg;
+ if (reg != OCI2C_PRELOW)
+ rreg--;
+ if (reg == OCI2C_PRELOW || reg == OCI2C_PREHIGH) {
+ curr = ioread32be(i2c->base + (rreg << i2c->reg_shift));
+ if (reg == OCI2C_PRELOW)
+ wr = (curr & 0xff00) | value;
+ else
+ wr = (((u32)value) << 8) | (curr & 0xff);
+ } else {
+ wr = value;
+ }
+ iowrite32be(wr, i2c->base + (rreg << i2c->reg_shift));
+}
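A small sketch of the remapping that oc_getreg_grlib()/oc_setreg_grlib() above implement: logical registers 0 (PRELOW) and 1 (PREHIGH) share big-endian word 0 in the low and high byte respectively, and every later register sits one word earlier than in the standard layout. Only the two prescaler indices are spelled out here; the remaining indices are assumed to follow the usual ocores numbering.

#include <stdio.h>

#define OCI2C_PRELOW	0
#define OCI2C_PREHIGH	1

/* Returns the 32-bit word index and the left-shift of the byte lane. */
static void grlib_remap(int reg, int *word, int *shift)
{
	*word = (reg == OCI2C_PRELOW) ? reg : reg - 1;
	*shift = (reg == OCI2C_PREHIGH) ? 8 : 0;
}

int main(void)
{
	int reg, word, shift;

	for (reg = 0; reg <= 4; reg++) {	/* PRELOW .. CMD/STATUS */
		grlib_remap(reg, &word, &shift);
		printf("logical reg %d -> word %d, bits %d..%d\n",
		       reg, word, shift, shift + 7);
	}
	return 0;
}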
+
static int ocores_i2c_of_probe(struct platform_device *pdev,
struct ocores_i2c *i2c)
{
struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
u32 val;
if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
@@ -253,17 +330,26 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
of_property_read_u32(pdev->dev.of_node, "reg-io-width",
&i2c->reg_io_width);
+
+ match = of_match_node(ocores_i2c_match, pdev->dev.of_node);
+ if (match && (int)match->data == TYPE_GRLIB) {
+ dev_dbg(&pdev->dev, "GRLIB variant of i2c-ocores\n");
+ i2c->setreg = oc_setreg_grlib;
+ i2c->getreg = oc_getreg_grlib;
+ }
+
return 0;
}
#else
#define ocores_i2c_of_probe(pdev,i2c) -ENODEV
#endif
-static int __devinit ocores_i2c_probe(struct platform_device *pdev)
+static int ocores_i2c_probe(struct platform_device *pdev)
{
struct ocores_i2c *i2c;
struct ocores_i2c_platform_data *pdata;
- struct resource *res, *res2;
+ struct resource *res;
+ int irq;
int ret;
int i;
@@ -271,26 +357,17 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res2)
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Memory region busy\n");
- return -EBUSY;
- }
-
- i2c->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!i2c->base) {
- dev_err(&pdev->dev, "Unable to map registers\n");
- return -EIO;
- }
+ i2c->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
pdata = pdev->dev.platform_data;
if (pdata) {
@@ -306,10 +383,34 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (i2c->reg_io_width == 0)
i2c->reg_io_width = 1; /* Set to default value */
+ if (!i2c->setreg || !i2c->getreg) {
+ switch (i2c->reg_io_width) {
+ case 1:
+ i2c->setreg = oc_setreg_8;
+ i2c->getreg = oc_getreg_8;
+ break;
+
+ case 2:
+ i2c->setreg = oc_setreg_16;
+ i2c->getreg = oc_getreg_16;
+ break;
+
+ case 4:
+ i2c->setreg = oc_setreg_32;
+ i2c->getreg = oc_getreg_32;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
+ i2c->reg_io_width);
+ return -EINVAL;
+ }
+ }
+
ocores_init(i2c);
init_waitqueue_head(&i2c->wait);
- ret = devm_request_irq(&pdev->dev, res2->start, ocores_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
@@ -341,7 +442,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ocores_i2c_remove(struct platform_device *pdev)
+static int ocores_i2c_remove(struct platform_device *pdev)
{
struct ocores_i2c *i2c = platform_get_drvdata(pdev);
@@ -383,15 +484,9 @@ static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
#define OCORES_I2C_PM NULL
#endif
-static struct of_device_id ocores_i2c_match[] = {
- { .compatible = "opencores,i2c-ocores", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ocores_i2c_match);
-
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
- .remove = __devexit_p(ocores_i2c_remove),
+ .remove = ocores_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = "ocores-i2c",
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index f44c83549fe5..484ca771fdff 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -446,7 +446,7 @@ static struct i2c_adapter octeon_i2c_ops = {
/**
* octeon_i2c_setclock - Calculate and set clock divisors.
*/
-static int __devinit octeon_i2c_setclock(struct octeon_i2c *i2c)
+static int octeon_i2c_setclock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
@@ -489,7 +489,7 @@ static int __devinit octeon_i2c_setclock(struct octeon_i2c *i2c)
return 0;
}
-static int __devinit octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
+static int octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
{
u8 status;
int tries;
@@ -510,7 +510,7 @@ static int __devinit octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
return -EIO;
}
-static int __devinit octeon_i2c_probe(struct platform_device *pdev)
+static int octeon_i2c_probe(struct platform_device *pdev)
{
int irq, result = 0;
struct octeon_i2c *i2c;
@@ -609,7 +609,7 @@ out:
return result;
};
-static int __devexit octeon_i2c_remove(struct platform_device *pdev)
+static int octeon_i2c_remove(struct platform_device *pdev)
{
struct octeon_i2c *i2c = platform_get_drvdata(pdev);
@@ -628,7 +628,7 @@ MODULE_DEVICE_TABLE(of, octeon_i2c_match);
static struct platform_driver octeon_i2c_driver = {
.probe = octeon_i2c_probe,
- .remove = __devexit_p(octeon_i2c_remove),
+ .remove = octeon_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 3525c9e62cb0..3ee188679cf1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -43,14 +43,16 @@
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
/* I2C controller revisions */
#define OMAP_I2C_OMAP1_REV_2 0x20
/* I2C controller revisions present on specific hardware */
-#define OMAP_I2C_REV_ON_2430 0x36
-#define OMAP_I2C_REV_ON_3430_3530 0x3C
-#define OMAP_I2C_REV_ON_3630_4430 0x40
+#define OMAP_I2C_REV_ON_2430 0x00000036
+#define OMAP_I2C_REV_ON_3430_3530 0x0000003C
+#define OMAP_I2C_REV_ON_3630 0x00000040
+#define OMAP_I2C_REV_ON_4430_PLUS 0x50400002
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
@@ -190,7 +192,6 @@ struct omap_i2c_dev {
void (*set_mpu_wkup_lat)(struct device *dev,
long latency);
u32 speed; /* Speed of bus in kHz */
- u32 dtrev; /* extra revision from DT */
u32 flags;
u16 cmd_err;
u8 *buf;
@@ -202,17 +203,18 @@ struct omap_i2c_dev {
* fifo_size==0 implies no fifo
* if set, should be trsh+1
*/
- u8 rev;
+ u32 rev;
unsigned b_hw:1; /* bad h/w fixes */
unsigned receiver:1; /* true when we're in receiver mode */
u16 iestate; /* Saved interrupt register */
u16 pscstate;
u16 scllstate;
u16 sclhstate;
- u16 bufstate;
u16 syscstate;
u16 westate;
u16 errata;
+
+ struct pinctrl *pins;
};
static const u8 reg_map_ip_v1[] = {
@@ -275,16 +277,39 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
(i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
-static int omap_i2c_init(struct omap_i2c_dev *dev)
+static void __omap_i2c_init(struct omap_i2c_dev *dev)
+{
+
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
+
+ /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
+ omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
+
+ /* SCL low and high time values */
+ omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
+ omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
+ if (dev->rev >= OMAP_I2C_REV_ON_3430_3530)
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+
+ /* Take the I2C module out of reset: */
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+
+ /*
+ * Don't write to this register if the IE state is 0 as it can
+ * cause deadlock.
+ */
+ if (dev->iestate)
+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
+}
+
+static int omap_i2c_reset(struct omap_i2c_dev *dev)
{
- u16 psc = 0, scll = 0, sclh = 0, buf = 0;
- u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
- unsigned long fclk_rate = 12000000;
unsigned long timeout;
- unsigned long internal_clk = 0;
- struct clk *fclk;
+ u16 sysc;
if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
+ sysc = omap_i2c_read_reg(dev, OMAP_I2C_SYSC_REG);
+
/* Disable I2C controller before soft reset */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
@@ -306,32 +331,28 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
}
/* SYSC register is cleared by the reset; rewrite it */
- if (dev->rev == OMAP_I2C_REV_ON_2430) {
-
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
- SYSC_AUTOIDLE_MASK);
-
- } else if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
- dev->syscstate = SYSC_AUTOIDLE_MASK;
- dev->syscstate |= SYSC_ENAWAKEUP_MASK;
- dev->syscstate |= (SYSC_IDLEMODE_SMART <<
- __ffs(SYSC_SIDLEMODE_MASK));
- dev->syscstate |= (SYSC_CLOCKACTIVITY_FCLK <<
- __ffs(SYSC_CLOCKACTIVITY_MASK));
-
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
- dev->syscstate);
- /*
- * Enabling all wakup sources to stop I2C freezing on
- * WFI instruction.
- * REVISIT: Some wkup sources might not be needed.
- */
- dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
- dev->westate);
- }
+ omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, sysc);
+
+ }
+ return 0;
+}
+
+static int omap_i2c_init(struct omap_i2c_dev *dev)
+{
+ u16 psc = 0, scll = 0, sclh = 0;
+ u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
+ unsigned long fclk_rate = 12000000;
+ unsigned long internal_clk = 0;
+ struct clk *fclk;
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
+ /*
+ * Enable all wakeup sources to stop I2C from freezing on
+ * a WFI instruction.
+ * REVISIT: Some wakeup sources might not be needed.
+ */
+ dev->westate = OMAP_I2C_WE_ALL;
}
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
@@ -416,28 +437,17 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
}
- /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
- omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc);
-
- /* SCL low and high time values */
- omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
- omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);
-
- /* Take the I2C module out of reset: */
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
-
- /* Enable interrupts */
dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
- dev->pscstate = psc;
- dev->scllstate = scll;
- dev->sclhstate = sclh;
- dev->bufstate = buf;
- }
+
+ dev->pscstate = psc;
+ dev->scllstate = scll;
+ dev->sclhstate = sclh;
+
+ __omap_i2c_init(dev);
+
return 0;
}
@@ -490,7 +500,7 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
- if (dev->rev < OMAP_I2C_REV_ON_3630_4430)
+ if (dev->rev < OMAP_I2C_REV_ON_3630)
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
@@ -586,7 +596,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
OMAP_I2C_TIMEOUT);
if (timeout == 0) {
dev_err(dev->dev, "controller timed out\n");
- omap_i2c_init(dev);
+ omap_i2c_reset(dev);
+ __omap_i2c_init(dev);
return -ETIMEDOUT;
}
@@ -596,7 +607,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
/* We have an error */
if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR |
OMAP_I2C_STAT_XUDF)) {
- omap_i2c_init(dev);
+ omap_i2c_reset(dev);
+ __omap_i2c_init(dev);
return -EIO;
}
@@ -642,13 +654,14 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
break;
}
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, -1);
-
if (r == 0)
r = num;
omap_i2c_wait_for_bb(dev);
+
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
+
out:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -790,7 +803,7 @@ static int errata_omap3_i462(struct omap_i2c_dev *dev)
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
dev->cmd_err |= OMAP_I2C_STAT_AL;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
}
return -EIO;
@@ -950,7 +963,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
i2c_omap_errata_i207(dev, stat);
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
- break;
+ continue;
}
if (stat & OMAP_I2C_STAT_RRDY) {
@@ -976,7 +989,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
break;
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
- break;
+ continue;
}
if (stat & OMAP_I2C_STAT_XRDY) {
@@ -1025,9 +1038,7 @@ static const struct i2c_algorithm omap_i2c_algo = {
#ifdef CONFIG_OF
static struct omap_i2c_bus_platform_data omap3_pdata = {
.rev = OMAP_I2C_IP_VERSION_1,
- .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
- OMAP_I2C_FLAG_BUS_SHIFT_2,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_i2c_bus_platform_data omap4_pdata = {
@@ -1048,7 +1059,17 @@ static const struct of_device_id omap_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
#endif
-static int __devinit
+#define OMAP_I2C_SCHEME(rev) ((rev & 0xc000) >> 14)
+
+#define OMAP_I2C_REV_SCHEME_0_MAJOR(rev) (rev >> 4)
+#define OMAP_I2C_REV_SCHEME_0_MINOR(rev) (rev & 0xf)
+
+#define OMAP_I2C_REV_SCHEME_1_MAJOR(rev) ((rev & 0x0700) >> 7)
+#define OMAP_I2C_REV_SCHEME_1_MINOR(rev) (rev & 0x1f)
+#define OMAP_I2C_SCHEME_0 0
+#define OMAP_I2C_SCHEME_1 1
+
+static int
omap_i2c_probe(struct platform_device *pdev)
{
struct omap_i2c_dev *dev;
@@ -1060,6 +1081,8 @@ omap_i2c_probe(struct platform_device *pdev)
const struct of_device_id *match;
int irq;
int r;
+ u32 rev;
+ u16 minor, major, scheme;
/* NOTE: driver uses the static register mapping */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1080,18 +1103,15 @@ omap_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
}
- dev->base = devm_request_and_ioremap(&pdev->dev, mem);
- if (!dev->base) {
- dev_err(&pdev->dev, "I2C region already claimed\n");
- return -ENOMEM;
- }
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
if (match) {
u32 freq = 100000; /* default to 100000 Hz */
pdata = match->data;
- dev->dtrev = pdata->rev;
dev->flags = pdata->flags;
of_property_read_u32(node, "clock-frequency", &freq);
@@ -1101,7 +1121,16 @@ omap_i2c_probe(struct platform_device *pdev)
dev->speed = pdata->clkrate;
dev->flags = pdata->flags;
dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
- dev->dtrev = pdata->rev;
+ }
+
+ dev->pins = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(dev->pins)) {
+ if (PTR_ERR(dev->pins) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "did not get pins for i2c error: %li\n",
+ PTR_ERR(dev->pins));
+ dev->pins = NULL;
}
dev->dev = &pdev->dev;
@@ -1114,11 +1143,6 @@ omap_i2c_probe(struct platform_device *pdev)
dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
- dev->regs = (u8 *)reg_map_ip_v2;
- else
- dev->regs = (u8 *)reg_map_ip_v1;
-
pm_runtime_enable(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, OMAP_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(dev->dev);
@@ -1127,11 +1151,37 @@ omap_i2c_probe(struct platform_device *pdev)
if (IS_ERR_VALUE(r))
goto err_free_mem;
- dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
+ /*
+ * Read the revision register's high bits [15:14], i.e. the scheme;
+ * a value of 1 indicates IP version 2. On OMAP1/2/3, offset 0x04 is
+ * the IE register, whose bits [15:14] are 0 at reset. Since
+ * omap_i2c_read_reg() relies on reg_map_ip_*, a raw_readw() is used here.
+ */
+ rev = __raw_readw(dev->base + 0x04);
+
+ scheme = OMAP_I2C_SCHEME(rev);
+ switch (scheme) {
+ case OMAP_I2C_SCHEME_0:
+ dev->regs = (u8 *)reg_map_ip_v1;
+ dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG);
+ minor = OMAP_I2C_REV_SCHEME_0_MINOR(dev->rev);
+ major = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
+ break;
+ case OMAP_I2C_SCHEME_1:
+ /* FALLTHROUGH */
+ default:
+ dev->regs = (u8 *)reg_map_ip_v2;
+ rev = (rev << 16) |
+ omap_i2c_read_reg(dev, OMAP_I2C_IP_V2_REVNB_LO);
+ minor = OMAP_I2C_REV_SCHEME_1_MINOR(rev);
+ major = OMAP_I2C_REV_SCHEME_1_MAJOR(rev);
+ dev->rev = rev;
+ }
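A quick worked decode of the macros above (a sketch, not part of the patch): on scheme-0 parts the raw read at offset 0x04 hits the IE register, which is 0 at reset, so OMAP_I2C_SCHEME() returns 0 and the legacy REV register is read through the v1 map. If that read returned 0x40 (the OMAP_I2C_REV_ON_3630 value defined earlier):

    scheme = (0x0000 & 0xc000) >> 14 = 0
    major  = OMAP_I2C_REV_SCHEME_0_MAJOR(0x40) = 0x40 >> 4  = 4
    minor  = OMAP_I2C_REV_SCHEME_0_MINOR(0x40) = 0x40 & 0xf = 0

so the probe message would report "rev4.0". On scheme-1 parts, bits [15:14] of the word at offset 0x04 are non-zero, the full revision is assembled from REVNB_HI:REVNB_LO, and the scheme-1 major/minor macros are used instead.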
dev->errata = 0;
- if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+ if (dev->rev >= OMAP_I2C_REV_ON_2430 &&
+ dev->rev < OMAP_I2C_REV_ON_4430_PLUS)
dev->errata |= I2C_OMAP_ERRATA_I207;
if (dev->rev <= OMAP_I2C_REV_ON_3430_3530)
@@ -1152,7 +1202,7 @@ omap_i2c_probe(struct platform_device *pdev)
dev->fifo_size = (dev->fifo_size / 2);
- if (dev->rev < OMAP_I2C_REV_ON_3630_4430)
+ if (dev->rev < OMAP_I2C_REV_ON_3630)
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
@@ -1195,8 +1245,8 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_unuse_clocks;
}
- dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", adap->nr,
- dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
+ major, minor, dev->speed);
of_i2c_register_devices(adap);
@@ -1215,7 +1265,7 @@ err_free_mem:
return r;
}
-static int __devexit omap_i2c_remove(struct platform_device *pdev)
+static int omap_i2c_remove(struct platform_device *pdev)
{
struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
int ret;
@@ -1239,14 +1289,13 @@ static int omap_i2c_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
- u16 iv;
_dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG);
omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0);
if (_dev->rev < OMAP_I2C_OMAP1_REV_2) {
- iv = omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
+ omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
} else {
omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate);
@@ -1262,23 +1311,10 @@ static int omap_i2c_runtime_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
- if (_dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
- omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, 0);
- omap_i2c_write_reg(_dev, OMAP_I2C_PSC_REG, _dev->pscstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SCLL_REG, _dev->scllstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SCLH_REG, _dev->sclhstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_BUF_REG, _dev->bufstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SYSC_REG, _dev->syscstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_WE_REG, _dev->westate);
- omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
- }
+ if (!_dev->regs)
+ return 0;
- /*
- * Don't write to this register if the IE state is 0 as it can
- * cause deadlock.
- */
- if (_dev->iestate)
- omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, _dev->iestate);
+ __omap_i2c_init(_dev);
return 0;
}
@@ -1295,7 +1331,7 @@ static struct dev_pm_ops omap_i2c_pm_ops = {
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
- .remove = __devexit_p(omap_i2c_remove),
+ .remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 4b95f7a63a3b..aa9577881925 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -135,7 +135,7 @@ static struct lineop parport_ctrl_irq = {
.port = PORT_CTRL,
};
-static int __devinit i2c_parport_probe(struct platform_device *pdev)
+static int i2c_parport_probe(struct platform_device *pdev)
{
int err;
@@ -169,7 +169,7 @@ static int __devinit i2c_parport_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit i2c_parport_remove(struct platform_device *pdev)
+static int i2c_parport_remove(struct platform_device *pdev)
{
if (ara) {
line_set(0, &parport_ctrl_irq);
@@ -191,7 +191,7 @@ static struct platform_driver i2c_parport_driver = {
.name = DRVNAME,
},
.probe = i2c_parport_probe,
- .remove = __devexit_p(i2c_parport_remove),
+ .remove = i2c_parport_remove,
};
static int __init i2c_parport_device_add(u16 address)
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 12edefd4183a..615f632c846f 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -340,7 +340,7 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = pasemi_smb_func,
};
-static int __devinit pasemi_smb_probe(struct pci_dev *dev,
+static int pasemi_smb_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct pasemi_smbus *smbus;
@@ -392,7 +392,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev,
return error;
}
-static void __devexit pasemi_smb_remove(struct pci_dev *dev)
+static void pasemi_smb_remove(struct pci_dev *dev)
{
struct pasemi_smbus *smbus = pci_get_drvdata(dev);
@@ -412,7 +412,7 @@ static struct pci_driver pasemi_smb_driver = {
.name = "i2c-pasemi",
.id_table = pasemi_smb_ids,
.probe = pasemi_smb_probe,
- .remove = __devexit_p(pasemi_smb_remove),
+ .remove = pasemi_smb_remove,
};
module_pci_driver(pasemi_smb_driver);
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 29933f87d8fa..323f061a3163 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -119,7 +119,7 @@ static struct i2c_adapter pca_isa_ops = {
.timeout = HZ,
};
-static int __devinit pca_isa_match(struct device *dev, unsigned int id)
+static int pca_isa_match(struct device *dev, unsigned int id)
{
int match = base != 0;
@@ -132,7 +132,7 @@ static int __devinit pca_isa_match(struct device *dev, unsigned int id)
return match;
}
-static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
+static int pca_isa_probe(struct device *dev, unsigned int id)
{
init_waitqueue_head(&pca_wait);
@@ -174,7 +174,7 @@ static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int __devexit pca_isa_remove(struct device *dev, unsigned int id)
+static int pca_isa_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pca_isa_ops);
@@ -190,7 +190,7 @@ static int __devexit pca_isa_remove(struct device *dev, unsigned int id)
static struct isa_driver pca_isa_driver = {
.match = pca_isa_match,
.probe = pca_isa_probe,
- .remove = __devexit_p(pca_isa_remove),
+ .remove = pca_isa_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER,
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 675878f49f76..a30d2f613c03 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -131,7 +131,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
}
-static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
+static int i2c_pca_pf_probe(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c;
struct resource *res;
@@ -257,7 +257,7 @@ e_print:
return ret;
}
-static int __devexit i2c_pca_pf_remove(struct platform_device *pdev)
+static int i2c_pca_pf_remove(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
@@ -279,7 +279,7 @@ static int __devexit i2c_pca_pf_remove(struct platform_device *pdev)
static struct platform_driver i2c_pca_pf_driver = {
.probe = i2c_pca_pf_probe,
- .remove = __devexit_p(i2c_pca_pf_remove),
+ .remove = i2c_pca_pf_remove,
.driver = {
.name = "i2c-pca-platform",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 8bbd6ece7c41..39ab78c1a02c 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(force_addr,
static int srvrworks_csb5_delay;
static struct pci_driver piix4_driver;
-static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
+static const struct dmi_system_id piix4_dmi_blacklist[] = {
{
.ident = "Sapphire AM2RD790",
.matches = {
@@ -119,7 +119,7 @@ static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
/* The IBM entry is in a separate table because we only check it
on Intel-based systems */
-static struct dmi_system_id __devinitdata piix4_dmi_ibm[] = {
+static const struct dmi_system_id piix4_dmi_ibm[] = {
{
.ident = "IBM",
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
@@ -131,8 +131,8 @@ struct i2c_piix4_adapdata {
unsigned short smba;
};
-static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id)
+static int piix4_setup(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id)
{
unsigned char temp;
unsigned short piix4_smba;
@@ -204,9 +204,8 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
*/
pci_write_config_byte(PIIX4_dev, SMBHSTCFG,
temp | 1);
- dev_printk(KERN_NOTICE, &PIIX4_dev->dev,
- "WARNING: SMBus interface has been "
- "FORCEFULLY ENABLED!\n");
+ dev_notice(&PIIX4_dev->dev,
+ "WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
} else {
dev_err(&PIIX4_dev->dev,
"Host SMBus controller not enabled!\n");
@@ -231,8 +230,8 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
return piix4_smba;
}
-static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id)
+static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id)
{
unsigned short piix4_smba;
unsigned short smba_idx = 0xcd6;
@@ -295,9 +294,9 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
return piix4_smba;
}
-static int __devinit piix4_setup_aux(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id,
- unsigned short base_reg_addr)
+static int piix4_setup_aux(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id,
+ unsigned short base_reg_addr)
{
/* Set up auxiliary SMBus controllers found on some
* AMD chipsets e.g. SP5100 (SB700 derivative) */
@@ -541,9 +540,8 @@ MODULE_DEVICE_TABLE (pci, piix4_ids);
static struct i2c_adapter *piix4_main_adapter;
static struct i2c_adapter *piix4_aux_adapter;
-static int __devinit piix4_add_adapter(struct pci_dev *dev,
- unsigned short smba,
- struct i2c_adapter **padap)
+static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+ struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
struct i2c_piix4_adapdata *adapdata;
@@ -589,8 +587,7 @@ static int __devinit piix4_add_adapter(struct pci_dev *dev,
return 0;
}
-static int __devinit piix4_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
@@ -627,7 +624,7 @@ static int __devinit piix4_probe(struct pci_dev *dev,
return 0;
}
-static void __devexit piix4_adap_remove(struct i2c_adapter *adap)
+static void piix4_adap_remove(struct i2c_adapter *adap)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
@@ -639,7 +636,7 @@ static void __devexit piix4_adap_remove(struct i2c_adapter *adap)
}
}
-static void __devexit piix4_remove(struct pci_dev *dev)
+static void piix4_remove(struct pci_dev *dev)
{
if (piix4_main_adapter) {
piix4_adap_remove(piix4_main_adapter);
@@ -656,7 +653,7 @@ static struct pci_driver piix4_driver = {
.name = "piix4_smbus",
.id_table = piix4_ids,
.probe = piix4_probe,
- .remove = __devexit_p(piix4_remove),
+ .remove = piix4_remove,
};
module_pci_driver(piix4_driver);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 3d71395ae1f7..083d68cfaf0b 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -270,7 +270,7 @@ static irqreturn_t pmcmsptwi_interrupt(int irq, void *ptr)
/*
* Probe for and register the device and return 0 if there is one.
*/
-static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
+static int pmcmsptwi_probe(struct platform_device *pldev)
{
struct resource *res;
int rc = -ENODEV;
@@ -368,7 +368,7 @@ ret_err:
/*
* Release the device and return 0 if there is one.
*/
-static int __devexit pmcmsptwi_remove(struct platform_device *pldev)
+static int pmcmsptwi_remove(struct platform_device *pldev)
{
struct resource *res;
@@ -628,7 +628,7 @@ static struct i2c_adapter pmcmsptwi_adapter = {
static struct platform_driver pmcmsptwi_driver = {
.probe = pmcmsptwi_probe,
- .remove = __devexit_p(pmcmsptwi_remove),
+ .remove = pmcmsptwi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 8488bddfe465..ce4097012e97 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -619,7 +619,7 @@ static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
#define PNX_I2C_PM NULL
#endif
-static int __devinit i2c_pnx_probe(struct platform_device *pdev)
+static int i2c_pnx_probe(struct platform_device *pdev)
{
unsigned long tmp;
int ret = 0;
@@ -765,7 +765,7 @@ err_kzalloc:
return ret;
}
-static int __devexit i2c_pnx_remove(struct platform_device *pdev)
+static int i2c_pnx_remove(struct platform_device *pdev)
{
struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
@@ -797,7 +797,7 @@ static struct platform_driver i2c_pnx_driver = {
.pm = PNX_I2C_PM,
},
.probe = i2c_pnx_probe,
- .remove = __devexit_p(i2c_pnx_remove),
+ .remove = i2c_pnx_remove,
};
static int __init i2c_adap_pnx_init(void)
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 5285f8565de4..0dd5b334d090 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -210,7 +210,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = {
};
-static int __devexit i2c_powermac_remove(struct platform_device *dev)
+static int i2c_powermac_remove(struct platform_device *dev)
{
struct i2c_adapter *adapter = platform_get_drvdata(dev);
int rc;
@@ -227,7 +227,7 @@ static int __devexit i2c_powermac_remove(struct platform_device *dev)
return 0;
}
-static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap,
+static u32 i2c_powermac_get_addr(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus,
struct device_node *node)
{
@@ -255,7 +255,7 @@ static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap,
return 0xffffffff;
}
-static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap,
+static void i2c_powermac_create_one(struct i2c_adapter *adap,
const char *type,
u32 addr)
{
@@ -271,7 +271,7 @@ static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap,
type);
}
-static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap,
+static void i2c_powermac_add_missing(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus,
bool found_onyx)
{
@@ -297,7 +297,7 @@ static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap,
}
}
-static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap,
+static bool i2c_powermac_get_type(struct i2c_adapter *adap,
struct device_node *node,
u32 addr, char *type, int type_size)
{
@@ -336,7 +336,7 @@ static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap,
return false;
}
-static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
+static void i2c_powermac_register_devices(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus)
{
struct i2c_client *newdev;
@@ -403,7 +403,7 @@ static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
i2c_powermac_add_missing(adap, bus, found_onyx);
}
-static int __devinit i2c_powermac_probe(struct platform_device *dev)
+static int i2c_powermac_probe(struct platform_device *dev)
{
struct pmac_i2c_bus *bus = dev->dev.platform_data;
struct device_node *parent = NULL;
@@ -467,7 +467,7 @@ static int __devinit i2c_powermac_probe(struct platform_device *dev)
static struct platform_driver i2c_powermac_driver = {
.probe = i2c_powermac_probe,
- .remove = __devexit_p(i2c_powermac_remove),
+ .remove = i2c_powermac_remove,
.driver = {
.name = "i2c-powermac",
.bus = &platform_bus_type,
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index d8515be00b98..d7c512d717a7 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -184,7 +184,7 @@ static struct i2c_algorithm puv3_i2c_algorithm = {
/*
* Main initialization routine.
*/
-static int __devinit puv3_i2c_probe(struct platform_device *pdev)
+static int puv3_i2c_probe(struct platform_device *pdev)
{
struct i2c_adapter *adapter;
struct resource *mem;
@@ -231,7 +231,7 @@ fail_nomem:
return rc;
}
-static int __devexit puv3_i2c_remove(struct platform_device *pdev)
+static int puv3_i2c_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
struct resource *mem;
@@ -276,7 +276,7 @@ static SIMPLE_DEV_PM_OPS(puv3_i2c_pm, puv3_i2c_suspend, NULL);
static struct platform_driver puv3_i2c_driver = {
.probe = puv3_i2c_probe,
- .remove = __devexit_p(puv3_i2c_remove),
+ .remove = puv3_i2c_remove,
.driver = {
.name = "PKUnity-v3-I2C",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 4dc9bef17d77..3d4985695aed 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -94,7 +94,7 @@ out:
return ERR_PTR(ret);
}
-static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
+static int ce4100_i2c_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int ret;
@@ -135,7 +135,7 @@ err_mem:
return ret;
}
-static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
+static void ce4100_i2c_remove(struct pci_dev *dev)
{
struct ce4100_devices *sds;
unsigned int i;
@@ -160,7 +160,7 @@ static struct pci_driver ce4100_i2c_driver = {
.name = "ce4100_i2c",
.id_table = ce4100_i2c_devices,
.probe = ce4100_i2c_probe,
- .remove = __devexit_p(ce4100_i2c_remove),
+ .remove = ce4100_i2c_remove,
};
module_pci_driver(ce4100_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index f9399d163af2..4ba4a95b6b26 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -613,7 +613,7 @@ static const struct i2c_algorithm rcar_i2c_algo = {
.functionality = rcar_i2c_func,
};
-static int __devinit rcar_i2c_probe(struct platform_device *pdev)
+static int rcar_i2c_probe(struct platform_device *pdev)
{
struct i2c_rcar_platform_data *pdata = pdev->dev.platform_data;
struct rcar_i2c_priv *priv;
@@ -642,11 +642,9 @@ static int __devinit rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- priv->io = devm_ioremap(dev, res->start, resource_size(res));
- if (!priv->io) {
- dev_err(dev, "cannot ioremap\n");
- return -ENODEV;
- }
+ priv->io = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->io))
+ return PTR_ERR(priv->io);
priv->irq = platform_get_irq(pdev, 0);
init_waitqueue_head(&priv->wait);
@@ -682,7 +680,7 @@ static int __devinit rcar_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit rcar_i2c_remove(struct platform_device *pdev)
+static int rcar_i2c_remove(struct platform_device *pdev)
{
struct rcar_i2c_priv *priv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -693,16 +691,16 @@ static int __devexit rcar_i2c_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver rcar_i2c_drv = {
+static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
.owner = THIS_MODULE,
},
.probe = rcar_i2c_probe,
- .remove = __devexit_p(rcar_i2c_remove),
+ .remove = rcar_i2c_remove,
};
-module_platform_driver(rcar_i2c_drv);
+module_platform_driver(rcar_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas R-Car I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index b33d95ebc890..c807a6d14f0c 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -38,6 +38,7 @@
#include <linux/io.h>
#include <linux/of_i2c.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/irq.h>
@@ -49,6 +50,9 @@
#define QUIRK_HDMIPHY (1 << 1)
#define QUIRK_NO_GPIO (1 << 2)
+/* Max time to wait for bus to become idle after a transfer (in us) */
+#define S3C2410_IDLE_TIMEOUT 5000
+
/* i2c controller state */
enum s3c24xx_i2c_state {
STATE_IDLE,
@@ -59,7 +63,6 @@ enum s3c24xx_i2c_state {
};
struct s3c24xx_i2c {
- spinlock_t lock;
wait_queue_head_t wait;
unsigned int quirks;
unsigned int suspended:1;
@@ -78,11 +81,11 @@ struct s3c24xx_i2c {
void __iomem *regs;
struct clk *clk;
struct device *dev;
- struct resource *ioarea;
struct i2c_adapter adap;
struct s3c2410_platform_i2c *pdata;
int gpios[2];
+ struct pinctrl *pctrl;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
@@ -235,8 +238,47 @@ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
dev_dbg(i2c->dev, "STOP\n");
- /* stop the transfer */
- iicstat &= ~S3C2410_IICSTAT_START;
+ /*
+ * The datasheet says that the STOP sequence should be:
+ * 1) I2CSTAT.5 = 0 - Clear BUSY (or 'generate STOP')
+ * 2) I2CCON.4 = 0 - Clear IRQPEND
+ * 3) Wait until the stop condition takes effect.
+ * 4*) I2CSTAT.4 = 0 - Clear TXRXEN
+ *
+ * Where, step "4*" is only for buses with the "HDMIPHY" quirk.
+ *
+ * However, after much experimentation, it appears that:
+ * a) normal buses automatically clear BUSY and transition from
+ * Master->Slave when they complete generating a STOP condition.
+ * Therefore, step (3) can be done in doxfer() by polling I2CCON.4
+ * after starting the STOP generation here.
+ * b) HDMIPHY bus does neither, so there is no way to do step 3.
+ * There is no indication when this bus has finished generating
+ * STOP.
+ *
+ * In fact, we have found that as soon as the IRQPEND bit is cleared in
+ * step 2, the HDMIPHY bus generates the STOP condition, and then
+ * immediately starts transferring another data byte, even though the
+ * bus is supposedly stopped. This is presumably because the bus is
+ * still in "Master" mode, and its BUSY bit is still set.
+ *
+ * To avoid these extra post-STOP transactions on HDMI phy devices, we
+ * just disable Serial Output on the bus (I2CSTAT.4 = 0) directly,
+ * instead of first generating a proper STOP condition. This should
+ * float SDA & SCL, terminating the transfer. Subsequent transfers
+ * start with a proper START condition, and proceed normally.
+ *
+ * The HDMIPHY bus is an internal bus that always has exactly two
+ * devices, the host as Master and the HDMIPHY device as the slave.
+ * Skipping the STOP condition has been tested on this bus and works.
+ */
+ if (i2c->quirks & QUIRK_HDMIPHY) {
+ /* Stop driving the I2C pins */
+ iicstat &= ~S3C2410_IICSTAT_TXRXEN;
+ } else {
+ /* stop the transfer */
+ iicstat &= ~S3C2410_IICSTAT_START;
+ }
writel(iicstat, i2c->regs + S3C2410_IICSTAT);
i2c->state = STATE_STOP;
@@ -490,13 +532,6 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
unsigned long iicstat;
int timeout = 400;
- /* the timeout for HDMIPHY is reduced to 10 ms because
- * the hangup is expected to happen, so waiting 400 ms
- * causes only unnecessary system hangup
- */
- if (i2c->quirks & QUIRK_HDMIPHY)
- timeout = 10;
-
while (timeout-- > 0) {
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
@@ -506,16 +541,61 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
msleep(1);
}
- /* hang-up of bus dedicated for HDMIPHY occurred, resetting */
- if (i2c->quirks & QUIRK_HDMIPHY) {
- writel(0, i2c->regs + S3C2410_IICCON);
- writel(0, i2c->regs + S3C2410_IICSTAT);
- writel(0, i2c->regs + S3C2410_IICDS);
+ return -ETIMEDOUT;
+}
- return 0;
+/* s3c24xx_i2c_wait_idle
+ *
+ * wait for the i2c bus to become idle.
+ */
+
+static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
+{
+ unsigned long iicstat;
+ ktime_t start, now;
+ unsigned long delay;
+ int spins;
+
+ /* ensure the stop has been through the bus */
+
+ dev_dbg(i2c->dev, "waiting for bus idle\n");
+
+ start = now = ktime_get();
+
+ /*
+ * Most of the time, the bus is already idle within a few usec of the
+ * end of a transaction. However, really slow i2c devices can stretch
+ * the clock, delaying STOP generation.
+ *
+ * On slower SoCs this typically happens within a very small number of
+ * instructions, so busy-wait briefly to avoid scheduling overhead.
+ */
+ spins = 3;
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ while ((iicstat & S3C2410_IICSTAT_START) && --spins) {
+ cpu_relax();
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
}
- return -ETIMEDOUT;
+ /*
+ * If we do get an appreciable delay, use an exponential back-off in
+ * the polling loop, up to 1/10th of the total timeout, then continue
+ * to poll at a constant rate up to the timeout. This is a compromise
+ * between idle-detection latency in the normal, fast case and system
+ * load in the slow-device case.
+ */
+ delay = 1;
+ while ((iicstat & S3C2410_IICSTAT_START) &&
+ ktime_us_delta(now, start) < S3C2410_IDLE_TIMEOUT) {
+ usleep_range(delay, 2 * delay);
+ if (delay < S3C2410_IDLE_TIMEOUT / 10)
+ delay <<= 1;
+ now = ktime_get();
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ }
+
+ if (iicstat & S3C2410_IICSTAT_START)
+ dev_warn(i2c->dev, "timeout waiting for bus idle\n");
}
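To make the back-off above concrete (a sketch of the numbers, not part of the patch), with S3C2410_IDLE_TIMEOUT = 5000 us:

    delay (us): 1 -> 2 -> 4 -> ... -> 256 -> 512   (512 >= 5000/10, so it stops doubling)
    then usleep_range(512, 1024) each iteration until the START bit clears
    or ktime_us_delta(now, start) reaches 5000 us and the warning fires

so the bus is polled very tightly at first and roughly every 0.5-1 ms once the back-off saturates.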
/* s3c24xx_i2c_doxfer
@@ -526,8 +606,7 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long iicstat, timeout;
- int spins = 20;
+ unsigned long timeout;
int ret;
if (i2c->suspended)
@@ -540,8 +619,6 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
goto out;
}
- spin_lock_irq(&i2c->lock);
-
i2c->msg = msgs;
i2c->msg_num = num;
i2c->msg_ptr = 0;
@@ -550,7 +627,6 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
s3c24xx_i2c_enable_irq(i2c);
s3c24xx_i2c_message_start(i2c, msgs);
- spin_unlock_irq(&i2c->lock);
timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
@@ -564,24 +640,11 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
else if (ret != num)
dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
- /* ensure the stop has been through the bus */
-
- dev_dbg(i2c->dev, "waiting for bus idle\n");
-
- /* first, try busy waiting briefly */
- do {
- cpu_relax();
- iicstat = readl(i2c->regs + S3C2410_IICSTAT);
- } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
-
- /* if that timed out sleep */
- if (!spins) {
- msleep(1);
- iicstat = readl(i2c->regs + S3C2410_IICSTAT);
- }
+ /* For QUIRK_HDMIPHY, bus is already disabled */
+ if (i2c->quirks & QUIRK_HDMIPHY)
+ goto out;
- if (iicstat & S3C2410_IICSTAT_START)
- dev_warn(i2c->dev, "timeout waiting for bus idle\n");
+ s3c24xx_i2c_wait_idle(i2c);
out:
return ret;
@@ -740,7 +803,6 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
struct s3c24xx_i2c *i2c = freq_to_i2c(nb);
- unsigned long flags;
unsigned int got;
int delta_f;
int ret;
@@ -754,9 +816,9 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) ||
(val == CPUFREQ_PRECHANGE && delta_f > 0)) {
- spin_lock_irqsave(&i2c->lock, flags);
+ i2c_lock_adapter(&i2c->adap);
ret = s3c24xx_i2c_clockrate(i2c, &got);
- spin_unlock_irqrestore(&i2c->lock, flags);
+ i2c_unlock_adapter(&i2c->adap);
if (ret < 0)
dev_err(i2c->dev, "cannot find frequency\n");
@@ -858,14 +920,6 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
pdata = i2c->pdata;
- /* inititalise the gpio */
-
- if (pdata->cfg_gpio)
- pdata->cfg_gpio(to_platform_device(i2c->dev));
- else
- if (s3c24xx_i2c_parse_dt_gpio(i2c))
- return -EINVAL;
-
/* write slave address */
writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD);
@@ -963,7 +1017,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
i2c->tx_setup = 50;
- spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
/* find the clock and enable it */
@@ -989,36 +1042,37 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
- i2c->ioarea = request_mem_region(res->start, resource_size(res),
- pdev->name);
+ i2c->regs = devm_ioremap_resource(&pdev->dev, res);
- if (i2c->ioarea == NULL) {
- dev_err(&pdev->dev, "cannot request IO\n");
- ret = -ENXIO;
+ if (IS_ERR(i2c->regs)) {
+ ret = PTR_ERR(i2c->regs);
goto err_clk;
}
- i2c->regs = ioremap(res->start, resource_size(res));
-
- if (i2c->regs == NULL) {
- dev_err(&pdev->dev, "cannot map IO\n");
- ret = -ENXIO;
- goto err_ioarea;
- }
-
- dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
- i2c->regs, i2c->ioarea, res);
+ dev_dbg(&pdev->dev, "registers %p (%p)\n",
+ i2c->regs, res);
/* setup info block for the i2c core */
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
+ i2c->pctrl = devm_pinctrl_get_select_default(i2c->dev);
+
+ /* initialise the i2c gpio lines */
+
+ if (i2c->pdata->cfg_gpio) {
+ i2c->pdata->cfg_gpio(to_platform_device(i2c->dev));
+ } else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) {
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
/* initialise the i2c controller */
ret = s3c24xx_i2c_init(i2c);
if (ret != 0)
- goto err_iomap;
+ goto err_clk;
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs pending
@@ -1027,7 +1081,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
- goto err_iomap;
+ goto err_clk;
}
ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0,
@@ -1035,7 +1089,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
- goto err_iomap;
+ goto err_clk;
}
ret = s3c24xx_i2c_register_cpufreq(i2c);
@@ -1075,13 +1129,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
err_irq:
free_irq(i2c->irq, i2c);
- err_iomap:
- iounmap(i2c->regs);
-
- err_ioarea:
- release_resource(i2c->ioarea);
- kfree(i2c->ioarea);
-
err_clk:
clk_disable_unprepare(i2c->clk);
clk_put(i2c->clk);
@@ -1110,16 +1157,13 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
clk_disable_unprepare(i2c->clk);
clk_put(i2c->clk);
- iounmap(i2c->regs);
-
- release_resource(i2c->ioarea);
- s3c24xx_i2c_dt_gpio_free(i2c);
- kfree(i2c->ioarea);
+ if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
+ s3c24xx_i2c_dt_gpio_free(i2c);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int s3c24xx_i2c_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1142,10 +1186,14 @@ static int s3c24xx_i2c_resume(struct device *dev)
return 0;
}
+#endif
+#ifdef CONFIG_PM
static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
.suspend_noirq = s3c24xx_i2c_suspend_noirq,
.resume = s3c24xx_i2c_resume,
+#endif
};
#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index b76a29d1f8e4..008836409efe 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -248,7 +248,7 @@ static struct i2c_algorithm s6i2c_algorithm = {
.functionality = s6i2c_functionality,
};
-static u16 __devinit nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
+static u16 nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
{
u32 dividend = ((clk_get_rate(iface->clk) / 1000) * ns) / 1000000;
if (dividend > 0xffff)
@@ -256,7 +256,7 @@ static u16 __devinit nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
return dividend;
}
-static int __devinit s6i2c_probe(struct platform_device *dev)
+static int s6i2c_probe(struct platform_device *dev)
{
struct s6i2c_if *iface = &s6i2c_if;
struct i2c_adapter *p_adap;
@@ -361,7 +361,7 @@ err_out:
return rc;
}
-static int __devexit s6i2c_remove(struct platform_device *pdev)
+static int s6i2c_remove(struct platform_device *pdev)
{
struct s6i2c_if *iface = platform_get_drvdata(pdev);
i2c_wr16(iface, S6_I2C_ENABLE, 0);
@@ -378,7 +378,7 @@ static int __devexit s6i2c_remove(struct platform_device *pdev)
static struct platform_driver s6i2c_driver = {
.probe = s6i2c_probe,
- .remove = __devexit_p(s6i2c_remove),
+ .remove = s6i2c_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 6aafa3d88ff0..c447e8d40b78 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -406,7 +406,7 @@ err:
return -EIO;
}
-static int acpi_smbus_cmi_remove(struct acpi_device *device, int type)
+static int acpi_smbus_cmi_remove(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi = acpi_driver_data(device);
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index c0c9dffbdb12..3a2253e1bf59 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -390,7 +390,7 @@ static const struct i2c_algorithm sh7760_i2c_algo = {
* iclk = mclk/(CDF + 1). iclk must be < 20MHz.
* scl = iclk/(SCGD*8 + 20).
*/
-static int __devinit calc_CCR(unsigned long scl_hz)
+static int calc_CCR(unsigned long scl_hz)
{
struct clk *mclk;
unsigned long mck, m1, dff, odff, iclk;
@@ -430,7 +430,7 @@ static int __devinit calc_CCR(unsigned long scl_hz)
return ((scgdm << 2) | cdfm);
}
-static int __devinit sh7760_i2c_probe(struct platform_device *pdev)
+static int sh7760_i2c_probe(struct platform_device *pdev)
{
struct sh7760_i2c_platdata *pd;
struct resource *res;
@@ -536,7 +536,7 @@ out0:
return ret;
}
-static int __devexit sh7760_i2c_remove(struct platform_device *pdev)
+static int sh7760_i2c_remove(struct platform_device *pdev)
{
struct cami2c *id = platform_get_drvdata(pdev);
@@ -557,7 +557,7 @@ static struct platform_driver sh7760_i2c_drv = {
.owner = THIS_MODULE,
},
.probe = sh7760_i2c_probe,
- .remove = __devexit_p(sh7760_i2c_remove),
+ .remove = sh7760_i2c_remove,
};
module_platform_driver(sh7760_i2c_drv);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 8110ca45f342..b6e7a83a8296 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -120,11 +120,12 @@ struct sh_mobile_i2c_data {
void __iomem *reg;
struct i2c_adapter adap;
unsigned long bus_speed;
+ unsigned int clks_per_count;
struct clk *clk;
u_int8_t icic;
- u_int8_t iccl;
- u_int8_t icch;
u_int8_t flags;
+ u_int16_t iccl;
+ u_int16_t icch;
spinlock_t lock;
wait_queue_head_t wait;
@@ -135,7 +136,8 @@ struct sh_mobile_i2c_data {
#define IIC_FLAG_HAS_ICIC67 (1 << 0)
-#define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */
+#define STANDARD_MODE 100000
+#define FAST_MODE 400000
/* Register offsets */
#define ICDR 0x00
@@ -187,57 +189,90 @@ static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs,
iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr);
}
-static void activate_ch(struct sh_mobile_i2c_data *pd)
+static u32 sh_mobile_i2c_iccl(unsigned long count_khz, u32 tLOW, u32 tf, int offset)
{
- unsigned long i2c_clk;
- u_int32_t num;
- u_int32_t denom;
- u_int32_t tmp;
-
- /* Wake up device and enable clock */
- pm_runtime_get_sync(pd->dev);
- clk_enable(pd->clk);
-
- /* Get clock rate after clock is enabled */
- i2c_clk = clk_get_rate(pd->clk);
+ /*
+ * Conditional expression:
+ * ICCL >= COUNT_CLK * (tLOW + tf)
+ *
+ * SH-Mobile IIC hardware starts counting the LOW period of
+ * the SCL signal (tLOW) as soon as it pulls the SCL line.
+ * In order to meet the tLOW timing spec, we need to take into
+ * account the fall time of SCL signal (tf). Default tf value
+ * should be 0.3 us, for safety.
+ */
+ return (((count_khz * (tLOW + tf)) + 5000) / 10000) + offset;
+}
- /* Calculate the value for iccl. From the data sheet:
- * iccl = (p clock / transfer rate) * (L / (L + H))
- * where L and H are the SCL low/high ratio (5/4 in this case).
- * We also round off the result.
+static u32 sh_mobile_i2c_icch(unsigned long count_khz, u32 tHIGH, u32 tf, int offset)
+{
+ /*
+ * Conditional expression:
+ * ICCH >= COUNT_CLK * (tHIGH + tf)
+ *
+ * SH-Mobile IIC hardware is aware of SCL transition period 'tr',
+ * and can ignore it. SH-Mobile IIC controller starts counting
+ * the HIGH period of the SCL signal (tHIGH) after the SCL input
+ * voltage increases at VIH.
+ *
+ * It later turned out that calculating ICCH from the tHIGH spec alone
+ * violates the tHD;STA timing spec. We also need to take into account
+ * the fall time of the SDA signal (tf) at the START condition, in
+ * order to meet both the tHIGH and tHD;STA specs.
*/
- num = i2c_clk * 5;
- denom = pd->bus_speed * 9;
- tmp = num * 10 / denom;
- if (tmp % 10 >= 5)
- pd->iccl = (u_int8_t)((num/denom) + 1);
- else
- pd->iccl = (u_int8_t)(num/denom);
+ return (((count_khz * (tHIGH + tf)) + 5000) / 10000) + offset;
+}
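Plugging hypothetical numbers into the two helpers above (a sketch, not from the patch): with a 50 MHz peripheral clock and clks_per_count = 1, count_khz = 50000, so in standard mode (tLOW = 4.7 us, tHIGH = 4.0 us, tf = 0.3 us, offset = 0):

    ICCL = ((50000 * (47 + 3)) + 5000) / 10000 + 0 = 250
    ICCH = ((50000 * (40 + 3)) + 5000) / 10000 + 0 = 215

Both values fit in eight bits, so neither ICIC_ICCLB8 nor ICIC_ICCHB8 would be set for this clock rate.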
- /* one more bit of ICCL in ICIC */
- if (pd->flags & IIC_FLAG_HAS_ICIC67) {
- if ((num/denom) > 0xff)
- pd->icic |= ICIC_ICCLB8;
- else
- pd->icic &= ~ICIC_ICCLB8;
+static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
+{
+ unsigned long i2c_clk_khz;
+ u32 tHIGH, tLOW, tf;
+ int offset;
+
+ /* Get clock rate after clock is enabled */
+ clk_enable(pd->clk);
+ i2c_clk_khz = clk_get_rate(pd->clk) / 1000;
+ i2c_clk_khz /= pd->clks_per_count;
+
+ if (pd->bus_speed == STANDARD_MODE) {
+ tLOW = 47; /* tLOW = 4.7 us */
+ tHIGH = 40; /* tHD;STA = tHIGH = 4.0 us */
+ tf = 3; /* tf = 0.3 us */
+ offset = 0; /* No offset */
+ } else if (pd->bus_speed == FAST_MODE) {
+ tLOW = 13; /* tLOW = 1.3 us */
+ tHIGH = 6; /* tHD;STA = tHIGH = 0.6 us */
+ tf = 3; /* tf = 0.3 us */
+ offset = 0; /* No offset */
+ } else {
+ dev_err(pd->dev, "unrecognized bus speed %lu Hz\n",
+ pd->bus_speed);
+ goto out;
}
- /* Calculate the value for icch. From the data sheet:
- icch = (p clock / transfer rate) * (H / (L + H)) */
- num = i2c_clk * 4;
- tmp = num * 10 / denom;
- if (tmp % 10 >= 5)
- pd->icch = (u_int8_t)((num/denom) + 1);
+ pd->iccl = sh_mobile_i2c_iccl(i2c_clk_khz, tLOW, tf, offset);
+ /* one more bit of ICCL in ICIC */
+ if ((pd->iccl > 0xff) && (pd->flags & IIC_FLAG_HAS_ICIC67))
+ pd->icic |= ICIC_ICCLB8;
else
- pd->icch = (u_int8_t)(num/denom);
+ pd->icic &= ~ICIC_ICCLB8;
+ pd->icch = sh_mobile_i2c_icch(i2c_clk_khz, tHIGH, tf, offset);
/* one more bit of ICCH in ICIC */
- if (pd->flags & IIC_FLAG_HAS_ICIC67) {
- if ((num/denom) > 0xff)
- pd->icic |= ICIC_ICCHB8;
- else
- pd->icic &= ~ICIC_ICCHB8;
- }
+ if ((pd->icch > 0xff) && (pd->flags & IIC_FLAG_HAS_ICIC67))
+ pd->icic |= ICIC_ICCHB8;
+ else
+ pd->icic &= ~ICIC_ICCHB8;
+
+out:
+ clk_disable(pd->clk);
+}
+
+static void activate_ch(struct sh_mobile_i2c_data *pd)
+{
+ /* Wake up device and enable clock */
+ pm_runtime_get_sync(pd->dev);
+ clk_enable(pd->clk);
/* Enable channel and configure rx ack */
iic_set_clr(pd, ICCR, ICCR_ICE, 0);
@@ -246,8 +281,8 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
iic_wr(pd, ICIC, 0);
/* Set the clock */
- iic_wr(pd, ICCL, pd->iccl);
- iic_wr(pd, ICCH, pd->icch);
+ iic_wr(pd, ICCL, pd->iccl & 0xff);
+ iic_wr(pd, ICCH, pd->icch & 0xff);
}
static void deactivate_ch(struct sh_mobile_i2c_data *pd)
@@ -434,6 +469,9 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
wake_up(&pd->wait);
}
+ /* defeat write posting to avoid spurious WAIT interrupts */
+ iic_rd(pd, ICSR);
+
return IRQ_HANDLED;
}
@@ -451,8 +489,8 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
iic_set_clr(pd, ICCR, ICCR_ICE, 0);
/* Set the clock */
- iic_wr(pd, ICCL, pd->iccl);
- iic_wr(pd, ICCH, pd->icch);
+ iic_wr(pd, ICCL, pd->iccl & 0xff);
+ iic_wr(pd, ICCH, pd->icch & 0xff);
pd->msg = usr_msg;
pd->pos = -1;
@@ -621,10 +659,13 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
goto err_irq;
}
- /* Use platformd data bus speed or NORMAL_SPEED */
- pd->bus_speed = NORMAL_SPEED;
+ /* Use platform data bus speed or STANDARD_MODE */
+ pd->bus_speed = STANDARD_MODE;
if (pdata && pdata->bus_speed)
pd->bus_speed = pdata->bus_speed;
+ pd->clks_per_count = 1;
+ if (pdata && pdata->clks_per_count)
+ pd->clks_per_count = pdata->clks_per_count;
/* The IIC blocks on SH-Mobile ARM processors
* come with two new bits in ICIC.
@@ -632,6 +673,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
if (size > 0x17)
pd->flags |= IIC_FLAG_HAS_ICIC67;
+ sh_mobile_i2c_init(pd);
+
/* Enable Runtime PM for this device.
*
* Also tell the Runtime PM core to ignore children
@@ -667,8 +710,9 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
goto err_all;
}
- dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
- adap->nr, pd->bus_speed);
+ dev_info(&dev->dev,
+ "I2C adapter %d with bus speed %lu Hz (L/H=%x/%x)\n",
+ adap->nr, pd->bus_speed, pd->iccl, pd->icch);
of_i2c_register_devices(adap);
return 0;
@@ -714,7 +758,7 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
.runtime_resume = sh_mobile_i2c_runtime_nop,
};
-static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
+static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,rmobile-iic", },
{},
};
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 5574a47792fb..5a7ad240bd26 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/of_i2c.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -258,7 +259,7 @@ static const struct i2c_algorithm i2c_sirfsoc_algo = {
.functionality = i2c_sirfsoc_func,
};
-static int __devinit i2c_sirfsoc_probe(struct platform_device *pdev)
+static int i2c_sirfsoc_probe(struct platform_device *pdev)
{
struct sirfsoc_i2c *siic;
struct i2c_adapter *adap;
@@ -308,10 +309,9 @@ static int __devinit i2c_sirfsoc_probe(struct platform_device *pdev)
goto out;
}
- siic->base = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (siic->base == NULL) {
- dev_err(&pdev->dev, "IO remap failed!\n");
- err = -ENOMEM;
+ siic->base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(siic->base)) {
+ err = PTR_ERR(siic->base);
goto out;
}
@@ -328,6 +328,7 @@ static int __devinit i2c_sirfsoc_probe(struct platform_device *pdev)
adap->algo = &i2c_sirfsoc_algo;
adap->algo_data = siic;
+ adap->dev.of_node = pdev->dev.of_node;
adap->dev.parent = &pdev->dev;
adap->nr = pdev->id;
@@ -371,6 +372,8 @@ static int __devinit i2c_sirfsoc_probe(struct platform_device *pdev)
clk_disable(clk);
+ of_i2c_register_devices(adap);
+
dev_info(&pdev->dev, " I2C adapter ready to operate\n");
return 0;
@@ -385,7 +388,7 @@ err_get_clk:
return err;
}
-static int __devexit i2c_sirfsoc_remove(struct platform_device *pdev)
+static int i2c_sirfsoc_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
struct sirfsoc_i2c *siic = adapter->algo_data;
@@ -433,7 +436,7 @@ static const struct dev_pm_ops i2c_sirfsoc_pm_ops = {
};
#endif
-static const struct of_device_id sirfsoc_i2c_of_match[] __devinitconst = {
+static const struct of_device_id sirfsoc_i2c_of_match[] = {
{ .compatible = "sirf,prima2-i2c", },
{},
};
@@ -449,7 +452,7 @@ static struct platform_driver i2c_sirfsoc_driver = {
.of_match_table = sirfsoc_i2c_of_match,
},
.probe = i2c_sirfsoc_probe,
- .remove = __devexit_p(i2c_sirfsoc_remove),
+ .remove = i2c_sirfsoc_remove,
};
module_platform_driver(i2c_sirfsoc_driver);
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 87e5126d449c..79fd96a04386 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -142,7 +142,7 @@ static void sis5595_write(u8 reg, u8 data)
outb(data, sis5595_base + SMB_DAT);
}
-static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+static int sis5595_setup(struct pci_dev *SIS5595_dev)
{
u16 a;
u8 val;
@@ -376,7 +376,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis5595_ids) = {
MODULE_DEVICE_TABLE (pci, sis5595_ids);
-static int __devinit sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int err;
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 5d6723b7525e..de6dddb9f865 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -389,7 +389,7 @@ static u32 sis630_func(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_BLOCK_DATA;
}
-static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+static int sis630_setup(struct pci_dev *sis630_dev)
{
unsigned char b;
struct pci_dev *dummy = NULL;
@@ -480,7 +480,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
MODULE_DEVICE_TABLE (pci, sis630_ids);
-static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (sis630_setup(dev)) {
dev_err(&dev->dev, "SIS630 comp. bus not detected, module not inserted.\n");
@@ -496,7 +496,7 @@ static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_i
return i2c_add_adapter(&sis630_adapter);
}
-static void __devexit sis630_remove(struct pci_dev *dev)
+static void sis630_remove(struct pci_dev *dev)
{
if (acpi_base) {
i2c_del_adapter(&sis630_adapter);
@@ -510,7 +510,7 @@ static struct pci_driver sis630_driver = {
.name = "sis630_smbus",
.id_table = sis630_ids,
.probe = sis630_probe,
- .remove = __devexit_p(sis630_remove),
+ .remove = sis630_remove,
};
module_pci_driver(sis630_driver);
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 7b72614a9bc0..b9faf9b6002b 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -252,7 +252,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis96x_ids) = {
MODULE_DEVICE_TABLE (pci, sis96x_ids);
-static int __devinit sis96x_probe(struct pci_dev *dev,
+static int sis96x_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u16 ww = 0;
@@ -308,7 +308,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev,
return retval;
}
-static void __devexit sis96x_remove(struct pci_dev *dev)
+static void sis96x_remove(struct pci_dev *dev)
{
if (sis96x_smbus_base) {
i2c_del_adapter(&sis96x_adapter);
@@ -321,7 +321,7 @@ static struct pci_driver sis96x_driver = {
.name = "sis96x_smbus",
.id_table = sis96x_ids,
.probe = sis96x_probe,
- .remove = __devexit_p(sis96x_remove),
+ .remove = sis96x_remove,
};
module_pci_driver(sis96x_driver);
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 580a0c04cb42..60195b590637 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -888,11 +888,11 @@ stu300_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- dev->virtbase = devm_request_and_ioremap(&pdev->dev, res);
+ dev->virtbase = devm_ioremap_resource(&pdev->dev, res);
dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
"base %p\n", bus_nr, dev->virtbase);
- if (!dev->virtbase)
- return -ENOMEM;
+ if (IS_ERR(dev->virtbase))
+ return PTR_ERR(dev->virtbase);
dev->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, dev->irq, stu300_irh, 0, NAME, dev);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index dcea77bf6f50..f0d9923323ea 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -29,11 +29,10 @@
#include <linux/of_i2c.h>
#include <linux/of_device.h>
#include <linux/module.h>
+#include <linux/clk/tegra.h>
#include <asm/unaligned.h>
-#include <mach/clk.h>
-
#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
#define BYTES_PER_FIFO_WORD 4
@@ -642,7 +641,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
-static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+static const struct of_device_id tegra_i2c_of_match[] = {
{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c-dvc", .data = &tegra20_i2c_hw, },
@@ -651,7 +650,7 @@ static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
#endif
-static int __devinit tegra_i2c_probe(struct platform_device *pdev)
+static int tegra_i2c_probe(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev;
struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
@@ -669,11 +668,9 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
return -EINVAL;
}
- base = devm_request_and_ioremap(&pdev->dev, res);
- if (!base) {
- dev_err(&pdev->dev, "Cannot request/ioremap I2C registers\n");
- return -EADDRNOTAVAIL;
- }
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -769,7 +766,7 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tegra_i2c_remove(struct platform_device *pdev)
+static int tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
@@ -817,7 +814,7 @@ static SIMPLE_DEV_PM_OPS(tegra_i2c_pm, tegra_i2c_suspend, tegra_i2c_resume);
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
- .remove = __devexit_p(tegra_i2c_remove),
+ .remove = tegra_i2c_remove,
.driver = {
.name = "tegra-i2c",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7ffee71ca190..be662511c58b 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -96,7 +96,7 @@ static DEFINE_PCI_DEVICE_TABLE(vt586b_ids) = {
MODULE_DEVICE_TABLE (pci, vt586b_ids);
-static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u16 base;
u8 rev;
@@ -146,7 +146,7 @@ static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_i
return 0;
}
-static void __devexit vt586b_remove(struct pci_dev *dev)
+static void vt586b_remove(struct pci_dev *dev)
{
i2c_del_adapter(&vt586b_adapter);
release_region(I2C_DIR, IOSPACE);
@@ -158,7 +158,7 @@ static struct pci_driver vt586b_driver = {
.name = "vt586b_smbus",
.id_table = vt586b_ids,
.probe = vt586b_probe,
- .remove = __devexit_p(vt586b_remove),
+ .remove = vt586b_remove,
};
module_pci_driver(vt586b_driver);
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 271c9a2b0fd7..b2d90e105f41 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -320,8 +320,8 @@ static struct i2c_adapter vt596_adapter = {
.algo = &smbus_algorithm,
};
-static int __devinit vt596_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int vt596_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
unsigned char temp;
int error;
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
new file mode 100644
index 000000000000..f45c32c1ace6
--- /dev/null
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -0,0 +1,480 @@
+/*
+ * Nano River Technologies viperboard i2c master driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/viperboard.h>
+
+struct vprbrd_i2c {
+ struct i2c_adapter i2c;
+ u8 bus_freq_param;
+};
+
+/* i2c bus frequency module parameter */
+static u8 i2c_bus_param;
+static unsigned int i2c_bus_freq = 100;
+module_param(i2c_bus_freq, int, 0);
+MODULE_PARM_DESC(i2c_bus_freq,
+ "i2c bus frequency in khz (default is 100) valid values: 10, 100, 200, 400, 1000, 3000, 6000");
+
+static int vprbrd_i2c_status(struct i2c_adapter *i2c,
+ struct vprbrd_i2c_status *status, bool prev_error)
+{
+ u16 bytes_xfer;
+ int ret;
+ struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
+
+ /* check for protocol error */
+ bytes_xfer = sizeof(struct vprbrd_i2c_status);
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_I2C, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000,
+ status, bytes_xfer, VPRBRD_USB_TIMEOUT_MS);
+
+ if (ret != bytes_xfer)
+ prev_error = true;
+
+ if (prev_error) {
+ dev_err(&i2c->dev, "failure in usb communication\n");
+ return -EREMOTEIO;
+ }
+
+ dev_dbg(&i2c->dev, " status = %d\n", status->status);
+ if (status->status != 0x00) {
+ dev_err(&i2c->dev, "failure: i2c protocol error\n");
+ return -EPROTO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_receive(struct usb_device *usb_dev,
+ struct vprbrd_i2c_read_msg *rmsg, int bytes_xfer)
+{
+ int ret, bytes_actual;
+ int error = 0;
+
+ /* send the read request */
+ ret = usb_bulk_msg(usb_dev,
+ usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), rmsg,
+ sizeof(struct vprbrd_i2c_read_hdr), &bytes_actual,
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0)
+ || (bytes_actual != sizeof(struct vprbrd_i2c_read_hdr))) {
+ dev_err(&usb_dev->dev, "failure transmitting usb\n");
+ error = -EREMOTEIO;
+ }
+
+ /* read the actual data */
+ ret = usb_bulk_msg(usb_dev,
+ usb_rcvbulkpipe(usb_dev, VPRBRD_EP_IN), rmsg,
+ bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0) || (bytes_xfer != bytes_actual)) {
+ dev_err(&usb_dev->dev, "failure receiving usb\n");
+ error = -EREMOTEIO;
+ }
+ return error;
+}
+
+static int vprbrd_i2c_addr(struct usb_device *usb_dev,
+ struct vprbrd_i2c_addr_msg *amsg)
+{
+ int ret, bytes_actual;
+
+ ret = usb_bulk_msg(usb_dev,
+ usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), amsg,
+ sizeof(struct vprbrd_i2c_addr_msg), &bytes_actual,
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0) ||
+ (sizeof(struct vprbrd_i2c_addr_msg) != bytes_actual)) {
+ dev_err(&usb_dev->dev, "failure transmitting usb\n");
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
+{
+ int ret;
+ u16 remain_len, bytes_xfer, len1, len2,
+ start = 0x0000;
+ struct vprbrd_i2c_read_msg *rmsg =
+ (struct vprbrd_i2c_read_msg *)vb->buf;
+
+ remain_len = msg->len;
+ rmsg->header.cmd = VPRBRD_I2C_CMD_READ;
+ while (remain_len > 0) {
+ rmsg->header.addr = cpu_to_le16(start + 0x4000);
+ if (remain_len <= 255) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len;
+ rmsg->header.len1 = 0x00;
+ rmsg->header.len2 = 0x00;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 510) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len - 255;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0x00;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 512) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len - 510;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 767) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 512;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ bytes_xfer = remain_len;
+ remain_len = 0;
+ } else if (remain_len <= 1022) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 767;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 1024) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 1022;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0xff;
+ remain_len = 0;
+ } else {
+ len1 = 512;
+ len2 = 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = 0x02;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0xff;
+ remain_len -= 1024;
+ start += 1024;
+ }
+ rmsg->header.tf1 = cpu_to_le16(len1);
+ rmsg->header.tf2 = cpu_to_le16(len2);
+
+ /* first read transfer */
+ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len1);
+ if (ret < 0)
+ return ret;
+ /* copy the received data */
+ memcpy(msg->buf + start, rmsg, len1);
+
+ /* second read transfer if necessary */
+ if (len2 > 0) {
+ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2);
+ if (ret < 0)
+ return ret;
+ /* copy the received data */
+ memcpy(msg->buf + start + 512, rmsg, len2);
+ }
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_write(struct vprbrd *vb, struct i2c_msg *msg)
+{
+ int ret, bytes_actual;
+ u16 remain_len, bytes_xfer,
+ start = 0x0000;
+ struct vprbrd_i2c_write_msg *wmsg =
+ (struct vprbrd_i2c_write_msg *)vb->buf;
+
+ remain_len = msg->len;
+ wmsg->header.cmd = VPRBRD_I2C_CMD_WRITE;
+ wmsg->header.last = 0x00;
+ wmsg->header.chan = 0x00;
+ wmsg->header.spi = 0x0000;
+ while (remain_len > 0) {
+ wmsg->header.addr = cpu_to_le16(start + 0x4000);
+ if (remain_len > 503) {
+ wmsg->header.len1 = 0xff;
+ wmsg->header.len2 = 0xf8;
+ remain_len -= 503;
+ bytes_xfer = 503 + sizeof(struct vprbrd_i2c_write_hdr);
+ start += 503;
+ } else if (remain_len > 255) {
+ wmsg->header.len1 = 0xff;
+ wmsg->header.len2 = (remain_len - 255);
+ bytes_xfer = remain_len +
+ sizeof(struct vprbrd_i2c_write_hdr);
+ remain_len = 0;
+ } else {
+ wmsg->header.len1 = remain_len;
+ wmsg->header.len2 = 0x00;
+ bytes_xfer = remain_len +
+ sizeof(struct vprbrd_i2c_write_hdr);
+ remain_len = 0;
+ }
+ memcpy(wmsg->data, msg->buf + start,
+ bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr));
+
+ ret = usb_bulk_msg(vb->usb_dev,
+ usb_sndbulkpipe(vb->usb_dev,
+ VPRBRD_EP_OUT), wmsg,
+ bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
+ if ((ret < 0) || (bytes_xfer != bytes_actual))
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
+ int num)
+{
+ struct i2c_msg *pmsg;
+ int i, ret,
+ error = 0;
+ struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
+ struct vprbrd_i2c_addr_msg *amsg =
+ (struct vprbrd_i2c_addr_msg *)vb->buf;
+ struct vprbrd_i2c_status *smsg = (struct vprbrd_i2c_status *)vb->buf;
+
+ dev_dbg(&i2c->dev, "master xfer %d messages:\n", num);
+
+ for (i = 0 ; i < num ; i++) {
+ pmsg = &msgs[i];
+
+ dev_dbg(&i2c->dev,
+ " %d: %s (flags %d) %d bytes to 0x%02x\n",
+ i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ pmsg->flags, pmsg->len, pmsg->addr);
+
+ /* msgs longer than 2048 bytes are not supported by the adapter */
+ if (pmsg->len > 2048)
+ return -EINVAL;
+
+ mutex_lock(&vb->lock);
+ /* directly send the message */
+ if (pmsg->flags & I2C_M_RD) {
+ /* read data */
+ amsg->cmd = VPRBRD_I2C_CMD_ADDR;
+ amsg->unknown2 = 0x00;
+ amsg->unknown3 = 0x00;
+ amsg->addr = pmsg->addr;
+ amsg->unknown1 = 0x01;
+ amsg->len = cpu_to_le16(pmsg->len);
+ /* send the addr and len we are interested in to the board */
+ ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_read(vb, pmsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_status(i2c, smsg, error);
+ if (ret < 0)
+ error = ret;
+ /* in case of protocol error, return the error */
+ if (error < 0)
+ goto error;
+ } else {
+ /* write data */
+ ret = vprbrd_i2c_write(vb, pmsg);
+
+ amsg->cmd = VPRBRD_I2C_CMD_ADDR;
+ amsg->unknown2 = 0x00;
+ amsg->unknown3 = 0x00;
+ amsg->addr = pmsg->addr;
+ amsg->unknown1 = 0x00;
+ amsg->len = cpu_to_le16(pmsg->len);
+ /* send the addr, the data goes to the board */
+ ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_status(i2c, smsg, error);
+ if (ret < 0)
+ error = ret;
+
+ if (error < 0)
+ goto error;
+ }
+ mutex_unlock(&vb->lock);
+ }
+ return 0;
+error:
+ mutex_unlock(&vb->lock);
+ return error;
+}
+
+static u32 vprbrd_i2c_func(struct i2c_adapter *i2c)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+/* This is the actual algorithm we define */
+static const struct i2c_algorithm vprbrd_algorithm = {
+ .master_xfer = vprbrd_i2c_xfer,
+ .functionality = vprbrd_i2c_func,
+};
+
+static int vprbrd_i2c_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_i2c *vb_i2c;
+ int ret;
+ int pipe;
+
+ vb_i2c = kzalloc(sizeof(*vb_i2c), GFP_KERNEL);
+ if (vb_i2c == NULL)
+ return -ENOMEM;
+
+ /* setup i2c adapter description */
+ vb_i2c->i2c.owner = THIS_MODULE;
+ vb_i2c->i2c.class = I2C_CLASS_HWMON;
+ vb_i2c->i2c.algo = &vprbrd_algorithm;
+ vb_i2c->i2c.algo_data = vb;
+ /* save the param in USB-capable memory */
+ vb_i2c->bus_freq_param = i2c_bus_param;
+
+ snprintf(vb_i2c->i2c.name, sizeof(vb_i2c->i2c.name),
+ "viperboard at bus %03d device %03d",
+ vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+
+ /* setting the bus frequency */
+ if ((i2c_bus_param <= VPRBRD_I2C_FREQ_10KHZ)
+ && (i2c_bus_param >= VPRBRD_I2C_FREQ_6MHZ)) {
+ pipe = usb_sndctrlpipe(vb->usb_dev, 0);
+ ret = usb_control_msg(vb->usb_dev, pipe,
+ VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret != 1) {
+ dev_err(&pdev->dev,
+ "failure setting i2c_bus_freq to %d\n", i2c_bus_freq);
+ ret = -EIO;
+ goto error;
+ }
+ } else {
+ dev_err(&pdev->dev,
+ "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
+ ret = -EIO;
+ goto error;
+ }
+
+ vb_i2c->i2c.dev.parent = &pdev->dev;
+
+ /* attach to i2c layer */
+ i2c_add_adapter(&vb_i2c->i2c);
+
+ platform_set_drvdata(pdev, vb_i2c);
+
+ return 0;
+
+error:
+ kfree(vb_i2c);
+ return ret;
+}
+
+static int vprbrd_i2c_remove(struct platform_device *pdev)
+{
+ struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i2c_del_adapter(&vb_i2c->i2c);
+
+ return ret;
+}
+
+static struct platform_driver vprbrd_i2c_driver = {
+ .driver.name = "viperboard-i2c",
+ .driver.owner = THIS_MODULE,
+ .probe = vprbrd_i2c_probe,
+ .remove = vprbrd_i2c_remove,
+};
+
+static int __init vprbrd_i2c_init(void)
+{
+ switch (i2c_bus_freq) {
+ case 6000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_6MHZ;
+ break;
+ case 3000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_3MHZ;
+ break;
+ case 1000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_1MHZ;
+ break;
+ case 400:
+ i2c_bus_param = VPRBRD_I2C_FREQ_400KHZ;
+ break;
+ case 200:
+ i2c_bus_param = VPRBRD_I2C_FREQ_200KHZ;
+ break;
+ case 100:
+ i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
+ break;
+ case 10:
+ i2c_bus_param = VPRBRD_I2C_FREQ_10KHZ;
+ break;
+ default:
+ pr_warn("invalid i2c_bus_freq (%d)\n", i2c_bus_freq);
+ i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
+ }
+
+ return platform_driver_register(&vprbrd_i2c_driver);
+}
+subsys_initcall(vprbrd_i2c_init);
+
+static void __exit vprbrd_i2c_exit(void)
+{
+ platform_driver_unregister(&vprbrd_i2c_driver);
+}
+module_exit(vprbrd_i2c_exit);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("I2C master driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-i2c");
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 641d0e5e3303..f042f6da0ace 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -689,7 +689,7 @@ static struct i2c_adapter xiic_adapter = {
};
-static int __devinit xiic_i2c_probe(struct platform_device *pdev)
+static int xiic_i2c_probe(struct platform_device *pdev)
{
struct xiic_i2c *i2c;
struct xiic_i2c_platform_data *pdata;
@@ -774,7 +774,7 @@ resource_missing:
return -ENOENT;
}
-static int __devexit xiic_i2c_remove(struct platform_device* pdev)
+static int xiic_i2c_remove(struct platform_device *pdev)
{
struct xiic_i2c *i2c = platform_get_drvdata(pdev);
struct resource *res;
@@ -800,7 +800,7 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
}
#if defined(CONFIG_OF)
-static const struct of_device_id xiic_of_match[] __devinitconst = {
+static const struct of_device_id xiic_of_match[] = {
{ .compatible = "xlnx,xps-iic-2.00.a", },
{},
};
@@ -809,7 +809,7 @@ MODULE_DEVICE_TABLE(of, xiic_of_match);
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
- .remove = __devexit_p(xiic_i2c_remove),
+ .remove = xiic_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 96d3fabd8883..93f029e98c0d 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -7,6 +7,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -214,7 +215,7 @@ static struct i2c_algorithm xlr_i2c_algo = {
.functionality = xlr_func,
};
-static int __devinit xlr_i2c_probe(struct platform_device *pdev)
+static int xlr_i2c_probe(struct platform_device *pdev)
{
struct xlr_i2c_private *priv;
struct resource *res;
@@ -225,11 +226,9 @@ static int __devinit xlr_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->iobase = devm_request_and_ioremap(&pdev->dev, res);
- if (!priv->iobase) {
- dev_err(&pdev->dev, "devm_request_and_ioremap failed\n");
- return -EBUSY;
- }
+ priv->iobase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->iobase))
+ return PTR_ERR(priv->iobase);
priv->adap.dev.parent = &pdev->dev;
priv->adap.owner = THIS_MODULE;
@@ -251,7 +250,7 @@ static int __devinit xlr_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit xlr_i2c_remove(struct platform_device *pdev)
+static int xlr_i2c_remove(struct platform_device *pdev)
{
struct xlr_i2c_private *priv;
@@ -263,7 +262,7 @@ static int __devexit xlr_i2c_remove(struct platform_device *pdev)
static struct platform_driver xlr_i2c_driver = {
.probe = xlr_i2c_probe,
- .remove = __devexit_p(xlr_i2c_remove),
+ .remove = xlr_i2c_remove,
.driver = {
.name = "xlr-i2cbus",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 08aab57337dd..3862a953239c 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -389,7 +389,7 @@ static const struct i2c_algorithm scx200_acb_algorithm = {
static struct scx200_acb_iface *scx200_acb_list;
static DEFINE_MUTEX(scx200_acb_list_mutex);
-static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
+static int scx200_acb_probe(struct scx200_acb_iface *iface)
{
u8 val;
@@ -424,7 +424,7 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
return 0;
}
-static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
+static struct scx200_acb_iface *scx200_create_iface(const char *text,
struct device *dev, int index)
{
struct scx200_acb_iface *iface;
@@ -449,7 +449,7 @@ static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
return iface;
}
-static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
+static int scx200_acb_create(struct scx200_acb_iface *iface)
{
struct i2c_adapter *adapter;
int rc;
@@ -480,7 +480,7 @@ static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
return 0;
}
-static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
+static struct scx200_acb_iface *scx200_create_dev(const char *text,
unsigned long base, int index, struct device *dev)
{
struct scx200_acb_iface *iface;
@@ -508,7 +508,7 @@ static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
return NULL;
}
-static int __devinit scx200_probe(struct platform_device *pdev)
+static int scx200_probe(struct platform_device *pdev)
{
struct scx200_acb_iface *iface;
struct resource *res;
@@ -530,14 +530,14 @@ static int __devinit scx200_probe(struct platform_device *pdev)
return 0;
}
-static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface)
+static void scx200_cleanup_iface(struct scx200_acb_iface *iface)
{
i2c_del_adapter(&iface->adapter);
release_region(iface->base, 8);
kfree(iface);
}
-static int __devexit scx200_remove(struct platform_device *pdev)
+static int scx200_remove(struct platform_device *pdev)
{
struct scx200_acb_iface *iface;
@@ -554,7 +554,7 @@ static struct platform_driver scx200_pci_driver = {
.owner = THIS_MODULE,
},
.probe = scx200_probe,
- .remove = __devexit_p(scx200_remove),
+ .remove = scx200_remove,
};
static DEFINE_PCI_DEVICE_TABLE(scx200_isa) = {
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index a0edd9854218..0be5b83c08fa 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -19,7 +19,6 @@ config I2C_MUX_GPIO
config I2C_MUX_PCA9541
tristate "NXP PCA9541 I2C Master Selector"
- depends on EXPERIMENTAL
help
If you say yes here you get support for the NXP PCA9541
I2C Master Selector.
@@ -29,7 +28,6 @@ config I2C_MUX_PCA9541
config I2C_MUX_PCA954x
tristate "Philips PCA954x I2C Mux/switches"
- depends on EXPERIMENTAL
help
If you say yes here you get support for the Philips PCA954x
I2C mux/switch devices.
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 566a6757a33d..9f50ef04a4bd 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include <linux/of_i2c.h>
+#include <linux/of_gpio.h>
struct gpiomux {
struct i2c_adapter *parent;
@@ -51,35 +53,116 @@ static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
return 0;
}
-static int __devinit match_gpio_chip_by_label(struct gpio_chip *chip,
+static int match_gpio_chip_by_label(struct gpio_chip *chip,
void *data)
{
return !strcmp(chip->label, data);
}
-static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *adapter_np, *child;
+ struct i2c_adapter *adapter;
+ unsigned *values, *gpios;
+ int i = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ if (!adapter) {
+ dev_err(&pdev->dev, "Cannot find parent bus\n");
+ return -ENODEV;
+ }
+ mux->data.parent = i2c_adapter_id(adapter);
+ put_device(&adapter->dev);
+
+ mux->data.n_values = of_get_child_count(np);
+
+ values = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->data.values) * mux->data.n_values,
+ GFP_KERNEL);
+ if (!values) {
+ dev_err(&pdev->dev, "Cannot allocate values array");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(np, child) {
+ of_property_read_u32(child, "reg", values + i);
+ i++;
+ }
+ mux->data.values = values;
+
+ if (of_property_read_u32(np, "idle-state", &mux->data.idle))
+ mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
+
+ mux->data.n_gpios = of_gpio_named_count(np, "mux-gpios");
+ if (mux->data.n_gpios < 0) {
+ dev_err(&pdev->dev, "Missing mux-gpios property in the DT.\n");
+ return -EINVAL;
+ }
+
+ gpios = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->data.gpios) * mux->data.n_gpios, GFP_KERNEL);
+ if (!gpios) {
+ dev_err(&pdev->dev, "Cannot allocate gpios array");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < mux->data.n_gpios; i++)
+ gpios[i] = of_get_named_gpio(np, "mux-gpios", i);
+
+ mux->data.gpios = gpios;
+
+ return 0;
+}
+#else
+static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int i2c_mux_gpio_probe(struct platform_device *pdev)
{
struct gpiomux *mux;
- struct i2c_mux_gpio_platform_data *pdata;
struct i2c_adapter *parent;
int (*deselect) (struct i2c_adapter *, void *, u32);
unsigned initial_state, gpio_base;
int i, ret;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "Missing platform data\n");
- return -ENODEV;
+ mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ dev_err(&pdev->dev, "Cannot allocate gpiomux structure");
+ return -ENOMEM;
}
+ platform_set_drvdata(pdev, mux);
+
+ if (!pdev->dev.platform_data) {
+ ret = i2c_mux_gpio_probe_dt(mux, pdev);
+ if (ret < 0)
+ return ret;
+ } else
+ memcpy(&mux->data, pdev->dev.platform_data, sizeof(mux->data));
+
/*
* If a GPIO chip name is provided, the GPIO pin numbers provided are
* relative to its base GPIO number. Otherwise they are absolute.
*/
- if (pdata->gpio_chip) {
+ if (mux->data.gpio_chip) {
struct gpio_chip *gpio;
- gpio = gpiochip_find(pdata->gpio_chip,
+ gpio = gpiochip_find(mux->data.gpio_chip,
match_gpio_chip_by_label);
if (!gpio)
return -EPROBE_DEFER;
@@ -89,49 +172,44 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
gpio_base = 0;
}
- parent = i2c_get_adapter(pdata->parent);
+ parent = i2c_get_adapter(mux->data.parent);
if (!parent) {
dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
- pdata->parent);
+ mux->data.parent);
return -ENODEV;
}
- mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
- if (!mux) {
- ret = -ENOMEM;
- goto alloc_failed;
- }
-
mux->parent = parent;
- mux->data = *pdata;
mux->gpio_base = gpio_base;
+
mux->adap = devm_kzalloc(&pdev->dev,
- sizeof(*mux->adap) * pdata->n_values,
+ sizeof(*mux->adap) * mux->data.n_values,
GFP_KERNEL);
if (!mux->adap) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
ret = -ENOMEM;
goto alloc_failed;
}
- if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
- initial_state = pdata->idle;
+ if (mux->data.idle != I2C_MUX_GPIO_NO_IDLE) {
+ initial_state = mux->data.idle;
deselect = i2c_mux_gpio_deselect;
} else {
- initial_state = pdata->values[0];
+ initial_state = mux->data.values[0];
deselect = NULL;
}
- for (i = 0; i < pdata->n_gpios; i++) {
- ret = gpio_request(gpio_base + pdata->gpios[i], "i2c-mux-gpio");
+ for (i = 0; i < mux->data.n_gpios; i++) {
+ ret = gpio_request(gpio_base + mux->data.gpios[i], "i2c-mux-gpio");
if (ret)
goto err_request_gpio;
- gpio_direction_output(gpio_base + pdata->gpios[i],
+ gpio_direction_output(gpio_base + mux->data.gpios[i],
initial_state & (1 << i));
}
- for (i = 0; i < pdata->n_values; i++) {
- u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
- unsigned int class = pdata->classes ? pdata->classes[i] : 0;
+ for (i = 0; i < mux->data.n_values; i++) {
+ u32 nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
+ unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
i, class,
@@ -144,26 +222,24 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%d port mux on %s adapter\n",
- pdata->n_values, parent->name);
-
- platform_set_drvdata(pdev, mux);
+ mux->data.n_values, parent->name);
return 0;
add_adapter_failed:
for (; i > 0; i--)
i2c_del_mux_adapter(mux->adap[i - 1]);
- i = pdata->n_gpios;
+ i = mux->data.n_gpios;
err_request_gpio:
for (; i > 0; i--)
- gpio_free(gpio_base + pdata->gpios[i - 1]);
+ gpio_free(gpio_base + mux->data.gpios[i - 1]);
alloc_failed:
i2c_put_adapter(parent);
return ret;
}
-static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
+static int i2c_mux_gpio_remove(struct platform_device *pdev)
{
struct gpiomux *mux = platform_get_drvdata(pdev);
int i;
@@ -180,12 +256,19 @@ static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id i2c_mux_gpio_of_match[] = {
+ { .compatible = "i2c-mux-gpio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_gpio_of_match);
+
static struct platform_driver i2c_mux_gpio_driver = {
.probe = i2c_mux_gpio_probe,
- .remove = __devexit_p(i2c_mux_gpio_remove),
+ .remove = i2c_mux_gpio_remove,
.driver = {
.owner = THIS_MODULE,
.name = "i2c-mux-gpio",
+ .of_match_table = of_match_ptr(i2c_mux_gpio_of_match),
},
};
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 7fa5b24b16db..a43c0ce5e3d8 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -129,7 +129,7 @@ static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
}
#endif
-static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
{
struct i2c_mux_pinctrl *mux;
int (*deselect)(struct i2c_adapter *, void *, u32);
@@ -167,7 +167,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
}
mux->busses = devm_kzalloc(&pdev->dev,
- sizeof(mux->busses) * mux->pdata->bus_count,
+ sizeof(*mux->busses) * mux->pdata->bus_count,
GFP_KERNEL);
if (!mux->busses) {
dev_err(&pdev->dev, "Cannot allocate busses\n");
@@ -241,7 +241,7 @@ err:
return ret;
}
-static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+static int i2c_mux_pinctrl_remove(struct platform_device *pdev)
{
struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
int i;
@@ -255,7 +255,7 @@ static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+static const struct of_device_id i2c_mux_pinctrl_of_match[] = {
{ .compatible = "i2c-mux-pinctrl", },
{},
};
@@ -269,7 +269,7 @@ static struct platform_driver i2c_mux_pinctrl_driver = {
.of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
},
.probe = i2c_mux_pinctrl_probe,
- .remove = __devexit_p(i2c_mux_pinctrl_remove),
+ .remove = i2c_mux_pinctrl_remove,
};
module_platform_driver(i2c_mux_pinctrl_driver);
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 5a26584934ca..02906ca99b41 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -322,8 +322,7 @@ config BLK_DEV_GENERIC
which otherwise might not be supported.
config BLK_DEV_OPTI621
- tristate "OPTi 82C621 chipset enhanced support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "OPTi 82C621 chipset enhanced support"
select BLK_DEV_IDEPCI
help
This is a driver for the OPTi 82C621 EIDE controller.
@@ -417,7 +416,6 @@ config BLK_DEV_CY82C693
config BLK_DEV_CS5520
tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
- depends on EXPERIMENTAL
select BLK_DEV_IDEDMA_PCI
help
Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -702,11 +700,6 @@ config BLK_DEV_IDE_TX4939
depends on SOC_TX4939
select BLK_DEV_IDEDMA_SFF
-config BLK_DEV_IDE_AT91
- tristate "Atmel AT91 (SAM9, CAP9, AT572D940HF) IDE support"
- depends on ARM && ARCH_AT91 && !ARCH_AT91RM9200 && !ARCH_AT91X40
- select IDE_TIMINGS
-
config BLK_DEV_IDE_ICSIDE
tristate "ICS IDE interface support"
depends on ARM && ARCH_ACORN
@@ -761,8 +754,8 @@ config BLK_DEV_GAYLE
use Gayle IDE interfaces on the Zorro expansion bus.
config BLK_DEV_BUDDHA
- tristate "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
- depends on ZORRO && EXPERIMENTAL
+ tristate "Buddha/Catweasel/X-Surf IDE interface support"
+ depends on ZORRO
help
This is the IDE driver for the IDE interfaces on the Buddha, Catweasel
and X-Surf expansion boards. It supports up to two interfaces on the
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
index 01451940393b..c7eaf20af926 100644
--- a/drivers/ide/aec62xx.c
+++ b/drivers/ide/aec62xx.c
@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
.cable_detect = atp86x_cable_detect,
};
-static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
+static const struct ide_port_info aec62xx_chipsets[] = {
{ /* 0: AEC6210 */
.name = DRV_NAME,
.init_chipset = init_chipset_aec62xx,
@@ -251,7 +251,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
* chips, pass a local copy of 'struct ide_port_info' down the call chain.
*/
-static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct chipset_bus_clock_list_entry *bus_clock;
struct ide_port_info d;
@@ -287,7 +287,7 @@ static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_devi
return err;
}
-static void __devexit aec62xx_remove(struct pci_dev *dev)
+static void aec62xx_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_disable_device(dev);
@@ -307,7 +307,7 @@ static struct pci_driver aec62xx_pci_driver = {
.name = "AEC62xx_IDE",
.id_table = aec62xx_pci_tbl,
.probe = aec62xx_init_one,
- .remove = __devexit_p(aec62xx_remove),
+ .remove = aec62xx_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 911a27ca356b..36f76e28a0bf 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -415,7 +415,7 @@ static u8 ali_cable_detect(ide_hwif_t *hwif)
* Sparc systems.
*/
-static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
+static void init_hwif_ali15x3(ide_hwif_t *hwif)
{
u8 ideic, inmir;
s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
@@ -464,8 +464,7 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
* Set up the DMA functionality on the ALi 15x3.
*/
-static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int init_dma_ali15x3(ide_hwif_t *hwif, const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long base = ide_pci_dma_base(hwif, d);
@@ -512,7 +511,7 @@ static const struct ide_dma_ops ali_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info ali15x3_chipset __devinitconst = {
+static const struct ide_port_info ali15x3_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_ali15x3,
.init_hwif = init_hwif_ali15x3,
@@ -532,7 +531,8 @@ static const struct ide_port_info ali15x3_chipset __devinitconst = {
* hot plug layer.
*/
-static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int alim15x3_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct ide_port_info d = ali15x3_chipset;
u8 rev = dev->revision, idx = id->driver_data;
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
index 56fc99557ba2..cbfe846911d1 100644
--- a/drivers/ide/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
.udma_mask = udma, \
}
-static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
+static const struct ide_port_info amd74xx_chipsets[] = {
/* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
/* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
/* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
@@ -235,7 +235,7 @@ static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
/* 6: AMD5536 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
};
-static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d;
u8 idx = id->driver_data;
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index cb43480b1bd5..dbd0f242ec18 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
.cable_detect = atiixp_cable_detect,
};
-static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
+static const struct ide_port_info atiixp_pci_info[] = {
{ /* 0: IXP200/300/400/700 */
.name = DRV_NAME,
.enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
@@ -168,7 +168,7 @@ static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &atiixp_pci_info[id->driver_data], NULL);
}
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index d1fc43802f5d..b127ed60c733 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
+static const struct ide_port_info cmd64x_chipsets[] = {
{ /* 0: CMD643 */
.name = DRV_NAME,
.init_chipset = init_chipset_cmd64x,
@@ -373,7 +373,7 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
}
};
-static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d;
u8 idx = id->driver_data;
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 14447621e60b..6250aee30503 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
.set_dma_mode = cs5520_set_dma_mode,
};
-static const struct ide_port_info cyrix_chipset __devinitconst = {
+static const struct ide_port_info cyrix_chipset = {
.name = DRV_NAME,
.enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
.port_ops = &cs5520_port_ops,
@@ -108,7 +108,7 @@ static const struct ide_port_info cyrix_chipset __devinitconst = {
* work longhand.
*/
-static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &cyrix_chipset;
struct ide_hw hw[2], *hws[] = { NULL, NULL };
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
index 49b40ad59d1a..65371599b976 100644
--- a/drivers/ide/cs5530.c
+++ b/drivers/ide/cs5530.c
@@ -226,7 +226,7 @@ out:
* performs channel-specific pre-initialization before drive probing.
*/
-static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
+static void init_hwif_cs5530 (ide_hwif_t *hwif)
{
unsigned long basereg;
u32 d0_timings;
@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
.udma_filter = cs5530_udma_filter,
};
-static const struct ide_port_info cs5530_chipset __devinitconst = {
+static const struct ide_port_info cs5530_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_cs5530,
.init_hwif = init_hwif_cs5530,
@@ -257,7 +257,7 @@ static const struct ide_port_info cs5530_chipset __devinitconst = {
.udma_mask = ATA_UDMA2,
};
-static int __devinit cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &cs5530_chipset, NULL);
}
diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
index 18d4c852602b..3bc5b9a34013 100644
--- a/drivers/ide/cs5535.c
+++ b/drivers/ide/cs5535.c
@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
.cable_detect = cs5535_cable_detect,
};
-static const struct ide_port_info cs5535_chipset __devinitconst = {
+static const struct ide_port_info cs5535_chipset = {
.name = DRV_NAME,
.port_ops = &cs5535_port_ops,
.host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
@@ -179,8 +179,7 @@ static const struct ide_port_info cs5535_chipset __devinitconst = {
.udma_mask = ATA_UDMA4,
};
-static int __devinit cs5535_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &cs5535_chipset, NULL);
}
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 3ffb49dab574..f5820079a286 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -145,7 +145,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
pci_dev_put(dev);
}
-static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
+static void init_iops_cy82c693(ide_hwif_t *hwif)
{
static ide_hwif_t *primary;
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
.set_dma_mode = cy82c693_set_dma_mode,
};
-static const struct ide_port_info cy82c693_chipset __devinitconst = {
+static const struct ide_port_info cy82c693_chipset = {
.name = DRV_NAME,
.init_iops = init_iops_cy82c693,
.port_ops = &cy82c693_port_ops,
@@ -173,7 +173,8 @@ static const struct ide_port_info cy82c693_chipset __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int cy82c693_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct pci_dev *dev2;
int ret = -ENODEV;
@@ -190,7 +191,7 @@ static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_dev
return ret;
}
-static void __devexit cy82c693_remove(struct pci_dev *dev)
+static void cy82c693_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
@@ -209,7 +210,7 @@ static struct pci_driver cy82c693_pci_driver = {
.name = "Cypress_IDE",
.id_table = cy82c693_pci_tbl,
.probe = cy82c693_init_one,
- .remove = __devexit_p(cy82c693_remove),
+ .remove = cy82c693_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c
index 1e10eba62ceb..7e27d3295e55 100644
--- a/drivers/ide/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
@@ -71,8 +71,7 @@ static const struct ide_port_info delkin_cb_port_info = {
.chipset = ide_pci,
};
-static int __devinit
-delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
+static int delkin_cb_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_host *host;
unsigned long base;
@@ -158,7 +157,7 @@ static int delkin_cb_resume(struct pci_dev *dev)
#define delkin_cb_resume NULL
#endif
-static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
+static struct pci_device_id delkin_cb_pci_tbl[] = {
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0, },
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 4aec3b87ff91..696b6c1ec940 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
}
};
-static const struct hpt_info hpt36x __devinitconst = {
+static const struct hpt_info hpt36x = {
.chip_name = "HPT36x",
.chip_type = HPT36x,
.udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitconst = {
.timings = &hpt36x_timings
};
-static const struct hpt_info hpt370 __devinitconst = {
+static const struct hpt_info hpt370 = {
.chip_name = "HPT370",
.chip_type = HPT370,
.udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt370a __devinitconst = {
+static const struct hpt_info hpt370a = {
.chip_name = "HPT370A",
.chip_type = HPT370A,
.udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt374 __devinitconst = {
+static const struct hpt_info hpt374 = {
.chip_name = "HPT374",
.chip_type = HPT374,
.udma_mask = ATA_UDMA5,
@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt372 __devinitconst = {
+static const struct hpt_info hpt372 = {
.chip_name = "HPT372",
.chip_type = HPT372,
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt372a __devinitconst = {
+static const struct hpt_info hpt372a = {
.chip_name = "HPT372A",
.chip_type = HPT372A,
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt302 __devinitconst = {
+static const struct hpt_info hpt302 = {
.chip_name = "HPT302",
.chip_type = HPT302,
.udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt371 __devinitconst = {
+static const struct hpt_info hpt371 = {
.chip_name = "HPT371",
.chip_type = HPT371,
.udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt372n __devinitconst = {
+static const struct hpt_info hpt372n = {
.chip_name = "HPT372N",
.chip_type = HPT372N,
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt302n __devinitconst = {
+static const struct hpt_info hpt302n = {
.chip_name = "HPT302N",
.chip_type = HPT302N,
.udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt371n __devinitconst = {
+static const struct hpt_info hpt371n = {
.chip_name = "HPT371N",
.chip_type = HPT371N,
.udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -1197,7 +1197,7 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
return (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
-static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
+static void init_hwif_hpt366(ide_hwif_t *hwif)
{
struct hpt_info *info = hpt3xx_get_info(hwif->dev);
u8 chip_type = info->chip_type;
@@ -1221,7 +1221,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
}
}
-static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
+static int init_dma_hpt366(ide_hwif_t *hwif,
const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -1265,7 +1265,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
return 0;
}
-static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
+static void hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
{
if (dev2->irq != dev->irq) {
/* FIXME: we need a core pci_set_interrupt() */
@@ -1275,7 +1275,7 @@ static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
}
}
-static void __devinit hpt371_init(struct pci_dev *dev)
+static void hpt371_init(struct pci_dev *dev)
{
u8 mcr1 = 0;
@@ -1290,7 +1290,7 @@ static void __devinit hpt371_init(struct pci_dev *dev)
pci_write_config_byte(dev, 0x50, mcr1 & ~0x04);
}
-static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
+static int hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
{
u8 mcr1 = 0, pin1 = 0, pin2 = 0;
@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
+static const struct ide_port_info hpt366_chipsets[] = {
{ /* 0: HPT36x */
.name = DRV_NAME,
.init_chipset = init_chipset_hpt366,
@@ -1402,7 +1402,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
* Called when the PCI registration layer (or the IDE initialization)
* finds a device matching our IDE device tables.
*/
-static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct hpt_info *info = NULL;
struct hpt_info *dyn_info;
@@ -1499,7 +1499,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
return ret;
}
-static void __devexit hpt366_remove(struct pci_dev *dev)
+static void hpt366_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct ide_info *info = host->host_priv;
@@ -1510,7 +1510,7 @@ static void __devexit hpt366_remove(struct pci_dev *dev)
kfree(info);
}
-static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = {
+static const struct pci_device_id hpt366_pci_tbl[] = {
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 },
@@ -1525,7 +1525,7 @@ static struct pci_driver hpt366_pci_driver = {
.name = "HPT366_IDE",
.id_table = hpt366_pci_tbl,
.probe = hpt366_init_one,
- .remove = __devexit_p(hpt366_remove),
+ .remove = hpt366_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index e640d0ac3af6..9f0a48e39b8a 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -406,8 +406,8 @@ static const struct ide_port_info icside_v5_port_info = {
.chipset = ide_acorn,
};
-static int __devinit
-icside_register_v5(struct icside_state *state, struct expansion_card *ec)
+static int icside_register_v5(struct icside_state *state,
+ struct expansion_card *ec)
{
void __iomem *base;
struct ide_host *host;
@@ -460,8 +460,8 @@ static const struct ide_port_info icside_v6_port_info __initconst = {
.chipset = ide_acorn,
};
-static int __devinit
-icside_register_v6(struct icside_state *state, struct expansion_card *ec)
+static int icside_register_v6(struct icside_state *state,
+ struct expansion_card *ec)
{
void __iomem *ioc_base, *easi_base;
struct ide_host *host;
@@ -537,8 +537,7 @@ out:
return ret;
}
-static int __devinit
-icside_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct icside_state *state;
void __iomem *idmem;
@@ -604,7 +603,7 @@ icside_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit icside_remove(struct expansion_card *ec)
+static void icside_remove(struct expansion_card *ec)
{
struct icside_state *state = ecard_get_drvdata(ec);
@@ -666,7 +665,7 @@ static const struct ecard_id icside_ids[] = {
static struct ecard_driver icside_driver = {
.probe = icside_probe,
- .remove = __devexit_p(icside_remove),
+ .remove = icside_remove,
.shutdown = icside_shutdown,
.id_table = icside_ids,
.drv = {
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index dab5b670bfbf..673420db953f 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
.udma_mask = ATA_UDMA6, \
}
-static const struct ide_port_info generic_chipsets[] __devinitconst = {
+static const struct ide_port_info generic_chipsets[] = {
/* 0: Unknown */
DECLARE_GENERIC_PCI_DEV(0),
@@ -103,7 +103,7 @@ static const struct ide_port_info generic_chipsets[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &generic_chipsets[id->driver_data];
int ret = -ENODEV;
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 962693b10a1c..ba4bfbead24b 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -22,11 +22,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
- void __iomem *base,
- void __iomem *ctrl,
- struct pata_platform_info *pdata,
- int irq)
+static void plat_ide_setup_ports(struct ide_hw *hw, void __iomem *base,
+ void __iomem *ctrl,
+ struct pata_platform_info *pdata, int irq)
{
unsigned long port = (unsigned long)base;
int i;
@@ -48,7 +46,7 @@ static const struct ide_port_info platform_ide_port_info = {
.chipset = ide_generic,
};
-static int __devinit plat_ide_probe(struct platform_device *pdev)
+static int plat_ide_probe(struct platform_device *pdev)
{
struct resource *res_base, *res_alt, *res_irq;
void __iomem *base, *alt_base;
@@ -115,7 +113,7 @@ out:
return ret;
}
-static int __devexit plat_ide_remove(struct platform_device *pdev)
+static int plat_ide_remove(struct platform_device *pdev)
{
struct ide_host *host = dev_get_drvdata(&pdev->dev);
@@ -130,7 +128,7 @@ static struct platform_driver platform_ide_driver = {
.owner = THIS_MODULE,
},
.probe = plat_ide_probe,
- .remove = __devexit_p(plat_ide_remove),
+ .remove = plat_ide_remove,
};
static int __init platform_ide_init(void)
diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
index d5dd180c4b85..b6f674ab4fb7 100644
--- a/drivers/ide/it8172.c
+++ b/drivers/ide/it8172.c
@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
.set_dma_mode = it8172_set_dma_mode,
};
-static const struct ide_port_info it8172_port_info __devinitconst = {
+static const struct ide_port_info it8172_port_info = {
.name = DRV_NAME,
.port_ops = &it8172_port_ops,
.enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
@@ -125,8 +125,7 @@ static const struct ide_port_info it8172_port_info __devinitconst = {
.udma_mask = ATA_UDMA2,
};
-static int __devinit it8172_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int it8172_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
return -ENODEV; /* IT8172 is more than an IDE controller */
diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
index 1847aeb5450a..6b92846682fc 100644
--- a/drivers/ide/it8213.c
+++ b/drivers/ide/it8213.c
@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
.cable_detect = it8213_cable_detect,
};
-static const struct ide_port_info it8213_chipset __devinitconst = {
+static const struct ide_port_info it8213_chipset = {
.name = DRV_NAME,
.enablebits = { {0x41, 0x80, 0x80} },
.port_ops = &it8213_port_ops,
@@ -177,7 +177,7 @@ static const struct ide_port_info it8213_chipset __devinitconst = {
* standard helper functions to do almost all the work for us.
*/
-static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &it8213_chipset, NULL);
}
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
index c5611dbca342..f01ba4606be0 100644
--- a/drivers/ide/it821x.c
+++ b/drivers/ide/it821x.c
@@ -528,7 +528,7 @@ static struct ide_dma_ops it821x_pass_through_dma_ops = {
* ide DMA handlers appropriately
*/
-static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
+static void init_hwif_it821x(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
.cable_detect = it821x_cable_detect,
};
-static const struct ide_port_info it821x_chipset __devinitconst = {
+static const struct ide_port_info it821x_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_it821x,
.init_hwif = init_hwif_it821x,
@@ -647,7 +647,7 @@ static const struct ide_port_info it821x_chipset __devinitconst = {
* We then use the IDE PCI generic helper to do most of the work.
*/
-static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct it821x_dev *itdevs;
int rc;
@@ -667,7 +667,7 @@ static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_devic
return rc;
}
-static void __devexit it821x_remove(struct pci_dev *dev)
+static void it821x_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct it821x_dev *itdevs = host->host_priv;
@@ -689,7 +689,7 @@ static struct pci_driver it821x_pci_driver = {
.name = "ITE821x IDE",
.id_table = it821x_pci_tbl,
.probe = it821x_init_one,
- .remove = __devexit_p(it821x_remove),
+ .remove = it821x_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
index efddd7d9f92d..ae6480dcbadf 100644
--- a/drivers/ide/jmicron.c
+++ b/drivers/ide/jmicron.c
@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
.cable_detect = jmicron_cable_detect,
};
-static const struct ide_port_info jmicron_chipset __devinitconst = {
+static const struct ide_port_info jmicron_chipset = {
.name = DRV_NAME,
.enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
.port_ops = &jmicron_port_ops,
@@ -120,7 +120,7 @@ static const struct ide_port_info jmicron_chipset __devinitconst = {
* We then use the IDE PCI generic helper to do most of the work.
*/
-static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &jmicron_chipset, NULL);
}
diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
index 73f78d872d55..392fd106edf1 100644
--- a/drivers/ide/ns87415.c
+++ b/drivers/ide/ns87415.c
@@ -96,7 +96,7 @@ static const struct ide_tp_ops superio_tp_ops = {
.output_data = ide_output_data,
};
-static void __devinit superio_init_iops(struct hwif_s *hwif)
+static void superio_init_iops(struct hwif_s *hwif)
{
struct pci_dev *pdev = to_pci_dev(hwif->dev);
u32 dma_stat;
@@ -201,7 +201,7 @@ static int ns87415_dma_end(ide_drive_t *drive)
return (dma_stat & 7) != 4;
}
-static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
+static void init_hwif_ns87415 (ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned int ctrl, using_inta;
@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
.dma_sff_read_status = superio_dma_sff_read_status,
};
-static const struct ide_port_info ns87415_chipset __devinitconst = {
+static const struct ide_port_info ns87415_chipset = {
.name = DRV_NAME,
.init_hwif = init_hwif_ns87415,
.tp_ops = &ns87415_tp_ops,
@@ -302,7 +302,7 @@ static const struct ide_port_info ns87415_chipset __devinitconst = {
IDE_HFLAG_NO_ATAPI_DMA,
};
-static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d = ns87415_chipset;
diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
index 39edc66cb96c..26a45007e535 100644
--- a/drivers/ide/opti621.c
+++ b/drivers/ide/opti621.c
@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
.set_pio_mode = opti621_set_pio_mode,
};
-static const struct ide_port_info opti621_chipset __devinitconst = {
+static const struct ide_port_info opti621_chipset = {
.name = DRV_NAME,
.enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
.port_ops = &opti621_port_ops,
@@ -139,7 +139,7 @@ static const struct ide_port_info opti621_chipset __devinitconst = {
.pio_mask = ATA_PIO4,
};
-static int __devinit opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &opti621_chipset, NULL);
}
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 712c7904d03e..6107cc4ee012 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -220,7 +220,7 @@ static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
palm_bk3710_setpiomode(base, mate, is_slave, cycle_time, pio);
}
-static void __devinit palm_bk3710_chipinit(void __iomem *base)
+static void palm_bk3710_chipinit(void __iomem *base)
{
/*
* REVISIT: the ATA reset signal needs to be managed through a
@@ -282,8 +282,7 @@ static u8 palm_bk3710_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA80;
}
-static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int palm_bk3710_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
@@ -301,7 +300,7 @@ static const struct ide_port_ops palm_bk3710_ports_ops = {
.cable_detect = palm_bk3710_cable_detect,
};
-static struct ide_port_info __devinitdata palm_bk3710_port_info = {
+static struct ide_port_info palm_bk3710_port_info = {
.init_dma = palm_bk3710_init_dma,
.port_ops = &palm_bk3710_ports_ops,
.dma_ops = &sff_dma_ops,
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 2e5ceb62fb3b..df73cbd9387e 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -422,7 +422,7 @@ static int init_chipset_pdcnew(struct pci_dev *dev)
return 0;
}
-static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
+static struct pci_dev *pdc20270_get_dev2(struct pci_dev *dev)
{
struct pci_dev *dev2;
@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
.udma_mask = udma, \
}
-static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
+static const struct ide_port_info pdcnew_chipsets[] = {
/* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
/* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
};
@@ -479,7 +479,7 @@ static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &pdcnew_chipsets[id->driver_data];
struct pci_dev *bridge = dev->bus->self;
@@ -514,7 +514,7 @@ static int __devinit pdc202new_init_one(struct pci_dev *dev, const struct pci_de
return ide_pci_init_one(dev, d, NULL);
}
-static void __devexit pdc202new_remove(struct pci_dev *dev)
+static void pdc202new_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
@@ -539,7 +539,7 @@ static struct pci_driver pdc202new_pci_driver = {
.name = "Promise_IDE",
.id_table = pdc202new_pci_tbl,
.probe = pdc202new_init_one,
- .remove = __devexit_p(pdc202new_remove),
+ .remove = pdc202new_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 563451096812..224ad46d6cb2 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -211,8 +211,7 @@ out:
return 0;
}
-static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
- const char *name)
+static void pdc202ata4_fixup_irq(struct pci_dev *dev, const char *name)
{
if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) {
u8 irq = 0, irq2 = 0;
@@ -270,7 +269,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
.max_sectors = sectors, \
}
-static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
+static const struct ide_port_info pdc202xx_chipsets[] = {
{ /* 0: PDC20246 */
.name = DRV_NAME,
.init_chipset = init_chipset_pdc202xx,
@@ -297,7 +296,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int pdc202xx_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
const struct ide_port_info *d;
u8 idx = id->driver_data;
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index fe0fd60cfc09..a671cead6ae7 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -297,7 +297,7 @@ static u8 piix_cable_detect(ide_hwif_t *hwif)
* capabilities of the hardware.
*/
-static void __devinit init_hwif_piix(ide_hwif_t *hwif)
+static void init_hwif_piix(ide_hwif_t *hwif)
{
if (!hwif->dma_base)
return;
@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
.udma_mask = udma, \
}
-static const struct ide_port_info piix_pci_info[] __devinitconst = {
+static const struct ide_port_info piix_pci_info[] = {
/* 0: MPIIX */
{ /*
* MPIIX actually has only a single IDE channel mapped to
@@ -382,7 +382,7 @@ static const struct ide_port_info piix_pci_info[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
}
@@ -394,7 +394,7 @@ static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_
* they are found, disable use of DMA IDE
*/
-static void __devinit piix_check_450nx(void)
+static void piix_check_450nx(void)
{
struct pci_dev *pdev = NULL;
u16 cfg;
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index e944c7f705f7..bf83d7bb6bc6 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1025,8 +1025,7 @@ static const struct ide_port_info pmac_port_info = {
* Setup, register & probe an IDE channel driven by this driver, this is
* called by one of the 2 probe functions (macio or PCI).
*/
-static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
- struct ide_hw *hw)
+static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
{
struct device_node *np = pmif->node;
const int *bidp;
@@ -1126,7 +1125,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
return rc;
}
-static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
+static void pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
{
int i;
@@ -1139,8 +1138,8 @@ static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
/*
* Attach to a macio probed interface
*/
-static int __devinit
-pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
+static int pmac_ide_macio_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
{
void __iomem *base;
unsigned long regbase;
@@ -1262,8 +1261,8 @@ pmac_ide_macio_resume(struct macio_dev *mdev)
/*
* Attach to a PCI probed interface
*/
-static int __devinit
-pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+static int pmac_ide_pci_attach(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct device_node *np;
pmac_ide_hwif_t *pmif;
@@ -1692,8 +1691,7 @@ static const struct ide_dma_ops pmac_dma_ops = {
* Allocate the data structures needed for using DMA with an interface
* and fill the proper list of functions pointers
*/
-static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c
index 48d976aad7ab..d73c3d10087c 100644
--- a/drivers/ide/rapide.c
+++ b/drivers/ide/rapide.c
@@ -29,8 +29,7 @@ static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base,
hw->irq = irq;
}
-static int __devinit
-rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
{
void __iomem *base;
struct ide_host *host;
@@ -64,7 +63,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit rapide_remove(struct expansion_card *ec)
+static void rapide_remove(struct expansion_card *ec)
{
struct ide_host *host = ecard_get_drvdata(ec);
@@ -82,7 +81,7 @@ static struct ecard_id rapide_ids[] = {
static struct ecard_driver rapide_driver = {
.probe = rapide_probe,
- .remove = __devexit_p(rapide_remove),
+ .remove = rapide_remove,
.id_table = rapide_ids,
.drv = {
.name = "rapide",
diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
index c04173e9fc38..f4b66f7ec9fd 100644
--- a/drivers/ide/rz1000.c
+++ b/drivers/ide/rz1000.c
@@ -22,7 +22,7 @@
#define DRV_NAME "rz1000"
-static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
+static int rz1000_disable_readahead(struct pci_dev *dev)
{
u16 reg;
@@ -38,12 +38,12 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
}
}
-static const struct ide_port_info rz1000_chipset __devinitconst = {
+static const struct ide_port_info rz1000_chipset = {
.name = DRV_NAME,
.host_flags = IDE_HFLAG_NO_DMA,
};
-static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d = rz1000_chipset;
int rc;
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
index d4758ebe77da..a5b701818405 100644
--- a/drivers/ide/sc1200.c
+++ b/drivers/ide/sc1200.c
@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info sc1200_chipset __devinitconst = {
+static const struct ide_port_info sc1200_chipset = {
.name = DRV_NAME,
.port_ops = &sc1200_port_ops,
.dma_ops = &sc1200_dma_ops,
@@ -303,7 +303,7 @@ static const struct ide_port_info sc1200_chipset __devinitconst = {
.udma_mask = ATA_UDMA2,
};
-static int __devinit sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct sc1200_saved_state *ss = NULL;
int rc;
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 970103810021..2a2d188b5d5b 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -585,8 +585,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
* Perform the initial set up for this device.
*/
-static int __devinit init_setup_scc(struct pci_dev *dev,
- const struct ide_port_info *d)
+static int init_setup_scc(struct pci_dev *dev, const struct ide_port_info *d)
{
unsigned long ctl_base;
unsigned long dma_base;
@@ -718,7 +717,7 @@ static void scc_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
*
*/
-static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
+static void init_mmio_iops_scc(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct scc_ports *ports = pci_get_drvdata(dev);
@@ -738,7 +737,7 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
* and then do the MMIO setup.
*/
-static void __devinit init_iops_scc(ide_hwif_t *hwif)
+static void init_iops_scc(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -748,8 +747,7 @@ static void __devinit init_iops_scc(ide_hwif_t *hwif)
init_mmio_iops_scc(hwif);
}
-static int __devinit scc_init_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int scc_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
return ide_allocate_dma_engine(hwif);
}
@@ -768,7 +766,7 @@ static u8 scc_cable_detect(ide_hwif_t *hwif)
* ide DMA handlers appropriately.
*/
-static void __devinit init_hwif_scc(ide_hwif_t *hwif)
+static void init_hwif_scc(ide_hwif_t *hwif)
{
/* PTERADD */
out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
@@ -811,7 +809,7 @@ static const struct ide_dma_ops scc_dma_ops = {
.dma_sff_read_status = scc_dma_sff_read_status,
};
-static const struct ide_port_info scc_chipset __devinitconst = {
+static const struct ide_port_info scc_chipset = {
.name = "sccIDE",
.init_iops = init_iops_scc,
.init_dma = scc_init_dma,
@@ -834,7 +832,7 @@ static const struct ide_port_info scc_chipset __devinitconst = {
* We then use the IDE PCI generic helper to do most of the work.
*/
-static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return init_setup_scc(dev, &scc_chipset);
}
@@ -846,7 +844,7 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
* Called by the PCI code when it removes an SCC PATA controller.
*/
-static void __devexit scc_remove(struct pci_dev *dev)
+static void scc_remove(struct pci_dev *dev)
{
struct scc_ports *ports = pci_get_drvdata(dev);
struct ide_host *host = ports->host;
@@ -869,7 +867,7 @@ static struct pci_driver scc_pci_driver = {
.name = "SCC IDE",
.id_table = scc_pci_tbl,
.probe = scc_init_one,
- .remove = __devexit_p(scc_remove),
+ .remove = scc_remove,
};
static int __init scc_ide_init(void)
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index 24d72ef23df7..a97affca18ab 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
.cable_detect = svwks_cable_detect,
};
-static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
+static const struct ide_port_info serverworks_chipsets[] = {
{ /* 0: OSB4 */
.name = DRV_NAME,
.init_chipset = init_chipset_svwks,
@@ -391,7 +391,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d;
u8 idx = id->driver_data;
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index e3ea591f66d3..a5ca179a83b3 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -307,8 +307,7 @@ static u8 sgiioc4_read_status(ide_hwif_t *hwif)
}
/* Creates a DMA map for the scatter-gather list entries */
-static int __devinit ide_dma_sgiioc4(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
@@ -520,7 +519,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
.dma_lost_irq = sgiioc4_dma_lost_irq,
};
-static const struct ide_port_info sgiioc4_port_info __devinitconst = {
+static const struct ide_port_info sgiioc4_port_info = {
.name = DRV_NAME,
.chipset = ide_pci,
.init_dma = ide_dma_sgiioc4,
@@ -532,7 +531,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitconst = {
.mwdma_mask = ATA_MWDMA2_ONLY,
};
-static int __devinit sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
+static int sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
{
unsigned long cmd_base, irqport;
unsigned long bar0, cmd_phys_base, ctl;
@@ -581,7 +580,7 @@ req_mem_rgn_err:
return rc;
}
-static unsigned int __devinit pci_init_sgiioc4(struct pci_dev *dev)
+static unsigned int pci_init_sgiioc4(struct pci_dev *dev)
{
int ret;
@@ -601,7 +600,7 @@ out:
return ret;
}
-int __devinit ioc4_ide_attach_one(struct ioc4_driver_data *idd)
+int ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
/*
* PCI-RT does not bring out IDE connection.
@@ -613,7 +612,7 @@ int __devinit ioc4_ide_attach_one(struct ioc4_driver_data *idd)
return pci_init_sgiioc4(idd->idd_pdev);
}
-static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
+static struct ioc4_submodule ioc4_ide_submodule = {
.is_name = "IOC4_ide",
.is_owner = THIS_MODULE,
.is_probe = ioc4_ide_attach_one,
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index 46f7e30d3790..6a1849bb476c 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -546,7 +546,7 @@ static int init_chipset_siimage(struct pci_dev *dev)
* extended PRD tables. For better SI3112 support use the libata driver
*/
-static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
+static void init_mmio_iops_siimage(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
@@ -646,7 +646,7 @@ static void sil_quirkproc(ide_drive_t *drive)
* can get the iops right before using them.
*/
-static void __devinit init_iops_siimage(ide_hwif_t *hwif)
+static void init_iops_siimage(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
.udma_mask = ATA_UDMA6, \
}
-static const struct ide_port_info siimage_chipsets[] __devinitconst = {
+static const struct ide_port_info siimage_chipsets[] = {
/* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
/* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
};
@@ -733,8 +733,7 @@ static const struct ide_port_info siimage_chipsets[] __devinitconst = {
* We then use the IDE PCI generic helper to do most of the work.
*/
-static int __devinit siimage_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
void __iomem *ioaddr = NULL;
resource_size_t bar5 = pci_resource_start(dev, 5);
@@ -790,7 +789,7 @@ static int __devinit siimage_init_one(struct pci_dev *dev,
return rc;
}
-static void __devexit siimage_remove(struct pci_dev *dev)
+static void siimage_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
void __iomem *ioaddr = host->host_priv;
@@ -822,7 +821,7 @@ static struct pci_driver siimage_pci_driver = {
.name = "SiI_IDE",
.id_table = siimage_pci_tbl,
.probe = siimage_init_one,
- .remove = __devexit_p(siimage_remove),
+ .remove = siimage_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 09e61b4c5e94..247853ea1368 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -362,7 +362,7 @@ static u8 sis_ata133_udma_filter(ide_drive_t *drive)
return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5;
}
-static int __devinit sis_find_family(struct pci_dev *dev)
+static int sis_find_family(struct pci_dev *dev)
{
struct pci_dev *host;
int i = 0;
@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
.cable_detect = sis_cable_detect,
};
-static const struct ide_port_info sis5513_chipset __devinitconst = {
+static const struct ide_port_info sis5513_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_sis5513,
.enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
@@ -572,7 +572,7 @@ static const struct ide_port_info sis5513_chipset __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d = sis5513_chipset;
u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
@@ -595,7 +595,7 @@ static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_devi
return ide_pci_init_one(dev, &d, NULL);
}
-static void __devexit sis5513_remove(struct pci_dev *dev)
+static void sis5513_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_disable_device(dev);
@@ -613,7 +613,7 @@ static struct pci_driver sis5513_pci_driver = {
.name = "SIS_IDE",
.id_table = sis5513_pci_tbl,
.probe = sis5513_init_one,
- .remove = __devexit_p(sis5513_remove),
+ .remove = sis5513_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index d051cd224bdb..8755df3330a0 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info sl82c105_chipset __devinitconst = {
+static const struct ide_port_info sl82c105_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_sl82c105,
.enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
@@ -313,7 +313,7 @@ static const struct ide_port_info sl82c105_chipset __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d = sl82c105_chipset;
u8 rev = sl82c105_bridge_revision(dev);
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
index 863a5e9283ca..8af92bbb3dcb 100644
--- a/drivers/ide/slc90e66.c
+++ b/drivers/ide/slc90e66.c
@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
.cable_detect = slc90e66_cable_detect,
};
-static const struct ide_port_info slc90e66_chipset __devinitconst = {
+static const struct ide_port_info slc90e66_chipset = {
.name = DRV_NAME,
.enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
.port_ops = &slc90e66_port_ops,
@@ -142,7 +142,8 @@ static const struct ide_port_info slc90e66_chipset __devinitconst = {
.udma_mask = ATA_UDMA4,
};
-static int __devinit slc90e66_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int slc90e66_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &slc90e66_chipset, NULL);
}
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index 17946785ebf6..17e6132b99bf 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -144,7 +144,7 @@ static u8 tc86c001_cable_detect(ide_hwif_t *hwif)
return (scr1 & 0x2000) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
-static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
+static void init_hwif_tc86c001(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long sc_base = pci_resource_start(dev, 5);
@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info tc86c001_chipset __devinitconst = {
+static const struct ide_port_info tc86c001_chipset = {
.name = DRV_NAME,
.init_hwif = init_hwif_tc86c001,
.port_ops = &tc86c001_port_ops,
@@ -203,8 +203,8 @@ static const struct ide_port_info tc86c001_chipset __devinitconst = {
.udma_mask = ATA_UDMA4,
};
-static int __devinit tc86c001_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int tc86c001_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
int rc;
@@ -232,7 +232,7 @@ out:
return rc;
}
-static void __devexit tc86c001_remove(struct pci_dev *dev)
+static void tc86c001_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_release_region(dev, 5);
@@ -249,7 +249,7 @@ static struct pci_driver tc86c001_pci_driver = {
.name = "TC86C001",
.id_table = tc86c001_pci_tbl,
.probe = tc86c001_init_one,
- .remove = __devexit_p(tc86c001_remove),
+ .remove = tc86c001_remove,
};
static int __init tc86c001_ide_init(void)
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index 55ce1b80efcb..7f1af9493f0e 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
.set_dma_mode = triflex_set_mode,
};
-static const struct ide_port_info triflex_device __devinitconst = {
+static const struct ide_port_info triflex_device = {
.name = DRV_NAME,
.enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
.port_ops = &triflex_port_ops,
@@ -101,8 +101,7 @@ static const struct ide_port_info triflex_device __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit triflex_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &triflex_device, NULL);
}
diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
index e494a98a43a9..0069f6ce74cf 100644
--- a/drivers/ide/trm290.c
+++ b/drivers/ide/trm290.c
@@ -231,7 +231,7 @@ static void trm290_dma_host_set(ide_drive_t *drive, int on)
{
}
-static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
+static void init_hwif_trm290(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned int cfg_base = pci_resource_start(dev, 4);
@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
.dma_check = trm290_dma_check,
};
-static const struct ide_port_info trm290_chipset __devinitconst = {
+static const struct ide_port_info trm290_chipset = {
.name = DRV_NAME,
.init_hwif = init_hwif_trm290,
.tp_ops = &trm290_tp_ops,
@@ -338,7 +338,7 @@ static const struct ide_port_info trm290_chipset __devinitconst = {
IDE_HFLAG_NO_LBA48,
};
-static int __devinit trm290_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int trm290_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &trm290_chipset, NULL);
}
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index eb7767864d10..01464f1e2339 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
.cable_detect = via82cxxx_cable_detect,
};
-static const struct ide_port_info via82cxxx_chipset __devinitconst = {
+static const struct ide_port_info via82cxxx_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_via82cxxx,
.enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
@@ -416,7 +416,7 @@ static const struct ide_port_info via82cxxx_chipset __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pci_dev *isa = NULL;
struct via_isa_bridge *via_config;
@@ -489,7 +489,7 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
return rc;
}
-static void __devexit via_remove(struct pci_dev *dev)
+static void via_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct via82cxxx_dev *vdev = host->host_priv;
@@ -514,7 +514,7 @@ static struct pci_driver via_pci_driver = {
.name = "VIA_IDE",
.id_table = via_pci_tbl,
.probe = via_init_one,
- .remove = __devexit_p(via_remove),
+ .remove = via_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index 8489eb58a52c..4732dfc15447 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -18,7 +18,6 @@ config I7300_IDLE_IOAT_CHANNEL
config I7300_IDLE
tristate "Intel chipset idle memory power saving driver"
select I7300_IDLE_IOAT_CHANNEL
- depends on EXPERIMENTAL
help
Enable memory power savings when idle with certain Intel server
chipsets. The chipset must have I/O AT support, such as the
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index fa080ebd568f..ffeebc7e9f1c 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -75,7 +75,7 @@ static unsigned long past_skip;
static struct pci_dev *fbd_dev;
-static spinlock_t i7300_idle_lock;
+static raw_spinlock_t i7300_idle_lock;
static int i7300_idle_active;
static u8 i7300_idle_thrtctl_saved;
@@ -457,7 +457,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
idle_begin_time = ktime_get();
}
- spin_lock_irqsave(&i7300_idle_lock, flags);
+ raw_spin_lock_irqsave(&i7300_idle_lock, flags);
if (val == IDLE_START) {
cpumask_set_cpu(smp_processor_id(), idle_cpumask);
@@ -506,7 +506,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
}
}
end:
- spin_unlock_irqrestore(&i7300_idle_lock, flags);
+ raw_spin_unlock_irqrestore(&i7300_idle_lock, flags);
return 0;
}
@@ -548,7 +548,7 @@ struct debugfs_file_info {
static int __init i7300_idle_init(void)
{
- spin_lock_init(&i7300_idle_lock);
+ raw_spin_lock_init(&i7300_idle_lock);
total_us = 0;
if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index c49c04d9c2b0..5d6675013864 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -74,7 +74,7 @@ static struct cpuidle_driver intel_idle_driver = {
.en_core_tk_irqen = 1,
};
/* intel_idle.max_cstate=0 disables driver */
-static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
+static int max_cstate = CPUIDLE_STATE_MAX - 1;
static unsigned int mwait_substates;
@@ -90,6 +90,7 @@ struct idle_cpu {
* Indicate which enable bits to clear here.
*/
unsigned long auto_demotion_disable_flags;
+ bool disable_promotion_to_c1e;
};
static const struct idle_cpu *icpu;
@@ -109,162 +110,206 @@ static struct cpuidle_state *cpuidle_state_table;
#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
/*
+ * MWAIT takes an 8-bit "hint" in EAX "suggesting"
+ * the C-state (top nibble) and sub-state (bottom nibble)
+ * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
+ *
+ * We store the hint at the top of our "flags" for each state.
+ */
+#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
+#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
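This pair of macros packs the raw MWAIT hint into the top byte of a state's flags word and recovers it at idle-entry time; the hint itself splits into the C-state (top nibble) and sub-state (bottom nibble) described above. A stand-alone round-trip sketch, using local stand-in names (MWAIT2FLG/FLG2MWAIT and a plain unsigned int) rather than the driver's macros and cpuidle structures:

#include <assert.h>
#include <stdio.h>

/* Local stand-ins for the packing used above: hint lives in flags bits 31..24. */
#define MWAIT2FLG(eax)   (((eax) & 0xFF) << 24)
#define FLG2MWAIT(flags) (((flags) >> 24) & 0xFF)

int main(void)
{
	unsigned int hint = 0x32;                     /* e.g. C7s: C-state nibble 3, sub-state 2 */
	unsigned int flags = MWAIT2FLG(hint) | 0x01;  /* 0x01 stands in for the other flag bits */

	unsigned int recovered = FLG2MWAIT(flags);
	unsigned int cstate_nibble = (recovered >> 4) & 0xF;  /* top nibble: C-state */
	unsigned int substate = recovered & 0xF;               /* bottom nibble: sub-state */

	assert(recovered == hint);
	printf("hint 0x%02x -> cstate nibble %u, sub-state %u\n",
	       recovered, cstate_nibble, substate);
	return 0;
}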
+
+/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
- { /* MWAIT C0 */ },
- { /* MWAIT C1 */
+static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
+ {
.name = "C1-NHM",
.desc = "MWAIT 0x00",
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 3,
.target_residency = 6,
.enter = &intel_idle },
- { /* MWAIT C2 */
+ {
+ .name = "C1E-NHM",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle },
+ {
.name = "C3-NHM",
.desc = "MWAIT 0x10",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle },
- { /* MWAIT C3 */
+ {
.name = "C6-NHM",
.desc = "MWAIT 0x20",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
-static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
- { /* MWAIT C0 */ },
- { /* MWAIT C1 */
+static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
+ {
.name = "C1-SNB",
.desc = "MWAIT 0x00",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 1,
- .target_residency = 1,
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle },
+ {
+ .name = "C1E-SNB",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
.enter = &intel_idle },
- { /* MWAIT C2 */
+ {
.name = "C3-SNB",
.desc = "MWAIT 0x10",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
.enter = &intel_idle },
- { /* MWAIT C3 */
+ {
.name = "C6-SNB",
.desc = "MWAIT 0x20",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
.enter = &intel_idle },
- { /* MWAIT C4 */
+ {
.name = "C7-SNB",
.desc = "MWAIT 0x30",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
-static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = {
- { /* MWAIT C0 */ },
- { /* MWAIT C1 */
+static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
+ {
.name = "C1-IVB",
.desc = "MWAIT 0x00",
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle },
- { /* MWAIT C2 */
+ {
+ .name = "C1E-IVB",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle },
+ {
.name = "C3-IVB",
.desc = "MWAIT 0x10",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
.enter = &intel_idle },
- { /* MWAIT C3 */
+ {
.name = "C6-IVB",
.desc = "MWAIT 0x20",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 300,
.enter = &intel_idle },
- { /* MWAIT C4 */
+ {
.name = "C7-IVB",
.desc = "MWAIT 0x30",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 87,
.target_residency = 300,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
-static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
- { /* MWAIT C0 */ },
- { /* MWAIT C1 */
- .name = "C1-ATM",
+static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+ {
+ .name = "C1-HSW",
.desc = "MWAIT 0x00",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 1,
- .target_residency = 4,
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 2,
+ .target_residency = 2,
.enter = &intel_idle },
- { /* MWAIT C2 */
+ {
+ .name = "C1E-HSW",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle },
+ {
+ .name = "C3-HSW",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 33,
+ .target_residency = 100,
+ .enter = &intel_idle },
+ {
+ .name = "C6-HSW",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ {
+ .name = "C7s-HSW",
+ .desc = "MWAIT 0x32",
+ .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 166,
+ .target_residency = 500,
+ .enter = &intel_idle },
+ {
+ .enter = NULL }
+};
+
+static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+ {
+ .name = "C1E-ATM",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle },
+ {
.name = "C2-ATM",
.desc = "MWAIT 0x10",
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle },
- { /* MWAIT C3 */ },
- { /* MWAIT C4 */
+ {
.name = "C4-ATM",
.desc = "MWAIT 0x30",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
.enter = &intel_idle },
- { /* MWAIT C5 */ },
- { /* MWAIT C6 */
+ {
.name = "C6-ATM",
.desc = "MWAIT 0x52",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
-static long get_driver_data(int cstate)
-{
- int driver_data;
- switch (cstate) {
-
- case 1: /* MWAIT C1 */
- driver_data = 0x00;
- break;
- case 2: /* MWAIT C2 */
- driver_data = 0x10;
- break;
- case 3: /* MWAIT C3 */
- driver_data = 0x20;
- break;
- case 4: /* MWAIT C4 */
- driver_data = 0x30;
- break;
- case 5: /* MWAIT C5 */
- driver_data = 0x40;
- break;
- case 6: /* MWAIT C6 */
- driver_data = 0x52;
- break;
- default:
- driver_data = 0x00;
- }
- return driver_data;
-}
-
/**
* intel_idle
* @dev: cpuidle_device
@@ -278,8 +323,7 @@ static int intel_idle(struct cpuidle_device *dev,
{
unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
+ unsigned long eax = flg2MWAIT(state->flags);
unsigned int cstate;
int cpu = smp_processor_id();
@@ -362,10 +406,19 @@ static void auto_demotion_disable(void *dummy)
msr_bits &= ~(icpu->auto_demotion_disable_flags);
wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
+static void c1e_promotion_disable(void *dummy)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits &= ~0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
static const struct idle_cpu idle_cpu_nehalem = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+ .disable_promotion_to_c1e = true,
};
static const struct idle_cpu idle_cpu_atom = {
@@ -379,10 +432,17 @@ static const struct idle_cpu idle_cpu_lincroft = {
static const struct idle_cpu idle_cpu_snb = {
.state_table = snb_cstates,
+ .disable_promotion_to_c1e = true,
};
static const struct idle_cpu idle_cpu_ivb = {
.state_table = ivb_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
+static const struct idle_cpu idle_cpu_hsw = {
+ .state_table = hsw_cstates,
+ .disable_promotion_to_c1e = true,
};
#define ICPU(model, cpu) \
@@ -402,6 +462,9 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x2d, idle_cpu_snb),
ICPU(0x3a, idle_cpu_ivb),
ICPU(0x3e, idle_cpu_ivb),
+ ICPU(0x3c, idle_cpu_hsw),
+ ICPU(0x3f, idle_cpu_hsw),
+ ICPU(0x45, idle_cpu_hsw),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -448,8 +511,6 @@ static int intel_idle_probe(void)
else
on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
- register_cpu_notifier(&cpu_hotplug_notifier);
-
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
@@ -486,32 +547,31 @@ static int intel_idle_cpuidle_driver_init(void)
drv->state_count = 1;
- for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
- int num_substates;
+ for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
+ int num_substates, mwait_hint, mwait_cstate, mwait_substate;
- if (cstate > max_cstate) {
+ if (cpuidle_state_table[cstate].enter == NULL)
+ break;
+
+ if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n",
max_cstate);
break;
}
+ mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
+ mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
+
/* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((cstate) * 4))
+ num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
& MWAIT_SUBSTATE_MASK;
- if (num_substates == 0)
- continue;
- /* is the state not enabled? */
- if (cpuidle_state_table[cstate].enter == NULL) {
- /* does the driver not know about the state? */
- if (*cpuidle_state_table[cstate].name == '\0')
- pr_debug(PREFIX "unaware of model 0x%x"
- " MWAIT %d please"
- " contact lenb@kernel.org",
- boot_cpu_data.x86_model, cstate);
+
+ /* if sub-state in table is not enumerated by CPUID */
+ if ((mwait_substate + 1) > num_substates)
continue;
- }
- if ((cstate > 2) &&
+ if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
" states deeper than C2");
@@ -525,6 +585,9 @@ static int intel_idle_cpuidle_driver_init(void)
if (icpu->auto_demotion_disable_flags)
on_each_cpu(auto_demotion_disable, NULL, 1);
+ if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
+ on_each_cpu(c1e_promotion_disable, NULL, 1);
+
return 0;
}
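The (mwait_cstate + 1) shift in the num_substates computation works because CPUID leaf 5 reports sub-state counts as consecutive 4-bit fields in EDX (C0 in the low nibble, C1 in the next, and so on), so the C-state taken from the hint's top nibble maps to the nibble one position up. A stand-alone illustration of that extraction, with a made-up mwait_substates snapshot in place of a value read from real hardware:

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK 0xF  /* 4 bits of sub-state count per C-state */

/* Hypothetical CPUID.05H:EDX snapshot: C1 has 2 sub-states, C2 has 1, C3 has 4. */
static unsigned int mwait_substates = 0x00004120;

static unsigned int substates_for(unsigned int mwait_cstate)
{
	/* Nibble 0 describes C0, so the hint's C-state nibble N maps to nibble N + 1. */
	return (mwait_substates >> ((mwait_cstate + 1) * 4)) & MWAIT_SUBSTATE_MASK;
}

int main(void)
{
	unsigned int c;

	for (c = 0; c < 4; c++)
		printf("MWAIT C%u: %u sub-states enumerated\n",
		       c + 1, substates_for(c));
	return 0;
}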
@@ -543,25 +606,28 @@ static int intel_idle_cpu_init(int cpu)
dev->state_count = 1;
- for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
- int num_substates;
+ for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
+ int num_substates, mwait_hint, mwait_cstate, mwait_substate;
+
+ if (cpuidle_state_table[cstate].enter == NULL)
+ continue;
- if (cstate > max_cstate) {
+ if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n", max_cstate);
break;
}
+ mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
+ mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
+
/* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((cstate) * 4))
- & MWAIT_SUBSTATE_MASK;
- if (num_substates == 0)
- continue;
- /* is the state not enabled? */
- if (cpuidle_state_table[cstate].enter == NULL)
- continue;
+ num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
+ & MWAIT_SUBSTATE_MASK;
- dev->states_usage[dev->state_count].driver_data =
- (void *)get_driver_data(cstate);
+ /* if sub-state in table is not enumerated by CPUID */
+ if ((mwait_substate + 1) > num_substates)
+ continue;
dev->state_count += 1;
}
@@ -612,6 +678,7 @@ static int __init intel_idle_init(void)
return retval;
}
}
+ register_cpu_notifier(&cpu_hotplug_notifier);

return 0;
}
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index fe4bcd7c5b12..bb594963f91e 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -8,9 +8,48 @@ config HID_SENSOR_ACCEL_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Accelerometers 3D"
help
Say yes here to build support for the HID SENSOR
accelerometers 3D.
+config KXSD9
+ tristate "Kionix KXSD9 Accelerometer Driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Kionix KXSD9 accelerometer.
+ Currently this only supports the device via an SPI interface.
+
+config IIO_ST_ACCEL_3AXIS
+ tristate "STMicroelectronics accelerometers 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_ACCEL_I2C_3AXIS if (I2C)
+ select IIO_ST_ACCEL_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ select IIO_ST_ACCEL_BUFFER if (IIO_TRIGGERED_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics accelerometers:
+ LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
+ LIS331DLH, LSM303DL, LSM303DLM, LSM330.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+ - st_accel (core functions for the driver [it is mandatory]);
+ - st_accel_i2c (necessary for the I2C devices [optional*]);
+ - st_accel_spi (necessary for the SPI devices [optional*]);
+
+ (*) at least one of these is needed for the driver to be usable.
+
+config IIO_ST_ACCEL_I2C_3AXIS
+ tristate
+ depends on IIO_ST_ACCEL_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_ACCEL_SPI_3AXIS
+ tristate
+ depends on IIO_ST_ACCEL_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
endmenu
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 5bc6855a973e..87d8fa264894 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -3,3 +3,12 @@
#
obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
+
+obj-$(CONFIG_IIO_ST_ACCEL_3AXIS) += st_accel.o
+st_accel-y := st_accel_core.o
+st_accel-$(CONFIG_IIO_BUFFER) += st_accel_buffer.o
+
+obj-$(CONFIG_IIO_ST_ACCEL_I2C_3AXIS) += st_accel_i2c.o
+obj-$(CONFIG_IIO_ST_ACCEL_SPI_3AXIS) += st_accel_spi.o
+
+obj-$(CONFIG_KXSD9) += kxsd9.o
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index e67bb912bd19..dd8ea4284934 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -28,7 +28,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include "../common/hid-sensors/hid-sensor-attributes.h"
#include "../common/hid-sensors/hid-sensor-trigger.h"
/*Format: HID-SENSOR-usage_id_in_hex*/
@@ -44,7 +43,7 @@ enum accel_3d_channel {
struct accel_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
+ struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
u32 accel_val[ACCEL_3D_CHANNEL_MAX];
};
@@ -278,7 +277,7 @@ static int accel_3d_parse_report(struct platform_device *pdev,
}
/* Function to initialize the processing for usage id */
-static int __devinit hid_accel_3d_probe(struct platform_device *pdev)
+static int hid_accel_3d_probe(struct platform_device *pdev)
{
int ret = 0;
static const char *name = "accel_3d";
@@ -375,7 +374,7 @@ error_ret:
}
/* Function to deinitialize the processing for usage id */
-static int __devinit hid_accel_3d_remove(struct platform_device *pdev)
+static int hid_accel_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 318331f08d9c..c2229a521ab9 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -94,7 +94,6 @@ error_ret:
static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
{
- struct spi_message msg;
int ret;
struct kxsd9_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
@@ -112,10 +111,7 @@ static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
mutex_lock(&st->buf_lock);
st->tx[0] = KXSD9_READ(address);
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret)
return ret;
return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
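spi_sync_transfer() replaces the open-coded spi_message_init()/spi_message_add_tail()/spi_sync() sequence with a single call over an array of transfers. A minimal sketch of the same two-transfer register read, using a hypothetical example_reg_read() helper with caller-supplied tx/rx buffers in place of the driver's kxsd9_state fields (not a drop-in replacement):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spi/spi.h>

static int example_reg_read(struct spi_device *spi, u8 *tx, u8 *rx, u8 address)
{
	struct spi_transfer xfers[] = {
		{ .tx_buf = tx, .bits_per_word = 8, .len = 1 },
		{ .rx_buf = rx, .bits_per_word = 8, .len = 2 },
	};
	int ret;

	tx[0] = address;	/* command byte; the driver ORs in its read flag via KXSD9_READ() */
	ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
	if (ret)
		return ret;

	return ((u16)rx[0] << 8) | (rx[1] & 0xF0);
}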
@@ -226,7 +222,7 @@ static int kxsd9_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct kxsd9_state *st;
- int ret = 0;
+ int ret;
indio_dev = iio_device_alloc(sizeof(*st));
if (indio_dev == NULL) {
@@ -245,14 +241,14 @@ static int kxsd9_probe(struct spi_device *spi)
indio_dev->info = &kxsd9_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
spi->mode = SPI_MODE_0;
spi_setup(spi);
kxsd9_power_up(st);
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
return 0;
error_free_dev:
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
new file mode 100644
index 000000000000..37949b94377d
--- /dev/null
+++ b/drivers/iio/accel/st_accel.h
@@ -0,0 +1,47 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_ACCEL_H
+#define ST_ACCEL_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
+#define LIS3DH_ACCEL_DEV_NAME "lis3dh"
+#define LSM330D_ACCEL_DEV_NAME "lsm330d_accel"
+#define LSM330DL_ACCEL_DEV_NAME "lsm330dl_accel"
+#define LSM330DLC_ACCEL_DEV_NAME "lsm330dlc_accel"
+#define LIS331DLH_ACCEL_DEV_NAME "lis331dlh"
+#define LSM303DL_ACCEL_DEV_NAME "lsm303dl_accel"
+#define LSM303DLH_ACCEL_DEV_NAME "lsm303dlh_accel"
+#define LSM303DLM_ACCEL_DEV_NAME "lsm303dlm_accel"
+#define LSM330_ACCEL_DEV_NAME "lsm330_accel"
+
+int st_accel_common_probe(struct iio_dev *indio_dev);
+void st_accel_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_accel_allocate_ring(struct iio_dev *indio_dev);
+void st_accel_deallocate_ring(struct iio_dev *indio_dev);
+int st_accel_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_ACCEL_TRIGGER_SET_STATE (&st_accel_trig_set_state)
+#else /* CONFIG_IIO_BUFFER */
+static inline int st_accel_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_accel_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#define ST_ACCEL_TRIGGER_SET_STATE NULL
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_ACCEL_H */
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
new file mode 100644
index 000000000000..6bd82c7f769c
--- /dev/null
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -0,0 +1,114 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_accel.h"
+
+int st_accel_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = trig->private_data;
+
+ return st_sensors_set_dataready_irq(indio_dev, state);
+}
+
+static int st_accel_buffer_preenable(struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_accel_set_enable_error;
+
+ err = iio_sw_buffer_preenable(indio_dev);
+
+st_accel_set_enable_error:
+ return err;
+}
+
+static int st_accel_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ adata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (adata->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = st_sensors_set_axis_enable(indio_dev,
+ (u8)indio_dev->active_scan_mask[0]);
+ if (err < 0)
+ goto st_accel_buffer_postenable_error;
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_accel_buffer_postenable_error;
+
+ return err;
+
+st_accel_buffer_postenable_error:
+ kfree(adata->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+static int st_accel_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_accel_buffer_predisable_error;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+ if (err < 0)
+ goto st_accel_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_accel_buffer_predisable_error:
+ kfree(adata->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
+ .preenable = &st_accel_buffer_preenable,
+ .postenable = &st_accel_buffer_postenable,
+ .predisable = &st_accel_buffer_predisable,
+};
+
+int st_accel_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
+}
+
+void st_accel_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers buffer");
+MODULE_LICENSE("GPL v2");
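
In st_accel_buffer_postenable() above, the low bits of active_scan_mask[0] are written straight into the axis-enable register via the (u8) cast: bit 0 enables X, bit 1 Y, bit 2 Z, per the usual ST register convention. A small standalone illustration of that mapping, using a hypothetical helper rather than the kernel call:

/*
 * Standalone sketch: mapping the low bits of an IIO active scan mask onto
 * an ST-style axis-enable value (bit0=X, bit1=Y, bit2=Z). The helper below
 * is illustrative only, not a kernel API.
 */
#include <stdio.h>

static unsigned char axis_enable_from_scan_mask(unsigned long scan_mask)
{
	/* Only the X/Y/Z bits matter for the axis-enable register. */
	return (unsigned char)(scan_mask & 0x07);
}

int main(void)
{
	unsigned long x_and_z = (1UL << 0) | (1UL << 2);

	printf("axis enable for X+Z scan: 0x%02x\n",
	       axis_enable_from_scan_mask(x_and_z));	/* prints 0x05 */
	return 0;
}
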
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
new file mode 100644
index 000000000000..e0f5a3ceba5e
--- /dev/null
+++ b/drivers/iio/accel/st_accel_core.c
@@ -0,0 +1,500 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_accel.h"
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_ACCEL_DEFAULT_OUT_X_L_ADDR 0x28
+#define ST_ACCEL_DEFAULT_OUT_Y_L_ADDR 0x2a
+#define ST_ACCEL_DEFAULT_OUT_Z_L_ADDR 0x2c
+
+/* FULLSCALE */
+#define ST_ACCEL_FS_AVL_2G 2
+#define ST_ACCEL_FS_AVL_4G 4
+#define ST_ACCEL_FS_AVL_6G 6
+#define ST_ACCEL_FS_AVL_8G 8
+#define ST_ACCEL_FS_AVL_16G 16
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_ACCEL_1_WAI_EXP 0x33
+#define ST_ACCEL_1_ODR_ADDR 0x20
+#define ST_ACCEL_1_ODR_MASK 0xf0
+#define ST_ACCEL_1_ODR_AVL_1HZ_VAL 0x01
+#define ST_ACCEL_1_ODR_AVL_10HZ_VAL 0x02
+#define ST_ACCEL_1_ODR_AVL_25HZ_VAL 0x03
+#define ST_ACCEL_1_ODR_AVL_50HZ_VAL 0x04
+#define ST_ACCEL_1_ODR_AVL_100HZ_VAL 0x05
+#define ST_ACCEL_1_ODR_AVL_200HZ_VAL 0x06
+#define ST_ACCEL_1_ODR_AVL_400HZ_VAL 0x07
+#define ST_ACCEL_1_ODR_AVL_1600HZ_VAL 0x08
+#define ST_ACCEL_1_FS_ADDR 0x23
+#define ST_ACCEL_1_FS_MASK 0x30
+#define ST_ACCEL_1_FS_AVL_2_VAL 0x00
+#define ST_ACCEL_1_FS_AVL_4_VAL 0x01
+#define ST_ACCEL_1_FS_AVL_8_VAL 0x02
+#define ST_ACCEL_1_FS_AVL_16_VAL 0x03
+#define ST_ACCEL_1_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
+#define ST_ACCEL_1_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
+#define ST_ACCEL_1_FS_AVL_8_GAIN IIO_G_TO_M_S_2(4000)
+#define ST_ACCEL_1_FS_AVL_16_GAIN IIO_G_TO_M_S_2(12000)
+#define ST_ACCEL_1_BDU_ADDR 0x23
+#define ST_ACCEL_1_BDU_MASK 0x80
+#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
+#define ST_ACCEL_1_DRDY_IRQ_MASK 0x10
+#define ST_ACCEL_1_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_ACCEL_2_WAI_EXP 0x32
+#define ST_ACCEL_2_ODR_ADDR 0x20
+#define ST_ACCEL_2_ODR_MASK 0x18
+#define ST_ACCEL_2_ODR_AVL_50HZ_VAL 0x00
+#define ST_ACCEL_2_ODR_AVL_100HZ_VAL 0x01
+#define ST_ACCEL_2_ODR_AVL_400HZ_VAL 0x02
+#define ST_ACCEL_2_ODR_AVL_1000HZ_VAL 0x03
+#define ST_ACCEL_2_PW_ADDR 0x20
+#define ST_ACCEL_2_PW_MASK 0xe0
+#define ST_ACCEL_2_FS_ADDR 0x23
+#define ST_ACCEL_2_FS_MASK 0x30
+#define ST_ACCEL_2_FS_AVL_2_VAL 0X00
+#define ST_ACCEL_2_FS_AVL_4_VAL 0X01
+#define ST_ACCEL_2_FS_AVL_8_VAL 0x03
+#define ST_ACCEL_2_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
+#define ST_ACCEL_2_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
+#define ST_ACCEL_2_FS_AVL_8_GAIN IIO_G_TO_M_S_2(3900)
+#define ST_ACCEL_2_BDU_ADDR 0x23
+#define ST_ACCEL_2_BDU_MASK 0x80
+#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
+#define ST_ACCEL_2_DRDY_IRQ_MASK 0x02
+#define ST_ACCEL_2_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 3 */
+#define ST_ACCEL_3_WAI_EXP 0x40
+#define ST_ACCEL_3_ODR_ADDR 0x20
+#define ST_ACCEL_3_ODR_MASK 0xf0
+#define ST_ACCEL_3_ODR_AVL_3HZ_VAL 0x01
+#define ST_ACCEL_3_ODR_AVL_6HZ_VAL 0x02
+#define ST_ACCEL_3_ODR_AVL_12HZ_VAL 0x03
+#define ST_ACCEL_3_ODR_AVL_25HZ_VAL 0x04
+#define ST_ACCEL_3_ODR_AVL_50HZ_VAL 0x05
+#define ST_ACCEL_3_ODR_AVL_100HZ_VAL 0x06
+#define ST_ACCEL_3_ODR_AVL_200HZ_VAL 0x07
+#define ST_ACCEL_3_ODR_AVL_400HZ_VAL 0x08
+#define ST_ACCEL_3_ODR_AVL_800HZ_VAL 0x09
+#define ST_ACCEL_3_ODR_AVL_1600HZ_VAL 0x0a
+#define ST_ACCEL_3_FS_ADDR 0x24
+#define ST_ACCEL_3_FS_MASK 0x38
+#define ST_ACCEL_3_FS_AVL_2_VAL 0X00
+#define ST_ACCEL_3_FS_AVL_4_VAL 0X01
+#define ST_ACCEL_3_FS_AVL_6_VAL 0x02
+#define ST_ACCEL_3_FS_AVL_8_VAL 0x03
+#define ST_ACCEL_3_FS_AVL_16_VAL 0x04
+#define ST_ACCEL_3_FS_AVL_2_GAIN IIO_G_TO_M_S_2(61)
+#define ST_ACCEL_3_FS_AVL_4_GAIN IIO_G_TO_M_S_2(122)
+#define ST_ACCEL_3_FS_AVL_6_GAIN IIO_G_TO_M_S_2(183)
+#define ST_ACCEL_3_FS_AVL_8_GAIN IIO_G_TO_M_S_2(244)
+#define ST_ACCEL_3_FS_AVL_16_GAIN IIO_G_TO_M_S_2(732)
+#define ST_ACCEL_3_BDU_ADDR 0x20
+#define ST_ACCEL_3_BDU_MASK 0x08
+#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
+#define ST_ACCEL_3_DRDY_IRQ_MASK 0x80
+#define ST_ACCEL_3_IG1_EN_ADDR 0x23
+#define ST_ACCEL_3_IG1_EN_MASK 0x08
+#define ST_ACCEL_3_MULTIREAD_BIT false
+
+static const struct iio_chan_spec st_accel_12bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct iio_chan_spec st_accel_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct st_sensors st_accel_sensors[] = {
+ {
+ .wai = ST_ACCEL_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS3DH_ACCEL_DEV_NAME,
+ [1] = LSM303DLHC_ACCEL_DEV_NAME,
+ [2] = LSM330D_ACCEL_DEV_NAME,
+ [3] = LSM330DL_ACCEL_DEV_NAME,
+ [4] = LSM330DLC_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_1_ODR_ADDR,
+ .mask = ST_ACCEL_1_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL, },
+ { 10, ST_ACCEL_1_ODR_AVL_10HZ_VAL, },
+ { 25, ST_ACCEL_1_ODR_AVL_25HZ_VAL, },
+ { 50, ST_ACCEL_1_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_1_ODR_AVL_100HZ_VAL, },
+ { 200, ST_ACCEL_1_ODR_AVL_200HZ_VAL, },
+ { 400, ST_ACCEL_1_ODR_AVL_400HZ_VAL, },
+ { 1600, ST_ACCEL_1_ODR_AVL_1600HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_1_ODR_ADDR,
+ .mask = ST_ACCEL_1_ODR_MASK,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_1_FS_ADDR,
+ .mask = ST_ACCEL_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_1_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_1_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_1_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_8_GAIN,
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = ST_ACCEL_1_FS_AVL_16_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_16_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_1_BDU_ADDR,
+ .mask = ST_ACCEL_1_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
+ .mask = ST_ACCEL_1_DRDY_IRQ_MASK,
+ },
+ .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_ACCEL_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS331DLH_ACCEL_DEV_NAME,
+ [1] = LSM303DL_ACCEL_DEV_NAME,
+ [2] = LSM303DLH_ACCEL_DEV_NAME,
+ [3] = LSM303DLM_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_2_ODR_ADDR,
+ .mask = ST_ACCEL_2_ODR_MASK,
+ .odr_avl = {
+ { 50, ST_ACCEL_2_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_2_ODR_AVL_100HZ_VAL, },
+ { 400, ST_ACCEL_2_ODR_AVL_400HZ_VAL, },
+ { 1000, ST_ACCEL_2_ODR_AVL_1000HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_2_PW_ADDR,
+ .mask = ST_ACCEL_2_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_2_FS_ADDR,
+ .mask = ST_ACCEL_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_2_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_2_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_2_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_8_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_2_BDU_ADDR,
+ .mask = ST_ACCEL_2_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
+ .mask = ST_ACCEL_2_DRDY_IRQ_MASK,
+ },
+ .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_ACCEL_3_WAI_EXP,
+ .sensors_supported = {
+ [0] = LSM330_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_3_ODR_ADDR,
+ .mask = ST_ACCEL_3_ODR_MASK,
+ .odr_avl = {
+ { 3, ST_ACCEL_3_ODR_AVL_3HZ_VAL },
+ { 6, ST_ACCEL_3_ODR_AVL_6HZ_VAL, },
+ { 12, ST_ACCEL_3_ODR_AVL_12HZ_VAL, },
+ { 25, ST_ACCEL_3_ODR_AVL_25HZ_VAL, },
+ { 50, ST_ACCEL_3_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_3_ODR_AVL_100HZ_VAL, },
+ { 200, ST_ACCEL_3_ODR_AVL_200HZ_VAL, },
+ { 400, ST_ACCEL_3_ODR_AVL_400HZ_VAL, },
+ { 800, ST_ACCEL_3_ODR_AVL_800HZ_VAL, },
+ { 1600, ST_ACCEL_3_ODR_AVL_1600HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_3_ODR_ADDR,
+ .mask = ST_ACCEL_3_ODR_MASK,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_3_FS_ADDR,
+ .mask = ST_ACCEL_3_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_3_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_3_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_6G,
+ .value = ST_ACCEL_3_FS_AVL_6_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_6_GAIN,
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_3_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_8_GAIN,
+ },
+ [4] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = ST_ACCEL_3_FS_AVL_16_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_16_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_3_BDU_ADDR,
+ .mask = ST_ACCEL_3_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
+ .mask = ST_ACCEL_3_DRDY_IRQ_MASK,
+ .ig1 = {
+ .en_addr = ST_ACCEL_3_IG1_EN_ADDR,
+ .en_mask = ST_ACCEL_3_IG1_EN_MASK,
+ },
+ },
+ .multi_read_bit = ST_ACCEL_3_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+static int st_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val,
+ int *val2, long mask)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = st_sensors_read_info_raw(indio_dev, ch, val);
+ if (err < 0)
+ goto read_error;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = adata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+read_error:
+ return err;
+}
+
+static int st_accel_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static ST_SENSOR_DEV_ATTR_SAMP_FREQ();
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_accel_scale_available);
+
+static struct attribute *st_accel_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_accel_attribute_group = {
+ .attrs = st_accel_attributes,
+};
+
+static const struct iio_info accel_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_accel_attribute_group,
+ .read_raw = &st_accel_read_raw,
+ .write_raw = &st_accel_write_raw,
+};
+
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_accel_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+};
+#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
+#else
+#define ST_ACCEL_TRIGGER_OPS NULL
+#endif
+
+int st_accel_common_probe(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &accel_info;
+
+ err = st_sensors_check_device_support(indio_dev,
+ ARRAY_SIZE(st_accel_sensors), st_accel_sensors);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ adata->multiread_bit = adata->sensor->multi_read_bit;
+ indio_dev->channels = adata->sensor->ch;
+ indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+ adata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &adata->sensor->fs.fs_avl[0];
+ adata->odr = adata->sensor->odr.odr_avl[0].hz;
+
+ err = st_sensors_init_sensor(indio_dev);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ if (adata->get_irq_data_ready(indio_dev) > 0) {
+ err = st_accel_allocate_ring(indio_dev);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ err = st_sensors_allocate_trigger(indio_dev,
+ ST_ACCEL_TRIGGER_OPS);
+ if (err < 0)
+ goto st_accel_probe_trigger_error;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto st_accel_device_register_error;
+
+ return err;
+
+st_accel_device_register_error:
+ if (adata->get_irq_data_ready(indio_dev) > 0)
+ st_sensors_deallocate_trigger(indio_dev);
+st_accel_probe_trigger_error:
+ if (adata->get_irq_data_ready(indio_dev) > 0)
+ st_accel_deallocate_ring(indio_dev);
+st_accel_common_probe_error:
+ return err;
+}
+EXPORT_SYMBOL(st_accel_common_probe);
+
+void st_accel_common_remove(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (adata->get_irq_data_ready(indio_dev) > 0) {
+ st_sensors_deallocate_trigger(indio_dev);
+ st_accel_deallocate_ring(indio_dev);
+ }
+ iio_device_free(indio_dev);
+}
+EXPORT_SYMBOL(st_accel_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers driver");
+MODULE_LICENSE("GPL v2");
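
st_accel_common_probe() defers device identification to st_sensors_check_device_support(), which presumably reads the WHO_AM_I register and matches both its value and the requested device name against the st_accel_sensors[] table above. A simplified, standalone sketch of that kind of two-level lookup (the demo_* types, names and table entries are illustrative, not the kernel's):

/*
 * Simplified, standalone illustration of matching a WHO_AM_I value plus a
 * device name against a table shaped like st_accel_sensors[]. The real
 * lookup lives in the st_sensors common code; everything here is a demo.
 */
#include <stdio.h>
#include <string.h>

struct demo_sensor {
	unsigned char wai;
	const char *names[3];
};

static const struct demo_sensor demo_table[] = {
	{ 0x33, { "lis3dh", "lsm303dlhc_accel", NULL } },
	{ 0x32, { "lis331dlh", "lsm303dlh_accel", NULL } },
};

static const struct demo_sensor *demo_match(unsigned char wai, const char *name)
{
	size_t i, j;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
		if (demo_table[i].wai != wai)
			continue;
		for (j = 0; demo_table[i].names[j]; j++)
			if (strcmp(demo_table[i].names[j], name) == 0)
				return &demo_table[i];
	}
	return NULL;
}

int main(void)
{
	printf("match: %s\n", demo_match(0x33, "lis3dh") ? "yes" : "no");
	return 0;
}
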
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
new file mode 100644
index 000000000000..ffc9d097e484
--- /dev/null
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -0,0 +1,86 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_accel.h"
+
+static int st_accel_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *adata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*adata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ adata = iio_priv(indio_dev);
+ adata->dev = &client->dev;
+
+ st_sensors_i2c_configure(indio_dev, client, adata);
+
+ err = st_accel_common_probe(indio_dev);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ return 0;
+
+st_accel_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_accel_i2c_remove(struct i2c_client *client)
+{
+ st_accel_common_remove(i2c_get_clientdata(client));
+
+ return 0;
+}
+
+static const struct i2c_device_id st_accel_id_table[] = {
+ { LSM303DLH_ACCEL_DEV_NAME },
+ { LSM303DLHC_ACCEL_DEV_NAME },
+ { LIS3DH_ACCEL_DEV_NAME },
+ { LSM330D_ACCEL_DEV_NAME },
+ { LSM330DL_ACCEL_DEV_NAME },
+ { LSM330DLC_ACCEL_DEV_NAME },
+ { LIS331DLH_ACCEL_DEV_NAME },
+ { LSM303DL_ACCEL_DEV_NAME },
+ { LSM303DLM_ACCEL_DEV_NAME },
+ { LSM330_ACCEL_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
+
+static struct i2c_driver st_accel_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-accel-i2c",
+ },
+ .probe = st_accel_i2c_probe,
+ .remove = st_accel_i2c_remove,
+ .id_table = st_accel_id_table,
+};
+module_i2c_driver(st_accel_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
new file mode 100644
index 000000000000..22b35bfea7d2
--- /dev/null
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -0,0 +1,85 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_accel.h"
+
+static int st_accel_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *adata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*adata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ adata = iio_priv(indio_dev);
+ adata->dev = &spi->dev;
+
+ st_sensors_spi_configure(indio_dev, spi, adata);
+
+ err = st_accel_common_probe(indio_dev);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ return 0;
+
+st_accel_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_accel_spi_remove(struct spi_device *spi)
+{
+ st_accel_common_remove(spi_get_drvdata(spi));
+
+ return 0;
+}
+
+static const struct spi_device_id st_accel_id_table[] = {
+ { LSM303DLH_ACCEL_DEV_NAME },
+ { LSM303DLHC_ACCEL_DEV_NAME },
+ { LIS3DH_ACCEL_DEV_NAME },
+ { LSM330D_ACCEL_DEV_NAME },
+ { LSM330DL_ACCEL_DEV_NAME },
+ { LSM330DLC_ACCEL_DEV_NAME },
+ { LIS331DLH_ACCEL_DEV_NAME },
+ { LSM303DL_ACCEL_DEV_NAME },
+ { LSM303DLM_ACCEL_DEV_NAME },
+ { LSM330_ACCEL_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_accel_id_table);
+
+static struct spi_driver st_accel_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-accel-spi",
+ },
+ .probe = st_accel_spi_probe,
+ .remove = st_accel_spi_remove,
+ .id_table = st_accel_id_table,
+};
+module_spi_driver(st_accel_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 961b8d0a4bac..e372257a8494 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -100,10 +100,8 @@ config LP8788_ADC
config MAX1363
tristate "Maxim max1363 ADC driver"
depends on I2C
- select IIO_TRIGGER
- select MAX1363_RING_BUFFER
select IIO_BUFFER
- select IIO_KFIFO_BUF
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for many Maxim i2c analog to digital
converters (ADC). (max1361, max1362, max1363, max1364, max1036,
@@ -125,4 +123,18 @@ config TI_ADC081C
This driver can also be built as a module. If so, the module will be
called ti-adc081c.
+config TI_AM335X_ADC

+ tristate "TI's ADC driver"
+ depends on MFD_TI_AM335X_TSCADC
+ help
+	  Say yes here to build support for the Texas Instruments ADC
+	  driver, which is also an MFD client.
+
+config VIPERBOARD_ADC
+ tristate "Viperboard ADC support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the ADC part of the Nano River
+ Technologies Viperboard.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 472fd7cd2417..2d5f10080d8d 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -13,4 +13,5 @@ obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
-
+obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
+obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index a6f4fc5f8201..bbad9b94cd75 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -367,7 +367,7 @@ static const struct ad7266_chan_info ad7266_chan_infos[] = {
},
};
-static void __devinit ad7266_init_channels(struct iio_dev *indio_dev)
+static void ad7266_init_channels(struct iio_dev *indio_dev)
{
struct ad7266_state *st = iio_priv(indio_dev);
bool is_differential, is_signed;
@@ -391,7 +391,7 @@ static const char * const ad7266_gpio_labels[] = {
"AD0", "AD1", "AD2",
};
-static int __devinit ad7266_probe(struct spi_device *spi)
+static int ad7266_probe(struct spi_device *spi)
{
struct ad7266_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -411,7 +411,11 @@ static int __devinit ad7266_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- st->vref_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref_uv = ret;
} else {
/* Use internal reference */
st->vref_uv = 2500000;
@@ -494,7 +498,7 @@ error_put_reg:
return ret;
}
-static int __devexit ad7266_remove(struct spi_device *spi)
+static int ad7266_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7266_state *st = iio_priv(indio_dev);
@@ -525,7 +529,7 @@ static struct spi_driver ad7266_driver = {
.owner = THIS_MODULE,
},
.probe = ad7266_probe,
- .remove = __devexit_p(ad7266_remove),
+ .remove = ad7266_remove,
.id_table = ad7266_id,
};
module_spi_driver(ad7266_driver);
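
Beyond dropping the __devinit/__devexit annotations, the one functional change in this hunk is that regulator_get_voltage() can return a negative errno, so its result is now checked before being stored in vref_uv. The same check-then-store idiom in a standalone form, with a stand-in getter rather than the real regulator API:

/*
 * Standalone sketch of the return-value idiom the ad7266 hunk switches to:
 * a getter that may return a negative errno is checked before its result
 * is stored. demo_get_voltage() is a stand-in, not the regulator API.
 */
#include <stdio.h>
#include <errno.h>

static int demo_get_voltage(int fail)
{
	return fail ? -EINVAL : 2500000;	/* microvolts, or -errno */
}

static int demo_probe(int fail, int *vref_uv)
{
	int ret = demo_get_voltage(fail);

	if (ret < 0)
		return ret;	/* propagate the error, vref_uv untouched */
	*vref_uv = ret;
	return 0;
}

int main(void)
{
	int vref = 0;

	printf("ok path: %d, vref=%d\n", demo_probe(0, &vref), vref);
	printf("err path: %d\n", demo_probe(1, &vref));
	return 0;
}
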
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 2364807a5d6c..b34d754994d5 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -292,7 +292,7 @@ static const struct iio_info ad7298_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad7298_probe(struct spi_device *spi)
+static int ad7298_probe(struct spi_device *spi)
{
struct ad7298_platform_data *pdata = spi->dev.platform_data;
struct ad7298_state *st;
@@ -370,7 +370,7 @@ error_free:
return ret;
}
-static int __devexit ad7298_remove(struct spi_device *spi)
+static int ad7298_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7298_state *st = iio_priv(indio_dev);
@@ -398,7 +398,7 @@ static struct spi_driver ad7298_driver = {
.owner = THIS_MODULE,
},
.probe = ad7298_probe,
- .remove = __devexit_p(ad7298_remove),
+ .remove = ad7298_remove,
.id_table = ad7298_id,
};
module_spi_driver(ad7298_driver);
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 330248bfebae..1491fa6debb2 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -207,7 +207,7 @@ static const struct iio_info ad7476_info = {
.read_raw = &ad7476_read_raw,
};
-static int __devinit ad7476_probe(struct spi_device *spi)
+static int ad7476_probe(struct spi_device *spi)
{
struct ad7476_state *st;
struct iio_dev *indio_dev;
@@ -277,7 +277,7 @@ error_ret:
return ret;
}
-static int __devexit ad7476_remove(struct spi_device *spi)
+static int ad7476_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7476_state *st = iio_priv(indio_dev);
@@ -322,7 +322,7 @@ static struct spi_driver ad7476_driver = {
.owner = THIS_MODULE,
},
.probe = ad7476_probe,
- .remove = __devexit_p(ad7476_remove),
+ .remove = ad7476_remove,
.id_table = ad7476_id,
};
module_spi_driver(ad7476_driver);
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index e93740843b2b..5e8d1da6887f 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -325,8 +325,8 @@ static const struct iio_info ad7791_no_filter_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad7791_setup(struct ad7791_state *st,
- struct ad7791_platform_data *pdata)
+static int ad7791_setup(struct ad7791_state *st,
+ struct ad7791_platform_data *pdata)
{
/* Set to poweron-reset default values */
st->mode = AD7791_MODE_BUFFER;
@@ -349,7 +349,7 @@ static int __devinit ad7791_setup(struct ad7791_state *st,
st->mode);
}
-static int __devinit ad7791_probe(struct spi_device *spi)
+static int ad7791_probe(struct spi_device *spi)
{
struct ad7791_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -418,7 +418,7 @@ err_iio_free:
return ret;
}
-static int __devexit ad7791_remove(struct spi_device *spi)
+static int ad7791_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7791_state *st = iio_priv(indio_dev);
@@ -450,7 +450,7 @@ static struct spi_driver ad7791_driver = {
.owner = THIS_MODULE,
},
.probe = ad7791_probe,
- .remove = __devexit_p(ad7791_remove),
+ .remove = ad7791_remove,
.id_table = ad7791_spi_ids,
};
module_spi_driver(ad7791_driver);
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 81153fafac7a..a33d5cd1a536 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -233,7 +233,7 @@ static const struct iio_info ad7887_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad7887_probe(struct spi_device *spi)
+static int ad7887_probe(struct spi_device *spi)
{
struct ad7887_platform_data *pdata = spi->dev.platform_data;
struct ad7887_state *st;
@@ -340,7 +340,7 @@ error_free:
return ret;
}
-static int __devexit ad7887_remove(struct spi_device *spi)
+static int ad7887_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7887_state *st = iio_priv(indio_dev);
@@ -368,7 +368,7 @@ static struct spi_driver ad7887_driver = {
.owner = THIS_MODULE,
},
.probe = ad7887_probe,
- .remove = __devexit_p(ad7887_remove),
+ .remove = ad7887_remove,
.id_table = ad7887_id,
};
module_spi_driver(ad7887_driver);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 03b85940f4ba..83c836ba600f 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -80,7 +80,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
*timestamp = pf->timestamp;
}
- iio_push_to_buffers(indio_dev, (u8 *)st->buffer);
+ iio_push_to_buffers(idev, (u8 *)st->buffer);
iio_trigger_notify_done(idev->trig);
@@ -514,7 +514,7 @@ static const struct iio_info at91_adc_info = {
.read_raw = &at91_adc_read_raw,
};
-static int __devinit at91_adc_probe(struct platform_device *pdev)
+static int at91_adc_probe(struct platform_device *pdev)
{
unsigned int prsc, mstrclk, ticks, adc_clk;
int ret;
@@ -557,9 +557,9 @@ static int __devinit at91_adc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- st->reg_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!st->reg_base) {
- ret = -ENOMEM;
+ st->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(st->reg_base)) {
+ ret = PTR_ERR(st->reg_base);
goto error_free_device;
}
@@ -678,7 +678,7 @@ error_ret:
return ret;
}
-static int __devexit at91_adc_remove(struct platform_device *pdev)
+static int at91_adc_remove(struct platform_device *pdev)
{
struct iio_dev *idev = platform_get_drvdata(pdev);
struct at91_adc_state *st = iio_priv(idev);
@@ -702,7 +702,7 @@ MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
static struct platform_driver at91_adc_driver = {
.probe = at91_adc_probe,
- .remove = __devexit_p(at91_adc_remove),
+ .remove = at91_adc_remove,
.driver = {
.name = "at91_adc",
.of_match_table = of_match_ptr(at91_adc_dt_ids),
diff --git a/drivers/iio/adc/lp8788_adc.c b/drivers/iio/adc/lp8788_adc.c
index a93aaf0bb841..763f57565ee4 100644
--- a/drivers/iio/adc/lp8788_adc.c
+++ b/drivers/iio/adc/lp8788_adc.c
@@ -179,7 +179,7 @@ static int lp8788_iio_map_register(struct iio_dev *indio_dev,
ret = iio_map_array_register(indio_dev, map);
if (ret) {
- dev_err(adc->lp->dev, "iio map err: %d\n", ret);
+ dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
return ret;
}
@@ -187,13 +187,7 @@ static int lp8788_iio_map_register(struct iio_dev *indio_dev,
return 0;
}
-static inline void lp8788_iio_map_unregister(struct iio_dev *indio_dev,
- struct lp8788_adc *adc)
-{
- iio_map_array_unregister(indio_dev, adc->map);
-}
-
-static int __devinit lp8788_adc_probe(struct platform_device *pdev)
+static int lp8788_adc_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
struct iio_dev *indio_dev;
@@ -208,13 +202,14 @@ static int __devinit lp8788_adc_probe(struct platform_device *pdev)
adc->lp = lp;
platform_set_drvdata(pdev, indio_dev);
+ indio_dev->dev.of_node = pdev->dev.of_node;
ret = lp8788_iio_map_register(indio_dev, lp->pdata, adc);
if (ret)
goto err_iio_map;
mutex_init(&adc->lock);
- indio_dev->dev.parent = lp->dev;
+ indio_dev->dev.parent = &pdev->dev;
indio_dev->name = pdev->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &lp8788_adc_info;
@@ -223,26 +218,25 @@ static int __devinit lp8788_adc_probe(struct platform_device *pdev)
ret = iio_device_register(indio_dev);
if (ret) {
- dev_err(lp->dev, "iio dev register err: %d\n", ret);
+ dev_err(&pdev->dev, "iio dev register err: %d\n", ret);
goto err_iio_device;
}
return 0;
err_iio_device:
- lp8788_iio_map_unregister(indio_dev, adc);
+ iio_map_array_unregister(indio_dev);
err_iio_map:
iio_device_free(indio_dev);
return ret;
}
-static int __devexit lp8788_adc_remove(struct platform_device *pdev)
+static int lp8788_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct lp8788_adc *adc = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- lp8788_iio_map_unregister(indio_dev, adc);
+ iio_map_array_unregister(indio_dev);
iio_device_free(indio_dev);
return 0;
@@ -250,7 +244,7 @@ static int __devexit lp8788_adc_remove(struct platform_device *pdev)
static struct platform_driver lp8788_adc_driver = {
.probe = lp8788_adc_probe,
- .remove = __devexit_p(lp8788_adc_remove),
+ .remove = lp8788_adc_remove,
.driver = {
.name = LP8788_DEV_ADC,
.owner = THIS_MODULE,
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 1e84b5b55093..6c1cfb74bdfc 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -39,6 +39,7 @@
#include <linux/iio/driver.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#define MAX1363_SETUP_BYTE(a) ((a) | 0x80)
@@ -55,7 +56,7 @@
#define MAX1363_SETUP_POWER_UP_INT_REF 0x10
#define MAX1363_SETUP_POWER_DOWN_INT_REF 0x00
-/* think about includeing max11600 etc - more settings */
+/* think about including max11600 etc - more settings */
#define MAX1363_SETUP_EXT_CLOCK 0x08
#define MAX1363_SETUP_INT_CLOCK 0x00
#define MAX1363_SETUP_UNIPOLAR 0x00
@@ -86,7 +87,7 @@
/* max123{6-9} only */
#define MAX1236_SCAN_MID_TO_CHANNEL 0x40
-/* max1363 only - merely part of channel selects or don't care for others*/
+/* max1363 only - merely part of channel selects or don't care for others */
#define MAX1363_CONFIG_EN_MON_MODE_READ 0x18
#define MAX1363_CHANNEL_SEL(a) ((a) << 1)
@@ -133,7 +134,7 @@ enum max1363_modes {
* @mode_list: array of available scan modes
* @default_mode: the scan mode in which the chip starts up
* @int_vref_mv: the internal reference voltage
- * @num_channels: number of channels
+ * @num_modes: number of modes
* @bits: accuracy of the adc in bits
*/
struct max1363_chip_info {
@@ -152,7 +153,7 @@ struct max1363_chip_info {
* @client: i2c_client
* @setupbyte: cache of current device setup byte
* @configbyte: cache of current device config byte
- * @chip_info: chip model specific constants, available modes etc
+ * @chip_info: chip model specific constants, available modes, etc.
* @current_mode: the scan mode of this chip
* @requestedmask: a valid requested set of channels
* @reg: supply regulator
@@ -162,6 +163,8 @@ struct max1363_chip_info {
* @mask_low: bitmask for enabled low thresholds
* @thresh_high: high threshold values
* @thresh_low: low threshold values
+ * @vref: Reference voltage regulator
+ * @vref_uv: Actual (external or internal) reference voltage
*/
struct max1363_state {
struct i2c_client *client;
@@ -181,6 +184,8 @@ struct max1363_state {
/* 4x unipolar first then the fours bipolar ones */
s16 thresh_high[8];
s16 thresh_low[8];
+ struct regulator *vref;
+ u32 vref_uv;
};
#define MAX1363_MODE_SINGLE(_num, _mask) { \
@@ -293,7 +298,7 @@ static const struct max1363_mode max1363_mode_table[] = {
static const struct max1363_mode
*max1363_match_mode(const unsigned long *mask,
-const struct max1363_chip_info *ci)
+ const struct max1363_chip_info *ci)
{
int i;
if (mask)
@@ -334,7 +339,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
{
int ret = 0;
s32 data;
- char rxbuf[2];
+ u8 rxbuf[2];
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
@@ -366,7 +371,8 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
ret = data;
goto error_ret;
}
- data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
+ data = (rxbuf[1] | rxbuf[0] << 8) &
+ ((1 << st->chip_info->bits) - 1);
} else {
/* Get reading */
data = i2c_master_recv(client, rxbuf, 1);
@@ -391,6 +397,8 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
{
struct max1363_state *st = iio_priv(indio_dev);
int ret;
+ unsigned long scale_uv;
+
switch (m) {
case IIO_CHAN_INFO_RAW:
ret = max1363_read_single_chan(indio_dev, chan, val, m);
@@ -398,16 +406,10 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- if ((1 << (st->chip_info->bits + 1)) >
- st->chip_info->int_vref_mv) {
- *val = 0;
- *val2 = 500000;
- return IIO_VAL_INT_PLUS_MICRO;
- } else {
- *val = (st->chip_info->int_vref_mv)
- >> st->chip_info->bits;
- return IIO_VAL_INT;
- }
+ scale_uv = st->vref_uv >> st->chip_info->bits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -1388,13 +1390,17 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
static int max1363_initial_setup(struct max1363_state *st)
{
- st->setupbyte = MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD
- | MAX1363_SETUP_POWER_UP_INT_REF
- | MAX1363_SETUP_INT_CLOCK
+ st->setupbyte = MAX1363_SETUP_INT_CLOCK
| MAX1363_SETUP_UNIPOLAR
| MAX1363_SETUP_NORESET;
- /* Set scan mode writes the config anyway so wait until then*/
+ if (st->vref)
+ st->setupbyte |= MAX1363_SETUP_AIN3_IS_REF_EXT_TO_REF;
+ else
+ st->setupbyte |= MAX1363_SETUP_POWER_UP_INT_REF
+ | MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_INT;
+
+ /* Set scan mode writes the config anyway so wait until then */
st->setupbyte = MAX1363_SETUP_BYTE(st->setupbyte);
st->current_mode = &max1363_mode_table[st->chip_info->default_mode];
st->configbyte = MAX1363_CONFIG_BYTE(st->configbyte);
@@ -1402,14 +1408,15 @@ static int max1363_initial_setup(struct max1363_state *st)
return max1363_set_scan_mode(st);
}
-static int __devinit max1363_alloc_scan_masks(struct iio_dev *indio_dev)
+static int max1363_alloc_scan_masks(struct iio_dev *indio_dev)
{
struct max1363_state *st = iio_priv(indio_dev);
unsigned long *masks;
int i;
- masks = kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
- (st->chip_info->num_modes + 1), GFP_KERNEL);
+ masks = devm_kzalloc(&indio_dev->dev,
+ BITS_TO_LONGS(MAX1363_MAX_CHANNELS) * sizeof(long) *
+ (st->chip_info->num_modes + 1), GFP_KERNEL);
if (!masks)
return -ENOMEM;
@@ -1423,7 +1430,6 @@ static int __devinit max1363_alloc_scan_masks(struct iio_dev *indio_dev)
return 0;
}
-
static irqreturn_t max1363_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -1483,54 +1489,13 @@ static const struct iio_buffer_setup_ops max1363_buffered_setup_ops = {
.predisable = &iio_triggered_buffer_predisable,
};
-static int max1363_register_buffered_funcs_and_init(struct iio_dev *indio_dev)
-{
- struct max1363_state *st = iio_priv(indio_dev);
- int ret = 0;
-
- indio_dev->buffer = iio_kfifo_allocate(indio_dev);
- if (!indio_dev->buffer) {
- ret = -ENOMEM;
- goto error_ret;
- }
- indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
- &max1363_trigger_handler,
- IRQF_ONESHOT,
- indio_dev,
- "%s_consumer%d",
- st->client->name,
- indio_dev->id);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_deallocate_sw_rb;
- }
- /* Buffer functions - here trigger setup related */
- indio_dev->setup_ops = &max1363_buffered_setup_ops;
-
- /* Flag that polled buffering is possible */
- indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
-
- return 0;
-
-error_deallocate_sw_rb:
- iio_kfifo_free(indio_dev->buffer);
-error_ret:
- return ret;
-}
-
-static void max1363_buffer_cleanup(struct iio_dev *indio_dev)
-{
- /* ensure that the trigger has been detached */
- iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_kfifo_free(indio_dev->buffer);
-}
-
-static int __devinit max1363_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max1363_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
int ret;
struct max1363_state *st;
struct iio_dev *indio_dev;
+ struct regulator *vref;
indio_dev = iio_device_alloc(sizeof(struct max1363_state));
if (indio_dev == NULL) {
@@ -1538,13 +1503,14 @@ static int __devinit max1363_probe(struct i2c_client *client,
goto error_out;
}
+ indio_dev->dev.of_node = client->dev.of_node;
ret = iio_map_array_register(indio_dev, client->dev.platform_data);
if (ret < 0)
goto error_free_device;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&client->dev, "vcc");
+ st->reg = devm_regulator_get(&client->dev, "vcc");
if (IS_ERR(st->reg)) {
ret = PTR_ERR(st->reg);
goto error_unregister_map;
@@ -1552,7 +1518,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ goto error_unregister_map;
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
@@ -1560,35 +1526,45 @@ static int __devinit max1363_probe(struct i2c_client *client,
st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->client = client;
+ st->vref_uv = st->chip_info->int_vref_mv * 1000;
+ vref = devm_regulator_get(&client->dev, "vref");
+ if (!IS_ERR(vref)) {
+ int vref_uv;
+
+ ret = regulator_enable(vref);
+ if (ret)
+ goto error_disable_reg;
+ st->vref = vref;
+ vref_uv = regulator_get_voltage(vref);
+ if (vref_uv <= 0) {
+ ret = -EINVAL;
+ goto error_disable_reg;
+ }
+ st->vref_uv = vref_uv;
+ }
+
ret = max1363_alloc_scan_masks(indio_dev);
if (ret)
goto error_disable_reg;
- /* Estabilish that the iio_dev is a child of the i2c device */
+ /* Establish that the iio_dev is a child of the i2c device */
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
indio_dev->info = st->chip_info->info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channels;
- indio_dev->num_channels = st->chip_info->num_channels;
ret = max1363_initial_setup(st);
if (ret < 0)
- goto error_free_available_scan_masks;
-
- ret = max1363_register_buffered_funcs_and_init(indio_dev);
- if (ret)
- goto error_free_available_scan_masks;
+ goto error_disable_reg;
- ret = iio_buffer_register(indio_dev,
- st->chip_info->channels,
- st->chip_info->num_channels);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &max1363_trigger_handler, &max1363_buffered_setup_ops);
if (ret)
- goto error_cleanup_buffer;
+ goto error_disable_reg;
if (client->irq) {
- ret = request_threaded_irq(st->client->irq,
+ ret = devm_request_threaded_irq(&client->dev, st->client->irq,
NULL,
&max1363_event_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -1601,45 +1577,35 @@ static int __devinit max1363_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto error_free_irq;
+ goto error_uninit_buffer;
return 0;
-error_free_irq:
- free_irq(st->client->irq, indio_dev);
+
error_uninit_buffer:
- iio_buffer_unregister(indio_dev);
-error_cleanup_buffer:
- max1363_buffer_cleanup(indio_dev);
-error_free_available_scan_masks:
- kfree(indio_dev->available_scan_masks);
-error_unregister_map:
- iio_map_array_unregister(indio_dev, client->dev.platform_data);
+ iio_triggered_buffer_cleanup(indio_dev);
error_disable_reg:
+ if (st->vref)
+ regulator_disable(st->vref);
regulator_disable(st->reg);
-error_put_reg:
- regulator_put(st->reg);
+error_unregister_map:
+ iio_map_array_unregister(indio_dev);
error_free_device:
iio_device_free(indio_dev);
error_out:
return ret;
}
-static int __devexit max1363_remove(struct i2c_client *client)
+static int max1363_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct max1363_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (client->irq)
- free_irq(st->client->irq, indio_dev);
- iio_buffer_unregister(indio_dev);
- max1363_buffer_cleanup(indio_dev);
- kfree(indio_dev->available_scan_masks);
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_map_array_unregister(indio_dev, client->dev.platform_data);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (st->vref)
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+ iio_map_array_unregister(indio_dev);
iio_device_free(indio_dev);
return 0;
@@ -1690,7 +1656,7 @@ static struct i2c_driver max1363_driver = {
.name = "max1363",
},
.probe = max1363_probe,
- .remove = __devexit_p(max1363_remove),
+ .remove = max1363_remove,
.id_table = max1363_id,
};
module_i2c_driver(max1363_driver);
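
With the vref regulator support added above, the reported scale is now derived from the actual reference voltage: scale_uv = vref_uv >> bits, split into an INT_PLUS_MICRO pair of millivolts per LSB. A standalone worked example of that arithmetic; with the 2.5 V internal reference and a 12-bit converter it comes out to roughly 0.610 mV per LSB:

/*
 * Standalone restatement of the scale arithmetic added to
 * max1363_read_raw(); the helper name is illustrative, not the kernel
 * function itself.
 */
#include <stdio.h>

static void max1363_style_scale(unsigned long vref_uv, unsigned int bits,
				int *val, int *val2)
{
	unsigned long scale_uv = vref_uv >> bits;	/* uV per LSB */

	*val = scale_uv / 1000;			/* integer millivolts */
	*val2 = (scale_uv % 1000) * 1000;	/* fractional mV, micro units */
}

int main(void)
{
	int val, val2;

	/* 2.5 V internal reference, 12-bit converter */
	max1363_style_scale(2500000, 12, &val, &val2);
	printf("scale = %d.%06d mV/LSB\n", val, val2);	/* 0.610000 */
	return 0;
}
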
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
new file mode 100644
index 000000000000..cd030e100c39
--- /dev/null
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -0,0 +1,260 @@
+/*
+ * TI ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/platform_data/ti_am335x_adc.h>
+
+struct tiadc_device {
+ struct ti_tscadc_dev *mfd_tscadc;
+ int channels;
+};
+
+static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
+{
+ return readl(adc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
+ unsigned int val)
+{
+ writel(val, adc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void tiadc_step_config(struct tiadc_device *adc_dev)
+{
+ unsigned int stepconfig;
+ int i, channels = 0, steps;
+
+ /*
+	 * There are 16 configurable steps and 8 analog input
+	 * lines available, shared between the touchscreen and the ADC.
+	 *
+	 * The ADC uses the steps backwards, i.e. from 16 towards 0,
+	 * depending on the number of input lines it needs.
+	 * The channel value selects which analog input
+	 * is fed to the ADC to be digitized.
+ */
+
+ steps = TOTAL_STEPS - adc_dev->channels;
+ channels = TOTAL_CHANNELS - adc_dev->channels;
+
+ stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1;
+
+ for (i = (steps + 1); i <= TOTAL_STEPS; i++) {
+ tiadc_writel(adc_dev, REG_STEPCONFIG(i),
+ stepconfig | STEPCONFIG_INP(channels));
+ tiadc_writel(adc_dev, REG_STEPDELAY(i),
+ STEPCONFIG_OPENDLY);
+ channels++;
+ }
+ tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB);
+}
+
+static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
+{
+ struct iio_chan_spec *chan_array;
+ int i;
+
+ indio_dev->num_channels = channels;
+ chan_array = kcalloc(indio_dev->num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
+
+ if (chan_array == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < (indio_dev->num_channels); i++) {
+ struct iio_chan_spec *chan = chan_array + i;
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->channel = i;
+ chan->info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT;
+ }
+
+ indio_dev->channels = chan_array;
+
+ return indio_dev->num_channels;
+}
+
+static void tiadc_channels_remove(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->channels);
+}
+
+static int tiadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int i;
+ unsigned int fifo1count, readx1;
+
+ /*
+ * When the sub-system is first enabled,
+ * the sequencer will always start with the
+ * lowest step (1) and continue until step (16).
+	 * For example, if 4 ADC channels are enabled but
+	 * only 1 of them is currently in use, the
+	 * sequencer still runs all 4 steps,
+	 * producing 3 unwanted samples,
+	 * which therefore need to be flushed out.
+ */
+
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++) {
+ readx1 = tiadc_readl(adc_dev, REG_FIFO1);
+ if (i == chan->channel)
+ *val = readx1 & 0xfff;
+ }
+ tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB);
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info tiadc_info = {
+ .read_raw = &tiadc_read_raw,
+};
+
+static int tiadc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct tiadc_device *adc_dev;
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct mfd_tscadc_board *pdata;
+ int err;
+
+ pdata = tscadc_dev->dev->platform_data;
+ if (!pdata || !pdata->adc_init) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ indio_dev = iio_device_alloc(sizeof(struct tiadc_device));
+ if (indio_dev == NULL) {
+ dev_err(&pdev->dev, "failed to allocate iio device\n");
+ err = -ENOMEM;
+ goto err_ret;
+ }
+ adc_dev = iio_priv(indio_dev);
+
+ adc_dev->mfd_tscadc = tscadc_dev;
+ adc_dev->channels = pdata->adc_init->adc_channels;
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &tiadc_info;
+
+ tiadc_step_config(adc_dev);
+
+ err = tiadc_channel_init(indio_dev, adc_dev->channels);
+ if (err < 0)
+ goto err_free_device;
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto err_free_channels;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_free_channels:
+ tiadc_channels_remove(indio_dev);
+err_free_device:
+ iio_device_free(indio_dev);
+err_ret:
+ return err;
+}
+
+static int tiadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ tiadc_channels_remove(indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tiadc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ unsigned int idle;
+
+ if (!device_may_wakeup(tscadc_dev->dev)) {
+ idle = tiadc_readl(adc_dev, REG_CTRL);
+ idle &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, (idle |
+ CNTRLREG_POWERDOWN));
+ }
+
+ return 0;
+}
+
+static int tiadc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int restore;
+
+ /* Make sure ADC is powered up */
+ restore = tiadc_readl(adc_dev, REG_CTRL);
+ restore &= ~(CNTRLREG_POWERDOWN);
+ tiadc_writel(adc_dev, REG_CTRL, restore);
+
+ tiadc_step_config(adc_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tiadc_pm_ops = {
+ .suspend = tiadc_suspend,
+ .resume = tiadc_resume,
+};
+#define TIADC_PM_OPS (&tiadc_pm_ops)
+#else
+#define TIADC_PM_OPS NULL
+#endif
+
+static struct platform_driver tiadc_driver = {
+ .driver = {
+ .name = "tiadc",
+ .owner = THIS_MODULE,
+ .pm = TIADC_PM_OPS,
+ },
+ .probe = tiadc_probe,
+ .remove = tiadc_remove,
+};
+
+module_platform_driver(tiadc_driver);
+
+MODULE_DESCRIPTION("TI ADC controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
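
tiadc_step_config() above hands the ADC the highest-numbered sequencer steps and the highest-numbered input lines, leaving the low ones to the touchscreen. A standalone walk-through of that index arithmetic; TOTAL_STEPS = 16 and TOTAL_CHANNELS = 8 are assumed here to match the ti_am335x_tscadc MFD header:

/*
 * Standalone walk-through of the step allocation in tiadc_step_config().
 * The DEMO_* constants are assumptions standing in for the MFD header
 * values; the loop mirrors the arithmetic in the driver above.
 */
#include <stdio.h>

#define DEMO_TOTAL_STEPS	16
#define DEMO_TOTAL_CHANNELS	8

int main(void)
{
	int adc_channels = 4;	/* e.g. pdata->adc_init->adc_channels */
	int steps = DEMO_TOTAL_STEPS - adc_channels;
	int channel = DEMO_TOTAL_CHANNELS - adc_channels;
	int i;

	/* prints: step 13 -> line 4 ... step 16 -> line 7 */
	for (i = steps + 1; i <= DEMO_TOTAL_STEPS; i++)
		printf("step %2d -> input line %d\n", i, channel++);
	return 0;
}
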
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
new file mode 100644
index 000000000000..ad0261533dee
--- /dev/null
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -0,0 +1,181 @@
+/*
+ * Nano River Technologies viperboard IIO ADC driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_ADC_CMD_GET 0x00
+
+struct vprbrd_adc_msg {
+ u8 cmd;
+ u8 chan;
+ u8 val;
+} __packed;
+
+struct vprbrd_adc {
+ struct vprbrd *vb;
+};
+
+#define VPRBRD_ADC_CHANNEL(_index) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
+}
+
+static struct iio_chan_spec const vprbrd_adc_iio_channels[] = {
+ VPRBRD_ADC_CHANNEL(0),
+ VPRBRD_ADC_CHANNEL(1),
+ VPRBRD_ADC_CHANNEL(2),
+ VPRBRD_ADC_CHANNEL(3),
+};
+
+static int vprbrd_iio_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long info)
+{
+ int ret, error = 0;
+ struct vprbrd_adc *adc = iio_priv(iio_dev);
+ struct vprbrd *vb = adc->vb;
+ struct vprbrd_adc_msg *admsg = (struct vprbrd_adc_msg *)vb->buf;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&vb->lock);
+
+ admsg->cmd = VPRBRD_ADC_CMD_GET;
+ admsg->chan = chan->scan_index;
+ admsg->val = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+ VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, admsg,
+ sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+ if (ret != sizeof(struct vprbrd_adc_msg)) {
+ dev_err(&iio_dev->dev, "usb send error on adc read\n");
+ error = -EREMOTEIO;
+ }
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, admsg,
+ sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ *val = admsg->val;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_adc_msg)) {
+ dev_err(&iio_dev->dev, "usb recv error on adc read\n");
+ error = -EREMOTEIO;
+ }
+
+ if (error)
+ goto error;
+
+ return IIO_VAL_INT;
+ default:
+ error = -EINVAL;
+ break;
+ }
+error:
+ return error;
+}
+
+static const struct iio_info vprbrd_adc_iio_info = {
+ .read_raw = &vprbrd_iio_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int vprbrd_adc_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_adc *adc;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ /* registering iio */
+ indio_dev = iio_device_alloc(sizeof(*adc));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(indio_dev);
+ adc->vb = vb;
+ indio_dev->name = "viperboard adc";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &vprbrd_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vprbrd_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register iio (adc)");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+error:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int vprbrd_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver vprbrd_adc_driver = {
+ .driver = {
+ .name = "viperboard-adc",
+ .owner = THIS_MODULE,
+ },
+ .probe = vprbrd_adc_probe,
+ .remove = vprbrd_adc_remove,
+};
+
+module_platform_driver(vprbrd_adc_driver);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("IIO ADC driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-adc");
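
The viperboard ADC exchanges a fixed 3-byte command over two USB control transfers: an OUT transfer that names the channel, then an IN transfer that returns the same message with val filled in. The __packed annotation makes that on-wire layout explicit; a standalone check of the layout (demo_adc_msg mirrors struct vprbrd_adc_msg above, using the plain GCC attribute spelling of __packed):

/*
 * Standalone mirror of the 3-byte message used by the viperboard ADC.
 * demo_adc_msg is illustrative; __attribute__((packed)) is the plain GCC
 * spelling of the kernel's __packed.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_adc_msg {
	uint8_t cmd;
	uint8_t chan;
	uint8_t val;
} __attribute__((packed));

int main(void)
{
	struct demo_adc_msg msg = { 0x00 /* GET */, 2 /* channel */, 0 };

	printf("wire size: %zu bytes, cmd=%u chan=%u\n",
	       sizeof(msg), msg.cmd, msg.chan);	/* wire size: 3 bytes */
	return 0;
}
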
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index d8281cdbfc4a..d6c0af23a2a7 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -133,7 +133,7 @@ static const struct iio_chan_spec ad8366_channels[] = {
AD8366_CHAN(1),
};
-static int __devinit ad8366_probe(struct spi_device *spi)
+static int ad8366_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct ad8366_state *st;
@@ -182,7 +182,7 @@ error_put_reg:
return ret;
}
-static int __devexit ad8366_remove(struct spi_device *spi)
+static int ad8366_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad8366_state *st = iio_priv(indio_dev);
@@ -211,7 +211,7 @@ static struct spi_driver ad8366_driver = {
.owner = THIS_MODULE,
},
.probe = ad8366_probe,
- .remove = __devexit_p(ad8366_remove),
+ .remove = ad8366_remove,
.id_table = ad8366_id,
};
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 4d40e24f3721..9201022945e9 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -25,7 +25,7 @@ static struct iio_buffer_access_funcs iio_cb_access = {
.store_to = &iio_buffer_cb_store_to,
};
-struct iio_cb_buffer *iio_channel_get_all_cb(const char *name,
+struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
int (*cb)(u8 *data,
void *private),
void *private)
@@ -46,7 +46,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(const char *name,
cb_buff->buffer.access = &iio_cb_access;
INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
- cb_buff->channels = iio_channel_get_all(name);
+ cb_buff->channels = iio_channel_get_all(dev);
if (IS_ERR(cb_buff->channels)) {
ret = PTR_ERR(cb_buff->channels);
goto error_free_cb_buff;
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index ed45ee54500c..0b6e97d18fa0 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -3,3 +3,4 @@
#
source "drivers/iio/common/hid-sensors/Kconfig"
+source "drivers/iio/common/st_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 81584009b21b..c2352beb5d97 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -7,3 +7,4 @@
#
obj-y += hid-sensors/
+obj-y += st_sensors/
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index ae10778da7aa..1178121b55b0 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -6,7 +6,7 @@ menu "Hid Sensor IIO Common"
config HID_SENSOR_IIO_COMMON
tristate "Common modules for all HID Sensor IIO drivers"
depends on HID_SENSOR_HUB
- select IIO_TRIGGER if IIO_BUFFER
+ select HID_SENSOR_IIO_TRIGGER if IIO_BUFFER
help
Say yes here to build support for HID sensor to use
HID sensor common processing for attributes and IIO triggers.
@@ -14,6 +14,17 @@ config HID_SENSOR_IIO_COMMON
HID sensor drivers, this module contains processing for those
attributes.
+config HID_SENSOR_IIO_TRIGGER
+ tristate "Common module (trigger) for all HID Sensor IIO drivers"
+ depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON
+ select IIO_TRIGGER
+ help
+ Say yes here to build trigger support for HID sensors.
+ Triggers will be sent once all requested attributes have been read.
+
+ If this driver is compiled as a module, it will be named
+ hid-sensor-trigger.
+
config HID_SENSOR_ENUM_BASE_QUIRKS
bool "ENUM base quirks for HID Sensor IIO drivers"
depends on HID_SENSOR_IIO_COMMON
diff --git a/drivers/iio/common/hid-sensors/Makefile b/drivers/iio/common/hid-sensors/Makefile
index 1f463e00c242..22e7c5a82325 100644
--- a/drivers/iio/common/hid-sensors/Makefile
+++ b/drivers/iio/common/hid-sensors/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_HID_SENSOR_IIO_COMMON) += hid-sensor-iio-common.o
-hid-sensor-iio-common-y := hid-sensor-attributes.o hid-sensor-trigger.o
+obj-$(CONFIG_HID_SENSOR_IIO_TRIGGER) += hid-sensor-trigger.o
+hid-sensor-iio-common-y := hid-sensor-attributes.o
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 75374955caba..75b54730a963 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -25,7 +25,6 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include "hid-sensor-attributes.h"
static int pow_10(unsigned power)
{
@@ -114,7 +113,7 @@ static u32 convert_to_vtf_format(int size, int exp, int val1, int val2)
return value;
}
-int hid_sensor_read_samp_freq_value(struct hid_sensor_iio_common *st,
+int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
int *val1, int *val2)
{
s32 value;
@@ -141,7 +140,7 @@ int hid_sensor_read_samp_freq_value(struct hid_sensor_iio_common *st,
}
EXPORT_SYMBOL(hid_sensor_read_samp_freq_value);
-int hid_sensor_write_samp_freq_value(struct hid_sensor_iio_common *st,
+int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int val1, int val2)
{
s32 value;
@@ -169,7 +168,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_iio_common *st,
}
EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
-int hid_sensor_read_raw_hyst_value(struct hid_sensor_iio_common *st,
+int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
int *val1, int *val2)
{
s32 value;
@@ -191,7 +190,7 @@ int hid_sensor_read_raw_hyst_value(struct hid_sensor_iio_common *st,
}
EXPORT_SYMBOL(hid_sensor_read_raw_hyst_value);
-int hid_sensor_write_raw_hyst_value(struct hid_sensor_iio_common *st,
+int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
int val1, int val2)
{
s32 value;
@@ -212,7 +211,7 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
- struct hid_sensor_iio_common *st)
+ struct hid_sensor_common *st)
{
sensor_hub_input_get_attribute_info(hsdev,
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.h b/drivers/iio/common/hid-sensors/hid-sensor-attributes.h
deleted file mode 100644
index a4676a0c3de5..000000000000
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * HID Sensors Driver
- * Copyright (c) 2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef _HID_SENSORS_ATTRIBUTES_H
-#define _HID_SENSORS_ATTRIBUTES_H
-
-/* Common hid sensor iio structure */
-struct hid_sensor_iio_common {
- struct hid_sensor_hub_device *hsdev;
- struct platform_device *pdev;
- unsigned usage_id;
- bool data_ready;
- struct hid_sensor_hub_attribute_info poll;
- struct hid_sensor_hub_attribute_info report_state;
- struct hid_sensor_hub_attribute_info power_state;
- struct hid_sensor_hub_attribute_info sensitivity;
-};
-
-/*Convert from hid unit expo to regular exponent*/
-static inline int hid_sensor_convert_exponent(int unit_expo)
-{
- if (unit_expo < 0x08)
- return unit_expo;
- else if (unit_expo <= 0x0f)
- return -(0x0f-unit_expo+1);
- else
- return 0;
-}
-
-int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
- u32 usage_id,
- struct hid_sensor_iio_common *st);
-int hid_sensor_write_raw_hyst_value(struct hid_sensor_iio_common *st,
- int val1, int val2);
-int hid_sensor_read_raw_hyst_value(struct hid_sensor_iio_common *st,
- int *val1, int *val2);
-int hid_sensor_write_samp_freq_value(struct hid_sensor_iio_common *st,
- int val1, int val2);
-int hid_sensor_read_samp_freq_value(struct hid_sensor_iio_common *st,
- int *val1, int *val2);
-
-#endif
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index d60198a6ca29..7a525a91105d 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -26,13 +26,12 @@
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/sysfs.h>
-#include "hid-sensor-attributes.h"
#include "hid-sensor-trigger.h"
static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
- struct hid_sensor_iio_common *st = trig->private_data;
+ struct hid_sensor_common *st = trig->private_data;
int state_val;
state_val = state ? 1 : 0;
@@ -64,7 +63,7 @@ static const struct iio_trigger_ops hid_sensor_trigger_ops = {
};
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
- struct hid_sensor_iio_common *attrb)
+ struct hid_sensor_common *attrb)
{
int ret;
struct iio_trigger *trig;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index fd982971b1b8..9a8731478eda 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -20,7 +20,7 @@
#define _HID_SENSOR_TRIGGER_H
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
- struct hid_sensor_iio_common *attrb);
+ struct hid_sensor_common *attrb);
void hid_sensor_remove_trigger(struct iio_dev *indio_dev);
#endif
diff --git a/drivers/iio/common/st_sensors/Kconfig b/drivers/iio/common/st_sensors/Kconfig
new file mode 100644
index 000000000000..865f1ca33eb9
--- /dev/null
+++ b/drivers/iio/common/st_sensors/Kconfig
@@ -0,0 +1,14 @@
+#
+# STMicroelectronics sensors common library
+#
+
+config IIO_ST_SENSORS_I2C
+ tristate
+
+config IIO_ST_SENSORS_SPI
+ tristate
+
+config IIO_ST_SENSORS_CORE
+ tristate
+ select IIO_ST_SENSORS_I2C if I2C
+ select IIO_ST_SENSORS_SPI if SPI_MASTER
diff --git a/drivers/iio/common/st_sensors/Makefile b/drivers/iio/common/st_sensors/Makefile
new file mode 100644
index 000000000000..9f3e24f3024b
--- /dev/null
+++ b/drivers/iio/common/st_sensors/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the STMicroelectronics sensor common modules.
+#
+
+obj-$(CONFIG_IIO_ST_SENSORS_I2C) += st_sensors_i2c.o
+obj-$(CONFIG_IIO_ST_SENSORS_SPI) += st_sensors_spi.o
+obj-$(CONFIG_IIO_ST_SENSORS_CORE) += st_sensors.o
+st_sensors-y := st_sensors_core.o
+st_sensors-$(CONFIG_IIO_BUFFER) += st_sensors_buffer.o
+st_sensors-$(CONFIG_IIO_TRIGGER) += st_sensors_trigger.o
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
new file mode 100644
index 000000000000..09b236d6ee89
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -0,0 +1,116 @@
+/*
+ * STMicroelectronics sensors buffer library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/irqreturn.h>
+
+#include <linux/iio/common/st_sensors.h>
+
+
+int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+{
+ int i, n = 0, len;
+ u8 addr[ST_SENSORS_NUMBER_DATA_CHANNELS];
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ for (i = 0; i < ST_SENSORS_NUMBER_DATA_CHANNELS; i++) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
+ addr[n] = indio_dev->channels[i].address;
+ n++;
+ }
+ }
+ switch (n) {
+ case 1:
+ len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ addr[0], ST_SENSORS_BYTE_FOR_CHANNEL, buf,
+ sdata->multiread_bit);
+ break;
+ case 2:
+ if ((addr[1] - addr[0]) == ST_SENSORS_BYTE_FOR_CHANNEL) {
+ len = sdata->tf->read_multiple_byte(&sdata->tb,
+ sdata->dev, addr[0],
+ ST_SENSORS_BYTE_FOR_CHANNEL*n,
+ buf, sdata->multiread_bit);
+ } else {
+ u8 rx_array[ST_SENSORS_BYTE_FOR_CHANNEL*
+ ST_SENSORS_NUMBER_DATA_CHANNELS];
+ len = sdata->tf->read_multiple_byte(&sdata->tb,
+ sdata->dev, addr[0],
+ ST_SENSORS_BYTE_FOR_CHANNEL*
+ ST_SENSORS_NUMBER_DATA_CHANNELS,
+ rx_array, sdata->multiread_bit);
+ if (len < 0)
+ goto read_data_channels_error;
+
+ for (i = 0; i < n * ST_SENSORS_NUMBER_DATA_CHANNELS;
+ i++) {
+ if (i < n)
+ buf[i] = rx_array[i];
+ else
+ buf[i] = rx_array[n + i];
+ }
+ len = ST_SENSORS_BYTE_FOR_CHANNEL*n;
+ }
+ break;
+ case 3:
+ len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ addr[0], ST_SENSORS_BYTE_FOR_CHANNEL*
+ ST_SENSORS_NUMBER_DATA_CHANNELS,
+ buf, sdata->multiread_bit);
+ break;
+ default:
+ len = -EINVAL;
+ goto read_data_channels_error;
+ }
+ if (len != ST_SENSORS_BYTE_FOR_CHANNEL*n) {
+ len = -EIO;
+ goto read_data_channels_error;
+ }
+
+read_data_channels_error:
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_get_buffer_element);
+
+irqreturn_t st_sensors_trigger_handler(int irq, void *p)
+{
+ int len;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
+ if (len < 0)
+ goto st_sensors_get_buffer_element_error;
+
+ if (indio_dev->scan_timestamp)
+ *(s64 *)((u8 *)sdata->buffer_data +
+ ALIGN(len, sizeof(s64))) = pf->timestamp;
+
+ iio_push_to_buffers(indio_dev, sdata->buffer_data);
+
+st_sensors_get_buffer_element_error:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(st_sensors_trigger_handler);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
new file mode 100644
index 000000000000..0198324a8b0c
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -0,0 +1,446 @@
+/*
+ * STMicroelectronics sensors core library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/common/st_sensors.h>
+
+
+#define ST_SENSORS_WAI_ADDRESS 0x0f
+
+static int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+ u8 reg_addr, u8 mask, u8 data)
+{
+ int err;
+ u8 new_data;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev, reg_addr, &new_data);
+ if (err < 0)
+ goto st_sensors_write_data_with_mask_error;
+
+ new_data = ((new_data & (~mask)) | ((data << __ffs(mask)) & mask));
+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev, reg_addr, new_data);
+
+st_sensors_write_data_with_mask_error:
+ return err;
+}
+
+static int st_sensors_match_odr(struct st_sensors *sensor,
+ unsigned int odr, struct st_sensor_odr_avl *odr_out)
+{
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
+ if (sensor->odr.odr_avl[i].hz == 0)
+ goto st_sensors_match_odr_error;
+
+ if (sensor->odr.odr_avl[i].hz == odr) {
+ odr_out->hz = sensor->odr.odr_avl[i].hz;
+ odr_out->value = sensor->odr.odr_avl[i].value;
+ ret = 0;
+ break;
+ }
+ }
+
+st_sensors_match_odr_error:
+ return ret;
+}
+
+int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
+{
+ int err;
+ struct st_sensor_odr_avl odr_out;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = st_sensors_match_odr(sdata->sensor, odr, &odr_out);
+ if (err < 0)
+ goto st_sensors_match_odr_error;
+
+ if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
+ (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
+ if (sdata->enabled == true) {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->odr.addr,
+ sdata->sensor->odr.mask,
+ odr_out.value);
+ } else {
+ err = 0;
+ }
+ } else {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->odr.addr, sdata->sensor->odr.mask,
+ odr_out.value);
+ }
+ if (err >= 0)
+ sdata->odr = odr_out.hz;
+
+st_sensors_match_odr_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_odr);
+
+static int st_sensors_match_fs(struct st_sensors *sensor,
+ unsigned int fs, int *index_fs_avl)
+{
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if (sensor->fs.fs_avl[i].num == 0)
+ goto st_sensors_match_odr_error;
+
+ if (sensor->fs.fs_avl[i].num == fs) {
+ *index_fs_avl = i;
+ ret = 0;
+ break;
+ }
+ }
+
+st_sensors_match_odr_error:
+ return ret;
+}
+
+static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
+{
+ int err, i;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = st_sensors_match_fs(sdata->sensor, fs, &i);
+ if (err < 0)
+ goto st_accel_set_fullscale_error;
+
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->fs.addr,
+ sdata->sensor->fs.mask,
+ sdata->sensor->fs.fs_avl[i].value);
+ if (err < 0)
+ goto st_accel_set_fullscale_error;
+
+ sdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &sdata->sensor->fs.fs_avl[i];
+ return err;
+
+st_accel_set_fullscale_error:
+ dev_err(&indio_dev->dev, "failed to set new fullscale.\n");
+ return err;
+}
+
+int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
+{
+ bool found;
+ u8 tmp_value;
+ int err = -EINVAL;
+ struct st_sensor_odr_avl odr_out;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ if (enable) {
+ found = false;
+ tmp_value = sdata->sensor->pw.value_on;
+ if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
+ (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
+ err = st_sensors_match_odr(sdata->sensor,
+ sdata->odr, &odr_out);
+ if (err < 0)
+ goto set_enable_error;
+ tmp_value = odr_out.value;
+ found = true;
+ }
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->pw.addr,
+ sdata->sensor->pw.mask, tmp_value);
+ if (err < 0)
+ goto set_enable_error;
+
+ sdata->enabled = true;
+
+ if (found)
+ sdata->odr = odr_out.hz;
+ } else {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->pw.addr,
+ sdata->sensor->pw.mask,
+ sdata->sensor->pw.value_off);
+ if (err < 0)
+ goto set_enable_error;
+
+ sdata->enabled = false;
+ }
+
+set_enable_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_enable);
+
+int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->enable_axis.addr,
+ sdata->sensor->enable_axis.mask, axis_enable);
+}
+EXPORT_SYMBOL(st_sensors_set_axis_enable);
+
+int st_sensors_init_sensor(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_init(&sdata->tb.buf_lock);
+
+ err = st_sensors_set_enable(indio_dev, false);
+ if (err < 0)
+ goto init_error;
+
+ err = st_sensors_set_fullscale(indio_dev,
+ sdata->current_fullscale->num);
+ if (err < 0)
+ goto init_error;
+
+ err = st_sensors_set_odr(indio_dev, sdata->odr);
+ if (err < 0)
+ goto init_error;
+
+ /* set BDU */
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->bdu.addr, sdata->sensor->bdu.mask, true);
+ if (err < 0)
+ goto init_error;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+
+init_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_init_sensor);
+
+int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ /* Enable/Disable the interrupt generator 1. */
+ if (sdata->sensor->drdy_irq.ig1.en_addr > 0) {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->drdy_irq.ig1.en_addr,
+ sdata->sensor->drdy_irq.ig1.en_mask, (int)enable);
+ if (err < 0)
+ goto st_accel_set_dataready_irq_error;
+ }
+
+ /* Enable/Disable the interrupt generator for data ready. */
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->drdy_irq.addr,
+ sdata->sensor->drdy_irq.mask, (int)enable);
+
+st_accel_set_dataready_irq_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_dataready_irq);
+
+int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale)
+{
+ int err = -EINVAL, i;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if ((sdata->sensor->fs.fs_avl[i].gain == scale) &&
+ (sdata->sensor->fs.fs_avl[i].gain != 0)) {
+ err = 0;
+ break;
+ }
+ }
+ if (err < 0)
+ goto st_sensors_match_scale_error;
+
+ err = st_sensors_set_fullscale(indio_dev,
+ sdata->sensor->fs.fs_avl[i].num);
+
+st_sensors_match_scale_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_fullscale_by_gain);
+
+static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
+ u8 ch_addr, int *data)
+{
+ int err;
+ u8 outdata[ST_SENSORS_BYTE_FOR_CHANNEL];
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ ch_addr, ST_SENSORS_BYTE_FOR_CHANNEL,
+ outdata, sdata->multiread_bit);
+ if (err < 0)
+ goto read_error;
+
+ *data = (s16)get_unaligned_le16(outdata);
+
+read_error:
+ return err;
+}
+
+int st_sensors_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ err = -EBUSY;
+ goto read_error;
+ } else {
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto read_error;
+
+ msleep((sdata->sensor->bootime * 1000) / sdata->odr);
+ err = st_sensors_read_axis_data(indio_dev, ch->address, val);
+ if (err < 0)
+ goto read_error;
+
+ *val = *val >> ch->scan_type.shift;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ return err;
+
+read_error:
+ mutex_unlock(&indio_dev->mlock);
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_read_info_raw);
+
+int st_sensors_check_device_support(struct iio_dev *indio_dev,
+ int num_sensors_list, const struct st_sensors *sensors)
+{
+ u8 wai;
+ int i, n, err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ ST_SENSORS_DEFAULT_WAI_ADDRESS, &wai);
+ if (err < 0) {
+ dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
+ goto read_wai_error;
+ }
+
+ for (i = 0; i < num_sensors_list; i++) {
+ if (sensors[i].wai == wai)
+ break;
+ }
+ if (i == num_sensors_list)
+ goto device_not_supported;
+
+ for (n = 0; n < ARRAY_SIZE(sensors[i].sensors_supported); n++) {
+ if (strcmp(indio_dev->name,
+ &sensors[i].sensors_supported[n][0]) == 0)
+ break;
+ }
+ if (n == ARRAY_SIZE(sensors[i].sensors_supported)) {
+ dev_err(&indio_dev->dev, "device name and WhoAmI mismatch.\n");
+ goto sensor_name_mismatch;
+ }
+
+ sdata->sensor = (struct st_sensors *)&sensors[i];
+
+ return i;
+
+device_not_supported:
+ dev_err(&indio_dev->dev, "device not supported: WhoAmI (0x%x).\n", wai);
+sensor_name_mismatch:
+ err = -ENODEV;
+read_wai_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_check_device_support);
+
+ssize_t st_sensors_sysfs_get_sampling_frequency(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_sensor_data *adata = iio_priv(dev_get_drvdata(dev));
+
+ return sprintf(buf, "%d\n", adata->odr);
+}
+EXPORT_SYMBOL(st_sensors_sysfs_get_sampling_frequency);
+
+ssize_t st_sensors_sysfs_set_sampling_frequency(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ int err;
+ unsigned int odr;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+ err = kstrtoint(buf, 10, &odr);
+ if (err < 0)
+ goto conversion_error;
+
+ mutex_lock(&indio_dev->mlock);
+ err = st_sensors_set_odr(indio_dev, odr);
+ mutex_unlock(&indio_dev->mlock);
+
+conversion_error:
+ return err < 0 ? err : size;
+}
+EXPORT_SYMBOL(st_sensors_sysfs_set_sampling_frequency);
+
+ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
+ if (sdata->sensor->odr.odr_avl[i].hz == 0)
+ break;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ sdata->sensor->odr.odr_avl[i].hz);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
+
+ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if (sdata->sensor->fs.fs_avl[i].num == 0)
+ break;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+ sdata->sensor->fs.fs_avl[i].gain);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_sysfs_scale_avail);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors core");
+MODULE_LICENSE("GPL v2");
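The core library above is meant to be driven by per-sensor-type front ends (accelerometer, gyroscope, and so on). As a hedged sketch of that usage, not code from this patch, a client driver's read_raw() would typically forward raw reads to st_sensors_read_info_raw() and report scale from the currently selected full scale; my_read_raw() and the surrounding driver are hypothetical:

/*
 * Illustrative client-side read_raw() built on the exported helpers.
 * The IIO_CHAN_INFO_SCALE path assumes fs_avl[].gain is in micro-units,
 * matching st_sensors_sysfs_scale_avail() above.
 */
static int my_read_raw(struct iio_dev *indio_dev,
		       struct iio_chan_spec const *ch,
		       int *val, int *val2, long mask)
{
	struct st_sensor_data *sdata = iio_priv(indio_dev);
	int err;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		err = st_sensors_read_info_raw(indio_dev, ch, val);
		if (err < 0)
			return err;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		*val = 0;
		*val2 = sdata->current_fullscale->gain;
		return IIO_VAL_INT_PLUS_MICRO;
	default:
		return -EINVAL;
	}
}

In probe(), the same driver would first match the part with st_sensors_check_device_support() and then bring it to a known state with st_sensors_init_sensor() before registering the IIO device.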
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
new file mode 100644
index 000000000000..38af9440c103
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -0,0 +1,81 @@
+/*
+ * STMicroelectronics sensors i2c library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors_i2c.h>
+
+
+#define ST_SENSORS_I2C_MULTIREAD 0x80
+
+static unsigned int st_sensors_i2c_get_irq(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return to_i2c_client(sdata->dev)->irq;
+}
+
+static int st_sensors_i2c_read_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 *res_byte)
+{
+ int err;
+
+ err = i2c_smbus_read_byte_data(to_i2c_client(dev), reg_addr);
+ if (err < 0)
+ goto st_accel_i2c_read_byte_error;
+
+ *res_byte = err & 0xff;
+
+st_accel_i2c_read_byte_error:
+ return err < 0 ? err : 0;
+}
+
+static int st_sensors_i2c_read_multiple_byte(
+ struct st_sensor_transfer_buffer *tb, struct device *dev,
+ u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ if (multiread_bit)
+ reg_addr |= ST_SENSORS_I2C_MULTIREAD;
+
+ return i2c_smbus_read_i2c_block_data(to_i2c_client(dev),
+ reg_addr, len, data);
+}
+
+static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 data)
+{
+ return i2c_smbus_write_byte_data(to_i2c_client(dev), reg_addr, data);
+}
+
+static const struct st_sensor_transfer_function st_sensors_tf_i2c = {
+ .read_byte = st_sensors_i2c_read_byte,
+ .write_byte = st_sensors_i2c_write_byte,
+ .read_multiple_byte = st_sensors_i2c_read_multiple_byte,
+};
+
+void st_sensors_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client, struct st_sensor_data *sdata)
+{
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = client->name;
+
+ sdata->tf = &st_sensors_tf_i2c;
+ sdata->get_irq_data_ready = st_sensors_i2c_get_irq;
+}
+EXPORT_SYMBOL(st_sensors_i2c_configure);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors i2c driver");
+MODULE_LICENSE("GPL v2");
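A bus front end is expected to stay thin: allocate the IIO device, point sdata->dev at the bus device, and let st_sensors_i2c_configure() install the transfer functions and IRQ lookup. A hedged sketch follows (my_i2c_probe() and the rest of the probe path are hypothetical, not code from this patch):

/*
 * Illustrative I2C front-end probe using the configure helper above.
 * Channel/info setup and error handling are omitted for brevity.
 */
static int my_i2c_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct iio_dev *indio_dev;
	struct st_sensor_data *sdata;

	indio_dev = iio_device_alloc(sizeof(*sdata));
	if (!indio_dev)
		return -ENOMEM;

	sdata = iio_priv(indio_dev);
	sdata->dev = &client->dev;

	st_sensors_i2c_configure(indio_dev, client, sdata);

	/* ...continue with st_sensors_check_device_support(),
	 * st_sensors_init_sensor() and iio_device_register()... */
	return 0;
}

The SPI variant below follows the same shape with st_sensors_spi_configure().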
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
new file mode 100644
index 000000000000..f0aa2f105222
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -0,0 +1,128 @@
+/*
+ * STMicroelectronics sensors spi library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors_spi.h>
+
+
+#define ST_SENSORS_SPI_MULTIREAD 0xc0
+#define ST_SENSORS_SPI_READ 0x80
+
+static unsigned int st_sensors_spi_get_irq(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return to_spi_device(sdata->dev)->irq;
+}
+
+static int st_sensors_spi_read(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ struct spi_message msg;
+ int err;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = tb->tx_buf,
+ .bits_per_word = 8,
+ .len = 1,
+ },
+ {
+ .rx_buf = tb->rx_buf,
+ .bits_per_word = 8,
+ .len = len,
+ }
+ };
+
+ mutex_lock(&tb->buf_lock);
+ if ((multiread_bit) && (len > 1))
+ tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_MULTIREAD;
+ else
+ tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_READ;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ err = spi_sync(to_spi_device(dev), &msg);
+ if (err)
+ goto acc_spi_read_error;
+
+ memcpy(data, tb->rx_buf, len*sizeof(u8));
+ mutex_unlock(&tb->buf_lock);
+ return len;
+
+acc_spi_read_error:
+ mutex_unlock(&tb->buf_lock);
+ return err;
+}
+
+static int st_sensors_spi_read_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 *res_byte)
+{
+ return st_sensors_spi_read(tb, dev, reg_addr, 1, res_byte, false);
+}
+
+static int st_sensors_spi_read_multiple_byte(
+ struct st_sensor_transfer_buffer *tb, struct device *dev,
+ u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ return st_sensors_spi_read(tb, dev, reg_addr, len, data, multiread_bit);
+}
+
+static int st_sensors_spi_write_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 data)
+{
+ struct spi_message msg;
+ int err;
+
+ struct spi_transfer xfers = {
+ .tx_buf = tb->tx_buf,
+ .bits_per_word = 8,
+ .len = 2,
+ };
+
+ mutex_lock(&tb->buf_lock);
+ tb->tx_buf[0] = reg_addr;
+ tb->tx_buf[1] = data;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers, &msg);
+ err = spi_sync(to_spi_device(dev), &msg);
+ mutex_unlock(&tb->buf_lock);
+
+ return err;
+}
+
+static const struct st_sensor_transfer_function st_sensors_tf_spi = {
+ .read_byte = st_sensors_spi_read_byte,
+ .write_byte = st_sensors_spi_write_byte,
+ .read_multiple_byte = st_sensors_spi_read_multiple_byte,
+};
+
+void st_sensors_spi_configure(struct iio_dev *indio_dev,
+ struct spi_device *spi, struct st_sensor_data *sdata)
+{
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi->modalias;
+
+ sdata->tf = &st_sensors_tf_spi;
+ sdata->get_irq_data_ready = st_sensors_spi_get_irq;
+}
+EXPORT_SYMBOL(st_sensors_spi_configure);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
new file mode 100644
index 000000000000..139ed030abb0
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -0,0 +1,77 @@
+/*
+ * STMicroelectronics sensors trigger library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/interrupt.h>
+
+#include <linux/iio/common/st_sensors.h>
+
+
+int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ const struct iio_trigger_ops *trigger_ops)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
+ if (sdata->trig == NULL) {
+ err = -ENOMEM;
+ dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
+ goto iio_trigger_alloc_error;
+ }
+
+ err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
+ IRQF_TRIGGER_RISING,
+ sdata->trig->name,
+ sdata->trig);
+ if (err)
+ goto request_irq_error;
+
+ sdata->trig->private_data = indio_dev;
+ sdata->trig->ops = trigger_ops;
+ sdata->trig->dev.parent = sdata->dev;
+
+ err = iio_trigger_register(sdata->trig);
+ if (err < 0) {
+ dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
+ goto iio_trigger_register_error;
+ }
+ indio_dev->trig = sdata->trig;
+
+ return 0;
+
+iio_trigger_register_error:
+ free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
+request_irq_error:
+ iio_trigger_free(sdata->trig);
+iio_trigger_alloc_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_allocate_trigger);
+
+void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ iio_trigger_unregister(sdata->trig);
+ free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
+ iio_trigger_free(sdata->trig);
+}
+EXPORT_SYMBOL(st_sensors_deallocate_trigger);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
+MODULE_LICENSE("GPL v2");
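Combined with the buffer helpers earlier in this series, the intended wiring is a triggered buffer whose threaded handler is st_sensors_trigger_handler(), plus an optional data-ready trigger when the part has an interrupt line. The sketch below is illustrative rather than code from this patch; my_trigger_ops and my_buffer_setup_ops are hypothetical, and the setup ops are expected to allocate sdata->buffer_data before the buffer is enabled, since the handler writes into it:

/*
 * Illustrative probe-time wiring of the common buffer and trigger helpers.
 */
static int my_setup_buffer_and_trigger(struct iio_dev *indio_dev)
{
	struct st_sensor_data *sdata = iio_priv(indio_dev);
	int err;

	err = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
					 &st_sensors_trigger_handler,
					 &my_buffer_setup_ops);
	if (err < 0)
		return err;

	if (sdata->get_irq_data_ready(indio_dev) > 0) {
		err = st_sensors_allocate_trigger(indio_dev, &my_trigger_ops);
		if (err < 0) {
			iio_triggered_buffer_cleanup(indio_dev);
			return err;
		}
	}

	return 0;
}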
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index eb281a2c295b..2fe1d4edcb2f 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -424,8 +424,8 @@ static const char * const ad5064_vref_name(struct ad5064_state *st,
return st->chip_info->shared_vref ? "vref" : ad5064_vref_names[vref];
}
-static int __devinit ad5064_probe(struct device *dev, enum ad5064_type type,
- const char *name, ad5064_write_func write)
+static int ad5064_probe(struct device *dev, enum ad5064_type type,
+ const char *name, ad5064_write_func write)
{
struct iio_dev *indio_dev;
struct ad5064_state *st;
@@ -495,7 +495,7 @@ error_free:
return ret;
}
-static int __devexit ad5064_remove(struct device *dev)
+static int ad5064_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
@@ -523,7 +523,7 @@ static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd,
return spi_write(spi, &st->data.spi, sizeof(st->data.spi));
}
-static int __devinit ad5064_spi_probe(struct spi_device *spi)
+static int ad5064_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -531,7 +531,7 @@ static int __devinit ad5064_spi_probe(struct spi_device *spi)
ad5064_spi_write);
}
-static int __devexit ad5064_spi_remove(struct spi_device *spi)
+static int ad5064_spi_remove(struct spi_device *spi)
{
return ad5064_remove(&spi->dev);
}
@@ -563,7 +563,7 @@ static struct spi_driver ad5064_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad5064_spi_probe,
- .remove = __devexit_p(ad5064_spi_remove),
+ .remove = ad5064_spi_remove,
.id_table = ad5064_spi_ids,
};
@@ -596,14 +596,14 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
return i2c_master_send(i2c, st->data.i2c, 3);
}
-static int __devinit ad5064_i2c_probe(struct i2c_client *i2c,
+static int ad5064_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
return ad5064_probe(&i2c->dev, id->driver_data, id->name,
ad5064_i2c_write);
}
-static int __devexit ad5064_i2c_remove(struct i2c_client *i2c)
+static int ad5064_i2c_remove(struct i2c_client *i2c)
{
return ad5064_remove(&i2c->dev);
}
@@ -625,7 +625,7 @@ static struct i2c_driver ad5064_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ad5064_i2c_probe,
- .remove = __devexit_p(ad5064_i2c_remove),
+ .remove = ad5064_i2c_remove,
.id_table = ad5064_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 8fce84fe70b1..92771217f665 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -213,7 +213,6 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
unsigned int addr)
{
struct ad5360_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -226,10 +225,6 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
mutex_lock(&indio_dev->mlock);
st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) |
@@ -237,7 +232,7 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
AD5360_READBACK_TYPE(type) |
AD5360_READBACK_ADDR(addr));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
@@ -433,7 +428,7 @@ static const char * const ad5360_vref_name[] = {
"vref0", "vref1", "vref2"
};
-static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
+static int ad5360_alloc_channels(struct iio_dev *indio_dev)
{
struct ad5360_state *st = iio_priv(indio_dev);
struct iio_chan_spec *channels;
@@ -456,7 +451,7 @@ static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
return 0;
}
-static int __devinit ad5360_probe(struct spi_device *spi)
+static int ad5360_probe(struct spi_device *spi)
{
enum ad5360_type type = spi_get_device_id(spi)->driver_data;
struct iio_dev *indio_dev;
@@ -524,7 +519,7 @@ error_free:
return ret;
}
-static int __devexit ad5360_remove(struct spi_device *spi)
+static int ad5360_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5360_state *st = iio_priv(indio_dev);
@@ -560,7 +555,7 @@ static struct spi_driver ad5360_driver = {
.owner = THIS_MODULE,
},
.probe = ad5360_probe,
- .remove = __devexit_p(ad5360_remove),
+ .remove = ad5360_remove,
.id_table = ad5360_ids,
};
module_spi_driver(ad5360_driver);
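The mechanical change repeated across the DAC drivers in this series replaces the open-coded spi_message_init()/spi_message_add_tail()/spi_sync() sequence with spi_sync_transfer(), which wraps the same steps. Behaviourally it is roughly the helper below (an illustrative sketch, not the kernel's verbatim implementation):

/*
 * What spi_sync_transfer(spi, xfers, num_xfers) amounts to: build a
 * message from the transfer array and run it synchronously.
 */
static inline int my_sync_transfer(struct spi_device *spi,
				   struct spi_transfer *xfers,
				   unsigned int num_xfers)
{
	struct spi_message msg;
	unsigned int i;

	spi_message_init(&msg);
	for (i = 0; i < num_xfers; i++)
		spi_message_add_tail(&xfers[i], &msg);

	return spi_sync(spi, &msg);
}

This is also why each conversion can drop the local struct spi_message.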
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 14991ac55f26..483fc379a2da 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -338,7 +338,7 @@ static const struct ad5380_chip_info ad5380_chip_info_tbl[] = {
},
};
-static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
+static int ad5380_alloc_channels(struct iio_dev *indio_dev)
{
struct ad5380_state *st = iio_priv(indio_dev);
struct iio_chan_spec *channels;
@@ -361,8 +361,8 @@ static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
return 0;
}
-static int __devinit ad5380_probe(struct device *dev, struct regmap *regmap,
- enum ad5380_type type, const char *name)
+static int ad5380_probe(struct device *dev, struct regmap *regmap,
+ enum ad5380_type type, const char *name)
{
struct iio_dev *indio_dev;
struct ad5380_state *st;
@@ -406,7 +406,11 @@ static int __devinit ad5380_probe(struct device *dev, struct regmap *regmap,
goto error_free_reg;
}
- st->vref = regulator_get_voltage(st->vref_reg);
+ ret = regulator_get_voltage(st->vref_reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref = ret;
} else {
st->vref = st->chip_info->int_vref;
ctrl |= AD5380_CTRL_INT_VREF_EN;
@@ -441,7 +445,7 @@ error_out:
return ret;
}
-static int __devexit ad5380_remove(struct device *dev)
+static int ad5380_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5380_state *st = iio_priv(indio_dev);
@@ -478,7 +482,7 @@ static const struct regmap_config ad5380_regmap_config = {
#if IS_ENABLED(CONFIG_SPI_MASTER)
-static int __devinit ad5380_spi_probe(struct spi_device *spi)
+static int ad5380_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct regmap *regmap;
@@ -491,7 +495,7 @@ static int __devinit ad5380_spi_probe(struct spi_device *spi)
return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name);
}
-static int __devexit ad5380_spi_remove(struct spi_device *spi)
+static int ad5380_spi_remove(struct spi_device *spi)
{
return ad5380_remove(&spi->dev);
}
@@ -523,7 +527,7 @@ static struct spi_driver ad5380_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad5380_spi_probe,
- .remove = __devexit_p(ad5380_spi_remove),
+ .remove = ad5380_spi_remove,
.id_table = ad5380_spi_ids,
};
@@ -552,8 +556,8 @@ static inline void ad5380_spi_unregister_driver(void)
#if IS_ENABLED(CONFIG_I2C)
-static int __devinit ad5380_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ad5380_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct regmap *regmap;
@@ -565,7 +569,7 @@ static int __devinit ad5380_i2c_probe(struct i2c_client *i2c,
return ad5380_probe(&i2c->dev, regmap, id->driver_data, id->name);
}
-static int __devexit ad5380_i2c_remove(struct i2c_client *i2c)
+static int ad5380_i2c_remove(struct i2c_client *i2c)
{
return ad5380_remove(&i2c->dev);
}
@@ -597,7 +601,7 @@ static struct i2c_driver ad5380_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ad5380_i2c_probe,
- .remove = __devexit_p(ad5380_i2c_remove),
+ .remove = ad5380_i2c_remove,
.id_table = ad5380_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index cdbc5bf25c31..6b86a638dad0 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -127,7 +127,6 @@ static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
{
struct ad5421_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -140,15 +139,11 @@ static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
mutex_lock(&indio_dev->mlock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
@@ -449,7 +444,7 @@ static const struct iio_info ad5421_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad5421_probe(struct spi_device *spi)
+static int ad5421_probe(struct spi_device *spi)
{
struct ad5421_platform_data *pdata = dev_get_platdata(&spi->dev);
struct iio_dev *indio_dev;
@@ -516,7 +511,7 @@ error_free:
return ret;
}
-static int __devexit ad5421_remove(struct spi_device *spi)
+static int ad5421_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
@@ -534,7 +529,7 @@ static struct spi_driver ad5421_driver = {
.owner = THIS_MODULE,
},
.probe = ad5421_probe,
- .remove = __devexit_p(ad5421_remove),
+ .remove = ad5421_remove,
};
module_spi_driver(ad5421_driver);
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 3310cbbd41e7..f5583aedfb59 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -212,8 +212,8 @@ static const struct iio_info ad5446_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad5446_probe(struct device *dev, const char *name,
- const struct ad5446_chip_info *chip_info)
+static int ad5446_probe(struct device *dev, const char *name,
+ const struct ad5446_chip_info *chip_info)
{
struct ad5446_state *st;
struct iio_dev *indio_dev;
@@ -226,7 +226,11 @@ static int __devinit ad5446_probe(struct device *dev, const char *name,
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
indio_dev = iio_device_alloc(sizeof(*st));
@@ -461,7 +465,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
-static int __devinit ad5446_spi_probe(struct spi_device *spi)
+static int ad5446_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -469,7 +473,7 @@ static int __devinit ad5446_spi_probe(struct spi_device *spi)
&ad5446_spi_chip_info[id->driver_data]);
}
-static int __devexit ad5446_spi_remove(struct spi_device *spi)
+static int ad5446_spi_remove(struct spi_device *spi)
{
return ad5446_remove(&spi->dev);
}
@@ -480,7 +484,7 @@ static struct spi_driver ad5446_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad5446_spi_probe,
- .remove = __devexit_p(ad5446_spi_remove),
+ .remove = ad5446_spi_remove,
.id_table = ad5446_spi_ids,
};
@@ -539,14 +543,14 @@ static const struct ad5446_chip_info ad5446_i2c_chip_info[] = {
},
};
-static int __devinit ad5446_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ad5446_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
return ad5446_probe(&i2c->dev, id->name,
&ad5446_i2c_chip_info[id->driver_data]);
}
-static int __devexit ad5446_i2c_remove(struct i2c_client *i2c)
+static int ad5446_i2c_remove(struct i2c_client *i2c)
{
return ad5446_remove(&i2c->dev);
}
@@ -568,7 +572,7 @@ static struct i2c_driver ad5446_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ad5446_i2c_probe,
- .remove = __devexit_p(ad5446_i2c_remove),
+ .remove = ad5446_i2c_remove,
.id_table = ad5446_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 0ee6f8eeba8d..c4731b7b577b 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -266,7 +266,7 @@ static const char *ad5449_vref_name(struct ad5449 *st, int n)
return "VREFB";
}
-static int __devinit ad5449_spi_probe(struct spi_device *spi)
+static int ad5449_spi_probe(struct spi_device *spi)
{
struct ad5449_platform_data *pdata = spi->dev.platform_data;
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -333,7 +333,7 @@ error_free:
return ret;
}
-static int __devexit ad5449_spi_remove(struct spi_device *spi)
+static int ad5449_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5449 *st = iio_priv(indio_dev);
@@ -366,7 +366,7 @@ static struct spi_driver ad5449_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad5449_spi_probe,
- .remove = __devexit_p(ad5449_spi_remove),
+ .remove = ad5449_spi_remove,
.id_table = ad5449_spi_ids,
};
module_spi_driver(ad5449_spi_driver);
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 242bdc7d0044..e5e59749f109 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -85,11 +85,7 @@ static int ad5504_spi_read(struct spi_device *spi, u8 addr)
.rx_buf = &val,
.len = 2,
};
- struct spi_message m;
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
- ret = spi_sync(spi, &m);
+ ret = spi_sync_transfer(spi, &t, 1);
if (ret < 0)
return ret;
@@ -277,7 +273,7 @@ static const struct iio_chan_spec ad5504_channels[] = {
AD5504_CHANNEL(3),
};
-static int __devinit ad5504_probe(struct spi_device *spi)
+static int ad5504_probe(struct spi_device *spi)
{
struct ad5504_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -296,7 +292,11 @@ static int __devinit ad5504_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
spi_set_drvdata(spi, indio_dev);
@@ -352,7 +352,7 @@ error_ret:
return ret;
}
-static int __devexit ad5504_remove(struct spi_device *spi)
+static int ad5504_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5504_state *st = iio_priv(indio_dev);
@@ -383,7 +383,7 @@ static struct spi_driver ad5504_driver = {
.owner = THIS_MODULE,
},
.probe = ad5504_probe,
- .remove = __devexit_p(ad5504_remove),
+ .remove = ad5504_remove,
.id_table = ad5504_id,
};
module_spi_driver(ad5504_driver);
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 6a7d6a48cc6d..f6e116627b71 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -220,7 +220,7 @@ static const struct ad5624r_chip_info ad5624r_chip_info_tbl[] = {
},
};
-static int __devinit ad5624r_probe(struct spi_device *spi)
+static int ad5624r_probe(struct spi_device *spi)
{
struct ad5624r_state *st;
struct iio_dev *indio_dev;
@@ -238,7 +238,11 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
spi_set_drvdata(spi, indio_dev);
@@ -282,7 +286,7 @@ error_ret:
return ret;
}
-static int __devexit ad5624r_remove(struct spi_device *spi)
+static int ad5624r_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5624r_state *st = iio_priv(indio_dev);
@@ -314,7 +318,7 @@ static struct spi_driver ad5624r_driver = {
.owner = THIS_MODULE,
},
.probe = ad5624r_probe,
- .remove = __devexit_p(ad5624r_remove),
+ .remove = ad5624r_remove,
.id_table = ad5624r_id,
};
module_spi_driver(ad5624r_driver);
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index bc92ff9309c2..5e554af21703 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -117,18 +117,13 @@ static int ad5686_spi_read(struct ad5686_state *st, u8 addr)
.len = 3,
},
};
- struct spi_message m;
int ret;
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) |
AD5686_ADDR(addr));
st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
return ret;
@@ -313,7 +308,7 @@ static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
};
-static int __devinit ad5686_probe(struct spi_device *spi)
+static int ad5686_probe(struct spi_device *spi)
{
struct ad5686_state *st;
struct iio_dev *indio_dev;
@@ -332,7 +327,11 @@ static int __devinit ad5686_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
st->chip_info =
@@ -379,7 +378,7 @@ error_put_reg:
return ret;
}
-static int __devexit ad5686_remove(struct spi_device *spi)
+static int ad5686_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5686_state *st = iio_priv(indio_dev);
@@ -408,7 +407,7 @@ static struct spi_driver ad5686_driver = {
.owner = THIS_MODULE,
},
.probe = ad5686_probe,
- .remove = __devexit_p(ad5686_remove),
+ .remove = ad5686_remove,
.id_table = ad5686_id,
};
module_spi_driver(ad5686_driver);
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 5db3506034c5..71faabc6b14e 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -153,7 +153,6 @@ static int ad5755_write_ctrl(struct iio_dev *indio_dev, unsigned int channel,
static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
{
struct ad5755_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -167,16 +166,12 @@ static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
mutex_lock(&indio_dev->mlock);
st->data[0].d32 = cpu_to_be32(AD5755_READ_FLAG | (addr << 16));
st->data[1].d32 = cpu_to_be32(AD5755_NOOP);
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
@@ -447,8 +442,8 @@ static bool ad5755_is_valid_mode(struct ad5755_state *st, enum ad5755_mode mode)
}
}
-static int __devinit ad5755_setup_pdata(struct iio_dev *indio_dev,
- const struct ad5755_platform_data *pdata)
+static int ad5755_setup_pdata(struct iio_dev *indio_dev,
+ const struct ad5755_platform_data *pdata)
{
struct ad5755_state *st = iio_priv(indio_dev);
unsigned int val;
@@ -503,7 +498,7 @@ static int __devinit ad5755_setup_pdata(struct iio_dev *indio_dev,
return 0;
}
-static bool __devinit ad5755_is_voltage_mode(enum ad5755_mode mode)
+static bool ad5755_is_voltage_mode(enum ad5755_mode mode)
{
switch (mode) {
case AD5755_MODE_VOLTAGE_0V_5V:
@@ -516,8 +511,8 @@ static bool __devinit ad5755_is_voltage_mode(enum ad5755_mode mode)
}
}
-static int __devinit ad5755_init_channels(struct iio_dev *indio_dev,
- const struct ad5755_platform_data *pdata)
+static int ad5755_init_channels(struct iio_dev *indio_dev,
+ const struct ad5755_platform_data *pdata)
{
struct ad5755_state *st = iio_priv(indio_dev);
struct iio_chan_spec *channels = st->channels;
@@ -562,7 +557,7 @@ static const struct ad5755_platform_data ad5755_default_pdata = {
},
};
-static int __devinit ad5755_probe(struct spi_device *spi)
+static int ad5755_probe(struct spi_device *spi)
{
enum ad5755_type type = spi_get_device_id(spi)->driver_data;
const struct ad5755_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -614,7 +609,7 @@ error_free:
return ret;
}
-static int __devexit ad5755_remove(struct spi_device *spi)
+static int ad5755_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
@@ -640,7 +635,7 @@ static struct spi_driver ad5755_driver = {
.owner = THIS_MODULE,
},
.probe = ad5755_probe,
- .remove = __devexit_p(ad5755_remove),
+ .remove = ad5755_remove,
.id_table = ad5755_id,
};
module_spi_driver(ad5755_driver);
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index ffce30447445..5b7acd3a2c77 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -135,7 +135,6 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
unsigned int *val)
{
struct ad5764_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -148,15 +147,11 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
mutex_lock(&indio_dev->mlock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret >= 0)
*val = be32_to_cpu(st->data[1].d32) & 0xffff;
@@ -273,7 +268,7 @@ static const struct iio_info ad5764_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad5764_probe(struct spi_device *spi)
+static int ad5764_probe(struct spi_device *spi)
{
enum ad5764_type type = spi_get_device_id(spi)->driver_data;
struct iio_dev *indio_dev;
@@ -340,7 +335,7 @@ error_free:
return ret;
}
-static int __devexit ad5764_remove(struct spi_device *spi)
+static int ad5764_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5764_state *st = iio_priv(indio_dev);
@@ -372,7 +367,7 @@ static struct spi_driver ad5764_driver = {
.owner = THIS_MODULE,
},
.probe = ad5764_probe,
- .remove = __devexit_p(ad5764_remove),
+ .remove = ad5764_remove,
.id_table = ad5764_ids,
};
module_spi_driver(ad5764_driver);
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 2bd2e37280ff..8dfd3da8a07b 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -125,7 +125,6 @@ static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
u8 d8[4];
} data[3];
int ret;
- struct spi_message msg;
struct spi_transfer xfers[] = {
{
.tx_buf = &data[0].d8[1],
@@ -144,10 +143,7 @@ static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
AD5791_ADDR(addr));
data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP));
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(spi, &msg);
+ ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
*val = be32_to_cpu(data[2].d32);
@@ -346,7 +342,7 @@ static const struct iio_info ad5791_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad5791_probe(struct spi_device *spi)
+static int ad5791_probe(struct spi_device *spi)
{
struct ad5791_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -365,7 +361,11 @@ static int __devinit ad5791_probe(struct spi_device *spi)
if (ret)
goto error_put_reg_pos;
- pos_voltage_uv = regulator_get_voltage(st->reg_vdd);
+ ret = regulator_get_voltage(st->reg_vdd);
+ if (ret < 0)
+ goto error_disable_reg_pos;
+
+ pos_voltage_uv = ret;
}
st->reg_vss = regulator_get(&spi->dev, "vss");
@@ -374,7 +374,11 @@ static int __devinit ad5791_probe(struct spi_device *spi)
if (ret)
goto error_put_reg_neg;
- neg_voltage_uv = regulator_get_voltage(st->reg_vss);
+ ret = regulator_get_voltage(st->reg_vss);
+ if (ret < 0)
+ goto error_disable_reg_neg;
+
+ neg_voltage_uv = ret;
}
st->pwr_down = true;
@@ -428,6 +432,7 @@ error_put_reg_neg:
if (!IS_ERR(st->reg_vss))
regulator_put(st->reg_vss);
+error_disable_reg_pos:
if (!IS_ERR(st->reg_vdd))
regulator_disable(st->reg_vdd);
error_put_reg_pos:
@@ -439,7 +444,7 @@ error_ret:
return ret;
}
-static int __devexit ad5791_remove(struct spi_device *spi)
+static int ad5791_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5791_state *st = iio_priv(indio_dev);
@@ -475,7 +480,7 @@ static struct spi_driver ad5791_driver = {
.owner = THIS_MODULE,
},
.probe = ad5791_probe,
- .remove = __devexit_p(ad5791_remove),
+ .remove = ad5791_remove,
.id_table = ad5791_id,
};
module_spi_driver(ad5791_driver);
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index c3d748c25939..352abe2004a4 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -156,7 +156,7 @@ static const struct iio_chan_spec max517_channels[] = {
MAX517_CHANNEL(1)
};
-static int __devinit max517_probe(struct i2c_client *client,
+static int max517_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max517_data *data;
@@ -210,7 +210,7 @@ exit:
return err;
}
-static int __devexit max517_remove(struct i2c_client *client)
+static int max517_remove(struct i2c_client *client)
{
iio_device_unregister(i2c_get_clientdata(client));
iio_device_free(i2c_get_clientdata(client));
@@ -232,7 +232,7 @@ static struct i2c_driver max517_driver = {
.pm = MAX517_PM_OPS,
},
.probe = max517_probe,
- .remove = __devexit_p(max517_remove),
+ .remove = max517_remove,
.id_table = max517_id,
};
module_i2c_driver(max517_driver);
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index e0e168bd5b45..8f88cc4059a2 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -141,8 +141,8 @@ static const struct iio_info mcp4725_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit mcp4725_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mcp4725_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct mcp4725_data *data;
struct iio_dev *indio_dev;
@@ -195,7 +195,7 @@ exit:
return err;
}
-static int __devexit mcp4725_remove(struct i2c_client *client)
+static int mcp4725_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -217,7 +217,7 @@ static struct i2c_driver mcp4725_driver = {
.pm = MCP4725_PM_OPS,
},
.probe = mcp4725_probe,
- .remove = __devexit_p(mcp4725_remove),
+ .remove = mcp4725_remove,
.id_table = mcp4725_id,
};
module_i2c_driver(mcp4725_driver);
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index b737c64a402d..1ea132e239ea 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -287,7 +287,6 @@ struct ad9523_state {
static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
{
struct ad9523_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
/* We encode the register size 1..3 bytes into the register address.
@@ -305,15 +304,11 @@ static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
st->data[0].d32 = cpu_to_be32(AD9523_READ |
AD9523_CNT(AD9523_TRANSF_LEN(addr)) |
AD9523_ADDR(addr));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
dev_err(&indio_dev->dev, "read failed (%d)", ret);
else
@@ -326,7 +321,6 @@ static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
static int ad9523_write(struct iio_dev *indio_dev, unsigned addr, unsigned val)
{
struct ad9523_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -338,16 +332,12 @@ static int ad9523_write(struct iio_dev *indio_dev, unsigned addr, unsigned val)
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
st->data[0].d32 = cpu_to_be32(AD9523_WRITE |
AD9523_CNT(AD9523_TRANSF_LEN(addr)) |
AD9523_ADDR(addr));
st->data[1].d32 = cpu_to_be32(val);
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
dev_err(&indio_dev->dev, "write failed (%d)", ret);
@@ -959,7 +949,7 @@ static int ad9523_setup(struct iio_dev *indio_dev)
return 0;
}
-static int __devinit ad9523_probe(struct spi_device *spi)
+static int ad9523_probe(struct spi_device *spi)
{
struct ad9523_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -1020,7 +1010,7 @@ error_put_reg:
return ret;
}
-static int __devexit ad9523_remove(struct spi_device *spi)
+static int ad9523_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad9523_state *st = iio_priv(indio_dev);
@@ -1049,7 +1039,7 @@ static struct spi_driver ad9523_driver = {
.owner = THIS_MODULE,
},
.probe = ad9523_probe,
- .remove = __devexit_p(ad9523_remove),
+ .remove = ad9523_remove,
.id_table = ad9523_id,
};
module_spi_driver(ad9523_driver);
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index e35bb8f6fe75..a884252ac66b 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -173,7 +173,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
} while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
} while (r_cnt == 0);
- tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);
+ tmp = freq * (u64)st->r1_mod + (st->fpfd >> 1);
do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
st->r0_fract = do_div(tmp, st->r1_mod);
st->r0_int = tmp;
@@ -355,7 +355,7 @@ static const struct iio_info adf4350_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit adf4350_probe(struct spi_device *spi)
+static int adf4350_probe(struct spi_device *spi)
{
struct adf4350_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -440,7 +440,7 @@ error_put_reg:
return ret;
}
-static int __devexit adf4350_remove(struct spi_device *spi)
+static int adf4350_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct adf4350_state *st = iio_priv(indio_dev);
@@ -476,7 +476,7 @@ static struct spi_driver adf4350_driver = {
.owner = THIS_MODULE,
},
.probe = adf4350_probe,
- .remove = __devexit_p(adf4350_remove),
+ .remove = adf4350_remove,
.id_table = adf4350_id,
};
module_spi_driver(adf4350_driver);
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 48ed1483ff27..6be4628faffe 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -3,6 +3,13 @@
#
menu "Digital gyroscope sensors"
+config ADIS16080
+ tristate "Analog Devices ADIS16080/100 Yaw Rate Gyroscope with SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADIS16080, ADIS16100 Yaw
+ Rate Gyroscope with SPI.
+
config ADIS16136
tristate "Analog devices ADIS16136 and similar gyroscopes driver"
depends on SPI_MASTER
@@ -12,14 +19,63 @@ config ADIS16136
Say yes here to build support for the Analog Devices ADIS16133, ADIS16135,
ADIS16136 gyroscope devices.
+config ADXRS450
+ tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADXRS450 and ADXRS453
+ programmable digital output gyroscopes.
+
+ This driver can also be built as a module. If so, the module
+ will be called adxrs450.
+
config HID_SENSOR_GYRO_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Gyroscope 3D"
help
Say yes here to build support for the HID SENSOR
Gyroscope 3D.
+config IIO_ST_GYRO_3AXIS
+ tristate "STMicroelectronics gyroscopes 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_GYRO_I2C_3AXIS if (I2C)
+ select IIO_ST_GYRO_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ select IIO_ST_GYRO_BUFFER if (IIO_TRIGGERED_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics gyroscopes:
+ L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+ - st_gyro (core functions for the driver [it is mandatory]);
+ - st_gyro_i2c (necessary for the I2C devices [optional*]);
+ - st_gyro_spi (necessary for the SPI devices [optional*]);
+
+ (*) at least one of these is required for the driver to work.
+
+config IIO_ST_GYRO_I2C_3AXIS
+ tristate
+ depends on IIO_ST_GYRO_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_GYRO_SPI_3AXIS
+ tristate
+ depends on IIO_ST_GYRO_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
+config ITG3200
+ tristate "InvenSense ITG3200 Digital 3-Axis Gyroscope I2C driver"
+ depends on I2C
+ select IIO_TRIGGERED_BUFFER if IIO_BUFFER
+ help
+ Say yes here to add support for the InvenSense ITG3200 digital
+ 3-axis gyroscope sensor.
+
endmenu
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index 702a058907e3..225d289082e6 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -2,5 +2,19 @@
# Makefile for industrial I/O gyroscope sensor drivers
#
+obj-$(CONFIG_ADIS16080) += adis16080.o
obj-$(CONFIG_ADIS16136) += adis16136.o
+obj-$(CONFIG_ADXRS450) += adxrs450.o
+
obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
+
+itg3200-y := itg3200_core.o
+itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
+obj-$(CONFIG_ITG3200) += itg3200.o
+
+obj-$(CONFIG_IIO_ST_GYRO_3AXIS) += st_gyro.o
+st_gyro-y := st_gyro_core.o
+st_gyro-$(CONFIG_IIO_BUFFER) += st_gyro_buffer.o
+
+obj-$(CONFIG_IIO_ST_GYRO_I2C_3AXIS) += st_gyro_i2c.o
+obj-$(CONFIG_IIO_ST_GYRO_SPI_3AXIS) += st_gyro_spi.o
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/iio/gyro/adis16080.c
index 3525a68d6a75..1861287911f1 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -29,48 +29,50 @@
#define ADIS16080_DIN_WRITE (1 << 15)
+struct adis16080_chip_info {
+ int scale_val;
+ int scale_val2;
+};
+
/**
* struct adis16080_state - device instance specific data
* @us: actual spi_device to write data
+ * @info: chip specific parameters
* @buf: transmit or receive buffer
- * @buf_lock: mutex to protect tx and rx
**/
struct adis16080_state {
struct spi_device *us;
- struct mutex buf_lock;
+ const struct adis16080_chip_info *info;
- u8 buf[2] ____cacheline_aligned;
+ __be16 buf ____cacheline_aligned;
};
-static int adis16080_spi_write(struct iio_dev *indio_dev,
- u16 val)
+static int adis16080_read_sample(struct iio_dev *indio_dev,
+ u16 addr, int *val)
{
- int ret;
struct adis16080_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->buf[0] = val >> 8;
- st->buf[1] = val;
-
- ret = spi_write(st->us, st->buf, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int adis16080_spi_read(struct iio_dev *indio_dev,
- u16 *val)
-{
+ struct spi_message m;
int ret;
- struct adis16080_state *st = iio_priv(indio_dev);
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->buf,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->buf,
+ .len = 2,
+ },
+ };
- mutex_lock(&st->buf_lock);
+ st->buf = cpu_to_be16(addr | ADIS16080_DIN_WRITE);
- ret = spi_read(st->us, st->buf, 2);
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+ ret = spi_sync(st->us, &m);
if (ret == 0)
- *val = ((st->buf[0] & 0xF) << 8) | st->buf[1];
- mutex_unlock(&st->buf_lock);
+ *val = sign_extend32(be16_to_cpu(st->buf), 11);
return ret;
}
@@ -81,28 +83,52 @@ static int adis16080_read_raw(struct iio_dev *indio_dev,
int *val2,
long mask)
{
- int ret = -EINVAL;
- u16 ut = 0;
- /* Take the iio_dev status lock */
+ struct adis16080_state *st = iio_priv(indio_dev);
+ int ret;
- mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = adis16080_spi_write(indio_dev,
- chan->address |
- ADIS16080_DIN_WRITE);
- if (ret < 0)
- break;
- ret = adis16080_spi_read(indio_dev, &ut);
- if (ret < 0)
- break;
- *val = ut;
- ret = IIO_VAL_INT;
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16080_read_sample(indio_dev, chan->address, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret ? ret : IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = st->info->scale_val;
+ *val2 = st->info->scale_val2;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_VOLTAGE:
+ /* VREF = 5V, 12 bits */
+ *val = 5000;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ /* 85 C = 585, 25 C = 0 */
+ *val = 85000 - 25000;
+ *val2 = 585;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ /* 2.5 V = 0 */
+ *val = 2048;
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ /* 85 C = 585, 25 C = 0 */
+ *val = DIV_ROUND_CLOSEST(25 * 585, 85 - 25);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
break;
}
- mutex_unlock(&indio_dev->mlock);
- return ret;
+ return -EINVAL;
}
static const struct iio_chan_spec adis16080_channels[] = {
@@ -110,25 +136,32 @@ static const struct iio_chan_spec adis16080_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = ADIS16080_DIN_GYRO,
}, {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
.address = ADIS16080_DIN_AIN1,
}, {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
.address = ADIS16080_DIN_AIN2,
}, {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
.address = ADIS16080_DIN_TEMP,
}
};
@@ -138,8 +171,27 @@ static const struct iio_info adis16080_info = {
.driver_module = THIS_MODULE,
};
+enum {
+ ID_ADIS16080,
+ ID_ADIS16100,
+};
+
+static const struct adis16080_chip_info adis16080_chip_info[] = {
+ [ID_ADIS16080] = {
+ /* 80 degree = 819, 819 rad = 46925 degree */
+ .scale_val = 80,
+ .scale_val2 = 46925,
+ },
+ [ID_ADIS16100] = {
+ /* 300 degree = 1230, 1230 rad = 70474 degree */
+ .scale_val = 300,
+ .scale_val2 = 70474,
+ },
+};
+
static int adis16080_probe(struct spi_device *spi)
{
+ const struct spi_device_id *id = spi_get_device_id(spi);
int ret;
struct adis16080_state *st;
struct iio_dev *indio_dev;
@@ -156,7 +208,7 @@ static int adis16080_probe(struct spi_device *spi)
/* Allocate the comms buffers */
st->us = spi;
- mutex_init(&st->buf_lock);
+ st->info = &adis16080_chip_info[id->driver_data];
indio_dev->name = spi->dev.driver->name;
indio_dev->channels = adis16080_channels;
@@ -176,7 +228,6 @@ error_ret:
return ret;
}
-/* fixme, confirm ordering in this function */
static int adis16080_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
@@ -185,6 +236,13 @@ static int adis16080_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id adis16080_ids[] = {
+ { "adis16080", ID_ADIS16080 },
+ { "adis16100", ID_ADIS16100 },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, adis16080_ids);
+
static struct spi_driver adis16080_driver = {
.driver = {
.name = "adis16080",
@@ -192,10 +250,10 @@ static struct spi_driver adis16080_driver = {
},
.probe = adis16080_probe,
.remove = adis16080_remove,
+ .id_table = adis16080_ids,
};
module_spi_driver(adis16080_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope Driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16080");
diff --git a/drivers/staging/iio/gyro/adxrs450_core.c b/drivers/iio/gyro/adxrs450.c
index f0ce81da8aca..5b79953f7011 100644
--- a/drivers/staging/iio/gyro/adxrs450_core.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -21,45 +21,110 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include "adxrs450.h"
+#define ADXRS450_STARTUP_DELAY 50 /* ms */
+
+/* The MSB for the spi commands */
+#define ADXRS450_SENSOR_DATA (0x20 << 24)
+#define ADXRS450_WRITE_DATA (0x40 << 24)
+#define ADXRS450_READ_DATA (0x80 << 24)
+
+#define ADXRS450_RATE1 0x00 /* Rate Registers */
+#define ADXRS450_TEMP1 0x02 /* Temperature Registers */
+#define ADXRS450_LOCST1 0x04 /* Low CST Memory Registers */
+#define ADXRS450_HICST1 0x06 /* High CST Memory Registers */
+#define ADXRS450_QUAD1 0x08 /* Quad Memory Registers */
+#define ADXRS450_FAULT1 0x0A /* Fault Registers */
+#define ADXRS450_PID1 0x0C /* Part ID Register 1 */
+#define ADXRS450_SNH 0x0E /* Serial Number Registers, 4 bytes */
+#define ADXRS450_SNL 0x10
+#define ADXRS450_DNC1 0x12 /* Dynamic Null Correction Registers */
+/* Check bits */
+#define ADXRS450_P 0x01
+#define ADXRS450_CHK 0x02
+#define ADXRS450_CST 0x04
+#define ADXRS450_PWR 0x08
+#define ADXRS450_POR 0x10
+#define ADXRS450_NVM 0x20
+#define ADXRS450_Q 0x40
+#define ADXRS450_PLL 0x80
+#define ADXRS450_UV 0x100
+#define ADXRS450_OV 0x200
+#define ADXRS450_AMP 0x400
+#define ADXRS450_FAIL 0x800
+
+#define ADXRS450_WRERR_MASK (0x7 << 29)
+
+#define ADXRS450_MAX_RX 4
+#define ADXRS450_MAX_TX 4
+
+#define ADXRS450_GET_ST(a) ((a >> 26) & 0x3)
+
+enum {
+ ID_ADXRS450,
+ ID_ADXRS453,
+};
+
+/**
+ * struct adxrs450_state - device instance specific data
+ * @us: actual spi_device
+ * @buf_lock: mutex to protect tx and rx
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ **/
+struct adxrs450_state {
+ struct spi_device *us;
+ struct mutex buf_lock;
+ __be32 tx ____cacheline_aligned;
+ __be32 rx;
+
+};
/**
* adxrs450_spi_read_reg_16() - read 2 bytes from a register pair
- * @dev: device associated with child of actual iio_dev
- * @reg_address: the address of the lower of the two registers,which should be an even address,
- * Second register's address is reg_address + 1.
+ * @indio_dev: device associated with child of actual iio_dev
+ * @reg_address: the address of the lower of the two registers, which should be
+ * an even address; the second register's address is reg_address + 1.
* @val: somewhere to pass back the value read
**/
static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev,
u8 reg_address,
u16 *val)
{
+ struct spi_message msg;
struct adxrs450_state *st = iio_priv(indio_dev);
+ u32 tx;
int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->tx,
+ .bits_per_word = 8,
+ .len = sizeof(st->tx),
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->rx,
+ .bits_per_word = 8,
+ .len = sizeof(st->rx),
+ },
+ };
mutex_lock(&st->buf_lock);
- st->tx[0] = ADXRS450_READ_DATA | (reg_address >> 7);
- st->tx[1] = reg_address << 1;
- st->tx[2] = 0;
- st->tx[3] = 0;
+ tx = ADXRS450_READ_DATA | (reg_address << 17);
- if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1))
- st->tx[3] |= ADXRS450_P;
+ if (!(hweight32(tx) & 1))
+ tx |= ADXRS450_P;
- ret = spi_write(st->us, st->tx, 4);
- if (ret) {
- dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n",
- reg_address);
- goto error_ret;
- }
- ret = spi_read(st->us, st->rx, 4);
+ st->tx = cpu_to_be32(tx);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
if (ret) {
dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n",
reg_address);
goto error_ret;
}
- *val = (be32_to_cpu(*(u32 *)st->rx) >> 5) & 0xFFFF;
+ *val = (be32_to_cpu(st->rx) >> 5) & 0xFFFF;
error_ret:
mutex_unlock(&st->buf_lock);
@@ -68,9 +133,9 @@ error_ret:
/**
* adxrs450_spi_write_reg_16() - write 2 bytes data to a register pair
- * @dev: device associated with child of actual actual iio_dev
- * @reg_address: the address of the lower of the two registers,which should be an even address,
- * Second register's address is reg_address + 1.
+ * @indio_dev: device associated with child of actual iio_dev
+ * @reg_address: the address of the lower of the two registers, which should be
+ * an even address; the second register's address is reg_address + 1.
* @val: value to be written.
**/
static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev,
@@ -78,55 +143,61 @@ static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev,
u16 val)
{
struct adxrs450_state *st = iio_priv(indio_dev);
+ u32 tx;
int ret;
mutex_lock(&st->buf_lock);
- st->tx[0] = ADXRS450_WRITE_DATA | reg_address >> 7;
- st->tx[1] = reg_address << 1 | val >> 15;
- st->tx[2] = val >> 7;
- st->tx[3] = val << 1;
+ tx = ADXRS450_WRITE_DATA | (reg_address << 17) | (val << 1);
- if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1))
- st->tx[3] |= ADXRS450_P;
+ if (!(hweight32(tx) & 1))
+ tx |= ADXRS450_P;
- ret = spi_write(st->us, st->tx, 4);
+ st->tx = cpu_to_be32(tx);
+ ret = spi_write(st->us, &st->tx, sizeof(st->tx));
if (ret)
dev_err(&st->us->dev, "problem while writing 16 bit register 0x%02x\n",
reg_address);
- msleep(1); /* enforce sequential transfer delay 0.1ms */
+ usleep_range(100, 1000); /* enforce sequential transfer delay 0.1ms */
mutex_unlock(&st->buf_lock);
return ret;
}
/**
* adxrs450_spi_sensor_data() - read 2 bytes sensor data
- * @dev: device associated with child of actual iio_dev
+ * @indio_dev: device associated with child of actual iio_dev
* @val: somewhere to pass back the value read
**/
static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val)
{
+ struct spi_message msg;
struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->tx,
+ .bits_per_word = 8,
+ .len = sizeof(st->tx),
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->rx,
+ .bits_per_word = 8,
+ .len = sizeof(st->rx),
+ },
+ };
mutex_lock(&st->buf_lock);
- st->tx[0] = ADXRS450_SENSOR_DATA;
- st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
-
- ret = spi_write(st->us, st->tx, 4);
- if (ret) {
- dev_err(&st->us->dev, "Problem while reading sensor data\n");
- goto error_ret;
- }
+ st->tx = cpu_to_be32(ADXRS450_SENSOR_DATA);
- ret = spi_read(st->us, st->rx, 4);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
if (ret) {
dev_err(&st->us->dev, "Problem while reading sensor data\n");
goto error_ret;
}
- *val = (be32_to_cpu(*(u32 *)st->rx) >> 10) & 0xFFFF;
+ *val = (be32_to_cpu(st->rx) >> 10) & 0xFFFF;
error_ret:
mutex_unlock(&st->buf_lock);
@@ -137,35 +208,32 @@ error_ret:
* adxrs450_spi_initial() - use for initializing procedure.
* @st: device instance specific data
* @val: somewhere to pass back the value read
+ * @chk: Whether to perform fault check
**/
static int adxrs450_spi_initial(struct adxrs450_state *st,
u32 *val, char chk)
{
- struct spi_message msg;
int ret;
+ u32 tx;
struct spi_transfer xfers = {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
+ .tx_buf = &st->tx,
+ .rx_buf = &st->rx,
.bits_per_word = 8,
- .len = 4,
+ .len = sizeof(st->tx),
};
mutex_lock(&st->buf_lock);
- st->tx[0] = ADXRS450_SENSOR_DATA;
- st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
+ tx = ADXRS450_SENSOR_DATA;
if (chk)
- st->tx[3] |= (ADXRS450_CHK | ADXRS450_P);
- spi_message_init(&msg);
- spi_message_add_tail(&xfers, &msg);
- ret = spi_sync(st->us, &msg);
+ tx |= (ADXRS450_CHK | ADXRS450_P);
+ st->tx = cpu_to_be32(tx);
+ ret = spi_sync_transfer(st->us, &xfers, 1);
if (ret) {
dev_err(&st->us->dev, "Problem while reading initializing data\n");
goto error_ret;
}
- *val = be32_to_cpu(*(u32 *)st->rx);
+ *val = be32_to_cpu(st->rx);
error_ret:
mutex_unlock(&st->buf_lock);
@@ -185,8 +253,7 @@ static int adxrs450_initial_setup(struct iio_dev *indio_dev)
if (ret)
return ret;
if (t != 0x01)
- dev_warn(&st->us->dev, "The initial power on response "
- "is not correct! Restart without reset?\n");
+ dev_warn(&st->us->dev, "The initial power on response is not correct! Restart without reset?\n");
msleep(ADXRS450_STARTUP_DELAY);
ret = adxrs450_spi_initial(st, &t, 0);
@@ -217,20 +284,6 @@ static int adxrs450_initial_setup(struct iio_dev *indio_dev)
dev_err(&st->us->dev, "The device is not in normal status!\n");
return -EINVAL;
}
- ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_PID1, &data);
- if (ret)
- return ret;
- dev_info(&st->us->dev, "The Part ID is 0x%x\n", data);
-
- ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNL, &data);
- if (ret)
- return ret;
- t = data;
- ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNH, &data);
- if (ret)
- return ret;
- t |= data << 16;
- dev_info(&st->us->dev, "The Serial Number is 0x%x\n", t);
return 0;
}
@@ -244,9 +297,10 @@ static int adxrs450_write_raw(struct iio_dev *indio_dev,
int ret;
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
+ if (val < -0x400 || val >= 0x400)
+ return -EINVAL;
ret = adxrs450_spi_write_reg_16(indio_dev,
- ADXRS450_DNC1,
- val & 0x3FF);
+ ADXRS450_DNC1, val);
break;
default:
ret = -EINVAL;
@@ -312,7 +366,7 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t);
if (ret)
break;
- *val = t;
+ *val = sign_extend32(t, 9);
ret = IIO_VAL_INT;
break;
default:
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 4c8b158e40e1..fcfc83a9f861 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -28,7 +28,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include "../common/hid-sensors/hid-sensor-attributes.h"
#include "../common/hid-sensors/hid-sensor-trigger.h"
/*Format: HID-SENSOR-usage_id_in_hex*/
@@ -44,7 +43,7 @@ enum gyro_3d_channel {
struct gyro_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
+ struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info gyro[GYRO_3D_CHANNEL_MAX];
u32 gyro_val[GYRO_3D_CHANNEL_MAX];
};
@@ -278,7 +277,7 @@ static int gyro_3d_parse_report(struct platform_device *pdev,
}
/* Function to initialize the processing for usage id */
-static int __devinit hid_gyro_3d_probe(struct platform_device *pdev)
+static int hid_gyro_3d_probe(struct platform_device *pdev)
{
int ret = 0;
static const char *name = "gyro_3d";
@@ -375,7 +374,7 @@ error_ret:
}
/* Function to deinitialize the processing for usage id */
-static int __devinit hid_gyro_3d_remove(struct platform_device *pdev)
+static int hid_gyro_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
new file mode 100644
index 000000000000..f667d2c8c00f
--- /dev/null
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -0,0 +1,156 @@
+/*
+ * itg3200_buffer.c -- support InvenSense ITG3200
+ * Digital 3-Axis Gyroscope driver
+ *
+ * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
+ * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/gyro/itg3200.h>
+
+
+static int itg3200_read_all_channels(struct i2c_client *i2c, __be16 *buf)
+{
+ u8 tx = 0x80 | ITG3200_REG_TEMP_OUT_H;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = i2c->addr,
+ .flags = i2c->flags,
+ .len = 1,
+ .buf = &tx,
+ },
+ {
+ .addr = i2c->addr,
+ .flags = i2c->flags | I2C_M_RD,
+ .len = ITG3200_SCAN_ELEMENTS * sizeof(s16),
+ .buf = (char *)buf,
+ },
+ };
+
+ return i2c_transfer(i2c->adapter, msg, 2);
+}
+
+static irqreturn_t itg3200_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct itg3200 *st = iio_priv(indio_dev);
+ __be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)];
+
+ int ret = itg3200_read_all_channels(st->i2c, buf);
+ if (ret < 0)
+ goto error_ret;
+
+ if (indio_dev->scan_timestamp)
+ memcpy((u8 *)buf + indio_dev->scan_bytes - sizeof(s64),
+ &pf->timestamp, sizeof(pf->timestamp));
+
+ iio_push_to_buffers(indio_dev, (u8 *)buf);
+ iio_trigger_notify_done(indio_dev->trig);
+
+error_ret:
+ return IRQ_HANDLED;
+}
+
+int itg3200_buffer_configure(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ itg3200_trigger_handler, NULL);
+}
+
+void itg3200_buffer_unconfigure(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+
+static int itg3200_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = trig->private_data;
+ int ret;
+ u8 msc;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_IRQ_CONFIG, &msc);
+ if (ret)
+ goto error_ret;
+
+ if (state)
+ msc |= ITG3200_IRQ_DATA_RDY_ENABLE;
+ else
+ msc &= ~ITG3200_IRQ_DATA_RDY_ENABLE;
+
+ ret = itg3200_write_reg_8(indio_dev, ITG3200_REG_IRQ_CONFIG, msc);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+
+}
+
+static const struct iio_trigger_ops itg3200_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &itg3200_data_rdy_trigger_set_state,
+};
+
+int itg3200_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct itg3200 *st = iio_priv(indio_dev);
+
+ st->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
+ indio_dev->id);
+ if (!st->trig)
+ return -ENOMEM;
+
+ ret = request_irq(st->i2c->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ IRQF_TRIGGER_RISING,
+ "itg3200_data_rdy",
+ st->trig);
+ if (ret)
+ goto error_free_trig;
+
+
+ st->trig->dev.parent = &st->i2c->dev;
+ st->trig->ops = &itg3200_trigger_ops;
+ st->trig->private_data = indio_dev;
+ ret = iio_trigger_register(st->trig);
+ if (ret)
+ goto error_free_irq;
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+
+ return 0;
+
+error_free_irq:
+ free_irq(st->i2c->irq, st->trig);
+error_free_trig:
+ iio_trigger_free(st->trig);
+ return ret;
+}
+
+void itg3200_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+
+ iio_trigger_unregister(st->trig);
+ free_irq(st->i2c->irq, st->trig);
+ iio_trigger_free(st->trig);
+}
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
new file mode 100644
index 000000000000..df2e6aa5d73b
--- /dev/null
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -0,0 +1,401 @@
+/*
+ * itg3200_core.c -- support InvenSense ITG3200
+ * Digital 3-Axis Gyroscope driver
+ *
+ * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
+ * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO:
+ * - Support digital low pass filter
+ * - Support power management
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/gyro/itg3200.h>
+
+
+int itg3200_write_reg_8(struct iio_dev *indio_dev,
+ u8 reg_address, u8 val)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+
+ return i2c_smbus_write_byte_data(st->i2c, 0x80 | reg_address, val);
+}
+
+int itg3200_read_reg_8(struct iio_dev *indio_dev,
+ u8 reg_address, u8 *val)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(st->i2c, reg_address);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+}
+
+static int itg3200_read_reg_s16(struct iio_dev *indio_dev, u8 lower_reg_address,
+ int *val)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+ struct i2c_client *client = st->i2c;
+ int ret;
+ s16 out;
+
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = client->flags,
+ .len = 1,
+ .buf = (char *)&lower_reg_address,
+ },
+ {
+ .addr = client->addr,
+ .flags = client->flags | I2C_M_RD,
+ .len = 2,
+ .buf = (char *)&out,
+ },
+ };
+
+ lower_reg_address |= 0x80;
+ ret = i2c_transfer(client->adapter, msg, 2);
+ be16_to_cpus(&out);
+ *val = out;
+
+ return (ret == 2) ? 0 : ret;
+}
+
+static int itg3200_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ int ret = 0;
+ u8 reg;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ reg = (u8)chan->address;
+ ret = itg3200_read_reg_s16(indio_dev, reg, val);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if (chan->type == IIO_TEMP)
+ *val2 = 1000000000/280;
+ else
+ *val2 = 1214142; /* (1 / 14.375) * (PI / 180) */
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = 23000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t itg3200_read_frequency(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ int ret, sps;
+ u8 val;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &val);
+ if (ret)
+ return ret;
+
+ sps = (val & ITG3200_DLPF_CFG_MASK) ? 1000 : 8000;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_SAMPLE_RATE_DIV, &val);
+ if (ret)
+ return ret;
+
+ sps /= val + 1;
+
+ return sprintf(buf, "%d\n", sps);
+}
+
+static ssize_t itg3200_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ unsigned val;
+ int ret;
+ u8 t;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &t);
+ if (ret)
+ goto err_ret;
+
+ if (val == 0) {
+ ret = -EINVAL;
+ goto err_ret;
+ }
+ t = ((t & ITG3200_DLPF_CFG_MASK) ? 1000u : 8000u) / val - 1;
+
+ ret = itg3200_write_reg_8(indio_dev, ITG3200_REG_SAMPLE_RATE_DIV, t);
+
+err_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+/*
+ * Reset device and internal registers to the power-up-default settings
+ * Use the gyro clock as reference, as suggested by the datasheet
+ */
+static int itg3200_reset(struct iio_dev *indio_dev)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+ int ret;
+
+ dev_dbg(&st->i2c->dev, "reset device");
+
+ ret = itg3200_write_reg_8(indio_dev,
+ ITG3200_REG_POWER_MANAGEMENT,
+ ITG3200_RESET);
+ if (ret) {
+ dev_err(&st->i2c->dev, "error resetting device");
+ goto error_ret;
+ }
+
+ /* Wait for PLL (1ms according to datasheet) */
+ udelay(1500);
+
+ ret = itg3200_write_reg_8(indio_dev,
+ ITG3200_REG_IRQ_CONFIG,
+ ITG3200_IRQ_ACTIVE_HIGH |
+ ITG3200_IRQ_PUSH_PULL |
+ ITG3200_IRQ_LATCH_50US_PULSE |
+ ITG3200_IRQ_LATCH_CLEAR_ANY);
+
+ if (ret)
+ dev_err(&st->i2c->dev, "error init device");
+
+error_ret:
+ return ret;
+}
+
+/* itg3200_enable_full_scale() - Disables the digital low pass filter */
+static int itg3200_enable_full_scale(struct iio_dev *indio_dev)
+{
+ u8 val;
+ int ret;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &val);
+ if (ret)
+ goto err_ret;
+
+ val |= ITG3200_DLPF_FS_SEL_2000;
+ return itg3200_write_reg_8(indio_dev, ITG3200_REG_DLPF, val);
+
+err_ret:
+ return ret;
+}
+
+static int itg3200_initial_setup(struct iio_dev *indio_dev)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+ int ret;
+ u8 val;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_ADDRESS, &val);
+ if (ret)
+ goto err_ret;
+
+ if (((val >> 1) & 0x3f) != 0x34) {
+ dev_err(&st->i2c->dev, "invalid reg value 0x%02x", val);
+ ret = -ENXIO;
+ goto err_ret;
+ }
+
+ ret = itg3200_reset(indio_dev);
+ if (ret)
+ goto err_ret;
+
+ ret = itg3200_enable_full_scale(indio_dev);
+err_ret:
+ return ret;
+}
+
+#define ITG3200_TEMP_INFO_MASK (IIO_CHAN_INFO_OFFSET_SHARED_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_RAW_SEPARATE_BIT)
+#define ITG3200_GYRO_INFO_MASK (IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_RAW_SEPARATE_BIT)
+
+#define ITG3200_ST \
+ { .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_BE }
+
+#define ITG3200_GYRO_CHAN(_mod) { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _mod, \
+ .info_mask = ITG3200_GYRO_INFO_MASK, \
+ .address = ITG3200_REG_GYRO_ ## _mod ## OUT_H, \
+ .scan_index = ITG3200_SCAN_GYRO_ ## _mod, \
+ .scan_type = ITG3200_ST, \
+}
+
+static const struct iio_chan_spec itg3200_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .channel2 = IIO_NO_MOD,
+ .info_mask = ITG3200_TEMP_INFO_MASK,
+ .address = ITG3200_REG_TEMP_OUT_H,
+ .scan_index = ITG3200_SCAN_TEMP,
+ .scan_type = ITG3200_ST,
+ },
+ ITG3200_GYRO_CHAN(X),
+ ITG3200_GYRO_CHAN(Y),
+ ITG3200_GYRO_CHAN(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(ITG3200_SCAN_ELEMENTS),
+};
+
+/* IIO device attributes */
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, itg3200_read_frequency,
+ itg3200_write_frequency);
+
+static struct attribute *itg3200_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group itg3200_attribute_group = {
+ .attrs = itg3200_attributes,
+};
+
+static const struct iio_info itg3200_info = {
+ .attrs = &itg3200_attribute_group,
+ .read_raw = &itg3200_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const unsigned long itg3200_available_scan_masks[] = { 0xffffffff, 0x0 };
+
+static int itg3200_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct itg3200 *st;
+ struct iio_dev *indio_dev;
+
+ dev_dbg(&client->dev, "probe I2C dev with IRQ %i", client->irq);
+
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ st = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ st->i2c = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = client->dev.driver->name;
+ indio_dev->channels = itg3200_channels;
+ indio_dev->num_channels = ARRAY_SIZE(itg3200_channels);
+ indio_dev->available_scan_masks = itg3200_available_scan_masks;
+ indio_dev->info = &itg3200_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = itg3200_buffer_configure(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ if (client->irq) {
+ ret = itg3200_probe_trigger(indio_dev);
+ if (ret)
+ goto error_unconfigure_buffer;
+ }
+
+ ret = itg3200_initial_setup(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
+ return 0;
+
+error_remove_trigger:
+ if (client->irq)
+ itg3200_remove_trigger(indio_dev);
+error_unconfigure_buffer:
+ itg3200_buffer_unconfigure(indio_dev);
+error_free_dev:
+ iio_device_free(indio_dev);
+error_ret:
+ return ret;
+}
+
+static int itg3200_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ if (client->irq)
+ itg3200_remove_trigger(indio_dev);
+
+ itg3200_buffer_unconfigure(indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id itg3200_id[] = {
+ { "itg3200", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, itg3200_id);
+
+static struct i2c_driver itg3200_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "itg3200",
+ },
+ .id_table = itg3200_id,
+ .probe = itg3200_probe,
+ .remove = itg3200_remove,
+};
+
+module_i2c_driver(itg3200_driver);
+
+MODULE_AUTHOR("Christian Strobel <christian.strobel@iis.fraunhofer.de>");
+MODULE_DESCRIPTION("ITG3200 Gyroscope I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
new file mode 100644
index 000000000000..3ad9907bb154
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro.h
@@ -0,0 +1,45 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_GYRO_H
+#define ST_GYRO_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define L3G4200D_GYRO_DEV_NAME "l3g4200d"
+#define LSM330D_GYRO_DEV_NAME "lsm330d_gyro"
+#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
+#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
+#define L3GD20_GYRO_DEV_NAME "l3gd20"
+#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
+#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
+#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
+
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+void st_gyro_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_gyro_allocate_ring(struct iio_dev *indio_dev);
+void st_gyro_deallocate_ring(struct iio_dev *indio_dev);
+int st_gyro_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_GYRO_TRIGGER_SET_STATE (&st_gyro_trig_set_state)
+#else /* CONFIG_IIO_BUFFER */
+static inline int st_gyro_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_gyro_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#define ST_GYRO_TRIGGER_SET_STATE NULL
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_GYRO_H */
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
new file mode 100644
index 000000000000..da4d122ec7dc
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -0,0 +1,114 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_gyro.h"
+
+int st_gyro_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = trig->private_data;
+
+ return st_sensors_set_dataready_irq(indio_dev, state);
+}
+
+static int st_gyro_buffer_preenable(struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_gyro_set_enable_error;
+
+ err = iio_sw_buffer_preenable(indio_dev);
+
+st_gyro_set_enable_error:
+ return err;
+}
+
+static int st_gyro_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ gdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (gdata->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = st_sensors_set_axis_enable(indio_dev,
+ (u8)indio_dev->active_scan_mask[0]);
+ if (err < 0)
+ goto st_gyro_buffer_postenable_error;
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_gyro_buffer_postenable_error;
+
+ return err;
+
+st_gyro_buffer_postenable_error:
+ kfree(gdata->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+static int st_gyro_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_gyro_buffer_predisable_error;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+ if (err < 0)
+ goto st_gyro_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_gyro_buffer_predisable_error:
+ kfree(gdata->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
+ .preenable = &st_gyro_buffer_preenable,
+ .postenable = &st_gyro_buffer_postenable,
+ .predisable = &st_gyro_buffer_predisable,
+};
+
+int st_gyro_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
+}
+
+void st_gyro_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
new file mode 100644
index 000000000000..fa9b24219987
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -0,0 +1,368 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_gyro.h"
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_GYRO_DEFAULT_OUT_X_L_ADDR 0x28
+#define ST_GYRO_DEFAULT_OUT_Y_L_ADDR 0x2a
+#define ST_GYRO_DEFAULT_OUT_Z_L_ADDR 0x2c
+
+/* FULLSCALE */
+#define ST_GYRO_FS_AVL_250DPS 250
+#define ST_GYRO_FS_AVL_500DPS 500
+#define ST_GYRO_FS_AVL_2000DPS 2000
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_GYRO_1_WAI_EXP 0xd3
+#define ST_GYRO_1_ODR_ADDR 0x20
+#define ST_GYRO_1_ODR_MASK 0xc0
+#define ST_GYRO_1_ODR_AVL_100HZ_VAL 0x00
+#define ST_GYRO_1_ODR_AVL_200HZ_VAL 0x01
+#define ST_GYRO_1_ODR_AVL_400HZ_VAL 0x02
+#define ST_GYRO_1_ODR_AVL_800HZ_VAL 0x03
+#define ST_GYRO_1_PW_ADDR 0x20
+#define ST_GYRO_1_PW_MASK 0x08
+#define ST_GYRO_1_FS_ADDR 0x23
+#define ST_GYRO_1_FS_MASK 0x30
+#define ST_GYRO_1_FS_AVL_250_VAL 0x00
+#define ST_GYRO_1_FS_AVL_500_VAL 0x01
+#define ST_GYRO_1_FS_AVL_2000_VAL 0x02
+#define ST_GYRO_1_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
+#define ST_GYRO_1_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
+#define ST_GYRO_1_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
+#define ST_GYRO_1_BDU_ADDR 0x23
+#define ST_GYRO_1_BDU_MASK 0x80
+#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
+#define ST_GYRO_1_DRDY_IRQ_MASK 0x08
+#define ST_GYRO_1_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_GYRO_2_WAI_EXP 0xd4
+#define ST_GYRO_2_ODR_ADDR 0x20
+#define ST_GYRO_2_ODR_MASK 0xc0
+#define ST_GYRO_2_ODR_AVL_95HZ_VAL 0x00
+#define ST_GYRO_2_ODR_AVL_190HZ_VAL 0x01
+#define ST_GYRO_2_ODR_AVL_380HZ_VAL 0x02
+#define ST_GYRO_2_ODR_AVL_760HZ_VAL 0x03
+#define ST_GYRO_2_PW_ADDR 0x20
+#define ST_GYRO_2_PW_MASK 0x08
+#define ST_GYRO_2_FS_ADDR 0x23
+#define ST_GYRO_2_FS_MASK 0x30
+#define ST_GYRO_2_FS_AVL_250_VAL 0x00
+#define ST_GYRO_2_FS_AVL_500_VAL 0x01
+#define ST_GYRO_2_FS_AVL_2000_VAL 0x02
+#define ST_GYRO_2_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
+#define ST_GYRO_2_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
+#define ST_GYRO_2_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
+#define ST_GYRO_2_BDU_ADDR 0x23
+#define ST_GYRO_2_BDU_MASK 0x80
+#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
+#define ST_GYRO_2_DRDY_IRQ_MASK 0x08
+#define ST_GYRO_2_MULTIREAD_BIT true
+
+static const struct iio_chan_spec st_gyro_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL, ST_SENSORS_SCAN_X,
+ IIO_MOD_X, IIO_LE, ST_SENSORS_DEFAULT_16_REALBITS,
+ ST_GYRO_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL, ST_SENSORS_SCAN_Y,
+ IIO_MOD_Y, IIO_LE, ST_SENSORS_DEFAULT_16_REALBITS,
+ ST_GYRO_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL, ST_SENSORS_SCAN_Z,
+ IIO_MOD_Z, IIO_LE, ST_SENSORS_DEFAULT_16_REALBITS,
+ ST_GYRO_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct st_sensors st_gyro_sensors[] = {
+ {
+ .wai = ST_GYRO_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = L3G4200D_GYRO_DEV_NAME,
+ [1] = LSM330DL_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+ .addr = ST_GYRO_1_ODR_ADDR,
+ .mask = ST_GYRO_1_ODR_MASK,
+ .odr_avl = {
+ { 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, },
+ { 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, },
+ { 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, },
+ { 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_GYRO_1_PW_ADDR,
+ .mask = ST_GYRO_1_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_GYRO_1_FS_ADDR,
+ .mask = ST_GYRO_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_GYRO_FS_AVL_250DPS,
+ .value = ST_GYRO_1_FS_AVL_250_VAL,
+ .gain = ST_GYRO_1_FS_AVL_250_GAIN,
+ },
+ [1] = {
+ .num = ST_GYRO_FS_AVL_500DPS,
+ .value = ST_GYRO_1_FS_AVL_500_VAL,
+ .gain = ST_GYRO_1_FS_AVL_500_GAIN,
+ },
+ [2] = {
+ .num = ST_GYRO_FS_AVL_2000DPS,
+ .value = ST_GYRO_1_FS_AVL_2000_VAL,
+ .gain = ST_GYRO_1_FS_AVL_2000_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_GYRO_1_BDU_ADDR,
+ .mask = ST_GYRO_1_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_GYRO_1_DRDY_IRQ_ADDR,
+ .mask = ST_GYRO_1_DRDY_IRQ_MASK,
+ },
+ .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_GYRO_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = L3GD20_GYRO_DEV_NAME,
+ [1] = L3GD20H_GYRO_DEV_NAME,
+ [2] = LSM330D_GYRO_DEV_NAME,
+ [3] = LSM330DLC_GYRO_DEV_NAME,
+ [4] = L3G4IS_GYRO_DEV_NAME,
+ [5] = LSM330_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+ .addr = ST_GYRO_2_ODR_ADDR,
+ .mask = ST_GYRO_2_ODR_MASK,
+ .odr_avl = {
+ { 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, },
+ { 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, },
+ { 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, },
+ { 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_GYRO_2_PW_ADDR,
+ .mask = ST_GYRO_2_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_GYRO_2_FS_ADDR,
+ .mask = ST_GYRO_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_GYRO_FS_AVL_250DPS,
+ .value = ST_GYRO_2_FS_AVL_250_VAL,
+ .gain = ST_GYRO_2_FS_AVL_250_GAIN,
+ },
+ [1] = {
+ .num = ST_GYRO_FS_AVL_500DPS,
+ .value = ST_GYRO_2_FS_AVL_500_VAL,
+ .gain = ST_GYRO_2_FS_AVL_500_GAIN,
+ },
+ [2] = {
+ .num = ST_GYRO_FS_AVL_2000DPS,
+ .value = ST_GYRO_2_FS_AVL_2000_VAL,
+ .gain = ST_GYRO_2_FS_AVL_2000_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_GYRO_2_BDU_ADDR,
+ .mask = ST_GYRO_2_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_GYRO_2_DRDY_IRQ_ADDR,
+ .mask = ST_GYRO_2_DRDY_IRQ_MASK,
+ },
+ .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+static int st_gyro_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val,
+ int *val2, long mask)
+{
+ int err;
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = st_sensors_read_info_raw(indio_dev, ch, val);
+ if (err < 0)
+ goto read_error;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = gdata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+read_error:
+ return err;
+}
+
+static int st_gyro_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static ST_SENSOR_DEV_ATTR_SAMP_FREQ();
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_anglvel_scale_available);
+
+static struct attribute *st_gyro_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_scale_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_gyro_attribute_group = {
+ .attrs = st_gyro_attributes,
+};
+
+static const struct iio_info gyro_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_gyro_attribute_group,
+ .read_raw = &st_gyro_read_raw,
+ .write_raw = &st_gyro_write_raw,
+};
+
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_gyro_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+};
+#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
+#else
+#define ST_GYRO_TRIGGER_OPS NULL
+#endif
+
+int st_gyro_common_probe(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &gyro_info;
+
+ err = st_sensors_check_device_support(indio_dev,
+ ARRAY_SIZE(st_gyro_sensors), st_gyro_sensors);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ gdata->multiread_bit = gdata->sensor->multi_read_bit;
+ indio_dev->channels = gdata->sensor->ch;
+ indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+ gdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &gdata->sensor->fs.fs_avl[0];
+ gdata->odr = gdata->sensor->odr.odr_avl[0].hz;
+
+ err = st_sensors_init_sensor(indio_dev);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ if (gdata->get_irq_data_ready(indio_dev) > 0) {
+ err = st_gyro_allocate_ring(indio_dev);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ err = st_sensors_allocate_trigger(indio_dev,
+ ST_GYRO_TRIGGER_OPS);
+ if (err < 0)
+ goto st_gyro_probe_trigger_error;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto st_gyro_device_register_error;
+
+ return err;
+
+st_gyro_device_register_error:
+ if (gdata->get_irq_data_ready(indio_dev) > 0)
+ st_sensors_deallocate_trigger(indio_dev);
+st_gyro_probe_trigger_error:
+ if (gdata->get_irq_data_ready(indio_dev) > 0)
+ st_gyro_deallocate_ring(indio_dev);
+st_gyro_common_probe_error:
+ return err;
+}
+EXPORT_SYMBOL(st_gyro_common_probe);
+
+void st_gyro_common_remove(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (gdata->get_irq_data_ready(indio_dev) > 0) {
+ st_sensors_deallocate_trigger(indio_dev);
+ st_gyro_deallocate_ring(indio_dev);
+ }
+ iio_device_free(indio_dev);
+}
+EXPORT_SYMBOL(st_gyro_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
new file mode 100644
index 000000000000..8a310500573d
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -0,0 +1,84 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_gyro.h"
+
+static int st_gyro_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *gdata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*gdata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ gdata = iio_priv(indio_dev);
+ gdata->dev = &client->dev;
+
+ st_sensors_i2c_configure(indio_dev, client, gdata);
+
+ err = st_gyro_common_probe(indio_dev);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ return 0;
+
+st_gyro_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_gyro_i2c_remove(struct i2c_client *client)
+{
+ st_gyro_common_remove(i2c_get_clientdata(client));
+
+ return 0;
+}
+
+static const struct i2c_device_id st_gyro_id_table[] = {
+ { L3G4200D_GYRO_DEV_NAME },
+ { LSM330D_GYRO_DEV_NAME },
+ { LSM330DL_GYRO_DEV_NAME },
+ { LSM330DLC_GYRO_DEV_NAME },
+ { L3GD20_GYRO_DEV_NAME },
+ { L3GD20H_GYRO_DEV_NAME },
+ { L3G4IS_GYRO_DEV_NAME },
+ { LSM330_GYRO_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
+
+static struct i2c_driver st_gyro_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-gyro-i2c",
+ },
+ .probe = st_gyro_i2c_probe,
+ .remove = st_gyro_i2c_remove,
+ .id_table = st_gyro_id_table,
+};
+module_i2c_driver(st_gyro_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
new file mode 100644
index 000000000000..f3540390eb22
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -0,0 +1,83 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_gyro.h"
+
+static int st_gyro_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *gdata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*gdata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ gdata = iio_priv(indio_dev);
+ gdata->dev = &spi->dev;
+
+ st_sensors_spi_configure(indio_dev, spi, gdata);
+
+ err = st_gyro_common_probe(indio_dev);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ return 0;
+
+st_gyro_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_gyro_spi_remove(struct spi_device *spi)
+{
+ st_gyro_common_remove(spi_get_drvdata(spi));
+
+ return 0;
+}
+
+static const struct spi_device_id st_gyro_id_table[] = {
+ { L3G4200D_GYRO_DEV_NAME },
+ { LSM330D_GYRO_DEV_NAME },
+ { LSM330DL_GYRO_DEV_NAME },
+ { LSM330DLC_GYRO_DEV_NAME },
+ { L3GD20_GYRO_DEV_NAME },
+ { L3GD20H_GYRO_DEV_NAME },
+ { L3G4IS_GYRO_DEV_NAME },
+ { LSM330_GYRO_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
+
+static struct spi_driver st_gyro_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-gyro-spi",
+ },
+ .probe = st_gyro_spi_probe,
+ .remove = st_gyro_spi_remove,
+ .id_table = st_gyro_id_table,
+};
+module_spi_driver(st_gyro_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 3d79a40e916b..4f40a10cb74f 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -3,6 +3,17 @@
#
menu "Inertial measurement units"
+config ADIS16400
+ tristate "Analog Devices ADIS16400 and similar IMU SPI driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16300, adis16334,
+ adis16350, adis16354, adis16355, adis16360, adis16362, adis16364,
+ adis16365, adis16400 and adis16405 triaxial inertial sensors
+ (adis16400 series also have magnetometers).
+
config ADIS16480
tristate "Analog Devices ADIS16480 and similar IMU driver"
depends on SPI
@@ -25,3 +36,5 @@ config IIO_ADIS_LIB_BUFFER
help
A set of buffer helper functions for the Analog Devices ADIS* device
family.
+
+source "drivers/iio/imu/inv_mpu6050/Kconfig"
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index cfe57638f6f9..f2f56ceaed26 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -2,9 +2,14 @@
# Makefile for Inertial Measurement Units
#
+adis16400-y := adis16400_core.o
+adis16400-$(CONFIG_IIO_BUFFER) += adis16400_buffer.o
+obj-$(CONFIG_ADIS16400) += adis16400.o
obj-$(CONFIG_ADIS16480) += adis16480.o
adis_lib-y += adis.o
adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
+
+obj-y += inv_mpu6050/
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 7a105e966464..2f8f9d632386 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -17,12 +17,11 @@
#ifndef SPI_ADIS16400_H_
#define SPI_ADIS16400_H_
+#include <linux/iio/imu/adis.h>
+
#define ADIS16400_STARTUP_DELAY 290 /* ms */
#define ADIS16400_MTEST_DELAY 90 /* ms */
-#define ADIS16400_READ_REG(a) a
-#define ADIS16400_WRITE_REG(a) ((a) | 0x80)
-
#define ADIS16400_FLASH_CNT 0x00 /* Flash memory write count */
#define ADIS16400_SUPPLY_OUT 0x02 /* Power supply measurement */
#define ADIS16400_XGYRO_OUT 0x04 /* X-axis gyroscope output */
@@ -45,6 +44,9 @@
#define ADIS16300_ROLL_OUT 0x14 /* Y axis inclinometer output measurement */
#define ADIS16300_AUX_ADC 0x16 /* Auxiliary ADC measurement */
+#define ADIS16448_BARO_OUT 0x16 /* Barometric pressure output */
+#define ADIS16448_TEMP_OUT 0x18 /* Temperature output */
+
/* Calibration parameters */
#define ADIS16400_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
#define ADIS16400_YGYRO_OFF 0x1C /* Y-axis gyroscope bias offset factor */
@@ -75,7 +77,10 @@
#define ADIS16400_ALM_CTRL 0x48 /* Alarm control */
#define ADIS16400_AUX_DAC 0x4A /* Auxiliary DAC data */
+#define ADIS16334_LOT_ID1 0x52 /* Lot identification code 1 */
+#define ADIS16334_LOT_ID2 0x54 /* Lot identification code 2 */
#define ADIS16400_PRODUCT_ID 0x56 /* Product identifier */
+#define ADIS16334_SERIAL_NUMBER 0x58 /* Serial number, lot specific */
#define ADIS16400_ERROR_ACTIVE (1<<14)
#define ADIS16400_NEW_DATA (1<<14)
@@ -96,21 +101,21 @@
#define ADIS16400_SMPL_PRD_DIV_MASK 0x7F
/* DIAG_STAT */
-#define ADIS16400_DIAG_STAT_ZACCL_FAIL (1<<15)
-#define ADIS16400_DIAG_STAT_YACCL_FAIL (1<<14)
-#define ADIS16400_DIAG_STAT_XACCL_FAIL (1<<13)
-#define ADIS16400_DIAG_STAT_XGYRO_FAIL (1<<12)
-#define ADIS16400_DIAG_STAT_YGYRO_FAIL (1<<11)
-#define ADIS16400_DIAG_STAT_ZGYRO_FAIL (1<<10)
-#define ADIS16400_DIAG_STAT_ALARM2 (1<<9)
-#define ADIS16400_DIAG_STAT_ALARM1 (1<<8)
-#define ADIS16400_DIAG_STAT_FLASH_CHK (1<<6)
-#define ADIS16400_DIAG_STAT_SELF_TEST (1<<5)
-#define ADIS16400_DIAG_STAT_OVERFLOW (1<<4)
-#define ADIS16400_DIAG_STAT_SPI_FAIL (1<<3)
-#define ADIS16400_DIAG_STAT_FLASH_UPT (1<<2)
-#define ADIS16400_DIAG_STAT_POWER_HIGH (1<<1)
-#define ADIS16400_DIAG_STAT_POWER_LOW (1<<0)
+#define ADIS16400_DIAG_STAT_ZACCL_FAIL 15
+#define ADIS16400_DIAG_STAT_YACCL_FAIL 14
+#define ADIS16400_DIAG_STAT_XACCL_FAIL 13
+#define ADIS16400_DIAG_STAT_XGYRO_FAIL 12
+#define ADIS16400_DIAG_STAT_YGYRO_FAIL 11
+#define ADIS16400_DIAG_STAT_ZGYRO_FAIL 10
+#define ADIS16400_DIAG_STAT_ALARM2 9
+#define ADIS16400_DIAG_STAT_ALARM1 8
+#define ADIS16400_DIAG_STAT_FLASH_CHK 6
+#define ADIS16400_DIAG_STAT_SELF_TEST 5
+#define ADIS16400_DIAG_STAT_OVERFLOW 4
+#define ADIS16400_DIAG_STAT_SPI_FAIL 3
+#define ADIS16400_DIAG_STAT_FLASH_UPT 2
+#define ADIS16400_DIAG_STAT_POWER_HIGH 1
+#define ADIS16400_DIAG_STAT_POWER_LOW 0
/* GLOB_CMD */
#define ADIS16400_GLOB_CMD_SW_RESET (1<<7)
@@ -126,9 +131,6 @@
#define ADIS16334_RATE_DIV_SHIFT 8
#define ADIS16334_RATE_INT_CLK BIT(0)
-#define ADIS16400_MAX_TX 24
-#define ADIS16400_MAX_RX 24
-
#define ADIS16400_SPI_SLOW (u32)(300 * 1000)
#define ADIS16400_SPI_BURST (u32)(1000 * 1000)
#define ADIS16400_SPI_FAST (u32)(2000 * 1000)
@@ -136,6 +138,9 @@
#define ADIS16400_HAS_PROD_ID BIT(0)
#define ADIS16400_NO_BURST BIT(1)
#define ADIS16400_HAS_SLOW_MODE BIT(2)
+#define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
+
+struct adis16400_state;
struct adis16400_chip_info {
const struct iio_chan_spec *channels;
@@ -145,95 +150,63 @@ struct adis16400_chip_info {
unsigned int accel_scale_micro;
int temp_scale_nano;
int temp_offset;
- unsigned long default_scan_mask;
- int (*set_freq)(struct iio_dev *indio_dev, unsigned int freq);
- int (*get_freq)(struct iio_dev *indio_dev);
+ int (*set_freq)(struct adis16400_state *st, unsigned int freq);
+ int (*get_freq)(struct adis16400_state *st);
};
/**
* struct adis16400_state - device instance specific data
- * @us: actual spi_device
- * @trig: data ready trigger registered with iio
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- * @filt_int: integer part of requested filter frequency
+ * @variant: chip variant info
+ * @filt_int: integer part of requested filter frequency
+ * @adis: adis device
**/
struct adis16400_state {
- struct spi_device *us;
- struct iio_trigger *trig;
- struct mutex buf_lock;
struct adis16400_chip_info *variant;
int filt_int;
- u8 tx[ADIS16400_MAX_TX] ____cacheline_aligned;
- u8 rx[ADIS16400_MAX_RX] ____cacheline_aligned;
+ struct adis adis;
};
-int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
-
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
-#define ADIS16400_SCAN_SUPPLY 0
-#define ADIS16400_SCAN_GYRO_X 1
-#define ADIS16400_SCAN_GYRO_Y 2
-#define ADIS16400_SCAN_GYRO_Z 3
-#define ADIS16400_SCAN_ACC_X 4
-#define ADIS16400_SCAN_ACC_Y 5
-#define ADIS16400_SCAN_ACC_Z 6
-#define ADIS16400_SCAN_MAGN_X 7
-#define ADIS16350_SCAN_TEMP_X 7
-#define ADIS16400_SCAN_MAGN_Y 8
-#define ADIS16350_SCAN_TEMP_Y 8
-#define ADIS16400_SCAN_MAGN_Z 9
-#define ADIS16350_SCAN_TEMP_Z 9
-#define ADIS16400_SCAN_TEMP 10
-#define ADIS16350_SCAN_ADC_0 10
-#define ADIS16400_SCAN_ADC_0 11
-#define ADIS16300_SCAN_INCLI_X 12
-#define ADIS16300_SCAN_INCLI_Y 13
+enum {
+ ADIS16400_SCAN_SUPPLY,
+ ADIS16400_SCAN_GYRO_X,
+ ADIS16400_SCAN_GYRO_Y,
+ ADIS16400_SCAN_GYRO_Z,
+ ADIS16400_SCAN_ACC_X,
+ ADIS16400_SCAN_ACC_Y,
+ ADIS16400_SCAN_ACC_Z,
+ ADIS16400_SCAN_MAGN_X,
+ ADIS16400_SCAN_MAGN_Y,
+ ADIS16400_SCAN_MAGN_Z,
+ ADIS16400_SCAN_BARO,
+ ADIS16350_SCAN_TEMP_X,
+ ADIS16350_SCAN_TEMP_Y,
+ ADIS16350_SCAN_TEMP_Z,
+ ADIS16300_SCAN_INCLI_X,
+ ADIS16300_SCAN_INCLI_Y,
+ ADIS16400_SCAN_ADC,
+};
#ifdef CONFIG_IIO_BUFFER
-void adis16400_remove_trigger(struct iio_dev *indio_dev);
-int adis16400_probe_trigger(struct iio_dev *indio_dev);
ssize_t adis16400_read_data_from_ring(struct device *dev,
struct device_attribute *attr,
char *buf);
-int adis16400_configure_ring(struct iio_dev *indio_dev);
-void adis16400_unconfigure_ring(struct iio_dev *indio_dev);
+int adis16400_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+irqreturn_t adis16400_trigger_handler(int irq, void *p);
#else /* CONFIG_IIO_BUFFER */
-static inline void adis16400_remove_trigger(struct iio_dev *indio_dev)
-{
-}
-
-static inline int adis16400_probe_trigger(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline ssize_t
-adis16400_read_data_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return 0;
-}
-
-static int adis16400_configure_ring(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
-{
-}
+#define adis16400_update_scan_mode NULL
+#define adis16400_trigger_handler NULL
#endif /* CONFIG_IIO_BUFFER */
+
#endif /* SPI_ADIS16400_H_ */
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
new file mode 100644
index 000000000000..054c01d6e73c
--- /dev/null
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -0,0 +1,96 @@
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "adis16400.h"
+
+int adis16400_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ uint16_t *tx, *rx;
+
+ if (st->variant->flags & ADIS16400_NO_BURST)
+ return adis_update_scan_mode(indio_dev, scan_mask);
+
+ kfree(adis->xfer);
+ kfree(adis->buffer);
+
+ adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
+ if (!adis->xfer)
+ return -ENOMEM;
+
+ adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
+ GFP_KERNEL);
+ if (!adis->buffer)
+ return -ENOMEM;
+
+ rx = adis->buffer;
+ tx = adis->buffer + indio_dev->scan_bytes;
+
+ tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
+ tx[1] = 0;
+
+ adis->xfer[0].tx_buf = tx;
+ adis->xfer[0].bits_per_word = 8;
+ adis->xfer[0].len = 2;
+ adis->xfer[1].rx_buf = adis->buffer;
+ adis->xfer[1].bits_per_word = 8;
+ adis->xfer[1].len = indio_dev->scan_bytes;
+
+ spi_message_init(&adis->msg);
+ spi_message_add_tail(&adis->xfer[0], &adis->msg);
+ spi_message_add_tail(&adis->xfer[1], &adis->msg);
+
+ return 0;
+}
+
+irqreturn_t adis16400_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16400_state *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ u32 old_speed_hz = st->adis.spi->max_speed_hz;
+ int ret;
+
+ if (!adis->buffer)
+ return -ENOMEM;
+
+ if (!(st->variant->flags & ADIS16400_NO_BURST) &&
+ st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
+ spi_setup(st->adis.spi);
+ }
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret)
+ dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
+
+ if (!(st->variant->flags & ADIS16400_NO_BURST)) {
+ st->adis.spi->max_speed_hz = old_speed_hz;
+ spi_setup(st->adis.spi);
+ }
+
+ /* Guaranteed to be aligned to an 8-byte boundary */
+ if (indio_dev->scan_timestamp) {
+ void *b = adis->buffer + indio_dev->scan_bytes - sizeof(s64);
+ *(s64 *)b = pf->timestamp;
+ }
+
+ iio_push_to_buffers(indio_dev, adis->buffer);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
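In burst mode the two functions above share a single allocation: the first scan_bytes bytes receive the streamed samples (with the timestamp, when enabled, occupying the last 8 of them), and the extra bytes appended after scan_bytes hold the GLOB_CMD burst-read command used as the transmit buffer. A rough sketch of that layout arithmetic, using an illustrative channel count rather than one derived from a specific scan mask:

#include <stdio.h>

int main(void)
{
        unsigned int channels = 11;                /* illustrative 16-bit scan elements */
        unsigned int data_bytes = channels * 2;    /* 22 bytes of sample data */
        /* IIO pads the scan so the s64 timestamp lands on an 8-byte boundary */
        unsigned int scan_bytes = ((data_bytes + 7) / 8) * 8 + 8;
        unsigned int alloc_bytes = scan_bytes + 2; /* plus the 16-bit tx command */

        printf("rx samples : bytes [0, %u)\n", data_bytes);
        printf("timestamp  : bytes [%u, %u)\n", scan_bytes - 8, scan_bytes);
        printf("tx command : bytes [%u, %u)\n", scan_bytes, alloc_bytes);
        return 0;
}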
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
new file mode 100644
index 000000000000..b7f215eab5de
--- /dev/null
+++ b/drivers/iio/imu/adis16400_core.c
@@ -0,0 +1,965 @@
+/*
+ * adis16400.c support Analog Devices ADIS16400/5
+ * 3d 2g Linear Accelerometers,
+ * 3d Gyroscopes,
+ * 3d Magnetometers via SPI
+ *
+ * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include "adis16400.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t adis16400_show_serial_number(struct file *file,
+ char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct adis16400_state *st = file->private_data;
+ u16 lot1, lot2, serial_number;
+ char buf[16];
+ size_t len;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID1, &lot1);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID2, &lot2);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16334_SERIAL_NUMBER,
+ &serial_number);
+ if (ret < 0)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf), "%.4x-%.4x-%.4x\n", lot1, lot2,
+ serial_number);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16400_serial_number_fops = {
+ .open = simple_open,
+ .read = adis16400_show_serial_number,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16400_show_product_id(void *arg, u64 *val)
+{
+ struct adis16400_state *st = arg;
+ uint16_t prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_PRODUCT_ID, &prod_id);
+ if (ret < 0)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16400_product_id_fops,
+ adis16400_show_product_id, NULL, "%lld\n");
+
+static int adis16400_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16400_state *st = arg;
+ uint16_t flash_count;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_FLASH_CNT, &flash_count);
+ if (ret < 0)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16400_flash_count_fops,
+ adis16400_show_flash_count, NULL, "%lld\n");
+
+static int adis16400_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+
+ if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER)
+ debugfs_create_file("serial_number", 0400,
+ indio_dev->debugfs_dentry, st,
+ &adis16400_serial_number_fops);
+ if (st->variant->flags & ADIS16400_HAS_PROD_ID)
+ debugfs_create_file("product_id", 0400,
+ indio_dev->debugfs_dentry, st,
+ &adis16400_product_id_fops);
+ debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+ st, &adis16400_flash_count_fops);
+
+ return 0;
+}
+
+#else
+
+static int adis16400_debugfs_init(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+#endif
+
+enum adis16400_chip_variant {
+ ADIS16300,
+ ADIS16334,
+ ADIS16350,
+ ADIS16360,
+ ADIS16362,
+ ADIS16364,
+ ADIS16400,
+ ADIS16448,
+};
+
+static int adis16334_get_freq(struct adis16400_state *st)
+{
+ int ret;
+ uint16_t t;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+
+ t >>= ADIS16334_RATE_DIV_SHIFT;
+
+ return 819200 >> t;
+}
+
+static int adis16334_set_freq(struct adis16400_state *st, unsigned int freq)
+{
+ unsigned int t;
+
+ if (freq < 819200)
+ t = ilog2(819200 / freq);
+ else
+ t = 0;
+
+ if (t > 0x31)
+ t = 0x31;
+
+ t <<= ADIS16334_RATE_DIV_SHIFT;
+ t |= ADIS16334_RATE_INT_CLK;
+
+ return adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
+}
+
+static int adis16400_get_freq(struct adis16400_state *st)
+{
+ int sps, ret;
+ uint16_t t;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+
+ sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 52851 : 1638404;
+ sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
+
+ return sps;
+}
+
+static int adis16400_set_freq(struct adis16400_state *st, unsigned int freq)
+{
+ unsigned int t;
+ uint8_t val = 0;
+
+ t = 1638404 / freq;
+ if (t >= 128) {
+ val |= ADIS16400_SMPL_PRD_TIME_BASE;
+ t = 52851 / freq;
+ if (t >= 128)
+ t = 127;
+ } else if (t != 0) {
+ t--;
+ }
+
+ val |= t;
+
+ if (t >= 0x0A || (val & ADIS16400_SMPL_PRD_TIME_BASE))
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_SLOW;
+ else
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+
+ return adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
+}
+
+static ssize_t adis16400_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = st->variant->get_freq(st);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d.%.3d\n", ret / 1000, ret % 1000);
+}
+
+static const unsigned adis16400_3db_divisors[] = {
+ [0] = 2, /* Special case */
+ [1] = 6,
+ [2] = 12,
+ [3] = 25,
+ [4] = 50,
+ [5] = 100,
+ [6] = 200,
+ [7] = 200, /* Not a valid setting */
+};
+
+static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ uint16_t val16;
+ int i, ret;
+
+ for (i = ARRAY_SIZE(adis16400_3db_divisors) - 1; i >= 1; i--) {
+ if (sps / adis16400_3db_divisors[i] >= val)
+ break;
+ }
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
+ (val16 & ~0x07) | i);
+ return ret;
+}
+
+static ssize_t adis16400_write_frequency(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int i, f, val;
+ int ret;
+
+ ret = iio_str_to_fixpoint(buf, 100, &i, &f);
+ if (ret)
+ return ret;
+
+ val = i * 1000 + f;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ st->variant->set_freq(st, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+/* Power down the device */
+static int adis16400_stop_device(struct iio_dev *indio_dev)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = adis_write_reg_16(&st->adis, ADIS16400_SLP_CNT,
+ ADIS16400_SLP_CNT_POWER_OFF);
+ if (ret)
+ dev_err(&indio_dev->dev,
+ "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
+static int adis16400_initial_setup(struct iio_dev *indio_dev)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ uint16_t prod_id, smp_prd;
+ unsigned int device_id;
+ int ret;
+
+ /* use low spi speed for init if the device has a slow mode */
+ if (st->variant->flags & ADIS16400_HAS_SLOW_MODE)
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_SLOW;
+ else
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+ st->adis.spi->mode = SPI_MODE_3;
+ spi_setup(st->adis.spi);
+
+ ret = adis_initial_startup(&st->adis);
+ if (ret)
+ return ret;
+
+ if (st->variant->flags & ADIS16400_HAS_PROD_ID) {
+ ret = adis_read_reg_16(&st->adis,
+ ADIS16400_PRODUCT_ID, &prod_id);
+ if (ret)
+ goto err_ret;
+
+ sscanf(indio_dev->name, "adis%u\n", &device_id);
+
+ if (prod_id != device_id)
+ dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
+ device_id, prod_id);
+
+ dev_info(&indio_dev->dev, "%s: prod_id 0x%04x at CS%d (irq %d)\n",
+ indio_dev->name, prod_id,
+ st->adis.spi->chip_select, st->adis.spi->irq);
+ }
+ /* use high spi speed if possible */
+ if (st->variant->flags & ADIS16400_HAS_SLOW_MODE) {
+ ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &smp_prd);
+ if (ret)
+ goto err_ret;
+
+ if ((smp_prd & ADIS16400_SMPL_PRD_DIV_MASK) < 0x0A) {
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+ spi_setup(st->adis.spi);
+ }
+ }
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16400_read_frequency,
+ adis16400_write_frequency);
+
+static const uint8_t adis16400_addresses[] = {
+ [ADIS16400_SCAN_GYRO_X] = ADIS16400_XGYRO_OFF,
+ [ADIS16400_SCAN_GYRO_Y] = ADIS16400_YGYRO_OFF,
+ [ADIS16400_SCAN_GYRO_Z] = ADIS16400_ZGYRO_OFF,
+ [ADIS16400_SCAN_ACC_X] = ADIS16400_XACCL_OFF,
+ [ADIS16400_SCAN_ACC_Y] = ADIS16400_YACCL_OFF,
+ [ADIS16400_SCAN_ACC_Z] = ADIS16400_ZACCL_OFF,
+};
+
+static int adis16400_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret, sps;
+
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&indio_dev->mlock);
+ ret = adis_write_reg_16(&st->adis,
+ adis16400_addresses[chan->scan_index], val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ /*
+ * Need to cache values so we can update if the frequency
+ * changes.
+ */
+ mutex_lock(&indio_dev->mlock);
+ st->filt_int = val;
+ /* Work out update to current value */
+ sps = st->variant->get_freq(st);
+ if (sps < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return sps;
+ }
+
+ ret = adis16400_set_filter(indio_dev, sps,
+ val * 1000 + val2 / 1000);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16400_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int16_t val16;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ *val2 = st->variant->gyro_scale_micro;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_VOLTAGE:
+ *val = 0;
+ if (chan->channel == 0) {
+ *val = 2;
+ *val2 = 418000; /* 2.418 mV */
+ } else {
+ *val = 0;
+ *val2 = 805800; /* 805.8 uV */
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = st->variant->accel_scale_micro;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MAGN:
+ *val = 0;
+ *val2 = 500; /* 0.5 mgauss */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = st->variant->temp_scale_nano / 1000000;
+ *val2 = (st->variant->temp_scale_nano % 1000000);
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&indio_dev->mlock);
+ ret = adis_read_reg_16(&st->adis,
+ adis16400_addresses[chan->scan_index], &val16);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret)
+ return ret;
+ val16 = ((val16 & 0xFFF) << 4) >> 4;
+ *val = val16;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ /* currently only temperature */
+ *val = st->variant->temp_offset;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&indio_dev->mlock);
+ /* Need both the number of taps and the sampling frequency */
+ ret = adis_read_reg_16(&st->adis,
+ ADIS16400_SENS_AVG,
+ &val16);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ ret = st->variant->get_freq(st);
+ if (ret >= 0) {
+ ret /= adis16400_3db_divisors[val16 & 0x07];
+ *val = ret / 1000;
+ *val2 = (ret % 1000) * 1000;
+ }
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .extend_name = name, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .address = (addr), \
+ .scan_index = (si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_SUPPLY_CHAN(addr, bits) \
+ ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
+
+#define ADIS16400_AUX_ADC_CHAN(addr, bits) \
+ ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
+
+#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT, \
+ .address = addr, \
+ .scan_index = ADIS16400_SCAN_GYRO_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_ACCEL_CHAN(mod, addr, bits) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16400_SCAN_ACC_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_MAGN_CHAN(mod, addr, bits) { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16400_SCAN_MAGN_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_MOD_TEMP_NAME_X "x"
+#define ADIS16400_MOD_TEMP_NAME_Y "y"
+#define ADIS16400_MOD_TEMP_NAME_Z "z"
+
+#define ADIS16400_MOD_TEMP_CHAN(mod, addr, bits) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .extend_name = ADIS16400_MOD_TEMP_NAME_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16350_SCAN_TEMP_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_TEMP_CHAN(addr, bits) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16350_SCAN_TEMP_X, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_INCLI_CHAN(mod, addr, bits) { \
+ .type = IIO_INCLI, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16300_SCAN_INCLI_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+static const struct iio_chan_spec adis16400_channels[] = {
+ ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 14),
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 14),
+ ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 14),
+ ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
+ ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
+ ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(12)
+};
+
+static const struct iio_chan_spec adis16448_channels[] = {
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 16),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 16),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 16),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 16),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 16),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 16),
+ ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 16),
+ ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 16),
+ ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 16),
+ {
+ .type = IIO_PRESSURE,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ .address = ADIS16448_BARO_OUT,
+ .scan_index = ADIS16400_SCAN_BARO,
+ .scan_type = IIO_ST('s', 16, 16, 0),
+ },
+ ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(11)
+};
+
+static const struct iio_chan_spec adis16350_channels[] = {
+ ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 12),
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 14),
+ ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 14),
+ ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
+ ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
+ ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
+ ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
+ ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(11)
+};
+
+static const struct iio_chan_spec adis16300_channels[] = {
+ ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 12),
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
+ ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
+ ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
+ ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
+ IIO_CHAN_SOFT_TIMESTAMP(14)
+};
+
+static const struct iio_chan_spec adis16334_channels[] = {
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(8)
+};
+
+static struct attribute *adis16400_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16400_attribute_group = {
+ .attrs = adis16400_attributes,
+};
+
+static struct adis16400_chip_info adis16400_chips[] = {
+ [ADIS16300] = {
+ .channels = adis16300_channels,
+ .num_channels = ARRAY_SIZE(adis16300_channels),
+ .flags = ADIS16400_HAS_SLOW_MODE,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = 5884,
+ .temp_scale_nano = 140000000, /* 0.14 C */
+ .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16334] = {
+ .channels = adis16334_channels,
+ .num_channels = ARRAY_SIZE(adis16334_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_NO_BURST |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+ .temp_scale_nano = 67850000, /* 0.06785 C */
+ .temp_offset = 25000000 / 67850, /* 25 C = 0x00 */
+ .set_freq = adis16334_set_freq,
+ .get_freq = adis16334_get_freq,
+ },
+ [ADIS16350] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(73260), /* 0.07326 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(2522), /* 0.002522 g */
+ .temp_scale_nano = 145300000, /* 0.1453 C */
+ .temp_offset = 25000000 / 145300, /* 25 C = 0x00 */
+ .flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16360] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16362] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(333), /* 0.333 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16364] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16400] = {
+ .channels = adis16400_channels,
+ .num_channels = ARRAY_SIZE(adis16400_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+ .temp_scale_nano = 140000000, /* 0.14 C */
+ .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16448] = {
+ .channels = adis16448_channels,
+ .num_channels = ARRAY_SIZE(adis16448_channels),
+ .flags = ADIS16400_HAS_PROD_ID |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
+ .temp_scale_nano = 73860000, /* 0.07386 C */
+ .temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
+ .set_freq = adis16334_set_freq,
+ .get_freq = adis16334_get_freq,
+ }
+};
+
+static const struct iio_info adis16400_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &adis16400_read_raw,
+ .write_raw = &adis16400_write_raw,
+ .attrs = &adis16400_attribute_group,
+ .update_scan_mode = adis16400_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+static const unsigned long adis16400_burst_scan_mask[] = {
+ ~0UL,
+ 0,
+};
+
+static const char * const adis16400_status_error_msgs[] = {
+ [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_ALARM2] = "Alarm 2 active",
+ [ADIS16400_DIAG_STAT_ALARM1] = "Alarm 1 active",
+ [ADIS16400_DIAG_STAT_FLASH_CHK] = "Flash checksum error",
+ [ADIS16400_DIAG_STAT_SELF_TEST] = "Self test error",
+ [ADIS16400_DIAG_STAT_OVERFLOW] = "Sensor overrange",
+ [ADIS16400_DIAG_STAT_SPI_FAIL] = "SPI failure",
+ [ADIS16400_DIAG_STAT_FLASH_UPT] = "Flash update failed",
+ [ADIS16400_DIAG_STAT_POWER_HIGH] = "Power supply above 5.25V",
+ [ADIS16400_DIAG_STAT_POWER_LOW] = "Power supply below 4.75V",
+};
+
+static const struct adis_data adis16400_data = {
+ .msc_ctrl_reg = ADIS16400_MSC_CTRL,
+ .glob_cmd_reg = ADIS16400_GLOB_CMD,
+ .diag_stat_reg = ADIS16400_DIAG_STAT,
+
+ .read_delay = 50,
+ .write_delay = 50,
+
+ .self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST,
+ .startup_delay = ADIS16400_STARTUP_DELAY,
+
+ .status_error_msgs = adis16400_status_error_msgs,
+ .status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_YACCL_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_XACCL_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_XGYRO_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_YGYRO_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_ZGYRO_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_ALARM2) |
+ BIT(ADIS16400_DIAG_STAT_ALARM1) |
+ BIT(ADIS16400_DIAG_STAT_FLASH_CHK) |
+ BIT(ADIS16400_DIAG_STAT_SELF_TEST) |
+ BIT(ADIS16400_DIAG_STAT_OVERFLOW) |
+ BIT(ADIS16400_DIAG_STAT_SPI_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_FLASH_UPT) |
+ BIT(ADIS16400_DIAG_STAT_POWER_HIGH) |
+ BIT(ADIS16400_DIAG_STAT_POWER_LOW),
+};
+
+static int adis16400_probe(struct spi_device *spi)
+{
+ struct adis16400_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
+
+ /* setup the industrialio driver allocated elements */
+ st->variant = &adis16400_chips[spi_get_device_id(spi)->driver_data];
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->channels = st->variant->channels;
+ indio_dev->num_channels = st->variant->num_channels;
+ indio_dev->info = &adis16400_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (!(st->variant->flags & ADIS16400_NO_BURST))
+ indio_dev->available_scan_masks = adis16400_burst_scan_mask;
+
+ ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
+ if (ret)
+ goto error_free_dev;
+
+ ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16400_trigger_handler);
+ if (ret)
+ goto error_free_dev;
+
+ /* Get the device into a sane initial state */
+ ret = adis16400_initial_setup(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+
+ adis16400_debugfs_init(indio_dev);
+ return 0;
+
+error_cleanup_buffer:
+ adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+error_free_dev:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int adis16400_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis16400_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis16400_stop_device(indio_dev);
+
+ adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id adis16400_id[] = {
+ {"adis16300", ADIS16300},
+ {"adis16334", ADIS16334},
+ {"adis16350", ADIS16350},
+ {"adis16354", ADIS16350},
+ {"adis16355", ADIS16350},
+ {"adis16360", ADIS16360},
+ {"adis16362", ADIS16362},
+ {"adis16364", ADIS16364},
+ {"adis16365", ADIS16360},
+ {"adis16400", ADIS16400},
+ {"adis16405", ADIS16400},
+ {"adis16448", ADIS16448},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adis16400_id);
+
+static struct spi_driver adis16400_driver = {
+ .driver = {
+ .name = "adis16400",
+ .owner = THIS_MODULE,
+ },
+ .id_table = adis16400_id,
+ .probe = adis16400_probe,
+ .remove = adis16400_remove,
+};
+module_spi_driver(adis16400_driver);
+
+MODULE_AUTHOR("Manuel Stahl <manuel.stahl@iis.fraunhofer.de>");
+MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
new file mode 100644
index 000000000000..b5cfa3a354cf
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -0,0 +1,13 @@
+#
+# inv-mpu6050 drivers for Invensense MPU devices and combos
+#
+
+config INV_MPU6050_IIO
+ tristate "Invensense MPU6050 devices"
+ depends on I2C && SYSFS
+ select IIO_TRIGGERED_BUFFER
+ help
+ This driver supports the Invensense MPU6050 device,
+ a gyroscope/accelerometer combo device.
+ This driver can be built as a module. The module will be called
+ inv-mpu6050.
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
new file mode 100644
index 000000000000..3a677c778afb
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Invensense MPU6050 device.
+#
+
+obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
+inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
new file mode 100644
index 000000000000..37ca05b47e4b
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -0,0 +1,795 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include "inv_mpu_iio.h"
+
+/*
+ * this is the gyro scale translated from dynamic range plus/minus
+ * {250, 500, 1000, 2000} to rad/s
+ */
+static const int gyro_scale_6050[] = {133090, 266181, 532362, 1064724};
+
+/*
+ * this is the accel scale translated from dynamic range plus/minus
+ * {2, 4, 8, 16} to m/s^2
+ */
+static const int accel_scale[] = {598, 1196, 2392, 4785};
+
+static const struct inv_mpu6050_reg_map reg_set_6050 = {
+ .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
+ .lpf = INV_MPU6050_REG_CONFIG,
+ .user_ctrl = INV_MPU6050_REG_USER_CTRL,
+ .fifo_en = INV_MPU6050_REG_FIFO_EN,
+ .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
+ .accl_config = INV_MPU6050_REG_ACCEL_CONFIG,
+ .fifo_count_h = INV_MPU6050_REG_FIFO_COUNT_H,
+ .fifo_r_w = INV_MPU6050_REG_FIFO_R_W,
+ .raw_gyro = INV_MPU6050_REG_RAW_GYRO,
+ .raw_accl = INV_MPU6050_REG_RAW_ACCEL,
+ .temperature = INV_MPU6050_REG_TEMPERATURE,
+ .int_enable = INV_MPU6050_REG_INT_ENABLE,
+ .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
+ .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
+};
+
+static const struct inv_mpu6050_chip_config chip_config_6050 = {
+ .fsr = INV_MPU6050_FSR_2000DPS,
+ .lpf = INV_MPU6050_FILTER_20HZ,
+ .fifo_rate = INV_MPU6050_INIT_FIFO_RATE,
+ .gyro_fifo_enable = false,
+ .accl_fifo_enable = false,
+ .accl_fs = INV_MPU6050_FS_02G,
+};
+
+static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = {
+ {
+ .num_reg = 117,
+ .name = "MPU6050",
+ .reg = &reg_set_6050,
+ .config = &chip_config_6050,
+ },
+};
+
+int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d)
+{
+ return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &d);
+}
+
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
+{
+ u8 d, mgmt_1;
+ int result;
+
+ /* switching the clock source needs care. Only when the gyro is
+ on can the clock source be switched to the gyro; otherwise it
+ must be set to the internal clock */
+ if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+ result = i2c_smbus_read_i2c_block_data(st->client,
+ st->reg->pwr_mgmt_1, 1, &mgmt_1);
+ if (result != 1)
+ return result;
+
+ mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK;
+ }
+
+ if ((INV_MPU6050_BIT_PWR_GYRO_STBY == mask) && (!en)) {
+ /* turning off the gyro requires switching to the internal clock
+ first, then turning off the gyro engine */
+ mgmt_1 |= INV_CLK_INTERNAL;
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, mgmt_1);
+ if (result)
+ return result;
+ }
+
+ result = i2c_smbus_read_i2c_block_data(st->client,
+ st->reg->pwr_mgmt_2, 1, &d);
+ if (result != 1)
+ return result;
+ if (en)
+ d &= ~mask;
+ else
+ d |= mask;
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_2, d);
+ if (result)
+ return result;
+
+ if (en) {
+ /* Wait for the output to stabilize */
+ msleep(INV_MPU6050_TEMP_UP_TIME);
+ if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+ /* switch internal clock to PLL */
+ mgmt_1 |= INV_CLK_PLL;
+ result = inv_mpu6050_write_reg(st,
+ st->reg->pwr_mgmt_1, mgmt_1);
+ if (result)
+ return result;
+ }
+ }
+
+ return 0;
+}
+
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
+{
+ int result;
+
+ if (power_on)
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, 0);
+ else
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
+ if (result)
+ return result;
+
+ if (power_on)
+ msleep(INV_MPU6050_REG_UP_TIME);
+
+ return 0;
+}
+
+/**
+ * inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
+ *
+ * Initial configuration:
+ * FSR: ± 2000DPS
+ * DLPF: 20Hz
+ * FIFO rate: 50Hz
+ * Clock source: Gyro PLL
+ */
+static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
+{
+ int result;
+ u8 d;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ return result;
+ d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+ if (result)
+ return result;
+
+ d = INV_MPU6050_FILTER_20HZ;
+ result = inv_mpu6050_write_reg(st, st->reg->lpf, d);
+ if (result)
+ return result;
+
+ d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
+ result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+ if (result)
+ return result;
+
+ d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+ if (result)
+ return result;
+
+ memcpy(&st->chip_config, hw_info[st->chip_type].config,
+ sizeof(struct inv_mpu6050_chip_config));
+ result = inv_mpu6050_set_power_itg(st, false);
+
+ return result;
+}
+
+static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
+ int axis, int *val)
+{
+ int ind, result;
+ __be16 d;
+
+ ind = (axis - IIO_MOD_X) * 2;
+ result = i2c_smbus_read_i2c_block_data(st->client, reg + ind, 2,
+ (u8 *)&d);
+ if (result != 2)
+ return -EINVAL;
+ *val = (short)be16_to_cpup(&d);
+
+ return IIO_VAL_INT;
+}
+
+static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask) {
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ {
+ int ret, result;
+
+ ret = IIO_VAL_INT;
+ result = 0;
+ mutex_lock(&indio_dev->mlock);
+ if (!st->chip_config.enable) {
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ goto error_read_raw;
+ }
+ /* when enable is on, power is already on */
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ if (!st->chip_config.gyro_fifo_enable ||
+ !st->chip_config.enable) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ goto error_read_raw;
+ }
+ ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
+ chan->channel2, val);
+ if (!st->chip_config.gyro_fifo_enable ||
+ !st->chip_config.enable) {
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ goto error_read_raw;
+ }
+ break;
+ case IIO_ACCEL:
+ if (!st->chip_config.accl_fifo_enable ||
+ !st->chip_config.enable) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ goto error_read_raw;
+ }
+ ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
+ chan->channel2, val);
+ if (!st->chip_config.accl_fifo_enable ||
+ !st->chip_config.enable) {
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ goto error_read_raw;
+ }
+ break;
+ case IIO_TEMP:
+ /* wait for stabilization */
+ msleep(INV_MPU6050_SENSOR_UP_TIME);
+ inv_mpu6050_sensor_show(st, st->reg->temperature,
+ IIO_MOD_X, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+error_read_raw:
+ if (!st->chip_config.enable)
+ result |= inv_mpu6050_set_power_itg(st, false);
+ mutex_unlock(&indio_dev->mlock);
+ if (result)
+ return result;
+
+ return ret;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ *val2 = gyro_scale_6050[st->chip_config.fsr];
+
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = accel_scale[st->chip_config.accl_fs];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = INV_MPU6050_TEMP_SCALE;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = INV_MPU6050_TEMP_OFFSET;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
+{
+ int result;
+ u8 d;
+
+ if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
+ return -EINVAL;
+ if (fsr == st->chip_config.fsr)
+ return 0;
+
+ d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+ if (result)
+ return result;
+ st->chip_config.fsr = fsr;
+
+ return 0;
+}
+
+static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
+{
+ int result;
+ u8 d;
+
+ if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
+ return -EINVAL;
+ if (fs == st->chip_config.accl_fs)
+ return 0;
+
+ d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+ if (result)
+ return result;
+ st->chip_config.accl_fs = fs;
+
+ return 0;
+}
+
+static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask) {
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int result;
+
+ mutex_lock(&indio_dev->mlock);
+ /* we should only update scale when the chip is disabled, i.e.,
+ not running */
+ if (st->chip_config.enable) {
+ result = -EBUSY;
+ goto error_write_raw;
+ }
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ goto error_write_raw;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ result = inv_mpu6050_write_fsr(st, val);
+ break;
+ case IIO_ACCEL:
+ result = inv_mpu6050_write_accel_fs(st, val);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+error_write_raw:
+ result |= inv_mpu6050_set_power_itg(st, false);
+ mutex_unlock(&indio_dev->mlock);
+
+ return result;
+}
+
+/**
+ * inv_mpu6050_set_lpf() - set low pass filter based on fifo rate.
+ *
+ * Based on the Nyquist principle, the sampling rate must be at
+ * least twice the bandwidth of the signal, or aliasing occurs.
+ * This function searches for the correct low-pass parameter
+ * based on the FIFO rate, i.e. the sampling frequency.
+ */
+static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
+{
+ const int hz[] = {188, 98, 42, 20, 10, 5};
+ const int d[] = {INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
+ INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ};
+ int i, h, result;
+ u8 data;
+
+ h = (rate >> 1);
+ i = 0;
+ while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
+ i++;
+ data = d[i];
+ result = inv_mpu6050_write_reg(st, st->reg->lpf, data);
+ if (result)
+ return result;
+ st->chip_config.lpf = data;
+
+ return 0;
+}
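/*
 * Worked example of the selection above (illustrative values): for a
 * FIFO rate of 50 Hz, h = 25; the loop skips 188, 98 and 42 and stops
 * at hz[3] = 20, so INV_MPU6050_FILTER_20HZ is programmed, the widest
 * DLPF setting that still sits at or below half the sampling rate.
 */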
+
+/**
+ * inv_mpu6050_fifo_rate_store() - Set fifo rate.
+ */
+static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ s32 fifo_rate;
+ u8 d;
+ int result;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ if (kstrtoint(buf, 10, &fifo_rate))
+ return -EINVAL;
+ if (fifo_rate < INV_MPU6050_MIN_FIFO_RATE ||
+ fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
+ return -EINVAL;
+ if (fifo_rate == st->chip_config.fifo_rate)
+ return count;
+
+ mutex_lock(&indio_dev->mlock);
+ if (st->chip_config.enable) {
+ result = -EBUSY;
+ goto fifo_rate_fail;
+ }
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ goto fifo_rate_fail;
+
+ d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
+ result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+ if (result)
+ goto fifo_rate_fail;
+ st->chip_config.fifo_rate = fifo_rate;
+
+ result = inv_mpu6050_set_lpf(st, fifo_rate);
+ if (result)
+ goto fifo_rate_fail;
+
+fifo_rate_fail:
+ result |= inv_mpu6050_set_power_itg(st, false);
+ mutex_unlock(&indio_dev->mlock);
+ if (result)
+ return result;
+
+ return count;
+}
+
+/**
+ * inv_fifo_rate_show() - Get the current sampling rate.
+ */
+static ssize_t inv_fifo_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
+
+ return sprintf(buf, "%d\n", st->chip_config.fifo_rate);
+}
+
+/**
+ * inv_attr_show() - show the current parameters.
+ */
+static ssize_t inv_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s8 *m;
+
+ switch (this_attr->address) {
+ /* In the MPU6050 the two matrices are the same because the gyro
+ and accel are integrated in one chip */
+ case ATTR_GYRO_MATRIX:
+ case ATTR_ACCL_MATRIX:
+ m = st->plat_data.orientation;
+
+ return sprintf(buf, "%d, %d, %d; %d, %d, %d; %d, %d, %d\n",
+ m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * inv_mpu6050_validate_trigger() - validate_trigger callback for invensense
+ * MPU6050 device.
+ * @indio_dev: The IIO device
+ * @trig: The new trigger
+ *
+ * Returns: 0 if the 'trig' matches the trigger registered by the MPU6050
+ * device, -EINVAL otherwise.
+ */
+static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ if (st->trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define INV_MPU6050_CHAN(_type, _channel2, _index) \
+ { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = _channel2, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT \
+ | IIO_CHAN_INFO_RAW_SEPARATE_BIT, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0 , \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec inv_mpu_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
+ /*
+ * Note that temperature should be read via polled reads only,
+ * not as part of the final scan elements output.
+ */
+ {
+ .type = IIO_TEMP,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT
+ | IIO_CHAN_INFO_OFFSET_SEPARATE_BIT
+ | IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ .scan_index = -1,
+ },
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+};
+
+/* constant IIO attribute */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 20 50 100 200 500");
+static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show,
+ inv_mpu6050_fifo_rate_store);
+static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL,
+ ATTR_GYRO_MATRIX);
+static IIO_DEVICE_ATTR(in_accel_matrix, S_IRUGO, inv_attr_show, NULL,
+ ATTR_ACCL_MATRIX);
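+/*
+ * These show up as sysfs attributes of the IIO device (for example, an
+ * illustrative path is /sys/bus/iio/devices/iio:deviceX/sampling_frequency).
+ */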
+
+static struct attribute *inv_attributes[] = {
+ &iio_dev_attr_in_gyro_matrix.dev_attr.attr,
+ &iio_dev_attr_in_accel_matrix.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group inv_attribute_group = {
+ .attrs = inv_attributes
+};
+
+static const struct iio_info mpu_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &inv_mpu6050_read_raw,
+ .write_raw = &inv_mpu6050_write_raw,
+ .attrs = &inv_attribute_group,
+ .validate_trigger = inv_mpu6050_validate_trigger,
+};
+
+/**
+ * inv_check_and_setup_chip() - Check and set up the chip.
+ */
+static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
+ const struct i2c_device_id *id)
+{
+ int result;
+
+ st->chip_type = INV_MPU6050;
+ st->hw = &hw_info[st->chip_type];
+ st->reg = hw_info[st->chip_type].reg;
+
+	/* reset to make sure no previous state is left over */
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_H_RESET);
+ if (result)
+ return result;
+ msleep(INV_MPU6050_POWER_UP_TIME);
+	/* Toggle the power state. After reset, the sleep bit could be on
+	   or off depending on the OTP settings. Toggling power puts the
+	   chip in a known state and keeps the hardware state aligned
+	   with the software state */
+ result = inv_mpu6050_set_power_itg(st, false);
+ if (result)
+ return result;
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ return result;
+
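+	/* switch the accel and gyro engines off until they are needed */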
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ return result;
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ return result;
+
+ return 0;
+}
+
+/**
+ * inv_mpu_probe() - probe function.
+ * @client: i2c client.
+ * @id: i2c device id.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct inv_mpu6050_state *st;
+ struct iio_dev *indio_dev;
+ int result;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK |
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+ result = -ENOSYS;
+ goto out_no_free;
+ }
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+ st = iio_priv(indio_dev);
+ st->client = client;
+ st->plat_data = *(struct inv_mpu6050_platform_data
+ *)dev_get_platdata(&client->dev);
+	/* power is turned on inside inv_check_and_setup_chip() */
+ result = inv_check_and_setup_chip(st, id);
+ if (result)
+ goto out_free;
+
+ result = inv_mpu6050_init_config(indio_dev);
+ if (result) {
+ dev_err(&client->dev,
+ "Could not initialize device.\n");
+ goto out_free;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+
+ indio_dev->info = &mpu_info;
+ indio_dev->modes = INDIO_BUFFER_TRIGGERED;
+
+ result = iio_triggered_buffer_setup(indio_dev,
+ inv_mpu6050_irq_handler,
+ inv_mpu6050_read_fifo,
+ NULL);
+ if (result) {
+ dev_err(&st->client->dev, "configure buffer fail %d\n",
+ result);
+ goto out_free;
+ }
+ result = inv_mpu6050_probe_trigger(indio_dev);
+ if (result) {
+ dev_err(&st->client->dev, "trigger probe fail %d\n", result);
+ goto out_unreg_ring;
+ }
+
+ INIT_KFIFO(st->timestamps);
+ spin_lock_init(&st->time_stamp_lock);
+ result = iio_device_register(indio_dev);
+ if (result) {
+ dev_err(&st->client->dev, "IIO register fail %d\n", result);
+ goto out_remove_trigger;
+ }
+
+ return 0;
+
+out_remove_trigger:
+ inv_mpu6050_remove_trigger(st);
+out_unreg_ring:
+ iio_triggered_buffer_cleanup(indio_dev);
+out_free:
+ iio_device_free(indio_dev);
+out_no_free:
+
+ return result;
+}
+
+static int inv_mpu_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ inv_mpu6050_remove_trigger(st);
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+#ifdef CONFIG_PM_SLEEP
+
+static int inv_mpu_resume(struct device *dev)
+{
+ return inv_mpu6050_set_power_itg(
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev))), true);
+}
+
+static int inv_mpu_suspend(struct device *dev)
+{
+ return inv_mpu6050_set_power_itg(
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev))), false);
+}
+static SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
+
+#define INV_MPU6050_PMOPS (&inv_mpu_pmops)
+#else
+#define INV_MPU6050_PMOPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * The device id table identifies which devices are
+ * supported by this driver
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+ {"mpu6050", INV_MPU6050},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static struct i2c_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "inv-mpu6050",
+ .pm = INV_MPU6050_PMOPS,
+ },
+};
+
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device MPU6050 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
new file mode 100644
index 000000000000..f38395529a44
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -0,0 +1,246 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/platform_data/invensense_mpu6050.h>
+
+/**
+ * struct inv_mpu6050_reg_map - Notable registers.
+ * @sample_rate_div: Divider applied to gyro output rate.
+ * @lpf: Configures internal low pass filter.
+ * @user_ctrl: Enables/resets the FIFO.
+ * @fifo_en: Determines which data will appear in FIFO.
+ * @gyro_config: gyro config register.
+ * @accl_config: accel config register
+ * @fifo_count_h: Upper byte of FIFO count.
+ * @fifo_r_w: FIFO register.
+ * @raw_gyro: Address of first gyro register.
+ * @raw_accl: Address of first accel register.
+ * @temperature: temperature register
+ * @int_enable: Interrupt enable register.
+ * @pwr_mgmt_1: Controls chip's power state and clock source.
+ * @pwr_mgmt_2: Controls power state of individual sensors.
+ */
+struct inv_mpu6050_reg_map {
+ u8 sample_rate_div;
+ u8 lpf;
+ u8 user_ctrl;
+ u8 fifo_en;
+ u8 gyro_config;
+ u8 accl_config;
+ u8 fifo_count_h;
+ u8 fifo_r_w;
+ u8 raw_gyro;
+ u8 raw_accl;
+ u8 temperature;
+ u8 int_enable;
+ u8 pwr_mgmt_1;
+ u8 pwr_mgmt_2;
+};
+
+/* device enum */
+enum inv_devices {
+ INV_MPU6050,
+ INV_NUM_PARTS
+};
+
+/**
+ * struct inv_mpu6050_chip_config - Cached chip configuration data.
+ * @fsr: Full scale range.
+ * @lpf: Digital low pass filter frequency.
+ * @accl_fs: accel full scale range.
+ * @enable: master enable state.
+ * @accl_fifo_enable: enable accel data output
+ * @gyro_fifo_enable: enable gyro data output
+ * @fifo_rate: FIFO update rate.
+ */
+struct inv_mpu6050_chip_config {
+ unsigned int fsr:2;
+ unsigned int lpf:3;
+ unsigned int accl_fs:2;
+ unsigned int enable:1;
+ unsigned int accl_fifo_enable:1;
+ unsigned int gyro_fifo_enable:1;
+ u16 fifo_rate;
+};
+
+/**
+ * struct inv_mpu6050_hw - Other important hardware information.
+ * @num_reg: Number of registers on device.
+ * @name: name of the chip.
+ * @reg: register map of the chip.
+ * @config: configuration of the chip.
+ */
+struct inv_mpu6050_hw {
+ u8 num_reg;
+ u8 *name;
+ const struct inv_mpu6050_reg_map *reg;
+ const struct inv_mpu6050_chip_config *config;
+};
+
+/*
+ * struct inv_mpu6050_state - Driver state variables.
+ * @TIMESTAMP_FIFO_SIZE: size of the timestamp kfifo.
+ * @trig: IIO trigger.
+ * @chip_config: Cached attribute information.
+ * @reg: Map of important registers.
+ * @hw: Other hardware-specific information.
+ * @chip_type: chip type.
+ * @time_stamp_lock: spin lock protecting the timestamp kfifo.
+ * @client: i2c client handle.
+ * @plat_data: platform data.
+ * @timestamps: kfifo queue storing timestamps.
+ */
+struct inv_mpu6050_state {
+#define TIMESTAMP_FIFO_SIZE 16
+ struct iio_trigger *trig;
+ struct inv_mpu6050_chip_config chip_config;
+ const struct inv_mpu6050_reg_map *reg;
+ const struct inv_mpu6050_hw *hw;
+ enum inv_devices chip_type;
+ spinlock_t time_stamp_lock;
+ struct i2c_client *client;
+ struct inv_mpu6050_platform_data plat_data;
+ DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
+};
+
+/* register and associated bit definitions */
+#define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19
+#define INV_MPU6050_REG_CONFIG 0x1A
+#define INV_MPU6050_REG_GYRO_CONFIG 0x1B
+#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
+
+#define INV_MPU6050_REG_FIFO_EN 0x23
+#define INV_MPU6050_BIT_ACCEL_OUT 0x08
+#define INV_MPU6050_BITS_GYRO_OUT 0x70
+
+#define INV_MPU6050_REG_INT_ENABLE 0x38
+#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
+#define INV_MPU6050_BIT_DMP_INT_EN 0x02
+
+#define INV_MPU6050_REG_RAW_ACCEL 0x3B
+#define INV_MPU6050_REG_TEMPERATURE 0x41
+#define INV_MPU6050_REG_RAW_GYRO 0x43
+
+#define INV_MPU6050_REG_USER_CTRL 0x6A
+#define INV_MPU6050_BIT_FIFO_RST 0x04
+#define INV_MPU6050_BIT_DMP_RST 0x08
+#define INV_MPU6050_BIT_I2C_MST_EN 0x20
+#define INV_MPU6050_BIT_FIFO_EN 0x40
+#define INV_MPU6050_BIT_DMP_EN 0x80
+
+#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
+#define INV_MPU6050_BIT_H_RESET 0x80
+#define INV_MPU6050_BIT_SLEEP 0x40
+#define INV_MPU6050_BIT_CLK_MASK 0x7
+
+#define INV_MPU6050_REG_PWR_MGMT_2 0x6C
+#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
+#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
+
+#define INV_MPU6050_REG_FIFO_COUNT_H 0x72
+#define INV_MPU6050_REG_FIFO_R_W 0x74
+
+#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
+#define INV_MPU6050_FIFO_COUNT_BYTE 2
+#define INV_MPU6050_FIFO_THRESHOLD 500
+#define INV_MPU6050_POWER_UP_TIME 100
+#define INV_MPU6050_TEMP_UP_TIME 100
+#define INV_MPU6050_SENSOR_UP_TIME 30
+#define INV_MPU6050_REG_UP_TIME 5
+
+#define INV_MPU6050_TEMP_OFFSET 12421
+#define INV_MPU6050_TEMP_SCALE 2941
+#define INV_MPU6050_MAX_GYRO_FS_PARAM 3
+#define INV_MPU6050_MAX_ACCL_FS_PARAM 3
+#define INV_MPU6050_THREE_AXIS 3
+#define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3
+#define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3
+
+/* 6 + 6 bytes of gyro and accel data, rounded up to 16, plus 8 for timestamp */
+#define INV_MPU6050_OUTPUT_DATA_SIZE 24
+
+/* init parameters */
+#define INV_MPU6050_INIT_FIFO_RATE 50
+#define INV_MPU6050_TIME_STAMP_TOR 5
+#define INV_MPU6050_MAX_FIFO_RATE 1000
+#define INV_MPU6050_MIN_FIFO_RATE 4
+#define INV_MPU6050_ONE_K_HZ 1000
+
+/* scan element definition */
+enum inv_mpu6050_scan {
+ INV_MPU6050_SCAN_ACCL_X,
+ INV_MPU6050_SCAN_ACCL_Y,
+ INV_MPU6050_SCAN_ACCL_Z,
+ INV_MPU6050_SCAN_GYRO_X,
+ INV_MPU6050_SCAN_GYRO_Y,
+ INV_MPU6050_SCAN_GYRO_Z,
+ INV_MPU6050_SCAN_TIMESTAMP,
+};
+
+enum inv_mpu6050_filter_e {
+ INV_MPU6050_FILTER_256HZ_NOLPF2 = 0,
+ INV_MPU6050_FILTER_188HZ,
+ INV_MPU6050_FILTER_98HZ,
+ INV_MPU6050_FILTER_42HZ,
+ INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_10HZ,
+ INV_MPU6050_FILTER_5HZ,
+ INV_MPU6050_FILTER_2100HZ_NOLPF,
+ NUM_MPU6050_FILTER
+};
+
+/* IIO attribute address */
+enum INV_MPU6050_IIO_ATTR_ADDR {
+ ATTR_GYRO_MATRIX,
+ ATTR_ACCL_MATRIX,
+};
+
+enum inv_mpu6050_accl_fs_e {
+ INV_MPU6050_FS_02G = 0,
+ INV_MPU6050_FS_04G,
+ INV_MPU6050_FS_08G,
+ INV_MPU6050_FS_16G,
+ NUM_ACCL_FSR
+};
+
+enum inv_mpu6050_fsr_e {
+ INV_MPU6050_FSR_250DPS = 0,
+ INV_MPU6050_FSR_500DPS,
+ INV_MPU6050_FSR_1000DPS,
+ INV_MPU6050_FSR_2000DPS,
+ NUM_MPU6050_FSR
+};
+
+enum inv_mpu6050_clock_sel_e {
+ INV_CLK_INTERNAL = 0,
+ INV_CLK_PLL,
+ NUM_CLK
+};
+
+irqreturn_t inv_mpu6050_irq_handler(int irq, void *p);
+irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev);
+void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st);
+int inv_reset_fifo(struct iio_dev *indio_dev);
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
+int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
new file mode 100644
index 000000000000..331781ffbb15
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -0,0 +1,196 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include "inv_mpu_iio.h"
+
+int inv_reset_fifo(struct iio_dev *indio_dev)
+{
+ int result;
+ u8 d;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ /* disable interrupt */
+ result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+ if (result) {
+ dev_err(&st->client->dev, "int_enable failed %d\n", result);
+ return result;
+ }
+ /* disable the sensor output to FIFO */
+ result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+ if (result)
+ goto reset_fifo_fail;
+ /* disable fifo reading */
+ result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+ if (result)
+ goto reset_fifo_fail;
+
+	/* reset FIFO */
+ result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+ INV_MPU6050_BIT_FIFO_RST);
+ if (result)
+ goto reset_fifo_fail;
+ /* enable interrupt */
+ if (st->chip_config.accl_fifo_enable ||
+ st->chip_config.gyro_fifo_enable) {
+ result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
+ if (result)
+ return result;
+ }
+	/* enable FIFO reading */
+ result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+ INV_MPU6050_BIT_FIFO_EN);
+ if (result)
+ goto reset_fifo_fail;
+ /* enable sensor output to FIFO */
+ d = 0;
+ if (st->chip_config.gyro_fifo_enable)
+ d |= INV_MPU6050_BITS_GYRO_OUT;
+ if (st->chip_config.accl_fifo_enable)
+ d |= INV_MPU6050_BIT_ACCEL_OUT;
+ result = inv_mpu6050_write_reg(st, st->reg->fifo_en, d);
+ if (result)
+ goto reset_fifo_fail;
+
+ return 0;
+
+reset_fifo_fail:
+ dev_err(&st->client->dev, "reset fifo failed %d\n", result);
+ result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
+
+ return result;
+}
+
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+ unsigned long flags;
+
+	/* take the spin lock so the interrupt handler cannot race with us */
+ spin_lock_irqsave(&st->time_stamp_lock, flags);
+ kfifo_reset(&st->timestamps);
+ spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
+/**
+ * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
+ */
+irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ s64 timestamp;
+
+ timestamp = iio_get_time_ns();
+ spin_lock(&st->time_stamp_lock);
+ kfifo_in(&st->timestamps, &timestamp, 1);
+ spin_unlock(&st->time_stamp_lock);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * inv_mpu6050_read_fifo() - Transfer data from hardware FIFO to KFIFO.
+ */
+irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ size_t bytes_per_datum;
+ int result;
+ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
+ u16 fifo_count;
+ s64 timestamp;
+ u64 *tmp;
+
+ mutex_lock(&indio_dev->mlock);
+ if (!(st->chip_config.accl_fifo_enable |
+ st->chip_config.gyro_fifo_enable))
+ goto end_session;
+ bytes_per_datum = 0;
+ if (st->chip_config.accl_fifo_enable)
+ bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
+
+ if (st->chip_config.gyro_fifo_enable)
+ bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
+
+ /*
+	 * read the fifo_count register to learn how many bytes are
+	 * currently in the FIFO
+ */
+ result = i2c_smbus_read_i2c_block_data(st->client,
+ st->reg->fifo_count_h,
+ INV_MPU6050_FIFO_COUNT_BYTE, data);
+ if (result != INV_MPU6050_FIFO_COUNT_BYTE)
+ goto end_session;
+ fifo_count = be16_to_cpup((__be16 *)(&data[0]));
+ if (fifo_count < bytes_per_datum)
+ goto end_session;
+	/* the FIFO count can't be odd; if it is, reset the FIFO */
+ if (fifo_count & 1)
+ goto flush_fifo;
+ if (fifo_count > INV_MPU6050_FIFO_THRESHOLD)
+ goto flush_fifo;
+ /* Timestamp mismatch. */
+ if (kfifo_len(&st->timestamps) >
+ fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
+ goto flush_fifo;
+ while (fifo_count >= bytes_per_datum) {
+ result = i2c_smbus_read_i2c_block_data(st->client,
+ st->reg->fifo_r_w,
+ bytes_per_datum, data);
+ if (result != bytes_per_datum)
+ goto flush_fifo;
+
+ result = kfifo_out(&st->timestamps, &timestamp, 1);
+		/* if no timestamp is available, use 0 */
+		if (result == 0)
+ timestamp = 0;
+
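+		/* store the timestamp right after the data, 8-byte aligned */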
+ tmp = (u64 *)data;
+ tmp[DIV_ROUND_UP(bytes_per_datum, 8)] = timestamp;
+ result = iio_push_to_buffers(indio_dev, data);
+ if (result)
+ goto flush_fifo;
+ fifo_count -= bytes_per_datum;
+ }
+
+end_session:
+ mutex_unlock(&indio_dev->mlock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+
+flush_fifo:
+ /* Flush HW and SW FIFOs. */
+ inv_reset_fifo(indio_dev);
+ inv_clear_kfifo(st);
+ mutex_unlock(&indio_dev->mlock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
new file mode 100644
index 000000000000..e1d0869e0ad1
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -0,0 +1,155 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include "inv_mpu_iio.h"
+
+static void inv_scan_query(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
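+	/* enable FIFO output only for sensors in the active scan mask */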
+ st->chip_config.gyro_fifo_enable =
+ test_bit(INV_MPU6050_SCAN_GYRO_X,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_GYRO_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_GYRO_Z,
+ indio_dev->active_scan_mask);
+
+ st->chip_config.accl_fifo_enable =
+ test_bit(INV_MPU6050_SCAN_ACCL_X,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_ACCL_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_ACCL_Z,
+ indio_dev->active_scan_mask);
+}
+
+/**
+ * inv_mpu6050_set_enable() - enable chip functions.
+ * @indio_dev: Device driver instance.
+ * @enable: enable/disable
+ */
+static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int result;
+
+ if (enable) {
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ return result;
+ inv_scan_query(indio_dev);
+ if (st->chip_config.gyro_fifo_enable) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ return result;
+ }
+ if (st->chip_config.accl_fifo_enable) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ return result;
+ }
+ result = inv_reset_fifo(indio_dev);
+ if (result)
+ return result;
+ } else {
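+		/* stop FIFO and interrupts before powering the engines down */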
+ result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ return result;
+ result = inv_mpu6050_set_power_itg(st, false);
+ if (result)
+ return result;
+ }
+ st->chip_config.enable = enable;
+
+ return 0;
+}
+
+/**
+ * inv_mpu_data_rdy_trigger_set_state() - set data ready interrupt state
+ * @trig: Trigger instance
+ * @state: Desired trigger state
+ */
+static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ return inv_mpu6050_set_enable(trig->private_data, state);
+}
+
+static const struct iio_trigger_ops inv_mpu_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &inv_mpu_data_rdy_trigger_set_state,
+};
+
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ st->trig = iio_trigger_alloc("%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
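+	/* the data ready line's rising edge drives the generic IIO trigger poll */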
+ ret = request_irq(st->client->irq, &iio_trigger_generic_data_rdy_poll,
+ IRQF_TRIGGER_RISING,
+ "inv_mpu",
+ st->trig);
+ if (ret)
+ goto error_free_trig;
+ st->trig->dev.parent = &st->client->dev;
+ st->trig->private_data = indio_dev;
+ st->trig->ops = &inv_mpu_trigger_ops;
+ ret = iio_trigger_register(st->trig);
+ if (ret)
+ goto error_free_irq;
+ indio_dev->trig = st->trig;
+
+ return 0;
+
+error_free_irq:
+ free_irq(st->client->irq, st->trig);
+error_free_trig:
+ iio_trigger_free(st->trig);
+error_ret:
+ return ret;
+}
+
+void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st)
+{
+ iio_trigger_unregister(st->trig);
+ free_irq(st->client->irq, st->trig);
+ iio_trigger_free(st->trig);
+}
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 4fe0ead84213..4d6c7d84e155 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -160,7 +160,7 @@ void iio_trigger_notify_done(struct iio_trigger *trig)
trig->use_count--;
if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
- /* Missed and interrupt so launch new poll now */
+ /* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
@@ -193,7 +193,7 @@ static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
* This is not currently handled. Alternative of not enabling trigger unless
* the relevant function is in there may be the best option.
*/
-/* Worth protecting against double additions?*/
+/* Worth protecting against double additions? */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
struct iio_poll_func *pf)
{
@@ -201,7 +201,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
bool notinuse
= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
- /* Prevent the module being removed whilst attached to a trigger */
+ /* Prevent the module from being removed whilst attached to a trigger */
__module_get(pf->indio_dev->info->driver_module);
pf->irq = iio_trigger_get_irq(trig);
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
@@ -288,7 +288,7 @@ void iio_dealloc_pollfunc(struct iio_poll_func *pf)
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
- * iio_trigger_read_current() - trigger consumer sysfs query which trigger
+ * iio_trigger_read_current() - trigger consumer sysfs query current trigger
*
* For trigger consumers the current_trigger interface allows the trigger
* used by the device to be queried.
@@ -305,7 +305,7 @@ static ssize_t iio_trigger_read_current(struct device *dev,
}
/**
- * iio_trigger_write_current() trigger consumer sysfs set current trigger
+ * iio_trigger_write_current() - trigger consumer sysfs set current trigger
*
* For trigger consumers the current_trigger interface allows the trigger
* used for this device to be specified at run time based on the triggers
@@ -476,7 +476,7 @@ void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
- /* Clean up and associated but not attached triggers references */
+ /* Clean up an associated but not attached trigger reference */
if (indio_dev->trig)
iio_trigger_put(indio_dev->trig);
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index d55e98fb300e..b289915b8469 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -54,39 +54,25 @@ error_ret:
EXPORT_SYMBOL_GPL(iio_map_array_register);
-/* Assumes the exact same array (e.g. memory locations)
- * used at unregistration as used at registration rather than
- * more complex checking of contents.
+/*
+ * Remove all map entries associated with the given iio device
*/
-int iio_map_array_unregister(struct iio_dev *indio_dev,
- struct iio_map *maps)
+int iio_map_array_unregister(struct iio_dev *indio_dev)
{
- int i = 0, ret = 0;
- bool found_it;
+ int ret = -ENODEV;
struct iio_map_internal *mapi;
-
- if (maps == NULL)
- return 0;
+ struct list_head *pos, *tmp;
mutex_lock(&iio_map_list_lock);
- while (maps[i].consumer_dev_name != NULL) {
- found_it = false;
- list_for_each_entry(mapi, &iio_map_list, l)
- if (&maps[i] == mapi->map) {
- list_del(&mapi->l);
- kfree(mapi);
- found_it = true;
- break;
- }
- if (!found_it) {
- ret = -ENODEV;
- goto error_ret;
+ list_for_each_safe(pos, tmp, &iio_map_list) {
+ mapi = list_entry(pos, struct iio_map_internal, l);
+ if (indio_dev == mapi->indio_dev) {
+ list_del(&mapi->l);
+ kfree(mapi);
+ ret = 0;
}
- i++;
}
-error_ret:
mutex_unlock(&iio_map_list_lock);
-
return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
@@ -107,7 +93,8 @@ static const struct iio_chan_spec
}
-struct iio_channel *iio_channel_get(const char *name, const char *channel_name)
+static struct iio_channel *iio_channel_get_sys(const char *name,
+ const char *channel_name)
{
struct iio_map_internal *c_i = NULL, *c = NULL;
struct iio_channel *channel;
@@ -158,6 +145,14 @@ error_no_mem:
iio_device_put(c->indio_dev);
return ERR_PTR(err);
}
+
+struct iio_channel *iio_channel_get(struct device *dev,
+ const char *channel_name)
+{
+ const char *name = dev ? dev_name(dev) : NULL;
+
+ return iio_channel_get_sys(name, channel_name);
+}
EXPORT_SYMBOL_GPL(iio_channel_get);
void iio_channel_release(struct iio_channel *channel)
@@ -167,16 +162,18 @@ void iio_channel_release(struct iio_channel *channel)
}
EXPORT_SYMBOL_GPL(iio_channel_release);
-struct iio_channel *iio_channel_get_all(const char *name)
+struct iio_channel *iio_channel_get_all(struct device *dev)
{
+ const char *name;
struct iio_channel *chans;
struct iio_map_internal *c = NULL;
int nummaps = 0;
int mapind = 0;
int i, ret;
- if (name == NULL)
+ if (dev == NULL)
return ERR_PTR(-EINVAL);
+ name = dev_name(dev);
mutex_lock(&iio_map_list_lock);
/* first count the matching maps */
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index 5bc5c860e9ca..a923c78d5cb4 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -22,7 +22,6 @@ static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
- __iio_update_buffer(&buf->buffer, bytes_per_datum, length);
return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
bytes_per_datum, GFP_KERNEL);
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 1763c9bcb98a..5ef1a396e0c9 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -32,6 +32,16 @@ config SENSORS_LM3533
changes. The ALS-control output values can be set per zone for the
three current output channels.
+config SENSORS_TSL2563
+ tristate "TAOS TSL2560, TSL2561, TSL2562 and TSL2563 ambient light sensors"
+ depends on I2C
+ help
+ If you say yes here you get support for the Taos TSL2560,
+ TSL2561, TSL2562 and TSL2563 ambient light sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called tsl2563.
+
config VCNL4000
tristate "VCNL4000 combined ALS and proximity sensor"
depends on I2C
@@ -47,6 +57,7 @@ config HID_SENSOR_ALS
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID ALS"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 21a8f0df1407..040d9c75f8e6 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
+obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 36d210a06b28..d5b9d39d95b2 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -286,8 +286,8 @@ static const struct iio_info adjd_s311_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit adjd_s311_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adjd_s311_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adjd_s311_data *data;
struct iio_dev *indio_dev;
@@ -330,7 +330,7 @@ exit:
return err;
}
-static int __devexit adjd_s311_remove(struct i2c_client *client)
+static int adjd_s311_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct adjd_s311_data *data = iio_priv(indio_dev);
@@ -354,7 +354,7 @@ static struct i2c_driver adjd_s311_driver = {
.name = ADJD_S311_DRV_NAME,
},
.probe = adjd_s311_probe,
- .remove = __devexit_p(adjd_s311_remove),
+ .remove = adjd_s311_remove,
.id_table = adjd_s311_id,
};
module_i2c_driver(adjd_s311_driver);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 23eeeef64e84..3d7e8c9b4beb 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -28,7 +28,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include "../common/hid-sensors/hid-sensor-attributes.h"
#include "../common/hid-sensors/hid-sensor-trigger.h"
/*Format: HID-SENSOR-usage_id_in_hex*/
@@ -39,7 +38,7 @@
struct als_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
+ struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info als_illum;
u32 illum;
};
@@ -245,7 +244,7 @@ static int als_parse_report(struct platform_device *pdev,
}
/* Function to initialize the processing for usage id */
-static int __devinit hid_als_probe(struct platform_device *pdev)
+static int hid_als_probe(struct platform_device *pdev)
{
int ret = 0;
static const char *name = "als";
@@ -341,7 +340,7 @@ error_ret:
}
/* Function to deinitialize the processing for usage id */
-static int __devinit hid_als_remove(struct platform_device *pdev)
+static int hid_als_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index e45712a921ce..7503012ce933 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -718,8 +718,7 @@ static struct attribute_group lm3533_als_attribute_group = {
.attrs = lm3533_als_attributes
};
-static int __devinit lm3533_als_set_input_mode(struct lm3533_als *als,
- bool pwm_mode)
+static int lm3533_als_set_input_mode(struct lm3533_als *als, bool pwm_mode)
{
u8 mask = LM3533_ALS_INPUT_MODE_MASK;
u8 val;
@@ -740,7 +739,7 @@ static int __devinit lm3533_als_set_input_mode(struct lm3533_als *als,
return 0;
}
-static int __devinit lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
+static int lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
{
int ret;
@@ -756,8 +755,8 @@ static int __devinit lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
return 0;
}
-static int __devinit lm3533_als_setup(struct lm3533_als *als,
- struct lm3533_als_platform_data *pdata)
+static int lm3533_als_setup(struct lm3533_als *als,
+ struct lm3533_als_platform_data *pdata)
{
int ret;
@@ -775,7 +774,7 @@ static int __devinit lm3533_als_setup(struct lm3533_als *als,
return 0;
}
-static int __devinit lm3533_als_setup_irq(struct lm3533_als *als, void *dev)
+static int lm3533_als_setup_irq(struct lm3533_als *als, void *dev)
{
u8 mask = LM3533_ALS_INT_ENABLE_MASK;
int ret;
@@ -799,7 +798,7 @@ static int __devinit lm3533_als_setup_irq(struct lm3533_als *als, void *dev)
return 0;
}
-static int __devinit lm3533_als_enable(struct lm3533_als *als)
+static int lm3533_als_enable(struct lm3533_als *als)
{
u8 mask = LM3533_ALS_ENABLE_MASK;
int ret;
@@ -830,7 +829,7 @@ static const struct iio_info lm3533_als_info = {
.read_raw = &lm3533_als_read_raw,
};
-static int __devinit lm3533_als_probe(struct platform_device *pdev)
+static int lm3533_als_probe(struct platform_device *pdev)
{
struct lm3533 *lm3533;
struct lm3533_als_platform_data *pdata;
@@ -901,7 +900,7 @@ err_free_dev:
return ret;
}
-static int __devexit lm3533_als_remove(struct platform_device *pdev)
+static int lm3533_als_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct lm3533_als *als = iio_priv(indio_dev);
@@ -922,7 +921,7 @@ static struct platform_driver lm3533_als_driver = {
.owner = THIS_MODULE,
},
.probe = lm3533_als_probe,
- .remove = __devexit_p(lm3533_als_remove),
+ .remove = lm3533_als_remove,
};
module_platform_driver(lm3533_als_driver);
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 1a9adc020f64..fd8be69b7d05 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -1,5 +1,5 @@
/*
- * drivers/i2c/chips/tsl2563.c
+ * drivers/iio/light/tsl2563.c
*
* Copyright (C) 2008 Nokia Corporation
*
@@ -38,52 +38,52 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
-#include "tsl2563.h"
+#include <linux/platform_data/tsl2563.h>
/* Use this many bits for fraction part. */
-#define ADC_FRAC_BITS (14)
+#define ADC_FRAC_BITS 14
/* Given number of 1/10000's in ADC_FRAC_BITS precision. */
#define FRAC10K(f) (((f) * (1L << (ADC_FRAC_BITS))) / (10000))
/* Bits used for fraction in calibration coefficients.*/
-#define CALIB_FRAC_BITS (10)
+#define CALIB_FRAC_BITS 10
/* 0.5 in CALIB_FRAC_BITS precision */
#define CALIB_FRAC_HALF (1 << (CALIB_FRAC_BITS - 1))
/* Make a fraction from a number n that was multiplied with b. */
#define CALIB_FRAC(n, b) (((n) << CALIB_FRAC_BITS) / (b))
/* Decimal 10^(digits in sysfs presentation) */
-#define CALIB_BASE_SYSFS (1000)
-
-#define TSL2563_CMD (0x80)
-#define TSL2563_CLEARINT (0x40)
-
-#define TSL2563_REG_CTRL (0x00)
-#define TSL2563_REG_TIMING (0x01)
-#define TSL2563_REG_LOWLOW (0x02) /* data0 low threshold, 2 bytes */
-#define TSL2563_REG_LOWHIGH (0x03)
-#define TSL2563_REG_HIGHLOW (0x04) /* data0 high threshold, 2 bytes */
-#define TSL2563_REG_HIGHHIGH (0x05)
-#define TSL2563_REG_INT (0x06)
-#define TSL2563_REG_ID (0x0a)
-#define TSL2563_REG_DATA0LOW (0x0c) /* broadband sensor value, 2 bytes */
-#define TSL2563_REG_DATA0HIGH (0x0d)
-#define TSL2563_REG_DATA1LOW (0x0e) /* infrared sensor value, 2 bytes */
-#define TSL2563_REG_DATA1HIGH (0x0f)
-
-#define TSL2563_CMD_POWER_ON (0x03)
-#define TSL2563_CMD_POWER_OFF (0x00)
-#define TSL2563_CTRL_POWER_MASK (0x03)
-
-#define TSL2563_TIMING_13MS (0x00)
-#define TSL2563_TIMING_100MS (0x01)
-#define TSL2563_TIMING_400MS (0x02)
-#define TSL2563_TIMING_MASK (0x03)
-#define TSL2563_TIMING_GAIN16 (0x10)
-#define TSL2563_TIMING_GAIN1 (0x00)
-
-#define TSL2563_INT_DISBLED (0x00)
-#define TSL2563_INT_LEVEL (0x10)
+#define CALIB_BASE_SYSFS 1000
+
+#define TSL2563_CMD 0x80
+#define TSL2563_CLEARINT 0x40
+
+#define TSL2563_REG_CTRL 0x00
+#define TSL2563_REG_TIMING 0x01
+#define TSL2563_REG_LOWLOW 0x02 /* data0 low threshold, 2 bytes */
+#define TSL2563_REG_LOWHIGH 0x03
+#define TSL2563_REG_HIGHLOW 0x04 /* data0 high threshold, 2 bytes */
+#define TSL2563_REG_HIGHHIGH 0x05
+#define TSL2563_REG_INT 0x06
+#define TSL2563_REG_ID 0x0a
+#define TSL2563_REG_DATA0LOW 0x0c /* broadband sensor value, 2 bytes */
+#define TSL2563_REG_DATA0HIGH 0x0d
+#define TSL2563_REG_DATA1LOW 0x0e /* infrared sensor value, 2 bytes */
+#define TSL2563_REG_DATA1HIGH 0x0f
+
+#define TSL2563_CMD_POWER_ON 0x03
+#define TSL2563_CMD_POWER_OFF 0x00
+#define TSL2563_CTRL_POWER_MASK 0x03
+
+#define TSL2563_TIMING_13MS 0x00
+#define TSL2563_TIMING_100MS 0x01
+#define TSL2563_TIMING_400MS 0x02
+#define TSL2563_TIMING_MASK 0x03
+#define TSL2563_TIMING_GAIN16 0x10
+#define TSL2563_TIMING_GAIN1 0x00
+
+#define TSL2563_INT_DISBLED 0x00
+#define TSL2563_INT_LEVEL 0x10
#define TSL2563_INT_PERSIST(n) ((n) & 0x0F)
struct tsl2563_gainlevel_coeff {
@@ -190,8 +190,10 @@ static int tsl2563_configure(struct tsl2563_chip *chip)
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_LOWHIGH,
(chip->low_thres >> 8) & 0xFF);
-/* Interrupt register is automatically written anyway if it is relevant
- so is not here */
+/*
+ * Interrupt register is automatically written anyway if it is relevant
+ * so is not here.
+ */
error_ret:
return ret;
}
@@ -423,9 +425,7 @@ static const struct tsl2563_lux_coeff lux_table[] = {
},
};
-/*
- * Convert normalized, scaled ADC values to lux.
- */
+/* Convert normalized, scaled ADC values to lux. */
static unsigned int adc_to_lux(u32 adc0, u32 adc1)
{
const struct tsl2563_lux_coeff *lp = lux_table;
@@ -441,11 +441,6 @@ static unsigned int adc_to_lux(u32 adc0, u32 adc1)
return (unsigned int) (lux >> ADC_FRAC_BITS);
}
-/*--------------------------------------------------------------*/
-/* Sysfs interface */
-/*--------------------------------------------------------------*/
-
-
/* Apply calibration coefficient to ADC count. */
static u32 calib_adc(u32 adc, u32 calib)
{
@@ -677,18 +672,11 @@ static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
TSL2563_CMD | TSL2563_REG_INT);
mutex_unlock(&chip->lock);
if (ret < 0)
- goto error_ret;
- ret = !!(ret & 0x30);
-error_ret:
+ return ret;
- return ret;
+ return !!(ret & 0x30);
}
-/*--------------------------------------------------------------*/
-/* Probe, Attach, Remove */
-/*--------------------------------------------------------------*/
-static struct i2c_driver tsl2563_i2c_driver;
-
static const struct iio_info tsl2563_info_no_irq = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index e49cb9784a6f..2aa748fbdc0e 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -150,8 +150,8 @@ static const struct iio_info vcnl4000_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit vcnl4000_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int vcnl4000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct vcnl4000_data *data;
struct iio_dev *indio_dev;
@@ -190,7 +190,7 @@ error_free_dev:
return ret;
}
-static int __devexit vcnl4000_remove(struct i2c_client *client)
+static int vcnl4000_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -206,7 +206,7 @@ static struct i2c_driver vcnl4000_driver = {
.owner = THIS_MODULE,
},
.probe = vcnl4000_probe,
- .remove = __devexit_p(vcnl4000_remove),
+ .remove = vcnl4000_remove,
.id_table = vcnl4000_id,
};
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index c1f0cdd57037..cd29be54f643 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -8,9 +8,40 @@ config HID_SENSOR_MAGNETOMETER_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Magenetometer 3D"
help
Say yes here to build support for the HID SENSOR
Magnetometer 3D.
+config IIO_ST_MAGN_3AXIS
+ tristate "STMicroelectronics magnetometers 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_MAGN_I2C_3AXIS if (I2C)
+ select IIO_ST_MAGN_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ select IIO_ST_MAGN_BUFFER if (IIO_TRIGGERED_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics magnetometers:
+ LSM303DLHC, LSM303DLM, LIS3MDL.
+
+	  This driver can also be built as a module. If so, these modules
+	  will be created:
+	  - st_magn (core functions for the driver [mandatory]);
+	  - st_magn_i2c (needed for I2C devices [optional*]);
+	  - st_magn_spi (needed for SPI devices [optional*]);
+
+	  (*) at least one of them is needed for the driver to do anything.
+
+config IIO_ST_MAGN_I2C_3AXIS
+ tristate
+ depends on IIO_ST_MAGN_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_MAGN_SPI_3AXIS
+ tristate
+ depends on IIO_ST_MAGN_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
endmenu
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 60dc4f2b1963..e78672876dc2 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -3,3 +3,10 @@
#
obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
+
+obj-$(CONFIG_IIO_ST_MAGN_3AXIS) += st_magn.o
+st_magn-y := st_magn_core.o
+st_magn-$(CONFIG_IIO_BUFFER) += st_magn_buffer.o
+
+obj-$(CONFIG_IIO_ST_MAGN_I2C_3AXIS) += st_magn_i2c.o
+obj-$(CONFIG_IIO_ST_MAGN_SPI_3AXIS) += st_magn_spi.o
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 8e75eb76ccd9..d8d01265220b 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -28,7 +28,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include "../common/hid-sensors/hid-sensor-attributes.h"
#include "../common/hid-sensors/hid-sensor-trigger.h"
/*Format: HID-SENSOR-usage_id_in_hex*/
@@ -44,7 +43,7 @@ enum magn_3d_channel {
struct magn_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
+ struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX];
u32 magn_val[MAGN_3D_CHANNEL_MAX];
};
@@ -279,7 +278,7 @@ static int magn_3d_parse_report(struct platform_device *pdev,
}
/* Function to initialize the processing for usage id */
-static int __devinit hid_magn_3d_probe(struct platform_device *pdev)
+static int hid_magn_3d_probe(struct platform_device *pdev)
{
int ret = 0;
static char *name = "magn_3d";
@@ -376,7 +375,7 @@ error_ret:
}
/* Function to deinitialize the processing for usage id */
-static int __devinit hid_magn_3d_remove(struct platform_device *pdev)
+static int hid_magn_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
new file mode 100644
index 000000000000..7e81d00ef0c3
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -0,0 +1,45 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_MAGN_H
+#define ST_MAGN_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define LSM303DLHC_MAGN_DEV_NAME "lsm303dlhc_magn"
+#define LSM303DLM_MAGN_DEV_NAME "lsm303dlm_magn"
+#define LIS3MDL_MAGN_DEV_NAME "lis3mdl"
+
+int st_magn_common_probe(struct iio_dev *indio_dev);
+void st_magn_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_magn_allocate_ring(struct iio_dev *indio_dev);
+void st_magn_deallocate_ring(struct iio_dev *indio_dev);
+#else /* CONFIG_IIO_BUFFER */
+static inline int st_magn_probe_trigger(struct iio_dev *indio_dev, int irq)
+{
+ return 0;
+}
+static inline void st_magn_remove_trigger(struct iio_dev *indio_dev, int irq)
+{
+ return;
+}
+static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_MAGN_H */
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
new file mode 100644
index 000000000000..708857bdb47d
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -0,0 +1,98 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_magn.h"
+
+static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_magn_set_enable_error;
+
+ err = iio_sw_buffer_preenable(indio_dev);
+
+st_magn_set_enable_error:
+ return err;
+}
+
+static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
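+	/* bounce buffer big enough for one scan of all enabled channels */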
+ mdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (mdata->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_magn_buffer_postenable_error;
+
+ return err;
+
+st_magn_buffer_postenable_error:
+ kfree(mdata->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_magn_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_magn_buffer_predisable_error:
+ kfree(mdata->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
+ .preenable = &st_magn_buffer_preenable,
+ .postenable = &st_magn_buffer_postenable,
+ .predisable = &st_magn_buffer_predisable,
+};
+
+int st_magn_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
+}
+
+void st_magn_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
new file mode 100644
index 000000000000..16f0d6df239f
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -0,0 +1,400 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_magn.h"
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_MAGN_DEFAULT_OUT_X_L_ADDR		0x04
+#define ST_MAGN_DEFAULT_OUT_Y_L_ADDR		0x08
+#define ST_MAGN_DEFAULT_OUT_Z_L_ADDR		0x06
+
+/* FULLSCALE */
+#define ST_MAGN_FS_AVL_1300MG 1300
+#define ST_MAGN_FS_AVL_1900MG 1900
+#define ST_MAGN_FS_AVL_2500MG 2500
+#define ST_MAGN_FS_AVL_4000MG 4000
+#define ST_MAGN_FS_AVL_4700MG 4700
+#define ST_MAGN_FS_AVL_5600MG 5600
+#define ST_MAGN_FS_AVL_8000MG 8000
+#define ST_MAGN_FS_AVL_8100MG 8100
+#define ST_MAGN_FS_AVL_10000MG 10000
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_MAGN_1_WAI_EXP 0x3c
+#define ST_MAGN_1_ODR_ADDR 0x00
+#define ST_MAGN_1_ODR_MASK 0x1c
+#define ST_MAGN_1_ODR_AVL_1HZ_VAL 0x00
+#define ST_MAGN_1_ODR_AVL_2HZ_VAL 0x01
+#define ST_MAGN_1_ODR_AVL_3HZ_VAL 0x02
+#define ST_MAGN_1_ODR_AVL_8HZ_VAL 0x03
+#define ST_MAGN_1_ODR_AVL_15HZ_VAL 0x04
+#define ST_MAGN_1_ODR_AVL_30HZ_VAL 0x05
+#define ST_MAGN_1_ODR_AVL_75HZ_VAL 0x06
+#define ST_MAGN_1_ODR_AVL_220HZ_VAL 0x07
+#define ST_MAGN_1_PW_ADDR 0x02
+#define ST_MAGN_1_PW_MASK 0x03
+#define ST_MAGN_1_PW_ON 0x00
+#define ST_MAGN_1_PW_OFF 0x03
+#define ST_MAGN_1_FS_ADDR 0x01
+#define ST_MAGN_1_FS_MASK 0xe0
+#define ST_MAGN_1_FS_AVL_1300_VAL 0x01
+#define ST_MAGN_1_FS_AVL_1900_VAL 0x02
+#define ST_MAGN_1_FS_AVL_2500_VAL 0x03
+#define ST_MAGN_1_FS_AVL_4000_VAL 0x04
+#define ST_MAGN_1_FS_AVL_4700_VAL 0x05
+#define ST_MAGN_1_FS_AVL_5600_VAL 0x06
+#define ST_MAGN_1_FS_AVL_8100_VAL 0x07
+#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 1100
+#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 855
+#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 670
+#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 450
+#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 400
+#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 330
+#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 230
+#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 980
+#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 760
+#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 600
+#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 400
+#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 355
+#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 295
+#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 205
+#define ST_MAGN_1_MULTIREAD_BIT false
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_MAGN_2_WAI_EXP 0x3d
+#define ST_MAGN_2_ODR_ADDR 0x20
+#define ST_MAGN_2_ODR_MASK 0x1c
+#define ST_MAGN_2_ODR_AVL_1HZ_VAL 0x00
+#define ST_MAGN_2_ODR_AVL_2HZ_VAL 0x01
+#define ST_MAGN_2_ODR_AVL_3HZ_VAL 0x02
+#define ST_MAGN_2_ODR_AVL_5HZ_VAL 0x03
+#define ST_MAGN_2_ODR_AVL_10HZ_VAL 0x04
+#define ST_MAGN_2_ODR_AVL_20HZ_VAL 0x05
+#define ST_MAGN_2_ODR_AVL_40HZ_VAL 0x06
+#define ST_MAGN_2_ODR_AVL_80HZ_VAL 0x07
+#define ST_MAGN_2_PW_ADDR 0x22
+#define ST_MAGN_2_PW_MASK 0x03
+#define ST_MAGN_2_PW_ON 0x00
+#define ST_MAGN_2_PW_OFF 0x03
+#define ST_MAGN_2_FS_ADDR 0x21
+#define ST_MAGN_2_FS_MASK 0x60
+#define ST_MAGN_2_FS_AVL_4000_VAL 0x00
+#define ST_MAGN_2_FS_AVL_8000_VAL 0x01
+#define ST_MAGN_2_FS_AVL_10000_VAL 0x02
+#define ST_MAGN_2_FS_AVL_4000_GAIN 430
+#define ST_MAGN_2_FS_AVL_8000_GAIN 230
+#define ST_MAGN_2_FS_AVL_10000_GAIN 230
+#define ST_MAGN_2_MULTIREAD_BIT false
+#define ST_MAGN_2_OUT_X_L_ADDR 0x28
+#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
+#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c
+
+static const struct iio_chan_spec st_magn_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct iio_chan_spec st_magn_2_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_2_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_2_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_2_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct st_sensors st_magn_sensors[] = {
+ {
+ .wai = ST_MAGN_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = LSM303DLHC_MAGN_DEV_NAME,
+ [1] = LSM303DLM_MAGN_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_magn_16bit_channels,
+ .odr = {
+ .addr = ST_MAGN_1_ODR_ADDR,
+ .mask = ST_MAGN_1_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_MAGN_1_ODR_AVL_1HZ_VAL, },
+ { 2, ST_MAGN_1_ODR_AVL_2HZ_VAL, },
+ { 3, ST_MAGN_1_ODR_AVL_3HZ_VAL, },
+ { 8, ST_MAGN_1_ODR_AVL_8HZ_VAL, },
+ { 15, ST_MAGN_1_ODR_AVL_15HZ_VAL, },
+ { 30, ST_MAGN_1_ODR_AVL_30HZ_VAL, },
+ { 75, ST_MAGN_1_ODR_AVL_75HZ_VAL, },
+ { 220, ST_MAGN_1_ODR_AVL_220HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_MAGN_1_PW_ADDR,
+ .mask = ST_MAGN_1_PW_MASK,
+ .value_on = ST_MAGN_1_PW_ON,
+ .value_off = ST_MAGN_1_PW_OFF,
+ },
+ .fs = {
+ .addr = ST_MAGN_1_FS_ADDR,
+ .mask = ST_MAGN_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_MAGN_FS_AVL_1300MG,
+ .value = ST_MAGN_1_FS_AVL_1300_VAL,
+ .gain = ST_MAGN_1_FS_AVL_1300_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_1300_GAIN_Z,
+ },
+ [1] = {
+ .num = ST_MAGN_FS_AVL_1900MG,
+ .value = ST_MAGN_1_FS_AVL_1900_VAL,
+ .gain = ST_MAGN_1_FS_AVL_1900_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_1900_GAIN_Z,
+ },
+ [2] = {
+ .num = ST_MAGN_FS_AVL_2500MG,
+ .value = ST_MAGN_1_FS_AVL_2500_VAL,
+ .gain = ST_MAGN_1_FS_AVL_2500_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_2500_GAIN_Z,
+ },
+ [3] = {
+ .num = ST_MAGN_FS_AVL_4000MG,
+ .value = ST_MAGN_1_FS_AVL_4000_VAL,
+ .gain = ST_MAGN_1_FS_AVL_4000_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_4000_GAIN_Z,
+ },
+ [4] = {
+ .num = ST_MAGN_FS_AVL_4700MG,
+ .value = ST_MAGN_1_FS_AVL_4700_VAL,
+ .gain = ST_MAGN_1_FS_AVL_4700_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_4700_GAIN_Z,
+ },
+ [5] = {
+ .num = ST_MAGN_FS_AVL_5600MG,
+ .value = ST_MAGN_1_FS_AVL_5600_VAL,
+ .gain = ST_MAGN_1_FS_AVL_5600_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_5600_GAIN_Z,
+ },
+ [6] = {
+ .num = ST_MAGN_FS_AVL_8100MG,
+ .value = ST_MAGN_1_FS_AVL_8100_VAL,
+ .gain = ST_MAGN_1_FS_AVL_8100_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_8100_GAIN_Z,
+ },
+ },
+ },
+ .multi_read_bit = ST_MAGN_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_MAGN_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS3MDL_MAGN_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
+ .odr = {
+ .addr = ST_MAGN_2_ODR_ADDR,
+ .mask = ST_MAGN_2_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_MAGN_2_ODR_AVL_1HZ_VAL, },
+ { 2, ST_MAGN_2_ODR_AVL_2HZ_VAL, },
+ { 3, ST_MAGN_2_ODR_AVL_3HZ_VAL, },
+ { 5, ST_MAGN_2_ODR_AVL_5HZ_VAL, },
+ { 10, ST_MAGN_2_ODR_AVL_10HZ_VAL, },
+ { 20, ST_MAGN_2_ODR_AVL_20HZ_VAL, },
+ { 40, ST_MAGN_2_ODR_AVL_40HZ_VAL, },
+ { 80, ST_MAGN_2_ODR_AVL_80HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_MAGN_2_PW_ADDR,
+ .mask = ST_MAGN_2_PW_MASK,
+ .value_on = ST_MAGN_2_PW_ON,
+ .value_off = ST_MAGN_2_PW_OFF,
+ },
+ .fs = {
+ .addr = ST_MAGN_2_FS_ADDR,
+ .mask = ST_MAGN_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_MAGN_FS_AVL_4000MG,
+ .value = ST_MAGN_2_FS_AVL_4000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_4000_GAIN,
+ },
+ [1] = {
+ .num = ST_MAGN_FS_AVL_8000MG,
+ .value = ST_MAGN_2_FS_AVL_8000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
+ },
+ [2] = {
+ .num = ST_MAGN_FS_AVL_10000MG,
+ .value = ST_MAGN_2_FS_AVL_10000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_10000_GAIN,
+ },
+ },
+ },
+ .multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+static int st_magn_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val,
+ int *val2, long mask)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = st_sensors_read_info_raw(indio_dev, ch, val);
+ if (err < 0)
+ goto read_error;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if ((ch->scan_index == ST_SENSORS_SCAN_Z) &&
+ (mdata->current_fullscale->gain2 != 0))
+ *val2 = mdata->current_fullscale->gain2;
+ else
+ *val2 = mdata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+read_error:
+ return err;
+}
+
+static int st_magn_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static ST_SENSOR_DEV_ATTR_SAMP_FREQ();
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_magn_scale_available);
+
+static struct attribute *st_magn_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_magn_scale_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_magn_attribute_group = {
+ .attrs = st_magn_attributes,
+};
+
+static const struct iio_info magn_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_magn_attribute_group,
+ .read_raw = &st_magn_read_raw,
+ .write_raw = &st_magn_write_raw,
+};
+
+int st_magn_common_probe(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &magn_info;
+
+ err = st_sensors_check_device_support(indio_dev,
+ ARRAY_SIZE(st_magn_sensors), st_magn_sensors);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+
+ mdata->multiread_bit = mdata->sensor->multi_read_bit;
+ indio_dev->channels = mdata->sensor->ch;
+ indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+ mdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &mdata->sensor->fs.fs_avl[0];
+ mdata->odr = mdata->sensor->odr.odr_avl[0].hz;
+
+ err = st_sensors_init_sensor(indio_dev);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+
+ if (mdata->get_irq_data_ready(indio_dev) > 0) {
+ err = st_magn_allocate_ring(indio_dev);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+ err = st_sensors_allocate_trigger(indio_dev, NULL);
+ if (err < 0)
+ goto st_magn_probe_trigger_error;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto st_magn_device_register_error;
+
+ return err;
+
+st_magn_device_register_error:
+ if (mdata->get_irq_data_ready(indio_dev) > 0)
+ st_sensors_deallocate_trigger(indio_dev);
+st_magn_probe_trigger_error:
+ if (mdata->get_irq_data_ready(indio_dev) > 0)
+ st_magn_deallocate_ring(indio_dev);
+st_magn_common_probe_error:
+ return err;
+}
+EXPORT_SYMBOL(st_magn_common_probe);
+
+void st_magn_common_remove(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (mdata->get_irq_data_ready(indio_dev) > 0) {
+ st_sensors_deallocate_trigger(indio_dev);
+ st_magn_deallocate_ring(indio_dev);
+ }
+ iio_device_free(indio_dev);
+}
+EXPORT_SYMBOL(st_magn_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers driver");
+MODULE_LICENSE("GPL v2");
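Aside (illustrative, not part of the patch): st_magn_read_raw() above reports scale as IIO_VAL_INT_PLUS_MICRO with the integer part fixed to 0, so the *_GAIN macros supply the micro part of the scale. Under the usual IIO convention for magnetometers (scale expressed in gauss), a userspace reader would combine raw counts and scale roughly as in this hypothetical helper:

static double st_magn_raw_to_gauss(int raw, int scale_val, int scale_val2)
{
	/* value = raw * (val + val2 * 1e-6), per IIO_VAL_INT_PLUS_MICRO */
	return raw * (scale_val + scale_val2 / 1000000.0);
}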
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
new file mode 100644
index 000000000000..e6adc4a86425
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -0,0 +1,80 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_magn.h"
+
+static int st_magn_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *mdata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*mdata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ mdata = iio_priv(indio_dev);
+ mdata->dev = &client->dev;
+
+ st_sensors_i2c_configure(indio_dev, client, mdata);
+
+ err = st_magn_common_probe(indio_dev);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+
+ return 0;
+
+st_magn_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_magn_i2c_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ st_magn_common_remove(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id st_magn_id_table[] = {
+ { LSM303DLHC_MAGN_DEV_NAME },
+ { LSM303DLM_MAGN_DEV_NAME },
+ { LIS3MDL_MAGN_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
+
+static struct i2c_driver st_magn_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-magn-i2c",
+ },
+ .probe = st_magn_i2c_probe,
+ .remove = st_magn_i2c_remove,
+ .id_table = st_magn_id_table,
+};
+module_i2c_driver(st_magn_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
new file mode 100644
index 000000000000..51adb797cb7d
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -0,0 +1,79 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_magn.h"
+
+static int st_magn_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *mdata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*mdata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ mdata = iio_priv(indio_dev);
+ mdata->dev = &spi->dev;
+
+ st_sensors_spi_configure(indio_dev, spi, mdata);
+
+ err = st_magn_common_probe(indio_dev);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+
+ return 0;
+
+st_magn_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_magn_spi_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ st_magn_common_remove(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id st_magn_id_table[] = {
+ { LSM303DLHC_MAGN_DEV_NAME },
+ { LSM303DLM_MAGN_DEV_NAME },
+ { LIS3MDL_MAGN_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_magn_id_table);
+
+static struct spi_driver st_magn_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-magn-spi",
+ },
+ .probe = st_magn_spi_probe,
+ .remove = st_magn_spi_remove,
+ .id_table = st_magn_id_table,
+};
+module_spi_driver(st_magn_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a7568c34a1aa..d789eea32168 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -345,17 +345,17 @@ static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_nu
err = ib_query_port(device, port_num, &props);
if (err)
- return 1;
+ return err;
for (i = 0; i < props.gid_tbl_len; ++i) {
err = ib_query_gid(device, port_num, i, &tmp);
if (err)
- return 1;
+ return err;
if (!memcmp(&tmp, gid, sizeof tmp))
return 0;
}
- return -EAGAIN;
+ return -EADDRNOTAVAIL;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
@@ -388,8 +388,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
if (!ret) {
id_priv->id.port_num = port;
goto out;
- } else if (ret == 1)
- break;
+ }
}
}
}
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 5ce7b9e8bff6..7275e727e0f5 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -920,8 +920,7 @@ static struct net_device *c2_devinit(struct c2_dev *c2dev,
return netdev;
}
-static int __devinit c2_probe(struct pci_dev *pcidev,
- const struct pci_device_id *ent)
+static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
int ret = 0, i;
unsigned long reg0_start, reg0_flags, reg0_len;
@@ -1191,7 +1190,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
return ret;
}
-static void __devexit c2_remove(struct pci_dev *pcidev)
+static void c2_remove(struct pci_dev *pcidev)
{
struct c2_dev *c2dev = pci_get_drvdata(pcidev);
struct net_device *netdev = c2dev->netdev;
@@ -1236,7 +1235,7 @@ static struct pci_driver c2_pci_driver = {
.name = DRV_NAME,
.id_table = c2_pci_table,
.probe = c2_probe,
- .remove = __devexit_p(c2_remove),
+ .remove = c2_remove,
};
static int __init c2_init_module(void)
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 6ae698e68775..ba7a1208ff9e 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -498,16 +498,16 @@ extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
struct ib_send_wr **bad_wr);
extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
struct ib_recv_wr **bad_wr);
-extern void __devinit c2_init_qp_table(struct c2_dev *c2dev);
-extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev);
+extern void c2_init_qp_table(struct c2_dev *c2dev);
+extern void c2_cleanup_qp_table(struct c2_dev *c2dev);
extern void c2_set_qp_state(struct c2_qp *, int);
extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
/* PDs */
extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-extern int __devinit c2_init_pd_table(struct c2_dev *c2dev);
-extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev);
+extern int c2_init_pd_table(struct c2_dev *c2dev);
+extern void c2_cleanup_pd_table(struct c2_dev *c2dev);
/* CQs */
extern int c2_init_cq(struct c2_dev *c2dev, int entries,
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index 32d34e88d5cf..706cf97cbe8f 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -311,6 +311,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&ib_event,
cq->ibcq.cq_context);
+ break;
}
default:
diff --git a/drivers/infiniband/hw/amso1100/c2_pd.c b/drivers/infiniband/hw/amso1100/c2_pd.c
index 161f2a285351..f3e81dc357bb 100644
--- a/drivers/infiniband/hw/amso1100/c2_pd.c
+++ b/drivers/infiniband/hw/amso1100/c2_pd.c
@@ -70,7 +70,7 @@ void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
spin_unlock(&c2dev->pd_table.lock);
}
-int __devinit c2_init_pd_table(struct c2_dev *c2dev)
+int c2_init_pd_table(struct c2_dev *c2dev)
{
c2dev->pd_table.last = 0;
@@ -84,7 +84,7 @@ int __devinit c2_init_pd_table(struct c2_dev *c2dev)
return 0;
}
-void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev)
+void c2_cleanup_pd_table(struct c2_dev *c2dev)
{
kfree(c2dev->pd_table.table);
}
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 0d7b6f23caff..28cd5cb51859 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -1010,13 +1010,13 @@ out:
return err;
}
-void __devinit c2_init_qp_table(struct c2_dev *c2dev)
+void c2_init_qp_table(struct c2_dev *c2dev)
{
spin_lock_init(&c2dev->qp_table.lock);
idr_init(&c2dev->qp_table.idr);
}
-void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
+void c2_cleanup_qp_table(struct c2_dev *c2dev)
{
idr_destroy(&c2dev->qp_table.idr);
}
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index e4a73158fc7f..b7c986990053 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -442,7 +442,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
* involves initializing the various limits and resource pools that
* comprise the RNIC instance.
*/
-int __devinit c2_rnic_init(struct c2_dev *c2dev)
+int c2_rnic_init(struct c2_dev *c2dev)
{
int err;
u32 qsize, msgsize;
@@ -611,7 +611,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
/*
* Called by c2_remove to cleanup the RNIC resources.
*/
-void __devexit c2_rnic_term(struct c2_dev *c2dev)
+void c2_rnic_term(struct c2_dev *c2dev)
{
/* Close the open adapter instance */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index aaf88ef9409c..3e094cd6a0e3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -128,9 +128,8 @@ static void stop_ep_timer(struct iwch_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
- printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",
+ WARN(1, "%s timer stopped when its not running! ep %p state %u\n",
__func__, ep, ep->com.state);
- WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -1756,9 +1755,8 @@ static void ep_timeout(unsigned long arg)
__state_set(&ep->com, ABORTING);
break;
default:
- printk(KERN_ERR "%s unexpected state ep %p state %u\n",
+ WARN(1, "%s unexpected state ep %p state %u\n",
__func__, ep, ep->com.state);
- WARN_ON(1);
abort = 0;
}
spin_unlock_irqrestore(&ep->com.lock, flags);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 0bdf09aa6f42..145d82a64d0a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -783,8 +783,8 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
mmid = (mw->rkey) >> 8;
cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
remove_handle(rhp, &rhp->mmidr, mmid);
- kfree(mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ kfree(mhp);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 6cfd4d8fd0bd..c13745cde7fa 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -38,10 +38,12 @@
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/if_vlan.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
+#include <net/tcp.h>
#include "iw_cxgb4.h"
@@ -61,6 +63,14 @@ static char *states[] = {
NULL,
};
+static int nocong;
+module_param(nocong, int, 0644);
+MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
+
+static int enable_ecn;
+module_param(enable_ecn, int, 0644);
+MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
+
static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
@@ -151,9 +161,8 @@ static void stop_ep_timer(struct c4iw_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
- printk(KERN_ERR "%s timer stopped when its not running! "
+ WARN(1, "%s timer stopped when its not running! "
"ep %p state %u\n", __func__, ep, ep->com.state);
- WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -266,6 +275,7 @@ void _c4iw_free_ep(struct kref *kref)
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
+ remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
}
kfree(ep);
}
@@ -442,6 +452,50 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
+#define VLAN_NONE 0xfff
+#define FILTER_SEL_VLAN_NONE 0xffff
+#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
+#define FILTER_SEL_WIDTH_VIN_P_FC \
+ (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
+#define FILTER_SEL_WIDTH_TAG_P_FC \
+ (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
+#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
+
+static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
+ struct l2t_entry *l2t)
+{
+ unsigned int ntuple = 0;
+ u32 viid;
+
+ switch (dev->rdev.lldi.filt_mode) {
+
+ /* default filter mode */
+ case HW_TPL_FR_MT_PR_IV_P_FC:
+ if (l2t->vlan == VLAN_NONE)
+ ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
+ else {
+ ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
+ ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ }
+ ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+ FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ break;
+ case HW_TPL_FR_MT_PR_OV_P_FC: {
+ viid = cxgb4_port_viid(l2t->neigh->dev);
+
+ ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
+ ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
+ ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
+ ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+ FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ break;
+ }
+ default:
+ break;
+ }
+ return ntuple;
+}
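For reference, the shift widths that select_ntuple() relies on accumulate as follows (a worked restatement of the macros above, not additional patch content):

/*
 * FILTER_SEL_WIDTH_P_FC         = 3 + 1     =  4 bits (port + FCoE)
 * FILTER_SEL_WIDTH_VIN_P_FC     = 6 + 7 + 4 = 17 bits (unused + VF + above)
 * FILTER_SEL_WIDTH_TAG_P_FC     = 3 + 17    = 20 bits (PF + above)
 * FILTER_SEL_WIDTH_VLD_TAG_P_FC = 1 + 20    = 21 bits (VLAN-valid + above)
 */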
+
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -464,7 +518,8 @@ static int send_connect(struct c4iw_ep *ep)
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
- opt0 = KEEP_ALIVE(1) |
+ opt0 = (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
@@ -475,6 +530,7 @@ static int send_connect(struct c4iw_ep *ep)
ULP_MODE(ULP_MODE_TCPDDP) |
RCV_BUFSIZ(rcv_win>>10);
opt2 = RX_CHANNEL(0) |
+ CCTRL_ECN(enable_ecn) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
if (enable_tcp_timestamps)
opt2 |= TSTAMPS_EN(1);
@@ -493,8 +549,9 @@ static int send_connect(struct c4iw_ep *ep)
req->local_ip = ep->com.local_addr.sin_addr.s_addr;
req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = 0;
+ req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
req->opt2 = cpu_to_be32(opt2);
+ set_bit(ACT_OPEN_REQ, &ep->com.history);
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
@@ -771,6 +828,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
/* setup the hwtid for this connection */
ep->hwtid = tid;
cxgb4_insert_tid(t, ep, tid);
+ insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -778,7 +836,9 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
set_emss(ep, ntohs(req->tcp_opt));
/* dealloc the atid */
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
cxgb4_free_atid(t, atid);
+ set_bit(ACT_ESTAB, &ep->com.history);
/* start MPA negotiation */
send_flowc(ep, NULL);
@@ -804,6 +864,7 @@ static void close_complete_upcall(struct c4iw_ep *ep)
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
ep->com.qp = NULL;
+ set_bit(CLOSE_UPCALL, &ep->com.history);
}
}
@@ -812,6 +873,7 @@ static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
close_complete_upcall(ep);
state_set(&ep->com, ABORTING);
+ set_bit(ABORT_CONN, &ep->com.history);
return send_abort(ep, skb, gfp);
}
@@ -826,6 +888,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
PDBG("peer close delivered ep %p cm_id %p tid %u\n",
ep, ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ set_bit(DISCONN_UPCALL, &ep->com.history);
}
}
@@ -844,6 +907,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
ep->com.qp = NULL;
+ set_bit(ABORT_UPCALL, &ep->com.history);
}
}
@@ -876,6 +940,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
PDBG("%s ep %p tid %u status %d\n", __func__, ep,
ep->hwtid, status);
+ set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
if (status < 0) {
@@ -916,6 +981,7 @@ static void connect_request_upcall(struct c4iw_ep *ep)
ep->parent_ep->com.cm_id,
&event);
}
+ set_bit(CONNREQ_UPCALL, &ep->com.history);
c4iw_put_ep(&ep->parent_ep->com);
ep->parent_ep = NULL;
}
@@ -932,6 +998,7 @@ static void established_upcall(struct c4iw_ep *ep)
if (ep->com.cm_id) {
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ set_bit(ESTAB_UPCALL, &ep->com.history);
}
}
@@ -1317,6 +1384,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int dlen = ntohs(hdr->len);
unsigned int tid = GET_TID(hdr);
struct tid_info *t = dev->rdev.lldi.tids;
+ __u8 status = hdr->status;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
@@ -1339,9 +1407,9 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
case MPA_REP_SENT:
break;
default:
- printk(KERN_ERR MOD "%s Unexpected streaming data."
- " ep %p state %d tid %u\n",
- __func__, ep, state_read(&ep->com), ep->hwtid);
+ pr_err("%s Unexpected streaming data." \
+ " ep %p state %d tid %u status %d\n",
+ __func__, ep, state_read(&ep->com), ep->hwtid, status);
/*
* The ep will timeout and inform the ULP of the failure.
@@ -1384,6 +1452,63 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+{
+ struct sk_buff *skb;
+ struct fw_ofld_connection_wr *req;
+ unsigned int mtu_idx;
+ int wscale;
+
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+ req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+ ep->l2t));
+ req->le.lport = ep->com.local_addr.sin_port;
+ req->le.pport = ep->com.remote_addr.sin_port;
+ req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
+ req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
+ req->tcb.t_state_to_astid =
+ htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
+ V_FW_OFLD_CONNECTION_WR_ASTID(atid));
+ req->tcb.cplrxdataack_cplpassacceptrpl =
+ htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
+ req->tcb.tx_max = jiffies;
+ req->tcb.rcv_adv = htons(1);
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
+ req->tcb.opt0 = TCAM_BYPASS(1) |
+ (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
+ DELACK(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ ULP_MODE(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ(rcv_win >> 10);
+ req->tcb.opt2 = PACE(1) |
+ TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+ RX_CHANNEL(0) |
+ CCTRL_ECN(enable_ecn) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ if (enable_tcp_timestamps)
+ req->tcb.opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack)
+ req->tcb.opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ req->tcb.opt2 |= WND_SCALE_EN(1);
+ req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
+ req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+ set_bit(ACT_OFLD_CONN, &ep->com.history);
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
/*
* Return whether a failed active open has allocated a TID
*/
@@ -1393,6 +1518,111 @@ static inline int act_open_has_tid(int status)
status != CPL_ERR_ARP_MISS;
}
+#define ACT_OPEN_RETRY_COUNT 2
+
+static int c4iw_reconnect(struct c4iw_ep *ep)
+{
+ int err = 0;
+ struct rtable *rt;
+ struct port_info *pi;
+ struct net_device *pdev;
+ int step;
+ struct neighbour *neigh;
+
+ PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+ init_timer(&ep->timer);
+
+ /*
+ * Allocate an active TID to initiate a TCP connection.
+ */
+ ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
+ if (ep->atid == -1) {
+ pr_err("%s - cannot alloc atid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+ insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+
+ /* find a route */
+ rt = find_route(ep->com.dev,
+ ep->com.cm_id->local_addr.sin_addr.s_addr,
+ ep->com.cm_id->remote_addr.sin_addr.s_addr,
+ ep->com.cm_id->local_addr.sin_port,
+ ep->com.cm_id->remote_addr.sin_port, 0);
+ if (!rt) {
+ pr_err("%s - cannot find route.\n", __func__);
+ err = -EHOSTUNREACH;
+ goto fail3;
+ }
+ ep->dst = &rt->dst;
+
+ neigh = dst_neigh_lookup(ep->dst,
+ &ep->com.cm_id->remote_addr.sin_addr.s_addr);
+ /* get a l2t entry */
+ if (neigh->dev->flags & IFF_LOOPBACK) {
+ PDBG("%s LOOPBACK\n", __func__);
+ pdev = ip_dev_find(&init_net,
+ ep->com.cm_id->remote_addr.sin_addr.s_addr);
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ neigh, pdev, 0);
+ pi = (struct port_info *)netdev_priv(pdev);
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ neigh, neigh->dev, 0);
+ pi = (struct port_info *)netdev_priv(neigh->dev);
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(neigh->dev);
+ ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
+ 0x7F) << 1;
+ }
+
+ step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = pi->port_id * step;
+ ep->ctrlq_idx = pi->port_id;
+ step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
+
+ if (!ep->l2t) {
+ pr_err("%s - cannot alloc l2e.\n", __func__);
+ err = -ENOMEM;
+ goto fail4;
+ }
+
+ PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
+
+ state_set(&ep->com, CONNECTING);
+ ep->tos = 0;
+
+ /* send connect request to rnic */
+ err = send_connect(ep);
+ if (!err)
+ goto out;
+
+ cxgb4_l2t_release(ep->l2t);
+fail4:
+ dst_release(ep->dst);
+fail3:
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+ /*
+ * Remember to send a notification to the upper layer.
+ * We are in here, so the upper layer is not aware that this is a
+ * re-connect attempt; it is still waiting for the response to the
+ * first connect request.
+ */
+ connect_reply_upcall(ep, -ECONNRESET);
+ c4iw_put_ep(&ep->com);
+out:
+ return err;
+}
+
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
@@ -1413,6 +1643,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+ set_bit(ACT_OPEN_RPL, &ep->com.history);
+
/*
* Log interesting failures.
*/
@@ -1420,6 +1652,29 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case CPL_ERR_CONN_RESET:
case CPL_ERR_CONN_TIMEDOUT:
break;
+ case CPL_ERR_TCAM_FULL:
+ if (dev->rdev.lldi.enable_fw_ofld_conn) {
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.tcam_full++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ send_fw_act_open_req(ep,
+ GET_TID_TID(GET_AOPEN_ATID(
+ ntohl(rpl->atid_status))));
+ return 0;
+ }
+ break;
+ case CPL_ERR_CONN_EXIST:
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ set_bit(ACT_RETRY_INUSE, &ep->com.history);
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
+ atid);
+ cxgb4_free_atid(t, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_reconnect(ep);
+ return 0;
+ }
+ break;
default:
printk(KERN_INFO MOD "Active open failure - "
"atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
@@ -1437,6 +1692,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (status && act_open_has_tid(status))
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -1453,13 +1709,14 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = lookup_stid(t, stid);
if (!ep) {
- printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
- return 0;
+ PDBG("%s stid %d lookup failure!\n", __func__, stid);
+ goto out;
}
PDBG("%s ep %p status %d error %d\n", __func__, ep,
rpl->status, status2errno(rpl->status));
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+out:
return 0;
}
@@ -1511,14 +1768,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
skb_get(skb);
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
- opt0 = KEEP_ALIVE(1) |
+ opt0 = (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
TX_CHAN(ep->tx_chan) |
SMAC_SEL(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP(ep->tos >> 2) |
ULP_MODE(ULP_MODE_TCPDDP) |
RCV_BUFSIZ(rcv_win>>10);
opt2 = RX_CHANNEL(0) |
@@ -1530,6 +1788,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN(1);
+ if (enable_ecn) {
+ const struct tcphdr *tcph;
+ u32 hlen = ntohl(req->hdr_len);
+
+ tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
+ G_IP_HDR_LEN(hlen);
+ if (tcph->ece && tcph->cwr)
+ opt2 |= CCTRL_ECN(1);
+ }
rpl = cplhdr(skb);
INIT_TP_WR(rpl, ep->hwtid);
@@ -1646,22 +1913,30 @@ out:
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct c4iw_ep *child_ep, *parent_ep;
+ struct c4iw_ep *child_ep = NULL, *parent_ep;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
struct dst_entry *dst;
struct rtable *rt;
- __be32 local_ip, peer_ip;
+ __be32 local_ip, peer_ip = 0;
__be16 local_port, peer_port;
int err;
+ u16 peer_mss = ntohs(req->tcpopt.mss);
parent_ep = lookup_stid(t, stid);
- PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
+ if (!parent_ep) {
+ PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ goto reject;
+ }
get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
+ PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
+ "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
+ ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+
if (state_read(&parent_ep->com) != LISTEN) {
printk(KERN_ERR "%s - listening ep not in LISTEN\n",
__func__);
@@ -1695,6 +1970,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
+ if (peer_mss && child_ep->mtu > (peer_mss + 40))
+ child_ep->mtu = peer_mss + 40;
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -1716,6 +1994,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
accept_cr(child_ep, peer_ip, skb, req);
+ set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
goto out;
reject:
reject_cr(dev, hwtid, peer_ip, skb);
@@ -1735,12 +2014,17 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+ ntohs(req->tcp_opt));
+
set_emss(ep, ntohs(req->tcp_opt));
+ insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
dst_confirm(ep->dst);
state_set(&ep->com, MPA_REQ_WAIT);
start_ep_timer(ep);
send_flowc(ep, skb);
+ set_bit(PASS_ESTAB, &ep->com.history);
return 0;
}
@@ -1760,6 +2044,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
dst_confirm(ep->dst);
+ set_bit(PEER_CLOSE, &ep->com.history);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case MPA_REQ_WAIT:
@@ -1839,74 +2124,6 @@ static int is_neg_adv_abort(unsigned int status)
status == CPL_ERR_PERSIST_NEG_ADVICE;
}
-static int c4iw_reconnect(struct c4iw_ep *ep)
-{
- struct rtable *rt;
- int err = 0;
-
- PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
- init_timer(&ep->timer);
-
- /*
- * Allocate an active TID to initiate a TCP connection.
- */
- ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
- if (ep->atid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- /* find a route */
- rt = find_route(ep->com.dev,
- ep->com.cm_id->local_addr.sin_addr.s_addr,
- ep->com.cm_id->remote_addr.sin_addr.s_addr,
- ep->com.cm_id->local_addr.sin_port,
- ep->com.cm_id->remote_addr.sin_port, 0);
- if (!rt) {
- printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
- err = -EHOSTUNREACH;
- goto fail3;
- }
- ep->dst = &rt->dst;
-
- err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
- ep->dst, ep->com.dev, false);
- if (err) {
- printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- goto fail4;
- }
-
- PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
- ep->l2t->idx);
-
- state_set(&ep->com, CONNECTING);
- ep->tos = 0;
-
- /* send connect request to rnic */
- err = send_connect(ep);
- if (!err)
- goto out;
-
- cxgb4_l2t_release(ep->l2t);
-fail4:
- dst_release(ep->dst);
-fail3:
- cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail2:
- /*
- * remember to send notification to upper layer.
- * We are in here so the upper layer is not aware that this is
- * re-connect attempt and so, upper layer is still waiting for
- * response of 1st connect request.
- */
- connect_reply_upcall(ep, -ECONNRESET);
- c4iw_put_ep(&ep->com);
-out:
- return err;
-}
-
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -1927,6 +2144,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
}
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
ep->com.state);
+ set_bit(PEER_ABORT, &ep->com.history);
/*
* Wake up any threads in rdma_init() or rdma_fini().
@@ -2141,6 +2359,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
c4iw_put_ep(&ep->com);
return -ECONNRESET;
}
+ set_bit(ULP_REJECT, &ep->com.history);
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
if (mpa_rev == 0)
abort_connection(ep, NULL, GFP_KERNEL);
@@ -2170,6 +2389,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
BUG_ON(!qp);
+ set_bit(ULP_ACCEPT, &ep->com.history);
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
abort_connection(ep, NULL, GFP_KERNEL);
@@ -2293,6 +2513,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto fail2;
}
+ insert_handle(dev, &dev->atid_idr, ep, ep->atid);
PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
ntohl(cm_id->local_addr.sin_addr.s_addr),
@@ -2338,6 +2559,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
fail4:
dst_release(ep->dst);
fail3:
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
cm_id->rem_ref(cm_id);
@@ -2352,7 +2574,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_listen_ep *ep;
-
might_sleep();
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
@@ -2371,30 +2592,54 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
/*
* Allocate a server TID.
*/
- ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+ if (dev->rdev.lldi.enable_fw_ofld_conn)
+ ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
+ else
+ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+
if (ep->stid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
err = -ENOMEM;
goto fail2;
}
-
+ insert_handle(dev, &dev->stid_idr, ep, ep->stid);
state_set(&ep->com, LISTEN);
- c4iw_init_wr_wait(&ep->com.wr_wait);
- err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.local_addr.sin_addr.s_addr,
- ep->com.local_addr.sin_port,
- ep->com.dev->rdev.lldi.rxq_ids[0]);
- if (err)
- goto fail3;
-
- /* wait for pass_open_rpl */
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
- __func__);
+ if (dev->rdev.lldi.enable_fw_ofld_conn) {
+ do {
+ err = cxgb4_create_server_filter(
+ ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ 0,
+ ep->com.dev->rdev.lldi.rxq_ids[0],
+ 0,
+ 0);
+ if (err == -EBUSY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(100));
+ }
+ } while (err == -EBUSY);
+ } else {
+ c4iw_init_wr_wait(&ep->com.wr_wait);
+ err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
+ ep->stid, ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ 0,
+ ep->com.dev->rdev.lldi.rxq_ids[0]);
+ if (!err)
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+ &ep->com.wr_wait,
+ 0, 0, __func__);
+ }
if (!err) {
cm_id->provider_data = ep;
goto out;
}
-fail3:
+ pr_err("%s cxgb4_create_server/filter failed err %d " \
+ "stid %d laddr %08x lport %d\n", \
+ __func__, err, ep->stid,
+ ntohl(ep->com.local_addr.sin_addr.s_addr),
+ ntohs(ep->com.local_addr.sin_port));
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
cm_id->rem_ref(cm_id);
@@ -2413,12 +2658,19 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
might_sleep();
state_set(&ep->com, DEAD);
- c4iw_init_wr_wait(&ep->com.wr_wait);
- err = listen_stop(ep);
- if (err)
- goto done;
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
- __func__);
+ if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
+ err = cxgb4_remove_server_filter(
+ ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ } else {
+ c4iw_init_wr_wait(&ep->com.wr_wait);
+ err = listen_stop(ep);
+ if (err)
+ goto done;
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+ 0, 0, __func__);
+ }
+ remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
cm_id->rem_ref(cm_id);
@@ -2482,10 +2734,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (close) {
if (abrupt) {
+ set_bit(EP_DISC_ABORT, &ep->com.history);
close_complete_upcall(ep);
ret = send_abort(ep, NULL, gfp);
- } else
+ } else {
+ set_bit(EP_DISC_CLOSE, &ep->com.history);
ret = send_halfclose(ep, gfp);
+ }
if (ret)
fatal = 1;
}
@@ -2495,10 +2750,323 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
return ret;
}
-static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+ struct c4iw_ep *ep;
+ int atid = be32_to_cpu(req->tid);
+
+ ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+ if (!ep)
+ return;
+
+ switch (req->retval) {
+ case FW_ENOMEM:
+ set_bit(ACT_RETRY_NOMEM, &ep->com.history);
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ send_fw_act_open_req(ep, atid);
+ return;
+ }
+ case FW_EADDRINUSE:
+ set_bit(ACT_RETRY_INUSE, &ep->com.history);
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ send_fw_act_open_req(ep, atid);
+ return;
+ }
+ break;
+ default:
+ pr_info("%s unexpected ofld conn wr retval %d\n",
+ __func__, req->retval);
+ break;
+ }
+ pr_err("active ofld_connect_wr failure %d atid %d\n",
+ req->retval, atid);
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.act_ofld_conn_fails++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ connect_reply_upcall(ep, status2errno(req->retval));
+ state_set(&ep->com, DEAD);
+ remove_handle(dev, &dev->atid_idr, atid);
+ cxgb4_free_atid(dev->rdev.lldi.tids, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_put_ep(&ep->com);
+}
+
+static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+ struct sk_buff *rpl_skb;
+ struct cpl_pass_accept_req *cpl;
+ int ret;
+
+ rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+ BUG_ON(!rpl_skb);
+ if (req->retval) {
+ PDBG("%s passive open failure %d\n", __func__, req->retval);
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.pas_ofld_conn_fails++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ kfree_skb(rpl_skb);
+ } else {
+ cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
+ htonl(req->tid)));
+ ret = pass_accept_req(dev, rpl_skb);
+ if (!ret)
+ kfree_skb(rpl_skb);
+ }
+ return;
+}
+
+static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_fw6_msg *rpl = cplhdr(skb);
- c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
+
+ switch (rpl->type) {
+ case FW6_TYPE_CQE:
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ break;
+ case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
+ req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
+ switch (req->t_state) {
+ case TCP_SYN_SENT:
+ active_ofld_conn_reply(dev, skb, req);
+ break;
+ case TCP_SYN_RECV:
+ passive_ofld_conn_reply(dev, skb, req);
+ break;
+ default:
+ pr_err("%s unexpected ofld conn wr state %d\n",
+ __func__, req->t_state);
+ break;
+ }
+ break;
+ }
+ return 0;
+}
+
+static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
+{
+ u32 l2info;
+ u16 vlantag, len, hdr_len;
+ u8 intf;
+ struct cpl_rx_pkt *cpl = cplhdr(skb);
+ struct cpl_pass_accept_req *req;
+ struct tcp_options_received tmp_opt;
+
+ /* Store values from cpl_rx_pkt in temporary location. */
+ vlantag = cpl->vlan;
+ len = cpl->len;
+ l2info = cpl->l2info;
+ hdr_len = cpl->hdr_len;
+ intf = cpl->iff;
+
+ __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
+
+ /*
+ * We need to parse the TCP options from the SYN packet
+ * to generate cpl_pass_accept_req.
+ */
+ memset(&tmp_opt, 0, sizeof(tmp_opt));
+ tcp_clear_options(&tmp_opt);
+ tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+
+ req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
+ V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+ F_SYN_XACT_MATCH);
+ req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
+ V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
+ V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
+ V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
+ req->vlan = vlantag;
+ req->len = len;
+ req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
+ PASS_OPEN_TOS(tos));
+ req->tcpopt.mss = htons(tmp_opt.mss_clamp);
+ if (tmp_opt.wscale_ok)
+ req->tcpopt.wsf = tmp_opt.snd_wscale;
+ req->tcpopt.tstamp = tmp_opt.saw_tstamp;
+ if (tmp_opt.sack_ok)
+ req->tcpopt.sack = 1;
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
+ return;
+}
+
+static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+ __be32 laddr, __be16 lport,
+ __be32 raddr, __be16 rport,
+ u32 rcv_isn, u32 filter, u16 window,
+ u32 rss_qid, u8 port_id)
+{
+ struct sk_buff *req_skb;
+ struct fw_ofld_connection_wr *req;
+ struct cpl_pass_accept_req *cpl = cplhdr(skb);
+
+ req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+ req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
+ req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+ req->le.filter = filter;
+ req->le.lport = lport;
+ req->le.pport = rport;
+ req->le.u.ipv4.lip = laddr;
+ req->le.u.ipv4.pip = raddr;
+ req->tcb.rcv_nxt = htonl(rcv_isn + 1);
+ req->tcb.rcv_adv = htons(window);
+ req->tcb.t_state_to_astid =
+ htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
+ V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
+ V_FW_OFLD_CONNECTION_WR_ASTID(
+ GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+
+ /*
+ * We store the qid in opt2 which will be used by the firmware
+ * to send us the wr response.
+ */
+ req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+
+ /*
+ * We initialize the MSS index in the TCB to 0xF, so that when
+ * the driver sends cpl_pass_accept_rpl the TCB picks up the
+ * correct value. If this were 0, TP would ignore any value > 0
+ * for the MSS index.
+ */
+ req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+ req->cookie = cpu_to_be64((u64)skb);
+
+ set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
+ cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+}
+
+/*
+ * Handler for the CPL_RX_PKT message. cpl_rx_pkt messages arrive
+ * when a filter, rather than a server, is used to redirect a SYN
+ * packet. Packets that hit the filter are redirected to the offload
+ * queue, and the driver tries to establish the connection using a
+ * firmware work request.
+ */
+static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ int stid;
+ unsigned int filter;
+ struct ethhdr *eh = NULL;
+ struct vlan_ethhdr *vlan_eh = NULL;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ struct rss_header *rss = (void *)skb->data;
+ struct cpl_rx_pkt *cpl = (void *)skb->data;
+ struct cpl_pass_accept_req *req = (void *)(rss + 1);
+ struct l2t_entry *e;
+ struct dst_entry *dst;
+ struct rtable *rt;
+ struct c4iw_ep *lep;
+ u16 window;
+ struct port_info *pi;
+ struct net_device *pdev;
+ u16 rss_qid;
+ int step;
+ u32 tx_chan;
+ struct neighbour *neigh;
+
+ /* Drop all non-SYN packets */
+ if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+ goto reject;
+
+ /*
+ * Drop all packets which did not hit the filter.
+ * Unlikely to happen.
+ */
+ if (!(rss->filter_hit && rss->filter_tid))
+ goto reject;
+
+ /*
+ * Calculate the server TID from the filter hit index in cpl_rx_pkt.
+ */
+ stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+ + dev->rdev.lldi.tids->nstids;
+
+ lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+ if (!lep) {
+ PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ goto reject;
+ }
+
+ if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+ eh = (struct ethhdr *)(req + 1);
+ iph = (struct iphdr *)(eh + 1);
+ } else {
+ vlan_eh = (struct vlan_ethhdr *)(req + 1);
+ iph = (struct iphdr *)(vlan_eh + 1);
+ skb->vlan_tci = ntohs(cpl->vlan);
+ }
+
+ if (iph->version != 0x4)
+ goto reject;
+
+ tcph = (struct tcphdr *)(iph + 1);
+ skb_set_network_header(skb, (void *)iph - (void *)rss);
+ skb_set_transport_header(skb, (void *)tcph - (void *)rss);
+ skb_get(skb);
+
+ PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+ ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
+ ntohs(tcph->source), iph->tos);
+
+ rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
+ iph->tos);
+ if (!rt) {
+ pr_err("%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+ dst = &rt->dst;
+ neigh = dst_neigh_lookup_skb(dst, skb);
+
+ if (neigh->dev->flags & IFF_LOOPBACK) {
+ pdev = ip_dev_find(&init_net, iph->daddr);
+ e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+ pdev, 0);
+ pi = (struct port_info *)netdev_priv(pdev);
+ tx_chan = cxgb4_port_chan(pdev);
+ dev_put(pdev);
+ } else {
+ e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+ neigh->dev, 0);
+ pi = (struct port_info *)netdev_priv(neigh->dev);
+ tx_chan = cxgb4_port_chan(neigh->dev);
+ }
+ if (!e) {
+ pr_err("%s - failed to allocate l2t entry!\n",
+ __func__);
+ goto free_dst;
+ }
+
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
+ window = htons(tcph->window);
+
+ /* Calculate the filter portion for the LE region. */
+ filter = cpu_to_be32(select_ntuple(dev, dst, e));
+
+ /*
+ * Synthesize the cpl_pass_accept_req. We have everything except the
+ * TID. Once firmware sends a reply with TID we update the TID field
+ * in cpl and pass it through the regular cpl_pass_accept_req path.
+ */
+ build_cpl_pass_accept_req(skb, stid, iph->tos);
+ send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
+ tcph->source, ntohl(tcph->seq), filter, window,
+ rss_qid, pi->port_id);
+ cxgb4_l2t_release(e);
+free_dst:
+ dst_release(dst);
+reject:
return 0;
}
@@ -2521,7 +3089,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
[CPL_FW4_ACK] = fw4_ack,
- [CPL_FW6_MSG] = async_event
+ [CPL_FW6_MSG] = deferred_fw6_msg,
+ [CPL_RX_PKT] = rx_pkt
};
static void process_timeout(struct c4iw_ep *ep)
@@ -2532,6 +3101,7 @@ static void process_timeout(struct c4iw_ep *ep)
mutex_lock(&ep->com.mutex);
PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
ep->com.state);
+ set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
__state_set(&ep->com, ABORTING);
@@ -2551,9 +3121,8 @@ static void process_timeout(struct c4iw_ep *ep)
__state_set(&ep->com, ABORTING);
break;
default:
- printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+ WARN(1, "%s unexpected state ep %p tid %u state %u\n",
__func__, ep, ep->hwtid, ep->com.state);
- WARN_ON(1);
abort = 0;
}
mutex_unlock(&ep->com.mutex);
@@ -2653,7 +3222,7 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s type %u\n", __func__, rpl->type);
switch (rpl->type) {
- case 1:
+ case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
@@ -2661,7 +3230,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_wake_up(wr_waitp, ret ? -ret : 0);
kfree_skb(skb);
break;
- case 2:
+ case FW6_TYPE_CQE:
+ case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
sched(dev, skb);
break;
default:
@@ -2724,7 +3294,8 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
- [CPL_FW6_MSG] = fw6_msg
+ [CPL_FW6_MSG] = fw6_msg,
+ [CPL_RX_PKT] = sched
};
int __init c4iw_cm_init(void)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index cb4ecd783700..ba11c76c0b5a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -279,6 +279,11 @@ static int stats_show(struct seq_file *seq, void *v)
seq_printf(seq, " DB State: %s Transitions %llu\n",
db_state_str[dev->db_state],
dev->rdev.stats.db_state_transitions);
+ seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
+ seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
+ dev->rdev.stats.act_ofld_conn_fails);
+ seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
+ dev->rdev.stats.pas_ofld_conn_fails);
return 0;
}
@@ -309,6 +314,9 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
dev->rdev.stats.db_empty = 0;
dev->rdev.stats.db_drop = 0;
dev->rdev.stats.db_state_transitions = 0;
+ dev->rdev.stats.tcam_full = 0;
+ dev->rdev.stats.act_ofld_conn_fails = 0;
+ dev->rdev.stats.pas_ofld_conn_fails = 0;
mutex_unlock(&dev->rdev.stats.lock);
return count;
}
@@ -322,6 +330,113 @@ static const struct file_operations stats_debugfs_fops = {
.write = stats_clear,
};
+static int dump_ep(int id, void *p, void *data)
+{
+ struct c4iw_ep *ep = p;
+ struct c4iw_debugfs_data *epd = data;
+ int space;
+ int cc;
+
+ space = epd->bufsize - epd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
+ "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
+ ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
+ ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
+ &ep->com.local_addr.sin_addr.s_addr,
+ ntohs(ep->com.local_addr.sin_port),
+ &ep->com.remote_addr.sin_addr.s_addr,
+ ntohs(ep->com.remote_addr.sin_port));
+ if (cc < space)
+ epd->pos += cc;
+ return 0;
+}
+
+static int dump_listen_ep(int id, void *p, void *data)
+{
+ struct c4iw_listen_ep *ep = p;
+ struct c4iw_debugfs_data *epd = data;
+ int space;
+ int cc;
+
+ space = epd->bufsize - epd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
+ "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
+ ep->com.flags, ep->stid, ep->backlog,
+ &ep->com.local_addr.sin_addr.s_addr,
+ ntohs(ep->com.local_addr.sin_port));
+ if (cc < space)
+ epd->pos += cc;
+ return 0;
+}
+
+static int ep_release(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *epd = file->private_data;
+ if (!epd) {
+ pr_info("%s null qpd?\n", __func__);
+ return 0;
+ }
+ vfree(epd->buf);
+ kfree(epd);
+ return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *epd;
+ int ret = 0;
+ int count = 1;
+
+ epd = kmalloc(sizeof(*epd), GFP_KERNEL);
+ if (!epd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ epd->devp = inode->i_private;
+ epd->pos = 0;
+
+ spin_lock_irq(&epd->devp->lock);
+ idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
+ idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
+ idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
+ spin_unlock_irq(&epd->devp->lock);
+
+ epd->bufsize = count * 160;
+ epd->buf = vmalloc(epd->bufsize);
+ if (!epd->buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ spin_lock_irq(&epd->devp->lock);
+ idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
+ idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
+ idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
+ spin_unlock_irq(&epd->devp->lock);
+
+ file->private_data = epd;
+ goto out;
+err1:
+ kfree(epd);
+out:
+ return ret;
+}
+
+static const struct file_operations ep_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = ep_open,
+ .release = ep_release,
+ .read = debugfs_read,
+};
+
static int setup_debugfs(struct c4iw_dev *devp)
{
struct dentry *de;
@@ -344,6 +459,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
if (de && de->d_inode)
de->d_inode->i_size = 4096;
+ de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
+ (void *)devp, &ep_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
+
return 0;
}
@@ -475,6 +595,9 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
idr_destroy(&ctx->dev->cqidr);
idr_destroy(&ctx->dev->qpidr);
idr_destroy(&ctx->dev->mmidr);
+ idr_destroy(&ctx->dev->hwtid_idr);
+ idr_destroy(&ctx->dev->stid_idr);
+ idr_destroy(&ctx->dev->atid_idr);
iounmap(ctx->dev->rdev.oc_mw_kva);
ib_dealloc_device(&ctx->dev->ibdev);
ctx->dev = NULL;
@@ -532,6 +655,9 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
idr_init(&devp->cqidr);
idr_init(&devp->qpidr);
idr_init(&devp->mmidr);
+ idr_init(&devp->hwtid_idr);
+ idr_init(&devp->stid_idr);
+ idr_init(&devp->atid_idr);
spin_lock_init(&devp->lock);
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
@@ -577,14 +703,76 @@ out:
return ctx;
}
+static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+ const __be64 *rsp,
+ u32 pktshift)
+{
+ struct sk_buff *skb;
+
+ /*
+ * Allocate space for cpl_pass_accept_req which will be synthesized by
+ * the driver. Once the driver synthesizes the request the skb will go
+ * through the regular cpl_pass_accept_req processing.
+ * The math here assumes sizeof cpl_pass_accept_req >= sizeof
+ * cpl_rx_pkt.
+ */
+ skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+
+ __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift);
+
+ /*
+ * This skb will contain:
+ * rss_header from the rspq descriptor (1 flit)
+ * cpl_rx_pkt struct from the rspq descriptor (2 flits)
+ * space for the difference between the size of an
+ * rx_pkt and pass_accept_req cpl (1 flit)
+ * the packet data from the gl
+ */
+ skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header));
+ skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
+ sizeof(struct cpl_pass_accept_req),
+ gl->va + pktshift,
+ gl->tot_len - pktshift);
+ return skb;
+}
+
+static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
+ const __be64 *rsp)
+{
+ unsigned int opcode = *(u8 *)rsp;
+ struct sk_buff *skb;
+
+ if (opcode != CPL_RX_PKT)
+ goto out;
+
+ skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
+ if (skb == NULL)
+ goto out;
+
+ if (c4iw_handlers[opcode] == NULL) {
+ pr_info("%s no handler opcode 0x%x...\n", __func__,
+ opcode);
+ kfree_skb(skb);
+ goto out;
+ }
+ c4iw_handlers[opcode](dev, skb);
+ return 1;
+out:
+ return 0;
+}
+
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct uld_ctx *ctx = handle;
struct c4iw_dev *dev = ctx->dev;
struct sk_buff *skb;
- const struct cpl_act_establish *rpl;
- unsigned int opcode;
+ u8 opcode;
if (gl == NULL) {
/* omit RSS and rsp_ctrl at end of descriptor */
@@ -601,19 +789,29 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
u32 qid = be32_to_cpu(rc->pldbuflen_qid);
c4iw_ev_handler(dev, qid);
return 0;
+ } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
+ if (recv_rx_pkt(dev, gl, rsp))
+ return 0;
+
+ pr_info("%s: unexpected FL contents at %p, " \
+ "RSS %#llx, FL %#llx, len %u\n",
+ pci_name(ctx->lldi.pdev), gl->va,
+ (unsigned long long)be64_to_cpu(*rsp),
+ (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+ gl->tot_len);
+
+ return 0;
} else {
skb = cxgb4_pktgl_to_skb(gl, 128, 128);
if (unlikely(!skb))
goto nomem;
}
- rpl = cplhdr(skb);
- opcode = rpl->ot.opcode;
-
+ opcode = *(u8 *)rsp;
if (c4iw_handlers[opcode])
c4iw_handlers[opcode](dev, skb);
else
- printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
+ pr_info("%s no handler opcode 0x%x...\n", __func__,
opcode);
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9beb3a9f0336..9c1644fb0259 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -130,6 +130,9 @@ struct c4iw_stats {
u64 db_empty;
u64 db_drop;
u64 db_state_transitions;
+ u64 tcam_full;
+ u64 act_ofld_conn_fails;
+ u64 pas_ofld_conn_fails;
};
struct c4iw_rdev {
@@ -223,6 +226,9 @@ struct c4iw_dev {
struct dentry *debugfs_root;
enum db_state db_state;
int qpcnt;
+ struct idr hwtid_idr;
+ struct idr atid_idr;
+ struct idr stid_idr;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -712,6 +718,31 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
};
+enum c4iw_ep_history {
+ ACT_OPEN_REQ = 0,
+ ACT_OFLD_CONN = 1,
+ ACT_OPEN_RPL = 2,
+ ACT_ESTAB = 3,
+ PASS_ACCEPT_REQ = 4,
+ PASS_ESTAB = 5,
+ ABORT_UPCALL = 6,
+ ESTAB_UPCALL = 7,
+ CLOSE_UPCALL = 8,
+ ULP_ACCEPT = 9,
+ ULP_REJECT = 10,
+ TIMEDOUT = 11,
+ PEER_ABORT = 12,
+ PEER_CLOSE = 13,
+ CONNREQ_UPCALL = 14,
+ ABORT_CONN = 15,
+ DISCONN_UPCALL = 16,
+ EP_DISC_CLOSE = 17,
+ EP_DISC_ABORT = 18,
+ CONN_RPL_UPCALL = 19,
+ ACT_RETRY_NOMEM = 20,
+ ACT_RETRY_INUSE = 21
+};
+
struct c4iw_ep_common {
struct iw_cm_id *cm_id;
struct c4iw_qp *qp;
@@ -723,6 +754,7 @@ struct c4iw_ep_common {
struct sockaddr_in remote_addr;
struct c4iw_wr_wait wr_wait;
unsigned long flags;
+ unsigned long history;
};
struct c4iw_listen_ep {
@@ -760,6 +792,7 @@ struct c4iw_ep {
u8 tos;
u8 retry_with_mpa_v1;
u8 tried_with_mpa_v1;
+ unsigned int retry_count;
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 832e7a7d0aee..f8a62918a88d 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -713,8 +713,8 @@ static struct attribute_group ehca_dev_attr_grp = {
.attrs = ehca_dev_attrs
};
-static int __devinit ehca_probe(struct platform_device *dev,
- const struct of_device_id *id)
+static int ehca_probe(struct platform_device *dev,
+ const struct of_device_id *id)
{
struct ehca_shca *shca;
const u64 *handle;
@@ -879,7 +879,7 @@ probe1:
return -EINVAL;
}
-static int __devexit ehca_remove(struct platform_device *dev)
+static int ehca_remove(struct platform_device *dev)
{
struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
unsigned long flags;
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 2d41d04fd959..89517ffb4389 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -90,26 +90,6 @@
static DEFINE_SPINLOCK(hcall_lock);
-static u32 get_longbusy_msecs(int longbusy_rc)
-{
- switch (longbusy_rc) {
- case H_LONG_BUSY_ORDER_1_MSEC:
- return 1;
- case H_LONG_BUSY_ORDER_10_MSEC:
- return 10;
- case H_LONG_BUSY_ORDER_100_MSEC:
- return 100;
- case H_LONG_BUSY_ORDER_1_SEC:
- return 1000;
- case H_LONG_BUSY_ORDER_10_SEC:
- return 10000;
- case H_LONG_BUSY_ORDER_100_SEC:
- return 100000;
- default:
- return 1;
- }
-}
-
static long ehca_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
unsigned long arg2,
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index bfca37b2432f..7b371f545ece 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -127,9 +127,8 @@ const char *ipath_ibcstatus_str[] = {
"LTState1C", "LTState1D", "LTState1E", "LTState1F"
};
-static void __devexit ipath_remove_one(struct pci_dev *);
-static int __devinit ipath_init_one(struct pci_dev *,
- const struct pci_device_id *);
+static void ipath_remove_one(struct pci_dev *);
+static int ipath_init_one(struct pci_dev *, const struct pci_device_id *);
/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
@@ -148,7 +147,7 @@ MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
static struct pci_driver ipath_driver = {
.name = IPATH_DRV_NAME,
.probe = ipath_init_one,
- .remove = __devexit_p(ipath_remove_one),
+ .remove = ipath_remove_one,
.id_table = ipath_pci_tbl,
.driver = {
.groups = ipath_driver_attr_groups,
@@ -392,8 +391,7 @@ done:
static void cleanup_device(struct ipath_devdata *dd);
-static int __devinit ipath_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret, len, j;
struct ipath_devdata *dd;
@@ -737,7 +735,7 @@ static void cleanup_device(struct ipath_devdata *dd)
kfree(tmp);
}
-static void __devexit ipath_remove_one(struct pci_dev *pdev)
+static void ipath_remove_one(struct pci_dev *pdev)
{
struct ipath_devdata *dd = pci_get_drvdata(pdev);
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 49b09c697c7c..be2a60e142b0 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -719,16 +719,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
goto done;
/*
- * we ignore most issues after reporting them, but have to specially
- * handle hardware-disabled chips.
- */
- if (ret == 2) {
- /* unique error, known to ipath_init_one */
- ret = -EPERM;
- goto done;
- }
-
- /*
* We could bump this to allow for full rcvegrcnt + rcvtidcnt,
* but then it no longer nicely fits power of two, and since
* we now use routines that backend onto __get_free_pages, the
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 80079e5a2e30..dbc99d41605c 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -268,15 +268,15 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
unsigned long flags;
- spin_lock_irqsave(&sriov->going_down_lock, flags);
spin_lock(&sriov->id_map_lock);
+ spin_lock_irqsave(&sriov->going_down_lock, flags);
/*make sure that there is no schedule inside the scheduled work.*/
if (!sriov->is_going_down) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
- spin_unlock(&sriov->id_map_lock);
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
+ spin_unlock(&sriov->id_map_lock);
}
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index c9eb6a6815ce..ae67df35dd4d 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -66,7 +66,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
- return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
+ return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}
static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -77,8 +77,9 @@ static void *get_cqe(struct mlx4_ib_cq *cq, int n)
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
+ struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
- return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+ return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
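A note on the cqe + 1 arithmetic introduced here: the offsets used throughout this patch indicate that, with 64-byte CQEs, the 32-byte struct mlx4_cqe that software parses sits in the second half of each entry, so the pointer is advanced by one struct (32 bytes) before the ownership bit is read. A hypothetical helper (not part of the patch) that centralizes the offset would look like:

	static inline struct mlx4_cqe *real_cqe(struct mlx4_ib_cq *cq,
						struct mlx4_cqe *cqe)
	{
		/* 64-byte entries keep the valid CQE in their last 32 bytes. */
		return cq->buf.entry_size == 64 ? cqe + 1 : cqe;
	}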
@@ -99,12 +100,13 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
{
int err;
- err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+ err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
PAGE_SIZE * 2, &buf->buf);
if (err)
goto out;
+ buf->entry_size = dev->dev->caps.cqe_size;
err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
&buf->mtt);
if (err)
@@ -120,8 +122,7 @@ err_mtt:
mlx4_mtt_cleanup(dev->dev, &buf->mtt);
err_buf:
- mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
- &buf->buf);
+ mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
out:
return err;
@@ -129,7 +130,7 @@ out:
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
- mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+ mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
@@ -137,8 +138,9 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
u64 buf_addr, int cqe)
{
int err;
+ int cqe_size = dev->dev->caps.cqe_size;
- *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+ *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -331,16 +333,23 @@ static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
struct mlx4_cqe *cqe, *new_cqe;
int i;
+ int cqe_size = cq->buf.entry_size;
+ int cqe_inc = cqe_size == 64 ? 1 : 0;
i = cq->mcq.cons_index;
cqe = get_cqe(cq, i & cq->ibcq.cqe);
+ cqe += cqe_inc;
+
while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
(i + 1) & cq->resize_buf->cqe);
- memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+ memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
+ new_cqe += cqe_inc;
+
new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+ cqe += cqe_inc;
}
++cq->mcq.cons_index;
}
@@ -438,6 +447,7 @@ err_buf:
out:
mutex_unlock(&cq->resize_mutex);
+
return err;
}
@@ -586,6 +596,9 @@ repoll:
if (!cqe)
return -EAGAIN;
+ if (cq->buf.entry_size == 64)
+ cqe++;
+
++cq->mcq.cons_index;
/*
@@ -807,6 +820,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
int nfreed = 0;
struct mlx4_cqe *cqe, *dest;
u8 owner_bit;
+ int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;
/*
* First we need to find the current producer index, so we
@@ -825,12 +839,16 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
*/
while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+ cqe += cqe_inc;
+
if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
++nfreed;
} else if (nfreed) {
dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
+ dest += cqe_inc;
+
owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
memcpy(dest, cqe, sizeof *cqe);
dest->owner_sr_opcode = owner_bit |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 718ec6b2bad2..e7d81c0d1ac5 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -563,15 +563,24 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_ucontext *context;
+ struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
struct mlx4_ib_alloc_ucontext_resp resp;
int err;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- resp.qp_tab_size = dev->dev->caps.num_qps;
- resp.bf_reg_size = dev->dev->caps.bf_reg_size;
- resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
+ resp_v3.qp_tab_size = dev->dev->caps.num_qps;
+ resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
+ resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ } else {
+ resp.dev_caps = dev->dev->caps.userspace_caps;
+ resp.qp_tab_size = dev->dev->caps.num_qps;
+ resp.bf_reg_size = dev->dev->caps.bf_reg_size;
+ resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ resp.cqe_size = dev->dev->caps.cqe_size;
+ }
context = kmalloc(sizeof *context, GFP_KERNEL);
if (!context)
@@ -586,7 +595,11 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
- err = ib_copy_to_udata(udata, &resp, sizeof resp);
+ if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
+ err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
+ else
+ err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+
if (err) {
mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
kfree(context);
@@ -1342,7 +1355,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
ibdev->ib_dev.dma_device = &dev->pdev->dev;
- ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
+ if (dev->caps.userspace_caps)
+ ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
+ else
+ ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
+
ibdev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e04cbc9a54a5..dcd845bc30f0 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -90,6 +90,7 @@ struct mlx4_ib_xrcd {
struct mlx4_ib_cq_buf {
struct mlx4_buf buf;
struct mlx4_mtt mtt;
+ int entry_size;
};
struct mlx4_ib_cq_resize {
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
index 13beedeeef9f..07e6769ef43b 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -40,7 +40,9 @@
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
-#define MLX4_IB_UVERBS_ABI_VERSION 3
+
+#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION 3
+#define MLX4_IB_UVERBS_ABI_VERSION 4
/*
* Make sure that all structs defined in this file remain laid out so
@@ -50,10 +52,18 @@
* instead.
*/
+struct mlx4_ib_alloc_ucontext_resp_v3 {
+ __u32 qp_tab_size;
+ __u16 bf_reg_size;
+ __u16 bf_regs_per_page;
+};
+
struct mlx4_ib_alloc_ucontext_resp {
+ __u32 dev_caps;
__u32 qp_tab_size;
__u16 bf_reg_size;
__u16 bf_regs_per_page;
+ __u32 cqe_size;
};
struct mlx4_ib_alloc_pd_resp {
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index aa12a533ae9e..87897b95666d 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -130,7 +130,7 @@ static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
-static char mthca_version[] __devinitdata =
+static char mthca_version[] =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -1139,8 +1139,7 @@ int __mthca_restart_one(struct pci_dev *pdev)
return __mthca_init_one(pdev, hca_type);
}
-static int __devinit mthca_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret;
@@ -1162,7 +1161,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
return ret;
}
-static void __devexit mthca_remove_one(struct pci_dev *pdev)
+static void mthca_remove_one(struct pci_dev *pdev)
{
mutex_lock(&mthca_device_mutex);
__mthca_remove_one(pdev);
@@ -1199,7 +1198,7 @@ static struct pci_driver mthca_driver = {
.name = DRV_NAME,
.id_table = mthca_pci_table,
.probe = mthca_init_one,
- .remove = __devexit_p(mthca_remove_one)
+ .remove = mthca_remove_one,
};
static void __init __mthca_check_profile_val(const char *name, int *pval,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 748db2d3e465..429141078eec 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -135,6 +135,7 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
struct net_device *event_netdev = ifa->ifa_dev->dev;
struct nes_device *nesdev;
struct net_device *netdev;
+ struct net_device *upper_dev;
struct nes_vnic *nesvnic;
unsigned int is_bonded;
@@ -145,8 +146,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
nesdev, nesdev->netdev[0]->name);
netdev = nesdev->netdev[0];
nesvnic = netdev_priv(netdev);
+ upper_dev = netdev_master_upper_dev_get(netdev);
is_bonded = netif_is_bond_slave(netdev) &&
- (netdev->master == event_netdev);
+ (upper_dev == event_netdev);
if ((netdev == event_netdev) || is_bonded) {
if (nesvnic->rdma_enabled == 0) {
nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
@@ -179,9 +181,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
/* fall through */
case NETDEV_CHANGEADDR:
/* Add the address to the IP table */
- if (netdev->master)
+ if (upper_dev)
nesvnic->local_ipaddr =
- ((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address;
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
else
nesvnic->local_ipaddr = ifa->ifa_address;
@@ -444,7 +446,7 @@ static irqreturn_t nes_interrupt(int irq, void *dev_id)
/**
* nes_probe - Device initialization
*/
-static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
+static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct nes_device *nesdev = NULL;
@@ -749,7 +751,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
/**
* nes_remove - unload from kernel
*/
-static void __devexit nes_remove(struct pci_dev *pcidev)
+static void nes_remove(struct pci_dev *pcidev)
{
struct nes_device *nesdev = pci_get_drvdata(pcidev);
struct net_device *netdev;
@@ -810,7 +812,7 @@ static struct pci_driver nes_pci_driver = {
.name = DRV_NAME,
.id_table = nes_pci_table,
.probe = nes_probe,
- .remove = __devexit_p(nes_remove),
+ .remove = nes_remove,
};
static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 5cac29e6bc1c..33cc58941a3e 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -532,6 +532,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
int nes_destroy_cqp(struct nes_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
void nes_recheck_link_status(struct work_struct *work);
+void nes_terminate_timeout(unsigned long context);
/* nes_nic.c */
struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index cfaacaf6bf5f..24b9f1a0107b 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -629,11 +629,9 @@ static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_a
case SEND_RDMA_READ_ZERO:
default:
- if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO) {
- printk(KERN_ERR "%s[%u]: Unsupported RDMA0 len operation=%u\n",
- __func__, __LINE__, cm_node->send_rdma0_op);
- WARN_ON(1);
- }
+ if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO)
+ WARN(1, "Unsupported RDMA0 len operation=%u\n",
+ cm_node->send_rdma0_op);
nes_debug(NES_DBG_CM, "Sending first rdma operation.\n");
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
cpu_to_le32(NES_IWARP_SQ_OP_RDMAR);
@@ -671,7 +669,6 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
struct nes_cm_core *cm_core = cm_node->cm_core;
struct nes_timer_entry *new_send;
int ret = 0;
- u32 was_timer_set;
new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send)
@@ -723,12 +720,8 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
}
}
- was_timer_set = timer_pending(&cm_core->tcp_timer);
-
- if (!was_timer_set) {
- cm_core->tcp_timer.expires = new_send->timetosend;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, new_send->timetosend);
return ret;
}
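The conversion above (and the similar ones below) replaces the open-coded expires/add_timer() sequence with mod_timer(), which arms an inactive timer and atomically reschedules a pending one. The timer_pending() check is kept so that a timer already armed for an earlier deadline is not pushed out to a later one. A minimal sketch of the idiom with illustrative names:

	static void arm_once(struct timer_list *t, unsigned long deadline)
	{
		/* Arm the timer only if it is not already counting down. */
		if (!timer_pending(t))
			mod_timer(t, deadline);
	}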
@@ -946,10 +939,8 @@ static void nes_cm_timer_tick(unsigned long pass)
}
if (settimer) {
- if (!timer_pending(&cm_core->tcp_timer)) {
- cm_core->tcp_timer.expires = nexttimeout;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, nexttimeout);
}
}
@@ -1314,8 +1305,6 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core,
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
- u32 was_timer_set;
-
cm_node->accelerated = 1;
if (cm_node->accept_pend) {
@@ -1325,11 +1314,8 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
}
- was_timer_set = timer_pending(&cm_core->tcp_timer);
- if (!was_timer_set) {
- cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, (jiffies + NES_SHORT_TIME));
return 0;
}
@@ -1354,7 +1340,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
}
if (netif_is_bond_slave(nesvnic->netdev))
- netdev = nesvnic->netdev->master;
+ netdev = netdev_master_upper_dev_get(nesvnic->netdev);
else
netdev = nesvnic->netdev;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index fe7965ee4096..67647e264611 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
static void process_critical_error(struct nes_device *nesdev);
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
-static void nes_terminate_timeout(unsigned long context);
static void nes_terminate_start_timer(struct nes_qp *nesqp);
#ifdef CONFIG_INFINIBAND_NES_DEBUG
@@ -3520,7 +3519,7 @@ static void nes_terminate_received(struct nes_device *nesdev,
}
/* Timeout routine in case terminate fails to complete */
-static void nes_terminate_timeout(unsigned long context)
+void nes_terminate_timeout(unsigned long context)
{
struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
@@ -3530,11 +3529,7 @@ static void nes_terminate_timeout(unsigned long context)
/* Set a timer in case hw cannot complete the terminate sequence */
static void nes_terminate_start_timer(struct nes_qp *nesqp)
{
- init_timer(&nesqp->terminate_timer);
- nesqp->terminate_timer.function = nes_terminate_timeout;
- nesqp->terminate_timer.expires = jiffies + HZ;
- nesqp->terminate_timer.data = (unsigned long)nesqp;
- add_timer(&nesqp->terminate_timer);
+ mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
}
/**
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 3ba7be369452..416645259b0f 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -210,6 +210,9 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
}
while (1) {
+ if (skb_queue_empty(&nesqp->pau_list))
+ goto out;
+
seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
if (seq == nextseq) {
if (skb->len || processacks)
@@ -218,14 +221,13 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
goto out;
}
- if (skb->next == (struct sk_buff *)&nesqp->pau_list)
- goto out;
-
old_skb = skb;
skb = skb->next;
skb_unlink(old_skb, &nesqp->pau_list);
nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
nes_rem_ref_cm_node(nesqp->cm_node);
+ if (skb == (struct sk_buff *)&nesqp->pau_list)
+ goto out;
}
return skb;
@@ -245,7 +247,6 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
struct nes_rskb_cb *cb;
struct pau_fpdu_info *fpdu_info = NULL;
struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
- unsigned long flags;
u32 fpdu_len = 0;
u32 tmp_len;
int frag_cnt = 0;
@@ -260,12 +261,10 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
*pau_fpdu_info = NULL;
- spin_lock_irqsave(&nesqp->pau_lock, flags);
skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
- if (!skb) {
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb)
goto out;
- }
+
cb = (struct nes_rskb_cb *)&skb->cb[0];
if (skb->len) {
fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
@@ -290,10 +289,9 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
skb = nes_get_next_skb(nesdev, nesqp, skb,
nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
- if (!skb) {
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb)
goto out;
- } else if (rst_rcvd) {
+ if (rst_rcvd) {
/* rst received in the middle of fpdu */
for (; i >= 0; i--) {
skb_unlink(frags[i].skb, &nesqp->pau_list);
@@ -320,8 +318,6 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
frag_cnt = 1;
}
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
-
/* Found one */
fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
if (fpdu_info == NULL) {
@@ -383,9 +379,8 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
if (frags[i].skb->len == 0) {
/* Pull skb off the list - it will be freed in the callback */
- spin_lock_irqsave(&nesqp->pau_lock, flags);
- skb_unlink(frags[i].skb, &nesqp->pau_list);
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb_queue_empty(&nesqp->pau_list))
+ skb_unlink(frags[i].skb, &nesqp->pau_list);
} else {
/* Last skb still has data so update the seq */
iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
@@ -414,14 +409,18 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
struct pau_fpdu_info *fpdu_info;
struct nes_hw_cqp_wqe *cqp_wqe;
struct nes_cqp_request *cqp_request;
+ unsigned long flags;
u64 u64tmp;
u32 u32tmp;
int rc;
while (1) {
+ spin_lock_irqsave(&nesqp->pau_lock, flags);
rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
- if (fpdu_info == NULL)
+ if (rc || (fpdu_info == NULL)) {
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
return rc;
+ }
cqp_request = fpdu_info->cqp_request;
cqp_wqe = &cqp_request->cqp_wqe;
@@ -447,7 +446,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
lower_32_bits(u64tmp));
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
- upper_32_bits(u64tmp >> 32));
+ upper_32_bits(u64tmp));
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
lower_32_bits(fpdu_info->frags[0].physaddr));
@@ -475,6 +474,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
atomic_set(&cqp_request->refcount, 1);
nes_post_cqp_request(nesdev, cqp_request);
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
}
return 0;
@@ -649,11 +649,9 @@ static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request
nesqp = qh_chg->nesqp;
/* Should we handle the bad completion */
- if (cqp_request->major_code) {
- printk(KERN_ERR PFX "Invalid cqp_request major_code=0x%x\n",
+ if (cqp_request->major_code)
+ WARN(1, PFX "Invalid cqp_request major_code=0x%x\n",
cqp_request->major_code);
- WARN_ON(1);
- }
switch (nesqp->pau_state) {
case PAU_DEL_QH:
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 0564be757d82..85cf4d1ac442 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -944,12 +944,13 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
addr,
perfect_filter_register_address+(mc_index * 8),
mc_nic_index);
- macaddr_high = ((u16) addr[0]) << 8;
- macaddr_high += (u16) addr[1];
- macaddr_low = ((u32) addr[2]) << 24;
- macaddr_low += ((u32) addr[3]) << 16;
- macaddr_low += ((u32) addr[4]) << 8;
- macaddr_low += (u32) addr[5];
+ macaddr_high = ((u8) addr[0]) << 8;
+ macaddr_high += (u8) addr[1];
+ macaddr_low = ((u8) addr[2]) << 24;
+ macaddr_low += ((u8) addr[3]) << 16;
+ macaddr_low += ((u8) addr[4]) << 8;
+ macaddr_low += (u8) addr[5];
+
nes_write_indexed(nesdev,
perfect_filter_register_address+(mc_index * 8),
macaddr_low);
@@ -1316,11 +1317,13 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
- sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
- nesadapter->firmware_version & 0x000000ff);
- strcpy(drvinfo->version, DRV_VERSION);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", nesadapter->firmware_version >> 16,
+ nesadapter->firmware_version & 0x000000ff);
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
drvinfo->regdump_len = 0;
@@ -1702,7 +1705,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->dev_addr[3] = (u8)(u64temp>>16);
netdev->dev_addr[4] = (u8)(u64temp>>8);
netdev->dev_addr[5] = (u8)u64temp;
- memcpy(netdev->perm_addr, netdev->dev_addr, 6);
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index cd0ecb215cca..07e4fbad987a 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}
nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
+ init_timer(&nesqp->terminate_timer);
+ nesqp->terminate_timer.function = nes_terminate_timeout;
+ nesqp->terminate_timer.data = (unsigned long)nesqp;
/* update the QP table */
nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
@@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
return &nesqp->ibqp;
}
-
/**
* nes_clean_cq
*/
@@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ibmr;
case IWNES_MEMREG_TYPE_QP:
case IWNES_MEMREG_TYPE_CQ:
+ if (!region->length) {
+ nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
+ ib_umem_release(region);
+ return ERR_PTR(-EINVAL);
+ }
nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
if (!nespbl) {
nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 4443adfcd9ee..ddf066d9abb6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1134,9 +1134,8 @@ void qib_disable_after_error(struct qib_devdata *dd)
*dd->devstatusp |= QIB_STATUS_HWERROR;
}
-static void __devexit qib_remove_one(struct pci_dev *);
-static int __devinit qib_init_one(struct pci_dev *,
- const struct pci_device_id *);
+static void qib_remove_one(struct pci_dev *);
+static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
@@ -1153,7 +1152,7 @@ MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
struct pci_driver qib_driver = {
.name = QIB_DRV_NAME,
.probe = qib_init_one,
- .remove = __devexit_p(qib_remove_one),
+ .remove = qib_remove_one,
.id_table = qib_pci_tbl,
.err_handler = &qib_pci_err_handler,
};
@@ -1342,8 +1341,7 @@ static void qib_postinit_cleanup(struct qib_devdata *dd)
qib_free_devdata(dd);
}
-static int __devinit qib_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret, j, pidx, initfail;
struct qib_devdata *dd = NULL;
@@ -1448,7 +1446,7 @@ bail:
return ret;
}
-static void __devexit qib_remove_one(struct pci_dev *pdev)
+static void qib_remove_one(struct pci_dev *pdev)
{
struct qib_devdata *dd = pci_get_drvdata(pdev);
int ret;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 4850d03870c2..35275099cafd 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
struct qib_qp __rcu **qpp;
qpp = &dev->qp_table[n];
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
- for (; q; qpp = &q->next) {
+ for (; (q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&dev->qpt_lock))) != NULL;
+ qpp = &q->next)
if (q == qp) {
atomic_dec(&qp->refcount);
*qpp = qp->next;
rcu_assign_pointer(qp->next, NULL);
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
break;
}
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
- }
}
spin_unlock_irqrestore(&dev->qpt_lock, flags);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 72ae63f0072d..67b0c1d23678 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
tx_req->mapping = addr;
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
addr, skb->len);
if (unlikely(rc)) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 29bc7b5724ac..ca131335417b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -39,7 +39,7 @@
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
+ strlcpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver));
}
static int ipoib_get_coalesce(struct net_device *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10221f40803..2cfa76f5d99e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
netif_stop_queue(dev);
}
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, tx_req, phead, hlen);
if (unlikely(rc)) {
@@ -615,8 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
address->last_send = priv->tx_head;
++priv->tx_head;
- skb_orphan(skb);
-
}
if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 922d845f76b0..d5088ce78290 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -222,27 +222,29 @@ static int srp_new_cm_id(struct srp_target_port *target)
static int srp_create_target_ib(struct srp_target_port *target)
{
struct ib_qp_init_attr *init_attr;
+ struct ib_cq *recv_cq, *send_cq;
+ struct ib_qp *qp;
int ret;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
if (!init_attr)
return -ENOMEM;
- target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
- if (IS_ERR(target->recv_cq)) {
- ret = PTR_ERR(target->recv_cq);
+ recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+ if (IS_ERR(recv_cq)) {
+ ret = PTR_ERR(recv_cq);
goto err;
}
- target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
- if (IS_ERR(target->send_cq)) {
- ret = PTR_ERR(target->send_cq);
+ send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+ if (IS_ERR(send_cq)) {
+ ret = PTR_ERR(send_cq);
goto err_recv_cq;
}
- ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
+ ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -251,30 +253,41 @@ static int srp_create_target_ib(struct srp_target_port *target)
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
init_attr->qp_type = IB_QPT_RC;
- init_attr->send_cq = target->send_cq;
- init_attr->recv_cq = target->recv_cq;
+ init_attr->send_cq = send_cq;
+ init_attr->recv_cq = recv_cq;
- target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
- if (IS_ERR(target->qp)) {
- ret = PTR_ERR(target->qp);
+ qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto err_send_cq;
}
- ret = srp_init_qp(target, target->qp);
+ ret = srp_init_qp(target, qp);
if (ret)
goto err_qp;
+ if (target->qp)
+ ib_destroy_qp(target->qp);
+ if (target->recv_cq)
+ ib_destroy_cq(target->recv_cq);
+ if (target->send_cq)
+ ib_destroy_cq(target->send_cq);
+
+ target->qp = qp;
+ target->recv_cq = recv_cq;
+ target->send_cq = send_cq;
+
kfree(init_attr);
return 0;
err_qp:
- ib_destroy_qp(target->qp);
+ ib_destroy_qp(qp);
err_send_cq:
- ib_destroy_cq(target->send_cq);
+ ib_destroy_cq(send_cq);
err_recv_cq:
- ib_destroy_cq(target->recv_cq);
+ ib_destroy_cq(recv_cq);
err:
kfree(init_attr);
@@ -289,6 +302,9 @@ static void srp_free_target_ib(struct srp_target_port *target)
ib_destroy_cq(target->send_cq);
ib_destroy_cq(target->recv_cq);
+ target->qp = NULL;
+ target->send_cq = target->recv_cq = NULL;
+
for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
for (i = 0; i < SRP_SQ_SIZE; ++i)
@@ -428,34 +444,50 @@ static int srp_send_req(struct srp_target_port *target)
return status;
}
-static void srp_disconnect_target(struct srp_target_port *target)
+static bool srp_queue_remove_work(struct srp_target_port *target)
{
- /* XXX should send SRP_I_LOGOUT request */
+ bool changed = false;
- init_completion(&target->done);
- if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
- shost_printk(KERN_DEBUG, target->scsi_host,
- PFX "Sending CM DREQ failed\n");
- return;
+ spin_lock_irq(&target->lock);
+ if (target->state != SRP_TARGET_REMOVED) {
+ target->state = SRP_TARGET_REMOVED;
+ changed = true;
}
- wait_for_completion(&target->done);
+ spin_unlock_irq(&target->lock);
+
+ if (changed)
+ queue_work(system_long_wq, &target->remove_work);
+
+ return changed;
}
-static bool srp_change_state(struct srp_target_port *target,
- enum srp_target_state old,
- enum srp_target_state new)
+static bool srp_change_conn_state(struct srp_target_port *target,
+ bool connected)
{
bool changed = false;
spin_lock_irq(&target->lock);
- if (target->state == old) {
- target->state = new;
+ if (target->connected != connected) {
+ target->connected = connected;
changed = true;
}
spin_unlock_irq(&target->lock);
+
return changed;
}
+static void srp_disconnect_target(struct srp_target_port *target)
+{
+ if (srp_change_conn_state(target, false)) {
+ /* XXX should send SRP_I_LOGOUT request */
+
+ if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM DREQ failed\n");
+ }
+ }
+}
+
static void srp_free_req_data(struct srp_target_port *target)
{
struct ib_device *ibdev = target->srp_host->srp_dev->dev;
@@ -489,32 +521,50 @@ static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
device_remove_file(&shost->shost_dev, *attr);
}
-static void srp_remove_work(struct work_struct *work)
+static void srp_remove_target(struct srp_target_port *target)
{
- struct srp_target_port *target =
- container_of(work, struct srp_target_port, work);
-
- if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
- return;
-
- spin_lock(&target->srp_host->target_lock);
- list_del(&target->list);
- spin_unlock(&target->srp_host->target_lock);
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_del_scsi_host_attr(target->scsi_host);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
+ srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
srp_free_req_data(target);
scsi_host_put(target->scsi_host);
}
+static void srp_remove_work(struct work_struct *work)
+{
+ struct srp_target_port *target =
+ container_of(work, struct srp_target_port, remove_work);
+
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
+
+ spin_lock(&target->srp_host->target_lock);
+ list_del(&target->list);
+ spin_unlock(&target->srp_host->target_lock);
+
+ srp_remove_target(target);
+}
+
+static void srp_rport_delete(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+
+ srp_queue_remove_work(target);
+}
+
static int srp_connect_target(struct srp_target_port *target)
{
int retries = 3;
int ret;
+ WARN_ON_ONCE(target->connected);
+
+ target->qp_in_error = false;
+
ret = srp_lookup_path(target);
if (ret)
return ret;
@@ -534,6 +584,7 @@ static int srp_connect_target(struct srp_target_port *target)
*/
switch (target->status) {
case 0:
+ srp_change_conn_state(target, true);
return 0;
case SRP_PORT_REDIRECT:
@@ -646,13 +697,14 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
static int srp_reconnect_target(struct srp_target_port *target)
{
- struct ib_qp_attr qp_attr;
- struct ib_wc wc;
+ struct Scsi_Host *shost = target->scsi_host;
int i, ret;
- if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
+ if (target->state != SRP_TARGET_LIVE)
return -EAGAIN;
+ scsi_target_block(&shost->shost_gendev);
+
srp_disconnect_target(target);
/*
* Now get a new local CM ID so that we avoid confusing the
@@ -660,21 +712,11 @@ static int srp_reconnect_target(struct srp_target_port *target)
*/
ret = srp_new_cm_id(target);
if (ret)
- goto err;
+ goto unblock;
- qp_attr.qp_state = IB_QPS_RESET;
- ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
- if (ret)
- goto err;
-
- ret = srp_init_qp(target, target->qp);
+ ret = srp_create_target_ib(target);
if (ret)
- goto err;
-
- while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
- ; /* nothing */
- while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
- ; /* nothing */
+ goto unblock;
for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
struct srp_request *req = &target->req_ring[i];
@@ -686,13 +728,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
for (i = 0; i < SRP_SQ_SIZE; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
- target->qp_in_error = 0;
ret = srp_connect_target(target);
+
+unblock:
+ scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
+ SDEV_TRANSPORT_OFFLINE);
+
if (ret)
goto err;
- if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
- ret = -EAGAIN;
+ shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");
return ret;
@@ -705,17 +750,8 @@ err:
* However, we have to defer the real removal because we
* are in the context of the SCSI error handler now, which
* will deadlock if we call scsi_remove_host().
- *
- * Schedule our work inside the lock to avoid a race with
- * the flush_scheduled_work() in srp_remove_one().
*/
- spin_lock_irq(&target->lock);
- if (target->state == SRP_TARGET_CONNECTING) {
- target->state = SRP_TARGET_DEAD;
- INIT_WORK(&target->work, srp_remove_work);
- queue_work(ib_wq, &target->work);
- }
- spin_unlock_irq(&target->lock);
+ srp_queue_remove_work(target);
return ret;
}
@@ -1262,6 +1298,19 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
PFX "Recv failed with error code %d\n", res);
}
+static void srp_handle_qp_err(enum ib_wc_status wc_status,
+ enum ib_wc_opcode wc_opcode,
+ struct srp_target_port *target)
+{
+ if (target->connected && !target->qp_in_error) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed %s status %d\n",
+ wc_opcode & IB_WC_RECV ? "receive" : "send",
+ wc_status);
+ }
+ target->qp_in_error = true;
+}
+
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
@@ -1269,15 +1318,11 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(cq, 1, &wc) > 0) {
- if (wc.status) {
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed receive status %d\n",
- wc.status);
- target->qp_in_error = 1;
- break;
+ if (likely(wc.status == IB_WC_SUCCESS)) {
+ srp_handle_recv(target, &wc);
+ } else {
+ srp_handle_qp_err(wc.status, wc.opcode, target);
}
-
- srp_handle_recv(target, &wc);
}
}
@@ -1288,16 +1333,12 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
struct srp_iu *iu;
while (ib_poll_cq(cq, 1, &wc) > 0) {
- if (wc.status) {
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed send status %d\n",
- wc.status);
- target->qp_in_error = 1;
- break;
+ if (likely(wc.status == IB_WC_SUCCESS)) {
+ iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
+ list_add(&iu->list, &target->free_tx);
+ } else {
+ srp_handle_qp_err(wc.status, wc.opcode, target);
}
-
- iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
- list_add(&iu->list, &target->free_tx);
}
}
@@ -1311,16 +1352,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
unsigned long flags;
int len;
- if (target->state == SRP_TARGET_CONNECTING)
- goto err;
-
- if (target->state == SRP_TARGET_DEAD ||
- target->state == SRP_TARGET_REMOVED) {
- scmnd->result = DID_BAD_TARGET << 16;
- scmnd->scsi_done(scmnd);
- return 0;
- }
-
spin_lock_irqsave(&target->lock, flags);
iu = __srp_get_tx_iu(target, SRP_IU_CMD);
if (!iu)
@@ -1377,7 +1408,6 @@ err_iu:
err_unlock:
spin_unlock_irqrestore(&target->lock, flags);
-err:
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1419,6 +1449,33 @@ err:
return -ENOMEM;
}
+static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
+{
+ uint64_t T_tr_ns, max_compl_time_ms;
+ uint32_t rq_tmo_jiffies;
+
+ /*
+ * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
+ * table 91), both the QP timeout and the retry count have to be set
+ * for RC QP's during the RTR to RTS transition.
+ */
+ WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
+ (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
+
+ /*
+ * Set target->rq_tmo_jiffies to one second more than the largest time
+ * it can take before an error completion is generated. See also
+ * C9-140..142 in the IBTA spec for more information about how to
+ * convert the QP Local ACK Timeout value to nanoseconds.
+ */
+ T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
+ max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
+ do_div(max_compl_time_ms, NSEC_PER_MSEC);
+ rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
+
+ return rq_tmo_jiffies;
+}
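A worked example of the computation above, using illustrative values rather than anything taken from the patch: for qp_attr->timeout = 19 and retry_cnt = 7,

	/*
	 * T_tr_ns        = 4096 * 2^19         = 2147483648 ns  (~2.15 s)
	 * max_compl_time = 7 * 4 * T_tr_ns     = 60129542144 ns
	 * do_div(..., NSEC_PER_MSEC)           -> 60129 ms
	 * rq_tmo_jiffies = msecs_to_jiffies(60129 + 1000)       (~61.1 s)
	 */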
+
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
struct srp_login_rsp *lrsp,
struct srp_target_port *target)
@@ -1478,6 +1535,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (ret)
goto error_free;
+ target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
if (ret)
goto error_free;
@@ -1599,6 +1658,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
case IB_CM_DREQ_RECEIVED:
shost_printk(KERN_WARNING, target->scsi_host,
PFX "DREQ received - connection closed\n");
+ srp_change_conn_state(target, false);
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
@@ -1608,7 +1668,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
shost_printk(KERN_ERR, target->scsi_host,
PFX "connection closed\n");
- comp = 1;
target->status = 0;
break;
@@ -1636,10 +1695,6 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
- if (target->state == SRP_TARGET_DEAD ||
- target->state == SRP_TARGET_REMOVED)
- return -1;
-
init_completion(&target->tsk_mgmt_done);
spin_lock_irq(&target->lock);
@@ -1729,6 +1784,21 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return ret;
}
+static int srp_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_target_port *target = host_to_target(shost);
+ struct request_queue *q = sdev->request_queue;
+ unsigned long timeout;
+
+ if (sdev->type == TYPE_DISK) {
+ timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
+ blk_queue_rq_timeout(q, timeout);
+ }
+
+ return 0;
+}
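Continuing the illustrative numbers from srp_compute_rq_tmo() above and assuming HZ = 1000: rq_tmo_jiffies would be about 61129, so max_t() selects it over the 30 * HZ = 30000 floor and the block-layer request timeout for SRP disks becomes roughly 61 seconds.

	/* Illustrative only: timeout = max_t(unsigned, 30 * HZ, 61129) = 61129 jiffies (~61 s). */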
+
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1861,6 +1931,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
.eh_abort_handler = srp_abort,
@@ -1894,11 +1965,14 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
return PTR_ERR(rport);
}
+ rport->lld_data = target;
+
spin_lock(&host->target_lock);
list_add_tail(&target->list, &host->target_list);
spin_unlock(&host->target_lock);
target->state = SRP_TARGET_LIVE;
+ target->connected = false;
scsi_scan_target(&target->scsi_host->shost_gendev,
0, target->scsi_id, SCAN_WILD_CARD, 0);
@@ -2188,6 +2262,7 @@ static ssize_t srp_create_target(struct device *dev,
sizeof (struct srp_indirect_buf) +
target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+ INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->free_tx);
INIT_LIST_HEAD(&target->free_reqs);
@@ -2232,7 +2307,6 @@ static ssize_t srp_create_target(struct device *dev,
if (ret)
goto err_free_ib;
- target->qp_in_error = 0;
ret = srp_connect_target(target);
if (ret) {
shost_printk(KERN_ERR, target->scsi_host,
@@ -2422,8 +2496,7 @@ static void srp_remove_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct srp_host *host, *tmp_host;
- LIST_HEAD(target_list);
- struct srp_target_port *target, *tmp_target;
+ struct srp_target_port *target;
srp_dev = ib_get_client_data(device, &srp_client);
@@ -2436,35 +2509,17 @@ static void srp_remove_one(struct ib_device *device)
wait_for_completion(&host->released);
/*
- * Mark all target ports as removed, so we stop queueing
- * commands and don't try to reconnect.
+ * Remove all target ports.
*/
spin_lock(&host->target_lock);
- list_for_each_entry(target, &host->target_list, list) {
- spin_lock_irq(&target->lock);
- target->state = SRP_TARGET_REMOVED;
- spin_unlock_irq(&target->lock);
- }
+ list_for_each_entry(target, &host->target_list, list)
+ srp_queue_remove_work(target);
spin_unlock(&host->target_lock);
/*
- * Wait for any reconnection tasks that may have
- * started before we marked our target ports as
- * removed, and any target port removal tasks.
+ * Wait for target port removal tasks.
*/
- flush_workqueue(ib_wq);
-
- list_for_each_entry_safe(target, tmp_target,
- &host->target_list, list) {
- srp_del_scsi_host_attr(target->scsi_host);
- srp_remove_host(target->scsi_host);
- scsi_remove_host(target->scsi_host);
- srp_disconnect_target(target);
- ib_destroy_cm_id(target->cm_id);
- srp_free_target_ib(target);
- srp_free_req_data(target);
- scsi_host_put(target->scsi_host);
- }
+ flush_workqueue(system_long_wq);
kfree(host);
}
@@ -2478,6 +2533,7 @@ static void srp_remove_one(struct ib_device *device)
}
static struct srp_function_template ib_srp_transport_functions = {
+ .rport_delete = srp_rport_delete,
};
static int __init srp_init_module(void)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 020caf0c3789..de2d0b3c0bfe 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -80,9 +80,7 @@ enum {
enum srp_target_state {
SRP_TARGET_LIVE,
- SRP_TARGET_CONNECTING,
- SRP_TARGET_DEAD,
- SRP_TARGET_REMOVED
+ SRP_TARGET_REMOVED,
};
enum srp_iu_type {
@@ -163,6 +161,9 @@ struct srp_target_port {
struct ib_sa_query *path_query;
int path_query_id;
+ u32 rq_tmo_jiffies;
+ bool connected;
+
struct ib_cm_id *cm_id;
int max_ti_iu_len;
@@ -173,12 +174,12 @@ struct srp_target_port {
struct srp_iu *rx_ring[SRP_RQ_SIZE];
struct srp_request req_ring[SRP_CMD_SQ_SIZE];
- struct work_struct work;
+ struct work_struct remove_work;
struct list_head list;
struct completion done;
int status;
- int qp_in_error;
+ bool qp_in_error;
struct completion tsk_mgmt_done;
u8 tsk_mgmt_status;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index cf23c46185b2..c09d41b1a2ff 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1269,7 +1269,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
BUG_ON(ioctx->ch != ch);
- kref_init(&ioctx->kref);
spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
ioctx->n_rbuf = 0;
@@ -1291,39 +1290,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
}
/**
- * srpt_put_send_ioctx() - Free up resources.
- */
-static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
-{
- struct srpt_rdma_ch *ch;
- unsigned long flags;
-
- BUG_ON(!ioctx);
- ch = ioctx->ch;
- BUG_ON(!ch);
-
- WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
-
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- transport_generic_free_cmd(&ioctx->cmd, 0);
-
- if (ioctx->n_rbuf > 1) {
- kfree(ioctx->rbufs);
- ioctx->rbufs = NULL;
- ioctx->n_rbuf = 0;
- }
-
- spin_lock_irqsave(&ch->spinlock, flags);
- list_add(&ioctx->free_list, &ch->free_list);
- spin_unlock_irqrestore(&ch->spinlock, flags);
-}
-
-static void srpt_put_send_ioctx_kref(struct kref *kref)
-{
- srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
-}
-
-/**
* srpt_abort_cmd() - Abort a SCSI command.
* @ioctx: I/O context associated with the SCSI command.
* @context: Preferred execution context.
@@ -1359,8 +1325,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- if (state == SRPT_STATE_DONE)
+ if (state == SRPT_STATE_DONE) {
+ struct srpt_rdma_ch *ch = ioctx->ch;
+
+ BUG_ON(ch->sess == NULL);
+
+ target_put_sess_cmd(ch->sess, &ioctx->cmd);
goto out;
+ }
pr_debug("Aborting cmd with state %d and tag %lld\n", state,
ioctx->tag);
@@ -1395,11 +1367,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
case SRPT_STATE_MGMT_RSP_SENT:
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
default:
WARN_ON("ERROR: unexpected command state");
@@ -1457,11 +1429,13 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
&& state != SRPT_STATE_DONE))
pr_debug("state = %d\n", state);
- if (state != SRPT_STATE_DONE)
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
- else
+ if (state != SRPT_STATE_DONE) {
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+ } else {
printk(KERN_ERR "IB completion has been received too late for"
" wr_id = %u.\n", ioctx->ioctx.index);
+ }
}
/**
@@ -1712,10 +1686,10 @@ out_err:
static int srpt_check_stop_free(struct se_cmd *cmd)
{
- struct srpt_send_ioctx *ioctx;
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
- ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
- return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
/**
@@ -1730,12 +1704,12 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
uint64_t unpacked_lun;
u64 data_len;
enum dma_data_direction dir;
- int ret;
+ sense_reason_t ret;
+ int rc;
BUG_ON(!send_ioctx);
srp_cmd = recv_ioctx->ioctx.buf;
- kref_get(&send_ioctx->kref);
cmd = &send_ioctx->cmd;
send_ioctx->tag = srp_cmd->tag;
@@ -1755,40 +1729,26 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
break;
}
- ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
- if (ret) {
+ if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+ ret = TCM_INVALID_CDB_FIELD;
goto send_sense;
}
- cmd->data_length = data_len;
- cmd->data_direction = dir;
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
sizeof(srp_cmd->lun));
- if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+ rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
+ &send_ioctx->sense_data[0], unpacked_lun, data_len,
+ MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ if (rc != 0) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto send_sense;
}
- ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
- if (ret < 0) {
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
- if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
- srpt_queue_status(cmd);
- return 0;
- } else
- goto send_sense;
- }
-
- transport_handle_cdb_direct(cmd);
return 0;
send_sense:
- transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
- 0);
+ transport_send_check_condition_and_sense(cmd, ret, 0);
return -1;
}
@@ -1865,9 +1825,11 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
{
struct srp_tsk_mgmt *srp_tsk;
struct se_cmd *cmd;
+ struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
+ uint32_t tag = 0;
int tcm_tmr;
- int res;
+ int rc;
BUG_ON(!send_ioctx);
@@ -1882,39 +1844,32 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
send_ioctx->tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
if (tcm_tmr < 0) {
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response =
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto process_tmr;
- }
- res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
- if (res < 0) {
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
- goto process_tmr;
+ goto fail;
}
-
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
- res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
- if (res) {
- pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
- goto process_tmr;
- }
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
- srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
-
-process_tmr:
- kref_get(&send_ioctx->kref);
- if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
- transport_generic_handle_tmr(&send_ioctx->cmd);
- else
- transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+ if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+ rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+ if (rc < 0) {
+ send_ioctx->cmd.se_tmr_req->response =
+ TMR_TASK_DOES_NOT_EXIST;
+ goto fail;
+ }
+ tag = srp_tsk->task_tag;
+ }
+ rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+ srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+ TARGET_SCF_ACK_KREF);
+ if (rc != 0) {
+ send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ goto fail;
+ }
+ return;
+fail:
+ transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}
/**
@@ -1956,10 +1911,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
}
}
- transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
- 0, DMA_NONE, MSG_SIMPLE_TAG,
- send_ioctx->sense_data);
-
switch (srp_cmd->opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
@@ -2365,6 +2316,7 @@ static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
+ struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
@@ -2373,8 +2325,13 @@ static void srpt_release_channel_work(struct work_struct *w)
sdev = ch->sport->sdev;
BUG_ON(!sdev);
- transport_deregister_session_configfs(ch->sess);
- transport_deregister_session(ch->sess);
+ se_sess = ch->sess;
+ BUG_ON(!se_sess);
+
+ target_wait_for_sess_cmds(se_sess, 0);
+
+ transport_deregister_session_configfs(se_sess);
+ transport_deregister_session(se_sess);
ch->sess = NULL;
srpt_destroy_ch_ib(ch);
@@ -3099,7 +3056,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
ioctx->tag);
srpt_unmap_sg_to_ib_sge(ch, ioctx);
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
out:
@@ -3490,6 +3447,23 @@ static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
static void srpt_release_cmd(struct se_cmd *se_cmd)
{
+ struct srpt_send_ioctx *ioctx = container_of(se_cmd,
+ struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ unsigned long flags;
+
+ WARN_ON(ioctx->state != SRPT_STATE_DONE);
+ WARN_ON(ioctx->mapped_sg_count != 0);
+
+ if (ioctx->n_rbuf > 1) {
+ kfree(ioctx->rbufs);
+ ioctx->rbufs = NULL;
+ ioctx->n_rbuf = 0;
+ }
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ list_add(&ioctx->free_list, &ch->free_list);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
}
/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 61e52b830816..4caf55cda7b1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -228,7 +228,6 @@ struct srpt_recv_ioctx {
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
struct srpt_rdma_ch *ch;
- struct kref kref;
struct rdma_iu *rdma_ius;
struct srp_direct_buf *rbufs;
struct srp_direct_buf single_rbuf;
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 55f7e57d4e42..38b523a1ece0 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -3,7 +3,7 @@
#
menu "Input device support"
- depends on !S390 && !UML
+ depends on !UML
config INPUT
tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index daceafe7ee7d..fa7a95c1da0e 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -57,7 +57,7 @@ static const struct pci_device_id emu_tbl[] = {
MODULE_DEVICE_TABLE(pci, emu_tbl);
-static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int emu_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct emu *emu;
struct gameport *port;
@@ -107,7 +107,7 @@ static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id
return error;
}
-static void __devexit emu_remove(struct pci_dev *pdev)
+static void emu_remove(struct pci_dev *pdev)
{
struct emu *emu = pci_get_drvdata(pdev);
@@ -122,7 +122,7 @@ static struct pci_driver emu_driver = {
.name = "Emu10k1_gameport",
.id_table = emu_tbl,
.probe = emu_probe,
- .remove = __devexit_p(emu_remove),
+ .remove = emu_remove,
};
module_pci_driver(emu_driver);
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index 48ad3829ff20..ae912d3aee4e 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -78,7 +78,7 @@ static int fm801_gp_open(struct gameport *gameport, int mode)
return 0;
}
-static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device_id *id)
+static int fm801_gp_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct fm801_gp *gp;
struct gameport *port;
@@ -129,7 +129,7 @@ static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device
return error;
}
-static void __devexit fm801_gp_remove(struct pci_dev *pci)
+static void fm801_gp_remove(struct pci_dev *pci)
{
struct fm801_gp *gp = pci_get_drvdata(pci);
@@ -150,7 +150,7 @@ static struct pci_driver fm801_gp_driver = {
.name = "FM801_gameport",
.id_table = fm801_gp_id_table,
.probe = fm801_gp_probe,
- .remove = __devexit_p(fm801_gp_remove),
+ .remove = fm801_gp_remove,
};
module_pci_driver(fm801_gp_driver);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 8c4b50fd9a79..71db1930573f 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -18,6 +18,7 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
{
if (dev->absinfo && test_bit(src, dev->absbit)) {
dev->absinfo[dst] = dev->absinfo[src];
+ dev->absinfo[dst].fuzz = 0;
dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst);
}
}
@@ -194,7 +195,7 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
if (!mt)
return;
- oldest = 0;
+ oldest = NULL;
oldid = mt->trkid;
count = 0;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 53a0ddee7872..c04469928925 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -534,8 +534,11 @@ EXPORT_SYMBOL(input_grab_device);
static void __input_release_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
+ struct input_handle *grabber;
- if (dev->grab == handle) {
+ grabber = rcu_dereference_protected(dev->grab,
+ lockdep_is_held(&dev->mutex));
+ if (grabber == handle) {
rcu_assign_pointer(dev->grab, NULL);
/* Make sure input_pass_event() notices that grab is gone */
synchronize_rcu();
@@ -1723,7 +1726,7 @@ EXPORT_SYMBOL_GPL(input_class);
/**
* input_allocate_device - allocate memory for new input device
*
- * Returns prepared struct input_dev or NULL.
+ * Returns prepared struct input_dev or %NULL.
*
* NOTE: Use input_free_device() to free devices that have not been
* registered; input_unregister_device() should be used for already
@@ -1750,6 +1753,71 @@ struct input_dev *input_allocate_device(void)
}
EXPORT_SYMBOL(input_allocate_device);
+struct input_devres {
+ struct input_dev *input;
+};
+
+static int devm_input_device_match(struct device *dev, void *res, void *data)
+{
+ struct input_devres *devres = res;
+
+ return devres->input == data;
+}
+
+static void devm_input_device_release(struct device *dev, void *res)
+{
+ struct input_devres *devres = res;
+ struct input_dev *input = devres->input;
+
+ dev_dbg(dev, "%s: dropping reference to %s\n",
+ __func__, dev_name(&input->dev));
+ input_put_device(input);
+}
+
+/**
+ * devm_input_allocate_device - allocate managed input device
+ * @dev: device owning the input device being created
+ *
+ * Returns prepared struct input_dev or %NULL.
+ *
+ * Managed input devices do not need to be explicitly unregistered or
+ * freed as it will be done automatically when owner device unbinds from
+ * its driver (or binding fails). Once managed input device is allocated,
+ * it is ready to be set up and registered in the same fashion as regular
+ * input device. There are no special devm_input_device_[un]register()
+ * variants, regular ones work with both managed and unmanaged devices,
+ * should you need them. In most cases however, managed input device need
+ * not be explicitly unregistered or freed.
+ *
+ * NOTE: the owner device is set up as parent of input device and users
+ * should not override it.
+ */
+struct input_dev *devm_input_allocate_device(struct device *dev)
+{
+ struct input_dev *input;
+ struct input_devres *devres;
+
+ devres = devres_alloc(devm_input_device_release,
+ sizeof(struct input_devres), GFP_KERNEL);
+ if (!devres)
+ return NULL;
+
+ input = input_allocate_device();
+ if (!input) {
+ devres_free(devres);
+ return NULL;
+ }
+
+ input->dev.parent = dev;
+ input->devres_managed = true;
+
+ devres->input = input;
+ devres_add(dev, devres);
+
+ return input;
+}
+EXPORT_SYMBOL(devm_input_allocate_device);
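A minimal sketch (not part of the patch) of how a driver might use the devm_input_allocate_device() helper documented above. The "foo_keys" platform driver, its device name and the KEY_POWER capability are hypothetical placeholders; only the input-core calls are the real API introduced here.

#include <linux/input.h>
#include <linux/platform_device.h>

/* Hypothetical probe routine using the managed allocator. */
static int foo_keys_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	int error;

	/* Lifetime is tied to &pdev->dev; no explicit free is needed. */
	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	input->name = "foo-keys";
	input->id.bustype = BUS_HOST;
	input_set_capability(input, EV_KEY, KEY_POWER);

	error = input_register_device(input);
	if (error)
		return error;	/* devres also releases the device on failure */

	return 0;
}
/*
 * No remove() cleanup is required: devres unregisters and later frees the
 * device when the owner device unbinds from its driver.
 */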
+
/**
* input_free_device - free memory occupied by input_dev structure
* @dev: input device to free
@@ -1766,8 +1834,14 @@ EXPORT_SYMBOL(input_allocate_device);
*/
void input_free_device(struct input_dev *dev)
{
- if (dev)
+ if (dev) {
+ if (dev->devres_managed)
+ WARN_ON(devres_destroy(dev->dev.parent,
+ devm_input_device_release,
+ devm_input_device_match,
+ dev));
input_put_device(dev);
+ }
}
EXPORT_SYMBOL(input_free_device);
@@ -1888,6 +1962,38 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
INPUT_CLEANSE_BITMASK(dev, SW, sw);
}
+static void __input_unregister_device(struct input_dev *dev)
+{
+ struct input_handle *handle, *next;
+
+ input_disconnect_device(dev);
+
+ mutex_lock(&input_mutex);
+
+ list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
+ handle->handler->disconnect(handle);
+ WARN_ON(!list_empty(&dev->h_list));
+
+ del_timer_sync(&dev->timer);
+ list_del_init(&dev->node);
+
+ input_wakeup_procfs_readers();
+
+ mutex_unlock(&input_mutex);
+
+ device_del(&dev->dev);
+}
+
+static void devm_input_device_unregister(struct device *dev, void *res)
+{
+ struct input_devres *devres = res;
+ struct input_dev *input = devres->input;
+
+ dev_dbg(dev, "%s: unregistering device %s\n",
+ __func__, dev_name(&input->dev));
+ __input_unregister_device(input);
+}
+
/**
* input_register_device - register device with input core
* @dev: device to be registered
@@ -1899,15 +2005,36 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
* Once device has been successfully registered it can be unregistered
* with input_unregister_device(); input_free_device() should not be
* called in this case.
+ *
+ * Note that this function is also used to register managed input devices
+ * (ones allocated with devm_input_allocate_device()). Such managed input
+ * devices need not be explicitly unregistered or freed, their tear down
+ * is controlled by the devres infrastructure. It is also worth noting
+ * that tear down of managed input devices is internally a 2-step process:
+ * registered managed input device is first unregistered, but stays in
+ * memory and can still handle input_event() calls (although events will
+ * not be delivered anywhere). The freeing of managed input device will
+ * happen later, when devres stack is unwound to the point where device
+ * allocation was made.
*/
int input_register_device(struct input_dev *dev)
{
static atomic_t input_no = ATOMIC_INIT(0);
+ struct input_devres *devres = NULL;
struct input_handler *handler;
unsigned int packet_size;
const char *path;
int error;
+ if (dev->devres_managed) {
+ devres = devres_alloc(devm_input_device_unregister,
+ sizeof(struct input_devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->input = dev;
+ }
+
/* Every input device generates EV_SYN/SYN_REPORT events. */
__set_bit(EV_SYN, dev->evbit);
@@ -1923,8 +2050,10 @@ int input_register_device(struct input_dev *dev)
dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2;
dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
- if (!dev->vals)
- return -ENOMEM;
+ if (!dev->vals) {
+ error = -ENOMEM;
+ goto err_devres_free;
+ }
/*
* If delay and period are pre-set by the driver, then autorepeating
@@ -1949,7 +2078,7 @@ int input_register_device(struct input_dev *dev)
error = device_add(&dev->dev);
if (error)
- return error;
+ goto err_free_vals;
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
pr_info("%s as %s\n",
@@ -1958,10 +2087,8 @@ int input_register_device(struct input_dev *dev)
kfree(path);
error = mutex_lock_interruptible(&input_mutex);
- if (error) {
- device_del(&dev->dev);
- return error;
- }
+ if (error)
+ goto err_device_del;
list_add_tail(&dev->node, &input_dev_list);
@@ -1972,7 +2099,21 @@ int input_register_device(struct input_dev *dev)
mutex_unlock(&input_mutex);
+ if (dev->devres_managed) {
+ dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
+ __func__, dev_name(&dev->dev));
+ devres_add(dev->dev.parent, devres);
+ }
return 0;
+
+err_device_del:
+ device_del(&dev->dev);
+err_free_vals:
+ kfree(dev->vals);
+ dev->vals = NULL;
+err_devres_free:
+ devres_free(devres);
+ return error;
}
EXPORT_SYMBOL(input_register_device);
@@ -1985,24 +2126,20 @@ EXPORT_SYMBOL(input_register_device);
*/
void input_unregister_device(struct input_dev *dev)
{
- struct input_handle *handle, *next;
-
- input_disconnect_device(dev);
-
- mutex_lock(&input_mutex);
-
- list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
- handle->handler->disconnect(handle);
- WARN_ON(!list_empty(&dev->h_list));
-
- del_timer_sync(&dev->timer);
- list_del_init(&dev->node);
-
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
-
- device_unregister(&dev->dev);
+ if (dev->devres_managed) {
+ WARN_ON(devres_destroy(dev->dev.parent,
+ devm_input_device_unregister,
+ devm_input_device_match,
+ dev));
+ __input_unregister_device(dev);
+ /*
+ * We do not do input_put_device() here because it will be done
+ * when 2nd devres fires up.
+ */
+ } else {
+ __input_unregister_device(dev);
+ input_put_device(dev);
+ }
}
EXPORT_SYMBOL(input_unregister_device);
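For contrast, a sketch of the unmanaged lifecycle that the comments above describe, using a hypothetical "bar_keys" driver (not from this patch): input_free_device() is only for a device that was never registered, while input_unregister_device() handles a registered one and drops its reference.

/* Hypothetical unmanaged variant of the same driver. */
static int bar_keys_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	int error;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	input->name = "bar-keys";
	input_set_capability(input, EV_KEY, KEY_POWER);

	error = input_register_device(input);
	if (error) {
		/* Not yet registered: free it directly. */
		input_free_device(input);
		return error;
	}

	platform_set_drvdata(pdev, input);
	return 0;
}

static int bar_keys_remove(struct platform_device *pdev)
{
	/* Registered: unregister, which also drops the last reference. */
	input_unregister_device(platform_get_drvdata(pdev));
	return 0;
}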
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 358cd7ee905b..7cd74e29cbc8 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "PCC"
-#elif defined(CONFIG_MN10300)
+#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x, y) ((x) - (y))
#define TIME_NAME "TSC"
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index c96653b58867..121cd63d3334 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -85,7 +85,10 @@ static int as5011_i2c_write(struct i2c_client *client,
{
uint8_t data[2] = { aregaddr, avalue };
struct i2c_msg msg = {
- client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data
+ .addr = client->addr,
+ .flags = I2C_M_IGNORE_NAK,
+ .len = 2,
+ .buf = (uint8_t *)data
};
int error;
@@ -98,8 +101,18 @@ static int as5011_i2c_read(struct i2c_client *client,
{
uint8_t data[2] = { aregaddr };
struct i2c_msg msg_set[2] = {
- { client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data },
- { client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data }
+ {
+ .addr = client->addr,
+ .flags = I2C_M_REV_DIR_ADDR,
+ .len = 1,
+ .buf = (uint8_t *)data
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD | I2C_M_NOSTART,
+ .len = 1,
+ .buf = (uint8_t *)data
+ }
};
int error;
@@ -144,7 +157,7 @@ out:
return IRQ_HANDLED;
}
-static int __devinit as5011_configure_chip(struct as5011_device *as5011,
+static int as5011_configure_chip(struct as5011_device *as5011,
const struct as5011_platform_data *plat_dat)
{
struct i2c_client *client = as5011->i2c_client;
@@ -212,8 +225,8 @@ static int __devinit as5011_configure_chip(struct as5011_device *as5011,
return 0;
}
-static int __devinit as5011_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int as5011_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
const struct as5011_platform_data *plat_data;
struct as5011_device *as5011;
@@ -328,7 +341,7 @@ err_free_mem:
return error;
}
-static int __devexit as5011_remove(struct i2c_client *client)
+static int as5011_remove(struct i2c_client *client)
{
struct as5011_device *as5011 = i2c_get_clientdata(client);
@@ -353,7 +366,7 @@ static struct i2c_driver as5011_driver = {
.name = "as5011",
},
.probe = as5011_probe,
- .remove = __devexit_p(as5011_remove),
+ .remove = as5011_remove,
.id_table = as5011_id,
};
diff --git a/drivers/input/joystick/maplecontrol.c b/drivers/input/joystick/maplecontrol.c
index 77cfde571bd9..59c10ec5a2a1 100644
--- a/drivers/input/joystick/maplecontrol.c
+++ b/drivers/input/joystick/maplecontrol.c
@@ -78,7 +78,7 @@ static void dc_pad_close(struct input_dev *dev)
}
/* allow the controller to be used */
-static int __devinit probe_maple_controller(struct device *dev)
+static int probe_maple_controller(struct device *dev)
{
static const short btn_bit[32] = {
BTN_C, BTN_B, BTN_A, BTN_START, -1, -1, -1, -1,
@@ -157,7 +157,7 @@ fail:
return error;
}
-static int __devexit remove_maple_controller(struct device *dev)
+static int remove_maple_controller(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct dc_pad *pad = maple_get_drvdata(mdev);
@@ -175,7 +175,7 @@ static struct maple_driver dc_pad_driver = {
.drv = {
.name = "Dreamcast_controller",
.probe = probe_maple_controller,
- .remove = __devexit_p(remove_maple_controller),
+ .remove = remove_maple_controller,
},
};
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 4dfa1eed4b7c..b76ac580703c 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -12,7 +12,7 @@
* the Free Software Foundation.
*/
-/* #define WK0701_DEBUG */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define RESERVE 20000
#define SYNC_PULSE 1306000
@@ -67,6 +67,7 @@ static inline void walkera0701_parse_frame(struct walkera_dev *w)
{
int i;
int val1, val2, val3, val4, val5, val6, val7, val8;
+ int magic, magic_bit;
int crc1, crc2;
for (crc1 = crc2 = i = 0; i < 10; i++) {
@@ -102,17 +103,12 @@ static inline void walkera0701_parse_frame(struct walkera_dev *w)
val8 = (w->buf[18] & 1) << 8 | (w->buf[19] << 4) | w->buf[20];
val8 *= (w->buf[18] & 2) - 1; /*sign */
-#ifdef WK0701_DEBUG
- {
- int magic, magic_bit;
- magic = (w->buf[21] << 4) | w->buf[22];
- magic_bit = (w->buf[24] & 8) >> 3;
- printk(KERN_DEBUG
- "walkera0701: %4d %4d %4d %4d %4d %4d %4d %4d (magic %2x %d)\n",
- val1, val2, val3, val4, val5, val6, val7, val8, magic,
- magic_bit);
- }
-#endif
+ magic = (w->buf[21] << 4) | w->buf[22];
+ magic_bit = (w->buf[24] & 8) >> 3;
+ pr_debug("%4d %4d %4d %4d %4d %4d %4d %4d (magic %2x %d)\n",
+ val1, val2, val3, val4, val5, val6, val7, val8,
+ magic, magic_bit);
+
input_report_abs(w->input_dev, ABS_X, val2);
input_report_abs(w->input_dev, ABS_Y, val1);
input_report_abs(w->input_dev, ABS_Z, val6);
@@ -187,6 +183,9 @@ static int walkera0701_open(struct input_dev *dev)
{
struct walkera_dev *w = input_get_drvdata(dev);
+ if (parport_claim(w->pardevice))
+ return -EBUSY;
+
parport_enable_irq(w->parport);
return 0;
}
@@ -196,37 +195,52 @@ static void walkera0701_close(struct input_dev *dev)
struct walkera_dev *w = input_get_drvdata(dev);
parport_disable_irq(w->parport);
+ hrtimer_cancel(&w->timer);
+
+ parport_release(w->pardevice);
}
static int walkera0701_connect(struct walkera_dev *w, int parport)
{
- int err = -ENODEV;
+ int error;
w->parport = parport_find_number(parport);
- if (w->parport == NULL)
+ if (!w->parport) {
+ pr_err("parport %d does not exist\n", parport);
return -ENODEV;
+ }
if (w->parport->irq == -1) {
- printk(KERN_ERR "walkera0701: parport without interrupt\n");
- goto init_err;
+ pr_err("parport %d does not have interrupt assigned\n",
+ parport);
+ error = -EINVAL;
+ goto err_put_parport;
}
- err = -EBUSY;
w->pardevice = parport_register_device(w->parport, "walkera0701",
NULL, NULL, walkera0701_irq_handler,
PARPORT_DEV_EXCL, w);
- if (!w->pardevice)
- goto init_err;
+ if (!w->pardevice) {
+ pr_err("failed to register parport device\n");
+ error = -EIO;
+ goto err_put_parport;
+ }
- if (parport_negotiate(w->pardevice->port, IEEE1284_MODE_COMPAT))
- goto init_err1;
+ if (parport_negotiate(w->pardevice->port, IEEE1284_MODE_COMPAT)) {
+ pr_err("failed to negotiate parport mode\n");
+ error = -EIO;
+ goto err_unregister_device;
+ }
- if (parport_claim(w->pardevice))
- goto init_err1;
+ hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ w->timer.function = timer_handler;
w->input_dev = input_allocate_device();
- if (!w->input_dev)
- goto init_err2;
+ if (!w->input_dev) {
+ pr_err("failed to allocate input device\n");
+ error = -ENOMEM;
+ goto err_unregister_device;
+ }
input_set_drvdata(w->input_dev, w);
w->input_dev->name = "Walkera WK-0701 TX";
@@ -237,6 +251,7 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
w->input_dev->id.vendor = 0x0001;
w->input_dev->id.product = 0x0001;
w->input_dev->id.version = 0x0100;
+ w->input_dev->dev.parent = w->parport->dev;
w->input_dev->open = walkera0701_open;
w->input_dev->close = walkera0701_close;
@@ -250,30 +265,26 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
input_set_abs_params(w->input_dev, ABS_RUDDER, -512, 512, 0, 0);
input_set_abs_params(w->input_dev, ABS_MISC, -512, 512, 0, 0);
- err = input_register_device(w->input_dev);
- if (err)
- goto init_err3;
+ error = input_register_device(w->input_dev);
+ if (error) {
+ pr_err("failed to register input device\n");
+ goto err_free_input_dev;
+ }
- hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- w->timer.function = timer_handler;
return 0;
- init_err3:
+err_free_input_dev:
input_free_device(w->input_dev);
- init_err2:
- parport_release(w->pardevice);
- init_err1:
+err_unregister_device:
parport_unregister_device(w->pardevice);
- init_err:
+err_put_parport:
parport_put_port(w->parport);
- return err;
+ return error;
}
static void walkera0701_disconnect(struct walkera_dev *w)
{
- hrtimer_cancel(&w->timer);
input_unregister_device(w->input_dev);
- parport_release(w->pardevice);
parport_unregister_device(w->pardevice);
parport_put_port(w->parport);
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 83811e45d633..d6cbfe9df218 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -118,11 +118,12 @@ static const struct xpad_device {
u8 xtype;
} xpad_device[] = {
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
- { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
+ { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
+ { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
- { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
@@ -136,9 +137,12 @@ static const struct xpad_device {
{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
@@ -148,24 +152,28 @@ static const struct xpad_device {
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
- { 0x0e6f, 0x0006, "Pelican 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
+ { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
- { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
- { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -235,7 +243,7 @@ static const signed short xpad_abs_triggers[] = {
{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
-static struct usb_device_id xpad_table [] = {
+static struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
@@ -248,10 +256,11 @@ static struct usb_device_id xpad_table [] = {
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
{ }
};
-MODULE_DEVICE_TABLE (usb, xpad_table);
+MODULE_DEVICE_TABLE(usb, xpad_table);
struct usb_xpad {
struct input_dev *dev; /* input device interface */
@@ -783,7 +792,7 @@ static int xpad_open(struct input_dev *dev)
struct usb_xpad *xpad = input_get_drvdata(dev);
/* URB was submitted in probe */
- if(xpad->xtype == XTYPE_XBOX360W)
+ if (xpad->xtype == XTYPE_XBOX360W)
return 0;
xpad->irq_in->dev = xpad->udev;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 77629d33f03f..ac0500667000 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -134,7 +134,7 @@ config KEYBOARD_QT1070
config KEYBOARD_QT2160
tristate "Atmel AT42QT2160 Touch Sensor Chip"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for Atmel AT42QT2160 Touch
Sensor chip as a keyboard input.
@@ -224,7 +224,7 @@ config KEYBOARD_TCA6416
config KEYBOARD_TCA8418
tristate "TCA8418 Keypad Support"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select INPUT_MATRIXKMAP
help
This driver implements basic keypad functionality
@@ -303,7 +303,7 @@ config KEYBOARD_HP7XX
config KEYBOARD_LM8323
tristate "LM8323 keypad chip"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
depends on LEDS_CLASS
help
If you say yes here you get support for the National Semiconductor
@@ -420,7 +420,7 @@ config KEYBOARD_NOMADIK
config KEYBOARD_TEGRA
tristate "NVIDIA Tegra internal matrix keyboard controller support"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA && OF
select INPUT_MATRIXKMAP
help
Say Y here if you want to use a matrix keyboard connected directly
@@ -479,6 +479,16 @@ config KEYBOARD_SAMSUNG
To compile this driver as a module, choose M here: the
module will be called samsung-keypad.
+config KEYBOARD_GOLDFISH_EVENTS
+ depends on GOLDFISH
+ tristate "Generic Input Event device for Goldfish"
+ help
+ Say Y here to get an input event device for the Goldfish virtual
+ device emulator.
+
+ To compile this driver as a module, choose M here: the
+ module will be called goldfish-events.
+
config KEYBOARD_STOWAWAY
tristate "Stowaway keyboard"
select SERIO
@@ -544,6 +554,7 @@ config KEYBOARD_OMAP
config KEYBOARD_OMAP4
tristate "TI OMAP4+ keypad support"
+ depends on ARCH_OMAP2PLUS
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the OMAP4+ keypad.
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 44e76002f54b..49b16453d00e 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
+obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index e9e8674dfda1..ef26b17fb159 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -69,7 +69,7 @@ static int adp5520_keys_notifier(struct notifier_block *nb,
return 0;
}
-static int __devinit adp5520_keys_probe(struct platform_device *pdev)
+static int adp5520_keys_probe(struct platform_device *pdev)
{
struct adp5520_keys_platform_data *pdata = pdev->dev.platform_data;
struct input_dev *input;
@@ -182,7 +182,7 @@ err:
return ret;
}
-static int __devexit adp5520_keys_remove(struct platform_device *pdev)
+static int adp5520_keys_remove(struct platform_device *pdev)
{
struct adp5520_keys *dev = platform_get_drvdata(pdev);
@@ -200,7 +200,7 @@ static struct platform_driver adp5520_keys_driver = {
.owner = THIS_MODULE,
},
.probe = adp5520_keys_probe,
- .remove = __devexit_p(adp5520_keys_remove),
+ .remove = adp5520_keys_remove,
};
module_platform_driver(adp5520_keys_driver);
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index b083bf10f139..dbd2047f1641 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -145,7 +145,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
-static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
+static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
const struct adp5588_kpad_platform_data *pdata)
{
bool pin_used[ADP5588_MAXGPIO];
@@ -170,7 +170,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
-static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
+static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
@@ -224,7 +224,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
return 0;
}
-static void __devexit adp5588_gpio_remove(struct adp5588_kpad *kpad)
+static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
@@ -319,7 +319,7 @@ static irqreturn_t adp5588_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int __devinit adp5588_setup(struct i2c_client *client)
+static int adp5588_setup(struct i2c_client *client)
{
const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
@@ -382,7 +382,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
return 0;
}
-static void __devinit adp5588_report_switch_state(struct adp5588_kpad *kpad)
+static void adp5588_report_switch_state(struct adp5588_kpad *kpad)
{
int gpi_stat1 = adp5588_read(kpad->client, GPIO_DAT_STAT1);
int gpi_stat2 = adp5588_read(kpad->client, GPIO_DAT_STAT2);
@@ -420,8 +420,8 @@ static void __devinit adp5588_report_switch_state(struct adp5588_kpad *kpad)
}
-static int __devinit adp5588_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adp5588_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adp5588_kpad *kpad;
const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
@@ -587,7 +587,7 @@ static int __devinit adp5588_probe(struct i2c_client *client,
return error;
}
-static int __devexit adp5588_remove(struct i2c_client *client)
+static int adp5588_remove(struct i2c_client *client)
{
struct adp5588_kpad *kpad = i2c_get_clientdata(client);
@@ -650,7 +650,7 @@ static struct i2c_driver adp5588_driver = {
#endif
},
.probe = adp5588_probe,
- .remove = __devexit_p(adp5588_remove),
+ .remove = adp5588_remove,
.id_table = adp5588_id,
};
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 74e603213386..67d12b3427c9 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -464,7 +464,7 @@ static int adp5589_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
-static int __devinit adp5589_build_gpiomap(struct adp5589_kpad *kpad,
+static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
const struct adp5589_kpad_platform_data *pdata)
{
bool pin_used[ADP5589_MAXGPIO];
@@ -496,7 +496,7 @@ static int __devinit adp5589_build_gpiomap(struct adp5589_kpad *kpad,
return n_unused;
}
-static int __devinit adp5589_gpio_add(struct adp5589_kpad *kpad)
+static int adp5589_gpio_add(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
@@ -550,7 +550,7 @@ static int __devinit adp5589_gpio_add(struct adp5589_kpad *kpad)
return 0;
}
-static void __devexit adp5589_gpio_remove(struct adp5589_kpad *kpad)
+static void adp5589_gpio_remove(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
@@ -641,8 +641,7 @@ static irqreturn_t adp5589_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad,
- unsigned short key)
+static int adp5589_get_evcode(struct adp5589_kpad *kpad, unsigned short key)
{
int i;
@@ -655,7 +654,7 @@ static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad,
return -EINVAL;
}
-static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
+static int adp5589_setup(struct adp5589_kpad *kpad)
{
struct i2c_client *client = kpad->client;
const struct adp5589_kpad_platform_data *pdata =
@@ -820,7 +819,7 @@ static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
return 0;
}
-static void __devinit adp5589_report_switch_state(struct adp5589_kpad *kpad)
+static void adp5589_report_switch_state(struct adp5589_kpad *kpad)
{
int gpi_stat_tmp, pin_loc;
int i;
@@ -860,8 +859,8 @@ static void __devinit adp5589_report_switch_state(struct adp5589_kpad *kpad)
input_sync(kpad->input);
}
-static int __devinit adp5589_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adp5589_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adp5589_kpad *kpad;
const struct adp5589_kpad_platform_data *pdata =
@@ -1045,7 +1044,7 @@ err_free_mem:
return error;
}
-static int __devexit adp5589_remove(struct i2c_client *client)
+static int adp5589_remove(struct i2c_client *client)
{
struct adp5589_kpad *kpad = i2c_get_clientdata(client);
@@ -1104,7 +1103,7 @@ static struct i2c_driver adp5589_driver = {
.pm = &adp5589_dev_pm_ops,
},
.probe = adp5589_probe,
- .remove = __devexit_p(adp5589_remove),
+ .remove = adp5589_remove,
.id_table = adp5589_id,
};
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index add5ffd9fe26..2626773ff29b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -676,6 +676,39 @@ static inline void atkbd_disable(struct atkbd *atkbd)
serio_continue_rx(atkbd->ps2dev.serio);
}
+static int atkbd_activate(struct atkbd *atkbd)
+{
+ struct ps2dev *ps2dev = &atkbd->ps2dev;
+
+/*
+ * Enable the keyboard to receive keystrokes.
+ */
+
+ if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
+ dev_err(&ps2dev->serio->dev,
+ "Failed to enable keyboard on %s\n",
+ ps2dev->serio->phys);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * atkbd_deactivate() resets and disables the keyboard from sending
+ * keystrokes.
+ */
+
+static void atkbd_deactivate(struct atkbd *atkbd)
+{
+ struct ps2dev *ps2dev = &atkbd->ps2dev;
+
+ if (ps2_command(ps2dev, NULL, ATKBD_CMD_RESET_DIS))
+ dev_err(&ps2dev->serio->dev,
+ "Failed to deactivate keyboard on %s\n",
+ ps2dev->serio->phys);
+}
+
/*
* atkbd_probe() probes for an AT keyboard on a serio port.
*/
@@ -726,11 +759,17 @@ static int atkbd_probe(struct atkbd *atkbd)
if (atkbd->id == 0xaca1 && atkbd->translated) {
dev_err(&ps2dev->serio->dev,
- "NCD terminal keyboards are only supported on non-translating controlelrs. "
+ "NCD terminal keyboards are only supported on non-translating controllers. "
"Use i8042.direct=1 to disable translation.\n");
return -1;
}
+/*
+ * Make sure nothing is coming from the keyboard and disturbs our
+ * internal state.
+ */
+ atkbd_deactivate(atkbd);
+
return 0;
}
@@ -825,24 +864,6 @@ static int atkbd_reset_state(struct atkbd *atkbd)
return 0;
}
-static int atkbd_activate(struct atkbd *atkbd)
-{
- struct ps2dev *ps2dev = &atkbd->ps2dev;
-
-/*
- * Enable the keyboard to receive keystrokes.
- */
-
- if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
- dev_err(&ps2dev->serio->dev,
- "Failed to enable keyboard on %s\n",
- ps2dev->serio->phys);
- return -1;
- }
-
- return 0;
-}
-
/*
* atkbd_cleanup() restores the keyboard state so that BIOS is happy after a
* reboot.
@@ -1150,7 +1171,6 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra);
atkbd_reset_state(atkbd);
- atkbd_activate(atkbd);
} else {
atkbd->set = 2;
@@ -1165,6 +1185,8 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
goto fail3;
atkbd_enable(atkbd);
+ if (serio->write)
+ atkbd_activate(atkbd);
err = input_register_device(atkbd->dev);
if (err)
@@ -1208,8 +1230,6 @@ static int atkbd_reconnect(struct serio *serio)
if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
goto out;
- atkbd_activate(atkbd);
-
/*
* Restore LED state and repeat rate. While input core
* will do this for us at resume time reconnect may happen
@@ -1223,7 +1243,17 @@ static int atkbd_reconnect(struct serio *serio)
}
+ /*
+ * Reset our state machine in case reconnect happened in the middle
+ * of multi-byte scancode.
+ */
+ atkbd->xl_bit = 0;
+ atkbd->emul = 0;
+
atkbd_enable(atkbd);
+ if (atkbd->write)
+ atkbd_activate(atkbd);
+
retval = 0;
out:
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 8eb9116e0a5f..20b9fa91fb9e 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -177,7 +177,7 @@ static irqreturn_t bfin_kpad_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_kpad_probe(struct platform_device *pdev)
+static int bfin_kpad_probe(struct platform_device *pdev)
{
struct bf54x_kpad *bf54x_kpad;
struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
@@ -331,7 +331,7 @@ out:
return error;
}
-static int __devexit bfin_kpad_remove(struct platform_device *pdev)
+static int bfin_kpad_remove(struct platform_device *pdev)
{
struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
@@ -390,7 +390,7 @@ static struct platform_driver bfin_kpad_device_driver = {
.owner = THIS_MODULE,
},
.probe = bfin_kpad_probe,
- .remove = __devexit_p(bfin_kpad_remove),
+ .remove = bfin_kpad_remove,
.suspend = bfin_kpad_suspend,
.resume = bfin_kpad_resume,
};
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index d5bacbb479b0..4e4e453ea15e 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -303,7 +303,7 @@ fail1:
return error;
}
-static int __devexit davinci_ks_remove(struct platform_device *pdev)
+static int davinci_ks_remove(struct platform_device *pdev)
{
struct davinci_ks *davinci_ks = platform_get_drvdata(pdev);
@@ -326,7 +326,7 @@ static struct platform_driver davinci_ks_driver = {
.name = "davinci_keyscan",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(davinci_ks_remove),
+ .remove = davinci_ks_remove,
};
static int __init davinci_ks_init(void)
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 7363402de8d4..9857e8fd0987 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -232,7 +232,7 @@ static int ep93xx_keypad_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops,
ep93xx_keypad_suspend, ep93xx_keypad_resume);
-static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
+static int ep93xx_keypad_probe(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad;
const struct matrix_keymap_data *keymap_data;
@@ -346,7 +346,7 @@ failed_free:
return err;
}
-static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
+static int ep93xx_keypad_remove(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -380,7 +380,7 @@ static struct platform_driver ep93xx_keypad_driver = {
.pm = &ep93xx_keypad_pm_ops,
},
.probe = ep93xx_keypad_probe,
- .remove = __devexit_p(ep93xx_keypad_remove),
+ .remove = ep93xx_keypad_remove,
};
module_platform_driver(ep93xx_keypad_driver);
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
new file mode 100644
index 000000000000..9f60a2ec88db
--- /dev/null
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+enum {
+ REG_READ = 0x00,
+ REG_SET_PAGE = 0x00,
+ REG_LEN = 0x04,
+ REG_DATA = 0x08,
+
+ PAGE_NAME = 0x00000,
+ PAGE_EVBITS = 0x10000,
+ PAGE_ABSDATA = 0x20000 | EV_ABS,
+};
+
+struct event_dev {
+ struct input_dev *input;
+ int irq;
+ void __iomem *addr;
+ char name[0];
+};
+
+static irqreturn_t events_interrupt(int irq, void *dev_id)
+{
+ struct event_dev *edev = dev_id;
+ unsigned type, code, value;
+
+ type = __raw_readl(edev->addr + REG_READ);
+ code = __raw_readl(edev->addr + REG_READ);
+ value = __raw_readl(edev->addr + REG_READ);
+
+ input_event(edev->input, type, code, value);
+ input_sync(edev->input);
+ return IRQ_HANDLED;
+}
+
+static void events_import_bits(struct event_dev *edev,
+ unsigned long bits[], unsigned type, size_t count)
+{
+ void __iomem *addr = edev->addr;
+ int i, j;
+ size_t size;
+ uint8_t val;
+
+ __raw_writel(PAGE_EVBITS | type, addr + REG_SET_PAGE);
+
+ size = __raw_readl(addr + REG_LEN) * 8;
+ if (size < count)
+ count = size;
+
+ addr += REG_DATA;
+ for (i = 0; i < count; i += 8) {
+ val = __raw_readb(addr++);
+ for (j = 0; j < 8; j++)
+ if (val & 1 << j)
+ set_bit(i + j, bits);
+ }
+}
+
+static void events_import_abs_params(struct event_dev *edev)
+{
+ struct input_dev *input_dev = edev->input;
+ void __iomem *addr = edev->addr;
+ u32 val[4];
+ int count;
+ int i, j;
+
+ __raw_writel(PAGE_ABSDATA, addr + REG_SET_PAGE);
+
+ count = __raw_readl(addr + REG_LEN) / sizeof(val);
+ if (count > ABS_MAX)
+ count = ABS_MAX;
+
+ for (i = 0; i < count; i++) {
+ if (!test_bit(i, input_dev->absbit))
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(val); j++) {
+ int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+ val[j] = __raw_readl(edev->addr + REG_DATA + offset);
+ }
+
+ input_set_abs_params(input_dev, i,
+ val[0], val[1], val[2], val[3]);
+ }
+}
+
+static int events_probe(struct platform_device *pdev)
+{
+ struct input_dev *input_dev;
+ struct event_dev *edev;
+ struct resource *res;
+ unsigned keymapnamelen;
+ void __iomem *addr;
+ int irq;
+ int i;
+ int error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ addr = devm_ioremap(&pdev->dev, res->start, 4096);
+ if (!addr)
+ return -ENOMEM;
+
+ __raw_writel(PAGE_NAME, addr + REG_SET_PAGE);
+ keymapnamelen = __raw_readl(addr + REG_LEN);
+
+ edev = devm_kzalloc(&pdev->dev,
+ sizeof(struct event_dev) + keymapnamelen + 1,
+ GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ edev->input = input_dev;
+ edev->addr = addr;
+ edev->irq = irq;
+
+ for (i = 0; i < keymapnamelen; i++)
+ edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
+
+ pr_debug("events_probe() keymap=%s\n", edev->name);
+
+ input_dev->name = edev->name;
+ input_dev->id.bustype = BUS_HOST;
+
+ events_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX);
+ events_import_bits(edev, input_dev->keybit, EV_KEY, KEY_MAX);
+ events_import_bits(edev, input_dev->relbit, EV_REL, REL_MAX);
+ events_import_bits(edev, input_dev->absbit, EV_ABS, ABS_MAX);
+ events_import_bits(edev, input_dev->mscbit, EV_MSC, MSC_MAX);
+ events_import_bits(edev, input_dev->ledbit, EV_LED, LED_MAX);
+ events_import_bits(edev, input_dev->sndbit, EV_SND, SND_MAX);
+ events_import_bits(edev, input_dev->ffbit, EV_FF, FF_MAX);
+ events_import_bits(edev, input_dev->swbit, EV_SW, SW_MAX);
+
+ events_import_abs_params(edev);
+
+ error = devm_request_irq(&pdev->dev, edev->irq, events_interrupt, 0,
+ "goldfish-events-keypad", edev);
+ if (error)
+ return error;
+
+ error = input_register_device(input_dev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static struct platform_driver events_driver = {
+ .probe = events_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "goldfish_events",
+ },
+};
+
+module_platform_driver(events_driver);
+
+MODULE_AUTHOR("Brian Swetland");
+MODULE_DESCRIPTION("Goldfish Event Device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6a68041c261d..b29ca651a395 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -423,10 +423,10 @@ out:
return IRQ_HANDLED;
}
-static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
- struct input_dev *input,
- struct gpio_button_data *bdata,
- const struct gpio_keys_button *button)
+static int gpio_keys_setup_key(struct platform_device *pdev,
+ struct input_dev *input,
+ struct gpio_button_data *bdata,
+ const struct gpio_keys_button *button)
{
const char *desc = button->desc ? button->desc : "gpio_keys";
struct device *dev = &pdev->dev;
@@ -440,21 +440,13 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
if (gpio_is_valid(button->gpio)) {
- error = gpio_request(button->gpio, desc);
+ error = gpio_request_one(button->gpio, GPIOF_IN, desc);
if (error < 0) {
dev_err(dev, "Failed to request GPIO %d, error %d\n",
button->gpio, error);
return error;
}
- error = gpio_direction_input(button->gpio);
- if (error < 0) {
- dev_err(dev,
- "Failed to configure direction for GPIO %d, error %d\n",
- button->gpio, error);
- goto fail;
- }
-
if (button->debounce_interval) {
error = gpio_set_debounce(button->gpio,
button->debounce_interval * 1000);
@@ -526,12 +518,35 @@ fail:
return error;
}
+static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
+{
+ struct input_dev *input = ddata->input;
+ int i;
+
+ for (i = 0; i < ddata->pdata->nbuttons; i++) {
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (gpio_is_valid(bdata->button->gpio))
+ gpio_keys_gpio_report_event(bdata);
+ }
+ input_sync(input);
+}
+
static int gpio_keys_open(struct input_dev *input)
{
struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = ddata->pdata;
+ int error;
- return pdata->enable ? pdata->enable(input->dev.parent) : 0;
+ if (pdata->enable) {
+ error = pdata->enable(input->dev.parent);
+ if (error)
+ return error;
+ }
+
+ /* Report current state of buttons that are connected to GPIOs */
+ gpio_keys_report_state(ddata);
+
+ return 0;
}
static void gpio_keys_close(struct input_dev *input)
@@ -551,7 +566,7 @@ static void gpio_keys_close(struct input_dev *input)
/*
* Translate OpenFirmware node properties into platform_data
*/
-static struct gpio_keys_platform_data * __devinit
+static struct gpio_keys_platform_data *
gpio_keys_get_devtree_pdata(struct device *dev)
{
struct device_node *node, *pp;
@@ -587,6 +602,7 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
+ int gpio;
enum of_gpio_flags flags;
if (!of_find_property(pp, "gpios", NULL)) {
@@ -595,9 +611,19 @@ gpio_keys_get_devtree_pdata(struct device *dev)
continue;
}
+ gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (gpio < 0) {
+ error = gpio;
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get gpio flags, error: %d\n",
+ error);
+ goto err_free_pdata;
+ }
+
button = &pdata->buttons[i++];
- button->gpio = of_get_gpio_flags(pp, 0, &flags);
+ button->gpio = gpio;
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
if (of_property_read_u32(pp, "linux,code", &button->code)) {
@@ -658,7 +684,7 @@ static void gpio_remove_key(struct gpio_button_data *bdata)
gpio_free(bdata->button->gpio);
}
-static int __devinit gpio_keys_probe(struct platform_device *pdev)
+static int gpio_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
@@ -731,14 +757,6 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
goto fail3;
}
- /* get current state of buttons that are connected to GPIOs */
- for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_button_data *bdata = &ddata->data[i];
- if (gpio_is_valid(bdata->button->gpio))
- gpio_keys_gpio_report_event(bdata);
- }
- input_sync(input);
-
device_init_wakeup(&pdev->dev, wakeup);
return 0;
@@ -760,7 +778,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
return error;
}
-static int __devexit gpio_keys_remove(struct platform_device *pdev)
+static int gpio_keys_remove(struct platform_device *pdev)
{
struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
struct input_dev *input = ddata->input;
@@ -788,6 +806,7 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
static int gpio_keys_suspend(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
+ struct input_dev *input = ddata->input;
int i;
if (device_may_wakeup(dev)) {
@@ -796,6 +815,11 @@ static int gpio_keys_suspend(struct device *dev)
if (bdata->button->wakeup)
enable_irq_wake(bdata->irq);
}
+ } else {
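+ /* Not a wakeup source: close the device while suspended if it is in use */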
+ mutex_lock(&input->mutex);
+ if (input->users)
+ gpio_keys_close(input);
+ mutex_unlock(&input->mutex);
}
return 0;
@@ -804,18 +828,27 @@ static int gpio_keys_suspend(struct device *dev)
static int gpio_keys_resume(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
+ struct input_dev *input = ddata->input;
+ int error = 0;
int i;
- for (i = 0; i < ddata->pdata->nbuttons; i++) {
- struct gpio_button_data *bdata = &ddata->data[i];
- if (bdata->button->wakeup && device_may_wakeup(dev))
- disable_irq_wake(bdata->irq);
-
- if (gpio_is_valid(bdata->button->gpio))
- gpio_keys_gpio_report_event(bdata);
+ if (device_may_wakeup(dev)) {
+ for (i = 0; i < ddata->pdata->nbuttons; i++) {
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (bdata->button->wakeup)
+ disable_irq_wake(bdata->irq);
+ }
+ } else {
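+ /* Reopen the device on resume if it was in use before suspend */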
+ mutex_lock(&input->mutex);
+ if (input->users)
+ error = gpio_keys_open(input);
+ mutex_unlock(&input->mutex);
}
- input_sync(ddata->input);
+ if (error)
+ return error;
+
+ gpio_keys_report_state(ddata);
return 0;
}
#endif
@@ -824,7 +857,7 @@ static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
static struct platform_driver gpio_keys_device_driver = {
.probe = gpio_keys_probe,
- .remove = __devexit_p(gpio_keys_remove),
+ .remove = gpio_keys_remove,
.driver = {
.name = "gpio-keys",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index f2142de789e7..21147164874d 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -103,8 +103,7 @@ static void gpio_keys_polled_close(struct input_polled_dev *dev)
}
#ifdef CONFIG_OF
-static struct gpio_keys_platform_data * __devinit
-gpio_keys_polled_get_devtree_pdata(struct device *dev)
+static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev)
{
struct device_node *node, *pp;
struct gpio_keys_platform_data *pdata;
@@ -136,6 +135,7 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
+ int gpio;
enum of_gpio_flags flags;
if (!of_find_property(pp, "gpios", NULL)) {
@@ -144,9 +144,19 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
continue;
}
+ gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (gpio < 0) {
+ error = gpio;
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get gpio flags, error: %d\n",
+ error);
+ goto err_free_pdata;
+ }
+
button = &pdata->buttons[i++];
- button->gpio = of_get_gpio_flags(pp, 0, &flags);
+ button->gpio = gpio;
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
if (of_property_read_u32(pp, "linux,code", &button->code)) {
@@ -196,7 +206,7 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
}
#endif
-static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
+static int gpio_keys_polled_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
@@ -246,7 +256,6 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
input = poll_dev->input;
- input->evbit[0] = BIT(EV_KEY);
input->name = pdev->name;
input->phys = DRV_NAME"/input0";
input->dev.parent = &pdev->dev;
@@ -256,6 +265,10 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0100;
+ __set_bit(EV_KEY, input->evbit);
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
for (i = 0; i < pdata->nbuttons; i++) {
struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_keys_button_data *bdata = &bdev->data[i];
@@ -268,22 +281,14 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
goto err_free_gpio;
}
- error = gpio_request(gpio,
- button->desc ? button->desc : DRV_NAME);
+ error = gpio_request_one(gpio, GPIOF_IN,
+ button->desc ?: DRV_NAME);
if (error) {
dev_err(dev, "unable to claim gpio %u, err=%d\n",
gpio, error);
goto err_free_gpio;
}
- error = gpio_direction_input(gpio);
- if (error) {
- dev_err(dev,
- "unable to set direction on gpio %u, err=%d\n",
- gpio, error);
- goto err_free_gpio;
- }
-
bdata->can_sleep = gpio_cansleep(gpio);
bdata->last_state = -1;
bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
@@ -329,7 +334,7 @@ err_free_pdata:
return error;
}
-static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
+static int gpio_keys_polled_remove(struct platform_device *pdev)
{
struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
@@ -357,7 +362,7 @@ static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
static struct platform_driver gpio_keys_polled_driver = {
.probe = gpio_keys_polled_probe,
- .remove = __devexit_p(gpio_keys_polled_remove),
+ .remove = gpio_keys_polled_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index 5f72440b50c8..198dc07a1be5 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -200,7 +200,7 @@ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len)
/* initialize HIL */
-static int __devinit hil_keyb_init(void)
+static int hil_keyb_init(void)
{
unsigned char c;
unsigned int i, kbid;
@@ -286,7 +286,7 @@ err1:
return err;
}
-static void __devexit hil_keyb_exit(void)
+static void hil_keyb_exit(void)
{
if (HIL_IRQ)
free_irq(HIL_IRQ, hil_dev.dev_id);
@@ -299,7 +299,7 @@ static void __devexit hil_keyb_exit(void)
}
#if defined(CONFIG_PARISC)
-static int __devinit hil_probe_chip(struct parisc_device *dev)
+static int hil_probe_chip(struct parisc_device *dev)
{
/* Only allow one HIL keyboard */
if (hil_dev.dev)
@@ -320,7 +320,7 @@ static int __devinit hil_probe_chip(struct parisc_device *dev)
return hil_keyb_init();
}
-static int __devexit hil_remove_chip(struct parisc_device *dev)
+static int hil_remove_chip(struct parisc_device *dev)
{
hil_keyb_exit();
@@ -341,7 +341,7 @@ static struct parisc_driver hil_driver = {
.name = "hil",
.id_table = hil_tbl,
.probe = hil_probe_chip,
- .remove = __devexit_p(hil_remove_chip),
+ .remove = hil_remove_chip,
};
static int __init hil_init(void)
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index cdc252612c0b..98f9113251d2 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -20,6 +20,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -362,7 +363,8 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad)
writew(reg_val, keypad->mmio_base + KPSR);
/* Columns as open drain and disable all rows */
- writew(0xff00, keypad->mmio_base + KPCR);
+ reg_val = (keypad->cols_en_mask & 0xff) << 8;
+ writew(reg_val, keypad->mmio_base + KPCR);
}
static void imx_keypad_close(struct input_dev *dev)
@@ -413,15 +415,23 @@ open_err:
return -EIO;
}
-static int __devinit imx_keypad_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static struct of_device_id imx_keypad_of_match[] = {
+ { .compatible = "fsl,imx21-kpp", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_keypad_of_match);
+#endif
+
+static int imx_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
struct imx_keypad *keypad;
struct input_dev *input_dev;
struct resource *res;
- int irq, error, i;
+ int irq, error, i, row, col;
- if (keymap_data == NULL) {
+ if (!keymap_data && !pdev->dev.of_node) {
dev_err(&pdev->dev, "no keymap defined\n");
return -EINVAL;
}
@@ -479,22 +489,6 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
goto failed_unmap;
}
- /* Search for rows and cols enabled */
- for (i = 0; i < keymap_data->keymap_size; i++) {
- keypad->rows_en_mask |= 1 << KEY_ROW(keymap_data->keymap[i]);
- keypad->cols_en_mask |= 1 << KEY_COL(keymap_data->keymap[i]);
- }
-
- if (keypad->rows_en_mask > ((1 << MAX_MATRIX_KEY_ROWS) - 1) ||
- keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
- dev_err(&pdev->dev,
- "invalid key data (too many rows or colums)\n");
- error = -EINVAL;
- goto failed_clock_put;
- }
- dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
- dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);
-
/* Init the Input device */
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
@@ -511,6 +505,19 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
goto failed_clock_put;
}
+ /* Search for rows and cols enabled */
+ for (row = 0; row < MAX_MATRIX_KEY_ROWS; row++) {
+ for (col = 0; col < MAX_MATRIX_KEY_COLS; col++) {
+ i = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
+ if (keypad->keycodes[i] != KEY_RESERVED) {
+ keypad->rows_en_mask |= 1 << row;
+ keypad->cols_en_mask |= 1 << col;
+ }
+ }
+ }
+ dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
+ dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);
+
__set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad);
@@ -554,7 +561,7 @@ failed_rel_mem:
return error;
}
-static int __devexit imx_keypad_remove(struct platform_device *pdev)
+static int imx_keypad_remove(struct platform_device *pdev)
{
struct imx_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -630,9 +637,10 @@ static struct platform_driver imx_keypad_driver = {
.name = "imx-keypad",
.owner = THIS_MODULE,
.pm = &imx_kbd_pm_ops,
+ .of_match_table = of_match_ptr(imx_keypad_of_match),
},
.probe = imx_keypad_probe,
- .remove = __devexit_p(imx_keypad_remove),
+ .remove = imx_keypad_remove,
};
module_platform_driver(imx_keypad_driver);
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 24f3ea01c4d5..74e75a6e8deb 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -179,7 +179,7 @@ static void jornadakbd680_poll(struct input_polled_dev *dev)
memcpy(jornadakbd->old_scan, jornadakbd->new_scan, JORNADA_SCAN_SIZE);
}
-static int __devinit jornada680kbd_probe(struct platform_device *pdev)
+static int jornada680kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
struct input_polled_dev *poll_dev;
@@ -240,7 +240,7 @@ static int __devinit jornada680kbd_probe(struct platform_device *pdev)
}
-static int __devexit jornada680kbd_remove(struct platform_device *pdev)
+static int jornada680kbd_remove(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
@@ -258,7 +258,7 @@ static struct platform_driver jornada680kbd_driver = {
.owner = THIS_MODULE,
},
.probe = jornada680kbd_probe,
- .remove = __devexit_p(jornada680kbd_remove),
+ .remove = jornada680kbd_remove,
};
module_platform_driver(jornada680kbd_driver);
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index 9d639fa1afbd..5ceef636df2f 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -94,7 +94,7 @@ static irqreturn_t jornada720_kbd_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
};
-static int __devinit jornada720_kbd_probe(struct platform_device *pdev)
+static int jornada720_kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
struct input_dev *input_dev;
@@ -152,7 +152,7 @@ static int __devinit jornada720_kbd_probe(struct platform_device *pdev)
return err;
};
-static int __devexit jornada720_kbd_remove(struct platform_device *pdev)
+static int jornada720_kbd_remove(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
@@ -173,6 +173,6 @@ static struct platform_driver jornada720_kbd_driver = {
.owner = THIS_MODULE,
},
.probe = jornada720_kbd_probe,
- .remove = __devexit_p(jornada720_kbd_remove),
+ .remove = jornada720_kbd_remove,
};
module_platform_driver(jornada720_kbd_driver);
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 39ac2787e275..0de23f41b2d3 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -398,7 +398,7 @@ static irqreturn_t lm8323_irq(int irq, void *_lm)
lm8323_configure(lm);
}
for (i = 0; i < LM8323_NUM_PWMS; i++) {
- if (ints & (1 << (INT_PWM1 + i))) {
+ if (ints & (INT_PWM1 << i)) {
dev_vdbg(&lm->client->dev,
"pwm%d engine completed\n", i);
pwm_done(&lm->pwm[i]);
@@ -624,7 +624,7 @@ static ssize_t lm8323_set_disable(struct device *dev,
}
static DEVICE_ATTR(disable_kp, 0644, lm8323_show_disable, lm8323_set_disable);
-static int __devinit lm8323_probe(struct i2c_client *client,
+static int lm8323_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm8323_platform_data *pdata = client->dev.platform_data;
@@ -764,7 +764,7 @@ fail1:
return err;
}
-static int __devexit lm8323_remove(struct i2c_client *client)
+static int lm8323_remove(struct i2c_client *client)
{
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -846,7 +846,7 @@ static struct i2c_driver lm8323_i2c_driver = {
.pm = &lm8323_pm_ops,
},
.probe = lm8323_probe,
- .remove = __devexit_p(lm8323_remove),
+ .remove = lm8323_remove,
.id_table = lm8323_id,
};
MODULE_DEVICE_TABLE(i2c, lm8323_id);
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 081fd9effa8c..5a8ca35dc9af 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -128,7 +128,7 @@ static irqreturn_t lm8333_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit lm8333_probe(struct i2c_client *client,
+static int lm8333_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct lm8333_platform_data *pdata = client->dev.platform_data;
@@ -202,7 +202,7 @@ static int __devinit lm8333_probe(struct i2c_client *client,
return err;
}
-static int __devexit lm8333_remove(struct i2c_client *client)
+static int lm8333_remove(struct i2c_client *client)
{
struct lm8333 *lm8333 = i2c_get_clientdata(client);
@@ -225,7 +225,7 @@ static struct i2c_driver lm8333_driver = {
.owner = THIS_MODULE,
},
.probe = lm8333_probe,
- .remove = __devexit_p(lm8333_remove),
+ .remove = lm8333_remove,
.id_table = lm8333_id,
};
module_i2c_driver(lm8333_driver);
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index b1ab29861e1c..c94d610b9d78 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -46,7 +46,7 @@ MODULE_LICENSE("GPL");
#define KEY_CENTER KEY_F15
static const unsigned char
-locomokbd_keycode[LOCOMOKBD_NUMKEYS] __devinitconst = {
+locomokbd_keycode[LOCOMOKBD_NUMKEYS] = {
0, KEY_ESC, KEY_ACTIVITY, 0, 0, 0, 0, 0, 0, 0, /* 0 - 9 */
0, 0, 0, 0, 0, 0, 0, KEY_MENU, KEY_HOME, KEY_CONTACT, /* 10 - 19 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 29 */
@@ -236,7 +236,7 @@ static void locomokbd_close(struct input_dev *dev)
locomo_writel(r, locomokbd->base + LOCOMO_KIC);
}
-static int __devinit locomokbd_probe(struct locomo_dev *dev)
+static int locomokbd_probe(struct locomo_dev *dev)
{
struct locomokbd *locomokbd;
struct input_dev *input_dev;
@@ -321,7 +321,7 @@ static int __devinit locomokbd_probe(struct locomo_dev *dev)
return err;
}
-static int __devexit locomokbd_remove(struct locomo_dev *dev)
+static int locomokbd_remove(struct locomo_dev *dev)
{
struct locomokbd *locomokbd = locomo_get_drvdata(dev);
@@ -345,7 +345,7 @@ static struct locomo_driver keyboard_driver = {
},
.devid = LOCOMO_DEVID_KEYBOARD,
.probe = locomokbd_probe,
- .remove = __devexit_p(locomokbd_remove),
+ .remove = locomokbd_remove,
};
static int __init locomokbd_init(void)
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index dd786c8a7584..1b8add6cfb9d 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -139,7 +139,7 @@ static void lpc32xx_kscan_close(struct input_dev *dev)
clk_disable_unprepare(kscandat->clk);
}
-static int __devinit lpc32xx_parse_dt(struct device *dev,
+static int lpc32xx_parse_dt(struct device *dev,
struct lpc32xx_kscan_drv *kscandat)
{
struct device_node *np = dev->of_node;
@@ -166,7 +166,7 @@ static int __devinit lpc32xx_parse_dt(struct device *dev,
return 0;
}
-static int __devinit lpc32xx_kscan_probe(struct platform_device *pdev)
+static int lpc32xx_kscan_probe(struct platform_device *pdev)
{
struct lpc32xx_kscan_drv *kscandat;
struct input_dev *input;
@@ -310,7 +310,7 @@ err_free_mem:
return error;
}
-static int __devexit lpc32xx_kscan_remove(struct platform_device *pdev)
+static int lpc32xx_kscan_remove(struct platform_device *pdev)
{
struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);
@@ -377,7 +377,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match);
static struct platform_driver lpc32xx_kscan_driver = {
.probe = lpc32xx_kscan_probe,
- .remove = __devexit_p(lpc32xx_kscan_remove),
+ .remove = lpc32xx_kscan_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 18b72372028a..71d77192ac1e 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -23,6 +23,9 @@
#include <linux/gpio.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
struct matrix_keypad {
const struct matrix_keypad_platform_data *pdata;
@@ -37,8 +40,6 @@ struct matrix_keypad {
bool scan_pending;
bool stopped;
bool gpio_all_disabled;
-
- unsigned short keycodes[];
};
/*
@@ -118,6 +119,7 @@ static void matrix_keypad_scan(struct work_struct *work)
struct matrix_keypad *keypad =
container_of(work, struct matrix_keypad, work.work);
struct input_dev *input_dev = keypad->input_dev;
+ const unsigned short *keycodes = input_dev->keycode;
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
@@ -153,7 +155,7 @@ static void matrix_keypad_scan(struct work_struct *work)
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev,
- keypad->keycodes[code],
+ keycodes[code],
new_state[col] & (1 << row));
}
}
@@ -299,8 +301,8 @@ static int matrix_keypad_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
matrix_keypad_suspend, matrix_keypad_resume);
-static int __devinit matrix_keypad_init_gpio(struct platform_device *pdev,
- struct matrix_keypad *keypad)
+static int matrix_keypad_init_gpio(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
{
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
int i, err;
@@ -394,33 +396,95 @@ static void matrix_keypad_free_gpio(struct matrix_keypad *keypad)
gpio_free(pdata->col_gpios[i]);
}
-static int __devinit matrix_keypad_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static struct matrix_keypad_platform_data *
+matrix_keypad_parse_dt(struct device *dev)
+{
+ struct matrix_keypad_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+ unsigned int *gpios;
+ int i, nrow, ncol;
+
+ if (!np) {
+ dev_err(dev, "device lacks DT data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "could not allocate memory for platform data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->num_row_gpios = nrow = of_gpio_named_count(np, "row-gpios");
+ pdata->num_col_gpios = ncol = of_gpio_named_count(np, "col-gpios");
+ if (nrow <= 0 || ncol <= 0) {
+ dev_err(dev, "number of keypad rows/columns not specified\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_get_property(np, "linux,no-autorepeat", NULL))
+ pdata->no_autorepeat = true;
+ if (of_get_property(np, "linux,wakeup", NULL))
+ pdata->wakeup = true;
+ if (of_get_property(np, "gpio-activelow", NULL))
+ pdata->active_low = true;
+
+ of_property_read_u32(np, "debounce-delay-ms", &pdata->debounce_ms);
+ of_property_read_u32(np, "col-scan-delay-us",
+ &pdata->col_scan_delay_us);
+
+ gpios = devm_kzalloc(dev,
+ sizeof(unsigned int) *
+ (pdata->num_row_gpios + pdata->num_col_gpios),
+ GFP_KERNEL);
+ if (!gpios) {
+ dev_err(dev, "could not allocate memory for gpios\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ gpios[i] = of_get_named_gpio(np, "row-gpios", i);
+
+ for (i = 0; i < pdata->num_col_gpios; i++)
+ gpios[pdata->num_row_gpios + i] =
+ of_get_named_gpio(np, "col-gpios", i);
+
+ pdata->row_gpios = gpios;
+ pdata->col_gpios = &gpios[pdata->num_row_gpios];
+
+ return pdata;
+}
+#else
+static inline struct matrix_keypad_platform_data *
+matrix_keypad_parse_dt(struct device *dev)
+{
+ dev_err(dev, "no platform data defined\n");
+
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int matrix_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
- const struct matrix_keymap_data *keymap_data;
struct matrix_keypad *keypad;
struct input_dev *input_dev;
- unsigned int row_shift;
- size_t keymap_size;
int err;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
-
- keymap_data = pdata->keymap_data;
- if (!keymap_data) {
+ pdata = matrix_keypad_parse_dt(&pdev->dev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return PTR_ERR(pdata);
+ }
+ } else if (!pdata->keymap_data) {
dev_err(&pdev->dev, "no keymap data defined\n");
return -EINVAL;
}
- row_shift = get_count_order(pdata->num_col_gpios);
- keymap_size = (pdata->num_row_gpios << row_shift) *
- sizeof(keypad->keycodes[0]);
- keypad = kzalloc(sizeof(struct matrix_keypad) + keymap_size,
- GFP_KERNEL);
+ keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
input_dev = input_allocate_device();
if (!keypad || !input_dev) {
err = -ENOMEM;
@@ -429,7 +493,7 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
keypad->input_dev = input_dev;
keypad->pdata = pdata;
- keypad->row_shift = row_shift;
+ keypad->row_shift = get_count_order(pdata->num_col_gpios);
keypad->stopped = true;
INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
spin_lock_init(&keypad->lock);
@@ -440,12 +504,14 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
- err = matrix_keypad_build_keymap(keymap_data, NULL,
+ err = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
pdata->num_row_gpios,
pdata->num_col_gpios,
- keypad->keycodes, input_dev);
- if (err)
+ NULL, input_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
goto err_free_mem;
+ }
if (!pdata->no_autorepeat)
__set_bit(EV_REP, input_dev->evbit);
@@ -473,7 +539,7 @@ err_free_mem:
return err;
}
-static int __devexit matrix_keypad_remove(struct platform_device *pdev)
+static int matrix_keypad_remove(struct platform_device *pdev)
{
struct matrix_keypad *keypad = platform_get_drvdata(pdev);
@@ -488,13 +554,22 @@ static int __devexit matrix_keypad_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id matrix_keypad_dt_match[] = {
+ { .compatible = "gpio-matrix-keypad" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, matrix_keypad_dt_match);
+#endif
+
static struct platform_driver matrix_keypad_driver = {
.probe = matrix_keypad_probe,
- .remove = __devexit_p(matrix_keypad_remove),
+ .remove = matrix_keypad_remove,
.driver = {
.name = "matrix-keypad",
.owner = THIS_MODULE,
.pm = &matrix_keypad_pm_ops,
+ .of_match_table = of_match_ptr(matrix_keypad_dt_match),
},
};
module_platform_driver(matrix_keypad_driver);
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index 8edada8ae712..7c7af2b01e65 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -179,7 +179,7 @@ static void max7359_initialize(struct i2c_client *client)
max7359_fall_deepsleep(client);
}
-static int __devinit max7359_probe(struct i2c_client *client,
+static int max7359_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct matrix_keymap_data *keymap_data = client->dev.platform_data;
@@ -260,7 +260,7 @@ failed_free_mem:
return error;
}
-static int __devexit max7359_remove(struct i2c_client *client)
+static int max7359_remove(struct i2c_client *client)
{
struct max7359_keypad *keypad = i2c_get_clientdata(client);
@@ -312,7 +312,7 @@ static struct i2c_driver max7359_i2c_driver = {
.pm = &max7359_pm,
},
.probe = max7359_probe,
- .remove = __devexit_p(max7359_remove),
+ .remove = max7359_remove,
.id_table = max7359_ids,
};
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 0d77f6c84950..7c236f9c6a51 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -97,7 +97,7 @@ static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mcs_touchkey_probe(struct i2c_client *client,
+static int mcs_touchkey_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct mcs_platform_data *pdata;
@@ -200,7 +200,7 @@ err_free_mem:
return error;
}
-static int __devexit mcs_touchkey_remove(struct i2c_client *client)
+static int mcs_touchkey_remove(struct i2c_client *client)
{
struct mcs_touchkey_data *data = i2c_get_clientdata(client);
@@ -270,7 +270,7 @@ static struct i2c_driver mcs_touchkey_driver = {
.pm = &mcs_touchkey_pm_ops,
},
.probe = mcs_touchkey_probe,
- .remove = __devexit_p(mcs_touchkey_remove),
+ .remove = mcs_touchkey_remove,
.shutdown = mcs_touchkey_shutdown,
.id_table = mcs_touchkey_id,
};
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 7613f1cac951..f7f3e9a9fd3f 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -71,7 +71,7 @@ struct mpr121_init_register {
u8 val;
};
-static const struct mpr121_init_register init_reg_table[] __devinitconst = {
+static const struct mpr121_init_register init_reg_table[] = {
{ MHD_RISING_ADDR, 0x1 },
{ NHD_RISING_ADDR, 0x1 },
{ MHD_FALLING_ADDR, 0x1 },
@@ -123,7 +123,7 @@ out:
return IRQ_HANDLED;
}
-static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
+static int mpr121_phys_init(const struct mpr121_platform_data *pdata,
struct mpr121_touchkey *mpr121,
struct i2c_client *client)
{
@@ -185,8 +185,8 @@ err_i2c_write:
return ret;
}
-static int __devinit mpr_touchkey_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mpr_touchkey_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
const struct mpr121_platform_data *pdata = client->dev.platform_data;
struct mpr121_touchkey *mpr121;
@@ -272,7 +272,7 @@ err_free_mem:
return error;
}
-static int __devexit mpr_touchkey_remove(struct i2c_client *client)
+static int mpr_touchkey_remove(struct i2c_client *client)
{
struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client);
@@ -327,7 +327,7 @@ static struct i2c_driver mpr_touchkey_driver = {
},
.id_table = mpr121_id,
.probe = mpr_touchkey_probe,
- .remove = __devexit_p(mpr_touchkey_remove),
+ .remove = mpr_touchkey_remove,
};
module_i2c_driver(mpr_touchkey_driver);
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 49f5fa64e0b1..0e6a8151fee3 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -67,6 +67,7 @@ struct ske_keypad {
const struct ske_keypad_platform_data *board;
unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS];
struct clk *clk;
+ struct clk *pclk;
spinlock_t ske_keypad_lock;
};
@@ -271,11 +272,18 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
goto err_free_mem_region;
}
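+ /* Grab the APB bus clock in addition to the keypad functional clock */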
+ keypad->pclk = clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(keypad->pclk)) {
+ dev_err(&pdev->dev, "failed to get pclk\n");
+ error = PTR_ERR(keypad->pclk);
+ goto err_iounmap;
+ }
+
keypad->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
error = PTR_ERR(keypad->clk);
- goto err_iounmap;
+ goto err_pclk;
}
input->id.bustype = BUS_HOST;
@@ -287,14 +295,25 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
keypad->keymap, input);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_iounmap;
+ goto err_clk;
}
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- clk_enable(keypad->clk);
+ error = clk_prepare_enable(keypad->pclk);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to prepare/enable pclk\n");
+ goto err_clk;
+ }
+
+ error = clk_prepare_enable(keypad->clk);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to prepare/enable clk\n");
+ goto err_pclk_disable;
+ }
+
/* go through board initialization helpers */
if (keypad->board->init)
@@ -330,8 +349,13 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
err_free_irq:
free_irq(keypad->irq, keypad);
err_clk_disable:
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
+err_pclk_disable:
+ clk_disable_unprepare(keypad->pclk);
+err_clk:
clk_put(keypad->clk);
+err_pclk:
+ clk_put(keypad->pclk);
err_iounmap:
iounmap(keypad->reg_base);
err_free_mem_region:
@@ -342,7 +366,7 @@ err_free_mem:
return error;
}
-static int __devexit ske_keypad_remove(struct platform_device *pdev)
+static int ske_keypad_remove(struct platform_device *pdev)
{
struct ske_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -351,7 +375,7 @@ static int __devexit ske_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input);
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
clk_put(keypad->clk);
if (keypad->board->exit)
@@ -403,7 +427,7 @@ static struct platform_driver ske_keypad_driver = {
.owner = THIS_MODULE,
.pm = &ske_keypad_dev_pm_ops,
},
- .remove = __devexit_p(ske_keypad_remove),
+ .remove = ske_keypad_remove,
};
static int __init ske_keypad_init(void)
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 4a5fcc8026f5..d0d5226d9cd4 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -244,7 +244,7 @@ static int omap_kp_resume(struct platform_device *dev)
#define omap_kp_resume NULL
#endif
-static int __devinit omap_kp_probe(struct platform_device *pdev)
+static int omap_kp_probe(struct platform_device *pdev)
{
struct omap_kp *omap_kp;
struct input_dev *input_dev;
@@ -357,7 +357,7 @@ err2:
return -EINVAL;
}
-static int __devexit omap_kp_remove(struct platform_device *pdev)
+static int omap_kp_remove(struct platform_device *pdev)
{
struct omap_kp *omap_kp = platform_get_drvdata(pdev);
@@ -379,7 +379,7 @@ static int __devexit omap_kp_remove(struct platform_device *pdev)
static struct platform_driver omap_kp_driver = {
.probe = omap_kp_probe,
- .remove = __devexit_p(omap_kp_remove),
+ .remove = omap_kp_remove,
.suspend = omap_kp_suspend,
.resume = omap_kp_resume,
.driver = {
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index c05f98c41410..e25b022692cd 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -211,8 +211,8 @@ static void omap4_keypad_close(struct input_dev *input)
}
#ifdef CONFIG_OF
-static int __devinit omap4_keypad_parse_dt(struct device *dev,
- struct omap4_keypad *keypad_data)
+static int omap4_keypad_parse_dt(struct device *dev,
+ struct omap4_keypad *keypad_data)
{
struct device_node *np = dev->of_node;
@@ -241,7 +241,7 @@ static inline int omap4_keypad_parse_dt(struct device *dev,
}
#endif
-static int __devinit omap4_keypad_probe(struct platform_device *pdev)
+static int omap4_keypad_probe(struct platform_device *pdev)
{
const struct omap4_keypad_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -406,7 +406,7 @@ err_free_keypad:
return error;
}
-static int __devexit omap4_keypad_remove(struct platform_device *pdev)
+static int omap4_keypad_remove(struct platform_device *pdev)
{
struct omap4_keypad *keypad_data = platform_get_drvdata(pdev);
struct resource *res;
@@ -440,7 +440,7 @@ MODULE_DEVICE_TABLE(of, omap_keypad_dt_match);
static struct platform_driver omap4_keypad_driver = {
.probe = omap4_keypad_probe,
- .remove = __devexit_p(omap4_keypad_remove),
+ .remove = omap4_keypad_remove,
.driver = {
.name = "omap4-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index abe728c7b88e..7ac5f174c6f7 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -37,7 +37,7 @@ static irqreturn_t opencores_kbd_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit opencores_kbd_probe(struct platform_device *pdev)
+static int opencores_kbd_probe(struct platform_device *pdev)
{
struct input_dev *input;
struct opencores_kbd *opencores_kbd;
@@ -139,7 +139,7 @@ static int __devinit opencores_kbd_probe(struct platform_device *pdev)
return error;
}
-static int __devexit opencores_kbd_remove(struct platform_device *pdev)
+static int opencores_kbd_remove(struct platform_device *pdev)
{
struct opencores_kbd *opencores_kbd = platform_get_drvdata(pdev);
@@ -158,7 +158,7 @@ static int __devexit opencores_kbd_remove(struct platform_device *pdev)
static struct platform_driver opencores_kbd_device_driver = {
.probe = opencores_kbd_probe,
- .remove = __devexit_p(opencores_kbd_remove),
+ .remove = opencores_kbd_remove,
.driver = {
.name = "opencores-kbd",
},
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 52c34657d301..74339e139d43 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -397,7 +397,7 @@ static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
+static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
{
int bits, rc, cycles;
u8 scan_val = 0, ctrl_val = 0;
@@ -447,7 +447,7 @@ static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
}
-static int __devinit pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
+static int pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config)
{
int rc, i;
@@ -518,7 +518,7 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
* - set irq edge type.
* - enable the keypad controller.
*/
-static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
+static int pmic8xxx_kp_probe(struct platform_device *pdev)
{
const struct pm8xxx_keypad_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -712,7 +712,7 @@ err_alloc_device:
return rc;
}
-static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
+static int pmic8xxx_kp_remove(struct platform_device *pdev)
{
struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
@@ -773,7 +773,7 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops,
static struct platform_driver pmic8xxx_kp_driver = {
.probe = pmic8xxx_kp_probe,
- .remove = __devexit_p(pmic8xxx_kp_remove),
+ .remove = pmic8xxx_kp_remove,
.driver = {
.name = PM8XXX_KEYPAD_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index cad9d5dd5973..5330d8fbf6c0 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -482,7 +482,7 @@ static const struct dev_pm_ops pxa27x_keypad_pm_ops = {
};
#endif
-static int __devinit pxa27x_keypad_probe(struct platform_device *pdev)
+static int pxa27x_keypad_probe(struct platform_device *pdev)
{
struct pxa27x_keypad_platform_data *pdata = pdev->dev.platform_data;
struct pxa27x_keypad *keypad;
@@ -595,7 +595,7 @@ failed_free:
return error;
}
-static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
+static int pxa27x_keypad_remove(struct platform_device *pdev)
{
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -620,7 +620,7 @@ MODULE_ALIAS("platform:pxa27x-keypad");
static struct platform_driver pxa27x_keypad_driver = {
.probe = pxa27x_keypad_probe,
- .remove = __devexit_p(pxa27x_keypad_remove),
+ .remove = pxa27x_keypad_remove,
.driver = {
.name = "pxa27x-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 41488f9add20..bcad95be73aa 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -82,7 +82,7 @@ static void pxa930_rotary_close(struct input_dev *dev)
clear_sbcr(r);
}
-static int __devinit pxa930_rotary_probe(struct platform_device *pdev)
+static int pxa930_rotary_probe(struct platform_device *pdev)
{
struct pxa930_rotary_platform_data *pdata = pdev->dev.platform_data;
struct pxa930_rotary *r;
@@ -174,7 +174,7 @@ failed_free:
return err;
}
-static int __devexit pxa930_rotary_remove(struct platform_device *pdev)
+static int pxa930_rotary_remove(struct platform_device *pdev)
{
struct pxa930_rotary *r = platform_get_drvdata(pdev);
@@ -193,7 +193,7 @@ static struct platform_driver pxa930_rotary_driver = {
.owner = THIS_MODULE,
},
.probe = pxa930_rotary_probe,
- .remove = __devexit_p(pxa930_rotary_remove),
+ .remove = pxa930_rotary_remove,
};
module_platform_driver(pxa930_rotary_driver);
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index ca68f2992d72..42b773b3125a 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -91,7 +91,7 @@ static int qt1070_write(struct i2c_client *client, u8 reg, u8 data)
return ret;
}
-static bool __devinit qt1070_identify(struct i2c_client *client)
+static bool qt1070_identify(struct i2c_client *client)
{
int id, ver;
@@ -140,7 +140,7 @@ static irqreturn_t qt1070_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit qt1070_probe(struct i2c_client *client,
+static int qt1070_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct qt1070_data *data;
@@ -230,7 +230,7 @@ err_free_mem:
return err;
}
-static int __devexit qt1070_remove(struct i2c_client *client)
+static int qt1070_remove(struct i2c_client *client)
{
struct qt1070_data *data = i2c_get_clientdata(client);
@@ -256,7 +256,7 @@ static struct i2c_driver qt1070_driver = {
},
.id_table = qt1070_id,
.probe = qt1070_probe,
- .remove = __devexit_p(qt1070_remove),
+ .remove = qt1070_remove,
};
module_i2c_driver(qt1070_driver);
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 76b7d430d03a..1c0ddad0a1cc 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -39,6 +40,11 @@
#define QT2160_CMD_GPIOS 6
#define QT2160_CMD_SUBVER 7
#define QT2160_CMD_CALIBRATE 10
+#define QT2160_CMD_DRIVE_X 70
+#define QT2160_CMD_PWMEN_X 74
+#define QT2160_CMD_PWM_DUTY 76
+
+#define QT2160_NUM_LEDS_X 8
#define QT2160_CYCLE_INTERVAL (2*HZ)
@@ -49,6 +55,17 @@ static unsigned char qt2160_key2code[] = {
KEY_C, KEY_D, KEY_E, KEY_F,
};
+#ifdef CONFIG_LEDS_CLASS
+struct qt2160_led {
+ struct qt2160_data *qt2160;
+ struct led_classdev cdev;
+ struct work_struct work;
+ char name[32];
+ int id;
+ enum led_brightness new_brightness;
+};
+#endif
+
struct qt2160_data {
struct i2c_client *client;
struct input_dev *input;
@@ -56,8 +73,61 @@ struct qt2160_data {
spinlock_t lock; /* Protects canceling/rescheduling of dwork */
unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)];
u16 key_matrix;
+#ifdef CONFIG_LEDS_CLASS
+ struct qt2160_led leds[QT2160_NUM_LEDS_X];
+ struct mutex led_lock;
+#endif
};
+static int qt2160_read(struct i2c_client *client, u8 reg);
+static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
+
+#ifdef CONFIG_LEDS_CLASS
+
+static void qt2160_led_work(struct work_struct *work)
+{
+ struct qt2160_led *led = container_of(work, struct qt2160_led, work);
+ struct qt2160_data *qt2160 = led->qt2160;
+ struct i2c_client *client = qt2160->client;
+ int value = led->new_brightness;
+ u32 drive, pwmen;
+
+ mutex_lock(&qt2160->led_lock);
+
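+ /* Set or clear this LED's bit in the drive and PWM-enable masks */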
+ drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
+ pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
+ if (value != LED_OFF) {
+ drive |= (1 << led->id);
+ pwmen |= (1 << led->id);
+
+ } else {
+ drive &= ~(1 << led->id);
+ pwmen &= ~(1 << led->id);
+ }
+ qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
+ qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
+
+ /*
+ * Changing this register will change the brightness
+ * of every LED in the qt2160. It's a HW limitation.
+ */
+ if (value != LED_OFF)
+ qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
+
+ mutex_unlock(&qt2160->led_lock);
+}
+
+static void qt2160_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
+
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+#endif /* CONFIG_LEDS_CLASS */
+
static int qt2160_read_block(struct i2c_client *client,
u8 inireg, u8 *buffer, unsigned int count)
{
@@ -183,7 +253,7 @@ static void qt2160_worker(struct work_struct *work)
qt2160_schedule_read(qt2160);
}
-static int __devinit qt2160_read(struct i2c_client *client, u8 reg)
+static int qt2160_read(struct i2c_client *client, u8 reg)
{
int ret;
@@ -204,29 +274,77 @@ static int __devinit qt2160_read(struct i2c_client *client, u8 reg)
return ret;
}
-static int __devinit qt2160_write(struct i2c_client *client, u8 reg, u8 data)
+static int qt2160_write(struct i2c_client *client, u8 reg, u8 data)
{
- int error;
+ int ret;
- error = i2c_smbus_write_byte(client, reg);
- if (error) {
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ if (ret < 0)
dev_err(&client->dev,
- "couldn't send request. Returned %d\n", error);
- return error;
+ "couldn't write data. Returned %d\n", ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_LEDS_CLASS
+
+static int qt2160_register_leds(struct qt2160_data *qt2160)
+{
+ struct i2c_client *client = qt2160->client;
+ int ret;
+ int i;
+
+ mutex_init(&qt2160->led_lock);
+
+ for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+ struct qt2160_led *led = &qt2160->leds[i];
+
+ snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
+ led->cdev.name = led->name;
+ led->cdev.brightness_set = qt2160_led_set;
+ led->cdev.brightness = LED_OFF;
+ led->id = i;
+ led->qt2160 = qt2160;
+
+ INIT_WORK(&led->work, qt2160_led_work);
+
+ ret = led_classdev_register(&client->dev, &led->cdev);
+ if (ret < 0)
+ return ret;
}
- error = i2c_smbus_write_byte(client, data);
- if (error) {
- dev_err(&client->dev,
- "couldn't write data. Returned %d\n", error);
- return error;
+ /* Turn off LEDs */
+ qt2160_write(client, QT2160_CMD_DRIVE_X, 0);
+ qt2160_write(client, QT2160_CMD_PWMEN_X, 0);
+ qt2160_write(client, QT2160_CMD_PWM_DUTY, 0);
+
+ return 0;
+}
+
+static void qt2160_unregister_leds(struct qt2160_data *qt2160)
+{
+ int i;
+
+ for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+ led_classdev_unregister(&qt2160->leds[i].cdev);
+ cancel_work_sync(&qt2160->leds[i].work);
}
+}
- return error;
+#else
+
+static inline int qt2160_register_leds(struct qt2160_data *qt2160)
+{
+ return 0;
+}
+
+static inline void qt2160_unregister_leds(struct qt2160_data *qt2160)
+{
}
+#endif
-static bool __devinit qt2160_identify(struct i2c_client *client)
+static bool qt2160_identify(struct i2c_client *client)
{
int id, ver, rev;
@@ -257,8 +375,8 @@ static bool __devinit qt2160_identify(struct i2c_client *client)
return true;
}
-static int __devinit qt2160_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int qt2160_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct qt2160_data *qt2160;
struct input_dev *input;
@@ -323,11 +441,17 @@ static int __devinit qt2160_probe(struct i2c_client *client,
}
}
+ error = qt2160_register_leds(qt2160);
+ if (error) {
+ dev_err(&client->dev, "Failed to register leds\n");
+ goto err_free_irq;
+ }
+
error = input_register_device(qt2160->input);
if (error) {
dev_err(&client->dev,
"Failed to register input device\n");
- goto err_free_irq;
+ goto err_unregister_leds;
}
i2c_set_clientdata(client, qt2160);
@@ -335,6 +459,8 @@ static int __devinit qt2160_probe(struct i2c_client *client,
return 0;
+err_unregister_leds:
+ qt2160_unregister_leds(qt2160);
err_free_irq:
if (client->irq)
free_irq(client->irq, qt2160);
@@ -344,10 +470,12 @@ err_free_mem:
return error;
}
-static int __devexit qt2160_remove(struct i2c_client *client)
+static int qt2160_remove(struct i2c_client *client)
{
struct qt2160_data *qt2160 = i2c_get_clientdata(client);
+ qt2160_unregister_leds(qt2160);
+
/* Release IRQ so no queue will be scheduled */
if (client->irq)
free_irq(client->irq, qt2160);
@@ -375,7 +503,7 @@ static struct i2c_driver qt2160_driver = {
.id_table = qt2160_idtable,
.probe = qt2160_probe,
- .remove = __devexit_p(qt2160_remove),
+ .remove = qt2160_remove,
};
module_i2c_driver(qt2160_driver);
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 9d7a111486f7..22e357b51024 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -309,7 +309,7 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
struct samsung_keypad *keypad)
{
struct device_node *np = dev->of_node;
- int gpio, ret, row, col;
+ int gpio, error, row, col;
for (row = 0; row < keypad->rows; row++) {
gpio = of_get_named_gpio(np, "row-gpios", row);
@@ -320,10 +320,11 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
continue;
}
- ret = gpio_request(gpio, "keypad-row");
- if (ret)
- dev_err(dev, "keypad row[%d] gpio request failed\n",
- row);
+ error = devm_gpio_request(dev, gpio, "keypad-row");
+ if (error)
+ dev_err(dev,
+ "keypad row[%d] gpio request failed: %d\n",
+ row, error);
}
for (col = 0; col < keypad->cols; col++) {
@@ -335,38 +336,22 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
continue;
}
- ret = gpio_request(gpio, "keypad-col");
- if (ret)
- dev_err(dev, "keypad column[%d] gpio request failed\n",
- col);
+ error = devm_gpio_request(dev, gpio, "keypad-col");
+ if (error)
+ dev_err(dev,
+ "keypad column[%d] gpio request failed: %d\n",
+ col, error);
}
}
-
-static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
-{
- int cnt;
-
- for (cnt = 0; cnt < keypad->rows; cnt++)
- if (gpio_is_valid(keypad->row_gpios[cnt]))
- gpio_free(keypad->row_gpios[cnt]);
-
- for (cnt = 0; cnt < keypad->cols; cnt++)
- if (gpio_is_valid(keypad->col_gpios[cnt]))
- gpio_free(keypad->col_gpios[cnt]);
-}
#else
static
struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
{
return NULL;
}
-
-static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
-{
-}
#endif
-static int __devinit samsung_keypad_probe(struct platform_device *pdev)
+static int samsung_keypad_probe(struct platform_device *pdev)
{
const struct samsung_keypad_platdata *pdata;
const struct matrix_keymap_data *keymap_data;
@@ -405,36 +390,30 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
row_shift = get_count_order(pdata->cols);
keymap_size = (pdata->rows << row_shift) * sizeof(keypad->keycodes[0]);
- keypad = kzalloc(sizeof(*keypad) + keymap_size, GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!keypad || !input_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad) + keymap_size,
+ GFP_KERNEL);
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!keypad || !input_dev)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- error = -ENODEV;
- goto err_free_mem;
- }
+ if (!res)
+ return -ENODEV;
- keypad->base = ioremap(res->start, resource_size(res));
- if (!keypad->base) {
- error = -EBUSY;
- goto err_free_mem;
- }
+ keypad->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!keypad->base)
+ return -EBUSY;
- keypad->clk = clk_get(&pdev->dev, "keypad");
+ keypad->clk = devm_clk_get(&pdev->dev, "keypad");
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get keypad clk\n");
- error = PTR_ERR(keypad->clk);
- goto err_unmap_base;
+ return PTR_ERR(keypad->clk);
}
error = clk_prepare(keypad->clk);
if (error) {
dev_err(&pdev->dev, "keypad clock prepare failed\n");
- goto err_put_clk;
+ return error;
}
keypad->input_dev = input_dev;
@@ -479,14 +458,15 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0) {
error = keypad->irq;
- goto err_put_clk;
+ goto err_unprepare_clk;
}
- error = request_threaded_irq(keypad->irq, NULL, samsung_keypad_irq,
- IRQF_ONESHOT, dev_name(&pdev->dev), keypad);
+ error = devm_request_threaded_irq(&pdev->dev, keypad->irq, NULL,
+ samsung_keypad_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), keypad);
if (error) {
dev_err(&pdev->dev, "failed to register keypad interrupt\n");
- goto err_put_clk;
+ goto err_unprepare_clk;
}
device_init_wakeup(&pdev->dev, pdata->wakeup);
@@ -495,7 +475,7 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
error = input_register_device(keypad->input_dev);
if (error)
- goto err_free_irq;
+ goto err_disable_runtime_pm;
if (pdev->dev.of_node) {
devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
@@ -504,26 +484,16 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
}
return 0;
-err_free_irq:
- free_irq(keypad->irq, keypad);
+err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
err_unprepare_clk:
clk_unprepare(keypad->clk);
-err_put_clk:
- clk_put(keypad->clk);
- samsung_keypad_dt_gpio_free(keypad);
-err_unmap_base:
- iounmap(keypad->base);
-err_free_mem:
- input_free_device(input_dev);
- kfree(keypad);
-
return error;
}
-static int __devexit samsung_keypad_remove(struct platform_device *pdev)
+static int samsung_keypad_remove(struct platform_device *pdev)
{
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
@@ -533,18 +503,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input_dev);
- /*
- * It is safe to free IRQ after unregistering device because
- * samsung_keypad_close will shut off interrupts.
- */
- free_irq(keypad->irq, keypad);
-
clk_unprepare(keypad->clk);
- clk_put(keypad->clk);
- samsung_keypad_dt_gpio_free(keypad);
-
- iounmap(keypad->base);
- kfree(keypad);
return 0;
}
@@ -685,7 +644,7 @@ MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids);
static struct platform_driver samsung_keypad_driver = {
.probe = samsung_keypad_probe,
- .remove = __devexit_p(samsung_keypad_remove),
+ .remove = samsung_keypad_remove,
.driver = {
.name = "samsung-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index da54ad5db154..fdb9eb2df380 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -162,7 +162,7 @@ static irqreturn_t sh_keysc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit sh_keysc_probe(struct platform_device *pdev)
+static int sh_keysc_probe(struct platform_device *pdev)
{
struct sh_keysc_priv *priv;
struct sh_keysc_info *pdata;
@@ -272,7 +272,7 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
return error;
}
-static int __devexit sh_keysc_remove(struct platform_device *pdev)
+static int sh_keysc_remove(struct platform_device *pdev)
{
struct sh_keysc_priv *priv = platform_get_drvdata(pdev);
@@ -331,7 +331,7 @@ static SIMPLE_DEV_PM_OPS(sh_keysc_dev_pm_ops,
static struct platform_driver sh_keysc_device_driver = {
.probe = sh_keysc_probe,
- .remove = __devexit_p(sh_keysc_remove),
+ .remove = sh_keysc_remove,
.driver = {
.name = "sh_keysc",
.pm = &sh_keysc_dev_pm_ops,
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index c7ca97f44bfb..cb1e8f614631 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -55,15 +55,15 @@
struct spear_kbd {
struct input_dev *input;
- struct resource *res;
void __iomem *io_base;
struct clk *clk;
unsigned int irq;
unsigned int mode;
+ unsigned int suspended_rate;
unsigned short last_key;
unsigned short keycodes[NUM_ROWS * NUM_COLS];
bool rep;
- unsigned int suspended_rate;
+ bool irq_wake_enabled;
u32 mode_ctl_reg;
};
@@ -146,7 +146,7 @@ static void spear_kbd_close(struct input_dev *dev)
}
#ifdef CONFIG_OF
-static int __devinit spear_kbd_parse_dt(struct platform_device *pdev,
+static int spear_kbd_parse_dt(struct platform_device *pdev,
struct spear_kbd *kbd)
{
struct device_node *np = pdev->dev.of_node;
@@ -181,7 +181,7 @@ static inline int spear_kbd_parse_dt(struct platform_device *pdev,
}
#endif
-static int __devinit spear_kbd_probe(struct platform_device *pdev)
+static int spear_kbd_probe(struct platform_device *pdev)
{
struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
@@ -203,12 +203,16 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
return irq;
}
- kbd = kzalloc(sizeof(*kbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!kbd || !input_dev) {
- dev_err(&pdev->dev, "out of memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ kbd = devm_kzalloc(&pdev->dev, sizeof(*kbd), GFP_KERNEL);
+ if (!kbd) {
+ dev_err(&pdev->dev, "not enough memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
+ dev_err(&pdev->dev, "unable to allocate input device\n");
+ return -ENOMEM;
}
kbd->input = input_dev;
@@ -217,37 +221,23 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
if (!pdata) {
error = spear_kbd_parse_dt(pdev, kbd);
if (error)
- goto err_free_mem;
+ return error;
} else {
kbd->mode = pdata->mode;
kbd->rep = pdata->rep;
kbd->suspended_rate = pdata->suspended_rate;
}
- kbd->res = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!kbd->res) {
- dev_err(&pdev->dev, "keyboard region already claimed\n");
- error = -EBUSY;
- goto err_free_mem;
- }
-
- kbd->io_base = ioremap(res->start, resource_size(res));
- if (!kbd->io_base) {
- dev_err(&pdev->dev, "ioremap failed for kbd_region\n");
- error = -ENOMEM;
- goto err_release_mem_region;
- }
+ kbd->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kbd->io_base))
+ return PTR_ERR(kbd->io_base);
- kbd->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(kbd->clk)) {
- error = PTR_ERR(kbd->clk);
- goto err_iounmap;
- }
+ kbd->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(kbd->clk))
+ return PTR_ERR(kbd->clk);
input_dev->name = "Spear Keyboard";
input_dev->phys = "keyboard/input0";
- input_dev->dev.parent = &pdev->dev;
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0x0001;
input_dev->id.product = 0x0001;
@@ -259,7 +249,7 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
kbd->keycodes, input_dev);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_put_clk;
+ return error;
}
if (kbd->rep)
@@ -268,48 +258,36 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, kbd);
- error = request_irq(irq, spear_kbd_interrupt, 0, "keyboard", kbd);
+ error = devm_request_irq(&pdev->dev, irq, spear_kbd_interrupt, 0,
+ "keyboard", kbd);
if (error) {
- dev_err(&pdev->dev, "request_irq fail\n");
- goto err_put_clk;
+ dev_err(&pdev->dev, "request_irq failed\n");
+ return error;
}
+ error = clk_prepare(kbd->clk);
+ if (error)
+ return error;
+
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "Unable to register keyboard device\n");
- goto err_free_irq;
+ clk_unprepare(kbd->clk);
+ return error;
}
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, kbd);
return 0;
-
-err_free_irq:
- free_irq(kbd->irq, kbd);
-err_put_clk:
- clk_put(kbd->clk);
-err_iounmap:
- iounmap(kbd->io_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input_dev);
- kfree(kbd);
-
- return error;
}
-static int __devexit spear_kbd_remove(struct platform_device *pdev)
+static int spear_kbd_remove(struct platform_device *pdev)
{
struct spear_kbd *kbd = platform_get_drvdata(pdev);
- free_irq(kbd->irq, kbd);
input_unregister_device(kbd->input);
- clk_put(kbd->clk);
- iounmap(kbd->io_base);
- release_mem_region(kbd->res->start, resource_size(kbd->res));
- kfree(kbd);
+ clk_unprepare(kbd->clk);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
@@ -333,7 +311,8 @@ static int spear_kbd_suspend(struct device *dev)
mode_ctl_reg = readl_relaxed(kbd->io_base + MODE_CTL_REG);
if (device_may_wakeup(&pdev->dev)) {
- enable_irq_wake(kbd->irq);
+ if (!enable_irq_wake(kbd->irq))
+ kbd->irq_wake_enabled = true;
/*
* reprogram the keyboard operating frequency as on some
@@ -379,7 +358,10 @@ static int spear_kbd_resume(struct device *dev)
mutex_lock(&input_dev->mutex);
if (device_may_wakeup(&pdev->dev)) {
- disable_irq_wake(kbd->irq);
+ if (kbd->irq_wake_enabled) {
+ kbd->irq_wake_enabled = false;
+ disable_irq_wake(kbd->irq);
+ }
} else {
if (input_dev->users)
clk_enable(kbd->clk);
@@ -407,7 +389,7 @@ MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
static struct platform_driver spear_kbd_driver = {
.probe = spear_kbd_probe,
- .remove = __devexit_p(spear_kbd_remove),
+ .remove = spear_kbd_remove,
.driver = {
.name = "keyboard",
.owner = THIS_MODULE,
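
The SPEAr conversion above is representative of the pattern applied throughout this series: allocations, MMIO mappings, clocks and IRQs move to their devm_* managed variants, so the explicit unwind labels in probe() and most of the teardown in remove() disappear. A minimal sketch of that shape, using hypothetical foo_* names rather than the SPEAr code itself:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical per-device state; the names are illustrative only. */
struct foo_kbd {
	void __iomem *base;
	struct clk *clk;
};

static irqreturn_t foo_kbd_irq(int irq, void *dev_id)
{
	/* A real handler would read the status register and report keys. */
	return IRQ_HANDLED;
}

static int foo_kbd_probe(struct platform_device *pdev)
{
	struct foo_kbd *kbd;
	struct resource *res;
	int irq, error;

	kbd = devm_kzalloc(&pdev->dev, sizeof(*kbd), GFP_KERNEL);
	if (!kbd)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	kbd->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(kbd->base))
		return PTR_ERR(kbd->base);

	kbd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(kbd->clk))
		return PTR_ERR(kbd->clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	error = devm_request_irq(&pdev->dev, irq, foo_kbd_irq, 0,
				 dev_name(&pdev->dev), kbd);
	if (error)
		return error;

	/*
	 * No unwind labels: everything requested above is released by the
	 * driver core when probe fails or when the device is unbound.
	 */
	platform_set_drvdata(pdev, kbd);
	return 0;
}

Ordering that devm cannot express stays explicit, which is why the SPEAr probe above still calls clk_prepare() itself and its remove() still unprepares the clock and unregisters the input device.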
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 470a8778dec1..5cbec56f7720 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -166,7 +166,7 @@ static irqreturn_t stmpe_keypad_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int __devinit stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
+static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
{
const struct stmpe_keypad_variant *variant = keypad->variant;
unsigned int col_gpios = variant->col_gpios;
@@ -207,7 +207,7 @@ static int __devinit stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
}
-static int __devinit stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
+static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
{
const struct stmpe_keypad_platform_data *plat = keypad->plat;
const struct stmpe_keypad_variant *variant = keypad->variant;
@@ -257,105 +257,131 @@ static int __devinit stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
(plat->debounce_ms << 1));
}
-static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
{
- struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ int row, col;
+
+ for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
+ for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+ int code = MATRIX_SCAN_CODE(row, col,
+ STMPE_KEYPAD_ROW_SHIFT);
+ if (keypad->keymap[code] != KEY_RESERVED) {
+ keypad->rows |= 1 << row;
+ keypad->cols |= 1 << col;
+ }
+ }
+ }
+}
+
+#ifdef CONFIG_OF
+static const struct stmpe_keypad_platform_data *
+stmpe_keypad_of_probe(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
struct stmpe_keypad_platform_data *plat;
+
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
+ of_property_read_u32(np, "st,scan-count", &plat->scan_count);
+
+ plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
+ return plat;
+}
+#else
+static inline const struct stmpe_keypad_platform_data *
+stmpe_keypad_of_probe(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int stmpe_keypad_probe(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ const struct stmpe_keypad_platform_data *plat;
struct stmpe_keypad *keypad;
struct input_dev *input;
- int ret;
+ int error;
int irq;
- int i;
plat = stmpe->pdata->keypad;
- if (!plat)
- return -ENODEV;
+ if (!plat) {
+ plat = stmpe_keypad_of_probe(&pdev->dev);
+ if (IS_ERR(plat))
+ return PTR_ERR(plat);
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- keypad = kzalloc(sizeof(struct stmpe_keypad), GFP_KERNEL);
+ keypad = devm_kzalloc(&pdev->dev, sizeof(struct stmpe_keypad),
+ GFP_KERNEL);
if (!keypad)
return -ENOMEM;
- input = input_allocate_device();
- if (!input) {
- ret = -ENOMEM;
- goto out_freekeypad;
- }
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
input->name = "STMPE keypad";
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- ret = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- STMPE_KEYPAD_MAX_ROWS,
- STMPE_KEYPAD_MAX_COLS,
- keypad->keymap, input);
- if (ret)
- goto out_freeinput;
+ error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ STMPE_KEYPAD_MAX_ROWS,
+ STMPE_KEYPAD_MAX_COLS,
+ keypad->keymap, input);
+ if (error)
+ return error;
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- for (i = 0; i < plat->keymap_data->keymap_size; i++) {
- unsigned int key = plat->keymap_data->keymap[i];
-
- keypad->cols |= 1 << KEY_COL(key);
- keypad->rows |= 1 << KEY_ROW(key);
- }
+ stmpe_keypad_fill_used_pins(keypad);
keypad->stmpe = stmpe;
keypad->plat = plat;
keypad->input = input;
keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
- ret = stmpe_keypad_chip_init(keypad);
- if (ret < 0)
- goto out_freeinput;
+ error = stmpe_keypad_chip_init(keypad);
+ if (error < 0)
+ return error;
- ret = input_register_device(input);
- if (ret) {
- dev_err(&pdev->dev,
- "unable to register input device: %d\n", ret);
- goto out_freeinput;
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, stmpe_keypad_irq,
+ IRQF_ONESHOT, "stmpe-keypad", keypad);
+ if (error) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", error);
+ return error;
}
- ret = request_threaded_irq(irq, NULL, stmpe_keypad_irq, IRQF_ONESHOT,
- "stmpe-keypad", keypad);
- if (ret) {
- dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
- goto out_unregisterinput;
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", error);
+ return error;
}
platform_set_drvdata(pdev, keypad);
return 0;
-
-out_unregisterinput:
- input_unregister_device(input);
- input = NULL;
-out_freeinput:
- input_free_device(input);
-out_freekeypad:
- kfree(keypad);
- return ret;
}
-static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
+static int stmpe_keypad_remove(struct platform_device *pdev)
{
struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
- struct stmpe *stmpe = keypad->stmpe;
- int irq = platform_get_irq(pdev, 0);
-
- stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
- free_irq(irq, keypad);
- input_unregister_device(keypad->input);
- platform_set_drvdata(pdev, NULL);
- kfree(keypad);
+ stmpe_disable(keypad->stmpe, STMPE_BLOCK_KEYPAD);
return 0;
}
@@ -364,7 +390,7 @@ static struct platform_driver stmpe_keypad_driver = {
.driver.name = "stmpe-keypad",
.driver.owner = THIS_MODULE,
.probe = stmpe_keypad_probe,
- .remove = __devexit_p(stmpe_keypad_remove),
+ .remove = stmpe_keypad_remove,
};
module_platform_driver(stmpe_keypad_driver);
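
The same probe-time fallback — use platform data when the board supplies it, otherwise build an equivalent structure from device-tree properties — recurs in the stmpe, tca8418 and tegra conversions in this series. Stripped to its skeleton (hypothetical foo_* names, with properties modelled on the stmpe bindings above):

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical platform data; mirrors the fields parsed from DT below. */
struct foo_platform_data {
	u32 debounce_ms;
	bool no_autorepeat;
};

static const struct foo_platform_data *foo_of_probe(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct foo_platform_data *plat;

	if (!np)
		return ERR_PTR(-ENODEV);

	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	/* Optional properties leave the zero-initialized defaults in place. */
	of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
	plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");

	return plat;
}

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_platform_data *plat = dev_get_platdata(&pdev->dev);

	if (!plat) {
		plat = foo_of_probe(&pdev->dev);
		if (IS_ERR(plat))
			return PTR_ERR(plat);
	}

	/* From here on the probe path is identical for both sources. */
	return 0;
}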
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 7d498e698508..2fb0d76a04c4 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -299,7 +299,7 @@ static void tc3589x_keypad_close(struct input_dev *input)
tc3589x_keypad_disable(keypad);
}
-static int __devinit tc3589x_keypad_probe(struct platform_device *pdev)
+static int tc3589x_keypad_probe(struct platform_device *pdev)
{
struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
struct tc_keypad *keypad;
@@ -382,7 +382,7 @@ err_free_mem:
return error;
}
-static int __devexit tc3589x_keypad_remove(struct platform_device *pdev)
+static int tc3589x_keypad_remove(struct platform_device *pdev)
{
struct tc_keypad *keypad = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -448,7 +448,7 @@ static struct platform_driver tc3589x_keypad_driver = {
.pm = &tc3589x_keypad_dev_pm_ops,
},
.probe = tc3589x_keypad_probe,
- .remove = __devexit_p(tc3589x_keypad_remove),
+ .remove = tc3589x_keypad_remove,
};
module_platform_driver(tc3589x_keypad_driver);
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index c355cdde8d22..bfc832c35a7c 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -166,7 +166,7 @@ static void tca6416_keys_close(struct input_dev *dev)
disable_irq(chip->irqnum);
}
-static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
+static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
{
int error;
@@ -197,7 +197,7 @@ static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
return 0;
}
-static int __devinit tca6416_keypad_probe(struct i2c_client *client,
+static int tca6416_keypad_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tca6416_keys_platform_data *pdata;
@@ -313,7 +313,7 @@ fail1:
return error;
}
-static int __devexit tca6416_keypad_remove(struct i2c_client *client)
+static int tca6416_keypad_remove(struct i2c_client *client)
{
struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
@@ -361,7 +361,7 @@ static struct i2c_driver tca6416_keypad_driver = {
.pm = &tca6416_keypad_dev_pm_ops,
},
.probe = tca6416_keypad_probe,
- .remove = __devexit_p(tca6416_keypad_remove),
+ .remove = tca6416_keypad_remove,
.id_table = tca6416_id,
};
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 893869b29ed9..a34cc6714e5b 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -35,6 +35,7 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/tca8418_keypad.h>
+#include <linux/of.h>
/* TCA8418 hardware limits */
#define TCA8418_MAX_ROWS 8
@@ -109,25 +110,11 @@
#define KEY_EVENT_CODE 0x7f
#define KEY_EVENT_VALUE 0x80
-
-static const struct i2c_device_id tca8418_id[] = {
- { TCA8418_NAME, 8418, },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tca8418_id);
-
struct tca8418_keypad {
- unsigned int rows;
- unsigned int cols;
- unsigned int keypad_mask; /* Mask for keypad col/rol regs */
- unsigned int irq;
- unsigned int row_shift;
-
struct i2c_client *client;
struct input_dev *input;
- /* Flexible array member, must be at end of struct */
- unsigned short keymap[];
+ unsigned int row_shift;
};
/*
@@ -172,6 +159,8 @@ static int tca8418_read_byte(struct tca8418_keypad *keypad_data,
static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
{
+ struct input_dev *input = keypad_data->input;
+ unsigned short *keymap = input->keycode;
int error, col, row;
u8 reg, state, code;
@@ -190,9 +179,8 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
col = (col) ? col - 1 : TCA8418_MAX_COLS - 1;
code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
- input_event(keypad_data->input, EV_MSC, MSC_SCAN, code);
- input_report_key(keypad_data->input,
- keypad_data->keymap[code], state);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keymap[code], state);
/* Read for next loop */
error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
@@ -202,7 +190,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
dev_err(&keypad_data->client->dev,
"unable to read REG_KEY_EVENT_A\n");
- input_sync(keypad_data->input);
+ input_sync(input);
}
/*
@@ -218,16 +206,18 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
if (error) {
dev_err(&keypad_data->client->dev,
"unable to read REG_INT_STAT\n");
- goto exit;
+ return IRQ_NONE;
}
+ if (!reg)
+ return IRQ_NONE;
+
if (reg & INT_STAT_OVR_FLOW_INT)
dev_warn(&keypad_data->client->dev, "overflow occurred\n");
if (reg & INT_STAT_K_INT)
tca8418_read_keypad(keypad_data);
-exit:
/* Clear all interrupts, even IRQs we didn't check (GPI, CAD, LCK) */
reg = 0xff;
error = tca8418_write_byte(keypad_data, REG_INT_STAT, reg);
@@ -241,7 +231,8 @@ exit:
/*
* Configure the TCA8418 for keypad operation
*/
-static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
+static int tca8418_configure(struct tca8418_keypad *keypad_data,
+ u32 rows, u32 cols)
{
int reg, error;
@@ -253,9 +244,8 @@ static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
/* Assemble a mask for row and column registers */
- reg = ~(~0 << keypad_data->rows);
- reg += (~(~0 << keypad_data->cols)) << 8;
- keypad_data->keypad_mask = reg;
+ reg = ~(~0 << rows);
+ reg += (~(~0 << cols)) << 8;
/* Set registers to keypad mode */
error |= tca8418_write_byte(keypad_data, REG_KP_GPIO1, reg);
@@ -270,145 +260,144 @@ static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
return error;
}
-static int __devinit tca8418_keypad_probe(struct i2c_client *client,
+static int tca8418_keypad_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
const struct tca8418_keypad_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(dev);
struct tca8418_keypad *keypad_data;
struct input_dev *input;
+ const struct matrix_keymap_data *keymap_data = NULL;
+ u32 rows = 0, cols = 0;
+ bool rep = false;
+ bool irq_is_gpio = false;
+ int irq;
int error, row_shift, max_keys;
/* Copy the platform data */
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data\n");
- return -EINVAL;
- }
-
- if (!pdata->keymap_data) {
- dev_err(&client->dev, "no keymap data defined\n");
- return -EINVAL;
+ if (pdata) {
+ if (!pdata->keymap_data) {
+ dev_err(dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+ keymap_data = pdata->keymap_data;
+ rows = pdata->rows;
+ cols = pdata->cols;
+ rep = pdata->rep;
+ irq_is_gpio = pdata->irq_is_gpio;
+ } else {
+ struct device_node *np = dev->of_node;
+ of_property_read_u32(np, "keypad,num-rows", &rows);
+ of_property_read_u32(np, "keypad,num-columns", &cols);
+ rep = of_property_read_bool(np, "keypad,autorepeat");
}
- if (!pdata->rows || pdata->rows > TCA8418_MAX_ROWS) {
- dev_err(&client->dev, "invalid rows\n");
+ if (!rows || rows > TCA8418_MAX_ROWS) {
+ dev_err(dev, "invalid rows\n");
return -EINVAL;
}
- if (!pdata->cols || pdata->cols > TCA8418_MAX_COLS) {
- dev_err(&client->dev, "invalid columns\n");
+ if (!cols || cols > TCA8418_MAX_COLS) {
+ dev_err(dev, "invalid columns\n");
return -EINVAL;
}
/* Check i2c driver capabilities */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
- dev_err(&client->dev, "%s adapter not supported\n",
+ dev_err(dev, "%s adapter not supported\n",
dev_driver_string(&client->adapter->dev));
return -ENODEV;
}
- row_shift = get_count_order(pdata->cols);
- max_keys = pdata->rows << row_shift;
+ row_shift = get_count_order(cols);
+ max_keys = rows << row_shift;
- /* Allocate memory for keypad_data, keymap and input device */
- keypad_data = kzalloc(sizeof(*keypad_data) +
- max_keys * sizeof(keypad_data->keymap[0]), GFP_KERNEL);
+ /* Allocate memory for keypad_data and input device */
+ keypad_data = devm_kzalloc(dev, sizeof(*keypad_data), GFP_KERNEL);
if (!keypad_data)
return -ENOMEM;
- keypad_data->rows = pdata->rows;
- keypad_data->cols = pdata->cols;
keypad_data->client = client;
keypad_data->row_shift = row_shift;
/* Initialize the chip or fail if chip isn't present */
- error = tca8418_configure(keypad_data);
+ error = tca8418_configure(keypad_data, rows, cols);
if (error < 0)
- goto fail1;
+ return error;
/* Configure input device */
- input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto fail1;
- }
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
keypad_data->input = input;
input->name = client->name;
- input->dev.parent = &client->dev;
-
input->id.bustype = BUS_I2C;
input->id.vendor = 0x0001;
input->id.product = 0x001;
input->id.version = 0x0001;
- error = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
- pdata->rows, pdata->cols,
- keypad_data->keymap, input);
+ error = matrix_keypad_build_keymap(keymap_data, NULL, rows, cols,
+ NULL, input);
if (error) {
- dev_dbg(&client->dev, "Failed to build keymap\n");
- goto fail2;
+ dev_err(dev, "Failed to build keymap\n");
+ return error;
}
- if (pdata->rep)
+ if (rep)
__set_bit(EV_REP, input->evbit);
input_set_capability(input, EV_MSC, MSC_SCAN);
input_set_drvdata(input, keypad_data);
- if (pdata->irq_is_gpio)
- client->irq = gpio_to_irq(client->irq);
+ irq = client->irq;
+ if (irq_is_gpio)
+ irq = gpio_to_irq(irq);
- error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->name, keypad_data);
+ error = devm_request_threaded_irq(dev, irq, NULL, tca8418_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_SHARED |
+ IRQF_ONESHOT,
+ client->name, keypad_data);
if (error) {
- dev_dbg(&client->dev,
- "Unable to claim irq %d; error %d\n",
+ dev_err(dev, "Unable to claim irq %d; error %d\n",
client->irq, error);
- goto fail2;
+ return error;
}
error = input_register_device(input);
if (error) {
- dev_dbg(&client->dev,
- "Unable to register input device, error: %d\n", error);
- goto fail3;
+ dev_err(dev, "Unable to register input device, error: %d\n",
+ error);
+ return error;
}
- i2c_set_clientdata(client, keypad_data);
return 0;
-
-fail3:
- free_irq(client->irq, keypad_data);
-fail2:
- input_free_device(input);
-fail1:
- kfree(keypad_data);
- return error;
}
-static int __devexit tca8418_keypad_remove(struct i2c_client *client)
-{
- struct tca8418_keypad *keypad_data = i2c_get_clientdata(client);
-
- free_irq(keypad_data->client->irq, keypad_data);
-
- input_unregister_device(keypad_data->input);
-
- kfree(keypad_data);
-
- return 0;
-}
+static const struct i2c_device_id tca8418_id[] = {
+ { TCA8418_NAME, 8418, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca8418_id);
+#ifdef CONFIG_OF
+static const struct of_device_id tca8418_dt_ids[] = {
+ { .compatible = "ti,tca8418", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tca8418_dt_ids);
+#endif
static struct i2c_driver tca8418_keypad_driver = {
.driver = {
.name = TCA8418_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tca8418_dt_ids),
},
.probe = tca8418_keypad_probe,
- .remove = __devexit_p(tca8418_keypad_remove),
.id_table = tca8418_id,
};
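
Worth noting in the tca8418 changes above: the interrupt handler now bails out with IRQ_NONE when the status read fails or reports nothing pending, which is what makes the newly added IRQF_SHARED flag safe on a shared line. The pattern, reduced to a sketch (the foo_* helpers are hypothetical stand-ins for the driver's register accessors):

#include <linux/interrupt.h>
#include <linux/types.h>

struct foo_device;

/* Hypothetical register accessors, declared only to keep the sketch short. */
static int foo_read_int_status(struct foo_device *foo, u8 *status);
static void foo_handle_events(struct foo_device *foo, u8 status);
static void foo_ack_interrupts(struct foo_device *foo);

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;
	u8 status;

	if (foo_read_int_status(foo, &status))
		return IRQ_NONE;	/* bus error: do not claim the interrupt */

	if (!status)
		return IRQ_NONE;	/* nothing pending: another device raised the line */

	foo_handle_events(foo, status);
	foo_ack_interrupts(foo);	/* clear the bits that were serviced */

	return IRQ_HANDLED;
}

Reporting IRQ_NONE for invocations that are not ours also keeps the kernel's spurious-interrupt accounting honest instead of silently claiming every assertion of the shared line.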
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 5faaf2553e33..0e138ebcc768 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -29,8 +29,15 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/slab.h>
-#include <linux/input/tegra_kbc.h>
-#include <mach/clk.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/clk/tegra.h>
+
+#define KBC_MAX_GPIO 24
+#define KBC_MAX_KPENT 8
+
+#define KBC_MAX_ROW 16
+#define KBC_MAX_COL 8
+#define KBC_MAX_KEY (KBC_MAX_ROW * KBC_MAX_COL)
#define KBC_MAX_DEBOUNCE_CNT 0x3ffu
@@ -67,10 +74,27 @@
#define KBC_ROW_SHIFT 3
+enum tegra_pin_type {
+ PIN_CFG_IGNORE,
+ PIN_CFG_COL,
+ PIN_CFG_ROW,
+};
+
+struct tegra_kbc_pin_cfg {
+ enum tegra_pin_type type;
+ unsigned char num;
+};
+
struct tegra_kbc {
+ struct device *dev;
+ unsigned int debounce_cnt;
+ unsigned int repeat_cnt;
+ struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
+ const struct matrix_keymap_data *keymap_data;
+ bool wakeup;
void __iomem *mmio;
struct input_dev *idev;
- unsigned int irq;
+ int irq;
spinlock_t lock;
unsigned int repoll_dly;
unsigned long cp_dly_jiffies;
@@ -78,7 +102,6 @@ struct tegra_kbc {
bool use_fn_map;
bool use_ghost_filter;
bool keypress_caused_wake;
- const struct tegra_kbc_platform_data *pdata;
unsigned short keycode[KBC_MAX_KEY * 2];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
@@ -87,147 +110,6 @@ struct tegra_kbc {
struct clk *clk;
};
-static const u32 tegra_kbc_default_keymap[] __devinitdata = {
- KEY(0, 2, KEY_W),
- KEY(0, 3, KEY_S),
- KEY(0, 4, KEY_A),
- KEY(0, 5, KEY_Z),
- KEY(0, 7, KEY_FN),
-
- KEY(1, 7, KEY_LEFTMETA),
-
- KEY(2, 6, KEY_RIGHTALT),
- KEY(2, 7, KEY_LEFTALT),
-
- KEY(3, 0, KEY_5),
- KEY(3, 1, KEY_4),
- KEY(3, 2, KEY_R),
- KEY(3, 3, KEY_E),
- KEY(3, 4, KEY_F),
- KEY(3, 5, KEY_D),
- KEY(3, 6, KEY_X),
-
- KEY(4, 0, KEY_7),
- KEY(4, 1, KEY_6),
- KEY(4, 2, KEY_T),
- KEY(4, 3, KEY_H),
- KEY(4, 4, KEY_G),
- KEY(4, 5, KEY_V),
- KEY(4, 6, KEY_C),
- KEY(4, 7, KEY_SPACE),
-
- KEY(5, 0, KEY_9),
- KEY(5, 1, KEY_8),
- KEY(5, 2, KEY_U),
- KEY(5, 3, KEY_Y),
- KEY(5, 4, KEY_J),
- KEY(5, 5, KEY_N),
- KEY(5, 6, KEY_B),
- KEY(5, 7, KEY_BACKSLASH),
-
- KEY(6, 0, KEY_MINUS),
- KEY(6, 1, KEY_0),
- KEY(6, 2, KEY_O),
- KEY(6, 3, KEY_I),
- KEY(6, 4, KEY_L),
- KEY(6, 5, KEY_K),
- KEY(6, 6, KEY_COMMA),
- KEY(6, 7, KEY_M),
-
- KEY(7, 1, KEY_EQUAL),
- KEY(7, 2, KEY_RIGHTBRACE),
- KEY(7, 3, KEY_ENTER),
- KEY(7, 7, KEY_MENU),
-
- KEY(8, 4, KEY_RIGHTSHIFT),
- KEY(8, 5, KEY_LEFTSHIFT),
-
- KEY(9, 5, KEY_RIGHTCTRL),
- KEY(9, 7, KEY_LEFTCTRL),
-
- KEY(11, 0, KEY_LEFTBRACE),
- KEY(11, 1, KEY_P),
- KEY(11, 2, KEY_APOSTROPHE),
- KEY(11, 3, KEY_SEMICOLON),
- KEY(11, 4, KEY_SLASH),
- KEY(11, 5, KEY_DOT),
-
- KEY(12, 0, KEY_F10),
- KEY(12, 1, KEY_F9),
- KEY(12, 2, KEY_BACKSPACE),
- KEY(12, 3, KEY_3),
- KEY(12, 4, KEY_2),
- KEY(12, 5, KEY_UP),
- KEY(12, 6, KEY_PRINT),
- KEY(12, 7, KEY_PAUSE),
-
- KEY(13, 0, KEY_INSERT),
- KEY(13, 1, KEY_DELETE),
- KEY(13, 3, KEY_PAGEUP),
- KEY(13, 4, KEY_PAGEDOWN),
- KEY(13, 5, KEY_RIGHT),
- KEY(13, 6, KEY_DOWN),
- KEY(13, 7, KEY_LEFT),
-
- KEY(14, 0, KEY_F11),
- KEY(14, 1, KEY_F12),
- KEY(14, 2, KEY_F8),
- KEY(14, 3, KEY_Q),
- KEY(14, 4, KEY_F4),
- KEY(14, 5, KEY_F3),
- KEY(14, 6, KEY_1),
- KEY(14, 7, KEY_F7),
-
- KEY(15, 0, KEY_ESC),
- KEY(15, 1, KEY_GRAVE),
- KEY(15, 2, KEY_F5),
- KEY(15, 3, KEY_TAB),
- KEY(15, 4, KEY_F1),
- KEY(15, 5, KEY_F2),
- KEY(15, 6, KEY_CAPSLOCK),
- KEY(15, 7, KEY_F6),
-
- /* Software Handled Function Keys */
- KEY(20, 0, KEY_KP7),
-
- KEY(21, 0, KEY_KP9),
- KEY(21, 1, KEY_KP8),
- KEY(21, 2, KEY_KP4),
- KEY(21, 4, KEY_KP1),
-
- KEY(22, 1, KEY_KPSLASH),
- KEY(22, 2, KEY_KP6),
- KEY(22, 3, KEY_KP5),
- KEY(22, 4, KEY_KP3),
- KEY(22, 5, KEY_KP2),
- KEY(22, 7, KEY_KP0),
-
- KEY(27, 1, KEY_KPASTERISK),
- KEY(27, 3, KEY_KPMINUS),
- KEY(27, 4, KEY_KPPLUS),
- KEY(27, 5, KEY_KPDOT),
-
- KEY(28, 5, KEY_VOLUMEUP),
-
- KEY(29, 3, KEY_HOME),
- KEY(29, 4, KEY_END),
- KEY(29, 5, KEY_BRIGHTNESSDOWN),
- KEY(29, 6, KEY_VOLUMEDOWN),
- KEY(29, 7, KEY_BRIGHTNESSUP),
-
- KEY(30, 0, KEY_NUMLOCK),
- KEY(30, 1, KEY_SCROLLLOCK),
- KEY(30, 2, KEY_MUTE),
-
- KEY(31, 4, KEY_HELP),
-};
-
-static const
-struct matrix_keymap_data tegra_kbc_default_keymap_data __devinitdata = {
- .keymap = tegra_kbc_default_keymap,
- .keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
-};
-
static void tegra_kbc_report_released_keys(struct input_dev *input,
unsigned short old_keycodes[],
unsigned int old_num_keys,
@@ -357,18 +239,6 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
writel(val, kbc->mmio + KBC_CONTROL_0);
}
-static void tegra_kbc_set_keypress_interrupt(struct tegra_kbc *kbc, bool enable)
-{
- u32 val;
-
- val = readl(kbc->mmio + KBC_CONTROL_0);
- if (enable)
- val |= KBC_CONTROL_KEYPRESS_INT_EN;
- else
- val &= ~KBC_CONTROL_KEYPRESS_INT_EN;
- writel(val, kbc->mmio + KBC_CONTROL_0);
-}
-
static void tegra_kbc_keypress_timer(unsigned long data)
{
struct tegra_kbc *kbc = (struct tegra_kbc *)data;
@@ -439,12 +309,11 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
{
- const struct tegra_kbc_platform_data *pdata = kbc->pdata;
int i;
unsigned int rst_val;
/* Either mask all keys or none. */
- rst_val = (filter && !pdata->wakeup) ? ~0 : 0;
+ rst_val = (filter && !kbc->wakeup) ? ~0 : 0;
for (i = 0; i < KBC_MAX_ROW; i++)
writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
@@ -452,7 +321,6 @@ static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
{
- const struct tegra_kbc_platform_data *pdata = kbc->pdata;
int i;
for (i = 0; i < KBC_MAX_GPIO; i++) {
@@ -468,13 +336,13 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
row_cfg &= ~r_mask;
col_cfg &= ~c_mask;
- switch (pdata->pin_cfg[i].type) {
+ switch (kbc->pin_cfg[i].type) {
case PIN_CFG_ROW:
- row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
+ row_cfg |= ((kbc->pin_cfg[i].num << 1) | 1) << r_shft;
break;
case PIN_CFG_COL:
- col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
+ col_cfg |= ((kbc->pin_cfg[i].num << 1) | 1) << c_shft;
break;
case PIN_CFG_IGNORE:
@@ -488,7 +356,6 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
static int tegra_kbc_start(struct tegra_kbc *kbc)
{
- const struct tegra_kbc_platform_data *pdata = kbc->pdata;
unsigned int debounce_cnt;
u32 val = 0;
@@ -503,10 +370,10 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
tegra_kbc_config_pins(kbc);
tegra_kbc_setup_wakekeys(kbc, false);
- writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
+ writel(kbc->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
/* Keyboard debounce count is maximum of 12 bits. */
- debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+ debounce_cnt = min(kbc->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
val |= KBC_CONTROL_FIFO_CNT_INT_EN; /* interrupt on FIFO threshold */
@@ -573,21 +440,20 @@ static void tegra_kbc_close(struct input_dev *dev)
return tegra_kbc_stop(kbc);
}
-static bool __devinit
-tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
- struct device *dev, unsigned int *num_rows)
+static bool tegra_kbc_check_pin_cfg(const struct tegra_kbc *kbc,
+ unsigned int *num_rows)
{
int i;
*num_rows = 0;
for (i = 0; i < KBC_MAX_GPIO; i++) {
- const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i];
+ const struct tegra_kbc_pin_cfg *pin_cfg = &kbc->pin_cfg[i];
switch (pin_cfg->type) {
case PIN_CFG_ROW:
if (pin_cfg->num >= KBC_MAX_ROW) {
- dev_err(dev,
+ dev_err(kbc->dev,
"pin_cfg[%d]: invalid row number %d\n",
i, pin_cfg->num);
return false;
@@ -597,7 +463,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
case PIN_CFG_COL:
if (pin_cfg->num >= KBC_MAX_COL) {
- dev_err(dev,
+ dev_err(kbc->dev,
"pin_cfg[%d]: invalid column number %d\n",
i, pin_cfg->num);
return false;
@@ -608,7 +474,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
break;
default:
- dev_err(dev,
+ dev_err(kbc->dev,
"pin_cfg[%d]: invalid entry type %d\n",
 i, pin_cfg->type);
return false;
@@ -618,154 +484,140 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
return true;
}
-#ifdef CONFIG_OF
-static struct tegra_kbc_platform_data * __devinit tegra_kbc_dt_parse_pdata(
- struct platform_device *pdev)
+static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
{
- struct tegra_kbc_platform_data *pdata;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = kbc->dev->of_node;
u32 prop;
int i;
-
- if (!np)
- return NULL;
-
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
+ u32 num_rows = 0;
+ u32 num_cols = 0;
+ u32 cols_cfg[KBC_MAX_GPIO];
+ u32 rows_cfg[KBC_MAX_GPIO];
+ int proplen;
+ int ret;
if (!of_property_read_u32(np, "nvidia,debounce-delay-ms", &prop))
- pdata->debounce_cnt = prop;
+ kbc->debounce_cnt = prop;
if (!of_property_read_u32(np, "nvidia,repeat-delay-ms", &prop))
- pdata->repeat_cnt = prop;
+ kbc->repeat_cnt = prop;
if (of_find_property(np, "nvidia,needs-ghost-filter", NULL))
- pdata->use_ghost_filter = true;
+ kbc->use_ghost_filter = true;
if (of_find_property(np, "nvidia,wakeup-source", NULL))
- pdata->wakeup = true;
+ kbc->wakeup = true;
- /*
- * All currently known keymaps with device tree support use the same
- * pin_cfg, so set it up here.
- */
- for (i = 0; i < KBC_MAX_ROW; i++) {
- pdata->pin_cfg[i].num = i;
- pdata->pin_cfg[i].type = PIN_CFG_ROW;
+ if (!of_get_property(np, "nvidia,kbc-row-pins", &proplen)) {
+ dev_err(kbc->dev, "property nvidia,kbc-row-pins not found\n");
+ return -ENOENT;
}
+ num_rows = proplen / sizeof(u32);
- for (i = 0; i < KBC_MAX_COL; i++) {
- pdata->pin_cfg[KBC_MAX_ROW + i].num = i;
- pdata->pin_cfg[KBC_MAX_ROW + i].type = PIN_CFG_COL;
+ if (!of_get_property(np, "nvidia,kbc-col-pins", &proplen)) {
+ dev_err(kbc->dev, "property nvidia,kbc-col-pins not found\n");
+ return -ENOENT;
}
+ num_cols = proplen / sizeof(u32);
- return pdata;
-}
-#else
-static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
- struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
+ if (!of_get_property(np, "linux,keymap", &proplen)) {
+ dev_err(kbc->dev, "property linux,keymap not found\n");
+ return -ENOENT;
+ }
-static int __devinit tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
-{
- const struct tegra_kbc_platform_data *pdata = kbc->pdata;
- const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
- unsigned int keymap_rows = KBC_MAX_KEY;
- int retval;
+ if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
+ dev_err(kbc->dev,
+ "keypad rows/columns not porperly specified\n");
+ return -EINVAL;
+ }
- if (keymap_data && pdata->use_fn_map)
- keymap_rows *= 2;
+ /* Set all pins as non-configured */
+ for (i = 0; i < KBC_MAX_GPIO; i++)
+ kbc->pin_cfg[i].type = PIN_CFG_IGNORE;
- retval = matrix_keypad_build_keymap(keymap_data, NULL,
- keymap_rows, KBC_MAX_COL,
- kbc->keycode, kbc->idev);
- if (retval == -ENOSYS || retval == -ENOENT) {
- /*
- * If there is no OF support in kernel or keymap
- * property is missing, use default keymap.
- */
- retval = matrix_keypad_build_keymap(
- &tegra_kbc_default_keymap_data, NULL,
- keymap_rows, KBC_MAX_COL,
- kbc->keycode, kbc->idev);
+ ret = of_property_read_u32_array(np, "nvidia,kbc-row-pins",
+ rows_cfg, num_rows);
+ if (ret < 0) {
+ dev_err(kbc->dev, "Rows configurations are not proper\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(np, "nvidia,kbc-col-pins",
+ cols_cfg, num_cols);
+ if (ret < 0) {
+ dev_err(kbc->dev, "Cols configurations are not proper\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_rows; i++) {
+ kbc->pin_cfg[rows_cfg[i]].type = PIN_CFG_ROW;
+ kbc->pin_cfg[rows_cfg[i]].num = i;
}
- return retval;
+ for (i = 0; i < num_cols; i++) {
+ kbc->pin_cfg[cols_cfg[i]].type = PIN_CFG_COL;
+ kbc->pin_cfg[cols_cfg[i]].num = i;
+ }
+
+ return 0;
}
-static int __devinit tegra_kbc_probe(struct platform_device *pdev)
+static int tegra_kbc_probe(struct platform_device *pdev)
{
- const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
struct tegra_kbc *kbc;
- struct input_dev *input_dev;
struct resource *res;
- int irq;
int err;
int num_rows = 0;
unsigned int debounce_cnt;
unsigned int scan_time_rows;
+ unsigned int keymap_rows = KBC_MAX_KEY;
- if (!pdata)
- pdata = tegra_kbc_dt_parse_pdata(pdev);
+ kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
+ if (!kbc) {
+ dev_err(&pdev->dev, "failed to alloc memory for kbc\n");
+ return -ENOMEM;
+ }
- if (!pdata)
- return -EINVAL;
+ kbc->dev = &pdev->dev;
+ spin_lock_init(&kbc->lock);
- if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows)) {
- err = -EINVAL;
- goto err_free_pdata;
- }
+ err = tegra_kbc_parse_dt(kbc);
+ if (err)
+ return err;
+
+ if (!tegra_kbc_check_pin_cfg(kbc, &num_rows))
+ return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get I/O memory\n");
- err = -ENXIO;
- goto err_free_pdata;
+ return -ENXIO;
}
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
+ kbc->irq = platform_get_irq(pdev, 0);
+ if (kbc->irq < 0) {
dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
- err = -ENXIO;
- goto err_free_pdata;
+ return -ENXIO;
}
- kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!kbc || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
+ kbc->idev = devm_input_allocate_device(&pdev->dev);
+ if (!kbc->idev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
}
- kbc->pdata = pdata;
- kbc->idev = input_dev;
- kbc->irq = irq;
- spin_lock_init(&kbc->lock);
setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- err = -EBUSY;
- goto err_free_mem;
- }
-
- kbc->mmio = ioremap(res->start, resource_size(res));
+ kbc->mmio = devm_request_and_ioremap(&pdev->dev, res);
if (!kbc->mmio) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- err = -ENXIO;
- goto err_free_mem_region;
+ dev_err(&pdev->dev, "Cannot request memregion/iomap address\n");
+ return -EBUSY;
}
- kbc->clk = clk_get(&pdev->dev, NULL);
+ kbc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(kbc->clk)) {
dev_err(&pdev->dev, "failed to get keyboard clock\n");
- err = PTR_ERR(kbc->clk);
- goto err_iounmap;
+ return PTR_ERR(kbc->clk);
}
/*
@@ -774,37 +626,38 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
* the rows. There is an additional delay before the row scanning
* starts. The repoll delay is computed in milliseconds.
*/
- debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+ debounce_cnt = min(kbc->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
- kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
+ kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + kbc->repeat_cnt;
kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
- kbc->wakeup_key = pdata->wakeup_key;
- kbc->use_fn_map = pdata->use_fn_map;
- kbc->use_ghost_filter = pdata->use_ghost_filter;
+ kbc->idev->name = pdev->name;
+ kbc->idev->id.bustype = BUS_HOST;
+ kbc->idev->dev.parent = &pdev->dev;
+ kbc->idev->open = tegra_kbc_open;
+ kbc->idev->close = tegra_kbc_close;
- input_dev->name = pdev->name;
- input_dev->id.bustype = BUS_HOST;
- input_dev->dev.parent = &pdev->dev;
- input_dev->open = tegra_kbc_open;
- input_dev->close = tegra_kbc_close;
+ if (kbc->keymap_data && kbc->use_fn_map)
+ keymap_rows *= 2;
- err = tegra_kbd_setup_keymap(kbc);
+ err = matrix_keypad_build_keymap(kbc->keymap_data, NULL,
+ keymap_rows, KBC_MAX_COL,
+ kbc->keycode, kbc->idev);
if (err) {
dev_err(&pdev->dev, "failed to setup keymap\n");
- goto err_put_clk;
+ return err;
}
- __set_bit(EV_REP, input_dev->evbit);
- input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ __set_bit(EV_REP, kbc->idev->evbit);
+ input_set_capability(kbc->idev, EV_MSC, MSC_SCAN);
- input_set_drvdata(input_dev, kbc);
+ input_set_drvdata(kbc->idev, kbc);
- err = request_irq(kbc->irq, tegra_kbc_isr,
+ err = devm_request_irq(&pdev->dev, kbc->irq, tegra_kbc_isr,
IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
if (err) {
dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
- goto err_put_clk;
+ return err;
}
disable_irq(kbc->irq);
@@ -812,60 +665,28 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
err = input_register_device(kbc->idev);
if (err) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ return err;
}
platform_set_drvdata(pdev, kbc);
- device_init_wakeup(&pdev->dev, pdata->wakeup);
+ device_init_wakeup(&pdev->dev, kbc->wakeup);
return 0;
-
-err_free_irq:
- free_irq(kbc->irq, pdev);
-err_put_clk:
- clk_put(kbc->clk);
-err_iounmap:
- iounmap(kbc->mmio);
-err_free_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input_dev);
- kfree(kbc);
-err_free_pdata:
- if (!pdev->dev.platform_data)
- kfree(pdata);
-
- return err;
}
-static int __devexit tegra_kbc_remove(struct platform_device *pdev)
+#ifdef CONFIG_PM_SLEEP
+static void tegra_kbc_set_keypress_interrupt(struct tegra_kbc *kbc, bool enable)
{
- struct tegra_kbc *kbc = platform_get_drvdata(pdev);
- struct resource *res;
-
- platform_set_drvdata(pdev, NULL);
-
- free_irq(kbc->irq, pdev);
- clk_put(kbc->clk);
-
- input_unregister_device(kbc->idev);
- iounmap(kbc->mmio);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- /*
- * If we do not have platform data attached to the device we
- * allocated it ourselves and thus need to free it.
- */
- if (!pdev->dev.platform_data)
- kfree(kbc->pdata);
-
- kfree(kbc);
+ u32 val;
- return 0;
+ val = readl(kbc->mmio + KBC_CONTROL_0);
+ if (enable)
+ val |= KBC_CONTROL_KEYPRESS_INT_EN;
+ else
+ val &= ~KBC_CONTROL_KEYPRESS_INT_EN;
+ writel(val, kbc->mmio + KBC_CONTROL_0);
}
-#ifdef CONFIG_PM_SLEEP
static int tegra_kbc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -954,7 +775,6 @@ MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
static struct platform_driver tegra_kbc_driver = {
.probe = tegra_kbc_probe,
- .remove = __devexit_p(tegra_kbc_remove),
.driver = {
.name = "tegra-kbc",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 4c34f21fbe2d..ee1635011292 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -153,7 +153,7 @@ static void keypad_stop(struct input_dev *dev)
clk_disable(kp->clk);
}
-static int __devinit keypad_probe(struct platform_device *pdev)
+static int keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
const struct matrix_keymap_data *keymap_data;
@@ -301,7 +301,7 @@ error_res:
return error;
}
-static int __devexit keypad_remove(struct platform_device *pdev)
+static int keypad_remove(struct platform_device *pdev)
{
struct keypad_data *kp = platform_get_drvdata(pdev);
@@ -319,7 +319,7 @@ static int __devexit keypad_remove(struct platform_device *pdev)
static struct platform_driver keypad_driver = {
.probe = keypad_probe,
- .remove = __devexit_p(keypad_remove),
+ .remove = keypad_remove,
.driver.name = "tnetv107x-keypad",
.driver.owner = THIS_MODULE,
};
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index a2c6f79aa101..04f84fd57173 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -271,7 +271,7 @@ static irqreturn_t do_kp_irq(int irq, void *_kp)
return IRQ_HANDLED;
}
-static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
+static int twl4030_kp_program(struct twl4030_keypad *kp)
{
u8 reg;
int i;
@@ -328,7 +328,7 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
* Registers keypad device with input subsystem
* and configures TWL4030 keypad registers
*/
-static int __devinit twl4030_kp_probe(struct platform_device *pdev)
+static int twl4030_kp_probe(struct platform_device *pdev)
{
struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
const struct matrix_keymap_data *keymap_data;
@@ -432,7 +432,7 @@ err1:
return error;
}
-static int __devexit twl4030_kp_remove(struct platform_device *pdev)
+static int twl4030_kp_remove(struct platform_device *pdev)
{
struct twl4030_keypad *kp = platform_get_drvdata(pdev);
@@ -452,7 +452,7 @@ static int __devexit twl4030_kp_remove(struct platform_device *pdev)
static struct platform_driver twl4030_kp_driver = {
.probe = twl4030_kp_probe,
- .remove = __devexit_p(twl4030_kp_remove),
+ .remove = twl4030_kp_remove,
.driver = {
.name = "twl4030_keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index e0f6cd1ad0fd..ee163bee8cce 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -118,7 +118,7 @@ static void w90p910_keypad_close(struct input_dev *dev)
clk_disable(keypad->clk);
}
-static int __devinit w90p910_keypad_probe(struct platform_device *pdev)
+static int w90p910_keypad_probe(struct platform_device *pdev)
{
const struct w90p910_keypad_platform_data *pdata =
pdev->dev.platform_data;
@@ -234,7 +234,7 @@ failed_free:
return error;
}
-static int __devexit w90p910_keypad_remove(struct platform_device *pdev)
+static int w90p910_keypad_remove(struct platform_device *pdev)
{
struct w90p910_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -257,7 +257,7 @@ static int __devexit w90p910_keypad_remove(struct platform_device *pdev)
static struct platform_driver w90p910_keypad_driver = {
.probe = w90p910_keypad_probe,
- .remove = __devexit_p(w90p910_keypad_remove),
+ .remove = w90p910_keypad_remove,
.driver = {
.name = "nuc900-kpi",
.owner = THIS_MODULE,
diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
index d88d9be1d1b7..3ae496ea5fe6 100644
--- a/drivers/input/matrix-keymap.c
+++ b/drivers/input/matrix-keymap.c
@@ -18,6 +18,7 @@
*/
#include <linux/device.h>
+#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/input.h>
@@ -123,6 +124,11 @@ static int matrix_keypad_parse_of_keymap(const char *propname,
* it will attempt load the keymap from property specified by @keymap_name
* argument (or "linux,keymap" if @keymap_name is %NULL).
*
+ * If @keymap is %NULL the function will automatically allocate managed
+ * block of memory to store the keymap. This memory will be associated with
+ * the parent device and automatically freed when device unbinds from the
+ * driver.
+ *
* Callers are expected to set up input_dev->dev.parent before calling this
* function.
*/
@@ -133,12 +139,27 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
struct input_dev *input_dev)
{
unsigned int row_shift = get_count_order(cols);
+ size_t max_keys = rows << row_shift;
int i;
int error;
+ if (WARN_ON(!input_dev->dev.parent))
+ return -EINVAL;
+
+ if (!keymap) {
+ keymap = devm_kzalloc(input_dev->dev.parent,
+ max_keys * sizeof(*keymap),
+ GFP_KERNEL);
+ if (!keymap) {
+ dev_err(input_dev->dev.parent,
+ "Unable to allocate memory for keymap");
+ return -ENOMEM;
+ }
+ }
+
input_dev->keycode = keymap;
input_dev->keycodesize = sizeof(*keymap);
- input_dev->keycodemax = rows << row_shift;
+ input_dev->keycodemax = max_keys;
__set_bit(EV_KEY, input_dev->evbit);
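
The kernel-doc addition above documents the behaviour the tca8418 conversion relies on: passing a NULL @keymap makes the helper allocate a devm-managed keycode table against input_dev->dev.parent. Caller-side, the usage reduces to something like this (a sketch; rows, cols and the device pointer are assumed to come from the driver):

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

static int foo_setup_keymap(struct device *dev, struct input_dev *input,
			    unsigned int rows, unsigned int cols)
{
	int error;

	/* Required: the managed keymap is allocated against this device. */
	input->dev.parent = dev;

	/*
	 * NULL keymap_data: fall back to the "linux,keymap" DT property.
	 * NULL keymap: let the helper devm-allocate the keycode array.
	 */
	error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
					   NULL, input);
	if (error)
		return error;

	return 0;
}

The tca8418 probe above follows exactly this shape, which is why the driver no longer needs to carry its own flexible-array keymap member.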
diff --git a/drivers/input/misc/88pm80x_onkey.c b/drivers/input/misc/88pm80x_onkey.c
index 7f26e7b6c228..ee43e5b7c881 100644
--- a/drivers/input/misc/88pm80x_onkey.c
+++ b/drivers/input/misc/88pm80x_onkey.c
@@ -62,7 +62,7 @@ static irqreturn_t pm80x_onkey_handler(int irq, void *data)
static SIMPLE_DEV_PM_OPS(pm80x_onkey_pm_ops, pm80x_dev_suspend,
pm80x_dev_resume);
-static int __devinit pm80x_onkey_probe(struct platform_device *pdev)
+static int pm80x_onkey_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -139,7 +139,7 @@ out:
return err;
}
-static int __devexit pm80x_onkey_remove(struct platform_device *pdev)
+static int pm80x_onkey_remove(struct platform_device *pdev)
{
struct pm80x_onkey_info *info = platform_get_drvdata(pdev);
@@ -157,7 +157,7 @@ static struct platform_driver pm80x_onkey_driver = {
.pm = &pm80x_onkey_pm_ops,
},
.probe = pm80x_onkey_probe,
- .remove = __devexit_p(pm80x_onkey_remove),
+ .remove = pm80x_onkey_remove,
};
module_platform_driver(pm80x_onkey_driver);
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index f9ce1835e4d7..abd8453e5212 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -56,7 +56,7 @@ static irqreturn_t pm860x_onkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit pm860x_onkey_probe(struct platform_device *pdev)
+static int pm860x_onkey_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_onkey_info *info;
@@ -121,7 +121,7 @@ out:
return ret;
}
-static int __devexit pm860x_onkey_remove(struct platform_device *pdev)
+static int pm860x_onkey_remove(struct platform_device *pdev)
{
struct pm860x_onkey_info *info = platform_get_drvdata(pdev);
@@ -161,7 +161,7 @@ static struct platform_driver pm860x_onkey_driver = {
.pm = &pm860x_onkey_pm_ops,
},
.probe = pm860x_onkey_probe,
- .remove = __devexit_p(pm860x_onkey_remove),
+ .remove = pm860x_onkey_remove,
};
module_platform_driver(pm860x_onkey_driver);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 104a7c3153c0..259ef31abb18 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -300,8 +300,7 @@ config INPUT_ATI_REMOTE2
called ati_remote2.
config INPUT_KEYSPAN_REMOTE
- tristate "Keyspan DMR USB remote control (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Keyspan DMR USB remote control"
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -350,7 +349,6 @@ config INPUT_POWERMATE
config INPUT_YEALINK
tristate "Yealink usb-p1k voip phone"
- depends on EXPERIMENTAL
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -366,7 +364,6 @@ config INPUT_YEALINK
config INPUT_CM109
tristate "C-Media CM109 USB I/O Controller"
- depends on EXPERIMENTAL
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -377,6 +374,16 @@ config INPUT_CM109
To compile this driver as a module, choose M here: the module will be
called cm109.
+config INPUT_RETU_PWRBUTTON
+ tristate "Retu Power button Driver"
+ depends on MFD_RETU
+ help
+ Say Y here if you want to enable power key reporting via the
+ Retu chips found in Nokia Internet Tablets (770, N800, N810).
+
+ To compile this driver as a module, choose M here. The module will
+ be called retu-pwrbutton.
+
config INPUT_TWL4030_PWRBUTTON
tristate "TWL4030 Power button Driver"
depends on TWL4030_CORE
@@ -444,7 +451,7 @@ config INPUT_PCF50633_PMU
config INPUT_PCF8574
tristate "PCF8574 Keypad input device"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
Say Y here if you want to support a keypad connected via I2C
with a PCF8574.
@@ -454,7 +461,7 @@ config INPUT_PCF8574
config INPUT_PWM_BEEPER
tristate "PWM beeper support"
- depends on HAVE_PWM
+ depends on HAVE_PWM || PWM
help
Say Y here to get support for PWM based beeper devices.
@@ -496,6 +503,16 @@ config INPUT_DA9052_ONKEY
To compile this driver as a module, choose M here: the
module will be called da9052_onkey.
+config INPUT_DA9055_ONKEY
+ tristate "Dialog Semiconductor DA9055 ONKEY"
+ depends on MFD_DA9055
+ help
+ Support the ONKEY of DA9055 PMICs as an input device
+ reporting power button status.
+
+ To compile this driver as a module, choose M here: the module
+ will be called da9055_onkey.
+
config INPUT_DM355EVM
tristate "TI DaVinci DM355 EVM Keypad and IR Remote"
depends on MFD_DM355EVM_MSP
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 5ea769eda999..1f1e1b109d9d 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DA9052_ONKEY) += da9052_onkey.o
+obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
+obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 84ec691c05aa..2f090b46e716 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -45,7 +45,7 @@ static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
+static int ab8500_ponkey_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_ponkey *ponkey;
@@ -118,7 +118,7 @@ err_free_mem:
return error;
}
-static int __devexit ab8500_ponkey_remove(struct platform_device *pdev)
+static int ab8500_ponkey_remove(struct platform_device *pdev)
{
struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
@@ -146,7 +146,7 @@ static struct platform_driver ab8500_ponkey_driver = {
.of_match_table = of_match_ptr(ab8500_ponkey_match),
},
.probe = ab8500_ponkey_probe,
- .remove = __devexit_p(ab8500_ponkey_remove),
+ .remove = ab8500_ponkey_remove,
};
module_platform_driver(ab8500_ponkey_driver);
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index c8a79015472a..29d2064c26f2 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -72,7 +72,7 @@ static int ad714x_i2c_read(struct ad714x_chip *chip,
return 0;
}
-static int __devinit ad714x_i2c_probe(struct i2c_client *client,
+static int ad714x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad714x_chip *chip;
@@ -87,7 +87,7 @@ static int __devinit ad714x_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit ad714x_i2c_remove(struct i2c_client *client)
+static int ad714x_i2c_remove(struct i2c_client *client)
{
struct ad714x_chip *chip = i2c_get_clientdata(client);
@@ -112,7 +112,7 @@ static struct i2c_driver ad714x_i2c_driver = {
.pm = &ad714x_i2c_pm,
},
.probe = ad714x_i2c_probe,
- .remove = __devexit_p(ad714x_i2c_remove),
+ .remove = ad714x_i2c_remove,
.id_table = ad714x_id,
};
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 75f6136d608e..bdccca42d138 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -83,7 +83,7 @@ static int ad714x_spi_write(struct ad714x_chip *chip,
return 0;
}
-static int __devinit ad714x_spi_probe(struct spi_device *spi)
+static int ad714x_spi_probe(struct spi_device *spi)
{
struct ad714x_chip *chip;
int err;
@@ -103,7 +103,7 @@ static int __devinit ad714x_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit ad714x_spi_remove(struct spi_device *spi)
+static int ad714x_spi_remove(struct spi_device *spi)
{
struct ad714x_chip *chip = spi_get_drvdata(spi);
@@ -120,7 +120,7 @@ static struct spi_driver ad714x_spi_driver = {
.pm = &ad714x_spi_pm,
},
.probe = ad714x_spi_probe,
- .remove = __devexit_p(ad714x_spi_remove),
+ .remove = ad714x_spi_remove,
};
module_spi_driver(ad714x_spi_driver);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index dd1d1c145a7f..535dda48cace 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -73,7 +73,7 @@ static const struct adxl34x_bus_ops adxl34x_i2c_bops = {
.read_block = adxl34x_i2c_read_block,
};
-static int __devinit adxl34x_i2c_probe(struct i2c_client *client,
+static int adxl34x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adxl34x *ac;
@@ -98,7 +98,7 @@ static int __devinit adxl34x_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit adxl34x_i2c_remove(struct i2c_client *client)
+static int adxl34x_i2c_remove(struct i2c_client *client)
{
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -144,7 +144,7 @@ static struct i2c_driver adxl34x_driver = {
.pm = &adxl34x_i2c_pm,
},
.probe = adxl34x_i2c_probe,
- .remove = __devexit_p(adxl34x_i2c_remove),
+ .remove = adxl34x_i2c_remove,
.id_table = adxl34x_id,
};
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 820a802a1e6e..ad5f40d37e48 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -65,7 +65,7 @@ static const struct adxl34x_bus_ops adxl34x_spi_bops = {
.read_block = adxl34x_spi_read_block,
};
-static int __devinit adxl34x_spi_probe(struct spi_device *spi)
+static int adxl34x_spi_probe(struct spi_device *spi)
{
struct adxl34x *ac;
@@ -87,7 +87,7 @@ static int __devinit adxl34x_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit adxl34x_spi_remove(struct spi_device *spi)
+static int adxl34x_spi_remove(struct spi_device *spi)
{
struct adxl34x *ac = dev_get_drvdata(&spi->dev);
@@ -126,7 +126,7 @@ static struct spi_driver adxl34x_driver = {
.pm = &adxl34x_spi_pm,
},
.probe = adxl34x_spi_probe,
- .remove = __devexit_p(adxl34x_spi_remove),
+ .remove = adxl34x_spi_remove,
};
module_spi_driver(adxl34x_driver);
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 1cf72fe513e6..0735de3a6468 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -232,7 +232,7 @@ static const struct adxl34x_platform_data adxl34x_default_init = {
.ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */
.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
- .fifo_mode = FIFO_STREAM,
+ .fifo_mode = ADXL_FIFO_STREAM,
.watermark = 0,
};
@@ -732,7 +732,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
mutex_init(&ac->mutex);
input_dev->name = "ADXL34x accelerometer";
- revid = ac->bops->read(dev, DEVID);
+ revid = AC_READ(ac, DEVID);
switch (revid) {
case ID_ADXL345:
@@ -809,7 +809,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
ac->fifo_delay = false;
- ac->bops->write(dev, POWER_CTL, 0);
+ AC_WRITE(ac, POWER_CTL, 0);
err = request_threaded_irq(ac->irq, NULL, adxl34x_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
@@ -827,7 +827,6 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
if (err)
goto err_remove_attr;
- AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold);
AC_WRITE(ac, OFSX, pdata->x_axis_offset);
ac->hwcal.x = pdata->x_axis_offset;
AC_WRITE(ac, OFSY, pdata->y_axis_offset);
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 26f13131639a..5d4402365a52 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -121,7 +121,7 @@ static int atlas_acpi_button_add(struct acpi_device *device)
return err;
}
-static int atlas_acpi_button_remove(struct acpi_device *device, int type)
+static int atlas_acpi_button_remove(struct acpi_device *device)
{
acpi_status status;
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 1c4146fccfdf..a6666e142a91 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -90,7 +90,7 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_rotary_probe(struct platform_device *pdev)
+static int bfin_rotary_probe(struct platform_device *pdev)
{
struct bfin_rotary_platform_data *pdata = pdev->dev.platform_data;
struct bfin_rot *rotary;
@@ -196,7 +196,7 @@ out1:
return error;
}
-static int __devexit bfin_rotary_remove(struct platform_device *pdev)
+static int bfin_rotary_remove(struct platform_device *pdev)
{
struct bfin_rot *rotary = platform_get_drvdata(pdev);
@@ -255,7 +255,7 @@ static const struct dev_pm_ops bfin_rotary_pm_ops = {
static struct platform_driver bfin_rotary_device_driver = {
.probe = bfin_rotary_probe,
- .remove = __devexit_p(bfin_rotary_remove),
+ .remove = bfin_rotary_remove,
.driver = {
.name = "bfin-rotary",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index e2f1e9f952b1..865c2f9d25b9 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -46,18 +46,6 @@
#define BMA150_POLL_MAX 200
#define BMA150_POLL_MIN 0
-#define BMA150_BW_25HZ 0
-#define BMA150_BW_50HZ 1
-#define BMA150_BW_100HZ 2
-#define BMA150_BW_190HZ 3
-#define BMA150_BW_375HZ 4
-#define BMA150_BW_750HZ 5
-#define BMA150_BW_1500HZ 6
-
-#define BMA150_RANGE_2G 0
-#define BMA150_RANGE_4G 1
-#define BMA150_RANGE_8G 2
-
#define BMA150_MODE_NORMAL 0
#define BMA150_MODE_SLEEP 2
#define BMA150_MODE_WAKE_UP 3
@@ -158,7 +146,7 @@ struct bma150_data {
* are stated and verified by Bosch Sensortec where they are configured
* to provide a generic sensitivity performance.
*/
-static struct bma150_cfg default_cfg __devinitdata = {
+static struct bma150_cfg default_cfg = {
.any_motion_int = 1,
.hg_int = 1,
.lg_int = 1,
@@ -224,7 +212,7 @@ static int bma150_set_mode(struct bma150_data *bma150, u8 mode)
return 0;
}
-static int __devinit bma150_soft_reset(struct bma150_data *bma150)
+static int bma150_soft_reset(struct bma150_data *bma150)
{
int error;
@@ -237,19 +225,19 @@ static int __devinit bma150_soft_reset(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_set_range(struct bma150_data *bma150, u8 range)
+static int bma150_set_range(struct bma150_data *bma150, u8 range)
{
return bma150_set_reg_bits(bma150->client, range, BMA150_RANGE_POS,
BMA150_RANGE_MSK, BMA150_RANGE_REG);
}
-static int __devinit bma150_set_bandwidth(struct bma150_data *bma150, u8 bw)
+static int bma150_set_bandwidth(struct bma150_data *bma150, u8 bw)
{
return bma150_set_reg_bits(bma150->client, bw, BMA150_BANDWIDTH_POS,
BMA150_BANDWIDTH_MSK, BMA150_BANDWIDTH_REG);
}
-static int __devinit bma150_set_low_g_interrupt(struct bma150_data *bma150,
+static int bma150_set_low_g_interrupt(struct bma150_data *bma150,
u8 enable, u8 hyst, u8 dur, u8 thres)
{
int error;
@@ -273,7 +261,7 @@ static int __devinit bma150_set_low_g_interrupt(struct bma150_data *bma150,
BMA150_LOW_G_EN_REG);
}
-static int __devinit bma150_set_high_g_interrupt(struct bma150_data *bma150,
+static int bma150_set_high_g_interrupt(struct bma150_data *bma150,
u8 enable, u8 hyst, u8 dur, u8 thres)
{
int error;
@@ -300,7 +288,7 @@ static int __devinit bma150_set_high_g_interrupt(struct bma150_data *bma150,
}
-static int __devinit bma150_set_any_motion_interrupt(struct bma150_data *bma150,
+static int bma150_set_any_motion_interrupt(struct bma150_data *bma150,
u8 enable, u8 dur, u8 thres)
{
int error;
@@ -372,7 +360,7 @@ static int bma150_open(struct bma150_data *bma150)
int error;
error = pm_runtime_get_sync(&bma150->client->dev);
- if (error && error != -ENOSYS)
+ if (error < 0 && error != -ENOSYS)
return error;
/*
@@ -424,7 +412,7 @@ static void bma150_poll_close(struct input_polled_dev *ipoll_dev)
bma150_close(bma150);
}
-static int __devinit bma150_initialize(struct bma150_data *bma150,
+static int bma150_initialize(struct bma150_data *bma150,
const struct bma150_cfg *cfg)
{
int error;
@@ -465,7 +453,7 @@ static int __devinit bma150_initialize(struct bma150_data *bma150,
return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static void __devinit bma150_init_input_device(struct bma150_data *bma150,
+static void bma150_init_input_device(struct bma150_data *bma150,
struct input_dev *idev)
{
idev->name = BMA150_DRIVER;
@@ -479,7 +467,7 @@ static void __devinit bma150_init_input_device(struct bma150_data *bma150,
input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
}
-static int __devinit bma150_register_input_device(struct bma150_data *bma150)
+static int bma150_register_input_device(struct bma150_data *bma150)
{
struct input_dev *idev;
int error;
@@ -504,7 +492,7 @@ static int __devinit bma150_register_input_device(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_register_polled_device(struct bma150_data *bma150)
+static int bma150_register_polled_device(struct bma150_data *bma150)
{
struct input_polled_dev *ipoll_dev;
int error;
@@ -535,7 +523,7 @@ static int __devinit bma150_register_polled_device(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_probe(struct i2c_client *client,
+static int bma150_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct bma150_platform_data *pdata = client->dev.platform_data;
@@ -613,7 +601,7 @@ err_free_mem:
return error;
}
-static int __devexit bma150_remove(struct i2c_client *client)
+static int bma150_remove(struct i2c_client *client)
{
struct bma150_data *bma150 = i2c_get_clientdata(client);
@@ -670,7 +658,7 @@ static struct i2c_driver bma150_driver = {
.class = I2C_CLASS_HWMON,
.id_table = bma150_id,
.probe = bma150_probe,
- .remove = __devexit_p(bma150_remove),
+ .remove = bma150_remove,
};
module_i2c_driver(bma150_driver);
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index fe9b85f07792..4fdef98ceb56 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -55,7 +55,7 @@ static const struct cma3000_bus_ops cma3000_i2c_bops = {
.write = cma3000_i2c_set,
};
-static int __devinit cma3000_i2c_probe(struct i2c_client *client,
+static int cma3000_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct cma3000_accl_data *data;
@@ -69,7 +69,7 @@ static int __devinit cma3000_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit cma3000_i2c_remove(struct i2c_client *client)
+static int cma3000_i2c_remove(struct i2c_client *client)
{
struct cma3000_accl_data *data = i2c_get_clientdata(client);
@@ -114,7 +114,7 @@ MODULE_DEVICE_TABLE(i2c, cma3000_i2c_id);
static struct i2c_driver cma3000_i2c_driver = {
.probe = cma3000_i2c_probe,
- .remove = __devexit_p(cma3000_i2c_remove),
+ .remove = cma3000_i2c_remove,
.id_table = cma3000_i2c_id,
.driver = {
.name = "cma3000_i2c_accl",
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 53e43d295148..4f77f87847e8 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -73,7 +73,7 @@ static void handle_buttons(struct input_polled_dev *dev)
}
}
-static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
+static int cobalt_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -135,7 +135,7 @@ static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
return error;
}
-static int __devexit cobalt_buttons_remove(struct platform_device *pdev)
+static int cobalt_buttons_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct buttons_dev *bdev = dev_get_drvdata(dev);
@@ -157,7 +157,7 @@ MODULE_ALIAS("platform:Cobalt buttons");
static struct platform_driver cobalt_buttons_driver = {
.probe = cobalt_buttons_probe,
- .remove = __devexit_p(cobalt_buttons_remove),
+ .remove = cobalt_buttons_remove,
.driver = {
.name = "Cobalt buttons",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 3c843cd725fa..020569a499f2 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -24,7 +24,6 @@ struct da9052_onkey {
struct da9052 *da9052;
struct input_dev *input;
struct delayed_work work;
- unsigned int irq;
};
static void da9052_onkey_query(struct da9052_onkey *onkey)
@@ -71,12 +70,11 @@ static irqreturn_t da9052_onkey_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit da9052_onkey_probe(struct platform_device *pdev)
+static int da9052_onkey_probe(struct platform_device *pdev)
{
struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
struct da9052_onkey *onkey;
struct input_dev *input_dev;
- int irq;
int error;
if (!da9052) {
@@ -84,13 +82,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
return -EINVAL;
}
- irq = platform_get_irq_byname(pdev, "ONKEY");
- if (irq < 0) {
- dev_err(&pdev->dev,
- "Failed to get an IRQ for input device, %d\n", irq);
- return -EINVAL;
- }
-
onkey = kzalloc(sizeof(*onkey), GFP_KERNEL);
input_dev = input_allocate_device();
if (!onkey || !input_dev) {
@@ -101,7 +92,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
onkey->input = input_dev;
onkey->da9052 = da9052;
- onkey->irq = irq;
INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work);
input_dev->name = "da9052-onkey";
@@ -111,13 +101,11 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
input_dev->evbit[0] = BIT_MASK(EV_KEY);
__set_bit(KEY_POWER, input_dev->keybit);
- error = request_threaded_irq(onkey->irq, NULL, da9052_onkey_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "ONKEY", onkey);
+ error = da9052_request_irq(onkey->da9052, DA9052_IRQ_NONKEY, "ONKEY",
+ da9052_onkey_irq, onkey);
if (error < 0) {
dev_err(onkey->da9052->dev,
- "Failed to register ONKEY IRQ %d, error = %d\n",
- onkey->irq, error);
+ "Failed to register ONKEY IRQ: %d\n", error);
goto err_free_mem;
}
@@ -132,7 +120,7 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
return 0;
err_free_irq:
- free_irq(onkey->irq, onkey);
+ da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
cancel_delayed_work_sync(&onkey->work);
err_free_mem:
input_free_device(input_dev);
@@ -141,11 +129,11 @@ err_free_mem:
return error;
}
-static int __devexit da9052_onkey_remove(struct platform_device *pdev)
+static int da9052_onkey_remove(struct platform_device *pdev)
{
struct da9052_onkey *onkey = platform_get_drvdata(pdev);
- free_irq(onkey->irq, onkey);
+ da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
cancel_delayed_work_sync(&onkey->work);
input_unregister_device(onkey->input);
@@ -156,7 +144,7 @@ static int __devexit da9052_onkey_remove(struct platform_device *pdev)
static struct platform_driver da9052_onkey_driver = {
.probe = da9052_onkey_probe,
- .remove = __devexit_p(da9052_onkey_remove),
+ .remove = da9052_onkey_remove,
.driver = {
.name = "da9052-onkey",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
new file mode 100644
index 000000000000..ee6ae3a00174
--- /dev/null
+++ b/drivers/input/misc/da9055_onkey.c
@@ -0,0 +1,171 @@
+/*
+ * ON pin driver for Dialog DA9055 PMICs
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+
+struct da9055_onkey {
+ struct da9055 *da9055;
+ struct input_dev *input;
+ struct delayed_work work;
+};
+
+static void da9055_onkey_query(struct da9055_onkey *onkey)
+{
+ int key_stat;
+
+ key_stat = da9055_reg_read(onkey->da9055, DA9055_REG_STATUS_A);
+ if (key_stat < 0) {
+ dev_err(onkey->da9055->dev,
+ "Failed to read onkey event %d\n", key_stat);
+ } else {
+ key_stat &= DA9055_NOKEY_STS;
+ /*
+	 * Onkey status bit is cleared when the onkey button is released.
+ */
+ if (!key_stat) {
+ input_report_key(onkey->input, KEY_POWER, 0);
+ input_sync(onkey->input);
+ }
+ }
+
+ /*
+ * Interrupt is generated only when the ONKEY pin is asserted.
+	 * Hence the deassertion of the pin is simulated through a work queue.
+ */
+ if (key_stat)
+ schedule_delayed_work(&onkey->work, msecs_to_jiffies(10));
+
+}
+
+static void da9055_onkey_work(struct work_struct *work)
+{
+ struct da9055_onkey *onkey = container_of(work, struct da9055_onkey,
+ work.work);
+
+ da9055_onkey_query(onkey);
+}
+
+static irqreturn_t da9055_onkey_irq(int irq, void *data)
+{
+ struct da9055_onkey *onkey = data;
+
+ input_report_key(onkey->input, KEY_POWER, 1);
+ input_sync(onkey->input);
+
+ da9055_onkey_query(onkey);
+
+ return IRQ_HANDLED;
+}
+
+static int da9055_onkey_probe(struct platform_device *pdev)
+{
+ struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
+ struct da9055_onkey *onkey;
+ struct input_dev *input_dev;
+ int irq, err;
+
+ irq = platform_get_irq_byname(pdev, "ONKEY");
+ if (irq < 0) {
+ dev_err(&pdev->dev,
+ "Failed to get an IRQ for input device, %d\n", irq);
+ return -EINVAL;
+ }
+
+ onkey = devm_kzalloc(&pdev->dev, sizeof(*onkey), GFP_KERNEL);
+ if (!onkey) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ onkey->input = input_dev;
+ onkey->da9055 = da9055;
+ input_dev->name = "da9055-onkey";
+ input_dev->phys = "da9055-onkey/input0";
+ input_dev->dev.parent = &pdev->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ __set_bit(KEY_POWER, input_dev->keybit);
+
+ INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
+
+ irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ONKEY", onkey);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to register ONKEY IRQ %d, error = %d\n",
+ irq, err);
+ goto err_free_input;
+ }
+
+ err = input_register_device(input_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register input device, %d\n",
+ err);
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, onkey);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, onkey);
+ cancel_delayed_work_sync(&onkey->work);
+err_free_input:
+ input_free_device(input_dev);
+
+ return err;
+}
+
+static int da9055_onkey_remove(struct platform_device *pdev)
+{
+ struct da9055_onkey *onkey = platform_get_drvdata(pdev);
+ int irq = platform_get_irq_byname(pdev, "ONKEY");
+
+ irq = regmap_irq_get_virq(onkey->da9055->irq_data, irq);
+ free_irq(irq, onkey);
+ cancel_delayed_work_sync(&onkey->work);
+ input_unregister_device(onkey->input);
+
+ return 0;
+}
+
+static struct platform_driver da9055_onkey_driver = {
+ .probe = da9055_onkey_probe,
+ .remove = da9055_onkey_remove,
+ .driver = {
+ .name = "da9055-onkey",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(da9055_onkey_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Onkey driver for DA9055");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-onkey");
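The new DA9055 driver above only receives an interrupt when the ONKEY pin is asserted, so the release is detected by re-reading the status register from delayed work until the NOKEY status bit clears. Below is a minimal, driver-independent sketch of that press-interrupt/poll-for-release pattern; struct onkey_ctx and chip_status_pressed() are hypothetical stand-ins for illustration, not part of the DA9055 API.

/*
 * Sketch only (not the DA9055 code): the device interrupts on assertion of
 * the ONKEY pin, so release is discovered by polling a status bit from
 * delayed work. chip_status_pressed() is a hypothetical helper; the real
 * driver reads DA9055_REG_STATUS_A instead.
 */
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct onkey_ctx {
	struct input_dev *input;
	struct delayed_work work;
};

static bool chip_status_pressed(struct onkey_ctx *ctx)
{
	/* Stubbed: the real driver checks the NOKEY status bit here. */
	return false;
}

static void onkey_poll_release(struct work_struct *work)
{
	struct onkey_ctx *ctx =
		container_of(to_delayed_work(work), struct onkey_ctx, work);

	if (chip_status_pressed(ctx)) {
		/* Button still held: look again in 10 ms. */
		schedule_delayed_work(&ctx->work, msecs_to_jiffies(10));
		return;
	}

	/* Pin deasserted: report the release the hardware never signals. */
	input_report_key(ctx->input, KEY_POWER, 0);
	input_sync(ctx->input);
}

static irqreturn_t onkey_irq(int irq, void *data)
{
	struct onkey_ctx *ctx = data;

	/* Assertion of the pin: report the press, then poll for release. */
	input_report_key(ctx->input, KEY_POWER, 1);
	input_sync(ctx->input);
	schedule_delayed_work(&ctx->work, msecs_to_jiffies(10));

	return IRQ_HANDLED;
}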
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index c1313d8535c3..a309a5c0899e 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -173,7 +173,7 @@ static irqreturn_t dm355evm_keys_irq(int irq, void *_keys)
/*----------------------------------------------------------------------*/
-static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
+static int dm355evm_keys_probe(struct platform_device *pdev)
{
struct dm355evm_keys *keys;
struct input_dev *input;
@@ -239,7 +239,7 @@ fail1:
return status;
}
-static int __devexit dm355evm_keys_remove(struct platform_device *pdev)
+static int dm355evm_keys_remove(struct platform_device *pdev)
{
struct dm355evm_keys *keys = platform_get_drvdata(pdev);
@@ -262,7 +262,7 @@ static int __devexit dm355evm_keys_remove(struct platform_device *pdev)
*/
static struct platform_driver dm355evm_keys_driver = {
.probe = dm355evm_keys_probe,
- .remove = __devexit_p(dm355evm_keys_remove),
+ .remove = dm355evm_keys_remove,
.driver = {
.owner = THIS_MODULE,
.name = "dm355evm_keys",
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index b6664cfa340a..fe30bd0fe4bd 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -98,7 +98,7 @@ static void gp2a_device_close(struct input_dev *dev)
"unable to deactivate, err %d\n", error);
}
-static int __devinit gp2a_initialize(struct gp2a_data *dt)
+static int gp2a_initialize(struct gp2a_data *dt)
{
int error;
@@ -122,7 +122,7 @@ static int __devinit gp2a_initialize(struct gp2a_data *dt)
return error;
}
-static int __devinit gp2a_probe(struct i2c_client *client,
+static int gp2a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct gp2a_platform_data *pdata = client->dev.platform_data;
@@ -205,7 +205,7 @@ err_hw_shutdown:
return error;
}
-static int __devexit gp2a_remove(struct i2c_client *client)
+static int gp2a_remove(struct i2c_client *client)
{
struct gp2a_data *dt = i2c_get_clientdata(client);
const struct gp2a_platform_data *pdata = dt->pdata;
@@ -277,7 +277,7 @@ static struct i2c_driver gp2a_i2c_driver = {
.pm = &gp2a_pm,
},
.probe = gp2a_probe,
- .remove = __devexit_p(gp2a_remove),
+ .remove = gp2a_remove,
.id_table = gp2a_i2c_id,
};
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
index 277a0574c199..da05cca8b562 100644
--- a/drivers/input/misc/gpio_tilt_polled.c
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -96,7 +96,7 @@ static void gpio_tilt_polled_close(struct input_polled_dev *dev)
pdata->disable(tdev->dev);
}
-static int __devinit gpio_tilt_polled_probe(struct platform_device *pdev)
+static int gpio_tilt_polled_probe(struct platform_device *pdev)
{
const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
@@ -179,7 +179,7 @@ err_free_tdev:
return error;
}
-static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
+static int gpio_tilt_polled_remove(struct platform_device *pdev)
{
struct gpio_tilt_polled_dev *tdev = platform_get_drvdata(pdev);
const struct gpio_tilt_platform_data *pdata = tdev->pdata;
@@ -198,7 +198,7 @@ static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
static struct platform_driver gpio_tilt_polled_driver = {
.probe = gpio_tilt_polled_probe,
- .remove = __devexit_p(gpio_tilt_polled_remove),
+ .remove = gpio_tilt_polled_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 50e283068301..6ab3decc86e6 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -87,7 +87,7 @@ static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit ixp4xx_spkr_probe(struct platform_device *dev)
+static int ixp4xx_spkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
int err;
@@ -132,7 +132,7 @@ static int __devinit ixp4xx_spkr_probe(struct platform_device *dev)
return err;
}
-static int __devexit ixp4xx_spkr_remove(struct platform_device *dev)
+static int ixp4xx_spkr_remove(struct platform_device *dev)
{
struct input_dev *input_dev = platform_get_drvdata(dev);
unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
@@ -165,7 +165,7 @@ static struct platform_driver ixp4xx_spkr_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ixp4xx_spkr_probe,
- .remove = __devexit_p(ixp4xx_spkr_remove),
+ .remove = ixp4xx_spkr_remove,
.shutdown = ixp4xx_spkr_shutdown,
};
module_platform_driver(ixp4xx_spkr_platform_driver);
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index f46139f19ff1..a993b67a8a5b 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -295,7 +295,7 @@ static void kxtj9_input_close(struct input_dev *dev)
kxtj9_disable(tj9);
}
-static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
+static void kxtj9_init_input_device(struct kxtj9_data *tj9,
struct input_dev *input_dev)
{
__set_bit(EV_ABS, input_dev->evbit);
@@ -308,7 +308,7 @@ static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
input_dev->dev.parent = &tj9->client->dev;
}
-static int __devinit kxtj9_setup_input_device(struct kxtj9_data *tj9)
+static int kxtj9_setup_input_device(struct kxtj9_data *tj9)
{
struct input_dev *input_dev;
int err;
@@ -433,7 +433,7 @@ static void kxtj9_polled_input_close(struct input_polled_dev *dev)
kxtj9_disable(tj9);
}
-static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
+static int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
{
int err;
struct input_polled_dev *poll_dev;
@@ -466,7 +466,7 @@ static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
return 0;
}
-static void __devexit kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
+static void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
{
input_unregister_polled_device(tj9->poll_dev);
input_free_polled_device(tj9->poll_dev);
@@ -485,7 +485,7 @@ static inline void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
#endif
-static int __devinit kxtj9_verify(struct kxtj9_data *tj9)
+static int kxtj9_verify(struct kxtj9_data *tj9)
{
int retval;
@@ -506,7 +506,7 @@ out:
return retval;
}
-static int __devinit kxtj9_probe(struct i2c_client *client,
+static int kxtj9_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct kxtj9_platform_data *pdata = client->dev.platform_data;
@@ -594,7 +594,7 @@ err_free_mem:
return err;
}
-static int __devexit kxtj9_remove(struct i2c_client *client)
+static int kxtj9_remove(struct i2c_client *client)
{
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -663,7 +663,7 @@ static struct i2c_driver kxtj9_driver = {
.pm = &kxtj9_pm_ops,
},
.probe = kxtj9_probe,
- .remove = __devexit_p(kxtj9_remove),
+ .remove = kxtj9_remove,
.id_table = kxtj9_id,
};
diff --git a/drivers/input/misc/m68kspkr.c b/drivers/input/misc/m68kspkr.c
index 0c64d9bb718e..b40ee4b47f4f 100644
--- a/drivers/input/misc/m68kspkr.c
+++ b/drivers/input/misc/m68kspkr.c
@@ -48,7 +48,7 @@ static int m68kspkr_event(struct input_dev *dev, unsigned int type, unsigned int
return 0;
}
-static int __devinit m68kspkr_probe(struct platform_device *dev)
+static int m68kspkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
int err;
@@ -80,7 +80,7 @@ static int __devinit m68kspkr_probe(struct platform_device *dev)
return 0;
}
-static int __devexit m68kspkr_remove(struct platform_device *dev)
+static int m68kspkr_remove(struct platform_device *dev)
{
struct input_dev *input_dev = platform_get_drvdata(dev);
@@ -104,7 +104,7 @@ static struct platform_driver m68kspkr_platform_driver = {
.owner = THIS_MODULE,
},
.probe = m68kspkr_probe,
- .remove = __devexit_p(m68kspkr_remove),
+ .remove = m68kspkr_remove,
.shutdown = m68kspkr_shutdown,
};
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 0a12b74140d3..369a39de4ff3 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -62,7 +62,7 @@ static irqreturn_t max8925_onkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit max8925_onkey_probe(struct platform_device *pdev)
+static int max8925_onkey_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct max8925_onkey_info *info;
@@ -141,7 +141,7 @@ err_free_mem:
return error;
}
-static int __devexit max8925_onkey_remove(struct platform_device *pdev)
+static int max8925_onkey_remove(struct platform_device *pdev)
{
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -195,7 +195,7 @@ static struct platform_driver max8925_onkey_driver = {
.pm = &max8925_onkey_pm_ops,
},
.probe = max8925_onkey_probe,
- .remove = __devexit_p(max8925_onkey_remove),
+ .remove = max8925_onkey_remove,
};
module_platform_driver(max8925_onkey_driver);
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 05b7b8bfaf0a..e973133212a5 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -241,7 +241,7 @@ static void max8997_haptic_close(struct input_dev *dev)
max8997_haptic_disable(chip);
}
-static int __devinit max8997_haptic_probe(struct platform_device *pdev)
+static int max8997_haptic_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
const struct max8997_platform_data *pdata =
@@ -354,7 +354,7 @@ err_free_mem:
return error;
}
-static int __devexit max8997_haptic_remove(struct platform_device *pdev)
+static int max8997_haptic_remove(struct platform_device *pdev)
{
struct max8997_haptic *chip = platform_get_drvdata(pdev);
@@ -396,7 +396,7 @@ static struct platform_driver max8997_haptic_driver = {
.pm = &max8997_haptic_pm_ops,
},
.probe = max8997_haptic_probe,
- .remove = __devexit_p(max8997_haptic_remove),
+ .remove = max8997_haptic_remove,
.id_table = max8997_haptic_id,
};
module_platform_driver(max8997_haptic_driver);
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 8428f1e8e83e..0906ca593d5f 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -89,7 +89,7 @@ static irqreturn_t button_irq(int irq, void *_priv)
return IRQ_HANDLED;
}
-static int __devinit mc13783_pwrbutton_probe(struct platform_device *pdev)
+static int mc13783_pwrbutton_probe(struct platform_device *pdev)
{
const struct mc13xxx_buttons_platform_data *pdata;
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
@@ -230,7 +230,7 @@ free_input_dev:
return err;
}
-static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
+static int mc13783_pwrbutton_remove(struct platform_device *pdev)
{
struct mc13783_pwrb *priv = platform_get_drvdata(pdev);
const struct mc13xxx_buttons_platform_data *pdata;
@@ -257,7 +257,7 @@ static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
static struct platform_driver mc13783_pwrbutton_driver = {
.probe = mc13783_pwrbutton_probe,
- .remove = __devexit_p(mc13783_pwrbutton_remove),
+ .remove = mc13783_pwrbutton_remove,
.driver = {
.name = "mc13783-pwrbutton",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 873ebced544e..480557f14f23 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -167,7 +167,7 @@ static void mma8450_close(struct input_polled_dev *dev)
/*
* I2C init/probing/exit functions
*/
-static int __devinit mma8450_probe(struct i2c_client *c,
+static int mma8450_probe(struct i2c_client *c,
const struct i2c_device_id *id)
{
struct input_polled_dev *idev;
@@ -212,7 +212,7 @@ err_free_mem:
return err;
}
-static int __devexit mma8450_remove(struct i2c_client *c)
+static int mma8450_remove(struct i2c_client *c)
{
struct mma8450 *m = i2c_get_clientdata(c);
struct input_polled_dev *idev = m->idev;
@@ -243,7 +243,7 @@ static struct i2c_driver mma8450_driver = {
.of_match_table = mma8450_dt_ids,
},
.probe = mma8450_probe,
- .remove = __devexit_p(mma8450_remove),
+ .remove = mma8450_remove,
.id_table = mma8450_id,
};
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 306f84c2d8fb..dce0d95943c5 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -257,7 +257,7 @@ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
*
* Called during device probe; configures the sampling method.
*/
-static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
+static int mpu3050_hw_init(struct mpu3050_sensor *sensor)
{
struct i2c_client *client = sensor->client;
int ret;
@@ -306,7 +306,7 @@ static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
*
* If present install the relevant sysfs interfaces and input device.
*/
-static int __devinit mpu3050_probe(struct i2c_client *client,
+static int mpu3050_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mpu3050_sensor *sensor;
@@ -402,7 +402,7 @@ err_free_mem:
*
* Our sensor is going away, clean up the resources.
*/
-static int __devexit mpu3050_remove(struct i2c_client *client)
+static int mpu3050_remove(struct i2c_client *client)
{
struct mpu3050_sensor *sensor = i2c_get_clientdata(client);
@@ -471,7 +471,7 @@ static struct i2c_driver mpu3050_i2c_driver = {
.of_match_table = mpu3050_of_match,
},
.probe = mpu3050_probe,
- .remove = __devexit_p(mpu3050_remove),
+ .remove = mpu3050_remove,
.id_table = mpu3050_ids,
};
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index e09b4fe81913..40ac9a5adf89 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -48,7 +48,7 @@ static irqreturn_t pcap_keys_handler(int irq, void *_pcap_keys)
return IRQ_HANDLED;
}
-static int __devinit pcap_keys_probe(struct platform_device *pdev)
+static int pcap_keys_probe(struct platform_device *pdev)
{
int err = -ENOMEM;
struct pcap_keys *pcap_keys;
@@ -104,7 +104,7 @@ fail:
return err;
}
-static int __devexit pcap_keys_remove(struct platform_device *pdev)
+static int pcap_keys_remove(struct platform_device *pdev)
{
struct pcap_keys *pcap_keys = platform_get_drvdata(pdev);
@@ -119,7 +119,7 @@ static int __devexit pcap_keys_remove(struct platform_device *pdev)
static struct platform_driver pcap_keys_device_driver = {
.probe = pcap_keys_probe,
- .remove = __devexit_p(pcap_keys_remove),
+ .remove = pcap_keys_remove,
.driver = {
.name = "pcap-keys",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 53891de80b0e..73b13ebabe56 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -53,7 +53,7 @@ pcf50633_input_irq(int irq, void *data)
input_sync(input->input_dev);
}
-static int __devinit pcf50633_input_probe(struct platform_device *pdev)
+static int pcf50633_input_probe(struct platform_device *pdev)
{
struct pcf50633_input *input;
struct input_dev *input_dev;
@@ -93,7 +93,7 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pcf50633_input_remove(struct platform_device *pdev)
+static int pcf50633_input_remove(struct platform_device *pdev)
{
struct pcf50633_input *input = platform_get_drvdata(pdev);
@@ -111,7 +111,7 @@ static struct platform_driver pcf50633_input_driver = {
.name = "pcf50633-input",
},
.probe = pcf50633_input_probe,
- .remove = __devexit_p(pcf50633_input_remove),
+ .remove = pcf50633_input_remove,
};
module_platform_driver(pcf50633_input_driver);
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 544c6635abe9..e37392976fdd 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -82,7 +82,7 @@ static irqreturn_t pcf8574_kp_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
int i, ret;
struct input_dev *idev;
@@ -156,7 +156,7 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
return ret;
}
-static int __devexit pcf8574_kp_remove(struct i2c_client *client)
+static int pcf8574_kp_remove(struct i2c_client *client)
{
struct kp_data *lp = i2c_get_clientdata(client);
@@ -212,7 +212,7 @@ static struct i2c_driver pcf8574_kp_driver = {
#endif
},
.probe = pcf8574_kp_probe,
- .remove = __devexit_p(pcf8574_kp_remove),
+ .remove = pcf8574_kp_remove,
.id_table = pcf8574_kp_id,
};
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index b2484aa07f32..199db78acc4f 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -63,7 +63,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
return 0;
}
-static int __devinit pcspkr_probe(struct platform_device *dev)
+static int pcspkr_probe(struct platform_device *dev)
{
struct input_dev *pcspkr_dev;
int err;
@@ -95,7 +95,7 @@ static int __devinit pcspkr_probe(struct platform_device *dev)
return 0;
}
-static int __devexit pcspkr_remove(struct platform_device *dev)
+static int pcspkr_remove(struct platform_device *dev)
{
struct input_dev *pcspkr_dev = platform_get_drvdata(dev);
@@ -131,7 +131,7 @@ static struct platform_driver pcspkr_platform_driver = {
.pm = &pcspkr_pm_ops,
},
.probe = pcspkr_probe,
- .remove = __devexit_p(pcspkr_remove),
+ .remove = pcspkr_remove,
.shutdown = pcspkr_shutdown,
};
module_platform_driver(pcspkr_platform_driver);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index dfbfb463ea5d..a9da65e41c5b 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -178,7 +178,7 @@ static int pm8xxx_vib_play_effect(struct input_dev *dev, void *data,
return 0;
}
-static int __devinit pm8xxx_vib_probe(struct platform_device *pdev)
+static int pm8xxx_vib_probe(struct platform_device *pdev)
{
struct pm8xxx_vib *vib;
@@ -242,7 +242,7 @@ err_free_mem:
return error;
}
-static int __devexit pm8xxx_vib_remove(struct platform_device *pdev)
+static int pm8xxx_vib_remove(struct platform_device *pdev)
{
struct pm8xxx_vib *vib = platform_get_drvdata(pdev);
@@ -270,7 +270,7 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
static struct platform_driver pm8xxx_vib_driver = {
.probe = pm8xxx_vib_probe,
- .remove = __devexit_p(pm8xxx_vib_remove),
+ .remove = pm8xxx_vib_remove,
.driver = {
.name = "pm8xxx-vib",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 0f83d0f1d015..4b811be73974 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -81,7 +81,7 @@ static int pmic8xxx_pwrkey_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
-static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
{
struct input_dev *pwr;
int key_release_irq = platform_get_irq(pdev, 0);
@@ -187,7 +187,7 @@ free_pwrkey:
return err;
}
-static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev)
+static int pmic8xxx_pwrkey_remove(struct platform_device *pdev)
{
struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
int key_release_irq = platform_get_irq(pdev, 0);
@@ -206,7 +206,7 @@ static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev)
static struct platform_driver pmic8xxx_pwrkey_driver = {
.probe = pmic8xxx_pwrkey_probe,
- .remove = __devexit_p(pmic8xxx_pwrkey_remove),
+ .remove = pmic8xxx_pwrkey_remove,
.driver = {
.name = PM8XXX_PWRKEY_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index fc84c8a51147..0808868461de 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -65,7 +65,7 @@ static int pwm_beeper_event(struct input_dev *input,
return 0;
}
-static int __devinit pwm_beeper_probe(struct platform_device *pdev)
+static int pwm_beeper_probe(struct platform_device *pdev)
{
unsigned long pwm_id = (unsigned long)pdev->dev.platform_data;
struct pwm_beeper *beeper;
@@ -75,7 +75,11 @@ static int __devinit pwm_beeper_probe(struct platform_device *pdev)
if (!beeper)
return -ENOMEM;
- beeper->pwm = pwm_request(pwm_id, "pwm beeper");
+ beeper->pwm = pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(beeper->pwm)) {
+ dev_dbg(&pdev->dev, "unable to request PWM, trying legacy API\n");
+ beeper->pwm = pwm_request(pwm_id, "pwm beeper");
+ }
if (IS_ERR(beeper->pwm)) {
error = PTR_ERR(beeper->pwm);
@@ -125,7 +129,7 @@ err_free:
return error;
}
-static int __devexit pwm_beeper_remove(struct platform_device *pdev)
+static int pwm_beeper_remove(struct platform_device *pdev)
{
struct pwm_beeper *beeper = platform_get_drvdata(pdev);
@@ -171,13 +175,21 @@ static SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
#define PWM_BEEPER_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id pwm_beeper_match[] = {
+ { .compatible = "pwm-beeper", },
+ { },
+};
+#endif
+
static struct platform_driver pwm_beeper_driver = {
.probe = pwm_beeper_probe,
- .remove = __devexit_p(pwm_beeper_remove),
+ .remove = pwm_beeper_remove,
.driver = {
.name = "pwm-beeper",
.owner = THIS_MODULE,
.pm = PWM_BEEPER_PM_OPS,
+ .of_match_table = of_match_ptr(pwm_beeper_match),
},
};
module_platform_driver(pwm_beeper_driver);
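pwm_beeper_probe() now prefers the pwm_get() lookup and only falls back to the legacy pwm_request(pwm_id, ...) call, with the channel id still taken from pdev->dev.platform_data, while the new of_match_table lets the device be instantiated from a "pwm-beeper" device-tree node. The following is a hedged sketch of how a non-DT board file might keep registering the device so that the fallback path is exercised; the channel id (2) and the init function name are made up for illustration and are not part of the patch.

/*
 * Hypothetical board code for the legacy fallback path: the PWM channel id
 * is passed through platform_data exactly as pwm_beeper_probe() reads it.
 */
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device board_beeper = {
	.name	= "pwm-beeper",
	.id	= -1,
	.dev	= {
		.platform_data = (void *)2UL,	/* example legacy PWM channel id */
	},
};

static int __init board_beeper_init(void)
{
	return platform_device_register(&board_beeper);
}
device_initcall(board_beeper_init);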
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index aeb02bcf7233..fb4f8ac3343b 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -51,7 +51,7 @@ static void rb532_button_poll(struct input_polled_dev *poll_dev)
input_sync(poll_dev->input);
}
-static int __devinit rb532_button_probe(struct platform_device *pdev)
+static int rb532_button_probe(struct platform_device *pdev)
{
struct input_polled_dev *poll_dev;
int error;
@@ -81,7 +81,7 @@ static int __devinit rb532_button_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit rb532_button_remove(struct platform_device *pdev)
+static int rb532_button_remove(struct platform_device *pdev)
{
struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev);
@@ -94,7 +94,7 @@ static int __devexit rb532_button_remove(struct platform_device *pdev)
static struct platform_driver rb532_button_driver = {
.probe = rb532_button_probe,
- .remove = __devexit_p(rb532_button_remove),
+ .remove = rb532_button_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/retu-pwrbutton.c b/drivers/input/misc/retu-pwrbutton.c
new file mode 100644
index 000000000000..7ca09baa0016
--- /dev/null
+++ b/drivers/input/misc/retu-pwrbutton.c
@@ -0,0 +1,99 @@
+/*
+ * Retu power button driver.
+ *
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Original code written by Ari Saastamoinen, Juha Yrjölä and Felipe Balbi.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/retu.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#define RETU_STATUS_PWRONX (1 << 5)
+
+static irqreturn_t retu_pwrbutton_irq(int irq, void *_pwr)
+{
+ struct input_dev *idev = _pwr;
+ struct retu_dev *rdev = input_get_drvdata(idev);
+ bool state;
+
+ state = !(retu_read(rdev, RETU_REG_STATUS) & RETU_STATUS_PWRONX);
+ input_report_key(idev, KEY_POWER, state);
+ input_sync(idev);
+
+ return IRQ_HANDLED;
+}
+
+static int retu_pwrbutton_probe(struct platform_device *pdev)
+{
+ struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
+ struct input_dev *idev;
+ int irq;
+ int error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->name = "retu-pwrbutton";
+ idev->dev.parent = &pdev->dev;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+ input_set_drvdata(idev, rdev);
+
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, retu_pwrbutton_irq, 0,
+ "retu-pwrbutton", idev);
+ if (error)
+ return error;
+
+ error = input_register_device(idev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int retu_pwrbutton_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver retu_pwrbutton_driver = {
+ .probe = retu_pwrbutton_probe,
+ .remove = retu_pwrbutton_remove,
+ .driver = {
+ .name = "retu-pwrbutton",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(retu_pwrbutton_driver);
+
+MODULE_ALIAS("platform:retu-pwrbutton");
+MODULE_DESCRIPTION("Retu Power Button");
+MODULE_AUTHOR("Ari Saastamoinen");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 99a49e4968d2..aff47b2c38ff 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -149,8 +149,7 @@ static struct of_device_id rotary_encoder_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
-static struct rotary_encoder_platform_data * __devinit
-rotary_encoder_parse_dt(struct device *dev)
+static struct rotary_encoder_platform_data *rotary_encoder_parse_dt(struct device *dev)
{
const struct of_device_id *of_id =
of_match_device(rotary_encoder_of_match, dev);
@@ -192,7 +191,7 @@ rotary_encoder_parse_dt(struct device *dev)
}
#endif
-static int __devinit rotary_encoder_probe(struct platform_device *pdev)
+static int rotary_encoder_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rotary_encoder_platform_data *pdata = dev_get_platdata(dev);
@@ -302,7 +301,7 @@ exit_free_mem:
return err;
}
-static int __devexit rotary_encoder_remove(struct platform_device *pdev)
+static int rotary_encoder_remove(struct platform_device *pdev)
{
struct rotary_encoder *encoder = platform_get_drvdata(pdev);
const struct rotary_encoder_platform_data *pdata = encoder->pdata;
@@ -325,7 +324,7 @@ static int __devexit rotary_encoder_remove(struct platform_device *pdev)
static struct platform_driver rotary_encoder_driver = {
.probe = rotary_encoder_probe,
- .remove = __devexit_p(rotary_encoder_remove),
+ .remove = rotary_encoder_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 5d9fd5571199..ad6415ceaf5f 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -91,7 +91,7 @@ static void handle_buttons(struct input_polled_dev *dev)
}
}
-static int __devinit sgi_buttons_probe(struct platform_device *pdev)
+static int sgi_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -143,7 +143,7 @@ static int __devinit sgi_buttons_probe(struct platform_device *pdev)
return error;
}
-static int __devexit sgi_buttons_remove(struct platform_device *pdev)
+static int sgi_buttons_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct buttons_dev *bdev = dev_get_drvdata(dev);
@@ -158,7 +158,7 @@ static int __devexit sgi_buttons_remove(struct platform_device *pdev)
static struct platform_driver sgi_buttons_driver = {
.probe = sgi_buttons_probe,
- .remove = __devexit_p(sgi_buttons_remove),
+ .remove = sgi_buttons_remove,
.driver = {
.name = "sgibtns",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 0122f5351577..a53586a7fbdb 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -139,7 +139,7 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned
return 0;
}
-static int __devinit sparcspkr_probe(struct device *dev)
+static int sparcspkr_probe(struct device *dev)
{
struct sparcspkr_state *state = dev_get_drvdata(dev);
struct input_dev *input_dev;
@@ -182,7 +182,7 @@ static void sparcspkr_shutdown(struct platform_device *dev)
state->event(input_dev, EV_SND, SND_BELL, 0);
}
-static int __devinit bbc_beep_probe(struct platform_device *op)
+static int bbc_beep_probe(struct platform_device *op)
{
struct sparcspkr_state *state;
struct bbc_beep_info *info;
@@ -229,7 +229,7 @@ out_err:
return err;
}
-static int __devexit bbc_remove(struct platform_device *op)
+static int bbc_remove(struct platform_device *op)
{
struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
struct input_dev *input_dev = state->input_dev;
@@ -263,11 +263,11 @@ static struct platform_driver bbc_beep_driver = {
.of_match_table = bbc_beep_match,
},
.probe = bbc_beep_probe,
- .remove = __devexit_p(bbc_remove),
+ .remove = bbc_remove,
.shutdown = sparcspkr_shutdown,
};
-static int __devinit grover_beep_probe(struct platform_device *op)
+static int grover_beep_probe(struct platform_device *op)
{
struct sparcspkr_state *state;
struct grover_beep_info *info;
@@ -310,7 +310,7 @@ out_err:
return err;
}
-static int __devexit grover_remove(struct platform_device *op)
+static int grover_remove(struct platform_device *op)
{
struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
struct grover_beep_info *info = &state->u.grover;
@@ -345,7 +345,7 @@ static struct platform_driver grover_beep_driver = {
.of_match_table = grover_beep_match,
},
.probe = grover_beep_probe,
- .remove = __devexit_p(grover_remove),
+ .remove = grover_remove,
.shutdown = sparcspkr_shutdown,
};
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index b3dd96d6448b..27c2bc8aa890 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -39,8 +39,7 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
int err;
u8 value;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
- STS_HW_CONDITIONS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &value, STS_HW_CONDITIONS);
if (!err) {
pm_wakeup_event(pwr->dev.parent, 0);
input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 2194a3c7236a..68a5f33152a8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -43,7 +43,6 @@ struct vibra_info {
struct device *dev;
struct input_dev *input_dev;
- struct workqueue_struct *workqueue;
struct work_struct play_work;
bool enabled;
@@ -143,19 +142,7 @@ static int vibra_play(struct input_dev *input, void *data,
if (!info->speed)
info->speed = effect->u.rumble.weak_magnitude >> 9;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 0 : 1;
- queue_work(info->workqueue, &info->play_work);
- return 0;
-}
-
-static int twl4030_vibra_open(struct input_dev *input)
-{
- struct vibra_info *info = input_get_drvdata(input);
-
- info->workqueue = create_singlethread_workqueue("vibra");
- if (info->workqueue == NULL) {
- dev_err(&input->dev, "couldn't create workqueue\n");
- return -ENOMEM;
- }
+ schedule_work(&info->play_work);
return 0;
}
@@ -164,9 +151,6 @@ static void twl4030_vibra_close(struct input_dev *input)
struct vibra_info *info = input_get_drvdata(input);
cancel_work_sync(&info->play_work);
- INIT_WORK(&info->play_work, vibra_play_work); /* cleanup */
- destroy_workqueue(info->workqueue);
- info->workqueue = NULL;
if (info->enabled)
vibra_disable(info);
@@ -207,7 +191,7 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
return false;
}
-static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
+static int twl4030_vibra_probe(struct platform_device *pdev)
{
struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
struct device_node *twl4030_core_node = pdev->dev.parent->of_node;
@@ -219,7 +203,7 @@ static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
return -EINVAL;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -227,11 +211,10 @@ static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
info->coexist = twl4030_vibra_check_coexist(pdata, twl4030_core_node);
INIT_WORK(&info->play_work, vibra_play_work);
- info->input_dev = input_allocate_device();
+ info->input_dev = devm_input_allocate_device(&pdev->dev);
if (info->input_dev == NULL) {
dev_err(&pdev->dev, "couldn't allocate input device\n");
- ret = -ENOMEM;
- goto err_kzalloc;
+ return -ENOMEM;
}
input_set_drvdata(info->input_dev, info);
@@ -239,14 +222,13 @@ static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
info->input_dev->name = "twl4030:vibrator";
info->input_dev->id.version = 1;
info->input_dev->dev.parent = pdev->dev.parent;
- info->input_dev->open = twl4030_vibra_open;
info->input_dev->close = twl4030_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n");
- goto err_ialloc;
+ return ret;
}
ret = input_register_device(info->input_dev);
@@ -262,28 +244,11 @@ static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
err_iff:
input_ff_destroy(info->input_dev);
-err_ialloc:
- input_free_device(info->input_dev);
-err_kzalloc:
- kfree(info);
return ret;
}
-static int __devexit twl4030_vibra_remove(struct platform_device *pdev)
-{
- struct vibra_info *info = platform_get_drvdata(pdev);
-
- /* this also free ff-memless and calls close if needed */
- input_unregister_device(info->input_dev);
- kfree(info);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
static struct platform_driver twl4030_vibra_driver = {
.probe = twl4030_vibra_probe,
- .remove = __devexit_p(twl4030_vibra_remove),
.driver = {
.name = "twl4030-vibra",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index c8a288ae1d5b..0c2dfc8e9691 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -255,7 +255,7 @@ static int twl6040_vibra_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
-static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
+static int twl6040_vibra_probe(struct platform_device *pdev)
{
struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
struct device *twl6040_core_dev = pdev->dev.parent;
@@ -275,7 +275,7 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
return -EINVAL;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "couldn't allocate memory\n");
return -ENOMEM;
@@ -309,53 +309,23 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
if ((!info->vibldrv_res && !info->viblmotor_res) ||
(!info->vibrdrv_res && !info->vibrmotor_res)) {
dev_err(info->dev, "invalid vibra driver/motor resistance\n");
- ret = -EINVAL;
- goto err_kzalloc;
+ return -EINVAL;
}
info->irq = platform_get_irq(pdev, 0);
if (info->irq < 0) {
dev_err(info->dev, "invalid irq\n");
- ret = -EINVAL;
- goto err_kzalloc;
+ return -EINVAL;
}
mutex_init(&info->mutex);
- info->input_dev = input_allocate_device();
- if (info->input_dev == NULL) {
- dev_err(info->dev, "couldn't allocate input device\n");
- ret = -ENOMEM;
- goto err_kzalloc;
- }
-
- input_set_drvdata(info->input_dev, info);
-
- info->input_dev->name = "twl6040:vibrator";
- info->input_dev->id.version = 1;
- info->input_dev->dev.parent = pdev->dev.parent;
- info->input_dev->close = twl6040_vibra_close;
- __set_bit(FF_RUMBLE, info->input_dev->ffbit);
-
- ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
- if (ret < 0) {
- dev_err(info->dev, "couldn't register vibrator to FF\n");
- goto err_ialloc;
- }
-
- ret = input_register_device(info->input_dev);
- if (ret < 0) {
- dev_err(info->dev, "couldn't register input device\n");
- goto err_iff;
- }
-
- platform_set_drvdata(pdev, info);
-
- ret = request_threaded_irq(info->irq, NULL, twl6040_vib_irq_handler, 0,
- "twl6040_irq_vib", info);
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ twl6040_vib_irq_handler, 0,
+ "twl6040_irq_vib", info);
if (ret) {
dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
- goto err_irq;
+ return ret;
}
info->supplies[0].supply = "vddvibl";
@@ -368,7 +338,7 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
ARRAY_SIZE(info->supplies), info->supplies);
if (ret) {
dev_err(info->dev, "couldn't get regulators %d\n", ret);
- goto err_regulator;
+ return ret;
}
if (vddvibl_uV) {
@@ -377,7 +347,7 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
if (ret) {
dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
ret);
- goto err_voltage;
+ goto err_regulator;
}
}
@@ -387,53 +357,65 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
if (ret) {
dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
ret);
- goto err_voltage;
+ goto err_regulator;
}
}
- info->workqueue = alloc_workqueue("twl6040-vibra", 0, 0);
- if (info->workqueue == NULL) {
- dev_err(info->dev, "couldn't create workqueue\n");
+ INIT_WORK(&info->play_work, vibra_play_work);
+
+ info->input_dev = input_allocate_device();
+ if (info->input_dev == NULL) {
+ dev_err(info->dev, "couldn't allocate input device\n");
ret = -ENOMEM;
- goto err_voltage;
+ goto err_regulator;
}
- INIT_WORK(&info->play_work, vibra_play_work);
+
+ input_set_drvdata(info->input_dev, info);
+
+ info->input_dev->name = "twl6040:vibrator";
+ info->input_dev->id.version = 1;
+ info->input_dev->dev.parent = pdev->dev.parent;
+ info->input_dev->close = twl6040_vibra_close;
+ __set_bit(FF_RUMBLE, info->input_dev->ffbit);
+
+ ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
+ if (ret < 0) {
+ dev_err(info->dev, "couldn't register vibrator to FF\n");
+ goto err_ialloc;
+ }
+
+ ret = input_register_device(info->input_dev);
+ if (ret < 0) {
+ dev_err(info->dev, "couldn't register input device\n");
+ goto err_iff;
+ }
+
+ platform_set_drvdata(pdev, info);
return 0;
-err_voltage:
- regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
-err_regulator:
- free_irq(info->irq, info);
-err_irq:
- input_unregister_device(info->input_dev);
- info->input_dev = NULL;
err_iff:
- if (info->input_dev)
- input_ff_destroy(info->input_dev);
+ input_ff_destroy(info->input_dev);
err_ialloc:
input_free_device(info->input_dev);
-err_kzalloc:
- kfree(info);
+err_regulator:
+ regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
return ret;
}
-static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
+static int twl6040_vibra_remove(struct platform_device *pdev)
{
struct vibra_info *info = platform_get_drvdata(pdev);
input_unregister_device(info->input_dev);
- free_irq(info->irq, info);
regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
- destroy_workqueue(info->workqueue);
- kfree(info);
return 0;
}
static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
- .remove = __devexit_p(twl6040_vibra_remove),
+ .remove = twl6040_vibra_remove,
.driver = {
.name = "twl6040-vibra",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index e2bdfd4bea70..56536f4b9572 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -170,7 +170,7 @@ static u16 bios_pop_queue(void)
return regs.eax;
}
-static void __devinit bios_attach(void)
+static void bios_attach(void)
{
struct regs regs;
@@ -190,7 +190,7 @@ static void bios_detach(void)
call_bios(&regs);
}
-static u8 __devinit bios_get_cmos_address(void)
+static u8 bios_get_cmos_address(void)
{
struct regs regs;
@@ -202,7 +202,7 @@ static u8 __devinit bios_get_cmos_address(void)
return regs.ecx;
}
-static u16 __devinit bios_get_default_setting(u8 subsys)
+static u16 bios_get_default_setting(u8 subsys)
{
struct regs regs;
@@ -1052,7 +1052,7 @@ static struct led_classdev wistron_wifi_led = {
.brightness_set = wistron_wifi_led_set,
};
-static void __devinit wistron_led_init(struct device *parent)
+static void wistron_led_init(struct device *parent)
{
if (leds_present & FE_WIFI_LED) {
u16 wifi = bios_get_default_setting(WIFI);
@@ -1077,7 +1077,7 @@ static void __devinit wistron_led_init(struct device *parent)
}
}
-static void __devexit wistron_led_remove(void)
+static void wistron_led_remove(void)
{
if (leds_present & FE_MAIL_LED)
led_classdev_unregister(&wistron_mail_led);
@@ -1168,7 +1168,7 @@ static void wistron_poll(struct input_polled_dev *dev)
dev->poll_interval = POLL_INTERVAL_DEFAULT;
}
-static int __devinit wistron_setup_keymap(struct input_dev *dev,
+static int wistron_setup_keymap(struct input_dev *dev,
struct key_entry *entry)
{
switch (entry->type) {
@@ -1199,7 +1199,7 @@ static int __devinit wistron_setup_keymap(struct input_dev *dev,
return 0;
}
-static int __devinit setup_input_dev(void)
+static int setup_input_dev(void)
{
struct input_dev *input_dev;
int error;
@@ -1237,7 +1237,7 @@ static int __devinit setup_input_dev(void)
/* Driver core */
-static int __devinit wistron_probe(struct platform_device *dev)
+static int wistron_probe(struct platform_device *dev)
{
int err;
@@ -1277,7 +1277,7 @@ static int __devinit wistron_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wistron_remove(struct platform_device *dev)
+static int wistron_remove(struct platform_device *dev)
{
wistron_led_remove();
input_unregister_polled_device(wistron_idev);
@@ -1334,7 +1334,7 @@ static struct platform_driver wistron_driver = {
#endif
},
.probe = wistron_probe,
- .remove = __devexit_p(wistron_remove),
+ .remove = wistron_remove,
};
static int __init wb_module_init(void)
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 6790a812a1db..caa2c4068f09 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -69,14 +69,15 @@ static irqreturn_t wm831x_on_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit wm831x_on_probe(struct platform_device *pdev)
+static int wm831x_on_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_on *wm831x_on;
int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
int ret;
- wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
+ wm831x_on = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_on),
+ GFP_KERNEL);
if (!wm831x_on) {
dev_err(&pdev->dev, "Can't allocate data\n");
return -ENOMEM;
@@ -85,7 +86,7 @@ static int __devinit wm831x_on_probe(struct platform_device *pdev)
wm831x_on->wm831x = wm831x;
INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on);
- wm831x_on->dev = input_allocate_device();
+ wm831x_on->dev = devm_input_allocate_device(&pdev->dev);
if (!wm831x_on->dev) {
dev_err(&pdev->dev, "Can't allocate input dev\n");
ret = -ENOMEM;
@@ -118,28 +119,24 @@ static int __devinit wm831x_on_probe(struct platform_device *pdev)
err_irq:
free_irq(irq, wm831x_on);
err_input_dev:
- input_free_device(wm831x_on->dev);
err:
- kfree(wm831x_on);
return ret;
}
-static int __devexit wm831x_on_remove(struct platform_device *pdev)
+static int wm831x_on_remove(struct platform_device *pdev)
{
struct wm831x_on *wm831x_on = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
free_irq(irq, wm831x_on);
cancel_delayed_work_sync(&wm831x_on->work);
- input_unregister_device(wm831x_on->dev);
- kfree(wm831x_on);
return 0;
}
static struct platform_driver wm831x_on_driver = {
.probe = wm831x_on_probe,
- .remove = __devexit_p(wm831x_on_remove),
+ .remove = wm831x_on_remove,
.driver = {
.name = "wm831x-on",
.owner = THIS_MODULE,
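
The conversion above moves wm831x-on to device-managed allocations: memory obtained with devm_kzalloc() and an input device from devm_input_allocate_device() are released automatically when probe fails or the device is unbound, which is why the explicit kfree()/input_free_device() error paths and the remove-time unregister disappear. A minimal sketch of the pattern, using a hypothetical example_probe() and example_priv (not part of this patch; assumes <linux/input.h> and <linux/platform_device.h>):

	struct example_priv {
		struct input_dev *input;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_priv *priv;

		/* freed automatically on probe failure or device unbind */
		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* unregistered and freed automatically as well */
		priv->input = devm_input_allocate_device(&pdev->dev);
		if (!priv->input)
			return -ENOMEM;

		priv->input->name = "example";
		return input_register_device(priv->input);
	}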
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6f7d99013031..e21c1816a8f9 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -104,7 +104,7 @@ static irqreturn_t input_handler(int rq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit xenkbd_probe(struct xenbus_device *dev,
+static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int ret, i, abs;
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index cd6268cf7cd5..802bd6a72d73 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -68,6 +68,16 @@ config MOUSE_PS2_SYNAPTICS
If unsure, say Y.
+config MOUSE_PS2_CYPRESS
+ bool "Cypress PS/2 mouse protocol extension" if EXPERT
+ default y
+ depends on MOUSE_PS2
+ help
+ Say Y here if you have a Cypress PS/2 Trackpad connected to
+ your system.
+
+ If unsure, say Y.
+
config MOUSE_PS2_LIFEBOOK
bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
default y
@@ -193,6 +203,18 @@ config MOUSE_BCM5974
To compile this driver as a module, choose M here: the
module will be called bcm5974.
+config MOUSE_CYAPA
+ tristate "Cypress APA I2C Trackpad support"
+ depends on I2C
+ help
+ This driver adds support for Cypress All Points Addressable (APA)
+ I2C Trackpads, including the ones used in 2012 Samsung Chromebooks.
+
+ Say Y here if you have a Cypress APA I2C Trackpad.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cyapa.
+
config MOUSE_INPORT
tristate "InPort/MS/ATIXL busmouse"
depends on ISA
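
The two help texts above follow the usual split: the PS/2 protocol extension is linked into psmouse, while the I2C driver can be modular. An illustrative configuration (not part of the patch):

	CONFIG_MOUSE_PS2_CYPRESS=y
	CONFIG_MOUSE_CYAPA=m        # builds cyapa.ko, loaded with "modprobe cyapa"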
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 46ba7556fd4f..c25efdb3f288 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
+obj-$(CONFIG_MOUSE_CYAPA) += cyapa.o
obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
@@ -32,3 +33,4 @@ psmouse-$(CONFIG_MOUSE_PS2_LIFEBOOK) += lifebook.o
psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o
psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o
+psmouse-$(CONFIG_MOUSE_PS2_CYPRESS) += cypress_ps2.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index cf5af1f495ec..7b99fc7c9438 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -27,14 +27,11 @@
/*
* Definitions for ALPS version 3 and 4 command mode protocol
*/
-#define ALPS_V3_X_MAX 2000
-#define ALPS_V3_Y_MAX 1400
-
-#define ALPS_BITMAP_X_BITS 15
-#define ALPS_BITMAP_Y_BITS 11
-
#define ALPS_CMD_NIBBLE_10 0x01f2
+#define ALPS_REG_BASE_RUSHMORE 0xc2c0
+#define ALPS_REG_BASE_PINNACLE 0x0000
+
static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
{ PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */
{ PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
@@ -109,11 +106,14 @@ static const struct alps_model_info alps_model_data[] = {
{ { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
{ { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
- { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
- { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
{ { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
};
+static void alps_set_abs_params_st(struct alps_data *priv,
+ struct input_dev *dev1);
+static void alps_set_abs_params_mt(struct alps_data *priv,
+ struct input_dev *dev1);
+
/*
* XXX - this entry is suspicious. First byte has zero lower nibble,
* which is what a normal mouse would report. Also, the value 0x0e
@@ -122,10 +122,10 @@ static const struct alps_model_info alps_model_data[] = {
/* Packet formats are described in Documentation/input/alps.txt */
-static bool alps_is_valid_first_byte(const struct alps_model_info *model,
+static bool alps_is_valid_first_byte(struct alps_data *priv,
unsigned char data)
{
- return (data & model->mask0) == model->byte0;
+ return (data & priv->mask0) == priv->byte0;
}
static void alps_report_buttons(struct psmouse *psmouse,
@@ -158,14 +158,13 @@ static void alps_report_buttons(struct psmouse *psmouse,
static void alps_process_packet_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
struct input_dev *dev2 = priv->dev2;
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
- if (model->proto_version == ALPS_PROTO_V1) {
+ if (priv->proto_version == ALPS_PROTO_V1) {
left = packet[2] & 0x10;
right = packet[2] & 0x08;
middle = 0;
@@ -181,12 +180,12 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
z = packet[5];
}
- if (model->flags & ALPS_FW_BK_1) {
+ if (priv->flags & ALPS_FW_BK_1) {
back = packet[0] & 0x10;
forward = packet[2] & 4;
}
- if (model->flags & ALPS_FW_BK_2) {
+ if (priv->flags & ALPS_FW_BK_2) {
back = packet[3] & 4;
forward = packet[2] & 4;
if ((middle = forward && back))
@@ -196,7 +195,7 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
ges = packet[2] & 1;
fin = packet[2] & 2;
- if ((model->flags & ALPS_DUALPOINT) && z == 127) {
+ if ((priv->flags & ALPS_DUALPOINT) && z == 127) {
input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
@@ -239,15 +238,15 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
input_report_abs(dev, ABS_PRESSURE, z);
input_report_key(dev, BTN_TOOL_FINGER, z > 0);
- if (model->flags & ALPS_WHEEL)
+ if (priv->flags & ALPS_WHEEL)
input_report_rel(dev, REL_WHEEL, ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07));
- if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
+ if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
input_report_key(dev, BTN_FORWARD, forward);
input_report_key(dev, BTN_BACK, back);
}
- if (model->flags & ALPS_FOUR_BUTTONS) {
+ if (priv->flags & ALPS_FOUR_BUTTONS) {
input_report_key(dev, BTN_0, packet[2] & 4);
input_report_key(dev, BTN_1, packet[0] & 0x10);
input_report_key(dev, BTN_2, packet[3] & 4);
@@ -267,7 +266,8 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
* These points are returned in x1, y1, x2, and y2 when the return value
* is greater than 0.
*/
-static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
+static int alps_process_bitmap(struct alps_data *priv,
+ unsigned int x_map, unsigned int y_map,
int *x1, int *y1, int *x2, int *y2)
{
struct alps_bitmap_point {
@@ -309,7 +309,7 @@ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
* y bitmap is reversed for what we need (lower positions are in
* higher bits), so we process from the top end.
*/
- y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS);
+ y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - priv->y_bits);
prev_bit = 0;
point = &y_low;
for (i = 0; y_map != 0; i++, y_map <<= 1) {
@@ -355,16 +355,18 @@ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
}
}
- *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) /
- (2 * (ALPS_BITMAP_X_BITS - 1));
- *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) /
- (2 * (ALPS_BITMAP_Y_BITS - 1));
+ *x1 = (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) /
+ (2 * (priv->x_bits - 1));
+ *y1 = (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) /
+ (2 * (priv->y_bits - 1));
if (fingers > 1) {
- *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) /
- (2 * (ALPS_BITMAP_X_BITS - 1));
- *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) /
- (2 * (ALPS_BITMAP_Y_BITS - 1));
+ *x2 = (priv->x_max *
+ (2 * x_high.start_bit + x_high.num_bits - 1)) /
+ (2 * (priv->x_bits - 1));
+ *y2 = (priv->y_max *
+ (2 * y_high.start_bit + y_high.num_bits - 1)) /
+ (2 * (priv->y_bits - 1));
}
return fingers;
@@ -448,17 +450,57 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
return;
}
+static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
+{
+ f->left = !!(p[3] & 0x01);
+ f->right = !!(p[3] & 0x02);
+ f->middle = !!(p[3] & 0x04);
+
+ f->ts_left = !!(p[3] & 0x10);
+ f->ts_right = !!(p[3] & 0x20);
+ f->ts_middle = !!(p[3] & 0x40);
+}
+
+static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p)
+{
+ f->first_mp = !!(p[4] & 0x40);
+ f->is_mp = !!(p[0] & 0x40);
+
+ f->fingers = (p[5] & 0x3) + 1;
+ f->x_map = ((p[4] & 0x7e) << 8) |
+ ((p[1] & 0x7f) << 2) |
+ ((p[0] & 0x30) >> 4);
+ f->y_map = ((p[3] & 0x70) << 4) |
+ ((p[2] & 0x7f) << 1) |
+ (p[4] & 0x01);
+
+ f->x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
+ ((p[0] & 0x30) >> 4);
+ f->y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
+ f->z = p[5] & 0x7f;
+
+ alps_decode_buttons_v3(f, p);
+}
+
+static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
+{
+ alps_decode_pinnacle(f, p);
+
+ f->x_map |= (p[5] & 0x10) << 11;
+ f->y_map |= (p[5] & 0x20) << 6;
+}
+
static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
struct input_dev *dev2 = priv->dev2;
- int x, y, z;
- int left, right, middle;
int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
int fingers = 0, bmap_fingers;
- unsigned int x_bitmap, y_bitmap;
+ struct alps_fields f;
+
+ priv->decode_fields(&f, packet);
/*
* There's no single feature of touchpad position and bitmap packets
@@ -473,16 +515,10 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
* packet. Check for this, and when it happens process the
* position packet as usual.
*/
- if (packet[0] & 0x40) {
- fingers = (packet[5] & 0x3) + 1;
- x_bitmap = ((packet[4] & 0x7e) << 8) |
- ((packet[1] & 0x7f) << 2) |
- ((packet[0] & 0x30) >> 4);
- y_bitmap = ((packet[3] & 0x70) << 4) |
- ((packet[2] & 0x7f) << 1) |
- (packet[4] & 0x01);
-
- bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ if (f.is_mp) {
+ fingers = f.fingers;
+ bmap_fingers = alps_process_bitmap(priv,
+ f.x_map, f.y_map,
&x1, &y1, &x2, &y2);
/*
@@ -493,7 +529,7 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
fingers = bmap_fingers;
/* Now process position packet */
- packet = priv->multi_data;
+ priv->decode_fields(&f, priv->multi_data);
} else {
priv->multi_packet = 0;
}
@@ -507,10 +543,10 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
* out misidentified bitmap packets, we reject anything with this
* bit set.
*/
- if (packet[0] & 0x40)
+ if (f.is_mp)
return;
- if (!priv->multi_packet && (packet[4] & 0x40)) {
+ if (!priv->multi_packet && f.first_mp) {
priv->multi_packet = 1;
memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
return;
@@ -518,22 +554,13 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
priv->multi_packet = 0;
- left = packet[3] & 0x01;
- right = packet[3] & 0x02;
- middle = packet[3] & 0x04;
-
- x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
- ((packet[0] & 0x30) >> 4);
- y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
- z = packet[5] & 0x7f;
-
/*
* Sometimes the hardware sends a single packet with z = 0
* in the middle of a stream. Real releases generate packets
* with x, y, and z all zero, so these seem to be flukes.
* Ignore them.
*/
- if (x && y && !z)
+ if (f.x && f.y && !f.z)
return;
/*
@@ -541,12 +568,12 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
* to rely on ST data.
*/
if (!fingers) {
- x1 = x;
- y1 = y;
- fingers = z > 0 ? 1 : 0;
+ x1 = f.x;
+ y1 = f.y;
+ fingers = f.z > 0 ? 1 : 0;
}
- if (z >= 64)
+ if (f.z >= 64)
input_report_key(dev, BTN_TOUCH, 1);
else
input_report_key(dev, BTN_TOUCH, 0);
@@ -555,26 +582,22 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
input_mt_report_finger_count(dev, fingers);
- input_report_key(dev, BTN_LEFT, left);
- input_report_key(dev, BTN_RIGHT, right);
- input_report_key(dev, BTN_MIDDLE, middle);
+ input_report_key(dev, BTN_LEFT, f.left);
+ input_report_key(dev, BTN_RIGHT, f.right);
+ input_report_key(dev, BTN_MIDDLE, f.middle);
- if (z > 0) {
- input_report_abs(dev, ABS_X, x);
- input_report_abs(dev, ABS_Y, y);
+ if (f.z > 0) {
+ input_report_abs(dev, ABS_X, f.x);
+ input_report_abs(dev, ABS_Y, f.y);
}
- input_report_abs(dev, ABS_PRESSURE, z);
+ input_report_abs(dev, ABS_PRESSURE, f.z);
input_sync(dev);
if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
- left = packet[3] & 0x10;
- right = packet[3] & 0x20;
- middle = packet[3] & 0x40;
-
- input_report_key(dev2, BTN_LEFT, left);
- input_report_key(dev2, BTN_RIGHT, right);
- input_report_key(dev2, BTN_MIDDLE, middle);
+ input_report_key(dev2, BTN_LEFT, f.ts_left);
+ input_report_key(dev2, BTN_RIGHT, f.ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f.ts_middle);
input_sync(dev2);
}
}
@@ -639,7 +662,7 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
((priv->multi_data[3] & 0x1f) << 5) |
(priv->multi_data[1] & 0x1f);
- fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ fingers = alps_process_bitmap(priv, x_bitmap, y_bitmap,
&x1, &y1, &x2, &y2);
/* Store MT data.*/
@@ -696,25 +719,6 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
input_sync(dev);
}
-static void alps_process_packet(struct psmouse *psmouse)
-{
- struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
-
- switch (model->proto_version) {
- case ALPS_PROTO_V1:
- case ALPS_PROTO_V2:
- alps_process_packet_v1_v2(psmouse);
- break;
- case ALPS_PROTO_V3:
- alps_process_packet_v3(psmouse);
- break;
- case ALPS_PROTO_V4:
- alps_process_packet_v4(psmouse);
- break;
- }
-}
-
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
@@ -765,15 +769,14 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
if (((psmouse->packet[3] |
psmouse->packet[4] |
psmouse->packet[5]) & 0x80) ||
- (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
+ (!alps_is_valid_first_byte(priv, psmouse->packet[6]))) {
psmouse_dbg(psmouse,
- "refusing packet %x %x %x %x (suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5], psmouse->packet[6]);
+ "refusing packet %4ph (suspected interleaved ps/2)\n",
+ psmouse->packet + 3);
return PSMOUSE_BAD_DATA;
}
- alps_process_packet(psmouse);
+ priv->process_packet(psmouse);
/* Continue with the next packet */
psmouse->packet[0] = psmouse->packet[6];
@@ -817,6 +820,7 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
static void alps_flush_packet(unsigned long data)
{
struct psmouse *psmouse = (struct psmouse *)data;
+ struct alps_data *priv = psmouse->private;
serio_pause_rx(psmouse->ps2dev.serio);
@@ -831,11 +835,10 @@ static void alps_flush_packet(unsigned long data)
psmouse->packet[4] |
psmouse->packet[5]) & 0x80) {
psmouse_dbg(psmouse,
- "refusing packet %x %x %x (suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5]);
+ "refusing packet %3ph (suspected interleaved ps/2)\n",
+ psmouse->packet + 3);
} else {
- alps_process_packet(psmouse);
+ priv->process_packet(psmouse);
}
psmouse->pktcnt = 0;
}
@@ -846,7 +849,6 @@ static void alps_flush_packet(unsigned long data)
static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
if (psmouse->pktcnt == 3) {
@@ -859,15 +861,15 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
/* Check for PS/2 packet stuffed in the middle of ALPS packet. */
- if ((model->flags & ALPS_PS2_INTERLEAVED) &&
+ if ((priv->flags & ALPS_PS2_INTERLEAVED) &&
psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
return alps_handle_interleaved_ps2(psmouse);
}
- if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
+ if (!alps_is_valid_first_byte(priv, psmouse->packet[0])) {
psmouse_dbg(psmouse,
"refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
- psmouse->packet[0], model->mask0, model->byte0);
+ psmouse->packet[0], priv->mask0, priv->byte0);
return PSMOUSE_BAD_DATA;
}
@@ -881,7 +883,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
}
if (psmouse->pktcnt == psmouse->pktsize) {
- alps_process_packet(psmouse);
+ priv->process_packet(psmouse);
return PSMOUSE_FULL_PACKET;
}
@@ -969,24 +971,42 @@ static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
return __alps_command_mode_write_reg(psmouse, value);
}
+static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
+ int repeated_command, unsigned char *param)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ param[0] = 0;
+ if (init_command && ps2_command(ps2dev, param, init_command))
+ return -EIO;
+
+ if (ps2_command(ps2dev, NULL, repeated_command) ||
+ ps2_command(ps2dev, NULL, repeated_command) ||
+ ps2_command(ps2dev, NULL, repeated_command))
+ return -EIO;
+
+ param[0] = param[1] = param[2] = 0xff;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -EIO;
+
+ psmouse_dbg(psmouse, "%2.2X report: %2.2x %2.2x %2.2x\n",
+ repeated_command, param[0], param[1], param[2]);
+ return 0;
+}
+
static int alps_enter_command_mode(struct psmouse *psmouse,
unsigned char *resp)
{
unsigned char param[4];
- struct ps2dev *ps2dev = &psmouse->ps2dev;
- if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
- ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_RESET_WRAP, param)) {
psmouse_err(psmouse, "failed to enter command mode\n");
return -1;
}
- if (param[0] != 0x88 && param[1] != 0x07) {
+ if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) {
psmouse_dbg(psmouse,
- "unknown response while entering command mode: %2.2x %2.2x %2.2x\n",
- param[0], param[1], param[2]);
+ "unknown response while entering command mode\n");
return -1;
}
@@ -1003,99 +1023,6 @@ static inline int alps_exit_command_mode(struct psmouse *psmouse)
return 0;
}
-static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
-{
- struct ps2dev *ps2dev = &psmouse->ps2dev;
- static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
- unsigned char param[4];
- const struct alps_model_info *model = NULL;
- int i;
-
- /*
- * First try "E6 report".
- * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
- * The bits 0-2 of the first byte will be 1s if some buttons are
- * pressed.
- */
- param[0] = 0;
- if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))
- return NULL;
-
- param[0] = param[1] = param[2] = 0xff;
- if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
- return NULL;
-
- psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
- param[0], param[1], param[2]);
-
- if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
- (param[2] != 10 && param[2] != 100))
- return NULL;
-
- /*
- * Now try "E7 report". Allowed responses are in
- * alps_model_data[].signature
- */
- param[0] = 0;
- if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21))
- return NULL;
-
- param[0] = param[1] = param[2] = 0xff;
- if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
- return NULL;
-
- psmouse_dbg(psmouse, "E7 report: %2.2x %2.2x %2.2x",
- param[0], param[1], param[2]);
-
- if (version) {
- for (i = 0; i < ARRAY_SIZE(rates) && param[2] != rates[i]; i++)
- /* empty */;
- *version = (param[0] << 8) | (param[1] << 4) | i;
- }
-
- for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
- if (!memcmp(param, alps_model_data[i].signature,
- sizeof(alps_model_data[i].signature))) {
- model = alps_model_data + i;
- break;
- }
- }
-
- if (model && model->proto_version > ALPS_PROTO_V2) {
- /*
- * Need to check command mode response to identify
- * model
- */
- model = NULL;
- if (alps_enter_command_mode(psmouse, param)) {
- psmouse_warn(psmouse,
- "touchpad failed to enter command mode\n");
- } else {
- for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
- if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
- alps_model_data[i].command_mode_resp == param[0]) {
- model = alps_model_data + i;
- break;
- }
- }
- alps_exit_command_mode(psmouse);
-
- if (!model)
- psmouse_dbg(psmouse,
- "Unknown command mode response %2.2x\n",
- param[0]);
- }
- }
-
- return model;
-}
-
/*
* For DualPoint devices select the device that should respond to
* subsequent commands. It looks like glidepad is behind stickpointer,
@@ -1139,18 +1066,10 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
static int alps_get_status(struct psmouse *psmouse, char *param)
{
- struct ps2dev *ps2dev = &psmouse->ps2dev;
-
/* Get status: 0xF5 0xF5 0xF5 0xE9 */
- if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
- ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_DISABLE, param))
return -1;
- psmouse_dbg(psmouse, "Status: %2.2x %2.2x %2.2x",
- param[0], param[1], param[2]);
-
return 0;
}
@@ -1192,16 +1111,16 @@ static int alps_poll(struct psmouse *psmouse)
unsigned char buf[sizeof(psmouse->packet)];
bool poll_failed;
- if (priv->i->flags & ALPS_PASS)
+ if (priv->flags & ALPS_PASS)
alps_passthrough_mode_v2(psmouse, true);
poll_failed = ps2_command(&psmouse->ps2dev, buf,
PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
- if (priv->i->flags & ALPS_PASS)
+ if (priv->flags & ALPS_PASS)
alps_passthrough_mode_v2(psmouse, false);
- if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
+ if (poll_failed || (buf[0] & priv->mask0) != priv->byte0)
return -1;
if ((psmouse->badbyte & 0xc8) == 0x08) {
@@ -1219,9 +1138,8 @@ static int alps_poll(struct psmouse *psmouse)
static int alps_hw_init_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
- if ((model->flags & ALPS_PASS) &&
+ if ((priv->flags & ALPS_PASS) &&
alps_passthrough_mode_v2(psmouse, true)) {
return -1;
}
@@ -1236,7 +1154,7 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
return -1;
}
- if ((model->flags & ALPS_PASS) &&
+ if ((priv->flags & ALPS_PASS) &&
alps_passthrough_mode_v2(psmouse, false)) {
return -1;
}
@@ -1251,26 +1169,31 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
}
/*
- * Enable or disable passthrough mode to the trackstick. Must be in
- * command mode when calling this function.
+ * Enable or disable passthrough mode to the trackstick.
*/
-static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable)
+static int alps_passthrough_mode_v3(struct psmouse *psmouse,
+ int reg_base, bool enable)
{
- int reg_val;
+ int reg_val, ret = -1;
- reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
- if (reg_val == -1)
+ if (alps_enter_command_mode(psmouse, NULL))
return -1;
+ reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008);
+ if (reg_val == -1)
+ goto error;
+
if (enable)
reg_val |= 0x01;
else
reg_val &= ~0x01;
- if (__alps_command_mode_write_reg(psmouse, reg_val))
- return -1;
+ ret = __alps_command_mode_write_reg(psmouse, reg_val);
- return 0;
+error:
+ if (alps_exit_command_mode(psmouse))
+ ret = -1;
+ return ret;
}
/* Must be in command mode when calling this function */
@@ -1289,73 +1212,102 @@ static int alps_absolute_mode_v3(struct psmouse *psmouse)
return 0;
}
-static int alps_hw_init_v3(struct psmouse *psmouse)
+static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base)
{
- struct alps_data *priv = psmouse->private;
- struct ps2dev *ps2dev = &psmouse->ps2dev;
- int reg_val;
- unsigned char param[4];
-
- priv->nibble_commands = alps_v3_nibble_commands;
- priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+ int ret = -EIO, reg_val;
if (alps_enter_command_mode(psmouse, NULL))
goto error;
- /* Check for trackstick */
- reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+ reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08);
if (reg_val == -1)
goto error;
- if (reg_val & 0x80) {
- if (alps_passthrough_mode_v3(psmouse, true))
- goto error;
- if (alps_exit_command_mode(psmouse))
- goto error;
+
+ /* bit 7: trackstick is present */
+ ret = reg_val & 0x80 ? 0 : -ENODEV;
+
+error:
+ alps_exit_command_mode(psmouse);
+ return ret;
+}
+
+static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int ret = 0;
+ unsigned char param[4];
+
+ if (alps_passthrough_mode_v3(psmouse, reg_base, true))
+ return -EIO;
+
+ /*
+ * E7 report for the trackstick
+ *
+ * There have been reports of failures that seem to trace back
+ * to the above trackstick check failing. When these occur
+ * this E7 report fails, so when that happens we continue
+ * with the assumption that there isn't a trackstick after
+ * all.
+ */
+ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
+ psmouse_warn(psmouse, "trackstick E7 report failed\n");
+ ret = -ENODEV;
+ } else {
+ psmouse_dbg(psmouse,
+ "trackstick E7 report: %2.2x %2.2x %2.2x\n",
+ param[0], param[1], param[2]);
/*
- * E7 report for the trackstick
- *
- * There have been reports of failures to seem to trace back
- * to the above trackstick check failing. When these occur
- * this E7 report fails, so when that happens we continue
- * with the assumption that there isn't a trackstick after
- * all.
+ * Not sure what this does, but it is absolutely
+ * essential. Without it, the touchpad does not
+ * work at all and the trackstick just emits normal
+ * PS/2 packets.
*/
- param[0] = 0x64;
- if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
- psmouse_warn(psmouse, "trackstick E7 report failed\n");
- } else {
- psmouse_dbg(psmouse,
- "trackstick E7 report: %2.2x %2.2x %2.2x\n",
- param[0], param[1], param[2]);
-
- /*
- * Not sure what this does, but it is absolutely
- * essential. Without it, the touchpad does not
- * work at all and the trackstick just emits normal
- * PS/2 packets.
- */
- if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- alps_command_mode_send_nibble(psmouse, 0x9) ||
- alps_command_mode_send_nibble(psmouse, 0x4)) {
- psmouse_err(psmouse,
- "Error sending magic E6 sequence\n");
- goto error_passthrough;
- }
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ alps_command_mode_send_nibble(psmouse, 0x9) ||
+ alps_command_mode_send_nibble(psmouse, 0x4)) {
+ psmouse_err(psmouse,
+ "Error sending magic E6 sequence\n");
+ ret = -EIO;
+ goto error;
}
- if (alps_enter_command_mode(psmouse, NULL))
- goto error_passthrough;
- if (alps_passthrough_mode_v3(psmouse, false))
- goto error;
+ /*
+ * This ensures the trackstick packets are in the format
+ * supported by this driver. If bit 1 isn't set the packet
+ * format is different.
+ */
+ if (alps_enter_command_mode(psmouse, NULL) ||
+ alps_command_mode_write_reg(psmouse,
+ reg_base + 0x08, 0x82) ||
+ alps_exit_command_mode(psmouse))
+ ret = -EIO;
}
- if (alps_absolute_mode_v3(psmouse)) {
+error:
+ if (alps_passthrough_mode_v3(psmouse, reg_base, false))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int alps_hw_init_v3(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int reg_val;
+ unsigned char param[4];
+
+ reg_val = alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE);
+ if (reg_val == -EIO)
+ goto error;
+ if (reg_val == 0 &&
+ alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO)
+ goto error;
+
+ if (alps_enter_command_mode(psmouse, NULL) ||
+ alps_absolute_mode_v3(psmouse)) {
psmouse_err(psmouse, "Failed to enter absolute mode\n");
goto error;
}
@@ -1392,14 +1344,6 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
goto error;
- /*
- * This ensures the trackstick packets are in the format
- * supported by this driver. If bit 1 isn't set the packet
- * format is different.
- */
- if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
- goto error;
-
alps_exit_command_mode(psmouse);
/* Set rate and enable data reporting */
@@ -1412,10 +1356,6 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
return 0;
-error_passthrough:
- /* Something failed while in passthrough mode, so try to get out */
- if (!alps_enter_command_mode(psmouse, NULL))
- alps_passthrough_mode_v3(psmouse, false);
error:
/*
* Leaving the touchpad in command mode will essentially render
@@ -1426,6 +1366,50 @@ error:
return -1;
}
+static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int reg_val, ret = -1;
+
+ if (priv->flags & ALPS_DUALPOINT) {
+ reg_val = alps_setup_trackstick_v3(psmouse,
+ ALPS_REG_BASE_RUSHMORE);
+ if (reg_val == -EIO)
+ goto error;
+ if (reg_val == -ENODEV)
+ priv->flags &= ~ALPS_DUALPOINT;
+ }
+
+ if (alps_enter_command_mode(psmouse, NULL) ||
+ alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 ||
+ alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
+ goto error;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0xc2c6);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val & 0xfd))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0xc2c9, 0x64))
+ goto error;
+
+ /* enter absolute mode */
+ reg_val = alps_command_mode_read_reg(psmouse, 0xc2c4);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x02))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+ return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+
+error:
+ alps_exit_command_mode(psmouse);
+ return ret;
+}
+
/* Must be in command mode when calling this function */
static int alps_absolute_mode_v4(struct psmouse *psmouse)
{
@@ -1444,13 +1428,9 @@ static int alps_absolute_mode_v4(struct psmouse *psmouse)
static int alps_hw_init_v4(struct psmouse *psmouse)
{
- struct alps_data *priv = psmouse->private;
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[4];
- priv->nibble_commands = alps_v4_nibble_commands;
- priv->addr_command = PSMOUSE_CMD_DISABLE;
-
if (alps_enter_command_mode(psmouse, NULL))
goto error;
@@ -1519,39 +1499,140 @@ error:
return -1;
}
-static int alps_hw_init(struct psmouse *psmouse)
+static void alps_set_defaults(struct alps_data *priv)
{
- struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
- int ret = -1;
+ priv->byte0 = 0x8f;
+ priv->mask0 = 0x8f;
+ priv->flags = ALPS_DUALPOINT;
+
+ priv->x_max = 2000;
+ priv->y_max = 1400;
+ priv->x_bits = 15;
+ priv->y_bits = 11;
- switch (model->proto_version) {
+ switch (priv->proto_version) {
case ALPS_PROTO_V1:
case ALPS_PROTO_V2:
- ret = alps_hw_init_v1_v2(psmouse);
+ priv->hw_init = alps_hw_init_v1_v2;
+ priv->process_packet = alps_process_packet_v1_v2;
+ priv->set_abs_params = alps_set_abs_params_st;
break;
case ALPS_PROTO_V3:
- ret = alps_hw_init_v3(psmouse);
+ priv->hw_init = alps_hw_init_v3;
+ priv->process_packet = alps_process_packet_v3;
+ priv->set_abs_params = alps_set_abs_params_mt;
+ priv->decode_fields = alps_decode_pinnacle;
+ priv->nibble_commands = alps_v3_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
break;
case ALPS_PROTO_V4:
- ret = alps_hw_init_v4(psmouse);
+ priv->hw_init = alps_hw_init_v4;
+ priv->process_packet = alps_process_packet_v4;
+ priv->set_abs_params = alps_set_abs_params_mt;
+ priv->nibble_commands = alps_v4_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_DISABLE;
break;
}
+}
- return ret;
+static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
+ unsigned char *e7, unsigned char *ec)
+{
+ const struct alps_model_info *model;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
+ model = &alps_model_data[i];
+
+ if (!memcmp(e7, model->signature, sizeof(model->signature)) &&
+ (!model->command_mode_resp ||
+ model->command_mode_resp == ec[2])) {
+
+ priv->proto_version = model->proto_version;
+ alps_set_defaults(priv);
+
+ priv->flags = model->flags;
+ priv->byte0 = model->byte0;
+ priv->mask0 = model->mask0;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
+{
+ unsigned char e6[4], e7[4], ec[4];
+
+ /*
+ * First try "E6 report".
+ * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
+ * The bits 0-2 of the first byte will be 1s if some buttons are
+ * pressed.
+ */
+ if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+ PSMOUSE_CMD_SETSCALE11, e6))
+ return -EIO;
+
+ if ((e6[0] & 0xf8) != 0 || e6[1] != 0 || (e6[2] != 10 && e6[2] != 100))
+ return -EINVAL;
+
+ /*
+ * Now get the "E7" and "EC" reports. These will uniquely identify
+ * most ALPS touchpads.
+ */
+ if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+ PSMOUSE_CMD_SETSCALE21, e7) ||
+ alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+ PSMOUSE_CMD_RESET_WRAP, ec) ||
+ alps_exit_command_mode(psmouse))
+ return -EIO;
+
+ if (alps_match_table(psmouse, priv, e7, ec) == 0) {
+ return 0;
+ } else if (ec[0] == 0x88 && ec[1] == 0x08) {
+ priv->proto_version = ALPS_PROTO_V3;
+ alps_set_defaults(priv);
+
+ priv->hw_init = alps_hw_init_rushmore_v3;
+ priv->decode_fields = alps_decode_rushmore;
+ priv->x_bits = 16;
+ priv->y_bits = 12;
+
+ /* hack to make addr_command, nibble_commands available */
+ psmouse->private = priv;
+
+ if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
+ priv->flags &= ~ALPS_DUALPOINT;
+
+ return 0;
+ } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
+ ec[2] >= 0x90 && ec[2] <= 0x9d) {
+ priv->proto_version = ALPS_PROTO_V3;
+ alps_set_defaults(priv);
+
+ return 0;
+ }
+
+ psmouse_info(psmouse,
+ "Unknown ALPS touchpad: E7=%2.2x %2.2x %2.2x, EC=%2.2x %2.2x %2.2x\n",
+ e7[0], e7[1], e7[2], ec[0], ec[1], ec[2]);
+
+ return -EINVAL;
}
static int alps_reconnect(struct psmouse *psmouse)
{
- const struct alps_model_info *model;
+ struct alps_data *priv = psmouse->private;
psmouse_reset(psmouse);
- model = alps_get_model(psmouse, NULL);
- if (!model)
+ if (alps_identify(psmouse, priv) < 0)
return -1;
- return alps_hw_init(psmouse);
+ return priv->hw_init(psmouse);
}
static void alps_disconnect(struct psmouse *psmouse)
@@ -1564,12 +1645,33 @@ static void alps_disconnect(struct psmouse *psmouse)
kfree(priv);
}
+static void alps_set_abs_params_st(struct alps_data *priv,
+ struct input_dev *dev1)
+{
+ input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+}
+
+static void alps_set_abs_params_mt(struct alps_data *priv,
+ struct input_dev *dev1)
+{
+ set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
+ input_mt_init_slots(dev1, 2, 0);
+ input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0);
+ input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0);
+
+ set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
+
+ input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
+}
+
int alps_init(struct psmouse *psmouse)
{
struct alps_data *priv;
- const struct alps_model_info *model;
struct input_dev *dev1 = psmouse->dev, *dev2;
- int version;
priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
dev2 = input_allocate_device();
@@ -1583,13 +1685,10 @@ int alps_init(struct psmouse *psmouse)
psmouse_reset(psmouse);
- model = alps_get_model(psmouse, &version);
- if (!model)
+ if (alps_identify(psmouse, priv) < 0)
goto init_fail;
- priv->i = model;
-
- if (alps_hw_init(psmouse))
+ if (priv->hw_init(psmouse))
goto init_fail;
/*
@@ -1611,41 +1710,20 @@ int alps_init(struct psmouse *psmouse)
dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
- switch (model->proto_version) {
- case ALPS_PROTO_V1:
- case ALPS_PROTO_V2:
- input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
- break;
- case ALPS_PROTO_V3:
- case ALPS_PROTO_V4:
- set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
- input_mt_init_slots(dev1, 2, 0);
- input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
- input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
-
- set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
- set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
- set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
-
- input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
- break;
- }
-
+ priv->set_abs_params(priv, dev1);
input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
- if (model->flags & ALPS_WHEEL) {
+ if (priv->flags & ALPS_WHEEL) {
dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL);
dev1->relbit[BIT_WORD(REL_WHEEL)] |= BIT_MASK(REL_WHEEL);
}
- if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
+ if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
dev1->keybit[BIT_WORD(BTN_FORWARD)] |= BIT_MASK(BTN_FORWARD);
dev1->keybit[BIT_WORD(BTN_BACK)] |= BIT_MASK(BTN_BACK);
}
- if (model->flags & ALPS_FOUR_BUTTONS) {
+ if (priv->flags & ALPS_FOUR_BUTTONS) {
dev1->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_0);
dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1);
dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2);
@@ -1656,7 +1734,8 @@ int alps_init(struct psmouse *psmouse)
snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
dev2->phys = priv->phys;
- dev2->name = (model->flags & ALPS_DUALPOINT) ? "DualPoint Stick" : "PS/2 Mouse";
+ dev2->name = (priv->flags & ALPS_DUALPOINT) ?
+ "DualPoint Stick" : "PS/2 Mouse";
dev2->id.bustype = BUS_I8042;
dev2->id.vendor = 0x0002;
dev2->id.product = PSMOUSE_ALPS;
@@ -1675,7 +1754,7 @@ int alps_init(struct psmouse *psmouse)
psmouse->poll = alps_poll;
psmouse->disconnect = alps_disconnect;
psmouse->reconnect = alps_reconnect;
- psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6;
+ psmouse->pktsize = priv->proto_version == ALPS_PROTO_V4 ? 8 : 6;
/* We are having trouble resyncing ALPS touchpads so disable it for now */
psmouse->resync_time = 0;
@@ -1692,18 +1771,16 @@ init_fail:
int alps_detect(struct psmouse *psmouse, bool set_properties)
{
- int version;
- const struct alps_model_info *model;
+ struct alps_data dummy;
- model = alps_get_model(psmouse, &version);
- if (!model)
+ if (alps_identify(psmouse, &dummy) < 0)
return -1;
if (set_properties) {
psmouse->vendor = "ALPS";
- psmouse->name = model->flags & ALPS_DUALPOINT ?
+ psmouse->name = dummy.flags & ALPS_DUALPOINT ?
"DualPoint TouchPad" : "GlidePoint";
- psmouse->model = version;
+ psmouse->model = dummy.proto_version << 8;
}
return 0;
}
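
alps_rpt_cmd() introduced above factors out the ALPS idiom of repeating one PS/2 command three times and then reading three bytes back with GETINFO. A condensed usage sketch, matching the calls alps_identify() makes for the E6 and E7 reports (error handling abbreviated):

	unsigned char e6[4], e7[4];

	/* E6 report: SETRES 0, SETSCALE11 x3, then GETINFO */
	if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES, PSMOUSE_CMD_SETSCALE11, e6))
		return -EIO;

	/* E7 report: SETRES 0, SETSCALE21 x3, then GETINFO */
	if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES, PSMOUSE_CMD_SETSCALE21, e7))
		return -EIO;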
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index ae1ac354c778..970480551b6e 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -12,35 +12,146 @@
#ifndef _ALPS_H
#define _ALPS_H
-#define ALPS_PROTO_V1 0
-#define ALPS_PROTO_V2 1
-#define ALPS_PROTO_V3 2
-#define ALPS_PROTO_V4 3
+#define ALPS_PROTO_V1 1
+#define ALPS_PROTO_V2 2
+#define ALPS_PROTO_V3 3
+#define ALPS_PROTO_V4 4
+/**
+ * struct alps_model_info - touchpad ID table
+ * @signature: E7 response string to match.
+ * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response
+ * (aka command mode response) identifies the firmware minor version. This
+ * can be used to distinguish different hardware models which are not
+ * uniquely identifiable through their E7 responses.
+ * @proto_version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ * known format for this model. The first byte of the report, ANDed with
+ * mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ *
+ * Many (but not all) ALPS touchpads can be identified by looking at the
+ * values returned in the "E7 report" and/or the "EC report." This table
+ * lists a number of such touchpads.
+ */
struct alps_model_info {
- unsigned char signature[3];
- unsigned char command_mode_resp; /* v3/v4 only */
+ unsigned char signature[3];
+ unsigned char command_mode_resp;
unsigned char proto_version;
- unsigned char byte0, mask0;
- unsigned char flags;
+ unsigned char byte0, mask0;
+ unsigned char flags;
};
+/**
+ * struct alps_nibble_commands - encodings for register accesses
+ * @command: PS/2 command used for the nibble
+ * @data: Data supplied as an argument to the PS/2 command, if applicable
+ *
+ * The ALPS protocol uses magic sequences to transmit binary data to the
+ * touchpad, as it is generally not OK to send arbitrary bytes out the
+ * PS/2 port. Each of the sequences in this table sends one nibble of the
+ * register address or (write) data. Different versions of the ALPS protocol
+ * use slightly different encodings.
+ */
struct alps_nibble_commands {
int command;
unsigned char data;
};
+/**
+ * struct alps_fields - decoded version of the report packet
+ * @x_map: Bitmap of active X positions for MT.
+ * @y_map: Bitmap of active Y positions for MT.
+ * @fingers: Number of fingers for MT.
+ * @x: X position for ST.
+ * @y: Y position for ST.
+ * @z: Z position for ST.
+ * @first_mp: Packet is the first of a multi-packet report.
+ * @is_mp: Packet is part of a multi-packet report.
+ * @left: Left touchpad button is active.
+ * @right: Right touchpad button is active.
+ * @middle: Middle touchpad button is active.
+ * @ts_left: Left trackstick button is active.
+ * @ts_right: Right trackstick button is active.
+ * @ts_middle: Middle trackstick button is active.
+ */
+struct alps_fields {
+ unsigned int x_map;
+ unsigned int y_map;
+ unsigned int fingers;
+ unsigned int x;
+ unsigned int y;
+ unsigned int z;
+ unsigned int first_mp:1;
+ unsigned int is_mp:1;
+
+ unsigned int left:1;
+ unsigned int right:1;
+ unsigned int middle:1;
+
+ unsigned int ts_left:1;
+ unsigned int ts_right:1;
+ unsigned int ts_middle:1;
+};
+
+/**
+ * struct alps_data - private data structure for the ALPS driver
+ * @dev2: "Relative" device used to report trackstick or mouse activity.
+ * @phys: Physical path for the relative device.
+ * @nibble_commands: Command mapping used for touchpad register accesses.
+ * @addr_command: Command used to tell the touchpad that a register address
+ * follows.
+ * @proto_version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ * known format for this model. The first byte of the report, ANDed with
+ * mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ * @x_max: Largest possible X position value.
+ * @y_max: Largest possible Y position value.
+ * @x_bits: Number of X bits in the MT bitmap.
+ * @y_bits: Number of Y bits in the MT bitmap.
+ * @hw_init: Protocol-specific hardware init function.
+ * @process_packet: Protocol-specific function to process a report packet.
+ * @decode_fields: Protocol-specific function to read packet bitfields.
+ * @set_abs_params: Protocol-specific function to configure the input_dev.
+ * @prev_fin: Finger bit from previous packet.
+ * @multi_packet: Multi-packet data in progress.
+ * @multi_data: Saved multi-packet data.
+ * @x1: First X coordinate from last MT report.
+ * @x2: Second X coordinate from last MT report.
+ * @y1: First Y coordinate from last MT report.
+ * @y2: Second Y coordinate from last MT report.
+ * @fingers: Number of fingers from last MT report.
+ * @quirks: Bitmap of ALPS_QUIRK_*.
+ * @timer: Timer for flushing out the final report packet in the stream.
+ */
struct alps_data {
- struct input_dev *dev2; /* Relative device */
- char phys[32]; /* Phys */
- const struct alps_model_info *i;/* Info */
+ struct input_dev *dev2;
+ char phys[32];
+
+ /* these are autodetected when the device is identified */
const struct alps_nibble_commands *nibble_commands;
- int addr_command; /* Command to set register address */
- int prev_fin; /* Finger bit from previous packet */
- int multi_packet; /* Multi-packet data in progress */
- unsigned char multi_data[6]; /* Saved multi-packet data */
- int x1, x2, y1, y2; /* Coordinates from last MT report */
- int fingers; /* Number of fingers from MT report */
+ int addr_command;
+ unsigned char proto_version;
+ unsigned char byte0, mask0;
+ unsigned char flags;
+ int x_max;
+ int y_max;
+ int x_bits;
+ int y_bits;
+
+ int (*hw_init)(struct psmouse *psmouse);
+ void (*process_packet)(struct psmouse *psmouse);
+ void (*decode_fields)(struct alps_fields *f, unsigned char *p);
+ void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1);
+
+ int prev_fin;
+ int multi_packet;
+ unsigned char multi_data[6];
+ int x1, x2, y1, y2;
+ int fingers;
u8 quirks;
struct timer_list timer;
};
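
The callback members documented above are what replace the proto_version switch statements in alps.c; once alps_identify() has filled them in, the init and packet paths simply indirect through them. A condensed sketch of how they are consumed (simplified from alps_init() and alps_process_byte()):

	/* init time */
	if (priv->hw_init(psmouse))
		goto init_fail;
	priv->set_abs_params(priv, dev1);

	/* runtime, once a full packet has been collected */
	if (psmouse->pktcnt == psmouse->pktsize)
		priv->process_packet(psmouse);
	/* the V3 handler calls priv->decode_fields() internally */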
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
new file mode 100644
index 000000000000..b409c3d7d4fb
--- /dev/null
+++ b/drivers/input/mouse/cyapa.c
@@ -0,0 +1,973 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ * Further cleanup and restructuring by:
+ * Daniel Kurtz <djkurtz@chromium.org>
+ * Benson Leung <bleung@chromium.org>
+ *
+ * Copyright (C) 2011-2012 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2012 Google, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/* APA trackpad firmware generation */
+#define CYAPA_GEN3 0x03 /* support MT-protocol B with tracking ID. */
+
+#define CYAPA_NAME "Cypress APA Trackpad (cyapa)"
+
+/* commands for read/write registers of Cypress trackpad */
+#define CYAPA_CMD_SOFT_RESET 0x00
+#define CYAPA_CMD_POWER_MODE 0x01
+#define CYAPA_CMD_DEV_STATUS 0x02
+#define CYAPA_CMD_GROUP_DATA 0x03
+#define CYAPA_CMD_GROUP_CMD 0x04
+#define CYAPA_CMD_GROUP_QUERY 0x05
+#define CYAPA_CMD_BL_STATUS 0x06
+#define CYAPA_CMD_BL_HEAD 0x07
+#define CYAPA_CMD_BL_CMD 0x08
+#define CYAPA_CMD_BL_DATA 0x09
+#define CYAPA_CMD_BL_ALL 0x0a
+#define CYAPA_CMD_BLK_PRODUCT_ID 0x0b
+#define CYAPA_CMD_BLK_HEAD 0x0c
+
+/* report data start reg offset address. */
+#define DATA_REG_START_OFFSET 0x0000
+
+#define BL_HEAD_OFFSET 0x00
+#define BL_DATA_OFFSET 0x10
+
+/*
+ * Operational Device Status Register
+ *
+ * bit 7: Valid interrupt source
+ * bit 6 - 4: Reserved
+ * bit 3 - 2: Power status
+ * bit 1 - 0: Device status
+ */
+#define REG_OP_STATUS 0x00
+#define OP_STATUS_SRC 0x80
+#define OP_STATUS_POWER 0x0c
+#define OP_STATUS_DEV 0x03
+#define OP_STATUS_MASK (OP_STATUS_SRC | OP_STATUS_POWER | OP_STATUS_DEV)
+
+/*
+ * Operational Finger Count/Button Flags Register
+ *
+ * bit 7 - 4: Number of touched fingers
+ * bit 3: Valid data
+ * bit 2: Middle Physical Button
+ * bit 1: Right Physical Button
+ * bit 0: Left Physical Button
+ */
+#define REG_OP_DATA1 0x01
+#define OP_DATA_VALID 0x08
+#define OP_DATA_MIDDLE_BTN 0x04
+#define OP_DATA_RIGHT_BTN 0x02
+#define OP_DATA_LEFT_BTN 0x01
+#define OP_DATA_BTN_MASK (OP_DATA_MIDDLE_BTN | OP_DATA_RIGHT_BTN | \
+ OP_DATA_LEFT_BTN)
+
+/*
+ * Bootloader Status Register
+ *
+ * bit 7: Busy
+ * bit 6 - 5: Reserved
+ * bit 4: Bootloader running
+ * bit 3 - 1: Reserved
+ * bit 0: Checksum valid
+ */
+#define REG_BL_STATUS 0x01
+#define BL_STATUS_BUSY 0x80
+#define BL_STATUS_RUNNING 0x10
+#define BL_STATUS_DATA_VALID 0x08
+#define BL_STATUS_CSUM_VALID 0x01
+
+/*
+ * Bootloader Error Register
+ *
+ * bit 7: Invalid
+ * bit 6: Invalid security key
+ * bit 5: Bootloading
+ * bit 4: Command checksum
+ * bit 3: Flash protection error
+ * bit 2: Flash checksum error
+ * bit 1 - 0: Reserved
+ */
+#define REG_BL_ERROR 0x02
+#define BL_ERROR_INVALID 0x80
+#define BL_ERROR_INVALID_KEY 0x40
+#define BL_ERROR_BOOTLOADING 0x20
+#define BL_ERROR_CMD_CSUM 0x10
+#define BL_ERROR_FLASH_PROT 0x08
+#define BL_ERROR_FLASH_CSUM 0x04
+
+#define BL_STATUS_SIZE 3 /* length of bootloader status registers */
+#define BLK_HEAD_BYTES 32
+
+#define PRODUCT_ID_SIZE 16
+#define QUERY_DATA_SIZE 27
+#define REG_PROTOCOL_GEN_QUERY_OFFSET 20
+
+#define REG_OFFSET_DATA_BASE 0x0000
+#define REG_OFFSET_COMMAND_BASE 0x0028
+#define REG_OFFSET_QUERY_BASE 0x002a
+
+#define CAPABILITY_LEFT_BTN_MASK (0x01 << 3)
+#define CAPABILITY_RIGHT_BTN_MASK (0x01 << 4)
+#define CAPABILITY_MIDDLE_BTN_MASK (0x01 << 5)
+#define CAPABILITY_BTN_MASK (CAPABILITY_LEFT_BTN_MASK | \
+ CAPABILITY_RIGHT_BTN_MASK | \
+ CAPABILITY_MIDDLE_BTN_MASK)
+
+#define CYAPA_OFFSET_SOFT_RESET REG_OFFSET_COMMAND_BASE
+
+#define REG_OFFSET_POWER_MODE (REG_OFFSET_COMMAND_BASE + 1)
+
+#define PWR_MODE_MASK 0xfc
+#define PWR_MODE_FULL_ACTIVE (0x3f << 2)
+#define PWR_MODE_IDLE (0x05 << 2) /* default sleep time is 50 ms. */
+#define PWR_MODE_OFF (0x00 << 2)
+
+#define PWR_STATUS_MASK 0x0c
+#define PWR_STATUS_ACTIVE (0x03 << 2)
+#define PWR_STATUS_IDLE (0x02 << 2)
+#define PWR_STATUS_OFF (0x00 << 2)
+
+/*
+ * CYAPA trackpad device states.
+ * Used in register 0x00, bit1-0, DeviceStatus field.
+ * Other values indicate device is in an abnormal state and must be reset.
+ */
+#define CYAPA_DEV_NORMAL 0x03
+#define CYAPA_DEV_BUSY 0x01
+
+enum cyapa_state {
+ CYAPA_STATE_OP,
+ CYAPA_STATE_BL_IDLE,
+ CYAPA_STATE_BL_ACTIVE,
+ CYAPA_STATE_BL_BUSY,
+ CYAPA_STATE_NO_DEVICE,
+};
+
+
+struct cyapa_touch {
+ /*
+ * high bits of x/y position value
+ * bit 7 - 4: high 4 bits of x position value
+ * bit 3 - 0: high 4 bits of y position value
+ */
+ u8 xy_hi;
+ u8 x_lo; /* low 8 bits of x position value. */
+ u8 y_lo; /* low 8 bits of y position value. */
+ u8 pressure;
+ /* id range is 1 - 15. It is incremented with every new touch. */
+ u8 id;
+} __packed;
+
+/* The touch.id is used as the MT slot id, thus max MT slot is 15 */
+#define CYAPA_MAX_MT_SLOTS 15
+
+struct cyapa_reg_data {
+ /*
+ * bit 0 - 1: device status
+ * bit 3 - 2: power mode
+ * bit 6 - 4: reserved
+ * bit 7: interrupt valid bit
+ */
+ u8 device_status;
+ /*
+ * bit 7 - 4: number of fingers currently touching pad
+ * bit 3: valid data check bit
+ * bit 2: middle mechanism button state if exists
+ * bit 1: right mechanism button state if exists
+ * bit 0: left mechanism button state if exists
+ */
+ u8 finger_btn;
+ /* CYAPA reports up to 5 touches per packet. */
+ struct cyapa_touch touches[5];
+} __packed;
+
+/* The main device structure */
+struct cyapa {
+ enum cyapa_state state;
+
+ struct i2c_client *client;
+ struct input_dev *input;
+ char phys[32]; /* device physical location */
+ int irq;
+ bool irq_wake; /* irq wake is enabled */
+ bool smbus;
+
+ /* read from query data region. */
+ char product_id[16];
+ u8 btn_capability;
+ u8 gen;
+ int max_abs_x;
+ int max_abs_y;
+ int physical_size_x;
+ int physical_size_y;
+};
+
+static const u8 bl_deactivate[] = { 0x00, 0xff, 0x3b, 0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07 };
+static const u8 bl_exit[] = { 0x00, 0xff, 0xa5, 0x00, 0x01, 0x02, 0x03, 0x04,
+ 0x05, 0x06, 0x07 };
+
+struct cyapa_cmd_len {
+ u8 cmd;
+ u8 len;
+};
+
+#define CYAPA_ADAPTER_FUNC_NONE 0
+#define CYAPA_ADAPTER_FUNC_I2C 1
+#define CYAPA_ADAPTER_FUNC_SMBUS 2
+#define CYAPA_ADAPTER_FUNC_BOTH 3
+
+/*
+ * macros for SMBus communication
+ */
+#define SMBUS_READ 0x01
+#define SMBUS_WRITE 0x00
+#define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
+#define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
+#define SMBUS_BYTE_BLOCK_CMD_MASK 0x80
+#define SMBUS_GROUP_BLOCK_CMD_MASK 0x40
+
+ /* for byte read/write command */
+#define CMD_RESET 0
+#define CMD_POWER_MODE 1
+#define CMD_DEV_STATUS 2
+#define SMBUS_BYTE_CMD(cmd) (((cmd) & 0x3f) << 1)
+#define CYAPA_SMBUS_RESET SMBUS_BYTE_CMD(CMD_RESET)
+#define CYAPA_SMBUS_POWER_MODE SMBUS_BYTE_CMD(CMD_POWER_MODE)
+#define CYAPA_SMBUS_DEV_STATUS SMBUS_BYTE_CMD(CMD_DEV_STATUS)
+
+ /* for group registers read/write command */
+#define REG_GROUP_DATA 0
+#define REG_GROUP_CMD 2
+#define REG_GROUP_QUERY 3
+#define SMBUS_GROUP_CMD(grp) (0x80 | (((grp) & 0x07) << 3))
+#define CYAPA_SMBUS_GROUP_DATA SMBUS_GROUP_CMD(REG_GROUP_DATA)
+#define CYAPA_SMBUS_GROUP_CMD SMBUS_GROUP_CMD(REG_GROUP_CMD)
+#define CYAPA_SMBUS_GROUP_QUERY SMBUS_GROUP_CMD(REG_GROUP_QUERY)
+
+ /* for register block read/write command */
+#define CMD_BL_STATUS 0
+#define CMD_BL_HEAD 1
+#define CMD_BL_CMD 2
+#define CMD_BL_DATA 3
+#define CMD_BL_ALL 4
+#define CMD_BLK_PRODUCT_ID 5
+#define CMD_BLK_HEAD 6
+#define SMBUS_BLOCK_CMD(cmd) (0xc0 | (((cmd) & 0x1f) << 1))
+
+/* register block read/write command in bootloader mode */
+#define CYAPA_SMBUS_BL_STATUS SMBUS_BLOCK_CMD(CMD_BL_STATUS)
+#define CYAPA_SMBUS_BL_HEAD SMBUS_BLOCK_CMD(CMD_BL_HEAD)
+#define CYAPA_SMBUS_BL_CMD SMBUS_BLOCK_CMD(CMD_BL_CMD)
+#define CYAPA_SMBUS_BL_DATA SMBUS_BLOCK_CMD(CMD_BL_DATA)
+#define CYAPA_SMBUS_BL_ALL SMBUS_BLOCK_CMD(CMD_BL_ALL)
+
+/* register block read/write command in operational mode */
+#define CYAPA_SMBUS_BLK_PRODUCT_ID SMBUS_BLOCK_CMD(CMD_BLK_PRODUCT_ID)
+#define CYAPA_SMBUS_BLK_HEAD SMBUS_BLOCK_CMD(CMD_BLK_HEAD)
+
+static const struct cyapa_cmd_len cyapa_i2c_cmds[] = {
+ { CYAPA_OFFSET_SOFT_RESET, 1 },
+ { REG_OFFSET_COMMAND_BASE + 1, 1 },
+ { REG_OFFSET_DATA_BASE, 1 },
+ { REG_OFFSET_DATA_BASE, sizeof(struct cyapa_reg_data) },
+ { REG_OFFSET_COMMAND_BASE, 0 },
+ { REG_OFFSET_QUERY_BASE, QUERY_DATA_SIZE },
+ { BL_HEAD_OFFSET, 3 },
+ { BL_HEAD_OFFSET, 16 },
+ { BL_HEAD_OFFSET, 16 },
+ { BL_DATA_OFFSET, 16 },
+ { BL_HEAD_OFFSET, 32 },
+ { REG_OFFSET_QUERY_BASE, PRODUCT_ID_SIZE },
+ { REG_OFFSET_DATA_BASE, 32 }
+};
+
+static const struct cyapa_cmd_len cyapa_smbus_cmds[] = {
+ { CYAPA_SMBUS_RESET, 1 },
+ { CYAPA_SMBUS_POWER_MODE, 1 },
+ { CYAPA_SMBUS_DEV_STATUS, 1 },
+ { CYAPA_SMBUS_GROUP_DATA, sizeof(struct cyapa_reg_data) },
+ { CYAPA_SMBUS_GROUP_CMD, 2 },
+ { CYAPA_SMBUS_GROUP_QUERY, QUERY_DATA_SIZE },
+ { CYAPA_SMBUS_BL_STATUS, 3 },
+ { CYAPA_SMBUS_BL_HEAD, 16 },
+ { CYAPA_SMBUS_BL_CMD, 16 },
+ { CYAPA_SMBUS_BL_DATA, 16 },
+ { CYAPA_SMBUS_BL_ALL, 32 },
+ { CYAPA_SMBUS_BLK_PRODUCT_ID, PRODUCT_ID_SIZE },
+ { CYAPA_SMBUS_BLK_HEAD, 16 },
+};
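
The SMBus encoding macros above pack a command number, an optional block index and the read/write flag into a single SMBus command byte. Two worked examples derived directly from the definitions (the second uses chunk index 1 purely to illustrate the index bits):

	/* group read of the data registers:
	 *   SMBUS_GROUP_CMD(REG_GROUP_DATA)   = 0x80 | (0 << 3) = 0x80
	 *   SMBUS_ENCODE_RW(0x80, SMBUS_READ) = 0x80 | 0x01     = 0x81
	 */

	/* block command CMD_BL_DATA with chunk index 1:
	 *   SMBUS_BLOCK_CMD(CMD_BL_DATA)      = 0xc0 | (3 << 1) = 0xc6
	 *   SMBUS_ENCODE_IDX(0xc6, 1)         = 0xc6 | (1 << 1) = 0xc8
	 *   SMBUS_ENCODE_RW(0xc8, SMBUS_READ) = 0xc8 | 0x01     = 0xc9
	 */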
+
+static ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
+ u8 *values)
+{
+ return i2c_smbus_read_i2c_block_data(cyapa->client, reg, len, values);
+}
+
+static ssize_t cyapa_i2c_reg_write_block(struct cyapa *cyapa, u8 reg,
+ size_t len, const u8 *values)
+{
+ return i2c_smbus_write_i2c_block_data(cyapa->client, reg, len, values);
+}
+
+/*
+ * cyapa_smbus_read_block - perform smbus block read command
+ * @cyapa - private data structure of the driver
+ * @cmd - the properly encoded smbus command
+ * @len - expected length of smbus command result
+ * @values - buffer to store smbus command result
+ *
+ * Returns negative errno, else the number of bytes read.
+ *
+ * Note:
+ * In the trackpad device, the memory block allocated for the I2C register
+ * map is 256 bytes, so the maximum block read over the I2C bus is 256 bytes.
+ */
+static ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
+ u8 *values)
+{
+ ssize_t ret;
+ u8 index;
+ u8 smbus_cmd;
+ u8 *buf;
+ struct i2c_client *client = cyapa->client;
+
+ if (!(SMBUS_BYTE_BLOCK_CMD_MASK & cmd))
+ return -EINVAL;
+
+ if (SMBUS_GROUP_BLOCK_CMD_MASK & cmd) {
+ /* read specific block registers command. */
+ smbus_cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
+ ret = i2c_smbus_read_block_data(client, smbus_cmd, values);
+ goto out;
+ }
+
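+ /*
+ * Byte-block reads larger than one SMBus block are split into
+ * I2C_SMBUS_BLOCK_MAX-sized chunks, with the chunk index folded into
+ * the command via SMBUS_ENCODE_IDX().
+ */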
+ ret = 0;
+ for (index = 0; index * I2C_SMBUS_BLOCK_MAX < len; index++) {
+ smbus_cmd = SMBUS_ENCODE_IDX(cmd, index);
+ smbus_cmd = SMBUS_ENCODE_RW(smbus_cmd, SMBUS_READ);
+ buf = values + I2C_SMBUS_BLOCK_MAX * index;
+ ret = i2c_smbus_read_block_data(client, smbus_cmd, buf);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ return ret > 0 ? len : ret;
+}
+
+static s32 cyapa_read_byte(struct cyapa *cyapa, u8 cmd_idx)
+{
+ u8 cmd;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
+ } else {
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ }
+ return i2c_smbus_read_byte_data(cyapa->client, cmd);
+}
+
+static s32 cyapa_write_byte(struct cyapa *cyapa, u8 cmd_idx, u8 value)
+{
+ u8 cmd;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ cmd = SMBUS_ENCODE_RW(cmd, SMBUS_WRITE);
+ } else {
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ }
+ return i2c_smbus_write_byte_data(cyapa->client, cmd, value);
+}
+
+static ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
+{
+ u8 cmd;
+ size_t len;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ len = cyapa_smbus_cmds[cmd_idx].len;
+ return cyapa_smbus_read_block(cyapa, cmd, len, values);
+ } else {
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ len = cyapa_i2c_cmds[cmd_idx].len;
+ return cyapa_i2c_reg_read_block(cyapa, cmd, len, values);
+ }
+}
+
+/*
+ * Query device for its current operating state.
+ */
+static int cyapa_get_state(struct cyapa *cyapa)
+{
+ int ret;
+ u8 status[BL_STATUS_SIZE];
+
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+ /*
+ * Get trackpad status by reading 3 registers starting from 0.
+ * If the device is in the bootloader, this will be BL_HEAD.
+ * If the device is in operational mode, this will be the DATA regs.
+ */
+ ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
+ status);
+
+ /*
+ * On smbus systems in OP mode, the i2c_reg_read will fail with
+ * -ETIMEDOUT. In this case, try again using the smbus equivalent
+ * command. This should return a BL_HEAD indicating CYAPA_STATE_OP.
+ */
+ if (cyapa->smbus && (ret == -ETIMEDOUT || ret == -ENXIO))
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
+
+ if (ret != BL_STATUS_SIZE)
+ goto error;
+
+ if ((status[REG_OP_STATUS] & OP_STATUS_SRC) == OP_STATUS_SRC) {
+ switch (status[REG_OP_STATUS] & OP_STATUS_DEV) {
+ case CYAPA_DEV_NORMAL:
+ case CYAPA_DEV_BUSY:
+ cyapa->state = CYAPA_STATE_OP;
+ break;
+ default:
+ ret = -EAGAIN;
+ goto error;
+ }
+ } else {
+ if (status[REG_BL_STATUS] & BL_STATUS_BUSY)
+ cyapa->state = CYAPA_STATE_BL_BUSY;
+ else if (status[REG_BL_ERROR] & BL_ERROR_BOOTLOADING)
+ cyapa->state = CYAPA_STATE_BL_ACTIVE;
+ else
+ cyapa->state = CYAPA_STATE_BL_IDLE;
+ }
+
+ return 0;
+error:
+ return (ret < 0) ? ret : -EAGAIN;
+}
+
+/*
+ * Poll device for its status in a loop, waiting up to timeout for a response.
+ *
+ * When the device switches state, it usually takes ~300 ms.
+ * However, when running a new firmware image, the device must calibrate its
+ * sensors, which can take as long as 2 seconds.
+ *
+ * Note: The timeout has the granularity of the polling rate, which is 100 ms.
+ *
+ * Returns:
+ * 0 when the device eventually responds with a valid non-busy state.
+ * -ETIMEDOUT if device never responds (too many -EAGAIN)
+ * < 0 other errors
+ */
+static int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
+{
+ int ret;
+ int tries = timeout / 100;
+
+ ret = cyapa_get_state(cyapa);
+ while ((ret || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
+ msleep(100);
+ ret = cyapa_get_state(cyapa);
+ }
+ return (ret == -EAGAIN || ret == -ETIMEDOUT) ? -ETIMEDOUT : ret;
+}
+
+static int cyapa_bl_deactivate(struct cyapa *cyapa)
+{
+ int ret;
+
+ ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
+ bl_deactivate);
+ if (ret < 0)
+ return ret;
+
+ /* wait for bootloader to switch to idle state; should take < 100ms */
+ msleep(100);
+ ret = cyapa_poll_state(cyapa, 500);
+ if (ret < 0)
+ return ret;
+ if (cyapa->state != CYAPA_STATE_BL_IDLE)
+ return -EAGAIN;
+ return 0;
+}
+
+/*
+ * Exit bootloader
+ *
+ * Send the bl_exit command, then wait 50 - 100 ms to let the device
+ * transition to operational mode. If this is the first time the device's
+ * firmware is running, it can take up to 2 seconds to calibrate its
+ * sensors. So, poll the device's new state for up to 2 seconds.
+ *
+ * Returns:
+ * -EIO failure while reading from device
+ * -EAGAIN device is stuck in the bootloader because it has invalid firmware
+ * 0 device is supported and in operational mode
+ */
+static int cyapa_bl_exit(struct cyapa *cyapa)
+{
+ int ret;
+
+ ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait for bootloader to exit, and operation mode to start.
+ * Normally, this takes at least 50 ms.
+ */
+ usleep_range(50000, 100000);
+ /*
+ * In addition, when a device boots for the first time after being
+ * updated to new firmware, it must first calibrate its sensors, which
+ * can take up to an additional 2 seconds.
+ */
+ ret = cyapa_poll_state(cyapa, 2000);
+ if (ret < 0)
+ return ret;
+ if (cyapa->state != CYAPA_STATE_OP)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * Set device power mode.
+ */
+static int cyapa_set_power_mode(struct cyapa *cyapa, u8 power_mode)
+{
+ struct device *dev = &cyapa->client->dev;
+ int ret;
+ u8 power;
+
+ if (cyapa->state != CYAPA_STATE_OP)
+ return 0;
+
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_POWER_MODE);
+ if (ret < 0)
+ return ret;
+
+ power = ret & ~PWR_MODE_MASK;
+ power |= power_mode & PWR_MODE_MASK;
+ ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
+ if (ret < 0)
+ dev_err(dev, "failed to set power_mode 0x%02x err = %d\n",
+ power_mode, ret);
+ return ret;
+}
+
+static int cyapa_get_query_data(struct cyapa *cyapa)
+{
+ u8 query_data[QUERY_DATA_SIZE];
+ int ret;
+
+ if (cyapa->state != CYAPA_STATE_OP)
+ return -EBUSY;
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_QUERY, query_data);
+ if (ret < 0)
+ return ret;
+ if (ret != QUERY_DATA_SIZE)
+ return -EIO;
+
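+ /*
+ * Assemble the product ID string in the form "XXXXX-XXXXXX-XX" from
+ * the raw query data.
+ */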
+ memcpy(&cyapa->product_id[0], &query_data[0], 5);
+ cyapa->product_id[5] = '-';
+ memcpy(&cyapa->product_id[6], &query_data[5], 6);
+ cyapa->product_id[12] = '-';
+ memcpy(&cyapa->product_id[13], &query_data[11], 2);
+ cyapa->product_id[15] = '\0';
+
+ cyapa->btn_capability = query_data[19] & CAPABILITY_BTN_MASK;
+
+ cyapa->gen = query_data[20] & 0x0f;
+
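+ /*
+ * Maximum coordinates and physical sizes are 12-bit values whose high
+ * nibbles are packed into shared bytes of the query data.
+ */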
+ cyapa->max_abs_x = ((query_data[21] & 0xf0) << 4) | query_data[22];
+ cyapa->max_abs_y = ((query_data[21] & 0x0f) << 8) | query_data[23];
+
+ cyapa->physical_size_x =
+ ((query_data[24] & 0xf0) << 4) | query_data[25];
+ cyapa->physical_size_y =
+ ((query_data[24] & 0x0f) << 8) | query_data[26];
+
+ return 0;
+}
+
+/*
+ * Check if device is operational.
+ *
+ * An operational device is responding, has exited bootloader, and has
+ * firmware supported by this driver.
+ *
+ * Returns:
+ * -EBUSY no device or in bootloader
+ * -EIO failure while reading from device
+ * -EAGAIN device is still in bootloader
+ * if ->state == CYAPA_STATE_BL_IDLE, the device has invalid firmware
+ * -EINVAL device is in operational mode, but not supported by this driver
+ * 0 device is supported
+ */
+static int cyapa_check_is_operational(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ static const char unique_str[] = "CYTRA";
+ int ret;
+
+ ret = cyapa_poll_state(cyapa, 2000);
+ if (ret < 0)
+ return ret;
+ switch (cyapa->state) {
+ case CYAPA_STATE_BL_ACTIVE:
+ ret = cyapa_bl_deactivate(cyapa);
+ if (ret)
+ return ret;
+
+ /* Fallthrough state */
+ case CYAPA_STATE_BL_IDLE:
+ ret = cyapa_bl_exit(cyapa);
+ if (ret)
+ return ret;
+
+ /* Fallthrough state */
+ case CYAPA_STATE_OP:
+ ret = cyapa_get_query_data(cyapa);
+ if (ret < 0)
+ return ret;
+
+ /* only support firmware protocol gen3 */
+ if (cyapa->gen != CYAPA_GEN3) {
+ dev_err(dev, "unsupported protocol version (%d)",
+ cyapa->gen);
+ return -EINVAL;
+ }
+
+ /* only support product ID starting with CYTRA */
+ if (memcmp(cyapa->product_id, unique_str,
+ sizeof(unique_str) - 1) != 0) {
+ dev_err(dev, "unsupported product ID (%s)\n",
+ cyapa->product_id);
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ return -EIO;
+ }
+ return 0;
+}
+
+static irqreturn_t cyapa_irq(int irq, void *dev_id)
+{
+ struct cyapa *cyapa = dev_id;
+ struct device *dev = &cyapa->client->dev;
+ struct input_dev *input = cyapa->input;
+ struct cyapa_reg_data data;
+ int i;
+ int ret;
+ int num_fingers;
+
+ if (device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
+ if (ret != sizeof(data))
+ goto out;
+
+ if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
+ (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
+ (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
+ goto out;
+ }
+
+ num_fingers = (data.finger_btn >> 4) & 0x0f;
+ for (i = 0; i < num_fingers; i++) {
+ const struct cyapa_touch *touch = &data.touches[i];
+ /* Note: touch->id range is 1 to 15; slots are 0 to 14. */
+ int slot = touch->id - 1;
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X,
+ ((touch->xy_hi & 0xf0) << 4) | touch->x_lo);
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ ((touch->xy_hi & 0x0f) << 8) | touch->y_lo);
+ input_report_abs(input, ABS_MT_PRESSURE, touch->pressure);
+ }
+
+ input_mt_sync_frame(input);
+
+ if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
+ input_report_key(input, BTN_LEFT,
+ data.finger_btn & OP_DATA_LEFT_BTN);
+
+ if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
+ input_report_key(input, BTN_MIDDLE,
+ data.finger_btn & OP_DATA_MIDDLE_BTN);
+
+ if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
+ input_report_key(input, BTN_RIGHT,
+ data.finger_btn & OP_DATA_RIGHT_BTN);
+
+ input_sync(input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
+{
+ u8 ret = CYAPA_ADAPTER_FUNC_NONE;
+
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ ret |= CYAPA_ADAPTER_FUNC_I2C;
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ ret |= CYAPA_ADAPTER_FUNC_SMBUS;
+ return ret;
+}
+
+static int cyapa_create_input_dev(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ int ret;
+ struct input_dev *input;
+
+ if (!cyapa->physical_size_x || !cyapa->physical_size_y)
+ return -EINVAL;
+
+ input = cyapa->input = input_allocate_device();
+ if (!input) {
+ dev_err(dev, "allocate memory for input device failed\n");
+ return -ENOMEM;
+ }
+
+ input->name = CYAPA_NAME;
+ input->phys = cyapa->phys;
+ input->id.bustype = BUS_I2C;
+ input->id.version = 1;
+ input->id.product = 0; /* means any product in eventcomm. */
+ input->dev.parent = &cyapa->client->dev;
+
+ input_set_drvdata(input, cyapa);
+
+ __set_bit(EV_ABS, input->evbit);
+
+ /* finger position */
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
+ 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
+ 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ cyapa->max_abs_x / cyapa->physical_size_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ cyapa->max_abs_y / cyapa->physical_size_y);
+
+ if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
+ __set_bit(BTN_LEFT, input->keybit);
+ if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
+ __set_bit(BTN_MIDDLE, input->keybit);
+ if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
+ __set_bit(BTN_RIGHT, input->keybit);
+
+ if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ /* handle pointer emulation and unused slots in core */
+ ret = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (ret) {
+ dev_err(dev, "allocate memory for MT slots failed, %d\n", ret);
+ goto err_free_device;
+ }
+
+ /* Register the device in input subsystem */
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(dev, "input device register failed, %d\n", ret);
+ goto err_free_device;
+ }
+ return 0;
+
+err_free_device:
+ input_free_device(input);
+ cyapa->input = NULL;
+ return ret;
+}
+
+static int cyapa_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ int ret;
+ u8 adapter_func;
+ struct cyapa *cyapa;
+ struct device *dev = &client->dev;
+
+ adapter_func = cyapa_check_adapter_functionality(client);
+ if (adapter_func == CYAPA_ADAPTER_FUNC_NONE) {
+ dev_err(dev, "not a supported I2C/SMBus adapter\n");
+ return -EIO;
+ }
+
+ cyapa = kzalloc(sizeof(struct cyapa), GFP_KERNEL);
+ if (!cyapa) {
+ dev_err(dev, "allocate memory for cyapa failed\n");
+ return -ENOMEM;
+ }
+
+ cyapa->gen = CYAPA_GEN3;
+ cyapa->client = client;
+ i2c_set_clientdata(client, cyapa);
+ sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
+ client->addr);
+
+ /* i2c isn't supported, use smbus */
+ if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
+ cyapa->smbus = true;
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+ ret = cyapa_check_is_operational(cyapa);
+ if (ret) {
+ dev_err(dev, "device not operational, %d\n", ret);
+ goto err_mem_free;
+ }
+
+ ret = cyapa_create_input_dev(cyapa);
+ if (ret) {
+ dev_err(dev, "create input_dev instance failed, %d\n", ret);
+ goto err_mem_free;
+ }
+
+ ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ if (ret) {
+ dev_err(dev, "set active power failed, %d\n", ret);
+ goto err_unregister_device;
+ }
+
+ cyapa->irq = client->irq;
+ ret = request_threaded_irq(cyapa->irq,
+ NULL,
+ cyapa_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cyapa",
+ cyapa);
+ if (ret) {
+ dev_err(dev, "IRQ request failed: %d\n, ", ret);
+ goto err_unregister_device;
+ }
+
+ return 0;
+
+err_unregister_device:
+ input_unregister_device(cyapa->input);
+err_mem_free:
+ kfree(cyapa);
+
+ return ret;
+}
+
+static int cyapa_remove(struct i2c_client *client)
+{
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+
+ free_irq(cyapa->irq, cyapa);
+ input_unregister_device(cyapa->input);
+ cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+ kfree(cyapa);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cyapa_suspend(struct device *dev)
+{
+ int ret;
+ u8 power_mode;
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+
+ disable_irq(cyapa->irq);
+
+ /*
+ * Set trackpad device to idle mode if wakeup is allowed,
+ * otherwise turn off.
+ */
+ power_mode = device_may_wakeup(dev) ? PWR_MODE_IDLE
+ : PWR_MODE_OFF;
+ ret = cyapa_set_power_mode(cyapa, power_mode);
+ if (ret < 0)
+ dev_err(dev, "set power mode failed, %d\n", ret);
+
+ if (device_may_wakeup(dev))
+ cyapa->irq_wake = (enable_irq_wake(cyapa->irq) == 0);
+ return 0;
+}
+
+static int cyapa_resume(struct device *dev)
+{
+ int ret;
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev) && cyapa->irq_wake)
+ disable_irq_wake(cyapa->irq);
+
+ ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ if (ret)
+ dev_warn(dev, "resume active power failed, %d\n", ret);
+
+ enable_irq(cyapa->irq);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(cyapa_pm_ops, cyapa_suspend, cyapa_resume);
+
+static const struct i2c_device_id cyapa_id_table[] = {
+ { "cyapa", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, cyapa_id_table);
+
+static struct i2c_driver cyapa_driver = {
+ .driver = {
+ .name = "cyapa",
+ .owner = THIS_MODULE,
+ .pm = &cyapa_pm_ops,
+ },
+
+ .probe = cyapa_probe,
+ .remove = cyapa_remove,
+ .id_table = cyapa_id_table,
+};
+
+module_i2c_driver(cyapa_driver);
+
+MODULE_DESCRIPTION("Cypress APA I2C Trackpad Driver");
+MODULE_AUTHOR("Dudley Du <dudl@cypress.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
new file mode 100644
index 000000000000..1673dc6c8092
--- /dev/null
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -0,0 +1,725 @@
+/*
+ * Cypress Trackpad PS/2 mouse driver
+ *
+ * Copyright (c) 2012 Cypress Semiconductor Corporation.
+ *
+ * Author:
+ * Dudley Du <dudl@cypress.com>
+ *
+ * Additional contributors include:
+ * Kamal Mostafa <kamal@canonical.com>
+ * Kyle Fazzari <git@status.e4ward.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/serio.h>
+#include <linux/libps2.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "cypress_ps2.h"
+
+#undef CYTP_DEBUG_VERBOSE /* define this and DEBUG for more verbose dump */
+
+static void cypress_set_packet_size(struct psmouse *psmouse, unsigned int n)
+{
+ struct cytp_data *cytp = psmouse->private;
+ cytp->pkt_size = n;
+}
+
+static const unsigned char cytp_rate[] = {10, 20, 40, 60, 100, 200};
+static const unsigned char cytp_resolution[] = {0x00, 0x01, 0x02, 0x03};
+
+static int cypress_ps2_sendbyte(struct psmouse *psmouse, int value)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (ps2_sendbyte(ps2dev, value & 0xff, CYTP_CMD_TIMEOUT) < 0) {
+ psmouse_dbg(psmouse,
+ "sending command 0x%02x failed, resp 0x%02x\n",
+ value & 0xff, ps2dev->nak);
+ if (ps2dev->nak == CYTP_PS2_RETRY)
+ return CYTP_PS2_RETRY;
+ else
+ return CYTP_PS2_ERROR;
+ }
+
+#ifdef CYTP_DEBUG_VERBOSE
+ psmouse_dbg(psmouse, "sending command 0x%02x succeeded, resp 0xfa\n",
+ value & 0xff);
+#endif
+
+ return 0;
+}
+
+static int cypress_ps2_ext_cmd(struct psmouse *psmouse, unsigned short cmd,
+ unsigned char data)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int tries = CYTP_PS2_CMD_TRIES;
+ int rc;
+
+ ps2_begin_command(ps2dev);
+
+ do {
+ /*
+ * Send extension command byte (0xE8 or 0xF3).
+ * If sending the command fails, send recovery command
+ * to make the device return to the ready state.
+ */
+ rc = cypress_ps2_sendbyte(psmouse, cmd & 0xff);
+ if (rc == CYTP_PS2_RETRY) {
+ rc = cypress_ps2_sendbyte(psmouse, 0x00);
+ if (rc == CYTP_PS2_RETRY)
+ rc = cypress_ps2_sendbyte(psmouse, 0x0a);
+ }
+ if (rc == CYTP_PS2_ERROR)
+ continue;
+
+ rc = cypress_ps2_sendbyte(psmouse, data);
+ if (rc == CYTP_PS2_RETRY)
+ rc = cypress_ps2_sendbyte(psmouse, data);
+ if (rc == CYTP_PS2_ERROR)
+ continue;
+ else
+ break;
+ } while (--tries > 0);
+
+ ps2_end_command(ps2dev);
+
+ return rc;
+}
+
+static int cypress_ps2_read_cmd_status(struct psmouse *psmouse,
+ unsigned char cmd,
+ unsigned char *param)
+{
+ int rc;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ enum psmouse_state old_state;
+ int pktsize;
+
+ ps2_begin_command(&psmouse->ps2dev);
+
+ old_state = psmouse->state;
+ psmouse->state = PSMOUSE_CMD_MODE;
+ psmouse->pktcnt = 0;
+
+ pktsize = (cmd == CYTP_CMD_READ_TP_METRICS) ? 8 : 3;
+ memset(param, 0, pktsize);
+
+ rc = cypress_ps2_sendbyte(psmouse, 0xe9);
+ if (rc < 0)
+ goto out;
+
+ wait_event_timeout(ps2dev->wait,
+ (psmouse->pktcnt >= pktsize),
+ msecs_to_jiffies(CYTP_CMD_TIMEOUT));
+
+ memcpy(param, psmouse->packet, pktsize);
+
+ psmouse_dbg(psmouse, "Command 0x%02x response data (0x): %*ph\n",
+ cmd, pktsize, param);
+
+out:
+ psmouse->state = old_state;
+ psmouse->pktcnt = 0;
+
+ ps2_end_command(&psmouse->ps2dev);
+
+ return rc;
+}
+
+static bool cypress_verify_cmd_state(struct psmouse *psmouse,
+ unsigned char cmd, unsigned char *param)
+{
+ bool rate_match = false;
+ bool resolution_match = false;
+ int i;
+
+ /* callers will do further checking. */
+ if (cmd == CYTP_CMD_READ_CYPRESS_ID ||
+ cmd == CYTP_CMD_STANDARD_MODE ||
+ cmd == CYTP_CMD_READ_TP_METRICS)
+ return true;
+
+ if ((~param[0] & DFLT_RESP_BITS_VALID) == DFLT_RESP_BITS_VALID &&
+ (param[0] & DFLT_RESP_BIT_MODE) == DFLT_RESP_STREAM_MODE) {
+ for (i = 0; i < sizeof(cytp_resolution); i++)
+ if (cytp_resolution[i] == param[1])
+ resolution_match = true;
+
+ for (i = 0; i < sizeof(cytp_rate); i++)
+ if (cytp_rate[i] == param[2])
+ rate_match = true;
+
+ if (resolution_match && rate_match)
+ return true;
+ }
+
+ psmouse_dbg(psmouse, "verify cmd state failed.\n");
+ return false;
+}
+
+static int cypress_send_ext_cmd(struct psmouse *psmouse, unsigned char cmd,
+ unsigned char *param)
+{
+ int tries = CYTP_PS2_CMD_TRIES;
+ int rc;
+
+ psmouse_dbg(psmouse, "send extension cmd 0x%02x, [%d %d %d %d]\n",
+ cmd, DECODE_CMD_AA(cmd), DECODE_CMD_BB(cmd),
+ DECODE_CMD_CC(cmd), DECODE_CMD_DD(cmd));
+
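+ /*
+ * An extended command is issued as four PSMOUSE_CMD_SETRES commands
+ * carrying the 2-bit DD, CC, BB and AA fields of the encoded command,
+ * followed by a 0xE9 status request that collects the response.
+ */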
+ do {
+ cypress_ps2_ext_cmd(psmouse,
+ PSMOUSE_CMD_SETRES, DECODE_CMD_DD(cmd));
+ cypress_ps2_ext_cmd(psmouse,
+ PSMOUSE_CMD_SETRES, DECODE_CMD_CC(cmd));
+ cypress_ps2_ext_cmd(psmouse,
+ PSMOUSE_CMD_SETRES, DECODE_CMD_BB(cmd));
+ cypress_ps2_ext_cmd(psmouse,
+ PSMOUSE_CMD_SETRES, DECODE_CMD_AA(cmd));
+
+ rc = cypress_ps2_read_cmd_status(psmouse, cmd, param);
+ if (rc)
+ continue;
+
+ if (cypress_verify_cmd_state(psmouse, cmd, param))
+ return 0;
+
+ } while (--tries > 0);
+
+ return -EIO;
+}
+
+int cypress_detect(struct psmouse *psmouse, bool set_properties)
+{
+ unsigned char param[3];
+
+ if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_CYPRESS_ID, param))
+ return -ENODEV;
+
+ /* Check for Cypress Trackpad signature bytes: 0x33 0xCC */
+ if (param[0] != 0x33 || param[1] != 0xCC)
+ return -ENODEV;
+
+ if (set_properties) {
+ psmouse->vendor = "Cypress";
+ psmouse->name = "Trackpad";
+ }
+
+ return 0;
+}
+
+static int cypress_read_fw_version(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+ unsigned char param[3];
+
+ if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_CYPRESS_ID, param))
+ return -ENODEV;
+
+ /* Check for Cypress Trackpad signature bytes: 0x33 0xCC */
+ if (param[0] != 0x33 || param[1] != 0xCC)
+ return -ENODEV;
+
+ cytp->fw_version = param[2] & FW_VERSION_MASX;
+ cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0;
+
+ psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version);
+ psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n",
+ cytp->tp_metrics_supported);
+
+ return 0;
+}
+
+static int cypress_read_tp_metrics(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+ unsigned char param[8];
+
+ /* set default values for tp metrics. */
+ cytp->tp_width = CYTP_DEFAULT_WIDTH;
+ cytp->tp_high = CYTP_DEFAULT_HIGH;
+ cytp->tp_max_abs_x = CYTP_ABS_MAX_X;
+ cytp->tp_max_abs_y = CYTP_ABS_MAX_Y;
+ cytp->tp_min_pressure = CYTP_MIN_PRESSURE;
+ cytp->tp_max_pressure = CYTP_MAX_PRESSURE;
+ cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
+ cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
+
+ memset(param, 0, sizeof(param));
+ if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) {
+ /* Update trackpad parameters. */
+ cytp->tp_max_abs_x = (param[1] << 8) | param[0];
+ cytp->tp_max_abs_y = (param[3] << 8) | param[2];
+ cytp->tp_min_pressure = param[4];
+ cytp->tp_max_pressure = param[5];
+ }
+
+ if (!cytp->tp_max_pressure ||
+ cytp->tp_max_pressure < cytp->tp_min_pressure ||
+ !cytp->tp_width || !cytp->tp_high ||
+ !cytp->tp_max_abs_x ||
+ cytp->tp_max_abs_x < cytp->tp_width ||
+ !cytp->tp_max_abs_y ||
+ cytp->tp_max_abs_y < cytp->tp_high)
+ return -EINVAL;
+
+ cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
+ cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
+
+#ifdef CYTP_DEBUG_VERBOSE
+ psmouse_dbg(psmouse, "Dump trackpad hardware configuration as below:\n");
+ psmouse_dbg(psmouse, "cytp->tp_width = %d\n", cytp->tp_width);
+ psmouse_dbg(psmouse, "cytp->tp_high = %d\n", cytp->tp_high);
+ psmouse_dbg(psmouse, "cytp->tp_max_abs_x = %d\n", cytp->tp_max_abs_x);
+ psmouse_dbg(psmouse, "cytp->tp_max_abs_y = %d\n", cytp->tp_max_abs_y);
+ psmouse_dbg(psmouse, "cytp->tp_min_pressure = %d\n", cytp->tp_min_pressure);
+ psmouse_dbg(psmouse, "cytp->tp_max_pressure = %d\n", cytp->tp_max_pressure);
+ psmouse_dbg(psmouse, "cytp->tp_res_x = %d\n", cytp->tp_res_x);
+ psmouse_dbg(psmouse, "cytp->tp_res_y = %d\n", cytp->tp_res_y);
+
+ psmouse_dbg(psmouse, "tp_type_APA = %d\n",
+ (param[6] & TP_METRICS_BIT_APA) ? 1 : 0);
+ psmouse_dbg(psmouse, "tp_type_MTG = %d\n",
+ (param[6] & TP_METRICS_BIT_MTG) ? 1 : 0);
+ psmouse_dbg(psmouse, "tp_palm = %d\n",
+ (param[6] & TP_METRICS_BIT_PALM) ? 1 : 0);
+ psmouse_dbg(psmouse, "tp_stubborn = %d\n",
+ (param[6] & TP_METRICS_BIT_STUBBORN) ? 1 : 0);
+ psmouse_dbg(psmouse, "tp_1f_jitter = %d\n",
+ (param[6] & TP_METRICS_BIT_1F_JITTER) >> 2);
+ psmouse_dbg(psmouse, "tp_2f_jitter = %d\n",
+ (param[6] & TP_METRICS_BIT_2F_JITTER) >> 4);
+ psmouse_dbg(psmouse, "tp_1f_spike = %d\n",
+ param[7] & TP_METRICS_BIT_1F_SPIKE);
+ psmouse_dbg(psmouse, "tp_2f_spike = %d\n",
+ (param[7] & TP_METRICS_BIT_2F_SPIKE) >> 2);
+ psmouse_dbg(psmouse, "tp_abs_packet_format_set = %d\n",
+ (param[7] & TP_METRICS_BIT_ABS_PKT_FORMAT_SET) >> 4);
+#endif
+
+ return 0;
+}
+
+static int cypress_query_hardware(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+ int ret;
+
+ ret = cypress_read_fw_version(psmouse);
+ if (ret)
+ return ret;
+
+ if (cytp->tp_metrics_supported) {
+ ret = cypress_read_tp_metrics(psmouse);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cypress_set_absolute_mode(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+ unsigned char param[3];
+
+ if (cypress_send_ext_cmd(psmouse, CYTP_CMD_ABS_WITH_PRESSURE_MODE, param) < 0)
+ return -1;
+
+ cytp->mode = (cytp->mode & ~CYTP_BIT_ABS_REL_MASK)
+ | CYTP_BIT_ABS_PRESSURE;
+ cypress_set_packet_size(psmouse, 5);
+
+ return 0;
+}
+
+/*
+ * Reset trackpad device.
+ * This is also the default mode when the trackpad is powered on.
+ */
+static void cypress_reset(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+
+ cytp->mode = 0;
+
+ psmouse_reset(psmouse);
+}
+
+static int cypress_set_input_params(struct input_dev *input,
+ struct cytp_data *cytp)
+{
+ int ret;
+
+ if (!cytp->tp_res_x || !cytp->tp_res_y)
+ return -EINVAL;
+
+ __set_bit(EV_ABS, input->evbit);
+ input_set_abs_params(input, ABS_X, 0, cytp->tp_max_abs_x, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, cytp->tp_max_abs_y, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE,
+ cytp->tp_min_pressure, cytp->tp_max_pressure, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, 255, 0, 0);
+
+ /* finger position */
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, cytp->tp_max_abs_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cytp->tp_max_abs_y, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+ ret = input_mt_init_slots(input, CYTP_MAX_MT_SLOTS,
+ INPUT_MT_DROP_UNUSED|INPUT_MT_TRACK);
+ if (ret < 0)
+ return ret;
+
+ __set_bit(INPUT_PROP_SEMI_MT, input->propbit);
+
+ input_abs_set_res(input, ABS_X, cytp->tp_res_x);
+ input_abs_set_res(input, ABS_Y, cytp->tp_res_y);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X, cytp->tp_res_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, cytp->tp_res_y);
+
+ __set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+ __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+ __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+
+ __clear_bit(EV_REL, input->evbit);
+ __clear_bit(REL_X, input->relbit);
+ __clear_bit(REL_Y, input->relbit);
+
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(BTN_RIGHT, input->keybit);
+ __set_bit(BTN_MIDDLE, input->keybit);
+
+ input_set_drvdata(input, cytp);
+
+ return 0;
+}
+
+static int cypress_get_finger_count(unsigned char header_byte)
+{
+ unsigned char bits6_7;
+ int finger_count;
+
+ bits6_7 = header_byte >> 6;
+ finger_count = bits6_7 & 0x03;
+
+ if (finger_count == 1)
+ return 1;
+
+ if (header_byte & ABS_HSCROLL_BIT) {
+ /* With the HSCROLL bit set, a raw count of 0 means 4 fingers and 2 means 5. */
+ switch (finger_count) {
+ case 0: return 4;
+ case 2: return 5;
+ default:
+ /* Invalid contact (e.g. palm). Ignore it. */
+ return -1;
+ }
+ }
+
+ return finger_count;
+}
+
+
+static int cypress_parse_packet(struct psmouse *psmouse,
+ struct cytp_data *cytp, struct cytp_report_data *report_data)
+{
+ unsigned char *packet = psmouse->packet;
+ unsigned char header_byte = packet[0];
+ int contact_cnt;
+
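+ /*
+ * Absolute packet layout (with pressure enabled): byte 0 is the header
+ * (buttons, tap and finger count), bytes 1-3 carry contact 0 X/Y with
+ * the high bits packed into byte 1, byte 4 is the pressure, and bytes
+ * 5-7 carry contact 1 in the same fashion.
+ */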
+ memset(report_data, 0, sizeof(struct cytp_report_data));
+
+ contact_cnt = cypress_get_finger_count(header_byte);
+
+ if (contact_cnt < 0) /* e.g. palm detect */
+ return -EINVAL;
+
+ report_data->contact_cnt = contact_cnt;
+
+ report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0;
+
+ if (report_data->contact_cnt == 1) {
+ report_data->contacts[0].x =
+ ((packet[1] & 0x70) << 4) | packet[2];
+ report_data->contacts[0].y =
+ ((packet[1] & 0x07) << 8) | packet[3];
+ if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+ report_data->contacts[0].z = packet[4];
+
+ } else if (report_data->contact_cnt >= 2) {
+ report_data->contacts[0].x =
+ ((packet[1] & 0x70) << 4) | packet[2];
+ report_data->contacts[0].y =
+ ((packet[1] & 0x07) << 8) | packet[3];
+ if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+ report_data->contacts[0].z = packet[4];
+
+ report_data->contacts[1].x =
+ ((packet[5] & 0xf0) << 4) | packet[6];
+ report_data->contacts[1].y =
+ ((packet[5] & 0x0f) << 8) | packet[7];
+ if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+ report_data->contacts[1].z = report_data->contacts[0].z;
+ }
+
+ report_data->left = (header_byte & BTN_LEFT_BIT) ? 1 : 0;
+ report_data->right = (header_byte & BTN_RIGHT_BIT) ? 1 : 0;
+
+ /*
+ * This is only true if one of the mouse buttons was tapped. Make
+ * sure it doesn't turn into a click. The regular tap-to-click
+ * functionality will handle that on its own. If we don't do this,
+ * disabling tap-to-click won't affect the mouse button zones.
+ */
+ if (report_data->tap)
+ report_data->left = 0;
+
+#ifdef CYTP_DEBUG_VERBOSE
+ {
+ int i;
+ int n = report_data->contact_cnt;
+ psmouse_dbg(psmouse, "Dump parsed report data as below:\n");
+ psmouse_dbg(psmouse, "contact_cnt = %d\n",
+ report_data->contact_cnt);
+ if (n > CYTP_MAX_MT_SLOTS)
+ n = CYTP_MAX_MT_SLOTS;
+ for (i = 0; i < n; i++)
+ psmouse_dbg(psmouse, "contacts[%d] = {%d, %d, %d}\n", i,
+ report_data->contacts[i].x,
+ report_data->contacts[i].y,
+ report_data->contacts[i].z);
+ psmouse_dbg(psmouse, "left = %d\n", report_data->left);
+ psmouse_dbg(psmouse, "right = %d\n", report_data->right);
+ psmouse_dbg(psmouse, "middle = %d\n", report_data->middle);
+ }
+#endif
+
+ return 0;
+}
+
+static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt)
+{
+ int i;
+ struct input_dev *input = psmouse->dev;
+ struct cytp_data *cytp = psmouse->private;
+ struct cytp_report_data report_data;
+ struct cytp_contact *contact;
+ struct input_mt_pos pos[CYTP_MAX_MT_SLOTS];
+ int slots[CYTP_MAX_MT_SLOTS];
+ int n;
+
+ if (cypress_parse_packet(psmouse, cytp, &report_data))
+ return;
+
+ n = report_data.contact_cnt;
+
+ if (n > CYTP_MAX_MT_SLOTS)
+ n = CYTP_MAX_MT_SLOTS;
+
+ for (i = 0; i < n; i++) {
+ contact = &report_data.contacts[i];
+ pos[i].x = contact->x;
+ pos[i].y = contact->y;
+ }
+
+ input_mt_assign_slots(input, slots, pos, n);
+
+ for (i = 0; i < n; i++) {
+ contact = &report_data.contacts[i];
+ input_mt_slot(input, slots[i]);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, contact->x);
+ input_report_abs(input, ABS_MT_POSITION_Y, contact->y);
+ input_report_abs(input, ABS_MT_PRESSURE, contact->z);
+ }
+
+ input_mt_sync_frame(input);
+
+ input_mt_report_finger_count(input, report_data.contact_cnt);
+
+ input_report_key(input, BTN_LEFT, report_data.left);
+ input_report_key(input, BTN_RIGHT, report_data.right);
+ input_report_key(input, BTN_MIDDLE, report_data.middle);
+
+ input_sync(input);
+}
+
+static psmouse_ret_t cypress_validate_byte(struct psmouse *psmouse)
+{
+ int contact_cnt;
+ int index = psmouse->pktcnt - 1;
+ unsigned char *packet = psmouse->packet;
+ struct cytp_data *cytp = psmouse->private;
+
+ if (index < 0 || index > cytp->pkt_size)
+ return PSMOUSE_BAD_DATA;
+
+ if (index == 0 && (packet[0] & 0xfc) == 0) {
+ /* call packet process for reporting finger leave. */
+ cypress_process_packet(psmouse, 1);
+ return PSMOUSE_FULL_PACKET;
+ }
+
+ /*
+ * Perform validation (and adjust packet size) based only on the
+ * first byte; allow all further bytes through.
+ */
+ if (index != 0)
+ return PSMOUSE_GOOD_DATA;
+
+ /*
+ * If absolute/relative mode bit has not been set yet, just pass
+ * the byte through.
+ */
+ if ((cytp->mode & CYTP_BIT_ABS_REL_MASK) == 0)
+ return PSMOUSE_GOOD_DATA;
+
+ if ((packet[0] & 0x08) == 0x08)
+ return PSMOUSE_BAD_DATA;
+
+ contact_cnt = cypress_get_finger_count(packet[0]);
+
+ if (contact_cnt < 0)
+ return PSMOUSE_BAD_DATA;
+
+ if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE)
+ cypress_set_packet_size(psmouse, contact_cnt == 2 ? 7 : 4);
+ else
+ cypress_set_packet_size(psmouse, contact_cnt == 2 ? 8 : 5);
+
+ return PSMOUSE_GOOD_DATA;
+}
+
+static psmouse_ret_t cypress_protocol_handler(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+
+ if (psmouse->pktcnt >= cytp->pkt_size) {
+ cypress_process_packet(psmouse, 0);
+ return PSMOUSE_FULL_PACKET;
+ }
+
+ return cypress_validate_byte(psmouse);
+}
+
+static void cypress_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+ struct cytp_data *cytp = psmouse->private;
+
+ if (rate >= 80) {
+ psmouse->rate = 80;
+ cytp->mode |= CYTP_BIT_HIGH_RATE;
+ } else {
+ psmouse->rate = 40;
+ cytp->mode &= ~CYTP_BIT_HIGH_RATE;
+ }
+
+ ps2_command(&psmouse->ps2dev, (unsigned char *)&psmouse->rate,
+ PSMOUSE_CMD_SETRATE);
+}
+
+static void cypress_disconnect(struct psmouse *psmouse)
+{
+ cypress_reset(psmouse);
+ kfree(psmouse->private);
+ psmouse->private = NULL;
+}
+
+static int cypress_reconnect(struct psmouse *psmouse)
+{
+ int tries = CYTP_PS2_CMD_TRIES;
+ int rc;
+
+ do {
+ cypress_reset(psmouse);
+ rc = cypress_detect(psmouse, false);
+ } while (rc && (--tries > 0));
+
+ if (rc) {
+ psmouse_err(psmouse, "Reconnect: unable to detect trackpad.\n");
+ return -1;
+ }
+
+ if (cypress_set_absolute_mode(psmouse)) {
+ psmouse_err(psmouse, "Reconnect: Unable to initialize Cypress absolute mode.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int cypress_init(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp;
+
+ cytp = kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
+ psmouse->private = cytp;
+ if (!cytp)
+ return -ENOMEM;
+
+ cypress_reset(psmouse);
+
+ psmouse->pktsize = 8;
+
+ if (cypress_query_hardware(psmouse)) {
+ psmouse_err(psmouse, "Unable to query Trackpad hardware.\n");
+ goto err_exit;
+ }
+
+ if (cypress_set_absolute_mode(psmouse)) {
+ psmouse_err(psmouse, "init: Unable to initialize Cypress absolute mode.\n");
+ goto err_exit;
+ }
+
+ if (cypress_set_input_params(psmouse->dev, cytp) < 0) {
+ psmouse_err(psmouse, "init: Unable to set input params.\n");
+ goto err_exit;
+ }
+
+ psmouse->model = 1;
+ psmouse->protocol_handler = cypress_protocol_handler;
+ psmouse->set_rate = cypress_set_rate;
+ psmouse->disconnect = cypress_disconnect;
+ psmouse->reconnect = cypress_reconnect;
+ psmouse->cleanup = cypress_reset;
+ psmouse->resync_time = 0;
+
+ return 0;
+
+err_exit:
+ /*
+ * Reset the Cypress Trackpad to its standard mouse mode, then let
+ * the psmouse driver communicate with it as a default PS/2 mouse.
+ */
+ cypress_reset(psmouse);
+
+ psmouse->private = NULL;
+ kfree(cytp);
+
+ return -1;
+}
+
+bool cypress_supported(void)
+{
+ return true;
+}
diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h
new file mode 100644
index 000000000000..4720f21d2d70
--- /dev/null
+++ b/drivers/input/mouse/cypress_ps2.h
@@ -0,0 +1,191 @@
+#ifndef _CYPRESS_PS2_H
+#define _CYPRESS_PS2_H
+
+#include "psmouse.h"
+
+#define CMD_BITS_MASK 0x03
+#define COMPOSIT(x, s) (((x) & CMD_BITS_MASK) << (s))
+
+#define ENCODE_CMD(aa, bb, cc, dd) \
+ (COMPOSIT((aa), 6) | COMPOSIT((bb), 4) | COMPOSIT((cc), 2) | COMPOSIT((dd), 0))
+#define CYTP_CMD_ABS_NO_PRESSURE_MODE ENCODE_CMD(0, 1, 0, 0)
+#define CYTP_CMD_ABS_WITH_PRESSURE_MODE ENCODE_CMD(0, 1, 0, 1)
+#define CYTP_CMD_SMBUS_MODE ENCODE_CMD(0, 1, 1, 0)
+#define CYTP_CMD_STANDARD_MODE ENCODE_CMD(0, 2, 0, 0) /* not implemented yet. */
+#define CYTP_CMD_CYPRESS_REL_MODE ENCODE_CMD(1, 1, 1, 1) /* not implemented yet. */
+#define CYTP_CMD_READ_CYPRESS_ID ENCODE_CMD(0, 0, 0, 0)
+#define CYTP_CMD_READ_TP_METRICS ENCODE_CMD(0, 0, 0, 1)
+#define CYTP_CMD_SET_HSCROLL_WIDTH(w) ENCODE_CMD(1, 1, 0, (w))
+#define CYTP_CMD_SET_HSCROLL_MASK ENCODE_CMD(1, 1, 0, 0)
+#define CYTP_CMD_SET_VSCROLL_WIDTH(w) ENCODE_CMD(1, 2, 0, (w))
+#define CYTP_CMD_SET_VSCROLL_MASK ENCODE_CMD(1, 2, 0, 0)
+#define CYTP_CMD_SET_PALM_GEOMETRY(e) ENCODE_CMD(1, 2, 1, (e))
+#define CYTP_CMD_PALM_GEMMETRY_MASK ENCODE_CMD(1, 2, 1, 0)
+#define CYTP_CMD_SET_PALM_SENSITIVITY(s) ENCODE_CMD(1, 2, 2, (s))
+#define CYTP_CMD_PALM_SENSITIVITY_MASK ENCODE_CMD(1, 2, 2, 0)
+#define CYTP_CMD_SET_MOUSE_SENSITIVITY(s) ENCODE_CMD(1, 3, ((s) >> 2), (s))
+#define CYTP_CMD_MOUSE_SENSITIVITY_MASK ENCODE_CMD(1, 3, 0, 0)
+#define CYTP_CMD_REQUEST_BASELINE_STATUS ENCODE_CMD(2, 0, 0, 1)
+#define CYTP_CMD_REQUEST_RECALIBRATION ENCODE_CMD(2, 0, 0, 3)
+
+#define DECODE_CMD_AA(x) (((x) >> 6) & CMD_BITS_MASK)
+#define DECODE_CMD_BB(x) (((x) >> 4) & CMD_BITS_MASK)
+#define DECODE_CMD_CC(x) (((x) >> 2) & CMD_BITS_MASK)
+#define DECODE_CMD_DD(x) ((x) & CMD_BITS_MASK)
+
+/* Cypress trackpad working mode. */
+#define CYTP_BIT_ABS_PRESSURE (1 << 3)
+#define CYTP_BIT_ABS_NO_PRESSURE (1 << 2)
+#define CYTP_BIT_CYPRESS_REL (1 << 1)
+#define CYTP_BIT_STANDARD_REL (1 << 0)
+#define CYTP_BIT_REL_MASK (CYTP_BIT_CYPRESS_REL | CYTP_BIT_STANDARD_REL)
+#define CYTP_BIT_ABS_MASK (CYTP_BIT_ABS_PRESSURE | CYTP_BIT_ABS_NO_PRESSURE)
+#define CYTP_BIT_ABS_REL_MASK (CYTP_BIT_ABS_MASK | CYTP_BIT_REL_MASK)
+
+#define CYTP_BIT_HIGH_RATE (1 << 4)
+/*
+ * When the report mode bit is set, the firmware works in Remote Mode;
+ * when it is cleared, the firmware works in Stream Mode.
+ */
+#define CYTP_BIT_REPORT_MODE (1 << 5)
+
+/* scrolling width values for set HSCROLL and VSCROLL width command. */
+#define SCROLL_WIDTH_NARROW 1
+#define SCROLL_WIDTH_NORMAL 2
+#define SCROLL_WIDTH_WIDE 3
+
+#define PALM_GEOMETRY_ENABLE 1
+#define PALM_GEOMETRY_DISABLE 0
+
+#define TP_METRICS_MASK 0x80
+#define FW_VERSION_MASX 0x7f
+#define FW_VER_HIGH_MASK 0x70
+#define FW_VER_LOW_MASK 0x0f
+
+/* Number of times to retry a ps2_command and the delay between tries, in ms. */
+#define CYTP_PS2_CMD_TRIES 3
+#define CYTP_PS2_CMD_DELAY 500
+
+/* Timeouts for PS/2 command and data responses, in milliseconds. */
+#define CYTP_CMD_TIMEOUT 200
+#define CYTP_DATA_TIMEOUT 30
+
+#define CYTP_EXT_CMD 0xe8
+#define CYTP_PS2_RETRY 0xfe
+#define CYTP_PS2_ERROR 0xfc
+
+#define CYTP_RESP_RETRY 0x01
+#define CYTP_RESP_ERROR 0xfe
+
+
+#define CYTP_105001_WIDTH 97 /* Dell XPS 13 */
+#define CYTP_105001_HIGH 59
+#define CYTP_DEFAULT_WIDTH (CYTP_105001_WIDTH)
+#define CYTP_DEFAULT_HIGH (CYTP_105001_HIGH)
+
+#define CYTP_ABS_MAX_X 1600
+#define CYTP_ABS_MAX_Y 900
+#define CYTP_MAX_PRESSURE 255
+#define CYTP_MIN_PRESSURE 0
+
+/* Header byte bits of a relative packet. */
+#define BTN_LEFT_BIT 0x01
+#define BTN_RIGHT_BIT 0x02
+#define BTN_MIDDLE_BIT 0x04
+#define REL_X_SIGN_BIT 0x10
+#define REL_Y_SIGN_BIT 0x20
+
+/* Header byte bits of an absolute packet. */
+#define ABS_VSCROLL_BIT 0x10
+#define ABS_HSCROLL_BIT 0x20
+#define ABS_MULTIFINGER_TAP 0x04
+#define ABS_EDGE_MOTION_MASK 0x80
+
+#define DFLT_RESP_BITS_VALID 0x88 /* SMBus bit should not be set. */
+#define DFLT_RESP_SMBUS_BIT 0x80
+#define DFLT_SMBUS_MODE 0x80
+#define DFLT_PS2_MODE 0x00
+#define DFLT_RESP_BIT_MODE 0x40
+#define DFLT_RESP_REMOTE_MODE 0x40
+#define DFLT_RESP_STREAM_MODE 0x00
+#define DFLT_RESP_BIT_REPORTING 0x20
+#define DFLT_RESP_BIT_SCALING 0x10
+
+#define TP_METRICS_BIT_PALM 0x80
+#define TP_METRICS_BIT_STUBBORN 0x40
+#define TP_METRICS_BIT_2F_JITTER 0x30
+#define TP_METRICS_BIT_1F_JITTER 0x0c
+#define TP_METRICS_BIT_APA 0x02
+#define TP_METRICS_BIT_MTG 0x01
+#define TP_METRICS_BIT_ABS_PKT_FORMAT_SET 0xf0
+#define TP_METRICS_BIT_2F_SPIKE 0x0c
+#define TP_METRICS_BIT_1F_SPIKE 0x03
+
+/* bits of first byte response of E9h-Status Request command. */
+#define RESP_BTN_RIGHT_BIT 0x01
+#define RESP_BTN_MIDDLE_BIT 0x02
+#define RESP_BTN_LEFT_BIT 0x04
+#define RESP_SCALING_BIT 0x10
+#define RESP_ENABLE_BIT 0x20
+#define RESP_REMOTE_BIT 0x40
+#define RESP_SMBUS_BIT 0x80
+
+#define CYTP_MAX_MT_SLOTS 2
+
+struct cytp_contact {
+ int x;
+ int y;
+ int z; /* also known as touch pressure. */
+};
+
+/* The structure of Cypress Trackpad event data. */
+struct cytp_report_data {
+ int contact_cnt;
+ struct cytp_contact contacts[CYTP_MAX_MT_SLOTS];
+ unsigned int left:1;
+ unsigned int right:1;
+ unsigned int middle:1;
+ unsigned int tap:1; /* multi-finger tap detected. */
+};
+
+/* The structure of Cypress Trackpad device private data. */
+struct cytp_data {
+ int fw_version;
+
+ int pkt_size;
+ int mode;
+
+ int tp_min_pressure;
+ int tp_max_pressure;
+ int tp_width; /* X direction physical size in mm. */
+ int tp_high; /* Y direction physical size in mm. */
+ int tp_max_abs_x; /* Max X absolute units that can be reported. */
+ int tp_max_abs_y; /* Max Y absolute units that can be reported. */
+
+ int tp_res_x; /* X resolution in units/mm. */
+ int tp_res_y; /* Y resolution in units/mm. */
+
+ int tp_metrics_supported;
+};
+
+
+#ifdef CONFIG_MOUSE_PS2_CYPRESS
+int cypress_detect(struct psmouse *psmouse, bool set_properties);
+int cypress_init(struct psmouse *psmouse);
+bool cypress_supported(void);
+#else
+static inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
+{
+ return -ENOSYS;
+}
+static inline int cypress_init(struct psmouse *psmouse)
+{
+ return -ENOSYS;
+}
+static inline bool cypress_supported(void)
+{
+ return false;
+}
+#endif /* CONFIG_MOUSE_PS2_CYPRESS */
+
+#endif /* _CYPRESS_PS2_H */
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 39fe9b737cae..532eaca4cc56 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -46,7 +46,7 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
input_sync(input);
}
-static int __devinit gpio_mouse_probe(struct platform_device *pdev)
+static int gpio_mouse_probe(struct platform_device *pdev)
{
struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data;
struct input_polled_dev *input_poll;
@@ -150,7 +150,7 @@ static int __devinit gpio_mouse_probe(struct platform_device *pdev)
return error;
}
-static int __devexit gpio_mouse_remove(struct platform_device *pdev)
+static int gpio_mouse_remove(struct platform_device *pdev)
{
struct input_polled_dev *input = platform_get_drvdata(pdev);
struct gpio_mouse_platform_data *pdata = input->private;
@@ -172,7 +172,7 @@ static int __devexit gpio_mouse_remove(struct platform_device *pdev)
static struct platform_driver gpio_mouse_device_driver = {
.probe = gpio_mouse_probe,
- .remove = __devexit_p(gpio_mouse_remove),
+ .remove = gpio_mouse_remove,
.driver = {
.name = "gpio_mouse",
.owner = THIS_MODULE,
diff --git a/drivers/input/mouse/maplemouse.c b/drivers/input/mouse/maplemouse.c
index 5f278176eb9b..0a60717b91c6 100644
--- a/drivers/input/mouse/maplemouse.c
+++ b/drivers/input/mouse/maplemouse.c
@@ -64,7 +64,7 @@ static void dc_mouse_close(struct input_dev *dev)
}
/* allow the mouse to be used */
-static int __devinit probe_maple_mouse(struct device *dev)
+static int probe_maple_mouse(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct maple_driver *mdrv = to_maple_driver(dev->driver);
@@ -114,7 +114,7 @@ fail:
return error;
}
-static int __devexit remove_maple_mouse(struct device *dev)
+static int remove_maple_mouse(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct dc_mouse *mse = maple_get_drvdata(mdev);
@@ -132,7 +132,7 @@ static struct maple_driver dc_mouse_driver = {
.drv = {
.name = "Dreamcast_mouse",
.probe = probe_maple_mouse,
- .remove = __devexit_p(remove_maple_mouse),
+ .remove = remove_maple_mouse,
},
};
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index c29ae7654d5e..8e1b98ea5648 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -206,7 +206,7 @@ static void navpoint_close(struct input_dev *input)
navpoint_down(navpoint);
}
-static int __devinit navpoint_probe(struct platform_device *pdev)
+static int navpoint_probe(struct platform_device *pdev)
{
const struct navpoint_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -299,7 +299,7 @@ err_free_gpio:
return error;
}
-static int __devexit navpoint_remove(struct platform_device *pdev)
+static int navpoint_remove(struct platform_device *pdev)
{
const struct navpoint_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -353,7 +353,7 @@ static SIMPLE_DEV_PM_OPS(navpoint_pm_ops, navpoint_suspend, navpoint_resume);
static struct platform_driver navpoint_driver = {
.probe = navpoint_probe,
- .remove = __devexit_p(navpoint_remove),
+ .remove = navpoint_remove,
.driver = {
.name = "navpoint",
.owner = THIS_MODULE,
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 22fe2547e169..cff065f6261c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -34,6 +34,7 @@
#include "touchkit_ps2.h"
#include "elantech.h"
#include "sentelic.h"
+#include "cypress_ps2.h"
#define DRIVER_DESC "PS/2 mouse driver"
@@ -759,6 +760,28 @@ static int psmouse_extensions(struct psmouse *psmouse,
}
/*
+ * Try Cypress Trackpad.
+ * Must try it before Finger Sensing Pad because Finger Sensing Pad probe
+ * upsets some modules of Cypress Trackpads.
+ */
+ if (max_proto > PSMOUSE_IMEX &&
+ cypress_detect(psmouse, set_properties) == 0) {
+ if (cypress_supported()) {
+ if (cypress_init(psmouse) == 0)
+ return PSMOUSE_CYPRESS;
+
+ /*
+ * The Finger Sensing Pad probe upsets some modules of
+ * the Cypress Trackpad, so avoid the Finger Sensing Pad
+ * probe when a Cypress Trackpad device is detected.
+ */
+ return PSMOUSE_PS2;
+ }
+
+ max_proto = PSMOUSE_IMEX;
+ }
+
+/*
* Try ALPS TouchPad
*/
if (max_proto > PSMOUSE_IMEX) {
@@ -896,6 +919,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.alias = "thinkps",
.detect = thinking_detect,
},
+#ifdef CONFIG_MOUSE_PS2_CYPRESS
+ {
+ .type = PSMOUSE_CYPRESS,
+ .name = "CyPS/2",
+ .alias = "cypress",
+ .detect = cypress_detect,
+ .init = cypress_init,
+ },
+#endif
{
.type = PSMOUSE_GENPS,
.name = "GenPS/2",
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index fe1df231ba4c..2f0b39d59a9b 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -95,6 +95,7 @@ enum psmouse_type {
PSMOUSE_ELANTECH,
PSMOUSE_FSP,
PSMOUSE_SYNAPTICS_RELATIVE,
+ PSMOUSE_CYPRESS,
PSMOUSE_AUTO /* This one should always be last */
};
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 4fe055f2c536..0ecb9e7945eb 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -143,7 +143,7 @@ static void pxa930_trkball_close(struct input_dev *dev)
pxa930_trkball_disable(trkball);
}
-static int __devinit pxa930_trkball_probe(struct platform_device *pdev)
+static int pxa930_trkball_probe(struct platform_device *pdev)
{
struct pxa930_trkball *trkball;
struct input_dev *input;
@@ -230,7 +230,7 @@ failed:
return error;
}
-static int __devexit pxa930_trkball_remove(struct platform_device *pdev)
+static int pxa930_trkball_remove(struct platform_device *pdev)
{
struct pxa930_trkball *trkball = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -248,7 +248,7 @@ static struct platform_driver pxa930_trkball_driver = {
.name = "pxa930-trkball",
},
.probe = pxa930_trkball_probe,
- .remove = __devexit_p(pxa930_trkball_remove),
+ .remove = pxa930_trkball_remove,
};
module_platform_driver(pxa930_trkball_driver);
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index e582922bacf7..cc7e0d4a8f93 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -791,7 +791,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
fsp_set_slot(dev, 0, fgrs > 0, abs_x, abs_y);
fsp_set_slot(dev, 1, false, 0, 0);
}
- if (fgrs > 0) {
+ if (fgrs == 1 || (fgrs == 2 && !(packet[0] & FSP_PB0_MFMC_FGR2))) {
input_report_abs(dev, ABS_X, abs_x);
input_report_abs(dev, ABS_Y, abs_y);
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 12d12ca3fee0..2f78538e09d0 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -722,11 +722,13 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
default:
/*
* If the finger slot contained in SGM is valid, and either
- * hasn't changed, or is new, then report SGM in MTB slot 0.
+ * hasn't changed, or is new, or the old SGM has now moved to
+ * AGM, then report SGM in MTB slot 0.
* Otherwise, empty MTB slot 0.
*/
if (mt_state->sgm != -1 &&
- (mt_state->sgm == old->sgm || old->sgm == -1))
+ (mt_state->sgm == old->sgm ||
+ old->sgm == -1 || mt_state->agm == old->sgm))
synaptics_report_slot(dev, 0, sgm);
else
synaptics_report_slot(dev, 0, NULL);
@@ -735,9 +737,31 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
* If the finger slot contained in AGM is valid, and either
* hasn't changed, or is new, then report AGM in MTB slot 1.
* Otherwise, empty MTB slot 1.
+ *
+ * However, in the case where the AGM is new, make sure that
+ * it is either the same as the old SGM, or there was no
+ * SGM.
+ *
+ * Otherwise, if the SGM was just 1, and the new AGM is 2, then
+ * the new AGM will keep the old SGM's tracking ID, which can
+ * cause apparent drumroll. This happens in the following
+ * valid finger sequence:
+ *
+ * Action SGM AGM (MTB slot:Contact)
+ * 1. Touch contact 0 (0:0)
+ * 2. Touch contact 1 (0:0, 1:1)
+ * 3. Lift contact 0 (1:1)
+ * 4. Touch contacts 2,3 (0:2, 1:3)
+ *
+ * In step 4, contact 3 in the AGM must not be given the same
+ * tracking ID as contact 1 had in step 3. To avoid this,
+ * the first agm with contact 3 is dropped and slot 1 is
+ * invalidated (tracking ID = -1).
*/
if (mt_state->agm != -1 &&
- (mt_state->agm == old->agm || old->agm == -1))
+ (mt_state->agm == old->agm ||
+ (old->agm == -1 &&
+ (old->sgm == -1 || mt_state->agm == old->sgm))))
synaptics_report_slot(dev, 1, agm);
else
synaptics_report_slot(dev, 1, NULL);
@@ -1247,11 +1271,11 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) {
- input_mt_init_slots(dev, 2, 0);
set_abs_position_params(dev, priv, ABS_MT_POSITION_X,
ABS_MT_POSITION_Y);
/* Image sensors can report per-contact pressure */
input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
+ input_mt_init_slots(dev, 2, INPUT_MT_POINTER);
/* Image sensors can signal 4 and 5 finger clicks */
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 063a174d3a88..ad822608f6ee 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -535,7 +535,7 @@ static struct synaptics_i2c *synaptics_i2c_touch_create(struct i2c_client *clien
return touch;
}
-static int __devinit synaptics_i2c_probe(struct i2c_client *client,
+static int synaptics_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
int ret;
@@ -601,7 +601,7 @@ err_mem_free:
return ret;
}
-static int __devexit synaptics_i2c_remove(struct i2c_client *client)
+static int synaptics_i2c_remove(struct i2c_client *client)
{
struct synaptics_i2c *touch = i2c_get_clientdata(client);
@@ -662,7 +662,7 @@ static struct i2c_driver synaptics_i2c_driver = {
},
.probe = synaptics_i2c_probe,
- .remove = __devexit_p(synaptics_i2c_remove),
+ .remove = synaptics_i2c_remove,
.id_table = synaptics_i2c_id_table,
};
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 55f2c2293ec6..6e9cc765e0dc 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -36,6 +36,7 @@ config SERIO_I8042
config SERIO_SERPORT
tristate "Serial port line discipline"
default y
+ depends on TTY
help
Say Y here if you plan to use an input device (mouse, joystick,
tablet, 6dof) that communicates over the RS232 serial (COM) port.
@@ -234,4 +235,14 @@ config SERIO_PS2MULT
To compile this driver as a module, choose M here: the
module will be called ps2mult.
+config SERIO_ARC_PS2
+ tristate "ARC PS/2 support"
+ depends on GENERIC_HARDIRQS
+ help
+ Say Y here if you have an ARC FPGA platform with a PS/2
+ controller in it.
+
+ To compile this driver as a module, choose M here; the module
+ will be called arc_ps2.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index dbbe37616c92..4b0c8f84f1c1 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_SERIO_RAW) += serio_raw.o
obj-$(CONFIG_SERIO_AMS_DELTA) += ams_delta_serio.o
obj-$(CONFIG_SERIO_XILINX_XPS_PS2) += xilinx_ps2.o
obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
+obj-$(CONFIG_SERIO_ARC_PS2) += arc_ps2.o
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index cc11f4efe119..479ce5fe8955 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -81,7 +81,7 @@ static void altera_ps2_close(struct serio *io)
/*
* Add one device to this driver.
*/
-static int __devinit altera_ps2_probe(struct platform_device *pdev)
+static int altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
struct serio *serio;
@@ -159,7 +159,7 @@ static int __devinit altera_ps2_probe(struct platform_device *pdev)
/*
* Remove one device from this driver.
*/
-static int __devexit altera_ps2_remove(struct platform_device *pdev)
+static int altera_ps2_remove(struct platform_device *pdev)
{
struct ps2if *ps2if = platform_get_drvdata(pdev);
@@ -187,7 +187,7 @@ MODULE_DEVICE_TABLE(of, altera_ps2_match);
*/
static struct platform_driver altera_ps2_driver = {
.probe = altera_ps2_probe,
- .remove = __devexit_p(altera_ps2_remove),
+ .remove = altera_ps2_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 2e77246c2e5a..4e2fd44865e1 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
clk_disable_unprepare(kmi->clk);
}
-static int __devinit amba_kmi_probe(struct amba_device *dev,
+static int amba_kmi_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct amba_kmi_port *kmi;
@@ -163,7 +163,7 @@ static int __devinit amba_kmi_probe(struct amba_device *dev,
return ret;
}
-static int __devexit amba_kmi_remove(struct amba_device *dev)
+static int amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
@@ -204,7 +204,7 @@ static struct amba_driver ambakmi_driver = {
},
.id_table = amba_kmi_idtable,
.probe = amba_kmi_probe,
- .remove = __devexit_p(amba_kmi_remove),
+ .remove = amba_kmi_remove,
.resume = amba_kmi_resume,
};
diff --git a/drivers/input/serio/arc_ps2.c b/drivers/input/serio/arc_ps2.c
new file mode 100644
index 000000000000..c52e3e589f72
--- /dev/null
+++ b/drivers/input/serio/arc_ps2.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver is originally developed by Pavel Sokolov <psokolov@synopsys.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#define ARC_PS2_PORTS 2
+
+#define ARC_ARC_PS2_ID 0x0001f609
+
+#define STAT_TIMEOUT 128
+
+#define PS2_STAT_RX_FRM_ERR (1)
+#define PS2_STAT_RX_BUF_OVER (1 << 1)
+#define PS2_STAT_RX_INT_EN (1 << 2)
+#define PS2_STAT_RX_VAL (1 << 3)
+#define PS2_STAT_TX_ISNOT_FUL (1 << 4)
+#define PS2_STAT_TX_INT_EN (1 << 5)
+
+struct arc_ps2_port {
+ void __iomem *data_addr;
+ void __iomem *status_addr;
+ struct serio *io;
+};
+
+struct arc_ps2_data {
+ struct arc_ps2_port port[ARC_PS2_PORTS];
+ void __iomem *addr;
+ unsigned int frame_error;
+ unsigned int buf_overflow;
+ unsigned int total_int;
+};
+
+static void arc_ps2_check_rx(struct arc_ps2_data *arc_ps2,
+ struct arc_ps2_port *port)
+{
+ unsigned int timeout = 1000;
+ unsigned int flag, status;
+ unsigned char data;
+
+ do {
+ status = ioread32(port->status_addr);
+ if (!(status & PS2_STAT_RX_VAL))
+ return;
+
+ data = ioread32(port->data_addr) & 0xff;
+
+ flag = 0;
+ arc_ps2->total_int++;
+ if (status & PS2_STAT_RX_FRM_ERR) {
+ arc_ps2->frame_error++;
+ flag |= SERIO_PARITY;
+ } else if (status & PS2_STAT_RX_BUF_OVER) {
+ arc_ps2->buf_overflow++;
+ flag |= SERIO_FRAME;
+ }
+
+ serio_interrupt(port->io, data, flag);
+ } while (--timeout);
+
+ dev_err(&port->io->dev, "PS/2 hardware stuck\n");
+}
+
+static irqreturn_t arc_ps2_interrupt(int irq, void *dev)
+{
+ struct arc_ps2_data *arc_ps2 = dev;
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++)
+ arc_ps2_check_rx(arc_ps2, &arc_ps2->port[i]);
+
+ return IRQ_HANDLED;
+}
+
+static int arc_ps2_write(struct serio *io, unsigned char val)
+{
+ unsigned status;
+ struct arc_ps2_port *port = io->port_data;
+ int timeout = STAT_TIMEOUT;
+
+ do {
+ status = ioread32(port->status_addr);
+ cpu_relax();
+
+ if (status & PS2_STAT_TX_ISNOT_FUL) {
+ iowrite32(val & 0xff, port->data_addr);
+ return 0;
+ }
+
+ } while (--timeout);
+
+ dev_err(&io->dev, "write timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int arc_ps2_open(struct serio *io)
+{
+ struct arc_ps2_port *port = io->port_data;
+
+ iowrite32(PS2_STAT_RX_INT_EN, port->status_addr);
+
+ return 0;
+}
+
+static void arc_ps2_close(struct serio *io)
+{
+ struct arc_ps2_port *port = io->port_data;
+
+ iowrite32(ioread32(port->status_addr) & ~PS2_STAT_RX_INT_EN,
+ port->status_addr);
+}
+
+static void __iomem *arc_ps2_calc_addr(struct arc_ps2_data *arc_ps2,
+ int index, bool status)
+{
+ void __iomem *addr;
+
+ addr = arc_ps2->addr + 4 + 4 * index;
+ if (status)
+ addr += ARC_PS2_PORTS * 4;
+
+ return addr;
+}
+
+static void arc_ps2_inhibit_ports(struct arc_ps2_data *arc_ps2)
+{
+ void __iomem *addr;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++) {
+ addr = arc_ps2_calc_addr(arc_ps2, i, true);
+ val = ioread32(addr);
+ val &= ~(PS2_STAT_RX_INT_EN | PS2_STAT_TX_INT_EN);
+ iowrite32(val, addr);
+ }
+}
+
+static int arc_ps2_create_port(struct platform_device *pdev,
+ struct arc_ps2_data *arc_ps2,
+ int index)
+{
+ struct arc_ps2_port *port = &arc_ps2->port[index];
+ struct serio *io;
+
+ io = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!io)
+ return -ENOMEM;
+
+ io->id.type = SERIO_8042;
+ io->write = arc_ps2_write;
+ io->open = arc_ps2_open;
+ io->close = arc_ps2_close;
+ snprintf(io->name, sizeof(io->name), "ARC PS/2 port%d", index);
+ snprintf(io->phys, sizeof(io->phys), "arc/serio%d", index);
+ io->port_data = port;
+
+ port->io = io;
+
+ port->data_addr = arc_ps2_calc_addr(arc_ps2, index, false);
+ port->status_addr = arc_ps2_calc_addr(arc_ps2, index, true);
+
+ dev_dbg(&pdev->dev, "port%d is allocated (data = 0x%p, status = 0x%p)\n",
+ index, port->data_addr, port->status_addr);
+
+ serio_register_port(port->io);
+ return 0;
+}
+
+static int arc_ps2_probe(struct platform_device *pdev)
+{
+ struct arc_ps2_data *arc_ps2;
+ struct resource *res;
+ int irq;
+ int error, id, i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no IO memory defined\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq_byname(pdev, "arc_ps2_irq");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ defined\n");
+ return -EINVAL;
+ }
+
+ arc_ps2 = devm_kzalloc(&pdev->dev, sizeof(struct arc_ps2_data),
+ GFP_KERNEL);
+ if (!arc_ps2) {
+ dev_err(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ arc_ps2->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(arc_ps2->addr))
+ return PTR_ERR(arc_ps2->addr);
+
+ dev_info(&pdev->dev, "irq = %d, address = 0x%p, ports = %i\n",
+ irq, arc_ps2->addr, ARC_PS2_PORTS);
+
+ id = ioread32(arc_ps2->addr);
+ if (id != ARC_ARC_PS2_ID) {
+ dev_err(&pdev->dev, "device id does not match\n");
+ return -ENXIO;
+ }
+
+ arc_ps2_inhibit_ports(arc_ps2);
+
+ error = devm_request_irq(&pdev->dev, irq, arc_ps2_interrupt,
+ 0, "arc_ps2", arc_ps2);
+ if (error) {
+ dev_err(&pdev->dev, "Could not allocate IRQ\n");
+ return error;
+ }
+
+ for (i = 0; i < ARC_PS2_PORTS; i++) {
+ error = arc_ps2_create_port(pdev, arc_ps2, i);
+ if (error) {
+ while (--i >= 0)
+ serio_unregister_port(arc_ps2->port[i].io);
+ return error;
+ }
+ }
+
+ platform_set_drvdata(pdev, arc_ps2);
+
+ return 0;
+}
+
+static int arc_ps2_remove(struct platform_device *pdev)
+{
+ struct arc_ps2_data *arc_ps2 = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++)
+ serio_unregister_port(arc_ps2->port[i].io);
+
+ dev_dbg(&pdev->dev, "interrupt count = %i\n", arc_ps2->total_int);
+ dev_dbg(&pdev->dev, "frame error count = %i\n", arc_ps2->frame_error);
+ dev_dbg(&pdev->dev, "buffer overflow count = %i\n",
+ arc_ps2->buf_overflow);
+
+ return 0;
+}
+
+static struct platform_driver arc_ps2_driver = {
+ .driver = {
+ .name = "arc_ps2",
+ .owner = THIS_MODULE,
+ },
+ .probe = arc_ps2_probe,
+ .remove = arc_ps2_remove,
+};
+
+module_platform_driver(arc_ps2_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Sokolov <psokolov@synopsys.com>");
+MODULE_DESCRIPTION("ARC PS/2 Driver");
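
A side note on the register layout implied by arc_ps2_calc_addr() above (inferred from the driver itself, not taken from a datasheet): offset 0 holds the identification register that probe compares against ARC_ARC_PS2_ID, the per-port data registers start at offset 4, and the status registers follow after all data registers. As a sketch in C, with the offsets being assumptions derived purely from that helper:

	/* Assumed MMIO layout for ARC_PS2_PORTS == 2, derived from arc_ps2_calc_addr(). */
	#define ARC_PS2_ID_OFF       0x0                 /* read and compared with ARC_ARC_PS2_ID */
	#define ARC_PS2_DATA_OFF(p)  (0x4 + 4 * (p))     /* data register of port p */
	#define ARC_PS2_STAT_OFF(p)  (ARC_PS2_DATA_OFF(p) + 4 * ARC_PS2_PORTS) /* status register of port p */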
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 852816567241..cfe549d4eaa5 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -175,7 +175,7 @@ static int __init ct82c710_detect(void)
return 0;
}
-static int __devinit ct82c710_probe(struct platform_device *dev)
+static int ct82c710_probe(struct platform_device *dev)
{
ct82c710_port = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ct82c710_port)
@@ -199,7 +199,7 @@ static int __devinit ct82c710_probe(struct platform_device *dev)
return 0;
}
-static int __devexit ct82c710_remove(struct platform_device *dev)
+static int ct82c710_remove(struct platform_device *dev)
{
serio_unregister_port(ct82c710_port);
@@ -212,7 +212,7 @@ static struct platform_driver ct82c710_driver = {
.owner = THIS_MODULE,
},
.probe = ct82c710_probe,
- .remove = __devexit_p(ct82c710_remove),
+ .remove = ct82c710_remove,
};
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 4225f5d6b15f..8d9ba0c3827c 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -327,7 +327,7 @@ static void gscps2_close(struct serio *port)
* @return: success/error report
*/
-static int __devinit gscps2_probe(struct parisc_device *dev)
+static int gscps2_probe(struct parisc_device *dev)
{
struct gscps2port *ps2port;
struct serio *serio;
@@ -414,7 +414,7 @@ fail_nomem:
* @return: success/error report
*/
-static int __devexit gscps2_remove(struct parisc_device *dev)
+static int gscps2_remove(struct parisc_device *dev)
{
struct gscps2port *ps2port = dev_get_drvdata(&dev->dev);
@@ -444,7 +444,7 @@ static struct parisc_driver parisc_ps2_driver = {
.name = "gsc_ps2",
.id_table = gscps2_device_tbl,
.probe = gscps2_probe,
- .remove = __devexit_p(gscps2_remove),
+ .remove = gscps2_remove,
};
static int __init gscps2_init(void)
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index bfd3865d886b..65605e4ef3cf 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -686,13 +686,12 @@ static int hilse_donode(hil_mlc *mlc)
write_lock_irqsave(&mlc->lock, flags);
pack = node->object.packet;
out:
- if (mlc->istarted)
- goto out2;
- /* Prepare to receive input */
- if ((node + 1)->act & HILSE_IN)
- hilse_setup_input(mlc, node + 1);
+ if (!mlc->istarted) {
+ /* Prepare to receive input */
+ if ((node + 1)->act & HILSE_IN)
+ hilse_setup_input(mlc, node + 1);
+ }
- out2:
write_unlock_irqrestore(&mlc->lock, flags);
if (down_trylock(&mlc->osem)) {
@@ -1010,8 +1009,6 @@ static int __init hil_mlc_init(void)
static void __exit hil_mlc_exit(void)
{
del_timer_sync(&hil_mlcs_kicker);
-
- tasklet_disable(&hil_mlcs_tasklet);
tasklet_kill(&hil_mlcs_tasklet);
}
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index 5d48bb66aa73..a5eed2ade53d 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -76,7 +76,7 @@ static inline int i8042_platform_init(void)
if (check_legacy_ioport(I8042_DATA_REG))
return -ENODEV;
#endif
-#if !defined(__sh__) && !defined(__alpha__) && !defined(__mips__)
+#if !defined(__sh__) && !defined(__alpha__)
if (!request_region(I8042_DATA_REG, 16, "i8042"))
return -EBUSY;
#endif
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 395a9af3adcd..d6aa4c67dbb6 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -49,7 +49,7 @@ static inline void i8042_write_command(int val)
#define OBP_PS2MS_NAME1 "kdmouse"
#define OBP_PS2MS_NAME2 "mouse"
-static int __devinit sparc_i8042_probe(struct platform_device *op)
+static int sparc_i8042_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -80,7 +80,7 @@ static int __devinit sparc_i8042_probe(struct platform_device *op)
return 0;
}
-static int __devexit sparc_i8042_remove(struct platform_device *op)
+static int sparc_i8042_remove(struct platform_device *op)
{
of_iounmap(kbd_res, kbd_iobase, 8);
@@ -102,7 +102,7 @@ static struct platform_driver sparc_i8042_driver = {
.of_match_table = sparc_i8042_match,
},
.probe = sparc_i8042_probe,
- .remove = __devexit_p(sparc_i8042_remove),
+ .remove = sparc_i8042_remove,
};
static int __init i8042_platform_init(void)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index d6cc77a53c7e..5f306f79da0c 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -921,6 +921,7 @@ static int __init i8042_platform_init(void)
int retval;
#ifdef CONFIG_X86
+ u8 a20_on = 0xdf;
/* Just return if pre-detection shows no i8042 controller exist */
if (!x86_platform.i8042_detect())
return -ENODEV;
@@ -960,6 +961,14 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
+
+ /*
+ * A20 was already enabled during early kernel init. But some buggy
+ * BIOSes (on MSI laptops) require A20 to be enabled via the i8042 to
+ * resume from S3. So we do it here and hope that nothing breaks.
+ */
+ i8042_command(&a20_on, 0x10d1);
+ i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */
#endif /* CONFIG_X86 */
return retval;
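
For reference, a hedged decoding of the two command words added above, assuming the usual i8042_command() encoding (low byte is the controller command, bits 12-15 the number of parameter bytes written, bits 8-11 the number read back):

	/* Assumed decoding of the values used in the hunk above. */
	u8 a20_on = 0xdf;
	i8042_command(&a20_on, 0x10d1);	/* 0xd1 = write output port, one param byte; 0xdf keeps the A20 gate enabled */
	i8042_command(NULL, 0x00ff);	/* 0xff with no parameters, the "null" command mentioned in the comment */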
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 86564414b75a..78e4de42efaa 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1284,7 +1284,7 @@ static void __init i8042_register_ports(void)
}
}
-static void __devexit i8042_unregister_ports(void)
+static void i8042_unregister_ports(void)
{
int i;
@@ -1437,7 +1437,7 @@ static int __init i8042_probe(struct platform_device *dev)
return error;
}
-static int __devexit i8042_remove(struct platform_device *dev)
+static int i8042_remove(struct platform_device *dev)
{
i8042_unregister_ports();
i8042_free_irqs();
@@ -1455,7 +1455,7 @@ static struct platform_driver i8042_driver = {
.pm = &i8042_pm_ops,
#endif
},
- .remove = __devexit_p(i8042_remove),
+ .remove = i8042_remove,
.shutdown = i8042_shutdown,
};
diff --git a/drivers/input/serio/maceps2.c b/drivers/input/serio/maceps2.c
index 61da763b1209..bc85e1cc66d8 100644
--- a/drivers/input/serio/maceps2.c
+++ b/drivers/input/serio/maceps2.c
@@ -116,7 +116,7 @@ static void maceps2_close(struct serio *dev)
}
-static struct serio * __devinit maceps2_allocate_port(int idx)
+static struct serio *maceps2_allocate_port(int idx)
{
struct serio *serio;
@@ -135,7 +135,7 @@ static struct serio * __devinit maceps2_allocate_port(int idx)
return serio;
}
-static int __devinit maceps2_probe(struct platform_device *dev)
+static int maceps2_probe(struct platform_device *dev)
{
maceps2_port[0] = maceps2_allocate_port(0);
maceps2_port[1] = maceps2_allocate_port(1);
@@ -151,7 +151,7 @@ static int __devinit maceps2_probe(struct platform_device *dev)
return 0;
}
-static int __devexit maceps2_remove(struct platform_device *dev)
+static int maceps2_remove(struct platform_device *dev)
{
serio_unregister_port(maceps2_port[0]);
serio_unregister_port(maceps2_port[1]);
@@ -165,7 +165,7 @@ static struct platform_driver maceps2_driver = {
.owner = THIS_MODULE,
},
.probe = maceps2_probe,
- .remove = __devexit_p(maceps2_remove),
+ .remove = maceps2_remove,
};
static int __init maceps2_init(void)
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index 0c42497aaaf4..76f83836fd5a 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -127,7 +127,7 @@ static void pcips2_close(struct serio *io)
free_irq(ps2if->dev->irq, ps2if);
}
-static int __devinit pcips2_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int pcips2_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pcips2_data *ps2if;
struct serio *serio;
@@ -176,7 +176,7 @@ static int __devinit pcips2_probe(struct pci_dev *dev, const struct pci_device_i
return ret;
}
-static void __devexit pcips2_remove(struct pci_dev *dev)
+static void pcips2_remove(struct pci_dev *dev)
{
struct pcips2_data *ps2if = pci_get_drvdata(dev);
@@ -212,7 +212,7 @@ static struct pci_driver pcips2_driver = {
.name = "pcips2",
.id_table = pcips2_ids,
.probe = pcips2_probe,
- .remove = __devexit_p(pcips2_remove),
+ .remove = pcips2_remove,
};
module_pci_driver(pcips2_driver);
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index 0c0df7f73802..70fe542839fb 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -122,7 +122,7 @@ static void q40kbd_close(struct serio *port)
q40kbd_flush(q40kbd);
}
-static int __devinit q40kbd_probe(struct platform_device *pdev)
+static int q40kbd_probe(struct platform_device *pdev)
{
struct q40kbd *q40kbd;
struct serio *port;
@@ -168,7 +168,7 @@ err_free_mem:
return error;
}
-static int __devexit q40kbd_remove(struct platform_device *pdev)
+static int q40kbd_remove(struct platform_device *pdev)
{
struct q40kbd *q40kbd = platform_get_drvdata(pdev);
@@ -190,7 +190,7 @@ static struct platform_driver q40kbd_driver = {
.name = "q40kbd",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(q40kbd_remove),
+ .remove = q40kbd_remove,
};
static int __init q40kbd_init(void)
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 2af5df6a8fba..567566ae0dae 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -114,7 +114,7 @@ static void rpckbd_close(struct serio *port)
* Allocate and initialize serio structure for subsequent registration
* with serio core.
*/
-static int __devinit rpckbd_probe(struct platform_device *dev)
+static int rpckbd_probe(struct platform_device *dev)
{
struct rpckbd_data *rpckbd;
struct serio *serio;
@@ -153,7 +153,7 @@ static int __devinit rpckbd_probe(struct platform_device *dev)
return 0;
}
-static int __devexit rpckbd_remove(struct platform_device *dev)
+static int rpckbd_remove(struct platform_device *dev)
{
struct serio *serio = platform_get_drvdata(dev);
struct rpckbd_data *rpckbd = serio->port_data;
@@ -166,7 +166,7 @@ static int __devexit rpckbd_remove(struct platform_device *dev)
static struct platform_driver rpckbd_driver = {
.probe = rpckbd_probe,
- .remove = __devexit_p(rpckbd_remove),
+ .remove = rpckbd_remove,
.driver = {
.name = "kart",
.owner = THIS_MODULE,
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 389766707534..b3e688911fd9 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -193,7 +193,7 @@ static void ps2_close(struct serio *io)
/*
* Clear the input buffer.
*/
-static void __devinit ps2_clear_input(struct ps2if *ps2if)
+static void ps2_clear_input(struct ps2if *ps2if)
{
int maxread = 100;
@@ -203,7 +203,7 @@ static void __devinit ps2_clear_input(struct ps2if *ps2if)
}
}
-static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
+static unsigned int ps2_test_one(struct ps2if *ps2if,
unsigned int mask)
{
unsigned int val;
@@ -220,7 +220,7 @@ static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
* Test the keyboard interface. We basically check to make sure that
* we can drive each line to the keyboard independently of each other.
*/
-static int __devinit ps2_test(struct ps2if *ps2if)
+static int ps2_test(struct ps2if *ps2if)
{
unsigned int stat;
int ret = 0;
@@ -251,7 +251,7 @@ static int __devinit ps2_test(struct ps2if *ps2if)
/*
* Add one device to this driver.
*/
-static int __devinit ps2_probe(struct sa1111_dev *dev)
+static int ps2_probe(struct sa1111_dev *dev)
{
struct ps2if *ps2if;
struct serio *serio;
@@ -334,7 +334,7 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
/*
* Remove one device from this driver.
*/
-static int __devexit ps2_remove(struct sa1111_dev *dev)
+static int ps2_remove(struct sa1111_dev *dev)
{
struct ps2if *ps2if = sa1111_get_drvdata(dev);
@@ -357,7 +357,7 @@ static struct sa1111_driver ps2_driver = {
},
.devid = SA1111_DEVID_PS2,
.probe = ps2_probe,
- .remove = __devexit_p(ps2_remove),
+ .remove = ps2_remove,
};
static int __init ps2_init(void)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index d0f7533dbf88..25fc5971f426 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -891,8 +891,6 @@ static int serio_bus_match(struct device *dev, struct device_driver *drv)
return serio_match_port(serio_drv->id_table, serio);
}
-#ifdef CONFIG_HOTPLUG
-
#define SERIO_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(env, fmt, val); \
@@ -920,15 +918,6 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#undef SERIO_ADD_UEVENT_VAR
-#else
-
-static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_HOTPLUG */
-
#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 1e983bec7d86..17be85948ffd 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -233,7 +233,7 @@ static void sxps2_close(struct serio *pserio)
 * It returns 0 if the driver is bound to the PS/2 device, or a negative
* value if there is an error.
*/
-static int __devinit xps2_of_probe(struct platform_device *ofdev)
+static int xps2_of_probe(struct platform_device *ofdev)
{
struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
@@ -333,7 +333,7 @@ failed1:
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
*/
-static int __devexit xps2_of_remove(struct platform_device *of_dev)
+static int xps2_of_remove(struct platform_device *of_dev)
{
struct xps2data *drvdata = platform_get_drvdata(of_dev);
struct resource r_mem; /* IO mem resources */
@@ -355,7 +355,7 @@ static int __devexit xps2_of_remove(struct platform_device *of_dev)
}
/* Match table for of_platform binding */
-static const struct of_device_id xps2_of_match[] __devinitconst = {
+static const struct of_device_id xps2_of_match[] = {
{ .compatible = "xlnx,xps-ps2-1.00.a", },
{ /* end of list */ },
};
@@ -368,7 +368,7 @@ static struct platform_driver xps2_of_driver = {
.of_match_table = xps2_of_match,
},
.probe = xps2_of_probe,
- .remove = __devexit_p(xps2_of_remove),
+ .remove = xps2_of_remove,
};
module_platform_driver(xps2_of_driver);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 858ad446de91..aaf23aeae2ea 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -386,23 +386,40 @@ static int wacom_parse_hid(struct usb_interface *intf,
if (usage == WCM_DESKTOP) {
if (finger) {
features->device_type = BTN_TOOL_FINGER;
- if (features->type == TABLETPC2FG) {
- /* need to reset back */
+
+ switch (features->type) {
+ case TABLETPC2FG:
features->pktlen = WACOM_PKGLEN_TPC2FG;
- }
+ break;
- if (features->type == MTSCREEN || features->type == WACOM_24HDT)
+ case MTSCREEN:
+ case WACOM_24HDT:
features->pktlen = WACOM_PKGLEN_MTOUCH;
+ break;
- if (features->type == BAMBOO_PT) {
- /* need to reset back */
+ case MTTPC:
+ features->pktlen = WACOM_PKGLEN_MTTPC;
+ break;
+
+ case BAMBOO_PT:
features->pktlen = WACOM_PKGLEN_BBTOUCH;
+ break;
+
+ default:
+ features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+ break;
+ }
+
+ switch (features->type) {
+ case BAMBOO_PT:
features->x_phy =
get_unaligned_le16(&report[i + 5]);
features->x_max =
get_unaligned_le16(&report[i + 8]);
i += 15;
- } else if (features->type == WACOM_24HDT) {
+ break;
+
+ case WACOM_24HDT:
features->x_max =
get_unaligned_le16(&report[i + 3]);
features->x_phy =
@@ -410,7 +427,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
features->unit = report[i - 1];
features->unitExpo = report[i - 3];
i += 12;
- } else {
+ break;
+
+ default:
features->x_max =
get_unaligned_le16(&report[i + 3]);
features->x_phy =
@@ -418,10 +437,11 @@ static int wacom_parse_hid(struct usb_interface *intf,
features->unit = report[i + 9];
features->unitExpo = report[i + 11];
i += 12;
+ break;
}
} else if (pen) {
/* penabled only accepts exact bytes of data */
- if (features->type == TABLETPC2FG)
+ if (features->type >= TABLETPC)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->x_max =
@@ -434,32 +454,40 @@ static int wacom_parse_hid(struct usb_interface *intf,
case HID_USAGE_Y:
if (usage == WCM_DESKTOP) {
if (finger) {
- int type = features->type;
-
- if (type == TABLETPC2FG || type == MTSCREEN) {
+ switch (features->type) {
+ case TABLETPC2FG:
+ case MTSCREEN:
+ case MTTPC:
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i + 6]);
i += 7;
- } else if (type == WACOM_24HDT) {
+ break;
+
+ case WACOM_24HDT:
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i - 2]);
i += 7;
- } else if (type == BAMBOO_PT) {
+ break;
+
+ case BAMBOO_PT:
features->y_phy =
get_unaligned_le16(&report[i + 3]);
features->y_max =
get_unaligned_le16(&report[i + 6]);
i += 12;
- } else {
+ break;
+
+ default:
features->y_max =
features->x_max;
features->y_phy =
get_unaligned_le16(&report[i + 3]);
i += 4;
+ break;
}
} else if (pen) {
features->y_max =
@@ -525,10 +553,10 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
if (!rep_data)
return error;
- rep_data[0] = report_id;
- rep_data[1] = mode;
-
do {
+ rep_data[0] = report_id;
+ rep_data[1] = mode;
+
error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
report_id, rep_data, length, 1);
if (error >= 0)
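
The wacom_set_device_mode() hunk above moves the rep_data initialization inside the retry loop, so every attempt starts from a freshly filled buffer; the assumption (not visible in the shown context) is that a later step of each attempt can overwrite rep_data before the next pass. A minimal sketch of that pattern, with a hypothetical send_report() helper standing in for the real report calls:

	/* Sketch only: re-fill the request buffer on every retry, since the
	 * previous attempt may have clobbered it. send_report() is hypothetical. */
	do {
		rep_data[0] = report_id;
		rep_data[1] = mode;
		error = send_report(rep_data, length);
	} while (error < 0 && --retries);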
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 0a67031ffc13..41b6fbf60112 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -359,6 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x802: /* Intuos4 General Pen */
case 0x804: /* Intuos4 Marker Pen */
case 0x40802: /* Intuos4 Classic Pen */
+ case 0x18803: /* DTH2242 Grip Pen */
case 0x022:
wacom->tool[idx] = BTN_TOOL_PEN;
break;
@@ -467,9 +468,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
/* general pen packet */
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
- (features->type >= WACOM_21UX2 && features->type <= WACOM_24HD)) {
+ if (features->type >= INTUOS4S && features->type <= WACOM_24HD) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -540,6 +539,13 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
+ } else if (features->type == DTK) {
+ input_report_key(input, BTN_0, (data[6] & 0x01));
+ input_report_key(input, BTN_1, (data[6] & 0x02));
+ input_report_key(input, BTN_2, (data[6] & 0x04));
+ input_report_key(input, BTN_3, (data[6] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x10));
+ input_report_key(input, BTN_5, (data[6] & 0x20));
} else if (features->type == WACOM_24HD) {
input_report_key(input, BTN_0, (data[6] & 0x01));
input_report_key(input, BTN_1, (data[6] & 0x02));
@@ -787,25 +793,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
return 1;
}
-static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
-{
- int touch_max = wacom->features.touch_max;
- int i;
-
- if (!wacom->slots)
- return -1;
-
- for (i = 0; i < touch_max; ++i) {
- if (wacom->slots[i] == contactid)
- return i;
- }
- for (i = 0; i < touch_max; ++i) {
- if (wacom->slots[i] == -1)
- return i;
- }
- return -1;
-}
-
static int int_dist(int x1, int y1, int x2, int y2)
{
int x = x2 - x1;
@@ -835,8 +822,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
for (i = 0; i < contacts_to_send; i++) {
int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
- int id = data[offset + 1];
- int slot = find_slot_from_contactid(wacom, id);
+ int slot = input_mt_get_slot_by_key(input, data[offset + 1]);
if (slot < 0)
continue;
@@ -858,9 +844,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
input_report_abs(input, ABS_MT_ORIENTATION, w > h);
}
- wacom->slots[slot] = touch ? id : -1;
}
-
input_mt_report_pointer_emulation(input, true);
wacom->num_contacts_left -= contacts_to_send;
@@ -877,6 +861,11 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
int i;
int current_num_contacts = data[2];
int contacts_to_send = 0;
+ int x_offset = 0;
+
+ /* MTTPC does not support Height and Width */
+ if (wacom->features.type == MTTPC)
+ x_offset = -4;
/*
* First packet resets the counter since only the first
@@ -889,10 +878,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
contacts_to_send = min(5, wacom->num_contacts_left);
for (i = 0; i < contacts_to_send; i++) {
- int offset = (WACOM_BYTES_PER_MT_PACKET * i) + 3;
+ int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
bool touch = data[offset] & 0x1;
int id = le16_to_cpup((__le16 *)&data[offset + 1]);
- int slot = find_slot_from_contactid(wacom, id);
+ int slot = input_mt_get_slot_by_key(input, id);
if (slot < 0)
continue;
@@ -900,14 +889,12 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
- int x = le16_to_cpup((__le16 *)&data[offset + 7]);
- int y = le16_to_cpup((__le16 *)&data[offset + 9]);
+ int x = le16_to_cpup((__le16 *)&data[offset + x_offset + 7]);
+ int y = le16_to_cpup((__le16 *)&data[offset + x_offset + 9]);
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
}
- wacom->slots[slot] = touch ? id : -1;
}
-
input_mt_report_pointer_emulation(input, true);
wacom->num_contacts_left -= contacts_to_send;
@@ -939,12 +926,11 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
contact_with_no_pen_down_count++;
}
}
+ input_mt_report_pointer_emulation(input, true);
/* keep touch state for pen event */
wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
- input_mt_report_pointer_emulation(input, true);
-
return 1;
}
@@ -1101,12 +1087,15 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
{
struct input_dev *input = wacom->input;
- int slot_id = data[0] - 2; /* data[0] is between 2 and 17 */
bool touch = data[1] & 0x80;
+ int slot = input_mt_get_slot_by_key(input, data[0]);
+
+ if (slot < 0)
+ return;
touch = touch && !wacom->shared->stylus_in_proximity;
- input_mt_slot(input, slot_id);
+ input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
@@ -1159,7 +1148,6 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
wacom_bpt3_button_msg(wacom, data + offset);
}
-
input_mt_report_pointer_emulation(input, true);
input_sync(input);
@@ -1316,6 +1304,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case WACOM_21UX2:
case WACOM_22HD:
case WACOM_24HD:
+ case DTK:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -1336,6 +1325,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case TABLETPCE:
case TABLETPC2FG:
case MTSCREEN:
+ case MTTPC:
sync = wacom_tpc_irq(wacom_wac, len);
break;
@@ -1440,39 +1430,64 @@ static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
return (logical_max * 100) / physical_max;
}
-int wacom_setup_input_capabilities(struct input_dev *input_dev,
- struct wacom_wac *wacom_wac)
+static void wacom_abs_set_axis(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
{
struct wacom_features *features = &wacom_wac->features;
- int i;
-
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
- __set_bit(BTN_TOUCH, input_dev->keybit);
-
- input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
- features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
- features->y_fuzz, 0);
if (features->device_type == BTN_TOOL_PEN) {
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
- features->pressure_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
+ features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0,
+ features->pressure_max, features->pressure_fuzz, 0);
/* penabled devices have fixed resolution for each model */
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
} else {
- input_abs_set_res(input_dev, ABS_X,
- wacom_calculate_touch_res(features->x_max,
- features->x_phy));
- input_abs_set_res(input_dev, ABS_Y,
- wacom_calculate_touch_res(features->y_max,
- features->y_phy));
+ if (features->touch_max <= 2) {
+ input_set_abs_params(input_dev, ABS_X, 0,
+ features->x_max, features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0,
+ features->y_max, features->y_fuzz, 0);
+ input_abs_set_res(input_dev, ABS_X,
+ wacom_calculate_touch_res(features->x_max,
+ features->x_phy));
+ input_abs_set_res(input_dev, ABS_Y,
+ wacom_calculate_touch_res(features->y_max,
+ features->y_phy));
+ }
+
+ if (features->touch_max > 1) {
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+ features->x_max, features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+ features->y_max, features->y_fuzz, 0);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X,
+ wacom_calculate_touch_res(features->x_max,
+ features->x_phy));
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
+ wacom_calculate_touch_res(features->y_max,
+ features->y_phy));
+ }
}
+}
+
+int wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
+{
+ struct wacom_features *features = &wacom_wac->features;
+ int i;
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
+ wacom_abs_set_axis(input_dev, wacom_wac);
+
switch (wacom_wac->features.type) {
case WACOM_MO:
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
@@ -1509,12 +1524,17 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_Y, input_dev->keybit);
__set_bit(BTN_Z, input_dev->keybit);
- for (i = 0; i < 10; i++)
+ for (i = 6; i < 10; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
__set_bit(KEY_PROG1, input_dev->keybit);
__set_bit(KEY_PROG2, input_dev->keybit);
__set_bit(KEY_PROG3, input_dev->keybit);
+ /* fall through */
+
+ case DTK:
+ for (i = 0; i < 6; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
@@ -1610,24 +1630,11 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
} else if (features->device_type == BTN_TOOL_FINGER) {
__clear_bit(ABS_MISC, input_dev->absbit);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
- __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
-
- input_mt_init_slots(input_dev, features->touch_max, 0);
-
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
0, features->x_max, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
-
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, features->x_max,
- features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, features->y_max,
- features->y_fuzz, 0);
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
}
break;
@@ -1657,27 +1664,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case MTSCREEN:
- if (features->device_type == BTN_TOOL_FINGER) {
- wacom_wac->slots = kmalloc(features->touch_max *
- sizeof(int),
- GFP_KERNEL);
- if (!wacom_wac->slots)
- return -ENOMEM;
-
- for (i = 0; i < features->touch_max; i++)
- wacom_wac->slots[i] = -1;
- }
- /* fall through */
-
+ case MTTPC:
case TABLETPC2FG:
if (features->device_type == BTN_TOOL_FINGER) {
- input_mt_init_slots(input_dev, features->touch_max, 0);
- input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE,
- 0, MT_TOOL_MAX, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, features->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, features->y_max, 0, 0);
+ unsigned int flags = INPUT_MT_DIRECT;
+
+ if (wacom_wac->features.type == TABLETPC2FG)
+ flags = 0;
+
+ input_mt_init_slots(input_dev, features->touch_max, flags);
}
/* fall through */
@@ -1720,35 +1715,26 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->device_type == BTN_TOOL_FINGER) {
+ unsigned int flags = INPUT_MT_POINTER;
+
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_FORWARD, input_dev->keybit);
__set_bit(BTN_BACK, input_dev->keybit);
__set_bit(BTN_RIGHT, input_dev->keybit);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- input_mt_init_slots(input_dev, features->touch_max, 0);
-
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
- __set_bit(BTN_TOOL_TRIPLETAP,
- input_dev->keybit);
- __set_bit(BTN_TOOL_QUADTAP,
- input_dev->keybit);
-
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
0, features->x_max, 0, 0);
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
+ } else {
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ flags = 0;
}
-
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, features->x_max,
- features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, features->y_max,
- features->y_fuzz, 0);
+ input_mt_init_slots(input_dev, features->touch_max, flags);
} else if (features->device_type == BTN_TOOL_PEN) {
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1973,6 +1959,13 @@ static const struct wacom_features wacom_features_0xCE =
static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511,
0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x59 = /* Pen */
+ { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
+ 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
+static const struct wacom_features wacom_features_0x5D = /* Touch */
+ { "Wacom DTH2242", .type = WACOM_24HDT,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10 };
static const struct wacom_features wacom_features_0xCC =
{ "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047,
63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2018,6 +2011,15 @@ static const struct wacom_features wacom_features_0xED =
static const struct wacom_features wacom_features_0xEF =
{ "Wacom ISDv4 EF", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x100 =
+ { "Wacom ISDv4 100", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x101 =
+ { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x4001 =
+ { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2034,7 +2036,8 @@ static const struct wacom_features wacom_features_0xD1 =
.touch_max = 2 };
static const struct wacom_features wacom_features_0xD2 =
{ "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD3 =
{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
@@ -2137,6 +2140,8 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x43) },
{ USB_DEVICE_WACOM(0x44) },
{ USB_DEVICE_WACOM(0x45) },
+ { USB_DEVICE_WACOM(0x59) },
+ { USB_DEVICE_WACOM(0x5D) },
{ USB_DEVICE_WACOM(0xB0) },
{ USB_DEVICE_WACOM(0xB1) },
{ USB_DEVICE_WACOM(0xB2) },
@@ -2194,6 +2199,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0xED) },
{ USB_DEVICE_WACOM(0xEF) },
+ { USB_DEVICE_WACOM(0x100) },
+ { USB_DEVICE_WACOM(0x101) },
+ { USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
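
The wacom_wac.c changes above drop the driver's private slot bookkeeping (find_slot_from_contactid() and the wacom->slots array) in favor of the input core's key-to-slot lookup. A minimal sketch of the resulting reporting pattern, mirroring wacom_mt_touch() and wacom_24hdt_irq() rather than reproducing them:

	/* Generic MT reporting loop using input_mt_get_slot_by_key(). */
	int slot = input_mt_get_slot_by_key(input, contact_id);	/* the core tracks contact ids */
	if (slot >= 0) {
		input_mt_slot(input, slot);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
		if (touch) {
			input_report_abs(input, ABS_MT_POSITION_X, x);
			input_report_abs(input, ABS_MT_POSITION_Y, y);
		}
	}
	input_mt_report_pointer_emulation(input, true);

This relies on the slots having been set up with input_mt_init_slots() and an appropriate flags argument (INPUT_MT_DIRECT or INPUT_MT_POINTER), which is what the wacom_setup_input_capabilities() hunks above switch to.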
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 345f1e76975e..5f9a7721e16c 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -26,6 +26,7 @@
#define WACOM_PKGLEN_BBPEN 10
#define WACOM_PKGLEN_WIRELESS 32
#define WACOM_PKGLEN_MTOUCH 62
+#define WACOM_PKGLEN_MTTPC 40
/* wacom data size per MT contact */
#define WACOM_BYTES_PER_MT_PACKET 11
@@ -77,6 +78,7 @@ enum {
INTUOS5L,
WACOM_21UX2,
WACOM_22HD,
+ DTK,
WACOM_24HD,
CINTIQ,
WACOM_BEE,
@@ -88,6 +90,7 @@ enum {
TABLETPCE,
TABLETPC2FG,
MTSCREEN,
+ MTTPC,
MAX_TYPE
};
@@ -133,7 +136,6 @@ struct wacom_wac {
int pid;
int battery_capacity;
int num_contacts_left;
- int *slots;
};
#endif
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 326218dbd6e6..c7068942ebe8 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -115,7 +115,7 @@ static void pm860x_touch_close(struct input_dev *dev)
}
#ifdef CONFIG_OF
-static int __devinit pm860x_touch_dt_init(struct platform_device *pdev,
+static int pm860x_touch_dt_init(struct platform_device *pdev,
struct pm860x_chip *chip,
int *res_x)
{
@@ -169,7 +169,7 @@ static int __devinit pm860x_touch_dt_init(struct platform_device *pdev,
#define pm860x_touch_dt_init(x, y, z) (-1)
#endif
-static int __devinit pm860x_touch_probe(struct platform_device *pdev)
+static int pm860x_touch_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_touch_pdata *pdata = pdev->dev.platform_data;
@@ -293,7 +293,7 @@ out:
return ret;
}
-static int __devexit pm860x_touch_remove(struct platform_device *pdev)
+static int pm860x_touch_remove(struct platform_device *pdev)
{
struct pm860x_touch *touch = platform_get_drvdata(pdev);
@@ -310,7 +310,7 @@ static struct platform_driver pm860x_touch_driver = {
.owner = THIS_MODULE,
},
.probe = pm860x_touch_probe,
- .remove = __devexit_p(pm860x_touch_remove),
+ .remove = pm860x_touch_remove,
};
module_platform_driver(pm860x_touch_driver);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index f7668b24c378..f9a5fd89bc02 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -111,18 +111,6 @@ config TOUCHSCREEN_AUO_PIXCIR
To compile this driver as a module, choose M here: the
module will be called auo-pixcir-ts.
-config TOUCHSCREEN_BITSY
- tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
- depends on SA1100_BITSY
- select SERIO
- help
- Say Y here if you have the h3600 (Bitsy) touchscreen.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called h3600_ts_input.
-
config TOUCHSCREEN_BU21013
tristate "BU21013 based touch panel controllers"
depends on I2C
@@ -371,7 +359,7 @@ config TOUCHSCREEN_MCS5000
config TOUCHSCREEN_MMS114
tristate "MELFAS MMS114 touchscreen"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
help
Say Y here if you have the MELFAS MMS114 touchscreen controller
chip in your system.
@@ -529,9 +517,9 @@ config TOUCHSCREEN_TOUCHWIN
To compile this driver as a module, choose M here: the
module will be called touchwin.
-config TOUCHSCREEN_TI_TSCADC
+config TOUCHSCREEN_TI_AM335X_TSC
tristate "TI Touchscreen Interface"
- depends on ARCH_OMAP2PLUS
+ depends on MFD_TI_AM335X_TSCADC
help
Say Y here if you have a 4/5/8-wire touchscreen controller
connected to the ADC controller on your TI AM335x SoC.
@@ -539,7 +527,7 @@ config TOUCHSCREEN_TI_TSCADC
If unsure, say N.
To compile this driver as a module, choose M here: the
- module will be called ti_tscadc.
+ module will be called ti_am335x_tsc.
config TOUCHSCREEN_ATMEL_TSADCC
tristate "Atmel Touchscreen Interface"
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 178eb128d90f..6bfbeab67c9f 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
-obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
@@ -52,7 +51,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
-obj-$(CONFIG_TOUCHSCREEN_TI_TSCADC) += ti_tscadc.o
+obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 2c7692108e6c..23fa829b869d 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -682,7 +682,7 @@ static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
}
}
-static int __devinit ad7877_probe(struct spi_device *spi)
+static int ad7877_probe(struct spi_device *spi)
{
struct ad7877 *ts;
struct input_dev *input_dev;
@@ -810,7 +810,7 @@ err_free_mem:
return err;
}
-static int __devexit ad7877_remove(struct spi_device *spi)
+static int ad7877_remove(struct spi_device *spi)
{
struct ad7877 *ts = dev_get_drvdata(&spi->dev);
@@ -857,7 +857,7 @@ static struct spi_driver ad7877_driver = {
.pm = &ad7877_pm,
},
.probe = ad7877_probe,
- .remove = __devexit_p(ad7877_remove),
+ .remove = ad7877_remove,
};
module_spi_driver(ad7877_driver);
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index 3054354d0dd3..dcf390771549 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -54,7 +54,7 @@ static const struct ad7879_bus_ops ad7879_i2c_bus_ops = {
.write = ad7879_i2c_write,
};
-static int __devinit ad7879_i2c_probe(struct i2c_client *client,
+static int ad7879_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad7879 *ts;
@@ -75,7 +75,7 @@ static int __devinit ad7879_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit ad7879_i2c_remove(struct i2c_client *client)
+static int ad7879_i2c_remove(struct i2c_client *client)
{
struct ad7879 *ts = i2c_get_clientdata(client);
@@ -98,7 +98,7 @@ static struct i2c_driver ad7879_i2c_driver = {
.pm = &ad7879_pm_ops,
},
.probe = ad7879_i2c_probe,
- .remove = __devexit_p(ad7879_i2c_remove),
+ .remove = ad7879_i2c_remove,
.id_table = ad7879_id,
};
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index db49abf056ba..606da5bd6115 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -110,7 +110,7 @@ static const struct ad7879_bus_ops ad7879_spi_bus_ops = {
.write = ad7879_spi_write,
};
-static int __devinit ad7879_spi_probe(struct spi_device *spi)
+static int ad7879_spi_probe(struct spi_device *spi)
{
struct ad7879 *ts;
int err;
@@ -137,7 +137,7 @@ static int __devinit ad7879_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit ad7879_spi_remove(struct spi_device *spi)
+static int ad7879_spi_remove(struct spi_device *spi)
{
struct ad7879 *ts = spi_get_drvdata(spi);
@@ -154,7 +154,7 @@ static struct spi_driver ad7879_spi_driver = {
.pm = &ad7879_pm_ops,
},
.probe = ad7879_spi_probe,
- .remove = __devexit_p(ad7879_spi_remove),
+ .remove = ad7879_spi_remove,
};
module_spi_driver(ad7879_spi_driver);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 78e5d9ab0ba7..4f702b3ec1a3 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -955,7 +955,7 @@ static int ads7846_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
-static int __devinit ads7846_setup_pendown(struct spi_device *spi,
+static int ads7846_setup_pendown(struct spi_device *spi,
struct ads7846 *ts)
{
struct ads7846_platform_data *pdata = spi->dev.platform_data;
@@ -997,7 +997,7 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi,
* Set up the transfers to read touchscreen state; this assumes we
* use formula #2 for pressure, not #3.
*/
-static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts,
+static void ads7846_setup_spi_msg(struct ads7846 *ts,
const struct ads7846_platform_data *pdata)
{
struct spi_message *m = &ts->msg[0];
@@ -1196,7 +1196,7 @@ static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts,
spi_message_add_tail(x, m);
}
-static int __devinit ads7846_probe(struct spi_device *spi)
+static int ads7846_probe(struct spi_device *spi)
{
struct ads7846 *ts;
struct ads7846_packet *packet;
@@ -1390,7 +1390,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
return err;
}
-static int __devexit ads7846_remove(struct spi_device *spi)
+static int ads7846_remove(struct spi_device *spi)
{
struct ads7846 *ts = dev_get_drvdata(&spi->dev);
@@ -1434,7 +1434,7 @@ static struct spi_driver ads7846_driver = {
.pm = &ads7846_pm,
},
.probe = ads7846_probe,
- .remove = __devexit_p(ads7846_remove),
+ .remove = ads7846_remove,
};
module_spi_driver(ads7846_driver);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 1df2396af008..d04f810cb1dd 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1095,7 +1095,7 @@ static void mxt_input_close(struct input_dev *dev)
mxt_stop(data);
}
-static int __devinit mxt_probe(struct i2c_client *client,
+static int mxt_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct mxt_platform_data *pdata = client->dev.platform_data;
@@ -1200,7 +1200,7 @@ err_free_mem:
return error;
}
-static int __devexit mxt_remove(struct i2c_client *client)
+static int mxt_remove(struct i2c_client *client)
{
struct mxt_data *data = i2c_get_clientdata(client);
@@ -1270,7 +1270,7 @@ static struct i2c_driver mxt_driver = {
.pm = &mxt_pm_ops,
},
.probe = mxt_probe,
- .remove = __devexit_p(mxt_remove),
+ .remove = mxt_remove,
.id_table = mxt_id,
};
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index ea392ee138ed..95f6785a94b0 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -177,7 +177,7 @@ static irqreturn_t atmel_tsadcc_interrupt(int irq, void *dev)
* The functions for inserting/removing us as a module.
*/
-static int __devinit atmel_tsadcc_probe(struct platform_device *pdev)
+static int atmel_tsadcc_probe(struct platform_device *pdev)
{
struct atmel_tsadcc *ts_dev;
struct input_dev *input_dev;
@@ -323,7 +323,7 @@ err_free_mem:
return err;
}
-static int __devexit atmel_tsadcc_remove(struct platform_device *pdev)
+static int atmel_tsadcc_remove(struct platform_device *pdev)
{
struct atmel_tsadcc *ts_dev = dev_get_drvdata(&pdev->dev);
struct resource *res;
@@ -346,7 +346,7 @@ static int __devexit atmel_tsadcc_remove(struct platform_device *pdev)
static struct platform_driver atmel_tsadcc_driver = {
.probe = atmel_tsadcc_probe,
- .remove = __devexit_p(atmel_tsadcc_remove),
+ .remove = atmel_tsadcc_remove,
.driver = {
.name = "atmel_tsadcc",
},
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index c7047b6bb020..c6e19a96348e 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -286,7 +286,7 @@ static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
return 0;
}
-static __devinit int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
+static int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
int int_setting)
{
struct i2c_client *client = ts->client;
@@ -482,7 +482,7 @@ unlock:
static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops, auo_pixcir_suspend,
auo_pixcir_resume);
-static int __devinit auo_pixcir_probe(struct i2c_client *client,
+static int auo_pixcir_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
@@ -599,7 +599,7 @@ err_gpio_int:
return ret;
}
-static int __devexit auo_pixcir_remove(struct i2c_client *client)
+static int auo_pixcir_remove(struct i2c_client *client)
{
struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
@@ -631,7 +631,7 @@ static struct i2c_driver auo_pixcir_driver = {
.pm = &auo_pixcir_pm_ops,
},
.probe = auo_pixcir_probe,
- .remove = __devexit_p(auo_pixcir_remove),
+ .remove = auo_pixcir_remove,
.id_table = auo_pixcir_idtable,
};
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 5c487d23f11c..b9b5ddad6658 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -14,6 +14,9 @@
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#define PEN_DOWN_INTR 0
#define MAX_FINGERS 2
@@ -148,11 +151,12 @@
struct bu21013_ts_data {
struct i2c_client *client;
wait_queue_head_t wait;
- bool touch_stopped;
const struct bu21013_platform_device *chip;
struct input_dev *in_dev;
- unsigned int intr_pin;
struct regulator *regulator;
+ unsigned int irq;
+ unsigned int intr_pin;
+ bool touch_stopped;
};
/**
@@ -262,7 +266,7 @@ static irqreturn_t bu21013_gpio_irq(int irq, void *device_data)
return IRQ_NONE;
}
- data->intr_pin = data->chip->irq_read_val();
+ data->intr_pin = gpio_get_value(data->chip->touch_pin);
if (data->intr_pin == PEN_DOWN_INTR)
wait_event_timeout(data->wait, data->touch_stopped,
msecs_to_jiffies(2));
@@ -418,8 +422,70 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
{
bu21013_data->touch_stopped = true;
wake_up(&bu21013_data->wait);
- free_irq(bu21013_data->chip->irq, bu21013_data);
+ free_irq(bu21013_data->irq, bu21013_data);
+}
+
+/**
+ * bu21013_cs_disable() - deconfigures the touch panel controller
+ * @bu21013_data: device structure pointer
+ *
+ * This function is used to deconfigure the chip selection
+ * for the touch panel controller.
+ */
+static void bu21013_cs_disable(struct bu21013_ts_data *bu21013_data)
+{
+ int error;
+
+ error = gpio_direction_output(bu21013_data->chip->cs_pin, 0);
+ if (error < 0)
+ dev_warn(&bu21013_data->client->dev,
+ "%s: gpio direction failed, error: %d\n",
+ __func__, error);
+ else
+ gpio_set_value(bu21013_data->chip->cs_pin, 0);
+
+ gpio_free(bu21013_data->chip->cs_pin);
+}
+
+#ifdef CONFIG_OF
+static const struct bu21013_platform_device *
+bu21013_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct bu21013_platform_device *pdata;
+
+ if (!np) {
+ dev_err(dev, "no device tree or platform data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->y_flip = pdata->x_flip = false;
+
+ pdata->x_flip = of_property_read_bool(np, "rohm,flip-x");
+ pdata->y_flip = of_property_read_bool(np, "rohm,flip-y");
+
+ of_property_read_u32(np, "rohm,touch-max-x", &pdata->touch_x_max);
+ of_property_read_u32(np, "rohm,touch-max-y", &pdata->touch_y_max);
+
+ pdata->touch_pin = of_get_named_gpio(np, "touch-gpio", 0);
+ pdata->cs_pin = of_get_named_gpio(np, "reset-gpio", 0);
+
+ pdata->ext_clk = false;
+
+ return pdata;
}
+#else
+static inline const struct bu21013_platform_device *
+bu21013_parse_dt(struct device *dev)
+{
+ dev_err(dev, "no platform data available\n");
+ return ERR_PTR(-EINVAL);
+}
+#endif
/**
* bu21013_probe() - initializes the i2c-client touchscreen driver
@@ -429,13 +495,13 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
 * This function initializes the i2c-client touchscreen
 * driver and returns an integer.
*/
-static int __devinit bu21013_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bu21013_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
+ const struct bu21013_platform_device *pdata =
+ dev_get_platdata(&client->dev);
struct bu21013_ts_data *bu21013_data;
struct input_dev *in_dev;
- const struct bu21013_platform_device *pdata =
- client->dev.platform_data;
int error;
if (!i2c_check_functionality(client->adapter,
@@ -445,7 +511,13 @@ static int __devinit bu21013_probe(struct i2c_client *client,
}
if (!pdata) {
- dev_err(&client->dev, "platform data not defined\n");
+ pdata = bu21013_parse_dt(&client->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
+
+ if (!gpio_is_valid(pdata->touch_pin)) {
+ dev_err(&client->dev, "invalid touch_pin supplied\n");
return -EINVAL;
}
@@ -460,8 +532,9 @@ static int __devinit bu21013_probe(struct i2c_client *client,
bu21013_data->in_dev = in_dev;
bu21013_data->chip = pdata;
bu21013_data->client = client;
+ bu21013_data->irq = gpio_to_irq(pdata->touch_pin);
- bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+ bu21013_data->regulator = regulator_get(&client->dev, "avdd");
if (IS_ERR(bu21013_data->regulator)) {
dev_err(&client->dev, "regulator_get failed\n");
error = PTR_ERR(bu21013_data->regulator);
@@ -478,12 +551,11 @@ static int __devinit bu21013_probe(struct i2c_client *client,
init_waitqueue_head(&bu21013_data->wait);
/* configure the gpio pins */
- if (pdata->cs_en) {
- error = pdata->cs_en(pdata->cs_pin);
- if (error < 0) {
- dev_err(&client->dev, "chip init failed\n");
- goto err_disable_regulator;
- }
+ error = gpio_request_one(pdata->cs_pin, GPIOF_OUT_INIT_HIGH,
+ "touchp_reset");
+ if (error < 0) {
+ dev_err(&client->dev, "Unable to request gpio reset_pin\n");
+ goto err_disable_regulator;
}
/* configure the touch panel controller */
@@ -508,12 +580,13 @@ static int __devinit bu21013_probe(struct i2c_client *client,
pdata->touch_y_max, 0, 0);
input_set_drvdata(in_dev, bu21013_data);
- error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
+ error = request_threaded_irq(bu21013_data->irq, NULL, bu21013_gpio_irq,
IRQF_TRIGGER_FALLING | IRQF_SHARED |
IRQF_ONESHOT,
DRIVER_TP, bu21013_data);
if (error) {
- dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
+ dev_err(&client->dev, "request irq %d failed\n",
+ bu21013_data->irq);
goto err_cs_disable;
}
@@ -531,7 +604,7 @@ static int __devinit bu21013_probe(struct i2c_client *client,
err_free_irq:
bu21013_free_irq(bu21013_data);
err_cs_disable:
- pdata->cs_dis(pdata->cs_pin);
+ bu21013_cs_disable(bu21013_data);
err_disable_regulator:
regulator_disable(bu21013_data->regulator);
err_put_regulator:
@@ -549,13 +622,13 @@ err_free_mem:
 * This function removes the i2c-client
 * touchscreen driver and returns an integer.
*/
-static int __devexit bu21013_remove(struct i2c_client *client)
+static int bu21013_remove(struct i2c_client *client)
{
struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client);
bu21013_free_irq(bu21013_data);
- bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
+ bu21013_cs_disable(bu21013_data);
input_unregister_device(bu21013_data->in_dev);
@@ -584,9 +657,9 @@ static int bu21013_suspend(struct device *dev)
bu21013_data->touch_stopped = true;
if (device_may_wakeup(&client->dev))
- enable_irq_wake(bu21013_data->chip->irq);
+ enable_irq_wake(bu21013_data->irq);
else
- disable_irq(bu21013_data->chip->irq);
+ disable_irq(bu21013_data->irq);
regulator_disable(bu21013_data->regulator);
@@ -621,9 +694,9 @@ static int bu21013_resume(struct device *dev)
bu21013_data->touch_stopped = false;
if (device_may_wakeup(&client->dev))
- disable_irq_wake(bu21013_data->chip->irq);
+ disable_irq_wake(bu21013_data->irq);
else
- enable_irq(bu21013_data->chip->irq);
+ enable_irq(bu21013_data->irq);
return 0;
}
@@ -649,7 +722,7 @@ static struct i2c_driver bu21013_driver = {
#endif
},
.probe = bu21013_probe,
- .remove = __devexit_p(bu21013_remove),
+ .remove = bu21013_remove,
.id_table = bu21013_id,
};
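The bu21013 conversion above drops the board-specific cs_en/cs_dis callbacks and
the pdata->irq field in favour of plain gpiolib calls. A minimal sketch of that
pattern, assuming one reset GPIO and one touch GPIO; the example_* names are
illustrative and not part of the driver:

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	static int example_setup_pins(int reset_gpio, int touch_gpio,
				      irq_handler_t thread_fn, void *data)
	{
		int irq, error;

		/* Claim the reset line and drive it high in a single call. */
		error = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
					 "example_reset");
		if (error)
			return error;

		/* Derive the IRQ from the touch GPIO instead of pdata->irq. */
		irq = gpio_to_irq(touch_gpio);
		if (irq < 0) {
			gpio_free(reset_gpio);
			return irq;
		}

		error = request_threaded_irq(irq, NULL, thread_fn,
					     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					     "example_ts", data);
		if (error)
			gpio_free(reset_gpio);
		return error;
	}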
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 464f1bf4b61d..96e0eedcc7e5 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -99,9 +99,18 @@ static int cy8ctmg110_read_regs(struct cy8ctmg110 *tsc,
int ret;
struct i2c_msg msg[2] = {
/* first write slave position to i2c devices */
- { client->addr, 0, 1, &cmd },
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &cmd
+ },
/* Second read data from position */
- { client->addr, I2C_M_RD, len, data }
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data
+ }
};
ret = i2c_transfer(client->adapter, msg, 2);
@@ -166,7 +175,7 @@ static irqreturn_t cy8ctmg110_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit cy8ctmg110_probe(struct i2c_client *client,
+static int cy8ctmg110_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct cy8ctmg110_pdata *pdata = client->dev.platform_data;
@@ -314,7 +323,7 @@ static int cy8ctmg110_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
#endif
-static int __devexit cy8ctmg110_remove(struct i2c_client *client)
+static int cy8ctmg110_remove(struct i2c_client *client)
{
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
@@ -348,7 +357,7 @@ static struct i2c_driver cy8ctmg110_driver = {
},
.id_table = cy8ctmg110_idtable,
.probe = cy8ctmg110_probe,
- .remove = __devexit_p(cy8ctmg110_remove),
+ .remove = cy8ctmg110_remove,
};
module_i2c_driver(cy8ctmg110_driver);
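The cy8ctmg110 hunk above is purely cosmetic: positional struct i2c_msg
initializers become designated ones, which stays correct even if the structure
layout ever changes. A small sketch of the same write-then-read idiom; the
example_read_reg() helper is illustrative, not taken from the driver:

	#include <linux/i2c.h>

	static int example_read_reg(struct i2c_client *client, u8 reg,
				    u8 *buf, u8 len)
	{
		struct i2c_msg msg[2] = {
			{
				/* First message: write the register address. */
				.addr = client->addr,
				.len = 1,
				.buf = &reg,
			},
			{
				/* Second message: read 'len' bytes back. */
				.addr = client->addr,
				.flags = I2C_M_RD,
				.len = len,
				.buf = buf,
			},
		};
		int ret;

		ret = i2c_transfer(client->adapter, msg, 2);
		if (ret < 0)
			return ret;
		return ret == 2 ? 0 : -EIO;
	}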
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 2af1d0c52bcd..4dbdf44b8fc5 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -81,7 +81,7 @@ static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = {
.read = cyttsp_i2c_read_block_data,
};
-static int __devinit cyttsp_i2c_probe(struct i2c_client *client,
+static int cyttsp_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct cyttsp *ts;
@@ -102,7 +102,7 @@ static int __devinit cyttsp_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit cyttsp_i2c_remove(struct i2c_client *client)
+static int cyttsp_i2c_remove(struct i2c_client *client)
{
struct cyttsp *ts = i2c_get_clientdata(client);
@@ -124,7 +124,7 @@ static struct i2c_driver cyttsp_i2c_driver = {
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_i2c_probe,
- .remove = __devexit_p(cyttsp_i2c_remove),
+ .remove = cyttsp_i2c_remove,
.id_table = cyttsp_i2c_id,
};
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index 9f263410407b..861b7f77605b 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -147,7 +147,7 @@ static const struct cyttsp_bus_ops cyttsp_spi_bus_ops = {
.read = cyttsp_spi_read_block_data,
};
-static int __devinit cyttsp_spi_probe(struct spi_device *spi)
+static int cyttsp_spi_probe(struct spi_device *spi)
{
struct cyttsp *ts;
int error;
@@ -172,7 +172,7 @@ static int __devinit cyttsp_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit cyttsp_spi_remove(struct spi_device *spi)
+static int cyttsp_spi_remove(struct spi_device *spi)
{
struct cyttsp *ts = spi_get_drvdata(spi);
@@ -188,12 +188,11 @@ static struct spi_driver cyttsp_spi_driver = {
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_spi_probe,
- .remove = __devexit_p(cyttsp_spi_remove),
+ .remove = cyttsp_spi_remove,
};
module_spi_driver(cyttsp_spi_driver);
-MODULE_ALIAS("spi:cyttsp");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) SPI driver");
MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 36b65cf10d7f..34ad84105e6e 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -297,7 +297,7 @@ static void da9034_touch_close(struct input_dev *dev)
}
-static int __devinit da9034_touch_probe(struct platform_device *pdev)
+static int da9034_touch_probe(struct platform_device *pdev)
{
struct da9034_touch_pdata *pdata = pdev->dev.platform_data;
struct da9034_touch *touch;
@@ -361,7 +361,7 @@ err_free_touch:
return ret;
}
-static int __devexit da9034_touch_remove(struct platform_device *pdev)
+static int da9034_touch_remove(struct platform_device *pdev)
{
struct da9034_touch *touch = platform_get_drvdata(pdev);
@@ -377,7 +377,7 @@ static struct platform_driver da9034_touch_driver = {
.owner = THIS_MODULE,
},
.probe = da9034_touch_probe,
- .remove = __devexit_p(da9034_touch_remove),
+ .remove = da9034_touch_remove,
};
module_platform_driver(da9034_touch_driver);
diff --git a/drivers/input/touchscreen/da9052_tsi.c b/drivers/input/touchscreen/da9052_tsi.c
index e8df341090c0..8f561e22bdd4 100644
--- a/drivers/input/touchscreen/da9052_tsi.c
+++ b/drivers/input/touchscreen/da9052_tsi.c
@@ -27,8 +27,6 @@ struct da9052_tsi {
struct input_dev *dev;
struct delayed_work ts_pen_work;
struct mutex mutex;
- unsigned int irq_pendwn;
- unsigned int irq_datardy;
bool stopped;
bool adc_on;
};
@@ -45,8 +43,8 @@ static irqreturn_t da9052_ts_pendwn_irq(int irq, void *data)
if (!tsi->stopped) {
/* Mask PEN_DOWN event and unmask TSI_READY event */
- disable_irq_nosync(tsi->irq_pendwn);
- enable_irq(tsi->irq_datardy);
+ da9052_disable_irq_nosync(tsi->da9052, DA9052_IRQ_PENDOWN);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
da9052_ts_adc_toggle(tsi, true);
@@ -137,13 +135,13 @@ static void da9052_ts_pen_work(struct work_struct *work)
return;
/* Mask TSI_READY event and unmask PEN_DOWN event */
- disable_irq(tsi->irq_datardy);
- enable_irq(tsi->irq_pendwn);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
}
}
}
-static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
+static int da9052_ts_configure_gpio(struct da9052 *da9052)
{
int error;
@@ -162,7 +160,7 @@ static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
return 0;
}
-static int __devinit da9052_configure_tsi(struct da9052_tsi *tsi)
+static int da9052_configure_tsi(struct da9052_tsi *tsi)
{
int error;
@@ -197,7 +195,7 @@ static int da9052_ts_input_open(struct input_dev *input_dev)
mb();
/* Unmask PEN_DOWN event */
- enable_irq(tsi->irq_pendwn);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
/* Enable Pen Detect Circuit */
return da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG,
@@ -210,11 +208,11 @@ static void da9052_ts_input_close(struct input_dev *input_dev)
tsi->stopped = true;
mb();
- disable_irq(tsi->irq_pendwn);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
cancel_delayed_work_sync(&tsi->ts_pen_work);
if (tsi->adc_on) {
- disable_irq(tsi->irq_datardy);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
da9052_ts_adc_toggle(tsi, false);
/*
@@ -222,33 +220,24 @@ static void da9052_ts_input_close(struct input_dev *input_dev)
* twice and we need to enable it to keep enable/disable
* counter balanced. IRQ is still off though.
*/
- enable_irq(tsi->irq_pendwn);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
}
/* Disable Pen Detect Circuit */
da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);
}
-static int __devinit da9052_ts_probe(struct platform_device *pdev)
+static int da9052_ts_probe(struct platform_device *pdev)
{
struct da9052 *da9052;
struct da9052_tsi *tsi;
struct input_dev *input_dev;
- int irq_pendwn;
- int irq_datardy;
int error;
da9052 = dev_get_drvdata(pdev->dev.parent);
if (!da9052)
return -EINVAL;
- irq_pendwn = platform_get_irq_byname(pdev, "PENDWN");
- irq_datardy = platform_get_irq_byname(pdev, "TSIRDY");
- if (irq_pendwn < 0 || irq_datardy < 0) {
- dev_err(da9052->dev, "Unable to determine device interrupts\n");
- return -ENXIO;
- }
-
tsi = kzalloc(sizeof(struct da9052_tsi), GFP_KERNEL);
input_dev = input_allocate_device();
if (!tsi || !input_dev) {
@@ -258,8 +247,6 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
tsi->da9052 = da9052;
tsi->dev = input_dev;
- tsi->irq_pendwn = da9052->irq_base + irq_pendwn;
- tsi->irq_datardy = da9052->irq_base + irq_datardy;
tsi->stopped = true;
INIT_DELAYED_WORK(&tsi->ts_pen_work, da9052_ts_pen_work);
@@ -287,31 +274,25 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
/* Disable ADC */
da9052_ts_adc_toggle(tsi, false);
- error = request_threaded_irq(tsi->irq_pendwn,
- NULL, da9052_ts_pendwn_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "PENDWN", tsi);
+ error = da9052_request_irq(tsi->da9052, DA9052_IRQ_PENDOWN,
+ "pendown-irq", da9052_ts_pendwn_irq, tsi);
if (error) {
dev_err(tsi->da9052->dev,
- "Failed to register PENDWN IRQ %d, error = %d\n",
- tsi->irq_pendwn, error);
+ "Failed to register PENDWN IRQ: %d\n", error);
goto err_free_mem;
}
- error = request_threaded_irq(tsi->irq_datardy,
- NULL, da9052_ts_datardy_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "TSIRDY", tsi);
+ error = da9052_request_irq(tsi->da9052, DA9052_IRQ_TSIREADY,
+ "tsiready-irq", da9052_ts_datardy_irq, tsi);
if (error) {
dev_err(tsi->da9052->dev,
- "Failed to register TSIRDY IRQ %d, error = %d\n",
- tsi->irq_datardy, error);
+ "Failed to register TSIRDY IRQ :%d\n", error);
goto err_free_pendwn_irq;
}
/* Mask PEN_DOWN and TSI_READY events */
- disable_irq(tsi->irq_pendwn);
- disable_irq(tsi->irq_datardy);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
error = da9052_configure_tsi(tsi);
if (error)
@@ -326,9 +307,9 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
return 0;
err_free_datardy_irq:
- free_irq(tsi->irq_datardy, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_TSIREADY, tsi);
err_free_pendwn_irq:
- free_irq(tsi->irq_pendwn, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
err_free_mem:
kfree(tsi);
input_free_device(input_dev);
@@ -336,14 +317,14 @@ err_free_mem:
return error;
}
-static int __devexit da9052_ts_remove(struct platform_device *pdev)
+static int da9052_ts_remove(struct platform_device *pdev)
{
struct da9052_tsi *tsi = platform_get_drvdata(pdev);
da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x19);
- free_irq(tsi->irq_pendwn, tsi);
- free_irq(tsi->irq_datardy, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_TSIREADY, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
input_unregister_device(tsi->dev);
kfree(tsi);
@@ -355,7 +336,7 @@ static int __devexit da9052_ts_remove(struct platform_device *pdev)
static struct platform_driver da9052_tsi_driver = {
.probe = da9052_ts_probe,
- .remove = __devexit_p(da9052_ts_remove),
+ .remove = da9052_ts_remove,
.driver = {
.name = "da9052-tsi",
.owner = THIS_MODULE,
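The da9052_tsi changes above stop caching raw virtual IRQ numbers and instead go
through the MFD core helpers keyed by DA9052_IRQ_* identifiers. A brief sketch of
the request-then-mask pattern the probe path now uses; example_register_pen_irq()
and its arguments are placeholders, while the da9052_* calls match the hunks above:

	static int example_register_pen_irq(struct da9052 *da9052,
					    irq_handler_t handler, void *data)
	{
		int error;

		error = da9052_request_irq(da9052, DA9052_IRQ_PENDOWN,
					   "pendown-irq", handler, data);
		if (error)
			return error;

		/* Keep the event masked until the input device is opened;
		 * the matching da9052_free_irq() belongs in remove(). */
		da9052_disable_irq(da9052, DA9052_IRQ_PENDOWN);
		return 0;
	}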
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 099d144ab7c9..a9170157b442 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -491,14 +491,6 @@ static int edt_ft5x06_debugfs_mode_set(void *data, u64 mode)
DEFINE_SIMPLE_ATTRIBUTE(debugfs_mode_fops, edt_ft5x06_debugfs_mode_get,
edt_ft5x06_debugfs_mode_set, "%llu\n");
-static int edt_ft5x06_debugfs_raw_data_open(struct inode *inode,
- struct file *file)
-{
- file->private_data = inode->i_private;
-
- return 0;
-}
-
static ssize_t edt_ft5x06_debugfs_raw_data_read(struct file *file,
char __user *buf, size_t count, loff_t *off)
{
@@ -579,11 +571,11 @@ out:
static const struct file_operations debugfs_raw_data_fops = {
- .open = edt_ft5x06_debugfs_raw_data_open,
+ .open = simple_open,
.read = edt_ft5x06_debugfs_raw_data_read,
};
-static void __devinit
+static void
edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
const char *debugfs_name)
{
@@ -600,7 +592,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
}
-static void __devexit
+static void
edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
if (tsdata->debug_dir)
@@ -625,7 +617,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
-static int __devinit edt_ft5x06_ts_reset(struct i2c_client *client,
+static int edt_ft5x06_ts_reset(struct i2c_client *client,
int reset_pin)
{
int error;
@@ -649,7 +641,7 @@ static int __devinit edt_ft5x06_ts_reset(struct i2c_client *client,
return 0;
}
-static int __devinit edt_ft5x06_ts_identify(struct i2c_client *client,
+static int edt_ft5x06_ts_identify(struct i2c_client *client,
char *model_name,
char *fw_version)
{
@@ -683,7 +675,7 @@ static int __devinit edt_ft5x06_ts_identify(struct i2c_client *client,
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
edt_ft5x06_register_write(tsdata, reg, pdata->name)
-static void __devinit
+static void
edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata,
const struct edt_ft5x06_platform_data *pdata)
{
@@ -697,7 +689,7 @@ edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata,
EDT_ATTR_CHECKSET(report_rate, WORK_REGISTER_REPORT_RATE);
}
-static void __devinit
+static void
edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
{
tsdata->threshold = edt_ft5x06_register_read(tsdata,
@@ -710,7 +702,7 @@ edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
tsdata->num_y = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_Y);
}
-static int __devinit edt_ft5x06_ts_probe(struct i2c_client *client,
+static int edt_ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct edt_ft5x06_platform_data *pdata =
@@ -830,7 +822,7 @@ err_free_mem:
return error;
}
-static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
+static int edt_ft5x06_ts_remove(struct i2c_client *client)
{
const struct edt_ft5x06_platform_data *pdata =
dev_get_platdata(&client->dev);
@@ -891,7 +883,7 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
},
.id_table = edt_ft5x06_ts_id,
.probe = edt_ft5x06_ts_probe,
- .remove = __devexit_p(edt_ft5x06_ts_remove),
+ .remove = edt_ft5x06_ts_remove,
};
module_i2c_driver(edt_ft5x06_ts_driver);
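In the edt-ft5x06 hunks above, the removed edt_ft5x06_debugfs_raw_data_open()
only copied inode->i_private into file->private_data, which is exactly what the
generic simple_open() helper does, so the file_operations can point at it
directly. A minimal sketch, with example_* names standing in for driver code:

	#include <linux/debugfs.h>
	#include <linux/fs.h>

	static ssize_t example_raw_read(struct file *file, char __user *buf,
					size_t count, loff_t *off)
	{
		/* simple_open() stored the pointer passed to
		 * debugfs_create_file() here; a real driver would format
		 * its data into 'buf'. */
		void *priv = file->private_data;

		(void)priv;
		return 0;
	}

	static const struct file_operations example_fops = {
		.open = simple_open,
		.read = example_raw_read,
	};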
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 908407efc672..55255a940072 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -154,7 +154,7 @@ static void eeti_ts_close(struct input_dev *dev)
eeti_ts_stop(priv);
}
-static int __devinit eeti_ts_probe(struct i2c_client *client,
+static int eeti_ts_probe(struct i2c_client *client,
const struct i2c_device_id *idp)
{
struct eeti_ts_platform_data *pdata = client->dev.platform_data;
@@ -248,7 +248,7 @@ err0:
return err;
}
-static int __devexit eeti_ts_remove(struct i2c_client *client)
+static int eeti_ts_remove(struct i2c_client *client)
{
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
@@ -321,7 +321,7 @@ static struct i2c_driver eeti_ts_driver = {
#endif
},
.probe = eeti_ts_probe,
- .remove = __devexit_p(eeti_ts_remove),
+ .remove = eeti_ts_remove,
.id_table = eeti_ts_id,
};
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 13fa62fdfb0b..17c9097f3b5d 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -153,7 +153,7 @@ static int egalax_wake_up_device(struct i2c_client *client)
return 0;
}
-static int __devinit egalax_firmware_version(struct i2c_client *client)
+static int egalax_firmware_version(struct i2c_client *client)
{
static const u8 cmd[MAX_I2C_DATA_LEN] = { 0x03, 0x03, 0xa, 0x01, 0x41 };
int ret;
@@ -165,7 +165,7 @@ static int __devinit egalax_firmware_version(struct i2c_client *client)
return 0;
}
-static int __devinit egalax_ts_probe(struct i2c_client *client,
+static int egalax_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct egalax_ts *ts;
@@ -246,7 +246,7 @@ err_free_ts:
return error;
}
-static __devexit int egalax_ts_remove(struct i2c_client *client)
+static int egalax_ts_remove(struct i2c_client *client)
{
struct egalax_ts *ts = i2c_get_clientdata(client);
@@ -301,7 +301,7 @@ static struct i2c_driver egalax_ts_driver = {
},
.id_table = egalax_ts_id,
.probe = egalax_ts_probe,
- .remove = __devexit_p(egalax_ts_remove),
+ .remove = egalax_ts_remove,
};
module_i2c_driver(egalax_ts_driver);
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
deleted file mode 100644
index b9e8686a6f1c..000000000000
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * Copyright (c) 2001 "Crazy" James Simmons jsimmons@transvirtual.com
- *
- * Sponsored by Transvirtual Technology.
- *
- * Derived from the code in h3600_ts.[ch] by Charles Flynn
- */
-
-/*
- * Driver for the h3600 Touch Screen and other Atmel controlled devices.
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so by
- * e-mail - mail your message to <jsimmons@transvirtual.com>.
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/serio.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-
-/* SA1100 serial defines */
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
-#define DRIVER_DESC "H3600 touchscreen driver"
-
-MODULE_AUTHOR("James Simmons <jsimmons@transvirtual.com>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
-/*
- * Definitions & global arrays.
- */
-
-/* The start and end of frame characters SOF and EOF */
-#define CHAR_SOF 0x02
-#define CHAR_EOF 0x03
-#define FRAME_OVERHEAD 3 /* CHAR_SOF,CHAR_EOF,LENGTH = 3 */
-
-/*
- Atmel events and response IDs contained in frame.
- Programmer has no control over these numbers.
- TODO there are holes - specifically 1,7,0x0a
-*/
-#define VERSION_ID 0 /* Get Version (request/response) */
-#define KEYBD_ID 2 /* Keyboard (event) */
-#define TOUCHS_ID 3 /* Touch Screen (event)*/
-#define EEPROM_READ_ID 4 /* (request/response) */
-#define EEPROM_WRITE_ID 5 /* (request/response) */
-#define THERMAL_ID 6 /* (request/response) */
-#define NOTIFY_LED_ID 8 /* (request/response) */
-#define BATTERY_ID 9 /* (request/response) */
-#define SPI_READ_ID 0x0b /* ( request/response) */
-#define SPI_WRITE_ID 0x0c /* ( request/response) */
-#define FLITE_ID 0x0d /* backlight ( request/response) */
-#define STX_ID 0xa1 /* extension pack status (req/resp) */
-
-#define MAX_ID 14
-
-#define H3600_MAX_LENGTH 16
-#define H3600_KEY 0xf
-
-#define H3600_SCANCODE_RECORD 1 /* 1 -> record button */
-#define H3600_SCANCODE_CALENDAR 2 /* 2 -> calendar */
-#define H3600_SCANCODE_CONTACTS 3 /* 3 -> contact */
-#define H3600_SCANCODE_Q 4 /* 4 -> Q button */
-#define H3600_SCANCODE_START 5 /* 5 -> start menu */
-#define H3600_SCANCODE_UP 6 /* 6 -> up */
-#define H3600_SCANCODE_RIGHT 7 /* 7 -> right */
-#define H3600_SCANCODE_LEFT 8 /* 8 -> left */
-#define H3600_SCANCODE_DOWN 9 /* 9 -> down */
-
-/*
- * Per-touchscreen data.
- */
-struct h3600_dev {
- struct input_dev *dev;
- struct serio *serio;
- unsigned char event; /* event ID from packet */
- unsigned char chksum;
- unsigned char len;
- unsigned char idx;
- unsigned char buf[H3600_MAX_LENGTH];
- char phys[32];
-};
-
-static irqreturn_t action_button_handler(int irq, void *dev_id)
-{
- int down = (GPLR & GPIO_BITSY_ACTION_BUTTON) ? 0 : 1;
- struct input_dev *dev = dev_id;
-
- input_report_key(dev, KEY_ENTER, down);
- input_sync(dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t npower_button_handler(int irq, void *dev_id)
-{
- int down = (GPLR & GPIO_BITSY_NPOWER_BUTTON) ? 0 : 1;
- struct input_dev *dev = dev_id;
-
- /*
- * This interrupt is only called when we release the key. So we have
- * to fake a key press.
- */
- input_report_key(dev, KEY_SUSPEND, 1);
- input_report_key(dev, KEY_SUSPEND, down);
- input_sync(dev);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_PM
-
-static int flite_brightness = 25;
-
-enum flite_pwr {
- FLITE_PWR_OFF = 0,
- FLITE_PWR_ON = 1
-};
-
-/*
- * h3600_flite_power: enables or disables power to frontlight, using last bright */
-unsigned int h3600_flite_power(struct input_dev *dev, enum flite_pwr pwr)
-{
- unsigned char brightness = (pwr == FLITE_PWR_OFF) ? 0 : flite_brightness;
- struct h3600_dev *ts = input_get_drvdata(dev);
-
- /* Must be in this order */
- serio_write(ts->serio, 1);
- serio_write(ts->serio, pwr);
- serio_write(ts->serio, brightness);
-
- return 0;
-}
-
-#endif
-
-/*
- * This function translates the native event packets to linux input event
- * packets. Some packets coming from serial are not touchscreen related. In
- * this case we send them off to be processed elsewhere.
- */
-static void h3600ts_process_packet(struct h3600_dev *ts)
-{
- struct input_dev *dev = ts->dev;
- static int touched = 0;
- int key, down = 0;
-
- switch (ts->event) {
- /*
- Buttons - returned as a single byte
- 7 6 5 4 3 2 1 0
- S x x x N N N N
-
- S switch state ( 0=pressed 1=released)
- x Unused.
- NNNN switch number 0-15
-
- Note: This is true for non interrupt generated key events.
- */
- case KEYBD_ID:
- down = (ts->buf[0] & 0x80) ? 0 : 1;
-
- switch (ts->buf[0] & 0x7f) {
- case H3600_SCANCODE_RECORD:
- key = KEY_RECORD;
- break;
- case H3600_SCANCODE_CALENDAR:
- key = KEY_PROG1;
- break;
- case H3600_SCANCODE_CONTACTS:
- key = KEY_PROG2;
- break;
- case H3600_SCANCODE_Q:
- key = KEY_Q;
- break;
- case H3600_SCANCODE_START:
- key = KEY_PROG3;
- break;
- case H3600_SCANCODE_UP:
- key = KEY_UP;
- break;
- case H3600_SCANCODE_RIGHT:
- key = KEY_RIGHT;
- break;
- case H3600_SCANCODE_LEFT:
- key = KEY_LEFT;
- break;
- case H3600_SCANCODE_DOWN:
- key = KEY_DOWN;
- break;
- default:
- key = 0;
- }
- if (key)
- input_report_key(dev, key, down);
- break;
- /*
- * Native touchscreen event data is formatted as shown below:-
- *
- * +-------+-------+-------+-------+
- * | Xmsb | Xlsb | Ymsb | Ylsb |
- * +-------+-------+-------+-------+
- * byte 0 1 2 3
- */
- case TOUCHS_ID:
- if (!touched) {
- input_report_key(dev, BTN_TOUCH, 1);
- touched = 1;
- }
-
- if (ts->len) {
- unsigned short x, y;
-
- x = ts->buf[0]; x <<= 8; x += ts->buf[1];
- y = ts->buf[2]; y <<= 8; y += ts->buf[3];
-
- input_report_abs(dev, ABS_X, x);
- input_report_abs(dev, ABS_Y, y);
- } else {
- input_report_key(dev, BTN_TOUCH, 0);
- touched = 0;
- }
- break;
- default:
- /* Send a non input event elsewhere */
- break;
- }
-
- input_sync(dev);
-}
-
-/*
- * h3600ts_event() handles events from the input module.
- */
-static int h3600ts_event(struct input_dev *dev, unsigned int type,
- unsigned int code, int value)
-{
-#if 0
- struct h3600_dev *ts = input_get_drvdata(dev);
-
- switch (type) {
- case EV_LED: {
- // serio_write(ts->serio, SOME_CMD);
- return 0;
- }
- }
- return -1;
-#endif
- return 0;
-}
-
-/*
- Frame format
- byte 1 2 3 len + 4
- +-------+---------------+---------------+--=------------+
- |SOF |id |len | len bytes | Chksum |
- +-------+---------------+---------------+--=------------+
- bit 0 7 8 11 12 15 16
-
- +-------+---------------+-------+
- |SOF |id |0 |Chksum | - Note Chksum does not include SOF
- +-------+---------------+-------+
- bit 0 7 8 11 12 15 16
-
-*/
-
-static int state;
-
-/* decode States */
-#define STATE_SOF 0 /* start of FRAME */
-#define STATE_ID 1 /* state where we decode the ID & len */
-#define STATE_DATA 2 /* state where we decode data */
-#define STATE_EOF 3 /* state where we decode checksum or EOF */
-
-static irqreturn_t h3600ts_interrupt(struct serio *serio, unsigned char data,
- unsigned int flags)
-{
- struct h3600_dev *ts = serio_get_drvdata(serio);
-
- /*
- * We have a new frame coming in.
- */
- switch (state) {
- case STATE_SOF:
- if (data == CHAR_SOF)
- state = STATE_ID;
- break;
- case STATE_ID:
- ts->event = (data & 0xf0) >> 4;
- ts->len = (data & 0xf);
- ts->idx = 0;
- if (ts->event >= MAX_ID) {
- state = STATE_SOF;
- break;
- }
- ts->chksum = data;
- state = (ts->len > 0) ? STATE_DATA : STATE_EOF;
- break;
- case STATE_DATA:
- ts->chksum += data;
- ts->buf[ts->idx]= data;
- if (++ts->idx == ts->len)
- state = STATE_EOF;
- break;
- case STATE_EOF:
- state = STATE_SOF;
- if (data == CHAR_EOF || data == ts->chksum)
- h3600ts_process_packet(ts);
- break;
- default:
- printk("Error3\n");
- break;
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * h3600ts_connect() is the routine that is called when someone adds a
- * new serio device that supports H3600 protocol and registers it as
- * an input device.
- */
-static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
-{
- struct h3600_dev *ts;
- struct input_dev *input_dev;
- int err;
-
- ts = kzalloc(sizeof(struct h3600_dev), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- err = -ENOMEM;
- goto fail1;
- }
-
- ts->serio = serio;
- ts->dev = input_dev;
- snprintf(ts->phys, sizeof(ts->phys), "%s/input0", serio->phys);
-
- input_dev->name = "H3600 TouchScreen";
- input_dev->phys = ts->phys;
- input_dev->id.bustype = BUS_RS232;
- input_dev->id.vendor = SERIO_H3600;
- input_dev->id.product = 0x0666; /* FIXME !!! We can ask the hardware */
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &serio->dev;
-
- input_set_drvdata(input_dev, ts);
-
- input_dev->event = h3600ts_event;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) |
- BIT_MASK(EV_LED) | BIT_MASK(EV_PWR);
- input_dev->ledbit[0] = BIT_MASK(LED_SLEEP);
- input_set_abs_params(input_dev, ABS_X, 60, 985, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 35, 1024, 0, 0);
-
- set_bit(KEY_RECORD, input_dev->keybit);
- set_bit(KEY_Q, input_dev->keybit);
- set_bit(KEY_PROG1, input_dev->keybit);
- set_bit(KEY_PROG2, input_dev->keybit);
- set_bit(KEY_PROG3, input_dev->keybit);
- set_bit(KEY_UP, input_dev->keybit);
- set_bit(KEY_RIGHT, input_dev->keybit);
- set_bit(KEY_LEFT, input_dev->keybit);
- set_bit(KEY_DOWN, input_dev->keybit);
- set_bit(KEY_ENTER, input_dev->keybit);
- set_bit(KEY_SUSPEND, input_dev->keybit);
- set_bit(BTN_TOUCH, input_dev->keybit);
-
- /* Device specific stuff */
- set_GPIO_IRQ_edge(GPIO_BITSY_ACTION_BUTTON, GPIO_BOTH_EDGES);
- set_GPIO_IRQ_edge(GPIO_BITSY_NPOWER_BUTTON, GPIO_RISING_EDGE);
-
- if (request_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, action_button_handler,
- IRQF_SHARED, "h3600_action", ts->dev)) {
- printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
- err = -EBUSY;
- goto fail1;
- }
-
- if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
- IRQF_SHARED, "h3600_suspend", ts->dev)) {
- printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
- err = -EBUSY;
- goto fail2;
- }
-
- serio_set_drvdata(serio, ts);
-
- err = serio_open(serio, drv);
- if (err)
- goto fail3;
-
- //h3600_flite_control(1, 25); /* default brightness */
- err = input_register_device(ts->dev);
- if (err)
- goto fail4;
-
- return 0;
-
-fail4: serio_close(serio);
-fail3: serio_set_drvdata(serio, NULL);
- free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
-fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1: input_free_device(input_dev);
- kfree(ts);
- return err;
-}
-
-/*
- * h3600ts_disconnect() is the opposite of h3600ts_connect()
- */
-
-static void h3600ts_disconnect(struct serio *serio)
-{
- struct h3600_dev *ts = serio_get_drvdata(serio);
-
- free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
- free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
- input_get_device(ts->dev);
- input_unregister_device(ts->dev);
- serio_close(serio);
- serio_set_drvdata(serio, NULL);
- input_put_device(ts->dev);
- kfree(ts);
-}
-
-/*
- * The serio driver structure.
- */
-
-static struct serio_device_id h3600ts_serio_ids[] = {
- {
- .type = SERIO_RS232,
- .proto = SERIO_H3600,
- .id = SERIO_ANY,
- .extra = SERIO_ANY,
- },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(serio, h3600ts_serio_ids);
-
-static struct serio_driver h3600ts_drv = {
- .driver = {
- .name = "h3600ts",
- },
- .description = DRIVER_DESC,
- .id_table = h3600ts_serio_ids,
- .interrupt = h3600ts_interrupt,
- .connect = h3600ts_connect,
- .disconnect = h3600ts_disconnect,
-};
-
-module_serio_driver(h3600ts_drv);
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index d13143b68b3e..6c4fb8436957 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -102,7 +102,7 @@ static void htcpen_close(struct input_dev *dev)
synchronize_irq(HTCPEN_IRQ);
}
-static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id)
+static int htcpen_isa_probe(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev;
int err = -EBUSY;
@@ -174,7 +174,7 @@ static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id)
return err;
}
-static int __devexit htcpen_isa_remove(struct device *dev, unsigned int id)
+static int htcpen_isa_remove(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev = dev_get_drvdata(dev);
@@ -210,7 +210,7 @@ static int htcpen_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver htcpen_isa_driver = {
.probe = htcpen_isa_probe,
- .remove = __devexit_p(htcpen_isa_remove),
+ .remove = htcpen_isa_remove,
#ifdef CONFIG_PM
.suspend = htcpen_isa_suspend,
.resume = htcpen_isa_resume,
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 4ac69760ec08..1418bdda61bb 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -180,7 +180,7 @@ static const struct attribute_group ili210x_attr_group = {
.attrs = ili210x_attributes,
};
-static int __devinit ili210x_i2c_probe(struct i2c_client *client,
+static int ili210x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
@@ -298,7 +298,7 @@ err_free_mem:
return error;
}
-static int __devexit ili210x_i2c_remove(struct i2c_client *client)
+static int ili210x_i2c_remove(struct i2c_client *client)
{
struct ili210x *priv = i2c_get_clientdata(client);
@@ -350,7 +350,7 @@ static struct i2c_driver ili210x_ts_driver = {
},
.id_table = ili210x_i2c_id,
.probe = ili210x_i2c_probe,
- .remove = __devexit_p(ili210x_i2c_remove),
+ .remove = ili210x_i2c_remove,
};
module_i2c_driver(ili210x_ts_driver);
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index cf299377fc49..465db5dba8b4 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -427,7 +427,7 @@ out:
}
/* Utility to read PMIC ID */
-static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
+static int mrstouch_read_pmic_id(uint *vendor, uint *rev)
{
int err;
u8 r;
@@ -446,7 +446,7 @@ static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
* Parse ADC channels to find end of the channel configured by other ADC user
* NEC and MAXIM requires 4 channels and FreeScale needs 18 channels
*/
-static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
+static int mrstouch_chan_parse(struct mrstouch_dev *tsdev)
{
int found = 0;
int err, i;
@@ -478,7 +478,7 @@ static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
/*
* Writes touch screen channels to ADC address selection registers
*/
-static int __devinit mrstouch_ts_chan_set(uint offset)
+static int mrstouch_ts_chan_set(uint offset)
{
u16 chan;
@@ -494,7 +494,7 @@ static int __devinit mrstouch_ts_chan_set(uint offset)
}
/* Initialize ADC */
-static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
+static int mrstouch_adc_init(struct mrstouch_dev *tsdev)
{
int err, start;
u8 ra, rm;
@@ -568,7 +568,7 @@ static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
/* Probe function for touch screen driver */
-static int __devinit mrstouch_probe(struct platform_device *pdev)
+static int mrstouch_probe(struct platform_device *pdev)
{
struct mrstouch_dev *tsdev;
struct input_dev *input;
@@ -643,7 +643,7 @@ err_free_mem:
return err;
}
-static int __devexit mrstouch_remove(struct platform_device *pdev)
+static int mrstouch_remove(struct platform_device *pdev)
{
struct mrstouch_dev *tsdev = platform_get_drvdata(pdev);
@@ -662,7 +662,7 @@ static struct platform_driver mrstouch_driver = {
.owner = THIS_MODULE,
},
.probe = mrstouch_probe,
- .remove = __devexit_p(mrstouch_remove),
+ .remove = mrstouch_remove,
};
module_platform_driver(mrstouch_driver);
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 7f03d1bd916e..282d7c7ad2fc 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -99,7 +99,7 @@ static irqreturn_t jornada720_ts_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit jornada720_ts_probe(struct platform_device *pdev)
+static int jornada720_ts_probe(struct platform_device *pdev)
{
struct jornada_ts *jornada_ts;
struct input_dev *input_dev;
@@ -151,7 +151,7 @@ static int __devinit jornada720_ts_probe(struct platform_device *pdev)
return error;
}
-static int __devexit jornada720_ts_remove(struct platform_device *pdev)
+static int jornada720_ts_remove(struct platform_device *pdev)
{
struct jornada_ts *jornada_ts = platform_get_drvdata(pdev);
@@ -168,7 +168,7 @@ MODULE_ALIAS("platform:jornada_ts");
static struct platform_driver jornada720_ts_driver = {
.probe = jornada720_ts_probe,
- .remove = __devexit_p(jornada720_ts_remove),
+ .remove = jornada720_ts_remove,
.driver = {
.name = "jornada_ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 4c2b8ed3bf16..9101ee529c92 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -203,7 +203,7 @@ static void lpc32xx_ts_close(struct input_dev *dev)
lpc32xx_stop_tsc(tsc);
}
-static int __devinit lpc32xx_ts_probe(struct platform_device *pdev)
+static int lpc32xx_ts_probe(struct platform_device *pdev)
{
struct lpc32xx_tsc *tsc;
struct input_dev *input;
@@ -309,7 +309,7 @@ err_free_mem:
return error;
}
-static int __devexit lpc32xx_ts_remove(struct platform_device *pdev)
+static int lpc32xx_ts_remove(struct platform_device *pdev)
{
struct lpc32xx_tsc *tsc = platform_get_drvdata(pdev);
struct resource *res;
@@ -394,7 +394,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_tsc_of_match);
static struct platform_driver lpc32xx_ts_driver = {
.probe = lpc32xx_ts_probe,
- .remove = __devexit_p(lpc32xx_ts_remove),
+ .remove = lpc32xx_ts_remove,
.driver = {
.name = MOD_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 4eab50b856d7..00bc6caa27f5 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -156,7 +156,7 @@ out:
return IRQ_HANDLED;
}
-static void __devinit max11801_ts_phy_init(struct max11801_data *data)
+static void max11801_ts_phy_init(struct max11801_data *data)
{
struct i2c_client *client = data->client;
@@ -174,7 +174,7 @@ static void __devinit max11801_ts_phy_init(struct max11801_data *data)
max11801_write_reg(client, OP_MODE_CONF_REG, 0x36);
}
-static int __devinit max11801_ts_probe(struct i2c_client *client,
+static int max11801_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max11801_data *data;
@@ -228,7 +228,7 @@ err_free_mem:
return error;
}
-static __devexit int max11801_ts_remove(struct i2c_client *client)
+static int max11801_ts_remove(struct i2c_client *client)
{
struct max11801_data *data = i2c_get_clientdata(client);
@@ -252,7 +252,7 @@ static struct i2c_driver max11801_ts_driver = {
},
.id_table = max11801_ts_id,
.probe = max11801_ts_probe,
- .remove = __devexit_p(max11801_ts_remove),
+ .remove = max11801_ts_remove,
};
module_i2c_driver(max11801_ts_driver);
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index 48dc5b0d26f1..02103b6abb39 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -229,7 +229,7 @@ err_free_mem:
return ret;
}
-static int __devexit mc13783_ts_remove(struct platform_device *pdev)
+static int mc13783_ts_remove(struct platform_device *pdev)
{
struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
@@ -243,7 +243,7 @@ static int __devexit mc13783_ts_remove(struct platform_device *pdev)
}
static struct platform_driver mc13783_ts_driver = {
- .remove = __devexit_p(mc13783_ts_remove),
+ .remove = mc13783_ts_remove,
.driver = {
.owner = THIS_MODULE,
.name = MC13783_TS_NAME,
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index b528511861ce..f9f4e0c56eda 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -187,7 +187,7 @@ static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data)
OP_MODE_ACTIVE | REPORT_RATE_80);
}
-static int __devinit mcs5000_ts_probe(struct i2c_client *client,
+static int mcs5000_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mcs5000_ts_data *data;
@@ -249,7 +249,7 @@ err_free_mem:
return ret;
}
-static int __devexit mcs5000_ts_remove(struct i2c_client *client)
+static int mcs5000_ts_remove(struct i2c_client *client)
{
struct mcs5000_ts_data *data = i2c_get_clientdata(client);
@@ -292,7 +292,7 @@ MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id);
static struct i2c_driver mcs5000_ts_driver = {
.probe = mcs5000_ts_probe,
- .remove = __devexit_p(mcs5000_ts_remove),
+ .remove = mcs5000_ts_remove,
.driver = {
.name = "mcs5000_ts",
#ifdef CONFIG_PM
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 560cf09d1c5a..4a29ddf6bf1e 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/i2c/mms114.h>
#include <linux/input/mt.h>
@@ -360,14 +361,63 @@ static void mms114_input_close(struct input_dev *dev)
mms114_stop(data);
}
-static int __devinit mms114_probe(struct i2c_client *client,
+#ifdef CONFIG_OF
+static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
+{
+ struct mms114_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "failed to allocate platform data\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
+ dev_err(dev, "failed to get x-size property\n");
+ return NULL;
+ };
+
+ if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
+ dev_err(dev, "failed to get y-size property\n");
+ return NULL;
+ };
+
+ of_property_read_u32(np, "contact-threshold",
+ &pdata->contact_threshold);
+ of_property_read_u32(np, "moving-threshold",
+ &pdata->moving_threshold);
+
+ if (of_find_property(np, "x-invert", NULL))
+ pdata->x_invert = true;
+ if (of_find_property(np, "y-invert", NULL))
+ pdata->y_invert = true;
+
+ return pdata;
+}
+#else
+static inline struct mms114_platform_data *mms114_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static int mms114_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct mms114_platform_data *pdata;
struct mms114_data *data;
struct input_dev *input_dev;
int error;
- if (!client->dev.platform_data) {
+ pdata = dev_get_platdata(&client->dev);
+ if (!pdata)
+ pdata = mms114_parse_dt(&client->dev);
+
+ if (!pdata) {
dev_err(&client->dev, "Need platform data\n");
return -EINVAL;
}
@@ -379,17 +429,17 @@ static int __devinit mms114_probe(struct i2c_client *client,
return -ENODEV;
}
- data = kzalloc(sizeof(struct mms114_data), GFP_KERNEL);
- input_dev = input_allocate_device();
+ data = devm_kzalloc(&client->dev, sizeof(struct mms114_data),
+ GFP_KERNEL);
+ input_dev = devm_input_allocate_device(&client->dev);
if (!data || !input_dev) {
dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
}
data->client = client;
data->input_dev = input_dev;
- data->pdata = client->dev.platform_data;
+ data->pdata = pdata;
input_dev->name = "MELPAS MMS114 Touchscreen";
input_dev->id.bustype = BUS_I2C;
@@ -416,57 +466,36 @@ static int __devinit mms114_probe(struct i2c_client *client,
input_set_drvdata(input_dev, data);
i2c_set_clientdata(client, data);
- data->core_reg = regulator_get(&client->dev, "avdd");
+ data->core_reg = devm_regulator_get(&client->dev, "avdd");
if (IS_ERR(data->core_reg)) {
error = PTR_ERR(data->core_reg);
dev_err(&client->dev,
"Unable to get the Core regulator (%d)\n", error);
- goto err_free_mem;
+ return error;
}
- data->io_reg = regulator_get(&client->dev, "vdd");
+ data->io_reg = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(data->io_reg)) {
error = PTR_ERR(data->io_reg);
dev_err(&client->dev,
"Unable to get the IO regulator (%d)\n", error);
- goto err_core_reg;
+ return error;
}
- error = request_threaded_irq(client->irq, NULL, mms114_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mms114", data);
+ error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ mms114_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_io_reg;
+ return error;
}
disable_irq(client->irq);
error = input_register_device(data->input_dev);
- if (error)
- goto err_free_irq;
-
- return 0;
-
-err_free_irq:
- free_irq(client->irq, data);
-err_io_reg:
- regulator_put(data->io_reg);
-err_core_reg:
- regulator_put(data->core_reg);
-err_free_mem:
- input_free_device(input_dev);
- kfree(data);
- return error;
-}
-
-static int __devexit mms114_remove(struct i2c_client *client)
-{
- struct mms114_data *data = i2c_get_clientdata(client);
-
- free_irq(client->irq, data);
- regulator_put(data->io_reg);
- regulator_put(data->core_reg);
- input_unregister_device(data->input_dev);
- kfree(data);
+ if (error) {
+ dev_err(&client->dev, "Failed to register input device\n");
+ return error;
+ }
return 0;
}
@@ -525,14 +554,21 @@ static const struct i2c_device_id mms114_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mms114_id);
+#ifdef CONFIG_OF
+static struct of_device_id mms114_dt_match[] = {
+ { .compatible = "melfas,mms114" },
+ { }
+};
+#endif
+
static struct i2c_driver mms114_driver = {
.driver = {
.name = "mms114",
.owner = THIS_MODULE,
.pm = &mms114_pm_ops,
+ .of_match_table = of_match_ptr(mms114_dt_match),
},
.probe = mms114_probe,
- .remove = __devexit_p(mms114_remove),
.id_table = mms114_id,
};
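The mms114 rework above replaces manual allocation, regulator lookup and IRQ
registration with their managed (devm_*) counterparts, which is why every error
path can simply return and the remove() callback disappears. A condensed, hedged
sketch of that shape; struct example_data and example_irq() are placeholders,
not driver symbols:

	static int example_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		struct example_data *data;
		struct input_dev *input;
		struct regulator *reg;
		int error;

		/* Everything below is released automatically on probe
		 * failure and on device removal, so no goto unwinding. */
		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		input = devm_input_allocate_device(&client->dev);
		if (!data || !input)
			return -ENOMEM;

		reg = devm_regulator_get(&client->dev, "avdd");
		if (IS_ERR(reg))
			return PTR_ERR(reg);

		error = devm_request_threaded_irq(&client->dev, client->irq,
						  NULL, example_irq,
						  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
						  dev_name(&client->dev), data);
		if (error)
			return error;

		/* devm-allocated input devices are also unregistered
		 * automatically, mirroring the dropped mms114_remove(). */
		return input_register_device(input);
	}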
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index f57aeb80f7e3..f22e04dd4e16 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -137,7 +137,7 @@ static void pcap_ts_close(struct input_dev *dev)
pcap_ts->read_state << PCAP_ADC_TS_M_SHIFT);
}
-static int __devinit pcap_ts_probe(struct platform_device *pdev)
+static int pcap_ts_probe(struct platform_device *pdev)
{
struct input_dev *input_dev;
struct pcap_ts *pcap_ts;
@@ -202,7 +202,7 @@ fail:
return err;
}
-static int __devexit pcap_ts_remove(struct platform_device *pdev)
+static int pcap_ts_remove(struct platform_device *pdev)
{
struct pcap_ts *pcap_ts = platform_get_drvdata(pdev);
@@ -245,7 +245,7 @@ static const struct dev_pm_ops pcap_ts_pm_ops = {
static struct platform_driver pcap_ts_driver = {
.probe = pcap_ts_probe,
- .remove = __devexit_p(pcap_ts_remove),
+ .remove = pcap_ts_remove,
.driver = {
.name = "pcap-ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 953b4c105cad..6cc6b36663ff 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -125,7 +125,7 @@ static int pixcir_i2c_ts_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
-static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
+static int pixcir_i2c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
@@ -189,7 +189,7 @@ err_free_mem:
return error;
}
-static int __devexit pixcir_i2c_ts_remove(struct i2c_client *client)
+static int pixcir_i2c_ts_remove(struct i2c_client *client)
{
struct pixcir_i2c_ts_data *tsdata = i2c_get_clientdata(client);
@@ -218,7 +218,7 @@ static struct i2c_driver pixcir_i2c_ts_driver = {
.pm = &pixcir_dev_pm_ops,
},
.probe = pixcir_i2c_ts_probe,
- .remove = __devexit_p(pixcir_i2c_ts_remove),
+ .remove = pixcir_i2c_ts_remove,
.id_table = pixcir_i2c_ts_id,
};
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 549fa29548f8..b061af2c8376 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -238,7 +238,7 @@ static void s3c24xx_ts_select(struct s3c_adc_client *client, unsigned select)
* Initialise, find and allocate any resources we need to run and then
* register with the ADC and input systems.
*/
-static int __devinit s3c2410ts_probe(struct platform_device *pdev)
+static int s3c2410ts_probe(struct platform_device *pdev)
{
struct s3c2410_ts_mach_info *info;
struct device *dev = &pdev->dev;
@@ -365,7 +365,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
*
* Free up our state ready to be removed.
*/
-static int __devexit s3c2410ts_remove(struct platform_device *pdev)
+static int s3c2410ts_remove(struct platform_device *pdev)
{
free_irq(ts.irq_tc, ts.input);
del_timer_sync(&touch_timer);
@@ -430,7 +430,7 @@ static struct platform_driver s3c_ts_driver = {
},
.id_table = s3cts_driver_ids,
.probe = s3c2410ts_probe,
- .remove = __devexit_p(s3c2410ts_remove),
+ .remove = s3c2410ts_remove,
};
module_platform_driver(s3c_ts_driver);
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 6cb68a1981bf..d9d05e222428 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -139,7 +139,7 @@ end:
return IRQ_HANDLED;
}
-static int __devinit st1232_ts_probe(struct i2c_client *client,
+static int st1232_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct st1232_ts_data *ts;
@@ -206,7 +206,7 @@ err_free_mem:
return error;
}
-static int __devexit st1232_ts_remove(struct i2c_client *client)
+static int st1232_ts_remove(struct i2c_client *client)
{
struct st1232_ts_data *ts = i2c_get_clientdata(client);
@@ -255,7 +255,7 @@ static const struct i2c_device_id st1232_ts_id[] = {
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
#ifdef CONFIG_OF
-static const struct of_device_id st1232_ts_dt_ids[] __devinitconst = {
+static const struct of_device_id st1232_ts_dt_ids[] = {
{ .compatible = "sitronix,st1232", },
{ }
};
@@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
- .remove = __devexit_p(st1232_ts_remove),
+ .remove = st1232_ts_remove,
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 692b685720ce..59e81b00f244 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -1,4 +1,5 @@
-/* STMicroelectronics STMPE811 Touchscreen Driver
+/*
+ * STMicroelectronics STMPE811 Touchscreen Driver
*
* (C) 2010 Luotao Fu <l.fu@pengutronix.de>
* All rights reserved.
@@ -16,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -118,6 +120,7 @@ static void stmpe_work(struct work_struct *work)
__stmpe_reset_fifo(ts->stmpe);
input_report_abs(ts->idev, ABS_PRESSURE, 0);
+ input_report_key(ts->idev, BTN_TOUCH, 0);
input_sync(ts->idev);
}
@@ -151,6 +154,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
input_report_abs(ts->idev, ABS_X, x);
input_report_abs(ts->idev, ABS_Y, y);
input_report_abs(ts->idev, ABS_PRESSURE, z);
+ input_report_key(ts->idev, BTN_TOUCH, 1);
input_sync(ts->idev);
/* flush the FIFO after we have read out our values. */
@@ -166,7 +170,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit stmpe_init_hw(struct stmpe_touch *ts)
+static int stmpe_init_hw(struct stmpe_touch *ts)
{
int ret;
u8 adc_ctrl1, adc_ctrl1_mask, tsc_cfg, tsc_cfg_mask;
@@ -261,41 +265,18 @@ static void stmpe_ts_close(struct input_dev *dev)
STMPE_TSC_CTRL_TSC_EN, 0);
}
-static int __devinit stmpe_input_probe(struct platform_device *pdev)
+static void stmpe_ts_get_platform_info(struct platform_device *pdev,
+ struct stmpe_touch *ts)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
- struct stmpe_platform_data *pdata = stmpe->pdata;
- struct stmpe_touch *ts;
- struct input_dev *idev;
+ struct device_node *np = pdev->dev.of_node;
struct stmpe_ts_platform_data *ts_pdata = NULL;
- int ret;
- int ts_irq;
-
- ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
- if (ts_irq < 0)
- return ts_irq;
-
- ts = kzalloc(sizeof(*ts), GFP_KERNEL);
- if (!ts) {
- ret = -ENOMEM;
- goto err_out;
- }
- idev = input_allocate_device();
- if (!idev) {
- ret = -ENOMEM;
- goto err_free_ts;
- }
-
- platform_set_drvdata(pdev, ts);
ts->stmpe = stmpe;
- ts->idev = idev;
- ts->dev = &pdev->dev;
- if (pdata)
- ts_pdata = pdata->ts;
+ if (stmpe->pdata && stmpe->pdata->ts) {
+ ts_pdata = stmpe->pdata->ts;
- if (ts_pdata) {
ts->sample_time = ts_pdata->sample_time;
ts->mod_12b = ts_pdata->mod_12b;
ts->ref_sel = ts_pdata->ref_sel;
@@ -305,22 +286,71 @@ static int __devinit stmpe_input_probe(struct platform_device *pdev)
ts->settling = ts_pdata->settling;
ts->fraction_z = ts_pdata->fraction_z;
ts->i_drive = ts_pdata->i_drive;
+ } else if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "st,sample-time", &val))
+ ts->sample_time = val;
+ if (!of_property_read_u32(np, "st,mod-12b", &val))
+ ts->mod_12b = val;
+ if (!of_property_read_u32(np, "st,ref-sel", &val))
+ ts->ref_sel = val;
+ if (!of_property_read_u32(np, "st,adc-freq", &val))
+ ts->adc_freq = val;
+ if (!of_property_read_u32(np, "st,ave-ctrl", &val))
+ ts->ave_ctrl = val;
+ if (!of_property_read_u32(np, "st,touch-det-delay", &val))
+ ts->touch_det_delay = val;
+ if (!of_property_read_u32(np, "st,settling", &val))
+ ts->settling = val;
+ if (!of_property_read_u32(np, "st,fraction-z", &val))
+ ts->fraction_z = val;
+ if (!of_property_read_u32(np, "st,i-drive", &val))
+ ts->i_drive = val;
}
+}
+
+static int stmpe_input_probe(struct platform_device *pdev)
+{
+ struct stmpe_touch *ts;
+ struct input_dev *idev;
+ int error;
+ int ts_irq;
+
+ ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
+ if (ts_irq < 0)
+ return ts_irq;
+
+ ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ts);
+ ts->idev = idev;
+ ts->dev = &pdev->dev;
+
+ stmpe_ts_get_platform_info(pdev, ts);
INIT_DELAYED_WORK(&ts->work, stmpe_work);
- ret = request_threaded_irq(ts_irq, NULL, stmpe_ts_handler,
- IRQF_ONESHOT, STMPE_TS_NAME, ts);
- if (ret) {
+ error = devm_request_threaded_irq(&pdev->dev, ts_irq,
+ NULL, stmpe_ts_handler,
+ IRQF_ONESHOT, STMPE_TS_NAME, ts);
+ if (error) {
dev_err(&pdev->dev, "Failed to request IRQ %d\n", ts_irq);
- goto err_free_input;
+ return error;
}
- ret = stmpe_init_hw(ts);
- if (ret)
- goto err_free_irq;
+ error = stmpe_init_hw(ts);
+ if (error)
+ return error;
idev->name = STMPE_TS_NAME;
+ idev->phys = STMPE_TS_NAME"/input0";
idev->id.bustype = BUS_I2C;
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
@@ -334,40 +364,21 @@ static int __devinit stmpe_input_probe(struct platform_device *pdev)
input_set_abs_params(idev, ABS_Y, 0, XY_MASK, 0, 0);
input_set_abs_params(idev, ABS_PRESSURE, 0x0, 0xff, 0, 0);
- ret = input_register_device(idev);
- if (ret) {
+ error = input_register_device(idev);
+ if (error) {
dev_err(&pdev->dev, "Could not register input device\n");
- goto err_free_irq;
+ return error;
}
- return ret;
-
-err_free_irq:
- free_irq(ts_irq, ts);
-err_free_input:
- input_free_device(idev);
- platform_set_drvdata(pdev, NULL);
-err_free_ts:
- kfree(ts);
-err_out:
- return ret;
+ return 0;
}
-static int __devexit stmpe_ts_remove(struct platform_device *pdev)
+static int stmpe_ts_remove(struct platform_device *pdev)
{
struct stmpe_touch *ts = platform_get_drvdata(pdev);
- unsigned int ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
stmpe_disable(ts->stmpe, STMPE_BLOCK_TOUCHSCREEN);
- free_irq(ts_irq, ts);
-
- platform_set_drvdata(pdev, NULL);
-
- input_unregister_device(ts->idev);
-
- kfree(ts);
-
return 0;
}
@@ -377,7 +388,7 @@ static struct platform_driver stmpe_ts_driver = {
.owner = THIS_MODULE,
},
.probe = stmpe_input_probe,
- .remove = __devexit_p(stmpe_ts_remove),
+ .remove = stmpe_ts_remove,
};
module_platform_driver(stmpe_ts_driver);
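Besides the switch to managed resources and optional "st,*" device tree
properties, the stmpe-ts hunks above start pairing the pressure reports with
BTN_TOUCH, which generic evdev userspace typically uses to detect pen-down. A
short sketch of the report/release pairing as added above; idev, x, y and z
stand in for the driver's variables:

	/* Pen down: position, pressure and the touch key together. */
	input_report_abs(idev, ABS_X, x);
	input_report_abs(idev, ABS_Y, y);
	input_report_abs(idev, ABS_PRESSURE, z);
	input_report_key(idev, BTN_TOUCH, 1);
	input_sync(idev);

	/* Pen up: zero pressure and release the touch key. */
	input_report_abs(idev, ABS_PRESSURE, 0);
	input_report_key(idev, BTN_TOUCH, 0);
	input_sync(idev);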
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
new file mode 100644
index 000000000000..51e7b87827a4
--- /dev/null
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -0,0 +1,398 @@
+/*
+ * TI Touch Screen driver
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/input/ti_am335x_tsc.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+
+#define ADCFSM_STEPID 0x10
+#define SEQ_SETTLE 275
+#define MAX_12BIT ((1 << 12) - 1)
+
+struct titsc {
+ struct input_dev *input;
+ struct ti_tscadc_dev *mfd_tscadc;
+ unsigned int irq;
+ unsigned int wires;
+ unsigned int x_plate_resistance;
+ bool pen_down;
+ int steps_to_configure;
+};
+
+static unsigned int titsc_readl(struct titsc *ts, unsigned int reg)
+{
+ return readl(ts->mfd_tscadc->tscadc_base + reg);
+}
+
+static void titsc_writel(struct titsc *tsc, unsigned int reg,
+ unsigned int val)
+{
+ writel(val, tsc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void titsc_step_config(struct titsc *ts_dev)
+{
+ unsigned int config;
+ int i, total_steps;
+
+ /* Configure the Step registers */
+ total_steps = 2 * ts_dev->steps_to_configure;
+
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_XPP;
+ switch (ts_dev->wires) {
+ case 4:
+ config |= STEPCONFIG_INP_AN2 | STEPCONFIG_XNN;
+ break;
+ case 5:
+ config |= STEPCONFIG_YNN |
+ STEPCONFIG_INP_AN4 | STEPCONFIG_XNN |
+ STEPCONFIG_YPP;
+ break;
+ case 8:
+ config |= STEPCONFIG_INP_AN2 | STEPCONFIG_XNN;
+ break;
+ }
+
+ for (i = 1; i <= ts_dev->steps_to_configure; i++) {
+ titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ }
+
+ config = 0;
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_YNN |
+ STEPCONFIG_INM_ADCREFM | STEPCONFIG_FIFO1;
+ switch (ts_dev->wires) {
+ case 4:
+ config |= STEPCONFIG_YPP;
+ break;
+ case 5:
+ config |= STEPCONFIG_XPP | STEPCONFIG_INP_AN4 |
+ STEPCONFIG_XNP | STEPCONFIG_YPN;
+ break;
+ case 8:
+ config |= STEPCONFIG_YPP;
+ break;
+ }
+
+ for (i = (ts_dev->steps_to_configure + 1); i <= total_steps; i++) {
+ titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ }
+
+ config = 0;
+ /* Charge step configuration */
+ config = STEPCONFIG_XPP | STEPCONFIG_YNN |
+ STEPCHARGE_RFP_XPUL | STEPCHARGE_RFM_XNUR |
+ STEPCHARGE_INM_AN1 | STEPCHARGE_INP_AN1;
+
+ titsc_writel(ts_dev, REG_CHARGECONFIG, config);
+ titsc_writel(ts_dev, REG_CHARGEDELAY, CHARGEDLY_OPENDLY);
+
+ config = 0;
+ /* Configure to calculate pressure */
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_YPP |
+ STEPCONFIG_XNN | STEPCONFIG_INM_ADCREFM;
+ titsc_writel(ts_dev, REG_STEPCONFIG(total_steps + 1), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(total_steps + 1),
+ STEPCONFIG_OPENDLY);
+
+ config |= STEPCONFIG_INP_AN3 | STEPCONFIG_FIFO1;
+ titsc_writel(ts_dev, REG_STEPCONFIG(total_steps + 2), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(total_steps + 2),
+ STEPCONFIG_OPENDLY);
+
+ titsc_writel(ts_dev, REG_SE, STPENB_STEPENB_TC);
+}
+
+static void titsc_read_coordinates(struct titsc *ts_dev,
+ unsigned int *x, unsigned int *y)
+{
+ unsigned int fifocount = titsc_readl(ts_dev, REG_FIFO0CNT);
+ unsigned int prev_val_x = ~0, prev_val_y = ~0;
+ unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
+ unsigned int read, diff;
+ unsigned int i, channel;
+
+ /*
+ * Delta filter is used to remove large variations in sampled
+ * values from ADC. The filter tries to predict where the next
+ * coordinate could be. This is done by taking a previous
+ * coordinate and subtracting it from the current one. The algorithm
+ * then compares the difference with the previous difference; if it is
+ * smaller, the value is reported to the input subsystem.
+ */
+ for (i = 0; i < fifocount - 1; i++) {
+ read = titsc_readl(ts_dev, REG_FIFO0);
+ channel = read & 0xf0000;
+ channel = channel >> 0x10;
+ if ((channel >= 0) && (channel < ts_dev->steps_to_configure)) {
+ read &= 0xfff;
+ diff = abs(read - prev_val_x);
+ if (diff < prev_diff_x) {
+ prev_diff_x = diff;
+ *x = read;
+ }
+ prev_val_x = read;
+ }
+
+ read = titsc_readl(ts_dev, REG_FIFO1);
+ channel = read & 0xf0000;
+ channel = channel >> 0x10;
+ if ((channel >= ts_dev->steps_to_configure) &&
+ (channel < (2 * ts_dev->steps_to_configure - 1))) {
+ read &= 0xfff;
+ diff = abs(read - prev_val_y);
+ if (diff < prev_diff_y) {
+ prev_diff_y = diff;
+ *y = read;
+ }
+ prev_val_y = read;
+ }
+ }
+}
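/*
 * Illustrative sketch, not part of the patch: the delta filter above,
 * reduced to a self-contained helper (delta_filter_pick is a hypothetical
 * name). Out of a run of samples it keeps the one whose distance to the
 * previously seen sample is smallest, which suppresses isolated ADC
 * outliers while still tracking a slowly moving coordinate.
 */
static unsigned int delta_filter_pick(const unsigned int *samples, int count)
{
	unsigned int prev_val = ~0U, prev_diff = ~0U;
	unsigned int best = 0;
	unsigned int diff;
	int i;

	for (i = 0; i < count; i++) {
		diff = samples[i] > prev_val ? samples[i] - prev_val
					     : prev_val - samples[i];
		if (diff < prev_diff) {
			prev_diff = diff;
			best = samples[i];
		}
		prev_val = samples[i];
	}

	return best;
}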
+
+static irqreturn_t titsc_irq(int irq, void *dev)
+{
+ struct titsc *ts_dev = dev;
+ struct input_dev *input_dev = ts_dev->input;
+ unsigned int status, irqclr = 0;
+ unsigned int x = 0, y = 0;
+ unsigned int z1, z2, z;
+ unsigned int fsm;
+ unsigned int fifo1count, fifo0count;
+ int i;
+
+ status = titsc_readl(ts_dev, REG_IRQSTATUS);
+ if (status & IRQENB_FIFO0THRES) {
+ titsc_read_coordinates(ts_dev, &x, &y);
+
+ z1 = titsc_readl(ts_dev, REG_FIFO0) & 0xfff;
+ z2 = titsc_readl(ts_dev, REG_FIFO1) & 0xfff;
+
+ fifo1count = titsc_readl(ts_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++)
+ titsc_readl(ts_dev, REG_FIFO1);
+
+ fifo0count = titsc_readl(ts_dev, REG_FIFO0CNT);
+ for (i = 0; i < fifo0count; i++)
+ titsc_readl(ts_dev, REG_FIFO0);
+
+ if (ts_dev->pen_down && z1 != 0 && z2 != 0) {
+ /*
+ * Calculate pressure using formula
+ * Resistance(touch) = x plate resistance *
+ * x position/4096 * ((z2 / z1) - 1)
+ */
+ z = z2 - z1;
+ z *= x;
+ z *= ts_dev->x_plate_resistance;
+ z /= z1;
+ z = (z + 2047) >> 12;
+
+ if (z <= MAX_12BIT) {
+ input_report_abs(input_dev, ABS_X, x);
+ input_report_abs(input_dev, ABS_Y, y);
+ input_report_abs(input_dev, ABS_PRESSURE, z);
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ input_sync(input_dev);
+ }
+ }
+ irqclr |= IRQENB_FIFO0THRES;
+ }
+
+ /*
+ * Wait for the sequencer to settle so that its
+ * state can be read correctly.
+ */
+ udelay(SEQ_SETTLE);
+
+ status = titsc_readl(ts_dev, REG_RAWIRQSTATUS);
+ if (status & IRQENB_PENUP) {
+ /* Pen up event */
+ fsm = titsc_readl(ts_dev, REG_ADCFSM);
+ if (fsm == ADCFSM_STEPID) {
+ ts_dev->pen_down = false;
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+ } else {
+ ts_dev->pen_down = true;
+ }
+ irqclr |= IRQENB_PENUP;
+ }
+
+ titsc_writel(ts_dev, REG_IRQSTATUS, irqclr);
+
+ titsc_writel(ts_dev, REG_SE, STPENB_STEPENB_TC);
+ return IRQ_HANDLED;
+}
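/*
 * Illustrative sketch, not part of the patch: the pressure computation
 * above in isolation (touch_resistance is a hypothetical name). It is the
 * fixed-point form of
 *
 *   Rtouch = Rx_plate * (x / 4096) * ((z2 / z1) - 1)
 *
 * e.g. with Rx_plate = 400, x = 2048, z1 = 200, z2 = 300 this gives
 * 400 * 0.5 * 0.5 = 100, and the integer version below yields 100 as well.
 * The "+ 2047" before the shift approximately rounds the final divide by
 * 4096; possible 32-bit overflow for extreme plate resistances is ignored
 * here, as in the driver.
 */
static unsigned int touch_resistance(unsigned int x, unsigned int z1,
				     unsigned int z2,
				     unsigned int x_plate_resistance)
{
	unsigned int z;

	z = z2 - z1;
	z *= x;
	z *= x_plate_resistance;
	z /= z1;

	return (z + 2047) >> 12;
}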
+
+/*
+ * The functions for inserting/removing driver as a module.
+ */
+
+static int titsc_probe(struct platform_device *pdev)
+{
+ struct titsc *ts_dev;
+ struct input_dev *input_dev;
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct mfd_tscadc_board *pdata;
+ int err;
+
+ pdata = tscadc_dev->dev->platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for device */
+ ts_dev = kzalloc(sizeof(struct titsc), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!ts_dev || !input_dev) {
+ dev_err(&pdev->dev, "failed to allocate memory.\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tscadc_dev->tsc = ts_dev;
+ ts_dev->mfd_tscadc = tscadc_dev;
+ ts_dev->input = input_dev;
+ ts_dev->irq = tscadc_dev->irq;
+ ts_dev->wires = pdata->tsc_init->wires;
+ ts_dev->x_plate_resistance = pdata->tsc_init->x_plate_resistance;
+ ts_dev->steps_to_configure = pdata->tsc_init->steps_to_configure;
+
+ err = request_irq(ts_dev->irq, titsc_irq,
+ 0, pdev->dev.driver->name, ts_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to allocate irq.\n");
+ goto err_free_mem;
+ }
+
+ titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO0THRES);
+ titsc_step_config(ts_dev);
+ titsc_writel(ts_dev, REG_FIFO0THR, ts_dev->steps_to_configure);
+
+ input_dev->name = "ti-tsc";
+ input_dev->dev.parent = &pdev->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
+
+ /* register to the input system */
+ err = input_register_device(input_dev);
+ if (err)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, ts_dev);
+ return 0;
+
+err_free_irq:
+ free_irq(ts_dev->irq, ts_dev);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(ts_dev);
+ return err;
+}
+
+static int titsc_remove(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+
+ free_irq(ts_dev->irq, ts_dev);
+
+ input_unregister_device(ts_dev->input);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ts_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int titsc_suspend(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+ unsigned int idle;
+
+ if (device_may_wakeup(tscadc_dev->dev)) {
+ idle = titsc_readl(ts_dev, REG_IRQENABLE);
+ titsc_writel(ts_dev, REG_IRQENABLE,
+ (idle | IRQENB_HW_PEN));
+ titsc_writel(ts_dev, REG_IRQWAKEUP, IRQWKUP_ENB);
+ }
+ return 0;
+}
+
+static int titsc_resume(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+
+ if (device_may_wakeup(tscadc_dev->dev)) {
+ titsc_writel(ts_dev, REG_IRQWAKEUP,
+ 0x00);
+ titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
+ }
+ titsc_step_config(ts_dev);
+ titsc_writel(ts_dev, REG_FIFO0THR,
+ ts_dev->steps_to_configure);
+ return 0;
+}
+
+static const struct dev_pm_ops titsc_pm_ops = {
+ .suspend = titsc_suspend,
+ .resume = titsc_resume,
+};
+#define TITSC_PM_OPS (&titsc_pm_ops)
+#else
+#define TITSC_PM_OPS NULL
+#endif
+
+static struct platform_driver ti_tsc_driver = {
+ .probe = titsc_probe,
+ .remove = titsc_remove,
+ .driver = {
+ .name = "tsc",
+ .owner = THIS_MODULE,
+ .pm = TITSC_PM_OPS,
+ },
+};
+module_platform_driver(ti_tsc_driver);
+
+MODULE_DESCRIPTION("TI touchscreen controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/ti_tscadc.c b/drivers/input/touchscreen/ti_tscadc.c
deleted file mode 100644
index d229c741d544..000000000000
--- a/drivers/input/touchscreen/ti_tscadc.c
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * TI Touch Screen driver
- *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/input/ti_tscadc.h>
-#include <linux/delay.h>
-
-#define REG_IRQEOI 0x020
-#define REG_RAWIRQSTATUS 0x024
-#define REG_IRQSTATUS 0x028
-#define REG_IRQENABLE 0x02C
-#define REG_IRQWAKEUP 0x034
-#define REG_CTRL 0x040
-#define REG_ADCFSM 0x044
-#define REG_CLKDIV 0x04C
-#define REG_SE 0x054
-#define REG_IDLECONFIG 0x058
-#define REG_CHARGECONFIG 0x05C
-#define REG_CHARGEDELAY 0x060
-#define REG_STEPCONFIG(n) (0x64 + ((n - 1) * 8))
-#define REG_STEPDELAY(n) (0x68 + ((n - 1) * 8))
-#define REG_STEPCONFIG13 0x0C4
-#define REG_STEPDELAY13 0x0C8
-#define REG_STEPCONFIG14 0x0CC
-#define REG_STEPDELAY14 0x0D0
-#define REG_FIFO0CNT 0xE4
-#define REG_FIFO1THR 0xF4
-#define REG_FIFO0 0x100
-#define REG_FIFO1 0x200
-
-/* Register Bitfields */
-#define IRQWKUP_ENB BIT(0)
-#define STPENB_STEPENB 0x7FFF
-#define IRQENB_FIFO1THRES BIT(5)
-#define IRQENB_PENUP BIT(9)
-#define STEPCONFIG_MODE_HWSYNC 0x2
-#define STEPCONFIG_SAMPLES_AVG (1 << 4)
-#define STEPCONFIG_XPP (1 << 5)
-#define STEPCONFIG_XNN (1 << 6)
-#define STEPCONFIG_YPP (1 << 7)
-#define STEPCONFIG_YNN (1 << 8)
-#define STEPCONFIG_XNP (1 << 9)
-#define STEPCONFIG_YPN (1 << 10)
-#define STEPCONFIG_INM (1 << 18)
-#define STEPCONFIG_INP (1 << 20)
-#define STEPCONFIG_INP_5 (1 << 21)
-#define STEPCONFIG_FIFO1 (1 << 26)
-#define STEPCONFIG_OPENDLY 0xff
-#define STEPCONFIG_Z1 (3 << 19)
-#define STEPIDLE_INP (1 << 22)
-#define STEPCHARGE_RFP (1 << 12)
-#define STEPCHARGE_INM (1 << 15)
-#define STEPCHARGE_INP (1 << 19)
-#define STEPCHARGE_RFM (1 << 23)
-#define STEPCHARGE_DELAY 0x1
-#define CNTRLREG_TSCSSENB (1 << 0)
-#define CNTRLREG_STEPID (1 << 1)
-#define CNTRLREG_STEPCONFIGWRT (1 << 2)
-#define CNTRLREG_4WIRE (1 << 5)
-#define CNTRLREG_5WIRE (1 << 6)
-#define CNTRLREG_8WIRE (3 << 5)
-#define CNTRLREG_TSCENB (1 << 7)
-#define ADCFSM_STEPID 0x10
-
-#define SEQ_SETTLE 275
-#define ADC_CLK 3000000
-#define MAX_12BIT ((1 << 12) - 1)
-#define TSCADC_DELTA_X 15
-#define TSCADC_DELTA_Y 15
-
-struct tscadc {
- struct input_dev *input;
- struct clk *tsc_ick;
- void __iomem *tsc_base;
- unsigned int irq;
- unsigned int wires;
- unsigned int x_plate_resistance;
- bool pen_down;
-};
-
-static unsigned int tscadc_readl(struct tscadc *ts, unsigned int reg)
-{
- return readl(ts->tsc_base + reg);
-}
-
-static void tscadc_writel(struct tscadc *tsc, unsigned int reg,
- unsigned int val)
-{
- writel(val, tsc->tsc_base + reg);
-}
-
-static void tscadc_step_config(struct tscadc *ts_dev)
-{
- unsigned int config;
- int i;
-
- /* Configure the Step registers */
-
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_XPP;
- switch (ts_dev->wires) {
- case 4:
- config |= STEPCONFIG_INP | STEPCONFIG_XNN;
- break;
- case 5:
- config |= STEPCONFIG_YNN |
- STEPCONFIG_INP_5 | STEPCONFIG_XNN |
- STEPCONFIG_YPP;
- break;
- case 8:
- config |= STEPCONFIG_INP | STEPCONFIG_XNN;
- break;
- }
-
- for (i = 1; i < 7; i++) {
- tscadc_writel(ts_dev, REG_STEPCONFIG(i), config);
- tscadc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
- }
-
- config = 0;
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_YNN |
- STEPCONFIG_INM | STEPCONFIG_FIFO1;
- switch (ts_dev->wires) {
- case 4:
- config |= STEPCONFIG_YPP;
- break;
- case 5:
- config |= STEPCONFIG_XPP | STEPCONFIG_INP_5 |
- STEPCONFIG_XNP | STEPCONFIG_YPN;
- break;
- case 8:
- config |= STEPCONFIG_YPP;
- break;
- }
-
- for (i = 7; i < 13; i++) {
- tscadc_writel(ts_dev, REG_STEPCONFIG(i), config);
- tscadc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
- }
-
- config = 0;
- /* Charge step configuration */
- config = STEPCONFIG_XPP | STEPCONFIG_YNN |
- STEPCHARGE_RFP | STEPCHARGE_RFM |
- STEPCHARGE_INM | STEPCHARGE_INP;
-
- tscadc_writel(ts_dev, REG_CHARGECONFIG, config);
- tscadc_writel(ts_dev, REG_CHARGEDELAY, STEPCHARGE_DELAY);
-
- config = 0;
- /* Configure to calculate pressure */
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_YPP |
- STEPCONFIG_XNN | STEPCONFIG_INM;
- tscadc_writel(ts_dev, REG_STEPCONFIG13, config);
- tscadc_writel(ts_dev, REG_STEPDELAY13, STEPCONFIG_OPENDLY);
-
- config |= STEPCONFIG_Z1 | STEPCONFIG_FIFO1;
- tscadc_writel(ts_dev, REG_STEPCONFIG14, config);
- tscadc_writel(ts_dev, REG_STEPDELAY14, STEPCONFIG_OPENDLY);
-
- tscadc_writel(ts_dev, REG_SE, STPENB_STEPENB);
-}
-
-static void tscadc_idle_config(struct tscadc *ts_config)
-{
- unsigned int idleconfig;
-
- idleconfig = STEPCONFIG_YNN |
- STEPCONFIG_INM |
- STEPCONFIG_YPN | STEPIDLE_INP;
- tscadc_writel(ts_config, REG_IDLECONFIG, idleconfig);
-}
-
-static void tscadc_read_coordinates(struct tscadc *ts_dev,
- unsigned int *x, unsigned int *y)
-{
- unsigned int fifocount = tscadc_readl(ts_dev, REG_FIFO0CNT);
- unsigned int prev_val_x = ~0, prev_val_y = ~0;
- unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
- unsigned int read, diff;
- unsigned int i;
-
- /*
- * Delta filter is used to remove large variations in sampled
- * values from ADC. The filter tries to predict where the next
- * coordinate could be. This is done by taking a previous
- * coordinate and subtracting it form current one. Further the
- * algorithm compares the difference with that of a present value,
- * if true the value is reported to the sub system.
- */
- for (i = 0; i < fifocount - 1; i++) {
- read = tscadc_readl(ts_dev, REG_FIFO0) & 0xfff;
- diff = abs(read - prev_val_x);
- if (diff < prev_diff_x) {
- prev_diff_x = diff;
- *x = read;
- }
- prev_val_x = read;
-
- read = tscadc_readl(ts_dev, REG_FIFO1) & 0xfff;
- diff = abs(read - prev_val_y);
- if (diff < prev_diff_y) {
- prev_diff_y = diff;
- *y = read;
- }
- prev_val_y = read;
- }
-}
-
-static irqreturn_t tscadc_irq(int irq, void *dev)
-{
- struct tscadc *ts_dev = dev;
- struct input_dev *input_dev = ts_dev->input;
- unsigned int status, irqclr = 0;
- unsigned int x = 0, y = 0;
- unsigned int z1, z2, z;
- unsigned int fsm;
-
- status = tscadc_readl(ts_dev, REG_IRQSTATUS);
- if (status & IRQENB_FIFO1THRES) {
- tscadc_read_coordinates(ts_dev, &x, &y);
-
- z1 = tscadc_readl(ts_dev, REG_FIFO0) & 0xfff;
- z2 = tscadc_readl(ts_dev, REG_FIFO1) & 0xfff;
-
- if (ts_dev->pen_down && z1 != 0 && z2 != 0) {
- /*
- * Calculate pressure using formula
- * Resistance(touch) = x plate resistance *
- * x postion/4096 * ((z2 / z1) - 1)
- */
- z = z2 - z1;
- z *= x;
- z *= ts_dev->x_plate_resistance;
- z /= z1;
- z = (z + 2047) >> 12;
-
- if (z <= MAX_12BIT) {
- input_report_abs(input_dev, ABS_X, x);
- input_report_abs(input_dev, ABS_Y, y);
- input_report_abs(input_dev, ABS_PRESSURE, z);
- input_report_key(input_dev, BTN_TOUCH, 1);
- input_sync(input_dev);
- }
- }
- irqclr |= IRQENB_FIFO1THRES;
- }
-
- /*
- * Time for sequencer to settle, to read
- * correct state of the sequencer.
- */
- udelay(SEQ_SETTLE);
-
- status = tscadc_readl(ts_dev, REG_RAWIRQSTATUS);
- if (status & IRQENB_PENUP) {
- /* Pen up event */
- fsm = tscadc_readl(ts_dev, REG_ADCFSM);
- if (fsm == ADCFSM_STEPID) {
- ts_dev->pen_down = false;
- input_report_key(input_dev, BTN_TOUCH, 0);
- input_report_abs(input_dev, ABS_PRESSURE, 0);
- input_sync(input_dev);
- } else {
- ts_dev->pen_down = true;
- }
- irqclr |= IRQENB_PENUP;
- }
-
- tscadc_writel(ts_dev, REG_IRQSTATUS, irqclr);
- /* check pending interrupts */
- tscadc_writel(ts_dev, REG_IRQEOI, 0x0);
-
- tscadc_writel(ts_dev, REG_SE, STPENB_STEPENB);
- return IRQ_HANDLED;
-}
-
-/*
- * The functions for inserting/removing driver as a module.
- */
-
-static int __devinit tscadc_probe(struct platform_device *pdev)
-{
- const struct tsc_data *pdata = pdev->dev.platform_data;
- struct resource *res;
- struct tscadc *ts_dev;
- struct input_dev *input_dev;
- struct clk *clk;
- int err;
- int clk_value, ctrl, irq;
-
- if (!pdata) {
- dev_err(&pdev->dev, "missing platform data.\n");
- return -EINVAL;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no memory resource defined.\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq ID is specified.\n");
- return -EINVAL;
- }
-
- /* Allocate memory for device */
- ts_dev = kzalloc(sizeof(struct tscadc), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts_dev || !input_dev) {
- dev_err(&pdev->dev, "failed to allocate memory.\n");
- err = -ENOMEM;
- goto err_free_mem;
- }
-
- ts_dev->input = input_dev;
- ts_dev->irq = irq;
- ts_dev->wires = pdata->wires;
- ts_dev->x_plate_resistance = pdata->x_plate_resistance;
-
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "failed to reserve registers.\n");
- err = -EBUSY;
- goto err_free_mem;
- }
-
- ts_dev->tsc_base = ioremap(res->start, resource_size(res));
- if (!ts_dev->tsc_base) {
- dev_err(&pdev->dev, "failed to map registers.\n");
- err = -ENOMEM;
- goto err_release_mem_region;
- }
-
- err = request_irq(ts_dev->irq, tscadc_irq,
- 0, pdev->dev.driver->name, ts_dev);
- if (err) {
- dev_err(&pdev->dev, "failed to allocate irq.\n");
- goto err_unmap_regs;
- }
-
- ts_dev->tsc_ick = clk_get(&pdev->dev, "adc_tsc_ick");
- if (IS_ERR(ts_dev->tsc_ick)) {
- dev_err(&pdev->dev, "failed to get TSC ick\n");
- goto err_free_irq;
- }
- clk_enable(ts_dev->tsc_ick);
-
- clk = clk_get(&pdev->dev, "adc_tsc_fck");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get TSC fck\n");
- err = PTR_ERR(clk);
- goto err_disable_clk;
- }
-
- clk_value = clk_get_rate(clk) / ADC_CLK;
- clk_put(clk);
-
- if (clk_value < 7) {
- dev_err(&pdev->dev, "clock input less than min clock requirement\n");
- goto err_disable_clk;
- }
- /* CLKDIV needs to be configured to the value minus 1 */
- tscadc_writel(ts_dev, REG_CLKDIV, clk_value - 1);
-
- /* Enable wake-up of the SoC using touchscreen */
- tscadc_writel(ts_dev, REG_IRQWAKEUP, IRQWKUP_ENB);
-
- ctrl = CNTRLREG_STEPCONFIGWRT |
- CNTRLREG_TSCENB |
- CNTRLREG_STEPID;
- switch (ts_dev->wires) {
- case 4:
- ctrl |= CNTRLREG_4WIRE;
- break;
- case 5:
- ctrl |= CNTRLREG_5WIRE;
- break;
- case 8:
- ctrl |= CNTRLREG_8WIRE;
- break;
- }
- tscadc_writel(ts_dev, REG_CTRL, ctrl);
-
- tscadc_idle_config(ts_dev);
- tscadc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);
- tscadc_step_config(ts_dev);
- tscadc_writel(ts_dev, REG_FIFO1THR, 6);
-
- ctrl |= CNTRLREG_TSCSSENB;
- tscadc_writel(ts_dev, REG_CTRL, ctrl);
-
- input_dev->name = "ti-tsc-adc";
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
-
- /* register to the input system */
- err = input_register_device(input_dev);
- if (err)
- goto err_disable_clk;
-
- platform_set_drvdata(pdev, ts_dev);
- return 0;
-
-err_disable_clk:
- clk_disable(ts_dev->tsc_ick);
- clk_put(ts_dev->tsc_ick);
-err_free_irq:
- free_irq(ts_dev->irq, ts_dev);
-err_unmap_regs:
- iounmap(ts_dev->tsc_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts_dev);
- return err;
-}
-
-static int __devexit tscadc_remove(struct platform_device *pdev)
-{
- struct tscadc *ts_dev = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(ts_dev->irq, ts_dev);
-
- input_unregister_device(ts_dev->input);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap(ts_dev->tsc_base);
- release_mem_region(res->start, resource_size(res));
-
- clk_disable(ts_dev->tsc_ick);
- clk_put(ts_dev->tsc_ick);
-
- kfree(ts_dev);
-
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
-static struct platform_driver ti_tsc_driver = {
- .probe = tscadc_probe,
- .remove = __devexit_p(tscadc_remove),
- .driver = {
- .name = "tsc",
- .owner = THIS_MODULE,
- },
-};
-module_platform_driver(ti_tsc_driver);
-
-MODULE_DESCRIPTION("TI touchscreen controller driver");
-MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 368d2c6cf780..acfb87607b87 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -243,7 +243,7 @@ static void tsc_stop(struct input_dev *dev)
clk_disable(ts->clk);
}
-static int __devinit tsc_probe(struct platform_device *pdev)
+static int tsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tsc_data *ts;
@@ -357,7 +357,7 @@ error_res:
return error;
}
-static int __devexit tsc_remove(struct platform_device *pdev)
+static int tsc_remove(struct platform_device *pdev)
{
struct tsc_data *ts = platform_get_drvdata(pdev);
@@ -374,7 +374,7 @@ static int __devexit tsc_remove(struct platform_device *pdev)
static struct platform_driver tsc_driver = {
.probe = tsc_probe,
- .remove = __devexit_p(tsc_remove),
+ .remove = tsc_remove,
.driver.name = "tnetv107x-ts",
.driver.owner = THIS_MODULE,
};
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index f7eda3d00fad..820a066c3b8a 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -345,7 +345,7 @@ err0:
return error;
}
-static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
+static int tps6507x_ts_remove(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
struct tps6507x_ts *tsc = tps6507x_dev->ts;
@@ -367,7 +367,7 @@ static struct platform_driver tps6507x_ts_driver = {
.owner = THIS_MODULE,
},
.probe = tps6507x_ts_probe,
- .remove = __devexit_p(tps6507x_ts_remove),
+ .remove = tps6507x_ts_remove,
};
module_platform_driver(tps6507x_ts_driver);
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 5ce3fa8ce646..7213e8b07e79 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -555,7 +555,7 @@ static void tsc2005_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
+static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
{
tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, false);
tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, false);
@@ -569,7 +569,7 @@ static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
}
-static int __devinit tsc2005_probe(struct spi_device *spi)
+static int tsc2005_probe(struct spi_device *spi)
{
const struct tsc2005_platform_data *pdata = spi->dev.platform_data;
struct tsc2005 *ts;
@@ -686,7 +686,7 @@ err_free_mem:
return error;
}
-static int __devexit tsc2005_remove(struct spi_device *spi)
+static int tsc2005_remove(struct spi_device *spi)
{
struct tsc2005 *ts = spi_get_drvdata(spi);
@@ -745,7 +745,7 @@ static struct spi_driver tsc2005_driver = {
.pm = &tsc2005_pm_ops,
},
.probe = tsc2005_probe,
- .remove = __devexit_p(tsc2005_remove),
+ .remove = tsc2005_remove,
};
module_spi_driver(tsc2005_driver);
@@ -753,3 +753,4 @@ module_spi_driver(tsc2005_driver);
MODULE_AUTHOR("Lauri Leukkunen <lauri.leukkunen@nokia.com>");
MODULE_DESCRIPTION("TSC2005 Touchscreen Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tsc2005");
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 1473d2382afd..0b67ba476b4c 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -273,7 +273,7 @@ static void tsc2007_close(struct input_dev *input_dev)
tsc2007_stop(ts);
}
-static int __devinit tsc2007_probe(struct i2c_client *client,
+static int tsc2007_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tsc2007 *ts;
@@ -366,7 +366,7 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
return err;
}
-static int __devexit tsc2007_remove(struct i2c_client *client)
+static int tsc2007_remove(struct i2c_client *client)
{
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
@@ -396,7 +396,7 @@ static struct i2c_driver tsc2007_driver = {
},
.id_table = tsc2007_idtable,
.probe = tsc2007_probe,
- .remove = __devexit_p(tsc2007_remove),
+ .remove = tsc2007_remove,
};
module_i2c_driver(tsc2007_driver);
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 46e83ad53f43..1271f97b4079 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -274,7 +274,7 @@ static void ucb1400_ts_close(struct input_dev *idev)
* Try to probe our interrupt, rather than relying on lots of
* hard-coded machine dependencies.
*/
-static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
+static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
struct platform_device *pdev)
{
unsigned long mask, timeout;
@@ -318,7 +318,7 @@ static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
return 0;
}
-static int __devinit ucb1400_ts_probe(struct platform_device *pdev)
+static int ucb1400_ts_probe(struct platform_device *pdev)
{
struct ucb1400_ts *ucb = pdev->dev.platform_data;
int error, x_res, y_res;
@@ -397,7 +397,7 @@ err:
return error;
}
-static int __devexit ucb1400_ts_remove(struct platform_device *pdev)
+static int ucb1400_ts_remove(struct platform_device *pdev)
{
struct ucb1400_ts *ucb = pdev->dev.platform_data;
@@ -442,7 +442,7 @@ static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
static struct platform_driver ucb1400_ts_driver = {
.probe = ucb1400_ts_probe,
- .remove = __devexit_p(ucb1400_ts_remove),
+ .remove = ucb1400_ts_remove,
.driver = {
.name = "ucb1400_ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index 9396b21d0e8f..d2ef8f05c66e 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -215,7 +215,7 @@ static void w90p910_close(struct input_dev *dev)
clk_disable(w90p910_ts->clk);
}
-static int __devinit w90x900ts_probe(struct platform_device *pdev)
+static int w90x900ts_probe(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts;
struct input_dev *input_dev;
@@ -301,7 +301,7 @@ fail1: input_free_device(input_dev);
return err;
}
-static int __devexit w90x900ts_remove(struct platform_device *pdev)
+static int w90x900ts_remove(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts = platform_get_drvdata(pdev);
struct resource *res;
@@ -325,7 +325,7 @@ static int __devexit w90x900ts_remove(struct platform_device *pdev)
static struct platform_driver w90x900ts_driver = {
.probe = w90x900ts_probe,
- .remove = __devexit_p(w90x900ts_remove),
+ .remove = w90x900ts_remove,
.driver = {
.name = "nuc900-ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 0c01657132fd..bf0d07620bac 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -144,7 +144,7 @@ static void wacom_i2c_close(struct input_dev *dev)
disable_irq(client->irq);
}
-static int __devinit wacom_i2c_probe(struct i2c_client *client,
+static int wacom_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct wacom_i2c *wac_i2c;
@@ -225,7 +225,7 @@ err_free_mem:
return error;
}
-static int __devexit wacom_i2c_remove(struct i2c_client *client)
+static int wacom_i2c_remove(struct i2c_client *client)
{
struct wacom_i2c *wac_i2c = i2c_get_clientdata(client);
@@ -272,7 +272,7 @@ static struct i2c_driver wacom_i2c_driver = {
},
.probe = wacom_i2c_probe,
- .remove = __devexit_p(wacom_i2c_remove),
+ .remove = wacom_i2c_remove,
.id_table = wacom_i2c_id,
};
module_i2c_driver(wacom_i2c_driver);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 52abb98a8ae5..6be2eb6a153a 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -233,7 +233,7 @@ static void wm831x_ts_input_close(struct input_dev *idev)
}
}
-static __devinit int wm831x_ts_probe(struct platform_device *pdev)
+static int wm831x_ts_probe(struct platform_device *pdev)
{
struct wm831x_ts *wm831x_ts;
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
@@ -245,8 +245,9 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
if (core_pdata)
pdata = core_pdata->touch;
- wm831x_ts = kzalloc(sizeof(struct wm831x_ts), GFP_KERNEL);
- input_dev = input_allocate_device();
+ wm831x_ts = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ts),
+ GFP_KERNEL);
+ input_dev = devm_input_allocate_device(&pdev->dev);
if (!wm831x_ts || !input_dev) {
error = -ENOMEM;
goto err_alloc;
@@ -375,22 +376,17 @@ err_pd_irq:
err_data_irq:
free_irq(wm831x_ts->data_irq, wm831x_ts);
err_alloc:
- input_free_device(input_dev);
- kfree(wm831x_ts);
return error;
}
-static __devexit int wm831x_ts_remove(struct platform_device *pdev)
+static int wm831x_ts_remove(struct platform_device *pdev)
{
struct wm831x_ts *wm831x_ts = platform_get_drvdata(pdev);
free_irq(wm831x_ts->pd_irq, wm831x_ts);
free_irq(wm831x_ts->data_irq, wm831x_ts);
- input_unregister_device(wm831x_ts->input_dev);
- kfree(wm831x_ts);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -400,7 +396,7 @@ static struct platform_driver wm831x_ts_driver = {
.owner = THIS_MODULE,
},
.probe = wm831x_ts_probe,
- .remove = __devexit_p(wm831x_ts_remove),
+ .remove = wm831x_ts_remove,
};
module_platform_driver(wm831x_ts_driver);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e39f9dbf297b..01068987809d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -65,8 +65,8 @@ config AMD_IOMMU_STATS
If unsure, say N.
config AMD_IOMMU_V2
- tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)"
- depends on AMD_IOMMU && PROFILING && EXPERIMENTAL
+ tristate "AMD IOMMU Version 2 driver"
+ depends on AMD_IOMMU && PROFILING
select MMU_NOTIFIER
---help---
This option enables support for the AMD IOMMUv2 features of the IOMMU
@@ -119,8 +119,8 @@ config INTEL_IOMMU_FLOPPY_WA
16MiB to make floppy (an ISA device) work.
config IRQ_REMAP
- bool "Support for Interrupt Remapping (EXPERIMENTAL)"
- depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+ bool "Support for Interrupt Remapping"
+ depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
select DMAR_TABLE
---help---
Supports Interrupt remapping for IO-APIC and MSI devices.
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 55074cba20eb..d33eaaf783ad 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -57,17 +57,9 @@
* physically contiguous memory regions it is mapping into page sizes
* that we support.
*
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is an order of a 4KiB page and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are an order of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
+ * 512GB pages are not supported due to a hardware bug
*/
-#define AMD_IOMMU_PGSIZES (~0xFFFUL)
+#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
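/*
 * Illustrative note, not part of the patch: in this bitmap, bit n set means
 * a page size of 2^n bytes is advertised. (~0xFFFUL) sets every bit >= 12,
 * i.e. every power-of-two size from 4KiB upward, and
 *
 *   2ULL << 38  ==  1ULL << 39  ==  2^39 bytes  ==  512GiB
 *
 * so masking with ~(2ULL << 38) removes exactly the 512GB page size from
 * the advertised set.
 */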
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
@@ -140,6 +132,9 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
list_del(&dev_data->dev_data_list);
spin_unlock_irqrestore(&dev_data_list_lock, flags);
+ if (dev_data->group)
+ iommu_group_put(dev_data->group);
+
kfree(dev_data);
}
@@ -274,41 +269,23 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
*from = to;
}
-#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
-static int iommu_init_device(struct device *dev)
+static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
{
- struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
- struct iommu_dev_data *dev_data;
- struct iommu_group *group;
- u16 alias;
- int ret;
-
- if (dev->archdata.iommu)
- return 0;
-
- dev_data = find_dev_data(get_device_id(dev));
- if (!dev_data)
- return -ENOMEM;
-
- alias = amd_iommu_alias_table[dev_data->devid];
- if (alias != dev_data->devid) {
- struct iommu_dev_data *alias_data;
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ return ERR_PTR(-ENODEV);
+ }
- alias_data = find_dev_data(alias);
- if (alias_data == NULL) {
- pr_err("AMD-Vi: Warning: Unhandled device %s\n",
- dev_name(dev));
- free_dev_data(dev_data);
- return -ENOTSUPP;
- }
- dev_data->alias_data = alias_data;
+ return bus;
+}
- dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
- }
+#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
- if (dma_pdev == NULL)
- dma_pdev = pci_dev_get(pdev);
+static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
+{
+ struct pci_dev *dma_pdev = pdev;
/* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
@@ -330,14 +307,9 @@ static int iommu_init_device(struct device *dev)
* Finding the next device may require skipping virtual buses.
*/
while (!pci_is_root_bus(dma_pdev->bus)) {
- struct pci_bus *bus = dma_pdev->bus;
-
- while (!bus->self) {
- if (!pci_is_root_bus(bus))
- bus = bus->parent;
- else
- goto root_bus;
- }
+ struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
+ if (IS_ERR(bus))
+ break;
if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
break;
@@ -345,19 +317,137 @@ static int iommu_init_device(struct device *dev)
swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
}
-root_bus:
- group = iommu_group_get(&dma_pdev->dev);
- pci_dev_put(dma_pdev);
+ return dma_pdev;
+}
+
+static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
+{
+ struct iommu_group *group = iommu_group_get(&pdev->dev);
+ int ret;
+
if (!group) {
group = iommu_group_alloc();
if (IS_ERR(group))
return PTR_ERR(group);
+
+ WARN_ON(&pdev->dev != dev);
}
ret = iommu_group_add_device(group, dev);
-
iommu_group_put(group);
+ return ret;
+}
+
+static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
+ struct device *dev)
+{
+ if (!dev_data->group) {
+ struct iommu_group *group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ dev_data->group = group;
+ }
+
+ return iommu_group_add_device(dev_data->group, dev);
+}
+
+static int init_iommu_group(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_group *group;
+ struct pci_dev *dma_pdev;
+ int ret;
+
+ group = iommu_group_get(dev);
+ if (group) {
+ iommu_group_put(group);
+ return 0;
+ }
+
+ dev_data = find_dev_data(get_device_id(dev));
+ if (!dev_data)
+ return -ENOMEM;
+
+ if (dev_data->alias_data) {
+ u16 alias;
+ struct pci_bus *bus;
+
+ if (dev_data->alias_data->group)
+ goto use_group;
+
+ /*
+ * If the alias device exists, it's effectively just a first
+ * level quirk for finding the DMA source.
+ */
+ alias = amd_iommu_alias_table[dev_data->devid];
+ dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+ if (dma_pdev) {
+ dma_pdev = get_isolation_root(dma_pdev);
+ goto use_pdev;
+ }
+
+ /*
+ * If the alias is virtual, try to find a parent device
+ * and test whether the IOMMU group is actually rooted above
+ * the alias. Be careful to also test the parent device if
+ * we think the alias is the root of the group.
+ */
+ bus = pci_find_bus(0, alias >> 8);
+ if (!bus)
+ goto use_group;
+
+ bus = find_hosted_bus(bus);
+ if (IS_ERR(bus) || !bus->self)
+ goto use_group;
+
+ dma_pdev = get_isolation_root(pci_dev_get(bus->self));
+ if (dma_pdev != bus->self || (dma_pdev->multifunction &&
+ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
+ goto use_pdev;
+
+ pci_dev_put(dma_pdev);
+ goto use_group;
+ }
+
+ dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
+use_pdev:
+ ret = use_pdev_iommu_group(dma_pdev, dev);
+ pci_dev_put(dma_pdev);
+ return ret;
+use_group:
+ return use_dev_data_iommu_group(dev_data->alias_data, dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct iommu_dev_data *dev_data;
+ u16 alias;
+ int ret;
+
+ if (dev->archdata.iommu)
+ return 0;
+
+ dev_data = find_dev_data(get_device_id(dev));
+ if (!dev_data)
+ return -ENOMEM;
+
+ alias = amd_iommu_alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ struct iommu_dev_data *alias_data;
+
+ alias_data = find_dev_data(alias);
+ if (alias_data == NULL) {
+ pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+ dev_name(dev));
+ free_dev_data(dev_data);
+ return -ENOTSUPP;
+ }
+ dev_data->alias_data = alias_data;
+ }
+ ret = init_iommu_group(dev);
if (ret)
return ret;
@@ -3927,10 +4017,10 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
index -= count - 1;
+ cfg->remapped = 1;
irte_info = &cfg->irq_2_iommu;
irte_info->sub_handle = devid;
irte_info->irte_index = index;
- irte_info->iommu = (void *)cfg;
goto out;
}
@@ -4037,9 +4127,9 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
index = attr->ioapic_pin;
/* Setup IRQ remapping info */
+ cfg->remapped = 1;
irte_info->sub_handle = devid;
irte_info->irte_index = index;
- irte_info->iommu = (void *)cfg;
/* Setup IRTE for IOMMU */
irte.val = 0;
@@ -4198,9 +4288,9 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
devid = get_device_id(&pdev->dev);
irte_info = &cfg->irq_2_iommu;
+ cfg->remapped = 1;
irte_info->sub_handle = devid;
irte_info->irte_index = index + offset;
- irte_info->iommu = (void *)cfg;
return 0;
}
@@ -4224,9 +4314,9 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
if (index < 0)
return index;
+ cfg->remapped = 1;
irte_info->sub_handle = devid;
irte_info->irte_index = index;
- irte_info->iommu = (void *)cfg;
return 0;
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 81837b0710a9..faf10ba1ed9a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -975,6 +975,38 @@ static void __init free_iommu_all(void)
}
/*
+ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
+ * Workaround:
+ * BIOS should disable L2B miscellaneous clock gating by setting
+ * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
+ */
+static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+{
+ u32 value;
+
+ if ((boot_cpu_data.x86 != 0x15) ||
+ (boot_cpu_data.x86_model < 0x10) ||
+ (boot_cpu_data.x86_model > 0x1f))
+ return;
+
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+ pci_read_config_dword(iommu->dev, 0xf4, &value);
+
+ if (value & BIT(2))
+ return;
+
+ /* Select NB indirect register 0x90 and enable writing */
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
+
+ pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
+ pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
+ dev_name(&iommu->dev->dev));
+
+ /* Clear the enable writing bit */
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+}
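/*
 * Illustrative note, not part of the patch: the config-space accesses above
 * follow the usual index/data pattern for NB indirect registers, roughly:
 *
 *   write 0x90             to D0F2xF0   -- select indirect register 0x90
 *   read                      D0F2xF4   -- current L2_L2B_CK_GATE_CONTROL
 *   write 0x90 | (1 << 8)  to D0F2xF0   -- re-select with write enable set
 *   write value | (1 << 2) to D0F2xF4   -- set CKGateL2BMiscDisable
 *   write 0x90             to D0F2xF0   -- drop the write-enable bit again
 */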
+
+/*
* This function clues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1172,6 +1204,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}
+ amd_iommu_erratum_746_workaround(iommu);
+
return pci_enable_device(iommu->dev);
}
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9aa3d079ff0..e38ab438bb34 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -426,6 +426,7 @@ struct iommu_dev_data {
struct iommu_dev_data *alias_data;/* The alias dev_data */
struct protection_domain *domain; /* Domain the device is bound to */
atomic_t bind; /* Domain attach reference count */
+ struct iommu_group *group; /* IOMMU group for virtual aliases */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Default for device is pt_domain */
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 86e2f4a62b9a..174bb654453d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -41,6 +41,8 @@
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
+#include "irq_remapping.h"
+
/* No locks are needed as DMA remapping hardware unit
* list is constructed at boot time and hotplug of
* these units are not supported by the architecture.
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0badfa48b32b..43d5c8b8e7ad 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -46,6 +46,8 @@
#include <asm/cacheflush.h>
#include <asm/iommu.h>
+#include "irq_remapping.h"
+
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -1827,10 +1829,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
if (!pte)
return -ENOMEM;
/* It is large page*/
- if (largepage_lvl > 1)
+ if (largepage_lvl > 1) {
pteval |= DMA_PTE_LARGE_PAGE;
- else
+ /* Ensure that old small page tables are removed to make room
+ for superpage, if they exist. */
+ dma_pte_clear_range(domain, iov_pfn,
+ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ dma_pte_free_pagetable(domain, iov_pfn,
+ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ } else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+ }
}
/* We don't need lock here, nobody else
@@ -2320,8 +2329,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
return 0;
}
+static bool device_has_rmrr(struct pci_dev *dev)
+{
+ struct dmar_rmrr_unit *rmrr;
+ int i;
+
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ /*
+ * Return TRUE if this RMRR contains the device that
+ * is passed in.
+ */
+ if (rmrr->devices[i] == dev)
+ return true;
+ }
+ }
+ return false;
+}
+
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
+
+ /*
+ * We want to prevent any device associated with an RMRR from
+ * getting placed into the SI Domain. This is done because
+ * problems exist when devices are moved in and out of domains
+ * and their respective RMRR info is lost. We exempt USB devices
+ * from this process due to their usage of RMRRs that are known
+ * to not be needed after BIOS hand-off to OS.
+ */
+ if (device_has_rmrr(pdev) &&
+ (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
+ return 0;
+
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return 1;
@@ -4196,7 +4236,22 @@ static struct iommu_ops intel_iommu_ops = {
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
-static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+{
+ /* G4x/GM45 integrated gfx dmar support is totally busted. */
+ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+ dmar_map_gfx = 0;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+
+static void quirk_iommu_rwbf(struct pci_dev *dev)
{
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
@@ -4204,12 +4259,6 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
-
- /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
- if (dev->revision == 0x07) {
- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
- }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
@@ -4224,7 +4273,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
-static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
unsigned short ggc;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index af8904de1d44..f3b8f23b5d8f 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -68,6 +68,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
u16 index, start_index;
unsigned int mask = 0;
unsigned long flags;
@@ -115,6 +116,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
for (i = index; i < index + count; i++)
table->base[i].present = 1;
+ cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
@@ -155,6 +157,7 @@ static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
unsigned long flags;
if (!irq_iommu)
@@ -162,6 +165,7 @@ static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subha
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = subhandle;
@@ -425,11 +429,22 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
+ iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
+ /*
+ * With CFI clear in the Global Command register, we should be
+ * protected from dangerous (i.e. compatibility) interrupts
+ * regardless of x2apic status. Check just to be sure.
+ */
+ if (sts & DMA_GSTS_CFIS)
+ WARN(1, KERN_WARNING
+ "Compatibility-format IRQs enabled despite intr remapping;\n"
+ "you are vulnerable to IRQ injection.\n");
+
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
@@ -526,20 +541,24 @@ static int __init intel_irq_remapping_supported(void)
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
+ bool x2apic_present;
int setup = 0;
int eim = 0;
+ x2apic_present = x2apic_supported();
+
if (parse_ioapics_under_ir() != 1) {
printk(KERN_INFO "Not enable interrupt remapping\n");
- return -1;
+ goto error;
}
- if (x2apic_supported()) {
+ if (x2apic_present) {
eim = !dmar_x2apic_optout();
- WARN(!eim, KERN_WARNING
- "Your BIOS is broken and requested that x2apic be disabled\n"
- "This will leave your machine vulnerable to irq-injection attacks\n"
- "Use 'intremap=no_x2apic_optout' to override BIOS request\n");
+ if (!eim)
+ printk(KERN_WARNING
+ "Your BIOS is broken and requested that x2apic be disabled.\n"
+ "This will slightly decrease performance.\n"
+ "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
}
for_each_drhd_unit(drhd) {
@@ -578,7 +597,7 @@ static int __init intel_enable_irq_remapping(void)
if (eim && !ecap_eim_support(iommu->ecap)) {
printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
" ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
- return -1;
+ goto error;
}
}
@@ -594,7 +613,7 @@ static int __init intel_enable_irq_remapping(void)
printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
" invalidation, ecap %Lx, ret %d\n",
drhd->reg_base_addr, iommu->ecap, ret);
- return -1;
+ goto error;
}
}
@@ -617,6 +636,14 @@ static int __init intel_enable_irq_remapping(void)
goto error;
irq_remapping_enabled = 1;
+
+ /*
+ * VT-d has a different layout for IO-APIC entries when
+ * interrupt remapping is enabled. So it needs a special routine
+ * to print IO-APIC entries for debugging purposes too.
+ */
+ x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
+
pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
@@ -625,6 +652,11 @@ error:
/*
* handle error condition gracefully here!
*/
+
+ if (x2apic_present)
+ WARN(1, KERN_WARNING
+ "Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
+
return -1;
}
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index faf85d6e33fe..d56f8c17c5fe 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -1,11 +1,18 @@
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/msi.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
#include <asm/hw_irq.h>
#include <asm/irq_remapping.h>
+#include <asm/processor.h>
+#include <asm/x86_init.h>
+#include <asm/apic.h>
#include "irq_remapping.h"
@@ -17,6 +24,152 @@ int no_x2apic_optout;
static struct irq_remap_ops *remap_ops;
+static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
+static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+ int index, int sub_handle);
+static int set_remapped_irq_affinity(struct irq_data *data,
+ const struct cpumask *mask,
+ bool force);
+
+static bool irq_remapped(struct irq_cfg *cfg)
+{
+ return (cfg->remapped == 1);
+}
+
+static void irq_remapping_disable_io_apic(void)
+{
+ /*
+ * With interrupt-remapping, for now we will use virtual wire A
+ * mode, as virtual wire B is a little more complex (we need to configure
+ * both IOAPIC RTE as well as interrupt-remapping table entry).
+ * As this gets called during crash dump, keep this simple for
+ * now.
+ */
+ if (cpu_has_apic || apic_from_smp_config())
+ disconnect_bsp_APIC(0);
+}
+
+static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
+{
+ int node, ret, sub_handle, index = 0;
+ unsigned int irq;
+ struct msi_desc *msidesc;
+
+ nvec = __roundup_pow_of_two(nvec);
+
+ WARN_ON(!list_is_singular(&dev->msi_list));
+ msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ WARN_ON(msidesc->irq);
+ WARN_ON(msidesc->msi_attrib.multiple);
+
+ node = dev_to_node(&dev->dev);
+ irq = __create_irqs(get_nr_irqs_gsi(), nvec, node);
+ if (irq == 0)
+ return -ENOSPC;
+
+ msidesc->msi_attrib.multiple = ilog2(nvec);
+ for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
+ if (!sub_handle) {
+ index = msi_alloc_remapped_irq(dev, irq, nvec);
+ if (index < 0) {
+ ret = index;
+ goto error;
+ }
+ } else {
+ ret = msi_setup_remapped_irq(dev, irq + sub_handle,
+ index, sub_handle);
+ if (ret < 0)
+ goto error;
+ }
+ ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
+ if (ret < 0)
+ goto error;
+ }
+ return 0;
+
+error:
+ destroy_irqs(irq, nvec);
+
+ /*
+ * Restore altered MSI descriptor fields and prevent just destroyed
+ * IRQs from tearing down again in default_teardown_msi_irqs()
+ */
+ msidesc->irq = 0;
+ msidesc->msi_attrib.multiple = 0;
+
+ return ret;
+}
+
+static int do_setup_msix_irqs(struct pci_dev *dev, int nvec)
+{
+ int node, ret, sub_handle, index = 0;
+ struct msi_desc *msidesc;
+ unsigned int irq;
+
+ node = dev_to_node(&dev->dev);
+ irq = get_nr_irqs_gsi();
+ sub_handle = 0;
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+
+ irq = create_irq_nr(irq, node);
+ if (irq == 0)
+ return -1;
+
+ if (sub_handle == 0)
+ ret = index = msi_alloc_remapped_irq(dev, irq, nvec);
+ else
+ ret = msi_setup_remapped_irq(dev, irq, index, sub_handle);
+
+ if (ret < 0)
+ goto error;
+
+ ret = setup_msi_irq(dev, msidesc, irq, 0);
+ if (ret < 0)
+ goto error;
+
+ sub_handle += 1;
+ irq += 1;
+ }
+
+ return 0;
+
+error:
+ destroy_irq(irq);
+ return ret;
+}
+
+static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
+ int nvec, int type)
+{
+ if (type == PCI_CAP_ID_MSI)
+ return do_setup_msi_irqs(dev, nvec);
+ else
+ return do_setup_msix_irqs(dev, nvec);
+}
+
+void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+{
+ /*
+ * Intr-remapping uses pin number as the virtual vector
+ * in the RTE. Actual vector is programmed in
+ * intr-remapping table entry. Hence for the io-apic
+ * EOI we use the pin number.
+ */
+ io_apic_eoi(apic, pin);
+}
+
+static void __init irq_remapping_modify_x86_ops(void)
+{
+ x86_io_apic_ops.disable = irq_remapping_disable_io_apic;
+ x86_io_apic_ops.set_affinity = set_remapped_irq_affinity;
+ x86_io_apic_ops.setup_entry = setup_ioapic_remapped_entry;
+ x86_io_apic_ops.eoi_ioapic_pin = eoi_ioapic_pin_remapped;
+ x86_msi.setup_msi_irqs = irq_remapping_setup_msi_irqs;
+ x86_msi.setup_hpet_msi = setup_hpet_msi_remapped;
+ x86_msi.compose_msi_msg = compose_remapped_msi_msg;
+}
+
static __init int setup_nointremap(char *str)
{
disable_irq_remap = 1;
@@ -79,15 +232,24 @@ int __init irq_remapping_prepare(void)
int __init irq_remapping_enable(void)
{
+ int ret;
+
if (!remap_ops || !remap_ops->enable)
return -ENODEV;
- return remap_ops->enable();
+ ret = remap_ops->enable();
+
+ if (irq_remapping_enabled)
+ irq_remapping_modify_x86_ops();
+
+ return ret;
}
void irq_remapping_disable(void)
{
- if (!remap_ops || !remap_ops->disable)
+ if (!irq_remapping_enabled ||
+ !remap_ops ||
+ !remap_ops->disable)
return;
remap_ops->disable();
@@ -95,7 +257,9 @@ void irq_remapping_disable(void)
int irq_remapping_reenable(int mode)
{
- if (!remap_ops || !remap_ops->reenable)
+ if (!irq_remapping_enabled ||
+ !remap_ops ||
+ !remap_ops->reenable)
return 0;
return remap_ops->reenable(mode);
@@ -103,6 +267,9 @@ int irq_remapping_reenable(int mode)
int __init irq_remap_enable_fault_handling(void)
{
+ if (!irq_remapping_enabled)
+ return 0;
+
if (!remap_ops || !remap_ops->enable_faulting)
return -ENODEV;
@@ -133,23 +300,28 @@ int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
void free_remapped_irq(int irq)
{
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
+
if (!remap_ops || !remap_ops->free_irq)
return;
- remap_ops->free_irq(irq);
+ if (irq_remapped(cfg))
+ remap_ops->free_irq(irq);
}
void compose_remapped_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id)
{
- if (!remap_ops || !remap_ops->compose_msi_msg)
- return;
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
- remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+ if (!irq_remapped(cfg))
+ native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+ else if (remap_ops && remap_ops->compose_msi_msg)
+ remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
}
-int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
+static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
{
if (!remap_ops || !remap_ops->msi_alloc_irq)
return -ENODEV;
@@ -157,8 +329,8 @@ int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
return remap_ops->msi_alloc_irq(pdev, irq, nvec);
}
-int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
- int index, int sub_handle)
+static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+ int index, int sub_handle)
{
if (!remap_ops || !remap_ops->msi_setup_irq)
return -ENODEV;
@@ -173,3 +345,42 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
return remap_ops->setup_hpet_msi(irq, id);
}
+
+void panic_if_irq_remap(const char *msg)
+{
+ if (irq_remapping_enabled)
+ panic(msg);
+}
+
+static void ir_ack_apic_edge(struct irq_data *data)
+{
+ ack_APIC_irq();
+}
+
+static void ir_ack_apic_level(struct irq_data *data)
+{
+ ack_APIC_irq();
+ eoi_ioapic_irq(data->irq, data->chip_data);
+}
+
+static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
+{
+ seq_printf(p, " IR-%s", data->chip->name);
+}
+
+void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+{
+ chip->irq_print_chip = ir_print_prefix;
+ chip->irq_ack = ir_ack_apic_edge;
+ chip->irq_eoi = ir_ack_apic_level;
+ chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
+}
+
+bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
+{
+ if (!irq_remapped(cfg))
+ return false;
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+ irq_remap_modify_chip_defaults(chip);
+ return true;
+}
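
The hunk above also adds setup_remapped_irq(), which the x86 MSI and IO-APIC setup paths can call to switch an irq_chip over to the remapping layer only when the IRQ is actually remapped. A minimal sketch of such a caller — the function and its names are hypothetical, not part of this patch:

	#include <linux/irq.h>
	#include <asm/hw_irq.h>
	#include <asm/irq_remapping.h>

	/* Hypothetical example of an x86 MSI setup path using the helper. */
	static void example_finish_msi_irq(unsigned int irq, struct irq_chip *chip)
	{
		struct irq_cfg *cfg = irq_get_chip_data(irq);

		/*
		 * If the IRQ is remapped, this marks it IRQ_MOVE_PCNTXT and
		 * swaps the chip's ack/eoi/print callbacks for the IR
		 * variants; for a non-remapped IRQ it is a no-op.
		 */
		setup_remapped_irq(irq, cfg, chip);

		irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
	}

Because irq_remap_modify_chip_defaults() rewrites the chip in place, the caller keeps a single irq_set_chip_and_handler_name() call for both the remapped and the native case.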
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 95363acb583f..ecb637670405 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -34,6 +34,7 @@ struct msi_msg;
extern int disable_irq_remap;
extern int disable_sourceid_checking;
extern int no_x2apic_optout;
+extern int irq_remapping_enabled;
struct irq_remap_ops {
/* Check whether Interrupt Remapping is supported */
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index badc17c2bcb4..d33c980e9c20 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -16,13 +16,13 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <asm/cacheflush.h>
@@ -143,31 +143,44 @@ EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
int err;
+ struct platform_device *pdev = to_platform_device(obj->dev);
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!obj)
+ if (!obj || !pdata)
return -EINVAL;
if (!arch_iommu)
return -ENODEV;
- clk_enable(obj->clk);
+ if (pdata->deassert_reset) {
+ err = pdata->deassert_reset(pdev, pdata->reset_name);
+ if (err) {
+ dev_err(obj->dev, "deassert_reset failed: %d\n", err);
+ return err;
+ }
+ }
+
+ pm_runtime_get_sync(obj->dev);
err = arch_iommu->enable(obj);
- clk_disable(obj->clk);
return err;
}
static void iommu_disable(struct omap_iommu *obj)
{
- if (!obj)
- return;
+ struct platform_device *pdev = to_platform_device(obj->dev);
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
- clk_enable(obj->clk);
+ if (!obj || !pdata)
+ return;
arch_iommu->disable(obj);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
+
+ if (pdata->assert_reset)
+ pdata->assert_reset(pdev, pdata->reset_name);
}
/*
@@ -290,7 +303,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
if (!obj || !obj->nr_tlb_entries || !e)
return -EINVAL;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
iotlb_lock_get(obj, &l);
if (l.base == obj->nr_tlb_entries) {
@@ -320,7 +333,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
cr = iotlb_alloc_cr(obj, e);
if (IS_ERR(cr)) {
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return PTR_ERR(cr);
}
@@ -334,7 +347,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
l.vict = l.base;
iotlb_lock_set(obj, &l);
out:
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return err;
}
@@ -364,7 +377,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
int i;
struct cr_regs cr;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
u32 start;
@@ -383,7 +396,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
if (i == obj->nr_tlb_entries)
dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -397,7 +410,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
{
struct iotlb_lock l;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
l.base = 0;
l.vict = 0;
@@ -405,7 +418,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
iommu_write_reg(obj, 1, MMU_GFLUSH);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
@@ -415,11 +428,11 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
if (!obj || !buf)
return -EINVAL;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
bytes = arch_iommu->dump_ctx(obj, buf, bytes);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return bytes;
}
@@ -433,7 +446,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
struct cr_regs tmp;
struct cr_regs *p = crs;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
iotlb_lock_get(obj, &saved);
for_each_iotlb_cr(obj, num, i, tmp) {
@@ -443,7 +456,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
}
iotlb_lock_set(obj, &saved);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return p - crs;
}
@@ -807,9 +820,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (!obj->refcount)
return IRQ_NONE;
- clk_enable(obj->clk);
errs = iommu_report_fault(obj, &da);
- clk_disable(obj->clk);
if (errs == 0)
return IRQ_HANDLED;
@@ -923,7 +934,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
/*
* OMAP Device MMU(IOMMU) detection
*/
-static int __devinit omap_iommu_probe(struct platform_device *pdev)
+static int omap_iommu_probe(struct platform_device *pdev)
{
int err = -ENODEV;
int irq;
@@ -931,17 +942,10 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (pdev->num_resources != 2)
- return -EINVAL;
-
obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- obj->clk = clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(obj->clk))
- goto err_clk;
-
obj->nr_tlb_entries = pdata->nr_tlb_entries;
obj->name = pdata->name;
obj->dev = &pdev->dev;
@@ -984,6 +988,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
goto err_irq;
platform_set_drvdata(pdev, obj);
+ pm_runtime_irq_safe(obj->dev);
+ pm_runtime_enable(obj->dev);
+
dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0;
@@ -992,13 +999,11 @@ err_irq:
err_ioremap:
release_mem_region(res->start, resource_size(res));
err_mem:
- clk_put(obj->clk);
-err_clk:
kfree(obj);
return err;
}
-static int __devexit omap_iommu_remove(struct platform_device *pdev)
+static int omap_iommu_remove(struct platform_device *pdev)
{
int irq;
struct resource *res;
@@ -1014,7 +1019,8 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
iounmap(obj->regbase);
- clk_put(obj->clk);
+ pm_runtime_disable(obj->dev);
+
dev_info(&pdev->dev, "%s removed\n", obj->name);
kfree(obj);
return 0;
@@ -1022,7 +1028,7 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
static struct platform_driver omap_iommu_driver = {
.probe = omap_iommu_probe,
- .remove = __devexit_p(omap_iommu_remove),
+ .remove = omap_iommu_remove,
.driver = {
.name = "omap-iommu",
},
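
The omap-iommu hunks above replace per-access clk_enable()/clk_disable() pairs with runtime PM references and move reset handling to platform callbacks. A stand-alone sketch of the runtime-PM pattern, with a hypothetical register read rather than the driver's own accessors:

	#include <linux/pm_runtime.h>
	#include <linux/device.h>
	#include <linux/io.h>

	/* Illustration only: read one register with the device powered up. */
	static u32 example_read_reg(struct device *dev, void __iomem *reg)
	{
		u32 val;

		pm_runtime_get_sync(dev);	/* power the device up if needed */
		val = readl(reg);
		pm_runtime_put_sync(dev);	/* allow it to suspend again */

		return val;
	}

The counterpart is in probe/remove: pm_runtime_irq_safe() and pm_runtime_enable() at probe time, pm_runtime_disable() at remove time, exactly as the hunks above add.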
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 2b5f3c04d167..120084206602 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -29,7 +29,6 @@ struct iotlb_entry {
struct omap_iommu {
const char *name;
struct module *owner;
- struct clk *clk;
void __iomem *regbase;
struct device *dev;
void *isr_priv;
@@ -116,8 +115,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
* MMU Register offsets
*/
#define MMU_REVISION 0x00
-#define MMU_SYSCONFIG 0x10
-#define MMU_SYSSTATUS 0x14
#define MMU_IRQSTATUS 0x18
#define MMU_IRQENABLE 0x1c
#define MMU_WALKING_ST 0x40
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
index c02020292377..d745094a69dd 100644
--- a/drivers/iommu/omap-iommu2.c
+++ b/drivers/iommu/omap-iommu2.c
@@ -28,19 +28,6 @@
*/
#define IOMMU_ARCH_VERSION 0x00000011
-/* SYSCONF */
-#define MMU_SYS_IDLE_SHIFT 3
-#define MMU_SYS_IDLE_FORCE (0 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_NONE (1 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_SMART (2 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_MASK (3 << MMU_SYS_IDLE_SHIFT)
-
-#define MMU_SYS_SOFTRESET (1 << 1)
-#define MMU_SYS_AUTOIDLE 1
-
-/* SYSSTATUS */
-#define MMU_SYS_RESETDONE 1
-
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
@@ -97,7 +84,6 @@ static void __iommu_set_twl(struct omap_iommu *obj, bool on)
static int omap2_iommu_enable(struct omap_iommu *obj)
{
u32 l, pa;
- unsigned long timeout;
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
return -EINVAL;
@@ -106,29 +92,10 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
if (!IS_ALIGNED(pa, SZ_16K))
return -EINVAL;
- iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);
-
- timeout = jiffies + msecs_to_jiffies(20);
- do {
- l = iommu_read_reg(obj, MMU_SYSSTATUS);
- if (l & MMU_SYS_RESETDONE)
- break;
- } while (!time_after(jiffies, timeout));
-
- if (!(l & MMU_SYS_RESETDONE)) {
- dev_err(obj->dev, "can't take mmu out of reset\n");
- return -ENODEV;
- }
-
l = iommu_read_reg(obj, MMU_REVISION);
dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
(l >> 4) & 0xf, l & 0xf);
- l = iommu_read_reg(obj, MMU_SYSCONFIG);
- l &= ~MMU_SYS_IDLE_MASK;
- l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
- iommu_write_reg(obj, l, MMU_SYSCONFIG);
-
iommu_write_reg(obj, pa, MMU_TTB);
__iommu_set_twl(obj, true);
@@ -142,7 +109,6 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
l &= ~MMU_CNTL_MASK;
iommu_write_reg(obj, l, MMU_CNTL);
- iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
@@ -271,8 +237,6 @@ omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
char *p = buf;
pr_reg(REVISION);
- pr_reg(SYSCONFIG);
- pr_reg(SYSSTATUS);
pr_reg(IRQSTATUS);
pr_reg(IRQENABLE);
pr_reg(WALKING_ST);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c16e8fc8a4bd..8219f1d596ee 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -398,6 +398,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
+ bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return 0;
fail:
@@ -430,7 +431,7 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
};
#ifdef CONFIG_OF
-static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+static struct of_device_id tegra_gart_of_match[] = {
{ .compatible = "nvidia,tegra20-gart", },
{ },
};
@@ -448,9 +449,8 @@ static struct platform_driver tegra_gart_driver = {
},
};
-static int __devinit tegra_gart_init(void)
+static int tegra_gart_init(void)
{
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return platform_driver_register(&tegra_gart_driver);
}
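
tegra-gart (and tegra-smmu below) now call bus_set_iommu() from probe rather than from module init, so the IOMMU ops are only advertised once the hardware is actually available. A reduced sketch of that ordering, with hypothetical names:

	#include <linux/iommu.h>
	#include <linux/platform_device.h>

	static struct iommu_ops example_iommu_ops;	/* hypothetical, contents elided */

	static int example_iommu_probe(struct platform_device *pdev)
	{
		/* ... map registers, initialise the IOMMU instance ... */

		/* Register the ops only now, so clients cannot attach too early. */
		bus_set_iommu(&platform_bus_type, &example_iommu_ops);
		return 0;
	}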
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4252d743963d..f08dbcd2f175 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) "%s(): " fmt, __func__
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -694,10 +695,8 @@ static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
*pte = _PTE_VACANT(iova);
FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
- if (!--(*count)) {
+ if (!--(*count))
free_ptbl(as, iova);
- smmu_flush_regs(as->smmu, 0);
- }
}
static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
@@ -1178,9 +1177,9 @@ static int tegra_smmu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
return -ENODEV;
- smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
- if (!smmu->regs[i])
- return -EBUSY;
+ smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(smmu->regs[i]))
+ return PTR_ERR(smmu->regs[i]);
}
err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
@@ -1232,6 +1231,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
smmu_debugfs_create(smmu);
smmu_handle = smmu;
+ bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
return 0;
}
@@ -1256,7 +1256,7 @@ const struct dev_pm_ops tegra_smmu_pm_ops = {
};
#ifdef CONFIG_OF
-static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
+static struct of_device_id tegra_smmu_of_match[] = {
{ .compatible = "nvidia,tegra30-smmu", },
{ },
};
@@ -1274,9 +1274,8 @@ static struct platform_driver tegra_smmu_driver = {
},
};
-static int __devinit tegra_smmu_init(void)
+static int tegra_smmu_init(void)
{
- bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
return platform_driver_register(&tegra_smmu_driver);
}
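
The tegra-smmu hunk also switches from devm_request_and_ioremap(), which returns NULL on failure, to devm_ioremap_resource(), which returns an ERR_PTR carrying the precise error. A minimal probe-time sketch of the new idiom (hypothetical driver, not this patch):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(regs))
			return PTR_ERR(regs);	/* -EINVAL, -EBUSY or -ENOMEM */

		/* ... use regs ... */
		return 0;
	}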
diff --git a/drivers/ipack/devices/Kconfig b/drivers/ipack/devices/Kconfig
index 0b82fdc198c0..907a8cb48f2a 100644
--- a/drivers/ipack/devices/Kconfig
+++ b/drivers/ipack/devices/Kconfig
@@ -1,6 +1,6 @@
config SERIAL_IPOCTAL
tristate "IndustryPack IP-OCTAL uart support"
- depends on IPACK_BUS
+ depends on IPACK_BUS && TTY
help
This driver supports the IPOCTAL serial port device for the IndustryPack bus.
default n
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 576d53d92677..141094e7c06e 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -20,7 +20,6 @@
#include <linux/serial.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
-#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/ipack.h>
#include "ipoctal.h"
@@ -38,21 +37,19 @@ struct ipoctal_channel {
spinlock_t lock;
unsigned int pointer_read;
unsigned int pointer_write;
- atomic_t open;
struct tty_port tty_port;
union scc2698_channel __iomem *regs;
union scc2698_block __iomem *block_regs;
unsigned int board_id;
- unsigned char *board_write;
u8 isr_rx_rdy_mask;
u8 isr_tx_rdy_mask;
+ unsigned int rx_enable;
};
struct ipoctal {
struct ipack_device *dev;
unsigned int board_id;
struct ipoctal_channel channel[NR_CHANNELS];
- unsigned char write;
struct tty_driver *tty_drv;
u8 __iomem *mem8_space;
u8 __iomem *int_space;
@@ -64,28 +61,23 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
channel = dev_get_drvdata(tty->dev);
+ /*
+ * Enable RX. TX will be enabled when
+ * there is something to send
+ */
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 1;
return 0;
}
static int ipoctal_open(struct tty_struct *tty, struct file *file)
{
- int res;
struct ipoctal_channel *channel;
channel = dev_get_drvdata(tty->dev);
-
- if (atomic_read(&channel->open))
- return -EBUSY;
-
tty->driver_data = channel;
- res = tty_port_open(&channel->tty_port, tty, file);
- if (res)
- return res;
-
- atomic_inc(&channel->open);
- return 0;
+ return tty_port_open(&channel->tty_port, tty, file);
}
static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -111,9 +103,7 @@ static void ipoctal_close(struct tty_struct *tty, struct file *filp)
struct ipoctal_channel *channel = tty->driver_data;
tty_port_close(&channel->tty_port, tty, filp);
-
- if (atomic_dec_and_test(&channel->open))
- ipoctal_free_channel(channel);
+ ipoctal_free_channel(channel);
}
static int ipoctal_get_icount(struct tty_struct *tty,
@@ -133,15 +123,16 @@ static int ipoctal_get_icount(struct tty_struct *tty,
return 0;
}
-static void ipoctal_irq_rx(struct ipoctal_channel *channel,
- struct tty_struct *tty, u8 sr)
+static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
{
+ struct tty_port *port = &channel->tty_port;
unsigned char value;
- unsigned char flag = TTY_NORMAL;
+ unsigned char flag;
u8 isr;
do {
value = ioread8(&channel->regs->r.rhr);
+ flag = TTY_NORMAL;
/* Error: count statistics */
if (sr & SR_ERROR) {
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -149,7 +140,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
if (sr & SR_OVERRUN_ERROR) {
channel->stats.overrun_err++;
/* Overrun doesn't affect the current character*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
if (sr & SR_PARITY_ERROR) {
channel->stats.parity_err++;
@@ -165,7 +156,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
flag = TTY_BREAK;
}
}
- tty_insert_flip_char(tty, value, flag);
+ tty_insert_flip_char(port, value, flag);
/* Check if there are more characters in RX FIFO
* If there are more, the isr register for this channel
@@ -175,7 +166,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
sr = ioread8(&channel->regs->r.sr);
} while (isr & channel->isr_rx_rdy_mask);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
static void ipoctal_irq_tx(struct ipoctal_channel *channel)
@@ -183,10 +174,8 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
unsigned char value;
unsigned int *pointer_write = &channel->pointer_write;
- if (channel->nb_bytes <= 0) {
- channel->nb_bytes = 0;
+ if (channel->nb_bytes == 0)
return;
- }
value = channel->tty_port.xmit_buf[*pointer_write];
iowrite8(value, &channel->regs->w.thr);
@@ -194,55 +183,38 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
(*pointer_write)++;
*pointer_write = *pointer_write % PAGE_SIZE;
channel->nb_bytes--;
-
- if ((channel->nb_bytes == 0) &&
- (waitqueue_active(&channel->queue))) {
-
- if (channel->board_id != IPACK1_DEVICE_ID_SBS_OCTAL_485) {
- *channel->board_write = 1;
- wake_up_interruptible(&channel->queue);
- }
- }
}
static void ipoctal_irq_channel(struct ipoctal_channel *channel)
{
u8 isr, sr;
- struct tty_struct *tty;
- /* If there is no client, skip the check */
- if (!atomic_read(&channel->open))
- return;
-
- tty = tty_port_tty_get(&channel->tty_port);
- if (!tty)
- return;
+ spin_lock(&channel->lock);
/* The HW is organized in pairs of channels. See which register we need
* to read from */
isr = ioread8(&channel->block_regs->r.isr);
sr = ioread8(&channel->regs->r.sr);
- /* In case of RS-485, change from TX to RX when finishing TX.
- * Half-duplex. */
- if ((channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) &&
- (sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
+ if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
- iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
- *channel->board_write = 1;
- wake_up_interruptible(&channel->queue);
+ /* In case of RS-485, change from TX to RX when finishing TX.
+ * Half-duplex. */
+ if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
+ iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
+ iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 1;
+ }
}
/* RX data */
if ((isr & channel->isr_rx_rdy_mask) && (sr & SR_RX_READY))
- ipoctal_irq_rx(channel, tty, sr);
+ ipoctal_irq_rx(channel, sr);
/* TX of each character */
if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
ipoctal_irq_tx(channel);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ spin_unlock(&channel->lock);
}
static irqreturn_t ipoctal_irq_handler(void *arg)
@@ -250,14 +222,14 @@ static irqreturn_t ipoctal_irq_handler(void *arg)
unsigned int i;
struct ipoctal *ipoctal = (struct ipoctal *) arg;
- /* Check all channels */
- for (i = 0; i < NR_CHANNELS; i++)
- ipoctal_irq_channel(&ipoctal->channel[i]);
-
/* Clear the IPack device interrupt */
readw(ipoctal->int_space + ACK_INT_REQ0);
readw(ipoctal->int_space + ACK_INT_REQ1);
+ /* Check all channels */
+ for (i = 0; i < NR_CHANNELS; i++)
+ ipoctal_irq_channel(&ipoctal->channel[i]);
+
return IRQ_HANDLED;
}
@@ -311,7 +283,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal->mem8_space =
devm_ioremap_nocache(&ipoctal->dev->dev,
region->start, 0x8000);
- if (!addr) {
+ if (!ipoctal->mem8_space) {
dev_err(&ipoctal->dev->dev,
"Unable to map slot [%d:%d] MEM8 space!\n",
bus_nr, slot);
@@ -324,7 +296,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
struct ipoctal_channel *channel = &ipoctal->channel[i];
channel->regs = chan_regs + i;
channel->block_regs = block_regs + (i >> 1);
- channel->board_write = &ipoctal->write;
channel->board_id = ipoctal->board_id;
if (i & 1) {
channel->isr_tx_rdy_mask = ISR_TxRDY_B;
@@ -335,6 +306,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
}
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY,
@@ -407,8 +379,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal_reset_stats(&channel->stats);
channel->nb_bytes = 0;
- init_waitqueue_head(&channel->queue);
-
spin_lock_init(&channel->lock);
channel->pointer_read = 0;
channel->pointer_write = 0;
@@ -419,12 +389,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
continue;
}
dev_set_drvdata(tty_dev, channel);
-
- /*
- * Enable again the RX. TX will be enabled when
- * there is something to send
- */
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
}
return 0;
@@ -464,6 +428,7 @@ static int ipoctal_write_tty(struct tty_struct *tty,
/* As the IP-OCTAL 485 only supports half duplex, do it manually */
if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
iowrite8(CR_DISABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr);
}
@@ -472,10 +437,6 @@ static int ipoctal_write_tty(struct tty_struct *tty,
* operations
*/
iowrite8(CR_ENABLE_TX, &channel->regs->w.cr);
- wait_event_interruptible(channel->queue, *channel->board_write);
- iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
-
- *channel->board_write = 0;
return char_copied;
}
@@ -627,8 +588,9 @@ static void ipoctal_set_termios(struct tty_struct *tty,
iowrite8(mr2, &channel->regs->w.mr);
iowrite8(csr, &channel->regs->w.csr);
- /* Enable again the RX */
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+ /* Enable RX again, if it was enabled before */
+ if (channel->rx_enable)
+ iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
}
static void ipoctal_hangup(struct tty_struct *tty)
@@ -648,6 +610,7 @@ static void ipoctal_hangup(struct tty_struct *tty)
tty_port_hangup(&channel->tty_port);
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -657,6 +620,22 @@ static void ipoctal_hangup(struct tty_struct *tty)
wake_up_interruptible(&channel->tty_port.open_wait);
}
+static void ipoctal_shutdown(struct tty_struct *tty)
+{
+ struct ipoctal_channel *channel = tty->driver_data;
+
+ if (channel == NULL)
+ return;
+
+ iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
+ iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr);
+ clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
+}
+
static const struct tty_operations ipoctal_fops = {
.ioctl = NULL,
.open = ipoctal_open,
@@ -667,6 +646,7 @@ static const struct tty_operations ipoctal_fops = {
.chars_in_buffer = ipoctal_chars_in_buffer,
.get_icount = ipoctal_get_icount,
.hangup = ipoctal_hangup,
+ .shutdown = ipoctal_shutdown,
};
static int ipoctal_probe(struct ipack_device *dev)
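
The ipoctal changes track the tty layer of this release, where the flip-buffer helpers take a struct tty_port instead of a struct tty_struct, so the interrupt path no longer needs tty_port_tty_get()/tty_kref_put(). A stand-alone sketch of the receive side under that API, with hypothetical names:

	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	/* Push 'count' received bytes from 'buf' to the line discipline. */
	static void example_rx(struct tty_port *port, const unsigned char *buf,
			       int count)
	{
		int i;

		for (i = 0; i < count; i++)
			tty_insert_flip_char(port, buf[i], TTY_NORMAL);

		tty_flip_buffer_push(port);	/* takes the port, not a tty */
	}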
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 62ca575701d3..a350969e5efe 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1,3 +1,30 @@
+config IRQCHIP
+ def_bool y
+ depends on OF_IRQ
+
+config ARM_GIC
+ bool
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+
+config GIC_NON_BANKED
+ bool
+
+config ARM_VIC
+ bool
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+
+config ARM_VIC_NR
+ int
+ default 4 if ARCH_S5PV210
+ default 3 if ARCH_S5PC100
+ default 2
+ depends on ARM_VIC
+ help
+ The maximum number of VICs available in the system, for
+ power management.
+
config VERSATILE_FPGA_IRQ
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 02bd37a6187f..e65fbf2cdf71 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,3 +1,9 @@
-obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
-obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o
-obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_IRQCHIP) += irqchip.o
+
+obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
+obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
+obj-$(CONFIG_ARM_GIC) += irq-gic.o
+obj-$(CONFIG_ARM_VIC) += irq-vic.o
+obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
new file mode 100644
index 000000000000..04d86a9803f4
--- /dev/null
+++ b/drivers/irqchip/exynos-combiner.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Combiner irqchip for EXYNOS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/mach/irq.h>
+
+#include <plat/cpu.h>
+
+#include "irqchip.h"
+
+#define COMBINER_ENABLE_SET 0x0
+#define COMBINER_ENABLE_CLEAR 0x4
+#define COMBINER_INT_STATUS 0xC
+
+static DEFINE_SPINLOCK(irq_controller_lock);
+
+struct combiner_chip_data {
+ unsigned int irq_offset;
+ unsigned int irq_mask;
+ void __iomem *base;
+};
+
+static struct irq_domain *combiner_irq_domain;
+static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
+
+static inline void __iomem *combiner_base(struct irq_data *data)
+{
+ struct combiner_chip_data *combiner_data =
+ irq_data_get_irq_chip_data(data);
+
+ return combiner_data->base;
+}
+
+static void combiner_mask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->hwirq % 32);
+
+ __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+}
+
+static void combiner_unmask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->hwirq % 32);
+
+ __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+}
+
+static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int cascade_irq, combiner_irq;
+ unsigned long status;
+
+ chained_irq_enter(chip, desc);
+
+ spin_lock(&irq_controller_lock);
+ status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
+ spin_unlock(&irq_controller_lock);
+ status &= chip_data->irq_mask;
+
+ if (status == 0)
+ goto out;
+
+ combiner_irq = __ffs(status);
+
+ cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
+ if (unlikely(cascade_irq >= NR_IRQS))
+ do_bad_IRQ(cascade_irq, desc);
+ else
+ generic_handle_irq(cascade_irq);
+
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip combiner_chip = {
+ .name = "COMBINER",
+ .irq_mask = combiner_mask_irq,
+ .irq_unmask = combiner_unmask_irq,
+};
+
+static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
+{
+ unsigned int max_nr;
+
+ if (soc_is_exynos5250())
+ max_nr = EXYNOS5_MAX_COMBINER_NR;
+ else
+ max_nr = EXYNOS4_MAX_COMBINER_NR;
+
+ if (combiner_nr >= max_nr)
+ BUG();
+ if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
+ BUG();
+ irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+}
+
+static void __init combiner_init_one(unsigned int combiner_nr,
+ void __iomem *base)
+{
+ combiner_data[combiner_nr].base = base;
+ combiner_data[combiner_nr].irq_offset = irq_find_mapping(
+ combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
+ combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
+
+ /* Disable all interrupts */
+ __raw_writel(combiner_data[combiner_nr].irq_mask,
+ base + COMBINER_ENABLE_CLEAR);
+}
+
+#ifdef CONFIG_OF
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+ *out_type = 0;
+
+ return 0;
+}
+#else
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ return -EINVAL;
+}
+#endif
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+ irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops combiner_irq_domain_ops = {
+ .xlate = combiner_irq_domain_xlate,
+ .map = combiner_irq_domain_map,
+};
+
+void __init combiner_init(void __iomem *combiner_base,
+ struct device_node *np)
+{
+ int i, irq, irq_base;
+ unsigned int max_nr, nr_irq;
+
+ if (np) {
+ if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+ pr_warning("%s: number of combiners not specified, "
+ "setting default as %d.\n",
+ __func__, EXYNOS4_MAX_COMBINER_NR);
+ max_nr = EXYNOS4_MAX_COMBINER_NR;
+ }
+ } else {
+ max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
+ EXYNOS4_MAX_COMBINER_NR;
+ }
+ nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
+
+ irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ irq_base = COMBINER_IRQ(0, 0);
+ pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+ }
+
+ combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
+ &combiner_irq_domain_ops, &combiner_data);
+ if (WARN_ON(!combiner_irq_domain)) {
+ pr_warning("%s: irq domain init failed\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < max_nr; i++) {
+ combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
+ irq = IRQ_SPI(i);
+#ifdef CONFIG_OF
+ if (np)
+ irq = irq_of_parse_and_map(np, i);
+#endif
+ combiner_cascade_irq(i, irq);
+ }
+}
+
+#ifdef CONFIG_OF
+static int __init combiner_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ void __iomem *combiner_base;
+
+ combiner_base = of_iomap(np, 0);
+ if (!combiner_base) {
+ pr_err("%s: failed to map combiner registers\n", __func__);
+ return -ENXIO;
+ }
+
+ combiner_init(combiner_base, np);
+
+ return 0;
+}
+IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
+ combiner_of_init);
+#endif
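
The combiner registers itself through IRQCHIP_DECLARE() (from the new drivers/irqchip/irqchip.h in this series), which places an of_device_id plus init callback in a linker section that irqchip_init() scans at boot. A bare-bones sketch of that registration pattern, with hypothetical names:

	#include <linux/of.h>
	#include <linux/of_address.h>

	#include "irqchip.h"

	static int __init example_intc_of_init(struct device_node *np,
					       struct device_node *parent)
	{
		void __iomem *base = of_iomap(np, 0);

		if (!base)
			return -ENXIO;

		/* ... set up an irq_domain and handlers for this controller ... */
		return 0;
	}
	/* Matched and called by irqchip_init() when the DT node is present. */
	IRQCHIP_DECLARE(example_intc, "vendor,example-intc", example_intc_of_init);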
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
new file mode 100644
index 000000000000..644d72468423
--- /dev/null
+++ b/drivers/irqchip/irq-gic.c
@@ -0,0 +1,845 @@
+/*
+ * linux/arch/arm/common/gic.c
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt architecture for the GIC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ * from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which sends interrupts sent
+ * by the Distributor, and interrupts generated locally, to the
+ * associated CPU. The base address of the CPU interface is usually
+ * aliased so that the same address points to different chips depending
+ * on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+union gic_base {
+ void __iomem *common_base;
+ void __percpu __iomem **percpu_base;
+};
+
+struct gic_chip_data {
+ union gic_base dist_base;
+ union gic_base cpu_base;
+#ifdef CONFIG_CPU_PM
+ u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+ u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+ u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+ u32 __percpu *saved_ppi_enable;
+ u32 __percpu *saved_ppi_conf;
+#endif
+ struct irq_domain *domain;
+ unsigned int gic_irqs;
+#ifdef CONFIG_GIC_NON_BANKED
+ void __iomem *(*get_base)(union gic_base *);
+#endif
+};
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering. Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+/*
+ * Supported arch specific GIC irq extension.
+ * Default make them NULL.
+ */
+struct irq_chip gic_arch_extn = {
+ .irq_eoi = NULL,
+ .irq_mask = NULL,
+ .irq_unmask = NULL,
+ .irq_retrigger = NULL,
+ .irq_set_type = NULL,
+ .irq_set_wake = NULL,
+};
+
+#ifndef MAX_GIC_NR
+#define MAX_GIC_NR 1
+#endif
+
+static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
+
+#ifdef CONFIG_GIC_NON_BANKED
+static void __iomem *gic_get_percpu_base(union gic_base *base)
+{
+ return *__this_cpu_ptr(base->percpu_base);
+}
+
+static void __iomem *gic_get_common_base(union gic_base *base)
+{
+ return base->common_base;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+ return data->get_base(&data->dist_base);
+}
+
+static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
+{
+ return data->get_base(&data->cpu_base);
+}
+
+static inline void gic_set_base_accessor(struct gic_chip_data *data,
+ void __iomem *(*f)(union gic_base *))
+{
+ data->get_base = f;
+}
+#else
+#define gic_data_dist_base(d) ((d)->dist_base.common_base)
+#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
+#define gic_set_base_accessor(d,f)
+#endif
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+ return gic_data_dist_base(gic_data);
+}
+
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
+{
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+ return gic_data_cpu_base(gic_data);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void gic_mask_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+
+ raw_spin_lock(&irq_controller_lock);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+ if (gic_arch_extn.irq_mask)
+ gic_arch_extn.irq_mask(d);
+ raw_spin_unlock(&irq_controller_lock);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+
+ raw_spin_lock(&irq_controller_lock);
+ if (gic_arch_extn.irq_unmask)
+ gic_arch_extn.irq_unmask(d);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+ raw_spin_unlock(&irq_controller_lock);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+ if (gic_arch_extn.irq_eoi) {
+ raw_spin_lock(&irq_controller_lock);
+ gic_arch_extn.irq_eoi(d);
+ raw_spin_unlock(&irq_controller_lock);
+ }
+
+ writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ void __iomem *base = gic_dist_base(d);
+ unsigned int gicirq = gic_irq(d);
+ u32 enablemask = 1 << (gicirq % 32);
+ u32 enableoff = (gicirq / 32) * 4;
+ u32 confmask = 0x2 << ((gicirq % 16) * 2);
+ u32 confoff = (gicirq / 16) * 4;
+ bool enabled = false;
+ u32 val;
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (gicirq < 16)
+ return -EINVAL;
+
+ if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ raw_spin_lock(&irq_controller_lock);
+
+ if (gic_arch_extn.irq_set_type)
+ gic_arch_extn.irq_set_type(d, type);
+
+ val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+ if (type == IRQ_TYPE_LEVEL_HIGH)
+ val &= ~confmask;
+ else if (type == IRQ_TYPE_EDGE_RISING)
+ val |= confmask;
+
+ /*
+ * As recommended by the spec, disable the interrupt before changing
+ * the configuration
+ */
+ if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+ enabled = true;
+ }
+
+ writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+
+ if (enabled)
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+ raw_spin_unlock(&irq_controller_lock);
+
+ return 0;
+}
+
+static int gic_retrigger(struct irq_data *d)
+{
+ if (gic_arch_extn.irq_retrigger)
+ return gic_arch_extn.irq_retrigger(d);
+
+ return -ENXIO;
+}
+
+#ifdef CONFIG_SMP
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+ unsigned int shift = (gic_irq(d) % 4) * 8;
+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ u32 val, mask, bit;
+
+ if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ mask = 0xff << shift;
+ bit = gic_cpu_map[cpu] << shift;
+
+ raw_spin_lock(&irq_controller_lock);
+ val = readl_relaxed(reg) & ~mask;
+ writel_relaxed(val | bit, reg);
+ raw_spin_unlock(&irq_controller_lock);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+ int ret = -ENXIO;
+
+ if (gic_arch_extn.irq_set_wake)
+ ret = gic_arch_extn.irq_set_wake(d, on);
+
+ return ret;
+}
+
+#else
+#define gic_set_wake NULL
+#endif
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ u32 irqstat, irqnr;
+ struct gic_chip_data *gic = &gic_data[0];
+ void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+ do {
+ irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+ irqnr = irqstat & ~0x1c00;
+
+ if (likely(irqnr > 15 && irqnr < 1021)) {
+ irqnr = irq_find_mapping(gic->domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ continue;
+ }
+ if (irqnr < 16) {
+ writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_SMP
+ handle_IPI(irqnr, regs);
+#endif
+ continue;
+ }
+ break;
+ } while (1);
+}
+
+static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct gic_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int cascade_irq, gic_irq;
+ unsigned long status;
+
+ chained_irq_enter(chip, desc);
+
+ raw_spin_lock(&irq_controller_lock);
+ status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
+ raw_spin_unlock(&irq_controller_lock);
+
+ gic_irq = (status & 0x3ff);
+ if (gic_irq == 1023)
+ goto out;
+
+ cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+ if (unlikely(gic_irq < 32 || gic_irq > 1020))
+ do_bad_IRQ(cascade_irq, desc);
+ else
+ generic_handle_irq(cascade_irq);
+
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip gic_chip = {
+ .name = "GIC",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+ .irq_set_wake = gic_set_wake,
+};
+
+void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
+{
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+ if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
+ BUG();
+ irq_set_chained_handler(irq, gic_handle_cascade_irq);
+}
+
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+ void __iomem *base = gic_data_dist_base(gic);
+ u32 mask, i;
+
+ for (i = mask = 0; i < 32; i += 4) {
+ mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+ mask |= mask >> 16;
+ mask |= mask >> 8;
+ if (mask)
+ break;
+ }
+
+ if (!mask)
+ pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+ return mask;
+}
+
+static void __init gic_dist_init(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ u32 cpumask;
+ unsigned int gic_irqs = gic->gic_irqs;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ writel_relaxed(0, base + GIC_DIST_CTRL);
+
+ /*
+ * Set all global interrupts to be level triggered, active low.
+ */
+ for (i = 32; i < gic_irqs; i += 16)
+ writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
+
+ /*
+ * Set all global interrupts to this CPU only.
+ */
+ cpumask = gic_get_cpumask(gic);
+ cpumask |= cpumask << 8;
+ cpumask |= cpumask << 16;
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+
+ /*
+ * Set priority on all global interrupts.
+ */
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
+
+ /*
+ * Disable all interrupts. Leave the PPI and SGIs alone
+ * as these enables are banked registers.
+ */
+ for (i = 32; i < gic_irqs; i += 32)
+ writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+
+ writel_relaxed(1, base + GIC_DIST_CTRL);
+}
+
+static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
+{
+ void __iomem *dist_base = gic_data_dist_base(gic);
+ void __iomem *base = gic_data_cpu_base(gic);
+ unsigned int cpu_mask, cpu = smp_processor_id();
+ int i;
+
+ /*
+ * Get what the GIC says our CPU mask is.
+ */
+ BUG_ON(cpu >= NR_GIC_CPU_IF);
+ cpu_mask = gic_get_cpumask(gic);
+ gic_cpu_map[cpu] = cpu_mask;
+
+ /*
+ * Clear our mask from the other map entries in case they're
+ * still undefined.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ if (i != cpu)
+ gic_cpu_map[i] &= ~cpu_mask;
+
+ /*
+ * Deal with the banked PPI and SGI interrupts - disable all
+ * PPI interrupts, ensure all SGI interrupts are enabled.
+ */
+ writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+ writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+ /*
+ * Set priority on PPI and SGI interrupts
+ */
+ for (i = 0; i < 32; i += 4)
+ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
+ writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, base + GIC_CPU_CTRL);
+}
+
+#ifdef CONFIG_CPU_PM
+/*
+ * Saves the GIC distributor registers during suspend or idle. Must be called
+ * with interrupts disabled but before powering down the GIC. After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+static void gic_dist_save(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ void __iomem *dist_base;
+ int i;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+ if (!dist_base)
+ return;
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ gic_data[gic_nr].saved_spi_conf[i] =
+ readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ gic_data[gic_nr].saved_spi_target[i] =
+ readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ gic_data[gic_nr].saved_spi_enable[i] =
+ readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle. Must be called before enabling interrupts. If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ unsigned int i;
+ void __iomem *dist_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+ if (!dist_base)
+ return;
+
+ writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+ dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(0xa0a0a0a0,
+ dist_base + GIC_DIST_PRI + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+ dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+ dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+
+ writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++) {
+#ifdef CONFIG_GIC_NON_BANKED
+ /* Skip over unused GICs */
+ if (!gic_data[i].get_base)
+ continue;
+#endif
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ gic_cpu_save(i);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ gic_cpu_restore(i);
+ break;
+ case CPU_CLUSTER_PM_ENTER:
+ gic_dist_save(i);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ gic_dist_restore(i);
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+ .notifier_call = gic_notifier,
+};
+
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+ gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_enable);
+
+ gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_conf);
+
+ if (gic == &gic_data[0])
+ cpu_pm_register_notifier(&gic_notifier_block);
+}
+#else
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+}
+#endif
+
+#ifdef CONFIG_SMP
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+ int cpu;
+ unsigned long map = 0;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+ /* this always happens on GIC0 */
+ writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+#endif
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < 32) {
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_percpu_devid_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ } else {
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_fasteoi_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+ if (intsize < 3)
+ return -EINVAL;
+
+ /* Get the interrupt number and add 16 to skip over SGIs */
+ *out_hwirq = intspec[1] + 16;
+
+ /* For SPIs, we need to add 16 more to get the GIC irq ID number */
+ if (!intspec[0])
+ *out_hwirq += 16;
+
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+const struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .xlate = gic_irq_domain_xlate,
+};
+
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ void __iomem *dist_base, void __iomem *cpu_base,
+ u32 percpu_offset, struct device_node *node)
+{
+ irq_hw_number_t hwirq_base;
+ struct gic_chip_data *gic;
+ int gic_irqs, irq_base, i;
+
+ BUG_ON(gic_nr >= MAX_GIC_NR);
+
+ gic = &gic_data[gic_nr];
+#ifdef CONFIG_GIC_NON_BANKED
+ if (percpu_offset) { /* Franken-GIC without banked registers... */
+ unsigned int cpu;
+
+ gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+ gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+ if (WARN_ON(!gic->dist_base.percpu_base ||
+ !gic->cpu_base.percpu_base)) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+ *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+ *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+ }
+
+ gic_set_base_accessor(gic, gic_get_percpu_base);
+ } else
+#endif
+ { /* Normal, sane GIC... */
+ WARN(percpu_offset,
+ "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+ percpu_offset);
+ gic->dist_base.common_base = dist_base;
+ gic->cpu_base.common_base = cpu_base;
+ gic_set_base_accessor(gic, gic_get_common_base);
+ }
+
+ /*
+ * Initialize the CPU interface map to all CPUs.
+ * It will be refined as each CPU probes its ID.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ gic_cpu_map[i] = 0xff;
+
+ /*
+ * For primary GICs, skip over SGIs.
+ * For secondary GICs, skip over PPIs, too.
+ */
+ if (gic_nr == 0 && (irq_start & 31) > 0) {
+ hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ } else {
+ hwirq_base = 32;
+ }
+
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources.
+ */
+ gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
+ gic_irqs = (gic_irqs + 1) * 32;
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ gic->gic_irqs = gic_irqs;
+
+ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+ irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
+ if (IS_ERR_VALUE(irq_base)) {
+ WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+ irq_start);
+ irq_base = irq_start;
+ }
+ gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+ hwirq_base, &gic_irq_domain_ops, gic);
+ if (WARN_ON(!gic->domain))
+ return;
+
+#ifdef CONFIG_SMP
+ set_smp_cross_call(gic_raise_softirq);
+#endif
+
+ set_handle_irq(gic_handle_irq);
+
+ gic_chip.flags |= gic_arch_extn.flags;
+ gic_dist_init(gic);
+ gic_cpu_init(gic);
+ gic_pm_init(gic);
+}
+
+void __cpuinit gic_secondary_init(unsigned int gic_nr)
+{
+ BUG_ON(gic_nr >= MAX_GIC_NR);
+
+ gic_cpu_init(&gic_data[gic_nr]);
+}
+
+#ifdef CONFIG_OF
+static int gic_cnt __initdata = 0;
+
+int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *cpu_base;
+ void __iomem *dist_base;
+ u32 percpu_offset;
+ int irq;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ dist_base = of_iomap(node, 0);
+ WARN(!dist_base, "unable to map gic dist registers\n");
+
+ cpu_base = of_iomap(node, 1);
+ WARN(!cpu_base, "unable to map gic cpu registers\n");
+
+ if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
+ percpu_offset = 0;
+
+ gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+
+ if (parent) {
+ irq = irq_of_parse_and_map(node, 0);
+ gic_cascade_irq(gic_cnt, irq);
+ }
+ gic_cnt++;
+ return 0;
+}
+IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
+IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+
+#endif
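
Both the GIC and the combiner handle cascaded controllers with the chained-handler pattern: enter the chained context, decode the pending child interrupt, hand it to generic_handle_irq(), then exit. A reduced sketch of that pattern with a hypothetical status register:

	#include <linux/bitops.h>
	#include <linux/io.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <asm/mach/irq.h>

	struct example_intc {
		void __iomem	  *base;	/* controller registers */
		struct irq_domain *domain;	/* child hwirq -> linux irq */
	};

	#define EXAMPLE_PENDING	0x00		/* hypothetical status register */

	static void example_cascade_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct example_intc *intc = irq_get_handler_data(irq);
		struct irq_chip *chip = irq_get_chip(irq);
		unsigned long status;

		chained_irq_enter(chip, desc);		/* ack/mask the parent IRQ */

		status = readl_relaxed(intc->base + EXAMPLE_PENDING);
		if (status)
			generic_handle_irq(irq_find_mapping(intc->domain,
							    __ffs(status)));

		chained_irq_exit(chip, desc);		/* eoi/unmask the parent IRQ */
	}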
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
new file mode 100644
index 000000000000..3cf97aaebe40
--- /dev/null
+++ b/drivers/irqchip/irq-vic.c
@@ -0,0 +1,489 @@
+/*
+ * linux/arch/arm/common/vic.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/irqchip/arm-vic.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+#define VIC_IRQ_STATUS 0x00
+#define VIC_FIQ_STATUS 0x04
+#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
+#define VIC_INT_SOFT 0x18
+#define VIC_INT_SOFT_CLEAR 0x1c
+#define VIC_PROTECT 0x20
+#define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */
+#define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */
+
+#define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */
+#define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */
+#define VIC_ITCR 0x300 /* VIC test control register */
+
+#define VIC_VECT_CNTL_ENABLE (1 << 5)
+
+#define VIC_PL192_VECT_ADDR 0xF00
+
+/**
+ * struct vic_device - VIC PM device
+ * @irq: The IRQ number for the base of the VIC.
+ * @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
+ * @resume_sources: A bitmask of interrupts for resume.
+ * @resume_irqs: The IRQs enabled for resume.
+ * @int_select: Save for VIC_INT_SELECT.
+ * @int_enable: Save for VIC_INT_ENABLE.
+ * @soft_int: Save for VIC_INT_SOFT.
+ * @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
+ */
+struct vic_device {
+ void __iomem *base;
+ int irq;
+ u32 valid_sources;
+ u32 resume_sources;
+ u32 resume_irqs;
+ u32 int_select;
+ u32 int_enable;
+ u32 soft_int;
+ u32 protect;
+ struct irq_domain *domain;
+};
+
+/* we cannot allocate memory when VICs are initially registered */
+static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
+
+static int vic_id;
+
+static void vic_handle_irq(struct pt_regs *regs);
+
+/**
+ * vic_init2 - common initialisation code
+ * @base: Base of the VIC.
+ *
+ * Common initialisation code for registration
+ * and resume.
+*/
+static void vic_init2(void __iomem *base)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(VIC_VECT_CNTL_ENABLE | i, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+}
+
+#ifdef CONFIG_PM
+static void resume_one_vic(struct vic_device *vic)
+{
+ void __iomem *base = vic->base;
+
+ printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
+
+ /* re-initialise static settings */
+ vic_init2(base);
+
+ writel(vic->int_select, base + VIC_INT_SELECT);
+ writel(vic->protect, base + VIC_PROTECT);
+
+ /* set the enabled ints and then clear the non-enabled */
+ writel(vic->int_enable, base + VIC_INT_ENABLE);
+ writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
+
+ /* and the same for the soft-int register */
+
+ writel(vic->soft_int, base + VIC_INT_SOFT);
+ writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_resume(void)
+{
+ int id;
+
+ for (id = vic_id - 1; id >= 0; id--)
+ resume_one_vic(vic_devices + id);
+}
+
+static void suspend_one_vic(struct vic_device *vic)
+{
+ void __iomem *base = vic->base;
+
+ printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
+
+ vic->int_select = readl(base + VIC_INT_SELECT);
+ vic->int_enable = readl(base + VIC_INT_ENABLE);
+ vic->soft_int = readl(base + VIC_INT_SOFT);
+ vic->protect = readl(base + VIC_PROTECT);
+
+ /* set the interrupts (if any) that are used for
+ * resuming the system */
+
+ writel(vic->resume_irqs, base + VIC_INT_ENABLE);
+ writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static int vic_suspend(void)
+{
+ int id;
+
+ for (id = 0; id < vic_id; id++)
+ suspend_one_vic(vic_devices + id);
+
+ return 0;
+}
+
+struct syscore_ops vic_syscore_ops = {
+ .suspend = vic_suspend,
+ .resume = vic_resume,
+};
+
+/**
+ * vic_pm_init - initcall to register VIC pm
+ *
+ * This is called via late_initcall() to register
+ * the resources for the VICs due to the early
+ * nature of the VIC's registration.
+*/
+static int __init vic_pm_init(void)
+{
+ if (vic_id > 0)
+ register_syscore_ops(&vic_syscore_ops);
+
+ return 0;
+}
+late_initcall(vic_pm_init);
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct vic_device *v = d->host_data;
+
+ /* Skip invalid IRQs, only register handlers for the real ones */
+ if (!(v->valid_sources & (1 << hwirq)))
+ return -ENOTSUPP;
+ irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+ irq_set_chip_data(irq, v->base);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ return 0;
+}
+
+/*
+ * Handle each interrupt in a single VIC. Returns non-zero if we've
+ * handled at least one interrupt. This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+ u32 stat, irq;
+ int handled = 0;
+
+ while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+ irq = ffs(stat) - 1;
+ handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
+ handled = 1;
+ }
+
+ return handled;
+}
+
+/*
+ * Keep iterating over all registered VIC's until there are no pending
+ * interrupts.
+ */
+static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+ int i, handled;
+
+ do {
+ for (i = 0, handled = 0; i < vic_id; ++i)
+ handled |= handle_one_vic(&vic_devices[i], regs);
+ } while (handled);
+}
+
+static struct irq_domain_ops vic_irqdomain_ops = {
+ .map = vic_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * vic_register() - Register a VIC.
+ * @base: The base address of the VIC.
+ * @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
+ * @resume_sources: bitmask of interrupts allowed for resume sources.
+ * @node: The device tree node associated with the VIC.
+ *
+ * Register the VIC with the system device tree so that it can be notified
+ * of suspend and resume requests and ensure that the correct actions are
+ * taken to re-instate the settings on resume.
+ *
+ * This also configures the IRQ domain for the VIC.
+ */
+static void __init vic_register(void __iomem *base, unsigned int irq,
+ u32 valid_sources, u32 resume_sources,
+ struct device_node *node)
+{
+ struct vic_device *v;
+ int i;
+
+ if (vic_id >= ARRAY_SIZE(vic_devices)) {
+ printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
+ return;
+ }
+
+ v = &vic_devices[vic_id];
+ v->base = base;
+ v->valid_sources = valid_sources;
+ v->resume_sources = resume_sources;
+ v->irq = irq;
+ set_handle_irq(vic_handle_irq);
+ vic_id++;
+ v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
+ &vic_irqdomain_ops, v);
+ /* create an IRQ mapping for each valid IRQ */
+ for (i = 0; i < fls(valid_sources); i++)
+ if (valid_sources & (1 << i))
+ irq_create_mapping(v->domain, i);
+}
+
+static void vic_ack_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+ /* moreover, clear the soft-triggered, in case it was the reason */
+ writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_mask_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static void vic_unmask_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE);
+}
+
+#if defined(CONFIG_PM)
+static struct vic_device *vic_from_irq(unsigned int irq)
+{
+ struct vic_device *v = vic_devices;
+ unsigned int base_irq = irq & ~31;
+ int id;
+
+ for (id = 0; id < vic_id; id++, v++) {
+ if (v->irq == base_irq)
+ return v;
+ }
+
+ return NULL;
+}
+
+static int vic_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct vic_device *v = vic_from_irq(d->irq);
+ unsigned int off = d->hwirq;
+ u32 bit = 1 << off;
+
+ if (!v)
+ return -EINVAL;
+
+ if (!(bit & v->resume_sources))
+ return -EINVAL;
+
+ if (on)
+ v->resume_irqs |= bit;
+ else
+ v->resume_irqs &= ~bit;
+
+ return 0;
+}
+#else
+#define vic_set_wake NULL
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip = {
+ .name = "VIC",
+ .irq_ack = vic_ack_irq,
+ .irq_mask = vic_mask_irq,
+ .irq_unmask = vic_unmask_irq,
+ .irq_set_wake = vic_set_wake,
+};
+
+static void __init vic_disable(void __iomem *base)
+{
+ writel(0, base + VIC_INT_SELECT);
+ writel(0, base + VIC_INT_ENABLE);
+ writel(~0, base + VIC_INT_ENABLE_CLEAR);
+ writel(0, base + VIC_ITCR);
+ writel(~0, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void __init vic_clear_interrupts(void __iomem *base)
+{
+ unsigned int i;
+
+ writel(0, base + VIC_PL190_VECT_ADDR);
+ for (i = 0; i < 19; i++) {
+ unsigned int value;
+
+ value = readl(base + VIC_PL190_VECT_ADDR);
+ writel(value, base + VIC_PL190_VECT_ADDR);
+ }
+}
+
+/*
+ * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
+ * The original cell has 32 interrupts, while the modified one has 64,
+ * replicating the register block at 0x00..0x1f into 0x20..0x3f. In that case
+ * the probe function is called twice, with base set to offset 000
+ * and 020 within the page. We call this "second block".
+ */
+static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, struct device_node *node)
+{
+ unsigned int i;
+ int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
+
+ /* Disable all interrupts initially. */
+ vic_disable(base);
+
+ /*
+ * Make sure we clear all existing interrupts. The vector registers
+ * in this cell are after the second block of general registers,
+ * so we can address them using standard offsets, but only from
+ * the second base address, which is 0x20 in the page
+ */
+ if (vic_2nd_block) {
+ vic_clear_interrupts(base);
+
+ /* ST has 16 vectors as well, but we don't enable them for now */
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(0, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+ }
+
+ vic_register(base, irq_start, vic_sources, 0, node);
+}
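The second-block test above is only an address calculation: it checks whether the mapped base sits at a non-zero offset inside its page. A small worked example with an invented mapping:

void __iomem *base = (void __iomem *)0xfeb00020;	/* invented address */
int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
/* the page offset is 0x20, so vic_2nd_block == 1 and the ST-specific
 * clearing of the upper 32 sources runs; at offset 0x00 it would not */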
+
+void __init __vic_init(void __iomem *base, int irq_start,
+ u32 vic_sources, u32 resume_sources,
+ struct device_node *node)
+{
+ unsigned int i;
+ u32 cellid = 0;
+ enum amba_vendor vendor;
+
+ /* Identify which VIC cell this one is, by reading the ID */
+ for (i = 0; i < 4; i++) {
+ void __iomem *addr;
+ addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+ cellid |= (readl(addr) & 0xff) << (8 * i);
+ }
+ vendor = (cellid >> 12) & 0xff;
+ printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
+ base, cellid, vendor);
+
+ switch(vendor) {
+ case AMBA_VENDOR_ST:
+ vic_init_st(base, irq_start, vic_sources, node);
+ return;
+ default:
+ printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
+ /* fall through */
+ case AMBA_VENDOR_ARM:
+ break;
+ }
+
+ /* Disable all interrupts initially. */
+ vic_disable(base);
+
+ /* Make sure we clear all existing interrupts */
+ vic_clear_interrupts(base);
+
+ vic_init2(base);
+
+ vic_register(base, irq_start, vic_sources, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be a multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, u32 resume_sources)
+{
+ __vic_init(base, irq_start, vic_sources, resume_sources, NULL);
+}
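A hedged example of this legacy, non-DT entry point: a board file calls vic_init() once per controller from its init_irq hook. The base symbol below is illustrative only; irq_start follows the multiple-of-32 rule documented above:

/* VIC at a board-specific virtual address, Linux IRQs 32..63,
 * all 32 sources valid, none of them wakeup-capable */
vic_init(HYPOTHETICAL_VIC_VA, 32, ~0, 0);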
+
+#ifdef CONFIG_OF
+int __init vic_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *regs;
+
+ if (WARN(parent, "non-root VICs are not supported"))
+ return -EINVAL;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ /*
+ * Passing 0 as first IRQ makes the simple domain allocate descriptors
+ */
+ __vic_init(regs, 0, ~0, ~0, node);
+
+ return 0;
+}
+IRQCHIP_DECLARE(arm_pl190_vic, "arm,pl190-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_pl192_vic, "arm,pl192-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_versatile_vic, "arm,versatile-vic", vic_of_init);
+#endif /* CONFIG_OF */
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
new file mode 100644
index 000000000000..f496afce29de
--- /dev/null
+++ b/drivers/irqchip/irqchip.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/of_irq.h>
+
+#include "irqchip.h"
+
+/*
+ * This special of_device_id is the sentinel at the end of the
+ * of_device_id[] array of all irqchips. It is automatically placed at
+ * the end of the array by the linker, thanks to being part of a
+ * special section.
+ */
+static const struct of_device_id
+irqchip_of_match_end __used __section(__irqchip_of_end);
+
+extern struct of_device_id __irqchip_begin[];
+
+void __init irqchip_init(void)
+{
+ of_irq_init(__irqchip_begin);
+}
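On a DT platform the table walk is typically triggered from the machine descriptor. A minimal sketch, assuming arch/arm's DT_MACHINE_START; the board name and compatible string are invented:

static const char * const hypothetical_dt_compat[] = {
	"vendor,hypothetical-board",
	NULL,
};

DT_MACHINE_START(HYPOTHETICAL, "Hypothetical DT board")
	.dt_compat	= hypothetical_dt_compat,
	.init_irq	= irqchip_init,
MACHINE_END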
diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h
new file mode 100644
index 000000000000..e445ba2d6add
--- /dev/null
+++ b/drivers/irqchip/irqchip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _IRQCHIP_H
+#define _IRQCHIP_H
+
+/*
+ * This macro must be used by the different irqchip drivers to declare
+ * the association between their DT compatible string and their
+ * initialization function.
+ *
+ * @name: name that must be unique across all IRQCHIP_DECLARE() uses in
+ * the same file.
+ * @compstr: compatible string of the irqchip driver
+ * @fn: initialization function
+ */
+#define IRQCHIP_DECLARE(name,compstr,fn) \
+ static const struct of_device_id irqchip_of_match_##name \
+ __used __section(__irqchip_of_table) \
+ = { .compatible = compstr, .data = fn }
+
+#endif
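A hedged sketch of a driver using the macro: the init function must have the of_irq_init() callback signature, and every name below is invented:

static int __init hypothetical_intc_of_init(struct device_node *node,
					    struct device_node *parent)
{
	/* map registers, create an irq_domain, install handlers ... */
	return 0;
}
IRQCHIP_DECLARE(hypothetical_intc, "vendor,hypothetical-intc",
		hypothetical_intc_of_init);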
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
new file mode 100644
index 000000000000..8527743b5cef
--- /dev/null
+++ b/drivers/irqchip/spear-shirq.c
@@ -0,0 +1,321 @@
+/*
+ * SPEAr platform shared irq layer source file
+ *
+ * Copyright (C) 2009-2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/spear-shirq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "irqchip.h"
+
+static DEFINE_SPINLOCK(lock);
+
+/* spear300 shared irq registers offsets and masks */
+#define SPEAR300_INT_ENB_MASK_REG 0x54
+#define SPEAR300_INT_STS_MASK_REG 0x58
+
+static struct spear_shirq spear300_shirq_ras1 = {
+ .irq_nr = 9,
+ .irq_bit_off = 0,
+ .regs = {
+ .enb_reg = SPEAR300_INT_ENB_MASK_REG,
+ .status_reg = SPEAR300_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq *spear300_shirq_blocks[] = {
+ &spear300_shirq_ras1,
+};
+
+/* spear310 shared irq registers offsets and masks */
+#define SPEAR310_INT_STS_MASK_REG 0x04
+
+static struct spear_shirq spear310_shirq_ras1 = {
+ .irq_nr = 8,
+ .irq_bit_off = 0,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_ras2 = {
+ .irq_nr = 5,
+ .irq_bit_off = 8,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_ras3 = {
+ .irq_nr = 1,
+ .irq_bit_off = 13,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_intrcomm_ras = {
+ .irq_nr = 3,
+ .irq_bit_off = 14,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq *spear310_shirq_blocks[] = {
+ &spear310_shirq_ras1,
+ &spear310_shirq_ras2,
+ &spear310_shirq_ras3,
+ &spear310_shirq_intrcomm_ras,
+};
+
+/* spear320 shared irq registers offsets and masks */
+#define SPEAR320_INT_STS_MASK_REG 0x04
+#define SPEAR320_INT_CLR_MASK_REG 0x04
+#define SPEAR320_INT_ENB_MASK_REG 0x08
+
+static struct spear_shirq spear320_shirq_ras1 = {
+ .irq_nr = 3,
+ .irq_bit_off = 7,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_ras2 = {
+ .irq_nr = 1,
+ .irq_bit_off = 10,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_ras3 = {
+ .irq_nr = 3,
+ .irq_bit_off = 0,
+ .invalid_irq = 1,
+ .regs = {
+ .enb_reg = SPEAR320_INT_ENB_MASK_REG,
+ .reset_to_enb = 1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_intrcomm_ras = {
+ .irq_nr = 11,
+ .irq_bit_off = 11,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq *spear320_shirq_blocks[] = {
+ &spear320_shirq_ras3,
+ &spear320_shirq_ras1,
+ &spear320_shirq_ras2,
+ &spear320_shirq_intrcomm_ras,
+};
+
+static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
+{
+ struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+ u32 val, offset = d->irq - shirq->irq_base;
+ unsigned long flags;
+
+ if (shirq->regs.enb_reg == -1)
+ return;
+
+ spin_lock_irqsave(&lock, flags);
+ val = readl(shirq->base + shirq->regs.enb_reg);
+
+ if (mask ^ shirq->regs.reset_to_enb)
+ val &= ~(0x1 << shirq->irq_bit_off << offset);
+ else
+ val |= 0x1 << shirq->irq_bit_off << offset;
+
+ writel(val, shirq->base + shirq->regs.enb_reg);
+ spin_unlock_irqrestore(&lock, flags);
+
+}
+
+static void shirq_irq_mask(struct irq_data *d)
+{
+ shirq_irq_mask_unmask(d, 1);
+}
+
+static void shirq_irq_unmask(struct irq_data *d)
+{
+ shirq_irq_mask_unmask(d, 0);
+}
+
+static struct irq_chip shirq_chip = {
+ .name = "spear-shirq",
+ .irq_ack = shirq_irq_mask,
+ .irq_mask = shirq_irq_mask,
+ .irq_unmask = shirq_irq_unmask,
+};
+
+static void shirq_handler(unsigned irq, struct irq_desc *desc)
+{
+ u32 i, j, val, mask, tmp;
+ struct irq_chip *chip;
+ struct spear_shirq *shirq = irq_get_handler_data(irq);
+
+ chip = irq_get_chip(irq);
+ chip->irq_ack(&desc->irq_data);
+
+ mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
+ while ((val = readl(shirq->base + shirq->regs.status_reg) &
+ mask)) {
+
+ val >>= shirq->irq_bit_off;
+ for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
+
+ if (!(j & val))
+ continue;
+
+ generic_handle_irq(shirq->irq_base + i);
+
+ /* clear interrupt */
+ if (shirq->regs.clear_reg == -1)
+ continue;
+
+ tmp = readl(shirq->base + shirq->regs.clear_reg);
+ if (shirq->regs.reset_to_clear)
+ tmp &= ~(j << shirq->irq_bit_off);
+ else
+ tmp |= (j << shirq->irq_bit_off);
+ writel(tmp, shirq->base + shirq->regs.clear_reg);
+ }
+ }
+ chip->irq_unmask(&desc->irq_data);
+}
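The mask computed at the top of shirq_handler() just selects the block's bit field inside the shared status register. Taking the spear310 ras2 block defined above (irq_nr = 5, irq_bit_off = 8) as a worked example:

u32 mask = ((0x1 << 5) - 1) << 8;	/* == 0x1f00: status bits 8..12 */
/* after the "val >>= shirq->irq_bit_off" shift, pending source i shows up
 * as bit i of val and is dispatched as shirq->irq_base + i */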
+
+static void __init spear_shirq_register(struct spear_shirq *shirq)
+{
+ int i;
+
+ if (shirq->invalid_irq)
+ return;
+
+ irq_set_chained_handler(shirq->irq, shirq_handler);
+ for (i = 0; i < shirq->irq_nr; i++) {
+ irq_set_chip_and_handler(shirq->irq_base + i,
+ &shirq_chip, handle_simple_irq);
+ set_irq_flags(shirq->irq_base + i, IRQF_VALID);
+ irq_set_chip_data(shirq->irq_base + i, shirq);
+ }
+
+ irq_set_handler_data(shirq->irq, shirq);
+}
+
+static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
+ struct device_node *np)
+{
+ int i, irq_base, hwirq = 0, irq_nr = 0;
+ static struct irq_domain *shirq_domain;
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: failed to map shirq registers\n", __func__);
+ return -ENXIO;
+ }
+
+ for (i = 0; i < block_nr; i++)
+ irq_nr += shirq_blocks[i]->irq_nr;
+
+ irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ pr_err("%s: irq desc alloc failed\n", __func__);
+ goto err_unmap;
+ }
+
+ shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (WARN_ON(!shirq_domain)) {
+ pr_warn("%s: irq domain init failed\n", __func__);
+ goto err_free_desc;
+ }
+
+ for (i = 0; i < block_nr; i++) {
+ shirq_blocks[i]->base = base;
+ shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
+ hwirq);
+ shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);
+
+ spear_shirq_register(shirq_blocks[i]);
+ hwirq += shirq_blocks[i]->irq_nr;
+ }
+
+ return 0;
+
+err_free_desc:
+ irq_free_descs(irq_base, irq_nr);
+err_unmap:
+ iounmap(base);
+ return -ENXIO;
+}
+
+int __init spear300_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear300_shirq_blocks,
+ ARRAY_SIZE(spear300_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
+
+int __init spear310_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear310_shirq_blocks,
+ ARRAY_SIZE(spear310_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
+
+int __init spear320_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear320_shirq_blocks,
+ ARRAY_SIZE(spear320_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 86cd75a0e84d..ef661acdda17 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -22,6 +22,7 @@ if ISDN
menuconfig ISDN_I4L
tristate "Old ISDN4Linux (deprecated)"
+ depends on TTY
---help---
This driver allows you to use an ISDN adapter for networking
connections and as dialin/out device. The isdn-tty's have a built
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 15c3ffd9d860..f04686580040 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -18,6 +18,7 @@ config CAPI_TRACE
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support"
+ depends on TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
diff --git a/drivers/isdn/divert/divert_init.c b/drivers/isdn/divert/divert_init.c
index 5374c25f036c..267dede13bfd 100644
--- a/drivers/isdn/divert/divert_init.c
+++ b/drivers/isdn/divert/divert_init.c
@@ -22,13 +22,13 @@ MODULE_LICENSE("GPL");
/****************************************/
/* structure containing interface to hl */
/****************************************/
-isdn_divert_if divert_if =
-{ DIVERT_IF_MAGIC, /* magic value */
- DIVERT_CMD_REG, /* register cmd */
- ll_callback, /* callback routine from ll */
- NULL, /* command still not specified */
- NULL, /* drv_to_name */
- NULL, /* name_to_drv */
+isdn_divert_if divert_if = {
+ DIVERT_IF_MAGIC, /* magic value */
+ DIVERT_CMD_REG, /* register cmd */
+ ll_callback, /* callback routine from ll */
+ NULL, /* command still not specified */
+ NULL, /* drv_to_name */
+ NULL, /* name_to_drv */
};
/*************************/
@@ -36,14 +36,15 @@ isdn_divert_if divert_if =
/* no cmd line parms */
/*************************/
static int __init divert_init(void)
-{ int i;
+{
+ int i;
- if (divert_dev_init())
- { printk(KERN_WARNING "dss1_divert: cannot install device, not loaded\n");
+ if (divert_dev_init()) {
+ printk(KERN_WARNING "dss1_divert: cannot install device, not loaded\n");
return (-EIO);
}
- if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR)
- { divert_dev_deinit();
+ if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR) {
+ divert_dev_deinit();
printk(KERN_WARNING "dss1_divert: error %d registering module, not loaded\n", i);
return (-EIO);
}
@@ -61,13 +62,13 @@ static void __exit divert_exit(void)
spin_lock_irqsave(&divert_lock, flags);
divert_if.cmd = DIVERT_CMD_REL; /* release */
- if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR)
- { printk(KERN_WARNING "dss1_divert: error %d releasing module\n", i);
+ if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR) {
+ printk(KERN_WARNING "dss1_divert: error %d releasing module\n", i);
spin_unlock_irqrestore(&divert_lock, flags);
return;
}
- if (divert_dev_deinit())
- { printk(KERN_WARNING "dss1_divert: device busy, remove cancelled\n");
+ if (divert_dev_deinit()) {
+ printk(KERN_WARNING "dss1_divert: device busy, remove cancelled\n");
spin_unlock_irqrestore(&divert_lock, flags);
return;
}
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index e61e55f1f193..db432e635496 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -19,8 +19,8 @@
/**********************************/
/* structure keeping calling info */
/**********************************/
-struct call_struc
-{ isdn_ctrl ics; /* delivered setup + driver parameters */
+struct call_struc {
+ isdn_ctrl ics; /* delivered setup + driver parameters */
ulong divert_id; /* Id delivered to user */
unsigned char akt_state; /* actual state */
char deflect_dest[35]; /* deflection destination */
@@ -34,8 +34,8 @@ struct call_struc
/********************************************/
/* structure keeping deflection table entry */
/********************************************/
-struct deflect_struc
-{ struct deflect_struc *next, *prev;
+struct deflect_struc {
+ struct deflect_struc *next, *prev;
divert_rule rule; /* used rule */
};
@@ -64,16 +64,16 @@ static void deflect_timer_expire(ulong arg)
del_timer(&cs->timer); /* delete active timer */
spin_unlock_irqrestore(&divert_lock, flags);
- switch (cs->akt_state)
- { case DEFLECT_PROCEED:
- cs->ics.command = ISDN_CMD_HANGUP; /* cancel action */
- divert_if.ll_cmd(&cs->ics);
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- break;
+ switch (cs->akt_state) {
+ case DEFLECT_PROCEED:
+ cs->ics.command = ISDN_CMD_HANGUP; /* cancel action */
+ divert_if.ll_cmd(&cs->ics);
+ spin_lock_irqsave(&divert_lock, flags);
+ cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
+ cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
+ add_timer(&cs->timer);
+ spin_unlock_irqrestore(&divert_lock, flags);
+ break;
case DEFLECT_ALERT:
cs->ics.command = ISDN_CMD_REDIR; /* protocol */
@@ -111,7 +111,8 @@ static void deflect_timer_expire(ulong arg)
int cf_command(int drvid, int mode,
u_char proc, char *msn,
u_char service, char *fwd_nr, ulong *procid)
-{ unsigned long flags;
+{
+ unsigned long flags;
int retval, msnlen;
int fwd_len;
char *p, *ielenp, tmp[60];
@@ -130,8 +131,8 @@ int cf_command(int drvid, int mode,
*p++ = 1; /* length */
*p++ = service; /* service to handle */
- if (mode == 1)
- { if (!*fwd_nr) return (-EINVAL); /* destination missing */
+ if (mode == 1) {
+ if (!*fwd_nr) return (-EINVAL); /* destination missing */
if (strchr(fwd_nr, '.')) return (-EINVAL); /* subaddress not allowed */
fwd_len = strlen(fwd_nr);
*p++ = 0x30; /* number enumeration */
@@ -144,12 +145,12 @@ int cf_command(int drvid, int mode,
msnlen = strlen(msn);
*p++ = 0x80; /* msn number */
- if (msnlen > 1)
- { *p++ = msnlen; /* length */
+ if (msnlen > 1) {
+ *p++ = msnlen; /* length */
strcpy(p, msn);
p += msnlen;
- }
- else *p++ = 0;
+ } else
+ *p++ = 0;
*ielenp = p - ielenp - 1; /* set total IE length */
@@ -186,14 +187,13 @@ int cf_command(int drvid, int mode,
retval = divert_if.ll_cmd(&cs->ics); /* execute command */
- if (!retval)
- { cs->prev = NULL;
+ if (!retval) {
+ cs->prev = NULL;
spin_lock_irqsave(&divert_lock, flags);
cs->next = divert_head;
divert_head = cs;
spin_unlock_irqrestore(&divert_lock, flags);
- }
- else
+ } else
kfree(cs);
return (retval);
} /* cf_command */
@@ -203,15 +203,16 @@ int cf_command(int drvid, int mode,
/* handle a external deflection command */
/****************************************/
int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
-{ struct call_struc *cs;
+{
+ struct call_struc *cs;
isdn_ctrl ic;
unsigned long flags;
int i;
if ((cmd & 0x7F) > 2) return (-EINVAL); /* invalid command */
cs = divert_head; /* start of parameter list */
- while (cs)
- { if (cs->divert_id == callid) break; /* found */
+ while (cs) {
+ if (cs->divert_id == callid) break; /* found */
cs = cs->next;
} /* search entry */
if (!cs) return (-EINVAL); /* invalid callid */
@@ -220,32 +221,30 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
ic.arg = cs->ics.arg;
i = -EINVAL;
if (cs->akt_state == DEFLECT_AUTODEL) return (i); /* no valid call */
- switch (cmd & 0x7F)
- { case 0: /* hangup */
- del_timer(&cs->timer);
- ic.command = ISDN_CMD_HANGUP;
- i = divert_if.ll_cmd(&ic);
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- break;
+ switch (cmd & 0x7F) {
+ case 0: /* hangup */
+ del_timer(&cs->timer);
+ ic.command = ISDN_CMD_HANGUP;
+ i = divert_if.ll_cmd(&ic);
+ spin_lock_irqsave(&divert_lock, flags);
+ cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
+ cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
+ add_timer(&cs->timer);
+ spin_unlock_irqrestore(&divert_lock, flags);
+ break;
case 1: /* alert */
if (cs->akt_state == DEFLECT_ALERT) return (0);
cmd &= 0x7F; /* never wait */
del_timer(&cs->timer);
ic.command = ISDN_CMD_ALERT;
- if ((i = divert_if.ll_cmd(&ic)))
- {
+ if ((i = divert_if.ll_cmd(&ic))) {
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
- }
- else
+ } else
cs->akt_state = DEFLECT_ALERT;
break;
@@ -254,15 +253,13 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
strlcpy(cs->ics.parm.setup.phone, to_nr, sizeof(cs->ics.parm.setup.phone));
strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual");
ic.command = ISDN_CMD_REDIR;
- if ((i = divert_if.ll_cmd(&ic)))
- {
+ if ((i = divert_if.ll_cmd(&ic))) {
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
- }
- else
+ } else
cs->akt_state = DEFLECT_ALERT;
break;
@@ -274,19 +271,19 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
/* insert a new rule before idx */
/********************************/
int insertrule(int idx, divert_rule *newrule)
-{ struct deflect_struc *ds, *ds1 = NULL;
+{
+ struct deflect_struc *ds, *ds1 = NULL;
unsigned long flags;
- if (!(ds = kmalloc(sizeof(struct deflect_struc),
- GFP_KERNEL)))
+ if (!(ds = kmalloc(sizeof(struct deflect_struc), GFP_KERNEL)))
return (-ENOMEM); /* no memory */
ds->rule = *newrule; /* set rule */
spin_lock_irqsave(&divert_lock, flags);
- if (idx >= 0)
- { ds1 = table_head;
+ if (idx >= 0) {
+ ds1 = table_head;
while ((ds1) && (idx > 0))
{ idx--;
ds1 = ds1->next;
@@ -294,17 +291,16 @@ int insertrule(int idx, divert_rule *newrule)
if (!ds1) idx = -1;
}
- if (idx < 0)
- { ds->prev = table_tail; /* previous entry */
+ if (idx < 0) {
+ ds->prev = table_tail; /* previous entry */
ds->next = NULL; /* end of chain */
if (ds->prev)
ds->prev->next = ds; /* last forward */
else
table_head = ds; /* is first entry */
table_tail = ds; /* end of queue */
- }
- else
- { ds->next = ds1; /* next entry */
+ } else {
+ ds->next = ds1; /* next entry */
ds->prev = ds1->prev; /* prev entry */
ds1->prev = ds; /* backward chain old element */
if (!ds->prev)
@@ -319,17 +315,18 @@ int insertrule(int idx, divert_rule *newrule)
/* delete the rule at position idx */
/***********************************/
int deleterule(int idx)
-{ struct deflect_struc *ds, *ds1;
+{
+ struct deflect_struc *ds, *ds1;
unsigned long flags;
- if (idx < 0)
- { spin_lock_irqsave(&divert_lock, flags);
+ if (idx < 0) {
+ spin_lock_irqsave(&divert_lock, flags);
ds = table_head;
table_head = NULL;
table_tail = NULL;
spin_unlock_irqrestore(&divert_lock, flags);
- while (ds)
- { ds1 = ds;
+ while (ds) {
+ ds1 = ds;
ds = ds->next;
kfree(ds1);
}
@@ -339,13 +336,12 @@ int deleterule(int idx)
spin_lock_irqsave(&divert_lock, flags);
ds = table_head;
- while ((ds) && (idx > 0))
- { idx--;
+ while ((ds) && (idx > 0)) {
+ idx--;
ds = ds->next;
}
- if (!ds)
- {
+ if (!ds) {
spin_unlock_irqrestore(&divert_lock, flags);
return (-EINVAL);
}
@@ -369,12 +365,13 @@ int deleterule(int idx)
/* get a pointer to a specific rule number */
/*******************************************/
divert_rule *getruleptr(int idx)
-{ struct deflect_struc *ds = table_head;
+{
+ struct deflect_struc *ds = table_head;
if (idx < 0) return (NULL);
- while ((ds) && (idx >= 0))
- { if (!(idx--))
- { return (&ds->rule);
+ while ((ds) && (idx >= 0)) {
+ if (!(idx--)) {
+ return (&ds->rule);
break;
}
ds = ds->next;
@@ -386,7 +383,8 @@ divert_rule *getruleptr(int idx)
/* called from common module on an incoming call */
/*************************************************/
static int isdn_divert_icall(isdn_ctrl *ic)
-{ int retval = 0;
+{
+ int retval = 0;
unsigned long flags;
struct call_struc *cs = NULL;
struct deflect_struc *dv;
@@ -394,8 +392,8 @@ static int isdn_divert_icall(isdn_ctrl *ic)
u_char accept;
/* first check the internal deflection table */
- for (dv = table_head; dv; dv = dv->next)
- { /* scan table */
+ for (dv = table_head; dv; dv = dv->next) {
+ /* scan table */
if (((dv->rule.callopt == 1) && (ic->command == ISDN_STAT_ICALLW)) ||
((dv->rule.callopt == 2) && (ic->command == ISDN_STAT_ICALL)))
continue; /* call option check */
@@ -409,10 +407,10 @@ static int isdn_divert_icall(isdn_ctrl *ic)
p = dv->rule.my_msn;
p1 = ic->parm.setup.eazmsn;
accept = 0;
- while (*p)
- { /* complete compare */
- if (*p == '-')
- { accept = 1; /* call accepted */
+ while (*p) {
+ /* complete compare */
+ if (*p == '-') {
+ accept = 1; /* call accepted */
break;
}
if (*p++ != *p1++)
@@ -422,14 +420,15 @@ static int isdn_divert_icall(isdn_ctrl *ic)
} /* complete compare */
if (!accept) continue; /* not accepted */
- if ((strcmp(dv->rule.caller, "0")) || (ic->parm.setup.phone[0]))
- { p = dv->rule.caller;
+ if ((strcmp(dv->rule.caller, "0")) ||
+ (ic->parm.setup.phone[0])) {
+ p = dv->rule.caller;
p1 = ic->parm.setup.phone;
accept = 0;
- while (*p)
- { /* complete compare */
- if (*p == '-')
- { accept = 1; /* call accepted */
+ while (*p) {
+ /* complete compare */
+ if (*p == '-') {
+ accept = 1; /* call accepted */
break;
}
if (*p++ != *p1++)
@@ -440,10 +439,10 @@ static int isdn_divert_icall(isdn_ctrl *ic)
if (!accept) continue; /* not accepted */
}
- switch (dv->rule.action)
- { case DEFLECT_IGNORE:
- return (0);
- break;
+ switch (dv->rule.action) {
+ case DEFLECT_IGNORE:
+ return (0);
+ break;
case DEFLECT_ALERT:
case DEFLECT_PROCEED:
@@ -465,31 +464,29 @@ static int isdn_divert_icall(isdn_ctrl *ic)
cs->ics.parm.setup.screen = dv->rule.screen;
if (dv->rule.waittime)
cs->timer.expires = jiffies + (HZ * dv->rule.waittime);
+ else if (dv->rule.action == DEFLECT_PROCEED)
+ cs->timer.expires = jiffies + (HZ * extern_wait_max);
else
- if (dv->rule.action == DEFLECT_PROCEED)
- cs->timer.expires = jiffies + (HZ * extern_wait_max);
- else
- cs->timer.expires = 0;
+ cs->timer.expires = 0;
cs->akt_state = dv->rule.action;
spin_lock_irqsave(&divert_lock, flags);
cs->divert_id = next_id++; /* new sequence number */
spin_unlock_irqrestore(&divert_lock, flags);
cs->prev = NULL;
- if (cs->akt_state == DEFLECT_ALERT)
- { strcpy(cs->deflect_dest, dv->rule.to_nr);
- if (!cs->timer.expires)
- { strcpy(ic->parm.setup.eazmsn, "Testtext direct");
+ if (cs->akt_state == DEFLECT_ALERT) {
+ strcpy(cs->deflect_dest, dv->rule.to_nr);
+ if (!cs->timer.expires) {
+ strcpy(ic->parm.setup.eazmsn,
+ "Testtext direct");
ic->parm.setup.screen = dv->rule.screen;
strlcpy(ic->parm.setup.phone, dv->rule.to_nr, sizeof(ic->parm.setup.phone));
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
retval = 5;
- }
- else
+ } else
retval = 1; /* alerting */
- }
- else
- { cs->deflect_dest[0] = '\0';
+ } else {
+ cs->deflect_dest[0] = '\0';
retval = 4; /* only proceed */
}
sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
@@ -505,8 +502,8 @@ static int isdn_divert_icall(isdn_ctrl *ic)
dv->rule.waittime,
cs->deflect_dest);
if ((dv->rule.action == DEFLECT_REPORT) ||
- (dv->rule.action == DEFLECT_REJECT))
- { put_info_buffer(cs->info);
+ (dv->rule.action == DEFLECT_REJECT)) {
+ put_info_buffer(cs->info);
kfree(cs); /* remove */
return ((dv->rule.action == DEFLECT_REPORT) ? 0 : 2); /* nothing to do */
}
@@ -519,8 +516,8 @@ static int isdn_divert_icall(isdn_ctrl *ic)
break;
} /* scan_table */
- if (cs)
- { cs->prev = NULL;
+ if (cs) {
+ cs->prev = NULL;
spin_lock_irqsave(&divert_lock, flags);
cs->next = divert_head;
divert_head = cs;
@@ -529,21 +526,21 @@ static int isdn_divert_icall(isdn_ctrl *ic)
put_info_buffer(cs->info);
return (retval);
- }
- else
+ } else
return (0);
} /* isdn_divert_icall */
void deleteprocs(void)
-{ struct call_struc *cs, *cs1;
+{
+ struct call_struc *cs, *cs1;
unsigned long flags;
spin_lock_irqsave(&divert_lock, flags);
cs = divert_head;
divert_head = NULL;
- while (cs)
- { del_timer(&cs->timer);
+ while (cs) {
+ del_timer(&cs->timer);
cs1 = cs;
cs = cs->next;
kfree(cs1);
@@ -555,12 +552,13 @@ void deleteprocs(void)
/* put a address including address type into buffer */
/****************************************************/
static int put_address(char *st, u_char *p, int len)
-{ u_char retval = 0;
+{
+ u_char retval = 0;
u_char adr_typ = 0; /* network standard */
if (len < 2) return (retval);
- if (*p == 0xA1)
- { retval = *(++p) + 2; /* total length */
+ if (*p == 0xA1) {
+ retval = *(++p) + 2; /* total length */
if (retval > len) return (0); /* too short */
len = retval - 2; /* remaining length */
if (len < 3) return (0);
@@ -572,16 +570,13 @@ static int put_address(char *st, u_char *p, int len)
if (*p++ != 0x12) return (0);
if (*p > len) return (0); /* check number length */
len = *p++;
- }
- else
- if (*p == 0x80)
- { retval = *(++p) + 2; /* total length */
- if (retval > len) return (0);
- len = retval - 2;
- p++;
- }
- else
- return (0); /* invalid address information */
+ } else if (*p == 0x80) {
+ retval = *(++p) + 2; /* total length */
+ if (retval > len) return (0);
+ len = retval - 2;
+ p++;
+ } else
+ return (0); /* invalid address information */
sprintf(st, "%d ", adr_typ);
st += strlen(st);
@@ -598,7 +593,8 @@ static int put_address(char *st, u_char *p, int len)
/* report a successful interrogation */
/*************************************/
static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
-{ char *src = ic->parm.dss1_io.data;
+{
+ char *src = ic->parm.dss1_io.data;
int restlen = ic->parm.dss1_io.datalen;
int cnt = 1;
u_char n, n1;
@@ -608,50 +604,44 @@ static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
if (*src++ != 0x30) return (-101);
if ((n = *src++) > 0x81) return (-102); /* invalid length field */
restlen -= 2; /* remaining bytes */
- if (n == 0x80)
- { if (restlen < 2) return (-103);
+ if (n == 0x80) {
+ if (restlen < 2) return (-103);
if ((*(src + restlen - 1)) || (*(src + restlen - 2))) return (-104);
restlen -= 2;
- }
+ } else if (n == 0x81) {
+ n = *src++;
+ restlen--;
+ if (n > restlen) return (-105);
+ restlen = n;
+ } else if (n > restlen)
+ return (-106);
else
- if (n == 0x81)
- { n = *src++;
- restlen--;
- if (n > restlen) return (-105);
- restlen = n;
- }
- else
- if (n > restlen) return (-106);
- else
- restlen = n; /* standard format */
+ restlen = n; /* standard format */
if (restlen < 3) return (-107); /* no procedure */
if ((*src++ != 2) || (*src++ != 1) || (*src++ != 0x0B)) return (-108);
restlen -= 3;
if (restlen < 2) return (-109); /* list missing */
- if (*src == 0x31)
- { src++;
+ if (*src == 0x31) {
+ src++;
if ((n = *src++) > 0x81) return (-110); /* invalid length field */
restlen -= 2; /* remaining bytes */
- if (n == 0x80)
- { if (restlen < 2) return (-111);
+ if (n == 0x80) {
+ if (restlen < 2) return (-111);
if ((*(src + restlen - 1)) || (*(src + restlen - 2))) return (-112);
restlen -= 2;
- }
+ } else if (n == 0x81) {
+ n = *src++;
+ restlen--;
+ if (n > restlen) return (-113);
+ restlen = n;
+ } else if (n > restlen)
+ return (-114);
else
- if (n == 0x81)
- { n = *src++;
- restlen--;
- if (n > restlen) return (-113);
- restlen = n;
- }
- else
- if (n > restlen) return (-114);
- else
- restlen = n; /* standard format */
+ restlen = n; /* standard format */
} /* result list header */
- while (restlen >= 2)
- { stp = st;
+ while (restlen >= 2) {
+ stp = st;
sprintf(stp, "%d 0x%lx %d %s ", DIVERT_REPORT, ic->parm.dss1_io.ll_id,
cnt++, divert_if.drv_to_name(ic->driver));
stp += strlen(stp);
@@ -674,8 +664,8 @@ static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
sprintf(stp, "%d ", (*p++) & 0xFF);
stp += strlen(stp);
n -= 6;
- if (n > 2)
- { if (*p++ != 0x30) continue;
+ if (n > 2) {
+ if (*p++ != 0x30) continue;
if (*p > (n - 2)) continue;
n = *p++;
if (!(n1 = put_address(stp, p, n & 0xFF))) continue;
@@ -692,58 +682,58 @@ static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
/* callback for protocol specific extensions */
/*********************************************/
static int prot_stat_callback(isdn_ctrl *ic)
-{ struct call_struc *cs, *cs1;
+{
+ struct call_struc *cs, *cs1;
int i;
unsigned long flags;
cs = divert_head; /* start of list */
cs1 = NULL;
- while (cs)
- { if (ic->driver == cs->ics.driver)
- { switch (cs->ics.arg)
- { case DSS1_CMD_INVOKE:
- if ((cs->ics.parm.dss1_io.ll_id == ic->parm.dss1_io.ll_id) &&
- (cs->ics.parm.dss1_io.hl_id == ic->parm.dss1_io.hl_id))
- { switch (ic->arg)
- { case DSS1_STAT_INVOKE_ERR:
- sprintf(cs->info, "128 0x%lx 0x%x\n",
- ic->parm.dss1_io.ll_id,
- ic->parm.dss1_io.timeout);
- put_info_buffer(cs->info);
- break;
-
- case DSS1_STAT_INVOKE_RES:
- switch (cs->ics.parm.dss1_io.proc)
- { case 7:
- case 8:
- put_info_buffer(cs->info);
- break;
-
- case 11:
- i = interrogate_success(ic, cs);
- if (i)
- sprintf(cs->info, "%d 0x%lx %d\n", DIVERT_REPORT,
- ic->parm.dss1_io.ll_id, i);
- put_info_buffer(cs->info);
- break;
-
- default:
- printk(KERN_WARNING "dss1_divert: unknown proc %d\n", cs->ics.parm.dss1_io.proc);
- break;
- }
-
+ while (cs) {
+ if (ic->driver == cs->ics.driver) {
+ switch (cs->ics.arg) {
+ case DSS1_CMD_INVOKE:
+ if ((cs->ics.parm.dss1_io.ll_id == ic->parm.dss1_io.ll_id) &&
+ (cs->ics.parm.dss1_io.hl_id == ic->parm.dss1_io.hl_id)) {
+ switch (ic->arg) {
+ case DSS1_STAT_INVOKE_ERR:
+ sprintf(cs->info, "128 0x%lx 0x%x\n",
+ ic->parm.dss1_io.ll_id,
+ ic->parm.dss1_io.timeout);
+ put_info_buffer(cs->info);
+ break;
+
+ case DSS1_STAT_INVOKE_RES:
+ switch (cs->ics.parm.dss1_io.proc) {
+ case 7:
+ case 8:
+ put_info_buffer(cs->info);
+ break;
+ case 11:
+ i = interrogate_success(ic, cs);
+ if (i)
+ sprintf(cs->info, "%d 0x%lx %d\n", DIVERT_REPORT,
+ ic->parm.dss1_io.ll_id, i);
+ put_info_buffer(cs->info);
break;
default:
- printk(KERN_WARNING "dss1_divert unknown invoke answer %lx\n", ic->arg);
+ printk(KERN_WARNING "dss1_divert: unknown proc %d\n", cs->ics.parm.dss1_io.proc);
break;
}
- cs1 = cs; /* remember structure */
- cs = NULL;
- continue; /* abort search */
- } /* id found */
- break;
+
+ break;
+
+ default:
+ printk(KERN_WARNING "dss1_divert unknown invoke answer %lx\n", ic->arg);
+ break;
+ }
+ cs1 = cs; /* remember structure */
+ cs = NULL;
+ continue; /* abort search */
+ } /* id found */
+ break;
case DSS1_CMD_INVOKE_ABORT:
printk(KERN_WARNING "dss1_divert unhandled invoke abort\n");
@@ -757,13 +747,12 @@ static int prot_stat_callback(isdn_ctrl *ic)
} /* driver ok */
}
- if (!cs1)
- { printk(KERN_WARNING "dss1_divert unhandled process\n");
+ if (!cs1) {
+ printk(KERN_WARNING "dss1_divert unhandled process\n");
return (0);
}
- if (cs1->ics.driver == -1)
- {
+ if (cs1->ics.driver == -1) {
spin_lock_irqsave(&divert_lock, flags);
del_timer(&cs1->timer);
if (cs1->prev)
@@ -784,20 +773,22 @@ static int prot_stat_callback(isdn_ctrl *ic)
/* status callback from HL */
/***************************/
static int isdn_divert_stat_callback(isdn_ctrl *ic)
-{ struct call_struc *cs, *cs1;
+{
+ struct call_struc *cs, *cs1;
unsigned long flags;
int retval;
retval = -1;
cs = divert_head; /* start of list */
- while (cs)
- { if ((ic->driver == cs->ics.driver) && (ic->arg == cs->ics.arg))
- { switch (ic->command)
- { case ISDN_STAT_DHUP:
- sprintf(cs->info, "129 0x%lx\n", cs->divert_id);
- del_timer(&cs->timer);
- cs->ics.driver = -1;
- break;
+ while (cs) {
+ if ((ic->driver == cs->ics.driver) &&
+ (ic->arg == cs->ics.arg)) {
+ switch (ic->command) {
+ case ISDN_STAT_DHUP:
+ sprintf(cs->info, "129 0x%lx\n", cs->divert_id);
+ del_timer(&cs->timer);
+ cs->ics.driver = -1;
+ break;
case ISDN_STAT_CAUSE:
sprintf(cs->info, "130 0x%lx %s\n", cs->divert_id, ic->parm.num);
@@ -818,8 +809,7 @@ static int isdn_divert_stat_callback(isdn_ctrl *ic)
}
cs1 = cs;
cs = cs->next;
- if (cs1->ics.driver == -1)
- {
+ if (cs1->ics.driver == -1) {
spin_lock_irqsave(&divert_lock, flags);
if (cs1->prev)
cs1->prev->next = cs1->next; /* forward link */
@@ -840,20 +830,19 @@ static int isdn_divert_stat_callback(isdn_ctrl *ic)
/********************/
int ll_callback(isdn_ctrl *ic)
{
- switch (ic->command)
- { case ISDN_STAT_ICALL:
+ switch (ic->command) {
+ case ISDN_STAT_ICALL:
case ISDN_STAT_ICALLW:
return (isdn_divert_icall(ic));
break;
case ISDN_STAT_PROT:
- if ((ic->arg & 0xFF) == ISDN_PTYPE_EURO)
- { if (ic->arg != DSS1_STAT_INVOKE_BRD)
+ if ((ic->arg & 0xFF) == ISDN_PTYPE_EURO) {
+ if (ic->arg != DSS1_STAT_INVOKE_BRD)
return (prot_stat_callback(ic));
else
return (0); /* DSS1 invoke broadcast */
- }
- else
+ } else
return (-1); /* protocol not euro */
default:
diff --git a/drivers/isdn/divert/isdn_divert.h b/drivers/isdn/divert/isdn_divert.h
index 42f289320d2d..55033dd872c0 100644
--- a/drivers/isdn/divert/isdn_divert.h
+++ b/drivers/isdn/divert/isdn_divert.h
@@ -43,8 +43,8 @@
#define DEFLECT_ALL_IDS 0xFFFFFFFF /* all drivers selected */
-typedef struct
-{ ulong drvid; /* driver ids, bit mapped */
+typedef struct {
+ ulong drvid; /* driver ids, bit mapped */
char my_msn[35]; /* desired msn, subaddr allowed */
char caller[35]; /* caller id, partial string with * + subaddr allowed */
char to_nr[35]; /* deflected to number incl. subaddress */
@@ -65,18 +65,18 @@ typedef struct
u_char waittime; /* maximum wait time for proceeding */
} divert_rule;
-typedef union
-{ int drv_version; /* return of driver version */
- struct
- { int drvid; /* id of driver */
+typedef union {
+ int drv_version; /* return of driver version */
+ struct {
+ int drvid; /* id of driver */
char drvnam[30]; /* name of driver */
} getid;
- struct
- { int ruleidx; /* index of rule */
+ struct {
+ int ruleidx; /* index of rule */
divert_rule rule; /* rule parms */
} getsetrule;
- struct
- { u_char subcmd; /* 0 = hangup/reject,
+ struct {
+ u_char subcmd; /* 0 = hangup/reject,
1 = alert,
2 = deflect */
ulong callid; /* id of call delivered by ascii output */
@@ -84,8 +84,8 @@ typedef union
else uus1 string (maxlen 31),
data from rule used if empty */
} fwd_ctrl;
- struct
- { int drvid; /* id of driver */
+ struct {
+ int drvid; /* id of driver */
u_char cfproc; /* cfu = 0, cfb = 1, cfnr = 2 */
ulong procid; /* process id returned when no error */
u_char service; /* basically coded service, 0 = all */
@@ -104,8 +104,8 @@ typedef union
/**************************************************/
/* structure keeping ascii info for device output */
/**************************************************/
-struct divert_info
-{ struct divert_info *next;
+struct divert_info {
+ struct divert_info *next;
ulong usage_cnt; /* number of files still to work */
char info_start[2]; /* info string start */
};
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index b18a92c32184..dde5e09e6267 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,5 +1,6 @@
menuconfig ISDN_DRV_GIGASET
tristate "Siemens Gigaset support"
+ depends on TTY
select CRC_CCITT
select BITREVERSE
help
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 68452b768da2..03a0a01a4054 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
CAPIMSG_CONTROL(data));
l -= 12;
+ if (l <= 0)
+ return;
dbgline = kmalloc(3 * l, GFP_ATOMIC);
if (!dbgline)
return;
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 6849a11a1b24..7c7814497e3e 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -467,11 +467,6 @@ void gigaset_freecs(struct cardstate *cs)
mutex_lock(&cs->mutex);
- if (!cs->bcs)
- goto f_cs;
- if (!cs->inbuf)
- goto f_bcs;
-
spin_lock_irqsave(&cs->lock, flags);
cs->running = 0;
spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are
@@ -507,17 +502,16 @@ void gigaset_freecs(struct cardstate *cs)
gig_dbg(DEBUG_INIT, "clearing at_state");
clear_at_state(&cs->at_state);
dealloc_temp_at_states(cs);
+ clear_events(cs);
tty_port_destroy(&cs->port);
/* fall through */
case 0: /* error in basic setup */
- clear_events(cs);
gig_dbg(DEBUG_INIT, "freeing inbuf");
kfree(cs->inbuf);
+ kfree(cs->bcs);
}
-f_bcs: gig_dbg(DEBUG_INIT, "freeing bcs[]");
- kfree(cs->bcs);
-f_cs: gig_dbg(DEBUG_INIT, "freeing cs");
+
mutex_unlock(&cs->mutex);
free_cs(cs);
}
@@ -687,19 +681,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
return NULL;
}
- gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1);
- cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
- if (!cs->bcs) {
- pr_err("out of memory\n");
- goto error;
- }
- gig_dbg(DEBUG_INIT, "allocating inbuf");
- cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
- if (!cs->inbuf) {
- pr_err("out of memory\n");
- goto error;
- }
-
cs->cs_init = 0;
cs->channels = channels;
cs->onechannel = onechannel;
@@ -729,6 +710,12 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
cs->mode = M_UNKNOWN;
cs->mstate = MS_UNINITIALIZED;
+ cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
+ cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
+ if (!cs->bcs || !cs->inbuf) {
+ pr_err("out of memory\n");
+ goto error;
+ }
++cs->cs_init;
gig_dbg(DEBUG_INIT, "setting up at_state");
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 2e6963dc740e..7459b127ddd5 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -351,10 +351,11 @@ struct reply_t gigaset_tab_cid[] =
static const struct resp_type_t {
- unsigned char *response;
- int resp_code;
- int type;
-} resp_type[] =
+ char *response;
+ int resp_code;
+ int type;
+}
+resp_type[] =
{
{"OK", RSP_OK, RT_NOTHING},
{"ERROR", RSP_ERROR, RT_NOTHING},
@@ -374,11 +375,12 @@ static const struct resp_type_t {
};
static const struct zsau_resp_t {
- unsigned char *str;
- int code;
-} zsau_resp[] =
+ char *str;
+ int code;
+}
+zsau_resp[] =
{
- {"OUTGOING_CALL_PROCEEDING", ZSAU_OUTGOING_CALL_PROCEEDING},
+ {"OUTGOING_CALL_PROCEEDING", ZSAU_PROCEEDING},
{"CALL_DELIVERED", ZSAU_CALL_DELIVERED},
{"ACTIVE", ZSAU_ACTIVE},
{"DISCONNECT_IND", ZSAU_DISCONNECT_IND},
@@ -434,7 +436,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
len = cs->cbytes;
if (!len) {
/* ignore additional LFs/CRs (M10x config mode or cx100) */
- gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[len]);
+ gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
return;
}
cs->respdata[len] = 0;
@@ -707,27 +709,29 @@ static void schedule_init(struct cardstate *cs, int state)
cs->commands_pending = 1;
}
-/* Add "AT" to a command, add the cid, dle encode it, send the result to the
- hardware. */
-static void send_command(struct cardstate *cs, const char *cmd, int cid,
- int dle, gfp_t kmallocflags)
+/* send an AT command
+ * adding the "AT" prefix, cid and DLE encapsulation as appropriate
+ */
+static void send_command(struct cardstate *cs, const char *cmd,
+ struct at_state_t *at_state)
{
+ int cid = at_state->cid;
struct cmdbuf_t *cb;
size_t buflen;
buflen = strlen(cmd) + 12; /* DLE ( A T 1 2 3 4 5 <cmd> DLE ) \0 */
- cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, kmallocflags);
+ cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, GFP_ATOMIC);
if (!cb) {
dev_err(cs->dev, "%s: out of memory\n", __func__);
return;
}
if (cid > 0 && cid <= 65535)
cb->len = snprintf(cb->buf, buflen,
- dle ? "\020(AT%d%s\020)" : "AT%d%s",
+ cs->dle ? "\020(AT%d%s\020)" : "AT%d%s",
cid, cmd);
else
cb->len = snprintf(cb->buf, buflen,
- dle ? "\020(AT%s\020)" : "AT%s",
+ cs->dle ? "\020(AT%s\020)" : "AT%s",
cmd);
cb->offset = 0;
cb->next = NULL;
@@ -886,7 +890,7 @@ static void finish_shutdown(struct cardstate *cs)
gigaset_isdn_stop(cs);
}
- /* The rest is done by cleanup_cs () in user mode. */
+ /* The rest is done by cleanup_cs() in process context. */
cs->cmd_result = -ENODEV;
cs->waiting = 0;
@@ -976,10 +980,9 @@ exit:
}
static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
- struct at_state_t **p_at_state)
+ struct at_state_t *at_state)
{
int retval;
- struct at_state_t *at_state = *p_at_state;
retval = gigaset_isdn_icall(at_state);
switch (retval) {
@@ -1176,7 +1179,7 @@ static void do_action(int action, struct cardstate *cs,
spin_unlock_irqrestore(&cs->lock, flags);
break;
case ACT_ICALL:
- handle_icall(cs, bcs, p_at_state);
+ handle_icall(cs, bcs, at_state);
break;
case ACT_FAILSDOWN:
dev_warn(cs->dev, "Could not shut down the device.\n");
@@ -1264,7 +1267,7 @@ static void do_action(int action, struct cardstate *cs,
cs->commands_pending = 1;
break;
}
- /* fall through */
+ /* bad cid: fall through */
case ACT_FAILCID:
cs->cur_at_seq = SEQ_NONE;
channel = cs->curchannel;
@@ -1339,7 +1342,6 @@ static void do_action(int action, struct cardstate *cs,
*p_resp_code = RSP_ERROR;
break;
}
- /*at_state->getstring = 1;*/
cs->gotfwver = 0;
break;
case ACT_GOTVER:
@@ -1471,7 +1473,6 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
int rcode;
int genresp = 0;
int resp_code = RSP_ERROR;
- int sendcid;
struct at_state_t *at_state;
int index;
int curact;
@@ -1499,7 +1500,6 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
at_state->ConState, ev->type);
bcs = at_state->bcs;
- sendcid = at_state->cid;
/* Setting the pointer to the dial array */
rep = at_state->replystruct;
@@ -1510,10 +1510,12 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
|| !at_state->timer_active) {
ev->type = RSP_NONE; /* old timeout */
gig_dbg(DEBUG_EVENT, "old timeout");
- } else if (!at_state->waiting)
- gig_dbg(DEBUG_EVENT, "timeout occurred");
- else
- gig_dbg(DEBUG_EVENT, "stopped waiting");
+ } else {
+ if (at_state->waiting)
+ gig_dbg(DEBUG_EVENT, "stopped waiting");
+ else
+ gig_dbg(DEBUG_EVENT, "timeout occurred");
+ }
}
spin_unlock_irqrestore(&cs->lock, flags);
@@ -1561,45 +1563,40 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
&genresp, &resp_code, ev);
if (!at_state)
- break; /* may be freed after disconnect */
+ /* at_state destroyed by disconnect */
+ return;
}
- if (at_state) {
- /* Jump to the next con-state regarding the array */
- if (rep->new_ConState >= 0)
- at_state->ConState = rep->new_ConState;
+ /* Jump to the next con-state regarding the array */
+ if (rep->new_ConState >= 0)
+ at_state->ConState = rep->new_ConState;
- if (genresp) {
- spin_lock_irqsave(&cs->lock, flags);
+ if (genresp) {
+ spin_lock_irqsave(&cs->lock, flags);
+ at_state->timer_expires = 0;
+ at_state->timer_active = 0;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
+ } else {
+ /* Send command to modem if not NULL... */
+ if (p_command) {
+ if (cs->connected)
+ send_command(cs, p_command, at_state);
+ else
+ gigaset_add_event(cs, at_state, RSP_NODEV,
+ NULL, 0, NULL);
+ }
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!rep->timeout) {
at_state->timer_expires = 0;
at_state->timer_active = 0;
- spin_unlock_irqrestore(&cs->lock, flags);
- gigaset_add_event(cs, at_state, resp_code,
- NULL, 0, NULL);
- } else {
- /* Send command to modem if not NULL... */
- if (p_command) {
- if (cs->connected)
- send_command(cs, p_command,
- sendcid, cs->dle,
- GFP_ATOMIC);
- else
- gigaset_add_event(cs, at_state,
- RSP_NODEV,
- NULL, 0, NULL);
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!rep->timeout) {
- at_state->timer_expires = 0;
- at_state->timer_active = 0;
- } else if (rep->timeout > 0) { /* new timeout */
- at_state->timer_expires = rep->timeout * 10;
- at_state->timer_active = 1;
- ++at_state->timer_index;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
+ } else if (rep->timeout > 0) { /* new timeout */
+ at_state->timer_expires = rep->timeout * 10;
+ at_state->timer_active = 1;
+ ++at_state->timer_index;
}
+ spin_unlock_irqrestore(&cs->lock, flags);
}
}
@@ -1693,6 +1690,11 @@ static void process_command_flags(struct cardstate *cs)
for (i = 0; i < cs->channels; ++i) {
bcs = cs->bcs + i;
if (bcs->at_state.pending_commands & PC_HUP) {
+ if (cs->dle) {
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
+ return;
+ }
bcs->at_state.pending_commands &= ~PC_HUP;
if (bcs->at_state.pending_commands & PC_CID) {
/* not yet dialing: PC_NOCID is sufficient */
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 8e2fc8f31d16..eb63a0f7a02a 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -111,11 +111,10 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
/* connection state */
#define ZSAU_NONE 0
-#define ZSAU_DISCONNECT_IND 4
-#define ZSAU_OUTGOING_CALL_PROCEEDING 1
#define ZSAU_PROCEEDING 1
#define ZSAU_CALL_DELIVERED 2
#define ZSAU_ACTIVE 3
+#define ZSAU_DISCONNECT_IND 4
#define ZSAU_NULL 5
#define ZSAU_DISCONNECT_REQ 6
#define ZSAU_UNKNOWN -1
@@ -183,18 +182,22 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
#define AT_NUM 7
/* variables in struct at_state_t */
+/* - numeric */
#define VAR_ZSAU 0
#define VAR_ZDLE 1
#define VAR_ZCTP 2
+/* total number */
#define VAR_NUM 3
-
+/* - string */
#define STR_NMBR 0
#define STR_ZCPN 1
#define STR_ZCON 2
#define STR_ZBC 3
#define STR_ZHLC 4
+/* total number */
#define STR_NUM 5
+/* event types */
#define EV_TIMEOUT -105
#define EV_IF_VER -106
#define EV_PROC_CIDMODE -107
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 67abf3ff45e8..e2b539675b66 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -112,36 +112,6 @@ static int if_config(struct cardstate *cs, int *arg)
}
/*** the terminal driver ***/
-/* stolen from usbserial and some other tty drivers */
-
-static int if_open(struct tty_struct *tty, struct file *filp);
-static void if_close(struct tty_struct *tty, struct file *filp);
-static int if_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
-static int if_write_room(struct tty_struct *tty);
-static int if_chars_in_buffer(struct tty_struct *tty);
-static void if_throttle(struct tty_struct *tty);
-static void if_unthrottle(struct tty_struct *tty);
-static void if_set_termios(struct tty_struct *tty, struct ktermios *old);
-static int if_tiocmget(struct tty_struct *tty);
-static int if_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear);
-static int if_write(struct tty_struct *tty,
- const unsigned char *buf, int count);
-
-static const struct tty_operations if_ops = {
- .open = if_open,
- .close = if_close,
- .ioctl = if_ioctl,
- .write = if_write,
- .write_room = if_write_room,
- .chars_in_buffer = if_chars_in_buffer,
- .set_termios = if_set_termios,
- .throttle = if_throttle,
- .unthrottle = if_unthrottle,
- .tiocmget = if_tiocmget,
- .tiocmset = if_tiocmset,
-};
static int if_open(struct tty_struct *tty, struct file *filp)
{
@@ -164,7 +134,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
if (cs->port.count == 1) {
tty_port_tty_set(&cs->port, tty);
- tty->low_latency = 1;
+ cs->port.low_latency = 1;
}
mutex_unlock(&cs->mutex);
@@ -355,7 +325,7 @@ done:
static int if_write_room(struct tty_struct *tty)
{
struct cardstate *cs = tty->driver_data;
- int retval = -ENODEV;
+ int retval;
gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
@@ -498,6 +468,20 @@ out:
mutex_unlock(&cs->mutex);
}
+static const struct tty_operations if_ops = {
+ .open = if_open,
+ .close = if_close,
+ .ioctl = if_ioctl,
+ .write = if_write,
+ .write_room = if_write_room,
+ .chars_in_buffer = if_chars_in_buffer,
+ .set_termios = if_set_termios,
+ .throttle = if_throttle,
+ .unthrottle = if_unthrottle,
+ .tiocmget = if_tiocmget,
+ .tiocmset = if_tiocmset,
+};
+
/* wakeup tasklet for the write operation */
static void if_wake(unsigned long data)
@@ -562,16 +546,8 @@ void gigaset_if_free(struct cardstate *cs)
void gigaset_if_receive(struct cardstate *cs,
unsigned char *buffer, size_t len)
{
- struct tty_struct *tty = tty_port_tty_get(&cs->port);
-
- if (tty == NULL) {
- gig_dbg(DEBUG_IF, "receive on closed device");
- return;
- }
-
- tty_insert_flip_string(tty, buffer, len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&cs->port, buffer, len);
+ tty_flip_buffer_push(&cs->port);
}
EXPORT_SYMBOL_GPL(gigaset_if_receive);
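
The hunk above converts gigaset_if_receive() to the tty_port-based flip-buffer calls, so the receive path no longer has to take and drop a tty_struct reference. A minimal sketch of that pattern, assuming a driver that already embeds a struct tty_port (the helper name below is illustrative, not from this patch):

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* hypothetical helper: queue received bytes on a driver's tty_port */
static void example_rx(struct tty_port *port, const unsigned char *buf,
		       size_t len)
{
	/* copy the data into the port's flip buffer ... */
	tty_insert_flip_string(port, buf, len);
	/* ... then push it towards the line discipline */
	tty_flip_buffer_push(port);
}

Because the flip buffer now hangs off the tty_port rather than the tty_struct, the helper no longer needs the tty_port_tty_get()/tty_kref_put() dance removed above.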
diff --git a/drivers/isdn/hardware/avm/b1pci.c b/drivers/isdn/hardware/avm/b1pci.c
index b305e6b2b8ee..ac4863c2ecbc 100644
--- a/drivers/isdn/hardware/avm/b1pci.c
+++ b/drivers/isdn/hardware/avm/b1pci.c
@@ -299,8 +299,8 @@ static void b1pciv4_remove(struct pci_dev *pdev)
#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
-static int __devinit b1pci_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int b1pci_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct capicardparams param;
int retval;
@@ -344,7 +344,7 @@ static int __devinit b1pci_pci_probe(struct pci_dev *pdev,
return retval;
}
-static void __devexit b1pci_pci_remove(struct pci_dev *pdev)
+static void b1pci_pci_remove(struct pci_dev *pdev)
{
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
avmcard *card = pci_get_drvdata(pdev);
@@ -362,7 +362,7 @@ static struct pci_driver b1pci_pci_driver = {
.name = "b1pci",
.id_table = b1pci_pci_tbl,
.probe = b1pci_pci_probe,
- .remove = __devexit_p(b1pci_pci_remove),
+ .remove = b1pci_pci_remove,
};
static struct capi_driver capi_driver_b1pci = {
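
From here on, the series mostly strips the now-defunct __devinit/__devexit section annotations and the __devexit_p() wrapper from probe/remove callbacks and ID tables. As a rough sketch (hypothetical driver name and placeholder IDs, not code from this patch), a PCI driver after this cleanup is just plain functions wired into struct pci_driver:

#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* bring the device up; real drivers also map BARs, request IRQs, etc. */
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1244, 0x0700) },	/* placeholder vendor/device IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static struct pci_driver example_driver = {
	.name     = "example",
	.probe    = example_probe,	/* no __devinit marker */
	.remove   = example_remove,	/* no __devexit_p() wrapper */
	.id_table = example_ids,
};
module_pci_driver(example_driver);

MODULE_LICENSE("GPL");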
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 98f18812441d..1d7fc44e3eef 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -1249,8 +1249,7 @@ err:
/* ------------------------------------------------------------- */
-static int __devinit c4_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int c4_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
int nr = ent->driver_data;
int retval = 0;
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index cb9a30427bd2..2180b1685691 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -187,8 +187,7 @@ static char *t1pci_procinfo(struct capi_ctr *ctrl)
/* ------------------------------------------------------------- */
-static int __devinit t1pci_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct capicardparams param;
int retval;
diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h
index 3942efbbfb58..a315a2914d70 100644
--- a/drivers/isdn/hardware/eicon/divacapi.h
+++ b/drivers/isdn/hardware/eicon/divacapi.h
@@ -422,11 +422,11 @@ struct _DIVA_CAPI_ADAPTER {
#define LAPD 6 /* lapd (Q.921) */
#define X25_L2 7 /* x.25 layer-2 */
#define V120_L2 8 /* V.120 layer-2 protocol */
-#define V42_IN 9 /* V.42 layer-2 protocol, incomming */
+#define V42_IN 9 /* V.42 layer-2 protocol, incoming */
#define V42 10 /* V.42 layer-2 protocol */
#define MDM_ATP 11 /* AT Parser built in the L2 */
#define X75_V42BIS 12 /* ISO7776 (X.75 SLP) modified to support V.42 bis compression */
-#define RTPL2_IN 13 /* RTP layer-2 protocol, incomming */
+#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
#define RTPL2 14 /* RTP layer-2 protocol */
#define V120_V42BIS 15 /* V.120 layer-2 protocol supporting V.42 bis compression */
@@ -1125,7 +1125,7 @@ extern word li_total_channels;
| Direction | word | Enable compression/decompression for |
| | | 0: All direction |
| | | 1: disable outgoing data |
- | | | 2: disable incomming data |
+ | | | 2: disable incoming data |
| | | 3: disable both direction (default) |
+---------------------+------+-----------------------------------------+
| Number of code | word | Parameter P1 of V.42bis in accordance |
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index ca6d276bb256..52377b4bf039 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -150,12 +150,12 @@ MODULE_DEVICE_TABLE(pci, divas_pci_tbl);
static int divas_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
-static void __devexit divas_remove_one(struct pci_dev *pdev);
+static void divas_remove_one(struct pci_dev *pdev);
static struct pci_driver diva_pci_driver = {
.name = "divas",
.probe = divas_init_one,
- .remove = __devexit_p(divas_remove_one),
+ .remove = divas_remove_one,
.id_table = divas_pci_tbl,
};
@@ -688,8 +688,7 @@ static int __init divas_register_chrdev(void)
/* --------------------------------------------------------------------------
PCI driver section
-------------------------------------------------------------------------- */
-static int __devinit divas_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int divas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
void *pdiva = NULL;
u8 pci_latency;
@@ -749,7 +748,7 @@ static int __devinit divas_init_one(struct pci_dev *pdev,
return (0);
}
-static void __devexit divas_remove_one(struct pci_dev *pdev)
+static void divas_remove_one(struct pci_dev *pdev)
{
void *pdiva = pci_get_drvdata(pdev);
diff --git a/drivers/isdn/hardware/eicon/pc.h b/drivers/isdn/hardware/eicon/pc.h
index 889dc984bbca..329c0c26abfb 100644
--- a/drivers/isdn/hardware/eicon/pc.h
+++ b/drivers/isdn/hardware/eicon/pc.h
@@ -419,11 +419,11 @@ struct dual
#define LAPD 6 /* lapd (Q.921) */
#define X25_L2 7 /* x.25 layer-2 */
#define V120_L2 8 /* V.120 layer-2 protocol */
-#define V42_IN 9 /* V.42 layer-2 protocol, incomming */
+#define V42_IN 9 /* V.42 layer-2 protocol, incoming */
#define V42 10 /* V.42 layer-2 protocol */
#define MDM_ATP 11 /* AT Parser built in the L2 */
#define X75_V42BIS 12 /* x.75 with V.42bis */
-#define RTPL2_IN 13 /* RTP layer-2 protocol, incomming */
+#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
#define RTPL2 14 /* RTP layer-2 protocol */
#define V120_V42BIS 15 /* V.120 asynchronous mode supporting V.42bis compression */
#define LISTENER 27 /* Layer 2 to listen line */
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index eadc1cd34a20..b8611e3e5e74 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -76,6 +76,7 @@ config MISDN_NETJET
tristate "Support for NETJet cards"
depends on MISDN
depends on PCI
+ depends on TTY
select MISDN_IPAC
select ISDN_HDLC
select ISDN_I4L
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index dceaec821b0e..292991c90c02 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -1034,7 +1034,7 @@ release_card(struct fritzcard *card)
AVM_cnt--;
}
-static int __devinit
+static int
setup_instance(struct fritzcard *card)
{
int i, err;
@@ -1096,7 +1096,7 @@ error:
return err;
}
-static int __devinit
+static int
fritzpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1130,7 +1130,7 @@ fritzpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
fritz_remove_pci(struct pci_dev *pdev)
{
struct fritzcard *card = pci_get_drvdata(pdev);
@@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev)
pr_info("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id fcpci_ids[] __devinitdata = {
+static struct pci_device_id fcpci_ids[] = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) "Fritz!Card PCI"},
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
@@ -1154,7 +1154,7 @@ MODULE_DEVICE_TABLE(pci, fcpci_ids);
static struct pci_driver fcpci_driver = {
.name = "fcpci",
.probe = fritzpci_probe,
- .remove = __devexit_p(fritz_remove_pci),
+ .remove = fritz_remove_pci,
.id_table = fcpci_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index f02794203bb1..28543d795188 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5274,7 +5274,7 @@ free_card:
return ret_err;
}
-static void __devexit hfc_remove_pci(struct pci_dev *pdev)
+static void hfc_remove_pci(struct pci_dev *pdev)
{
struct hfc_multi *card = pci_get_drvdata(pdev);
u_long flags;
@@ -5351,7 +5351,7 @@ static const struct hm_map hfcm_map[] = {
#undef H
#define H(x) ((unsigned long)&hfcm_map[x])
-static struct pci_device_id hfmultipci_ids[] __devinitdata = {
+static struct pci_device_id hfmultipci_ids[] = {
/* Cards with HFC-4S Chip */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
@@ -5472,7 +5472,7 @@ hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static struct pci_driver hfcmultipci_driver = {
.name = "hfc_multi",
.probe = hfcmulti_probe,
- .remove = __devexit_p(hfc_remove_pci),
+ .remove = hfc_remove_pci,
.id_table = hfmultipci_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 6e99d73563b8..a7e4939787c9 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2215,7 +2215,7 @@ static struct pci_device_id hfc_ids[] =
{},
};
-static int __devinit
+static int
hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -2246,7 +2246,7 @@ hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
hfc_remove_pci(struct pci_dev *pdev)
{
struct hfc_pci *card = pci_get_drvdata(pdev);
@@ -2263,7 +2263,7 @@ hfc_remove_pci(struct pci_dev *pdev)
static struct pci_driver hfc_driver = {
.name = "hfcpci",
.probe = hfc_probe,
- .remove = __devexit_p(hfc_remove_pci),
+ .remove = hfc_remove_pci,
.id_table = hfc_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 631eb3fa63cf..c1493f4162fb 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -125,7 +125,7 @@ struct inf_hw {
#define PCI_SUBVENDOR_SEDLBAUER_PCI 0x53
#define PCI_SUB_ID_SEDLBAUER 0x01
-static struct pci_device_id infineon_ids[] __devinitdata = {
+static struct pci_device_id infineon_ids[] = {
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20), INF_DIVA20 },
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20_U), INF_DIVA20U },
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA201), INF_DIVA201 },
@@ -603,7 +603,7 @@ inf_ctrl(struct inf_hw *hw, u32 cmd, u_long arg)
return ret;
}
-static int __devinit
+static int
init_irq(struct inf_hw *hw)
{
int ret, cnt = 3;
@@ -662,7 +662,7 @@ release_io(struct inf_hw *hw)
}
}
-static int __devinit
+static int
setup_io(struct inf_hw *hw)
{
int err = 0;
@@ -896,7 +896,7 @@ release_card(struct inf_hw *card) {
inf_cnt--;
}
-static int __devinit
+static int
setup_instance(struct inf_hw *card)
{
int err;
@@ -1060,7 +1060,7 @@ static const struct inf_cinfo inf_card_info[] = {
}
};
-static const struct inf_cinfo * __devinit
+static const struct inf_cinfo *
get_card_info(enum inf_types typ)
{
const struct inf_cinfo *ci = inf_card_info;
@@ -1073,7 +1073,7 @@ get_card_info(enum inf_types typ)
return NULL;
}
-static int __devinit
+static int
inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1135,7 +1135,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
inf_remove(struct pci_dev *pdev)
{
struct inf_hw *card = pci_get_drvdata(pdev);
@@ -1149,7 +1149,7 @@ inf_remove(struct pci_dev *pdev)
static struct pci_driver infineon_driver = {
.name = "ISDN Infineon pci",
.probe = inf_probe,
- .remove = __devexit_p(inf_remove),
+ .remove = inf_remove,
.id_table = infineon_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 9bcade59eb73..8e2944784e00 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1008,7 +1008,7 @@ nj_setup(struct tiger_hw *card)
}
-static int __devinit
+static int
setup_instance(struct tiger_hw *card)
{
int i, err;
@@ -1059,7 +1059,7 @@ error:
return err;
}
-static int __devinit
+static int
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1124,7 +1124,7 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
-static void __devexit nj_remove(struct pci_dev *pdev)
+static void nj_remove(struct pci_dev *pdev)
{
struct tiger_hw *card = pci_get_drvdata(pdev);
@@ -1137,7 +1137,7 @@ static void __devexit nj_remove(struct pci_dev *pdev)
/* We cannot select cards with PCI_SUB... IDs, since there are cards with
* SUB IDs set to PCI_ANY_ID, so we need to match all and reject
* known other cards which do not work with this driver - see probe function */
-static struct pci_device_id nj_pci_ids[] __devinitdata = {
+static struct pci_device_id nj_pci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
@@ -1147,7 +1147,7 @@ MODULE_DEVICE_TABLE(pci, nj_pci_ids);
static struct pci_driver nj_driver = {
.name = "netjet",
.probe = nj_probe,
- .remove = __devexit_p(nj_remove),
+ .remove = nj_remove,
.id_table = nj_pci_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 93f344d74e54..9815bb4eec9c 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -282,7 +282,7 @@ sfax_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
return err;
}
-static int __devinit
+static int
init_card(struct sfax_hw *sf)
{
int ret, cnt = 3;
@@ -321,7 +321,7 @@ init_card(struct sfax_hw *sf)
}
-static int __devinit
+static int
setup_speedfax(struct sfax_hw *sf)
{
u_long flags;
@@ -371,7 +371,7 @@ release_card(struct sfax_hw *card) {
sfax_cnt--;
}
-static int __devinit
+static int
setup_instance(struct sfax_hw *card)
{
const struct firmware *firmware;
@@ -451,7 +451,7 @@ error_fw:
return err;
}
-static int __devinit
+static int
sfaxpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -480,7 +480,7 @@ sfaxpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
sfax_remove_pci(struct pci_dev *pdev)
{
struct sfax_hw *card = pci_get_drvdata(pdev);
@@ -491,7 +491,7 @@ sfax_remove_pci(struct pci_dev *pdev)
pr_debug("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id sfaxpci_ids[] __devinitdata = {
+static struct pci_device_id sfaxpci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100,
PCI_SUBVENDOR_SPEEDFAX_PYRAMID, PCI_SUB_ID_SEDLBAUER,
0, 0, (unsigned long) "Pyramid Speedfax + PCI"
@@ -507,7 +507,7 @@ MODULE_DEVICE_TABLE(pci, sfaxpci_ids);
static struct pci_driver sfaxpci_driver = {
.name = "speedfax+ pci",
.probe = sfaxpci_probe,
- .remove = __devexit_p(sfax_remove_pci),
+ .remove = sfax_remove_pci,
.id_table = sfaxpci_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 335fe6455002..de69f6828c76 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1355,7 +1355,7 @@ error_setup:
return err;
}
-static int __devinit
+static int
w6692_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1387,7 +1387,7 @@ w6692_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
w6692_remove_pci(struct pci_dev *pdev)
{
struct w6692_hw *card = pci_get_drvdata(pdev);
@@ -1414,7 +1414,7 @@ MODULE_DEVICE_TABLE(pci, w6692_ids);
static struct pci_driver w6692_driver = {
.name = "w6692",
.probe = w6692_probe,
- .remove = __devexit_p(w6692_remove_pci),
+ .remove = w6692_remove_pci,
.id_table = w6692_ids,
};
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 70ecd0c19500..5313c9ea44dc 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -389,8 +389,8 @@ config HISAX_TELES_CS
comment "HiSax sub driver modules"
config HISAX_ST5481
- tristate "ST5481 USB ISDN modem (EXPERIMENTAL)"
- depends on USB && EXPERIMENTAL
+ tristate "ST5481 USB ISDN modem"
+ depends on USB
select ISDN_HDLC
select CRC_CCITT
select BITREVERSE
@@ -399,20 +399,19 @@ config HISAX_ST5481
e.g. the BeWan Gazel 128 USB
config HISAX_HFCUSB
- tristate "HFC USB based ISDN modems (EXPERIMENTAL)"
- depends on USB && EXPERIMENTAL
+ tristate "HFC USB based ISDN modems"
+ depends on USB
help
This enables the driver for HFC USB based ISDN modems.
config HISAX_HFC4S8S
- tristate "HFC-4S/8S based ISDN cards (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "HFC-4S/8S based ISDN cards"
help
This enables the driver for HFC-4S/8S based ISDN cards.
config HISAX_FRITZ_PCIPNP
- tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "AVM Fritz!Card PCI/PCIv2/PnP support"
+ depends on PCI
help
This enables the driver for the AVM Fritz!Card PCI,
Fritz!Card PCI v2 and Fritz!Card PnP.
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 525471e776a7..1063babe1d3a 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -786,8 +786,7 @@ void Amd7930_init(struct IsdnCardState *cs)
}
}
-void __devinit
-setup_Amd7930(struct IsdnCardState *cs)
+void setup_Amd7930(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, Amd7930_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
diff --git a/drivers/isdn/hisax/asuscom.c b/drivers/isdn/hisax/asuscom.c
index 2b74a40ad2a0..62f9c43e2377 100644
--- a/drivers/isdn/hisax/asuscom.c
+++ b/drivers/isdn/hisax/asuscom.c
@@ -295,7 +295,7 @@ Asus_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id asus_ids[] __devinitdata = {
+static struct isapnp_device_id asus_ids[] = {
{ ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688),
ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688),
(unsigned long) "Asus1688 PnP" },
@@ -311,12 +311,11 @@ static struct isapnp_device_id asus_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &asus_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &asus_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_asuscom(struct IsdnCard *card)
+int setup_asuscom(struct IsdnCard *card)
{
int bytecnt;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/avm_a1.c b/drivers/isdn/hisax/avm_a1.c
index 402d489cbbf1..7dd74087ad72 100644
--- a/drivers/isdn/hisax/avm_a1.c
+++ b/drivers/isdn/hisax/avm_a1.c
@@ -177,8 +177,7 @@ AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_avm_a1(struct IsdnCard *card)
+int setup_avm_a1(struct IsdnCard *card)
{
u_char val;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/avm_a1p.c b/drivers/isdn/hisax/avm_a1p.c
index 39347198d643..bc52d54ff5e1 100644
--- a/drivers/isdn/hisax/avm_a1p.c
+++ b/drivers/isdn/hisax/avm_a1p.c
@@ -213,7 +213,7 @@ AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return 0;
}
-int __devinit setup_avm_a1_pcmcia(struct IsdnCard *card)
+int setup_avm_a1_pcmcia(struct IsdnCard *card)
{
u_char model, vers;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index 979492d69dae..ee9b9a03cffa 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -718,7 +718,7 @@ AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit avm_setup_rest(struct IsdnCardState *cs)
+static int avm_setup_rest(struct IsdnCardState *cs)
{
u_int val, ver;
@@ -770,16 +770,16 @@ static int __devinit avm_setup_rest(struct IsdnCardState *cs)
#ifndef __ISAPNP__
-static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
+static int avm_pnp_setup(struct IsdnCardState *cs)
{
return (1); /* no-op: success */
}
#else
-static struct pnp_card *pnp_avm_c __devinitdata = NULL;
+static struct pnp_card *pnp_avm_c = NULL;
-static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
+static int avm_pnp_setup(struct IsdnCardState *cs)
{
struct pnp_dev *pnp_avm_d = NULL;
@@ -825,16 +825,16 @@ static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
#ifndef CONFIG_PCI
-static int __devinit avm_pci_setup(struct IsdnCardState *cs)
+static int avm_pci_setup(struct IsdnCardState *cs)
{
return (1); /* no-op: success */
}
#else
-static struct pci_dev *dev_avm __devinitdata = NULL;
+static struct pci_dev *dev_avm = NULL;
-static int __devinit avm_pci_setup(struct IsdnCardState *cs)
+static int avm_pci_setup(struct IsdnCardState *cs)
{
if ((dev_avm = hisax_find_pci_device(PCI_VENDOR_ID_AVM,
PCI_DEVICE_ID_AVM_A1, dev_avm))) {
@@ -867,8 +867,7 @@ static int __devinit avm_pci_setup(struct IsdnCardState *cs)
#endif /* CONFIG_PCI */
-int __devinit
-setup_avm_pcipnp(struct IsdnCard *card)
+int setup_avm_pcipnp(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index c644557ae614..4e676bcf8506 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -38,11 +38,11 @@ module_param(isdnprot, int, 0);
/*====================================================================*/
-static int avma1cs_config(struct pcmcia_device *link) __devinit;
+static int avma1cs_config(struct pcmcia_device *link);
static void avma1cs_release(struct pcmcia_device *link);
-static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit;
+static void avma1cs_detach(struct pcmcia_device *p_dev);
-static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
+static int avma1cs_probe(struct pcmcia_device *p_dev)
{
dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
@@ -54,7 +54,7 @@ static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
return avma1cs_config(p_dev);
} /* avma1cs_attach */
-static void __devexit avma1cs_detach(struct pcmcia_device *link)
+static void avma1cs_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link);
avma1cs_release(link);
@@ -72,7 +72,7 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
}
-static int __devinit avma1cs_config(struct pcmcia_device *link)
+static int avma1cs_config(struct pcmcia_device *link)
{
int i = -1;
char devname[128];
@@ -156,7 +156,7 @@ static struct pcmcia_driver avma1cs_driver = {
.owner = THIS_MODULE,
.name = "avma1_cs",
.probe = avma1cs_probe,
- .remove = __devexit_p(avma1cs_detach),
+ .remove = avma1cs_detach,
.id_table = avma1cs_ids,
};
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
index f6bf9c68892e..c360164bde1b 100644
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ b/drivers/isdn/hisax/bkm_a4t.c
@@ -253,10 +253,8 @@ BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
- struct IsdnCardState *cs,
- u_int *found,
- u_int *pci_memaddr)
+static int a4t_pci_probe(struct pci_dev *dev_a4t, struct IsdnCardState *cs,
+ u_int *found, u_int *pci_memaddr)
{
u16 sub_sys;
u16 sub_vendor;
@@ -275,9 +273,8 @@ static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
return (-1); /* continue looping */
}
-static int __devinit a4t_cs_init(struct IsdnCard *card,
- struct IsdnCardState *cs,
- u_int pci_memaddr)
+static int a4t_cs_init(struct IsdnCard *card, struct IsdnCardState *cs,
+ u_int pci_memaddr)
{
I20_REGISTER_FILE *pI20_Regs;
@@ -323,10 +320,9 @@ static int __devinit a4t_cs_init(struct IsdnCard *card,
return (1);
}
-static struct pci_dev *dev_a4t __devinitdata = NULL;
+static struct pci_dev *dev_a4t = NULL;
-int __devinit
-setup_bkm_a4t(struct IsdnCard *card)
+int setup_bkm_a4t(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/bkm_a8.c b/drivers/isdn/hisax/bkm_a8.c
index c9c98f071af6..dd663ea57ec6 100644
--- a/drivers/isdn/hisax/bkm_a8.c
+++ b/drivers/isdn/hisax/bkm_a8.c
@@ -255,8 +255,7 @@ BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit
-sct_alloc_io(u_int adr, u_int len)
+static int sct_alloc_io(u_int adr, u_int len)
{
if (!request_region(adr, len, "scitel")) {
printk(KERN_WARNING
@@ -267,15 +266,14 @@ sct_alloc_io(u_int adr, u_int len)
return (0);
}
-static struct pci_dev *dev_a8 __devinitdata = NULL;
-static u16 sub_vendor_id __devinitdata = 0;
-static u16 sub_sys_id __devinitdata = 0;
-static u_char pci_bus __devinitdata = 0;
-static u_char pci_device_fn __devinitdata = 0;
-static u_char pci_irq __devinitdata = 0;
+static struct pci_dev *dev_a8 = NULL;
+static u16 sub_vendor_id = 0;
+static u16 sub_sys_id = 0;
+static u_char pci_bus = 0;
+static u_char pci_device_fn = 0;
+static u_char pci_irq = 0;
-int __devinit
-setup_sct_quadro(struct IsdnCard *card)
+int setup_sct_quadro(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b5edc0eeec06..bf04d2a3cf4a 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -338,11 +338,11 @@ static int io[HISAX_MAX_CARDS] = { 0, };
#define IO0_IO1
#endif
#ifdef IO0_IO1
-static int io0[HISAX_MAX_CARDS] __devinitdata = { 0, };
-static int io1[HISAX_MAX_CARDS] __devinitdata = { 0, };
+static int io0[HISAX_MAX_CARDS] = { 0, };
+static int io1[HISAX_MAX_CARDS] = { 0, };
#endif
-static int irq[HISAX_MAX_CARDS] __devinitdata = { 0, };
-static int mem[HISAX_MAX_CARDS] __devinitdata = { 0, };
+static int irq[HISAX_MAX_CARDS] = { 0, };
+static int mem[HISAX_MAX_CARDS] = { 0, };
static char *id = HiSaxID;
MODULE_DESCRIPTION("ISDN4Linux: Driver for passive ISDN cards");
@@ -852,7 +852,7 @@ static int init_card(struct IsdnCardState *cs)
return 3;
}
-static int __devinit hisax_cs_setup_card(struct IsdnCard *card)
+static int hisax_cs_setup_card(struct IsdnCard *card)
{
int ret;
@@ -1171,12 +1171,8 @@ outf_cs:
return 0;
}
-/* Used from an exported function but calls __devinit functions.
- * Tell modpost not to warn (__ref)
- */
-static int __ref checkcard(int cardnr, char *id, int *busy_flag,
- struct module *lockowner,
- hisax_setup_func_t card_setup)
+static int checkcard(int cardnr, char *id, int *busy_flag,
+ struct module *lockowner, hisax_setup_func_t card_setup)
{
int ret;
struct IsdnCard *card = cards + cardnr;
@@ -1547,9 +1543,7 @@ static void __exit HiSax_exit(void)
printk(KERN_INFO "HiSax module removed\n");
}
-#ifdef CONFIG_HOTPLUG
-
-int __devinit hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *card)
+int hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *card)
{
u_char ids[16];
int ret = -1;
@@ -1568,9 +1562,7 @@ int __devinit hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *
error:
return ret;
}
-
EXPORT_SYMBOL(hisax_init_pcmcia);
-#endif
EXPORT_SYMBOL(HiSax_closecard);
@@ -1917,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
#ifdef CONFIG_PCI
#include <linux/pci.h>
-static struct pci_device_id hisax_pci_tbl[] __devinitdata __used = {
+static struct pci_device_id hisax_pci_tbl[] __used = {
#ifdef CONFIG_HISAX_FRITZPCI
{PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
#endif
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 62a2945fa7f2..8d0cf6e4dc00 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -904,7 +904,7 @@ Diva_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit setup_diva_common(struct IsdnCardState *cs)
+static int setup_diva_common(struct IsdnCardState *cs)
{
int bytecnt;
u_char val;
@@ -997,7 +997,7 @@ static int __devinit setup_diva_common(struct IsdnCardState *cs)
#ifdef CONFIG_ISA
-static int __devinit setup_diva_isa(struct IsdnCard *card)
+static int setup_diva_isa(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
@@ -1033,7 +1033,7 @@ static int __devinit setup_diva_isa(struct IsdnCard *card)
#else /* if !CONFIG_ISA */
-static int __devinit setup_diva_isa(struct IsdnCard *card)
+static int setup_diva_isa(struct IsdnCard *card)
{
return (-1); /* card not found; continue search */
}
@@ -1041,7 +1041,7 @@ static int __devinit setup_diva_isa(struct IsdnCard *card)
#endif /* CONFIG_ISA */
#ifdef __ISAPNP__
-static struct isapnp_device_id diva_ids[] __devinitdata = {
+static struct isapnp_device_id diva_ids[] = {
{ ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
(unsigned long) "Diva picola" },
@@ -1063,10 +1063,10 @@ static struct isapnp_device_id diva_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &diva_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &diva_ids[0];
+static struct pnp_card *pnp_c = NULL;
-static int __devinit setup_diva_isapnp(struct IsdnCard *card)
+static int setup_diva_isapnp(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
struct pnp_dev *pnp_d;
@@ -1141,7 +1141,7 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card)
#else /* if !ISAPNP */
-static int __devinit setup_diva_isapnp(struct IsdnCard *card)
+static int setup_diva_isapnp(struct IsdnCard *card)
{
return (-1); /* card not found; continue search */
}
@@ -1149,12 +1149,12 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card)
#endif /* ISAPNP */
#ifdef CONFIG_PCI
-static struct pci_dev *dev_diva __devinitdata = NULL;
-static struct pci_dev *dev_diva_u __devinitdata = NULL;
-static struct pci_dev *dev_diva201 __devinitdata = NULL;
-static struct pci_dev *dev_diva202 __devinitdata = NULL;
+static struct pci_dev *dev_diva = NULL;
+static struct pci_dev *dev_diva_u = NULL;
+static struct pci_dev *dev_diva201 = NULL;
+static struct pci_dev *dev_diva202 = NULL;
-static int __devinit setup_diva_pci(struct IsdnCard *card)
+static int setup_diva_pci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
@@ -1231,15 +1231,14 @@ static int __devinit setup_diva_pci(struct IsdnCard *card)
#else /* if !CONFIG_PCI */
-static int __devinit setup_diva_pci(struct IsdnCard *card)
+static int setup_diva_pci(struct IsdnCard *card)
{
return (-1); /* card not found; continue search */
}
#endif /* CONFIG_PCI */
-int __devinit
-setup_diva(struct IsdnCard *card)
+int setup_diva(struct IsdnCard *card)
{
int rc, have_card = 0;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 64ba26a4afe6..1df6f9a56ca2 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -831,8 +831,7 @@ probe_elsa(struct IsdnCardState *cs)
return (CARD_portlist[i]);
}
-static int __devinit
-setup_elsa_isa(struct IsdnCard *card)
+static int setup_elsa_isa(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
@@ -902,7 +901,7 @@ setup_elsa_isa(struct IsdnCard *card)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id elsa_ids[] __devinitdata = {
+static struct isapnp_device_id elsa_ids[] = {
{ ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133),
ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133),
(unsigned long) "Elsa QS1000" },
@@ -912,12 +911,11 @@ static struct isapnp_device_id elsa_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &elsa_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &elsa_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif /* __ISAPNP__ */
-static int __devinit
-setup_elsa_isapnp(struct IsdnCard *card)
+static int setup_elsa_isapnp(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
@@ -994,8 +992,7 @@ setup_elsa_isapnp(struct IsdnCard *card)
return (1);
}
-static void __devinit
-setup_elsa_pcmcia(struct IsdnCard *card)
+static void setup_elsa_pcmcia(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
@@ -1027,11 +1024,10 @@ setup_elsa_pcmcia(struct IsdnCard *card)
}
#ifdef CONFIG_PCI
-static struct pci_dev *dev_qs1000 __devinitdata = NULL;
-static struct pci_dev *dev_qs3000 __devinitdata = NULL;
+static struct pci_dev *dev_qs1000 = NULL;
+static struct pci_dev *dev_qs3000 = NULL;
-static int __devinit
-setup_elsa_pci(struct IsdnCard *card)
+static int setup_elsa_pci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
@@ -1089,15 +1085,13 @@ setup_elsa_pci(struct IsdnCard *card)
#else
-static int __devinit
-setup_elsa_pci(struct IsdnCard *card)
+static int setup_elsa_pci(struct IsdnCard *card)
{
return (1);
}
#endif /* CONFIG_PCI */
-static int __devinit
-setup_elsa_common(struct IsdnCard *card)
+static int setup_elsa_common(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
@@ -1212,8 +1206,7 @@ setup_elsa_common(struct IsdnCard *card)
return (1);
}
-int __devinit
-setup_elsa(struct IsdnCard *card)
+int setup_elsa(struct IsdnCard *card)
{
int rc;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index a8c4d3fc9a6d..ebe56918f6fc 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -62,9 +62,9 @@ MODULE_LICENSE("Dual MPL/GPL");
static int protocol = 2; /* EURO-ISDN Default */
module_param(protocol, int, 0);
-static int elsa_cs_config(struct pcmcia_device *link) __devinit;
+static int elsa_cs_config(struct pcmcia_device *link);
static void elsa_cs_release(struct pcmcia_device *link);
-static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
+static void elsa_cs_detach(struct pcmcia_device *p_dev);
typedef struct local_info_t {
struct pcmcia_device *p_dev;
@@ -72,7 +72,7 @@ typedef struct local_info_t {
int cardnr;
} local_info_t;
-static int __devinit elsa_cs_probe(struct pcmcia_device *link)
+static int elsa_cs_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -90,7 +90,7 @@ static int __devinit elsa_cs_probe(struct pcmcia_device *link)
return elsa_cs_config(link);
} /* elsa_cs_attach */
-static void __devexit elsa_cs_detach(struct pcmcia_device *link)
+static void elsa_cs_detach(struct pcmcia_device *link)
{
local_info_t *info = link->priv;
@@ -126,7 +126,7 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
return -ENODEV;
}
-static int __devinit elsa_cs_config(struct pcmcia_device *link)
+static int elsa_cs_config(struct pcmcia_device *link)
{
int i;
IsdnCard_t icard;
@@ -210,7 +210,7 @@ static struct pcmcia_driver elsa_cs_driver = {
.owner = THIS_MODULE,
.name = "elsa_cs",
.probe = elsa_cs_probe,
- .remove = __devexit_p(elsa_cs_detach),
+ .remove = elsa_cs_detach,
.id_table = elsa_ids,
.suspend = elsa_suspend,
.resume = elsa_resume,
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index b1e38b54ebac..e8d431a8302d 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -300,8 +300,7 @@ enpci_interrupt(int intno, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
- struct IsdnCardState *cs)
+static int en_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
{
if (pci_enable_device(dev_netjet))
return (0);
@@ -326,8 +325,7 @@ static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
return (1);
}
-static void __devinit en_cs_init(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static void en_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
{
cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // window to the AMD
@@ -350,8 +348,7 @@ static void __devinit en_cs_init(struct IsdnCard *card,
outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
}
-static int __devinit en_cs_init_rest(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int en_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
{
const int bytecnt = 256;
@@ -384,11 +381,10 @@ static int __devinit en_cs_init_rest(struct IsdnCard *card,
return (1);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
+static struct pci_dev *dev_netjet = NULL;
/* called by config.c */
-int __devinit
-setup_enternow_pci(struct IsdnCard *card)
+int setup_enternow_pci(struct IsdnCard *card)
{
int ret;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c
index 4fef77562554..35c6df6534ec 100644
--- a/drivers/isdn/hisax/gazel.c
+++ b/drivers/isdn/hisax/gazel.c
@@ -483,8 +483,7 @@ error:
return 1;
}
-static int __devinit
-setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs)
+static int setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs)
{
printk(KERN_INFO "Gazel: ISA PnP card automatic recognition\n");
// we got an irq parameter, assume it is an ISA card
@@ -532,10 +531,9 @@ setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs)
}
#ifdef CONFIG_PCI
-static struct pci_dev *dev_tel __devinitdata = NULL;
+static struct pci_dev *dev_tel = NULL;
-static int __devinit
-setup_gazelpci(struct IsdnCardState *cs)
+static int setup_gazelpci(struct IsdnCardState *cs)
{
u_int pci_ioaddr0 = 0, pci_ioaddr1 = 0;
u_char pci_irq = 0, found;
@@ -622,8 +620,7 @@ setup_gazelpci(struct IsdnCardState *cs)
}
#endif /* CONFIG_PCI */
-int __devinit
-setup_gazel(struct IsdnCard *card)
+int setup_gazel(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index dea04de8e7ca..c49c294fc81e 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1497,7 +1497,7 @@ enable_pci_ports(hfc4s8s_hw *hw)
/* initialise the HFC-4s/8s hardware */
/* return 0 on success. */
/*************************************/
-static int __devinit
+static int
setup_instance(hfc4s8s_hw *hw)
{
int err = -EIO;
@@ -1585,7 +1585,7 @@ out:
/*****************************************/
/* PCI hotplug interface: probe new card */
/*****************************************/
-static int __devinit
+static int
hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1640,7 +1640,7 @@ out:
/**************************************/
/* PCI hotplug interface: remove card */
/**************************************/
-static void __devexit
+static void
hfc4s8s_remove(struct pci_dev *pdev)
{
hfc4s8s_hw *hw = pci_get_drvdata(pdev);
@@ -1662,7 +1662,7 @@ hfc4s8s_remove(struct pci_dev *pdev)
static struct pci_driver hfc4s8s_driver = {
.name = "hfc4s8s_l1",
.probe = hfc4s8s_probe,
- .remove = __devexit_p(hfc4s8s_remove),
+ .remove = hfc4s8s_remove,
.id_table = hfc4s8s_ids,
};
@@ -1688,14 +1688,6 @@ hfc4s8s_module_init(void)
}
printk(KERN_INFO "HFC-4S/8S: found %d cards\n", card_cnt);
-#if !defined(CONFIG_HOTPLUG)
- if (err == 0) {
- err = -ENODEV;
- pci_unregister_driver(&hfc4s8s_driver);
- goto out;
- }
-#endif
-
return 0;
out:
return (err);
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index f60d4be58941..3ccd724ff8c2 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1632,9 +1632,9 @@ hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
/* this variable is used as card index when more than one card is present */
-static struct pci_dev *dev_hfcpci __devinitdata = NULL;
+static struct pci_dev *dev_hfcpci = NULL;
-int __devinit
+int
setup_hfcpci(struct IsdnCard *card)
{
u_long flags;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 4ec279ce052f..90f34ae2b80f 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1381,19 +1381,18 @@ hfcsx_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id hfc_ids[] __devinitdata = {
+static struct isapnp_device_id hfc_ids[] = {
{ ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620),
ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620),
(unsigned long) "Teles 16.3c2" },
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &hfc_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &hfc_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_hfcsx(struct IsdnCard *card)
+int setup_hfcsx(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
index a5f048bd2bb3..394da646e97b 100644
--- a/drivers/isdn/hisax/hfcscard.c
+++ b/drivers/isdn/hisax/hfcscard.c
@@ -136,7 +136,7 @@ hfcs_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id hfc_ids[] __devinitdata = {
+static struct isapnp_device_id hfc_ids[] = {
{ ISAPNP_VENDOR('A', 'N', 'X'), ISAPNP_FUNCTION(0x1114),
ISAPNP_VENDOR('A', 'N', 'X'), ISAPNP_FUNCTION(0x1114),
(unsigned long) "Acer P10" },
@@ -161,12 +161,11 @@ static struct isapnp_device_id hfc_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &hfc_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &hfc_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_hfcs(struct IsdnCard *card)
+int setup_hfcs(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index e4f47fe3f7fd..5e8a5d967162 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -70,7 +70,7 @@ static struct pci_device_id fcpci_ids[] = {
MODULE_DEVICE_TABLE(pci, fcpci_ids);
#ifdef CONFIG_PNP
-static struct pnp_device_id fcpnp_ids[] __devinitdata = {
+static struct pnp_device_id fcpnp_ids[] = {
{
.id = "AVM0900",
.driver_data = (unsigned long) "Fritz!Card PnP",
@@ -712,7 +712,7 @@ static inline void fcpci_init(struct fritz_adapter *adapter)
// ----------------------------------------------------------------------
-static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter)
+static int fcpcipnp_setup(struct fritz_adapter *adapter)
{
u32 val = 0;
int retval;
@@ -825,7 +825,7 @@ err:
return retval;
}
-static void __devexit fcpcipnp_release(struct fritz_adapter *adapter)
+static void fcpcipnp_release(struct fritz_adapter *adapter)
{
DBG(1, "");
@@ -836,8 +836,7 @@ static void __devexit fcpcipnp_release(struct fritz_adapter *adapter)
// ----------------------------------------------------------------------
-static struct fritz_adapter * __devinit
-new_adapter(void)
+static struct fritz_adapter *new_adapter(void)
{
struct fritz_adapter *adapter;
struct hisax_b_if *b_if[2];
@@ -876,8 +875,7 @@ static void delete_adapter(struct fritz_adapter *adapter)
kfree(adapter);
}
-static int __devinit fcpci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int fcpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct fritz_adapter *adapter;
int retval;
@@ -917,7 +915,7 @@ err:
}
#ifdef CONFIG_PNP
-static int __devinit fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+static int fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
struct fritz_adapter *adapter;
int retval;
@@ -959,7 +957,7 @@ err:
return retval;
}
-static void __devexit fcpnp_remove(struct pnp_dev *pdev)
+static void fcpnp_remove(struct pnp_dev *pdev)
{
struct fritz_adapter *adapter = pnp_get_drvdata(pdev);
@@ -973,12 +971,12 @@ static void __devexit fcpnp_remove(struct pnp_dev *pdev)
static struct pnp_driver fcpnp_driver = {
.name = "fcpnp",
.probe = fcpnp_probe,
- .remove = __devexit_p(fcpnp_remove),
+ .remove = fcpnp_remove,
.id_table = fcpnp_ids,
};
#endif
-static void __devexit fcpci_remove(struct pci_dev *pdev)
+static void fcpci_remove(struct pci_dev *pdev)
{
struct fritz_adapter *adapter = pci_get_drvdata(pdev);
@@ -990,7 +988,7 @@ static void __devexit fcpci_remove(struct pci_dev *pdev)
static struct pci_driver fcpci_driver = {
.name = "fcpci",
.probe = fcpci_probe,
- .remove = __devexit_p(fcpci_remove),
+ .remove = fcpci_remove,
.id_table = fcpci_ids,
};
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index 7be762b17c70..db5321f6379b 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -673,8 +673,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs)
cs->writeisac(cs, ICC_MASK, 0xFF);
}
-void __devinit
-setup_icc(struct IsdnCardState *cs)
+void setup_icc(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, icc_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index bcd70a387307..a365ccc1c99c 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -24,11 +24,11 @@
#define DBUSY_TIMER_VALUE 80
#define ARCOFI_USE 1
-static char *ISACVer[] __devinitdata =
+static char *ISACVer[] =
{"2086/2186 V1.1", "2085 B1", "2085 B2",
"2085 V2.3"};
-void __devinit ISACVersion(struct IsdnCardState *cs, char *s)
+void ISACVersion(struct IsdnCardState *cs, char *s)
{
int val;
@@ -669,8 +669,7 @@ void clear_pending_isac_ints(struct IsdnCardState *cs)
cs->writeisac(cs, ISAC_MASK, 0xFF);
}
-void __devinit
-setup_isac(struct IsdnCardState *cs)
+void setup_isac(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, isac_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
diff --git a/drivers/isdn/hisax/isurf.c b/drivers/isdn/hisax/isurf.c
index c1530fe248c2..1399ddd4f6cb 100644
--- a/drivers/isdn/hisax/isurf.c
+++ b/drivers/isdn/hisax/isurf.c
@@ -194,11 +194,10 @@ isurf_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
}
#ifdef __ISAPNP__
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_isurf(struct IsdnCard *card)
+int setup_isurf(struct IsdnCard *card)
{
int ver;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/ix1_micro.c b/drivers/isdn/hisax/ix1_micro.c
index 5f299f82b801..7ae39f5e865d 100644
--- a/drivers/isdn/hisax/ix1_micro.c
+++ b/drivers/isdn/hisax/ix1_micro.c
@@ -209,7 +209,7 @@ ix1_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id itk_ids[] __devinitdata = {
+static struct isapnp_device_id itk_ids[] = {
{ ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
(unsigned long) "ITK micro 2" },
@@ -219,13 +219,12 @@ static struct isapnp_device_id itk_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &itk_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &itk_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_ix1micro(struct IsdnCard *card)
+int setup_ix1micro(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/mic.c b/drivers/isdn/hisax/mic.c
index 08a6b7fb17f7..93398676f78f 100644
--- a/drivers/isdn/hisax/mic.c
+++ b/drivers/isdn/hisax/mic.c
@@ -187,8 +187,7 @@ mic_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_mic(struct IsdnCard *card)
+int setup_mic(struct IsdnCard *card)
{
int bytecnt;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c
index 6569e0315cca..e4c33cfe3ef4 100644
--- a/drivers/isdn/hisax/niccy.c
+++ b/drivers/isdn/hisax/niccy.c
@@ -223,10 +223,10 @@ static int niccy_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit setup_niccy(struct IsdnCard *card)
+int setup_niccy(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
@@ -298,7 +298,7 @@ int __devinit setup_niccy(struct IsdnCard *card)
}
} else {
#ifdef CONFIG_PCI
- static struct pci_dev *niccy_dev __devinitdata;
+ static struct pci_dev *niccy_dev;
u_int pci_ioaddr;
cs->subtyp = 0;
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
index f36ff69c07e1..32b4bbd18eb9 100644
--- a/drivers/isdn/hisax/nj_s.c
+++ b/drivers/isdn/hisax/nj_s.c
@@ -148,8 +148,7 @@ NETjet_S_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
- struct IsdnCardState *cs)
+static int njs_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
{
u32 cfg;
@@ -187,8 +186,7 @@ static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
return (1);
}
-static int __devinit njs_cs_init(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int njs_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
{
cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
@@ -225,8 +223,7 @@ static int __devinit njs_cs_init(struct IsdnCard *card,
return 1; /* end loop */
}
-static int __devinit njs_cs_init_rest(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int njs_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
{
const int bytecnt = 256;
@@ -256,10 +253,9 @@ static int __devinit njs_cs_init_rest(struct IsdnCard *card,
return (1);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
+static struct pci_dev *dev_netjet = NULL;
-int __devinit
-setup_netjet_s(struct IsdnCard *card)
+int setup_netjet_s(struct IsdnCard *card)
{
int ret;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
index 333484aef425..4e8adbede361 100644
--- a/drivers/isdn/hisax/nj_u.c
+++ b/drivers/isdn/hisax/nj_u.c
@@ -128,8 +128,7 @@ NETjet_U_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
- struct IsdnCardState *cs)
+static int nju_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
{
if (pci_enable_device(dev_netjet))
return (0);
@@ -148,8 +147,7 @@ static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
return (1);
}
-static int __devinit nju_cs_init(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int nju_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
{
cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
@@ -187,8 +185,7 @@ static int __devinit nju_cs_init(struct IsdnCard *card,
return 1; /* end loop */
}
-static int __devinit nju_cs_init_rest(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int nju_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
{
const int bytecnt = 256;
@@ -219,10 +216,9 @@ static int __devinit nju_cs_init_rest(struct IsdnCard *card,
return (1);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
+static struct pci_dev *dev_netjet = NULL;
-int __devinit
-setup_netjet_u(struct IsdnCard *card)
+int setup_netjet_u(struct IsdnCard *card)
{
int ret;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/s0box.c b/drivers/isdn/hisax/s0box.c
index 383c4e7ce50b..4e7d0aa227ad 100644
--- a/drivers/isdn/hisax/s0box.c
+++ b/drivers/isdn/hisax/s0box.c
@@ -210,8 +210,7 @@ S0Box_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_s0box(struct IsdnCard *card)
+int setup_s0box(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/saphir.c b/drivers/isdn/hisax/saphir.c
index 75dcae6d36e0..6b2d0eccdd56 100644
--- a/drivers/isdn/hisax/saphir.c
+++ b/drivers/isdn/hisax/saphir.c
@@ -240,8 +240,7 @@ saphir_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
-int __devinit
-setup_saphir(struct IsdnCard *card)
+int setup_saphir(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 1ee531b6be99..f16a47bcef48 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -517,7 +517,7 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id sedl_ids[] __devinitdata = {
+static struct isapnp_device_id sedl_ids[] = {
{ ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
(unsigned long) "Speed win" },
@@ -527,11 +527,10 @@ static struct isapnp_device_id sedl_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &sedl_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &sedl_ids[0];
+static struct pnp_card *pnp_c = NULL;
-static int __devinit
-setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
+static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
{
struct IsdnCardState *cs = card->cs;
struct pnp_dev *pnp_d;
@@ -591,18 +590,16 @@ setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
}
#else
-static int __devinit
-setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
+static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
{
return -1;
}
#endif /* __ISAPNP__ */
#ifdef CONFIG_PCI
-static struct pci_dev *dev_sedl __devinitdata = NULL;
+static struct pci_dev *dev_sedl = NULL;
-static int __devinit
-setup_sedlbauer_pci(struct IsdnCard *card)
+static int setup_sedlbauer_pci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u16 sub_vendor_id, sub_id;
@@ -667,16 +664,14 @@ setup_sedlbauer_pci(struct IsdnCard *card)
#else
-static int __devinit
-setup_sedlbauer_pci(struct IsdnCard *card)
+static int setup_sedlbauer_pci(struct IsdnCard *card)
{
return (1);
}
#endif /* CONFIG_PCI */
-int __devinit
-setup_sedlbauer(struct IsdnCard *card)
+int setup_sedlbauer(struct IsdnCard *card)
{
int bytecnt = 8, ver, val, rc;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index f0dfc0c976eb..90f81291641b 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -62,10 +62,10 @@ MODULE_LICENSE("Dual MPL/GPL");
static int protocol = 2; /* EURO-ISDN Default */
module_param(protocol, int, 0);
-static int sedlbauer_config(struct pcmcia_device *link) __devinit;
+static int sedlbauer_config(struct pcmcia_device *link);
static void sedlbauer_release(struct pcmcia_device *link);
-static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
+static void sedlbauer_detach(struct pcmcia_device *p_dev);
typedef struct local_info_t {
struct pcmcia_device *p_dev;
@@ -73,7 +73,7 @@ typedef struct local_info_t {
int cardnr;
} local_info_t;
-static int __devinit sedlbauer_probe(struct pcmcia_device *link)
+static int sedlbauer_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -90,7 +90,7 @@ static int __devinit sedlbauer_probe(struct pcmcia_device *link)
return sedlbauer_config(link);
} /* sedlbauer_attach */
-static void __devexit sedlbauer_detach(struct pcmcia_device *link)
+static void sedlbauer_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);
@@ -110,7 +110,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev, void *priv_data)
return pcmcia_request_io(p_dev);
}
-static int __devinit sedlbauer_config(struct pcmcia_device *link)
+static int sedlbauer_config(struct pcmcia_device *link)
{
int ret;
IsdnCard_t icard;
@@ -201,7 +201,7 @@ static struct pcmcia_driver sedlbauer_driver = {
.owner = THIS_MODULE,
.name = "sedlbauer_cs",
.probe = sedlbauer_probe,
- .remove = __devexit_p(sedlbauer_detach),
+ .remove = sedlbauer_detach,
.id_table = sedlbauer_ids,
.suspend = sedlbauer_suspend,
.resume = sedlbauer_resume,
diff --git a/drivers/isdn/hisax/sportster.c b/drivers/isdn/hisax/sportster.c
index 1267298ef551..18cee6360d0a 100644
--- a/drivers/isdn/hisax/sportster.c
+++ b/drivers/isdn/hisax/sportster.c
@@ -183,8 +183,7 @@ Sportster_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit
-get_io_range(struct IsdnCardState *cs)
+static int get_io_range(struct IsdnCardState *cs)
{
int i, j, adr;
@@ -208,8 +207,7 @@ get_io_range(struct IsdnCardState *cs)
}
}
-int __devinit
-setup_sportster(struct IsdnCard *card)
+int setup_sportster(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/teleint.c b/drivers/isdn/hisax/teleint.c
index fa329e27cc5b..bf647545c70c 100644
--- a/drivers/isdn/hisax/teleint.c
+++ b/drivers/isdn/hisax/teleint.c
@@ -259,8 +259,7 @@ TeleInt_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_TeleInt(struct IsdnCard *card)
+int setup_TeleInt(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/teles0.c b/drivers/isdn/hisax/teles0.c
index 49b4a26f91e0..ce9eabdd2f6e 100644
--- a/drivers/isdn/hisax/teles0.c
+++ b/drivers/isdn/hisax/teles0.c
@@ -263,8 +263,7 @@ Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_teles0(struct IsdnCard *card)
+int setup_teles0(struct IsdnCard *card)
{
u_char val;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/teles3.c b/drivers/isdn/hisax/teles3.c
index 220b919fafc3..38fb2c1a3f0f 100644
--- a/drivers/isdn/hisax/teles3.c
+++ b/drivers/isdn/hisax/teles3.c
@@ -253,7 +253,7 @@ Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg)
#ifdef __ISAPNP__
-static struct isapnp_device_id teles_ids[] __devinitdata = {
+static struct isapnp_device_id teles_ids[] = {
{ ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2110),
ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2110),
(unsigned long) "Teles 16.3 PnP" },
@@ -266,12 +266,11 @@ static struct isapnp_device_id teles_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &teles_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &teles_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_teles3(struct IsdnCard *card)
+int setup_teles3(struct IsdnCard *card)
{
u_char val;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index 4deac451807c..f2476ffb04fd 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -43,9 +43,9 @@ MODULE_LICENSE("GPL");
static int protocol = 2; /* EURO-ISDN Default */
module_param(protocol, int, 0);
-static int teles_cs_config(struct pcmcia_device *link) __devinit;
+static int teles_cs_config(struct pcmcia_device *link);
static void teles_cs_release(struct pcmcia_device *link);
-static void teles_detach(struct pcmcia_device *p_dev) __devexit;
+static void teles_detach(struct pcmcia_device *p_dev);
typedef struct local_info_t {
struct pcmcia_device *p_dev;
@@ -53,7 +53,7 @@ typedef struct local_info_t {
int cardnr;
} local_info_t;
-static int __devinit teles_probe(struct pcmcia_device *link)
+static int teles_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -72,7 +72,7 @@ static int __devinit teles_probe(struct pcmcia_device *link)
return teles_cs_config(link);
} /* teles_attach */
-static void __devexit teles_detach(struct pcmcia_device *link)
+static void teles_detach(struct pcmcia_device *link)
{
local_info_t *info = link->priv;
@@ -108,7 +108,7 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
return -ENODEV;
}
-static int __devinit teles_cs_config(struct pcmcia_device *link)
+static int teles_cs_config(struct pcmcia_device *link)
{
int i;
IsdnCard_t icard;
@@ -192,7 +192,7 @@ static struct pcmcia_driver teles_cs_driver = {
.owner = THIS_MODULE,
.name = "teles_cs",
.probe = teles_probe,
- .remove = __devexit_p(teles_detach),
+ .remove = teles_detach,
.id_table = teles_ids,
.suspend = teles_suspend,
.resume = teles_resume,
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index 9c002c9dc771..f6ab63aa6995 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -283,10 +283,9 @@ TelesPCI_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static struct pci_dev *dev_tel __devinitdata = NULL;
+static struct pci_dev *dev_tel = NULL;
-int __devinit
-setup_telespci(struct IsdnCard *card)
+int setup_telespci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 0f0d094af85b..d8cac6935818 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -991,10 +991,9 @@ w6692_card_msg(struct IsdnCardState *cs, int mt, void *arg)
static int id_idx;
-static struct pci_dev *dev_w6692 __devinitdata = NULL;
+static struct pci_dev *dev_w6692 = NULL;
-int __devinit
-setup_w6692(struct IsdnCard *card)
+int setup_w6692(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index b61bbb4bb52b..0db2f7506250 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -56,8 +56,8 @@ static hysdn_card *card_last = NULL; /* pointer to first card */
/* is assumed and the module will not be kept in memory. */
/****************************************************************************/
-static int __devinit hysdn_pci_init_one(struct pci_dev *akt_pcidev,
- const struct pci_device_id *ent)
+static int hysdn_pci_init_one(struct pci_dev *akt_pcidev,
+ const struct pci_device_id *ent)
{
hysdn_card *card;
int rc;
@@ -109,7 +109,7 @@ err_out:
return rc;
}
-static void __devexit hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
+static void hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
{
hysdn_card *card = pci_get_drvdata(akt_pcidev);
@@ -147,7 +147,7 @@ static struct pci_driver hysdn_pci_driver = {
.name = "hysdn",
.id_table = hysdn_pci_tbl,
.probe = hysdn_pci_init_one,
- .remove = __devexit_p(hysdn_pci_remove_one),
+ .remove = hysdn_pci_remove_one,
};
static int hysdn_have_procfs;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index e2a945ee9f05..b87d9e577be2 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -876,7 +876,7 @@ isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue
* of the mapping (di,ch)<->minor, happen during the sleep? --he
*/
int
-isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
+isdn_readbchan_tty(int di, int channel, struct tty_port *port, int cisco_hack)
{
int count;
int count_pull;
@@ -891,7 +891,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
if (skb_queue_empty(&dev->drv[di]->rpqueue[channel]))
return 0;
- len = tty_buffer_request_room(tty, dev->drv[di]->rcvcount[channel]);
+ len = tty_buffer_request_room(port, dev->drv[di]->rcvcount[channel]);
if (len == 0)
return len;
@@ -912,7 +912,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
while ((count_pull < skb->len) && (len > 0)) {
/* push every character but the last to the tty buffer directly */
if (count_put)
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
len--;
if (dev->drv[di]->DLEflag & DLEmask) {
last = DLE;
@@ -940,7 +940,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
}
count_put = count_pull;
if (count_put > 1)
- tty_insert_flip_string(tty, skb->data, count_put - 1);
+ tty_insert_flip_string(port, skb->data, count_put - 1);
last = skb->data[count_put - 1];
len -= count_put;
#ifdef CONFIG_ISDN_AUDIO
@@ -952,16 +952,16 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
* Now we can dequeue it.
*/
if (cisco_hack)
- tty_insert_flip_char(tty, last, 0xFF);
+ tty_insert_flip_char(port, last, 0xFF);
else
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
#ifdef CONFIG_ISDN_AUDIO
ISDN_AUDIO_SKB_LOCK(skb) = 0;
#endif
skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]);
dev_kfree_skb(skb);
} else {
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
/* Not yet emptied this buff, so it
* must stay in the queue, for further calls
* but we pull off the data we got until now.
diff --git a/drivers/isdn/i4l/isdn_common.h b/drivers/isdn/i4l/isdn_common.h
index 9a471f62e1d4..2260ef07ab9c 100644
--- a/drivers/isdn/i4l/isdn_common.h
+++ b/drivers/isdn/i4l/isdn_common.h
@@ -37,7 +37,7 @@ extern void isdn_timer_ctrl(int tf, int onoff);
extern void isdn_unexclusive_channel(int di, int ch);
extern int isdn_getnum(char **);
extern int isdn_readbchan(int, int, u_char *, u_char *, int, wait_queue_head_t *);
-extern int isdn_readbchan_tty(int, int, struct tty_struct *, int);
+extern int isdn_readbchan_tty(int, int, struct tty_port *, int);
extern int isdn_get_free_channel(int, int, int, int, int, char *);
extern int isdn_writebuf_skb_stub(int, int, int, struct sk_buff *);
extern int register_isdn(isdn_if *i);
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index e09dc8a5e743..d8a7d8323414 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -60,18 +60,14 @@ static int si2bit[8] =
static int
isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
{
+ struct tty_port *port = &info->port;
int c;
int len;
- struct tty_struct *tty;
char last;
if (!info->online)
return 0;
- tty = info->port.tty;
- if (!tty)
- return 0;
-
if (!(info->mcr & UART_MCR_RTS))
return 0;
@@ -81,7 +77,7 @@ isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
#endif
;
- c = tty_buffer_request_room(tty, len);
+ c = tty_buffer_request_room(port, len);
if (c < len)
return 0;
@@ -91,25 +87,25 @@ isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
unsigned char *dp = skb->data;
while (--l) {
if (*dp == DLE)
- tty_insert_flip_char(tty, DLE, 0);
- tty_insert_flip_char(tty, *dp++, 0);
+ tty_insert_flip_char(port, DLE, 0);
+ tty_insert_flip_char(port, *dp++, 0);
}
if (*dp == DLE)
- tty_insert_flip_char(tty, DLE, 0);
+ tty_insert_flip_char(port, DLE, 0);
last = *dp;
} else {
#endif
if (len > 1)
- tty_insert_flip_string(tty, skb->data, len - 1);
+ tty_insert_flip_string(port, skb->data, len - 1);
last = skb->data[len - 1];
#ifdef CONFIG_ISDN_AUDIO
}
#endif
if (info->emu.mdmreg[REG_CPPP] & BIT_CPPP)
- tty_insert_flip_char(tty, last, 0xFF);
+ tty_insert_flip_char(port, last, 0xFF);
else
- tty_insert_flip_char(tty, last, TTY_NORMAL);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
+ tty_flip_buffer_push(port);
kfree_skb(skb);
return 1;
@@ -126,7 +122,6 @@ isdn_tty_readmodem(void)
int midx;
int i;
int r;
- struct tty_struct *tty;
modem_info *info;
for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
@@ -144,20 +139,21 @@ isdn_tty_readmodem(void)
if ((info->vonline & 1) && (info->emu.vpar[1]))
isdn_audio_eval_silence(info);
#endif
- tty = info->port.tty;
- if (tty) {
- if (info->mcr & UART_MCR_RTS) {
- /* CISCO AsyncPPP Hack */
- if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
- r = isdn_readbchan_tty(info->isdn_driver, info->isdn_channel, tty, 0);
- else
- r = isdn_readbchan_tty(info->isdn_driver, info->isdn_channel, tty, 1);
- if (r)
- tty_flip_buffer_push(tty);
- } else
- r = 1;
+ if (info->mcr & UART_MCR_RTS) {
+ /* CISCO AsyncPPP Hack */
+ if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
+ r = isdn_readbchan_tty(info->isdn_driver,
+ info->isdn_channel,
+ &info->port, 0);
+ else
+ r = isdn_readbchan_tty(info->isdn_driver,
+ info->isdn_channel,
+ &info->port, 1);
+ if (r)
+ tty_flip_buffer_push(&info->port);
} else
r = 1;
+
if (r) {
info->rcvsched = 0;
resched = 1;
@@ -2229,7 +2225,7 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
void
isdn_tty_at_cout(char *msg, modem_info *info)
{
- struct tty_struct *tty;
+ struct tty_port *port = &info->port;
atemu *m = &info->emu;
char *p;
char c;
@@ -2246,15 +2242,14 @@ isdn_tty_at_cout(char *msg, modem_info *info)
l = strlen(msg);
spin_lock_irqsave(&info->readlock, flags);
- tty = info->port.tty;
- if ((info->port.flags & ASYNC_CLOSING) || (!tty)) {
+ if (port->flags & ASYNC_CLOSING) {
spin_unlock_irqrestore(&info->readlock, flags);
return;
}
/* use queue instead of direct, if online and */
/* data is in queue or buffer is full */
- if (info->online && ((tty_buffer_request_room(tty, l) < l) ||
+ if (info->online && ((tty_buffer_request_room(port, l) < l) ||
!skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) {
skb = alloc_skb(l, GFP_ATOMIC);
if (!skb) {
@@ -2285,7 +2280,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
if (skb) {
*sp++ = c;
} else {
- if (tty_insert_flip_char(tty, c, TTY_NORMAL) == 0)
+ if (tty_insert_flip_char(port, c, TTY_NORMAL) == 0)
break;
}
}
@@ -2299,7 +2294,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
} else {
spin_unlock_irqrestore(&info->readlock, flags);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
}
diff --git a/drivers/isdn/i4l/isdn_x25iface.h b/drivers/isdn/i4l/isdn_x25iface.h
index 0b26e3b336e7..ca08e082cf7c 100644
--- a/drivers/isdn/i4l/isdn_x25iface.h
+++ b/drivers/isdn/i4l/isdn_x25iface.h
@@ -19,7 +19,6 @@
#endif
#include <linux/skbuff.h>
-#include <linux/wanrouter.h>
#include <linux/isdn.h>
#include <linux/concap.h>
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index c401634c00ec..da30c5cb9609 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -140,7 +140,6 @@ static struct device_attribute mISDN_dev_attrs[] = {
{}
};
-#ifdef CONFIG_HOTPLUG
static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
@@ -153,7 +152,6 @@ static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#endif
static void mISDN_class_release(struct class *cls)
{
@@ -163,22 +161,20 @@ static void mISDN_class_release(struct class *cls)
static struct class mISDN_class = {
.name = "mISDN",
.owner = THIS_MODULE,
-#ifdef CONFIG_HOTPLUG
.dev_uevent = mISDN_uevent,
-#endif
.dev_attrs = mISDN_dev_attrs,
.dev_release = mISDN_dev_release,
.class_release = mISDN_class_release,
};
static int
-_get_mdevice(struct device *dev, void *id)
+_get_mdevice(struct device *dev, const void *id)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return 0;
- if (mdev->id != *(u_int *)id)
+ if (mdev->id != *(const u_int *)id)
return 0;
return 1;
}
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 28c99c623bcd..22b720ec80cb 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1217,8 +1217,7 @@ static void __exit dsp_cleanup(void)
{
mISDN_unregister_Bprotocol(&DSP);
- if (timer_pending(&dsp_spl_tl))
- del_timer(&dsp_spl_tl);
+ del_timer_sync(&dsp_spl_tl);
if (!list_empty(&dsp_ilist)) {
printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index f8e405c383a0..2c0d2c2bf946 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -689,7 +689,7 @@ l1oip_socket_thread(void *data)
hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip);
hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);
- /* bind to incomming port */
+ /* bind to incoming port */
if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
sizeof(hc->sin_local))) {
printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 5f21f629b7ae..deda591f70b9 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/mISDNif.h>
#include <linux/kthread.h>
+#include <linux/sched.h>
#include "core.h"
static u_int *debug;
@@ -202,6 +203,9 @@ static int
mISDNStackd(void *data)
{
struct mISDNstack *st = data;
+#ifdef MISDN_MSG_STATS
+ cputime_t utime, stime;
+#endif
int err = 0;
sigfillset(&current->blocked);
@@ -303,9 +307,10 @@ mISDNStackd(void *data)
"msg %d sleep %d stopped\n",
dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
st->stopped_cnt);
+ task_cputime(st->thread, &utime, &stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
- dev_name(&st->dev->dev), st->thread->utime, st->thread->stime);
+ dev_name(&st->dev->dev), utime, stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b58bc8a14b9c..4469b441b785 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -154,7 +154,7 @@ config LEDS_HP6XX
config LEDS_PCA9532
tristate "LED driver for PCA9532 dimmer"
depends on LEDS_CLASS
- depends on I2C && INPUT && EXPERIMENTAL
+ depends on I2C && INPUT
help
This option enables support for NXP pca9532
LED controller. It is generally only useful
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 48cce18e9d6d..a20752f562bc 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -211,7 +211,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
led_trigger_set_default(led_cdev);
#endif
- printk(KERN_DEBUG "Registered led device: %s\n",
+ dev_dbg(parent, "Registered led device: %s\n",
led_cdev->name);
return 0;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 262eb4193710..3c972b2f9893 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -166,6 +166,19 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_trigger_set_default);
+void led_trigger_rename_static(const char *name, struct led_trigger *trig)
+{
+ /* new name must be on a temporary string to prevent races */
+ BUG_ON(name == trig->name);
+
+ down_write(&triggers_list_lock);
+	/* this assumes that trig->name was originally allocated to
+	 * non-constant storage */
+ strcpy((char *)trig->name, name);
+ up_write(&triggers_list_lock);
+}
+EXPORT_SYMBOL_GPL(led_trigger_rename_static);
+
/* LED Trigger Interface */
int led_trigger_register(struct led_trigger *trig)
@@ -300,13 +313,13 @@ void led_trigger_register_simple(const char *name, struct led_trigger **tp)
if (err < 0) {
kfree(trig);
trig = NULL;
- printk(KERN_WARNING "LED trigger %s failed to register"
- " (%d)\n", name, err);
+ pr_warn("LED trigger %s failed to register (%d)\n",
+ name, err);
}
- } else
- printk(KERN_WARNING "LED trigger %s failed to register"
- " (no memory)\n", name);
-
+ } else {
+ pr_warn("LED trigger %s failed to register (no memory)\n",
+ name);
+ }
*tp = trig;
}
EXPORT_SYMBOL_GPL(led_trigger_register_simple);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index b7e8cc0957fc..6be2edd41173 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -165,15 +165,13 @@ static int pm860x_led_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "control");
if (!res) {
dev_err(&pdev->dev, "No REG resource for control\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_control = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "blink");
if (!res) {
dev_err(&pdev->dev, "No REG resource for blink\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_blink = res->start;
memset(data->name, 0, MFD_NAME_SIZE);
@@ -224,9 +222,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
}
pm860x_led_set(&data->cdev, 0);
return 0;
-out:
- devm_kfree(&pdev->dev, data);
- return ret;
}
static int pm860x_led_remove(struct platform_device *pdev)
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index dcd9128a51a9..e8072abe76e5 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -5,10 +5,10 @@
*
* Loosely derived from leds-da903x:
* Copyright (C) 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
+ * Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* Licensed under the GPL-2 or later.
*/
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 9abe8de40edd..851517030cc1 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -26,8 +26,8 @@
#define BD2802_LED_OFFSET 0xa
#define BD2802_COLOR_OFFSET 0x3
-#define BD2802_REG_CLKSETUP 0x00
-#define BD2802_REG_CONTROL 0x01
+#define BD2802_REG_CLKSETUP 0x00
+#define BD2802_REG_CONTROL 0x01
#define BD2802_REG_HOURSETUP 0x02
#define BD2802_REG_CURRENT1SETUP 0x03
#define BD2802_REG_CURRENT2SETUP 0x04
@@ -93,7 +93,7 @@ struct bd2802_led {
* In ADF mode, user can set registers of BD2802GU directly,
* therefore BD2802GU doesn't enter reset state.
*/
- int adf_on;
+ int adf_on;
enum led_ids led_id;
enum led_colors color;
@@ -328,7 +328,7 @@ static ssize_t bd2802_store_reg##reg_addr(struct device *dev, \
int ret; \
if (!count) \
return -EINVAL; \
- ret = strict_strtoul(buf, 16, &val); \
+ ret = kstrtoul(buf, 16, &val); \
if (ret) \
return ret; \
down_write(&led->rwsem); \
@@ -492,7 +492,7 @@ static ssize_t bd2802_store_##attr_name(struct device *dev, \
int ret; \
if (!count) \
return -EINVAL; \
- ret = strict_strtoul(buf, 16, &val); \
+ ret = kstrtoul(buf, 16, &val); \
if (ret) \
return ret; \
down_write(&led->rwsem); \
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index b02547052e12..6a8405df76a3 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
@@ -26,7 +27,7 @@ static struct platform_device *pdev;
static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
{
- printk(KERN_INFO KBUILD_MODNAME ": '%s' found\n", id->ident);
+ pr_info("'%s' found\n", id->ident);
return 1;
}
@@ -135,8 +136,7 @@ static int clevo_mail_led_blink(struct led_classdev *led_cdev,
status = 0;
} else {
- printk(KERN_DEBUG KBUILD_MODNAME
- ": clevo_mail_led_blink(..., %lu, %lu),"
+ pr_debug("clevo_mail_led_blink(..., %lu, %lu),"
" returning -EINVAL (unsupported)\n",
*delay_on, *delay_off);
}
@@ -183,7 +183,7 @@ static int __init clevo_mail_led_init(void)
count = dmi_check_system(clevo_mail_led_dmi_table);
} else {
count = 1;
- printk(KERN_ERR KBUILD_MODNAME ": Skipping DMI detection. "
+ pr_err("Skipping DMI detection. "
"If the driver works on your hardware please "
"report model and the output of dmidecode in tracker "
"at http://sourceforge.net/projects/clevo-mailled/\n");
@@ -197,8 +197,7 @@ static int __init clevo_mail_led_init(void)
error = platform_driver_probe(&clevo_mail_led_driver,
clevo_mail_led_probe);
if (error) {
- printk(KERN_ERR KBUILD_MODNAME
- ": Can't probe platform driver\n");
+ pr_err("Can't probe platform driver\n");
platform_device_unregister(pdev);
}
} else
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index ffa99303b629..8abcb66db01c 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -43,7 +43,7 @@ static int cobalt_qube_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, resource_size(res));
+ led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
@@ -52,12 +52,11 @@ static int cobalt_qube_led_probe(struct platform_device *pdev)
retval = led_classdev_register(&pdev->dev, &qube_front_led);
if (retval)
- goto err_iounmap;
+ goto err_null;
return 0;
-err_iounmap:
- iounmap(led_port);
+err_null:
led_port = NULL;
return retval;
@@ -67,10 +66,8 @@ static int cobalt_qube_led_remove(struct platform_device *pdev)
{
led_classdev_unregister(&qube_front_led);
- if (led_port) {
- iounmap(led_port);
+ if (led_port)
led_port = NULL;
- }
return 0;
}
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index d52e47de396f..001088b31373 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -85,13 +85,13 @@ static int cobalt_raq_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, resource_size(res));
+ led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
retval = led_classdev_register(&pdev->dev, &raq_power_off_led);
if (retval)
- goto err_iounmap;
+ goto err_null;
retval = led_classdev_register(&pdev->dev, &raq_web_led);
if (retval)
@@ -102,8 +102,7 @@ static int cobalt_raq_led_probe(struct platform_device *pdev)
err_unregister:
led_classdev_unregister(&raq_power_off_led);
-err_iounmap:
- iounmap(led_port);
+err_null:
led_port = NULL;
return retval;
@@ -114,10 +113,8 @@ static int cobalt_raq_led_remove(struct platform_device *pdev)
led_classdev_unregister(&raq_power_off_led);
led_classdev_unregister(&raq_web_led);
- if (led_port) {
- iounmap(led_port);
+ if (led_port)
led_port = NULL;
- }
return 0;
}
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 6f31b776765b..c263a21db829 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -2,10 +2,10 @@
* LEDs driver for Dialog Semiconductor DA9030/DA9034
*
* Copyright (C) 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
+ * Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -85,7 +85,7 @@ static void da903x_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct da903x_led *led;
-
+
led = container_of(led_cdev, struct da903x_led, cdev);
led->new_brightness = value;
schedule_work(&led->work);
@@ -156,7 +156,7 @@ static struct platform_driver da903x_led_driver = {
module_platform_driver(da903x_led_driver);
MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034");
-MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
- "Mike Rapoport <mike@compulab.co.il>");
+MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da903x-led");
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index b9053fa6e253..b4d5a44cc41b 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -20,8 +20,8 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <mach/hardware.h>
-#include <asm/io.h>
#define FSG_LED_WLAN_BIT 0
#define FSG_LED_WAN_BIT 1
@@ -149,11 +149,10 @@ static int fsg_led_probe(struct platform_device *pdev)
int ret;
/* Map the LED chip select address space */
- latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
- if (!latch_address) {
- ret = -ENOMEM;
- goto failremap;
- }
+ latch_address = (unsigned short *) devm_ioremap(&pdev->dev,
+ IXP4XX_EXP_BUS_BASE(2), 512);
+ if (!latch_address)
+ return -ENOMEM;
latch_value = 0xffff;
*latch_address = latch_value;
@@ -195,8 +194,6 @@ static int fsg_led_probe(struct platform_device *pdev)
failwan:
led_classdev_unregister(&fsg_wlan_led);
failwlan:
- iounmap(latch_address);
- failremap:
return ret;
}
@@ -210,8 +207,6 @@ static int fsg_led_remove(struct platform_device *pdev)
led_classdev_unregister(&fsg_sync_led);
led_classdev_unregister(&fsg_ring_led);
- iounmap(latch_address);
-
return 0;
}
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 291c20797ca0..a0d931bcb37c 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/err.h>
struct gpio_led_data {
struct led_classdev cdev;
@@ -101,15 +102,11 @@ static int create_gpio_led(const struct gpio_led *template,
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
- printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
+ dev_info(parent, "Skipping unavailable LED gpio %d (%s)\n",
template->gpio, template->name);
return 0;
}
- ret = gpio_request(template->gpio, template->name);
- if (ret < 0)
- return ret;
-
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
@@ -129,20 +126,20 @@ static int create_gpio_led(const struct gpio_led *template,
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
+ ret = devm_gpio_request_one(parent, template->gpio,
+ (led_dat->active_low ^ state) ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ template->name);
if (ret < 0)
- goto err;
-
+ return ret;
+
INIT_WORK(&led_dat->work, gpio_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-err:
- gpio_free(led_dat->gpio);
- return ret;
}
static void delete_gpio_led(struct gpio_led_data *led)
@@ -151,7 +148,6 @@ static void delete_gpio_led(struct gpio_led_data *led)
return;
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
- gpio_free(led->gpio);
}
struct gpio_leds_priv {
@@ -176,12 +172,16 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
/* count LEDs in this device, so we know how much to allocate */
count = of_get_child_count(np);
if (!count)
- return NULL;
+ return ERR_PTR(-ENODEV);
+
+ for_each_child_of_node(np, child)
+ if (of_get_gpio(child, 0) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
priv = devm_kzalloc(&pdev->dev, sizeof_gpio_leds_priv(count),
GFP_KERNEL);
if (!priv)
- return NULL;
+ return ERR_PTR(-ENOMEM);
for_each_child_of_node(np, child) {
struct gpio_led led = {};
@@ -216,7 +216,7 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
err:
for (count = priv->num_leds - 2; count >= 0; count--)
delete_gpio_led(&priv->leds[count]);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static const struct of_device_id of_gpio_leds_match[] = {
@@ -226,7 +226,7 @@ static const struct of_device_id of_gpio_leds_match[] = {
#else /* CONFIG_OF_GPIO */
static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF_GPIO */
@@ -264,8 +264,8 @@ static int gpio_led_probe(struct platform_device *pdev)
}
} else {
priv = gpio_leds_create_of(pdev);
- if (!priv)
- return -ENODEV;
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
}
platform_set_drvdata(pdev, priv);
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index b13ce0371918..65d79284c488 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -408,10 +408,10 @@ static ssize_t lm3556_indicator_pattern_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(pattern, 0666, NULL, lm3556_indicator_pattern_store);
+static DEVICE_ATTR(pattern, S_IWUSR, NULL, lm3556_indicator_pattern_store);
static const struct regmap_config lm355x_regmap = {
.reg_bits = 8,
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 215a7c1e56cc..07b3dde90613 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -201,13 +201,13 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
out_strtoint:
dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(torch_pin, 0666, NULL, lm3642_torch_pin_store);
+static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store);
static void lm3642_deferred_torch_brightness_set(struct work_struct *work)
{
@@ -258,13 +258,13 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
out_strtoint:
dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(strobe_pin, 0666, NULL, lm3642_strobe_pin_store);
+static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store);
static void lm3642_deferred_strobe_brightness_set(struct work_struct *work)
{
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index b081f67e1dea..0c4386e656c1 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -86,7 +86,7 @@ static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
tmp = i2c_smbus_read_byte_data(client, reg);
if (tmp < 0)
- return -EINVAL;
+ return tmp;
*value = tmp;
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 966f158a07db..cb8a5220200b 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -152,7 +152,7 @@ static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
- return -EIO;
+ return ret;
*buf = ret;
return 0;
@@ -616,7 +616,7 @@ static ssize_t store_led_pattern(struct device *dev,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
@@ -788,10 +788,17 @@ static int lp5521_probe(struct i2c_client *client,
* LP5521_REG_ENABLE register will not have any effect - strange!
*/
ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
+ if (ret) {
dev_err(&client->dev, "error in resetting chip\n");
goto fail2;
}
+ if (buf != LP5521_REG_R_CURR_DEFAULT) {
+ dev_err(&client->dev,
+ "unexpected data in register (expected 0x%x got 0x%x)\n",
+ LP5521_REG_R_CURR_DEFAULT, buf);
+ ret = -EINVAL;
+ goto fail2;
+ }
usleep_range(10000, 20000);
ret = lp5521_detect(client);
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 7e304b7ff779..7f5be8948cde 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -171,7 +171,7 @@ static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
s32 ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
- return -EIO;
+ return ret;
*buf = ret;
return 0;
@@ -248,7 +248,10 @@ static int lp5523_configure(struct i2c_client *client)
/* Let the programs run for couple of ms and check the engine status */
usleep_range(3000, 6000);
- lp5523_read(client, LP5523_REG_STATUS, &status);
+ ret = lp5523_read(client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
status &= LP5523_ENG_STATUS_MASK;
if (status == LP5523_ENG_STATUS_MASK) {
@@ -464,10 +467,16 @@ static ssize_t lp5523_selftest(struct device *dev,
LP5523_EN_LEDTEST | 16);
usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */
ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ goto fail;
+
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000); /* Was not ready. Wait little bit */
- ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ if (ret < 0)
+ goto fail;
+
vdd--; /* There may be some fluctuation in measurement */
for (i = 0; i < LP5523_LEDS; i++) {
@@ -489,9 +498,14 @@ static ssize_t lp5523_selftest(struct device *dev,
/* ADC conversion time is 2.7 ms typically */
usleep_range(3000, 6000);
ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ goto fail;
+
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000);/* Was not ready. Wait. */
- ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+ ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+ if (ret < 0)
+ goto fail;
if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
pos += sprintf(buf + pos, "LED %d FAIL\n", i);
@@ -696,7 +710,7 @@ static ssize_t store_current(struct device *dev,
ssize_t ret;
unsigned long curr;
- if (strict_strtoul(buf, 0, &curr))
+ if (kstrtoul(buf, 0, &curr))
return -EINVAL;
if (curr > led->max_current)
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 34b3ba4376fc..c9b9e1fec587 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -89,15 +89,11 @@ static int create_lt3593_led(const struct gpio_led *template,
/* skip leds on GPIOs that aren't available */
if (!gpio_is_valid(template->gpio)) {
- printk(KERN_INFO "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
+ dev_info(parent, "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
KBUILD_MODNAME, template->gpio, template->name);
return 0;
}
- ret = gpio_request(template->gpio, template->name);
- if (ret < 0)
- return ret;
-
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
@@ -110,24 +106,21 @@ static int create_lt3593_led(const struct gpio_led *template,
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, state);
+ ret = devm_gpio_request_one(parent, template->gpio,
+ GPIOF_DIR_OUT | state, template->name);
if (ret < 0)
- goto err;
+ return ret;
INIT_WORK(&led_dat->work, lt3593_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
if (ret < 0)
- goto err;
+ return ret;
- printk(KERN_INFO "%s: registered LT3593 LED '%s' at GPIO %d\n",
+ dev_info(parent, "%s: registered LT3593 LED '%s' at GPIO %d\n",
KBUILD_MODNAME, template->name, template->gpio);
return 0;
-
-err:
- gpio_free(led_dat->gpio);
- return ret;
}
static void delete_lt3593_led(struct lt3593_led_data *led)
@@ -137,7 +130,6 @@ static void delete_lt3593_led(struct lt3593_led_data *led)
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
- gpio_free(led->gpio);
}
static int lt3593_led_probe(struct platform_device *pdev)
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index f117f7326c5b..27d06c528246 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/nsc_gpio.h>
#include <linux/scx200_gpio.h>
#include <linux/module.h>
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 58a800b17dc7..c61c5ebcc08e 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -243,7 +243,7 @@ static ssize_t netxbig_led_sata_store(struct device *dev,
int mode_val;
int ret;
- ret = strict_strtoul(buff, 10, &enable);
+ ret = kstrtoul(buff, 10, &enable);
if (ret < 0)
return ret;
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 7b75affb308a..d978171c25b4 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -150,7 +150,7 @@ static ssize_t ns2_led_sata_store(struct device *dev,
unsigned long enable;
enum ns2_led_modes mode;
- ret = strict_strtoul(buff, 10, &enable);
+ ret = kstrtoul(buff, 10, &enable);
if (ret < 0)
return ret;
@@ -192,29 +192,22 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
int ret;
enum ns2_led_modes mode;
- ret = gpio_request(template->cmd, template->name);
- if (ret == 0) {
- ret = gpio_direction_output(template->cmd,
- gpio_get_value(template->cmd));
- if (ret)
- gpio_free(template->cmd);
- }
+ ret = devm_gpio_request_one(&pdev->dev, template->cmd,
+ GPIOF_DIR_OUT | gpio_get_value(template->cmd),
+ template->name);
if (ret) {
dev_err(&pdev->dev, "%s: failed to setup command GPIO\n",
template->name);
+ return ret;
}
- ret = gpio_request(template->slow, template->name);
- if (ret == 0) {
- ret = gpio_direction_output(template->slow,
- gpio_get_value(template->slow));
- if (ret)
- gpio_free(template->slow);
- }
+ ret = devm_gpio_request_one(&pdev->dev, template->slow,
+ GPIOF_DIR_OUT | gpio_get_value(template->slow),
+ template->name);
if (ret) {
dev_err(&pdev->dev, "%s: failed to setup slow GPIO\n",
template->name);
- goto err_free_cmd;
+ return ret;
}
rwlock_init(&led_dat->rw_lock);
@@ -229,7 +222,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
ret = ns2_led_get_mode(led_dat, &mode);
if (ret < 0)
- goto err_free_slow;
+ return ret;
/* Set LED initial state. */
led_dat->sata = (mode == NS_V2_LED_SATA) ? 1 : 0;
@@ -238,7 +231,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
if (ret < 0)
- goto err_free_slow;
+ return ret;
ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata);
if (ret < 0)
@@ -248,11 +241,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
err_free_cdev:
led_classdev_unregister(&led_dat->cdev);
-err_free_slow:
- gpio_free(led_dat->slow);
-err_free_cmd:
- gpio_free(led_dat->cmd);
-
return ret;
}
@@ -260,8 +248,6 @@ static void delete_ns2_led(struct ns2_led_data *led_dat)
{
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
led_classdev_unregister(&led_dat->cdev);
- gpio_free(led_dat->cmd);
- gpio_free(led_dat->slow);
}
#ifdef CONFIG_OF_GPIO
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 706791af8fc8..edf485b773c8 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -277,7 +277,7 @@ static int pca955x_probe(struct i2c_client *client,
return -ENODEV;
}
- printk(KERN_INFO "leds-pca955x: Using %s %d-bit LED driver at "
+ dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
"slave address 0x%02x\n",
id->name, chip->bits, client->addr);
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index e51ff7a3cd88..2157524f277c 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -26,7 +26,7 @@
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
- unsigned int active_low;
+ unsigned int active_low;
unsigned int period;
};
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index 9ebdd5011a7c..2e746d257b02 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -16,7 +16,7 @@
#include <asm/mach-rc32434/rb.h>
static void rb532_led_set(struct led_classdev *cdev,
- enum led_brightness brightness)
+ enum led_brightness brightness)
{
if (brightness)
set_latch_u5(LO_ULED, 0);
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index bc8984795a3e..e0fff1ca5923 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -204,10 +204,10 @@ static void r_tpu_set_pin(struct r_tpu_priv *p, enum r_tpu_pin new_state,
if (p->pin_state == R_TPU_PIN_GPIO_FN)
gpio_free(cfg->pin_gpio_fn);
- if (new_state == R_TPU_PIN_GPIO) {
- gpio_request(cfg->pin_gpio, cfg->name);
- gpio_direction_output(cfg->pin_gpio, !!brightness);
- }
+ if (new_state == R_TPU_PIN_GPIO)
+ gpio_request_one(cfg->pin_gpio, GPIOF_DIR_OUT | !!brightness,
+ cfg->name);
+
if (new_state == R_TPU_PIN_GPIO_FN)
gpio_request(cfg->pin_gpio_fn, cfg->name);
@@ -263,18 +263,18 @@ static int r_tpu_probe(struct platform_device *pdev)
}
/* map memory, let mapbase point to our channel */
- p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ p->mapbase = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
if (p->mapbase == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
return -ENXIO;
}
/* get hold of clock */
- p->clk = clk_get(&pdev->dev, NULL);
+ p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err0;
+ return PTR_ERR(p->clk);
}
p->pdev = pdev;
@@ -293,7 +293,7 @@ static int r_tpu_probe(struct platform_device *pdev)
p->ldev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &p->ldev);
if (ret < 0)
- goto err1;
+ goto err0;
/* max_brightness may be updated by the LED core code */
p->min_rate = p->ldev.max_brightness * p->refresh_rate;
@@ -301,11 +301,8 @@ static int r_tpu_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
return 0;
- err1:
- r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
- clk_put(p->clk);
err0:
- iounmap(p->mapbase);
+ r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
return ret;
}
@@ -320,9 +317,7 @@ static int r_tpu_remove(struct platform_device *pdev)
r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
pm_runtime_disable(&pdev->dev);
- clk_put(p->clk);
- iounmap(p->mapbase);
return 0;
}
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 6469849e8266..ec9b287ecfbf 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -459,7 +459,7 @@ static ssize_t nas_led_blink_store(struct device *dev,
struct led_classdev *led = dev_get_drvdata(dev);
unsigned long blink_state;
- ret = strict_strtoul(buf, 10, &blink_state);
+ ret = kstrtoul(buf, 10, &blink_state);
if (ret)
return ret;
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 88f23f845595..ed15157c8f6c 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -216,13 +216,13 @@ static int wm8350_led_probe(struct platform_device *pdev)
isink = devm_regulator_get(&pdev->dev, "led_isink");
if (IS_ERR(isink)) {
- printk(KERN_ERR "%s: can't get ISINK\n", __func__);
+ dev_err(&pdev->dev, "%s: can't get ISINK\n", __func__);
return PTR_ERR(isink);
}
dcdc = devm_regulator_get(&pdev->dev, "led_vcc");
if (IS_ERR(dcdc)) {
- printk(KERN_ERR "%s: can't get DCDC\n", __func__);
+ dev_err(&pdev->dev, "%s: can't get DCDC\n", __func__);
return PTR_ERR(dcdc);
}
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 6e21e654bb02..b358cc05eff5 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200_gpio.h>
#include <linux/module.h>
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index b941685f2227..027a2b15d7d8 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -40,7 +40,7 @@ static int fb_notifier_callback(struct notifier_block *p,
int new_status = *blank ? BLANK : UNBLANK;
switch (event) {
- case FB_EVENT_BLANK :
+ case FB_EVENT_BLANK:
if (new_status == n->old_status)
break;
@@ -76,7 +76,7 @@ static ssize_t bl_trig_invert_store(struct device *dev,
unsigned long invert;
int ret;
- ret = strict_strtoul(buf, 10, &invert);
+ ret = kstrtoul(buf, 10, &invert);
if (ret < 0)
return ret;
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ba215dc42f98..72e3ebfc281f 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -110,7 +110,7 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
unsigned long inverted;
int ret;
- ret = strict_strtoul(buf, 10, &inverted);
+ ret = kstrtoul(buf, 10, &inverted);
if (ret < 0)
return ret;
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 34ae49dc557c..89875ea19ade 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
config LGUEST
tristate "Linux hypervisor example code"
- depends on X86_32 && EXPERIMENTAL && EVENTFD
+ depends on X86_32 && EVENTFD && TTY
select HVC_DRIVER
---help---
This is a very simple module which allows you to run
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index b5fdcb78a75b..a5ebc0083d87 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -225,7 +225,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
* eventfd (ie. the appropriate virtqueue thread)?
*/
if (!send_notify_to_eventfd(cpu)) {
- /* OK, we tell the main Laucher. */
+ /* OK, we tell the main Launcher. */
if (put_user(cpu->pending_notify, user))
return -EFAULT;
return sizeof(cpu->pending_notify);
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a555da64224e..696238b9f0f7 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -278,7 +278,7 @@ config PMAC_RACKMETER
config SENSORS_AMS
tristate "Apple Motion Sensor driver"
- depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
+ depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C)
select INPUT_POLLDEV
help
Support for the motion sensor included in PowerBooks. Includes
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index ef87310b7662..ac5c87939860 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -679,7 +679,7 @@ void macio_release_resources(struct macio_dev *dev)
#ifdef CONFIG_PCI
-static int __devinit macio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int macio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device_node* np;
struct macio_chip* chip;
@@ -739,7 +739,7 @@ static int __devinit macio_pci_probe(struct pci_dev *pdev, const struct pci_devi
return 0;
}
-static void __devexit macio_pci_remove(struct pci_dev* pdev)
+static void macio_pci_remove(struct pci_dev* pdev)
{
panic("removing of macio-asic not supported !\n");
}
@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
* MacIO is matched against any Apple ID, it's probe() function
* will then decide wether it applies or not
*/
-static const struct pci_device_id __devinitconst pci_ids[] = { {
+static const struct pci_device_id pci_ids[] = { {
.vendor = PCI_VENDOR_ID_APPLE,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 3f8d032f180f..d98e566a8f5e 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -556,7 +556,8 @@ static int media_bay_task(void *x)
return 0;
}
-static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_device_id *match)
+static int media_bay_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
{
struct media_bay_info* bay;
u32 __iomem *regbase;
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 6dc26b61219b..cad0e19b47a2 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -253,7 +253,7 @@ static void rackmeter_do_timer(struct work_struct *work)
msecs_to_jiffies(CPU_SAMPLING_RATE));
}
-static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
+static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
{
unsigned int cpu;
@@ -287,7 +287,7 @@ static void rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
cancel_delayed_work_sync(&rm->cpu[1].sniffer);
}
-static int __devinit rackmeter_setup(struct rackmeter *rm)
+static int rackmeter_setup(struct rackmeter *rm)
{
pr_debug("rackmeter: setting up i2s..\n");
rackmeter_setup_i2s(rm);
@@ -362,8 +362,8 @@ static irqreturn_t rackmeter_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int __devinit rackmeter_probe(struct macio_dev* mdev,
- const struct of_device_id *match)
+static int rackmeter_probe(struct macio_dev* mdev,
+ const struct of_device_id *match)
{
struct device_node *i2s = NULL, *np = NULL;
struct rackmeter *rm = NULL;
@@ -521,7 +521,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
return rc;
}
-static int __devexit rackmeter_remove(struct macio_dev* mdev)
+static int rackmeter_remove(struct macio_dev* mdev)
{
struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
@@ -588,7 +588,7 @@ static struct macio_driver rackmeter_driver = {
.of_match_table = rackmeter_match,
},
.probe = rackmeter_probe,
- .remove = __devexit_p(rackmeter_remove),
+ .remove = rackmeter_remove,
.shutdown = rackmeter_shutdown,
};
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 196368009001..9c6b96414862 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -997,7 +997,7 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
"%02x !\n", id, hdr->id);
goto failure;
}
- if (prom_add_property(smu->of_node, prop)) {
+ if (of_add_property(smu->of_node, prop)) {
printk(KERN_DEBUG "SMU: Failed creating sdb-partition-%02x "
"property !\n", id);
goto failure;
diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c
index ac3f243b9c5a..7c28b71246c9 100644
--- a/drivers/macintosh/windfarm_ad7417_sensor.c
+++ b/drivers/macintosh/windfarm_ad7417_sensor.c
@@ -177,9 +177,9 @@ static const struct wf_sensor_ops wf_ad7417_adc_ops = {
.owner = THIS_MODULE,
};
-static void __devinit wf_ad7417_add_sensor(struct wf_ad7417_priv *pv,
- int index, const char *name,
- const struct wf_sensor_ops *ops)
+static void wf_ad7417_add_sensor(struct wf_ad7417_priv *pv,
+ int index, const char *name,
+ const struct wf_sensor_ops *ops)
{
pv->sensors[index].name = kasprintf(GFP_KERNEL, "%s-%d", name, pv->cpu);
pv->sensors[index].priv = pv;
@@ -188,7 +188,7 @@ static void __devinit wf_ad7417_add_sensor(struct wf_ad7417_priv *pv,
kref_get(&pv->ref);
}
-static void __devinit wf_ad7417_init_chip(struct wf_ad7417_priv *pv)
+static void wf_ad7417_init_chip(struct wf_ad7417_priv *pv)
{
int rc;
u8 buf[2];
@@ -230,8 +230,8 @@ static void __devinit wf_ad7417_init_chip(struct wf_ad7417_priv *pv)
pv->config = config;
}
-static int __devinit wf_ad7417_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int wf_ad7417_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct wf_ad7417_priv *pv;
const struct mpu_data *mpu;
@@ -290,7 +290,7 @@ static int __devinit wf_ad7417_probe(struct i2c_client *client,
return 0;
}
-static int __devexit wf_ad7417_remove(struct i2c_client *client)
+static int wf_ad7417_remove(struct i2c_client *client)
{
struct wf_ad7417_priv *pv = dev_get_drvdata(&client->dev);
int i;
@@ -322,7 +322,7 @@ static struct i2c_driver wf_ad7417_driver = {
.id_table = wf_ad7417_id,
};
-static int __devinit wf_ad7417_init(void)
+static int wf_ad7417_init(void)
{
/* This is only supported on these machines */
if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -333,7 +333,7 @@ static int __devinit wf_ad7417_init(void)
return i2c_add_driver(&wf_ad7417_driver);
}
-static void __devexit wf_ad7417_exit(void)
+static void wf_ad7417_exit(void)
{
i2c_del_driver(&wf_ad7417_driver);
}
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index b3411edb324b..0226b796a21c 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -282,7 +282,7 @@ static const struct wf_control_ops wf_fcu_fan_pwm_ops = {
.owner = THIS_MODULE,
};
-static void __devinit wf_fcu_get_pump_minmax(struct wf_fcu_fan *fan)
+static void wf_fcu_get_pump_minmax(struct wf_fcu_fan *fan)
{
const struct mpu_data *mpu = wf_get_mpu(0);
u16 pump_min = 0, pump_max = 0xffff;
@@ -317,7 +317,7 @@ static void __devinit wf_fcu_get_pump_minmax(struct wf_fcu_fan *fan)
fan->ctrl.name, pump_min, pump_max);
}
-static void __devinit wf_fcu_get_rpmfan_minmax(struct wf_fcu_fan *fan)
+static void wf_fcu_get_rpmfan_minmax(struct wf_fcu_fan *fan)
{
struct wf_fcu_priv *pv = fan->fcu_priv;
const struct mpu_data *mpu0 = wf_get_mpu(0);
@@ -359,9 +359,8 @@ static void __devinit wf_fcu_get_rpmfan_minmax(struct wf_fcu_fan *fan)
fan->ctrl.name, fan->min, fan->max);
}
-static void __devinit wf_fcu_add_fan(struct wf_fcu_priv *pv,
- const char *name,
- int type, int id)
+static void wf_fcu_add_fan(struct wf_fcu_priv *pv, const char *name,
+ int type, int id)
{
struct wf_fcu_fan *fan;
@@ -399,7 +398,7 @@ static void __devinit wf_fcu_add_fan(struct wf_fcu_priv *pv,
kref_get(&pv->ref);
}
-static void __devinit wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
+static void wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
{
/* Translation of device-tree location properties to
* windfarm fan names
@@ -481,7 +480,7 @@ static void __devinit wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
}
}
-static void __devinit wf_fcu_default_fans(struct wf_fcu_priv *pv)
+static void wf_fcu_default_fans(struct wf_fcu_priv *pv)
{
/* We only support the default fans for PowerMac7,2 */
if (!of_machine_is_compatible("PowerMac7,2"))
@@ -496,7 +495,7 @@ static void __devinit wf_fcu_default_fans(struct wf_fcu_priv *pv)
wf_fcu_add_fan(pv, "cpu-rear-fan-1", FCU_FAN_RPM, 6);
}
-static int __devinit wf_fcu_init_chip(struct wf_fcu_priv *pv)
+static int wf_fcu_init_chip(struct wf_fcu_priv *pv)
{
unsigned char buf = 0xff;
int rc;
@@ -518,8 +517,8 @@ static int __devinit wf_fcu_init_chip(struct wf_fcu_priv *pv)
return 0;
}
-static int __devinit wf_fcu_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int wf_fcu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct wf_fcu_priv *pv;
@@ -564,7 +563,7 @@ static int __devinit wf_fcu_probe(struct i2c_client *client,
return 0;
}
-static int __devexit wf_fcu_remove(struct i2c_client *client)
+static int wf_fcu_remove(struct i2c_client *client)
{
struct wf_fcu_priv *pv = dev_get_drvdata(&client->dev);
struct wf_fcu_fan *fan;
@@ -593,19 +592,7 @@ static struct i2c_driver wf_fcu_driver = {
.id_table = wf_fcu_id,
};
-static int __init wf_fcu_init(void)
-{
- return i2c_add_driver(&wf_fcu_driver);
-}
-
-static void __exit wf_fcu_exit(void)
-{
- i2c_del_driver(&wf_fcu_driver);
-}
-
-
-module_init(wf_fcu_init);
-module_exit(wf_fcu_exit);
+module_i2c_driver(wf_fcu_driver);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("FCU control objects for PowerMacs thermal control");
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index b0c2d3695b34..9ef32b3df91f 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -174,19 +174,7 @@ static struct i2c_driver wf_lm75_driver = {
.id_table = wf_lm75_id,
};
-static int __init wf_lm75_sensor_init(void)
-{
- return i2c_add_driver(&wf_lm75_driver);
-}
-
-static void __exit wf_lm75_sensor_exit(void)
-{
- i2c_del_driver(&wf_lm75_driver);
-}
-
-
-module_init(wf_lm75_sensor_init);
-module_exit(wf_lm75_sensor_exit);
+module_i2c_driver(wf_lm75_driver);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("LM75 sensor objects for PowerMacs thermal control");
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 371b058d2f7d..945a25b2f31e 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -130,18 +130,7 @@ static struct i2c_driver wf_max6690_driver = {
.id_table = wf_max6690_id,
};
-static int __init wf_max6690_sensor_init(void)
-{
- return i2c_add_driver(&wf_max6690_driver);
-}
-
-static void __exit wf_max6690_sensor_exit(void)
-{
- i2c_del_driver(&wf_max6690_driver);
-}
-
-module_init(wf_max6690_sensor_init);
-module_exit(wf_max6690_sensor_exit);
+module_i2c_driver(wf_max6690_driver);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("MAX6690 sensor objects for PowerMac thermal control");
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index e0ee80700cde..3024685e4cca 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -656,7 +656,7 @@ static int wf_pm112_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wf_pm112_remove(struct platform_device *dev)
+static int wf_pm112_remove(struct platform_device *dev)
{
wf_unregister_client(&pm112_events);
/* should release all sensors and controls */
@@ -665,7 +665,7 @@ static int __devexit wf_pm112_remove(struct platform_device *dev)
static struct platform_driver wf_pm112_driver = {
.probe = wf_pm112_probe,
- .remove = __devexit_p(wf_pm112_remove),
+ .remove = wf_pm112_remove,
.driver = {
.name = "windfarm",
.owner = THIS_MODULE,
@@ -681,7 +681,7 @@ static int __init wf_pm112_init(void)
/* Count the number of CPU cores */
nr_cores = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_cores;
printk(KERN_INFO "windfarm: initializing for dual-core desktop G5\n");
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index 04067e073aa9..af605e915d41 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -987,7 +987,7 @@ static int pm121_probe(struct platform_device *ddev)
return 0;
}
-static int __devexit pm121_remove(struct platform_device *ddev)
+static int pm121_remove(struct platform_device *ddev)
{
wf_unregister_client(&pm121_events);
return 0;
@@ -995,7 +995,7 @@ static int __devexit pm121_remove(struct platform_device *ddev)
static struct platform_driver pm121_driver = {
.probe = pm121_probe,
- .remove = __devexit_p(pm121_remove),
+ .remove = pm121_remove,
.driver = {
.name = "windfarm",
.bus = &platform_bus_type,
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index 84ac913d7e3a..2f506b9d5a52 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -776,7 +776,7 @@ static int wf_pm72_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wf_pm72_remove(struct platform_device *dev)
+static int wf_pm72_remove(struct platform_device *dev)
{
wf_unregister_client(&pm72_events);
@@ -804,7 +804,7 @@ static int __init wf_pm72_init(void)
/* Count the number of CPU cores */
nr_chips = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_chips;
if (nr_chips > NR_CHIPS)
nr_chips = NR_CHIPS;
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 990c87606be9..f84933ff3298 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -720,7 +720,7 @@ static int wf_smu_probe(struct platform_device *ddev)
return 0;
}
-static int __devexit wf_smu_remove(struct platform_device *ddev)
+static int wf_smu_remove(struct platform_device *ddev)
{
wf_unregister_client(&wf_smu_events);
@@ -763,7 +763,7 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
static struct platform_driver wf_smu_driver = {
.probe = wf_smu_probe,
- .remove = __devexit_p(wf_smu_remove),
+ .remove = wf_smu_remove,
.driver = {
.name = "windfarm",
.owner = THIS_MODULE,
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index 7653603cb00e..2eb484f213c8 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -642,7 +642,7 @@ static int wf_smu_probe(struct platform_device *ddev)
return 0;
}
-static int __devexit wf_smu_remove(struct platform_device *ddev)
+static int wf_smu_remove(struct platform_device *ddev)
{
wf_unregister_client(&wf_smu_events);
@@ -692,7 +692,7 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
static struct platform_driver wf_smu_driver = {
.probe = wf_smu_probe,
- .remove = __devexit_p(wf_smu_remove),
+ .remove = wf_smu_remove,
.driver = {
.name = "windfarm",
.owner = THIS_MODULE,
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 3eca6d4b52fc..0b9a79b2f48a 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -669,7 +669,7 @@ static int wf_rm31_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wf_rm31_remove(struct platform_device *dev)
+static int wf_rm31_remove(struct platform_device *dev)
{
wf_unregister_client(&rm31_events);
@@ -696,7 +696,7 @@ static int __init wf_rm31_init(void)
/* Count the number of CPU cores */
nr_chips = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_chips;
if (nr_chips > NR_CHIPS)
nr_chips = NR_CHIPS;
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 426e810233d7..d87f5ee04ca9 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -364,18 +364,7 @@ static struct i2c_driver wf_sat_driver = {
.id_table = wf_sat_id,
};
-static int __init sat_sensors_init(void)
-{
- return i2c_add_driver(&wf_sat_driver);
-}
-
-static void __exit sat_sensors_exit(void)
-{
- i2c_del_driver(&wf_sat_driver);
-}
-
-module_init(sat_sensors_init);
-module_exit(sat_sensors_exit);
+module_i2c_driver(wf_sat_driver);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("SMU satellite sensors for PowerMac thermal control");
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
new file mode 100644
index 000000000000..9545c9f03809
--- /dev/null
+++ b/drivers/mailbox/Kconfig
@@ -0,0 +1,19 @@
+menuconfig MAILBOX
+ bool "Mailbox Hardware Support"
+ help
+ Mailbox is a framework to control hardware communication between
+ on-chip processors through queued messages and interrupt driven
+ signals. Say Y if your platform supports hardware mailboxes.
+
+if MAILBOX
+config PL320_MBOX
+ bool "ARM PL320 Mailbox"
+ depends on ARM_AMBA
+ help
+ An implementation of the ARM PL320 Interprocessor Communication
+ Mailbox (IPCM), tailored for the Calxeda Highbank. It is used to
+ send short messages between Highbank's A9 cores and the EnergyCore
+ Management Engine, primarily for cpufreq. Say Y here if you want
+ to use the PL320 IPCM support.
+
+endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
new file mode 100644
index 000000000000..543ad6a79505
--- /dev/null
+++ b/drivers/mailbox/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
new file mode 100644
index 000000000000..c45b3aedafba
--- /dev/null
+++ b/drivers/mailbox/pl320-ipc.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+
+#include <linux/mailbox.h>
+
+#define IPCMxSOURCE(m) ((m) * 0x40)
+#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
+#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
+#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
+#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
+#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
+#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
+#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
+#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
+#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
+
+#define IPCMMIS(irq) (((irq) * 8) + 0x800)
+#define IPCMRIS(irq) (((irq) * 8) + 0x804)
+
+#define MBOX_MASK(n) (1 << (n))
+#define IPC_TX_MBOX 1
+#define IPC_RX_MBOX 2
+
+#define CHAN_MASK(n) (1 << (n))
+#define A9_SOURCE 1
+#define M3_SOURCE 0
+
+static void __iomem *ipc_base;
+static int ipc_irq;
+static DEFINE_MUTEX(ipc_m1_lock);
+static DECLARE_COMPLETION(ipc_completion);
+static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
+
+static inline void set_destination(int source, int mbox)
+{
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
+}
+
+static inline void clear_destination(int source, int mbox)
+{
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
+}
+
+static void __ipc_send(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i));
+ __raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
+}
+
+static u32 __ipc_rcv(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i));
+ return data[1];
+}
+
+/* blocking implementation from the A9 side, not usable in interrupts! */
+int pl320_ipc_transmit(u32 *data)
+{
+ int ret;
+
+ mutex_lock(&ipc_m1_lock);
+
+ init_completion(&ipc_completion);
+ __ipc_send(IPC_TX_MBOX, data);
+ ret = wait_for_completion_timeout(&ipc_completion,
+ msecs_to_jiffies(1000));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ret = __ipc_rcv(IPC_TX_MBOX, data);
+out:
+ mutex_unlock(&ipc_m1_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
+
+static irqreturn_t ipc_handler(int irq, void *dev)
+{
+ u32 irq_stat;
+ u32 data[7];
+
+ irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
+ if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
+ __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ complete(&ipc_completion);
+ }
+ if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
+ __ipc_rcv(IPC_RX_MBOX, data);
+ atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
+ __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
+ }
+
+ return IRQ_HANDLED;
+}
+
+int pl320_ipc_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
+
+int pl320_ipc_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
+
+static int __init pl320_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ int ret;
+
+ ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (ipc_base == NULL)
+ return -ENOMEM;
+
+ __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+
+ ipc_irq = adev->irq[0];
+ ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Init slow mailbox */
+ __raw_writel(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxDSET(IPC_TX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_TX_MBOX));
+
+ /* Init receive mailbox */
+ __raw_writel(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
+ __raw_writel(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxDSET(IPC_RX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_RX_MBOX));
+
+ return 0;
+err:
+ iounmap(ipc_base);
+ return ret;
+}
+
+static struct amba_id pl320_ids[] = {
+ {
+ .id = 0x00041320,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl320_driver = {
+ .drv = {
+ .name = "pl320",
+ },
+ .id_table = pl320_ids,
+ .probe = pl320_probe,
+};
+
+static int __init ipc_init(void)
+{
+ return amba_driver_register(&pl320_driver);
+}
+module_init(ipc_init);
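For context, a minimal sketch of how a client might drive the two exported entry points; the function names and message contents below are hypothetical, only pl320_ipc_transmit(), pl320_ipc_register_notifier() and the 7-word buffer layout come from the code above:

	#include <linux/notifier.h>
	#include <linux/mailbox.h>

	/* send a 7-word request and block (up to 1s) for the reply;
	 * pl320_ipc_transmit() sleeps, so never call it from interrupt context */
	static int example_send(u32 cmd, u32 arg)
	{
		u32 msg[7] = { cmd, arg, 0, 0, 0, 0, 0 };

		return pl320_ipc_transmit(msg);
	}

	/* unsolicited RX messages arrive via the notifier chain: data[0] is
	 * passed as the action, the remaining words as the notifier payload */
	static int example_ipc_notify(struct notifier_block *nb,
				      unsigned long action, void *data)
	{
		return NOTIFY_OK;
	}

	static struct notifier_block example_ipc_nb = {
		.notifier_call = example_ipc_notify,
	};

	static int __init example_init(void)
	{
		return pl320_ipc_register_notifier(&example_ipc_nb);
	}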
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index e4e841567459..aefb78e3cbf9 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -208,31 +208,6 @@ void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
EXPORT_SYMBOL_GPL(dm_cell_release);
/*
- * There are a couple of places where we put a bio into a cell briefly
- * before taking it out again. In these situations we know that no other
- * bio may be in the cell. This function releases the cell, and also does
- * a sanity check.
- */
-static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
- BUG_ON(cell->holder != bio);
- BUG_ON(!bio_list_empty(&cell->bios));
-
- __cell_release(cell, NULL);
-}
-
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
- unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
-
- spin_lock_irqsave(&prison->lock, flags);
- __cell_release_singleton(cell, bio);
- spin_unlock_irqrestore(&prison->lock, flags);
-}
-EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
-
-/*
* Sometimes we don't want the holder, just the additional bios.
*/
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 4e0ac376700a..53d1a7a84e2f 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -44,7 +44,6 @@ int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
struct bio *inmate, struct dm_bio_prison_cell **ref);
void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison_cell *cell);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bbf459bca61d..f7369f9d8595 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1689,8 +1689,7 @@ bad:
return ret;
}
-static int crypt_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int crypt_map(struct dm_target *ti, struct bio *bio)
{
struct dm_crypt_io *io;
struct crypt_config *cc = ti->private;
@@ -1846,7 +1845,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index f53846f9ab50..cc1bd048acb2 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -274,8 +274,7 @@ static void delay_resume(struct dm_target *ti)
atomic_set(&dc->may_delay, 1);
}
-static int delay_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int delay_map(struct dm_target *ti, struct bio *bio)
{
struct delay_c *dc = ti->private;
@@ -338,7 +337,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index cc15543a6ad7..9721f2ffb1a2 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -39,6 +39,10 @@ enum feature_flag_bits {
DROP_WRITES
};
+struct per_bio_data {
+ bool bio_submitted;
+};
+
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
@@ -214,6 +218,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->private = fc;
return 0;
@@ -265,11 +270,12 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
}
}
-static int flakey_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int flakey_map(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
unsigned elapsed;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ pb->bio_submitted = false;
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
@@ -277,7 +283,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio,
/*
* Flag this bio as submitted while down.
*/
- map_context->ll = 1;
+ pb->bio_submitted = true;
/*
* Map reads as normal.
@@ -314,17 +320,16 @@ map_bio:
return DM_MAPIO_REMAPPED;
}
-static int flakey_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct flakey_c *fc = ti->private;
- unsigned bio_submitted_while_down = map_context->ll;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
/*
* Corrupt successful READs while in down state.
* If flags were specified, only corrupt those that match.
*/
- if (fc->corrupt_bio_byte && !error && bio_submitted_while_down &&
+ if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
(bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc))
corrupt_bio_data(bio, fc);
@@ -406,7 +411,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
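The flakey conversion above is the template for the per-bio-data mechanism used by the bio-based targets in the rest of this series: the target declares its per-bio footprint in the constructor and device-mapper hands that storage back with every bio, replacing the old map_context fields and private mempools. A minimal sketch for a hypothetical target (only ti->per_bio_data_size, dm_per_bio_data() and the new map/end_io signatures come from the patch):

	struct example_per_bio_data {
		bool submitted_while_down;	/* hypothetical per-bio state */
	};

	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* reserve space allocated alongside every bio cloned for this target */
		ti->per_bio_data_size = sizeof(struct example_per_bio_data);
		return 0;
	}

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_per_bio_data *pb =
			dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

		pb->submitted_while_down = false;
		return DM_MAPIO_REMAPPED;
	}

	static int example_end_io(struct dm_target *ti, struct bio *bio, int error)
	{
		struct example_per_bio_data *pb =
			dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

		/* the same storage is still attached at completion time,
		 * so no mempool or map_context bookkeeping is needed */
		if (pb->submitted_while_down && error)
			error = 0;	/* hypothetical policy */
		return error;
	}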
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 1c46f97d6664..ea49834377c8 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -287,7 +287,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
unsigned num_bvecs;
sector_t remaining = where->count;
struct request_queue *q = bdev_get_queue(where->bdev);
- sector_t discard_sectors;
+ unsigned short logical_block_size = queue_logical_block_size(q);
+ sector_t num_sectors;
/*
* where->count may be zero if rw holds a flush and we need to
@@ -297,7 +298,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
/*
* Allocate a suitably sized-bio.
*/
- if (rw & REQ_DISCARD)
+ if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
num_bvecs = 1;
else
num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
@@ -310,9 +311,21 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
- discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
- bio->bi_size = discard_sectors << SECTOR_SHIFT;
- remaining -= discard_sectors;
+ num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+ bio->bi_size = num_sectors << SECTOR_SHIFT;
+ remaining -= num_sectors;
+ } else if (rw & REQ_WRITE_SAME) {
+ /*
+ * WRITE SAME only uses a single page.
+ */
+ dp->get_page(dp, &page, &len, &offset);
+ bio_add_page(bio, page, logical_block_size, offset);
+ num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
+ bio->bi_size = num_sectors << SECTOR_SHIFT;
+
+ offset = 0;
+ remaining -= num_sectors;
+ dp->next_page(dp);
} else while (remaining) {
/*
* Try and add as many pages as possible.
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index afd95986d099..0666b5d14b88 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1543,7 +1543,21 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
+#define DM_PARAMS_VMALLOC 0x0001 /* Params alloced with vmalloc not kmalloc */
+#define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
+
+static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
+{
+ if (param_flags & DM_WIPE_BUFFER)
+ memset(param, 0, param_size);
+
+ if (param_flags & DM_PARAMS_VMALLOC)
+ vfree(param);
+ else
+ kfree(param);
+}
+
+static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param, int *param_flags)
{
struct dm_ioctl tmp, *dmi;
int secure_data;
@@ -1556,7 +1570,21 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
secure_data = tmp.flags & DM_SECURE_DATA_FLAG;
- dmi = vmalloc(tmp.data_size);
+ *param_flags = secure_data ? DM_WIPE_BUFFER : 0;
+
+ /*
+ * Try to avoid low memory issues when a device is suspended.
+ * Use kmalloc() rather than vmalloc() when we can.
+ */
+ dmi = NULL;
+ if (tmp.data_size <= KMALLOC_MAX_SIZE)
+ dmi = kmalloc(tmp.data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+ if (!dmi) {
+ dmi = __vmalloc(tmp.data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+ *param_flags |= DM_PARAMS_VMALLOC;
+ }
+
if (!dmi) {
if (secure_data && clear_user(user, tmp.data_size))
return -EFAULT;
@@ -1566,6 +1594,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
if (copy_from_user(dmi, user, tmp.data_size))
goto bad;
+ /*
+ * Abort if something changed the ioctl data while it was being copied.
+ */
+ if (dmi->data_size != tmp.data_size) {
+ DMERR("rejecting ioctl: data size modified while processing parameters");
+ goto bad;
+ }
+
/* Wipe the user buffer so we do not return it to userspace */
if (secure_data && clear_user(user, tmp.data_size))
goto bad;
@@ -1574,9 +1610,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
return 0;
bad:
- if (secure_data)
- memset(dmi, 0, tmp.data_size);
- vfree(dmi);
+ free_params(dmi, tmp.data_size, *param_flags);
+
return -EFAULT;
}
@@ -1613,7 +1648,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
int r = 0;
- int wipe_buffer;
+ int param_flags;
unsigned int cmd;
struct dm_ioctl *uninitialized_var(param);
ioctl_fn fn = NULL;
@@ -1649,24 +1684,14 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
}
/*
- * Trying to avoid low memory issues when a device is
- * suspended.
- */
- current->flags |= PF_MEMALLOC;
-
- /*
* Copy the parameters into kernel space.
*/
- r = copy_params(user, &param);
-
- current->flags &= ~PF_MEMALLOC;
+ r = copy_params(user, &param, &param_flags);
if (r)
return r;
input_param_size = param->data_size;
- wipe_buffer = param->flags & DM_SECURE_DATA_FLAG;
-
r = validate_params(cmd, param);
if (r)
goto out;
@@ -1681,10 +1706,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
r = -EFAULT;
out:
- if (wipe_buffer)
- memset(param, 0, input_param_size);
-
- vfree(param);
+ free_params(param, input_param_size, param_flags);
return r;
}
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index bed444c93d8d..68c02673263b 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -349,7 +349,7 @@ static void complete_io(unsigned long error, void *context)
struct dm_kcopyd_client *kc = job->kc;
if (error) {
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
job->write_err |= error;
else
job->read_err = 1;
@@ -361,7 +361,7 @@ static void complete_io(unsigned long error, void *context)
}
}
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
push(&kc->complete_jobs, job);
else {
@@ -432,7 +432,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
if (r < 0) {
/* error this rogue job */
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
job->write_err = (unsigned long) -1L;
else
job->read_err = 1;
@@ -585,6 +585,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
struct kcopyd_job *job;
+ int i;
/*
* Allocate an array of jobs consisting of one master job
@@ -611,7 +612,16 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
memset(&job->source, 0, sizeof job->source);
job->source.count = job->dests[0].count;
job->pages = &zero_page_list;
- job->rw = WRITE;
+
+ /*
+ * Use WRITE SAME to optimize zeroing if all dests support it.
+ */
+ job->rw = WRITE | REQ_WRITE_SAME;
+ for (i = 0; i < job->num_dests; i++)
+ if (!bdev_write_same(job->dests[i].bdev)) {
+ job->rw = WRITE;
+ break;
+ }
}
job->fn = fn;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 1bf19a93eef0..328cad5617ab 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -55,6 +55,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->num_write_same_requests = 1;
ti->private = lc;
return 0;
@@ -87,8 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
}
-static int linear_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int linear_map(struct dm_target *ti, struct bio *bio)
{
linear_map_bio(ti, bio);
@@ -155,7 +155,7 @@ static int linear_iterate_devices(struct dm_target *ti,
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 45d94a7e7f6d..9e58dbd8d8cb 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -295,9 +295,11 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
* Choose a reasonable default. All figures in sectors.
*/
if (min_region_size > (1 << 13)) {
+ /* If not a power of 2, make it the next power of 2 */
+ if (min_region_size & (min_region_size - 1))
+ region_size = 1 << fls(region_size);
DMINFO("Choosing default region size of %lu sectors",
region_size);
- region_size = min_region_size;
} else {
DMINFO("Choosing default region size of 4MiB");
region_size = 1 << 13; /* sectors */
@@ -338,24 +340,22 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
}
/*
- * validate_rebuild_devices
+ * validate_raid_redundancy
* @rs
*
- * Determine if the devices specified for rebuild can result in a valid
- * usable array that is capable of rebuilding the given devices.
+ * Determine if there are enough devices in the array that haven't
+ * failed (or are being rebuilt) to form a usable array.
*
* Returns: 0 on success, -EINVAL on failure.
*/
-static int validate_rebuild_devices(struct raid_set *rs)
+static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned i, rebuild_cnt = 0;
unsigned rebuilds_per_group, copies, d;
- if (!(rs->print_flags & DMPF_REBUILD))
- return 0;
-
for (i = 0; i < rs->md.raid_disks; i++)
- if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+ if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+ !rs->dev[i].rdev.sb_page)
rebuild_cnt++;
switch (rs->raid_type->level) {
@@ -391,27 +391,24 @@ static int validate_rebuild_devices(struct raid_set *rs)
* A A B B C
* C D D E E
*/
- rebuilds_per_group = 0;
for (i = 0; i < rs->md.raid_disks * copies; i++) {
+ if (!(i % copies))
+ rebuilds_per_group = 0;
d = i % rs->md.raid_disks;
- if (!test_bit(In_sync, &rs->dev[d].rdev.flags) &&
+ if ((!rs->dev[d].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
(++rebuilds_per_group >= copies))
goto too_many;
- if (!((i + 1) % copies))
- rebuilds_per_group = 0;
}
break;
default:
- DMERR("The rebuild parameter is not supported for %s",
- rs->raid_type->name);
- rs->ti->error = "Rebuild not supported for this RAID type";
- return -EINVAL;
+ if (rebuild_cnt)
+ return -EINVAL;
}
return 0;
too_many:
- rs->ti->error = "Too many rebuild devices specified";
return -EINVAL;
}
@@ -662,9 +659,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
rs->md.dev_sectors = sectors_per_dev;
- if (validate_rebuild_devices(rs))
- return -EINVAL;
-
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
rs->md.external = 1;
@@ -993,28 +987,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
int ret;
- unsigned redundancy = 0;
struct raid_dev *dev;
struct md_rdev *rdev, *tmp, *freshest;
struct mddev *mddev = &rs->md;
- switch (rs->raid_type->level) {
- case 1:
- redundancy = rs->md.raid_disks - 1;
- break;
- case 4:
- case 5:
- case 6:
- redundancy = rs->raid_type->parity_devs;
- break;
- case 10:
- redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
- break;
- default:
- ti->error = "Unknown RAID type";
- return -EINVAL;
- }
-
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev) {
/*
@@ -1043,44 +1019,43 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
break;
default:
dev = container_of(rdev, struct raid_dev, rdev);
- if (redundancy--) {
- if (dev->meta_dev)
- dm_put_device(ti, dev->meta_dev);
-
- dev->meta_dev = NULL;
- rdev->meta_bdev = NULL;
+ if (dev->meta_dev)
+ dm_put_device(ti, dev->meta_dev);
- if (rdev->sb_page)
- put_page(rdev->sb_page);
+ dev->meta_dev = NULL;
+ rdev->meta_bdev = NULL;
- rdev->sb_page = NULL;
+ if (rdev->sb_page)
+ put_page(rdev->sb_page);
- rdev->sb_loaded = 0;
+ rdev->sb_page = NULL;
- /*
- * We might be able to salvage the data device
- * even though the meta device has failed. For
- * now, we behave as though '- -' had been
- * set for this device in the table.
- */
- if (dev->data_dev)
- dm_put_device(ti, dev->data_dev);
+ rdev->sb_loaded = 0;
- dev->data_dev = NULL;
- rdev->bdev = NULL;
+ /*
+ * We might be able to salvage the data device
+ * even though the meta device has failed. For
+ * now, we behave as though '- -' had been
+ * set for this device in the table.
+ */
+ if (dev->data_dev)
+ dm_put_device(ti, dev->data_dev);
- list_del(&rdev->same_set);
+ dev->data_dev = NULL;
+ rdev->bdev = NULL;
- continue;
- }
- ti->error = "Failed to load superblock";
- return ret;
+ list_del(&rdev->same_set);
}
}
if (!freshest)
return 0;
+ if (validate_raid_redundancy(rs)) {
+ rs->ti->error = "Insufficient redundancy to activate array";
+ return -EINVAL;
+ }
+
/*
* Validation of the freshest device provides the source of
* validation for the remaining devices.
@@ -1216,7 +1191,7 @@ static void raid_dtr(struct dm_target *ti)
context_free(rs);
}
-static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
+static int raid_map(struct dm_target *ti, struct bio *bio)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
@@ -1430,7 +1405,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 3, 1},
+ .version = {1, 4, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
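The raid10 branch of validate_raid_redundancy() above walks raid_disks * copies slots and restarts the per-group counter at every group boundary (i % copies == 0); a small worked example for the "near" layout pictured in its comment, assuming 5 disks and 2 copies:

	/*
	 * slot i :  0 1   2 3   4 5   6 7   8 9
	 * disk d :  0 1   2 3   4 0   1 2   3 4      (d = i % raid_disks)
	 * chunk  :  A A   B B   C C   D D   E E
	 *
	 * Losing disks 0 and 2 passes: every group keeps a healthy member,
	 * so rebuilds_per_group never reaches copies.
	 * Losing disks 0 and 1 fails: both copies of chunk A are gone, the
	 * counter hits copies in the first group and activation is rejected.
	 */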
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fd61f98ee1f6..fa519185ebba 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -61,7 +61,6 @@ struct mirror_set {
struct dm_region_hash *rh;
struct dm_kcopyd_client *kcopyd_client;
struct dm_io_client *io_client;
- mempool_t *read_record_pool;
/* recovery */
region_t nr_regions;
@@ -139,14 +138,13 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
queue_bio(ms, bio, WRITE);
}
-#define MIN_READ_RECORDS 20
-struct dm_raid1_read_record {
+struct dm_raid1_bio_record {
struct mirror *m;
+ /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
+ region_t write_region;
};
-static struct kmem_cache *_dm_raid1_read_record_cache;
-
/*
* Every mirror should look like this one.
*/
@@ -876,19 +874,9 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
atomic_set(&ms->suspend, 0);
atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
- ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
- _dm_raid1_read_record_cache);
-
- if (!ms->read_record_pool) {
- ti->error = "Error creating mirror read_record_pool";
- kfree(ms);
- return NULL;
- }
-
ms->io_client = dm_io_client_create();
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
- mempool_destroy(ms->read_record_pool);
kfree(ms);
return NULL;
}
@@ -900,7 +888,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
if (IS_ERR(ms->rh)) {
ti->error = "Error creating dirty region hash";
dm_io_client_destroy(ms->io_client);
- mempool_destroy(ms->read_record_pool);
kfree(ms);
return NULL;
}
@@ -916,7 +903,6 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
dm_io_client_destroy(ms->io_client);
dm_region_hash_destroy(ms->rh);
- mempool_destroy(ms->read_record_pool);
kfree(ms);
}
@@ -1088,6 +1074,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
ms->kmirrord_wq = alloc_workqueue("kmirrord",
@@ -1155,18 +1142,20 @@ static void mirror_dtr(struct dm_target *ti)
/*
* Mirror mapping function
*/
-static int mirror_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int mirror_map(struct dm_target *ti, struct bio *bio)
{
int r, rw = bio_rw(bio);
struct mirror *m;
struct mirror_set *ms = ti->private;
- struct dm_raid1_read_record *read_record = NULL;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+ struct dm_raid1_bio_record *bio_record =
+ dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
+
+ bio_record->details.bi_bdev = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
- map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
+ bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
queue_bio(ms, bio, rw);
return DM_MAPIO_SUBMITTED;
}
@@ -1194,33 +1183,29 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
if (unlikely(!m))
return -EIO;
- read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
- if (likely(read_record)) {
- dm_bio_record(&read_record->details, bio);
- map_context->ptr = read_record;
- read_record->m = m;
- }
+ dm_bio_record(&bio_record->details, bio);
+ bio_record->m = m;
map_bio(m, bio);
return DM_MAPIO_REMAPPED;
}
-static int mirror_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
{
int rw = bio_rw(bio);
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct mirror *m = NULL;
struct dm_bio_details *bd = NULL;
- struct dm_raid1_read_record *read_record = map_context->ptr;
+ struct dm_raid1_bio_record *bio_record =
+ dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
/*
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
- dm_rh_dec(ms->rh, map_context->ll);
+ dm_rh_dec(ms->rh, bio_record->write_region);
return error;
}
@@ -1231,7 +1216,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(error)) {
- if (!read_record) {
+ if (!bio_record->details.bi_bdev) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@@ -1241,7 +1226,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
return -EIO;
}
- m = read_record->m;
+ m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
m->dev->name);
@@ -1253,22 +1238,18 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* mirror.
*/
if (default_ok(m) || mirror_available(ms, bio)) {
- bd = &read_record->details;
+ bd = &bio_record->details;
dm_bio_restore(bd, bio);
- mempool_free(read_record, ms->read_record_pool);
- map_context->ptr = NULL;
+ bio_record->details.bi_bdev = NULL;
queue_bio(ms, bio, rw);
- return 1;
+ return DM_ENDIO_INCOMPLETE;
}
DMERR("All replicated volumes dead, failing I/O");
}
out:
- if (read_record) {
- mempool_free(read_record, ms->read_record_pool);
- map_context->ptr = NULL;
- }
+ bio_record->details.bi_bdev = NULL;
return error;
}
@@ -1422,7 +1403,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 12, 1},
+ .version = {1, 13, 1},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
@@ -1439,13 +1420,6 @@ static int __init dm_mirror_init(void)
{
int r;
- _dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
- if (!_dm_raid1_read_record_cache) {
- DMERR("Can't allocate dm_raid1_read_record cache");
- r = -ENOMEM;
- goto bad_cache;
- }
-
r = dm_register_target(&mirror_target);
if (r < 0) {
DMERR("Failed to register mirror target");
@@ -1455,15 +1429,12 @@ static int __init dm_mirror_init(void)
return 0;
bad_target:
- kmem_cache_destroy(_dm_raid1_read_record_cache);
-bad_cache:
return r;
}
static void __exit dm_mirror_exit(void)
{
dm_unregister_target(&mirror_target);
- kmem_cache_destroy(_dm_raid1_read_record_cache);
}
/* Module hooks */
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a143921feaf6..59fc18ae52c2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -79,7 +79,6 @@ struct dm_snapshot {
/* Chunks with outstanding reads */
spinlock_t tracked_chunk_lock;
- mempool_t *tracked_chunk_pool;
struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
/* The on disk metadata handler */
@@ -191,35 +190,38 @@ struct dm_snap_tracked_chunk {
chunk_t chunk;
};
-static struct kmem_cache *tracked_chunk_cache;
+static void init_tracked_chunk(struct bio *bio)
+{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+ INIT_HLIST_NODE(&c->node);
+}
-static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
- chunk_t chunk)
+static bool is_bio_tracked(struct bio *bio)
{
- struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
- GFP_NOIO);
- unsigned long flags;
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+ return !hlist_unhashed(&c->node);
+}
+
+static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
+{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
c->chunk = chunk;
- spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+ spin_lock_irq(&s->tracked_chunk_lock);
hlist_add_head(&c->node,
&s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
- spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
-
- return c;
+ spin_unlock_irq(&s->tracked_chunk_lock);
}
-static void stop_tracking_chunk(struct dm_snapshot *s,
- struct dm_snap_tracked_chunk *c)
+static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
unsigned long flags;
spin_lock_irqsave(&s->tracked_chunk_lock, flags);
hlist_del(&c->node);
spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
-
- mempool_free(c, s->tracked_chunk_pool);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
@@ -1120,14 +1122,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_pending_pool;
}
- s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
- tracked_chunk_cache);
- if (!s->tracked_chunk_pool) {
- ti->error = "Could not allocate tracked_chunk mempool for "
- "tracking reads";
- goto bad_tracked_chunk_pool;
- }
-
for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
@@ -1135,6 +1129,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = s;
ti->num_flush_requests = num_flush_requests;
+ ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -1183,9 +1178,6 @@ bad_read_metadata:
unregister_snapshot(s);
bad_load_and_register:
- mempool_destroy(s->tracked_chunk_pool);
-
-bad_tracked_chunk_pool:
mempool_destroy(s->pending_pool);
bad_pending_pool:
@@ -1290,8 +1282,6 @@ static void snapshot_dtr(struct dm_target *ti)
BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif
- mempool_destroy(s->tracked_chunk_pool);
-
__free_exceptions(s);
mempool_destroy(s->pending_pool);
@@ -1577,8 +1567,7 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
s->store->chunk_mask);
}
-static int snapshot_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
struct dm_exception *e;
struct dm_snapshot *s = ti->private;
@@ -1586,6 +1575,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ init_tracked_chunk(bio);
+
if (bio->bi_rw & REQ_FLUSH) {
bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
@@ -1670,7 +1661,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
}
} else {
bio->bi_bdev = s->origin->bdev;
- map_context->ptr = track_chunk(s, chunk);
+ track_chunk(s, bio, chunk);
}
out_unlock:
@@ -1691,20 +1682,20 @@ out:
* If merging is currently taking place on the chunk in question, the
* I/O is deferred by adding it to s->bios_queued_during_merge.
*/
-static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
struct dm_exception *e;
struct dm_snapshot *s = ti->private;
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
+ init_tracked_chunk(bio);
+
if (bio->bi_rw & REQ_FLUSH) {
- if (!map_context->target_request_nr)
+ if (!dm_bio_get_target_request_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
bio->bi_bdev = s->cow->bdev;
- map_context->ptr = NULL;
return DM_MAPIO_REMAPPED;
}
@@ -1733,7 +1724,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
remap_exception(s, e, bio, chunk);
if (bio_rw(bio) == WRITE)
- map_context->ptr = track_chunk(s, chunk);
+ track_chunk(s, bio, chunk);
goto out_unlock;
}
@@ -1751,14 +1742,12 @@ out_unlock:
return r;
}
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct dm_snapshot *s = ti->private;
- struct dm_snap_tracked_chunk *c = map_context->ptr;
- if (c)
- stop_tracking_chunk(s, c);
+ if (is_bio_tracked(bio))
+ stop_tracking_chunk(s, bio);
return 0;
}
@@ -2127,8 +2116,7 @@ static void origin_dtr(struct dm_target *ti)
dm_put_device(ti, dev);
}
-static int origin_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int origin_map(struct dm_target *ti, struct bio *bio)
{
struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
@@ -2193,7 +2181,7 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 7, 1},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -2206,7 +2194,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2220,7 +2208,7 @@ static struct target_type snapshot_target = {
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2281,17 +2269,8 @@ static int __init dm_snapshot_init(void)
goto bad_pending_cache;
}
- tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
- if (!tracked_chunk_cache) {
- DMERR("Couldn't create cache to track chunks in use.");
- r = -ENOMEM;
- goto bad_tracked_chunk_cache;
- }
-
return 0;
-bad_tracked_chunk_cache:
- kmem_cache_destroy(pending_cache);
bad_pending_cache:
kmem_cache_destroy(exception_cache);
bad_exception_cache:
@@ -2317,7 +2296,6 @@ static void __exit dm_snapshot_exit(void)
exit_origin_hash();
kmem_cache_destroy(pending_cache);
kmem_cache_destroy(exception_cache);
- kmem_cache_destroy(tracked_chunk_cache);
dm_exception_store_exit();
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e2f876539743..c89cde86d400 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -162,6 +162,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = stripes;
ti->num_discard_requests = stripes;
+ ti->num_write_same_requests = stripes;
sc->chunk_size = chunk_size;
if (chunk_size & (chunk_size - 1))
@@ -251,8 +252,8 @@ static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
*result += sc->chunk_size; /* next chunk */
}
-static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
- uint32_t target_stripe)
+static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
+ uint32_t target_stripe)
{
sector_t begin, end;
@@ -271,23 +272,23 @@ static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
}
}
-static int stripe_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
unsigned target_request_nr;
if (bio->bi_rw & REQ_FLUSH) {
- target_request_nr = map_context->target_request_nr;
+ target_request_nr = dm_bio_get_target_request_nr(bio);
BUG_ON(target_request_nr >= sc->stripes);
bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- target_request_nr = map_context->target_request_nr;
+ if (unlikely(bio->bi_rw & REQ_DISCARD) ||
+ unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
+ target_request_nr = dm_bio_get_target_request_nr(bio);
BUG_ON(target_request_nr >= sc->stripes);
- return stripe_map_discard(sc, bio, target_request_nr);
+ return stripe_map_range(sc, bio, target_request_nr);
}
stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
@@ -342,8 +343,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
return 0;
}
-static int stripe_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
{
unsigned i;
char major_minor[16];
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 100368eb7991..daf25d0890b3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -967,13 +967,22 @@ bool dm_table_request_based(struct dm_table *t)
int dm_table_alloc_md_mempools(struct dm_table *t)
{
unsigned type = dm_table_get_type(t);
+ unsigned per_bio_data_size = 0;
+ struct dm_target *tgt;
+ unsigned i;
if (unlikely(type == DM_TYPE_NONE)) {
DMWARN("no table type is set, can't allocate mempools");
return -EINVAL;
}
- t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
+ if (type == DM_TYPE_BIO_BASED)
+ for (i = 0; i < t->num_targets; i++) {
+ tgt = t->targets + i;
+ per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
+ }
+
+ t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
if (!t->mempools)
return -ENOMEM;
@@ -1414,6 +1423,33 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return 1;
}
+static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && !q->limits.max_write_same_sectors;
+}
+
+static bool dm_table_supports_write_same(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->num_write_same_requests)
+ return false;
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+ return false;
+ }
+
+ return true;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1445,6 +1481,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+ if (!dm_table_supports_write_same(t))
+ q->limits.max_write_same_sectors = 0;
+
dm_table_set_integrity(t);
/*
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 8da366cf381c..617d21a77256 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -126,15 +126,14 @@ static void io_err_dtr(struct dm_target *tt)
/* empty */
}
-static int io_err_map(struct dm_target *tt, struct bio *bio,
- union map_info *map_context)
+static int io_err_map(struct dm_target *tt, struct bio *bio)
{
return -EIO;
}
static struct target_type error_target = {
.name = "error",
- .version = {1, 0, 1},
+ .version = {1, 1, 0},
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 693e149e9727..4d6e85367b84 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -408,7 +408,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
pmd->tl_info.tm = pmd->tm;
pmd->tl_info.levels = 1;
- pmd->tl_info.value_type.context = &pmd->info;
+ pmd->tl_info.value_type.context = &pmd->bl_info;
pmd->tl_info.value_type.size = sizeof(__le64);
pmd->tl_info.value_type.inc = subtree_inc;
pmd->tl_info.value_type.dec = subtree_dec;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 058acf3a5ba7..5409607d4875 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -186,7 +186,6 @@ struct pool {
struct dm_thin_new_mapping *next_mapping;
mempool_t *mapping_pool;
- mempool_t *endio_hook_pool;
process_bio_fn process_bio;
process_bio_fn process_discard;
@@ -304,7 +303,7 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
bio_list_init(master);
while ((bio = bio_list_pop(&bios))) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
if (h->tc == tc)
bio_endio(bio, DM_ENDIO_REQUEUE);
@@ -368,6 +367,17 @@ static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
dm_thin_changed_this_transaction(tc->td);
}
+static void inc_all_io_entry(struct pool *pool, struct bio *bio)
+{
+ struct dm_thin_endio_hook *h;
+
+ if (bio->bi_rw & REQ_DISCARD)
+ return;
+
+ h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+ h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
+}
+
static void issue(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
@@ -474,7 +484,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
static void overwrite_endio(struct bio *bio, int err)
{
unsigned long flags;
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct dm_thin_new_mapping *m = h->overwrite_mapping;
struct pool *pool = m->tc->pool;
@@ -499,8 +509,7 @@ static void overwrite_endio(struct bio *bio, int err)
/*
* This sends the bios in the cell back to the deferred_bios list.
*/
-static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
- dm_block_t data_block)
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
struct pool *pool = tc->pool;
unsigned long flags;
@@ -513,17 +522,13 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
}
/*
- * Same as cell_defer above, except it omits one particular detainee,
- * a write bio that covers the block and has already been processed.
+ * Same as cell_defer except it omits the original holder of the cell.
*/
-static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
- struct bio_list bios;
struct pool *pool = tc->pool;
unsigned long flags;
- bio_list_init(&bios);
-
spin_lock_irqsave(&pool->lock, flags);
dm_cell_release_no_holder(cell, &pool->deferred_bios);
spin_unlock_irqrestore(&pool->lock, flags);
@@ -561,7 +566,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
- DMERR("dm_thin_insert_block() failed");
+ DMERR_LIMIT("dm_thin_insert_block() failed");
dm_cell_error(m->cell);
goto out;
}
@@ -573,10 +578,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
* the bios in the cell.
*/
if (bio) {
- cell_defer_except(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell);
bio_endio(bio, 0);
} else
- cell_defer(tc, m->cell, m->data_block);
+ cell_defer(tc, m->cell);
out:
list_del(&m->list);
@@ -588,8 +593,8 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
struct thin_c *tc = m->tc;
bio_io_error(m->bio);
- cell_defer_except(tc, m->cell);
- cell_defer_except(tc, m->cell2);
+ cell_defer_no_holder(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell2);
mempool_free(m, tc->pool->mapping_pool);
}
@@ -597,13 +602,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+ inc_all_io_entry(tc->pool, m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell2);
+
if (m->pass_discard)
remap_and_issue(tc, m->bio, m->data_block);
else
bio_endio(m->bio, 0);
- cell_defer_except(tc, m->cell);
- cell_defer_except(tc, m->cell2);
mempool_free(m, tc->pool->mapping_pool);
}
@@ -614,7 +621,7 @@ static void process_prepared_discard(struct dm_thin_new_mapping *m)
r = dm_thin_remove_block(tc->td, m->virt_block);
if (r)
- DMERR("dm_thin_remove_block() failed");
+ DMERR_LIMIT("dm_thin_remove_block() failed");
process_prepared_discard_passdown(m);
}
@@ -706,11 +713,12 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
* bio immediately. Otherwise we use kcopyd to clone the data first.
*/
if (io_overwrites_block(pool, bio)) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+ inc_all_io_entry(pool, bio);
remap_and_issue(tc, bio, data_dest);
} else {
struct dm_io_region from, to;
@@ -727,7 +735,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
0, copy_complete, m);
if (r < 0) {
mempool_free(m, pool->mapping_pool);
- DMERR("dm_kcopyd_copy() failed");
+ DMERR_LIMIT("dm_kcopyd_copy() failed");
dm_cell_error(cell);
}
}
@@ -775,11 +783,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
process_prepared_mapping(m);
else if (io_overwrites_block(pool, bio)) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+ inc_all_io_entry(pool, bio);
remap_and_issue(tc, bio, data_block);
} else {
int r;
@@ -792,7 +801,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
if (r < 0) {
mempool_free(m, pool->mapping_pool);
- DMERR("dm_kcopyd_zero() failed");
+ DMERR_LIMIT("dm_kcopyd_zero() failed");
dm_cell_error(cell);
}
}
@@ -804,7 +813,7 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
- DMERR("commit failed, error = %d", r);
+ DMERR_LIMIT("commit failed: error = %d", r);
return r;
}
@@ -889,7 +898,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
*/
static void retry_on_resume(struct bio *bio)
{
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
struct pool *pool = tc->pool;
unsigned long flags;
@@ -936,7 +945,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
*/
build_data_key(tc->td, lookup_result.block, &key2);
if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
break;
}
@@ -962,13 +971,15 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
} else {
+ inc_all_io_entry(pool, bio);
+ cell_defer_no_holder(tc, cell);
+ cell_defer_no_holder(tc, cell2);
+
/*
* The DM core makes sure that the discard doesn't span
* a block boundary. So we submit the discard of a
* partial block appropriately.
*/
- dm_cell_release_singleton(cell, bio);
- dm_cell_release_singleton(cell2, bio);
if ((!lookup_result.shared) && pool->pf.discard_passdown)
remap_and_issue(tc, bio, lookup_result.block);
else
@@ -980,13 +991,14 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
/*
* It isn't provisioned, just forget it.
*/
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
bio_endio(bio, 0);
break;
default:
- DMERR("discard: find block unexpectedly returned %d", r);
- dm_cell_release_singleton(cell, bio);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
+ cell_defer_no_holder(tc, cell);
bio_io_error(bio);
break;
}
@@ -1012,7 +1024,8 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
default:
- DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+ __func__, r);
dm_cell_error(cell);
break;
}
@@ -1037,11 +1050,12 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_data_dir(bio) == WRITE && bio->bi_size)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
+ inc_all_io_entry(pool, bio);
+ cell_defer_no_holder(tc, cell);
- dm_cell_release_singleton(cell, bio);
remap_and_issue(tc, bio, lookup_result->block);
}
}
@@ -1056,7 +1070,9 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
* Remap empty bios (flushes) immediately, without provisioning.
*/
if (!bio->bi_size) {
- dm_cell_release_singleton(cell, bio);
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_and_issue(tc, bio, 0);
return;
}
@@ -1066,7 +1082,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
*/
if (bio_data_dir(bio) == READ) {
zero_fill_bio(bio);
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
bio_endio(bio, 0);
return;
}
@@ -1085,7 +1101,8 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
break;
default:
- DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+ __func__, r);
set_pool_mode(tc->pool, PM_READ_ONLY);
dm_cell_error(cell);
break;
@@ -1111,34 +1128,31 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
switch (r) {
case 0:
- /*
- * We can release this cell now. This thread is the only
- * one that puts bios into a cell, and we know there were
- * no preceding bios.
- */
- /*
- * TODO: this will probably have to change when discard goes
- * back in.
- */
- dm_cell_release_singleton(cell, bio);
-
- if (lookup_result.shared)
+ if (lookup_result.shared) {
process_shared_bio(tc, bio, block, &lookup_result);
- else
+ cell_defer_no_holder(tc, cell);
+ } else {
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_and_issue(tc, bio, lookup_result.block);
+ }
break;
case -ENODATA:
if (bio_data_dir(bio) == READ && tc->origin_dev) {
- dm_cell_release_singleton(cell, bio);
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_to_origin_and_issue(tc, bio);
} else
provision_block(tc, bio, block, cell);
break;
default:
- DMERR("dm_thin_find_block() failed, error = %d", r);
- dm_cell_release_singleton(cell, bio);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
+ cell_defer_no_holder(tc, cell);
bio_io_error(bio);
break;
}
@@ -1156,8 +1170,10 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
case 0:
if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
bio_io_error(bio);
- else
+ else {
+ inc_all_io_entry(tc->pool, bio);
remap_and_issue(tc, bio, lookup_result.block);
+ }
break;
case -ENODATA:
@@ -1167,6 +1183,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
}
if (tc->origin_dev) {
+ inc_all_io_entry(tc->pool, bio);
remap_to_origin_and_issue(tc, bio);
break;
}
@@ -1176,7 +1193,8 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
break;
default:
- DMERR("dm_thin_find_block() failed, error = %d", r);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
bio_io_error(bio);
break;
}
@@ -1207,7 +1225,7 @@ static void process_deferred_bios(struct pool *pool)
spin_unlock_irqrestore(&pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
/*
@@ -1340,32 +1358,30 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
-static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
- struct pool *pool = tc->pool;
- struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->tc = tc;
h->shared_read_entry = NULL;
- h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
+ h->all_io_entry = NULL;
h->overwrite_mapping = NULL;
-
- return h;
}
/*
* Non-blocking function called from the thin target's map function.
*/
-static int thin_bio_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{
int r;
struct thin_c *tc = ti->private;
dm_block_t block = get_bio_block(tc, bio);
struct dm_thin_device *td = tc->td;
struct dm_thin_lookup_result result;
+ struct dm_bio_prison_cell *cell1, *cell2;
+ struct dm_cell_key key;
- map_context->ptr = thin_hook_bio(tc, bio);
+ thin_hook_bio(tc, bio);
if (get_pool_mode(tc->pool) == PM_FAIL) {
bio_io_error(bio);
@@ -1400,12 +1416,25 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* shared flag will be set in their case.
*/
thin_defer_bio(tc, bio);
- r = DM_MAPIO_SUBMITTED;
- } else {
- remap(tc, bio, result.block);
- r = DM_MAPIO_REMAPPED;
+ return DM_MAPIO_SUBMITTED;
}
- break;
+
+ build_virtual_key(tc->td, block, &key);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+ return DM_MAPIO_SUBMITTED;
+
+ build_data_key(tc->td, result.block, &key);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
+ cell_defer_no_holder(tc, cell1);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell2);
+ cell_defer_no_holder(tc, cell1);
+
+ remap(tc, bio, result.block);
+ return DM_MAPIO_REMAPPED;
case -ENODATA:
if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
@@ -1414,8 +1443,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* of doing so. Just error it.
*/
bio_io_error(bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
}
/* fall through */
@@ -1425,8 +1453,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* provide the hint to load the metadata into cache.
*/
thin_defer_bio(tc, bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
default:
/*
@@ -1435,11 +1462,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* pool is switched to fail-io mode.
*/
bio_io_error(bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
}
-
- return r;
}
static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
@@ -1566,14 +1590,12 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, pool->mapping_pool);
mempool_destroy(pool->mapping_pool);
- mempool_destroy(pool->endio_hook_pool);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
}
static struct kmem_cache *_new_mapping_cache;
-static struct kmem_cache *_endio_hook_cache;
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
@@ -1667,13 +1689,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_mapping_pool;
}
- pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
- _endio_hook_cache);
- if (!pool->endio_hook_pool) {
- *error = "Error creating pool's endio_hook mempool";
- err_p = ERR_PTR(-ENOMEM);
- goto bad_endio_hook_pool;
- }
pool->ref_count = 1;
pool->last_commit_jiffies = jiffies;
pool->pool_md = pool_md;
@@ -1682,8 +1697,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return pool;
-bad_endio_hook_pool:
- mempool_destroy(pool->mapping_pool);
bad_mapping_pool:
dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
@@ -1966,8 +1979,7 @@ out_unlock:
return r;
}
-static int pool_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int pool_map(struct dm_target *ti, struct bio *bio)
{
int r;
struct pool_c *pt = ti->private;
@@ -2358,7 +2370,9 @@ static int pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
- if (pool->pf.discard_enabled && pool->pf.discard_passdown)
+ if (!pool->pf.discard_enabled)
+ DMEMIT("ignore_discard");
+ else if (pool->pf.discard_passdown)
DMEMIT("discard_passdown");
else
DMEMIT("no_discard_passdown");
@@ -2454,7 +2468,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 5, 0},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2576,6 +2590,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_flush_requests = 1;
ti->flush_supported = true;
+ ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
if (tc->pool->pf.discard_enabled) {
@@ -2609,20 +2624,17 @@ out_unlock:
return r;
}
-static int thin_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int thin_map(struct dm_target *ti, struct bio *bio)
{
bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
- return thin_bio_map(ti, bio, map_context);
+ return thin_bio_map(ti, bio);
}
-static int thin_endio(struct dm_target *ti,
- struct bio *bio, int err,
- union map_info *map_context)
+static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
unsigned long flags;
- struct dm_thin_endio_hook *h = map_context->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct list_head work;
struct dm_thin_new_mapping *m, *tmp;
struct pool *pool = h->tc->pool;
@@ -2643,14 +2655,15 @@ static int thin_endio(struct dm_target *ti,
if (h->all_io_entry) {
INIT_LIST_HEAD(&work);
dm_deferred_entry_dec(h->all_io_entry, &work);
- spin_lock_irqsave(&pool->lock, flags);
- list_for_each_entry_safe(m, tmp, &work, list)
- list_add(&m->list, &pool->prepared_discards);
- spin_unlock_irqrestore(&pool->lock, flags);
+ if (!list_empty(&work)) {
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry_safe(m, tmp, &work, list)
+ list_add(&m->list, &pool->prepared_discards);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ wake_worker(pool);
+ }
}
- mempool_free(h, pool->endio_hook_pool);
-
return 0;
}
@@ -2733,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti,
return 0;
}
-/*
- * A thin device always inherits its queue limits from its pool.
- */
-static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
-{
- struct thin_c *tc = ti->private;
-
- *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
-}
-
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 5, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
@@ -2754,7 +2757,6 @@ static struct target_type thin_target = {
.postsuspend = thin_postsuspend,
.status = thin_status,
.iterate_devices = thin_iterate_devices,
- .io_hints = thin_io_hints,
};
/*----------------------------------------------------------------*/
@@ -2779,14 +2781,8 @@ static int __init dm_thin_init(void)
if (!_new_mapping_cache)
goto bad_new_mapping_cache;
- _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
- if (!_endio_hook_cache)
- goto bad_endio_hook_cache;
-
return 0;
-bad_endio_hook_cache:
- kmem_cache_destroy(_new_mapping_cache);
bad_new_mapping_cache:
dm_unregister_target(&pool_target);
bad_pool_target:
@@ -2801,7 +2797,6 @@ static void dm_thin_exit(void)
dm_unregister_target(&pool_target);
kmem_cache_destroy(_new_mapping_cache);
- kmem_cache_destroy(_endio_hook_cache);
}
module_init(dm_thin_init);
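The dm-thin conversion above illustrates the general per-bio-data pattern this series introduces: the constructor declares how much per-bio space the target needs, and the map/end_io paths recover that space with dm_per_bio_data() instead of allocating from a private mempool. A rough sketch for a hypothetical target (struct example_hook and the example_* functions are illustrative; per_bio_data_size, dm_per_bio_data(), and the two-argument map / three-argument end_io signatures are the interfaces used in the patches above):

struct example_hook {
	void *ctx;			/* whatever the target tracks per bio */
};

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* Reserve space in front of every cloned bio; no mempool needed. */
	ti->per_bio_data_size = sizeof(struct example_hook);
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_hook *h = dm_per_bio_data(bio, sizeof(struct example_hook));

	h->ctx = ti->private;
	bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
	return DM_MAPIO_REMAPPED;
}

static int example_end_io(struct dm_target *ti, struct bio *bio, int err)
{
	struct example_hook *h = dm_per_bio_data(bio, sizeof(struct example_hook));

	/* h is still valid here and is freed together with the bio clone. */
	h->ctx = NULL;
	return 0;
}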
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 9e7328bb4030..52cde982164a 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -55,7 +55,6 @@ struct dm_verity {
unsigned shash_descsize;/* the size of temporary space for crypto */
int hash_failed; /* set to 1 if hash of any block failed */
- mempool_t *io_mempool; /* mempool of struct dm_verity_io */
mempool_t *vec_mempool; /* mempool of bio vector */
struct workqueue_struct *verify_wq;
@@ -66,7 +65,6 @@ struct dm_verity {
struct dm_verity_io {
struct dm_verity *v;
- struct bio *bio;
/* original values of bio->bi_end_io and bio->bi_private */
bio_end_io_t *orig_bi_end_io;
@@ -389,8 +387,8 @@ test_block_hash:
*/
static void verity_finish_io(struct dm_verity_io *io, int error)
{
- struct bio *bio = io->bio;
struct dm_verity *v = io->v;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
@@ -398,8 +396,6 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
if (io->io_vec != io->io_vec_inline)
mempool_free(io->io_vec, v->vec_mempool);
- mempool_free(io, v->io_mempool);
-
bio_endio(bio, error);
}
@@ -462,8 +458,7 @@ no_prefetch_cluster:
* Bio map function. It allocates dm_verity_io structure and bio vector and
* fills them. Then it issues prefetches and the I/O.
*/
-static int verity_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int verity_map(struct dm_target *ti, struct bio *bio)
{
struct dm_verity *v = ti->private;
struct dm_verity_io *io;
@@ -486,9 +481,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio,
if (bio_data_dir(bio) == WRITE)
return -EIO;
- io = mempool_alloc(v->io_mempool, GFP_NOIO);
+ io = dm_per_bio_data(bio, ti->per_bio_data_size);
io->v = v;
- io->bio = bio;
io->orig_bi_end_io = bio->bi_end_io;
io->orig_bi_private = bio->bi_private;
io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
@@ -610,9 +604,6 @@ static void verity_dtr(struct dm_target *ti)
if (v->vec_mempool)
mempool_destroy(v->vec_mempool);
- if (v->io_mempool)
- mempool_destroy(v->io_mempool);
-
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -841,13 +832,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- v->io_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
- sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2);
- if (!v->io_mempool) {
- ti->error = "Cannot allocate io mempool";
- r = -ENOMEM;
- goto bad;
- }
+ ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
BIO_MAX_PAGES * sizeof(struct bio_vec));
@@ -875,7 +860,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index cc2b3cb81946..69a5c3b3b340 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -33,8 +33,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/*
* Return zeros only on reads
*/
-static int zero_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int zero_map(struct dm_target *ti, struct bio *bio)
{
switch(bio_rw(bio)) {
case READ:
@@ -56,7 +55,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
static struct target_type zero_target = {
.name = "zero",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = zero_ctr,
.map = zero_map,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 77e6eff41cae..314a0e2faf79 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -63,18 +63,6 @@ struct dm_io {
};
/*
- * For bio-based dm.
- * One of these is allocated per target within a bio. Hopefully
- * this will be simplified out one day.
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- union map_info info;
- struct bio clone;
-};
-
-/*
* For request-based dm.
* One of these is allocated per request.
*/
@@ -657,7 +645,7 @@ static void clone_endio(struct bio *bio, int error)
error = -EIO;
if (endio) {
- r = endio(tio->ti, bio, error, &tio->info);
+ r = endio(tio->ti, bio, error);
if (r < 0 || r == DM_ENDIO_REQUEUE)
/*
* error and requeue request are handled
@@ -1016,7 +1004,7 @@ static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
*/
atomic_inc(&tio->io->io_count);
sector = clone->bi_sector;
- r = ti->type->map(ti, clone, &tio->info);
+ r = ti->type->map(ti, clone);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
@@ -1111,6 +1099,7 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
+ tio->target_request_nr = 0;
return tio;
}
@@ -1121,7 +1110,7 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
struct bio *clone = &tio->clone;
- tio->info.target_request_nr = request_nr;
+ tio->target_request_nr = request_nr;
/*
* Discard requests require the bio's inline iovecs be initialized.
@@ -1174,10 +1163,32 @@ static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
ci->sector_count = 0;
}
-static int __clone_and_map_discard(struct clone_info *ci)
+typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);
+
+static unsigned get_num_discard_requests(struct dm_target *ti)
+{
+ return ti->num_discard_requests;
+}
+
+static unsigned get_num_write_same_requests(struct dm_target *ti)
+{
+ return ti->num_write_same_requests;
+}
+
+typedef bool (*is_split_required_fn)(struct dm_target *ti);
+
+static bool is_split_required_for_discard(struct dm_target *ti)
+{
+ return ti->split_discard_requests;
+}
+
+static int __clone_and_map_changing_extent_only(struct clone_info *ci,
+ get_num_requests_fn get_num_requests,
+ is_split_required_fn is_split_required)
{
struct dm_target *ti;
sector_t len;
+ unsigned num_requests;
do {
ti = dm_table_find_target(ci->map, ci->sector);
@@ -1185,20 +1196,21 @@ static int __clone_and_map_discard(struct clone_info *ci)
return -EIO;
/*
- * Even though the device advertised discard support,
- * that does not mean every target supports it, and
+ * Even though the device advertised support for this type of
+ * request, that does not mean every target supports it, and
* reconfiguration might also have changed that since the
* check was performed.
*/
- if (!ti->num_discard_requests)
+ num_requests = get_num_requests ? get_num_requests(ti) : 0;
+ if (!num_requests)
return -EOPNOTSUPP;
- if (!ti->split_discard_requests)
+ if (is_split_required && !is_split_required(ti))
len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
else
len = min(ci->sector_count, max_io_len(ci->sector, ti));
- __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+ __issue_target_requests(ci, ti, num_requests, len);
ci->sector += len;
} while (ci->sector_count -= len);
@@ -1206,6 +1218,17 @@ static int __clone_and_map_discard(struct clone_info *ci)
return 0;
}
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+ return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
+ is_split_required_for_discard);
+}
+
+static int __clone_and_map_write_same(struct clone_info *ci)
+{
+ return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
+}
+
static int __clone_and_map(struct clone_info *ci)
{
struct bio *bio = ci->bio;
@@ -1215,6 +1238,8 @@ static int __clone_and_map(struct clone_info *ci)
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __clone_and_map_discard(ci);
+ else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ return __clone_and_map_write_same(ci);
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
@@ -1946,13 +1971,20 @@ static void free_dev(struct mapped_device *md)
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
- struct dm_md_mempools *p;
+ struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
- /* the md already has necessary mempools */
+ if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
+ /*
+ * The md already has necessary mempools. Reload just the
+ * bioset because front_pad may have changed because
+ * a different table was loaded.
+ */
+ bioset_free(md->bs);
+ md->bs = p->bs;
+ p->bs = NULL;
goto out;
+ }
- p = dm_table_get_md_mempools(t);
BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
md->io_pool = p->io_pool;
@@ -2711,7 +2743,7 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
@@ -2719,6 +2751,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
if (!pools)
return NULL;
+ per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+
pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
mempool_create_slab_pool(MIN_IOS, _io_cache) :
mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
@@ -2734,7 +2768,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
pools->bs = (type == DM_TYPE_BIO_BASED) ?
bioset_create(pool_size,
- offsetof(struct dm_target_io, clone)) :
+ per_bio_data_size + offsetof(struct dm_target_io, clone)) :
bioset_create(pool_size,
offsetof(struct dm_rq_clone_bio_info, clone));
if (!pools->bs)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 6a99fefaa743..45b97da1bd06 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -159,7 +159,7 @@ void dm_kcopyd_exit(void);
/*
* Mempool operations
*/
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
#endif
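The per-bio data retrieved above lives in the bioset front padding that dm_alloc_md_mempools() now reserves: per_bio_data_size bytes, followed by the struct dm_target_io whose embedded clone bio is what a target's map/end_io functions see. A rough reconstruction of how the dm_per_bio_data()/dm_bio_from_per_bio_data() helpers follow from that layout (the actual definitions live in the device-mapper header, which this excerpt does not show, and struct dm_target_io is assumed visible there after being moved out of dm.c):

/* front_pad layout: [per-bio data][dm_target_io ... clone bio at its tail] */

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	/* bio is &tio->clone: step back over the tio header, then the data. */
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	/* Inverse mapping: skip the per-bio data and the tio header. */
	return (struct bio *)((char *)data + data_size +
			      offsetof(struct dm_target_io, clone));
}

The roundup of per_bio_data_size to __alignof__(struct dm_target_io) in dm_alloc_md_mempools() keeps the tio header properly aligned regardless of the size a target requests.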
diff --git a/drivers/md/md.c b/drivers/md/md.c
index bd8bf0953fe3..3db3d1b271f7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,7 +452,7 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
spin_lock_irq(&mddev->write_lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
- mddev->write_lock, /*nothing*/);
+ mddev->write_lock);
mddev->flush_bio = bio;
spin_unlock_irq(&mddev->write_lock);
@@ -1414,12 +1414,11 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
__le32 *isuper = (__le32*)sb;
- int i;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
newcsum = 0;
- for (i=0; size>=4; size -= 4 )
+ for (; size >= 4; size -= 4)
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
@@ -4753,6 +4752,8 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
+ if (entry->store == new_dev_store)
+ flush_workqueue(md_misc_wq);
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->store(mddev, page, length);
@@ -6346,24 +6347,23 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
* Commands dealing with the RAID driver but not any
* particular array:
*/
- switch (cmd)
- {
- case RAID_VERSION:
- err = get_version(argp);
- goto done;
+ switch (cmd) {
+ case RAID_VERSION:
+ err = get_version(argp);
+ goto done;
- case PRINT_RAID_DEBUG:
- err = 0;
- md_print_devices();
- goto done;
+ case PRINT_RAID_DEBUG:
+ err = 0;
+ md_print_devices();
+ goto done;
#ifndef MODULE
- case RAID_AUTORUN:
- err = 0;
- autostart_arrays(arg);
- goto done;
+ case RAID_AUTORUN:
+ err = 0;
+ autostart_arrays(arg);
+ goto done;
#endif
- default:;
+ default:;
}
/*
@@ -6398,6 +6398,10 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto abort;
}
+ if (cmd == ADD_NEW_DISK)
+ /* need to ensure md_delayed_delete() has completed */
+ flush_workqueue(md_misc_wq);
+
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
@@ -6406,50 +6410,44 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto abort;
}
- switch (cmd)
- {
- case SET_ARRAY_INFO:
- {
- mdu_array_info_t info;
- if (!arg)
- memset(&info, 0, sizeof(info));
- else if (copy_from_user(&info, argp, sizeof(info))) {
- err = -EFAULT;
- goto abort_unlock;
- }
- if (mddev->pers) {
- err = update_array_info(mddev, &info);
- if (err) {
- printk(KERN_WARNING "md: couldn't update"
- " array info. %d\n", err);
- goto abort_unlock;
- }
- goto done_unlock;
- }
- if (!list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: array %s already has disks!\n",
- mdname(mddev));
- err = -EBUSY;
- goto abort_unlock;
- }
- if (mddev->raid_disks) {
- printk(KERN_WARNING
- "md: array %s already initialised!\n",
- mdname(mddev));
- err = -EBUSY;
- goto abort_unlock;
- }
- err = set_array_info(mddev, &info);
- if (err) {
- printk(KERN_WARNING "md: couldn't set"
- " array info. %d\n", err);
- goto abort_unlock;
- }
+ if (cmd == SET_ARRAY_INFO) {
+ mdu_array_info_t info;
+ if (!arg)
+ memset(&info, 0, sizeof(info));
+ else if (copy_from_user(&info, argp, sizeof(info))) {
+ err = -EFAULT;
+ goto abort_unlock;
+ }
+ if (mddev->pers) {
+ err = update_array_info(mddev, &info);
+ if (err) {
+ printk(KERN_WARNING "md: couldn't update"
+ " array info. %d\n", err);
+ goto abort_unlock;
}
goto done_unlock;
-
- default:;
+ }
+ if (!list_empty(&mddev->disks)) {
+ printk(KERN_WARNING
+ "md: array %s already has disks!\n",
+ mdname(mddev));
+ err = -EBUSY;
+ goto abort_unlock;
+ }
+ if (mddev->raid_disks) {
+ printk(KERN_WARNING
+ "md: array %s already initialised!\n",
+ mdname(mddev));
+ err = -EBUSY;
+ goto abort_unlock;
+ }
+ err = set_array_info(mddev, &info);
+ if (err) {
+ printk(KERN_WARNING "md: couldn't set"
+ " array info. %d\n", err);
+ goto abort_unlock;
+ }
+ goto done_unlock;
}
/*
@@ -6468,52 +6466,51 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/*
* Commands even a read-only array can execute:
*/
- switch (cmd)
- {
- case GET_BITMAP_FILE:
- err = get_bitmap_file(mddev, argp);
- goto done_unlock;
+ switch (cmd) {
+ case GET_BITMAP_FILE:
+ err = get_bitmap_file(mddev, argp);
+ goto done_unlock;
- case RESTART_ARRAY_RW:
- err = restart_array(mddev);
- goto done_unlock;
+ case RESTART_ARRAY_RW:
+ err = restart_array(mddev);
+ goto done_unlock;
- case STOP_ARRAY:
- err = do_md_stop(mddev, 0, bdev);
- goto done_unlock;
+ case STOP_ARRAY:
+ err = do_md_stop(mddev, 0, bdev);
+ goto done_unlock;
- case STOP_ARRAY_RO:
- err = md_set_readonly(mddev, bdev);
- goto done_unlock;
+ case STOP_ARRAY_RO:
+ err = md_set_readonly(mddev, bdev);
+ goto done_unlock;
- case BLKROSET:
- if (get_user(ro, (int __user *)(arg))) {
- err = -EFAULT;
- goto done_unlock;
- }
- err = -EINVAL;
+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
- /* if the bdev is going readonly the value of mddev->ro
- * does not matter, no writes are coming
- */
- if (ro)
- goto done_unlock;
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
- /* are we are already prepared for writes? */
- if (mddev->ro != 1)
- goto done_unlock;
+ /* are we are already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
- /* transitioning to readauto need only happen for
- * arrays that call md_write_start
- */
- if (mddev->pers) {
- err = restart_array(mddev);
- if (err == 0) {
- mddev->ro = 2;
- set_disk_ro(mddev->gendisk, 0);
- }
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
}
- goto done_unlock;
+ }
+ goto done_unlock;
}
/*
@@ -6535,37 +6532,36 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
}
- switch (cmd)
+ switch (cmd) {
+ case ADD_NEW_DISK:
{
- case ADD_NEW_DISK:
- {
- mdu_disk_info_t info;
- if (copy_from_user(&info, argp, sizeof(info)))
- err = -EFAULT;
- else
- err = add_new_disk(mddev, &info);
- goto done_unlock;
- }
+ mdu_disk_info_t info;
+ if (copy_from_user(&info, argp, sizeof(info)))
+ err = -EFAULT;
+ else
+ err = add_new_disk(mddev, &info);
+ goto done_unlock;
+ }
- case HOT_REMOVE_DISK:
- err = hot_remove_disk(mddev, new_decode_dev(arg));
- goto done_unlock;
+ case HOT_REMOVE_DISK:
+ err = hot_remove_disk(mddev, new_decode_dev(arg));
+ goto done_unlock;
- case HOT_ADD_DISK:
- err = hot_add_disk(mddev, new_decode_dev(arg));
- goto done_unlock;
+ case HOT_ADD_DISK:
+ err = hot_add_disk(mddev, new_decode_dev(arg));
+ goto done_unlock;
- case RUN_ARRAY:
- err = do_md_run(mddev);
- goto done_unlock;
+ case RUN_ARRAY:
+ err = do_md_run(mddev);
+ goto done_unlock;
- case SET_BITMAP_FILE:
- err = set_bitmap_file(mddev, (int)arg);
- goto done_unlock;
+ case SET_BITMAP_FILE:
+ err = set_bitmap_file(mddev, (int)arg);
+ goto done_unlock;
- default:
- err = -EINVAL;
- goto abort_unlock;
+ default:
+ err = -EINVAL;
+ goto abort_unlock;
}
done_unlock:
@@ -7184,6 +7180,7 @@ void md_done_sync(struct mddev *mddev, int blocks, int ok)
wake_up(&mddev->recovery_wait);
if (!ok) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
md_wakeup_thread(mddev->thread);
// stop recovery, signal do_sync ....
}
@@ -7281,6 +7278,7 @@ EXPORT_SYMBOL_GPL(md_allow_write);
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
+#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
struct mddev *mddev = thread->mddev;
@@ -7289,6 +7287,7 @@ void md_do_sync(struct md_thread *thread)
window;
sector_t max_sectors,j, io_sectors;
unsigned long mark[SYNC_MARKS];
+ unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
@@ -7448,6 +7447,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync_completed = j;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
md_new_event(mddev);
+ update_time = jiffies;
blk_start_plug(&plug);
while (j < max_sectors) {
@@ -7459,6 +7459,7 @@ void md_do_sync(struct md_thread *thread)
((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
+ time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
)) {
@@ -7466,6 +7467,10 @@ void md_do_sync(struct md_thread *thread)
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+ j > mddev->recovery_cp)
+ mddev->recovery_cp = j;
+ update_time = jiffies;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -7570,8 +7575,13 @@ void md_do_sync(struct md_thread *thread)
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
- mddev->recovery_cp =
- mddev->curr_resync_completed;
+ if (test_bit(MD_RECOVERY_ERROR,
+ &mddev->recovery))
+ mddev->recovery_cp =
+ mddev->curr_resync_completed;
+ else
+ mddev->recovery_cp =
+ mddev->curr_resync;
}
} else
mddev->recovery_cp = MaxSector;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index af443ab868db..eca59c3074ef 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -307,6 +307,7 @@ struct mddev {
* REQUEST: user-space has requested a sync (used with SYNC)
* CHECK: user-space request for check-only, no repair
* RESHAPE: A reshape is happening
+ * ERROR: sync-action interrupted because io-error
*
* If neither SYNC or RESHAPE are set, then it is a recovery.
*/
@@ -320,6 +321,7 @@ struct mddev {
#define MD_RECOVERY_CHECK 7
#define MD_RECOVERY_RESHAPE 8
#define MD_RECOVERY_FROZEN 9
+#define MD_RECOVERY_ERROR 10
unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular
@@ -551,32 +553,6 @@ struct md_thread {
#define THREAD_WAKEUP 0
-#define __wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
-} while (0)
-
-#define wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- if (condition) \
- break; \
- __wait_event_lock_irq(wq, condition, lock, cmd); \
-} while (0)
-
static inline void safe_put_page(struct page *p)
{
if (p) put_page(p);
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index a3ae09124a67..28c3ed072a79 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -428,15 +428,17 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm,
if (!v)
return 0;
r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
- if (unlikely(r))
+ if (unlikely(r)) {
+ DMERR_LIMIT("%s validator check failed for block %llu", v->name,
+ (unsigned long long) dm_bufio_get_block_number(buf));
return r;
+ }
aux->validator = v;
} else {
if (unlikely(aux->validator != v)) {
- DMERR("validator mismatch (old=%s vs new=%s) for block %llu",
- aux->validator->name, v ? v->name : "NULL",
- (unsigned long long)
- dm_bufio_get_block_number(buf));
+ DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
+ aux->validator->name, v ? v->name : "NULL",
+ (unsigned long long) dm_bufio_get_block_number(buf));
return -EINVAL;
}
}
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 5709bfeab1e8..accbb05f17b6 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -36,13 +36,13 @@ struct node_header {
__le32 padding;
} __packed;
-struct node {
+struct btree_node {
struct node_header header;
__le64 keys[0];
} __packed;
-void inc_children(struct dm_transaction_manager *tm, struct node *n,
+void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt);
int new_block(struct dm_btree_info *info, struct dm_block **result);
@@ -64,7 +64,7 @@ struct ro_spine {
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
-struct node *ro_node(struct ro_spine *s);
+struct btree_node *ro_node(struct ro_spine *s);
struct shadow_spine {
struct dm_btree_info *info;
@@ -98,17 +98,17 @@ int shadow_root(struct shadow_spine *s);
/*
* Some inlines.
*/
-static inline __le64 *key_ptr(struct node *n, uint32_t index)
+static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
{
return n->keys + index;
}
-static inline void *value_base(struct node *n)
+static inline void *value_base(struct btree_node *n)
{
return &n->keys[le32_to_cpu(n->header.max_entries)];
}
-static inline void *value_ptr(struct node *n, uint32_t index)
+static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
uint32_t value_size = le32_to_cpu(n->header.value_size);
return value_base(n) + (value_size * index);
@@ -117,7 +117,7 @@ static inline void *value_ptr(struct node *n, uint32_t index)
/*
* Assumes the values are suitably-aligned and converts to core format.
*/
-static inline uint64_t value64(struct node *n, uint32_t index)
+static inline uint64_t value64(struct btree_node *n, uint32_t index)
{
__le64 *values_le = value_base(n);
@@ -127,7 +127,7 @@ static inline uint64_t value64(struct node *n, uint32_t index)
/*
* Searching for a key within a single node.
*/
-int lower_bound(struct node *n, uint64_t key);
+int lower_bound(struct btree_node *n, uint64_t key);
extern struct dm_block_validator btree_node_validator;
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index aa71e2359a07..c4f28133ef82 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -53,7 +53,7 @@
/*
* Some little utilities for moving node data around.
*/
-static void node_shift(struct node *n, int shift)
+static void node_shift(struct btree_node *n, int shift)
{
uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
uint32_t value_size = le32_to_cpu(n->header.value_size);
@@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift)
}
}
-static void node_copy(struct node *left, struct node *right, int shift)
+static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t value_size = le32_to_cpu(left->header.value_size);
@@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift)
/*
* Delete a specific entry from a leaf node.
*/
-static void delete_at(struct node *n, unsigned index)
+static void delete_at(struct btree_node *n, unsigned index)
{
unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
unsigned nr_to_copy = nr_entries - (index + 1);
@@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index)
n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}
-static unsigned merge_threshold(struct node *n)
+static unsigned merge_threshold(struct btree_node *n)
{
return le32_to_cpu(n->header.max_entries) / 3;
}
@@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n)
struct child {
unsigned index;
struct dm_block *block;
- struct node *n;
+ struct btree_node *n;
};
static struct dm_btree_value_type le64_type = {
@@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = {
.equal = NULL
};
-static int init_child(struct dm_btree_info *info, struct node *parent,
+static int init_child(struct dm_btree_info *info, struct btree_node *parent,
unsigned index, struct child *result)
{
int r, inc;
@@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
return dm_tm_unlock(info->tm, c->block);
}
-static void shift(struct node *left, struct node *right, int count)
+static void shift(struct btree_node *left, struct btree_node *right, int count)
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
@@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count)
right->header.nr_entries = cpu_to_le32(nr_right + count);
}
-static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *r)
{
- struct node *left = l->n;
- struct node *right = r->n;
+ struct btree_node *left = l->n;
+ struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
unsigned threshold = 2 * merge_threshold(left) + 1;
@@ -239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
unsigned left_index)
{
int r;
- struct node *parent;
+ struct btree_node *parent;
struct child left, right;
parent = dm_block_data(shadow_current(s));
@@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
* in right, then rebalance2. This wastes some cpu, but I want something
* simple atm.
*/
-static void delete_center_node(struct dm_btree_info *info, struct node *parent,
+static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r,
- struct node *left, struct node *center, struct node *right,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
@@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent,
/*
* Redistributes entries among 3 sibling nodes.
*/
-static void redistribute3(struct dm_btree_info *info, struct node *parent,
+static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r,
- struct node *left, struct node *center, struct node *right,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
int s;
@@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent,
*key_ptr(parent, r->index) = right->keys[0];
}
-static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r)
{
- struct node *left = l->n;
- struct node *center = c->n;
- struct node *right = r->n;
+ struct btree_node *left = l->n;
+ struct btree_node *center = c->n;
+ struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
@@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
unsigned left_index)
{
int r;
- struct node *parent = dm_block_data(shadow_current(s));
+ struct btree_node *parent = dm_block_data(shadow_current(s));
struct child left, center, right;
/*
@@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
{
int r;
struct dm_block *block;
- struct node *n;
+ struct btree_node *n;
r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
if (r)
@@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s,
{
int i, r, has_left_sibling, has_right_sibling;
uint32_t child_entries;
- struct node *n;
+ struct btree_node *n;
n = dm_block_data(shadow_current(s));
@@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s,
return r;
}
-static int do_leaf(struct node *n, uint64_t key, unsigned *index)
+static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
{
int i = lower_bound(n, key);
@@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
uint64_t key, unsigned *index)
{
int i = *index, r;
- struct node *n;
+ struct btree_node *n;
for (;;) {
r = shadow_step(s, root, vt);
@@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
unsigned level, last_level = info->levels - 1;
int index = 0, r = 0;
struct shadow_spine spine;
- struct node *n;
+ struct btree_node *n;
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index d9a7912ee8ee..f199a0c4ed04 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
{
- struct node *n = dm_block_data(b);
+ struct btree_node *n = dm_block_data(b);
struct node_header *h = &n->header;
h->blocknr = cpu_to_le64(dm_block_location(b));
@@ -38,15 +38,15 @@ static int node_check(struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
{
- struct node *n = dm_block_data(b);
+ struct btree_node *n = dm_block_data(b);
struct node_header *h = &n->header;
size_t value_size;
__le32 csum_disk;
uint32_t flags;
if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
- DMERR("node_check failed blocknr %llu wanted %llu",
- le64_to_cpu(h->blocknr), dm_block_location(b));
+ DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(h->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -54,8 +54,8 @@ static int node_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
BTREE_CSUM_XOR));
if (csum_disk != h->csum) {
- DMERR("node_check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
+ DMERR_LIMIT("node_check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
return -EILSEQ;
}
@@ -63,12 +63,12 @@ static int node_check(struct dm_block_validator *v,
if (sizeof(struct node_header) +
(sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
- DMERR("node_check failed: max_entries too large");
+ DMERR_LIMIT("node_check failed: max_entries too large");
return -EILSEQ;
}
if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
- DMERR("node_check failed, too many entries");
+ DMERR_LIMIT("node_check failed: too many entries");
return -EILSEQ;
}
@@ -77,7 +77,7 @@ static int node_check(struct dm_block_validator *v,
*/
flags = le32_to_cpu(h->flags);
if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
- DMERR("node_check failed, node is neither INTERNAL or LEAF");
+ DMERR_LIMIT("node_check failed: node is neither INTERNAL or LEAF");
return -EILSEQ;
}
@@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
return r;
}
-struct node *ro_node(struct ro_spine *s)
+struct btree_node *ro_node(struct ro_spine *s)
{
struct dm_block *block;
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index d12b2cc51f1a..4caf66918cdb 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
/*----------------------------------------------------------------*/
/* makes the assumption that no two keys are the same. */
-static int bsearch(struct node *n, uint64_t key, int want_hi)
+static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
@@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi)
return want_hi ? hi : lo;
}
-int lower_bound(struct node *n, uint64_t key)
+int lower_bound(struct btree_node *n, uint64_t key)
{
return bsearch(n, key, 0);
}
-void inc_children(struct dm_transaction_manager *tm, struct node *n,
+void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt)
{
unsigned i;
@@ -77,7 +77,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
vt->inc(vt->context, value_ptr(n, i));
}
-static int insert_at(size_t value_size, struct node *node, unsigned index,
+static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
uint64_t key, void *value)
__dm_written_to_disk(value)
{
@@ -122,7 +122,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
int r;
struct dm_block *b;
- struct node *n;
+ struct btree_node *n;
size_t block_size;
uint32_t max_entries;
@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
#define MAX_SPINE_DEPTH 64
struct frame {
struct dm_block *b;
- struct node *n;
+ struct btree_node *n;
unsigned level;
unsigned nr_children;
unsigned current_child;
@@ -230,6 +230,11 @@ static void pop_frame(struct del_stack *s)
dm_tm_unlock(s->tm, f->b);
}
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+ return f->level < (info->levels - 1);
+}
+
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
int r;
@@ -241,7 +246,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
s->tm = info->tm;
s->top = -1;
- r = push_frame(s, root, 1);
+ r = push_frame(s, root, 0);
if (r)
goto out;
@@ -267,7 +272,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
if (r)
goto out;
- } else if (f->level != (info->levels - 1)) {
+ } else if (is_internal_level(info, f)) {
b = value64(f->n, f->current_child);
f->current_child++;
r = push_frame(s, b, f->level + 1);
@@ -295,7 +300,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del);
/*----------------------------------------------------------------*/
static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
- int (*search_fn)(struct node *, uint64_t),
+ int (*search_fn)(struct btree_node *, uint64_t),
uint64_t *result_key, void *v, size_t value_size)
{
int i, r;
@@ -406,7 +411,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
size_t size;
unsigned nr_left, nr_right;
struct dm_block *left, *right, *parent;
- struct node *ln, *rn, *pn;
+ struct btree_node *ln, *rn, *pn;
__le64 location;
left = shadow_current(s);
@@ -491,7 +496,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
size_t size;
unsigned nr_left, nr_right;
struct dm_block *left, *right, *new_parent;
- struct node *pn, *ln, *rn;
+ struct btree_node *pn, *ln, *rn;
__le64 val;
new_parent = shadow_current(s);
@@ -576,7 +581,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
uint64_t key, unsigned *index)
{
int r, i = *index, top = 1;
- struct node *node;
+ struct btree_node *node;
for (;;) {
r = shadow_step(s, root, vt);
@@ -643,7 +648,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
unsigned level, index = -1, last_level = info->levels - 1;
dm_block_t block = root;
struct shadow_spine spine;
- struct node *n;
+ struct btree_node *n;
struct dm_btree_value_type le64_type;
le64_type.context = NULL;
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index f3a9af8cdec3..3e7a88d99eb0 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -39,8 +39,8 @@ static int index_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
- DMERR("index_check failed blocknr %llu wanted %llu",
- le64_to_cpu(mi_le->blocknr), dm_block_location(b));
+ DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(mi_le->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -48,8 +48,8 @@ static int index_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
INDEX_CSUM_XOR));
if (csum_disk != mi_le->csum) {
- DMERR("index_check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
+ DMERR_LIMIT("index_check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
return -EILSEQ;
}
@@ -89,8 +89,8 @@ static int bitmap_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
- DMERR("bitmap check failed blocknr %llu wanted %llu",
- le64_to_cpu(disk_header->blocknr), dm_block_location(b));
+ DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(disk_header->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -98,8 +98,8 @@ static int bitmap_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
BITMAP_CSUM_XOR));
if (csum_disk != disk_header->csum) {
- DMERR("bitmap check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
+ DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
return -EILSEQ;
}
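
Switching these validator messages from DMERR() to DMERR_LIMIT() rate-limits them: block validators run on every metadata read, so a corrupted device could otherwise flood the kernel log. A kernel-flavoured sketch of the same idea using the generic printk_ratelimited() — the function and names below are illustrative, not the DM code:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/errno.h>

static int check_blocknr(u64 found, u64 wanted)
{
	if (found != wanted) {
		/* At most a short burst of messages per interval,
		 * instead of one line per corrupted block read. */
		printk_ratelimited(KERN_ERR
			"validator: blocknr %llu != wanted %llu\n",
			(unsigned long long)found,
			(unsigned long long)wanted);
		return -ENOTBLK;
	}
	return 0;
}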
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index e89ae5e7a519..906cf3df71af 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -337,7 +337,7 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
int r = sm_metadata_new_block_(sm, b);
if (r)
- DMERR("out of metadata space");
+ DMERR("unable to allocate new metadata block");
return r;
}
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index d247a35da3c6..7b17a1fdeaf9 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -25,8 +25,8 @@ struct shadow_info {
/*
* It would be nice if we scaled with the size of transaction.
*/
-#define HASH_SIZE 256
-#define HASH_MASK (HASH_SIZE - 1)
+#define DM_HASH_SIZE 256
+#define DM_HASH_MASK (DM_HASH_SIZE - 1)
struct dm_transaction_manager {
int is_clone;
@@ -36,7 +36,7 @@ struct dm_transaction_manager {
struct dm_space_map *sm;
spinlock_t lock;
- struct hlist_head buckets[HASH_SIZE];
+ struct hlist_head buckets[DM_HASH_SIZE];
};
/*----------------------------------------------------------------*/
@@ -44,7 +44,7 @@ struct dm_transaction_manager {
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
int r = 0;
- unsigned bucket = dm_hash_block(b, HASH_MASK);
+ unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
struct shadow_info *si;
struct hlist_node *n;
@@ -71,7 +71,7 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
si = kmalloc(sizeof(*si), GFP_NOIO);
if (si) {
si->where = b;
- bucket = dm_hash_block(b, HASH_MASK);
+ bucket = dm_hash_block(b, DM_HASH_MASK);
spin_lock(&tm->lock);
hlist_add_head(&si->hlist, tm->buckets + bucket);
spin_unlock(&tm->lock);
@@ -86,7 +86,7 @@ static void wipe_shadow_table(struct dm_transaction_manager *tm)
int i;
spin_lock(&tm->lock);
- for (i = 0; i < HASH_SIZE; i++) {
+ for (i = 0; i < DM_HASH_SIZE; i++) {
bucket = tm->buckets + i;
hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
kfree(si);
@@ -115,7 +115,7 @@ static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
tm->sm = sm;
spin_lock_init(&tm->lock);
- for (i = 0; i < HASH_SIZE; i++)
+ for (i = 0; i < DM_HASH_SIZE; i++)
INIT_HLIST_HEAD(tm->buckets + i);
return tm;
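
Prefixing HASH_SIZE/HASH_MASK with DM_ keeps these file-local macros from colliding with identically named definitions elsewhere in the tree (the clashing definition is not shown here). The shadow table itself is the usual power-of-two bucket array of hlists; a hedged sketch of that pattern, using the generic hash_64() rather than the transaction manager's own dm_hash_block():

#include <linux/hash.h>
#include <linux/list.h>
#include <linux/types.h>

#define DEMO_HASH_BITS	8
#define DEMO_HASH_SIZE	(1 << DEMO_HASH_BITS)	/* 256 buckets */

struct demo_entry {
	struct hlist_node hlist;
	u64 key;
};

static struct hlist_head demo_buckets[DEMO_HASH_SIZE];

static void demo_insert(struct demo_entry *e)
{
	/* hash_64(key, bits) already returns a value in [0, 2^bits) */
	unsigned bucket = hash_64(e->key, DEMO_HASH_BITS);

	hlist_add_head(&e->hlist, &demo_buckets[bucket]);
}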
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a0f73092176e..d5bddfc4010e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -822,7 +822,7 @@ static void raise_barrier(struct r1conf *conf)
/* Wait until no block IO is waiting */
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
- conf->resync_lock, );
+ conf->resync_lock);
/* block any new IO from starting */
conf->barrier++;
@@ -830,7 +830,7 @@ static void raise_barrier(struct r1conf *conf)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, );
+ conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
}
@@ -864,8 +864,7 @@ static void wait_barrier(struct r1conf *conf)
(conf->nr_pending &&
current->bio_list &&
!bio_list_empty(current->bio_list)),
- conf->resync_lock,
- );
+ conf->resync_lock);
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -898,10 +897,10 @@ static void freeze_array(struct r1conf *conf)
spin_lock_irq(&conf->resync_lock);
conf->barrier++;
conf->nr_waiting++;
- wait_event_lock_irq(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
- conf->resync_lock,
- flush_pending_writes(conf));
+ wait_event_lock_irq_cmd(conf->wait_barrier,
+ conf->nr_pending == conf->nr_queued+1,
+ conf->resync_lock,
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
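
These raid1 hunks (and the matching raid10/raid5 ones below) move from md's old four-argument wait_event_lock_irq() macro, whose trailing command argument was usually left empty, to the generic <linux/wait.h> pair: a three-argument wait_event_lock_irq(), plus wait_event_lock_irq_cmd() for callers such as freeze_array() that really do want to run flush_pending_writes() before sleeping. A kernel-flavoured usage sketch with placeholder names:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/printk.h>

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;	/* set, under demo_lock, by some other context
			 * which then wakes demo_wq */

static void demo_wait(void)
{
	spin_lock_irq(&demo_lock);

	/* Plain variant: no extra work before sleeping; the lock is
	 * dropped around the sleep and re-taken to re-test the condition. */
	wait_event_lock_irq(demo_wq, demo_ready, demo_lock);

	/* _cmd variant: run a command each time before scheduling away,
	 * as raid1/raid10 do with flush_pending_writes(). */
	wait_event_lock_irq_cmd(demo_wq, demo_ready, demo_lock,
				pr_debug("still waiting\n"));

	spin_unlock_irq(&demo_lock);
}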
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c9acbd717131..64d48249c03b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -952,7 +952,7 @@ static void raise_barrier(struct r10conf *conf, int force)
/* Wait until no block IO is waiting (unless 'force') */
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock, );
+ conf->resync_lock);
/* block any new IO from starting */
conf->barrier++;
@@ -960,7 +960,7 @@ static void raise_barrier(struct r10conf *conf, int force)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, );
+ conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
}
@@ -993,8 +993,7 @@ static void wait_barrier(struct r10conf *conf)
(conf->nr_pending &&
current->bio_list &&
!bio_list_empty(current->bio_list)),
- conf->resync_lock,
- );
+ conf->resync_lock);
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -1027,10 +1026,10 @@ static void freeze_array(struct r10conf *conf)
spin_lock_irq(&conf->resync_lock);
conf->barrier++;
conf->nr_waiting++;
- wait_event_lock_irq(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
- conf->resync_lock,
- flush_pending_writes(conf));
+ wait_event_lock_irq_cmd(conf->wait_barrier,
+ conf->nr_pending == conf->nr_queued+1,
+ conf->resync_lock,
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3380372c0393..19d77a026639 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,8 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <trace/events/block.h>
+
#include "md.h"
#include "raid5.h"
#include "raid0.h"
@@ -182,6 +184,8 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
@@ -466,7 +470,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
do {
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0 || noquiesce,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
if (!conf->inactive_blocked)
@@ -480,8 +484,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
- conf->device_lock,
- );
+ conf->device_lock);
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
@@ -671,6 +674,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_next = NULL;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
+ trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+ bi, disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
generic_make_request(bi);
}
if (rrdev) {
@@ -698,6 +704,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
rbi->bi_next = NULL;
+ trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+ rbi, disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
generic_make_request(rbi);
}
if (!rdev && !rrdev) {
@@ -1646,8 +1655,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list),
- conf->device_lock,
- );
+ conf->device_lock);
osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock);
atomic_set(&nsh->count, 1);
@@ -2855,8 +2863,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
- if (rmw < rcw && rmw > 0)
+ if (rmw < rcw && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
+ blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
+ (unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
@@ -2867,7 +2877,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
if (
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
pr_debug("Read_old block "
- "%d for r-m-w\n", i);
+ "%d for r-m-w\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
@@ -2877,8 +2887,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
+ }
if (rcw <= rmw && rcw > 0) {
/* want reconstruct write, but need to get some data */
+ int qread =0;
rcw = 0;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -2897,12 +2909,17 @@ static void handle_stripe_dirtying(struct r5conf *conf,
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
+ qread++;
} else {
set_bit(STRIPE_DELAYED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
}
}
+ if (rcw)
+ blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
+ (unsigned long long)sh->sector,
+ rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
}
/* now if nothing is locked, and if we have enough data,
* we can start a write request
@@ -3224,10 +3241,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
}
/* done submitting copies, wait for them to complete */
- if (tx) {
- async_tx_ack(tx);
- dma_wait_for_async_tx(tx);
- }
+ async_tx_quiesce(&tx);
}
/*
@@ -3903,6 +3917,8 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
+ trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+ raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
@@ -4003,10 +4019,13 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
atomic_inc(&conf->active_aligned_reads);
spin_unlock_irq(&conf->device_lock);
+ trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+ align_bi, disk_devt(mddev->gendisk),
+ raid_bio->bi_sector);
generic_make_request(align_bi);
return 1;
} else {
@@ -4081,6 +4100,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
struct stripe_head *sh;
struct mddev *mddev = cb->cb.data;
struct r5conf *conf = mddev->private;
+ int cnt = 0;
if (cb->list.next && !list_empty(&cb->list)) {
spin_lock_irq(&conf->device_lock);
@@ -4095,9 +4115,11 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
smp_mb__before_clear_bit();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
__release_stripe(conf, sh);
+ cnt++;
}
spin_unlock_irq(&conf->device_lock);
}
+ trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
}
@@ -4355,6 +4377,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ( rw == WRITE )
md_write_end(mddev);
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
}
}
@@ -4731,8 +4755,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0)
+ if (remaining == 0) {
+ trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+ raid_bio, 0);
bio_endio(raid_bio, 0);
+ }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
@@ -6095,7 +6122,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
wait_event_lock_irq(conf->wait_for_stripe,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
conf->quiesce = 1;
spin_unlock_irq(&conf->device_lock);
/* allow reshape to continue */
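
The raid5 changes above are mostly about observability: bios are tagged with trace_block_bio_remap() before being submitted to member disks, completions back to the caller fire trace_block_bio_complete(), and blk_add_trace_msg()/trace_block_unplug() annotate the rmw/rcw decisions and plug flushing so blktrace output from a raid5 array is readable. A stripped-down sketch of the remap/complete pair — the queue and bio belong to whoever calls this; it is not the raid5 code itself:

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <trace/events/block.h>

static void demo_remap_and_submit(struct request_queue *q, struct bio *bio,
				  dev_t orig_dev, sector_t orig_sector)
{
	/* Record that this bio was redirected from (orig_dev, orig_sector)
	 * to its current bi_bdev/bi_sector before submission. The
	 * tracepoint is a no-op unless a blktrace/ftrace consumer is on. */
	trace_block_bio_remap(q, bio, orig_dev, orig_sector);
	generic_make_request(bio);
}

static void demo_complete(struct request_queue *q, struct bio *bio)
{
	trace_block_bio_complete(q, bio, 0);	/* 0 = no error */
	bio_endio(bio, 0);
}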
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 4ef0d80b57f4..8567a7a64104 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -79,8 +79,7 @@ config MEDIA_RC_SUPPORT
#
config MEDIA_CONTROLLER
- bool "Media Controller API (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Media Controller API"
depends on MEDIA_CAMERA_SUPPORT
---help---
Enable the media controller API used to query media devices internal
@@ -100,8 +99,8 @@ config VIDEO_DEV
default y
config VIDEO_V4L2_SUBDEV_API
- bool "V4L2 sub-device userspace API (EXPERIMENTAL)"
- depends on VIDEO_DEV && MEDIA_CONTROLLER && EXPERIMENTAL
+ bool "V4L2 sub-device userspace API"
+ depends on VIDEO_DEV && MEDIA_CONTROLLER
---help---
Enables the V4L2 sub-device pad-level userspace API used to configure
video format, size and frame rate between hardware blocks.
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 121b0110af3c..d2a436ce77f8 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -1,3 +1,10 @@
+# Used by common drivers, when they need to ask questions
+config MEDIA_COMMON_OPTIONS
+ bool
+
+comment "common driver options"
+ depends on MEDIA_COMMON_OPTIONS
+
source "drivers/media/common/b2c2/Kconfig"
source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
diff --git a/drivers/media/common/b2c2/Kconfig b/drivers/media/common/b2c2/Kconfig
index 1df9e578daa5..a8c6cdfaa2f5 100644
--- a/drivers/media/common/b2c2/Kconfig
+++ b/drivers/media/common/b2c2/Kconfig
@@ -17,11 +17,6 @@ config DVB_B2C2_FLEXCOP
select DVB_CX24123 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
select DVB_TUNER_CX24113 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the digital TV receiver chip made by B2C2 Inc. included in
- Technisats PCI cards and USB boxes.
-
- Say Y if you own such a device and want to use it.
# Selected via the PCI or USB flexcop drivers
config DVB_B2C2_FLEXCOP_DEBUG
diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
index 425aeadfb49d..68f0f604678e 100644
--- a/drivers/media/common/siano/Kconfig
+++ b/drivers/media/common/siano/Kconfig
@@ -4,14 +4,16 @@
config SMS_SIANO_MDTV
tristate
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
+ depends on !RC_CORE || RC_CORE
depends on SMS_USB_DRV || SMS_SDIO_DRV
default y
- ---help---
- Choose Y or M here if you have MDTV receiver with a Siano chipset.
-
- To compile this driver as a module, choose M here
- (The module will be called smsmdtv).
- Further documentation on this driver can be found on the WWW
- at http://www.siano-ms.com/
+config SMS_SIANO_RC
+ bool "Enable Remote Controller support for Siano devices"
+ depends on SMS_SIANO_MDTV && RC_CORE
+ depends on SMS_USB_DRV || SMS_SDIO_DRV
+ depends on MEDIA_COMMON_OPTIONS
+ default y
+ ---help---
+ Choose Y to select Remote Controller support for Siano driver.
diff --git a/drivers/media/common/siano/Makefile b/drivers/media/common/siano/Makefile
index 2a09279e0648..81b1e985bea5 100644
--- a/drivers/media/common/siano/Makefile
+++ b/drivers/media/common/siano/Makefile
@@ -1,7 +1,11 @@
-smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
+smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o
obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o
+ifeq ($(CONFIG_SMS_SIANO_RC),y)
+ smsmdtv-objs += smsir.o
+endif
+
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 9cc55546cc30..1842e64e6338 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -1092,7 +1092,7 @@ EXPORT_SYMBOL_GPL(smscore_onresponse);
* @return pointer to descriptor on success, NULL on error.
*/
-struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
+static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
{
struct smscore_buffer_t *cb = NULL;
unsigned long flags;
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 37bc5c4b8ad8..b8c5cad78537 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -88,7 +88,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
dev->priv = coredev;
dev->driver_type = RC_DRIVER_IR_RAW;
- dev->allowed_protos = RC_TYPE_ALL;
+ dev->allowed_protos = RC_BIT_ALL;
dev->map_name = sms_get_board(board_id)->rc_codes;
dev->driver_name = MODULE_NAME;
diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h
index ae92b3a8587e..69b59b9eee28 100644
--- a/drivers/media/common/siano/smsir.h
+++ b/drivers/media/common/siano/smsir.h
@@ -46,10 +46,19 @@ struct ir_t {
u32 controller;
};
+#ifdef CONFIG_SMS_SIANO_RC
int sms_ir_init(struct smscore_device_t *coredev);
void sms_ir_exit(struct smscore_device_t *coredev);
void sms_ir_event(struct smscore_device_t *coredev,
const char *buf, int len);
+#else
+inline static int sms_ir_init(struct smscore_device_t *coredev) {
+ return 0;
+}
+inline static void sms_ir_exit(struct smscore_device_t *coredev) {};
+inline static void sms_ir_event(struct smscore_device_t *coredev,
+ const char *buf, int len) {};
+#endif
#endif /* __SMS_IR_H__ */
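
With remote-control support now optional, smsir.h keeps the real prototypes under CONFIG_SMS_SIANO_RC and provides empty stubs otherwise, so smscoreapi.c compiles either way. The conventional spelling of this idiom is "static inline" (the hunk writes "inline static"), and the stub bodies need no trailing semicolon; a generic sketch with placeholder names:

struct demo_dev;	/* opaque to the header */

#ifdef CONFIG_DEMO_FEATURE
int demo_feature_init(struct demo_dev *dev);
void demo_feature_exit(struct demo_dev *dev);
#else
/* Feature compiled out: callers still build, stubs vanish after inlining. */
static inline int demo_feature_init(struct demo_dev *dev)
{
	return 0;
}
static inline void demo_feature_exit(struct demo_dev *dev)
{
}
#endif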
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 889c9c16c6df..d81dbb22aa81 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -877,7 +877,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
dvb_dmxdev_filter_stop(dmxdevfilter);
dvb_dmxdev_filter_reset(dmxdevfilter);
- if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
+ if ((unsigned)params->pes_type > DMX_PES_OTHER)
return -EINVAL;
dmxdevfilter->type = DMXDEV_TYPE_PES;
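
The cast above collapses two range checks into one: converting a negative pes_type to unsigned yields a huge value, so a single "> DMX_PES_OTHER" comparison rejects both negative and too-large inputs, and it avoids an always-false "< 0" test if the enum is unsigned. A small compilable demo of the trick, with a made-up limit:

#include <stdio.h>

#define DEMO_MAX 20	/* stand-in for DMX_PES_OTHER */

static int demo_valid(int type)
{
	return (unsigned)type <= DEMO_MAX;
}

int main(void)
{
	printf("%d %d %d\n", demo_valid(-1), demo_valid(5), demo_valid(21));
	/* prints: 0 1 0 */
	return 0;
}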
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 02ebe28f830d..48c6cf92ab99 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
+#include <linux/time.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/fs.h>
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 58e0220447c0..388c2eb4d747 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -250,6 +250,7 @@
#define USB_PID_TERRATEC_T3 0x10a0
#define USB_PID_TERRATEC_T5 0x10a1
#define USB_PID_NOXON_DAB_STICK 0x00b3
+#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
#define USB_PID_PINNACLE_PCTV2000E 0x022c
#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 7e92793260f0..0223ad255cb4 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1029,12 +1029,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
/* Get */
_DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1),
_DTV_CMD(DTV_API_VERSION, 0, 0),
- _DTV_CMD(DTV_CODE_RATE_HP, 0, 0),
- _DTV_CMD(DTV_CODE_RATE_LP, 0, 0),
- _DTV_CMD(DTV_GUARD_INTERVAL, 0, 0),
- _DTV_CMD(DTV_TRANSMISSION_MODE, 0, 0),
- _DTV_CMD(DTV_HIERARCHY, 0, 0),
- _DTV_CMD(DTV_INTERLEAVING, 0, 0),
_DTV_CMD(DTV_ENUM_DELSYS, 0, 0),
@@ -1042,13 +1036,11 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 1, 0),
_DTV_CMD(DTV_ATSCMH_FIC_VER, 0, 0),
- _DTV_CMD(DTV_ATSCMH_PARADE_ID, 0, 0),
_DTV_CMD(DTV_ATSCMH_NOG, 0, 0),
_DTV_CMD(DTV_ATSCMH_TNOG, 0, 0),
_DTV_CMD(DTV_ATSCMH_SGN, 0, 0),
_DTV_CMD(DTV_ATSCMH_PRC, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE, 0, 0),
- _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE, 0, 0),
@@ -1056,8 +1048,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
-
- _DTV_CMD(DTV_LNA, 0, 0),
};
static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
@@ -1830,7 +1820,7 @@ static int dvb_frontend_ioctl(struct file *file,
struct dvb_frontend *fe = dvbdev->priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- int err = -ENOTTY;
+ int err = -EOPNOTSUPP;
dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
if (fepriv->exit != DVB_FE_NO_EXIT)
@@ -1948,7 +1938,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
}
} else
- err = -ENOTTY;
+ err = -EOPNOTSUPP;
out:
kfree(tvp);
@@ -2081,7 +2071,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
struct dvb_frontend *fe = dvbdev->priv;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int err = -ENOTTY;
+ int err = -EOPNOTSUPP;
switch (cmd) {
case FE_GET_INFO: {
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index f2a90f990ce3..3d399d9a6343 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -139,7 +139,7 @@ static int cx22700_set_tps(struct cx22700_state *state,
if (p->code_rate_HP == FEC_4_5 || p->code_rate_LP == FEC_4_5)
return -EINVAL;
- if (p->guard_interval < GUARD_INTERVAL_1_32 ||
+ if ((int)p->guard_interval < GUARD_INTERVAL_1_32 ||
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
@@ -152,7 +152,7 @@ static int cx22700_set_tps(struct cx22700_state *state,
p->modulation != QAM_64)
return -EINVAL;
- if (p->hierarchy < HIERARCHY_NONE ||
+ if ((int)p->hierarchy < HIERARCHY_NONE ||
p->hierarchy > HIERARCHY_4)
return -EINVAL;
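
Several frontends in this series (cx22700 above, cx24123, l64781 and mt312 below) gain the same (int) casts: when an enum's lowest value is 0 the compiler may give it an unsigned underlying type, so a comparison like "p->guard_interval < GUARD_INTERVAL_1_32" is always false and gcc warns about it; casting to int keeps the defensive lower-bound check meaningful for bogus values. A compilable illustration:

#include <stdio.h>

enum guard { GUARD_1_32 = 0, GUARD_1_16, GUARD_1_8, GUARD_1_4 };

static int guard_valid(enum guard g)
{
	return !((int)g < GUARD_1_32 || g > GUARD_1_4);
}

int main(void)
{
	printf("%d %d\n", guard_valid(GUARD_1_8), guard_valid((enum guard)7));
	/* prints: 1 0 */
	return 0;
}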
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 7e28b4ee7d4f..68c88ab58e71 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -338,7 +338,7 @@ static int cx24123_set_fec(struct cx24123_state *state, fe_code_rate_t fec)
{
u8 nom_reg = cx24123_readreg(state, 0x0e) & ~0x07;
- if ((fec < FEC_NONE) || (fec > FEC_AUTO))
+ if (((int)fec < FEC_NONE) || (fec > FEC_AUTO))
fec = FEC_AUTO;
/* Set the soft decision threshold */
diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h
index b5781a48034c..de1cc91fd833 100644
--- a/drivers/media/dvb-frontends/dib9000.h
+++ b/drivers/media/dvb-frontends/dib9000.h
@@ -97,7 +97,7 @@ static inline int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb
return -ENODEV;
}
-int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
+static inline int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 6d9853750d2b..e71cc60851e7 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -1748,7 +1748,8 @@ static int DRX_Stop(struct drxd_state *state)
return status;
}
-int SetOperationMode(struct drxd_state *state, int oMode)
+#if 0 /* Currently unused */
+static int SetOperationMode(struct drxd_state *state, int oMode)
{
int status;
@@ -1788,6 +1789,7 @@ int SetOperationMode(struct drxd_state *state, int oMode)
state->operation_mode = oMode;
return status;
}
+#endif
static int StartDiversity(struct drxd_state *state)
{
@@ -2612,7 +2614,7 @@ static int CDRXD(struct drxd_state *state, u32 IntermediateFrequency)
return 0;
}
-int DRXD_init(struct drxd_state *state, const u8 * fw, u32 fw_size)
+static int DRXD_init(struct drxd_state *state, const u8 *fw, u32 fw_size)
{
int status = 0;
u32 driverVersion;
@@ -2774,7 +2776,7 @@ int DRXD_init(struct drxd_state *state, const u8 * fw, u32 fw_size)
return status;
}
-int DRXD_status(struct drxd_state *state, u32 * pLockStatus)
+static int DRXD_status(struct drxd_state *state, u32 *pLockStatus)
{
DRX_GetLockStatus(state, pLockStatus);
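
The drxd changes follow a pattern repeated through the rest of this series (drxk, rtl2830/2832, stb0899, stv0367, tda10071, cx25840, smscoreapi): helpers used only within one file are made static, which silences sparse/missing-prototype warnings and keeps them out of the global symbol table, while a helper with no current callers (SetOperationMode()) is fenced with #if 0 rather than deleted. A minimal illustration, not taken from any of those drivers:

/* File-local helper: static, so it exports no symbol. */
static int demo_helper(int x)
{
	return x * 2;
}

#if 0	/* currently unused, kept for reference */
static int demo_unused(int x)
{
	return x + 1;
}
#endif

int demo_entry(int x)
{
	return demo_helper(x);
}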
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index df9abe83f877..c2fc7da0d6bf 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -65,16 +65,6 @@ static bool IsQAM(struct drxk_state *state)
state->m_OperationMode == OM_QAM_ITU_C;
}
-bool IsA1WithPatchCode(struct drxk_state *state)
-{
- return state->m_DRXK_A1_PATCH_CODE;
-}
-
-bool IsA1WithRomCode(struct drxk_state *state)
-{
- return state->m_DRXK_A1_ROM_CODE;
-}
-
#define NOA1ROM 0
#define DRXDAP_FASI_SHORT_FORMAT(addr) (((addr) & 0xFC30FF80) == 0)
@@ -189,7 +179,7 @@ static inline u32 MulDiv32(u32 a, u32 b, u32 c)
return (u32) tmp64;
}
-inline u32 Frac28a(u32 a, u32 c)
+static inline u32 Frac28a(u32 a, u32 c)
{
int i = 0;
u32 Q1 = 0;
@@ -587,7 +577,7 @@ static int write_block(struct drxk_state *state, u32 Address,
#define DRXK_MAX_RETRIES_POWERUP 20
#endif
-int PowerUpDevice(struct drxk_state *state)
+static int PowerUpDevice(struct drxk_state *state)
{
int status;
u8 data = 0;
@@ -720,11 +710,6 @@ static int init_state(struct drxk_state *state)
state->m_bPowerDown = (ulPowerDown != 0);
- state->m_DRXK_A1_PATCH_CODE = false;
- state->m_DRXK_A1_ROM_CODE = false;
- state->m_DRXK_A2_ROM_CODE = false;
- state->m_DRXK_A3_ROM_CODE = false;
- state->m_DRXK_A2_PATCH_CODE = false;
state->m_DRXK_A3_PATCH_CODE = false;
/* Init AGC and PGA parameters */
@@ -921,7 +906,7 @@ static int GetDeviceCapabilities(struct drxk_state *state)
status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
if (status < 0)
goto error;
- status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
if (status < 0)
goto error;
status = read16(state, SIO_PDR_OHW_CFG__A, &sioPdrOhwCfg);
@@ -1217,7 +1202,7 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable)
goto error;
/* MPEG TS pad configuration */
- status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
if (status < 0)
goto error;
@@ -5461,6 +5446,7 @@ static int QAMDemodulatorCommand(struct drxk_state *state,
} else {
printk(KERN_WARNING "drxk: Unknown QAM demodulator parameter "
"count %d\n", numberOfParameters);
+ status = -EINVAL;
}
error:
diff --git a/drivers/media/dvb-frontends/drxk_hard.h b/drivers/media/dvb-frontends/drxk_hard.h
index 6bb9fc4a7b96..d18a896a9835 100644
--- a/drivers/media/dvb-frontends/drxk_hard.h
+++ b/drivers/media/dvb-frontends/drxk_hard.h
@@ -320,11 +320,7 @@ struct drxk_state {
u8 *m_microcode;
int m_microcode_length;
- bool m_DRXK_A1_PATCH_CODE;
- bool m_DRXK_A1_ROM_CODE;
- bool m_DRXK_A2_ROM_CODE;
- bool m_DRXK_A3_ROM_CODE;
- bool m_DRXK_A2_PATCH_CODE;
+ bool m_DRXK_A3_ROM_CODE;
bool m_DRXK_A3_PATCH_CODE;
bool m_rfmirror;
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 5b639087ce45..60a529e3833f 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -30,7 +30,6 @@
#include "ds3000.h"
static int debug;
-static int force_fw_upload;
#define dprintk(args...) \
do { \
@@ -234,7 +233,6 @@ struct ds3000_state {
struct i2c_adapter *i2c;
const struct ds3000_config *config;
struct dvb_frontend frontend;
- u8 skip_fw_load;
/* previous uncorrected block counter for DVB-S2 */
u16 prevUCBS2;
};
@@ -397,9 +395,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
if (ret < 0)
return ret;
- if (state->skip_fw_load || !force_fw_upload)
- return 0; /* Firmware already uploaded, skipping */
-
/* Load firmware */
/* request the firmware, this will block until someone uploads it */
printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__,
@@ -413,9 +408,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
return ret;
}
- /* Make sure we don't recurse back through here during loading */
- state->skip_fw_load = 1;
-
ret = ds3000_load_firmware(fe, fw);
if (ret)
printk("%s: Writing firmware to device failed\n", __func__);
@@ -425,9 +417,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
dprintk("%s: Firmware upload %s\n", __func__,
ret == 0 ? "complete" : "failed");
- /* Ensure firmware is always loaded if required */
- state->skip_fw_load = 0;
-
return ret;
}
@@ -1309,10 +1298,8 @@ static struct dvb_frontend_ops ds3000_ops = {
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-module_param(force_fw_upload, int, 0644);
-MODULE_PARM_DESC(force_fw_upload, "Force firmware upload (default:0)");
-
MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
"DS3000/TS2020 hardware");
MODULE_AUTHOR("Konstantin Dimitrov");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 36fcf559e361..ddf866c46f8b 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -180,11 +180,11 @@ static int apply_frontend_param(struct dvb_frontend *fe)
p->transmission_mode != TRANSMISSION_MODE_8K)
return -EINVAL;
- if (p->guard_interval < GUARD_INTERVAL_1_32 ||
+ if ((int)p->guard_interval < GUARD_INTERVAL_1_32 ||
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
- if (p->hierarchy < HIERARCHY_NONE ||
+ if ((int)p->hierarchy < HIERARCHY_NONE ||
p->hierarchy > HIERARCHY_4)
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index e20bf13aa860..ec388c1d6913 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -549,7 +549,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
|| (p->frequency > fe->ops.info.frequency_max))
return -EINVAL;
- if ((p->inversion < INVERSION_OFF)
+ if (((int)p->inversion < INVERSION_OFF)
|| (p->inversion > INVERSION_ON))
return -EINVAL;
@@ -557,7 +557,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
|| (p->symbol_rate > fe->ops.info.symbol_rate_max))
return -EINVAL;
- if ((p->fec_inner < FEC_NONE)
+ if (((int)p->fec_inner < FEC_NONE)
|| (p->fec_inner > FEC_AUTO))
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index b0f6ec03d1eb..362d26d11e82 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -130,7 +130,7 @@ static int rtl2830_rd_reg(struct rtl2830_priv *priv, u16 reg, u8 *val)
}
/* write single register with mask */
-int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
+static int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
{
int ret;
u8 tmp;
@@ -150,7 +150,7 @@ int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
}
/* read single register with mask */
-int rtl2830_rd_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 *val, u8 mask)
+static int rtl2830_rd_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
@@ -256,7 +256,7 @@ static int rtl2830_sleep(struct dvb_frontend *fe)
return 0;
}
-int rtl2830_get_tune_settings(struct dvb_frontend *fe,
+static int rtl2830_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 500;
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 80c8e5f1182f..73887690b046 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -265,7 +265,7 @@ static int rtl2832_rd_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val)
return rtl2832_rd_regs(priv, reg, page, val, 1);
}
-int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
+static int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
{
int ret;
@@ -305,7 +305,7 @@ err:
}
-int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
+static int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
{
int ret, i;
u8 len;
@@ -510,7 +510,7 @@ static int rtl2832_sleep(struct dvb_frontend *fe)
return 0;
}
-int rtl2832_get_tune_settings(struct dvb_frontend *fe,
+static int rtl2832_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
struct rtl2832_priv *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 79e29de87fb7..cc278b3d6d5a 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1260,7 +1260,7 @@ static inline void CONVERT32(u32 x, char *str)
*str = '\0';
}
-int stb0899_get_dev_id(struct stb0899_state *state)
+static int stb0899_get_dev_id(struct stb0899_state *state)
{
u8 chip_id, release;
u16 id;
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 2a8aaeb1112d..0c8e45949b11 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -879,7 +879,8 @@ static u8 stv0367_readbits(struct stv0367_state *state, u32 label)
return val;
}
-u8 stv0367_getbits(u8 reg, u32 label)
+#if 0 /* Currently, unused */
+static u8 stv0367_getbits(u8 reg, u32 label)
{
u8 mask, pos;
@@ -887,7 +888,7 @@ u8 stv0367_getbits(u8 reg, u32 label)
return (reg & mask) >> pos;
}
-
+#endif
static int stv0367ter_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct stv0367_state *state = fe->demodulator_priv;
@@ -1263,8 +1264,8 @@ stv0367_ter_signal_type stv0367ter_check_cpamp(struct stv0367_state *state,
return CPAMPStatus;
}
-enum
-stv0367_ter_signal_type stv0367ter_lock_algo(struct stv0367_state *state)
+static enum stv0367_ter_signal_type
+stv0367ter_lock_algo(struct stv0367_state *state)
{
enum stv0367_ter_signal_type ret_flag;
short int wd, tempo;
@@ -1528,7 +1529,7 @@ static int stv0367ter_sleep(struct dvb_frontend *fe)
return stv0367ter_standby(fe, 1);
}
-int stv0367ter_init(struct dvb_frontend *fe)
+static int stv0367ter_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
@@ -2378,9 +2379,9 @@ static u32 stv0367cab_get_adc_freq(struct dvb_frontend *fe, u32 ExtClk_Hz)
return ADCClk_Hz;
}
-enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
- u32 SymbolRate,
- enum stv0367cab_mod QAMSize)
+static enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
+ u32 SymbolRate,
+ enum stv0367cab_mod QAMSize)
{
/* Set QAM size */
stv0367_writebits(state, F367CAB_QAM_MODE, QAMSize);
@@ -2762,7 +2763,7 @@ static int stv0367cab_sleep(struct dvb_frontend *fe)
return stv0367cab_standby(fe, 1);
}
-int stv0367cab_init(struct dvb_frontend *fe)
+static int stv0367cab_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index a83bf6802345..16a4bc54dbe7 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -96,7 +96,8 @@ static int tda10071_rd_reg(struct tda10071_priv *priv, u8 reg, u8 *val)
}
/* write single register with mask */
-int tda10071_wr_reg_mask(struct tda10071_priv *priv, u8 reg, u8 val, u8 mask)
+static int tda10071_wr_reg_mask(struct tda10071_priv *priv,
+ u8 reg, u8 val, u8 mask)
{
int ret;
u8 tmp;
@@ -116,7 +117,8 @@ int tda10071_wr_reg_mask(struct tda10071_priv *priv, u8 reg, u8 val, u8 mask)
}
/* read single register with mask */
-int tda10071_rd_reg_mask(struct tda10071_priv *priv, u8 reg, u8 *val, u8 mask)
+static int tda10071_rd_reg_mask(struct tda10071_priv *priv,
+ u8 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index ad7c72e8f517..d281f77d5c28 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -32,6 +32,7 @@
#include <asm/div64.h>
#include "dvb_frontend.h"
+#include "tda18271c2dd.h"
struct SStandardParam {
s32 m_IFFrequency;
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index 4fdcd8cb7530..c2ba085e0d20 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -13,6 +13,7 @@
#ifndef _FIREDTV_H
#define _FIREDTV_H
+#include <linux/time.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/list.h>
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 18a38b38fcb8..df163800c8e1 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -3,10 +3,10 @@
*
* Copyright (C) 2008--2011 Nokia Corporation
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* Contributors:
- * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
* Tuukka Toivonen <tuukkat76@gmail.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 45ecf8db1eae..64d71fb87a96 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -540,8 +540,8 @@ static int init_device(struct i2c_client *client, struct adv7180_state *state)
return 0;
}
-static __devinit int adv7180_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adv7180_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adv7180_state *state;
struct v4l2_subdev *sd;
@@ -587,7 +587,7 @@ err:
return ret;
}
-static __devexit int adv7180_remove(struct i2c_client *client)
+static int adv7180_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7180_state *state = to_state(sd);
@@ -652,7 +652,7 @@ static struct i2c_driver adv7180_driver = {
.name = KBUILD_MODNAME,
},
.probe = adv7180_probe,
- .remove = __devexit_p(adv7180_remove),
+ .remove = adv7180_remove,
#ifdef CONFIG_PM
.suspend = adv7180_suspend,
.resume = adv7180_resume,
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index e1d4c89d7140..6fed5b74e743 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -677,22 +677,11 @@ static struct i2c_driver adv7183_driver = {
.name = "adv7183",
},
.probe = adv7183_probe,
- .remove = __devexit_p(adv7183_remove),
+ .remove = adv7183_remove,
.id_table = adv7183_id,
};
-static __init int adv7183_init(void)
-{
- return i2c_add_driver(&adv7183_driver);
-}
-
-static __exit void adv7183_exit(void)
-{
- i2c_del_driver(&adv7183_driver);
-}
-
-module_init(adv7183_init);
-module_exit(adv7183_exit);
+module_i2c_driver(adv7183_driver);
MODULE_DESCRIPTION("Analog Devices ADV7183 video decoder driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 05f8950f6f91..f47555b1000a 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -486,9 +486,19 @@ static inline int edid_read_block(struct v4l2_subdev *sd, unsigned len, u8 *val)
struct i2c_client *client = state->i2c_edid;
u8 msgbuf0[1] = { 0 };
u8 msgbuf1[256];
- struct i2c_msg msg[2] = { { client->addr, 0, 1, msgbuf0 },
- { client->addr, 0 | I2C_M_RD, len, msgbuf1 }
- };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = msgbuf0
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = msgbuf1
+ },
+ };
if (i2c_transfer(client->adapter, msg, 2) < 0)
return -EIO;
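
Rewriting the i2c_msg array with designated initializers makes each field explicit, zero-fills the rest, and stops relying on the field order of struct i2c_msg. A hedged sketch of the same write-then-read transfer shape; the helper name and error handling are illustrative only:

#include <linux/i2c.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_read_block(struct i2c_client *client, u8 reg,
			   u8 *buf, u16 len)
{
	struct i2c_msg msg[2] = {
		{
			.addr = client->addr,
			.len = 1,
			.buf = &reg,	/* write the register offset */
		},
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = len,
			.buf = buf,	/* then read len bytes back */
		},
	};

	/* i2c_transfer() returns the number of messages transferred. */
	return i2c_transfer(client->adapter, msg, 2) == 2 ? 0 : -EIO;
}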
diff --git a/drivers/media/i2c/as3645a.c b/drivers/media/i2c/as3645a.c
index 3bfdbf9d9bf1..58d523f2648f 100644
--- a/drivers/media/i2c/as3645a.c
+++ b/drivers/media/i2c/as3645a.c
@@ -713,7 +713,7 @@ static int as3645a_resume(struct device *dev)
* The number of LEDs reported in platform data is used to compute default
* limits. Parameters passed through platform data can override those limits.
*/
-static int __devinit as3645a_init_controls(struct as3645a *flash)
+static int as3645a_init_controls(struct as3645a *flash)
{
const struct as3645a_platform_data *pdata = flash->pdata;
struct v4l2_ctrl *ctrl;
@@ -804,8 +804,8 @@ static int __devinit as3645a_init_controls(struct as3645a *flash)
return flash->ctrls.error;
}
-static int __devinit as3645a_probe(struct i2c_client *client,
- const struct i2c_device_id *devid)
+static int as3645a_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
{
struct as3645a *flash;
int ret;
@@ -846,7 +846,7 @@ done:
return ret;
}
-static int __devexit as3645a_remove(struct i2c_client *client)
+static int as3645a_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct as3645a *flash = to_as3645a(subdev);
@@ -877,7 +877,7 @@ static struct i2c_driver as3645a_i2c_driver = {
.pm = &as3645a_pm_ops,
},
.probe = as3645a_probe,
- .remove = __devexit_p(as3645a_remove),
+ .remove = as3645a_remove,
.id_table = as3645a_id_table,
};
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 2cee69e34184..f4149eb4d7b4 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -2065,7 +2065,7 @@ static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status,
#define DIF_BPF_COEFF3435 (0x38c)
#define DIF_BPF_COEFF36 (0x390)
-void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
+static void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
{
u64 pll_freq;
u32 pll_freq_word;
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 04f192a0398a..08ae067b2b6f 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -284,7 +284,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
char *ir_codes = NULL;
const char *name = NULL;
- u64 rc_type = RC_TYPE_UNKNOWN;
+ u64 rc_type = RC_BIT_UNKNOWN;
struct IR_i2c *ir;
struct rc_dev *rc = NULL;
struct i2c_adapter *adap = client->adapter;
@@ -303,7 +303,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x64:
name = "Pixelview";
ir->get_key = get_key_pixelview;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x18:
@@ -311,31 +311,31 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x1a:
name = "Hauppauge";
ir->get_key = get_key_haup;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
case 0x30:
name = "KNC One";
ir->get_key = get_key_knc1;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x6b:
name = "FusionHDTV";
ir->get_key = get_key_fusionhdtv;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
case 0x40:
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
case 0x71:
name = "Hauppauge/Zilog Z8";
ir->get_key = get_key_haup_xvr;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
}
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 8131d651de9e..d4e7567b367c 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -556,7 +556,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
mutex_lock(&info->lock);
format = __find_format(info, fh, fmt->which, info->res_type);
- if (!format)
+ if (format)
fmt->format = *format;
else
ret = -EINVAL;
@@ -926,8 +926,8 @@ static irqreturn_t m5mols_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit m5mols_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int m5mols_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
const struct m5mols_platform_data *pdata = client->dev.platform_data;
struct m5mols_info *info;
@@ -1018,7 +1018,7 @@ out_free:
return ret;
}
-static int __devexit m5mols_remove(struct i2c_client *client)
+static int m5mols_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct m5mols_info *info = to_m5mols(sd);
@@ -1045,7 +1045,7 @@ static struct i2c_driver m5mols_i2c_driver = {
.name = MODULE_NAME,
},
.probe = m5mols_probe,
- .remove = __devexit_p(m5mols_remove),
+ .remove = m5mols_remove,
.id_table = m5mols_id,
};
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 49c1b3abb425..2750de634270 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -343,7 +343,7 @@ static int s5k4ecgx_load_firmware(struct v4l2_subdev *sd)
}
regs_num = le32_to_cpu(get_unaligned_le32(fw->data));
- v4l2_dbg(3, debug, sd, "FW: %s size %d register sets %d\n",
+ v4l2_dbg(3, debug, sd, "FW: %s size %zu register sets %d\n",
S5K4ECGX_FIRMWARE, fw->size, regs_num);
regs_num++; /* Add header */
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index a577614bd84f..d8d5da7c52db 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -58,7 +58,7 @@ static int bounds_check(struct device *dev, uint32_t val,
if (val >= min && val <= max)
return 0;
- dev_warn(dev, "%s out of bounds: %d (%d--%d)\n", str, val, min, max);
+ dev_dbg(dev, "%s out of bounds: %d (%d--%d)\n", str, val, min, max);
return -EINVAL;
}
@@ -87,14 +87,14 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
dev_dbg(dev, "vt_pix_clk_freq_hz \t%d\n", pll->vt_pix_clk_freq_hz);
}
-int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
- struct smiapp_pll *pll)
+static int __smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll, uint32_t mul,
+ uint32_t div, uint32_t lane_op_clock_ratio)
{
uint32_t sys_div;
uint32_t best_pix_div = INT_MAX >> 1;
uint32_t vt_op_binning_div;
- uint32_t lane_op_clock_ratio;
- uint32_t mul, div;
uint32_t more_mul_min, more_mul_max;
uint32_t more_mul_factor;
uint32_t min_vt_div, max_vt_div, vt_div;
@@ -102,54 +102,6 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
unsigned int i;
int rval;
- if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
- lane_op_clock_ratio = pll->lanes;
- else
- lane_op_clock_ratio = 1;
- dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
-
- dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
- pll->binning_vertical);
-
- /* CSI transfers 2 bits per clock per lane; thus times 2 */
- pll->pll_op_clk_freq_hz = pll->link_freq * 2
- * (pll->lanes / lane_op_clock_ratio);
-
- /* Figure out limits for pre-pll divider based on extclk */
- dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
- limits->max_pre_pll_clk_div =
- min_t(uint16_t, limits->max_pre_pll_clk_div,
- clk_div_even(pll->ext_clk_freq_hz /
- limits->min_pll_ip_freq_hz));
- limits->min_pre_pll_clk_div =
- max_t(uint16_t, limits->min_pre_pll_clk_div,
- clk_div_even_up(
- DIV_ROUND_UP(pll->ext_clk_freq_hz,
- limits->max_pll_ip_freq_hz)));
- dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
-
- i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
- mul = div_u64(pll->pll_op_clk_freq_hz, i);
- div = pll->ext_clk_freq_hz / i;
- dev_dbg(dev, "mul %d / div %d\n", mul, div);
-
- limits->min_pre_pll_clk_div =
- max_t(uint16_t, limits->min_pre_pll_clk_div,
- clk_div_even_up(
- DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
- limits->max_pll_op_freq_hz)));
- dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
-
- if (limits->min_pre_pll_clk_div > limits->max_pre_pll_clk_div) {
- dev_err(dev, "unable to compute pre_pll divisor\n");
- return -EINVAL;
- }
-
- pll->pre_pll_clk_div = limits->min_pre_pll_clk_div;
-
/*
* Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be
* too high.
@@ -162,7 +114,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_max);
/* Don't go above max pll op frequency. */
more_mul_max =
- min_t(int,
+ min_t(uint32_t,
more_mul_max,
limits->max_pll_op_freq_hz
/ (pll->ext_clk_freq_hz / pll->pre_pll_clk_div * mul));
@@ -170,7 +122,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_max);
/* Don't go above the division capability of op sys clock divider. */
more_mul_max = min(more_mul_max,
- limits->max_op_sys_clk_div * pll->pre_pll_clk_div
+ limits->op.max_sys_clk_div * pll->pre_pll_clk_div
/ div);
dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %d\n",
more_mul_max);
@@ -193,14 +145,14 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_min);
if (more_mul_min > more_mul_max) {
- dev_warn(dev,
- "unable to compute more_mul_min and more_mul_max");
+ dev_dbg(dev,
+ "unable to compute more_mul_min and more_mul_max\n");
return -EINVAL;
}
more_mul_factor = lcm(div, pll->pre_pll_clk_div) / div;
dev_dbg(dev, "more_mul_factor: %d\n", more_mul_factor);
- more_mul_factor = lcm(more_mul_factor, limits->min_op_sys_clk_div);
+ more_mul_factor = lcm(more_mul_factor, limits->op.min_sys_clk_div);
dev_dbg(dev, "more_mul_factor: min_op_sys_clk_div: %d\n",
more_mul_factor);
i = roundup(more_mul_min, more_mul_factor);
@@ -209,7 +161,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
dev_dbg(dev, "final more_mul: %d\n", i);
if (i > more_mul_max) {
- dev_warn(dev, "final more_mul is bad, max %d", more_mul_max);
+ dev_dbg(dev, "final more_mul is bad, max %d\n", more_mul_max);
return -EINVAL;
}
@@ -268,19 +220,19 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
dev_dbg(dev, "min_vt_div: %d\n", min_vt_div);
min_vt_div = max(min_vt_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->max_vt_pix_clk_freq_hz));
+ limits->vt.max_pix_clk_freq_hz));
dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %d\n",
min_vt_div);
min_vt_div = max_t(uint32_t, min_vt_div,
- limits->min_vt_pix_clk_div
- * limits->min_vt_sys_clk_div);
+ limits->vt.min_pix_clk_div
+ * limits->vt.min_sys_clk_div);
dev_dbg(dev, "min_vt_div: min_vt_clk_div: %d\n", min_vt_div);
- max_vt_div = limits->max_vt_sys_clk_div * limits->max_vt_pix_clk_div;
+ max_vt_div = limits->vt.max_sys_clk_div * limits->vt.max_pix_clk_div;
dev_dbg(dev, "max_vt_div: %d\n", max_vt_div);
max_vt_div = min(max_vt_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz));
+ limits->vt.min_pix_clk_freq_hz));
dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %d\n",
max_vt_div);
@@ -288,28 +240,28 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
* Find limitsits for sys_clk_div. Not all values are possible
* with all values of pix_clk_div.
*/
- min_sys_div = limits->min_vt_sys_clk_div;
+ min_sys_div = limits->vt.min_sys_clk_div;
dev_dbg(dev, "min_sys_div: %d\n", min_sys_div);
min_sys_div = max(min_sys_div,
DIV_ROUND_UP(min_vt_div,
- limits->max_vt_pix_clk_div));
+ limits->vt.max_pix_clk_div));
dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %d\n", min_sys_div);
min_sys_div = max(min_sys_div,
pll->pll_op_clk_freq_hz
- / limits->max_vt_sys_clk_freq_hz);
+ / limits->vt.max_sys_clk_freq_hz);
dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %d\n", min_sys_div);
min_sys_div = clk_div_even_up(min_sys_div);
dev_dbg(dev, "min_sys_div: one or even: %d\n", min_sys_div);
- max_sys_div = limits->max_vt_sys_clk_div;
+ max_sys_div = limits->vt.max_sys_clk_div;
dev_dbg(dev, "max_sys_div: %d\n", max_sys_div);
max_sys_div = min(max_sys_div,
DIV_ROUND_UP(max_vt_div,
- limits->min_vt_pix_clk_div));
+ limits->vt.min_pix_clk_div));
dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %d\n", max_sys_div);
max_sys_div = min(max_sys_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz));
+ limits->vt.min_pix_clk_freq_hz));
dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %d\n", max_sys_div);
/*
@@ -322,15 +274,15 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
for (sys_div = min_sys_div;
sys_div <= max_sys_div;
sys_div += 2 - (sys_div & 1)) {
- int pix_div = DIV_ROUND_UP(vt_div, sys_div);
+ uint16_t pix_div = DIV_ROUND_UP(vt_div, sys_div);
- if (pix_div < limits->min_vt_pix_clk_div
- || pix_div > limits->max_vt_pix_clk_div) {
+ if (pix_div < limits->vt.min_pix_clk_div
+ || pix_div > limits->vt.max_pix_clk_div) {
dev_dbg(dev,
"pix_div %d too small or too big (%d--%d)\n",
pix_div,
- limits->min_vt_pix_clk_div,
- limits->max_vt_pix_clk_div);
+ limits->vt.min_pix_clk_div,
+ limits->vt.max_pix_clk_div);
continue;
}
@@ -354,16 +306,10 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
pll->pixel_rate_csi =
pll->op_pix_clk_freq_hz * lane_op_clock_ratio;
- print_pll(dev, pll);
-
- rval = bounds_check(dev, pll->pre_pll_clk_div,
- limits->min_pre_pll_clk_div,
- limits->max_pre_pll_clk_div, "pre_pll_clk_div");
- if (!rval)
- rval = bounds_check(
- dev, pll->pll_ip_clk_freq_hz,
- limits->min_pll_ip_freq_hz, limits->max_pll_ip_freq_hz,
- "pll_ip_clk_freq_hz");
+ rval = bounds_check(dev, pll->pll_ip_clk_freq_hz,
+ limits->min_pll_ip_freq_hz,
+ limits->max_pll_ip_freq_hz,
+ "pll_ip_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->pll_multiplier,
@@ -377,42 +323,121 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
if (!rval)
rval = bounds_check(
dev, pll->op_sys_clk_div,
- limits->min_op_sys_clk_div, limits->max_op_sys_clk_div,
+ limits->op.min_sys_clk_div, limits->op.max_sys_clk_div,
"op_sys_clk_div");
if (!rval)
rval = bounds_check(
dev, pll->op_pix_clk_div,
- limits->min_op_pix_clk_div, limits->max_op_pix_clk_div,
+ limits->op.min_pix_clk_div, limits->op.max_pix_clk_div,
"op_pix_clk_div");
if (!rval)
rval = bounds_check(
dev, pll->op_sys_clk_freq_hz,
- limits->min_op_sys_clk_freq_hz,
- limits->max_op_sys_clk_freq_hz,
+ limits->op.min_sys_clk_freq_hz,
+ limits->op.max_sys_clk_freq_hz,
"op_sys_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->op_pix_clk_freq_hz,
- limits->min_op_pix_clk_freq_hz,
- limits->max_op_pix_clk_freq_hz,
+ limits->op.min_pix_clk_freq_hz,
+ limits->op.max_pix_clk_freq_hz,
"op_pix_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->vt_sys_clk_freq_hz,
- limits->min_vt_sys_clk_freq_hz,
- limits->max_vt_sys_clk_freq_hz,
+ limits->vt.min_sys_clk_freq_hz,
+ limits->vt.max_sys_clk_freq_hz,
"vt_sys_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->vt_pix_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz,
- limits->max_vt_pix_clk_freq_hz,
+ limits->vt.min_pix_clk_freq_hz,
+ limits->vt.max_pix_clk_freq_hz,
"vt_pix_clk_freq_hz");
return rval;
}
+
+int smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll)
+{
+ uint16_t min_pre_pll_clk_div;
+ uint16_t max_pre_pll_clk_div;
+ uint32_t lane_op_clock_ratio;
+ uint32_t mul, div;
+ unsigned int i;
+ int rval = -EINVAL;
+
+ if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
+ lane_op_clock_ratio = pll->csi2.lanes;
+ else
+ lane_op_clock_ratio = 1;
+ dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
+
+ dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
+ pll->binning_vertical);
+
+ switch (pll->bus_type) {
+ case SMIAPP_PLL_BUS_TYPE_CSI2:
+ /* CSI transfers 2 bits per clock per lane; thus times 2 */
+ pll->pll_op_clk_freq_hz = pll->link_freq * 2
+ * (pll->csi2.lanes / lane_op_clock_ratio);
+ break;
+ case SMIAPP_PLL_BUS_TYPE_PARALLEL:
+ pll->pll_op_clk_freq_hz = pll->link_freq * pll->bits_per_pixel
+ / DIV_ROUND_UP(pll->bits_per_pixel,
+ pll->parallel.bus_width);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Figure out limits for pre-pll divider based on extclk */
+ dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+ max_pre_pll_clk_div =
+ min_t(uint16_t, limits->max_pre_pll_clk_div,
+ clk_div_even(pll->ext_clk_freq_hz /
+ limits->min_pll_ip_freq_hz));
+ min_pre_pll_clk_div =
+ max_t(uint16_t, limits->min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(pll->ext_clk_freq_hz,
+ limits->max_pll_ip_freq_hz)));
+ dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
+ min_pre_pll_clk_div, max_pre_pll_clk_div);
+
+ i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
+ mul = div_u64(pll->pll_op_clk_freq_hz, i);
+ div = pll->ext_clk_freq_hz / i;
+ dev_dbg(dev, "mul %d / div %d\n", mul, div);
+
+ min_pre_pll_clk_div =
+ max_t(uint16_t, min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
+ limits->max_pll_op_freq_hz)));
+ dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
+ min_pre_pll_clk_div, max_pre_pll_clk_div);
+
+ for (pll->pre_pll_clk_div = min_pre_pll_clk_div;
+ pll->pre_pll_clk_div <= max_pre_pll_clk_div;
+ pll->pre_pll_clk_div += 2 - (pll->pre_pll_clk_div & 1)) {
+ rval = __smiapp_pll_calculate(dev, limits, pll, mul, div,
+ lane_op_clock_ratio);
+ if (rval)
+ continue;
+
+ print_pll(dev, pll);
+ return 0;
+ }
+
+ dev_info(dev, "unable to compute pre_pll divisor\n");
+ return rval;
+}
EXPORT_SYMBOL_GPL(smiapp_pll_calculate);
-MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
MODULE_DESCRIPTION("Generic SMIA/SMIA++ PLL calculator");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/smiapp-pll.h b/drivers/media/i2c/smiapp-pll.h
index cb2d2db5d02d..a4a649834a18 100644
--- a/drivers/media/i2c/smiapp-pll.h
+++ b/drivers/media/i2c/smiapp-pll.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -27,16 +27,34 @@
#include <linux/device.h>
+/* CSI-2 or CCP-2 */
+#define SMIAPP_PLL_BUS_TYPE_CSI2 0x00
+#define SMIAPP_PLL_BUS_TYPE_PARALLEL 0x01
+
+/* op pix clock is for all lanes in total normally */
+#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
+#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
+
struct smiapp_pll {
- uint8_t lanes;
+ /* input values */
+ uint8_t bus_type;
+ union {
+ struct {
+ uint8_t lanes;
+ } csi2;
+ struct {
+ uint8_t bus_width;
+ } parallel;
+ };
+ uint8_t flags;
uint8_t binning_horizontal;
uint8_t binning_vertical;
uint8_t scale_m;
uint8_t scale_n;
uint8_t bits_per_pixel;
- uint16_t flags;
uint32_t link_freq;
+ /* output values */
uint16_t pre_pll_clk_div;
uint16_t pll_multiplier;
uint16_t op_sys_clk_div;
@@ -55,6 +73,17 @@ struct smiapp_pll {
uint32_t pixel_rate_csi;
};
+struct smiapp_pll_branch_limits {
+ uint16_t min_sys_clk_div;
+ uint16_t max_sys_clk_div;
+ uint32_t min_sys_clk_freq_hz;
+ uint32_t max_sys_clk_freq_hz;
+ uint16_t min_pix_clk_div;
+ uint16_t max_pix_clk_div;
+ uint32_t min_pix_clk_freq_hz;
+ uint32_t max_pix_clk_freq_hz;
+};
+
struct smiapp_pll_limits {
/* Strict PLL limits */
uint32_t min_ext_clk_freq_hz;
@@ -68,36 +97,18 @@ struct smiapp_pll_limits {
uint32_t min_pll_op_freq_hz;
uint32_t max_pll_op_freq_hz;
- uint16_t min_vt_sys_clk_div;
- uint16_t max_vt_sys_clk_div;
- uint32_t min_vt_sys_clk_freq_hz;
- uint32_t max_vt_sys_clk_freq_hz;
- uint16_t min_vt_pix_clk_div;
- uint16_t max_vt_pix_clk_div;
- uint32_t min_vt_pix_clk_freq_hz;
- uint32_t max_vt_pix_clk_freq_hz;
-
- uint16_t min_op_sys_clk_div;
- uint16_t max_op_sys_clk_div;
- uint32_t min_op_sys_clk_freq_hz;
- uint32_t max_op_sys_clk_freq_hz;
- uint16_t min_op_pix_clk_div;
- uint16_t max_op_pix_clk_div;
- uint32_t min_op_pix_clk_freq_hz;
- uint32_t max_op_pix_clk_freq_hz;
+ struct smiapp_pll_branch_limits vt;
+ struct smiapp_pll_branch_limits op;
/* Other relevant limits */
uint32_t min_line_length_pck_bin;
uint32_t min_line_length_pck;
};
-/* op pix clock is for all lanes in total normally */
-#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
-#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
-
struct device;
-int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
+int smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
struct smiapp_pll *pll);
#endif /* SMIAPP_PLL_H */
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index e08e588ad24b..83c7ed7ffcc2 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2010--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* Based on smiapp driver by Vimarsh Zutshi
* Based on jt8ev1.c by Vimarsh Zutshi
@@ -252,23 +252,23 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
.min_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ],
.max_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ],
- .min_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
- .max_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
- .min_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
- .max_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
- .min_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
- .max_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
- .min_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
- .max_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
-
- .min_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
- .max_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
- .min_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
- .max_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
- .min_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
- .max_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
- .min_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
- .max_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
+ .op.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
+ .op.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
+ .op.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
+ .op.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
+ .op.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
+ .op.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
+ .op.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
+ .op.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
+
+ .vt.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
+ .vt.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
+ .vt.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
+ .vt.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
+ .vt.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
+ .vt.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
+ .vt.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
+ .vt.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
.min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN],
.min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK],
@@ -276,11 +276,6 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
struct smiapp_pll *pll = &sensor->pll;
int rval;
- memset(&sensor->pll, 0, sizeof(sensor->pll));
-
- pll->lanes = sensor->platform_data->lanes;
- pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
-
if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) {
/*
* Fill in operational clock divisors limits from the
@@ -288,28 +283,14 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
* requirements regarding them are essentially the
* same as on VT ones.
*/
- lim.min_op_sys_clk_div = lim.min_vt_sys_clk_div;
- lim.max_op_sys_clk_div = lim.max_vt_sys_clk_div;
- lim.min_op_pix_clk_div = lim.min_vt_pix_clk_div;
- lim.max_op_pix_clk_div = lim.max_vt_pix_clk_div;
- lim.min_op_sys_clk_freq_hz = lim.min_vt_sys_clk_freq_hz;
- lim.max_op_sys_clk_freq_hz = lim.max_vt_sys_clk_freq_hz;
- lim.min_op_pix_clk_freq_hz = lim.min_vt_pix_clk_freq_hz;
- lim.max_op_pix_clk_freq_hz = lim.max_vt_pix_clk_freq_hz;
- /* Profile 0 sensors have no separate OP clock branch. */
- pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+ lim.op = lim.vt;
}
- if (smiapp_needs_quirk(sensor,
- SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
- pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
-
pll->binning_horizontal = sensor->binning_horizontal;
pll->binning_vertical = sensor->binning_vertical;
pll->link_freq =
sensor->link_freq->qmenu_int[sensor->link_freq->val];
pll->scale_m = sensor->scale_m;
- pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
pll->bits_per_pixel = sensor->csi_format->compressed;
rval = smiapp_pll_calculate(&client->dev, &lim, pll);
@@ -1010,7 +991,7 @@ static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
* do not change, or if you do at least know what you're
* doing. :-)
*
- * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com> 2010-10-25
+ * Sakari Ailus <sakari.ailus@iki.fi> 2010-10-25
*
* flash_strobe_length [us] / 10^6 = (tFlash_strobe_width_ctrl
* / EXTCLK freq [Hz]) * flash_strobe_adjustment
@@ -2369,6 +2350,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_pll *pll = &sensor->pll;
struct smiapp_subdev *last = NULL;
u32 tmp;
unsigned int i;
@@ -2635,6 +2617,18 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
if (rval < 0)
goto out_nvm_release;
+ /* prepare PLL configuration input values */
+ pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
+ pll->csi2.lanes = sensor->platform_data->lanes;
+ pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
+ /* Profile 0 sensors have no separate OP clock branch. */
+ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+ pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+ if (smiapp_needs_quirk(sensor,
+ SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
+ pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+ pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+
rval = smiapp_update_mode(sensor);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
@@ -2893,6 +2887,6 @@ static struct i2c_driver smiapp_i2c_driver = {
module_i2c_driver(smiapp_i2c_driver);
-MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
MODULE_DESCRIPTION("Generic SMIA/SMIA++ camera module driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/smiapp/smiapp-limits.c b/drivers/media/i2c/smiapp/smiapp-limits.c
index fb2f81ad8c3b..847cb235e198 100644
--- a/drivers/media/i2c/smiapp/smiapp-limits.c
+++ b/drivers/media/i2c/smiapp/smiapp-limits.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-limits.h b/drivers/media/i2c/smiapp/smiapp-limits.h
index 9ae765e23ea5..343e9c3827fc 100644
--- a/drivers/media/i2c/smiapp/smiapp-limits.h
+++ b/drivers/media/i2c/smiapp/smiapp-limits.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c
index 725cf62836c6..bb8c506e0e3d 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.c
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.h b/drivers/media/i2c/smiapp/smiapp-quirk.h
index 86fd3e8bfb0f..504a6d80ced5 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.h
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-reg-defs.h b/drivers/media/i2c/smiapp/smiapp-reg-defs.h
index defa7c5adebf..3aa0ca948d87 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg-defs.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg-defs.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index 54568ca2fe6d..b0dcbb8fa5e2 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 70e0d8db0130..4fac32cfcb3f 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.h b/drivers/media/i2c/smiapp/smiapp-regs.h
index 7f9013b47971..eefc6c84d5fe 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.h
+++ b/drivers/media/i2c/smiapp/smiapp-regs.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 4182a695ab53..7cc5aae662fd 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2010--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index 333ef178d6fb..d40a8858be01 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -15,6 +15,7 @@
#include <linux/log2.h>
#include <linux/module.h>
+#include <media/mt9v022.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
@@ -50,6 +51,7 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
#define MT9V022_PIXEL_OPERATION_MODE 0x0f
#define MT9V022_LED_OUT_CONTROL 0x1b
#define MT9V022_ADC_MODE_CONTROL 0x1c
+#define MT9V022_REG32 0x20
#define MT9V022_ANALOG_GAIN 0x35
#define MT9V022_BLACK_LEVEL_CALIB_CTRL 0x47
#define MT9V022_PIXCLK_FV_LV 0x74
@@ -71,7 +73,15 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
#define MT9V022_COLUMN_SKIP 1
#define MT9V022_ROW_SKIP 4
-#define is_mt9v024(id) (id == 0x1324)
+#define MT9V022_HORIZONTAL_BLANKING_MIN 43
+#define MT9V022_HORIZONTAL_BLANKING_MAX 1023
+#define MT9V022_HORIZONTAL_BLANKING_DEF 94
+#define MT9V022_VERTICAL_BLANKING_MIN 2
+#define MT9V022_VERTICAL_BLANKING_MAX 3000
+#define MT9V022_VERTICAL_BLANKING_DEF 45
+
+#define is_mt9v022_rev3(id) (id == 0x1313)
+#define is_mt9v024(id) (id == 0x1324)
/* MT9V022 has only one fixed colorspace per pixelcode */
struct mt9v022_datafmt {
@@ -136,6 +146,8 @@ struct mt9v022 {
struct v4l2_ctrl *autogain;
struct v4l2_ctrl *gain;
};
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
struct v4l2_rect rect; /* Sensor window */
const struct mt9v022_datafmt *fmt;
const struct mt9v022_datafmt *fmts;
@@ -143,6 +155,7 @@ struct mt9v022 {
int num_fmts;
int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */
u16 chip_control;
+ u16 chip_version;
unsigned short y_skip_top; /* Lines to skip at the top */
};
@@ -225,12 +238,32 @@ static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
- if (enable)
+ if (enable) {
/* Switch to master "normal" mode */
mt9v022->chip_control &= ~0x10;
- else
+ if (is_mt9v022_rev3(mt9v022->chip_version) ||
+ is_mt9v024(mt9v022->chip_version)) {
+ /*
+ * Unset snapshot mode specific settings: clear bit 9
+ * and bit 2 in reg. 0x20 when in normal mode.
+ */
+ if (reg_clear(client, MT9V022_REG32, 0x204))
+ return -EIO;
+ }
+ } else {
/* Switch to snapshot mode */
mt9v022->chip_control |= 0x10;
+ if (is_mt9v022_rev3(mt9v022->chip_version) ||
+ is_mt9v024(mt9v022->chip_version)) {
+ /*
+ * Required settings for snapshot mode: set bit 9
+ * (RST enable) and bit 2 (CR enable) in reg. 0x20.
+ * See TechNote TN0960 or TN-09-225.
+ */
+ if (reg_set(client, MT9V022_REG32, 0x204))
+ return -EIO;
+ }
+ }
if (reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control) < 0)
return -EIO;
@@ -282,11 +315,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
* Default 94, Phytec driver says:
* "width + horizontal blank >= 660"
*/
- ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING,
- rect.width > 660 - 43 ? 43 :
- 660 - rect.width);
+ ret = v4l2_ctrl_s_ctrl(mt9v022->hblank,
+ rect.width > 660 - 43 ? 43 : 660 - rect.width);
if (!ret)
- ret = reg_write(client, MT9V022_VERTICAL_BLANKING, 45);
+ ret = v4l2_ctrl_s_ctrl(mt9v022->vblank, 45);
if (!ret)
ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width);
if (!ret)
@@ -509,6 +541,18 @@ static int mt9v022_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
range = exp->maximum - exp->minimum;
exp->val = ((data - 1) * range + 239) / 479 + exp->minimum;
return 0;
+ case V4L2_CID_HBLANK:
+ data = reg_read(client, MT9V022_HORIZONTAL_BLANKING);
+ if (data < 0)
+ return -EIO;
+ ctrl->val = data;
+ return 0;
+ case V4L2_CID_VBLANK:
+ data = reg_read(client, MT9V022_VERTICAL_BLANKING);
+ if (data < 0)
+ return -EIO;
+ ctrl->val = data;
+ return 0;
}
return -EINVAL;
}
@@ -590,6 +634,16 @@ static int mt9v022_s_ctrl(struct v4l2_ctrl *ctrl)
return -EIO;
}
return 0;
+ case V4L2_CID_HBLANK:
+ if (reg_write(client, MT9V022_HORIZONTAL_BLANKING,
+ ctrl->val) < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_VBLANK:
+ if (reg_write(client, MT9V022_VERTICAL_BLANKING,
+ ctrl->val) < 0)
+ return -EIO;
+ return 0;
}
return -EINVAL;
}
@@ -621,6 +675,8 @@ static int mt9v022_video_probe(struct i2c_client *client)
goto ei2c;
}
+ mt9v022->chip_version = data;
+
mt9v022->reg = is_mt9v024(data) ? &mt9v024_register :
&mt9v022_register;
@@ -819,6 +875,7 @@ static int mt9v022_probe(struct i2c_client *client,
struct mt9v022 *mt9v022;
struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct mt9v022_platform_data *pdata = icl->priv;
int ret;
if (!icl) {
@@ -857,10 +914,21 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->exposure = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
V4L2_CID_EXPOSURE, 1, 255, 1, 255);
+ mt9v022->hblank = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_HBLANK, MT9V022_HORIZONTAL_BLANKING_MIN,
+ MT9V022_HORIZONTAL_BLANKING_MAX, 1,
+ MT9V022_HORIZONTAL_BLANKING_DEF);
+
+ mt9v022->vblank = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_VBLANK, MT9V022_VERTICAL_BLANKING_MIN,
+ MT9V022_VERTICAL_BLANKING_MAX, 1,
+ MT9V022_VERTICAL_BLANKING_DEF);
+
mt9v022->subdev.ctrl_handler = &mt9v022->hdl;
if (mt9v022->hdl.error) {
int err = mt9v022->hdl.error;
+ dev_err(&client->dev, "control initialisation err %d\n", err);
kfree(mt9v022);
return err;
}
@@ -871,10 +939,10 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT;
/*
- * MT9V022 _really_ corrupts the first read out line.
- * TODO: verify on i.MX31
+ * On some platforms the first read out line is corrupted.
+ * Work around it by skipping if indicated by platform data.
*/
- mt9v022->y_skip_top = 1;
+ mt9v022->y_skip_top = pdata ? pdata->y_skip_top : 0;
mt9v022->rect.left = MT9V022_COLUMN_SKIP;
mt9v022->rect.top = MT9V022_ROW_SKIP;
mt9v022->rect.width = MT9V022_MAX_WIDTH;
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index d2d298b6354e..66698a83bda2 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -586,9 +586,20 @@ static const struct regval_list ov2640_format_change_preamble_regs[] = {
ENDMARKER,
};
-static const struct regval_list ov2640_yuv422_regs[] = {
+static const struct regval_list ov2640_yuyv_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_YUV422 },
+ { 0xd7, 0x03 },
+ { 0x33, 0xa0 },
+ { 0xe5, 0x1f },
+ { 0xe1, 0x67 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_uyvy_regs[] = {
{ IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_YUV422 },
- { 0xD7, 0x01 },
+ { 0xd7, 0x01 },
{ 0x33, 0xa0 },
{ 0xe1, 0x67 },
{ RESET, 0x00 },
@@ -596,7 +607,15 @@ static const struct regval_list ov2640_yuv422_regs[] = {
ENDMARKER,
};
-static const struct regval_list ov2640_rgb565_regs[] = {
+static const struct regval_list ov2640_rgb565_be_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_RGB565 },
+ { 0xd7, 0x03 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_rgb565_le_regs[] = {
{ IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_RGB565 },
{ 0xd7, 0x03 },
{ RESET, 0x00 },
@@ -605,7 +624,9 @@ static const struct regval_list ov2640_rgb565_regs[] = {
};
static enum v4l2_mbus_pixelcode ov2640_codes[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_RGB565_2X8_BE,
V4L2_MBUS_FMT_RGB565_2X8_LE,
};
@@ -787,14 +808,22 @@ static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height,
/* select format */
priv->cfmt_code = 0;
switch (code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__);
+ selected_cfmt_regs = ov2640_rgb565_be_regs;
+ break;
case V4L2_MBUS_FMT_RGB565_2X8_LE:
- dev_dbg(&client->dev, "%s: Selected cfmt RGB565", __func__);
- selected_cfmt_regs = ov2640_rgb565_regs;
+ dev_dbg(&client->dev, "%s: Selected cfmt RGB565 LE", __func__);
+ selected_cfmt_regs = ov2640_rgb565_le_regs;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ dev_dbg(&client->dev, "%s: Selected cfmt YUYV (YUV422)", __func__);
+ selected_cfmt_regs = ov2640_yuyv_regs;
break;
default:
case V4L2_MBUS_FMT_UYVY8_2X8:
- dev_dbg(&client->dev, "%s: Selected cfmt YUV422", __func__);
- selected_cfmt_regs = ov2640_yuv422_regs;
+ dev_dbg(&client->dev, "%s: Selected cfmt UYVY", __func__);
+ selected_cfmt_regs = ov2640_uyvy_regs;
}
/* reset hardware */
@@ -859,10 +888,12 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
mf->code = priv->cfmt_code;
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -879,11 +910,13 @@ static int ov2640_s_fmt(struct v4l2_subdev *sd,
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -896,21 +929,21 @@ static int ov2640_s_fmt(struct v4l2_subdev *sd,
static int ov2640_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- const struct ov2640_win_size *win;
-
/*
- * select suitable win
+ * select suitable win, but don't store it
*/
- win = ov2640_select_win(&mf->width, &mf->height);
+ ov2640_select_win(&mf->width, &mf->height);
mf->field = V4L2_FIELD_NONE;
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 42ae9dc9c574..9ac1b8c3a837 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -788,7 +788,7 @@ static const struct v4l2_subdev_ops vs6624_ops = {
.video = &vs6624_video_ops,
};
-static int __devinit vs6624_probe(struct i2c_client *client,
+static int vs6624_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct vs6624 *sensor;
@@ -881,7 +881,7 @@ static int __devinit vs6624_probe(struct i2c_client *client,
return ret;
}
-static int __devexit vs6624_remove(struct i2c_client *client)
+static int vs6624_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct vs6624 *sensor = to_vs6624(sd);
@@ -906,22 +906,11 @@ static struct i2c_driver vs6624_driver = {
.name = "vs6624",
},
.probe = vs6624_probe,
- .remove = __devexit_p(vs6624_remove),
+ .remove = vs6624_remove,
.id_table = vs6624_id,
};
-static __init int vs6624_init(void)
-{
- return i2c_add_driver(&vs6624_driver);
-}
-
-static __exit void vs6624_exit(void)
-{
- i2c_del_driver(&vs6624_driver);
-}
-
-module_init(vs6624_init);
-module_exit(vs6624_exit);
+module_i2c_driver(vs6624_driver);
MODULE_DESCRIPTION("VS6624 sensor driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/media/mmc/siano/Kconfig b/drivers/media/mmc/siano/Kconfig
index fa62475be3bf..aa05ad3c1ccb 100644
--- a/drivers/media/mmc/siano/Kconfig
+++ b/drivers/media/mmc/siano/Kconfig
@@ -4,7 +4,8 @@
config SMS_SDIO_DRV
tristate "Siano SMS1xxx based MDTV via SDIO interface"
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
depends on MMC
+ select MEDIA_COMMON_OPTIONS
---help---
Choose if you would like to have Siano's support for SDIO interface
diff --git a/drivers/media/mmc/siano/smssdio.c b/drivers/media/mmc/siano/smssdio.c
index d6f3f100699a..15d34935e00b 100644
--- a/drivers/media/mmc/siano/smssdio.c
+++ b/drivers/media/mmc/siano/smssdio.c
@@ -50,7 +50,7 @@
#define SMSSDIO_INT 0x04
#define SMSSDIO_BLOCK_SIZE 128
-static const struct sdio_device_id smssdio_ids[] __devinitconst = {
+static const struct sdio_device_id smssdio_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
.driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
@@ -224,7 +224,7 @@ static void smssdio_interrupt(struct sdio_func *func)
smscore_onresponse(smsdev->coredev, cb);
}
-static int __devinit smssdio_probe(struct sdio_func *func,
+static int smssdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int ret;
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index b34fa95185e4..66eb0baab0e9 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -391,7 +391,7 @@ EXPORT_SYMBOL(bt878_device_control);
.driver_data = (unsigned long) name \
}
-static struct pci_device_id bt878_pci_tbl[] __devinitdata = {
+static struct pci_device_id bt878_pci_tbl[] = {
BROOKTREE_878_DEVICE(0x0071, 0x0101, "Nebula Electronics DigiTV"),
BROOKTREE_878_DEVICE(0x1461, 0x0761, "AverMedia AverTV DVB-T 761"),
BROOKTREE_878_DEVICE(0x11bd, 0x001c, "Pinnacle PCTV Sat"),
@@ -410,7 +410,7 @@ static struct pci_device_id bt878_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, bt878_pci_tbl);
-static const char * __devinit card_name(const struct pci_device_id *id)
+static const char * card_name(const struct pci_device_id *id)
{
return id->driver_data ? (const char *)id->driver_data : "Unknown";
}
@@ -419,8 +419,7 @@ static const char * __devinit card_name(const struct pci_device_id *id)
/* PCI device handling */
/***********************/
-static int __devinit bt878_probe(struct pci_dev *dev,
- const struct pci_device_id *pci_id)
+static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
{
int result = 0;
unsigned char lat;
@@ -529,7 +528,7 @@ static int __devinit bt878_probe(struct pci_dev *dev,
return result;
}
-static void __devexit bt878_remove(struct pci_dev *pci_dev)
+static void bt878_remove(struct pci_dev *pci_dev)
{
u8 command;
struct bt878 *bt = pci_get_drvdata(pci_dev);
@@ -573,7 +572,7 @@ static struct pci_driver bt878_pci_driver = {
.name = "bt878",
.id_table = bt878_pci_tbl,
.probe = bt878_probe,
- .remove = __devexit_p(bt878_remove),
+ .remove = bt878_remove,
};
/*******************************/
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 38952faaffda..c4c59175e52c 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -87,7 +87,7 @@ static int tea5757_read(struct bttv *btv);
static int tea5757_write(struct bttv *btv, int value);
static void identify_by_eeprom(struct bttv *btv,
unsigned char eeprom_data[256]);
-static int __devinit pvr_boot(struct bttv *btv);
+static int pvr_boot(struct bttv *btv);
/* config variables */
static unsigned int triton1;
@@ -151,7 +151,7 @@ static struct CARD {
unsigned id;
int cardnr;
char *name;
-} cards[] __devinitdata = {
+} cards[] = {
{ 0x13eb0070, BTTV_BOARD_HAUPPAUGE878, "Hauppauge WinTV" },
{ 0x39000070, BTTV_BOARD_HAUPPAUGE878, "Hauppauge WinTV-D" },
{ 0x45000070, BTTV_BOARD_HAUPPAUGEPVR, "Hauppauge WinTV/PVR" },
@@ -2837,7 +2837,7 @@ static unsigned char eeprom_data[256];
/*
* identify card
*/
-void __devinit bttv_idcard(struct bttv *btv)
+void bttv_idcard(struct bttv *btv)
{
unsigned int gpiobits;
int i,type;
@@ -3235,7 +3235,7 @@ static void bttv_reset_audio(struct bttv *btv)
}
/* initialization part one -- before registering i2c bus */
-void __devinit bttv_init_card1(struct bttv *btv)
+void bttv_init_card1(struct bttv *btv)
{
switch (btv->c.type) {
case BTTV_BOARD_HAUPPAUGE:
@@ -3267,7 +3267,7 @@ void __devinit bttv_init_card1(struct bttv *btv)
}
/* initialization part two -- after registering i2c bus */
-void __devinit bttv_init_card2(struct bttv *btv)
+void bttv_init_card2(struct bttv *btv)
{
btv->tuner_type = UNSET;
@@ -3571,7 +3571,7 @@ no_audio:
/* initialize the tuner */
-void __devinit bttv_init_tuner(struct bttv *btv)
+void bttv_init_tuner(struct bttv *btv)
{
int addr = ADDR_UNSET;
@@ -3635,7 +3635,7 @@ static void modtec_eeprom(struct bttv *btv)
}
}
-static void __devinit hauppauge_eeprom(struct bttv *btv)
+static void hauppauge_eeprom(struct bttv *btv)
{
struct tveeprom tv;
@@ -3709,8 +3709,7 @@ static int terratec_active_radio_upgrade(struct bttv *btv)
#define BTTV_ALT_DCLK 0x100000
#define BTTV_ALT_NCONFIG 0x800000
-static int __devinit pvr_altera_load(struct bttv *btv, const u8 *micro,
- u32 microlen)
+static int pvr_altera_load(struct bttv *btv, const u8 *micro, u32 microlen)
{
u32 n;
u8 bits;
@@ -3747,7 +3746,7 @@ static int __devinit pvr_altera_load(struct bttv *btv, const u8 *micro,
return 0;
}
-static int __devinit pvr_boot(struct bttv *btv)
+static int pvr_boot(struct bttv *btv)
{
const struct firmware *fw_entry;
int rc;
@@ -3767,7 +3766,7 @@ static int __devinit pvr_boot(struct bttv *btv)
/* ----------------------------------------------------------------------- */
/* some osprey specific stuff */
-static void __devinit osprey_eeprom(struct bttv *btv, const u8 ee[256])
+static void osprey_eeprom(struct bttv *btv, const u8 ee[256])
{
int i;
u32 serial = 0;
@@ -3898,7 +3897,7 @@ static int tuner_1_table[] = {
TUNER_TEMIC_4012FY5, TUNER_TEMIC_4012FY5, /* TUNER_TEMIC_SECAM */
TUNER_TEMIC_4012FY5, TUNER_TEMIC_PAL};
-static void __devinit avermedia_eeprom(struct bttv *btv)
+static void avermedia_eeprom(struct bttv *btv)
{
int tuner_make, tuner_tv_fm, tuner_format, tuner_type = 0;
@@ -3960,7 +3959,7 @@ u32 bttv_tda9880_setnorm(struct bttv *btv, u32 gpiobits)
* Hauppauge: pin 5
* Voodoo: pin 20
*/
-static void __devinit boot_msp34xx(struct bttv *btv, int pin)
+static void boot_msp34xx(struct bttv *btv, int pin)
{
int mask = (1 << pin);
@@ -3983,11 +3982,10 @@ static void __devinit boot_msp34xx(struct bttv *btv, int pin)
* used by Alessandro Rubini in his pxc200
* driver, but using BTTV functions */
-static void __devinit init_PXC200(struct bttv *btv)
+static void init_PXC200(struct bttv *btv)
{
- static int vals[] __devinitdata = { 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0d,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
- 0x00 };
+ static int vals[] = { 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0d, 0x01, 0x02,
+ 0x03, 0x04, 0x05, 0x06, 0x00 };
unsigned int i;
int tmp;
u32 val;
@@ -4851,7 +4849,7 @@ void __init bttv_check_chipset(void)
}
}
-int __devinit bttv_handle_chipset(struct bttv *btv)
+int bttv_handle_chipset(struct bttv *btv)
{
unsigned char command;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 56c6c77793d7..45e5d0661b60 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -200,7 +200,7 @@ static void flush_request_modules(struct bttv *dev)
}
#else
#define request_modules(dev)
-#define flush_request_modules(dev)
+#define flush_request_modules(dev) do {} while(0)
#endif /* CONFIG_MODULES */
@@ -301,11 +301,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* totalwidth */ 1135,
/* sqwidth */ 944,
/* vdelay */ 0x20,
- /* sheight */ 576,
- /* videostart0 */ 23)
/* bt878 (and bt848?) can capture another
line below active video. */
- .cropcap.bounds.height = (576 + 2) + 0x20 - 2,
+ /* sheight */ (576 + 2) + 0x20 - 2,
+ /* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
.name = "NTSC",
@@ -4200,7 +4199,7 @@ static void bttv_unregister_video(struct bttv *btv)
}
/* register video4linux devices */
-static int __devinit bttv_register_video(struct bttv *btv)
+static int bttv_register_video(struct bttv *btv)
{
if (no_overlay > 0)
pr_notice("Overlay support disabled\n");
@@ -4266,8 +4265,7 @@ static void pci_set_command(struct pci_dev *dev)
#endif
}
-static int __devinit bttv_probe(struct pci_dev *dev,
- const struct pci_device_id *pci_id)
+static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
{
int result;
unsigned char lat;
@@ -4455,7 +4453,7 @@ fail0:
return result;
}
-static void __devexit bttv_remove(struct pci_dev *pci_dev)
+static void bttv_remove(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct bttv *btv = to_bttv(v4l2_dev);
@@ -4599,7 +4597,7 @@ static struct pci_driver bttv_pci_driver = {
.name = "bttv",
.id_table = bttv_pci_tbl,
.probe = bttv_probe,
- .remove = __devexit_p(bttv_remove),
+ .remove = bttv_remove,
#ifdef CONFIG_PM
.suspend = bttv_suspend,
.resume = bttv_resume,
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index 580c8e682392..5039b8826e0a 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -99,7 +99,7 @@ static int bttv_bit_getsda(void *data)
return state;
}
-static struct i2c_algo_bit_data __devinitdata bttv_i2c_algo_bit_template = {
+static struct i2c_algo_bit_data bttv_i2c_algo_bit_template = {
.setsda = bttv_bit_setsda,
.setscl = bttv_bit_setscl,
.getsda = bttv_bit_getsda,
@@ -312,7 +312,7 @@ int bttv_I2CWrite(struct bttv *btv, unsigned char addr, unsigned char b1,
}
/* read EEPROM content */
-void __devinit bttv_readee(struct bttv *btv, unsigned char *eedata, int addr)
+void bttv_readee(struct bttv *btv, unsigned char *eedata, int addr)
{
memset(eedata, 0, 256);
if (0 != btv->i2c_rc)
@@ -347,7 +347,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
}
/* init + register i2c adapter */
-int __devinit init_bttv_i2c(struct bttv *btv)
+int init_bttv_i2c(struct bttv *btv)
{
strlcpy(btv->i2c_client.name, "bttv internal", I2C_NAME_SIZE);
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index ef4c7cd41982..04207a799055 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -368,7 +368,7 @@ static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
}
/* Instantiate the I2C IR receiver device, if present */
-void __devinit init_bttv_i2c_ir(struct bttv *btv)
+void init_bttv_i2c_ir(struct bttv *btv)
{
const unsigned short addr_list[] = {
0x1a, 0x18, 0x64, 0x30, 0x71,
@@ -411,7 +411,7 @@ void __devinit init_bttv_i2c_ir(struct bttv *btv)
return;
}
-int __devexit fini_bttv_i2c(struct bttv *btv)
+int fini_bttv_i2c(struct bttv *btv)
{
if (0 != btv->i2c_rc)
return 0;
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index 81fab9adc1ca..d407244fd1bc 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -118,7 +118,8 @@ static int is_pci_slot_eq(struct pci_dev* adev, struct pci_dev* bdev)
return 0;
}
-static struct bt878 __devinit *dvb_bt8xx_878_match(unsigned int bttv_nr, struct pci_dev* bttv_pci_dev)
+static struct bt878 *dvb_bt8xx_878_match(unsigned int bttv_nr,
+ struct pci_dev* bttv_pci_dev)
{
unsigned int card_nr;
@@ -720,7 +721,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
}
}
-static int __devinit dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
+static int dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
{
int result;
@@ -811,7 +812,7 @@ err_unregister_adaptor:
return result;
}
-static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
+static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
{
struct dvb_bt8xx_card *card;
struct pci_dev* bttv_pci_dev;
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 6d2a98246b6d..8e971ff60588 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -197,7 +197,7 @@ err_exit:
return ret;
}
-int cx18_alsa_load(struct cx18 *cx)
+static int __init cx18_alsa_load(struct cx18 *cx)
{
struct v4l2_device *v4l2_dev = &cx->v4l2_dev;
struct cx18_stream *s;
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index 7a5b84a86bb3..180077c49123 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -37,6 +37,7 @@
#include "cx18-streams.h"
#include "cx18-fileops.h"
#include "cx18-alsa.h"
+#include "cx18-alsa-pcm.h"
static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 039133d692e3..613e5ae7d5ca 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -53,7 +53,7 @@ int (*cx18_ext_init)(struct cx18 *);
EXPORT_SYMBOL(cx18_ext_init);
/* add your revision and whatnot here */
-static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
+static struct pci_device_id cx18_pci_tbl[] = {
{PCI_VENDOR_ID_CX, PCI_DEVICE_ID_CX23418,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
@@ -691,7 +691,7 @@ done:
cx->card_i2c = cx->card->i2c;
}
-static int __devinit cx18_create_in_workq(struct cx18 *cx)
+static int cx18_create_in_workq(struct cx18 *cx)
{
snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
cx->v4l2_dev.name);
@@ -703,7 +703,7 @@ static int __devinit cx18_create_in_workq(struct cx18 *cx)
return 0;
}
-static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
+static void cx18_init_in_work_orders(struct cx18 *cx)
{
int i;
for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
@@ -718,7 +718,7 @@ static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
No assumptions on the card type may be made here (see cx18_init_struct2
for that).
*/
-static int __devinit cx18_init_struct1(struct cx18 *cx)
+static int cx18_init_struct1(struct cx18 *cx)
{
int ret;
@@ -775,7 +775,7 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
/* Second initialization part. Here the card type has been
autodetected. */
-static void __devinit cx18_init_struct2(struct cx18 *cx)
+static void cx18_init_struct2(struct cx18 *cx)
{
int i;
@@ -892,8 +892,8 @@ static void cx18_init_subdevs(struct cx18 *cx)
cx->sd_extmux = cx18_find_hw(cx, cx->card->hw_muxer);
}
-static int __devinit cx18_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx18_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
int retval = 0;
int i;
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 51609d5c88ce..4908eb7bcf6c 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -98,7 +98,7 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
case CX18_HW_Z8F0811_IR_RX_HAUP:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = cx->card_name;
info.platform_data = init_data;
break;
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 72af9b5c2d7d..843c62b2f482 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -97,7 +97,7 @@ static struct {
};
-void cx18_dma_free(struct videobuf_queue *q,
+static void cx18_dma_free(struct videobuf_queue *q,
struct cx18_stream *s, struct cx18_videobuf_buffer *buf)
{
videobuf_waiton(q, &buf->vb, 0, 0);
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index 495781ee4711..2926f7fadccd 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -263,7 +263,7 @@ static int netup_fpga_op_rw(struct fpga_internal *inter, int addr,
}
/* flag - mem/io, read - read/write */
-int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
+static int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 val)
{
@@ -298,31 +298,32 @@ int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
return mem;
}
-int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
- int slot, int addr)
+static int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr)
{
return altera_ci_op_cam(en50221, slot, 0, NETUP_CI_FLG_RD, addr, 0);
}
-int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
- int slot, int addr, u8 data)
+static int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr, u8 data)
{
return altera_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
-int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
+static int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr)
{
return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL,
NETUP_CI_FLG_RD, addr, 0);
}
-int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
- u8 addr, u8 data)
+static int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
+ u8 addr, u8 data)
{
return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, 0, addr, data);
}
-int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
@@ -365,13 +366,13 @@ int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
return 0;
}
-int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
/* not implemented */
return 0;
}
-int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
@@ -448,8 +449,8 @@ int altera_ci_irq(void *dev)
}
EXPORT_SYMBOL(altera_ci_irq);
-int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot,
- int open)
+static int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open)
{
struct altera_ci_state *state = en50221->data;
@@ -459,7 +460,7 @@ int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot,
return state->status;
}
-void altera_hw_filt_release(void *main_dev, int filt_nr)
+static void altera_hw_filt_release(void *main_dev, int filt_nr)
{
struct fpga_inode *temp_int = find_inode(main_dev);
struct netup_hw_pid_filter *pid_filt = NULL;
@@ -581,7 +582,7 @@ static void altera_toggle_fullts_streaming(struct netup_hw_pid_filter *pid_filt,
mutex_unlock(&inter->fpga_mutex);
}
-int altera_pid_feed_control(void *demux_dev, int filt_nr,
+static int altera_pid_feed_control(void *demux_dev, int filt_nr,
struct dvb_demux_feed *feed, int onoff)
{
struct fpga_inode *temp_int = find_dinode(demux_dev);
@@ -603,41 +604,41 @@ int altera_pid_feed_control(void *demux_dev, int filt_nr,
}
EXPORT_SYMBOL(altera_pid_feed_control);
-int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
+static int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
{
altera_pid_feed_control(feed->demux, num, feed, 1);
return 0;
}
-int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num)
+static int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num)
{
altera_pid_feed_control(feed->demux, num, feed, 0);
return 0;
}
-int altera_ci_start_feed_1(struct dvb_demux_feed *feed)
+static int altera_ci_start_feed_1(struct dvb_demux_feed *feed)
{
return altera_ci_start_feed(feed, 1);
}
-int altera_ci_stop_feed_1(struct dvb_demux_feed *feed)
+static int altera_ci_stop_feed_1(struct dvb_demux_feed *feed)
{
return altera_ci_stop_feed(feed, 1);
}
-int altera_ci_start_feed_2(struct dvb_demux_feed *feed)
+static int altera_ci_start_feed_2(struct dvb_demux_feed *feed)
{
return altera_ci_start_feed(feed, 2);
}
-int altera_ci_stop_feed_2(struct dvb_demux_feed *feed)
+static int altera_ci_stop_feed_2(struct dvb_demux_feed *feed)
{
return altera_ci_stop_feed(feed, 2);
}
-int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
+static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
{
struct netup_hw_pid_filter *pid_filt = NULL;
struct fpga_inode *temp_int = find_inode(config->dev);
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index 6617774a326a..7344849183a7 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -24,6 +24,7 @@
*/
#include "cx23885.h"
+#include "cimax2.h"
#include "dvb_ca_en50221.h"
/**** Bit definitions for MC417_RWD and MC417_OEN registers ***
bits 31-16
@@ -87,7 +88,7 @@ struct netup_ci_state {
};
-int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
+static int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
@@ -120,7 +121,7 @@ int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
return 0;
}
-int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
+static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
@@ -147,7 +148,7 @@ int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
return 0;
}
-int netup_ci_get_mem(struct cx23885_dev *dev)
+static int netup_ci_get_mem(struct cx23885_dev *dev)
{
int mem;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
@@ -166,7 +167,7 @@ int netup_ci_get_mem(struct cx23885_dev *dev)
return mem & 0xff;
}
-int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
+static int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 data)
{
struct netup_ci_state *state = en50221->data;
@@ -248,7 +249,8 @@ int netup_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
return netup_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
-int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
+int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
+ u8 addr)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL,
NETUP_CI_RD, addr, 0);
@@ -295,7 +297,7 @@ int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
return 0;
}
-int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
+static int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
{
struct netup_ci_state *state = en50221->data;
int ret;
@@ -399,7 +401,8 @@ int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status)
return 1;
}
-int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
+int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open)
{
struct netup_ci_state *state = en50221->data;
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index 795169237e70..c6c9bd58f8be 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -45,8 +45,10 @@
#define AUDIO_SRAM_CHANNEL SRAM_CH07
-#define dprintk(level, fmt, arg...) if (audio_debug >= level) \
- printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (audio_debug + 1 > level) \
+ printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg); \
+} while(0)
#define dprintk_core(level, fmt, arg...) if (audio_debug >= level) \
printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)
diff --git a/drivers/media/pci/cx23885/cx23885-av.c b/drivers/media/pci/cx23885/cx23885-av.c
index 134ebddd860f..e958a01fd554 100644
--- a/drivers/media/pci/cx23885/cx23885-av.c
+++ b/drivers/media/pci/cx23885/cx23885-av.c
@@ -22,6 +22,7 @@
*/
#include "cx23885.h"
+#include "cx23885-av.h"
void cx23885_av_work_handler(struct work_struct *work)
{
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 5acdf954ff6b..6277e145f0b8 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1427,7 +1427,7 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
}
}
-int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
+static int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
{
int data;
int tdo = 0;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 697728f09430..f0416a668b4c 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -303,7 +303,7 @@ static struct sram_channel cx23887_sram_channels[] = {
},
};
-void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
+static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
unsigned long flags;
spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
@@ -1516,8 +1516,7 @@ int cx23885_restart_queue(struct cx23885_tsport *port,
buf = list_entry(q->queued.next, struct cx23885_buffer,
vb.queue);
if (NULL == prev) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
cx23885_start_dma(port, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
@@ -1528,8 +1527,7 @@ int cx23885_restart_queue(struct cx23885_tsport *port,
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
@@ -2088,8 +2086,8 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
/* TODO: 23-19 */
}
-static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx23885_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct cx23885_dev *dev;
int err;
@@ -2169,7 +2167,7 @@ fail_free:
return err;
}
-static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
+static void cx23885_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct cx23885_dev *dev = to_cx23885(v4l2_dev);
@@ -2212,7 +2210,7 @@ static struct pci_driver cx23885_pci_driver = {
.name = "cx23885",
.id_table = cx23885_pci_tbl,
.probe = cx23885_initdev,
- .remove = __devexit_p(cx23885_finidev),
+ .remove = cx23885_finidev,
/* TODO */
.suspend = NULL,
.resume = NULL,
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 4379d8a6dad5..2f5b902e63ae 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -659,7 +659,7 @@ static struct mt2063_config terratec_mt2063_config[] = {
},
};
-int netup_altera_fpga_rw(void *device, int flag, int data, int read)
+static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
diff --git a/drivers/media/pci/cx23885/cx23885-f300.c b/drivers/media/pci/cx23885/cx23885-f300.c
index 93998f220986..5444cc526008 100644
--- a/drivers/media/pci/cx23885/cx23885-f300.c
+++ b/drivers/media/pci/cx23885/cx23885-f300.c
@@ -29,6 +29,7 @@
*/
#include "cx23885.h"
+#include "cx23885-f300.h"
#define F300_DATA GPIO_0
#define F300_RESET GPIO_1
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 2c925f77cf2a..4f1055a194b5 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -40,6 +40,7 @@
#include <media/v4l2-subdev.h>
#include "cx23885.h"
+#include "cx23885-input.h"
#define MODULE_NAME "cx23885"
@@ -270,21 +271,21 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Integrated CX2388[58] IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_ALL;
+ allowed_protos = RC_BIT_ALL;
/* The grey Hauppauge RC-5 remote */
rc_map = RC_MAP_HAUPPAUGE;
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_NEC;
+ allowed_protos = RC_BIT_NEC;
/* The grey Terratec remote with orange buttons */
rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS;
break;
case CX23885_BOARD_TEVII_S470:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_ALL;
+ allowed_protos = RC_BIT_ALL;
/* A guess at the remote */
rc_map = RC_MAP_TEVII_NEC;
break;
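The RC_TYPE_* to RC_BIT_* conversions here (and in the cx88, ivtv and saa7134 hunks further down) track the rc-core switch from a single enumerated protocol value to a 64-bit protocol bitmask, so a driver can advertise several decodable protocols at once. A hedged sketch of how such a mask is used; the constants below are illustrative stand-ins, not the real values from include/media/rc-map.h:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the RC_BIT_* mask constants. */
#define RC_BIT_UNKNOWN	(1ULL << 0)
#define RC_BIT_RC5	(1ULL << 1)
#define RC_BIT_NEC	(1ULL << 2)
#define RC_BIT_ALL	(~0ULL)

int main(void)
{
	/* What the Terratec case above sets: only NEC frames are decoded. */
	uint64_t allowed_protos = RC_BIT_NEC;

	/* Unlike the old enum, a mask can combine protocols. */
	allowed_protos |= RC_BIT_RC5;

	printf("NEC enabled: %d\n", !!(allowed_protos & RC_BIT_NEC));
	printf("RC5 enabled: %d\n", !!(allowed_protos & RC_BIT_RC5));
	return 0;
}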
diff --git a/drivers/media/pci/cx23885/cx23885-input.h b/drivers/media/pci/cx23885/cx23885-input.h
index 75ef15d3f523..87dc44e69977 100644
--- a/drivers/media/pci/cx23885/cx23885-input.h
+++ b/drivers/media/pci/cx23885/cx23885-input.h
@@ -23,7 +23,7 @@
#ifndef _CX23885_INPUT_H_
#define _CX23885_INPUT_H_
-int cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events);
+void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events);
int cx23885_input_init(struct cx23885_dev *dev);
void cx23885_input_fini(struct cx23885_dev *dev);
diff --git a/drivers/media/pci/cx23885/cx23885-ioctl.c b/drivers/media/pci/cx23885/cx23885-ioctl.c
index 44812ca78899..ea9a614f3bbf 100644
--- a/drivers/media/pci/cx23885/cx23885-ioctl.c
+++ b/drivers/media/pci/cx23885/cx23885-ioctl.c
@@ -22,6 +22,8 @@
*/
#include "cx23885.h"
+#include "cx23885-ioctl.h"
+
#include <media/v4l2-chip-ident.h>
int cx23885_g_chip_ident(struct file *file, void *fh,
diff --git a/drivers/media/pci/cx23885/cx23885-ir.c b/drivers/media/pci/cx23885/cx23885-ir.c
index 7125247dd255..bfef19359291 100644
--- a/drivers/media/pci/cx23885/cx23885-ir.c
+++ b/drivers/media/pci/cx23885/cx23885-ir.c
@@ -24,6 +24,7 @@
#include <media/v4l2-device.h>
#include "cx23885.h"
+#include "cx23885-ir.h"
#include "cx23885-input.h"
#define CX23885_IR_RX_FIFO_SERVICE_REQ 0
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c2bc39c58f82..c4bd1e95d33f 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -29,6 +29,7 @@
#include <media/rc-core.h>
#include "cx23885.h"
+#include "cx23888-ir.h"
static unsigned int ir_888_debug;
module_param(ir_888_debug, int, 0644);
diff --git a/drivers/media/pci/cx23885/netup-init.c b/drivers/media/pci/cx23885/netup-init.c
index f4893e69cd89..0044fef7ca24 100644
--- a/drivers/media/pci/cx23885/netup-init.c
+++ b/drivers/media/pci/cx23885/netup-init.c
@@ -24,6 +24,7 @@
*/
#include "cx23885.h"
+#include "netup-init.h"
static void i2c_av_write(struct i2c_adapter *i2c, u16 reg, u8 val)
{
diff --git a/drivers/media/pci/cx25821/Kconfig b/drivers/media/pci/cx25821/Kconfig
index 5f6b54213713..4017c9420348 100644
--- a/drivers/media/pci/cx25821/Kconfig
+++ b/drivers/media/pci/cx25821/Kconfig
@@ -18,7 +18,7 @@ config VIDEO_CX25821
config VIDEO_CX25821_ALSA
tristate "Conexant 25821 DMA audio support"
- depends on VIDEO_CX25821 && SND && EXPERIMENTAL
+ depends on VIDEO_CX25821 && SND
select SND_PCM
---help---
This is a video4linux driver for direct (DMA) audio on
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
index 8b2a99975c23..87491ca05ee5 100644
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
@@ -44,7 +44,7 @@ MODULE_LICENSE("GPL");
static int _intr_msk = FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF |
FLD_AUD_SRC_SYNC | FLD_AUD_SRC_OPC_ERR;
-int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
+static int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
@@ -133,7 +133,7 @@ static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
return rp;
}
-int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
+static int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
struct pci_dev *pci,
unsigned int bpl, unsigned int lines)
{
@@ -197,7 +197,7 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
return 0;
}
-void cx25821_free_memory_audio(struct cx25821_dev *dev)
+static void cx25821_free_memory_audio(struct cx25821_dev *dev)
{
if (dev->_risc_virt_addr) {
pci_free_consistent(dev->pci, dev->_audiorisc_size,
@@ -256,7 +256,7 @@ void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev)
cx25821_free_memory_audio(dev);
}
-int cx25821_get_audio_data(struct cx25821_dev *dev,
+static int cx25821_get_audio_data(struct cx25821_dev *dev,
struct sram_channel *sram_ch)
{
struct file *myfile;
@@ -351,7 +351,7 @@ static void cx25821_audioups_handler(struct work_struct *work)
sram_channels);
}
-int cx25821_openfile_audio(struct cx25821_dev *dev,
+static int cx25821_openfile_audio(struct cx25821_dev *dev,
struct sram_channel *sram_ch)
{
struct file *myfile;
@@ -490,7 +490,7 @@ error:
return ret;
}
-int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
+static int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
u32 status)
{
int i = 0;
@@ -634,8 +634,8 @@ static void cx25821_wait_fifo_enable(struct cx25821_dev *dev,
}
-int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -700,9 +700,7 @@ fail_irq:
int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
{
struct sram_channel *sram_ch;
- int retval = 0;
int err = 0;
- int str_length = 0;
if (dev->_audio_is_running) {
pr_warn("Audio Channel is still running so return!\n");
@@ -731,27 +729,29 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
_line_size = AUDIO_LINE_SIZE;
if (dev->input_audiofilename) {
- str_length = strlen(dev->input_audiofilename);
- dev->_audiofilename = kmemdup(dev->input_audiofilename,
- str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kstrdup(dev->input_audiofilename,
+ GFP_KERNEL);
- if (!dev->_audiofilename)
+ if (!dev->_audiofilename) {
+ err = -ENOMEM;
goto error;
+ }
/* Default if filename is empty string */
if (strcmp(dev->input_audiofilename, "") == 0)
dev->_audiofilename = "/root/audioGOOD.wav";
} else {
- str_length = strlen(_defaultAudioName);
- dev->_audiofilename = kmemdup(_defaultAudioName,
- str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kstrdup(_defaultAudioName,
+ GFP_KERNEL);
- if (!dev->_audiofilename)
+ if (!dev->_audiofilename) {
+ err = -ENOMEM;
goto error;
+ }
}
- retval = cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
- _line_size, 0);
+ cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
+ _line_size, 0);
dev->audio_upstream_riscbuf_size =
AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS +
@@ -759,9 +759,9 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
+ err = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
_line_size);
- if (retval < 0) {
+ if (err < 0) {
pr_err("%s: Failed to set up Audio upstream buffers!\n",
dev->name);
goto error;
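The strlen()/kmemdup(len + 1) pairs above collapse into kstrdup(), which measures and duplicates a NUL-terminated string in one call; the same hunk also makes the allocation failure return -ENOMEM instead of falling through with err still 0. A user-space sketch of the helper's behaviour, where my_kstrdup() is a hypothetical stand-in for the kernel's kstrdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *my_kstrdup(const char *s)	/* stand-in for kernel kstrdup() */
{
	size_t len;
	char *p;

	if (!s)
		return NULL;
	len = strlen(s) + 1;		/* the length bookkeeping moves in here */
	p = malloc(len);
	if (p)
		memcpy(p, s, len);
	return p;
}

int main(void)
{
	char *name = my_kstrdup("/root/audioGOOD.wav");

	if (!name)
		return 1;		/* mirrors the new -ENOMEM error path */
	printf("%s\n", name);
	free(name);
	return 0;
}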
diff --git a/drivers/media/pci/cx25821/cx25821-biffuncs.h b/drivers/media/pci/cx25821/cx25821-biffuncs.h
index 9326a7c729ec..937f5a70fb7a 100644
--- a/drivers/media/pci/cx25821/cx25821-biffuncs.h
+++ b/drivers/media/pci/cx25821/cx25821-biffuncs.h
@@ -25,17 +25,17 @@
#define SetBit(Bit) (1 << Bit)
-inline u8 getBit(u32 sample, u8 index)
+static inline u8 getBit(u32 sample, u8 index)
{
return (u8) ((sample >> index) & 1);
}
-inline u32 clearBitAtPos(u32 value, u8 bit)
+static inline u32 clearBitAtPos(u32 value, u8 bit)
{
return value & ~(1 << bit);
}
-inline u32 setBitAtPos(u32 sample, u8 bit)
+static inline u32 setBitAtPos(u32 sample, u8 bit)
{
sample |= (1 << bit);
return sample;
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index f11f6f07e915..1884e2cc35e9 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1361,8 +1361,8 @@ struct cx25821_dev *cx25821_dev_get(struct pci_dev *pci)
}
EXPORT_SYMBOL(cx25821_dev_get);
-static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx25821_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct cx25821_dev *dev;
int err = 0;
@@ -1433,7 +1433,7 @@ fail_free:
return err;
}
-static void __devexit cx25821_finidev(struct pci_dev *pci_dev)
+static void cx25821_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct cx25821_dev *dev = get_cx25821(v4l2_dev);
@@ -1478,7 +1478,7 @@ static struct pci_driver cx25821_pci_driver = {
.name = "cx25821",
.id_table = cx25821_pci_tbl,
.probe = cx25821_initdev,
- .remove = __devexit_p(cx25821_finidev),
+ .remove = cx25821_finidev,
/* TODO */
.suspend = NULL,
.resume = NULL,
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index 9844549764c9..a8dc945bbe17 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -329,7 +329,8 @@ int cx25821_i2c_unregister(struct cx25821_i2c *bus)
return 0;
}
-void cx25821_av_clk(struct cx25821_dev *dev, int enable)
+#if 0 /* Currently unused */
+static void cx25821_av_clk(struct cx25821_dev *dev, int enable)
{
/* write 0 to bus 2 addr 0x144 via i2x_xfer() */
char buffer[3];
@@ -351,6 +352,7 @@ void cx25821_av_clk(struct cx25821_dev *dev, int enable)
i2c_xfer(&dev->i2c_bus[0].i2c_adap, &msg, 1);
}
+#endif
int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
{
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c b/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
index d33fc1a23030..cf2723c7197f 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
@@ -123,10 +123,11 @@ static __le32 *cx25821_risc_field_upstream_ch2(struct cx25821_dev *dev,
return rp;
}
-int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
- struct pci_dev *pci,
- unsigned int top_offset, unsigned int bpl,
- unsigned int lines)
+static int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
+ struct pci_dev *pci,
+ unsigned int top_offset,
+ unsigned int bpl,
+ unsigned int lines)
{
__le32 *rp;
int fifo_enable = 0;
@@ -255,7 +256,8 @@ void cx25821_free_mem_upstream_ch2(struct cx25821_dev *dev)
}
}
-int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_get_frame_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int frame_index_temp = dev->_frame_index_ch2;
@@ -360,7 +362,8 @@ static void cx25821_vidups_handler_ch2(struct work_struct *work)
_channel2_upstream_select].sram_channels);
}
-int cx25821_openfile_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_openfile_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int i = 0, j = 0;
@@ -507,8 +510,9 @@ error:
return ret;
}
-int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
- u32 status)
+static int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev,
+ int chan_num,
+ u32 status)
{
u32 int_msk_tmp;
struct sram_channel *channel = dev->channels[chan_num].sram_channels;
@@ -647,8 +651,8 @@ static void cx25821_set_pixelengine_ch2(struct cx25821_dev *dev,
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
}
-int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -704,11 +708,9 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
{
struct sram_channel *sram_ch;
u32 tmp;
- int retval = 0;
int err = 0;
int data_frame_size = 0;
int risc_buffer_size = 0;
- int str_length = 0;
if (dev->_is_running_ch2) {
pr_info("Video Channel is still running so return!\n");
@@ -744,20 +746,16 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
risc_buffer_size = dev->_isNTSC_ch2 ?
NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
- if (dev->input_filename_ch2) {
- str_length = strlen(dev->input_filename_ch2);
- dev->_filename_ch2 = kmemdup(dev->input_filename_ch2,
- str_length + 1, GFP_KERNEL);
-
- if (!dev->_filename_ch2)
- goto error;
- } else {
- str_length = strlen(dev->_defaultname_ch2);
- dev->_filename_ch2 = kmemdup(dev->_defaultname_ch2,
- str_length + 1, GFP_KERNEL);
+ if (dev->input_filename_ch2)
+ dev->_filename_ch2 = kstrdup(dev->input_filename_ch2,
+ GFP_KERNEL);
+ else
+ dev->_filename_ch2 = kstrdup(dev->_defaultname_ch2,
+ GFP_KERNEL);
- if (!dev->_filename_ch2)
- goto error;
+ if (!dev->_filename_ch2) {
+ err = -ENOENT;
+ goto error;
}
/* Default if filename is empty string */
@@ -773,7 +771,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
}
}
- retval = cx25821_sram_channel_setup_upstream(dev, sram_ch,
+ err = cx25821_sram_channel_setup_upstream(dev, sram_ch,
dev->_line_size_ch2, 0);
/* setup fifo + format */
@@ -783,9 +781,9 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
dev->upstream_databuf_size_ch2 = data_frame_size * 2;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
+ err = cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
dev->_line_size_ch2);
- if (retval < 0) {
+ if (err < 0) {
pr_err("%s: Failed to set up Video upstream buffers!\n",
dev->name);
goto error;
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
index 6759fff8eb64..7fc97110d973 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c
@@ -173,10 +173,10 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp,
return rp;
}
-int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
- struct pci_dev *pci,
- unsigned int top_offset,
- unsigned int bpl, unsigned int lines)
+static int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
+ struct pci_dev *pci,
+ unsigned int top_offset,
+ unsigned int bpl, unsigned int lines)
{
__le32 *rp;
int fifo_enable = 0;
@@ -300,7 +300,8 @@ void cx25821_free_mem_upstream_ch1(struct cx25821_dev *dev)
}
}
-int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_get_frame(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int frame_index_temp = dev->_frame_index;
@@ -405,7 +406,8 @@ static void cx25821_vidups_handler(struct work_struct *work)
sram_channels);
}
-int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_openfile(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int i = 0, j = 0;
@@ -486,8 +488,9 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
return 0;
}
-int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
- struct sram_channel *sram_ch, int bpl)
+static int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch,
+ int bpl)
{
int ret = 0;
dma_addr_t dma_addr;
@@ -548,8 +551,8 @@ error:
return ret;
}
-int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
- u32 status)
+static int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
+ u32 status)
{
u32 int_msk_tmp;
struct sram_channel *channel = dev->channels[chan_num].sram_channels;
@@ -664,8 +667,9 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch,
- int pix_format)
+static void cx25821_set_pixelengine(struct cx25821_dev *dev,
+ struct sram_channel *ch,
+ int pix_format)
{
int width = WIDTH_D1;
int height = dev->_lines_count;
@@ -696,8 +700,8 @@ void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch,
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
}
-int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -753,7 +757,6 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
{
struct sram_channel *sram_ch;
u32 tmp;
- int retval = 0;
int err = 0;
int data_frame_size = 0;
int risc_buffer_size = 0;
@@ -796,15 +799,19 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->_filename = kmemdup(dev->input_filename, str_length + 1,
GFP_KERNEL);
- if (!dev->_filename)
+ if (!dev->_filename) {
+ err = -ENOENT;
goto error;
+ }
} else {
str_length = strlen(dev->_defaultname);
dev->_filename = kmemdup(dev->_defaultname, str_length + 1,
GFP_KERNEL);
- if (!dev->_filename)
+ if (!dev->_filename) {
+ err = -ENOENT;
goto error;
+ }
}
/* Default if filename is empty string */
@@ -828,7 +835,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->_line_size = (dev->_pixel_format == PIXEL_FRMT_422) ?
(WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
- retval = cx25821_sram_channel_setup_upstream(dev, sram_ch,
+ err = cx25821_sram_channel_setup_upstream(dev, sram_ch,
dev->_line_size, 0);
/* setup fifo + format */
@@ -838,8 +845,8 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->upstream_databuf_size = data_frame_size * 2;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size);
- if (retval < 0) {
+ err = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size);
+ if (err < 0) {
pr_err("%s: Failed to set up Video upstream buffers!\n",
dev->name);
goto error;
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 0a80245165d0..53b16dd70320 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -291,9 +291,9 @@ int cx25821_start_video_dma(struct cx25821_dev *dev,
return 0;
}
-int cx25821_restart_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q,
- struct sram_channel *channel)
+static int cx25821_restart_video_queue(struct cx25821_dev *dev,
+ struct cx25821_dmaqueue *q,
+ struct sram_channel *channel)
{
struct cx25821_buffer *buf, *prev;
struct list_head *item;
@@ -342,7 +342,7 @@ int cx25821_restart_video_queue(struct cx25821_dev *dev,
}
}
-void cx25821_vid_timeout(unsigned long data)
+static void cx25821_vid_timeout(unsigned long data)
{
struct cx25821_data *timeout_data = (struct cx25821_data *)data;
struct cx25821_dev *dev = timeout_data->dev;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 3aa6856ead3b..27d62623274b 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -45,11 +45,15 @@
#include "cx88.h"
#include "cx88-reg.h"
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg);\
+} while(0)
-#define dprintk_core(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg)
+#define dprintk_core(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg);\
+} while(0)
/****************************************************************************
Data type declarations - Can be moded to a header file later
@@ -536,7 +540,7 @@ static struct snd_pcm_ops snd_cx88_pcm_ops = {
/*
* create a PCM device
*/
-static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
+static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
{
int err;
struct snd_pcm *pcm;
@@ -749,7 +753,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
+static const struct pci_device_id cx88_audio_pci_tbl[] = {
{0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0, }
@@ -788,10 +792,9 @@ static void snd_cx88_dev_free(struct snd_card * card)
*/
static int devno;
-static int __devinit snd_cx88_create(struct snd_card *card,
- struct pci_dev *pci,
- snd_cx88_card_t **rchip,
- struct cx88_core **core_ptr)
+static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
+ snd_cx88_card_t **rchip,
+ struct cx88_core **core_ptr)
{
snd_cx88_card_t *chip;
struct cx88_core *core;
@@ -858,8 +861,8 @@ static int __devinit snd_cx88_create(struct snd_card *card,
return 0;
}
-static int __devinit cx88_audio_initdev(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int cx88_audio_initdev(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
snd_cx88_card_t *chip;
@@ -927,7 +930,7 @@ error:
/*
* ALSA destructor
*/
-static void __devexit cx88_audio_finidev(struct pci_dev *pci)
+static void cx88_audio_finidev(struct pci_dev *pci)
{
struct cx88_audio_dev *card = pci_get_drvdata(pci);
@@ -946,7 +949,7 @@ static struct pci_driver cx88_audio_pci_driver = {
.name = "cx88_audio",
.id_table = cx88_audio_pci_tbl,
.probe = cx88_audio_initdev,
- .remove = __devexit_p(cx88_audio_finidev),
+ .remove = cx88_audio_finidev,
};
/****************************************************************************
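The dprintk()/dprintk_core() rework above (repeated for the blackbird and mpeg files below) wraps the macro body in do { } while (0); the comparison rewrite is equivalent, the wrapper is the substantive change. Without it, a macro whose body is a bare if statement steals a following else, and modern compilers only sometimes warn about it. A small stand-alone demonstration of the hazard (macro and variable names here are illustrative):

#include <stdio.h>

static int debug;		/* 0: debugging off, as with the module option */

#define dprintk_unsafe(level, fmt) \
	if (debug >= (level)) printf(fmt)

#define dprintk_safe(level, fmt) do { \
	if (debug >= (level))         \
		printf(fmt);          \
} while (0)

int main(void)
{
	int streaming = 0;

	if (streaming)
		dprintk_unsafe(1, "starting\n");
	else	/* silently binds to the macro's hidden if: never runs here */
		printf("unsafe: not streaming\n");

	if (streaming)
		dprintk_safe(1, "starting\n");
	else	/* binds to the intended if */
		printf("safe: not streaming\n");

	return 0;
}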
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 62184eb919e5..a6ff8a6f4fc0 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -53,9 +53,10 @@ static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg)
-
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg); \
+} while(0)
/* ------------------------------------------------------------------ */
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index c97b174be3ab..19a58754c6e1 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -646,22 +646,22 @@ int cx88_reset(struct cx88_core *core)
/* ------------------------------------------------------------------ */
-static unsigned int inline norm_swidth(v4l2_std_id norm)
+static inline unsigned int norm_swidth(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 754 : 922;
}
-static unsigned int inline norm_hdelay(v4l2_std_id norm)
+static inline unsigned int norm_hdelay(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 135 : 186;
}
-static unsigned int inline norm_vdelay(v4l2_std_id norm)
+static inline unsigned int norm_vdelay(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 0x24 : 0x18;
}
-static unsigned int inline norm_fsc8(v4l2_std_id norm)
+static inline unsigned int norm_fsc8(v4l2_std_id norm)
{
if (norm & V4L2_STD_PAL_M)
return 28604892; // 3.575611 MHz
@@ -681,7 +681,7 @@ static unsigned int inline norm_fsc8(v4l2_std_id norm)
return 35468950; // 4.43361875 MHz +/- 5 Hz
}
-static unsigned int inline norm_htotal(v4l2_std_id norm)
+static inline unsigned int norm_htotal(v4l2_std_id norm)
{
unsigned int fsc4=norm_fsc8(norm)/2;
@@ -692,7 +692,7 @@ static unsigned int inline norm_htotal(v4l2_std_id norm)
((fsc4+262)/525*1001+15000)/30000;
}
-static unsigned int inline norm_vbipack(v4l2_std_id norm)
+static inline unsigned int norm_vbipack(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 511 : 400;
}
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index ebf448c48ca3..f29e18c72f44 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -248,7 +248,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
struct cx88_IR *ir;
struct rc_dev *dev;
char *ir_codes = NULL;
- u64 rc_type = RC_TYPE_OTHER;
+ u64 rc_type = RC_BIT_OTHER;
int err = -ENOMEM;
u32 hardware_mask = 0; /* For devices with a hardware mask, when
* used with a full-code IR table
@@ -416,7 +416,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_TWINHAN_VP1027_DVBS:
ir_codes = RC_MAP_TWINHAN_VP1027_DVBS;
- rc_type = RC_TYPE_NEC;
+ rc_type = RC_BIT_NEC;
ir->sampling = 0xff00; /* address */
break;
}
@@ -592,7 +592,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
case CX88_BOARD_LEADTEK_PVR2000:
addr_list = pvr2000_addr_list;
core->init_data.name = "cx88 Leadtek PVR 2000 remote";
- core->init_data.type = RC_TYPE_UNKNOWN;
+ core->init_data.type = RC_BIT_UNKNOWN;
core->init_data.get_key = get_key_pvr2000;
core->init_data.ir_codes = RC_MAP_EMPTY;
break;
@@ -613,7 +613,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
/* Hauppauge XVR */
core->init_data.name = "cx88 Hauppauge XVR remote";
core->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- core->init_data.type = RC_TYPE_RC5;
+ core->init_data.type = RC_BIT_RC5;
core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
info.platform_data = &core->init_data;
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index d154bc197356..c9d3182f79d5 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -45,11 +45,15 @@ static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [mpeg]");
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg); \
+} while(0)
-#define mpeg_dbg(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg)
+#define mpeg_dbg(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg); \
+} while(0)
#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work)
@@ -217,8 +221,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
return 0;
buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
if (NULL == prev) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
cx8802_start_dma(dev, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
@@ -229,8 +232,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
@@ -789,8 +791,8 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
}
/* ----------------------------------------------------------- */
-static int __devinit cx8802_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx8802_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct cx8802_dev *dev;
struct cx88_core *core;
@@ -838,7 +840,7 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
return err;
}
-static void __devexit cx8802_remove(struct pci_dev *pci_dev)
+static void cx8802_remove(struct pci_dev *pci_dev)
{
struct cx8802_dev *dev;
@@ -896,7 +898,7 @@ static struct pci_driver cx8802_pci_driver = {
.name = "cx88-mpeg driver manager",
.id_table = cx8802_pci_tbl,
.probe = cx8802_probe,
- .remove = __devexit_p(cx8802_remove),
+ .remove = cx8802_remove,
};
static int __init cx8802_init(void)
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 05171457bf28..bc78354262ac 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1696,8 +1696,8 @@ static void cx8800_unregister_video(struct cx8800_dev *dev)
}
}
-static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx8800_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct cx8800_dev *dev;
struct cx88_core *core;
@@ -1923,7 +1923,7 @@ fail_free:
return err;
}
-static void __devexit cx8800_finidev(struct pci_dev *pci_dev)
+static void cx8800_finidev(struct pci_dev *pci_dev)
{
struct cx8800_dev *dev = pci_get_drvdata(pci_dev);
struct cx88_core *core = dev->core;
@@ -2052,7 +2052,7 @@ static struct pci_driver cx8800_pci_driver = {
.name = "cx8800",
.id_table = cx8800_pci_tbl,
.probe = cx8800_initdev,
- .remove = __devexit_p(cx8800_finidev),
+ .remove = cx8800_finidev,
#ifdef CONFIG_PM
.suspend = cx8800_suspend,
.resume = cx8800_resume,
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 44ffc8b3d45f..ba0dba4a4d22 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -94,13 +94,13 @@ enum cx8802_board_access {
/* ----------------------------------------------------------- */
/* tv norms */
-static unsigned int inline norm_maxw(v4l2_std_id norm)
+static inline unsigned int norm_maxw(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 720 : 768;
}
-static unsigned int inline norm_maxh(v4l2_std_id norm)
+static inline unsigned int norm_maxh(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 576 : 480;
}
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index feff57ee5a08..36e34522b9a8 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1542,7 +1542,7 @@ static void ddb_unmap(struct ddb *dev)
}
-static void __devexit ddb_remove(struct pci_dev *pdev)
+static void ddb_remove(struct pci_dev *pdev)
{
struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);
@@ -1565,8 +1565,7 @@ static void __devexit ddb_remove(struct pci_dev *pdev)
}
-static int __devinit ddb_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ddb *dev;
int stat = 0;
@@ -1679,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
.subvendor = _subvend, .subdevice = _subdev, \
.driver_data = (unsigned long)&_driverdata }
-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
+static const struct pci_device_id ddb_id_tbl[] = {
DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
@@ -1696,7 +1695,7 @@ static struct pci_driver ddb_pci_driver = {
.name = "DDBridge",
.id_table = ddb_id_tbl,
.probe = ddb_probe,
- .remove = __devexit_p(ddb_remove),
+ .remove = ddb_remove,
};
static __init int module_init_ddbridge(void)
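The __devinit/__devexit annotations, the __devexit_p() wrapper and __devinitdata on ID tables are dropped throughout these drivers; with CONFIG_HOTPLUG effectively always enabled, probe/remove code is no longer discarded after init, so the markers became no-ops and were removed tree-wide. A hedged skeleton of the resulting driver shape, with all names and IDs below as placeholders:

#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);		/* real probing elided */
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static const struct pci_device_id example_id_table[] = {	/* no __devinitdata */
	{ PCI_DEVICE(0x1234, 0x5678) },		/* placeholder vendor/device IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_id_table);

static struct pci_driver example_pci_driver = {
	.name     = "example",
	.id_table = example_id_table,
	.probe    = example_probe,
	.remove   = example_remove,		/* was __devexit_p(example_remove) */
};
module_pci_driver(example_pci_driver);

MODULE_LICENSE("GPL");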
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index a609b3a9b146..904c3ea350f5 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -616,7 +616,7 @@ static void dm1105_set_dma_addr(struct dm1105_dev *dev)
dm_writel(DM1105_STADR, cpu_to_le32(dev->dma_addr));
}
-static int __devinit dm1105_dma_map(struct dm1105_dev *dev)
+static int dm1105_dma_map(struct dm1105_dev *dev)
{
dev->ts_buf = pci_alloc_consistent(dev->pdev,
6 * DM1105_DMA_BYTES,
@@ -736,7 +736,7 @@ static irqreturn_t dm1105_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
+static int dm1105_ir_init(struct dm1105_dev *dm1105)
{
struct rc_dev *dev;
int err = -ENOMEM;
@@ -776,12 +776,12 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
return 0;
}
-void __devexit dm1105_ir_exit(struct dm1105_dev *dm1105)
+static void dm1105_ir_exit(struct dm1105_dev *dm1105)
{
rc_unregister_device(dm1105->ir.dev);
}
-static int __devinit dm1105_hw_init(struct dm1105_dev *dev)
+static int dm1105_hw_init(struct dm1105_dev *dev)
{
dm1105_disable_irqs(dev);
@@ -849,7 +849,7 @@ static struct ds3000_config dvbworld_ds3000_config = {
.demod_address = 0x68,
};
-static int __devinit frontend_init(struct dm1105_dev *dev)
+static int frontend_init(struct dm1105_dev *dev)
{
int ret;
@@ -949,7 +949,7 @@ static int __devinit frontend_init(struct dm1105_dev *dev)
return 0;
}
-static void __devinit dm1105_read_mac(struct dm1105_dev *dev, u8 *mac)
+static void dm1105_read_mac(struct dm1105_dev *dev, u8 *mac)
{
static u8 command[1] = { 0x28 };
@@ -971,7 +971,7 @@ static void __devinit dm1105_read_mac(struct dm1105_dev *dev, u8 *mac)
dev_info(&dev->pdev->dev, "MAC %pM\n", mac);
}
-static int __devinit dm1105_probe(struct pci_dev *pdev,
+static int dm1105_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct dm1105_dev *dev;
@@ -1128,8 +1128,10 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
INIT_WORK(&dev->work, dm1105_dmx_buffer);
sprintf(dev->wqn, "%s/%d", dvb_adapter->name, dvb_adapter->num);
dev->wq = create_singlethread_workqueue(dev->wqn);
- if (!dev->wq)
+ if (!dev->wq) {
+ ret = -ENOMEM;
goto err_dvb_net;
+ }
ret = request_irq(pdev->irq, dm1105_irq, IRQF_SHARED,
DRIVER_NAME, dev);
@@ -1172,7 +1174,7 @@ err_kfree:
return ret;
}
-static void __devexit dm1105_remove(struct pci_dev *pdev)
+static void dm1105_remove(struct pci_dev *pdev)
{
struct dm1105_dev *dev = pci_get_drvdata(pdev);
struct dvb_adapter *dvb_adapter = &dev->dvb_adapter;
@@ -1205,7 +1207,7 @@ static void __devexit dm1105_remove(struct pci_dev *pdev)
kfree(dev);
}
-static struct pci_device_id dm1105_id_table[] __devinitdata = {
+static struct pci_device_id dm1105_id_table[] = {
{
.vendor = PCI_VENDOR_ID_TRIGEM,
.device = PCI_DEVICE_ID_DM1105,
@@ -1227,7 +1229,7 @@ static struct pci_driver dm1105_driver = {
.name = DRIVER_NAME,
.id_table = dm1105_id_table,
.probe = dm1105_probe,
- .remove = __devexit_p(dm1105_remove),
+ .remove = dm1105_remove,
};
static int __init dm1105_init(void)
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 8deab1629b3b..4a221c693995 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -205,7 +205,7 @@ err_exit:
return ret;
}
-int ivtv_alsa_load(struct ivtv *itv)
+static int __init ivtv_alsa_load(struct ivtv *itv)
{
struct v4l2_device *v4l2_dev = &itv->v4l2_dev;
struct ivtv_stream *s;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index f7022bd58ffd..e1863dbf4edc 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -37,6 +37,7 @@
#include "ivtv-streams.h"
#include "ivtv-fileops.h"
#include "ivtv-alsa.h"
+#include "ivtv-alsa-pcm.h"
static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
@@ -69,8 +70,9 @@ static struct snd_pcm_hardware snd_ivtv_hw_capture = {
.periods_max = 98, /* 12544, */
};
-void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *itvsc, u8 *pcm_data,
- size_t num_bytes)
+static void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *itvsc,
+ u8 *pcm_data,
+ size_t num_bytes)
{
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
index 5ab18319ea4d..23dfe0d12400 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
@@ -21,7 +21,3 @@
*/
int __init snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
-
-/* Used by ivtv driver to announce the PCM data to the module */
-void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *card, u8 *pcm_data,
- size_t num_bytes);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 74e9a5032364..df88dc4ab555 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -73,7 +73,7 @@ int (*ivtv_ext_init)(struct ivtv *);
EXPORT_SYMBOL(ivtv_ext_init);
/* add your revision and whatnot here */
-static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
+static struct pci_device_id ivtv_pci_tbl[] = {
{PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16,
@@ -736,7 +736,7 @@ done:
No assumptions on the card type may be made here (see ivtv_init_struct2
for that).
*/
-static int __devinit ivtv_init_struct1(struct ivtv *itv)
+static int ivtv_init_struct1(struct ivtv *itv)
{
struct sched_param param = { .sched_priority = 99 };
@@ -802,7 +802,7 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
/* Second initialization part. Here the card type has been
autodetected. */
-static void __devinit ivtv_init_struct2(struct ivtv *itv)
+static void ivtv_init_struct2(struct ivtv *itv)
{
int i;
@@ -1001,8 +1001,7 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
}
}
-static int __devinit ivtv_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
+static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
int retval = 0;
int vbi_buf_size;
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 6ec7705af555..68387d4369d6 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -276,7 +276,7 @@ void ivtv_init_mpeg_decoder(struct ivtv *itv)
}
/* Try to restart the card & restore previous settings */
-int ivtv_firmware_restart(struct ivtv *itv)
+static int ivtv_firmware_restart(struct ivtv *itv)
{
int rc = 0;
v4l2_std_id std;
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index d47f41a0ef66..46e262becb67 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -200,21 +200,21 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
init_data->internal_get_key_func =
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS;
- init_data->type = RC_TYPE_OTHER;
+ init_data->type = RC_BIT_OTHER;
init_data->name = "AVerMedia AVerTV card";
break;
case IVTV_HW_I2C_IR_RX_HAUP_EXT:
case IVTV_HW_I2C_IR_RX_HAUP_INT:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_Z8F0811_IR_RX_HAUP:
/* Default to grey remote */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_I2C_IR_RX_ADAPTEC:
@@ -222,7 +222,7 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
init_data->name = itv->card_name;
/* FIXME: The protocol and RC_MAP needs to be corrected */
init_data->ir_codes = RC_MAP_EMPTY;
- init_data->type = RC_TYPE_UNKNOWN;
+ init_data->type = RC_BIT_UNKNOWN;
break;
}
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 949ae230e119..7a8b0d0b6127 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -993,7 +993,7 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
v4l2_std_id std;
int i;
- if (inp < 0 || inp >= itv->nof_inputs)
+ if (inp >= itv->nof_inputs)
return -EINVAL;
if (inp == itv->active_input) {
@@ -1168,7 +1168,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
}
}
-int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
+static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct ivtv *itv = fh2id(fh)->itv;
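ivtv_s_input() receives inp as an unsigned int from the V4L2 core, so the inp < 0 half of the old range check could never be true (and gcc warns about comparing an unsigned value against zero); the single upper-bound test is enough, because a negative value from a buggy caller wraps to a huge unsigned number and still fails it. A small stand-alone illustration:

#include <stdio.h>

static int s_input(unsigned int inp, unsigned int nof_inputs)
{
	if (inp >= nof_inputs)	/* one bound check also covers "negative" */
		return -22;	/* -EINVAL; -1 wraps to UINT_MAX and is rejected */
	return 0;
}

int main(void)
{
	printf("%d\n", s_input(2, 4));			/* 0: valid input */
	printf("%d\n", s_input((unsigned int)-1, 4));	/* -22: rejected  */
	return 0;
}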
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index cc0251e01077..6fe9fe5293dc 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -151,7 +151,8 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit hopper_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+static int hopper_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
{
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
@@ -230,7 +231,7 @@ fail0:
return err;
}
-static void __devexit hopper_pci_remove(struct pci_dev *pdev)
+static void hopper_pci_remove(struct pci_dev *pdev)
{
struct mantis_pci *mantis = pci_get_drvdata(pdev);
@@ -259,12 +260,12 @@ static struct pci_driver hopper_pci_driver = {
.remove = hopper_pci_remove,
};
-static int __devinit hopper_init(void)
+static int hopper_init(void)
{
return pci_register_driver(&hopper_pci_driver);
}
-static void __devexit hopper_exit(void)
+static void hopper_exit(void)
{
return pci_unregister_driver(&hopper_pci_driver);
}
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index 0207d1f064e0..932a0d73a7f8 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -159,7 +159,8 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mantis_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+static int mantis_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
{
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
@@ -249,7 +250,7 @@ fail0:
return err;
}
-static void __devexit mantis_pci_remove(struct pci_dev *pdev)
+static void mantis_pci_remove(struct pci_dev *pdev)
{
struct mantis_pci *mantis = pci_get_drvdata(pdev);
@@ -289,12 +290,12 @@ static struct pci_driver mantis_pci_driver = {
.remove = mantis_pci_remove,
};
-static int __devinit mantis_init(void)
+static int mantis_init(void)
{
return pci_register_driver(&mantis_pci_driver);
}
-static void __devexit mantis_exit(void)
+static void mantis_exit(void)
{
return pci_unregister_driver(&mantis_pci_driver);
}
diff --git a/drivers/media/pci/mantis/mantis_dvb.c b/drivers/media/pci/mantis/mantis_dvb.c
index 5d15c6b74d9b..5a71e1791cf5 100644
--- a/drivers/media/pci/mantis/mantis_dvb.c
+++ b/drivers/media/pci/mantis/mantis_dvb.c
@@ -144,7 +144,7 @@ static int mantis_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
return 0;
}
-int __devinit mantis_dvb_init(struct mantis_pci *mantis)
+int mantis_dvb_init(struct mantis_pci *mantis)
{
struct mantis_hwconfig *config = mantis->hwconfig;
int result = -1;
@@ -271,7 +271,7 @@ err0:
}
EXPORT_SYMBOL_GPL(mantis_dvb_init);
-int __devexit mantis_dvb_exit(struct mantis_pci *mantis)
+int mantis_dvb_exit(struct mantis_pci *mantis)
{
int err;
diff --git a/drivers/media/pci/mantis/mantis_i2c.c b/drivers/media/pci/mantis/mantis_i2c.c
index e7794517fe26..937fb9d50213 100644
--- a/drivers/media/pci/mantis/mantis_i2c.c
+++ b/drivers/media/pci/mantis/mantis_i2c.c
@@ -217,7 +217,7 @@ static struct i2c_algorithm mantis_algo = {
.functionality = mantis_i2c_func,
};
-int __devinit mantis_i2c_init(struct mantis_pci *mantis)
+int mantis_i2c_init(struct mantis_pci *mantis)
{
u32 intstat, intmask;
struct i2c_adapter *i2c_adapter = &mantis->adapter;
diff --git a/drivers/media/pci/mantis/mantis_input.c b/drivers/media/pci/mantis/mantis_input.c
index db6d54d3fec0..0e5252e5c0ef 100644
--- a/drivers/media/pci/mantis/mantis_input.c
+++ b/drivers/media/pci/mantis/mantis_input.c
@@ -18,6 +18,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#if 0 /* Currently unused */
+
#include <media/rc-core.h>
#include <linux/pci.h>
@@ -150,10 +152,11 @@ out:
return err;
}
-int mantis_exit(struct mantis_pci *mantis)
+int mantis_init_exit(struct mantis_pci *mantis)
{
rc_unregister_device(mantis->rc);
rc_map_unregister(&ir_mantis_map);
return 0;
}
+#endif
diff --git a/drivers/media/pci/mantis/mantis_pci.c b/drivers/media/pci/mantis/mantis_pci.c
index 371558af2d96..a846036ea022 100644
--- a/drivers/media/pci/mantis/mantis_pci.c
+++ b/drivers/media/pci/mantis/mantis_pci.c
@@ -46,7 +46,7 @@
#define DRIVER_NAME "Mantis Core"
-int __devinit mantis_pci_init(struct mantis_pci *mantis)
+int mantis_pci_init(struct mantis_pci *mantis)
{
u8 latency;
struct mantis_hwconfig *config = mantis->hwconfig;
diff --git a/drivers/media/pci/mantis/mantis_uart.c b/drivers/media/pci/mantis/mantis_uart.c
index 85e977861b4a..a70719218631 100644
--- a/drivers/media/pci/mantis/mantis_uart.c
+++ b/drivers/media/pci/mantis/mantis_uart.c
@@ -61,7 +61,7 @@ static struct {
#define UART_MAX_BUF 16
-int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
+static int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
{
struct mantis_hwconfig *config = mantis->hwconfig;
u32 stat = 0, i;
diff --git a/drivers/media/pci/mantis/mantis_vp1033.c b/drivers/media/pci/mantis/mantis_vp1033.c
index ad013e93ed11..115003e8d19d 100644
--- a/drivers/media/pci/mantis/mantis_vp1033.c
+++ b/drivers/media/pci/mantis/mantis_vp1033.c
@@ -83,7 +83,7 @@ u8 lgtdqcs001f_inittab[] = {
#define MANTIS_MODEL_NAME "VP-1033"
#define MANTIS_DEV_TYPE "DVB-S/DSS"
-int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
+static int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mantis_pci *mantis = fe->dvb->priv;
@@ -115,8 +115,8 @@ int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
return 0;
}
-int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
- u32 srate, u32 ratio)
+static int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
+ u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index e5a76da86081..049e18667cd0 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1728,8 +1728,7 @@ static int meye_resume(struct pci_dev *pdev)
}
#endif
-static int __devinit meye_probe(struct pci_dev *pcidev,
- const struct pci_device_id *ent)
+static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
struct v4l2_device *v4l2_dev = &meye.v4l2_dev;
int ret = -EBUSY;
@@ -1889,7 +1888,7 @@ outnotdev:
return ret;
}
-static void __devexit meye_remove(struct pci_dev *pcidev)
+static void meye_remove(struct pci_dev *pcidev)
{
video_unregister_device(meye.vdev);
@@ -1935,7 +1934,7 @@ static struct pci_driver meye_driver = {
.name = "meye",
.id_table = meye_pci_tbl,
.probe = meye_probe,
- .remove = __devexit_p(meye_remove),
+ .remove = meye_remove,
#ifdef CONFIG_PM
.suspend = meye_suspend,
.resume = meye_resume,
@@ -1945,7 +1944,7 @@ static struct pci_driver meye_driver = {
static int __init meye_init(void)
{
gbuffers = max(2, min((int)gbuffers, MEYE_MAX_BUFNBRS));
- if (gbufsize < 0 || gbufsize > MEYE_MAX_BUFSIZE)
+ if (gbufsize > MEYE_MAX_BUFSIZE)
gbufsize = MEYE_MAX_BUFSIZE;
gbufsize = PAGE_ALIGN(gbufsize);
printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) "
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 96a13ed197d0..fad214113669 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -425,8 +425,10 @@ static int ReadEEProm(struct i2c_adapter *adapter,
status = i2c_read_eeprom(adapter, 0x50, Addr, data, Length);
if (!status) {
*pLength = EETag[2];
+#if 0
if (Length < EETag[2])
- ; /*status=STATUS_BUFFER_OVERFLOW; */
+ status = STATUS_BUFFER_OVERFLOW;
+#endif
}
}
return status;
@@ -741,7 +743,7 @@ static struct ngene_info ngene_info_terratec = {
/****************************************************************************/
-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
+static const struct pci_device_id ngene_id_tbl[] = {
NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
@@ -798,7 +800,7 @@ static struct pci_driver ngene_pci_driver = {
.name = "ngene",
.id_table = ngene_id_tbl,
.probe = ngene_probe,
- .remove = __devexit_p(ngene_remove),
+ .remove = ngene_remove,
.err_handler = &ngene_errors,
.shutdown = ngene_shutdown,
};
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index c8e0d5b99d4c..37ebc42392ad 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -752,8 +752,8 @@ void set_transfer(struct ngene_channel *chan, int state)
if (chan->mode & NGENE_IO_TSIN)
chan->pBufferExchange = tsin_exchange;
spin_unlock_irq(&chan->state_lock);
- } else
- ;/* printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
+ }
+ /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
ngreadl(0x9310)); */
ret = ngene_command_stream_control(dev, chan->number,
@@ -1636,7 +1636,7 @@ void ngene_shutdown(struct pci_dev *pdev)
/* device probe/remove calls ************************************************/
/****************************************************************************/
-void __devexit ngene_remove(struct pci_dev *pdev)
+void ngene_remove(struct pci_dev *pdev)
{
struct ngene *dev = pci_get_drvdata(pdev);
int i;
@@ -1652,8 +1652,7 @@ void __devexit ngene_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-int __devinit ngene_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *id)
+int ngene_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
struct ngene *dev;
int stat = 0;
@@ -1691,7 +1690,8 @@ int __devinit ngene_probe(struct pci_dev *pci_dev,
dev->i2c_current_bus = -1;
/* Register DVB adapters and devices for both channels */
- if (init_channels(dev) < 0)
+ stat = init_channels(dev);
+ if (stat < 0)
goto fail2;
return 0;
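The init_channels() hunk above captures the callee's return value in stat before testing it, presumably so the fail2 unwind path reports the real errno instead of the stale 0 left in stat from earlier. A sketch of that pattern with hypothetical names:

#include <stdio.h>

static int init_channels_stub(void)
{
	return -12;			/* pretend -ENOMEM */
}

static int probe(void)
{
	int stat = 0;

	stat = init_channels_stub();
	if (stat < 0)
		goto fail;		/* fail path now reports -12, not 0 */

	return 0;
fail:
	/* resource unwind would go here */
	return stat;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}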
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 5443dc0caea5..22c39ff6bfa0 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -887,9 +887,8 @@ struct ngene_buffer {
/* Provided by ngene-core.c */
-int __devinit ngene_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *id);
-void __devexit ngene_remove(struct pci_dev *pdev);
+int ngene_probe(struct pci_dev *pci_dev, const struct pci_device_id *id);
+void ngene_remove(struct pci_dev *pdev);
void ngene_shutdown(struct pci_dev *pdev);
int ngene_command(struct ngene *dev, struct ngene_command *com);
int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level);
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index f148b19a206a..2290faee5852 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -240,7 +240,7 @@ static void pluto_set_dma_addr(struct pluto *pluto)
pluto_writereg(pluto, REG_PCAR, pluto->dma_addr);
}
-static int __devinit pluto_dma_map(struct pluto *pluto)
+static int pluto_dma_map(struct pluto *pluto)
{
pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
@@ -368,7 +368,7 @@ static irqreturn_t pluto_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __devinit pluto_enable_irqs(struct pluto *pluto)
+static void pluto_enable_irqs(struct pluto *pluto)
{
u32 val = pluto_readreg(pluto, REG_TSCR);
@@ -394,7 +394,7 @@ static void pluto_disable_irqs(struct pluto *pluto)
pluto_write_tscr(pluto, val);
}
-static int __devinit pluto_hw_init(struct pluto *pluto)
+static int pluto_hw_init(struct pluto *pluto)
{
pluto_reset_frontend(pluto, 1);
@@ -505,7 +505,7 @@ static int pluto2_request_firmware(struct dvb_frontend *fe,
return request_firmware(fw, name, &pluto->pdev->dev);
}
-static struct tda1004x_config pluto2_fe_config __devinitdata = {
+static struct tda1004x_config pluto2_fe_config = {
.demod_address = I2C_ADDR_TDA10046 >> 1,
.invert = 1,
.invert_oclk = 0,
@@ -515,7 +515,7 @@ static struct tda1004x_config pluto2_fe_config __devinitdata = {
.request_firmware = pluto2_request_firmware,
};
-static int __devinit frontend_init(struct pluto *pluto)
+static int frontend_init(struct pluto *pluto)
{
int ret;
@@ -536,14 +536,14 @@ static int __devinit frontend_init(struct pluto *pluto)
return 0;
}
-static void __devinit pluto_read_rev(struct pluto *pluto)
+static void pluto_read_rev(struct pluto *pluto)
{
u32 val = pluto_readreg(pluto, REG_MISC) & MISC_DVR;
dev_info(&pluto->pdev->dev, "board revision %d.%d\n",
(val >> 12) & 0x0f, (val >> 4) & 0xff);
}
-static void __devinit pluto_read_mac(struct pluto *pluto, u8 *mac)
+static void pluto_read_mac(struct pluto *pluto, u8 *mac)
{
u32 val = pluto_readreg(pluto, REG_MMAC);
mac[0] = (val >> 8) & 0xff;
@@ -560,7 +560,7 @@ static void __devinit pluto_read_mac(struct pluto *pluto, u8 *mac)
dev_info(&pluto->pdev->dev, "MAC %pM\n", mac);
}
-static int __devinit pluto_read_serial(struct pluto *pluto)
+static int pluto_read_serial(struct pluto *pluto)
{
struct pci_dev *pdev = pluto->pdev;
unsigned int i, j;
@@ -588,8 +588,7 @@ out:
return 0;
}
-static int __devinit pluto2_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pluto2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct pluto *pluto;
struct dvb_adapter *dvb_adapter;
@@ -742,7 +741,7 @@ err_kfree:
goto out;
}
-static void __devexit pluto2_remove(struct pci_dev *pdev)
+static void pluto2_remove(struct pci_dev *pdev)
{
struct pluto *pluto = pci_get_drvdata(pdev);
struct dvb_adapter *dvb_adapter = &pluto->dvb_adapter;
@@ -777,7 +776,7 @@ static void __devexit pluto2_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_PLUTO2 0x0001
#endif
-static struct pci_device_id pluto2_id_table[] __devinitdata = {
+static struct pci_device_id pluto2_id_table[] = {
{
.vendor = PCI_VENDOR_ID_SCM,
.device = PCI_DEVICE_ID_PLUTO2,
@@ -794,7 +793,7 @@ static struct pci_driver pluto2_driver = {
.name = DRIVER_NAME,
.id_table = pluto2_id_table,
.probe = pluto2_probe,
- .remove = __devexit_p(pluto2_remove),
+ .remove = pluto2_remove,
};
static int __init pluto2_init(void)
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index 15b35c4725f1..e9211086df49 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1058,7 +1058,7 @@ static void pt1_i2c_init(struct pt1 *pt1)
pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}
-static void __devexit pt1_remove(struct pci_dev *pdev)
+static void pt1_remove(struct pci_dev *pdev)
{
struct pt1 *pt1;
void __iomem *regs;
@@ -1083,8 +1083,7 @@ static void __devexit pt1_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int __devinit
-pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret;
void __iomem *regs;
@@ -1222,7 +1221,7 @@ MODULE_DEVICE_TABLE(pci, pt1_id_table);
static struct pci_driver pt1_driver = {
.name = DRIVER_NAME,
.probe = pt1_probe,
- .remove = __devexit_p(pt1_remove),
+ .remove = pt1_remove,
.id_table = pt1_id_table,
};
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index f2b37e05b964..e359d200d698 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -754,7 +754,7 @@ static int saa7134_hwfini(struct saa7134_dev *dev)
return 0;
}
-static void __devinit must_configure_manually(int has_eeprom)
+static void must_configure_manually(int has_eeprom)
{
unsigned int i,p;
@@ -860,8 +860,8 @@ static void mpeg_ops_detach(struct saa7134_mpeg_ops *ops,
dev->mops = NULL;
}
-static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int saa7134_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct saa7134_dev *dev;
struct saa7134_mpeg_ops *mops;
@@ -944,8 +944,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
/* board config */
dev->board = pci_id->driver_data;
- if (card[dev->nr] >= 0 &&
- card[dev->nr] < saa7134_bcount)
+ if ((unsigned)card[dev->nr] < saa7134_bcount)
dev->board = card[dev->nr];
if (SAA7134_BOARD_UNKNOWN == dev->board)
must_configure_manually(0);
@@ -1103,7 +1102,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
return err;
}
-static void __devexit saa7134_finidev(struct pci_dev *pci_dev)
+static void saa7134_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
@@ -1323,7 +1322,7 @@ static struct pci_driver saa7134_pci_driver = {
.name = "saa7134",
.id_table = saa7134_pci_tbl,
.probe = saa7134_initdev,
- .remove = __devexit_p(saa7134_finidev),
+ .remove = saa7134_finidev,
#ifdef CONFIG_PM
.suspend = saa7134_suspend,
.resume = saa7134_resume
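
The saa7134_initdev hunk folds the two range tests on the card[] module parameter into a single comparison by casting to unsigned: a negative index wraps to a large value and fails the `< saa7134_bcount` test. A small stand-alone sketch of the idiom (not driver code):

/* card[] entries default to -1 ("not set"); one compare covers both bounds. */
static int pick_board(int requested, unsigned int count, int fallback)
{
	if ((unsigned int)requested < count)	/* rejects negatives and >= count */
		return requested;
	return fallback;
}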
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 0f78f5e537e2..e761262f7475 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -990,7 +990,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "BeholdTV";
dev->init_data.get_key = get_key_beholdm6xx;
dev->init_data.ir_codes = RC_MAP_BEHOLD;
- dev->init_data.type = RC_TYPE_NEC;
+ dev->init_data.type = RC_BIT_NEC;
info.addr = 0x2d;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 4a77124ee70e..3abf52711e13 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2511,7 +2511,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
/* sanity check insmod options */
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
- if (gbufsize < 0 || gbufsize > gbufsize_max)
+ if (gbufsize > gbufsize_max)
gbufsize = gbufsize_max;
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index c24b6512bd8f..075908fae4d9 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -739,7 +739,7 @@ extern int (*saa7134_dmasound_exit)(struct saa7134_dev *dev);
extern struct saa7134_board saa7134_boards[];
extern const unsigned int saa7134_bcount;
-extern struct pci_device_id __devinitdata saa7134_pci_tbl[];
+extern struct pci_device_id saa7134_pci_tbl[];
extern int saa7134_board_init1(struct saa7134_dev *dev);
extern int saa7134_board_init2(struct saa7134_dev *dev);
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index eff7135cf0e8..e042963d377d 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -165,7 +165,7 @@ int saa7164_api_set_vbi_format(struct saa7164_port *port)
return ret;
}
-int saa7164_api_set_gop_size(struct saa7164_port *port)
+static int saa7164_api_set_gop_size(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
struct tmComResEncVideoGopStructure gs;
@@ -619,7 +619,7 @@ int saa7164_api_get_videomux(struct saa7164_port *port)
return ret;
}
-int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
+static int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
{
struct saa7164_dev *dev = port->dev;
@@ -822,8 +822,8 @@ int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen)
&reg[0], 128, buf);
}
-int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
- struct saa7164_port *port)
+static int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
+ struct saa7164_port *port)
{
struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc;
@@ -858,9 +858,10 @@ int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
- struct saa7164_port *port,
- struct tmComResTSFormatDescrHeader *tsfmt)
+static int
+saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
+ struct saa7164_port *port,
+ struct tmComResTSFormatDescrHeader *tsfmt)
{
dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex);
dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset);
@@ -892,9 +893,10 @@ int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
- struct saa7164_port *port,
- struct tmComResPSFormatDescrHeader *fmt)
+static int
+saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
+ struct saa7164_port *port,
+ struct tmComResPSFormatDescrHeader *fmt)
{
dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex);
dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength);
@@ -925,7 +927,7 @@ int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
+static int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
{
struct saa7164_port *tsport = NULL;
struct saa7164_port *encport = NULL;
@@ -1486,7 +1488,7 @@ int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
return ret == SAA_OK ? 0 : -EIO;
}
-int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
+static int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
u8 pin, u8 state)
{
int ret;
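
The saa7164-api changes mark helpers that are only called from within the same file as static, giving them internal linkage so they stop leaking into the global namespace and sparse/-Wmissing-prototypes stay quiet. Trivial illustration, not driver code:

/* Only callers in this file: give the helper internal linkage. */
static int example_file_local_helper(int x)
{
	return x * 2;
}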
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index a7f58a998752..5f6f3094c44e 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -81,7 +81,7 @@ void saa7164_bus_dump(struct saa7164_dev *dev)
}
/* Intentionally throw a BUG() if the state of the message bus looks corrupt */
-void saa7164_bus_verify(struct saa7164_dev *dev)
+static void saa7164_bus_verify(struct saa7164_dev *dev)
{
struct tmComResBusInfo *b = &dev->bus;
int bug = 0;
@@ -106,8 +106,8 @@ void saa7164_bus_verify(struct saa7164_dev *dev)
}
}
-void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo* m,
- void *buf)
+static void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo *m,
+ void *buf)
{
dprintk(DBGLVL_BUS, "Dumping msg structure:\n");
dprintk(DBGLVL_BUS, " .id = %d\n", m->id);
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 62fac7f9d04e..cfabcbacc33d 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -23,7 +23,7 @@
#include "saa7164.h"
-int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
+static int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
{
int i, ret = -1;
@@ -42,7 +42,7 @@ int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
return ret;
}
-void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
+static void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
{
mutex_lock(&dev->lock);
if ((dev->cmds[seqno].inuse == 1) &&
@@ -54,7 +54,7 @@ void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
mutex_unlock(&dev->lock);
}
-void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
+static void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
{
mutex_lock(&dev->lock);
if ((dev->cmds[seqno].inuse == 1) &&
@@ -64,7 +64,7 @@ void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
mutex_unlock(&dev->lock);
}
-u32 saa7164_cmd_timeout_get(struct saa7164_dev *dev, u8 seqno)
+static u32 saa7164_cmd_timeout_get(struct saa7164_dev *dev, u8 seqno)
{
int ret = 0;
@@ -132,7 +132,7 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
/* Commands to the f/w get marshalled to/from this code then onto the PCI
* -bus/c running buffer. */
-int saa7164_cmd_dequeue(struct saa7164_dev *dev)
+static int saa7164_cmd_dequeue(struct saa7164_dev *dev)
{
int loop = 1;
int ret;
@@ -186,8 +186,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
return SAA_OK;
}
-int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo *msg,
- void *buf)
+static int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo *msg,
+ void *buf)
{
struct tmComResBusInfo *bus = &dev->bus;
u8 cmd_sent;
@@ -259,7 +259,7 @@ out:
/* Wait for a signal event, without holding a mutex. Either return TIMEOUT if
* the event never occurred, or SAA_OK if it was signaled during the wait.
*/
-int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
+static int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
{
wait_queue_head_t *q = NULL;
int ret = SAA_BUS_TIMEOUT;
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 2c9ad878bef3..63502e7a2a76 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -410,7 +410,7 @@ static void saa7164_work_enchandler(struct work_struct *w)
} else
rp = (port->last_svc_rp + 1) % 8;
- if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ if (rp > (port->hwcfg.buffercount - 1)) {
printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
break;
}
@@ -486,7 +486,7 @@ static void saa7164_work_vbihandler(struct work_struct *w)
} else
rp = (port->last_svc_rp + 1) % 8;
- if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ if (rp > (port->hwcfg.buffercount - 1)) {
printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
break;
}
@@ -1185,8 +1185,8 @@ static int saa7164_thread_function(void *data)
return 0;
}
-static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int saa7164_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct saa7164_dev *dev;
int err, i;
@@ -1376,7 +1376,7 @@ static void saa7164_shutdown(struct saa7164_dev *dev)
dprintk(1, "%s()\n", __func__);
}
-static void __devexit saa7164_finidev(struct pci_dev *pci_dev)
+static void saa7164_finidev(struct pci_dev *pci_dev)
{
struct saa7164_dev *dev = pci_get_drvdata(pci_dev);
@@ -1459,7 +1459,7 @@ static struct pci_driver saa7164_pci_driver = {
.name = "saa7164",
.id_table = saa7164_pci_tbl,
.probe = saa7164_initdev,
- .remove = __devexit_p(saa7164_finidev),
+ .remove = saa7164_finidev,
/* TODO */
.suspend = NULL,
.resume = NULL,
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index a9ed686ad08a..994018e2d0d6 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1101,7 +1101,8 @@ static int fops_release(struct file *file)
return 0;
}
-struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
+static struct
+saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
{
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -1287,8 +1288,8 @@ static const struct v4l2_file_operations mpeg_fops = {
.unlocked_ioctl = video_ioctl2,
};
-int saa7164_g_chip_ident(struct file *file, void *fh,
- struct v4l2_dbg_chip_ident *chip)
+static int saa7164_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *chip)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
@@ -1297,8 +1298,8 @@ int saa7164_g_chip_ident(struct file *file, void *fh,
return 0;
}
-int saa7164_g_register(struct file *file, void *fh,
- struct v4l2_dbg_register *reg)
+static int saa7164_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
@@ -1310,8 +1311,8 @@ int saa7164_g_register(struct file *file, void *fh,
return 0;
}
-int saa7164_s_register(struct file *file, void *fh,
- struct v4l2_dbg_register *reg)
+static int saa7164_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index a266bf0169e6..86763203d61d 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -37,7 +37,7 @@ struct fw_header {
u32 version;
};
-int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
+static int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
{
u32 timeout = SAA_DEVICE_TIMEOUT;
while ((saa7164_readl(reg) & 0x01) == 0) {
@@ -53,7 +53,7 @@ int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
return 0;
}
-int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
+static int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
{
u32 timeout = SAA_DEVICE_TIMEOUT;
while (saa7164_readl(reg) & 0x01) {
@@ -71,8 +71,8 @@ int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
/* TODO: move dlflags into dev-> and change to write/readl/b */
/* TODO: Excessive levels of debug */
-int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
- u32 dlflags, u8 *dst, u32 dstsize)
+static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
+ u32 dlflags, u8 *dst, u32 dstsize)
{
u32 reg, timeout, offset;
u8 *srcbuf = NULL;
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index d8e6c8f14079..b4532299c0ed 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -984,7 +984,8 @@ out:
return ret;
}
-int saa7164_vbi_fmt(struct file *file, void *priv, struct v4l2_format *f)
+static int saa7164_vbi_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
/* ntsc */
f->fmt.vbi.samples_per_line = 1600;
@@ -1047,7 +1048,8 @@ static int fops_release(struct file *file)
return 0;
}
-struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
+static struct
+saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
{
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 4c10205264d4..27ae48842656 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1205,8 +1205,8 @@ static void vip_gpio_release(struct device *dev, int pin, const char *name)
*
* -ENODEV, device could not be detected or registered
*/
-static int __devinit sta2x11_vip_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sta2x11_vip_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int ret;
struct sta2x11_vip *vip;
@@ -1376,7 +1376,7 @@ disable:
* free memory
* free GPIO pins
*/
-static void __devexit sta2x11_vip_remove_one(struct pci_dev *pdev)
+static void sta2x11_vip_remove_one(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
struct sta2x11_vip *vip =
@@ -1517,7 +1517,7 @@ static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
static struct pci_driver sta2x11_vip_driver = {
.name = DRV_NAME,
.probe = sta2x11_vip_init_one,
- .remove = __devexit_p(sta2x11_vip_remove_one),
+ .remove = sta2x11_vip_remove_one,
.id_table = sta2x11_vip_pci_tbl,
#ifdef CONFIG_PM
.suspend = sta2x11_vip_suspend,
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 4bd8bd56befc..4656d4a10af0 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -2367,8 +2367,8 @@ static int frontend_init(struct av7110 *av7110)
* The same behaviour of missing VSYNC can be duplicated on budget
* cards, by setting DD1_INIT trigger mode 7 in the 3rd nibble.
*/
-static int __devinit av7110_attach(struct saa7146_dev* dev,
- struct saa7146_pci_extension_data *pci_ext)
+static int av7110_attach(struct saa7146_dev* dev,
+ struct saa7146_pci_extension_data *pci_ext)
{
const int length = TS_WIDTH * TS_HEIGHT;
struct pci_dev *pdev = dev->pci;
@@ -2761,7 +2761,7 @@ err_kfree_0:
goto out;
}
-static int __devexit av7110_detach(struct saa7146_dev* saa)
+static int av7110_detach(struct saa7146_dev* saa)
{
struct av7110 *av7110 = saa->ext_priv;
dprintk(4, "%p\n", av7110);
@@ -2910,7 +2910,7 @@ static struct saa7146_extension av7110_extension_driver = {
.module = THIS_MODULE,
.pci_tbl = &pci_tbl[0],
.attach = av7110_attach,
- .detach = __devexit_p(av7110_detach),
+ .detach = av7110_detach,
.irq_mask = MASK_19 | MASK_03 | MASK_10,
.irq_func = av7110_irq,
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 88b3b2d6cc0e..a378662b1dcf 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/time.h>
#include <linux/dvb/video.h>
#include <linux/dvb/audio.h>
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index 908f272fe26c..eb822862a646 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -324,7 +324,7 @@ static void ir_handler(struct av7110 *av7110, u32 ircom)
}
-int __devinit av7110_ir_init(struct av7110 *av7110)
+int av7110_ir_init(struct av7110 *av7110)
{
struct input_dev *input_dev;
static struct proc_dir_entry *e;
@@ -385,7 +385,7 @@ int __devinit av7110_ir_init(struct av7110 *av7110)
}
-void __devexit av7110_ir_exit(struct av7110 *av7110)
+void av7110_ir_exit(struct av7110 *av7110)
{
int i;
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 12ddb53c58dc..1f8b1bb0bf9f 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1477,8 +1477,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
if (saa7113_init(budget_av) == 0) {
budget_av->has_saa7113 = 1;
-
- if (0 != saa7146_vv_init(dev, &vv_data)) {
+ err = saa7146_vv_init(dev, &vv_data);
+ if (err != 0) {
/* fixme: proper cleanup here */
ERR("cannot init vv subsystem\n");
return err;
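
The budget-av hunk stores the saa7146_vv_init() return value before testing it; previously the failure path returned err without it having been assigned on that path. Sketch of the general pattern, with a hypothetical stand-in for the init call:

#include <linux/printk.h>

extern int do_vv_init(void);	/* stand-in for saa7146_vv_init(dev, &vv_data) */

static int example_attach(void)
{
	int err = do_vv_init();	/* capture the value that will be reported */

	if (err != 0) {
		pr_err("cannot init vv subsystem: %d\n", err);
		return err;	/* err is guaranteed to hold the failure code */
	}
	return 0;
}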
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index fffc54b452c8..a90a3b9b09bf 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -369,7 +369,7 @@ static const unsigned short bt819_addrs[] = { 0x45, I2C_CLIENT_END };
static const unsigned short bt856_addrs[] = { 0x44, I2C_CLIENT_END };
static const unsigned short bt866_addrs[] = { 0x44, I2C_CLIENT_END };
-static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
+static struct card_info zoran_cards[NUM_CARDS] = {
{
.type = DC10_old,
.name = "DC10(old)",
@@ -948,8 +948,7 @@ zoran_open_init_params (struct zoran *zr)
zr->testing = 0;
}
-static void __devinit
-test_interrupts (struct zoran *zr)
+static void test_interrupts (struct zoran *zr)
{
DEFINE_WAIT(wait);
int timeout, icr;
@@ -974,8 +973,7 @@ test_interrupts (struct zoran *zr)
btwrite(icr, ZR36057_ICR);
}
-static int __devinit
-zr36057_init (struct zoran *zr)
+static int zr36057_init (struct zoran *zr)
{
int j, err;
@@ -1083,7 +1081,7 @@ exit_free:
return err;
}
-static void __devexit zoran_remove(struct pci_dev *pdev)
+static void zoran_remove(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
struct zoran *zr = to_zoran(v4l2_dev);
@@ -1129,9 +1127,8 @@ zoran_vdev_release (struct video_device *vdev)
kfree(vdev);
}
-static struct videocodec_master * __devinit
-zoran_setup_videocodec (struct zoran *zr,
- int type)
+static struct videocodec_master *zoran_setup_videocodec(struct zoran *zr,
+ int type)
{
struct videocodec_master *m = NULL;
@@ -1192,8 +1189,7 @@ static void zoran_subdev_notify(struct v4l2_subdev *sd, unsigned int cmd, void *
* Scan for a Buz card (actually for the PCI controller ZR36057),
* request the irq and map the io memory
*/
-static int __devinit zoran_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned char latency, need_latency;
struct zoran *zr;
@@ -1459,7 +1455,7 @@ static struct pci_driver zoran_driver = {
.name = "zr36067",
.id_table = zr36067_pci_tbl,
.probe = zoran_probe,
- .remove = __devexit_p(zoran_remove),
+ .remove = zoran_remove,
};
static int __init zoran_init(void)
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 53f12c7466b0..e60ae41e2319 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -3080,7 +3080,7 @@ static const struct v4l2_file_operations zoran_fops = {
.poll = zoran_poll,
};
-struct video_device zoran_template __devinitdata = {
+struct video_device zoran_template = {
.name = ZORAN_NAME,
.fops = &zoran_fops,
.ioctl_ops = &zoran_ioctl_ops,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 181c7686e412..33241120a58c 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -98,8 +98,8 @@ config VIDEO_OMAP2
This is a v4l2 driver for the TI OMAP2 camera capture interface
config VIDEO_OMAP3
- tristate "OMAP 3 Camera support (EXPERIMENTAL)"
- depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
+ tristate "OMAP 3 Camera support"
+ depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
---help---
Driver for an OMAP 3 camera controller.
@@ -109,6 +109,18 @@ config VIDEO_OMAP3_DEBUG
---help---
Enable debug messages on OMAP 3 camera controller driver.
+config VIDEO_S3C_CAMIF
+ tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on (PLAT_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
+ host interface (CAMIF).
+
+ To compile this driver as a module, choose M here: the module
+ will be called s3c-camif.
+
source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/s5p-fimc/Kconfig"
source "drivers/media/platform/s5p-tv/Kconfig"
@@ -157,8 +169,8 @@ config VIDEO_SAMSUNG_S5P_G2D
2d graphics accelerator.
config VIDEO_SAMSUNG_S5P_JPEG
- tristate "Samsung S5P/Exynos4 JPEG codec driver (EXPERIMENTAL)"
- depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P && EXPERIMENTAL
+ tristate "Samsung S5P/Exynos4 JPEG codec driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index baaa55026c8e..4817d2802171 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_VIDEO_CODA) += coda.o
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index cb2eb26850b1..1aad2a65d2f3 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -862,7 +862,7 @@ static struct v4l2_file_operations bcap_fops = {
.poll = bcap_poll
};
-static int __devinit bcap_probe(struct platform_device *pdev)
+static int bcap_probe(struct platform_device *pdev)
{
struct bcap_device *bcap_dev;
struct video_device *vfd;
@@ -1026,7 +1026,7 @@ err_free_dev:
return ret;
}
-static int __devexit bcap_remove(struct platform_device *pdev)
+static int bcap_remove(struct platform_device *pdev)
{
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct bcap_device *bcap_dev = container_of(v4l2_dev,
@@ -1048,21 +1048,9 @@ static struct platform_driver bcap_driver = {
.owner = THIS_MODULE,
},
.probe = bcap_probe,
- .remove = __devexit_p(bcap_remove),
+ .remove = bcap_remove,
};
-
-static __init int bcap_init(void)
-{
- return platform_driver_register(&bcap_driver);
-}
-
-static __exit void bcap_exit(void)
-{
- platform_driver_unregister(&bcap_driver);
-}
-
-module_init(bcap_init);
-module_exit(bcap_exit);
+module_platform_driver(bcap_driver);
MODULE_DESCRIPTION("Analog Devices blackfin video capture driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
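
bfin_capture drops its hand-written init/exit functions in favour of module_platform_driver(), which generates an equivalent register/unregister pair. Roughly what the macro provides for bcap_driver (simplified sketch of the expansion):

/* Approximately what module_platform_driver(bcap_driver) expands to: */
static int __init bcap_driver_init(void)
{
	return platform_driver_register(&bcap_driver);
}
module_init(bcap_driver_init);

static void __exit bcap_driver_exit(void)
{
	platform_driver_unregister(&bcap_driver);
}
module_exit(bcap_driver_exit);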
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index cd04ae252c30..4a980e029ca7 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -23,8 +23,8 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/of.h>
+#include <linux/platform_data/imx-iram.h>
-#include <mach/iram.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -1540,7 +1540,7 @@ static irqreturn_t coda_irq_handler(int irq, void *data)
u32 wr_ptr, start_ptr;
struct coda_ctx *ctx;
- __cancel_delayed_work(&dev->timeout);
+ cancel_delayed_work(&dev->timeout);
/* read status register to attend the IRQ */
coda_read(dev, CODA_REG_BIT_INT_STATUS);
@@ -1877,7 +1877,7 @@ static const struct coda_devtype coda_devdata[] = {
static struct platform_device_id coda_platform_ids[] = {
{ .name = "coda-imx27", .driver_data = CODA_IMX27 },
- { .name = "coda-imx53", .driver_data = CODA_7541 },
+ { .name = "coda-imx53", .driver_data = CODA_IMX53 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, coda_platform_ids);
@@ -1891,7 +1891,7 @@ static const struct of_device_id coda_dt_ids[] = {
MODULE_DEVICE_TABLE(of, coda_dt_ids);
#endif
-static int __devinit coda_probe(struct platform_device *pdev)
+static int coda_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(of_match_ptr(coda_dt_ids), &pdev->dev);
@@ -2033,7 +2033,7 @@ static int coda_remove(struct platform_device *pdev)
static struct platform_driver coda_driver = {
.probe = coda_probe,
- .remove = __devexit_p(coda_remove),
+ .remove = coda_remove,
.driver = {
.name = CODA_NAME,
.owner = THIS_MODULE,
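
The coda IRQ handler switches from the removed __cancel_delayed_work() to cancel_delayed_work(), which does not sleep and is therefore safe in hard-irq context (the _sync variant is only usable where sleeping is allowed). Minimal sketch of that watchdog pattern; the work item and handler names are placeholders, not the driver's:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

static struct delayed_work timeout_work;	/* assumed field, armed before starting the hardware */

static irqreturn_t example_irq(int irq, void *data)
{
	/* Completion arrived: stop the pending timeout without sleeping. */
	cancel_delayed_work(&timeout_work);
	return IRQ_HANDLED;
}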
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index 78e26d24f637..3c56037c82fc 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -101,7 +101,7 @@ config VIDEO_DM644X_VPBE
tristate "DM644X VPBE HW module"
depends on ARCH_DAVINCI_DM644x
select VIDEO_VPSS_SYSTEM
- select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF2_DMA_CONTIG
help
Enables VPBE modules used for display on a DM644x
SoC.
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index ce0e4131c067..f263cabade7a 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -965,7 +965,7 @@ static struct ccdc_hw_device ccdc_hw_dev = {
},
};
-static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
+static int dm355_ccdc_probe(struct platform_device *pdev)
{
void (*setup_pinmux)(void);
struct resource *res;
@@ -1003,7 +1003,7 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.mclk);
goto fail_nomap;
}
- if (clk_enable(ccdc_cfg.mclk)) {
+ if (clk_prepare_enable(ccdc_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1014,7 +1014,7 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.sclk);
goto fail_mclk;
}
- if (clk_enable(ccdc_cfg.sclk)) {
+ if (clk_prepare_enable(ccdc_cfg.sclk)) {
status = -ENODEV;
goto fail_sclk;
}
@@ -1034,8 +1034,10 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
return 0;
fail_sclk:
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.sclk);
fail_mclk:
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
fail_nomap:
iounmap(ccdc_cfg.base_addr);
@@ -1050,6 +1052,8 @@ static int dm355_ccdc_remove(struct platform_device *pdev)
{
struct resource *res;
+ clk_disable_unprepare(ccdc_cfg.sclk);
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
clk_put(ccdc_cfg.sclk);
iounmap(ccdc_cfg.base_addr);
@@ -1065,7 +1069,7 @@ static struct platform_driver dm355_ccdc_driver = {
.name = "dm355_ccdc",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(dm355_ccdc_remove),
+ .remove = dm355_ccdc_remove,
.probe = dm355_ccdc_probe,
};
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index ee7942b1996e..318e80512998 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -957,7 +957,7 @@ static struct ccdc_hw_device ccdc_hw_dev = {
},
};
-static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
+static int dm644x_ccdc_probe(struct platform_device *pdev)
{
struct resource *res;
int status = 0;
@@ -994,7 +994,7 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.mclk);
goto fail_nomap;
}
- if (clk_enable(ccdc_cfg.mclk)) {
+ if (clk_prepare_enable(ccdc_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1005,7 +1005,7 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.sclk);
goto fail_mclk;
}
- if (clk_enable(ccdc_cfg.sclk)) {
+ if (clk_prepare_enable(ccdc_cfg.sclk)) {
status = -ENODEV;
goto fail_sclk;
}
@@ -1013,8 +1013,10 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
return 0;
fail_sclk:
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.sclk);
fail_mclk:
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
fail_nomap:
iounmap(ccdc_cfg.base_addr);
@@ -1029,6 +1031,8 @@ static int dm644x_ccdc_remove(struct platform_device *pdev)
{
struct resource *res;
+ clk_disable_unprepare(ccdc_cfg.mclk);
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.mclk);
clk_put(ccdc_cfg.sclk);
iounmap(ccdc_cfg.base_addr);
@@ -1046,8 +1050,8 @@ static int dm644x_ccdc_suspend(struct device *dev)
/* Disable CCDC */
ccdc_enable(0);
/* Disable both master and slave clock */
- clk_disable(ccdc_cfg.mclk);
- clk_disable(ccdc_cfg.sclk);
+ clk_disable_unprepare(ccdc_cfg.mclk);
+ clk_disable_unprepare(ccdc_cfg.sclk);
return 0;
}
@@ -1055,8 +1059,8 @@ static int dm644x_ccdc_suspend(struct device *dev)
static int dm644x_ccdc_resume(struct device *dev)
{
/* Enable both master and slave clock */
- clk_enable(ccdc_cfg.mclk);
- clk_enable(ccdc_cfg.sclk);
+ clk_prepare_enable(ccdc_cfg.mclk);
+ clk_prepare_enable(ccdc_cfg.sclk);
/* Restore CCDC context */
ccdc_restore_context();
@@ -1074,7 +1078,7 @@ static struct platform_driver dm644x_ccdc_driver = {
.owner = THIS_MODULE,
.pm = &dm644x_ccdc_pm_ops,
},
- .remove = __devexit_p(dm644x_ccdc_remove),
+ .remove = dm644x_ccdc_remove,
.probe = dm644x_ccdc_probe,
};
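
The davinci CCDC hunks above convert clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), as required by the common clock framework: prepare may sleep, enable may be called from atomic context, and the combined helpers do both steps from process context. They also add the missing disables on the error and remove paths. A typical probe-time pairing looks like this sketch (the clock name "master" is assumed for illustration):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clk_setup(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, "master");	/* "master" is an assumed name */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret) {
		clk_put(clk);
		return ret;
	}
	*out = clk;
	return 0;
}

/* Teardown mirrors it exactly: */
static void example_clk_teardown(struct clk *clk)
{
	clk_disable_unprepare(clk);
	clk_put(clk);
}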
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index b99d5423e3a8..5050f9265f48 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1032,7 +1032,7 @@ static struct ccdc_hw_device isif_hw_dev = {
},
};
-static int __devinit isif_probe(struct platform_device *pdev)
+static int isif_probe(struct platform_device *pdev)
{
void (*setup_pinmux)(void);
struct resource *res;
@@ -1053,7 +1053,7 @@ static int __devinit isif_probe(struct platform_device *pdev)
status = PTR_ERR(isif_cfg.mclk);
goto fail_mclk;
}
- if (clk_enable(isif_cfg.mclk)) {
+ if (clk_prepare_enable(isif_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1125,6 +1125,7 @@ fail_nobase_res:
i--;
}
fail_mclk:
+ clk_disable_unprepare(isif_cfg.mclk);
clk_put(isif_cfg.mclk);
vpfe_unregister_ccdc_device(&isif_hw_dev);
return status;
@@ -1145,6 +1146,8 @@ static int isif_remove(struct platform_device *pdev)
i++;
}
vpfe_unregister_ccdc_device(&isif_hw_dev);
+ clk_disable_unprepare(isif_cfg.mclk);
+ clk_put(isif_cfg.mclk);
return 0;
}
@@ -1153,7 +1156,7 @@ static struct platform_driver isif_driver = {
.name = "isif",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(isif_remove),
+ .remove = isif_remove,
.probe = isif_probe,
};
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 69d7a58c92c3..841b91a3d255 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -612,7 +612,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
ret = PTR_ERR(vpbe_dev->dac_clk);
goto fail_mutex_unlock;
}
- if (clk_enable(vpbe_dev->dac_clk)) {
+ if (clk_prepare_enable(vpbe_dev->dac_clk)) {
ret = -ENODEV;
goto fail_mutex_unlock;
}
@@ -759,8 +759,10 @@ fail_kfree_encoders:
fail_dev_unregister:
v4l2_device_unregister(&vpbe_dev->v4l2_dev);
fail_clk_put:
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
clk_put(vpbe_dev->dac_clk);
+ }
fail_mutex_unlock:
mutex_unlock(&vpbe_dev->lock);
return ret;
@@ -777,8 +779,10 @@ fail_mutex_unlock:
static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
{
v4l2_device_unregister(&vpbe_dev->v4l2_dev);
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
clk_put(vpbe_dev->dac_clk);
+ }
kfree(vpbe_dev->amp);
kfree(vpbe_dev->encoders);
@@ -803,7 +807,7 @@ static struct vpbe_device_ops vpbe_dev_ops = {
.set_mode = vpbe_set_mode,
};
-static __devinit int vpbe_probe(struct platform_device *pdev)
+static int vpbe_probe(struct platform_device *pdev)
{
struct vpbe_device *vpbe_dev;
struct vpbe_config *cfg;
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 161c77650e2f..e707a6f2325b 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -47,6 +47,9 @@ static int debug;
module_param(debug, int, 0644);
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer);
+
static int venc_is_second_field(struct vpbe_display *disp_dev)
{
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
@@ -73,10 +76,11 @@ static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
if (layer->cur_frm == layer->next_frm)
return;
ktime_get_ts(&timevalue);
- layer->cur_frm->ts.tv_sec = timevalue.tv_sec;
- layer->cur_frm->ts.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC;
- layer->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&layer->cur_frm->done);
+ layer->cur_frm->vb.v4l2_buf.timestamp.tv_sec =
+ timevalue.tv_sec;
+ layer->cur_frm->vb.v4l2_buf.timestamp.tv_usec =
+ timevalue.tv_nsec / NSEC_PER_USEC;
+ vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
layer->cur_frm = layer->next_frm;
}
@@ -99,16 +103,14 @@ static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
* otherwise hold on current frame
* Get next from the buffer queue
*/
- layer->next_frm = list_entry(
- layer->dma_queue.next,
- struct videobuf_buffer,
- queue);
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
/* Remove that from the buffer queue */
- list_del(&layer->next_frm->queue);
+ list_del(&layer->next_frm->list);
spin_unlock(&disp_obj->dma_queue_lock);
/* Mark state of the frame to active */
- layer->next_frm->state = VIDEOBUF_ACTIVE;
- addr = videobuf_to_dma_contig(layer->next_frm);
+ layer->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb, 0);
osd_device->ops.start_layer(osd_device,
layer->layer_info.id,
addr,
@@ -199,39 +201,29 @@ static irqreturn_t venc_isr(int irq, void *arg)
/*
* vpbe_buffer_prepare()
- * This is the callback function called from videobuf_qbuf() function
+ * This is the callback function called from vb2_qbuf() function
* the buffer is prepared and user space virtual address is converted into
* physical address
*/
-static int vpbe_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vpbe_buffer_prepare(struct vb2_buffer *vb)
{
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *q = vb->vb2_queue;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
unsigned long addr;
- int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"vpbe_buffer_prepare\n");
- /* If buffer is not initialized, initialize it */
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->width = layer->pix_fmt.width;
- vb->height = layer->pix_fmt.height;
- vb->size = layer->pix_fmt.sizeimage;
- vb->field = field;
-
- ret = videobuf_iolock(q, vb, NULL);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Failed to map \
- user address\n");
+ if (vb->state != VB2_BUF_STATE_ACTIVE &&
+ vb->state != VB2_BUF_STATE_PREPARED) {
+ vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
return -EINVAL;
- }
-
- addr = videobuf_to_dma_contig(vb);
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
if (q->streaming) {
if (!IS_ALIGNED(addr, 8)) {
v4l2_err(&vpbe_dev->v4l2_dev,
@@ -240,7 +232,6 @@ static int vpbe_buffer_prepare(struct videobuf_queue *q,
return -EINVAL;
}
}
- vb->state = VIDEOBUF_PREPARED;
}
return 0;
}
@@ -249,22 +240,26 @@ static int vpbe_buffer_prepare(struct videobuf_queue *q,
* vpbe_buffer_setup()
* This function allocates memory for the buffers
*/
-static int vpbe_buffer_setup(struct videobuf_queue *q,
- unsigned int *count,
- unsigned int *size)
+static int
+vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
- *size = layer->pix_fmt.sizeimage;
-
/* Store number of buffers allocated in numbuffer member */
- if (*count < VPBE_DEFAULT_NUM_BUFS)
- *count = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+ if (*nbuffers < VPBE_DEFAULT_NUM_BUFS)
+ *nbuffers = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+
+ *nplanes = 1;
+ sizes[0] = layer->pix_fmt.sizeimage;
+ alloc_ctxs[0] = layer->alloc_ctx;
return 0;
}
@@ -273,11 +268,12 @@ static int vpbe_buffer_setup(struct videobuf_queue *q,
* vpbe_buffer_queue()
* This function adds the buffer to DMA queue
*/
-static void vpbe_buffer_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpbe_buffer_queue(struct vb2_buffer *vb)
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
struct vpbe_layer *layer = fh->layer;
struct vpbe_display *disp = fh->disp_dev;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
@@ -288,39 +284,125 @@ static void vpbe_buffer_queue(struct videobuf_queue *q,
/* add the buffer to the DMA queue */
spin_lock_irqsave(&disp->dma_queue_lock, flags);
- list_add_tail(&vb->queue, &layer->dma_queue);
+ list_add_tail(&buf->list, &layer->dma_queue);
spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
- /* Change state of the buffer */
- vb->state = VIDEOBUF_QUEUED;
}
/*
- * vpbe_buffer_release()
- * This function is called from the videobuf layer to free memory allocated to
+ * vpbe_buf_cleanup()
+ * This function is called from the vb2 layer to free memory allocated to
* the buffers
*/
-static void vpbe_buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpbe_buf_cleanup(struct vb2_buffer *vb)
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+ unsigned long flags;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "vpbe_buffer_release\n");
+ "vpbe_buf_cleanup\n");
- if (V4L2_MEMORY_USERPTR != layer->memory)
- videobuf_dma_contig_free(q, vb);
+ spin_lock_irqsave(&layer->irqlock, flags);
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ list_del_init(&buf->list);
+ spin_unlock_irqrestore(&layer->irqlock, flags);
+}
- vb->state = VIDEOBUF_NEEDS_INIT;
+static void vpbe_wait_prepare(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ mutex_unlock(&layer->opslock);
}
-static struct videobuf_queue_ops video_qops = {
- .buf_setup = vpbe_buffer_setup,
+static void vpbe_wait_finish(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ mutex_lock(&layer->opslock);
+}
+
+static int vpbe_buffer_init(struct vb2_buffer *vb)
+{
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+ return 0;
+}
+
+static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ /* If buffer queue is empty, return error */
+ if (list_empty(&layer->dma_queue)) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
+ return -EINVAL;
+ }
+ /* Get the next frame from the buffer queue */
+ layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&layer->cur_frm->list);
+ /* Mark state of the current frame to active */
+ layer->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ /* Initialize field_id and started member */
+ layer->field_id = 0;
+
+ /* Set parameters in OSD and VENC */
+ ret = vpbe_set_osd_display_params(fh->disp_dev, layer);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * if request format is yuv420 semiplanar, need to
+ * enable both video windows
+ */
+ layer->started = 1;
+ layer->layer_first_int = 1;
+
+ return ret;
+}
+
+static int vpbe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ if (!vb2_is_streaming(vq))
+ return 0;
+
+ /* release all active buffers */
+ while (!list_empty(&layer->dma_queue)) {
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ list_del(&layer->next_frm->list);
+ vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ return 0;
+}
+
+static struct vb2_ops video_qops = {
+ .queue_setup = vpbe_buffer_queue_setup,
+ .wait_prepare = vpbe_wait_prepare,
+ .wait_finish = vpbe_wait_finish,
+ .buf_init = vpbe_buffer_init,
.buf_prepare = vpbe_buffer_prepare,
+ .start_streaming = vpbe_start_streaming,
+ .stop_streaming = vpbe_stop_streaming,
+ .buf_cleanup = vpbe_buf_cleanup,
.buf_queue = vpbe_buffer_queue,
- .buf_release = vpbe_buffer_release,
};
static
@@ -345,7 +427,7 @@ static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
unsigned long addr;
int ret;
- addr = videobuf_to_dma_contig(layer->cur_frm);
+ addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb, 0);
/* Set address in the display registers */
osd_device->ops.start_layer(osd_device,
layer->layer_info.id,
@@ -620,9 +702,12 @@ static int vpbe_display_querycap(struct file *file, void *priv,
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
cap->version = VPBE_DISPLAY_VERSION_CODE;
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- strlcpy(cap->driver, VPBE_DISPLAY_DRIVER, sizeof(cap->driver));
- strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ snprintf(cap->driver, sizeof(cap->driver), "%s",
+ dev_name(vpbe_dev->pdev));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpbe_dev->pdev));
strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
return 0;
@@ -1161,7 +1246,7 @@ static int vpbe_display_streamoff(struct file *file, void *priv,
osd_device->ops.disable_layer(osd_device,
layer->layer_info.id);
layer->started = 0;
- ret = videobuf_streamoff(&layer->buffer_queue);
+ ret = vb2_streamoff(&layer->buffer_queue, buf_type);
return ret;
}
@@ -1199,46 +1284,15 @@ static int vpbe_display_streamon(struct file *file, void *priv,
}
/*
- * Call videobuf_streamon to start streaming
+ * Call vb2_streamon to start streaming
* in videobuf
*/
- ret = videobuf_streamon(&layer->buffer_queue);
+ ret = vb2_streamon(&layer->buffer_queue, buf_type);
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev,
- "error in videobuf_streamon\n");
+ "error in vb2_streamon\n");
return ret;
}
- /* If buffer queue is empty, return error */
- if (list_empty(&layer->dma_queue)) {
- v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
- goto streamoff;
- }
- /* Get the next frame from the buffer queue */
- layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
- struct videobuf_buffer, queue);
- /* Remove buffer from the buffer queue */
- list_del(&layer->cur_frm->queue);
- /* Mark state of the current frame to active */
- layer->cur_frm->state = VIDEOBUF_ACTIVE;
- /* Initialize field_id and started member */
- layer->field_id = 0;
-
- /* Set parameters in OSD and VENC */
- ret = vpbe_set_osd_display_params(disp_dev, layer);
- if (ret < 0)
- goto streamoff;
-
- /*
- * if request format is yuv420 semiplanar, need to
- * enable both video windows
- */
- layer->started = 1;
-
- layer->layer_first_int = 1;
-
- return ret;
-streamoff:
- ret = videobuf_streamoff(&layer->buffer_queue);
return ret;
}
@@ -1265,10 +1319,10 @@ static int vpbe_display_dqbuf(struct file *file, void *priv,
}
if (file->f_flags & O_NONBLOCK)
/* Call videobuf_dqbuf for non blocking mode */
- ret = videobuf_dqbuf(&layer->buffer_queue, buf, 1);
+ ret = vb2_dqbuf(&layer->buffer_queue, buf, 1);
else
/* Call videobuf_dqbuf for blocking mode */
- ret = videobuf_dqbuf(&layer->buffer_queue, buf, 0);
+ ret = vb2_dqbuf(&layer->buffer_queue, buf, 0);
return ret;
}
@@ -1295,7 +1349,7 @@ static int vpbe_display_qbuf(struct file *file, void *priv,
return -EACCES;
}
- return videobuf_qbuf(&layer->buffer_queue, p);
+ return vb2_qbuf(&layer->buffer_queue, p);
}
static int vpbe_display_querybuf(struct file *file, void *priv,
@@ -1304,7 +1358,6 @@ static int vpbe_display_querybuf(struct file *file, void *priv,
struct vpbe_fh *fh = file->private_data;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"VIDIOC_QUERYBUF, layer id = %d\n",
@@ -1314,11 +1367,8 @@ static int vpbe_display_querybuf(struct file *file, void *priv,
v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
return -EINVAL;
}
-
- /* Call videobuf_querybuf to get information */
- ret = videobuf_querybuf(&layer->buffer_queue, buf);
-
- return ret;
+ /* Call vb2_querybuf to get information */
+ return vb2_querybuf(&layer->buffer_queue, buf);
}
static int vpbe_display_reqbufs(struct file *file, void *priv,
@@ -1327,8 +1377,8 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
struct vpbe_fh *fh = file->private_data;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vb2_queue *q;
int ret;
-
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
@@ -1342,15 +1392,26 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
return -EBUSY;
}
/* Initialize videobuf queue as per the buffer type */
- videobuf_queue_dma_contig_init(&layer->buffer_queue,
- &video_qops,
- vpbe_dev->pdev,
- &layer->irqlock,
- V4L2_BUF_TYPE_VIDEO_OUTPUT,
- layer->pix_fmt.field,
- sizeof(struct videobuf_buffer),
- fh, NULL);
+ layer->alloc_ctx = vb2_dma_contig_init_ctx(vpbe_dev->pdev);
+ if (!layer->alloc_ctx) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to get the context\n");
+ return -EINVAL;
+ }
+ q = &layer->buffer_queue;
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fh;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
+ ret = vb2_queue_init(q);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "vb2_queue_init() failed\n");
+ vb2_dma_contig_cleanup_ctx(layer->alloc_ctx);
+ return ret;
+ }
/* Set io allowed member of file handle to TRUE */
fh->io_allowed = 1;
/* Increment io usrs member of layer object to 1 */
@@ -1360,9 +1421,7 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
/* Initialize buffer queue */
INIT_LIST_HEAD(&layer->dma_queue);
/* Allocate buffers */
- ret = videobuf_reqbufs(&layer->buffer_queue, req_buf);
-
- return ret;
+ return vb2_reqbufs(q, req_buf);
}
/*
@@ -1381,7 +1440,7 @@ static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
if (mutex_lock_interruptible(&layer->opslock))
return -ERESTARTSYS;
- ret = videobuf_mmap_mapper(&layer->buffer_queue, vma);
+ ret = vb2_mmap(&layer->buffer_queue, vma);
mutex_unlock(&layer->opslock);
return ret;
}
@@ -1398,7 +1457,7 @@ static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
if (layer->started) {
mutex_lock(&layer->opslock);
- err = videobuf_poll_stream(filep, &layer->buffer_queue, wait);
+ err = vb2_poll(&layer->buffer_queue, filep, wait);
mutex_unlock(&layer->opslock);
}
return err;
@@ -1488,8 +1547,8 @@ static int vpbe_display_release(struct file *file)
layer->layer_info.id);
layer->started = 0;
/* Free buffers allocated */
- videobuf_queue_cancel(&layer->buffer_queue);
- videobuf_mmap_free(&layer->buffer_queue);
+ vb2_queue_release(&layer->buffer_queue);
+ vb2_dma_contig_cleanup_ctx(&layer->buffer_queue);
}
/* Decrement layer usrs counter */
@@ -1603,8 +1662,8 @@ static int vpbe_device_get(struct device *dev, void *data)
return 0;
}
-static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
- struct platform_device *pdev)
+static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
{
struct vpbe_layer *vpbe_display_layer = NULL;
struct video_device *vbd = NULL;
@@ -1659,9 +1718,10 @@ static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
return 0;
}
-static __devinit int register_device(struct vpbe_layer *vpbe_display_layer,
- struct vpbe_display *disp_dev,
- struct platform_device *pdev) {
+static int register_device(struct vpbe_layer *vpbe_display_layer,
+ struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
int err;
v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
@@ -1693,7 +1753,7 @@ static __devinit int register_device(struct vpbe_layer *vpbe_display_layer,
* This function creates device entries by register itself to the V4L2 driver
* and initializes fields of each layer objects
*/
-static __devinit int vpbe_display_probe(struct platform_device *pdev)
+static int vpbe_display_probe(struct platform_device *pdev)
{
struct vpbe_layer *vpbe_display_layer;
struct vpbe_display *disp_dev;
@@ -1827,7 +1887,7 @@ static struct platform_driver vpbe_display_driver = {
.bus = &platform_bus_type,
},
.probe = vpbe_display_probe,
- .remove = __devexit_p(vpbe_display_remove),
+ .remove = vpbe_display_remove,
};
module_platform_driver(vpbe_display_driver);
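
vpbe_display is converted from the legacy videobuf layer to videobuf2; the reqbufs hunk above shows the new queue setup and the vb2_ops table replaces the old videobuf_queue_ops. A condensed, self-contained outline of what a vb2 MMAP/USERPTR output queue needs is sketched below; the buffer struct is a generic placeholder rather than the driver's vpbe_disp_buffer.

#include <linux/list.h>
#include <linux/string.h>
#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

struct example_buffer {		/* driver buffer wrapping struct vb2_buffer */
	struct vb2_buffer vb;
	struct list_head list;
};

static int example_queue_init(struct vb2_queue *q, void *drv_priv,
			      const struct vb2_ops *ops)
{
	memset(q, 0, sizeof(*q));
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = drv_priv;
	q->ops = ops;			/* queue_setup/buf_prepare/buf_queue/... */
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct example_buffer);
	return vb2_queue_init(q);
}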
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index bba299dbf396..707f243f810d 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -62,7 +62,7 @@ static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 val = readl(addr) | mask;
writel(val, addr);
@@ -74,7 +74,7 @@ static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 val = readl(addr) & ~mask;
writel(val, addr);
@@ -87,7 +87,7 @@ static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 new_val = (readl(addr) & ~mask) | (val & mask);
writel(new_val, addr);
@@ -1559,8 +1559,7 @@ static int osd_probe(struct platform_device *pdev)
ret = -ENODEV;
goto free_mem;
}
- osd->osd_base = (unsigned long)ioremap_nocache(res->start,
- osd->osd_size);
+ osd->osd_base = ioremap_nocache(res->start, osd->osd_size);
if (!osd->osd_base) {
dev_err(osd->dev, "Unable to map the OSD region\n");
ret = -ENODEV;
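
vpbe_osd stops carrying MMIO addresses in a plain integer and keeps the ioremap_nocache() result as void __iomem *, which is what readl()/writel() expect and what sparse can check. Minimal read-modify-write sketch in that style (offsets are illustrative):

#include <linux/io.h>
#include <linux/types.h>

static u32 example_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	void __iomem *addr = base + offset;
	u32 new_val = (readl(addr) & ~mask) | (val & mask);

	writel(new_val, addr);
	return new_val;
}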
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 8be492cd8ed4..be9d3e1b4868 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1831,7 +1831,7 @@ static struct vpfe_device *vpfe_initialize(void)
* itself to the V4L2 driver and initializes fields of each
* device objects
*/
-static __devinit int vpfe_probe(struct platform_device *pdev)
+static int vpfe_probe(struct platform_device *pdev)
{
struct vpfe_subdev_info *sdinfo;
struct vpfe_config *vpfe_cfg;
@@ -2038,7 +2038,7 @@ probe_free_dev_mem:
/*
* vpfe_remove : It un-register device from V4L2 driver
*/
-static int __devexit vpfe_remove(struct platform_device *pdev)
+static int vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
@@ -2075,7 +2075,7 @@ static struct platform_driver vpfe_driver = {
.pm = &vpfe_dev_pm_ops,
},
.probe = vpfe_probe,
- .remove = __devexit_p(vpfe_remove),
+ .remove = vpfe_remove,
};
module_platform_driver(vpfe_driver);
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index cff3c0ab501f..28638a86f129 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -419,7 +419,7 @@ int vpif_channel_getfid(u8 channel_id)
}
EXPORT_SYMBOL(vpif_channel_getfid);
-static int __devinit vpif_probe(struct platform_device *pdev)
+static int vpif_probe(struct platform_device *pdev)
{
int status = 0;
@@ -444,7 +444,7 @@ static int __devinit vpif_probe(struct platform_device *pdev)
status = PTR_ERR(vpif_clk);
goto clk_fail;
}
- clk_enable(vpif_clk);
+ clk_prepare_enable(vpif_clk);
spin_lock_init(&vpif_lock);
dev_info(&pdev->dev, "vpif probe success\n");
@@ -457,10 +457,10 @@ fail:
return status;
}
-static int __devexit vpif_remove(struct platform_device *pdev)
+static int vpif_remove(struct platform_device *pdev)
{
if (vpif_clk) {
- clk_disable(vpif_clk);
+ clk_disable_unprepare(vpif_clk);
clk_put(vpif_clk);
}
@@ -472,13 +472,13 @@ static int __devexit vpif_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int vpif_suspend(struct device *dev)
{
- clk_disable(vpif_clk);
+ clk_disable_unprepare(vpif_clk);
return 0;
}
static int vpif_resume(struct device *dev)
{
- clk_enable(vpif_clk);
+ clk_prepare_enable(vpif_clk);
return 0;
}
@@ -498,7 +498,7 @@ static struct platform_driver vpif_driver = {
.owner = THIS_MODULE,
.pm = vpif_pm_ops,
},
- .remove = __devexit_p(vpif_remove),
+ .remove = vpif_remove,
.probe = vpif_probe,
};
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index fcabc023885d..a409ccefb380 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -201,13 +201,16 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
struct vpif_cap_buffer *buf = container_of(vb,
struct vpif_cap_buffer, vb);
struct common_obj *common;
+ unsigned long flags;
common = &ch->common[VPIF_VIDEO_INDEX];
vpif_dbg(2, debug, "vpif_buffer_queue\n");
+ spin_lock_irqsave(&common->irqlock, flags);
/* add the buffer to the DMA queue */
list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
}
/**
@@ -278,10 +281,13 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct vpif_params *vpif = &ch->vpifparams;
unsigned long addr = 0;
+ unsigned long flags;
int ret;
- /* If buffer queue is empty, return error */
+ /* If buffer queue is empty, return error */
+ spin_lock_irqsave(&common->irqlock, flags);
if (list_empty(&common->dma_queue)) {
+ spin_unlock_irqrestore(&common->irqlock, flags);
vpif_dbg(1, debug, "buffer queue is empty\n");
return -EIO;
}
@@ -291,6 +297,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct vpif_cap_buffer, list);
/* Remove buffer from the buffer queue */
list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
/* Mark state of the current frame to active */
common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
/* Initialize field_id and started member */
@@ -362,6 +369,7 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
if (!vb2_is_streaming(vq))
return 0;
@@ -369,12 +377,14 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
common = &ch->common[VPIF_VIDEO_INDEX];
/* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
list_del(&common->next_frm->list);
vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return 0;
}
@@ -420,10 +430,12 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
{
unsigned long addr = 0;
+ spin_lock(&common->irqlock);
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
@@ -468,8 +480,12 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
/* Check the field format */
if (1 == ch->vpifparams.std_info.frm_fmt) {
/* Progressive mode */
- if (list_empty(&common->dma_queue))
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
if (!channel_first_int[i][channel_id])
vpif_process_buffer_complete(common);
@@ -513,9 +529,13 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
vpif_process_buffer_complete(common);
} else if (1 == fid) {
/* odd field */
+ spin_lock(&common->irqlock);
if (list_empty(&common->dma_queue) ||
- (common->cur_frm != common->next_frm))
+ (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
vpif_schedule_next_buffer(common);
}
@@ -1004,9 +1024,9 @@ static int vpif_reqbufs(struct file *file, void *priv,
/* Initialize videobuf2 queue as per the buffer type */
common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
- if (!common->alloc_ctx) {
+ if (IS_ERR(common->alloc_ctx)) {
vpif_err("Failed to get the context\n");
- return -EINVAL;
+ return PTR_ERR(common->alloc_ctx);
}
q = &common->buffer_queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1715,7 +1735,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -EINVAL;
return ret;
}
@@ -1735,7 +1755,7 @@ vpif_query_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -ENODATA;
return ret;
}
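Two recurring fixes in the vpif_capture.c hunks above: every access to the shared dma_queue is now protected by the channel's irqlock (spin_lock_irqsave() in process context, a plain spin_lock() inside the ISR where the interrupt state need not be saved), and the always-false `ret == -ENOIOCTLCMD && ret == -ENODEV` test becomes an `||` so either error maps to the documented return value. A condensed sketch of the locking side, with hypothetical structure and function names:

/* Illustrative sketch; struct and function names are hypothetical. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct queue_ctx {
	spinlock_t irqlock;
	struct list_head dma_queue;
};

/* process context: save and restore the interrupt state */
static void queue_buffer(struct queue_ctx *q, struct list_head *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&q->irqlock, flags);
	list_add_tail(buf, &q->dma_queue);
	spin_unlock_irqrestore(&q->irqlock, flags);
}

/* interrupt context: a plain spin_lock() is sufficient here */
static struct list_head *next_buffer(struct queue_ctx *q)
{
	struct list_head *buf = NULL;

	spin_lock(&q->irqlock);
	if (!list_empty(&q->dma_queue)) {
		buf = q->dma_queue.next;
		list_del(buf);
	}
	spin_unlock(&q->irqlock);
	return buf;
}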
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index b716fbd4241f..9f2b603be9c9 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -177,11 +177,14 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
struct vpif_disp_buffer, vb);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
common = &ch->common[VPIF_VIDEO_INDEX];
/* add the buffer to the DMA queue */
+ spin_lock_irqsave(&common->irqlock, flags);
list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
}
/*
@@ -246,10 +249,13 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct vpif_params *vpif = &ch->vpifparams;
unsigned long addr = 0;
+ unsigned long flags;
int ret;
/* If buffer queue is empty, return error */
+ spin_lock_irqsave(&common->irqlock, flags);
if (list_empty(&common->dma_queue)) {
+ spin_unlock_irqrestore(&common->irqlock, flags);
vpif_err("buffer queue is empty\n");
return -EIO;
}
@@ -260,6 +266,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct vpif_disp_buffer, list);
list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
/* Mark state of the current frame to active */
common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
@@ -330,6 +337,7 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
if (!vb2_is_streaming(vq))
return 0;
@@ -337,12 +345,14 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
common = &ch->common[VPIF_VIDEO_INDEX];
/* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_disp_buffer, list);
list_del(&common->next_frm->list);
vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return 0;
}
@@ -363,11 +373,13 @@ static void process_progressive_mode(struct common_obj *common)
{
unsigned long addr = 0;
+ spin_lock(&common->irqlock);
/* Get the next buffer from buffer queue */
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_disp_buffer, list);
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
/* Mark status of the buffer as active */
common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
@@ -398,16 +410,18 @@ static void process_interlaced_mode(int fid, struct common_obj *common)
common->cur_frm = common->next_frm;
} else if (1 == fid) { /* odd field */
+ spin_lock(&common->irqlock);
if (list_empty(&common->dma_queue)
|| (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
return;
}
+ spin_unlock(&common->irqlock);
/* one field is displayed configure the next
* frame if it is available else hold on current
* frame */
/* Get next from the buffer queue */
process_progressive_mode(common);
-
}
}
@@ -437,8 +451,12 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
continue;
if (1 == ch->vpifparams.std_info.frm_fmt) {
- if (list_empty(&common->dma_queue))
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
/* Progressive mode */
if (!channel_first_int[i][channel_id]) {
@@ -972,9 +990,9 @@ static int vpif_reqbufs(struct file *file, void *priv,
}
/* Initialize videobuf2 queue as per the buffer type */
common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
- if (!common->alloc_ctx) {
+ if (IS_ERR(common->alloc_ctx)) {
vpif_err("Failed to get the context\n");
- return -EINVAL;
+ return PTR_ERR(common->alloc_ctx);
}
q = &common->buffer_queue;
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
@@ -1380,7 +1398,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -EINVAL;
return ret;
}
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 146e4b01ac17..684e815a81b6 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -25,7 +25,6 @@
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/io.h>
-#include <mach/hardware.h>
#include <media/davinci/vpss.h>
MODULE_LICENSE("GPL");
@@ -357,7 +356,7 @@ void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
}
EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
-static int __devinit vpss_probe(struct platform_device *pdev)
+static int vpss_probe(struct platform_device *pdev)
{
struct resource *r1, *r2;
char *platform_name;
@@ -445,7 +444,7 @@ fail1:
return status;
}
-static int __devexit vpss_remove(struct platform_device *pdev)
+static int vpss_remove(struct platform_device *pdev)
{
struct resource *res;
@@ -465,7 +464,7 @@ static struct platform_driver vpss_driver = {
.name = "vpss",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(vpss_remove),
+ .remove = vpss_remove,
.probe = vpss_probe,
};
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 19cbb12a12a2..c1a07133cc56 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -982,7 +982,7 @@ static void *gsc_get_drv_data(struct platform_device *pdev)
match = of_match_node(of_match_ptr(exynos_gsc_match),
pdev->dev.of_node);
if (match)
- driver_data = match->data;
+ driver_data = (struct gsc_driverdata *)match->data;
} else {
driver_data = (struct gsc_driverdata *)
platform_get_device_id(pdev)->driver_data;
@@ -1098,11 +1098,9 @@ static int gsc_probe(struct platform_device *pdev)
mutex_init(&gsc->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gsc->regs = devm_request_and_ioremap(dev, res);
- if (!gsc->regs) {
- dev_err(dev, "failed to map registers\n");
- return -ENOENT;
- }
+ gsc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gsc->regs))
+ return PTR_ERR(gsc->regs);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -1151,7 +1149,7 @@ err_clk:
return ret;
}
-static int __devexit gsc_remove(struct platform_device *pdev)
+static int gsc_remove(struct platform_device *pdev)
{
struct gsc_dev *gsc = platform_get_drvdata(pdev);
@@ -1237,7 +1235,7 @@ static const struct dev_pm_ops gsc_pm_ops = {
static struct platform_driver gsc_driver = {
.probe = gsc_probe,
- .remove = __devexit_p(gsc_remove),
+ .remove = gsc_remove,
.id_table = gsc_driver_ids,
.driver = {
.name = GSC_MODULE_NAME,
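gsc-core.c (and mx2_emmaprp.c further down) move from devm_request_and_ioremap(), which returned NULL on failure, to devm_ioremap_resource(), which returns an ERR_PTR and logs its own error message; the vb2_dma_contig_init_ctx() result is checked with the same IS_ERR()/PTR_ERR() idiom. A minimal sketch of that idiom, with an illustrative helper name:

/* Sketch of the ERR_PTR-based mapping idiom; foo_map_regs() is hypothetical. */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_map_regs(struct platform_device *pdev, void __iomem **regs)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*regs))
		return PTR_ERR(*regs);	/* error already logged by the helper */

	return 0;
}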
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index c065d040ed94..c267c57c76fd 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -122,7 +122,7 @@ static void gsc_m2m_device_run(void *priv)
struct gsc_ctx *ctx = priv;
struct gsc_dev *gsc;
unsigned long flags;
- u32 ret;
+ int ret;
bool is_set = false;
if (WARN(!ctx, "null hardware context\n"))
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 31ac4dc69247..9115a2c8d075 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -352,8 +352,7 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
return 0;
buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue);
if (prev == NULL) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &vidq->active);
+ list_move_tail(&buf->vb.queue, &vidq->active);
dprintk(1, "Restarting video dma\n");
viu_stop_dma(vidq->dev);
@@ -367,8 +366,7 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &vidq->active);
+ list_move_tail(&buf->vb.queue, &vidq->active);
buf->vb.state = VIDEOBUF_ACTIVE;
dprintk(2, "[%p/%d] restart_queue - move to active\n",
buf, buf->vb.i);
@@ -1480,7 +1478,7 @@ static struct video_device viu_template = {
.current_norm = V4L2_STD_NTSC_M,
};
-static int __devinit viu_of_probe(struct platform_device *op)
+static int viu_of_probe(struct platform_device *op)
{
struct viu_dev *viu_dev;
struct video_device *vdev;
@@ -1617,7 +1615,7 @@ err:
return ret;
}
-static int __devexit viu_of_remove(struct platform_device *op)
+static int viu_of_remove(struct platform_device *op)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
@@ -1670,7 +1668,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_viu_of_match);
static struct platform_driver viu_of_platform_driver = {
.probe = viu_of_probe,
- .remove = __devexit_p(viu_of_remove),
+ .remove = viu_of_remove,
#ifdef CONFIG_PM
.suspend = viu_suspend,
.resume = viu_resume,
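Two small correctness and readability fixes above: gsc-m2m.c declares ret as int rather than u32 so negative error codes are preserved, and fsl-viu.c folds each list_del()/list_add_tail() pair into list_move_tail(). Conceptually the helper is just the fused form of the two calls it replaces; an illustrative re-implementation (not the kernel's exact definition):

static inline void my_list_move_tail(struct list_head *entry,
				     struct list_head *head)
{
	list_del(entry);		/* unlink from its current list */
	list_add_tail(entry, head);	/* append at the tail of @head */
}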
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 45164c4f8452..05c560f2ef06 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -218,15 +218,14 @@ static void dma_callback(void *data)
static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
int do_callback)
{
- struct deinterlace_q_data *s_q_data, *d_q_data;
+ struct deinterlace_q_data *s_q_data;
struct vb2_buffer *src_buf, *dst_buf;
struct deinterlace_dev *pcdev = ctx->dev;
struct dma_chan *chan = pcdev->dma_chan;
struct dma_device *dmadev = chan->device;
struct dma_async_tx_descriptor *tx;
unsigned int s_width, s_height;
- unsigned int d_width, d_height;
- unsigned int d_size, s_size;
+ unsigned int s_size;
dma_addr_t p_in, p_out;
enum dma_ctrl_flags flags;
@@ -238,11 +237,6 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
s_height = s_q_data->height;
s_size = s_width * s_height;
- d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
- d_width = d_q_data->width;
- d_height = d_q_data->height;
- d_size = d_width * d_height;
-
p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(src_buf, 0);
p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(dst_buf, 0);
if (!p_in || !p_out) {
@@ -1108,17 +1102,5 @@ static struct platform_driver deinterlace_pdrv = {
.owner = THIS_MODULE,
},
};
-
-static void __exit deinterlace_exit(void)
-{
- platform_driver_unregister(&deinterlace_pdrv);
-}
-
-static int __init deinterlace_init(void)
-{
- return platform_driver_register(&deinterlace_pdrv);
-}
-
-module_init(deinterlace_init);
-module_exit(deinterlace_exit);
+module_platform_driver(deinterlace_pdrv);
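m2m-deinterlace.c here and mx2_emmaprp.c below drop their hand-written init/exit functions in favour of module_platform_driver(), which generates the same boilerplate. Roughly, module_platform_driver(my_pdrv) expands to the following (names simplified for illustration):

static int __init my_pdrv_init(void)
{
	return platform_driver_register(&my_pdrv);
}
module_init(my_pdrv_init);

static void __exit my_pdrv_exit(void)
{
	platform_driver_unregister(&my_pdrv);
}
module_exit(my_pdrv_exit);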
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 2e2121e98133..7487d7208dea 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -839,7 +839,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &m2mtest_qops;
@@ -850,7 +850,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &m2mtest_qops;
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index bfa65079fc38..4b9e0a28616a 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -941,9 +941,9 @@ static int emmaprp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcdev);
- pcdev->base_emma = devm_request_and_ioremap(&pdev->dev, res_emma);
- if (!pcdev->base_emma) {
- ret = -ENXIO;
+ pcdev->base_emma = devm_ioremap_resource(&pdev->dev, res_emma);
+ if (IS_ERR(pcdev->base_emma)) {
+ ret = PTR_ERR(pcdev->base_emma);
goto rel_vdev;
}
@@ -1013,16 +1013,4 @@ static struct platform_driver emmaprp_pdrv = {
.owner = THIS_MODULE,
},
};
-
-static void __exit emmaprp_exit(void)
-{
- platform_driver_unregister(&emmaprp_pdrv);
-}
-
-static int __init emmaprp_init(void)
-{
- return platform_driver_register(&emmaprp_pdrv);
-}
-
-module_init(emmaprp_init);
-module_exit(emmaprp_exit);
+module_platform_driver(emmaprp_pdrv);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 993504015963..35cc526e6c93 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -44,8 +44,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <plat/cpu.h>
-#include <linux/omap-dma.h>
#include <video/omapvrfb.h>
#include <video/omapdss.h>
@@ -1174,13 +1172,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
/* set default crop and win */
omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
- /* Save the changes in the overlay strcuture */
- ret = omapvid_init(vout, 0);
- if (ret) {
- v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
- goto s_fmt_vid_out_exit;
- }
-
ret = 0;
s_fmt_vid_out_exit:
@@ -1684,20 +1675,6 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
omap_dispc_register_isr(omap_vout_isr, vout, mask);
- for (j = 0; j < ovid->num_overlays; j++) {
- struct omap_overlay *ovl = ovid->overlays[j];
-
- if (ovl->get_device(ovl)) {
- struct omap_overlay_info info;
- ovl->get_overlay_info(ovl, &info);
- info.paddr = addr;
- if (ovl->set_overlay_info(ovl, &info)) {
- ret = -EINVAL;
- goto streamon_err1;
- }
- }
- }
-
/* First save the configuration in the overlay structure */
ret = omapvid_init(vout, addr);
if (ret)
@@ -2064,7 +2041,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
vout->vid_info.id = k + 1;
/* Set VRFB as rotation_type for omap2 and omap3 */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
vout->vid_info.rotation_type = VOUT_ROT_VRFB;
/* Setup the default configuration for the video devices
@@ -2094,11 +2071,12 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
}
video_set_drvdata(vfd, vout);
- /* Configure the overlay structure */
- ret = omapvid_init(vid_dev->vouts[k], 0);
- if (!ret)
- goto success;
+ dev_info(&pdev->dev, ": registered and initialized"
+ " video device %d\n", vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+ continue;
error2:
if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
omap_vout_release_vrfb(vout);
@@ -2108,12 +2086,6 @@ error1:
error:
kfree(vout);
return ret;
-
-success:
- dev_info(&pdev->dev, ": registered and initialized"
- " video device %d\n", vfd->minor);
- if (k == (pdev->num_resources - 1))
- return 0;
}
return -ENODEV;
@@ -2186,14 +2158,23 @@ static int __init omap_vout_probe(struct platform_device *pdev)
struct omap_dss_device *def_display;
struct omap2video_device *vid_dev = NULL;
+ ret = omapdss_compat_init();
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init dss\n");
+ return ret;
+ }
+
if (pdev->num_resources == 0) {
dev_err(&pdev->dev, "probed for an unknown device\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_dss_init;
}
vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
- if (vid_dev == NULL)
- return -ENOMEM;
+ if (vid_dev == NULL) {
+ ret = -ENOMEM;
+ goto err_dss_init;
+ }
vid_dev->num_displays = 0;
for_each_dss_dev(dssdev) {
@@ -2288,6 +2269,8 @@ probe_err1:
}
probe_err0:
kfree(vid_dev);
+err_dss_init:
+ omapdss_compat_uninit();
return ret;
}
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 115408b9274f..80b0d88f125c 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
-#include <plat/cpu.h>
+#include <video/omapdss.h>
#include "omap_voutlib.h"
@@ -124,7 +124,7 @@ int omap_vout_new_window(struct v4l2_rect *crop,
win->chromakey = new_win->chromakey;
/* Adjust the cropping window to allow for resizing limitation */
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
/* For 24xx limit is 8x to 1/2x scaling. */
if ((crop->height/win->w.height) >= 2)
crop->height = win->w.height * 2;
@@ -140,7 +140,7 @@ int omap_vout_new_window(struct v4l2_rect *crop,
if (crop->height != win->w.height)
crop->width = 768;
}
- } else if (cpu_is_omap34xx()) {
+ } else if (omap_vout_dss_omap34xx()) {
/* For 34xx limit is 8x to 1/4x scaling. */
if ((crop->height/win->w.height) >= 4)
crop->height = win->w.height * 4;
@@ -196,7 +196,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.width <= 0 || try_crop.height <= 0)
return -EINVAL;
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
if (try_crop.height != win->w.height) {
/* If we're resizing vertically, we can't support a
* crop width wider than 768 pixels.
@@ -207,9 +207,9 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
/* vertical resizing */
vresize = (1024 * try_crop.height) / win->w.height;
- if (cpu_is_omap24xx() && (vresize > 2048))
+ if (omap_vout_dss_omap24xx() && (vresize > 2048))
vresize = 2048;
- else if (cpu_is_omap34xx() && (vresize > 4096))
+ else if (omap_vout_dss_omap34xx() && (vresize > 4096))
vresize = 4096;
win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
@@ -226,9 +226,9 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
/* horizontal resizing */
hresize = (1024 * try_crop.width) / win->w.width;
- if (cpu_is_omap24xx() && (hresize > 2048))
+ if (omap_vout_dss_omap24xx() && (hresize > 2048))
hresize = 2048;
- else if (cpu_is_omap34xx() && (hresize > 4096))
+ else if (omap_vout_dss_omap34xx() && (hresize > 4096))
hresize = 4096;
win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
@@ -243,7 +243,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.width == 0)
try_crop.width = 2;
}
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
if ((try_crop.height/win->w.height) >= 2)
try_crop.height = win->w.height * 2;
@@ -258,7 +258,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.height != win->w.height)
try_crop.width = 768;
}
- } else if (cpu_is_omap34xx()) {
+ } else if (omap_vout_dss_omap34xx()) {
if ((try_crop.height/win->w.height) >= 4)
try_crop.height = win->w.height * 4;
@@ -337,3 +337,21 @@ void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
}
free_pages((unsigned long) virtaddr, order);
}
+
+bool omap_vout_dss_omap24xx(void)
+{
+ return omapdss_get_version() == OMAPDSS_VER_OMAP24xx;
+}
+
+bool omap_vout_dss_omap34xx(void)
+{
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return true;
+ default:
+ return false;
+ }
+}
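With <plat/cpu.h> gone, omap_voutlib.c keys its scaler limits off the DSS version reported by omapdss_get_version(), via the two helpers added above, instead of cpu_is_omap24xx()/cpu_is_omap34xx(). A hypothetical caller mapping the family to its maximum vertical downscale factor could look like:

/* Illustrative only: a hypothetical helper built on the new DSS checks. */
static unsigned int vout_max_vert_downscale(void)
{
	if (omap_vout_dss_omap24xx())
		return 2;	/* 24xx: limit is 8x to 1/2x scaling */
	if (omap_vout_dss_omap34xx())
		return 4;	/* 34xx: limit is 8x to 1/4x scaling */
	return 1;		/* unknown DSS: assume no downscaling */
}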
diff --git a/drivers/media/platform/omap/omap_voutlib.h b/drivers/media/platform/omap/omap_voutlib.h
index e51750a597e3..f9d1c0779f33 100644
--- a/drivers/media/platform/omap/omap_voutlib.h
+++ b/drivers/media/platform/omap/omap_voutlib.h
@@ -32,5 +32,8 @@ void omap_vout_new_format(struct v4l2_pix_format *pix,
struct v4l2_window *win);
unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
+
+bool omap_vout_dss_omap24xx(void);
+bool omap_vout_dss_omap34xx(void);
#endif /* #ifndef OMAP_VOUTLIB_H */
diff --git a/drivers/media/platform/omap24xxcam.c b/drivers/media/platform/omap24xxcam.c
index 70f45c381318..8b7ccea982e7 100644
--- a/drivers/media/platform/omap24xxcam.c
+++ b/drivers/media/platform/omap24xxcam.c
@@ -1736,7 +1736,7 @@ static struct v4l2_int_device omap24xxcam = {
*
*/
-static int __devinit omap24xxcam_probe(struct platform_device *pdev)
+static int omap24xxcam_probe(struct platform_device *pdev)
{
struct omap24xxcam_device *cam;
struct resource *mem;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 7f182f0ff3da..e4aaee91201d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -71,8 +71,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
-#include <plat/cpu.h>
-
#include "isp.h"
#include "ispreg.h"
#include "ispccdc.h"
@@ -103,7 +101,8 @@ static const struct isp_res_mapping isp_res_maps[] = {
1 << OMAP3_ISP_IOMEM_RESZ |
1 << OMAP3_ISP_IOMEM_SBL |
1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
- 1 << OMAP3_ISP_IOMEM_CSIPHY2,
+ 1 << OMAP3_ISP_IOMEM_CSIPHY2 |
+ 1 << OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
},
{
.isp_rev = ISP_REVISION_15_0,
@@ -120,7 +119,8 @@ static const struct isp_res_mapping isp_res_maps[] = {
1 << OMAP3_ISP_IOMEM_CSI2A_REGS2 |
1 << OMAP3_ISP_IOMEM_CSI2C_REGS1 |
1 << OMAP3_ISP_IOMEM_CSIPHY1 |
- 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2,
+ 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2 |
+ 1 << OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
},
};
@@ -1331,7 +1331,8 @@ void omap3isp_subclk_disable(struct isp_device *isp,
* isp_enable_clocks - Enable ISP clocks
* @isp: OMAP3 ISP device
*
- * Return 0 if successful, or clk_enable return value if any of tthem fails.
+ * Return 0 if successful, or clk_prepare_enable return value if any of them
+ * fails.
*/
static int isp_enable_clocks(struct isp_device *isp)
{
@@ -1348,14 +1349,11 @@ static int isp_enable_clocks(struct isp_device *isp)
* has to be twice of what is set on OMAP3430 to get
* the required value for cam_mclk
*/
- if (cpu_is_omap3630())
- divisor = 1;
- else
- divisor = 2;
+ divisor = isp->revision == ISP_REVISION_15_0 ? 1 : 2;
- r = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
if (r) {
- dev_err(isp->dev, "clk_enable cam_ick failed\n");
+ dev_err(isp->dev, "failed to enable cam_ick clock\n");
goto out_clk_enable_ick;
}
r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK],
@@ -1364,9 +1362,9 @@ static int isp_enable_clocks(struct isp_device *isp)
dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n");
goto out_clk_enable_mclk;
}
- r = clk_enable(isp->clock[ISP_CLK_CAM_MCLK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
if (r) {
- dev_err(isp->dev, "clk_enable cam_mclk failed\n");
+ dev_err(isp->dev, "failed to enable cam_mclk clock\n");
goto out_clk_enable_mclk;
}
rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
@@ -1374,17 +1372,17 @@ static int isp_enable_clocks(struct isp_device *isp)
dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
" expected : %d\n"
" actual : %ld\n", CM_CAM_MCLK_HZ, rate);
- r = clk_enable(isp->clock[ISP_CLK_CSI2_FCK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
if (r) {
- dev_err(isp->dev, "clk_enable csi2_fck failed\n");
+ dev_err(isp->dev, "failed to enable csi2_fck clock\n");
goto out_clk_enable_csi2_fclk;
}
return 0;
out_clk_enable_csi2_fclk:
- clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
out_clk_enable_mclk:
- clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
out_clk_enable_ick:
return r;
}
@@ -1395,9 +1393,9 @@ out_clk_enable_ick:
*/
static void isp_disable_clocks(struct isp_device *isp)
{
- clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
- clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
- clk_disable(isp->clock[ISP_CLK_CSI2_FCK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
}
static const char *isp_clocks[] = {
@@ -1678,7 +1676,7 @@ isp_register_subdev_group(struct isp_device *isp,
adapter = i2c_get_adapter(board_info->i2c_adapter_id);
if (adapter == NULL) {
- printk(KERN_ERR "%s: Unable to get I2C adapter %d for "
+ dev_err(isp->dev, "%s: Unable to get I2C adapter %d for "
"device %s\n", __func__,
board_info->i2c_adapter_id,
board_info->board_info->type);
@@ -1688,7 +1686,7 @@ isp_register_subdev_group(struct isp_device *isp,
subdev = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
board_info->board_info, NULL);
if (subdev == NULL) {
- printk(KERN_ERR "%s: Unable to register subdev %s\n",
+ dev_err(isp->dev, "%s: Unable to register subdev %s\n",
__func__, board_info->board_info->type);
continue;
}
@@ -1713,7 +1711,7 @@ static int isp_register_entities(struct isp_device *isp)
isp->media_dev.link_notify = isp_pipeline_link_notify;
ret = media_device_register(&isp->media_dev);
if (ret < 0) {
- printk(KERN_ERR "%s: Media device registration failed (%d)\n",
+ dev_err(isp->dev, "%s: Media device registration failed (%d)\n",
__func__, ret);
return ret;
}
@@ -1721,7 +1719,7 @@ static int isp_register_entities(struct isp_device *isp)
isp->v4l2_dev.mdev = &isp->media_dev;
ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
if (ret < 0) {
- printk(KERN_ERR "%s: V4L2 device registration failed (%d)\n",
+ dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
__func__, ret);
goto done;
}
@@ -1766,6 +1764,7 @@ static int isp_register_entities(struct isp_device *isp)
struct media_entity *input;
unsigned int flags;
unsigned int pad;
+ unsigned int i;
sensor = isp_register_subdev_group(isp, subdevs->subdevs);
if (sensor == NULL)
@@ -1807,13 +1806,25 @@ static int isp_register_entities(struct isp_device *isp)
break;
default:
- printk(KERN_ERR "%s: invalid interface type %u\n",
- __func__, subdevs->interface);
+ dev_err(isp->dev, "%s: invalid interface type %u\n",
+ __func__, subdevs->interface);
ret = -EINVAL;
goto done;
}
- ret = media_entity_create_link(&sensor->entity, 0, input, pad,
+ for (i = 0; i < sensor->entity.num_pads; i++) {
+ if (sensor->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == sensor->entity.num_pads) {
+ dev_err(isp->dev,
+ "%s: no source pad in external entity\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = media_entity_create_link(&sensor->entity, i, input, pad,
flags);
if (ret < 0)
goto done;
@@ -1979,7 +1990,7 @@ error_csiphy:
*
* Always returns 0.
*/
-static int __devexit isp_remove(struct platform_device *pdev)
+static int isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
int i;
@@ -2060,7 +2071,7 @@ static int isp_map_mem_resource(struct platform_device *pdev,
* -EINVAL if couldn't install ISR,
* or clk_get return error value.
*/
-static int __devinit isp_probe(struct platform_device *pdev)
+static int isp_probe(struct platform_device *pdev)
{
struct isp_platform_data *pdata = pdev->dev.platform_data;
struct isp_device *isp;
@@ -2096,7 +2107,11 @@ static int __devinit isp_probe(struct platform_device *pdev)
isp->isp_csiphy1.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY1");
isp->isp_csiphy2.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY2");
- /* Clocks */
+ /* Clocks
+ *
+ * The ISP clock tree is revision-dependent. We thus need to enable ICLK
+ * manually to read the revision before calling __omap3isp_get().
+ */
ret = isp_map_mem_resource(pdev, isp, OMAP3_ISP_IOMEM_MAIN);
if (ret < 0)
goto error;
@@ -2105,6 +2120,16 @@ static int __devinit isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
+ ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ if (ret < 0)
+ goto error;
+
+ isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+ dev_info(isp->dev, "Revision %d.%d found\n",
+ (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
+
+ clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+
if (__omap3isp_get(isp, false) == NULL) {
ret = -ENODEV;
goto error;
@@ -2115,10 +2140,6 @@ static int __devinit isp_probe(struct platform_device *pdev)
goto error_isp;
/* Memory resources */
- isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
- dev_info(isp->dev, "Revision %d.%d found\n",
- (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
-
for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
if (isp->revision == isp_res_maps[m].isp_rev)
break;
@@ -2229,7 +2250,7 @@ MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
static struct platform_driver omap3isp_driver = {
.probe = isp_probe,
- .remove = __devexit_p(isp_remove),
+ .remove = isp_remove,
.id_table = omap3isp_id_table,
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 8d6866942b85..517d348ce32b 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -70,6 +70,8 @@ enum isp_mem_resources {
OMAP3_ISP_IOMEM_CSI2C_REGS1,
OMAP3_ISP_IOMEM_CSIPHY1,
OMAP3_ISP_IOMEM_CSI2C_REGS2,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
+ OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
OMAP3_ISP_IOMEM_LAST
};
@@ -125,9 +127,6 @@ struct isp_reg {
struct isp_platform_callback {
u32 (*set_xclk)(struct isp_device *isp, u32 xclk, u8 xclksel);
- int (*csiphy_config)(struct isp_csiphy *phy,
- struct isp_csiphy_dphy_cfg *dphy,
- struct isp_csiphy_lanes_cfg *lanes);
};
/*
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 6a3ff792af7d..783f4b05b153 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -517,7 +517,7 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
} while (soft_reset_retries < 5);
if (soft_reset_retries == 5) {
- printk(KERN_ERR "CSI2: Soft reset try count exceeded!\n");
+ dev_err(isp->dev, "CSI2: Soft reset try count exceeded!\n");
return -EBUSY;
}
@@ -535,8 +535,8 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
} while (--i > 0);
if (i == 0) {
- printk(KERN_ERR
- "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+ dev_err(isp->dev,
+ "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
return -EBUSY;
}
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 348f67ebbbc9..3d56b33f85e8 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -32,34 +32,92 @@
#include "ispreg.h"
#include "ispcsiphy.h"
-/*
- * csiphy_lanes_config - Configuration of CSIPHY lanes.
- *
- * Updates HW configuration.
- * Called with phy->mutex taken.
- */
-static void csiphy_lanes_config(struct isp_csiphy *phy)
+static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
+ bool ccp2_strobe)
{
- unsigned int i;
- u32 reg;
+ u32 reg = isp_reg_readl(
+ phy->isp, OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+ u32 shift, mode;
+
+ switch (iface) {
+ case ISP_INTERFACE_CCP2B_PHY1:
+ reg &= ~OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2C_PHY1:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ case ISP_INTERFACE_CCP2B_PHY2:
+ reg |= OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2A_PHY2:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ }
- reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+ /* Select data/clock or data/strobe mode for CCP2 */
+ switch (iface) {
+ case ISP_INTERFACE_CCP2B_PHY1:
+ case ISP_INTERFACE_CCP2B_PHY2:
+ if (ccp2_strobe)
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE;
+ else
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK;
+ }
- for (i = 0; i < phy->num_data_lanes; i++) {
- reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
- ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
- reg |= (phy->lanes.data[i].pol <<
- ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
- reg |= (phy->lanes.data[i].pos <<
- ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+ reg &= ~(OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK << shift);
+ reg |= mode << shift;
+
+ isp_reg_writel(phy->isp, reg,
+ OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+}
+
+static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
+ bool ccp2_strobe)
+{
+ u32 csirxfe = OMAP343X_CONTROL_CSIRXFE_PWRDNZ
+ | OMAP343X_CONTROL_CSIRXFE_RESET;
+
+ /* Only the CCP2B on PHY1 is configurable. */
+ if (iface != ISP_INTERFACE_CCP2B_PHY1)
+ return;
+
+ if (!on) {
+ isp_reg_writel(phy->isp, 0,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+ return;
}
- reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
- ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
- reg |= phy->lanes.clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
- reg |= phy->lanes.clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+ if (ccp2_strobe)
+ csirxfe |= OMAP343X_CONTROL_CSIRXFE_SELFORM;
- isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
+ isp_reg_writel(phy->isp, csirxfe,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+}
+
+/*
+ * Configure OMAP 3 CSI PHY routing.
+ * @phy: relevant phy device
+ * @iface: ISP_INTERFACE_*
+ * @on: power on or off
+ * @ccp2_strobe: false: data/clock, true: data/strobe
+ *
+ * Note that the underlying routing configuration registers are part of the
+ * control (SCM) register space and part of the CORE power domain on both 3430
+ * and 3630, so they will not hold their contents in off-mode. This isn't an
+ * issue since the MPU power domain is forced on whilst the ISP is in use.
+ */
+static void csiphy_routing_cfg(struct isp_csiphy *phy, u32 iface, bool on,
+ bool ccp2_strobe)
+{
+ if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL]
+ && on)
+ return csiphy_routing_cfg_3630(phy, iface, ccp2_strobe);
+ if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE])
+ return csiphy_routing_cfg_3430(phy, iface, on, ccp2_strobe);
}
/*
@@ -99,7 +157,7 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
} while ((reg != power >> 2) && (retry_count < 100));
if (retry_count == 100) {
- printk(KERN_ERR "CSI2 CIO set power failed!\n");
+ dev_err(phy->isp->dev, "CSI2 CIO set power failed!\n");
return -EBUSY;
}
@@ -107,43 +165,28 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
}
/*
- * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
- *
- * Called with phy->mutex taken.
+ * TCLK values are OK at their reset values
*/
-static void csiphy_dphy_config(struct isp_csiphy *phy)
-{
- u32 reg;
-
- /* Set up ISPCSIPHY_REG0 */
- reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0);
-
- reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
- ISPCSIPHY_REG0_THS_SETTLE_MASK);
- reg |= phy->dphy.ths_term << ISPCSIPHY_REG0_THS_TERM_SHIFT;
- reg |= phy->dphy.ths_settle << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
-
- isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
-
- /* Set up ISPCSIPHY_REG1 */
- reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1);
-
- reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
- ISPCSIPHY_REG1_TCLK_MISS_MASK |
- ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
- reg |= phy->dphy.tclk_term << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
- reg |= phy->dphy.tclk_miss << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
- reg |= phy->dphy.tclk_settle << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
-
- isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
-}
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
-static int csiphy_config(struct isp_csiphy *phy,
- struct isp_csiphy_dphy_cfg *dphy,
- struct isp_csiphy_lanes_cfg *lanes)
+static int omap3isp_csiphy_config(struct isp_csiphy *phy)
{
+ struct isp_csi2_device *csi2 = phy->csi2;
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+ struct isp_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
+ struct isp_csiphy_lanes_cfg *lanes;
+ int csi2_ddrclk_khz;
unsigned int used_lanes = 0;
unsigned int i;
+ u32 reg;
+
+ if (subdevs->interface == ISP_INTERFACE_CCP2B_PHY1
+ || subdevs->interface == ISP_INTERFACE_CCP2B_PHY2)
+ lanes = &subdevs->bus.ccp2.lanecfg;
+ else
+ lanes = &subdevs->bus.csi2.lanecfg;
/* Clock and data lanes verification */
for (i = 0; i < phy->num_data_lanes; i++) {
@@ -162,10 +205,61 @@ static int csiphy_config(struct isp_csiphy *phy,
if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
return -EINVAL;
- mutex_lock(&phy->mutex);
- phy->dphy = *dphy;
- phy->lanes = *lanes;
- mutex_unlock(&phy->mutex);
+ /*
+ * The PHY configuration is lost in off mode, that's not an
+ * issue since the MPU power domain is forced on whilst the
+ * ISP is in use.
+ */
+ csiphy_routing_cfg(phy, subdevs->interface, true,
+ subdevs->bus.ccp2.phy_layer);
+
+ /* DPHY timing configuration */
+ /* CSI-2 is DDR and we only count used lanes. */
+ csi2_ddrclk_khz = pipe->external_rate / 1000
+ / (2 * hweight32(used_lanes)) * pipe->external_width;
+
+ reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
+ ISPCSIPHY_REG0_THS_SETTLE_MASK);
+ /* THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1. */
+ reg |= (DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1)
+ << ISPCSIPHY_REG0_THS_TERM_SHIFT;
+ /* THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3. */
+ reg |= (DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3)
+ << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG1);
+
+ reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
+ ISPCSIPHY_REG1_TCLK_MISS_MASK |
+ ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
+ reg |= TCLK_TERM << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
+ reg |= TCLK_MISS << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
+ reg |= TCLK_SETTLE << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
+
+ /* DPHY lane configuration */
+ reg = isp_reg_readl(csi2->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+
+ for (i = 0; i < phy->num_data_lanes; i++) {
+ reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
+ ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
+ reg |= (lanes->data[i].pol <<
+ ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
+ reg |= (lanes->data[i].pos <<
+ ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+ }
+
+ reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
+ ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
+ reg |= lanes->clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
+ reg |= lanes->clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
return 0;
}
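/*
 * Illustrative aside, not part of the patch: a worked example of the timing
 * math above, assuming csi2_ddrclk_khz works out to 400000 (a 400 MHz DDR
 * clock, i.e. a 2.5 ns period):
 *   THS_TERM   = ceil(12.5 ns / 2.5 ns) - 1 = DIV_ROUND_UP(25 * 400000, 2000000) - 1 = 4
 *   THS_SETTLE = ceil(90 ns / 2.5 ns)  + 3 = DIV_ROUND_UP(90 * 400000, 1000000) + 3 = 39
 * The 12.5 ns term is written as 25/2 ns so the computation stays in
 * integer arithmetic.
 */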
@@ -190,8 +284,9 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
if (rval < 0)
goto done;
- csiphy_dphy_config(phy);
- csiphy_lanes_config(phy);
+ rval = omap3isp_csiphy_config(phy);
+ if (rval < 0)
+ goto done;
rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
if (rval) {
@@ -211,6 +306,14 @@ void omap3isp_csiphy_release(struct isp_csiphy *phy)
{
mutex_lock(&phy->mutex);
if (phy->phy_in_use) {
+ struct isp_csi2_device *csi2 = phy->csi2;
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&csi2->subdev.entity);
+ struct isp_v4l2_subdevs_group *subdevs =
+ pipe->external->host_priv;
+
+ csiphy_routing_cfg(phy, subdevs->interface, false,
+ subdevs->bus.ccp2.phy_layer);
csiphy_power_autoswitch_enable(phy, false);
csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
regulator_disable(phy->vdd);
@@ -227,8 +330,6 @@ int omap3isp_csiphy_init(struct isp_device *isp)
struct isp_csiphy *phy1 = &isp->isp_csiphy1;
struct isp_csiphy *phy2 = &isp->isp_csiphy2;
- isp->platform_cb.csiphy_config = csiphy_config;
-
phy2->isp = isp;
phy2->csi2 = &isp->isp_csi2a;
phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES;
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
index e93a661e65d9..14551fd77697 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.h
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -32,14 +32,6 @@
struct isp_csi2_device;
struct regulator;
-struct isp_csiphy_dphy_cfg {
- u8 ths_term;
- u8 ths_settle;
- u8 tclk_term;
- unsigned tclk_miss:1;
- u8 tclk_settle;
-};
-
struct isp_csiphy {
struct isp_device *isp;
struct mutex mutex; /* serialize csiphy configuration */
@@ -52,8 +44,6 @@ struct isp_csiphy {
unsigned int phy_regs;
u8 num_data_lanes; /* number of CSI2 Data Lanes supported */
- struct isp_csiphy_lanes_cfg lanes;
- struct isp_csiphy_dphy_cfg dphy;
};
int omap3isp_csiphy_acquire(struct isp_csiphy *phy);
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index e7f9c4292cc6..2d759c56f37c 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -74,11 +74,14 @@ static void hist_reset_mem(struct ispstat *hist)
static void hist_dma_config(struct ispstat *hist)
{
+ struct isp_device *isp = hist->isp;
+
hist->dma_config.data_type = OMAP_DMA_DATA_TYPE_S32;
hist->dma_config.sync_mode = OMAP_DMA_SYNC_ELEMENT;
hist->dma_config.frame_count = 1;
hist->dma_config.src_amode = OMAP_DMA_AMODE_CONSTANT;
- hist->dma_config.src_start = OMAP3ISP_HIST_REG_BASE + ISPHIST_DATA;
+ hist->dma_config.src_start = isp->mmio_base_phys[OMAP3_ISP_IOMEM_HIST]
+ + ISPHIST_DATA;
hist->dma_config.dst_amode = OMAP_DMA_AMODE_POST_INC;
hist->dma_config.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
}
@@ -479,6 +482,8 @@ int omap3isp_hist_init(struct isp_device *isp)
return -ENOMEM;
memset(hist, 0, sizeof(*hist));
+ hist->isp = isp;
+
if (HIST_CONFIG_DMA)
ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
hist_dma_cb, hist, &hist->dma_ch);
@@ -496,7 +501,6 @@ int omap3isp_hist_init(struct isp_device *isp)
hist->ops = &hist_ops;
hist->priv = hist_cfg;
hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
- hist->isp = isp;
ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
if (ret) {
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 1ae1c0909ed1..691b92a3c3e7 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -200,10 +200,10 @@ static void preview_enable_invalaw(struct isp_prev_device *prev, bool enable)
if (enable)
isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW);
+ ISPPRV_PCR_INVALAW);
else
isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW);
+ ISPPRV_PCR_INVALAW);
}
/*
@@ -1014,7 +1014,7 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
/*
* preview_config_input_format - Configure the input format
* @prev: The preview engine
- * @format: Format on the preview engine sink pad
+ * @info: Sink pad format information
*
* Enable and configure CFA interpolation for Bayer formats and disable it for
* greyscale formats.
@@ -1025,22 +1025,29 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
* reordered to support non-GRBG Bayer patterns.
*/
static void preview_config_input_format(struct isp_prev_device *prev,
- const struct v4l2_mbus_framefmt *format)
+ const struct isp_format_info *info)
{
struct isp_device *isp = to_isp_device(prev);
struct prev_params *params;
- switch (format->code) {
- case V4L2_MBUS_FMT_SGRBG10_1X10:
+ if (info->width == 8)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_WIDTH);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_WIDTH);
+
+ switch (info->flavor) {
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
prev->params.cfa_order = 0;
break;
- case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case V4L2_MBUS_FMT_SRGGB8_1X8:
prev->params.cfa_order = 1;
break;
- case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
prev->params.cfa_order = 2;
break;
- case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case V4L2_MBUS_FMT_SGBRG8_1X8:
prev->params.cfa_order = 3;
break;
default:
@@ -1081,7 +1088,8 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
- if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
+ if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
+ format->code != V4L2_MBUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
@@ -1389,6 +1397,7 @@ static unsigned int preview_max_out_width(struct isp_prev_device *prev)
static void preview_configure(struct isp_prev_device *prev)
{
struct isp_device *isp = to_isp_device(prev);
+ const struct isp_format_info *info;
struct v4l2_mbus_framefmt *format;
unsigned long flags;
u32 update;
@@ -1402,17 +1411,18 @@ static void preview_configure(struct isp_prev_device *prev)
/* PREV_PAD_SINK */
format = &prev->formats[PREV_PAD_SINK];
+ info = omap3isp_video_format_info(format->code);
preview_adjust_bandwidth(prev);
- preview_config_input_format(prev, format);
+ preview_config_input_format(prev, info);
preview_config_input_size(prev, active);
if (prev->input == PREVIEW_INPUT_CCDC)
preview_config_inlineoffset(prev, 0);
else
- preview_config_inlineoffset(prev,
- ALIGN(format->width, 0x20) * 2);
+ preview_config_inlineoffset(prev, ALIGN(format->width, 0x20) *
+ info->bpp);
preview_setup_hw(prev, update, active);
@@ -1709,6 +1719,11 @@ __preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
/* previewer format descriptions */
static const unsigned int preview_input_fmts[] = {
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SGBRG8_1X8,
V4L2_MBUS_FMT_Y10_1X10,
V4L2_MBUS_FMT_SGRBG10_1X10,
V4L2_MBUS_FMT_SRGGB10_1X10,
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
index e2c57f334c5d..b7d90e6fb01d 100644
--- a/drivers/media/platform/omap3isp/ispreg.h
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -29,83 +29,6 @@
#define CM_CAM_MCLK_HZ 172800000 /* Hz */
-/* ISP Submodules offset */
-
-#define L4_34XX_BASE 0x48000000
-#define OMAP3430_ISP_BASE (L4_34XX_BASE + 0xBC000)
-
-#define OMAP3ISP_REG_BASE OMAP3430_ISP_BASE
-#define OMAP3ISP_REG(offset) (OMAP3ISP_REG_BASE + (offset))
-
-#define OMAP3ISP_CCP2_REG_OFFSET 0x0400
-#define OMAP3ISP_CCP2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CCP2_REG_OFFSET)
-#define OMAP3ISP_CCP2_REG(offset) (OMAP3ISP_CCP2_REG_BASE + (offset))
-
-#define OMAP3ISP_CCDC_REG_OFFSET 0x0600
-#define OMAP3ISP_CCDC_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CCDC_REG_OFFSET)
-#define OMAP3ISP_CCDC_REG(offset) (OMAP3ISP_CCDC_REG_BASE + (offset))
-
-#define OMAP3ISP_HIST_REG_OFFSET 0x0A00
-#define OMAP3ISP_HIST_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_HIST_REG_OFFSET)
-#define OMAP3ISP_HIST_REG(offset) (OMAP3ISP_HIST_REG_BASE + (offset))
-
-#define OMAP3ISP_H3A_REG_OFFSET 0x0C00
-#define OMAP3ISP_H3A_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_H3A_REG_OFFSET)
-#define OMAP3ISP_H3A_REG(offset) (OMAP3ISP_H3A_REG_BASE + (offset))
-
-#define OMAP3ISP_PREV_REG_OFFSET 0x0E00
-#define OMAP3ISP_PREV_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_PREV_REG_OFFSET)
-#define OMAP3ISP_PREV_REG(offset) (OMAP3ISP_PREV_REG_BASE + (offset))
-
-#define OMAP3ISP_RESZ_REG_OFFSET 0x1000
-#define OMAP3ISP_RESZ_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_RESZ_REG_OFFSET)
-#define OMAP3ISP_RESZ_REG(offset) (OMAP3ISP_RESZ_REG_BASE + (offset))
-
-#define OMAP3ISP_SBL_REG_OFFSET 0x1200
-#define OMAP3ISP_SBL_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_SBL_REG_OFFSET)
-#define OMAP3ISP_SBL_REG(offset) (OMAP3ISP_SBL_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2A_REGS1_REG_OFFSET 0x1800
-#define OMAP3ISP_CSI2A_REGS1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2A_REGS1_REG_OFFSET)
-#define OMAP3ISP_CSI2A_REGS1_REG(offset) \
- (OMAP3ISP_CSI2A_REGS1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSIPHY2_REG_OFFSET 0x1970
-#define OMAP3ISP_CSIPHY2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSIPHY2_REG_OFFSET)
-#define OMAP3ISP_CSIPHY2_REG(offset) (OMAP3ISP_CSIPHY2_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2A_REGS2_REG_OFFSET 0x19C0
-#define OMAP3ISP_CSI2A_REGS2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2A_REGS2_REG_OFFSET)
-#define OMAP3ISP_CSI2A_REGS2_REG(offset) \
- (OMAP3ISP_CSI2A_REGS2_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2C_REGS1_REG_OFFSET 0x1C00
-#define OMAP3ISP_CSI2C_REGS1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2C_REGS1_REG_OFFSET)
-#define OMAP3ISP_CSI2C_REGS1_REG(offset) \
- (OMAP3ISP_CSI2C_REGS1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSIPHY1_REG_OFFSET 0x1D70
-#define OMAP3ISP_CSIPHY1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSIPHY1_REG_OFFSET)
-#define OMAP3ISP_CSIPHY1_REG(offset) (OMAP3ISP_CSIPHY1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2C_REGS2_REG_OFFSET 0x1DC0
-#define OMAP3ISP_CSI2C_REGS2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2C_REGS2_REG_OFFSET)
-#define OMAP3ISP_CSI2C_REGS2_REG(offset) \
- (OMAP3ISP_CSI2C_REGS2_REG_BASE + (offset))
-
/* ISP module register offset */
#define ISP_REVISION (0x000)
@@ -1583,4 +1506,26 @@
#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_MASK \
(0x7fffff << ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT)
+/* -----------------------------------------------------------------------------
+ * CONTROL registers for CSI-2 phy routing
+ */
+
+/* OMAP343X_CONTROL_CSIRXFE */
+#define OMAP343X_CONTROL_CSIRXFE_CSIB_INV (1 << 7)
+#define OMAP343X_CONTROL_CSIRXFE_RESENABLE (1 << 8)
+#define OMAP343X_CONTROL_CSIRXFE_SELFORM (1 << 10)
+#define OMAP343X_CONTROL_CSIRXFE_PWRDNZ (1 << 12)
+#define OMAP343X_CONTROL_CSIRXFE_RESET (1 << 13)
+
+/* OMAP3630_CONTROL_CAMERA_PHY_CTRL */
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT 2
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT 0
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY 0x0
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE 0x1
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK 0x2
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_GPI 0x3
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK 0x3
+/* CCP2B: set to receive data from PHY2 instead of PHY1 */
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2 (1 << 4)
+
#endif /* OMAP3_ISP_REG_H */
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index e7939869bda7..61e17f9bd8b9 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -257,7 +257,7 @@ static int isp_stat_buf_queue(struct ispstat *stat)
if (!stat->active_buf)
return STAT_NO_BUF;
- do_gettimeofday(&stat->active_buf->ts);
+ ktime_get_ts(&stat->active_buf->ts);
stat->active_buf->buf_size = stat->buf_size;
if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
@@ -537,7 +537,8 @@ int omap3isp_stat_request_statistics(struct ispstat *stat,
return PTR_ERR(buf);
}
- data->ts = buf->ts;
+ data->ts.tv_sec = buf->ts.tv_sec;
+ data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC;
data->config_counter = buf->config_counter;
data->frame_number = buf->frame_number;
data->buf_size = buf->buf_size;
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index fd15094de34a..9a047c929b9f 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -50,7 +50,7 @@ struct ispstat_buffer {
struct iovm_struct *iovm;
void *virt_addr;
dma_addr_t dma_addr;
- struct timeval ts;
+ struct timespec ts;
u32 buf_size;
u32 frame_number;
u16 config_counter;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 3311d6bb3456..8dac17511e61 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -35,9 +35,6 @@
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
-#include <plat/omap-pm.h>
#include "ispvideo.h"
#include "isp.h"
@@ -1392,7 +1389,8 @@ int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
if (ret < 0)
- printk(KERN_ERR "%s: could not register video device (%d)\n",
+ dev_err(video->isp->dev,
+ "%s: could not register video device (%d)\n",
__func__, ret);
return ret;
diff --git a/drivers/media/platform/s3c-camif/Makefile b/drivers/media/platform/s3c-camif/Makefile
new file mode 100644
index 000000000000..50bf8c59b99c
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/Makefile
@@ -0,0 +1,5 @@
+# Makefile for s3c244x/s3c64xx CAMIF driver
+
+s3c-camif-objs := camif-core.o camif-capture.o camif-regs.o
+
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif.o
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
new file mode 100644
index 000000000000..a55793c3d811
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -0,0 +1,1672 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Based on drivers/media/platform/s5p-fimc,
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "camif-core.h"
+#include "camif-regs.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+/* Locking: called with vp->camif->slock spinlock held */
+static void camif_cfg_video_path(struct camif_vp *vp)
+{
+ WARN_ON(s3c_camif_get_scaler_config(vp, &vp->scaler));
+ camif_hw_set_scaler(vp);
+ camif_hw_set_flip(vp);
+ camif_hw_set_target_format(vp);
+ camif_hw_set_output_dma(vp);
+}
+
+static void camif_prepare_dma_offset(struct camif_vp *vp)
+{
+ struct camif_frame *f = &vp->out_frame;
+
+ f->dma_offset.initial = f->rect.top * f->f_width + f->rect.left;
+ f->dma_offset.line = f->f_width - (f->rect.left + f->rect.width);
+
+ pr_debug("dma_offset: initial: %d, line: %d\n",
+ f->dma_offset.initial, f->dma_offset.line);
+}
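A worked example of the offset arithmetic in camif_prepare_dma_offset(): for a hypothetical 640x480 full frame with a 320-pixel wide output rectangle at (left 160, top 120), initial = 120 * 640 + 160 = 76960 pixels are skipped before the first output line and line = 640 - (160 + 320) = 160 pixels are skipped after each line. A minimal sketch with plain integers (name and values are illustrative, not part of the patch):

/* Illustrative only: mirrors the two assignments above. */
static void dma_offset_example(unsigned int f_width, unsigned int left,
			       unsigned int top, unsigned int width,
			       unsigned int *initial, unsigned int *line)
{
	*initial = top * f_width + left;	/* 120 * 640 + 160 = 76960 */
	*line = f_width - (left + width);	/* 640 - 480 = 160 */
}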
+
+/* Locking: called with camif->slock spinlock held */
+static int s3c_camif_hw_init(struct camif_dev *camif, struct camif_vp *vp)
+{
+ const struct s3c_camif_variant *variant = camif->variant;
+
+ if (camif->sensor.sd == NULL || vp->out_fmt == NULL)
+ return -EINVAL;
+
+ if (variant->ip_revision == S3C244X_CAMIF_IP_REV)
+ camif_hw_clear_fifo_overflow(vp);
+ camif_hw_set_camera_bus(camif);
+ camif_hw_set_source_format(camif);
+ camif_hw_set_camera_crop(camif);
+ camif_hw_set_test_pattern(camif, camif->test_pattern);
+ if (variant->has_img_effect)
+ camif_hw_set_effect(camif, camif->colorfx,
+ camif->colorfx_cb, camif->colorfx_cr);
+ if (variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ camif_hw_set_input_path(vp);
+ camif_cfg_video_path(vp);
+ vp->state &= ~ST_VP_CONFIG;
+
+ return 0;
+}
+
+/*
+ * Initialize the video path from the scaler stage onwards only. The
+ * camera input interface setup is skipped. This is useful for enabling
+ * one of the video paths while the other is already running.
+ * Locking: called with camif->slock spinlock held.
+ */
+static int s3c_camif_hw_vp_init(struct camif_dev *camif, struct camif_vp *vp)
+{
+ unsigned int ip_rev = camif->variant->ip_revision;
+
+ if (vp->out_fmt == NULL)
+ return -EINVAL;
+
+ camif_prepare_dma_offset(vp);
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ camif_hw_clear_fifo_overflow(vp);
+ camif_cfg_video_path(vp);
+ vp->state &= ~ST_VP_CONFIG;
+ return 0;
+}
+
+static int sensor_set_power(struct camif_dev *camif, int on)
+{
+ struct cam_sensor *sensor = &camif->sensor;
+ int err = 0;
+
+ if (!on == camif->sensor.power_count)
+ err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (!err)
+ sensor->power_count += on ? 1 : -1;
+
+ pr_debug("on: %d, power_count: %d, err: %d\n",
+ on, sensor->power_count, err);
+
+ return err;
+}
+
+static int sensor_set_streaming(struct camif_dev *camif, int on)
+{
+ struct cam_sensor *sensor = &camif->sensor;
+ int err = 0;
+
+ if (!on == camif->sensor.stream_count)
+ err = v4l2_subdev_call(sensor->sd, video, s_stream, on);
+ if (!err)
+ sensor->stream_count += on ? 1 : -1;
+
+ pr_debug("on: %d, stream_count: %d, err: %d\n",
+ on, sensor->stream_count, err);
+
+ return err;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start streaming again.
+ * Return any buffers to vb2, perform CAMIF software reset and
+ * turn off streaming at the data pipeline (sensor) if required.
+ */
+static int camif_reinitialize(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ streaming = vp->state & ST_VP_SENSOR_STREAMING;
+
+ vp->state &= ~(ST_VP_PENDING | ST_VP_RUNNING | ST_VP_OFF |
+ ST_VP_ABORTING | ST_VP_STREAMING |
+ ST_VP_SENSOR_STREAMING | ST_VP_LASTIRQ);
+
+ /* Release unused buffers */
+ while (!list_empty(&vp->pending_buf_q)) {
+ buf = camif_pending_queue_pop(vp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&vp->active_buf_q)) {
+ buf = camif_active_queue_pop(vp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (!streaming)
+ return 0;
+
+ return sensor_set_streaming(camif, 0);
+}
+
+static bool s3c_vp_active(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ ret = (vp->state & ST_VP_RUNNING) || (vp->state & ST_VP_PENDING);
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return ret;
+}
+
+static bool camif_is_streaming(struct camif_dev *camif)
+{
+ unsigned long flags;
+ bool status;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ status = camif->stream_count > 0;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return status;
+}
+
+static int camif_stop_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ int ret;
+
+ if (!s3c_vp_active(vp))
+ return 0;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->state &= ~(ST_VP_OFF | ST_VP_LASTIRQ);
+ vp->state |= ST_VP_ABORTING;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ ret = wait_event_timeout(vp->irq_queue,
+ !(vp->state & ST_VP_ABORTING),
+ msecs_to_jiffies(CAMIF_STOP_TIMEOUT));
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ if (ret == 0 && !(vp->state & ST_VP_OFF)) {
+ /* Timed out, forcibly stop capture */
+ vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
+ ST_VP_LASTIRQ);
+
+ camif_hw_disable_capture(vp);
+ camif_hw_enable_scaler(vp, false);
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return camif_reinitialize(vp);
+}
+
+static int camif_prepare_addr(struct camif_vp *vp, struct vb2_buffer *vb,
+ struct camif_addr *paddr)
+{
+ struct camif_frame *frame = &vp->out_frame;
+ u32 pix_size;
+
+ if (vb == NULL || frame == NULL)
+ return -EINVAL;
+
+ pix_size = frame->rect.width * frame->rect.height;
+
+ pr_debug("colplanes: %d, pix_size: %u\n",
+ vp->out_fmt->colplanes, pix_size);
+
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ switch (vp->out_fmt->colplanes) {
+ case 1:
+ paddr->cb = 0;
+ paddr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ paddr->cb = (u32)(paddr->y + pix_size);
+ paddr->cr = 0;
+ break;
+ case 3:
+ paddr->cb = (u32)(paddr->y + pix_size);
+ /* decompose Y into Y/Cb/Cr */
+ if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
+ paddr->cr = (u32)(paddr->cb + (pix_size >> 1));
+ else /* 420 */
+ paddr->cr = (u32)(paddr->cb + (pix_size >> 2));
+
+ if (vp->out_fmt->color == IMG_FMT_YCRCB420)
+ swap(paddr->cb, paddr->cr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_debug("DMA address: y: %#x cb: %#x cr: %#x\n",
+ paddr->y, paddr->cb, paddr->cr);
+
+ return 0;
+}
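The plane addresses computed above follow the usual contiguous planar layout: Cb starts pix_size bytes after Y, and Cr follows Cb after pix_size/2 (4:2:2 planar) or pix_size/4 (4:2:0) bytes. For a hypothetical 640x480 4:2:0 buffer that gives Cb at +307200 and Cr at +384000 relative to Y. A minimal sketch (helper name and values are illustrative, not part of the patch):

/* Illustrative only: plane offsets relative to the Y plane. */
static void planar_offsets(unsigned int width, unsigned int height,
			   int is_420, unsigned int *cb_off,
			   unsigned int *cr_off)
{
	unsigned int pix_size = width * height;

	*cb_off = pix_size;			/* 640 * 480 = 307200 */
	*cr_off = pix_size + (is_420 ? pix_size / 4 : pix_size / 2);
						/* 4:2:0: 307200 + 76800 = 384000 */
}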
+
+irqreturn_t s3c_camif_irq_handler(int irq, void *priv)
+{
+ struct camif_vp *vp = priv;
+ struct camif_dev *camif = vp->camif;
+ unsigned int ip_rev = camif->variant->ip_revision;
+ unsigned int status;
+
+ spin_lock(&camif->slock);
+
+ if (ip_rev == S3C6410_CAMIF_IP_REV)
+ camif_hw_clear_pending_irq(vp);
+
+ status = camif_hw_get_status(vp);
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV && (status & CISTATUS_OVF_MASK)) {
+ camif_hw_clear_fifo_overflow(vp);
+ goto unlock;
+ }
+
+ if (vp->state & ST_VP_ABORTING) {
+ if (vp->state & ST_VP_OFF) {
+ /* Last IRQ */
+ vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
+ ST_VP_LASTIRQ);
+ wake_up(&vp->irq_queue);
+ goto unlock;
+ } else if (vp->state & ST_VP_LASTIRQ) {
+ camif_hw_disable_capture(vp);
+ camif_hw_enable_scaler(vp, false);
+ camif_hw_set_lastirq(vp, false);
+ vp->state |= ST_VP_OFF;
+ } else {
+ /* Disable capture, enable last IRQ */
+ camif_hw_set_lastirq(vp, true);
+ vp->state |= ST_VP_LASTIRQ;
+ }
+ }
+
+ if (!list_empty(&vp->pending_buf_q) && (vp->state & ST_VP_RUNNING) &&
+ !list_empty(&vp->active_buf_q)) {
+ unsigned int index;
+ struct camif_buffer *vbuf;
+ struct timeval *tv;
+ struct timespec ts;
+ /*
+ * Get previous DMA write buffer index:
+ * 0 => DMA buffer 0, 2;
+ * 1 => DMA buffer 1, 3.
+ */
+ index = (CISTATUS_FRAMECNT(status) + 2) & 1;
+
+ ktime_get_ts(&ts);
+ vbuf = camif_active_queue_peek(vp, index);
+
+ if (!WARN_ON(vbuf == NULL)) {
+ /* Dequeue a filled buffer */
+ tv = &vbuf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ vbuf->vb.v4l2_buf.sequence = vp->frame_sequence++;
+ vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+
+ /* Set up an empty buffer at the DMA engine */
+ vbuf = camif_pending_queue_pop(vp);
+ vbuf->index = index;
+ camif_hw_set_output_addr(vp, &vbuf->paddr, index);
+ camif_hw_set_output_addr(vp, &vbuf->paddr, index + 2);
+
+ /* Scheduled in H/W, add to the queue */
+ camif_active_queue_add(vp, vbuf);
+ }
+ } else if (!(vp->state & ST_VP_ABORTING) &&
+ (vp->state & ST_VP_PENDING)) {
+ vp->state |= ST_VP_RUNNING;
+ }
+
+ if (vp->state & ST_VP_CONFIG) {
+ camif_prepare_dma_offset(vp);
+ camif_hw_set_camera_crop(camif);
+ camif_hw_set_scaler(vp);
+ camif_hw_set_flip(vp);
+ camif_hw_set_test_pattern(camif, camif->test_pattern);
+ if (camif->variant->has_img_effect)
+ camif_hw_set_effect(camif, camif->colorfx,
+ camif->colorfx_cb, camif->colorfx_cr);
+ vp->state &= ~ST_VP_CONFIG;
+ }
+unlock:
+ spin_unlock(&camif->slock);
+ return IRQ_HANDLED;
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * We assume the codec capture path is always activated
+ * first, before the preview path starts streaming.
+ * This is required to avoid an internal FIFO overflow and
+ * the need for a CAMIF software reset.
+ */
+ spin_lock_irqsave(&camif->slock, flags);
+
+ if (camif->stream_count == 0) {
+ camif_hw_reset(camif);
+ ret = s3c_camif_hw_init(camif, vp);
+ } else {
+ ret = s3c_camif_hw_vp_init(camif, vp);
+ }
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (ret < 0) {
+ camif_reinitialize(vp);
+ return ret;
+ }
+
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->frame_sequence = 0;
+ vp->state |= ST_VP_PENDING;
+
+ if (!list_empty(&vp->pending_buf_q) &&
+ (!(vp->state & ST_VP_STREAMING) ||
+ !(vp->state & ST_VP_SENSOR_STREAMING))) {
+
+ camif_hw_enable_scaler(vp, vp->scaler.enable);
+ camif_hw_enable_capture(vp);
+ vp->state |= ST_VP_STREAMING;
+
+ if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
+ vp->state |= ST_VP_SENSOR_STREAMING;
+ spin_unlock_irqrestore(&camif->slock, flags);
+ ret = sensor_set_streaming(camif, 1);
+ if (ret)
+ v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
+ if (debug)
+ camif_hw_dump_regs(camif, __func__);
+
+ return ret;
+ }
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+ return 0;
+}
+
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ return camif_stop_capture(vp);
+}
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ const struct v4l2_pix_format *pix = NULL;
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+ unsigned int size;
+
+ if (pfmt) {
+ pix = &pfmt->fmt.pix;
+ fmt = s3c_camif_find_format(vp, &pix->pixelformat, -1);
+ size = (pix->width * pix->height * fmt->depth) / 8;
+ } else {
+ size = (frame->f_width * frame->f_height * fmt->depth) / 8;
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+ *num_planes = 1;
+
+ if (pix)
+ sizes[0] = max(size, pix->sizeimage);
+ else
+ sizes[0] = size;
+ allocators[0] = camif->alloc_ctx;
+
+ pr_debug("size: %u\n", sizes[0]);
+ return 0;
+}
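The buffer size used above follows directly from the format's bit depth: size = width * height * depth / 8. For a hypothetical 640x480 frame this is 460800 bytes for YUV 4:2:0 (12 bpp) and 614400 bytes for RGB565 (16 bpp). A minimal sketch (illustrative helper, not part of the patch):

/* Illustrative only: plane size in bytes as computed in queue_setup(). */
static inline unsigned int frame_size_bytes(unsigned int width,
					    unsigned int height,
					    unsigned int depth_bits)
{
	return (width * height * depth_bits) / 8;	/* 640x480 @ 12 bpp: 460800 */
}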
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vp->out_fmt == NULL)
+ return -EINVAL;
+
+ if (vb2_plane_size(vb, 0) < vp->payload) {
+ v4l2_err(&vp->vdev, "buffer too small: %lu, required: %u\n",
+ vb2_plane_size(vb, 0), vp->payload);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, vp->payload);
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct camif_buffer *buf = container_of(vb, struct camif_buffer, vb);
+ struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ WARN_ON(camif_prepare_addr(vp, &buf->vb, &buf->paddr));
+
+ if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) {
+ /* Schedule an empty buffer in H/W */
+ buf->index = vp->buf_index;
+
+ camif_hw_set_output_addr(vp, &buf->paddr, buf->index);
+ camif_hw_set_output_addr(vp, &buf->paddr, buf->index + 2);
+
+ camif_active_queue_add(vp, buf);
+ vp->buf_index = !vp->buf_index;
+ } else {
+ camif_pending_queue_add(vp, buf);
+ }
+
+ if (vb2_is_streaming(&vp->vb_queue) && !list_empty(&vp->pending_buf_q)
+ && !(vp->state & ST_VP_STREAMING)) {
+
+ vp->state |= ST_VP_STREAMING;
+ camif_hw_enable_scaler(vp, vp->scaler.enable);
+ camif_hw_enable_capture(vp);
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
+ if (sensor_set_streaming(camif, 1) == 0)
+ vp->state |= ST_VP_SENSOR_STREAMING;
+ else
+ v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
+
+ if (debug)
+ camif_hw_dump_regs(camif, __func__);
+ }
+ return;
+ }
+ spin_unlock_irqrestore(&camif->slock, flags);
+}
+
+static void camif_lock(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ mutex_lock(&vp->camif->lock);
+}
+
+static void camif_unlock(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ mutex_unlock(&vp->camif->lock);
+}
+
+static const struct vb2_ops s3c_camif_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = camif_unlock,
+ .wait_finish = camif_lock,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+static int s3c_camif_open(struct file *file)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
+ vp->state, vp->owner, task_pid_nr(current));
+
+ if (mutex_lock_interruptible(&camif->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ ret = pm_runtime_get_sync(camif->dev);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = sensor_set_power(camif, 1);
+ if (!ret)
+ goto unlock;
+
+ pm_runtime_put(camif->dev);
+err_pm:
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static int s3c_camif_close(struct file *file)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
+ vp->state, vp->owner, task_pid_nr(current));
+
+ mutex_lock(&camif->lock);
+
+ if (vp->owner == file->private_data) {
+ camif_stop_capture(vp);
+ vb2_queue_release(&vp->vb_queue);
+ vp->owner = NULL;
+ }
+
+ sensor_set_power(camif, 0);
+
+ pm_runtime_put(camif->dev);
+ ret = v4l2_fh_release(file);
+
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static unsigned int s3c_camif_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ mutex_lock(&camif->lock);
+ if (vp->owner && vp->owner != file->private_data)
+ ret = -EBUSY;
+ else
+ ret = vb2_poll(&vp->vb_queue, file, wait);
+
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static int s3c_camif_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ if (vp->owner && vp->owner != file->private_data)
+ ret = -EBUSY;
+ else
+ ret = vb2_mmap(&vp->vb_queue, vma);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations s3c_camif_fops = {
+ .owner = THIS_MODULE,
+ .open = s3c_camif_open,
+ .release = s3c_camif_close,
+ .poll = s3c_camif_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = s3c_camif_mmap,
+};
+
+/*
+ * Video node IOCTLs
+ */
+
+static int s3c_camif_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ strlcpy(cap->driver, S3C_CAMIF_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, S3C_CAMIF_DRIVER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s.%d",
+ dev_name(vp->camif->dev), vp->id);
+
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int s3c_camif_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct v4l2_subdev *sensor = vp->camif->sensor.sd;
+
+ if (input->index || sensor == NULL)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(input->name, sensor->name, sizeof(input->name));
+ return 0;
+}
+
+static int s3c_camif_vidioc_s_input(struct file *file, void *priv,
+ unsigned int i)
+{
+ return i == 0 ? 0 : -EINVAL;
+}
+
+static int s3c_camif_vidioc_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int s3c_camif_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ const struct camif_fmt *fmt;
+
+ fmt = s3c_camif_find_format(vp, NULL, f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ pr_debug("fmt(%d): %s\n", f->index, f->description);
+ return 0;
+}
+
+static int s3c_camif_vidioc_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+
+ pix->bytesperline = frame->f_width * fmt->ybpp;
+ pix->sizeimage = vp->payload;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->width = frame->f_width;
+ pix->height = frame->f_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ return 0;
+}
+
+static int __camif_video_try_format(struct camif_vp *vp,
+ struct v4l2_pix_format *pix,
+ const struct camif_fmt **ffmt)
+{
+ struct camif_dev *camif = vp->camif;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ unsigned int wmin, hmin, sc_hrmax, sc_vrmax;
+ const struct vp_pix_limits *pix_lim;
+ const struct camif_fmt *fmt;
+
+ fmt = s3c_camif_find_format(vp, &pix->pixelformat, 0);
+
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+
+ if (ffmt)
+ *ffmt = fmt;
+
+ pix_lim = &camif->variant->vp_pix_limits[vp->id];
+
+ pr_debug("fmt: %ux%u, crop: %ux%u, bytesperline: %u\n",
+ pix->width, pix->height, crop->width, crop->height,
+ pix->bytesperline);
+ /*
+ * Calculate minimum width and height according to the configured
+ * camera input interface crop rectangle and the resizer's capabilities.
+ */
+ sc_hrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->width) - 3));
+ sc_vrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->height) - 1));
+
+ wmin = max_t(u32, pix_lim->min_out_width, crop->width / sc_hrmax);
+ wmin = round_up(wmin, pix_lim->out_width_align);
+ hmin = max_t(u32, 8, crop->height / sc_vrmax);
+ hmin = round_up(hmin, 8);
+
+ v4l_bound_align_image(&pix->width, wmin, pix_lim->max_sc_out_width,
+ ffs(pix_lim->out_width_align) - 1,
+ &pix->height, hmin, pix_lim->max_height, 0, 0);
+
+ pix->bytesperline = pix->width * fmt->ybpp;
+ pix->sizeimage = (pix->width * pix->height * fmt->depth) / 8;
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->field = V4L2_FIELD_NONE;
+
+ pr_debug("%ux%u, wmin: %d, hmin: %d, sc_hrmax: %d, sc_vrmax: %d\n",
+ pix->width, pix->height, wmin, hmin, sc_hrmax, sc_vrmax);
+
+ return 0;
+}
+
+static int s3c_camif_vidioc_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return __camif_video_try_format(vp, &f->fmt.pix, NULL);
+}
+
+static int s3c_camif_vidioc_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_frame *out_frame = &vp->out_frame;
+ const struct camif_fmt *fmt = NULL;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (vb2_is_busy(&vp->vb_queue))
+ return -EBUSY;
+
+ ret = __camif_video_try_format(vp, &f->fmt.pix, &fmt);
+ if (ret < 0)
+ return ret;
+
+ vp->out_fmt = fmt;
+ vp->payload = pix->sizeimage;
+ out_frame->f_width = pix->width;
+ out_frame->f_height = pix->height;
+
+ /* Reset composition rectangle */
+ out_frame->rect.width = pix->width;
+ out_frame->rect.height = pix->height;
+ out_frame->rect.left = 0;
+ out_frame->rect.top = 0;
+
+ if (vp->owner == NULL)
+ vp->owner = priv;
+
+ pr_debug("%ux%u. payload: %u. fmt: %s. %d %d. sizeimage: %d. bpl: %d\n",
+ out_frame->f_width, out_frame->f_height, vp->payload, fmt->name,
+ pix->width * pix->height * fmt->depth, fmt->depth,
+ pix->sizeimage, pix->bytesperline);
+
+ return 0;
+}
+
+/* Only check pixel formats at the sensor and the camif subdev pads */
+static int camif_pipeline_validate(struct camif_dev *camif)
+{
+ struct v4l2_subdev_format src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ /* Retrieve format at the sensor subdev source pad */
+ pad = media_entity_remote_source(&camif->pads[0]);
+ if (!pad || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return -EPIPE;
+
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(camif->sensor.sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != camif->mbus_fmt.width ||
+ src_fmt.format.height != camif->mbus_fmt.height ||
+ src_fmt.format.code != camif->mbus_fmt.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int s3c_camif_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ struct media_entity *sensor = &camif->sensor.sd->entity;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ if (s3c_vp_active(vp))
+ return 0;
+
+ ret = media_entity_pipeline_start(sensor, camif->m_pipeline);
+ if (ret < 0)
+ return ret;
+
+ ret = camif_pipeline_validate(camif);
+ if (ret < 0) {
+ media_entity_pipeline_stop(sensor);
+ return ret;
+ }
+
+ return vb2_streamon(&vp->vb_queue, type);
+}
+
+static int s3c_camif_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ ret = vb2_streamoff(&vp->vb_queue, type);
+ if (ret == 0)
+ media_entity_pipeline_stop(&camif->sensor.sd->entity);
+ return ret;
+}
+
+static int s3c_camif_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ pr_debug("[vp%d] rb count: %d, owner: %p, priv: %p\n",
+ vp->id, rb->count, vp->owner, priv);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ if (rb->count)
+ rb->count = max_t(u32, CAMIF_REQ_BUFS_MIN, rb->count);
+ else
+ vp->owner = NULL;
+
+ ret = vb2_reqbufs(&vp->vb_queue, rb);
+ if (!ret) {
+ vp->reqbufs_count = rb->count;
+ if (vp->owner == NULL && rb->count > 0)
+ vp->owner = priv;
+ }
+
+ return ret;
+}
+
+static int s3c_camif_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return vb2_querybuf(&vp->vb_queue, buf);
+}
+
+static int s3c_camif_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ return vb2_qbuf(&vp->vb_queue, buf);
+}
+
+static int s3c_camif_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ pr_debug("[vp%d] sequence: %d\n", vp->id, vp->frame_sequence);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ return vb2_dqbuf(&vp->vb_queue, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int s3c_camif_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ create->count = max_t(u32, 1, create->count);
+ ret = vb2_create_bufs(&vp->vb_queue, create);
+
+ if (!ret && vp->owner == NULL)
+ vp->owner = priv;
+
+ return ret;
+}
+
+static int s3c_camif_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return vb2_prepare_buf(&vp->vb_queue, b);
+}
+
+static int s3c_camif_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = vp->out_frame.f_width;
+ sel->r.height = vp->out_frame.f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = vp->out_frame.rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void __camif_try_compose(struct camif_dev *camif, struct camif_vp *vp,
+ struct v4l2_rect *r)
+{
+ /* s3c244x doesn't support composition */
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
+ *r = vp->out_frame.rect;
+ return;
+ }
+
+ /* TODO: s3c64xx */
+}
+
+static int s3c_camif_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ struct v4l2_rect rect = sel->r;
+ unsigned long flags;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ __camif_try_compose(camif, vp, &rect);
+
+ sel->r = rect;
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->out_frame.rect = rect;
+ vp->state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%dx%d\n",
+ sel->type, sel->target, sel->flags,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops s3c_camif_ioctl_ops = {
+ .vidioc_querycap = s3c_camif_vidioc_querycap,
+ .vidioc_enum_input = s3c_camif_vidioc_enum_input,
+ .vidioc_g_input = s3c_camif_vidioc_g_input,
+ .vidioc_s_input = s3c_camif_vidioc_s_input,
+ .vidioc_enum_fmt_vid_cap = s3c_camif_vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_cap = s3c_camif_vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = s3c_camif_vidioc_s_fmt,
+ .vidioc_g_fmt_vid_cap = s3c_camif_vidioc_g_fmt,
+ .vidioc_g_selection = s3c_camif_g_selection,
+ .vidioc_s_selection = s3c_camif_s_selection,
+ .vidioc_reqbufs = s3c_camif_reqbufs,
+ .vidioc_querybuf = s3c_camif_querybuf,
+ .vidioc_prepare_buf = s3c_camif_prepare_buf,
+ .vidioc_create_bufs = s3c_camif_create_bufs,
+ .vidioc_qbuf = s3c_camif_qbuf,
+ .vidioc_dqbuf = s3c_camif_dqbuf,
+ .vidioc_streamon = s3c_camif_streamon,
+ .vidioc_streamoff = s3c_camif_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+/*
+ * Video node controls
+ */
+static int s3c_camif_video_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct camif_vp *vp = ctrl->priv;
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+
+ pr_debug("[vp%d] ctrl: %s, value: %d\n", vp->id,
+ ctrl->name, ctrl->val);
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ vp->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ vp->vflip = ctrl->val;
+ break;
+ }
+
+ vp->state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+ return 0;
+}
+
+/* Codec and preview video node control ops */
+static const struct v4l2_ctrl_ops s3c_camif_video_ctrl_ops = {
+ .s_ctrl = s3c_camif_video_s_ctrl,
+};
+
+int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+{
+ struct camif_vp *vp = &camif->vp[idx];
+ struct vb2_queue *q = &vp->vb_queue;
+ struct video_device *vfd = &vp->vdev;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ memset(vfd, 0, sizeof(*vfd));
+ snprintf(vfd->name, sizeof(vfd->name), "camif-%s",
+ vp->id == 0 ? "codec" : "preview");
+
+ vfd->fops = &s3c_camif_fops;
+ vfd->ioctl_ops = &s3c_camif_ioctl_ops;
+ vfd->v4l2_dev = &camif->v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &camif->lock;
+ vp->reqbufs_count = 0;
+
+ INIT_LIST_HEAD(&vp->pending_buf_q);
+ INIT_LIST_HEAD(&vp->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &s3c_camif_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct camif_buffer);
+ q->drv_priv = vp;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_vd_rel;
+
+ vp->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &vp->pad, 0);
+ if (ret)
+ goto err_vd_rel;
+
+ video_set_drvdata(vfd, vp);
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+
+ v4l2_ctrl_handler_init(&vp->ctrl_handler, 1);
+ ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (ctrl)
+ ctrl->priv = vp;
+ ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ctrl)
+ ctrl->priv = vp;
+
+ ret = vp->ctrl_handler.error;
+ if (ret < 0)
+ goto err_me_cleanup;
+
+ vfd->ctrl_handler = &vp->ctrl_handler;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_ctrlh_free;
+
+ v4l2_info(&camif->v4l2_dev, "registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+err_ctrlh_free:
+ v4l2_ctrl_handler_free(&vp->ctrl_handler);
+err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+err_vd_rel:
+ video_device_release(vfd);
+ return ret;
+}
+
+void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx)
+{
+ struct video_device *vfd = &camif->vp[idx].vdev;
+
+ if (video_is_registered(vfd)) {
+ video_unregister_device(vfd);
+ media_entity_cleanup(&vfd->entity);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ }
+}
+
+/* Media bus pixel formats supported at the camif input */
+static const enum v4l2_mbus_pixelcode camif_mbus_formats[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
+ V4L2_MBUS_FMT_YVYU8_2X8,
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_VYUY8_2X8,
+};
+
+/*
+ * Camera input interface subdev operations
+ */
+
+static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(camif_mbus_formats))
+ return -EINVAL;
+
+ code->code = camif_mbus_formats[code->index];
+ return 0;
+}
+
+static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&camif->lock);
+
+ switch (fmt->pad) {
+ case CAMIF_SD_PAD_SINK:
+ /* full camera input pixel size */
+ *mf = camif->mbus_fmt;
+ break;
+
+ case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
+ /* crop rectangle at camera interface input */
+ mf->width = camif->camif_crop.width;
+ mf->height = camif->camif_crop.height;
+ mf->code = camif->mbus_fmt.code;
+ break;
+ }
+
+ mutex_unlock(&camif->lock);
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ return 0;
+}
+
+static void __camif_subdev_try_format(struct camif_dev *camif,
+ struct v4l2_mbus_framefmt *mf, int pad)
+{
+ const struct s3c_camif_variant *variant = camif->variant;
+ const struct vp_pix_limits *pix_lim;
+ int i = ARRAY_SIZE(camif_mbus_formats);
+
+ /* FIXME: constraints against codec or preview path ? */
+ pix_lim = &variant->vp_pix_limits[VP_CODEC];
+
+ while (--i > 0)
+ if (camif_mbus_formats[i] == mf->code)
+ break;
+
+ mf->code = camif_mbus_formats[i];
+
+ if (pad == CAMIF_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
+ ffs(pix_lim->out_width_align) - 1,
+ &mf->height, 8, CAMIF_MAX_PIX_HEIGHT, 0,
+ 0);
+ } else {
+ struct v4l2_rect *crop = &camif->camif_crop;
+ v4l_bound_align_image(&mf->width, 8, crop->width,
+ ffs(pix_lim->out_width_align) - 1,
+ &mf->height, 8, crop->height,
+ 0, 0);
+ }
+
+ v4l2_dbg(1, debug, &camif->subdev, "%ux%u\n", mf->width, mf->height);
+}
+
+static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ int i;
+
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %ux%u\n",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mutex_lock(&camif->lock);
+
+ /*
+ * No pixel format change at the camera input is allowed
+ * while streaming.
+ */
+ if (vb2_is_busy(&camif->vp[VP_CODEC].vb_queue) ||
+ vb2_is_busy(&camif->vp[VP_PREVIEW].vb_queue)) {
+ mutex_unlock(&camif->lock);
+ return -EBUSY;
+ }
+
+ __camif_subdev_try_format(camif, mf, fmt->pad);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ mutex_unlock(&camif->lock);
+ return 0;
+ }
+
+ switch (fmt->pad) {
+ case CAMIF_SD_PAD_SINK:
+ camif->mbus_fmt = *mf;
+ /* Reset sink crop rectangle. */
+ crop->width = mf->width;
+ crop->height = mf->height;
+ crop->left = 0;
+ crop->top = 0;
+ /*
+ * Reset source format (the camif's crop rectangle)
+ * and the video output resolution.
+ */
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_frame *frame = &camif->vp[i].out_frame;
+ frame->rect = *crop;
+ frame->f_width = mf->width;
+ frame->f_height = mf->height;
+ }
+ break;
+
+ case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
+ /* Pixel format can be only changed on the sink pad. */
+ mf->code = camif->mbus_fmt.code;
+ mf->width = crop->width;
+ mf->height = crop->height;
+ break;
+ }
+
+ mutex_unlock(&camif->lock);
+ return 0;
+}
+
+static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop = &camif->camif_crop;
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP &&
+ sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != CAMIF_SD_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&camif->lock);
+
+ if (sel->target == V4L2_SEL_TGT_CROP) {
+ sel->r = *crop;
+ } else { /* crop bounds */
+ sel->r.width = mf->width;
+ sel->r.height = mf->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+
+ mutex_unlock(&camif->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d) %dx%d, size: %ux%u\n",
+ __func__, crop->left, crop->top, crop->width,
+ crop->height, mf->width, mf->height);
+
+ return 0;
+}
+
+static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ const struct camif_pix_limits *pix_lim = &camif->variant->pix_limits;
+ unsigned int left = 2 * r->left;
+ unsigned int top = 2 * r->top;
+
+ /*
+ * The following constraints must be met:
+ * - r->width + 2 * r->left = mf->width;
+ * - r->height + 2 * r->top = mf->height;
+ * - crop rectangle size and position must be aligned
+ * to 8 or 2 pixels, depending on SoC version.
+ */
+ v4l_bound_align_image(&r->width, 0, mf->width,
+ ffs(pix_lim->win_hor_offset_align) - 1,
+ &r->height, 0, mf->height, 1, 0);
+
+ v4l_bound_align_image(&left, 0, mf->width - r->width,
+ ffs(pix_lim->win_hor_offset_align),
+ &top, 0, mf->height - r->height, 2, 0);
+
+ r->left = left / 2;
+ r->top = top / 2;
+ r->width = mf->width - left;
+ r->height = mf->height - top;
+ /*
+ * Make sure we either downscale or upscale both the pixel
+ * width and height. Just return the current crop rectangle if
+ * this scaler constraint is not met.
+ */
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV &&
+ camif_is_streaming(camif)) {
+ unsigned int i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct v4l2_rect *or = &camif->vp[i].out_frame.rect;
+ if ((or->width > r->width) == (or->height > r->height))
+ continue;
+ *r = camif->camif_crop;
+ pr_debug("Width/height scaling direction limitation\n");
+ break;
+ }
+ }
+
+ v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%dx%d, fmt: %ux%u\n",
+ r->left, r->top, r->width, r->height, mf->width, mf->height);
+}
+
+static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop = &camif->camif_crop;
+ struct camif_scaler scaler;
+
+ if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != CAMIF_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&camif->lock);
+ __camif_try_crop(camif, &sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+ } else {
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ *crop = sel->r;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+ scaler = vp->scaler;
+ if (s3c_camif_get_scaler_config(vp, &scaler))
+ continue;
+ vp->scaler = scaler;
+ vp->state |= ST_VP_CONFIG;
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+ }
+ mutex_unlock(&camif->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %u, f_h: %u\n",
+ __func__, crop->left, crop->top, crop->width, crop->height,
+ camif->mbus_fmt.width, camif->mbus_fmt.height);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops s3c_camif_subdev_pad_ops = {
+ .enum_mbus_code = s3c_camif_subdev_enum_mbus_code,
+ .get_selection = s3c_camif_subdev_get_selection,
+ .set_selection = s3c_camif_subdev_set_selection,
+ .get_fmt = s3c_camif_subdev_get_fmt,
+ .set_fmt = s3c_camif_subdev_set_fmt,
+};
+
+static struct v4l2_subdev_ops s3c_camif_subdev_ops = {
+ .pad = &s3c_camif_subdev_pad_ops,
+};
+
+static int s3c_camif_subdev_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct camif_dev *camif = container_of(ctrl->handler, struct camif_dev,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_COLORFX:
+ camif->colorfx = camif->ctrl_colorfx->val;
+ /* Set Cb, Cr */
+ switch (ctrl->val) {
+ case V4L2_COLORFX_SEPIA:
+ camif->colorfx_cb = 115;
+ camif->colorfx_cr = 145;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
+ camif->colorfx_cb = camif->ctrl_colorfx_cbcr->val >> 8;
+ camif->colorfx_cr = camif->ctrl_colorfx_cbcr->val & 0xff;
+ break;
+ default:
+ /* for V4L2_COLORFX_BW and others */
+ camif->colorfx_cb = 128;
+ camif->colorfx_cr = 128;
+ }
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ camif->test_pattern = camif->ctrl_test_pattern->val;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ camif->vp[VP_CODEC].state |= ST_VP_CONFIG;
+ camif->vp[VP_PREVIEW].state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops s3c_camif_subdev_ctrl_ops = {
+ .s_ctrl = s3c_camif_subdev_s_ctrl,
+};
+
+static const char * const s3c_camif_test_pattern_menu[] = {
+ "Disabled",
+ "Color bars",
+ "Horizontal increment",
+ "Vertical increment",
+};
+
+int s3c_camif_create_subdev(struct camif_dev *camif)
+{
+ struct v4l2_ctrl_handler *handler = &camif->ctrl_handler;
+ struct v4l2_subdev *sd = &camif->subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &s3c_camif_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strlcpy(sd->name, "S3C-CAMIF", sizeof(sd->name));
+
+ camif->pads[CAMIF_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ camif->pads[CAMIF_SD_PAD_SOURCE_C].flags = MEDIA_PAD_FL_SOURCE;
+ camif->pads[CAMIF_SD_PAD_SOURCE_P].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_init(&sd->entity, CAMIF_SD_PADS_NUM,
+ camif->pads, 0);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 3);
+ camif->ctrl_test_pattern = v4l2_ctrl_new_std_menu_items(handler,
+ &s3c_camif_subdev_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(s3c_camif_test_pattern_menu) - 1, 0, 0,
+ s3c_camif_test_pattern_menu);
+
+ camif->ctrl_colorfx = v4l2_ctrl_new_std_menu(handler,
+ &s3c_camif_subdev_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+ ~0x981f, V4L2_COLORFX_NONE);
+
+ camif->ctrl_colorfx_cbcr = v4l2_ctrl_new_std(handler,
+ &s3c_camif_subdev_ctrl_ops,
+ V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+ if (handler->error) {
+ v4l2_ctrl_handler_free(handler);
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ v4l2_ctrl_auto_cluster(2, &camif->ctrl_colorfx,
+ V4L2_COLORFX_SET_CBCR, false);
+ if (!camif->variant->has_img_effect) {
+ camif->ctrl_colorfx->flags |= V4L2_CTRL_FLAG_DISABLED;
+ camif->ctrl_colorfx_cbcr->flags |= V4L2_CTRL_FLAG_DISABLED;
+ }
+ sd->ctrl_handler = handler;
+ v4l2_set_subdevdata(sd, camif);
+
+ return 0;
+}
+
+void s3c_camif_unregister_subdev(struct camif_dev *camif)
+{
+ struct v4l2_subdev *sd = &camif->subdev;
+
+ /* Return if not registered */
+ if (v4l2_get_subdevdata(sd) == NULL)
+ return;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&camif->ctrl_handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
+
+int s3c_camif_set_defaults(struct camif_dev *camif)
+{
+ unsigned int ip_rev = camif->variant->ip_revision;
+ int i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+ struct camif_frame *f = &vp->out_frame;
+
+ vp->camif = camif;
+ vp->id = i;
+ vp->offset = camif->variant->vp_offset;
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ vp->fmt_flags = i ? FMT_FL_S3C24XX_PREVIEW :
+ FMT_FL_S3C24XX_CODEC;
+ else
+ vp->fmt_flags = FMT_FL_S3C64XX;
+
+ vp->out_fmt = s3c_camif_find_format(vp, NULL, 0);
+ BUG_ON(vp->out_fmt == NULL);
+
+ memset(f, 0, sizeof(*f));
+ f->f_width = CAMIF_DEF_WIDTH;
+ f->f_height = CAMIF_DEF_HEIGHT;
+ f->rect.width = CAMIF_DEF_WIDTH;
+ f->rect.height = CAMIF_DEF_HEIGHT;
+
+ /* Scaler is always enabled */
+ vp->scaler.enable = 1;
+
+ vp->payload = (f->f_width * f->f_height *
+ vp->out_fmt->depth) / 8;
+ }
+
+ memset(&camif->mbus_fmt, 0, sizeof(camif->mbus_fmt));
+ camif->mbus_fmt.width = CAMIF_DEF_WIDTH;
+ camif->mbus_fmt.height = CAMIF_DEF_HEIGHT;
+ camif->mbus_fmt.code = camif_mbus_formats[0];
+
+ memset(&camif->camif_crop, 0, sizeof(camif->camif_crop));
+ camif->camif_crop.width = CAMIF_DEF_WIDTH;
+ camif->camif_crop.height = CAMIF_DEF_HEIGHT;
+
+ return 0;
+}
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
new file mode 100644
index 000000000000..09a8c9cac5c9
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -0,0 +1,660 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "camif-core.h"
+
+static char *camif_clocks[CLK_MAX_NUM] = {
+ /* HCLK CAMIF clock */
+ [CLK_GATE] = "camif",
+ /* CAMIF / external camera sensor master clock */
+ [CLK_CAM] = "camera",
+};
+
+static const struct camif_fmt camif_formats[] = {
+ {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .depth = 16,
+ .ybpp = 1,
+ .color = IMG_FMT_YCBCR422P,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .ybpp = 1,
+ .color = IMG_FMT_YCBCR420,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "YVU 4:2:0 planar, Y/Cr/Cb",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .depth = 12,
+ .ybpp = 1,
+ .color = IMG_FMT_YCRCB420,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "RGB565, 16 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .ybpp = 2,
+ .color = IMG_FMT_RGB565,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C24XX_PREVIEW |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "XRGB8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .ybpp = 4,
+ .color = IMG_FMT_XRGB8888,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C24XX_PREVIEW |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .depth = 32,
+ .ybpp = 4,
+ .color = IMG_FMT_RGB666,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C64XX,
+ }
+};
+
+/**
+ * s3c_camif_find_format() - lookup camif color format by fourcc or an index
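+ * @vp: video path (codec or preview) whose format flags are matched, ignored if null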
+ * @pixelformat: fourcc to match, ignored if null
+ * @index: index to the camif_formats array, ignored if negative
+ */
+const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
+ const u32 *pixelformat,
+ int index)
+{
+ const struct camif_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(camif_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(camif_formats); ++i) {
+ fmt = &camif_formats[i];
+ if (vp && !(vp->fmt_flags & fmt->flags))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+static int camif_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+ unsigned int sh = 6;
+
+ if (src >= 64 * tar)
+ return -EINVAL;
+
+ while (sh--) {
+ unsigned int tmp = 1 << sh;
+ if (src >= tar * tmp) {
+ *shift = sh, *ratio = tmp;
+ return 0;
+ }
+ }
+ *shift = 0, *ratio = 1;
+ return 0;
+}
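The pre-scaler above can only divide by a power of two, up to 32; a source that is 64 or more times wider than the target is rejected. For a hypothetical 1280-pixel source scaled to 320 pixels, the largest power of two not exceeding the 4:1 ratio is 4, so ratio = 4 and shift = 2. A standalone sketch of the same selection (illustrative helper, equivalent in result to camif_get_scaler_factor() above):

/* Illustrative only: pick the power-of-two pre-scaler factor. */
static int prescaler_factor(unsigned int src, unsigned int tar,
			    unsigned int *ratio, unsigned int *shift)
{
	unsigned int sh;

	if (src >= 64 * tar)
		return -1;			/* required pre-scale ratio too large */

	for (sh = 5; sh > 0; sh--) {
		if (src >= tar * (1U << sh)) {
			*ratio = 1U << sh;	/* 1280 -> 320: ratio 4 */
			*shift = sh;		/* shift 2 */
			return 0;
		}
	}
	*ratio = 1;				/* no pre-scaling needed */
	*shift = 0;
	return 0;
}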
+
+int s3c_camif_get_scaler_config(struct camif_vp *vp,
+ struct camif_scaler *scaler)
+{
+ struct v4l2_rect *camif_crop = &vp->camif->camif_crop;
+ int source_x = camif_crop->width;
+ int source_y = camif_crop->height;
+ int target_x = vp->out_frame.rect.width;
+ int target_y = vp->out_frame.rect.height;
+ int ret;
+
+ if (vp->rotation == 90 || vp->rotation == 270)
+ swap(target_x, target_y);
+
+ ret = camif_get_scaler_factor(source_x, target_x, &scaler->pre_h_ratio,
+ &scaler->h_shift);
+ if (ret < 0)
+ return ret;
+
+ ret = camif_get_scaler_factor(source_y, target_y, &scaler->pre_v_ratio,
+ &scaler->v_shift);
+ if (ret < 0)
+ return ret;
+
+ scaler->pre_dst_width = source_x / scaler->pre_h_ratio;
+ scaler->pre_dst_height = source_y / scaler->pre_v_ratio;
+
+ scaler->main_h_ratio = (source_x << 8) / (target_x << scaler->h_shift);
+ scaler->main_v_ratio = (source_y << 8) / (target_y << scaler->v_shift);
+
+ scaler->scaleup_h = (target_x >= source_x);
+ scaler->scaleup_v = (target_y >= source_y);
+
+ scaler->copy = 0;
+
+ pr_debug("H: ratio: %u, shift: %u. V: ratio: %u, shift: %u.\n",
+ scaler->pre_h_ratio, scaler->h_shift,
+ scaler->pre_v_ratio, scaler->v_shift);
+
+ pr_debug("Source: %dx%d, Target: %dx%d, scaleup_h/v: %d/%d\n",
+ source_x, source_y, target_x, target_y,
+ scaler->scaleup_h, scaler->scaleup_v);
+
+ return 0;
+}
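On top of the pre-scaler, the main scaler ratio is an 8.8 fixed-point value computed against the pre-scaled size: main_ratio = (source << 8) / (target << shift). Continuing the hypothetical 1280 to 320 example with shift = 2, that is (1280 * 256) / (320 * 4) = 256, i.e. unity scaling after the 4:1 pre-scale. A one-line sketch (illustrative, not part of the patch):

/* Illustrative only: 8.8 fixed-point main scaler ratio. */
static inline unsigned int main_scaler_ratio(unsigned int src,
					     unsigned int tar,
					     unsigned int pre_shift)
{
	return (src << 8) / (tar << pre_shift);	/* 1280, 320, 2 -> 256 */
}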
+
+static int camif_register_sensor(struct camif_dev *camif)
+{
+ struct s3c_camif_sensor_info *sensor = &camif->pdata.sensor;
+ struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
+ struct i2c_adapter *adapter;
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ camif->sensor.sd = NULL;
+
+ if (sensor->i2c_board_info.addr == 0)
+ return -EINVAL;
+
+ adapter = i2c_get_adapter(sensor->i2c_bus_num);
+ if (adapter == NULL) {
+ v4l2_warn(v4l2_dev, "failed to get I2C adapter %d\n",
+ sensor->i2c_bus_num);
+ return -EPROBE_DEFER;
+ }
+
+ sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter,
+ &sensor->i2c_board_info, NULL);
+ if (sd == NULL) {
+ i2c_put_adapter(adapter);
+ v4l2_warn(v4l2_dev, "failed to acquire subdev %s\n",
+ sensor->i2c_board_info.type);
+ return -EPROBE_DEFER;
+ }
+ camif->sensor.sd = sd;
+
+ v4l2_info(v4l2_dev, "registered sensor subdevice %s\n", sd->name);
+
+ /* Get initial pixel format and set it at the camif sink pad */
+ format.pad = 0;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+
+ if (ret < 0)
+ return 0;
+
+ format.pad = CAMIF_SD_PAD_SINK;
+ v4l2_subdev_call(&camif->subdev, pad, set_fmt, NULL, &format);
+
+ v4l2_info(sd, "Initial format from sensor: %dx%d, %#x\n",
+ format.format.width, format.format.height,
+ format.format.code);
+ return 0;
+}
+
+static void camif_unregister_sensor(struct camif_dev *camif)
+{
+ struct v4l2_subdev *sd = camif->sensor.sd;
+ struct i2c_client *client = sd ? v4l2_get_subdevdata(sd) : NULL;
+ struct i2c_adapter *adapter;
+
+ if (client == NULL)
+ return;
+
+ adapter = client->adapter;
+ v4l2_device_unregister_subdev(sd);
+ camif->sensor.sd = NULL;
+ i2c_unregister_device(client);
+ if (adapter)
+ i2c_put_adapter(adapter);
+}
+
+static int camif_create_media_links(struct camif_dev *camif)
+{
+ int i, ret;
+
+ ret = media_entity_create_link(&camif->sensor.sd->entity, 0,
+ &camif->subdev.entity, CAMIF_SD_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < CAMIF_SD_PADS_NUM && !ret; i++) {
+ ret = media_entity_create_link(&camif->subdev.entity, i,
+ &camif->vp[i - 1].vdev.entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ }
+
+ return ret;
+}
+
+static int camif_register_video_nodes(struct camif_dev *camif)
+{
+ int ret = s3c_camif_register_video_node(camif, VP_CODEC);
+ if (ret < 0)
+ return ret;
+
+ return s3c_camif_register_video_node(camif, VP_PREVIEW);
+}
+
+static void camif_unregister_video_nodes(struct camif_dev *camif)
+{
+ s3c_camif_unregister_video_node(camif, VP_CODEC);
+ s3c_camif_unregister_video_node(camif, VP_PREVIEW);
+}
+
+static void camif_unregister_media_entities(struct camif_dev *camif)
+{
+ camif_unregister_video_nodes(camif);
+ camif_unregister_sensor(camif);
+ s3c_camif_unregister_subdev(camif);
+}
+
+/*
+ * Media device
+ */
+static int camif_media_dev_register(struct camif_dev *camif)
+{
+ struct media_device *md = &camif->media_dev;
+ struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
+ unsigned int ip_rev = camif->variant->ip_revision;
+ int ret;
+
+ memset(md, 0, sizeof(*md));
+ snprintf(md->model, sizeof(md->model), "SAMSUNG S3C%s CAMIF",
+ ip_rev == S3C6410_CAMIF_IP_REV ? "6410" : "244X");
+ strlcpy(md->bus_info, "platform", sizeof(md->bus_info));
+ md->hw_revision = ip_rev;
+ md->driver_version = KERNEL_VERSION(1, 0, 0);
+
+ md->dev = camif->dev;
+
+ strlcpy(v4l2_dev->name, "s3c-camif", sizeof(v4l2_dev->name));
+ v4l2_dev->mdev = md;
+
+ ret = v4l2_device_register(camif->dev, v4l2_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = media_device_register(md);
+ if (ret < 0)
+ v4l2_device_unregister(v4l2_dev);
+
+ return ret;
+}
+
+static void camif_clk_put(struct camif_dev *camif)
+{
+ int i;
+
+ for (i = 0; i < CLK_MAX_NUM; i++) {
+ if (IS_ERR_OR_NULL(camif->clock[i]))
+ continue;
+ clk_unprepare(camif->clock[i]);
+ clk_put(camif->clock[i]);
+ }
+}
+
+static int camif_clk_get(struct camif_dev *camif)
+{
+ int ret, i;
+
+ for (i = 0; i < CLK_MAX_NUM; i++) {
+ camif->clock[i] = clk_get(camif->dev, camif_clocks[i]);
+ if (IS_ERR(camif->clock[i])) {
+ ret = PTR_ERR(camif->clock[i]);
+ goto err;
+ }
+ ret = clk_prepare(camif->clock[i]);
+ if (ret < 0) {
+ clk_put(camif->clock[i]);
+ camif->clock[i] = NULL;
+ goto err;
+ }
+ }
+ return 0;
+err:
+ camif_clk_put(camif);
+ dev_err(camif->dev, "failed to get clock: %s\n",
+ camif_clocks[i]);
+ return ret;
+}
+
+/*
+ * The CAMIF device has two relatively independent data processing paths
+ * that can source data from memory or the common camera input frontend.
+ * Register interrupts for each data processing path (camif_vp).
+ */
+static int camif_request_irqs(struct platform_device *pdev,
+ struct camif_dev *camif)
+{
+ int irq, ret, i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+
+ init_waitqueue_head(&vp->irq_queue);
+
+ irq = platform_get_irq(pdev, i);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed to get IRQ %d\n", i);
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler,
+ 0, dev_name(&pdev->dev), vp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to install IRQ: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int s3c_camif_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s3c_camif_plat_data *pdata = dev->platform_data;
+ struct s3c_camif_drvdata *drvdata;
+ struct camif_dev *camif;
+ struct resource *mres;
+ int ret = 0;
+
+ camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
+ if (!camif)
+ return -ENOMEM;
+
+ spin_lock_init(&camif->slock);
+ mutex_init(&camif->lock);
+
+ camif->dev = dev;
+
+ if (!pdata || !pdata->gpio_get || !pdata->gpio_put) {
+ dev_err(dev, "wrong platform data\n");
+ return -EINVAL;
+ }
+
+ camif->pdata = *pdata;
+ drvdata = (void *)platform_get_device_id(pdev)->driver_data;
+ camif->variant = drvdata->variant;
+
+ mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ camif->io_base = devm_ioremap_resource(dev, mres);
+ if (IS_ERR(camif->io_base))
+ return PTR_ERR(camif->io_base);
+
+ ret = camif_request_irqs(pdev, camif);
+ if (ret < 0)
+ return ret;
+
+ ret = pdata->gpio_get();
+ if (ret < 0)
+ return ret;
+
+ ret = s3c_camif_create_subdev(camif);
+ if (ret < 0)
+ goto err_sd;
+
+ ret = camif_clk_get(camif);
+ if (ret < 0)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, camif);
+ clk_set_rate(camif->clock[CLK_CAM],
+ camif->pdata.sensor.clock_frequency);
+
+ dev_info(dev, "sensor clock frequency: %lu\n",
+ clk_get_rate(camif->clock[CLK_CAM]));
+ /*
+ * Set initial pixel format, resolution and crop rectangle.
+ * Must be done before a sensor subdev is registered, as some
+ * settings are overridden with values from the sensor subdev.
+ */
+ s3c_camif_set_defaults(camif);
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm;
+
+ /* Initialize contiguous memory allocator */
+ camif->alloc_ctx = vb2_dma_contig_init_ctx(dev);
+ if (IS_ERR(camif->alloc_ctx)) {
+ ret = PTR_ERR(camif->alloc_ctx);
+ goto err_alloc;
+ }
+
+ ret = camif_media_dev_register(camif);
+ if (ret < 0)
+ goto err_mdev;
+
+ ret = camif_register_sensor(camif);
+ if (ret < 0)
+ goto err_sens;
+
+ ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev);
+ if (ret < 0)
+ goto err_sens;
+
+ mutex_lock(&camif->media_dev.graph_mutex);
+
+ ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = camif_register_video_nodes(camif);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = camif_create_media_links(camif);
+ if (ret < 0)
+ goto err_unlock;
+
+ mutex_unlock(&camif->media_dev.graph_mutex);
+ pm_runtime_put(dev);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&camif->media_dev.graph_mutex);
+err_sens:
+ v4l2_device_unregister(&camif->v4l2_dev);
+ media_device_unregister(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+err_mdev:
+ vb2_dma_contig_cleanup_ctx(camif->alloc_ctx);
+err_alloc:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+err_pm:
+ camif_clk_put(camif);
+err_clk:
+ s3c_camif_unregister_subdev(camif);
+err_sd:
+ pdata->gpio_put();
+ return ret;
+}
+
+static int s3c_camif_remove(struct platform_device *pdev)
+{
+ struct camif_dev *camif = platform_get_drvdata(pdev);
+ struct s3c_camif_plat_data *pdata = &camif->pdata;
+
+ media_device_unregister(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+ v4l2_device_unregister(&camif->v4l2_dev);
+
+ pm_runtime_disable(&pdev->dev);
+ camif_clk_put(camif);
+ pdata->gpio_put();
+
+ return 0;
+}
+
+static int s3c_camif_runtime_resume(struct device *dev)
+{
+ struct camif_dev *camif = dev_get_drvdata(dev);
+
+ clk_enable(camif->clock[CLK_GATE]);
+ /* null op on s3c244x */
+ clk_enable(camif->clock[CLK_CAM]);
+ return 0;
+}
+
+static int s3c_camif_runtime_suspend(struct device *dev)
+{
+ struct camif_dev *camif = dev_get_drvdata(dev);
+
+ /* null op on s3c244x */
+ clk_disable(camif->clock[CLK_CAM]);
+
+ clk_disable(camif->clock[CLK_GATE]);
+ return 0;
+}
+
+static const struct s3c_camif_variant s3c244x_camif_variant = {
+ .vp_pix_limits = {
+ [VP_CODEC] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 2048,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ },
+ [VP_PREVIEW] = {
+ .max_out_width = 640,
+ .max_sc_out_width = 640,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 480,
+ }
+ },
+ .pix_limits = {
+ .win_hor_offset_align = 8,
+ },
+ .ip_revision = S3C244X_CAMIF_IP_REV,
+};
+
+static struct s3c_camif_drvdata s3c244x_camif_drvdata = {
+ .variant = &s3c244x_camif_variant,
+ .bus_clk_freq = 24000000UL,
+};
+
+static const struct s3c_camif_variant s3c6410_camif_variant = {
+ .vp_pix_limits = {
+ [VP_CODEC] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 2048,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ },
+ [VP_PREVIEW] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 720,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ }
+ },
+ .pix_limits = {
+ .win_hor_offset_align = 8,
+ },
+ .ip_revision = S3C6410_CAMIF_IP_REV,
+ .has_img_effect = 1,
+ .vp_offset = 0x20,
+};
+
+static struct s3c_camif_drvdata s3c6410_camif_drvdata = {
+ .variant = &s3c6410_camif_variant,
+ .bus_clk_freq = 133000000UL,
+};
+
+static struct platform_device_id s3c_camif_driver_ids[] = {
+ {
+ .name = "s3c2440-camif",
+ .driver_data = (unsigned long)&s3c244x_camif_drvdata,
+ }, {
+ .name = "s3c6410-camif",
+ .driver_data = (unsigned long)&s3c6410_camif_drvdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, s3c_camif_driver_ids);
+
+static const struct dev_pm_ops s3c_camif_pm_ops = {
+ .runtime_suspend = s3c_camif_runtime_suspend,
+ .runtime_resume = s3c_camif_runtime_resume,
+};
+
+static struct platform_driver s3c_camif_driver = {
+ .probe = s3c_camif_probe,
+ .remove = s3c_camif_remove,
+ .id_table = s3c_camif_driver_ids,
+ .driver = {
+ .name = S3C_CAMIF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &s3c_camif_pm_ops,
+ }
+};
+
+module_platform_driver(s3c_camif_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <sylvester.nawrocki@gmail.com>");
+MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
+MODULE_DESCRIPTION("S3C24XX/S3C64XX SoC camera interface driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
new file mode 100644
index 000000000000..261134baa655
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -0,0 +1,393 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef CAMIF_CORE_H_
+#define CAMIF_CORE_H_
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-core.h>
+#include <media/s3c_camif.h>
+
+#define S3C_CAMIF_DRIVER_NAME "s3c-camif"
+#define CAMIF_REQ_BUFS_MIN 3
+#define CAMIF_MAX_OUT_BUFS 4
+#define CAMIF_MAX_PIX_WIDTH 4096
+#define CAMIF_MAX_PIX_HEIGHT 4096
+#define SCALER_MAX_RATIO 64
+#define CAMIF_DEF_WIDTH 640
+#define CAMIF_DEF_HEIGHT 480
+#define CAMIF_STOP_TIMEOUT 1500 /* ms */
+
+#define S3C244X_CAMIF_IP_REV 0x20 /* 2.0 */
+#define S3C2450_CAMIF_IP_REV 0x30 /* 3.0 - not implemented, not tested */
+#define S3C6400_CAMIF_IP_REV 0x31 /* 3.1 - not implemented, not tested */
+#define S3C6410_CAMIF_IP_REV 0x32 /* 3.2 */
+
+/* struct camif_vp::state */
+
+#define ST_VP_PENDING (1 << 0)
+#define ST_VP_RUNNING (1 << 1)
+#define ST_VP_STREAMING (1 << 2)
+#define ST_VP_SENSOR_STREAMING (1 << 3)
+
+#define ST_VP_ABORTING (1 << 4)
+#define ST_VP_OFF (1 << 5)
+#define ST_VP_LASTIRQ (1 << 6)
+
+#define ST_VP_CONFIG (1 << 8)
+
+#define CAMIF_SD_PAD_SINK 0
+#define CAMIF_SD_PAD_SOURCE_C 1
+#define CAMIF_SD_PAD_SOURCE_P 2
+#define CAMIF_SD_PADS_NUM 3
+
+enum img_fmt {
+ IMG_FMT_RGB565 = 0x0010,
+ IMG_FMT_RGB666,
+ IMG_FMT_XRGB8888,
+ IMG_FMT_YCBCR420 = 0x0020,
+ IMG_FMT_YCRCB420,
+ IMG_FMT_YCBCR422P,
+ IMG_FMT_YCBYCR422 = 0x0040,
+ IMG_FMT_YCRYCB422,
+ IMG_FMT_CBYCRY422,
+ IMG_FMT_CRYCBY422,
+};
+
+#define img_fmt_is_rgb(x) ((x) & 0x10)
+#define img_fmt_is_ycbcr(x) ((x) & 0x60)
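+/*
+ * The enum img_fmt values double as a class encoding: bit 4 marks RGB
+ * formats and bits 5-6 mark YCbCr formats, so the two helpers above are
+ * plain bit tests. For example, with the values from the enum above:
+ * img_fmt_is_rgb(IMG_FMT_RGB565) -> 0x0010 & 0x10, true
+ * img_fmt_is_ycbcr(IMG_FMT_YCBYCR422) -> 0x0040 & 0x60, true
+ * img_fmt_is_ycbcr(IMG_FMT_XRGB8888) -> 0x0012 & 0x60, false
+ */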
+
+/* Possible values for struct camif_fmt::flags */
+#define FMT_FL_S3C24XX_CODEC (1 << 0)
+#define FMT_FL_S3C24XX_PREVIEW (1 << 1)
+#define FMT_FL_S3C64XX (1 << 2)
+
+/**
+ * struct camif_fmt - pixel format description
+ * @name: short format description
+ * @fourcc: fourcc code for this format, 0 if not applicable
+ * @color: a corresponding enum img_fmt
+ * @colplanes: number of physically contiguous data planes
+ * @flags: indicate for which SoCs revisions this format is valid
+ * @depth: bits per pixel (total)
+ * @ybpp: number of luminance bytes per pixel
+ */
+struct camif_fmt {
+ char *name;
+ u32 fourcc;
+ u32 color;
+ u16 colplanes;
+ u16 flags;
+ u8 depth;
+ u8 ybpp;
+};
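+
+/*
+ * Purely illustrative camif_fmt entry; the real format table lives in the
+ * video node code, so the exact name, fourcc and flags below are only an
+ * assumption:
+ *
+ * static const struct camif_fmt yuv422p = {
+ * .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ * .fourcc = V4L2_PIX_FMT_YUV422P,
+ * .color = IMG_FMT_YCBCR422P,
+ * .colplanes = 3,
+ * .flags = FMT_FL_S3C24XX_CODEC | FMT_FL_S3C64XX,
+ * .depth = 16,
+ * .ybpp = 1,
+ * };
+ */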
+
+/**
+ * struct camif_dma_offset - pixel offset information for DMA
+ * @initial: offset (in pixels) to first pixel
+ * @line: offset (in pixels) from end of line to start of next line
+ */
+struct camif_dma_offset {
+ int initial;
+ int line;
+};
+
+/**
+ * struct camif_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ * @dma_offset: DMA offset configuration
+ */
+struct camif_frame {
+ u16 f_width;
+ u16 f_height;
+ struct v4l2_rect rect;
+ struct camif_dma_offset dma_offset;
+};
+
+/* CAMIF clocks enumeration */
+enum {
+ CLK_GATE,
+ CLK_CAM,
+ CLK_MAX_NUM,
+};
+
+struct vp_pix_limits {
+ u16 max_out_width;
+ u16 max_sc_out_width;
+ u16 out_width_align;
+ u16 max_height;
+ u8 min_out_width;
+ u16 out_hor_offset_align;
+};
+
+struct camif_pix_limits {
+ u16 win_hor_offset_align;
+};
+
+/**
+ * struct s3c_camif_variant - CAMIF variant structure
+ * @vp_pix_limits: pixel limits for the codec and preview paths
+ * @pix_limits: pixel limits for the camera input interface
+ * @ip_revision: the CAMIF IP revision: 0x20 for s3c244x, 0x32 for s3c6410
+ * @has_img_effect: set if this IP revision has the image effects feature
+ * @vp_offset: additional register address offset used on some variants
+ */
+struct s3c_camif_variant {
+ struct vp_pix_limits vp_pix_limits[2];
+ struct camif_pix_limits pix_limits;
+ u8 ip_revision;
+ u8 has_img_effect;
+ unsigned int vp_offset;
+};
+
+struct s3c_camif_drvdata {
+ const struct s3c_camif_variant *variant;
+ unsigned long bus_clk_freq;
+};
+
+struct camif_scaler {
+ u8 scaleup_h;
+ u8 scaleup_v;
+ u8 copy;
+ u8 enable;
+ u32 h_shift;
+ u32 v_shift;
+ u32 pre_h_ratio;
+ u32 pre_v_ratio;
+ u32 pre_dst_width;
+ u32 pre_dst_height;
+ u32 main_h_ratio;
+ u32 main_v_ratio;
+};
+
+struct camif_dev;
+
+/**
+ * struct camif_vp - CAMIF data processing path structure (codec/preview)
+ * @irq_queue: interrupt handling waitqueue
+ * @irq: interrupt number for this data path
+ * @camif: pointer to the camif structure
+ * @pad: media pad for the video node
+ * @vdev: video device
+ * @ctrl_handler: video node controls handler
+ * @owner: file handle that owns the streaming
+ * @vb_queue: vb2 buffer queue
+ * @pending_buf_q: pending (empty) buffers queue head
+ * @active_buf_q: active (being written) buffers queue head
+ * @active_buffers: count of buffers set up at the DMA engine
+ * @buf_index: index of the last empty buffer set up in the hardware
+ * @frame_sequence: image frame sequence counter
+ * @reqbufs_count: the number of buffers requested
+ * @scaler: the scaler structure
+ * @out_fmt: pixel format at this video path output
+ * @payload: the output data frame payload size
+ * @out_frame: the output pixel resolution
+ * @state: the video path's state
+ * @fmt_flags: flags determining supported pixel formats
+ * @id: CAMIF id, 0 - codec, 1 - preview
+ * @rotation: current image rotation value
+ * @hflip: apply horizontal flip if set
+ * @vflip: apply vertical flip if set
+ * @offset: variant-dependent additional register address offset for this path
+ */
+struct camif_vp {
+ wait_queue_head_t irq_queue;
+ int irq;
+ struct camif_dev *camif;
+ struct media_pad pad;
+ struct video_device vdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_fh *owner;
+ struct vb2_queue vb_queue;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ unsigned int active_buffers;
+ unsigned int buf_index;
+ unsigned int frame_sequence;
+ unsigned int reqbufs_count;
+ struct camif_scaler scaler;
+ const struct camif_fmt *out_fmt;
+ unsigned int payload;
+ struct camif_frame out_frame;
+ unsigned int state;
+ u16 fmt_flags;
+ u8 id;
+ u8 rotation;
+ u8 hflip;
+ u8 vflip;
+ unsigned int offset;
+};
+
+/* Video processing path enumeration */
+#define VP_CODEC 0
+#define VP_PREVIEW 1
+#define CAMIF_VP_NUM 2
+
+/**
+ * struct camif_dev - the CAMIF driver private data structure
+ * @media_dev: top-level media device structure
+ * @v4l2_dev: root v4l2_device
+ * @subdev: camera interface ("catchcam") subdev
+ * @mbus_fmt: camera input media bus format
+ * @camif_crop: camera input interface crop rectangle
+ * @pads: the camif subdev's media pads
+ * @stream_count: the camera interface streaming reference counter
+ * @sensor: image sensor data structure
+ * @m_pipeline: video entity pipeline description
+ * @ctrl_handler: v4l2 control handler (owned by @subdev)
+ * @test_pattern: test pattern controls
+ * @vp: video path (DMA) description (codec/preview)
+ * @alloc_ctx: memory buffer allocator context
+ * @variant: variant information for this device
+ * @dev: pointer to the CAMIF device struct
+ * @pdata: a copy of the driver's platform data
+ * @clock: clocks required for the CAMIF operation
+ * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting CAMIF registers
+ * @io_base: start address of the memory-mapped CAMIF registers
+ */
+struct camif_dev {
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_rect camif_crop;
+ struct media_pad pads[CAMIF_SD_PADS_NUM];
+ int stream_count;
+
+ struct cam_sensor {
+ struct v4l2_subdev *sd;
+ short power_count;
+ short stream_count;
+ } sensor;
+ struct media_pipeline *m_pipeline;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_test_pattern;
+ struct {
+ struct v4l2_ctrl *ctrl_colorfx;
+ struct v4l2_ctrl *ctrl_colorfx_cbcr;
+ };
+ u8 test_pattern;
+ u8 colorfx;
+ u8 colorfx_cb;
+ u8 colorfx_cr;
+
+ struct camif_vp vp[CAMIF_VP_NUM];
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ const struct s3c_camif_variant *variant;
+ struct device *dev;
+ struct s3c_camif_plat_data pdata;
+ struct clk *clock[CLK_MAX_NUM];
+ struct mutex lock;
+ spinlock_t slock;
+ void __iomem *io_base;
+};
+
+/**
+ * struct camif_addr - Y/Cb/Cr DMA start address structure
+ * @y: luminance plane dma address
+ * @cb: Cb plane dma address
+ * @cr: Cr plane dma address
+ */
+struct camif_addr {
+ dma_addr_t y;
+ dma_addr_t cb;
+ dma_addr_t cr;
+};
+
+/**
+ * struct camif_buffer - the camif video buffer structure
+ * @vb: vb2 buffer
+ * @list: list head for the buffers queue
+ * @paddr: DMA start addresses
+ * @index: an identifier of this buffer at the DMA engine
+ */
+struct camif_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ struct camif_addr paddr;
+ unsigned int index;
+};
+
+const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
+ const u32 *pixelformat, int index);
+int s3c_camif_register_video_node(struct camif_dev *camif, int idx);
+void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx);
+irqreturn_t s3c_camif_irq_handler(int irq, void *priv);
+int s3c_camif_create_subdev(struct camif_dev *camif);
+void s3c_camif_unregister_subdev(struct camif_dev *camif);
+int s3c_camif_set_defaults(struct camif_dev *camif);
+int s3c_camif_get_scaler_config(struct camif_vp *vp,
+ struct camif_scaler *scaler);
+
+static inline void camif_active_queue_add(struct camif_vp *vp,
+ struct camif_buffer *buf)
+{
+ list_add_tail(&buf->list, &vp->active_buf_q);
+ vp->active_buffers++;
+}
+
+static inline struct camif_buffer *camif_active_queue_pop(
+ struct camif_vp *vp)
+{
+ struct camif_buffer *buf = list_first_entry(&vp->active_buf_q,
+ struct camif_buffer, list);
+ list_del(&buf->list);
+ vp->active_buffers--;
+ return buf;
+}
+
+static inline struct camif_buffer *camif_active_queue_peek(
+ struct camif_vp *vp, int index)
+{
+ struct camif_buffer *tmp, *buf;
+
+ if (WARN_ON(list_empty(&vp->active_buf_q)))
+ return NULL;
+
+ list_for_each_entry_safe(buf, tmp, &vp->active_buf_q, list) {
+ if (buf->index == index) {
+ list_del(&buf->list);
+ vp->active_buffers--;
+ return buf;
+ }
+ }
+
+ return NULL;
+}
+
+static inline void camif_pending_queue_add(struct camif_vp *vp,
+ struct camif_buffer *buf)
+{
+ list_add_tail(&buf->list, &vp->pending_buf_q);
+}
+
+static inline struct camif_buffer *camif_pending_queue_pop(
+ struct camif_vp *vp)
+{
+ struct camif_buffer *buf = list_first_entry(&vp->pending_buf_q,
+ struct camif_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
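+
+/*
+ * Illustrative only: the queue helpers above are typically paired in the
+ * frame interrupt handler, which lives in the video node code. A sketch
+ * of that buffer rotation (under that assumption) could look like:
+ *
+ * if (!list_empty(&vp->pending_buf_q)) {
+ * struct camif_buffer *b = camif_pending_queue_pop(vp);
+ * camif_hw_set_output_addr(vp, &b->paddr, b->index);
+ * camif_active_queue_add(vp, b);
+ * }
+ */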
+
+#endif /* CAMIF_CORE_H_ */
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
new file mode 100644
index 000000000000..1a3b4fc05ec6
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -0,0 +1,606 @@
+/*
+ * Samsung s3c24xx/s3c64xx SoC CAMIF driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include "camif-regs.h"
+
+#define camif_write(_camif, _off, _val) writel(_val, (_camif)->io_base + (_off))
+#define camif_read(_camif, _off) readl((_camif)->io_base + (_off))
+
+void camif_hw_reset(struct camif_dev *camif)
+{
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
+ cfg |= CISRCFMT_ITU601_8BIT;
+ camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
+
+ /* S/W reset */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg |= CIGCTRL_SWRST;
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ cfg |= CIGCTRL_IRQ_LEVEL;
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+ udelay(10);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg &= ~CIGCTRL_SWRST;
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+ udelay(10);
+}
+
+void camif_hw_clear_pending_irq(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg |= CIGCTRL_IRQ_CLR(vp->id);
+ camif_write(vp->camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+/*
+ * Sets video test pattern (off, color bar, horizontal or vertical gradient).
+ * External sensor pixel clock must be active for the test pattern to work.
+ */
+void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern)
+{
+ u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg &= ~CIGCTRL_TESTPATTERN_MASK;
+ cfg |= (pattern << 27);
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
+ unsigned int cr, unsigned int cb)
+{
+ static const struct v4l2_control colorfx[] = {
+ { V4L2_COLORFX_NONE, CIIMGEFF_FIN_BYPASS },
+ { V4L2_COLORFX_BW, CIIMGEFF_FIN_ARBITRARY },
+ { V4L2_COLORFX_SEPIA, CIIMGEFF_FIN_ARBITRARY },
+ { V4L2_COLORFX_NEGATIVE, CIIMGEFF_FIN_NEGATIVE },
+ { V4L2_COLORFX_ART_FREEZE, CIIMGEFF_FIN_ARTFREEZE },
+ { V4L2_COLORFX_EMBOSS, CIIMGEFF_FIN_EMBOSSING },
+ { V4L2_COLORFX_SILHOUETTE, CIIMGEFF_FIN_SILHOUETTE },
+ { V4L2_COLORFX_SET_CBCR, CIIMGEFF_FIN_ARBITRARY },
+ };
+ unsigned int i, cfg;
+
+ for (i = 0; i < ARRAY_SIZE(colorfx); i++)
+ if (colorfx[i].id == effect)
+ break;
+
+ if (i == ARRAY_SIZE(colorfx))
+ return;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset));
+ /* Set effect */
+ cfg &= ~CIIMGEFF_FIN_MASK;
+ cfg |= colorfx[i].value;
+ /* Set both paths */
+ if (camif->variant->ip_revision >= S3C6400_CAMIF_IP_REV) {
+ if (effect == V4L2_COLORFX_NONE)
+ cfg &= ~CIIMGEFF_IE_ENABLE_MASK;
+ else
+ cfg |= CIIMGEFF_IE_ENABLE_MASK;
+ }
+ cfg &= ~CIIMGEFF_PAT_CBCR_MASK;
+ cfg |= cr | (cb << 13);
+ camif_write(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset), cfg);
+}
+
+static const u32 src_pixfmt_map[8][2] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR },
+ { V4L2_MBUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB },
+ { V4L2_MBUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY },
+ { V4L2_MBUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY },
+};
+
+/* Set camera input pixel format and resolution */
+void camif_hw_set_source_format(struct camif_dev *camif)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ int i = ARRAY_SIZE(src_pixfmt_map);
+ u32 cfg;
+
+ /* Walk the table from the end; stop at the first matching mbus code. */
+ while (--i >= 0) {
+ if (src_pixfmt_map[i][0] == mf->code)
+ break;
+ }
+
+ if (i < 0) {
+ /* No match found, fall back to the first table entry. */
+ i = 0;
+ dev_err(camif->dev,
+ "Unsupported pixel code, falling back to %#08x\n",
+ src_pixfmt_map[i][0]);
+ }
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
+ cfg &= ~(CISRCFMT_ORDER422_MASK | CISRCFMT_SIZE_CAM_MASK);
+ cfg |= (mf->width << 16) | mf->height;
+ cfg |= src_pixfmt_map[i][1];
+ camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void camif_hw_set_camera_crop(struct camif_dev *camif)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ u32 hoff2, voff2;
+ u32 cfg;
+
+ /* Note: s3c244x requirement: left = f_width - rect.width / 2 */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
+ cfg &= ~(CIWDOFST_OFST_MASK | CIWDOFST_WINOFSEN);
+ cfg |= (crop->left << 16) | crop->top;
+ if (crop->left != 0 || crop->top != 0)
+ cfg |= CIWDOFST_WINOFSEN;
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ hoff2 = mf->width - crop->width - crop->left;
+ voff2 = mf->height - crop->height - crop->top;
+ cfg = (hoff2 << 16) | voff2;
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST2, cfg);
+ }
+}
+
+void camif_hw_clear_fifo_overflow(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
+ if (vp->id == 0)
+ cfg |= (CIWDOFST_CLROVCOFIY | CIWDOFST_CLROVCOFICB |
+ CIWDOFST_CLROVCOFICR);
+ else
+ cfg |= (/* CIWDOFST_CLROVPRFIY | */ CIWDOFST_CLROVPRFICB |
+ CIWDOFST_CLROVPRFICR);
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
+}
+
+/* Set video bus signals polarity */
+void camif_hw_set_camera_bus(struct camif_dev *camif)
+{
+ unsigned int flags = camif->pdata.sensor.flags;
+
+ u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+
+ cfg &= ~(CIGCTRL_INVPOLPCLK | CIGCTRL_INVPOLVSYNC |
+ CIGCTRL_INVPOLHREF | CIGCTRL_INVPOLFIELD);
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= CIGCTRL_INVPOLPCLK;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= CIGCTRL_INVPOLVSYNC;
+ /*
+ * HREF is normally high during frame active data
+ * transmission and low during horizontal synchronization
+ * period. Thus HREF active high means HSYNC active low.
+ */
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ cfg |= CIGCTRL_INVPOLHREF; /* HREF active low */
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ if (flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= CIGCTRL_INVPOLFIELD;
+ cfg |= CIGCTRL_FIELDMODE;
+ }
+
+ pr_debug("Setting CIGCTRL to: %#x\n", cfg);
+
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+void camif_hw_set_output_addr(struct camif_vp *vp,
+ struct camif_addr *paddr, int i)
+{
+ struct camif_dev *camif = vp->camif;
+
+ camif_write(camif, S3C_CAMIF_REG_CIYSA(vp->id, i), paddr->y);
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV
+ || vp->id == VP_CODEC) {
+ camif_write(camif, S3C_CAMIF_REG_CICBSA(vp->id, i),
+ paddr->cb);
+ camif_write(camif, S3C_CAMIF_REG_CICRSA(vp->id, i),
+ paddr->cr);
+ }
+
+ pr_debug("dst_buf[%d]: %#X, cb: %#X, cr: %#X\n",
+ i, paddr->y, paddr->cb, paddr->cr);
+}
+
+static void camif_hw_set_out_dma_size(struct camif_vp *vp)
+{
+ struct camif_frame *frame = &vp->out_frame;
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+ cfg &= ~CITRGFMT_TARGETSIZE_MASK;
+ cfg |= (frame->f_width << 16) | frame->f_height;
+ camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+}
+
+static void camif_get_dma_burst(u32 width, u32 ybpp, u32 *mburst, u32 *rburst)
+{
+ unsigned int nwords = width * ybpp / 4;
+ unsigned int div, rem;
+
+ if (WARN_ON(width < 8 || (width * ybpp) & 7))
+ return;
+
+ for (div = 16; div >= 2; div /= 2) {
+ if (nwords < div)
+ continue;
+
+ rem = nwords & (div - 1);
+ if (rem == 0) {
+ *mburst = div;
+ *rburst = div;
+ break;
+ }
+ if (rem == div / 2 || rem == div / 4) {
+ *mburst = div;
+ *rburst = rem;
+ break;
+ }
+ }
+}
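+
+/*
+ * Example of the calculation above: a 640 pixel wide line with ybpp = 2
+ * (a 16 bpp format) is 640 * 2 / 4 = 320 words; 320 is a multiple of 16,
+ * so both the main and the remained burst length are set to 16 words.
+ */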
+
+void camif_hw_set_output_dma(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+ unsigned int ymburst = 0, yrburst = 0;
+ u32 cfg;
+
+ camif_hw_set_out_dma_size(vp);
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ struct camif_dma_offset *offset = &frame->dma_offset;
+ /* Set the input dma offsets. */
+ cfg = S3C_CISS_OFFS_INITIAL(offset->initial);
+ cfg |= S3C_CISS_OFFS_LINE(offset->line);
+ camif_write(camif, S3C_CAMIF_REG_CISSY(vp->id), cfg);
+ camif_write(camif, S3C_CAMIF_REG_CISSCB(vp->id), cfg);
+ camif_write(camif, S3C_CAMIF_REG_CISSCR(vp->id), cfg);
+ }
+
+ /* Configure DMA burst values */
+ camif_get_dma_burst(frame->rect.width, fmt->ybpp, &ymburst, &yrburst);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset));
+ cfg &= ~CICTRL_BURST_MASK;
+
+ cfg |= CICTRL_YBURST1(ymburst) | CICTRL_YBURST2(yrburst);
+ cfg |= CICTRL_CBURST1(ymburst / 2) | CICTRL_CBURST2(yrburst / 2);
+
+ camif_write(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("ymburst: %u, yrburst: %u\n", ymburst, yrburst);
+}
+
+void camif_hw_set_input_path(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id));
+ cfg &= ~MSCTRL_SEL_DMA_CAM;
+ camif_write(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id), cfg);
+}
+
+void camif_hw_set_target_format(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ u32 cfg;
+
+ pr_debug("fw: %d, fh: %d color: %d\n", frame->f_width,
+ frame->f_height, vp->out_fmt->color);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+ cfg &= ~CITRGFMT_TARGETSIZE_MASK;
+
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
+ /* We currently support only YCbCr 4:2:2 at the camera input */
+ cfg |= CITRGFMT_IN422;
+ cfg &= ~CITRGFMT_OUT422;
+ if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
+ cfg |= CITRGFMT_OUT422;
+ } else {
+ cfg &= ~CITRGFMT_OUTFORMAT_MASK;
+ switch (vp->out_fmt->color) {
+ case IMG_FMT_RGB565...IMG_FMT_XRGB8888:
+ cfg |= CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case IMG_FMT_YCBCR420...IMG_FMT_YCRCB420:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ case IMG_FMT_YCBCR422P:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case IMG_FMT_YCBYCR422...IMG_FMT_CRYCBY422:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR422I;
+ break;
+ }
+ }
+
+ /* Rotation is only supported by s3c64xx */
+ if (vp->rotation == 90 || vp->rotation == 270)
+ cfg |= (frame->f_height << 16) | frame->f_width;
+ else
+ cfg |= (frame->f_width << 16) | frame->f_height;
+ camif_write(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+
+ /* Target area, output pixel width * height */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset));
+ cfg &= ~CITAREA_MASK;
+ cfg |= (frame->f_width * frame->f_height);
+ camif_write(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset), cfg);
+}
+
+void camif_hw_set_flip(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif,
+ S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+
+ cfg &= ~CITRGFMT_FLIP_MASK;
+
+ if (vp->hflip)
+ cfg |= CITRGFMT_FLIP_Y_MIRROR;
+ if (vp->vflip)
+ cfg |= CITRGFMT_FLIP_X_MIRROR;
+
+ camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+}
+
+static void camif_hw_set_prescaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *sc = &vp->scaler;
+ u32 cfg, shfactor, addr;
+
+ addr = S3C_CAMIF_REG_CISCPRERATIO(vp->id, vp->offset);
+
+ shfactor = 10 - (sc->h_shift + sc->v_shift);
+ cfg = shfactor << 28;
+
+ cfg |= (sc->pre_h_ratio << 16) | sc->pre_v_ratio;
+ camif_write(camif, addr, cfg);
+
+ cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+ camif_write(camif, S3C_CAMIF_REG_CISCPREDST(vp->id, vp->offset), cfg);
+}
+
+void camif_s3c244x_hw_set_scaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *scaler = &vp->scaler;
+ unsigned int color = vp->out_fmt->color;
+ u32 cfg;
+
+ camif_hw_set_prescaler(vp);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
+
+ cfg &= ~(CISCCTRL_SCALEUP_MASK | CISCCTRL_SCALERBYPASS |
+ CISCCTRL_MAIN_RATIO_MASK | CIPRSCCTRL_RGB_FORMAT_24BIT);
+
+ if (scaler->enable) {
+ if (scaler->scaleup_h) {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALEUP_H;
+ else
+ cfg |= CIPRSCCTRL_SCALEUP_H;
+ }
+ if (scaler->scaleup_v) {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALEUP_V;
+ else
+ cfg |= CIPRSCCTRL_SCALEUP_V;
+ }
+ } else {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALERBYPASS;
+ }
+
+ cfg |= ((scaler->main_h_ratio & 0x1ff) << 16);
+ cfg |= scaler->main_v_ratio & 0x1ff;
+
+ if (vp->id == VP_PREVIEW) {
+ if (color == IMG_FMT_XRGB8888)
+ cfg |= CIPRSCCTRL_RGB_FORMAT_24BIT;
+ cfg |= CIPRSCCTRL_SAMPLE;
+ }
+
+ camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("main: h_ratio: %#x, v_ratio: %#x",
+ scaler->main_h_ratio, scaler->main_v_ratio);
+}
+
+void camif_s3c64xx_hw_set_scaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *scaler = &vp->scaler;
+ unsigned int color = vp->out_fmt->color;
+ u32 cfg;
+
+ camif_hw_set_prescaler(vp);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
+
+ cfg &= ~(CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE
+ | CISCCTRL_SCALEUP_H | CISCCTRL_SCALEUP_V
+ | CISCCTRL_SCALERBYPASS | CISCCTRL_ONE2ONE
+ | CISCCTRL_INRGB_FMT_MASK | CISCCTRL_OUTRGB_FMT_MASK
+ | CISCCTRL_INTERLACE | CISCCTRL_EXTRGB_EXTENSION
+ | CISCCTRL_MAIN_RATIO_MASK);
+
+ cfg |= (CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE);
+
+ if (!scaler->enable) {
+ cfg |= CISCCTRL_SCALERBYPASS;
+ } else {
+ if (scaler->scaleup_h)
+ cfg |= CISCCTRL_SCALEUP_H;
+ if (scaler->scaleup_v)
+ cfg |= CISCCTRL_SCALEUP_V;
+ if (scaler->copy)
+ cfg |= CISCCTRL_ONE2ONE;
+ }
+
+ switch (color) {
+ case IMG_FMT_RGB666:
+ cfg |= CISCCTRL_OUTRGB_FMT_RGB666;
+ break;
+ case IMG_FMT_XRGB8888:
+ cfg |= CISCCTRL_OUTRGB_FMT_RGB888;
+ break;
+ }
+
+ cfg |= (scaler->main_h_ratio & 0x1ff) << 16;
+ cfg |= scaler->main_v_ratio & 0x1ff;
+
+ camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("main: h_ratio: %#x, v_ratio: %#x",
+ scaler->main_h_ratio, scaler->main_v_ratio);
+}
+
+void camif_hw_set_scaler(struct camif_vp *vp)
+{
+ unsigned int ip_rev = vp->camif->variant->ip_revision;
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ camif_s3c244x_hw_set_scaler(vp);
+ else
+ camif_s3c64xx_hw_set_scaler(vp);
+}
+
+void camif_hw_enable_scaler(struct camif_vp *vp, bool on)
+{
+ u32 addr = S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset);
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, addr);
+ if (on)
+ cfg |= CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~CISCCTRL_SCALERSTART;
+ camif_write(vp->camif, addr, cfg);
+}
+
+void camif_hw_set_lastirq(struct camif_vp *vp, int enable)
+{
+ u32 addr = S3C_CAMIF_REG_CICTRL(vp->id, vp->offset);
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, addr);
+ if (enable)
+ cfg |= CICTRL_LASTIRQ_ENABLE;
+ else
+ cfg &= ~CICTRL_LASTIRQ_ENABLE;
+ camif_write(vp->camif, addr, cfg);
+}
+
+void camif_hw_enable_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
+ camif->stream_count++;
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ cfg |= CIIMGCPT_CPT_FREN_ENABLE(vp->id);
+
+ if (vp->scaler.enable)
+ cfg |= CIIMGCPT_IMGCPTEN_SC(vp->id);
+
+ if (camif->stream_count == 1)
+ cfg |= CIIMGCPT_IMGCPTEN;
+
+ camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
+
+ pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
+ cfg, camif->stream_count);
+}
+
+void camif_hw_disable_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
+ cfg &= ~CIIMGCPT_IMGCPTEN_SC(vp->id);
+
+ if (WARN_ON(--(camif->stream_count) < 0))
+ camif->stream_count = 0;
+
+ if (camif->stream_count == 0)
+ cfg &= ~CIIMGCPT_IMGCPTEN;
+
+ pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
+ cfg, camif->stream_count);
+
+ camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
+}
+
+void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { S3C_CAMIF_REG_CISRCFMT, "CISRCFMT" },
+ { S3C_CAMIF_REG_CIWDOFST, "CIWDOFST" },
+ { S3C_CAMIF_REG_CIGCTRL, "CIGCTRL" },
+ { S3C_CAMIF_REG_CIWDOFST2, "CIWDOFST2" },
+ { S3C_CAMIF_REG_CIYSA(0, 0), "CICOYSA0" },
+ { S3C_CAMIF_REG_CICBSA(0, 0), "CICOCBSA0" },
+ { S3C_CAMIF_REG_CICRSA(0, 0), "CICOCRSA0" },
+ { S3C_CAMIF_REG_CIYSA(0, 1), "CICOYSA1" },
+ { S3C_CAMIF_REG_CICBSA(0, 1), "CICOCBSA1" },
+ { S3C_CAMIF_REG_CICRSA(0, 1), "CICOCRSA1" },
+ { S3C_CAMIF_REG_CIYSA(0, 2), "CICOYSA2" },
+ { S3C_CAMIF_REG_CICBSA(0, 2), "CICOCBSA2" },
+ { S3C_CAMIF_REG_CICRSA(0, 2), "CICOCRSA2" },
+ { S3C_CAMIF_REG_CIYSA(0, 3), "CICOYSA3" },
+ { S3C_CAMIF_REG_CICBSA(0, 3), "CICOCBSA3" },
+ { S3C_CAMIF_REG_CICRSA(0, 3), "CICOCRSA3" },
+ { S3C_CAMIF_REG_CIYSA(1, 0), "CIPRYSA0" },
+ { S3C_CAMIF_REG_CIYSA(1, 1), "CIPRYSA1" },
+ { S3C_CAMIF_REG_CIYSA(1, 2), "CIPRYSA2" },
+ { S3C_CAMIF_REG_CIYSA(1, 3), "CIPRYSA3" },
+ { S3C_CAMIF_REG_CITRGFMT(0, 0), "CICOTRGFMT" },
+ { S3C_CAMIF_REG_CITRGFMT(1, 0), "CIPRTRGFMT" },
+ { S3C_CAMIF_REG_CICTRL(0, 0), "CICOCTRL" },
+ { S3C_CAMIF_REG_CICTRL(1, 0), "CIPRCTRL" },
+ { S3C_CAMIF_REG_CISCPREDST(0, 0), "CICOSCPREDST" },
+ { S3C_CAMIF_REG_CISCPREDST(1, 0), "CIPRSCPREDST" },
+ { S3C_CAMIF_REG_CISCPRERATIO(0, 0), "CICOSCPRERATIO" },
+ { S3C_CAMIF_REG_CISCPRERATIO(1, 0), "CIPRSCPRERATIO" },
+ { S3C_CAMIF_REG_CISCCTRL(0, 0), "CICOSCCTRL" },
+ { S3C_CAMIF_REG_CISCCTRL(1, 0), "CIPRSCCTRL" },
+ { S3C_CAMIF_REG_CITAREA(0, 0), "CICOTAREA" },
+ { S3C_CAMIF_REG_CITAREA(1, 0), "CIPRTAREA" },
+ { S3C_CAMIF_REG_CISTATUS(0, 0), "CICOSTATUS" },
+ { S3C_CAMIF_REG_CISTATUS(1, 0), "CIPRSTATUS" },
+ { S3C_CAMIF_REG_CIIMGCPT(0), "CIIMGCPT" },
+ };
+ u32 i;
+
+ pr_info("--- %s ---\n", label);
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = readl(camif->io_base + registers[i].offset);
+ printk(KERN_INFO "%s:\t0x%08x\n", registers[i].name, cfg);
+ }
+}
diff --git a/drivers/media/platform/s3c-camif/camif-regs.h b/drivers/media/platform/s3c-camif/camif-regs.h
new file mode 100644
index 000000000000..af2d472ea1dd
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-regs.h
@@ -0,0 +1,269 @@
+/*
+ * Register definition file for s3c24xx/s3c64xx SoC CAMIF driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef CAMIF_REGS_H_
+#define CAMIF_REGS_H_
+
+#include "camif-core.h"
+#include <media/s3c_camif.h>
+
+/*
+ * The id argument indicates the processing path:
+ * id = 0 - codec (FIMC C), 1 - preview (FIMC P).
+ */
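+
+/*
+ * Most per-path registers are laid out as a codec register block followed
+ * by a preview block, hence the (id, _offs) macros below. The _offs value
+ * matches the variant's vp_offset (0 on s3c244x, 0x20 on s3c6410). For
+ * example, S3C_CAMIF_REG_CITRGFMT(id, _offs) = 0x48 + id * (0x34 + _offs)
+ * yields 0x48 for the codec path on both SoCs, 0x7c for the preview path
+ * on s3c244x and 0x9c on s3c6410.
+ */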
+
+/* Camera input format */
+#define S3C_CAMIF_REG_CISRCFMT 0x00
+#define CISRCFMT_ITU601_8BIT (1 << 31)
+#define CISRCFMT_ITU656_8BIT (0 << 31)
+#define CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define CISRCFMT_ORDER422_CRYCBY (3 << 14)
+#define CISRCFMT_ORDER422_MASK (3 << 14)
+#define CISRCFMT_SIZE_CAM_MASK (0x1fff << 16 | 0x1fff)
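+/*
+ * The source width is stored in bits [28:16] and the height in bits
+ * [12:0]; e.g. a 640x480 camera input is programmed as (640 << 16) | 480,
+ * which is what camif_hw_set_source_format() writes.
+ */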
+
+/* Window offset */
+#define S3C_CAMIF_REG_CIWDOFST 0x04
+#define CIWDOFST_WINOFSEN (1 << 31)
+#define CIWDOFST_CLROVCOFIY (1 << 30)
+#define CIWDOFST_CLROVRLB_PR (1 << 28)
+/* #define CIWDOFST_CLROVPRFIY (1 << 27) */
+#define CIWDOFST_CLROVCOFICB (1 << 15)
+#define CIWDOFST_CLROVCOFICR (1 << 14)
+#define CIWDOFST_CLROVPRFICB (1 << 13)
+#define CIWDOFST_CLROVPRFICR (1 << 12)
+#define CIWDOFST_OFST_MASK (0x7ff << 16 | 0x7ff)
+
+/* Window offset 2 */
+#define S3C_CAMIF_REG_CIWDOFST2 0x14
+#define CIWDOFST2_OFST2_MASK (0xfff << 16 | 0xfff)
+
+/* Global control */
+#define S3C_CAMIF_REG_CIGCTRL 0x08
+#define CIGCTRL_SWRST (1 << 31)
+#define CIGCTRL_CAMRST (1 << 30)
+#define CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define CIGCTRL_INVPOLPCLK (1 << 26)
+#define CIGCTRL_INVPOLVSYNC (1 << 25)
+#define CIGCTRL_INVPOLHREF (1 << 24)
+#define CIGCTRL_IRQ_OVFEN (1 << 22)
+#define CIGCTRL_HREF_MASK (1 << 21)
+#define CIGCTRL_IRQ_LEVEL (1 << 20)
+/* IRQ_CLR_C, IRQ_CLR_P */
+#define CIGCTRL_IRQ_CLR(id) (1 << (19 - (id)))
+#define CIGCTRL_FIELDMODE (1 << 2)
+#define CIGCTRL_INVPOLFIELD (1 << 1)
+#define CIGCTRL_CAM_INTERLACE (1 << 0)
+
+/* Y DMA output frame start address. n = 0..3. */
+#define S3C_CAMIF_REG_CIYSA(id, n) (0x18 + (id) * 0x54 + (n) * 4)
+/* Cb plane output DMA start address. n = 0..3. Only codec path. */
+#define S3C_CAMIF_REG_CICBSA(id, n) (0x28 + (id) * 0x54 + (n) * 4)
+/* Cr plane output DMA start address. n = 0..3. Only codec path. */
+#define S3C_CAMIF_REG_CICRSA(id, n) (0x38 + (id) * 0x54 + (n) * 4)
+
+/* CICOTRGFMT, CIPRTRGFMT - Target format */
+#define S3C_CAMIF_REG_CITRGFMT(id, _offs) (0x48 + (id) * (0x34 + (_offs)))
+#define CITRGFMT_IN422 (1 << 31) /* only for s3c24xx */
+#define CITRGFMT_OUT422 (1 << 30) /* only for s3c24xx */
+#define CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_YCBCR422I (2 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_RGB (3 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_MASK (3 << 29) /* only for s3c6410 */
+#define CITRGFMT_TARGETHSIZE(x) ((x) << 16)
+#define CITRGFMT_FLIP_NORMAL (0 << 14)
+#define CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define CITRGFMT_FLIP_180 (3 << 14)
+#define CITRGFMT_FLIP_MASK (3 << 14)
+/* Preview path only */
+#define CITRGFMT_ROT90_PR (1 << 13)
+#define CITRGFMT_TARGETVSIZE(x) ((x) << 0)
+#define CITRGFMT_TARGETSIZE_MASK ((0x1fff << 16) | 0x1fff)
+
+/* CICOCTRL, CIPRCTRL. Output DMA control. */
+#define S3C_CAMIF_REG_CICTRL(id, _offs) (0x4c + (id) * (0x34 + (_offs)))
+#define CICTRL_BURST_MASK (0xfffff << 4)
+/* xBURSTn - 5-bits width */
+#define CICTRL_YBURST1(x) ((x) << 19)
+#define CICTRL_YBURST2(x) ((x) << 14)
+#define CICTRL_RGBBURST1(x) ((x) << 19)
+#define CICTRL_RGBBURST2(x) ((x) << 14)
+#define CICTRL_CBURST1(x) ((x) << 9)
+#define CICTRL_CBURST2(x) ((x) << 4)
+#define CICTRL_LASTIRQ_ENABLE (1 << 2)
+#define CICTRL_ORDER422_MASK (3 << 0)
+
+/* CICOSCPRERATIO, CIPRSCPRERATIO. Pre-scaler control 1. */
+#define S3C_CAMIF_REG_CISCPRERATIO(id, _offs) (0x50 + (id) * (0x34 + (_offs)))
+
+/* CICOSCPREDST, CIPRSCPREDST. Pre-scaler control 2. */
+#define S3C_CAMIF_REG_CISCPREDST(id, _offs) (0x54 + (id) * (0x34 + (_offs)))
+
+/* CICOSCCTRL, CIPRSCCTRL. Main scaler control. */
+#define S3C_CAMIF_REG_CISCCTRL(id, _offs) (0x58 + (id) * (0x34 + (_offs)))
+#define CISCCTRL_SCALERBYPASS (1 << 31)
+/* s3c244x preview path only, s3c64xx both */
+#define CIPRSCCTRL_SAMPLE (1 << 31)
+/* 0 - 16-bit RGB, 1 - 24-bit RGB */
+#define CIPRSCCTRL_RGB_FORMAT_24BIT (1 << 30) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_H (1 << 29) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_V (1 << 28) /* only for s3c244x */
+/* s3c64xx */
+#define CISCCTRL_SCALEUP_H (1 << 30)
+#define CISCCTRL_SCALEUP_V (1 << 29)
+#define CISCCTRL_SCALEUP_MASK (0x3 << 29)
+#define CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define CISCCTRL_INTERLACE (1 << 25)
+#define CISCCTRL_SCALERSTART (1 << 15)
+#define CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define CISCCTRL_ONE2ONE (1 << 9)
+#define CISCCTRL_MAIN_RATIO_MASK (0x1ff << 16 | 0x1ff)
+
+/* CICOTAREA, CIPRTAREA. Target area for DMA (Hsize x Vsize). */
+#define S3C_CAMIF_REG_CITAREA(id, _offs) (0x5c + (id) * (0x34 + (_offs)))
+#define CITAREA_MASK 0xfffffff
+
+/* Codec (id = 0) or preview (id = 1) path status. */
+#define S3C_CAMIF_REG_CISTATUS(id, _offs) (0x64 + (id) * (0x34 + (_offs)))
+#define CISTATUS_OVFIY_STATUS (1 << 31)
+#define CISTATUS_OVFICB_STATUS (1 << 30)
+#define CISTATUS_OVFICR_STATUS (1 << 29)
+#define CISTATUS_OVF_MASK (0x7 << 29)
+#define CIPRSTATUS_OVF_MASK (0x3 << 30)
+#define CISTATUS_VSYNC_STATUS (1 << 28)
+#define CISTATUS_FRAMECNT_MASK (3 << 26)
+#define CISTATUS_FRAMECNT(__reg) (((__reg) >> 26) & 0x3)
+#define CISTATUS_WINOFSTEN_STATUS (1 << 25)
+#define CISTATUS_IMGCPTEN_STATUS (1 << 22)
+#define CISTATUS_IMGCPTENSC_STATUS (1 << 21)
+#define CISTATUS_VSYNC_A_STATUS (1 << 20)
+#define CISTATUS_FRAMEEND_STATUS (1 << 19) /* 17 on s3c64xx */
+
+/* Image capture enable */
+#define S3C_CAMIF_REG_CIIMGCPT(_offs) (0xa0 + (_offs))
+#define CIIMGCPT_IMGCPTEN (1 << 31)
+#define CIIMGCPT_IMGCPTEN_SC(id) (1 << (30 - (id)))
+/* Frame control: 1 - one-shot, 0 - free run */
+#define CIIMGCPT_CPT_FREN_ENABLE(id) (1 << (25 - (id)))
+#define CIIMGCPT_CPT_FRMOD_ENABLE (0 << 18)
+#define CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Capture sequence */
+#define S3C_CAMIF_REG_CICPTSEQ 0xc4
+
+/* Image effects */
+#define S3C_CAMIF_REG_CIIMGEFF(_offs) (0xb0 + (_offs))
+#define CIIMGEFF_IE_ENABLE(id) (1 << (30 + (id)))
+#define CIIMGEFF_IE_ENABLE_MASK (3 << 30)
+/* Image effect: 1 - after scaler, 0 - before scaler */
+#define CIIMGEFF_IE_AFTER_SC (1 << 29)
+#define CIIMGEFF_FIN_MASK (7 << 26)
+#define CIIMGEFF_FIN_BYPASS (0 << 26)
+#define CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | 0xff)
+#define CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define CIIMGEFF_PAT_CR(x) (x)
+
+/* MSCOY0SA, MSPRY0SA. Y/Cb/Cr frame start address for input DMA. */
+#define S3C_CAMIF_REG_MSY0SA(id) (0xd4 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCB0SA(id) (0xd8 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCR0SA(id) (0xdc + ((id) * 0x2c))
+
+/* MSCOY0END, MSPRY0END. Y/Cb/Cr frame end address for input DMA. */
+#define S3C_CAMIF_REG_MSY0END(id) (0xe0 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCB0END(id) (0xe4 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCR0END(id) (0xe8 + ((id) * 0x2c))
+
+/* MSCOYOFF, MSPRYOFF. Y/Cb/Cr offset. n: 0 - codec, 1 - preview. */
+#define S3C_CAMIF_REG_MSYOFF(id) (0x118 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCBOFF(id) (0x11c + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCROFF(id) (0x120 + ((id) * 0x2c))
+
+/* Real input DMA data size. n = 0 - codec, 1 - preview. */
+#define S3C_CAMIF_REG_MSWIDTH(id) (0xf8 + (id) * 0x2c)
+#define AUTOLOAD_ENABLE (1 << 31)
+#define ADDR_CH_DIS (1 << 30)
+#define MSHEIGHT(x) (((x) & 0x3ff) << 16)
+#define MSWIDTH(x) ((x) & 0x3ff)
+
+/* Input DMA control. n = 0 - codec, 1 - preview */
+#define S3C_CAMIF_REG_MSCTRL(id) (0xfc + (id) * 0x2c)
+#define MSCTRL_ORDER422_M_YCBYCR (0 << 4)
+#define MSCTRL_ORDER422_M_YCRYCB (1 << 4)
+#define MSCTRL_ORDER422_M_CBYCRY (2 << 4)
+#define MSCTRL_ORDER422_M_CRYCBY (3 << 4)
+/* 0 - camera, 1 - DMA */
+#define MSCTRL_SEL_DMA_CAM (1 << 3)
+#define MSCTRL_INFORMAT_M_YCBCR420 (0 << 1)
+#define MSCTRL_INFORMAT_M_YCBCR422 (1 << 1)
+#define MSCTRL_INFORMAT_M_YCBCR422I (2 << 1)
+#define MSCTRL_INFORMAT_M_RGB (3 << 1)
+#define MSCTRL_ENVID_M (1 << 0)
+
+/* CICOSCOSY, CIPRSCOSY. Scan line Y/Cb/Cr offset. */
+#define S3C_CAMIF_REG_CISSY(id) (0x12c + (id) * 0x0c)
+#define S3C_CAMIF_REG_CISSCB(id) (0x130 + (id) * 0x0c)
+#define S3C_CAMIF_REG_CISSCR(id) (0x134 + (id) * 0x0c)
+#define S3C_CISS_OFFS_INITIAL(x) ((x) << 16)
+#define S3C_CISS_OFFS_LINE(x) ((x) << 0)
+
+/* ------------------------------------------------------------------ */
+
+void camif_hw_reset(struct camif_dev *camif);
+void camif_hw_clear_pending_irq(struct camif_vp *vp);
+void camif_hw_clear_fifo_overflow(struct camif_vp *vp);
+void camif_hw_set_lastirq(struct camif_vp *vp, int enable);
+void camif_hw_set_input_path(struct camif_vp *vp);
+void camif_hw_enable_scaler(struct camif_vp *vp, bool on);
+void camif_hw_enable_capture(struct camif_vp *vp);
+void camif_hw_disable_capture(struct camif_vp *vp);
+void camif_hw_set_camera_bus(struct camif_dev *camif);
+void camif_hw_set_source_format(struct camif_dev *camif);
+void camif_hw_set_camera_crop(struct camif_dev *camif);
+void camif_hw_set_scaler(struct camif_vp *vp);
+void camif_hw_set_flip(struct camif_vp *vp);
+void camif_hw_set_output_dma(struct camif_vp *vp);
+void camif_hw_set_target_format(struct camif_vp *vp);
+void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern);
+void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
+ unsigned int cr, unsigned int cb);
+void camif_hw_set_output_addr(struct camif_vp *vp, struct camif_addr *paddr,
+ int index);
+void camif_hw_dump_regs(struct camif_dev *camif, const char *label);
+
+static inline u32 camif_hw_get_status(struct camif_vp *vp)
+{
+ return readl(vp->camif->io_base + S3C_CAMIF_REG_CISTATUS(vp->id,
+ vp->offset));
+}
+
+#endif /* CAMIF_REGS_H_ */
diff --git a/drivers/media/platform/s5p-fimc/Kconfig b/drivers/media/platform/s5p-fimc/Kconfig
index c16b20d86ed2..f997a5203b7c 100644
--- a/drivers/media/platform/s5p-fimc/Kconfig
+++ b/drivers/media/platform/s5p-fimc/Kconfig
@@ -2,7 +2,6 @@
config VIDEO_SAMSUNG_S5P_FIMC
bool "Samsung S5P/EXYNOS SoC camera interface driver (experimental)"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && PLAT_S5P && PM_RUNTIME
- depends on EXPERIMENTAL
help
Say Y here to enable camera host interface devices for
Samsung S5P and EXYNOS SoC series.
diff --git a/drivers/media/platform/s5p-fimc/fimc-capture.c b/drivers/media/platform/s5p-fimc/fimc-capture.c
index 891ee873c62b..fdb6740248a7 100644
--- a/drivers/media/platform/s5p-fimc/fimc-capture.c
+++ b/drivers/media/platform/s5p-fimc/fimc-capture.c
@@ -1230,6 +1230,14 @@ static int fimc_cap_qbuf(struct file *file, void *priv,
return vb2_qbuf(&fimc->vid_cap.vbq, buf);
}
+static int fimc_cap_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ return vb2_expbuf(&fimc->vid_cap.vbq, eb);
+}
+
static int fimc_cap_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
@@ -1354,6 +1362,7 @@ static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_qbuf = fimc_cap_qbuf,
.vidioc_dqbuf = fimc_cap_dqbuf,
+ .vidioc_expbuf = fimc_cap_expbuf,
.vidioc_prepare_buf = fimc_cap_prepare_buf,
.vidioc_create_bufs = fimc_cap_create_bufs,
@@ -1729,7 +1738,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
q = &fimc->vid_cap.vbq;
memset(q, 0, sizeof(*q));
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->drv_priv = fimc->vid_cap.ctx;
q->ops = &fimc_capture_qops;
q->mem_ops = &vb2_dma_contig_memops;
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c
index 8d0d2b94a135..acc0f84ffa56 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.c
+++ b/drivers/media/platform/s5p-fimc/fimc-core.c
@@ -909,11 +909,9 @@ static int fimc_probe(struct platform_device *pdev)
mutex_init(&fimc->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fimc->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (fimc->regs == NULL) {
- dev_err(&pdev->dev, "Failed to obtain io memory\n");
- return -ENOENT;
- }
+ fimc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fimc->regs))
+ return PTR_ERR(fimc->regs);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
@@ -1035,7 +1033,7 @@ static int fimc_suspend(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static int __devexit fimc_remove(struct platform_device *pdev)
+static int fimc_remove(struct platform_device *pdev)
{
struct fimc_dev *fimc = platform_get_drvdata(pdev);
@@ -1234,7 +1232,7 @@ static const struct dev_pm_ops fimc_pm_ops = {
static struct platform_driver fimc_driver = {
.probe = fimc_probe,
- .remove = __devexit_p(fimc_remove),
+ .remove = fimc_remove,
.id_table = fimc_driver_ids,
.driver = {
.name = FIMC_MODULE_NAME,
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c
index 1b309a72f09f..67db9f8102e4 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.c
@@ -1406,7 +1406,7 @@ static int fimc_lite_clk_get(struct fimc_lite *fimc)
return ret;
}
-static int __devinit fimc_lite_probe(struct platform_device *pdev)
+static int fimc_lite_probe(struct platform_device *pdev)
{
struct flite_drvdata *drv_data = fimc_lite_get_drvdata(pdev);
struct fimc_lite *fimc;
@@ -1426,11 +1426,9 @@ static int __devinit fimc_lite_probe(struct platform_device *pdev)
mutex_init(&fimc->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fimc->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (fimc->regs == NULL) {
- dev_err(&pdev->dev, "Failed to obtain io memory\n");
- return -ENOENT;
- }
+ fimc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fimc->regs))
+ return PTR_ERR(fimc->regs);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
@@ -1547,7 +1545,7 @@ static int fimc_lite_suspend(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static int __devexit fimc_lite_remove(struct platform_device *pdev)
+static int fimc_lite_remove(struct platform_device *pdev)
{
struct fimc_lite *fimc = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -1595,7 +1593,7 @@ static const struct dev_pm_ops fimc_lite_pm_ops = {
static struct platform_driver fimc_lite_driver = {
.probe = fimc_lite_probe,
- .remove = __devexit_p(fimc_lite_remove),
+ .remove = fimc_lite_remove,
.id_table = fimc_lite_driver_ids,
.driver = {
.name = FIMC_LITE_DRV_NAME,
diff --git a/drivers/media/platform/s5p-fimc/fimc-m2m.c b/drivers/media/platform/s5p-fimc/fimc-m2m.c
index 62afed3162ea..1d21da4bd24b 100644
--- a/drivers/media/platform/s5p-fimc/fimc-m2m.c
+++ b/drivers/media/platform/s5p-fimc/fimc-m2m.c
@@ -105,7 +105,7 @@ static void fimc_device_run(void *priv)
struct fimc_frame *sf, *df;
struct fimc_dev *fimc;
unsigned long flags;
- u32 ret;
+ int ret;
if (WARN(!ctx, "Null context\n"))
return;
@@ -439,6 +439,15 @@ static int fimc_m2m_dqbuf(struct file *file, void *fh,
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
+static int fimc_m2m_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
+}
+
+
static int fimc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
@@ -607,6 +616,7 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querybuf = fimc_m2m_querybuf,
.vidioc_qbuf = fimc_m2m_qbuf,
.vidioc_dqbuf = fimc_m2m_dqbuf,
+ .vidioc_expbuf = fimc_m2m_expbuf,
.vidioc_streamon = fimc_m2m_streamon,
.vidioc_streamoff = fimc_m2m_streamoff,
.vidioc_g_crop = fimc_m2m_g_crop,
@@ -622,7 +632,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &fimc_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
@@ -633,7 +643,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &fimc_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
index 0531ab70a94c..b4a68ecf0ca7 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -213,7 +213,7 @@ static int fimc_pipeline_close(struct fimc_pipeline *p)
* @pipeline: video pipeline structure
* @on: passed as the s_stream call argument
*/
-int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
+static int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
{
int i, ret;
@@ -547,7 +547,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (ret)
break;
- v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n",
source->name, flags ? '=' : '-', sink->name);
if (flags == 0 || sensor == NULL)
@@ -593,7 +593,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
{
struct media_entity *source, *sink;
unsigned int flags = MEDIA_LNK_FL_ENABLED;
- int i, ret;
+ int i, ret = 0;
for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
struct fimc_lite *fimc = fmd->fimc_lite[i];
@@ -1000,7 +1000,7 @@ err_md:
return ret;
}
-static int __devexit fimc_md_remove(struct platform_device *pdev)
+static int fimc_md_remove(struct platform_device *pdev)
{
struct fimc_md *fmd = platform_get_drvdata(pdev);
@@ -1015,7 +1015,7 @@ static int __devexit fimc_md_remove(struct platform_device *pdev)
static struct platform_driver fimc_md_driver = {
.probe = fimc_md_probe,
- .remove = __devexit_p(fimc_md_remove),
+ .remove = fimc_md_remove,
.driver = {
.name = "s5p-fimc-md",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-fimc/mipi-csis.c b/drivers/media/platform/s5p-fimc/mipi-csis.c
index 4c961b1b68e6..7abae012f55e 100644
--- a/drivers/media/platform/s5p-fimc/mipi-csis.c
+++ b/drivers/media/platform/s5p-fimc/mipi-csis.c
@@ -654,7 +654,7 @@ static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit s5pcsis_probe(struct platform_device *pdev)
+static int s5pcsis_probe(struct platform_device *pdev)
{
struct s5p_platform_mipi_csis *pdata;
struct resource *mem_res;
@@ -686,11 +686,9 @@ static int __devinit s5pcsis_probe(struct platform_device *pdev)
}
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (state->regs == NULL) {
- dev_err(&pdev->dev, "Failed to request and remap io memory\n");
- return -ENXIO;
- }
+ state->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
state->irq = platform_get_irq(pdev, 0);
if (state->irq < 0) {
@@ -851,7 +849,7 @@ static int s5pcsis_runtime_resume(struct device *dev)
}
#endif
-static int __devexit s5pcsis_remove(struct platform_device *pdev)
+static int s5pcsis_remove(struct platform_device *pdev)
{
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csis_state *state = sd_to_csis_state(sd);
@@ -876,7 +874,7 @@ static const struct dev_pm_ops s5pcsis_pm_ops = {
static struct platform_driver s5pcsis_driver = {
.probe = s5pcsis_probe,
- .remove = __devexit_p(s5pcsis_remove),
+ .remove = s5pcsis_remove,
.driver = {
.name = CSIS_DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 1bfbc325836b..6ed259fb1046 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -708,11 +708,9 @@ static int g2d_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (dev->regs == NULL) {
- dev_err(&pdev->dev, "Failed to obtain io memory\n");
- return -ENOENT;
- }
+ dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs))
+ return PTR_ERR(dev->regs);
dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
if (IS_ERR_OR_NULL(dev->clk)) {
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 17983c4c9a9a..3b023752bcb4 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1325,11 +1325,9 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
/* memory-mapped registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpeg->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (jpeg->regs == NULL) {
- dev_err(&pdev->dev, "Failed to obtain io memory\n");
- return -ENOENT;
- }
+ jpeg->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(jpeg->regs))
+ return PTR_ERR(jpeg->regs);
/* interrupt service routine registration */
jpeg->irq = ret = platform_get_irq(pdev, 0);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 3afe879d54d7..8b7fbc7cc04d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -412,62 +412,48 @@ leave_handle_frame:
}
/* Error handling for interrupt */
-static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
- unsigned int reason, unsigned int err)
+static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
{
- struct s5p_mfc_dev *dev;
unsigned long flags;
- /* If no context is available then all necessary
- * processing has been done. */
- if (ctx == NULL)
- return;
-
- dev = ctx->dev;
mfc_err("Interrupt Error: %08x\n", err);
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
- wake_up_dev(dev, reason, err);
- /* Error recovery is dependent on the state of context */
- switch (ctx->state) {
- case MFCINST_INIT:
- /* This error had to happen while acquireing instance */
- case MFCINST_GOT_INST:
- /* This error had to happen while parsing the header */
- case MFCINST_HEAD_PARSED:
- /* This error had to happen while setting dst buffers */
- case MFCINST_RETURN_INST:
- /* This error had to happen while releasing instance */
- clear_work_bit(ctx);
- wake_up_ctx(ctx, reason, err);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
- s5p_mfc_clock_off();
- ctx->state = MFCINST_ERROR;
- break;
- case MFCINST_FINISHING:
- case MFCINST_FINISHED:
- case MFCINST_RUNNING:
- /* It is higly probable that an error occured
- * while decoding a frame */
- clear_work_bit(ctx);
- ctx->state = MFCINST_ERROR;
- /* Mark all dst buffers as having an error */
- spin_lock_irqsave(&dev->irqlock, flags);
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
- &ctx->vq_dst);
- /* Mark all src buffers as having an error */
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
- &ctx->vq_src);
- spin_unlock_irqrestore(&dev->irqlock, flags);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
- s5p_mfc_clock_off();
- break;
- default:
- mfc_err("Encountered an error interrupt which had not been handled\n");
- break;
+ if (ctx != NULL) {
+ /* Error recovery is dependent on the state of context */
+ switch (ctx->state) {
+ case MFCINST_RES_CHANGE_INIT:
+ case MFCINST_RES_CHANGE_FLUSH:
+ case MFCINST_RES_CHANGE_END:
+ case MFCINST_FINISHING:
+ case MFCINST_FINISHED:
+ case MFCINST_RUNNING:
+ /* It is highly probable that an error occurred
+ * while decoding a frame */
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_ERROR;
+ /* Mark all dst buffers as having an error */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+ &ctx->dst_queue, &ctx->vq_dst);
+ /* Mark all src buffers as having an error */
+ s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+ &ctx->src_queue, &ctx->vq_src);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ wake_up_ctx(ctx, reason, err);
+ break;
+ default:
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_ERROR;
+ wake_up_ctx(ctx, reason, err);
+ break;
+ }
}
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_clock_off();
+ wake_up_dev(dev, reason, err);
return;
}
@@ -632,7 +618,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
dev->warn_start)
s5p_mfc_handle_frame(ctx, reason, err);
else
- s5p_mfc_handle_error(ctx, reason, err);
+ s5p_mfc_handle_error(dev, ctx, reason, err);
clear_bit(0, &dev->enter_suspend);
break;
@@ -1061,11 +1047,9 @@ static int s5p_mfc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_request_and_ioremap(&pdev->dev, res);
- if (dev->regs_base == NULL) {
- dev_err(&pdev->dev, "Failed to obtain io memory\n");
- return -ENOENT;
- }
+ dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs_base))
+ return PTR_ERR(dev->regs_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
@@ -1203,7 +1187,7 @@ err_res:
}
/* Remove the driver */
-static int __devexit s5p_mfc_remove(struct platform_device *pdev)
+static int s5p_mfc_remove(struct platform_device *pdev)
{
struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
@@ -1368,7 +1352,7 @@ MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
static struct platform_driver s5p_mfc_driver = {
.probe = s5p_mfc_probe,
- .remove = __devexit_p(s5p_mfc_remove),
+ .remove = s5p_mfc_remove,
.id_table = mfc_driver_ids,
.driver = {
.name = S5P_MFC_NAME,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index eb6a70b0f821..6dad9a74f61c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -636,6 +636,19 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return -EINVAL;
}
+/* Export DMA buffer */
+static int vidioc_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_expbuf(&ctx->vq_src, eb);
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_expbuf(&ctx->vq_dst, eb);
+ return -EINVAL;
+}
+
/* Stream on */
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
@@ -813,6 +826,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_expbuf = vidioc_expbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_crop = vidioc_g_crop,
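The expbuf hooks added to the MFC decoder and encoder above (and to the TV mixer further down) are the standard videobuf2 recipe for DMABUF export: forward VIDIOC_EXPBUF to vb2_expbuf() on the queue that owns the buffer and advertise VB2_DMABUF in that queue's io_modes. A condensed sketch for a hypothetical single-queue driver; the foo_* names are illustrative, not part of this patch set:

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>

struct foo_dev {
	struct vb2_queue queue;		/* io_modes includes VB2_DMABUF */
};

static int foo_expbuf(struct file *file, void *priv,
		      struct v4l2_exportbuffer *eb)
{
	struct foo_dev *dev = video_drvdata(file);

	/* vb2 checks the buffer/plane index and creates the dma-buf fd */
	return vb2_expbuf(&dev->queue, eb);
}
/* wired up via .vidioc_expbuf = foo_expbuf in the driver's v4l2_ioctl_ops */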
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2af6d522f4ac..f92f6ddd739f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1165,6 +1165,19 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return ret;
}
+/* Export DMA buffer */
+static int vidioc_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_expbuf(&ctx->vq_src, eb);
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_expbuf(&ctx->vq_dst, eb);
+ return -EINVAL;
+}
+
/* Stream on */
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
@@ -1542,7 +1555,7 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
}
static int vidioc_subscribe_event(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
+ const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_EOS:
@@ -1568,6 +1581,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_expbuf = vidioc_expbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_s_parm = vidioc_s_parm,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 367db7552289..2895333866fc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -28,7 +28,7 @@ static struct s5p_mfc_pm *pm;
static struct s5p_mfc_dev *p_dev;
#ifdef CLK_DEBUG
-atomic_t clk_ref;
+static atomic_t clk_ref;
#endif
int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
index ea11a513033f..7b659bd09bfd 100644
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ b/drivers/media/platform/s5p-tv/Kconfig
@@ -7,9 +7,8 @@
# Licensed under GPL
config VIDEO_SAMSUNG_S5P_TV
- bool "Samsung TV driver for S5P platform (experimental)"
+ bool "Samsung TV driver for S5P platform"
depends on PLAT_S5P && PM_RUNTIME
- depends on EXPERIMENTAL
default n
---help---
Say Y here to enable selecting the TV output devices for
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
index 8a9cf43018f6..7c1116c73bf3 100644
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -830,7 +830,7 @@ fail:
return -ENODEV;
}
-static int __devinit hdmi_probe(struct platform_device *pdev)
+static int hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -979,7 +979,7 @@ fail:
return ret;
}
-static int __devexit hdmi_remove(struct platform_device *pdev)
+static int hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct v4l2_subdev *sd = dev_get_drvdata(dev);
@@ -997,7 +997,7 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
static struct platform_driver hdmi_driver __refdata = {
.probe = hdmi_probe,
- .remove = __devexit_p(hdmi_remove),
+ .remove = hdmi_remove,
.id_table = hdmi_driver_types,
.driver = {
.name = "s5p-hdmi",
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
index f67b38631801..06b5d2dbb2d9 100644
--- a/drivers/media/platform/s5p-tv/hdmiphy_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
@@ -279,8 +279,8 @@ static const struct v4l2_subdev_ops hdmiphy_ops = {
.video = &hdmiphy_video_ops,
};
-static int __devinit hdmiphy_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int hdmiphy_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct hdmiphy_ctx *ctx;
@@ -295,7 +295,7 @@ static int __devinit hdmiphy_probe(struct i2c_client *client,
return 0;
}
-static int __devexit hdmiphy_remove(struct i2c_client *client)
+static int hdmiphy_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
@@ -322,7 +322,7 @@ static struct i2c_driver hdmiphy_driver = {
.owner = THIS_MODULE,
},
.probe = hdmiphy_probe,
- .remove = __devexit_p(hdmiphy_remove),
+ .remove = hdmiphy_remove,
.id_table = hdmiphy_id,
};
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index ddb422e23550..b671e20e9318 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -290,7 +290,7 @@ static inline struct v4l2_subdev *to_outsd(struct mxr_device *mdev)
struct mxr_platform_data;
/** acquiring common video resources */
-int __devinit mxr_acquire_video(struct mxr_device *mdev,
+int mxr_acquire_video(struct mxr_device *mdev,
struct mxr_output_conf *output_cont, int output_count);
/** releasing common video resources */
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index ca0f29717448..02faea03aa7d 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -151,8 +151,8 @@ void mxr_power_put(struct mxr_device *mdev)
/* --------- RESOURCE MANAGEMENT -------------*/
-static int __devinit mxr_acquire_plat_resources(struct mxr_device *mdev,
- struct platform_device *pdev)
+static int mxr_acquire_plat_resources(struct mxr_device *mdev,
+ struct platform_device *pdev)
{
struct resource *res;
int ret;
@@ -271,8 +271,8 @@ fail:
return -ENODEV;
}
-static int __devinit mxr_acquire_resources(struct mxr_device *mdev,
- struct platform_device *pdev)
+static int mxr_acquire_resources(struct mxr_device *mdev,
+ struct platform_device *pdev)
{
int ret;
ret = mxr_acquire_plat_resources(mdev, pdev);
@@ -310,8 +310,8 @@ static void mxr_release_layers(struct mxr_device *mdev)
mxr_layer_release(mdev->layer[i]);
}
-static int __devinit mxr_acquire_layers(struct mxr_device *mdev,
- struct mxr_platform_data *pdata)
+static int mxr_acquire_layers(struct mxr_device *mdev,
+ struct mxr_platform_data *pdata)
{
mdev->layer[0] = mxr_graph_layer_create(mdev, 0);
mdev->layer[1] = mxr_graph_layer_create(mdev, 1);
@@ -372,7 +372,7 @@ static const struct dev_pm_ops mxr_pm_ops = {
/* --------- DRIVER INITIALIZATION ---------- */
-static int __devinit mxr_probe(struct platform_device *pdev)
+static int mxr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mxr_platform_data *pdata = dev->platform_data;
@@ -431,7 +431,7 @@ fail:
return ret;
}
-static int __devexit mxr_remove(struct platform_device *pdev)
+static int mxr_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mxr_device *mdev = to_mdev(dev);
@@ -450,7 +450,7 @@ static int __devexit mxr_remove(struct platform_device *pdev)
static struct platform_driver mxr_driver __refdata = {
.probe = mxr_probe,
- .remove = __devexit_p(mxr_remove),
+ .remove = mxr_remove,
.driver = {
.name = MXR_DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 0c1cd895ff66..1f3b7436511c 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -19,7 +19,6 @@
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
@@ -63,8 +62,8 @@ done:
return sd;
}
-int __devinit mxr_acquire_video(struct mxr_device *mdev,
- struct mxr_output_conf *output_conf, int output_count)
+int mxr_acquire_video(struct mxr_device *mdev,
+ struct mxr_output_conf *output_conf, int output_count)
{
struct device *dev = mdev->dev;
struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
@@ -698,6 +697,15 @@ static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}
+static int mxr_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_expbuf(&layer->vb_queue, eb);
+}
+
static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct mxr_layer *layer = video_drvdata(file);
@@ -725,6 +733,7 @@ static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
.vidioc_querybuf = mxr_querybuf,
.vidioc_qbuf = mxr_qbuf,
.vidioc_dqbuf = mxr_dqbuf,
+ .vidioc_expbuf = mxr_expbuf,
/* Streaming control */
.vidioc_streamon = mxr_streamon,
.vidioc_streamoff = mxr_streamoff,
@@ -1093,7 +1102,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
layer->vb_queue = (struct vb2_queue) {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
- .io_modes = VB2_MMAP | VB2_USERPTR,
+ .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
.drv_priv = layer,
.buf_struct_size = sizeof(struct mxr_buffer),
.ops = &mxr_video_qops,
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index ad68bbed014e..91a6939a270a 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -292,7 +292,7 @@ static const struct dev_pm_ops sdo_pm_ops = {
.runtime_resume = sdo_runtime_resume,
};
-static int __devinit sdo_probe(struct platform_device *pdev)
+static int sdo_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sdo_device *sdev;
@@ -419,7 +419,7 @@ fail:
return ret;
}
-static int __devexit sdo_remove(struct platform_device *pdev)
+static int sdo_remove(struct platform_device *pdev)
{
struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
struct sdo_device *sdev = sd_to_sdev(sd);
@@ -437,7 +437,7 @@ static int __devexit sdo_remove(struct platform_device *pdev)
static struct platform_driver sdo_driver __refdata = {
.probe = sdo_probe,
- .remove = __devexit_p(sdo_remove),
+ .remove = sdo_remove,
.driver = {
.name = "s5p-sdo",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
index 716d4846f8bd..49191aac9634 100644
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ b/drivers/media/platform/s5p-tv/sii9234_drv.c
@@ -315,8 +315,8 @@ static const struct v4l2_subdev_ops sii9234_ops = {
.video = &sii9234_video_ops,
};
-static int __devinit sii9234_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii9234_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct sii9234_platform_data *pdata = dev->platform_data;
@@ -378,7 +378,7 @@ fail:
return ret;
}
-static int __devexit sii9234_remove(struct i2c_client *client)
+static int sii9234_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -406,7 +406,7 @@ static struct i2c_driver sii9234_driver = {
.pm = &sii9234_pm_ops,
},
.probe = sii9234_probe,
- .remove = __devexit_p(sii9234_remove),
+ .remove = sii9234_remove,
.id_table = sii9234_id,
};
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index a1c87f0ceaab..f3c4571ac01e 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -1326,7 +1326,7 @@ static const struct video_device sh_vou_video_template = {
.vfl_dir = VFL_DIR_TX,
};
-static int __devinit sh_vou_probe(struct platform_device *pdev)
+static int sh_vou_probe(struct platform_device *pdev)
{
struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
struct v4l2_rect *rect;
@@ -1461,7 +1461,7 @@ ereqmemreg:
return ret;
}
-static int __devexit sh_vou_remove(struct platform_device *pdev)
+static int sh_vou_remove(struct platform_device *pdev)
{
int irq = platform_get_irq(pdev, 0);
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
@@ -1487,7 +1487,7 @@ static int __devexit sh_vou_remove(struct platform_device *pdev)
}
static struct platform_driver __refdata sh_vou = {
- .remove = __devexit_p(sh_vou_remove),
+ .remove = sh_vou_remove,
.driver = {
.name = "sh-vou",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 9afe1e7bde74..cb6791e62bd4 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -19,6 +19,7 @@ config MX1_VIDEO
config VIDEO_MX1
tristate "i.MX1/i.MXL CMOS Sensor Interface driver"
+ depends on BROKEN
depends on VIDEO_DEV && ARCH_MX1 && SOC_CAMERA
select FIQ
select VIDEOBUF_DMA_CONTIG
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 6274a91c25c7..d96c8c7e01d9 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -897,7 +897,7 @@ static struct soc_camera_host_ops isi_soc_camera_host_ops = {
};
/* -----------------------------------------------------------------------*/
-static int __devexit atmel_isi_remove(struct platform_device *pdev)
+static int atmel_isi_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct atmel_isi *isi = container_of(soc_host,
@@ -921,7 +921,7 @@ static int __devexit atmel_isi_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit atmel_isi_probe(struct platform_device *pdev)
+static int atmel_isi_probe(struct platform_device *pdev)
{
unsigned int irq;
struct atmel_isi *isi;
@@ -1074,7 +1074,7 @@ err_clk_prepare_pclk:
static struct platform_driver atmel_isi_driver = {
.probe = atmel_isi_probe,
- .remove = __devexit_p(atmel_isi_remove),
+ .remove = atmel_isi_remove,
.driver = {
.name = "atmel_isi",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 791cd1d54a76..4a574f3cfb2f 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -345,7 +345,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
/*
* This is a generic configuration which is valid for most
* prp input-output format combinations.
- * We set the incomming and outgoing pixelformat to a
+ * We set the incoming and outgoing pixelformat to a
* 16 Bit wide format and adjust the bytesperline
* accordingly. With this configuration the inputdata
* will not be changed by the emma and could be any type
@@ -1692,7 +1692,7 @@ static irqreturn_t mx27_camera_emma_irq(int irq_emma, void *data)
return IRQ_HANDLED;
}
-static int __devinit mx27_camera_emma_init(struct platform_device *pdev)
+static int mx27_camera_emma_init(struct platform_device *pdev)
{
struct mx2_camera_dev *pcdev = platform_get_drvdata(pdev);
struct resource *res_emma;
@@ -1707,9 +1707,9 @@ static int __devinit mx27_camera_emma_init(struct platform_device *pdev)
goto out;
}
- pcdev->base_emma = devm_request_and_ioremap(pcdev->dev, res_emma);
- if (!pcdev->base_emma) {
- err = -EADDRNOTAVAIL;
+ pcdev->base_emma = devm_ioremap_resource(pcdev->dev, res_emma);
+ if (IS_ERR(pcdev->base_emma)) {
+ err = PTR_ERR(pcdev->base_emma);
goto out;
}
@@ -1750,7 +1750,7 @@ out:
return err;
}
-static int __devinit mx2_camera_probe(struct platform_device *pdev)
+static int mx2_camera_probe(struct platform_device *pdev)
{
struct mx2_camera_dev *pcdev;
struct resource *res_csi;
@@ -1824,9 +1824,9 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&pcdev->discard);
spin_lock_init(&pcdev->lock);
- pcdev->base_csi = devm_request_and_ioremap(&pdev->dev, res_csi);
- if (!pcdev->base_csi) {
- err = -EADDRNOTAVAIL;
+ pcdev->base_csi = devm_ioremap_resource(&pdev->dev, res_csi);
+ if (IS_ERR(pcdev->base_csi)) {
+ err = PTR_ERR(pcdev->base_csi);
goto exit;
}
@@ -1887,7 +1887,7 @@ exit:
return err;
}
-static int __devexit mx2_camera_remove(struct platform_device *pdev)
+static int mx2_camera_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct mx2_camera_dev *pcdev = container_of(soc_host,
@@ -1912,7 +1912,7 @@ static struct platform_driver mx2_camera_driver = {
.name = MX2_CAM_DRV_NAME,
},
.id_table = mx2_camera_devtype,
- .remove = __devexit_p(mx2_camera_remove),
+ .remove = mx2_camera_remove,
};
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 06d16de76377..45aef1053a49 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -1143,7 +1143,7 @@ static struct soc_camera_host_ops mx3_soc_camera_host_ops = {
.set_bus_param = mx3_camera_set_bus_param,
};
-static int __devinit mx3_camera_probe(struct platform_device *pdev)
+static int mx3_camera_probe(struct platform_device *pdev)
{
struct mx3_camera_dev *mx3_cam;
struct resource *res;
@@ -1246,7 +1246,7 @@ egetres:
return err;
}
-static int __devexit mx3_camera_remove(struct platform_device *pdev)
+static int mx3_camera_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct mx3_camera_dev *mx3_cam = container_of(soc_host,
@@ -1279,7 +1279,7 @@ static struct platform_driver mx3_camera_driver = {
.name = MX3_CAM_DRV_NAME,
},
.probe = mx3_camera_probe,
- .remove = __devexit_p(mx3_camera_remove),
+ .remove = mx3_camera_remove,
};
module_platform_driver(mx3_camera_driver);
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 3434ffe79c6e..523330d00dee 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -1651,7 +1651,7 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
.set_bus_param = pxa_camera_set_bus_param,
};
-static int __devinit pxa_camera_probe(struct platform_device *pdev)
+static int pxa_camera_probe(struct platform_device *pdev)
{
struct pxa_camera_dev *pcdev;
struct resource *res;
@@ -1801,7 +1801,7 @@ exit:
return err;
}
-static int __devexit pxa_camera_remove(struct platform_device *pdev)
+static int pxa_camera_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct pxa_camera_dev *pcdev = container_of(soc_host,
@@ -1840,7 +1840,7 @@ static struct platform_driver pxa_camera_driver = {
.pm = &pxa_camera_pm,
},
.probe = pxa_camera_probe,
- .remove = __devexit_p(pxa_camera_remove),
+ .remove = pxa_camera_remove,
};
module_platform_driver(pxa_camera_driver);
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 2d8861c0e8f2..ebbc126e71a6 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -2071,7 +2071,7 @@ static int bus_notify(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
+static int sh_mobile_ceu_probe(struct platform_device *pdev)
{
struct sh_mobile_ceu_dev *pcdev;
struct resource *res;
@@ -2258,7 +2258,7 @@ exit:
return err;
}
-static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
+static int sh_mobile_ceu_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
@@ -2307,7 +2307,7 @@ static struct platform_driver sh_mobile_ceu_driver = {
.pm = &sh_mobile_ceu_dev_pm_ops,
},
.probe = sh_mobile_ceu_probe,
- .remove = __devexit_p(sh_mobile_ceu_remove),
+ .remove = sh_mobile_ceu_remove,
};
static int __init sh_mobile_ceu_init(void)
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
index 05286500b4d4..a17aba9a0104 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
@@ -294,7 +294,7 @@ static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
.video = &sh_csi2_subdev_video_ops,
};
-static __devinit int sh_csi2_probe(struct platform_device *pdev)
+static int sh_csi2_probe(struct platform_device *pdev)
{
struct resource *res;
unsigned int irq;
@@ -366,7 +366,7 @@ ereqreg:
return ret;
}
-static __devexit int sh_csi2_remove(struct platform_device *pdev)
+static int sh_csi2_remove(struct platform_device *pdev)
{
struct sh_csi2 *priv = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -382,7 +382,7 @@ static __devexit int sh_csi2_remove(struct platform_device *pdev)
}
static struct platform_driver __refdata sh_csi2_pdrv = {
- .remove = __devexit_p(sh_csi2_remove),
+ .remove = sh_csi2_remove,
.probe = sh_csi2_probe,
.driver = {
.name = "sh-mobile-csi2",
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index d3f0b84e2d70..2ec90eae6ba0 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -645,11 +645,17 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct soc_camera_device *icd = file->private_data;
- int err = -EINVAL;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ dev_dbg(icd->pdev, "read called, buf %p\n", buf);
+
+ if (ici->ops->init_videobuf2 && icd->vb2_vidq.io_modes & VB2_READ)
+ return vb2_read(&icd->vb2_vidq, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
dev_err(icd->pdev, "camera device read not implemented\n");
- return err;
+ return -EINVAL;
}
static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1048,10 +1054,8 @@ static void scan_add_host(struct soc_camera_host *ici)
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
- int ret;
-
icd->parent = ici->v4l2_dev.dev;
- ret = soc_camera_probe(icd);
+ soc_camera_probe(icd);
}
}
@@ -1526,7 +1530,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
return 0;
}
-static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
+static int soc_camera_pdrv_probe(struct platform_device *pdev)
{
struct soc_camera_link *icl = pdev->dev.platform_data;
struct soc_camera_device *icd;
@@ -1554,7 +1558,7 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
* hot-pluggable. Now we know, that all our users - hosts and devices have
* been unloaded already
*/
-static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
+static int soc_camera_pdrv_remove(struct platform_device *pdev)
{
struct soc_camera_device *icd = platform_get_drvdata(pdev);
@@ -1568,7 +1572,7 @@ static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
static struct platform_driver __refdata soc_camera_pdrv = {
.probe = soc_camera_pdrv_probe,
- .remove = __devexit_p(soc_camera_pdrv_remove),
+ .remove = soc_camera_pdrv_remove,
.driver = {
.name = "soc-camera-pdrv",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index 02194c056b00..d854d08a6c7f 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
/* Platform device functions */
-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
+static struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
.vidioc_querycap = timblogiw_querycap,
.vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
.vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
.vidioc_enum_framesizes = timblogiw_enum_framesizes,
};
-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
+static struct v4l2_file_operations timblogiw_fops = {
.owner = THIS_MODULE,
.open = timblogiw_open,
.release = timblogiw_close,
@@ -777,7 +777,7 @@ static __devinitconst struct v4l2_file_operations timblogiw_fops = {
.poll = timblogiw_poll,
};
-static __devinitconst struct video_device timblogiw_template = {
+static struct video_device timblogiw_template = {
.name = TIMBLOGIWIN_NAME,
.fops = &timblogiw_fops,
.ioctl_ops = &timblogiw_ioctl_ops,
@@ -786,7 +786,7 @@ static __devinitconst struct video_device timblogiw_template = {
.tvnorms = V4L2_STD_PAL | V4L2_STD_NTSC
};
-static int __devinit timblogiw_probe(struct platform_device *pdev)
+static int timblogiw_probe(struct platform_device *pdev)
{
int err;
struct timblogiw *lw = NULL;
@@ -848,7 +848,7 @@ err:
return err;
}
-static int __devexit timblogiw_remove(struct platform_device *pdev)
+static int timblogiw_remove(struct platform_device *pdev)
{
struct timblogiw *lw = platform_get_drvdata(pdev);
@@ -869,7 +869,7 @@ static struct platform_driver timblogiw_platform_driver = {
.owner = THIS_MODULE,
},
.probe = timblogiw_probe,
- .remove = __devexit_p(timblogiw_remove),
+ .remove = timblogiw_remove,
};
module_platform_driver(timblogiw_platform_driver);
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index eb404c2ce270..63e8c3461239 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -1324,7 +1324,7 @@ static struct video_device viacam_v4l_template = {
#define VIACAM_SERIAL_CREG 0x46
#define VIACAM_SERIAL_BIT 0x40
-static __devinit bool viacam_serial_is_enabled(void)
+static bool viacam_serial_is_enabled(void)
{
struct pci_bus *pbus = pci_find_bus(0, 0);
u8 cbyte;
@@ -1353,7 +1353,7 @@ static struct ov7670_config sensor_cfg = {
.clock_speed = 90,
};
-static __devinit int viacam_probe(struct platform_device *pdev)
+static int viacam_probe(struct platform_device *pdev)
{
int ret;
struct i2c_adapter *sensor_adapter;
@@ -1490,7 +1490,7 @@ out_unregister:
return ret;
}
-static __devexit int viacam_remove(struct platform_device *pdev)
+static int viacam_remove(struct platform_device *pdev)
{
struct via_camera *cam = via_cam_info;
struct viafb_dev *viadev = pdev->dev.platform_data;
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index b366b050a3dd..0d59b9db83cb 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -39,7 +39,6 @@
/* Wake up at about 30 fps */
#define WAKE_NUMERATOR 30
#define WAKE_DENOMINATOR 1001
-#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1200
@@ -352,11 +351,6 @@ static void precalculate_bars(struct vivi_dev *dev)
}
}
-#define TSTAMP_MIN_Y 24
-#define TSTAMP_MAX_Y (TSTAMP_MIN_Y + 15)
-#define TSTAMP_INPUT_X 10
-#define TSTAMP_MIN_X (54 + TSTAMP_INPUT_X)
-
/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
{
@@ -1308,7 +1302,7 @@ static int __init vivi_create_instance(int inst)
/* initialize queue */
q = &dev->vb_vidq;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivi_buffer);
q->ops = &vivi_video_qops;
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 8090b87b3066..9e580166161a 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -180,7 +180,7 @@ config RADIO_TIMBERDALE
config RADIO_WL1273
tristate "Texas Instruments WL1273 I2C FM Radio"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2 && GENERIC_HARDIRQS
select MFD_CORE
select MFD_WL1273_CORE
select FW_LOADER
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 12c70e876f58..a739ad492e7b 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -82,7 +82,7 @@ static struct radio_isa_card *rtrack_alloc(void)
#define AIMS_BIT_VOL_UP (1 << 6) /* active low */
#define AIMS_BIT_VOL_DN (1 << 7) /* active low */
-void rtrack_set_pins(void *handle, u8 pins)
+static void rtrack_set_pins(void *handle, u8 pins)
{
struct radio_isa_card *isa = handle;
struct rtrack *rt = container_of(isa, struct rtrack, isa);
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 697a421c9940..643d80ac28fb 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -645,7 +645,8 @@ static int __init cadet_init(void)
set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
video_set_drvdata(&dev->vdev, dev);
- if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0)
+ res = video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr);
+ if (res < 0)
goto err_hdl;
v4l2_info(v4l2_dev, "ADS Cadet Radio Card at 0x%x\n", dev->io);
return 0;
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index 3c0067de4324..84b7b9f4385e 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -191,7 +191,7 @@ static bool radio_isa_valid_io(const struct radio_isa_driver *drv, int io)
return false;
}
-struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
+static struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
struct device *pdev)
{
struct v4l2_device *v4l2_dev;
@@ -207,8 +207,9 @@ struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
return isa;
}
-int radio_isa_common_probe(struct radio_isa_card *isa, struct device *pdev,
- int radio_nr, unsigned region_size)
+static int radio_isa_common_probe(struct radio_isa_card *isa,
+ struct device *pdev,
+ int radio_nr, unsigned region_size)
{
const struct radio_isa_driver *drv = isa->drv;
const struct radio_isa_ops *ops = drv->ops;
@@ -287,7 +288,8 @@ err_dev_reg:
return res;
}
-int radio_isa_common_remove(struct radio_isa_card *isa, unsigned region_size)
+static int radio_isa_common_remove(struct radio_isa_card *isa,
+ unsigned region_size)
{
const struct radio_isa_ops *ops = isa->drv->ops;
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index e10e525f33e5..296941a9ae25 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
+ radio->vdev.vfl_dir = VFL_DIR_TX;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index b415211d0c4b..bd4d3a7cdadd 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -114,7 +114,8 @@ static struct snd_tea575x_ops maxiradio_tea_ops = {
.set_direction = maxiradio_tea575x_set_direction,
};
-static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int maxiradio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct maxiradio *dev;
struct v4l2_device *v4l2_dev;
@@ -172,7 +173,7 @@ errfr:
return retval;
}
-static void __devexit maxiradio_remove(struct pci_dev *pdev)
+static void maxiradio_remove(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
struct maxiradio *dev = to_maxiradio(v4l2_dev);
@@ -196,7 +197,7 @@ static struct pci_driver maxiradio_driver = {
.name = "radio-maxiradio",
.id_table = maxiradio_pci_tbl,
.probe = maxiradio_probe,
- .remove = __devexit_p(maxiradio_remove),
+ .remove = maxiradio_remove,
};
static int __init maxiradio_init(void)
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 227dcdb54df3..637a55564958 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -64,7 +64,7 @@ bool pnp_attached;
#define FMI_BIT_VOL_SW (1 << 3)
#define FMI_BIT_TUN_STRQ (1 << 4)
-void fmi_set_pins(void *handle, u8 pins)
+static void fmi_set_pins(void *handle, u8 pins)
{
struct fmi *fmi = handle;
u8 bits = FMI_BIT_TUN_STRQ;
@@ -265,7 +265,7 @@ static const struct v4l2_ioctl_ops fmi_ioctl_ops = {
};
/* ladis: this is my card. does any other types exist? */
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
/* SF16-FMI */
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('M','F','R'), ISAPNP_FUNCTION(0xad10), 0},
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 4efcbec74c52..9c0990457a7c 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -197,13 +197,13 @@ static int fmr2_tea_ext_init(struct snd_tea575x *tea)
return 0;
}
-static struct pnp_device_id fmr2_pnp_ids[] __devinitdata = {
+static struct pnp_device_id fmr2_pnp_ids[] = {
{ .id = "MFRad13" }, /* tuner subdevice of SF16-FMD2 */
{ .id = "" }
};
MODULE_DEVICE_TABLE(pnp, fmr2_pnp_ids);
-static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
+static int fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
{
int err, i;
char *card_name = fmr2->is_fmd2 ? "SF16-FMD2" : "SF16-FMR2";
@@ -249,7 +249,7 @@ static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
return 0;
}
-static int __devinit fmr2_isa_match(struct device *pdev, unsigned int ndev)
+static int fmr2_isa_match(struct device *pdev, unsigned int ndev)
{
struct fmr2 *fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
if (!fmr2)
@@ -265,8 +265,7 @@ static int __devinit fmr2_isa_match(struct device *pdev, unsigned int ndev)
return 1;
}
-static int __devinit fmr2_pnp_probe(struct pnp_dev *pdev,
- const struct pnp_device_id *id)
+static int fmr2_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
{
int ret;
struct fmr2 *fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
@@ -285,7 +284,7 @@ static int __devinit fmr2_pnp_probe(struct pnp_dev *pdev,
return 0;
}
-static void __devexit fmr2_remove(struct fmr2 *fmr2)
+static void fmr2_remove(struct fmr2 *fmr2)
{
snd_tea575x_exit(&fmr2->tea);
release_region(fmr2->io, 2);
@@ -293,7 +292,7 @@ static void __devexit fmr2_remove(struct fmr2 *fmr2)
kfree(fmr2);
}
-static int __devexit fmr2_isa_remove(struct device *pdev, unsigned int ndev)
+static int fmr2_isa_remove(struct device *pdev, unsigned int ndev)
{
fmr2_remove(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
@@ -301,7 +300,7 @@ static int __devexit fmr2_isa_remove(struct device *pdev, unsigned int ndev)
return 0;
}
-static void __devexit fmr2_pnp_remove(struct pnp_dev *pdev)
+static void fmr2_pnp_remove(struct pnp_dev *pdev)
{
fmr2_remove(pnp_get_drvdata(pdev));
pnp_set_drvdata(pdev, NULL);
@@ -309,7 +308,7 @@ static void __devexit fmr2_pnp_remove(struct pnp_dev *pdev)
struct isa_driver fmr2_isa_driver = {
.match = fmr2_isa_match,
- .remove = __devexit_p(fmr2_isa_remove),
+ .remove = fmr2_isa_remove,
.driver = {
.name = "radio-sf16fmr2",
},
@@ -319,7 +318,7 @@ struct pnp_driver fmr2_pnp_driver = {
.name = "radio-sf16fmr2",
.id_table = fmr2_pnp_ids,
.probe = fmr2_pnp_probe,
- .remove = __devexit_p(fmr2_pnp_remove),
+ .remove = fmr2_pnp_remove,
};
static int __init fmr2_init(void)
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index a082e400ed0f..1507c9d508d7 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
.name = "radio-si4713",
.release = video_device_release,
.ioctl_ops = &radio_si4713_ioctl_ops,
+ .vfl_dir = VFL_DIR_TX,
};
/* Platform driver interface */
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index d0c905310071..1978516af67e 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -145,7 +145,7 @@ struct tea5764_device {
};
/* I2C code related */
-int tea5764_i2c_read(struct tea5764_device *radio)
+static int tea5764_i2c_read(struct tea5764_device *radio)
{
int i;
u16 *p = (u16 *) &radio->regs;
@@ -165,7 +165,7 @@ int tea5764_i2c_read(struct tea5764_device *radio)
return 0;
}
-int tea5764_i2c_write(struct tea5764_device *radio)
+static int tea5764_i2c_write(struct tea5764_device *radio)
{
struct tea5764_write_regs wr;
struct tea5764_regs *r = &radio->regs;
@@ -493,8 +493,8 @@ static struct video_device tea5764_radio_template = {
};
/* I2C probe: check if the device exists and register with v4l if it is */
-static int __devinit tea5764_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tea5764_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct tea5764_device *radio;
struct tea5764_regs *r;
@@ -552,7 +552,7 @@ errfr:
return ret;
}
-static int __devexit tea5764_i2c_remove(struct i2c_client *client)
+static int tea5764_i2c_remove(struct i2c_client *client)
{
struct tea5764_device *radio = i2c_get_clientdata(client);
@@ -578,7 +578,7 @@ static struct i2c_driver tea5764_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = tea5764_i2c_probe,
- .remove = __devexit_p(tea5764_i2c_remove),
+ .remove = tea5764_i2c_remove,
.id_table = tea5764_id,
};
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index 5cf07779f4bb..b87effeb5dc6 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -145,7 +145,7 @@ static const struct v4l2_file_operations timbradio_fops = {
.unlocked_ioctl = video_ioctl2,
};
-static int __devinit timbradio_probe(struct platform_device *pdev)
+static int timbradio_probe(struct platform_device *pdev)
{
struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
struct timbradio *tr;
@@ -201,7 +201,7 @@ err:
return err;
}
-static int __devexit timbradio_remove(struct platform_device *pdev)
+static int timbradio_remove(struct platform_device *pdev)
{
struct timbradio *tr = platform_get_drvdata(pdev);
@@ -219,7 +219,7 @@ static struct platform_driver timbradio_platform_driver = {
.owner = THIS_MODULE,
},
.probe = timbradio_probe,
- .remove = __devexit_p(timbradio_remove),
+ .remove = timbradio_remove,
};
module_platform_driver(timbradio_platform_driver);
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 9b0c9fa0beb8..cabbe3adf435 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
.ioctl_ops = &wl1273_ioctl_ops,
.name = WL1273_FM_DRIVER_NAME,
.release = wl1273_vdev_release,
+ .vfl_dir = VFL_DIR_TX,
};
static int wl1273_fm_radio_remove(struct platform_device *pdev)
@@ -1990,7 +1991,7 @@ static int wl1273_fm_radio_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev)
+static int wl1273_fm_radio_probe(struct platform_device *pdev)
{
struct wl1273_core **core = pdev->dev.platform_data;
struct wl1273_device *radio;
@@ -2145,7 +2146,7 @@ pdata_err:
static struct platform_driver wl1273_fm_radio_driver = {
.probe = wl1273_fm_radio_probe,
- .remove = __devexit_p(wl1273_fm_radio_remove),
+ .remove = wl1273_fm_radio_remove,
.driver = {
.name = "wl1273_fm_radio",
.owner = THIS_MODULE,
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index 54db36ccb9ee..06c06cc9ff25 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -373,8 +373,8 @@ static const struct v4l2_subdev_ops saa7706h_ops = {
* concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
*/
-static int __devinit saa7706h_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int saa7706h_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct saa7706h_state *state;
struct v4l2_subdev *sd;
@@ -418,7 +418,7 @@ err:
return err;
}
-static int __devexit saa7706h_remove(struct i2c_client *client)
+static int saa7706h_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -441,7 +441,7 @@ static struct i2c_driver saa7706h_driver = {
.name = DRIVER_NAME,
},
.probe = saa7706h_probe,
- .remove = __devexit_p(saa7706h_remove),
+ .remove = saa7706h_remove,
.id_table = saa7706h_id,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 4ef55ec8045e..e5fc9acd0c4f 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -347,8 +347,8 @@ end:
/*
* si470x_i2c_probe - probe for the device
*/
-static int __devinit si470x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int si470x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct si470x_device *radio;
int retval = 0;
@@ -451,7 +451,7 @@ err_initial:
/*
* si470x_i2c_remove - remove the device
*/
-static __devexit int si470x_i2c_remove(struct i2c_client *client)
+static int si470x_i2c_remove(struct i2c_client *client)
{
struct si470x_device *radio = i2c_get_clientdata(client);
@@ -514,7 +514,7 @@ static struct i2c_driver si470x_i2c_driver = {
#endif
},
.probe = si470x_i2c_probe,
- .remove = __devexit_p(si470x_i2c_remove),
+ .remove = si470x_i2c_remove,
.id_table = si470x_i2c_id,
};
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index e3079c142c5f..bd61b3bd0ca3 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1769,7 +1769,7 @@ exit:
}
/* si4713_ioctl - deal with private ioctls (only rnl for now) */
-long si4713_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+static long si4713_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
struct si4713_device *sdev = to_si4713_device(sd);
struct si4713_rnl *rnl = arg;
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 06d47e5cce9f..b18c2dc268ba 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -165,8 +165,8 @@ static const struct v4l2_subdev_ops tef6862_ops = {
* concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
*/
-static int __devinit tef6862_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tef6862_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct tef6862_state *state;
struct v4l2_subdev *sd;
@@ -189,7 +189,7 @@ static int __devinit tef6862_probe(struct i2c_client *client,
return 0;
}
-static int __devexit tef6862_remove(struct i2c_client *client)
+static int tef6862_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -211,7 +211,7 @@ static struct i2c_driver tef6862_driver = {
.name = DRIVER_NAME,
},
.probe = tef6862_probe,
- .remove = __devexit_p(tef6862_remove),
+ .remove = tef6862_remove,
.id_table = tef6862_id,
};
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index ea1e6545df36..f359be7e9dd9 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -4,7 +4,7 @@
menu "Texas Instruments WL128x FM driver (ST based)"
config RADIO_WL128X
tristate "Texas Instruments WL128x FM Radio"
- depends on VIDEO_V4L2 && RFKILL && GPIOLIB
+ depends on VIDEO_V4L2 && RFKILL && GPIOLIB && TTY
select TI_ST if NET
help
Choose Y here if you have this FM radio chip.
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
index d84ad9dad323..aac0f025f767 100644
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ b/drivers/media/radio/wl128x/fmdrv.h
@@ -60,7 +60,7 @@
#define fmdbg(format, ...) \
printk(KERN_DEBUG "fmdrv: " format, ## __VA_ARGS__)
#else /* DEBUG */
-#define fmdbg(format, ...)
+#define fmdbg(format, ...) do {} while(0)
#endif
enum {
FM_MODE_OFF,
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index bf867a6b5ea0..602ef7ac8c24 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -742,7 +742,7 @@ static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
break;
- if (blk_idx < FM_RDS_BLK_IDX_A || blk_idx > FM_RDS_BLK_IDX_D) {
+ if (blk_idx > FM_RDS_BLK_IDX_D) {
fmdbg("Block sequence mismatch\n");
rds->last_blk_idx = -1;
break;
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index 3dd9fc097c47..ebf09a3927de 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -305,7 +305,7 @@ int fm_rx_set_volume(struct fmdev *fmdev, u16 vol_to_set)
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
- if (vol_to_set < FM_RX_VOLUME_MIN || vol_to_set > FM_RX_VOLUME_MAX) {
+ if (vol_to_set > FM_RX_VOLUME_MAX) {
fmerr("Volume is not within(%d-%d) range\n",
FM_RX_VOLUME_MIN, FM_RX_VOLUME_MAX);
return -EINVAL;
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 048de4536036..0a8ee8fab924 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
.ioctl_ops = &fm_drv_ioctl_ops,
.name = FM_DRV_NAME,
.release = video_device_release,
+ /*
+ * To ensure both the tuner and modulator ioctls are accessible we
+ * set the vfl_dir to M2M to indicate this.
+ *
+ * It is not really a mem2mem device of course, but it can both receive
+ * and transmit using the same radio device. It's the only radio driver
+ * that does this and it should really be split in two radio devices,
+ * but that would affect applications using this driver.
+ */
+ .vfl_dir = VFL_DIR_M2M,
};
int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 79ba242fe263..19f3563c61da 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -291,7 +291,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
- depends on OMAP_DM_TIMER && LIRC
+ depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM
---help---
Say Y or M here if you want to enable support for the IR
transmitter diode built in the Nokia N900 (RX51) device.
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 49bb356ed14c..2d6fb26a0170 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -784,7 +784,7 @@ static void ati_remote_rc_init(struct ati_remote *ati_remote)
rdev->priv = ati_remote;
rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protos = RC_TYPE_OTHER;
+ rdev->allowed_protos = RC_BIT_OTHER;
rdev->driver_name = "ati_remote";
rdev->open = ati_remote_rc_open;
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index d05ac15b5de4..cef04786b52f 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -329,7 +329,7 @@ static int ene_rx_get_sample_reg(struct ene_device *dev)
}
/* Sense current received carrier */
-void ene_rx_sense_carrier(struct ene_device *dev)
+static void ene_rx_sense_carrier(struct ene_device *dev)
{
DEFINE_IR_RAW_EVENT(ev);
@@ -1003,7 +1003,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
rdev = rc_allocate_device();
if (!dev || !rdev)
- goto error1;
+ goto failure;
/* validate resources */
error = -ENODEV;
@@ -1014,10 +1014,10 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
- goto error;
+ goto failure;
if (!pnp_irq_valid(pnp_dev, 0))
- goto error;
+ goto failure;
spin_lock_init(&dev->hw_lock);
@@ -1033,7 +1033,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
- goto error;
+ goto failure;
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
@@ -1046,7 +1046,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
learning_mode_force = false;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->priv = dev;
rdev->open = ene_open;
rdev->close = ene_close;
@@ -1078,30 +1078,27 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* claim the resources */
error = -EBUSY;
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
- dev->hw_io = -1;
- dev->irq = -1;
- goto error;
+ goto failure;
}
dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
- dev->irq = -1;
- goto error;
+ goto failure2;
}
error = rc_register_device(rdev);
if (error < 0)
- goto error;
+ goto failure3;
pr_notice("driver has been successfully loaded\n");
return 0;
-error:
- if (dev && dev->irq >= 0)
- free_irq(dev->irq, dev);
- if (dev && dev->hw_io >= 0)
- release_region(dev->hw_io, ENE_IO_SIZE);
-error1:
+
+failure3:
+ free_irq(dev->irq, dev);
+failure2:
+ release_region(dev->hw_io, ENE_IO_SIZE);
+failure:
rc_free_device(rdev);
kfree(dev);
return error;
@@ -1175,7 +1172,7 @@ static struct pnp_driver ene_driver = {
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = ene_probe,
- .remove = __devexit_p(ene_remove),
+ .remove = ene_remove,
#ifdef CONFIG_PM
.suspend = ene_suspend,
.resume = ene_resume,
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 52fd7696b1ba..1df410e13688 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -541,7 +541,7 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* Set up the rc device */
rdev->priv = fintek;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = fintek_open;
rdev->close = fintek_close;
rdev->input_name = FINTEK_DESCRIPTION;
@@ -590,7 +590,7 @@ failure:
return ret;
}
-static void __devexit fintek_remove(struct pnp_dev *pdev)
+static void fintek_remove(struct pnp_dev *pdev)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
unsigned long flags;
@@ -678,18 +678,18 @@ static struct pnp_driver fintek_driver = {
.id_table = fintek_ids,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = fintek_probe,
- .remove = __devexit_p(fintek_remove),
+ .remove = fintek_remove,
.suspend = fintek_suspend,
.resume = fintek_resume,
.shutdown = fintek_shutdown,
};
-int fintek_init(void)
+static int fintek_init(void)
{
return pnp_register_driver(&fintek_driver);
}
-void fintek_exit(void)
+static void fintek_exit(void)
{
pnp_unregister_driver(&fintek_driver);
}
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 04cb272db16a..4f71a7d1f019 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -58,7 +58,7 @@ err_get_value:
return IRQ_HANDLED;
}
-static int __devinit gpio_ir_recv_probe(struct platform_device *pdev)
+static int gpio_ir_recv_probe(struct platform_device *pdev)
{
struct gpio_rc_dev *gpio_dev;
struct rc_dev *rcdev;
@@ -95,7 +95,7 @@ static int __devinit gpio_ir_recv_probe(struct platform_device *pdev)
if (pdata->allowed_protos)
rcdev->allowed_protos = pdata->allowed_protos;
else
- rcdev->allowed_protos = RC_TYPE_ALL;
+ rcdev->allowed_protos = RC_BIT_ALL;
rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
@@ -140,7 +140,7 @@ err_allocate_device:
return rc;
}
-static int __devexit gpio_ir_recv_remove(struct platform_device *pdev)
+static int gpio_ir_recv_remove(struct platform_device *pdev)
{
struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
@@ -188,7 +188,7 @@ static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
static struct platform_driver gpio_ir_recv_driver = {
.probe = gpio_ir_recv_probe,
- .remove = __devexit_p(gpio_ir_recv_remove),
+ .remove = gpio_ir_recv_remove,
.driver = {
.name = GPIO_IR_DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 51d7057aca04..b99b096d8a8f 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -425,8 +425,8 @@ static void iguanair_close(struct rc_dev *rdev)
mutex_unlock(&ir->lock);
}
-static int __devinit iguanair_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int iguanair_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct iguanair *ir;
@@ -499,7 +499,7 @@ static int __devinit iguanair_probe(struct usb_interface *intf,
usb_to_input_id(ir->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->priv = ir;
rc->open = iguanair_open;
rc->close = iguanair_close;
@@ -538,7 +538,7 @@ out:
return ret;
}
-static void __devexit iguanair_disconnect(struct usb_interface *intf)
+static void iguanair_disconnect(struct usb_interface *intf)
{
struct iguanair *ir = usb_get_intfdata(intf);
@@ -604,7 +604,7 @@ static const struct usb_device_id iguanair_table[] = {
static struct usb_driver iguanair_driver = {
.name = DRIVER_NAME,
.probe = iguanair_probe,
- .disconnect = __devexit_p(iguanair_disconnect),
+ .disconnect = iguanair_disconnect,
.suspend = iguanair_suspend,
.resume = iguanair_resume,
.reset_resume = iguanair_resume,
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 5dd0386604f0..78d109b978dd 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -255,7 +255,7 @@ static struct usb_device_id imon_usb_id_table[] = {
static struct usb_driver imon_driver = {
.name = MOD_NAME,
.probe = imon_probe,
- .disconnect = __devexit_p(imon_disconnect),
+ .disconnect = imon_disconnect,
.suspend = imon_suspend,
.resume = imon_resume,
.id_table = imon_usb_id_table,
@@ -1001,7 +1001,7 @@ static void imon_touch_display_timeout(unsigned long data)
* it is not, so we must acquire it prior to calling send_packet, which
* requires that the lock is held.
*/
-static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
+static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
int retval;
struct imon_context *ictx = rc->priv;
@@ -1010,31 +1010,27 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
- if (rc_type && !(rc_type & rc->allowed_protos))
+ if (*rc_type && !(*rc_type & rc->allowed_protos))
dev_warn(dev, "Looks like you're trying to use an IR protocol "
"this device does not support\n");
- switch (rc_type) {
- case RC_TYPE_RC6:
+ if (*rc_type & RC_BIT_RC6_MCE) {
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
ir_proto_packet[0] = 0x01;
- break;
- case RC_TYPE_UNKNOWN:
- case RC_TYPE_OTHER:
+ *rc_type = RC_BIT_RC6_MCE;
+ } else if (*rc_type & RC_BIT_OTHER) {
dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
- rc_type = RC_TYPE_OTHER;
- break;
- default:
+ *rc_type = RC_BIT_OTHER;
+ } else {
dev_warn(dev, "Unsupported IR protocol specified, overriding "
"to iMON IR protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
- rc_type = RC_TYPE_OTHER;
- break;
+ *rc_type = RC_BIT_OTHER;
}
memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
@@ -1048,7 +1044,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
if (retval)
goto out;
- ictx->rc_type = rc_type;
+ ictx->rc_type = *rc_type;
ictx->pad_mouse = false;
out:
@@ -1323,7 +1319,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
rel_x = buf[2];
rel_y = buf[3];
- if (ictx->rc_type == RC_TYPE_OTHER && pad_stabilize) {
+ if (ictx->rc_type == RC_BIT_OTHER && pad_stabilize) {
if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) {
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
@@ -1390,7 +1386,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
buf[0] = 0x01;
buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0;
- if (ictx->rc_type == RC_TYPE_OTHER && pad_stabilize) {
+ if (ictx->rc_type == RC_BIT_OTHER && pad_stabilize) {
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
@@ -1511,7 +1507,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
kc = imon_panel_key_lookup(scancode);
} else {
scancode = be32_to_cpu(*((u32 *)buf));
- if (ictx->rc_type == RC_TYPE_RC6) {
+ if (ictx->rc_type == RC_BIT_RC6_MCE) {
ktype = IMON_KEY_IMON;
if (buf[0] == 0x80)
ktype = IMON_KEY_MCE;
@@ -1744,7 +1740,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
{
u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
- u64 allowed_protos = RC_TYPE_OTHER;
+ u64 allowed_protos = RC_BIT_OTHER;
switch (ffdc_cfg_byte) {
/* iMON Knob, no display, iMON IR + vol knob */
@@ -1775,13 +1771,13 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
case 0x9e:
dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
- allowed_protos = RC_TYPE_RC6;
+ allowed_protos = RC_BIT_RC6_MCE;
break;
/* iMON LCD, MCE IR */
case 0x9f:
dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_LCD;
- allowed_protos = RC_TYPE_RC6;
+ allowed_protos = RC_BIT_RC6_MCE;
break;
default:
dev_info(ictx->dev, "Unknown 0xffdc device, "
@@ -1789,7 +1785,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
detected_display_type = IMON_DISPLAY_TYPE_VFD;
/* We don't know which one it is, allow user to set the
* RC6 one from userspace if OTHER wasn't correct. */
- allowed_protos |= RC_TYPE_RC6;
+ allowed_protos |= RC_BIT_RC6_MCE;
break;
}
@@ -1875,7 +1871,7 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
rdev->priv = ictx;
rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */
+ rdev->allowed_protos = RC_BIT_OTHER | RC_BIT_RC6_MCE; /* iMON PAD or MCE */
rdev->change_protocol = imon_ir_change_protocol;
rdev->driver_name = MOD_NAME;
@@ -1893,7 +1889,7 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
imon_set_display_type(ictx);
- if (ictx->rc_type == RC_TYPE_RC6)
+ if (ictx->rc_type == RC_BIT_RC6_MCE)
rdev->map_name = RC_MAP_IMON_MCE;
else
rdev->map_name = RC_MAP_IMON_PAD;
@@ -2292,8 +2288,8 @@ static void imon_init_display(struct imon_context *ictx,
/**
* Callback function for USB core API: Probe
*/
-static int __devinit imon_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
+static int imon_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
struct usb_device *usbdev = NULL;
struct usb_host_interface *iface_desc = NULL;
@@ -2376,7 +2372,7 @@ fail:
/**
* Callback function for USB core API: disconnect
*/
-static void __devexit imon_disconnect(struct usb_interface *interface)
+static void imon_disconnect(struct usb_interface *interface)
{
struct imon_context *ictx;
struct device *dev;
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index 035668e27f6b..69edffb9fe9a 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -47,7 +47,7 @@ static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct jvc_dec *data = &dev->raw->jvc;
- if (!(dev->raw->enabled_protocols & RC_TYPE_JVC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_JVC))
return 0;
if (!is_timing_event(ev)) {
@@ -174,7 +174,7 @@ out:
}
static struct ir_raw_handler jvc_handler = {
- .protocols = RC_TYPE_JVC,
+ .protocols = RC_BIT_JVC,
.decode = ir_jvc_decode,
};
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 870c93052fd0..9945e5e7f61a 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -35,7 +35,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
- if (!(dev->raw->enabled_protocols & RC_TYPE_LIRC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_LIRC))
return 0;
if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
@@ -408,7 +408,7 @@ static int ir_lirc_unregister(struct rc_dev *dev)
}
static struct ir_raw_handler lirc_handler = {
- .protocols = RC_TYPE_LIRC,
+ .protocols = RC_BIT_LIRC,
.decode = ir_lirc_decode,
.raw_register = ir_lirc_register,
.raw_unregister = ir_lirc_unregister,
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 3784ebf80ec7..33fafa4cf7cb 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -216,7 +216,7 @@ static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
unsigned long delay;
- if (!(dev->raw->enabled_protocols & RC_TYPE_MCE_KBD))
+ if (!(dev->raw->enabled_protocols & RC_BIT_MCE_KBD))
return 0;
if (!is_timing_event(ev)) {
@@ -422,7 +422,7 @@ static int ir_mce_kbd_unregister(struct rc_dev *dev)
}
static struct ir_raw_handler mce_kbd_handler = {
- .protocols = RC_TYPE_MCE_KBD,
+ .protocols = RC_BIT_MCE_KBD,
.decode = ir_mce_kbd_decode,
.raw_register = ir_mce_kbd_register,
.raw_unregister = ir_mce_kbd_unregister,
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 2ca509e6e16b..a47ee3634969 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -52,7 +52,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 address, not_address, command, not_command;
bool send_32bits = false;
- if (!(dev->raw->enabled_protocols & RC_TYPE_NEC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_NEC))
return 0;
if (!is_timing_event(ev)) {
@@ -201,7 +201,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
static struct ir_raw_handler nec_handler = {
- .protocols = RC_TYPE_NEC,
+ .protocols = RC_BIT_NEC,
.decode = ir_nec_decode,
};
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 9ab663a507a4..5b4d1ddeac4e 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -52,8 +52,8 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 toggle;
u32 scancode;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC5))
- return 0;
+ if (!(dev->raw->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X)))
+ return 0;
if (!is_timing_event(ev)) {
if (ev.reset)
@@ -128,6 +128,10 @@ again:
if (data->wanted_bits == RC5X_NBITS) {
/* RC5X */
u8 xdata, command, system;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5X)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
xdata = (data->bits & 0x0003F) >> 0;
command = (data->bits & 0x00FC0) >> 6;
system = (data->bits & 0x1F000) >> 12;
@@ -141,6 +145,10 @@ again:
} else {
/* RC5 */
u8 command, system;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
command = (data->bits & 0x0003F) >> 0;
system = (data->bits & 0x007C0) >> 6;
toggle = (data->bits & 0x00800) ? 1 : 0;
@@ -164,7 +172,7 @@ out:
}
static struct ir_raw_handler rc5_handler = {
- .protocols = RC_TYPE_RC5,
+ .protocols = RC_BIT_RC5 | RC_BIT_RC5X,
.decode = ir_rc5_decode,
};
diff --git a/drivers/media/rc/ir-rc5-sz-decoder.c b/drivers/media/rc/ir-rc5-sz-decoder.c
index ec8d4a2e2c5a..fd807a8308d8 100644
--- a/drivers/media/rc/ir-rc5-sz-decoder.c
+++ b/drivers/media/rc/ir-rc5-sz-decoder.c
@@ -48,8 +48,8 @@ static int ir_rc5_sz_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 toggle, command, system;
u32 scancode;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC5_SZ))
- return 0;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5_SZ))
+ return 0;
if (!is_timing_event(ev)) {
if (ev.reset)
@@ -128,7 +128,7 @@ out:
}
static struct ir_raw_handler rc5_sz_handler = {
- .protocols = RC_TYPE_RC5_SZ,
+ .protocols = RC_BIT_RC5_SZ,
.decode = ir_rc5_sz_decode,
};
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 4cfdd7fa4bbd..e19072ffb36c 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -89,7 +89,9 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 toggle;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC6))
+ if (!(dev->raw->enabled_protocols &
+ (RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
+ RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE)))
return 0;
if (!is_timing_event(ev)) {
@@ -271,7 +273,9 @@ out:
}
static struct ir_raw_handler rc6_handler = {
- .protocols = RC_TYPE_RC6,
+ .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
+ RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
+ RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
};
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 82e6c1e282d5..8ead492d03aa 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -443,7 +443,7 @@ static int lirc_rx51_resume(struct platform_device *dev)
#endif /* CONFIG_PM */
-static int __devinit lirc_rx51_probe(struct platform_device *dev)
+static int lirc_rx51_probe(struct platform_device *dev)
{
lirc_rx51_driver.features = LIRC_RX51_DRIVER_FEATURES;
lirc_rx51.pdata = dev->dev.platform_data;
@@ -479,18 +479,7 @@ struct platform_driver lirc_rx51_platform_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init lirc_rx51_init(void)
-{
- return platform_driver_register(&lirc_rx51_platform_driver);
-}
-module_init(lirc_rx51_init);
-
-static void __exit lirc_rx51_exit(void)
-{
- platform_driver_unregister(&lirc_rx51_platform_driver);
-}
-module_exit(lirc_rx51_exit);
+module_platform_driver(lirc_rx51_platform_driver);
MODULE_DESCRIPTION("LIRC TX driver for Nokia RX51");
MODULE_AUTHOR("Nokia Corporation");
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 7e54ec57bcf9..7e69a3b65370 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -58,7 +58,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 address, command, not_command;
- if (!(dev->raw->enabled_protocols & RC_TYPE_SANYO))
+ if (!(dev->raw->enabled_protocols & RC_BIT_SANYO))
return 0;
if (!is_timing_event(ev)) {
@@ -179,7 +179,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
static struct ir_raw_handler sanyo_handler = {
- .protocols = RC_TYPE_SANYO,
+ .protocols = RC_BIT_SANYO,
.decode = ir_sanyo_decode,
};
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index dab98b37621a..fb914342cf4d 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -45,7 +45,8 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 device, subdevice, function;
- if (!(dev->raw->enabled_protocols & RC_TYPE_SONY))
+ if (!(dev->raw->enabled_protocols &
+ (RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20)))
return 0;
if (!is_timing_event(ev)) {
@@ -123,16 +124,28 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
switch (data->count) {
case 12:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY12)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits << 3) & 0xF8);
subdevice = 0;
function = bitrev8((data->bits >> 4) & 0xFE);
break;
case 15:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY15)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits >> 0) & 0xFF);
subdevice = 0;
function = bitrev8((data->bits >> 7) & 0xFE);
break;
case 20:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY20)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits >> 5) & 0xF8);
subdevice = bitrev8((data->bits >> 0) & 0xFF);
function = bitrev8((data->bits >> 12) & 0xFE);
@@ -157,7 +170,7 @@ out:
}
static struct ir_raw_handler sony_handler = {
- .protocols = RC_TYPE_SONY,
+ .protocols = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20,
.decode = ir_sony_decode,
};
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 24c77a42fc36..1b8669b6d042 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1563,7 +1563,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* set up ir-core props */
rdev->priv = itdev;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = ite_open;
rdev->close = ite_close;
rdev->s_idle = ite_s_idle;
@@ -1620,7 +1620,7 @@ failure:
return ret;
}
-static void __devexit ite_remove(struct pnp_dev *pdev)
+static void ite_remove(struct pnp_dev *pdev)
{
struct ite_dev *dev = pnp_get_drvdata(pdev);
unsigned long flags;
@@ -1702,18 +1702,18 @@ static struct pnp_driver ite_driver = {
.name = ITE_DRIVER_NAME,
.id_table = ite_ids,
.probe = ite_probe,
- .remove = __devexit_p(ite_remove),
+ .remove = ite_remove,
.suspend = ite_suspend,
.resume = ite_resume,
.shutdown = ite_shutdown,
};
-int ite_init(void)
+static int ite_init(void)
{
return pnp_register_driver(&ite_driver);
}
-void ite_exit(void)
+static void ite_exit(void)
{
pnp_unregister_driver(&ite_driver);
}
diff --git a/drivers/media/rc/keymaps/rc-imon-mce.c b/drivers/media/rc/keymaps/rc-imon-mce.c
index 124c7228ba8c..f0da960560b0 100644
--- a/drivers/media/rc/keymaps/rc-imon-mce.c
+++ b/drivers/media/rc/keymaps/rc-imon-mce.c
@@ -121,7 +121,7 @@ static struct rc_map_list imon_mce_map = {
.scan = imon_mce,
.size = ARRAY_SIZE(imon_mce),
/* its RC6, but w/a hardware decoder */
- .rc_type = RC_TYPE_RC6,
+ .rc_type = RC_TYPE_RC6_MCE,
.name = RC_MAP_IMON_MCE,
}
};
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 753e43ec787b..ef4006fe4de0 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -97,7 +97,7 @@ static struct rc_map_list rc6_mce_map = {
.map = {
.scan = rc6_mce,
.size = ARRAY_SIZE(rc6_mce),
- .rc_type = RC_TYPE_RC6,
+ .rc_type = RC_TYPE_RC6_MCE,
.name = RC_MAP_RC6_MCE,
}
};
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 850547fe711c..9afb9331217d 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1205,7 +1205,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->dev.parent = dev;
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = MS_TO_NS(100);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
@@ -1229,8 +1229,8 @@ out:
return NULL;
}
-static int __devinit mceusb_dev_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int mceusb_dev_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *idesc;
@@ -1393,7 +1393,7 @@ mem_alloc_fail:
}
-static void __devexit mceusb_dev_disconnect(struct usb_interface *intf)
+static void mceusb_dev_disconnect(struct usb_interface *intf)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct mceusb_dev *ir = usb_get_intfdata(intf);
@@ -1432,7 +1432,7 @@ static int mceusb_dev_resume(struct usb_interface *intf)
static struct usb_driver mceusb_dev_driver = {
.name = DRIVER_NAME,
.probe = mceusb_dev_probe,
- .disconnect = __devexit_p(mceusb_dev_disconnect),
+ .disconnect = mceusb_dev_disconnect,
.suspend = mceusb_dev_suspend,
.resume = mceusb_dev_resume,
.reset_resume = mceusb_dev_resume,
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 2ea913a44ae8..b8aa9abb31ff 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -472,6 +472,7 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
}
+#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
@@ -504,7 +505,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
return carrier;
}
-
+#endif
/*
* set carrier frequency
*
@@ -620,7 +621,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
DEFINE_IR_RAW_EVENT(rawir);
- u32 carrier;
u8 sample;
int i;
@@ -629,9 +629,6 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
if (debug)
nvt_dump_rx_buf(nvt);
- if (nvt->carrier_detect_enabled)
- carrier = nvt_rx_carrier_detect(nvt);
-
nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
init_ir_raw_event(&rawir);
@@ -1045,7 +1042,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
@@ -1116,7 +1113,7 @@ failure:
return ret;
}
-static void __devexit nvt_remove(struct pnp_dev *pdev)
+static void nvt_remove(struct pnp_dev *pdev)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
unsigned long flags;
@@ -1214,18 +1211,18 @@ static struct pnp_driver nvt_driver = {
.id_table = nvt_ids,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = nvt_probe,
- .remove = __devexit_p(nvt_remove),
+ .remove = nvt_remove,
.suspend = nvt_suspend,
.resume = nvt_resume,
.shutdown = nvt_shutdown,
};
-int nvt_init(void)
+static int nvt_init(void)
{
return pnp_register_driver(&nvt_driver);
}
-void nvt_exit(void)
+static void nvt_exit(void)
{
pnp_unregister_driver(&nvt_driver);
}
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 0d5e0872a2ea..7c3674ff5ea2 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -103,7 +103,6 @@ struct nvt_dev {
/* rx settings */
bool learning_enabled;
- bool carrier_detect_enabled;
/* track cir wake state */
u8 wake_state;
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index f9be68132c67..53d02827a472 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -195,7 +195,7 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
rc->max_timeout = UINT_MAX;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index cabc19c10515..601d1ac1c688 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -725,25 +725,36 @@ static struct class ir_input_class = {
.devnode = ir_devnode,
};
+/*
+ * These are the protocol textual descriptions that are
+ * used by the sysfs protocols file. Note that the order
+ * of the entries is relevant.
+ */
static struct {
u64 type;
char *name;
} proto_names[] = {
- { RC_TYPE_UNKNOWN, "unknown" },
- { RC_TYPE_RC5, "rc-5" },
- { RC_TYPE_NEC, "nec" },
- { RC_TYPE_RC6, "rc-6" },
- { RC_TYPE_JVC, "jvc" },
- { RC_TYPE_SONY, "sony" },
- { RC_TYPE_RC5_SZ, "rc-5-sz" },
- { RC_TYPE_SANYO, "sanyo" },
- { RC_TYPE_MCE_KBD, "mce_kbd" },
- { RC_TYPE_LIRC, "lirc" },
- { RC_TYPE_OTHER, "other" },
+ { RC_BIT_NONE, "none" },
+ { RC_BIT_OTHER, "other" },
+ { RC_BIT_UNKNOWN, "unknown" },
+ { RC_BIT_RC5 |
+ RC_BIT_RC5X, "rc-5" },
+ { RC_BIT_NEC, "nec" },
+ { RC_BIT_RC6_0 |
+ RC_BIT_RC6_6A_20 |
+ RC_BIT_RC6_6A_24 |
+ RC_BIT_RC6_6A_32 |
+ RC_BIT_RC6_MCE, "rc-6" },
+ { RC_BIT_JVC, "jvc" },
+ { RC_BIT_SONY12 |
+ RC_BIT_SONY15 |
+ RC_BIT_SONY20, "sony" },
+ { RC_BIT_RC5_SZ, "rc-5-sz" },
+ { RC_BIT_SANYO, "sanyo" },
+ { RC_BIT_MCE_KBD, "mce_kbd" },
+ { RC_BIT_LIRC, "lirc" },
};
-#define PROTO_NONE "none"
-
/**
* show_protocols() - shows the current IR protocol(s)
* @device: the device descriptor
@@ -790,6 +801,9 @@ static ssize_t show_protocols(struct device *device,
tmp += sprintf(tmp, "[%s] ", proto_names[i].name);
else if (allowed & proto_names[i].type)
tmp += sprintf(tmp, "%s ", proto_names[i].name);
+
+ if (allowed & proto_names[i].type)
+ allowed &= ~proto_names[i].type;
}
if (tmp != buf)
@@ -867,26 +881,20 @@ static ssize_t store_protocols(struct device *device,
disable = false;
}
- if (!enable && !disable && !strncasecmp(tmp, PROTO_NONE, sizeof(PROTO_NONE))) {
- tmp += sizeof(PROTO_NONE);
- mask = 0;
- count++;
- } else {
- for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
- if (!strcasecmp(tmp, proto_names[i].name)) {
- tmp += strlen(proto_names[i].name);
- mask = proto_names[i].type;
- break;
- }
- }
- if (i == ARRAY_SIZE(proto_names)) {
- IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
- ret = -EINVAL;
- goto out;
+ for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
+ if (!strcasecmp(tmp, proto_names[i].name)) {
+ mask = proto_names[i].type;
+ break;
}
- count++;
}
+ if (i == ARRAY_SIZE(proto_names)) {
+ IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
+ return -EINVAL;
+ }
+
+ count++;
+
if (enable)
type |= mask;
else if (disable)
@@ -902,7 +910,7 @@ static ssize_t store_protocols(struct device *device,
}
if (dev->change_protocol) {
- rc = dev->change_protocol(dev, type);
+ rc = dev->change_protocol(dev, &type);
if (rc < 0) {
IR_dprintk(1, "Error setting protocols to 0x%llx\n",
(long long)type);
@@ -1117,7 +1125,8 @@ int rc_register_device(struct rc_dev *dev)
}
if (dev->change_protocol) {
- rc = dev->change_protocol(dev, rc_map->rc_type);
+ u64 rc_type = (1 << rc_map->rc_type);
+ rc = dev->change_protocol(dev, &rc_type);
if (rc < 0)
goto out_raw;
}
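The rc-main.c hunks above change the change_protocol() callback contract: the core now passes the requested RC_BIT_* mask by pointer, and the driver is expected to write back the mask it actually enabled (as the imon driver does earlier in this series). A minimal sketch of a driver callback under the new contract, assuming a hypothetical foo_dev state structure and a foo_hw_set_nec() hardware helper that are not part of this patch:

	static int foo_change_protocol(struct rc_dev *rc, u64 *rc_type)
	{
		struct foo_dev *foo = rc->priv;	/* hypothetical driver state */

		/* NEC is the only protocol this imaginary hardware decodes */
		if (*rc_type & RC_BIT_NEC) {
			foo_hw_set_nec(foo);	/* assumed hardware helper */
			*rc_type = RC_BIT_NEC;	/* report what was selected */
			return 0;
		}

		return -EINVAL;			/* unsupported protocol mask */
	}

From userspace this is still driven through the sysfs protocols file, using the per-variant names from the proto_names[] table above (e.g. writing "+nec", "-rc-6" or "none").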
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 9f5a17bb5ef5..1800326f93e6 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1082,7 +1082,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->dev.parent = dev;
rc->priv = rr3;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = US_TO_NS(2750);
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
@@ -1102,8 +1102,8 @@ out:
return NULL;
}
-static int __devinit redrat3_dev_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int redrat3_dev_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct device *dev = &intf->dev;
@@ -1241,7 +1241,7 @@ no_endpoints:
return retval;
}
-static void __devexit redrat3_dev_disconnect(struct usb_interface *intf)
+static void redrat3_dev_disconnect(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
@@ -1281,7 +1281,7 @@ static int redrat3_dev_resume(struct usb_interface *intf)
static struct usb_driver redrat3_dev_driver = {
.name = DRIVER_NAME,
.probe = redrat3_dev_probe,
- .disconnect = __devexit_p(redrat3_dev_disconnect),
+ .disconnect = redrat3_dev_disconnect,
.suspend = redrat3_dev_suspend,
.resume = redrat3_dev_resume,
.reset_resume = redrat3_dev_resume,
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index d6f4bfe09391..d7b11e6a9982 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -322,7 +322,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
rdev->dev.parent = dev;
rdev->priv = sz;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
@@ -346,8 +346,8 @@ out:
* On any failure the return value is the ERROR
* On success return 0
*/
-static int __devinit streamzap_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int streamzap_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *usbdev = interface_to_usbdev(intf);
struct usb_host_interface *iface_host;
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index fef05235234a..78be8a914225 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -194,8 +194,8 @@ static void ttusbir_urb_complete(struct urb *urb)
dev_warn(tt->dev, "failed to resubmit urb: %d\n", rc);
}
-static int __devinit ttusbir_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int ttusbir_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct ttusbir *tt;
struct usb_interface_descriptor *idesc;
@@ -316,7 +316,7 @@ static int __devinit ttusbir_probe(struct usb_interface *intf,
usb_to_input_id(tt->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->priv = tt;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_TT_1500;
@@ -367,7 +367,7 @@ out:
return ret;
}
-static void __devexit ttusbir_disconnect(struct usb_interface *intf)
+static void ttusbir_disconnect(struct usb_interface *intf)
{
struct ttusbir *tt = usb_get_intfdata(intf);
struct usb_device *udev = tt->udev;
@@ -435,7 +435,7 @@ static struct usb_driver ttusbir_driver = {
.suspend = ttusbir_suspend,
.resume = ttusbir_resume,
.reset_resume = ttusbir_resume,
- .disconnect = __devexit_p(ttusbir_disconnect)
+ .disconnect = ttusbir_disconnect,
};
module_usb_driver(ttusbir_driver);
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 7c9b5f33113b..930c61499037 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -7,6 +7,7 @@
* with minor modifications.
*
* Original Author: David Härdeman <david@hardeman.nu>
+ * Copyright (C) 2012 Sean Young <sean@mess.org>
* Copyright (C) 2009 - 2011 David Härdeman <david@hardeman.nu>
*
* Dedicated to my daughter Matilda, without whose loving attention this
@@ -22,9 +23,7 @@
* o IR Receive
* o IR Transmit
* o Wake-On-CIR functionality
- *
- * To do:
- * o Learning
+ * o Carrier detection
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -149,6 +148,12 @@
#define WBCIR_REGSEL_MASK 0x20
/* Starting address of selected register in WBCIR_REG_WCEIR_INDEX */
#define WBCIR_REG_ADDR0 0x00
+/* Enable carrier counter */
+#define WBCIR_CNTR_EN 0x01
+/* Reset carrier counter */
+#define WBCIR_CNTR_R 0x02
+/* Invert TX */
+#define WBCIR_IRTX_INV 0x04
/* Valid banks for the SP3 UART */
enum wbcir_bank {
@@ -184,7 +189,7 @@ enum wbcir_txstate {
};
/* Misc */
-#define WBCIR_NAME "winbond-cir"
+#define WBCIR_NAME "Winbond CIR"
#define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */
#define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */
#define INVALID_SCANCODE 0x7FFFFFFF /* Invalid with all protos */
@@ -207,7 +212,8 @@ struct wbcir_data {
/* RX state */
enum wbcir_rxstate rxstate;
struct led_trigger *rxtrigger;
- struct ir_raw_event rxev;
+ int carrier_report_enabled;
+ u32 pulse_duration;
/* TX state */
enum wbcir_txstate txstate;
@@ -330,6 +336,30 @@ wbcir_to_rc6cells(u8 val)
*****************************************************************************/
static void
+wbcir_carrier_report(struct wbcir_data *data)
+{
+ unsigned counter = inb(data->ebase + WBCIR_REG_ECEIR_CNT_LO) |
+ inb(data->ebase + WBCIR_REG_ECEIR_CNT_HI) << 8;
+
+ if (counter > 0 && counter < 0xffff) {
+ DEFINE_IR_RAW_EVENT(ev);
+
+ ev.carrier_report = 1;
+ ev.carrier = DIV_ROUND_CLOSEST(counter * 1000000u,
+ data->pulse_duration);
+
+ ir_raw_event_store(data->dev, &ev);
+ }
+
+ /* reset and restart the counter */
+ data->pulse_duration = 0;
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_EN,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+}
+
+static void
wbcir_idle_rx(struct rc_dev *dev, bool idle)
{
struct wbcir_data *data = dev->priv;
@@ -339,9 +369,16 @@ wbcir_idle_rx(struct rc_dev *dev, bool idle)
led_trigger_event(data->rxtrigger, LED_FULL);
}
- if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE)
+ if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE) {
+ data->rxstate = WBCIR_RXSTATE_INACTIVE;
+ led_trigger_event(data->rxtrigger, LED_OFF);
+
+ if (data->carrier_report_enabled)
+ wbcir_carrier_report(data);
+
/* Tell hardware to go idle by setting RXINACTIVE */
outb(WBCIR_RX_DISABLE, data->sbase + WBCIR_REG_SP3_ASCR);
+ }
}
static void
@@ -349,21 +386,22 @@ wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device)
{
u8 irdata;
DEFINE_IR_RAW_EVENT(rawir);
+ unsigned duration;
/* Since RXHDLEV is set, at least 8 bytes are in the FIFO */
while (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_AVAIL) {
irdata = inb(data->sbase + WBCIR_REG_SP3_RXDATA);
if (data->rxstate == WBCIR_RXSTATE_ERROR)
continue;
+
+ duration = ((irdata & 0x7F) + 1) * 2;
rawir.pulse = irdata & 0x80 ? false : true;
- rawir.duration = US_TO_NS(((irdata & 0x7F) + 1) * 10);
- ir_raw_event_store_with_filter(data->dev, &rawir);
- }
+ rawir.duration = US_TO_NS(duration);
- /* Check if we should go idle */
- if (data->dev->idle) {
- led_trigger_event(data->rxtrigger, LED_OFF);
- data->rxstate = WBCIR_RXSTATE_INACTIVE;
+ if (rawir.pulse)
+ data->pulse_duration += duration;
+
+ ir_raw_event_store_with_filter(data->dev, &rawir);
}
ir_raw_event_handle(data->dev);
@@ -492,6 +530,33 @@ wbcir_irq_handler(int irqno, void *cookie)
*****************************************************************************/
static int
+wbcir_set_carrier_report(struct rc_dev *dev, int enable)
+{
+ struct wbcir_data *data = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->spinlock, flags);
+
+ if (data->carrier_report_enabled == enable) {
+ spin_unlock_irqrestore(&data->spinlock, flags);
+ return 0;
+ }
+
+ data->pulse_duration = 0;
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+
+ if (enable && data->dev->idle)
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL,
+ WBCIR_CNTR_EN, WBCIR_CNTR_EN | WBCIR_CNTR_R);
+
+ data->carrier_report_enabled = enable;
+ spin_unlock_irqrestore(&data->spinlock, flags);
+
+ return 0;
+}
+
+static int
wbcir_txcarrier(struct rc_dev *dev, u32 carrier)
{
struct wbcir_data *data = dev->priv;
@@ -837,7 +902,7 @@ wbcir_init_hw(struct wbcir_data *data)
/* Set IRTX_INV */
if (invert)
- outb(0x04, data->ebase + WBCIR_REG_ECEIR_CCTL);
+ outb(WBCIR_IRTX_INV, data->ebase + WBCIR_REG_ECEIR_CCTL);
else
outb(0x00, data->ebase + WBCIR_REG_ECEIR_CCTL);
@@ -866,8 +931,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* prescaler 1.0, tx/rx fifo lvl 16 */
outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
- /* Set baud divisor to sample every 10 us */
- outb(0x0F, data->sbase + WBCIR_REG_SP3_BGDL);
+ /* Set baud divisor to sample every 2 us */
+ outb(0x03, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Set CEIR mode */
@@ -876,9 +941,12 @@ wbcir_init_hw(struct wbcir_data *data)
inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
- /* Disable RX demod, enable run-length enc/dec, set freq span */
+ /*
+ * Disable RX demod, enable run-length enc/dec, set freq span and
+ * enable over-sampling
+ */
wbcir_select_bank(data, WBCIR_BANK_7);
- outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
+ outb(0xd0, data->sbase + WBCIR_REG_SP3_RCCFG);
/* Disable timer */
wbcir_select_bank(data, WBCIR_BANK_4);
@@ -915,9 +983,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* Clear RX state */
data->rxstate = WBCIR_RXSTATE_INACTIVE;
- data->rxev.duration = 0;
ir_raw_event_reset(data->dev);
- ir_raw_event_handle(data->dev);
+ ir_raw_event_set_idle(data->dev, true);
/* Clear TX state */
if (data->txstate == WBCIR_TXSTATE_ACTIVE) {
@@ -941,7 +1008,7 @@ wbcir_resume(struct pnp_dev *device)
return 0;
}
-static int __devinit
+static int
wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
{
struct device *dev = &device->dev;
@@ -1007,7 +1074,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
}
data->dev->driver_type = RC_DRIVER_IR_RAW;
- data->dev->driver_name = WBCIR_NAME;
+ data->dev->driver_name = DRVNAME;
data->dev->input_name = WBCIR_NAME;
data->dev->input_phys = "wbcir/cir0";
data->dev->input_id.bustype = BUS_HOST;
@@ -1016,13 +1083,15 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->input_id.version = WBCIR_ID_CHIP;
data->dev->map_name = RC_MAP_RC6_MCE;
data->dev->s_idle = wbcir_idle_rx;
+ data->dev->s_carrier_report = wbcir_set_carrier_report;
data->dev->s_tx_mask = wbcir_txmask;
data->dev->s_tx_carrier = wbcir_txcarrier;
data->dev->tx_ir = wbcir_tx;
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
data->dev->timeout = MS_TO_NS(100);
- data->dev->allowed_protos = RC_TYPE_ALL;
+ data->dev->rx_resolution = US_TO_NS(2);
+ data->dev->allowed_protos = RC_BIT_ALL;
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
@@ -1086,7 +1155,7 @@ exit:
return err;
}
-static void __devexit
+static void
wbcir_remove(struct pnp_dev *device)
{
struct wbcir_data *data = pnp_get_drvdata(device);
@@ -1132,7 +1201,7 @@ static struct pnp_driver wbcir_driver = {
.name = WBCIR_NAME,
.id_table = wbcir_ids,
.probe = wbcir_probe,
- .remove = __devexit_p(wbcir_remove),
+ .remove = wbcir_remove,
.suspend = wbcir_suspend,
.resume = wbcir_resume,
.shutdown = wbcir_shutdown
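The winbond-cir changes above derive the carrier frequency by letting the hardware counter (WBCIR_CNTR_EN) accumulate carrier cycles while the driver accumulates mark time in pulse_duration (in microseconds). A worked example of the arithmetic in wbcir_carrier_report(), using illustrative numbers only:

	/* Illustrative numbers, not taken from real hardware:
	 * 180 carrier cycles counted over 5000 us of mark time. */
	unsigned counter = 180;
	u32 pulse_duration = 5000;	/* accumulated mark time, in us */
	u32 carrier = DIV_ROUND_CLOSEST(counter * 1000000u, pulse_duration);
	/* carrier == 36000, i.e. a 36 kHz carrier was measured */

The result is reported to the core as a carrier_report event only when the counter holds a plausible value (neither 0 nor saturated at 0xffff), exactly as the function above does.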
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index aff39ae457a0..81f38aae9c66 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -35,8 +35,6 @@
* Currently it blind writes bunch of static registers from the
* fc2580_freq_regs_lut[] when fc2580_set_params() is called. Add some
* logic to reduce unneeded register writes.
- * There is also don't-care registers, initialized with value 0xff, and those
- * are also written to the chip currently (yes, not wise).
*/
/* write multiple registers */
@@ -111,6 +109,17 @@ static int fc2580_rd_reg(struct fc2580_priv *priv, u8 reg, u8 *val)
return fc2580_rd_regs(priv, reg, val, 1);
}
+/* write a single register conditionally, only when the value differs from 0xff
+ * XXX: This is a special routine meant only for writing fc2580_freq_regs_lut[]
+ * values. Do not use it for other purposes. */
+static int fc2580_wr_reg_ff(struct fc2580_priv *priv, u8 reg, u8 val)
+{
+ if (val == 0xff)
+ return 0;
+ else
+ return fc2580_wr_regs(priv, reg, &val, 1);
+}
+
static int fc2580_set_params(struct dvb_frontend *fe)
{
struct fc2580_priv *priv = fe->tuner_priv;
@@ -213,99 +222,99 @@ static int fc2580_set_params(struct dvb_frontend *fe)
if (i == ARRAY_SIZE(fc2580_freq_regs_lut))
goto err;
- ret = fc2580_wr_reg(priv, 0x25, fc2580_freq_regs_lut[i].r25_val);
+ ret = fc2580_wr_reg_ff(priv, 0x25, fc2580_freq_regs_lut[i].r25_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x27, fc2580_freq_regs_lut[i].r27_val);
+ ret = fc2580_wr_reg_ff(priv, 0x27, fc2580_freq_regs_lut[i].r27_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x28, fc2580_freq_regs_lut[i].r28_val);
+ ret = fc2580_wr_reg_ff(priv, 0x28, fc2580_freq_regs_lut[i].r28_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x29, fc2580_freq_regs_lut[i].r29_val);
+ ret = fc2580_wr_reg_ff(priv, 0x29, fc2580_freq_regs_lut[i].r29_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2b, fc2580_freq_regs_lut[i].r2b_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2b, fc2580_freq_regs_lut[i].r2b_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2c, fc2580_freq_regs_lut[i].r2c_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2c, fc2580_freq_regs_lut[i].r2c_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2d, fc2580_freq_regs_lut[i].r2d_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2d, fc2580_freq_regs_lut[i].r2d_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x30, fc2580_freq_regs_lut[i].r30_val);
+ ret = fc2580_wr_reg_ff(priv, 0x30, fc2580_freq_regs_lut[i].r30_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x44, fc2580_freq_regs_lut[i].r44_val);
+ ret = fc2580_wr_reg_ff(priv, 0x44, fc2580_freq_regs_lut[i].r44_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x50, fc2580_freq_regs_lut[i].r50_val);
+ ret = fc2580_wr_reg_ff(priv, 0x50, fc2580_freq_regs_lut[i].r50_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x53, fc2580_freq_regs_lut[i].r53_val);
+ ret = fc2580_wr_reg_ff(priv, 0x53, fc2580_freq_regs_lut[i].r53_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x5f, fc2580_freq_regs_lut[i].r5f_val);
+ ret = fc2580_wr_reg_ff(priv, 0x5f, fc2580_freq_regs_lut[i].r5f_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x61, fc2580_freq_regs_lut[i].r61_val);
+ ret = fc2580_wr_reg_ff(priv, 0x61, fc2580_freq_regs_lut[i].r61_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x62, fc2580_freq_regs_lut[i].r62_val);
+ ret = fc2580_wr_reg_ff(priv, 0x62, fc2580_freq_regs_lut[i].r62_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x63, fc2580_freq_regs_lut[i].r63_val);
+ ret = fc2580_wr_reg_ff(priv, 0x63, fc2580_freq_regs_lut[i].r63_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x67, fc2580_freq_regs_lut[i].r67_val);
+ ret = fc2580_wr_reg_ff(priv, 0x67, fc2580_freq_regs_lut[i].r67_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x68, fc2580_freq_regs_lut[i].r68_val);
+ ret = fc2580_wr_reg_ff(priv, 0x68, fc2580_freq_regs_lut[i].r68_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x69, fc2580_freq_regs_lut[i].r69_val);
+ ret = fc2580_wr_reg_ff(priv, 0x69, fc2580_freq_regs_lut[i].r69_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6a, fc2580_freq_regs_lut[i].r6a_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6a, fc2580_freq_regs_lut[i].r6a_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6b, fc2580_freq_regs_lut[i].r6b_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6b, fc2580_freq_regs_lut[i].r6b_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6c, fc2580_freq_regs_lut[i].r6c_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6c, fc2580_freq_regs_lut[i].r6c_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6d, fc2580_freq_regs_lut[i].r6d_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6d, fc2580_freq_regs_lut[i].r6d_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6e, fc2580_freq_regs_lut[i].r6e_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6e, fc2580_freq_regs_lut[i].r6e_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6f, fc2580_freq_regs_lut[i].r6f_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6f, fc2580_freq_regs_lut[i].r6f_val);
if (ret < 0)
goto err;
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index ba84936aafd6..95ed46f2cd26 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -161,7 +161,7 @@ static int max2165_set_bandwidth(struct max2165_priv *priv, u32 bw)
return 0;
}
-int fixpt_div32(u32 dividend, u32 divisor, u32 *quotient, u32 *fraction)
+static int fixpt_div32(u32 dividend, u32 divisor, u32 *quotient, u32 *fraction)
{
u32 remainder;
u32 q, f = 0;
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index 389668474070..83a6240f64d3 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -136,7 +136,7 @@ static int tua9001_set_params(struct dvb_frontend *fe)
{
struct tua9001_priv *priv = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i;
+ int ret = 0, i;
u16 val;
u32 frequency;
struct reg_val data[2];
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 4937712278f6..5c0fd787cc8f 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -934,7 +934,7 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
int rc = 0, is_retry = 0;
u16 hwmodel;
v4l2_std_id std0;
- u8 hw_major, hw_minor, fw_major, fw_minor;
+ u8 hw_major = 0, hw_minor = 0, fw_major = 0, fw_minor = 0;
dprintk(1, "%s called\n", __func__);
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 448361c6a13e..0cb7c28dcb17 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -25,7 +25,7 @@
#include "media/tuner.h"
#include "media/v4l2-common.h"
-void hvr950q_cs5340_audio(void *priv, int enable)
+static void hvr950q_cs5340_audio(void *priv, int enable)
{
/* Because the HVR-950q shares an i2s bus between the cs5340 and the
au8522, we need to hold cs5340 in reset when using the au8522 */
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index b328f6550d0b..9a6f15613a38 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -272,7 +272,6 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
struct au0828_dev *dev = container_of(work, struct au0828_dev,
restart_streaming);
struct au0828_dvb *dvb = &dev->dvb;
- int ret;
if (dev->urb_streaming == 0)
return;
@@ -282,7 +281,7 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
mutex_lock(&dvb->lock);
/* Stop transport */
- ret = stop_urb_transfer(dev);
+ stop_urb_transfer(dev);
au0828_write(dev, 0x608, 0x00);
au0828_write(dev, 0x609, 0x00);
au0828_write(dev, 0x60a, 0x00);
@@ -293,7 +292,7 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
au0828_write(dev, 0x609, 0x72);
au0828_write(dev, 0x60a, 0x71);
au0828_write(dev, 0x60b, 0x01);
- ret = start_urb_transfer(dev);
+ start_urb_transfer(dev);
mutex_unlock(&dvb->lock);
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 870585570571..45387aab10c7 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -158,7 +158,7 @@ static void au0828_irq_callback(struct urb *urb)
/*
* Stop and Deallocate URBs
*/
-void au0828_uninit_isoc(struct au0828_dev *dev)
+static void au0828_uninit_isoc(struct au0828_dev *dev)
{
struct urb *urb;
int i;
@@ -197,9 +197,9 @@ void au0828_uninit_isoc(struct au0828_dev *dev)
/*
* Allocate URBs and start IRQ
*/
-int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
- int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct au0828_dev *dev, struct urb *urb))
+static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
+ int num_bufs, int max_pkt_size,
+ int (*isoc_copy) (struct au0828_dev *dev, struct urb *urb))
{
struct au0828_dmaqueue *dma_q = &dev->vidq;
int i;
@@ -783,7 +783,7 @@ static int au0828_i2s_init(struct au0828_dev *dev)
* Auvitek au0828 analog stream enable
* Please set interface0 to AS5 before enable the stream
*/
-int au0828_analog_stream_enable(struct au0828_dev *d)
+static int au0828_analog_stream_enable(struct au0828_dev *d)
{
dprintk(1, "au0828_analog_stream_enable called\n");
au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
@@ -810,7 +810,7 @@ int au0828_analog_stream_disable(struct au0828_dev *d)
return 0;
}
-void au0828_analog_stream_reset(struct au0828_dev *dev)
+static void au0828_analog_stream_reset(struct au0828_dev *dev)
{
dprintk(1, "au0828_analog_stream_reset called\n");
au0828_writereg(dev, AU0828_SENSORCTRL_100, 0x0);
@@ -913,7 +913,7 @@ static int get_ressource(struct au0828_fh *fh)
/* This function ensures that video frames continue to be delivered even if
the ITU-656 input isn't receiving any data (thereby preventing applications
such as tvtime from hanging) */
-void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(unsigned long data)
{
struct au0828_dev *dev = (struct au0828_dev *) data;
struct au0828_dmaqueue *dma_q = &dev->vidq;
@@ -937,7 +937,7 @@ void au0828_vid_buffer_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock, flags);
}
-void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(unsigned long data)
{
struct au0828_dev *dev = (struct au0828_dev *) data;
struct au0828_dmaqueue *dma_q = &dev->vbiq;
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 447148eff958..722207913740 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1068,12 +1068,12 @@ int cx231xx_unmute_audio(struct cx231xx *dev)
}
EXPORT_SYMBOL_GPL(cx231xx_unmute_audio);
-int stopAudioFirmware(struct cx231xx *dev)
+static int stopAudioFirmware(struct cx231xx *dev)
{
return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x03);
}
-int restartAudioFirmware(struct cx231xx *dev)
+static int restartAudioFirmware(struct cx231xx *dev)
{
return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x13);
}
@@ -2631,11 +2631,6 @@ int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type)
rc = cx231xx_stop_stream(dev, ep_mask);
}
- if (dev->mode == CX231XX_ANALOG_MODE)
- ;/* do any in Analog mode */
- else
- ;/* do any in digital mode */
-
return rc;
}
EXPORT_SYMBOL_GPL(cx231xx_capture_start);
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index b84ebc54d91b..bbed1e40eeda 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -686,7 +686,7 @@ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg)
}
EXPORT_SYMBOL_GPL(cx231xx_tuner_callback);
-void cx231xx_reset_out(struct cx231xx *dev)
+static void cx231xx_reset_out(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
msleep(200);
@@ -694,11 +694,13 @@ void cx231xx_reset_out(struct cx231xx *dev)
msleep(200);
cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
}
-void cx231xx_enable_OSC(struct cx231xx *dev)
+
+static void cx231xx_enable_OSC(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, CX23417_OSC_EN, 1);
}
-void cx231xx_sleep_s5h1432(struct cx231xx *dev)
+
+static void cx231xx_sleep_s5h1432(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, SLEEP_S5H1432, 0);
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 781feed406f7..96a5a0965399 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -72,8 +72,8 @@ static inline bool is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus,
/*
* cx231xx_i2c_send_bytes()
*/
-int cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap,
- const struct i2c_msg *msg)
+static int cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap,
+ const struct i2c_msg *msg)
{
struct cx231xx_i2c *bus = i2c_adap->algo_data;
struct cx231xx *dev = bus->dev;
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 96176e9db5a2..0f7b42446826 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -99,7 +99,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
/* The i2c micro-controller only outputs the cmd part of NEC protocol */
dev->init_data.rc_dev->scanmask = 0xff;
dev->init_data.rc_dev->driver_name = "cx231xx";
- dev->init_data.type = RC_TYPE_NEC;
+ dev->init_data.type = RC_BIT_NEC;
info.addr = 0x30;
/* Load and bind ir-kbd-i2c */
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 834bfecbed73..7a622dbe9b6d 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -134,7 +134,7 @@ config DVB_USB_MXL111SF
config DVB_USB_RTL28XXU
tristate "Realtek RTL28xxU DVB USB support"
- depends on DVB_USB_V2 && EXPERIMENTAL
+ depends on DVB_USB_V2
select DVB_RTL2830
select DVB_RTL2832
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 3d7526e28d42..943d93423705 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1306,7 +1306,7 @@ static int af9015_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
if (!rc->map_name)
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = af9015_rc_query;
rc->interval = 500;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index ea27eaff4e34..61ae7f9d0b27 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1023,10 +1023,10 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
switch (tmp) {
case 0: /* NEC */
default:
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
break;
case 1: /* RC6 */
- rc->allowed_protos = RC_TYPE_RC6;
+ rc->allowed_protos = RC_BIT_RC6_MCE;
break;
}
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index ec540140c810..d05c5b563dac 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -1048,7 +1048,7 @@ static int anysee_rc_query(struct dvb_usb_device *d)
static int anysee_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = anysee_rc_query;
rc->interval = 250; /* windows driver uses 500ms */
@@ -1170,7 +1170,7 @@ static int anysee_ci_poll_slot_status(struct dvb_ca_en50221 *ci, int slot,
struct dvb_usb_device *d = ci->data;
struct anysee_state *state = d_to_priv(d);
int ret;
- u8 tmp;
+ u8 tmp = 0;
ret = anysee_rd_reg_mask(d, REG_IOC, &tmp, 0x40);
if (ret)
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 54f1221d930d..d75dbf27e99e 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -826,7 +826,7 @@ static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
pr_debug("Getting az6007 Remote Control properties\n");
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = az6007_rc_query;
rc->interval = 400;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index bae16a1189d6..059291b892b8 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -137,7 +137,7 @@ struct dvb_usb_driver_info {
struct dvb_usb_rc {
const char *map_name;
u64 allowed_protos;
- int (*change_protocol)(struct rc_dev *dev, u64 rc_type);
+ int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
int (*query) (struct dvb_usb_device *d);
unsigned int interval;
const enum rc_driver_type driver_type;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index ba51f65204de..671b4fa232b4 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -224,7 +224,7 @@ static void dvb_usb_data_complete_raw(struct usb_data_stream *stream, u8 *buf,
dvb_dmx_swfilter_raw(&adap->demux, buf, len);
}
-int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -236,7 +236,7 @@ int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
return usb_urb_initv2(&adap->stream, &adap->props->stream);
}
-int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -368,7 +368,7 @@ static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
return dvb_usb_ctrl_feed(dvbdmxfeed, -1);
}
-int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
{
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
@@ -440,7 +440,7 @@ err_dvb_register_adapter:
return ret;
}
-int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -456,7 +456,7 @@ int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
return 0;
}
-int dvb_usbv2_device_power_ctrl(struct dvb_usb_device *d, int onoff)
+static int dvb_usbv2_device_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
@@ -553,7 +553,7 @@ err:
return ret;
}
-int dvb_usbv2_adapter_frontend_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_frontend_init(struct dvb_usb_adapter *adap)
{
int ret, i, count_registered = 0;
struct dvb_usb_device *d = adap_to_d(adap);
@@ -622,7 +622,7 @@ err:
return ret;
}
-int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
{
int i;
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
index 695f9106bc54..47204280b8b3 100644
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ b/drivers/media/usb/dvb-usb-v2/it913x.c
@@ -659,13 +659,19 @@ static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
it913x_wr_reg(d, DEV_0_DMOD, MP2IF2_SW_RST, 0x1);
it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x0f);
it913x_wr_reg(d, DEV_0, EP0_TX_NAK, 0x1b);
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x2f);
+ if (st->proprietary_ir == false) /* Enable endpoint 3 */
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x3f);
+ else
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x2f);
it913x_wr_reg(d, DEV_0, EP4_TX_LEN_LSB,
ep_size & 0xff);
it913x_wr_reg(d, DEV_0, EP4_TX_LEN_MSB, ep_size >> 8);
ret = it913x_wr_reg(d, DEV_0, EP4_MAX_PKT, pkt_size);
} else if (adap->id == 1 && adap->fe[0]) {
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x6f);
+ if (st->proprietary_ir == false)
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x7f);
+ else
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x6f);
it913x_wr_reg(d, DEV_0, EP5_TX_LEN_LSB,
ep_size & 0xff);
it913x_wr_reg(d, DEV_0, EP5_TX_LEN_MSB, ep_size >> 8);
@@ -698,7 +704,7 @@ static int it913x_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
return 0;
}
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = it913x_rc_query;
rc->interval = 250;
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index c41d9d9ec7b5..6427ac359f21 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -799,7 +799,7 @@ static const char fw_c_rs2000[] = LME2510_C_RS2000;
static const char fw_lg[] = LME2510_LG;
static const char fw_s0194[] = LME2510_S0194;
-const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
+static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
{
struct lme2510_state *st = d->priv;
struct usb_device *udev = d->udev;
@@ -1253,7 +1253,7 @@ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
static int lme2510_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 093f1acce403..a4c302d0aa37 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1197,7 +1197,7 @@ static int rtl2831u_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = rtl2831u_rc_query;
rc->interval = 400;
@@ -1269,7 +1269,7 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = rtl2832u_rc_query;
rc->interval = 400;
@@ -1338,6 +1338,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "G-Tek Electronics Group Lifeview LV5TDLX DVB-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK,
&rtl2832u_props, "NOXON DAB/DAB+ USB dongle", NULL) },
+ { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV2,
+ &rtl2832u_props, "NOXON DAB/DAB+ USB dongle (rev 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_TREKSTOR_TERRES_2_0,
&rtl2832u_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1101,
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index 5989b6590377..7346f85f3f2f 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -112,7 +112,7 @@ int usb_urb_submitv2(struct usb_data_stream *stream,
return 0;
}
-int usb_urb_free_urbs(struct usb_data_stream *stream)
+static int usb_urb_free_urbs(struct usb_data_stream *stream)
{
int i;
@@ -205,7 +205,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
return 0;
}
-int usb_free_stream_buffers(struct usb_data_stream *stream)
+static int usb_free_stream_buffers(struct usb_data_stream *stream)
{
if (stream->state & USB_STATE_URB_BUF) {
while (stream->buf_num) {
@@ -223,8 +223,8 @@ int usb_free_stream_buffers(struct usb_data_stream *stream)
return 0;
}
-int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num,
- unsigned long size)
+static int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num,
+ unsigned long size)
{
stream->buf_num = 0;
stream->buf_size = size;
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 5e45ae605427..91e0119e8a87 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -298,7 +298,8 @@ struct stb6100_config az6027_stb6100_config = {
/* check for mutex FIXME */
-int az6027_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
+static int az6027_usb_in_op(struct dvb_usb_device *d, u8 req,
+ u16 value, u16 index, u8 *b, int blen)
{
int ret = -1;
if (mutex_lock_interruptible(&d->usb_mutex))
@@ -1051,10 +1052,10 @@ static struct i2c_algorithm az6027_i2c_algo = {
.functionality = az6027_i2c_func,
};
-int az6027_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc,
- int *cold)
+static int az6027_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
{
u8 *b;
s16 ret;
diff --git a/drivers/media/usb/dvb-usb/dib0700.h b/drivers/media/usb/dvb-usb/dib0700.h
index 7de125c0b36f..637b6123f391 100644
--- a/drivers/media/usb/dvb-usb/dib0700.h
+++ b/drivers/media/usb/dvb-usb/dib0700.h
@@ -64,7 +64,7 @@ extern int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);
extern struct i2c_algorithm dib0700_i2c_algo;
extern int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props,
struct dvb_usb_device_description **desc, int *cold);
-extern int dib0700_change_protocol(struct rc_dev *dev, u64 rc_type);
+extern int dib0700_change_protocol(struct rc_dev *dev, u64 *rc_type);
extern int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz);
extern int dib0700_device_count;
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index ef87229de6af..19b5ed2825d7 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -605,7 +605,7 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return ret;
}
-int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
+int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
struct dvb_usb_device *d = rc->priv;
struct dib0700_state *st = d->priv;
@@ -621,17 +621,19 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
st->buf[2] = 0;
/* Set the IR mode */
- if (rc_type == RC_TYPE_RC5)
+ if (*rc_type & RC_BIT_RC5) {
new_proto = 1;
- else if (rc_type == RC_TYPE_NEC)
+ *rc_type = RC_BIT_RC5;
+ } else if (*rc_type & RC_BIT_NEC) {
new_proto = 0;
- else if (rc_type == RC_TYPE_RC6) {
+ *rc_type = RC_BIT_NEC;
+ } else if (*rc_type & RC_BIT_RC6_MCE) {
if (st->fw_version < 0x10200) {
ret = -EINVAL;
goto out;
}
-
new_proto = 2;
+ *rc_type = RC_BIT_RC6_MCE;
} else {
ret = -EINVAL;
goto out;
@@ -645,7 +647,7 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
goto out;
}
- d->props.rc.core.protocol = rc_type;
+ d->props.rc.core.protocol = *rc_type;
out:
mutex_unlock(&d->usb_mutex);
@@ -707,7 +709,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
purb->actual_length);
switch (d->props.rc.core.protocol) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
toggle = 0;
/* NEC protocol sends repeat code as 0 0 0 FF */
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 510001da6e83..11798426fa88 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -518,7 +518,7 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
d->last_event = 0;
switch (d->props.rc.core.protocol) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
/* NEC protocol sends repeat code as 0 0 0 FF */
if ((key[3-2] == 0x00) && (key[3-3] == 0x00) &&
(key[3] == 0xff))
@@ -3658,9 +3658,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3698,9 +3698,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3763,9 +3763,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3808,9 +3808,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3890,9 +3890,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3936,9 +3936,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3987,9 +3987,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4055,9 +4055,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4106,9 +4106,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_NEC_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4177,9 +4177,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4215,9 +4215,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4295,9 +4295,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4341,9 +4341,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_NEC_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4394,9 +4394,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4433,9 +4433,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4472,9 +4472,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4511,9 +4511,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4550,9 +4550,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4589,9 +4589,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4644,9 +4644,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4681,9 +4681,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4721,9 +4721,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4761,9 +4761,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4802,9 +4802,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
},
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index aab0f99bc892..ce4c4e3b58bb 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -202,7 +202,7 @@ struct dvb_rc {
u64 protocol;
u64 allowed_protos;
enum rc_driver_type driver_type;
- int (*change_protocol)(struct rc_dev *dev, u64 rc_type);
+ int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
char *module_name;
int (*rc_query) (struct dvb_usb_device *d);
int rc_interval;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 02e878577c3d..d1ddfa13de86 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -927,7 +927,7 @@ static struct dvb_usb_device_properties pctv452e_properties = {
.rc.core = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
.rc_query = pctv452e_rc_query,
.rc_interval = 100,
},
@@ -980,7 +980,7 @@ static struct dvb_usb_device_properties tt_connect_s2_3600_properties = {
.rc.core = {
.rc_codes = RC_MAP_TT_1500,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
.rc_query = pctv452e_rc_query,
.rc_interval = 100,
},
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 7a8c8c18590f..40832a1aef6c 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -732,7 +732,7 @@ static struct dvb_usb_device_properties technisat_usb2_devices = {
.rc_codes = RC_MAP_TECHNISAT_USB2,
.module_name = "technisat-usb2",
.rc_query = technisat_usb2_rc_query,
- .allowed_protos = RC_TYPE_ALL,
+ .allowed_protos = RC_BIT_ALL,
.driver_type = RC_DRIVER_IR_RAW,
}
};
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index 6a50cdea3bce..bcdac225ebe1 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -741,7 +741,7 @@ static struct dvb_usb_device_properties ttusb2_properties_ct3650 = {
.rc_interval = 150, /* Less than IR_KEYPRESS_TIMEOUT */
.rc_codes = RC_MAP_TT_1500,
.rc_query = tt3650_rc_query,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_adapters = 1,
diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
index 07c673a6e764..22cf9f96cb9e 100644
--- a/drivers/media/usb/dvb-usb/vp702x.c
+++ b/drivers/media/usb/dvb-usb/vp702x.c
@@ -56,7 +56,7 @@ static int vp702x_usb_in_op_unlocked(struct dvb_usb_device *d, u8 req,
}
int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen)
+ u16 index, u8 *b, int blen)
{
int ret;
@@ -67,8 +67,8 @@ int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
return ret;
}
-int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen)
+static int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req,
+ u16 value, u16 index, u8 *b, int blen)
{
int ret;
deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ",req,value,index);
@@ -86,7 +86,7 @@ int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req, u16 value,
return 0;
}
-int vp702x_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
+static int vp702x_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
u16 index, u8 *b, int blen)
{
int ret;
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 16a84f9f46d8..619bffbab3bc 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -1979,6 +1979,15 @@ struct em28xx_board em28xx_boards[] = {
EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
},
+ [EM2884_BOARD_TERRATEC_HTC_USB_XS] = {
+ .name = "Terratec Cinergy HTC USB XS",
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+ .tuner_type = TUNER_ABSENT,
+ .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
+ EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -2057,9 +2066,9 @@ struct usb_device_id em28xx_id_table[] = {
{ USB_DEVICE(0x0ccd, 0x0043),
.driver_info = EM2870_BOARD_TERRATEC_XS },
{ USB_DEVICE(0x0ccd, 0x008e), /* Cinergy HTC USB XS Rev. 1 */
- .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS },
{ USB_DEVICE(0x0ccd, 0x00ac), /* Cinergy HTC USB XS Rev. 2 */
- .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS },
{ USB_DEVICE(0x0ccd, 0x10a2), /* H5 Rev. 1 */
.driver_info = EM2884_BOARD_TERRATEC_H5 },
{ USB_DEVICE(0x0ccd, 0x10ad), /* H5 Rev. 2 */
@@ -3297,7 +3306,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->num_alt = interface->num_altsetting;
- if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
+ if ((unsigned)card[nr] < em28xx_bcount)
dev->model = card[nr];
/* save our data pointer in this interface device */
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 13ae821949e9..63f2e7070c00 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -331,7 +331,7 @@ static struct drxk_config hauppauge_930c_drxk = {
.load_firmware_sync = true,
};
-struct drxk_config terratec_htc_stick_drxk = {
+static struct drxk_config terratec_htc_stick_drxk = {
.adr = 0x29,
.single_master = 1,
.no_i2c_bridge = 1,
@@ -520,7 +520,10 @@ static void terratec_htc_stick_init(struct em28xx *dev)
{ -1, -1, -1, -1},
};
- /* Init the analog decoder? */
+ /*
+ * Init the analog decoder (not yet supported), but
+ * it's probably still a good idea.
+ */
struct {
unsigned char r[4];
int len;
@@ -547,6 +550,64 @@ static void terratec_htc_stick_init(struct em28xx *dev)
em28xx_gpio_set(dev, terratec_htc_stick_end);
};
+static void terratec_htc_usb_xs_init(struct em28xx *dev)
+{
+ int i;
+
+ struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM2874_R80_GPIO, 0xb2, 0xff, 100},
+ {EM2874_R80_GPIO, 0xb2, 0xff, 50},
+ {EM2874_R80_GPIO, 0xb6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+ struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
+ {EM2874_R80_GPIO, 0xa6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xa6, 0xff, 50},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+
+ /*
+ * Init the analog decoder (not yet supported), but
+ * it's probably still a good idea.
+ */
+ struct {
+ unsigned char r[4];
+ int len;
+ } regs[] = {
+ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
+ {{ 0x01, 0x02 }, 2},
+ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
+ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
+ {{ 0x04, 0x00 }, 2},
+ {{ 0x00, 0x04 }, 2},
+ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
+ {{ 0x04, 0x14 }, 2},
+ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
+ };
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+
+ em28xx_gpio_set(dev, terratec_htc_usb_xs_init);
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
+ msleep(10);
+
+ dev->i2c_client.addr = 0x82 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
+
+ em28xx_gpio_set(dev, terratec_htc_usb_xs_end);
+};
+
static void pctv_520e_init(struct em28xx *dev)
{
/*
@@ -1155,6 +1216,25 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2884_BOARD_TERRATEC_HTC_USB_XS:
+ terratec_htc_usb_xs_init(dev);
+
+ /* attach demodulator */
+ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_htc_stick_drxk,
+ &dev->i2c_adap);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* attach tuner */
+ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap,
+ &em28xx_cxd2820r_tda18271_config)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 97d36b4f19db..660bf803c9e4 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -345,7 +345,7 @@ static void em28xx_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
+static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
{
int rc = 0;
struct em28xx_IR *ir = rc_dev->priv;
@@ -354,14 +354,16 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
/* Adjust xclk based on IR table for RC5/NEC tables */
- if (rc_type == RC_TYPE_RC5) {
+ if (*rc_type & RC_BIT_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
- } else if (rc_type == RC_TYPE_NEC) {
+ *rc_type = RC_BIT_RC5;
+ } else if (*rc_type & RC_BIT_NEC) {
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
ir_config = EM2874_IR_NEC;
ir->full_code = 1;
- } else if (rc_type != RC_TYPE_UNKNOWN)
+ *rc_type = RC_BIT_NEC;
+ } else if (*rc_type != RC_BIT_UNKNOWN)
rc = -EINVAL;
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
@@ -524,6 +526,7 @@ static int em28xx_ir_init(struct em28xx *dev)
struct em28xx_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
+ u64 rc_type;
if (dev->board.ir_codes == NULL) {
/* No remote control support */
@@ -546,14 +549,15 @@ static int em28xx_ir_init(struct em28xx *dev)
* em2874 supports more protocols. For now, let's just announce
* the two protocols that were already tested
*/
- rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
rc->priv = ir;
rc->change_protocol = em28xx_ir_change_protocol;
rc->open = em28xx_ir_start;
rc->close = em28xx_ir_stop;
/* By default, keep protocol field untouched */
- err = em28xx_ir_change_protocol(rc, RC_TYPE_UNKNOWN);
+ rc_type = RC_BIT_UNKNOWN;
+ err = em28xx_ir_change_protocol(rc, &rc_type);
if (err)
goto err_out_free;
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 8757523e6863..86e90d86da6d 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -128,6 +128,7 @@
#define EM2874_BOARD_MAXMEDIA_UB425_TC 84
#define EM2884_BOARD_PCTV_510E 85
#define EM2884_BOARD_PCTV_520E 86
+#define EM2884_BOARD_TERRATEC_HTC_USB_XS 87
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index a2b934146ebf..e0a431bb0d42 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1586,8 +1586,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
- if (v4l2_buf->index < 0
- || v4l2_buf->index >= gspca_dev->nframes)
+ if (v4l2_buf->index >= gspca_dev->nframes)
return -EINVAL;
frame = &gspca_dev->frame[v4l2_buf->index];
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index e3eab82cd4e5..352317d7acdb 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -32,7 +32,7 @@ do { \
#define D_USBO 0x00
#define D_V4L2 0x0100
#else
-#define PDEBUG(level, fmt, ...)
+#define PDEBUG(level, fmt, ...) do {} while(0)
#endif
#define GSPCA_MAX_FRAMES 16 /* maximum number of video frame buffers */
diff --git a/drivers/media/usb/gspca/jeilinj.c b/drivers/media/usb/gspca/jeilinj.c
index b897aa86f315..1ba29fe7fada 100644
--- a/drivers/media/usb/gspca/jeilinj.c
+++ b/drivers/media/usb/gspca/jeilinj.c
@@ -114,7 +114,7 @@ static void jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command)
}
/* Responses are one byte only */
-static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
+static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char *response)
{
int retval;
@@ -123,7 +123,7 @@ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
retval = usb_bulk_msg(gspca_dev->dev,
usb_rcvbulkpipe(gspca_dev->dev, 0x84),
gspca_dev->usb_buf, 1, NULL, 500);
- response = gspca_dev->usb_buf[0];
+ *response = gspca_dev->usb_buf[0];
if (retval < 0) {
pr_err("read command [%02x] error %d\n",
gspca_dev->usb_buf[0], retval);
@@ -260,7 +260,7 @@ static int jlj_start(struct gspca_dev *gspca_dev)
if (start_commands[i].delay)
msleep(start_commands[i].delay);
if (start_commands[i].ack_wanted)
- jlj_read1(gspca_dev, response);
+ jlj_read1(gspca_dev, &response);
}
setcamquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual));
msleep(2);
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c
index 40ad6687ee5d..3773a8a745df 100644
--- a/drivers/media/usb/gspca/kinect.c
+++ b/drivers/media/usb/gspca/kinect.c
@@ -381,6 +381,7 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x045e, 0x02ae)},
+ {USB_DEVICE(0x045e, 0x02bf)},
{}
};
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
index cc8ec3f7e8dc..c8e1572eb502 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
@@ -74,6 +74,12 @@ static
DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 2548")
}
}, {
+ .ident = "Fujitsu-Siemens Amilo Pi 2530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 2530")
+ }
+ }, {
.ident = "MSI GX700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2d5c6d8343a0..4f5869a98082 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -29,14 +29,13 @@
* Register page 0:
*
* Address Description
- * 0x02 Red balance control
- * 0x03 Green balance control
- * 0x04 Blue balance control
- * Valus are inverted (0=max, 255=min).
+ * 0x01 Red balance control
+ * 0x02 Green balance control
+ * 0x03 Blue balance control
* The Windows driver uses a quadratic approach to map
* the settable values (0-200) on register values:
- * min=0x80, default=0x40, max=0x20
- * 0x0f-0x20 Colors, saturation and exposure control
+ * min=0x20, default=0x40, max=0x80
+ * 0x0f-0x20 Color and saturation control
* 0xa2-0xab Brightness, contrast and gamma control
* 0xb6 Sharpness control (bits 0-4)
*
@@ -78,12 +77,12 @@
*
* Page | Register | Function
* -----+------------+---------------------------------------------------
+ * 0 | 0x01 | setredbalance()
+ * 0 | 0x03 | setbluebalance()
* 0 | 0x0f..0x20 | setcolors()
* 0 | 0xa2..0xab | setbrightcont()
* 0 | 0xb6 | setsharpness()
- * 0 | 0xc5 | setredbalance()
* 0 | 0xc6 | setwhitebalance()
- * 0 | 0xc7 | setbluebalance()
* 0 | 0xdc | setbrightcont(), setcolors()
* 3 | 0x02 | setexposure()
* 3 | 0x10, 0x12 | setgain()
@@ -99,10 +98,13 @@
/* Include pac common sof detection functions */
#include "pac_common.h"
-#define PAC7302_GAIN_DEFAULT 15
-#define PAC7302_GAIN_KNEE 42
-#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
-#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
+#define PAC7302_RGB_BALANCE_MIN 0
+#define PAC7302_RGB_BALANCE_MAX 200
+#define PAC7302_RGB_BALANCE_DEFAULT 100
+#define PAC7302_GAIN_DEFAULT 15
+#define PAC7302_GAIN_KNEE 42
+#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
+#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
"Thomas Kaiser thomas@kaiser-linux.li");
@@ -439,12 +441,31 @@ static void setwhitebalance(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0xdc, 0x01);
}
+static u8 rgbbalance_ctrl_to_reg_value(s32 rgb_ctrl_val)
+{
+ const unsigned int k = 1000; /* precision factor */
+ unsigned int norm;
+
+ /* Normed value [0...k] */
+ norm = k * (rgb_ctrl_val - PAC7302_RGB_BALANCE_MIN)
+ / (PAC7302_RGB_BALANCE_MAX - PAC7302_RGB_BALANCE_MIN);
+ /* Quadratic approach improves control at small (register) values: */
+ return 64 * norm * norm / (k*k) + 32 * norm / k + 32;
+ /* Y = 64*X*X + 32*X + 32
+ * => register values 0x20-0x80; Windows driver uses these limits */
+
+ /* NOTE: for full value range (0x00-0xff) use
+ * Y = 254*X*X + X
+ * => 254 * norm * norm / (k*k) + 1 * norm / k */
+}
+
static void setredbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc5, sd->red_balance->val);
+ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
+ reg_w(gspca_dev, 0x01,
+ rgbbalance_ctrl_to_reg_value(sd->red_balance->val));
reg_w(gspca_dev, 0xdc, 0x01);
}
@@ -454,7 +475,8 @@ static void setbluebalance(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc7, sd->blue_balance->val);
+ reg_w(gspca_dev, 0x03,
+ rgbbalance_ctrl_to_reg_value(sd->blue_balance->val));
reg_w(gspca_dev, 0xdc, 0x01);
}
@@ -643,9 +665,15 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_WHITE_BALANCE_TEMPERATURE,
0, 255, 1, 55);
sd->red_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
- V4L2_CID_RED_BALANCE, 0, 3, 1, 1);
+ V4L2_CID_RED_BALANCE,
+ PAC7302_RGB_BALANCE_MIN,
+ PAC7302_RGB_BALANCE_MAX,
+ 1, PAC7302_RGB_BALANCE_DEFAULT);
sd->blue_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
- V4L2_CID_BLUE_BALANCE, 0, 3, 1, 1);
+ V4L2_CID_BLUE_BALANCE,
+ PAC7302_RGB_BALANCE_MIN,
+ PAC7302_RGB_BALANCE_MAX,
+ 1, PAC7302_RGB_BALANCE_DEFAULT);
gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index fd1f8d2d3b0b..1220340e7602 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -496,7 +496,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
}
}
-static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
+static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
{
int retry = 60;
@@ -504,16 +504,19 @@ static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
return;
/* is i2c ready */
- reg_w(gspca_dev, 0x08, buffer, 8);
+ reg_w(gspca_dev, 0x08, buf, 8);
while (retry--) {
if (gspca_dev->usb_err < 0)
return;
- msleep(10);
+ msleep(1);
reg_r(gspca_dev, 0x08);
if (gspca_dev->usb_buf[0] & 0x04) {
if (gspca_dev->usb_buf[0] & 0x08) {
dev_err(gspca_dev->v4l2_dev.dev,
- "i2c write error\n");
+ "i2c error writing %02x %02x %02x %02x"
+ " %02x %02x %02x %02x\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7]);
gspca_dev->usb_err = -EIO;
}
return;
@@ -530,7 +533,7 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,
for (;;) {
if (gspca_dev->usb_err < 0)
return;
- reg_w(gspca_dev, 0x08, *buffer, 8);
+ i2c_w(gspca_dev, *buffer);
len -= 8;
if (len <= 0)
break;
@@ -1449,6 +1452,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
#endif
+ {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
{USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
{USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
{USB_DEVICE(0x0c45, 0x602a), SB(HV7131D, 102)},
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index 5a86047b846f..36307a9028a9 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -1550,6 +1550,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
0,
gspca_dev->usb_buf, 8,
500);
+ msleep(2);
if (ret < 0) {
pr_err("i2c_w1 err %d\n", ret);
gspca_dev->usb_err = ret;
diff --git a/drivers/media/usb/gspca/spca506.c b/drivers/media/usb/gspca/spca506.c
index bab01c86c315..bcd2c04c770e 100644
--- a/drivers/media/usb/gspca/spca506.c
+++ b/drivers/media/usb/gspca/spca506.c
@@ -590,8 +590,7 @@ static const struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
THIS_MODULE);
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 304f43ef59eb..84dc26fe80ee 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -401,12 +401,14 @@ static int hdpvr_probe(struct usb_interface *interface,
client = hdpvr_register_ir_rx_i2c(dev);
if (!client) {
v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+ retval = -ENODEV;
goto reg_fail;
}
client = hdpvr_register_ir_tx_i2c(dev);
if (!client) {
v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+ retval = -ENODEV;
goto reg_fail;
}
#endif
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 82e819fa91c0..031cf024304c 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -55,7 +55,7 @@ struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
/* Our default information for ir-kbd-i2c.c to use */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = "HD-PVR";
init_data->polling_interval = 405; /* ms, duplicated from Windows */
hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index 32b11c15bb1a..60a2604e4cb3 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -17,9 +17,9 @@ config VIDEO_PVRUSB2
module will be called pvrusb2
config VIDEO_PVRUSB2_SYSFS
- bool "pvrusb2 sysfs support (EXPERIMENTAL)"
+ bool "pvrusb2 sysfs support"
default y
- depends on VIDEO_PVRUSB2 && SYSFS && EXPERIMENTAL
+ depends on VIDEO_PVRUSB2 && SYSFS
---help---
This option enables the operation of a sysfs based
interface for query and control of the pvrusb2 driver.
@@ -33,9 +33,9 @@ config VIDEO_PVRUSB2_SYSFS
Note: This feature is experimental and subject to change.
config VIDEO_PVRUSB2_DVB
- bool "pvrusb2 ATSC/DVB support (EXPERIMENTAL)"
+ bool "pvrusb2 ATSC/DVB support"
default y
- depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL
+ depends on VIDEO_PVRUSB2 && DVB_CORE
select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index fb828ba1dbbe..299751a8b06b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -3563,9 +3563,9 @@ void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *hdw,
enum pvr2_v4l_type index,int v)
{
switch (index) {
- case pvr2_v4l_type_video: hdw->v4l_minor_number_video = v;
- case pvr2_v4l_type_vbi: hdw->v4l_minor_number_vbi = v;
- case pvr2_v4l_type_radio: hdw->v4l_minor_number_radio = v;
+ case pvr2_v4l_type_video: hdw->v4l_minor_number_video = v; break;
+ case pvr2_v4l_type_vbi: hdw->v4l_minor_number_vbi = v; break;
+ case pvr2_v4l_type_radio: hdw->v4l_minor_number_radio = v; break;
default: break;
}
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 885ce11f222d..9ab596c78a4e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -581,7 +581,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = hdw->hdw_desc->description;
init_data->polling_interval = 100; /* ms From ir-kbd-i2c */
/* IR Receiver */
@@ -596,7 +596,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = hdw->hdw_desc->description;
/* IR Receiver */
info.addr = 0x71;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index db249cad3cd9..6930676051e7 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -196,7 +196,7 @@ static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
return ret;
}
-int pvr2_s_std(struct file *file, void *priv, v4l2_std_id *std)
+static int pvr2_s_std(struct file *file, void *priv, v4l2_std_id *std)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
@@ -365,7 +365,7 @@ static int pvr2_s_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
vt->audmode);
}
-int pvr2_s_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
+static int pvr2_s_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
diff --git a/drivers/media/usb/pwc/pwc-ctrl.c b/drivers/media/usb/pwc/pwc-ctrl.c
index 1f506fde97d0..3a1618580ed6 100644
--- a/drivers/media/usb/pwc/pwc-ctrl.c
+++ b/drivers/media/usb/pwc/pwc-ctrl.c
@@ -179,6 +179,8 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
return -EINVAL;
if (frames < 4)
frames = 4;
+ else if (size > PSZ_QCIF && frames > 15)
+ frames = 15;
else if (frames > 25)
frames = 25;
frames = frames2frames[frames];
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 42e36bac4d72..5210239cbaee 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -155,7 +155,7 @@ static struct video_device pwc_template = {
/***************************************************************************/
/* Private functions */
-struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
+static struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
{
unsigned long flags = 0;
struct pwc_frame_buf *buf = NULL;
@@ -1000,7 +1000,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->vb_queue.buf_struct_size = sizeof(struct pwc_frame_buf);
pdev->vb_queue.ops = &pwc_vb_queue_ops;
pdev->vb_queue.mem_ops = &vb2_vmalloc_memops;
- vb2_queue_init(&pdev->vb_queue);
+ rc = vb2_queue_init(&pdev->vb_queue);
+ if (rc < 0) {
+ PWC_ERROR("Oops, could not initialize vb2 queue.\n");
+ goto err_free_mem;
+ }
/* Init video_device structure */
memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 2191f6ddf9e7..8ebec0d7bf59 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1651,7 +1651,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
int is_ntsc = 0;
#define NUM_FRAME_ENUMS 4
int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
- if (fe->index < 0 || fe->index >= NUM_FRAME_ENUMS)
+ if (fe->index >= NUM_FRAME_ENUMS)
return -EINVAL;
switch (fe->width) {
case 640:
diff --git a/drivers/media/usb/siano/Kconfig b/drivers/media/usb/siano/Kconfig
index 3c76e62d820d..5afbd9a4b55c 100644
--- a/drivers/media/usb/siano/Kconfig
+++ b/drivers/media/usb/siano/Kconfig
@@ -4,7 +4,8 @@
config SMS_USB_DRV
tristate "Siano SMS1xxx based MDTV receiver"
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
+ select MEDIA_COMMON_OPTIONS
---help---
Choose if you would like to have Siano's support for USB interface
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index aac622200e99..de2c10289eec 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -389,7 +389,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
return rc;
}
-static int __devinit smsusb_probe(struct usb_interface *intf,
+static int smsusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/media/usb/sn9c102/sn9c102_core.c
index 5bfc8e2f018f..73605864fffa 100644
--- a/drivers/media/usb/sn9c102/sn9c102_core.c
+++ b/drivers/media/usb/sn9c102/sn9c102_core.c
@@ -2481,11 +2481,13 @@ sn9c102_vidioc_enum_framesizes(struct sn9c102_device* cam, void __user * arg)
if (frmsize.pixel_format != V4L2_PIX_FMT_SN9C10X &&
frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
return -EINVAL;
+ break;
case BRIDGE_SN9C105:
case BRIDGE_SN9C120:
if (frmsize.pixel_format != V4L2_PIX_FMT_JPEG &&
frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
return -EINVAL;
+ break;
}
frmsize.type = V4L2_FRMSIZE_TYPE_STEPWISE;
diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c
index 176ac937306b..850cf285ada8 100644
--- a/drivers/media/usb/stk1160/stk1160-i2c.c
+++ b/drivers/media/usb/stk1160/stk1160-i2c.c
@@ -116,7 +116,7 @@ static int stk1160_i2c_read_reg(struct stk1160 *dev, u8 addr,
if (rc < 0)
return rc;
- stk1160_read_reg(dev, STK1160_SBUSR_RD, value);
+ rc = stk1160_read_reg(dev, STK1160_SBUSR_RD, value);
if (rc < 0)
return rc;
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 8bdfb0275313..fa3671de02aa 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -475,7 +475,11 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
if (!dev->isoc_ctl.transfer_buffer[i]) {
stk1160_err("cannot alloc %d bytes for tx[%d] buffer\n",
sb_size, i);
- goto free_i_bufs;
+
+ /* Not enough transfer buffers, so just give up */
+ if (i < STK1160_MIN_BUFS)
+ goto free_i_bufs;
+ goto nomore_tx_bufs;
}
memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
@@ -506,13 +510,28 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
}
}
- stk1160_dbg("urbs allocated\n");
+ stk1160_dbg("%d urbs allocated\n", num_bufs);
/* At last we can say we have some buffers */
dev->isoc_ctl.num_bufs = num_bufs;
return 0;
+nomore_tx_bufs:
+ /*
+ * Failed to allocate desired buffer count. However, we may have
+ * enough to work fine, so we just free the extra urb,
+ * store the allocated count and keep going, fingers crossed!
+ */
+ usb_free_urb(dev->isoc_ctl.urb[i]);
+ dev->isoc_ctl.urb[i] = NULL;
+
+ stk1160_warn("%d urbs allocated. Trying to continue...\n", i);
+
+ dev->isoc_ctl.num_bufs = i;
+
+ return 0;
+
free_i_bufs:
/* Save the allocated buffers so far, so we can properly free them */
dev->isoc_ctl.num_bufs = i+1;
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 68c8707d36ab..05b05b160e1e 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -30,11 +30,12 @@
#define STK1160_VERSION "0.9.5"
#define STK1160_VERSION_NUM 0x000905
-/* TODO: Decide on number of packets for each buffer */
+/* Decide on number of packets for each buffer */
#define STK1160_NUM_PACKETS 64
/* Number of buffers for isoc transfers */
-#define STK1160_NUM_BUFS 16 /* TODO */
+#define STK1160_NUM_BUFS 16
+#define STK1160_MIN_BUFS 1
/* TODO: This endpoint address should be retrieved */
#define STK1160_EP_VIDEO 0x82
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 86a0fc56c330..5d3c032d733c 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -54,10 +54,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jaime Velasco Juan <jsagarribay@gmail.com> and Nicolas VIVIEN");
MODULE_DESCRIPTION("Syntek DC1125 webcam driver");
-
-/* bool for webcam LED management */
-int first_init = 1;
-
/* Some cameras have audio interfaces, we aren't interested in those */
static struct usb_device_id stkwebcam_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x174f, 0xa311, 0xff, 0xff, 0xff) },
@@ -554,6 +550,7 @@ static void stk_free_buffers(struct stk_camera *dev)
static int v4l_stk_open(struct file *fp)
{
+ static int first_init = 1; /* webcam LED management */
struct stk_camera *dev;
struct video_device *vdev;
diff --git a/drivers/media/usb/tlg2300/pd-dvb.c b/drivers/media/usb/tlg2300/pd-dvb.c
index 30fcb117e898..ca4994a5190c 100644
--- a/drivers/media/usb/tlg2300/pd-dvb.c
+++ b/drivers/media/usb/tlg2300/pd-dvb.c
@@ -1,6 +1,7 @@
#include "pd-common.h"
#include <linux/kernel.h>
#include <linux/usb.h>
+#include <linux/time.h>
#include <linux/dvb/dmx.h>
#include <linux/delay.h>
#include <linux/gfp.h>
diff --git a/drivers/media/usb/tlg2300/pd-video.c b/drivers/media/usb/tlg2300/pd-video.c
index 1f448ac7a496..3082bfa9b2c5 100644
--- a/drivers/media/usb/tlg2300/pd-video.c
+++ b/drivers/media/usb/tlg2300/pd-video.c
@@ -888,7 +888,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *in)
{
struct front_face *front = fh;
- if (in->index < 0 || in->index >= POSEIDON_INPUTS)
+ if (in->index >= POSEIDON_INPUTS)
return -EINVAL;
strcpy(in->name, pd_inputs[in->index].name);
in->type = V4L2_INPUT_TYPE_TUNER;
@@ -923,7 +923,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
struct poseidon *pd = front->pd;
s32 ret, cmd_status;
- if (i < 0 || i >= POSEIDON_INPUTS)
+ if (i >= POSEIDON_INPUTS)
return -EINVAL;
ret = send_set_req(pd, SGNL_SRC_SEL,
pd_inputs[i].tlg_src, &cmd_status);
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index dffbd4bd47b1..8a6bbf1d80e1 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -109,12 +109,12 @@ static int tm6000_ir_config(struct tm6000_IR *ir)
*/
switch (ir->rc_type) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
leader = 900; /* ms */
pulse = 700; /* ms - the actual value would be 562 */
break;
default:
- case RC_TYPE_RC5:
+ case RC_BIT_RC5:
leader = 900; /* ms - from the NEC decoding */
pulse = 1780; /* ms - The actual value would be 1776 */
break;
@@ -122,12 +122,12 @@ static int tm6000_ir_config(struct tm6000_IR *ir)
pulse = ir_clock_mhz * pulse;
leader = ir_clock_mhz * leader;
- if (ir->rc_type == RC_TYPE_NEC)
+ if (ir->rc_type == RC_BIT_NEC)
leader = leader | 0x8000;
dprintk(2, "%s: %s, %d MHz, leader = 0x%04x, pulse = 0x%06x \n",
__func__,
- (ir->rc_type == RC_TYPE_NEC) ? "NEC" : "RC-5",
+ (ir->rc_type == RC_BIT_NEC) ? "NEC" : "RC-5",
ir_clock_mhz, leader, pulse);
/* Remote WAKEUP = enable, normal mode, from IR decoder output */
@@ -297,7 +297,7 @@ static void tm6000_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
+static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
struct tm6000_IR *ir = rc->priv;
@@ -306,10 +306,10 @@ static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
dprintk(2, "%s\n",__func__);
- if ((rc->rc_map.scan) && (rc_type == RC_TYPE_NEC))
+ if ((rc->rc_map.scan) && (*rc_type == RC_BIT_NEC))
ir->key_addr = ((rc->rc_map.scan[0].scancode >> 8) & 0xffff);
- ir->rc_type = rc_type;
+ ir->rc_type = *rc_type;
tm6000_ir_config(ir);
/* TODO */
@@ -398,6 +398,7 @@ int tm6000_ir_init(struct tm6000_core *dev)
struct tm6000_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
+ u64 rc_type;
if (!enable_ir)
return -ENODEV;
@@ -421,7 +422,7 @@ int tm6000_ir_init(struct tm6000_core *dev)
ir->rc = rc;
/* input setup */
- rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
/* Needed, in order to support NEC remotes with 24 or 32 bits */
rc->scanmask = 0xffff;
rc->priv = ir;
@@ -444,7 +445,8 @@ int tm6000_ir_init(struct tm6000_core *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- tm6000_ir_change_protocol(rc, RC_TYPE_UNKNOWN);
+ rc_type = RC_BIT_UNKNOWN;
+ tm6000_ir_change_protocol(rc, &rc_type);
rc->input_name = ir->name;
rc->input_phys = ir->phys;
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 4342cd4f5c8a..f656fd7a39a2 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1802,6 +1802,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
if (!dev->radio_dev) {
printk(KERN_INFO "%s: can't register radio device\n",
dev->name);
+ ret = -ENXIO;
return ret; /* FIXME release resource */
}
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 5c36a57e6590..ad7f7448072e 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1363,7 +1363,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
}
/* register video4linux devices */
-static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
+static int usbvision_register_video(struct usb_usbvision *usbvision)
{
/* Video Device: */
usbvision->vdev = usbvision_vdev_init(usbvision,
@@ -1510,8 +1510,8 @@ static void usbvision_configure_video(struct usb_usbvision *usbvision)
* if it looks like USBVISION video device
*
*/
-static int __devinit usbvision_probe(struct usb_interface *intf,
- const struct usb_device_id *devid)
+static int usbvision_probe(struct usb_interface *intf,
+ const struct usb_device_id *devid)
{
struct usb_device *dev = usb_get_dev(interface_to_usbdev(intf));
struct usb_interface *uif;
@@ -1619,7 +1619,7 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
* with no ill consequences.
*
*/
-static void __devexit usbvision_disconnect(struct usb_interface *intf)
+static void usbvision_disconnect(struct usb_interface *intf)
{
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
@@ -1664,7 +1664,7 @@ static struct usb_driver usbvision_driver = {
.name = "usbvision",
.id_table = usbvision_table,
.probe = usbvision_probe,
- .disconnect = __devexit_p(usbvision_disconnect),
+ .disconnect = usbvision_disconnect,
};
/*
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 43cf61fe4943..8a25876d72c6 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -167,7 +167,7 @@ enum {
/* This macro restricts an int variable to an inclusive range */
#define RESTRICT_TO_RANGE(v, mi, ma) \
- { if ((v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); }
+ { if (((int)v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); }
/*
* We use macros to do YUV -> RGB conversion because this is
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index f7061a5ef1d2..d5baab17a5ef 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -927,7 +927,7 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
int ret;
if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
- return -EINVAL;
+ return -EACCES;
if (!ctrl->loaded) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
@@ -1431,8 +1431,10 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0)
+ if (ctrl == NULL)
return -EINVAL;
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
+ return -EACCES;
/* Clamp out of range values. */
switch (mapping->v4l2_type) {
@@ -1452,8 +1454,12 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
if (step == 0)
step = 1;
- xctrl->value = min + (xctrl->value - min + step/2) / step * step;
- xctrl->value = clamp(xctrl->value, min, max);
+ xctrl->value = min + ((u32)(xctrl->value - min) + step / 2)
+ / step * step;
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
+ xctrl->value = clamp(xctrl->value, min, max);
+ else
+ xctrl->value = clamp_t(u32, xctrl->value, min, max);
value = xctrl->value;
break;
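For reference, the rounding and clamping the hunk above performs on a requested control value can be shown in isolation. This is only an illustration of the signed case (the patch uses clamp_t(u32, ...) for unsigned controls); the limits used in main() are invented example values, not taken from any UVC mapping.

#include <stdint.h>
#include <stdio.h>

/* Round v to the nearest multiple of 'step' measured from 'min',
 * then clamp the result into [min, max] (signed controls only). */
static int32_t round_and_clamp(int32_t v, int32_t min, int32_t max, uint32_t step)
{
        if (step == 0)
                step = 1;
        v = min + (int32_t)(((uint32_t)(v - min) + step / 2) / step * step);
        if (v < min)
                v = min;
        else if (v > max)
                v = max;
        return v;
}

int main(void)
{
        /* e.g. a control with min=-64, max=64, step=8: a request of 13 becomes 16 */
        printf("%d\n", round_and_clamp(13, -64, 64, 8));
        return 0;
}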
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 5967081747ce..5dbefa68b1d2 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1562,6 +1562,9 @@ static int uvc_scan_device(struct uvc_device *dev)
INIT_LIST_HEAD(&chain->entities);
mutex_init(&chain->ctrl_mutex);
chain->dev = dev;
+ v4l2_prio_init(&chain->prio);
+
+ term->flags |= UVC_ENTITY_FLAG_DEFAULT;
if (uvc_scan_chain(chain, term) < 0) {
kfree(chain);
@@ -1722,6 +1725,8 @@ static int uvc_register_video(struct uvc_device *dev,
vdev->v4l2_dev = &dev->vdev;
vdev->fops = &uvc_fops;
vdev->release = uvc_release;
+ vdev->prio = &stream->chain->prio;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags);
if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
vdev->vfl_dir = VFL_DIR_TX;
strlcpy(vdev->name, dev->name, sizeof vdev->name);
@@ -1741,6 +1746,11 @@ static int uvc_register_video(struct uvc_device *dev,
return ret;
}
+ if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ stream->chain->caps |= V4L2_CAP_VIDEO_CAPTURE;
+ else
+ stream->chain->caps |= V4L2_CAP_VIDEO_OUTPUT;
+
atomic_inc(&dev->nstreams);
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index 29e239911d0e..dc56a59ecadc 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -93,6 +93,8 @@ static int uvc_mc_init_entity(struct uvc_entity *entity)
} else if (entity->vdev != NULL) {
ret = media_entity_init(&entity->vdev->entity,
entity->num_pads, entity->pads, 0);
+ if (entity->flags & UVC_ENTITY_FLAG_DEFAULT)
+ entity->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
} else
ret = 0;
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 18a91fae6bc1..778addc5caff 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -128,7 +128,7 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
int ret;
queue->queue.type = type;
- queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
+ queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue->queue.drv_priv = queue;
queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
queue->queue.ops = &uvc_queue_qops;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index f00db3060e0e..68d59b527492 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -165,17 +165,18 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
fcc[0], fcc[1], fcc[2], fcc[3],
fmt->fmt.pix.width, fmt->fmt.pix.height);
- /* Check if the hardware supports the requested format. */
+ /* Check if the hardware supports the requested format, use the default
+ * format otherwise.
+ */
for (i = 0; i < stream->nformats; ++i) {
format = &stream->format[i];
if (format->fcc == fmt->fmt.pix.pixelformat)
break;
}
- if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
- uvc_trace(UVC_TRACE_FORMAT, "Unsupported format 0x%08x.\n",
- fmt->fmt.pix.pixelformat);
- return -EINVAL;
+ if (i == stream->nformats) {
+ format = stream->def_format;
+ fmt->fmt.pix.pixelformat = format->fcc;
}
/* Find the closest image size. The distance between image sizes is
@@ -564,15 +565,30 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
usb_make_path(stream->dev->udev,
cap->bus_info, sizeof(cap->bus_info));
cap->version = LINUX_VERSION_CODE;
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | chain->caps;
if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
- | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
+ | V4L2_CAP_STREAMING;
else
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT
- | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
break;
}
+ /* Priority */
+ case VIDIOC_G_PRIORITY:
+ *(u32 *)arg = v4l2_prio_max(vdev->prio);
+ break;
+
+ case VIDIOC_S_PRIORITY:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_prio_change(vdev->prio, &handle->vfh.prio,
+ *(u32 *)arg);
+
/* Get, Set & Query control */
case VIDIOC_QUERYCTRL:
return uvc_query_v4l2_ctrl(chain, arg);
@@ -601,6 +617,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_control *ctrl = arg;
struct v4l2_ext_control xctrl;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
memset(&xctrl, 0, sizeof xctrl);
xctrl.id = ctrl->id;
xctrl.value = ctrl->value;
@@ -647,6 +667,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
case VIDIOC_S_EXT_CTRLS:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+ /* Fall through */
case VIDIOC_TRY_EXT_CTRLS:
{
struct v4l2_ext_controls *ctrls = arg;
@@ -661,7 +685,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ret = uvc_ctrl_set(chain, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
- ctrls->error_idx = i;
+ ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS
+ ? ctrls->count : i;
return ret;
}
}
@@ -739,6 +764,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
u32 input = *(u32 *)arg + 1;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -792,6 +821,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
case VIDIOC_S_FMT:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -894,6 +927,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_v4l2_get_streamparm(stream, arg);
case VIDIOC_S_PARM:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -924,10 +961,14 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_G_CROP:
case VIDIOC_S_CROP:
- return -EINVAL;
+ return -ENOTTY;
/* Buffers & streaming */
case VIDIOC_REQBUFS:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -973,6 +1014,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (*type != stream->type)
return -EINVAL;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if (!uvc_has_privileges(handle))
return -EBUSY;
@@ -991,6 +1036,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (*type != stream->type)
return -EINVAL;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if (!uvc_has_privileges(handle))
return -EBUSY;
@@ -1030,7 +1079,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_ENUMOUTPUT:
uvc_trace(UVC_TRACE_IOCTL, "Unsupported ioctl 0x%08x\n", cmd);
- return -EINVAL;
+ return -ENOTTY;
case UVCIOC_CTRL_MAP:
return uvc_ioctl_ctrl_map(chain, arg);
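The v4l2_prio_check() calls added throughout this ioctl handler pair with the new VIDIOC_G_PRIORITY/VIDIOC_S_PRIORITY cases above. As a hedged userspace sketch (the device path and the chosen priority are assumptions, not part of the patch), a client that wants its state-changing ioctls to take precedence would do roughly:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        enum v4l2_priority prio = V4L2_PRIORITY_RECORD;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* raise this file handle's priority before calling S_FMT, REQBUFS, ... */
        if (ioctl(fd, VIDIOC_S_PRIORITY, &prio) < 0)
                perror("VIDIOC_S_PRIORITY");

        /* report the highest priority currently held on the device */
        if (ioctl(fd, VIDIOC_G_PRIORITY, &prio) == 0)
                printf("max priority: %d\n", prio);

        close(fd);
        return 0;
}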
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 57c3076a4625..3394c3432011 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1812,6 +1812,7 @@ int uvc_video_init(struct uvc_streaming *stream)
probe->bFormatIndex = format->index;
probe->bFrameIndex = frame->bFrameIndex;
+ stream->def_format = format;
stream->cur_format = format;
stream->cur_frame = frame;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index af216ec45e39..af505fdd9b3f 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -225,10 +225,14 @@ struct uvc_format_desc {
* always be accessed with the UVC_ENTITY_* macros and never directly.
*/
+#define UVC_ENTITY_FLAG_DEFAULT (1 << 0)
+
struct uvc_entity {
struct list_head list; /* Entity as part of a UVC device. */
struct list_head chain; /* Entity as part of a video device
* chain. */
+ unsigned int flags;
+
__u8 id;
__u16 type;
char name[64];
@@ -371,6 +375,9 @@ struct uvc_video_chain {
struct uvc_entity *selector; /* Selector unit */
struct mutex ctrl_mutex; /* Protects ctrl.info */
+
+ struct v4l2_prio_state prio; /* V4L2 priority state */
+ u32 caps; /* V4L2 chain-wide caps */
};
struct uvc_stats_frame {
@@ -436,6 +443,7 @@ struct uvc_streaming {
struct uvc_format *format;
struct uvc_streaming_control ctrl;
+ struct uvc_format *def_format;
struct uvc_format *cur_format;
struct uvc_frame *cur_frame;
/* Protect access to ctrl, cur_format, cur_frame and hardware video
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 9afab35878b4..39edd4442932 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1007,8 +1007,7 @@ static void read_pipe_completion(struct urb *purb)
return;
}
- if (purb->actual_length < 0 ||
- purb->actual_length > pipe_info->transfer_size) {
+ if (purb->actual_length > pipe_info->transfer_size) {
dev_err(&cam->udev->dev, "wrong number of bytes\n");
return;
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 0c54e19d9944..65875c3aba1b 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -59,6 +59,7 @@ config VIDEOBUF_DVB
# Used by drivers that need Videobuf2 modules
config VIDEOBUF2_CORE
+ select DMA_SHARED_BUFFER
tristate
config VIDEOBUF2_MEMOPS
@@ -68,11 +69,13 @@ config VIDEOBUF2_DMA_CONTIG
tristate
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
config VIDEOBUF2_VMALLOC
tristate
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
config VIDEOBUF2_DMA_SG
tristate
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index f995dd31151d..380ddd89fa4c 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -837,7 +837,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
struct v4l2_dv_timings *fmt)
{
int pix_clk;
- int v_fp, v_bp, h_fp, h_bp, hsync;
+ int v_fp, v_bp, h_fp, hsync;
int frame_width, image_height, image_width;
bool default_gtf;
int h_blank;
@@ -885,7 +885,6 @@ bool v4l2_detect_gtf(unsigned frame_height,
hsync = hsync - hsync % GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
- h_bp = h_blank / 2;
fmt->bt.polarities = polarities;
fmt->bt.width = image_width;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 83ffb6436baf..7157af301b14 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -297,6 +297,7 @@ struct v4l2_plane32 {
union {
__u32 mem_offset;
compat_long_t userptr;
+ __s32 fd;
} m;
__u32 data_offset;
__u32 reserved[11];
@@ -318,6 +319,7 @@ struct v4l2_buffer32 {
__u32 offset;
compat_long_t userptr;
compat_caddr_t planes;
+ __s32 fd;
} m;
__u32 length;
__u32 reserved2;
@@ -341,6 +343,9 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
up_pln = compat_ptr(p);
if (put_user((unsigned long)up_pln, &up->m.userptr))
return -EFAULT;
+ } else if (memory == V4L2_MEMORY_DMABUF) {
+ if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
+ return -EFAULT;
} else {
if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
sizeof(__u32)))
@@ -364,6 +369,11 @@ static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
sizeof(__u32)))
return -EFAULT;
+ /* For DMABUF, driver might've set up the fd, so copy it back. */
+ if (memory == V4L2_MEMORY_DMABUF)
+ if (copy_in_user(&up32->m.fd, &up->m.fd,
+ sizeof(int)))
+ return -EFAULT;
return 0;
}
@@ -446,6 +456,10 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (get_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
+ case V4L2_MEMORY_DMABUF:
+ if (get_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
}
}
@@ -510,6 +524,10 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
+ case V4L2_MEMORY_DMABUF:
+ if (put_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
}
}
@@ -1000,6 +1018,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_S_FBUF32:
case VIDIOC_OVERLAY32:
case VIDIOC_QBUF32:
+ case VIDIOC_EXPBUF:
case VIDIOC_DQBUF32:
case VIDIOC_STREAMON32:
case VIDIOC_STREAMOFF32:
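The compat changes above only shuttle the new m.fd union member between the 32-bit and native v4l2_buffer/v4l2_plane layouts. On the native side, queueing an imported dma-buf looks roughly like the sketch below; the buffer type, the index and the origin of dmabuf_fd are assumptions for illustration only.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Queue an existing dma-buf fd into a capture queue that was set up
 * with VIDIOC_REQBUFS and memory = V4L2_MEMORY_DMABUF. */
int queue_dmabuf(int video_fd, unsigned int index, int dmabuf_fd)
{
        struct v4l2_buffer buf;

        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_DMABUF;
        buf.index = index;
        buf.m.fd = dmabuf_fd;   /* the union member this series introduces */

        return ioctl(video_fd, VIDIOC_QBUF, &buf);
}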
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index a2df842e5100..98dcad9c8a3b 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -571,6 +571,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 18a040b935a3..c72009218152 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 9e3fc040ea20..e57c002b4150 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 8f388ff31ebb..aa6e7c788db2 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -155,6 +155,7 @@ static const char *v4l2_memory_names[] = {
[V4L2_MEMORY_MMAP] = "mmap",
[V4L2_MEMORY_USERPTR] = "userptr",
[V4L2_MEMORY_OVERLAY] = "overlay",
+ [V4L2_MEMORY_DMABUF] = "dmabuf",
};
#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
@@ -453,6 +454,15 @@ static void v4l_print_buffer(const void *arg, bool write_only)
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
+static void v4l_print_exportbuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_exportbuffer *p = arg;
+
+ pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
+ p->fd, prt_names(p->type, v4l2_type_names),
+ p->index, p->plane, p->flags);
+}
+
static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
@@ -1960,6 +1970,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 3ac83583ad7a..438ea45d1074 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -369,6 +369,19 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
/**
+ * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_exportbuffer *eb)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
+ return vb2_expbuf(vq, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
+/**
* v4l2_m2m_streamon() - turn on streaming for a video queue
*/
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
@@ -510,12 +523,10 @@ struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
- if (!m2m_ops)
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
+ WARN_ON(!m2m_ops->job_abort))
return ERR_PTR(-EINVAL);
- BUG_ON(!m2m_ops->device_run);
- BUG_ON(!m2m_ops->job_abort);
-
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
if (!m2m_dev)
return ERR_PTR(-ENOMEM);
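A mem2mem driver opts into buffer export simply by forwarding vidioc_expbuf to the new v4l2_m2m_expbuf() helper. The fragment below is a sketch rather than code from this series; the my_ctx layout and the recovery of the context from the fh pointer are assumptions that vary between drivers.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>

struct my_ctx {
        struct v4l2_fh fh;
        struct v4l2_m2m_ctx *m2m_ctx;
};

static int my_vidioc_expbuf(struct file *file, void *priv,
                            struct v4l2_exportbuffer *eb)
{
        struct my_ctx *ctx = container_of(priv, struct my_ctx, fh);

        /* picks the source or destination queue based on eb->type */
        return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}

static const struct v4l2_ioctl_ops my_ioctl_ops = {
        /* ... the usual reqbufs/qbuf/dqbuf handlers ... */
        .vidioc_expbuf = my_vidioc_expbuf,
};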
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index dced41c1d993..996c248dea42 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -412,20 +412,20 @@ static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad,
struct v4l2_subdev_format *fmt)
{
- switch (media_entity_type(pad->entity)) {
- case MEDIA_ENT_T_V4L2_SUBDEV:
+ if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+
fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
fmt->pad = pad->index;
- return v4l2_subdev_call(media_entity_to_v4l2_subdev(
- pad->entity),
- pad, get_fmt, NULL, fmt);
- default:
- WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n",
- media_entity_type(pad->entity), pad->entity->name);
- /* Fall through */
- case MEDIA_ENT_T_DEVNODE_V4L:
- return -EINVAL;
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
}
+
+ WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L,
+ "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
+ pad->entity->type, pad->entity->name);
+
+ return -EINVAL;
}
int v4l2_subdev_link_validate(struct media_link *link)
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index bf7a326b1cdc..5449e8aa984a 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -335,6 +335,9 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
case V4L2_MEMORY_OVERLAY:
b->m.offset = vb->boff;
break;
+ case V4L2_MEMORY_DMABUF:
+ /* DMABUF is not handled in the videobuf framework */
+ break;
}
b->flags = 0;
@@ -405,6 +408,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
break;
case V4L2_MEMORY_USERPTR:
case V4L2_MEMORY_OVERLAY:
+ case V4L2_MEMORY_DMABUF:
/* nothing */
break;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 432df119af27..e02c4797b1c6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -109,6 +109,36 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
}
/**
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+{
+ if (!p->mem_priv)
+ return;
+
+ if (p->dbuf_mapped)
+ call_memop(q, unmap_dmabuf, p->mem_priv);
+
+ call_memop(q, detach_dmabuf, p->mem_priv);
+ dma_buf_put(p->dbuf);
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+}
+
+/**
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* every buffer on the queue
*/
@@ -230,6 +260,8 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
/* Free MMAP buffers or release USERPTR buffers */
if (q->memory == V4L2_MEMORY_MMAP)
__vb2_buf_mem_free(vb);
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ __vb2_buf_dmabuf_put(vb);
else
__vb2_buf_userptr_put(vb);
}
@@ -362,6 +394,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
b->m.offset = vb->v4l2_planes[0].m.mem_offset;
else if (q->memory == V4L2_MEMORY_USERPTR)
b->m.userptr = vb->v4l2_planes[0].m.userptr;
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ b->m.fd = vb->v4l2_planes[0].m.fd;
}
/*
@@ -454,13 +488,28 @@ static int __verify_mmap_ops(struct vb2_queue *q)
}
/**
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+ !q->mem_ops->unmap_dmabuf)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
* __verify_memory_type() - Check whether the memory type and buffer type
* passed to a buffer operation are compatible with the queue.
*/
static int __verify_memory_type(struct vb2_queue *q,
enum v4l2_memory memory, enum v4l2_buf_type type)
{
- if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
+ if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
+ memory != V4L2_MEMORY_DMABUF) {
dprintk(1, "reqbufs: unsupported memory type\n");
return -EINVAL;
}
@@ -484,6 +533,11 @@ static int __verify_memory_type(struct vb2_queue *q,
return -EINVAL;
}
+ if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
+ return -EINVAL;
+ }
+
/*
* Place the busy tests at the end: -EBUSY can be ignored when
* create_bufs is called with count == 0, but count == 0 should still
@@ -790,6 +844,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
struct vb2_queue *q = vb->vb2_queue;
unsigned long flags;
+ unsigned int plane;
if (vb->state != VB2_BUF_STATE_ACTIVE)
return;
@@ -800,6 +855,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
dprintk(4, "Done processing on buffer %d, state: %d\n",
vb->v4l2_buf.index, vb->state);
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, finish, vb->planes[plane].mem_priv);
+
/* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
vb->state = state;
@@ -845,6 +904,16 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
b->m.planes[plane].length;
}
}
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ v4l2_planes[plane].length =
+ b->m.planes[plane].length;
+ v4l2_planes[plane].data_offset =
+ b->m.planes[plane].data_offset;
+ }
+ }
} else {
/*
* Single-planar buffers do not use planes array,
@@ -852,13 +921,22 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
* In videobuf we use our internal V4l2_planes struct for
* single-planar buffers as well, for simplicity.
*/
- if (V4L2_TYPE_IS_OUTPUT(b->type))
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
v4l2_planes[0].bytesused = b->bytesused;
+ v4l2_planes[0].data_offset = 0;
+ }
if (b->memory == V4L2_MEMORY_USERPTR) {
v4l2_planes[0].m.userptr = b->m.userptr;
v4l2_planes[0].length = b->length;
}
+
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ v4l2_planes[0].m.fd = b->m.fd;
+ v4l2_planes[0].length = b->length;
+ v4l2_planes[0].data_offset = 0;
+ }
+
}
vb->v4l2_buf.field = b->field;
@@ -959,14 +1037,121 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/**
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ */
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret;
+ int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+ /* Verify and copy relevant information provided by the userspace */
+ __fill_vb2_buffer(vb, b, planes);
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
+ plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* use DMABUF size if length is not provided */
+ if (planes[plane].length == 0)
+ planes[plane].length = dbuf->size;
+
+ if (planes[plane].length < planes[plane].data_offset +
+ q->plane_sizes[plane]) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Skip the plane if already verified */
+ if (dbuf == vb->planes[plane].dbuf &&
+ vb->v4l2_planes[plane].length == planes[plane].length) {
+ dma_buf_put(dbuf);
+ continue;
+ }
+
+ dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
+
+ /* Release previously acquired memory if present */
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+
+ /* Acquire each plane's memory */
+ mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+ dbuf, planes[plane].length, write);
+ if (IS_ERR(mem_priv)) {
+ dprintk(1, "qbuf: failed to attach dmabuf\n");
+ ret = PTR_ERR(mem_priv);
+ dma_buf_put(dbuf);
+ goto err;
+ }
+
+ vb->planes[plane].dbuf = dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ /* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
+ * really we want to do this just before the DMA, not while queueing
+ * the buffer(s).
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
+ plane);
+ goto err;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
+ }
+
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_qop(q, buf_init, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer initialization failed\n");
+ goto err;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane] = planes[plane];
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ __vb2_buf_dmabuf_put(vb);
+
+ return ret;
+}
+
+/**
* __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
*/
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->queued_count);
+
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, prepare, vb->planes[plane].mem_priv);
+
q->ops->buf_queue(vb);
}
@@ -982,6 +1167,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
case V4L2_MEMORY_USERPTR:
ret = __qbuf_userptr(vb, b);
break;
+ case V4L2_MEMORY_DMABUF:
+ ret = __qbuf_dmabuf(vb, b);
+ break;
default:
WARN(1, "Invalid queue type\n");
ret = -EINVAL;
@@ -1303,6 +1491,30 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
/**
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int i;
+
+ /* nothing to do if the buffer is already dequeued */
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
+ return;
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+
+ /* unmap DMABUF buffer */
+ if (q->memory == V4L2_MEMORY_DMABUF)
+ for (i = 0; i < vb->num_planes; ++i) {
+ if (!vb->planes[i].dbuf_mapped)
+ continue;
+ call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+ vb->planes[i].dbuf_mapped = 0;
+ }
+}
+
+/**
* vb2_dqbuf() - Dequeue a buffer to the userspace
* @q: videobuf2 queue
* @b: buffer structure passed from userspace to vidioc_dqbuf handler
@@ -1363,11 +1575,12 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
__fill_v4l2_buffer(vb, b);
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
+ /* go back to dequeued state */
+ __vb2_dqbuf(vb);
dprintk(1, "dqbuf of buffer %d, with state %d\n",
vb->v4l2_buf.index, vb->state);
- vb->state = VB2_BUF_STATE_DEQUEUED;
return 0;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
@@ -1406,7 +1619,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* Reinitialize all buffers for next use.
*/
for (i = 0; i < q->num_buffers; ++i)
- q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+ __vb2_dqbuf(q->bufs[i]);
}
/**
@@ -1540,6 +1753,79 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
}
/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q: videobuf2 queue
+ * @eb: export buffer structure passed from userspace to vidioc_expbuf
+ * handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+ struct vb2_buffer *vb = NULL;
+ struct vb2_plane *vb_plane;
+ int ret;
+ struct dma_buf *dbuf;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "Queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ if (!q->mem_ops->get_dmabuf) {
+ dprintk(1, "Queue does not support DMA buffer exporting\n");
+ return -EINVAL;
+ }
+
+ if (eb->flags & ~O_CLOEXEC) {
+ dprintk(1, "Queue does support only O_CLOEXEC flag\n");
+ return -EINVAL;
+ }
+
+ if (eb->type != q->type) {
+ dprintk(1, "qbuf: invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ if (eb->index >= q->num_buffers) {
+ dprintk(1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[eb->index];
+
+ if (eb->plane >= vb->num_planes) {
+ dprintk(1, "buffer plane out of range\n");
+ return -EINVAL;
+ }
+
+ vb_plane = &vb->planes[eb->plane];
+
+ dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "Failed to export buffer %d, plane %d\n",
+ eb->index, eb->plane);
+ return -EINVAL;
+ }
+
+ ret = dma_buf_fd(dbuf, eb->flags);
+ if (ret < 0) {
+ dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
+ eb->index, eb->plane, ret);
+ dma_buf_put(dbuf);
+ return ret;
+ }
+
+ dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
+ eb->index, eb->plane, ret);
+ eb->fd = ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
+
+/**
* vb2_mmap() - map video buffers into application address space
* @q: videobuf2 queue
* @vma: vma passed to the mmap file operation handler in the driver
@@ -2245,6 +2531,16 @@ int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
+
/* v4l2_file_operations helpers */
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
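From userspace the vb2_expbuf() path added above is reached through the new VIDIOC_EXPBUF ioctl. A minimal sketch, assuming a single-planar capture device at /dev/video0 whose buffers were already allocated with REQBUFS in MMAP mode; none of these specifics come from the patch itself.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_exportbuffer expbuf;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&expbuf, 0, sizeof(expbuf));
        expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        expbuf.index = 0;
        expbuf.plane = 0;
        expbuf.flags = O_CLOEXEC;       /* the only flag vb2_expbuf() accepts */

        if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) < 0) {
                perror("VIDIOC_EXPBUF");
                close(fd);
                return 1;
        }

        printf("buffer 0, plane 0 exported as fd %d\n", expbuf.fd);
        close(fd);
        return 0;
}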
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4b7132660a93..10beaee7f0ae 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -10,7 +10,10 @@
* the Free Software Foundation.
*/
+#include <linux/dma-buf.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -23,40 +26,158 @@ struct vb2_dc_conf {
};
struct vb2_dc_buf {
- struct vb2_dc_conf *conf;
+ struct device *dev;
void *vaddr;
- dma_addr_t dma_addr;
unsigned long size;
- struct vm_area_struct *vma;
- atomic_t refcount;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ struct sg_table *dma_sgt;
+
+ /* MMAP related */
struct vb2_vmarea_handler handler;
+ atomic_t refcount;
+ struct sg_table *sgt_base;
+
+ /* USERPTR related */
+ struct vm_area_struct *vma;
+
+ /* DMABUF related */
+ struct dma_buf_attachment *db_attach;
};
-static void vb2_dma_contig_put(void *buf_priv);
+/*********************************************/
+/* scatterlist table functions */
+/*********************************************/
+
+
+static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
+ void (*cb)(struct page *pg))
+{
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ struct page *page = sg_page(s);
+ unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
+ >> PAGE_SHIFT;
+ unsigned int j;
+
+ for (j = 0; j < n_pages; ++j, ++page)
+ cb(page);
+ }
+}
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+ struct scatterlist *s;
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ unsigned int i;
+ unsigned long size = 0;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (sg_dma_address(s) != expected)
+ break;
+ expected = sg_dma_address(s) + sg_dma_len(s);
+ size += sg_dma_len(s);
+ }
+ return size;
+}
+
+/*********************************************/
+/* callbacks for all buffers */
+/*********************************************/
+
+static void *vb2_dc_cookie(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return &buf->dma_addr;
+}
+
+static void *vb2_dc_vaddr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return atomic_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+/*********************************************/
+/* callbacks for MMAP buffers */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (!atomic_dec_and_test(&buf->refcount))
+ return;
+
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+ put_device(buf->dev);
+ kfree(buf);
+}
-static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
struct vb2_dc_conf *conf = alloc_ctx;
+ struct device *dev = conf->dev;
struct vb2_dc_buf *buf;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
- GFP_KERNEL);
+ /* align image size to PAGE_SIZE */
+ size = PAGE_ALIGN(size);
+
+ buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
if (!buf->vaddr) {
- dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
- size);
+ dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
- buf->conf = conf;
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(dev);
buf->size = size;
buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_dma_contig_put;
+ buf->handler.put = vb2_dc_put;
buf->handler.arg = buf;
atomic_inc(&buf->refcount);
@@ -64,100 +185,569 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
return buf;
}
-static void vb2_dma_contig_put(void *buf_priv)
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct vb2_dc_buf *buf = buf_priv;
+ int ret;
- if (atomic_dec_and_test(&buf->refcount)) {
- dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
- buf->dma_addr);
- kfree(buf);
+ if (!buf) {
+ printk(KERN_ERR "No buffer to map\n");
+ return -EINVAL;
+ }
+
+ /*
+ * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
+ * map whole buffer
+ */
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
+ buf->dma_addr, buf->size);
+
+ if (ret) {
+ pr_err("Remapping memory failed, error: %d\n", ret);
+ return ret;
}
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+ __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+ buf->size);
+
+ return 0;
}
-static void *vb2_dma_contig_cookie(void *buf_priv)
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
{
- struct vb2_dc_buf *buf = buf_priv;
+ struct vb2_dc_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dc_buf *buf = dbuf->priv;
+ int ret;
- return &buf->dma_addr;
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ /* Copy the buf->sgt_base scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->sgt_base->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
}
-static void *vb2_dma_contig_vaddr(void *buf_priv)
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
{
- struct vb2_dc_buf *buf = buf_priv;
- if (!buf)
- return NULL;
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+{
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dir == dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dir);
+ attach->dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dir = dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dc_get_dmabuf */
+ vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
+
+ return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
return buf->vaddr;
}
-static unsigned int vb2_dma_contig_num_users(void *buf_priv)
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
{
- struct vb2_dc_buf *buf = buf_priv;
+ return vb2_dc_mmap(dbuf->priv, vma);
+}
- return atomic_read(&buf->refcount);
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+ .attach = vb2_dc_dmabuf_ops_attach,
+ .detach = vb2_dc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+ .kmap = vb2_dc_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+ .vmap = vb2_dc_dmabuf_ops_vmap,
+ .mmap = vb2_dc_dmabuf_ops_mmap,
+ .release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ dev_err(buf->dev, "failed to alloc sg table\n");
+ return NULL;
+ }
+
+ ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+ buf->size);
+ if (ret < 0) {
+ dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+ kfree(sgt);
+ return NULL;
+ }
+
+ return sgt;
}
-static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
- if (!buf) {
- printk(KERN_ERR "No buffer to map\n");
- return -EINVAL;
+ if (!buf->sgt_base)
+ buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+ if (WARN_ON(!buf->sgt_base))
+ return NULL;
+
+ dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
+/* callbacks for USERPTR buffers */
+/*********************************************/
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
+ int n_pages, struct vm_area_struct *vma, int write)
+{
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+
+ if (ret) {
+ pr_err("no page for address %lu\n", start);
+ return ret;
+ }
+ pages[i] = pfn_to_page(pfn);
+ }
+ } else {
+ int n;
+
+ n = get_user_pages(current, current->mm, start & PAGE_MASK,
+ n_pages, write, 1, pages, NULL);
+ /* negative error means that no page was pinned */
+ n = max(n, 0);
+ if (n != n_pages) {
+ pr_err("got only %d of %d user pages\n", n, n_pages);
+ while (n)
+ put_page(pages[--n]);
+ return -EFAULT;
+ }
}
- return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
- &vb2_common_vm_ops, &buf->handler);
+ return 0;
}
-static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
+static void vb2_dc_put_dirty_page(struct page *page)
{
+ set_page_dirty_lock(page);
+ put_page(page);
+}
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+
+ sg_free_table(sgt);
+ kfree(sgt);
+ vb2_put_vma(buf->vma);
+ kfree(buf);
+}
+
+static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, int write)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
+ unsigned long start;
+ unsigned long end;
+ unsigned long offset;
+ struct page **pages;
+ int n_pages;
+ int ret = 0;
struct vm_area_struct *vma;
- dma_addr_t dma_addr = 0;
- int ret;
+ struct sg_table *sgt;
+ unsigned long contig_size;
+ unsigned long dma_align = dma_get_cache_alignment();
+
+ /* Only cache aligned DMA transfers are reliable */
+ if (!IS_ALIGNED(vaddr | size, dma_align)) {
+ pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!size) {
+ pr_debug("size is zero\n");
+ return ERR_PTR(-EINVAL);
+ }
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
+ buf->dev = conf->dev;
+ buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ start = vaddr & PAGE_MASK;
+ offset = vaddr & ~PAGE_MASK;
+ end = PAGE_ALIGN(vaddr + size);
+ n_pages = (end - start) >> PAGE_SHIFT;
+
+ pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ pr_err("failed to allocate pages table\n");
+ goto fail_buf;
+ }
+
+ /* current->mm->mmap_sem is taken by videobuf2 core */
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ pr_err("no vma for address %lu\n", vaddr);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ pr_err("failed to copy vma\n");
+ ret = -ENOMEM;
+ goto fail_pages;
+ }
+
+ /* extract page list from userspace mapping */
+ ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
if (ret) {
- printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
- vaddr);
- kfree(buf);
- return ERR_PTR(ret);
+ pr_err("failed to get user pages\n");
+ goto fail_vma;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ pr_err("failed to allocate sg table\n");
+ ret = -ENOMEM;
+ goto fail_get_user_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+ offset, size, GFP_KERNEL);
+ if (ret) {
+ pr_err("failed to initialize sg table\n");
+ goto fail_sgt;
}
+ /* pages are no longer needed */
+ kfree(pages);
+ pages = NULL;
+
+ sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir);
+ if (sgt->nents <= 0) {
+ pr_err("failed to map scatterlist\n");
+ ret = -EIO;
+ goto fail_sgt_init;
+ }
+
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < size) {
+ pr_err("contiguous mapping is too small %lu/%lu\n",
+ contig_size, size);
+ ret = -EFAULT;
+ goto fail_map_sg;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
buf->size = size;
- buf->dma_addr = dma_addr;
- buf->vma = vma;
+ buf->dma_sgt = sgt;
return buf;
+
+fail_map_sg:
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+
+fail_sgt_init:
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, put_page);
+ sg_free_table(sgt);
+
+fail_sgt:
+ kfree(sgt);
+
+fail_get_user_pages:
+ if (pages && !vma_is_io(buf->vma))
+ while (n_pages)
+ put_page(pages[--n_pages]);
+
+fail_vma:
+ vb2_put_vma(buf->vma);
+
+fail_pages:
+ kfree(pages); /* kfree is NULL-proof */
+
+fail_buf:
+ kfree(buf);
+
+ return ERR_PTR(ret);
}
-static void vb2_dma_contig_put_userptr(void *mem_priv)
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
{
struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt;
+ unsigned long contig_size;
- if (!buf)
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to pin a non attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR_OR_NULL(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ /* checking if dmabuf is big enough to store contiguous chunk */
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < buf->size) {
+ pr_err("contiguous chunk is too small %lu/%lu b\n",
+ contig_size, buf->size);
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+ return -EFAULT;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->dma_sgt = sgt;
+
+ return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to unpin a not attached buffer\n");
return;
+ }
- vb2_put_vma(buf->vma);
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_addr = 0;
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+
+ /* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_addr))
+ vb2_dc_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
kfree(buf);
}
+static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
+ struct vb2_dc_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
+/*********************************************/
+/* DMA CONTIG exported functions */
+/*********************************************/
+
const struct vb2_mem_ops vb2_dma_contig_memops = {
- .alloc = vb2_dma_contig_alloc,
- .put = vb2_dma_contig_put,
- .cookie = vb2_dma_contig_cookie,
- .vaddr = vb2_dma_contig_vaddr,
- .mmap = vb2_dma_contig_mmap,
- .get_userptr = vb2_dma_contig_get_userptr,
- .put_userptr = vb2_dma_contig_put_userptr,
- .num_users = vb2_dma_contig_num_users,
+ .alloc = vb2_dc_alloc,
+ .put = vb2_dc_put,
+ .get_dmabuf = vb2_dc_get_dmabuf,
+ .cookie = vb2_dc_cookie,
+ .vaddr = vb2_dc_vaddr,
+ .mmap = vb2_dc_mmap,
+ .get_userptr = vb2_dc_get_userptr,
+ .put_userptr = vb2_dc_put_userptr,
+ .prepare = vb2_dc_prepare,
+ .finish = vb2_dc_finish,
+ .map_dmabuf = vb2_dc_map_dmabuf,
+ .unmap_dmabuf = vb2_dc_unmap_dmabuf,
+ .attach_dmabuf = vb2_dc_attach_dmabuf,
+ .detach_dmabuf = vb2_dc_detach_dmabuf,
+ .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
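The DMABUF callbacks added above on the importer side (attach_dmabuf, map_dmabuf, unmap_dmabuf, detach_dmabuf) are thin wrappers around the core dma-buf API. The sketch below shows only that underlying call sequence with assumed arguments ('dev' is the importing device, 'fd' a dma-buf file descriptor); the contiguity check that vb2-dma-contig performs on the returned scatterlist is omitted.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int import_and_map(struct device *dev, int fd)
{
        struct dma_buf *dbuf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        dbuf = dma_buf_get(fd);
        if (IS_ERR(dbuf))
                return PTR_ERR(dbuf);

        attach = dma_buf_attach(dbuf, dev);
        if (IS_ERR(attach)) {
                dma_buf_put(dbuf);
                return PTR_ERR(attach);
        }

        sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);
        if (IS_ERR_OR_NULL(sgt)) {
                dma_buf_detach(dbuf, attach);
                dma_buf_put(dbuf);
                return sgt ? PTR_ERR(sgt) : -ENOMEM;
        }

        /* ... program the device with sg_dma_address(sgt->sgl) ... */

        dma_buf_unmap_attachment(attach, sgt, DMA_FROM_DEVICE);
        dma_buf_detach(dbuf, attach);
        dma_buf_put(dbuf);
        return 0;
}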
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 051ea3571b20..81c1ad8b2cf1 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -137,46 +137,6 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
/**
- * vb2_mmap_pfn_range() - map physical pages to userspace
- * @vma: virtual memory region for the mapping
- * @paddr: starting physical address of the memory to be mapped
- * @size: size of the memory to be mapped
- * @vm_ops: vm operations to be assigned to the created area
- * @priv: private data to be associated with the area
- *
- * Returns 0 on success.
- */
-int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
- unsigned long size,
- const struct vm_operations_struct *vm_ops,
- void *priv)
-{
- int ret;
-
- size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
- size, vma->vm_page_prot);
- if (ret) {
- printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
- return ret;
- }
-
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = priv;
- vma->vm_ops = vm_ops;
-
- vma->vm_ops->open(vma);
-
- pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
- __func__, paddr, vma->vm_start, size);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
-
-/**
* vb2_common_vm_open() - increase refcount of the vma
* @vma: virtual memory region for the mapping
*
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 94efa04d8d55..a47fd4f589a1 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -30,6 +30,7 @@ struct vb2_vmalloc_buf {
unsigned int n_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
+ struct dma_buf *dbuf;
};
static void vb2_vmalloc_put(void *buf_priv);
@@ -207,11 +208,66 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
return 0;
}
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_vmalloc_map_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ buf->vaddr = dma_buf_vmap(buf->dbuf);
+
+ return buf->vaddr ? 0 : -EFAULT;
+}
+
+static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ buf->vaddr = NULL;
+}
+
+static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ if (buf->vaddr)
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+
+ kfree(buf);
+}
+
+static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dbuf = dbuf;
+ buf->write = write;
+ buf->size = size;
+
+ return buf;
+}
+
+
const struct vb2_mem_ops vb2_vmalloc_memops = {
.alloc = vb2_vmalloc_alloc,
.put = vb2_vmalloc_put,
.get_userptr = vb2_vmalloc_get_userptr,
.put_userptr = vb2_vmalloc_put_userptr,
+ .map_dmabuf = vb2_vmalloc_map_dmabuf,
+ .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
+ .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
+ .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
.vaddr = vb2_vmalloc_vaddr,
.mmap = vb2_vmalloc_mmap,
.num_users = vb2_vmalloc_num_users,
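Unlike the dma-contig backend, the vmalloc backend maps an imported buffer into the kernel with dma_buf_vmap() so the CPU, not a DMA engine, accesses it. A sketch of that vmap/vunmap pairing in isolation, assuming 'dbuf' is a dma_buf the caller already holds a reference on:

#include <linux/dma-buf.h>
#include <linux/errno.h>

static int cpu_access_dmabuf(struct dma_buf *dbuf)
{
        void *vaddr = dma_buf_vmap(dbuf);

        if (!vaddr)
                return -EFAULT;

        /* ... CPU reads/writes go through vaddr here ... */

        dma_buf_vunmap(dbuf, vaddr);
        return 0;
}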
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 06d31c99e6ac..df0873694858 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -10,6 +10,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_data/emif_plat.h>
@@ -1468,12 +1469,9 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
goto error;
}
- emif->base = devm_request_and_ioremap(emif->dev, res);
- if (!emif->base) {
- dev_err(emif->dev, "%s: devm_request_and_ioremap() failed\n",
- __func__);
+ emif->base = devm_ioremap_resource(emif->dev, res);
+ if (IS_ERR(emif->base))
goto error;
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
diff --git a/drivers/memory/tegra20-mc.c b/drivers/memory/tegra20-mc.c
index e6764bb41cb9..2ca5f2814f4a 100644
--- a/drivers/memory/tegra20-mc.c
+++ b/drivers/memory/tegra20-mc.c
@@ -17,6 +17,7 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
@@ -177,7 +178,7 @@ static void tegra20_mc_decode(struct tegra20_mc *mc, int n)
"carveout" : "trustzone") : "");
}
-static const struct of_device_id tegra20_mc_of_match[] __devinitconst = {
+static const struct of_device_id tegra20_mc_of_match[] = {
{ .compatible = "nvidia,tegra20-mc", },
{},
};
@@ -198,7 +199,7 @@ static irqreturn_t tegra20_mc_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit tegra20_mc_probe(struct platform_device *pdev)
+static int tegra20_mc_probe(struct platform_device *pdev)
{
struct resource *irq;
struct tegra20_mc *mc;
@@ -216,9 +217,9 @@ static int __devinit tegra20_mc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
return -ENODEV;
- mc->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
- if (!mc->regs[i])
- return -EBUSY;
+ mc->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mc->regs[i]))
+ return PTR_ERR(mc->regs[i]);
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/memory/tegra30-mc.c b/drivers/memory/tegra30-mc.c
index 802b9ea431fa..0b975986777d 100644
--- a/drivers/memory/tegra30-mc.c
+++ b/drivers/memory/tegra30-mc.c
@@ -17,6 +17,7 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
@@ -295,7 +296,7 @@ static UNIVERSAL_DEV_PM_OPS(tegra30_mc_pm,
tegra30_mc_suspend,
tegra30_mc_resume, NULL);
-static const struct of_device_id tegra30_mc_of_match[] __devinitconst = {
+static const struct of_device_id tegra30_mc_of_match[] = {
{ .compatible = "nvidia,tegra30-mc", },
{},
};
@@ -316,7 +317,7 @@ static irqreturn_t tegra30_mc_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit tegra30_mc_probe(struct platform_device *pdev)
+static int tegra30_mc_probe(struct platform_device *pdev)
{
struct resource *irq;
struct tegra30_mc *mc;
@@ -336,9 +337,9 @@ static int __devinit tegra30_mc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
return -ENODEV;
- mc->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
- if (!mc->regs[i])
- return -EBUSY;
+ mc->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mc->regs[i]))
+ return PTR_ERR(mc->regs[i]);
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/memstick/Kconfig b/drivers/memstick/Kconfig
index f0ca41c20323..1314605d791f 100644
--- a/drivers/memstick/Kconfig
+++ b/drivers/memstick/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig MEMSTICK
- tristate "Sony MemoryStick card support (EXPERIMENTAL)"
+ tristate "Sony MemoryStick card support"
help
Sony MemoryStick is a proprietary storage/extension card protocol.
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 4f7a17fd1aa7..1b37cf8cd204 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -5,8 +5,8 @@
comment "MemoryStick Host Controller Drivers"
config MEMSTICK_TIFM_MS
- tristate "TI Flash Media MemoryStick Interface support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI
+ tristate "TI Flash Media MemoryStick Interface support "
+ depends on PCI
select TIFM_CORE
help
Say Y here if you want to be able to access MemoryStick cards with
@@ -21,8 +21,8 @@ config MEMSTICK_TIFM_MS
module will be called tifm_ms.
config MEMSTICK_JMICRON_38X
- tristate "JMicron JMB38X MemoryStick interface support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI
+ tristate "JMicron JMB38X MemoryStick interface support"
+ depends on PCI
help
Say Y here if you want to be able to access MemoryStick cards with
@@ -32,8 +32,8 @@ config MEMSTICK_JMICRON_38X
module will be called jmb38x_ms.
config MEMSTICK_R592
- tristate "Ricoh R5C592 MemoryStick interface support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI
+ tristate "Ricoh R5C592 MemoryStick interface support"
+ depends on PCI
help
Say Y here if you want to be able to access MemoryStick cards with
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index d784c36707c0..c13cd9bc590b 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -100,7 +100,7 @@ static int mptfc_slave_alloc(struct scsi_device *sdev);
static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
static void mptfc_target_destroy(struct scsi_target *starget);
static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
-static void __devexit mptfc_remove(struct pci_dev *pdev);
+static void mptfc_remove(struct pci_dev *pdev);
static int mptfc_abort(struct scsi_cmnd *SCpnt);
static int mptfc_dev_reset(struct scsi_cmnd *SCpnt);
static int mptfc_bus_reset(struct scsi_cmnd *SCpnt);
@@ -1360,7 +1360,7 @@ static struct pci_driver mptfc_driver = {
.name = "mptfc",
.id_table = mptfc_pci_table,
.probe = mptfc_probe,
- .remove = __devexit_p(mptfc_remove),
+ .remove = mptfc_remove,
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
@@ -1496,8 +1496,7 @@ mptfc_init(void)
* @pdev: Pointer to pci_dev structure
*
*/
-static void __devexit
-mptfc_remove(struct pci_dev *pdev)
+static void mptfc_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptfc_rport_info *p, *n;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 551262e4b96e..fa43c391c8ed 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -5332,7 +5332,7 @@ mptsas_shutdown(struct pci_dev *pdev)
mptsas_cleanup_fw_event_q(ioc);
}
-static void __devexit mptsas_remove(struct pci_dev *pdev)
+static void mptsas_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptsas_portinfo *p, *n;
@@ -5387,7 +5387,7 @@ static struct pci_driver mptsas_driver = {
.name = "mptsas",
.id_table = mptsas_pci_table,
.probe = mptsas_probe,
- .remove = __devexit_p(mptsas_remove),
+ .remove = mptsas_remove,
.shutdown = mptsas_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 0c3ced70707b..164afa71bba7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -792,6 +792,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
* than an unsolicited DID_ABORT.
*/
sc->result = DID_RESET << 16;
+ break;
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
if (ioc->bus_type == FC)
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 8f61ba6aac23..c3aabde2dc4f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1550,7 +1550,7 @@ static struct pci_driver mptspi_driver = {
.name = "mptspi",
.id_table = mptspi_pci_table,
.probe = mptspi_probe,
- .remove = __devexit_p(mptscsih_remove),
+ .remove = mptscsih_remove,
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 7190d5239b4f..0f9f3e1a2b6b 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -37,7 +37,7 @@
#define OSM_DESCRIPTION "I2O-subsystem"
/* PCI device id table for all I2O controllers */
-static struct pci_device_id __devinitdata i2o_pci_ids[] = {
+static struct pci_device_id i2o_pci_ids[] = {
{PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)},
{PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)},
{.vendor = PCI_VENDOR_ID_INTEL,.device = 0x1962,
@@ -84,7 +84,7 @@ static void i2o_pci_free(struct i2o_controller *c)
*
* Returns 0 on success or negative error code on failure.
*/
-static int __devinit i2o_pci_alloc(struct i2o_controller *c)
+static int i2o_pci_alloc(struct i2o_controller *c)
{
struct pci_dev *pdev = c->pdev;
struct device *dev = &pdev->dev;
@@ -315,8 +315,7 @@ static void i2o_pci_irq_disable(struct i2o_controller *c)
*
* Returns 0 on success or negative error code on failure.
*/
-static int __devinit i2o_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i2o_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct i2o_controller *c;
int rc;
@@ -453,7 +452,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
* Reset the I2O controller, disable interrupts and remove all allocated
* resources.
*/
-static void __devexit i2o_pci_remove(struct pci_dev *pdev)
+static void i2o_pci_remove(struct pci_dev *pdev)
{
struct i2o_controller *c;
c = pci_get_drvdata(pdev);
@@ -474,7 +473,7 @@ static struct pci_driver i2o_pci_driver = {
.name = "PCI_I2O",
.id_table = i2o_pci_ids,
.probe = i2o_pci_probe,
- .remove = __devexit_p(i2o_pci_remove),
+ .remove = i2o_pci_remove,
};
/**
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b63987c6ed20..ff553babf455 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -104,6 +104,17 @@ config MFD_TI_SSP
To compile this driver as a module, choose M here: the
module will be called ti-ssp.
+config MFD_TI_AM335X_TSCADC
+ tristate "TI ADC / Touch Screen chip support"
+ select MFD_CORE
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Texas Instruments series
+ of touch screen/ADC chips.
+ To compile this driver as a module, choose M here: the
+ module will be called ti_am335x_tscadc.
+
config HTC_EGPIO
bool "HTC EGPIO support"
depends on GENERIC_HARDIRQS && GPIOLIB && ARM
@@ -226,6 +237,7 @@ config MFD_TPS65910
depends on I2C=y && GPIOLIB
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
select IRQ_DOMAIN
help
if you say yes here you get support for the TPS65910 series of
@@ -253,6 +265,20 @@ config MFD_TPS65912_SPI
If you say yes here you get support for the TPS65912 series of
PM chips with SPI interface.
+config MFD_TPS80031
+ bool "TI TPS80031/TPS80032 Power Management chips"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ If you say yes here you get support for the Texas Instruments
+ TPS80031/TPS80032 fully integrated power management chips with
+ power path and battery charger. The device provides five
+ configurable step-down converters, 11 general-purpose LDOs, a USB
+ OTG module, an ADC, an RTC, two PWMs, and a system voltage
+ regulator/battery charger with power path from USB and a 32K
+ clock generator.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -267,6 +293,7 @@ config TWL4030_CORE
bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y && GENERIC_HARDIRQS
select IRQ_DOMAIN
+ select REGMAP_I2C
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
@@ -309,10 +336,10 @@ config MFD_TWL4030_AUDIO
config TWL6040_CORE
bool "Support for TWL6040 audio codec"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
- select IRQ_DOMAIN
+ select REGMAP_IRQ
default n
help
Say yes here if you want support for Texas Instruments TWL6040 audio
@@ -990,6 +1017,7 @@ config MFD_TPS65090
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
help
If you say yes here you get support for the TPS65090 series of
Power Management chips.
@@ -1034,6 +1062,7 @@ config MFD_STA2X11
bool "STA2X11 multi function device support"
depends on STA2X11
select MFD_CORE
+ select REGMAP_MMIO
config MFD_SYSCON
bool "System Controller Register R/W Based on Regmap"
@@ -1053,6 +1082,38 @@ config MFD_PALMAS
If you say yes here you get support for the Palmas
series of PMIC chips from Texas Instruments.
+config MFD_VIPERBOARD
+ tristate "Support for Nano River Technologies Viperboard"
+ select MFD_CORE
+ depends on USB
+ default n
+ help
+ Say yes here if you want support for Nano River Technologies
+ Viperboard.
+ There are MFD cell drivers available for the I2C master, the ADC
+ and both GPIOs found on the board. The SPI part does not yet
+ have a driver.
+ You need to select the MFD cell drivers separately.
+ The drivers do not support all features the board exposes.
+
+config MFD_RETU
+ tristate "Support for Retu multi-function device"
+ select MFD_CORE
+ depends on I2C
+ select REGMAP_IRQ
+ help
+ Retu is a multi-function device found on Nokia Internet Tablets
+ (770, N800 and N810).
+
+config MFD_AS3711
+ bool "Support for AS3711"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C=y
+ help
+ Support for the AS3711 PMIC from AMS.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 69f260ae0225..8b977f8045ae 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
+obj-$(CONFIG_MFD_TI_AM335X_TSCADC) += ti_am335x_tscadc.o
obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
@@ -55,18 +56,19 @@ obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MFD_TPS65217) += tps65217.o
-obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+obj-$(CONFIG_MFD_TPS65910) += tps65910.o
tps65912-objs := tps65912-core.o tps65912-irq.o
obj-$(CONFIG_MFD_TPS65912) += tps65912.o
obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
+obj-$(CONFIG_MFD_TPS80031) += tps80031.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
-obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
+obj-$(CONFIG_TWL6040_CORE) += twl6040.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
@@ -89,6 +91,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
+obj-$(CONFIG_PMIC_DA9052) += da9052-irq.o
obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
@@ -137,8 +140,11 @@ obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
+obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o
obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o
obj-$(CONFIG_MFD_SYSCON) += syscon.o
obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o
+obj-$(CONFIG_MFD_RETU) += retu-mfd.o
+obj-$(CONFIG_MFD_AS3711) += as3711.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 3e27c031aeaa..8b5d685ab980 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -19,6 +19,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/ab8500.h>
#include <linux/of.h>
@@ -586,38 +587,6 @@ int ab8500_suspend(struct ab8500 *ab8500)
return 0;
}
-/* AB8500 GPIO Resources */
-static struct resource __devinitdata ab8500_gpio_resources[] = {
- {
- .name = "GPIO_INT6",
- .start = AB8500_INT_GPIO6R,
- .end = AB8500_INT_GPIO41F,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-/* AB9540 GPIO Resources */
-static struct resource __devinitdata ab9540_gpio_resources[] = {
- {
- .name = "GPIO_INT6",
- .start = AB8500_INT_GPIO6R,
- .end = AB8500_INT_GPIO41F,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "GPIO_INT14",
- .start = AB9540_INT_GPIO50R,
- .end = AB9540_INT_GPIO54R,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "GPIO_INT15",
- .start = AB9540_INT_GPIO50F,
- .end = AB9540_INT_GPIO54F,
- .flags = IORESOURCE_IRQ,
- }
-};
-
static struct resource ab8500_gpadc_resources[] = {
{
.name = "HW_CONV_END",
@@ -781,6 +750,12 @@ static struct resource ab8500_charger_resources[] = {
.end = AB8500_INT_CH_WD_EXP,
.flags = IORESOURCE_IRQ,
},
+ {
+ .name = "VBUS_CH_DROP_END",
+ .start = AB8500_INT_VBUS_CH_DROP_END,
+ .end = AB8500_INT_VBUS_CH_DROP_END,
+ .flags = IORESOURCE_IRQ,
+ },
};
static struct resource ab8500_btemp_resources[] = {
@@ -979,6 +954,10 @@ static struct mfd_cell abx500_common_devs[] = {
.of_compatible = "stericsson,ab8500-regulator",
},
{
+ .name = "abx500-clk",
+ .of_compatible = "stericsson,abx500-clk",
+ },
+ {
.name = "ab8500-gpadc",
.of_compatible = "stericsson,ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
@@ -1036,23 +1015,35 @@ static struct mfd_cell abx500_common_devs[] = {
static struct mfd_cell ab8500_bm_devs[] = {
{
.name = "ab8500-charger",
+ .of_compatible = "stericsson,ab8500-charger",
.num_resources = ARRAY_SIZE(ab8500_charger_resources),
.resources = ab8500_charger_resources,
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
},
{
.name = "ab8500-btemp",
+ .of_compatible = "stericsson,ab8500-btemp",
.num_resources = ARRAY_SIZE(ab8500_btemp_resources),
.resources = ab8500_btemp_resources,
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
},
{
.name = "ab8500-fg",
+ .of_compatible = "stericsson,ab8500-fg",
.num_resources = ARRAY_SIZE(ab8500_fg_resources),
.resources = ab8500_fg_resources,
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
},
{
.name = "ab8500-chargalg",
+ .of_compatible = "stericsson,ab8500-chargalg",
.num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
.resources = ab8500_chargalg_resources,
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
},
};
@@ -1060,8 +1051,6 @@ static struct mfd_cell ab8500_devs[] = {
{
.name = "ab8500-gpio",
.of_compatible = "stericsson,ab8500-gpio",
- .num_resources = ARRAY_SIZE(ab8500_gpio_resources),
- .resources = ab8500_gpio_resources,
},
{
.name = "ab8500-usb",
@@ -1078,8 +1067,6 @@ static struct mfd_cell ab8500_devs[] = {
static struct mfd_cell ab9540_devs[] = {
{
.name = "ab8500-gpio",
- .num_resources = ARRAY_SIZE(ab9540_gpio_resources),
- .resources = ab9540_gpio_resources,
},
{
.name = "ab9540-usb",
@@ -1264,7 +1251,7 @@ static int ab8500_probe(struct platform_device *pdev)
int i;
u8 value;
- ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ ab8500 = devm_kzalloc(&pdev->dev, sizeof *ab8500, GFP_KERNEL);
if (!ab8500)
return -ENOMEM;
@@ -1274,10 +1261,8 @@ static int ab8500_probe(struct platform_device *pdev)
ab8500->dev = &pdev->dev;
resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!resource) {
- ret = -ENODEV;
- goto out_free_ab8500;
- }
+ if (!resource)
+ return -ENODEV;
ab8500->irq = resource->start;
@@ -1300,7 +1285,7 @@ static int ab8500_probe(struct platform_device *pdev)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_IC_NAME_REG, &value);
if (ret < 0)
- goto out_free_ab8500;
+ return ret;
ab8500->version = value;
}
@@ -1308,7 +1293,7 @@ static int ab8500_probe(struct platform_device *pdev)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_REV_REG, &value);
if (ret < 0)
- goto out_free_ab8500;
+ return ret;
ab8500->chip_id = value;
@@ -1325,14 +1310,13 @@ static int ab8500_probe(struct platform_device *pdev)
ab8500->mask_size = AB8500_NUM_IRQ_REGS;
ab8500->irq_reg_offset = ab8500_irq_regoffset;
}
- ab8500->mask = kzalloc(ab8500->mask_size, GFP_KERNEL);
+ ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
if (!ab8500->mask)
return -ENOMEM;
- ab8500->oldmask = kzalloc(ab8500->mask_size, GFP_KERNEL);
- if (!ab8500->oldmask) {
- ret = -ENOMEM;
- goto out_freemask;
- }
+ ab8500->oldmask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
+ if (!ab8500->oldmask)
+ return -ENOMEM;
+
/*
* ab8500 has switched off due to (SWITCH_OFF_STATUS):
* 0x01 Swoff bit programming
@@ -1386,37 +1370,37 @@ static int ab8500_probe(struct platform_device *pdev)
ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
if (ret)
- goto out_freeoldmask;
+ return ret;
for (i = 0; i < ab8500->mask_size; i++)
ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
ret = ab8500_irq_init(ab8500, np);
if (ret)
- goto out_freeoldmask;
+ return ret;
/* Activate this feature only in ab9540 */
/* till tests are done on ab8500 1p2 or later*/
if (is_ab9540(ab8500)) {
- ret = request_threaded_irq(ab8500->irq, NULL,
- ab8500_hierarchical_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+ ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+ ab8500_hierarchical_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
}
else {
- ret = request_threaded_irq(ab8500->irq, NULL,
- ab8500_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+ ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+ ab8500_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
if (ret)
- goto out_freeoldmask;
+ return ret;
}
ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
ARRAY_SIZE(abx500_common_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (is_ab9540(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
@@ -1427,14 +1411,14 @@ static int ab8500_probe(struct platform_device *pdev)
ARRAY_SIZE(ab8500_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (is_ab9540(ab8500) || is_ab8505(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
ARRAY_SIZE(ab9540_ab8505_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (!no_bm) {
/* Add battery management devices */
@@ -1455,17 +1439,6 @@ static int ab8500_probe(struct platform_device *pdev)
dev_err(ab8500->dev, "error creating sysfs entries\n");
return ret;
-
-out_freeirq:
- free_irq(ab8500->irq, ab8500);
-out_freeoldmask:
- kfree(ab8500->oldmask);
-out_freemask:
- kfree(ab8500->mask);
-out_free_ab8500:
- kfree(ab8500);
-
- return ret;
}
static int ab8500_remove(struct platform_device *pdev)
@@ -1478,11 +1451,6 @@ static int ab8500_remove(struct platform_device *pdev)
sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
mfd_remove_devices(ab8500->dev);
- free_irq(ab8500->irq, ab8500);
-
- kfree(ab8500->oldmask);
- kfree(ab8500->mask);
- kfree(ab8500);
return 0;
}
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index c784f4602a74..222c03a5ddc0 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -239,7 +239,12 @@ static int arizona_runtime_resume(struct device *dev)
return ret;
}
- regcache_sync(arizona->regmap);
+ ret = regcache_sync(arizona->regmap);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to restore register cache\n");
+ regulator_disable(arizona->dcvdd);
+ return ret;
+ }
return 0;
}
@@ -292,6 +297,7 @@ int arizona_dev_init(struct arizona *arizona)
struct device *dev = arizona->dev;
const char *type_name;
unsigned int reg, val;
+ int (*apply_patch)(struct arizona *) = NULL;
int ret, i;
dev_set_drvdata(arizona->dev, arizona);
@@ -391,7 +397,7 @@ int arizona_dev_init(struct arizona *arizona)
arizona->type);
arizona->type = WM5102;
}
- ret = wm5102_patch(arizona);
+ apply_patch = wm5102_patch;
break;
#endif
#ifdef CONFIG_MFD_WM5110
@@ -402,7 +408,7 @@ int arizona_dev_init(struct arizona *arizona)
arizona->type);
arizona->type = WM5110;
}
- ret = wm5110_patch(arizona);
+ apply_patch = wm5110_patch;
break;
#endif
default:
@@ -412,9 +418,6 @@ int arizona_dev_init(struct arizona *arizona)
dev_info(dev, "%s revision %c\n", type_name, arizona->rev + 'A');
- if (ret != 0)
- dev_err(arizona->dev, "Failed to apply patch: %d\n", ret);
-
/* If we have a /RESET GPIO we'll already be reset */
if (!arizona->pdata.reset) {
regcache_mark_dirty(arizona->regmap);
@@ -438,6 +441,15 @@ int arizona_dev_init(struct arizona *arizona)
goto err_reset;
}
+ if (apply_patch) {
+ ret = apply_patch(arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to apply patch: %d\n",
+ ret);
+ goto err_reset;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
if (!arizona->pdata.gpio_defaults[i])
continue;
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index b1b009177405..2bec5f0db3ee 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -176,14 +176,7 @@ int arizona_irq_init(struct arizona *arizona)
aod = &wm5102_aod;
irq = &wm5102_irq;
- switch (arizona->rev) {
- case 0:
- case 1:
- ctrlif_error = false;
- break;
- default:
- break;
- }
+ ctrlif_error = false;
break;
#endif
#ifdef CONFIG_MFD_WM5110
@@ -191,14 +184,7 @@ int arizona_irq_init(struct arizona *arizona)
aod = &wm5110_aod;
irq = &wm5110_irq;
- switch (arizona->rev) {
- case 0:
- case 1:
- ctrlif_error = false;
- break;
- default:
- break;
- }
+ ctrlif_error = false;
break;
#endif
default:
@@ -224,6 +210,7 @@ int arizona_irq_init(struct arizona *arizona)
arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops,
arizona);
if (!arizona->virq) {
+ dev_err(arizona->dev, "Failed to add core IRQ domain\n");
ret = -EINVAL;
goto err;
}
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
new file mode 100644
index 000000000000..e994c9691124
--- /dev/null
+++ b/drivers/mfd/as3711.c
@@ -0,0 +1,217 @@
+/*
+ * AS3711 PMIC MFD driver
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/as3711.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum {
+ AS3711_REGULATOR,
+ AS3711_BACKLIGHT,
+};
+
+/*
+ * It is OK to have this static: it is only used during probing, and
+ * multiple I2C devices cannot be probed simultaneously. Just make sure
+ * to avoid stale data.
+ */
+static struct mfd_cell as3711_subdevs[] = {
+ [AS3711_REGULATOR] = {.name = "as3711-regulator",},
+ [AS3711_BACKLIGHT] = {.name = "as3711-backlight",},
+};
+
+static bool as3711_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_GPIO_SIGNAL_IN:
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ case AS3711_CHARGER_STATUS_1:
+ case AS3711_CHARGER_STATUS_2:
+ case AS3711_REG_STATUS:
+ return true;
+ }
+ return false;
+}
+
+static bool as3711_precious_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ return true;
+ }
+ return false;
+}
+
+static bool as3711_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_SD_1_VOLTAGE:
+ case AS3711_SD_2_VOLTAGE:
+ case AS3711_SD_3_VOLTAGE:
+ case AS3711_SD_4_VOLTAGE:
+ case AS3711_LDO_1_VOLTAGE:
+ case AS3711_LDO_2_VOLTAGE:
+ case AS3711_LDO_3_VOLTAGE:
+ case AS3711_LDO_4_VOLTAGE:
+ case AS3711_LDO_5_VOLTAGE:
+ case AS3711_LDO_6_VOLTAGE:
+ case AS3711_LDO_7_VOLTAGE:
+ case AS3711_LDO_8_VOLTAGE:
+ case AS3711_SD_CONTROL:
+ case AS3711_GPIO_SIGNAL_OUT:
+ case AS3711_GPIO_SIGNAL_IN:
+ case AS3711_SD_CONTROL_1:
+ case AS3711_SD_CONTROL_2:
+ case AS3711_CURR_CONTROL:
+ case AS3711_CURR1_VALUE:
+ case AS3711_CURR2_VALUE:
+ case AS3711_CURR3_VALUE:
+ case AS3711_STEPUP_CONTROL_1:
+ case AS3711_STEPUP_CONTROL_2:
+ case AS3711_STEPUP_CONTROL_4:
+ case AS3711_STEPUP_CONTROL_5:
+ case AS3711_REG_STATUS:
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ case AS3711_CHARGER_STATUS_1:
+ case AS3711_CHARGER_STATUS_2:
+ case AS3711_ASIC_ID_1:
+ case AS3711_ASIC_ID_2:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config as3711_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = as3711_volatile_reg,
+ .readable_reg = as3711_readable_reg,
+ .precious_reg = as3711_precious_reg,
+ .max_register = AS3711_MAX_REGS,
+ .num_reg_defaults_raw = AS3711_MAX_REGS,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int as3711_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct as3711 *as3711;
+ struct as3711_platform_data *pdata = client->dev.platform_data;
+ unsigned int id1, id2;
+ int ret;
+
+ if (!pdata)
+ dev_dbg(&client->dev, "Platform data not found\n");
+
+ as3711 = devm_kzalloc(&client->dev, sizeof(struct as3711), GFP_KERNEL);
+ if (!as3711) {
+ dev_err(&client->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ as3711->dev = &client->dev;
+ i2c_set_clientdata(client, as3711);
+
+ if (client->irq)
+ dev_notice(&client->dev, "IRQ not supported yet\n");
+
+ as3711->regmap = devm_regmap_init_i2c(client, &as3711_regmap_config);
+ if (IS_ERR(as3711->regmap)) {
+ ret = PTR_ERR(as3711->regmap);
+ dev_err(&client->dev, "regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(as3711->regmap, AS3711_ASIC_ID_1, &id1);
+ if (!ret)
+ ret = regmap_read(as3711->regmap, AS3711_ASIC_ID_2, &id2);
+ if (ret < 0) {
+ dev_err(&client->dev, "regmap_read() failed: %d\n", ret);
+ return ret;
+ }
+ if (id1 != 0x8b)
+ return -ENODEV;
+ dev_info(as3711->dev, "AS3711 detected: %x:%x\n", id1, id2);
+
+ /* We can reuse as3711_subdevs[], it will be copied in mfd_add_devices() */
+ if (pdata) {
+ as3711_subdevs[AS3711_REGULATOR].platform_data = &pdata->regulator;
+ as3711_subdevs[AS3711_REGULATOR].pdata_size = sizeof(pdata->regulator);
+ as3711_subdevs[AS3711_BACKLIGHT].platform_data = &pdata->backlight;
+ as3711_subdevs[AS3711_BACKLIGHT].pdata_size = sizeof(pdata->backlight);
+ } else {
+ as3711_subdevs[AS3711_REGULATOR].platform_data = NULL;
+ as3711_subdevs[AS3711_REGULATOR].pdata_size = 0;
+ as3711_subdevs[AS3711_BACKLIGHT].platform_data = NULL;
+ as3711_subdevs[AS3711_BACKLIGHT].pdata_size = 0;
+ }
+
+ ret = mfd_add_devices(as3711->dev, -1, as3711_subdevs,
+ ARRAY_SIZE(as3711_subdevs), NULL, 0, NULL);
+ if (ret < 0)
+ dev_err(&client->dev, "add mfd devices failed: %d\n", ret);
+
+ return ret;
+}
+
+static int as3711_i2c_remove(struct i2c_client *client)
+{
+ struct as3711 *as3711 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(as3711->dev);
+ return 0;
+}
+
+static const struct i2c_device_id as3711_i2c_id[] = {
+ {.name = "as3711", .driver_data = 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, as3711_i2c_id);
+
+static struct i2c_driver as3711_i2c_driver = {
+ .driver = {
+ .name = "as3711",
+ .owner = THIS_MODULE,
+ },
+ .probe = as3711_i2c_probe,
+ .remove = as3711_i2c_remove,
+ .id_table = as3711_i2c_id,
+};
+
+static int __init as3711_i2c_init(void)
+{
+ return i2c_add_driver(&as3711_i2c_driver);
+}
+/* Initialise early */
+subsys_initcall(as3711_i2c_init);
+
+static void __exit as3711_i2c_exit(void)
+{
+ i2c_del_driver(&as3711_i2c_driver);
+}
+module_exit(as3711_i2c_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("AS3711 PMIC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 689b747416af..a3c9613f9166 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -24,16 +23,6 @@
#include <linux/mfd/da9052/pdata.h>
#include <linux/mfd/da9052/reg.h>
-#define DA9052_NUM_IRQ_REGS 4
-#define DA9052_IRQ_MASK_POS_1 0x01
-#define DA9052_IRQ_MASK_POS_2 0x02
-#define DA9052_IRQ_MASK_POS_3 0x04
-#define DA9052_IRQ_MASK_POS_4 0x08
-#define DA9052_IRQ_MASK_POS_5 0x10
-#define DA9052_IRQ_MASK_POS_6 0x20
-#define DA9052_IRQ_MASK_POS_7 0x40
-#define DA9052_IRQ_MASK_POS_8 0x80
-
static bool da9052_reg_readable(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -425,15 +414,6 @@ err:
}
EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
-static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
-{
- struct da9052 *da9052 = irq_data;
-
- complete(&da9052->done);
-
- return IRQ_HANDLED;
-}
-
int da9052_adc_read_temp(struct da9052 *da9052)
{
int tbat;
@@ -447,74 +427,6 @@ int da9052_adc_read_temp(struct da9052 *da9052)
}
EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
-static struct resource da9052_rtc_resource = {
- .name = "ALM",
- .start = DA9052_IRQ_ALARM,
- .end = DA9052_IRQ_ALARM,
- .flags = IORESOURCE_IRQ,
-};
-
-static struct resource da9052_onkey_resource = {
- .name = "ONKEY",
- .start = DA9052_IRQ_NONKEY,
- .end = DA9052_IRQ_NONKEY,
- .flags = IORESOURCE_IRQ,
-};
-
-static struct resource da9052_bat_resources[] = {
- {
- .name = "BATT TEMP",
- .start = DA9052_IRQ_TBAT,
- .end = DA9052_IRQ_TBAT,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "DCIN DET",
- .start = DA9052_IRQ_DCIN,
- .end = DA9052_IRQ_DCIN,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "DCIN REM",
- .start = DA9052_IRQ_DCINREM,
- .end = DA9052_IRQ_DCINREM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "VBUS DET",
- .start = DA9052_IRQ_VBUS,
- .end = DA9052_IRQ_VBUS,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "VBUS REM",
- .start = DA9052_IRQ_VBUSREM,
- .end = DA9052_IRQ_VBUSREM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "CHG END",
- .start = DA9052_IRQ_CHGEND,
- .end = DA9052_IRQ_CHGEND,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource da9052_tsi_resources[] = {
- {
- .name = "PENDWN",
- .start = DA9052_IRQ_PENDOWN,
- .end = DA9052_IRQ_PENDOWN,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "TSIRDY",
- .start = DA9052_IRQ_TSIREADY,
- .end = DA9052_IRQ_TSIREADY,
- .flags = IORESOURCE_IRQ,
- },
-};
-
static struct mfd_cell da9052_subdev_info[] = {
{
.name = "da9052-regulator",
@@ -574,13 +486,9 @@ static struct mfd_cell da9052_subdev_info[] = {
},
{
.name = "da9052-onkey",
- .resources = &da9052_onkey_resource,
- .num_resources = 1,
},
{
.name = "da9052-rtc",
- .resources = &da9052_rtc_resource,
- .num_resources = 1,
},
{
.name = "da9052-gpio",
@@ -602,160 +510,15 @@ static struct mfd_cell da9052_subdev_info[] = {
},
{
.name = "da9052-tsi",
- .resources = da9052_tsi_resources,
- .num_resources = ARRAY_SIZE(da9052_tsi_resources),
},
{
.name = "da9052-bat",
- .resources = da9052_bat_resources,
- .num_resources = ARRAY_SIZE(da9052_bat_resources),
},
{
.name = "da9052-watchdog",
},
};
-static struct regmap_irq da9052_irqs[] = {
- [DA9052_IRQ_DCIN] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_VBUS] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_DCINREM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_VBUSREM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_VDDLOW] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_ALARM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_SEQRDY] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_COMP1V2] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_NONKEY] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_IDFLOAT] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_IDGND] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_CHGEND] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_TBAT] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_ADC_EOM] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_PENDOWN] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_TSIREADY] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_GPI0] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_GPI1] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_GPI2] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_GPI3] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_GPI4] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_GPI5] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_GPI6] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_GPI7] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_GPI8] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_GPI9] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_GPI10] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_GPI11] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_GPI12] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_GPI13] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_GPI14] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_GPI15] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
-};
-
-static struct regmap_irq_chip da9052_regmap_irq_chip = {
- .name = "da9052_irq",
- .status_base = DA9052_EVENT_A_REG,
- .mask_base = DA9052_IRQ_MASK_A_REG,
- .ack_base = DA9052_EVENT_A_REG,
- .num_regs = DA9052_NUM_IRQ_REGS,
- .irqs = da9052_irqs,
- .num_irqs = ARRAY_SIZE(da9052_irqs),
-};
-
struct regmap_config da9052_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -782,45 +545,31 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
da9052->chip_id = chip_id;
- if (!pdata || !pdata->irq_base)
- da9052->irq_base = -1;
- else
- da9052->irq_base = pdata->irq_base;
-
- ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- da9052->irq_base, &da9052_regmap_irq_chip,
- &da9052->irq_data);
- if (ret < 0)
- goto regmap_err;
-
- da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
-
- ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "adc irq", da9052);
- if (ret != 0)
- dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+ ret = da9052_irq_init(da9052);
+ if (ret != 0) {
+ dev_err(da9052->dev, "da9052_irq_init failed: %d\n", ret);
+ return ret;
+ }
ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
- if (ret)
+ if (ret) {
+ dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
goto err;
+ }
return 0;
err:
- free_irq(DA9052_IRQ_ADC_EOM, da9052);
- mfd_remove_devices(da9052->dev);
-regmap_err:
+ da9052_irq_exit(da9052);
+
return ret;
}
void da9052_device_exit(struct da9052 *da9052)
{
- free_irq(DA9052_IRQ_ADC_EOM, da9052);
- regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
mfd_remove_devices(da9052->dev);
+ da9052_irq_exit(da9052);
}
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index ac74a4d1daea..885e56780358 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -27,6 +27,66 @@
#include <linux/of_device.h>
#endif
+/* I2C safe register check */
+static inline bool i2c_safe_reg(unsigned char reg)
+{
+ switch (reg) {
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * There is an issue with the DA9052 and DA9053_AA/BA/BB PMICs where the
+ * PMIC gets locked up or fails to respond following a system reset.
+ * The fix is to follow any read or write with a dummy read of a safe
+ * register.
+ */
+int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
+{
+ int val;
+
+ switch (da9052->chip_id) {
+ case DA9052:
+ case DA9053_AA:
+ case DA9053_BA:
+ case DA9053_BB:
+ /* A dummy read to a safe register address. */
+ if (!i2c_safe_reg(reg))
+ return regmap_read(da9052->regmap,
+ DA9052_PARK_REGISTER,
+ &val);
+ break;
+ default:
+ /*
+ * For other chips parking of I2C register
+ * to a safe place is not required.
+ */
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(da9052_i2c_fix);
+
static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
{
int reg_val, ret;
@@ -83,6 +143,7 @@ static int da9052_i2c_probe(struct i2c_client *client,
da9052->dev = &client->dev;
da9052->chip_irq = client->irq;
+ da9052->fix_io = da9052_i2c_fix;
i2c_set_clientdata(client, da9052);
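The da9052_i2c_fix() helper added above is installed as da9052->fix_io, so the DA9052 core can chain a dummy read of DA9052_PARK_REGISTER after every register access on the affected chips. Below is a hedged sketch of how such a wrapper in the core might use the hook; the function name is invented for illustration and is not part of this patch.

/*
 * Hedged sketch (not from this patch): how a register-read wrapper in the
 * DA9052 core might chain the fix_io hook after a transfer on affected
 * silicon.  da9052_example_reg_read() is a made-up name.
 */
static int da9052_example_reg_read(struct da9052 *da9052, unsigned char reg)
{
	unsigned int val;
	int ret;

	ret = regmap_read(da9052->regmap, reg, &val);
	if (ret < 0)
		return ret;

	if (da9052->fix_io) {
		/* Dummy read of a safe register to keep the PMIC responsive */
		ret = da9052->fix_io(da9052, reg);
		if (ret < 0)
			return ret;
	}

	return val;
}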
diff --git a/drivers/mfd/da9052-irq.c b/drivers/mfd/da9052-irq.c
new file mode 100644
index 000000000000..57ae7841f536
--- /dev/null
+++ b/drivers/mfd/da9052-irq.c
@@ -0,0 +1,288 @@
+/*
+ * DA9052 interrupt support
+ *
+ * Author: Fabio Estevam <fabio.estevam@freescale.com>
+ * Based on arizona-irq.c, which is:
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_NUM_IRQ_REGS 4
+#define DA9052_IRQ_MASK_POS_1 0x01
+#define DA9052_IRQ_MASK_POS_2 0x02
+#define DA9052_IRQ_MASK_POS_3 0x04
+#define DA9052_IRQ_MASK_POS_4 0x08
+#define DA9052_IRQ_MASK_POS_5 0x10
+#define DA9052_IRQ_MASK_POS_6 0x20
+#define DA9052_IRQ_MASK_POS_7 0x40
+#define DA9052_IRQ_MASK_POS_8 0x80
+
+static struct regmap_irq da9052_irqs[] = {
+ [DA9052_IRQ_DCIN] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_VBUS] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_DCINREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_VBUSREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_VDDLOW] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ALARM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_SEQRDY] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_COMP1V2] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_NONKEY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_IDFLOAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_IDGND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_CHGEND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_TBAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ADC_EOM] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_PENDOWN] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_TSIREADY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI0] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI1] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI2] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI3] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI4] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI5] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI6] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI7] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI8] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI9] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI10] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI11] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI12] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI13] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI14] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI15] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+};
+
+static struct regmap_irq_chip da9052_regmap_irq_chip = {
+ .name = "da9052_irq",
+ .status_base = DA9052_EVENT_A_REG,
+ .mask_base = DA9052_IRQ_MASK_A_REG,
+ .ack_base = DA9052_EVENT_A_REG,
+ .num_regs = DA9052_NUM_IRQ_REGS,
+ .irqs = da9052_irqs,
+ .num_irqs = ARRAY_SIZE(da9052_irqs),
+};
+
+static int da9052_map_irq(struct da9052 *da9052, int irq)
+{
+ return regmap_irq_get_virq(da9052->irq_data, irq);
+}
+
+int da9052_enable_irq(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ enable_irq(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_enable_irq);
+
+int da9052_disable_irq(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ disable_irq(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_disable_irq);
+
+int da9052_disable_irq_nosync(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ disable_irq_nosync(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_disable_irq_nosync);
+
+int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ name, data);
+}
+EXPORT_SYMBOL_GPL(da9052_request_irq);
+
+void da9052_free_irq(struct da9052 *da9052, int irq, void *data)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+EXPORT_SYMBOL_GPL(da9052_free_irq);
+
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+ struct da9052 *da9052 = irq_data;
+
+ complete(&da9052->done);
+
+ return IRQ_HANDLED;
+}
+
+int da9052_irq_init(struct da9052 *da9052)
+{
+ int ret;
+
+ ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ -1, &da9052_regmap_irq_chip,
+ &da9052->irq_data);
+ if (ret < 0) {
+ dev_err(da9052->dev, "regmap_add_irq_chip failed: %d\n", ret);
+ goto regmap_err;
+ }
+
+ ret = da9052_request_irq(da9052, DA9052_IRQ_ADC_EOM, "adc-irq",
+ da9052_auxadc_irq, da9052);
+
+ if (ret != 0) {
+ dev_err(da9052->dev, "DA9052_IRQ_ADC_EOM failed: %d\n", ret);
+ goto request_irq_err;
+ }
+
+ return 0;
+
+request_irq_err:
+ regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
+regmap_err:
+ return ret;
+
+}
+
+int da9052_irq_exit(struct da9052 *da9052)
+{
+ da9052_free_irq(da9052, DA9052_IRQ_ADC_EOM, da9052);
+ regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
+
+ return 0;
+}
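With the IRQ resources dropped from the MFD cells, sub-drivers are expected to go through the exported da9052_request_irq()/da9052_free_irq() helpers above, which translate DA9052 interrupt numbers into virtual IRQs via the regmap IRQ domain. A hedged usage sketch follows; the handler and probe names are placeholders, not taken from the patch.

/*
 * Hedged usage sketch (not part of this patch): a DA9052 sub-driver
 * requesting one of its interrupts by DA9052 IRQ number.  The function
 * names are placeholders.
 */
static irqreturn_t example_alarm_irq(int irq, void *data)
{
	/* ... handle the event ... */
	return IRQ_HANDLED;
}

static int example_cell_probe(struct platform_device *pdev)
{
	struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
	int ret;

	ret = da9052_request_irq(da9052, DA9052_IRQ_ALARM, "ALM",
				 example_alarm_irq, da9052);
	if (ret < 0)
		dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);

	return ret;
}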
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 29710565a08f..a2bacf95b59e 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -26,22 +26,18 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
+#include <linux/irqchip/arm-gic.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <linux/cpufreq.h>
-#include <asm/hardware/gic.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
-#include <mach/id.h>
#include "dbx500-prcmu-regs.h"
-/* Offset for the firmware version within the TCPM */
-#define PRCMU_FW_VERSION_OFFSET 0xA4
-
/* Index of different voltages to be used when accessing AVSData */
#define PRCM_AVS_BASE 0x2FC
#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
@@ -216,10 +212,8 @@
#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
-#define PRCMU_I2C_WRITE(slave) \
- (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
-#define PRCMU_I2C_READ(slave) \
- (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
+#define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6))
+#define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6))
#define PRCMU_I2C_STOP_EN BIT(3)
/* Mailbox 5 ACKs */
@@ -1049,12 +1043,13 @@ int db8500_prcmu_get_ddr_opp(void)
*
* This function sets the operating point of the DDR.
*/
+static bool enable_set_ddr_opp;
int db8500_prcmu_set_ddr_opp(u8 opp)
{
if (opp < DDR_100_OPP || opp > DDR_25_OPP)
return -EINVAL;
/* Changing the DDR OPP can hang the hardware pre-v21 */
- if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
+ if (enable_set_ddr_opp)
writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
return 0;
@@ -2524,7 +2519,7 @@ static bool read_mailbox_0(void)
for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
if (ev & prcmu_irq_bit[n])
- generic_handle_irq(IRQ_PRCMU_BASE + n);
+ generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
}
r = true;
break;
@@ -2706,21 +2701,43 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
-static char *fw_project_name(u8 project)
+static __init char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
return "U8500";
- case PRCMU_FW_PROJECT_U8500_C2:
- return "U8500 C2";
+ case PRCMU_FW_PROJECT_U8400:
+ return "U8400";
case PRCMU_FW_PROJECT_U9500:
return "U9500";
- case PRCMU_FW_PROJECT_U9500_C2:
- return "U9500 C2";
+ case PRCMU_FW_PROJECT_U8500_MBB:
+ return "U8500 MBB";
+ case PRCMU_FW_PROJECT_U8500_C1:
+ return "U8500 C1";
+ case PRCMU_FW_PROJECT_U8500_C2:
+ return "U8500 C2";
+ case PRCMU_FW_PROJECT_U8500_C3:
+ return "U8500 C3";
+ case PRCMU_FW_PROJECT_U8500_C4:
+ return "U8500 C4";
+ case PRCMU_FW_PROJECT_U9500_MBL:
+ return "U9500 MBL";
+ case PRCMU_FW_PROJECT_U8500_MBL:
+ return "U8500 MBL";
+ case PRCMU_FW_PROJECT_U8500_MBL2:
+ return "U8500 MBL2";
case PRCMU_FW_PROJECT_U8520:
- return "U8520";
+ return "U8520 MBL";
case PRCMU_FW_PROJECT_U8420:
return "U8420";
+ case PRCMU_FW_PROJECT_U9540:
+ return "U9540";
+ case PRCMU_FW_PROJECT_A9420:
+ return "A9420";
+ case PRCMU_FW_PROJECT_L8540:
+ return "L8540";
+ case PRCMU_FW_PROJECT_L8580:
+ return "L8580";
default:
return "Unknown";
}
@@ -2737,13 +2754,14 @@ static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
}
static struct irq_domain_ops db8500_irq_ops = {
- .map = db8500_irq_map,
- .xlate = irq_domain_xlate_twocell,
+ .map = db8500_irq_map,
+ .xlate = irq_domain_xlate_twocell,
};
static int db8500_irq_init(struct device_node *np)
{
- int irq_base = -1;
+ int irq_base = 0;
+ int i;
/* In the device tree case, just take some IRQs */
if (!np)
@@ -2758,35 +2776,51 @@ static int db8500_irq_init(struct device_node *np)
return -ENOSYS;
}
+ /* All wakeups will be used, so create mappings for all */
+ for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
+ irq_create_mapping(db8500_irq_domain, i);
+
return 0;
}
-void __init db8500_prcmu_early_init(void)
+static void dbx500_fw_version_init(struct platform_device *pdev,
+ u32 version_offset)
{
- if (cpu_is_u8500v2()) {
- void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
-
- if (tcpm_base != NULL) {
- u32 version;
- version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
- fw_info.version.project = version & 0xFF;
- fw_info.version.api_version = (version >> 8) & 0xFF;
- fw_info.version.func_version = (version >> 16) & 0xFF;
- fw_info.version.errata = (version >> 24) & 0xFF;
- fw_info.valid = true;
- pr_info("PRCMU firmware: %s, version %d.%d.%d\n",
- fw_project_name(fw_info.version.project),
- (version >> 8) & 0xFF, (version >> 16) & 0xFF,
- (version >> 24) & 0xFF);
- iounmap(tcpm_base);
- }
+ struct resource *res;
+ void __iomem *tcpm_base;
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
- } else {
- pr_err("prcmu: Unsupported chip version\n");
- BUG();
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "prcmu-tcpm");
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Error: no prcmu tcpm memory region provided\n");
+ return;
+ }
+ tcpm_base = ioremap(res->start, resource_size(res));
+ if (tcpm_base != NULL) {
+ u32 version;
+
+ version = readl(tcpm_base + version_offset);
+ fw_info.version.project = (version & 0xFF);
+ fw_info.version.api_version = (version >> 8) & 0xFF;
+ fw_info.version.func_version = (version >> 16) & 0xFF;
+ fw_info.version.errata = (version >> 24) & 0xFF;
+ strncpy(fw_info.version.project_name,
+ fw_project_name(fw_info.version.project),
+ PRCMU_FW_PROJECT_NAME_LEN);
+ fw_info.valid = true;
+ pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n",
+ fw_info.version.project_name,
+ fw_info.version.project,
+ fw_info.version.api_version,
+ fw_info.version.func_version,
+ fw_info.version.errata);
+ iounmap(tcpm_base);
}
+}
+void __init db8500_prcmu_early_init(void)
+{
spin_lock_init(&mb0_transfer.lock);
spin_lock_init(&mb0_transfer.dbb_irqs_lock);
mutex_init(&mb0_transfer.ac_wake_lock);
@@ -3068,8 +3102,8 @@ static struct mfd_cell db8500_prcmu_devs[] = {
.pdata_size = sizeof(db8500_regulators),
},
{
- .name = "cpufreq-u8500",
- .of_compatible = "stericsson,cpufreq-u8500",
+ .name = "cpufreq-ux500",
+ .of_compatible = "stericsson,cpufreq-ux500",
.platform_data = &db8500_cpufreq_table,
.pdata_size = sizeof(db8500_cpufreq_table),
},
@@ -3096,23 +3130,30 @@ static void db8500_prcmu_update_cpufreq(void)
*/
static int db8500_prcmu_probe(struct platform_device *pdev)
{
- struct ab8500_platform_data *ab8500_platdata = pdev->dev.platform_data;
struct device_node *np = pdev->dev.of_node;
+ struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq = 0, err = 0, i;
-
- if (ux500_is_svp())
- return -ENODEV;
+ struct resource *res;
init_prcm_registers();
+ dbx500_fw_version_init(pdev, pdata->version_offset);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
+ if (!res) {
+ dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
+ return -ENOENT;
+ }
+ tcdm_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
/* Clean up the mailbox interrupts after pre-kernel code. */
writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
- if (np)
- irq = platform_get_irq(pdev, 0);
-
- if (!np || irq <= 0)
- irq = IRQ_DB8500_PRCMU1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "no prcmu irq provided\n");
+ return -ENOENT;
+ }
err = request_threaded_irq(irq, prcmu_irq_handler,
prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
@@ -3126,13 +3167,12 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(db8500_prcmu_devs); i++) {
if (!strcmp(db8500_prcmu_devs[i].name, "ab8500-core")) {
- db8500_prcmu_devs[i].platform_data = ab8500_platdata;
+ db8500_prcmu_devs[i].platform_data = pdata->ab_platdata;
db8500_prcmu_devs[i].pdata_size = sizeof(struct ab8500_platform_data);
}
}
- if (cpu_is_u8500v20_or_later())
- prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+ prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
db8500_prcmu_update_cpufreq();
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index ab8d0b2739b2..1804331bd52c 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -424,11 +425,9 @@ static int intel_msic_probe(struct platform_device *pdev)
return -ENODEV;
}
- msic->irq_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!msic->irq_base) {
- dev_err(&pdev->dev, "failed to map SRAM memory\n");
- return -ENOMEM;
- }
+ msic->irq_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msic->irq_base))
+ return PTR_ERR(msic->irq_base);
platform_set_drvdata(pdev, msic);
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 0b8b55bb9b11..e80587f1a792 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -211,7 +211,7 @@ static int jz4740_adc_probe(struct platform_device *pdev)
int ret;
int irq_base;
- adc = kmalloc(sizeof(*adc), GFP_KERNEL);
+ adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
if (!adc) {
dev_err(&pdev->dev, "Failed to allocate driver structure\n");
return -ENOMEM;
@@ -221,30 +221,27 @@ static int jz4740_adc_probe(struct platform_device *pdev)
if (adc->irq < 0) {
ret = adc->irq;
dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
- goto err_free;
+ return ret;
}
irq_base = platform_get_irq(pdev, 1);
if (irq_base < 0) {
- ret = irq_base;
- dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
- goto err_free;
+ dev_err(&pdev->dev, "Failed to get irq base: %d\n", irq_base);
+ return irq_base;
}
mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_base) {
- ret = -ENOENT;
dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
- goto err_free;
+ return -ENOENT;
}
/* Only request the shared registers for the MFD driver */
adc->mem = request_mem_region(mem_base->start, JZ_REG_ADC_STATUS,
pdev->name);
if (!adc->mem) {
- ret = -EBUSY;
dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- goto err_free;
+ return -EBUSY;
}
adc->base = ioremap_nocache(adc->mem->start, resource_size(adc->mem));
@@ -301,9 +298,6 @@ err_iounmap:
iounmap(adc->base);
err_release_mem_region:
release_mem_region(adc->mem->start, resource_size(adc->mem));
-err_free:
- kfree(adc);
-
return ret;
}
@@ -325,8 +319,6 @@ static int jz4740_adc_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- kfree(adc);
-
return 0;
}
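
The jz4740-adc change works because devm_kzalloc() ties the allocation's lifetime to the device, which is what lets the err_free label and the kfree() in remove() go away. A generic sketch of the pattern (example_* names are placeholders):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	int irq;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	/* lifetime follows the device: no kfree() in error paths or remove() */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;	/* memory is released automatically */

	platform_set_drvdata(pdev, priv);
	return 0;
}
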
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 2ad24caa07db..d9d930302e98 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -734,7 +734,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev,
pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
lpc_ich_cells[LPC_GPIO].num_resources--;
goto gpe0_done;
}
@@ -760,7 +760,7 @@ gpe0_done:
pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
ret = -ENODEV;
goto gpio_done;
}
@@ -810,7 +810,7 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
ret = -ENODEV;
goto wdt_done;
}
@@ -830,12 +830,15 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
* we have to read RCBA from PCI Config space 0xf0 and use
* it as base. GCS = RCBA + ICH6_GCS(0x3410).
*/
- if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+ if (lpc_chipset_info[id->driver_data].iTCO_version == 1) {
+ /* Don't register iomem for TCO ver 1 */
+ lpc_ich_cells[LPC_WDT].num_resources--;
+ } else {
pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0xffffc000;
if (!(base_addr_cfg & 1)) {
- pr_err("RCBA is disabled by hardware/BIOS, "
- "device disabled\n");
+ dev_notice(&dev->dev, "RCBA is disabled by "
+ "hardware/BIOS, device disabled\n");
ret = -ENODEV;
goto wdt_done;
}
@@ -871,6 +874,7 @@ static int lpc_ich_probe(struct pci_dev *dev,
* successfully.
*/
if (!cell_added) {
+ dev_warn(&dev->dev, "No MFD cells added\n");
lpc_ich_restore_config_space(dev);
return -ENODEV;
}
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index f6878f8db57d..4d73963cd8f0 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -93,15 +93,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
if (max77686 == NULL)
return -ENOMEM;
- max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
- if (IS_ERR(max77686->regmap)) {
- ret = PTR_ERR(max77686->regmap);
- dev_err(max77686->dev, "Failed to allocate register map: %d\n",
- ret);
- kfree(max77686);
- return ret;
- }
-
i2c_set_clientdata(i2c, max77686);
max77686->dev = &i2c->dev;
max77686->i2c = i2c;
@@ -111,6 +102,15 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
max77686->irq_gpio = pdata->irq_gpio;
max77686->irq = i2c->irq;
+ max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
+ if (IS_ERR(max77686->regmap)) {
+ ret = PTR_ERR(max77686->regmap);
+ dev_err(max77686->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(max77686);
+ return ret;
+ }
+
if (regmap_read(max77686->regmap,
MAX77686_REG_DEVICE_ID, &data) < 0) {
dev_err(max77686->dev,
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cc5155e20494..9e60fed5ff82 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -114,35 +114,37 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
u8 reg_data;
int ret = 0;
+ if (!pdata) {
+ dev_err(&i2c->dev, "No platform data found.\n");
+ return -EINVAL;
+ }
+
max77693 = devm_kzalloc(&i2c->dev,
sizeof(struct max77693_dev), GFP_KERNEL);
if (max77693 == NULL)
return -ENOMEM;
- max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
- if (IS_ERR(max77693->regmap)) {
- ret = PTR_ERR(max77693->regmap);
- dev_err(max77693->dev,"failed to allocate register map: %d\n",
- ret);
- goto err_regmap;
- }
-
i2c_set_clientdata(i2c, max77693);
max77693->dev = &i2c->dev;
max77693->i2c = i2c;
max77693->irq = i2c->irq;
max77693->type = id->driver_data;
- if (!pdata)
- goto err_regmap;
+ max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+ if (IS_ERR(max77693->regmap)) {
+ ret = PTR_ERR(max77693->regmap);
+ dev_err(max77693->dev, "failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
max77693->wakeup = pdata->wakeup;
- if (max77693_read_reg(max77693->regmap,
- MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+ ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
+ &reg_data);
+ if (ret < 0) {
dev_err(max77693->dev, "device not found on this channel\n");
- ret = -ENODEV;
- goto err_regmap;
+ return ret;
} else
dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
@@ -163,7 +165,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
ret = PTR_ERR(max77693->regmap_muic);
dev_err(max77693->dev,
"failed to allocate register map: %d\n", ret);
- goto err_regmap;
+ goto err_regmap_muic;
}
ret = max77693_irq_init(max77693);
@@ -184,9 +186,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
err_mfd:
max77693_irq_exit(max77693);
err_irq:
+err_regmap_muic:
i2c_unregister_device(max77693->muic);
i2c_unregister_device(max77693->haptic);
-err_regmap:
return ret;
}
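
Both the max77686 and max77693 hunks reorder probe so that the pdata check and the ->dev assignment happen before regmap initialisation; otherwise the dev_err(chip->dev, ...) in the regmap error path would use a pointer that has not been set yet. A stripped-down sketch of the resulting order (example_* names are placeholders):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct example_chip {
	struct device *dev;
	struct regmap *regmap;
};

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int example_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *id)
{
	struct example_chip *chip;

	chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* fill in ->dev first so any later dev_err(chip->dev, ...) is valid */
	chip->dev = &i2c->dev;
	i2c_set_clientdata(i2c, chip);

	chip->regmap = devm_regmap_init_i2c(i2c, &example_regmap_config);
	if (IS_ERR(chip->regmap)) {
		dev_err(chip->dev, "failed to allocate register map\n");
		return PTR_ERR(chip->regmap);
	}

	return 0;
}
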
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index abd5c80c7cf5..14714058f2d2 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -50,7 +50,7 @@ static struct mfd_cell max8997_devs[] = {
};
#ifdef CONFIG_OF
-static struct of_device_id __devinitdata max8997_pmic_dt_match[] = {
+static struct of_device_id max8997_pmic_dt_match[] = {
{ .compatible = "maxim,max8997-pmic", .data = TYPE_MAX8997 },
{},
};
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 1aba0238f426..2a9b100c4825 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -119,6 +119,11 @@
#define MC13XXX_REVISION_FAB (0x03 << 11)
#define MC13XXX_REVISION_ICIDCODE (0x3f << 13)
+#define MC34708_REVISION_REVMETAL (0x07 << 0)
+#define MC34708_REVISION_REVFULL (0x07 << 3)
+#define MC34708_REVISION_FIN (0x07 << 6)
+#define MC34708_REVISION_FAB (0x07 << 9)
+
#define MC13XXX_ADC1 44
#define MC13XXX_ADC1_ADEN (1 << 0)
#define MC13XXX_ADC1_RAND (1 << 1)
@@ -410,62 +415,52 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
return IRQ_RETVAL(handled);
}
-static const char *mc13xxx_chipname[] = {
- [MC13XXX_ID_MC13783] = "mc13783",
- [MC13XXX_ID_MC13892] = "mc13892",
-};
-
#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx)
+static void mc13xxx_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
- u32 icid;
- u32 revision;
- int ret;
-
- /*
- * Get the generation ID from register 46, as apparently some older
- * IC revisions only have this info at this location. Newer ICs seem to
- * have both.
- */
- ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
- if (ret)
- return ret;
+ dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
+ "fin: %d, fab: %d, icid: %d/%d\n",
+ mc13xxx->variant->name,
+ maskval(revision, MC13XXX_REVISION_REVFULL),
+ maskval(revision, MC13XXX_REVISION_REVMETAL),
+ maskval(revision, MC13XXX_REVISION_FIN),
+ maskval(revision, MC13XXX_REVISION_FAB),
+ maskval(revision, MC13XXX_REVISION_ICID),
+ maskval(revision, MC13XXX_REVISION_ICIDCODE));
+}
- icid = (icid >> 6) & 0x7;
+static void mc34708_print_revision(struct mc13xxx *mc13xxx, u32 revision)
+{
+ dev_info(mc13xxx->dev, "%s: rev %d.%d, fin: %d, fab: %d\n",
+ mc13xxx->variant->name,
+ maskval(revision, MC34708_REVISION_REVFULL),
+ maskval(revision, MC34708_REVISION_REVMETAL),
+ maskval(revision, MC34708_REVISION_FIN),
+ maskval(revision, MC34708_REVISION_FAB));
+}
- switch (icid) {
- case 2:
- mc13xxx->ictype = MC13XXX_ID_MC13783;
- break;
- case 7:
- mc13xxx->ictype = MC13XXX_ID_MC13892;
- break;
- default:
- mc13xxx->ictype = MC13XXX_ID_INVALID;
- break;
- }
+/* These are only exported for mc13xxx-i2c and mc13xxx-spi */
+struct mc13xxx_variant mc13xxx_variant_mc13783 = {
+ .name = "mc13783",
+ .print_revision = mc13xxx_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc13783);
- if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
- mc13xxx->ictype == MC13XXX_ID_MC13892) {
- ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
-
- dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
- "fin: %d, fab: %d, icid: %d/%d\n",
- mc13xxx_chipname[mc13xxx->ictype],
- maskval(revision, MC13XXX_REVISION_REVFULL),
- maskval(revision, MC13XXX_REVISION_REVMETAL),
- maskval(revision, MC13XXX_REVISION_FIN),
- maskval(revision, MC13XXX_REVISION_FAB),
- maskval(revision, MC13XXX_REVISION_ICID),
- maskval(revision, MC13XXX_REVISION_ICIDCODE));
- }
+struct mc13xxx_variant mc13xxx_variant_mc13892 = {
+ .name = "mc13892",
+ .print_revision = mc13xxx_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc13892);
- return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
-}
+struct mc13xxx_variant mc13xxx_variant_mc34708 = {
+ .name = "mc34708",
+ .print_revision = mc34708_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc34708);
static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
{
- return mc13xxx_chipname[mc13xxx->ictype];
+ return mc13xxx->variant->name;
}
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -653,13 +648,16 @@ int mc13xxx_common_init(struct mc13xxx *mc13xxx,
struct mc13xxx_platform_data *pdata, int irq)
{
int ret;
+ u32 revision;
mc13xxx_lock(mc13xxx);
- ret = mc13xxx_identify(mc13xxx);
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
if (ret)
goto err_revision;
+ mc13xxx->variant->print_revision(mc13xxx, revision);
+
/* mask all irqs */
ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK0, 0x00ffffff);
if (ret)
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 7957999f30bb..f745e27ee874 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -24,7 +24,10 @@
static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
{
.name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
+ }, {
+ .name = "mc34708",
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -34,7 +37,10 @@ MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
static const struct of_device_id mc13xxx_dt_ids[] = {
{
.compatible = "fsl,mc13892",
- .data = (void *) &mc13xxx_i2c_device_id[0],
+ .data = &mc13xxx_variant_mc13892,
+ }, {
+ .compatible = "fsl,mc34708",
+ .data = &mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -76,11 +82,15 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
return ret;
}
- ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+ if (client->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(mc13xxx_dt_ids, &client->dev);
+ mc13xxx->variant = of_id->data;
+ } else {
+ mc13xxx->variant = (void *)id->driver_data;
+ }
- if (ret == 0 && (id->driver_data != mc13xxx->ictype))
- dev_warn(mc13xxx->dev,
- "device id doesn't match auto detection!\n");
+ ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
return ret;
}
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index cb32f69d80ba..3032bae20b62 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -28,10 +28,13 @@
static const struct spi_device_id mc13xxx_device_id[] = {
{
.name = "mc13783",
- .driver_data = MC13XXX_ID_MC13783,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13783,
}, {
.name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
+ }, {
+ .name = "mc34708",
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -39,8 +42,9 @@ static const struct spi_device_id mc13xxx_device_id[] = {
MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
static const struct of_device_id mc13xxx_dt_ids[] = {
- { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
- { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+ { .compatible = "fsl,mc13783", .data = &mc13xxx_variant_mc13783, },
+ { .compatible = "fsl,mc13892", .data = &mc13xxx_variant_mc13892, },
+ { .compatible = "fsl,mc34708", .data = &mc13xxx_variant_mc34708, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
@@ -144,19 +148,18 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
return ret;
}
- ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+ if (spi->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(mc13xxx_dt_ids, &spi->dev);
- if (ret) {
- dev_set_drvdata(&spi->dev, NULL);
+ mc13xxx->variant = of_id->data;
} else {
- const struct spi_device_id *devid =
- spi_get_device_id(spi);
- if (!devid || devid->driver_data != mc13xxx->ictype)
- dev_warn(mc13xxx->dev,
- "device id doesn't match auto detection!\n");
+ const struct spi_device_id *id_entry = spi_get_device_id(spi);
+
+ mc13xxx->variant = (void *)id_entry->driver_data;
}
- return ret;
+ return mc13xxx_common_init(mc13xxx, pdata, spi->irq);
}
static int mc13xxx_spi_remove(struct spi_device *spi)
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
index bbba06feea06..460ec5c7b18c 100644
--- a/drivers/mfd/mc13xxx.h
+++ b/drivers/mfd/mc13xxx.h
@@ -13,19 +13,25 @@
#include <linux/regmap.h>
#include <linux/mfd/mc13xxx.h>
-enum mc13xxx_id {
- MC13XXX_ID_MC13783,
- MC13XXX_ID_MC13892,
- MC13XXX_ID_INVALID,
+#define MC13XXX_NUMREGS 0x3f
+
+struct mc13xxx;
+
+struct mc13xxx_variant {
+ const char *name;
+ void (*print_revision)(struct mc13xxx *mc13xxx, u32 revision);
};
-#define MC13XXX_NUMREGS 0x3f
+extern struct mc13xxx_variant
+ mc13xxx_variant_mc13783,
+ mc13xxx_variant_mc13892,
+ mc13xxx_variant_mc34708;
struct mc13xxx {
struct regmap *regmap;
struct device *dev;
- enum mc13xxx_id ictype;
+ const struct mc13xxx_variant *variant;
struct mutex lock;
int irq;
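
The mc13xxx rework replaces the enum chip ID with a pointer to a variant structure carried in driver_data, so both the OF match table and the legacy ID table can hand the probe function per-chip callbacks directly. A condensed sketch of that pattern outside the mc13xxx code (all example_* and vendor,* names are hypothetical):

#include <linux/i2c.h>
#include <linux/of_device.h>

struct example_variant {
	const char *name;
	void (*print_revision)(struct device *dev, u32 rev);
};

static struct example_variant example_variant_a = { .name = "chip-a" };
static struct example_variant example_variant_b = { .name = "chip-b" };

static const struct i2c_device_id example_i2c_ids[] = {
	{ "chip-a", (kernel_ulong_t)&example_variant_a },
	{ "chip-b", (kernel_ulong_t)&example_variant_b },
	{ }
};

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,chip-a", .data = &example_variant_a },
	{ .compatible = "vendor,chip-b", .data = &example_variant_b },
	{ }
};

/* in probe: pick the variant from DT when available, else from the id table */
static const struct example_variant *
example_get_variant(struct i2c_client *client, const struct i2c_device_id *id)
{
	if (client->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(example_dt_ids, &client->dev);

		return of_id ? of_id->data : NULL;
	}
	return (void *)id->driver_data;
}
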
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index f8b77711ad2d..7604f4e5df40 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -21,6 +21,10 @@
#include <linux/irqdomain.h>
#include <linux/of.h>
+static struct device_type mfd_dev_type = {
+ .name = "mfd_device",
+};
+
int mfd_cell_enable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
@@ -91,6 +95,7 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_device;
pdev->dev.parent = parent;
+ pdev->dev.type = &mfd_dev_type;
if (parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
@@ -204,10 +209,16 @@ EXPORT_SYMBOL(mfd_add_devices);
static int mfd_remove_devices_fn(struct device *dev, void *c)
{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
+ struct platform_device *pdev;
+ const struct mfd_cell *cell;
atomic_t **usage_count = c;
+ if (dev->type != &mfd_dev_type)
+ return 0;
+
+ pdev = to_platform_device(dev);
+ cell = mfd_get_cell(pdev);
+
/* find the base address of usage_count pointers (for freeing) */
if (!*usage_count || (cell->usage_count < *usage_count))
*usage_count = cell->usage_count;
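
mfd-core now stamps every child it creates with mfd_dev_type so the device_for_each_child() callback used at removal can skip children added by other code on the same parent. A minimal sketch of that filtering idea (example_* names are placeholders):

#include <linux/device.h>
#include <linux/platform_device.h>

static struct device_type example_child_type = {
	.name = "example_child",
};

/* each child is tagged at creation time: pdev->dev.type = &example_child_type; */

/* callback for device_for_each_child(parent, NULL, example_remove_fn) */
static int example_remove_fn(struct device *dev, void *data)
{
	if (dev->type != &example_child_type)
		return 0;	/* not one of ours, leave it alone */

	platform_device_unregister(to_platform_device(dev));
	return 0;
}
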
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 770a0d01e0b9..05164d7f054b 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
-#include <plat/cpu.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/pm_runtime.h>
@@ -384,7 +383,7 @@ static void omap_usbhs_init(struct device *dev)
reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
/* Bypass the TLL module for PHY mode operation */
- if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
+ if (pdata->single_ulpi_bypass) {
dev_dbg(dev, "OMAP3 ES version <= ES2.1\n");
if (is_ehci_phy_mode(pdata->port_mode[0]) ||
is_ehci_phy_mode(pdata->port_mode[1]) ||
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 64803f13bcec..d11567307fbe 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -208,6 +208,8 @@ static int pcf50633_probe(struct i2c_client *client,
if (!pcf)
return -ENOMEM;
+ i2c_set_clientdata(client, pcf);
+ pcf->dev = &client->dev;
pcf->pdata = pdata;
mutex_init(&pcf->lock);
@@ -219,9 +221,6 @@ static int pcf50633_probe(struct i2c_client *client,
return ret;
}
- i2c_set_clientdata(client, pcf);
- pcf->dev = &client->dev;
-
version = pcf50633_reg_read(pcf, 0);
variant = pcf50633_reg_read(pcf, 1);
if (version < 0 || variant < 0) {
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index fe00cdd6f83d..b41db5968706 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -345,7 +345,7 @@ int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base)
mutex_init(&rc5t583->irq_lock);
	/* Initialize all interrupt registers to 0 */
- for (i = 0; i < RC5T583_MAX_INTERRUPT_MASK_REGS; i++) {
+ for (i = 0; i < RC5T583_MAX_INTERRUPT_EN_REGS; i++) {
ret = rc5t583_write(rc5t583->dev, irq_en_add[i],
rc5t583->irq_en_reg[i]);
if (ret < 0)
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
new file mode 100644
index 000000000000..3ba048655bf3
--- /dev/null
+++ b/drivers/mfd/retu-mfd.c
@@ -0,0 +1,263 @@
+/*
+ * Retu MFD driver
+ *
+ * Copyright (C) 2004, 2005 Nokia Corporation
+ *
+ * Based on code written by Juha Yrjölä, David Weinehall and Mikko Ylinen.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/retu.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+
+/* Registers */
+#define RETU_REG_ASICR 0x00 /* ASIC ID and revision */
+#define RETU_REG_ASICR_VILMA (1 << 7) /* Bit indicating Vilma */
+#define RETU_REG_IDR 0x01 /* Interrupt ID */
+#define RETU_REG_IMR 0x02 /* Interrupt mask */
+
+/* Interrupt sources */
+#define RETU_INT_PWR 0 /* Power button */
+
+struct retu_dev {
+ struct regmap *regmap;
+ struct device *dev;
+ struct mutex mutex;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static struct resource retu_pwrbutton_res[] = {
+ {
+ .name = "retu-pwrbutton",
+ .start = RETU_INT_PWR,
+ .end = RETU_INT_PWR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell retu_devs[] = {
+ {
+ .name = "retu-wdt"
+ },
+ {
+ .name = "retu-pwrbutton",
+ .resources = retu_pwrbutton_res,
+ .num_resources = ARRAY_SIZE(retu_pwrbutton_res),
+ }
+};
+
+static struct regmap_irq retu_irqs[] = {
+ [RETU_INT_PWR] = {
+ .mask = 1 << RETU_INT_PWR,
+ }
+};
+
+static struct regmap_irq_chip retu_irq_chip = {
+ .name = "RETU",
+ .irqs = retu_irqs,
+ .num_irqs = ARRAY_SIZE(retu_irqs),
+ .num_regs = 1,
+ .status_base = RETU_REG_IDR,
+ .mask_base = RETU_REG_IMR,
+ .ack_base = RETU_REG_IDR,
+};
+
+/* Retu device registered for the power off. */
+static struct retu_dev *retu_pm_power_off;
+
+int retu_read(struct retu_dev *rdev, u8 reg)
+{
+ int ret;
+ int value;
+
+ mutex_lock(&rdev->mutex);
+ ret = regmap_read(rdev->regmap, reg, &value);
+ mutex_unlock(&rdev->mutex);
+
+ return ret ? ret : value;
+}
+EXPORT_SYMBOL_GPL(retu_read);
+
+int retu_write(struct retu_dev *rdev, u8 reg, u16 data)
+{
+ int ret;
+
+ mutex_lock(&rdev->mutex);
+ ret = regmap_write(rdev->regmap, reg, data);
+ mutex_unlock(&rdev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(retu_write);
+
+static void retu_power_off(void)
+{
+ struct retu_dev *rdev = retu_pm_power_off;
+ int reg;
+
+ mutex_lock(&retu_pm_power_off->mutex);
+
+ /* Ignore power button state */
+ regmap_read(rdev->regmap, RETU_REG_CC1, &reg);
+ regmap_write(rdev->regmap, RETU_REG_CC1, reg | 2);
+
+ /* Expire watchdog immediately */
+ regmap_write(rdev->regmap, RETU_REG_WATCHDOG, 0);
+
+ /* Wait for poweroff */
+ for (;;)
+ cpu_relax();
+
+ mutex_unlock(&retu_pm_power_off->mutex);
+}
+
+static int retu_regmap_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int ret;
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ BUG_ON(reg_size != 1 || val_size != 2);
+
+ ret = i2c_smbus_read_word_data(i2c, *(u8 const *)reg);
+ if (ret < 0)
+ return ret;
+
+ *(u16 *)val = ret;
+ return 0;
+}
+
+static int retu_regmap_write(void *context, const void *data, size_t count)
+{
+ u8 reg;
+ u16 val;
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ BUG_ON(count != sizeof(reg) + sizeof(val));
+ memcpy(&reg, data, sizeof(reg));
+ memcpy(&val, data + sizeof(reg), sizeof(val));
+ return i2c_smbus_write_word_data(i2c, reg, val);
+}
+
+static struct regmap_bus retu_bus = {
+ .read = retu_regmap_read,
+ .write = retu_regmap_write,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static struct regmap_config retu_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+};
+
+static int retu_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ struct retu_dev *rdev;
+ int ret;
+
+ rdev = devm_kzalloc(&i2c->dev, sizeof(*rdev), GFP_KERNEL);
+ if (rdev == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rdev);
+ rdev->dev = &i2c->dev;
+ mutex_init(&rdev->mutex);
+ rdev->regmap = devm_regmap_init(&i2c->dev, &retu_bus, &i2c->dev,
+ &retu_config);
+ if (IS_ERR(rdev->regmap))
+ return PTR_ERR(rdev->regmap);
+
+ ret = retu_read(rdev, RETU_REG_ASICR);
+ if (ret < 0) {
+ dev_err(rdev->dev, "could not read Retu revision: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(rdev->dev, "Retu%s v%d.%d found\n",
+ (ret & RETU_REG_ASICR_VILMA) ? " & Vilma" : "",
+ (ret >> 4) & 0x7, ret & 0xf);
+
+ /* Mask all RETU interrupts. */
+ ret = retu_write(rdev, RETU_REG_IMR, 0xffff);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_add_irq_chip(rdev->regmap, i2c->irq, IRQF_ONESHOT, -1,
+ &retu_irq_chip, &rdev->irq_data);
+ if (ret < 0)
+ return ret;
+
+ ret = mfd_add_devices(rdev->dev, -1, retu_devs, ARRAY_SIZE(retu_devs),
+ NULL, regmap_irq_chip_get_base(rdev->irq_data),
+ NULL);
+ if (ret < 0) {
+ regmap_del_irq_chip(i2c->irq, rdev->irq_data);
+ return ret;
+ }
+
+ if (!pm_power_off) {
+ retu_pm_power_off = rdev;
+ pm_power_off = retu_power_off;
+ }
+
+ return 0;
+}
+
+static int retu_remove(struct i2c_client *i2c)
+{
+ struct retu_dev *rdev = i2c_get_clientdata(i2c);
+
+ if (retu_pm_power_off == rdev) {
+ pm_power_off = NULL;
+ retu_pm_power_off = NULL;
+ }
+ mfd_remove_devices(rdev->dev);
+ regmap_del_irq_chip(i2c->irq, rdev->irq_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id retu_id[] = {
+ { "retu-mfd", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, retu_id);
+
+static struct i2c_driver retu_driver = {
+ .driver = {
+ .name = "retu-mfd",
+ .owner = THIS_MODULE,
+ },
+ .probe = retu_probe,
+ .remove = retu_remove,
+ .id_table = retu_id,
+};
+module_i2c_driver(retu_driver);
+
+MODULE_DESCRIPTION("Retu MFD driver");
+MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("David Weinehall");
+MODULE_AUTHOR("Mikko Ylinen");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
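
A hypothetical consumer of the new retu-mfd core above, showing how one of its cells (for instance the retu-pwrbutton device) could reach the parent retu_dev and the interrupt mapped by the regmap-irq chip. This is only a sketch: it assumes <linux/mfd/retu.h> declares retu_read() as the core file suggests, and register 0x05 is an invented example.

#include <linux/interrupt.h>
#include <linux/mfd/retu.h>
#include <linux/platform_device.h>

static irqreturn_t example_retu_child_irq(int irq, void *data)
{
	struct platform_device *pdev = data;
	/* the parent i2c client carries the retu_dev via i2c_set_clientdata() */
	struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
	int val;

	val = retu_read(rdev, 0x05);	/* hypothetical status register */
	if (val >= 0)
		dev_dbg(&pdev->dev, "status: 0x%04x\n", val);

	return IRQ_HANDLED;
}

static int example_retu_child_probe(struct platform_device *pdev)
{
	/* virtual IRQ number provided through the regmap-irq mapping above */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 example_retu_child_irq, IRQF_ONESHOT,
					 "example-retu-child", pdev);
}
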
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
index 89f046ca9e41..3d3b4addf81a 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/mfd/rtl8411.c
@@ -112,6 +112,21 @@ static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
BPP_LDO_POWB, BPP_LDO_SUSPEND);
}
+static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 mask, val;
+
+ mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
+ if (voltage == OUTPUT_3V3)
+ val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
+ else if (voltage == OUTPUT_1V8)
+ val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
+ else
+ return -EINVAL;
+
+ return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
+}
+
static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
{
unsigned int card_exist;
@@ -163,6 +178,18 @@ static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
return card_exist;
}
+static int rtl8411_conv_clk_and_div_n(int input, int dir)
+{
+ int output;
+
+ if (dir == CLK_TO_DIV_N)
+ output = input * 4 / 5 - 2;
+ else
+ output = (input + 2) * 5 / 4;
+
+ return output;
+}
+
static const struct pcr_ops rtl8411_pcr_ops = {
.extra_init_hw = rtl8411_extra_init_hw,
.optimize_phy = NULL,
@@ -172,7 +199,9 @@ static const struct pcr_ops rtl8411_pcr_ops = {
.disable_auto_blink = rtl8411_disable_auto_blink,
.card_power_on = rtl8411_card_power_on,
.card_power_off = rtl8411_card_power_off,
+ .switch_output_voltage = rtl8411_switch_output_voltage,
.cd_deglitch = rtl8411_cd_deglitch,
+ .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
};
/* SD Pull Control Enable:
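
For reference, the rtl8411 conversion added above is plain integer arithmetic in both directions; a worked example (values invented) of the round trip the generic code relies on:

/*
 * CLK_TO_DIV_N:  N   = clk * 4 / 5 - 2    e.g. clk = 50 -> N = 50*4/5 - 2 = 38
 * DIV_N_TO_CLK:  clk = (N + 2) * 5 / 4    e.g. N  = 38 -> clk = (38+2)*5/4 = 50
 *
 * rtsx_pcr.c converts N back to a clock, doubles it, then converts again, so
 * the two directions have to stay exact inverses for the supported values.
 */
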
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
index 283a4f148084..98fe0f39463e 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/mfd/rts5209.c
@@ -144,6 +144,25 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
return rtsx_pci_send_cmd(pcr, 100);
}
+static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pcr_ops rts5209_pcr_ops = {
.extra_init_hw = rts5209_extra_init_hw,
.optimize_phy = rts5209_optimize_phy,
@@ -153,7 +172,9 @@ static const struct pcr_ops rts5209_pcr_ops = {
.disable_auto_blink = rts5209_disable_auto_blink,
.card_power_on = rts5209_card_power_on,
.card_power_off = rts5209_card_power_off,
+ .switch_output_voltage = rts5209_switch_output_voltage,
.cd_deglitch = NULL,
+ .conv_clk_and_div_n = NULL,
};
/* SD Pull Control Enable:
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
index b9dbab266fda..29d889cbb9c5 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/mfd/rts5229.c
@@ -114,6 +114,25 @@ static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
return rtsx_pci_send_cmd(pcr, 100);
}
+static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pcr_ops rts5229_pcr_ops = {
.extra_init_hw = rts5229_extra_init_hw,
.optimize_phy = rts5229_optimize_phy,
@@ -123,7 +142,9 @@ static const struct pcr_ops rts5229_pcr_ops = {
.disable_auto_blink = rts5229_disable_auto_blink,
.card_power_on = rts5229_card_power_on,
.card_power_off = rts5229_card_power_off,
+ .switch_output_voltage = rts5229_switch_output_voltage,
.cd_deglitch = NULL,
+ .conv_clk_and_div_n = NULL,
};
/* SD Pull Control Enable:
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 56d4377c62c2..9fc57009e228 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
@@ -629,7 +630,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (clk == pcr->cur_clock)
return 0;
- N = (u8)(clk - 2);
+ if (pcr->ops->conv_clk_and_div_n)
+ N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ N = (u8)(clk - 2);
if ((clk <= 2) || (N > max_N))
return -EINVAL;
@@ -640,7 +644,14 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
/* Make sure that the SSC clock div_n is equal or greater than min_N */
div = CLK_DIV_1;
while ((N < min_N) && (div < max_div)) {
- N = (N + 2) * 2 - 2;
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(N,
+ DIV_N_TO_CLK) * 2;
+ N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ N = (N + 2) * 2 - 2;
+ }
div++;
}
dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div);
@@ -702,6 +713,15 @@ int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
+int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ if (pcr->ops->switch_output_voltage)
+ return pcr->ops->switch_output_voltage(pcr, voltage);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
+
unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
unsigned int val;
@@ -766,10 +786,10 @@ static void rtsx_pci_card_detect(struct work_struct *work)
spin_unlock_irqrestore(&pcr->lock, flags);
- if (card_detect & SD_EXIST)
+ if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
pcr->slots[RTSX_SD_CARD].card_event(
pcr->slots[RTSX_SD_CARD].p_dev);
- if (card_detect & MS_EXIST)
+ if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
pcr->slots[RTSX_MS_CARD].card_event(
pcr->slots[RTSX_MS_CARD].p_dev);
}
@@ -997,8 +1017,8 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
return 0;
}
-static int __devinit rtsx_pci_probe(struct pci_dev *pcidev,
- const struct pci_device_id *id)
+static int rtsx_pci_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
{
struct rtsx_pcr *pcr;
struct pcr_handle *handle;
@@ -1122,7 +1142,7 @@ disable:
return ret;
}
-static void __devexit rtsx_pci_remove(struct pci_dev *pcidev)
+static void rtsx_pci_remove(struct pci_dev *pcidev)
{
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
@@ -1240,7 +1260,7 @@ static struct pci_driver rtsx_pci_driver = {
.name = DRV_NAME_RTSX_PCI,
.id_table = rtsx_pci_ids,
.probe = rtsx_pci_probe,
- .remove = __devexit_p(rtsx_pci_remove),
+ .remove = rtsx_pci_remove,
.suspend = rtsx_pci_suspend,
.resume = rtsx_pci_resume,
};
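
A short derivation (not part of the patch) of why the legacy fallback in rtsx_pci_switch_clock() is unchanged by the new hook: with the default mapping N = clk - 2, so clk = N + 2, doubling the SSC clock gives

	clk' = 2 * (N + 2)   =>   N' = clk' - 2 = (N + 2) * 2 - 2

which is exactly the old expression kept in the else branch. Chips with a different mapping, such as rtl8411, supply conv_clk_and_div_n() and go through the convert, double, convert-back sequence instead.
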
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 49d361a618d0..77ee26ef5941 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
@@ -60,6 +61,15 @@ static struct mfd_cell s2mps11_devs[] = {
},
};
+#ifdef CONFIG_OF
+static struct of_device_id sec_dt_match[] = {
+ { .compatible = "samsung,s5m8767-pmic",
+ .data = (void *)S5M8767X,
+ },
+ {},
+};
+#endif
+
int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
{
return regmap_read(sec_pmic->regmap, reg, dest);
@@ -95,6 +105,57 @@ static struct regmap_config sec_regmap_config = {
.val_bits = 8,
};
+
+#ifdef CONFIG_OF
+/*
+ * Only the common platform data elements for s5m8767 are parsed here from the
+ * device tree. Other sub-modules of s5m8767 such as pmic, rtc, charger and
+ * others have to parse their own platform data elements from device tree.
+ *
+ * The s5m8767 platform data structure is instantiated here and the drivers for
+ * the sub-modules need not instantiate another instance while parsing their
+ * platform data.
+ */
+static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
+ struct device *dev)
+{
+ struct sec_platform_data *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ dev_err(dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+	 * ToDo: the 'wakeup' member in the platform data is Linux-specific
+	 * information. Hence, there is no binding for it yet and it is not
+	 * parsed here.
+ */
+
+ return pd;
+}
+#else
+static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
+ struct device *dev)
+{
+	return NULL;
+}
+#endif
+
+static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+#ifdef CONFIG_OF
+ if (i2c->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(sec_dt_match, i2c->dev.of_node);
+ return (int)match->data;
+ }
+#endif
+ return (int)id->driver_data;
+}
+
static int sec_pmic_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -111,13 +172,22 @@ static int sec_pmic_probe(struct i2c_client *i2c,
sec_pmic->dev = &i2c->dev;
sec_pmic->i2c = i2c;
sec_pmic->irq = i2c->irq;
- sec_pmic->type = id->driver_data;
-
+ sec_pmic->type = sec_i2c_get_driver_data(i2c, id);
+
+ if (sec_pmic->dev->of_node) {
+ pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ return ret;
+ }
+ pdata->device_type = sec_pmic->type;
+ }
if (pdata) {
sec_pmic->device_type = pdata->device_type;
sec_pmic->ono = pdata->ono;
sec_pmic->irq_base = pdata->irq_base;
sec_pmic->wakeup = pdata->wakeup;
+ sec_pmic->pdata = pdata;
}
sec_pmic->regmap = devm_regmap_init_i2c(i2c, &sec_regmap_config);
@@ -192,6 +262,7 @@ static struct i2c_driver sec_pmic_driver = {
.driver = {
.name = "sec_pmic",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sec_dt_match),
},
.probe = sec_pmic_probe,
.remove = sec_pmic_remove,
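
The sec-core change retrieves the chip type either from the OF match table or from the i2c_device_id table. A condensed sketch of that helper with invented names; the extra cast through unsigned long is an assumption on my part to avoid the pointer-to-int truncation warning on 64-bit builds:

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/of.h>

enum example_chip_type { EXAMPLE_CHIP_A, EXAMPLE_CHIP_B };

static const struct of_device_id example_dt_match[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)EXAMPLE_CHIP_A },
	{ .compatible = "vendor,chip-b", .data = (void *)EXAMPLE_CHIP_B },
	{ },
};

static inline int example_get_type(struct i2c_client *i2c,
				   const struct i2c_device_id *id)
{
	if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
		const struct of_device_id *match =
			of_match_node(example_dt_match, i2c->dev.of_node);

		return match ? (int)(unsigned long)match->data : -EINVAL;
	}
	return id->driver_data;
}
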
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index c901fa50fea1..0dd84e99081e 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -24,67 +24,67 @@
static struct regmap_irq s2mps11_irqs[] = {
[S2MPS11_IRQ_PWRONF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRONF_MASK,
},
[S2MPS11_IRQ_PWRONR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRONR_MASK,
},
[S2MPS11_IRQ_JIGONBF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_JIGONBF_MASK,
},
[S2MPS11_IRQ_JIGONBR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_JIGONBR_MASK,
},
[S2MPS11_IRQ_ACOKBF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_ACOKBF_MASK,
},
[S2MPS11_IRQ_ACOKBR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_ACOKBR_MASK,
},
[S2MPS11_IRQ_PWRON1S] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRON1S_MASK,
},
[S2MPS11_IRQ_MRB] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_MRB_MASK,
},
[S2MPS11_IRQ_RTC60S] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTC60S_MASK,
},
[S2MPS11_IRQ_RTCA1] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTCA1_MASK,
},
[S2MPS11_IRQ_RTCA2] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTCA2_MASK,
},
[S2MPS11_IRQ_SMPL] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_SMPL_MASK,
},
[S2MPS11_IRQ_RTC1S] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTC1S_MASK,
},
[S2MPS11_IRQ_WTSR] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_WTSR_MASK,
},
[S2MPS11_IRQ_INT120C] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S2MPS11_IRQ_INT120C_MASK,
},
[S2MPS11_IRQ_INT140C] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S2MPS11_IRQ_INT140C_MASK,
},
};
@@ -92,146 +92,146 @@ static struct regmap_irq s2mps11_irqs[] = {
static struct regmap_irq s5m8767_irqs[] = {
[S5M8767_IRQ_PWRR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWRR_MASK,
},
[S5M8767_IRQ_PWRF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWRF_MASK,
},
[S5M8767_IRQ_PWR1S] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWR1S_MASK,
},
[S5M8767_IRQ_JIGR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_JIGR_MASK,
},
[S5M8767_IRQ_JIGF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_JIGF_MASK,
},
[S5M8767_IRQ_LOWBAT2] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_LOWBAT2_MASK,
},
[S5M8767_IRQ_LOWBAT1] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_LOWBAT1_MASK,
},
[S5M8767_IRQ_MRB] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_MRB_MASK,
},
[S5M8767_IRQ_DVSOK2] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK2_MASK,
},
[S5M8767_IRQ_DVSOK3] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK3_MASK,
},
[S5M8767_IRQ_DVSOK4] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK4_MASK,
},
[S5M8767_IRQ_RTC60S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTC60S_MASK,
},
[S5M8767_IRQ_RTCA1] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTCA1_MASK,
},
[S5M8767_IRQ_RTCA2] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTCA2_MASK,
},
[S5M8767_IRQ_SMPL] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_SMPL_MASK,
},
[S5M8767_IRQ_RTC1S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTC1S_MASK,
},
[S5M8767_IRQ_WTSR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_WTSR_MASK,
},
};
static struct regmap_irq s5m8763_irqs[] = {
[S5M8763_IRQ_DCINF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_DCINF_MASK,
},
[S5M8763_IRQ_DCINR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_DCINR_MASK,
},
[S5M8763_IRQ_JIGF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_JIGF_MASK,
},
[S5M8763_IRQ_JIGR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_JIGR_MASK,
},
[S5M8763_IRQ_PWRONF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_PWRONF_MASK,
},
[S5M8763_IRQ_PWRONR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_PWRONR_MASK,
},
[S5M8763_IRQ_WTSREVNT] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_WTSREVNT_MASK,
},
[S5M8763_IRQ_SMPLEVNT] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_SMPLEVNT_MASK,
},
[S5M8763_IRQ_ALARM1] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_ALARM1_MASK,
},
[S5M8763_IRQ_ALARM0] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_ALARM0_MASK,
},
[S5M8763_IRQ_ONKEY1S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_ONKEY1S_MASK,
},
[S5M8763_IRQ_TOPOFFR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_TOPOFFR_MASK,
},
[S5M8763_IRQ_DCINOVPR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_DCINOVPR_MASK,
},
[S5M8763_IRQ_CHGRSTF] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_CHGRSTF_MASK,
},
[S5M8763_IRQ_DONER] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_DONER_MASK,
},
[S5M8763_IRQ_CHGFAULT] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_CHGFAULT_MASK,
},
[S5M8763_IRQ_LOBAT1] = {
- .reg_offset = 4,
+ .reg_offset = 3,
.mask = S5M8763_IRQ_LOBAT1_MASK,
},
[S5M8763_IRQ_LOBAT2] = {
- .reg_offset = 4,
+ .reg_offset = 3,
.mask = S5M8763_IRQ_LOBAT2_MASK,
},
};
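
The offset changes above follow from how regmap-irq addresses its register banks: reg_offset is added to status_base/mask_base/ack_base, so the first bank must use offset 0, not 1. A tiny sketch with invented register addresses:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

/* reg_offset is relative to *_base, so bank numbering starts at 0 */
static struct regmap_irq example_irqs[] = {
	[0] = { .reg_offset = 0, .mask = BIT(0) },	/* bank 0, bit 0 */
	[1] = { .reg_offset = 1, .mask = BIT(3) },	/* bank 1, bit 3 */
};

static struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
	.num_regs	= 2,
	.status_base	= 0x01,		/* bank 0 status lives here */
	.mask_base	= 0x04,
	.ack_base	= 0x01,
};
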
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
index d6284cacd27a..9bd33169a111 100644
--- a/drivers/mfd/sta2x11-mfd.c
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2009-2011 Wind River Systems, Inc.
- * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini, Davide Ciminaghi)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,21 +27,28 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>
-#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/sta2x11-mfd.h>
+#include <linux/regmap.h>
#include <asm/sta2x11.h>
+static inline int __reg_within_range(unsigned int r,
+ unsigned int start,
+ unsigned int end)
+{
+ return ((r >= start) && (r <= end));
+}
+
/* This describes STA2X11 MFD chip for us, we may have several */
struct sta2x11_mfd {
struct sta2x11_instance *instance;
- spinlock_t lock;
+ struct regmap *regmap[sta2x11_n_mfd_plat_devs];
+ spinlock_t lock[sta2x11_n_mfd_plat_devs];
struct list_head list;
- void __iomem *sctl_regs;
- void __iomem *apbreg_regs;
+ void __iomem *regs[sta2x11_n_mfd_plat_devs];
};
static LIST_HEAD(sta2x11_mfd_list);
@@ -71,6 +78,7 @@ static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
{
+ int i;
struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
struct sta2x11_instance *instance;
@@ -83,7 +91,8 @@ static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
if (!mfd)
return -ENOMEM;
INIT_LIST_HEAD(&mfd->list);
- spin_lock_init(&mfd->lock);
+ for (i = 0; i < ARRAY_SIZE(mfd->lock); i++)
+ spin_lock_init(&mfd->lock[i]);
mfd->instance = instance;
list_add(&mfd->list, &sta2x11_mfd_list);
return 0;
@@ -100,161 +109,276 @@ static int mfd_remove(struct pci_dev *pdev)
return 0;
}
-/* These two functions are exported and are not expected to fail */
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+/* This function is exported and is not expected to fail */
+u32 __sta2x11_mfd_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val,
+ enum sta2x11_mfd_plat_dev index)
{
struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
u32 r;
unsigned long flags;
+ void __iomem *regs;
if (!mfd) {
dev_warn(&pdev->dev, ": can't access sctl regs\n");
return 0;
}
- if (!mfd->sctl_regs) {
+
+ regs = mfd->regs[index];
+ if (!regs) {
dev_warn(&pdev->dev, ": system ctl not initialized\n");
return 0;
}
- spin_lock_irqsave(&mfd->lock, flags);
- r = readl(mfd->sctl_regs + reg);
+ spin_lock_irqsave(&mfd->lock[index], flags);
+ r = readl(regs + reg);
r &= ~mask;
r |= val;
if (mask)
- writel(r, mfd->sctl_regs + reg);
- spin_unlock_irqrestore(&mfd->lock, flags);
+ writel(r, regs + reg);
+ spin_unlock_irqrestore(&mfd->lock[index], flags);
return r;
}
-EXPORT_SYMBOL(sta2x11_sctl_mask);
+EXPORT_SYMBOL(__sta2x11_mfd_mask);
-u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+int sta2x11_mfd_get_regs_data(struct platform_device *dev,
+ enum sta2x11_mfd_plat_dev index,
+ void __iomem **regs,
+ spinlock_t **lock)
{
- struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
- u32 r;
- unsigned long flags;
+ struct pci_dev *pdev = *(struct pci_dev **)(dev->dev.platform_data);
+ struct sta2x11_mfd *mfd;
- if (!mfd) {
- dev_warn(&pdev->dev, ": can't access apb regs\n");
- return 0;
- }
- if (!mfd->apbreg_regs) {
- dev_warn(&pdev->dev, ": apb bridge not initialized\n");
- return 0;
- }
- spin_lock_irqsave(&mfd->lock, flags);
- r = readl(mfd->apbreg_regs + reg);
- r &= ~mask;
- r |= val;
- if (mask)
- writel(r, mfd->apbreg_regs + reg);
- spin_unlock_irqrestore(&mfd->lock, flags);
- return r;
+ if (!pdev)
+ return -ENODEV;
+ mfd = sta2x11_mfd_find(pdev);
+ if (!mfd)
+ return -ENODEV;
+ if (index >= sta2x11_n_mfd_plat_devs)
+ return -ENODEV;
+ *regs = mfd->regs[index];
+ *lock = &mfd->lock[index];
+ pr_debug("%s %d *regs = %p\n", __func__, __LINE__, *regs);
+ return *regs ? 0 : -ENODEV;
}
-EXPORT_SYMBOL(sta2x11_apbreg_mask);
-
-/* Two debugfs files, for our registers (FIXME: one instance only) */
-#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
-static struct debugfs_reg32 sta2x11_sctl_regs[] = {
- REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
- REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
- REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
- REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
- REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
- REG(SCCLKSTAT2), REG(SCRSTSTA),
-};
-#undef REG
+EXPORT_SYMBOL(sta2x11_mfd_get_regs_data);
-static struct debugfs_regset32 sctl_regset = {
- .regs = sta2x11_sctl_regs,
- .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
-};
+/*
+ * Special sta2x11-mfd regmap lock/unlock functions
+ */
+
+static void sta2x11_regmap_lock(void *__lock)
+{
+ spinlock_t *lock = __lock;
+ spin_lock(lock);
+}
-#define REG(regname) {.name = #regname, .offset = regname}
-static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
- REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
- REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+static void sta2x11_regmap_unlock(void *__lock)
+{
+ spinlock_t *lock = __lock;
+ spin_unlock(lock);
+}
+
+/* OTP (one time programmable) registers do not require locking */
+static void sta2x11_regmap_nolock(void *__lock)
+{
+}
+
+static const char *sta2x11_mfd_names[sta2x11_n_mfd_plat_devs] = {
+ [sta2x11_sctl] = STA2X11_MFD_SCTL_NAME,
+ [sta2x11_apbreg] = STA2X11_MFD_APBREG_NAME,
+ [sta2x11_apb_soc_regs] = STA2X11_MFD_APB_SOC_REGS_NAME,
+ [sta2x11_scr] = STA2X11_MFD_SCR_NAME,
};
-#undef REG
-static struct debugfs_regset32 apbreg_regset = {
- .regs = sta2x11_apbreg_regs,
- .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+static bool sta2x11_sctl_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return !__reg_within_range(reg, SCTL_SCPCIECSBRST, SCTL_SCRSTSTA);
+}
+
+static struct regmap_config sta2x11_sctl_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = SCTL_SCRSTSTA,
+ .writeable_reg = sta2x11_sctl_writeable_reg,
};
-static struct dentry *sta2x11_sctl_debugfs;
-static struct dentry *sta2x11_apbreg_debugfs;
+static bool sta2x11_scr_readable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg == STA2X11_SECR_CR) ||
+ __reg_within_range(reg, STA2X11_SECR_FVR0, STA2X11_SECR_FVR1);
+}
-/* Probe for the two platform devices */
-static int sta2x11_sctl_probe(struct platform_device *dev)
+static bool sta2x11_scr_writeable_reg(struct device *dev, unsigned int reg)
{
- struct pci_dev **pdev;
- struct sta2x11_mfd *mfd;
- struct resource *res;
+ return false;
+}
- pdev = dev->dev.platform_data;
- mfd = sta2x11_mfd_find(*pdev);
- if (!mfd)
- return -ENODEV;
+static struct regmap_config sta2x11_scr_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_nolock,
+ .unlock = sta2x11_regmap_nolock,
+ .max_register = STA2X11_SECR_FVR1,
+ .readable_reg = sta2x11_scr_readable_reg,
+ .writeable_reg = sta2x11_scr_writeable_reg,
+};
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOMEM;
+static bool sta2x11_apbreg_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* Two blocks (CAN and MLB, SARAC) 0x100 bytes apart */
+ if (reg >= APBREG_BSR_SARAC)
+ reg -= APBREG_BSR_SARAC;
+ switch (reg) {
+ case APBREG_BSR:
+ case APBREG_PAER:
+ case APBREG_PWAC:
+ case APBREG_PRAC:
+ case APBREG_PCG:
+ case APBREG_PUR:
+ case APBREG_EMU_PCG:
+ return true;
+ default:
+ return false;
+ }
+}
- if (!request_mem_region(res->start, resource_size(res),
- "sta2x11-sctl"))
- return -EBUSY;
+static bool sta2x11_apbreg_writeable_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= APBREG_BSR_SARAC)
+ reg -= APBREG_BSR_SARAC;
+ if (!sta2x11_apbreg_readable_reg(dev, reg))
+ return false;
+ return reg != APBREG_PAER;
+}
- mfd->sctl_regs = ioremap(res->start, resource_size(res));
- if (!mfd->sctl_regs) {
- release_mem_region(res->start, resource_size(res));
- return -ENOMEM;
+static struct regmap_config sta2x11_apbreg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = APBREG_EMU_PCG_SARAC,
+ .readable_reg = sta2x11_apbreg_readable_reg,
+ .writeable_reg = sta2x11_apbreg_writeable_reg,
+};
+
+static bool sta2x11_apb_soc_regs_readable_reg(struct device *dev,
+ unsigned int reg)
+{
+ return reg <= PCIE_SoC_INT_ROUTER_STATUS3_REG ||
+ __reg_within_range(reg, DMA_IP_CTRL_REG, SPARE3_RESERVED) ||
+ __reg_within_range(reg, MASTER_LOCK_REG,
+ SYSTEM_CONFIG_STATUS_REG) ||
+ reg == MSP_CLK_CTRL_REG ||
+ __reg_within_range(reg, COMPENSATION_REG1, TEST_CTL_REG);
+}
+
+static bool sta2x11_apb_soc_regs_writeable_reg(struct device *dev,
+ unsigned int reg)
+{
+ if (!sta2x11_apb_soc_regs_readable_reg(dev, reg))
+ return false;
+ switch (reg) {
+ case PCIE_COMMON_CLOCK_CONFIG_0_4_0:
+ case SYSTEM_CONFIG_STATUS_REG:
+ case COMPENSATION_REG1:
+ case PCIE_SoC_INT_ROUTER_STATUS0_REG...PCIE_SoC_INT_ROUTER_STATUS3_REG:
+ case PCIE_PM_STATUS_0_PORT_0_4...PCIE_PM_STATUS_7_0_EP4:
+ return false;
+ default:
+ return true;
}
- sctl_regset.base = mfd->sctl_regs;
- sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
- S_IFREG | S_IRUGO,
- NULL, &sctl_regset);
- return 0;
}
-static int sta2x11_apbreg_probe(struct platform_device *dev)
+static struct regmap_config sta2x11_apb_soc_regs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = TEST_CTL_REG,
+ .readable_reg = sta2x11_apb_soc_regs_readable_reg,
+ .writeable_reg = sta2x11_apb_soc_regs_writeable_reg,
+};
+
+static struct regmap_config *
+sta2x11_mfd_regmap_configs[sta2x11_n_mfd_plat_devs] = {
+ [sta2x11_sctl] = &sta2x11_sctl_regmap_config,
+ [sta2x11_apbreg] = &sta2x11_apbreg_regmap_config,
+ [sta2x11_apb_soc_regs] = &sta2x11_apb_soc_regs_regmap_config,
+ [sta2x11_scr] = &sta2x11_scr_regmap_config,
+};
+
+/* Probe for the four platform devices */
+
+static int sta2x11_mfd_platform_probe(struct platform_device *dev,
+ enum sta2x11_mfd_plat_dev index)
{
struct pci_dev **pdev;
struct sta2x11_mfd *mfd;
struct resource *res;
+ const char *name = sta2x11_mfd_names[index];
+ struct regmap_config *regmap_config = sta2x11_mfd_regmap_configs[index];
pdev = dev->dev.platform_data;
- dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
- dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
-
mfd = sta2x11_mfd_find(*pdev);
if (!mfd)
return -ENODEV;
+ if (!regmap_config)
+ return -ENODEV;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
return -ENOMEM;
- if (!request_mem_region(res->start, resource_size(res),
- "sta2x11-apbreg"))
+ if (!request_mem_region(res->start, resource_size(res), name))
return -EBUSY;
- mfd->apbreg_regs = ioremap(res->start, resource_size(res));
- if (!mfd->apbreg_regs) {
+ mfd->regs[index] = ioremap(res->start, resource_size(res));
+ if (!mfd->regs[index]) {
release_mem_region(res->start, resource_size(res));
return -ENOMEM;
}
- dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+	regmap_config->lock_arg = &mfd->lock[index];
+	/*
+	 * No caching: registers could be reached both via regmap and via
+	 * void __iomem *.
+	 */
+ regmap_config->cache_type = REGCACHE_NONE;
+ mfd->regmap[index] = devm_regmap_init_mmio(&dev->dev, mfd->regs[index],
+ regmap_config);
+ WARN_ON(!mfd->regmap[index]);
- apbreg_regset.base = mfd->apbreg_regs;
- sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
- S_IFREG | S_IRUGO,
- NULL, &apbreg_regset);
return 0;
}
-/* The two platform drivers */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_sctl);
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_apbreg);
+}
+
+static int sta2x11_apb_soc_regs_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_apb_soc_regs);
+}
+
+static int sta2x11_scr_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_scr);
+}
+
+/* The four platform drivers */
static struct platform_driver sta2x11_sctl_platform_driver = {
.driver = {
- .name = "sta2x11-sctl",
+ .name = STA2X11_MFD_SCTL_NAME,
.owner = THIS_MODULE,
},
.probe = sta2x11_sctl_probe,
@@ -268,7 +392,7 @@ static int __init sta2x11_sctl_init(void)
static struct platform_driver sta2x11_platform_driver = {
.driver = {
- .name = "sta2x11-apbreg",
+ .name = STA2X11_MFD_APBREG_NAME,
.owner = THIS_MODULE,
},
.probe = sta2x11_apbreg_probe,
@@ -280,13 +404,44 @@ static int __init sta2x11_apbreg_init(void)
return platform_driver_register(&sta2x11_platform_driver);
}
+static struct platform_driver sta2x11_apb_soc_regs_platform_driver = {
+ .driver = {
+ .name = STA2X11_MFD_APB_SOC_REGS_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_apb_soc_regs_probe,
+};
+
+static int __init sta2x11_apb_soc_regs_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_apb_soc_regs_platform_driver);
+}
+
+static struct platform_driver sta2x11_scr_platform_driver = {
+ .driver = {
+ .name = STA2X11_MFD_SCR_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_scr_probe,
+};
+
+static int __init sta2x11_scr_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_scr_platform_driver);
+}
+
+
/*
- * What follows is the PCI device that hosts the above two pdevs.
+ * What follows are the PCI devices that host the above pdevs.
* Each logic block is 4kB and they are all consecutive: we use this info.
*/
-/* Bar 0 */
-enum bar0_cells {
+/* Mfd 0 device */
+
+/* Mfd 0, Bar 0 */
+enum mfd0_bar0_cells {
STA2X11_GPIO_0 = 0,
STA2X11_GPIO_1,
STA2X11_GPIO_2,
@@ -295,8 +450,8 @@ enum bar0_cells {
STA2X11_SCR,
STA2X11_TIME,
};
-/* Bar 1 */
-enum bar1_cells {
+/* Mfd 0 , Bar 1 */
+enum mfd0_bar1_cells {
STA2X11_APBREG = 0,
};
#define CELL_4K(_name, _cell) { \
@@ -307,40 +462,71 @@ enum bar1_cells {
static const struct resource gpio_resources[] = {
{
- .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+ /* 4 consecutive cells, 1 driver */
+ .name = STA2X11_MFD_GPIO_NAME,
.start = 0,
.end = (4 * 4096) - 1,
.flags = IORESOURCE_MEM,
}
};
static const struct resource sctl_resources[] = {
- CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+ CELL_4K(STA2X11_MFD_SCTL_NAME, STA2X11_SCTL),
};
static const struct resource scr_resources[] = {
- CELL_4K("sta2x11-scr", STA2X11_SCR),
+ CELL_4K(STA2X11_MFD_SCR_NAME, STA2X11_SCR),
};
static const struct resource time_resources[] = {
- CELL_4K("sta2x11-time", STA2X11_TIME),
+ CELL_4K(STA2X11_MFD_TIME_NAME, STA2X11_TIME),
};
static const struct resource apbreg_resources[] = {
- CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+ CELL_4K(STA2X11_MFD_APBREG_NAME, STA2X11_APBREG),
};
#define DEV(_name, _r) \
{ .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
-static struct mfd_cell sta2x11_mfd_bar0[] = {
- DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
- DEV("sta2x11-sctl", sctl_resources),
- DEV("sta2x11-scr", scr_resources),
- DEV("sta2x11-time", time_resources),
+static struct mfd_cell sta2x11_mfd0_bar0[] = {
+ /* offset 0: we add pdata later */
+ DEV(STA2X11_MFD_GPIO_NAME, gpio_resources),
+ DEV(STA2X11_MFD_SCTL_NAME, sctl_resources),
+ DEV(STA2X11_MFD_SCR_NAME, scr_resources),
+ DEV(STA2X11_MFD_TIME_NAME, time_resources),
};
-static struct mfd_cell sta2x11_mfd_bar1[] = {
- DEV("sta2x11-apbreg", apbreg_resources),
+static struct mfd_cell sta2x11_mfd0_bar1[] = {
+ DEV(STA2X11_MFD_APBREG_NAME, apbreg_resources),
};
+/* Mfd 1 devices */
+
+/* Mfd 1, Bar 0 */
+enum mfd1_bar0_cells {
+ STA2X11_VIC = 0,
+};
+
+/* Mfd 1, Bar 1 */
+enum mfd1_bar1_cells {
+ STA2X11_APB_SOC_REGS = 0,
+};
+
+static const struct resource vic_resources[] = {
+ CELL_4K(STA2X11_MFD_VIC_NAME, STA2X11_VIC),
+};
+
+static const struct resource apb_soc_regs_resources[] = {
+ CELL_4K(STA2X11_MFD_APB_SOC_REGS_NAME, STA2X11_APB_SOC_REGS),
+};
+
+static struct mfd_cell sta2x11_mfd1_bar0[] = {
+ DEV(STA2X11_MFD_VIC_NAME, vic_resources),
+};
+
+static struct mfd_cell sta2x11_mfd1_bar1[] = {
+ DEV(STA2X11_MFD_APB_SOC_REGS_NAME, apb_soc_regs_resources),
+};
+
+
static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
@@ -363,11 +549,63 @@ static int sta2x11_mfd_resume(struct pci_dev *pdev)
return 0;
}
+struct sta2x11_mfd_bar_setup_data {
+ struct mfd_cell *cells;
+ int ncells;
+};
+
+struct sta2x11_mfd_setup_data {
+ struct sta2x11_mfd_bar_setup_data bars[2];
+};
+
+#define STA2X11_MFD0 0
+#define STA2X11_MFD1 1
+
+static struct sta2x11_mfd_setup_data mfd_setup_data[] = {
+ /* Mfd 0: gpio, sctl, scr, timers / apbregs */
+ [STA2X11_MFD0] = {
+ .bars = {
+ [0] = {
+ .cells = sta2x11_mfd0_bar0,
+ .ncells = ARRAY_SIZE(sta2x11_mfd0_bar0),
+ },
+ [1] = {
+ .cells = sta2x11_mfd0_bar1,
+ .ncells = ARRAY_SIZE(sta2x11_mfd0_bar1),
+ },
+ },
+ },
+ /* Mfd 1: vic / apb-soc-regs */
+ [STA2X11_MFD1] = {
+ .bars = {
+ [0] = {
+ .cells = sta2x11_mfd1_bar0,
+ .ncells = ARRAY_SIZE(sta2x11_mfd1_bar0),
+ },
+ [1] = {
+ .cells = sta2x11_mfd1_bar1,
+ .ncells = ARRAY_SIZE(sta2x11_mfd1_bar1),
+ },
+ },
+ },
+};
+
+static void sta2x11_mfd_setup(struct pci_dev *pdev,
+ struct sta2x11_mfd_setup_data *sd)
+{
+ int i, j;
+ for (i = 0; i < ARRAY_SIZE(sd->bars); i++)
+ for (j = 0; j < sd->bars[i].ncells; j++) {
+ sd->bars[i].cells[j].pdata_size = sizeof(pdev);
+ sd->bars[i].cells[j].platform_data = &pdev;
+ }
+}
+
static int sta2x11_mfd_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
+ const struct pci_device_id *pci_id)
{
int err, i;
- struct sta2x11_gpio_pdata *gpio_data;
+ struct sta2x11_mfd_setup_data *setup_data;
dev_info(&pdev->dev, "%s\n", __func__);
@@ -381,46 +619,29 @@ static int sta2x11_mfd_probe(struct pci_dev *pdev,
if (err)
dev_info(&pdev->dev, "Enable msi failed\n");
- /* Read gpio config data as pci device's platform data */
- gpio_data = dev_get_platdata(&pdev->dev);
- if (!gpio_data)
- dev_warn(&pdev->dev, "no gpio configuration\n");
-
- dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
- gpio_data, &gpio_data);
- dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
- pdev, &pdev);
+ setup_data = pci_id->device == PCI_DEVICE_ID_STMICRO_GPIO ?
+ &mfd_setup_data[STA2X11_MFD0] :
+ &mfd_setup_data[STA2X11_MFD1];
/* platform data is the pci device for all of them */
- for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
- sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
- sta2x11_mfd_bar0[i].platform_data = &pdev;
- }
- sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
- sta2x11_mfd_bar1[0].platform_data = &pdev;
+ sta2x11_mfd_setup(pdev, setup_data);
/* Record this pdev before mfd_add_devices: their probe looks for it */
- sta2x11_mfd_add(pdev, GFP_ATOMIC);
-
-
- err = mfd_add_devices(&pdev->dev, -1,
- sta2x11_mfd_bar0,
- ARRAY_SIZE(sta2x11_mfd_bar0),
- &pdev->resource[0],
- 0, NULL);
- if (err) {
- dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
- goto err_disable;
- }
-
- err = mfd_add_devices(&pdev->dev, -1,
- sta2x11_mfd_bar1,
- ARRAY_SIZE(sta2x11_mfd_bar1),
- &pdev->resource[1],
- 0, NULL);
- if (err) {
- dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
- goto err_disable;
+ if (!sta2x11_mfd_find(pdev))
+ sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+ /* Just 2 bars for all mfd's at present */
+ for (i = 0; i < 2; i++) {
+ err = mfd_add_devices(&pdev->dev, -1,
+ setup_data->bars[i].cells,
+ setup_data->bars[i].ncells,
+ &pdev->resource[i],
+ 0, NULL);
+ if (err) {
+ dev_err(&pdev->dev,
+ "mfd_add_devices[%d] failed: %d\n", i, err);
+ goto err_disable;
+ }
}
return 0;
@@ -434,6 +655,7 @@ err_disable:
static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIC)},
{0,},
};
@@ -459,6 +681,8 @@ static int __init sta2x11_mfd_init(void)
*/
subsys_initcall(sta2x11_apbreg_init);
subsys_initcall(sta2x11_sctl_init);
+subsys_initcall(sta2x11_apb_soc_regs_init);
+subsys_initcall(sta2x11_scr_init);
rootfs_initcall(sta2x11_mfd_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 36df18778029..fd5fcb630685 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -82,11 +82,13 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, stmpe_id);
static struct i2c_driver stmpe_i2c_driver = {
- .driver.name = "stmpe-i2c",
- .driver.owner = THIS_MODULE,
+ .driver = {
+ .name = "stmpe-i2c",
+ .owner = THIS_MODULE,
#ifdef CONFIG_PM
- .driver.pm = &stmpe_dev_pm_ops,
+ .pm = &stmpe_dev_pm_ops,
#endif
+ },
.probe = stmpe_i2c_probe,
.remove = stmpe_i2c_remove,
.id_table = stmpe_i2c_id,
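
The stmpe-i2c change above is purely syntactic: the flat ".driver.name = ..." designators are folded into one nested initializer so the conditionally compiled .pm member sits inside a single braced block. A generic, self-contained illustration of the pattern (all identifiers below are made up):

static const struct dev_pm_ops example_pm_ops = { };

static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &example_pm_ops,
#endif
	},
};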
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 79e88d1fd99a..4b11202061be 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -7,11 +7,15 @@
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
@@ -312,20 +316,17 @@ static struct mfd_cell stmpe_gpio_cell_noirq = {
static struct resource stmpe_keypad_resources[] = {
{
.name = "KEYPAD",
- .start = 0,
- .end = 0,
.flags = IORESOURCE_IRQ,
},
{
.name = "KEYPAD_OVER",
- .start = 1,
- .end = 1,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell stmpe_keypad_cell = {
.name = "stmpe-keypad",
+ .of_compatible = "st,stmpe-keypad",
.resources = stmpe_keypad_resources,
.num_resources = ARRAY_SIZE(stmpe_keypad_resources),
};
@@ -399,20 +400,17 @@ static struct stmpe_variant_info stmpe801_noirq = {
static struct resource stmpe_ts_resources[] = {
{
.name = "TOUCH_DET",
- .start = 0,
- .end = 0,
.flags = IORESOURCE_IRQ,
},
{
.name = "FIFO_TH",
- .start = 1,
- .end = 1,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell stmpe_ts_cell = {
.name = "stmpe-ts",
+ .of_compatible = "st,stmpe-ts",
.resources = stmpe_ts_resources,
.num_resources = ARRAY_SIZE(stmpe_ts_resources),
};
@@ -528,12 +526,12 @@ static const u8 stmpe1601_regs[] = {
static struct stmpe_variant_block stmpe1601_blocks[] = {
{
.cell = &stmpe_gpio_cell,
- .irq = STMPE24XX_IRQ_GPIOC,
+ .irq = STMPE1601_IRQ_GPIOC,
.block = STMPE_BLOCK_GPIO,
},
{
.cell = &stmpe_keypad_cell,
- .irq = STMPE24XX_IRQ_KEYPAD,
+ .irq = STMPE1601_IRQ_KEYPAD,
.block = STMPE_BLOCK_KEYPAD,
},
};
@@ -767,7 +765,9 @@ static irqreturn_t stmpe_irq(int irq, void *data)
int i;
if (variant->id_val == STMPE801_ID) {
- handle_nested_irq(stmpe->irq_base);
+ int base = irq_create_mapping(stmpe->domain, 0);
+
+ handle_nested_irq(base);
return IRQ_HANDLED;
}
@@ -788,8 +788,9 @@ static irqreturn_t stmpe_irq(int irq, void *data)
while (status) {
int bit = __ffs(status);
int line = bank * 8 + bit;
+ int nestedirq = irq_create_mapping(stmpe->domain, line);
- handle_nested_irq(stmpe->irq_base + line);
+ handle_nested_irq(nestedirq);
status &= ~(1 << bit);
}
@@ -830,7 +831,7 @@ static void stmpe_irq_sync_unlock(struct irq_data *data)
static void stmpe_irq_mask(struct irq_data *data)
{
struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
- int offset = data->irq - stmpe->irq_base;
+ int offset = data->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -840,7 +841,7 @@ static void stmpe_irq_mask(struct irq_data *data)
static void stmpe_irq_unmask(struct irq_data *data)
{
struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
- int offset = data->irq - stmpe->irq_base;
+ int offset = data->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -855,46 +856,61 @@ static struct irq_chip stmpe_irq_chip = {
.irq_unmask = stmpe_irq_unmask,
};
-static int __devinit stmpe_irq_init(struct stmpe *stmpe)
+static int stmpe_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
{
+ struct stmpe *stmpe = d->host_data;
struct irq_chip *chip = NULL;
- int num_irqs = stmpe->variant->num_irqs;
- int base = stmpe->irq_base;
- int irq;
if (stmpe->variant->id_val != STMPE801_ID)
chip = &stmpe_irq_chip;
- for (irq = base; irq < base + num_irqs; irq++) {
- irq_set_chip_data(irq, stmpe);
- irq_set_chip_and_handler(irq, chip, handle_edge_irq);
- irq_set_nested_thread(irq, 1);
+ irq_set_chip_data(virq, stmpe);
+ irq_set_chip_and_handler(virq, chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
+ set_irq_flags(virq, IRQF_VALID);
#else
- irq_set_noprobe(irq);
+ irq_set_noprobe(virq);
#endif
- }
return 0;
}
-static void stmpe_irq_remove(struct stmpe *stmpe)
+static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
{
- int num_irqs = stmpe->variant->num_irqs;
- int base = stmpe->irq_base;
- int irq;
-
- for (irq = base; irq < base + num_irqs; irq++) {
#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
+ set_irq_flags(virq, 0);
#endif
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+}
+
+static struct irq_domain_ops stmpe_irq_ops = {
+ .map = stmpe_irq_map,
+ .unmap = stmpe_irq_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int stmpe_irq_init(struct stmpe *stmpe, struct device_node *np)
+{
+ int base = 0;
+ int num_irqs = stmpe->variant->num_irqs;
+
+ if (!np)
+ base = stmpe->irq_base;
+
+ stmpe->domain = irq_domain_add_simple(np, num_irqs, base,
+ &stmpe_irq_ops, stmpe);
+ if (!stmpe->domain) {
+ dev_err(stmpe->dev, "Failed to create irqdomain\n");
+ return -ENOSYS;
}
+
+ return 0;
}
-static int __devinit stmpe_chip_init(struct stmpe *stmpe)
+static int stmpe_chip_init(struct stmpe *stmpe)
{
unsigned int irq_trigger = stmpe->pdata->irq_trigger;
int autosleep_timeout = stmpe->pdata->autosleep_timeout;
@@ -942,13 +958,6 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
else
icr |= STMPE_ICR_LSB_HIGH;
}
-
- if (stmpe->pdata->irq_invert_polarity) {
- if (id == STMPE801_ID)
- icr ^= STMPE801_REG_SYS_CTRL_INT_HI;
- else
- icr ^= STMPE_ICR_LSB_HIGH;
- }
}
if (stmpe->pdata->autosleep) {
@@ -960,19 +969,18 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
return stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_ICR_LSB], icr);
}
-static int __devinit stmpe_add_device(struct stmpe *stmpe,
- struct mfd_cell *cell, int irq)
+static int stmpe_add_device(struct stmpe *stmpe, struct mfd_cell *cell)
{
return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1,
- NULL, stmpe->irq_base + irq, NULL);
+ NULL, stmpe->irq_base, stmpe->domain);
}
-static int __devinit stmpe_devices_init(struct stmpe *stmpe)
+static int stmpe_devices_init(struct stmpe *stmpe)
{
struct stmpe_variant_info *variant = stmpe->variant;
unsigned int platform_blocks = stmpe->pdata->blocks;
int ret = -EINVAL;
- int i;
+ int i, j;
for (i = 0; i < variant->num_blocks; i++) {
struct stmpe_variant_block *block = &variant->blocks[i];
@@ -980,8 +988,17 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
if (!(platform_blocks & block->block))
continue;
+ for (j = 0; j < block->cell->num_resources; j++) {
+ struct resource *res =
+ (struct resource *) &block->cell->resources[j];
+
+ /* Dynamically fill in a variant's IRQ. */
+ if (res->flags & IORESOURCE_IRQ)
+ res->start = res->end = block->irq + j;
+ }
+
platform_blocks &= ~block->block;
- ret = stmpe_add_device(stmpe, block->cell, block->irq);
+ ret = stmpe_add_device(stmpe, block->cell);
if (ret)
return ret;
}
@@ -994,17 +1011,55 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
return ret;
}
+void stmpe_of_probe(struct stmpe_platform_data *pdata, struct device_node *np)
+{
+ struct device_node *child;
+
+ pdata->id = -1;
+ pdata->irq_trigger = IRQF_TRIGGER_NONE;
+
+ of_property_read_u32(np, "st,autosleep-timeout",
+ &pdata->autosleep_timeout);
+
+ pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
+
+ for_each_child_of_node(np, child) {
+ if (!strcmp(child->name, "stmpe_gpio")) {
+ pdata->blocks |= STMPE_BLOCK_GPIO;
+ } else if (!strcmp(child->name, "stmpe_keypad")) {
+ pdata->blocks |= STMPE_BLOCK_KEYPAD;
+ } else if (!strcmp(child->name, "stmpe_touchscreen")) {
+ pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
+ } else if (!strcmp(child->name, "stmpe_adc")) {
+ pdata->blocks |= STMPE_BLOCK_ADC;
+ } else if (!strcmp(child->name, "stmpe_pwm")) {
+ pdata->blocks |= STMPE_BLOCK_PWM;
+ } else if (!strcmp(child->name, "stmpe_rotator")) {
+ pdata->blocks |= STMPE_BLOCK_ROTATOR;
+ }
+ }
+}
+
/* Called from client specific probe routines */
-int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
+int stmpe_probe(struct stmpe_client_info *ci, int partnum)
{
struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
+ struct device_node *np = ci->dev->of_node;
struct stmpe *stmpe;
int ret;
- if (!pdata)
- return -EINVAL;
+ if (!pdata) {
+ if (!np)
+ return -EINVAL;
+
+ pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ stmpe_of_probe(pdata, np);
+ }
- stmpe = kzalloc(sizeof(struct stmpe), GFP_KERNEL);
+ stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL);
if (!stmpe)
return -ENOMEM;
@@ -1026,11 +1081,12 @@ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
ci->init(stmpe);
if (pdata->irq_over_gpio) {
- ret = gpio_request_one(pdata->irq_gpio, GPIOF_DIR_IN, "stmpe");
+ ret = devm_gpio_request_one(ci->dev, pdata->irq_gpio,
+ GPIOF_DIR_IN, "stmpe");
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
ret);
- goto out_free;
+ return ret;
}
stmpe->irq = gpio_to_irq(pdata->irq_gpio);
@@ -1047,51 +1103,40 @@ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
dev_err(stmpe->dev,
"%s does not support no-irq mode!\n",
stmpe->variant->name);
- ret = -ENODEV;
- goto free_gpio;
+ return -ENODEV;
}
stmpe->variant = stmpe_noirq_variant_info[stmpe->partnum];
+ } else if (pdata->irq_trigger == IRQF_TRIGGER_NONE) {
+ pdata->irq_trigger =
+ irqd_get_trigger_type(irq_get_irq_data(stmpe->irq));
}
ret = stmpe_chip_init(stmpe);
if (ret)
- goto free_gpio;
+ return ret;
if (stmpe->irq >= 0) {
- ret = stmpe_irq_init(stmpe);
+ ret = stmpe_irq_init(stmpe, np);
if (ret)
- goto free_gpio;
+ return ret;
- ret = request_threaded_irq(stmpe->irq, NULL, stmpe_irq,
- pdata->irq_trigger | IRQF_ONESHOT,
+ ret = devm_request_threaded_irq(ci->dev, stmpe->irq, NULL,
+ stmpe_irq, pdata->irq_trigger | IRQF_ONESHOT,
"stmpe", stmpe);
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ: %d\n",
ret);
- goto out_removeirq;
+ return ret;
}
}
ret = stmpe_devices_init(stmpe);
- if (ret) {
- dev_err(stmpe->dev, "failed to add children\n");
- goto out_removedevs;
- }
-
- return 0;
+ if (!ret)
+ return 0;
-out_removedevs:
+ dev_err(stmpe->dev, "failed to add children\n");
mfd_remove_devices(stmpe->dev);
- if (stmpe->irq >= 0)
- free_irq(stmpe->irq, stmpe);
-out_removeirq:
- if (stmpe->irq >= 0)
- stmpe_irq_remove(stmpe);
-free_gpio:
- if (pdata->irq_over_gpio)
- gpio_free(pdata->irq_gpio);
-out_free:
- kfree(stmpe);
+
return ret;
}
@@ -1099,16 +1144,6 @@ int stmpe_remove(struct stmpe *stmpe)
{
mfd_remove_devices(stmpe->dev);
- if (stmpe->irq >= 0) {
- free_irq(stmpe->irq, stmpe);
- stmpe_irq_remove(stmpe);
- }
-
- if (stmpe->pdata->irq_over_gpio)
- gpio_free(stmpe->pdata->irq_gpio);
-
- kfree(stmpe);
-
return 0;
}
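
A hypothetical consumer view of the irqdomain conversion above (not in the patch): because mfd_add_devices() is now passed stmpe->domain, the IORESOURCE_IRQ entries that stmpe_devices_init() fills in are remapped to Linux virqs by the MFD core, so a cell driver such as the keypad can fetch its interrupt the ordinary way.

static int stmpe_keypad_probe_sketch(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* "KEYPAD" IRQ resource */

	if (irq < 0)
		return irq;

	/* request_threaded_irq(irq, ...) would follow here */
	return 0;
}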
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index a06d66b929b1..ecc092c7f745 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -219,25 +219,18 @@ static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
}
static struct irq_domain_ops tc3589x_irq_ops = {
- .map = tc3589x_irq_map,
+ .map = tc3589x_irq_map,
.unmap = tc3589x_irq_unmap,
- .xlate = irq_domain_xlate_twocell,
+ .xlate = irq_domain_xlate_twocell,
};
static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
{
int base = tc3589x->irq_base;
- if (base) {
- tc3589x->domain = irq_domain_add_legacy(
- NULL, TC3589x_NR_INTERNAL_IRQS, base,
- 0, &tc3589x_irq_ops, tc3589x);
- }
- else {
- tc3589x->domain = irq_domain_add_linear(
- np, TC3589x_NR_INTERNAL_IRQS,
- &tc3589x_irq_ops, tc3589x);
- }
+ tc3589x->domain = irq_domain_add_simple(
+ np, TC3589x_NR_INTERNAL_IRQS, base,
+ &tc3589x_irq_ops, tc3589x);
if (!tc3589x->domain) {
dev_err(tc3589x->dev, "Failed to create irqdomain\n");
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
new file mode 100644
index 000000000000..e9f3fb510b44
--- /dev/null
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -0,0 +1,274 @@
+/*
+ * TI Touch Screen / ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/input/ti_am335x_tsc.h>
+#include <linux/platform_data/ti_am335x_adc.h>
+
+static unsigned int tscadc_readl(struct ti_tscadc_dev *tsadc, unsigned int reg)
+{
+ unsigned int val;
+
+ regmap_read(tsadc->regmap_tscadc, reg, &val);
+ return val;
+}
+
+static void tscadc_writel(struct ti_tscadc_dev *tsadc, unsigned int reg,
+ unsigned int val)
+{
+ regmap_write(tsadc->regmap_tscadc, reg, val);
+}
+
+static const struct regmap_config tscadc_regmap_config = {
+ .name = "ti_tscadc",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static void tscadc_idle_config(struct ti_tscadc_dev *config)
+{
+ unsigned int idleconfig;
+
+ idleconfig = STEPCONFIG_YNN | STEPCONFIG_INM_ADCREFM |
+ STEPCONFIG_INP_ADCREFM | STEPCONFIG_YPN;
+
+ tscadc_writel(config, REG_IDLECONFIG, idleconfig);
+}
+
+static int ti_tscadc_probe(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc;
+ struct resource *res;
+ struct clk *clk;
+ struct mfd_tscadc_board *pdata = pdev->dev.platform_data;
+ struct mfd_cell *cell;
+ int err, ctrl;
+ int clk_value, clock_rate;
+ int tsc_wires, adc_channels = 0, total_channels;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdata->adc_init)
+ adc_channels = pdata->adc_init->adc_channels;
+
+ tsc_wires = pdata->tsc_init->wires;
+ total_channels = tsc_wires + adc_channels;
+
+ if (total_channels > 8) {
+ dev_err(&pdev->dev, "Number of i/p channels more than 8\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no memory resource defined.\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for device */
+ tscadc = devm_kzalloc(&pdev->dev,
+ sizeof(struct ti_tscadc_dev), GFP_KERNEL);
+ if (!tscadc) {
+ dev_err(&pdev->dev, "failed to allocate memory.\n");
+ return -ENOMEM;
+ }
+ tscadc->dev = &pdev->dev;
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "no irq ID is specified.\n");
+ goto ret;
+ } else
+ tscadc->irq = err;
+
+ res = devm_request_mem_region(&pdev->dev,
+ res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to reserve registers.\n");
+ return -EBUSY;
+ }
+
+ tscadc->tscadc_base = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!tscadc->tscadc_base) {
+ dev_err(&pdev->dev, "failed to map registers.\n");
+ return -ENOMEM;
+ }
+
+ tscadc->regmap_tscadc = devm_regmap_init_mmio(&pdev->dev,
+ tscadc->tscadc_base, &tscadc_regmap_config);
+ if (IS_ERR(tscadc->regmap_tscadc)) {
+ dev_err(&pdev->dev, "regmap init failed\n");
+ err = PTR_ERR(tscadc->regmap_tscadc);
+ goto ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ /*
+ * The TSC_ADC_Subsystem has 2 clock domains
+ * OCP_CLK and ADC_CLK.
+ * The ADC clock is expected to run at target of 3MHz,
+ * and expected to capture 12-bit data at a rate of 200 KSPS.
+ * The TSC_ADC_SS controller design assumes the OCP clock is
+ * at least 6x faster than the ADC clock.
+ */
+ clk = clk_get(&pdev->dev, "adc_tsc_fck");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get TSC fck\n");
+ err = PTR_ERR(clk);
+ goto err_disable_clk;
+ }
+ clock_rate = clk_get_rate(clk);
+ clk_put(clk);
+ clk_value = clock_rate / ADC_CLK;
+ if (clk_value < MAX_CLK_DIV) {
+ dev_err(&pdev->dev, "clock input less than min clock requirement\n");
+ err = -EINVAL;
+ goto err_disable_clk;
+ }
+ /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
+ clk_value = clk_value - 1;
+ tscadc_writel(tscadc, REG_CLKDIV, clk_value);
+
+ /* Set the control register bits */
+ ctrl = CNTRLREG_STEPCONFIGWRT |
+ CNTRLREG_TSCENB |
+ CNTRLREG_STEPID |
+ CNTRLREG_4WIRE;
+ tscadc_writel(tscadc, REG_CTRL, ctrl);
+
+ /* Set register bits for Idle Config Mode */
+ tscadc_idle_config(tscadc);
+
+ /* Enable the TSC module enable bit */
+ ctrl = tscadc_readl(tscadc, REG_CTRL);
+ ctrl |= CNTRLREG_TSCSSENB;
+ tscadc_writel(tscadc, REG_CTRL, ctrl);
+
+ /* TSC Cell */
+ cell = &tscadc->cells[TSC_CELL];
+ cell->name = "tsc";
+ cell->platform_data = tscadc;
+ cell->pdata_size = sizeof(*tscadc);
+
+ /* ADC Cell */
+ cell = &tscadc->cells[ADC_CELL];
+ cell->name = "tiadc";
+ cell->platform_data = tscadc;
+ cell->pdata_size = sizeof(*tscadc);
+
+ err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
+ TSCADC_CELLS, NULL, 0, NULL);
+ if (err < 0)
+ goto err_disable_clk;
+
+ device_init_wakeup(&pdev->dev, true);
+ platform_set_drvdata(pdev, tscadc);
+
+ return 0;
+
+err_disable_clk:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ret:
+ return err;
+}
+
+static int ti_tscadc_remove(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc = platform_get_drvdata(pdev);
+
+ tscadc_writel(tscadc, REG_SE, 0x00);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ mfd_remove_devices(tscadc->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tscadc_suspend(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+
+ tscadc_writel(tscadc_dev, REG_SE, 0x00);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tscadc_resume(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+ unsigned int restore, ctrl;
+
+ pm_runtime_get_sync(dev);
+
+ /* context restore */
+ ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_TSCENB |
+ CNTRLREG_STEPID | CNTRLREG_4WIRE;
+ tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
+ tscadc_idle_config(tscadc_dev);
+ tscadc_writel(tscadc_dev, REG_SE, STPENB_STEPENB);
+ restore = tscadc_readl(tscadc_dev, REG_CTRL);
+ tscadc_writel(tscadc_dev, REG_CTRL,
+ (restore | CNTRLREG_TSCSSENB));
+
+ return 0;
+}
+
+static const struct dev_pm_ops tscadc_pm_ops = {
+ .suspend = tscadc_suspend,
+ .resume = tscadc_resume,
+};
+#define TSCADC_PM_OPS (&tscadc_pm_ops)
+#else
+#define TSCADC_PM_OPS NULL
+#endif
+
+static struct platform_driver ti_tscadc_driver = {
+ .driver = {
+ .name = "ti_tscadc",
+ .owner = THIS_MODULE,
+ .pm = TSCADC_PM_OPS,
+ },
+ .probe = ti_tscadc_probe,
+ .remove = ti_tscadc_remove,
+
+};
+
+module_platform_driver(ti_tscadc_driver);
+
+MODULE_DESCRIPTION("TI touchscreen / ADC MFD controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
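
A worked example of the CLKDIV programming in ti_tscadc_probe() above (illustrative: the 24 MHz functional clock rate is an assumption; 3 MHz is the ADC clock target used by the driver):

/* Standalone computation of the value programmed into REG_CLKDIV. */
static unsigned int tscadc_clkdiv_example(void)
{
	unsigned long clock_rate = 24000000;		/* assumed adc_tsc_fck rate */
	unsigned int clk_value = clock_rate / 3000000;	/* target ADC clock -> 8 */

	return clk_value - 1;	/* 7: the hardware divides by "value + 1" */
}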
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 1b203499c744..409afa23d5dc 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -86,9 +86,9 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct tps6507x_dev *tps6507x;
- int ret = 0;
- tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
+ tps6507x = devm_kzalloc(&i2c->dev, sizeof(struct tps6507x_dev),
+ GFP_KERNEL);
if (tps6507x == NULL)
return -ENOMEM;
@@ -98,19 +98,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
tps6507x->read_dev = tps6507x_i2c_read_device;
tps6507x->write_dev = tps6507x_i2c_write_device;
- ret = mfd_add_devices(tps6507x->dev, -1,
- tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
- NULL, 0, NULL);
-
- if (ret < 0)
- goto err;
-
- return ret;
-
-err:
- mfd_remove_devices(tps6507x->dev);
- kfree(tps6507x);
- return ret;
+ return mfd_add_devices(tps6507x->dev, -1, tps6507x_devs,
+ ARRAY_SIZE(tps6507x_devs), NULL, 0, NULL);
}
static int tps6507x_i2c_remove(struct i2c_client *i2c)
@@ -118,8 +107,6 @@ static int tps6507x_i2c_remove(struct i2c_client *i2c)
struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
mfd_remove_devices(tps6507x->dev);
- kfree(tps6507x);
-
return 0;
}
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 382a857b0dde..8d12a8e00d9c 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -25,7 +25,6 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
-#include <linux/regmap.h>
#include <linux/err.h>
#define NUM_INT_REG 2
@@ -39,204 +38,102 @@
#define TPS65090_INT_MSK 0x2
#define TPS65090_INT_MSK2 0x3
-struct tps65090_irq_data {
- u8 mask_reg;
- u8 mask_pos;
-};
-
-#define TPS65090_IRQ(_reg, _mask_pos) \
- { \
- .mask_reg = (_reg), \
- .mask_pos = (_mask_pos), \
- }
-
-static const struct tps65090_irq_data tps65090_irqs[] = {
- [0] = TPS65090_IRQ(0, 0),
- [1] = TPS65090_IRQ(0, 1),
- [2] = TPS65090_IRQ(0, 2),
- [3] = TPS65090_IRQ(0, 3),
- [4] = TPS65090_IRQ(0, 4),
- [5] = TPS65090_IRQ(0, 5),
- [6] = TPS65090_IRQ(0, 6),
- [7] = TPS65090_IRQ(0, 7),
- [8] = TPS65090_IRQ(1, 0),
- [9] = TPS65090_IRQ(1, 1),
- [10] = TPS65090_IRQ(1, 2),
- [11] = TPS65090_IRQ(1, 3),
- [12] = TPS65090_IRQ(1, 4),
- [13] = TPS65090_IRQ(1, 5),
- [14] = TPS65090_IRQ(1, 6),
- [15] = TPS65090_IRQ(1, 7),
-};
+#define TPS65090_INT1_MASK_VAC_STATUS_CHANGE 1
+#define TPS65090_INT1_MASK_VSYS_STATUS_CHANGE 2
+#define TPS65090_INT1_MASK_BAT_STATUS_CHANGE 3
+#define TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE 4
+#define TPS65090_INT1_MASK_CHARGING_COMPLETE 5
+#define TPS65090_INT1_MASK_OVERLOAD_DCDC1 6
+#define TPS65090_INT1_MASK_OVERLOAD_DCDC2 7
+#define TPS65090_INT2_MASK_OVERLOAD_DCDC3 0
+#define TPS65090_INT2_MASK_OVERLOAD_FET1 1
+#define TPS65090_INT2_MASK_OVERLOAD_FET2 2
+#define TPS65090_INT2_MASK_OVERLOAD_FET3 3
+#define TPS65090_INT2_MASK_OVERLOAD_FET4 4
+#define TPS65090_INT2_MASK_OVERLOAD_FET5 5
+#define TPS65090_INT2_MASK_OVERLOAD_FET6 6
+#define TPS65090_INT2_MASK_OVERLOAD_FET7 7
static struct mfd_cell tps65090s[] = {
{
.name = "tps65090-pmic",
},
{
- .name = "tps65090-regulator",
+ .name = "tps65090-charger",
},
};
-int tps65090_write(struct device *dev, int reg, uint8_t val)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_write(tps->rmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(tps65090_write);
-
-int tps65090_read(struct device *dev, int reg, uint8_t *val)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- unsigned int temp_val;
- int ret;
- ret = regmap_read(tps->rmap, reg, &temp_val);
- if (!ret)
- *val = temp_val;
- return ret;
-}
-EXPORT_SYMBOL_GPL(tps65090_read);
-
-int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
-}
-EXPORT_SYMBOL_GPL(tps65090_set_bits);
-
-int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
-}
-EXPORT_SYMBOL_GPL(tps65090_clr_bits);
-
-static void tps65090_irq_lock(struct irq_data *data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&tps65090->irq_lock);
-}
-
-static void tps65090_irq_mask(struct irq_data *irq_data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->hwirq;
- const struct tps65090_irq_data *data = &tps65090_irqs[__irq];
-
- tps65090_set_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
- data->mask_pos);
-}
-
-static void tps65090_irq_unmask(struct irq_data *irq_data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps65090->irq_base;
- const struct tps65090_irq_data *data = &tps65090_irqs[__irq];
-
- tps65090_clr_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
- data->mask_pos);
-}
-
-static void tps65090_irq_sync_unlock(struct irq_data *data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
-
- mutex_unlock(&tps65090->irq_lock);
-}
-
-static irqreturn_t tps65090_irq(int irq, void *data)
-{
- struct tps65090 *tps65090 = data;
- int ret = 0;
- u8 status, mask;
- unsigned long int acks = 0;
- int i;
-
- for (i = 0; i < NUM_INT_REG; i++) {
- ret = tps65090_read(tps65090->dev, TPS65090_INT_MSK + i, &mask);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to read mask reg [addr:%d]\n",
- TPS65090_INT_MSK + i);
- return IRQ_NONE;
- }
- ret = tps65090_read(tps65090->dev, TPS65090_INT_STS + i,
- &status);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to read status reg [addr:%d]\n",
- TPS65090_INT_STS + i);
- return IRQ_NONE;
- }
- if (status) {
- /* Ack only those interrupts which are not masked */
- status &= (~mask);
- ret = tps65090_write(tps65090->dev,
- TPS65090_INT_STS + i, status);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to write interrupt status\n");
- return IRQ_NONE;
- }
- acks |= (status << (i * 8));
- }
- }
-
- for_each_set_bit(i, &acks, ARRAY_SIZE(tps65090_irqs))
- handle_nested_irq(tps65090->irq_base + i);
- return acks ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int tps65090_irq_init(struct tps65090 *tps65090, int irq,
- int irq_base)
-{
- int i, ret;
-
- if (!irq_base) {
- dev_err(tps65090->dev, "IRQ base not set\n");
- return -EINVAL;
- }
-
- mutex_init(&tps65090->irq_lock);
-
- for (i = 0; i < NUM_INT_REG; i++)
- tps65090_write(tps65090->dev, TPS65090_INT_MSK + i, 0xFF);
-
- for (i = 0; i < NUM_INT_REG; i++)
- tps65090_write(tps65090->dev, TPS65090_INT_STS + i, 0xff);
-
- tps65090->irq_base = irq_base;
- tps65090->irq_chip.name = "tps65090";
- tps65090->irq_chip.irq_mask = tps65090_irq_mask;
- tps65090->irq_chip.irq_unmask = tps65090_irq_unmask;
- tps65090->irq_chip.irq_bus_lock = tps65090_irq_lock;
- tps65090->irq_chip.irq_bus_sync_unlock = tps65090_irq_sync_unlock;
-
- for (i = 0; i < ARRAY_SIZE(tps65090_irqs); i++) {
- int __irq = i + tps65090->irq_base;
- irq_set_chip_data(__irq, tps65090);
- irq_set_chip_and_handler(__irq, &tps65090->irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#endif
- }
-
- ret = request_threaded_irq(irq, NULL, tps65090_irq, IRQF_ONESHOT,
- "tps65090", tps65090);
- if (!ret) {
- device_init_wakeup(tps65090->dev, 1);
- enable_irq_wake(irq);
- }
+static const struct regmap_irq tps65090_irqs[] = {
+ /* INT1 IRQs*/
+ [TPS65090_IRQ_VAC_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_VAC_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_VSYS_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_VSYS_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_BAT_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_BAT_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_CHARGING_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_CHARGING_COMPLETE] = {
+ .mask = TPS65090_INT1_MASK_CHARGING_COMPLETE,
+ },
+ [TPS65090_IRQ_OVERLOAD_DCDC1] = {
+ .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC1,
+ },
+ [TPS65090_IRQ_OVERLOAD_DCDC2] = {
+ .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC2,
+ },
+ /* INT2 IRQs*/
+ [TPS65090_IRQ_OVERLOAD_DCDC3] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_DCDC3,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET1] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET1,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET2] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET2,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET3] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET3,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET4] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET4,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET5] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET5,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET6] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET6,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET7] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET7,
+ },
+};
- return ret;
-}
+static struct regmap_irq_chip tps65090_irq_chip = {
+ .name = "tps65090",
+ .irqs = tps65090_irqs,
+ .num_irqs = ARRAY_SIZE(tps65090_irqs),
+ .num_regs = NUM_INT_REG,
+ .status_base = TPS65090_INT_STS,
+ .mask_base = TPS65090_INT_MSK,
+ .mask_invert = true,
+};
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
- if (reg == TPS65090_INT_STS)
+ if ((reg == TPS65090_INT_STS) || (reg == TPS65090_INT_STS2))
return true;
else
return false;
@@ -263,36 +160,36 @@ static int tps65090_i2c_probe(struct i2c_client *client,
return -EINVAL;
}
- tps65090 = devm_kzalloc(&client->dev, sizeof(struct tps65090),
- GFP_KERNEL);
- if (tps65090 == NULL)
+ tps65090 = devm_kzalloc(&client->dev, sizeof(*tps65090), GFP_KERNEL);
+ if (!tps65090) {
+ dev_err(&client->dev, "mem alloc for tps65090 failed\n");
return -ENOMEM;
+ }
- tps65090->client = client;
tps65090->dev = &client->dev;
i2c_set_clientdata(client, tps65090);
- mutex_init(&tps65090->lock);
-
- if (client->irq) {
- ret = tps65090_irq_init(tps65090, client->irq, pdata->irq_base);
- if (ret) {
- dev_err(&client->dev, "IRQ init failed with err: %d\n",
- ret);
- goto err_exit;
- }
- }
-
- tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
- &tps65090_regmap_config);
+ tps65090->rmap = devm_regmap_init_i2c(client, &tps65090_regmap_config);
if (IS_ERR(tps65090->rmap)) {
ret = PTR_ERR(tps65090->rmap);
dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
- goto err_irq_exit;
+ return ret;
+ }
+
+ if (client->irq) {
+ ret = regmap_add_irq_chip(tps65090->rmap, client->irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW, pdata->irq_base,
+ &tps65090_irq_chip, &tps65090->irq_data);
+ if (ret) {
+ dev_err(&client->dev,
+ "IRQ init failed with err: %d\n", ret);
+ return ret;
+ }
}
ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
- ARRAY_SIZE(tps65090s), NULL, 0, NULL);
+ ARRAY_SIZE(tps65090s), NULL,
+ regmap_irq_chip_get_base(tps65090->irq_data), NULL);
if (ret) {
dev_err(&client->dev, "add mfd devices failed with err: %d\n",
ret);
@@ -303,8 +200,7 @@ static int tps65090_i2c_probe(struct i2c_client *client,
err_irq_exit:
if (client->irq)
- free_irq(client->irq, tps65090);
-err_exit:
+ regmap_del_irq_chip(client->irq, tps65090->irq_data);
return ret;
}
@@ -314,7 +210,7 @@ static int tps65090_i2c_remove(struct i2c_client *client)
mfd_remove_devices(tps65090->dev);
if (client->irq)
- free_irq(client->irq, tps65090);
+ regmap_del_irq_chip(client->irq, tps65090->irq_data);
return 0;
}
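
A sketch (not part of this patch) of how a tps65090 sub-driver, for example the new charger cell, would obtain a Linux interrupt number now that regmap-irq owns the interrupt handling; the function name is illustrative.

#include <linux/regmap.h>
#include <linux/mfd/tps65090.h>

static int tps65090_charger_irq_sketch(struct tps65090 *tps65090)
{
	/* regmap-irq owns the domain; look up the virq for one source */
	return regmap_irq_get_virq(tps65090->irq_data,
				   TPS65090_IRQ_CHARGING_COMPLETE);
}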
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index e14e252e3473..b8f48647661e 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -160,6 +160,7 @@ static int tps65217_probe(struct i2c_client *client,
unsigned int version;
unsigned int chip_id = ids->driver_data;
const struct of_device_id *match;
+ bool status_off = false;
int ret;
if (client->dev.of_node) {
@@ -170,6 +171,8 @@ static int tps65217_probe(struct i2c_client *client,
return -EINVAL;
}
chip_id = (unsigned int)match->data;
+ status_off = of_property_read_bool(client->dev.of_node,
+ "ti,pmic-shutdown-controller");
}
if (!chip_id) {
@@ -207,6 +210,15 @@ static int tps65217_probe(struct i2c_client *client,
return ret;
}
+ /* Set the PMIC to shutdown on PWR_EN toggle */
+ if (status_off) {
+ ret = tps65217_set_bits(tps, TPS65217_REG_STATUS,
+ TPS65217_STATUS_OFF, TPS65217_STATUS_OFF,
+ TPS65217_PROTECT_NONE);
+ if (ret)
+ dev_warn(tps->dev, "unable to set the status OFF\n");
+ }
+
dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
(version & TPS65217_CHIPID_CHIP_MASK) >> 4,
version & TPS65217_CHIPID_REV_MASK);
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 87ba7ada3bbc..721b9186a5d1 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -17,12 +17,14 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/core.h>
@@ -92,6 +94,14 @@ static const struct tps6586x_irq_data tps6586x_irqs[] = {
[TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
};
+static struct resource tps6586x_rtc_resources[] = {
+ {
+ .start = TPS6586X_INT_RTC_ALM1,
+ .end = TPS6586X_INT_RTC_ALM1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct mfd_cell tps6586x_cell[] = {
{
.name = "tps6586x-gpio",
@@ -101,6 +111,8 @@ static struct mfd_cell tps6586x_cell[] = {
},
{
.name = "tps6586x-rtc",
+ .num_resources = ARRAY_SIZE(tps6586x_rtc_resources),
+ .resources = &tps6586x_rtc_resources[0],
},
{
.name = "tps6586x-onkey",
@@ -117,6 +129,7 @@ struct tps6586x {
int irq_base;
u32 irq_en;
u8 mask_reg[5];
+ struct irq_domain *irq_domain;
};
static inline struct tps6586x *dev_to_tps6586x(struct device *dev)
@@ -185,6 +198,14 @@ int tps6586x_update(struct device *dev, int reg, uint8_t val, uint8_t mask)
}
EXPORT_SYMBOL_GPL(tps6586x_update);
+int tps6586x_irq_get_virq(struct device *dev, int irq)
+{
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
+
+ return irq_create_mapping(tps6586x->irq_domain, irq);
+}
+EXPORT_SYMBOL_GPL(tps6586x_irq_get_virq);
+
static int __remove_subdev(struct device *dev, void *unused)
{
platform_device_unregister(to_platform_device(dev));
@@ -206,7 +227,7 @@ static void tps6586x_irq_lock(struct irq_data *data)
static void tps6586x_irq_enable(struct irq_data *irq_data)
{
struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps6586x->irq_base;
+ unsigned int __irq = irq_data->hwirq;
const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
@@ -217,7 +238,7 @@ static void tps6586x_irq_disable(struct irq_data *irq_data)
{
struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps6586x->irq_base;
+ unsigned int __irq = irq_data->hwirq;
const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
@@ -240,6 +261,39 @@ static void tps6586x_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&tps6586x->irq_lock);
}
+static struct irq_chip tps6586x_irq_chip = {
+ .name = "tps6586x",
+ .irq_bus_lock = tps6586x_irq_lock,
+ .irq_bus_sync_unlock = tps6586x_irq_sync_unlock,
+ .irq_disable = tps6586x_irq_disable,
+ .irq_enable = tps6586x_irq_enable,
+};
+
+static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct tps6586x *tps6586x = h->host_data;
+
+ irq_set_chip_data(virq, tps6586x);
+ irq_set_chip_and_handler(virq, &tps6586x_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops tps6586x_domain_ops = {
+ .map = tps6586x_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
static irqreturn_t tps6586x_irq(int irq, void *data)
{
struct tps6586x *tps6586x = data;
@@ -260,7 +314,8 @@ static irqreturn_t tps6586x_irq(int irq, void *data)
int i = __ffs(acks);
if (tps6586x->irq_en & (1 << i))
- handle_nested_irq(tps6586x->irq_base + i);
+ handle_nested_irq(
+ irq_find_mapping(tps6586x->irq_domain, i));
acks &= ~(1 << i);
}
@@ -273,11 +328,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
{
int i, ret;
u8 tmp[4];
-
- if (!irq_base) {
- dev_warn(tps6586x->dev, "No interrupt support on IRQ base\n");
- return -EINVAL;
- }
+ int new_irq_base;
+ int irq_num = ARRAY_SIZE(tps6586x_irqs);
mutex_init(&tps6586x->irq_lock);
for (i = 0; i < 5; i++) {
@@ -287,25 +339,24 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1, sizeof(tmp), tmp);
- tps6586x->irq_base = irq_base;
-
- tps6586x->irq_chip.name = "tps6586x";
- tps6586x->irq_chip.irq_enable = tps6586x_irq_enable;
- tps6586x->irq_chip.irq_disable = tps6586x_irq_disable;
- tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock;
- tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock;
-
- for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
- int __irq = i + tps6586x->irq_base;
- irq_set_chip_data(__irq, tps6586x);
- irq_set_chip_and_handler(__irq, &tps6586x->irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#endif
+ if (irq_base > 0) {
+ new_irq_base = irq_alloc_descs(irq_base, 0, irq_num, -1);
+ if (new_irq_base < 0) {
+ dev_err(tps6586x->dev,
+ "Failed to alloc IRQs: %d\n", new_irq_base);
+ return new_irq_base;
+ }
+ } else {
+ new_irq_base = 0;
}
+ tps6586x->irq_domain = irq_domain_add_simple(tps6586x->dev->of_node,
+ irq_num, new_irq_base, &tps6586x_domain_ops,
+ tps6586x);
+ if (!tps6586x->irq_domain) {
+ dev_err(tps6586x->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
"tps6586x", tps6586x);
@@ -461,7 +512,7 @@ static int tps6586x_i2c_probe(struct i2c_client *client,
ret = mfd_add_devices(tps6586x->dev, -1,
tps6586x_cell, ARRAY_SIZE(tps6586x_cell),
- NULL, 0, NULL);
+ NULL, 0, tps6586x->irq_domain);
if (ret < 0) {
dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
goto err_mfd_add;
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
deleted file mode 100644
index 09aab3e4776d..000000000000
--- a/drivers/mfd/tps65910-irq.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * tps65910-irq.c -- TI TPS6591x
- *
- * Copyright 2010 Texas Instruments Inc.
- *
- * Author: Graeme Gregory <gg@slimlogic.co.uk>
- * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/bug.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/gpio.h>
-#include <linux/mfd/tps65910.h>
-
-/*
- * This is a threaded IRQ handler so can access I2C/SPI. Since all
- * interrupts are clear on read the IRQ line will be reasserted and
- * the physical IRQ will be handled again if another interrupt is
- * asserted while we run - in the normal course of events this is a
- * rare occurrence so we save I2C/SPI reads. We're also assuming that
- * it's rare to get lots of interrupts firing simultaneously so try to
- * minimise I/O.
- */
-static irqreturn_t tps65910_irq(int irq, void *irq_data)
-{
- struct tps65910 *tps65910 = irq_data;
- unsigned int reg;
- u32 irq_sts;
- u32 irq_mask;
- int i;
-
- tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
- irq_sts = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
- irq_sts |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
- irq_sts |= reg << 16;
- }
-
- tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
- irq_mask = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
- irq_mask |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
- irq_mask |= reg << 16;
- }
-
- irq_sts &= ~irq_mask;
-
- if (!irq_sts)
- return IRQ_NONE;
-
- for (i = 0; i < tps65910->irq_num; i++) {
-
- if (!(irq_sts & (1 << i)))
- continue;
-
- handle_nested_irq(irq_find_mapping(tps65910->domain, i));
- }
-
- /* Write the STS register back to clear IRQs we handled */
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
- reg = irq_sts & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- reg = irq_sts >> 8;
- tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
- }
-
- return IRQ_HANDLED;
-}
-
-static void tps65910_irq_lock(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&tps65910->irq_lock);
-}
-
-static void tps65910_irq_sync_unlock(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- u32 reg_mask;
- unsigned int reg;
-
- tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
- reg_mask = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
- reg_mask |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
- reg_mask |= reg << 16;
- }
-
- if (tps65910->irq_mask != reg_mask) {
- reg = tps65910->irq_mask & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
- reg = tps65910->irq_mask >> 8 & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- reg = tps65910->irq_mask >> 16;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
- }
- }
- mutex_unlock(&tps65910->irq_lock);
-}
-
-static void tps65910_irq_enable(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- tps65910->irq_mask &= ~(1 << data->hwirq);
-}
-
-static void tps65910_irq_disable(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- tps65910->irq_mask |= (1 << data->hwirq);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tps65910_irq_set_wake(struct irq_data *data, unsigned int enable)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- return irq_set_irq_wake(tps65910->chip_irq, enable);
-}
-#else
-#define tps65910_irq_set_wake NULL
-#endif
-
-static struct irq_chip tps65910_irq_chip = {
- .name = "tps65910",
- .irq_bus_lock = tps65910_irq_lock,
- .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
- .irq_disable = tps65910_irq_disable,
- .irq_enable = tps65910_irq_enable,
- .irq_set_wake = tps65910_irq_set_wake,
-};
-
-static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct tps65910 *tps65910 = h->host_data;
-
- irq_set_chip_data(virq, tps65910);
- irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
- irq_set_nested_thread(virq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
- irq_set_noprobe(virq);
-#endif
-
- return 0;
-}
-
-static struct irq_domain_ops tps65910_domain_ops = {
- .map = tps65910_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-int tps65910_irq_init(struct tps65910 *tps65910, int irq,
- struct tps65910_platform_data *pdata)
-{
- int ret;
- int flags = IRQF_ONESHOT;
-
- if (!irq) {
- dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
- return -EINVAL;
- }
-
- if (!pdata) {
- dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
- return -EINVAL;
- }
-
- switch (tps65910_chip_id(tps65910)) {
- case TPS65910:
- tps65910->irq_num = TPS65910_NUM_IRQ;
- break;
- case TPS65911:
- tps65910->irq_num = TPS65911_NUM_IRQ;
- break;
- }
-
- if (pdata->irq_base > 0) {
- pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
- tps65910->irq_num, -1);
- if (pdata->irq_base < 0) {
- dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
- pdata->irq_base);
- return pdata->irq_base;
- }
- }
-
- tps65910->irq_mask = 0xFFFFFF;
-
- mutex_init(&tps65910->irq_lock);
- tps65910->chip_irq = irq;
- tps65910->irq_base = pdata->irq_base;
-
- if (pdata->irq_base > 0)
- tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
- tps65910->irq_num,
- pdata->irq_base,
- 0,
- &tps65910_domain_ops, tps65910);
- else
- tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
- tps65910->irq_num,
- &tps65910_domain_ops, tps65910);
-
- if (!tps65910->domain) {
- dev_err(tps65910->dev, "Failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
- "tps65910", tps65910);
-
- irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
-
- if (ret != 0)
- dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
-
- return ret;
-}
-
-int tps65910_irq_exit(struct tps65910 *tps65910)
-{
- if (tps65910->chip_irq)
- free_irq(tps65910->chip_irq, tps65910);
- return 0;
-}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index ce054654f5bb..d79277204835 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -19,6 +19,9 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65910.h>
@@ -50,6 +53,219 @@ static struct mfd_cell tps65910s[] = {
};
+static const struct regmap_irq tps65911_irqs[] = {
+ /* INT_STS */
+ [TPS65911_IRQ_PWRHOLD_F] = {
+ .mask = INT_MSK_PWRHOLD_F_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_VBAT_VMHI] = {
+ .mask = INT_MSK_VMBHI_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRON] = {
+ .mask = INT_MSK_PWRON_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRON_LP] = {
+ .mask = INT_MSK_PWRON_LP_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRHOLD_R] = {
+ .mask = INT_MSK_PWRHOLD_R_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_HOTDIE] = {
+ .mask = INT_MSK_HOTDIE_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_RTC_ALARM] = {
+ .mask = INT_MSK_RTC_ALARM_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_RTC_PERIOD] = {
+ .mask = INT_MSK_RTC_PERIOD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+
+ /* INT_STS2 */
+ [TPS65911_IRQ_GPIO0_R] = {
+ .mask = INT_MSK2_GPIO0_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO0_F] = {
+ .mask = INT_MSK2_GPIO0_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO1_R] = {
+ .mask = INT_MSK2_GPIO1_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO1_F] = {
+ .mask = INT_MSK2_GPIO1_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO2_R] = {
+ .mask = INT_MSK2_GPIO2_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO2_F] = {
+ .mask = INT_MSK2_GPIO2_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO3_R] = {
+ .mask = INT_MSK2_GPIO3_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO3_F] = {
+ .mask = INT_MSK2_GPIO3_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+
+ /* INT_STS2 */
+ [TPS65911_IRQ_GPIO4_R] = {
+ .mask = INT_MSK3_GPIO4_R_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO4_F] = {
+ .mask = INT_MSK3_GPIO4_F_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO5_R] = {
+ .mask = INT_MSK3_GPIO5_R_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO5_F] = {
+ .mask = INT_MSK3_GPIO5_F_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_WTCHDG] = {
+ .mask = INT_MSK3_WTCHDG_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_VMBCH2_H] = {
+ .mask = INT_MSK3_VMBCH2_H_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_VMBCH2_L] = {
+ .mask = INT_MSK3_VMBCH2_L_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_PWRDN] = {
+ .mask = INT_MSK3_PWRDN_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+};
+
+static const struct regmap_irq tps65910_irqs[] = {
+ /* INT_STS */
+ [TPS65910_IRQ_VBAT_VMBDCH] = {
+ .mask = TPS65910_INT_MSK_VMBDCH_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_VBAT_VMHI] = {
+ .mask = TPS65910_INT_MSK_VMBHI_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRON] = {
+ .mask = TPS65910_INT_MSK_PWRON_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRON_LP] = {
+ .mask = TPS65910_INT_MSK_PWRON_LP_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRHOLD] = {
+ .mask = TPS65910_INT_MSK_PWRHOLD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_HOTDIE] = {
+ .mask = TPS65910_INT_MSK_HOTDIE_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_RTC_ALARM] = {
+ .mask = TPS65910_INT_MSK_RTC_ALARM_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_RTC_PERIOD] = {
+ .mask = TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+
+ /* INT_STS2 */
+ [TPS65910_IRQ_GPIO_R] = {
+ .mask = TPS65910_INT_MSK2_GPIO0_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65910_IRQ_GPIO_F] = {
+ .mask = TPS65910_INT_MSK2_GPIO0_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+};
+
+static struct regmap_irq_chip tps65911_irq_chip = {
+ .name = "tps65910",
+ .irqs = tps65911_irqs,
+ .num_irqs = ARRAY_SIZE(tps65911_irqs),
+ .num_regs = 3,
+ .irq_reg_stride = 2,
+ .status_base = TPS65910_INT_STS,
+ .mask_base = TPS65910_INT_MSK,
+ .ack_base = TPS65910_INT_STS,
+};
+
+static struct regmap_irq_chip tps65910_irq_chip = {
+ .name = "tps65910",
+ .irqs = tps65910_irqs,
+ .num_irqs = ARRAY_SIZE(tps65910_irqs),
+ .num_regs = 2,
+ .irq_reg_stride = 2,
+ .status_base = TPS65910_INT_STS,
+ .mask_base = TPS65910_INT_MSK,
+ .ack_base = TPS65910_INT_STS,
+};
+
+static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+ struct tps65910_platform_data *pdata)
+{
+ int ret = 0;
+ static struct regmap_irq_chip *tps6591x_irqs_chip;
+
+ if (!irq) {
+ dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
+ return -EINVAL;
+ }
+
+ if (!pdata) {
+ dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
+ return -EINVAL;
+ }
+
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65910:
+ tps6591x_irqs_chip = &tps65910_irq_chip;
+ break;
+ case TPS65911:
+ tps6591x_irqs_chip = &tps65911_irq_chip;
+ break;
+ }
+
+ tps65910->chip_irq = irq;
+ ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
+ IRQF_ONESHOT, pdata->irq_base,
+ tps6591x_irqs_chip, &tps65910->irq_data);
+ if (ret < 0)
+ dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
+ return ret;
+}
+
+static int tps65910_irq_exit(struct tps65910 *tps65910)
+{
+ if (tps65910->chip_irq > 0)
+ regmap_del_irq_chip(tps65910->chip_irq, tps65910->irq_data);
+ return 0;
+}
+
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -270,7 +486,6 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
tps65910->dev = &i2c->dev;
tps65910->i2c_client = i2c;
tps65910->id = chip_id;
- mutex_init(&tps65910->io_mutex);
tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
if (IS_ERR(tps65910->regmap)) {
@@ -279,14 +494,6 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = mfd_add_devices(tps65910->dev, -1,
- tps65910s, ARRAY_SIZE(tps65910s),
- NULL, 0, NULL);
- if (ret < 0) {
- dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
- return ret;
- }
-
init_data->irq = pmic_plat_data->irq;
init_data->irq_base = pmic_plat_data->irq_base;
@@ -299,6 +506,15 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
pm_power_off = tps65910_power_off;
}
+ ret = mfd_add_devices(tps65910->dev, -1,
+ tps65910s, ARRAY_SIZE(tps65910s),
+ NULL, 0,
+ regmap_irq_get_domain(tps65910->irq_data));
+ if (ret < 0) {
+ dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+ return ret;
+ }
+
return ret;
}
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
new file mode 100644
index 000000000000..c90a2c450f51
--- /dev/null
+++ b/drivers/mfd/tps80031.c
@@ -0,0 +1,573 @@
+/*
+ * tps80031.c -- TI TPS80031/TPS80032 mfd core driver.
+ *
+ * MFD core driver for TI TPS80031/TPS80032 Fully Integrated
+ * Power Management with Power Path and Battery Charger
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static struct resource tps80031_rtc_resources[] = {
+ {
+ .start = TPS80031_INT_RTC_ALARM,
+ .end = TPS80031_INT_RTC_ALARM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* TPS80031 sub mfd devices */
+static struct mfd_cell tps80031_cell[] = {
+ {
+ .name = "tps80031-pmic",
+ },
+ {
+ .name = "tps80031-clock",
+ },
+ {
+ .name = "tps80031-rtc",
+ .num_resources = ARRAY_SIZE(tps80031_rtc_resources),
+ .resources = tps80031_rtc_resources,
+ },
+ {
+ .name = "tps80031-gpadc",
+ },
+ {
+ .name = "tps80031-fuel-gauge",
+ },
+ {
+ .name = "tps80031-charger",
+ },
+};
+
+static int tps80031_slave_address[TPS80031_NUM_SLAVES] = {
+ TPS80031_I2C_ID0_ADDR,
+ TPS80031_I2C_ID1_ADDR,
+ TPS80031_I2C_ID2_ADDR,
+ TPS80031_I2C_ID3_ADDR,
+};
+
+struct tps80031_pupd_data {
+ u8 reg;
+ u8 pullup_bit;
+ u8 pulldown_bit;
+};
+
+#define TPS80031_IRQ(_reg, _mask) \
+ { \
+ .reg_offset = (TPS80031_INT_MSK_LINE_##_reg) - \
+ TPS80031_INT_MSK_LINE_A, \
+ .mask = BIT(_mask), \
+ }
+
+static const struct regmap_irq tps80031_main_irqs[] = {
+ [TPS80031_INT_PWRON] = TPS80031_IRQ(A, 0),
+ [TPS80031_INT_RPWRON] = TPS80031_IRQ(A, 1),
+ [TPS80031_INT_SYS_VLOW] = TPS80031_IRQ(A, 2),
+ [TPS80031_INT_RTC_ALARM] = TPS80031_IRQ(A, 3),
+ [TPS80031_INT_RTC_PERIOD] = TPS80031_IRQ(A, 4),
+ [TPS80031_INT_HOT_DIE] = TPS80031_IRQ(A, 5),
+ [TPS80031_INT_VXX_SHORT] = TPS80031_IRQ(A, 6),
+ [TPS80031_INT_SPDURATION] = TPS80031_IRQ(A, 7),
+ [TPS80031_INT_WATCHDOG] = TPS80031_IRQ(B, 0),
+ [TPS80031_INT_BAT] = TPS80031_IRQ(B, 1),
+ [TPS80031_INT_SIM] = TPS80031_IRQ(B, 2),
+ [TPS80031_INT_MMC] = TPS80031_IRQ(B, 3),
+ [TPS80031_INT_RES] = TPS80031_IRQ(B, 4),
+ [TPS80031_INT_GPADC_RT] = TPS80031_IRQ(B, 5),
+ [TPS80031_INT_GPADC_SW2_EOC] = TPS80031_IRQ(B, 6),
+ [TPS80031_INT_CC_AUTOCAL] = TPS80031_IRQ(B, 7),
+ [TPS80031_INT_ID_WKUP] = TPS80031_IRQ(C, 0),
+ [TPS80031_INT_VBUSS_WKUP] = TPS80031_IRQ(C, 1),
+ [TPS80031_INT_ID] = TPS80031_IRQ(C, 2),
+ [TPS80031_INT_VBUS] = TPS80031_IRQ(C, 3),
+ [TPS80031_INT_CHRG_CTRL] = TPS80031_IRQ(C, 4),
+ [TPS80031_INT_EXT_CHRG] = TPS80031_IRQ(C, 5),
+ [TPS80031_INT_INT_CHRG] = TPS80031_IRQ(C, 6),
+ [TPS80031_INT_RES2] = TPS80031_IRQ(C, 7),
+};
+
+static struct regmap_irq_chip tps80031_irq_chip = {
+ .name = "tps80031",
+ .irqs = tps80031_main_irqs,
+ .num_irqs = ARRAY_SIZE(tps80031_main_irqs),
+ .num_regs = 3,
+ .status_base = TPS80031_INT_STS_A,
+ .mask_base = TPS80031_INT_MSK_LINE_A,
+};
+
+#define PUPD_DATA(_reg, _pulldown_bit, _pullup_bit) \
+ { \
+ .reg = TPS80031_CFG_INPUT_PUPD##_reg, \
+ .pulldown_bit = _pulldown_bit, \
+ .pullup_bit = _pullup_bit, \
+ }
+
+static const struct tps80031_pupd_data tps80031_pupds[] = {
+ [TPS80031_PREQ1] = PUPD_DATA(1, BIT(0), BIT(1)),
+ [TPS80031_PREQ2A] = PUPD_DATA(1, BIT(2), BIT(3)),
+ [TPS80031_PREQ2B] = PUPD_DATA(1, BIT(4), BIT(5)),
+ [TPS80031_PREQ2C] = PUPD_DATA(1, BIT(6), BIT(7)),
+ [TPS80031_PREQ3] = PUPD_DATA(2, BIT(0), BIT(1)),
+ [TPS80031_NRES_WARM] = PUPD_DATA(2, 0, BIT(2)),
+ [TPS80031_PWM_FORCE] = PUPD_DATA(2, BIT(5), 0),
+ [TPS80031_CHRG_EXT_CHRG_STATZ] = PUPD_DATA(2, 0, BIT(6)),
+ [TPS80031_SIM] = PUPD_DATA(3, BIT(0), BIT(1)),
+ [TPS80031_MMC] = PUPD_DATA(3, BIT(2), BIT(3)),
+ [TPS80031_GPADC_START] = PUPD_DATA(3, BIT(4), 0),
+ [TPS80031_DVSI2C_SCL] = PUPD_DATA(4, 0, BIT(0)),
+ [TPS80031_DVSI2C_SDA] = PUPD_DATA(4, 0, BIT(1)),
+ [TPS80031_CTLI2C_SCL] = PUPD_DATA(4, 0, BIT(2)),
+ [TPS80031_CTLI2C_SDA] = PUPD_DATA(4, 0, BIT(3)),
+};
+static struct tps80031 *tps80031_power_off_dev;
+
+int tps80031_ext_power_req_config(struct device *dev,
+ unsigned long ext_ctrl_flag, int preq_bit,
+ int state_reg_add, int trans_reg_add)
+{
+ u8 res_ass_reg = 0;
+ int preq_mask_bit = 0;
+ int ret;
+
+ if (!(ext_ctrl_flag & TPS80031_EXT_PWR_REQ))
+ return 0;
+
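+	/* Select the PREQ resource-assignment register and the PREQ mask bit */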
+ if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ1) {
+ res_ass_reg = TPS80031_PREQ1_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 5;
+ } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ2) {
+ res_ass_reg = TPS80031_PREQ2_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 6;
+ } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ3) {
+ res_ass_reg = TPS80031_PREQ3_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 7;
+ }
+
+	/* Configure the resource assignment (RES_ASS) register */
+ ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1, res_ass_reg,
+ BIT(preq_bit & 0x7));
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x setbit failed, err = %d\n",
+ res_ass_reg, ret);
+ return ret;
+ }
+
+ /* Unmask the PREQ */
+ ret = tps80031_clr_bits(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_MSK_TRANSITION, BIT(preq_mask_bit));
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x clrbit failed, err = %d\n",
+ TPS80031_PHOENIX_MSK_TRANSITION, ret);
+ return ret;
+ }
+
+ /* Switch regulator control to resource now */
+ if (ext_ctrl_flag & (TPS80031_PWR_REQ_INPUT_PREQ2 |
+ TPS80031_PWR_REQ_INPUT_PREQ3)) {
+ ret = tps80031_update(dev, TPS80031_SLAVE_ID1, state_reg_add,
+ 0x0, TPS80031_STATE_MASK);
+ if (ret < 0)
+ dev_err(dev, "reg 0x%02x update failed, err = %d\n",
+ state_reg_add, ret);
+ } else {
+ ret = tps80031_update(dev, TPS80031_SLAVE_ID1, trans_reg_add,
+ TPS80031_TRANS_SLEEP_OFF,
+ TPS80031_TRANS_SLEEP_MASK);
+ if (ret < 0)
+ dev_err(dev, "reg 0x%02x update failed, err = %d\n",
+ trans_reg_add, ret);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_ext_power_req_config);
+
+static void tps80031_power_off(void)
+{
+ dev_info(tps80031_power_off_dev->dev, "switching off PMU\n");
+ tps80031_write(tps80031_power_off_dev->dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_DEV_ON, TPS80031_DEVOFF);
+}
+
+static void tps80031_pupd_init(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ struct tps80031_pupd_init_data *pupd_init_data = pdata->pupd_init_data;
+ int data_size = pdata->pupd_init_data_size;
+ int i;
+
+ for (i = 0; i < data_size; ++i) {
+ struct tps80031_pupd_init_data *pupd_init = &pupd_init_data[i];
+ const struct tps80031_pupd_data *pupd =
+ &tps80031_pupds[pupd_init->input_pin];
+ u8 update_value = 0;
+ u8 update_mask = pupd->pulldown_bit | pupd->pullup_bit;
+
+ if (pupd_init->setting == TPS80031_PUPD_PULLDOWN)
+ update_value = pupd->pulldown_bit;
+ else if (pupd_init->setting == TPS80031_PUPD_PULLUP)
+ update_value = pupd->pullup_bit;
+
+ tps80031_update(tps80031->dev, TPS80031_SLAVE_ID1, pupd->reg,
+ update_value, update_mask);
+ }
+}
+
+static int tps80031_init_ext_control(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ struct device *dev = tps80031->dev;
+ int ret;
+ int i;
+
+	/* Clear all external power-request (PREQ) resource assignments */
+ for (i = 0; i < 9; ++i) {
+ ret = tps80031_write(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PREQ1_RES_ASS_A + i, 0);
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x write failed, err = %d\n",
+ TPS80031_PREQ1_RES_ASS_A + i, ret);
+ return ret;
+ }
+ }
+
+	/* Mask the PREQ1/PREQ2/PREQ3 transitions */
+ ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_MSK_TRANSITION, 0x7 << 5);
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x set_bits failed, err = %d\n",
+ TPS80031_PHOENIX_MSK_TRANSITION, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int tps80031_irq_init(struct tps80031 *tps80031, int irq, int irq_base)
+{
+ struct device *dev = tps80031->dev;
+ int i, ret;
+
+ /*
+	 * The MASK (INT_MSK_STS) registers control whether the status
+	 * register is updated when an interrupt occurs, and the LINE
+	 * (INT_MSK_LINE) registers control whether the status is passed on
+	 * to the actual interrupt line. As per the datasheet:
+ * When INT_MSK_LINE [i] is set to 1, the associated interrupt
+ * number i is INT line masked, which means that no interrupt is
+ * generated on the INT line.
+ * When INT_MSK_LINE [i] is set to 0, the associated interrupt
+ * number i is line enabled: An interrupt is generated on the
+ * INT line.
+	 * In either case, whether the INT_STS [i] status bit is updated
+	 * depends only on the INT_MSK_STS [i] configuration register bit.
+ *
+ * When INT_MSK_STS [i] is set to 1, the associated interrupt number
+ * i is status masked, which means that no interrupt is stored in
+ * the INT_STS[i] status bit. Note that no interrupt number i is
+ * generated on the INT line, even if the INT_MSK_LINE [i] register
+ * bit is set to 0.
+ * When INT_MSK_STS [i] is set to 0, the associated interrupt number i
+ * is status enabled: An interrupt status is updated in the INT_STS [i]
+ * register. The interrupt may or may not be generated on the INT line,
+ * depending on the INT_MSK_LINE [i] configuration register bit.
+ */
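+	/* Enable status updates for all interrupt sources; line masking is
+	 * handled by the regmap irq_chip registered below. */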
+ for (i = 0; i < 3; i++)
+ tps80031_write(dev, TPS80031_SLAVE_ID2,
+ TPS80031_INT_MSK_STS_A + i, 0x00);
+
+ ret = regmap_add_irq_chip(tps80031->regmap[TPS80031_SLAVE_ID2], irq,
+ IRQF_ONESHOT, irq_base,
+ &tps80031_irq_chip, &tps80031->irq_data);
+ if (ret < 0) {
+ dev_err(dev, "add irq failed, err = %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
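+/* Each I2C slave address (ID0..ID3) exposes a different register window */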
+static bool rd_wr_reg_id0(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SMPS1_CFG_FORCE ... TPS80031_SMPS2_CFG_VOLTAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id1(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SECONDS_REG ... TPS80031_RTC_RESET_STATUS_REG:
+ case TPS80031_VALIDITY0 ... TPS80031_VALIDITY7:
+ case TPS80031_PHOENIX_START_CONDITION ... TPS80031_KEY_PRESS_DUR_CFG:
+ case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE:
+ case TPS80031_BROADCAST_ADDR_ALL ... TPS80031_BROADCAST_ADDR_CLK_RST:
+ case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE:
+ case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE:
+ case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C:
+ case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING:
+ case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD:
+ case TPS80031_BACKUP_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_volatile_reg_id1(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE:
+ case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE:
+ case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE:
+ case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C:
+ case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING:
+ case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id2(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_USB_VENDOR_ID_LSB ... TPS80031_USB_OTG_REVISION:
+ case TPS80031_GPADC_CTRL ... TPS80031_CTRL_P1:
+ case TPS80031_RTCH0_LSB ... TPS80031_GPCH0_MSB:
+ case TPS80031_TOGGLE1 ... TPS80031_VIBMODE:
+ case TPS80031_PWM1ON ... TPS80031_PWM2OFF:
+ case TPS80031_FG_REG_00 ... TPS80031_FG_REG_11:
+ case TPS80031_INT_STS_A ... TPS80031_INT_MSK_STS_C:
+ case TPS80031_CONTROLLER_CTRL2 ... TPS80031_LED_PWM_CTRL2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id3(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_GPADC_TRIM0 ... TPS80031_GPADC_TRIM18:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tps80031_regmap_configs[] = {
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id0,
+ .readable_reg = rd_wr_reg_id0,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id1,
+ .readable_reg = rd_wr_reg_id1,
+ .volatile_reg = is_volatile_reg_id1,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id2,
+ .readable_reg = rd_wr_reg_id2,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id3,
+ .readable_reg = rd_wr_reg_id3,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+};
+
+static int tps80031_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps80031_platform_data *pdata = client->dev.platform_data;
+ struct tps80031 *tps80031;
+ int ret;
+ uint8_t es_version;
+ uint8_t ep_ver;
+ int i;
+
+ if (!pdata) {
+ dev_err(&client->dev, "tps80031 requires platform data\n");
+ return -EINVAL;
+ }
+
+ tps80031 = devm_kzalloc(&client->dev, sizeof(*tps80031), GFP_KERNEL);
+ if (!tps80031) {
+ dev_err(&client->dev, "Malloc failed for tps80031\n");
+ return -ENOMEM;
+ }
+
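+	/* Instantiate a dummy I2C client and a regmap for each additional slave address */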
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031_slave_address[i] == client->addr)
+ tps80031->clients[i] = client;
+ else
+ tps80031->clients[i] = i2c_new_dummy(client->adapter,
+ tps80031_slave_address[i]);
+ if (!tps80031->clients[i]) {
+ dev_err(&client->dev, "can't attach client %d\n", i);
+ ret = -ENOMEM;
+ goto fail_client_reg;
+ }
+
+ i2c_set_clientdata(tps80031->clients[i], tps80031);
+ tps80031->regmap[i] = devm_regmap_init_i2c(tps80031->clients[i],
+ &tps80031_regmap_configs[i]);
+ if (IS_ERR(tps80031->regmap[i])) {
+ ret = PTR_ERR(tps80031->regmap[i]);
+ dev_err(&client->dev,
+ "regmap %d init failed, err %d\n", i, ret);
+ goto fail_client_reg;
+ }
+ }
+
+ ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
+ TPS80031_JTAGVERNUM, &es_version);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Silicon version number read failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
+ TPS80031_EPROM_REV, &ep_ver);
+ if (ret < 0) {
+ dev_err(&client->dev,
+			"Silicon EEPROM version read failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ dev_info(&client->dev, "ES version 0x%02x and EPROM version 0x%02x\n",
+ es_version, ep_ver);
+ tps80031->es_version = es_version;
+ tps80031->dev = &client->dev;
+ i2c_set_clientdata(client, tps80031);
+ tps80031->chip_info = id->driver_data;
+
+ ret = tps80031_irq_init(tps80031, client->irq, pdata->irq_base);
+ if (ret) {
+ dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ tps80031_pupd_init(tps80031, pdata);
+
+ tps80031_init_ext_control(tps80031, pdata);
+
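+	/* Register the MFD cells against the regmap IRQ domain */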
+ ret = mfd_add_devices(tps80031->dev, -1,
+ tps80031_cell, ARRAY_SIZE(tps80031_cell),
+ NULL, 0,
+ regmap_irq_get_domain(tps80031->irq_data));
+ if (ret < 0) {
+ dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
+ goto fail_mfd_add;
+ }
+
+ if (pdata->use_power_off && !pm_power_off) {
+ tps80031_power_off_dev = tps80031;
+ pm_power_off = tps80031_power_off;
+ }
+ return 0;
+
+fail_mfd_add:
+ regmap_del_irq_chip(client->irq, tps80031->irq_data);
+
+fail_client_reg:
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031->clients[i] && (tps80031->clients[i] != client))
+ i2c_unregister_device(tps80031->clients[i]);
+ }
+ return ret;
+}
+
+static int tps80031_remove(struct i2c_client *client)
+{
+ struct tps80031 *tps80031 = i2c_get_clientdata(client);
+ int i;
+
+ if (tps80031_power_off_dev == tps80031) {
+ tps80031_power_off_dev = NULL;
+ pm_power_off = NULL;
+ }
+
+ mfd_remove_devices(tps80031->dev);
+
+ regmap_del_irq_chip(client->irq, tps80031->irq_data);
+
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031->clients[i] != client)
+ i2c_unregister_device(tps80031->clients[i]);
+ }
+ return 0;
+}
+
+static const struct i2c_device_id tps80031_id_table[] = {
+ { "tps80031", TPS80031 },
+ { "tps80032", TPS80032 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps80031_id_table);
+
+static struct i2c_driver tps80031_driver = {
+ .driver = {
+ .name = "tps80031",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_probe,
+ .remove = tps80031_remove,
+ .id_table = tps80031_id_table,
+};
+
+static int __init tps80031_init(void)
+{
+ return i2c_add_driver(&tps80031_driver);
+}
+subsys_initcall(tps80031_init);
+
+static void __exit tps80031_exit(void)
+{
+ i2c_del_driver(&tps80031_driver);
+}
+module_exit(tps80031_exit);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("TPS80031 core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 11b76c0109f5..4f3baadd0038 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -32,6 +32,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>
@@ -65,9 +66,6 @@
/* Triton Core internal information (BEGIN) */
-/* Last - for index max*/
-#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG
-
#define TWL_NUM_SLAVES 4
#define SUB_CHIP_ID0 0
@@ -171,13 +169,7 @@ EXPORT_SYMBOL(twl_rev);
/* Structure for each TWL4030/TWL6030 Slave */
struct twl_client {
struct i2c_client *client;
- u8 address;
-
- /* max numb of i2c_msg required is for read =2 */
- struct i2c_msg xfer_msg[2];
-
- /* To lock access to xfer_msg */
- struct mutex xfer_lock;
+ struct regmap *regmap;
};
static struct twl_client twl_modules[TWL_NUM_SLAVES];
@@ -189,7 +181,7 @@ struct twl_mapping {
};
static struct twl_mapping *twl_map;
-static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
+static struct twl_mapping twl4030_map[] = {
/*
* NOTE: don't change this table without updating the
* <linux/i2c/twl.h> defines for TWL4030_MODULE_*
@@ -197,34 +189,62 @@ static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
*/
{ 0, TWL4030_BASEADD_USB },
-
{ 1, TWL4030_BASEADD_AUDIO_VOICE },
{ 1, TWL4030_BASEADD_GPIO },
{ 1, TWL4030_BASEADD_INTBR },
{ 1, TWL4030_BASEADD_PIH },
- { 1, TWL4030_BASEADD_TEST },
+ { 1, TWL4030_BASEADD_TEST },
{ 2, TWL4030_BASEADD_KEYPAD },
{ 2, TWL4030_BASEADD_MADC },
{ 2, TWL4030_BASEADD_INTERRUPTS },
{ 2, TWL4030_BASEADD_LED },
+
{ 2, TWL4030_BASEADD_MAIN_CHARGE },
{ 2, TWL4030_BASEADD_PRECHARGE },
{ 2, TWL4030_BASEADD_PWM0 },
{ 2, TWL4030_BASEADD_PWM1 },
{ 2, TWL4030_BASEADD_PWMA },
+
{ 2, TWL4030_BASEADD_PWMB },
{ 2, TWL5031_BASEADD_ACCESSORY },
{ 2, TWL5031_BASEADD_INTERRUPTS },
-
{ 3, TWL4030_BASEADD_BACKUP },
{ 3, TWL4030_BASEADD_INT },
+
{ 3, TWL4030_BASEADD_PM_MASTER },
{ 3, TWL4030_BASEADD_PM_RECEIVER },
{ 3, TWL4030_BASEADD_RTC },
{ 3, TWL4030_BASEADD_SECURED_REG },
};
+static struct regmap_config twl4030_regmap_config[4] = {
+ {
+ /* Address 0x48 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x49 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4a */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4b */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+};
+
static struct twl_mapping twl6030_map[] = {
/*
* NOTE: don't change this table without updating the
@@ -254,14 +274,35 @@ static struct twl_mapping twl6030_map[] = {
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+
{ SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER },
{ SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC },
-
{ SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
{ SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
{ SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
};
+static struct regmap_config twl6030_regmap_config[3] = {
+ {
+ /* Address 0x48 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x49 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4a */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+};
+
/*----------------------------------------------------------------------*/
/* Exported Functions */
@@ -283,9 +324,8 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
int ret;
int sid;
struct twl_client *twl;
- struct i2c_msg *msg;
- if (unlikely(mod_no > TWL_MODULE_LAST)) {
+ if (unlikely(mod_no >= TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
@@ -301,32 +341,14 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
twl = &twl_modules[sid];
- mutex_lock(&twl->xfer_lock);
- /*
- * [MSG1]: fill the register address data
- * fill the data Tx buffer
- */
- msg = &twl->xfer_msg[0];
- msg->addr = twl->address;
- msg->len = num_bytes + 1;
- msg->flags = 0;
- msg->buf = value;
- /* over write the first byte of buffer with the register address */
- *value = twl_map[mod_no].base + reg;
- ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1);
- mutex_unlock(&twl->xfer_lock);
-
- /* i2c_transfer returns number of messages transferred */
- if (ret != 1) {
- pr_err("%s: i2c_write failed to transfer all messages\n",
- DRIVER_NAME);
- if (ret < 0)
- return ret;
- else
- return -EIO;
- } else {
- return 0;
- }
+ ret = regmap_bulk_write(twl->regmap, twl_map[mod_no].base + reg,
+ value, num_bytes);
+
+ if (ret)
+ pr_err("%s: Write failed (mod %d, reg 0x%02x count %d)\n",
+ DRIVER_NAME, mod_no, reg, num_bytes);
+
+ return ret;
}
EXPORT_SYMBOL(twl_i2c_write);
@@ -342,12 +364,10 @@ EXPORT_SYMBOL(twl_i2c_write);
int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
int ret;
- u8 val;
int sid;
struct twl_client *twl;
- struct i2c_msg *msg;
- if (unlikely(mod_no > TWL_MODULE_LAST)) {
+ if (unlikely(mod_no >= TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
@@ -363,34 +383,14 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
twl = &twl_modules[sid];
- mutex_lock(&twl->xfer_lock);
- /* [MSG1] fill the register address data */
- msg = &twl->xfer_msg[0];
- msg->addr = twl->address;
- msg->len = 1;
- msg->flags = 0; /* Read the register value */
- val = twl_map[mod_no].base + reg;
- msg->buf = &val;
- /* [MSG2] fill the data rx buffer */
- msg = &twl->xfer_msg[1];
- msg->addr = twl->address;
- msg->flags = I2C_M_RD; /* Read the register value */
- msg->len = num_bytes; /* only n bytes */
- msg->buf = value;
- ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
- mutex_unlock(&twl->xfer_lock);
-
- /* i2c_transfer returns number of messages transferred */
- if (ret != 2) {
- pr_err("%s: i2c_read failed to transfer all messages\n",
- DRIVER_NAME);
- if (ret < 0)
- return ret;
- else
- return -EIO;
- } else {
- return 0;
- }
+ ret = regmap_bulk_read(twl->regmap, twl_map[mod_no].base + reg,
+ value, num_bytes);
+
+ if (ret)
+ pr_err("%s: Read failed (mod %d, reg 0x%02x count %d)\n",
+ DRIVER_NAME, mod_no, reg, num_bytes);
+
+ return ret;
}
EXPORT_SYMBOL(twl_i2c_read);
@@ -404,12 +404,7 @@ EXPORT_SYMBOL(twl_i2c_read);
*/
int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
{
-
- /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */
- u8 temp_buffer[2] = { 0 };
- /* offset 1 contains the data */
- temp_buffer[1] = value;
- return twl_i2c_write(mod_no, temp_buffer, reg, 1);
+ return twl_i2c_write(mod_no, &value, reg, 1);
}
EXPORT_SYMBOL(twl_i2c_write_u8);
@@ -646,8 +641,9 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
return PTR_ERR(child);
}
- if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc) {
- child = add_child(2, "twl4030_madc",
+ if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
+ twl_class_is_4030()) {
+ child = add_child(SUB_CHIP_ID2, "twl4030_madc",
pdata->madc, sizeof(*pdata->madc),
true, irq_base + MADC_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -663,15 +659,21 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* HW security concerns, and "least privilege".
*/
sub_chip_id = twl_map[TWL_MODULE_RTC].sid;
- child = add_child(sub_chip_id, "twl_rtc",
- NULL, 0,
+ child = add_child(sub_chip_id, "twl_rtc", NULL, 0,
true, irq_base + RTC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- if (IS_ENABLED(CONFIG_PWM_TWL6030) && twl_class_is_6030()) {
- child = add_child(SUB_CHIP_ID1, "twl6030-pwm", NULL, 0,
+ if (IS_ENABLED(CONFIG_PWM_TWL)) {
+ child = add_child(SUB_CHIP_ID1, "twl-pwm", NULL, 0,
+ false, 0, 0);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
+ if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
+ child = add_child(SUB_CHIP_ID1, "twl-pwmled", NULL, 0,
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -723,9 +725,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
- child = add_child(0, "twl4030_usb",
- pdata->usb, sizeof(*pdata->usb),
- true,
+ child = add_child(SUB_CHIP_ID0, "twl4030_usb",
+ pdata->usb, sizeof(*pdata->usb), true,
/* irq0 = USB_PRES, irq1 = USB */
irq_base + USB_PRES_INTR_OFFSET,
irq_base + USB_INTR_OFFSET);
@@ -773,9 +774,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
pdata->usb->features = features;
- child = add_child(0, "twl6030_usb",
- pdata->usb, sizeof(*pdata->usb),
- true,
+ child = add_child(SUB_CHIP_ID0, "twl6030_usb",
+ pdata->usb, sizeof(*pdata->usb), true,
/* irq1 = VBUS_PRES, irq0 = USB ID */
irq_base + USBOTG_INTR_OFFSET,
irq_base + USB_PRES_INTR_OFFSET);
@@ -799,22 +799,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
+ child = add_child(SUB_CHIP_ID3, "twl4030_wdt", NULL, 0,
+ false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(1, "twl4030_pwrbutton",
- NULL, 0, true, irq_base + 8 + 0, 0);
+ child = add_child(SUB_CHIP_ID3, "twl4030_pwrbutton", NULL, 0,
+ true, irq_base + 8 + 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
twl_class_is_4030()) {
- sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl4030-audio",
+ child = add_child(SUB_CHIP_ID1, "twl4030-audio",
pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
@@ -1054,7 +1054,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
!(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(3, "twl4030_bci",
+ child = add_child(SUB_CHIP_ID3, "twl4030_bci",
pdata->bci, sizeof(*pdata->bci), false,
/* irq0 = CHG_PRES, irq1 = BCI */
irq_base + BCI_PRES_INTR_OFFSET,
@@ -1077,8 +1077,8 @@ static inline int __init protect_pm_master(void)
{
int e = 0;
- e = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
return e;
}
@@ -1086,12 +1086,10 @@ static inline int __init unprotect_pm_master(void)
{
int e = 0;
- e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
- e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
return e;
}
@@ -1176,6 +1174,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct twl4030_platform_data *pdata = client->dev.platform_data;
struct device_node *node = client->dev.of_node;
struct platform_device *pdev;
+ struct regmap_config *twl_regmap_config;
int irq_base = 0;
int status;
unsigned i, num_slaves;
@@ -1229,22 +1228,23 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
if ((id->driver_data) & TWL6030_CLASS) {
twl_id = TWL6030_CLASS_ID;
twl_map = &twl6030_map[0];
+ twl_regmap_config = twl6030_regmap_config;
num_slaves = TWL_NUM_SLAVES - 1;
} else {
twl_id = TWL4030_CLASS_ID;
twl_map = &twl4030_map[0];
+ twl_regmap_config = twl4030_regmap_config;
num_slaves = TWL_NUM_SLAVES;
}
for (i = 0; i < num_slaves; i++) {
struct twl_client *twl = &twl_modules[i];
- twl->address = client->addr + i;
if (i == 0) {
twl->client = client;
} else {
twl->client = i2c_new_dummy(client->adapter,
- twl->address);
+ client->addr + i);
if (!twl->client) {
dev_err(&client->dev,
"can't attach client %d\n", i);
@@ -1252,7 +1252,16 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto fail;
}
}
- mutex_init(&twl->xfer_lock);
+
+ twl->regmap = devm_regmap_init_i2c(twl->client,
+ &twl_regmap_config[i]);
+ if (IS_ERR(twl->regmap)) {
+ status = PTR_ERR(twl->regmap);
+ dev_err(&client->dev,
+ "Failed to allocate regmap %d, err: %d\n", i,
+ status);
+ goto fail;
+ }
}
inuse = true;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index cdd1173ed4e9..a5f9888aa19c 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -295,8 +295,8 @@ static irqreturn_t handle_twl4030_pih(int irq, void *devid)
irqreturn_t ret;
u8 pih_isr;
- ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
- REG_PIH_ISR_P1);
+ ret = twl_i2c_read_u8(TWL_MODULE_PIH, &pih_isr,
+ REG_PIH_ISR_P1);
if (ret) {
pr_warning("twl4030: I2C error %d reading PIH ISR\n", ret);
return IRQ_NONE;
@@ -501,7 +501,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
} imr;
/* byte[0] gets overwritten as we write ... */
- imr.word = cpu_to_le32(agent->imr << 8);
+ imr.word = cpu_to_le32(agent->imr);
agent->imr_change_pending = false;
/* write the whole mask ... simpler than subsetting it */
@@ -526,7 +526,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
* any processor on the other IRQ line, EDR registers are
* shared.
*/
- status = twl_i2c_read(sih->module, bytes + 1,
+ status = twl_i2c_read(sih->module, bytes,
sih->edr_offset, sih->bytes_edr);
if (status) {
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -538,7 +538,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
while (edge_change) {
int i = fls(edge_change) - 1;
struct irq_data *idata;
- int byte = 1 + (i >> 2);
+ int byte = i >> 2;
int off = (i & 0x3) * 2;
unsigned int type;
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index a39dcf3e2133..88ff9dc83305 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -173,7 +173,7 @@ static int twl4030battery_temperature(int raw_volt)
volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
/* Getting and calculating the supply current in micro ampers */
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
REG_BCICTL2);
if (ret < 0)
return ret;
@@ -196,7 +196,7 @@ static int twl4030battery_current(int raw_volt)
int ret;
u8 val;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
TWL4030_BCI_BCICTL1);
if (ret)
return ret;
@@ -635,7 +635,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
int ret;
u8 regval;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X",
@@ -646,7 +646,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
else
regval &= chan ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
- ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n",
@@ -668,7 +668,7 @@ static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
u8 regval;
int ret;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_MADC_CTRL1);
if (ret) {
dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n",
@@ -725,7 +725,7 @@ static int twl4030_madc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_current_generator;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n",
@@ -733,7 +733,7 @@ static int twl4030_madc_probe(struct platform_device *pdev)
goto err_i2c;
}
regval |= TWL4030_BCI_MESBAT;
- ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n",
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index a5332063183a..dd362c1078e1 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -128,12 +128,10 @@ static int twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_MEMORY_ADDRESS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_MEMORY_ADDRESS);
if (err)
goto out;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
- R_MEMORY_DATA);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, byte, R_MEMORY_DATA);
out:
return err;
}
@@ -161,7 +159,7 @@ out:
static int twl4030_write_script(u8 address, struct twl4030_ins *script,
int len)
{
- int err;
+ int err = -EINVAL;
for (; len; len--, address++, script++) {
if (len == 1) {
@@ -189,19 +187,16 @@ static int twl4030_config_wakeup3_sequence(u8 address)
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P3 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_S2A3);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A3);
if (err)
goto out;
/* P3 LVL_WAKEUP should be on LEVEL */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P3_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P3_SW_EVENTS);
out:
if (err)
pr_err("TWL4030 wakeup sequence for P3 config error\n");
@@ -214,43 +209,38 @@ static int twl4030_config_wakeup12_sequence(u8 address)
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P1 and P2 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_S2A12);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A12);
if (err)
goto out;
/* P1/P2 LVL_WAKEUP should be on LEVEL */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P1_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P2_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P2_SW_EVENTS);
if (err)
goto out;
if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) {
/* Disabling AC charger effect on sleep-active transitions */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_CFG_P1_TRANSITION);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data,
+ R_CFG_P1_TRANSITION);
if (err)
goto out;
data &= ~(1<<1);
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
- R_CFG_P1_TRANSITION);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data,
+ R_CFG_P1_TRANSITION);
if (err)
goto out;
}
@@ -267,8 +257,7 @@ static int twl4030_config_sleep_sequence(u8 address)
int err;
/* Set ACTIVE to SLEEP SEQ address in T2 memory*/
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_A2S);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_A2S);
if (err)
pr_err("TWL4030 sleep sequence config error\n");
@@ -282,42 +271,35 @@ static int twl4030_config_warmreset_sequence(u8 address)
u8 rd_data;
/* Set WARM RESET SEQ address for P1 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_WARM);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_WARM);
if (err)
goto out;
/* P1/P2/P3 enable WARMRESET */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P1_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P2_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P2_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P3_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P3_SW_EVENTS);
out:
if (err)
pr_err("TWL4030 warmreset seq config error\n");
@@ -341,7 +323,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
rconfig_addr = res_config_addrs[rconfig->resource];
/* Set resource group */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &grp,
rconfig_addr + DEV_GRP_OFFSET);
if (err) {
pr_err("TWL4030 Resource %d group could not be read\n",
@@ -352,7 +334,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) {
grp &= ~DEV_GRP_MASK;
grp |= rconfig->devgroup << DEV_GRP_SHIFT;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
grp, rconfig_addr + DEV_GRP_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program devgroup\n");
@@ -361,7 +343,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
}
/* Set resource types */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &type,
rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d type could not be read\n",
@@ -379,7 +361,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
type |= rconfig->type2 << TYPE2_SHIFT;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
type, rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program resource type\n");
@@ -387,7 +369,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
}
/* Set remap states */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &remap,
rconfig_addr + REMAP_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d remap could not be read\n",
@@ -405,7 +387,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
remap |= rconfig->remap_sleep << SLEEP_STATE_SHIFT;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
remap,
rconfig_addr + REMAP_OFFSET);
if (err < 0) {
@@ -463,49 +445,47 @@ int twl4030_remove_script(u8 flags)
{
int err = 0;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
if (flags & TWL4030_WRST_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_WARM);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_WARM);
if (err)
return err;
}
if (flags & TWL4030_WAKEUP12_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_S2A12);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_S2A12);
if (err)
return err;
}
if (flags & TWL4030_WAKEUP3_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_S2A3);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_S2A3);
if (err)
return err;
}
if (flags & TWL4030_SLEEP_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_A2S);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_A2S);
if (err)
return err;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
@@ -521,7 +501,7 @@ void twl4030_power_off(void)
{
int err;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, PWR_DEVOFF,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
TWL4030_PM_MASTER_P1_SW_EVENTS);
if (err)
pr_err("TWL4030 Unable to power off\n");
@@ -534,15 +514,13 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
struct twl4030_resconfig *resconfig;
u8 val, address = twl4030_start_script_address;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
@@ -567,14 +545,14 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
/* Board has to be wired properly to use this feature */
if (twl4030_scripts->use_poweroff && !pm_power_off) {
		/* Default for SEQ_OFFSYNC is set, let's ensure this */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
TWL4030_PM_MASTER_CFG_P123_TRANSITION);
if (err) {
pr_warning("TWL4030 Unable to read registers\n");
} else if (!(val & SEQ_OFFSYNC)) {
val |= SEQ_OFFSYNC;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val,
TWL4030_PM_MASTER_CFG_P123_TRANSITION);
if (err) {
pr_err("TWL4030 Unable to setup SEQ_OFFSYNC\n");
@@ -586,8 +564,8 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
}
relock:
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
return;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index b76902f1e44a..277a8dba42d5 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -355,7 +355,7 @@ int twl6030_init_irq(struct device *dev, int irq_num)
static struct irq_chip twl6030_irq_chip;
int status = 0;
int i;
- u8 mask[4];
+ u8 mask[3];
nr_irqs = TWL6030_NR_IRQS;
@@ -370,9 +370,9 @@ int twl6030_init_irq(struct device *dev, int irq_num)
irq_end = irq_base + nr_irqs;
+ mask[0] = 0xFF;
mask[1] = 0xFF;
mask[2] = 0xFF;
- mask[3] = 0xFF;
/* mask all int lines */
twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_LINE_A, 3);
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
deleted file mode 100644
index 4b42543da228..000000000000
--- a/drivers/mfd/twl6040-irq.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Interrupt controller support for TWL6040
- *
- * Author: Misael Lopez Cruz <misael.lopez@ti.com>
- *
- * Copyright: (C) 2011 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/twl6040.h>
-
-struct twl6040_irq_data {
- int mask;
- int status;
-};
-
-static struct twl6040_irq_data twl6040_irqs[] = {
- {
- .mask = TWL6040_THMSK,
- .status = TWL6040_THINT,
- },
- {
- .mask = TWL6040_PLUGMSK,
- .status = TWL6040_PLUGINT | TWL6040_UNPLUGINT,
- },
- {
- .mask = TWL6040_HOOKMSK,
- .status = TWL6040_HOOKINT,
- },
- {
- .mask = TWL6040_HFMSK,
- .status = TWL6040_HFINT,
- },
- {
- .mask = TWL6040_VIBMSK,
- .status = TWL6040_VIBINT,
- },
- {
- .mask = TWL6040_READYMSK,
- .status = TWL6040_READYINT,
- },
-};
-
-static inline
-struct twl6040_irq_data *irq_to_twl6040_irq(struct twl6040 *twl6040,
- int irq)
-{
- return &twl6040_irqs[irq - twl6040->irq_base];
-}
-
-static void twl6040_irq_lock(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&twl6040->irq_mutex);
-}
-
-static void twl6040_irq_sync_unlock(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-
- /* write back to hardware any change in irq mask */
- if (twl6040->irq_masks_cur != twl6040->irq_masks_cache) {
- twl6040->irq_masks_cache = twl6040->irq_masks_cur;
- twl6040_reg_write(twl6040, TWL6040_REG_INTMR,
- twl6040->irq_masks_cur);
- }
-
- mutex_unlock(&twl6040->irq_mutex);
-}
-
-static void twl6040_irq_enable(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
- struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
- data->irq);
-
- twl6040->irq_masks_cur &= ~irq_data->mask;
-}
-
-static void twl6040_irq_disable(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
- struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
- data->irq);
-
- twl6040->irq_masks_cur |= irq_data->mask;
-}
-
-static struct irq_chip twl6040_irq_chip = {
- .name = "twl6040",
- .irq_bus_lock = twl6040_irq_lock,
- .irq_bus_sync_unlock = twl6040_irq_sync_unlock,
- .irq_enable = twl6040_irq_enable,
- .irq_disable = twl6040_irq_disable,
-};
-
-static irqreturn_t twl6040_irq_thread(int irq, void *data)
-{
- struct twl6040 *twl6040 = data;
- u8 intid;
- int i;
-
- intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
-
- /* apply masking and report (backwards to handle READYINT first) */
- for (i = ARRAY_SIZE(twl6040_irqs) - 1; i >= 0; i--) {
- if (twl6040->irq_masks_cur & twl6040_irqs[i].mask)
- intid &= ~twl6040_irqs[i].status;
- if (intid & twl6040_irqs[i].status)
- handle_nested_irq(twl6040->irq_base + i);
- }
-
- /* ack unmasked irqs */
- twl6040_reg_write(twl6040, TWL6040_REG_INTID, intid);
-
- return IRQ_HANDLED;
-}
-
-int twl6040_irq_init(struct twl6040 *twl6040)
-{
- struct device_node *node = twl6040->dev->of_node;
- int i, nr_irqs, irq_base, ret;
- u8 val;
-
- mutex_init(&twl6040->irq_mutex);
-
- /* mask the individual interrupt sources */
- twl6040->irq_masks_cur = TWL6040_ALLINT_MSK;
- twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
- twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
-
- nr_irqs = ARRAY_SIZE(twl6040_irqs);
-
- irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(irq_base)) {
- dev_err(twl6040->dev, "Fail to allocate IRQ descs\n");
- return irq_base;
- }
- twl6040->irq_base = irq_base;
-
- irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
- &irq_domain_simple_ops, NULL);
-
- /* Register them with genirq */
- for (i = irq_base; i < irq_base + nr_irqs; i++) {
- irq_set_chip_data(i, twl6040);
- irq_set_chip_and_handler(i, &twl6040_irq_chip,
- handle_level_irq);
- irq_set_nested_thread(i, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(i, IRQF_VALID);
-#else
- irq_set_noprobe(i);
-#endif
- }
-
- ret = request_threaded_irq(twl6040->irq, NULL, twl6040_irq_thread,
- IRQF_ONESHOT, "twl6040", twl6040);
- if (ret) {
- dev_err(twl6040->dev, "failed to request IRQ %d: %d\n",
- twl6040->irq, ret);
- return ret;
- }
-
- /* reset interrupts */
- val = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
-
- /* interrupts cleared on write */
- twl6040_clear_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_INTCLRMODE);
-
- return 0;
-}
-EXPORT_SYMBOL(twl6040_irq_init);
-
-void twl6040_irq_exit(struct twl6040 *twl6040)
-{
- free_irq(twl6040->irq, twl6040);
-}
-EXPORT_SYMBOL(twl6040_irq_exit);
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040.c
index 3f2a1cf02fc0..f361bf38a0aa 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040.c
@@ -37,7 +37,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
-#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
#include <linux/regulator/consumer.h>
@@ -104,7 +103,7 @@ int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
EXPORT_SYMBOL(twl6040_clear_bits);
/* twl6040 codec manual power-up sequence */
-static int twl6040_power_up(struct twl6040 *twl6040)
+static int twl6040_power_up_manual(struct twl6040 *twl6040)
{
u8 ldoctl, ncpctl, lppllctl;
int ret;
@@ -158,11 +157,12 @@ ncp_err:
ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ dev_err(twl6040->dev, "manual power-up failed\n");
return ret;
}
/* twl6040 manual power-down sequence */
-static void twl6040_power_down(struct twl6040 *twl6040)
+static void twl6040_power_down_manual(struct twl6040 *twl6040)
{
u8 ncpctl, ldoctl, lppllctl;
@@ -192,45 +192,48 @@ static void twl6040_power_down(struct twl6040 *twl6040)
twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
}
-static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+static irqreturn_t twl6040_readyint_handler(int irq, void *data)
{
struct twl6040 *twl6040 = data;
- u8 intid, status;
- intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+ complete(&twl6040->ready);
- if (intid & TWL6040_READYINT)
- complete(&twl6040->ready);
+ return IRQ_HANDLED;
+}
- if (intid & TWL6040_THINT) {
- status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
- if (status & TWL6040_TSHUTDET) {
- dev_warn(twl6040->dev,
- "Thermal shutdown, powering-off");
- twl6040_power(twl6040, 0);
- } else {
- dev_warn(twl6040->dev,
- "Leaving thermal shutdown, powering-on");
- twl6040_power(twl6040, 1);
- }
+static irqreturn_t twl6040_thint_handler(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 status;
+
+ status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
+ if (status & TWL6040_TSHUTDET) {
+ dev_warn(twl6040->dev, "Thermal shutdown, powering-off");
+ twl6040_power(twl6040, 0);
+ } else {
+ dev_warn(twl6040->dev, "Leaving thermal shutdown, powering-on");
+ twl6040_power(twl6040, 1);
}
return IRQ_HANDLED;
}
-static int twl6040_power_up_completion(struct twl6040 *twl6040,
- int naudint)
+static int twl6040_power_up_automatic(struct twl6040 *twl6040)
{
int time_left;
- u8 intid;
+
+ gpio_set_value(twl6040->audpwron, 1);
time_left = wait_for_completion_timeout(&twl6040->ready,
msecs_to_jiffies(144));
if (!time_left) {
+ u8 intid;
+
+ dev_warn(twl6040->dev, "timeout waiting for READYINT\n");
intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
if (!(intid & TWL6040_READYINT)) {
- dev_err(twl6040->dev,
- "timeout waiting for READYINT\n");
+ dev_err(twl6040->dev, "automatic power-up failed\n");
+ gpio_set_value(twl6040->audpwron, 0);
return -ETIMEDOUT;
}
}
@@ -240,8 +243,6 @@ static int twl6040_power_up_completion(struct twl6040 *twl6040,
int twl6040_power(struct twl6040 *twl6040, int on)
{
- int audpwron = twl6040->audpwron;
- int naudint = twl6040->irq;
int ret = 0;
mutex_lock(&twl6040->mutex);
@@ -251,23 +252,17 @@ int twl6040_power(struct twl6040 *twl6040, int on)
if (twl6040->power_count++)
goto out;
- if (gpio_is_valid(audpwron)) {
- /* use AUDPWRON line */
- gpio_set_value(audpwron, 1);
- /* wait for power-up completion */
- ret = twl6040_power_up_completion(twl6040, naudint);
+ if (gpio_is_valid(twl6040->audpwron)) {
+ /* use automatic power-up sequence */
+ ret = twl6040_power_up_automatic(twl6040);
if (ret) {
- dev_err(twl6040->dev,
- "automatic power-down failed\n");
twl6040->power_count = 0;
goto out;
}
} else {
/* use manual power-up sequence */
- ret = twl6040_power_up(twl6040);
+ ret = twl6040_power_up_manual(twl6040);
if (ret) {
- dev_err(twl6040->dev,
- "manual power-up failed\n");
twl6040->power_count = 0;
goto out;
}
@@ -288,15 +283,15 @@ int twl6040_power(struct twl6040 *twl6040, int on)
if (--twl6040->power_count)
goto out;
- if (gpio_is_valid(audpwron)) {
+ if (gpio_is_valid(twl6040->audpwron)) {
/* use AUDPWRON line */
- gpio_set_value(audpwron, 0);
+ gpio_set_value(twl6040->audpwron, 0);
/* power-down sequence latency */
usleep_range(500, 700);
} else {
/* use manual power-down sequence */
- twl6040_power_down(twl6040);
+ twl6040_power_down_manual(twl6040);
}
twl6040->sysclk = 0;
twl6040->mclk = 0;
@@ -503,8 +498,27 @@ static struct regmap_config twl6040_regmap_config = {
.readable_reg = twl6040_readable_reg,
};
-static int __devinit twl6040_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
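+/* regmap-irq tables that replace the handler removed from twl6040-irq.c */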
+static const struct regmap_irq twl6040_irqs[] = {
+ { .reg_offset = 0, .mask = TWL6040_THINT, },
+ { .reg_offset = 0, .mask = TWL6040_PLUGINT | TWL6040_UNPLUGINT, },
+ { .reg_offset = 0, .mask = TWL6040_HOOKINT, },
+ { .reg_offset = 0, .mask = TWL6040_HFINT, },
+ { .reg_offset = 0, .mask = TWL6040_VIBINT, },
+ { .reg_offset = 0, .mask = TWL6040_READYINT, },
+};
+
+static struct regmap_irq_chip twl6040_irq_chip = {
+ .name = "twl6040",
+ .irqs = twl6040_irqs,
+ .num_irqs = ARRAY_SIZE(twl6040_irqs),
+
+ .num_regs = 1,
+ .status_base = TWL6040_REG_INTID,
+ .mask_base = TWL6040_REG_INTMR,
+};
+
+static int twl6040_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct twl6040_platform_data *pdata = client->dev.platform_data;
struct device_node *node = client->dev.of_node;
@@ -578,18 +592,31 @@ static int __devinit twl6040_probe(struct i2c_client *client,
goto gpio_err;
}
- /* codec interrupt */
- ret = twl6040_irq_init(twl6040);
- if (ret)
+ ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq,
+ IRQF_ONESHOT, 0, &twl6040_irq_chip,
+ &twl6040->irq_data);
+ if (ret < 0)
goto irq_init_err;
- ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
- NULL, twl6040_naudint_handler, IRQF_ONESHOT,
+ twl6040->irq_ready = regmap_irq_get_virq(twl6040->irq_data,
+ TWL6040_IRQ_READY);
+ twl6040->irq_th = regmap_irq_get_virq(twl6040->irq_data,
+ TWL6040_IRQ_TH);
+
+ ret = request_threaded_irq(twl6040->irq_ready, NULL,
+ twl6040_readyint_handler, IRQF_ONESHOT,
"twl6040_irq_ready", twl6040);
if (ret) {
- dev_err(twl6040->dev, "READY IRQ request failed: %d\n",
- ret);
- goto irq_err;
+ dev_err(twl6040->dev, "READY IRQ request failed: %d\n", ret);
+ goto readyirq_err;
+ }
+
+ ret = request_threaded_irq(twl6040->irq_th, NULL,
+ twl6040_thint_handler, IRQF_ONESHOT,
+ "twl6040_irq_th", twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
+ goto thirq_err;
}
/* dual-access registers controlled by I2C only */
@@ -601,7 +628,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
* The ASoC codec can work without pdata, pass the platform_data only if
* it has been provided.
*/
- irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+ irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_PLUG);
cell = &twl6040->cells[children];
cell->name = "twl6040-codec";
twl6040_codec_rsrc[0].start = irq;
@@ -615,7 +642,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
children++;
if (twl6040_has_vibra(pdata, node)) {
- irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+ irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_VIB);
cell = &twl6040->cells[children];
cell->name = "twl6040-vibra";
@@ -654,9 +681,11 @@ static int __devinit twl6040_probe(struct i2c_client *client,
return 0;
mfd_err:
- free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
-irq_err:
- twl6040_irq_exit(twl6040);
+ free_irq(twl6040->irq_th, twl6040);
+thirq_err:
+ free_irq(twl6040->irq_ready, twl6040);
+readyirq_err:
+ regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
irq_init_err:
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
@@ -670,7 +699,7 @@ err:
return ret;
}
-static int __devexit twl6040_remove(struct i2c_client *client)
+static int twl6040_remove(struct i2c_client *client)
{
struct twl6040 *twl6040 = i2c_get_clientdata(client);
@@ -680,8 +709,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
- free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
- twl6040_irq_exit(twl6040);
+ free_irq(twl6040->irq_ready, twl6040);
+ free_irq(twl6040->irq_th, twl6040);
+ regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
mfd_remove_devices(&client->dev);
i2c_set_clientdata(client, NULL);
@@ -705,7 +735,7 @@ static struct i2c_driver twl6040_driver = {
.owner = THIS_MODULE,
},
.probe = twl6040_probe,
- .remove = __devexit_p(twl6040_remove),
+ .remove = twl6040_remove,
.id_table = twl6040_i2c_id,
};
diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c
index fae15d880758..3c1723aa6225 100644
--- a/drivers/mfd/vexpress-config.c
+++ b/drivers/mfd/vexpress-config.c
@@ -67,6 +67,7 @@ struct vexpress_config_bridge *vexpress_config_bridge_register(
return bridge;
}
+EXPORT_SYMBOL(vexpress_config_bridge_register);
void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
{
@@ -83,6 +84,7 @@ void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
while (!list_empty(&__bridge.transactions))
cpu_relax();
}
+EXPORT_SYMBOL(vexpress_config_bridge_unregister);
struct vexpress_config_func {
@@ -142,6 +144,7 @@ struct vexpress_config_func *__vexpress_config_func_get(struct device *dev,
return func;
}
+EXPORT_SYMBOL(__vexpress_config_func_get);
void vexpress_config_func_put(struct vexpress_config_func *func)
{
@@ -149,7 +152,7 @@ void vexpress_config_func_put(struct vexpress_config_func *func)
of_node_put(func->bridge->node);
kfree(func);
}
-
+EXPORT_SYMBOL(vexpress_config_func_put);
struct vexpress_config_trans {
struct vexpress_config_func *func;
@@ -229,6 +232,7 @@ void vexpress_config_complete(struct vexpress_config_bridge *bridge,
complete(&trans->completion);
}
+EXPORT_SYMBOL(vexpress_config_complete);
int vexpress_config_wait(struct vexpress_config_trans *trans)
{
@@ -236,7 +240,7 @@ int vexpress_config_wait(struct vexpress_config_trans *trans)
return trans->status;
}
-
+EXPORT_SYMBOL(vexpress_config_wait);
int vexpress_config_read(struct vexpress_config_func *func, int offset,
u32 *data)
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index 733c06bd2d17..558c2928f261 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -313,19 +313,11 @@ static void vexpress_sysreg_config_complete(unsigned long data)
}
-void __init vexpress_sysreg_early_init(void __iomem *base)
+void vexpress_sysreg_setup(struct device_node *node)
{
- struct device_node *node = of_find_compatible_node(NULL, NULL,
- "arm,vexpress-sysreg");
-
- if (node)
- base = of_iomap(node, 0);
-
- if (WARN_ON(!base))
+ if (WARN_ON(!vexpress_sysreg_base))
return;
- vexpress_sysreg_base = base;
-
if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE)
vexpress_master_site = VEXPRESS_SITE_DB2;
else
@@ -336,9 +328,23 @@ void __init vexpress_sysreg_early_init(void __iomem *base)
WARN_ON(!vexpress_sysreg_config_bridge);
}
+void __init vexpress_sysreg_early_init(void __iomem *base)
+{
+ vexpress_sysreg_base = base;
+ vexpress_sysreg_setup(NULL);
+}
+
void __init vexpress_sysreg_of_early_init(void)
{
- vexpress_sysreg_early_init(NULL);
+ struct device_node *node = of_find_compatible_node(NULL, NULL,
+ "arm,vexpress-sysreg");
+
+ if (node) {
+ vexpress_sysreg_base = of_iomap(node, 0);
+ vexpress_sysreg_setup(node);
+ } else {
+ pr_info("vexpress-sysreg: No Device Tree node found.");
+ }
}
@@ -414,7 +420,7 @@ static ssize_t vexpress_sysreg_sys_id_show(struct device *dev,
DEVICE_ATTR(sys_id, S_IRUGO, vexpress_sysreg_sys_id_show, NULL);
-static int __devinit vexpress_sysreg_probe(struct platform_device *pdev)
+static int vexpress_sysreg_probe(struct platform_device *pdev)
{
int err;
struct resource *res = platform_get_resource(pdev,
@@ -426,9 +432,11 @@ static int __devinit vexpress_sysreg_probe(struct platform_device *pdev)
return -EBUSY;
}
- if (!vexpress_sysreg_base)
+ if (!vexpress_sysreg_base) {
vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
+ vexpress_sysreg_setup(pdev->dev.of_node);
+ }
if (!vexpress_sysreg_base) {
dev_err(&pdev->dev, "Failed to obtain base address!\n");
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
new file mode 100644
index 000000000000..af2a6703f34f
--- /dev/null
+++ b/drivers/mfd/viperboard.c
@@ -0,0 +1,137 @@
+/*
+ * Nano River Technologies viperboard driver
+ *
+ * This is the core driver for the viperboard. There are cell drivers
+ * available for I2C, ADC and both GPIOs. SPI is not yet supported.
+ * The drivers do not support all features the board exposes. See the
+ * viperboard user manual for details.
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/viperboard.h>
+
+#include <linux/usb.h>
+
+
+static const struct usb_device_id vprbrd_table[] = {
+ { USB_DEVICE(0x2058, 0x1005) }, /* Nano River Technologies */
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, vprbrd_table);
+
+static struct mfd_cell vprbrd_devs[] = {
+ {
+ .name = "viperboard-gpio",
+ },
+ {
+ .name = "viperboard-i2c",
+ },
+ {
+ .name = "viperboard-adc",
+ },
+};
+
+static int vprbrd_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct vprbrd *vb;
+
+ u16 version = 0;
+ int pipe, ret;
+
+ /* allocate memory for our device state and initialize it */
+ vb = kzalloc(sizeof(*vb), GFP_KERNEL);
+ if (vb == NULL) {
+ dev_err(&interface->dev, "Out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&vb->lock);
+
+ vb->usb_dev = usb_get_dev(interface_to_usbdev(interface));
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, vb);
+ dev_set_drvdata(&vb->pdev.dev, vb);
+
+ /* get version information, major first, minor then */
+ pipe = usb_rcvctrlpipe(vb->usb_dev, 0);
+ ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MAJOR,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret == 1)
+ version = vb->buf[0];
+
+ ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MINOR,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret == 1) {
+ version <<= 8;
+ version = version | vb->buf[0];
+ }
+
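+	/* the major version is reported in the high byte, the minor in the low byte */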
+ dev_info(&interface->dev,
+ "version %x.%02x found at bus %03d address %03d\n",
+ version >> 8, version & 0xff,
+ vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+
+ ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
+ ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+ if (ret != 0) {
+ dev_err(&interface->dev, "Failed to add mfd devices to core.");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (vb) {
+ usb_put_dev(vb->usb_dev);
+ kfree(vb);
+ }
+
+ return ret;
+}
+
+static void vprbrd_disconnect(struct usb_interface *interface)
+{
+ struct vprbrd *vb = usb_get_intfdata(interface);
+
+ mfd_remove_devices(&interface->dev);
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(vb->usb_dev);
+ kfree(vb);
+
+ dev_dbg(&interface->dev, "disconnected\n");
+}
+
+static struct usb_driver vprbrd_driver = {
+ .name = "viperboard",
+ .probe = vprbrd_probe,
+ .disconnect = vprbrd_disconnect,
+ .id_table = vprbrd_table,
+};
+
+module_usb_driver(vprbrd_driver);
+
+MODULE_DESCRIPTION("Nano River Technologies viperboard mfd core driver");
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 3141c4a173a7..a9d9d41d95d3 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -56,6 +56,18 @@ static const struct reg_default wm5102_reva_patch[] = {
{ 0x80, 0x0000 },
};
+static const struct reg_default wm5102_revb_patch[] = {
+ { 0x80, 0x0003 },
+ { 0x081, 0xE022 },
+ { 0x410, 0x6080 },
+ { 0x418, 0x6080 },
+ { 0x420, 0x6080 },
+ { 0x428, 0xC000 },
+ { 0x441, 0x8014 },
+ { 0x458, 0x000b },
+ { 0x80, 0x0000 },
+};
+
/* We use a function so we can use ARRAY_SIZE() */
int wm5102_patch(struct arizona *arizona)
{
@@ -65,11 +77,19 @@ int wm5102_patch(struct arizona *arizona)
wm5102_reva_patch,
ARRAY_SIZE(wm5102_reva_patch));
default:
- return 0;
+ return regmap_register_patch(arizona->regmap,
+ wm5102_revb_patch,
+ ARRAY_SIZE(wm5102_revb_patch));
}
}
static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+ .mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+ },
+ [ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+ .mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+ },
[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
@@ -82,6 +102,7 @@ const struct regmap_irq_chip wm5102_aod = {
.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
.ack_base = ARIZONA_AOD_IRQ1,
.wake_base = ARIZONA_WAKE_CONTROL,
+ .wake_invert = 1,
.num_regs = 1,
.irqs = wm5102_aod_irqs,
.num_irqs = ARRAY_SIZE(wm5102_aod_irqs),
@@ -291,12 +312,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
{ 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
+ { 0x00000212, 0x0001 }, /* R530 - LDO1 Control 2 */
{ 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
{ 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
{ 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
@@ -1056,6 +1079,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -1071,6 +1095,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
@@ -1089,6 +1114,8 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ACCESSORY_DETECT_MODE_1:
case ARIZONA_HEADPHONE_DETECT_1:
case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_HP_DACVAL:
+ case ARIZONA_MICD_CLAMP_CONTROL:
case ARIZONA_MIC_DETECT_1:
case ARIZONA_MIC_DETECT_2:
case ARIZONA_MIC_DETECT_3:
@@ -1805,6 +1832,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_DSP1_STATUS_3:
return true;
default:
return false;
@@ -1813,15 +1841,23 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
{
+ if (reg > 0xffff)
+ return true;
+
switch (reg) {
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_RAW_OUTPUT_STATUS_1:
+ case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+ case ARIZONA_SLIMBUS_TX_PORT_STATUS:
case ARIZONA_SAMPLE_RATE_1_STATUS:
case ARIZONA_SAMPLE_RATE_2_STATUS:
case ARIZONA_SAMPLE_RATE_3_STATUS:
case ARIZONA_HAPTICS_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_FLL1_NCO_TEST_0:
+ case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FX_CTRL2:
case ARIZONA_INTERRUPT_STATUS_1:
case ARIZONA_INTERRUPT_STATUS_2:
@@ -1847,7 +1883,9 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_AOD_IRQ_RAW_STATUS:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_DSP1_STATUS_3:
case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_HP_DACVAL:
case ARIZONA_MIC_DETECT_3:
return true;
default:
@@ -1855,12 +1893,14 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
}
}
+#define WM5102_MAX_REGISTER 0x1a9800
+
const struct regmap_config wm5102_spi_regmap = {
.reg_bits = 32,
.pad_bits = 16,
.val_bits = 16,
- .max_register = ARIZONA_DSP1_STATUS_2,
+ .max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
@@ -1874,7 +1914,7 @@ const struct regmap_config wm5102_i2c_regmap = {
.reg_bits = 32,
.val_bits = 16,
- .max_register = ARIZONA_DSP1_STATUS_2,
+ .max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index adda6b10b90d..c41599815299 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -255,6 +255,7 @@ const struct regmap_irq_chip wm5110_aod = {
.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
.ack_base = ARIZONA_AOD_IRQ1,
.wake_base = ARIZONA_WAKE_CONTROL,
+ .wake_invert = 1,
.num_regs = 1,
.irqs = wm5110_aod_irqs,
.num_irqs = ARRAY_SIZE(wm5110_aod_irqs),
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index bcb226ff9d2b..57c488d42d3e 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -535,11 +535,10 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
case 2:
case 3:
+ default:
regmap_patch = wm8994_revc_patch;
patch_regs = ARRAY_SIZE(wm8994_revc_patch);
break;
- default:
- break;
}
break;
@@ -558,17 +557,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
/* Revision C did not change the relevant layer */
if (wm8994->revision > 1)
wm8994->revision++;
- switch (wm8994->revision) {
- case 0:
- case 1:
- case 2:
- case 3:
- regmap_patch = wm1811_reva_patch;
- patch_regs = ARRAY_SIZE(wm1811_reva_patch);
- break;
- default:
- break;
- }
+
+ regmap_patch = wm1811_reva_patch;
+ patch_regs = ARRAY_SIZE(wm1811_reva_patch);
break;
default:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b151b7c1bd59..e83fdfe0c8ca 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -127,7 +127,7 @@ config PHANTOM
config INTEL_MID_PTI
tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
- depends on PCI
+ depends on PCI && TTY
default n
help
The PTI (Parallel Trace Interface) driver directs
@@ -192,7 +192,7 @@ config ICS932S401
config ATMEL_SSC
tristate "Device driver for Atmel SSC peripheral"
- depends on AVR32 || ARCH_AT91
+ depends on HAS_IOMEM
---help---
This option enables device driver support for Atmel Synchronized
Serial Communication peripheral (SSC).
@@ -499,6 +499,17 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+config LATTICE_ECP3_CONFIG
+ tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
+ depends on SPI && SYSFS
+ select FW_LOADER
+ default n
+ help
+ This option enables support for bitstream configuration (programming
+ or loading) of the Lattice ECP3 FPGA family via SPI.
+
+ If unsure, say N.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -507,4 +518,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vmw_vmci/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2129377c0de6..35a1463c72d9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,3 +49,6 @@ obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
+obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
+obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
+obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 158da5a81a66..c09c28f92055 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
@@ -131,6 +132,13 @@ static int ssc_probe(struct platform_device *pdev)
struct resource *regs;
struct ssc_device *ssc;
const struct atmel_ssc_platform_data *plat_dat;
+ struct pinctrl *pinctrl;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ dev_err(&pdev->dev, "Failed to request pinctrl\n");
+ return PTR_ERR(pinctrl);
+ }
ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
if (!ssc) {
@@ -151,11 +159,9 @@ static int ssc_probe(struct platform_device *pdev)
return -ENXIO;
}
- ssc->regs = devm_request_and_ioremap(&pdev->dev, regs);
- if (!ssc->regs) {
- dev_dbg(&pdev->dev, "ioremap failed\n");
- return -EINVAL;
- }
+ ssc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(ssc->regs))
+ return PTR_ERR(ssc->regs);
ssc->phybase = regs->start;
@@ -167,7 +173,7 @@ static int ssc_probe(struct platform_device *pdev)
/* disable all interrupts */
clk_enable(ssc->clk);
- ssc_writel(ssc->regs, IDR, ~0UL);
+ ssc_writel(ssc->regs, IDR, -1);
ssc_readl(ssc->regs, SR);
clk_disable(ssc->clk);
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
index 22429b8b1068..5acb9c5b49c4 100644
--- a/drivers/misc/cb710/Kconfig
+++ b/drivers/misc/cb710/Kconfig
@@ -1,6 +1,6 @@
config CB710_CORE
tristate "ENE CB710/720 Flash memory card reader support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
This option enables support for PCI ENE CB710/720 Flash memory card
reader found in some laptops (ie. some versions of HP Compaq nx9500).
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
new file mode 100644
index 000000000000..155700bfd2b6
--- /dev/null
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012 Stefan Roese <sr@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define FIRMWARE_NAME "lattice-ecp3.bit"
+
+/*
+ * The JTAG IDs of the supported FPGAs. The ID is 32 bits wide,
+ * bit-reversed as noted in the manual.
+ */
+#define ID_ECP3_17 0xc2088080
+#define ID_ECP3_35 0xc2048080
+
+/* FPGA commands */
+#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */
+#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */
+#define FPGA_CMD_CLEAR 0x70
+#define FPGA_CMD_REFRESH 0x71
+#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */
+#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */
+#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */
+
+/*
+ * The status register is 32 bits, bit-reversed; DONE is bit 17 per TN1222.pdf
+ * (LatticeECP3 Slave SPI Port User's Guide)
+ */
+#define FPGA_STATUS_DONE 0x00004000
+#define FPGA_STATUS_CLEARED 0x00010000
+
+#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */
+#define FPGA_CLEAR_MSLEEP 10
+#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
+
+struct fpga_data {
+ struct completion fw_loaded;
+};
+
+struct ecp3_dev {
+ u32 jedec_id;
+ char *name;
+};
+
+static const struct ecp3_dev ecp3_dev[] = {
+ {
+ .jedec_id = ID_ECP3_17,
+ .name = "Lattice ECP3-17",
+ },
+ {
+ .jedec_id = ID_ECP3_35,
+ .name = "Lattice ECP3-35",
+ },
+};
+
+static void firmware_load(const struct firmware *fw, void *context)
+{
+ struct spi_device *spi = (struct spi_device *)context;
+ struct fpga_data *data = dev_get_drvdata(&spi->dev);
+ u8 *buffer;
+ int ret;
+ u8 txbuf[8];
+ u8 rxbuf[8];
+ int rx_len = 8;
+ int i;
+ u32 jedec_id;
+ u32 status;
+
+ if (fw->size == 0) {
+ dev_err(&spi->dev, "Error: Firmware size is 0!\n");
+ return;
+ }
+
+ /* Fill dummy data (24 stuffing bits for commands) */
+ txbuf[1] = 0x00;
+ txbuf[2] = 0x00;
+ txbuf[3] = 0x00;
+
+ /* Trying to speak with the FPGA via SPI... */
+ txbuf[0] = FPGA_CMD_READ_ID;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
+ jedec_id = *(u32 *)&rxbuf[4];
+
+ for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
+ if (jedec_id == ecp3_dev[i].jedec_id)
+ break;
+ }
+ if (i == ARRAY_SIZE(ecp3_dev)) {
+ dev_err(&spi->dev,
+ "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
+ jedec_id);
+ return;
+ }
+
+ dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
+
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+
+ buffer = kzalloc(fw->size + 8, GFP_KERNEL);
+ if (!buffer) {
+ dev_err(&spi->dev, "Error: Can't allocate memory!\n");
+ return;
+ }
+
+ /*
+ * Insert WRITE_INC command into stream (one SPI frame)
+ */
+ buffer[0] = FPGA_CMD_WRITE_INC;
+ buffer[1] = 0xff;
+ buffer[2] = 0xff;
+ buffer[3] = 0xff;
+ memcpy(buffer + 4, fw->data, fw->size);
+
+ txbuf[0] = FPGA_CMD_REFRESH;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_WRITE_EN;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_CLEAR;
+ ret = spi_write(spi, txbuf, 4);
+
+ /*
+ * Wait for FPGA memory to become cleared
+ */
+ for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ status = *(u32 *)&rxbuf[4];
+ if (status == FPGA_STATUS_CLEARED)
+ break;
+
+ msleep(FPGA_CLEAR_MSLEEP);
+ }
+
+ if (i == FPGA_CLEAR_LOOP_COUNT) {
+ dev_err(&spi->dev,
+ "Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
+ status);
+ kfree(buffer);
+ return;
+ }
+
+ dev_info(&spi->dev, "Configuring the FPGA...\n");
+ ret = spi_write(spi, buffer, fw->size + 8);
+
+ txbuf[0] = FPGA_CMD_WRITE_DIS;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+ status = *(u32 *)&rxbuf[4];
+
+ /* Check result */
+ if (status & FPGA_STATUS_DONE)
+ dev_info(&spi->dev, "FPGA succesfully configured!\n");
+ else
+ dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
+
+ /*
+ * Don't forget to release the firmware again
+ */
+ release_firmware(fw);
+
+ kfree(buffer);
+
+ complete(&data->fw_loaded);
+}
+
+static int lattice_ecp3_probe(struct spi_device *spi)
+{
+ struct fpga_data *data;
+ int err;
+
+ data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
+ return -ENOMEM;
+ }
+ spi_set_drvdata(spi, data);
+
+ init_completion(&data->fw_loaded);
+ err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
+ FIRMWARE_NAME, &spi->dev,
+ GFP_KERNEL, spi, firmware_load);
+ if (err) {
+ dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
+ return err;
+ }
+
+ dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
+
+ return 0;
+}
+
+static int lattice_ecp3_remove(struct spi_device *spi)
+{
+ struct fpga_data *data = spi_get_drvdata(spi);
+
+ wait_for_completion(&data->fw_loaded);
+
+ return 0;
+}
+
+static const struct spi_device_id lattice_ecp3_id[] = {
+ { "ecp3-17", 0 },
+ { "ecp3-35", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
+
+static struct spi_driver lattice_ecp3_driver = {
+ .driver = {
+ .name = "lattice-ecp3",
+ .owner = THIS_MODULE,
+ },
+ .probe = lattice_ecp3_probe,
+ .remove = lattice_ecp3_remove,
+ .id_table = lattice_ecp3_id,
+};
+
+module_spi_driver(lattice_ecp3_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 5a79ccde2fdf..d21b4d006a55 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,11 +1,22 @@
config INTEL_MEI
- tristate "Intel Management Engine Interface (Intel MEI)"
+ tristate "Intel Management Engine Interface"
depends on X86 && PCI && WATCHDOG_CORE
help
The Intel Management Engine (Intel ME) provides Manageability,
 	  Security and Media services for systems containing Intel chipsets.
 	  If selected, the /dev/mei misc device will be created.
+ For more information see
+ <http://software.intel.com/en-us/manageability/>
+
+config INTEL_MEI_ME
+ bool "ME Enabled Intel Chipsets"
+ depends on INTEL_MEI
+ depends on X86 && PCI && WATCHDOG_CORE
+ default y
+ help
+ MEI support for ME Enabled Intel chipsets.
+
Supported Chipsets are:
7 Series Chipset Family
6 Series Chipset Family
@@ -24,5 +35,3 @@ config INTEL_MEI
82Q33 Express
82X38/X48 Express
- For more information see
- <http://software.intel.com/en-us/manageability/>
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 0017842e166c..040af6c7b147 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -4,9 +4,11 @@
#
obj-$(CONFIG_INTEL_MEI) += mei.o
mei-objs := init.o
+mei-objs += hbm.o
mei-objs += interrupt.o
-mei-objs += interface.o
-mei-objs += iorw.o
+mei-objs += client.o
mei-objs += main.o
mei-objs += amthif.o
mei-objs += wd.o
+mei-$(CONFIG_INTEL_MEI_ME) += pci-me.o
+mei-$(CONFIG_INTEL_MEI_ME) += hw-me.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 18794aea6062..c86d7e3839a4 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -31,15 +31,16 @@
#include <linux/jiffies.h>
#include <linux/uaccess.h>
+#include <linux/mei.h>
#include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
-const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
- 0xa8, 0x46, 0xe0, 0xff, 0x65,
- 0x81, 0x4c);
+const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
+ 0xac, 0xa8, 0x46, 0xe0,
+ 0xff, 0x65, 0x81, 0x4c);
/**
* mei_amthif_reset_params - initializes mei device iamthif
@@ -64,22 +65,24 @@ void mei_amthif_reset_params(struct mei_device *dev)
* @dev: the device structure
*
*/
-void mei_amthif_host_init(struct mei_device *dev)
+int mei_amthif_host_init(struct mei_device *dev)
{
- int i;
+ struct mei_cl *cl = &dev->iamthif_cl;
unsigned char *msg_buf;
+ int ret, i;
- mei_cl_init(&dev->iamthif_cl, dev);
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+
+ mei_cl_init(cl, dev);
- /* find ME amthi client */
- i = mei_me_cl_link(dev, &dev->iamthif_cl,
- &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
+ i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (i < 0) {
- dev_info(&dev->pdev->dev, "failed to find iamthif client.\n");
- return;
+ dev_info(&dev->pdev->dev, "amthif: failed to find the client\n");
+ return -ENOENT;
}
+ cl->me_client_id = dev->me_clients[i].client_id;
+
/* Assign iamthif_mtu to the value received from ME */
dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
@@ -93,19 +96,29 @@ void mei_amthif_host_init(struct mei_device *dev)
msg_buf = kcalloc(dev->iamthif_mtu,
sizeof(unsigned char), GFP_KERNEL);
if (!msg_buf) {
- dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n");
- return;
+ dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n");
+ return -ENOMEM;
}
dev->iamthif_msg_buf = msg_buf;
- if (mei_connect(dev, &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
- dev->iamthif_cl.host_client_id = 0;
+ ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "amthif: failed link client\n");
+ return -ENOENT;
+ }
+
+ cl->state = MEI_FILE_CONNECTING;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
+ dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->host_client_id = 0;
} else {
- dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
}
+ return 0;
}
/**
@@ -168,10 +181,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
if (i < 0) {
- dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
+ dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
return -ENODEV;
}
- dev_dbg(&dev->pdev->dev, "checking amthi data\n");
+ dev_dbg(&dev->pdev->dev, "checking amthif data\n");
cb = mei_amthif_find_read_list_entry(dev, file);
/* Check for if we can block or not*/
@@ -179,7 +192,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
return -EAGAIN;
- dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
+ dev_dbg(&dev->pdev->dev, "waiting for amthif data\n");
while (cb == NULL) {
/* unlock the Mutex */
mutex_unlock(&dev->device_lock);
@@ -187,27 +200,27 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
(cb = mei_amthif_find_read_list_entry(dev, file)));
+ /* Locking again the Mutex */
+ mutex_lock(&dev->device_lock);
+
if (wait_ret)
return -ERESTARTSYS;
dev_dbg(&dev->pdev->dev, "woke up from sleep\n");
-
- /* Locking again the Mutex */
- mutex_lock(&dev->device_lock);
}
- dev_dbg(&dev->pdev->dev, "Got amthi data\n");
+ dev_dbg(&dev->pdev->dev, "Got amthif data\n");
dev->iamthif_timer = 0;
if (cb) {
timeout = cb->read_time +
mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
- dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
+ dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n",
timeout);
if (time_after(jiffies, timeout)) {
- dev_dbg(&dev->pdev->dev, "amthi Time out\n");
+ dev_dbg(&dev->pdev->dev, "amthif Time out\n");
/* 15 sec for the message has expired */
list_del(&cb->list);
rets = -ETIMEDOUT;
@@ -227,9 +240,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* remove message from deletion list
*/
- dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
+ dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n",
cb->response_buffer.size);
- dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx);
+ dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
 	/* length is being truncated to PAGE_SIZE, however,
* the buf_idx may point beyond */
@@ -245,7 +258,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
}
}
free:
- dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
+ dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n");
*offset = 0;
mei_io_cb_free(cb);
out:
@@ -269,7 +282,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
if (!dev || !cb)
return -ENODEV;
- dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
+ dev_dbg(&dev->pdev->dev, "write data to amthif client.\n");
dev->iamthif_state = MEI_IAMTHIF_WRITING;
dev->iamthif_current_cb = cb;
@@ -280,15 +293,15 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
cb->request_buffer.size);
- ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
+ ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl);
if (ret < 0)
return ret;
- if (ret && dev->mei_host_buffer_is_empty) {
+ if (ret && dev->hbuf_is_ready) {
ret = 0;
- dev->mei_host_buffer_is_empty = false;
- if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
- mei_hdr.length = mei_hbuf_max_data(dev);
+ dev->hbuf_is_ready = false;
+ if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
+ mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
mei_hdr.length = cb->request_buffer.size;
@@ -300,25 +313,24 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.reserved = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
if (mei_write_message(dev, &mei_hdr,
- (unsigned char *)(dev->iamthif_msg_buf),
- mei_hdr.length))
+ (unsigned char *)dev->iamthif_msg_buf))
return -ENODEV;
if (mei_hdr.msg_complete) {
- if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
+ if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
return -ENODEV;
dev->iamthif_flow_control_pending = true;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
- dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
+ dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n");
dev->iamthif_current_cb = cb;
dev->iamthif_file_object = cb->file_object;
list_add_tail(&cb->list, &dev->write_waiting_list.list);
} else {
- dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n");
+ dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n");
list_add_tail(&cb->list, &dev->write_list.list);
}
} else {
- if (!(dev->mei_host_buffer_is_empty))
+ if (!dev->hbuf_is_ready)
dev_dbg(&dev->pdev->dev, "host buffer is not empty");
dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
@@ -383,7 +395,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
- dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+ dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n");
list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) {
list_del(&pos->list);
@@ -392,7 +404,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
status = mei_amthif_send_cmd(dev, pos);
if (status) {
dev_dbg(&dev->pdev->dev,
- "amthi write failed status = %d\n",
+ "amthif write failed status = %d\n",
status);
return;
}
@@ -412,7 +424,7 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
dev->iamthif_file_object == file) {
mask |= (POLLIN | POLLRDNORM);
- dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
+ dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
mei_amthif_run_next_cmd(dev);
}
return mask;
@@ -434,54 +446,51 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr mei_hdr;
struct mei_cl *cl = cb->cl;
size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
size_t msg_slots = mei_data2slots(len);
- mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->reserved = 0;
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
if (*slots >= msg_slots) {
- mei_hdr->length = len;
- mei_hdr->msg_complete = 1;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
msg_slots = *slots;
len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr->length = len;
- mei_hdr->msg_complete = 0;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 0;
} else {
/* wait for next time the host buffer is empty */
return 0;
}
- dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
- mei_hdr->length, mei_hdr->msg_complete);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
- if (mei_write_message(dev, mei_hdr,
- dev->iamthif_msg_buf + dev->iamthif_msg_buf_index,
- mei_hdr->length)) {
+ if (mei_write_message(dev, &mei_hdr,
+ dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
cl->status = -ENODEV;
list_del(&cb->list);
return -ENODEV;
}
- if (mei_flow_ctrl_reduce(dev, cl))
+ if (mei_cl_flow_ctrl_reduce(cl))
return -ENODEV;
- dev->iamthif_msg_buf_index += mei_hdr->length;
+ dev->iamthif_msg_buf_index += mei_hdr.length;
cl->status = 0;
- if (mei_hdr->msg_complete) {
+ if (mei_hdr.msg_complete) {
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
dev->iamthif_flow_control_pending = true;
- /* save iamthif cb sent to amthi client */
+ /* save iamthif cb sent to amthif client */
cb->buf_idx = dev->iamthif_msg_buf_index;
dev->iamthif_current_cb = cb;
@@ -494,11 +503,11 @@ int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
/**
* mei_amthif_irq_read_message - read routine after ISR to
- * handle the read amthi message
+ * handle the read amthif message
*
* @complete_list: An instance of our list structure
* @dev: the device structure
- * @mei_hdr: header of amthi message
+ * @mei_hdr: header of amthif message
*
* returns 0 on success, <0 on failure.
*/
@@ -522,10 +531,10 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
return 0;
dev_dbg(&dev->pdev->dev,
- "amthi_message_buffer_index =%d\n",
+ "amthif_message_buffer_index =%d\n",
mei_hdr->length);
- dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
+ dev_dbg(&dev->pdev->dev, "completed amthif read.\n ");
if (!dev->iamthif_current_cb)
return -ENODEV;
@@ -540,8 +549,8 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
cb->read_time = jiffies;
if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) {
/* found the iamthif cb */
- dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
- dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
+ dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n ");
+ dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n ");
list_add_tail(&cb->list, &complete_list->list);
}
return 0;
@@ -563,7 +572,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
return -EMSGSIZE;
}
*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
- if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
+ if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
return -EIO;
}
@@ -574,7 +583,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
dev->iamthif_msg_buf_index = 0;
dev->iamthif_msg_buf_size = 0;
dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
- dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
return 0;
}
@@ -593,7 +602,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_msg_buf,
dev->iamthif_msg_buf_index);
list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
- dev_dbg(&dev->pdev->dev, "amthi read completed\n");
+ dev_dbg(&dev->pdev->dev, "amthif read completed\n");
dev->iamthif_timer = jiffies;
dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
dev->iamthif_timer);
@@ -601,7 +610,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
mei_amthif_run_next_cmd(dev);
}
- dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
+ dev_dbg(&dev->pdev->dev, "completing amthif call back.\n");
wake_up_interruptible(&dev->iamthif_cl.wait);
}
@@ -635,7 +644,8 @@ static bool mei_clear_list(struct mei_device *dev,
if (dev->iamthif_current_cb == cb_pos) {
dev->iamthif_current_cb = NULL;
/* send flow control to iamthif client */
- mei_send_flow_control(dev, &dev->iamthif_cl);
+ mei_hbm_cl_flow_control_req(dev,
+ &dev->iamthif_cl);
}
/* free all allocated buffers */
mei_io_cb_free(cb_pos);
@@ -706,11 +716,11 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
if (dev->iamthif_file_object == file &&
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
- dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
+ dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n",
dev->iamthif_state);
dev->iamthif_canceled = true;
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
- dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
+ dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n");
mei_amthif_run_next_cmd(dev);
}
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
new file mode 100644
index 000000000000..1569afe935de
--- /dev/null
+++ b/drivers/misc/mei/client.c
@@ -0,0 +1,729 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * mei_me_cl_by_uuid - locate index of me client
+ *
+ * @dev: mei device
+ * @uuid: me client uuid
+ * returns me client index or -ENOENT if not found
+ */
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
+{
+ int i, res = -ENOENT;
+
+ for (i = 0; i < dev->me_clients_num; ++i)
+ if (uuid_le_cmp(*uuid,
+ dev->me_clients[i].props.protocol_name) == 0) {
+ res = i;
+ break;
+ }
+
+ return res;
+}
+
+
+/**
+ * mei_me_cl_by_id return index to me_clients for client_id
+ *
+ * @dev: the device structure
+ * @client_id: me client id
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns index on success, -ENOENT on failure.
+ */
+
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
+{
+ int i;
+ for (i = 0; i < dev->me_clients_num; i++)
+ if (dev->me_clients[i].client_id == client_id)
+ break;
+ if (WARN_ON(dev->me_clients[i].client_id != client_id))
+ return -ENOENT;
+
+ if (i == dev->me_clients_num)
+ return -ENOENT;
+
+ return i;
+}
+
+
+/**
+ * mei_io_list_flush - removes list entries belonging to cl.
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+ struct mei_cl_cb *cb;
+ struct mei_cl_cb *next;
+
+ list_for_each_entry_safe(cb, next, &list->list, list) {
+ if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
+ list_del(&cb->list);
+ }
+}
+
+/**
+ * mei_io_cb_free - free mei_cb_private related memory
+ *
+ * @cb: mei callback struct
+ */
+void mei_io_cb_free(struct mei_cl_cb *cb)
+{
+ if (cb == NULL)
+ return;
+
+ kfree(cb->request_buffer.data);
+ kfree(cb->response_buffer.data);
+ kfree(cb);
+}
+
+/**
+ * mei_io_cb_init - allocate and initialize io callback
+ *
+ * @cl - mei client
+ * @file: pointer to file structure
+ *
+ * returns mei_cl_cb pointer or NULL;
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
+{
+ struct mei_cl_cb *cb;
+
+ cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ if (!cb)
+ return NULL;
+
+ mei_io_list_init(cb);
+
+ cb->file_object = fp;
+ cb->cl = cl;
+ cb->buf_idx = 0;
+ return cb;
+}
+
+/**
+ * mei_io_cb_alloc_req_buf - allocate request buffer
+ *
+ * @cb - io callback structure
+ * @length: size of the buffer
+ *
+ * returns 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
+{
+ if (!cb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->request_buffer.data)
+ return -ENOMEM;
+ cb->request_buffer.size = length;
+ return 0;
+}
+/**
+ * mei_io_cb_alloc_resp_buf - allocate response buffer
+ *
+ * @cb - io callback structure
+ * @length: size of the buffer
+ *
+ * returns 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
+{
+ if (!cb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->response_buffer.data)
+ return -ENOMEM;
+ cb->response_buffer.size = length;
+ return 0;
+}
+
+
+
+/**
+ * mei_cl_flush_queues - flushes queue lists belonging to cl.
+ *
+ * @dev: the device structure
+ * @cl: host client
+ */
+int mei_cl_flush_queues(struct mei_cl *cl)
+{
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
+ mei_io_list_flush(&cl->dev->read_list, cl);
+ mei_io_list_flush(&cl->dev->write_list, cl);
+ mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
+ return 0;
+}
+
+
+/**
+ * mei_cl_init - initializes cl.
+ *
+ * @cl: host client to be initialized
+ * @dev: mei device
+ */
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
+{
+ memset(cl, 0, sizeof(struct mei_cl));
+ init_waitqueue_head(&cl->wait);
+ init_waitqueue_head(&cl->rx_wait);
+ init_waitqueue_head(&cl->tx_wait);
+ INIT_LIST_HEAD(&cl->link);
+ cl->reading_state = MEI_IDLE;
+ cl->writing_state = MEI_IDLE;
+ cl->dev = dev;
+}
+
+/**
+ * mei_cl_allocate - allocates cl structure and sets it up.
+ *
+ * @dev: mei device
+ * returns the allocated host client or NULL on failure
+ */
+struct mei_cl *mei_cl_allocate(struct mei_device *dev)
+{
+ struct mei_cl *cl;
+
+ cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+ if (!cl)
+ return NULL;
+
+ mei_cl_init(cl, dev);
+
+ return cl;
+}
+
+/**
+ * mei_cl_find_read_cb - find this cl's callback in the read list
+ *
+ * @cl: host client
+ * returns cb on success, NULL on error
+ */
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+ struct mei_cl_cb *cb = NULL;
+ struct mei_cl_cb *next = NULL;
+
+ list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
+ if (mei_cl_cmp_id(cl, cb->cl))
+ return cb;
+ return NULL;
+}
+
+/** mei_cl_link: allocates host id in the host map
+ *
+ * @cl - host client
+ * @id - fixed host id or -1 for generating one
+ * returns 0 on success
+ *	-EINVAL on incorrect values
+ *	-ENOENT if client not found
+ */
+int mei_cl_link(struct mei_cl *cl, int id)
+{
+ struct mei_device *dev;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+	/* If Id is not assigned, get one */
+ if (id == MEI_HOST_CLIENT_ID_ANY)
+ id = find_first_zero_bit(dev->host_clients_map,
+ MEI_CLIENTS_MAX);
+
+ if (id >= MEI_CLIENTS_MAX) {
+ dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ;
+ return -ENOENT;
+ }
+
+ dev->open_handle_count++;
+
+ cl->host_client_id = id;
+ list_add_tail(&cl->link, &dev->file_list);
+
+ set_bit(id, dev->host_clients_map);
+
+ cl->state = MEI_FILE_INITIALIZING;
+
+ dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
+ return 0;
+}
+
+/**
+ * mei_cl_unlink - remove host client from the list
+ *
+ * @cl: host client
+ */
+int mei_cl_unlink(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl *pos, *next;
+
+ /* don't shout on error exit path */
+ if (!cl)
+ return 0;
+
+ /* wd and amthif might not be initialized */
+ if (!cl->dev)
+ return 0;
+
+ dev = cl->dev;
+
+ list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+ if (cl->host_client_id == pos->host_client_id) {
+ dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
+ pos->host_client_id, pos->me_client_id);
+ list_del_init(&pos->link);
+ break;
+ }
+ }
+ return 0;
+}
+
+
+void mei_host_client_init(struct work_struct *work)
+{
+ struct mei_device *dev = container_of(work,
+ struct mei_device, init_work);
+ struct mei_client_properties *client_props;
+ int i;
+
+ mutex_lock(&dev->device_lock);
+
+ bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving the first three client IDs
+ * 0: Reserved for MEI Bus Message communications
+ * 1: Reserved for Watchdog
+ * 2: Reserved for AMTHI
+ */
+ bitmap_set(dev->host_clients_map, 0, 3);
+
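+	/* walk the ME client list and set up the fixed host clients (AMTHIF, watchdog) */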
+ for (i = 0; i < dev->me_clients_num; i++) {
+ client_props = &dev->me_clients[i].props;
+
+ if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
+ mei_amthif_host_init(dev);
+ else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
+ mei_wd_host_init(dev);
+ }
+
+ dev->dev_state = MEI_DEV_ENABLED;
+
+ mutex_unlock(&dev->device_lock);
+}
+
+
+/**
+ * mei_cl_disconnect - disconnect host client from the ME one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_disconnect(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets, err;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != MEI_FILE_DISCONNECTING)
+ return 0;
+
+ cb = mei_io_cb_init(cl, NULL);
+ if (!cb)
+ return -ENOMEM;
+
+ cb->fop_type = MEI_FOP_CLOSE;
+ if (dev->hbuf_is_ready) {
+ dev->hbuf_is_ready = false;
+ if (mei_hbm_cl_disconnect_req(dev, cl)) {
+ rets = -ENODEV;
+ dev_err(&dev->pdev->dev, "failed to disconnect.\n");
+ goto free;
+ }
+ mdelay(10); /* Wait for hardware disconnection ready */
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
+ dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+
+ }
+ mutex_unlock(&dev->device_lock);
+
+ err = wait_event_timeout(dev->wait_recvd_msg,
+ MEI_FILE_DISCONNECTED == cl->state,
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+
+ mutex_lock(&dev->device_lock);
+ if (MEI_FILE_DISCONNECTED == cl->state) {
+ rets = 0;
+ dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
+ } else {
+ rets = -ENODEV;
+ if (MEI_FILE_DISCONNECTED != cl->state)
+ dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
+
+ if (err)
+ dev_dbg(&dev->pdev->dev,
+ "wait failed disconnect err=%08x\n",
+ err);
+
+ dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
+ }
+
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+free:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+
+/**
+ * mei_cl_is_other_connecting - checks if other
+ * client with the same me client id is connecting
+ *
+ * @cl: private data of the file object
+ *
+ * returns true if another client is connecting, false otherwise.
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl *pos;
+ struct mei_cl *next;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return false;
+
+ dev = cl->dev;
+
+ list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+ if ((pos->state == MEI_FILE_CONNECTING) &&
+ (pos != cl) && cl->me_client_id == pos->me_client_id)
+ return true;
+
+ }
+
+ return false;
+}
+
+/**
+ * mei_cl_connect - connect host client to the ME one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_connect(struct mei_cl *cl, struct file *file)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ cb = mei_io_cb_init(cl, file);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ cb->fop_type = MEI_FOP_IOCTL;
+
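+	/* send the connect request now if the buffer is free and no other client is connecting */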
+ if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
+ dev->hbuf_is_ready = false;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
+
+ mutex_unlock(&dev->device_lock);
+ rets = wait_event_timeout(dev->wait_recvd_msg,
+ (cl->state == MEI_FILE_CONNECTED ||
+ cl->state == MEI_FILE_DISCONNECTED),
+ timeout * HZ);
+ mutex_lock(&dev->device_lock);
+
+ if (cl->state != MEI_FILE_CONNECTED) {
+ rets = -EFAULT;
+
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ goto out;
+ }
+
+ rets = cl->status;
+
+out:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
+ *
+ * @dev: the device structure
+ * @cl: private data of the file object
+ *
+ * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
+ * -ENOENT if mei_cl is not present
+ * -EINVAL if single_recv_buf == 0
+ */
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ if (!dev->me_clients_num)
+ return 0;
+
+ if (cl->mei_flow_ctrl_creds > 0)
+ return 1;
+
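+	/* no per-client credits; check the matching ME client's shared credits */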
+ for (i = 0; i < dev->me_clients_num; i++) {
+ struct mei_me_client *me_cl = &dev->me_clients[i];
+ if (me_cl->client_id == cl->me_client_id) {
+ if (me_cl->mei_flow_ctrl_creds) {
+ if (WARN_ON(me_cl->props.single_recv_buf == 0))
+ return -EINVAL;
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * mei_cl_flow_ctrl_reduce - reduces flow_control.
+ *
+ * @dev: the device structure
+ * @cl: private data of the file object
+ * @returns
+ * 0 on success
+ * -ENOENT when me client is not found
+ * -EINVAL when ctrl credits are <= 0
+ */
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ if (!dev->me_clients_num)
+ return -ENOENT;
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ struct mei_me_client *me_cl = &dev->me_clients[i];
+ if (me_cl->client_id == cl->me_client_id) {
+ if (me_cl->props.single_recv_buf != 0) {
+ if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ dev->me_clients[i].mei_flow_ctrl_creds--;
+ } else {
+ if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ cl->mei_flow_ctrl_creds--;
+ }
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * mei_cl_read_start - the start read client message function.
+ *
+ * @cl: host client
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_read_start(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != MEI_FILE_CONNECTED)
+ return -ENODEV;
+
+ if (dev->dev_state != MEI_DEV_ENABLED)
+ return -ENODEV;
+
+ if (cl->read_cb) {
+ dev_dbg(&dev->pdev->dev, "read is pending.\n");
+ return -EBUSY;
+ }
+ i = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (i < 0) {
+ dev_err(&dev->pdev->dev, "no such me client %d\n",
+ cl->me_client_id);
+ return -ENODEV;
+ }
+
+ cb = mei_io_cb_init(cl, NULL);
+ if (!cb)
+ return -ENOMEM;
+
+ rets = mei_io_cb_alloc_resp_buf(cb,
+ dev->me_clients[i].props.max_msg_length);
+ if (rets)
+ goto err;
+
+ cb->fop_type = MEI_FOP_READ;
+ cl->read_cb = cb;
+ if (dev->hbuf_is_ready) {
+ dev->hbuf_is_ready = false;
+ if (mei_hbm_cl_flow_control_req(dev, cl)) {
+ rets = -ENODEV;
+ goto err;
+ }
+ list_add_tail(&cb->list, &dev->read_list.list);
+ } else {
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
+ return rets;
+err:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_all_disconnect - disconnect forcefully all connected clients
+ *
+ * @dev - mei device
+ */
+
+void mei_cl_all_disconnect(struct mei_device *dev)
+{
+ struct mei_cl *cl, *next;
+
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->mei_flow_ctrl_creds = 0;
+ cl->read_cb = NULL;
+ cl->timer_count = 0;
+ }
+}
+
+
+/**
+ * mei_cl_all_read_wakeup - wake up all readers so they can be interrupted
+ *
+ * @dev - mei device
+ */
+void mei_cl_all_read_wakeup(struct mei_device *dev)
+{
+ struct mei_cl *cl, *next;
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (waitqueue_active(&cl->rx_wait)) {
+ dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+ wake_up_interruptible(&cl->rx_wait);
+ }
+ }
+}
+
+/**
+ * mei_cl_all_write_clear - clear all pending writes
+ *
+ * @dev - mei device
+ */
+void mei_cl_all_write_clear(struct mei_device *dev)
+{
+ struct mei_cl_cb *cb, *next;
+
+ list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
+}
+
+
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
new file mode 100644
index 000000000000..214b2397ec3e
--- /dev/null
+++ b/drivers/misc/mei/client.h
@@ -0,0 +1,102 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_CLIENT_H_
+#define _MEI_CLIENT_H_
+
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/poll.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
+
+/*
+ * MEI IO Functions
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
+void mei_io_cb_free(struct mei_cl_cb *priv_cb);
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
+
+
+/**
+ * mei_io_list_init - Sets up a queue list.
+ *
+ * @list: an instance of the cl callback structure used as a list head
+ */
+static inline void mei_io_list_init(struct mei_cl_cb *list)
+{
+ INIT_LIST_HEAD(&list->list);
+}
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
+
+/*
+ * MEI Host Client Functions
+ */
+
+struct mei_cl *mei_cl_allocate(struct mei_device *dev);
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
+
+
+int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_unlink(struct mei_cl *cl);
+
+int mei_cl_flush_queues(struct mei_cl *cl);
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
+
+/**
+ * mei_cl_cmp_id - tells if the clients have the same id
+ *
+ * @cl1: host client 1
+ * @cl2: host client 2
+ *
+ * returns true - if the client ids are the same and neither is NULL
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+ const struct mei_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->me_client_id == cl2->me_client_id);
+}
+
+
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
+
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
+/*
+ * MEI input output function prototype
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl);
+int mei_cl_disconnect(struct mei_cl *cl);
+
+int mei_cl_read_start(struct mei_cl *cl);
+
+int mei_cl_connect(struct mei_cl *cl, struct file *file);
+
+void mei_host_client_init(struct work_struct *work);
+
+
+void mei_cl_all_disconnect(struct mei_device *dev);
+void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_write_clear(struct mei_device *dev);
+
+
+#endif /* _MEI_CLIENT_H_ */
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
new file mode 100644
index 000000000000..fb9e63ba3bb1
--- /dev/null
+++ b/drivers/misc/mei/hbm.c
@@ -0,0 +1,669 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+
+/**
+ * mei_hbm_me_cl_allocate - allocates storage for me clients
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_me_cl_allocate(struct mei_device *dev)
+{
+ struct mei_me_client *clients;
+ int b;
+
+ /* count how many ME clients we have */
+ for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
+ dev->me_clients_num++;
+
+ if (dev->me_clients_num <= 0)
+ return;
+
+ kfree(dev->me_clients);
+ dev->me_clients = NULL;
+
+ dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
+ dev->me_clients_num * sizeof(struct mei_me_client));
+ /* allocate storage for ME clients representation */
+ clients = kcalloc(dev->me_clients_num,
+ sizeof(struct mei_me_client), GFP_KERNEL);
+ if (!clients) {
+ dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
+ dev->dev_state = MEI_DEV_RESETING;
+ mei_reset(dev, 1);
+ return;
+ }
+ dev->me_clients = clients;
+ return;
+}
+
+/**
+ * mei_hbm_cl_hdr - construct client hbm header
+ * @cl: - client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ */
+static inline
+void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
+{
+ struct mei_hbm_cl_cmd *cmd = buf;
+
+ memset(cmd, 0, len);
+
+ cmd->hbm_cmd = hbm_cmd;
+ cmd->host_addr = cl->host_client_id;
+ cmd->me_addr = cl->me_client_id;
+}
+
+/**
+ * mei_hbm_cl_addr_equal - tells if the client and the message share addresses
+ *
+ * @cl: host client
+ * @buf: hbm client command message
+ *
+ * returns true if the host and ME addresses are the same
+ */
+static inline
+bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
+{
+ struct mei_hbm_cl_cmd *cmd = buf;
+ return cl->host_client_id == cmd->host_addr &&
+ cl->me_client_id == cmd->me_addr;
+}
+
+
+/**
+ * is_treat_specially_client - checks if the message belongs
+ * to the file private data.
+ *
+ * @cl: private data of the file object
+ * @rs: connect response bus message
+ *
+ * returns true if the response addresses this client; the client state
+ * is updated according to the response status
+ */
+static bool is_treat_specially_client(struct mei_cl *cl,
+ struct hbm_client_connect_response *rs)
+{
+ if (mei_hbm_cl_addr_equal(cl, rs)) {
+ if (!rs->status) {
+ cl->state = MEI_FILE_CONNECTED;
+ cl->status = 0;
+
+ } else {
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->status = -ENODEV;
+ }
+ cl->timer_count = 0;
+
+ return true;
+ }
+ return false;
+}
+
+/**
+ * mei_hbm_start_req - sends start request message.
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_start_req(struct mei_device *dev)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_host_version_request *start_req;
+ const size_t len = sizeof(struct hbm_host_version_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+
+ /* host start message */
+ start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
+ memset(start_req, 0, len);
+ start_req->hbm_cmd = HOST_START_REQ_CMD;
+ start_req->host_version.major_version = HBM_MAJOR_VERSION;
+ start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
+ dev->recvd_msg = false;
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
+ dev->dev_state = MEI_DEV_RESETING;
+ mei_reset(dev, 1);
+ }
+ dev->init_clients_state = MEI_START_MESSAGE;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ return ;
+}
+
+/**
+ * mei_hbm_enum_clients_req - sends enumeration client request message.
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_enum_clients_req(struct mei_device *dev)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_host_enum_request *enum_req;
+ const size_t len = sizeof(struct hbm_host_enum_request);
+ /* enumerate clients */
+ mei_hbm_hdr(mei_hdr, len);
+
+ enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
+ memset(enum_req, 0, len);
+ enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev->dev_state = MEI_DEV_RESETING;
+ dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
+ mei_reset(dev, 1);
+ }
+ dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ return;
+}
+
+/**
+ * mei_hbm_prop_req - request properties for a single client
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success and < 0 on failure
+ */
+
+static int mei_hbm_prop_req(struct mei_device *dev)
+{
+
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_props_request *prop_req;
+ const size_t len = sizeof(struct hbm_props_request);
+ unsigned long next_client_index;
+ u8 client_num;
+
+
+ client_num = dev->me_client_presentation_num;
+
+ next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
+ dev->me_client_index);
+
+ /* We got all client properties */
+ if (next_client_index == MEI_CLIENTS_MAX) {
+ schedule_work(&dev->init_work);
+
+ return 0;
+ }
+
+ dev->me_clients[client_num].client_id = next_client_index;
+ dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
+
+ mei_hbm_hdr(mei_hdr, len);
+ prop_req = (struct hbm_props_request *)dev->wr_msg.data;
+
+ memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+
+ prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req->address = next_client_index;
+
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev->dev_state = MEI_DEV_RESETING;
+ dev_err(&dev->pdev->dev, "Properties request command failed\n");
+ mei_reset(dev, 1);
+
+ return -EIO;
+ }
+
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->me_client_index = next_client_index;
+
+ return 0;
+}
+
+/**
+ * mei_hbm_stop_req_prepare - prepare a stop request message
+ *
+ * @dev: mei device
+ * @mei_hdr: mei message header
+ * @data: hbm message body buffer
+ */
+static void mei_hbm_stop_req_prepare(struct mei_device *dev,
+ struct mei_msg_hdr *mei_hdr, unsigned char *data)
+{
+ struct hbm_host_stop_request *req =
+ (struct hbm_host_stop_request *)data;
+ const size_t len = sizeof(struct hbm_host_stop_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+
+ memset(req, 0, len);
+ req->hbm_cmd = HOST_STOP_REQ_CMD;
+ req->reason = DRIVER_STOP_REQUEST;
+}
+
+/**
+ * mei_hbm_cl_flow_control_req - sends flow control request.
+ *
+ * @dev: the device structure
+ * @cl: client info
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_flow_control);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
+
+ dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
+ cl->host_client_id, cl->me_client_id);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_add_single_flow_creds - adds single buffer credentials.
+ *
+ * @dev: the device structure
+ * @flow: flow control message
+ */
+static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
+ struct hbm_flow_control *flow)
+{
+ struct mei_me_client *client;
+ int i;
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ client = &dev->me_clients[i];
+ if (client && flow->me_addr == client->client_id) {
+ if (client->props.single_recv_buf) {
+ client->mei_flow_ctrl_creds++;
+ dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
+ flow->me_addr);
+ dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
+ client->mei_flow_ctrl_creds);
+ } else {
+ BUG(); /* error in flow control */
+ }
+ }
+ }
+}
+
+/**
+ * mei_hbm_cl_flow_control_res - flow control response from me
+ *
+ * @dev: the device structure
+ * @flow_control: flow control response bus message
+ */
+static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
+ struct hbm_flow_control *flow_control)
+{
+ struct mei_cl *cl = NULL;
+ struct mei_cl *next = NULL;
+
+ if (!flow_control->host_addr) {
+ /* single receive buffer */
+ mei_hbm_add_single_flow_creds(dev, flow_control);
+ return;
+ }
+
+ /* normal connection */
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (mei_hbm_cl_addr_equal(cl, flow_control)) {
+ cl->mei_flow_ctrl_creds++;
+ dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
+ flow_control->host_addr, flow_control->me_addr);
+ dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
+ cl->mei_flow_ctrl_creds);
+ break;
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_cl_disconnect_req - sends disconnect message to fw.
+ *
+ * @dev: the device structure
+ * @cl: a client to disconnect from
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_disconnect_res - disconnect response from ME
+ *
+ * @dev: the device structure
+ * @rs: disconnect response bus message
+ */
+static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct mei_cl *cl;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
+
+ dev_dbg(&dev->pdev->dev,
+ "disconnect_response:\n"
+ "ME Client = %d\n"
+ "Host Client = %d\n"
+ "Status = %d\n",
+ rs->me_addr,
+ rs->host_addr,
+ rs->status);
+
+ list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+ cl = pos->cl;
+
+ if (!cl) {
+ list_del(&pos->list);
+ return;
+ }
+
+ dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
+ if (mei_hbm_cl_addr_equal(cl, rs)) {
+ list_del(&pos->list);
+ if (!rs->status)
+ cl->state = MEI_FILE_DISCONNECTED;
+
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
+ }
+ }
+}
+
+/**
+ * mei_hbm_cl_connect_req - send connection request to specific me client
+ *
+ * @dev: the device structure
+ * @cl: a client to connect to
+ *
+ * returns -EIO on write failure
+ */
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_connect_res - connect response from the ME
+ *
+ * @dev: the device structure
+ * @rs: connect response bus message
+ */
+static void mei_hbm_cl_connect_res(struct mei_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+
+ struct mei_cl *cl;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
+
+ dev_dbg(&dev->pdev->dev,
+ "connect_response:\n"
+ "ME Client = %d\n"
+ "Host Client = %d\n"
+ "Status = %d\n",
+ rs->me_addr,
+ rs->host_addr,
+ rs->status);
+
+ /* if WD or iamthif client treat specially */
+
+ if (is_treat_specially_client(&dev->wd_cl, rs)) {
+ dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+ mei_watchdog_register(dev);
+
+ return;
+ }
+
+ if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ return;
+ }
+ list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+
+ cl = pos->cl;
+ if (!cl) {
+ list_del(&pos->list);
+ return;
+ }
+ if (pos->fop_type == MEI_FOP_IOCTL) {
+ if (is_treat_specially_client(cl, rs)) {
+ list_del(&pos->list);
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
+ }
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_fw_disconnect_req - disconnect request initiated by the ME firmware;
+ * host sends disconnect response
+ *
+ * @dev: the device structure.
+ * @disconnect_req: disconnect request bus message from the me
+ */
+static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
+ struct hbm_client_connect_request *disconnect_req)
+{
+ struct mei_cl *cl, *next;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
+ dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
+ disconnect_req->host_addr,
+ disconnect_req->me_addr);
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->timer_count = 0;
+ if (cl == &dev->wd_cl)
+ dev->wd_pending = false;
+ else if (cl == &dev->iamthif_cl)
+ dev->iamthif_timer = 0;
+
+ /* prepare disconnect response */
+ mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
+ dev->wr_ext_msg.data, len);
+ break;
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_dispatch - bottom half read routine after ISR to
+ * handle the read bus message cmd processing.
+ *
+ * @dev: the device structure
+ * @hdr: header of bus message
+ */
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+{
+ struct mei_bus_message *mei_msg;
+ struct mei_me_client *me_client;
+ struct hbm_host_version_response *version_res;
+ struct hbm_client_connect_response *connect_res;
+ struct hbm_client_connect_response *disconnect_res;
+ struct hbm_client_connect_request *disconnect_req;
+ struct hbm_flow_control *flow_control;
+ struct hbm_props_response *props_res;
+ struct hbm_host_enum_response *enum_res;
+
+ /* read the message to our buffer */
+ BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
+ mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
+ mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
+
+ switch (mei_msg->hbm_cmd) {
+ case HOST_START_RES_CMD:
+ version_res = (struct hbm_host_version_response *)mei_msg;
+ if (!version_res->host_version_supported) {
+ dev->version = version_res->me_max_version;
+ dev_dbg(&dev->pdev->dev, "version mismatch.\n");
+
+ mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
+ dev->wr_msg.data);
+ mei_write_message(dev, &dev->wr_msg.hdr,
+ dev->wr_msg.data);
+ return;
+ }
+
+ dev->version.major_version = HBM_MAJOR_VERSION;
+ dev->version.minor_version = HBM_MINOR_VERSION;
+ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+ dev->init_clients_state == MEI_START_MESSAGE) {
+ dev->init_clients_timer = 0;
+ mei_hbm_enum_clients_req(dev);
+ } else {
+ dev->recvd_msg = false;
+ dev_dbg(&dev->pdev->dev, "reset due to received hbm: host start\n");
+ mei_reset(dev, 1);
+ return;
+ }
+
+ dev->recvd_msg = true;
+ dev_dbg(&dev->pdev->dev, "host start response message received.\n");
+ break;
+
+ case CLIENT_CONNECT_RES_CMD:
+ connect_res = (struct hbm_client_connect_response *) mei_msg;
+ mei_hbm_cl_connect_res(dev, connect_res);
+ dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
+ wake_up(&dev->wait_recvd_msg);
+ break;
+
+ case CLIENT_DISCONNECT_RES_CMD:
+ disconnect_res = (struct hbm_client_connect_response *) mei_msg;
+ mei_hbm_cl_disconnect_res(dev, disconnect_res);
+ dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
+ wake_up(&dev->wait_recvd_msg);
+ break;
+
+ case MEI_FLOW_CONTROL_CMD:
+ flow_control = (struct hbm_flow_control *) mei_msg;
+ mei_hbm_cl_flow_control_res(dev, flow_control);
+ dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
+ break;
+
+ case HOST_CLIENT_PROPERTIES_RES_CMD:
+ props_res = (struct hbm_props_response *)mei_msg;
+ me_client = &dev->me_clients[dev->me_client_presentation_num];
+
+ if (props_res->status || !dev->me_clients) {
+ dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
+ mei_reset(dev, 1);
+ return;
+ }
+
+ if (me_client->client_id != props_res->address) {
+ dev_err(&dev->pdev->dev,
+ "Host client properties reply mismatch\n");
+ mei_reset(dev, 1);
+
+ return;
+ }
+
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
+ dev_err(&dev->pdev->dev,
+ "Unexpected client properties reply\n");
+ mei_reset(dev, 1);
+
+ return;
+ }
+
+ me_client->props = props_res->client_properties;
+ dev->me_client_index++;
+ dev->me_client_presentation_num++;
+
+ /* request property for the next client */
+ mei_hbm_prop_req(dev);
+
+ break;
+
+ case HOST_ENUM_RES_CMD:
+ enum_res = (struct hbm_host_enum_response *) mei_msg;
+ memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
+ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+ dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
+ dev->init_clients_timer = 0;
+ dev->me_client_presentation_num = 0;
+ dev->me_client_index = 0;
+ mei_hbm_me_cl_allocate(dev);
+ dev->init_clients_state =
+ MEI_CLIENT_PROPERTIES_MESSAGE;
+
+ /* first property request */
+ mei_hbm_prop_req(dev);
+ } else {
+ dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
+ mei_reset(dev, 1);
+ return;
+ }
+ break;
+
+ case HOST_STOP_RES_CMD:
+ dev->dev_state = MEI_DEV_DISABLED;
+ dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
+ mei_reset(dev, 1);
+ break;
+
+ case CLIENT_DISCONNECT_REQ_CMD:
+ /* search for client */
+ disconnect_req = (struct hbm_client_connect_request *)mei_msg;
+ mei_hbm_fw_disconnect_req(dev, disconnect_req);
+ break;
+
+ case ME_STOP_REQ_CMD:
+
+ mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
+ dev->wr_ext_msg.data);
+ break;
+ default:
+ BUG();
+ break;
+
+ }
+}
+
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
new file mode 100644
index 000000000000..b552afbaf85c
--- /dev/null
+++ b/drivers/misc/mei/hbm.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_HBM_H_
+#define _MEI_HBM_H_
+
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
+
+static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
+{
+ hdr->host_addr = 0;
+ hdr->me_addr = 0;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+void mei_hbm_start_req(struct mei_device *dev);
+
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
+
+
+#endif /* _MEI_HBM_H_ */
+
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
new file mode 100644
index 000000000000..6a203b6e8346
--- /dev/null
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -0,0 +1,167 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _MEI_HW_MEI_REGS_H_
+#define _MEI_HW_MEI_REGS_H_
+
+/*
+ * MEI device IDs
+ */
+#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
+#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
+#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
+#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
+
+#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
+#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
+
+#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
+#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
+#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
+#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
+#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
+
+#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
+
+#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
+
+#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
+
+#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
+#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
+
+#define MEI_DEV_ID_CPT_1 0x1C3A /* Cougar Point */
+#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
+
+#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
+#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
+#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
+
+#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
+#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
+/*
+ * MEI HW Section
+ */
+
+/* MEI registers */
+/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
+#define H_CB_WW 0
+/* H_CSR - Host Control Status register */
+#define H_CSR 4
+/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
+#define ME_CB_RW 8
+/* ME_CSR_HA - ME Control Status Host Access register (read only) */
+#define ME_CSR_HA 0xC
+
+
+/* register bits of H_CSR (Host Control Status register) */
+/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
+#define H_CBD 0xFF000000
+/* Host Circular Buffer Write Pointer */
+#define H_CBWP 0x00FF0000
+/* Host Circular Buffer Read Pointer */
+#define H_CBRP 0x0000FF00
+/* Host Reset */
+#define H_RST 0x00000010
+/* Host Ready */
+#define H_RDY 0x00000008
+/* Host Interrupt Generate */
+#define H_IG 0x00000004
+/* Host Interrupt Status */
+#define H_IS 0x00000002
+/* Host Interrupt Enable */
+#define H_IE 0x00000001
+
+
+/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
+/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
+ * access to ME_CBD */
+#define ME_CBD_HRA 0xFF000000
+/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
+#define ME_CBWP_HRA 0x00FF0000
+/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
+#define ME_CBRP_HRA 0x0000FF00
+/* ME Reset HRA - host read only access to ME_RST */
+#define ME_RST_HRA 0x00000010
+/* ME Ready HRA - host read only access to ME_RDY */
+#define ME_RDY_HRA 0x00000008
+/* ME Interrupt Generate HRA - host read only access to ME_IG */
+#define ME_IG_HRA 0x00000004
+/* ME Interrupt Status HRA - host read only access to ME_IS */
+#define ME_IS_HRA 0x00000002
+/* ME Interrupt Enable HRA - host read only access to ME_IE */
+#define ME_IE_HRA 0x00000001
+
+#endif /* _MEI_HW_MEI_REGS_H_ */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
new file mode 100644
index 000000000000..45ea7185c003
--- /dev/null
+++ b/drivers/misc/mei/hw-me.c
@@ -0,0 +1,576 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+
+#include "hbm.h"
+
+
+/**
+ * mei_reg_read - Reads 32bit data from the mei device
+ *
+ * @hw: the me hardware structure
+ * @offset: offset from which to read the data
+ *
+ * returns register value (u32)
+ */
+static inline u32 mei_reg_read(const struct mei_me_hw *hw,
+ unsigned long offset)
+{
+ return ioread32(hw->mem_addr + offset);
+}
+
+
+/**
+ * mei_reg_write - Writes 32bit data to the mei device
+ *
+ * @hw: the me hardware structure
+ * @offset: offset to which to write the data
+ * @value: register value to write (u32)
+ */
+static inline void mei_reg_write(const struct mei_me_hw *hw,
+ unsigned long offset, u32 value)
+{
+ iowrite32(value, hw->mem_addr + offset);
+}
+
+/**
+ * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
+ * read window register
+ *
+ * @dev: the device structure
+ *
+ * returns ME_CB_RW register value (u32)
+ */
+static u32 mei_me_mecbrw_read(const struct mei_device *dev)
+{
+ return mei_reg_read(to_me_hw(dev), ME_CB_RW);
+}
+/**
+ * mei_mecsr_read - Reads 32bit data from the ME CSR
+ *
+ * @dev: the device structure
+ *
+ * returns ME_CSR_HA register value (u32)
+ */
+static inline u32 mei_mecsr_read(const struct mei_me_hw *hw)
+{
+ return mei_reg_read(hw, ME_CSR_HA);
+}
+
+/**
+ * mei_hcsr_read - Reads 32bit data from the host CSR
+ *
+ * @hw: the me hardware structure
+ *
+ * returns H_CSR register value (u32)
+ */
+static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
+{
+ return mei_reg_read(hw, H_CSR);
+}
+
+/**
+ * mei_hcsr_set - writes H_CSR register to the mei device,
+ * and ignores the H_IS bit for it is write-one-to-zero.
+ *
+ * @hw: the me hardware structure
+ * @hcsr: new register value
+ */
+static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
+{
+ hcsr &= ~H_IS;
+ mei_reg_write(hw, H_CSR, hcsr);
+}
+
+
+/**
+ * mei_me_hw_config - configure hw dependent settings
+ *
+ * @dev: mei device
+ */
+static void mei_me_hw_config(struct mei_device *dev)
+{
+ u32 hcsr = mei_hcsr_read(to_me_hw(dev));
+ /* Doesn't change in runtime */
+ dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+}
+/**
+ * mei_me_intr_clear - clear and stop interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_clear(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ if ((hcsr & H_IS) == H_IS)
+ mei_reg_write(hw, H_CSR, hcsr);
+}
+/**
+ * mei_me_intr_enable - enables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_enable(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ hcsr |= H_IE;
+ mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_intr_disable - disables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_disable(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ hcsr &= ~H_IE;
+ mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_hw_reset - resets fw via mei csr register.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupts should be enabled after reset.
+ */
+static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+
+ dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
+
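+ /* assert the reset and interrupt-generate bits first, then clear the
+ * reset bit in a second write to complete the reset handshake */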
+ hcsr |= (H_RST | H_IG);
+
+ if (intr_enable)
+ hcsr |= H_IE;
+ else
+ hcsr &= ~H_IE;
+
+ mei_hcsr_set(hw, hcsr);
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ hcsr &= ~H_RST;
+
+ mei_hcsr_set(hw, hcsr);
+
+ hcsr = mei_hcsr_read(hw);
+
+ dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
+}
+
+/**
+ * mei_me_host_set_ready - sets the host ready bits in the host CSR
+ *
+ * @dev: mei device
+ */
+
+static void mei_me_host_set_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->host_hw_state |= H_IE | H_IG | H_RDY;
+ mei_hcsr_set(hw, hw->host_hw_state);
+}
+/**
+ * mei_me_host_is_ready - check whether the host has turned ready
+ *
+ * @dev: mei device
+ *
+ * returns true if the host ready bit (H_RDY) is set
+ */
+static bool mei_me_host_is_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->host_hw_state = mei_hcsr_read(hw);
+ return (hw->host_hw_state & H_RDY) == H_RDY;
+}
+
+/**
+ * mei_me_hw_is_ready - check whether the me(hw) has turned ready
+ *
+ * @dev: mei device
+ *
+ * returns true if the ME ready bit (ME_RDY_HRA) is set
+ */
+static bool mei_me_hw_is_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->me_hw_state = mei_mecsr_read(hw);
+ return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
+}
+
+/**
+ * mei_hbuf_filled_slots - gets number of device filled buffer slots
+ *
+ * @dev: the device structure
+ *
+ * returns number of filled slots
+ */
+static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ char read_ptr, write_ptr;
+
+ hw->host_hw_state = mei_hcsr_read(hw);
+
+ read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
+ write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
+
+ return (unsigned char) (write_ptr - read_ptr);
+}
+
+/**
+ * mei_me_hbuf_is_empty - checks if host buffer is empty.
+ *
+ * @dev: the device structure
+ *
+ * returns true if empty, false - otherwise.
+ */
+static bool mei_me_hbuf_is_empty(struct mei_device *dev)
+{
+ return mei_hbuf_filled_slots(dev) == 0;
+}
+
+/**
+ * mei_me_hbuf_empty_slots - counts write empty slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -EOVERFLOW if overflow, otherwise empty slots count
+ */
+static int mei_me_hbuf_empty_slots(struct mei_device *dev)
+{
+ unsigned char filled_slots, empty_slots;
+
+ filled_slots = mei_hbuf_filled_slots(dev);
+ empty_slots = dev->hbuf_depth - filled_slots;
+
+ /* check for overflow */
+ if (filled_slots > dev->hbuf_depth)
+ return -EOVERFLOW;
+
+ return empty_slots;
+}
+
+static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
+{
+ return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
+}
+
+
+/**
+ * mei_me_write_message - writes a message to the mei device.
+ *
+ * @dev: the device structure
+ * @header: mei HECI header of message
+ * @buf: message payload to be written
+ *
+ * This function returns -EIO if write has failed
+ */
+static int mei_me_write_message(struct mei_device *dev,
+ struct mei_msg_hdr *header,
+ unsigned char *buf)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long rem, dw_cnt;
+ unsigned long length = header->length;
+ u32 *reg_buf = (u32 *)buf;
+ u32 hcsr;
+ int i;
+ int empty_slots;
+
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+ empty_slots = mei_hbuf_empty_slots(dev);
+ dev_dbg(&dev->pdev->dev, "empty slots = %d.\n", empty_slots);
+
+ dw_cnt = mei_data2slots(length);
+ if (empty_slots < 0 || dw_cnt > empty_slots)
+ return -EIO;
+
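+ /* write the header dword, then the payload dword by dword; a trailing
+ * partial dword is zero-padded before being written */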
+ mei_reg_write(hw, H_CB_WW, *((u32 *) header));
+
+ for (i = 0; i < length / 4; i++)
+ mei_reg_write(hw, H_CB_WW, reg_buf[i]);
+
+ rem = length & 0x3;
+ if (rem > 0) {
+ u32 reg = 0;
+ memcpy(&reg, &buf[length - rem], rem);
+ mei_reg_write(hw, H_CB_WW, reg);
+ }
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ mei_hcsr_set(hw, hcsr);
+ if (!mei_me_hw_is_ready(dev))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * mei_me_count_full_read_slots - counts read full slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -EOVERFLOW if overflow, otherwise filled slots count
+ */
+static int mei_me_count_full_read_slots(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ char read_ptr, write_ptr;
+ unsigned char buffer_depth, filled_slots;
+
+ hw->me_hw_state = mei_mecsr_read(hw);
+ buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
+ read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
+ write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
+ filled_slots = (unsigned char) (write_ptr - read_ptr);
+
+ /* check for overflow */
+ if (filled_slots > buffer_depth)
+ return -EOVERFLOW;
+
+ dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
+ return (int)filled_slots;
+}
+
+/**
+ * mei_me_read_slots - reads a message from mei device.
+ *
+ * @dev: the device structure
+ * @buffer: message buffer to be filled
+ * @buffer_length: length of the message to read, in bytes
+ */
+static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 *reg_buf = (u32 *)buffer;
+ u32 hcsr;
+
+ for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+ *reg_buf++ = mei_me_mecbrw_read(dev);
+
+ if (buffer_length > 0) {
+ u32 reg = mei_me_mecbrw_read(dev);
+ memcpy(reg_buf, &reg, buffer_length);
+ }
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ mei_hcsr_set(hw, hcsr);
+ return 0;
+}
+
+/**
+ * mei_me_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *) dev_id;
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 csr_reg = mei_hcsr_read(hw);
+
+ if ((csr_reg & H_IS) != H_IS)
+ return IRQ_NONE;
+
+ /* clear H_IS bit in H_CSR */
+ mei_reg_write(hw, H_CSR, csr_reg);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
+ * processing.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *) dev_id;
+ struct mei_cl_cb complete_list;
+ struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+ struct mei_cl *cl;
+ s32 slots;
+ int rets;
+ bool bus_message_received;
+
+
+ dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
+ /* initialize our complete list */
+ mutex_lock(&dev->device_lock);
+ mei_io_list_init(&complete_list);
+
+ /* Ack the interrupt here
+ * In case of MSI we don't go through the quick handler */
+ if (pci_dev_msi_enabled(dev->pdev))
+ mei_clear_interrupts(dev);
+
+ /* check if ME wants a reset */
+ if (!mei_hw_is_ready(dev) &&
+ dev->dev_state != MEI_DEV_RESETING &&
+ dev->dev_state != MEI_DEV_INITIALIZING) {
+ dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+ mei_reset(dev, 1);
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ }
+
+ /* check if we need to start the dev */
+ if (!mei_host_is_ready(dev)) {
+ if (mei_hw_is_ready(dev)) {
+ dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+
+ mei_host_set_ready(dev);
+
+ dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
+ /* link is established; start sending messages. */
+
+ dev->dev_state = MEI_DEV_INIT_CLIENTS;
+
+ mei_hbm_start_req(dev);
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ } else {
+ dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ }
+ }
+ /* check slots available for reading */
+ slots = mei_count_full_read_slots(dev);
+ while (slots > 0) {
+ /* we have urgent data to send so break the read */
+ if (dev->wr_ext_msg.hdr.length)
+ break;
+ dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
+ dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
+ rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ if (rets)
+ goto end;
+ }
+ rets = mei_irq_write_handler(dev, &complete_list);
+end:
+ dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
+ bus_message_received = false;
+ if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
+ dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
+ bus_message_received = true;
+ }
+ mutex_unlock(&dev->device_lock);
+ if (bus_message_received) {
+ dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
+ wake_up_interruptible(&dev->wait_recvd_msg);
+ bus_message_received = false;
+ }
+ if (list_empty(&complete_list.list))
+ return IRQ_HANDLED;
+
+
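+ /* run the completion handlers for all callbacks gathered during this
+ * interrupt pass; iamthif callbacks are completed separately */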
+ list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
+ cl = cb_pos->cl;
+ list_del(&cb_pos->list);
+ if (cl) {
+ if (cl != &dev->iamthif_cl) {
+ dev_dbg(&dev->pdev->dev, "completing call back.\n");
+ mei_irq_complete_handler(cl, cb_pos);
+ cb_pos = NULL;
+ } else if (cl == &dev->iamthif_cl) {
+ mei_amthif_complete(dev, cb_pos);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+static const struct mei_hw_ops mei_me_hw_ops = {
+
+ .host_set_ready = mei_me_host_set_ready,
+ .host_is_ready = mei_me_host_is_ready,
+
+ .hw_is_ready = mei_me_hw_is_ready,
+ .hw_reset = mei_me_hw_reset,
+ .hw_config = mei_me_hw_config,
+
+ .intr_clear = mei_me_intr_clear,
+ .intr_enable = mei_me_intr_enable,
+ .intr_disable = mei_me_intr_disable,
+
+ .hbuf_free_slots = mei_me_hbuf_empty_slots,
+ .hbuf_is_ready = mei_me_hbuf_is_empty,
+ .hbuf_max_len = mei_me_hbuf_max_len,
+
+ .write = mei_me_write_message,
+
+ .rdbuf_full_slots = mei_me_count_full_read_slots,
+ .read_hdr = mei_me_mecbrw_read,
+ .read = mei_me_read_slots
+};
+
+/**
+ * mei_me_dev_init - allocates and initializes the mei device structure
+ *
+ * @pdev: The pci device structure
+ *
+ * returns The mei_device pointer on success, NULL on failure.
+ */
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+
+ dev = kzalloc(sizeof(struct mei_device) +
+ sizeof(struct mei_me_hw), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ mei_device_init(dev);
+
+ INIT_LIST_HEAD(&dev->wd_cl.link);
+ INIT_LIST_HEAD(&dev->iamthif_cl.link);
+ mei_io_list_init(&dev->amthif_cmd_list);
+ mei_io_list_init(&dev->amthif_rd_complete_list);
+
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ INIT_WORK(&dev->init_work, mei_host_client_init);
+
+ dev->ops = &mei_me_hw_ops;
+
+ dev->pdev = pdev;
+ return dev;
+}
+
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
new file mode 100644
index 000000000000..8518d3eeb838
--- /dev/null
+++ b/drivers/misc/mei/hw-me.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+
+
+#ifndef _MEI_INTERFACE_H_
+#define _MEI_INTERFACE_H_
+
+#include <linux/mei.h>
+#include "mei_dev.h"
+#include "client.h"
+
+struct mei_me_hw {
+ void __iomem *mem_addr;
+ /*
+ * hw states of host and fw(ME)
+ */
+ u32 host_hw_state;
+ u32 me_hw_state;
+};
+
+#define to_me_hw(dev) ((struct mei_me_hw *)((dev)->hw))
+
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
+
+/* get slots (dwords) from a message length + header (bytes) */
+static inline unsigned char mei_data2slots(size_t length)
+{
+ return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
+}
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
+
+#endif /* _MEI_INTERFACE_H_ */
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index be8ca6b333ca..cb2f556b4252 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,109 +31,6 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
-/*
- * Internal Clients Number
- */
-#define MEI_WD_HOST_CLIENT_ID 1
-#define MEI_IAMTHIF_HOST_CLIENT_ID 2
-
-/*
- * MEI device IDs
- */
-#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
-#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
-#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
-#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
-
-#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
-#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
-
-#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
-#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
-#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
-#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
-#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
-
-#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
-
-#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
-
-#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
-
-#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
-#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
-
-#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */
-#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
-
-#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
-#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
-#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
-
-#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
-#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
-/*
- * MEI HW Section
- */
-
-/* MEI registers */
-/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
-#define H_CB_WW 0
-/* H_CSR - Host Control Status register */
-#define H_CSR 4
-/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
-#define ME_CB_RW 8
-/* ME_CSR_HA - ME Control Status Host Access register (read only) */
-#define ME_CSR_HA 0xC
-
-
-/* register bits of H_CSR (Host Control Status register) */
-/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
-#define H_CBD 0xFF000000
-/* Host Circular Buffer Write Pointer */
-#define H_CBWP 0x00FF0000
-/* Host Circular Buffer Read Pointer */
-#define H_CBRP 0x0000FF00
-/* Host Reset */
-#define H_RST 0x00000010
-/* Host Ready */
-#define H_RDY 0x00000008
-/* Host Interrupt Generate */
-#define H_IG 0x00000004
-/* Host Interrupt Status */
-#define H_IS 0x00000002
-/* Host Interrupt Enable */
-#define H_IE 0x00000001
-
-
-/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
-/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
-access to ME_CBD */
-#define ME_CBD_HRA 0xFF000000
-/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
-#define ME_CBWP_HRA 0x00FF0000
-/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
-#define ME_CBRP_HRA 0x0000FF00
-/* ME Reset HRA - host read only access to ME_RST */
-#define ME_RST_HRA 0x00000010
-/* ME Ready HRA - host read only access to ME_RDY */
-#define ME_RDY_HRA 0x00000008
-/* ME Interrupt Generate HRA - host read only access to ME_IG */
-#define ME_IG_HRA 0x00000004
-/* ME Interrupt Status HRA - host read only access to ME_IS */
-#define ME_IS_HRA 0x00000002
-/* ME Interrupt Enable HRA - host read only access to ME_IE */
-#define ME_IE_HRA 0x00000001
/*
* MEI Version
@@ -224,6 +121,22 @@ struct mei_bus_message {
u8 data[0];
} __packed;
+/**
+ * struct mei_hbm_cl_cmd - client specific host bus command
+ * CONNECT, DISCONNECT, and FLOW CONTROL
+ *
+ * @hbm_cmd - bus message command header
+ * @me_addr - address of the client in ME
+ * @host_addr - address of the client in the driver
+ * @data
+ */
+struct mei_hbm_cl_cmd {
+ u8 hbm_cmd;
+ u8 me_addr;
+ u8 host_addr;
+ u8 data;
+};
+
struct hbm_version {
u8 minor_version;
u8 major_version;
@@ -333,11 +246,5 @@ struct hbm_flow_control {
u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
} __packed;
-struct mei_me_client {
- struct mei_client_properties props;
- u8 client_id;
- u8 mei_flow_ctrl_creds;
-} __packed;
-
#endif
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index a54cd5567ca2..6ec530168afb 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -19,11 +19,11 @@
#include <linux/wait.h>
#include <linux/delay.h>
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
#include <linux/mei.h>
+#include "mei_dev.h"
+#include "client.h"
+
const char *mei_dev_state_str(int state)
{
#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
@@ -42,84 +42,20 @@ const char *mei_dev_state_str(int state)
#undef MEI_DEV_STATE
}
-
-
-/**
- * mei_io_list_flush - removes list entry belonging to cl.
- *
- * @list: An instance of our list structure
- * @cl: private data of the file object
- */
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
-{
- struct mei_cl_cb *pos;
- struct mei_cl_cb *next;
-
- list_for_each_entry_safe(pos, next, &list->list, list) {
- if (pos->cl) {
- if (mei_cl_cmp_id(cl, pos->cl))
- list_del(&pos->list);
- }
- }
-}
-/**
- * mei_cl_flush_queues - flushes queue lists belonging to cl.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- */
-int mei_cl_flush_queues(struct mei_cl *cl)
+void mei_device_init(struct mei_device *dev)
{
- if (!cl || !cl->dev)
- return -EINVAL;
-
- dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
- mei_io_list_flush(&cl->dev->read_list, cl);
- mei_io_list_flush(&cl->dev->write_list, cl);
- mei_io_list_flush(&cl->dev->write_waiting_list, cl);
- mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
- mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
- return 0;
-}
-
-
-
-/**
- * init_mei_device - allocates and initializes the mei device structure
- *
- * @pdev: The pci device structure
- *
- * returns The mei_device_device pointer on success, NULL on failure.
- */
-struct mei_device *mei_device_init(struct pci_dev *pdev)
-{
- struct mei_device *dev;
-
- dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
- if (!dev)
- return NULL;
-
/* setup our list array */
INIT_LIST_HEAD(&dev->file_list);
- INIT_LIST_HEAD(&dev->wd_cl.link);
- INIT_LIST_HEAD(&dev->iamthif_cl.link);
mutex_init(&dev->device_lock);
init_waitqueue_head(&dev->wait_recvd_msg);
init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
mei_io_list_init(&dev->read_list);
mei_io_list_init(&dev->write_list);
mei_io_list_init(&dev->write_waiting_list);
mei_io_list_init(&dev->ctrl_wr_list);
mei_io_list_init(&dev->ctrl_rd_list);
- mei_io_list_init(&dev->amthif_cmd_list);
- mei_io_list_init(&dev->amthif_rd_complete_list);
- dev->pdev = pdev;
- return dev;
}
/**
@@ -131,101 +67,64 @@ struct mei_device *mei_device_init(struct pci_dev *pdev)
*/
int mei_hw_init(struct mei_device *dev)
{
- int err = 0;
- int ret;
+ int ret = 0;
mutex_lock(&dev->device_lock);
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, mestate = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
/* acknowledge interrupt and stop interupts */
- if ((dev->host_hw_state & H_IS) == H_IS)
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
+ mei_clear_interrupts(dev);
- /* Doesn't change in runtime */
- dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24;
+ mei_hw_config(dev);
dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
mei_reset(dev, 1);
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
/* wait for ME to turn on ME_RDY */
if (!dev->recvd_msg) {
mutex_unlock(&dev->device_lock);
- err = wait_event_interruptible_timeout(dev->wait_recvd_msg,
+ ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
dev->recvd_msg,
mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
}
- if (err <= 0 && !dev->recvd_msg) {
+ if (ret <= 0 && !dev->recvd_msg) {
dev->dev_state = MEI_DEV_DISABLED;
dev_dbg(&dev->pdev->dev,
"wait_event_interruptible_timeout failed"
"on wait for ME to turn on ME_RDY.\n");
- ret = -ENODEV;
- goto out;
+ goto err;
}
- if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
- ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
- dev->dev_state = MEI_DEV_DISABLED;
- dev_dbg(&dev->pdev->dev,
- "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
- if (!(dev->host_hw_state & H_RDY))
- dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
- if (!(dev->me_hw_state & ME_RDY_HRA))
- dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
+ if (!mei_host_is_ready(dev)) {
+ dev_err(&dev->pdev->dev, "host is not ready.\n");
+ goto err;
+ }
- dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
- ret = -ENODEV;
- goto out;
+ if (!mei_hw_is_ready(dev)) {
+ dev_err(&dev->pdev->dev, "ME is not ready.\n");
+ goto err;
}
if (dev->version.major_version != HBM_MAJOR_VERSION ||
dev->version.minor_version != HBM_MINOR_VERSION) {
dev_dbg(&dev->pdev->dev, "MEI start failed.\n");
- ret = -ENODEV;
- goto out;
+ goto err;
}
dev->recvd_msg = false;
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
- dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
dev_dbg(&dev->pdev->dev, "link layer has been established.\n");
- dev_dbg(&dev->pdev->dev, "MEI start success.\n");
- ret = 0;
-out:
mutex_unlock(&dev->device_lock);
- return ret;
-}
-
-/**
- * mei_hw_reset - resets fw via mei csr register.
- *
- * @dev: the device structure
- * @interrupts_enabled: if interrupt should be enabled after reset.
- */
-static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
-{
- dev->host_hw_state |= (H_RST | H_IG);
-
- if (interrupts_enabled)
- mei_enable_interrupts(dev);
- else
- mei_disable_interrupts(dev);
+ return 0;
+err:
+ dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
+ dev->dev_state = MEI_DEV_DISABLED;
+ mutex_unlock(&dev->device_lock);
+ return -ENODEV;
}
/**
@@ -236,56 +135,34 @@ static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
*/
void mei_reset(struct mei_device *dev, int interrupts_enabled)
{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
bool unexpected;
- if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
- dev->need_reset = true;
+ if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET)
return;
- }
unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN &&
dev->dev_state != MEI_DEV_POWER_UP);
- dev->host_hw_state = mei_hcsr_read(dev);
-
- dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n",
- dev->host_hw_state);
-
mei_hw_reset(dev, interrupts_enabled);
- dev->host_hw_state &= ~H_RST;
- dev->host_hw_state |= H_IG;
-
- mei_hcsr_set(dev);
-
- dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
- dev->host_hw_state);
-
- dev->need_reset = false;
if (dev->dev_state != MEI_DEV_INITIALIZING) {
if (dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN)
dev->dev_state = MEI_DEV_RESETING;
- list_for_each_entry_safe(cl_pos,
- cl_next, &dev->file_list, link) {
- cl_pos->state = MEI_FILE_DISCONNECTED;
- cl_pos->mei_flow_ctrl_creds = 0;
- cl_pos->read_cb = NULL;
- cl_pos->timer_count = 0;
- }
+ mei_cl_all_disconnect(dev);
+
/* remove entry if already in list */
dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
- mei_me_cl_unlink(dev, &dev->wd_cl);
-
- mei_me_cl_unlink(dev, &dev->iamthif_cl);
+ mei_cl_unlink(&dev->wd_cl);
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->iamthif_cl);
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
mei_amthif_reset_params(dev);
memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
@@ -295,392 +172,17 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->rd_msg_hdr = 0;
dev->wd_pending = false;
- /* update the state of the registers after reset */
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
-
- dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
if (unexpected)
dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
- /* Wake up all readings so they can be interrupted */
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if (waitqueue_active(&cl_pos->rx_wait)) {
- dev_dbg(&dev->pdev->dev, "Waking up client!\n");
- wake_up_interruptible(&cl_pos->rx_wait);
- }
- }
- /* remove all waiting requests */
- list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
- list_del(&cb_pos->list);
- mei_io_cb_free(cb_pos);
- }
-}
-
-
-
-/**
- * mei_host_start_message - mei host sends the start message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_start_message(struct mei_device *dev)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_host_version_request *start_req;
- const size_t len = sizeof(struct hbm_host_version_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- /* host start message */
- start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1];
- memset(start_req, 0, len);
- start_req->hbm_cmd = HOST_START_REQ_CMD;
- start_req->host_version.major_version = HBM_MAJOR_VERSION;
- start_req->host_version.minor_version = HBM_MINOR_VERSION;
-
- dev->recvd_msg = false;
- if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) {
- dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
- dev->dev_state = MEI_DEV_RESETING;
- mei_reset(dev, 1);
- }
- dev->init_clients_state = MEI_START_MESSAGE;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- return ;
-}
-
-/**
- * mei_host_enum_clients_message - host sends the client enumeration request message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_enum_clients_message(struct mei_device *dev)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_host_enum_request *enum_req;
- const size_t len = sizeof(struct hbm_host_enum_request);
- /* enumerate clients */
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
- memset(enum_req, 0, sizeof(struct hbm_host_enum_request));
- enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-
- if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) {
- dev->dev_state = MEI_DEV_RESETING;
- dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
- mei_reset(dev, 1);
- }
- dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- return;
-}
-
-
-/**
- * mei_allocate_me_clients_storage - allocates storage for ME clients
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_allocate_me_clients_storage(struct mei_device *dev)
-{
- struct mei_me_client *clients;
- int b;
-
- /* count how many ME clients we have */
- for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
- dev->me_clients_num++;
-
- if (dev->me_clients_num <= 0)
- return ;
-
-
- if (dev->me_clients != NULL) {
- kfree(dev->me_clients);
- dev->me_clients = NULL;
- }
- dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
- dev->me_clients_num * sizeof(struct mei_me_client));
- /* allocate storage for ME clients representation */
- clients = kcalloc(dev->me_clients_num,
- sizeof(struct mei_me_client), GFP_KERNEL);
- if (!clients) {
- dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
- dev->dev_state = MEI_DEV_RESETING;
- mei_reset(dev, 1);
- return ;
- }
- dev->me_clients = clients;
- return ;
-}
-
-void mei_host_client_init(struct work_struct *work)
-{
- struct mei_device *dev = container_of(work,
- struct mei_device, init_work);
- struct mei_client_properties *client_props;
- int i;
-
- mutex_lock(&dev->device_lock);
-
- bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->open_handle_count = 0;
-
- /*
- * Reserving the first three client IDs
- * 0: Reserved for MEI Bus Message communications
- * 1: Reserved for Watchdog
- * 2: Reserved for AMTHI
- */
- bitmap_set(dev->host_clients_map, 0, 3);
-
- for (i = 0; i < dev->me_clients_num; i++) {
- client_props = &dev->me_clients[i].props;
-
- if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
- mei_amthif_host_init(dev);
- else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
- mei_wd_host_init(dev);
- }
-
- dev->dev_state = MEI_DEV_ENABLED;
-
- mutex_unlock(&dev->device_lock);
-}
-
-int mei_host_client_enumerate(struct mei_device *dev)
-{
-
- struct mei_msg_hdr *mei_hdr;
- struct hbm_props_request *prop_req;
- const size_t len = sizeof(struct hbm_props_request);
- unsigned long next_client_index;
- u8 client_num;
-
-
- client_num = dev->me_client_presentation_num;
-
- next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
- dev->me_client_index);
-
- /* We got all client properties */
- if (next_client_index == MEI_CLIENTS_MAX) {
- schedule_work(&dev->init_work);
-
- return 0;
- }
-
- dev->me_clients[client_num].client_id = next_client_index;
- dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
- prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
-
- memset(prop_req, 0, sizeof(struct hbm_props_request));
-
-
- prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req->address = next_client_index;
-
- if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req,
- mei_hdr->length)) {
- dev->dev_state = MEI_DEV_RESETING;
- dev_err(&dev->pdev->dev, "Properties request command failed\n");
- mei_reset(dev, 1);
-
- return -EIO;
- }
-
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- dev->me_client_index = next_client_index;
-
- return 0;
-}
-
-/**
- * mei_cl_init - initializes the client structure.
- *
- * @priv: client structure to be initialized
- * @dev: the device structure
- */
-void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
-{
- memset(priv, 0, sizeof(struct mei_cl));
- init_waitqueue_head(&priv->wait);
- init_waitqueue_head(&priv->rx_wait);
- init_waitqueue_head(&priv->tx_wait);
- INIT_LIST_HEAD(&priv->link);
- priv->reading_state = MEI_IDLE;
- priv->writing_state = MEI_IDLE;
- priv->dev = dev;
-}
-
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
-{
- int i, res = -ENOENT;
-
- for (i = 0; i < dev->me_clients_num; ++i)
- if (uuid_le_cmp(*cuuid,
- dev->me_clients[i].props.protocol_name) == 0) {
- res = i;
- break;
- }
-
- return res;
-}
-
-
-/**
- * mei_me_cl_link - create link between host and me client and add
- * me_cl to the list
- *
- * @dev: the device structure
- * @cl: link between me and host client associated with opened file descriptor
- * @cuuid: uuid of ME client
- * @host_cl_id: id of the host client
- *
- * returns ME client index if ME client was found
- * -EINVAL on incorrect values
- * -ENOENT if client not found
- */
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
- const uuid_le *cuuid, u8 host_cl_id)
-{
- int i;
-
- if (!dev || !cl || !cuuid)
- return -EINVAL;
-
- /* check for valid client id */
- i = mei_me_cl_by_uuid(dev, cuuid);
- if (i >= 0) {
- cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
- cl->host_client_id = host_cl_id;
-
- list_add_tail(&cl->link, &dev->file_list);
- return (u8)i;
- }
-
- return -ENOENT;
-}
-/**
- * mei_me_cl_unlink - remove me_cl from the list
- *
- * @dev: the device structure
- * @host_client_id: host client id to be removed
- */
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl *pos, *next;
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if (cl->host_client_id == pos->host_client_id) {
- dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
- pos->host_client_id, pos->me_client_id);
- list_del_init(&pos->link);
- break;
- }
- }
-}
+ /* wake up all readers so they can be interrupted */
+ mei_cl_all_read_wakeup(dev);
-/**
- * mei_cl_allocate - allocates a client structure and sets it up.
- * @dev: the device structure
- *
- * returns the allocated client or NULL on failure
- */
-struct mei_cl *mei_cl_allocate(struct mei_device *dev)
-{
- struct mei_cl *cl;
-
- cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
- if (!cl)
- return NULL;
-
- mei_cl_init(cl, dev);
-
- return cl;
+ /* remove all waiting requests */
+ mei_cl_all_write_clear(dev);
}
-/**
- * mei_disconnect_host_client - sends disconnect message to fw from host client.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl_cb *cb;
- int rets, err;
-
- if (!dev || !cl)
- return -ENODEV;
-
- if (cl->state != MEI_FILE_DISCONNECTING)
- return 0;
-
- cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
-
- cb->fop_type = MEI_FOP_CLOSE;
- if (dev->mei_host_buffer_is_empty) {
- dev->mei_host_buffer_is_empty = false;
- if (mei_disconnect(dev, cl)) {
- rets = -ENODEV;
- dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
- goto free;
- }
- mdelay(10); /* Wait for hardware disconnection ready */
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- } else {
- dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-
- }
- mutex_unlock(&dev->device_lock);
-
- err = wait_event_timeout(dev->wait_recvd_msg,
- MEI_FILE_DISCONNECTED == cl->state,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
-
- mutex_lock(&dev->device_lock);
- if (MEI_FILE_DISCONNECTED == cl->state) {
- rets = 0;
- dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
- } else {
- rets = -ENODEV;
- if (MEI_FILE_DISCONNECTED != cl->state)
- dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
-
- if (err)
- dev_dbg(&dev->pdev->dev,
- "wait failed disconnect err=%08x\n",
- err);
-
- dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
- }
-
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
-free:
- mei_io_cb_free(cb);
- return rets;
-}
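
The hunks above replace the open-coded walks over dev->file_list with helpers that this series introduces in client.c (mei_cl_all_disconnect, mei_cl_all_read_wakeup, mei_cl_all_write_clear). As a rough sketch only, assuming the new helper simply factors out the loop deleted a few hunks earlier (the real implementation lives in client.c, which is not part of this excerpt):

static void mei_cl_all_disconnect_sketch(struct mei_device *dev)
{
	struct mei_cl *cl, *next;

	/* same per-client cleanup that mei_reset() used to do inline */
	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->read_cb = NULL;
		cl->timer_count = 0;
	}
}
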
diff --git a/drivers/misc/mei/interface.c b/drivers/misc/mei/interface.c
deleted file mode 100644
index 8de854785960..000000000000
--- a/drivers/misc/mei/interface.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/pci.h>
-#include "mei_dev.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-
-
-/**
- * mei_hcsr_set - writes H_CSR register to the mei device,
- * and ignores the H_IS bit for it is write-one-to-zero.
- *
- * @dev: the device structure
- */
-void mei_hcsr_set(struct mei_device *dev)
-{
- if ((dev->host_hw_state & H_IS) == H_IS)
- dev->host_hw_state &= ~H_IS;
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
- dev->host_hw_state = mei_hcsr_read(dev);
-}
-
-/**
- * mei_enable_interrupts - enables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_enable_interrupts(struct mei_device *dev)
-{
- dev->host_hw_state |= H_IE;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_disable_interrupts - disables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_disable_interrupts(struct mei_device *dev)
-{
- dev->host_hw_state &= ~H_IE;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_hbuf_filled_slots - gets number of device filled buffer slots
- *
- * @dev: the device structure
- *
- * returns number of filled slots
- */
-static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
-{
- char read_ptr, write_ptr;
-
- dev->host_hw_state = mei_hcsr_read(dev);
-
- read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8);
- write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16);
-
- return (unsigned char) (write_ptr - read_ptr);
-}
-
-/**
- * mei_hbuf_is_empty - checks if host buffer is empty.
- *
- * @dev: the device structure
- *
- * returns true if empty, false - otherwise.
- */
-bool mei_hbuf_is_empty(struct mei_device *dev)
-{
- return mei_hbuf_filled_slots(dev) == 0;
-}
-
-/**
- * mei_hbuf_empty_slots - counts write empty slots.
- *
- * @dev: the device structure
- *
- * returns -EOVERFLOW if overflow, otherwise empty slots count
- */
-int mei_hbuf_empty_slots(struct mei_device *dev)
-{
- unsigned char filled_slots, empty_slots;
-
- filled_slots = mei_hbuf_filled_slots(dev);
- empty_slots = dev->hbuf_depth - filled_slots;
-
- /* check for overflow */
- if (filled_slots > dev->hbuf_depth)
- return -EOVERFLOW;
-
- return empty_slots;
-}
-
-/**
- * mei_write_message - writes a message to mei device.
- *
- * @dev: the device structure
- * @header: header of message
- * @buf: message buffer to be written
- * @length: length of the message in bytes
- *
- * This function returns -EIO if write has failed
- */
-int mei_write_message(struct mei_device *dev, struct mei_msg_hdr *header,
- unsigned char *buf, unsigned long length)
-{
- unsigned long rem, dw_cnt;
- u32 *reg_buf = (u32 *)buf;
- int i;
- int empty_slots;
-
-
- dev_dbg(&dev->pdev->dev,
- "mei_write_message header=%08x.\n",
- *((u32 *) header));
-
- empty_slots = mei_hbuf_empty_slots(dev);
- dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
-
- dw_cnt = mei_data2slots(length);
- if (empty_slots < 0 || dw_cnt > empty_slots)
- return -EIO;
-
- mei_reg_write(dev, H_CB_WW, *((u32 *) header));
-
- for (i = 0; i < length / 4; i++)
- mei_reg_write(dev, H_CB_WW, reg_buf[i]);
-
- rem = length & 0x3;
- if (rem > 0) {
- u32 reg = 0;
- memcpy(&reg, &buf[length - rem], rem);
- mei_reg_write(dev, H_CB_WW, reg);
- }
-
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->host_hw_state |= H_IG;
- mei_hcsr_set(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
- if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
- return -EIO;
-
- return 0;
-}
-
-/**
- * mei_count_full_read_slots - counts read full slots.
- *
- * @dev: the device structure
- *
- * returns -EOVERFLOW if overflow, otherwise filled slots count
- */
-int mei_count_full_read_slots(struct mei_device *dev)
-{
- char read_ptr, write_ptr;
- unsigned char buffer_depth, filled_slots;
-
- dev->me_hw_state = mei_mecsr_read(dev);
- buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24);
- read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8);
- write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16);
- filled_slots = (unsigned char) (write_ptr - read_ptr);
-
- /* check for overflow */
- if (filled_slots > buffer_depth)
- return -EOVERFLOW;
-
- dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
- return (int)filled_slots;
-}
-
-/**
- * mei_read_slots - reads a message from mei device.
- *
- * @dev: the device structure
- * @buffer: buffer to read the message into
- * @buffer_length: number of bytes to read
- */
-void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
- unsigned long buffer_length)
-{
- u32 *reg_buf = (u32 *)buffer;
-
- for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
- *reg_buf++ = mei_mecbrw_read(dev);
-
- if (buffer_length > 0) {
- u32 reg = mei_mecbrw_read(dev);
- memcpy(reg_buf, &reg, buffer_length);
- }
-
- dev->host_hw_state |= H_IG;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_flow_ctrl_creds - checks flow_control credentials.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
- * -ENOENT if mei_cl is not present
- * -EINVAL if single_recv_buf == 0
- */
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
-{
- int i;
-
- if (!dev->me_clients_num)
- return 0;
-
- if (cl->mei_flow_ctrl_creds > 0)
- return 1;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->mei_flow_ctrl_creds) {
- if (WARN_ON(me_cl->props.single_recv_buf == 0))
- return -EINVAL;
- return 1;
- } else {
- return 0;
- }
- }
- }
- return -ENOENT;
-}
-
-/**
- * mei_flow_ctrl_reduce - reduces flow_control.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- * returns
- * 0 on success
- * -ENOENT when me client is not found
- * -EINVAL when ctrl credits are <= 0
- */
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
-{
- int i;
-
- if (!dev->me_clients_num)
- return -ENOENT;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->props.single_recv_buf != 0) {
- if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- dev->me_clients[i].mei_flow_ctrl_creds--;
- } else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- cl->mei_flow_ctrl_creds--;
- }
- return 0;
- }
- }
- return -ENOENT;
-}
-
-/**
- * mei_send_flow_control - sends flow control to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_flow_control *flow_ctrl;
- const size_t len = sizeof(struct hbm_flow_control);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1];
- memset(flow_ctrl, 0, len);
- flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD;
- flow_ctrl->host_addr = cl->host_client_id;
- flow_ctrl->me_addr = cl->me_client_id;
- /* FIXME: reserved !? */
- memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved));
- dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
- cl->host_client_id, cl->me_client_id);
-
- return mei_write_message(dev, mei_hdr,
- (unsigned char *) flow_ctrl, len);
-}
-
-/**
- * mei_other_client_is_connecting - checks if other
- * client with the same client id is connected.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if other client is connected, 0 - otherwise.
- */
-int mei_other_client_is_connecting(struct mei_device *dev,
- struct mei_cl *cl)
-{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
-
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if ((cl_pos->state == MEI_FILE_CONNECTING) &&
- (cl_pos != cl) &&
- cl->me_client_id == cl_pos->me_client_id)
- return 1;
-
- }
- return 0;
-}
-
-/**
- * mei_disconnect - sends disconnect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_client_connect_request *req;
- const size_t len = sizeof(struct hbm_client_connect_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1];
- memset(req, 0, len);
- req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
- req->host_addr = cl->host_client_id;
- req->me_addr = cl->me_client_id;
- req->reserved = 0;
-
- return mei_write_message(dev, mei_hdr, (unsigned char *)req, len);
-}
-
-/**
- * mei_connect - sends connect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_connect(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_client_connect_request *req;
- const size_t len = sizeof(struct hbm_client_connect_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
- req->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
- req->host_addr = cl->host_client_id;
- req->me_addr = cl->me_client_id;
- req->reserved = 0;
-
- return mei_write_message(dev, mei_hdr, (unsigned char *) req, len);
-}
diff --git a/drivers/misc/mei/interface.h b/drivers/misc/mei/interface.h
deleted file mode 100644
index ec6c785a3961..000000000000
--- a/drivers/misc/mei/interface.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-
-
-#ifndef _MEI_INTERFACE_H_
-#define _MEI_INTERFACE_H_
-
-#include <linux/mei.h>
-#include "mei_dev.h"
-
-
-
-void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer,
- unsigned long buffer_length);
-
-int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *header,
- unsigned char *write_buffer,
- unsigned long write_length);
-
-bool mei_hbuf_is_empty(struct mei_device *dev);
-
-int mei_hbuf_empty_slots(struct mei_device *dev);
-
-static inline size_t mei_hbuf_max_data(const struct mei_device *dev)
-{
- return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
-}
-
-/* get slots (dwords) from a message length + header (bytes) */
-static inline unsigned char mei_data2slots(size_t length)
-{
- return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
-}
-
-int mei_count_full_read_slots(struct mei_device *dev);
-
-
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl);
-
-
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev);
-/*
- * mei_watchdog_register - registers the watchdog interface
- * once a connection to the WD client has been established
- * @dev - mei device
- */
-void mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister - unregisters the watchdog interface
- * @dev - mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl);
-int mei_other_client_is_connecting(struct mei_device *dev, struct mei_cl *cl);
-int mei_connect(struct mei_device *dev, struct mei_cl *cl);
-
-#endif /* _MEI_INTERFACE_H_ */
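
The deleted interface.h also documents the slot arithmetic the driver uses everywhere: one slot is a 32-bit word, and a message occupies DIV_ROUND_UP(header + payload, 4) slots (mei_data2slots). A tiny stand-alone illustration of that computation, written as plain user-space C for clarity; the 4-byte header size reflects struct mei_msg_hdr packing into a single dword:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MEI_HDR_SIZE		4	/* struct mei_msg_hdr occupies one dword */

/* same computation as the removed mei_data2slots() */
static unsigned char data2slots(size_t length)
{
	return DIV_ROUND_UP(MEI_HDR_SIZE + length, 4);
}

int main(void)
{
	/* a 6-byte payload needs 4 + 6 = 10 bytes, i.e. 3 slots on the wire */
	printf("6-byte payload -> %d slots\n", data2slots(6));
	return 0;
}
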
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 04fa2134615e..3535b2676c97 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -21,41 +21,21 @@
#include <linux/fs.h>
#include <linux/jiffies.h>
-#include "mei_dev.h"
#include <linux/mei.h>
-#include "hw.h"
-#include "interface.h"
-
-
-/**
- * mei_interrupt_quick_handler - The ISR of the MEI device
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- */
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
-{
- struct mei_device *dev = (struct mei_device *) dev_id;
- u32 csr_reg = mei_hcsr_read(dev);
-
- if ((csr_reg & H_IS) != H_IS)
- return IRQ_NONE;
- /* clear H_IS bit in H_CSR */
- mei_reg_write(dev, H_CSR, csr_reg);
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
- return IRQ_WAKE_THREAD;
-}
/**
- * _mei_cmpl - processes completed operation.
+ * mei_irq_complete_handler - processes completed operation.
*
* @cl: private data of the file object.
* @cb_pos: callback block.
*/
-static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
{
if (cb_pos->fop_type == MEI_FOP_WRITE) {
mei_io_cb_free(cb_pos);
@@ -150,8 +130,8 @@ quit:
dev_dbg(&dev->pdev->dev, "message read\n");
if (!buffer) {
mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
- dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
- *(u32 *) dev->rd_msg_buf);
+ dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
+ MEI_HDR_PRM(mei_hdr));
}
return 0;
@@ -179,7 +159,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
- if (mei_disconnect(dev, cl)) {
+ if (mei_hbm_cl_disconnect_req(dev, cl)) {
cl->status = 0;
cb_pos->buf_idx = 0;
list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -195,440 +175,6 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
return 0;
}
-/**
- * is_treat_specially_client - checks if the connect response matches
- * the given client and, if so, updates the client state.
- *
- * @cl: private data of the file object
- * @rs: connect response bus message
- *
- */
-static bool is_treat_specially_client(struct mei_cl *cl,
- struct hbm_client_connect_response *rs)
-{
-
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
- if (!rs->status) {
- cl->state = MEI_FILE_CONNECTED;
- cl->status = 0;
-
- } else {
- cl->state = MEI_FILE_DISCONNECTED;
- cl->status = -ENODEV;
- }
- cl->timer_count = 0;
-
- return true;
- }
- return false;
-}
-
-/**
- * mei_client_connect_response - connects to response irq routine
- *
- * @dev: the device structure
- * @rs: connect response bus message
- */
-static void mei_client_connect_response(struct mei_device *dev,
- struct hbm_client_connect_response *rs)
-{
-
- struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
-
- dev_dbg(&dev->pdev->dev,
- "connect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
-
- /* if WD or iamthif client treat specially */
-
- if (is_treat_specially_client(&(dev->wd_cl), rs)) {
- dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
- mei_watchdog_register(dev);
-
- return;
- }
-
- if (is_treat_specially_client(&(dev->iamthif_cl), rs)) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- return;
- }
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
-
- cl = pos->cl;
- if (!cl) {
- list_del(&pos->list);
- return;
- }
- if (pos->fop_type == MEI_FOP_IOCTL) {
- if (is_treat_specially_client(cl, rs)) {
- list_del(&pos->list);
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
- }
- }
-}
-
-/**
- * mei_client_disconnect_response - disconnects from response irq routine
- *
- * @dev: the device structure
- * @rs: disconnect response bus message
- */
-static void mei_client_disconnect_response(struct mei_device *dev,
- struct hbm_client_connect_response *rs)
-{
- struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
-
- dev_dbg(&dev->pdev->dev,
- "disconnect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
-
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
- cl = pos->cl;
-
- if (!cl) {
- list_del(&pos->list);
- return;
- }
-
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
-
- list_del(&pos->list);
- if (!rs->status)
- cl->state = MEI_FILE_DISCONNECTED;
-
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
- }
-}
-
-/**
- * same_flow_addr - tells if they have the same address.
- *
- * @file: private data of the file object.
- * @flow: flow control.
- *
- * returns !=0, same; 0,not.
- */
-static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow)
-{
- return (cl->host_client_id == flow->host_addr &&
- cl->me_client_id == flow->me_addr);
-}
-
-/**
- * add_single_flow_creds - adds single buffer credentials.
- *
- * @dev: the device structure.
- * @flow: flow control.
- */
-static void add_single_flow_creds(struct mei_device *dev,
- struct hbm_flow_control *flow)
-{
- struct mei_me_client *client;
- int i;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- client = &dev->me_clients[i];
- if (client && flow->me_addr == client->client_id) {
- if (client->props.single_recv_buf) {
- client->mei_flow_ctrl_creds++;
- dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
- flow->me_addr);
- dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
- client->mei_flow_ctrl_creds);
- } else {
- BUG(); /* error in flow control */
- }
- }
- }
-}
-
-/**
- * mei_client_flow_control_response - flow control response irq routine
- *
- * @dev: the device structure
- * @flow_control: flow control response bus message
- */
-static void mei_client_flow_control_response(struct mei_device *dev,
- struct hbm_flow_control *flow_control)
-{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
-
- if (!flow_control->host_addr) {
- /* single receive buffer */
- add_single_flow_creds(dev, flow_control);
- } else {
- /* normal connection */
- list_for_each_entry_safe(cl_pos, cl_next,
- &dev->file_list, link) {
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n");
-
- dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n",
- cl_pos->host_client_id,
- cl_pos->me_client_id);
- dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
- flow_control->host_addr,
- flow_control->me_addr);
- if (same_flow_addr(cl_pos, flow_control)) {
- dev_dbg(&dev->pdev->dev, "recv ctrl msg for host %d ME %d.\n",
- flow_control->host_addr,
- flow_control->me_addr);
- cl_pos->mei_flow_ctrl_creds++;
- dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
- cl_pos->mei_flow_ctrl_creds);
- break;
- }
- }
- }
-}
-
-/**
- * same_disconn_addr - tells if they have the same address
- *
- * @file: private data of the file object.
- * @disconn: disconnection request.
- *
- * returns !=0, same; 0,not.
- */
-static int same_disconn_addr(struct mei_cl *cl,
- struct hbm_client_connect_request *req)
-{
- return (cl->host_client_id == req->host_addr &&
- cl->me_client_id == req->me_addr);
-}
-
-/**
- * mei_client_disconnect_request - disconnects from request irq routine
- *
- * @dev: the device structure.
- * @disconnect_req: disconnect request bus message.
- */
-static void mei_client_disconnect_request(struct mei_device *dev,
- struct hbm_client_connect_request *disconnect_req)
-{
- struct hbm_client_connect_response *disconnect_res;
- struct mei_cl *pos, *next;
- const size_t len = sizeof(struct hbm_client_connect_response);
-
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if (same_disconn_addr(pos, disconnect_req)) {
- dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
- disconnect_req->host_addr,
- disconnect_req->me_addr);
- pos->state = MEI_FILE_DISCONNECTED;
- pos->timer_count = 0;
- if (pos == &dev->wd_cl)
- dev->wd_pending = false;
- else if (pos == &dev->iamthif_cl)
- dev->iamthif_timer = 0;
-
- /* prepare disconnect response */
- (void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
- disconnect_res =
- (struct hbm_client_connect_response *)
- &dev->wr_ext_msg.data;
- disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
- disconnect_res->host_addr = pos->host_client_id;
- disconnect_res->me_addr = pos->me_client_id;
- disconnect_res->status = 0;
- break;
- }
- }
-}
-
-/**
- * mei_irq_thread_read_bus_message - bottom half read routine after ISR to
- * handle the read bus message cmd processing.
- *
- * @dev: the device structure
- * @mei_hdr: header of bus message
- */
-static void mei_irq_thread_read_bus_message(struct mei_device *dev,
- struct mei_msg_hdr *mei_hdr)
-{
- struct mei_bus_message *mei_msg;
- struct mei_me_client *me_client;
- struct hbm_host_version_response *version_res;
- struct hbm_client_connect_response *connect_res;
- struct hbm_client_connect_response *disconnect_res;
- struct hbm_client_connect_request *disconnect_req;
- struct hbm_flow_control *flow_control;
- struct hbm_props_response *props_res;
- struct hbm_host_enum_response *enum_res;
- struct hbm_host_stop_request *stop_req;
-
- /* read the message to our buffer */
- BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
- mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
- mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
-
- switch (mei_msg->hbm_cmd) {
- case HOST_START_RES_CMD:
- version_res = (struct hbm_host_version_response *) mei_msg;
- if (version_res->host_version_supported) {
- dev->version.major_version = HBM_MAJOR_VERSION;
- dev->version.minor_version = HBM_MINOR_VERSION;
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->init_clients_state == MEI_START_MESSAGE) {
- dev->init_clients_timer = 0;
- mei_host_enum_clients_message(dev);
- } else {
- dev->recvd_msg = false;
- dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
- mei_reset(dev, 1);
- return;
- }
- } else {
- u32 *buf = dev->wr_msg_buf;
- const size_t len = sizeof(struct hbm_host_stop_request);
-
- dev->version = version_res->me_max_version;
-
- /* send stop message */
- mei_hdr = mei_hbm_hdr(&buf[0], len);
- stop_req = (struct hbm_host_stop_request *)&buf[1];
- memset(stop_req, 0, len);
- stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
- stop_req->reason = DRIVER_STOP_REQUEST;
-
- mei_write_message(dev, mei_hdr,
- (unsigned char *)stop_req, len);
- dev_dbg(&dev->pdev->dev, "version mismatch.\n");
- return;
- }
-
- dev->recvd_msg = true;
- dev_dbg(&dev->pdev->dev, "host start response message received.\n");
- break;
-
- case CLIENT_CONNECT_RES_CMD:
- connect_res = (struct hbm_client_connect_response *) mei_msg;
- mei_client_connect_response(dev, connect_res);
- dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
- wake_up(&dev->wait_recvd_msg);
- break;
-
- case CLIENT_DISCONNECT_RES_CMD:
- disconnect_res = (struct hbm_client_connect_response *) mei_msg;
- mei_client_disconnect_response(dev, disconnect_res);
- dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
- wake_up(&dev->wait_recvd_msg);
- break;
-
- case MEI_FLOW_CONTROL_CMD:
- flow_control = (struct hbm_flow_control *) mei_msg;
- mei_client_flow_control_response(dev, flow_control);
- dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
- break;
-
- case HOST_CLIENT_PROPERTIES_RES_CMD:
- props_res = (struct hbm_props_response *)mei_msg;
- me_client = &dev->me_clients[dev->me_client_presentation_num];
-
- if (props_res->status || !dev->me_clients) {
- dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
- mei_reset(dev, 1);
- return;
- }
-
- if (me_client->client_id != props_res->address) {
- dev_err(&dev->pdev->dev,
- "Host client properties reply mismatch\n");
- mei_reset(dev, 1);
-
- return;
- }
-
- if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
- dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
- dev_err(&dev->pdev->dev,
- "Unexpected client properties reply\n");
- mei_reset(dev, 1);
-
- return;
- }
-
- me_client->props = props_res->client_properties;
- dev->me_client_index++;
- dev->me_client_presentation_num++;
-
- mei_host_client_enumerate(dev);
-
- break;
-
- case HOST_ENUM_RES_CMD:
- enum_res = (struct hbm_host_enum_response *) mei_msg;
- memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
- dev->init_clients_timer = 0;
- dev->me_client_presentation_num = 0;
- dev->me_client_index = 0;
- mei_allocate_me_clients_storage(dev);
- dev->init_clients_state =
- MEI_CLIENT_PROPERTIES_MESSAGE;
-
- mei_host_client_enumerate(dev);
- } else {
- dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
- mei_reset(dev, 1);
- return;
- }
- break;
-
- case HOST_STOP_RES_CMD:
- dev->dev_state = MEI_DEV_DISABLED;
- dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
- mei_reset(dev, 1);
- break;
-
- case CLIENT_DISCONNECT_REQ_CMD:
- /* search for client */
- disconnect_req = (struct hbm_client_connect_request *)mei_msg;
- mei_client_disconnect_request(dev, disconnect_req);
- break;
-
- case ME_STOP_REQ_CMD:
- {
- /* prepare stop request: sent in next interrupt event */
-
- const size_t len = sizeof(struct hbm_host_stop_request);
-
- mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
- stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data;
- memset(stop_req, 0, len);
- stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
- stop_req->reason = DRIVER_STOP_REQUEST;
- break;
- }
- default:
- BUG();
- break;
-
- }
-}
-
/**
* _mei_hb_read - processes read related operation.
@@ -655,7 +201,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
- if (mei_send_flow_control(dev, cl)) {
+ if (mei_hbm_cl_flow_control_req(dev, cl)) {
cl->status = -ENODEV;
cb_pos->buf_idx = 0;
list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -691,8 +237,8 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
}
cl->state = MEI_FILE_CONNECTING;
- *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
- if (mei_connect(dev, cl)) {
+ *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
+ if (mei_hbm_cl_connect_req(dev, cl)) {
cl->status = -ENODEV;
cb_pos->buf_idx = 0;
list_del(&cb_pos->list);
@@ -717,25 +263,24 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr mei_hdr;
struct mei_cl *cl = cb->cl;
size_t len = cb->request_buffer.size - cb->buf_idx;
size_t msg_slots = mei_data2slots(len);
- mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->reserved = 0;
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
if (*slots >= msg_slots) {
- mei_hdr->length = len;
- mei_hdr->msg_complete = 1;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
msg_slots = *slots;
len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr->length = len;
- mei_hdr->msg_complete = 0;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 0;
} else {
/* wait for next time the host buffer is empty */
return 0;
@@ -743,23 +288,22 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
cb->request_buffer.size, cb->buf_idx);
- dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
- mei_hdr->length, mei_hdr->msg_complete);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
- if (mei_write_message(dev, mei_hdr,
- cb->request_buffer.data + cb->buf_idx, len)) {
+ if (mei_write_message(dev, &mei_hdr,
+ cb->request_buffer.data + cb->buf_idx)) {
cl->status = -ENODEV;
list_move_tail(&cb->list, &cmpl_list->list);
return -ENODEV;
}
- if (mei_flow_ctrl_reduce(dev, cl))
+ if (mei_cl_flow_ctrl_reduce(cl))
return -ENODEV;
cl->status = 0;
- cb->buf_idx += mei_hdr->length;
- if (mei_hdr->msg_complete)
+ cb->buf_idx += mei_hdr.length;
+ if (mei_hdr.msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list.list);
return 0;
@@ -769,15 +313,14 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
* mei_irq_thread_read_handler - bottom half read routine after ISR to
* handle the read processing.
*
- * @cmpl_list: An instance of our list structure
* @dev: the device structure
+ * @cmpl_list: An instance of our list structure
* @slots: slots to read.
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
- struct mei_device *dev,
- s32 *slots)
+int mei_irq_read_handler(struct mei_device *dev,
+ struct mei_cl_cb *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
struct mei_cl *cl_pos = NULL;
@@ -785,13 +328,13 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
int ret = 0;
if (!dev->rd_msg_hdr) {
- dev->rd_msg_hdr = mei_mecbrw_read(dev);
+ dev->rd_msg_hdr = mei_read_hdr(dev);
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
(*slots)--;
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
}
mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
- dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_hdr->reserved || !dev->rd_msg_hdr) {
dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
@@ -830,19 +373,18 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
/* decide where to read the message too */
if (!mei_hdr->host_addr) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
- mei_irq_thread_read_bus_message(dev, mei_hdr);
+ mei_hbm_dispatch(dev, mei_hdr);
dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
(MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
(dev->iamthif_state == MEI_IAMTHIF_READING)) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
- dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
- mei_hdr->length);
+
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
if (ret)
goto end;
-
} else {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
ret = mei_irq_thread_read_client_message(cmpl_list,
@@ -869,15 +411,15 @@ end:
/**
- * mei_irq_thread_write_handler - bottom half write routine after
- * ISR to handle the write processing.
+ * mei_irq_write_handler - dispatch write requests
+ * after irq received
*
* @dev: the device structure
* @cmpl_list: An instance of our list structure
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_write_handler(struct mei_device *dev,
+int mei_irq_write_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list)
{
@@ -887,7 +429,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
s32 slots;
int ret;
- if (!mei_hbuf_is_empty(dev)) {
+ if (!mei_hbuf_is_ready(dev)) {
dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
return 0;
}
@@ -930,16 +472,16 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
if (dev->wr_ext_msg.hdr.length) {
mei_write_message(dev, &dev->wr_ext_msg.hdr,
- dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length);
+ dev->wr_ext_msg.data);
slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
dev->wr_ext_msg.hdr.length = 0;
}
if (dev->dev_state == MEI_DEV_ENABLED) {
if (dev->wd_pending &&
- mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+ mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
if (mei_wd_send(dev))
dev_dbg(&dev->pdev->dev, "wd send failed.\n");
- else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
+ else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
return -ENODEV;
dev->wd_pending = false;
@@ -978,7 +520,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
break;
case MEI_FOP_IOCTL:
/* connect message */
- if (mei_other_client_is_connecting(dev, cl))
+ if (mei_cl_is_other_connecting(cl))
continue;
ret = _mei_irq_thread_ioctl(dev, &slots, pos,
cl, cmpl_list);
@@ -998,7 +540,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
cl = pos->cl;
if (cl == NULL)
continue;
- if (mei_flow_ctrl_creds(dev, cl) <= 0) {
+ if (mei_cl_flow_ctrl_creds(cl) <= 0) {
dev_dbg(&dev->pdev->dev,
"No flow control credentials for client %d, not sending.\n",
cl->host_client_id);
@@ -1123,115 +665,3 @@ out:
mutex_unlock(&dev->device_lock);
}
-/**
- * mei_interrupt_thread_handler - function called after ISR to handle the interrupt
- * processing.
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- *
- */
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
-{
- struct mei_device *dev = (struct mei_device *) dev_id;
- struct mei_cl_cb complete_list;
- struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
- struct mei_cl *cl;
- s32 slots;
- int rets;
- bool bus_message_received;
-
-
- dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
- /* initialize our complete list */
- mutex_lock(&dev->device_lock);
- mei_io_list_init(&complete_list);
- dev->host_hw_state = mei_hcsr_read(dev);
-
- /* Ack the interrupt here
- * In case of MSI we don't go through the quick handler */
- if (pci_dev_msi_enabled(dev->pdev))
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
-
- dev->me_hw_state = mei_mecsr_read(dev);
-
- /* check if ME wants a reset */
- if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
- dev->dev_state != MEI_DEV_RESETING &&
- dev->dev_state != MEI_DEV_INITIALIZING) {
- dev_dbg(&dev->pdev->dev, "FW not ready.\n");
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- }
-
- /* check if we need to start the dev */
- if ((dev->host_hw_state & H_RDY) == 0) {
- if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
- dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
- dev->host_hw_state |= (H_IE | H_IG | H_RDY);
- mei_hcsr_set(dev);
- dev->dev_state = MEI_DEV_INIT_CLIENTS;
- dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
- /* link is established
- * start sending messages.
- */
- mei_host_start_message(dev);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- } else {
- dev_dbg(&dev->pdev->dev, "FW not ready.\n");
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- }
- }
- /* check slots available for reading */
- slots = mei_count_full_read_slots(dev);
- while (slots > 0) {
- /* we have urgent data to send so break the read */
- if (dev->wr_ext_msg.hdr.length)
- break;
- dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
- dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
- rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
- if (rets)
- goto end;
- }
- rets = mei_irq_thread_write_handler(dev, &complete_list);
-end:
- dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
-
- bus_message_received = false;
- if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
- dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
- bus_message_received = true;
- }
- mutex_unlock(&dev->device_lock);
- if (bus_message_received) {
- dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
- wake_up_interruptible(&dev->wait_recvd_msg);
- bus_message_received = false;
- }
- if (list_empty(&complete_list.list))
- return IRQ_HANDLED;
-
-
- list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
- cl = cb_pos->cl;
- list_del(&cb_pos->list);
- if (cl) {
- if (cl != &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "completing call back.\n");
- _mei_cmpl(cl, cb_pos);
- cb_pos = NULL;
- } else if (cl == &dev->iamthif_cl) {
- mei_amthif_complete(dev, cb_pos);
- }
- }
- }
- return IRQ_HANDLED;
-}
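
The two handlers removed here are the classic split-ISR arrangement: a quick handler that only acks H_IS and returns IRQ_WAKE_THREAD, and a threaded bottom half that does the slot accounting under device_lock (this series moves that work behind the new hw-me.c and interrupt.c entry points). A hedged sketch of how such a pair is typically registered at probe time; the registration itself is not part of this hunk and the wrapper name is made up for illustration:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include "mei_dev.h"

/* Sketch only: wiring a quick handler plus a threaded bottom half. */
static int mei_register_irq_sketch(struct pci_dev *pdev, struct mei_device *dev)
{
	return request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,	/* acks H_IS, returns IRQ_WAKE_THREAD */
			mei_interrupt_thread_handler,	/* dispatches the read/write work */
			IRQF_SHARED, KBUILD_MODNAME, dev);
}
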
diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c
deleted file mode 100644
index eb93a1b53b9b..000000000000
--- a/drivers/misc/mei/iorw.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/aio.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-
-
-#include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-/**
- * mei_io_cb_free - free mei_cl_cb related memory
- *
- * @cb: mei callback struct
- */
-void mei_io_cb_free(struct mei_cl_cb *cb)
-{
- if (cb == NULL)
- return;
-
- kfree(cb->request_buffer.data);
- kfree(cb->response_buffer.data);
- kfree(cb);
-}
-/**
- * mei_io_cb_init - allocate and initialize io callback
- *
- * @cl - mei client
- * @file: pointer to file structure
- *
- * returns mei_cl_cb pointer or NULL;
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
-{
- struct mei_cl_cb *cb;
-
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
- if (!cb)
- return NULL;
-
- mei_io_list_init(cb);
-
- cb->file_object = fp;
- cb->cl = cl;
- cb->buf_idx = 0;
- return cb;
-}
-
-
-/**
- * mei_io_cb_alloc_req_buf - allocate request buffer
- *
- * @cb - io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
-{
- if (!cb)
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
- if (!cb->request_buffer.data)
- return -ENOMEM;
- cb->request_buffer.size = length;
- return 0;
-}
-/**
- * mei_io_cb_alloc_resp_buf - allocate response buffer
- *
- * @cb - io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
-{
- if (!cb)
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
- if (!cb->response_buffer.data)
- return -ENOMEM;
- cb->response_buffer.size = length;
- return 0;
-}
-
-
-/**
- * mei_me_cl_by_id - return index to me_clients for client_id
- *
- * @dev: the device structure
- * @client_id: me client id
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns index on success, -ENOENT on failure.
- */
-
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
-{
- int i;
- for (i = 0; i < dev->me_clients_num; i++)
- if (dev->me_clients[i].client_id == client_id)
- break;
- if (WARN_ON(dev->me_clients[i].client_id != client_id))
- return -ENOENT;
-
- if (i == dev->me_clients_num)
- return -ENOENT;
-
- return i;
-}
-
-/**
- * mei_ioctl_connect_client - the connect to fw client IOCTL function
- *
- * @dev: the device structure
- * @data: IOCTL connect data, input and output parameters
- * @file: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_ioctl_connect_client(struct file *file,
- struct mei_connect_client_data *data)
-{
- struct mei_device *dev;
- struct mei_cl_cb *cb;
- struct mei_client *client;
- struct mei_cl *cl;
- long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
- int i;
- int err;
- int rets;
-
- cl = file->private_data;
- if (WARN_ON(!cl || !cl->dev))
- return -ENODEV;
-
- dev = cl->dev;
-
- dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
-
- /* buffered ioctl cb */
- cb = mei_io_cb_init(cl, file);
- if (!cb) {
- rets = -ENOMEM;
- goto end;
- }
-
- cb->fop_type = MEI_FOP_IOCTL;
-
- if (dev->dev_state != MEI_DEV_ENABLED) {
- rets = -ENODEV;
- goto end;
- }
- if (cl->state != MEI_FILE_INITIALIZING &&
- cl->state != MEI_FILE_DISCONNECTED) {
- rets = -EBUSY;
- goto end;
- }
-
- /* find ME client we're trying to connect to */
- i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
- if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
- cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
- }
-
- dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
- cl->me_client_id);
- dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
- dev->me_clients[i].props.protocol_version);
- dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
- dev->me_clients[i].props.max_msg_length);
-
- /* if we're connecting to amthi client then we will use the
- * existing connection
- */
- if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
- dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
- if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
- rets = -ENODEV;
- goto end;
- }
- clear_bit(cl->host_client_id, dev->host_clients_map);
- mei_me_cl_unlink(dev, cl);
-
- kfree(cl);
- cl = NULL;
- file->private_data = &dev->iamthif_cl;
-
- client = &data->out_client_properties;
- client->max_msg_length =
- dev->me_clients[i].props.max_msg_length;
- client->protocol_version =
- dev->me_clients[i].props.protocol_version;
- rets = dev->iamthif_cl.status;
-
- goto end;
- }
-
- if (cl->state != MEI_FILE_CONNECTING) {
- rets = -ENODEV;
- goto end;
- }
-
-
- /* prepare the output buffer */
- client = &data->out_client_properties;
- client->max_msg_length = dev->me_clients[i].props.max_msg_length;
- client->protocol_version = dev->me_clients[i].props.protocol_version;
- dev_dbg(&dev->pdev->dev, "Can connect?\n");
- if (dev->mei_host_buffer_is_empty
- && !mei_other_client_is_connecting(dev, cl)) {
- dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
- dev->mei_host_buffer_is_empty = false;
- if (mei_connect(dev, cl)) {
- dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
- rets = -ENODEV;
- goto end;
- } else {
- dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- }
-
-
- } else {
- dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
- dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
- }
- mutex_unlock(&dev->device_lock);
- err = wait_event_timeout(dev->wait_recvd_msg,
- (MEI_FILE_CONNECTED == cl->state ||
- MEI_FILE_DISCONNECTED == cl->state), timeout);
-
- mutex_lock(&dev->device_lock);
- if (MEI_FILE_CONNECTED == cl->state) {
- dev_dbg(&dev->pdev->dev, "successfully connected to FW client.\n");
- rets = cl->status;
- goto end;
- } else {
- dev_dbg(&dev->pdev->dev, "failed to connect to FW client.cl->state = %d.\n",
- cl->state);
- if (!err) {
- dev_dbg(&dev->pdev->dev,
- "wait_event_interruptible_timeout failed on client"
- " connect message fw response message.\n");
- }
- rets = -EFAULT;
-
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
- goto end;
- }
- rets = 0;
-end:
- dev_dbg(&dev->pdev->dev, "free connect cb memory.");
- mei_io_cb_free(cb);
- return rets;
-}
-
-/**
- * mei_start_read - queues a read request for the client.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl_cb *cb;
- int rets;
- int i;
-
- if (cl->state != MEI_FILE_CONNECTED)
- return -ENODEV;
-
- if (dev->dev_state != MEI_DEV_ENABLED)
- return -ENODEV;
-
- if (cl->read_pending || cl->read_cb) {
- dev_dbg(&dev->pdev->dev, "read is pending.\n");
- return -EBUSY;
- }
- i = mei_me_cl_by_id(dev, cl->me_client_id);
- if (i < 0) {
- dev_err(&dev->pdev->dev, "no such me client %d\n",
- cl->me_client_id);
- return -ENODEV;
- }
-
- cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
-
- rets = mei_io_cb_alloc_resp_buf(cb,
- dev->me_clients[i].props.max_msg_length);
- if (rets)
- goto err;
-
- cb->fop_type = MEI_FOP_READ;
- cl->read_cb = cb;
- if (dev->mei_host_buffer_is_empty) {
- dev->mei_host_buffer_is_empty = false;
- if (mei_send_flow_control(dev, cl)) {
- rets = -ENODEV;
- goto err;
- }
- list_add_tail(&cb->list, &dev->read_list.list);
- } else {
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
- }
- return rets;
-err:
- mei_io_cb_free(cb);
- return rets;
-}
-
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 43fb52ff98ad..903f809b21f7 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -37,79 +37,11 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
-#include "mei_dev.h"
#include <linux/mei.h>
-#include "interface.h"
-
-/* AMT device is a singleton on the platform */
-static struct pci_dev *mei_pdev;
-
-/* mei_pci_tbl - PCI Device ID Table */
-static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
-
- /* required last entry */
- {0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
-static DEFINE_MUTEX(mei_mutex);
-
-
-/**
- * find_read_list_entry - find read list entry
- *
- * @dev: device structure
- * @file: pointer to file structure
- *
- * returns cb on success, NULL on error
- */
-static struct mei_cl_cb *find_read_list_entry(
- struct mei_device *dev,
- struct mei_cl *cl)
-{
- struct mei_cl_cb *pos = NULL;
- struct mei_cl_cb *next = NULL;
-
- dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
- list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
- if (mei_cl_cmp_id(cl, pos->cl))
- return pos;
- return NULL;
-}
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
/**
* mei_open - the open function
@@ -121,16 +53,20 @@ static struct mei_cl_cb *find_read_list_entry(
*/
static int mei_open(struct inode *inode, struct file *file)
{
+ struct miscdevice *misc = file->private_data;
+ struct pci_dev *pdev;
struct mei_cl *cl;
struct mei_device *dev;
- unsigned long cl_id;
+
int err;
err = -ENODEV;
- if (!mei_pdev)
+ if (!misc->parent)
goto out;
- dev = pci_get_drvdata(mei_pdev);
+ pdev = container_of(misc->parent, struct pci_dev, dev);
+
+ dev = pci_get_drvdata(pdev);
if (!dev)
goto out;
@@ -153,24 +89,9 @@ static int mei_open(struct inode *inode, struct file *file)
goto out_unlock;
}
- cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
- if (cl_id >= MEI_CLIENTS_MAX) {
- dev_err(&dev->pdev->dev, "client_id exceded %d",
- MEI_CLIENTS_MAX) ;
+ err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
+ if (err)
goto out_unlock;
- }
-
- cl->host_client_id = cl_id;
-
- dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
-
- dev->open_handle_count++;
-
- list_add_tail(&cl->link, &dev->file_list);
-
- set_bit(cl->host_client_id, dev->host_clients_map);
- cl->state = MEI_FILE_INITIALIZING;
- cl->sm_state = 0;
file->private_data = cl;
mutex_unlock(&dev->device_lock);
@@ -216,7 +137,7 @@ static int mei_release(struct inode *inode, struct file *file)
"ME client = %d\n",
cl->host_client_id,
cl->me_client_id);
- rets = mei_disconnect_host_client(dev, cl);
+ rets = mei_cl_disconnect(cl);
}
mei_cl_flush_queues(cl);
dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
@@ -227,12 +148,13 @@ static int mei_release(struct inode *inode, struct file *file)
clear_bit(cl->host_client_id, dev->host_clients_map);
dev->open_handle_count--;
}
- mei_me_cl_unlink(dev, cl);
+ mei_cl_unlink(cl);
+
/* free read cb */
cb = NULL;
if (cl->read_cb) {
- cb = find_read_list_entry(dev, cl);
+ cb = mei_cl_find_read_cb(cl);
/* Remove entry from read list */
if (cb)
list_del(&cb->list);
@@ -322,7 +244,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
- err = mei_start_read(dev, cl);
+ err = mei_cl_read_start(cl);
if (err && err != -EBUSY) {
dev_dbg(&dev->pdev->dev,
"mei start read failure with status = %d\n", err);
@@ -393,14 +315,13 @@ copy_buffer:
goto out;
free:
- cb_pos = find_read_list_entry(dev, cl);
+ cb_pos = mei_cl_find_read_cb(cl);
/* Remove entry from read list */
if (cb_pos)
list_del(&cb_pos->list);
mei_io_cb_free(cb);
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
- cl->read_pending = 0;
out:
dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
mutex_unlock(&dev->device_lock);
@@ -475,16 +396,15 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
/* free entry used in read */
if (cl->reading_state == MEI_READ_COMPLETE) {
*offset = 0;
- write_cb = find_read_list_entry(dev, cl);
+ write_cb = mei_cl_find_read_cb(cl);
if (write_cb) {
list_del(&write_cb->list);
mei_io_cb_free(write_cb);
write_cb = NULL;
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
- cl->read_pending = 0;
}
- } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
+ } else if (cl->reading_state == MEI_IDLE)
*offset = 0;
@@ -519,7 +439,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
if (rets) {
dev_err(&dev->pdev->dev,
- "amthi write failed with status = %d\n", rets);
+ "amthif write failed with status = %d\n", rets);
goto err;
}
mutex_unlock(&dev->device_lock);
@@ -530,20 +450,20 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
- rets = mei_flow_ctrl_creds(dev, cl);
+ rets = mei_cl_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
- if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
+ if (rets == 0 || !dev->hbuf_is_ready) {
write_cb->buf_idx = 0;
mei_hdr.msg_complete = 0;
cl->writing_state = MEI_WRITING;
goto out;
}
- dev->mei_host_buffer_is_empty = false;
- if (length > mei_hbuf_max_data(dev)) {
- mei_hdr.length = mei_hbuf_max_data(dev);
+ dev->hbuf_is_ready = false;
+ if (length > mei_hbuf_max_len(dev)) {
+ mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
mei_hdr.length = length;
@@ -552,10 +472,10 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
- dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
- *((u32 *) &mei_hdr));
- if (mei_write_message(dev, &mei_hdr,
- write_cb->request_buffer.data, mei_hdr.length)) {
+
+ dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
+ MEI_HDR_PRM(&mei_hdr));
+ if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
rets = -ENODEV;
goto err;
}
@@ -564,7 +484,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
out:
if (mei_hdr.msg_complete) {
- if (mei_flow_ctrl_reduce(dev, cl)) {
+ if (mei_cl_flow_ctrl_reduce(cl)) {
rets = -ENODEV;
goto err;
}
@@ -582,6 +502,103 @@ err:
return rets;
}
+/**
+ * mei_ioctl_connect_client - the connect to fw client IOCTL function
+ *
+ * @dev: the device structure
+ * @data: IOCTL connect data, input and output parameters
+ * @file: private data of the file object
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_ioctl_connect_client(struct file *file,
+ struct mei_connect_client_data *data)
+{
+ struct mei_device *dev;
+ struct mei_client *client;
+ struct mei_cl *cl;
+ int i;
+ int rets;
+
+ cl = file->private_data;
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (dev->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto end;
+ }
+
+ if (cl->state != MEI_FILE_INITIALIZING &&
+ cl->state != MEI_FILE_DISCONNECTED) {
+ rets = -EBUSY;
+ goto end;
+ }
+
+ /* find ME client we're trying to connect to */
+ i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
+ if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
+ cl->me_client_id = dev->me_clients[i].client_id;
+ cl->state = MEI_FILE_CONNECTING;
+ }
+
+ dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
+ cl->me_client_id);
+ dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
+ dev->me_clients[i].props.protocol_version);
+ dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
+ dev->me_clients[i].props.max_msg_length);
+
+ /* if we're connecting to amthif client then we will use the
+ * existing connection
+ */
+ if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
+ dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
+ if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
+ rets = -ENODEV;
+ goto end;
+ }
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+ mei_cl_unlink(cl);
+
+ kfree(cl);
+ cl = NULL;
+ file->private_data = &dev->iamthif_cl;
+
+ client = &data->out_client_properties;
+ client->max_msg_length =
+ dev->me_clients[i].props.max_msg_length;
+ client->protocol_version =
+ dev->me_clients[i].props.protocol_version;
+ rets = dev->iamthif_cl.status;
+
+ goto end;
+ }
+
+ if (cl->state != MEI_FILE_CONNECTING) {
+ rets = -ENODEV;
+ goto end;
+ }
+
+
+ /* prepare the output buffer */
+ client = &data->out_client_properties;
+ client->max_msg_length = dev->me_clients[i].props.max_msg_length;
+ client->protocol_version = dev->me_clients[i].props.protocol_version;
+ dev_dbg(&dev->pdev->dev, "Can connect?\n");
+
+
+ rets = mei_cl_connect(cl, file);
+
+end:
+ dev_dbg(&dev->pdev->dev, "free connect cb memory.");
+ return rets;
+}
+
/**
* mei_ioctl - the IOCTL function
@@ -630,6 +647,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
rets = -EFAULT;
goto out;
}
+
rets = mei_ioctl_connect_client(file, connect_data);
/* if all is ok, copying the data back to user. */
@@ -726,7 +744,6 @@ static const struct file_operations mei_fops = {
.llseek = no_llseek
};
-
/*
* Misc Device Struct
*/
@@ -736,300 +753,17 @@ static struct miscdevice mei_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
};
-/**
- * mei_quirk_probe - probe for devices that doesn't valid ME interface
- * @pdev: PCI device structure
- * @ent: entry into pci_device_table
- *
- * returns true if ME Interface is valid, false otherwise
- */
-static bool mei_quirk_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int mei_register(struct device *dev)
{
- u32 reg;
- if (ent->device == MEI_DEV_ID_PBG_1) {
- pci_read_config_dword(pdev, 0x48, &reg);
- /* make sure that bit 9 is up and bit 10 is down */
- if ((reg & 0x600) == 0x200) {
- dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
- return false;
- }
- }
- return true;
-}
-/**
- * mei_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in kcs_pci_tbl
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct mei_device *dev;
- int err;
-
- mutex_lock(&mei_mutex);
-
- if (!mei_quirk_probe(pdev, ent)) {
- err = -ENODEV;
- goto end;
- }
-
- if (mei_pdev) {
- err = -EEXIST;
- goto end;
- }
- /* enable pci dev */
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable pci device.\n");
- goto end;
- }
- /* set PCI host mastering */
- pci_set_master(pdev);
- /* pci request regions for mei driver */
- err = pci_request_regions(pdev, KBUILD_MODNAME);
- if (err) {
- dev_err(&pdev->dev, "failed to get pci regions.\n");
- goto disable_device;
- }
- /* allocates and initializes the mei dev structure */
- dev = mei_device_init(pdev);
- if (!dev) {
- err = -ENOMEM;
- goto release_regions;
- }
- /* mapping IO device memory */
- dev->mem_addr = pci_iomap(pdev, 0, 0);
- if (!dev->mem_addr) {
- dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
- err = -ENOMEM;
- goto free_device;
- }
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
-
- if (err) {
- dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
- pdev->irq);
- goto disable_msi;
- }
- INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- INIT_WORK(&dev->init_work, mei_host_client_init);
-
- if (mei_hw_init(dev)) {
- dev_err(&pdev->dev, "init hw failure.\n");
- err = -ENODEV;
- goto release_irq;
- }
-
- err = misc_register(&mei_misc_device);
- if (err)
- goto release_irq;
-
- mei_pdev = pdev;
- pci_set_drvdata(pdev, dev);
-
-
- schedule_delayed_work(&dev->timer_work, HZ);
-
- mutex_unlock(&mei_mutex);
-
- pr_debug("initialization successful.\n");
-
- return 0;
-
-release_irq:
- /* disable interrupts */
- dev->host_hw_state = mei_hcsr_read(dev);
- mei_disable_interrupts(dev);
- flush_scheduled_work();
- free_irq(pdev->irq, dev);
-disable_msi:
- pci_disable_msi(pdev);
- pci_iounmap(pdev, dev->mem_addr);
-free_device:
- kfree(dev);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
-end:
- mutex_unlock(&mei_mutex);
- dev_err(&pdev->dev, "initialization failed.\n");
- return err;
+ mei_misc_device.parent = dev;
+ return misc_register(&mei_misc_device);
}
-/**
- * mei_remove - Device Removal Routine
- *
- * @pdev: PCI device structure
- *
- * mei_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- */
-static void mei_remove(struct pci_dev *pdev)
+void mei_deregister(void)
{
- struct mei_device *dev;
-
- if (mei_pdev != pdev)
- return;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
-
- mutex_lock(&dev->device_lock);
-
- cancel_delayed_work(&dev->timer_work);
-
- mei_wd_stop(dev);
-
- mei_pdev = NULL;
-
- if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->iamthif_cl);
- }
- if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
- dev->wd_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->wd_cl);
- }
-
- /* Unregistering watchdog device */
- mei_watchdog_unregister(dev);
-
- /* remove entry if already in list */
- dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
- mei_me_cl_unlink(dev, &dev->wd_cl);
- mei_me_cl_unlink(dev, &dev->iamthif_cl);
-
- dev->iamthif_current_cb = NULL;
- dev->me_clients_num = 0;
-
- mutex_unlock(&dev->device_lock);
-
- flush_scheduled_work();
-
- /* disable interrupts */
- mei_disable_interrupts(dev);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
-
- if (dev->mem_addr)
- pci_iounmap(pdev, dev->mem_addr);
-
- kfree(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
misc_deregister(&mei_misc_device);
-}
-#ifdef CONFIG_PM
-static int mei_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev = pci_get_drvdata(pdev);
- int err;
-
- if (!dev)
- return -ENODEV;
- mutex_lock(&dev->device_lock);
-
- cancel_delayed_work(&dev->timer_work);
-
- /* Stop watchdog if exists */
- err = mei_wd_stop(dev);
- /* Set new mei state */
- if (dev->dev_state == MEI_DEV_ENABLED ||
- dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
- dev->dev_state = MEI_DEV_POWER_DOWN;
- mei_reset(dev, 0);
- }
- mutex_unlock(&dev->device_lock);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-
- return err;
+ mei_misc_device.parent = NULL;
}
-static int mei_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev;
- int err;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
-
- if (err) {
- dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
- pdev->irq);
- return err;
- }
-
- mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_POWER_UP;
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
-
- /* Start timer if stopped in suspend */
- schedule_delayed_work(&dev->timer_work, HZ);
-
- return err;
-}
-static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
-#define MEI_PM_OPS (&mei_pm_ops)
-#else
-#define MEI_PM_OPS NULL
-#endif /* CONFIG_PM */
-/*
- * PCI driver structure
- */
-static struct pci_driver mei_driver = {
- .name = KBUILD_MODNAME,
- .id_table = mei_pci_tbl,
- .probe = mei_probe,
- .remove = mei_remove,
- .shutdown = mei_remove,
- .driver.pm = MEI_PM_OPS,
-};
-
-module_pci_driver(mei_driver);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
+
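For reference, the connect path that mei_ioctl_connect_client() now services is driven from user space through IOCTL_MEI_CONNECT_CLIENT on the MEI character device. The sketch below is a minimal, hedged illustration of that flow: the device node path and the UUID bytes are placeholders, and only the structures visible in <linux/mei.h> (struct mei_connect_client_data and struct mei_client, both used in the hunk above) are assumed.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

int main(void)
{
	struct mei_connect_client_data data;
	/* placeholder UUID bytes -- replace with the ME client you need */
	static const uuid_le demo_uuid = { .b = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f } };
	int fd, ret;

	fd = open("/dev/mei", O_RDWR);	/* node name may differ by kernel */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&data, 0, sizeof(data));
	data.in_client_uuid = demo_uuid;

	ret = ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data);
	if (ret)
		perror("IOCTL_MEI_CONNECT_CLIENT");
	else
		/* on success the union holds the client properties */
		printf("max_msg_length=%u protocol_version=%u\n",
		       data.out_client_properties.max_msg_length,
		       data.out_client_properties.protocol_version);

	close(fd);
	return ret ? 1 : 0;
}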
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 25da04549d04..cb80166161f0 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -21,7 +21,9 @@
#include <linux/watchdog.h>
#include <linux/poll.h>
#include <linux/mei.h>
+
#include "hw.h"
+#include "hw-me-regs.h"
/*
* watch dog definition
@@ -44,7 +46,7 @@
/*
* AMTHI Client UUID
*/
-extern const uuid_le mei_amthi_guid;
+extern const uuid_le mei_amthif_guid;
/*
* Watchdog Client UUID
@@ -65,12 +67,18 @@ extern const u8 mei_wd_state_independence_msg[3][4];
* Number of File descriptors/handles
* that can be opened to the driver.
*
- * Limit to 253: 256 Total Clients
+ * Limit to 255: 256 Total Clients
* minus internal client for MEI Bus Messags
- * minus internal client for AMTHI
- * minus internal client for Watchdog
*/
-#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 3)
+#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
+
+/*
+ * Internal Clients Number
+ */
+#define MEI_HOST_CLIENT_ID_ANY (-1)
+#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */
+#define MEI_WD_HOST_CLIENT_ID 1
+#define MEI_IAMTHIF_HOST_CLIENT_ID 2
/* File state */
@@ -150,6 +158,19 @@ struct mei_message_data {
unsigned char *data;
};
+/**
+ * struct mei_me_client - representation of me (fw) client
+ *
+ * @props - client properties
+ * @client_id - me client id
+ * @mei_flow_ctrl_creds - flow control credits
+ */
+struct mei_me_client {
+ struct mei_client_properties props;
+ u8 client_id;
+ u8 mei_flow_ctrl_creds;
+};
+
struct mei_cl;
@@ -178,7 +199,6 @@ struct mei_cl {
wait_queue_head_t tx_wait;
wait_queue_head_t rx_wait;
wait_queue_head_t wait;
- int read_pending;
int status;
/* ID of client connected */
u8 host_client_id;
@@ -191,10 +211,67 @@ struct mei_cl {
struct mei_cl_cb *read_cb;
};
+/** struct mei_hw_ops
+ *
+ * @host_set_ready - notify FW that host side is ready
+ * @host_is_ready - query for host readiness
+
+ * @hw_is_ready - query if hw is ready
+ * @hw_reset - reset hw
+ * @hw_config - configure hw
+
+ * @intr_clear - clear pending interrupts
+ * @intr_enable - enable interrupts
+ * @intr_disable - disable interrupts
+
+ * @hbuf_free_slots - query for write buffer empty slots
+ * @hbuf_is_ready - query if write buffer is empty
+ * @hbuf_max_len - query for write buffer max len
+
+ * @write - write a message to FW
+
+ * @rdbuf_full_slots - query how many slots are filled
+
+ * @read_hdr - get first 4 bytes (header)
+ * @read - read a buffer from the FW
+ */
+struct mei_hw_ops {
+
+ void (*host_set_ready) (struct mei_device *dev);
+ bool (*host_is_ready) (struct mei_device *dev);
+
+ bool (*hw_is_ready) (struct mei_device *dev);
+ void (*hw_reset) (struct mei_device *dev, bool enable);
+ void (*hw_config) (struct mei_device *dev);
+
+ void (*intr_clear) (struct mei_device *dev);
+ void (*intr_enable) (struct mei_device *dev);
+ void (*intr_disable) (struct mei_device *dev);
+
+ int (*hbuf_free_slots) (struct mei_device *dev);
+ bool (*hbuf_is_ready) (struct mei_device *dev);
+ size_t (*hbuf_max_len) (const struct mei_device *dev);
+
+ int (*write)(struct mei_device *dev,
+ struct mei_msg_hdr *hdr,
+ unsigned char *buf);
+
+ int (*rdbuf_full_slots)(struct mei_device *dev);
+
+ u32 (*read_hdr)(const struct mei_device *dev);
+ int (*read) (struct mei_device *dev,
+ unsigned char *buf, unsigned long len);
+};
+
/**
* struct mei_device - MEI private device struct
- * @hbuf_depth - depth of host(write) buffer
- * @wr_ext_msg - buffer for hbm control responses (set in read cycle)
+
+ * @mem_addr - mem mapped base register address
+
+ * @hbuf_depth - depth of hardware host/write buffer in slots
+ * @hbuf_is_ready - query if the host/write buffer is ready
+ * @wr_msg - the buffer for hbm control messages
+ * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
@@ -213,24 +290,14 @@ struct mei_device {
*/
struct list_head file_list;
long open_handle_count;
- /*
- * memory of device
- */
- unsigned int mem_base;
- unsigned int mem_length;
- void __iomem *mem_addr;
+
/*
* lock for the device
*/
struct mutex device_lock; /* device lock */
struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */
bool recvd_msg;
- /*
- * hw states of host and fw(ME)
- */
- u32 host_hw_state;
- u32 me_hw_state;
- u8 hbuf_depth;
+
/*
* waiting queue for receive message from FW
*/
@@ -243,11 +310,20 @@ struct mei_device {
enum mei_dev_state dev_state;
enum mei_init_clients_states init_clients_state;
u16 init_clients_timer;
- bool need_reset;
unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 rd_msg_hdr;
- u32 wr_msg_buf[128]; /* used for control messages */
+
+ /* write buffer */
+ u8 hbuf_depth;
+ bool hbuf_is_ready;
+
+ /* used for control messages */
+ struct {
+ struct mei_msg_hdr hdr;
+ unsigned char data[128];
+ } wr_msg;
+
struct {
struct mei_msg_hdr hdr;
unsigned char data[4]; /* All HBM messages are 4 bytes */
@@ -261,7 +337,6 @@ struct mei_device {
u8 me_clients_num;
u8 me_client_presentation_num;
u8 me_client_index;
- bool mei_host_buffer_is_empty;
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
@@ -289,6 +364,9 @@ struct mei_device {
bool iamthif_canceled;
struct work_struct init_work;
+
+ const struct mei_hw_ops *ops;
+ char hw[0] __aligned(sizeof(void *));
};
static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
@@ -300,96 +378,28 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
/*
* mei init function prototypes
*/
-struct mei_device *mei_device_init(struct pci_dev *pdev);
+void mei_device_init(struct mei_device *dev);
void mei_reset(struct mei_device *dev, int interrupts);
int mei_hw_init(struct mei_device *dev);
-int mei_task_initialize_clients(void *data);
-int mei_initialize_clients(struct mei_device *dev);
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
-void mei_allocate_me_clients_storage(struct mei_device *dev);
-
-
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
- const uuid_le *cguid, u8 host_client_id);
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl);
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
-
-/*
- * MEI IO Functions
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
-void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
-
-
-/**
- * mei_io_list_init - Sets up a queue list.
- *
- * @list: An instance cl callback structure
- */
-static inline void mei_io_list_init(struct mei_cl_cb *list)
-{
- INIT_LIST_HEAD(&list->list);
-}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
-
-/*
- * MEI ME Client Functions
- */
-
-struct mei_cl *mei_cl_allocate(struct mei_device *dev);
-void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
-int mei_cl_flush_queues(struct mei_cl *cl);
-/**
- * mei_cl_cmp_id - tells if file private data have same id
- *
- * @fe1: private data of 1. file object
- * @fe2: private data of 2. file object
- *
- * returns true - if ids are the same and not NULL
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
- const struct mei_cl *cl2)
-{
- return cl1 && cl2 &&
- (cl1->host_client_id == cl2->host_client_id) &&
- (cl1->me_client_id == cl2->me_client_id);
-}
-
-
-
-/*
- * MEI Host Client Functions
- */
-void mei_host_start_message(struct mei_device *dev);
-void mei_host_enum_clients_message(struct mei_device *dev);
-int mei_host_client_enumerate(struct mei_device *dev);
-void mei_host_client_init(struct work_struct *work);
/*
* MEI interrupt functions prototype
*/
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
-void mei_timer(struct work_struct *work);
-/*
- * MEI input output function prototype
- */
-int mei_ioctl_connect_client(struct file *file,
- struct mei_connect_client_data *data);
+void mei_timer(struct work_struct *work);
+int mei_irq_read_handler(struct mei_device *dev,
+ struct mei_cl_cb *cmpl_list, s32 *slots);
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl);
+int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos);
/*
* AMTHIF - AMT Host Interface Functions
*/
void mei_amthif_reset_params(struct mei_device *dev);
-void mei_amthif_host_init(struct mei_device *dev);
+int mei_amthif_host_init(struct mei_device *dev);
int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
@@ -407,9 +417,6 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
void mei_amthif_run_next_cmd(struct mei_device *dev);
-int mei_amthif_read_message(struct mei_cl_cb *complete_list,
- struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
-
int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list);
@@ -418,92 +425,107 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
+
+int mei_wd_send(struct mei_device *dev);
+int mei_wd_stop(struct mei_device *dev);
+int mei_wd_host_init(struct mei_device *dev);
/*
- * Register Access Function
+ * mei_watchdog_register - Registering watchdog interface
+ * once we got connection to the WD Client
+ * @dev - mei device
+ */
+void mei_watchdog_register(struct mei_device *dev);
+/*
+ * mei_watchdog_unregister - Unregistering watchdog interface
+ * @dev - mei device
*/
+void mei_watchdog_unregister(struct mei_device *dev);
-/**
- * mei_reg_read - Reads 32bit data from the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to read the data
- *
- * returns register value (u32)
+/*
+ * Register Access Function
*/
-static inline u32 mei_reg_read(const struct mei_device *dev,
- unsigned long offset)
+
+static inline void mei_hw_config(struct mei_device *dev)
+{
+ dev->ops->hw_config(dev);
+}
+static inline void mei_hw_reset(struct mei_device *dev, bool enable)
{
- return ioread32(dev->mem_addr + offset);
+ dev->ops->hw_reset(dev, enable);
}
-/**
- * mei_reg_write - Writes 32bit data to the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to write the data
- * @value: register value to write (u32)
- */
-static inline void mei_reg_write(const struct mei_device *dev,
- unsigned long offset, u32 value)
+static inline void mei_clear_interrupts(struct mei_device *dev)
{
- iowrite32(value, dev->mem_addr + offset);
+ dev->ops->intr_clear(dev);
}
-/**
- * mei_hcsr_read - Reads 32bit data from the host CSR
- *
- * @dev: the device structure
- *
- * returns the byte read.
- */
-static inline u32 mei_hcsr_read(const struct mei_device *dev)
+static inline void mei_enable_interrupts(struct mei_device *dev)
{
- return mei_reg_read(dev, H_CSR);
+ dev->ops->intr_enable(dev);
}
-/**
- * mei_mecsr_read - Reads 32bit data from the ME CSR
- *
- * @dev: the device structure
- *
- * returns ME_CSR_HA register value (u32)
- */
-static inline u32 mei_mecsr_read(const struct mei_device *dev)
+static inline void mei_disable_interrupts(struct mei_device *dev)
{
- return mei_reg_read(dev, ME_CSR_HA);
+ dev->ops->intr_disable(dev);
}
-/**
- * get_me_cb_rw - Reads 32bit data from the mei ME_CB_RW register
- *
- * @dev: the device structure
- *
- * returns ME_CB_RW register value (u32)
- */
-static inline u32 mei_mecbrw_read(const struct mei_device *dev)
+static inline void mei_host_set_ready(struct mei_device *dev)
{
- return mei_reg_read(dev, ME_CB_RW);
+ dev->ops->host_set_ready(dev);
+}
+static inline bool mei_host_is_ready(struct mei_device *dev)
+{
+ return dev->ops->host_is_ready(dev);
+}
+static inline bool mei_hw_is_ready(struct mei_device *dev)
+{
+ return dev->ops->hw_is_ready(dev);
}
+static inline bool mei_hbuf_is_ready(struct mei_device *dev)
+{
+ return dev->ops->hbuf_is_ready(dev);
+}
-/*
- * mei interface function prototypes
- */
-void mei_hcsr_set(struct mei_device *dev);
-void mei_csr_clear_his(struct mei_device *dev);
+static inline int mei_hbuf_empty_slots(struct mei_device *dev)
+{
+ return dev->ops->hbuf_free_slots(dev);
+}
+
+static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
+{
+ return dev->ops->hbuf_max_len(dev);
+}
-void mei_enable_interrupts(struct mei_device *dev);
-void mei_disable_interrupts(struct mei_device *dev);
+static inline int mei_write_message(struct mei_device *dev,
+ struct mei_msg_hdr *hdr,
+ unsigned char *buf)
+{
+ return dev->ops->write(dev, hdr, buf);
+}
-static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length)
+static inline u32 mei_read_hdr(const struct mei_device *dev)
{
- struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf;
- hdr->host_addr = 0;
- hdr->me_addr = 0;
- hdr->length = length;
- hdr->msg_complete = 1;
- hdr->reserved = 0;
- return hdr;
+ return dev->ops->read_hdr(dev);
}
+static inline void mei_read_slots(struct mei_device *dev,
+ unsigned char *buf, unsigned long len)
+{
+ dev->ops->read(dev, buf, len);
+}
+
+static inline int mei_count_full_read_slots(struct mei_device *dev)
+{
+ return dev->ops->rdbuf_full_slots(dev);
+}
+
+int mei_register(struct device *dev);
+void mei_deregister(void);
+
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
+#define MEI_HDR_PRM(hdr) \
+ (hdr)->host_addr, (hdr)->me_addr, \
+ (hdr)->length, (hdr)->msg_complete
+
#endif
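The mei_dev.h hunk above replaces the direct register helpers with a struct mei_hw_ops vtable plus static-inline wrappers, so the core code reaches the hardware only through dev->ops and a second backend can be added without touching the core. Below is a minimal standalone sketch of that pattern; all sample_* names are invented for illustration and are not part of the driver.

#include <stdio.h>
#include <stdbool.h>

struct sample_dev;

struct sample_hw_ops {
	void (*hw_config)(struct sample_dev *dev);
	bool (*hbuf_is_ready)(struct sample_dev *dev);
};

struct sample_dev {
	const struct sample_hw_ops *ops;
	/* backend-private state would follow here, like the
	 * 'char hw[0] __aligned(sizeof(void *))' member of struct mei_device */
};

/* thin wrappers: the generic code calls these, never a backend directly */
static void sample_hw_config(struct sample_dev *dev)
{
	dev->ops->hw_config(dev);
}

static bool sample_hbuf_is_ready(struct sample_dev *dev)
{
	return dev->ops->hbuf_is_ready(dev);
}

/* one concrete backend, analogous to the "me" hardware layer */
static void me_hw_config(struct sample_dev *dev)
{
	(void)dev;
	puts("me: configured");
}

static bool me_hbuf_is_ready(struct sample_dev *dev)
{
	(void)dev;
	return true;
}

static const struct sample_hw_ops me_ops = {
	.hw_config	= me_hw_config,
	.hbuf_is_ready	= me_hbuf_is_ready,
};

int main(void)
{
	struct sample_dev dev = { .ops = &me_ops };

	sample_hw_config(&dev);
	printf("host buffer ready: %s\n",
	       sample_hbuf_is_ready(&dev) ? "yes" : "no");
	return 0;
}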
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
new file mode 100644
index 000000000000..b40ec0601ab0
--- /dev/null
+++ b/drivers/misc/mei/pci-me.c
@@ -0,0 +1,396 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/aio.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/uuid.h>
+#include <linux/compat.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
+
+/* AMT device is a singleton on the platform */
+static struct pci_dev *mei_pdev;
+
+/* mei_pci_tbl - PCI Device ID Table */
+static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
+
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
+
+static DEFINE_MUTEX(mei_mutex);
+
+/**
+ * mei_quirk_probe - probe for devices that don't have a valid ME interface
+ * @pdev: PCI device structure
+ * @ent: entry into pci_device_table
+ *
+ * returns true if ME Interface is valid, false otherwise
+ */
+static bool mei_quirk_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ u32 reg;
+ if (ent->device == MEI_DEV_ID_PBG_1) {
+ pci_read_config_dword(pdev, 0x48, &reg);
+ /* make sure that bit 9 is up and bit 10 is down */
+ if ((reg & 0x600) == 0x200) {
+ dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+ return false;
+ }
+ }
+ return true;
+}
+/**
+ * mei_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in kcs_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct mei_device *dev;
+ struct mei_me_hw *hw;
+ int err;
+
+ mutex_lock(&mei_mutex);
+
+ if (!mei_quirk_probe(pdev, ent)) {
+ err = -ENODEV;
+ goto end;
+ }
+
+ if (mei_pdev) {
+ err = -EEXIST;
+ goto end;
+ }
+ /* enable pci dev */
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device.\n");
+ goto end;
+ }
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+ /* pci request regions for mei driver */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get pci regions.\n");
+ goto disable_device;
+ }
+ /* allocates and initializes the mei dev structure */
+ dev = mei_me_dev_init(pdev);
+ if (!dev) {
+ err = -ENOMEM;
+ goto release_regions;
+ }
+ hw = to_me_hw(dev);
+ /* mapping IO device memory */
+ hw->mem_addr = pci_iomap(pdev, 0, 0);
+ if (!hw->mem_addr) {
+ dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+
+ if (err) {
+ dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
+ pdev->irq);
+ goto disable_msi;
+ }
+
+ if (mei_hw_init(dev)) {
+ dev_err(&pdev->dev, "init hw failure.\n");
+ err = -ENODEV;
+ goto release_irq;
+ }
+
+ err = mei_register(&pdev->dev);
+ if (err)
+ goto release_irq;
+
+ mei_pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ mutex_unlock(&mei_mutex);
+
+ pr_debug("initialization successful.\n");
+
+ return 0;
+
+release_irq:
+ mei_disable_interrupts(dev);
+ flush_scheduled_work();
+ free_irq(pdev->irq, dev);
+disable_msi:
+ pci_disable_msi(pdev);
+ pci_iounmap(pdev, hw->mem_addr);
+free_device:
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+end:
+ mutex_unlock(&mei_mutex);
+ dev_err(&pdev->dev, "initialization failed.\n");
+ return err;
+}
+
+/**
+ * mei_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void mei_remove(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+ struct mei_me_hw *hw;
+
+ if (mei_pdev != pdev)
+ return;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ hw = to_me_hw(dev);
+
+ mutex_lock(&dev->device_lock);
+
+ cancel_delayed_work(&dev->timer_work);
+
+ mei_wd_stop(dev);
+
+ mei_pdev = NULL;
+
+ if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
+ dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
+ mei_cl_disconnect(&dev->iamthif_cl);
+ }
+ if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
+ dev->wd_cl.state = MEI_FILE_DISCONNECTING;
+ mei_cl_disconnect(&dev->wd_cl);
+ }
+
+ /* Unregistering watchdog device */
+ mei_watchdog_unregister(dev);
+
+ /* remove entry if already in list */
+ dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
+
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->wd_cl);
+
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->iamthif_cl);
+
+ dev->iamthif_current_cb = NULL;
+ dev->me_clients_num = 0;
+
+ mutex_unlock(&dev->device_lock);
+
+ flush_scheduled_work();
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (hw->mem_addr)
+ pci_iounmap(pdev, hw->mem_addr);
+
+ kfree(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ mei_deregister();
+
+}
+#ifdef CONFIG_PM
+static int mei_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev = pci_get_drvdata(pdev);
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+ mutex_lock(&dev->device_lock);
+
+ cancel_delayed_work(&dev->timer_work);
+
+ /* Stop watchdog if exists */
+ err = mei_wd_stop(dev);
+ /* Set new mei state */
+ if (dev->dev_state == MEI_DEV_ENABLED ||
+ dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
+ dev->dev_state = MEI_DEV_POWER_DOWN;
+ mei_reset(dev, 0);
+ }
+ mutex_unlock(&dev->device_lock);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+ return err;
+}
+
+static int mei_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int err;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+
+ if (err) {
+ dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
+ pdev->irq);
+ return err;
+ }
+
+ mutex_lock(&dev->device_lock);
+ dev->dev_state = MEI_DEV_POWER_UP;
+ mei_reset(dev, 1);
+ mutex_unlock(&dev->device_lock);
+
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ return err;
+}
+static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
+#define MEI_PM_OPS (&mei_pm_ops)
+#else
+#define MEI_PM_OPS NULL
+#endif /* CONFIG_PM */
+/*
+ * PCI driver structure
+ */
+static struct pci_driver mei_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mei_pci_tbl,
+ .probe = mei_probe,
+ .remove = mei_remove,
+ .shutdown = mei_remove,
+ .driver.pm = MEI_PM_OPS,
+};
+
+module_pci_driver(mei_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
+MODULE_LICENSE("GPL v2");
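pci-me.c now hands the PCI device to the character-device layer via mei_register(&pdev->dev), and mei_open() walks back from the misc device's parent with container_of(misc->parent, struct pci_dev, dev), removing the need for the old mei_pdev global on the open path. The userspace sketch below only illustrates the container_of pointer arithmetic with simplified stand-in structures; it is not driver code.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device {
	const char *name;
};

struct pci_dev {
	int vendor;
	struct device dev;	/* embedded generic device */
};

int main(void)
{
	struct pci_dev pdev = { .vendor = 0x8086, .dev = { .name = "mei" } };
	struct device *parent = &pdev.dev;	/* what the misc device stores */

	/* recover the enclosing pci_dev from the embedded device pointer */
	struct pci_dev *back = container_of(parent, struct pci_dev, dev);

	printf("vendor=0x%x name=%s\n", back->vendor, back->dev.name);
	return 0;
}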
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 636409f9667f..2413247fc392 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -21,11 +21,13 @@
#include <linux/sched.h>
#include <linux/watchdog.h>
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
#include <linux/mei.h>
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
+
static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
@@ -62,30 +64,41 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
*/
int mei_wd_host_init(struct mei_device *dev)
{
- int id;
- mei_cl_init(&dev->wd_cl, dev);
+ struct mei_cl *cl = &dev->wd_cl;
+ int i;
+ int ret;
+
+ mei_cl_init(cl, dev);
- /* look for WD client and connect to it */
- dev->wd_cl.state = MEI_FILE_DISCONNECTED;
dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
dev->wd_state = MEI_WD_IDLE;
- /* Connect WD ME client to the host client */
- id = mei_me_cl_link(dev, &dev->wd_cl,
- &mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
- if (id < 0) {
+ /* check for valid client id */
+ i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
+ if (i < 0) {
dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
return -ENOENT;
}
- if (mei_connect(dev, &dev->wd_cl)) {
+ cl->me_client_id = dev->me_clients[i].client_id;
+
+ ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
+
+ if (ret < 0) {
+ dev_info(&dev->pdev->dev, "wd: failed link client\n");
+ return -ENOENT;
+ }
+
+ cl->state = MEI_FILE_CONNECTING;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
- dev->wd_cl.state = MEI_FILE_DISCONNECTED;
- dev->wd_cl.host_client_id = 0;
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->host_client_id = 0;
return -EIO;
}
- dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
return 0;
}
@@ -101,22 +114,21 @@ int mei_wd_host_init(struct mei_device *dev)
*/
int mei_wd_send(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr hdr;
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = dev->wd_cl.host_client_id;
- mei_hdr->me_addr = dev->wd_cl.me_client_id;
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
+ hdr.host_addr = dev->wd_cl.host_client_id;
+ hdr.me_addr = dev->wd_cl.me_client_id;
+ hdr.msg_complete = 1;
+ hdr.reserved = 0;
if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
- mei_hdr->length = MEI_WD_START_MSG_SIZE;
+ hdr.length = MEI_WD_START_MSG_SIZE;
else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
- mei_hdr->length = MEI_WD_STOP_MSG_SIZE;
+ hdr.length = MEI_WD_STOP_MSG_SIZE;
else
return -EINVAL;
- return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
+ return mei_write_message(dev, &hdr, dev->wd_data);
}
/**
@@ -141,16 +153,16 @@ int mei_wd_stop(struct mei_device *dev)
dev->wd_state = MEI_WD_STOPPING;
- ret = mei_flow_ctrl_creds(dev, &dev->wd_cl);
+ ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
if (ret < 0)
goto out;
- if (ret && dev->mei_host_buffer_is_empty) {
+ if (ret && dev->hbuf_is_ready) {
ret = 0;
- dev->mei_host_buffer_is_empty = false;
+ dev->hbuf_is_ready = false;
if (!mei_wd_send(dev)) {
- ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl);
+ ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
if (ret)
goto out;
} else {
@@ -270,10 +282,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
dev->wd_state = MEI_WD_RUNNING;
/* Check if we can send the ping to HW*/
- if (dev->mei_host_buffer_is_empty &&
- mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+ if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
- dev->mei_host_buffer_is_empty = false;
+ dev->hbuf_is_ready = false;
dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
if (mei_wd_send(dev)) {
@@ -282,9 +293,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
goto end;
}
- if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) {
+ if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
dev_err(&dev->pdev->dev,
- "wd: mei_flow_ctrl_reduce() failed.\n");
+ "wd: mei_cl_flow_ctrl_reduce() failed.\n");
ret = -EIO;
goto end;
}
@@ -370,7 +381,7 @@ void mei_watchdog_register(struct mei_device *dev)
void mei_watchdog_unregister(struct mei_device *dev)
{
- if (test_bit(WDOG_UNREGISTERED, &amt_wd_dev.status))
+ if (watchdog_get_drvdata(&amt_wd_dev) == NULL)
return;
watchdog_set_drvdata(&amt_wd_dev, NULL);
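Two small idioms from the wd.c and mei_dev.h hunks are worth noting: mei_wd_send() now fills a message header in a local struct instead of casting the shared wr_msg_buf, and header logging goes through the paired MEI_HDR_FMT/MEI_HDR_PRM macros. The sketch below imitates both with deliberately simplified field types and made-up payload sizes; it is an illustration, not the real layout.

#include <stdio.h>
#include <string.h>

struct demo_msg_hdr {
	unsigned host_addr;
	unsigned me_addr;
	unsigned length;
	unsigned msg_complete;
};

#define DEMO_HDR_FMT "hdr:host=%02u me=%02u len=%u comp=%1u"
#define DEMO_HDR_PRM(hdr) \
	(hdr)->host_addr, (hdr)->me_addr, (hdr)->length, (hdr)->msg_complete

static const unsigned char start_params[] = { 0x02, 0x12, 0x13, 0x10 };
static const unsigned char stop_params[]  = { 0x02, 0x02, 0x14, 0x10 };

static int demo_wd_send(const unsigned char *wd_data)
{
	struct demo_msg_hdr hdr;	/* local, no shared scratch buffer */

	hdr.host_addr = 1;		/* arbitrary example addresses */
	hdr.me_addr = 5;
	hdr.msg_complete = 1;

	if (!memcmp(wd_data, start_params, sizeof(start_params)))
		hdr.length = 20;	/* "start" payload size, made up here */
	else if (!memcmp(wd_data, stop_params, sizeof(stop_params)))
		hdr.length = 4;		/* "stop" payload size, made up here */
	else
		return -1;

	printf("write " DEMO_HDR_FMT "\n", DEMO_HDR_PRM(&hdr));
	return 0;
}

int main(void)
{
	return demo_wd_send(start_params);
}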
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 492c8cac69ac..44d273c5e19d 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -517,7 +517,7 @@ static int __init gru_init(void)
{
int ret;
- if (!is_uv_system())
+ if (!is_uv_system() || (is_uvx_hub() && !is_uv2_hub()))
return 0;
#if defined CONFIG_IA64
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 8d082b46426b..d971817182f7 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -53,6 +53,10 @@
#include <linux/kthread.h>
#include "xpc.h"
+#ifdef CONFIG_X86_64
+#include <asm/traps.h>
+#endif
+
/* define two XPC debug device structures to be used with dev_dbg() et al */
struct device_driver xpc_dbg_name = {
@@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
return NOTIFY_DONE;
}
+/* Used to only allow one cpu to complete disconnect */
+static unsigned int xpc_die_disconnecting;
+
/*
* Notify other partitions to deactivate from us by first disengaging from all
* references to our memory.
@@ -1092,6 +1099,9 @@ xpc_die_deactivate(void)
long keep_waiting;
long wait_to_print;
+ if (cmpxchg(&xpc_die_disconnecting, 0, 1))
+ return;
+
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
@@ -1159,7 +1169,7 @@ xpc_die_deactivate(void)
* about the lack of a heartbeat.
*/
static int
-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64 /* !!! temporary kludge */
switch (event) {
@@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
break;
}
#else
- xpc_die_deactivate();
+ struct die_args *die_args = _die_args;
+
+ switch (event) {
+ case DIE_TRAP:
+ if (die_args->trapnr == X86_TRAP_DF)
+ xpc_die_deactivate();
+
+ if (((die_args->trapnr == X86_TRAP_MF) ||
+ (die_args->trapnr == X86_TRAP_XF)) &&
+ !user_mode_vm(die_args->regs))
+ xpc_die_deactivate();
+
+ break;
+ case DIE_INT3:
+ case DIE_DEBUG:
+ break;
+ case DIE_OOPS:
+ case DIE_GPF:
+ default:
+ xpc_die_deactivate();
+ }
#endif
return NOTIFY_DONE;
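The xpc change guards xpc_die_deactivate() with cmpxchg(&xpc_die_disconnecting, 0, 1) so that only the first CPU entering the die notifier performs the disconnect. A userspace analogue using C11 atomics is sketched below; it mirrors the guard's semantics only and stands in for the kernel's cmpxchg().

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint die_disconnecting = 0;

static void die_deactivate(void)
{
	unsigned int expected = 0;

	/* first caller flips 0 -> 1 and proceeds; everyone else bails out,
	 * mirroring 'if (cmpxchg(&xpc_die_disconnecting, 0, 1)) return;' */
	if (!atomic_compare_exchange_strong(&die_disconnecting, &expected, 1))
		return;

	puts("disconnecting from remote partitions (runs once)");
}

int main(void)
{
	die_deactivate();	/* does the work */
	die_deactivate();	/* no-op on second entry */
	return 0;
}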
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index abb5de1afce3..f34dcc514730 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -5,7 +5,7 @@
menu "Texas Instruments shared transport line discipline"
config TI_ST
tristate "Shared transport core driver"
- depends on NET && GPIOLIB
+ depends on NET && GPIOLIB && TTY
select FW_LOADER
help
This enables the shared transport core driver for TI
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index b90a2241d79c..0a1428016350 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -240,7 +240,8 @@ void st_int_recv(void *disc_data,
char *ptr;
struct st_proto_s *proto;
unsigned short payload_len = 0;
- int len = 0, type = 0;
+ int len = 0;
+ unsigned char type = 0;
unsigned char *plen;
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
unsigned long flags;
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 9ff942a346ed..83269f1d16e3 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -468,6 +468,11 @@ long st_kim_start(void *kim_data)
if (pdata->chip_enable)
pdata->chip_enable(kim_gdata);
+ /* Configure BT nShutdown to HIGH state */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ mdelay(5); /* FIXME: a proper toggle */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ mdelay(100);
/* re-initialize the completion */
INIT_COMPLETION(kim_gdata->ldisc_installed);
/* send notification to UIM */
@@ -509,7 +514,8 @@ long st_kim_start(void *kim_data)
* (b) upon failure to either install ldisc or download firmware.
* The function is responsible to (a) notify UIM about un-installation,
* (b) flush UART if the ldisc was installed.
- * (c) invoke platform's chip disabling routine.
+ * (c) reset BT_EN - pull down nshutdown at the end.
+ * (d) invoke platform's chip disabling routine.
*/
long st_kim_stop(void *kim_data)
{
@@ -541,6 +547,13 @@ long st_kim_stop(void *kim_data)
err = -ETIMEDOUT;
}
+ /* By default configure BT nShutdown to LOW state */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ mdelay(1);
+ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ mdelay(1);
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+
/* platform specific disable */
if (pdata->chip_disable)
pdata->chip_disable(kim_gdata);
@@ -733,6 +746,20 @@ static int kim_probe(struct platform_device *pdev)
/* refer to itself */
kim_gdata->core_data->kim_data = kim_gdata;
+ /* Claim the chip enable nShutdown gpio from the system */
+ kim_gdata->nshutdown = pdata->nshutdown_gpio;
+ err = gpio_request(kim_gdata->nshutdown, "kim");
+ if (unlikely(err)) {
+ pr_err(" gpio %ld request failed ", kim_gdata->nshutdown);
+ return err;
+ }
+
+ /* Configure nShutdown GPIO as output=0 */
+ err = gpio_direction_output(kim_gdata->nshutdown, 0);
+ if (unlikely(err)) {
+ pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown);
+ return err;
+ }
/* get reference of pdev for request_firmware
*/
kim_gdata->kim_pdev = pdev;
@@ -779,10 +806,18 @@ err_core_init:
static int kim_remove(struct platform_device *pdev)
{
+ /* free the GPIOs requested */
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
struct kim_data_s *kim_gdata;
kim_gdata = dev_get_drvdata(&pdev->dev);
+ /* Free the Bluetooth/FM/GPIO
+ * nShutdown gpio from the system
+ */
+ gpio_free(pdata->nshutdown_gpio);
+ pr_info("nshutdown GPIO Freed");
+
debugfs_remove_recursive(kim_debugfs_dir);
sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
pr_info("sysfs entries removed");
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
new file mode 100644
index 000000000000..39c2ecadb273
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -0,0 +1,16 @@
+#
+# VMware VMCI device
+#
+
+config VMWARE_VMCI
+ tristate "VMware VMCI Driver"
+ depends on X86 && PCI
+ help
+ This is VMware's Virtual Machine Communication Interface. It enables
+ high-speed communication between host and guest in a virtual
+ environment via the VMCI virtual device.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmw_vmci.
diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile
new file mode 100644
index 000000000000..4da9893c3942
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o
+vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \
+ vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \
+ vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
new file mode 100644
index 000000000000..f866a4baecb5
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -0,0 +1,1214 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+/*
+ * List of current VMCI contexts. Contexts can be added by
+ * vmci_ctx_create() and removed via vmci_ctx_destroy().
+ * These, along with context lookup, are protected by the
+ * list structure's lock.
+ */
+static struct {
+ struct list_head head;
+ spinlock_t lock; /* Spinlock for context list operations */
+} ctx_list = {
+ .head = LIST_HEAD_INIT(ctx_list.head),
+ .lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
+};
+
+/* Used by contexts that did not set up notify flag pointers */
+static bool ctx_dummy_notify;
+
+static void ctx_signal_notify(struct vmci_ctx *context)
+{
+ *context->notify = true;
+}
+
+static void ctx_clear_notify(struct vmci_ctx *context)
+{
+ *context->notify = false;
+}
+
+/*
+ * If nothing requires the attention of the guest, clears both
+ * notify flag and call.
+ */
+static void ctx_clear_notify_call(struct vmci_ctx *context)
+{
+ if (context->pending_datagrams == 0 &&
+ vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
+ ctx_clear_notify(context);
+}
+
+/*
+ * Sets the context's notify flag iff datagrams are pending for this
+ * context. Called from vmci_setup_notify().
+ */
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
+{
+ spin_lock(&context->lock);
+ if (context->pending_datagrams)
+ ctx_signal_notify(context);
+ spin_unlock(&context->lock);
+}
+
+/*
+ * Allocates and initializes a VMCI context.
+ */
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+ uintptr_t event_hnd,
+ int user_version,
+ const struct cred *cred)
+{
+ struct vmci_ctx *context;
+ int error;
+
+ if (cid == VMCI_INVALID_ID) {
+ pr_devel("Invalid context ID for VMCI context\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
+ pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
+ priv_flags);
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (user_version == 0) {
+ pr_devel("Invalid user_version %d\n", user_version);
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context) {
+ pr_warn("Failed to allocate memory for VMCI context\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ kref_init(&context->kref);
+ spin_lock_init(&context->lock);
+ INIT_LIST_HEAD(&context->list_item);
+ INIT_LIST_HEAD(&context->datagram_queue);
+ INIT_LIST_HEAD(&context->notifier_list);
+
+ /* Initialize host-specific VMCI context. */
+ init_waitqueue_head(&context->host_context.wait_queue);
+
+ context->queue_pair_array = vmci_handle_arr_create(0);
+ if (!context->queue_pair_array) {
+ error = -ENOMEM;
+ goto err_free_ctx;
+ }
+
+ context->doorbell_array = vmci_handle_arr_create(0);
+ if (!context->doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_qp_array;
+ }
+
+ context->pending_doorbell_array = vmci_handle_arr_create(0);
+ if (!context->pending_doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_db_array;
+ }
+
+ context->user_version = user_version;
+
+ context->priv_flags = priv_flags;
+
+ if (cred)
+ context->cred = get_cred(cred);
+
+ context->notify = &ctx_dummy_notify;
+ context->notify_page = NULL;
+
+ /*
+ * If we collide with an existing context we generate a new
+ * one and use it instead. The VMX will determine if regeneration
+ * is okay. Since there aren't 4B - 16 VMs running on a given
+ * host, the loop below will terminate.
+ */
+ spin_lock(&ctx_list.lock);
+
+ while (vmci_ctx_exists(cid)) {
+ /* We reserve the lowest 16 ids for fixed contexts. */
+ cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
+ if (cid == VMCI_INVALID_ID)
+ cid = VMCI_RESERVED_CID_LIMIT;
+ }
+ context->cid = cid;
+
+ list_add_tail_rcu(&context->list_item, &ctx_list.head);
+ spin_unlock(&ctx_list.lock);
+
+ return context;
+
+ err_free_db_array:
+ vmci_handle_arr_destroy(context->doorbell_array);
+ err_free_qp_array:
+ vmci_handle_arr_destroy(context->queue_pair_array);
+ err_free_ctx:
+ kfree(context);
+ err_out:
+ return ERR_PTR(error);
+}
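A minimal sketch of the create/destroy pairing as a host-side caller might use it; the privilege flags, credentials and user_version value below are illustrative only, and the return value is IS_ERR-encoded on failure:

        static int example_open_context(u32 cid, int user_version)
        {
                struct vmci_ctx *ctx;

                /* Hypothetical caller; flags and cred come from the opener. */
                ctx = vmci_ctx_create(cid, VMCI_NO_PRIVILEGE_FLAGS, 0,
                                      user_version, current_cred());
                if (IS_ERR(ctx))
                        return PTR_ERR(ctx);

                /* ... hand the context to the rest of the host personality ... */

                vmci_ctx_destroy(ctx); /* removes the list entry, drops the initial ref */
                return 0;
        }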
+
+/*
+ * Destroy VMCI context.
+ */
+void vmci_ctx_destroy(struct vmci_ctx *context)
+{
+ spin_lock(&ctx_list.lock);
+ list_del_rcu(&context->list_item);
+ spin_unlock(&ctx_list.lock);
+ synchronize_rcu();
+
+ vmci_ctx_put(context);
+}
+
+/*
+ * Fire notification for all contexts interested in given cid.
+ */
+static int ctx_fire_notification(u32 context_id, u32 priv_flags)
+{
+ u32 i, array_size;
+ struct vmci_ctx *sub_ctx;
+ struct vmci_handle_arr *subscriber_array;
+ struct vmci_handle context_handle =
+ vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+
+ /*
+ * We create an array to hold the subscribers we find when
+ * scanning through all contexts.
+ */
+ subscriber_array = vmci_handle_arr_create(0);
+ if (subscriber_array == NULL)
+ return VMCI_ERROR_NO_MEM;
+
+ /*
+ * Scan all contexts to find who is interested in being
+ * notified about given contextID.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
+ struct vmci_handle_list *node;
+
+ /*
+ * We only deliver notifications of the removal of
+ * contexts if the two contexts are allowed to
+ * interact.
+ */
+ if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
+ continue;
+
+ list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
+ if (!vmci_handle_is_equal(node->handle, context_handle))
+ continue;
+
+ vmci_handle_arr_append_entry(&subscriber_array,
+ vmci_make_handle(sub_ctx->cid,
+ VMCI_EVENT_HANDLER));
+ }
+ }
+ rcu_read_unlock();
+
+ /* Fire event to all subscribers. */
+ array_size = vmci_handle_arr_get_size(subscriber_array);
+ for (i = 0; i < array_size; i++) {
+ int result;
+ struct vmci_event_ctx ev;
+
+ ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
+ ev.payload.context_id = context_id;
+
+ result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+ &ev.msg.hdr, false);
+ if (result < VMCI_SUCCESS) {
+ pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
+ ev.msg.event_data.event,
+ ev.msg.hdr.dst.context);
+ /* We continue to enqueue on next subscriber. */
+ }
+ }
+ vmci_handle_arr_destroy(subscriber_array);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Returns the current number of pending datagrams. The call may
+ * also serve as a synchronization point for the datagram queue,
+ * as no enqueue operations can occur concurrently.
+ */
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
+{
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(cid);
+ if (context == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ spin_lock(&context->lock);
+ if (pending)
+ *pending = context->pending_datagrams;
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Queues a VMCI datagram for the appropriate target VM context.
+ */
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
+{
+ struct vmci_datagram_queue_entry *dq_entry;
+ struct vmci_ctx *context;
+ struct vmci_handle dg_src;
+ size_t vmci_dg_size;
+
+ vmci_dg_size = VMCI_DG_SIZE(dg);
+ if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
+ pr_devel("Datagram too large (bytes=%Zu)\n", vmci_dg_size);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /* Get the target VM's VMCI context. */
+ context = vmci_ctx_get(cid);
+ if (!context) {
+ pr_devel("Invalid context (ID=0x%x)\n", cid);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /* Allocate guest call entry and add it to the target VM's queue. */
+ dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
+ if (dq_entry == NULL) {
+ pr_warn("Failed to allocate memory for datagram\n");
+ vmci_ctx_put(context);
+ return VMCI_ERROR_NO_MEM;
+ }
+ dq_entry->dg = dg;
+ dq_entry->dg_size = vmci_dg_size;
+ dg_src = dg->src;
+ INIT_LIST_HEAD(&dq_entry->list_item);
+
+ spin_lock(&context->lock);
+
+ /*
+ * We put a higher limit on datagrams from the hypervisor. If
+ * the pending datagram is not from hypervisor, then we check
+ * if enqueueing it would exceed the
+ * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If
+ * the pending datagram is from hypervisor, we allow it to be
+ * queued at the destination side provided we don't reach the
+ * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
+ */
+ if (context->datagram_queue_size + vmci_dg_size >=
+ VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
+ (!vmci_handle_is_equal(dg_src,
+ vmci_make_handle
+ (VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID)) ||
+ context->datagram_queue_size + vmci_dg_size >=
+ VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+ kfree(dq_entry);
+ pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ list_add(&dq_entry->list_item, &context->datagram_queue);
+ context->pending_datagrams++;
+ context->datagram_queue_size += vmci_dg_size;
+ ctx_signal_notify(context);
+ wake_up(&context->host_context.wait_queue);
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return vmci_dg_size;
+}
+
+/*
+ * Verifies whether a context with the specified context ID exists.
+ * FIXME: utility is dubious as no decisions can be reliably made
+ * using this data, since contexts can appear and disappear at any time.
+ */
+bool vmci_ctx_exists(u32 cid)
+{
+ struct vmci_ctx *context;
+ bool exists = false;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
+ if (context->cid == cid) {
+ exists = true;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ return exists;
+}
+
+/*
+ * Retrieves VMCI context corresponding to the given cid.
+ */
+struct vmci_ctx *vmci_ctx_get(u32 cid)
+{
+ struct vmci_ctx *c, *context = NULL;
+
+ if (cid == VMCI_INVALID_ID)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
+ if (c->cid == cid) {
+ /*
+ * The context owner drops its own reference to the
+ * context only after removing it from the list and
+ * waiting for RCU grace period to expire. This
+ * means that we are not about to increase the
+ * reference count of something that is in the
+ * process of being destroyed.
+ */
+ context = c;
+ kref_get(&context->kref);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return context;
+}
+
+/*
+ * Deallocates all parts of a context data structure. This
+ * function doesn't lock the context, because it assumes that
+ * the caller was holding the last reference to context.
+ */
+static void ctx_free_ctx(struct kref *kref)
+{
+ struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
+ struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
+ struct vmci_handle temp_handle;
+ struct vmci_handle_list *notifier, *tmp;
+
+ /*
+ * Fire event to all contexts interested in knowing this
+ * context is dying.
+ */
+ ctx_fire_notification(context->cid, context->priv_flags);
+
+ /*
+ * Cleanup all queue pair resources attached to context. If
+ * the VM dies without cleaning up, this code will make sure
+ * that no resources are leaked.
+ */
+ temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+ while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
+ if (vmci_qp_broker_detach(temp_handle,
+ context) < VMCI_SUCCESS) {
+ /*
+ * When vmci_qp_broker_detach() succeeds it
+ * removes the handle from the array. If
+ * detach fails, we must remove the handle
+ * ourselves.
+ */
+ vmci_handle_arr_remove_entry(context->queue_pair_array,
+ temp_handle);
+ }
+ temp_handle =
+ vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+ }
+
+ /*
+ * It is fine to destroy this without locking the callQueue, as
+ * this is the only thread having a reference to the context.
+ */
+ list_for_each_entry_safe(dq_entry, dq_entry_tmp,
+ &context->datagram_queue, list_item) {
+ WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
+ list_del(&dq_entry->list_item);
+ kfree(dq_entry->dg);
+ kfree(dq_entry);
+ }
+
+ list_for_each_entry_safe(notifier, tmp,
+ &context->notifier_list, node) {
+ list_del(&notifier->node);
+ kfree(notifier);
+ }
+
+ vmci_handle_arr_destroy(context->queue_pair_array);
+ vmci_handle_arr_destroy(context->doorbell_array);
+ vmci_handle_arr_destroy(context->pending_doorbell_array);
+ vmci_ctx_unset_notify(context);
+ if (context->cred)
+ put_cred(context->cred);
+ kfree(context);
+}
+
+/*
+ * Drops reference to VMCI context. If this is the last reference to
+ * the context it will be deallocated. A context is created with
+ * a reference count of one, and on destroy, it is removed from
+ * the context list before its reference count is decremented. Thus,
+ * if we reach zero, we are sure that nobody else is about to increment
+ * it (they need the entry in the context list for that), and so there
+ * is no need for locking.
+ */
+void vmci_ctx_put(struct vmci_ctx *context)
+{
+ kref_put(&context->kref, ctx_free_ctx);
+}
+
+/*
+ * Dequeues the next datagram and returns it to the caller.
+ * The caller passes in a pointer to the maximum size datagram
+ * it can handle, and the datagram is only dequeued if its
+ * size is less than max_size. If the datagram is larger,
+ * max_size is set to the size of the datagram to give the
+ * caller a chance to set up a larger buffer for the guestcall.
+ */
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+ size_t *max_size,
+ struct vmci_datagram **dg)
+{
+ struct vmci_datagram_queue_entry *dq_entry;
+ struct list_head *list_item;
+ int rv;
+
+ /* Dequeue the next datagram entry. */
+ spin_lock(&context->lock);
+ if (context->pending_datagrams == 0) {
+ ctx_clear_notify_call(context);
+ spin_unlock(&context->lock);
+ pr_devel("No datagrams pending\n");
+ return VMCI_ERROR_NO_MORE_DATAGRAMS;
+ }
+
+ list_item = context->datagram_queue.next;
+
+ dq_entry =
+ list_entry(list_item, struct vmci_datagram_queue_entry, list_item);
+
+ /* Check size of caller's buffer. */
+ if (*max_size < dq_entry->dg_size) {
+ *max_size = dq_entry->dg_size;
+ spin_unlock(&context->lock);
+ pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
+ (u32) *max_size);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ list_del(list_item);
+ context->pending_datagrams--;
+ context->datagram_queue_size -= dq_entry->dg_size;
+ if (context->pending_datagrams == 0) {
+ ctx_clear_notify_call(context);
+ rv = VMCI_SUCCESS;
+ } else {
+ /*
+ * Return the size of the next datagram.
+ */
+ struct vmci_datagram_queue_entry *next_entry;
+
+ list_item = context->datagram_queue.next;
+ next_entry =
+ list_entry(list_item, struct vmci_datagram_queue_entry,
+ list_item);
+
+ /*
+ * The following size_t -> int truncation is fine as
+ * the maximum size of a (routable) datagram is 68KB.
+ */
+ rv = (int)next_entry->dg_size;
+ }
+ spin_unlock(&context->lock);
+
+ /* Caller must free datagram. */
+ *dg = dq_entry->dg;
+ dq_entry->dg = NULL;
+ kfree(dq_entry);
+
+ return rv;
+}
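The max_size handshake implies a probe-and-retry pattern on the caller's side; a sketch, assuming the caller only tracks a capacity and frees the returned datagram itself:

        static int example_drain_one(struct vmci_ctx *context, size_t capacity)
        {
                struct vmci_datagram *dg;
                size_t size = capacity;
                int result;

                result = vmci_ctx_dequeue_datagram(context, &size, &dg);
                if (result == VMCI_ERROR_NO_MEM) {
                        /* size now holds the pending datagram's size; retry with it. */
                        result = vmci_ctx_dequeue_datagram(context, &size, &dg);
                }
                if (result < VMCI_SUCCESS)
                        return result;

                /* ... consume dg (at most size bytes) ... */
                kfree(dg);      /* the caller owns the dequeued datagram */
                return result;  /* size of the next pending datagram, or 0 */
        }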
+
+/*
+ * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the
+ * page mapped/locked by vmci_setup_notify().
+ */
+void vmci_ctx_unset_notify(struct vmci_ctx *context)
+{
+ struct page *notify_page;
+
+ spin_lock(&context->lock);
+
+ notify_page = context->notify_page;
+ context->notify = &ctx_dummy_notify;
+ context->notify_page = NULL;
+
+ spin_unlock(&context->lock);
+
+ if (notify_page) {
+ kunmap(notify_page);
+ put_page(notify_page);
+ }
+}
+
+/*
+ * Add remote_cid to the list of contexts the current context wants
+ * notifications from/about.
+ */
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle_list *notifier, *n;
+ int result;
+ bool exists = false;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
+ pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
+ context_id, remote_cid);
+ result = VMCI_ERROR_DST_UNREACHABLE;
+ goto out;
+ }
+
+ if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto out;
+ }
+
+ notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
+ if (!notifier) {
+ result = VMCI_ERROR_NO_MEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&notifier->node);
+ notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+ spin_lock(&context->lock);
+
+ list_for_each_entry(n, &context->notifier_list, node) {
+ if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+ exists = true;
+ break;
+ }
+ }
+
+ if (exists) {
+ kfree(notifier);
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ } else {
+ list_add_tail_rcu(&notifier->node, &context->notifier_list);
+ context->n_notifiers++;
+ result = VMCI_SUCCESS;
+ }
+
+ spin_unlock(&context->lock);
+
+ out:
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Remove remote_cid from current context's list of contexts it is
+ * interested in getting notifications from/about.
+ */
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle_list *notifier, *tmp;
+ struct vmci_handle handle;
+ bool found = false;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+ spin_lock(&context->lock);
+ list_for_each_entry_safe(notifier, tmp,
+ &context->notifier_list, node) {
+ if (vmci_handle_is_equal(notifier->handle, handle)) {
+ list_del_rcu(&notifier->node);
+ context->n_notifiers--;
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&context->lock);
+
+ if (found) {
+ synchronize_rcu();
+ kfree(notifier);
+ }
+
+ vmci_ctx_put(context);
+
+ return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+}
+
+static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
+ u32 *buf_size, void **pbuf)
+{
+ u32 *notifiers;
+ size_t data_size;
+ struct vmci_handle_list *entry;
+ int i = 0;
+
+ if (context->n_notifiers == 0) {
+ *buf_size = 0;
+ *pbuf = NULL;
+ return VMCI_SUCCESS;
+ }
+
+ data_size = context->n_notifiers * sizeof(*notifiers);
+ if (*buf_size < data_size) {
+ *buf_size = data_size;
+ return VMCI_ERROR_MORE_DATA;
+ }
+
+ notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
+ if (!notifiers)
+ return VMCI_ERROR_NO_MEM;
+
+ list_for_each_entry(entry, &context->notifier_list, node)
+ notifiers[i++] = entry->handle.context;
+
+ *buf_size = data_size;
+ *pbuf = notifiers;
+ return VMCI_SUCCESS;
+}
+
+static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+ u32 *buf_size, void **pbuf)
+{
+ struct dbell_cpt_state *dbells;
+ size_t n_doorbells;
+ int i;
+
+ n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+ if (n_doorbells > 0) {
+ size_t data_size = n_doorbells * sizeof(*dbells);
+ if (*buf_size < data_size) {
+ *buf_size = data_size;
+ return VMCI_ERROR_MORE_DATA;
+ }
+
+ dbells = kmalloc(data_size, GFP_ATOMIC);
+ if (!dbells)
+ return VMCI_ERROR_NO_MEM;
+
+ for (i = 0; i < n_doorbells; i++)
+ dbells[i].handle = vmci_handle_arr_get_entry(
+ context->doorbell_array, i);
+
+ *buf_size = data_size;
+ *pbuf = dbells;
+ } else {
+ *buf_size = 0;
+ *pbuf = NULL;
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Get current context's checkpoint state of given type.
+ */
+int vmci_ctx_get_chkpt_state(u32 context_id,
+ u32 cpt_type,
+ u32 *buf_size,
+ void **pbuf)
+{
+ struct vmci_ctx *context;
+ int result;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+
+ switch (cpt_type) {
+ case VMCI_NOTIFICATION_CPT_STATE:
+ result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
+ break;
+
+ case VMCI_WELLKNOWN_CPT_STATE:
+ /*
+ * For compatibility with VMX'en with VM to VM communication, we
+ * always return zero wellknown handles.
+ */
+
+ *buf_size = 0;
+ *pbuf = NULL;
+ result = VMCI_SUCCESS;
+ break;
+
+ case VMCI_DOORBELL_CPT_STATE:
+ result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
+ break;
+
+ default:
+ pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+ result = VMCI_ERROR_INVALID_ARGS;
+ break;
+ }
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
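Checkpoint readers typically probe with an empty buffer and grow on VMCI_ERROR_MORE_DATA, since the getters report the required size; a sketch (copy-out to user space omitted):

        static int example_read_notifier_cpt(u32 cid)
        {
                u32 buf_size = 0;
                void *buf = NULL;
                int result;

                result = vmci_ctx_get_chkpt_state(cid, VMCI_NOTIFICATION_CPT_STATE,
                                                  &buf_size, &buf);
                if (result == VMCI_ERROR_MORE_DATA) {
                        /* buf_size was updated to the required size; ask again. */
                        result = vmci_ctx_get_chkpt_state(cid,
                                                          VMCI_NOTIFICATION_CPT_STATE,
                                                          &buf_size, &buf);
                }
                if (result == VMCI_SUCCESS && buf) {
                        /* buf holds buf_size bytes of checkpoint state. */
                        kfree(buf);
                }
                return result;
        }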
+
+/*
+ * Set current context's checkpoint state of given type.
+ */
+int vmci_ctx_set_chkpt_state(u32 context_id,
+ u32 cpt_type,
+ u32 buf_size,
+ void *cpt_buf)
+{
+ u32 i;
+ u32 current_id;
+ int result = VMCI_SUCCESS;
+ u32 num_ids = buf_size / sizeof(u32);
+
+ if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
+ /*
+ * We would end up here if VMX with VM to VM communication
+ * attempts to restore a checkpoint with wellknown handles.
+ */
+ pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
+ return VMCI_ERROR_OBSOLETE;
+ }
+
+ if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
+ pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
+ current_id = ((u32 *)cpt_buf)[i];
+ result = vmci_ctx_add_notification(context_id, current_id);
+ if (result != VMCI_SUCCESS)
+ break;
+ }
+ if (result != VMCI_SUCCESS)
+ pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
+ cpt_type, result);
+
+ return result;
+}
+
+/*
+ * Retrieves the specified context's pending notifications in the
+ * form of a handle array. The handle arrays returned are the
+ * actual data, not a copy, and should not be modified by the
+ * caller. They must be released using
+ * vmci_ctx_rcv_notifications_release.
+ */
+int vmci_ctx_rcv_notifications_get(u32 context_id,
+ struct vmci_handle_arr **db_handle_array,
+ struct vmci_handle_arr **qp_handle_array)
+{
+ struct vmci_ctx *context;
+ int result = VMCI_SUCCESS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+
+ *db_handle_array = context->pending_doorbell_array;
+ context->pending_doorbell_array = vmci_handle_arr_create(0);
+ if (!context->pending_doorbell_array) {
+ context->pending_doorbell_array = *db_handle_array;
+ *db_handle_array = NULL;
+ result = VMCI_ERROR_NO_MEM;
+ }
+ *qp_handle_array = NULL;
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
+
+/*
+ * Releases handle arrays with pending notifications previously
+ * retrieved using vmci_ctx_rcv_notifications_get. If the
+ * notifications were not successfully handed over to the guest,
+ * success must be false.
+ */
+void vmci_ctx_rcv_notifications_release(u32 context_id,
+ struct vmci_handle_arr *db_handle_array,
+ struct vmci_handle_arr *qp_handle_array,
+ bool success)
+{
+ struct vmci_ctx *context = vmci_ctx_get(context_id);
+
+ spin_lock(&context->lock);
+ if (!success) {
+ struct vmci_handle handle;
+
+ /*
+ * New notifications may have been added while we were not
+ * holding the context lock, so we transfer any new pending
+ * doorbell notifications to the old array, and reinstate the
+ * old array.
+ */
+
+ handle = vmci_handle_arr_remove_tail(
+ context->pending_doorbell_array);
+ while (!vmci_handle_is_invalid(handle)) {
+ if (!vmci_handle_arr_has_entry(db_handle_array,
+ handle)) {
+ vmci_handle_arr_append_entry(
+ &db_handle_array, handle);
+ }
+ handle = vmci_handle_arr_remove_tail(
+ context->pending_doorbell_array);
+ }
+ vmci_handle_arr_destroy(context->pending_doorbell_array);
+ context->pending_doorbell_array = db_handle_array;
+ db_handle_array = NULL;
+ } else {
+ ctx_clear_notify_call(context);
+ }
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ if (db_handle_array)
+ vmci_handle_arr_destroy(db_handle_array);
+
+ if (qp_handle_array)
+ vmci_handle_arr_destroy(qp_handle_array);
+}
+
+/*
+ * Registers that a new doorbell handle has been allocated by the
+ * context. Only registered doorbell handles can be notified.
+ */
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
+{
+ struct vmci_ctx *context;
+ int result;
+
+ if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
+ vmci_handle_arr_append_entry(&context->doorbell_array, handle);
+ result = VMCI_SUCCESS;
+ } else {
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
+
+/*
+ * Unregisters a doorbell handle that was previously registered
+ * with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle removed_handle;
+
+ if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ removed_handle =
+ vmci_handle_arr_remove_entry(context->doorbell_array, handle);
+ vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle);
+ spin_unlock(&context->lock);
+
+ vmci_ctx_put(context);
+
+ return vmci_handle_is_invalid(removed_handle) ?
+ VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Unregisters all doorbell handles that were previously
+ * registered with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy_all(u32 context_id)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle handle;
+
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ do {
+ struct vmci_handle_arr *arr = context->doorbell_array;
+ handle = vmci_handle_arr_remove_tail(arr);
+ } while (!vmci_handle_is_invalid(handle));
+ do {
+ struct vmci_handle_arr *arr = context->pending_doorbell_array;
+ handle = vmci_handle_arr_remove_tail(arr);
+ } while (!vmci_handle_is_invalid(handle));
+ spin_unlock(&context->lock);
+
+ vmci_ctx_put(context);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Registers a notification of a doorbell handle initiated by the
+ * specified source context. The notification of doorbells is
+ * subject to the same isolation rules as datagram delivery. To
+ * allow host-side senders of notifications a finer granularity
+ * of sender rights than those assigned to the sending context
+ * itself, the host context is required to specify a different
+ * set of privilege flags that will override the privileges of
+ * the source context.
+ */
+int vmci_ctx_notify_dbell(u32 src_cid,
+ struct vmci_handle handle,
+ u32 src_priv_flags)
+{
+ struct vmci_ctx *dst_context;
+ int result;
+
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /* Get the target VM's VMCI context. */
+ dst_context = vmci_ctx_get(handle.context);
+ if (!dst_context) {
+ pr_devel("Invalid context (ID=0x%x)\n", handle.context);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ if (src_cid != handle.context) {
+ u32 dst_priv_flags;
+
+ if (VMCI_CONTEXT_IS_VM(src_cid) &&
+ VMCI_CONTEXT_IS_VM(handle.context)) {
+ pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
+ src_cid, handle.context);
+ result = VMCI_ERROR_DST_UNREACHABLE;
+ goto out;
+ }
+
+ result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ goto out;
+ }
+
+ if (src_cid != VMCI_HOST_CONTEXT_ID ||
+ src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
+ src_priv_flags = vmci_context_get_priv_flags(src_cid);
+ }
+
+ if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto out;
+ }
+ }
+
+ if (handle.context == VMCI_HOST_CONTEXT_ID) {
+ result = vmci_dbell_host_context_notify(src_cid, handle);
+ } else {
+ spin_lock(&dst_context->lock);
+
+ if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
+ handle)) {
+ result = VMCI_ERROR_NOT_FOUND;
+ } else {
+ if (!vmci_handle_arr_has_entry(
+ dst_context->pending_doorbell_array,
+ handle)) {
+ vmci_handle_arr_append_entry(
+ &dst_context->pending_doorbell_array,
+ handle);
+
+ ctx_signal_notify(dst_context);
+ wake_up(&dst_context->host_context.wait_queue);
+
+ }
+ result = VMCI_SUCCESS;
+ }
+ spin_unlock(&dst_context->lock);
+ }
+
+ out:
+ vmci_ctx_put(dst_context);
+
+ return result;
+}
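A sketch of a host-side notification using the privilege override described above, assuming the handle names a doorbell already registered by the destination context:

        static void example_ring_guest_doorbell(struct vmci_handle handle)
        {
                int result;

                /* Host rings on behalf of a trusted service, overriding its own flags. */
                result = vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID, handle,
                                               VMCI_PRIVILEGE_FLAG_TRUSTED);
                if (result < VMCI_SUCCESS)
                        pr_devel("Doorbell notification failed (err=%d)\n", result);
        }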
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
+{
+ return context && context->user_version >= VMCI_VERSION_HOSTQP;
+}
+
+/*
+ * Registers that a new queue pair handle has been allocated by
+ * the context.
+ */
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ int result;
+
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
+ vmci_handle_arr_append_entry(&context->queue_pair_array,
+ handle);
+ result = VMCI_SUCCESS;
+ } else {
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ return result;
+}
+
+/*
+ * Unregisters a queue pair handle that was previously registered
+ * with vmci_ctx_qp_create.
+ */
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ struct vmci_handle hndl;
+
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);
+
+ return vmci_handle_is_invalid(hndl) ?
+ VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Determines whether a given queue pair handle is registered
+ * with the given context.
+ */
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return false;
+
+ return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
+}
+
+/*
+ * vmci_context_get_priv_flags() - Retrieve privilege flags.
+ * @context_id: The context ID of the VMCI context.
+ *
+ * Retrieves privilege flags of the given VMCI context ID.
+ */
+u32 vmci_context_get_priv_flags(u32 context_id)
+{
+ if (vmci_host_code_active()) {
+ u32 flags;
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_LEAST_PRIVILEGE_FLAGS;
+
+ flags = context->priv_flags;
+ vmci_ctx_put(context);
+ return flags;
+ }
+ return VMCI_NO_PRIVILEGE_FLAGS;
+}
+EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);
+
+/*
+ * vmci_is_context_owner() - Determines if user is the context owner
+ * @context_id: The context ID of the VMCI context.
+ * @uid: The host user id (real kernel value).
+ *
+ * Determines whether a given UID is the owner of a given VMCI context.
+ */
+bool vmci_is_context_owner(u32 context_id, kuid_t uid)
+{
+ bool is_owner = false;
+
+ if (vmci_host_code_active()) {
+ struct vmci_ctx *context = vmci_ctx_get(context_id);
+ if (context) {
+ if (context->cred)
+ is_owner = uid_eq(context->cred->uid, uid);
+ vmci_ctx_put(context);
+ }
+ }
+
+ return is_owner;
+}
+EXPORT_SYMBOL_GPL(vmci_is_context_owner);
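The exported helper lends itself to a host-side permission guard; a hypothetical sketch:

        static int example_may_manage_context(u32 cid)
        {
                /* Hypothetical ioctl guard: only the creating user may manage it. */
                if (!vmci_is_context_owner(cid, current_uid()))
                        return -EPERM;

                return 0;
        }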
diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h
new file mode 100644
index 000000000000..24a88e68a1e6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.h
@@ -0,0 +1,182 @@
+/*
+ * VMware VMCI driver (vmciContext.h)
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_CONTEXT_H_
+#define _VMCI_CONTEXT_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_datagram.h"
+
+/* Used to determine what checkpoint state to get and set. */
+enum {
+ VMCI_NOTIFICATION_CPT_STATE = 1,
+ VMCI_WELLKNOWN_CPT_STATE = 2,
+ VMCI_DG_OUT_STATE = 3,
+ VMCI_DG_IN_STATE = 4,
+ VMCI_DG_IN_SIZE_STATE = 5,
+ VMCI_DOORBELL_CPT_STATE = 6,
+};
+
+/* Host specific struct used for signalling */
+struct vmci_host {
+ wait_queue_head_t wait_queue;
+};
+
+struct vmci_handle_list {
+ struct list_head node;
+ struct vmci_handle handle;
+};
+
+struct vmci_ctx {
+ struct list_head list_item; /* For global VMCI list. */
+ u32 cid;
+ struct kref kref;
+ struct list_head datagram_queue; /* Head of per VM queue. */
+ u32 pending_datagrams;
+ size_t datagram_queue_size; /* Size of datagram queue in bytes. */
+
+ /*
+ * Version of the code that created
+ * this context; e.g., VMX.
+ */
+ int user_version;
+ spinlock_t lock; /* Locks callQueue and handle_arrays. */
+
+ /*
+ * Queue pairs attached to the context. The array of
+ * handles for queue pairs is accessed
+ * from the code for QP API, and there
+ * it is protected by the QP lock. It
+ * is also accessed from the context
+ * clean up path, which does not
+ * require a lock. VMCILock is not
+ * used to protect the QP array field.
+ */
+ struct vmci_handle_arr *queue_pair_array;
+
+ /* Doorbells created by context. */
+ struct vmci_handle_arr *doorbell_array;
+
+ /* Doorbells pending for context. */
+ struct vmci_handle_arr *pending_doorbell_array;
+
+ /* Contexts current context is subscribing to. */
+ struct list_head notifier_list;
+ unsigned int n_notifiers;
+
+ struct vmci_host host_context;
+ u32 priv_flags;
+
+ const struct cred *cred;
+ bool *notify; /* Notify flag pointer - hosted only. */
+ struct page *notify_page; /* Page backing the notify UVA. */
+};
+
+/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
+struct vmci_ctx_info {
+ u32 remote_cid;
+ int result;
+};
+
+/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
+struct vmci_ctx_chkpt_buf_info {
+ u64 cpt_buf;
+ u32 cpt_type;
+ u32 buf_size;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * VMCINotificationReceiveInfo: Used to receive pending notifications
+ * for doorbells and queue pairs.
+ */
+struct vmci_ctx_notify_recv_info {
+ u64 db_handle_buf_uva;
+ u64 db_handle_buf_size;
+ u64 qp_handle_buf_uva;
+ u64 qp_handle_buf_size;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * Utility function that checks whether two entities are allowed
+ * to interact. If one of them is restricted, the other one must
+ * be trusted.
+ */
+static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
+{
+ return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
+ ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
+}
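To make the restricted/trusted rule concrete, a hedged illustration of how the check evaluates for two flag combinations from vmw_vmci_defs.h:

        /* Illustrative only; not part of the driver. */
        static inline void example_deny_interaction_rules(void)
        {
                /* A restricted endpoint may not talk to an untrusted peer: denied. */
                bool denied = vmci_deny_interaction(VMCI_PRIVILEGE_FLAG_RESTRICTED,
                                                    VMCI_NO_PRIVILEGE_FLAGS);

                /* ...but it may talk to a trusted one: allowed. */
                bool allowed = !vmci_deny_interaction(VMCI_PRIVILEGE_FLAG_RESTRICTED,
                                                      VMCI_PRIVILEGE_FLAG_TRUSTED);

                (void)denied;
                (void)allowed;
        }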
+
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
+ uintptr_t event_hnd, int version,
+ const struct cred *cred);
+void vmci_ctx_destroy(struct vmci_ctx *context);
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+ size_t *max_size, struct vmci_datagram **dg);
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
+struct vmci_ctx *vmci_ctx_get(u32 cid);
+void vmci_ctx_put(struct vmci_ctx *context);
+bool vmci_ctx_exists(u32 cid);
+
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
+ u32 *num_cids, void **cpt_buf_ptr);
+int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
+ u32 num_cids, void *cpt_buf);
+
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
+
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
+void vmci_ctx_unset_notify(struct vmci_ctx *context);
+
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy_all(u32 context_id);
+int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
+ u32 src_priv_flags);
+
+int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
+ **db_handle_array, struct vmci_handle_arr
+ **qp_handle_array);
+void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
+ *db_handle_array, struct vmci_handle_arr
+ *qp_handle_array, bool success);
+
+static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
+{
+ if (!context)
+ return VMCI_INVALID_ID;
+ return context->cid;
+}
+
+#endif /* _VMCI_CONTEXT_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
new file mode 100644
index 000000000000..ed5c433cd493
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -0,0 +1,500 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * struct datagram_entry describes the datagram entity. It is used for datagram
+ * entities created only on the host.
+ */
+struct datagram_entry {
+ struct vmci_resource resource;
+ u32 flags;
+ bool run_delayed;
+ vmci_datagram_recv_cb recv_cb;
+ void *client_data;
+ u32 priv_flags;
+};
+
+struct delayed_datagram_info {
+ struct datagram_entry *entry;
+ struct vmci_datagram msg;
+ struct work_struct work;
+ bool in_dg_host_queue;
+};
+
+/* Number of in-flight host->host datagrams */
+static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);
+
+/*
+ * Create a datagram entry given a handle pointer.
+ */
+static int dg_create_handle(u32 resource_id,
+ u32 flags,
+ u32 priv_flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data, struct vmci_handle *out_handle)
+{
+ int result;
+ u32 context_id;
+ struct vmci_handle handle;
+ struct datagram_entry *entry;
+
+ if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
+ context_id = VMCI_INVALID_ID;
+ } else {
+ context_id = vmci_get_context_id();
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ handle = vmci_make_handle(context_id, resource_id);
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_warn("Failed allocating memory for datagram entry\n");
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false;
+ entry->flags = flags;
+ entry->recv_cb = recv_cb;
+ entry->client_data = client_data;
+ entry->priv_flags = priv_flags;
+
+ /* Make datagram resource live. */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_DATAGRAM,
+ handle);
+ if (result != VMCI_SUCCESS) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+ handle.context, handle.resource, result);
+ kfree(entry);
+ return result;
+ }
+
+ *out_handle = vmci_resource_handle(&entry->resource);
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Internal utility function with the same purpose as
+ * vmci_datagram_get_priv_flags that also takes a context_id.
+ */
+static int vmci_datagram_get_priv_flags(u32 context_id,
+ struct vmci_handle handle,
+ u32 *priv_flags)
+{
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (context_id == VMCI_HOST_CONTEXT_ID) {
+ struct datagram_entry *src_entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src_entry = container_of(resource, struct datagram_entry,
+ resource);
+ *priv_flags = src_entry->priv_flags;
+ vmci_resource_put(resource);
+ } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID)
+ *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS;
+ else
+ *priv_flags = vmci_context_get_priv_flags(context_id);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dg_delayed_dispatch(struct work_struct *work)
+{
+ struct delayed_datagram_info *dg_info =
+ container_of(work, struct delayed_datagram_info, work);
+
+ dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg);
+
+ vmci_resource_put(&dg_info->entry->resource);
+
+ if (dg_info->in_dg_host_queue)
+ atomic_dec(&delayed_dg_host_queue_size);
+
+ kfree(dg_info);
+}
+
+/*
+ * Dispatch datagram as a host, to the host, or to another VM context. This
+ * function cannot dispatch to hypervisor context handlers. This should
+ * have been handled before we get here by vmci_datagram_dispatch.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
+{
+ int retval;
+ size_t dg_size;
+ u32 src_priv_flags;
+
+ dg_size = VMCI_DG_SIZE(dg);
+
+ /* Host cannot send to the hypervisor. */
+ if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /* Check that source handle matches sending context. */
+ if (dg->src.context != context_id) {
+ pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
+ context_id, dg->src.context, dg->src.resource);
+ return VMCI_ERROR_NO_ACCESS;
+ }
+
+ /* Get hold of privileges of sending endpoint. */
+ retval = vmci_datagram_get_priv_flags(context_id, dg->src,
+ &src_priv_flags);
+ if (retval != VMCI_SUCCESS) {
+ pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n",
+ dg->src.context, dg->src.resource);
+ return retval;
+ }
+
+ /* Determine if we should route to host or guest destination. */
+ if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
+ /* Route to host datagram entry. */
+ struct datagram_entry *dst_entry;
+ struct vmci_resource *resource;
+
+ if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+ dg->dst.resource == VMCI_EVENT_HANDLER) {
+ return vmci_event_dispatch(dg);
+ }
+
+ resource = vmci_resource_by_handle(dg->dst,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n",
+ dg->dst.context, dg->dst.resource);
+ return VMCI_ERROR_INVALID_RESOURCE;
+ }
+ dst_entry = container_of(resource, struct datagram_entry,
+ resource);
+ if (vmci_deny_interaction(src_priv_flags,
+ dst_entry->priv_flags)) {
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_ACCESS;
+ }
+
+ /*
+ * If a VMCI datagram destined for the host is also sent by the
+ * host, we always run it delayed. This ensures that no locks
+ * are held when the datagram callback runs.
+ */
+ if (dst_entry->run_delayed ||
+ dg->src.context == VMCI_HOST_CONTEXT_ID) {
+ struct delayed_datagram_info *dg_info;
+
+ if (atomic_add_return(1, &delayed_dg_host_queue_size)
+ == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
+ atomic_dec(&delayed_dg_host_queue_size);
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info = kmalloc(sizeof(*dg_info) +
+ (size_t) dg->payload_size, GFP_ATOMIC);
+ if (!dg_info) {
+ atomic_dec(&delayed_dg_host_queue_size);
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info->in_dg_host_queue = true;
+ dg_info->entry = dst_entry;
+ memcpy(&dg_info->msg, dg, dg_size);
+
+ INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ schedule_work(&dg_info->work);
+ retval = VMCI_SUCCESS;
+
+ } else {
+ retval = dst_entry->recv_cb(dst_entry->client_data, dg);
+ vmci_resource_put(resource);
+ if (retval < VMCI_SUCCESS)
+ return retval;
+ }
+ } else {
+ /* Route to destination VM context. */
+ struct vmci_datagram *new_dg;
+
+ if (context_id != dg->dst.context) {
+ if (vmci_deny_interaction(src_priv_flags,
+ vmci_context_get_priv_flags
+ (dg->dst.context))) {
+ return VMCI_ERROR_NO_ACCESS;
+ } else if (VMCI_CONTEXT_IS_VM(context_id)) {
+ /*
+ * If the sending context is a VM, it
+ * cannot reach another VM.
+ */
+
+ pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
+ context_id, dg->dst.context);
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+ }
+
+ /* We make a copy to enqueue. */
+ new_dg = kmalloc(dg_size, GFP_KERNEL);
+ if (new_dg == NULL)
+ return VMCI_ERROR_NO_MEM;
+
+ memcpy(new_dg, dg, dg_size);
+ retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
+ if (retval < VMCI_SUCCESS) {
+ kfree(new_dg);
+ return retval;
+ }
+ }
+
+ /*
+ * We currently truncate the size to signed 32 bits. This doesn't
+ * matter for this handler as it only supports 4Kb messages.
+ */
+ return (int)dg_size;
+}
+
+/*
+ * Dispatch datagram as a guest, down through the VMX and potentially to
+ * the host.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_guest(struct vmci_datagram *dg)
+{
+ int retval;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(dg->src,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource)
+ return VMCI_ERROR_NO_HANDLE;
+
+ retval = vmci_send_datagram(dg);
+ vmci_resource_put(resource);
+ return retval;
+}
+
+/*
+ * Dispatch datagram. This will determine the routing for the datagram
+ * and dispatch it accordingly.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+int vmci_datagram_dispatch(u32 context_id,
+ struct vmci_datagram *dg, bool from_guest)
+{
+ int retval;
+ enum vmci_route route;
+
+ BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
+
+ if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
+ pr_devel("Payload (size=%llu bytes) too big to send\n",
+ (unsigned long long)dg->payload_size);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
+ if (retval < VMCI_SUCCESS) {
+ pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
+ dg->src.context, dg->dst.context, retval);
+ return retval;
+ }
+
+ if (VMCI_ROUTE_AS_HOST == route) {
+ if (VMCI_INVALID_ID == context_id)
+ context_id = VMCI_HOST_CONTEXT_ID;
+ return dg_dispatch_as_host(context_id, dg);
+ }
+
+ if (VMCI_ROUTE_AS_GUEST == route)
+ return dg_dispatch_as_guest(dg);
+
+ pr_warn("Unknown route (%d) for datagram\n", route);
+ return VMCI_ERROR_DST_UNREACHABLE;
+}
+
+/*
+ * Invoke the handler for the given datagram. This is intended to be
+ * called only when acting as a guest and receiving a datagram from the
+ * virtual device.
+ */
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+{
+ struct vmci_resource *resource;
+ struct datagram_entry *dst_entry;
+
+ resource = vmci_resource_by_handle(dg->dst,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
+ dg->dst.context, dg->dst.resource);
+ return VMCI_ERROR_NO_HANDLE;
+ }
+
+ dst_entry = container_of(resource, struct datagram_entry, resource);
+ if (dst_entry->run_delayed) {
+ struct delayed_datagram_info *dg_info;
+
+ dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
+ GFP_ATOMIC);
+ if (!dg_info) {
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info->in_dg_host_queue = false;
+ dg_info->entry = dst_entry;
+ memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+
+ INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ schedule_work(&dg_info->work);
+ } else {
+ dst_entry->recv_cb(dst_entry->client_data, dg);
+ vmci_resource_put(resource);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
+ * @resource_id: The resource ID.
+ * @flags: Datagram Flags.
+ * @priv_flags: Privilege Flags.
+ * @recv_cb: Callback when receiving datagrams.
+ * @client_data: Opaque pointer passed to @recv_cb.
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to it.
+ */
+int vmci_datagram_create_handle_priv(u32 resource_id,
+ u32 flags,
+ u32 priv_flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data,
+ struct vmci_handle *out_handle)
+{
+ if (out_handle == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (recv_cb == NULL) {
+ pr_devel("Client callback needed when creating datagram\n");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
+ client_data, out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
+
+/*
+ * vmci_datagram_create_handle() - Create host context datagram endpoint
+ * @resource_id: Resource ID.
+ * @flags: Datagram Flags.
+ * @recv_cb: Callback when receiving datagrams.
+ * @client_data: Opaque pointer passed to @recv_cb.
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to
+ * it. Same as vmci_datagram_create_handle_priv without the privilege
+ * flags argument.
+ */
+int vmci_datagram_create_handle(u32 resource_id,
+ u32 flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data,
+ struct vmci_handle *out_handle)
+{
+ return vmci_datagram_create_handle_priv(
+ resource_id, flags,
+ VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+ recv_cb, client_data,
+ out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
+
+/*
+ * vmci_datagram_destroy_handle() - Destroys datagram handle
+ * @handle: vmci_handle to be destroyed and reaped.
+ *
+ * Use this function to destroy any datagram handles created by
+ * vmci_datagram_create_handle{,Priv} functions.
+ */
+int vmci_datagram_destroy_handle(struct vmci_handle handle)
+{
+ struct datagram_entry *entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct datagram_entry, resource);
+
+ vmci_resource_put(&entry->resource);
+ vmci_resource_remove(&entry->resource);
+ kfree(entry);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
+
+/*
+ * vmci_datagram_send() - Send a datagram
+ * @msg: The datagram to send.
+ *
+ * Sends the provided datagram on its merry way.
+ */
+int vmci_datagram_send(struct vmci_datagram *msg)
+{
+ if (msg == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_send);
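Taken together, the exported calls form the usual host-endpoint lifecycle; a hedged sketch in which the resource ID, payload layout and peer context are illustrative only:

        #include <linux/vmw_vmci_defs.h>
        #include <linux/vmw_vmci_api.h>

        /* Hypothetical receive callback; an error return would be propagated
         * back to a host-to-host sender. */
        static int example_recv_cb(void *client_data, struct vmci_datagram *dg)
        {
                pr_info("got %llu payload bytes\n",
                        (unsigned long long)dg->payload_size);
                return VMCI_SUCCESS;
        }

        static int example_endpoint(u32 peer_cid)
        {
                struct { struct vmci_datagram hdr; u8 payload[8]; } msg = { };
                struct vmci_handle handle;
                int result;

                result = vmci_datagram_create_handle(0x42, VMCI_FLAG_DG_NONE,
                                                     example_recv_cb, NULL, &handle);
                if (result != VMCI_SUCCESS)
                        return result;

                msg.hdr.dst = vmci_make_handle(peer_cid, 0x42);
                msg.hdr.src = handle;
                msg.hdr.payload_size = sizeof(msg.payload);

                result = vmci_datagram_send(&msg.hdr); /* bytes sent, or an error */

                vmci_datagram_destroy_handle(handle);
                return result < VMCI_SUCCESS ? result : VMCI_SUCCESS;
        }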
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h
new file mode 100644
index 000000000000..eb4aab7f64ec
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DATAGRAM_H_
+#define _VMCI_DATAGRAM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vmci_context.h"
+
+#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256
+
+/*
+ * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI
+ * datagram queues. It is allocated in non-paged memory, as the
+ * content is accessed while holding a spinlock. The pending datagram
+ * itself may be allocated from paged memory. We shadow the size of
+ * the datagram in the non-paged queue entry as this size is used
+ * while holding the same spinlock as above.
+ */
+struct vmci_datagram_queue_entry {
+ struct list_head list_item; /* For queuing. */
+ size_t dg_size; /* Size of datagram. */
+ struct vmci_datagram *dg; /* Pending datagram. */
+};
+
+/* VMCIDatagramSendRecvInfo */
+struct vmci_datagram_snd_rcv_info {
+ u64 addr;
+ u32 len;
+ s32 result;
+};
+
+/* Datagram API for non-public use. */
+int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg,
+ bool from_guest);
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg);
+
+#endif /* _VMCI_DATAGRAM_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
new file mode 100644
index 000000000000..c3e8397f62ed
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -0,0 +1,604 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/completion.h>
+#include <linux/hash.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+
+#define VMCI_DOORBELL_INDEX_BITS 6
+#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS)
+#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
+
+/*
+ * struct dbell_entry describes a doorbell notification handle allocated by the
+ * host.
+ */
+struct dbell_entry {
+ struct vmci_resource resource;
+ struct hlist_node node;
+ struct work_struct work;
+ vmci_callback notify_cb;
+ void *client_data;
+ u32 idx;
+ u32 priv_flags;
+ bool run_delayed;
+ atomic_t active; /* Only used by guest personality */
+};
+
+/* The VMCI index table keeps track of currently registered doorbells. */
+struct dbell_index_table {
+ spinlock_t lock; /* Index table lock */
+ struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
+};
+
+static struct dbell_index_table vmci_doorbell_it = {
+ .lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
+};
+
+/*
+ * The max_notify_idx is one larger than the currently known bitmap index in
+ * use, and is used to determine how much of the bitmap needs to be scanned.
+ */
+static u32 max_notify_idx;
+
+/*
+ * The notify_idx_count is used for determining whether there are free entries
+ * within the bitmap (if notify_idx_count + 1 < max_notify_idx).
+ */
+static u32 notify_idx_count;
+
+/*
+ * The last_notify_idx_reserved is used to track the last index handed out - in
+ * the case where multiple handles share a notification index, we hand out
+ * indexes round robin based on last_notify_idx_reserved.
+ */
+static u32 last_notify_idx_reserved;
+
+/* This is a one-entry cache used by the index allocation. */
+static u32 last_notify_idx_released = PAGE_SIZE;
+
+
+/*
+ * Utility function that retrieves the privilege flags associated
+ * with a given doorbell handle. For guest endpoints, the
+ * privileges are determined by the context ID, but for host
+ * endpoints privileges are associated with the complete
+ * handle. Hypervisor endpoints are not yet supported.
+ */
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
+{
+ if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (handle.context == VMCI_HOST_CONTEXT_ID) {
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource)
+ return VMCI_ERROR_NOT_FOUND;
+
+ entry = container_of(resource, struct dbell_entry, resource);
+ *priv_flags = entry->priv_flags;
+ vmci_resource_put(resource);
+ } else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
+ /*
+ * Hypervisor endpoints for notifications are not
+ * supported (yet).
+ */
+ return VMCI_ERROR_INVALID_ARGS;
+ } else {
+ *priv_flags = vmci_context_get_priv_flags(handle.context);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Find doorbell entry by bitmap index.
+ */
+static struct dbell_entry *dbell_index_table_find(u32 idx)
+{
+ u32 bucket = VMCI_DOORBELL_HASH(idx);
+ struct dbell_entry *dbell;
+ struct hlist_node *node;
+
+ hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+ node) {
+ if (idx == dbell->idx)
+ return dbell;
+ }
+
+ return NULL;
+}
+
+/*
+ * Add the given entry to the index table. This will take a reference to the
+ * entry's resource so that the entry is not deleted before it is removed from
+ * the table.
+ */
+static void dbell_index_table_add(struct dbell_entry *entry)
+{
+ u32 bucket;
+ u32 new_notify_idx;
+
+ vmci_resource_get(&entry->resource);
+
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ /*
+ * Below we try to allocate an index in the notification
+ * bitmap with "not too much" sharing between resources. If we
+ * use less than the full bitmap, we either add to the end if
+ * there are no unused flags within the currently used area,
+ * or we search for unused ones. If we use the full bitmap, we
+ * allocate the index round robin.
+ */
+ if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
+ if (last_notify_idx_released < max_notify_idx &&
+ !dbell_index_table_find(last_notify_idx_released)) {
+ new_notify_idx = last_notify_idx_released;
+ last_notify_idx_released = PAGE_SIZE;
+ } else {
+ bool reused = false;
+ new_notify_idx = last_notify_idx_reserved;
+ if (notify_idx_count + 1 < max_notify_idx) {
+ do {
+ if (!dbell_index_table_find
+ (new_notify_idx)) {
+ reused = true;
+ break;
+ }
+ new_notify_idx = (new_notify_idx + 1) %
+ max_notify_idx;
+ } while (new_notify_idx !=
+ last_notify_idx_released);
+ }
+ if (!reused) {
+ new_notify_idx = max_notify_idx;
+ max_notify_idx++;
+ }
+ }
+ } else {
+ new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
+ }
+
+ last_notify_idx_reserved = new_notify_idx;
+ notify_idx_count++;
+
+ entry->idx = new_notify_idx;
+ bucket = VMCI_DOORBELL_HASH(entry->idx);
+ hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Remove the given entry from the index table. This will release() the
+ * entry's resource.
+ */
+static void dbell_index_table_remove(struct dbell_entry *entry)
+{
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ hlist_del_init(&entry->node);
+
+ notify_idx_count--;
+ if (entry->idx == max_notify_idx - 1) {
+ /*
+ * If we delete an entry with the maximum known
+ * notification index, we take the opportunity to
+ * prune the current max. As there might be other
+ * unused indices immediately below, we lower the
+ * maximum until we hit an index in use.
+ */
+ while (max_notify_idx > 0 &&
+ !dbell_index_table_find(max_notify_idx - 1))
+ max_notify_idx--;
+ }
+
+ last_notify_idx_released = entry->idx;
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+
+ vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Creates a link between the given doorbell handle and the given
+ * index in the bitmap in the device backend. A notification state
+ * is created in the hypervisor.
+ */
+static int dbell_link(struct vmci_handle handle, u32 notify_idx)
+{
+ struct vmci_doorbell_link_msg link_msg;
+
+ link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_LINK);
+ link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
+ link_msg.handle = handle;
+ link_msg.notify_idx = notify_idx;
+
+ return vmci_send_datagram(&link_msg.hdr);
+}
+
+/*
+ * Unlinks the given doorbell handle from an index in the bitmap in
+ * the device backend. The notification state is destroyed in the hypervisor.
+ */
+static int dbell_unlink(struct vmci_handle handle)
+{
+ struct vmci_doorbell_unlink_msg unlink_msg;
+
+ unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_UNLINK);
+ unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
+ unlink_msg.handle = handle;
+
+ return vmci_send_datagram(&unlink_msg.hdr);
+}
+
+/*
+ * Notify another guest or the host. We send a datagram down to the
+ * host via the hypervisor with the notification info.
+ */
+static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
+{
+ struct vmci_doorbell_notify_msg notify_msg;
+
+ notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_NOTIFY);
+ notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
+ notify_msg.handle = handle;
+
+ return vmci_send_datagram(&notify_msg.hdr);
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dbell_delayed_dispatch(struct work_struct *work)
+{
+ struct dbell_entry *entry = container_of(work,
+ struct dbell_entry, work);
+
+ entry->notify_cb(entry->client_data);
+ vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Dispatches a doorbell notification to the host context.
+ */
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+{
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ if (vmci_handle_is_invalid(handle)) {
+ pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource) {
+ pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct dbell_entry, resource);
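+	/*
+	 * The reference taken by vmci_resource_by_handle() above is either
+	 * dropped right after the direct callback below, or, for delayed
+	 * doorbells, dropped by dbell_delayed_dispatch() once the scheduled
+	 * work has run.
+	 */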
+ if (entry->run_delayed) {
+ schedule_work(&entry->work);
+ } else {
+ entry->notify_cb(entry->client_data);
+ vmci_resource_put(resource);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Register the notification bitmap with the host.
+ */
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+{
+ int result;
+ struct vmci_notify_bm_set_msg bitmap_set_msg;
+
+ bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_SET_NOTIFY_BITMAP);
+ bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
+ VMCI_DG_HEADERSIZE;
+ bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+
+ result = vmci_send_datagram(&bitmap_set_msg.hdr);
+ if (result != VMCI_SUCCESS) {
+ pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+ bitmap_ppn, result);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Executes or schedules the handlers for a given notify index.
+ */
+static void dbell_fire_entries(u32 notify_idx)
+{
+ u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
+ struct dbell_entry *dbell;
+ struct hlist_node *node;
+
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ hlist_for_each_entry(dbell, node,
+ &vmci_doorbell_it.entries[bucket], node) {
+ if (dbell->idx == notify_idx &&
+ atomic_read(&dbell->active) == 1) {
+ if (dbell->run_delayed) {
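+				/*
+				 * Take an extra reference for the
+				 * scheduled work; it is dropped in
+				 * dbell_delayed_dispatch().
+				 */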
+ vmci_resource_get(&dbell->resource);
+ schedule_work(&dbell->work);
+ } else {
+ dbell->notify_cb(dbell->client_data);
+ }
+ }
+ }
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Scans the notification bitmap, collects pending notifications,
+ * resets the bitmap and invokes appropriate callbacks.
+ */
+void vmci_dbell_scan_notification_entries(u8 *bitmap)
+{
+ u32 idx;
+
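+	/* One byte per notification index; bit 0 marks a pending notification. */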
+ for (idx = 0; idx < max_notify_idx; idx++) {
+ if (bitmap[idx] & 0x1) {
+ bitmap[idx] &= ~1;
+ dbell_fire_entries(idx);
+ }
+ }
+}
+
+/*
+ * vmci_doorbell_create() - Creates a doorbell
+ * @handle: A handle used to track the resource. Can be invalid.
+ * @flags: Flag that determines context of callback.
+ * @priv_flags: Privileges flags.
+ * @notify_cb: The callback to be invoked when the doorbell fires.
+ * @client_data: A parameter to be passed to the callback.
+ *
+ * Creates a doorbell with the given callback. If the handle is
+ * VMCI_INVALID_HANDLE, a free handle will be assigned, if
+ * possible. The callback can be run immediately (potentially with
+ * locks held - the default) or delayed (in a kernel thread) by
+ * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
+ * is selected, a given callback may not be run if the kernel is
+ * unable to allocate memory for the delayed execution (highly
+ * unlikely).
+ */
+int vmci_doorbell_create(struct vmci_handle *handle,
+ u32 flags,
+ u32 priv_flags,
+ vmci_callback notify_cb, void *client_data)
+{
+ struct dbell_entry *entry;
+ struct vmci_handle new_handle;
+ int result;
+
+ if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
+ priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry == NULL) {
+		pr_warn("Failed allocating memory for doorbell entry\n");
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ if (vmci_handle_is_invalid(*handle)) {
+ u32 context_id = vmci_get_context_id();
+
+ /* Let resource code allocate a free ID for us */
+ new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+ } else {
+ bool valid_context = false;
+
+ /*
+ * Validate the handle. We must do both of the checks below
+ * because we can be acting as both a host and a guest at the
+ * same time. We always allow the host context ID, since the
+ * host functionality is in practice always there with the
+ * unified driver.
+ */
+ if (handle->context == VMCI_HOST_CONTEXT_ID ||
+ (vmci_guest_code_active() &&
+ vmci_get_context_id() == handle->context)) {
+ valid_context = true;
+ }
+
+ if (!valid_context || handle->resource == VMCI_INVALID_ID) {
+ pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
+ handle->context, handle->resource);
+ result = VMCI_ERROR_INVALID_ARGS;
+ goto free_mem;
+ }
+
+ new_handle = *handle;
+ }
+
+ entry->idx = 0;
+ INIT_HLIST_NODE(&entry->node);
+ entry->priv_flags = priv_flags;
+ INIT_WORK(&entry->work, dbell_delayed_dispatch);
+ entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
+ entry->notify_cb = notify_cb;
+ entry->client_data = client_data;
+ atomic_set(&entry->active, 0);
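+	/* Guest doorbells only become active once linked with the hypervisor below. */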
+
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_DOORBELL,
+ new_handle);
+ if (result != VMCI_SUCCESS) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+ new_handle.context, new_handle.resource, result);
+ goto free_mem;
+ }
+
+ new_handle = vmci_resource_handle(&entry->resource);
+ if (vmci_guest_code_active()) {
+ dbell_index_table_add(entry);
+ result = dbell_link(new_handle, entry->idx);
+ if (VMCI_SUCCESS != result)
+ goto destroy_resource;
+
+ atomic_set(&entry->active, 1);
+ }
+
+ *handle = new_handle;
+
+ return result;
+
+ destroy_resource:
+ dbell_index_table_remove(entry);
+ vmci_resource_remove(&entry->resource);
+ free_mem:
+ kfree(entry);
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_create);
+
+/*
+ * vmci_doorbell_destroy() - Destroy a doorbell.
+ * @handle: The handle tracking the resource.
+ *
+ * Destroys a doorbell previously created with vmci_doorbell_create. This
+ * operation may block waiting for a callback to finish.
+ */
+int vmci_doorbell_destroy(struct vmci_handle handle)
+{
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource) {
+ pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct dbell_entry, resource);
+
+ if (vmci_guest_code_active()) {
+ int result;
+
+ dbell_index_table_remove(entry);
+
+ result = dbell_unlink(handle);
+ if (VMCI_SUCCESS != result) {
+
+ /*
+ * The only reason this should fail would be
+ * an inconsistency between guest and
+ * hypervisor state, where the guest believes
+ * it has an active registration whereas the
+ * hypervisor doesn't. One case where this may
+ * happen is if a doorbell is unregistered
+ * following a hibernation at a time where the
+ * doorbell state hasn't been restored on the
+ * hypervisor side yet. Since the handle has
+ * now been removed in the guest, we just
+ * print a warning and return success.
+ */
+ pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
+ handle.context, handle.resource, result);
+ }
+ }
+
+ /*
+ * Now remove the resource from the table. It might still be in use
+ * after this, in a callback or still on the delayed work queue.
+ */
+ vmci_resource_put(&entry->resource);
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
+
+/*
+ * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
+ * @dst: The handle identifying the doorbell resource
+ * @priv_flags: Privilege flags.
+ *
+ * Generates a notification on the doorbell identified by the
+ * handle. For host side generation of notifications, the caller
+ * can specify what the privilege of the calling side is.
+ */
+int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags)
+{
+ int retval;
+ enum vmci_route route;
+ struct vmci_handle src;
+
+ if (vmci_handle_is_invalid(dst) ||
+ (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src = VMCI_INVALID_HANDLE;
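+	/* Let vmci_route() decide whether to deliver as host or as guest. */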
+ retval = vmci_route(&src, &dst, false, &route);
+ if (retval < VMCI_SUCCESS)
+ return retval;
+
+ if (VMCI_ROUTE_AS_HOST == route)
+ return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID,
+ dst, priv_flags);
+
+ if (VMCI_ROUTE_AS_GUEST == route)
+ return dbell_notify_as_guest(dst, priv_flags);
+
+ pr_warn("Unknown route (%d) for doorbell\n", route);
+ return VMCI_ERROR_DST_UNREACHABLE;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_notify);
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
new file mode 100644
index 000000000000..e4c0b17486a5
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -0,0 +1,51 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef VMCI_DOORBELL_H
+#define VMCI_DOORBELL_H
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_driver.h"
+
+/*
+ * VMCINotifyResourceInfo: Used to create and destroy doorbells, and
+ * generate a notification for a doorbell or queue pair.
+ */
+struct vmci_dbell_notify_resource_info {
+ struct vmci_handle handle;
+ u16 resource;
+ u16 action;
+ s32 result;
+};
+
+/*
+ * Structure used for checkpointing the doorbell mappings. It is
+ * written to the checkpoint as is, so changing this structure will
+ * break checkpoint compatibility.
+ */
+struct dbell_cpt_state {
+ struct vmci_handle handle;
+ u64 bitmap_idx;
+};
+
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
+
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
+void vmci_dbell_scan_notification_entries(u8 *bitmap);
+
+#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
new file mode 100644
index 000000000000..7b3fce2da6c3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -0,0 +1,117 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+static bool vmci_disable_host;
+module_param_named(disable_host, vmci_disable_host, bool, 0);
+MODULE_PARM_DESC(disable_host,
+ "Disable driver host personality (default=enabled)");
+
+static bool vmci_disable_guest;
+module_param_named(disable_guest, vmci_disable_guest, bool, 0);
+MODULE_PARM_DESC(disable_guest,
+ "Disable driver guest personality (default=enabled)");
+
+static bool vmci_guest_personality_initialized;
+static bool vmci_host_personality_initialized;
+
+/*
+ * vmci_get_context_id() - Gets the current context ID.
+ *
+ * Returns the current context ID: the VM's context ID when the guest
+ * personality is active, the host context ID when only the host
+ * personality is active, and VMCI_INVALID_ID otherwise.
+ */
+u32 vmci_get_context_id(void)
+{
+ if (vmci_guest_code_active())
+ return vmci_get_vm_context_id();
+ else if (vmci_host_code_active())
+ return VMCI_HOST_CONTEXT_ID;
+
+ return VMCI_INVALID_ID;
+}
+EXPORT_SYMBOL_GPL(vmci_get_context_id);
+
+static int __init vmci_drv_init(void)
+{
+ int vmci_err;
+ int error;
+
+ vmci_err = vmci_event_init();
+ if (vmci_err < VMCI_SUCCESS) {
+ pr_err("Failed to initialize VMCIEvent (result=%d)\n",
+ vmci_err);
+ return -EINVAL;
+ }
+
+ if (!vmci_disable_guest) {
+ error = vmci_guest_init();
+ if (error) {
+ pr_warn("Failed to initialize guest personality (err=%d)\n",
+ error);
+ } else {
+ vmci_guest_personality_initialized = true;
+ pr_info("Guest personality initialized and is %s\n",
+ vmci_guest_code_active() ?
+ "active" : "inactive");
+ }
+ }
+
+ if (!vmci_disable_host) {
+ error = vmci_host_init();
+ if (error) {
+ pr_warn("Unable to initialize host personality (err=%d)\n",
+ error);
+ } else {
+ vmci_host_personality_initialized = true;
+ pr_info("Initialized host personality\n");
+ }
+ }
+
+ if (!vmci_guest_personality_initialized &&
+ !vmci_host_personality_initialized) {
+ vmci_event_exit();
+ return -ENODEV;
+ }
+
+ return 0;
+}
+module_init(vmci_drv_init);
+
+static void __exit vmci_drv_exit(void)
+{
+ if (vmci_guest_personality_initialized)
+ vmci_guest_exit();
+
+ if (vmci_host_personality_initialized)
+ vmci_host_exit();
+
+ vmci_event_exit();
+}
+module_exit(vmci_drv_exit);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
+MODULE_VERSION("1.0.0.0-k");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
new file mode 100644
index 000000000000..f69156a1f30c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -0,0 +1,50 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DRIVER_H_
+#define _VMCI_DRIVER_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/wait.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_context.h"
+
+enum vmci_obj_type {
+ VMCIOBJ_VMX_VM = 10,
+ VMCIOBJ_CONTEXT,
+ VMCIOBJ_SOCKET,
+ VMCIOBJ_NOT_SET,
+};
+
+/* For storing VMCI structures in file handles. */
+struct vmci_obj {
+ void *ptr;
+ enum vmci_obj_type type;
+};
+
+u32 vmci_get_context_id(void);
+int vmci_send_datagram(struct vmci_datagram *dg);
+
+int vmci_host_init(void);
+void vmci_host_exit(void);
+bool vmci_host_code_active(void);
+
+int vmci_guest_init(void);
+void vmci_guest_exit(void);
+bool vmci_guest_code_active(void);
+u32 vmci_get_vm_context_id(void);
+
+#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
new file mode 100644
index 000000000000..8449516d6ac6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.c
@@ -0,0 +1,224 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define EVENT_MAGIC 0xEABE0000
+#define VMCI_EVENT_MAX_ATTEMPTS 10
+
+struct vmci_subscription {
+ u32 id;
+ u32 event;
+ vmci_event_cb callback;
+ void *callback_data;
+ struct list_head node; /* on one of subscriber lists */
+};
+
+static struct list_head subscriber_array[VMCI_EVENT_MAX];
+static DEFINE_MUTEX(subscriber_mutex);
+
+int __init vmci_event_init(void)
+{
+ int i;
+
+ for (i = 0; i < VMCI_EVENT_MAX; i++)
+ INIT_LIST_HEAD(&subscriber_array[i]);
+
+ return VMCI_SUCCESS;
+}
+
+void vmci_event_exit(void)
+{
+ int e;
+
+ /* We free all memory at exit. */
+ for (e = 0; e < VMCI_EVENT_MAX; e++) {
+ struct vmci_subscription *cur, *p2;
+ list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
+
+ /*
+ * We should never get here because all events
+ * should have been unregistered before we try
+ * to unload the driver module.
+ */
+ pr_warn("Unexpected free events occurring\n");
+ list_del(&cur->node);
+ kfree(cur);
+ }
+ }
+}
+
+/*
+ * Find entry. Assumes subscriber_mutex is held.
+ */
+static struct vmci_subscription *event_find(u32 sub_id)
+{
+ int e;
+
+ for (e = 0; e < VMCI_EVENT_MAX; e++) {
+ struct vmci_subscription *cur;
+ list_for_each_entry(cur, &subscriber_array[e], node) {
+ if (cur->id == sub_id)
+ return cur;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Actually delivers the events to the subscribers.
+ * The callback function for each subscriber is invoked.
+ */
+static void event_deliver(struct vmci_event_msg *event_msg)
+{
+ struct vmci_subscription *cur;
+ struct list_head *subscriber_list;
+
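+	/*
+	 * Subscriptions are added and removed with RCU list primitives
+	 * under subscriber_mutex, so an RCU read-side critical section
+	 * is sufficient here.
+	 */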
+ rcu_read_lock();
+ subscriber_list = &subscriber_array[event_msg->event_data.event];
+ list_for_each_entry_rcu(cur, subscriber_list, node) {
+ cur->callback(cur->id, &event_msg->event_data,
+ cur->callback_data);
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
+ * subscribers for given event.
+ */
+int vmci_event_dispatch(struct vmci_datagram *msg)
+{
+ struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
+
+ if (msg->payload_size < sizeof(u32) ||
+ msg->payload_size > sizeof(struct vmci_event_data_max))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (!VMCI_EVENT_VALID(event_msg->event_data.event))
+ return VMCI_ERROR_EVENT_UNKNOWN;
+
+ event_deliver(event_msg);
+ return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_event_subscribe() - Subscribe to a given event.
+ * @event: The event to subscribe to.
+ * @callback: The callback to invoke upon the event.
+ * @callback_data: Data to pass to the callback.
+ * @new_subscription_id: ID used to track the subscription. Used with
+ * vmci_event_unsubscribe().
+ *
+ * Subscribes to the provided event. The callback specified will be
+ * fired from an RCU critical section and therefore must not sleep.
+ */
+int vmci_event_subscribe(u32 event,
+ vmci_event_cb callback,
+ void *callback_data,
+ u32 *new_subscription_id)
+{
+ struct vmci_subscription *sub;
+ int attempts;
+ int retval;
+ bool have_new_id = false;
+
+ if (!new_subscription_id) {
+ pr_devel("%s: Invalid subscription (NULL)\n", __func__);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (!VMCI_EVENT_VALID(event) || !callback) {
+ pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
+ __func__, event, callback, callback_data);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ sub = kzalloc(sizeof(*sub), GFP_KERNEL);
+ if (!sub)
+ return VMCI_ERROR_NO_MEM;
+
+ sub->id = VMCI_EVENT_MAX;
+ sub->event = event;
+ sub->callback = callback;
+ sub->callback_data = callback_data;
+ INIT_LIST_HEAD(&sub->node);
+
+ mutex_lock(&subscriber_mutex);
+
+ /* Creation of a new event is always allowed. */
+ for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
+ static u32 subscription_id;
+ /*
+		 * We try to get an id a couple of times before
+ * claiming we are out of resources.
+ */
+
+ /* Test for duplicate id. */
+ if (!event_find(++subscription_id)) {
+ sub->id = subscription_id;
+ have_new_id = true;
+ break;
+ }
+ }
+
+ if (have_new_id) {
+ list_add_rcu(&sub->node, &subscriber_array[event]);
+ retval = VMCI_SUCCESS;
+ } else {
+ retval = VMCI_ERROR_NO_RESOURCES;
+ }
+
+ mutex_unlock(&subscriber_mutex);
+
+ *new_subscription_id = sub->id;
+ return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_event_subscribe);
+
+/*
+ * vmci_event_unsubscribe() - unsubscribe from an event.
+ * @sub_id: A subscription ID as provided by vmci_event_subscribe()
+ *
+ * Unsubscribe from the given event. Removes the subscription from the
+ * list and frees it once any in-flight callbacks have completed.
+ */
+int vmci_event_unsubscribe(u32 sub_id)
+{
+ struct vmci_subscription *s;
+
+ mutex_lock(&subscriber_mutex);
+ s = event_find(sub_id);
+ if (s)
+ list_del_rcu(&s->node);
+ mutex_unlock(&subscriber_mutex);
+
+ if (!s)
+ return VMCI_ERROR_NOT_FOUND;
+
+ synchronize_rcu();
+ kfree(s);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h
new file mode 100644
index 000000000000..7df9b1c0a96c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.h
@@ -0,0 +1,25 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __VMCI_EVENT_H__
+#define __VMCI_EVENT_H__
+
+#include <linux/vmw_vmci_api.h>
+
+int vmci_event_init(void);
+void vmci_event_exit(void);
+int vmci_event_dispatch(struct vmci_datagram *msg);
+
+#endif /*__VMCI_EVENT_H__ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
new file mode 100644
index 000000000000..60c01999f489
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -0,0 +1,759 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+static bool vmci_disable_msi;
+module_param_named(disable_msi, vmci_disable_msi, bool, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+static bool vmci_disable_msix;
+module_param_named(disable_msix, vmci_disable_msix, bool, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+static u32 ctx_update_sub_id = VMCI_INVALID_ID;
+static u32 vm_context_id = VMCI_INVALID_ID;
+
+struct vmci_guest_device {
+ struct device *dev; /* PCI device we are attached to */
+ void __iomem *iobase;
+
+ unsigned int irq;
+ unsigned int intr_type;
+ bool exclusive_vectors;
+ struct msix_entry msix_entries[VMCI_MAX_INTRS];
+
+ struct tasklet_struct datagram_tasklet;
+ struct tasklet_struct bm_tasklet;
+
+ void *data_buffer;
+ void *notification_bitmap;
+};
+
+/* vmci_dev singleton device and supporting data */
+static struct vmci_guest_device *vmci_dev_g;
+static DEFINE_SPINLOCK(vmci_dev_spinlock);
+
+static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
+
+bool vmci_guest_code_active(void)
+{
+ return atomic_read(&vmci_num_guest_devices) != 0;
+}
+
+u32 vmci_get_vm_context_id(void)
+{
+ if (vm_context_id == VMCI_INVALID_ID) {
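+		/* Lazily query the hypervisor for our context ID and cache it. */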
+ struct vmci_datagram get_cid_msg;
+ get_cid_msg.dst =
+ vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_GET_CONTEXT_ID);
+ get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
+ get_cid_msg.payload_size = 0;
+ vm_context_id = vmci_send_datagram(&get_cid_msg);
+ }
+ return vm_context_id;
+}
+
+/*
+ * VM to hypervisor call mechanism. We use the standard VMware naming
+ * convention since shared code is calling this function as well.
+ */
+int vmci_send_datagram(struct vmci_datagram *dg)
+{
+ unsigned long flags;
+ int result;
+
+ /* Check args. */
+ if (dg == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * Need to acquire spinlock on the device because the datagram
+ * data may be spread over multiple pages and the monitor may
+ * interleave device user rpc calls from multiple
+ * VCPUs. Acquiring the spinlock precludes that
+	 * possibility. Interrupts are disabled to avoid incoming
+	 * datagrams during a "rep out" and possibly re-entering this
+	 * function.
+ */
+ spin_lock_irqsave(&vmci_dev_spinlock, flags);
+
+ if (vmci_dev_g) {
+ iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
+ dg, VMCI_DG_SIZE(dg));
+ result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
+ } else {
+ result = VMCI_ERROR_UNAVAILABLE;
+ }
+
+ spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_send_datagram);
+
+/*
+ * Gets called with the new context ID when the context ID is updated
+ * or the VM is resumed.
+ */
+static void vmci_guest_cid_update(u32 sub_id,
+ const struct vmci_event_data *event_data,
+ void *client_data)
+{
+ const struct vmci_event_payld_ctx *ev_payload =
+ vmci_event_data_const_payload(event_data);
+
+ if (sub_id != ctx_update_sub_id) {
+ pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
+ return;
+ }
+
+ if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
+ pr_devel("Invalid event data\n");
+ return;
+ }
+
+ pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
+ vm_context_id, ev_payload->context_id, event_data->event);
+
+ vm_context_id = ev_payload->context_id;
+}
+
+/*
+ * Verify that the host supports the hypercalls we need. If it does not,
+ * try to find fallback hypercalls and use those instead. Returns
+ * true if required hypercalls (or fallback hypercalls) are
+ * supported by the host, false otherwise.
+ */
+static bool vmci_check_host_caps(struct pci_dev *pdev)
+{
+ bool result;
+ struct vmci_resource_query_msg *msg;
+ u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
+ VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
+ struct vmci_datagram *check_msg;
+
+ check_msg = kmalloc(msg_size, GFP_KERNEL);
+ if (!check_msg) {
+ dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
+ return false;
+ }
+
+ check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_RESOURCES_QUERY);
+ check_msg->src = VMCI_ANON_SRC_HANDLE;
+ check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
+ msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
+
+ msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
+ msg->resources[0] = VMCI_GET_CONTEXT_ID;
+
+	/*
+	 * Checks that hypercalls are supported. The reply is a bitmask of
+	 * the queried resources; 0x01 means the single resource we asked
+	 * about (VMCI_GET_CONTEXT_ID) is supported.
+	 */
+ result = vmci_send_datagram(check_msg) == 0x01;
+ kfree(check_msg);
+
+ dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
+ __func__, result ? "PASSED" : "FAILED");
+
+ /* We need the vector. There are no fallbacks. */
+ return result;
+}
+
+/*
+ * Reads datagrams from the data in port and dispatches them. We
+ * always start reading datagrams into only the first page of the
+ * datagram buffer. If the datagrams don't fit into one page, we
+ * use the maximum datagram buffer size for the remainder of the
+ * invocation. This is a simple heuristic for not penalizing
+ * small datagrams.
+ *
+ * This function assumes that it has exclusive access to the data
+ * in port for the duration of the call.
+ */
+static void vmci_dispatch_dgs(unsigned long data)
+{
+ struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
+ u8 *dg_in_buffer = vmci_dev->data_buffer;
+ struct vmci_datagram *dg;
+ size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+ size_t current_dg_in_buffer_size = PAGE_SIZE;
+ size_t remaining_bytes;
+
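+	/* The initial one-page read below must fit in the datagram buffer. */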
+ BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
+
+ ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer, current_dg_in_buffer_size);
+ dg = (struct vmci_datagram *)dg_in_buffer;
+ remaining_bytes = current_dg_in_buffer_size;
+
+ while (dg->dst.resource != VMCI_INVALID_ID ||
+ remaining_bytes > PAGE_SIZE) {
+ unsigned dg_in_size;
+
+ /*
+ * When the input buffer spans multiple pages, a datagram can
+ * start on any page boundary in the buffer.
+ */
+ if (dg->dst.resource == VMCI_INVALID_ID) {
+ dg = (struct vmci_datagram *)roundup(
+ (uintptr_t)dg + 1, PAGE_SIZE);
+ remaining_bytes =
+ (size_t)(dg_in_buffer +
+ current_dg_in_buffer_size -
+ (u8 *)dg);
+ continue;
+ }
+
+ dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
+
+ if (dg_in_size <= dg_in_buffer_size) {
+ int result;
+
+ /*
+			 * If the remaining bytes in the datagram
+			 * buffer don't contain the complete
+			 * datagram, we first make sure we have enough
+			 * room for it and then we read the remainder
+			 * of the datagram and possibly any following
+ * datagrams.
+ */
+ if (dg_in_size > remaining_bytes) {
+ if (remaining_bytes !=
+ current_dg_in_buffer_size) {
+
+ /*
+				 * We move the partial
+				 * datagram to the front and
+				 * read the remainder of the
+				 * datagram and possibly any
+				 * following datagrams into
+				 * the bytes after it.
+ */
+ memmove(dg_in_buffer, dg_in_buffer +
+ current_dg_in_buffer_size -
+ remaining_bytes,
+ remaining_bytes);
+ dg = (struct vmci_datagram *)
+ dg_in_buffer;
+ }
+
+ if (current_dg_in_buffer_size !=
+ dg_in_buffer_size)
+ current_dg_in_buffer_size =
+ dg_in_buffer_size;
+
+ ioread8_rep(vmci_dev->iobase +
+ VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer +
+ remaining_bytes,
+ current_dg_in_buffer_size -
+ remaining_bytes);
+ }
+
+ /*
+ * We special case event datagrams from the
+ * hypervisor.
+ */
+ if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+ dg->dst.resource == VMCI_EVENT_HANDLER) {
+ result = vmci_event_dispatch(dg);
+ } else {
+ result = vmci_datagram_invoke_guest_handler(dg);
+ }
+ if (result < VMCI_SUCCESS)
+ dev_dbg(vmci_dev->dev,
+ "Datagram with resource (ID=0x%x) failed (err=%d)\n",
+ dg->dst.resource, result);
+
+ /* On to the next datagram. */
+ dg = (struct vmci_datagram *)((u8 *)dg +
+ dg_in_size);
+ } else {
+ size_t bytes_to_skip;
+
+ /*
+ * Datagram doesn't fit in datagram buffer of maximal
+ * size. We drop it.
+ */
+ dev_dbg(vmci_dev->dev,
+ "Failed to receive datagram (size=%u bytes)\n",
+ dg_in_size);
+
+ bytes_to_skip = dg_in_size - remaining_bytes;
+ if (current_dg_in_buffer_size != dg_in_buffer_size)
+ current_dg_in_buffer_size = dg_in_buffer_size;
+
+ for (;;) {
+ ioread8_rep(vmci_dev->iobase +
+ VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer,
+ current_dg_in_buffer_size);
+ if (bytes_to_skip <= current_dg_in_buffer_size)
+ break;
+
+ bytes_to_skip -= current_dg_in_buffer_size;
+ }
+ dg = (struct vmci_datagram *)(dg_in_buffer +
+ bytes_to_skip);
+ }
+
+ remaining_bytes =
+ (size_t) (dg_in_buffer + current_dg_in_buffer_size -
+ (u8 *)dg);
+
+ if (remaining_bytes < VMCI_DG_HEADERSIZE) {
+ /* Get the next batch of datagrams. */
+
+ ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer,
+ current_dg_in_buffer_size);
+ dg = (struct vmci_datagram *)dg_in_buffer;
+ remaining_bytes = current_dg_in_buffer_size;
+ }
+ }
+}
+
+/*
+ * Scans the notification bitmap for raised flags, clears them
+ * and handles the notifications.
+ */
+static void vmci_process_bitmap(unsigned long data)
+{
+ struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
+
+ if (!dev->notification_bitmap) {
+ dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
+ return;
+ }
+
+ vmci_dbell_scan_notification_entries(dev->notification_bitmap);
+}
+
+/*
+ * Enable MSI-X. Try exclusive vectors first, then shared vectors.
+ */
+static int vmci_enable_msix(struct pci_dev *pdev,
+ struct vmci_guest_device *vmci_dev)
+{
+ int i;
+ int result;
+
+ for (i = 0; i < VMCI_MAX_INTRS; ++i) {
+ vmci_dev->msix_entries[i].entry = i;
+ vmci_dev->msix_entries[i].vector = i;
+ }
+
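+	/*
+	 * A positive return value from pci_enable_msix() means fewer
+	 * vectors were available than requested; fall back to a single
+	 * shared vector in that case.
+	 */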
+ result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
+ if (result == 0)
+ vmci_dev->exclusive_vectors = true;
+ else if (result > 0)
+ result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
+
+ return result;
+}
+
+/*
+ * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
+ * interrupt (vector VMCI_INTR_DATAGRAM).
+ */
+static irqreturn_t vmci_interrupt(int irq, void *_dev)
+{
+ struct vmci_guest_device *dev = _dev;
+
+ /*
+ * If we are using MSI-X with exclusive vectors then we simply schedule
+ * the datagram tasklet, since we know the interrupt was meant for us.
+ * Otherwise we must read the ICR to determine what to do.
+ */
+
+ if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
+ tasklet_schedule(&dev->datagram_tasklet);
+ } else {
+ unsigned int icr;
+
+ /* Acknowledge interrupt and determine what needs doing. */
+ icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
+ if (icr == 0 || icr == ~0)
+ return IRQ_NONE;
+
+ if (icr & VMCI_ICR_DATAGRAM) {
+ tasklet_schedule(&dev->datagram_tasklet);
+ icr &= ~VMCI_ICR_DATAGRAM;
+ }
+
+ if (icr & VMCI_ICR_NOTIFICATION) {
+ tasklet_schedule(&dev->bm_tasklet);
+ icr &= ~VMCI_ICR_NOTIFICATION;
+ }
+
+ if (icr != 0)
+ dev_warn(dev->dev,
+ "Ignoring unknown interrupt cause (%d)\n",
+ icr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
+ * which is for the notification bitmap. Will only get called if we are
+ * using MSI-X with exclusive vectors.
+ */
+static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
+{
+ struct vmci_guest_device *dev = _dev;
+
+ /* For MSI-X we can just assume it was meant for us. */
+ tasklet_schedule(&dev->bm_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Most of the guest device initialization is done here, at PCI probe time.
+ */
+static int vmci_guest_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct vmci_guest_device *vmci_dev;
+ void __iomem *iobase;
+ unsigned int capabilities;
+ unsigned long cmd;
+ int vmci_err;
+ int error;
+
+ dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");
+
+ error = pcim_enable_device(pdev);
+ if (error) {
+ dev_err(&pdev->dev,
+ "Failed to enable VMCI device: %d\n", error);
+ return error;
+ }
+
+ error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
+ return error;
+ }
+
+ iobase = pcim_iomap_table(pdev)[0];
+
+ dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
+ (unsigned long)iobase, pdev->irq);
+
+ vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
+ if (!vmci_dev) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for VMCI device\n");
+ return -ENOMEM;
+ }
+
+ vmci_dev->dev = &pdev->dev;
+ vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+ vmci_dev->exclusive_vectors = false;
+ vmci_dev->iobase = iobase;
+
+ tasklet_init(&vmci_dev->datagram_tasklet,
+ vmci_dispatch_dgs, (unsigned long)vmci_dev);
+ tasklet_init(&vmci_dev->bm_tasklet,
+ vmci_process_bitmap, (unsigned long)vmci_dev);
+
+ vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+ if (!vmci_dev->data_buffer) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for datagram buffer\n");
+ return -ENOMEM;
+ }
+
+ pci_set_master(pdev); /* To enable queue_pair functionality. */
+
+ /*
+ * Verify that the VMCI Device supports the capabilities that
+ * we need. If the device is missing capabilities that we would
+ * like to use, check for fallback capabilities and use those
+ * instead (so we can run a new VM on old hosts). Fail the load if
+ * a required capability is missing and there is no fallback.
+ *
+ * Right now, we need datagrams. There are no fallbacks.
+ */
+ capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
+ if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
+ dev_err(&pdev->dev, "Device does not support datagrams\n");
+ error = -ENXIO;
+ goto err_free_data_buffer;
+ }
+
+ /*
+ * If the hardware supports notifications, we will use that as
+ * well.
+ */
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+ vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+ if (!vmci_dev->notification_bitmap) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate notification bitmap\n");
+ } else {
+ memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
+ capabilities |= VMCI_CAPS_NOTIFICATIONS;
+ }
+ }
+
+ dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
+
+ /* Let the host know which capabilities we intend to use. */
+ iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
+
+ /* Set up global device so that we can start sending datagrams */
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = vmci_dev;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+ /*
+ * Register notification bitmap with device if that capability is
+ * used.
+ */
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+ struct page *page =
+ vmalloc_to_page(vmci_dev->notification_bitmap);
+ unsigned long bitmap_ppn = page_to_pfn(page);
+		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
+			dev_warn(&pdev->dev,
+				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
+				 (u32) bitmap_ppn);
+			error = -ENXIO;
+			goto err_remove_vmci_dev_g;
+		}
+ }
+
+	/* Check host capabilities. */
+	if (!vmci_check_host_caps(pdev)) {
+		error = -ENXIO;
+		goto err_remove_bitmap;
+	}
+
+ /* Enable device. */
+
+ /*
+ * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
+ * update the internal context id when needed.
+ */
+ vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
+ vmci_guest_cid_update, NULL,
+ &ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to subscribe to event (type=%d): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
+
+ /*
+ * Enable interrupts. Try MSI-X first, then MSI, and then fallback on
+ * legacy interrupts.
+ */
+ if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
+ vmci_dev->irq = vmci_dev->msix_entries[0].vector;
+ } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
+ vmci_dev->irq = pdev->irq;
+ } else {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+ vmci_dev->irq = pdev->irq;
+ }
+
+ /*
+ * Request IRQ for legacy or MSI interrupts, or for first
+ * MSI-X vector.
+ */
+ error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, vmci_dev);
+ if (error) {
+ dev_err(&pdev->dev, "Irq %u in use: %d\n",
+ vmci_dev->irq, error);
+ goto err_disable_msi;
+ }
+
+ /*
+ * For MSI-X with exclusive vectors we need to request an
+ * interrupt for each vector so that we get a separate
+ * interrupt handler routine. This allows us to distinguish
+ * between the vectors.
+ */
+ if (vmci_dev->exclusive_vectors) {
+ error = request_irq(vmci_dev->msix_entries[1].vector,
+ vmci_interrupt_bm, 0, KBUILD_MODNAME,
+ vmci_dev);
+ if (error) {
+ dev_err(&pdev->dev,
+ "Failed to allocate irq %u: %d\n",
+ vmci_dev->msix_entries[1].vector, error);
+ goto err_free_irq;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "Registered device\n");
+
+ atomic_inc(&vmci_num_guest_devices);
+
+ /* Enable specific interrupt bits. */
+ cmd = VMCI_IMR_DATAGRAM;
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS)
+ cmd |= VMCI_IMR_NOTIFICATION;
+ iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
+
+ /* Enable interrupts. */
+ iowrite32(VMCI_CONTROL_INT_ENABLE,
+ vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+ pci_set_drvdata(pdev, vmci_dev);
+ return 0;
+
+err_free_irq:
+	free_irq(vmci_dev->irq, vmci_dev);
+ tasklet_kill(&vmci_dev->datagram_tasklet);
+ tasklet_kill(&vmci_dev->bm_tasklet);
+
+err_disable_msi:
+ if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
+ pci_disable_msix(pdev);
+ else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
+ pci_disable_msi(pdev);
+
+ vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+err_remove_bitmap:
+ if (vmci_dev->notification_bitmap) {
+ iowrite32(VMCI_CONTROL_RESET,
+ vmci_dev->iobase + VMCI_CONTROL_ADDR);
+ vfree(vmci_dev->notification_bitmap);
+ }
+
+err_remove_vmci_dev_g:
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = NULL;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+err_free_data_buffer:
+ vfree(vmci_dev->data_buffer);
+
+ /* The rest are managed resources and will be freed by PCI core */
+ return error;
+}
+
+static void vmci_guest_remove_device(struct pci_dev *pdev)
+{
+ struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
+ int vmci_err;
+
+ dev_dbg(&pdev->dev, "Removing device\n");
+
+ atomic_dec(&vmci_num_guest_devices);
+
+ vmci_qp_guest_endpoints_exit();
+
+ vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = NULL;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+ dev_dbg(&pdev->dev, "Resetting vmci device\n");
+ iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+ /*
+ * Free IRQ and then disable MSI/MSI-X as appropriate. For
+ * MSI-X, we might have multiple vectors, each with their own
+ * IRQ, which we must free too.
+ */
+ free_irq(vmci_dev->irq, vmci_dev);
+ if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
+ if (vmci_dev->exclusive_vectors)
+ free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
+ pci_disable_msix(pdev);
+ } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
+ pci_disable_msi(pdev);
+ }
+
+ tasklet_kill(&vmci_dev->datagram_tasklet);
+ tasklet_kill(&vmci_dev->bm_tasklet);
+
+ if (vmci_dev->notification_bitmap) {
+ /*
+ * The device reset above cleared the bitmap state of the
+ * device, so we can safely free it here.
+ */
+
+ vfree(vmci_dev->notification_bitmap);
+ }
+
+ vfree(vmci_dev->data_buffer);
+
+ /* The rest are managed resources and will be freed by PCI core */
+}
+
+static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, vmci_ids);
+
+static struct pci_driver vmci_guest_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = vmci_ids,
+ .probe = vmci_guest_probe_device,
+ .remove = vmci_guest_remove_device,
+};
+
+int __init vmci_guest_init(void)
+{
+ return pci_register_driver(&vmci_guest_driver);
+}
+
+void __exit vmci_guest_exit(void)
+{
+ pci_unregister_driver(&vmci_guest_driver);
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
new file mode 100644
index 000000000000..344973a0fb0a
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -0,0 +1,142 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/slab.h>
+#include "vmci_handle_array.h"
+
+static size_t handle_arr_calc_size(size_t capacity)
+{
+ return sizeof(struct vmci_handle_arr) +
+ capacity * sizeof(struct vmci_handle);
+}
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
+{
+ struct vmci_handle_arr *array;
+
+ if (capacity == 0)
+ capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
+
+ array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+ if (!array)
+ return NULL;
+
+ array->capacity = capacity;
+ array->size = 0;
+
+ return array;
+}
+
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
+{
+ kfree(array);
+}
+
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle)
+{
+ struct vmci_handle_arr *array = *array_ptr;
+
+ if (unlikely(array->size >= array->capacity)) {
+		/*
+		 * Grow the array; if the reallocation fails, the new
+		 * entry is silently dropped.
+		 */
+ struct vmci_handle_arr *new_array;
+ size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
+ size_t new_size = handle_arr_calc_size(new_capacity);
+
+ new_array = krealloc(array, new_size, GFP_ATOMIC);
+ if (!new_array)
+ return;
+
+ new_array->capacity = new_capacity;
+ *array_ptr = array = new_array;
+ }
+
+ array->entries[array->size] = handle;
+ array->size++;
+}
+
+/*
+ * Handle that was removed, VMCI_INVALID_HANDLE if entry not found.
+ */
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+{
+ struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ size_t i;
+
+ for (i = 0; i < array->size; i++) {
+ if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
+ handle = array->entries[i];
+ array->size--;
+ array->entries[i] = array->entries[array->size];
+ array->entries[array->size] = VMCI_INVALID_HANDLE;
+ break;
+ }
+ }
+
+ return handle;
+}
+
+/*
+ * Handle that was removed, VMCI_INVALID_HANDLE if array was empty.
+ */
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
+{
+ struct vmci_handle handle = VMCI_INVALID_HANDLE;
+
+ if (array->size) {
+ array->size--;
+ handle = array->entries[array->size];
+ array->entries[array->size] = VMCI_INVALID_HANDLE;
+ }
+
+ return handle;
+}
+
+/*
+ * Handle at given index, VMCI_INVALID_HANDLE if invalid index.
+ */
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+{
+ if (unlikely(index >= array->size))
+ return VMCI_INVALID_HANDLE;
+
+ return array->entries[index];
+}
+
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+{
+ size_t i;
+
+ for (i = 0; i < array->size; i++)
+ if (vmci_handle_is_equal(array->entries[i], entry_handle))
+ return true;
+
+ return false;
+}
+
+/*
+ * NULL if the array is empty. Otherwise, a pointer to the array
+ * of VMCI handles in the handle array.
+ */
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array)
+{
+ if (array->size)
+ return array->entries;
+
+ return NULL;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
new file mode 100644
index 000000000000..b5f3a7f98cf1
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_HANDLE_ARRAY_H_
+#define _VMCI_HANDLE_ARRAY_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
+#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
+
+struct vmci_handle_arr {
+ size_t capacity;
+ size_t size;
+ struct vmci_handle entries[];
+};
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle);
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle
+ entry_handle);
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle);
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
+
+static inline size_t vmci_handle_arr_get_size(
+ const struct vmci_handle_arr *array)
+{
+ return array->size;
+}
+
+
+#endif /* _VMCI_HANDLE_ARRAY_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
new file mode 100644
index 000000000000..d4722b3dc8ec
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -0,0 +1,1043 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+enum {
+ VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
+ VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
+};
+
+enum {
+ VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
+ VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
+ VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
+};
+
+/*
+ * VMCI driver initialization. This block can also be used to
+ * pass initial group membership etc.
+ */
+struct vmci_init_blk {
+ u32 cid;
+ u32 flags;
+};
+
+/* VMCIqueue_pairAllocInfo_VMToVM */
+struct vmci_qp_alloc_info_vmvm {
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u64 produce_page_file; /* User VA. */
+ u64 consume_page_file; /* User VA. */
+ u64 produce_page_file_size; /* Size of the file name array. */
+ u64 consume_page_file_size; /* Size of the file name array. */
+ s32 result;
+ u32 _pad;
+};
+
+/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
+struct vmci_set_notify_info {
+ u64 notify_uva;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * Per-instance host state
+ */
+struct vmci_host_dev {
+ struct vmci_ctx *context;
+ int user_version;
+ enum vmci_obj_type ct_type;
+ struct mutex lock; /* Mutex lock for vmci context access */
+};
+
+static struct vmci_ctx *host_context;
+static bool vmci_host_device_initialized;
+static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
+
+/*
+ * Determines whether the VMCI host personality is
+ * available. Since the core functionality of the host driver is
+ * always present, all guests could possibly use the host
+ * personality. However, to minimize the deviation from the
+ * pre-unified driver state of affairs, we only consider the host
+ * device active if there is no active guest device or if there
+ * are VMX'en with active VMCI contexts using the host device.
+ */
+bool vmci_host_code_active(void)
+{
+ return vmci_host_device_initialized &&
+ (!vmci_guest_code_active() ||
+ atomic_read(&vmci_host_active_users) > 0);
+}
+
+/*
+ * Called on open of /dev/vmci.
+ */
+static int vmci_host_open(struct inode *inode, struct file *filp)
+{
+ struct vmci_host_dev *vmci_host_dev;
+
+ vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
+ if (vmci_host_dev == NULL)
+ return -ENOMEM;
+
+ vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+ mutex_init(&vmci_host_dev->lock);
+ filp->private_data = vmci_host_dev;
+
+ return 0;
+}
+
+/*
+ * Called on close of /dev/vmci, most often when the process
+ * exits.
+ */
+static int vmci_host_close(struct inode *inode, struct file *filp)
+{
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+
+ if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ vmci_ctx_destroy(vmci_host_dev->context);
+ vmci_host_dev->context = NULL;
+
+ /*
+ * The number of active contexts is used to track whether any
+ * VMX'en are using the host personality. It is incremented when
+ * a context is created through the IOCTL_VMCI_INIT_CONTEXT
+ * ioctl.
+ */
+ atomic_dec(&vmci_host_active_users);
+ }
+ vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+
+ kfree(vmci_host_dev);
+ filp->private_data = NULL;
+ return 0;
+}
+
+/*
+ * This is used to wake up the VMX when a VMCI call arrives, or
+ * to wake up select() or poll() at the next clock tick.
+ */
+static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+{
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+ struct vmci_ctx *context = vmci_host_dev->context;
+ unsigned int mask = 0;
+
+ if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ /* Check for VMCI calls to this VM context. */
+ if (wait)
+ poll_wait(filp, &context->host_context.wait_queue,
+ wait);
+
+ spin_lock(&context->lock);
+ if (context->pending_datagrams > 0 ||
+ vmci_handle_arr_get_size(
+ context->pending_doorbell_array) > 0) {
+ mask = POLLIN;
+ }
+ spin_unlock(&context->lock);
+ }
+ return mask;
+}
+
+/*
+ * Copies the handles of a handle array into a user buffer, and
+ * returns the new length in user_buf_size. If the copy to the
+ * user buffer fails, the function still returns VMCI_SUCCESS,
+ * but retval != 0.
+ */
+static int drv_cp_harray_to_user(void __user *user_buf_uva,
+ u64 *user_buf_size,
+ struct vmci_handle_arr *handle_array,
+ int *retval)
+{
+ u32 array_size = 0;
+ struct vmci_handle *handles;
+
+ if (handle_array)
+ array_size = vmci_handle_arr_get_size(handle_array);
+
+ if (array_size * sizeof(*handles) > *user_buf_size)
+ return VMCI_ERROR_MORE_DATA;
+
+ *user_buf_size = array_size * sizeof(*handles);
+ if (*user_buf_size)
+ *retval = copy_to_user(user_buf_uva,
+ vmci_handle_arr_get_handles
+ (handle_array), *user_buf_size);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Sets up a given context for notify to work. Maps the notify
+ * boolean in user VA into kernel space.
+ */
+static int vmci_host_setup_notify(struct vmci_ctx *context,
+ unsigned long uva)
+{
+ struct page *page;
+ int retval;
+
+ if (context->notify_page) {
+ pr_devel("%s: Notify mechanism is already set up\n", __func__);
+ return VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ /*
+ * We are using 'bool' internally, but let's make sure we are
+ * explicit about the size.
+ */
+ BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
+ if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
+ return VMCI_ERROR_GENERIC;
+
+ /*
+ * Lock physical page backing a given user VA.
+ */
+ down_read(&current->mm->mmap_sem);
+ retval = get_user_pages(current, current->mm,
+ PAGE_ALIGN(uva),
+ 1, 1, 0, &page, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (retval != 1)
+ return VMCI_ERROR_GENERIC;
+
+ /*
+ * Map the locked page and set up notify pointer.
+ */
+ context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
+ vmci_ctx_check_signal_notify(context);
+
+ return VMCI_SUCCESS;
+}
+
+static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
+ unsigned int cmd, void __user *uptr)
+{
+ if (cmd == IOCTL_VMCI_VERSION2) {
+ int __user *vptr = uptr;
+ if (get_user(vmci_host_dev->user_version, vptr))
+ return -EFAULT;
+ }
+
+ /*
+ * The basic logic here is:
+ *
+ * If the user sends in a version of 0, tell it our version.
+ * If the user didn't send in a version, tell it our version.
+ * If the user sent in an old version, tell it -its- version.
+ * If the user sent in a newer version, tell it our version.
+ *
+ * The rationale behind telling the caller its version is that
+ * Workstation 6.5 required that the VMX and the VMCI kernel module be
+ * version sync'd. All new VMX users will be programmed to
+ * handle the VMCI kernel module version.
+ */
+
+ if (vmci_host_dev->user_version > 0 &&
+ vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
+ return vmci_host_dev->user_version;
+ }
+
+ return VMCI_VERSION;
+}
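The version handshake above is driven from user space through the IOCTL_VMCI_VERSION and IOCTL_VMCI_VERSION2 ioctls on /dev/vmci. A minimal caller-side sketch, for illustration only (it assumes the ioctl numbers and the VMCI_VERSION constant from the VMCI public headers; the helper name is hypothetical):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Hypothetical user-space helper, not part of this patch. */
static int vmci_negotiate_version(void)
{
	int fd = open("/dev/vmci", O_RDWR);
	int my_version = VMCI_VERSION;	/* the caller's own version */
	int negotiated;

	if (fd < 0)
		return -1;

	/* VERSION2 lets the driver read the caller's version first. */
	negotiated = ioctl(fd, IOCTL_VMCI_VERSION2, &my_version);

	/*
	 * Per the logic above, 'negotiated' is the driver's VMCI_VERSION,
	 * except for callers older than VMCI_VERSION_HOSTQP, which are
	 * told their own version back.
	 */
	close(fd);
	return negotiated;
}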
+
+#define vmci_ioctl_err(fmt, ...) \
+ pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
+
+static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_init_blk init_block;
+ const struct cred *cred;
+ int retval;
+
+ if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
+ vmci_ioctl_err("error reading init block\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&vmci_host_dev->lock);
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
+ vmci_ioctl_err("received VMCI init on initialized handle\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+ vmci_ioctl_err("unsupported VMCI restriction flag\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ cred = get_current_cred();
+ vmci_host_dev->context = vmci_ctx_create(init_block.cid,
+ init_block.flags, 0,
+ vmci_host_dev->user_version,
+ cred);
+ put_cred(cred);
+ if (IS_ERR(vmci_host_dev->context)) {
+ retval = PTR_ERR(vmci_host_dev->context);
+ vmci_ioctl_err("error initializing context\n");
+ goto out;
+ }
+
+ /*
+ * Copy cid to userlevel; we do this to allow the VMX
+ * to enforce its policy on cid generation.
+ */
+ init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
+ if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
+ vmci_ctx_destroy(vmci_host_dev->context);
+ vmci_host_dev->context = NULL;
+ vmci_ioctl_err("error writing init block\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
+ atomic_inc(&vmci_host_active_users);
+
+ retval = 0;
+
+out:
+ mutex_unlock(&vmci_host_dev->lock);
+ return retval;
+}
+
+static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_datagram_snd_rcv_info send_info;
+ struct vmci_datagram *dg = NULL;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&send_info, uptr, sizeof(send_info)))
+ return -EFAULT;
+
+ if (send_info.len > VMCI_MAX_DG_SIZE) {
+ vmci_ioctl_err("datagram is too big (size=%d)\n",
+ send_info.len);
+ return -EINVAL;
+ }
+
+ if (send_info.len < sizeof(*dg)) {
+ vmci_ioctl_err("datagram is too small (size=%d)\n",
+ send_info.len);
+ return -EINVAL;
+ }
+
+ dg = kmalloc(send_info.len, GFP_KERNEL);
+ if (!dg) {
+ vmci_ioctl_err(
+ "cannot allocate memory to dispatch datagram\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
+ send_info.len)) {
+ vmci_ioctl_err("error getting datagram\n");
+ kfree(dg);
+ return -EFAULT;
+ }
+
+ pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
+ dg->dst.context, dg->dst.resource,
+ dg->src.context, dg->src.resource,
+ (unsigned long long)dg->payload_size);
+
+ /* Get source context id. */
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ send_info.result = vmci_datagram_dispatch(cid, dg, true);
+ kfree(dg);
+
+ return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_datagram_snd_rcv_info recv_info;
+ struct vmci_datagram *dg = NULL;
+ int retval;
+ size_t size;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
+ return -EFAULT;
+
+ size = recv_info.len;
+ recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
+ &size, &dg);
+
+ if (recv_info.result >= VMCI_SUCCESS) {
+ void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
+ retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
+ kfree(dg);
+ if (retval != 0)
+ return -EFAULT;
+ }
+
+ return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_handle handle;
+ int vmci_status;
+ int __user *retptr;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+ struct vmci_qp_alloc_info_vmvm alloc_info;
+ struct vmci_qp_alloc_info_vmvm __user *info = uptr;
+
+ if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+ return -EFAULT;
+
+ handle = alloc_info.handle;
+ retptr = &info->result;
+
+ vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+ alloc_info.peer,
+ alloc_info.flags,
+ VMCI_NO_PRIVILEGE_FLAGS,
+ alloc_info.produce_size,
+ alloc_info.consume_size,
+ NULL,
+ vmci_host_dev->context);
+
+ if (vmci_status == VMCI_SUCCESS)
+ vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
+ } else {
+ struct vmci_qp_alloc_info alloc_info;
+ struct vmci_qp_alloc_info __user *info = uptr;
+ struct vmci_qp_page_store page_store;
+
+ if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+ return -EFAULT;
+
+ handle = alloc_info.handle;
+ retptr = &info->result;
+
+ page_store.pages = alloc_info.ppn_va;
+ page_store.len = alloc_info.num_ppns;
+
+ vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+ alloc_info.peer,
+ alloc_info.flags,
+ VMCI_NO_PRIVILEGE_FLAGS,
+ alloc_info.produce_size,
+ alloc_info.consume_size,
+ &page_store,
+ vmci_host_dev->context);
+ }
+
+ if (put_user(vmci_status, retptr)) {
+ if (vmci_status >= VMCI_SUCCESS) {
+ vmci_status = vmci_qp_broker_detach(handle,
+ vmci_host_dev->context);
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_set_va_info set_va_info;
+ struct vmci_qp_set_va_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+ vmci_ioctl_err("is not allowed\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
+ return -EFAULT;
+
+ if (set_va_info.va) {
+ /*
+ * VMX is passing down a new VA for the queue
+ * pair mapping.
+ */
+ result = vmci_qp_broker_map(set_va_info.handle,
+ vmci_host_dev->context,
+ set_va_info.va);
+ } else {
+ /*
+ * The queue pair is about to be unmapped by
+ * the VMX.
+ */
+ result = vmci_qp_broker_unmap(set_va_info.handle,
+ vmci_host_dev->context, 0);
+ }
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_page_file_info page_file_info;
+ struct vmci_qp_page_file_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
+ vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
+ vmci_ioctl_err("not supported on this VMX (version=%d)\n",
+ vmci_host_dev->user_version);
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
+ return -EFAULT;
+
+ /*
+ * Communicate success pre-emptively to the caller. Note that the
+ * basic premise is that it is incumbent upon the caller not to look at
+ * the info.result field until after the ioctl() returns. And then,
+ * only if the ioctl() result indicates no error. We send up the
+ * SUCCESS status before calling SetPageStore() because failing
+ * to copy up the result code means unwinding the SetPageStore().
+ *
+ * It turns out the logic to unwind a SetPageStore() opens a can of
+ * worms. For example, if a host had created the queue_pair and a
+ * guest attaches and SetPageStore() is successful but writing success
+ * fails, then ... the host has to be stopped from writing (anymore)
+ * data into the queue_pair. That means an additional test in the
+ * VMCI_Enqueue() code path. Ugh.
+ */
+
+ if (put_user(VMCI_SUCCESS, &info->result)) {
+ /*
+ * In this case, we can't write a result field of the
+ * caller's info block. So, we don't even try to
+ * SetPageStore().
+ */
+ return -EFAULT;
+ }
+
+ result = vmci_qp_broker_set_page_store(page_file_info.handle,
+ page_file_info.produce_va,
+ page_file_info.consume_va,
+ vmci_host_dev->context);
+ if (result < VMCI_SUCCESS) {
+ if (put_user(result, &info->result)) {
+ /*
+ * Note that in this case the SetPageStore()
+ * call failed but we were unable to
+ * communicate that to the caller (because the
+ * copy_to_user() call failed). So, if we
+ * simply return an error (in this case
+ * -EFAULT) then the caller will know that the
+ * SetPageStore failed even though we couldn't
+ * put the result code in the result field and
+ * indicate exactly why it failed.
+ *
+ * That says nothing about the issue where we
+ * were once able to write to the caller's info
+ * memory and now can't. Something more
+ * serious is probably going on than the fact
+ * that SetPageStore() didn't work.
+ */
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_dtch_info detach_info;
+ struct vmci_qp_dtch_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
+ return -EFAULT;
+
+ result = vmci_qp_broker_detach(detach_info.handle,
+ vmci_host_dev->context);
+ if (result == VMCI_SUCCESS &&
+ vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+ result = VMCI_SUCCESS_LAST_DETACH;
+ }
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_info ar_info;
+ struct vmci_ctx_info __user *info = uptr;
+ s32 result;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_info ar_info;
+ struct vmci_ctx_info __user *info = uptr;
+ u32 cid;
+ int result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ result = vmci_ctx_remove_notification(cid,
+ ar_info.remote_cid);
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_chkpt_buf_info get_info;
+ u32 cid;
+ void *cpt_buf;
+ int retval;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&get_info, uptr, sizeof(get_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
+ &get_info.buf_size, &cpt_buf);
+ if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
+ void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
+ retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
+ kfree(cpt_buf);
+
+ if (retval)
+ return -EFAULT;
+ }
+
+ return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_chkpt_buf_info set_info;
+ u32 cid;
+ void *cpt_buf;
+ int retval;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&set_info, uptr, sizeof(set_info)))
+ return -EFAULT;
+
+ cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
+ if (!cpt_buf) {
+ vmci_ioctl_err(
+ "cannot allocate memory to set cpt state (type=%d)\n",
+ set_info.cpt_type);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
+ set_info.buf_size)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
+ set_info.buf_size, cpt_buf);
+
+ retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;
+
+out:
+ kfree(cpt_buf);
+ return retval;
+}
+
+static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ u32 __user *u32ptr = uptr;
+
+ return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_set_notify_info notify_info;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
+ return -EFAULT;
+
+ if (notify_info.notify_uva) {
+ notify_info.result =
+ vmci_host_setup_notify(vmci_host_dev->context,
+ notify_info.notify_uva);
+ } else {
+ vmci_ctx_unset_notify(vmci_host_dev->context);
+ notify_info.result = VMCI_SUCCESS;
+ }
+
+ return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
+ -EFAULT : 0;
+}
+
+static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_dbell_notify_resource_info info;
+ u32 cid;
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+ vmci_ioctl_err("invalid for current VMX versions\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&info, uptr, sizeof(info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+ switch (info.action) {
+ case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
+ if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
+ u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
+ info.result = vmci_ctx_notify_dbell(cid, info.handle,
+ flags);
+ } else {
+ info.result = VMCI_ERROR_UNAVAILABLE;
+ }
+ break;
+
+ case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
+ info.result = vmci_ctx_dbell_create(cid, info.handle);
+ break;
+
+ case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
+ info.result = vmci_ctx_dbell_destroy(cid, info.handle);
+ break;
+
+ default:
+ vmci_ioctl_err("got unknown action (action=%d)\n",
+ info.action);
+ info.result = VMCI_ERROR_INVALID_ARGS;
+ }
+
+ return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_notify_recv_info info;
+ struct vmci_handle_arr *db_handle_array;
+ struct vmci_handle_arr *qp_handle_array;
+ void __user *ubuf;
+ u32 cid;
+ int retval = 0;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+ vmci_ioctl_err("not supported for the current vmx version\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&info, uptr, sizeof(info)))
+ return -EFAULT;
+
+ if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
+ (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
+ return -EINVAL;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+ info.result = vmci_ctx_rcv_notifications_get(cid,
+ &db_handle_array, &qp_handle_array);
+ if (info.result != VMCI_SUCCESS)
+ return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+
+ ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
+ info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
+ db_handle_array, &retval);
+ if (info.result == VMCI_SUCCESS && !retval) {
+ ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
+ info.result = drv_cp_harray_to_user(ubuf,
+ &info.qp_handle_buf_size,
+ qp_handle_array, &retval);
+ }
+
+ if (!retval && copy_to_user(uptr, &info, sizeof(info)))
+ retval = -EFAULT;
+
+ vmci_ctx_rcv_notifications_release(cid,
+ db_handle_array, qp_handle_array,
+ info.result == VMCI_SUCCESS && !retval);
+
+ return retval;
+}
+
+static long vmci_host_unlocked_ioctl(struct file *filp,
+ unsigned int iocmd, unsigned long ioarg)
+{
+#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \
+ char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \
+ return vmci_host_do_ ## ioctl_fn( \
+ vmci_host_dev, name, uptr); \
+ } while (0)
+
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+ void __user *uptr = (void __user *)ioarg;
+
+ switch (iocmd) {
+ case IOCTL_VMCI_INIT_CONTEXT:
+ VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
+ case IOCTL_VMCI_DATAGRAM_SEND:
+ VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
+ case IOCTL_VMCI_DATAGRAM_RECEIVE:
+ VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
+ case IOCTL_VMCI_QUEUEPAIR_ALLOC:
+ VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
+ case IOCTL_VMCI_QUEUEPAIR_SETVA:
+ VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
+ case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
+ VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
+ case IOCTL_VMCI_QUEUEPAIR_DETACH:
+ VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
+ case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
+ VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
+ case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
+ VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
+ case IOCTL_VMCI_CTX_GET_CPT_STATE:
+ VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
+ case IOCTL_VMCI_CTX_SET_CPT_STATE:
+ VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
+ case IOCTL_VMCI_GET_CONTEXT_ID:
+ VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
+ case IOCTL_VMCI_SET_NOTIFY:
+ VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
+ case IOCTL_VMCI_NOTIFY_RESOURCE:
+ VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
+ case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
+ VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);
+
+ case IOCTL_VMCI_VERSION:
+ case IOCTL_VMCI_VERSION2:
+ return vmci_host_get_version(vmci_host_dev, iocmd, uptr);
+
+ default:
+ pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
+ return -EINVAL;
+ }
+
+#undef VMCI_DO_IOCTL
+}
+
+static const struct file_operations vmuser_fops = {
+ .owner = THIS_MODULE,
+ .open = vmci_host_open,
+ .release = vmci_host_close,
+ .poll = vmci_host_poll,
+ .unlocked_ioctl = vmci_host_unlocked_ioctl,
+ .compat_ioctl = vmci_host_unlocked_ioctl,
+};
+
+static struct miscdevice vmci_host_miscdev = {
+ .name = "vmci",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &vmuser_fops,
+};
+
+int __init vmci_host_init(void)
+{
+ int error;
+
+ host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
+ VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+ -1, VMCI_VERSION, NULL);
+ if (IS_ERR(host_context)) {
+ error = PTR_ERR(host_context);
+ pr_warn("Failed to initialize VMCIContext (error%d)\n",
+ error);
+ return error;
+ }
+
+ error = misc_register(&vmci_host_miscdev);
+ if (error) {
+ pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
+ vmci_host_miscdev.name,
+ MISC_MAJOR, vmci_host_miscdev.minor,
+ error);
+ pr_warn("Unable to initialize host personality\n");
+ vmci_ctx_destroy(host_context);
+ return error;
+ }
+
+ pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
+ vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);
+
+ vmci_host_device_initialized = true;
+ return 0;
+}
+
+void __exit vmci_host_exit(void)
+{
+ int error;
+
+ vmci_host_device_initialized = false;
+
+ error = misc_deregister(&vmci_host_miscdev);
+ if (error)
+ pr_warn("Error unregistering character device: %d\n", error);
+
+ vmci_ctx_destroy(host_context);
+ vmci_qp_broker_exit();
+
+ pr_debug("VMCI host driver module unloaded\n");
+}
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
new file mode 100644
index 000000000000..d94245dbd765
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -0,0 +1,3425 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * In the following, we will distinguish between two kinds of VMX processes -
+ * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
+ * VMCI page files in the VMX and support VM to VM communication, and the
+ * newer ones that use the guest memory directly. We will in the following
+ * refer to the older VMX versions as old-style VMX'en, and the newer ones as
+ * new-style VMX'en.
+ *
+ * The state transition diagram is as follows (the VMCIQPB_ prefix has been
+ * removed for readability) - see below for more details on the transitions:
+ *
+ * -------------- NEW -------------
+ * | |
+ * \_/ \_/
+ * CREATED_NO_MEM <-----------------> CREATED_MEM
+ * | | |
+ * | o-----------------------o |
+ * | | |
+ * \_/ \_/ \_/
+ * ATTACHED_NO_MEM <----------------> ATTACHED_MEM
+ * | | |
+ * | o----------------------o |
+ * | | |
+ * \_/ \_/ \_/
+ * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
+ * | |
+ * | |
+ * -------------> gone <-------------
+ *
+ * In more detail. When a VMCI queue pair is first created, it will be in the
+ * VMCIQPB_NEW state. It will then move into one of the following states:
+ *
+ * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
+ *
+ * - the create was performed by a host endpoint, in which case there is
+ * no backing memory yet.
+ *
+ * - the create was initiated by an old-style VMX, that uses
+ * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
+ * a later point in time. This state can be distinguished from the one
+ * above by the context ID of the creator. A host side is not allowed to
+ * attach until the page store has been set.
+ *
+ * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
+ * is created by a VMX using the queue pair device backend that
+ * sets the UVAs of the queue pair immediately and stores the
+ * information for later attachers. At this point, it is ready for
+ * the host side to attach to it.
+ *
+ * Once the queue pair is in one of the created states (with the exception of
+ * the case mentioned for older VMX'en above), it is possible to attach to the
+ * queue pair. Again we have two new states possible:
+ *
+ * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
+ * paths:
+ *
+ * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
+ * pair, and attaches to a queue pair previously created by the host side.
+ *
+ * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
+ * already created by a guest.
+ *
+ * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
+ * vmci_qp_broker_set_page_store (see below).
+ *
+ * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
+ * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
+ * bring the queue pair into this state. Once vmci_qp_broker_set_page_store
+ * is called to register the user memory, the VMCIQPB_ATTACHED_MEM state
+ * will be entered.
+ *
+ * From the attached queue pair, the queue pair can enter the shutdown states
+ * when either side of the queue pair detaches. If the guest side detaches
+ * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
+ * the content of the queue pair will no longer be available. If the host
+ * side detaches first, the queue pair will either enter the
+ * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
+ * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
+ * (e.g., the host detaches while a guest is stunned).
+ *
+ * New-style VMX'en will also unmap guest memory, if the guest is
+ * quiesced, e.g., during a snapshot operation. In that case, the guest
+ * memory will no longer be available, and the queue pair will transition from
+ * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
+ * in which case the queue pair will transition from the *_NO_MEM state at that
+ * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
+ * since the peer may have either attached or detached in the meantime. The
+ * values are laid out such that ++ on a state will move from a *_NO_MEM to a
+ * *_MEM state, and vice versa.
+ */
+
+/*
+ * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
+ * types are passed around to enqueue and dequeue routines. Note that
+ * often the functions passed are simply wrappers around memcpy
+ * itself.
+ *
+ * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
+ * there's an unused last parameter for the hosted side. In
+ * ESX, that parameter holds a buffer type.
+ */
+typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
+ u64 queue_offset, const void *src,
+ size_t src_offset, size_t size);
+typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size);
+
+/* The Kernel specific component of the struct vmci_queue structure. */
+struct vmci_queue_kern_if {
+ struct page **page;
+ struct page **header_page;
+ void *va;
+ struct mutex __mutex; /* Protects the queue. */
+ struct mutex *mutex; /* Shared by producer and consumer queues. */
+ bool host;
+ size_t num_pages;
+ bool mapped;
+};
+
+/*
+ * This structure is opaque to the clients.
+ */
+struct vmci_qp {
+ struct vmci_handle handle;
+ struct vmci_queue *produce_q;
+ struct vmci_queue *consume_q;
+ u64 produce_q_size;
+ u64 consume_q_size;
+ u32 peer;
+ u32 flags;
+ u32 priv_flags;
+ bool guest_endpoint;
+ unsigned int blocked;
+ unsigned int generation;
+ wait_queue_head_t event;
+};
+
+enum qp_broker_state {
+ VMCIQPB_NEW,
+ VMCIQPB_CREATED_NO_MEM,
+ VMCIQPB_CREATED_MEM,
+ VMCIQPB_ATTACHED_NO_MEM,
+ VMCIQPB_ATTACHED_MEM,
+ VMCIQPB_SHUTDOWN_NO_MEM,
+ VMCIQPB_SHUTDOWN_MEM,
+ VMCIQPB_GONE
+};
+
+#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
+ _qpb->state == VMCIQPB_ATTACHED_MEM || \
+ _qpb->state == VMCIQPB_SHUTDOWN_MEM)
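As the state description above notes, each *_NO_MEM value is immediately followed by its *_MEM counterpart, so mapping or unmapping guest memory amounts to stepping the broker state by one. A minimal sketch of that idea (illustrative only; these helpers are not part of the patch):

/* Illustrative only: *_NO_MEM -> *_MEM when guest memory becomes mapped. */
static inline void qp_sketch_mem_mapped(enum qp_broker_state *state)
{
	if (*state == VMCIQPB_CREATED_NO_MEM ||
	    *state == VMCIQPB_ATTACHED_NO_MEM ||
	    *state == VMCIQPB_SHUTDOWN_NO_MEM)
		(*state)++;
}

/* Illustrative only: *_MEM -> *_NO_MEM when guest memory gets unmapped. */
static inline void qp_sketch_mem_unmapped(enum qp_broker_state *state)
{
	if (*state == VMCIQPB_CREATED_MEM ||
	    *state == VMCIQPB_ATTACHED_MEM ||
	    *state == VMCIQPB_SHUTDOWN_MEM)
		(*state)--;
}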
+
+/*
+ * In the queue pair broker, we always use the guest point of view for
+ * the produce and consume queue values and references, e.g., the
+ * produce queue size stored is the guest's produce queue size. The
+ * host endpoint will need to swap these around. The only exception is
+ * the local queue pairs on the host, in which case the host endpoint
+ * that creates the queue pair will have the right orientation, and
+ * the attaching host endpoint will need to swap.
+ */
+struct qp_entry {
+ struct list_head list_item;
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u32 ref_count;
+};
+
+struct qp_broker_entry {
+ struct vmci_resource resource;
+ struct qp_entry qp;
+ u32 create_id;
+ u32 attach_id;
+ enum qp_broker_state state;
+ bool require_trusted_attach;
+ bool created_by_trusted;
+ bool vmci_page_files; /* Created by VMX using VMCI page files */
+ struct vmci_queue *produce_q;
+ struct vmci_queue *consume_q;
+ struct vmci_queue_header saved_produce_q;
+ struct vmci_queue_header saved_consume_q;
+ vmci_event_release_cb wakeup_cb;
+ void *client_data;
+ void *local_mem; /* Kernel memory for local queue pair */
+};
+
+struct qp_guest_endpoint {
+ struct vmci_resource resource;
+ struct qp_entry qp;
+ u64 num_ppns;
+ void *produce_q;
+ void *consume_q;
+ struct ppn_set ppn_set;
+};
+
+struct qp_list {
+ struct list_head head;
+ struct mutex mutex; /* Protect queue list. */
+};
+
+static struct qp_list qp_broker_list = {
+ .head = LIST_HEAD_INIT(qp_broker_list.head),
+ .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
+};
+
+static struct qp_list qp_guest_endpoints = {
+ .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
+ .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
+};
+
+#define INVALID_VMCI_GUEST_MEM_ID 0
+#define QPE_NUM_PAGES(_QPE) ((u32) \
+ (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
+ DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
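For illustration: with 4 KiB pages, a queue pair entry with a 64 KiB produce queue and a 64 KiB consume queue works out to 16 + 16 + 2 = 34 pages under QPE_NUM_PAGES, the extra two being the produce and consume queue headers.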
+
+
+/*
+ * Frees kernel VA space for a given queue and its queue header, and
+ * frees physical data pages.
+ */
+static void qp_free_queue(void *q, u64 size)
+{
+ struct vmci_queue *queue = q;
+
+ if (queue) {
+ u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ if (queue->kernel_if->mapped) {
+ vunmap(queue->kernel_if->va);
+ queue->kernel_if->va = NULL;
+ }
+
+ while (i)
+ __free_page(queue->kernel_if->page[--i]);
+
+ vfree(queue->q_header);
+ }
+}
+
+/*
+ * Allocates kernel VA space of specified size, plus space for the
+ * queue structure/kernel interface and the queue header. Allocates
+ * physical pages for the queue data pages.
+ *
+ * PAGE m: struct vmci_queue_header (struct vmci_queue->q_header)
+ * PAGE m+1: struct vmci_queue
+ * PAGE m+1+q: struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
+ * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ */
+static void *qp_alloc_queue(u64 size, u32 flags)
+{
+ u64 i;
+ struct vmci_queue *queue;
+ struct vmci_queue_header *q_header;
+ const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ const uint queue_size =
+ PAGE_SIZE +
+ sizeof(*queue) + sizeof(*(queue->kernel_if)) +
+ num_data_pages * sizeof(*(queue->kernel_if->page));
+
+ q_header = vmalloc(queue_size);
+ if (!q_header)
+ return NULL;
+
+ queue = (void *)q_header + PAGE_SIZE;
+ queue->q_header = q_header;
+ queue->saved_header = NULL;
+ queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
+ queue->kernel_if->header_page = NULL; /* Unused in guest. */
+ queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+ queue->kernel_if->host = false;
+ queue->kernel_if->va = NULL;
+ queue->kernel_if->mapped = false;
+
+ for (i = 0; i < num_data_pages; i++) {
+ queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
+ if (!queue->kernel_if->page[i])
+ goto fail;
+ }
+
+ if (vmci_qp_pinned(flags)) {
+ queue->kernel_if->va =
+ vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
+ PAGE_KERNEL);
+ if (!queue->kernel_if->va)
+ goto fail;
+
+ queue->kernel_if->mapped = true;
+ }
+
+ return (void *)queue;
+
+ fail:
+ qp_free_queue(queue, i * PAGE_SIZE);
+ return NULL;
+}
+
+/*
+ * Copies from a given buffer or iovector to a VMCI Queue. Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_to_queue(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src,
+ size_t size,
+ bool is_iovec)
+{
+ struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < size) {
+ u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+ size_t page_offset =
+ (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+ void *va;
+ size_t to_copy;
+
+ if (!kernel_if->mapped)
+ va = kmap(kernel_if->page[page_index]);
+ else
+ va = (void *)((u8 *)kernel_if->va +
+ (page_index * PAGE_SIZE));
+
+ if (size - bytes_copied > PAGE_SIZE - page_offset)
+ /* Enough payload to fill up from this page. */
+ to_copy = PAGE_SIZE - page_offset;
+ else
+ to_copy = size - bytes_copied;
+
+ if (is_iovec) {
+ struct iovec *iov = (struct iovec *)src;
+ int err;
+
+ /* The iovec will track bytes_copied internally. */
+ err = memcpy_fromiovec((u8 *)va + page_offset,
+ iov, to_copy);
+ if (err != 0) {
+ kunmap(kernel_if->page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else {
+ memcpy((u8 *)va + page_offset,
+ (u8 *)src + bytes_copied, to_copy);
+ }
+
+ bytes_copied += to_copy;
+ if (!kernel_if->mapped)
+ kunmap(kernel_if->page[page_index]);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Copies to a given buffer or iovector from a VMCI Queue. Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_from_queue(void *dest,
+ const struct vmci_queue *queue,
+ u64 queue_offset,
+ size_t size,
+ bool is_iovec)
+{
+ struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < size) {
+ u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+ size_t page_offset =
+ (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+ void *va;
+ size_t to_copy;
+
+ if (!kernel_if->mapped)
+ va = kmap(kernel_if->page[page_index]);
+ else
+ va = (void *)((u8 *)kernel_if->va +
+ (page_index * PAGE_SIZE));
+
+ if (size - bytes_copied > PAGE_SIZE - page_offset)
+ /* Enough payload to fill up this page. */
+ to_copy = PAGE_SIZE - page_offset;
+ else
+ to_copy = size - bytes_copied;
+
+ if (is_iovec) {
+ struct iovec *iov = (struct iovec *)dest;
+ int err;
+
+ /* The iovec will track bytes_copied internally. */
+ err = memcpy_toiovec(iov, (u8 *)va + page_offset,
+ to_copy);
+ if (err != 0) {
+ kunmap(kernel_if->page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else {
+ memcpy((u8 *)dest + bytes_copied,
+ (u8 *)va + page_offset, to_copy);
+ }
+
+ bytes_copied += to_copy;
+ if (!kernel_if->mapped)
+ kunmap(kernel_if->page[page_index]);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Allocates two lists of PPNs --- one for the pages in the produce queue,
+ * and the other for the pages in the consume queue. Initializes the list
+ * of PPNs with the page frame numbers of the KVA for the two queues (and
+ * the queue headers).
+ */
+static int qp_alloc_ppn_set(void *prod_q,
+ u64 num_produce_pages,
+ void *cons_q,
+ u64 num_consume_pages, struct ppn_set *ppn_set)
+{
+ u32 *produce_ppns;
+ u32 *consume_ppns;
+ struct vmci_queue *produce_q = prod_q;
+ struct vmci_queue *consume_q = cons_q;
+ u64 i;
+
+ if (!produce_q || !num_produce_pages || !consume_q ||
+ !num_consume_pages || !ppn_set)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (ppn_set->initialized)
+ return VMCI_ERROR_ALREADY_EXISTS;
+
+ produce_ppns =
+ kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
+ if (!produce_ppns)
+ return VMCI_ERROR_NO_MEM;
+
+ consume_ppns =
+ kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
+ if (!consume_ppns) {
+ kfree(produce_ppns);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
+ for (i = 1; i < num_produce_pages; i++) {
+ unsigned long pfn;
+
+ produce_ppns[i] =
+ page_to_pfn(produce_q->kernel_if->page[i - 1]);
+ pfn = produce_ppns[i];
+
+ /* Fail allocation if PFN isn't supported by hypervisor. */
+ if (sizeof(pfn) > sizeof(*produce_ppns)
+ && pfn != produce_ppns[i])
+ goto ppn_error;
+ }
+
+ consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
+ for (i = 1; i < num_consume_pages; i++) {
+ unsigned long pfn;
+
+ consume_ppns[i] =
+ page_to_pfn(consume_q->kernel_if->page[i - 1]);
+ pfn = consume_ppns[i];
+
+ /* Fail allocation if PFN isn't supported by hypervisor. */
+ if (sizeof(pfn) > sizeof(*consume_ppns)
+ && pfn != consume_ppns[i])
+ goto ppn_error;
+ }
+
+ ppn_set->num_produce_pages = num_produce_pages;
+ ppn_set->num_consume_pages = num_consume_pages;
+ ppn_set->produce_ppns = produce_ppns;
+ ppn_set->consume_ppns = consume_ppns;
+ ppn_set->initialized = true;
+ return VMCI_SUCCESS;
+
+ ppn_error:
+ kfree(produce_ppns);
+ kfree(consume_ppns);
+ return VMCI_ERROR_INVALID_ARGS;
+}
+
+/*
+ * Frees the two lists of PPNs for a queue pair.
+ */
+static void qp_free_ppn_set(struct ppn_set *ppn_set)
+{
+ if (ppn_set->initialized) {
+ /* Do not call these functions on NULL inputs. */
+ kfree(ppn_set->produce_ppns);
+ kfree(ppn_set->consume_ppns);
+ }
+ memset(ppn_set, 0, sizeof(*ppn_set));
+}
+
+/*
+ * Populates the list of PPNs in the hypercall structure with the PPNs
+ * of the produce queue and the consume queue.
+ */
+static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
+{
+ memcpy(call_buf, ppn_set->produce_ppns,
+ ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
+ memcpy(call_buf +
+ ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
+ ppn_set->consume_ppns,
+ ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
+
+ return VMCI_SUCCESS;
+}
+
+static int qp_memcpy_to_queue(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src, size_t src_offset, size_t size)
+{
+ return __qp_memcpy_to_queue(queue, queue_offset,
+ (u8 *)src + src_offset, size, false);
+}
+
+static int qp_memcpy_from_queue(void *dest,
+ size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size)
+{
+ return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
+ queue, queue_offset, size, false);
+}
+
+/*
+ * Copies from a given iovec to a VMCI Queue.
+ */
+static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src,
+ size_t src_offset, size_t size)
+{
+
+ /*
+ * We ignore src_offset because src is really a struct iovec * and will
+ * maintain offset internally.
+ */
+ return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+}
+
+/*
+ * Copies to a given iovec from a VMCI Queue.
+ */
+static int qp_memcpy_from_queue_iov(void *dest,
+ size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size)
+{
+ /*
+ * We ignore dest_offset because dest is really a struct iovec * and
+ * will maintain offset internally.
+ */
+ return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
+}
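The four wrappers above exist so that the enqueue and dequeue paths later in this file can be written once against the vmci_memcpy_to_queue_func / vmci_memcpy_from_queue_func typedefs and simply be handed the flat-buffer or iovec variant. A hypothetical call site, for illustration only (the helper name and the zero offset are not part of the patch):

static int qp_sketch_copy_in(struct vmci_queue *produce_q,
			     const void *buf, size_t buf_size,
			     bool buf_is_iovec)
{
	/* Pick the flat-buffer or iovec flavour; both match the typedef. */
	vmci_memcpy_to_queue_func *copy_fn =
		buf_is_iovec ? qp_memcpy_to_queue_iov : qp_memcpy_to_queue;

	/*
	 * Offset 0 is for illustration only; the real enqueue path derives
	 * the write offset from the queue headers' free space.
	 */
	return copy_fn(produce_q, 0, buf, 0, buf_size);
}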
+
+/*
+ * Allocates kernel VA space of specified size plus space for the queue
+ * and kernel interface. This is different from the guest queue allocator,
+ * because we do not allocate our own queue header/data pages here but
+ * share those of the guest.
+ */
+static struct vmci_queue *qp_host_alloc_queue(u64 size)
+{
+ struct vmci_queue *queue;
+ const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+ const size_t queue_page_size =
+ num_pages * sizeof(*queue->kernel_if->page);
+
+ queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
+ if (queue) {
+ queue->q_header = NULL;
+ queue->saved_header = NULL;
+ queue->kernel_if =
+ (struct vmci_queue_kern_if *)((u8 *)queue +
+ sizeof(*queue));
+ queue->kernel_if->host = true;
+ queue->kernel_if->mutex = NULL;
+ queue->kernel_if->num_pages = num_pages;
+ queue->kernel_if->header_page =
+ (struct page **)((u8 *)queue + queue_size);
+ queue->kernel_if->page = &queue->kernel_if->header_page[1];
+ queue->kernel_if->va = NULL;
+ queue->kernel_if->mapped = false;
+ }
+
+ return queue;
+}
+
+/*
+ * Frees kernel memory for a given queue (header plus translation
+ * structure).
+ */
+static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
+{
+ kfree(queue);
+}
+
+/*
+ * Initialize the mutex for the pair of queues. This mutex is used to
+ * protect the q_header and the buffer from changing out from under any
+ * users of either queue. Of course, it's only any good if the mutexes
+ * are actually acquired. Queue structure must lie on non-paged memory
+ * or we cannot guarantee access to the mutex.
+ */
+static void qp_init_queue_mutex(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ /*
+ * Only the host queue has shared state - the guest queues do not
+ * need to synchronize access using a queue mutex.
+ */
+
+ if (produce_q->kernel_if->host) {
+ produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+ consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+ mutex_init(produce_q->kernel_if->mutex);
+ }
+}
+
+/*
+ * Cleans up the mutex for the pair of queues.
+ */
+static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ if (produce_q->kernel_if->host) {
+ produce_q->kernel_if->mutex = NULL;
+ consume_q->kernel_if->mutex = NULL;
+ }
+}
+
+/*
+ * Acquire the mutex for the queue. Note that the produce_q and
+ * the consume_q share a mutex. So, only one of the two needs to
+ * be passed in to this routine. Either will work just fine.
+ */
+static void qp_acquire_queue_mutex(struct vmci_queue *queue)
+{
+ if (queue->kernel_if->host)
+ mutex_lock(queue->kernel_if->mutex);
+}
+
+/*
+ * Release the mutex for the queue. Note that the produce_q and
+ * the consume_q share a mutex. So, only one of the two needs to
+ * be passed in to this routine. Either will work just fine.
+ */
+static void qp_release_queue_mutex(struct vmci_queue *queue)
+{
+ if (queue->kernel_if->host)
+ mutex_unlock(queue->kernel_if->mutex);
+}
+
+/*
+ * Helper function to release pages in the PageStoreAttachInfo
+ * previously obtained using get_user_pages.
+ */
+static void qp_release_pages(struct page **pages,
+ u64 num_pages, bool dirty)
+{
+ int i;
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ page_cache_release(pages[i]);
+ pages[i] = NULL;
+ }
+}
+
+/*
+ * Lock the user pages referenced by the {produce,consume}Buffer
+ * struct into memory and populate the {produce,consume}Pages
+ * arrays in the attach structure with them.
+ */
+static int qp_host_get_user_memory(u64 produce_uva,
+ u64 consume_uva,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ int retval;
+ int err = VMCI_SUCCESS;
+
+ down_write(&current->mm->mmap_sem);
+ retval = get_user_pages(current,
+ current->mm,
+ (uintptr_t) produce_uva,
+ produce_q->kernel_if->num_pages,
+ 1, 0, produce_q->kernel_if->header_page, NULL);
+ if (retval < produce_q->kernel_if->num_pages) {
+ pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
+ qp_release_pages(produce_q->kernel_if->header_page, retval,
+ false);
+ err = VMCI_ERROR_NO_MEM;
+ goto out;
+ }
+
+ retval = get_user_pages(current,
+ current->mm,
+ (uintptr_t) consume_uva,
+ consume_q->kernel_if->num_pages,
+ 1, 0, consume_q->kernel_if->header_page, NULL);
+ if (retval < consume_q->kernel_if->num_pages) {
+ pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
+ qp_release_pages(consume_q->kernel_if->header_page, retval,
+ false);
+ qp_release_pages(produce_q->kernel_if->header_page,
+ produce_q->kernel_if->num_pages, false);
+ err = VMCI_ERROR_NO_MEM;
+ }
+
+ out:
+ up_write(&current->mm->mmap_sem);
+
+ return err;
+}
+
+/*
+ * Registers the specification of the user pages used for backing a queue
+ * pair. Enough information to map in pages is stored in the OS specific
+ * part of the struct vmci_queue structure.
+ */
+static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ u64 produce_uva;
+ u64 consume_uva;
+
+ /*
+ * The new style and the old style mapping only differ in
+ * that we either get a single UVA or two UVAs, so we split the
+ * single UVA range at the appropriate spot.
+ */
+ produce_uva = page_store->pages;
+ consume_uva = page_store->pages +
+ produce_q->kernel_if->num_pages * PAGE_SIZE;
+ return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
+ consume_q);
+}
+
+/*
+ * Releases and removes the references to user pages stored in the attach
+ * struct. Pages are released from the page cache and may become
+ * swappable again.
+ */
+static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ qp_release_pages(produce_q->kernel_if->header_page,
+ produce_q->kernel_if->num_pages, true);
+ memset(produce_q->kernel_if->header_page, 0,
+ sizeof(*produce_q->kernel_if->header_page) *
+ produce_q->kernel_if->num_pages);
+ qp_release_pages(consume_q->kernel_if->header_page,
+ consume_q->kernel_if->num_pages, true);
+ memset(consume_q->kernel_if->header_page, 0,
+ sizeof(*consume_q->kernel_if->header_page) *
+ consume_q->kernel_if->num_pages);
+}
+
+/*
+ * Once qp_host_register_user_memory has been performed on a
+ * queue, the queue pair headers can be mapped into the
+ * kernel. Once mapped, they must be unmapped with
+ * qp_host_unmap_queues prior to calling
+ * qp_host_unregister_user_memory.
+ * Pages are pinned.
+ */
+static int qp_host_map_queues(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ int result;
+
+ if (!produce_q->q_header || !consume_q->q_header) {
+ struct page *headers[2];
+
+ if (produce_q->q_header != consume_q->q_header)
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+ if (produce_q->kernel_if->header_page == NULL ||
+ *produce_q->kernel_if->header_page == NULL)
+ return VMCI_ERROR_UNAVAILABLE;
+
+ headers[0] = *produce_q->kernel_if->header_page;
+ headers[1] = *consume_q->kernel_if->header_page;
+
+ produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
+ if (produce_q->q_header != NULL) {
+ consume_q->q_header =
+ (struct vmci_queue_header *)((u8 *)
+ produce_q->q_header +
+ PAGE_SIZE);
+ result = VMCI_SUCCESS;
+ } else {
+ pr_warn("vmap failed\n");
+ result = VMCI_ERROR_NO_MEM;
+ }
+ } else {
+ result = VMCI_SUCCESS;
+ }
+
+ return result;
+}
+
+/*
+ * Unmaps previously mapped queue pair headers from the kernel.
+ * Pages are unpinned.
+ */
+static int qp_host_unmap_queues(u32 gid,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ if (produce_q->q_header) {
+ if (produce_q->q_header < consume_q->q_header)
+ vunmap(produce_q->q_header);
+ else
+ vunmap(consume_q->q_header);
+
+ produce_q->q_header = NULL;
+ consume_q->q_header = NULL;
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle. Assumes
+ * that the list is locked.
+ */
+static struct qp_entry *qp_list_find(struct qp_list *qp_list,
+ struct vmci_handle handle)
+{
+ struct qp_entry *entry;
+
+ if (vmci_handle_is_invalid(handle))
+ return NULL;
+
+ list_for_each_entry(entry, &qp_list->head, list_item) {
+ if (vmci_handle_is_equal(entry->handle, handle))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_guest_endpoint *
+qp_guest_handle_to_entry(struct vmci_handle handle)
+{
+ struct qp_guest_endpoint *entry;
+ struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
+
+ entry = qp ? container_of(
+ qp, struct qp_guest_endpoint, qp) : NULL;
+ return entry;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_broker_entry *
+qp_broker_handle_to_entry(struct vmci_handle handle)
+{
+ struct qp_broker_entry *entry;
+ struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
+
+ entry = qp ? container_of(
+ qp, struct qp_broker_entry, qp) : NULL;
+ return entry;
+}
+
+/*
+ * Dispatches a queue pair event message directly into the local event
+ * queue.
+ */
+static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
+{
+ u32 context_id = vmci_get_context_id();
+ struct vmci_event_qp ev;
+
+ ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event =
+ attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+ ev.payload.peer_id = context_id;
+ ev.payload.handle = handle;
+
+ return vmci_event_dispatch(&ev.msg.hdr);
+}
+
+/*
+ * Allocates and initializes a qp_guest_endpoint structure.
+ * Allocates a queue_pair rid (and handle) iff the given entry has
+ * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
+ * are reserved handles. Assumes that the QP list mutex is held
+ * by the caller.
+ */
+static struct qp_guest_endpoint *
+qp_guest_endpoint_create(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u64 produce_size,
+ u64 consume_size,
+ void *produce_q,
+ void *consume_q)
+{
+ int result;
+ struct qp_guest_endpoint *entry;
+ /* One page each for the queue headers. */
+ const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
+ DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
+
+ if (vmci_handle_is_invalid(handle)) {
+ u32 context_id = vmci_get_context_id();
+
+ handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->qp.peer = peer;
+ entry->qp.flags = flags;
+ entry->qp.produce_size = produce_size;
+ entry->qp.consume_size = consume_size;
+ entry->qp.ref_count = 0;
+ entry->num_ppns = num_ppns;
+ entry->produce_q = produce_q;
+ entry->consume_q = consume_q;
+ INIT_LIST_HEAD(&entry->qp.list_item);
+
+ /* Add resource obj */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+ handle);
+ entry->qp.handle = vmci_resource_handle(&entry->resource);
+ if ((result != VMCI_SUCCESS) ||
+ qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+ handle.context, handle.resource, result);
+ kfree(entry);
+ entry = NULL;
+ }
+ }
+ return entry;
+}
+
+/*
+ * Frees a qp_guest_endpoint structure.
+ */
+static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
+{
+ qp_free_ppn_set(&entry->ppn_set);
+ qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+ qp_free_queue(entry->produce_q, entry->qp.produce_size);
+ qp_free_queue(entry->consume_q, entry->qp.consume_size);
+ /* Unlink from resource hash table and free callback */
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+}
+
+/*
+ * Helper to make a queue_pairAlloc hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
+{
+ struct vmci_qp_alloc_msg *alloc_msg;
+ size_t msg_size;
+ int result;
+
+ if (!entry || entry->num_ppns <= 2)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ msg_size = sizeof(*alloc_msg) +
+ (size_t) entry->num_ppns * sizeof(u32);
+ alloc_msg = kmalloc(msg_size, GFP_KERNEL);
+ if (!alloc_msg)
+ return VMCI_ERROR_NO_MEM;
+
+ alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_QUEUEPAIR_ALLOC);
+ alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
+ alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
+ alloc_msg->handle = entry->qp.handle;
+ alloc_msg->peer = entry->qp.peer;
+ alloc_msg->flags = entry->qp.flags;
+ alloc_msg->produce_size = entry->qp.produce_size;
+ alloc_msg->consume_size = entry->qp.consume_size;
+ alloc_msg->num_ppns = entry->num_ppns;
+
+ result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
+ &entry->ppn_set);
+ if (result == VMCI_SUCCESS)
+ result = vmci_send_datagram(&alloc_msg->hdr);
+
+ kfree(alloc_msg);
+
+ return result;
+}
+
+/*
+ * Helper to make a queue_pairDetach hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_detatch_hypercall(struct vmci_handle handle)
+{
+ struct vmci_qp_detach_msg detach_msg;
+
+ detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_QUEUEPAIR_DETACH);
+ detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ detach_msg.hdr.payload_size = sizeof(handle);
+ detach_msg.handle = handle;
+
+ return vmci_send_datagram(&detach_msg.hdr);
+}
+
+/*
+ * Adds the given entry to the list. Assumes that the list is locked.
+ */
+static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
+{
+ if (entry)
+ list_add(&entry->list_item, &qp_list->head);
+}
+
+/*
+ * Removes the given entry from the list. Assumes that the list is locked.
+ */
+static void qp_list_remove_entry(struct qp_list *qp_list,
+ struct qp_entry *entry)
+{
+ if (entry)
+ list_del(&entry->list_item);
+}
+
+/*
+ * Helper for VMCI queue_pair detach interface. Frees the physical
+ * pages for the queue pair.
+ */
+static int qp_detatch_guest_work(struct vmci_handle handle)
+{
+ int result;
+ struct qp_guest_endpoint *entry;
+ u32 ref_count = ~0; /* To avoid compiler warning below */
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ entry = qp_guest_handle_to_entry(handle);
+ if (!entry) {
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ result = VMCI_SUCCESS;
+
+ if (entry->qp.ref_count > 1) {
+ result = qp_notify_peer_local(false, handle);
+ /*
+ * We can fail to notify a local queuepair
+ * because we can't allocate. We still want
+ * to release the entry if that happens, so
+ * don't bail out yet.
+ */
+ }
+ } else {
+ result = qp_detatch_hypercall(handle);
+ if (result < VMCI_SUCCESS) {
+ /*
+ * We failed to notify a non-local queuepair.
+ * That other queuepair might still be
+ * accessing the shared memory, so don't
+ * release the entry yet. It will get cleaned
+			 * up by vmci_qp_guest_endpoints_exit() if necessary
+ * (assuming we are going away, otherwise why
+ * did this fail?).
+ */
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return result;
+ }
+ }
+
+ /*
+	 * If we get here then we either failed to notify a local queuepair
+	 * (which we tolerate) or we succeeded. In both cases, release the
+	 * entry if required.
+ */
+
+ entry->qp.ref_count--;
+ if (entry->qp.ref_count == 0)
+ qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
+
+ /* If we didn't remove the entry, this could change once we unlock. */
+ if (entry)
+ ref_count = entry->qp.ref_count;
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+
+ if (ref_count == 0)
+ qp_guest_endpoint_destroy(entry);
+
+ return result;
+}
+
+/*
+ * This function handles the actual allocation of a VMCI queue
+ * pair guest endpoint. Allocates physical pages for the queue
+ * pair. It makes OS dependent calls through generic wrappers.
+ */
+static int qp_alloc_guest_work(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags)
+{
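+	/* Each queue gets one extra page for its queue header. */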
+ const u64 num_produce_pages =
+ DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
+ const u64 num_consume_pages =
+ DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
+ void *my_produce_q = NULL;
+ void *my_consume_q = NULL;
+ int result;
+ struct qp_guest_endpoint *queue_pair_entry = NULL;
+
+ if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
+ return VMCI_ERROR_NO_ACCESS;
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ queue_pair_entry = qp_guest_handle_to_entry(*handle);
+ if (queue_pair_entry) {
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ /* Local attach case. */
+ if (queue_pair_entry->qp.ref_count > 1) {
+ pr_devel("Error attempting to attach more than once\n");
+ result = VMCI_ERROR_UNAVAILABLE;
+ goto error_keep_entry;
+ }
+
+ if (queue_pair_entry->qp.produce_size != consume_size ||
+ queue_pair_entry->qp.consume_size !=
+ produce_size ||
+ queue_pair_entry->qp.flags !=
+ (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
+ pr_devel("Error mismatched queue pair in local attach\n");
+ result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ goto error_keep_entry;
+ }
+
+ /*
+ * Do a local attach. We swap the consume and
+ * produce queues for the attacher and deliver
+ * an attach event.
+ */
+ result = qp_notify_peer_local(true, *handle);
+ if (result < VMCI_SUCCESS)
+ goto error_keep_entry;
+
+ my_produce_q = queue_pair_entry->consume_q;
+ my_consume_q = queue_pair_entry->produce_q;
+ goto out;
+ }
+
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ goto error_keep_entry;
+ }
+
+ my_produce_q = qp_alloc_queue(produce_size, flags);
+ if (!my_produce_q) {
+ pr_warn("Error allocating pages for produce queue\n");
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ my_consume_q = qp_alloc_queue(consume_size, flags);
+ if (!my_consume_q) {
+ pr_warn("Error allocating pages for consume queue\n");
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
+ produce_size, consume_size,
+ my_produce_q, my_consume_q);
+ if (!queue_pair_entry) {
+ pr_warn("Error allocating memory in %s\n", __func__);
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
+ num_consume_pages,
+ &queue_pair_entry->ppn_set);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("qp_alloc_ppn_set failed\n");
+ goto error;
+ }
+
+ /*
+ * It's only necessary to notify the host if this queue pair will be
+ * attached to from another context.
+ */
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ /* Local create case. */
+ u32 context_id = vmci_get_context_id();
+
+ /*
+ * Enforce similar checks on local queue pairs as we
+ * do for regular ones. The handle's context must
+ * match the creator or attacher context id (here they
+ * are both the current context id) and the
+ * attach-only flag cannot exist during create. We
+ * also ensure specified peer is this context or an
+ * invalid one.
+ */
+ if (queue_pair_entry->qp.handle.context != context_id ||
+ (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
+ queue_pair_entry->qp.peer != context_id)) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto error;
+ }
+
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
+ result = VMCI_ERROR_NOT_FOUND;
+ goto error;
+ }
+ } else {
+ result = qp_alloc_hypercall(queue_pair_entry);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("qp_alloc_hypercall result = %d\n", result);
+ goto error;
+ }
+ }
+
+ qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
+ (struct vmci_queue *)my_consume_q);
+
+ qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
+
+ out:
+ queue_pair_entry->qp.ref_count++;
+ *handle = queue_pair_entry->qp.handle;
+ *produce_q = (struct vmci_queue *)my_produce_q;
+ *consume_q = (struct vmci_queue *)my_consume_q;
+
+ /*
+ * We should initialize the queue pair header pages on a local
+ * queue pair create. For non-local queue pairs, the
+ * hypervisor initializes the header pages in the create step.
+ */
+ if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
+ queue_pair_entry->qp.ref_count == 1) {
+ vmci_q_header_init((*produce_q)->q_header, *handle);
+ vmci_q_header_init((*consume_q)->q_header, *handle);
+ }
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+
+ return VMCI_SUCCESS;
+
+ error:
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ if (queue_pair_entry) {
+ /* The queues will be freed inside the destroy routine. */
+ qp_guest_endpoint_destroy(queue_pair_entry);
+ } else {
+ qp_free_queue(my_produce_q, produce_size);
+ qp_free_queue(my_consume_q, consume_size);
+ }
+ return result;
+
+ error_keep_entry:
+ /* This path should only be used when an existing entry was found. */
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return result;
+}
+
+/*
+ * The first endpoint issuing a queue pair allocation will create the state
+ * of the queue pair in the queue pair broker.
+ *
+ * If the creator is a guest, it will associate a VMX virtual address range
+ * with the queue pair as specified by the page_store. For compatibility with
+ * older VMX'en, that would use a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later using
+ * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
+ * used.
+ *
+ * If the creator is the host, a page_store of NULL should be used as well,
+ * since the host is not able to supply a page store for the queue pair.
+ *
+ * For older VMX and host callers, the queue pair will be created in the
+ * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
+ * created in the VMCIQPB_CREATED_MEM state.
+ */
+static int qp_broker_create(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data, struct qp_broker_entry **ent)
+{
+ struct qp_broker_entry *entry = NULL;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+ u64 guest_produce_size;
+ u64 guest_consume_size;
+
+ /* Do not create if the caller asked not to. */
+ if (flags & VMCI_QPFLAG_ATTACH_ONLY)
+ return VMCI_ERROR_NOT_FOUND;
+
+ /*
+ * Creator's context ID should match handle's context ID or the creator
+ * must allow the context in handle's context ID as the "peer".
+ */
+ if (handle.context != context_id && handle.context != peer)
+ return VMCI_ERROR_NO_ACCESS;
+
+ if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * Creator's context ID for local queue pairs should match the
+ * peer, if a peer is specified.
+ */
+ if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
+ return VMCI_ERROR_NO_ACCESS;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return VMCI_ERROR_NO_MEM;
+
+ if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
+ /*
+ * The queue pair broker entry stores values from the guest
+ * point of view, so a creating host side endpoint should swap
+ * produce and consume values -- unless it is a local queue
+ * pair, in which case no swapping is necessary, since the local
+ * attacher will swap queues.
+ */
+
+ guest_produce_size = consume_size;
+ guest_consume_size = produce_size;
+ } else {
+ guest_produce_size = produce_size;
+ guest_consume_size = consume_size;
+ }
+
+ entry->qp.handle = handle;
+ entry->qp.peer = peer;
+ entry->qp.flags = flags;
+ entry->qp.produce_size = guest_produce_size;
+ entry->qp.consume_size = guest_consume_size;
+ entry->qp.ref_count = 1;
+ entry->create_id = context_id;
+ entry->attach_id = VMCI_INVALID_ID;
+ entry->state = VMCIQPB_NEW;
+ entry->require_trusted_attach =
+ !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
+ entry->created_by_trusted =
+ !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
+ entry->vmci_page_files = false;
+ entry->wakeup_cb = wakeup_cb;
+ entry->client_data = client_data;
+ entry->produce_q = qp_host_alloc_queue(guest_produce_size);
+ if (entry->produce_q == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+ entry->consume_q = qp_host_alloc_queue(guest_consume_size);
+ if (entry->consume_q == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ qp_init_queue_mutex(entry->produce_q, entry->consume_q);
+
+ INIT_LIST_HEAD(&entry->qp.list_item);
+
+ if (is_local) {
+ u8 *tmp;
+
+ entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
+ PAGE_SIZE, GFP_KERNEL);
+ if (entry->local_mem == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+ entry->state = VMCIQPB_CREATED_MEM;
+ entry->produce_q->q_header = entry->local_mem;
+ tmp = (u8 *)entry->local_mem + PAGE_SIZE *
+ (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
+ entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
+ } else if (page_store) {
+ /*
+ * The VMX already initialized the queue pair headers, so no
+ * need for the kernel side to do that.
+ */
+ result = qp_host_register_user_memory(page_store,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ goto error;
+
+ entry->state = VMCIQPB_CREATED_MEM;
+ } else {
+ /*
+ * A create without a page_store may be either a host
+ * side create (in which case we are waiting for the
+ * guest side to supply the memory) or an old style
+ * queue pair create (in which case we will expect a
+ * set page store call as the next step).
+ */
+ entry->state = VMCIQPB_CREATED_NO_MEM;
+ }
+
+ qp_list_add_entry(&qp_broker_list, &entry->qp);
+ if (ent != NULL)
+ *ent = entry;
+
+ /* Add to resource obj */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_QPAIR_HOST,
+ handle);
+ if (result != VMCI_SUCCESS) {
+		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+ handle.context, handle.resource, result);
+ goto error;
+ }
+
+ entry->qp.handle = vmci_resource_handle(&entry->resource);
+ if (is_local) {
+ vmci_q_header_init(entry->produce_q->q_header,
+ entry->qp.handle);
+ vmci_q_header_init(entry->consume_q->q_header,
+ entry->qp.handle);
+ }
+
+ vmci_ctx_qp_create(context, entry->qp.handle);
+
+ return VMCI_SUCCESS;
+
+ error:
+ if (entry != NULL) {
+ qp_host_free_queue(entry->produce_q, guest_produce_size);
+ qp_host_free_queue(entry->consume_q, guest_consume_size);
+ kfree(entry);
+ }
+
+ return result;
+}
+
+/*
+ * Enqueues an event datagram to notify the peer VM attached to
+ * the given queue pair handle about an attach/detach event by the
+ * given VM. Returns the payload size of the enqueued datagram on
+ * success, or an error code otherwise.
+ */
+static int qp_notify_peer(bool attach,
+ struct vmci_handle handle,
+ u32 my_id,
+ u32 peer_id)
+{
+ int rv;
+ struct vmci_event_qp ev;
+
+ if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
+ peer_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
+ * number of pending events from the hypervisor to a given VM
+ * otherwise a rogue VM could do an arbitrary number of attach
+ * and detach operations causing memory pressure in the host
+ * kernel.
+ */
+
+ ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event = attach ?
+ VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+ ev.payload.handle = handle;
+ ev.payload.peer_id = my_id;
+
+ rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+ &ev.msg.hdr, false);
+ if (rv < VMCI_SUCCESS)
+ pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
+ attach ? "ATTACH" : "DETACH", peer_id);
+
+ return rv;
+}
+
+/*
+ * The second endpoint issuing a queue pair allocation will attach to
+ * the queue pair registered with the queue pair broker.
+ *
+ * If the attacher is a guest, it will associate a VMX virtual address
+ * range with the queue pair as specified by the page_store. At this
+ * point, the already attached host endpoint may start using the queue
+ * pair, and an attach event is sent to it. For compatibility with
+ * older VMX'en that used a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later
+ * using vmci_qp_broker_set_page_store. In that case, a page_store of
+ * NULL should be used, and the attach event will be generated once
+ * the actual page store has been set.
+ *
+ * If the attacher is the host, a page_store of NULL should be used as
+ * well, since the page store information is already set by the guest.
+ *
+ * For new VMX and host callers, the queue pair will be moved to the
+ * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
+ * moved to the VMCIQPB_ATTACHED_NO_MEM state.
+ */
+static int qp_broker_attach(struct qp_broker_entry *entry,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data,
+ struct qp_broker_entry **ent)
+{
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+ entry->state != VMCIQPB_CREATED_MEM)
+ return VMCI_ERROR_UNAVAILABLE;
+
+ if (is_local) {
+ if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
+ context_id != entry->create_id) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else if (context_id == entry->create_id ||
+ context_id == entry->attach_id) {
+ return VMCI_ERROR_ALREADY_EXISTS;
+ }
+
+ if (VMCI_CONTEXT_IS_VM(context_id) &&
+ VMCI_CONTEXT_IS_VM(entry->create_id))
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * If we are attaching from a restricted context then the queuepair
+ * must have been created by a trusted endpoint.
+ */
+ if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !entry->created_by_trusted)
+ return VMCI_ERROR_NO_ACCESS;
+
+ /*
+ * If we are attaching to a queuepair that was created by a restricted
+ * context then we must be trusted.
+ */
+ if (entry->require_trusted_attach &&
+ (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
+ return VMCI_ERROR_NO_ACCESS;
+
+ /*
+ * If the creator specifies VMCI_INVALID_ID in "peer" field, access
+ * control check is not performed.
+ */
+ if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
+ return VMCI_ERROR_NO_ACCESS;
+
+ if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
+ /*
+ * Do not attach if the caller doesn't support Host Queue Pairs
+ * and a host created this queue pair.
+ */
+
+ if (!vmci_ctx_supports_host_qp(context))
+ return VMCI_ERROR_INVALID_RESOURCE;
+
+ } else if (context_id == VMCI_HOST_CONTEXT_ID) {
+ struct vmci_ctx *create_context;
+ bool supports_host_qp;
+
+ /*
+ * Do not attach a host to a user created queue pair if that
+ * user doesn't support host queue pair end points.
+ */
+
+ create_context = vmci_ctx_get(entry->create_id);
+ supports_host_qp = vmci_ctx_supports_host_qp(create_context);
+ vmci_ctx_put(create_context);
+
+ if (!supports_host_qp)
+ return VMCI_ERROR_INVALID_RESOURCE;
+ }
+
+ if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ /*
+ * The queue pair broker entry stores values from the guest
+ * point of view, so an attaching guest should match the values
+ * stored in the entry.
+ */
+
+ if (entry->qp.produce_size != produce_size ||
+ entry->qp.consume_size != consume_size) {
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ }
+ } else if (entry->qp.produce_size != consume_size ||
+ entry->qp.consume_size != produce_size) {
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ }
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ /*
+ * If a guest attached to a queue pair, it will supply
+ * the backing memory. If this is a pre NOVMVM vmx,
+ * the backing memory will be supplied by calling
+ * vmci_qp_broker_set_page_store() following the
+ * return of the vmci_qp_broker_alloc() call. If it is
+ * a vmx of version NOVMVM or later, the page store
+ * must be supplied as part of the
+		 * vmci_qp_broker_alloc call. In all cases, the
+		 * initially created queue pair must not already have
+		 * any memory associated with it.
+ */
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (page_store != NULL) {
+ /*
+ * Patch up host state to point to guest
+ * supplied memory. The VMX already
+ * initialized the queue pair headers, so no
+ * need for the kernel side to do that.
+ */
+
+ result = qp_host_register_user_memory(page_store,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ /*
+ * Preemptively load in the headers if non-blocking to
+ * prevent blocking later.
+ */
+ if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
+ result = qp_host_map_queues(entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS) {
+ qp_host_unregister_user_memory(
+ entry->produce_q,
+ entry->consume_q);
+ return result;
+ }
+ }
+
+ entry->state = VMCIQPB_ATTACHED_MEM;
+ } else {
+ entry->state = VMCIQPB_ATTACHED_NO_MEM;
+ }
+ } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
+ /*
+ * The host side is attempting to attach to a queue
+ * pair that doesn't have any memory associated with
+ * it. This must be a pre NOVMVM vmx that hasn't set
+ * the page store information yet, or a quiesced VM.
+ */
+
+ return VMCI_ERROR_UNAVAILABLE;
+ } else {
+ /*
+ * For non-blocking queue pairs, we cannot rely on
+ * enqueue/dequeue to map in the pages on the
+ * host-side, since it may block, so we make an
+ * attempt here.
+ */
+
+ if (flags & VMCI_QPFLAG_NONBLOCK) {
+ result =
+ qp_host_map_queues(entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ entry->qp.flags |= flags &
+ (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
+ }
+
+ /* The host side has successfully attached to a queue pair. */
+ entry->state = VMCIQPB_ATTACHED_MEM;
+ }
+
+ if (entry->state == VMCIQPB_ATTACHED_MEM) {
+ result =
+ qp_notify_peer(true, entry->qp.handle, context_id,
+ entry->create_id);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+ entry->create_id, entry->qp.handle.context,
+ entry->qp.handle.resource);
+ }
+
+ entry->attach_id = context_id;
+ entry->qp.ref_count++;
+ if (wakeup_cb) {
+ entry->wakeup_cb = wakeup_cb;
+ entry->client_data = client_data;
+ }
+
+ /*
+ * When attaching to local queue pairs, the context already has
+ * an entry tracking the queue pair, so don't add another one.
+ */
+ if (!is_local)
+ vmci_ctx_qp_create(context, entry->qp.handle);
+
+ if (ent != NULL)
+ *ent = entry;
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Queue pair alloc helper for use when setting up queue pair endpoints
+ * on the host.
+ */
+static int qp_broker_alloc(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data,
+ struct qp_broker_entry **ent,
+ bool *swap)
+{
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool create;
+ struct qp_broker_entry *entry = NULL;
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) ||
+ (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
+ !(produce_size || consume_size) ||
+ !context || context_id == VMCI_INVALID_ID ||
+ handle.context == VMCI_INVALID_ID) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * In the initial argument check, we ensure that non-vmkernel hosts
+ * are not allowed to create local queue pairs.
+ */
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!is_local && vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ mutex_unlock(&qp_broker_list.mutex);
+ return VMCI_ERROR_ALREADY_EXISTS;
+ }
+
+ if (handle.resource != VMCI_INVALID_ID)
+ entry = qp_broker_handle_to_entry(handle);
+
+ if (!entry) {
+ create = true;
+ result =
+ qp_broker_create(handle, peer, flags, priv_flags,
+ produce_size, consume_size, page_store,
+ context, wakeup_cb, client_data, ent);
+ } else {
+ create = false;
+ result =
+ qp_broker_attach(entry, peer, flags, priv_flags,
+ produce_size, consume_size, page_store,
+ context, wakeup_cb, client_data, ent);
+ }
+
+ mutex_unlock(&qp_broker_list.mutex);
+
+ if (swap)
+ *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
+ !(create && is_local);
+
+ return result;
+}
+
+/*
+ * This function implements the kernel API for allocating a queue
+ * pair.
+ */
+static int qp_alloc_host_work(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data)
+{
+ struct vmci_handle new_handle;
+ struct vmci_ctx *context;
+ struct qp_broker_entry *entry;
+ int result;
+ bool swap;
+
+ if (vmci_handle_is_invalid(*handle)) {
+ new_handle = vmci_make_handle(
+ VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
+	} else {
+		new_handle = *handle;
+	}
+
+ context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+ entry = NULL;
+ result =
+ qp_broker_alloc(new_handle, peer, flags, priv_flags,
+ produce_size, consume_size, NULL, context,
+ wakeup_cb, client_data, &entry, &swap);
+ if (result == VMCI_SUCCESS) {
+ if (swap) {
+ /*
+ * If this is a local queue pair, the attacher
+ * will swap around produce and consume
+ * queues.
+ */
+
+ *produce_q = entry->consume_q;
+ *consume_q = entry->produce_q;
+ } else {
+ *produce_q = entry->produce_q;
+ *consume_q = entry->consume_q;
+ }
+
+ *handle = vmci_resource_handle(&entry->resource);
+ } else {
+ *handle = VMCI_INVALID_HANDLE;
+ pr_devel("queue pair broker failed to alloc (result=%d)\n",
+ result);
+ }
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Allocates a VMCI queue_pair. Only checks validity of input
+ * arguments. The real work is done in the host or guest
+ * specific function.
+ */
+int vmci_qp_alloc(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ bool guest_endpoint,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data)
+{
+ if (!handle || !produce_q || !consume_q ||
+ (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (guest_endpoint) {
+ return qp_alloc_guest_work(handle, produce_q,
+ produce_size, consume_q,
+ consume_size, peer,
+ flags, priv_flags);
+ } else {
+ return qp_alloc_host_work(handle, produce_q,
+ produce_size, consume_q,
+ consume_size, peer, flags,
+ priv_flags, wakeup_cb, client_data);
+ }
+}
+
+/*
+ * This function implements the host kernel API for detaching from
+ * a queue pair.
+ */
+static int qp_detatch_host_work(struct vmci_handle handle)
+{
+ int result;
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+
+ result = vmci_qp_broker_detach(handle, context);
+
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Detaches from a VMCI queue_pair. Only checks validity of input argument.
+ * Real work is done in the host or guest specific function.
+ */
+static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
+{
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (guest_endpoint)
+ return qp_detatch_guest_work(handle);
+ else
+ return qp_detatch_host_work(handle);
+}
+
+/*
+ * Returns the entry from the head of the list. Assumes that the list is
+ * locked.
+ */
+static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
+{
+ if (!list_empty(&qp_list->head)) {
+ struct qp_entry *entry =
+ list_first_entry(&qp_list->head, struct qp_entry,
+ list_item);
+ return entry;
+ }
+
+ return NULL;
+}
+
+void vmci_qp_broker_exit(void)
+{
+ struct qp_entry *entry;
+ struct qp_broker_entry *be;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ while ((entry = qp_list_get_head(&qp_broker_list))) {
+ be = (struct qp_broker_entry *)entry;
+
+ qp_list_remove_entry(&qp_broker_list, entry);
+ kfree(be);
+ }
+
+ mutex_unlock(&qp_broker_list.mutex);
+}
+
+/*
+ * Requests that a queue pair be allocated with the VMCI queue
+ * pair broker. Allocates a queue pair entry if one does not
+ * exist. Attaches to one if it exists, and retrieves the page
+ * files backing that queue_pair. Assumes that the queue pair
+ * broker lock is held.
+ */
+int vmci_qp_broker_alloc(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context)
+{
+ return qp_broker_alloc(handle, peer, flags, priv_flags,
+ produce_size, consume_size,
+ page_store, context, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
+ * step to add the UVAs of the VMX mapping of the queue pair. This function
+ * provides backwards compatibility with such VMX'en, and takes care of
+ * registering the page store for a queue pair previously allocated by the
+ * VMX during create or attach. This function will move the queue pair state
+ * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
+ * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
+ * attached state with memory, the queue pair is ready to be used by the
+ * host peer, and an attached event will be generated.
+ *
+ * Assumes that the queue pair broker lock is held.
+ *
+ * This function is only used by the hosted platform, since there is no
+ * issue with backwards compatibility for vmkernel.
+ */
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+ u64 produce_uva,
+ u64 consume_uva,
+ struct vmci_ctx *context)
+{
+ struct qp_broker_entry *entry;
+ int result;
+ const u32 context_id = vmci_ctx_get_id(context);
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * We only support guest to host queue pairs, so the VMX must
+ * supply UVAs for the mapped page files.
+ */
+
+ if (produce_uva == 0 || consume_uva == 0)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ /*
+ * If I'm the owner then I can set the page store.
+ *
+ * Or, if a host created the queue_pair and I'm the attached peer
+ * then I can set the page store.
+ */
+ if (entry->create_id != context_id &&
+ (entry->create_id != VMCI_HOST_CONTEXT_ID ||
+ entry->attach_id != context_id)) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
+ goto out;
+ }
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+ entry->state != VMCIQPB_ATTACHED_NO_MEM) {
+ result = VMCI_ERROR_UNAVAILABLE;
+ goto out;
+ }
+
+ result = qp_host_get_user_memory(produce_uva, consume_uva,
+ entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ goto out;
+
+ result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS) {
+ qp_host_unregister_user_memory(entry->produce_q,
+ entry->consume_q);
+ goto out;
+ }
+
+ if (entry->state == VMCIQPB_CREATED_NO_MEM)
+ entry->state = VMCIQPB_CREATED_MEM;
+ else
+ entry->state = VMCIQPB_ATTACHED_MEM;
+
+ entry->vmci_page_files = true;
+
+ if (entry->state == VMCIQPB_ATTACHED_MEM) {
+ result =
+ qp_notify_peer(true, handle, context_id, entry->create_id);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+ entry->create_id, entry->qp.handle.context,
+ entry->qp.handle.resource);
+ }
+ }
+
+ result = VMCI_SUCCESS;
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Resets saved queue headers for the given QP broker
+ * entry. Should be used when guest memory becomes available
+ * again, or the guest detaches.
+ */
+static void qp_reset_saved_headers(struct qp_broker_entry *entry)
+{
+ entry->produce_q->saved_header = NULL;
+ entry->consume_q->saved_header = NULL;
+}
+
+/*
+ * The main entry point for detaching from a queue pair registered with the
+ * queue pair broker. If more than one endpoint is attached to the queue
+ * pair, the first endpoint will mainly decrement a reference count and
+ * generate a notification to its peer. The last endpoint will clean up
+ * the queue pair state registered with the broker.
+ *
+ * When a guest endpoint detaches, it will unmap and unregister the guest
+ * memory backing the queue pair. If the host is still attached, it will
+ * no longer be able to access the queue pair content.
+ *
+ * If the queue pair is already in a state where there is no memory
+ * registered for the queue pair (any *_NO_MEM state), it will transition to
+ * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
+ * endpoint is the first of two endpoints to detach. If the host endpoint is
+ * the first out of two to detach, the queue pair will move to the
+ * VMCIQPB_SHUTDOWN_MEM state.
+ */
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ u32 peer_id;
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ if (context_id == entry->create_id) {
+ peer_id = entry->attach_id;
+ entry->create_id = VMCI_INVALID_ID;
+ } else {
+ peer_id = entry->create_id;
+ entry->attach_id = VMCI_INVALID_ID;
+ }
+ entry->qp.ref_count--;
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ bool headers_mapped;
+
+ /*
+ * Pre NOVMVM vmx'en may detach from a queue pair
+ * before setting the page store, and in that case
+ * there is no user memory to detach from. Also, more
+ * recent VMX'en may detach from a queue pair in the
+ * quiesced state.
+ */
+
+ qp_acquire_queue_mutex(entry->produce_q);
+ headers_mapped = entry->produce_q->q_header ||
+ entry->consume_q->q_header;
+ if (QPBROKERSTATE_HAS_MEM(entry)) {
+ result =
+ qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+ handle.context, handle.resource,
+ result);
+
+			qp_host_unregister_user_memory(entry->produce_q,
+						       entry->consume_q);
+
+ }
+
+ if (!headers_mapped)
+ qp_reset_saved_headers(entry);
+
+ qp_release_queue_mutex(entry->produce_q);
+
+ if (!headers_mapped && entry->wakeup_cb)
+ entry->wakeup_cb(entry->client_data);
+
+ } else {
+ if (entry->wakeup_cb) {
+ entry->wakeup_cb = NULL;
+ entry->client_data = NULL;
+ }
+ }
+
+ if (entry->qp.ref_count == 0) {
+ qp_list_remove_entry(&qp_broker_list, &entry->qp);
+
+ if (is_local)
+ kfree(entry->local_mem);
+
+ qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+ qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
+ qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
+ /* Unlink from resource hash table and free callback */
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+
+ vmci_ctx_qp_destroy(context, handle);
+ } else {
+ qp_notify_peer(false, handle, context_id, peer_id);
+ if (context_id == VMCI_HOST_CONTEXT_ID &&
+ QPBROKERSTATE_HAS_MEM(entry)) {
+ entry->state = VMCIQPB_SHUTDOWN_MEM;
+ } else {
+ entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
+ }
+
+ if (!is_local)
+ vmci_ctx_qp_destroy(context, handle);
+
+ }
+ result = VMCI_SUCCESS;
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Establishes the necessary mappings for a queue pair given a
+ * reference to the queue pair guest memory. This is usually
+ * called when a guest is unquiesced and the VMX is allowed to
+ * map guest memory once again.
+ */
+int vmci_qp_broker_map(struct vmci_handle handle,
+ struct vmci_ctx *context,
+ u64 guest_mem)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+ result = VMCI_SUCCESS;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ struct vmci_qp_page_store page_store;
+
+ page_store.pages = guest_mem;
+ page_store.len = QPE_NUM_PAGES(entry->qp);
+
+ qp_acquire_queue_mutex(entry->produce_q);
+ qp_reset_saved_headers(entry);
+ result =
+ qp_host_register_user_memory(&page_store,
+ entry->produce_q,
+ entry->consume_q);
+ qp_release_queue_mutex(entry->produce_q);
+ if (result == VMCI_SUCCESS) {
+			/*
+			 * Move state from *_NO_MEM to *_MEM; each *_MEM
+			 * state immediately follows its *_NO_MEM
+			 * counterpart in the broker state enum.
+			 */
+
+ entry->state++;
+
+ if (entry->wakeup_cb)
+ entry->wakeup_cb(entry->client_data);
+ }
+ }
+
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Saves a snapshot of the queue headers for the given QP broker
+ * entry. Should be used when guest memory is unmapped.
+ * Results:
+ * VMCI_SUCCESS on success, appropriate error code if guest memory
+ *    can't be accessed.
+ */
+static int qp_save_headers(struct qp_broker_entry *entry)
+{
+ int result;
+
+ if (entry->produce_q->saved_header != NULL &&
+ entry->consume_q->saved_header != NULL) {
+ /*
+ * If the headers have already been saved, we don't need to do
+ * it again, and we don't want to map in the headers
+ * unnecessarily.
+ */
+
+ return VMCI_SUCCESS;
+ }
+
+ if (NULL == entry->produce_q->q_header ||
+ NULL == entry->consume_q->q_header) {
+ result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+ }
+
+ memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
+ sizeof(entry->saved_produce_q));
+ entry->produce_q->saved_header = &entry->saved_produce_q;
+ memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
+ sizeof(entry->saved_consume_q));
+ entry->consume_q->saved_header = &entry->saved_consume_q;
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Removes all references to the guest memory of a given queue pair, and
+ * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
+ * called when a VM is being quiesced and access to guest memory should
+ * be avoided.
+ */
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+ struct vmci_ctx *context,
+ u32 gid)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ qp_acquire_queue_mutex(entry->produce_q);
+ result = qp_save_headers(entry);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+ handle.context, handle.resource, result);
+
+ qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
+
+ /*
+		 * On hosted platforms, when we unmap queue pairs, the VMX will also
+ * unmap the guest memory, so we invalidate the previously
+ * registered memory. If the queue pair is mapped again at a
+ * later point in time, we will need to reregister the user
+ * memory with a possibly new user VA.
+ */
+ qp_host_unregister_user_memory(entry->produce_q,
+ entry->consume_q);
+
+ /*
+ * Move state from *_MEM to *_NO_MEM.
+ */
+ entry->state--;
+
+ qp_release_queue_mutex(entry->produce_q);
+ }
+
+ result = VMCI_SUCCESS;
+
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Destroys all guest queue pair endpoints. If active guest queue
+ * pairs still exist, hypercalls to attempt detach from these
+ * queue pairs will be made. Any failure to detach is silently
+ * ignored.
+ */
+void vmci_qp_guest_endpoints_exit(void)
+{
+ struct qp_entry *entry;
+ struct qp_guest_endpoint *ep;
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
+ ep = (struct qp_guest_endpoint *)entry;
+
+ /* Don't make a hypercall for local queue_pairs. */
+ if (!(entry->flags & VMCI_QPFLAG_LOCAL))
+ qp_detatch_hypercall(entry->handle);
+
+ /* We cannot fail the exit, so let's reset ref_count. */
+ entry->ref_count = 0;
+ qp_list_remove_entry(&qp_guest_endpoints, entry);
+
+ qp_guest_endpoint_destroy(ep);
+ }
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+}
+
+/*
+ * Helper routine that will lock the queue pair before subsequent
+ * operations.
+ * Note: Non-blocking on the host side is currently only implemented in ESX.
+ * Since non-blocking isn't yet implemented on the host personality we
+ * have no reason to acquire a spin lock. So to avoid the use of an
+ *       unnecessary lock, only acquire the mutex if we can block.
+ * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore
+ * we can use the same locking function for access to both the queue
+ *       and the queue headers as it is the same logic. Assert this behavior.
+ */
+static void qp_lock(const struct vmci_qp *qpair)
+{
+ if (vmci_can_block(qpair->flags))
+ qp_acquire_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * Helper routine that unlocks the queue pair after calling
+ * qp_lock. Respects non-blocking and pinning flags.
+ */
+static void qp_unlock(const struct vmci_qp *qpair)
+{
+ if (vmci_can_block(qpair->flags))
+ qp_release_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * The queue headers may not be mapped at all times. If a queue is
+ * currently not mapped, an attempt is made to map it.
+ */
+static int qp_map_queue_headers(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ bool can_block)
+{
+ int result;
+
+ if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
+ if (can_block)
+ result = qp_host_map_queues(produce_q, consume_q);
+ else
+ result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
+
+ if (result < VMCI_SUCCESS)
+ return (produce_q->saved_header &&
+ consume_q->saved_header) ?
+ VMCI_ERROR_QUEUEPAIR_NOT_READY :
+ VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Helper routine that will retrieve the produce and consume
+ * headers of a given queue pair. If the guest memory of the
+ * queue pair is currently not available, the saved queue headers
+ * will be returned, if these are available.
+ */
+static int qp_get_queue_headers(const struct vmci_qp *qpair,
+ struct vmci_queue_header **produce_q_header,
+ struct vmci_queue_header **consume_q_header)
+{
+ int result;
+
+ result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
+ vmci_can_block(qpair->flags));
+ if (result == VMCI_SUCCESS) {
+ *produce_q_header = qpair->produce_q->q_header;
+ *consume_q_header = qpair->consume_q->q_header;
+ } else if (qpair->produce_q->saved_header &&
+ qpair->consume_q->saved_header) {
+ *produce_q_header = qpair->produce_q->saved_header;
+ *consume_q_header = qpair->consume_q->saved_header;
+ result = VMCI_SUCCESS;
+ }
+
+ return result;
+}
+
+/*
+ * Callback from VMCI queue pair broker indicating that a queue
+ * pair that was previously not ready, now either is ready or
+ * gone forever.
+ */
+static int qp_wakeup_cb(void *client_data)
+{
+ struct vmci_qp *qpair = (struct vmci_qp *)client_data;
+
+ qp_lock(qpair);
+ while (qpair->blocked > 0) {
+ qpair->blocked--;
+ qpair->generation++;
+ wake_up(&qpair->event);
+ }
+ qp_unlock(qpair);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Makes the calling thread wait for the queue pair to become
+ * ready for host side access. Returns true when thread is
+ * woken up after queue pair state change, false otherwise.
+ */
+static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
+{
+ unsigned int generation;
+
+ if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
+ return false;
+
+ qpair->blocked++;
+ generation = qpair->generation;
+ qp_unlock(qpair);
+ wait_event(qpair->event, generation != qpair->generation);
+ qp_lock(qpair);
+
+ return true;
+}
+
+/*
+ * Enqueues a given buffer to the produce queue using the provided
+ * function. As many bytes as possible (space available in the queue)
+ * are enqueued. Assumes the queue->mutex has been acquired. Returns
+ * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
+ * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
+ * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
+ * an error occurred when accessing the buffer,
+ * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
+ * available. Otherwise, the number of bytes written to the queue is
+ * returned. Updates the tail pointer of the produce queue.
+ */
+static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ const u64 produce_q_size,
+ const void *buf,
+ size_t buf_size,
+ vmci_memcpy_to_queue_func memcpy_to_queue,
+ bool can_block)
+{
+ s64 free_space;
+ u64 tail;
+ size_t written;
+ ssize_t result;
+
+ result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ if (unlikely(result != VMCI_SUCCESS))
+ return result;
+
+ free_space = vmci_q_header_free_space(produce_q->q_header,
+ consume_q->q_header,
+ produce_q_size);
+ if (free_space == 0)
+ return VMCI_ERROR_QUEUEPAIR_NOSPACE;
+
+ if (free_space < VMCI_SUCCESS)
+ return (ssize_t) free_space;
+
+ written = (size_t) (free_space > buf_size ? buf_size : free_space);
+ tail = vmci_q_header_producer_tail(produce_q->q_header);
+ if (likely(tail + written < produce_q_size)) {
+ result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+ } else {
+ /* Tail pointer wraps around. */
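+		/*
+		 * For example, with produce_q_size 4096, tail 4000 and 200
+		 * bytes to write, the first copy places 96 bytes at offset
+		 * 4000 and the second the remaining 104 bytes at offset 0.
+		 */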
+
+ const size_t tmp = (size_t) (produce_q_size - tail);
+
+ result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+ if (result >= VMCI_SUCCESS)
+ result = memcpy_to_queue(produce_q, 0, buf, tmp,
+ written - tmp);
+ }
+
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ vmci_q_header_add_producer_tail(produce_q->q_header, written,
+ produce_q_size);
+ return written;
+}
+
+/*
+ * Dequeues data (if available) from the given consume queue. Writes data
+ * to the user provided buffer using the provided function.
+ * Assumes the queue->mutex has been acquired.
+ * Results:
+ * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
+ * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
+ * (as defined by the queue size).
+ * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
+ * Otherwise the number of bytes dequeued is returned.
+ * Side effects:
+ * Updates the head pointer of the consume queue.
+ */
+static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ const u64 consume_q_size,
+ void *buf,
+ size_t buf_size,
+ vmci_memcpy_from_queue_func memcpy_from_queue,
+ bool update_consumer,
+ bool can_block)
+{
+ s64 buf_ready;
+ u64 head;
+ size_t read;
+ ssize_t result;
+
+ result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ if (unlikely(result != VMCI_SUCCESS))
+ return result;
+
+ buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
+ produce_q->q_header,
+ consume_q_size);
+ if (buf_ready == 0)
+ return VMCI_ERROR_QUEUEPAIR_NODATA;
+
+ if (buf_ready < VMCI_SUCCESS)
+ return (ssize_t) buf_ready;
+
+ read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
+ head = vmci_q_header_consumer_head(produce_q->q_header);
+ if (likely(head + read < consume_q_size)) {
+ result = memcpy_from_queue(buf, 0, consume_q, head, read);
+ } else {
+ /* Head pointer wraps around. */
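+		/*
+		 * For example, with consume_q_size 4096, head 4000 and 200
+		 * bytes ready, 96 bytes are copied from offset 4000 and the
+		 * remaining 104 bytes from offset 0.
+		 */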
+
+ const size_t tmp = (size_t) (consume_q_size - head);
+
+ result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+ if (result >= VMCI_SUCCESS)
+ result = memcpy_from_queue(buf, tmp, consume_q, 0,
+ read - tmp);
+
+ }
+
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ if (update_consumer)
+ vmci_q_header_add_consumer_head(produce_q->q_header,
+ read, consume_q_size);
+
+ return read;
+}
+
+/*
+ * vmci_qpair_alloc() - Allocates a queue pair.
+ * @qpair: Pointer for the new vmci_qp struct.
+ * @handle: Handle to track the resource.
+ * @produce_qsize: Desired size of the producer queue.
+ * @consume_qsize: Desired size of the consumer queue.
+ * @peer: ContextID of the peer.
+ * @flags: VMCI flags.
+ * @priv_flags: VMCI privilege flags.
+ *
+ * This is the client interface for allocating the memory for a
+ * vmci_qp structure and then attaching to the underlying
+ * queue. If an error occurs allocating the memory for the
+ * vmci_qp structure no attempt is made to attach. If an
+ * error occurs attaching, then the structure is freed.
+ */
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+ struct vmci_handle *handle,
+ u64 produce_qsize,
+ u64 consume_qsize,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags)
+{
+ struct vmci_qp *my_qpair;
+ int retval;
+ struct vmci_handle src = VMCI_INVALID_HANDLE;
+ struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
+ enum vmci_route route;
+ vmci_event_release_cb wakeup_cb;
+ void *client_data;
+
+ /*
+ * Restrict the size of a queuepair. The device already
+ * enforces a limit on the total amount of memory that can be
+ * allocated to queuepairs for a guest. However, we try to
+ * allocate this memory before we make the queuepair
+ * allocation hypercall. On Linux, we allocate each page
+ * separately, which means rather than fail, the guest will
+ * thrash while it tries to allocate, and will become
+ * increasingly unresponsive to the point where it appears to
+ * be hung. So we place a limit on the size of an individual
+ * queuepair here, and leave the device to enforce the
+ * restriction on total queuepair memory. (Note that this
+ * doesn't prevent all cases; a user with only this much
+ * physical memory could still get into trouble.) The error
+ * used by the device is NO_RESOURCES, so use that here too.
+ */
+
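+	/*
+	 * The first clause of the check below catches unsigned
+	 * wrap-around of the produce + consume sum.
+	 */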
+ if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
+ produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+ return VMCI_ERROR_NO_RESOURCES;
+
+ retval = vmci_route(&src, &dst, false, &route);
+ if (retval < VMCI_SUCCESS)
+ route = vmci_guest_code_active() ?
+ VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
+
+ /* If NONBLOCK or PINNED is set, we better be the guest personality. */
+ if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
+ VMCI_ROUTE_AS_GUEST != route) {
+ pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /*
+ * Limit the size of pinned QPs and check sanity.
+ *
+ * Pinned pages implies non-blocking mode. Mutexes aren't acquired
+ * when the NONBLOCK flag is set in qpair code; and also should not be
+ * acquired when the PINNED flagged is set. Since pinning pages
+ * implies we want speed, it makes no sense not to have NONBLOCK
+ * set if PINNED is set. Hence enforce this implication.
+ */
+ if (vmci_qp_pinned(flags)) {
+ if (vmci_can_block(flags)) {
+ pr_err("Attempted to enable pinning w/o non-blocking");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
+ if (!my_qpair)
+ return VMCI_ERROR_NO_MEM;
+
+ my_qpair->produce_q_size = produce_qsize;
+ my_qpair->consume_q_size = consume_qsize;
+ my_qpair->peer = peer;
+ my_qpair->flags = flags;
+ my_qpair->priv_flags = priv_flags;
+
+ wakeup_cb = NULL;
+ client_data = NULL;
+
+ if (VMCI_ROUTE_AS_HOST == route) {
+ my_qpair->guest_endpoint = false;
+ if (!(flags & VMCI_QPFLAG_LOCAL)) {
+ my_qpair->blocked = 0;
+ my_qpair->generation = 0;
+ init_waitqueue_head(&my_qpair->event);
+ wakeup_cb = qp_wakeup_cb;
+ client_data = (void *)my_qpair;
+ }
+ } else {
+ my_qpair->guest_endpoint = true;
+ }
+
+ retval = vmci_qp_alloc(handle,
+ &my_qpair->produce_q,
+ my_qpair->produce_q_size,
+ &my_qpair->consume_q,
+ my_qpair->consume_q_size,
+ my_qpair->peer,
+ my_qpair->flags,
+ my_qpair->priv_flags,
+ my_qpair->guest_endpoint,
+ wakeup_cb, client_data);
+
+ if (retval < VMCI_SUCCESS) {
+ kfree(my_qpair);
+ return retval;
+ }
+
+ *qpair = my_qpair;
+ my_qpair->handle = *handle;
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
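+
+/*
+ * A minimal usage sketch of the client API above (illustrative only;
+ * the sizes, buffer and peer ID below are hypothetical and error
+ * handling is trimmed):
+ *
+ *	struct vmci_qp *qpair;
+ *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ *	char msg[] = "hello";
+ *	int rv;
+ *
+ *	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_id,
+ *			      0, VMCI_NO_PRIVILEGE_FLAGS);
+ *	if (rv < VMCI_SUCCESS)
+ *		return rv;
+ *
+ *	vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0);
+ *	vmci_qpair_detach(&qpair);
+ */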
+
+/*
+ * vmci_qpair_detach() - Detaches the client from a queue pair.
+ * @qpair: Reference of a pointer to the qpair struct.
+ *
+ * This is the client interface for detaching from a VMCIQPair.
+ * Note that this routine will free the memory allocated for the
+ * vmci_qp structure too.
+ */
+int vmci_qpair_detach(struct vmci_qp **qpair)
+{
+ int result;
+ struct vmci_qp *old_qpair;
+
+ if (!qpair || !(*qpair))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ old_qpair = *qpair;
+ result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
+
+ /*
+ * The guest can fail to detach for a number of reasons, and
+ * if it does so, it will cleanup the entry (if there is one).
+ * The host can fail too, but it won't cleanup the entry
+ * immediately, it will do that later when the context is
+ * freed. Either way, we need to release the qpair struct
+ * here; there isn't much the caller can do, and we don't want
+ * to leak.
+ */
+
+ memset(old_qpair, 0, sizeof(*old_qpair));
+ old_qpair->handle = VMCI_INVALID_HANDLE;
+ old_qpair->peer = VMCI_INVALID_ID;
+ kfree(old_qpair);
+ *qpair = NULL;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_detach);
+
+/*
+ * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
+ * @qpair: Pointer to the queue pair struct.
+ * @producer_tail: Reference used for storing producer tail index.
+ * @consumer_head: Reference used for storing the consumer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of view of the caller as the producer.
+ */
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+ u64 *producer_tail,
+ u64 *consumer_head)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ int result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ vmci_q_header_get_pointers(produce_q_header, consume_q_header,
+ producer_tail, consumer_head);
+ qp_unlock(qpair);
+
+ if (result == VMCI_SUCCESS &&
+ ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
+ (consumer_head && *consumer_head >= qpair->produce_q_size)))
+ return VMCI_ERROR_INVALID_SIZE;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
+
+/*
+ * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
+ * @qpair: Pointer to the queue pair struct.
+ * @consumer_tail: Reference used for storing consumer tail index.
+ * @producer_head: Reference used for storing the producer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of view of the caller as the consumer.
+ */
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+ u64 *consumer_tail,
+ u64 *producer_head)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ int result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ vmci_q_header_get_pointers(consume_q_header, produce_q_header,
+ consumer_tail, producer_head);
+ qp_unlock(qpair);
+
+ if (result == VMCI_SUCCESS &&
+ ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
+ (producer_head && *producer_head >= qpair->consume_q_size)))
+ return VMCI_ERROR_INVALID_SIZE;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
+
+/*
+ * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of view of the caller as
+ * the producer, which is the common case. Returns < 0 on error,
+ * otherwise the number of free bytes into which data can be enqueued.
+ */
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_free_space(produce_q_header,
+ consume_q_header,
+ qpair->produce_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
+
+/*
+ * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of view of the caller as
+ * the consumer, which is not the common case. Returns a negative
+ * error code on failure; otherwise the number of bytes into which
+ * data can be enqueued.
+ */
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_free_space(consume_q_header,
+ produce_q_header,
+ qpair->consume_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
+
+/*
+ * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
+ * producer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of view of the
+ * caller as the producer, which is not the common case. Returns a
+ * negative error code on failure; otherwise the number of bytes that
+ * may be read.
+ */
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_buf_ready(produce_q_header,
+ consume_q_header,
+ qpair->produce_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
+
+/*
+ * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
+ * consumer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of view of the
+ * caller as the consumer, which is the normal case. Returns a
+ * negative error code on failure; otherwise the number of bytes that
+ * may be read.
+ */
+s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_buf_ready(consume_q_header,
+ produce_q_header,
+ qpair->consume_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
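
Taken together, the four space/data accessors above let a client poll the pair without copying anything. A hedged sketch (the helper name is invented) of the common pattern, checking free space before producing and ready bytes before consuming:

/* Hypothetical poll helper built on the accessors above. */
static bool example_qp_can_transfer(const struct vmci_qp *qpair,
				    size_t send_len, size_t *recv_avail)
{
	s64 free_space = vmci_qpair_produce_free_space(qpair);
	s64 ready = vmci_qpair_consume_buf_ready(qpair);

	if (free_space < 0 || ready < 0)
		return false;	/* negative VMCI error, e.g. headers not mapped */

	*recv_avail = ready;
	return (u64)free_space >= send_len;
}
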
+
+/*
+ * vmci_qpair_enqueue() - Throw data on the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer containing data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * Returns number of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+ const void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_enqueue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->produce_q_size,
+ buf, buf_size,
+ qp_memcpy_to_queue,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
+
+/*
+ * vmci_qpair_dequeue() - Get data from the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer for the data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * Returns number of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+ void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ buf, buf_size,
+ qp_memcpy_from_queue, true,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
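
To show how the two calls above pair up, here is a minimal, hedged round-trip sketch; it assumes an attached queue pair and invents the buffer handling:

/* Hypothetical echo helper: read whatever is ready and send it back. */
static ssize_t example_qp_echo(struct vmci_qp *qpair, void *scratch,
			       size_t scratch_len)
{
	ssize_t got = vmci_qpair_dequeue(qpair, scratch, scratch_len, 0);

	if (got < 0)
		return got;	/* negative VMCI error code */

	return vmci_qpair_enqueue(qpair, scratch, got, 0);
}
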
+
+/*
+ * vmci_qpair_peek() - Peek at the data in the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer for the data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue. (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * Returns number of bytes peeked or < 0 on error.
+ */
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
+ void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ buf, buf_size,
+ qp_memcpy_from_queue, false,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peek);
+
+/*
+ * vmci_qpair_enquev() - Throw data on the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer containing data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_enqueue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->produce_q_size,
+ iov, iov_size,
+ qp_memcpy_to_queue_iov,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
+
+/*
+ * vmci_qpair_dequev() - Get data from the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer for the data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ iov, iov_size,
+ qp_memcpy_from_queue_iov,
+ true, vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
+
+/*
+ * vmci_qpair_peekv() - Peek at the data in the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer for the data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue. (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes peeked or < 0 on error.
+ */
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ iov, iov_size,
+ qp_memcpy_from_queue_iov,
+ false, vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
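
The three *v variants are thin wrappers around the same blocking loop; only the copy helper changes, so the iov argument is handed straight to qp_memcpy_to_queue_iov()/qp_memcpy_from_queue_iov(). A hedged caller sketch, assuming (as the copy helpers in this version of the driver appear to) that iov points at a struct iovec array:

#include <linux/uio.h>

/* Hypothetical gather-send of two fragments in one enqueue. */
static ssize_t example_qp_send_two(struct vmci_qp *qpair,
				   void *hdr, size_t hdr_len,
				   void *payload, size_t payload_len)
{
	struct iovec iov[2] = {
		{ .iov_base = hdr,     .iov_len = hdr_len },
		{ .iov_base = payload, .iov_len = payload_len },
	};

	return vmci_qpair_enquev(qpair, iov, hdr_len + payload_len, 0);
}
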
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
new file mode 100644
index 000000000000..58c6959f6b6d
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -0,0 +1,191 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_QUEUE_PAIR_H_
+#define _VMCI_QUEUE_PAIR_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+/* Callback needed for correctly waiting on events. */
+typedef int (*vmci_event_release_cb) (void *client_data);
+
+/* Guest device port I/O. */
+struct ppn_set {
+ u64 num_produce_pages;
+ u64 num_consume_pages;
+ u32 *produce_ppns;
+ u32 *consume_ppns;
+ bool initialized;
+};
+
+/* VMCIqueue_pairAllocInfo */
+struct vmci_qp_alloc_info {
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u64 ppn_va; /* Start VA of queue pair PPNs. */
+ u64 num_ppns;
+ s32 result;
+ u32 version;
+};
+
+/* VMCIqueue_pairSetVAInfo */
+struct vmci_qp_set_va_info {
+ struct vmci_handle handle;
+ u64 va; /* Start VA of queue pair PPNs. */
+ u64 num_ppns;
+ u32 version;
+ s32 result;
+};
+
+/*
+ * For backwards compatibility, here is a version of the
+ * VMCIqueue_pairPageFileInfo structure from before support for host
+ * end-points was added. Note that the current version of that
+ * structure requires VMX to pass down the VA of the mapped file.
+ * Before host support was added there was nothing of the sort. So,
+ * when the driver sees the ioctl with a parameter that is the size of
+ * VMCIqueue_pairPageFileInfo_NoHostQP, it can infer that the version
+ * of VMX running can't attach to host end points because it doesn't
+ * provide the VA of the mapped files.
+ *
+ * The Linux driver doesn't get an indication of the size of the
+ * structure passed down from user space. So, to fix a long-standing
+ * but unfiled bug, the _pad field has been renamed to version.
+ * Existing versions of VMX always initialize the PageFileInfo
+ * structure so that _pad (now version) is set to 0.
+ *
+ * A version value of 1 indicates that the size of the structure has
+ * been increased to include two UVAs: produce_uva and consume_uva.
+ * These UVAs are the user addresses of the mmap()'d files backing the
+ * queue contents.
+ *
+ * In addition, if VMX gets an error when sending down the
+ * VMCIqueue_pairPageFileInfo structure, it will retry with the
+ * _NoHostQP version of the structure to see if an older VMCI kernel
+ * module is running.
+ */
+
+/* VMCIqueue_pairPageFileInfo */
+struct vmci_qp_page_file_info {
+ struct vmci_handle handle;
+ u64 produce_page_file; /* User VA. */
+ u64 consume_page_file; /* User VA. */
+ u64 produce_page_file_size; /* Size of the file name array. */
+ u64 consume_page_file_size; /* Size of the file name array. */
+ s32 result;
+ u32 version; /* Was _pad. */
+ u64 produce_va; /* User VA of the mapped file. */
+ u64 consume_va; /* User VA of the mapped file. */
+};
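
The compatibility rules above boil down to a size-plus-version check when the ioctl arrives. A purely illustrative sketch (the real ioctl handler lives in the host-side code and its names differ):

/* Hypothetical helper mirroring the compatibility rules described above. */
static bool example_vmx_provides_mapped_va(size_t ioctl_param_size,
					   const struct vmci_qp_page_file_info *info)
{
	/* A pre-host-endpoint VMX passes a shorter structure without UVAs. */
	if (ioctl_param_size < sizeof(*info))
		return false;

	/* version >= 1 means produce_va/consume_va were filled in. */
	return info->version >= 1;
}
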
+
+/* vmci queuepair detach info */
+struct vmci_qp_dtch_info {
+ struct vmci_handle handle;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * struct vmci_qp_page_store describes how the memory of a given queue pair
+ * is backed. When the queue pair is between the host and a guest, the
+ * page store consists of references to the guest pages. On vmkernel,
+ * this is a list of PPNs, and on hosted, it is a user VA where the
+ * queue pair is mapped into the VMX address space.
+ */
+struct vmci_qp_page_store {
+ /* Reference to pages backing the queue pair. */
+ u64 pages;
+	/* Length of page list/virtual address range (in pages). */
+ u32 len;
+};
+
+/*
+ * This data type contains the information about a queue.
+ * There are two queues (hence the name queue pair) per transaction model
+ * between a pair of end points, A & B. One queue is used by end point A to
+ * transmit commands and responses to B. The other queue is used by B to
+ * transmit commands and responses to A.
+ *
+ * struct vmci_queue_kern_if is a per-OS defined Queue structure. It contains
+ * either a direct pointer to the linear address of the buffer contents or a
+ * pointer to structures which help the OS locate those data pages. See
+ * vmciKernelIf.c for each platform for its definition.
+ */
+struct vmci_queue {
+ struct vmci_queue_header *q_header;
+ struct vmci_queue_header *saved_header;
+ struct vmci_queue_kern_if *kernel_if;
+};
+
+/*
+ * Utility function that checks whether the fields of the page
+ * store contain valid values.
+ * Result:
+ * true if the page store is well-formed, false otherwise.
+ */
+static inline bool
+VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
+{
+ return page_store->len >= 2;
+}
+
+/*
+ * Helper function to check if the non-blocking flag
+ * is set for a given queue pair.
+ */
+static inline bool vmci_can_block(u32 flags)
+{
+ return !(flags & VMCI_QPFLAG_NONBLOCK);
+}
+
+/*
+ * Helper function to check if the queue pair is pinned
+ * into memory.
+ */
+static inline bool vmci_qp_pinned(u32 flags)
+{
+ return flags & VMCI_QPFLAG_PINNED;
+}
+
+void vmci_qp_broker_exit(void);
+int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
+ u32 flags, u32 priv_flags,
+ u64 produce_size, u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context);
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+ u64 produce_uva, u64 consume_uva,
+ struct vmci_ctx *context);
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
+
+void vmci_qp_guest_endpoints_exit(void);
+
+int vmci_qp_alloc(struct vmci_handle *handle,
+ struct vmci_queue **produce_q, u64 produce_size,
+ struct vmci_queue **consume_q, u64 consume_size,
+ u32 peer, u32 flags, u32 priv_flags,
+ bool guest_endpoint, vmci_event_release_cb wakeup_cb,
+ void *client_data);
+int vmci_qp_broker_map(struct vmci_handle handle,
+ struct vmci_ctx *context, u64 guest_mem);
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+ struct vmci_ctx *context, u32 gid);
+
+#endif /* _VMCI_QUEUE_PAIR_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
new file mode 100644
index 000000000000..a196f84a4fd2
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -0,0 +1,229 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/rculist.h>
+
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+
+
+#define VMCI_RESOURCE_HASH_BITS 7
+#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS)
+
+struct vmci_hash_table {
+ spinlock_t lock;
+ struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
+};
+
+static struct vmci_hash_table vmci_resource_table = {
+ .lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
+};
+
+static unsigned int vmci_resource_hash(struct vmci_handle handle)
+{
+ return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
+}
+
+/*
+ * Gets a resource (if one exists) matching the given handle from the hash table.
+ */
+static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
+ enum vmci_resource_type type)
+{
+ struct vmci_resource *r, *resource = NULL;
+ struct hlist_node *node;
+ unsigned int idx = vmci_resource_hash(handle);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(r, node,
+ &vmci_resource_table.entries[idx], node) {
+ u32 cid = r->handle.context;
+ u32 rid = r->handle.resource;
+
+ if (r->type == type &&
+ rid == handle.resource &&
+ (cid == handle.context || cid == VMCI_INVALID_ID)) {
+ resource = r;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return resource;
+}
+
+/*
+ * Find an unused resource ID and return it. The first
+ * VMCI_RESERVED_RESOURCE_ID_MAX IDs are reserved, so we start
+ * from that value + 1.
+ * Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
+ */
+static u32 vmci_resource_find_id(u32 context_id,
+ enum vmci_resource_type resource_type)
+{
+ static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+ u32 old_rid = resource_id;
+ u32 current_rid;
+
+ /*
+ * Generate a unique resource ID. Keep on trying until we wrap around
+ * in the RID space.
+ */
+ do {
+ struct vmci_handle handle;
+
+ current_rid = resource_id;
+ resource_id++;
+ if (unlikely(resource_id == VMCI_INVALID_ID)) {
+ /* Skip the reserved rids. */
+ resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+ }
+
+ handle = vmci_make_handle(context_id, current_rid);
+ if (!vmci_resource_lookup(handle, resource_type))
+ return current_rid;
+ } while (resource_id != old_rid);
+
+ return VMCI_INVALID_ID;
+}
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+ enum vmci_resource_type resource_type,
+ struct vmci_handle handle)
+
+{
+ unsigned int idx;
+ int result;
+
+ spin_lock(&vmci_resource_table.lock);
+
+ if (handle.resource == VMCI_INVALID_ID) {
+ handle.resource = vmci_resource_find_id(handle.context,
+ resource_type);
+ if (handle.resource == VMCI_INVALID_ID) {
+ result = VMCI_ERROR_NO_HANDLE;
+ goto out;
+ }
+ } else if (vmci_resource_lookup(handle, resource_type)) {
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ goto out;
+ }
+
+ resource->handle = handle;
+ resource->type = resource_type;
+ INIT_HLIST_NODE(&resource->node);
+ kref_init(&resource->kref);
+ init_completion(&resource->done);
+
+ idx = vmci_resource_hash(resource->handle);
+ hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
+
+ result = VMCI_SUCCESS;
+
+out:
+ spin_unlock(&vmci_resource_table.lock);
+ return result;
+}
+
+void vmci_resource_remove(struct vmci_resource *resource)
+{
+ struct vmci_handle handle = resource->handle;
+ unsigned int idx = vmci_resource_hash(handle);
+ struct vmci_resource *r;
+ struct hlist_node *node;
+
+ /* Remove resource from hash table. */
+ spin_lock(&vmci_resource_table.lock);
+
+ hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+ if (vmci_handle_is_equal(r->handle, resource->handle)) {
+ hlist_del_init_rcu(&r->node);
+ break;
+ }
+ }
+
+ spin_unlock(&vmci_resource_table.lock);
+ synchronize_rcu();
+
+ vmci_resource_put(resource);
+ wait_for_completion(&resource->done);
+}
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+ enum vmci_resource_type resource_type)
+{
+ struct vmci_resource *r, *resource = NULL;
+
+ rcu_read_lock();
+
+ r = vmci_resource_lookup(resource_handle, resource_type);
+ if (r &&
+ (resource_type == r->type ||
+ resource_type == VMCI_RESOURCE_TYPE_ANY)) {
+ resource = vmci_resource_get(r);
+ }
+
+ rcu_read_unlock();
+
+ return resource;
+}
+
+/*
+ * Get a reference to given resource.
+ */
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
+{
+ kref_get(&resource->kref);
+
+ return resource;
+}
+
+static void vmci_release_resource(struct kref *kref)
+{
+ struct vmci_resource *resource =
+ container_of(kref, struct vmci_resource, kref);
+
+ /* Verify the resource has been unlinked from hash table */
+ WARN_ON(!hlist_unhashed(&resource->node));
+
+ /* Signal that container of this resource can now be destroyed */
+ complete(&resource->done);
+}
+
+/*
+ * The resource's release function is called only when the last
+ * reference is dropped. At that point nobody else can increment the
+ * count again (the resource is gone from the resource hash table),
+ * so there's no need for locking here.
+ */
+int vmci_resource_put(struct vmci_resource *resource)
+{
+ /*
+ * We propagate the information back to caller in case it wants to know
+ * whether entry was freed.
+ */
+ return kref_put(&resource->kref, vmci_release_resource) ?
+ VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
+}
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
+{
+ return resource->handle;
+}
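
Putting the pieces together, the intended lifecycle is: embed a struct vmci_resource in your object, add it (optionally letting the table pick an unused resource ID), look it up with vmci_resource_by_handle() and balance vmci_resource_get()/vmci_resource_put() around use, and finally call vmci_resource_remove(), which blocks until the last reference is gone. A hedged sketch with invented wrapper names:

struct example_obj {
	struct vmci_resource resource;
	/* caller-specific payload would live here */
};

static int example_register(struct example_obj *obj, u32 context_id)
{
	/* VMCI_INVALID_ID asks vmci_resource_add() to pick a free RID. */
	struct vmci_handle h = vmci_make_handle(context_id, VMCI_INVALID_ID);

	return vmci_resource_add(&obj->resource,
				 VMCI_RESOURCE_TYPE_DOORBELL, h);
}

static void example_unregister(struct example_obj *obj)
{
	/* Unhashes the entry and waits for outstanding references. */
	vmci_resource_remove(&obj->resource);
}
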
diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h
new file mode 100644
index 000000000000..9190cd298bee
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.h
@@ -0,0 +1,59 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_RESOURCE_H_
+#define _VMCI_RESOURCE_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+
+enum vmci_resource_type {
+ VMCI_RESOURCE_TYPE_ANY,
+ VMCI_RESOURCE_TYPE_API,
+ VMCI_RESOURCE_TYPE_GROUP,
+ VMCI_RESOURCE_TYPE_DATAGRAM,
+ VMCI_RESOURCE_TYPE_DOORBELL,
+ VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+ VMCI_RESOURCE_TYPE_QPAIR_HOST
+};
+
+struct vmci_resource {
+ struct vmci_handle handle;
+ enum vmci_resource_type type;
+ struct hlist_node node;
+ struct kref kref;
+ struct completion done;
+};
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+ enum vmci_resource_type resource_type,
+ struct vmci_handle handle);
+
+void vmci_resource_remove(struct vmci_resource *resource);
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+ enum vmci_resource_type resource_type);
+
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource);
+int vmci_resource_put(struct vmci_resource *resource);
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource);
+
+#endif /* _VMCI_RESOURCE_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c
new file mode 100644
index 000000000000..91090658b929
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.c
@@ -0,0 +1,226 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+/*
+ * Make a routing decision for the given source and destination handles.
+ * This will try to determine the route using the handles and the available
+ * devices. Will set the source context if it is invalid.
+ */
+int vmci_route(struct vmci_handle *src,
+ const struct vmci_handle *dst,
+ bool from_guest,
+ enum vmci_route *route)
+{
+ bool has_host_device = vmci_host_code_active();
+ bool has_guest_device = vmci_guest_code_active();
+
+ *route = VMCI_ROUTE_NONE;
+
+ /*
+ * "from_guest" is only ever set to true by
+ * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent),
+ * which comes from the VMX, so we know it is coming from a
+ * guest.
+ *
+ * To avoid inconsistencies, test these once. We will test
+ * them again when we do the actual send to ensure that we do
+ * not touch a non-existent device.
+ */
+
+ /* Must have a valid destination context. */
+ if (VMCI_INVALID_ID == dst->context)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /* Anywhere to hypervisor. */
+ if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
+
+ /*
+ * If this message already came from a guest then we
+ * cannot send it to the hypervisor. It must come
+ * from a local client.
+ */
+ if (from_guest)
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * We must be acting as a guest in order to send to
+ * the hypervisor.
+ */
+ if (!has_guest_device)
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+ /* And we cannot send if the source is the host context. */
+ if (VMCI_HOST_CONTEXT_ID == src->context)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * If the client passed the ANON source handle then
+ * respect it (both context and resource are invalid).
+ * However, if they passed only an invalid context,
+ * then they probably mean ANY, in which case we
+ * should set the real context here before passing it
+ * down.
+ */
+ if (VMCI_INVALID_ID == src->context &&
+ VMCI_INVALID_ID != src->resource)
+ src->context = vmci_get_context_id();
+
+ /* Send from local client down to the hypervisor. */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+ }
+
+ /* Anywhere to local client on host. */
+ if (VMCI_HOST_CONTEXT_ID == dst->context) {
+ /*
+ * If it is not from a guest but we are acting as a
+ * guest, then we need to send it down to the host.
+ * Note that if we are also acting as a host then this
+ * will prevent us from sending from local client to
+ * local client, but we accept that restriction as a
+ * way to remove any ambiguity from the host context.
+ */
+ if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) {
+ /*
+ * If the hypervisor is the source, this is
+ * host local communication. The hypervisor
+ * may send vmci event datagrams to the host
+ * itself, but it will never send datagrams to
+ * an "outer host" through the guest device.
+ */
+
+ if (has_host_device) {
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ } else {
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+ }
+ }
+
+ if (!from_guest && has_guest_device) {
+ /* If no source context then use the current. */
+ if (VMCI_INVALID_ID == src->context)
+ src->context = vmci_get_context_id();
+
+ /* Send it from local client down to the host. */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+ }
+
+ /*
+ * Otherwise we already received it from a guest and
+ * it is destined for a local client on this host, or
+ * it is from another local client on this host. We
+ * must be acting as a host to service it.
+ */
+ if (!has_host_device)
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+ if (VMCI_INVALID_ID == src->context) {
+ /*
+ * If it came from a guest then it must have a
+ * valid context. Otherwise we can use the
+ * host context.
+ */
+ if (from_guest)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src->context = VMCI_HOST_CONTEXT_ID;
+ }
+
+ /* Route to local client. */
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ }
+
+ /*
+ * If we are acting as a host then this might be destined for
+ * a guest.
+ */
+ if (has_host_device) {
+ /* It will have a context if it is meant for a guest. */
+ if (vmci_ctx_exists(dst->context)) {
+ if (VMCI_INVALID_ID == src->context) {
+ /*
+ * If it came from a guest then it
+ * must have a valid context.
+ * Otherwise we can use the host
+ * context.
+ */
+
+ if (from_guest)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src->context = VMCI_HOST_CONTEXT_ID;
+ } else if (VMCI_CONTEXT_IS_VM(src->context) &&
+ src->context != dst->context) {
+ /*
+ * VM to VM communication is not
+ * allowed. Since we catch all
+ * communication destined for the host
+ * above, this must be destined for a
+ * VM since there is a valid context.
+ */
+
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+
+ /* Pass it up to the guest. */
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ } else if (!has_guest_device) {
+ /*
+ * The host is attempting to reach a CID
+ * without an active context, and we can't
+ * send it down, since we have no guest
+ * device.
+ */
+
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+ }
+
+ /*
+ * We must be a guest trying to send to another guest, which means
+ * we need to send it down to the host. We do not filter out VM to
+ * VM communication here, since we want to be able to use the guest
+ * driver on older versions that do support VM to VM communication.
+ */
+ if (!has_guest_device) {
+ /*
+ * Ending up here means we have neither guest nor host
+ * device.
+ */
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+ }
+
+ /* If no source context then use the current context. */
+ if (VMCI_INVALID_ID == src->context)
+ src->context = vmci_get_context_id();
+
+ /*
+ * Send it from local client down to the host, which will
+ * route it to the other guest for us.
+ */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+}
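
For callers, the routing result reduces to "deliver through the host personality or push down through the guest device". A minimal sketch of the decision, assuming a datagram originating from a local (non-guest) client:

/* Hypothetical helper: true if the datagram must go out via the guest device. */
static bool example_send_as_guest(struct vmci_handle *src,
				  const struct vmci_handle *dst)
{
	enum vmci_route route;

	if (vmci_route(src, dst, false, &route) < VMCI_SUCCESS)
		return false;	/* unroutable; caller reports the error */

	return route == VMCI_ROUTE_AS_GUEST;
}
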
diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h
new file mode 100644
index 000000000000..3b30e82419c3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.h
@@ -0,0 +1,30 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_ROUTE_H_
+#define _VMCI_ROUTE_H_
+
+#include <linux/vmw_vmci_defs.h>
+
+enum vmci_route {
+ VMCI_ROUTE_NONE,
+ VMCI_ROUTE_AS_HOST,
+ VMCI_ROUTE_AS_GUEST,
+};
+
+int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst,
+ bool from_guest, enum vmci_route *route);
+
+#endif /* _VMCI_ROUTE_H_ */
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783bf924..5562308699bc 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -52,6 +52,7 @@ config MMC_BLOCK_BOUNCE
config SDIO_UART
tristate "SDIO UART/GPS class support"
+ depends on TTY
help
SDIO function driver for SDIO cards that implements the UART
class, as well as the GPS class which appears like a UART.
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index bd57a11acc79..c931dfe6a59c 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -381,7 +381,6 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
static void sdio_uart_receive_chars(struct sdio_uart_port *port,
unsigned int *status)
{
- struct tty_struct *tty = tty_port_tty_get(&port->port);
unsigned int ch, flag;
int max_count = 256;
@@ -418,23 +417,19 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
}
if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
- if (tty)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&port->port, ch, flag);
/*
* Overrun is special. Since it's reported immediately,
* it doesn't affect the current character.
*/
if (*status & ~port->ignore_status_mask & UART_LSR_OE)
- if (tty)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
*status = sdio_in(port, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+
+ tty_flip_buffer_push(&port->port);
}
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef103871517f..269d072ef55e 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -18,8 +18,7 @@ config MMC_UNSAFE_RESUME
module parameter "removable=0" or "removable=1".
config MMC_CLKGATE
- bool "MMC host clock gating (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "MMC host clock gating"
help
This will attempt to aggressively gate the clock to the MMC card.
This is done to save power due to gating off the logic and bus
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8d13c6594520..3be8b94d7914 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -69,7 +69,7 @@ config MMC_SDHCI_PCI
If unsure, say N.
config MMC_RICOH_MMC
- bool "Ricoh MMC Controller Disabler (EXPERIMENTAL)"
+ bool "Ricoh MMC Controller Disabler"
depends on MMC_SDHCI_PCI
help
This adds a pci quirk to disable Ricoh MMC Controller. This
@@ -186,9 +186,6 @@ config MMC_SDHCI_S3C
 often referred to as the HSMMC block in some of the Samsung S3C
range of SoC.
- Note, due to the problems with DMA, the DMA support is only
- available with CONFIG_EXPERIMENTAL is selected.
-
If you have a controller with this interface, say Y or M here.
If unsure, say N.
@@ -233,7 +230,7 @@ config MMC_SDHCI_SPEAR
config MMC_SDHCI_S3C_DMA
bool "DMA support on S3C SDHCI"
- depends on MMC_SDHCI_S3C && EXPERIMENTAL
+ depends on MMC_SDHCI_S3C
help
Enable DMA support on the Samsung S3C SDHCI glue. The DMA
has proved to be problematic if the controller encounters
@@ -330,8 +327,8 @@ config MMC_MXS
If unsure, say N.
config MMC_TIFM_SD
- tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI
+ tristate "TI Flash Media MMC/SD Interface support"
+ depends on PCI
select TIFM_CORE
help
Say Y here if you want to be able to access MMC/SD cards with
@@ -410,8 +407,7 @@ config MMC_S3C_PIO
the S3C MCI driver.
config MMC_S3C_DMA
- bool "Use DMA transfers only (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Use DMA transfers only"
help
 Use DMA to transfer data between memory and the hardware.
@@ -420,7 +416,7 @@ config MMC_S3C_DMA
option is useful.
config MMC_S3C_PIODMA
- bool "Support for both PIO and DMA (EXPERIMENTAL)"
+ bool "Support for both PIO and DMA"
help
Compile both the PIO and DMA transfer routines into the
driver and let the platform select at run-time which one
@@ -431,8 +427,8 @@ config MMC_S3C_PIODMA
endchoice
config MMC_SDRICOH_CS
- tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI && PCMCIA
+ tristate "MMC/SD driver for Ricoh Bay1Controllers"
+ depends on PCI && PCMCIA
help
Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA
card whenever you insert a MMC or SD card into the card slot.
@@ -461,7 +457,7 @@ config MMC_SDHI
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index b648058d7182..e4e218c930bd 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -49,6 +49,8 @@ obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
+obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
+
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index 8ee0f74f9374..083fcd29c9c6 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -134,7 +134,7 @@ static struct pci_driver dw_mci_pci_driver = {
.name = "dw_mmc_pci",
.id_table = dw_mci_pci_id,
.probe = dw_mci_pci_probe,
- .remove = __devexit_p(dw_mci_pci_remove),
+ .remove = dw_mci_pci_remove,
.driver = {
.pm = &dw_mci_pci_pmops
},
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 222036c9e053..41c27b74b003 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -10,6 +10,7 @@
* (at your option) any later version.
*/
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -46,9 +47,9 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
host->dev = &pdev->dev;
host->irq_flags = 0;
host->pdata = pdev->dev.platform_data;
- host->regs = devm_request_and_ioremap(&pdev->dev, regs);
- if (!host->regs)
- return -ENOMEM;
+ host->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->regs))
+ return PTR_ERR(host->regs);
if (drv_data && drv_data->init) {
ret = drv_data->init(host);
@@ -120,7 +121,7 @@ MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
static struct platform_driver dw_mci_pltfm_driver = {
.probe = dw_mci_pltfm_probe,
- .remove = __devexit_p(dw_mci_pltfm_remove),
+ .remove = dw_mci_pltfm_remove,
.driver = {
.name = "dw_mmc",
.of_match_table = of_match_ptr(dw_mci_pltfm_match),
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 150772395cc6..372e921389c8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
+#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
@@ -59,6 +60,7 @@ static unsigned int fmax = 515633;
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
* @pwrreg_powerup: power up value for MMCIPOWER register
* @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
*/
struct variant_data {
unsigned int clkreg;
@@ -71,6 +73,7 @@ struct variant_data {
bool blksz_datactrl16;
u32 pwrreg_powerup;
bool signal_direction;
+ bool pwrreg_clkgate;
};
static struct variant_data variant_arm = {
@@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
.pwrreg_powerup = MCI_PWR_UP,
};
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+ .fifosize = 128 * 4,
+ .fifohalfsize = 64 * 4,
+ .clkreg_enable = MCI_ARM_HWFCEN,
+ .datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+};
+
static struct variant_data variant_u300 = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
@@ -95,6 +106,7 @@ static struct variant_data variant_u300 = {
.sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .pwrreg_clkgate = true,
};
static struct variant_data variant_nomadik = {
@@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = {
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .pwrreg_clkgate = true,
};
static struct variant_data variant_ux500 = {
@@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = {
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .pwrreg_clkgate = true,
};
static struct variant_data variant_ux500v2 = {
@@ -131,9 +145,28 @@ static struct variant_data variant_ux500v2 = {
.blksz_datactrl16 = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .pwrreg_clkgate = true,
};
/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ if (!data)
+ return 0;
+
+ if (!is_power_of_2(data->blksz)) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
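
The helper centralizes a check that was previously done only in mmci_request(); with this change both the request path and the pre-request (DMA preparation) path refuse non-power-of-two block sizes. A tiny, hedged illustration of what it accepts and rejects:

/* Illustrative only: 512-byte blocks pass, 24-byte blocks return -EINVAL. */
static int example_check_blksz(struct mmci_host *host, unsigned int blksz)
{
	struct mmc_data data = { .blksz = blksz };

	return mmci_validate_data(host, &data);
}
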
+
+/*
* This must be called with host->lock held
*/
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
@@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
clk |= MCI_ST_8BIT_BUS;
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ clk |= MCI_ST_UX500_NEG_EDGE;
+
mmci_write_clkreg(host, clk);
}
@@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
host->dma_rx_channel = host->dma_tx_channel = NULL;
}
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ host->data->host_cookie = 0;
+}
+
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
- struct dma_chan *chan = host->dma_current;
+ struct dma_chan *chan;
enum dma_data_direction dir;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
u32 status;
int i;
@@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
- dmaengine_terminate_all(chan);
+ mmci_dma_data_error(host);
if (!data->error)
data->error = -EIO;
}
- if (data->flags & MMC_DATA_WRITE) {
- dir = DMA_TO_DEVICE;
- } else {
- dir = DMA_FROM_DEVICE;
- }
-
if (!data->host_cookie)
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ mmci_dma_unmap(host, data);
/*
* Use of DMA with scatter-gather is impossible.
@@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
mmci_dma_release(host);
}
-}
-static void mmci_dma_data_error(struct mmci_host *host)
-{
- dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
}
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
- struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct dma_chan **dma_chan,
+ struct dma_async_tx_descriptor **dma_desc)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
@@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
enum dma_data_direction buffer_dirn;
int nr_sg;
- /* Check if next job is already prepared */
- if (data->host_cookie && !next &&
- host->dma_current && host->dma_desc_current)
- return 0;
-
- if (!next) {
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
- }
-
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
buffer_dirn = DMA_FROM_DEVICE;
@@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (!desc)
goto unmap_exit;
- if (next) {
- next->dma_chan = chan;
- next->dma_desc = desc;
- } else {
- host->dma_current = chan;
- host->dma_desc_current = desc;
- }
+ *dma_chan = chan;
+ *dma_desc = desc;
return 0;
unmap_exit:
- if (!next)
- dmaengine_terminate_all(chan);
dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
return -ENOMEM;
}
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ /* Check if next job is already prepared. */
+ if (host->dma_current && host->dma_desc_current)
+ return 0;
+
+	/* No job was prepared, thus do it now. */
+ return __mmci_dma_prep_data(host, data, &host->dma_current,
+ &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct mmci_host_next *nd = &host->next_data;
+ return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
int ret;
struct mmc_data *data = host->data;
- ret = mmci_dma_prep_data(host, host->data, NULL);
+ ret = mmci_dma_prep_data(host, host->data);
if (ret)
return ret;
@@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
struct mmci_host_next *next = &host->next_data;
- if (data->host_cookie && data->host_cookie != next->cookie) {
- pr_warning("[%s] invalid cookie: data->host_cookie %d"
- " host->next_data.cookie %d\n",
- __func__, data->host_cookie, host->next_data.cookie);
- data->host_cookie = 0;
- }
-
- if (!data->host_cookie)
- return;
+ WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+ WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
host->dma_desc_current = next->dma_desc;
host->dma_current = next->dma_chan;
-
next->dma_desc = NULL;
next->dma_chan = NULL;
}
@@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (!data)
return;
- if (data->host_cookie) {
- data->host_cookie = 0;
+ BUG_ON(data->host_cookie);
+
+ if (mmci_validate_data(host, data))
return;
- }
- /* if config for dma */
- if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
- ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
- if (mmci_dma_prep_data(host, data, nd))
- data->host_cookie = 0;
- else
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
- }
+ if (!mmci_dma_prep_next(host, data))
+ data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -561,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- struct dma_chan *chan;
- enum dma_data_direction dir;
- if (!data)
+ if (!data || !data->host_cookie)
return;
- if (data->flags & MMC_DATA_READ) {
- dir = DMA_FROM_DEVICE;
- chan = host->dma_rx_channel;
- } else {
- dir = DMA_TO_DEVICE;
- chan = host->dma_tx_channel;
- }
+ mmci_dma_unmap(host, data);
+ if (err) {
+ struct mmci_host_next *next = &host->next_data;
+ struct dma_chan *chan;
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_rx_channel;
+ else
+ chan = host->dma_tx_channel;
+ dmaengine_terminate_all(chan);
- /* if config for dma */
- if (chan) {
- if (err)
- dmaengine_terminate_all(chan);
- if (data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, dir);
- mrq->data->host_cookie = 0;
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
}
}
@@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}
+static inline void mmci_dma_finalize(struct mmci_host *host,
+ struct mmc_data *data)
+{
+}
+
static inline void mmci_dma_data_error(struct mmci_host *host)
{
}
@@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
mmci_write_clkreg(host, clk);
}
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ datactrl |= MCI_ST_DPSM_DDRMODE;
+
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
@@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
u32 remain, success;
/* Terminate the DMA transfer */
- if (dma_inprogress(host))
+ if (dma_inprogress(host)) {
mmci_dma_data_error(host);
+ mmci_dma_unmap(host, data);
+ }
/*
* Calculate how far we are into the transfer. Note that
@@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & MCI_DATAEND || data->error) {
if (dma_inprogress(host))
- mmci_dma_unmap(host, data);
+ mmci_dma_finalize(host, data);
mmci_stop_data(host);
if (!data->error)
@@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
if (!cmd->data || cmd->error) {
if (host->data) {
/* Terminate the DMA transfer */
- if (dma_inprogress(host))
+ if (dma_inprogress(host)) {
mmci_dma_data_error(host);
+ mmci_dma_unmap(host, host->data);
+ }
mmci_stop_data(host);
}
mmci_request_end(host, cmd->mrq);
@@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq != NULL);
- if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
- dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
- mrq->data->blksz);
- mrq->cmd->error = -EINVAL;
+ mrq->cmd->error = mmci_validate_data(host, mrq->data);
+ if (mrq->cmd->error) {
mmc_request_done(mmc, mrq);
return;
}
@@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct variant_data *variant = host->variant;
u32 pwr = 0;
unsigned long flags;
- int ret;
pm_runtime_get_sync(mmc_dev(mmc));
@@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
- if (host->vcc)
- ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
break;
case MMC_POWER_UP:
- if (host->vcc) {
- ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
- if (ret) {
- dev_err(mmc_dev(mmc), "unable to set OCR\n");
- /*
- * The .set_ios() function in the mmc_host_ops
- * struct return void, and failing to set the
- * power should be rare so we print an error
- * and return here.
- */
- goto out;
- }
- }
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
/*
* The ST Micro variant doesn't have the PL180s MCI_PWR_UP
* and instead uses MCI_PWR_ON so apply whatever value is
@@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
+ /*
+ * If clock = 0 and the variant requires the MMCIPOWER to be used for
+ * gating the clock, the MCI_PWR_ON bit is cleared.
+ */
+ if (!ios->clock && variant->pwrreg_clkgate)
+ pwr &= ~MCI_PWR_ON;
+
spin_lock_irqsave(&host->lock, flags);
mmci_set_clkreg(host, ios->clock);
@@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
- out:
pm_runtime_mark_last_busy(mmc_dev(mmc));
pm_runtime_put_autosuspend(mmc_dev(mmc));
}
@@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev,
} else
dev_warn(&dev->dev, "could not get default pinstate\n");
-#ifdef CONFIG_REGULATOR
- /* If we're using the regulator framework, try to fetch a regulator */
- host->vcc = regulator_get(&dev->dev, "vmmc");
- if (IS_ERR(host->vcc))
- host->vcc = NULL;
- else {
- int mask = mmc_regulator_get_ocrmask(host->vcc);
-
- if (mask < 0)
- dev_err(&dev->dev, "error getting OCR mask (%d)\n",
- mask);
- else {
- host->mmc->ocr_avail = (u32) mask;
- if (plat->ocr_mask)
- dev_warn(&dev->dev,
- "Provided ocr_mask/setpower will not be used "
- "(using regulator instead)\n");
- }
- }
-#endif
- /* Fall back to platform data if no regulator is found */
- if (host->vcc == NULL)
+ /* Get regulators and the supported OCR mask */
+ mmc_regulator_get_supply(mmc);
+ if (!mmc->ocr_avail)
mmc->ocr_avail = plat->ocr_mask;
+ else if (plat->ocr_mask)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+
mmc->caps = plat->capabilities;
mmc->caps2 = plat->capabilities2;
+ /* We support these PM capabilities. */
+ mmc->pm_caps = MMC_PM_KEEP_POWER;
+
/*
* We can do SGIO
*/
@@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev)
clk_disable_unprepare(host->clk);
clk_put(host->clk);
- if (host->vcc)
- mmc_regulator_set_ocr(mmc, host->vcc, 0);
- regulator_put(host->vcc);
-
mmc_free_host(mmc);
amba_release_regions(dev);
@@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ clk_disable_unprepare(host->clk);
+ }
+
+ return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ clk_prepare_enable(host->clk);
+ }
+
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops mmci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+ SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
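
With the new runtime PM callbacks wired into mmci_dev_pm_ops, the interface clock is gated whenever the device is runtime-suspended, so any code that touches MMCI registers has to bracket the access with runtime PM calls, as the driver already does in its request and set_ios paths. A hedged sketch of that pattern (helper name invented):

/* Illustrative pattern only; the real request paths differ in detail. */
static void example_touch_registers(struct mmc_host *mmc)
{
	pm_runtime_get_sync(mmc_dev(mmc));	/* resumes: clock re-enabled */

	/* ... access MMCI registers here ... */

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));	/* may gate the clock again */
}
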
static struct amba_id mmci_ids[] = {
@@ -1652,6 +1703,11 @@ static struct amba_id mmci_ids[] = {
.data = &variant_arm_extended_fifo,
},
{
+ .id = 0x02041180,
+ .mask = 0xff0fffff,
+ .data = &variant_arm_extended_fifo_hwfc,
+ },
+ {
.id = 0x00041181,
.mask = 0x000fffff,
.data = &variant_arm,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index d34d8c0add8e..1f33ad5333a0 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -28,6 +28,8 @@
#define MCI_ST_UX500_NEG_EDGE (1 << 13)
#define MCI_ST_UX500_HWFCEN (1 << 14)
#define MCI_ST_UX500_CLK_INV (1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN (1 << 12)
#define MMCIARGUMENT 0x008
#define MMCICOMMAND 0x00c
@@ -193,7 +195,6 @@ struct mmci_host {
/* pio stuff */
struct sg_mapping_iter sg_miter;
unsigned int size;
- struct regulator *vcc;
/* pinctrl handles */
struct pinctrl *pinctrl;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index de4c20b3936c..f8dd36102949 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -50,8 +50,6 @@ struct mvsd_host {
struct timer_list timer;
struct mmc_host *mmc;
struct device *dev;
- struct resource *res;
- int irq;
struct clk *clk;
int gpio_card_detect;
int gpio_write_protect;
@@ -718,10 +716,6 @@ static int __init mvsd_probe(struct platform_device *pdev)
if (!r || irq < 0 || !mvsd_data)
return -ENXIO;
- r = request_mem_region(r->start, SZ_1K, DRIVER_NAME);
- if (!r)
- return -EBUSY;
-
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
@@ -731,8 +725,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->dev = &pdev->dev;
- host->res = r;
host->base_clock = mvsd_data->clock / 2;
+ host->clk = ERR_PTR(-EINVAL);
mmc->ops = &mvsd_ops;
@@ -752,7 +746,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = ioremap(r->start, SZ_4K);
+ host->base = devm_request_and_ioremap(&pdev->dev, r);
if (!host->base) {
ret = -ENOMEM;
goto out;
@@ -765,44 +759,45 @@ static int __init mvsd_probe(struct platform_device *pdev)
mvsd_power_down(host);
- ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host);
+ ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
if (ret) {
pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);
goto out;
- } else
- host->irq = irq;
+ }
/* Not all platforms can gate the clock, so it is not
 an error if the clock does not exist. */
- host->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(host->clk)) {
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk))
clk_prepare_enable(host->clk);
- }
if (mvsd_data->gpio_card_detect) {
- ret = gpio_request(mvsd_data->gpio_card_detect,
- DRIVER_NAME " cd");
+ ret = devm_gpio_request_one(&pdev->dev,
+ mvsd_data->gpio_card_detect,
+ GPIOF_IN, DRIVER_NAME " cd");
if (ret == 0) {
- gpio_direction_input(mvsd_data->gpio_card_detect);
irq = gpio_to_irq(mvsd_data->gpio_card_detect);
- ret = request_irq(irq, mvsd_card_detect_irq,
- IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING,
- DRIVER_NAME " cd", host);
+ ret = devm_request_irq(&pdev->dev, irq,
+ mvsd_card_detect_irq,
+ IRQ_TYPE_EDGE_RISING |
+ IRQ_TYPE_EDGE_FALLING,
+ DRIVER_NAME " cd", host);
if (ret == 0)
host->gpio_card_detect =
mvsd_data->gpio_card_detect;
else
- gpio_free(mvsd_data->gpio_card_detect);
+ devm_gpio_free(&pdev->dev,
+ mvsd_data->gpio_card_detect);
}
}
if (!host->gpio_card_detect)
mmc->caps |= MMC_CAP_NEEDS_POLL;
if (mvsd_data->gpio_write_protect) {
- ret = gpio_request(mvsd_data->gpio_write_protect,
- DRIVER_NAME " wp");
+ ret = devm_gpio_request_one(&pdev->dev,
+ mvsd_data->gpio_write_protect,
+ GPIOF_IN, DRIVER_NAME " wp");
if (ret == 0) {
- gpio_direction_input(mvsd_data->gpio_write_protect);
host->gpio_write_protect =
mvsd_data->gpio_write_protect;
}
@@ -824,26 +819,11 @@ static int __init mvsd_probe(struct platform_device *pdev)
return 0;
out:
- if (host) {
- if (host->irq)
- free_irq(host->irq, host);
- if (host->gpio_card_detect) {
- free_irq(gpio_to_irq(host->gpio_card_detect), host);
- gpio_free(host->gpio_card_detect);
- }
- if (host->gpio_write_protect)
- gpio_free(host->gpio_write_protect);
- if (host->base)
- iounmap(host->base);
- }
- if (r)
- release_resource(r);
- if (mmc)
- if (!IS_ERR_OR_NULL(host->clk)) {
+ if (mmc) {
+ if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
- clk_put(host->clk);
- }
mmc_free_host(mmc);
+ }
return ret;
}
@@ -852,28 +832,16 @@ static int __exit mvsd_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
- if (mmc) {
- struct mvsd_host *host = mmc_priv(mmc);
+ struct mvsd_host *host = mmc_priv(mmc);
- if (host->gpio_card_detect) {
- free_irq(gpio_to_irq(host->gpio_card_detect), host);
- gpio_free(host->gpio_card_detect);
- }
- mmc_remove_host(mmc);
- free_irq(host->irq, host);
- if (host->gpio_write_protect)
- gpio_free(host->gpio_write_protect);
- del_timer_sync(&host->timer);
- mvsd_power_down(host);
- iounmap(host->base);
- release_resource(host->res);
+ mmc_remove_host(mmc);
+ del_timer_sync(&host->timer);
+ mvsd_power_down(host);
+
+ if (!IS_ERR(host->clk))
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(mmc);
- if (!IS_ERR(host->clk)) {
- clk_disable_unprepare(host->clk);
- clk_put(host->clk);
- }
- mmc_free_host(mmc);
- }
platform_set_drvdata(pdev, NULL);
return 0;
}
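The mvsdio changes above replace manually balanced request_*/free_* pairs with device-managed (devm_*) variants, which the driver core releases automatically both when probe fails and when the device is unbound, so the error and remove paths shrink considerably. A minimal sketch of the resulting probe shape, assuming the usual devm semantics (example_irq and the driver name are hypothetical):

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int irq, ret;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		/* Released automatically on probe failure or unbind. */
		ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
				       dev_name(&pdev->dev), NULL);
		if (ret)
			return ret;

		/* Optional clock: not all platforms can gate it. */
		clk = devm_clk_get(&pdev->dev, NULL);
		if (!IS_ERR(clk))
			clk_prepare_enable(clk);

		return 0;
	}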
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 206fe499ded5..5b665551a6f3 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -614,9 +614,9 @@ static int mxs_mmc_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
ssp = &host->ssp;
ssp->dev = &pdev->dev;
- ssp->base = devm_request_and_ioremap(&pdev->dev, iores);
- if (!ssp->base) {
- ret = -EADDRNOTAVAIL;
+ ssp->base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(ssp->base)) {
+ ret = PTR_ERR(ssp->base);
goto out_mmc_free;
}
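devm_ioremap_resource() differs from the older devm_request_and_ioremap() in that it returns an ERR_PTR()-encoded error (and prints its own diagnostics) rather than NULL, so callers switch from a NULL check to IS_ERR()/PTR_ERR(), as in the hunk above and in several conversions later in this series. The idiom, in brief:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* e.g. -EINVAL, -EBUSY or -ENOMEM */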
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 12eff6f8cab7..f74b5adca642 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -21,6 +21,7 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -382,8 +383,6 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
0xFF, (u8)data->blocks);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H,
0xFF, (u8)(data->blocks >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- CARD_DATA_SOURCE, 0x01, RING_BUFFER);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
DMA_DONE_INT, DMA_DONE_INT);
@@ -407,6 +406,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
0x01, RING_BUFFER);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
trans_mode | SD_TRANSFER_START);
rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
@@ -1060,26 +1060,6 @@ static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host)
return 0;
}
-static int sd_change_bank_voltage(struct realtek_pci_sdmmc *host, u8 voltage)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err;
-
- if (voltage == SD_IO_3V3) {
- err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
- if (err < 0)
- return err;
- } else if (voltage == SD_IO_1V8) {
- err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
- if (err < 0)
- return err;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -1098,11 +1078,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
rtsx_pci_start_run(pcr);
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- voltage = SD_IO_3V3;
+ voltage = OUTPUT_3V3;
else
- voltage = SD_IO_1V8;
+ voltage = OUTPUT_1V8;
- if (voltage == SD_IO_1V8) {
+ if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
if (err < 0)
@@ -1113,11 +1093,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
goto out;
}
- err = sd_change_bank_voltage(host, voltage);
+ err = rtsx_pci_switch_output_voltage(pcr, voltage);
if (err < 0)
goto out;
- if (voltage == SD_IO_1V8) {
+ if (voltage == OUTPUT_1V8) {
err = sd_wait_voltage_stable_2(host);
if (err < 0)
goto out;
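With the driver-private sd_change_bank_voltage() removed, signal-voltage switching goes through the rtsx_pci core helper, which hides the PHY register values behind the OUTPUT_3V3/OUTPUT_1V8 selectors. A condensed sketch of the resulting flow, with the intermediate error handling and the first stabilisation wait trimmed:

	voltage = (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) ?
		  OUTPUT_3V3 : OUTPUT_1V8;

	if (voltage == OUTPUT_1V8)
		rtsx_pci_write_register(pcr, SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);

	err = rtsx_pci_switch_output_voltage(pcr, voltage);
	if (err < 0)
		return err;

	if (voltage == OUTPUT_1V8)
		err = sd_wait_voltage_stable_2(host);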
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 12b0a78497f6..2592dddbd965 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -111,7 +111,7 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid)
return NULL;
}
-static int __devinit sdhci_acpi_probe(struct platform_device *pdev)
+static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
@@ -214,7 +214,7 @@ err_free:
return err;
}
-static int __devexit sdhci_acpi_remove(struct platform_device *pdev)
+static int sdhci_acpi_remove(struct platform_device *pdev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -302,7 +302,7 @@ static struct platform_driver sdhci_acpi_driver = {
.pm = &sdhci_acpi_pm_ops,
},
.probe = sdhci_acpi_probe,
- .remove = __devexit_p(sdhci_acpi_remove),
+ .remove = sdhci_acpi_remove,
};
module_platform_driver(sdhci_acpi_driver);
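This hunk, like many that follow, appears to be part of the tree-wide removal of the CONFIG_HOTPLUG section annotations: __devinit, __devexit, __devinitdata, __devinitconst and the __devexit_p() wrapper are dropped, leaving plain function definitions and direct .remove assignments. The resulting shape, in outline (foo_* names are placeholders):

	static int foo_probe(struct platform_device *pdev)	/* was __devinit */
	{
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)	/* was __devexit */
	{
		return 0;
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,	/* was __devexit_p(foo_remove) */
		.driver	= { .name = "foo" },
	};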
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 82a8de148a8f..a0c621421ee8 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -651,10 +651,9 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->ioaddr) {
- dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
goto err_req_regs;
}
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 5ba4605e4f80..154f0e8e931c 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -766,7 +766,7 @@ static struct of_device_id wmt_mci_dt_ids[] = {
{ /* Sentinel */ },
};
-static int __devinit wmt_mci_probe(struct platform_device *pdev)
+static int wmt_mci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
@@ -892,7 +892,7 @@ fail1:
return ret;
}
-static int __devexit wmt_mci_remove(struct platform_device *pdev)
+static int wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 73fcbbeb78d0..03f2eb5627ec 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -291,7 +291,7 @@ config SSFDC
config SM_FTL
tristate "SmartMedia/xD new translation layer"
- depends on EXPERIMENTAL && BLOCK
+ depends on BLOCK
select MTD_BLKDEVS
select MTD_NAND_ECC
help
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 945393129952..7c057a05adb6 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -26,19 +26,16 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
-#include <linux/magic.h>
#include <linux/module.h>
+#include <uapi/linux/magic.h>
+
#define AR7_PARTS 4
#define ROOT_OFFSET 0xe0000
#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
-#ifndef SQUASHFS_MAGIC
-#define SQUASHFS_MAGIC 0x73717368
-#endif
-
struct ar7_bin_rec {
unsigned int checksum;
unsigned int length;
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 63d2a64331f7..6eeb84c81bc2 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -37,8 +37,7 @@
#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-#define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */
-#define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */
+#define BCM63XX_CFE_BLOCK_SIZE 0x10000 /* always at least 64KiB */
#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
@@ -79,7 +78,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
unsigned int rootfsaddr, kerneladdr, spareaddr;
unsigned int rootfslen, kernellen, sparelen, totallen;
unsigned int cfelen, nvramlen;
- int namelen = 0;
+ unsigned int cfe_erasesize;
int i;
u32 computed_crc;
bool rootfs_first = false;
@@ -87,8 +86,11 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
if (bcm63xx_detect_cfe(master))
return -EINVAL;
- cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE);
- nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE);
+ cfe_erasesize = max_t(uint32_t, master->erasesize,
+ BCM63XX_CFE_BLOCK_SIZE);
+
+ cfelen = cfe_erasesize;
+ nvramlen = cfe_erasesize;
/* Allocate memory for buffer */
buf = vmalloc(sizeof(struct bcm_tag));
@@ -121,7 +123,6 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
spareaddr = roundup(totallen, master->erasesize) + cfelen;
- sparelen = master->size - spareaddr - nvramlen;
if (rootfsaddr < kerneladdr) {
/* default Broadcom layout */
@@ -139,19 +140,15 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
rootfslen = 0;
rootfsaddr = 0;
spareaddr = cfelen;
- sparelen = master->size - cfelen - nvramlen;
}
+ sparelen = master->size - spareaddr - nvramlen;
/* Determine number of partitions */
- namelen = 8;
- if (rootfslen > 0) {
+ if (rootfslen > 0)
nrparts++;
- namelen += 6;
- }
- if (kernellen > 0) {
+
+ if (kernellen > 0)
nrparts++;
- namelen += 6;
- }
/* Ask kernel for more memory */
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
@@ -193,17 +190,16 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
parts[curpart].name = "nvram";
parts[curpart].offset = master->size - nvramlen;
parts[curpart].size = nvramlen;
+ curpart++;
/* Global partition "linux" to make easy firmware upgrade */
- curpart++;
parts[curpart].name = "linux";
parts[curpart].offset = cfelen;
parts[curpart].size = master->size - cfelen - nvramlen;
for (i = 0; i < nrparts; i++)
- pr_info("Partition %d is %s offset %lx and length %lx\n", i,
- parts[i].name, (long unsigned int)(parts[i].offset),
- (long unsigned int)(parts[i].size));
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
pr_info("Spare partition is offset %x and length %x\n", spareaddr,
sparelen);
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index e469b01d40d2..c219e3d098d9 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -225,7 +225,7 @@ config MTD_ABSENT
config MTD_XIP
bool "XIP aware MTD support"
- depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL && ARCH_MTD_XIP
+ depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && ARCH_MTD_XIP
default y if XIP_KERNEL
help
This allows MTD support to work with flash memory which is also
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 5ff5c4a16943..b86197286f24 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1536,8 +1536,20 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
UDELAY(map, chip, adr, 1);
}
- /* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ /*
+ * Recovery from write-buffer programming failures requires
+ * the write-to-buffer-reset sequence. Since the last part
+ * of the sequence also works as a normal reset, we can run
+ * the same commands regardless of why we are here.
+ * See e.g.
+ * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
+ */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
xip_enable(map, chip, adr);
/* FIXME - should have reset delay before continuing */
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index aed1b8a63c9f..c533f27d863f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -56,8 +56,8 @@
/* special size referring to all the remaining space in a partition */
-#define SIZE_REMAINING UINT_MAX
-#define OFFSET_CONTINUOUS UINT_MAX
+#define SIZE_REMAINING ULLONG_MAX
+#define OFFSET_CONTINUOUS ULLONG_MAX
struct cmdline_mtd_partition {
struct cmdline_mtd_partition *next;
@@ -89,7 +89,7 @@ static struct mtd_partition * newpart(char *s,
int extra_mem_size)
{
struct mtd_partition *parts;
- unsigned long size, offset = OFFSET_CONTINUOUS;
+ unsigned long long size, offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
@@ -104,7 +104,8 @@ static struct mtd_partition * newpart(char *s,
} else {
size = memparse(s, &s);
if (size < PAGE_SIZE) {
- printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
+ printk(KERN_ERR ERRP "partition size too small (%llx)\n",
+ size);
return ERR_PTR(-EINVAL);
}
}
@@ -296,7 +297,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
- unsigned long offset;
+ unsigned long long offset;
int i, err;
struct cmdline_mtd_partition *part;
const char *mtd_id = master->name;
@@ -308,48 +309,52 @@ static int parse_cmdline_partitions(struct mtd_info *master,
return err;
}
+ /*
+ * Search for the partition definition matching master->name.
+ * If master->name is not set, stop at first partition definition.
+ */
for (part = partitions; part; part = part->next) {
- if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) {
- for (i = 0, offset = 0; i < part->num_parts; i++) {
- if (part->parts[i].offset == OFFSET_CONTINUOUS)
- part->parts[i].offset = offset;
- else
- offset = part->parts[i].offset;
-
- if (part->parts[i].size == SIZE_REMAINING)
- part->parts[i].size = master->size - offset;
-
- if (part->parts[i].size == 0) {
- printk(KERN_WARNING ERRP
- "%s: skipping zero sized partition\n",
- part->mtd_id);
- part->num_parts--;
- memmove(&part->parts[i],
- &part->parts[i + 1],
- sizeof(*part->parts) * (part->num_parts - i));
- continue;
- }
-
- if (offset + part->parts[i].size > master->size) {
- printk(KERN_WARNING ERRP
- "%s: partitioning exceeds flash size, truncating\n",
- part->mtd_id);
- part->parts[i].size = master->size - offset;
- }
- offset += part->parts[i].size;
- }
-
- *pparts = kmemdup(part->parts,
- sizeof(*part->parts) * part->num_parts,
- GFP_KERNEL);
- if (!*pparts)
- return -ENOMEM;
-
- return part->num_parts;
+ if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
+ break;
+ }
+
+ if (!part)
+ return 0;
+
+ for (i = 0, offset = 0; i < part->num_parts; i++) {
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
+ part->parts[i].offset = offset;
+ else
+ offset = part->parts[i].offset;
+
+ if (part->parts[i].size == SIZE_REMAINING)
+ part->parts[i].size = master->size - offset;
+
+ if (part->parts[i].size == 0) {
+ printk(KERN_WARNING ERRP
+ "%s: skipping zero sized partition\n",
+ part->mtd_id);
+ part->num_parts--;
+ memmove(&part->parts[i], &part->parts[i + 1],
+ sizeof(*part->parts) * (part->num_parts - i));
+ continue;
}
+
+ if (offset + part->parts[i].size > master->size) {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
}
- return 0;
+ *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
+ GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ return part->num_parts;
}
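The rework above first locates the partition definition whose mtd_id matches master->name (or the first one when no name is set), returning 0 when nothing matches, and only then walks the parts to resolve OFFSET_CONTINUOUS and SIZE_REMAINING; offsets and sizes are also widened to unsigned long long so devices larger than 4 GiB parse correctly. A compressed sketch of the core of that loop, omitting the zero-size and truncation handling shown in full above:

	unsigned long long offset = 0;

	for (i = 0; i < part->num_parts; i++) {
		if (part->parts[i].offset == OFFSET_CONTINUOUS)
			part->parts[i].offset = offset;	/* pack after previous */
		else
			offset = part->parts[i].offset;

		if (part->parts[i].size == SIZE_REMAINING)
			part->parts[i].size = master->size - offset;

		offset += part->parts[i].size;
	}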
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 27f80cd8aef3..12311f506ca1 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -52,7 +52,7 @@ config MTD_MS02NV
config MTD_DATAFLASH
tristate "Support for AT45xxx DataFlash"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on SPI_MASTER
help
This enables access to AT45xxx DataFlash chips, using SPI.
Sometimes DataFlash chips are packaged inside MMC-format
@@ -81,7 +81,7 @@ config MTD_DATAFLASH_OTP
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on SPI_MASTER
help
This enables access to most modern SPI flash chips, used for
program and data storage. Series supported include Atmel AT26DF,
@@ -272,6 +272,7 @@ config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
select BCH_CONST_PARAMS
+ select BITREVERSE
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 2dc5a6f3fd57..4714584aa993 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -66,7 +66,7 @@ out:
return err;
}
-static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
+static int bcm47xxsflash_remove(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
@@ -77,7 +77,7 @@ static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
}
static struct platform_driver bcma_sflash_driver = {
- .remove = __devexit_p(bcm47xxsflash_remove),
+ .remove = bcm47xxsflash_remove,
.driver = {
.name = "bcma_sflash",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 681e2ee0f2d6..e081bfeaaf7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -62,6 +62,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
memset(page_address(page), 0xff, PAGE_SIZE);
set_page_dirty(page);
unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
break;
}
@@ -152,6 +153,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
memcpy(page_address(page) + offset, buf, cpylen);
set_page_dirty(page);
unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
}
page_cache_release(page);
@@ -433,7 +435,7 @@ static int __init block2mtd_init(void)
}
-static void __devexit block2mtd_exit(void)
+static void block2mtd_exit(void)
{
struct list_head *pos, *next;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index d34d83b8f9c2..8510ccb9c6f0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1440,7 +1440,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
oobdelta = mtd->ecclayout->oobavail;
break;
default:
- oobdelta = 0;
+ return -EINVAL;
}
if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
(ofs % DOC_LAYOUT_PAGE_SIZE))
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 706b847b46b3..88b3fd3e18a7 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -70,8 +70,6 @@ static unsigned long __initdata doc_locations[] = {
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
0xffffffff };
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 03838bab1f59..4eeeb2d7f6ea 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -73,14 +73,6 @@
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define MAX_CMD_SIZE 5
-#ifdef CONFIG_M25PXX_USE_FAST_READ
-#define OPCODE_READ OPCODE_FAST_READ
-#define FAST_READ_DUMMY_BYTE 1
-#else
-#define OPCODE_READ OPCODE_NORM_READ
-#define FAST_READ_DUMMY_BYTE 0
-#endif
-
#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
/****************************************************************************/
@@ -93,6 +85,7 @@ struct m25p {
u16 addr_width;
u8 erase_opcode;
u8 *command;
+ bool fast_read;
};
static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -168,6 +161,7 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
switch (JEDEC_MFR(jedec_id)) {
case CFI_MFR_MACRONIX:
+ case 0xEF /* winbond */:
flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
return spi_write(flash->spi, flash->command, 1);
default:
@@ -342,6 +336,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
struct m25p *flash = mtd_to_m25p(mtd);
struct spi_transfer t[2];
struct spi_message m;
+ uint8_t opcode;
pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
__func__, (u32)from, len);
@@ -354,7 +349,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
* Should add 1 byte DUMMY_BYTE.
*/
t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
+ t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
@@ -376,12 +371,14 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
/* Set up the write data buffer. */
- flash->command[0] = OPCODE_READ;
+ opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ;
+ flash->command[0] = opcode;
m25p_addr2cmd(flash, from, flash->command);
spi_sync(flash->spi, &m);
- *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
+ *retlen = m.actual_length - m25p_cmdsz(flash) -
+ (flash->fast_read ? 1 : 0);
mutex_unlock(&flash->lock);
@@ -664,7 +661,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
/* Micron */
- { "n25q128", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
/* Spansion -- single (large) sector size only, at least
@@ -745,6 +743,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
@@ -756,7 +756,7 @@ static const struct spi_device_id m25p_ids[] = {
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
-static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
+static const struct spi_device_id *jedec_probe(struct spi_device *spi)
{
int tmp;
u8 code = OPCODE_RDID;
@@ -801,7 +801,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
* matches what the READ command supports, at least until this driver
* understands FAST_READ (for clocks over 25 MHz).
*/
-static int __devinit m25p_probe(struct spi_device *spi)
+static int m25p_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct flash_platform_data *data;
@@ -809,9 +809,10 @@ static int __devinit m25p_probe(struct spi_device *spi)
struct flash_info *info;
unsigned i;
struct mtd_part_parser_data ppdata;
+ struct device_node __maybe_unused *np = spi->dev.of_node;
#ifdef CONFIG_MTD_OF_PARTS
- if (!of_device_is_available(spi->dev.of_node))
+ if (!of_device_is_available(np))
return -ENODEV;
#endif
@@ -863,7 +864,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash = kzalloc(sizeof *flash, GFP_KERNEL);
if (!flash)
return -ENOMEM;
- flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
+ flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0),
+ GFP_KERNEL);
if (!flash->command) {
kfree(flash);
return -ENOMEM;
@@ -920,6 +922,16 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->page_size = info->page_size;
flash->mtd.writebufsize = flash->page_size;
+ flash->fast_read = false;
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(np, "m25p,fast-read"))
+ flash->fast_read = true;
+#endif
+
+#ifdef CONFIG_M25PXX_USE_FAST_READ
+ flash->fast_read = true;
+#endif
+
if (info->addr_width)
flash->addr_width = info->addr_width;
else {
@@ -961,7 +973,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
}
-static int __devexit m25p_remove(struct spi_device *spi)
+static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = dev_get_drvdata(&spi->dev);
int status;
@@ -983,7 +995,7 @@ static struct spi_driver m25p80_driver = {
},
.id_table = m25p_ids,
.probe = m25p_probe,
- .remove = __devexit_p(m25p_remove),
+ .remove = m25p_remove,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 928fb0e6d73a..945c9f762349 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -618,9 +618,8 @@ static char *otp_setup(struct mtd_info *device, char revision)
/*
* Register DataFlash device with MTD subsystem.
*/
-static int __devinit
-add_dataflash_otp(struct spi_device *spi, char *name,
- int nr_pages, int pagesize, int pageoffset, char revision)
+static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
+ int pagesize, int pageoffset, char revision)
{
struct dataflash *priv;
struct mtd_info *device;
@@ -679,9 +678,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
return err;
}
-static inline int __devinit
-add_dataflash(struct spi_device *spi, char *name,
- int nr_pages, int pagesize, int pageoffset)
+static inline int add_dataflash(struct spi_device *spi, char *name,
+ int nr_pages, int pagesize, int pageoffset)
{
return add_dataflash_otp(spi, name, nr_pages, pagesize,
pageoffset, 0);
@@ -705,7 +703,7 @@ struct flash_info {
#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
};
-static struct flash_info __devinitdata dataflash_data [] = {
+static struct flash_info dataflash_data[] = {
/*
* NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
@@ -740,7 +738,7 @@ static struct flash_info __devinitdata dataflash_data [] = {
{ "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
};
-static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
+static struct flash_info *jedec_probe(struct spi_device *spi)
{
int tmp;
uint8_t code = OP_READ_ID;
@@ -823,7 +821,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
* AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
* AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
*/
-static int __devinit dataflash_probe(struct spi_device *spi)
+static int dataflash_probe(struct spi_device *spi)
{
int status;
struct flash_info *info;
@@ -897,7 +895,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
return status;
}
-static int __devexit dataflash_remove(struct spi_device *spi)
+static int dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = dev_get_drvdata(&spi->dev);
int status;
@@ -920,7 +918,7 @@ static struct spi_driver dataflash_driver = {
},
.probe = dataflash_probe,
- .remove = __devexit_p(dataflash_remove),
+ .remove = dataflash_remove,
/* FIXME: investigate suspend and resume... */
};
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index dcc3c9511530..8a82b8bc21e1 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -756,8 +756,8 @@ err_probe:
#ifdef CONFIG_OF
-static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
{
struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *pp = NULL;
@@ -799,8 +799,8 @@ static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
return 0;
}
#else
-static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
{
return -ENOSYS;
}
@@ -901,7 +901,7 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
* and do proper init for any found one.
* Returns 0 on success, non zero otherwise
*/
-static int __devinit spear_smi_probe(struct platform_device *pdev)
+static int spear_smi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_smi_plat_data *pdata = NULL;
@@ -949,10 +949,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->io_base = devm_request_and_ioremap(&pdev->dev, smi_base);
- if (!dev->io_base) {
- ret = -EIO;
- dev_err(&pdev->dev, "devm_request_and_ioremap fail\n");
+ dev->io_base = devm_ioremap_resource(&pdev->dev, smi_base);
+ if (IS_ERR(dev->io_base)) {
+ ret = PTR_ERR(dev->io_base);
goto err;
}
@@ -1016,7 +1015,7 @@ err:
*
* free all allocations and delete the partitions.
*/
-static int __devexit spear_smi_remove(struct platform_device *pdev)
+static int spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
@@ -1092,20 +1091,9 @@ static struct platform_driver spear_smi_driver = {
#endif
},
.probe = spear_smi_probe,
- .remove = __devexit_p(spear_smi_remove),
+ .remove = spear_smi_remove,
};
-
-static int spear_smi_init(void)
-{
- return platform_driver_register(&spear_smi_driver);
-}
-module_init(spear_smi_init);
-
-static void spear_smi_exit(void)
-{
- platform_driver_unregister(&spear_smi_driver);
-}
-module_exit(spear_smi_exit);
+module_platform_driver(spear_smi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index ab8a2f4c8d60..8091b0163694 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -64,7 +64,7 @@ struct flash_info {
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
-static struct flash_info __devinitdata sst25l_flash_info[] = {
+static struct flash_info sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
@@ -313,7 +313,7 @@ out:
return ret;
}
-static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
+static struct flash_info *sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
struct spi_message m;
@@ -353,7 +353,7 @@ static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
return flash_info;
}
-static int __devinit sst25l_probe(struct spi_device *spi)
+static int sst25l_probe(struct spi_device *spi)
{
struct flash_info *flash_info;
struct sst25l_flash *flash;
@@ -411,7 +411,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
return 0;
}
-static int __devexit sst25l_remove(struct spi_device *spi)
+static int sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
int ret;
@@ -428,7 +428,7 @@ static struct spi_driver sst25l_driver = {
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
- .remove = __devexit_p(sst25l_remove),
+ .remove = sst25l_remove,
};
module_spi_driver(sst25l_driver);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index df304868bebb..62ba82c396c2 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -358,13 +358,6 @@ config MTD_IXP2000
IXP2000 based board and would like to use the flash chips on it,
say 'Y'.
-config MTD_FORTUNET
- tristate "CFI Flash device mapped on the FortuNet board"
- depends on MTD_CFI && SA1100_FORTUNET
- help
- This enables access to the Flash on the FortuNet board. If you
- have such a board, say 'Y'.
-
config MTD_AUTCPU12
bool "NV-RAM mapping AUTCPU12 board"
depends on ARCH_AUTCPU12
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index a0240edd1961..4ded28711bc1 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
obj-$(CONFIG_MTD_IMPA7) += impa7.o
-obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index e2875d6fe129..f7207b0a76dc 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -100,8 +100,8 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
}
-static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int amd76xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
@@ -289,7 +289,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
}
-static void __devexit amd76xrom_remove_one (struct pci_dev *pdev)
+static void amd76xrom_remove_one(struct pci_dev *pdev)
{
struct amd76xrom_window *window = &amd76xrom_window;
@@ -347,4 +347,3 @@ module_exit(cleanup_amd76xrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
-
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 76fb594bb1d9..c3525d2a2fa8 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -16,6 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/types.h>
@@ -33,7 +34,7 @@ struct autcpu12_nvram_priv {
struct map_info map;
};
-static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
+static int autcpu12_nvram_probe(struct platform_device *pdev)
{
map_word tmp, save0, save1;
struct resource *res;
@@ -55,12 +56,10 @@ static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
priv->map.bankwidth = 4;
priv->map.phys = res->start;
priv->map.size = resource_size(res);
- priv->map.virt = devm_request_and_ioremap(&pdev->dev, res);
+ priv->map.virt = devm_ioremap_resource(&pdev->dev, res);
strcpy((char *)priv->map.name, res->name);
- if (!priv->map.virt) {
- dev_err(&pdev->dev, "failed to remap mem resource\n");
- return -EBUSY;
- }
+ if (IS_ERR(priv->map.virt))
+ return PTR_ERR(priv->map.virt);
simple_map_init(&priv->map);
@@ -105,7 +104,7 @@ static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
return -ENOMEM;
}
-static int __devexit autcpu12_nvram_remove(struct platform_device *pdev)
+static int autcpu12_nvram_remove(struct platform_device *pdev)
{
struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev);
@@ -121,7 +120,7 @@ static struct platform_driver autcpu12_nvram_driver = {
.owner = THIS_MODULE,
},
.probe = autcpu12_nvram_probe,
- .remove = __devexit_p(autcpu12_nvram_remove),
+ .remove = autcpu12_nvram_remove,
};
module_platform_driver(autcpu12_nvram_driver);
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index ef5cde84a8b3..f833edfaab79 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -30,7 +30,8 @@
#include <linux/io.h>
#include <asm/unaligned.h>
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "bfin-async-flash"
@@ -123,7 +124,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-static int __devinit bfin_flash_probe(struct platform_device *pdev)
+static int bfin_flash_probe(struct platform_device *pdev)
{
int ret;
struct physmap_flash_data *pdata = pdev->dev.platform_data;
@@ -172,7 +173,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit bfin_flash_remove(struct platform_device *pdev)
+static int bfin_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
gpio_free(state->enet_flash_pin);
@@ -184,7 +185,7 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
static struct platform_driver bfin_flash_driver = {
.probe = bfin_flash_probe,
- .remove = __devexit_p(bfin_flash_remove),
+ .remove = bfin_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 3d0e762fa5f2..586a1c77e48a 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
}
-static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ck804xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
@@ -320,7 +320,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
}
-static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
+static void ck804xrom_remove_one(struct pci_dev *pdev)
{
struct ck804xrom_window *window = &ck804xrom_window;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 08322b1c3e81..f784cf0caa13 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
pci_dev_put(window->pdev);
}
-static int __devinit esb2rom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int esb2rom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct esb2rom_window *window = &esb2rom_window;
@@ -378,13 +378,13 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
return 0;
}
-static void __devexit esb2rom_remove_one (struct pci_dev *pdev)
+static void esb2rom_remove_one(struct pci_dev *pdev)
{
struct esb2rom_window *window = &esb2rom_window;
esb2rom_cleanup(window);
}
-static struct pci_device_id esb2rom_pci_tbl[] __devinitdata = {
+static struct pci_device_id esb2rom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
deleted file mode 100644
index 956e2e4f30ea..000000000000
--- a/drivers/mtd/maps/fortunet.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/* fortunet.c memory map
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#define MAX_NUM_REGIONS 4
-#define MAX_NUM_PARTITIONS 8
-
-#define DEF_WINDOW_ADDR_PHY 0x00000000
-#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
-
-#define MTD_FORTUNET_PK "MTD FortuNet: "
-
-#define MAX_NAME_SIZE 128
-
-struct map_region
-{
- int window_addr_physical;
- int altbankwidth;
- struct map_info map_info;
- struct mtd_info *mymtd;
- struct mtd_partition parts[MAX_NUM_PARTITIONS];
- char map_name[MAX_NAME_SIZE];
- char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
-};
-
-static struct map_region map_regions[MAX_NUM_REGIONS];
-static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
-static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
-
-
-
-struct map_info default_map = {
- .size = DEF_WINDOW_SIZE,
- .bankwidth = 4,
-};
-
-static char * __init get_string_option(char *dest,int dest_size,char *sor)
-{
- if(!dest_size)
- return sor;
- dest_size--;
- while(*sor)
- {
- if(*sor==',')
- {
- sor++;
- break;
- }
- else if(*sor=='\"')
- {
- sor++;
- while(*sor)
- {
- if(*sor=='\"')
- {
- sor++;
- break;
- }
- *dest = *sor;
- dest++;
- sor++;
- dest_size--;
- if(!dest_size)
- {
- *dest = 0;
- return sor;
- }
- }
- }
- else
- {
- *dest = *sor;
- dest++;
- sor++;
- dest_size--;
- if(!dest_size)
- {
- *dest = 0;
- return sor;
- }
- }
- }
- *dest = 0;
- return sor;
-}
-
-static int __init MTD_New_Region(char *line)
-{
- char string[MAX_NAME_SIZE];
- int params[6];
- get_options (get_string_option(string,sizeof(string),line),6,params);
- if(params[0]<1)
- {
- printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
- " name,region-number[,base,size,bankwidth,altbankwidth]\n");
- return 1;
- }
- if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
- {
- printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
- params[1],MAX_NUM_REGIONS-1);
- return 1;
- }
- memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
- memcpy(&map_regions[params[1]].map_info,
- &default_map,sizeof(map_regions[params[1]].map_info));
- map_regions_set[params[1]] = 1;
- map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[params[1]].altbankwidth = 2;
- map_regions[params[1]].mymtd = NULL;
- map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
- strcpy(map_regions[params[1]].map_info.name,string);
- if(params[0]>1)
- {
- map_regions[params[1]].window_addr_physical = params[2];
- }
- if(params[0]>2)
- {
- map_regions[params[1]].map_info.size = params[3];
- }
- if(params[0]>3)
- {
- map_regions[params[1]].map_info.bankwidth = params[4];
- }
- if(params[0]>4)
- {
- map_regions[params[1]].altbankwidth = params[5];
- }
- return 1;
-}
-
-static int __init MTD_New_Partition(char *line)
-{
- char string[MAX_NAME_SIZE];
- int params[4];
- get_options (get_string_option(string,sizeof(string),line),4,params);
- if(params[0]<3)
- {
- printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
- " name,region-number,size,offset\n");
- return 1;
- }
- if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
- {
- printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
- params[1],MAX_NUM_REGIONS-1);
- return 1;
- }
- if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
- {
- printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
- return 1;
- }
- map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
- map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
- strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
- map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
- params[2];
- map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
- params[3];
- map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
- map_regions_parts[params[1]]++;
- return 1;
-}
-
-__setup("MTD_Region=", MTD_New_Region);
-__setup("MTD_Partition=", MTD_New_Partition);
-
-/* Backwards-spelling-compatibility */
-__setup("MTD_Partion=", MTD_New_Partition);
-
-static int __init init_fortunet(void)
-{
- int ix,iy;
- for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
- {
- if(map_regions_parts[ix]&&(!map_regions_set[ix]))
- {
- printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
- ix);
- memset(&map_regions[ix],0,sizeof(map_regions[ix]));
- memcpy(&map_regions[ix].map_info,&default_map,
- sizeof(map_regions[ix].map_info));
- map_regions_set[ix] = 1;
- map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[ix].altbankwidth = 2;
- map_regions[ix].mymtd = NULL;
- map_regions[ix].map_info.name = map_regions[ix].map_name;
- strcpy(map_regions[ix].map_info.name,"FORTUNET");
- }
- if(map_regions_set[ix])
- {
- iy++;
- printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physically "
- " address %x size %x\n",
- map_regions[ix].map_info.name,
- map_regions[ix].window_addr_physical,
- map_regions[ix].map_info.size);
-
- map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical,
-
- map_regions[ix].map_info.virt =
- ioremap_nocache(
- map_regions[ix].window_addr_physical,
- map_regions[ix].map_info.size);
- if(!map_regions[ix].map_info.virt)
- {
- int j = 0;
- printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
- map_regions[ix].map_info.name);
- for (j = 0 ; j < ix; j++)
- iounmap(map_regions[j].map_info.virt);
- return -ENXIO;
- }
- simple_map_init(&map_regions[ix].map_info);
-
- printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %x\n",
- map_regions[ix].map_info.name,
- map_regions[ix].map_info.virt);
- map_regions[ix].mymtd = do_map_probe("cfi_probe",
- &map_regions[ix].map_info);
- if((!map_regions[ix].mymtd)&&(
- map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
- {
- printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
- "for %s flash.\n",
- map_regions[ix].map_info.name);
- map_regions[ix].map_info.bankwidth =
- map_regions[ix].altbankwidth;
- map_regions[ix].mymtd = do_map_probe("cfi_probe",
- &map_regions[ix].map_info);
- }
- map_regions[ix].mymtd->owner = THIS_MODULE;
- mtd_device_register(map_regions[ix].mymtd,
- map_regions[ix].parts,
- map_regions_parts[ix]);
- }
- }
- if(iy)
- return 0;
- return -ENXIO;
-}
-
-static void __exit cleanup_fortunet(void)
-{
- int ix;
- for(ix=0;ix<MAX_NUM_REGIONS;ix++)
- {
- if(map_regions_set[ix])
- {
- if( map_regions[ix].mymtd )
- {
- mtd_device_unregister(map_regions[ix].mymtd);
- map_destroy( map_regions[ix].mymtd );
- }
- iounmap((void *)map_regions[ix].map_info.virt);
- }
- }
-}
-
-module_init(init_fortunet);
-module_exit(cleanup_fortunet);
-
-MODULE_AUTHOR("FortuNet, Inc.");
-MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index e4de96ba52b3..7b643de2500b 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -26,7 +26,8 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "gpio-addr-flash"
#define PFX DRIVER_NAME ": "
@@ -142,7 +143,8 @@ static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
*
* See gf_copy_from() caveat.
*/
-static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void gf_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
{
struct async_state *state = gf_map_info_to_state(map);
@@ -185,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
* ...
* };
*/
-static int __devinit gpio_flash_probe(struct platform_device *pdev)
+static int gpio_flash_probe(struct platform_device *pdev)
{
size_t i, arr_size;
struct physmap_flash_data *pdata;
@@ -258,7 +260,7 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit gpio_flash_remove(struct platform_device *pdev)
+static int gpio_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
size_t i = 0;
@@ -273,7 +275,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
static struct platform_driver gpio_flash_driver = {
.probe = gpio_flash_probe,
- .remove = __devexit_p(gpio_flash_remove),
+ .remove = gpio_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6689dcb3124d..c7478e18f485 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -84,8 +84,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
}
-static int __devinit ichxrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ichxrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct ichxrom_window *window = &ichxrom_window;
@@ -315,13 +315,13 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
}
-static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
+static void ichxrom_remove_one(struct pci_dev *pdev)
{
struct ichxrom_window *window = &ichxrom_window;
ichxrom_cleanup(window);
}
-static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
+static struct pci_device_id ichxrom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 93f03175c82d..b14053b25026 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -63,24 +63,24 @@ struct vr_nor_mtd {
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
-static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
+static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
mtd_device_unregister(p->info);
}
-static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
+static int vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
}
-static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
+static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
map_destroy(p->info);
}
-static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
+static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
static const char *probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
@@ -96,7 +96,7 @@ static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
return 0;
}
-static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
+static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
unsigned int exp_timing_cs0;
@@ -116,7 +116,7 @@ static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
* Initialize the map_info structure and map the flash.
* Returns 0 on success, nonzero otherwise.
*/
-static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
+static int vr_nor_init_maps(struct vr_nor_mtd *p)
{
unsigned long csr_phys, csr_len;
unsigned long win_phys, win_len;
@@ -176,7 +176,7 @@ static struct pci_device_id vr_nor_pci_ids[] = {
{0,}
};
-static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
+static void vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
@@ -189,8 +189,7 @@ static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
pci_disable_device(dev);
}
-static int __devinit
-vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vr_nor_mtd *p = NULL;
unsigned int exp_timing_cs0;
@@ -256,7 +255,7 @@ vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
static struct pci_driver vr_nor_pci_driver = {
.name = DRV_NAME,
.probe = vr_nor_pci_probe,
- .remove = __devexit_p(vr_nor_pci_remove),
+ .remove = vr_nor_pci_remove,
.id_table = vr_nor_pci_ids,
};
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index c03456f17004..d1da6ede3845 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -7,6 +7,7 @@
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -45,7 +46,7 @@ struct ltq_mtd {
};
static const char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = {
+static const char *ltq_probe_types[] = {
"cmdlinepart", "ofpart", NULL };
static map_word
@@ -109,7 +110,7 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static int __devinit
+static int
ltq_mtd_probe(struct platform_device *pdev)
{
struct mtd_part_parser_data ppdata;
@@ -136,10 +137,9 @@ ltq_mtd_probe(struct platform_device *pdev)
ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
ltq_mtd->map->phys = ltq_mtd->res->start;
ltq_mtd->map->size = resource_size(ltq_mtd->res);
- ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
- if (!ltq_mtd->map->virt) {
- dev_err(&pdev->dev, "failed to remap mem resource\n");
- err = -EBUSY;
+ ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
+ if (IS_ERR(ltq_mtd->map->virt)) {
+ err = PTR_ERR(ltq_mtd->map->virt);
goto err_out;
}
@@ -185,7 +185,7 @@ err_out:
return err;
}
-static int __devexit
+static int
ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
@@ -209,7 +209,7 @@ MODULE_DEVICE_TABLE(of, ltq_mtd_match);
static struct platform_driver ltq_mtd_driver = {
.probe = ltq_mtd_probe,
- .remove = __devexit_p(ltq_mtd_remove),
+ .remove = ltq_mtd_remove,
.driver = {
.name = "ltq-nor",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 3c7ad17fca78..ab0fead56b83 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -125,7 +125,7 @@ static int latch_addr_flash_remove(struct platform_device *dev)
return 0;
}
-static int __devinit latch_addr_flash_probe(struct platform_device *dev)
+static int latch_addr_flash_probe(struct platform_device *dev)
{
struct latch_addr_flash_data *latch_addr_data;
struct latch_addr_flash_info *info;
@@ -218,7 +218,7 @@ done:
static struct platform_driver latch_addr_flash_driver = {
.probe = latch_addr_flash_probe,
- .remove = __devexit_p(latch_addr_flash_remove),
+ .remove = latch_addr_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1c30c1a307f4..c3aebd5da5d6 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -253,8 +253,7 @@ static struct pci_device_id mtd_pci_ids[] = {
* Generic code follows.
*/
-static int __devinit
-mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
struct map_pci_info *map = NULL;
@@ -308,8 +307,7 @@ out:
return err;
}
-static void __devexit
-mtd_pci_remove(struct pci_dev *dev)
+static void mtd_pci_remove(struct pci_dev *dev)
{
struct mtd_info *mtd = pci_get_drvdata(dev);
struct map_pci_info *map = mtd->priv;
@@ -326,7 +324,7 @@ mtd_pci_remove(struct pci_dev *dev)
static struct pci_driver mtd_pci_driver = {
.name = "MTD PCI",
.probe = mtd_pci_probe,
- .remove = __devexit_p(mtd_pci_remove),
+ .remove = mtd_pci_remove,
.id_table = mtd_pci_ids,
};
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 6f19acadb06c..7901d72c9242 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -77,8 +77,8 @@ static int of_flash_remove(struct platform_device *dev)
/* Helper function to handle probing of the obsolete "direct-mapped"
* compatible binding, which has an extra "probe-type" property
* describing the type of flash probe necessary. */
-static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
- struct map_info *map)
+static struct mtd_info *obsolete_probe(struct platform_device *dev,
+ struct map_info *map)
{
struct device_node *dp = dev->dev.of_node;
const char *of_probe;
@@ -116,7 +116,7 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
information. */
static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
"ofpart", "ofoldpart", NULL };
-static const char ** __devinit of_get_probes(struct device_node *dp)
+static const char **of_get_probes(struct device_node *dp)
{
const char *cp;
int cplen;
@@ -145,14 +145,14 @@ static const char ** __devinit of_get_probes(struct device_node *dp)
return res;
}
-static void __devinit of_free_probes(const char **probes)
+static void of_free_probes(const char **probes)
{
if (probes != part_probe_types_def)
kfree(probes);
}
static struct of_device_id of_flash_match[];
-static int __devinit of_flash_probe(struct platform_device *dev)
+static int of_flash_probe(struct platform_device *dev)
{
const char **part_probe_types;
const struct of_device_id *match;
@@ -170,6 +170,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
resource_size_t res_size;
struct mtd_part_parser_data ppdata;
bool map_indirect;
+ const char *mtd_name = NULL;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
@@ -178,6 +179,8 @@ static int __devinit of_flash_probe(struct platform_device *dev)
reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+ of_property_read_string(dp, "linux,mtd-name", &mtd_name);
+
/*
* Get number of "reg" tuples. Scan for MTD devices on area's
* described by each "reg" region. This makes it possible (including
@@ -234,7 +237,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
goto err_out;
}
- info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.name = mtd_name ?: dev_name(&dev->dev);
info->list[i].map.phys = res.start;
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
@@ -282,6 +285,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
err = 0;
+ info->cmtd = NULL;
if (info->list_size == 1) {
info->cmtd = info->list[0].mtd;
} else if (info->list_size > 1) {
@@ -290,9 +294,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
*/
info->cmtd = mtd_concat_create(mtd_list, info->list_size,
dev_name(&dev->dev));
- if (info->cmtd == NULL)
- err = -ENXIO;
}
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+
if (err)
goto err_out;
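Note on the "?:" used in the map.name assignment above: it is the GNU C conditional-operator extension with the middle operand omitted, so a ?: b yields a when it is non-NULL/non-zero and b otherwise, evaluating a only once. A minimal illustration with hypothetical names, not code from this patch:

	const char *name = dt_name ?: dev_name(&pdev->dev);
	/* equivalent to: name = dt_name ? dt_name : dev_name(&pdev->dev);
	 * except that dt_name is evaluated only once */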
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 65bd1cd4d627..dc6df9abea0b 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -58,7 +58,7 @@ static void pismo_set_vpp(struct platform_device *pdev, int on)
pismo->vpp(pismo->vpp_data, on);
}
-static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
+static unsigned int pismo_width_to_bytes(unsigned int width)
{
width &= 15;
if (width > 2)
@@ -66,8 +66,8 @@ static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
return 1 << width;
}
-static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
- u8 addr, size_t size)
+static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr,
+ size_t size)
{
int ret;
struct i2c_msg msg[] = {
@@ -88,8 +88,9 @@ static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
return ret == ARRAY_SIZE(msg) ? size : -EIO;
}
-static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
- struct pismo_mem *region, const char *name, void *pdata, size_t psize)
+static int pismo_add_device(struct pismo_data *pismo, int i,
+ struct pismo_mem *region, const char *name,
+ void *pdata, size_t psize)
{
struct platform_device *dev;
struct resource res = { };
@@ -129,8 +130,8 @@ static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
return ret;
}
-static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
- struct pismo_mem *region)
+static int pismo_add_nor(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
{
struct physmap_flash_data data = {
.width = region->width,
@@ -143,8 +144,8 @@ static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
&data, sizeof(data));
}
-static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
- struct pismo_mem *region)
+static int pismo_add_sram(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
{
struct platdata_mtd_ram data = {
.bankwidth = region->width,
@@ -154,8 +155,8 @@ static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
&data, sizeof(data));
}
-static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
- const struct pismo_cs_block *cs, phys_addr_t base)
+static void pismo_add_one(struct pismo_data *pismo, int i,
+ const struct pismo_cs_block *cs, phys_addr_t base)
{
struct device *dev = &pismo->client->dev;
struct pismo_mem region;
@@ -197,7 +198,7 @@ static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
}
}
-static int __devexit pismo_remove(struct i2c_client *client)
+static int pismo_remove(struct i2c_client *client)
{
struct pismo_data *pismo = i2c_get_clientdata(client);
int i;
@@ -210,8 +211,8 @@ static int __devexit pismo_remove(struct i2c_client *client)
return 0;
}
-static int __devinit pismo_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pismo_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct pismo_pdata *pdata = client->dev.platform_data;
@@ -267,7 +268,7 @@ static struct i2c_driver pismo_driver = {
.owner = THIS_MODULE,
},
.probe = pismo_probe,
- .remove = __devexit_p(pismo_remove),
+ .remove = pismo_remove,
.id_table = pismo_id,
};
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 81884c277405..43e3dbb976d9 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -49,7 +49,7 @@ struct pxa2xx_flash_info {
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
+static int pxa2xx_flash_probe(struct platform_device *pdev)
{
struct flash_platform_data *flash = pdev->dev.platform_data;
struct pxa2xx_flash_info *info;
@@ -105,7 +105,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
+static int pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -139,7 +139,7 @@ static struct platform_driver pxa2xx_flash_driver = {
.owner = THIS_MODULE,
},
.probe = pxa2xx_flash_probe,
- .remove = __devexit_p(pxa2xx_flash_remove),
+ .remove = pxa2xx_flash_remove,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index a675bdbcb0fe..f694417cf7e6 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -149,8 +149,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
plat->exit();
}
-static struct sa_info *__devinit
-sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
+static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
+ struct flash_platform_data *plat)
{
struct sa_info *info;
int nr, size, i, ret = 0;
@@ -246,7 +246,7 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
+static int sa1100_mtd_probe(struct platform_device *pdev)
{
struct flash_platform_data *plat = pdev->dev.platform_data;
struct sa_info *info;
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 9dcbc684abdb..c77b68c9412f 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -69,8 +69,7 @@ static struct map_info scb2_map = {
};
static int region_fail;
-static int __devinit
-scb2_fixup_mtd(struct mtd_info *mtd)
+static int scb2_fixup_mtd(struct mtd_info *mtd)
{
int i;
int done = 0;
@@ -133,8 +132,8 @@ scb2_fixup_mtd(struct mtd_info *mtd)
/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
#define CSB5_FCR 0x41
#define CSB5_FCR_DECODE_ALL 0x0e
-static int __devinit
-scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int scb2_flash_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
u8 reg;
@@ -197,8 +196,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
return 0;
}
-static void __devexit
-scb2_flash_remove(struct pci_dev *dev)
+static void scb2_flash_remove(struct pci_dev *dev)
{
if (!scb2_mtd)
return;
@@ -231,7 +229,7 @@ static struct pci_driver scb2_flash_driver = {
.name = "Intel SCB2 BIOS Flash",
.id_table = scb2_flash_pci_ids,
.probe = scb2_flash_probe,
- .remove = __devexit_p(scb2_flash_remove),
+ .remove = scb2_flash_remove,
};
module_pci_driver(scb2_flash_driver);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 175e537b444f..d467f3b11c96 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -108,7 +108,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
return 0;
}
-static int __devinit uflash_probe(struct platform_device *op)
+static int uflash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -121,7 +121,7 @@ static int __devinit uflash_probe(struct platform_device *op)
return uflash_devinit(op, dp);
}
-static int __devexit uflash_remove(struct platform_device *op)
+static int uflash_remove(struct platform_device *op)
{
struct uflash_dev *up = dev_get_drvdata(&op->dev);
@@ -155,7 +155,7 @@ static struct platform_driver uflash_driver = {
.of_match_table = uflash_match,
},
.probe = uflash_probe,
- .remove = __devexit_p(uflash_remove),
+ .remove = uflash_remove,
};
module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 2e2b0945edc7..6b223cfe92b7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -596,7 +596,7 @@ fail_name:
}
/* Handles very basic info about the flash, queries for details */
-static int __devinit vmu_connect(struct maple_device *mdev)
+static int vmu_connect(struct maple_device *mdev)
{
unsigned long test_flash_data, basic_flash_data;
int c, error;
@@ -690,7 +690,7 @@ fail_nomem:
return error;
}
-static void __devexit vmu_disconnect(struct maple_device *mdev)
+static void vmu_disconnect(struct maple_device *mdev)
{
struct memcard *card;
struct mdev_part *mpart;
@@ -772,7 +772,7 @@ static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
}
-static int __devinit probe_maple_vmu(struct device *dev)
+static int probe_maple_vmu(struct device *dev)
{
int error;
struct maple_device *mdev = to_maple_dev(dev);
@@ -789,7 +789,7 @@ static int __devinit probe_maple_vmu(struct device *dev)
return 0;
}
-static int __devexit remove_maple_vmu(struct device *dev)
+static int remove_maple_vmu(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
@@ -802,7 +802,7 @@ static struct maple_driver vmu_flash_driver = {
.drv = {
.name = "Dreamcast_visual_memory",
.probe = probe_maple_vmu,
- .remove = __devexit_p(remove_maple_vmu),
+ .remove = remove_maple_vmu,
},
};
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f1f06715d4e0..5ad39bb5ab4c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,7 +32,6 @@
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <asm/uaccess.h>
#include "mtdcore.h"
@@ -121,16 +120,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
- if (kthread_should_stop())
- return 1;
-
return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
-static int mtd_blktrans_thread(void *arg)
+static void mtd_blktrans_work(struct work_struct *work)
{
- struct mtd_blktrans_dev *dev = arg;
+ struct mtd_blktrans_dev *dev =
+ container_of(work, struct mtd_blktrans_dev, work);
struct mtd_blktrans_ops *tr = dev->tr;
struct request_queue *rq = dev->rq;
struct request *req = NULL;
@@ -138,7 +135,7 @@ static int mtd_blktrans_thread(void *arg)
spin_lock_irq(rq->queue_lock);
- while (!kthread_should_stop()) {
+ while (1) {
int res;
dev->bg_stop = false;
@@ -156,15 +153,7 @@ static int mtd_blktrans_thread(void *arg)
background_done = !dev->bg_stop;
continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (kthread_should_stop())
- set_current_state(TASK_RUNNING);
-
- spin_unlock_irq(rq->queue_lock);
- schedule();
- spin_lock_irq(rq->queue_lock);
- continue;
+ break;
}
spin_unlock_irq(rq->queue_lock);
@@ -185,8 +174,6 @@ static int mtd_blktrans_thread(void *arg)
__blk_end_request_all(req, -EIO);
spin_unlock_irq(rq->queue_lock);
-
- return 0;
}
static void mtd_blktrans_request(struct request_queue *rq)
@@ -199,10 +186,8 @@ static void mtd_blktrans_request(struct request_queue *rq)
if (!dev)
while ((req = blk_fetch_request(rq)) != NULL)
__blk_end_request_all(req, -ENODEV);
- else {
- dev->bg_stop = true;
- wake_up_process(dev->thread);
- }
+ else
+ queue_work(dev->wq, &dev->work);
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -325,7 +310,7 @@ unlock:
return ret;
}
-static const struct block_device_operations mtd_blktrans_ops = {
+static const struct block_device_operations mtd_block_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
@@ -401,7 +386,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
- gd->fops = &mtd_blktrans_ops;
+ gd->fops = &mtd_block_ops;
if (tr->part_bits)
if (new->devnum < 26)
@@ -437,14 +422,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->queue = new->rq;
- /* Create processing thread */
- /* TODO: workqueue ? */
- new->thread = kthread_run(mtd_blktrans_thread, new,
- "%s%d", tr->name, new->mtd->index);
- if (IS_ERR(new->thread)) {
- ret = PTR_ERR(new->thread);
+ /* Create processing workqueue */
+ new->wq = alloc_workqueue("%s%d", 0, 0,
+ tr->name, new->mtd->index);
+ if (!new->wq)
goto error4;
- }
+ INIT_WORK(&new->work, mtd_blktrans_work);
+
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@@ -484,9 +468,8 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
/* Stop new requests to arrive */
del_gendisk(old->disk);
-
- /* Stop the thread */
- kthread_stop(old->thread);
+ /* Stop workqueue. This will perform any pending request. */
+ destroy_workqueue(old->wq);
/* Kill current requests */
spin_lock_irqsave(&old->queue_lock, flags);
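The mtd_blkdevs hunks above replace the per-device kthread with a workqueue. The general shape of that conversion, shown as a stand-alone sketch with hypothetical names (not the mtd_blktrans code itself):

#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* drain the request queue here, then simply return;
	 * no kthread_should_stop()/schedule() loop is needed */
}

static int my_setup(struct my_dev *dev, int index)
{
	dev->wq = alloc_workqueue("mydev%d", 0, 0, index);	/* cf. "%s%d", tr->name, index above */
	if (!dev->wq)
		return -ENOMEM;
	INIT_WORK(&dev->work, my_work_fn);
	return 0;
}

/* on each new request:  queue_work(dev->wq, &dev->work);
 * on device removal:    destroy_workqueue(dev->wq);  (runs any pending work first) */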
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f5b3f91fa1cc..97bb8f6304d4 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -271,7 +271,7 @@ static void find_next_position(struct mtdoops_context *cxt)
if (count[0] == 0xffffffff && count[1] == 0xffffffff)
mark_page_unused(cxt, page);
- if (count[0] == 0xffffffff)
+ if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
continue;
if (maxcount == 0xffffffff) {
maxcount = count[0];
@@ -289,14 +289,13 @@ static void find_next_position(struct mtdoops_context *cxt)
}
}
if (maxcount == 0xffffffff) {
- cxt->nextpage = 0;
- cxt->nextcount = 1;
- schedule_work(&cxt->work_erase);
- return;
+ cxt->nextpage = cxt->oops_pages - 1;
+ cxt->nextcount = 0;
+ }
+ else {
+ cxt->nextpage = maxpos;
+ cxt->nextcount = maxcount;
}
-
- cxt->nextpage = maxpos;
- cxt->nextcount = maxcount;
mtdoops_inc_counter(cxt);
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index dae191b3c081..81bf5e52601e 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -50,16 +50,30 @@ config MTD_NAND_MUSEUM_IDS
of these chips were reused by later, larger chips.
config MTD_NAND_DENALI
- depends on PCI
+ tristate "Support Denali NAND controller"
+ help
+ Enable support for the Denali NAND controller. This should be
+ combined with either the PCI or platform drivers to provide device
+ registration.
+
+config MTD_NAND_DENALI_PCI
tristate "Support Denali NAND controller on Intel Moorestown"
+ depends on PCI && MTD_NAND_DENALI
help
Enable the driver for NAND flash on Intel Moorestown, using the
Denali NAND controller core.
-
+
+config MTD_NAND_DENALI_DT
+ tristate "Support Denali NAND controller as a DT device"
+ depends on HAVE_CLK && MTD_NAND_DENALI
+ help
+ Enable the driver for NAND flash on platforms using a Denali NAND
+ controller as a DT device.
+
config MTD_NAND_DENALI_SCRATCH_REG_ADDR
hex "Denali NAND size scratch register address"
default "0xFF108018"
- depends on MTD_NAND_DENALI
+ depends on MTD_NAND_DENALI_PCI
help
Some platforms place the NAND chip size in a scratch register
because (some versions of) the driver aren't able to automatically
@@ -246,8 +260,7 @@ config MTD_NAND_S3C2410_CLKSTOP
approximately 5mA of power when there is nothing happening.
config MTD_NAND_DISKONCHIP
- tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
depends on HAS_IOMEM
select REED_SOLOMON
select REED_SOLOMON_DEC16
@@ -317,8 +330,8 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
parameter "inftl_bbt_write=1".
config MTD_NAND_DOCG4
- tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
- depends on EXPERIMENTAL && HAS_IOMEM
+ tristate "Support for DiskOnChip G4"
+ depends on HAS_IOMEM
select BCH
select BITREVERSE
help
@@ -433,6 +446,14 @@ config MTD_NAND_GPMI_NAND
block, such as SD card. So pay attention to it when you enable
the GPMI.
+config MTD_NAND_BCM47XXNFLASH
+ tristate "Support for NAND flash on BCM4706 BCMA bus"
+ depends on BCMA_NFLASH
+ help
+	  The BCMA bus can have various flash memories attached; they are
+	  registered by bcma as platform devices. This enables the driver for
+	  NAND flash memories. For now only BCM4706 is supported.
+
config MTD_NAND_PLATFORM
tristate "Support for generic platform NAND driver"
depends on HAS_IOMEM
@@ -499,12 +520,6 @@ config MTD_NAND_MXC
This enables the driver for the NAND flash controller on the
MXC processors.
-config MTD_NAND_NOMADIK
- tristate "ST Nomadik 8815 NAND support"
- depends on ARCH_NOMADIK
- help
- Driver for the NAND flash controller on the Nomadik, with ECC.
-
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
depends on SUPERH || ARCH_SHMOBILE
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6c7f2b3ca8ae..d76d91205691 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
+obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
+obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
@@ -45,11 +47,11 @@ obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
-obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 9e7723aa7acc..f1d71cdc8aac 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -173,7 +173,7 @@ static const struct gpio _mandatory_gpio[] = {
/*
* Main initialization routine
*/
-static int __devinit ams_delta_init(struct platform_device *pdev)
+static int ams_delta_init(struct platform_device *pdev)
{
struct nand_chip *this;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -270,7 +270,7 @@ out_free:
/*
* Clean up routine
*/
-static int __devexit ams_delta_cleanup(struct platform_device *pdev)
+static int ams_delta_cleanup(struct platform_device *pdev)
{
void __iomem *io_base = platform_get_drvdata(pdev);
@@ -289,7 +289,7 @@ static int __devexit ams_delta_cleanup(struct platform_device *pdev)
static struct platform_driver ams_delta_nand_driver = {
.probe = ams_delta_init,
- .remove = __devexit_p(ams_delta_cleanup),
+ .remove = ams_delta_cleanup,
.driver = {
.name = "ams-delta-nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 92623ac2015a..c516a9408087 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -331,14 +331,14 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
* 12-bits 20-bytes 21-bytes
* 24-bits 39-bytes 42-bytes
*/
-static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size)
+static int pmecc_get_ecc_bytes(int cap, int sector_size)
{
int m = 12 + sector_size / 512;
return (m * cap + 7) / 8;
}
-static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
- int oobsize, int ecc_len)
+static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
+ int oobsize, int ecc_len)
{
int i;
@@ -353,7 +353,7 @@ static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
oobsize - ecc_len - layout->oobfree[0].offset;
}
-static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
{
int table_size;
@@ -375,7 +375,7 @@ static void pmecc_data_free(struct atmel_nand_host *host)
kfree(host->pmecc_delta);
}
-static int __devinit pmecc_data_alloc(struct atmel_nand_host *host)
+static int pmecc_data_alloc(struct atmel_nand_host *host)
{
const int cap = host->pmecc_corr_cap;
@@ -724,6 +724,7 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
struct atmel_nand_host *host = nand_chip->priv;
int i, err_nbr, eccbytes;
uint8_t *buf_pos;
+ int total_err = 0;
eccbytes = nand_chip->ecc.bytes;
for (i = 0; i < eccbytes; i++)
@@ -751,12 +752,13 @@ normal_check:
pmecc_correct_data(mtd, buf_pos, ecc, i,
host->pmecc_bytes_per_sector, err_nbr);
mtd->ecc_stats.corrected += err_nbr;
+ total_err += err_nbr;
}
}
pmecc_stat >>= 1;
}
- return 0;
+ return total_err;
}
static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
@@ -768,6 +770,7 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
uint32_t *eccpos = chip->ecc.layout->eccpos;
uint32_t stat;
unsigned long end_time;
+ int bitflips = 0;
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
@@ -790,11 +793,14 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
}
stat = pmecc_readl_relaxed(host->ecc, ISR);
- if (stat != 0)
- if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0)
- return -EIO;
+ if (stat != 0) {
+ bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
+ if (bitflips < 0)
+ /* uncorrectable errors */
+ return 0;
+ }
- return 0;
+ return bitflips;
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -1206,8 +1212,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
}
#if defined(CONFIG_OF)
-static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
- struct device_node *np)
+static int atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
{
u32 val, table_offset;
u32 offset[2];
@@ -1293,8 +1299,8 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
return 0;
}
#else
-static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
- struct device_node *np)
+static int atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
{
return -EINVAL;
}
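The atmel_nand hunks above switch the PMECC read path to the NAND core convention that ecc.read_page() returns the number of corrected bitflips rather than 0/-EIO, with uncorrectable pages reported through mtd->ecc_stats.failed. A hedged sketch of that contract, using made-up names and a simplified signature:

static int example_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	int corrected = example_hw_read_and_correct(mtd, chip, buf, page);

	if (corrected < 0) {
		mtd->ecc_stats.failed++;	/* uncorrectable: count it ... */
		return 0;			/* ... but still return >= 0 */
	}
	return corrected;	/* the core folds this into max_bitflips */
}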
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5c47b200045a..217459d02b2f 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -382,7 +382,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
while(!this->dev_ready(mtd));
}
-static int __devinit find_nand_cs(unsigned long nand_base)
+static int find_nand_cs(unsigned long nand_base)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
@@ -403,7 +403,7 @@ static int __devinit find_nand_cs(unsigned long nand_base)
return -ENODEV;
}
-static int __devinit au1550nd_probe(struct platform_device *pdev)
+static int au1550nd_probe(struct platform_device *pdev)
{
struct au1550nd_platdata *pd;
struct au1550nd_ctx *ctx;
@@ -491,7 +491,7 @@ out1:
return ret;
}
-static int __devexit au1550nd_remove(struct platform_device *pdev)
+static int au1550nd_remove(struct platform_device *pdev)
{
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -509,7 +509,7 @@ static struct platform_driver au1550nd_driver = {
.owner = THIS_MODULE,
},
.probe = au1550nd_probe,
- .remove = __devexit_p(au1550nd_remove),
+ .remove = au1550nd_remove,
};
module_platform_driver(au1550nd_driver);
diff --git a/drivers/mtd/nand/bcm47xxnflash/Makefile b/drivers/mtd/nand/bcm47xxnflash/Makefile
new file mode 100644
index 000000000000..f05b119e134b
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/Makefile
@@ -0,0 +1,4 @@
+bcm47xxnflash-y += main.o
+bcm47xxnflash-y += ops_bcm4706.o
+
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 000000000000..0bdb2ce4da75
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,22 @@
+#ifndef __BCM47XXNFLASH_H
+#define __BCM47XXNFLASH_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+
+struct bcm47xxnflash {
+ struct bcma_drv_cc *cc;
+
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+
+ unsigned curr_command;
+ int curr_page_addr;
+ int curr_column;
+
+ u8 id_data[8];
+};
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
+
+#endif /* BCM47XXNFLASH */
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
new file mode 100644
index 000000000000..8363a9a5fa3f
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -0,0 +1,108 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxnflash.h"
+
+MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rafał Miłecki");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxnflash_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxnflash *b47n;
+ int err = 0;
+
+ b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
+ if (!b47n) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ b47n->nand_chip.priv = b47n;
+ b47n->mtd.owner = THIS_MODULE;
+ b47n->mtd.priv = &b47n->nand_chip; /* Required */
+ b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+
+ if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ err = bcm47xxnflash_ops_bcm4706_init(b47n);
+ } else {
+ pr_err("Device not supported\n");
+ err = -ENOTSUPP;
+ }
+ if (err) {
+ pr_err("Initialization failed: %d\n", err);
+ goto err_init;
+ }
+
+ err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ goto err_dev_reg;
+ }
+
+ return 0;
+
+err_dev_reg:
+err_init:
+ kfree(b47n);
+out:
+ return err;
+}
+
+static int bcm47xxnflash_remove(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+
+ if (nflash->mtd)
+ mtd_device_unregister(nflash->mtd);
+
+ return 0;
+}
+
+static struct platform_driver bcm47xxnflash_driver = {
+ .remove = bcm47xxnflash_remove,
+ .driver = {
+ .name = "bcma_nflash",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bcm47xxnflash_init(void)
+{
+ int err;
+
+ /*
+	 * Platform device "bcma_nflash" exists on SoCs and is registered very
+	 * early; it won't be added at runtime (hence platform_driver_probe).
+ */
+ err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
+ if (err)
+		pr_err("Failed to register NAND flash driver: %d\n", err);
+
+ return err;
+}
+
+static void __exit bcm47xxnflash_exit(void)
+{
+ platform_driver_unregister(&bcm47xxnflash_driver);
+}
+
+module_init(bcm47xxnflash_init);
+module_exit(bcm47xxnflash_exit);
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 000000000000..595de4012e71
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,413 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxnflash.h"
+
+/* Broadcom uses 1'000'000 but it seems to be too many. Tests on the WNDR4500
+ * have shown ~1000 retries as the maximum. */
+#define NFLASH_READY_RETRIES 10000
+
+#define NFLASH_SECTOR_SIZE 512
+
+#define NCTL_CMD0 0x00010000
+#define NCTL_CMD1W 0x00080000
+#define NCTL_READ 0x00100000
+#define NCTL_WRITE 0x00200000
+#define NCTL_SPECADDR 0x01000000
+#define NCTL_READY 0x04000000
+#define NCTL_ERR 0x08000000
+#define NCTL_CSA 0x40000000
+#define NCTL_START 0x80000000
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
+{
+ return ((ns * 1000 * clock) / 1000000) + 1;
+}
+
+static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
+{
+ int i = 0;
+
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
+ i = 0;
+ break;
+ }
+ }
+ if (i) {
+ pr_err("NFLASH control command not ready!\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
+{
+ int i;
+
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
+ BCMA_CC_NFLASH_CTL_ERR) {
+ pr_err("Error on polling\n");
+ return -EBUSY;
+ } else {
+ return 0;
+ }
+ }
+ }
+
+ pr_err("Polling timeout!\n");
+ return -EBUSY;
+}
+
+/**************************************************
+ * R/W
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ u32 ctlcode;
+ u32 *dest = (u32 *)buf;
+ int i;
+ int toread;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift, it may be bigger
+ * when accessing OOB */
+
+ while (len) {
+ /* We can read maximum of 0x200 bytes at once */
+ toread = min(len, 0x200);
+
+ /* Set page and column */
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to read */
+ ctlcode = NCTL_CSA | NCTL_CMD1W | 0x00040000 | 0x00020000 |
+ NCTL_CMD0;
+ ctlcode |= NAND_CMD_READSTART << 8;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
+ return;
+ if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
+ return;
+
+ /* Eventually read some data :) */
+ for (i = 0; i < toread; i += 4, dest++) {
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
+ if (i == toread - 4) /* Last read goes without that */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode))
+ return;
+ *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
+ }
+
+ b47n->curr_column += toread;
+ len -= toread;
+ }
+}
+
+static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+
+ u32 ctlcode;
+ const u32 *data = (u32 *)buf;
+ int i;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift, it may be bigger
+ * when accessing OOB */
+
+ for (i = 0; i < len; i += 4, data++) {
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
+
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
+		if (i == len - 4)	/* Last write goes without that */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
+ pr_err("%s ctl_cmd didn't work!\n", __func__);
+ return;
+ }
+ }
+
+ b47n->curr_column += len;
+}
+
+/**************************************************
+ * NAND chip ops
+ **************************************************/
+
+/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
+ int chip)
+{
+ return;
+}
+
+/*
+ * Default nand_command and nand_command_lp don't match BCM4706 hardware layout.
+ * For example, reading chip id is performed in a non-standard way.
+ * Setting column and page is also handled differently; we use special
+ * registers of the ChipCommon core. Hacking cmd_ctrl to understand and convert
+ * standard commands would be much more complicated.
+ */
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
+ unsigned command, int column,
+ int page_addr)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 ctlcode;
+ int i;
+
+ if (column != -1)
+ b47n->curr_column = column;
+ if (page_addr != -1)
+ b47n->curr_page_addr = page_addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ pr_warn("Chip reset not implemented yet\n");
+ break;
+ case NAND_CMD_READID:
+ ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
+ ctlcode |= NAND_CMD_READID;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+
+ /*
+		 * Reading is special: the last read has to go without the
+		 * NCTL_CSA bit. We don't know how many reads the NAND
+		 * subsystem is going to perform, so cache everything.
+ */
+ for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
+ ctlcode = NCTL_CSA | NCTL_READ;
+ if (i == ARRAY_SIZE(b47n->id_data) - 1)
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+ b47n->id_data[i] =
+ bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
+ & 0xFF;
+ }
+
+ break;
+ case NAND_CMD_STATUS:
+ ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("STATUS command error\n");
+ break;
+ case NAND_CMD_READ0:
+ break;
+ case NAND_CMD_READOOB:
+ if (page_addr != -1)
+ b47n->curr_column += mtd->writesize;
+ break;
+ case NAND_CMD_ERASE1:
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+ ctlcode = 0x00040000 | NCTL_CMD1W | NCTL_CMD0 |
+ NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("ERASE1 failed\n");
+ break;
+ case NAND_CMD_ERASE2:
+ break;
+ case NAND_CMD_SEQIN:
+ /* Set page and column */
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to write */
+ ctlcode = 0x40000000 | 0x00040000 | 0x00020000 | 0x00010000;
+ ctlcode |= NAND_CMD_SEQIN;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("SEQIN failed\n");
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, 0x00010000 |
+ NAND_CMD_PAGEPROG))
+ pr_err("PAGEPROG failed\n");
+ if (bcm47xxnflash_ops_bcm4706_poll(cc))
+ pr_err("PAGEPROG not ready\n");
+ break;
+ default:
+ pr_err("Command 0x%X unsupported\n", command);
+ break;
+ }
+ b47n->curr_command = command;
+}
+
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 tmp = 0;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READID:
+ if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
+ pr_err("Requested invalid id_data: %d\n",
+ b47n->curr_column);
+ return 0;
+ }
+ return b47n->id_data[b47n->curr_column++];
+ case NAND_CMD_STATUS:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
+ return 0;
+ return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
+ return tmp & 0xFF;
+ }
+
+ pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
+ return 0;
+}
+
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
+ uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
+}
+
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_SEQIN:
+ bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
+}
+
+/**************************************************
+ * Init
+ **************************************************/
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
+{
+ int err;
+ u32 freq;
+ u16 clock;
+ u8 w0, w1, w2, w3, w4;
+
+ unsigned long chipsize; /* MiB */
+ u8 tbits, col_bits, col_size, row_bits, row_bsize;
+ u32 val;
+
+ b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+ b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+ b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+ b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+ b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
+ b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
+
+ /* Enable NAND flash access */
+ bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ BCMA_CC_4706_FLASHSCFG_NF1);
+
+ /* Configure wait counters */
+ if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
+ freq = 100000000;
+ } else {
+ freq = bcma_chipco_pll_read(b47n->cc, 4);
+ freq = (freq * 0xFFF) >> 3;
+ freq = (freq * 25000000) >> 3;
+ }
+ clock = freq / 1000000;
+ w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
+ w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
+ w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
+ (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
+
+ /* Scan NAND */
+ err = nand_scan(&b47n->mtd, 1);
+ if (err) {
+ pr_err("Could not scan NAND flash: %d\n", err);
+ goto exit;
+ }
+
+ /* Configure FLASH */
+ chipsize = b47n->nand_chip.chipsize >> 20;
+ tbits = ffs(chipsize); /* find first bit set */
+ if (!tbits || tbits != fls(chipsize)) {
+ pr_err("Invalid flash size: 0x%lX\n", chipsize);
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
+
+ col_bits = b47n->nand_chip.page_shift + 1;
+ col_size = (col_bits + 7) / 8;
+
+ row_bits = tbits - col_bits + 1;
+ row_bsize = (row_bits + 7) / 8;
+
+ val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
+
+exit:
+ if (err)
+ bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ ~BCMA_CC_4706_FLASHSCFG_NF1);
+ return err;
+}
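As a quick sanity check of the ns-to-cycle conversion and wait-counter packing above: with the assumed 100 MHz fallback clock (clock = 100), ((ns * 1000 * clock) / 1000000) + 1 gives 2 cycles for 15 ns, 3 for 20 ns, 2 for 10 ns and 11 for 100 ns. A small stand-alone user-space sketch (hypothetical names) reproducing the computation:

#include <stdio.h>
#include <stdint.h>

/* mirrors bcm47xxnflash_ops_bcm4706_ns_to_cycle(); clock is in MHz */
static uint8_t ns_to_cycle(uint16_t ns, uint16_t clock)
{
	return ((ns * 1000 * clock) / 1000000) + 1;
}

int main(void)
{
	uint16_t clock = 100;			/* assumed 100 MHz */
	uint8_t w0 = ns_to_cycle(15, clock);	/* 2 */
	uint8_t w1 = ns_to_cycle(20, clock);	/* 3 */
	uint8_t w2 = ns_to_cycle(10, clock);	/* 2 */
	uint8_t w3 = ns_to_cycle(10, clock);	/* 2 */
	uint8_t w4 = ns_to_cycle(100, clock);	/* 11 */
	uint32_t waitcnt0 = w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0;

	printf("WAITCNT0 = 0x%08x\n", waitcnt0);	/* 0x0b0820c2 */
	return 0;
}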
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index ab0caa74eb43..4271e948d1e2 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -658,7 +658,7 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
/*
* Device management interface
*/
-static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
+static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
{
struct mtd_info *mtd = &info->mtd;
struct mtd_partition *parts = info->platform->partitions;
@@ -667,7 +667,7 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
return mtd_device_register(mtd, parts, nr);
}
-static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
+static int bf5xx_nand_remove(struct platform_device *pdev)
{
struct bf5xx_nand_info *info = to_nand_info(pdev);
@@ -725,7 +725,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
+static int bf5xx_nand_probe(struct platform_device *pdev)
{
struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
struct bf5xx_nand_info *info = NULL;
@@ -865,7 +865,7 @@ static int bf5xx_nand_resume(struct platform_device *dev)
/* driver device registration */
static struct platform_driver bf5xx_nand_driver = {
.probe = bf5xx_nand_probe,
- .remove = __devexit_p(bf5xx_nand_remove),
+ .remove = bf5xx_nand_remove,
.suspend = bf5xx_nand_suspend,
.resume = bf5xx_nand_resume,
.driver = {
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2bb7170502c2..010d61266536 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -585,7 +585,7 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
}
/* F_2[X]/(X**6+X+1) */
-static unsigned short __devinit gf64_mul(u8 a, u8 b)
+static unsigned short gf64_mul(u8 a, u8 b)
{
u8 c;
unsigned int i;
@@ -604,7 +604,7 @@ static unsigned short __devinit gf64_mul(u8 a, u8 b)
}
/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
-static u16 __devinit gf4096_mul(u16 a, u16 b)
+static u16 gf4096_mul(u16 a, u16 b)
{
u8 ah, al, bh, bl, ch, cl;
@@ -619,14 +619,14 @@ static u16 __devinit gf4096_mul(u16 a, u16 b)
return (ch << 6) ^ cl;
}
-static int __devinit cafe_mul(int x)
+static int cafe_mul(int x)
{
if (x == 0)
return 1;
return gf4096_mul(x, 0xe01);
}
-static int __devinit cafe_nand_probe(struct pci_dev *pdev,
+static int cafe_nand_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mtd_info *mtd;
@@ -821,7 +821,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
return err;
}
-static void __devexit cafe_nand_remove(struct pci_dev *pdev)
+static void cafe_nand_remove(struct pci_dev *pdev)
{
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct cafe_priv *cafe = mtd->priv;
@@ -887,7 +887,7 @@ static struct pci_driver cafe_nand_pci_driver = {
.name = "CAFÉ NAND",
.id_table = cafe_nand_tbl,
.probe = cafe_nand_probe,
- .remove = __devexit_p(cafe_nand_remove),
+ .remove = cafe_nand_remove,
.resume = cafe_nand_resume,
};
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index adb6c3ef37fb..2cdeab8bebc4 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -237,6 +237,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
this->ecc.hwctl = cs_enable_hwecc;
this->ecc.calculate = cs_calculate_ecc;
this->ecc.correct = nand_correct_data;
+ this->ecc.strength = 1;
/* Enable the following for a flash based bad block table */
this->bbt_options = NAND_BBT_USE_FLASH;
@@ -247,8 +248,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
goto out_ior;
}
- this->ecc.strength = 1;
-
new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
cs553x_mtd[cs] = new_mtd;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 945047ad0952..feae55c7b880 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
{},
-}
+};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
static struct davinci_nand_pdata
@@ -821,9 +821,16 @@ syndrome_done:
if (ret < 0)
goto err_scan;
- ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
- pdata->nr_parts);
+ if (pdata->parts)
+ ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ pdata->parts, pdata->nr_parts);
+ else {
+ struct mtd_part_parser_data ppdata;
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
+ NULL, 0);
+ }
if (ret < 0)
goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index e706a237170f..0c8bb6bf8424 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -16,14 +16,12 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
-
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
@@ -89,13 +87,6 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
* format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
-/* List of platforms this NAND controller has be integrated into */
-static const struct pci_device_id denali_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
- { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
- { /* end: all zeroes */ }
-};
-
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -699,7 +690,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
if (comp_res == 0) {
/* timeout */
- printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
intr_status, irq_mask);
intr_status = 0;
@@ -1305,8 +1296,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
/* TODO: Read OOB data */
break;
default:
- printk(KERN_ERR ": unsupported command"
- " received 0x%x\n", cmd);
+ pr_err(": unsupported command received 0x%x\n", cmd);
break;
}
}
@@ -1425,107 +1415,48 @@ void denali_drv_init(struct denali_nand_info *denali)
denali->irq_status = 0;
}
-/* driver entry point */
-static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int denali_init(struct denali_nand_info *denali)
{
- int ret = -ENODEV;
- resource_size_t csr_base, mem_base;
- unsigned long csr_len, mem_len;
- struct denali_nand_info *denali;
-
- denali = kzalloc(sizeof(*denali), GFP_KERNEL);
- if (!denali)
- return -ENOMEM;
+ int ret;
- ret = pci_enable_device(dev);
- if (ret) {
- printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- goto failed_alloc_memery;
- }
-
- if (id->driver_data == INTEL_CE4100) {
+ if (denali->platform == INTEL_CE4100) {
/* Due to a silicon limitation, we can only support
* ONFI timing mode 1 and below.
*/
if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
- printk(KERN_ERR "Intel CE4100 only supports"
- " ONFI timing mode 1 or below\n");
- ret = -EINVAL;
- goto failed_enable_dev;
- }
- denali->platform = INTEL_CE4100;
- mem_base = pci_resource_start(dev, 0);
- mem_len = pci_resource_len(dev, 1);
- csr_base = pci_resource_start(dev, 1);
- csr_len = pci_resource_len(dev, 1);
- } else {
- denali->platform = INTEL_MRST;
- csr_base = pci_resource_start(dev, 0);
- csr_len = pci_resource_len(dev, 0);
- mem_base = pci_resource_start(dev, 1);
- mem_len = pci_resource_len(dev, 1);
- if (!mem_len) {
- mem_base = csr_base + csr_len;
- mem_len = csr_len;
+ pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
+ return -EINVAL;
}
}
/* Is 32-bit DMA supported? */
- ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
if (ret) {
- printk(KERN_ERR "Spectra: no usable DMA configuration\n");
- goto failed_enable_dev;
+ pr_err("Spectra: no usable DMA configuration\n");
+ return ret;
}
- denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
+ denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
DENALI_BUF_SIZE,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
- dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
- goto failed_enable_dev;
- }
-
- pci_set_master(dev);
- denali->dev = &dev->dev;
- denali->mtd.dev.parent = &dev->dev;
-
- ret = pci_request_regions(dev, DENALI_NAND_NAME);
- if (ret) {
- printk(KERN_ERR "Spectra: Unable to request memory regions\n");
- goto failed_dma_map;
- }
-
- denali->flash_reg = ioremap_nocache(csr_base, csr_len);
- if (!denali->flash_reg) {
- printk(KERN_ERR "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_req_regions;
- }
-
- denali->flash_mem = ioremap_nocache(mem_base, mem_len);
- if (!denali->flash_mem) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- ret = -ENOMEM;
- goto failed_remap_reg;
+ if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+ dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ return -EIO;
}
-
+ denali->mtd.dev.parent = denali->dev;
denali_hw_init(denali);
denali_drv_init(denali);
/* denali_isr register is done after all the hardware
* initilization is finished*/
- if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
+ if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
DENALI_NAND_NAME, denali)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- goto failed_remap_mem;
+ pr_err("Spectra: Unable to allocate IRQ\n");
+ return -ENODEV;
}
/* now that our ISR is registered, we can enable interrupts */
denali_set_intr_modes(denali, true);
-
- pci_set_drvdata(dev, denali);
-
denali->mtd.name = "denali-nand";
denali->mtd.owner = THIS_MODULE;
denali->mtd.priv = &denali->nand;
@@ -1549,8 +1480,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
*/
if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
ret = -ENODEV;
- printk(KERN_ERR "Spectra: device size not supported by this "
- "version of MTD.");
+ pr_err("Spectra: device size not supported by this version of MTD.");
goto failed_req_irq;
}
@@ -1602,8 +1532,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
ECC_8BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE))) {
- printk(KERN_ERR "Your NAND chip OOB is not large enough to"
- " contain 8bit ECC correction codes");
+		pr_err("Your NAND chip OOB is not large enough to "
+			"contain 8bit ECC correction codes");
goto failed_req_irq;
} else {
denali->nand.ecc.strength = 8;
@@ -1655,56 +1585,24 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
ret = mtd_device_register(&denali->mtd, NULL, 0);
if (ret) {
- dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
+ dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
ret);
goto failed_req_irq;
}
return 0;
failed_req_irq:
- denali_irq_cleanup(dev->irq, denali);
-failed_remap_mem:
- iounmap(denali->flash_mem);
-failed_remap_reg:
- iounmap(denali->flash_reg);
-failed_req_regions:
- pci_release_regions(dev);
-failed_dma_map:
- dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
-failed_enable_dev:
- pci_disable_device(dev);
-failed_alloc_memery:
- kfree(denali);
+ denali_irq_cleanup(denali->irq, denali);
+
return ret;
}
+EXPORT_SYMBOL(denali_init);
/* driver exit point */
-static void denali_pci_remove(struct pci_dev *dev)
+void denali_remove(struct denali_nand_info *denali)
{
- struct denali_nand_info *denali = pci_get_drvdata(dev);
-
- nand_release(&denali->mtd);
-
- denali_irq_cleanup(dev->irq, denali);
-
- iounmap(denali->flash_reg);
- iounmap(denali->flash_mem);
- pci_release_regions(dev);
- pci_disable_device(dev);
- dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- pci_set_drvdata(dev, NULL);
- kfree(denali);
+ denali_irq_cleanup(denali->irq, denali);
+ dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
}
-
-MODULE_DEVICE_TABLE(pci, denali_pci_ids);
-
-static struct pci_driver denali_pci_driver = {
- .name = DENALI_NAND_NAME,
- .id_table = denali_pci_ids,
- .probe = denali_pci_probe,
- .remove = denali_pci_remove,
-};
-
-module_pci_driver(denali_pci_driver);
+EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index fabb9d56b39e..cec5712862c9 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -466,6 +466,7 @@ struct nand_buf {
#define INTEL_CE4100 1
#define INTEL_MRST 2
+#define DT 3
struct denali_nand_info {
struct mtd_info mtd;
@@ -487,6 +488,7 @@ struct denali_nand_info {
uint32_t irq_status;
int irq_debug_array[32];
int idx;
+ int irq;
uint32_t devnum; /* represent how many nands connected */
uint32_t fwblks; /* represent how many blocks FW used */
@@ -496,4 +498,7 @@ struct denali_nand_info {
uint32_t max_banks;
};
+extern int denali_init(struct denali_nand_info *denali);
+extern void denali_remove(struct denali_nand_info *denali);
+
#endif /*_LLD_NAND_*/
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
new file mode 100644
index 000000000000..546f8cb5688d
--- /dev/null
+++ b/drivers/mtd/nand/denali_dt.c
@@ -0,0 +1,167 @@
+/*
+ * NAND Flash Controller Device Driver for DT
+ *
+ * Copyright © 2011, Picochip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+struct denali_dt {
+ struct denali_nand_info denali;
+ struct clk *clk;
+};
+
+static void __iomem *request_and_map(struct device *dev,
+ const struct resource *res)
+{
+ void __iomem *ptr;
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ "denali-dt")) {
+ dev_err(dev, "unable to request %s\n", res->name);
+ return NULL;
+ }
+
+ ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!ptr)
+ dev_err(dev, "ioremap_nocache of %s failed!", res->name);
+
+ return ptr;
+}
+
+static const struct of_device_id denali_nand_dt_ids[] = {
+ { .compatible = "denali,denali-nand-dt" },
+ { /* sentinel */ }
+ };
+
+MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+
+static u64 denali_dma_mask;
+
+static int denali_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *denali_reg, *nand_data;
+ struct denali_dt *dt;
+ struct denali_nand_info *denali;
+ int ret;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+ return -ENOMEM;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+ denali = &dt->denali;
+
+ denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
+ nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
+ if (!denali_reg || !nand_data) {
+ dev_err(&ofdev->dev, "resources not completely defined\n");
+ return -EINVAL;
+ }
+
+ denali->platform = DT;
+ denali->dev = &ofdev->dev;
+ denali->irq = platform_get_irq(ofdev, 0);
+ if (denali->irq < 0) {
+ dev_err(&ofdev->dev, "no irq defined\n");
+ return -ENXIO;
+ }
+
+ denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
+ if (!denali->flash_reg)
+ return -ENOMEM;
+
+ denali->flash_mem = request_and_map(&ofdev->dev, nand_data);
+ if (!denali->flash_mem)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(ofdev->dev.of_node,
+ "dma-mask", (u32 *)&denali_dma_mask)) {
+ denali->dev->dma_mask = &denali_dma_mask;
+ } else {
+ denali->dev->dma_mask = NULL;
+ }
+
+ dt->clk = clk_get(&ofdev->dev, NULL);
+ if (IS_ERR(dt->clk)) {
+ dev_err(&ofdev->dev, "no clk available\n");
+ return PTR_ERR(dt->clk);
+ }
+ clk_prepare_enable(dt->clk);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_disable_clk;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(dt->clk);
+ clk_put(dt->clk);
+
+ return ret;
+}
+
+static int denali_dt_remove(struct platform_device *ofdev)
+{
+ struct denali_dt *dt = platform_get_drvdata(ofdev);
+
+ denali_remove(&dt->denali);
+ clk_disable_unprepare(dt->clk);
+ clk_put(dt->clk);
+
+ return 0;
+}
+
+static struct platform_driver denali_dt_driver = {
+ .probe = denali_dt_probe,
+ .remove = denali_dt_remove,
+ .driver = {
+ .name = "denali-nand-dt",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(denali_nand_dt_ids),
+ },
+};
+
+static int __init denali_init_dt(void)
+{
+ return platform_driver_register(&denali_dt_driver);
+}
+module_init(denali_init_dt);
+
+static void __exit denali_exit_dt(void)
+{
+ platform_driver_unregister(&denali_dt_driver);
+}
+module_exit(denali_exit_dt);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
new file mode 100644
index 000000000000..e3e46623b2b4
--- /dev/null
+++ b/drivers/mtd/nand/denali_pci.c
@@ -0,0 +1,144 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand-pci"
+
+/* List of platforms this NAND controller has be integrated into */
+static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ pr_err("Spectra: pci_enable_device failed.\n");
+ goto failed_alloc_memory;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ }
+ }
+
+ pci_set_master(dev);
+ denali->dev = &dev->dev;
+ denali->irq = dev->irq;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ pr_err("Spectra: Unable to request memory regions\n");
+ goto failed_enable_dev;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ pr_err("Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_req_regions;
+ }
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ pr_err("Spectra: ioremap_nocache failed!");
+ ret = -ENOMEM;
+ goto failed_remap_reg;
+ }
+
+ ret = denali_init(denali);
+ if (ret)
+ goto failed_remap_mem;
+
+ pci_set_drvdata(dev, denali);
+
+ return 0;
+
+failed_remap_mem:
+ iounmap(denali->flash_mem);
+failed_remap_reg:
+ iounmap(denali->flash_reg);
+failed_req_regions:
+ pci_release_regions(dev);
+failed_enable_dev:
+ pci_disable_device(dev);
+failed_alloc_memory:
+ kfree(denali);
+
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ denali_remove(denali);
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_set_drvdata(dev, NULL);
+ kfree(denali);
+}
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int denali_init_pci(void)
+{
+ pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
+ return pci_register_driver(&denali_pci_driver);
+}
+module_init(denali_init_pci);
+
+static void denali_exit_pci(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+module_exit(denali_exit_pci);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 256eb30f6180..81fa5784f98b 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -53,8 +53,6 @@ static unsigned long __initdata doc_locations[] = {
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
0xffffffff };
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 799da5d1c857..18fa4489e52e 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -46,6 +46,25 @@
#include <linux/bitrev.h>
/*
+ * In "reliable mode" consecutive 2k pages are used in parallel (in some
+ * fashion) to store the same data. The data can be read back from the
+ * even-numbered pages in the normal manner; odd-numbered pages will appear to
+ * contain junk. Systems that boot from the docg4 typically write the secondary
+ * program loader (SPL) code in this mode. The SPL is loaded by the initial
+ * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
+ * to the reset vector address). This module parameter enables you to use this
+ * driver to write the SPL. When in this mode, no more than 2k of data can be
+ * written at a time, because the addresses do not increment in the normal
+ * manner, and the starting offset must be within an even-numbered 2k region;
+ * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
+ * 0x1a00, ... Reliable mode is a special case and should not be used unless
+ * you know what you're doing.
+ */
+static bool reliable_mode;
+module_param(reliable_mode, bool, 0);
+MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
+
+/*
* You'll want to ignore badblocks if you're reading a partition that contains
* data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
* it does not use mtd nand's method for marking bad blocks (using oob area).
@@ -113,6 +132,7 @@ struct docg4_priv {
#define DOCG4_SEQ_PAGEWRITE 0x16
#define DOCG4_SEQ_PAGEPROG 0x1e
#define DOCG4_SEQ_BLOCKERASE 0x24
+#define DOCG4_SEQ_SETMODE 0x45
/* DOC_FLASHCOMMAND register commands */
#define DOCG4_CMD_PAGE_READ 0x00
@@ -122,6 +142,8 @@ struct docg4_priv {
#define DOC_CMD_PROG_BLOCK_ADDR 0x60
#define DOCG4_CMD_PAGEWRITE 0x80
#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */
+#define DOC_CMD_RELIABLE_MODE 0x22
#define DOC_CMD_RESET 0xff
/* DOC_POWERMODE register bits */
@@ -190,17 +212,20 @@ struct docg4_priv {
#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
/*
- * Oob bytes 0 - 6 are available to the user.
- * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc.
+ * Bytes 0, 1 are used as badblock marker.
+ * Bytes 2 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 oob bytes only.
+ * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
* Byte 15 (the last) is used by the driver as a "page written" flag.
*/
static struct nand_ecclayout docg4_oobinfo = {
.eccbytes = 9,
.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
- .oobavail = 7,
- .oobfree = { {0, 7} }
+ .oobavail = 5,
+ .oobfree = { {.offset = 2, .length = 5} }
};
/*
@@ -611,6 +636,14 @@ static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
dev_dbg(doc->dev,
"docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
sequence_reset(mtd);
+
+ if (unlikely(reliable_mode)) {
+ writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ }
+
writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
@@ -691,6 +724,15 @@ static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
break;
case NAND_CMD_SEQIN:
+ if (unlikely(reliable_mode)) {
+ uint16_t g4_page = g4_addr >> 16;
+
+ /* writes to odd-numbered 2k pages are invalid */
+ if (g4_page & 0x01)
+ dev_warn(doc->dev,
+ "invalid reliable mode address\n");
+ }
+
write_page_prologue(mtd, g4_addr);
/* hack for deferred write of oob bytes */
@@ -979,16 +1021,15 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
struct docg4_priv *doc = nand->priv;
uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
uint8_t *buf;
- int i, block, status;
+ int i, block;
+ __u32 eccfailed_stats = mtd->ecc_stats.failed;
buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
read_page_prologue(mtd, g4_addr);
- status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
- if (status)
- goto exit;
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
/*
* If no memory-based bbt was created, exit. This will happen if module
@@ -1000,6 +1041,20 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
if (nand->bbt == NULL) /* no memory-based bbt */
goto exit;
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ /*
+ * Whoops, an ECC failure occurred reading the factory bbt.
+ * It is stored redundantly, so we get another chance.
+ */
+ eccfailed_stats = mtd->ecc_stats.failed;
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ dev_warn(doc->dev,
+ "The factory bbt could not be read!\n");
+ goto exit;
+ }
+ }
+
/*
* Parse factory bbt and update memory-based bbt. Factory bbt format is
* simple: one bit per block, block numbers increase left to right (msb
@@ -1019,7 +1074,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
}
exit:
kfree(buf);
- return status;
+ return 0;
}
static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
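The redundant-BBT fallback added above detects an uncorrectable read by sampling mtd->ecc_stats.failed around each read rather than by checking the read path's return value (which this hunk stops propagating). The idiom, reduced to its essentials; this is a sketch only, and read_the_page(), PRIMARY_PAGE and REDUNDANT_PAGE are hypothetical placeholders for the driver's own docg4_read_page() and page constants:

    /* Sketch: spot uncorrectable ECC errors by watching the failure counter. */
    u32 before = mtd->ecc_stats.failed;

    read_the_page(mtd, buf, PRIMARY_PAGE);          /* hypothetical helper */
    if (mtd->ecc_stats.failed > before) {
            /* Primary copy was bad; fall back to the redundant copy. */
            before = mtd->ecc_stats.failed;
            read_the_page(mtd, buf, REDUNDANT_PAGE);
            if (mtd->ecc_stats.failed > before)
                    return -EBADMSG;                /* both copies unreadable */
    }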
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index cc1480a5e4c1..20657209a472 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -109,20 +109,6 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
};
/*
- * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
- * 1, so we have to adjust bad block pattern. This pattern should be used for
- * x8 chips only. So far hardware does not support x16 chips anyway.
- */
-static u8 scan_ff_pattern[] = { 0xff, };
-
-static struct nand_bbt_descr largepage_memorybased = {
- .options = 0,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern,
-};
-
-/*
* ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt,
* interfere with ECC positions, that's why we implement our own descriptors.
* OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
@@ -699,7 +685,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
&fsl_elbc_oob_lp_eccm1 :
&fsl_elbc_oob_lp_eccm0;
- chip->badblock_pattern = &largepage_memorybased;
}
} else {
dev_err(priv->dev,
@@ -814,7 +799,7 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
static DEFINE_MUTEX(fsl_elbc_nand_mutex);
-static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
+static int fsl_elbc_nand_probe(struct platform_device *pdev)
{
struct fsl_lbc_regs __iomem *lbc;
struct fsl_elbc_mtd *priv;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 3551a99076ba..ad6222627fed 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -389,7 +389,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
timing = IFC_FIR_OP_RBCD;
out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
(timing << IFC_NAND_FIR0_OP2_SHIFT));
out_be32(&ifc->ifc_nand.nand_fcr0,
@@ -754,7 +754,7 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
/* READID */
out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
out_be32(&ifc->ifc_nand.nand_fcr0,
@@ -922,7 +922,7 @@ static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
static DEFINE_MUTEX(fsl_ifc_nand_mutex);
-static int __devinit fsl_ifc_nand_probe(struct platform_device *dev)
+static int fsl_ifc_nand_probe(struct platform_device *dev)
{
struct fsl_ifc_regs __iomem *ifc;
struct fsl_ifc_mtd *priv;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 45df542b9c61..04e07252d74b 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -152,9 +152,9 @@ static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
fun_wait_rnb(fun);
}
-static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
- const struct device_node *upm_np,
- const struct resource *io_res)
+static int fun_chip_init(struct fsl_upm_nand *fun,
+ const struct device_node *upm_np,
+ const struct resource *io_res)
{
int ret;
struct device_node *flash_np;
@@ -201,7 +201,7 @@ err:
return ret;
}
-static int __devinit fun_probe(struct platform_device *ofdev)
+static int fun_probe(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun;
struct resource io_res;
@@ -318,7 +318,7 @@ err1:
return ret;
}
-static int __devexit fun_remove(struct platform_device *ofdev)
+static int fun_remove(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
int i;
@@ -350,7 +350,7 @@ static struct platform_driver of_fun_driver = {
.of_match_table = of_fun_match,
},
.probe = fun_probe,
- .remove = __devexit_p(fun_remove),
+ .remove = fun_remove,
};
module_platform_driver(of_fun_driver);
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 38d26240d8b1..09af555408b7 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -361,7 +361,7 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
struct nand_chip *this = mtd->priv;
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- void *__iomem *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
unsigned int bank = host->bank;
if (ctrl & NAND_CTRL_CHANGE) {
@@ -383,13 +383,13 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
pc |= FSMC_ENABLE;
else
pc &= ~FSMC_ENABLE;
- writel(pc, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
}
mb();
if (cmd != NAND_CMD_NONE)
- writeb(cmd, this->IO_ADDR_W);
+ writeb_relaxed(cmd, this->IO_ADDR_W);
}
/*
@@ -426,14 +426,18 @@ static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (busw)
- writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(value | FSMC_DEVWID_16,
+ FSMC_NAND_REG(regs, bank, PC));
else
- writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(value | FSMC_DEVWID_8,
+ FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
FSMC_NAND_REG(regs, bank, PC));
- writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM));
- writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, COMM));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, ATTRIB));
}
/*
@@ -446,11 +450,11 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
FSMC_NAND_REG(regs, bank, PC));
}
@@ -470,7 +474,7 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
do {
- if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
+ if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
break;
else
cond_resched();
@@ -481,25 +485,25 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
return -ETIMEDOUT;
}
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
ecc[3] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
ecc[4] = (uint8_t) (ecc_tmp >> 0);
ecc[5] = (uint8_t) (ecc_tmp >> 8);
ecc[6] = (uint8_t) (ecc_tmp >> 16);
ecc[7] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
ecc[8] = (uint8_t) (ecc_tmp >> 0);
ecc[9] = (uint8_t) (ecc_tmp >> 8);
ecc[10] = (uint8_t) (ecc_tmp >> 16);
ecc[11] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
ecc[12] = (uint8_t) (ecc_tmp >> 16);
return 0;
@@ -519,7 +523,7 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
uint32_t bank = host->bank;
uint32_t ecc_tmp;
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -601,7 +605,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_async_issue_pending(chan);
ret =
- wait_for_completion_interruptible_timeout(&host->dma_access_complete,
+ wait_for_completion_timeout(&host->dma_access_complete,
msecs_to_jiffies(3000));
if (ret <= 0) {
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -628,10 +632,10 @@ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
for (i = 0; i < len; i++)
- writel(p[i], chip->IO_ADDR_W);
+ writel_relaxed(p[i], chip->IO_ADDR_W);
} else {
for (i = 0; i < len; i++)
- writeb(buf[i], chip->IO_ADDR_W);
+ writeb_relaxed(buf[i], chip->IO_ADDR_W);
}
}
@@ -651,10 +655,10 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
for (i = 0; i < len; i++)
- p[i] = readl(chip->IO_ADDR_R);
+ p[i] = readl_relaxed(chip->IO_ADDR_R);
} else {
for (i = 0; i < len; i++)
- buf[i] = readb(chip->IO_ADDR_R);
+ buf[i] = readb_relaxed(chip->IO_ADDR_R);
}
}
@@ -783,7 +787,7 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
uint32_t num_err, i;
uint32_t ecc1, ecc2, ecc3, ecc4;
- num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+ num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
/* no bit flipping */
if (likely(num_err == 0))
@@ -826,10 +830,10 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
* uint64_t array and error offset indexes are populated in err_idx
* array
*/
- ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1));
- ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2));
- ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3));
- ecc4 = readl(FSMC_NAND_REG(regs, bank, STS));
+ ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
err_idx[0] = (ecc1 >> 0) & 0x1FFF;
err_idx[1] = (ecc1 >> 13) & 0x1FFF;
@@ -860,8 +864,8 @@ static bool filter(struct dma_chan *chan, void *slave)
}
#ifdef CONFIG_OF
-static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
{
struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 val;
@@ -876,16 +880,14 @@ static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
return -EINVAL;
}
}
- of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
- of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
if (of_get_property(np, "nand-skip-bbtscan", NULL))
pdata->options = NAND_SKIP_BBTSCAN;
return 0;
}
#else
-static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
{
return -ENOSYS;
}
@@ -935,62 +937,35 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory data resourse\n");
- return -ENOENT;
- }
-
+ host->data_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->data_va))
+ return PTR_ERR(host->data_va);
+
host->data_pa = (dma_addr_t)res->start;
- host->data_va = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!host->data_va) {
- dev_err(&pdev->dev, "data ioremap failed\n");
- return -ENOMEM;
- }
- if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory ale resourse\n");
- return -ENOENT;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ if (!res)
+ return -EINVAL;
- host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off,
- resource_size(res));
- if (!host->addr_va) {
- dev_err(&pdev->dev, "ale ioremap failed\n");
- return -ENOMEM;
- }
+ host->addr_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->addr_va))
+ return PTR_ERR(host->addr_va);
- if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory cle resourse\n");
- return -ENOENT;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ if (!res)
+ return -EINVAL;
- host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off,
- resource_size(res));
- if (!host->cmd_va) {
- dev_err(&pdev->dev, "ale ioremap failed\n");
- return -ENOMEM;
- }
+ host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->cmd_va))
+ return PTR_ERR(host->cmd_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
if (!res)
return -EINVAL;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory regs resourse\n");
- return -ENOENT;
- }
-
- host->regs_va = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!host->regs_va) {
- dev_err(&pdev->dev, "regs ioremap failed\n");
- return -ENOMEM;
- }
+ host->regs_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_va))
+ return PTR_ERR(host->regs_va);
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
@@ -1236,6 +1211,7 @@ static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
#ifdef CONFIG_OF
static const struct of_device_id fsmc_nand_id_table[] = {
{ .compatible = "st,spear600-fsmc-nand" },
+ { .compatible = "stericsson,fsmc-nand" },
{}
};
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
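The probe rework above replaces each devm_request_mem_region() + devm_ioremap() pair with a single devm_ioremap_resource() call, which requests the region, maps it and prints its own error message, reporting failure through ERR_PTR() instead of NULL. The resulting error-handling shape, shown in isolation as a sketch of the pattern rather than driver code:

    struct resource *res;
    void __iomem *base;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
    if (!res)
            return -EINVAL;

    /* Requests and maps the region in one step; logs on failure itself. */
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))               /* failure is an ERR_PTR(), never NULL */
            return PTR_ERR(base);

The same conversion is applied below to lpc32xx_mlc, lpc32xx_slc and mxc_nand.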
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index bc73bc5f2713..e789e3f51710 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -90,14 +90,14 @@ static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
- writesb(this->IO_ADDR_W, buf, len);
+ iowrite8_rep(this->IO_ADDR_W, buf, len);
}
static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
- readsb(this->IO_ADDR_R, buf, len);
+ ioread8_rep(this->IO_ADDR_R, buf, len);
}
static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
@@ -106,7 +106,7 @@ static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
struct nand_chip *this = mtd->priv;
if (IS_ALIGNED((unsigned long)buf, 2)) {
- writesw(this->IO_ADDR_W, buf, len>>1);
+ iowrite16_rep(this->IO_ADDR_W, buf, len>>1);
} else {
int i;
unsigned short *ptr = (unsigned short *)buf;
@@ -121,7 +121,7 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
struct nand_chip *this = mtd->priv;
if (IS_ALIGNED((unsigned long)buf, 2)) {
- readsw(this->IO_ADDR_R, buf, len>>1);
+ ioread16_rep(this->IO_ADDR_R, buf, len>>1);
} else {
int i;
unsigned short *ptr = (unsigned short *)buf;
@@ -134,7 +134,11 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ return gpio_get_value(gpiomtd->plat.gpio_rdy);
+
+ return 1;
}
#ifdef CONFIG_OF
@@ -227,7 +231,7 @@ gpio_nand_get_io_sync(struct platform_device *pdev)
return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}
-static int __devexit gpio_nand_remove(struct platform_device *dev)
+static int gpio_nand_remove(struct platform_device *dev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
struct resource *res;
@@ -252,7 +256,8 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
gpio_free(gpiomtd->plat.gpio_nce);
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_free(gpiomtd->plat.gpio_nwp);
- gpio_free(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ gpio_free(gpiomtd->plat.gpio_rdy);
kfree(gpiomtd);
@@ -277,7 +282,7 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
return ptr;
}
-static int __devinit gpio_nand_probe(struct platform_device *dev)
+static int gpio_nand_probe(struct platform_device *dev)
{
struct gpiomtd *gpiomtd;
struct nand_chip *this;
@@ -336,10 +341,12 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
if (ret)
goto err_cle;
gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
- ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
- if (ret)
- goto err_rdy;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
+ ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
+ if (ret)
+ goto err_rdy;
+ gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ }
this->IO_ADDR_W = this->IO_ADDR_R;
@@ -386,7 +393,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
err_wp:
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- gpio_free(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ gpio_free(gpiomtd->plat.gpio_rdy);
err_rdy:
gpio_free(gpiomtd->plat.gpio_cle);
err_cle:
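These hunks make the ready/busy GPIO optional: every use is now guarded by gpio_is_valid(), and when no GPIO is provided the device is simply reported as always ready. The guarded request/free pairing, in isolation (a sketch of the pattern used above, with plat standing in for gpiomtd->plat):

    /* Sketch: request the ready GPIO only when the platform provides one. */
    if (gpio_is_valid(plat->gpio_rdy)) {
            ret = gpio_request(plat->gpio_rdy, "NAND RDY");
            if (ret)
                    return ret;
            gpio_direction_input(plat->gpio_rdy);
    }

    /* ...and on the error/remove path, free it under the same guard. */
    if (gpio_is_valid(plat->gpio_rdy))
            gpio_free(plat->gpio_rdy);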
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 3502accd4bc3..d84699c7968e 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -18,7 +18,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include <linux/mtd/gpmi-nand.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -166,6 +165,15 @@ int gpmi_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
+ /*
+ * Reset BCH here, too. We got failures otherwise :(
+ * See later BCH reset for explanation of MX23 handling
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
/* Choose NAND mode. */
writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index d79696b2f19b..e9b1c47e3cf9 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
@@ -33,6 +32,12 @@
#include <linux/of_mtd.h>
#include "gpmi-nand.h"
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
+
/* add our owner bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
@@ -222,7 +227,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
ret = dma_map_sg(this->dev, sgl, 1, dr);
if (ret == 0)
- pr_err("map failed.\n");
+ pr_err("DMA mapping failed.\n");
this->direct_dma_map_ok = false;
}
@@ -314,8 +319,8 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
return 0;
}
-static int __devinit
-acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
+static int acquire_register_block(struct gpmi_nand_data *this,
+ const char *res_name)
{
struct platform_device *pdev = this->pdev;
struct resources *res = &this->resources;
@@ -355,8 +360,7 @@ static void release_register_block(struct gpmi_nand_data *this)
res->bch_regs = NULL;
}
-static int __devinit
-acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
struct resources *res = &this->resources;
@@ -422,7 +426,7 @@ static void release_dma_channels(struct gpmi_nand_data *this)
}
}
-static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
+static int acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
struct resource *r_dma;
@@ -456,7 +460,7 @@ static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
if (!dma_chan) {
- pr_err("dma_request_channel failed.\n");
+ pr_err("Failed to request DMA channel.\n");
goto acquire_err;
}
@@ -487,7 +491,7 @@ static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
-static int __devinit gpmi_get_clks(struct gpmi_nand_data *this)
+static int gpmi_get_clks(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
char **extra_clks = NULL;
@@ -533,7 +537,7 @@ err_clock:
return -ENOMEM;
}
-static int __devinit acquire_resources(struct gpmi_nand_data *this)
+static int acquire_resources(struct gpmi_nand_data *this)
{
struct pinctrl *pinctrl;
int ret;
@@ -583,7 +587,7 @@ static void release_resources(struct gpmi_nand_data *this)
release_dma_channels(this);
}
-static int __devinit init_hardware(struct gpmi_nand_data *this)
+static int init_hardware(struct gpmi_nand_data *this)
{
int ret;
@@ -625,7 +629,8 @@ static int read_page_prepare(struct gpmi_nand_data *this,
length, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dest_phys)) {
if (alt_size < length) {
- pr_err("Alternate buffer is too small\n");
+ pr_err("%s, Alternate buffer is too small\n",
+ __func__);
return -ENOMEM;
}
goto map_failed;
@@ -675,7 +680,8 @@ static int send_page_prepare(struct gpmi_nand_data *this,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, source_phys)) {
if (alt_size < length) {
- pr_err("Alternate buffer is too small\n");
+ pr_err("%s, Alternate buffer is too small\n",
+ __func__);
return -ENOMEM;
}
goto map_failed;
@@ -763,7 +769,7 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
error_alloc:
gpmi_free_dma_buffer(this);
- pr_err("allocate DMA buffer ret!!\n");
+ pr_err("Error allocating DMA buffers!\n");
return -ENOMEM;
}
@@ -1474,7 +1480,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
/* Set up the NFC geometry which is used by BCH. */
ret = bch_set_geometry(this);
if (ret) {
- pr_err("set geometry ret : %d\n", ret);
+ pr_err("Error setting BCH geometry : %d\n", ret);
return ret;
}
@@ -1535,7 +1541,7 @@ static void gpmi_nfc_exit(struct gpmi_nand_data *this)
gpmi_free_dma_buffer(this);
}
-static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
+static int gpmi_nfc_init(struct gpmi_nand_data *this)
{
struct mtd_info *mtd = &this->mtd;
struct nand_chip *chip = &this->nand;
@@ -1618,7 +1624,7 @@ static const struct of_device_id gpmi_nand_id_table[] = {
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
-static int __devinit gpmi_nand_probe(struct platform_device *pdev)
+static int gpmi_nand_probe(struct platform_device *pdev)
{
struct gpmi_nand_data *this;
const struct of_device_id *of_id;
@@ -1668,7 +1674,7 @@ exit_acquire_resources:
return ret;
}
-static int __devexit gpmi_nand_remove(struct platform_device *pdev)
+static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
@@ -1685,7 +1691,7 @@ static struct platform_driver gpmi_nand_driver = {
.of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
- .remove = __devexit_p(gpmi_nand_remove),
+ .remove = gpmi_nand_remove,
.id_table = gpmi_ids,
};
module_platform_driver(gpmi_nand_driver);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 7ac25c1e58f9..3d93a5e39090 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -130,7 +130,6 @@ struct gpmi_nand_data {
/* System Interface */
struct device *dev;
struct platform_device *pdev;
- struct gpmi_nand_platform_data *pdata;
/* Resources */
struct resources resources;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 100b6775e175..b76460eeaf22 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -316,13 +316,18 @@ err:
return ret;
}
-static inline void jz_nand_iounmap_resource(struct resource *res, void __iomem *base)
+static inline void jz_nand_iounmap_resource(struct resource *res,
+ void __iomem *base)
{
iounmap(base);
release_mem_region(res->start, resource_size(res));
}
-static int __devinit jz_nand_detect_bank(struct platform_device *pdev, struct jz_nand *nand, unsigned char bank, size_t chipnr, uint8_t *nand_maf_id, uint8_t *nand_dev_id) {
+static int jz_nand_detect_bank(struct platform_device *pdev,
+ struct jz_nand *nand, unsigned char bank,
+ size_t chipnr, uint8_t *nand_maf_id,
+ uint8_t *nand_dev_id)
+{
int ret;
int gpio;
char gpio_name[9];
@@ -400,7 +405,7 @@ notfound_gpio:
return ret;
}
-static int __devinit jz_nand_probe(struct platform_device *pdev)
+static int jz_nand_probe(struct platform_device *pdev)
{
int ret;
struct jz_nand *nand;
@@ -541,7 +546,7 @@ err_free:
return ret;
}
-static int __devexit jz_nand_remove(struct platform_device *pdev)
+static int jz_nand_remove(struct platform_device *pdev)
{
struct jz_nand *nand = platform_get_drvdata(pdev);
struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
@@ -573,7 +578,7 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
static struct platform_driver jz_nand_driver = {
.probe = jz_nand_probe,
- .remove = __devexit_p(jz_nand_remove),
+ .remove = jz_nand_remove,
.driver = {
.name = "jz4740-nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index c29b7ac1f6af..0ca22ae9135c 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -655,7 +655,7 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
/*
* Probe for NAND controller
*/
-static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+static int lpc32xx_nand_probe(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host;
struct mtd_info *mtd;
@@ -677,11 +677,10 @@ static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
return -ENXIO;
}
- host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
- if (host->io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- return -EIO;
- }
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
host->io_base_phy = rc->start;
mtd = &host->mtd;
@@ -845,7 +844,7 @@ err_exit1:
/*
* Remove NAND device
*/
-static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+static int lpc32xx_nand_remove(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
struct mtd_info *mtd = &host->mtd;
@@ -907,7 +906,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
- .remove = __devexit_p(lpc32xx_nand_remove),
+ .remove = lpc32xx_nand_remove,
.resume = lpc32xx_nand_resume,
.suspend = lpc32xx_nand_suspend,
.driver = {
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 32409c45d479..be94ed5abefb 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -755,7 +755,7 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
/*
* Probe for NAND controller
*/
-static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+static int lpc32xx_nand_probe(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host;
struct mtd_info *mtd;
@@ -778,11 +778,9 @@ static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
}
host->io_base_dma = rc->start;
- host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
- if (host->io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- return -ENOMEM;
- }
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
if (pdev->dev.of_node)
host->ncfg = lpc32xx_parse_dt(&pdev->dev);
@@ -949,7 +947,7 @@ err_exit1:
/*
* Remove NAND device.
*/
-static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+static int lpc32xx_nand_remove(struct platform_device *pdev)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
@@ -1021,7 +1019,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
- .remove = __devexit_p(lpc32xx_nand_remove),
+ .remove = lpc32xx_nand_remove,
.resume = lpc32xx_nand_resume,
.suspend = lpc32xx_nand_suspend,
.driver = {
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index f776c8577b8c..3c9cdcbc4cba 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
iounmap(prv->csreg);
}
-static int __devinit mpc5121_nfc_probe(struct platform_device *op)
+static int mpc5121_nfc_probe(struct platform_device *op)
{
struct device_node *rootnode, *dn = op->dev.of_node;
struct device *dev = &op->dev;
@@ -827,7 +827,7 @@ error:
return retval;
}
-static int __devexit mpc5121_nfc_remove(struct platform_device *op)
+static int mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
@@ -841,14 +841,14 @@ static int __devexit mpc5121_nfc_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+static struct of_device_id mpc5121_nfc_match[] = {
{ .compatible = "fsl,mpc5121-nfc", },
{},
};
static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
- .remove = __devexit_p(mpc5121_nfc_remove),
+ .remove = mpc5121_nfc_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 022dcdc256fb..60ac5b98b718 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -266,7 +266,8 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
+static const char * const part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", NULL };
static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
{
@@ -1378,7 +1379,7 @@ static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
}
#endif
-static int __devinit mxcnd_probe(struct platform_device *pdev)
+static int mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
@@ -1436,9 +1437,9 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- host->regs_ip = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->regs_ip)
- return -ENOMEM;
+ host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_ip))
+ return PTR_ERR(host->regs_ip);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
} else {
@@ -1448,9 +1449,9 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- host->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->base)
- return -ENOMEM;
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
host->main_area0 = host->base;
@@ -1556,12 +1557,13 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
return 0;
escan:
- clk_disable_unprepare(host->clk);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
return err;
}
-static int __devexit mxcnd_remove(struct platform_device *pdev)
+static int mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
@@ -1580,7 +1582,7 @@ static struct platform_driver mxcnd_driver = {
},
.id_table = mxcnd_devtype,
.probe = mxcnd_probe,
- .remove = __devexit_p(mxcnd_remove),
+ .remove = mxcnd_remove,
};
module_platform_driver(mxcnd_driver);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1a03b7f673ce..3766682a0289 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -93,8 +93,7 @@ static struct nand_ecclayout nand_oob_128 = {
.length = 78} }
};
-static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
- int new_state);
+static int nand_get_device(struct mtd_info *mtd, int new_state);
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
@@ -130,15 +129,12 @@ static int check_offs_len(struct mtd_info *mtd,
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
*
- * Deselect, release chip lock and wake up anyone waiting on the device.
+ * Release chip lock and wake up anyone waiting on the device.
*/
static void nand_release_device(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- /* De-select the NAND device */
- chip->select_chip(mtd, -1);
-
/* Release the controller and the chip */
spin_lock(&chip->controller->lock);
chip->controller->active = NULL;
@@ -160,7 +156,7 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
}
/**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
* nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
* @mtd: MTD device structure
*
@@ -303,7 +299,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
if (getchip) {
chipnr = (int)(ofs >> chip->chip_shift);
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
/* Select the NAND device */
chip->select_chip(mtd, chipnr);
@@ -333,8 +329,10 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
i++;
} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
- if (getchip)
+ if (getchip) {
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
+ }
return res;
}
@@ -383,7 +381,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_oob_ops ops;
loff_t wr_ofs = ofs;
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
ops.datbuf = NULL;
ops.oobbuf = buf;
@@ -492,7 +490,7 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
void nand_wait_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- unsigned long timeo = jiffies + 2;
+ unsigned long timeo = jiffies + msecs_to_jiffies(20);
/* 400ms timeout */
if (in_interrupt() || oops_in_progress)
@@ -750,15 +748,15 @@ static void panic_nand_get_device(struct nand_chip *chip,
/**
* nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
* @mtd: MTD device structure
* @new_state: the state which is requested
*
* Get the device and lock it for exclusive access
*/
static int
-nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
+nand_get_device(struct mtd_info *mtd, int new_state)
{
+ struct nand_chip *chip = mtd->priv;
spinlock_t *lock = &chip->controller->lock;
wait_queue_head_t *wq = &chip->controller->wq;
DECLARE_WAITQUEUE(wait, current);
@@ -865,6 +863,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
led_trigger_event(nand_led_trigger, LED_OFF);
status = (int)chip->read_byte(mtd);
+ /* This can happen in case of a timeout or a buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
return status;
}
@@ -899,7 +899,7 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
/* See if device thinks it succeeded */
- if (status & 0x01) {
+ if (status & NAND_STATUS_FAIL) {
pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
@@ -932,7 +932,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (ofs + len == mtd->size)
len -= mtd->erasesize;
- nand_get_device(chip, mtd, FL_UNLOCKING);
+ nand_get_device(mtd, FL_UNLOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
@@ -950,6 +950,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0);
out:
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
@@ -981,7 +982,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (check_offs_len(mtd, ofs, len))
ret = -EINVAL;
- nand_get_device(chip, mtd, FL_LOCKING);
+ nand_get_device(mtd, FL_LOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
@@ -1004,7 +1005,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
/* See if device thinks it succeeded */
- if (status & 0x01) {
+ if (status & NAND_STATUS_FAIL) {
pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
@@ -1014,6 +1015,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0x1);
out:
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
@@ -1550,6 +1552,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, chipnr);
}
}
+ chip->select_chip(mtd, -1);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
@@ -1577,11 +1580,10 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
struct mtd_oob_ops ops;
int ret;
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
ops.len = len;
ops.datbuf = buf;
ops.oobbuf = NULL;
@@ -1804,6 +1806,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, chipnr);
}
}
+ chip->select_chip(mtd, -1);
ops->oobretlen = ops->ooblen - readlen;
@@ -1827,7 +1830,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- struct nand_chip *chip = mtd->priv;
int ret = -ENOTSUPP;
ops->retlen = 0;
@@ -1839,7 +1841,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -2186,8 +2188,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
- if (nand_check_wp(mtd))
- return -EIO;
+ if (nand_check_wp(mtd)) {
+ ret = -EIO;
+ goto err_out;
+ }
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
@@ -2199,8 +2203,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
chip->pagebuf = -1;
/* Don't allow multipage oob writes with offset */
- if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
- return -EINVAL;
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
while (1) {
int bytes = mtd->writesize;
@@ -2251,6 +2257,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
ops->retlen = ops->len - writelen;
if (unlikely(oob))
ops->oobretlen = ops->ooblen;
+
+err_out:
+ chip->select_chip(mtd, -1);
return ret;
}
@@ -2302,11 +2311,10 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
struct mtd_oob_ops ops;
int ret;
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
ops.len = len;
ops.datbuf = (uint8_t *)buf;
ops.oobbuf = NULL;
@@ -2377,8 +2385,10 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
/* Check, if it is write protected */
- if (nand_check_wp(mtd))
+ if (nand_check_wp(mtd)) {
+ chip->select_chip(mtd, -1);
return -EROFS;
+ }
/* Invalidate the page cache, if we write to the cached page */
if (page == chip->pagebuf)
@@ -2391,6 +2401,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
else
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+ chip->select_chip(mtd, -1);
+
if (status)
return status;
@@ -2408,7 +2420,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
- struct nand_chip *chip = mtd->priv;
int ret = -ENOTSUPP;
ops->retlen = 0;
@@ -2420,7 +2431,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -2513,7 +2524,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
return -EINVAL;
/* Grab the lock and see if the device is available */
- nand_get_device(chip, mtd, FL_ERASING);
+ nand_get_device(mtd, FL_ERASING);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -2623,6 +2634,7 @@ erase_exit:
ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
/* Deselect and wake up anyone waiting on the device */
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
/* Do call back function */
@@ -2658,12 +2670,10 @@ erase_exit:
*/
static void nand_sync(struct mtd_info *mtd)
{
- struct nand_chip *chip = mtd->priv;
-
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- nand_get_device(chip, mtd, FL_SYNCING);
+ nand_get_device(mtd, FL_SYNCING);
/* Release it and go back */
nand_release_device(mtd);
}
@@ -2749,9 +2759,7 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
*/
static int nand_suspend(struct mtd_info *mtd)
{
- struct nand_chip *chip = mtd->priv;
-
- return nand_get_device(chip, mtd, FL_PM_SUSPENDED);
+ return nand_get_device(mtd, FL_PM_SUSPENDED);
}
/**
@@ -2849,6 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int i;
int val;
+ /* ONFI must be probed in 8-bit mode; 16-bit chips should use NAND_BUSWIDTH_AUTO */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
+ return 0;
+ }
/* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
@@ -2913,7 +2926,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
*
* Check if an ID string is repeated within a given sequence of bytes at
* specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
- * period of 2). This is a helper function for nand_id_len(). Returns non-zero
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
* if the repetition has a period of @period; otherwise, returns zero.
*/
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
@@ -3242,11 +3255,15 @@ ident_done:
break;
}
- /*
- * Check, if buswidth is correct. Hardware drivers should set
- * chip correct!
- */
- if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(chip->options & NAND_BUSWIDTH_16);
+ chip->options |= busw;
+ nand_set_defaults(chip, busw);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+ * Check, if buswidth is correct. Hardware drivers should set
+ * chip correct!
+ */
pr_info("NAND device: Manufacturer ID:"
" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
*dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
@@ -3285,10 +3302,10 @@ ident_done:
chip->cmdfunc = nand_command_lp;
pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
- " page size: %d, OOB size: %d\n",
+ " %dMiB, page size: %d, OOB size: %d\n",
*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
chip->onfi_version ? chip->onfi_params.model : type->name,
- mtd->writesize, mtd->oobsize);
+ (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
return type;
}
@@ -3327,6 +3344,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return PTR_ERR(type);
}
+ chip->select_chip(mtd, -1);
+
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
chip->select_chip(mtd, i);
@@ -3336,8 +3355,11 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd))
+ nand_dev_id != chip->read_byte(mtd)) {
+ chip->select_chip(mtd, -1);
break;
+ }
+ chip->select_chip(mtd, -1);
}
if (i > 1)
pr_info("%d NAND chips detected\n", i);
@@ -3596,9 +3618,6 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Initialize state */
chip->state = FL_READY;
- /* De-select the device */
- chip->select_chip(mtd, -1);
-
/* Invalidate the pagebuffer reference */
chip->pagebuf = -1;
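Taken together, the nand_base changes above move chip deselection out of nand_get_device()/nand_release_device(): the helper now takes only the mtd, and every path that selects a chip is responsible for deselecting it with select_chip(mtd, -1) before releasing the controller lock. The resulting call ordering, in outline; this is a sketch of the contract, not a copy of any one function, and do_the_actual_io() is a hypothetical placeholder:

    /* Sketch: the lock/select/deselect ordering nand_base now expects. */
    static int example_nand_op(struct mtd_info *mtd, int chipnr)
    {
            struct nand_chip *chip = mtd->priv;
            int ret;

            nand_get_device(mtd, FL_WRITING);   /* lock the controller */
            chip->select_chip(mtd, chipnr);     /* select the target die */

            ret = do_the_actual_io(mtd);        /* hypothetical helper */

            chip->select_chip(mtd, -1);         /* callers now deselect... */
            nand_release_device(mtd);           /* ...before releasing the lock */
            return ret;
    }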
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a932c485eb04..818b65c85d12 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -42,6 +42,8 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -105,7 +107,6 @@ static char *weakblocks = NULL;
static char *weakpages = NULL;
static unsigned int bitflips = 0;
static char *gravepages = NULL;
-static unsigned int rptwear = 0;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
@@ -130,7 +131,6 @@ module_param(weakblocks, charp, 0400);
module_param(weakpages, charp, 0400);
module_param(bitflips, uint, 0400);
module_param(gravepages, charp, 0400);
-module_param(rptwear, uint, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
@@ -162,7 +162,6 @@ MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (z
MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
" separated by commas e.g. 1401:2 means page 1401"
" can be read only twice before failing");
-MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero");
MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
"The size is specified in erase blocks and as the exponent of a power of two"
" e.g. 5 means a size of 32 erase blocks");
@@ -286,6 +285,11 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
#define NS_MAX_HELD_PAGES 16
+struct nandsim_debug_info {
+ struct dentry *dfs_root;
+ struct dentry *dfs_wear_report;
+};
+
/*
* A union to represent flash memory contents and flash buffer.
*/
@@ -365,6 +369,8 @@ struct nandsim {
void *file_buf;
struct page *held_pages[NS_MAX_HELD_PAGES];
int held_cnt;
+
+ struct nandsim_debug_info dbg;
};
/*
@@ -442,11 +448,123 @@ static LIST_HEAD(grave_pages);
static unsigned long *erase_block_wear = NULL;
static unsigned int wear_eb_count = 0;
static unsigned long total_wear = 0;
-static unsigned int rptwear_cnt = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
+static int nandsim_debugfs_show(struct seq_file *m, void *private)
+{
+ unsigned long wmin = -1, wmax = 0, avg;
+ unsigned long deciles[10], decile_max[10], tot = 0;
+ unsigned int i;
+
+ /* Calc wear stats */
+ for (i = 0; i < wear_eb_count; ++i) {
+ unsigned long wear = erase_block_wear[i];
+ if (wear < wmin)
+ wmin = wear;
+ if (wear > wmax)
+ wmax = wear;
+ tot += wear;
+ }
+
+ for (i = 0; i < 9; ++i) {
+ deciles[i] = 0;
+ decile_max[i] = (wmax * (i + 1) + 5) / 10;
+ }
+ deciles[9] = 0;
+ decile_max[9] = wmax;
+ for (i = 0; i < wear_eb_count; ++i) {
+ int d;
+ unsigned long wear = erase_block_wear[i];
+ for (d = 0; d < 10; ++d)
+ if (wear <= decile_max[d]) {
+ deciles[d] += 1;
+ break;
+ }
+ }
+ avg = tot / wear_eb_count;
+
+ /* Output wear report */
+ seq_printf(m, "Total numbers of erases: %lu\n", tot);
+ seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
+ seq_printf(m, "Average number of erases: %lu\n", avg);
+ seq_printf(m, "Maximum number of erases: %lu\n", wmax);
+ seq_printf(m, "Minimum number of erases: %lu\n", wmin);
+ for (i = 0; i < 10; ++i) {
+ unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+ if (from > decile_max[i])
+ continue;
+ seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
+ from,
+ decile_max[i],
+ deciles[i]);
+ }
+
+ return 0;
+}
+
+static int nandsim_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nandsim_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations dfs_fops = {
+ .open = nandsim_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * nandsim_debugfs_create - initialize debugfs
+ * @dev: nandsim device description object
+ *
+ * This function creates all debugfs files for the nandsim device @dev. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+static int nandsim_debugfs_create(struct nandsim *dev)
+{
+ struct nandsim_debug_info *dbg = &dev->dbg;
+ struct dentry *dent;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dent = debugfs_create_dir("nandsim", NULL);
+ if (IS_ERR_OR_NULL(dent)) {
+ int err = dent ? PTR_ERR(dent) : -ENODEV;
+
+ NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
+ err);
+ return err;
+ }
+ dbg->dfs_root = dent;
+
+ dent = debugfs_create_file("wear_report", S_IRUSR,
+ dbg->dfs_root, dev, &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dbg->dfs_wear_report = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(dbg->dfs_root);
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ return err;
+}
+
+/**
+ * nandsim_debugfs_remove - destroy all debugfs files
+ */
+static void nandsim_debugfs_remove(struct nandsim *ns)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ns->dbg.dfs_root);
+}
+
/*
* Allocate array of page pointers, create slab allocation for an array
* and initialize the array by NULL pointers.
@@ -911,8 +1029,6 @@ static int setup_wear_reporting(struct mtd_info *mtd)
{
size_t mem;
- if (!rptwear)
- return 0;
wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
@@ -929,64 +1045,18 @@ static int setup_wear_reporting(struct mtd_info *mtd)
static void update_wear(unsigned int erase_block_no)
{
- unsigned long wmin = -1, wmax = 0, avg;
- unsigned long deciles[10], decile_max[10], tot = 0;
- unsigned int i;
-
if (!erase_block_wear)
return;
total_wear += 1;
+ /*
+ * TODO: Notify this through a debugfs entry,
+ * instead of showing an error message.
+ */
if (total_wear == 0)
NS_ERR("Erase counter total overflow\n");
erase_block_wear[erase_block_no] += 1;
if (erase_block_wear[erase_block_no] == 0)
NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
- rptwear_cnt += 1;
- if (rptwear_cnt < rptwear)
- return;
- rptwear_cnt = 0;
- /* Calc wear stats */
- for (i = 0; i < wear_eb_count; ++i) {
- unsigned long wear = erase_block_wear[i];
- if (wear < wmin)
- wmin = wear;
- if (wear > wmax)
- wmax = wear;
- tot += wear;
- }
- for (i = 0; i < 9; ++i) {
- deciles[i] = 0;
- decile_max[i] = (wmax * (i + 1) + 5) / 10;
- }
- deciles[9] = 0;
- decile_max[9] = wmax;
- for (i = 0; i < wear_eb_count; ++i) {
- int d;
- unsigned long wear = erase_block_wear[i];
- for (d = 0; d < 10; ++d)
- if (wear <= decile_max[d]) {
- deciles[d] += 1;
- break;
- }
- }
- avg = tot / wear_eb_count;
- /* Output wear report */
- NS_INFO("*** Wear Report ***\n");
- NS_INFO("Total numbers of erases: %lu\n", tot);
- NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
- NS_INFO("Average number of erases: %lu\n", avg);
- NS_INFO("Maximum number of erases: %lu\n", wmax);
- NS_INFO("Minimum number of erases: %lu\n", wmin);
- for (i = 0; i < 10; ++i) {
- unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
- if (from > decile_max[i])
- continue;
- NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
- from,
- decile_max[i],
- deciles[i]);
- }
- NS_INFO("*** End of Wear Report ***\n");
}
/*
@@ -1397,10 +1467,7 @@ int do_read_error(struct nandsim *ns, int num)
unsigned int page_no = ns->regs.row;
if (read_error(page_no)) {
- int i;
- memset(ns->buf.byte, 0xFF, num);
- for (i = 0; i < num; ++i)
- ns->buf.byte[i] = random32();
+ prandom_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
@@ -2330,6 +2397,9 @@ static int __init ns_init_module(void)
if ((retval = setup_wear_reporting(nsmtd)) != 0)
goto err_exit;
+ if ((retval = nandsim_debugfs_create(nand)) != 0)
+ goto err_exit;
+
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
@@ -2369,6 +2439,7 @@ static void __exit ns_cleanup_module(void)
struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
int i;
+ nandsim_debugfs_remove(ns);
free_nandsim(ns); /* Free nandsim private resources */
nand_release(nsmtd); /* Unregister driver */
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 5fd3f010e3ae..8e148f1478fd 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -197,7 +197,7 @@ err:
return ret;
}
-static int __devinit ndfc_probe(struct platform_device *ofdev)
+static int ndfc_probe(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc;
const __be32 *reg;
@@ -256,7 +256,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
return 0;
}
-static int __devexit ndfc_remove(struct platform_device *ofdev)
+static int ndfc_remove(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
@@ -279,7 +279,7 @@ static struct platform_driver ndfc_driver = {
.of_match_table = ndfc_match,
},
.probe = ndfc_probe,
- .remove = __devexit_p(ndfc_remove),
+ .remove = ndfc_remove,
};
module_platform_driver(ndfc_driver);
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
deleted file mode 100644
index 9ee0c4edfacf..000000000000
--- a/drivers/mtd/nand/nomadik_nand.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * drivers/mtd/nand/nomadik_nand.c
- *
- * Overview:
- * Driver for on-board NAND flash on Nomadik Platforms
- *
- * Copyright © 2007 STMicroelectronics Pvt. Ltd.
- * Author: Sachin Verma <sachin.verma@st.com>
- *
- * Copyright © 2009 Alessandro Rubini
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/platform_data/mtd-nomadik-nand.h>
-#include <mach/fsmc.h>
-
-#include <mtd/mtd-abi.h>
-
-struct nomadik_nand_host {
- struct mtd_info mtd;
- struct nand_chip nand;
- void __iomem *data_va;
- void __iomem *cmd_va;
- void __iomem *addr_va;
- struct nand_bbt_descr *bbt_desc;
-};
-
-static struct nand_ecclayout nomadik_ecc_layout = {
- .eccbytes = 3 * 4,
- .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
- 0x02, 0x03, 0x04,
- 0x12, 0x13, 0x14,
- 0x22, 0x23, 0x24,
- 0x32, 0x33, 0x34},
- /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */
- .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
-};
-
-static void nomadik_ecc_control(struct mtd_info *mtd, int mode)
-{
- /* No need to enable hw ecc, it's on by default */
-}
-
-static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *nand = mtd->priv;
- struct nomadik_nand_host *host = nand->priv;
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, host->cmd_va);
- else
- writeb(cmd, host->addr_va);
-}
-
-static int nomadik_nand_probe(struct platform_device *pdev)
-{
- struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
- struct nomadik_nand_host *host;
- struct mtd_info *mtd;
- struct nand_chip *nand;
- struct resource *res;
- int ret = 0;
-
- /* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "Failed to allocate device structure.\n");
- return -ENOMEM;
- }
-
- /* Call the client's init function, if any */
- if (pdata->init)
- ret = pdata->init();
- if (ret < 0) {
- dev_err(&pdev->dev, "Init function failed\n");
- goto err;
- }
-
- /* ioremap three regions */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->addr_va = ioremap(res->start, resource_size(res));
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->data_va = ioremap(res->start, resource_size(res));
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->cmd_va = ioremap(res->start, resource_size(res));
-
- if (!host->addr_va || !host->data_va || !host->cmd_va) {
- ret = -ENOMEM;
- goto err_unmap;
- }
-
- /* Link all private pointers */
- mtd = &host->mtd;
- nand = &host->nand;
- mtd->priv = nand;
- nand->priv = host;
-
- host->mtd.owner = THIS_MODULE;
- nand->IO_ADDR_R = host->data_va;
- nand->IO_ADDR_W = host->data_va;
- nand->cmd_ctrl = nomadik_cmd_ctrl;
-
- /*
- * This stanza declares ECC_HW but uses soft routines. It's because
- * HW claims to make the calculation but not the correction. However,
- * I haven't managed to get the desired data out of it until now.
- */
- nand->ecc.mode = NAND_ECC_SOFT;
- nand->ecc.layout = &nomadik_ecc_layout;
- nand->ecc.hwctl = nomadik_ecc_control;
- nand->ecc.size = 512;
- nand->ecc.bytes = 3;
-
- nand->options = pdata->options;
-
- /*
- * Scan to find existence of the device
- */
- if (nand_scan(&host->mtd, 1)) {
- ret = -ENXIO;
- goto err_unmap;
- }
-
- mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
-
- platform_set_drvdata(pdev, host);
- return 0;
-
- err_unmap:
- if (host->cmd_va)
- iounmap(host->cmd_va);
- if (host->data_va)
- iounmap(host->data_va);
- if (host->addr_va)
- iounmap(host->addr_va);
- err:
- kfree(host);
- return ret;
-}
-
-/*
- * Clean up routine
- */
-static int nomadik_nand_remove(struct platform_device *pdev)
-{
- struct nomadik_nand_host *host = platform_get_drvdata(pdev);
- struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
-
- if (pdata->exit)
- pdata->exit();
-
- if (host) {
- nand_release(&host->mtd);
- iounmap(host->cmd_va);
- iounmap(host->data_va);
- iounmap(host->addr_va);
- kfree(host);
- }
- return 0;
-}
-
-static int nomadik_nand_suspend(struct device *dev)
-{
- struct nomadik_nand_host *host = dev_get_drvdata(dev);
- int ret = 0;
- if (host)
- ret = mtd_suspend(&host->mtd);
- return ret;
-}
-
-static int nomadik_nand_resume(struct device *dev)
-{
- struct nomadik_nand_host *host = dev_get_drvdata(dev);
- if (host)
- mtd_resume(&host->mtd);
- return 0;
-}
-
-static const struct dev_pm_ops nomadik_nand_pm_ops = {
- .suspend = nomadik_nand_suspend,
- .resume = nomadik_nand_resume,
-};
-
-static struct platform_driver nomadik_nand_driver = {
- .probe = nomadik_nand_probe,
- .remove = nomadik_nand_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "nomadik_nand",
- .pm = &nomadik_nand_pm_ops,
- },
-};
-
-module_platform_driver(nomadik_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
-MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 94dc46bc118c..a6191198d259 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -246,7 +246,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit nuc900_nand_probe(struct platform_device *pdev)
+static int nuc900_nand_probe(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
@@ -317,7 +317,7 @@ fail1: kfree(nuc900_nand);
return retval;
}
-static int __devexit nuc900_nand_remove(struct platform_device *pdev)
+static int nuc900_nand_remove(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
@@ -340,7 +340,7 @@ static int __devexit nuc900_nand_remove(struct platform_device *pdev)
static struct platform_driver nuc900_nand_driver = {
.probe = nuc900_nand_probe,
- .remove = __devexit_p(nuc900_nand_remove),
+ .remove = nuc900_nand_remove,
.driver = {
.name = "nuc900-fmi",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 1f34ba104ef4..1d333497cfcb 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1323,7 +1323,7 @@ static void omap3_free_bch(struct mtd_info *mtd)
}
#endif /* CONFIG_MTD_NAND_OMAP_BCH */
-static int __devinit omap_nand_probe(struct platform_device *pdev)
+static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
@@ -1332,6 +1332,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
dma_cap_mask_t mask;
unsigned sig;
struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -1557,7 +1558,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+ ppdata.of_node = pdata->of_node;
+ mtd_device_parse_register(&info->mtd, NULL, &ppdata, pdata->parts,
pdata->nr_parts);
platform_set_drvdata(pdev, &info->mtd);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index aefaf8cd31ef..cd72b9299f6b 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -194,7 +194,7 @@ no_res:
return ret;
}
-static int __devexit orion_nand_remove(struct platform_device *pdev)
+static int orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nc = mtd->priv;
@@ -223,7 +223,7 @@ static struct of_device_id orion_nand_of_match_table[] = {
#endif
static struct platform_driver orion_nand_driver = {
- .remove = __devexit_p(orion_nand_remove),
+ .remove = orion_nand_remove,
.driver = {
.name = "orion_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 1440e51cedcc..5a67082c07ee 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,7 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
-static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
+static int pasemi_nand_probe(struct platform_device *ofdev)
{
struct pci_dev *pdev;
struct device_node *np = ofdev->dev.of_node;
@@ -184,7 +184,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
return err;
}
-static int __devexit pasemi_nand_remove(struct platform_device *ofdev)
+static int pasemi_nand_remove(struct platform_device *ofdev)
{
struct nand_chip *chip;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index a47ee68a0cfa..c004566a9ad2 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -28,7 +28,7 @@ static const char *part_probe_types[] = { "cmdlinepart", NULL };
/*
* Probe for the NAND device.
*/
-static int __devinit plat_nand_probe(struct platform_device *pdev)
+static int plat_nand_probe(struct platform_device *pdev)
{
struct platform_nand_data *pdata = pdev->dev.platform_data;
struct mtd_part_parser_data ppdata;
@@ -134,7 +134,7 @@ out_free:
/*
* Remove a NAND device.
*/
-static int __devexit plat_nand_remove(struct platform_device *pdev)
+static int plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = pdev->dev.platform_data;
@@ -160,7 +160,7 @@ MODULE_DEVICE_TABLE(of, plat_nand_match);
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove = __devexit_p(plat_nand_remove),
+ .remove = plat_nand_remove,
.driver = {
.name = "gen_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 79ded48e7427..d65afd23e171 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -730,11 +730,14 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
- if (set)
+ if (set) {
mtd->mtd.name = set->name;
- return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+ return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
set->partitions, set->nr_partitions);
+ }
+
+ return -ENODEV;
}
/**
@@ -949,10 +952,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
info->platform = plat;
info->cpu_type = cpu_type;
- info->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (info->regs == NULL) {
- dev_err(&pdev->dev, "cannot reserve register region\n");
- err = -EIO;
+ info->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->regs)) {
+ err = PTR_ERR(info->regs);
goto exit_error;
}
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index f48ac5d80bbf..57b3971c9c0a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -23,11 +23,18 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -106,6 +113,84 @@ static void wait_completion(struct sh_flctl *flctl)
writeb(0x0, FLTRCR(flctl));
}
+static void flctl_dma_complete(void *param)
+{
+ struct sh_flctl *flctl = param;
+
+ complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+ if (flctl->chan_fifo0_rx) {
+ dma_release_channel(flctl->chan_fifo0_rx);
+ flctl->chan_fifo0_rx = NULL;
+ }
+ if (flctl->chan_fifo0_tx) {
+ dma_release_channel(flctl->chan_fifo0_tx);
+ flctl->chan_fifo0_tx = NULL;
+ }
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+ dma_cap_mask_t mask;
+ struct dma_slave_config cfg;
+ struct platform_device *pdev = flctl->pdev;
+ struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+
+ if (!pdata)
+ return;
+
+ if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+ return;
+
+ /* We can either use DMA for both Tx and Rx or not use it at all */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)pdata->slave_id_fifo0_tx);
+ dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+ flctl->chan_fifo0_tx);
+
+ if (!flctl->chan_fifo0_tx)
+ return;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.slave_id = pdata->slave_id_fifo0_tx;
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)pdata->slave_id_fifo0_rx);
+ dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+ flctl->chan_fifo0_rx);
+
+ if (!flctl->chan_fifo0_rx)
+ goto err;
+
+ cfg.slave_id = pdata->slave_id_fifo0_rx;
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
+ ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ init_completion(&flctl->dma_complete);
+
+ return;
+
+err:
+ flctl_release_dma(flctl);
+}
+
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -225,7 +310,7 @@ static enum flctl_ecc_res_t wait_recfifo_ready
for (i = 0; i < 3; i++) {
uint8_t org;
- int index;
+ unsigned int index;
data = readl(ecc_reg[i]);
@@ -261,6 +346,70 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
timeout_error(flctl, __func__);
}
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+ int len, enum dma_data_direction dir)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan;
+ enum dma_transfer_direction tr_dir;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie = -EINVAL;
+ uint32_t reg;
+ int ret;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = flctl->chan_fifo0_rx;
+ tr_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = flctl->chan_fifo0_tx;
+ tr_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+ if (dma_addr)
+ desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+ tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (desc) {
+ reg = readl(FLINTDMACR(flctl));
+ reg |= DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ desc->callback = flctl_dma_complete;
+ desc->callback_param = flctl;
+ cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(chan);
+ } else {
+ /* DMA failed, fall back to PIO */
+ flctl_release_dma(flctl);
+ dev_warn(&flctl->pdev->dev,
+ "DMA failed, falling back to PIO\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&flctl->dma_complete,
+ msecs_to_jiffies(3000));
+
+ if (ret <= 0) {
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ }
+
+out:
+ reg = readl(FLINTDMACR(flctl));
+ reg &= ~DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+ /* ret > 0 is success */
+ return ret;
+}
+
static void read_datareg(struct sh_flctl *flctl, int offset)
{
unsigned long data;
@@ -279,11 +428,20 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
len_4align = (rlen + 3) / 4;
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_rx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+ goto convert; /* DMA success */
+
+ /* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_rfifo_ready(flctl);
buf[i] = readl(FLDTFIFO(flctl));
- buf[i] = be32_to_cpu(buf[i]);
}
+
+convert:
+ for (i = 0; i < len_4align; i++)
+ buf[i] = be32_to_cpu(buf[i]);
}
static enum flctl_ecc_res_t read_ecfiforeg
@@ -305,28 +463,39 @@ static enum flctl_ecc_res_t read_ecfiforeg
return res;
}
-static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+static void write_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
{
int i, len_4align;
- unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
- void *fifo_addr = (void *)FLDTFIFO(flctl);
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
for (i = 0; i < len_4align; i++) {
wait_wfifo_ready(flctl);
- writel(cpu_to_be32(data[i]), fifo_addr);
+ writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
}
}
-static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
{
int i, len_4align;
- unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
+
+ for (i = 0; i < len_4align; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_tx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+ return; /* DMA success */
+
+ /* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_wecfifo_ready(flctl);
- writel(cpu_to_be32(data[i]), FLECFIFO(flctl));
+ writel(buf[i], FLECFIFO(flctl));
}
}
@@ -750,41 +919,35 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- memcpy(&flctl->done_buff[index], buf, len);
+ memcpy(&flctl->done_buff[flctl->index], buf, len);
flctl->index += len;
}
static uint8_t flctl_read_byte(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
uint8_t data;
- data = flctl->done_buff[index];
+ data = flctl->done_buff[flctl->index];
flctl->index++;
return data;
}
static uint16_t flctl_read_word(struct mtd_info *mtd)
{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- uint16_t data;
- uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
- data = *buf;
- flctl->index += 2;
- return data;
+ flctl->index += 2;
+ return *buf;
}
static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- memcpy(buf, &flctl->done_buff[index], len);
+ memcpy(buf, &flctl->done_buff[flctl->index], len);
flctl->index += len;
}
@@ -858,7 +1021,74 @@ static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit flctl_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+struct flctl_soc_config {
+ unsigned long flcmncr_val;
+ unsigned has_hwecc:1;
+ unsigned use_holden:1;
+};
+
+static struct flctl_soc_config flctl_sh7372_config = {
+ .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
+ .has_hwecc = 1,
+ .use_holden = 1,
+};
+
+static const struct of_device_id of_flctl_match[] = {
+ { .compatible = "renesas,shmobile-flctl-sh7372",
+ .data = &flctl_sh7372_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_flctl_match);
+
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ const struct of_device_id *match;
+ struct flctl_soc_config *config;
+ struct sh_flctl_platform_data *pdata;
+ struct device_node *dn = dev->of_node;
+ int ret;
+
+ match = of_match_device(of_flctl_match, dev);
+ if (match)
+ config = (struct flctl_soc_config *)match->data;
+ else {
+ dev_err(dev, "%s: no OF configuration attached\n", __func__);
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
+ GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "%s: failed to allocate config data\n", __func__);
+ return NULL;
+ }
+
+ /* set SoC specific options */
+ pdata->flcmncr_val = config->flcmncr_val;
+ pdata->has_hwecc = config->has_hwecc;
+ pdata->use_holden = config->use_holden;
+
+ /* parse user defined options */
+ ret = of_get_nand_bus_width(dn);
+ if (ret == 16)
+ pdata->flcmncr_val |= SEL_16BIT;
+ else if (ret != 8) {
+ dev_err(dev, "%s: invalid bus width\n", __func__);
+ return NULL;
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+#define of_flctl_match NULL
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static int flctl_probe(struct platform_device *pdev)
{
struct resource *res;
struct sh_flctl *flctl;
@@ -867,12 +1097,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
struct sh_flctl_platform_data *pdata;
int ret = -ENXIO;
int irq;
-
- pdata = pdev->dev.platform_data;
- if (pdata == NULL) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
+ struct mtd_part_parser_data ppdata = {};
flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
if (!flctl) {
@@ -904,6 +1129,17 @@ static int __devinit flctl_probe(struct platform_device *pdev)
goto err_flste;
}
+ if (pdev->dev.of_node)
+ pdata = flctl_parse_dt(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no setup data defined\n");
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+
platform_set_drvdata(pdev, flctl);
flctl_mtd = &flctl->mtd;
nand = &flctl->chip;
@@ -932,6 +1168,8 @@ static int __devinit flctl_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
+ flctl_setup_dma(flctl);
+
ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
goto err_chip;
@@ -944,12 +1182,16 @@ static int __devinit flctl_probe(struct platform_device *pdev)
if (ret)
goto err_chip;
- mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
+ pdata->nr_parts);
return 0;
err_chip:
+ flctl_release_dma(flctl);
pm_runtime_disable(&pdev->dev);
+err_pdata:
free_irq(irq, flctl);
err_flste:
iounmap(flctl->reg);
@@ -958,10 +1200,11 @@ err_iomap:
return ret;
}
-static int __devexit flctl_remove(struct platform_device *pdev)
+static int flctl_remove(struct platform_device *pdev)
{
struct sh_flctl *flctl = platform_get_drvdata(pdev);
+ flctl_release_dma(flctl);
nand_release(&flctl->mtd);
pm_runtime_disable(&pdev->dev);
free_irq(platform_get_irq(pdev, 0), flctl);
@@ -976,6 +1219,7 @@ static struct platform_driver flctl_driver = {
.driver = {
.name = "sh_flctl",
.owner = THIS_MODULE,
+ .of_match_table = of_flctl_match,
},
};
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 3421e3762a5a..127bc4271821 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -106,7 +106,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
/*
* Main initialization routine
*/
-static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
+static int sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct resource *r;
@@ -205,7 +205,7 @@ err_get_res:
/*
* Clean up routine
*/
-static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
+static int sharpsl_nand_remove(struct platform_device *pdev)
{
struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
@@ -228,7 +228,7 @@ static struct platform_driver sharpsl_nand_driver = {
.owner = THIS_MODULE,
},
.probe = sharpsl_nand_probe,
- .remove = __devexit_p(sharpsl_nand_remove),
+ .remove = sharpsl_nand_remove,
};
module_platform_driver(sharpsl_nand_driver);
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index f3f28fafbf7a..09dde7d27178 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -140,7 +140,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
/*
* Probe for the NAND device.
*/
-static int __devinit socrates_nand_probe(struct platform_device *ofdev)
+static int socrates_nand_probe(struct platform_device *ofdev)
{
struct socrates_nand_host *host;
struct mtd_info *mtd;
@@ -220,7 +220,7 @@ out:
/*
* Remove a NAND device.
*/
-static int __devexit socrates_nand_remove(struct platform_device *ofdev)
+static int socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
struct mtd_info *mtd = &host->mtd;
@@ -251,7 +251,7 @@ static struct platform_driver socrates_nand_driver = {
.of_match_table = socrates_nand_match,
},
.probe = socrates_nand_probe,
- .remove = __devexit_p(socrates_nand_remove),
+ .remove = socrates_nand_remove,
};
module_platform_driver(socrates_nand_driver);
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index e3d7266e256f..e1e8748aa47b 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -9,6 +9,7 @@
* (C) Copyright TOSHIBA CORPORATION 2004-2007
* All Rights Reserved.
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -286,9 +287,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->base = devm_request_and_ioremap(&dev->dev, res);
- if (!drvdata->base)
- return -EBUSY;
+ drvdata->base = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
hold = plat->hold ?: 20; /* tDH */
spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index d9127e2ed808..dbd3aa574eaf 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -71,7 +71,10 @@ static int parse_ofpart_partitions(struct mtd_info *master,
(*pparts)[i].name = (char *)partname;
if (of_get_property(pp, "read-only", &len))
- (*pparts)[i].mask_flags = MTD_WRITEABLE;
+ (*pparts)[i].mask_flags |= MTD_WRITEABLE;
+
+ if (of_get_property(pp, "lock", &len))
+ (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
i++;
}
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 1c4f97c63e62..9f11562f849d 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -35,7 +35,7 @@ struct onenand_info {
struct onenand_chip onenand;
};
-static int __devinit generic_onenand_probe(struct platform_device *pdev)
+static int generic_onenand_probe(struct platform_device *pdev)
{
struct onenand_info *info;
struct onenand_platform_data *pdata = pdev->dev.platform_data;
@@ -88,7 +88,7 @@ out_free_info:
return err;
}
-static int __devexit generic_onenand_remove(struct platform_device *pdev)
+static int generic_onenand_remove(struct platform_device *pdev)
{
struct onenand_info *info = platform_get_drvdata(pdev);
struct resource *res = pdev->resource;
@@ -112,7 +112,7 @@ static struct platform_driver generic_onenand_driver = {
.owner = THIS_MODULE,
},
.probe = generic_onenand_probe,
- .remove = __devexit_p(generic_onenand_remove),
+ .remove = generic_onenand_remove,
};
module_platform_driver(generic_onenand_driver);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 00cd3da29435..eec2aedb4ab8 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -630,13 +630,14 @@ static int omap2_onenand_disable(struct mtd_info *mtd)
return ret;
}
-static int __devinit omap2_onenand_probe(struct platform_device *pdev)
+static int omap2_onenand_probe(struct platform_device *pdev)
{
struct omap_onenand_platform_data *pdata;
struct omap2_onenand *c;
struct onenand_chip *this;
int r;
struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -767,7 +768,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- r = mtd_device_parse_register(&c->mtd, NULL, NULL,
+ ppdata.of_node = pdata->of_node;
+ r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
if (r)
@@ -799,7 +801,7 @@ err_kfree:
return r;
}
-static int __devexit omap2_onenand_remove(struct platform_device *pdev)
+static int omap2_onenand_remove(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -822,7 +824,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = __devexit_p(omap2_onenand_remove),
+ .remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 8e4b3f2742ba..33f2a8fb8df9 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -1053,7 +1053,7 @@ onenand_fail:
return err;
}
-static int __devexit s3c_onenand_remove(struct platform_device *pdev)
+static int s3c_onenand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
@@ -1130,7 +1130,7 @@ static struct platform_driver s3c_onenand_driver = {
},
.id_table = s3c_onenand_driver_ids,
.probe = s3c_onenand_probe,
- .remove = __devexit_p(s3c_onenand_remove),
+ .remove = s3c_onenand_remove,
};
module_platform_driver(s3c_onenand_driver);
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/mtd_nandbiterrs.c
index cc8d62cb280c..207bf9a9972f 100644
--- a/drivers/mtd/tests/mtd_nandbiterrs.c
+++ b/drivers/mtd/tests/mtd_nandbiterrs.c
@@ -39,6 +39,9 @@
* this program; see the file COPYING. If not, write to the Free Software
* Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -47,8 +50,6 @@
#include <linux/mtd/nand.h>
#include <linux/slab.h>
-#define msg(FMT, VA...) pr_info("mtd_nandbiterrs: "FMT, ##VA)
-
static int dev;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -103,7 +104,7 @@ static int erase_block(void)
struct erase_info ei;
loff_t addr = eraseblock * mtd->erasesize;
- msg("erase_block\n");
+ pr_info("erase_block\n");
memset(&ei, 0, sizeof(struct erase_info));
ei.mtd = mtd;
@@ -112,7 +113,7 @@ static int erase_block(void)
err = mtd_erase(mtd, &ei);
if (err || ei.state == MTD_ERASE_FAILED) {
- msg("error %d while erasing\n", err);
+ pr_err("error %d while erasing\n", err);
if (!err)
err = -EIO;
return err;
@@ -128,11 +129,11 @@ static int write_page(int log)
size_t written;
if (log)
- msg("write_page\n");
+ pr_info("write_page\n");
err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
if (err || written != mtd->writesize) {
- msg("error: write failed at %#llx\n", (long long)offset);
+ pr_err("error: write failed at %#llx\n", (long long)offset);
if (!err)
err = -EIO;
}
@@ -147,7 +148,7 @@ static int rewrite_page(int log)
struct mtd_oob_ops ops;
if (log)
- msg("rewrite page\n");
+ pr_info("rewrite page\n");
ops.mode = MTD_OPS_RAW; /* No ECC */
ops.len = mtd->writesize;
@@ -160,7 +161,7 @@ static int rewrite_page(int log)
err = mtd_write_oob(mtd, offset, &ops);
if (err || ops.retlen != mtd->writesize) {
- msg("error: write_oob failed (%d)\n", err);
+ pr_err("error: write_oob failed (%d)\n", err);
if (!err)
err = -EIO;
}
@@ -177,7 +178,7 @@ static int read_page(int log)
struct mtd_ecc_stats oldstats;
if (log)
- msg("read_page\n");
+ pr_info("read_page\n");
/* Saving last mtd stats */
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
@@ -187,7 +188,7 @@ static int read_page(int log)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
- msg("error: read failed at %#llx\n", (long long)offset);
+ pr_err("error: read failed at %#llx\n", (long long)offset);
if (err >= 0)
err = -EIO;
}
@@ -201,11 +202,11 @@ static int verify_page(int log)
unsigned i, errs = 0;
if (log)
- msg("verify_page\n");
+ pr_info("verify_page\n");
for (i = 0; i < mtd->writesize; i++) {
if (rbuffer[i] != hash(i+seed)) {
- msg("Error: page offset %u, expected %02x, got %02x\n",
+ pr_err("Error: page offset %u, expected %02x, got %02x\n",
i, hash(i+seed), rbuffer[i]);
errs++;
}
@@ -230,13 +231,13 @@ static int insert_biterror(unsigned byte)
for (bit = 7; bit >= 0; bit--) {
if (CBIT(wbuffer[byte], bit)) {
BCLR(wbuffer[byte], bit);
- msg("Inserted biterror @ %u/%u\n", byte, bit);
+ pr_info("Inserted biterror @ %u/%u\n", byte, bit);
return 0;
}
}
byte++;
}
- msg("biterror: Failed to find a '1' bit\n");
+ pr_err("biterror: Failed to find a '1' bit\n");
return -EIO;
}
@@ -248,7 +249,7 @@ static int incremental_errors_test(void)
unsigned i;
unsigned errs_per_subpage = 0;
- msg("incremental biterrors test\n");
+ pr_info("incremental biterrors test\n");
for (i = 0; i < mtd->writesize; i++)
wbuffer[i] = hash(i+seed);
@@ -265,9 +266,9 @@ static int incremental_errors_test(void)
err = read_page(1);
if (err > 0)
- msg("Read reported %d corrected bit errors\n", err);
+ pr_info("Read reported %d corrected bit errors\n", err);
if (err < 0) {
- msg("After %d biterrors per subpage, read reported error %d\n",
+ pr_err("After %d biterrors per subpage, read reported error %d\n",
errs_per_subpage, err);
err = 0;
goto exit;
@@ -275,11 +276,11 @@ static int incremental_errors_test(void)
err = verify_page(1);
if (err) {
- msg("ECC failure, read data is incorrect despite read success\n");
+ pr_err("ECC failure, read data is incorrect despite read success\n");
goto exit;
}
- msg("Successfully corrected %d bit errors per subpage\n",
+ pr_info("Successfully corrected %d bit errors per subpage\n",
errs_per_subpage);
for (i = 0; i < subcount; i++) {
@@ -311,7 +312,7 @@ static int overwrite_test(void)
memset(bitstats, 0, sizeof(bitstats));
- msg("overwrite biterrors test\n");
+ pr_info("overwrite biterrors test\n");
for (i = 0; i < mtd->writesize; i++)
wbuffer[i] = hash(i+seed);
@@ -329,18 +330,18 @@ static int overwrite_test(void)
err = read_page(0);
if (err >= 0) {
if (err >= MAXBITS) {
- msg("Implausible number of bit errors corrected\n");
+ pr_info("Implausible number of bit errors corrected\n");
err = -EIO;
break;
}
bitstats[err]++;
if (err > max_corrected) {
max_corrected = err;
- msg("Read reported %d corrected bit errors\n",
+ pr_info("Read reported %d corrected bit errors\n",
err);
}
} else { /* err < 0 */
- msg("Read reported error %d\n", err);
+ pr_info("Read reported error %d\n", err);
err = 0;
break;
}
@@ -348,7 +349,7 @@ static int overwrite_test(void)
err = verify_page(0);
if (err) {
bitstats[max_corrected] = opno;
- msg("ECC failure, read data is incorrect despite read success\n");
+ pr_info("ECC failure, read data is incorrect despite read success\n");
break;
}
@@ -357,9 +358,9 @@ static int overwrite_test(void)
/* At this point bitstats[0] contains the number of ops with no bit
* errors, bitstats[1] the number of ops with 1 bit error, etc. */
- msg("Bit error histogram (%d operations total):\n", opno);
+ pr_info("Bit error histogram (%d operations total):\n", opno);
for (i = 0; i < max_corrected; i++)
- msg("Page reads with %3d corrected bit errors: %d\n",
+ pr_info("Page reads with %3d corrected bit errors: %d\n",
i, bitstats[i]);
exit:
@@ -370,36 +371,36 @@ static int __init mtd_nandbiterrs_init(void)
{
int err = 0;
- msg("\n");
- msg("==================================================\n");
- msg("MTD device: %d\n", dev);
+ printk("\n");
+ printk(KERN_INFO "==================================================\n");
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- msg("error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
goto exit_mtddev;
}
if (mtd->type != MTD_NANDFLASH) {
- msg("this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
err = -ENODEV;
goto exit_nand;
}
- msg("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
+ pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
(unsigned long long)mtd->size, mtd->erasesize,
mtd->writesize, mtd->oobsize);
subsize = mtd->writesize >> mtd->subpage_sft;
subcount = mtd->writesize / subsize;
- msg("Device uses %d subpages of %d bytes\n", subcount, subsize);
+ pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
offset = page_offset * mtd->writesize;
eraseblock = mtd_div_by_eb(offset, mtd);
- msg("Using page=%u, offset=%llu, eraseblock=%u\n",
+ pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
page_offset, offset, eraseblock);
wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
@@ -432,8 +433,8 @@ static int __init mtd_nandbiterrs_init(void)
goto exit_error;
err = -EIO;
- msg("finished successfully.\n");
- msg("==================================================\n");
+ pr_info("finished successfully.\n");
+ printk(KERN_INFO "==================================================\n");
exit_error:
kfree(rbuffer);
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index b437fa425077..1eee264509a8 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -264,13 +266,13 @@ static int nand_ecc_test_run(const size_t size)
correct_data, size);
if (err) {
- pr_err("mtd_nandecctest: not ok - %s-%zd\n",
+ pr_err("not ok - %s-%zd\n",
nand_ecc_test[i].name, size);
dump_data_ecc(error_data, error_ecc,
correct_data, correct_ecc, size);
break;
}
- pr_info("mtd_nandecctest: ok - %s-%zd\n",
+ pr_info("ok - %s-%zd\n",
nand_ecc_test[i].name, size);
}
error:
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index ed9b62827f1b..e827fa8cd844 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -28,8 +30,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_oobtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -80,13 +80,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
+ pr_err("some erase error occurred at EB %d\n", ebnum);
return -EIO;
}
@@ -98,7 +97,7 @@ static int erase_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -107,7 +106,7 @@ static int erase_whole_device(void)
return err;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
return 0;
}
@@ -141,9 +140,9 @@ static int write_eraseblock(int ebnum)
ops.oobbuf = writebuf;
err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
- printk(PRINT_PREF "error: writeoob failed at %#llx\n",
+ pr_err("error: writeoob failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "error: use_len %d, use_offset %d\n",
+ pr_err("error: use_len %d, use_offset %d\n",
use_len, use_offset);
errcnt += 1;
return err ? err : -1;
@@ -160,7 +159,7 @@ static int write_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "writing OOBs of whole device\n");
+ pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -168,10 +167,10 @@ static int write_whole_device(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
return 0;
}
@@ -194,17 +193,17 @@ static int verify_eraseblock(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
- printk(PRINT_PREF "error: readoob failed at %#llx\n",
+ pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, use_len)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
@@ -221,29 +220,28 @@ static int verify_eraseblock(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
- printk(PRINT_PREF "error: readoob failed at "
- "%#llx\n", (long long)addr);
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf + use_offset, writebuf, use_len)) {
- printk(PRINT_PREF "error: verify failed at "
- "%#llx\n", (long long)addr);
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many "
- "errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
for (k = 0; k < use_offset; ++k)
if (readbuf[k] != 0xff) {
- printk(PRINT_PREF "error: verify 0xff "
+ pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too "
+ pr_err("error: too "
"many errors\n");
return -1;
}
@@ -251,12 +249,12 @@ static int verify_eraseblock(int ebnum)
for (k = use_offset + use_len;
k < mtd->ecclayout->oobavail; ++k)
if (readbuf[k] != 0xff) {
- printk(PRINT_PREF "error: verify 0xff "
+ pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too "
+ pr_err("error: too "
"many errors\n");
return -1;
}
@@ -286,17 +284,17 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
- printk(PRINT_PREF "error: readoob failed at %#llx\n",
+ pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, len)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
@@ -309,7 +307,7 @@ static int verify_all_eraseblocks(void)
int err;
unsigned int i;
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -317,10 +315,10 @@ static int verify_all_eraseblocks(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
return 0;
}
@@ -331,7 +329,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -341,18 +339,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kmalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -368,22 +366,22 @@ static int __init mtd_oobtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -392,7 +390,7 @@ static int __init mtd_oobtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -401,12 +399,12 @@ static int __init mtd_oobtest_init(void)
err = -ENOMEM;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!readbuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -420,7 +418,7 @@ static int __init mtd_oobtest_init(void)
vary_offset = 0;
/* First test: write all OOB, read it back and verify */
- printk(PRINT_PREF "test 1 of 5\n");
+ pr_info("test 1 of 5\n");
err = erase_whole_device();
if (err)
@@ -440,7 +438,7 @@ static int __init mtd_oobtest_init(void)
* Second test: write all OOB, a block at a time, read it back and
* verify.
*/
- printk(PRINT_PREF "test 2 of 5\n");
+ pr_info("test 2 of 5\n");
err = erase_whole_device();
if (err)
@@ -453,7 +451,7 @@ static int __init mtd_oobtest_init(void)
/* Check all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -461,16 +459,16 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
/*
* Third test: write OOB at varying offsets and lengths, read it back
* and verify.
*/
- printk(PRINT_PREF "test 3 of 5\n");
+ pr_info("test 3 of 5\n");
err = erase_whole_device();
if (err)
@@ -503,7 +501,7 @@ static int __init mtd_oobtest_init(void)
vary_offset = 0;
/* Fourth test: try to write off end of device */
- printk(PRINT_PREF "test 4 of 5\n");
+ pr_info("test 4 of 5\n");
err = erase_whole_device();
if (err)
@@ -522,14 +520,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to start write past end of OOB\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to start write past end of OOB\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: can write past end of OOB\n");
+ pr_err("error: can write past end of OOB\n");
errcnt += 1;
}
@@ -542,19 +540,19 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to start read past end of OOB\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to start read past end of OOB\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: can read past end of OOB\n");
+ pr_err("error: can read past end of OOB\n");
errcnt += 1;
}
if (bbt[ebcnt - 1])
- printk(PRINT_PREF "skipping end of device tests because last "
+ pr_info("skipping end of device tests because last "
"block is bad\n");
else {
/* Attempt to write off end of device */
@@ -566,14 +564,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to write past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: wrote past end of device\n");
+ pr_err("error: wrote past end of device\n");
errcnt += 1;
}
@@ -586,14 +584,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to read past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: read past end of device\n");
+ pr_err("error: read past end of device\n");
errcnt += 1;
}
@@ -610,14 +608,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to write past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: wrote past end of device\n");
+ pr_err("error: wrote past end of device\n");
errcnt += 1;
}
@@ -630,20 +628,20 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to read past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: read past end of device\n");
+ pr_err("error: read past end of device\n");
errcnt += 1;
}
}
/* Fifth test: write / read across block boundaries */
- printk(PRINT_PREF "test 5 of 5\n");
+ pr_info("test 5 of 5\n");
/* Erase all eraseblocks */
err = erase_whole_device();
@@ -652,7 +650,7 @@ static int __init mtd_oobtest_init(void)
/* Write all eraseblocks */
simple_srand(11);
- printk(PRINT_PREF "writing OOBs of whole device\n");
+ pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
int pg;
@@ -674,17 +672,16 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock "
- "%u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
addr += mtd->writesize;
}
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(11);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
@@ -702,28 +699,28 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
goto out;
}
}
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
kfree(writebuf);
kfree(readbuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 252ddb092fb2..f93a76f88113 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -28,8 +30,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_pagetest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -79,12 +79,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -102,7 +102,7 @@ static int write_eraseblock(int ebnum)
cond_resched();
err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
if (err || written != mtd->erasesize)
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
return err;
@@ -131,7 +131,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err;
}
@@ -139,7 +139,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)(addrn - bufsize));
return err;
}
@@ -148,12 +148,12 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
break;
}
if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -166,7 +166,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err;
}
@@ -174,7 +174,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)(addrn - bufsize));
return err;
}
@@ -183,14 +183,14 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err;
}
memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
set_random_data(boundary + pgsize, pgsize);
if (memcmp(twopages, boundary, bufsize)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -206,10 +206,10 @@ static int crosstest(void)
loff_t addr, addr0, addrn;
unsigned char *pp1, *pp2, *pp3, *pp4;
- printk(PRINT_PREF "crosstest\n");
+ pr_info("crosstest\n");
pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
if (!pp1) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
pp2 = pp1 + pgsize;
@@ -231,7 +231,7 @@ static int crosstest(void)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -243,7 +243,7 @@ static int crosstest(void)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -251,12 +251,12 @@ static int crosstest(void)
/* Read first page to pp2 */
addr = addr0;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp2);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -264,12 +264,12 @@ static int crosstest(void)
/* Read last page to pp3 */
addr = addrn - pgsize;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp3);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -277,25 +277,25 @@ static int crosstest(void)
/* Read first page again to pp4 */
addr = addr0;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp4);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
}
/* pp2 and pp4 should be the same */
- printk(PRINT_PREF "verifying pages read at %#llx match\n",
+ pr_info("verifying pages read at %#llx match\n",
(long long)addr0);
if (memcmp(pp2, pp4, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
} else if (!err)
- printk(PRINT_PREF "crosstest ok\n");
+ pr_info("crosstest ok\n");
kfree(pp1);
return err;
}
@@ -307,7 +307,7 @@ static int erasecrosstest(void)
loff_t addr0;
char *readbuf = twopages;
- printk(PRINT_PREF "erasecrosstest\n");
+ pr_info("erasecrosstest\n");
ebnum = 0;
addr0 = 0;
@@ -320,79 +320,79 @@ static int erasecrosstest(void)
while (ebnum2 && bbt[ebnum2])
ebnum2 -= 1;
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_info("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
+ pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
return -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum2);
+ pr_info("erasing block %d\n", ebnum2);
err = erase_eraseblock(ebnum2);
if (err)
return err;
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
+ pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
return -1;
}
if (!err)
- printk(PRINT_PREF "erasecrosstest ok\n");
+ pr_info("erasecrosstest ok\n");
return err;
}
@@ -402,7 +402,7 @@ static int erasetest(void)
int err = 0, i, ebnum, ok = 1;
loff_t addr0;
- printk(PRINT_PREF "erasetest\n");
+ pr_info("erasetest\n");
ebnum = 0;
addr0 = 0;
@@ -411,40 +411,40 @@ static int erasetest(void)
ebnum += 1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
err = mtd_read(mtd, addr0, pgsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n",
+ pr_info("verifying 1st page of block %d is all 0xff\n",
ebnum);
for (i = 0; i < pgsize; ++i)
if (twopages[i] != 0xff) {
- printk(PRINT_PREF "verifying all 0xff failed at %d\n",
+ pr_err("verifying all 0xff failed at %d\n",
i);
errcnt += 1;
ok = 0;
@@ -452,7 +452,7 @@ static int erasetest(void)
}
if (ok && !err)
- printk(PRINT_PREF "erasetest ok\n");
+ pr_info("erasetest ok\n");
return err;
}
@@ -464,7 +464,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -474,18 +474,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -499,22 +499,22 @@ static int __init mtd_pagetest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -524,7 +524,7 @@ static int __init mtd_pagetest_init(void)
pgcnt = mtd->erasesize / mtd->writesize;
pgsize = mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -534,17 +534,17 @@ static int __init mtd_pagetest_init(void)
bufsize = pgsize * 2;
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
twopages = kmalloc(bufsize, GFP_KERNEL);
if (!twopages) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
boundary = kmalloc(bufsize, GFP_KERNEL);
if (!boundary) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -553,7 +553,7 @@ static int __init mtd_pagetest_init(void)
goto out;
/* Erase all eraseblocks */
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -562,11 +562,11 @@ static int __init mtd_pagetest_init(void)
goto out;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
/* Write all eraseblocks */
simple_srand(1);
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -574,14 +574,14 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(1);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -589,10 +589,10 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = crosstest();
if (err)
@@ -606,7 +606,7 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
@@ -615,7 +615,7 @@ out:
kfree(writebuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
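A minimal, self-contained sketch (not part of the patch) of the pattern applied throughout these files: defining pr_fmt() before any include makes pr_info()/pr_err()/pr_crit() expand to printk() with the matching log level and the module name prepended, which is what the removed PRINT_PREF macros hardcoded by hand. The module name "pr_fmt_demo" below is purely illustrative; KBUILD_MODNAME is normally supplied by kbuild.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init pr_fmt_demo_init(void)
{
	/* Prints "pr_fmt_demo: verified 42 eraseblocks" at KERN_INFO */
	pr_info("verified %u eraseblocks\n", 42U);
	/* Prints "pr_fmt_demo: error: cannot allocate memory" at KERN_ERR */
	pr_err("error: cannot allocate memory\n");
	return 0;
}
module_init(pr_fmt_demo_init);
MODULE_LICENSE("GPL");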
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 121aba189cec..266de04b6d29 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -27,8 +29,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_readtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -51,12 +51,12 @@ static int read_eraseblock_by_page(int ebnum)
void *oobbuf = iobuf1;
for (i = 0; i < pgcnt; i++) {
- memset(buf, 0 , pgcnt);
+ memset(buf, 0, pgsize);
ret = mtd_read(mtd, addr, pgsize, &read, buf);
if (ret == -EUCLEAN)
ret = 0;
if (ret || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
if (!err)
err = ret;
@@ -77,7 +77,7 @@ static int read_eraseblock_by_page(int ebnum)
ret = mtd_read_oob(mtd, addr, &ops);
if ((ret && !mtd_is_bitflip(ret)) ||
ops.oobretlen != mtd->oobsize) {
- printk(PRINT_PREF "error: read oob failed at "
+ pr_err("error: read oob failed at "
"%#llx\n", (long long)addr);
if (!err)
err = ret;
@@ -99,7 +99,7 @@ static void dump_eraseblock(int ebnum)
char line[128];
int pg, oob;
- printk(PRINT_PREF "dumping eraseblock %d\n", ebnum);
+ pr_info("dumping eraseblock %d\n", ebnum);
n = mtd->erasesize;
for (i = 0; i < n;) {
char *p = line;
@@ -112,7 +112,7 @@ static void dump_eraseblock(int ebnum)
}
if (!mtd->oobsize)
return;
- printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum);
+ pr_info("dumping oob from eraseblock %d\n", ebnum);
n = mtd->oobsize;
for (pg = 0, i = 0; pg < pgcnt; pg++)
for (oob = 0; oob < n;) {
@@ -134,7 +134,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -144,21 +144,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
return 0;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -171,21 +171,21 @@ static int __init mtd_readtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: Cannot get MTD device\n");
+ pr_err("error: Cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -196,7 +196,7 @@ static int __init mtd_readtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -205,12 +205,12 @@ static int __init mtd_readtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf1) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -219,7 +219,7 @@ static int __init mtd_readtest_init(void)
goto out;
/* Read all eraseblocks 1 page at a time */
- printk(PRINT_PREF "testing page read\n");
+ pr_info("testing page read\n");
for (i = 0; i < ebcnt; ++i) {
int ret;
@@ -235,9 +235,9 @@ static int __init mtd_readtest_init(void)
}
if (err)
- printk(PRINT_PREF "finished with errors\n");
+ pr_info("finished with errors\n");
else
- printk(PRINT_PREF "finished\n");
+ pr_info("finished\n");
out:
@@ -246,7 +246,7 @@ out:
kfree(bbt);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 42b0f7456fc4..596cbea8df4c 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -28,8 +30,6 @@
#include <linux/sched.h>
#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_speedtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -70,12 +70,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -96,13 +96,13 @@ static int multiblock_erase(int ebnum, int blocks)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
+ pr_err("error %d while erasing EB %d, blocks %d\n",
err, ebnum, blocks);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d,"
+ pr_err("some erase error occurred at EB %d,"
"blocks %d\n", ebnum, blocks);
return -EIO;
}
@@ -134,7 +134,7 @@ static int write_eraseblock(int ebnum)
err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
if (err || written != mtd->erasesize) {
- printk(PRINT_PREF "error: write failed at %#llx\n", addr);
+ pr_err("error: write failed at %#llx\n", addr);
if (!err)
err = -EINVAL;
}
@@ -152,7 +152,7 @@ static int write_eraseblock_by_page(int ebnum)
for (i = 0; i < pgcnt; i++) {
err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -175,7 +175,7 @@ static int write_eraseblock_by_2pages(int ebnum)
for (i = 0; i < n; i++) {
err = mtd_write(mtd, addr, sz, &written, buf);
if (err || written != sz) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -187,7 +187,7 @@ static int write_eraseblock_by_2pages(int ebnum)
if (pgcnt % 2) {
err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -208,7 +208,7 @@ static int read_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != mtd->erasesize) {
- printk(PRINT_PREF "error: read failed at %#llx\n", addr);
+ pr_err("error: read failed at %#llx\n", addr);
if (!err)
err = -EINVAL;
}
@@ -229,7 +229,7 @@ static int read_eraseblock_by_page(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -255,7 +255,7 @@ static int read_eraseblock_by_2pages(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != sz) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -270,7 +270,7 @@ static int read_eraseblock_by_2pages(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -287,7 +287,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -321,21 +321,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
goto out;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
out:
goodebcnt = ebcnt - bad;
return 0;
@@ -351,25 +351,25 @@ static int __init mtd_speedtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
if (count)
- printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count);
+ pr_info("MTD device: %d count: %d\n", dev, count);
else
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -380,7 +380,7 @@ static int __init mtd_speedtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -392,7 +392,7 @@ static int __init mtd_speedtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -407,7 +407,7 @@ static int __init mtd_speedtest_init(void)
goto out;
/* Write all eraseblocks, 1 eraseblock at a time */
- printk(PRINT_PREF "testing eraseblock write speed\n");
+ pr_info("testing eraseblock write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -419,10 +419,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed);
+ pr_info("eraseblock write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 eraseblock at a time */
- printk(PRINT_PREF "testing eraseblock read speed\n");
+ pr_info("testing eraseblock read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -434,14 +434,14 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed);
+ pr_info("eraseblock read speed is %ld KiB/s\n", speed);
err = erase_whole_device();
if (err)
goto out;
/* Write all eraseblocks, 1 page at a time */
- printk(PRINT_PREF "testing page write speed\n");
+ pr_info("testing page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -453,10 +453,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed);
+ pr_info("page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 page at a time */
- printk(PRINT_PREF "testing page read speed\n");
+ pr_info("testing page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -468,14 +468,14 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed);
+ pr_info("page read speed is %ld KiB/s\n", speed);
err = erase_whole_device();
if (err)
goto out;
/* Write all eraseblocks, 2 pages at a time */
- printk(PRINT_PREF "testing 2 page write speed\n");
+ pr_info("testing 2 page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -487,10 +487,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed);
+ pr_info("2 page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 2 pages at a time */
- printk(PRINT_PREF "testing 2 page read speed\n");
+ pr_info("testing 2 page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -502,10 +502,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed);
+ pr_info("2 page read speed is %ld KiB/s\n", speed);
/* Erase all eraseblocks */
- printk(PRINT_PREF "Testing erase speed\n");
+ pr_info("Testing erase speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -517,12 +517,12 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed);
+ pr_info("erase speed is %ld KiB/s\n", speed);
/* Multi-block erase all eraseblocks */
for (k = 1; k < 7; k++) {
blocks = 1 << k;
- printk(PRINT_PREF "Testing %dx multi-block erase speed\n",
+ pr_info("Testing %dx multi-block erase speed\n",
blocks);
start_timing();
for (i = 0; i < ebcnt; ) {
@@ -541,16 +541,16 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n",
+ pr_info("%dx multi-block erase speed is %ld KiB/s\n",
blocks, speed);
}
- printk(PRINT_PREF "finished\n");
+ pr_info("finished\n");
out:
kfree(iobuf);
kfree(bbt);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index cb268cebf01a..3729f679ae5d 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -29,8 +31,6 @@
#include <linux/vmalloc.h>
#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_stresstest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -94,12 +94,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (unlikely(err)) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (unlikely(ei.state == MTD_ERASE_FAILED)) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -114,7 +114,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -137,7 +137,7 @@ static int do_read(void)
if (mtd_is_bitflip(err))
err = 0;
if (unlikely(err || read != len)) {
- printk(PRINT_PREF "error: read failed at 0x%llx\n",
+ pr_err("error: read failed at 0x%llx\n",
(long long)addr);
if (!err)
err = -EINVAL;
@@ -174,7 +174,7 @@ static int do_write(void)
addr = eb * mtd->erasesize + offs;
err = mtd_write(mtd, addr, len, &written, writebuf);
if (unlikely(err || written != len)) {
- printk(PRINT_PREF "error: write failed at 0x%llx\n",
+ pr_err("error: write failed at 0x%llx\n",
(long long)addr);
if (!err)
err = -EINVAL;
@@ -203,21 +203,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
return 0;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -231,22 +231,22 @@ static int __init mtd_stresstest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -257,14 +257,14 @@ static int __init mtd_stresstest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
if (ebcnt < 2) {
- printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+ pr_err("error: need at least 2 eraseblocks\n");
err = -ENOSPC;
goto out_put_mtd;
}
@@ -277,7 +277,7 @@ static int __init mtd_stresstest_init(void)
writebuf = vmalloc(bufsize);
offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
if (!readbuf || !writebuf || !offsets) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
for (i = 0; i < ebcnt; i++)
@@ -290,16 +290,16 @@ static int __init mtd_stresstest_init(void)
goto out;
/* Do operations */
- printk(PRINT_PREF "doing operations\n");
+ pr_info("doing operations\n");
for (op = 0; op < count; op++) {
if ((op & 1023) == 0)
- printk(PRINT_PREF "%d operations done\n", op);
+ pr_info("%d operations done\n", op);
err = do_operation();
if (err)
goto out;
cond_resched();
}
- printk(PRINT_PREF "finished, %d operations done\n", op);
+ pr_info("finished, %d operations done\n", op);
out:
kfree(offsets);
@@ -309,7 +309,7 @@ out:
out_put_mtd:
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 9667bf535282..c880c2229c59 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -27,8 +29,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -82,12 +82,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -100,7 +100,7 @@ static int erase_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -109,7 +109,7 @@ static int erase_whole_device(void)
return err;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
return 0;
}
@@ -122,11 +122,11 @@ static int write_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n", subpgsize);
- printk(PRINT_PREF " written: %#zx\n", written);
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
@@ -136,11 +136,11 @@ static int write_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n", subpgsize);
- printk(PRINT_PREF " written: %#zx\n", written);
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
@@ -160,12 +160,12 @@ static int write_eraseblock2(int ebnum)
set_random_data(writebuf, subpgsize * k);
err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n",
+ pr_err(" write size: %#x\n",
subpgsize * k);
- printk(PRINT_PREF " written: %#08zx\n",
+ pr_err(" written: %#08zx\n",
written);
}
return err ? err : -1;
@@ -198,23 +198,23 @@ static int verify_eraseblock(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "------------- written----------------\n");
+ pr_info("------------- written----------------\n");
print_subpage(writebuf);
- printk(PRINT_PREF "------------- read ------------------\n");
+ pr_info("------------- read ------------------\n");
print_subpage(readbuf);
- printk(PRINT_PREF "-------------------------------------\n");
+ pr_info("-------------------------------------\n");
errcnt += 1;
}
@@ -225,23 +225,23 @@ static int verify_eraseblock(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_info("error: verify failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "------------- written----------------\n");
+ pr_info("------------- written----------------\n");
print_subpage(writebuf);
- printk(PRINT_PREF "------------- read ------------------\n");
+ pr_info("------------- read ------------------\n");
print_subpage(readbuf);
- printk(PRINT_PREF "-------------------------------------\n");
+ pr_info("-------------------------------------\n");
errcnt += 1;
}
@@ -262,17 +262,17 @@ static int verify_eraseblock2(int ebnum)
err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
if (mtd_is_bitflip(err) && read == subpgsize * k) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at "
+ pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -295,17 +295,17 @@ static int verify_eraseblock_ff(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at "
+ pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify 0xff failed at "
+ pr_err("error: verify 0xff failed at "
"%#llx\n", (long long)addr);
errcnt += 1;
}
@@ -320,7 +320,7 @@ static int verify_all_eraseblocks_ff(void)
int err;
unsigned int i;
- printk(PRINT_PREF "verifying all eraseblocks for 0xff\n");
+ pr_info("verifying all eraseblocks for 0xff\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -328,10 +328,10 @@ static int verify_all_eraseblocks_ff(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
return 0;
}
@@ -342,7 +342,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -352,18 +352,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -377,22 +377,22 @@ static int __init mtd_subpagetest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -402,7 +402,7 @@ static int __init mtd_subpagetest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, subpage size %u, count of eraseblocks %u, "
"pages per eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -412,12 +412,12 @@ static int __init mtd_subpagetest_init(void)
bufsize = subpgsize * 32;
writebuf = kmalloc(bufsize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_info("error: cannot allocate memory\n");
goto out;
}
readbuf = kmalloc(bufsize, GFP_KERNEL);
if (!readbuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_info("error: cannot allocate memory\n");
goto out;
}
@@ -429,7 +429,7 @@ static int __init mtd_subpagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
simple_srand(1);
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -438,13 +438,13 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
simple_srand(1);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -452,10 +452,10 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = erase_whole_device();
if (err)
@@ -467,7 +467,7 @@ static int __init mtd_subpagetest_init(void)
/* Write all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -475,14 +475,14 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -490,10 +490,10 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = erase_whole_device();
if (err)
@@ -503,7 +503,7 @@ static int __init mtd_subpagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
@@ -511,7 +511,7 @@ out:
kfree(writebuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index b65861bc7b8e..c4cde1e9eddb 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -23,6 +23,8 @@
* damage caused by this program.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -31,7 +33,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_torturetest: "
#define RETRIES 3
static int eb = 8;
@@ -107,12 +108,12 @@ static inline int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -139,40 +140,40 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
retry:
err = mtd_read(mtd, addr, len, &read, check_buf);
if (mtd_is_bitflip(err))
- printk(PRINT_PREF "single bit flip occurred at EB %d "
+ pr_err("single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
else if (err) {
- printk(PRINT_PREF "error %d while reading EB %d, "
+ pr_err("error %d while reading EB %d, "
"read %zd\n", err, ebnum, read);
return err;
}
if (read != len) {
- printk(PRINT_PREF "failed to read %zd bytes from EB %d, "
+ pr_err("failed to read %zd bytes from EB %d, "
"read only %zd, but no error reported\n",
len, ebnum, read);
return -EIO;
}
if (memcmp(buf, check_buf, len)) {
- printk(PRINT_PREF "read wrong data from EB %d\n", ebnum);
+ pr_err("read wrong data from EB %d\n", ebnum);
report_corrupt(check_buf, buf);
if (retries++ < RETRIES) {
/* Try read again */
yield();
- printk(PRINT_PREF "re-try reading data from EB %d\n",
+ pr_info("re-try reading data from EB %d\n",
ebnum);
goto retry;
} else {
- printk(PRINT_PREF "retried %d times, still errors, "
+ pr_info("retried %d times, still errors, "
"give-up\n", RETRIES);
return -EINVAL;
}
}
if (retries != 0)
- printk(PRINT_PREF "only attempt number %d was OK (!!!)\n",
+ pr_info("only attempt number %d was OK (!!!)\n",
retries);
return 0;
@@ -191,12 +192,12 @@ static inline int write_pattern(int ebnum, void *buf)
}
err = mtd_write(mtd, addr, len, &written, buf);
if (err) {
- printk(PRINT_PREF "error %d while writing EB %d, written %zd"
+ pr_err("error %d while writing EB %d, written %zd"
" bytes\n", err, ebnum, written);
return err;
}
if (written != len) {
- printk(PRINT_PREF "written only %zd bytes of %zd, but no error"
+ pr_info("written only %zd bytes of %zd, but no error"
" reported\n", written, len);
return -EIO;
}
@@ -211,64 +212,64 @@ static int __init tort_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "Warning: this program is trying to wear out your "
+ pr_info("Warning: this program is trying to wear out your "
"flash, stop it if this is not wanted.\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
- printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
+ pr_info("MTD device: %d\n", dev);
+ pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
ebcnt, eb, eb + ebcnt - 1, dev);
if (pgcnt)
- printk(PRINT_PREF "torturing just %d pages per eraseblock\n",
+ pr_info("torturing just %d pages per eraseblock\n",
pgcnt);
- printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled");
+ pr_info("write verify %s\n", check ? "enabled" : "disabled");
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
pgsize = mtd->writesize;
if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
- printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt);
+ pr_err("error: invalid pgcnt value %d\n", pgcnt);
goto out_mtd;
}
err = -ENOMEM;
patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_5A5) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_mtd;
}
patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_A5A) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_5A5;
}
patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_FF) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_A5A;
}
check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!check_buf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_FF;
}
@@ -295,13 +296,13 @@ static int __init tort_init(void)
err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
if (err < 0) {
- printk(PRINT_PREF "block_isbad() returned %d "
+ pr_info("block_isbad() returned %d "
"for EB %d\n", err, i);
goto out;
}
if (err) {
- printk("EB %d is bad. Skip it.\n", i);
+ pr_err("EB %d is bad. Skip it.\n", i);
bad_ebs[i - eb] = 1;
}
}
@@ -329,7 +330,7 @@ static int __init tort_init(void)
continue;
err = check_eraseblock(i, patt_FF);
if (err) {
- printk(PRINT_PREF "verify failed"
+ pr_info("verify failed"
" for 0xFF... pattern\n");
goto out;
}
@@ -362,7 +363,7 @@ static int __init tort_init(void)
patt = patt_A5A;
err = check_eraseblock(i, patt);
if (err) {
- printk(PRINT_PREF "verify failed for %s"
+ pr_info("verify failed for %s"
" pattern\n",
((eb + erase_cycles) & 1) ?
"0x55AA55..." : "0xAA55AA...");
@@ -380,7 +381,7 @@ static int __init tort_init(void)
stop_timing();
ms = (finish.tv_sec - start.tv_sec) * 1000 +
(finish.tv_usec - start.tv_usec) / 1000;
- printk(PRINT_PREF "%08u erase cycles done, took %lu "
+ pr_info("%08u erase cycles done, took %lu "
"milliseconds (%lu seconds)\n",
erase_cycles, ms, ms / 1000);
start_timing();
@@ -391,7 +392,7 @@ static int __init tort_init(void)
}
out:
- printk(PRINT_PREF "finished after %u erase cycles\n",
+ pr_info("finished after %u erase cycles\n",
erase_cycles);
kfree(check_buf);
out_patt_FF:
@@ -403,7 +404,7 @@ out_patt_5A5:
out_mtd:
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred during torturing\n", err);
+ pr_info("error %d occurred during torturing\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
@@ -441,9 +442,9 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
&bits) >= 0)
pages++;
- printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n",
+ pr_info("verify fails on %d pages, %d bytes/%d bits\n",
pages, bytes, bits);
- printk(PRINT_PREF "The following is a list of all differences between"
+ pr_info("The following is a list of all differences between"
" what was read from flash and what was expected\n");
for (i = 0; i < check_len; i += pgsize) {
@@ -457,7 +458,7 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
printk("-------------------------------------------------------"
"----------------------------------\n");
- printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify,"
+ pr_info("Page %zd has %d bytes/%d bits failing verify,"
" starting at offset 0x%x\n",
(mtd->erasesize - check_len + i) / pgsize,
bytes, bits, first);
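The two test-module conversions above follow the same recipe: define pr_fmt() before the first include so that pr_info()/pr_err()/pr_crit() prepend "modname: " automatically, then drop the hand-rolled PRINT_PREF macro. A minimal, self-contained sketch of that recipe (the module body and messages are illustrative, not taken from the patch):

/* must be defined before the first include that pulls in printk.h */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
        pr_info("initialised\n");            /* logs "demo: initialised" at KERN_INFO */
        pr_err("error %d occurred\n", -EIO); /* same prefix, KERN_ERR level */
        return 0;
}

static void __exit demo_exit(void)
{
        pr_info("exiting\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
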
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index fec406b4553d..c071d410488f 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -322,7 +322,6 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
- void *buf;
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vh = NULL;
@@ -393,18 +392,14 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
/* Read the data of the copy and check the CRC */
len = be32_to_cpu(vid_hdr->data_size);
- buf = vmalloc(len);
- if (!buf) {
- err = -ENOMEM;
- goto out_free_vidh;
- }
- err = ubi_io_read_data(ubi, buf, pnum, 0, len);
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
- goto out_free_buf;
+ goto out_unlock;
data_crc = be32_to_cpu(vid_hdr->data_crc);
- crc = crc32(UBI_CRC32_INIT, buf, len);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
pnum, crc, data_crc);
@@ -415,8 +410,8 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
dbg_bld("PEB %d CRC is OK", pnum);
bitflips = !!err;
}
+ mutex_unlock(&ubi->buf_mutex);
- vfree(buf);
ubi_free_vid_hdr(ubi, vh);
if (second_is_newer)
@@ -426,8 +421,8 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
return second_is_newer | (bitflips << 1) | (corrupted << 2);
-out_free_buf:
- vfree(buf);
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
ubi_free_vid_hdr(ubi, vh);
return err;
@@ -1453,7 +1448,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
goto out_wl;
#ifdef CONFIG_MTD_UBI_FASTMAP
- if (ubi->fm && ubi->dbg->chk_gen) {
+ if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
@@ -1503,7 +1498,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
struct ubi_ainf_peb *aeb, *last_aeb;
uint8_t *buf;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
/*
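The ubi_compare_lebs() change above trades a per-call vmalloc()/vfree() for the device-wide ubi->peb_buf scratch buffer, which may only be touched while ubi->buf_mutex is held. A condensed sketch of that pattern as a hypothetical helper (check_copy_crc() is not a real UBI function):

static int check_copy_crc(struct ubi_device *ubi, int pnum, int len, u32 expect)
{
        u32 crc = 0;
        int err;

        mutex_lock(&ubi->buf_mutex);          /* peb_buf is shared by the whole device */
        err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
        if (!err || err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
                crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
        mutex_unlock(&ubi->buf_mutex);

        if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
                return err;                   /* real I/O failure */
        return crc == expect ? 0 : -EBADMSG;  /* bitflips/ECC noise: trust the CRC check */
}
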
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 344b4cb49d4e..a56133585e92 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -825,8 +825,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err("cannot clean auto-resize flag for volume %d",
@@ -986,14 +985,10 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->fm_buf)
goto out_free;
#endif
- err = ubi_debugging_init_dev(ubi);
- if (err)
- goto out_free;
-
err = ubi_attach(ubi, 0);
if (err) {
ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
- goto out_debugging;
+ goto out_free;
}
if (ubi->autoresize_vol_id != -1) {
@@ -1060,8 +1055,6 @@ out_detach:
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
-out_debugging:
- ubi_debugging_exit_dev(ubi);
out_free:
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
@@ -1139,7 +1132,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
- ubi_debugging_exit_dev(ubi);
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
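This file (and upd.c and vmt.c further down) also replaces memcpy() of a volume-table record with a plain struct assignment: the copy is identical, but the compiler now checks that source and destination really are the same type and size. Generic illustration (struct record is a stand-in, not the real ubi_vtbl_record):

#include <string.h>

struct record {
        unsigned int reserved_pebs;
        unsigned char upd_marker;
};

static void copy_record(struct record *dst, const struct record *src)
{
        memcpy(dst, src, sizeof(struct record)); /* old style: size and types unchecked */
        *dst = *src;                             /* same bytes, but type-checked */
}
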
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 26908a59506b..63cb1d7236ce 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -217,32 +217,6 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
pr_err("\t1st 16 characters of name: %s\n", nm);
}
-/**
- * ubi_debugging_init_dev - initialize debugging for an UBI device.
- * @ubi: UBI device description object
- *
- * This function initializes debugging-related data for UBI device @ubi.
- * Returns zero in case of success and a negative error code in case of
- * failure.
- */
-int ubi_debugging_init_dev(struct ubi_device *ubi)
-{
- ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL);
- if (!ubi->dbg)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ubi_debugging_exit_dev - free debugging data for an UBI device.
- * @ubi: UBI device description object
- */
-void ubi_debugging_exit_dev(struct ubi_device *ubi)
-{
- kfree(ubi->dbg);
-}
-
/*
* Root directory for UBI stuff in debugfs. Contains sub-directories which
* contain the stuff specific to particular UBI devices.
@@ -295,7 +269,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
if (dent == d->dfs_chk_gen)
val = d->chk_gen;
@@ -341,7 +315,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
buf_size = min_t(size_t, count, (sizeof(buf) - 1));
if (copy_from_user(buf, user_buf, buf_size)) {
@@ -398,7 +372,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
unsigned long ubi_num = ubi->ubi_num;
const char *fname;
struct dentry *dent;
- struct ubi_debug_info *d = ubi->dbg;
+ struct ubi_debug_info *d = &ubi->dbg;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -471,5 +445,5 @@ out:
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
if (IS_ENABLED(CONFIG_DEBUG_FS))
- debugfs_remove_recursive(ubi->dbg->dfs_dir);
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 3dbc877d9663..33f8f3b2c9b2 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -60,51 +60,11 @@ void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
int len);
-int ubi_debugging_init_dev(struct ubi_device *ubi);
-void ubi_debugging_exit_dev(struct ubi_device *ubi);
int ubi_debugfs_init(void);
void ubi_debugfs_exit(void);
int ubi_debugfs_init_dev(struct ubi_device *ubi);
void ubi_debugfs_exit_dev(struct ubi_device *ubi);
-/*
- * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
- * + 2 for the number plus 1 for the trailing zero byte.
- */
-#define UBI_DFS_DIR_NAME "ubi%d"
-#define UBI_DFS_DIR_LEN (3 + 2 + 1)
-
-/**
- * struct ubi_debug_info - debugging information for an UBI device.
- *
- * @chk_gen: if UBI general extra checks are enabled
- * @chk_io: if UBI I/O extra checks are enabled
- * @disable_bgt: disable the background task for testing purposes
- * @emulate_bitflips: emulate bit-flips for testing purposes
- * @emulate_io_failures: emulate write/erase failures for testing purposes
- * @dfs_dir_name: name of debugfs directory containing files of this UBI device
- * @dfs_dir: direntry object of the UBI device debugfs directory
- * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
- * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
- * @dfs_disable_bgt: debugfs knob to disable the background task
- * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
- * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
- */
-struct ubi_debug_info {
- unsigned int chk_gen:1;
- unsigned int chk_io:1;
- unsigned int disable_bgt:1;
- unsigned int emulate_bitflips:1;
- unsigned int emulate_io_failures:1;
- char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
- struct dentry *dfs_dir;
- struct dentry *dfs_chk_gen;
- struct dentry *dfs_chk_io;
- struct dentry *dfs_disable_bgt;
- struct dentry *dfs_emulate_bitflips;
- struct dentry *dfs_emulate_io_failures;
-};
-
/**
* ubi_dbg_is_bgt_disabled - if the background thread is disabled.
* @ubi: UBI device description object
@@ -114,7 +74,7 @@ struct ubi_debug_info {
*/
static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
{
- return ubi->dbg->disable_bgt;
+ return ubi->dbg.disable_bgt;
}
/**
@@ -125,7 +85,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_bitflips)
+ if (ubi->dbg.emulate_bitflips)
return !(random32() % 200);
return 0;
}
@@ -139,7 +99,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 500);
return 0;
}
@@ -153,9 +113,18 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 400);
return 0;
}
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
+
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
#endif /* !__UBI_DEBUG_H__ */
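With struct ubi_debug_info embedded in struct ubi_device (see the ubi.h hunk further down) there is nothing left to allocate, so ubi_debugging_init_dev()/ubi_debugging_exit_dev() disappear and the flags are reached through the inline accessors above. The shape of that refactoring in isolation, with made-up names:

struct dbg_info {
        unsigned int chk_gen:1;
        unsigned int chk_io:1;
};

struct device_ctx {
        struct dbg_info dbg;    /* embedded by value: no kzalloc(), no NULL checks */
};

static inline int ctx_dbg_chk_gen(const struct device_ctx *ctx)
{
        return ctx->dbg.chk_gen;
}

static inline int ctx_dbg_chk_io(const struct device_ctx *ctx)
{
        return ctx->dbg.chk_io;
}
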
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 1a5f53c090d4..0648c6996d43 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -814,10 +814,8 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (max_sqnum > ai->max_sqnum)
ai->max_sqnum = max_sqnum;
- list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
- list_del(&tmp_aeb->u.list);
- list_add_tail(&tmp_aeb->u.list, &ai->free);
- }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
/*
* If fastmap is leaking PEBs (must not happen), raise a
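list_move_tail() is exactly list_del() followed by list_add_tail(), so the two-statement loop body above collapses into a single call and the braces can go. The same idiom in a self-contained form (struct item and the function name are illustrative):

#include <linux/list.h>

struct item {
        struct list_head node;
};

/* Move every entry from @src to the tail of @dst, preserving order. */
static void drain_to_tail(struct list_head *src, struct list_head *dst)
{
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, src, node)
                list_move_tail(&it->node, dst);
}
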
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 4bd4db8c84c9..b93807b4c459 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -171,17 +171,17 @@ static void gluebi_put_device(struct mtd_info *mtd)
static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
- int err = 0, lnum, offs, total_read;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(from, mtd->erasesize, &offs);
- total_read = len;
- while (total_read) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_read = mtd->erasesize - offs;
- if (to_read > total_read)
- to_read = total_read;
+ if (to_read > bytes_left)
+ to_read = bytes_left;
err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
@@ -189,11 +189,11 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
lnum += 1;
offs = 0;
- total_read -= to_read;
+ bytes_left -= to_read;
buf += to_read;
}
- *retlen = len - total_read;
+ *retlen = len - bytes_left;
return err;
}
@@ -211,7 +211,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- int err = 0, lnum, offs, total_written;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
@@ -220,12 +220,12 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (len % mtd->writesize || offs % mtd->writesize)
return -EINVAL;
- total_written = len;
- while (total_written) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_write = mtd->erasesize - offs;
- if (to_write > total_written)
- to_write = total_written;
+ if (to_write > bytes_left)
+ to_write = bytes_left;
err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
@@ -233,11 +233,11 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
lnum += 1;
offs = 0;
- total_written -= to_write;
+ bytes_left -= to_write;
buf += to_write;
}
- *retlen = len - total_written;
+ *retlen = len - bytes_left;
return err;
}
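The gluebi hunks only rename total_read/total_written to bytes_left, but the reassembled read loop shows why the name fits: the counter is what still has to be transferred, clamped to one eraseblock per iteration. Fragment as it reads after the patch (variables as declared in gluebi_read()):

lnum = div_u64_rem(from, mtd->erasesize, &offs);   /* first LEB and offset into it */
bytes_left = len;
while (bytes_left) {
        size_t to_read = mtd->erasesize - offs;    /* room left in this LEB */

        if (to_read > bytes_left)
                to_read = bytes_left;              /* final, partial chunk */
        err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
        if (err)
                break;
        lnum += 1;
        offs = 0;                                  /* later LEBs are read from offset 0 */
        bytes_left -= to_read;
        buf += to_read;
}
*retlen = len - bytes_left;                        /* report what was actually read */
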
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 78a1dcbf2107..bf79def40126 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1132,7 +1132,7 @@ static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
err = ubi_io_is_bad(ubi, pnum);
@@ -1159,7 +1159,7 @@ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(ec_hdr->magic);
@@ -1197,7 +1197,7 @@ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1241,7 +1241,7 @@ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(vid_hdr->magic);
@@ -1282,7 +1282,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr *vid_hdr;
void *p;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1334,7 +1334,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1398,7 +1398,7 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7d57469723cf..8ea6297a208f 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -85,6 +85,13 @@
#define UBI_UNKNOWN -1
/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number plus 1 for the trailing zero byte.
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/*
* Error codes returned by the I/O sub-system.
*
* UBI_IO_FF: the read region of flash contains only 0xFFs
@@ -342,6 +349,37 @@ struct ubi_volume_desc {
struct ubi_wl_entry;
/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+};
+
+/**
* struct ubi_device - UBI device description structure
* @dev: UBI device object to use the the Linux device model
* @cdev: character device object to create character device
@@ -545,7 +583,7 @@ struct ubi_device {
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct ubi_debug_info *dbg;
+ struct ubi_debug_info dbg;
};
/**
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 9f2ebd8750e7..ec2c2dc1c1ca 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -64,8 +64,7 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
return 0;
}
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
vtbl_rec.upd_marker = 1;
mutex_lock(&ubi->device_mutex);
@@ -93,8 +92,7 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("clear update marker for volume %d", vol->vol_id);
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 9169e58c262e..8330703c098f 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -535,7 +535,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
/* Change volume table record */
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
@@ -847,7 +847,7 @@ static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 926e3df14fb2..d77b1c1d7c72 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -858,7 +858,7 @@ out_free:
*/
static void self_vtbl_check(const struct ubi_device *ubi)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return;
if (vtbl_check(ubi, ubi->vtbl)) {
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2144f611196e..5df49d3cb5c7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1,5 +1,4 @@
/*
- * @ubi: UBI device description object
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
@@ -2050,7 +2049,7 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -2090,7 +2089,7 @@ out_free:
static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
if (in_wl_tree(e, root))
@@ -2116,7 +2115,7 @@ static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *p;
int i;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6a70184c3f23..56c2d75a63d4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -123,8 +123,7 @@ config IFB
source "drivers/net/team/Kconfig"
config MACVLAN
- tristate "MAC-VLAN support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "MAC-VLAN support"
---help---
This allows one to create virtual interfaces that map packets to
or from specific MAC addresses to a particular interface.
@@ -138,7 +137,7 @@ config MACVLAN
will be called macvlan.
config MACVTAP
- tristate "MAC-VLAN based tap driver (EXPERIMENTAL)"
+ tristate "MAC-VLAN based tap driver"
depends on MACVLAN
help
This adds a specialized tap character device driver that is based
@@ -189,6 +188,10 @@ config NETPOLL_TRAP
config NET_POLL_CONTROLLER
def_bool NETPOLL
+config NTB_NETDEV
+ tristate "Virtual Ethernet over NTB"
+ depends on NTB
+
config RIONET
tristate "RapidIO Ethernet over messaging driver support"
depends on RAPIDIO
@@ -234,8 +237,8 @@ config VETH
versa.
config VIRTIO_NET
- tristate "Virtio network driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL && VIRTIO
+ tristate "Virtio network driver"
+ depends on VIRTIO
---help---
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 335db78fd987..ef3d090efedf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -71,3 +71,4 @@ obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_HYPERV_NET) += hyperv/
+obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index e3f0faca98d0..3a8c7532ee0d 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -37,35 +37,14 @@
ethernet adaptor have the name "eth[0123...]".
*/
-extern struct net_device *ne2_probe(int unit);
extern struct net_device *hp100_probe(int unit);
extern struct net_device *ultra_probe(int unit);
-extern struct net_device *ultra32_probe(int unit);
extern struct net_device *wd_probe(int unit);
-extern struct net_device *el2_probe(int unit);
extern struct net_device *ne_probe(int unit);
-extern struct net_device *hp_probe(int unit);
-extern struct net_device *hp_plus_probe(int unit);
-extern struct net_device *express_probe(int unit);
-extern struct net_device *eepro_probe(int unit);
-extern struct net_device *at1700_probe(int unit);
extern struct net_device *fmv18x_probe(int unit);
-extern struct net_device *eth16i_probe(int unit);
extern struct net_device *i82596_probe(int unit);
-extern struct net_device *ewrk3_probe(int unit);
-extern struct net_device *el1_probe(int unit);
-extern struct net_device *el16_probe(int unit);
-extern struct net_device *elmc_probe(int unit);
-extern struct net_device *elplus_probe(int unit);
-extern struct net_device *ac3200_probe(int unit);
-extern struct net_device *es_probe(int unit);
-extern struct net_device *lne390_probe(int unit);
-extern struct net_device *e2100_probe(int unit);
-extern struct net_device *ni5010_probe(int unit);
-extern struct net_device *ni52_probe(int unit);
extern struct net_device *ni65_probe(int unit);
extern struct net_device *sonic_probe(int unit);
-extern struct net_device *seeq8005_probe(int unit);
extern struct net_device *smc_init(int unit);
extern struct net_device *atarilance_probe(int unit);
extern struct net_device *sun3lance_probe(int unit);
@@ -77,13 +56,9 @@ extern struct net_device *tc515_probe(int unit);
extern struct net_device *lance_probe(int unit);
extern struct net_device *mac8390_probe(int unit);
extern struct net_device *mac89x0_probe(int unit);
-extern struct net_device *mc32_probe(int unit);
extern struct net_device *cops_probe(int unit);
extern struct net_device *ltpc_probe(void);
-/* Detachable devices ("pocket adaptors") */
-extern struct net_device *de620_probe(int unit);
-
/* Fibre Channel adapters */
extern int iph5526_probe(struct net_device *dev);
@@ -111,29 +86,6 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
}
/*
- * This is a bit of an artificial separation as there are PCI drivers
- * that also probe for EISA cards (in the PCI group) and there are ISA
- * drivers that probe for EISA cards (in the ISA group). These are the
- * legacy EISA only driver probes, and also the legacy PCI probes
- */
-
-static struct devprobe2 eisa_probes[] __initdata = {
-#ifdef CONFIG_ULTRA32
- {ultra32_probe, 0},
-#endif
-#ifdef CONFIG_AC3200
- {ac3200_probe, 0},
-#endif
-#ifdef CONFIG_ES3210
- {es_probe, 0},
-#endif
-#ifdef CONFIG_LNE390
- {lne390_probe, 0},
-#endif
- {NULL, 0},
-};
-
-/*
* ISA probes that touch addresses < 0x400 (including those that also
* look for EISA/PCI cards in addition to ISA cards).
*/
@@ -150,18 +102,6 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_WD80x3
{wd_probe, 0},
#endif
-#ifdef CONFIG_EL2 /* 3c503 */
- {el2_probe, 0},
-#endif
-#ifdef CONFIG_HPLAN
- {hp_probe, 0},
-#endif
-#ifdef CONFIG_HPLAN_PLUS
- {hp_plus_probe, 0},
-#endif
-#ifdef CONFIG_E2100 /* Cabletron E21xx series. */
- {e2100_probe, 0},
-#endif
#if defined(CONFIG_NE2000) || \
defined(CONFIG_NE_H8300) /* ISA (use ne2k-pci for PCI cards) */
{ne_probe, 0},
@@ -172,60 +112,20 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_SMC9194
{smc_init, 0},
#endif
-#ifdef CONFIG_SEEQ8005
- {seeq8005_probe, 0},
-#endif
#ifdef CONFIG_CS89x0
#ifndef CONFIG_CS89x0_PLATFORM
{cs89x0_probe, 0},
#endif
#endif
-#ifdef CONFIG_AT1700
- {at1700_probe, 0},
-#endif
-#ifdef CONFIG_ETH16I
- {eth16i_probe, 0}, /* ICL EtherTeam 16i/32 */
-#endif
-#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */
- {express_probe, 0},
-#endif
-#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */
- {eepro_probe, 0},
-#endif
-#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */
- {ewrk3_probe, 0},
-#endif
-#if defined(CONFIG_APRICOT) || defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */
+#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */
{i82596_probe, 0},
#endif
-#ifdef CONFIG_EL1 /* 3c501 */
- {el1_probe, 0},
-#endif
-#ifdef CONFIG_EL16 /* 3c507 */
- {el16_probe, 0},
-#endif
-#ifdef CONFIG_ELPLUS /* 3c505 */
- {elplus_probe, 0},
-#endif
-#ifdef CONFIG_NI5010
- {ni5010_probe, 0},
-#endif
-#ifdef CONFIG_NI52
- {ni52_probe, 0},
-#endif
#ifdef CONFIG_NI65
{ni65_probe, 0},
#endif
{NULL, 0},
};
-static struct devprobe2 parport_probes[] __initdata = {
-#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */
- {de620_probe, 0},
-#endif
- {NULL, 0},
-};
-
static struct devprobe2 m68k_probes[] __initdata = {
#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */
{atarilance_probe, 0},
@@ -264,9 +164,7 @@ static void __init ethif_probe2(int unit)
return;
(void)( probe_list2(unit, m68k_probes, base_addr == 0) &&
- probe_list2(unit, eisa_probes, base_addr == 0) &&
- probe_list2(unit, isa_probes, base_addr == 0) &&
- probe_list2(unit, parport_probes, base_addr == 0));
+ probe_list2(unit, isa_probes, base_addr == 0));
}
/* Statically configured drivers -- order matters here. */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a030e635f001..fc58d118d844 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -389,13 +389,13 @@ static u8 __get_duplex(struct port *port)
/**
* __initialize_port_locks - initialize a port's STATE machine spinlock
- * @port: the port we're looking at
+ * @port: the slave of the port we're looking at
*
*/
-static inline void __initialize_port_locks(struct port *port)
+static inline void __initialize_port_locks(struct slave *slave)
{
// make sure it isn't called twice
- spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+ spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
}
//conversions
@@ -1127,7 +1127,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
// INFO_RECEIVED_LOOPBACK_FRAMES
pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
"Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
- port->slave->dev->master->name, port->slave->dev->name);
+ port->slave->bond->dev->name, port->slave->dev->name);
return;
}
__update_selected(lacpdu, port);
@@ -1306,7 +1306,7 @@ static void ad_port_selection_logic(struct port *port)
}
if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
- port->slave->dev->master->name,
+ port->slave->bond->dev->name,
port->actor_port_number,
port->slave->dev->name,
port->aggregator->aggregator_identifier);
@@ -1386,7 +1386,7 @@ static void ad_port_selection_logic(struct port *port)
port->aggregator->aggregator_identifier);
} else {
pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n",
- port->slave->dev->master->name,
+ port->slave->bond->dev->name,
port->actor_port_number, port->slave->dev->name);
}
}
@@ -1463,7 +1463,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
default:
pr_warning("%s: Impossible agg select mode %d\n",
- curr->slave->dev->master->name,
+ curr->slave->bond->dev->name,
__get_agg_selection_mode(curr->lag_ports));
break;
}
@@ -1571,7 +1571,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
// check if any partner replys
if (best->is_individual) {
pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ? best->slave->dev->master->name : "NULL");
+ best->slave ? best->slave->bond->dev->name : "NULL");
}
best->is_active = 1;
@@ -1898,7 +1898,7 @@ int bond_3ad_bind_slave(struct slave *slave)
if (bond == NULL) {
pr_err("%s: The slave %s is not attached to its bond\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return -1;
}
@@ -1910,6 +1910,7 @@ int bond_3ad_bind_slave(struct slave *slave)
ad_initialize_port(port, bond->params.lacp_fast);
+ __initialize_port_locks(slave);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave).id;
// key is determined according to the link speed, duplex and user key(which is yet not supported)
@@ -1932,8 +1933,6 @@ int bond_3ad_bind_slave(struct slave *slave)
port->next_port_in_aggregator = NULL;
__disable_port(port);
- __initialize_port_locks(port);
-
// aggregator initialization
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1973,7 +1972,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2009,7 +2008,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
pr_info("%s: Removing an active aggregator\n",
- aggregator->slave->dev->master->name);
+ aggregator->slave->bond->dev->name);
// select new active aggregator
select_new_active_agg = 1;
}
@@ -2040,7 +2039,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_agg_selection_logic(__get_first_agg(port));
} else {
pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
- slave->dev->master->name);
+ slave->bond->dev->name);
}
} else { // in case that the only port related to this aggregator is the one we want to remove
select_new_active_agg = aggregator->is_active;
@@ -2048,7 +2047,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_clear_agg(aggregator);
if (select_new_active_agg) {
pr_info("%s: Removing an active aggregator\n",
- slave->dev->master->name);
+ slave->bond->dev->name);
// select new active aggregator
ad_agg_selection_logic(__get_first_agg(port));
}
@@ -2076,7 +2075,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
pr_info("%s: Removing an active aggregator\n",
- slave->dev->master->name);
+ slave->bond->dev->name);
// select new active aggregator
ad_agg_selection_logic(__get_first_agg(port));
}
@@ -2184,7 +2183,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1
if (!port->slave) {
pr_warning("%s: Warning: port of slave %s is uninitialized\n",
- slave->dev->name, slave->dev->master->name);
+ slave->dev->name, slave->bond->dev->name);
return ret;
}
@@ -2240,7 +2239,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2268,7 +2267,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2297,7 +2296,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
// if slave is null, the whole port is not initialized
if (!port->slave) {
pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2494,11 +2493,13 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
struct port *port = NULL;
int lacp_fast;
- read_lock(&bond->lock);
+ write_lock_bh(&bond->lock);
lacp_fast = bond->params.lacp_fast;
bond_for_each_slave(bond, slave, i) {
port = &(SLAVE_AD_INFO(slave).port);
+ if (port->slave == NULL)
+ continue;
__get_state_machine_lock(port);
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -2507,5 +2508,5 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
__release_state_machine_lock(port);
}
- read_unlock(&bond->lock);
+ write_unlock_bh(&bond->lock);
}
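After the hunk above, bond_3ad_update_lacp_rate() holds the bond lock for writing with bottom halves disabled, and it skips ports whose ->slave is still NULL, i.e. ports that have not gone through bond_3ad_bind_slave() yet. The resulting loop, roughly (the unchanged middle of the loop is elided, as in the hunk):

write_lock_bh(&bond->lock);             /* was read_lock(); now exclusive and BH-safe */
lacp_fast = bond->params.lacp_fast;
bond_for_each_slave(bond, slave, i) {
        port = &(SLAVE_AD_INFO(slave).port);
        if (port->slave == NULL)        /* never bound: skip instead of dereferencing */
                continue;
        __get_state_machine_lock(port);
        if (lacp_fast)
                port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
        /* ... unchanged lines elided in the hunk ... */
        __release_state_machine_lock(port);
}
write_unlock_bh(&bond->lock);
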
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7c9d136e74be..f5e052723029 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -507,7 +507,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
client_info->mac_dst);
if (!skb) {
pr_err("%s: Error: failed to create an ARP packet\n",
- client_info->slave->dev->master->name);
+ client_info->slave->bond->dev->name);
continue;
}
@@ -517,7 +517,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb = vlan_put_tag(skb, client_info->vlan_id);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
- client_info->slave->dev->master->name);
+ client_info->slave->bond->dev->name);
continue;
}
}
@@ -1043,7 +1043,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
if (dev_set_mac_address(dev, &s_addr)) {
pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
"ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
- dev->master->name, dev->name);
+ slave->bond->dev->name, dev->name);
return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ef2cb2418535..11d01d67b3f5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -746,11 +746,9 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
{
struct in_device *in_dev;
- rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
ip_mc_rejoin_groups(in_dev);
- rcu_read_unlock();
}
/*
@@ -760,9 +758,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
*/
static void bond_resend_igmp_join_requests(struct bonding *bond)
{
- struct net_device *bond_dev, *vlan_dev, *master_dev;
+ struct net_device *bond_dev, *vlan_dev, *upper_dev;
struct vlan_entry *vlan;
+ rcu_read_lock();
read_lock(&bond->lock);
bond_dev = bond->dev;
@@ -774,18 +773,14 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
* if bond is enslaved to a bridge,
* then rejoin all groups on its master
*/
- master_dev = bond_dev->master;
- if (master_dev)
- if ((master_dev->priv_flags & IFF_EBRIDGE)
- && (bond_dev->priv_flags & IFF_BRIDGE_PORT))
- __bond_resend_igmp_join_requests(master_dev);
+ upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
+ if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
+ __bond_resend_igmp_join_requests(upper_dev);
/* rejoin all groups on vlan devices */
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
vlan_dev = __vlan_find_dev_deep(bond_dev,
vlan->vlan_id);
- rcu_read_unlock();
if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev);
}
@@ -794,13 +789,16 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
read_unlock(&bond->lock);
+ rcu_read_unlock();
}
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mcast_work.work);
+ rcu_read_lock();
bond_resend_igmp_join_requests(bond);
+ rcu_read_unlock();
}
/*
@@ -1251,7 +1249,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
return;
slave->np = NULL;
- __netpoll_free_rcu(np);
+ __netpoll_free_async(np);
}
static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
{
@@ -1322,14 +1320,15 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
/*---------------------------------- IOCTL ----------------------------------*/
-static int bond_sethwaddr(struct net_device *bond_dev,
- struct net_device *slave_dev)
+static void bond_set_dev_addr(struct net_device *bond_dev,
+ struct net_device *slave_dev)
{
pr_debug("bond_dev=%p\n", bond_dev);
pr_debug("slave_dev=%p\n", slave_dev);
pr_debug("slave_dev->addr_len=%d\n", slave_dev->addr_len);
memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
- return 0;
+ bond_dev->addr_assign_type = NET_ADDR_SET;
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}
static netdev_features_t bond_fix_features(struct net_device *dev,
@@ -1493,6 +1492,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
return ret;
}
+static int bond_master_upper_dev_link(struct net_device *bond_dev,
+ struct net_device *slave_dev)
+{
+ int err;
+
+ err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+ if (err)
+ return err;
+ slave_dev->flags |= IFF_SLAVE;
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+ return 0;
+}
+
+static void bond_upper_dev_unlink(struct net_device *bond_dev,
+ struct net_device *slave_dev)
+{
+ netdev_upper_dev_unlink(slave_dev, bond_dev);
+ slave_dev->flags &= ~IFF_SLAVE;
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
@@ -1609,10 +1629,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (is_zero_ether_addr(bond->dev->dev_addr))
- memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
- slave_dev->addr_len);
-
+ if (bond->dev_addr_from_first)
+ bond_set_dev_addr(bond->dev, slave_dev);
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
if (!new_slave) {
@@ -1655,9 +1673,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- res = netdev_set_bond_master(slave_dev, bond_dev);
+ res = bond_master_upper_dev_link(bond_dev, slave_dev);
if (res) {
- pr_debug("Error %d calling netdev_set_bond_master\n", res);
+ pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
goto err_restore_mac;
}
@@ -1891,7 +1909,7 @@ err_close:
dev_close(slave_dev);
err_unset_master:
- netdev_set_bond_master(slave_dev, NULL);
+ bond_upper_dev_unlink(bond_dev, slave_dev);
err_restore_mac:
if (!bond->params.fail_over_mac) {
@@ -1919,7 +1937,8 @@ err_undo_flags:
/*
* Try to release the slave device <slave> from the bond device <master>
* It is legal to access curr_active_slave without a lock because all the function
- * is write-locked.
+ * is write-locked. If "all" is true it means that the function is being called
+ * while destroying a bond interface and all slaves are being released.
*
* The rules for slave state should be:
* for Active/Backup:
@@ -1927,7 +1946,9 @@ err_undo_flags:
* for Bonded connections:
* The first up interface should be left on and all others downed.
*/
-int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+static int __bond_release_one(struct net_device *bond_dev,
+ struct net_device *slave_dev,
+ bool all)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
@@ -1936,7 +1957,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
- (slave_dev->master != bond_dev)) {
+ !netdev_has_upper_dev(slave_dev, bond_dev)) {
pr_err("%s: Error: cannot release %s.\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
@@ -1964,7 +1985,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
synchronize_net();
write_lock_bh(&bond->lock);
- if (!bond->params.fail_over_mac) {
+ if (!all && !bond->params.fail_over_mac) {
if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond->slave_cnt > 1)
pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
@@ -2010,7 +2031,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
write_lock_bh(&bond->lock);
}
- if (oldcurrent == slave) {
+ if (all) {
+ bond->curr_active_slave = NULL;
+ } else if (oldcurrent == slave) {
/*
* Note that we hold RTNL over this sequence, so there
* is no concern that another slave add/remove event
@@ -2029,12 +2052,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond->slave_cnt == 0) {
bond_set_carrier(bond);
-
- /* if the last slave was removed, zero the mac address
- * of the master so it will be set by the application
- * to the mac address of the first slave
- */
- memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
+ eth_hw_addr_random(bond_dev);
+ bond->dev_addr_from_first = true;
if (bond_vlan_used(bond)) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
@@ -2080,7 +2099,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_set_bond_master(slave_dev, NULL);
+ bond_upper_dev_unlink(bond_dev, slave_dev);
slave_disable_netpoll(slave);
@@ -2103,6 +2122,12 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
return 0; /* deletion OK */
}
+/* A wrapper used because of ndo_del_link */
+int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+{
+ return __bond_release_one(bond_dev, slave_dev, false);
+}
+
/*
* First release a slave and then destroy the bond if no more slaves are left.
* Must be under rtnl_lock when this function is called.
@@ -2124,121 +2149,6 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
}
/*
- * This function releases all slaves.
- */
-static int bond_release_all(struct net_device *bond_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
- struct net_device *slave_dev;
- struct sockaddr addr;
-
- write_lock_bh(&bond->lock);
-
- netif_carrier_off(bond_dev);
-
- if (bond->slave_cnt == 0)
- goto out;
-
- bond->current_arp_slave = NULL;
- bond->primary_slave = NULL;
- bond_change_active_slave(bond, NULL);
-
- while ((slave = bond->first_slave) != NULL) {
- /* Inform AD package of unbinding of slave
- * before slave is detached from the list.
- */
- if (bond->params.mode == BOND_MODE_8023AD)
- bond_3ad_unbind_slave(slave);
-
- slave_dev = slave->dev;
- bond_detach_slave(bond, slave);
-
- /* now that the slave is detached, unlock and perform
- * all the undo steps that should not be called from
- * within a lock.
- */
- write_unlock_bh(&bond->lock);
-
- /* unregister rx_handler early so bond_handle_frame wouldn't
- * be called for this slave anymore.
- */
- netdev_rx_handler_unregister(slave_dev);
- synchronize_net();
-
- if (bond_is_lb(bond)) {
- /* must be called only after the slave
- * has been detached from the list
- */
- bond_alb_deinit_slave(bond, slave);
- }
-
- bond_destroy_slave_symlinks(bond_dev, slave_dev);
- bond_del_vlans_from_slave(bond, slave_dev);
-
- /* If the mode USES_PRIMARY, then we should only remove its
- * promisc and mc settings if it was the curr_active_slave, but that was
- * already taken care of above when we detached the slave
- */
- if (!USES_PRIMARY(bond->params.mode)) {
- /* unset promiscuity level from slave */
- if (bond_dev->flags & IFF_PROMISC)
- dev_set_promiscuity(slave_dev, -1);
-
- /* unset allmulti level from slave */
- if (bond_dev->flags & IFF_ALLMULTI)
- dev_set_allmulti(slave_dev, -1);
-
- /* flush master's mc_list from slave */
- netif_addr_lock_bh(bond_dev);
- bond_mc_list_flush(bond_dev, slave_dev);
- netif_addr_unlock_bh(bond_dev);
- }
-
- netdev_set_bond_master(slave_dev, NULL);
-
- slave_disable_netpoll(slave);
-
- /* close slave before restoring its mac address */
- dev_close(slave_dev);
-
- if (!bond->params.fail_over_mac) {
- /* restore original ("permanent") mac address*/
- memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
- addr.sa_family = slave_dev->type;
- dev_set_mac_address(slave_dev, &addr);
- }
-
- kfree(slave);
-
- /* re-acquire the lock before getting the next slave */
- write_lock_bh(&bond->lock);
- }
-
- /* zero the mac address of the master so it will be
- * set by the application to the mac address of the
- * first slave
- */
- memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
-
- if (bond_vlan_used(bond)) {
- pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
- bond_dev->name);
- }
-
- pr_info("%s: released all slaves\n", bond_dev->name);
-
-out:
- write_unlock_bh(&bond->lock);
-
- bond_compute_features(bond);
-
- return 0;
-}
-
-/*
* This function changes the active slave to slave <slave_dev>.
* It returns -EINVAL in the following cases.
* - <slave_dev> is not found in the list.
@@ -2259,8 +2169,9 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
if (!USES_PRIMARY(bond->params.mode))
return -EINVAL;
- /* Verify that master_dev is indeed the master of slave_dev */
- if (!(slave_dev->flags & IFF_SLAVE) || (slave_dev->master != bond_dev))
+ /* Verify that bond_dev is indeed the master of slave_dev */
+ if (!(slave_dev->flags & IFF_SLAVE) ||
+ !netdev_has_upper_dev(slave_dev, bond_dev))
return -EINVAL;
read_lock(&bond->lock);
@@ -3258,36 +3169,32 @@ static int bond_master_netdev_event(unsigned long event,
static int bond_slave_netdev_event(unsigned long event,
struct net_device *slave_dev)
{
- struct net_device *bond_dev = slave_dev->master;
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave = NULL;
+ struct slave *slave = bond_slave_get_rtnl(slave_dev);
+ struct bonding *bond = slave->bond;
+ struct net_device *bond_dev = slave->bond->dev;
+ u32 old_speed;
+ u8 old_duplex;
switch (event) {
case NETDEV_UNREGISTER:
- if (bond_dev) {
- if (bond->setup_by_slave)
- bond_release_and_destroy(bond_dev, slave_dev);
- else
- bond_release(bond_dev, slave_dev);
- }
+ if (bond->setup_by_slave)
+ bond_release_and_destroy(bond_dev, slave_dev);
+ else
+ bond_release(bond_dev, slave_dev);
break;
case NETDEV_UP:
case NETDEV_CHANGE:
- slave = bond_get_slave_by_dev(bond, slave_dev);
- if (slave) {
- u32 old_speed = slave->speed;
- u8 old_duplex = slave->duplex;
+ old_speed = slave->speed;
+ old_duplex = slave->duplex;
- bond_update_speed_duplex(slave);
+ bond_update_speed_duplex(slave);
- if (bond->params.mode == BOND_MODE_8023AD) {
- if (old_speed != slave->speed)
- bond_3ad_adapter_speed_changed(slave);
- if (old_duplex != slave->duplex)
- bond_3ad_adapter_duplex_changed(slave);
- }
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ if (old_speed != slave->speed)
+ bond_3ad_adapter_speed_changed(slave);
+ if (old_duplex != slave->duplex)
+ bond_3ad_adapter_duplex_changed(slave);
}
-
break;
case NETDEV_DOWN:
/*
@@ -3604,6 +3511,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
struct ifslave k_sinfo;
struct ifslave __user *u_sinfo = NULL;
struct mii_ioctl_data *mii = NULL;
+ struct net *net;
int res = 0;
pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
@@ -3670,10 +3578,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
}
- if (!capable(CAP_NET_ADMIN))
+ net = dev_net(bond_dev);
+
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
+ slave_dev = dev_get_by_name(net, ifr->ifr_slave);
pr_debug("slave_dev=%p:\n", slave_dev);
@@ -3692,7 +3602,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
case BOND_SETHWADDR_OLD:
case SIOCBONDSETHWADDR:
- res = bond_sethwaddr(bond_dev, slave_dev);
+ bond_set_dev_addr(bond_dev, slave_dev);
+ res = 0;
break;
case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
@@ -4314,11 +4225,12 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
}
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION, 32);
- snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
+ BOND_ABI_VERSION);
}
static const struct ethtool_ops bond_ethtool_ops = {
@@ -4352,6 +4264,10 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_fix_features = bond_fix_features,
};
+static const struct device_type bond_type = {
+ .name = "bond",
+};
+
static void bond_destructor(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -4382,6 +4298,8 @@ static void bond_setup(struct net_device *bond_dev)
bond_dev->destructor = bond_destructor;
+ SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
+
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
@@ -4427,12 +4345,12 @@ static void bond_uninit(struct net_device *bond_dev)
bond_netpoll_cleanup(bond_dev);
/* Release the bonded slaves */
- bond_release_all(bond_dev);
+ while (bond->first_slave != NULL)
+ __bond_release_one(bond_dev, bond->first_slave->dev, true);
+ pr_info("%s: released all slaves\n", bond_dev->name);
list_del(&bond->bond_list);
- bond_work_cancel_all(bond);
-
bond_debug_unregister(bond);
__hw_addr_flush(&bond->mc_list);
@@ -4843,6 +4761,13 @@ static int bond_init(struct net_device *bond_dev)
bond_debug_register(bond);
+ /* Ensure valid dev_addr */
+ if (is_zero_ether_addr(bond_dev->dev_addr) &&
+ bond_dev->addr_assign_type == NET_ADDR_PERM) {
+ eth_hw_addr_random(bond_dev);
+ bond->dev_addr_from_first = true;
+ }
+
__hw_addr_init(&bond->mc_list);
return 0;
}
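One small but reusable detail from the bond_main.c changes above: bond_ethtool_get_drvinfo() now uses strlcpy() with sizeof() instead of strncpy() with a hard-coded 32. strlcpy() always NUL-terminates, and sizeof() keeps the bound tied to the ethtool_drvinfo field declarations. Condensed restatement of the new body:

static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
                                     struct ethtool_drvinfo *drvinfo)
{
        /* strncpy(dst, src, 32) can leave dst unterminated when src is
         * 32 bytes or longer; strlcpy() cannot.
         */
        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
                 BOND_ABI_VERSION);
}
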
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1877ed7ca086..1c9e09fbdff8 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
pr_info("%s: Setting primary slave to None.\n",
bond->dev->name);
bond->primary_slave = NULL;
+ memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
goto out;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 21b68e5c14fd..2baec24388b1 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -248,6 +248,7 @@ struct bonding {
/* debugging support via debugfs */
struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
+ bool dev_addr_from_first;
};
static inline bool bond_vlan_used(struct bonding *bond)
@@ -258,6 +259,9 @@ static inline bool bond_vlan_used(struct bonding *bond)
#define bond_slave_get_rcu(dev) \
((struct slave *) rcu_dereference(dev->rx_handler_data))
+#define bond_slave_get_rtnl(dev) \
+ ((struct slave *) rtnl_dereference(dev->rx_handler_data))
+
/**
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
@@ -280,11 +284,9 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
{
- if (!slave || !slave->dev->master) {
+ if (!slave || !slave->bond)
return NULL;
- }
-
- return netdev_priv(slave->dev->master);
+ return slave->bond;
}
static inline bool bond_is_lb(const struct bonding *bond)
@@ -360,10 +362,9 @@ static inline void bond_netpoll_send_skb(const struct slave *slave,
static inline void bond_set_slave_inactive_flags(struct slave *slave)
{
- struct bonding *bond = netdev_priv(slave->dev->master);
- if (!bond_is_lb(bond))
+ if (!bond_is_lb(slave->bond))
bond_set_backup_slave(slave);
- if (!bond->params.all_slaves_active)
+ if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
}
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index abf4d7a9dcce..60c2142373c9 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -6,7 +6,7 @@ comment "CAIF transport drivers"
config CAIF_TTY
tristate "CAIF TTY transport driver"
- depends on CAIF
+ depends on CAIF && TTY
default n
---help---
The CAIF TTY transport driver is a Line Discipline (ldisc)
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 5de74e762021..666891a9a248 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -91,7 +91,7 @@ static inline void update_tty_status(struct ser_device *ser)
ser->tty->hw_stopped << 4 |
ser->tty->flow_stopped << 3 |
ser->tty->packet << 2 |
- ser->tty->low_latency << 1 |
+ ser->tty->port->low_latency << 1 |
ser->tty->warned;
}
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index bc497d718858..bce8bac311c9 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -633,9 +633,6 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
if (tx_buf == NULL) {
- pr_warn("ERROR, Could not"
- " allocate dynamic mem. for tx_buf,"
- " Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
@@ -662,9 +659,6 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
if (rx_buf == NULL) {
- pr_warn("ERROR, Could not"
- " allocate dynamic mem.for rx_buf,"
- " Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index b56bd9e80957..9862b2e07644 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,9 +1,7 @@
menu "CAN Device Drivers"
- depends on CAN
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
- depends on CAN
---help---
Similar to the network loopback devices, vcan offers a
virtual local CAN interface.
@@ -13,7 +11,7 @@ config CAN_VCAN
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
- depends on CAN
+ depends on TTY
---help---
CAN driver for several 'low cost' CAN interfaces that are attached
via serial lines or via USB-to-serial adapters using the LAWICEL
@@ -33,16 +31,16 @@ config CAN_SLCAN
config CAN_DEV
tristate "Platform CAN drivers with Netlink support"
- depends on CAN
default y
---help---
Enables the common framework for platform CAN drivers with Netlink
support. This is the standard library for CAN drivers.
If unsure, say Y.
+if CAN_DEV
+
config CAN_CALC_BITTIMING
bool "CAN bit-timing calculation"
- depends on CAN_DEV
default y
---help---
If enabled, CAN bit-timing parameters will be calculated for the
@@ -54,15 +52,26 @@ config CAN_CALC_BITTIMING
arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
If unsure, say Y.
+config CAN_LEDS
+ bool "Enable LED triggers for Netlink based drivers"
+ depends on LEDS_CLASS
+ select LEDS_TRIGGERS
+ ---help---
+ This option adds two LED triggers for packet receive and transmit
+ events on each supported CAN device.
+
+ Say Y here if you are working on a system with led-class supported
+ LEDs and you want to use them as canbus activity indicators.
+
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
- depends on CAN_DEV && (ARCH_AT91SAM9263 || ARCH_AT91SAM9X5)
+ depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9X5
---help---
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
config CAN_TI_HECC
- depends on CAN_DEV && ARCH_OMAP3
+ depends on ARCH_OMAP3
tristate "TI High End CAN Controller"
---help---
Driver for TI HECC (High End CAN Controller) module found on many
@@ -70,12 +79,12 @@ config CAN_TI_HECC
config CAN_MCP251X
tristate "Microchip MCP251x SPI CAN controllers"
- depends on CAN_DEV && SPI && HAS_DMA
+ depends on SPI && HAS_DMA
---help---
Driver for the Microchip MCP251x SPI CAN controllers.
config CAN_BFIN
- depends on CAN_DEV && (BF534 || BF536 || BF537 || BF538 || BF539 || BF54x)
+ depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
tristate "Analog Devices Blackfin on-chip CAN"
---help---
Driver for the Analog Devices Blackfin on-chip CAN controllers
@@ -85,7 +94,7 @@ config CAN_BFIN
config CAN_JANZ_ICAN3
tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
- depends on CAN_DEV && MFD_JANZ_CMODIO
+ depends on MFD_JANZ_CMODIO
---help---
Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
connects to a MODULbus carrier board.
@@ -98,13 +107,13 @@ config HAVE_CAN_FLEXCAN
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
- depends on CAN_DEV && HAVE_CAN_FLEXCAN
+ depends on HAVE_CAN_FLEXCAN
---help---
Say Y here if you want to support for Freescale FlexCAN.
config PCH_CAN
tristate "Intel EG20T PCH CAN controller"
- depends on CAN_DEV && PCI
+ depends on PCI
---help---
This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -112,7 +121,7 @@ config PCH_CAN
config CAN_GRCAN
tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
- depends on CAN_DEV && OF
+ depends on OF
---help---
Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
Note that the driver supports little endian, even though little
@@ -131,9 +140,10 @@ source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
+endif
+
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
- depends on CAN
---help---
Say Y here if you want the CAN device drivers to produce a bunch of
debug messages to the system log. Select this if you are having
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 7de59862bbe9..c7440392adbb 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_CAN_SLCAN) += slcan.o
obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
+can-dev-$(CONFIG_CAN_LEDS) += led.o
+
obj-y += usb/
obj-y += softing/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 81baefda037b..44f363792b59 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -37,6 +37,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#define AT91_MB_MASK(i) ((1 << (i)) - 1)
@@ -641,6 +642,8 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+
+ can_led_event(dev, CAN_LED_EVENT_RX);
}
/**
@@ -875,6 +878,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
dev->stats.tx_packets++;
+ can_led_event(dev, CAN_LED_EVENT_TX);
}
}
@@ -1128,6 +1132,8 @@ static int at91_open(struct net_device *dev)
goto out_close;
}
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
/* start chip and queuing */
at91_chip_start(dev);
napi_enable(&priv->napi);
@@ -1159,6 +1165,8 @@ static int at91_close(struct net_device *dev)
close_candev(dev);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -1321,6 +1329,8 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_free;
}
+ devm_can_led_init(dev);
+
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->reg_base, dev->irq);
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 3b83bafcd947..61ffc12d8fd8 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -1,6 +1,6 @@
menuconfig CAN_C_CAN
tristate "Bosch C_CAN/D_CAN devices"
- depends on CAN_DEV && HAS_IOMEM
+ depends on HAS_IOMEM
if CAN_C_CAN
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 5233b8f58d77..a668cd491cb3 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -39,6 +39,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include "c_can.h"
@@ -477,6 +478,8 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
+ can_led_event(dev, CAN_LED_EVENT_RX);
+
return 0;
}
@@ -488,8 +491,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
+
+ /* According to C_CAN documentation, the reserved bit
+ * in IFx_MASK2 register is fixed to 1
+ */
priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
- IFX_WRITE_HIGH_16BIT(mask));
+ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
@@ -751,6 +758,7 @@ static void c_can_do_tx(struct net_device *dev)
C_CAN_IFACE(MSGCTRL_REG, 0))
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
+ can_led_event(dev, CAN_LED_EVENT_TX);
c_can_inval_msg_object(dev, 0, msg_obj_no);
} else {
break;
@@ -960,7 +968,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL);
break;
case LEC_BIT1_ERROR:
@@ -973,7 +981,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL);
break;
default:
@@ -1115,6 +1123,8 @@ static int c_can_open(struct net_device *dev)
napi_enable(&priv->napi);
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
/* start the c_can controller */
c_can_start(dev);
@@ -1143,6 +1153,8 @@ static int c_can_close(struct net_device *dev)
c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -1268,6 +1280,8 @@ int register_c_can_dev(struct net_device *dev)
err = register_candev(dev);
if (err)
c_can_pm_runtime_disable(priv);
+ else
+ devm_can_led_init(dev);
return err;
}
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
index 22c07a8c8b43..6a9a5ba79220 100644
--- a/drivers/net/can/cc770/Kconfig
+++ b/drivers/net/can/cc770/Kconfig
@@ -1,6 +1,6 @@
menuconfig CAN_CC770
tristate "Bosch CC770 and Intel AN82527 devices"
- depends on CAN_DEV && HAS_IOMEM
+ depends on HAS_IOMEM
if CAN_CC770
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 8233e5ed2939..f9cba4123c66 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -24,7 +24,9 @@
#include <linux/if_arp.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/can/netlink.h>
+#include <linux/can/led.h>
#include <net/rtnetlink.h>
#define MOD_DESC "CAN device driver interface"
@@ -501,13 +503,18 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
- skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct can_frame));
if (unlikely(!skb))
return NULL;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
memset(*cf, 0, sizeof(struct can_frame));
@@ -794,10 +801,25 @@ void unregister_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(unregister_candev);
+/*
+ * Test if a network device is a candev based device
+ * and return the can_priv* if so.
+ */
+struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ if ((dev->type != ARPHRD_CAN) || (dev->rtnl_link_ops != &can_link_ops))
+ return NULL;
+
+ return netdev_priv(dev);
+}
+EXPORT_SYMBOL_GPL(safe_candev_priv);
+
static __init int can_dev_init(void)
{
int err;
+ can_led_notifier_init();
+
err = rtnl_link_register(&can_link_ops);
if (!err)
printk(KERN_INFO MOD_DESC "\n");
@@ -809,6 +831,8 @@ module_init(can_dev_init);
static __exit void can_dev_exit(void)
{
rtnl_link_unregister(&can_link_ops);
+
+ can_led_notifier_exit();
}
module_exit(can_dev_exit);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0289a6d86f66..769d29ed106d 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -23,6 +23,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include <linux/can/platform/flexcan.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -564,6 +565,8 @@ static int flexcan_read_frame(struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ can_led_event(dev, CAN_LED_EVENT_RX);
+
return 1;
}
@@ -652,6 +655,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
stats->tx_bytes += can_get_echo_skb(dev, 0);
stats->tx_packets++;
+ can_led_event(dev, CAN_LED_EVENT_TX);
flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
netif_wake_queue(dev);
}
@@ -865,6 +869,9 @@ static int flexcan_open(struct net_device *dev)
err = flexcan_chip_start(dev);
if (err)
goto out_close;
+
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -893,6 +900,8 @@ static int flexcan_close(struct net_device *dev)
close_candev(dev);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -1092,6 +1101,8 @@ static int flexcan_probe(struct platform_device *pdev)
goto failed_register;
}
+ devm_can_led_init(dev);
+
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->base, dev->irq);
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
new file mode 100644
index 000000000000..f27fca65dc4a
--- /dev/null
+++ b/drivers/net/can/led.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
+ * Copyright 2012, Kurt Van Dijck <kurt.van.dijck@eia.be>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+
+#include <linux/can/led.h>
+
+static unsigned long led_delay = 50;
+module_param(led_delay, ulong, 0644);
+MODULE_PARM_DESC(led_delay,
+ "blink delay time for activity leds (msecs, default: 50).");
+
+/* Trigger a LED event in response to a CAN device event */
+void can_led_event(struct net_device *netdev, enum can_led_event event)
+{
+ struct can_priv *priv = netdev_priv(netdev);
+
+ switch (event) {
+ case CAN_LED_EVENT_OPEN:
+ led_trigger_event(priv->tx_led_trig, LED_FULL);
+ led_trigger_event(priv->rx_led_trig, LED_FULL);
+ break;
+ case CAN_LED_EVENT_STOP:
+ led_trigger_event(priv->tx_led_trig, LED_OFF);
+ led_trigger_event(priv->rx_led_trig, LED_OFF);
+ break;
+ case CAN_LED_EVENT_TX:
+ if (led_delay)
+ led_trigger_blink_oneshot(priv->tx_led_trig,
+ &led_delay, &led_delay, 1);
+ break;
+ case CAN_LED_EVENT_RX:
+ if (led_delay)
+ led_trigger_blink_oneshot(priv->rx_led_trig,
+ &led_delay, &led_delay, 1);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(can_led_event);
+
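+/* devres release callback: unregister the tx and rx LED triggers */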
+static void can_led_release(struct device *gendev, void *res)
+{
+ struct can_priv *priv = netdev_priv(to_net_dev(gendev));
+
+ led_trigger_unregister_simple(priv->tx_led_trig);
+ led_trigger_unregister_simple(priv->rx_led_trig);
+}
+
+/* Register CAN LED triggers for a CAN device
+ *
+ * This is normally called from a driver's probe function
+ */
+void devm_can_led_init(struct net_device *netdev)
+{
+ struct can_priv *priv = netdev_priv(netdev);
+ void *res;
+
+ res = devres_alloc(can_led_release, 0, GFP_KERNEL);
+ if (!res) {
+ netdev_err(netdev, "cannot register LED triggers\n");
+ return;
+ }
+
+ snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name),
+ "%s-tx", netdev->name);
+ snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name),
+ "%s-rx", netdev->name);
+
+ led_trigger_register_simple(priv->tx_led_trig_name,
+ &priv->tx_led_trig);
+ led_trigger_register_simple(priv->rx_led_trig_name,
+ &priv->rx_led_trig);
+
+ devres_add(&netdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_can_led_init);
+
+/* NETDEV rename notifier to rename the associated led triggers too */
+static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
+ void *data)
+{
+ struct net_device *netdev = data;
+ struct can_priv *priv = safe_candev_priv(netdev);
+ char name[CAN_LED_NAME_SZ];
+
+ if (!priv)
+ return NOTIFY_DONE;
+
+ if (msg == NETDEV_CHANGENAME) {
+ snprintf(name, sizeof(name), "%s-tx", netdev->name);
+ led_trigger_rename_static(name, priv->tx_led_trig);
+
+ snprintf(name, sizeof(name), "%s-rx", netdev->name);
+ led_trigger_rename_static(name, priv->rx_led_trig);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* notifier block for netdevice event */
+static struct notifier_block can_netdev_notifier __read_mostly = {
+ .notifier_call = can_led_notifier,
+};
+
+int __init can_led_notifier_init(void)
+{
+ return register_netdevice_notifier(&can_netdev_notifier);
+}
+
+void __exit can_led_notifier_exit(void)
+{
+ unregister_netdevice_notifier(&can_netdev_notifier);
+}
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 5eaf47b8e37b..f32b9fc6a983 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -60,6 +60,7 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
+#include <linux/can/led.h>
#include <linux/can/platform/mcp251x.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -494,6 +495,9 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
priv->net->stats.rx_packets++;
priv->net->stats.rx_bytes += frame->can_dlc;
+
+ can_led_event(priv->net, CAN_LED_EVENT_RX);
+
netif_rx_ni(skb);
}
@@ -707,6 +711,8 @@ static int mcp251x_stop(struct net_device *net)
mutex_unlock(&priv->mcp_lock);
+ can_led_event(net, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -905,6 +911,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (intf & CANINTF_TX) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
+ can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
can_get_echo_skb(net, 0);
priv->tx_len = 0;
@@ -968,6 +975,9 @@ static int mcp251x_open(struct net_device *net)
mcp251x_open_clean(net);
goto open_unlock;
}
+
+ can_led_event(net, CAN_LED_EVENT_OPEN);
+
netif_wake_queue(net);
open_unlock:
@@ -1077,10 +1087,15 @@ static int mcp251x_can_probe(struct spi_device *spi)
pdata->transceiver_enable(0);
ret = register_candev(net);
- if (!ret) {
- dev_info(&spi->dev, "probed\n");
- return ret;
- }
+ if (ret)
+ goto error_probe;
+
+ devm_can_led_init(net);
+
+ dev_info(&spi->dev, "probed\n");
+
+ return ret;
+
error_probe:
if (!mcp251x_enable_dma)
kfree(priv->spi_rx_buf);
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index d38706958af6..f19be5269e7b 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
config CAN_MSCAN
- depends on CAN_DEV && (PPC || M68K)
+ depends on PPC || M68K
tristate "Support for Freescale MSCAN based chips"
---help---
The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 7d1748575b1f..5c314a961970 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -560,7 +560,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
stats->rx_errors++;
break;
case PCH_CRC_ERR:
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL;
priv->can.can_stats.bus_error++;
stats->rx_errors++;
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 92f73c708a3d..b39ca5b3ea7f 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -1,6 +1,6 @@
menuconfig CAN_SJA1000
tristate "Philips/NXP SJA1000 devices"
- depends on CAN_DEV && HAS_IOMEM
+ depends on HAS_IOMEM
if CAN_SJA1000
@@ -99,11 +99,11 @@ config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
depends on ISA
help
- This driver is for Technologic Systems' TSCAN-1 PC104 boards.
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
- The driver supports multiple boards and automatically configures them:
- PLD IO base addresses are read from jumpers JP1 and JP2,
- IRQ numbers are read from jumpers JP4 and JP5,
- SJA1000 IO base addresses are chosen heuristically (first that works).
+ This driver is for Technologic Systems' TSCAN-1 PC104 boards.
+ http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ The driver supports multiple boards and automatically configures them:
+ PLD IO base addresses are read from jumpers JP1 and JP2,
+ IRQ numbers are read from jumpers JP4 and JP5,
+ SJA1000 IO base addresses are chosen heuristically (first that works).
endif
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 036a326836b2..36d298da2af6 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -238,7 +238,6 @@ static int ems_pci_add_card(struct pci_dev *pdev,
/* Allocating card structures to hold addresses, ... */
card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
if (card == NULL) {
- dev_err(&pdev->dev, "Unable to allocate memory\n");
pci_disable_device(pdev);
return -ENOMEM;
}
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index d84888f03d92..d1e7f1006ddd 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -339,8 +339,7 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
*/
static void peak_pciec_start_led_work(struct peak_pciec_card *card)
{
- if (!delayed_work_pending(&card->led_work))
- schedule_delayed_work(&card->led_work, HZ);
+ schedule_delayed_work(&card->led_work, HZ);
}
/*
@@ -451,11 +450,8 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
} else {
/* create the bit banging I2C adapter structure */
card = kzalloc(sizeof(struct peak_pciec_card), GFP_KERNEL);
- if (!card) {
- dev_err(&pdev->dev,
- "failed allocating memory for i2c chip\n");
+ if (!card)
return -ENOMEM;
- }
card->cfg_base = chan->cfg_base;
card->reg_base = priv->reg_base;
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index f1175142b0a0..1a7020ba37f5 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -660,7 +660,6 @@ static int pcan_probe(struct pcmcia_device *pdev)
card = kzalloc(sizeof(struct pcan_pccard), GFP_KERNEL);
if (!card) {
- dev_err(&pdev->dev, "couldn't allocate card memory\n");
err = -ENOMEM;
goto probe_err_2;
}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 11d1062a9449..a042cdc260dc 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -508,7 +508,6 @@ static int plx_pci_add_card(struct pci_dev *pdev,
/* Allocate card structures to hold addresses, ... */
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card) {
- dev_err(&pdev->dev, "Unable to allocate memory\n");
pci_disable_device(pdev);
return -ENOMEM;
}
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 83ee11eca0e2..daf4013a8fc7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,6 +60,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include "sja1000.h"
@@ -368,6 +369,8 @@ static void sja1000_rx(struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+
+ can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
@@ -521,6 +524,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
can_get_echo_skb(dev, 0);
}
netif_wake_queue(dev);
+ can_led_event(dev, CAN_LED_EVENT_TX);
}
if (isrc & IRQ_RI) {
/* receive interrupt */
@@ -575,6 +579,8 @@ static int sja1000_open(struct net_device *dev)
/* init and start chip */
sja1000_start(dev);
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
netif_start_queue(dev);
return 0;
@@ -592,6 +598,8 @@ static int sja1000_close(struct net_device *dev)
close_candev(dev);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -639,6 +647,8 @@ static const struct net_device_ops sja1000_netdev_ops = {
int register_sja1000dev(struct net_device *dev)
{
+ int ret;
+
if (!sja1000_probe_chip(dev))
return -ENODEV;
@@ -648,7 +658,12 @@ int register_sja1000dev(struct net_device *dev)
set_reset_mode(dev);
chipset_init(dev);
- return register_candev(dev);
+ ret = register_candev(dev);
+
+ if (!ret)
+ devm_can_led_init(dev);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 0f5917000aa2..6433b81256cd 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -121,7 +121,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
}
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (irq == 0) {
dev_err(&ofdev->dev, "no irq found\n");
err = -ENODEV;
goto exit_unmap_mem;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index adc3708d8829..06b7e097d36e 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -55,6 +55,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/can.h>
+#include <linux/can/skb.h>
static __initconst const char banner[] =
KERN_INFO "slcan: serial line CAN interface driver\n";
@@ -184,7 +185,8 @@ static void slc_bump(struct slcan *sl)
cf.data[i] |= tmp;
}
- skb = dev_alloc_skb(sizeof(struct can_frame));
+ skb = dev_alloc_skb(sizeof(struct can_frame) +
+ sizeof(struct can_skb_priv));
if (!skb)
return;
@@ -192,6 +194,10 @@ static void slc_bump(struct slcan *sl)
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = sl->dev->ifindex;
+
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
netif_rx_ni(skb);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 5de46a9a77bb..96b6fe158b5b 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -1,6 +1,6 @@
config CAN_SOFTING
tristate "Softing Gmbh CAN generic support"
- depends on CAN_DEV && HAS_IOMEM
+ depends on HAS_IOMEM
---help---
Support for CAN cards from Softing Gmbh & some cards
from Vector Gmbh.
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f898c6363729..f21fc37ec578 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -50,6 +50,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include <linux/can/platform/ti_hecc.h>
#define DRV_NAME "ti_hecc"
@@ -593,6 +594,7 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
stats->rx_bytes += cf->can_dlc;
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
netif_receive_skb(skb);
stats->rx_packets++;
@@ -746,12 +748,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
if (err_status & HECC_CANES_CRCE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL;
}
if (err_status & HECC_CANES_ACKE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL;
}
}
@@ -796,6 +798,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
stats->tx_bytes += hecc_read_mbx(priv, mbxno,
HECC_CANMCF) & 0xF;
stats->tx_packets++;
+ can_led_event(ndev, CAN_LED_EVENT_TX);
can_get_echo_skb(ndev, mbxno);
--priv->tx_tail;
}
@@ -851,6 +854,8 @@ static int ti_hecc_open(struct net_device *ndev)
return err;
}
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+
ti_hecc_start(ndev);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -869,6 +874,8 @@ static int ti_hecc_close(struct net_device *ndev)
close_candev(ndev);
ti_hecc_transceiver_switch(priv, 0);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -961,6 +968,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "register_candev() failed\n");
goto probe_exit_clk;
}
+
+ devm_can_led_init(ndev);
+
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
priv->base, (u32) ndev->irq);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index a4e4bee35710..fc96a3d83ebe 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -1,5 +1,5 @@
menu "CAN USB interfaces"
- depends on USB && CAN_DEV
+ depends on USB
config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
@@ -48,4 +48,10 @@ config CAN_PEAK_USB
This driver supports the PCAN-USB and PCAN-USB Pro adapters
from PEAK-System Technik (http://www.peak-system.com).
+config CAN_8DEV_USB
+ tristate "8 devices USB2CAN interface"
+ ---help---
+ This driver supports the USB2CAN interface
+ from 8 devices (http://www.8devices.com).
+
endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 80a2ee41fd61..becef460a91a 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
+obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index c69f0b72b352..5f9a7ad9b964 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1014,17 +1014,13 @@ static int ems_usb_probe(struct usb_interface *intf,
}
dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
- if (!dev->intr_in_buffer) {
- dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
+ if (!dev->intr_in_buffer)
goto cleanup_intr_urb;
- }
dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
sizeof(struct ems_cpc_msg), GFP_KERNEL);
- if (!dev->tx_msg_buffer) {
- dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
+ if (!dev->tx_msg_buffer)
goto cleanup_intr_in_buffer;
- }
usb_set_intfdata(intf, dev);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 5b58a4d87397..45cb9f3c1324 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -561,7 +561,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
return -ENOMEM;
}
@@ -1268,7 +1267,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
stats->tx_dropped++;
goto nobufmem;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index d9290ea788e0..a0f647f92bf5 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -386,7 +386,6 @@ static int peak_usb_start(struct peak_usb_device *dev)
buf = kmalloc(dev->adapter->rx_buffer_size, GFP_KERNEL);
if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
err = -ENOMEM;
break;
@@ -442,7 +441,6 @@ static int peak_usb_start(struct peak_usb_device *dev)
buf = kmalloc(dev->adapter->tx_buffer_size, GFP_KERNEL);
if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
err = -ENOMEM;
break;
@@ -634,7 +632,6 @@ static int peak_usb_restart(struct peak_usb_device *dev)
/* also allocate enough space for the commands to send */
buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC);
if (!buf) {
- netdev_err(dev->netdev, "no memory left for async cmd\n");
usb_free_urb(urb);
return -ENOMEM;
}
@@ -729,8 +726,6 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
/* allocate a buffer large enough to send commands */
dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
if (!dev->cmd_buf) {
- dev_err(&intf->dev, "%s: couldn't alloc cmd buffer\n",
- PCAN_USB_DRIVER_NAME);
err = -ENOMEM;
goto lbl_set_intf_data;
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
new file mode 100644
index 000000000000..6e15ef08f301
--- /dev/null
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -0,0 +1,1031 @@
+/*
+ * CAN driver for "8 devices" USB2CAN converter
+ *
+ * Copyright (C) 2012 Bernd Krumboeck (krumboeck@universalnet.at)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.
+ *
+ * This driver is inspired by the 3.2.0 version of drivers/net/can/usb/ems_usb.c
+ * and drivers/net/can/usb/esd_usb2.c
+ *
+ * Many thanks to Gerhard Bertelsmann (info@gerhard-bertelsmann.de)
+ * for testing and fixing this driver. Also many thanks to "8 devices",
+ * who were very cooperative and answered my questions.
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+
+/* driver constants */
+#define MAX_RX_URBS 20
+#define MAX_TX_URBS 20
+#define RX_BUFFER_SIZE 64
+
+/* vendor and product id */
+#define USB_8DEV_VENDOR_ID 0x0483
+#define USB_8DEV_PRODUCT_ID 0x1234
+
+/* endpoints */
+enum usb_8dev_endpoint {
+ USB_8DEV_ENDP_DATA_RX = 1,
+ USB_8DEV_ENDP_DATA_TX,
+ USB_8DEV_ENDP_CMD_RX,
+ USB_8DEV_ENDP_CMD_TX
+};
+
+/* device CAN clock */
+#define USB_8DEV_ABP_CLOCK 32000000
+
+/* setup flags */
+#define USB_8DEV_SILENT 0x01
+#define USB_8DEV_LOOPBACK 0x02
+#define USB_8DEV_DISABLE_AUTO_RESTRANS 0x04
+#define USB_8DEV_STATUS_FRAME 0x08
+
+/* commands */
+enum usb_8dev_cmd {
+ USB_8DEV_RESET = 1,
+ USB_8DEV_OPEN,
+ USB_8DEV_CLOSE,
+ USB_8DEV_SET_SPEED,
+ USB_8DEV_SET_MASK_FILTER,
+ USB_8DEV_GET_STATUS,
+ USB_8DEV_GET_STATISTICS,
+ USB_8DEV_GET_SERIAL,
+ USB_8DEV_GET_SOFTW_VER,
+ USB_8DEV_GET_HARDW_VER,
+ USB_8DEV_RESET_TIMESTAMP,
+ USB_8DEV_GET_SOFTW_HARDW_VER
+};
+
+/* command options */
+#define USB_8DEV_BAUD_MANUAL 0x09
+#define USB_8DEV_CMD_START 0x11
+#define USB_8DEV_CMD_END 0x22
+
+#define USB_8DEV_CMD_SUCCESS 0
+#define USB_8DEV_CMD_ERROR 255
+
+#define USB_8DEV_CMD_TIMEOUT 1000
+
+/* frames */
+#define USB_8DEV_DATA_START 0x55
+#define USB_8DEV_DATA_END 0xAA
+
+#define USB_8DEV_TYPE_CAN_FRAME 0
+#define USB_8DEV_TYPE_ERROR_FRAME 3
+
+#define USB_8DEV_EXTID 0x01
+#define USB_8DEV_RTR 0x02
+#define USB_8DEV_ERR_FLAG 0x04
+
+/* status */
+#define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */
+#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occurred when sending */
+#define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */
+#define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error count. has reached 128 */
+#define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */
+#define USB_8DEV_STATUSMSG_STUFF 0x20 /* Stuff Error */
+#define USB_8DEV_STATUSMSG_FORM 0x21 /* Form Error */
+#define USB_8DEV_STATUSMSG_ACK 0x23 /* Ack Error */
+#define USB_8DEV_STATUSMSG_BIT0 0x24 /* Bit1 Error */
+#define USB_8DEV_STATUSMSG_BIT1 0x25 /* Bit0 Error */
+#define USB_8DEV_STATUSMSG_CRC 0x27 /* CRC Error */
+
+#define USB_8DEV_RP_MASK 0x7F /* Mask for Receive Error Bit */
+
+
+/* table of devices that work with this driver */
+static const struct usb_device_id usb_8dev_table[] = {
+ { USB_DEVICE(USB_8DEV_VENDOR_ID, USB_8DEV_PRODUCT_ID) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_8dev_table);
+
+struct usb_8dev_tx_urb_context {
+ struct usb_8dev_priv *priv;
+
+ u32 echo_index;
+ u8 dlc;
+};
+
+/* Structure to hold all of our device specific stuff */
+struct usb_8dev_priv {
+ struct can_priv can; /* must be the first member */
+
+ struct sk_buff *echo_skb[MAX_TX_URBS];
+
+ struct usb_device *udev;
+ struct net_device *netdev;
+
+ atomic_t active_tx_urbs;
+ struct usb_anchor tx_submitted;
+ struct usb_8dev_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+ struct usb_anchor rx_submitted;
+
+ struct can_berr_counter bec;
+
+ u8 *cmd_msg_buffer;
+
+ struct mutex usb_8dev_cmd_lock;
+
+};
+
+/* tx frame */
+struct __packed usb_8dev_tx_msg {
+ u8 begin;
+ u8 flags; /* RTR and EXT_ID flag */
+ __be32 id; /* upper 3 bits not used */
+ u8 dlc; /* data length code 0-8 bytes */
+ u8 data[8]; /* 64-bit data */
+ u8 end;
+};
+
+/* rx frame */
+struct __packed usb_8dev_rx_msg {
+ u8 begin;
+ u8 type; /* frame type */
+ u8 flags; /* RTR and EXT_ID flag */
+ __be32 id; /* upper 3 bits not used */
+ u8 dlc; /* data length code 0-8 bytes */
+ u8 data[8]; /* 64-bit data */
+ __be32 timestamp; /* 32-bit timestamp */
+ u8 end;
+};
+
+/* command frame */
+struct __packed usb_8dev_cmd_msg {
+ u8 begin;
+ u8 channel; /* unknown - always 0 */
+ u8 command; /* command to execute */
+ u8 opt1; /* optional parameter / return value */
+ u8 opt2; /* optional parameter 2 */
+ u8 data[10]; /* optional parameter and data */
+ u8 end;
+};
+
+static int usb_8dev_send_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size)
+{
+ int actual_length;
+
+ return usb_bulk_msg(priv->udev,
+ usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_TX),
+ msg, size, &actual_length, USB_8DEV_CMD_TIMEOUT);
+}
+
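+/* Read the command reply from the command RX bulk endpoint */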
+static int usb_8dev_wait_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size,
+ int *actual_length)
+{
+ return usb_bulk_msg(priv->udev,
+ usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_RX),
+ msg, size, actual_length, USB_8DEV_CMD_TIMEOUT);
+}
+
+/* Send command to device and receive result.
+ * Command was successful when opt1 = 0.
+ */
+static int usb_8dev_send_cmd(struct usb_8dev_priv *priv,
+ struct usb_8dev_cmd_msg *out,
+ struct usb_8dev_cmd_msg *in)
+{
+ int err;
+ int num_bytes_read;
+ struct net_device *netdev;
+
+ netdev = priv->netdev;
+
+ out->begin = USB_8DEV_CMD_START;
+ out->end = USB_8DEV_CMD_END;
+
+ mutex_lock(&priv->usb_8dev_cmd_lock);
+
+ memcpy(priv->cmd_msg_buffer, out,
+ sizeof(struct usb_8dev_cmd_msg));
+
+ err = usb_8dev_send_cmd_msg(priv, priv->cmd_msg_buffer,
+ sizeof(struct usb_8dev_cmd_msg));
+ if (err < 0) {
+ netdev_err(netdev, "sending command message failed\n");
+ goto failed;
+ }
+
+ err = usb_8dev_wait_cmd_msg(priv, priv->cmd_msg_buffer,
+ sizeof(struct usb_8dev_cmd_msg),
+ &num_bytes_read);
+ if (err < 0) {
+ netdev_err(netdev, "no command message answer\n");
+ goto failed;
+ }
+
+ memcpy(in, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg));
+
+ if (in->begin != USB_8DEV_CMD_START || in->end != USB_8DEV_CMD_END ||
+ num_bytes_read != 16 || in->opt1 != 0)
+ err = -EPROTO;
+
+failed:
+ mutex_unlock(&priv->usb_8dev_cmd_lock);
+ return err;
+}
+
+/* Send open command to device */
+static int usb_8dev_cmd_open(struct usb_8dev_priv *priv)
+{
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct usb_8dev_cmd_msg outmsg;
+ struct usb_8dev_cmd_msg inmsg;
+ u32 ctrlmode = priv->can.ctrlmode;
+ u32 flags = USB_8DEV_STATUS_FRAME;
+ __be32 beflags;
+ __be16 bebrp;
+
+ memset(&outmsg, 0, sizeof(outmsg));
+ outmsg.command = USB_8DEV_OPEN;
+ outmsg.opt1 = USB_8DEV_BAUD_MANUAL;
+ outmsg.data[0] = bt->prop_seg + bt->phase_seg1;
+ outmsg.data[1] = bt->phase_seg2;
+ outmsg.data[2] = bt->sjw;
+
+ /* BRP */
+ bebrp = cpu_to_be16((u16)bt->brp);
+ memcpy(&outmsg.data[3], &bebrp, sizeof(bebrp));
+
+ /* flags */
+ if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ flags |= USB_8DEV_LOOPBACK;
+ if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ flags |= USB_8DEV_SILENT;
+ if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ flags |= USB_8DEV_DISABLE_AUTO_RESTRANS;
+
+ beflags = cpu_to_be32(flags);
+ memcpy(&outmsg.data[5], &beflags, sizeof(beflags));
+
+ return usb_8dev_send_cmd(priv, &outmsg, &inmsg);
+}
+
+/* Send close command to device */
+static int usb_8dev_cmd_close(struct usb_8dev_priv *priv)
+{
+ struct usb_8dev_cmd_msg inmsg;
+ struct usb_8dev_cmd_msg outmsg = {
+ .channel = 0,
+ .command = USB_8DEV_CLOSE,
+ .opt1 = 0,
+ .opt2 = 0
+ };
+
+ return usb_8dev_send_cmd(priv, &outmsg, &inmsg);
+}
+
+/* Get firmware and hardware version */
+static int usb_8dev_cmd_version(struct usb_8dev_priv *priv, u32 *res)
+{
+ struct usb_8dev_cmd_msg inmsg;
+ struct usb_8dev_cmd_msg outmsg = {
+ .channel = 0,
+ .command = USB_8DEV_GET_SOFTW_HARDW_VER,
+ .opt1 = 0,
+ .opt2 = 0
+ };
+
+ int err = usb_8dev_send_cmd(priv, &outmsg, &inmsg);
+ if (err)
+ return err;
+
+ *res = be32_to_cpup((__be32 *)inmsg.data);
+
+ return err;
+}
+
+/* Set network device mode
+ *
+ * Maybe we should leave this function empty, because the device
+ * already sets the mode via the open command.
+ */
+static int usb_8dev_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = usb_8dev_cmd_open(priv);
+ if (err)
+ netdev_warn(netdev, "couldn't start device");
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+/* Read error/status frames */
+static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
+ struct usb_8dev_rx_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ /* Error message:
+ * byte 0: Status
+ * byte 1: bit 7: Receive Passive
+ * byte 1: bit 0-6: Receive Error Counter
+ * byte 2: Transmit Error Counter
+ * byte 3: Always 0 (maybe reserved for future use)
+ */
+
+ u8 state = msg->data[0];
+ u8 rxerr = msg->data[1] & USB_8DEV_RP_MASK;
+ u8 txerr = msg->data[2];
+ int rx_errors = 0;
+ int tx_errors = 0;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb)
+ return;
+
+ switch (state) {
+ case USB_8DEV_STATUSMSG_OK:
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ break;
+ case USB_8DEV_STATUSMSG_BUSOFF:
+ priv->can.state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(priv->netdev);
+ break;
+ case USB_8DEV_STATUSMSG_OVERRUN:
+ case USB_8DEV_STATUSMSG_BUSLIGHT:
+ case USB_8DEV_STATUSMSG_BUSHEAVY:
+ cf->can_id |= CAN_ERR_CRTL;
+ break;
+ default:
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ priv->can.can_stats.bus_error++;
+ break;
+ }
+
+ switch (state) {
+ case USB_8DEV_STATUSMSG_OK:
+ case USB_8DEV_STATUSMSG_BUSOFF:
+ break;
+ case USB_8DEV_STATUSMSG_ACK:
+ cf->can_id |= CAN_ERR_ACK;
+ tx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_CRC:
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ rx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_BIT0:
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ tx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_BIT1:
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ tx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ rx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ rx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_OVERRUN:
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ rx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_BUSLIGHT:
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ priv->can.can_stats.error_warning++;
+ break;
+ case USB_8DEV_STATUSMSG_BUSHEAVY:
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ break;
+ default:
+ netdev_warn(priv->netdev,
+ "Unknown status/error message (%d)\n", state);
+ break;
+ }
+
+ if (tx_errors) {
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ }
+
+ if (rx_errors)
+ stats->rx_errors++;
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ priv->bec.txerr = txerr;
+ priv->bec.rxerr = rxerr;
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+/* Read data and status frames */
+static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
+ struct usb_8dev_rx_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ if (msg->type == USB_8DEV_TYPE_ERROR_FRAME &&
+ msg->flags == USB_8DEV_ERR_FLAG) {
+ usb_8dev_rx_err_msg(priv, msg);
+ } else if (msg->type == USB_8DEV_TYPE_CAN_FRAME) {
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id = be32_to_cpu(msg->id);
+ cf->can_dlc = get_can_dlc(msg->dlc & 0xF);
+
+ if (msg->flags & USB_8DEV_EXTID)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (msg->flags & USB_8DEV_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, msg->data, cf->can_dlc);
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ can_led_event(priv->netdev, CAN_LED_EVENT_RX);
+ } else {
+ netdev_warn(priv->netdev, "frame type %d unknown",
+ msg->type);
+ }
+
+}
+
+/* Callback for reading data from device
+ *
+ * Check urb status, call read function and resubmit urb read operation.
+ */
+static void usb_8dev_read_bulk_callback(struct urb *urb)
+{
+ struct usb_8dev_priv *priv = urb->context;
+ struct net_device *netdev;
+ int retval;
+ int pos = 0;
+
+ netdev = priv->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ netdev_info(netdev, "Rx URB aborted (%d)\n",
+ urb->status);
+ goto resubmit_urb;
+ }
+
+ while (pos < urb->actual_length) {
+ struct usb_8dev_rx_msg *msg;
+
+ if (pos + sizeof(struct usb_8dev_rx_msg) > urb->actual_length) {
+ netdev_err(priv->netdev, "format error\n");
+ break;
+ }
+
+ msg = (struct usb_8dev_rx_msg *)(urb->transfer_buffer + pos);
+ usb_8dev_rx_can_msg(priv, msg);
+
+ pos += sizeof(struct usb_8dev_rx_msg);
+ }
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX),
+ urb->transfer_buffer, RX_BUFFER_SIZE,
+ usb_8dev_read_bulk_callback, priv);
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (retval == -ENODEV)
+ netif_device_detach(netdev);
+ else if (retval)
+ netdev_err(netdev,
+ "failed resubmitting read bulk urb: %d\n", retval);
+}
+
+/* Callback handler for write operations
+ *
+ * Free allocated buffers, check transmit status and
+ * calculate statistic.
+ */
+static void usb_8dev_write_bulk_callback(struct urb *urb)
+{
+ struct usb_8dev_tx_urb_context *context = urb->context;
+ struct usb_8dev_priv *priv;
+ struct net_device *netdev;
+
+ BUG_ON(!context);
+
+ priv = context->priv;
+ netdev = priv->netdev;
+
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+
+ atomic_dec(&priv->active_tx_urbs);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "Tx URB aborted (%d)\n",
+ urb->status);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += context->dlc;
+
+ can_get_echo_skb(netdev, context->echo_index);
+
+ can_led_event(netdev, CAN_LED_EVENT_TX);
+
+ /* Release context */
+ context->echo_index = MAX_TX_URBS;
+
+ netif_wake_queue(netdev);
+}
+
+/* Send data to device */
+static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf = (struct can_frame *) skb->data;
+ struct usb_8dev_tx_msg *msg;
+ struct urb *urb;
+ struct usb_8dev_tx_urb_context *context = NULL;
+ u8 *buf;
+ int i, err;
+ size_t size = sizeof(struct usb_8dev_tx_msg);
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ goto nomem;
+ }
+
+ buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ goto nomembuf;
+ }
+
+ memset(buf, 0, size);
+
+ msg = (struct usb_8dev_tx_msg *)buf;
+ msg->begin = USB_8DEV_DATA_START;
+ msg->flags = 0x00;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ msg->flags |= USB_8DEV_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ msg->flags |= USB_8DEV_EXTID;
+
+ msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK);
+ msg->dlc = cf->can_dlc;
+ memcpy(msg->data, cf->data, cf->can_dlc);
+ msg->end = USB_8DEV_DATA_END;
+
+ for (i = 0; i < MAX_TX_URBS; i++) {
+ if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+ context = &priv->tx_contexts[i];
+ break;
+ }
+ }
+
+ /* This should never happen: it would mean more URBs in flight than
+ * allowed (MAX_TX_URBS).
+ */
+ if (!context)
+ goto nofreecontext;
+
+ context->priv = priv;
+ context->echo_index = i;
+ context->dlc = cf->can_dlc;
+
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),
+ buf, size, usb_8dev_write_bulk_callback, context);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, context->echo_index);
+
+ atomic_inc(&priv->active_tx_urbs);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err))
+ goto failed;
+ else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+ /* Slow down tx path */
+ netif_stop_queue(netdev);
+
+ /* Release our reference to this URB, the USB core will eventually free
+ * it entirely.
+ */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+nofreecontext:
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+ netdev_warn(netdev, "couldn't find free context");
+
+ return NETDEV_TX_BUSY;
+
+failed:
+ can_free_echo_skb(netdev, context->echo_index);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+ atomic_dec(&priv->active_tx_urbs);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "failed tx_urb %d\n", err);
+
+nomembuf:
+ usb_free_urb(urb);
+
+nomem:
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
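+/* Report the bus error counters cached from the last status frame */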
+static int usb_8dev_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+
+ bec->txerr = priv->bec.txerr;
+ bec->rxerr = priv->bec.rxerr;
+
+ return 0;
+}
+
+/* Start USB device */
+static int usb_8dev_start(struct usb_8dev_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ int err, i;
+
+ for (i = 0; i < MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_rcvbulkpipe(priv->udev,
+ USB_8DEV_ENDP_DATA_RX),
+ buf, RX_BUFFER_SIZE,
+ usb_8dev_read_bulk_callback, priv);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &priv->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf,
+ urb->transfer_dma);
+ break;
+ }
+
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(urb);
+ }
+
+ /* Did we submit any URBs? */
+ if (i == 0) {
+ netdev_warn(netdev, "couldn't setup read URBs\n");
+ return err;
+ }
+
+ /* Warn if we couldn't submit all the URBs */
+ if (i < MAX_RX_URBS)
+ netdev_warn(netdev, "rx performance may be slow\n");
+
+ err = usb_8dev_cmd_open(priv);
+ if (err)
+ goto failed;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+failed:
+ if (err == -ENODEV)
+ netif_device_detach(priv->netdev);
+
+ netdev_warn(netdev, "couldn't submit control: %d\n", err);
+
+ return err;
+}
+
+/* Open USB device */
+static int usb_8dev_open(struct net_device *netdev)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+ int err;
+
+ /* common open */
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ can_led_event(netdev, CAN_LED_EVENT_OPEN);
+
+ /* finally start device */
+ err = usb_8dev_start(priv);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(priv->netdev);
+
+ netdev_warn(netdev, "couldn't start device: %d\n",
+ err);
+
+ close_candev(netdev);
+
+ return err;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
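+/* Kill all pending rx/tx URBs and mark every tx context as free */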
+static void unlink_all_urbs(struct usb_8dev_priv *priv)
+{
+ int i;
+
+ usb_kill_anchored_urbs(&priv->rx_submitted);
+
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ atomic_set(&priv->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+/* Close USB device */
+static int usb_8dev_close(struct net_device *netdev)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ /* Send CLOSE command to CAN controller */
+ err = usb_8dev_cmd_close(priv);
+ if (err)
+ netdev_warn(netdev, "couldn't stop device");
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ netif_stop_queue(netdev);
+
+ /* Stop polling */
+ unlink_all_urbs(priv);
+
+ close_candev(netdev);
+
+ can_led_event(netdev, CAN_LED_EVENT_STOP);
+
+ return err;
+}
+
+static const struct net_device_ops usb_8dev_netdev_ops = {
+ .ndo_open = usb_8dev_open,
+ .ndo_stop = usb_8dev_close,
+ .ndo_start_xmit = usb_8dev_start_xmit,
+};
+
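+/* Bit-timing limits advertised to the CAN core for this adapter */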
+static const struct can_bittiming_const usb_8dev_bittiming_const = {
+ .name = "usb_8dev",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* Probe USB device
+ *
+ * Check device and firmware.
+ * Set supported modes and bittiming constants.
+ * Allocate some memory.
+ */
+static int usb_8dev_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct net_device *netdev;
+ struct usb_8dev_priv *priv;
+ int i, err = -ENOMEM;
+ u32 version;
+ char buf[18];
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+
+ /* the product id is not very specific, so also check the iProduct string */
+ if (usb_string(usbdev, usbdev->descriptor.iProduct, buf,
+ sizeof(buf)) > 0 && strcmp(buf, "USB2CAN converter")) {
+ dev_info(&usbdev->dev, "ignoring: not a USB2CAN converter\n");
+ return -ENODEV;
+ }
+
+ netdev = alloc_candev(sizeof(struct usb_8dev_priv), MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "Couldn't alloc candev\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(netdev);
+
+ priv->udev = usbdev;
+ priv->netdev = netdev;
+
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.clock.freq = USB_8DEV_ABP_CLOCK;
+ priv->can.bittiming_const = &usb_8dev_bittiming_const;
+ priv->can.do_set_mode = usb_8dev_set_mode;
+ priv->can.do_get_berr_counter = usb_8dev_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_ONE_SHOT;
+
+ netdev->netdev_ops = &usb_8dev_netdev_ops;
+
+ netdev->flags |= IFF_ECHO; /* we support local echo */
+
+ init_usb_anchor(&priv->rx_submitted);
+
+ init_usb_anchor(&priv->tx_submitted);
+ atomic_set(&priv->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+ priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg),
+ GFP_KERNEL);
+ if (!priv->cmd_msg_buffer)
+ goto cleanup_candev;
+
+ usb_set_intfdata(intf, priv);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ mutex_init(&priv->usb_8dev_cmd_lock);
+
+ err = register_candev(netdev);
+ if (err) {
+ netdev_err(netdev,
+ "couldn't register CAN device: %d\n", err);
+ goto cleanup_cmd_msg_buffer;
+ }
+
+ err = usb_8dev_cmd_version(priv, &version);
+ if (err) {
+ netdev_err(netdev, "can't get firmware version\n");
+ goto cleanup_cmd_msg_buffer;
+ } else {
+ netdev_info(netdev,
+ "firmware: %d.%d, hardware: %d.%d\n",
+ (version>>24) & 0xff, (version>>16) & 0xff,
+ (version>>8) & 0xff, version & 0xff);
+ }
+
+ devm_can_led_init(netdev);
+
+ return 0;
+
+cleanup_cmd_msg_buffer:
+ kfree(priv->cmd_msg_buffer);
+
+cleanup_candev:
+ free_candev(netdev);
+
+ return err;
+}
+
+/* Called by the usb core when driver is unloaded or device is removed */
+static void usb_8dev_disconnect(struct usb_interface *intf)
+{
+ struct usb_8dev_priv *priv = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (priv) {
+ netdev_info(priv->netdev, "device disconnected\n");
+
+ unregister_netdev(priv->netdev);
+ free_candev(priv->netdev);
+
+ unlink_all_urbs(priv);
+ }
+}
+
+static struct usb_driver usb_8dev_driver = {
+ .name = "usb_8dev",
+ .probe = usb_8dev_probe,
+ .disconnect = usb_8dev_disconnect,
+ .id_table = usb_8dev_table,
+};
+
+module_usb_driver(usb_8dev_driver);
+
+MODULE_AUTHOR("Bernd Krumboeck <krumboeck@universalnet.at>");
+MODULE_DESCRIPTION("CAN driver for 8 devices USB2CAN interfaces");
+MODULE_LICENSE("GPL v2");
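The rx_submitted/tx_submitted bookkeeping above relies on USB anchors: every URB is attached to an anchor before submission so that unlink_all_urbs() can cancel the whole set with a single usb_kill_anchored_urbs() call on close or disconnect. A minimal sketch of that pattern follows; it is not the driver's code, the function name is made up, and the completion handler and DMA buffer management are deliberately left out:

#include <linux/usb.h>

/* Hypothetical example: submit one bulk-in URB and park it on an anchor so
 * that a later usb_kill_anchored_urbs(anchor) can cancel it. */
static int example_submit_rx(struct usb_device *udev, struct usb_anchor *anchor,
			     unsigned int pipe, void *buf, int len,
			     usb_complete_t complete_fn, void *context)
{
	struct urb *urb;
	int err;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, pipe, buf, len, complete_fn, context);
	usb_anchor_urb(urb, anchor);

	err = usb_submit_urb(urb, GFP_KERNEL);
	if (err) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		return err;
	}

	/* Drop the local reference; the anchor and the in-flight submission
	 * keep the URB alive until completion or cancellation. */
	usb_free_urb(urb);
	return 0;
}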
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 021d69c5d9bc..29e272cc7a98 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1448,10 +1448,10 @@ static int e100_set_settings(struct net_device *dev,
static void e100_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, "ETRAX 100LX", sizeof(info->driver) - 1);
- strncpy(info->version, "$Revision: 1.31 $", sizeof(info->version) - 1);
- strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
- strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
+ strlcpy(info->driver, "ETRAX 100LX", sizeof(info->driver));
+ strlcpy(info->version, "$Revision: 1.31 $", sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static int e100_nway_reset(struct net_device *dev)
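The point of the strncpy() to strlcpy() change above: with a length of sizeof(buf) - 1, strncpy() leaves the destination without a terminating NUL whenever the source string is at least that long, whereas strlcpy() always NUL-terminates and simply truncates. A small illustration using the kernel string helpers; the buffer size is an arbitrary example:

#include <linux/string.h>

/* Hypothetical illustration, not driver code. */
static void drvinfo_copy_example(void)
{
	char version[8];

	/* strncpy() copies at most 7 bytes here and appends no NUL because
	 * the source is longer than that, so a later "%s" print could run
	 * off the end of the buffer. */
	strncpy(version, "$Revision: 1.31 $", sizeof(version) - 1);

	/* strlcpy() also copies at most 7 bytes but always NUL-terminates,
	 * so the (truncated) result is safe to print. */
	strlcpy(version, "$Revision: 1.31 $", sizeof(version));
}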
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 325391d19bad..7a54ec04b418 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -66,36 +68,30 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
{
int i;
int ret;
+ unsigned long timeout;
- /*
- * Set all ports to the disabled state.
- */
+ /* Set all ports to the disabled state. */
for (i = 0; i < 6; i++) {
ret = REG_READ(REG_PORT(i), 0x04);
REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
}
- /*
- * Wait for transmit queues to drain.
- */
- msleep(2);
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
- /*
- * Reset the switch.
- */
+ /* Reset the switch. */
REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
- /*
- * Wait up to one second for reset to complete.
- */
- for (i = 0; i < 1000; i++) {
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
ret = REG_READ(REG_GLOBAL, 0x00);
if ((ret & 0x8000) == 0x0000)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
- if (i == 1000)
+ if (time_after(jiffies, timeout))
return -ETIMEDOUT;
return 0;
@@ -103,15 +99,13 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
static int mv88e6060_setup_global(struct dsa_switch *ds)
{
- /*
- * Disable discarding of frames with excessive collisions,
+ /* Disable discarding of frames with excessive collisions,
* set the maximum frame size to 1536 bytes, and mask all
* interrupt sources.
*/
REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
- /*
- * Enable automatic address learning, set the address
+ /* Enable automatic address learning, set the address
* database size to 1024 entries, and set the default aging
* time to 5 minutes.
*/
@@ -124,16 +118,14 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
{
int addr = REG_PORT(p);
- /*
- * Do not force flow control, disable Ingress and Egress
+ /* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
* port, enable Ingress and Egress Trailer tagging mode.
*/
REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
- /*
- * Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the CPU port.
@@ -144,8 +136,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
ds->phys_port_mask :
(1 << ds->dst->cpu_port)));
- /*
- * Port Association Vector: when learning source addresses
+ /* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
@@ -245,7 +236,7 @@ static void mv88e6060_poll_link(struct dsa_switch *ds)
if (!link) {
if (netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
netif_carrier_off(dev);
}
continue;
@@ -256,10 +247,11 @@ static void mv88e6060_poll_link(struct dsa_switch *ds)
fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
if (!netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
- "flow control %sabled\n", dev->name,
- speed, duplex ? "full" : "half",
- fc ? "en" : "dis");
+ netdev_info(dev,
+ "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+ speed,
+ duplex ? "full" : "half",
+ fc ? "en" : "dis");
netif_carrier_on(dev);
}
}
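The reset-wait rework above (repeated in the other mv88e6xxx variants below) replaces a fixed 1000-iteration msleep() loop with a jiffies deadline, so the one-second budget is enforced in wall-clock time rather than in loop iterations, and usleep_range() lets the scheduler coalesce the short sleeps. A standalone sketch of the same pattern, where read_reset_status() is a made-up stub standing in for the REG_READ() of the global status register:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Stub for illustration only; a real driver would read the switch's global
 * status register here and return it (or a negative errno). */
static int read_reset_status(void)
{
	return 0;
}

static int wait_for_reset_complete(void)
{
	unsigned long timeout = jiffies + 1 * HZ;

	while (time_before(jiffies, timeout)) {
		int ret = read_reset_status();

		if (ret < 0)
			return ret;
		if ((ret & 0x8000) == 0)	/* reset-in-progress bit clear */
			return 0;
		/* Poll every 1-2 ms without busy-waiting. */
		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}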
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index c17c75b9f531..41ee5b6ae917 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -50,36 +52,30 @@ static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
{
int i;
int ret;
+ unsigned long timeout;
- /*
- * Set all ports to the disabled state.
- */
+ /* Set all ports to the disabled state. */
for (i = 0; i < 8; i++) {
ret = REG_READ(REG_PORT(i), 0x04);
REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
}
- /*
- * Wait for transmit queues to drain.
- */
- msleep(2);
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
- /*
- * Reset the switch.
- */
+ /* Reset the switch. */
REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
- /*
- * Wait up to one second for reset to complete.
- */
- for (i = 0; i < 1000; i++) {
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
ret = REG_READ(REG_GLOBAL, 0x00);
if ((ret & 0xc800) == 0xc800)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
- if (i == 1000)
+ if (time_after(jiffies, timeout))
return -ETIMEDOUT;
return 0;
@@ -90,54 +86,45 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
int ret;
int i;
- /*
- * Disable the PHY polling unit (since there won't be any
+ /* Disable the PHY polling unit (since there won't be any
* external PHYs to poll), don't discard packets with
* excessive collisions, and mask all interrupt sources.
*/
REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
- /*
- * Set the default address aging time to 5 minutes, and
+ /* Set the default address aging time to 5 minutes, and
* enable address learn messages to be sent to all message
* ports.
*/
REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
- /*
- * Configure the priority mapping registers.
- */
+ /* Configure the priority mapping registers. */
ret = mv88e6xxx_config_prio(ds);
if (ret < 0)
return ret;
- /*
- * Configure the upstream port, and configure the upstream
+ /* Configure the upstream port, and configure the upstream
* port as the port to which ingress and egress monitor frames
* are to be sent.
*/
REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
- /*
- * Disable remote management for now, and set the switch's
+ /* Disable remote management for now, and set the switch's
* DSA device number.
*/
REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
- /*
- * Send all frames with destination addresses matching
+ /* Send all frames with destination addresses matching
* 01:80:c2:00:00:2x to the CPU port.
*/
REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
- /*
- * Send all frames with destination addresses matching
+ /* Send all frames with destination addresses matching
* 01:80:c2:00:00:0x to the CPU port.
*/
REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
- /*
- * Disable the loopback filter, disable flow control
+ /* Disable the loopback filter, disable flow control
* messages, disable flood broadcast override, disable
* removing of provider tags, disable ATU age violation
* interrupts, disable tag flow control, force flow
@@ -146,9 +133,7 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
*/
REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
- /*
- * Program the DSA routing table.
- */
+ /* Program the DSA routing table. */
for (i = 0; i < 32; i++) {
int nexthop;
@@ -159,33 +144,24 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
}
- /*
- * Clear all trunk masks.
- */
+ /* Clear all trunk masks. */
for (i = 0; i < 8; i++)
REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
- /*
- * Clear all trunk mappings.
- */
+ /* Clear all trunk mappings. */
for (i = 0; i < 16; i++)
REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
- /*
- * Disable ingress rate limiting by resetting all ingress
+ /* Disable ingress rate limiting by resetting all ingress
* rate limit registers to their initial state.
*/
for (i = 0; i < 6; i++)
REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
- /*
- * Initialise cross-chip port VLAN table to reset defaults.
- */
+ /* Initialise cross-chip port VLAN table to reset defaults. */
REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
- /*
- * Clear the priority override table.
- */
+ /* Clear the priority override table. */
for (i = 0; i < 16; i++)
REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
@@ -199,8 +175,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
int addr = REG_PORT(p);
u16 val;
- /*
- * MAC Forcing register: don't force link, speed, duplex
+ /* MAC Forcing register: don't force link, speed, duplex
* or flow control state to any particular values on physical
* ports, but force the CPU port and all DSA ports to 1000 Mb/s
* full duplex.
@@ -210,15 +185,13 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
else
REG_WRITE(addr, 0x01, 0x0003);
- /*
- * Do not limit the period of time that this port can be
+ /* Do not limit the period of time that this port can be
* paused for by the remote end or the period of time that
* this port can pause the remote end.
*/
REG_WRITE(addr, 0x02, 0x0000);
- /*
- * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
* disable Header mode, enable IGMP/MLD snooping, disable VLAN
* tunneling, determine priority by looking at 802.1p and IP
* priority fields (IP prio has precedence), and set STP state
@@ -245,14 +218,12 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
val |= 0x000c;
REG_WRITE(addr, 0x04, val);
- /*
- * Port Control 1: disable trunking. Also, if this is the
+ /* Port Control 1: disable trunking. Also, if this is the
* CPU port, enable learn messages to be sent to this port.
*/
REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
- /*
- * Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the upstream port.
@@ -264,14 +235,12 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
val |= 1 << dsa_upstream_port(ds);
REG_WRITE(addr, 0x06, val);
- /*
- * Default VLAN ID and priority: don't set a default VLAN
+ /* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
REG_WRITE(addr, 0x07, 0x0000);
- /*
- * Port Control 2: don't force a good FCS, set the maximum
+ /* Port Control 2: don't force a good FCS, set the maximum
* frame size to 10240 bytes, don't let the switch add or
* strip 802.1q tags, don't discard tagged or untagged frames
* on this port, do a destination address lookup on all
@@ -281,48 +250,36 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
*/
REG_WRITE(addr, 0x08, 0x2080);
- /*
- * Egress rate control: disable egress rate control.
- */
+ /* Egress rate control: disable egress rate control. */
REG_WRITE(addr, 0x09, 0x0001);
- /*
- * Egress rate control 2: disable egress rate control.
- */
+ /* Egress rate control 2: disable egress rate control. */
REG_WRITE(addr, 0x0a, 0x0000);
- /*
- * Port Association Vector: when learning source addresses
+ /* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
REG_WRITE(addr, 0x0b, 1 << p);
- /*
- * Port ATU control: disable limiting the number of address
+ /* Port ATU control: disable limiting the number of address
* database entries that this port is allowed to use.
*/
REG_WRITE(addr, 0x0c, 0x0000);
- /*
- * Priorit Override: disable DA, SA and VTU priority override.
- */
+ /* Priority Override: disable DA, SA and VTU priority override. */
REG_WRITE(addr, 0x0d, 0x0000);
- /*
- * Port Ethertype: use the Ethertype DSA Ethertype value.
- */
+ /* Port Ethertype: use the Ethertype DSA Ethertype value. */
REG_WRITE(addr, 0x0f, ETH_P_EDSA);
- /*
- * Tag Remap: use an identity 802.1p prio -> switch prio
+ /* Tag Remap: use an identity 802.1p prio -> switch prio
* mapping.
*/
REG_WRITE(addr, 0x18, 0x3210);
- /*
- * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ /* Tag Remap 2: use an identity 802.1p prio -> switch prio
* mapping.
*/
REG_WRITE(addr, 0x19, 0x7654);
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 55888b06d8b4..dadfafba64e9 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -15,9 +17,7 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
-/*
- * Switch product IDs
- */
+/* Switch product IDs */
#define ID_6085 0x04a0
#define ID_6095 0x0950
#define ID_6131 0x1060
@@ -44,36 +44,30 @@ static int mv88e6131_switch_reset(struct dsa_switch *ds)
{
int i;
int ret;
+ unsigned long timeout;
- /*
- * Set all ports to the disabled state.
- */
+ /* Set all ports to the disabled state. */
for (i = 0; i < 11; i++) {
ret = REG_READ(REG_PORT(i), 0x04);
REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
}
- /*
- * Wait for transmit queues to drain.
- */
- msleep(2);
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
- /*
- * Reset the switch.
- */
+ /* Reset the switch. */
REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
- /*
- * Wait up to one second for reset to complete.
- */
- for (i = 0; i < 1000; i++) {
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
ret = REG_READ(REG_GLOBAL, 0x00);
if ((ret & 0xc800) == 0xc800)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
- if (i == 1000)
+ if (time_after(jiffies, timeout))
return -ETIMEDOUT;
return 0;
@@ -84,42 +78,34 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
int ret;
int i;
- /*
- * Enable the PHY polling unit, don't discard packets with
+ /* Enable the PHY polling unit, don't discard packets with
* excessive collisions, use a weighted fair queueing scheme
* to arbitrate between packet queues, set the maximum frame
* size to 1632, and mask all interrupt sources.
*/
REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
- /*
- * Set the default address aging time to 5 minutes, and
+ /* Set the default address aging time to 5 minutes, and
* enable address learn messages to be sent to all message
* ports.
*/
REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
- /*
- * Configure the priority mapping registers.
- */
+ /* Configure the priority mapping registers. */
ret = mv88e6xxx_config_prio(ds);
if (ret < 0)
return ret;
- /*
- * Set the VLAN ethertype to 0x8100.
- */
+ /* Set the VLAN ethertype to 0x8100. */
REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
- /*
- * Disable ARP mirroring, and configure the upstream port as
+ /* Disable ARP mirroring, and configure the upstream port as
* the port to which ingress and egress monitor frames are to
* be sent.
*/
REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
- /*
- * Disable cascade port functionality unless this device
+ /* Disable cascade port functionality unless this device
* is used in a cascade configuration, and set the switch's
* DSA device number.
*/
@@ -128,23 +114,19 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
else
REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
- /*
- * Send all frames with destination addresses matching
+ /* Send all frames with destination addresses matching
* 01:80:c2:00:00:0x to the CPU port.
*/
REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
- /*
- * Ignore removed tag data on doubly tagged packets, disable
+ /* Ignore removed tag data on doubly tagged packets, disable
* flow control messages, force flow control priority to the
* highest, and send all special multicast frames to the CPU
* port at the highest priority.
*/
REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
- /*
- * Program the DSA routing table.
- */
+ /* Program the DSA routing table. */
for (i = 0; i < 32; i++) {
int nexthop;
@@ -155,20 +137,15 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
}
- /*
- * Clear all trunk masks.
- */
+ /* Clear all trunk masks. */
for (i = 0; i < 8; i++)
REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
- /*
- * Clear all trunk mappings.
- */
+ /* Clear all trunk mappings. */
for (i = 0; i < 16; i++)
REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
- /*
- * Force the priority of IGMP/MLD snoop frames and ARP frames
+ /* Force the priority of IGMP/MLD snoop frames and ARP frames
* to the highest setting.
*/
REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
@@ -182,8 +159,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
int addr = REG_PORT(p);
u16 val;
- /*
- * MAC Forcing register: don't force link, speed, duplex
+ /* MAC Forcing register: don't force link, speed, duplex
* or flow control state to any particular values on physical
* ports, but force the CPU port and all DSA ports to 1000 Mb/s
* (100 Mb/s on 6085) full duplex.
@@ -196,8 +172,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
else
REG_WRITE(addr, 0x01, 0x0003);
- /*
- * Port Control: disable Core Tag, disable Drop-on-Lock,
+ /* Port Control: disable Core Tag, disable Drop-on-Lock,
* transmit frames unmodified, disable Header mode,
* enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
* tunneling, determine priority by looking at 802.1p and
@@ -214,8 +189,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
val = 0x0433;
if (p == dsa_upstream_port(ds)) {
val |= 0x0104;
- /*
- * On 6085, unknown multicast forward is controlled
+ /* On 6085, unknown multicast forward is controlled
* here rather than in Port Control 2 register.
*/
if (ps->id == ID_6085)
@@ -225,14 +199,12 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
val |= 0x0100;
REG_WRITE(addr, 0x04, val);
- /*
- * Port Control 1: disable trunking. Also, if this is the
+ /* Port Control 1: disable trunking. Also, if this is the
* CPU port, enable learn messages to be sent to this port.
*/
REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
- /*
- * Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the upstream port.
@@ -244,14 +216,12 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
val |= 1 << dsa_upstream_port(ds);
REG_WRITE(addr, 0x06, val);
- /*
- * Default VLAN ID and priority: don't set a default VLAN
+ /* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
REG_WRITE(addr, 0x07, 0x0000);
- /*
- * Port Control 2: don't force a good FCS, don't use
+ /* Port Control 2: don't force a good FCS, don't use
* VLAN-based, source address-based or destination
* address-based priority overrides, don't let the switch
* add or strip 802.1q tags, don't discard tagged or
@@ -264,8 +234,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* forwarding of unknown multicast addresses.
*/
if (ps->id == ID_6085)
- /*
- * on 6085, bits 3:0 are reserved, bit 6 control ARP
+ /* on 6085, bits 3:0 are reserved, bit 6 controls ARP
* mirroring, and multicast forward is handled in
* Port Control register.
*/
@@ -277,32 +246,25 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
REG_WRITE(addr, 0x08, val);
}
- /*
- * Rate Control: disable ingress rate limiting.
- */
+ /* Rate Control: disable ingress rate limiting. */
REG_WRITE(addr, 0x09, 0x0000);
- /*
- * Rate Control 2: disable egress rate limiting.
- */
+ /* Rate Control 2: disable egress rate limiting. */
REG_WRITE(addr, 0x0a, 0x0000);
- /*
- * Port Association Vector: when learning source addresses
+ /* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
REG_WRITE(addr, 0x0b, 1 << p);
- /*
- * Tag Remap: use an identity 802.1p prio -> switch prio
+ /* Tag Remap: use an identity 802.1p prio -> switch prio
* mapping.
*/
REG_WRITE(addr, 0x18, 0x3210);
- /*
- * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ /* Tag Remap 2: use an identity 802.1p prio -> switch prio
* mapping.
*/
REG_WRITE(addr, 0x19, 0x7654);
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index a2c62c2f30ee..17314ed9456d 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -15,8 +17,7 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
-/*
- * If the switch's ADDR[4:0] strap pins are strapped to zero, it will
+/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
* use all 32 SMI bus addresses on its SMI bus, and all switch registers
* will be directly accessible on some {device address,register address}
* pair. If the ADDR[4:0] pins are not strapped to zero, the switch
@@ -48,30 +49,22 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
if (sw_addr == 0)
return mdiobus_read(bus, addr, reg);
- /*
- * Wait for the bus to become free.
- */
+ /* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
if (ret < 0)
return ret;
- /*
- * Transmit the read command.
- */
+ /* Transmit the read command. */
ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
if (ret < 0)
return ret;
- /*
- * Wait for the read command to complete.
- */
+ /* Wait for the read command to complete. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
if (ret < 0)
return ret;
- /*
- * Read the data.
- */
+ /* Read the data. */
ret = mdiobus_read(bus, sw_addr, 1);
if (ret < 0)
return ret;
@@ -100,30 +93,22 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
if (sw_addr == 0)
return mdiobus_write(bus, addr, reg, val);
- /*
- * Wait for the bus to become free.
- */
+ /* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
if (ret < 0)
return ret;
- /*
- * Transmit the data to write.
- */
+ /* Transmit the data to write. */
ret = mdiobus_write(bus, sw_addr, 1, val);
if (ret < 0)
return ret;
- /*
- * Transmit the write command.
- */
+ /* Transmit the write command. */
ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
if (ret < 0)
return ret;
- /*
- * Wait for the write command to complete.
- */
+ /* Wait for the write command to complete. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
if (ret < 0)
return ret;
@@ -146,9 +131,7 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
int mv88e6xxx_config_prio(struct dsa_switch *ds)
{
- /*
- * Configure the IP ToS mapping registers.
- */
+ /* Configure the IP ToS mapping registers. */
REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
@@ -158,9 +141,7 @@ int mv88e6xxx_config_prio(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
REG_WRITE(REG_GLOBAL, 0x17, 0xffff);
- /*
- * Configure the IEEE 802.1p priority mapping register.
- */
+ /* Configure the IEEE 802.1p priority mapping register. */
REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);
return 0;
@@ -183,14 +164,10 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
for (i = 0; i < 6; i++) {
int j;
- /*
- * Write the MAC address byte.
- */
+ /* Write the MAC address byte. */
REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);
- /*
- * Wait for the write to complete.
- */
+ /* Wait for the write to complete. */
for (j = 0; j < 16; j++) {
ret = REG_READ(REG_GLOBAL2, 0x0d);
if ((ret & 0x8000) == 0)
@@ -221,16 +198,17 @@ int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val)
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
int ret;
- int i;
+ unsigned long timeout;
ret = REG_READ(REG_GLOBAL, 0x04);
REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);
- for (i = 0; i < 1000; i++) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- msleep(1);
- if ((ret & 0xc000) != 0xc000)
- return 0;
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ usleep_range(1000, 2000);
+ if ((ret & 0xc000) != 0xc000)
+ return 0;
}
return -ETIMEDOUT;
@@ -239,16 +217,17 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
int ret;
- int i;
+ unsigned long timeout;
ret = REG_READ(REG_GLOBAL, 0x04);
REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);
- for (i = 0; i < 1000; i++) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- msleep(1);
- if ((ret & 0xc000) == 0xc000)
- return 0;
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ usleep_range(1000, 2000);
+ if ((ret & 0xc000) == 0xc000)
+ return 0;
}
return -ETIMEDOUT;
@@ -260,11 +239,11 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
if (mutex_trylock(&ps->ppu_mutex)) {
- struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
+ struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
- if (mv88e6xxx_ppu_enable(ds) == 0)
- ps->ppu_disabled = 0;
- mutex_unlock(&ps->ppu_mutex);
+ if (mv88e6xxx_ppu_enable(ds) == 0)
+ ps->ppu_disabled = 0;
+ mutex_unlock(&ps->ppu_mutex);
}
}
@@ -282,22 +261,21 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
mutex_lock(&ps->ppu_mutex);
- /*
- * If the PHY polling unit is enabled, disable it so that
+ /* If the PHY polling unit is enabled, disable it so that
* we can access the PHY registers. If it was already
* disabled, cancel the timer that is going to re-enable
* it.
*/
if (!ps->ppu_disabled) {
- ret = mv88e6xxx_ppu_disable(ds);
- if (ret < 0) {
- mutex_unlock(&ps->ppu_mutex);
- return ret;
- }
- ps->ppu_disabled = 1;
+ ret = mv88e6xxx_ppu_disable(ds);
+ if (ret < 0) {
+ mutex_unlock(&ps->ppu_mutex);
+ return ret;
+ }
+ ps->ppu_disabled = 1;
} else {
- del_timer(&ps->ppu_timer);
- ret = 0;
+ del_timer(&ps->ppu_timer);
+ ret = 0;
}
return ret;
@@ -307,9 +285,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
- /*
- * Schedule a timer to re-enable the PHY polling unit.
- */
+ /* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
mutex_unlock(&ps->ppu_mutex);
}
@@ -331,8 +307,8 @@ int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
ret = mv88e6xxx_ppu_access_get(ds);
if (ret >= 0) {
- ret = mv88e6xxx_reg_read(ds, addr, regnum);
- mv88e6xxx_ppu_access_put(ds);
+ ret = mv88e6xxx_reg_read(ds, addr, regnum);
+ mv88e6xxx_ppu_access_put(ds);
}
return ret;
@@ -345,8 +321,8 @@ int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
ret = mv88e6xxx_ppu_access_get(ds);
if (ret >= 0) {
- ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
- mv88e6xxx_ppu_access_put(ds);
+ ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(ds);
}
return ret;
@@ -380,7 +356,7 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
if (!link) {
if (netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
netif_carrier_off(dev);
}
continue;
@@ -404,10 +380,11 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
fc = (port_status & 0x8000) ? 1 : 0;
if (!netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
- "flow control %sabled\n", dev->name,
- speed, duplex ? "full" : "half",
- fc ? "en" : "dis");
+ netdev_info(dev,
+ "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+ speed,
+ duplex ? "full" : "half",
+ fc ? "en" : "dis");
netif_carrier_on(dev);
}
}
@@ -431,14 +408,10 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
int ret;
- /*
- * Snapshot the hardware statistics counters for this port.
- */
+ /* Snapshot the hardware statistics counters for this port. */
REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);
- /*
- * Wait for the snapshotting to complete.
- */
+ /* Wait for the snapshotting to complete. */
ret = mv88e6xxx_stats_wait(ds);
if (ret < 0)
return ret;
@@ -502,9 +475,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
return;
}
- /*
- * Read each of the counters.
- */
+ /* Read each of the counters. */
for (i = 0; i < nr_stats; i++) {
struct mv88e6xxx_hw_stat *s = stats + i;
u32 low;
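For readers new to this hardware: the comment rewrapped at the top of this file describes the multi-chip ("indirect") addressing mode, in which the switch sits at a single SMI address and every register access goes through a command/data register pair. The read path in __mv88e6xxx_reg_read() above condenses to the sketch below; wait_ready() is a simplified stand-in for mv88e6xxx_reg_wait_ready(), the 0x9800 command encoding is the one used in the hunk above, and the busy bit is assumed to be bit 15:

#include <linux/errno.h>
#include <linux/phy.h>

/* Poll the indirect-access command register until the (assumed) busy bit
 * clears, giving up after a bounded number of attempts. */
static int wait_ready(struct mii_bus *bus, int sw_addr)
{
	int i;

	for (i = 0; i < 16; i++) {
		int ret = mdiobus_read(bus, sw_addr, 0);

		if (ret < 0)
			return ret;
		if ((ret & 0x8000) == 0)
			return 0;
	}
	return -ETIMEDOUT;
}

static int indirect_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
{
	int ret;

	ret = wait_ready(bus, sw_addr);		/* bus free? */
	if (ret < 0)
		return ret;

	/* Read command: busy/start bit, read opcode, device address, register. */
	ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	ret = wait_ready(bus, sw_addr);		/* command completed? */
	if (ret < 0)
		return ret;

	/* The result is latched into the data register (register 1). */
	return mdiobus_read(bus, sw_addr, 1);
}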
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index fc2cd7b90e8d..911ede58dd12 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -16,16 +16,14 @@
#define REG_GLOBAL2 0x1c
struct mv88e6xxx_priv_state {
- /*
- * When using multi-chip addressing, this mutex protects
+ /* When using multi-chip addressing, this mutex protects
* access to the indirect access registers. (In single-chip
* mode, this mutex is effectively useless.)
*/
struct mutex smi_mutex;
#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
- /*
- * Handles automatic disabling and re-enabling of the PHY
+ /* Handles automatic disabling and re-enabling of the PHY
* polling unit.
*/
struct mutex ppu_mutex;
@@ -34,8 +32,7 @@ struct mv88e6xxx_priv_state {
struct timer_list ppu_timer;
#endif
- /*
- * This mutex serialises access to the statistics unit.
+ /* This mutex serialises access to the statistics unit.
* Hold this mutex over snapshot + dump sequences.
*/
struct mutex stats_mutex;
@@ -52,7 +49,7 @@ struct mv88e6xxx_hw_stat {
int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
- int reg, u16 val);
+ int reg, u16 val);
int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
int mv88e6xxx_config_prio(struct dsa_switch *ds);
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index c260af5411d0..42aa54af6842 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -100,6 +100,15 @@ static void dummy_dev_uninit(struct net_device *dev)
free_percpu(dev->dstats);
}
+static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ if (new_carrier)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ return 0;
+}
+
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_uninit = dummy_dev_uninit,
@@ -108,6 +117,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
+ .ndo_change_carrier = dummy_change_carrier,
};
static void dummy_setup(struct net_device *dev)
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
deleted file mode 100644
index 2038eaabaea4..000000000000
--- a/drivers/net/ethernet/3com/3c501.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */
-/*
- Written 1992,1993,1994 Donald Becker
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- This is a device driver for the 3Com Etherlink 3c501.
- Do not purchase this card, even as a joke. It's performance is horrible,
- and it breaks in many ways.
-
- The original author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Fixed (again!) the missing interrupt locking on TX/RX shifting.
- Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Removed calls to init_etherdev since they are no longer needed, and
- cleaned up modularization just a bit. The driver still allows only
- the default address for cards when loaded as a module, but that's
- really less braindead than anyone using a 3c501 board. :)
- 19950208 (invid@msen.com)
-
- Added traps for interrupts hitting the window as we clear and TX load
- the board. Now getting 150K/second FTP with a 3c501 card. Still playing
- with a TX-TX optimisation to see if we can touch 180-200K/second as seems
- theoretically maximum.
- 19950402 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Cleaned up for 2.3.x because we broke SMP now.
- 20000208 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Check up pass for 2.5. Nothing significant changed
- 20021009 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Fixed zero fill corner case
- 20030104 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-
- For the avoidance of doubt the "preferred form" of this code is one which
- is in an open non patent encumbered format. Where cryptographic key signing
- forms part of the process of creating an executable the information
- including keys needed to generate an equivalently functional executable
- are deemed to be part of the source code.
-
-*/
-
-
-/**
- * DOC: 3c501 Card Notes
- *
- * Some notes on this thing if you have to hack it. [Alan]
- *
- * Some documentation is available from 3Com. Due to the boards age
- * standard responses when you ask for this will range from 'be serious'
- * to 'give it to a museum'. The documentation is incomplete and mostly
- * of historical interest anyway.
- *
- * The basic system is a single buffer which can be used to receive or
- * transmit a packet. A third command mode exists when you are setting
- * things up.
- *
- * If it's transmitting it's not receiving and vice versa. In fact the
- * time to get the board back into useful state after an operation is
- * quite large.
- *
- * The driver works by keeping the board in receive mode waiting for a
- * packet to arrive. When one arrives it is copied out of the buffer
- * and delivered to the kernel. The card is reloaded and off we go.
- *
- * When transmitting lp->txing is set and the card is reset (from
- * receive mode) [possibly losing a packet just received] to command
- * mode. A packet is loaded and transmit mode triggered. The interrupt
- * handler runs different code for transmit interrupts and can handle
- * returning to receive mode or retransmissions (yes you have to help
- * out with those too).
- *
- * DOC: Problems
- *
- * There are a wide variety of undocumented error returns from the card
- * and you basically have to kick the board and pray if they turn up. Most
- * only occur under extreme load or if you do something the board doesn't
- * like (eg touching a register at the wrong time).
- *
- * The driver is less efficient than it could be. It switches through
- * receive mode even if more transmits are queued. If this worries you buy
- * a real Ethernet card.
- *
- * The combination of slow receive restart and no real multicast
- * filter makes the board unusable with a kernel compiled for IP
- * multicasting in a real multicast environment. That's down to the board,
- * but even with no multicast programs running a multicast IP kernel is
- * in group 224.0.0.1 and you will therefore be listening to all multicasts.
- * One nv conference running over that Ethernet and you can give up.
- *
- */
-
-#define DRV_NAME "3c501"
-#define DRV_VERSION "2002/10/09"
-
-
-static const char version[] =
- DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n";
-
-/*
- * Braindamage remaining:
- * The 3c501 board.
- */
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/fcntl.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-
-#include "3c501.h"
-
-/*
- * The boilerplate probe code.
- */
-
-static int io = 0x280;
-static int irq = 5;
-static int mem_start;
-
-/**
- * el1_probe - probe for a 3c501
- * @dev: The device structure passed in to probe.
- *
- * This can be called from two places. The network layer will probe using
- * a device structure passed in with the probe information completed. For a
- * modular driver we use #init_module to fill in our own structure and probe
- * for it.
- *
- * Returns 0 on success. ENXIO if asked not to probe and ENODEV if asked to
- * probe and failing to find anything.
- */
-
-struct net_device * __init el1_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static const unsigned ports[] = { 0x280, 0x300, 0};
- const unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- mem_start = dev->mem_start & 7;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = el1_probe1(dev, io);
- } else if (io != 0) {
- err = -ENXIO; /* Don't probe at all. */
- } else {
- for (port = ports; *port && el1_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, EL1_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops el_netdev_ops = {
- .ndo_open = el_open,
- .ndo_stop = el1_close,
- .ndo_start_xmit = el_start_xmit,
- .ndo_tx_timeout = el_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/**
- * el1_probe1:
- * @dev: The device structure to use
- * @ioaddr: An I/O address to probe at.
- *
- * The actual probe. This is iterated over by #el1_probe in order to
- * check all the applicable device locations.
- *
- * Returns 0 for a success, in which case the device is activated,
- * EAGAIN if the IRQ is in use by another driver, and ENODEV if the
- * board cannot be found.
- */
-
-static int __init el1_probe1(struct net_device *dev, int ioaddr)
-{
- struct net_local *lp;
- const char *mname; /* Vendor name */
- unsigned char station_addr[6];
- int autoirq = 0;
- int i;
-
- /*
- * Reserve I/O resource for exclusive use by this driver
- */
-
- if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME))
- return -ENODEV;
-
- /*
- * Read the station address PROM data from the special port.
- */
-
- for (i = 0; i < 6; i++) {
- outw(i, ioaddr + EL1_DATAPTR);
- station_addr[i] = inb(ioaddr + EL1_SAPROM);
- }
- /*
- * Check the first three octets of the S.A. for 3Com's prefix, or
- * for the Sager NP943 prefix.
- */
-
- if (station_addr[0] == 0x02 && station_addr[1] == 0x60 &&
- station_addr[2] == 0x8c)
- mname = "3c501";
- else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 &&
- station_addr[2] == 0xC8)
- mname = "NP943";
- else {
- release_region(ioaddr, EL1_IO_EXTENT);
- return -ENODEV;
- }
-
- /*
- * We auto-IRQ by shutting off the interrupt line and letting it
- * float high.
- */
-
- dev->irq = irq;
-
- if (dev->irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- inb(RX_STATUS); /* Clear pending interrupts. */
- inb(TX_STATUS);
- outb(AX_LOOP + 1, AX_CMD);
-
- outb(0x00, AX_CMD);
-
- mdelay(20);
- autoirq = probe_irq_off(irq_mask);
-
- if (autoirq == 0) {
- pr_warning("%s probe at %#x failed to detect IRQ line.\n",
- mname, ioaddr);
- release_region(ioaddr, EL1_IO_EXTENT);
- return -EAGAIN;
- }
- }
-
- outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
- dev->base_addr = ioaddr;
- memcpy(dev->dev_addr, station_addr, ETH_ALEN);
-
- if (mem_start & 0xf)
- el_debug = mem_start & 0x7;
- if (autoirq)
- dev->irq = autoirq;
-
- pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n",
- dev->name, mname, dev->base_addr,
- autoirq ? "auto":"assigned ", dev->irq);
-
-#ifdef CONFIG_IP_MULTICAST
- pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
-#endif
-
- if (el_debug)
- pr_debug("%s", version);
-
- lp = netdev_priv(dev);
- memset(lp, 0, sizeof(struct net_local));
- spin_lock_init(&lp->lock);
-
- /*
- * The EL1-specific entries in the device structure.
- */
-
- dev->netdev_ops = &el_netdev_ops;
- dev->watchdog_timeo = HZ;
- dev->ethtool_ops = &netdev_ethtool_ops;
- return 0;
-}
-
-/**
- * el1_open:
- * @dev: device that is being opened
- *
- * When an ifconfig is issued which changes the device flags to include
- * IFF_UP this function is called. It is only called when the change
- * occurs, not when the interface remains up. #el1_close will be called
- * when it goes down.
- *
- * Returns 0 for a successful open, or -EAGAIN if someone has run off
- * with our interrupt line.
- */
-
-static int el_open(struct net_device *dev)
-{
- int retval;
- int ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
- if (el_debug > 2)
- pr_debug("%s: Doing el_open()...\n", dev->name);
-
- retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev);
- if (retval)
- return retval;
-
- spin_lock_irqsave(&lp->lock, flags);
- el_reset(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- lp->txing = 0; /* Board in RX mode */
- outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
- netif_start_queue(dev);
- return 0;
-}
-
-/**
- * el_timeout:
- * @dev: The 3c501 card that has timed out
- *
- * Attempt to restart the board. This is basically a mixture of extreme
- * violence and prayer
- *
- */
-
-static void el_timeout(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if (el_debug)
- pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
- dev->name, inb(TX_STATUS),
- inb(AX_STATUS), inb(RX_STATUS));
- dev->stats.tx_errors++;
- outb(TX_NORM, TX_CMD);
- outb(RX_NORM, RX_CMD);
- outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
- outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
- lp->txing = 0; /* Ripped back in to RX */
- netif_wake_queue(dev);
-}
-
-
-/**
- * el_start_xmit:
- * @skb: The packet that is queued to be sent
- * @dev: The 3c501 card we want to throw it down
- *
- * Attempt to send a packet to a 3c501 card. There are some interesting
- * catches here because the 3c501 is an extremely old and therefore
- * stupid piece of technology.
- *
- * If we are handling an interrupt on the other CPU we cannot load a packet
- * as we may still be attempting to retrieve the last RX packet buffer.
- *
- * When a transmit times out we dump the card into control mode and just
- * start again. It happens enough that it isn't worth logging.
- *
- * We avoid holding the spin locks when doing the packet load to the board.
- * The device is very slow, and its DMA mode is even slower. If we held the
- * lock while loading 1500 bytes onto the controller we would drop a lot of
- * serial port characters. This requires we do extra locking, but we have
- * no real choice.
- */
-
-static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
-
- /*
- * Avoid incoming interrupts between us flipping txing and flipping
- * mode as the driver assumes txing is a faithful indicator of card
- * state
- */
-
- spin_lock_irqsave(&lp->lock, flags);
-
- /*
- * Avoid timer-based retransmission conflicts.
- */
-
- netif_stop_queue(dev);
-
- do {
- int len = skb->len;
- int pad = 0;
- int gp_start;
- unsigned char *buf = skb->data;
-
- if (len < ETH_ZLEN)
- pad = ETH_ZLEN - len;
-
- gp_start = 0x800 - (len + pad);
-
- lp->tx_pkt_start = gp_start;
- lp->collisions = 0;
-
- dev->stats.tx_bytes += skb->len;
-
- /*
- * Command mode with status cleared should [in theory]
- * mean no more interrupts can be pending on the card.
- */
-
- outb_p(AX_SYS, AX_CMD);
- inb_p(RX_STATUS);
- inb_p(TX_STATUS);
-
- lp->loading = 1;
- lp->txing = 1;
-
- /*
- * Turn interrupts back on while we spend a pleasant
- * afternoon loading bytes into the board
- */
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- /* Set rx packet area to 0. */
- outw(0x00, RX_BUF_CLR);
- /* aim - packet will be loaded into buffer start */
- outw(gp_start, GP_LOW);
- /* load buffer (usual thing each byte increments the pointer) */
- outsb(DATAPORT, buf, len);
- if (pad) {
- while (pad--) /* Zero fill buffer tail */
- outb(0, DATAPORT);
- }
- /* the board reuses the same register */
- outw(gp_start, GP_LOW);
-
- if (lp->loading != 2) {
- /* fire ... Trigger xmit. */
- outb(AX_XMIT, AX_CMD);
- lp->loading = 0;
- if (el_debug > 2)
- pr_debug(" queued xmit.\n");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- /* A receive upset our load, despite our best efforts */
- if (el_debug > 2)
- pr_debug("%s: burped during tx load.\n", dev->name);
- spin_lock_irqsave(&lp->lock, flags);
- } while (1);
-}
-
-/**
- * el_interrupt:
- * @irq: Interrupt number
- * @dev_id: The 3c501 that burped
- *
- * Handle the ether interface interrupts. The 3c501 needs a lot more
- * hand holding than most cards. In particular we get a transmit interrupt
- * with a collision error because the board firmware isn't capable of rewinding
- * its own transmit buffer pointers. It can however count to 16 for us.
- *
- * On the receive side the card is also very dumb. It has no buffering to
- * speak of. We simply pull the packet out of its PIO buffer (which is slow)
- * and queue it for the kernel. Then we reset the card for the next packet.
- *
- * We sometimes get surprise interrupts late both because the SMP IRQ delivery
- * is message passing and because the card sometimes seems to deliver late. I
- * think if it is part way through a receive and the mode is changed it carries
- * on receiving and sends us an interrupt. We have to band aid all these cases
- * to get a sensible 150kBytes/second performance. Even then you want a small
- * TCP window.
- */
-
-static irqreturn_t el_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr;
- int axsr; /* Aux. status reg. */
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
-
- /*
- * What happened ?
- */
-
- axsr = inb(AX_STATUS);
-
- /*
- * Log it
- */
-
- if (el_debug > 3)
- pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr);
-
- if (lp->loading == 1 && !lp->txing)
- pr_warning("%s: Inconsistent state loading while not in tx\n",
- dev->name);
-
- if (lp->txing) {
- /*
- * Board in transmit mode. May be loading. If we are
- * loading we shouldn't have got this.
- */
- int txsr = inb(TX_STATUS);
-
- if (lp->loading == 1) {
- if (el_debug > 2)
- pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n",
- dev->name, txsr, inw(GP_LOW), inw(RX_LOW));
-
- /* Force a reload */
- lp->loading = 2;
- spin_unlock(&lp->lock);
- goto out;
- }
- if (el_debug > 6)
- pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name,
- txsr, inw(GP_LOW), inw(RX_LOW));
-
- if ((axsr & 0x80) && (txsr & TX_READY) == 0) {
- /*
- * FIXME: is there a logic to whether to keep
- * on trying or reset immediately ?
- */
- if (el_debug > 1)
- pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n",
- dev->name, txsr, axsr,
- inw(ioaddr + EL1_DATAPTR),
- inw(ioaddr + EL1_RXPTR));
- lp->txing = 0;
- netif_wake_queue(dev);
- } else if (txsr & TX_16COLLISIONS) {
- /*
- * Timed out
- */
- if (el_debug)
- pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name);
- outb(AX_SYS, AX_CMD);
- lp->txing = 0;
- dev->stats.tx_aborted_errors++;
- netif_wake_queue(dev);
- } else if (txsr & TX_COLLISION) {
- /*
- * Retrigger xmit.
- */
-
- if (el_debug > 6)
- pr_debug("%s: retransmitting after a collision.\n", dev->name);
- /*
- * Poor little chip can't reset its own start
- * pointer
- */
-
- outb(AX_SYS, AX_CMD);
- outw(lp->tx_pkt_start, GP_LOW);
- outb(AX_XMIT, AX_CMD);
- dev->stats.collisions++;
- spin_unlock(&lp->lock);
- goto out;
- } else {
- /*
- * It worked.. we will now fall through and receive
- */
- dev->stats.tx_packets++;
- if (el_debug > 6)
- pr_debug("%s: Tx succeeded %s\n", dev->name,
- (txsr & TX_RDY) ? "." : "but tx is busy!");
- /*
- * This is safe the interrupt is atomic WRT itself.
- */
- lp->txing = 0;
- /* In case more to transmit */
- netif_wake_queue(dev);
- }
- } else {
- /*
- * In receive mode.
- */
-
- int rxsr = inb(RX_STATUS);
- if (el_debug > 5)
- pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n",
- dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW));
- /*
- * Just reading rx_status fixes most errors.
- */
- if (rxsr & RX_MISSED)
- dev->stats.rx_missed_errors++;
- else if (rxsr & RX_RUNT) {
- /* Handled to avoid board lock-up. */
- dev->stats.rx_length_errors++;
- if (el_debug > 5)
- pr_debug("%s: runt.\n", dev->name);
- } else if (rxsr & RX_GOOD) {
- /*
- * Receive worked.
- */
- el_receive(dev);
- } else {
- /*
- * Nothing? Something is broken!
- */
- if (el_debug > 2)
- pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
- dev->name, rxsr);
- el_reset(dev);
- }
- }
-
- /*
- * Move into receive mode
- */
-
- outb(AX_RX, AX_CMD);
- outw(0x00, RX_BUF_CLR);
- inb(RX_STATUS); /* Be certain that interrupts are cleared. */
- inb(TX_STATUS);
- spin_unlock(&lp->lock);
-out:
- return IRQ_HANDLED;
-}
-
-
-/**
- * el_receive:
- * @dev: Device to pull the packets from
- *
- * We have a good packet. Well, not really "good", just mostly not broken.
- * We must check everything to see if it is good. In particular we occasionally
- * get wild packet sizes from the card. If the packet seems sane we PIO it
- * off the card and queue it for the protocol layers.
- */
-
-static void el_receive(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int pkt_len;
- struct sk_buff *skb;
-
- pkt_len = inw(RX_LOW);
-
- if (el_debug > 4)
- pr_debug(" el_receive %d.\n", pkt_len);
-
- if (pkt_len < 60 || pkt_len > 1536) {
- if (el_debug)
- pr_debug("%s: bogus packet, length=%d\n",
- dev->name, pkt_len);
- dev->stats.rx_over_errors++;
- return;
- }
-
- /*
- * Command mode so we can empty the buffer
- */
-
- outb(AX_SYS, AX_CMD);
- skb = netdev_alloc_skb(dev, pkt_len + 2);
-
- /*
- * Start of frame
- */
-
- outw(0x00, GP_LOW);
- if (skb == NULL) {
- pr_info("%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return;
- } else {
- skb_reserve(skb, 2); /* Force 16 byte alignment */
- /*
- * The read increments through the bytes. The interrupt
- * handler will fix the pointer when it returns to
- * receive mode.
- */
- insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-}
-
-/**
- * el_reset: Reset a 3c501 card
- * @dev: The 3c501 card about to get zapped
- *
- * Even resetting a 3c501 isn't simple. When you activate reset it loses all
- * its configuration. You must hold the lock when doing this. The function
- * cannot take the lock itself as it is callable from the irq handler.
- */
-
-static void el_reset(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if (el_debug > 2)
- pr_info("3c501 reset...\n");
- outb(AX_RESET, AX_CMD); /* Reset the chip */
- /* Aux control, irq and loopback enabled */
- outb(AX_LOOP, AX_CMD);
- {
- int i;
- for (i = 0; i < 6; i++) /* Set the station address. */
- outb(dev->dev_addr[i], ioaddr + i);
- }
-
- outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
- outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
- outb(RX_NORM, RX_CMD); /* Set Rx commands. */
- inb(RX_STATUS); /* Clear status. */
- inb(TX_STATUS);
- lp->txing = 0;
-}
-
-/**
- * el1_close:
- * @dev: 3c501 card to shut down
- *
- * Close a 3c501 card. The IFF_UP flag has been cleared by the user via
- * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued,
- * and then disable the interrupts. Finally we reset the chip. The effects
- * of the rest will be cleaned up by #el1_open. Always returns 0 indicating
- * a success.
- */
-
-static int el1_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (el_debug > 2)
- pr_info("%s: Shutting down Ethernet card at %#x.\n",
- dev->name, ioaddr);
-
- netif_stop_queue(dev);
-
- /*
- * Free and disable the IRQ.
- */
-
- free_irq(dev->irq, dev);
- outb(AX_RESET, AX_CMD); /* Reset the chip */
-
- return 0;
-}
-
-/**
- * set_multicast_list:
- * @dev: The device to adjust
- *
- * Set or clear the multicast filter for this adaptor to use the best-effort
- * filtering supported. The 3c501 supports only three modes of filtering.
- * It always receives broadcasts and packets for itself. You can choose to
- * optionally receive all packets, or all multicast packets on top of this.
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (dev->flags & IFF_PROMISC) {
- outb(RX_PROM, RX_CMD);
- inb(RX_STATUS);
- } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
- /* Multicast or all multicast is the same */
- outb(RX_MULT, RX_CMD);
- inb(RX_STATUS); /* Clear status. */
- } else {
- outb(RX_NORM, RX_CMD);
- inb(RX_STATUS);
- }
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-#ifdef MODULE
-
-static struct net_device *dev_3c501;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "EtherLink I/O base address");
-MODULE_PARM_DESC(irq, "EtherLink IRQ number");
-
-/**
- * init_module:
- *
- * When the driver is loaded as a module this function is called. We fake up
- * a device structure with the base I/O and interrupt set as if it were being
- * called from Space.c. This minimises the extra code that would otherwise
- * be required.
- *
- * Returns 0 for success or -EIO if a card is not found. Returning an error
- * here also causes the module to be unloaded
- */
-
-int __init init_module(void)
-{
- dev_3c501 = el1_probe(-1);
- if (IS_ERR(dev_3c501))
- return PTR_ERR(dev_3c501);
- return 0;
-}
-
-/**
- * cleanup_module:
- *
- * The module is being unloaded. We unhook our network device from the system
- * and then free up the resources we took when the card was found.
- */
-
-void __exit cleanup_module(void)
-{
- struct net_device *dev = dev_3c501;
- unregister_netdev(dev);
- release_region(dev->base_addr, EL1_IO_EXTENT);
- free_netdev(dev);
-}
-
-#endif /* MODULE */
-
-MODULE_AUTHOR("Donald Becker, Alan Cox");
-MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/ethernet/3com/3c501.h b/drivers/net/ethernet/3com/3c501.h
deleted file mode 100644
index 183fd55f03cb..000000000000
--- a/drivers/net/ethernet/3com/3c501.h
+++ /dev/null
@@ -1,91 +0,0 @@
-
-/*
- * Index to functions.
- */
-
-static int el1_probe1(struct net_device *dev, int ioaddr);
-static int el_open(struct net_device *dev);
-static void el_timeout(struct net_device *dev);
-static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t el_interrupt(int irq, void *dev_id);
-static void el_receive(struct net_device *dev);
-static void el_reset(struct net_device *dev);
-static int el1_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-
-#define EL1_IO_EXTENT 16
-
-#ifndef EL_DEBUG
-#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */
-#endif /* Anything above 5 is wordy death! */
-#define debug el_debug
-static int el_debug = EL_DEBUG;
-
-/*
- * Board-specific info in netdev_priv(dev).
- */
-
-struct net_local
-{
- int tx_pkt_start; /* The length of the current Tx packet. */
- int collisions; /* Tx collisions this packet */
- int loading; /* Spot buffer load collisions */
- int txing; /* True if card is in TX mode */
- spinlock_t lock; /* Serializing lock */
-};
-
-
-#define RX_STATUS (ioaddr + 0x06)
-#define RX_CMD RX_STATUS
-#define TX_STATUS (ioaddr + 0x07)
-#define TX_CMD TX_STATUS
-#define GP_LOW (ioaddr + 0x08)
-#define GP_HIGH (ioaddr + 0x09)
-#define RX_BUF_CLR (ioaddr + 0x0A)
-#define RX_LOW (ioaddr + 0x0A)
-#define RX_HIGH (ioaddr + 0x0B)
-#define SAPROM (ioaddr + 0x0C)
-#define AX_STATUS (ioaddr + 0x0E)
-#define AX_CMD AX_STATUS
-#define DATAPORT (ioaddr + 0x0F)
-#define TX_RDY 0x08 /* In TX_STATUS */
-
-#define EL1_DATAPTR 0x08
-#define EL1_RXPTR 0x0A
-#define EL1_SAPROM 0x0C
-#define EL1_DATAPORT 0x0f
-
-/*
- * Writes to the ax command register.
- */
-
-#define AX_OFF 0x00 /* Irq off, buffer access on */
-#define AX_SYS 0x40 /* Load the buffer */
-#define AX_XMIT 0x44 /* Transmit a packet */
-#define AX_RX 0x48 /* Receive a packet */
-#define AX_LOOP 0x0C /* Loopback mode */
-#define AX_RESET 0x80
-
-/*
- * Normal receive mode written to RX_STATUS. We must intr on short packets
- * to avoid bogus rx lockups.
- */
-
-#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */
-#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */
-#define RX_MULT 0xE8 /* Accept multicast packets. */
-#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */
-
-/*
- * TX_STATUS register.
- */
-
-#define TX_COLLISION 0x02
-#define TX_16COLLISIONS 0x04
-#define TX_READY 0x08
-
-#define RX_RUNT 0x08
-#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */
-#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */
-
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 633c709b9d99..f36ff99fd394 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1161,8 +1161,8 @@ el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
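
This hunk, like the 3c515 and ax88796 ones further down, swaps unbounded strcpy()/sprintf() calls for length-bounded copies when filling struct ethtool_drvinfo, whose driver, version and bus_info members are fixed-size character arrays. A small sketch of the resulting pattern; the field names come straight from the hunks, the rest is illustrative.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/*
 * Bounded-copy pattern used by the conversions in this patch: sizeof()
 * on each fixed-size field gives the true capacity, and strlcpy() /
 * snprintf() always NUL-terminate, truncating instead of overrunning
 * when the source string is too long.
 */
static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "ISA 0x%lx", dev->base_addr);
}
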
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 59e1e001bc3f..94c656f5a05d 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1542,9 +1542,10 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
+ dev->base_addr);
}
static u32 netdev_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 66df93638085..ffd8de28a76a 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -432,7 +432,7 @@ static int tc574_config(struct pcmcia_device *link)
netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
cardname, dev->base_addr, dev->irq, dev->dev_addr);
netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
- 8 << config & Ram_size,
+ 8 << (config & Ram_size),
ram_split[(config & Ram_split) >> Ram_split_shift],
config & Autoselect ? "autoselect " : "");
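
The 3c574_cs change above is a pure operator-precedence fix: C shift operators bind more tightly than bitwise AND, so 8 << config & Ram_size had been evaluated as (8 << config) & Ram_size rather than the intended 8 << (config & Ram_size). A tiny standalone illustration with made-up register values:

#include <stdio.h>

int main(void)
{
	int config   = 0x09;	/* hypothetical register contents */
	int Ram_size = 0x01;	/* hypothetical "RAM size" bit    */

	/* Without parentheses: (8 << 9) & 1 == 0 -- not what was meant. */
	printf("%d\n", 8 << config & Ram_size);
	/* With parentheses: 8 << (9 & 1) == 16 -- the intended value. */
	printf("%d\n", 8 << (config & Ram_size));
	return 0;
}
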
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ed0feb3cc6fa..1928e2001587 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1293,7 +1293,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
if (print_info)
pr_cont(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index eb56174469a7..1c71c763f680 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_3COM
bool "3Com devices"
default y
- depends on ISA || EISA || MCA || PCI || PCMCIA
+ depends on ISA || EISA || PCI || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -18,23 +18,9 @@ config NET_VENDOR_3COM
if NET_VENDOR_3COM
-config EL1
- tristate "3c501 \"EtherLink\" support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Also, consider buying a
- new card, since the 3c501 is slow, broken, and obsolete: you will
- have problems. Some people suggest to ping ("man ping") a nearby
- machine every minute ("man cron") when using this card.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c501.
-
config EL3
- tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support"
- depends on (ISA || EISA || MCA)
+ tristate "3c509/3c579 \"EtherLink III\" support"
+ depends on (ISA || EISA)
---help---
If you have a network (Ethernet) card belonging to the 3Com
EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile
index 1e5382a30ead..74046afab993 100644
--- a/drivers/net/ethernet/3com/Makefile
+++ b/drivers/net/ethernet/3com/Makefile
@@ -2,7 +2,6 @@
# Makefile for the 3Com Ethernet device drivers
#
-obj-$(CONFIG_EL1) += 3c501.o
obj-$(CONFIG_EL3) += 3c509.o
obj-$(CONFIG_3C515) += 3c515.o
obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o
diff --git a/drivers/net/ethernet/8390/3c503.c b/drivers/net/ethernet/8390/3c503.c
deleted file mode 100644
index 49d76bd0dc86..000000000000
--- a/drivers/net/ethernet/8390/3c503.c
+++ /dev/null
@@ -1,777 +0,0 @@
-/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */
-/*
- Written 1992-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
-
- This driver should work with the 3c503 and 3c503/16. It should be used
- in shared memory mode for best performance, although it may also work
- in programmed-I/O mode.
-
- Sources:
- EtherLink II Technical Reference Manual,
- EtherLink II/16 Technical Reference Manual Supplement,
- 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145
-
- The Crynwr 3c503 packet driver.
-
- Changelog:
-
- Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards.
- Paul Gortmaker : multiple card support for module users.
- rjohnson@analogic.com : Fix up PIO interface for efficient operation.
- Jeff Garzik : ethtool support
-
-*/
-
-#define DRV_NAME "3c503"
-#define DRV_VERSION "1.10a"
-#define DRV_RELDATE "11/17/2001"
-
-
-static const char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ethtool.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-
-#include "8390.h"
-#include "3c503.h"
-#define WRD_COUNT 4
-
-static int el2_pio_probe(struct net_device *dev);
-static int el2_probe1(struct net_device *dev, int ioaddr);
-
-/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
-static unsigned int netcard_portlist[] __initdata =
- { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0};
-
-#define EL2_IO_EXTENT 16
-
-static int el2_open(struct net_device *dev);
-static int el2_close(struct net_device *dev);
-static void el2_reset_8390(struct net_device *dev);
-static void el2_init_card(struct net_device *dev);
-static void el2_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset);
-static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static const struct ethtool_ops netdev_ethtool_ops;
-
-
-/* This routine probes for a memory-mapped 3c503 board by looking for
- the "location register" at the end of the jumpered boot PROM space.
- This works even if a PROM isn't there.
-
- If the ethercard isn't found there is an optional probe for
- ethercard jumpered to programmed-I/O mode.
- */
-static int __init do_el2_probe(struct net_device *dev)
-{
- int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0};
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return el2_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (addr = addrs; *addr; addr++) {
- void __iomem *p = ioremap(*addr, 1);
- unsigned base_bits;
- int i;
-
- if (!p)
- continue;
- base_bits = readb(p);
- iounmap(p);
- i = ffs(base_bits) - 1;
- if (i == -1 || base_bits != (1 << i))
- continue;
- if (el2_probe1(dev, netcard_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-#if ! defined(no_probe_nonshared_memory)
- return el2_pio_probe(dev);
-#else
- return -ENODEV;
-#endif
-}
-
-/* Try all of the locations that aren't obviously empty. This touches
- a lot of locations, and is much riskier than the code above. */
-static int __init
-el2_pio_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return el2_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; netcard_portlist[i]; i++) {
- if (el2_probe1(dev, netcard_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init el2_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_el2_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops el2_netdev_ops = {
- .ndo_open = el2_open,
- .ndo_stop = el2_close,
-
- .ndo_start_xmit = eip_start_xmit,
- .ndo_tx_timeout = eip_tx_timeout,
- .ndo_get_stats = eip_get_stats,
- .ndo_set_rx_mode = eip_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = eip_poll,
-#endif
-};
-
-/* Probe for the Etherlink II card at I/O port base IOADDR,
- returning non-zero on success. If found, set the station
- address and memory parameters in DEVICE. */
-static int __init
-el2_probe1(struct net_device *dev, int ioaddr)
-{
- int i, iobase_reg, membase_reg, saved_406, wordlength, retval;
- static unsigned version_printed;
- unsigned long vendor_id;
-
- if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (!request_region(ioaddr + 0x400, 8, DRV_NAME)) {
- retval = -EBUSY;
- goto out;
- }
-
- /* Reset and/or avoid any lurking NE2000 */
- if (inb(ioaddr + 0x408) == 0xff) {
- mdelay(1);
- retval = -ENODEV;
- goto out1;
- }
-
- /* We verify that it's a 3C503 board by checking the first three octets
- of its ethernet address. */
- iobase_reg = inb(ioaddr+0x403);
- membase_reg = inb(ioaddr+0x404);
- /* ASIC location registers should be 0 or have only a single bit set. */
- if ((iobase_reg & (iobase_reg - 1)) ||
- (membase_reg & (membase_reg - 1))) {
- retval = -ENODEV;
- goto out1;
- }
- saved_406 = inb_p(ioaddr + 0x406);
- outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */
- outb_p(ECNTRL_THIN, ioaddr + 0x406);
- /* Map the station addr PROM into the lower I/O ports. We now check
- for both the old and new 3Com prefix */
- outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406);
- vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2);
- if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) {
- /* Restore the register we frobbed. */
- outb(saved_406, ioaddr + 0x406);
- retval = -ENODEV;
- goto out1;
- }
-
- if (ei_debug && version_printed++ == 0)
- pr_debug("%s", version);
-
- dev->base_addr = ioaddr;
-
- pr_info("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
-
- /* Retrieve and print the ethernet address. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
- pr_cont("%pM", dev->dev_addr);
-
- /* Map the 8390 back into the window. */
- outb(ECNTRL_THIN, ioaddr + 0x406);
-
- /* Check for EL2/16 as described in tech. man. */
- outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
- outb_p(0, ioaddr + EN0_DCFG);
- outb_p(E8390_PAGE2, ioaddr + E8390_CMD);
- wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS;
- outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
-
- /* Probe for, turn on and clear the board's shared memory. */
- if (ei_debug > 2)
- pr_cont(" memory jumpers %2.2x ", membase_reg);
- outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
-
- /* This should be probed for (or set via an ioctl()) at run-time.
- Right now we use a sleazy hack to pass in the interface number
- at boot-time via the low bits of the mem_end field. That value is
- unused, and the low bits would be discarded even if it was used. */
-#if defined(EI8390_THICK) || defined(EL2_AUI)
- ei_status.interface_num = 1;
-#else
- ei_status.interface_num = dev->mem_end & 0xf;
-#endif
- pr_cont(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
-
- if ((membase_reg & 0xf0) == 0) {
- dev->mem_start = 0;
- ei_status.name = "3c503-PIO";
- ei_status.mem = NULL;
- } else {
- dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) +
- ((membase_reg & 0xA0) ? 0x4000 : 0);
-#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256
- ei_status.mem = ioremap(dev->mem_start, EL2_MEMSIZE);
-
-#ifdef EL2MEMTEST
- /* This has never found an error, but someone might care.
- Note that it only tests the 2nd 8kB on 16kB 3c503/16
- cards between card addr. 0x2000 and 0x3fff. */
- { /* Check the card's memory. */
- void __iomem *mem_base = ei_status.mem;
- unsigned int test_val = 0xbbadf00d;
- writel(0xba5eba5e, mem_base);
- for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
- writel(test_val, mem_base + i);
- if (readl(mem_base) != 0xba5eba5e ||
- readl(mem_base + i) != test_val) {
- pr_warning("3c503: memory failure or memory address conflict.\n");
- dev->mem_start = 0;
- ei_status.name = "3c503-PIO";
- iounmap(mem_base);
- ei_status.mem = NULL;
- break;
- }
- test_val += 0x55555555;
- writel(0, mem_base + i);
- }
- }
-#endif /* EL2MEMTEST */
-
- if (dev->mem_start)
- dev->mem_end = dev->mem_start + EL2_MEMSIZE;
-
- if (wordlength) { /* No Tx pages to skip over to get to Rx */
- ei_status.priv = 0;
- ei_status.name = "3c503/16";
- } else {
- ei_status.priv = TX_PAGES * 256;
- ei_status.name = "3c503";
- }
- }
-
- /*
- Divide up the memory on the card. This is the same regardless of
- whether shared-mem or PIO is used. For 16 bit cards (16kB RAM),
- we use the entire 8k of bank1 for an Rx ring. We only use 3k
- of the bank0 for 2 full size Tx packet slots. For 8 bit cards,
- (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining
- 5kB for an Rx ring. */
-
- if (wordlength) {
- ei_status.tx_start_page = EL2_MB0_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG;
- } else {
- ei_status.tx_start_page = EL2_MB1_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
- }
-
- /* Finish setting the board's parameters. */
- ei_status.stop_page = EL2_MB1_STOP_PG;
- ei_status.word16 = wordlength;
- ei_status.reset_8390 = el2_reset_8390;
- ei_status.get_8390_hdr = el2_get_8390_hdr;
- ei_status.block_input = el2_block_input;
- ei_status.block_output = el2_block_output;
-
- if (dev->irq == 2)
- dev->irq = 9;
- else if (dev->irq > 5 && dev->irq != 9) {
- pr_warning("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
- dev->irq);
- dev->irq = 0;
- }
-
- ei_status.saved_irq = dev->irq;
-
- dev->netdev_ops = &el2_netdev_ops;
- dev->ethtool_ops = &netdev_ethtool_ops;
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
-
- if (dev->mem_start)
- pr_info("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
- dev->name, ei_status.name, (wordlength+1)<<3,
- dev->mem_start, dev->mem_end-1);
-
- else
- {
- ei_status.tx_start_page = EL2_MB1_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
- pr_info("%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
- dev->name, ei_status.name, (wordlength+1)<<3);
- }
- release_region(ioaddr + 0x400, 8);
- return 0;
-out1:
- release_region(ioaddr + 0x400, 8);
-out:
- release_region(ioaddr, EL2_IO_EXTENT);
- return retval;
-}
-
-static irqreturn_t el2_probe_interrupt(int irq, void *seen)
-{
- *(bool *)seen = true;
- return IRQ_HANDLED;
-}
-
-static int
-el2_open(struct net_device *dev)
-{
- int retval;
-
- if (dev->irq < 2) {
- static const int irqlist[] = {5, 9, 3, 4, 0};
- const int *irqp = irqlist;
-
- outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
- do {
- bool seen;
-
- retval = request_irq(*irqp, el2_probe_interrupt, 0,
- dev->name, &seen);
- if (retval == -EBUSY)
- continue;
- if (retval < 0)
- goto err_disable;
-
- /* Twinkle the interrupt, and check if it's seen. */
- seen = false;
- smp_wmb();
- outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
- outb_p(0x00, E33G_IDCFR);
- msleep(1);
- free_irq(*irqp, &seen);
- if (!seen)
- continue;
-
- retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
- dev->name, dev);
- if (retval == -EBUSY)
- continue;
- if (retval < 0)
- goto err_disable;
- break;
- } while (*++irqp);
-
- if (*irqp == 0) {
- err_disable:
- outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
- return -EAGAIN;
- }
- } else {
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
- return retval;
- }
- }
-
- el2_init_card(dev);
- eip_open(dev);
- return 0;
-}
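
el2_open() above auto-detects a usable interrupt line by installing a throwaway handler that merely records that it ran, telling the board to pulse the candidate IRQ, and only then requesting that IRQ with the real handler. A board-agnostic sketch of the same idea; trigger_board_irq() is a placeholder for the hardware-specific poke (the E33G_IDCFR writes in the code above), everything else uses standard kernel interfaces.

#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

/* The probe handler only notes that the line fired. */
static irqreturn_t probe_handler(int irq, void *seen)
{
	*(bool *)seen = true;
	return IRQ_HANDLED;
}

/* Return the first IRQ from the zero-terminated list that the board can
 * demonstrably assert, or -EAGAIN if none of them work. */
static int probe_irq(struct net_device *dev, const int *candidates)
{
	const int *irqp;

	for (irqp = candidates; *irqp; irqp++) {
		bool seen = false;

		if (request_irq(*irqp, probe_handler, 0, dev->name, &seen))
			continue;			/* line busy or unusable */
		trigger_board_irq(dev, *irqp);		/* placeholder: make the card pulse this IRQ */
		msleep(1);
		free_irq(*irqp, &seen);
		if (seen)
			return *irqp;			/* the pulse was observed */
	}
	return -EAGAIN;
}
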
-
-static int
-el2_close(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- dev->irq = ei_status.saved_irq;
- outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
-
- eip_close(dev);
- return 0;
-}
-
-/* This is called whenever we have a unrecoverable failure:
- transmit timeout
- Bad ring buffer packet header
- */
-static void
-el2_reset_8390(struct net_device *dev)
-{
- if (ei_debug > 1) {
- pr_debug("%s: Resetting the 3c503 board...", dev->name);
- pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
- E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
- }
- outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
- ei_status.txing = 0;
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- el2_init_card(dev);
- if (ei_debug > 1)
- pr_cont("done\n");
-}
-
-/* Initialize the 3c503 GA registers after a reset. */
-static void
-el2_init_card(struct net_device *dev)
-{
- /* Unmap the station PROM and select the DIX or BNC connector. */
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-
- /* Set ASIC copy of rx's first and last+1 buffer pages */
- /* These must be the same as in the 8390. */
- outb(ei_status.rx_start_page, E33G_STARTPG);
- outb(ei_status.stop_page, E33G_STOPPG);
-
- /* Point the vector pointer registers somewhere ?harmless?. */
- outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */
- outb(0xff, E33G_VP1);
- outb(0x00, E33G_VP0);
- /* Turn off all interrupts until we're opened. */
- outb_p(0x00, dev->base_addr + EN0_IMR);
- /* Enable IRQs iff started. */
- outb(EGACFR_NORM, E33G_GACFR);
-
- /* Set the interrupt line. */
- outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR);
- outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */
- outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */
- outb_p(0x00, E33G_DMAAL);
- return; /* We always succeed */
-}
-
-/*
- * Either use the shared memory (if enabled on the board) or put the packet
- * out through the ASIC FIFO.
- */
-static void
-el2_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- unsigned short int *wrd;
- int boguscount; /* timeout counter */
- unsigned short word; /* temporary for better machine code */
- void __iomem *base = ei_status.mem;
-
- if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */
- outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR);
- else
- outb(EGACFR_NORM, E33G_GACFR);
-
- if (base) { /* Shared memory transfer */
- memcpy_toio(base + ((start_page - ei_status.tx_start_page) << 8),
- buf, count);
- outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */
- return;
- }
-
-/*
- * No shared memory, put the packet out the other way.
- * Set up then start the internal memory transfer to Tx Start Page
- */
-
- word = (unsigned short)start_page;
- outb(word&0xFF, E33G_DMAAH);
- outb(word>>8, E33G_DMAAL);
-
- outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT
- | ECNTRL_START, E33G_CNTRL);
-
-/*
- * Here I am going to write data to the FIFO as quickly as possible.
- * Note that E33G_FIFOH is defined incorrectly. It is really
- * E33G_FIFOL, the lowest port address for both the byte and
- * word write. Variable 'count' is NOT checked. Caller must supply a
- * valid count. Note that I may write a harmless extra byte to the
- * 8390 if the byte-count was not even.
- */
- wrd = (unsigned short int *) buf;
- count = (count + 1) >> 1;
- for(;;)
- {
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_block_output.\n", dev->name);
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- if(count > WRD_COUNT)
- {
- outsw(E33G_FIFOH, wrd, WRD_COUNT);
- wrd += WRD_COUNT;
- count -= WRD_COUNT;
- }
- else
- {
- outsw(E33G_FIFOH, wrd, count);
- break;
- }
- }
- blocked:;
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
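
The byte-to-word arithmetic in the loop above is easy to misread: count = (count + 1) >> 1 rounds the byte count up to whole 16-bit FIFO words, so an odd-length frame pushes one extra byte, which the comment in the function notes is harmless. A one-line helper makes the rounding explicit:

#include <stddef.h>

/* Round a byte count up to 16-bit words: 31 bytes -> 16 words (32 bytes). */
static size_t bytes_to_words(size_t count)
{
	return (count + 1) >> 1;
}
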
-
-/* Read the 4 byte, page aligned 8390 specific header. */
-static void
-el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int boguscount;
- void __iomem *base = ei_status.mem;
- unsigned short word;
-
- if (base) { /* Use the shared memory. */
- void __iomem *hdr_start = base + ((ring_page - EL2_MB1_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = le16_to_cpu(hdr->count);
- return;
- }
-
-/*
- * No shared memory, use programmed I/O.
- */
-
- word = (unsigned short)ring_page;
- outb(word&0xFF, E33G_DMAAH);
- outb(word>>8, E33G_DMAAL);
-
- outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
- | ECNTRL_START, E33G_CNTRL);
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
- memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1);
- blocked:;
- outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
-
-
-static void
-el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int boguscount = 0;
- void __iomem *base = ei_status.mem;
- unsigned short int *buf;
- unsigned short word;
-
- /* Maybe enable shared memory just be to be safe... nahh.*/
- if (base) { /* Use the shared memory. */
- ring_offset -= (EL2_MB1_START_PG<<8);
- if (ring_offset + count > EL2_MEMSIZE) {
- /* We must wrap the input move. */
- int semi_count = EL2_MEMSIZE - ring_offset;
- memcpy_fromio(skb->data, base + ring_offset, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count);
- } else {
- memcpy_fromio(skb->data, base + ring_offset, count);
- }
- return;
- }
-
-/*
- * No shared memory, use programmed I/O.
- */
- word = (unsigned short) ring_offset;
- outb(word>>8, E33G_DMAAH);
- outb(word&0xFF, E33G_DMAAL);
-
- outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
- | ECNTRL_START, E33G_CNTRL);
-
-/*
- * Here I also try to get data as fast as possible. I am betting that I
- * can read one extra byte without clobbering anything in the kernel because
- * this would only occur on an odd byte-count and allocation of skb->data
- * is word-aligned. Variable 'count' is NOT checked. Caller must check
- * for a valid count.
- * [This is currently quite safe.... but if one day the 3c503 explodes
- * you know where to come looking ;)]
- */
-
- buf = (unsigned short int *) skb->data;
- count = (count + 1) >> 1;
- for(;;)
- {
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name);
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- if(count > WRD_COUNT)
- {
- insw(E33G_FIFOH, buf, WRD_COUNT);
- buf += WRD_COUNT;
- count -= WRD_COUNT;
- }
- else
- {
- insw(E33G_FIFOH, buf, count);
- break;
- }
- }
- blocked:;
- outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
-#ifdef MODULE
-#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */
-
-static struct net_device *dev_el2[MAX_EL2_CARDS];
-static int io[MAX_EL2_CARDS];
-static int irq[MAX_EL2_CARDS];
-static int xcvr[MAX_EL2_CARDS]; /* choose int. or ext. xcvr */
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
-MODULE_DESCRIPTION("3Com ISA EtherLink II, II/16 (3c503, 3c503/16) driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- pr_notice("3c503.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
- if (do_el2_probe(dev) == 0) {
- dev_el2[found++] = dev;
- continue;
- }
- free_netdev(dev);
- pr_warning("3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: el2_close() handles free_irq */
- release_region(dev->base_addr, EL2_IO_EXTENT);
- if (ei_status.mem)
- iounmap(ei_status.mem);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
- struct net_device *dev = dev_el2[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/3c503.h b/drivers/net/ethernet/8390/3c503.h
deleted file mode 100644
index e2367b82a2ec..000000000000
--- a/drivers/net/ethernet/8390/3c503.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* Definitions for the 3Com 3c503 Etherlink 2. */
-/* This file is distributed under the GPL.
- Many of these names and comments are directly from the Crynwr packet
- drivers, which are released under the GPL. */
-
-#define EL2H (dev->base_addr + 0x400)
-#define EL2L (dev->base_addr)
-
-/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
- out of available addresses on the first one... */
-
-#define OLD_3COM_ID 0x02608c
-#define NEW_3COM_ID 0x0020af
-
-/* Shared memory management parameters. NB: The 8 bit cards have only
- one bank (MB1) which serves both Tx and Rx packet space. The 16bit
- cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets.
- You choose which bank appears in the sh. mem window with EGACFR_MBSn */
-
-#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */
-#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */
-#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */
-
-/* 3Com 3c503 ASIC registers */
-#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */
-#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */
-#define E33G_DRQCNT (EL2H+2) /* DMA burst count */
-#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */
- /* (non-useful, but it also appears at the end of EPROM space) */
-#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */
-#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */
-#define E33G_CNTRL (EL2H+6) /* Board's main control register */
-#define E33G_STATUS (EL2H+7) /* Status on completions. */
-#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */
- /* (Which IRQ to assert, DMA chan to use) */
-#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */
-#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */
-/* "Vector pointer" - if this address matches a read, the EPROM (rather than
- shared RAM) is mapped into memory space. */
-#define E33G_VP2 (EL2H+11)
-#define E33G_VP1 (EL2H+12)
-#define E33G_VP0 (EL2H+13)
-#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */
-#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */
-
-/* Bits in E33G_CNTRL register: */
-
-#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */
-#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */
-#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */
-#define ECNTRL_SAPROM (0x04) /* Map the station address prom */
-#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */
-#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */
-#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */
-#define ECNTRL_START (0x80) /* Start the DMA logic */
-
-/* Bits in E33G_STATUS register: */
-
-#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */
-#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */
-#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */
-#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */
-#define ESTAT_DIP (0x08) /* DMA In Progress */
-
-/* Bits in E33G_GACFR register: */
-
-#define EGACFR_NIM (0x80) /* NIC interrupt mask */
-#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */
-#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */
-#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */
-#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */
-#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */
-
-#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */
-#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */
-
-/*
- MBS2 MBS1 MBS0 Sh. mem windows card mem at:
- ---- ---- ---- -----------------------------
- 0 0 0 0x0000 -- bank 0
- 0 0 1 0x2000 -- bank 1 (only choice for 8bit card)
- 0 1 0 0x4000 -- bank 2, not used
- 0 1 1 0x6000 -- bank 3, not used
-
-There was going to be a 32k card that used bank 2 and 3, but it
-never got produced.
-
-*/
-
-
-/* End of 3C503 parameter definitions */
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index e1219e037c04..a5f91e1e8fe3 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -5,10 +5,7 @@
config NET_VENDOR_8390
bool "National Semi-conductor 8390 devices"
default y
- depends on NET_VENDOR_NATSEMI && (AMIGA_PCMCIA || PCI || SUPERH || \
- ISA || MCA || EISA || MAC || M32R || MACH_TX49XX || \
- MCA_LEGACY || H8300 || ARM || MIPS || ZORRO || PCMCIA || \
- EXPERIMENTAL)
+ depends on NET_VENDOR_NATSEMI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -21,30 +18,6 @@ config NET_VENDOR_8390
if NET_VENDOR_8390
-config EL2
- tristate "3c503 \"EtherLink II\" support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c503.
-
-config AC3200
- tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)"
- depends on PCI && (ISA || EISA) && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ac3200.
-
config PCMCIA_AXNET
tristate "Asix AX88190 PCMCIA support"
depends on PCMCIA
@@ -74,54 +47,6 @@ config AX88796_93CX6
---help---
Select this if your platform comes with an external 93CX6 eeprom.
-config E2100
- tristate "Cabletron E21xx support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called e2100.
-
-config ES3210
- tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called es3210.
-
-config HPLAN_PLUS
- tristate "HP PCLAN+ (27247B and 27252A) support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp-plus.
-
-config HPLAN
- tristate "HP PCLAN (27245 and other 27xxx series) support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp.
-
config HYDRA
tristate "Hydra support"
depends on ZORRO
@@ -140,18 +65,6 @@ config ARM_ETHERH
If you have an Acorn system with one of these network cards, you
should say Y to this option if you wish to use it with Linux.
-config LNE390
- tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called lne390.
-
config MAC8390
bool "Macintosh NS 8390 based ethernet cards"
depends on MAC
@@ -187,11 +100,7 @@ config NE2000
without a specific driver are compatible with NE2000.
If you have a PCI NE2000 card however, say N here and Y to "PCI
- NE2000 and clone support" under "EISA, VLB, PCI and on board
- controllers" below. If you have a NE2000 card and are running on
- an MCA system (a bus system used on some IBM PS/2 computers and
- laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
- below.
+ NE2000 and clone support" below.
To compile this driver as a module, choose M here. The module
will be called ne.
@@ -226,19 +135,6 @@ config APNE
To compile this driver as a module, choose M here: the module
will be called apne.
-config NE3210
- tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that this driver
- will NOT WORK for NE3200 cards as they are completely different.
-
- To compile this driver as a module, choose M here. The module
- will be called ne3210.
-
config PCMCIA_PCNET
tristate "NE2000 compatible PCMCIA support"
depends on PCMCIA
@@ -288,18 +184,6 @@ config ULTRA
To compile this driver as a module, choose M here. The module
will be called smc-ultra.
-config ULTRA32
- tristate "SMC Ultra32 EISA support"
- depends on EISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called smc-ultra32.
-
config WD80x3
tristate "WD80*3 support"
depends on ISA
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index f43038babf86..588954a79b2a 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -3,27 +3,17 @@
#
obj-$(CONFIG_MAC8390) += mac8390.o
-obj-$(CONFIG_AC3200) += ac3200.o 8390.o
obj-$(CONFIG_APNE) += apne.o 8390.o
obj-$(CONFIG_ARM_ETHERH) += etherh.o
obj-$(CONFIG_AX88796) += ax88796.o
-obj-$(CONFIG_E2100) += e2100.o 8390.o
-obj-$(CONFIG_EL2) += 3c503.o 8390p.o
-obj-$(CONFIG_ES3210) += es3210.o 8390.o
-obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
-obj-$(CONFIG_HPLAN) += hp.o 8390p.o
obj-$(CONFIG_HYDRA) += hydra.o 8390.o
-obj-$(CONFIG_LNE390) += lne390.o 8390.o
obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
-obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_NE3210) += ne3210.o 8390.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
-obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
diff --git a/drivers/net/ethernet/8390/ac3200.c b/drivers/net/ethernet/8390/ac3200.c
deleted file mode 100644
index ccf07942ff6e..000000000000
--- a/drivers/net/ethernet/8390/ac3200.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */
-/*
- Written 1993, 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the Director,
- National Security Agency. This software may only be used and distributed
- according to the terms of the GNU General Public License as modified by SRC,
- incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is driver for the Ansel Communications Model 3200 EISA Ethernet LAN
- Adapter. The programming information is from the users manual, as related
- by glee@ardnassak.math.clemson.edu.
-
- Changelog:
-
- Paul Gortmaker 05/98 : add support for shared mem above 1MB.
-
- */
-
-static const char version[] =
- "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include "8390.h"
-
-#define DRV_NAME "ac3200"
-
-/* Offsets from the base address. */
-#define AC_NIC_BASE 0x00
-#define AC_SA_PROM 0x16 /* The station address PROM. */
-#define AC_ADDR0 0x00 /* Prefix station address values. */
-#define AC_ADDR1 0x40
-#define AC_ADDR2 0x90
-#define AC_ID_PORT 0xC80
-#define AC_EISA_ID 0x0110d305
-#define AC_RESET_PORT 0xC84
-#define AC_RESET 0x00
-#define AC_ENABLE 0x01
-#define AC_CONFIG 0xC90 /* The configuration port. */
-
-#define AC_IO_EXTENT 0x20
- /* Actually accessed is:
- * AC_NIC_BASE (0-15)
- * AC_SA_PROM (0-5)
- * AC_ID_PORT (0-3)
- * AC_RESET_PORT
- * AC_CONFIG
- */
-
-/* Decoding of the configuration register. */
-static unsigned char config2irqmap[8] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static int addrmap[8] =
-{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 };
-static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"};
-
-#define config2irq(configval) config2irqmap[((configval) >> 3) & 7]
-#define config2mem(configval) addrmap[(configval) & 7]
-#define config2name(configval) port_name[((configval) >> 6) & 3]
-
-/* First and last 8390 pages. */
-#define AC_START_PG 0x00 /* First page of 8390 TX buffer */
-#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */
-
-static int ac_probe1(int ioaddr, struct net_device *dev);
-
-static int ac_open(struct net_device *dev);
-static void ac_reset_8390(struct net_device *dev);
-static void ac_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ac_block_output(struct net_device *dev, const int count,
- const unsigned char *buf, const int start_page);
-static void ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-
-static int ac_close_card(struct net_device *dev);
-
-
-/* Probe for the AC3200.
-
- The AC3200 can be identified by either the EISA configuration registers,
- or the unique value in the station address PROM.
- */
-
-static int __init do_ac3200_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
-
- if (ioaddr > 0x1ff) /* Check a single specified location. */
- return ac_probe1(ioaddr, dev);
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if ( ! EISA_bus)
- return -ENXIO;
-
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (ac_probe1(ioaddr, dev) == 0)
- return 0;
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init ac3200_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_ac3200_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops ac_netdev_ops = {
- .ndo_open = ac_open,
- .ndo_stop = ac_close_card,
-
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init ac_probe1(int ioaddr, struct net_device *dev)
-{
- int i, retval;
-
- if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (inb_p(ioaddr + AC_ID_PORT) == 0xff) {
- retval = -ENODEV;
- goto out;
- }
-
- if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) {
- retval = -ENODEV;
- goto out;
- }
-
-#ifndef final_version
- printk(KERN_DEBUG "AC3200 ethercard configuration register is %#02x,"
- " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG),
- inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1),
- inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3));
-#endif
-
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i);
-
- printk(KERN_DEBUG "AC3200 in EISA slot %d, node %pM",
- ioaddr/0x1000, dev->dev_addr);
-#if 0
- /* Check the vendor ID/prefix. Redundant after checking the EISA ID */
- if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
- || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1
- || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) {
- printk(", not found (invalid prefix).\n");
- retval = -ENODEV;
- goto out;
- }
-#endif
-
- /* Assign and allocate the interrupt now. */
- if (dev->irq == 0) {
- dev->irq = config2irq(inb(ioaddr + AC_CONFIG));
- printk(", using");
- } else {
- dev->irq = irq_canonicalize(dev->irq);
- printk(", assigning");
- }
-
- retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk (" nothing! Unable to get IRQ %d.\n", dev->irq);
- goto out;
- }
-
- printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]);
-
- dev->base_addr = ioaddr;
-
-#ifdef notyet
- if (dev->mem_start) { /* Override the value from the board. */
- for (i = 0; i < 7; i++)
- if (addrmap[i] == dev->mem_start)
- break;
- if (i >= 7)
- i = 0;
- outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG);
- }
-#endif
-
- dev->if_port = inb(ioaddr + AC_CONFIG) >> 6;
- dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG));
-
- printk("%s: AC3200 at %#3x with %dkB memory at physical address %#lx.\n",
- dev->name, ioaddr, AC_STOP_PG/4, dev->mem_start);
-
- /*
- * BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
- * the card mem within the region covered by `normal' RAM !!!
- *
- * ioremap() will fail in that case.
- */
- ei_status.mem = ioremap(dev->mem_start, AC_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "ac3200.c: Unable to remap card memory above 1MB !!\n");
- printk(KERN_ERR "ac3200.c: Try using EISA SCU to set memory below 1MB.\n");
- printk(KERN_ERR "ac3200.c: Driver NOT installed.\n");
- retval = -EINVAL;
- goto out1;
- }
- printk("ac3200.c: remapped %dkB card memory to virtual address %p\n",
- AC_STOP_PG/4, ei_status.mem);
-
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (AC_STOP_PG - AC_START_PG)*256;
-
- ei_status.name = "AC3200";
- ei_status.tx_start_page = AC_START_PG;
- ei_status.rx_start_page = AC_START_PG + TX_PAGES;
- ei_status.stop_page = AC_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &ac_reset_8390;
- ei_status.block_input = &ac_block_input;
- ei_status.block_output = &ac_block_output;
- ei_status.get_8390_hdr = &ac_get_8390_hdr;
-
- dev->netdev_ops = &ac_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out2;
- return 0;
-out2:
- if (ei_status.reg0)
- iounmap(ei_status.mem);
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, AC_IO_EXTENT);
- return retval;
-}
-
-static int ac_open(struct net_device *dev)
-{
-#ifdef notyet
- /* Someday we may enable the IRQ and shared memory here. */
- int ioaddr = dev->base_addr;
-#endif
-
- ei_open(dev);
- return 0;
-}
-
-static void ac_reset_8390(struct net_device *dev)
-{
- ushort ioaddr = dev->base_addr;
-
- outb(AC_RESET, ioaddr + AC_RESET_PORT);
- if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies);
-
- ei_status.txing = 0;
- outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void
-ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
-}
-
-/* Block input and output are easy on shared memory ethercards, the only
- complication is when the ring buffer wraps. */
-
-static void ac_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *start = ei_status.mem + ring_offset - AC_START_PG*256;
-
- if (ring_offset + count > AC_STOP_PG*256) {
- /* We must wrap the input move. */
- int semi_count = AC_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + TX_PAGES*256, count);
- } else {
- memcpy_fromio(skb->data, start, count);
- }
-}
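
ac_block_input() above is the usual shared-memory ring read: when the requested span runs past the end of the receive ring, the copy is split into the tail up to the ring end plus a second piece that wraps back to the start of the ring, which on this board sits just after the transmit pages. A self-contained sketch of the wrap logic, with the ring geometry passed in as hypothetical parameters:

#include <string.h>

/*
 * Generic ring-wrap copy mirroring ac_block_input() above; ring_size and
 * rx_start are stand-ins for AC_STOP_PG*256 and TX_PAGES*256.
 */
static void ring_copy(unsigned char *dst, const unsigned char *ring,
		      size_t ring_size, size_t rx_start,
		      size_t offset, size_t count)
{
	if (offset + count > ring_size) {
		size_t tail = ring_size - offset;	/* bytes before the wrap point */

		memcpy(dst, ring + offset, tail);
		memcpy(dst + tail, ring + rx_start, count - tail);
	} else {
		memcpy(dst, ring + offset, count);
	}
}
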
-
-static void ac_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - AC_START_PG)<<8);
-
- memcpy_toio(shmem, buf, count);
-}
-
-static int ac_close_card(struct net_device *dev)
-{
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
-#ifdef notyet
- /* We should someday disable shared memory and interrupts. */
- outb(0x00, ioaddr + 6); /* Disable interrupts. */
- free_irq(dev->irq, dev);
-#endif
-
- ei_close(dev);
- return 0;
-}
-
-#ifdef MODULE
-#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */
-static struct net_device *dev_ac32[MAX_AC32_CARDS];
-static int io[MAX_AC32_CARDS];
-static int irq[MAX_AC32_CARDS];
-static int mem[MAX_AC32_CARDS];
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "Memory base address(es)");
-MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-static int __init ac3200_module_init(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev]; /* Currently ignored by driver */
- if (do_ac3200_probe(dev) == 0) {
- dev_ac32[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* Someday free_irq may be in ac_close_card() */
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, AC_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-static void __exit ac3200_module_exit(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
- struct net_device *dev = dev_ac32[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-module_init(ac3200_module_init);
-module_exit(ac3200_module_exit);
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 70dba5d01ad3..cab306a9888e 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -358,7 +358,7 @@ static int ax_mii_probe(struct net_device *dev)
return -ENODEV;
}
- ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+ ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change,
PHY_INTERFACE_MODE_MII);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -469,9 +469,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pdev->name);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/8390/e2100.c b/drivers/net/ethernet/8390/e2100.c
deleted file mode 100644
index ed55ce85ebbf..000000000000
--- a/drivers/net/ethernet/8390/e2100.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
-/*
- Written 1993-1994 by Donald Becker.
-
- Copyright 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- This is a driver for the Cabletron E2100 series ethercards.
-
- The Author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- The E2100 series ethercard is a fairly generic shared memory 8390
- implementation. The only unusual aspect is the way the shared memory
- registers are set: first you do an inb() in what is normally the
- station address region, and the low three bits of next outb() *address*
- is used as the write value for that register. Either someone wasn't
- too used to dem bit en bites, or they were trying to obfuscate the
- programming interface.
-
- There is an additional complication when setting the window on the packet
- buffer. You must first do a read into the packet buffer region with the
- low 8 address bits the address setting the page for the start of the packet
- buffer window, and then do the above operation. See mem_on() for details.
-
- One bug on the chip is that even a hard reset won't disable the memory
- window, usually resulting in a hung machine if mem_off() isn't called.
- If this happens, you must power down the machine for about 30 seconds.
-*/
-
-static const char version[] =
- "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "e2100"
-
-static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
-
-/* Offsets from the base_addr.
- Read from the ASIC register, and the low three bits of the next outb()
- address is used to set the corresponding register. */
-#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
-#define E21_ASIC 0x10
-#define E21_MEM_ENABLE 0x10
-#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
-#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
-#define E21_MEM_BASE 0x11
-#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
-#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
-#define E21_MEDIA 0x14 /* (alias). */
-#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
-#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
-#define E21_SAPROM 0x10 /* Offset to station address data. */
-#define E21_IO_EXTENT 0x20
-
-static inline void mem_on(short port, volatile char __iomem *mem_base,
- unsigned char start_page )
-{
- /* This is a little weird: set the shared memory window by doing a
- read. The low address bits specify the starting page. */
- readb(mem_base+start_page);
- inb(port + E21_MEM_ENABLE);
- outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
-}
-
-static inline void mem_off(short port)
-{
- inb(port + E21_MEM_ENABLE);
- outb(0x00, port + E21_MEM_ENABLE);
-}
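The same inb()-then-outb()-to-address trick used by mem_on()/mem_off() above is how every E21 ASIC register is written: reading at the register's offset selects it, and the data travels in the low three bits of the *address* of the following outb(). A minimal helper capturing the pattern might look like the sketch below (e21_write_asic is a hypothetical name; the driver open-codes the sequence, for example in e21_open() when programming the IRQ and memory base):

static inline void e21_write_asic(short port, int reg_offset, unsigned char value)
{
	/* Reading at the register's offset selects that register... */
	inb(port + reg_offset);
	/* ...and, per the description above, the data is carried in the low
	 * 3 bits of the outb() address, not in the byte actually written. */
	outb(0, port + E21_ASIC + (value & 7));
}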
-
-/* In other drivers I put the TX pages first, but the E2100 window circuitry
- is designed to have a 4K Tx region last. The windowing circuitry wraps the
- window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
- appear contiguously in the window. */
-#define E21_RX_START_PG 0x00 /* First page of RX buffer */
-#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
-#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
-#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
-
-static int e21_probe1(struct net_device *dev, int ioaddr);
-
-static int e21_open(struct net_device *dev);
-static void e21_reset_8390(struct net_device *dev);
-static void e21_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void e21_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static int e21_open(struct net_device *dev);
-static int e21_close(struct net_device *dev);
-
-
-/* Probe for the E2100 series ethercards. These cards have an 8390 at the
- base address and the station address at both offset 0x10 and 0x18. I read
- the station address from offset 0x18 to avoid the dataport of NE2000
- ethercards, and look for Ctron's unique ID (first three octets of the
- station address).
- */
-
-static int __init do_e2100_probe(struct net_device *dev)
-{
- int *port;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return e21_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (port = e21_probe_list; *port; port++) {
- dev->irq = irq;
- if (e21_probe1(dev, *port) == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init e2100_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_e2100_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops e21_netdev_ops = {
- .ndo_open = e21_open,
- .ndo_stop = e21_close,
-
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init e21_probe1(struct net_device *dev, int ioaddr)
-{
- int i, status, retval;
- unsigned char *station_addr = dev->dev_addr;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* First check the station address for the Ctron prefix. */
- if (inb(ioaddr + E21_SAPROM + 0) != 0x00 ||
- inb(ioaddr + E21_SAPROM + 1) != 0x00 ||
- inb(ioaddr + E21_SAPROM + 2) != 0x1d) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Verify by making certain that there is an 8390 there. */
- outb(E8390_NODMA + E8390_STOP, ioaddr);
- udelay(1); /* we want to delay one I/O cycle - which is 2MHz */
- status = inb(ioaddr);
- if (status != 0x21 && status != 0x23) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Read the station address PROM. */
- for (i = 0; i < 6; i++)
- station_addr[i] = inb(ioaddr + E21_SAPROM + i);
-
- inb(ioaddr + E21_MEDIA); /* Point to media selection. */
- outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- for (i = 0; i < 6; i++)
- printk(" %02X", station_addr[i]);
-
- if (dev->irq < 2) {
- static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
- for (i = 0; i < ARRAY_SIZE(irqlist); i++)
- if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
- dev->irq = irqlist[i];
- break;
- }
- if (i >= ARRAY_SIZE(irqlist)) {
- printk(" unable to get IRQ %d.\n", dev->irq);
- retval = -EAGAIN;
- goto out;
- }
- } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
- dev->irq = 9;
-
- /* The 8390 is at the base address. */
- dev->base_addr = ioaddr;
-
- ei_status.name = "E2100";
- ei_status.word16 = 1;
- ei_status.tx_start_page = E21_TX_START_PG;
- ei_status.rx_start_page = E21_RX_START_PG;
- ei_status.stop_page = E21_RX_STOP_PG;
- ei_status.saved_irq = dev->irq;
-
- /* Check the media port used. The port can be passed in on the
- low mem_end bits. */
- if (dev->mem_end & 15)
- dev->if_port = dev->mem_end & 7;
- else {
- dev->if_port = 0;
- inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
- for(i = 0; i < 6; i++)
- if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
- dev->if_port = 1;
- break;
- }
- }
-
- /* Never map in the E21 shared memory unless you are actively using it.
- Also, the shared memory effectively has only one setting -- spread all
- over the 128K region! */
- if (dev->mem_start == 0)
- dev->mem_start = 0xd0000;
-
- ei_status.mem = ioremap(dev->mem_start, 2*1024);
- if (!ei_status.mem) {
- printk("unable to remap memory\n");
- retval = -EAGAIN;
- goto out;
- }
-
-#ifdef notdef
- /* These values are unused. The E2100 has a 2K window into the packet
- buffer. The window can be set to start on any page boundary. */
- ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
- dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024;
-#endif
-
- printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
- dev->if_port ? "secondary" : "primary", dev->mem_start);
-
- ei_status.reset_8390 = &e21_reset_8390;
- ei_status.block_input = &e21_block_input;
- ei_status.block_output = &e21_block_output;
- ei_status.get_8390_hdr = &e21_get_8390_hdr;
-
- dev->netdev_ops = &e21_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out;
- return 0;
-out:
- release_region(ioaddr, E21_IO_EXTENT);
- return retval;
-}
-
-static int
-e21_open(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
- int retval;
-
- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
- return retval;
-
- /* Set the interrupt line and memory base on the hardware. */
- inb(ioaddr + E21_IRQ_LOW);
- outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
- inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
- outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
- + (dev->if_port ? E21_ALT_IFPORT : 0));
- inb(ioaddr + E21_MEM_BASE);
- outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
-
- ei_open(dev);
- return 0;
-}
-
-static void
-e21_reset_8390(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- outb(0x01, ioaddr);
- if (ei_debug > 1) printk("resetting the E2100 t=%ld...", jiffies);
- ei_status.txing = 0;
-
- /* Set up the ASIC registers, just in case something changed them. */
-
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. We position the 2K window so the header page
- appears at the start of the shared memory. */
-
-static void
-e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
-
- short ioaddr = dev->base_addr;
- char __iomem *shared_mem = ei_status.mem;
-
- mem_on(ioaddr, shared_mem, ring_page);
-
-#ifdef notdef
- /* Officially this is what we are doing, but the readl() is faster */
- memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
-#else
- ((unsigned int*)hdr)[0] = readl(shared_mem);
-#endif
-
- /* Turn off memory access: we would need to reprogram the window anyway. */
- mem_off(ioaddr);
-
-}
-
-/* Block input and output are easy on shared memory ethercards.
- The E21xx makes block_input() especially easy by wrapping the top
- ring buffer to the bottom automatically. */
-static void
-e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- short ioaddr = dev->base_addr;
- char __iomem *shared_mem = ei_status.mem;
-
- mem_on(ioaddr, shared_mem, (ring_offset>>8));
-
- memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count);
-
- mem_off(ioaddr);
-}
-
-static void
-e21_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
-{
- short ioaddr = dev->base_addr;
- volatile char __iomem *shared_mem = ei_status.mem;
-
- /* Set the shared memory window start by doing a read, with the low address
- bits specifying the starting page. */
- readb(shared_mem + start_page);
- mem_on(ioaddr, shared_mem, start_page);
-
- memcpy_toio(shared_mem, buf, count);
- mem_off(ioaddr);
-}
-
-static int
-e21_close(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
- free_irq(dev->irq, dev);
- dev->irq = ei_status.saved_irq;
-
- /* Shut off the interrupt line and secondary interface. */
- inb(ioaddr + E21_IRQ_LOW);
- outb(0, ioaddr + E21_ASIC);
- inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
- outb(0, ioaddr + E21_ASIC);
-
- ei_close(dev);
-
- /* Double-check that the memory has been turned off, because really
- really bad things happen if it isn't. */
- mem_off(ioaddr);
-
- return 0;
-}
-
-
-#ifdef MODULE
-#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
-static struct net_device *dev_e21[MAX_E21_CARDS];
-static int io[MAX_E21_CARDS];
-static int irq[MAX_E21_CARDS];
-static int mem[MAX_E21_CARDS];
-static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. xcvr */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "memory base address(es)");
-MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
-MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
- if (do_e2100_probe(dev) == 0) {
- dev_e21[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: e21_close() handles free_irq */
- iounmap(ei_status.mem);
- release_region(dev->base_addr, E21_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
- struct net_device *dev = dev_e21[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
deleted file mode 100644
index ba1b5c95531f..000000000000
--- a/drivers/net/ethernet/8390/es3210.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- es3210.c
-
- Linux driver for Racal-Interlan ES3210 EISA Network Adapter
-
- Copyright (C) 1996, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) The existing myriad of Linux 8390 drivers written by Donald Becker.
-
- 2) Once again Russ Nelson's asm packet driver provided additional info.
-
- 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
- Too bad it doesn't work -- see below.
-
- The ES3210 is an EISA shared memory NS8390 implementation. Note
- that all memory copies to/from the board must be 32bit transfers,
- which rules out using eth_io_copy_and_sum() in this driver.
-
- Apparently there are two slightly different revisions of the
- card, since there are two distinct EISA cfg files (!rii0101.cfg
- and !rii0102.cfg) One has media select in the cfg file and the
- other doesn't. Hopefully this will work with either.
-
- That is about all I can tell you about it, having never actually
- even seen one of these cards. :) Try http://www.interlan.com
- if you want more info.
-
- Thanks go to Mark Salazar for testing v0.02 of this driver.
-
- Bugs, to-fix, etc:
-
- 1) The EISA cfg ports that are *supposed* to have the IRQ and shared
- mem values just read 0xff all the time. Hrrmpf. Apparently the
- same happens with the packet driver as the code for reading
- these registers is disabled there. In the meantime, boot with:
- ether=<IRQ>,0,0x<shared_mem_addr>,eth0 to override the IRQ and
- shared memory detection. (The i/o port detection is okay.)
-
- 2) Module support currently untested. Probably works though.
-
-*/
-
-static const char version[] =
- "es3210.c: Driver revision v0.03, 14/09/96\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-static int es_probe1(struct net_device *dev, int ioaddr);
-
-static void es_reset_8390(struct net_device *dev);
-
-static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page);
-
-#define ES_START_PG 0x00 /* First page of TX buffer */
-#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */
-
-#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */
-#define ES_ID_PORT 0xc80 /* Same for all EISA cards */
-#define ES_SA_PROM 0xc90 /* Start of e'net addr. */
-#define ES_RESET_PORT 0xc84 /* From the packet driver source */
-#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */
-
-#define ES_ADDR0 0x02 /* 3 byte vendor prefix */
-#define ES_ADDR1 0x07
-#define ES_ADDR2 0x01
-
-/*
- * Two card revisions. EISA IDs are always rev. minor, rev. major, and
- * then the three vendor letters stored in 5 bits each, with an "a" = 1.
- * For example: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA
- * config utility determines automagically what config file(s) to use.
- */
-#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */
-#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */
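As a worked example of the 5-bits-per-letter packing described above, the "rii" prefix can be reconstructed as follows (sketch only; eisa_vendor_id is a hypothetical helper, nothing in the driver computes this at run time):

/* Pack a three-letter EISA manufacturer code: 5 bits per letter, 'a' = 1. */
static unsigned short eisa_vendor_id(const char *mfg)
{
	return ((mfg[0] - 'a' + 1) << 10) |
	       ((mfg[1] - 'a' + 1) << 5) |
	        (mfg[2] - 'a' + 1);
}

eisa_vendor_id("rii") returns 0x4929 ('r' = 18, 'i' = 9). In the little-endian value read by inl() in es_probe1() those two bytes appear as 0x49, 0x29, i.e. the 0x2949 in the low half of ES_EISA_ID1/ES_EISA_ID2, with the product and revision bytes in the upper half.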
-
-#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */
-#define ES_CFG2 0xcc1
-#define ES_CFG3 0xcc2
-#define ES_CFG4 0xcc3
-#define ES_CFG5 0xcc4
-#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */
-
-/*
- * You can OR any of the following bits together and assign it
- * to ES_DEBUG to get verbose driver info during operation.
- * Some of these don't do anything yet.
- */
-
-#define ES_D_PROBE 0x01
-#define ES_D_RX_PKT 0x02
-#define ES_D_TX_PKT 0x04
-#define ED_D_IRQ 0x08
-
-#define ES_DEBUG 0
-
-static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10};
-static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15};
-
-/*
- * Probe for the card. The best way is to read the EISA ID if it
- * is known. Then we check the prefix of the station address
- * PROM for a match against the Racal-Interlan assigned value.
- */
-
-static int __init do_es_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
-
- if (ioaddr > 0x1ff) /* Check a single specified location. */
- return es_probe1(dev, ioaddr);
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if (!EISA_bus) {
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: Not EISA bus. Not probing high ports.\n");
-#endif
- return -ENXIO;
- }
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (es_probe1(dev, ioaddr) == 0)
- return 0;
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init es_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_es_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init es_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval;
- unsigned long eisa_id;
-
- if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210"))
- return -ENODEV;
-
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT));
- printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n",
- inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3),
- inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6));
-#endif
-
-/* Check the EISA ID of the card. */
- eisa_id = inl(ioaddr + ES_ID_PORT);
- if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) {
- retval = -ENODEV;
- goto out;
- }
-
- for (i = 0; i < ETH_ALEN ; i++)
- dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
-
-/* Check the Racal vendor ID as well. */
- if (dev->dev_addr[0] != ES_ADDR0 ||
- dev->dev_addr[1] != ES_ADDR1 ||
- dev->dev_addr[2] != ES_ADDR2) {
- printk("es3210.c: card not found %pM (invalid_prefix).\n",
- dev->dev_addr);
- retval = -ENODEV;
- goto out;
- }
-
- printk("es3210.c: ES3210 rev. %ld at %#x, node %pM",
- eisa_id>>24, ioaddr, dev->dev_addr);
-
- /* Snarf the interrupt now. */
- if (dev->irq == 0) {
- unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07;
- unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe;
-
- if (hi_irq != 0) {
- dev->irq = hi_irq_map[hi_irq - 1];
- } else {
- int i = 0;
- while (lo_irq > (1<<i)) i++;
- dev->irq = lo_irq_map[i];
- }
- printk(" using IRQ %d", dev->irq);
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n",
- hi_irq, lo_irq, dev->irq);
-#endif
- } else {
- if (dev->irq == 2)
- dev->irq = 9; /* Doh! */
- printk(" assigning IRQ %d", dev->irq);
- }
-
- if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- retval = -EAGAIN;
- goto out;
- }
-
- if (dev->mem_start == 0) {
- unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0;
- unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07;
-
- if (mem_enabled != 0x80) {
- printk(" shared mem disabled - giving up\n");
- retval = -ENXIO;
- goto out1;
- }
- dev->mem_start = 0xC0000 + mem_bits*0x4000;
- printk(" using ");
- } else {
- printk(" assigning ");
- }
-
- ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256);
- if (!ei_status.mem) {
- printk("ioremap failed - giving up\n");
- retval = -ENXIO;
- goto out1;
- }
-
- dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256;
-
- printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1);
-
-#if ES_DEBUG & ES_D_PROBE
- if (inb(ioaddr + ES_CFG5))
- printk("es3210: Warning - DMA channel enabled, but not used here.\n");
-#endif
- /* Note, point at the 8390, and not the card... */
- dev->base_addr = ioaddr + ES_NIC_OFFSET;
-
- ei_status.name = "ES3210";
- ei_status.tx_start_page = ES_START_PG;
- ei_status.rx_start_page = ES_START_PG + TX_PAGES;
- ei_status.stop_page = ES_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &es_reset_8390;
- ei_status.block_input = &es_block_input;
- ei_status.block_output = &es_block_output;
- ei_status.get_8390_hdr = &es_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT);
- return retval;
-}
-
-/*
- * Reset as per the packet driver method. Judging by the EISA cfg
- * file, this just toggles the "Board Enable" bits (bit 2 and 0).
- */
-
-static void es_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- unsigned long end;
-
- outb(0x04, ioaddr + ES_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name);
-
- end = jiffies + 2*HZ/100;
- while ((signed)(end - jiffies) > 0) continue;
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + ES_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
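The busy-wait above (2*HZ/100, about 20 ms) relies on a signed difference so the comparison stays correct across a jiffies wraparound; it is the open-coded form of what time_before() expresses. A rough equivalent, assuming only the usual kernel headers (<linux/jiffies.h> and friends):

	unsigned long end = jiffies + 2 * HZ / 100;	/* about 20 ms */

	while (time_before(jiffies, end))
		cpu_relax();	/* same spin, wrap-safe comparison */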
-
-/*
- * Note: In the following three functions is the implicit assumption
- * that the associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via es_get_8390_hdr() above.
- */
-
-static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256;
-
- if (ring_offset + count > ES_STOP_PG*256) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = ES_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void es_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-#ifdef MODULE
-#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */
-#define NAMELEN 8 /* # of chars for storing dev->name */
-static struct net_device *dev_es3210[MAX_ES_CARDS];
-static int io[MAX_ES_CARDS];
-static int irq[MAX_ES_CARDS];
-static int mem[MAX_ES_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "memory base address(es)");
-MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- if (do_es_probe(dev) == 0) {
- dev_es3210[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, ES_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
- struct net_device *dev = dev_es3210[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
deleted file mode 100644
index 52f70f999c00..000000000000
--- a/drivers/net/ethernet/8390/hp-plus.c
+++ /dev/null
@@ -1,505 +0,0 @@
-/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
-/*
- Written 1994 by Donald Becker.
-
- This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
- These cards are sold under several model numbers, usually 2724*.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- As is often the case, a great deal of credit is owed to Russ Nelson.
- The Crynwr packet driver was my primary source of HP-specific
- programming information.
-*/
-
-static const char version[] =
-"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-
-#include <linux/string.h> /* Important -- this inlines word moves. */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "hp-plus"
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int hpplus_portlist[] __initdata =
-{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
-
-/*
- The HP EtherTwist chip implementation is a fairly routine DP8390
- implementation. It allows both shared memory and programmed-I/O buffer
- access, using a custom interface for both. The programmed-I/O mode is
- entirely implemented in the HP EtherTwist chip, bypassing the problem
- ridden built-in 8390 facilities used on NE2000 designs. The shared
- memory mode is likewise special, with an offset register used to make
- packets appear at the shared memory base. Both modes use a base and bounds
- page register to hide the Rx ring buffer wrap -- a packet that spans the
- end of physical buffer memory appears contiguous to the driver. (cf. the
- 3c503 and Cabletron E2100)
-
- A special note: the internal buffer of the board is only 8 bits wide.
- This lays several nasty traps for the unaware:
- - the 8390 must be programmed for byte-wide operations
- - all I/O and memory operations must work on whole words (the access
- latches are serially preloaded and have no byte-swapping ability).
-
- This board is laid out in I/O space much like the earlier HP boards:
- the first 16 locations are for the board registers, and the second 16 are
- for the 8390. The board is easy to identify, with both a dedicated 16 bit
- ID register and a constant 0x530* value in the upper bits of the paging
- register.
-*/
-
-#define HP_ID 0x00 /* ID register, always 0x4850. */
-#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
-#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
-#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
-#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
-#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
-#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
-#define HP_IO_EXTENT 32
-
-#define HP_START_PG 0x00 /* First page of TX buffer */
-#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-/* The register set selected in HP_PAGING. */
-enum PageName {
- Perf_Page = 0, /* Normal operation. */
- MAC_Page = 1, /* The ethernet address (+checksum). */
- HW_Page = 2, /* EEPROM-loaded hardware parameters. */
- LAN_Page = 4, /* Transceiver selection, testing, etc. */
- ID_Page = 6 };
-
-/* The bit definitions for the HPP_OPTION register. */
-enum HP_Option {
- NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
- EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
- MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
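Registers at offsets 8-0xf are banked: writing one of the PageName values to HP_PAGING selects which bank is visible, and the driver always returns to Perf_Page for normal operation. The probe code below open-codes this; condensed into a sketch (hpp_read_mac is a hypothetical helper, not part of the driver):

/* Read the station address through the register paging scheme.
 * ioaddr is the board base, i.e. dev->base_addr - NIC_OFFSET. */
static void hpp_read_mac(int ioaddr, unsigned char *addr)
{
	int i;

	outw(MAC_Page, ioaddr + HP_PAGING);	/* expose MAC regs at 8..0xf */
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = inb(ioaddr + 8 + i);
	outw(Perf_Page, ioaddr + HP_PAGING);	/* back to the normal bank */
}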
-
-static int hpp_probe1(struct net_device *dev, int ioaddr);
-
-static void hpp_reset_8390(struct net_device *dev);
-static int hpp_open(struct net_device *dev);
-static int hpp_close(struct net_device *dev);
-static void hpp_mem_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void hpp_mem_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void hpp_io_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void hpp_io_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-
-
-/* Probe a list of addresses for an HP LAN+ adaptor.
- This routine is almost boilerplate. */
-
-static int __init do_hpp_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return hpp_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; hpplus_portlist[i]; i++) {
- if (hpp_probe1(dev, hpplus_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init hp_plus_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_hpp_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops hpp_netdev_ops = {
- .ndo_open = hpp_open,
- .ndo_stop = hpp_close,
- .ndo_start_xmit = eip_start_xmit,
- .ndo_tx_timeout = eip_tx_timeout,
- .ndo_get_stats = eip_get_stats,
- .ndo_set_rx_mode = eip_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = eip_poll,
-#endif
-};
-
-
-/* Do the interesting part of the probe at a single address. */
-static int __init hpp_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval;
- unsigned char checksum = 0;
- const char name[] = "HP-PC-LAN+";
- int mem_start;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Check for the HP+ signature, 50 48 0x 53. */
- if (inw(ioaddr + HP_ID) != 0x4850 ||
- (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) {
- retval = -ENODEV;
- goto out;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s at %#3x, ", dev->name, name, ioaddr);
-
- /* Retrieve and checksum the station address. */
- outw(MAC_Page, ioaddr + HP_PAGING);
-
- for(i = 0; i < ETH_ALEN; i++) {
- unsigned char inval = inb(ioaddr + 8 + i);
- dev->dev_addr[i] = inval;
- checksum += inval;
- }
- checksum += inb(ioaddr + 14);
-
- printk("%pM", dev->dev_addr);
-
- if (checksum != 0xff) {
- printk(" bad checksum %2.2x.\n", checksum);
- retval = -ENODEV;
- goto out;
- } else {
- /* Point at the Software Configuration Flags. */
- outw(ID_Page, ioaddr + HP_PAGING);
- printk(" ID %4.4x", inw(ioaddr + 12));
- }
-
- /* Read the IRQ line. */
- outw(HW_Page, ioaddr + HP_PAGING);
- {
- int irq = inb(ioaddr + 13) & 0x0f;
- int option = inw(ioaddr + HPP_OPTION);
-
- dev->irq = irq;
- if (option & MemEnable) {
- mem_start = inw(ioaddr + 9) << 8;
- printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
- } else {
- mem_start = 0;
- printk(", IRQ %d, programmed-I/O mode.\n", irq);
- }
- }
-
- /* Set the wrap registers for string I/O reads. */
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
-
- /* Set the base address to point to the NIC, not the "real" base! */
- dev->base_addr = ioaddr + NIC_OFFSET;
-
- dev->netdev_ops = &hpp_netdev_ops;
-
- ei_status.name = name;
- ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
- ei_status.tx_start_page = HP_START_PG;
- ei_status.rx_start_page = HP_START_PG + TX_PAGES/2;
- ei_status.stop_page = HP_STOP_PG;
-
- ei_status.reset_8390 = &hpp_reset_8390;
- ei_status.block_input = &hpp_io_block_input;
- ei_status.block_output = &hpp_io_block_output;
- ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
-
- /* Check if the memory_enable flag is set in the option register. */
- if (mem_start) {
- ei_status.block_input = &hpp_mem_block_input;
- ei_status.block_output = &hpp_mem_block_output;
- ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
- dev->mem_start = mem_start;
- ei_status.mem = ioremap(mem_start,
- (HP_STOP_PG - HP_START_PG)*256);
- if (!ei_status.mem) {
- retval = -ENOMEM;
- goto out;
- }
- ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256;
- dev->mem_end = ei_status.rmem_end
- = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
- }
-
- outw(Perf_Page, ioaddr + HP_PAGING);
- NS8390p_init(dev, 0);
- /* Leave the 8390 and HP chip reset. */
- outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- iounmap(ei_status.mem);
-out:
- release_region(ioaddr, HP_IO_EXTENT);
- return retval;
-}
-
-static int
-hpp_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg;
- int retval;
-
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
- return retval;
- }
-
- /* Reset the 8390 and HP chip. */
- option_reg = inw(ioaddr + HPP_OPTION);
- outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
- udelay(5);
- /* Unreset the board and enable interrupts. */
- outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
-
- /* Set the wrap registers for programmed-I/O operation. */
- outw(HW_Page, ioaddr + HP_PAGING);
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
-
- /* Select the operational page. */
- outw(Perf_Page, ioaddr + HP_PAGING);
-
- return eip_open(dev);
-}
-
-static int
-hpp_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- free_irq(dev->irq, dev);
- eip_close(dev);
- outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
- ioaddr + HPP_OPTION);
-
- return 0;
-}
-
-static void
-hpp_reset_8390(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
-
- outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
- /* Pause a few cycles for the hardware reset to take place. */
- udelay(5);
- ei_status.txing = 0;
- outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
-
- udelay(5);
-
-
- if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
- printk("%s: hp_reset_8390() did not complete.\n", dev->name);
-
- if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
-}
-
-/* The programmed-I/O version of reading the 4 byte 8390 specific header.
- Note that transfer with the EtherTwist+ must be on word boundaries. */
-
-static void
-hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
-
- outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
- insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. */
-
-static void
-hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- char *buf = skb->data;
-
- outw(ring_offset, ioaddr + HPP_IN_ADDR);
- insw(ioaddr + HP_DATAPORT, buf, count>>1);
- if (count & 0x01)
- buf[count-1] = inw(ioaddr + HP_DATAPORT);
-}
-
-/* The corresponding shared memory versions of the above 2 functions. */
-
-static void
-hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
- memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr));
- outw(option_reg, ioaddr + HPP_OPTION);
- hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */
-}
-
-static void
-hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw(ring_offset, ioaddr + HPP_IN_ADDR);
-
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
-
- /* Caution: this relies on get_8390_hdr() rounding up count!
- Also note that we *can't* use eth_io_copy_and_sum() because
- it will not always copy "count" bytes (e.g. padded IP). */
-
- memcpy_fromio(skb->data, ei_status.mem, count);
- outw(option_reg, ioaddr + HPP_OPTION);
-}
-
-/* A special note: we *must* always transfer >=16 bit words.
- It's always safe to round up, so we do. */
-static void
-hpp_io_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
- outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
-}
-
-static void
-hpp_mem_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
- memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
- outw(option_reg, ioaddr + HPP_OPTION);
-}
-
-
-#ifdef MODULE
-#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
-static struct net_device *dev_hpp[MAX_HPP_CARDS];
-static int io[MAX_HPP_CARDS];
-static int irq[MAX_HPP_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O port address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected");
-MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (do_hpp_probe(dev) == 0) {
- dev_hpp[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: hpp_close() handles free_irq */
- iounmap(ei_status.mem);
- release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
- struct net_device *dev = dev_hpp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
deleted file mode 100644
index 37fa89aa4578..000000000000
--- a/drivers/net/ethernet/8390/hp.c
+++ /dev/null
@@ -1,438 +0,0 @@
-/* hp.c: A HP LAN ethernet driver for linux. */
-/*
- Written 1993-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is a driver for the HP PC-LAN adaptors.
-
- Sources:
- The Crynwr packet driver.
-*/
-
-static const char version[] =
- "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "hp"
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int hppclan_portlist[] __initdata =
-{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0};
-
-#define HP_IO_EXTENT 32
-
-#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */
-#define HP_ID 0x07
-#define HP_CONFIGURE 0x08 /* Configuration register. */
-#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */
-#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */
-#define HP_DATAON 0x10 /* Turn on dataport */
-#define NIC_OFFSET 0x10 /* Offset the 8390 registers. */
-
-#define HP_START_PG 0x00 /* First page of TX buffer */
-#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */
-#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */
-
-static int hp_probe1(struct net_device *dev, int ioaddr);
-
-static void hp_reset_8390(struct net_device *dev);
-static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void hp_block_input(struct net_device *dev, int count,
- struct sk_buff *skb , int ring_offset);
-static void hp_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-
-static void hp_init_card(struct net_device *dev);
-
-/* The map from IRQ number to HP_CONFIGURE register setting. */
-/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */
-static char irqmap[16] __initdata= { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0};
-
-
-/* Probe for an HP LAN adaptor.
- Also initialize the card and fill in STATION_ADDR with the station
- address. */
-
-static int __init do_hp_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return hp_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; hppclan_portlist[i]; i++) {
- if (hp_probe1(dev, hppclan_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init hp_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_hp_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init hp_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval, board_id, wordmode;
- const char *name;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Check for the HP physical address, 08 00 09 xx xx xx. */
- /* This really isn't good enough: we may pick up HP LANCE boards
- also! Avoid the lance 0x5757 signature. */
- if (inb(ioaddr) != 0x08
- || inb(ioaddr+1) != 0x00
- || inb(ioaddr+2) != 0x09
- || inb(ioaddr+14) == 0x57) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Set up the parameters based on the board ID.
- If you have additional mappings, please mail them to me -djb. */
- if ((board_id = inb(ioaddr + HP_ID)) & 0x80) {
- name = "HP27247";
- wordmode = 1;
- } else {
- name = "HP27250";
- wordmode = 0;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
-
- for(i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
-
- printk(" %pM", dev->dev_addr);
-
- /* Snarf the interrupt now. Someday this could be moved to open(). */
- if (dev->irq < 2) {
- static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
- static const int irq_8list[] = { 7, 5, 3, 4, 9, 0};
- const int *irqp = wordmode ? irq_16list : irq_8list;
- do {
- int irq = *irqp;
- if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
- unsigned long cookie = probe_irq_on();
- /* Twinkle the interrupt, and check if it's seen. */
- outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
- outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
- if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */
- && request_irq (irq, eip_interrupt, 0, DRV_NAME, dev) == 0) {
- printk(" selecting IRQ %d.\n", irq);
- dev->irq = *irqp;
- break;
- }
- }
- } while (*++irqp);
- if (*irqp == 0) {
- printk(" no free IRQ lines.\n");
- retval = -EBUSY;
- goto out;
- }
- } else {
- if (dev->irq == 2)
- dev->irq = 9;
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev))) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- goto out;
- }
- }
-
- /* Set the base address to point to the NIC, not the "real" base! */
- dev->base_addr = ioaddr + NIC_OFFSET;
- dev->netdev_ops = &eip_netdev_ops;
-
- ei_status.name = name;
- ei_status.word16 = wordmode;
- ei_status.tx_start_page = HP_START_PG;
- ei_status.rx_start_page = HP_START_PG + TX_PAGES;
- ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
-
- ei_status.reset_8390 = hp_reset_8390;
- ei_status.get_8390_hdr = hp_get_8390_hdr;
- ei_status.block_input = hp_block_input;
- ei_status.block_output = hp_block_output;
- hp_init_card(dev);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, HP_IO_EXTENT);
- return retval;
-}
-
-static void
-hp_reset_8390(struct net_device *dev)
-{
- int hp_base = dev->base_addr - NIC_OFFSET;
- int saved_config = inb_p(hp_base + HP_CONFIGURE);
-
- if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
- outb_p(0x00, hp_base + HP_CONFIGURE);
- ei_status.txing = 0;
- /* Pause just a few cycles for the hardware reset to take place. */
- udelay(5);
-
- outb_p(saved_config, hp_base + HP_CONFIGURE);
- udelay(5);
-
- if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
- printk("%s: hp_reset_8390() did not complete.\n", dev->name);
-
- if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
-}
-
-static void
-hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
- outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
- outb_p(0, nic_base + EN0_RCNTHI);
- outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
- outb_p(ring_page, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base);
-
- if (ei_status.word16)
- insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
- else
- insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. If you are
- porting to a new ethercard look at the packet driver source for hints.
- The HP LAN doesn't use shared memory -- we put the packet
- out through the "remote DMA" dataport. */
-
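Both block-transfer routines below follow the usual 8390 remote-DMA sequence: load the byte count and start address, issue a remote read (or write) command, then stream the data through the HP dataport. The real routines additionally gate the dataport with HP_DATAON in HP_CONFIGURE and handle an odd trailing byte. An annotated sketch of the read direction, assuming the register definitions from 8390.h (hp_remote_read is an illustrative name):

static void hp_remote_read(struct net_device *dev, void *buf, int count,
			   int ring_offset)
{
	int nic_base = dev->base_addr;

	outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base);
	outb_p(count & 0xff, nic_base + EN0_RCNTLO);	   /* byte count */
	outb_p(count >> 8, nic_base + EN0_RCNTHI);
	outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); /* ring address */
	outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
	outb_p(E8390_RREAD + E8390_START, nic_base);	   /* start remote read */

	/* The data now streams out of the HP dataport. */
	if (ei_status.word16)
		insw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count >> 1);
	else
		insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
}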
-static void
-hp_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
- int xfer_count = count;
- char *buf = skb->data;
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
- outb_p(count & 0xff, nic_base + EN0_RCNTLO);
- outb_p(count >> 8, nic_base + EN0_RCNTHI);
- outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
- outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base);
- if (ei_status.word16) {
- insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++;
- } else {
- insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
- }
- /* This is for the ALPHA version only, remove for later releases. */
- if (ei_debug > 0) { /* DMA termination address check... */
- int high = inb_p(nic_base + EN0_RSARHI);
- int low = inb_p(nic_base + EN0_RSARLO);
- int addr = (high << 8) + low;
- /* Check only the lower 8 bits so we can ignore ring wrap. */
- if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff))
- printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
- }
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-static void
-hp_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
- if (ei_status.word16 && (count & 0x01))
- count++;
- /* We should already be in page 0, but to be safe... */
- outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base);
-
-#ifdef NE8390_RW_BUGFIX
- /* Handle the read-before-write bug the same way as the
- Crynwr packet driver -- the NatSemi method doesn't work. */
- outb_p(0x42, nic_base + EN0_RCNTLO);
- outb_p(0, nic_base + EN0_RCNTHI);
- outb_p(0xff, nic_base + EN0_RSARLO);
- outb_p(0x00, nic_base + EN0_RSARHI);
-#define NE_CMD 0x00
- outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- /* Make certain that the dummy read has occurred. */
- inb_p(0x61);
- inb_p(0x61);
-#endif
-
- outb_p(count & 0xff, nic_base + EN0_RCNTLO);
- outb_p(count >> 8, nic_base + EN0_RCNTHI);
- outb_p(0x00, nic_base + EN0_RSARLO);
- outb_p(start_page, nic_base + EN0_RSARHI);
-
- outb_p(E8390_RWRITE+E8390_START, nic_base);
- if (ei_status.word16) {
- /* Use the 'rep' sequence for 16 bit boards. */
- outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1);
- } else {
- outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
- }
-
- /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */
-
- /* This is for the ALPHA version only, remove for later releases. */
- if (ei_debug > 0) { /* DMA termination address check... */
- int high = inb_p(nic_base + EN0_RSARHI);
- int low = inb_p(nic_base + EN0_RSARLO);
- int addr = (high << 8) + low;
- if ((start_page << 8) + count != addr)
- printk("%s: TX Transfer address mismatch, %#4.4x vs. %#4.4x.\n",
- dev->name, (start_page << 8) + count, addr);
- }
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-/* This function resets the ethercard if something screws up. */
-static void __init
-hp_init_card(struct net_device *dev)
-{
- int irq = dev->irq;
- NS8390p_init(dev, 0);
- outb_p(irqmap[irq&0x0f] | HP_RUN,
- dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
-}
-
-#ifdef MODULE
-#define MAX_HP_CARDS 4 /* Max number of HP cards per module */
-static struct net_device *dev_hp[MAX_HP_CARDS];
-static int io[MAX_HP_CARDS];
-static int irq[MAX_HP_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_DESCRIPTION("HP PC-LAN ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (do_hp_probe(dev) == 0) {
- dev_hp[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
- struct net_device *dev = dev_hp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
deleted file mode 100644
index 479409bf2e3c..000000000000
--- a/drivers/net/ethernet/8390/lne390.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- lne390.c
-
- Linux driver for Mylex LNE390 EISA Network Adapter
-
- Copyright (C) 1996-1998, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) Based upon framework of es3210 driver.
- 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
- 3) Russ Nelson's asm packet driver provided additional info.
- 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
-
- The LNE390 is an EISA shared memory NS8390 implementation. Note
- that all memory copies to/from the board must be 32bit transfers.
- There are two versions of the card: the lne390a and the lne390b.
- Going by the EISA cfg files, the "a" has jumpers to select between
- BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU.
- The shared memory address selection is also slightly different.
- Note that shared memory address > 1MB are supported with this driver.
-
- You can try <http://www.mylex.com> if you want more info, as I've
- never even seen one of these cards. :)
-
- Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/01
- - get rid of check_region
- - no need to check if dev == NULL in lne390_probe1
-*/
-
-static const char *version =
- "lne390.c: Driver revision v0.99.1, 01/09/2000\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "lne390"
-
-static int lne390_probe1(struct net_device *dev, int ioaddr);
-
-static void lne390_reset_8390(struct net_device *dev);
-
-static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
-
-#define LNE390_START_PG 0x00 /* First page of TX buffer */
-#define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-#define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */
-#define LNE390_IO_EXTENT 0x20
-#define LNE390_SA_PROM 0x16 /* Start of e'net addr. */
-#define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */
-#define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
-
-#define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */
-#define LNE390_ADDR1 0x80
-#define LNE390_ADDR2 0xe5
-
-#define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */
-#define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */
-
-#define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
-#define LNE390_CFG2 0xc90
-
-/*
- * You can OR any of the following bits together and assign it
- * to LNE390_DEBUG to get verbose driver info during operation.
- * Currently only the probe one is implemented.
- */
-
-#define LNE390_D_PROBE 0x01
-#define LNE390_D_RX_PKT 0x02
-#define LNE390_D_TX_PKT 0x04
-#define LNE390_D_IRQ 0x08
-
-#define LNE390_DEBUG 0
-
-static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
-static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
-
-/*
- * Probe for the card. The best way is to read the EISA ID if it
- * is known. Then we can check the prefix of the station address
- * PROM for a match against the value assigned to Mylex.
- */
-
-static int __init do_lne390_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
- int ret;
-
- if (ioaddr > 0x1ff) { /* Check a single specified location. */
- if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
- return -EBUSY;
- ret = lne390_probe1(dev, ioaddr);
- if (ret)
- release_region(ioaddr, LNE390_IO_EXTENT);
- return ret;
- }
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if (!EISA_bus) {
-#if LNE390_DEBUG & LNE390_D_PROBE
- printk("lne390-debug: Not an EISA bus. Not probing high ports.\n");
-#endif
- return -ENXIO;
- }
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
- continue;
- if (lne390_probe1(dev, ioaddr) == 0)
- return 0;
- release_region(ioaddr, LNE390_IO_EXTENT);
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init lne390_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_lne390_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init lne390_probe1(struct net_device *dev, int ioaddr)
-{
- int i, revision, ret;
- unsigned long eisa_id;
-
- if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV;
-
-#if LNE390_DEBUG & LNE390_D_PROBE
- printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT));
- printk("lne390-debug: config regs: %#x %#x\n",
- inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2));
-#endif
-
-
-/* Check the EISA ID of the card. */
- eisa_id = inl(ioaddr + LNE390_ID_PORT);
- if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) {
- return -ENODEV;
- }
-
- revision = (eisa_id >> 24) & 0x01; /* 0 = rev A, 1 rev B */
-
-#if 0
-/* Check the Mylex vendor ID as well. Not really required. */
- if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0
- || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
- || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
- printk("lne390.c: card not found");
- for (i = 0; i < ETH_ALEN; i++)
- printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
- printk(" (invalid prefix).\n");
- return -ENODEV;
- }
-#endif
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
- printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
- 0xa+revision, ioaddr/0x1000, dev->dev_addr);
-
- printk("lne390.c: ");
-
- /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
- if (dev->irq == 0) {
- unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3;
- dev->irq = irq_map[irq_reg & 0x07];
- printk("using");
- } else {
- /* This is useless unless we reprogram the card here too */
- if (dev->irq == 2) dev->irq = 9; /* Doh! */
- printk("assigning");
- }
- printk(" IRQ %d,", dev->irq);
-
- if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- return ret;
- }
-
- if (dev->mem_start == 0) {
- unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07;
-
- if (revision) /* LNE390B */
- dev->mem_start = shmem_mapB[mem_reg] * 0x10000;
- else /* LNE390A */
- dev->mem_start = shmem_mapA[mem_reg] * 0x10000;
- printk(" using ");
- } else {
- /* Should check for value in shmem_map and reprogram the card to use it */
- dev->mem_start &= 0xfff0000;
- printk(" assigning ");
- }
-
- printk("%dkB memory at physical address %#lx\n",
- LNE390_STOP_PG/4, dev->mem_start);
-
- /*
- BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
- the card mem within the region covered by `normal' RAM !!!
-
- ioremap() will fail in that case.
- */
- ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n");
- printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n");
- printk(KERN_ERR "lne390.c: Driver NOT installed.\n");
- ret = -EAGAIN;
- goto cleanup;
- }
- printk("lne390.c: remapped %dkB card memory to virtual address %p\n",
- LNE390_STOP_PG/4, ei_status.mem);
-
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256;
-
- /* The 8390 offset is zero for the LNE390 */
- dev->base_addr = ioaddr;
-
- ei_status.name = "LNE390";
- ei_status.tx_start_page = LNE390_START_PG;
- ei_status.rx_start_page = LNE390_START_PG + TX_PAGES;
- ei_status.stop_page = LNE390_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &lne390_reset_8390;
- ei_status.block_input = &lne390_block_input;
- ei_status.block_output = &lne390_block_output;
- ei_status.get_8390_hdr = &lne390_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
- NS8390_init(dev, 0);
-
- ret = register_netdev(dev);
- if (ret)
- goto unmap;
- return 0;
-unmap:
- if (ei_status.reg0)
- iounmap(ei_status.mem);
-cleanup:
- free_irq(dev->irq, dev);
- return ret;
-}
-
-/*
- * Reset as per the packet driver method. Judging by the EISA cfg
- * file, this just toggles the "Board Enable" bits (bit 2 and 0).
- */
-
-static void lne390_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
-
- outb(0x04, ioaddr + LNE390_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name);
-
- mdelay(2);
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + LNE390_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/*
- * Note: In the following three functions is the implicit assumption
- * that the associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via lne390_get_8390_hdr() above.
- */
-
-static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8);
-
- if (ring_offset + count > (LNE390_STOP_PG<<8)) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = (LNE390_STOP_PG<<8) - ring_offset;
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + (TX_PAGES<<8), count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void lne390_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-
-#ifdef MODULE
-#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */
-static struct net_device *dev_lne[MAX_LNE_CARDS];
-static int io[MAX_LNE_CARDS];
-static int irq[MAX_LNE_CARDS];
-static int mem[MAX_LNE_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "memory base address(es)");
-MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- if (do_lne390_probe(dev) == 0) {
- dev_lne[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, LNE390_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
- struct net_device *dev = dev_lne[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
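[Annotation] lne390_block_input() above handles the one awkward case on shared-memory cards: a packet whose data wraps past LNE390_STOP_PG back to the start of the receive ring. The sketch below illustrates only that arithmetic, using plain memcpy on ordinary buffers in place of memcpy_fromio on the ioremap'ed window; TX_PAGES is given an illustrative value here, the real constant comes from 8390.h.

#include <string.h>

#define START_PG 0x00
#define STOP_PG  0x80
#define TX_PAGES 6              /* illustrative value only */

/* Copy 'count' bytes starting at 'ring_offset' out of the shared-memory
 * ring 'mem', splitting the copy when it runs past the end of the ring. */
static void ring_copy(unsigned char *dst, const unsigned char *mem,
                      int ring_offset, int count)
{
        const unsigned char *src = mem + ring_offset - (START_PG << 8);

        if (ring_offset + count > (STOP_PG << 8)) {
                int semi_count = (STOP_PG << 8) - ring_offset;

                memcpy(dst, src, semi_count);
                count -= semi_count;
                /* the remainder continues at the first RX page, just past the TX pages */
                memcpy(dst + semi_count, mem + (TX_PAGES << 8), count);
        } else {
                memcpy(dst, src, count);
        }
}

int main(void)
{
        static unsigned char ring[STOP_PG << 8];
        unsigned char pkt[600];

        /* a 600-byte packet starting 100 bytes before the end of the ring */
        ring_copy(pkt, ring, (STOP_PG << 8) - 100, (int)sizeof(pkt));
        return 0;
}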
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index c0c127913dec..587a885de259 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -374,7 +374,6 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
NS8390_init(dev, 0);
memcpy(dev->dev_addr, SA_prom, dev->addr_len);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
i = register_netdev(dev);
if (i)
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
deleted file mode 100644
index ebcdb52ec739..000000000000
--- a/drivers/net/ethernet/8390/ne3210.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- ne3210.c
-
- Linux driver for Novell NE3210 EISA Network Adapter
-
- Copyright (C) 1998, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32)
- 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
- 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file
-
- The NE3210 is an EISA shared memory NS8390 implementation. Shared
- memory address > 1MB should work with this driver.
-
- Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched
- around (or perhaps there are some defective/backwards cards ???)
-
- This driver WILL NOT WORK FOR THE NE3200 - it is completely different
- and does not use an 8390 at all.
-
- Updated to EISA probing API 5/2003 by Marc Zyngier.
-*/
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/mm.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "ne3210"
-
-static void ne3210_reset_8390(struct net_device *dev);
-
-static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
-
-#define NE3210_START_PG 0x00 /* First page of TX buffer */
-#define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-#define NE3210_IO_EXTENT 0x20
-#define NE3210_SA_PROM 0x16 /* Start of e'net addr. */
-#define NE3210_RESET_PORT 0xc84
-#define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
-
-#define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */
-#define NE3210_ADDR1 0x00
-#define NE3210_ADDR2 0x1b
-
-#define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
-#define NE3210_CFG2 0xc90
-#define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1)
-
-/*
- * You can OR any of the following bits together and assign it
- * to NE3210_DEBUG to get verbose driver info during operation.
- * Currently only the probe one is implemented.
- */
-
-#define NE3210_D_PROBE 0x01
-#define NE3210_D_RX_PKT 0x02
-#define NE3210_D_TX_PKT 0x04
-#define NE3210_D_IRQ 0x08
-
-#define NE3210_DEBUG 0x0
-
-static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
-static const char * const ifmap[] __initconst = {"UTP", "?", "BNC", "AUI"};
-static int ifmap_val[] __initdata = {
- IF_PORT_10BASET,
- IF_PORT_UNKNOWN,
- IF_PORT_10BASE2,
- IF_PORT_AUI,
-};
-
-static int __init ne3210_eisa_probe (struct device *device)
-{
- unsigned long ioaddr, phys_mem;
- int i, retval, port_index;
- struct eisa_device *edev = to_eisa_device (device);
- struct net_device *dev;
-
- /* Allocate dev->priv and fill in 8390 specific dev fields. */
- if (!(dev = alloc_ei_netdev ())) {
- printk ("ne3210.c: unable to allocate memory for dev!\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(dev, device);
- dev_set_drvdata(device, dev);
- ioaddr = edev->base_addr;
-
- if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) {
- retval = -EBUSY;
- goto out;
- }
-
- if (!request_region(ioaddr + NE3210_CFG1,
- NE3210_CFG_EXTENT, DRV_NAME)) {
- retval = -EBUSY;
- goto out1;
- }
-
-#if NE3210_DEBUG & NE3210_D_PROBE
- printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig);
- printk("ne3210-debug: config regs: %#x %#x\n",
- inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2));
-#endif
-
- port_index = inb(ioaddr + NE3210_CFG2) >> 6;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
- printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
- edev->slot, ifmap[port_index], dev->dev_addr);
-
- /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
- dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07];
- printk("ne3210.c: using IRQ %d, ", dev->irq);
-
- retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- goto out2;
- }
-
- phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000;
-
- /*
- BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
- the card mem within the region covered by `normal' RAM !!!
- */
- if (phys_mem > 1024*1024) { /* phys addr > 1MB */
- if (phys_mem < virt_to_phys(high_memory)) {
- printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
- printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
- printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
- (u64)virt_to_phys(high_memory));
- printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
- retval = -EINVAL;
- goto out3;
- }
- }
-
- if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) {
- printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n",
- phys_mem);
- goto out3;
- }
-
- printk("%dkB memory at physical address %#lx\n",
- NE3210_STOP_PG/4, phys_mem);
-
- ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n");
- printk(KERN_ERR "ne3210.c: Driver NOT installed.\n");
- retval = -EAGAIN;
- goto out4;
- }
- printk("ne3210.c: remapped %dkB card memory to virtual address %p\n",
- NE3210_STOP_PG/4, ei_status.mem);
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256;
-
- /* The 8390 offset is zero for the NE3210 */
- dev->base_addr = ioaddr;
-
- ei_status.name = "NE3210";
- ei_status.tx_start_page = NE3210_START_PG;
- ei_status.rx_start_page = NE3210_START_PG + TX_PAGES;
- ei_status.stop_page = NE3210_STOP_PG;
- ei_status.word16 = 1;
- ei_status.priv = phys_mem;
-
- if (ei_debug > 0)
- printk("ne3210 loaded.\n");
-
- ei_status.reset_8390 = &ne3210_reset_8390;
- ei_status.block_input = &ne3210_block_input;
- ei_status.block_output = &ne3210_block_output;
- ei_status.get_8390_hdr = &ne3210_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
-
- dev->if_port = ifmap_val[port_index];
-
- if ((retval = register_netdev (dev)))
- goto out5;
-
- NS8390_init(dev, 0);
- return 0;
-
- out5:
- iounmap(ei_status.mem);
- out4:
- release_mem_region (phys_mem, NE3210_STOP_PG*0x100);
- out3:
- free_irq (dev->irq, dev);
- out2:
- release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
- out1:
- release_region (ioaddr, NE3210_IO_EXTENT);
- out:
- free_netdev (dev);
-
- return retval;
-}
-
-static int ne3210_eisa_remove(struct device *device)
-{
- struct net_device *dev = dev_get_drvdata(device);
- unsigned long ioaddr = to_eisa_device (device)->base_addr;
-
- unregister_netdev (dev);
- iounmap(ei_status.mem);
- release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100);
- free_irq (dev->irq, dev);
- release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
- release_region (ioaddr, NE3210_IO_EXTENT);
- free_netdev (dev);
-
- return 0;
-}
-
-/*
- * Reset by toggling the "Board Enable" bits (bit 2 and 0).
- */
-
-static void ne3210_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
-
- outb(0x04, ioaddr + NE3210_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name);
-
- mdelay(2);
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + NE3210_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/*
- * Note: In the following three functions is the implicit assumption
- * that the associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via ne3210_get_8390_hdr() above.
- */
-
-static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256;
-
- if (ring_offset + count > NE3210_STOP_PG*256) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = NE3210_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + TX_PAGES*256, count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, start, count);
- }
-}
-
-static void ne3210_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-static struct eisa_device_id ne3210_ids[] = {
- { "EGL0101" },
- { "NVL1801" },
- { "" },
-};
-MODULE_DEVICE_TABLE(eisa, ne3210_ids);
-
-static struct eisa_driver ne3210_eisa_driver = {
- .id_table = ne3210_ids,
- .driver = {
- .name = "ne3210",
- .probe = ne3210_eisa_probe,
- .remove = ne3210_eisa_remove,
- },
-};
-
-MODULE_DESCRIPTION("NE3210 EISA Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(eisa, ne3210_ids);
-
-static int ne3210_init(void)
-{
- return eisa_driver_register (&ne3210_eisa_driver);
-}
-
-static void ne3210_cleanup(void)
-{
- eisa_driver_unregister (&ne3210_eisa_driver);
-}
-
-module_init (ne3210_init);
-module_exit (ne3210_cleanup);
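[Annotation] The ne3210 removal above also shows the EISA probing API the driver had been converted to in 2003: an eisa_device_id table matched by the EISA core, with probe/remove callbacks hung off a struct eisa_driver. A stripped-down skeleton of that registration pattern follows; the "EXMPL01" signature and example_* names are hypothetical.

#include <linux/module.h>
#include <linux/device.h>
#include <linux/eisa.h>

static int example_eisa_probe(struct device *device)
{
        struct eisa_device *edev = to_eisa_device(device);

        dev_info(device, "card at EISA base %#lx\n", edev->base_addr);
        return 0;
}

static int example_eisa_remove(struct device *device)
{
        return 0;
}

static struct eisa_device_id example_eisa_ids[] = {
        { "EXMPL01" },          /* hypothetical EISA signature */
        { "" },
};
MODULE_DEVICE_TABLE(eisa, example_eisa_ids);

static struct eisa_driver example_eisa_driver = {
        .id_table = example_eisa_ids,
        .driver = {
                .name   = "example",
                .probe  = example_eisa_probe,
                .remove = example_eisa_remove,
        },
};

static int __init example_init(void)
{
        return eisa_driver_register(&example_eisa_driver);
}

static void __exit example_exit(void)
{
        eisa_driver_unregister(&example_eisa_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");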
diff --git a/drivers/net/ethernet/8390/smc-ultra32.c b/drivers/net/ethernet/8390/smc-ultra32.c
deleted file mode 100644
index 923e42aedcfd..000000000000
--- a/drivers/net/ethernet/8390/smc-ultra32.c
+++ /dev/null
@@ -1,463 +0,0 @@
-/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux.
-
-Sources:
-
- This driver is based on (cloned from) the ISA SMC Ultra driver
- written by Donald Becker. Modifications to support the EISA
- version of the card by Paul Gortmaker and Leonard N. Zubkoff.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
-Theory of Operation:
-
- The SMC Ultra32C card uses the SMC 83c790 chip which is also
- found on the ISA SMC Ultra cards. It has a shared memory mode of
- operation that makes it similar to the ISA version of the card.
- The main difference is that the EISA card has 32KB of RAM, but
- only an 8KB window into that memory. The EISA card also can be
- set for a bus-mastering mode of operation via the ECU, but that
- is not (and probably will never be) supported by this driver.
- The ECU should be run to enable shared memory and to disable the
- bus-mastering feature for use with linux.
-
- By programming the 8390 to use only 8KB RAM, the modifications
- to the ISA driver can be limited to the probe and initialization
- code. This allows easy integration of EISA support into the ISA
- driver. However, the driver development kit from SMC provided the
- register information for sliding the 8KB window, and hence the 8390
- is programmed to use the full 32KB RAM.
-
- Unfortunately this required code changes outside the probe/init
- routines, and thus we decided to separate the EISA driver from
- the ISA one. In this way, ISA users don't end up with a larger
- driver due to the EISA code, and EISA users don't end up with a
- larger driver due to the ISA EtherEZ PIO code. The driver is
- similar to the 3c503/16 driver, in that the window must be set
- back to the 1st 8KB of space for access to the two 8390 Tx slots.
-
- In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to
- be a limiting factor, since the EISA bus could get packets off
- the card fast enough, but having the use of lots of RAM as Rx
- space is extra insurance if interrupt latencies become excessive.
-
-*/
-
-static const char *version = "smc-ultra32.c: 06/97 v1.00\n";
-
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "smc-ultra32"
-
-static int ultra32_probe1(struct net_device *dev, int ioaddr);
-static int ultra32_open(struct net_device *dev);
-static void ultra32_reset_8390(struct net_device *dev);
-static void ultra32_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void ultra32_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ultra32_block_output(struct net_device *dev, int count,
- const unsigned char *buf,
- const int start_page);
-static int ultra32_close(struct net_device *dev);
-
-#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */
-#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */
-#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */
-#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
-#define ULTRA32_IO_EXTENT 32
-#define EN0_ERWCNT 0x08 /* Early receive warning count. */
-
-/*
- * Defines that apply only to the Ultra32 EISA card. Note that
- * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates
- * into an EISA ID of 0x1080A34D
- */
-#define ULTRA32_BASE 0xca0
-#define ULTRA32_ID 0x1080a34d
-#define ULTRA32_IDPORT (-0x20) /* 0xc80 */
-/* Config regs 1->7 from the EISA !SMC8010.CFG file. */
-#define ULTRA32_CFG1 0x04 /* 0xca4 */
-#define ULTRA32_CFG2 0x05 /* 0xca5 */
-#define ULTRA32_CFG3 (-0x18) /* 0xc88 */
-#define ULTRA32_CFG4 (-0x17) /* 0xc89 */
-#define ULTRA32_CFG5 (-0x16) /* 0xc8a */
-#define ULTRA32_CFG6 (-0x15) /* 0xc8b */
-#define ULTRA32_CFG7 0x0d /* 0xcad */
-
-static void cleanup_card(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET;
- /* NB: ultra32_close_card() does free_irq */
- release_region(ioaddr, ULTRA32_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-/* Probe for the Ultra32. This looks like a 8013 with the station
- address PROM at I/O ports <base>+8 to <base>+13, with a checksum
- following.
-*/
-
-struct net_device * __init ultra32_probe(int unit)
-{
- struct net_device *dev;
- int base;
- int irq;
- int err = -ENODEV;
-
- if (!EISA_bus)
- return ERR_PTR(-ENODEV);
-
- dev = alloc_ei_netdev();
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- irq = dev->irq;
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (base = 0x1000 + ULTRA32_BASE; base < 0x9000; base += 0x1000) {
- if (ultra32_probe1(dev, base) == 0)
- break;
- dev->irq = irq;
- }
- if (base >= 0x9000)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-
-static const struct net_device_ops ultra32_netdev_ops = {
- .ndo_open = ultra32_open,
- .ndo_stop = ultra32_close,
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
-{
- int i, edge, media, retval;
- int checksum = 0;
- const char *model_name;
- static unsigned version_printed;
- /* Values from various config regs. */
- unsigned char idreg;
- unsigned char reg4;
- const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"};
-
- if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (inb(ioaddr + ULTRA32_IDPORT) == 0xff ||
- inl(ioaddr + ULTRA32_IDPORT) != ULTRA32_ID) {
- retval = -ENODEV;
- goto out;
- }
-
- media = inb(ioaddr + ULTRA32_CFG7) & 0x03;
- edge = inb(ioaddr + ULTRA32_CFG5) & 0x08;
- printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n",
- ioaddr >> 12, ifmap[media],
- (edge ? "Edge Triggered" : "Level Sensitive"));
-
- idreg = inb(ioaddr + 7);
- reg4 = inb(ioaddr + 4) & 0x7f;
-
- /* Check the ID nibble. */
- if ((idreg & 0xf0) != 0x20) { /* SMC Ultra */
- retval = -ENODEV;
- goto out;
- }
-
- /* Select the station address register set. */
- outb(reg4, ioaddr + 4);
-
- for (i = 0; i < 8; i++)
- checksum += inb(ioaddr + 8 + i);
- if ((checksum & 0xff) != 0xff) {
- retval = -ENODEV;
- goto out;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- model_name = "SMC Ultra32";
-
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
-
- printk("%s: %s at 0x%X, %pM",
- dev->name, model_name, ioaddr, dev->dev_addr);
-
- /* Switch from the station address to the alternate register set and
- read the useful registers there. */
- outb(0x80 | reg4, ioaddr + 4);
-
- /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
- outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
-
- /* Reset RAM addr. */
- outb(0x00, ioaddr + 0x0b);
-
- /* Switch back to the station address register set so that the
- MS-DOS driver can find the card after a warm boot. */
- outb(reg4, ioaddr + 4);
-
- if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) {
- printk("\nsmc-ultra32: Card RAM is disabled! "
- "Run EISA config utility.\n");
- retval = -ENODEV;
- goto out;
- }
- if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0)
- printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. "
- "Run EISA config utility.\n");
-
- if (dev->irq < 2) {
- unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
- int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07];
- if (irq == 0) {
- printk(", failed to detect IRQ line.\n");
- retval = -EAGAIN;
- goto out;
- }
- dev->irq = irq;
- }
-
- /* The 8390 isn't at the base address, so fake the offset */
- dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET;
-
- /* Save RAM address in the unused reg0 to avoid excess inb's. */
- ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc;
-
- dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11);
-
- ei_status.name = model_name;
- ei_status.word16 = 1;
- ei_status.tx_start_page = 0;
- ei_status.rx_start_page = TX_PAGES;
- /* All Ultra32 cards have 32KB memory with an 8KB window. */
- ei_status.stop_page = 128;
-
- ei_status.mem = ioremap(dev->mem_start, 0x2000);
- if (!ei_status.mem) {
- printk(", failed to ioremap.\n");
- retval = -ENOMEM;
- goto out;
- }
- dev->mem_end = dev->mem_start + 0x1fff;
-
- printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n",
- dev->irq, dev->mem_start, dev->mem_end);
- ei_status.block_input = &ultra32_block_input;
- ei_status.block_output = &ultra32_block_output;
- ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
- ei_status.reset_8390 = &ultra32_reset_8390;
-
- dev->netdev_ops = &ultra32_netdev_ops;
- NS8390_init(dev, 0);
-
- return 0;
-out:
- release_region(ioaddr, ULTRA32_IO_EXTENT);
- return retval;
-}
-
-static int ultra32_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
- int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : IRQF_SHARED;
- int retval;
-
- retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev);
- if (retval)
- return retval;
-
- outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
- outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
- outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
- outb(0x01, ioaddr + 6); /* Enable Interrupts. */
- /* Set the early receive warning level in window 0 high enough not
- to receive ERW interrupts. */
- outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
- outb(0xff, dev->base_addr + EN0_ERWCNT);
- ei_open(dev);
- return 0;
-}
-
-static int ultra32_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */
-
- netif_stop_queue(dev);
-
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
- outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */
- outb(0x00, ioaddr + 6); /* Disable interrupts. */
- free_irq(dev->irq, dev);
-
- NS8390_init(dev, 0);
-
- return 0;
-}
-
-static void ultra32_reset_8390(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */
-
- outb(ULTRA32_RESET, ioaddr);
- if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies);
- ei_status.txing = 0;
-
- outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
- outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
- outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
- outb(0x01, ioaddr + 6); /* Enable Interrupts. */
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void ultra32_get_8390_hdr(struct net_device *dev,
- struct e8390_pkt_hdr *hdr,
- int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page & 0x1f) << 8);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- /* Select correct 8KB Window. */
- outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg);
-
-#ifdef __BIG_ENDIAN
- /* Officially this is what we are doing, but the readl() is faster */
- /* unfortunately it isn't endian aware of the struct */
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = le16_to_cpu(hdr->count);
-#else
- ((unsigned int*)hdr)[0] = readl(hdr_start);
-#endif
-}
-
-/* Block input and output are easy on shared memory ethercards, the only
- complication is when the ring buffer wraps, or in this case, when a
- packet spans an 8KB boundary. Note that the current 8KB segment is
- already set by the get_8390_hdr routine. */
-
-static void ultra32_block_input(struct net_device *dev,
- int count,
- struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + (ring_offset & 0x1fff);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) {
- int semi_count = 8192 - (ring_offset & 0x1FFF);
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- if (ring_offset < 96*256) {
- /* Select next 8KB Window. */
- ring_offset += semi_count;
- outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg);
- memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
- } else {
- /* Select first 8KB Window. */
- outb(ei_status.reg0, RamReg);
- memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
- }
- } else {
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void ultra32_block_output(struct net_device *dev,
- int count,
- const unsigned char *buf,
- int start_page)
-{
- void __iomem *xfer_start = ei_status.mem + (start_page<<8);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- /* Select first 8KB Window. */
- outb(ei_status.reg0, RamReg);
-
- memcpy_toio(xfer_start, buf, count);
-}
-
-#ifdef MODULE
-#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */
-static struct net_device *dev_ultra[MAX_ULTRA32_CARDS];
-
-MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
- struct net_device *dev = ultra32_probe(-1);
- if (IS_ERR(dev))
- break;
- dev_ultra[found++] = dev;
- }
- if (found)
- return 0;
- printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n");
- return -ENXIO;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
- struct net_device *dev = dev_ultra[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
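[Annotation] smc-ultra32.c is the one driver above whose shared memory is not mapped flat: the card has 32KB of RAM but only an 8KB window, so each access first selects a bank by writing bits 14:13 of the ring offset to the RAM register, then addresses bits 12:0 inside the window. The sketch below works through just that address split in plain C with no I/O; bank_select() stands in for the driver's outb() to the CFG3 RAM register.

#include <stdio.h>

static void bank_select(unsigned int bank)
{
        /* the real driver does: outb(ei_status.reg0 | bank, RamReg); */
        printf("select 8KB bank %u\n", bank);
}

static void locate(unsigned int ring_offset)
{
        unsigned int bank   = (ring_offset & 0x6000) >> 13;  /* which 8KB window */
        unsigned int within = ring_offset & 0x1fff;          /* offset inside it */

        bank_select(bank);
        printf("offset 0x%04x -> bank %u, window offset 0x%04x\n",
               ring_offset, bank, within);
}

int main(void)
{
        locate(0x1ff0);         /* near the end of bank 0 */
        locate(0x2010);         /* just into bank 1 */
        locate(0x7f00);         /* last bank, near the top of the 32KB ring */
        return 0;
}

The same split appears in page units in ultra32_get_8390_hdr(), where (ring_page & 0x60) >> 5 picks the bank and (ring_page & 0x1f) << 8 is the offset within the window.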
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e4ff38949112..ed956e08d38b 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -135,7 +135,6 @@ config ETHOC
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
-source "drivers/net/ethernet/racal/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index d4473072654a..8268d85f9448 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -53,7 +53,6 @@ obj-$(CONFIG_ETHOC) += ethoc.o
obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
-obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index e49c0eff040b..a9481606bbcd 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -61,6 +61,7 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
+ depends on BFIN_MAC && BF518
select PTP_1588_CLOCK
default y
---help---
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index c1fdb8be8bee..a175d0be1ae1 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -425,8 +425,8 @@ static int mii_probe(struct net_device *dev, int phy_mode)
return -EINVAL;
}
- phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
- 0, phy_mode);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &bfin_mac_adjust_link, phy_mode);
if (IS_ERR(phydev)) {
netdev_err(dev, "could not attach PHY\n");
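[Annotation] This hunk (and the matching phy_connect/phy_connect_direct hunks in greth.c and au1000_eth.c below) adapts callers to a phy_connect() signature without the separate flags argument: the interface type now follows the adjust-link callback directly. A hedged sketch of a caller using that call shape; example_adjust_link, example_attach_phy and the MII interface choice are placeholders.

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* hypothetical adjust-link callback; a real driver updates MAC settings here */
static void example_adjust_link(struct net_device *dev)
{
}

static int example_attach_phy(struct net_device *dev, const char *bus_id)
{
        struct phy_device *phydev;

        /* no flags argument: dev, bus id, handler, interface */
        phydev = phy_connect(dev, bus_id, &example_adjust_link,
                             PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev)) {
                netdev_err(dev, "could not attach PHY\n");
                return PTR_ERR(phydev);
        }
        return 0;
}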
@@ -498,10 +498,10 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, dev_name(&dev->dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static void bfin_mac_ethtool_getwol(struct net_device *dev,
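[Annotation] The strcpy -> strlcpy conversions in this hunk (and in greth.c below) bound every copy into the fixed-size ethtool_drvinfo fields by the destination size, so an over-long driver or bus string is truncated rather than overflowing the array. A minimal illustration with a struct that mirrors the shape of those fields; example_info and example_fill_info are placeholders.

#include <linux/string.h>

struct example_info {
        char driver[32];
        char bus_info[32];
};

static void example_fill_info(struct example_info *info, const char *bus_name)
{
        /* bounded by the destination size: truncates instead of overflowing */
        strlcpy(info->driver, "example-driver", sizeof(info->driver));
        strlcpy(info->bus_info, bus_name, sizeof(info->bus_info));
}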
@@ -647,7 +647,6 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
setup_mac_addr(dev->dev_addr);
return 0;
}
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index aa53115bb38b..0be2195e5034 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1127,10 +1127,11 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strncpy(info->driver, dev_driver_string(greth->dev), 32);
- strncpy(info->version, "revision: 1.0", 32);
- strncpy(info->bus_info, greth->dev->bus->name, 32);
- strncpy(info->fw_version, "N/A", 32);
+ strlcpy(info->driver, dev_driver_string(greth->dev),
+ sizeof(info->driver));
+ strlcpy(info->version, "revision: 1.0", sizeof(info->version));
+ strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
info->eedump_len = 0;
info->regdump_len = sizeof(struct greth_regs);
}
@@ -1287,9 +1288,7 @@ static int greth_mdio_probe(struct net_device *dev)
}
ret = phy_connect_direct(dev, phy, &greth_link_change,
- 0, greth->gbit_mac ?
- PHY_INTERFACE_MODE_GMII :
- PHY_INTERFACE_MODE_MII);
+ greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
if (ret) {
if (netif_msg_ifup(greth))
dev_err(&dev->dev, "could not attach to PHY\n");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 8350f4b37a8a..13d74aa4033d 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
default y
depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
- (ARM && ARCH_EBSA110) || ISA || EISA || MCA || PCMCIA
+ (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -105,19 +105,6 @@ config DECLANCE
DEC (now Compaq) based on the AMD LANCE chipset, including the
DEPCA series. (This chipset is better known via the NE2100 cards.)
-config DEPCA
- tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
- depends on (ISA || EISA || MCA)
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto> as well as
- <file:drivers/net/ethernet/amd/depca.c>.
-
- To compile this driver as a module, choose M here. The module
- will be called depca.
-
config HPLANCE
bool "HP on-board LANCE support"
depends on DIO
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index 175caa5328c9..cdd4301a973d 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_DECLANCE) += declance.o
-obj-$(CONFIG_DEPCA) += depca.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_LANCE) += lance.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 2ea221ed4777..de774d419144 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -437,8 +437,8 @@ static int au1000_mii_probe(struct net_device *dev)
/* now we are supposed to have a proper phydev, to attach to... */
BUG_ON(phydev->attached_dev);
- phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &au1000_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -587,10 +587,10 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
+ aup->mac_id);
info->regdump_len = 0;
}
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
deleted file mode 100644
index 34a485363d5b..000000000000
--- a/drivers/net/ethernet/amd/depca.c
+++ /dev/null
@@ -1,1910 +0,0 @@
-/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
-
- Written 1994, 1995 by David C. Davies.
-
-
- Copyright 1994 David C. Davies
- and
- United States Government
- (as represented by the Director, National Security Agency).
-
- Copyright 1995 Digital Equipment Corporation.
-
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-
- This driver is written for the Digital Equipment Corporation series
- of DEPCA and EtherWORKS ethernet cards:
-
- DEPCA (the original)
- DE100
- DE101
- DE200 Turbo
- DE201 Turbo
- DE202 Turbo (TP BNC)
- DE210
- DE422 (EISA)
-
- The driver has been tested on DE100, DE200 and DE202 cards in a
- relatively busy network. The DE422 has been tested a little.
-
- This driver will NOT work for the DE203, DE204 and DE205 series of
- cards, since they have a new custom ASIC in place of the AMD LANCE
- chip. See the 'ewrk3.c' driver in the Linux source tree for running
- those cards.
-
- I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
- a DECstation 5000/200.
-
- The author may be reached at davies@maniac.ultranet.com
-
- =========================================================================
-
- The driver was originally based on the 'lance.c' driver from Donald
- Becker which is included with the standard driver distribution for
- linux. V0.4 is a complete re-write with only the kernel interface
- remaining from the original code.
-
- 1) Lance.c code in /linux/drivers/net/
- 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
- AMD, 1992 [(800) 222-9323].
- 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
- AMD, Pub. #17881, May 1993.
- 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
- AMD, Pub. #16907, May 1992
- 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
- 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
- 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
- Digital Equipment Corporation, 1989
- 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
-
-
- Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
- driver.
-
- The original DEPCA card requires that the ethernet ROM address counter
- be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
- only done when a 0x08 is read as the first address octet (to minimise
- the chances of writing over some other hardware's I/O register). The
- NICSR accesses have been changed to byte accesses for all the cards
- supported by this driver, since there is only one useful bit in the MSB
- (remote boot timeout) and it is not used. Also, there is a maximum of
- only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
- help debugging all this (and holding my feet to the fire until I got it
- right).
-
- The DE200 series boards have on-board 64kB RAM for use as a shared
- memory network buffer. Only the DE100 cards make use of a 2kB buffer
- mode which has not been implemented in this driver (only the 32kB and
- 64kB modes are supported [16kB/48kB for the original DEPCA]).
-
- At the most only 2 DEPCA cards can be supported on the ISA bus because
- there is only provision for two I/O base addresses on each card (0x300
- and 0x200). The I/O address is detected by searching for a byte sequence
- in the Ethernet station address PROM at the expected I/O address for the
- Ethernet PROM. The shared memory base address is 'autoprobed' by
- looking for the self test PROM and detecting the card name. When a
- second DEPCA is detected, information is placed in the base_addr
- variable of the next device structure (which is created if necessary),
- thus enabling ethif_probe initialization for the device. More than 2
- EISA cards can be supported, but care will be needed assigning the
- shared memory to ensure that each slot has the correct IRQ, I/O address
- and shared memory address assigned.
-
- ************************************************************************
-
- NOTE: If you are using two ISA DEPCAs, it is important that you assign
- the base memory addresses correctly. The driver autoprobes I/O 0x300
- then 0x200. The base memory address for the first device must be less
- than that of the second so that the auto probe will correctly assign the
- I/O and memory addresses on the same card. I can't think of a way to do
- this unambiguously at the moment, since there is nothing on the cards to
- tie I/O and memory information together.
-
- I am unable to test 2 cards together for now, so this code is
- unchecked. All reports, good or bad, are welcome.
-
- ************************************************************************
-
- The board IRQ setting must be at an unused IRQ which is auto-probed
- using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
- {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
- really IRQ9 in machines with 16 IRQ lines.
-
- No 16MB memory limitation should exist with this driver as DMA is not
- used and the common memory area is in low memory on the network card (my
- current system has 20MB and I've not had problems yet).
-
- The ability to load this driver as a loadable module has been added. To
- utilise this ability, you have to do <8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy depca.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) if you wish, edit the source code near line 1530 to reflect the I/O
- address and IRQ you're using (see also 5).
- 3) compile depca.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the depca configuration turned off and reboot.
- 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
- [Alan Cox: Changed the code to allow command line irq/io assignments]
- [Dave Davies: Changed the code to allow command line mem/name
- assignments]
- 6) run the net startup bits for your eth?? interface manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- Note that autoprobing is not allowed in loadable modules - the system is
- already up and running and you're messing with interrupts.
-
- To unload a module, turn off the associated interface
- 'ifconfig eth?? down' then 'rmmod depca'.
-
- To assign a base memory address for the shared memory when running as a
- loadable module, see 5 above. To include the adapter name (if you have
- no PROM but know the card name) also see 5 above. Note that this last
- option will not work with kernel built-in depca's.
-
- The shared memory assignment for a loadable module makes sense to avoid
- the 'memory autoprobe' picking the wrong shared memory (for the case of
- 2 depca's in a PC).
-
- ************************************************************************
- Support for MCA EtherWORKS cards added 11-3-98. (MCA since deleted)
- Verified to work with up to 2 DE212 cards in a system (although not
- fully stress-tested).
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 25-jan-94 Initial writing.
- 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
- 0.3 1-feb-94 Added multiple DEPCA support.
- 0.31 4-feb-94 Added DE202 recognition.
- 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
- 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
- Add jabber packet fix from murf@perftech.com
- and becker@super.org
- 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
- 0.35 8-mar-94 Added DE201 recognition. Tidied up.
- 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
- 0.36 16-may-94 DE422 fix released.
- 0.37 22-jul-94 Added MODULE support
- 0.38 15-aug-94 Added DBR ROM switch in depca_close().
- Multi DEPCA bug fix.
- 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
- 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
- 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
- <stromain@alf.dec.com>
- 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
- 0.385 3-apr-95 Fix a recognition bug reported by
- <ryan.niemi@lastfrontier.com>
- 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
- 0.40 25-May-95 Rewrite for portability & updated.
- ALPHA support from <jestabro@amt.tay1.dec.com>
- 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
- suggestion by <heiko@colossus.escape.de>
- 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
- modules.
- Add 'adapter_name' for loadable modules when no PROM.
- Both above from a suggestion by
- <pchen@woodruffs121.residence.gatech.edu>.
- Add new multicasting code.
- 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
- 0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
- 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
- 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug
- reported by <mmogilvi@elbert.uccs.edu>
- 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards
- by <tymm@computer.org>
- 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org>
- 0.5 14-Nov-98 Re-spin for 2.1.x kernels.
- 0.51 27-Jun-99 Correct received packet length for CRC from
- report by <worm@dkik.dk>
- 0.52 16-Oct-00 Fixes for 2.3 io memory accesses
- Fix show-stopper (ints left masked) in depca_interrupt
- by <peterd@pnd-pc.demon.co.uk>
- 0.53 12-Jan-01 Release resources on failure, bss tidbits
- by acme@conectiva.com.br
- 0.54 08-Nov-01 use library crc32 functions
- by Matt_Domsch@dell.com
- 0.55 01-Mar-03 Use EISA/sysfs framework <maz@wild-wind.fr.eu.org>
-
- =========================================================================
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#ifdef CONFIG_EISA
-#include <linux/eisa.h>
-#endif
-
-#include "depca.h"
-
-static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n";
-
-#ifdef DEPCA_DEBUG
-static int depca_debug = DEPCA_DEBUG;
-#else
-static int depca_debug = 1;
-#endif
-
-#define DEPCA_NDA 0xffe0 /* No Device Address */
-
-#define TX_TIMEOUT (1*HZ)
-
-/*
-** Ethernet PROM defines
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** Set the number of Tx and Rx buffers. Ensure that the memory requested
-** here is <= to the amount of shared memory set up by the board switches.
-** The number of descriptors MUST BE A POWER OF 2.
-**
-** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
-*/
-#define NUM_RX_DESC 8 /* Number of RX descriptors */
-#define NUM_TX_DESC 8 /* Number of TX descriptors */
-#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
-#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
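-/*
-** For illustration, with the default values above and the 8-byte Rx/Tx
-** descriptors defined below:
-**   total_memory = 8*(8+1536) + 8*(8+1536) = 24704 bytes (~24kB),
-** which fits within the 48kB available on an original DEPCA.
-*/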
-
-/*
-** EISA bus defines
-*/
-#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
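-/*
-** For illustration: EISA slot N decodes its slot-specific I/O at N * 0x1000,
-** so a DE422 in slot 2, for example, would be expected at
-** 0x2000 + 0x0c00 = 0x2c00 (depca_eisa_probe() adds this offset to the
-** slot base address).
-*/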
-
-/*
-** ISA Bus defines
-*/
-#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
-#define DEPCA_TOTAL_SIZE 0x10
-
-static struct {
- u_long iobase;
- struct platform_device *device;
-} depca_io_ports[] = {
- { 0x300, NULL },
- { 0x200, NULL },
- { 0 , NULL },
-};
-
-/*
-** Name <-> Adapter mapping
-*/
-#define DEPCA_SIGNATURE {"DEPCA",\
- "DE100","DE101",\
- "DE200","DE201","DE202",\
- "DE210","DE212",\
- "DE422",\
- ""}
-
-static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
-
-enum depca_type {
- DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
-};
-
-static char depca_string[] = "depca";
-
-static int depca_device_remove (struct device *device);
-
-#ifdef CONFIG_EISA
-static struct eisa_device_id depca_eisa_ids[] = {
- { "DEC4220", de422 },
- { "" }
-};
-MODULE_DEVICE_TABLE(eisa, depca_eisa_ids);
-
-static int depca_eisa_probe (struct device *device);
-
-static struct eisa_driver depca_eisa_driver = {
- .id_table = depca_eisa_ids,
- .driver = {
- .name = depca_string,
- .probe = depca_eisa_probe,
- .remove = depca_device_remove
- }
-};
-#endif
-
-static int depca_isa_probe (struct platform_device *);
-
-static int depca_isa_remove(struct platform_device *pdev)
-{
- return depca_device_remove(&pdev->dev);
-}
-
-static struct platform_driver depca_isa_driver = {
- .probe = depca_isa_probe,
- .remove = depca_isa_remove,
- .driver = {
- .name = depca_string,
- },
-};
-
-/*
-** Miscellaneous info...
-*/
-#define DEPCA_STRLEN 16
-
-/*
-** Memory Alignment. Each descriptor is 4 longwords long. To force a
-** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
-** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
-*/
-#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
-#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
-#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */
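-/*
-** For illustration: with DEPCA_ALIGN = 7, an offset of 30 bytes is rounded
-** up by (30 + DEPCA_ALIGN) & ~DEPCA_ALIGN = 32, the next quadword boundary.
-*/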
-
-/*
-** The DEPCA Rx and Tx ring descriptors.
-*/
-struct depca_rx_desc {
- volatile s32 base;
- s16 buf_length; /* This length is negative 2's complement! */
- s16 msg_length; /* This length is "normal". */
-};
-
-struct depca_tx_desc {
- volatile s32 base;
- s16 length; /* This length is negative 2's complement! */
- s16 misc; /* Errors and TDR info */
-};
-
-#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
- to LANCE memory address space */
-
-/*
-** The Lance initialization block, described in databook, in common memory.
-*/
-struct depca_init {
- u16 mode; /* Mode register */
- u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
- u8 mcast_table[8]; /* Multicast Hash Table. */
- u32 rx_ring; /* Rx ring base pointer & ring length */
- u32 tx_ring; /* Tx ring base pointer & ring length */
-};
-
-#define DEPCA_PKT_STAT_SZ 16
-#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase DEPCA_PKT_STAT_SZ */
-struct depca_private {
- char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */
- enum depca_type adapter; /* Adapter type */
- enum {
- DEPCA_BUS_ISA = 1,
- DEPCA_BUS_EISA,
- } depca_bus; /* type of bus */
- struct depca_init init_block; /* Shadow Initialization block */
-/* CPU address space fields */
- struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */
- struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */
- void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
- void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
- void __iomem *sh_mem; /* CPU mapped virt address of device RAM */
- u_long mem_start; /* Bus address of device RAM (before remap) */
- u_long mem_len; /* device memory size */
-/* Device address space fields */
- u_long device_ram_start; /* Start of RAM in device addr space */
-/* Offsets used in both address spaces */
- u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */
- u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */
- u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */
-/* Kernel-only (not device) fields */
- int rx_new, tx_new; /* The next free ring entry */
- int rx_old, tx_old; /* The ring entries to be free()ed. */
- spinlock_t lock;
- struct { /* Private stats counters */
- u32 bins[DEPCA_PKT_STAT_SZ];
- u32 unicast;
- u32 multicast;
- u32 broadcast;
- u32 excessive_collisions;
- u32 tx_underruns;
- u32 excessive_underruns;
- } pktStats;
- int txRingMask; /* TX ring mask */
- int rxRingMask; /* RX ring mask */
- s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
- s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
-};
-
-/*
-** The transmit ring full condition is described by the tx_old and tx_new
-** pointers by:
-** tx_old = tx_new Empty ring
-** tx_old = tx_new+1 Full ring
-** tx_old+txRingMask = tx_new Full ring (wrapped condition)
-*/
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->txRingMask-lp->tx_new:\
- lp->tx_old -lp->tx_new-1)
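-/*
-** For illustration, with txRingMask = 7 (8 descriptors): tx_old = 2 and
-** tx_new = 5 gives 2 + 7 - 5 = 4 free entries; tx_old = 6 and tx_new = 3
-** (wrapped) gives 6 - 3 - 1 = 2 free entries.
-*/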
-
-/*
-** Public Functions
-*/
-static int depca_open(struct net_device *dev);
-static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t depca_interrupt(int irq, void *dev_id);
-static int depca_close(struct net_device *dev);
-static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static void depca_tx_timeout(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-
-/*
-** Private functions
-*/
-static void depca_init_ring(struct net_device *dev);
-static int depca_rx(struct net_device *dev);
-static int depca_tx(struct net_device *dev);
-
-static void LoadCSRs(struct net_device *dev);
-static int InitRestartDepca(struct net_device *dev);
-static int DepcaSignature(char *name, u_long paddr);
-static int DevicePresent(u_long ioaddr);
-static int get_hw_addr(struct net_device *dev);
-static void SetMulticastFilter(struct net_device *dev);
-static int load_packet(struct net_device *dev, struct sk_buff *skb);
-static void depca_dbg_open(struct net_device *dev);
-
-static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
-static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
-static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
-static u_char *depca_irq;
-
-static int irq;
-static int io;
-static char *adapter_name;
-static int mem; /* For loadable module assignment
- use insmod mem=0x????? .... */
-module_param (irq, int, 0);
-module_param (io, int, 0);
-module_param (adapter_name, charp, 0);
-module_param (mem, int, 0);
-MODULE_PARM_DESC(irq, "DEPCA IRQ number");
-MODULE_PARM_DESC(io, "DEPCA I/O base address");
-MODULE_PARM_DESC(adapter_name, "DEPCA adapter name");
-MODULE_PARM_DESC(mem, "DEPCA shared memory address");
-MODULE_LICENSE("GPL");
-
-/*
-** Miscellaneous defines...
-*/
-#define STOP_DEPCA \
- outw(CSR0, DEPCA_ADDR);\
- outw(STOP, DEPCA_DATA)
-
-static const struct net_device_ops depca_netdev_ops = {
- .ndo_open = depca_open,
- .ndo_start_xmit = depca_start_xmit,
- .ndo_stop = depca_close,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = depca_ioctl,
- .ndo_tx_timeout = depca_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init depca_hw_init (struct net_device *dev, struct device *device)
-{
- struct depca_private *lp;
- int i, j, offset, netRAM, mem_len, status = 0;
- s16 nicsr;
- u_long ioaddr;
- u_long mem_start;
-
- /*
- * We are now supposed to enter this function with the
- * following fields filled with proper values:
- *
- * dev->base_addr
- * lp->mem_start
- * lp->depca_bus
- * lp->adapter
- *
- * dev->irq can be set if known from device configuration (on
- * MCA or EISA) or module option. Otherwise, it will be auto
- * detected.
- */
-
- ioaddr = dev->base_addr;
-
- STOP_DEPCA;
-
- nicsr = inb(DEPCA_NICSR);
- nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
- outb(nicsr, DEPCA_NICSR);
-
- if (inw(DEPCA_DATA) != STOP) {
- return -ENXIO;
- }
-
- lp = netdev_priv(dev);
- mem_start = lp->mem_start;
-
- if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
- return -ENXIO;
-
- printk("%s: %s at 0x%04lx",
- dev_name(device), depca_signature[lp->adapter], ioaddr);
-
- switch (lp->depca_bus) {
-#ifdef CONFIG_EISA
- case DEPCA_BUS_EISA:
- printk(" (EISA slot %d)", to_eisa_device(device)->slot);
- break;
-#endif
-
- case DEPCA_BUS_ISA:
- break;
-
- default:
- printk("Unknown DEPCA bus %d\n", lp->depca_bus);
- return -ENXIO;
- }
-
- printk(", h/w address ");
- status = get_hw_addr(dev);
- printk("%pM", dev->dev_addr);
- if (status != 0) {
- printk(" which has an Ethernet PROM CRC error.\n");
- return -ENXIO;
- }
-
- /* Set up the maximum amount of network RAM(kB) */
- netRAM = ((lp->adapter != DEPCA) ? 64 : 48);
- if ((nicsr & _128KB) && (lp->adapter == de422))
- netRAM = 128;
-
- /* Shared Memory Base Address */
- if (nicsr & BUF) {
- nicsr &= ~BS; /* DEPCA RAM in top 32k */
- netRAM -= 32;
- mem_start += 0x8000;
- }
-
- if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init)))
- > (netRAM << 10)) {
- printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM);
- return -ENXIO;
- }
-
- printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
-
- /* Enable the shadow RAM. */
- if (lp->adapter != DEPCA) {
- nicsr |= SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- spin_lock_init(&lp->lock);
- sprintf(lp->adapter_name, "%s (%s)",
- depca_signature[lp->adapter], dev_name(device));
- status = -EBUSY;
-
- /* Initialisation Block */
- if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) {
- printk(KERN_ERR "depca: cannot request ISA memory, aborting\n");
- goto out_priv;
- }
-
- status = -EIO;
- lp->sh_mem = ioremap(mem_start, mem_len);
- if (lp->sh_mem == NULL) {
- printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n");
- goto out1;
- }
-
- lp->mem_start = mem_start;
- lp->mem_len = mem_len;
- lp->device_ram_start = mem_start & LA_MASK;
-
- offset = 0;
- offset += sizeof(struct depca_init);
-
- /* Tx & Rx descriptors (aligned to a quadword boundary) */
- offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
- lp->rx_ring = lp->sh_mem + offset;
- lp->rx_ring_offset = offset;
-
- offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
- lp->tx_ring = lp->sh_mem + offset;
- lp->tx_ring_offset = offset;
-
- offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
-
- lp->buffs_offset = offset;
-
- /* Finish initialising the ring information. */
- lp->rxRingMask = NUM_RX_DESC - 1;
- lp->txRingMask = NUM_TX_DESC - 1;
-
- /* Calculate Tx/Rx RLEN size for the descriptors. */
- for (i = 0, j = lp->rxRingMask; j > 0; i++) {
- j >>= 1;
- }
- lp->rx_rlen = (s32) (i << 29);
- for (i = 0, j = lp->txRingMask; j > 0; i++) {
- j >>= 1;
- }
- lp->tx_rlen = (s32) (i << 29);
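- /*
- * For illustration: with 8 descriptors, each of the two loops above
- * iterates three times (j: 7 -> 3 -> 1 -> 0), so rx_rlen and tx_rlen
- * become 3 << 29 = 0x60000000, placing the LANCE ring-length code in
- * the top three bits.
- */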
-
- /* Load the initialisation block */
- depca_init_ring(dev);
-
- /* Initialise the control and status registers */
- LoadCSRs(dev);
-
- /* Enable DEPCA board interrupts for autoprobing */
- nicsr = ((nicsr & ~IM) | IEN);
- outb(nicsr, DEPCA_NICSR);
-
- /* To auto-IRQ we enable the initialization-done and DMA error
- interrupts. For now we will always get a DMA error. */
- if (dev->irq < 2) {
- unsigned char irqnum;
- unsigned long irq_mask, delay;
-
- irq_mask = probe_irq_on();
-
- /* Assign the correct irq list */
- switch (lp->adapter) {
- case DEPCA:
- case de100:
- case de101:
- depca_irq = de1xx_irq;
- break;
- case de200:
- case de201:
- case de202:
- case de210:
- case de212:
- depca_irq = de2xx_irq;
- break;
- case de422:
- depca_irq = de422_irq;
- break;
-
- default:
- break; /* Not reached */
- }
-
- /* Trigger an initialization just for the interrupt. */
- outw(INEA | INIT, DEPCA_DATA);
-
- delay = jiffies + HZ/50;
- while (time_before(jiffies, delay))
- yield();
-
- irqnum = probe_irq_off(irq_mask);
-
- status = -ENXIO;
- if (!irqnum) {
- printk(" and failed to detect IRQ line.\n");
- goto out2;
- } else {
- for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++)
- if (irqnum == depca_irq[i]) {
- dev->irq = irqnum;
- printk(" and uses IRQ%d.\n", dev->irq);
- }
-
- if (!dev->irq) {
- printk(" but incorrect IRQ line detected.\n");
- goto out2;
- }
- }
- } else {
- printk(" and assigned IRQ%d.\n", dev->irq);
- }
-
- if (depca_debug > 1) {
- printk(version);
- }
-
- /* The DEPCA-specific entries in the device structure. */
- dev->netdev_ops = &depca_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- dev->mem_start = 0;
-
- dev_set_drvdata(device, dev);
- SET_NETDEV_DEV (dev, device);
-
- status = register_netdev(dev);
- if (status == 0)
- return 0;
-out2:
- iounmap(lp->sh_mem);
-out1:
- release_mem_region (mem_start, mem_len);
-out_priv:
- return status;
-}
-
-
-static int depca_open(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- s16 nicsr;
- int status = 0;
-
- STOP_DEPCA;
- nicsr = inb(DEPCA_NICSR);
-
- /* Make sure the shadow RAM is enabled */
- if (lp->adapter != DEPCA) {
- nicsr |= SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- /* Re-initialize the DEPCA... */
- depca_init_ring(dev);
- LoadCSRs(dev);
-
- depca_dbg_open(dev);
-
- if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) {
- printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
- status = -EAGAIN;
- } else {
-
- /* Enable DEPCA board interrupts and turn off LED */
- nicsr = ((nicsr & ~IM & ~LED) | IEN);
- outb(nicsr, DEPCA_NICSR);
- outw(CSR0, DEPCA_ADDR);
-
- netif_start_queue(dev);
-
- status = InitRestartDepca(dev);
-
- if (depca_debug > 1) {
- printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA));
- printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR));
- }
- }
- return status;
-}
-
-/* Initialize the lance Rx and Tx descriptor rings. */
-static void depca_init_ring(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_int i;
- u_long offset;
-
- /* Lock out other processes whilst setting up the hardware */
- netif_stop_queue(dev);
-
- lp->rx_new = lp->tx_new = 0;
- lp->rx_old = lp->tx_old = 0;
-
- /* Initialize the base address and length of each buffer in the ring */
- for (i = 0; i <= lp->rxRingMask; i++) {
- offset = lp->buffs_offset + i * RX_BUFF_SZ;
- writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base);
- writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
- lp->rx_buff[i] = lp->sh_mem + offset;
- }
-
- for (i = 0; i <= lp->txRingMask; i++) {
- offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ;
- writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base);
- lp->tx_buff[i] = lp->sh_mem + offset;
- }
-
- /* Set up the initialization block */
- lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen;
- lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen;
-
- SetMulticastFilter(dev);
-
- for (i = 0; i < ETH_ALEN; i++) {
- lp->init_block.phys_addr[i] = dev->dev_addr[i];
- }
-
- lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
-}
-
-
-static void depca_tx_timeout(struct net_device *dev)
-{
- u_long ioaddr = dev->base_addr;
-
- printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA));
-
- STOP_DEPCA;
- depca_init_ring(dev);
- LoadCSRs(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
- InitRestartDepca(dev);
-}
-
-
-/*
-** Writes a socket buffer to TX descriptor ring and starts transmission
-*/
-static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- int status = 0;
-
- /* Reject empty packets; short ones are padded to the minimum length below. */
- if (skb->len < 1)
- goto out;
-
- if (skb_padto(skb, ETH_ZLEN))
- goto out;
-
- netif_stop_queue(dev);
-
- if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
- status = load_packet(dev, skb);
-
- if (!status) {
- /* Trigger an immediate send demand. */
- outw(CSR0, DEPCA_ADDR);
- outw(INEA | TDMD, DEPCA_DATA);
-
- dev_kfree_skb(skb);
- }
- if (TX_BUFFS_AVAIL)
- netif_start_queue(dev);
- } else
- status = NETDEV_TX_LOCKED;
-
- out:
- return status;
-}
-
-/*
-** The DEPCA interrupt handler.
-*/
-static irqreturn_t depca_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct depca_private *lp;
- s16 csr0, nicsr;
- u_long ioaddr;
-
- if (dev == NULL) {
- printk("depca_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- lp = netdev_priv(dev);
- ioaddr = dev->base_addr;
-
- spin_lock(&lp->lock);
-
- /* mask the DEPCA board interrupts and turn on the LED */
- nicsr = inb(DEPCA_NICSR);
- nicsr |= (IM | LED);
- outb(nicsr, DEPCA_NICSR);
-
- outw(CSR0, DEPCA_ADDR);
- csr0 = inw(DEPCA_DATA);
-
- /* Acknowledge all of the current interrupt sources ASAP. */
- outw(csr0 & INTE, DEPCA_DATA);
-
- if (csr0 & RINT) /* Rx interrupt (packet arrived) */
- depca_rx(dev);
-
- if (csr0 & TINT) /* Tx interrupt (packet sent) */
- depca_tx(dev);
-
- /* Any resources available? */
- if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) {
- netif_wake_queue(dev);
- }
-
- /* Unmask the DEPCA board interrupts and turn off the LED */
- nicsr = (nicsr & ~IM & ~LED);
- outb(nicsr, DEPCA_NICSR);
-
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-/* Called with lp->lock held */
-static int depca_rx(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- int i, entry;
- s32 status;
-
- for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) {
- status = readl(&lp->rx_ring[entry].base) >> 16;
- if (status & R_STP) { /* Remember start of frame */
- lp->rx_old = entry;
- }
- if (status & R_ENP) { /* Valid frame status */
- if (status & R_ERR) { /* There was an error. */
- dev->stats.rx_errors++; /* Update the error stats. */
- if (status & R_FRAM)
- dev->stats.rx_frame_errors++;
- if (status & R_OFLO)
- dev->stats.rx_over_errors++;
- if (status & R_CRC)
- dev->stats.rx_crc_errors++;
- if (status & R_BUFF)
- dev->stats.rx_fifo_errors++;
- } else {
- short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
- unsigned char *buf;
- skb_reserve(skb, 2); /* 16 byte align the IP header */
- buf = skb_put(skb, pkt_len);
- if (entry < lp->rx_old) { /* Wrapped buffer */
- len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
- memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
- memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len);
- } else { /* Linear buffer */
- memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len);
- }
-
- /*
- ** Notify the upper protocol layers that there is another
- ** packet to handle
- */
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
-
- /*
- ** Update stats
- */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) {
- if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) {
- lp->pktStats.bins[i]++;
- i = DEPCA_PKT_STAT_SZ;
- }
- }
- if (is_multicast_ether_addr(buf)) {
- if (is_broadcast_ether_addr(buf)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(buf,
- dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
-
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats));
- }
- } else {
- printk("%s: Memory squeeze, deferring packet.\n", dev->name);
- dev->stats.rx_dropped++; /* Really, deferred. */
- break;
- }
- }
- /* Change buffer ownership for this last frame, back to the adapter */
- for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
- writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
- }
- writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
- }
-
- /*
- ** Update entry information
- */
- lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
- }
-
- return 0;
-}
-
-/*
-** Buffer sent - check for buffer errors.
-** Called with lp->lock held
-*/
-static int depca_tx(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- int entry;
- s32 status;
- u_long ioaddr = dev->base_addr;
-
- for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
- status = readl(&lp->tx_ring[entry].base) >> 16;
-
- if (status < 0) { /* Packet not yet sent! */
- break;
- } else if (status & T_ERR) { /* An error occurred. */
- status = readl(&lp->tx_ring[entry].misc);
- dev->stats.tx_errors++;
- if (status & TMD3_RTRY)
- dev->stats.tx_aborted_errors++;
- if (status & TMD3_LCAR)
- dev->stats.tx_carrier_errors++;
- if (status & TMD3_LCOL)
- dev->stats.tx_window_errors++;
- if (status & TMD3_UFLO)
- dev->stats.tx_fifo_errors++;
- if (status & (TMD3_BUFF | TMD3_UFLO)) {
- /* Trigger an immediate send demand. */
- outw(CSR0, DEPCA_ADDR);
- outw(INEA | TDMD, DEPCA_DATA);
- }
- } else if (status & (T_MORE | T_ONE)) {
- dev->stats.collisions++;
- } else {
- dev->stats.tx_packets++;
- }
-
- /* Update all the pointers */
- lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
- }
-
- return 0;
-}
-
-static int depca_close(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- s16 nicsr;
- u_long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- outw(CSR0, DEPCA_ADDR);
-
- if (depca_debug > 1) {
- printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA));
- }
-
- /*
- ** We stop the DEPCA here -- it occasionally polls
- ** memory if we don't.
- */
- outw(STOP, DEPCA_DATA);
-
- /*
- ** Give back the ROM in case the user wants to go to DOS
- */
- if (lp->adapter != DEPCA) {
- nicsr = inb(DEPCA_NICSR);
- nicsr &= ~SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- /*
- ** Free the associated irq
- */
- free_irq(dev->irq, dev);
- return 0;
-}
-
-static void LoadCSRs(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
-
- outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
- outw((u16) lp->device_ram_start, DEPCA_DATA);
- outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
- outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA);
- outw(CSR3, DEPCA_ADDR); /* ALE control */
- outw(ACON, DEPCA_DATA);
-
- outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
-}
-
-static int InitRestartDepca(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- int i, status = 0;
-
- /* Copy the shadow init_block to shared memory */
- memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
-
- outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
- outw(INIT, DEPCA_DATA); /* initialize DEPCA */
-
- /* wait for lance to complete initialisation */
- for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++);
-
- if (i != 100) {
- /* clear IDON by writing a "1", enable interrupts and start lance */
- outw(IDON | INEA | STRT, DEPCA_DATA);
- if (depca_debug > 2) {
- printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
- }
- } else {
- printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
- status = -1;
- }
-
- return status;
-}
-
-/*
-** Set or clear the multicast filter for this adaptor.
-*/
-static void set_multicast_list(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
-
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
- lp->init_block.mode |= PROM;
- } else {
- SetMulticastFilter(dev);
- lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
- }
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
-}
-
-/*
-** Calculate the hash code and update the logical address filter
-** from a list of ethernet multicast addresses.
-** Big endian crc one liner is mine, all mine, ha ha ha ha!
-** LANCE calculates its hash codes big endian.
-*/
-static void SetMulticastFilter(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i, j, bit, byte;
- u16 hashcode;
- u32 crc;
-
- if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- lp->init_block.mcast_table[i] = (char) 0xff;
- }
- } else {
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */
- lp->init_block.mcast_table[i] = 0;
- }
- /* Add multicast addresses */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc(ETH_ALEN, ha->addr);
- hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
- for (j = 0; j < 5; j++) { /* ... in reverse order. */
- hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
- }
-
- byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
- bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
- lp->init_block.mcast_table[byte] |= bit;
- }
- }
-}
-
-static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
-{
- int status = 0;
-
- if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) {
- status = -EBUSY;
- goto out;
- }
-
- if (DevicePresent(ioaddr)) {
- status = -ENODEV;
- goto out_release;
- }
-
- if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) {
- status = -ENOMEM;
- goto out_release;
- }
-
- return 0;
-
- out_release:
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-
-/*
-** ISA bus I/O device probe
-*/
-
-static void __init depca_platform_probe (void)
-{
- int i;
- struct platform_device *pldev;
-
- for (i = 0; depca_io_ports[i].iobase; i++) {
- depca_io_ports[i].device = NULL;
-
- /* if an address has been specified on the command
- * line, use it (if valid) */
- if (io && io != depca_io_ports[i].iobase)
- continue;
-
- pldev = platform_device_alloc(depca_string, i);
- if (!pldev)
- continue;
-
- pldev->dev.platform_data = (void *) depca_io_ports[i].iobase;
- depca_io_ports[i].device = pldev;
-
- if (platform_device_add(pldev)) {
- depca_io_ports[i].device = NULL;
- pldev->dev.platform_data = NULL;
- platform_device_put(pldev);
- continue;
- }
-
- if (!pldev->dev.driver) {
- /* The driver was not bound to this device, so there was
- * no hardware at this address. Unregister it; the
- * release function will take care of freeing the
- * allocated structure. */
-
- depca_io_ports[i].device = NULL;
- pldev->dev.platform_data = NULL;
- platform_device_unregister (pldev);
- }
- }
-}
-
-static enum depca_type __init depca_shmem_probe (ulong *mem_start)
-{
- u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
- enum depca_type adapter = unknown;
- int i;
-
- for (i = 0; mem_base[i]; i++) {
- *mem_start = mem ? mem : mem_base[i];
- adapter = DepcaSignature (adapter_name, *mem_start);
- if (adapter != unknown)
- break;
- }
-
- return adapter;
-}
-
-static int depca_isa_probe(struct platform_device *device)
-{
- struct net_device *dev;
- struct depca_private *lp;
- u_long ioaddr, mem_start = 0;
- enum depca_type adapter = unknown;
- int status = 0;
-
- ioaddr = (u_long) device->dev.platform_data;
-
- if ((status = depca_common_init (ioaddr, &dev)))
- goto out;
-
- adapter = depca_shmem_probe (&mem_start);
-
- if (adapter == unknown) {
- status = -ENODEV;
- goto out_free;
- }
-
- dev->base_addr = ioaddr;
- dev->irq = irq; /* Use whatever value the user gave
- * us, and 0 if he didn't. */
- lp = netdev_priv(dev);
- lp->depca_bus = DEPCA_BUS_ISA;
- lp->adapter = adapter;
- lp->mem_start = mem_start;
-
- if ((status = depca_hw_init(dev, &device->dev)))
- goto out_free;
-
- return 0;
-
- out_free:
- free_netdev (dev);
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-
-/*
-** EISA callbacks from sysfs.
-*/
-
-#ifdef CONFIG_EISA
-static int __init depca_eisa_probe (struct device *device)
-{
- enum depca_type adapter = unknown;
- struct eisa_device *edev;
- struct net_device *dev;
- struct depca_private *lp;
- u_long ioaddr, mem_start;
- int status = 0;
-
- edev = to_eisa_device (device);
- ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS;
-
- if ((status = depca_common_init (ioaddr, &dev)))
- goto out;
-
- /* It would have been nice to get card configuration from the
- * card. Unfortunately, this register is write-only (shares
- * its address with the ethernet prom)... As we don't parse
- * the EISA configuration structures (yet... :-), just rely on
- * the ISA probing to sort it out... */
-
- adapter = depca_shmem_probe (&mem_start);
- if (adapter == unknown) {
- status = -ENODEV;
- goto out_free;
- }
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
- lp = netdev_priv(dev);
- lp->depca_bus = DEPCA_BUS_EISA;
- lp->adapter = edev->id.driver_data;
- lp->mem_start = mem_start;
-
- if ((status = depca_hw_init(dev, device)))
- goto out_free;
-
- return 0;
-
- out_free:
- free_netdev (dev);
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-#endif
-
-static int depca_device_remove(struct device *device)
-{
- struct net_device *dev;
- struct depca_private *lp;
- int bus;
-
- dev = dev_get_drvdata(device);
- lp = netdev_priv(dev);
-
- unregister_netdev (dev);
- iounmap (lp->sh_mem);
- release_mem_region (lp->mem_start, lp->mem_len);
- release_region (dev->base_addr, DEPCA_TOTAL_SIZE);
- bus = lp->depca_bus;
- free_netdev (dev);
-
- return 0;
-}
-
-/*
-** Look for a particular board name in the on-board Remote Diagnostics
-** and Boot (readb) ROM. This will also give us a clue to the network RAM
-** base address.
-*/
-static int __init DepcaSignature(char *name, u_long base_addr)
-{
- u_int i, j, k;
- void __iomem *ptr;
- char tmpstr[16];
- u_long prom_addr = base_addr + 0xc000;
- u_long mem_addr = base_addr + 0x8000; /* 32KB */
-
- /* Can't reserve the prom region, it is already marked as
- * used, at least on x86. Instead, reserve a memory region a
- * board would certainly use. If it works, go ahead. If not,
- * run like hell... */
-
- if (!request_mem_region (mem_addr, 16, depca_string))
- return unknown;
-
- /* Copy the first 16 bytes of ROM */
-
- ptr = ioremap(prom_addr, 16);
- if (ptr == NULL) {
- printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr);
- return unknown;
- }
- for (i = 0; i < 16; i++) {
- tmpstr[i] = readb(ptr + i);
- }
- iounmap(ptr);
-
- release_mem_region (mem_addr, 16);
-
- /* Check if PROM contains a valid string */
- for (i = 0; *depca_signature[i] != '\0'; i++) {
- for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) {
- if (depca_signature[i][k] == tmpstr[j]) { /* track signature */
- k++;
- } else { /* lost signature; begin search again */
- k = 0;
- }
- }
- if (k == strlen(depca_signature[i]))
- break;
- }
-
- /* Check if name string is valid, provided there's no PROM */
- if (name && *name && (i == unknown)) {
- for (i = 0; *depca_signature[i] != '\0'; i++) {
- if (strcmp(name, depca_signature[i]) == 0)
- break;
- }
- }
-
- return i;
-}
-
-/*
-** Look for a special sequence in the Ethernet station address PROM that
-** is common across all DEPCA products. Note that the original DEPCA needs
-** its ROM address counter to be initialized and enabled. Only enable
-** if the first address octet is a 0x08 - this minimises the chances of
-** messing around with some other hardware, but it assumes that this DEPCA
-** card initialized itself correctly.
-**
-** Search the Ethernet address ROM for the signature. Since the ROM address
-** counter can start at an arbitrary point, the search must include the entire
-** probe sequence length plus the (length_of_the_signature - 1).
-** Stop the search IMMEDIATELY after the signature is found so that the
-** PROM address counter is correctly positioned at the start of the
-** ethernet address for later read out.
-*/
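-/*
-** For illustration: with PROBE_LENGTH = 32 and the 8-byte signature built
-** below, the search examines at most PROBE_LENGTH + sigLength - 1 = 39 PROM
-** bytes before declaring the device absent.
-*/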
-static int __init DevicePresent(u_long ioaddr)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- }
- dev;
- short sigLength = 0;
- s8 data;
- s16 nicsr;
- int i, j, status = 0;
-
- data = inb(DEPCA_PROM); /* clear counter on DEPCA */
- data = inb(DEPCA_PROM); /* read data */
-
- if (data == 0x08) { /* Enable counter on DEPCA */
- nicsr = inb(DEPCA_NICSR);
- nicsr |= AAC;
- outb(nicsr, DEPCA_NICSR);
- }
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
- data = inb(DEPCA_PROM);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) { /* rare case.... */
- j = 1;
- } else {
- j = 0;
- }
- }
- }
-
- if (j != sigLength) {
- status = -ENODEV; /* search failed */
- }
-
- return status;
-}
-
-/*
-** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
-** reason: access the upper half of the PROM with x=0; access the lower half
-** with x=1.
-*/
-static int __init get_hw_addr(struct net_device *dev)
-{
- u_long ioaddr = dev->base_addr;
- struct depca_private *lp = netdev_priv(dev);
- int i, k, tmp, status = 0;
- u_short j, x, chksum;
-
- x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0);
-
- for (i = 0, k = 0, j = 0; j < 3; j++) {
- k <<= 1;
- if (k > 0xffff)
- k -= 0xffff;
-
- k += (u_char) (tmp = inb(DEPCA_PROM + x));
- dev->dev_addr[i++] = (u_char) tmp;
- k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
- dev->dev_addr[i++] = (u_char) tmp;
-
- if (k > 0xffff)
- k -= 0xffff;
- }
- if (k == 0xffff)
- k = 0;
-
- chksum = (u_char) inb(DEPCA_PROM + x);
- chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
- if (k != chksum)
- status = -1;
-
- return status;
-}
-
-/*
-** Load a packet into the shared memory
-*/
-static int load_packet(struct net_device *dev, struct sk_buff *skb)
-{
- struct depca_private *lp = netdev_priv(dev);
- int i, entry, end, len, status = NETDEV_TX_OK;
-
- entry = lp->tx_new; /* Ring around buffer number. */
- end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
- if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */
- /*
- ** Caution: the write order is important here... don't set up the
- ** ownership rights until all the other information is in place.
- */
- if (end < entry) { /* wrapped buffer */
- len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
- memcpy_toio(lp->tx_buff[entry], skb->data, len);
- memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len);
- } else { /* linear buffer */
- memcpy_toio(lp->tx_buff[entry], skb->data, skb->len);
- }
-
- /* set up the buffer descriptors */
- len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
- for (i = entry; i != end; i = (i+1) & lp->txRingMask) {
- /* clean out flags */
- writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
- writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
- writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */
- len -= TX_BUFF_SZ;
- }
- /* clean out flags */
- writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
- writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
- writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
-
- /* start of packet */
- writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
- /* end of packet */
- writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
-
- for (i = end; i != entry; --i) {
- /* ownership of packet */
- writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
- if (i == 0)
- i = lp->txRingMask + 1;
- }
- writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
-
- lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
- } else {
- status = NETDEV_TX_LOCKED;
- }
-
- return status;
-}
-
-static void depca_dbg_open(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- struct depca_init *p = &lp->init_block;
- int i;
-
- if (depca_debug > 1) {
- /* Do not copy the shadow init block into shared memory */
- /* Debugging should not affect normal operation! */
- /* The shadow init block will get copied across during InitRestartDepca */
- printk("%s: depca open with irq %d\n", dev->name, dev->irq);
- printk("Descriptor head addresses (CPU):\n");
- printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring);
- printk("Descriptor addresses (CPU):\nRX: ");
- for (i = 0; i < lp->rxRingMask; i++) {
- if (i < 3) {
- printk("%p ", &lp->rx_ring[i].base);
- }
- }
- printk("...%p\n", &lp->rx_ring[i].base);
- printk("TX: ");
- for (i = 0; i < lp->txRingMask; i++) {
- if (i < 3) {
- printk("%p ", &lp->tx_ring[i].base);
- }
- }
- printk("...%p\n", &lp->tx_ring[i].base);
- printk("\nDescriptor buffers (Device):\nRX: ");
- for (i = 0; i < lp->rxRingMask; i++) {
- if (i < 3) {
- printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
- }
- }
- printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
- printk("TX: ");
- for (i = 0; i < lp->txRingMask; i++) {
- if (i < 3) {
- printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
- }
- }
- printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
- printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start);
- printk(" mode: 0x%4.4x\n", p->mode);
- printk(" physical address: %pM\n", p->phys_addr);
- printk(" multicast hash table: ");
- for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) {
- printk("%2.2x:", p->mcast_table[i]);
- }
- printk("%2.2x\n", p->mcast_table[i]);
- printk(" rx_ring at: 0x%8.8x\n", p->rx_ring);
- printk(" tx_ring at: 0x%8.8x\n", p->tx_ring);
- printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset);
- printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen);
- printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen);
- outw(CSR2, DEPCA_ADDR);
- printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA));
- outw(CSR1, DEPCA_ADDR);
- printk("%4.4x\n", inw(DEPCA_DATA));
- outw(CSR3, DEPCA_ADDR);
- printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
- }
-}
-
-/*
-** Perform IOCTL call functions here. Some are privileged operations and the
-** CAP_NET_ADMIN capability is checked in those cases.
-** The multicast IOCTLs do not work here and are for testing purposes only.
-*/
-static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct depca_private *lp = netdev_priv(dev);
- struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru;
- int i, status = 0;
- u_long ioaddr = dev->base_addr;
- union {
- u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
- u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
- u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
- } tmp;
- unsigned long flags;
- void *buf;
-
- switch (ioc->cmd) {
- case DEPCA_GET_HWADDR: /* Get the hardware address */
- for (i = 0; i < ETH_ALEN; i++) {
- tmp.addr[i] = dev->dev_addr[i];
- }
- ioc->len = ETH_ALEN;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len))
- return -EFAULT;
- break;
-
- case DEPCA_SET_HWADDR: /* Set the hardware address */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN))
- return -EFAULT;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp.addr[i];
- }
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_SET_PROM: /* Set Promiscuous Mode */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- lp->init_block.mode |= PROM; /* Set promiscuous mode */
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
- if(!capable(CAP_NET_ADMIN))
- return -EPERM;
- printk("%s: Boo!\n", dev->name);
- break;
-
- case DEPCA_GET_MCA: /* Get the multicast address table */
- ioc->len = (HASH_TABLE_LEN >> 3);
- if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len))
- return -EFAULT;
- break;
-
- case DEPCA_SET_MCA: /* Set a multicast address */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (ioc->len >= HASH_TABLE_LEN)
- return -EINVAL;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len))
- return -EFAULT;
- set_multicast_list(dev);
- break;
-
- case DEPCA_CLR_MCA: /* Clear all multicast addresses */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- set_multicast_list(dev);
- break;
-
- case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- set_multicast_list(dev);
- break;
-
- case DEPCA_GET_STATS: /* Get the driver statistics */
- ioc->len = sizeof(lp->pktStats);
- buf = kmalloc(ioc->len, GFP_KERNEL);
- if(!buf)
- return -ENOMEM;
- spin_lock_irqsave(&lp->lock, flags);
- memcpy(buf, &lp->pktStats, ioc->len);
- spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, buf, ioc->len))
- status = -EFAULT;
- kfree(buf);
- break;
-
- case DEPCA_CLR_STATS: /* Zero out the driver statistics */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- spin_lock_irqsave(&lp->lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->lock, flags);
- break;
-
- case DEPCA_GET_REG: /* Get the DEPCA Registers */
- i = 0;
- tmp.sval[i++] = inw(DEPCA_NICSR);
- outw(CSR0, DEPCA_ADDR); /* status register */
- tmp.sval[i++] = inw(DEPCA_DATA);
- memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
- ioc->len = i + sizeof(struct depca_init);
- if (copy_to_user(ioc->data, tmp.addr, ioc->len))
- return -EFAULT;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return status;
-}
-
-static int __init depca_module_init (void)
-{
- int err = 0;
-
-#ifdef CONFIG_EISA
- err = eisa_driver_register(&depca_eisa_driver);
- if (err)
- goto err_eisa;
-#endif
- err = platform_driver_register(&depca_isa_driver);
- if (err)
- goto err_eisa;
-
- depca_platform_probe();
- return 0;
-
-err_eisa:
-#ifdef CONFIG_EISA
- eisa_driver_unregister(&depca_eisa_driver);
-#endif
- return err;
-}
-
-static void __exit depca_module_exit (void)
-{
- int i;
-#ifdef CONFIG_EISA
- eisa_driver_unregister (&depca_eisa_driver);
-#endif
- platform_driver_unregister (&depca_isa_driver);
-
- for (i = 0; depca_io_ports[i].iobase; i++) {
- if (depca_io_ports[i].device) {
- depca_io_ports[i].device->dev.platform_data = NULL;
- platform_device_unregister (depca_io_ports[i].device);
- depca_io_ports[i].device = NULL;
- }
- }
-}
-
-module_init (depca_module_init);
-module_exit (depca_module_exit);
diff --git a/drivers/net/ethernet/amd/depca.h b/drivers/net/ethernet/amd/depca.h
deleted file mode 100644
index cdcfe4252c16..000000000000
--- a/drivers/net/ethernet/amd/depca.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- Written 1994 by David C. Davies.
-
- Copyright 1994 David C. Davies. This software may be used and distributed
- according to the terms of the GNU General Public License, incorporated herein by
- reference.
-*/
-
-/*
-** I/O addresses. Note that the 2k buffer option is not supported in
-** this driver.
-*/
-#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */
-#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */
-#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */
-#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */
-#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */
-#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */
-#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */
-#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */
-
-/*
-** These are LANCE registers addressable through DEPCA_ADDR
-*/
-#define CSR0 0
-#define CSR1 1
-#define CSR2 2
-#define CSR3 3
-
-/*
-** NETWORK INTERFACE CSR (NI_CSR) bit definitions
-*/
-
-#define TO 0x0100 /* Time Out for remote boot */
-#define SHE 0x0080 /* SHadow memory Enable */
-#define BS 0x0040 /* Bank Select */
-#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
-#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
-#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
-#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
-#define IM 0x0004 /* Interrupt Mask (1->mask) */
-#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
-#define LED 0x0001 /* LED control */
-
-/*
-** Control and Status Register 0 (CSR0) bit definitions
-*/
-
-#define ERR 0x8000 /* Error summary */
-#define BABL 0x4000 /* Babble transmitter timeout error */
-#define CERR 0x2000 /* Collision Error */
-#define MISS 0x1000 /* Missed packet */
-#define MERR 0x0800 /* Memory Error */
-#define RINT 0x0400 /* Receiver Interrupt */
-#define TINT 0x0200 /* Transmit Interrupt */
-#define IDON 0x0100 /* Initialization Done */
-#define INTR 0x0080 /* Interrupt Flag */
-#define INEA 0x0040 /* Interrupt Enable */
-#define RXON 0x0020 /* Receiver on */
-#define TXON 0x0010 /* Transmitter on */
-#define TDMD 0x0008 /* Transmit Demand */
-#define STOP 0x0004 /* Stop */
-#define STRT 0x0002 /* Start */
-#define INIT 0x0001 /* Initialize */
-#define INTM 0xff00 /* Interrupt Mask */
-#define INTE 0xfff0 /* Interrupt Enable */
-
-/*
-** CONTROL AND STATUS REGISTER 3 (CSR3)
-*/
-
-#define BSWP 0x0004 /* Byte SWaP */
-#define ACON 0x0002 /* ALE control */
-#define BCON 0x0001 /* Byte CONtrol */
-
-/*
-** Initialization Block Mode Register
-*/
-
-#define PROM 0x8000 /* Promiscuous Mode */
-#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
-#define INTL 0x0040 /* Internal Loopback */
-#define DRTY 0x0020 /* Disable Retry */
-#define COLL 0x0010 /* Force Collision */
-#define DTCR 0x0008 /* Disable Transmit CRC */
-#define LOOP 0x0004 /* Loopback */
-#define DTX 0x0002 /* Disable the Transmitter */
-#define DRX 0x0001 /* Disable the Receiver */
-
-/*
-** Receive Message Descriptor 1 (RMD1) bit definitions.
-*/
-
-#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
-#define R_ERR 0x4000 /* Error Summary */
-#define R_FRAM 0x2000 /* Framing Error */
-#define R_OFLO 0x1000 /* Overflow Error */
-#define R_CRC 0x0800 /* CRC Error */
-#define R_BUFF 0x0400 /* Buffer Error */
-#define R_STP 0x0200 /* Start of Packet */
-#define R_ENP 0x0100 /* End of Packet */
-
-/*
-** Transmit Message Descriptor 1 (TMD1) bit definitions.
-*/
-
-#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
-#define T_ERR 0x4000 /* Error Summary */
-#define T_ADD_FCS 0x2000 /* More than 1 retry needed to Xmit */
-#define T_MORE 0x1000 /* >1 retry to transmit packet */
-#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
-#define T_DEF 0x0400 /* Deferred */
-#define T_STP 0x02000000 /* Start of Packet */
-#define T_ENP 0x01000000 /* End of Packet */
-#define T_FLAGS 0xff000000 /* TX Flags Field */
-
-/*
-** Transmit Message Descriptor 3 (TMD3) bit definitions.
-*/
-
-#define TMD3_BUFF 0x8000 /* BUFFer error */
-#define TMD3_UFLO 0x4000 /* UnderFLOw error */
-#define TMD3_RES 0x2000 /* REServed */
-#define TMD3_LCOL 0x1000 /* Late COLlision */
-#define TMD3_LCAR 0x0800 /* Loss of CARrier */
-#define TMD3_RTRY 0x0400 /* ReTRY error */
-
-/*
-** EISA configuration Register (CNFG) bit definitions
-*/
-
-#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
-#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
-#define IRQ11 0x0040 /* Enable -> 1 */
-#define IRQ10 0x0020 /* Enable -> 1 */
-#define IRQ9 0x0010 /* Enable -> 1 */
-#define IRQ5 0x0008 /* Enable -> 1 */
-#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
-#define PADR16 0x0002 /* RAM on 64kB boundary */
-#define PADR17 0x0001 /* RAM on 128kB boundary */
-
-/*
-** Miscellaneous
-*/
-#define HASH_TABLE_LEN 64 /* Bits */
-#define HASH_BITS 0x003f /* 6 LS bits */
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define EISA_EN 0x0001 /* Enable EISA bus buffers */
-#define EISA_ID iobase+0x0080 /* ID long word for EISA card */
-#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-struct depca_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
-#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
-#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
-#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
-#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define DEPCA_GET_MCA 0x06 /* Get the multicast address table */
-#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
-#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */
-#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */
-#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
-#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define DEPCA_GET_REG 0x0c /* Get the Register contents */
-#define DEPCA_SET_REG 0x0d /* Set the Register contents */
-#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
-
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index a227ccdcb9b5..797f847edf13 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -494,19 +494,15 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
- new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
- GFP_ATOMIC);
- if (!new_dma_addr_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list)
goto free_new_tx_ring;
- }
- new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
- GFP_ATOMIC);
- if (!new_skb_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list)
goto free_new_lists;
- }
kfree(lp->tx_skbuff);
kfree(lp->tx_dma_addr);
@@ -564,19 +560,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
}
memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
- new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
- GFP_ATOMIC);
- if (!new_dma_addr_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+ if (!new_dma_addr_list)
goto free_new_rx_ring;
- }
- new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
- GFP_ATOMIC);
- if (!new_skb_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list)
goto free_new_lists;
- }
/* first copy the current receive buffers */
overlap = min(size, lp->rx_ring_size);
@@ -1688,10 +1679,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
memcpy(dev->dev_addr, promaddr, 6);
}
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->perm_addr))
+ if (!is_valid_ether_addr(dev->dev_addr))
memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
@@ -1934,31 +1924,23 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
- if (!lp->tx_dma_addr) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->tx_dma_addr)
return -ENOMEM;
- }
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
- if (!lp->rx_dma_addr) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->rx_dma_addr)
return -ENOMEM;
- }
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
- if (!lp->tx_skbuff) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->tx_skbuff)
return -ENOMEM;
- }
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
- if (!lp->rx_skbuff) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->rx_skbuff)
return -ENOMEM;
- }
return 0;
}
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index c2d696c88e46..6a40290d3727 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1284,8 +1284,8 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "sunlance");
- strcpy(info->version, "2.02");
+ strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strlcpy(info->version, "2.02", sizeof(info->version));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 1ed886d421f8..36d6abd1cfff 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -44,8 +44,8 @@ config ATL1
will be called atl1.
config ATL1E
- tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Atheros L1E Gigabit Ethernet support"
+ depends on PCI
select CRC32
select NET_CORE
select MII
@@ -56,8 +56,8 @@ config ATL1E
will be called atl1e.
config ATL1C
- tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Atheros L1C Gigabit Ethernet support"
+ depends on PCI
select CRC32
select NET_CORE
select MII
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 56d3f697e0c7..1f07fc633ab9 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -21,7 +21,7 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
char atl1c_driver_name[] = "atl1c";
char atl1c_driver_version[] = ATL1C_DRV_VERSION;
@@ -472,7 +472,6 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
@@ -983,11 +982,9 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
rfd_ring->count);
tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
- if (unlikely(!tpd_ring->buffer_info)) {
- dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
- size);
+ if (unlikely(!tpd_ring->buffer_info))
goto err_nomem;
- }
+
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].buffer_info =
(tpd_ring->buffer_info + count);
@@ -1652,6 +1649,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
u16 num_alloc = 0;
u16 rfd_next_to_use, next_next;
struct atl1c_rx_free_desc *rfd_desc;
+ dma_addr_t mapping;
next_next = rfd_next_to_use = rfd_ring->next_to_use;
if (++next_next == rfd_ring->count)
@@ -1678,9 +1676,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- buffer_info->dma = pci_map_single(pdev, vir_addr,
+ mapping = pci_map_single(pdev, vir_addr,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->length = 0;
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+ break;
+ }
+ buffer_info->dma = mapping;
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_FROMDEVICE);
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -2015,7 +2022,29 @@ check_sum:
return 0;
}
-static void atl1c_tx_map(struct atl1c_adapter *adapter,
+static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
+ struct atl1c_tpd_desc *first_tpd,
+ enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
+ struct atl1c_buffer *buffer_info;
+ struct atl1c_tpd_desc *tpd;
+ u16 first_index, index;
+
+ first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
+ index = first_index;
+ while (index != tpd_ring->next_to_use) {
+ tpd = ATL1C_TPD_DESC(tpd_ring, index);
+ buffer_info = &tpd_ring->buffer_info[index];
+ atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+ memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
+ if (++index == tpd_ring->count)
+ index = 0;
+ }
+ tpd_ring->next_to_use = first_index;
+}
+
+static int atl1c_tx_map(struct atl1c_adapter *adapter,
struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
enum atl1c_trans_queue type)
{
@@ -2040,6 +2069,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info->length = map_len;
buffer_info->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)))
+ goto err_dma;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
@@ -2062,6 +2094,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info->dma =
pci_map_single(adapter->pdev, skb->data + mapped_len,
buffer_info->length, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)))
+ goto err_dma;
+
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
@@ -2083,6 +2119,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
frag, 0,
buffer_info->length,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto err_dma;
+
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
ATL1C_PCIMAP_TODEVICE);
@@ -2095,6 +2134,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
/* The last buffer info contains the skb address,
so it will be freed after unmap */
buffer_info->skb = skb;
+
+ return 0;
+
+err_dma:
+ buffer_info->dma = 0;
+ buffer_info->length = 0;
+ return -1;
}
static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
@@ -2157,10 +2203,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
if (skb_network_offset(skb) != ETH_HLEN)
tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
- atl1c_tx_map(adapter, skb, tpd, type);
- atl1c_tx_queue(adapter, skb, tpd, type);
+ if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
+ netif_info(adapter, tx_done, adapter->netdev,
+ "tx-skb droppted due to dma error\n");
+ /* roll back tpd/buffer */
+ atl1c_tx_rollback(adapter, tpd, type);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ dev_kfree_skb(skb);
+ } else {
+ atl1c_tx_queue(adapter, skb, tpd, type);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ }
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -2540,10 +2594,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (atl1c_read_mac_addr(&adapter->hw)) {
/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
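The new error paths above map each buffer, check pci_dma_mapping_error(), and on failure unwind every descriptor written so far before dropping the skb. The interesting part is the index arithmetic of that rollback; here is a self-contained sketch of it with plain arrays standing in for the TPD ring (names and the ring size are illustrative only, not the driver's):

#include <stdio.h>

#define RING_SIZE 8

static int mapped[RING_SIZE];
static unsigned int next_to_use;

/* Walk from first_index up to (but not including) next_to_use, undoing each
 * slot, then rewind next_to_use; this mirrors what atl1c_tx_rollback() does
 * with atl1c_clean_buffer() and the descriptor memset. */
static void rollback(unsigned int first_index)
{
	unsigned int index = first_index;

	while (index != next_to_use) {
		mapped[index] = 0; /* stand-in for unmap + clear descriptor */
		if (++index == RING_SIZE)
			index = 0;
	}
	next_to_use = first_index;
}

int main(void)
{
	unsigned int first = 6;

	next_to_use = first;
	/* pretend three fragments mapped fine, wrapping past the ring end */
	for (int i = 0; i < 3; i++) {
		mapped[next_to_use] = 1;
		if (++next_to_use == RING_SIZE)
			next_to_use = 0;
	}
	rollback(first); /* the fourth mapping "failed" */
	printf("next_to_use rewound to %u\n", next_to_use); /* prints 6 */
	return 0;
}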
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e4466a36d106..92f4734f860d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -819,8 +819,6 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
- netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
- size);
err = -ENOMEM;
goto failed;
}
@@ -2342,7 +2340,6 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 71b3d7daa21d..5b0d9931c720 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3053,7 +3053,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
if (atl1_read_mac_addr(&adapter->hw)) {
/* mark random mac */
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index aab83a2d4e07..1278b47022e0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1433,14 +1433,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
-/* FIXME: do we still need this? */
-#ifdef ETHTOOL_GPERMADDR
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
-#else
if (!is_valid_ether_addr(netdev->dev_addr)) {
-#endif
err = -EIO;
goto err_eeprom;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 77ffbc4a5071..f82eb1699464 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -84,7 +84,6 @@ static int atlx_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atlx_set_mac_addr(&adapter->hw);
return 0;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f55267363f35..3e69b3f88099 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -121,4 +121,22 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
+config BNX2X_SRIOV
+ bool "Broadcom 578xx and 57712 SR-IOV support"
+ depends on BNX2X && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support in the 578xx and 57712 products. This
+ allows for virtual function acceleration in virtual environments.
+
+config BGMAC
+ tristate "BCMA bus GBit core support"
+ depends on BCMA_HOST_SOC && HAS_DMA
+ ---help---
+ This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
+ They can be found on BCM47xx SoCs and provide gigabit ethernet.
+ When using this driver on the BCM4706 you also need to enable
+ BCMA_DRIVER_GMAC_CMN to make it work.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index b7896051d54e..68efa1a3fb88 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
+obj-$(CONFIG_BGMAC) += bgmac.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 219f6226fcb1..a7efec293037 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -809,11 +809,10 @@ static int b44_rx(struct b44 *bp, int budget)
struct sk_buff *copy_skb;
b44_recycle_rx(bp, cons, bp->rx_prod);
- copy_skb = netdev_alloc_skb(bp->dev, len + 2);
+ copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
/* DMA sync done above, copy just the actual packet */
skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
@@ -1518,10 +1517,8 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
u8 pwol_mask[B44_PMASK_SIZE];
pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
- if (!pwol_pattern) {
- pr_err("Memory not available for WOL\n");
+ if (!pwol_pattern)
return;
- }
/* Ipv4 magic packet pattern - pattern 0.*/
memset(pwol_mask, 0, B44_PMASK_SIZE);
@@ -2111,8 +2108,6 @@ static int b44_get_invariants(struct b44 *bp)
return -EINVAL;
}
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
-
bp->imask = IMASK_DEF;
/* XXX - really required?
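The b44 RX-copy hunk above swaps netdev_alloc_skb() plus a manual skb_reserve(skb, 2) for netdev_alloc_skb_ip_align(), which performs the same reservation internally. The point of those bytes is alignment: the IP header follows a 14-byte Ethernet header, so padding by NET_IP_ALIGN (2 on most architectures) puts it on a 4-byte boundary. A tiny arithmetic sketch, with the constants reproduced here only for illustration:

#include <stdio.h>

#define ETH_HLEN     14 /* Ethernet header length */
#define NET_IP_ALIGN  2 /* pad applied by netdev_alloc_skb_ip_align() */

int main(void)
{
	/* Without the pad the IP header starts at offset 14, which is not
	 * 4-byte aligned; with the pad it starts at offset 16, which is. */
	printf("no pad:   IP header at %d (mod 4 = %d)\n",
	       ETH_HLEN, ETH_HLEN % 4);
	printf("with pad: IP header at %d (mod 4 = %d)\n",
	       NET_IP_ALIGN + ETH_HLEN, (NET_IP_ALIGN + ETH_HLEN) % 4);
	return 0;
}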
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 39387d67b722..7d81e059e811 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -799,7 +799,7 @@ static int bcm_enet_open(struct net_device *dev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
priv->mii_bus->id, priv->phy_id);
- phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
+ phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
@@ -886,10 +886,9 @@ static int bcm_enet_open(struct net_device *dev)
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
- priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+ priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
@@ -900,10 +899,9 @@ static int bcm_enet_open(struct net_device *dev)
spin_lock_init(&priv->tx_lock);
/* init & fill rx ring with skbs */
- priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+ priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->rx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -1227,10 +1225,11 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
- strncpy(drvinfo->version, bcm_enet_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "bcm63xx", 32);
+ strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, bcm_enet_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
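Besides dropping the redundant OOM messages, the bcm63xx hunks above convert kzalloc(n * size, ...) into kcalloc(n, size, ...), which checks the multiplication for overflow before allocating. A minimal userspace sketch of that guard follows; checked_calloc() is a hypothetical helper written only to make the check explicit (calloc() already performs it):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Refuse the request if n * size would wrap, the way kcalloc() does. */
static void *checked_calloc(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL; /* multiplication would overflow */
	return calloc(n, size); /* zeroed, like kzalloc()/kcalloc() */
}

int main(void)
{
	void *ok = checked_calloc(64, sizeof(void *));
	void *bad = checked_calloc(SIZE_MAX / 2, 4);

	printf("normal request:      %s\n", ok ? "allocated" : "failed");
	printf("overflowing request: %s\n", bad ? "allocated" : "rejected");
	free(ok);
	return 0;
}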
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
new file mode 100644
index 000000000000..3fd32880e526
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -0,0 +1,1461 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bgmac.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <asm/mach-bcm47xx/nvram.h>
+
+static const struct bcma_device_id bgmac_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
+
+static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+ u32 value, int timeout)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < timeout / 10; i++) {
+ val = bcma_read32(core, reg);
+ if ((val & mask) == value)
+ return true;
+ udelay(10);
+ }
+ pr_err("Timeout waiting for reg 0x%X\n", reg);
+ return false;
+}
+
+/**************************************************
+ * DMA
+ **************************************************/
+
+static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ u32 val;
+ int i;
+
+ if (!ring->mmio_base)
+ return;
+
+ /* Suspend DMA TX ring first.
+ * bgmac_wait_value doesn't support waiting for any one of several values,
+ * so implement the whole loop here.
+ */
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
+ BGMAC_DMA_TX_SUSPEND);
+ for (i = 0; i < 10000 / 10; i++) {
+ val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ val &= BGMAC_DMA_TX_STAT;
+ if (val == BGMAC_DMA_TX_STAT_DISABLED ||
+ val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
+ val == BGMAC_DMA_TX_STAT_STOPPED) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
+ ring->mmio_base, val);
+
+ /* Remove SUSPEND bit */
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
+ if (!bgmac_wait_value(bgmac->core,
+ ring->mmio_base + BGMAC_DMA_TX_STATUS,
+ BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
+ 10000)) {
+ bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
+ ring->mmio_base);
+ udelay(300);
+ val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
+ bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
+ ring->mmio_base);
+ }
+}
+
+static void bgmac_dma_tx_enable(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ u32 ctl;
+
+ ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+ ctl |= BGMAC_DMA_TX_ENABLE;
+ ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
+}
+
+static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct net_device *net_dev = bgmac->net_dev;
+ struct bgmac_dma_desc *dma_desc;
+ struct bgmac_slot_info *slot;
+ u32 ctl0, ctl1;
+ int free_slots;
+
+ if (skb->len > BGMAC_DESC_CTL1_LEN) {
+ bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
+ goto err_stop_drop;
+ }
+
+ if (ring->start <= ring->end)
+ free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
+ else
+ free_slots = ring->start - ring->end;
+ if (free_slots == 1) {
+ bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
+ netif_stop_queue(net_dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ slot = &ring->slots[ring->end];
+ slot->skb = skb;
+ slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+ bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
+ ring->mmio_base);
+ goto err_stop_drop;
+ }
+
+ ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
+ if (ring->end == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
+
+ dma_desc = ring->cpu_base;
+ dma_desc += ring->end;
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+
+ wmb();
+
+ /* Increase ring->end to point to the next empty slot. We tell the
+ * hardware the first slot it should *not* read.
+ */
+ if (++ring->end >= BGMAC_TX_RING_SLOTS)
+ ring->end = 0;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+ ring->end * sizeof(struct bgmac_dma_desc));
+
+ /* Always keep one slot free to allow detecting buggy calls. */
+ if (--free_slots == 1)
+ netif_stop_queue(net_dev);
+
+ return NETDEV_TX_OK;
+
+err_stop_drop:
+ netif_stop_queue(net_dev);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Free transmitted packets */
+static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ int empty_slot;
+ bool freed = false;
+
+ /* The last slot that hardware didn't consume yet */
+ empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ empty_slot &= BGMAC_DMA_TX_STATDPTR;
+ empty_slot /= sizeof(struct bgmac_dma_desc);
+
+ while (ring->start != empty_slot) {
+ struct bgmac_slot_info *slot = &ring->slots[ring->start];
+
+ if (slot->skb) {
+ /* Unmap no longer used buffer */
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ slot->skb->len, DMA_TO_DEVICE);
+ slot->dma_addr = 0;
+
+ /* Free memory! :) */
+ dev_kfree_skb(slot->skb);
+ slot->skb = NULL;
+ } else {
+ bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
+ ring->start, ring->end);
+ }
+
+ if (++ring->start >= BGMAC_TX_RING_SLOTS)
+ ring->start = 0;
+ freed = true;
+ }
+
+ if (freed && netif_queue_stopped(bgmac->net_dev))
+ netif_wake_queue(bgmac->net_dev);
+}
+
+static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ if (!ring->mmio_base)
+ return;
+
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
+ if (!bgmac_wait_value(bgmac->core,
+ ring->mmio_base + BGMAC_DMA_RX_STATUS,
+ BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
+ 10000))
+ bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
+ ring->mmio_base);
+}
+
+static void bgmac_dma_rx_enable(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ u32 ctl;
+
+ ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+ ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+ ctl |= BGMAC_DMA_RX_ENABLE;
+ ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
+ ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
+ ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
+}
+
+static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
+ struct bgmac_slot_info *slot)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_rx_header *rx;
+
+ /* Alloc skb */
+ slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+ if (!slot->skb) {
+ bgmac_err(bgmac, "Allocation of skb failed!\n");
+ return -ENOMEM;
+ }
+
+ /* Poison - if everything goes fine, hardware will overwrite it */
+ rx = (struct bgmac_rx_header *)slot->skb->data;
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+
+ /* Map skb for the DMA */
+ slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+ bgmac_err(bgmac, "DMA mapping error\n");
+ return -ENOMEM;
+ }
+ if (slot->dma_addr & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ return 0;
+}
+
+static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+ int weight)
+{
+ u32 end_slot;
+ int handled = 0;
+
+ end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
+ end_slot &= BGMAC_DMA_RX_STATDPTR;
+ end_slot /= sizeof(struct bgmac_dma_desc);
+
+ ring->end = end_slot;
+
+ while (ring->start != ring->end) {
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot = &ring->slots[ring->start];
+ struct sk_buff *skb = slot->skb;
+ struct sk_buff *new_skb;
+ struct bgmac_rx_header *rx;
+ u16 len, flags;
+
+ /* Unmap buffer to make it accessible to the CPU */
+ dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ /* Get info from the header */
+ rx = (struct bgmac_rx_header *)skb->data;
+ len = le16_to_cpu(rx->len);
+ flags = le16_to_cpu(rx->flags);
+
+ /* Check for poison and drop or pass the packet */
+ if (len == 0xdead && flags == 0xbeef) {
+ bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+ ring->start);
+ } else {
+ new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
+ if (new_skb) {
+ skb_put(new_skb, len);
+ skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
+ new_skb->data,
+ len);
+ new_skb->protocol =
+ eth_type_trans(new_skb, bgmac->net_dev);
+ netif_receive_skb(new_skb);
+ handled++;
+ } else {
+ bgmac->net_dev->stats.rx_dropped++;
+ bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+ }
+
+ /* Poison the old skb */
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+ }
+
+ /* Make it back accessible to the hardware */
+ dma_sync_single_for_device(dma_dev, slot->dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ if (++ring->start >= BGMAC_RX_RING_SLOTS)
+ ring->start = 0;
+
+ if (handled >= weight) /* Should never be greater */
+ break;
+ }
+
+ return handled;
+}
+
+/* Does ring support unaligned addressing? */
+static bool bgmac_dma_unaligned(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring,
+ enum bgmac_dma_ring_type ring_type)
+{
+ switch (ring_type) {
+ case BGMAC_DMA_RING_TX:
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+ 0xff0);
+ if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
+ return true;
+ break;
+ case BGMAC_DMA_RING_RX:
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+ 0xff0);
+ if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
+ return true;
+ break;
+ }
+ return false;
+}
+
+static void bgmac_dma_ring_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot;
+ int size;
+ int i;
+
+ for (i = 0; i < ring->num_slots; i++) {
+ slot = &ring->slots[i];
+ if (slot->skb) {
+ if (slot->dma_addr)
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ slot->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb(slot->skb);
+ }
+ }
+
+ if (ring->cpu_base) {
+ /* Free ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ dma_free_coherent(dma_dev, size, ring->cpu_base,
+ ring->dma_base);
+ }
+}
+
+static void bgmac_dma_free(struct bgmac *bgmac)
+{
+ int i;
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+ bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+ bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
+}
+
+static int bgmac_dma_alloc(struct bgmac *bgmac)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_dma_ring *ring;
+ static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
+ BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
+ int size; /* ring size: different for Tx and Rx */
+ int err;
+ int i;
+
+ BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
+ BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
+
+ if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
+ bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+ ring = &bgmac->tx_ring[i];
+ ring->num_slots = BGMAC_TX_RING_SLOTS;
+ ring->mmio_base = ring_base[i];
+ if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
+ bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
+ ring->mmio_base);
+
+ /* Alloc ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
+ if (!ring->cpu_base) {
+ bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
+ ring->mmio_base);
+ goto err_dma_free;
+ }
+ if (ring->dma_base & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ /* No need to alloc TX slots yet */
+ }
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+ ring->num_slots = BGMAC_RX_RING_SLOTS;
+ ring->mmio_base = ring_base[i];
+ if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
+ bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
+ ring->mmio_base);
+
+ /* Alloc ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
+ if (!ring->cpu_base) {
+ bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
+ ring->mmio_base);
+ err = -ENOMEM;
+ goto err_dma_free;
+ }
+ if (ring->dma_base & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ /* Alloc RX slots */
+ for (i = 0; i < ring->num_slots; i++) {
+ err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[i]);
+ if (err) {
+ bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
+ goto err_dma_free;
+ }
+ }
+ }
+
+ return 0;
+
+err_dma_free:
+ bgmac_dma_free(bgmac);
+ return -ENOMEM;
+}
+
+static void bgmac_dma_init(struct bgmac *bgmac)
+{
+ struct bgmac_dma_ring *ring;
+ struct bgmac_dma_desc *dma_desc;
+ u32 ctl0, ctl1;
+ int i;
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+ ring = &bgmac->tx_ring[i];
+
+ /* We don't implement unaligned addressing, so enable first */
+ bgmac_dma_tx_enable(bgmac, ring);
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+ lower_32_bits(ring->dma_base));
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
+ upper_32_bits(ring->dma_base));
+
+ ring->start = 0;
+ ring->end = 0; /* Points to the slot that should *not* be read */
+ }
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+
+ /* We don't implement unaligned addressing, so enable first */
+ bgmac_dma_rx_enable(bgmac, ring);
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+ lower_32_bits(ring->dma_base));
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
+ upper_32_bits(ring->dma_base));
+
+ for (i = 0, dma_desc = ring->cpu_base; i < ring->num_slots;
+ i++, dma_desc++) {
+ ctl0 = ctl1 = 0;
+
+ if (i == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+ /* Is there any BGMAC device that requires extension? */
+ /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+ * B43_DMA64_DCTL1_ADDREXT_MASK;
+ */
+
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[i].dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[i].dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+ }
+
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+ ring->num_slots * sizeof(struct bgmac_dma_desc));
+
+ ring->start = 0;
+ ring->end = 0;
+ }
+}
+
+/**************************************************
+ * PHY ops
+ **************************************************/
+
+static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
+ BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
+ BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
+ BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
+ BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bgmac->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ tmp = BGMAC_PA_START;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
+ bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return 0xffff;
+ }
+
+ return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
+static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bgmac->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+ if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+ bgmac_warn(bgmac, "Error setting MDIO int\n");
+
+ tmp = BGMAC_PA_START;
+ tmp |= BGMAC_PA_WRITE;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ tmp |= value;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
+ bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
+static void bgmac_phy_force(struct bgmac *bgmac)
+{
+ u16 ctl;
+ u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
+ BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
+
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ if (bgmac->autoneg)
+ return;
+
+ ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
+ ctl &= mask;
+ if (bgmac->full_duplex)
+ ctl |= BGMAC_PHY_CTL_DUPLEX;
+ if (bgmac->speed == BGMAC_SPEED_100)
+ ctl |= BGMAC_PHY_CTL_SPEED_100;
+ else if (bgmac->speed == BGMAC_SPEED_1000)
+ ctl |= BGMAC_PHY_CTL_SPEED_1000;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
+static void bgmac_phy_advertise(struct bgmac *bgmac)
+{
+ u16 adv;
+
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ if (!bgmac->autoneg)
+ return;
+
+ /* Adv selected 10/100 speeds */
+ adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
+ adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
+ BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
+ adv |= BGMAC_PHY_ADV_10HALF;
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
+ adv |= BGMAC_PHY_ADV_100HALF;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
+ adv |= BGMAC_PHY_ADV_10FULL;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
+ adv |= BGMAC_PHY_ADV_100FULL;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
+
+ /* Adv selected 1000 speeds */
+ adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
+ adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
+ adv |= BGMAC_PHY_ADV2_1000HALF;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
+ adv |= BGMAC_PHY_ADV2_1000FULL;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
+
+ /* Restart */
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
+ bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
+ BGMAC_PHY_CTL_RESTART);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
+static void bgmac_phy_init(struct bgmac *bgmac)
+{
+ struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+ struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+ u8 i;
+
+ if (ci->id == BCMA_CHIP_ID_BCM5356) {
+ for (i = 0; i < 5; i++) {
+ bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
+ bgmac_phy_write(bgmac, i, 0x15, 0x0100);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ }
+ }
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
+ bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
+ bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
+ for (i = 0; i < 5; i++) {
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x16, 0x5284);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ bgmac_phy_write(bgmac, i, 0x17, 0x0010);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x16, 0x5296);
+ bgmac_phy_write(bgmac, i, 0x17, 0x1073);
+ bgmac_phy_write(bgmac, i, 0x17, 0x9073);
+ bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
+ bgmac_phy_write(bgmac, i, 0x17, 0x9273);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
+static void bgmac_phy_reset(struct bgmac *bgmac)
+{
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
+ BGMAC_PHY_CTL_RESET);
+ udelay(100);
+ if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
+ BGMAC_PHY_CTL_RESET)
+ bgmac_err(bgmac, "PHY reset failed\n");
+ bgmac_phy_init(bgmac);
+}
+
+/**************************************************
+ * Chip ops
+ **************************************************/
+
+/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
+ * there is nothing to change? Try it after stabilizing the driver.
+ */
+static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+ bool force)
+{
+ u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ u32 new_val = (cmdcfg & mask) | set;
+
+ bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
+ udelay(2);
+
+ if (new_val != cmdcfg || force)
+ bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
+ udelay(2);
+}
+
+static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+{
+ u32 tmp;
+
+ tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ tmp = (addr[4] << 8) | addr[5];
+ bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+}
+
+static void bgmac_set_rx_mode(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ if (net_dev->flags & IFF_PROMISC)
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ else
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+}
+
+#if 0 /* We don't use these registers yet */
+static void bgmac_chip_stats_update(struct bgmac *bgmac)
+{
+ int i;
+
+ if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
+ for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
+ bgmac->mib_tx_regs[i] =
+ bgmac_read(bgmac,
+ BGMAC_TX_GOOD_OCTETS + (i * 4));
+ for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
+ bgmac->mib_rx_regs[i] =
+ bgmac_read(bgmac,
+ BGMAC_RX_GOOD_OCTETS + (i * 4));
+ }
+
+ /* TODO: what else? how to handle BCM4706? Specs are needed */
+}
+#endif
+
+static void bgmac_clear_mib(struct bgmac *bgmac)
+{
+ int i;
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
+ return;
+
+ bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
+ for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
+ bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
+ for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
+ bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
+static void bgmac_speed(struct bgmac *bgmac, int speed)
+{
+ u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+ u32 set = 0;
+
+ if (speed & BGMAC_SPEED_10)
+ set |= BGMAC_CMDCFG_ES_10;
+ if (speed & BGMAC_SPEED_100)
+ set |= BGMAC_CMDCFG_ES_100;
+ if (speed & BGMAC_SPEED_1000)
+ set |= BGMAC_CMDCFG_ES_1000;
+ if (!bgmac->full_duplex)
+ set |= BGMAC_CMDCFG_HD;
+ bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+}
+
+static void bgmac_miiconfig(struct bgmac *bgmac)
+{
+ u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
+ BGMAC_DS_MM_SHIFT;
+ if (imode == 0 || imode == 1) {
+ if (bgmac->autoneg)
+ bgmac_speed(bgmac, BGMAC_SPEED_100);
+ else
+ bgmac_speed(bgmac, bgmac->speed);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
+static void bgmac_chip_reset(struct bgmac *bgmac)
+{
+ struct bcma_device *core = bgmac->core;
+ struct bcma_bus *bus = core->bus;
+ struct bcma_chipinfo *ci = &bus->chipinfo;
+ u32 flags = 0;
+ u32 iost;
+ int i;
+
+ if (bcma_core_is_enabled(core)) {
+ if (!bgmac->stats_grabbed) {
+ /* bgmac_chip_stats_update(bgmac); */
+ bgmac->stats_grabbed = true;
+ }
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+ bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
+
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ udelay(1);
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+ bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
+
+ /* TODO: Clear software multicast filter list */
+ }
+
+ iost = bcma_aread32(core, BCMA_IOST);
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
+ iost &= ~BGMAC_BCMA_IOST_ATTACHED;
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+
+ bcma_core_enable(core, flags);
+
+ if (core->id.rev > 2) {
+ bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
+ bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
+ 1000);
+ }
+
+ if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
+ ci->id == BCMA_CHIP_ID_BCM53572) {
+ struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+ u8 et_swtype = 0;
+ u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
+ BGMAC_CHIPCTL_1_IF_TYPE_RMII;
+ char buf[2];
+
+ if (nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (kstrtou8(buf, 0, &et_swtype))
+ bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
+ buf);
+ et_swtype &= 0x0f;
+ et_swtype <<= 4;
+ sw_type = et_swtype;
+ } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
+ sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
+ } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+ sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
+ BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
+ }
+ bcma_chipco_chipctl_maskset(cc, 1,
+ ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
+ BGMAC_CHIPCTL_1_SW_TYPE_MASK),
+ sw_type);
+ }
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ bcma_awrite32(core, BCMA_IOCTL,
+ bcma_aread32(core, BCMA_IOCTL) &
+ ~BGMAC_BCMA_IOCTL_SW_RESET);
+
+ /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
+ * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
+ * BGMAC_CMDCFG is read _after_ putting the chip into reset. So it has to
+ * be kept until taking the MAC out of reset.
+ */
+ bgmac_cmdcfg_maskset(bgmac,
+ ~(BGMAC_CMDCFG_TE |
+ BGMAC_CMDCFG_RE |
+ BGMAC_CMDCFG_RPI |
+ BGMAC_CMDCFG_TAI |
+ BGMAC_CMDCFG_HD |
+ BGMAC_CMDCFG_ML |
+ BGMAC_CMDCFG_CFE |
+ BGMAC_CMDCFG_RL |
+ BGMAC_CMDCFG_RED |
+ BGMAC_CMDCFG_PE |
+ BGMAC_CMDCFG_TPI |
+ BGMAC_CMDCFG_PAD_EN |
+ BGMAC_CMDCFG_PF),
+ BGMAC_CMDCFG_PROM |
+ BGMAC_CMDCFG_NLC |
+ BGMAC_CMDCFG_CFE |
+ BGMAC_CMDCFG_SR,
+ false);
+
+ bgmac_clear_mib(bgmac);
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
+ bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
+ BCMA_GMAC_CMN_PC_MTE);
+ else
+ bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
+ bgmac_miiconfig(bgmac);
+ bgmac_phy_init(bgmac);
+
+ bgmac->int_status = 0;
+}
+
+static void bgmac_chip_intrs_on(struct bgmac *bgmac)
+{
+ bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
+}
+
+static void bgmac_chip_intrs_off(struct bgmac *bgmac)
+{
+ bgmac_write(bgmac, BGMAC_INT_MASK, 0);
+ bgmac_read(bgmac, BGMAC_INT_MASK);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
+static void bgmac_enable(struct bgmac *bgmac)
+{
+ struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+ u32 cmdcfg;
+ u32 mode;
+ u32 rxq_ctl;
+ u32 fl_ctl;
+ u16 bp_clk;
+ u8 mdp;
+
+ cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
+ BGMAC_CMDCFG_SR, true);
+ udelay(2);
+ cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
+ bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+
+ mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
+ BGMAC_DS_MM_SHIFT;
+ if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
+ bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
+ if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
+ bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
+ BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
+
+ switch (ci->id) {
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ case BCMA_CHIP_ID_BCM53572:
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM47162:
+ fl_ctl = 0x03cb04cb;
+ if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+ ci->id == BCMA_CHIP_ID_BCM4749 ||
+ ci->id == BCMA_CHIP_ID_BCM53572)
+ fl_ctl = 0x2300e1;
+ bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
+ bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+ break;
+ }
+
+ rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+ rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
+ bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
+ mdp = (bp_clk * 128 / 1000) - 3;
+ rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+ bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
+static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
+{
+ struct bgmac_dma_ring *ring;
+ int i;
+
+ /* 1 interrupt per received frame */
+ bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
+
+ /* Enable 802.3x tx flow control (honor received PAUSE frames) */
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+
+ bgmac_set_rx_mode(bgmac->net_dev);
+
+ bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
+
+ if (bgmac->loopback)
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ else
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+
+ bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+
+ if (!bgmac->autoneg) {
+ bgmac_speed(bgmac, bgmac->speed);
+ bgmac_phy_force(bgmac);
+ } else if (bgmac->speed) { /* if there is anything to adv */
+ bgmac_phy_advertise(bgmac);
+ }
+
+ if (full_init) {
+ bgmac_dma_init(bgmac);
+ if (1) /* FIXME: is there any case we don't want IRQs? */
+ bgmac_chip_intrs_on(bgmac);
+ } else {
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+ bgmac_dma_rx_enable(bgmac, ring);
+ }
+ }
+
+ bgmac_enable(bgmac);
+}
+
+static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
+{
+ struct bgmac *bgmac = netdev_priv(dev_id);
+
+ u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
+ int_status &= bgmac->int_mask;
+
+ if (!int_status)
+ return IRQ_NONE;
+
+ /* Ack */
+ bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);
+
+ /* Disable new interrupts until handling existing ones */
+ bgmac_chip_intrs_off(bgmac);
+
+ bgmac->int_status = int_status;
+
+ napi_schedule(&bgmac->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int bgmac_poll(struct napi_struct *napi, int weight)
+{
+ struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
+ struct bgmac_dma_ring *ring;
+ int handled = 0;
+
+ if (bgmac->int_status & BGMAC_IS_TX0) {
+ ring = &bgmac->tx_ring[0];
+ bgmac_dma_tx_free(bgmac, ring);
+ bgmac->int_status &= ~BGMAC_IS_TX0;
+ }
+
+ if (bgmac->int_status & BGMAC_IS_RX) {
+ ring = &bgmac->rx_ring[0];
+ handled += bgmac_dma_rx_read(bgmac, ring, weight);
+ bgmac->int_status &= ~BGMAC_IS_RX;
+ }
+
+ if (bgmac->int_status) {
+ bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
+ bgmac->int_status = 0;
+ }
+
+ if (handled < weight)
+ napi_complete(napi);
+
+ bgmac_chip_intrs_on(bgmac);
+
+ return handled;
+}
+
+/**************************************************
+ * net_device_ops
+ **************************************************/
+
+static int bgmac_open(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ int err = 0;
+
+ bgmac_chip_reset(bgmac);
+ /* The specs mention reclaiming rings here, but we do that in DMA init */
+ bgmac_chip_init(bgmac, true);
+
+ err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, net_dev);
+ if (err < 0) {
+ bgmac_err(bgmac, "IRQ request error: %d!\n", err);
+ goto err_out;
+ }
+ napi_enable(&bgmac->napi);
+
+ netif_carrier_on(net_dev);
+
+err_out:
+ return err;
+}
+
+static int bgmac_stop(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ netif_carrier_off(net_dev);
+
+ napi_disable(&bgmac->napi);
+ bgmac_chip_intrs_off(bgmac);
+ free_irq(bgmac->core->irq, net_dev);
+
+ bgmac_chip_reset(bgmac);
+
+ return 0;
+}
+
+static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct bgmac_dma_ring *ring;
+
+ /* No QOS support yet */
+ ring = &bgmac->tx_ring[0];
+ return bgmac_dma_tx_add(bgmac, ring, skb);
+}
+
+static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(net_dev, addr);
+ if (ret < 0)
+ return ret;
+ bgmac_write_mac_address(bgmac, (u8 *)addr);
+ eth_commit_mac_addr_change(net_dev, addr);
+ return 0;
+}
+
+static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = bgmac->phyaddr;
+ /* fallthru */
+ case SIOCGMIIREG:
+ if (!netif_running(net_dev))
+ return -EAGAIN;
+ data->val_out = bgmac_phy_read(bgmac, data->phy_id,
+ data->reg_num & 0x1f);
+ return 0;
+ case SIOCSMIIREG:
+ if (!netif_running(net_dev))
+ return -EAGAIN;
+ bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
+ data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops bgmac_netdev_ops = {
+ .ndo_open = bgmac_open,
+ .ndo_stop = bgmac_stop,
+ .ndo_start_xmit = bgmac_start_xmit,
+ .ndo_set_rx_mode = bgmac_set_rx_mode,
+ .ndo_set_mac_address = bgmac_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = bgmac_ioctl,
+};
+
+/**************************************************
+ * ethtool_ops
+ **************************************************/
+
+static int bgmac_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg;
+
+ if (bgmac->autoneg) {
+ WARN_ON(cmd->advertising);
+ if (bgmac->full_duplex) {
+ if (bgmac->speed & BGMAC_SPEED_10)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (bgmac->speed & BGMAC_SPEED_100)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (bgmac->speed & BGMAC_SPEED_1000)
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ } else {
+ if (bgmac->speed & BGMAC_SPEED_10)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (bgmac->speed & BGMAC_SPEED_100)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (bgmac->speed & BGMAC_SPEED_1000)
+ cmd->advertising |= ADVERTISED_1000baseT_Half;
+ }
+ } else {
+ switch (bgmac->speed) {
+ case BGMAC_SPEED_10:
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ break;
+ case BGMAC_SPEED_100:
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ break;
+ case BGMAC_SPEED_1000:
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ break;
+ }
+ }
+
+ cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+
+ cmd->autoneg = bgmac->autoneg;
+
+ return 0;
+}
+
+#if 0
+static int bgmac_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ return -1;
+}
+#endif
+
+static void bgmac_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bgmac_ethtool_ops = {
+ .get_settings = bgmac_get_settings,
+ .get_drvinfo = bgmac_get_drvinfo,
+};
+
+/**************************************************
+ * BCMA bus ops
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
+static int bgmac_probe(struct bcma_device *core)
+{
+ struct net_device *net_dev;
+ struct bgmac *bgmac;
+ struct ssb_sprom *sprom = &core->bus->sprom;
+ u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
+ int err;
+
+ /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
+ if (core->core_unit > 1) {
+ pr_err("Unsupported core_unit %d\n", core->core_unit);
+ return -ENOTSUPP;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
+ eth_random_addr(mac);
+ dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
+ }
+
+ /* Allocation and references */
+ net_dev = alloc_etherdev(sizeof(*bgmac));
+ if (!net_dev)
+ return -ENOMEM;
+ net_dev->netdev_ops = &bgmac_netdev_ops;
+ net_dev->irq = core->irq;
+ SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+ bgmac = netdev_priv(net_dev);
+ bgmac->net_dev = net_dev;
+ bgmac->core = core;
+ bcma_set_drvdata(core, bgmac);
+
+ /* Defaults */
+ bgmac->autoneg = true;
+ bgmac->full_duplex = true;
+ bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
+ memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
+
+ /* On BCM4706 we need common core to access PHY */
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+ !core->bus->drv_gmac_cmn.core) {
+ bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
+ err = -ENODEV;
+ goto err_netdev_free;
+ }
+ bgmac->cmn = core->bus->drv_gmac_cmn.core;
+
+ bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
+ sprom->et0phyaddr;
+ bgmac->phyaddr &= BGMAC_PHY_MASK;
+ if (bgmac->phyaddr == BGMAC_PHY_MASK) {
+ bgmac_err(bgmac, "No PHY found\n");
+ err = -ENODEV;
+ goto err_netdev_free;
+ }
+ bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
+ bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
+ bgmac_err(bgmac, "PCI setup not implemented\n");
+ err = -ENOTSUPP;
+ goto err_netdev_free;
+ }
+
+ bgmac_chip_reset(bgmac);
+
+ err = bgmac_dma_alloc(bgmac);
+ if (err) {
+ bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
+ goto err_netdev_free;
+ }
+
+ bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
+ if (nvram_getenv("et0_no_txint", NULL, 0) == 0)
+ bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
+
+ /* TODO: reset the external phy. Specs are needed */
+ bgmac_phy_reset(bgmac);
+
+ bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
+ BGMAC_BFL_ENETROBO);
+ if (bgmac->has_robosw)
+ bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
+
+ if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+
+ err = register_netdev(bgmac->net_dev);
+ if (err) {
+ bgmac_err(bgmac, "Cannot register net device\n");
+ err = -ENOTSUPP;
+ goto err_dma_free;
+ }
+
+ netif_carrier_off(net_dev);
+
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
+ return 0;
+
+err_dma_free:
+ bgmac_dma_free(bgmac);
+
+err_netdev_free:
+ bcma_set_drvdata(core, NULL);
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static void bgmac_remove(struct bcma_device *core)
+{
+ struct bgmac *bgmac = bcma_get_drvdata(core);
+
+ netif_napi_del(&bgmac->napi);
+ unregister_netdev(bgmac->net_dev);
+ bgmac_dma_free(bgmac);
+ bcma_set_drvdata(core, NULL);
+ free_netdev(bgmac->net_dev);
+}
+
+static struct bcma_driver bgmac_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bgmac_bcma_tbl,
+ .probe = bgmac_probe,
+ .remove = bgmac_remove,
+};
+
+static int __init bgmac_init(void)
+{
+ int err;
+
+ err = bcma_driver_register(&bgmac_bcma_driver);
+ if (err)
+ return err;
+ pr_info("Broadcom 47xx GBit MAC driver loaded\n");
+
+ return 0;
+}
+
+static void __exit bgmac_exit(void)
+{
+ bcma_driver_unregister(&bgmac_bcma_driver);
+}
+
+module_init(bgmac_init)
+module_exit(bgmac_exit)
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
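bgmac_dma_tx_add() above works out the number of free TX slots purely from the start/end indices and deliberately leaves one slot unused, so a full ring can be told apart from an empty one. A standalone sketch of that arithmetic, with the ring size shrunk purely for illustration:

#include <stdio.h>

#define TX_RING_SLOTS 8

/* start = oldest slot not yet reclaimed, end = next slot to fill.
 * Same formula as bgmac_dma_tx_add(): when start <= end the used region
 * wraps, so add the ring size back in. */
static int free_slots(int start, int end)
{
	if (start <= end)
		return start - end + TX_RING_SLOTS;
	return start - end;
}

int main(void)
{
	printf("empty ring:     %d free\n", free_slots(0, 0)); /* 8 */
	printf("three queued:   %d free\n", free_slots(0, 3)); /* 5 */
	printf("wrapped around: %d free\n", free_slots(6, 2)); /* 4 */
	printf("one slot left:  %d free\n", free_slots(3, 2)); /* 1: queue is stopped */
	return 0;
}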
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
new file mode 100644
index 000000000000..4ede614c81f8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -0,0 +1,453 @@
+#ifndef _BGMAC_H
+#define _BGMAC_H
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define bgmac_err(bgmac, fmt, ...) \
+ dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_warn(bgmac, fmt, ...) \
+ dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_info(bgmac, fmt, ...) \
+ dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_dbg(bgmac, fmt, ...) \
+ dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+
+#include <linux/bcma/bcma.h>
+#include <linux/netdevice.h>
+
+#define BGMAC_DEV_CTL 0x000
+#define BGMAC_DC_TSM 0x00000002
+#define BGMAC_DC_CFCO 0x00000004
+#define BGMAC_DC_RLSS 0x00000008
+#define BGMAC_DC_MROR 0x00000010
+#define BGMAC_DC_FCM_MASK 0x00000060
+#define BGMAC_DC_FCM_SHIFT 5
+#define BGMAC_DC_NAE 0x00000080
+#define BGMAC_DC_TF 0x00000100
+#define BGMAC_DC_RDS_MASK 0x00030000
+#define BGMAC_DC_RDS_SHIFT 16
+#define BGMAC_DC_TDS_MASK 0x000c0000
+#define BGMAC_DC_TDS_SHIFT 18
+#define BGMAC_DEV_STATUS 0x004 /* Configuration of the interface */
+#define BGMAC_DS_RBF 0x00000001
+#define BGMAC_DS_RDF 0x00000002
+#define BGMAC_DS_RIF 0x00000004
+#define BGMAC_DS_TBF 0x00000008
+#define BGMAC_DS_TDF 0x00000010
+#define BGMAC_DS_TIF 0x00000020
+#define BGMAC_DS_PO 0x00000040
+#define BGMAC_DS_MM_MASK 0x00000300 /* Mode of the interface */
+#define BGMAC_DS_MM_SHIFT 8
+#define BGMAC_BIST_STATUS 0x00c
+#define BGMAC_INT_STATUS 0x020 /* Interrupt status */
+#define BGMAC_IS_MRO 0x00000001
+#define BGMAC_IS_MTO 0x00000002
+#define BGMAC_IS_TFD 0x00000004
+#define BGMAC_IS_LS 0x00000008
+#define BGMAC_IS_MDIO 0x00000010
+#define BGMAC_IS_MR 0x00000020
+#define BGMAC_IS_MT 0x00000040
+#define BGMAC_IS_TO 0x00000080
+#define BGMAC_IS_DESC_ERR 0x00000400 /* Descriptor error */
+#define BGMAC_IS_DATA_ERR 0x00000800 /* Data error */
+#define BGMAC_IS_DESC_PROT_ERR 0x00001000 /* Descriptor protocol error */
+#define BGMAC_IS_RX_DESC_UNDERF 0x00002000 /* Receive descriptor underflow */
+#define BGMAC_IS_RX_F_OVERF 0x00004000 /* Receive FIFO overflow */
+#define BGMAC_IS_TX_F_UNDERF 0x00008000 /* Transmit FIFO underflow */
+#define BGMAC_IS_RX 0x00010000 /* Interrupt for RX queue 0 */
+#define BGMAC_IS_TX0 0x01000000 /* Interrupt for TX queue 0 */
+#define BGMAC_IS_TX1 0x02000000 /* Interrupt for TX queue 1 */
+#define BGMAC_IS_TX2 0x04000000 /* Interrupt for TX queue 2 */
+#define BGMAC_IS_TX3 0x08000000 /* Interrupt for TX queue 3 */
+#define BGMAC_IS_TX_MASK 0x0f000000
+#define BGMAC_IS_INTMASK 0x0f01fcff
+#define BGMAC_IS_ERRMASK 0x0000fc00
+#define BGMAC_INT_MASK 0x024 /* Interrupt mask */
+#define BGMAC_GP_TIMER 0x028
+#define BGMAC_INT_RECV_LAZY 0x100
+#define BGMAC_IRL_TO_MASK 0x00ffffff
+#define BGMAC_IRL_FC_MASK 0xff000000
+#define BGMAC_IRL_FC_SHIFT 24 /* Shift the number of interrupts triggered per received frame */
+#define BGMAC_FLOW_CTL_THRESH 0x104 /* Flow control thresholds */
+#define BGMAC_WRRTHRESH 0x108
+#define BGMAC_GMAC_IDLE_CNT_THRESH 0x10c
+#define BGMAC_PHY_ACCESS 0x180 /* PHY access address */
+#define BGMAC_PA_DATA_MASK 0x0000ffff
+#define BGMAC_PA_ADDR_MASK 0x001f0000
+#define BGMAC_PA_ADDR_SHIFT 16
+#define BGMAC_PA_REG_MASK 0x1f000000
+#define BGMAC_PA_REG_SHIFT 24
+#define BGMAC_PA_WRITE 0x20000000
+#define BGMAC_PA_START 0x40000000
+#define BGMAC_PHY_CNTL 0x188 /* PHY control address */
+#define BGMAC_PC_EPA_MASK 0x0000001f
+#define BGMAC_PC_MCT_MASK 0x007f0000
+#define BGMAC_PC_MCT_SHIFT 16
+#define BGMAC_PC_MTE 0x00800000
+#define BGMAC_TXQ_CTL 0x18c
+#define BGMAC_TXQ_CTL_DBT_MASK 0x00000fff
+#define BGMAC_TXQ_CTL_DBT_SHIFT 0
+#define BGMAC_RXQ_CTL 0x190
+#define BGMAC_RXQ_CTL_DBT_MASK 0x00000fff
+#define BGMAC_RXQ_CTL_DBT_SHIFT 0
+#define BGMAC_RXQ_CTL_PTE 0x00001000
+#define BGMAC_RXQ_CTL_MDP_MASK 0x3f000000
+#define BGMAC_RXQ_CTL_MDP_SHIFT 24
+#define BGMAC_GPIO_SELECT 0x194
+#define BGMAC_GPIO_OUTPUT_EN 0x198
+/* For 0x1e0 see BCMA_CLKCTLST */
+#define BGMAC_HW_WAR 0x1e4
+#define BGMAC_PWR_CTL 0x1e8
+#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
+#define BGMAC_DMA_BASE1 0x240 /* Tx controller only */
+#define BGMAC_DMA_BASE2 0x280 /* Tx controller only */
+#define BGMAC_DMA_BASE3 0x2C0 /* Tx controller only */
+#define BGMAC_TX_GOOD_OCTETS 0x300
+#define BGMAC_TX_GOOD_OCTETS_HIGH 0x304
+#define BGMAC_TX_GOOD_PKTS 0x308
+#define BGMAC_TX_OCTETS 0x30c
+#define BGMAC_TX_OCTETS_HIGH 0x310
+#define BGMAC_TX_PKTS 0x314
+#define BGMAC_TX_BROADCAST_PKTS 0x318
+#define BGMAC_TX_MULTICAST_PKTS 0x31c
+#define BGMAC_TX_LEN_64 0x320
+#define BGMAC_TX_LEN_65_TO_127 0x324
+#define BGMAC_TX_LEN_128_TO_255 0x328
+#define BGMAC_TX_LEN_256_TO_511 0x32c
+#define BGMAC_TX_LEN_512_TO_1023 0x330
+#define BGMAC_TX_LEN_1024_TO_1522 0x334
+#define BGMAC_TX_LEN_1523_TO_2047 0x338
+#define BGMAC_TX_LEN_2048_TO_4095 0x33c
+#define BGMAC_TX_LEN_4095_TO_8191 0x340
+#define BGMAC_TX_LEN_8192_TO_MAX 0x344
+#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */
+#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */
+#define BGMAC_TX_FRAGMENT_PKTS 0x350
+#define BGMAC_TX_UNDERRUNS 0x354 /* Error */
+#define BGMAC_TX_TOTAL_COLS 0x358
+#define BGMAC_TX_SINGLE_COLS 0x35c
+#define BGMAC_TX_MULTIPLE_COLS 0x360
+#define BGMAC_TX_EXCESSIVE_COLS 0x364 /* Error */
+#define BGMAC_TX_LATE_COLS 0x368 /* Error */
+#define BGMAC_TX_DEFERED 0x36c
+#define BGMAC_TX_CARRIER_LOST 0x370
+#define BGMAC_TX_PAUSE_PKTS 0x374
+#define BGMAC_TX_UNI_PKTS 0x378
+#define BGMAC_TX_Q0_PKTS 0x37c
+#define BGMAC_TX_Q0_OCTETS 0x380
+#define BGMAC_TX_Q0_OCTETS_HIGH 0x384
+#define BGMAC_TX_Q1_PKTS 0x388
+#define BGMAC_TX_Q1_OCTETS 0x38c
+#define BGMAC_TX_Q1_OCTETS_HIGH 0x390
+#define BGMAC_TX_Q2_PKTS 0x394
+#define BGMAC_TX_Q2_OCTETS 0x398
+#define BGMAC_TX_Q2_OCTETS_HIGH 0x39c
+#define BGMAC_TX_Q3_PKTS 0x3a0
+#define BGMAC_TX_Q3_OCTETS 0x3a4
+#define BGMAC_TX_Q3_OCTETS_HIGH 0x3a8
+#define BGMAC_RX_GOOD_OCTETS 0x3b0
+#define BGMAC_RX_GOOD_OCTETS_HIGH 0x3b4
+#define BGMAC_RX_GOOD_PKTS 0x3b8
+#define BGMAC_RX_OCTETS 0x3bc
+#define BGMAC_RX_OCTETS_HIGH 0x3c0
+#define BGMAC_RX_PKTS 0x3c4
+#define BGMAC_RX_BROADCAST_PKTS 0x3c8
+#define BGMAC_RX_MULTICAST_PKTS 0x3cc
+#define BGMAC_RX_LEN_64 0x3d0
+#define BGMAC_RX_LEN_65_TO_127 0x3d4
+#define BGMAC_RX_LEN_128_TO_255 0x3d8
+#define BGMAC_RX_LEN_256_TO_511 0x3dc
+#define BGMAC_RX_LEN_512_TO_1023 0x3e0
+#define BGMAC_RX_LEN_1024_TO_1522 0x3e4
+#define BGMAC_RX_LEN_1523_TO_2047 0x3e8
+#define BGMAC_RX_LEN_2048_TO_4095 0x3ec
+#define BGMAC_RX_LEN_4095_TO_8191 0x3f0
+#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4
+#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */
+#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */
+#define BGMAC_RX_FRAGMENT_PKTS 0x400
+#define BGMAC_RX_MISSED_PKTS 0x404 /* Error */
+#define BGMAC_RX_CRC_ALIGN_ERRS 0x408 /* Error */
+#define BGMAC_RX_UNDERSIZE 0x40c /* Error */
+#define BGMAC_RX_CRC_ERRS 0x410 /* Error */
+#define BGMAC_RX_ALIGN_ERRS 0x414 /* Error */
+#define BGMAC_RX_SYMBOL_ERRS 0x418 /* Error */
+#define BGMAC_RX_PAUSE_PKTS 0x41c
+#define BGMAC_RX_NONPAUSE_PKTS 0x420
+#define BGMAC_RX_SACHANGES 0x424
+#define BGMAC_RX_UNI_PKTS 0x428
+#define BGMAC_UNIMAC_VERSION 0x800
+#define BGMAC_HDBKP_CTL 0x804
+#define BGMAC_CMDCFG 0x808 /* Configuration */
+#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
+#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
+#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed, see gmac_speed */
+#define BGMAC_CMDCFG_ES_10 0x00000000
+#define BGMAC_CMDCFG_ES_100 0x00000004
+#define BGMAC_CMDCFG_ES_1000 0x00000008
+#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
+#define BGMAC_CMDCFG_PAD_EN 0x00000020
+#define BGMAC_CMDCFG_CF 0x00000040
+#define BGMAC_CMDCFG_PF 0x00000080
+#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
+#define BGMAC_CMDCFG_TAI 0x00000200
+#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
+#define BGMAC_CMDCFG_HD_SHIFT 10
+#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */
+#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
+#define BGMAC_CMDCFG_AE 0x00400000
+#define BGMAC_CMDCFG_CFE 0x00800000
+#define BGMAC_CMDCFG_NLC 0x01000000
+#define BGMAC_CMDCFG_RL 0x02000000
+#define BGMAC_CMDCFG_RED 0x04000000
+#define BGMAC_CMDCFG_PE 0x08000000
+#define BGMAC_CMDCFG_TPI 0x10000000
+#define BGMAC_CMDCFG_AT 0x20000000
+#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
+#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
+#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
+#define BGMAC_PAUSEQUANTA 0x818
+#define BGMAC_MAC_MODE 0x844
+#define BGMAC_OUTERTAG 0x848
+#define BGMAC_INNERTAG 0x84c
+#define BGMAC_TXIPG 0x85c
+#define BGMAC_PAUSE_CTL 0xb30
+#define BGMAC_TX_FLUSH 0xb34
+#define BGMAC_RX_STATUS 0xb38
+#define BGMAC_TX_STATUS 0xb3c
+
+#define BGMAC_PHY_CTL 0x00
+#define BGMAC_PHY_CTL_SPEED_MSB 0x0040
+#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */
+#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */
+#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */
+#define BGMAC_PHY_CTL_SPEED 0x2000
+#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */
+#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */
+/* Helpers */
+#define BGMAC_PHY_CTL_SPEED_10 0
+#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED
+#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB
+#define BGMAC_PHY_ADV 0x04
+#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */
+#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */
+#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */
+#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */
+#define BGMAC_PHY_ADV2 0x09
+#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */
+#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */
+
+/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
+#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
+#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
+
+/* BCMA GMAC core specific IO status (BCMA_IOST) flags */
+#define BGMAC_BCMA_IOST_ATTACHED 0x00000800
+
+#define BGMAC_NUM_MIB_TX_REGS \
+ (((BGMAC_TX_Q3_OCTETS_HIGH - BGMAC_TX_GOOD_OCTETS) / 4) + 1)
+#define BGMAC_NUM_MIB_RX_REGS \
+ (((BGMAC_RX_UNI_PKTS - BGMAC_RX_GOOD_OCTETS) / 4) + 1)
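
As a quick sanity check of the two macros above (using the register offsets defined earlier in this header): the TX span gives (0x3a8 - 0x300) / 4 + 1 = 43 MIB registers and the RX span gives (0x428 - 0x3b0) / 4 + 1 = 31, i.e. one 32-bit counter every 4 bytes, inclusive of both endpoints.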
+
+#define BGMAC_DMA_TX_CTL 0x00
+#define BGMAC_DMA_TX_ENABLE 0x00000001
+#define BGMAC_DMA_TX_SUSPEND 0x00000002
+#define BGMAC_DMA_TX_LOOPBACK 0x00000004
+#define BGMAC_DMA_TX_FLUSH 0x00000010
+#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
+#define BGMAC_DMA_TX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_TX_INDEX 0x04
+#define BGMAC_DMA_TX_RINGLO 0x08
+#define BGMAC_DMA_TX_RINGHI 0x0C
+#define BGMAC_DMA_TX_STATUS 0x10
+#define BGMAC_DMA_TX_STATDPTR 0x00001FFF
+#define BGMAC_DMA_TX_STAT 0xF0000000
+#define BGMAC_DMA_TX_STAT_DISABLED 0x00000000
+#define BGMAC_DMA_TX_STAT_ACTIVE 0x10000000
+#define BGMAC_DMA_TX_STAT_IDLEWAIT 0x20000000
+#define BGMAC_DMA_TX_STAT_STOPPED 0x30000000
+#define BGMAC_DMA_TX_STAT_SUSP 0x40000000
+#define BGMAC_DMA_TX_ERROR 0x14
+#define BGMAC_DMA_TX_ERRDPTR 0x0001FFFF
+#define BGMAC_DMA_TX_ERR 0xF0000000
+#define BGMAC_DMA_TX_ERR_NOERR 0x00000000
+#define BGMAC_DMA_TX_ERR_PROT 0x10000000
+#define BGMAC_DMA_TX_ERR_UNDERRUN 0x20000000
+#define BGMAC_DMA_TX_ERR_TRANSFER 0x30000000
+#define BGMAC_DMA_TX_ERR_DESCREAD 0x40000000
+#define BGMAC_DMA_TX_ERR_CORE 0x50000000
+#define BGMAC_DMA_RX_CTL 0x20
+#define BGMAC_DMA_RX_ENABLE 0x00000001
+#define BGMAC_DMA_RX_FRAME_OFFSET_MASK 0x000000FE
+#define BGMAC_DMA_RX_FRAME_OFFSET_SHIFT 1
+#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
+#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
+#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
+#define BGMAC_DMA_RX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_RX_INDEX 0x24
+#define BGMAC_DMA_RX_RINGLO 0x28
+#define BGMAC_DMA_RX_RINGHI 0x2C
+#define BGMAC_DMA_RX_STATUS 0x30
+#define BGMAC_DMA_RX_STATDPTR 0x00001FFF
+#define BGMAC_DMA_RX_STAT 0xF0000000
+#define BGMAC_DMA_RX_STAT_DISABLED 0x00000000
+#define BGMAC_DMA_RX_STAT_ACTIVE 0x10000000
+#define BGMAC_DMA_RX_STAT_IDLEWAIT 0x20000000
+#define BGMAC_DMA_RX_STAT_STOPPED 0x30000000
+#define BGMAC_DMA_RX_STAT_SUSP 0x40000000
+#define BGMAC_DMA_RX_ERROR 0x34
+#define BGMAC_DMA_RX_ERRDPTR 0x0001FFFF
+#define BGMAC_DMA_RX_ERR 0xF0000000
+#define BGMAC_DMA_RX_ERR_NOERR 0x00000000
+#define BGMAC_DMA_RX_ERR_PROT 0x10000000
+#define BGMAC_DMA_RX_ERR_UNDERRUN 0x20000000
+#define BGMAC_DMA_RX_ERR_TRANSFER 0x30000000
+#define BGMAC_DMA_RX_ERR_DESCREAD 0x40000000
+#define BGMAC_DMA_RX_ERR_CORE 0x50000000
+
+#define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */
+#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */
+#define BGMAC_DESC_CTL0_SOF 0x40000000 /* Start of frame */
+#define BGMAC_DESC_CTL0_EOF 0x80000000 /* End of frame */
+#define BGMAC_DESC_CTL1_LEN 0x00001FFF
+
+#define BGMAC_PHY_NOREGS 0x1E
+#define BGMAC_PHY_MASK 0x1F
+
+#define BGMAC_MAX_TX_RINGS 4
+#define BGMAC_MAX_RX_RINGS 1
+
+#define BGMAC_TX_RING_SLOTS 128
+#define BGMAC_RX_RING_SLOTS (512 - 1) /* Why -1? Well, Broadcom does that... */
+
+#define BGMAC_RX_HEADER_LEN 28 /* Last 24 bytes are unused. Well... */
+#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
+#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */
+#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
+
+#define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */
+#define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */
+#define BGMAC_BFL_ENETVLAN 0x0100 /* can do vlan */
+
+#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
+#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
+#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
+#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII 0x00000040
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII 0x00000080
+#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
+#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
+
+#define BGMAC_SPEED_10 0x0001
+#define BGMAC_SPEED_100 0x0002
+#define BGMAC_SPEED_1000 0x0004
+
+#define BGMAC_WEIGHT 64
+
+#define ETHER_MAX_LEN 1518
+
+struct bgmac_slot_info {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+struct bgmac_dma_desc {
+ __le32 ctl0;
+ __le32 ctl1;
+ __le32 addr_low;
+ __le32 addr_high;
+} __packed;
+
+enum bgmac_dma_ring_type {
+ BGMAC_DMA_RING_TX,
+ BGMAC_DMA_RING_RX,
+};
+
+/**
+ * bgmac_dma_ring - contains info about DMA ring (either TX or RX one)
+ * @start: index of the first slot containing data
+ * @end: index of a slot that can *not* be read (yet)
+ *
+ * Be aware of the specific meaning of @end: it is the index of the slot *after*
+ * the last one containing data that can be read. If @start equals @end the ring
+ * is empty.
+ */
+struct bgmac_dma_ring {
+ u16 num_slots;
+ u16 start;
+ u16 end;
+
+ u16 mmio_base;
+ struct bgmac_dma_desc *cpu_base;
+ dma_addr_t dma_base;
+
+ struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
+};
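
The @start/@end convention documented above can be illustrated with a minimal helper sketch; the function name is hypothetical and not part of this header:

static inline u16 bgmac_dma_ring_used(const struct bgmac_dma_ring *ring)
{
	/* @end points one slot past the last readable one, so the used count
	 * is the forward distance from @start to @end modulo the ring size;
	 * @start == @end means the ring is empty.
	 */
	if (ring->end >= ring->start)
		return ring->end - ring->start;
	return ring->num_slots - ring->start + ring->end;
}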
+
+struct bgmac_rx_header {
+ __le16 len;
+ __le16 flags;
+ __le16 pad[12];
+};
+
+struct bgmac {
+ struct bcma_device *core;
+ struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
+ struct net_device *net_dev;
+ struct napi_struct napi;
+
+ /* DMA */
+ struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
+ struct bgmac_dma_ring rx_ring[BGMAC_MAX_RX_RINGS];
+
+ /* Stats */
+ bool stats_grabbed;
+ u32 mib_tx_regs[BGMAC_NUM_MIB_TX_REGS];
+ u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS];
+
+ /* Int */
+ u32 int_mask;
+ u32 int_status;
+
+ /* Speed-related */
+ int speed;
+ bool autoneg;
+ bool full_duplex;
+
+ u8 phyaddr;
+ bool has_robosw;
+
+ bool loopback;
+};
+
+static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bcma_read32(bgmac->core, offset);
+}
+
+static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bcma_write32(bgmac->core, offset, value);
+}
+
+static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask,
+ u32 set)
+{
+ bgmac_write(bgmac, offset, (bgmac_read(bgmac, offset) & mask) | set);
+}
+
+static inline void bgmac_mask(struct bgmac *bgmac, u16 offset, u32 mask)
+{
+ bgmac_maskset(bgmac, offset, mask, 0);
+}
+
+static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
+{
+ bgmac_maskset(bgmac, offset, ~0, set);
+}
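
As a usage sketch only (the field value written here is hypothetical), setting a multi-bit field with these helpers means passing the *inverted* field mask as the bits to keep and the shifted value as the bits to set:

	/* program a hypothetical MDP value of 3 into BGMAC_RXQ_CTL,
	 * leaving every other bit of the register untouched
	 */
	bgmac_maskset(bgmac, BGMAC_RXQ_CTL, ~BGMAC_RXQ_CTL_MDP_MASK,
		      3 << BGMAC_RXQ_CTL_MDP_SHIFT);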
+
+#endif /* _BGMAC_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a1adfaf87f49..2f0ba8f2fd6c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8543,7 +8543,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
memcpy(dev->dev_addr, bp->mac_addr, 6);
- memcpy(dev->perm_addr, bp->mac_addr, 6);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
index 48fbdd48f88f..116762daae09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/Makefile
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -4,4 +4,5 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e8d4db10c8f3..e4605a965084 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,9 +13,12 @@
#ifndef BNX2X_H
#define BNX2X_H
+
+#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
+#include <linux/pci_regs.h>
/* compilation time flags */
@@ -23,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.00-0"
-#define DRV_MODULE_RELDATE "2012/09/27"
+#define DRV_MODULE_VERSION "1.78.02-0"
+#define DRV_MODULE_RELDATE "2013/01/14"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -48,6 +51,13 @@
#include "bnx2x_sp.h"
#include "bnx2x_dcb.h"
#include "bnx2x_stats.h"
+#include "bnx2x_vfpf.h"
+
+enum bnx2x_int_mode {
+ BNX2X_INT_MODE_MSIX,
+ BNX2X_INT_MODE_INTX,
+ BNX2X_INT_MODE_MSI
+};
/* error/debug prints */
@@ -112,29 +122,29 @@ do { \
dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
} while (0)
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int);
#ifdef BNX2X_STOP_ON_ERROR
-void bnx2x_int_disable(struct bnx2x *bp);
#define bnx2x_panic() \
do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
- bnx2x_int_disable(bp); \
- bnx2x_panic_dump(bp); \
+ bnx2x_panic_dump(bp, true); \
} while (0)
#else
#define bnx2x_panic() \
do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
- bnx2x_panic_dump(bp); \
+ bnx2x_panic_dump(bp, false); \
} while (0)
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
#define bnx2x_uc_addr(ha) ((ha)->addr)
-#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
-#define U64_HI(x) (u32)(((u64)(x)) >> 32)
+#define U64_LO(x) ((u32)(((u64)(x)) & 0xffffffff))
+#define U64_HI(x) ((u32)(((u64)(x)) >> 32))
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
@@ -334,6 +344,9 @@ union db_prod {
#define SGE_PAGE_SIZE PAGE_SIZE
#define SGE_PAGE_SHIFT PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
+#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
+ SGE_PAGES), 0xffff)
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
@@ -789,48 +802,63 @@ struct bnx2x_common {
#define CHIP_NUM_57711E 0x1650
#define CHIP_NUM_57712 0x1662
#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57712_VF 0x166f
#define CHIP_NUM_57713 0x1651
#define CHIP_NUM_57713E 0x1652
#define CHIP_NUM_57800 0x168a
#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57800_VF 0x16a9
#define CHIP_NUM_57810 0x168e
#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57810_VF 0x16af
#define CHIP_NUM_57811 0x163d
#define CHIP_NUM_57811_MF 0x163e
-#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57811_VF 0x163f
+#define CHIP_NUM_57840_OBSOLETE 0x168d
#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
#define CHIP_NUM_57840_4_10 0x16a1
#define CHIP_NUM_57840_2_20 0x16a2
#define CHIP_NUM_57840_MF 0x16a4
+#define CHIP_NUM_57840_VF 0x16ad
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF)
#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF)
#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF)
#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
+#define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF)
#define CHIP_IS_57840(bp) \
((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
+#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
- CHIP_IS_57712_MF(bp))
+ CHIP_IS_57712_MF(bp) || \
+ CHIP_IS_57712_VF(bp))
#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
CHIP_IS_57800_MF(bp) || \
+ CHIP_IS_57800_VF(bp) || \
CHIP_IS_57810(bp) || \
CHIP_IS_57810_MF(bp) || \
+ CHIP_IS_57810_VF(bp) || \
CHIP_IS_57811(bp) || \
CHIP_IS_57811_MF(bp) || \
+ CHIP_IS_57811_VF(bp) || \
CHIP_IS_57840(bp) || \
- CHIP_IS_57840_MF(bp))
+ CHIP_IS_57840_MF(bp) || \
+ CHIP_IS_57840_VF(bp))
#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
@@ -954,6 +982,11 @@ struct bnx2x_port {
extern struct workqueue_struct *bnx2x_wq;
#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_CID_WND 0
+#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
+#define BNX2X_CLIENTS_PER_VF 1
+#define BNX2X_FIRST_VF_CID 256
+#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
#define BNX2X_VF_ID_INVALID 0xFF
/*
@@ -1104,6 +1137,7 @@ struct hw_context {
/* forward */
struct bnx2x_ilt;
+struct bnx2x_vfdb;
enum bnx2x_recovery_state {
BNX2X_RECOVERY_DONE,
@@ -1165,19 +1199,22 @@ struct bnx2x_fw_stats_req {
};
struct bnx2x_fw_stats_data {
- struct stats_counter storm_counters;
- struct per_port_stats port;
- struct per_pf_stats pf;
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[1];
};
/* Public slow path states */
enum {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
- BNX2X_SP_RTNL_AFEX_F_UPDATE,
BNX2X_SP_RTNL_FAN_FAILURE,
+ BNX2X_SP_RTNL_AFEX_F_UPDATE,
+ BNX2X_SP_RTNL_ENABLE_SRIOV,
+ BNX2X_SP_RTNL_VFPF_MCAST,
+ BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
};
@@ -1231,6 +1268,21 @@ struct bnx2x {
(vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
+#ifdef CONFIG_BNX2X_SRIOV
+ /* vf pf channel mailbox contains request and response buffers */
+ struct bnx2x_vf_mbx_msg *vf2pf_mbox;
+ dma_addr_t vf2pf_mbox_mapping;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* bulletin board for messages from pf to vf */
+ union pf_vf_bulletin *pf2vf_bulletin;
+ dma_addr_t pf2vf_bulletin_mapping;
+
+ struct pf_vf_bulletin_content old_bulletin;
+#endif /* CONFIG_BNX2X_SRIOV */
+
struct net_device *dev;
struct pci_dev *pdev;
@@ -1295,8 +1347,6 @@ struct bnx2x {
__le16 *eq_cons_sb;
atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
-
-
/* Counter for marking that there is a STAT_QUERY ramrod pending */
u16 stats_pending;
/* Counter for completed statistics ramrods */
@@ -1318,8 +1368,6 @@ struct bnx2x {
#define DISABLE_MSI_FLAG (1 << 7)
#define TPA_ENABLE_FLAG (1 << 8)
#define NO_MCP_FLAG (1 << 9)
-
-#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
@@ -1330,6 +1378,17 @@ struct bnx2x {
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
+#define IS_VF_FLAG (1 << 22)
+
+#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
+
+#ifdef CONFIG_BNX2X_SRIOV
+#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG)
+#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG))
+#else
+#define IS_VF(bp) false
+#define IS_PF(bp) true
+#endif
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1349,6 +1408,7 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
+ atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
struct delayed_work period_task;
@@ -1432,6 +1492,7 @@ struct bnx2x {
u8 igu_sb_cnt;
u8 min_msix_vec_cnt;
+ u32 igu_base_addr;
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
@@ -1580,6 +1641,9 @@ struct bnx2x {
char fw_ver[32];
const struct firmware *firmware;
+ struct bnx2x_vfdb *vfdb;
+#define IS_SRIOV(bp) ((bp)->vfdb)
+
/* DCB support on/off */
u16 dcb_state;
#define BNX2X_DCB_STATE_OFF 0
@@ -1599,6 +1663,10 @@ struct bnx2x {
int dcb_version;
/* CAM credit pools */
+
+ /* used only in sriov */
+ struct bnx2x_credit_pool_obj vlans_pool;
+
struct bnx2x_credit_pool_obj macs_pool;
/* RX_MODE object */
@@ -1636,6 +1704,9 @@ struct bnx2x {
/* priority to cos mapping */
u8 prio_to_cos[8];
+
+ int fp_array_size;
+ u32 dump_preset_idx;
};
/* Tx queues may be less or equal to Rx queues */
@@ -1813,12 +1884,16 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
/* Init Function API */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id);
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
void bnx2x_read_mf_cfg(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
@@ -1830,6 +1905,18 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type);
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u8 src_type, u8 dst_type);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl);
+
+/* FLR related routines */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count);
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt);
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev);
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
@@ -1854,6 +1941,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
return val;
}
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+ bool is_pf);
+
#define BNX2X_ILT_ZALLOC(x, y, size) \
do { \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
@@ -1990,10 +2080,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
-
#define STROM_ASSERT_ARRAY_SIZE 50
-
/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
(BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
@@ -2024,7 +2112,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* Memory of fairness algorithm . 2 cycles */
#define FAIR_MEM 2
-
#define ATTN_NIG_FOR_FUNC (1L << 8)
#define ATTN_SW_TIMER_4_FUNC (1L << 9)
#define GPIO_2_FUNC (1L << 10)
@@ -2067,6 +2154,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
@@ -2128,7 +2216,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MULTI_MASK 0x7f
-
#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
@@ -2156,18 +2243,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_DEF_CONS])
-#define SET_FLAG(value, mask, flag) \
- do {\
- (value) &= ~(mask);\
- (value) |= ((flag) << (mask##_SHIFT));\
- } while (0)
-
-#define GET_FLAG(value, mask) \
- (((value) & (mask)) >> (mask##_SHIFT))
-
-#define GET_FIELD(value, fname) \
- (((value) & (fname##_MASK)) >> (fname##_SHIFT))
-
#define CAM_IS_INVALID(x) \
(GET_FLAG(x.flags, \
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
@@ -2178,7 +2253,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
-
#ifndef PXP2_REG_PXP2_INT_STS
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
@@ -2190,9 +2264,16 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+#define VF_ACQUIRE_MC_FILTERS 10
+
+#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
+ (!((me_reg) & ME_REG_VF_ERR)))
+int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
/* Congestion management fairness mode */
-#define CMNG_FNS_NONE 0
-#define CMNG_FNS_MINMAX 1
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
#define HC_SEG_ACCESS_ATTN 4
@@ -2208,7 +2289,6 @@ static const u32 dmae_reg_go_c[] = {
void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
-
#define BNX2X_MF_SD_PROTOCOL(bp) \
((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
@@ -2229,6 +2309,18 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)
+
+#define GET_FLAG(value, mask) \
+ (((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+ (((value) & (fname##_MASK)) >> (fname##_SHIFT))
+
enum {
SWITCH_UPDATE,
AFEX_UPDATE,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a2998bea5d4b..ecac04a3687c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
/* bnx2x_cmn.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
+#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
@@ -28,8 +29,6 @@
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
-
-
/**
* bnx2x_move_fp - move content of the fastpath structure.
*
@@ -80,12 +79,65 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
}
- memcpy(&bp->bnx2x_txq[old_txdata_index],
- &bp->bnx2x_txq[new_txdata_index],
+ memcpy(&bp->bnx2x_txq[new_txdata_index],
+ &bp->bnx2x_txq[old_txdata_index],
sizeof(struct bnx2x_fp_txdata));
to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string.
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
+{
+ if (IS_PF(bp)) {
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+ phy_fw_ver[0] = '\0';
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ phy_fw_ver, PHY_FW_VER_LEN);
+ strlcpy(buf, bp->fw_ver, buf_len);
+ snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ } else {
+ bnx2x_vf_fill_fw_str(bp, buf, buf_len);
+ }
+}
+
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp: driver handle
+ * @delta: number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+ int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Queue pointer cannot be reset on a per-fp basis, as moving the pointer
+ * backward along the array could cause memory to be overridden
+ */
+ for (cos = 1; cos < bp->max_cos; cos++) {
+ for (i = 0; i < old_eth_num - delta; i++) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int new_idx = cos * (old_eth_num - delta) + i;
+
+ memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+ sizeof(struct bnx2x_fp_txdata));
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+ }
+ }
+}
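
With illustrative numbers, if old_eth_num = 8 and delta = 2, the remaining 6 queues' cos = 1 entries are copied to new_idx = 1 * 6 + i (indices 6..11) and the cos = 2 entries to 12..17, so the per-cos blocks end up packed back to back over the shrunken queue count.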
+
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
@@ -185,7 +237,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
- &pkts_compl, &bytes_compl);
+ &pkts_compl, &bytes_compl);
sw_cons++;
}
@@ -291,14 +343,14 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
fp->last_max_sge, fp->rx_sge_prod);
}
-/* Set Toeplitz hash value in the skb using the value from the
+/* Get Toeplitz hash value in the skb using the value from the
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
bool *l4_rxhash)
{
- /* Set Toeplitz hash from CQE */
+ /* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
(cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
enum eth_rss_hash_type htype;
@@ -365,8 +417,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
- tpa_info->full_page =
- SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+ tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
tpa_info->gro_size = gro_size;
}
@@ -387,31 +438,34 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
*/
#define TPA_TSTAMP_OPT_LEN 12
/**
- * bnx2x_set_lro_mss - calculate the approximate value of the MSS
+ * bnx2x_set_gro_params - compute GRO values
*
- * @bp: driver handle
+ * @skb: packet skb
* @parsing_flags: parsing flags from the START CQE
* @len_on_bd: total length of the first packet for the
* aggregation.
+ * @pkt_len: length of all segments
*
* Approximate value of the MSS for this aggregation calculated using
* the first packet of it.
+ * Compute number of aggregated segments, and gso_type.
*/
-static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
- u16 len_on_bd)
+static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
+ u16 len_on_bd, unsigned int pkt_len)
{
- /*
- * TPA arrgregation won't have either IP options or TCP options
+ /* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
*/
u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
- PRS_FLAG_OVERETH_IPV6)
+ PRS_FLAG_OVERETH_IPV6) {
hdrs_len += sizeof(struct ipv6hdr);
- else /* IPv4 */
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ } else {
hdrs_len += sizeof(struct iphdr);
-
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
/* Check if there was a TCP timestamp, if there is it's will
* always be 12 bytes length: nop nop kind length echo val.
@@ -421,7 +475,13 @@ static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
hdrs_len += TPA_TSTAMP_OPT_LEN;
- return len_on_bd - hdrs_len;
+ skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
+ skb_shinfo(skb)->gso_size);
}
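
A rough worked example of the arithmetic above (numbers are illustrative only): for an IPv4 aggregation with no TCP timestamp option, hdrs_len = 14 (ETH_HLEN) + 20 (tcphdr) + 20 (iphdr) = 54, so len_on_bd = 1514 gives gso_size = 1514 - 54 = 1460, and pkt_len = 16054 yields NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(16054 - 54, 1460) = 11 segments.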
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -438,7 +498,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
}
mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
BNX2X_ERR("Can't map sge\n");
@@ -475,22 +535,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* This is needed in order to enable forwarding support */
- if (frag_size) {
- skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
- tpa_info->parsing_flags, len_on_bd);
-
- /* set for GRO */
- if (fp->mode == TPA_MODE_GRO)
- skb_shinfo(skb)->gso_type =
- (GET_FLAG(tpa_info->parsing_flags,
- PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
- PRS_FLAG_OVERETH_IPV6) ?
- SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
- }
-
+ if (frag_size)
+ bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
+ le16_to_cpu(cqe->pkt_len));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
@@ -508,8 +558,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (fp->mode == TPA_MODE_GRO)
frag_len = min_t(u32, frag_size, (u32)full_page);
else /* LRO */
- frag_len = min_t(u32, frag_size,
- (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+ frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;
@@ -525,7 +574,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Unmap the page as we r going to pass it to the stack */
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -543,7 +592,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
skb->data_len += frag_len;
- skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
+ skb->truesize += SGE_PAGES;
skb->len += frag_len;
frag_size -= frag_len;
@@ -568,6 +617,54 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
+#ifdef CONFIG_INET
+static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+}
+
+static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+}
+#endif
+
+static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ skb_set_network_header(skb, 0);
+ switch (be16_to_cpu(skb->protocol)) {
+ case ETH_P_IP:
+ bnx2x_gro_ip_csum(bp, skb);
+ break;
+ case ETH_P_IPV6:
+ bnx2x_gro_ipv6_csum(bp, skb);
+ break;
+ default:
+ BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
+ }
+ tcp_gro_complete(skb);
+ }
+#endif
+ napi_gro_receive(&fp->napi, skb);
+}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_agg_info *tpa_info,
@@ -622,7 +719,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
- napi_gro_receive(&fp->napi, skb);
+ bnx2x_gro_receive(bp, fp, skb);
} else {
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate new pages - dropping packet!\n");
@@ -1064,7 +1161,7 @@ void __bnx2x_link_report(struct bnx2x *bp)
struct bnx2x_link_report_data cur_data;
/* reread mf_cfg */
- if (!CHIP_IS_E1(bp))
+ if (IS_PF(bp) && !CHIP_IS_E1(bp))
bnx2x_read_mf_cfg(bp);
/* Read the current link report info */
@@ -1406,10 +1503,14 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
if (nvecs == offset)
return;
- free_irq(bp->msix_table[offset].vector, bp->dev);
- DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
- bp->msix_table[offset].vector);
- offset++;
+
+ /* VFs don't have a default SB */
+ if (IS_PF(bp)) {
+ free_irq(bp->msix_table[offset].vector, bp->dev);
+ DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+ bp->msix_table[offset].vector);
+ offset++;
+ }
if (CNIC_SUPPORT(bp)) {
if (nvecs == offset)
@@ -1430,21 +1531,30 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
void bnx2x_free_irq(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG &&
- !(bp->flags & USING_SINGLE_MSIX_FLAG))
- bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
- CNIC_SUPPORT(bp) + 1);
- else
+ !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
+ int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
+
+ /* vfs don't have a default status block */
+ if (IS_PF(bp))
+ nvecs++;
+
+ bnx2x_free_msix_irqs(bp, nvecs);
+ } else {
free_irq(bp->dev->irq, bp->dev);
+ }
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
- int msix_vec = 0, i, rc, req_cnt;
+ int msix_vec = 0, i, rc;
- bp->msix_table[msix_vec].entry = msix_vec;
- BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
- bp->msix_table[0].entry);
- msix_vec++;
+ /* VFs don't have a default status block */
+ if (IS_PF(bp)) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
+ bp->msix_table[0].entry);
+ msix_vec++;
+ }
/* Cnic requires an msix vector for itself */
if (CNIC_SUPPORT(bp)) {
@@ -1462,9 +1572,10 @@ int bnx2x_enable_msix(struct bnx2x *bp)
msix_vec++;
}
- req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
+ DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
+ msix_vec);
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
/*
* reconfigure number of tx/rx queues according to available
@@ -1472,7 +1583,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
*/
if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
/* how less vectors we will have? */
- int diff = req_cnt - rc;
+ int diff = msix_vec - rc;
BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
@@ -1526,12 +1637,15 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
int i, rc, offset = 0;
- rc = request_irq(bp->msix_table[offset++].vector,
- bnx2x_msix_sp_int, 0,
- bp->dev->name, bp->dev);
- if (rc) {
- BNX2X_ERR("request sp irq failed\n");
- return -EBUSY;
+ /* no default status block for vf */
+ if (IS_PF(bp)) {
+ rc = request_irq(bp->msix_table[offset++].vector,
+ bnx2x_msix_sp_int, 0,
+ bp->dev->name, bp->dev);
+ if (rc) {
+ BNX2X_ERR("request sp irq failed\n");
+ return -EBUSY;
+ }
}
if (CNIC_SUPPORT(bp))
@@ -1555,12 +1669,20 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_ETH_QUEUES(bp);
- offset = 1 + CNIC_SUPPORT(bp);
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
- bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
-
+ if (IS_PF(bp)) {
+ offset = 1 + CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ } else {
+ offset = CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ }
return 0;
}
@@ -1605,7 +1727,6 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
if (rc)
return rc;
} else {
- bnx2x_ack_int(bp);
rc = bnx2x_req_irq(bp);
if (rc) {
BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
@@ -1703,7 +1824,6 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
-
void bnx2x_set_num_queues(struct bnx2x *bp)
{
/* RSS queues */
@@ -1832,7 +1952,6 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
bool config_hash)
{
struct bnx2x_config_rss_params params = {NULL};
- int i;
/* Although RSS is meaningless when there is a single HW queue we
* still need it enabled in order to have HW Rx hash generated.
@@ -1864,9 +1983,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- for (i = 0; i < sizeof(params.rss_key) / 4; i++)
- params.rss_key[i] = random32();
-
+ prandom_bytes(params.rss_key, sizeof(params.rss_key));
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
@@ -1971,27 +2088,212 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
-bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
+static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
- /* build FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ return;
+}
+
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups, vf_headroom = 0;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
+
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
+ * and fcoe l2 queue) stats + num of queues (which includes another 1
+ * for fcoe l2 queue if applicable)
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
+
+ /* vf stats appear in the request list, but their data is allocated by
+ * the VFs themselves. We don't include them in the bp->fw_stats_num as
+ * it is used to determine where to place the vf stats queries in the
+ * request struct
+ */
+ if (IS_SRIOV(bp))
+ vf_headroom = bnx2x_vf_headroom(bp);
+
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+ * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups =
+ (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
+ 1 : 0));
+
+ DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
+ bp->fw_stats_num, vf_headroom, num_groups);
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+ /* Data for statistics requests + stats_counter
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+ * memory for FCoE offloaded statistics is counted anyway,
+ * even if they will not be sent.
+ * VF stats are not accounted for here as the data of VF stats is stored
+ * in memory allocated by the VF, not here.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
+
+ DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping));
+ DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
+ U64_HI(bp->fw_stats_data_mapping),
+ U64_LO(bp->fw_stats_data_mapping));
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_fw_stats_mem(bp);
+ BNX2X_ERR("Can't allocate FW stats memory\n");
+ return -ENOMEM;
+}
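
To make the num_groups rounding concrete (illustrative numbers, and assuming for the sake of the example that STATS_QUERY_CMD_COUNT is 16): fw_stats_num = 10 with vf_headroom = 64 gives (10 + 64) / 16 = 4 plus a non-zero remainder, hence num_groups = 5, i.e. the usual integer ceiling division.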
- /* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+/* send load request to mcp and analyze response */
+static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
+{
+ /* init fw_seq */
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ /* Get current FW pulse sequence */
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+
+ /* load request */
+ (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
+
+ /* if mcp fails to respond we must abort */
+ if (!(*load_code)) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ return -EBUSY;
+ }
- DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
+ /* If mcp refused (e.g. other port is in diagnostic mode) we
+ * must abort
+ */
+ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ BNX2X_ERR("MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+ return 0;
+}
- if (loaded_fw != my_fw) {
- if (is_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
+/* check whether another PF has already loaded FW to the chip. In
+ * virtualized environments a PF from another VM may have already
+ * initialized the device, including loading FW
+ */
+int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+{
+ /* is another pf loaded on this engine? */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+ load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+ /* build my FW version dword */
+ u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+ (BCM_5710_FW_MINOR_VERSION << 8) +
+ (BCM_5710_FW_REVISION_VERSION << 16) +
+ (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+ /* read loaded FW from chip */
+ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
+ loaded_fw, my_fw);
+
+ /* abort nic load if version mismatch */
+ if (my_fw != loaded_fw) {
+ BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
loaded_fw, my_fw);
- return false;
+ return -EBUSY;
+ }
}
+ return 0;
+}
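
As an illustration of the dword layout compared above (hypothetical version numbers): a 7.8.2.0 firmware packs to 7 + (8 << 8) + (2 << 16) + (0 << 24) = 0x00020807, and the value read back from XSEM_REG_PRAM is expected to match byte for byte.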
- return true;
+/* returns the "mcp load_code" according to global load_count array */
+static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
+{
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]++;
+ load_count[path][1 + port]++;
+ DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 1)
+ return FW_MSG_CODE_DRV_LOAD_COMMON;
+ else if (load_count[path][1 + port] == 1)
+ return FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ return FW_MSG_CODE_DRV_LOAD_FUNCTION;
+}
+
+/* mark PMF if applicable */
+static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
+{
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+ bp->port.pmf = 1;
+ /* We need the barrier to ensure the ordering between the
+ * writing to bp->port.pmf here and reading it from the
+ * bnx2x_periodic_task().
+ */
+ smp_mb();
+ } else {
+ bp->port.pmf = 0;
+ }
+
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+}
+
+static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
+{
+ if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+ (bp->common.shmem2_base)) {
+ if (SHMEM2_HAS(bp, dcc_support))
+ SHMEM2_WR(bp, dcc_support,
+ (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+ SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ if (SHMEM2_HAS(bp, afex_driver_support))
+ SHMEM2_WR(bp, afex_driver_support,
+ SHMEM_AFEX_SUPPORTED_VERSION_ONE);
+ }
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
}
/**
@@ -2006,49 +2308,15 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
int cos;
struct napi_struct orig_napi = fp->napi;
struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
/* bzero bnx2x_fastpath contents */
- if (bp->stats_init) {
- memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
- memset(fp, 0, sizeof(*fp));
- } else {
- /* Keep Queue statistics */
- struct bnx2x_eth_q_stats *tmp_eth_q_stats;
- struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
-
- tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
- GFP_KERNEL);
- if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
-
- tmp_eth_q_stats_old =
- kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
- GFP_KERNEL);
- if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
- sizeof(struct bnx2x_eth_q_stats_old));
-
- memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
- memset(fp, 0, sizeof(*fp));
-
- if (tmp_eth_q_stats) {
- memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
- kfree(tmp_eth_q_stats);
- }
-
- if (tmp_eth_q_stats_old) {
- memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
- sizeof(struct bnx2x_eth_q_stats_old));
- kfree(tmp_eth_q_stats_old);
- }
-
- }
+ if (fp->tpa_info)
+ memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
+ sizeof(struct bnx2x_agg_info));
+ memset(fp, 0, sizeof(*fp));
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
@@ -2094,10 +2362,12 @@ int bnx2x_load_cnic(struct bnx2x *bp)
mutex_init(&bp->cnic_mutex);
- rc = bnx2x_alloc_mem_cnic(bp);
- if (rc) {
- BNX2X_ERR("Unable to allocate bp memory for cnic\n");
- LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
}
rc = bnx2x_alloc_fp_mem_cnic(bp);
@@ -2124,14 +2394,17 @@ int bnx2x_load_cnic(struct bnx2x *bp)
bnx2x_nic_init_cnic(bp);
- /* Enable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+ if (IS_PF(bp)) {
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
- for_each_cnic_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ /* setup cnic queues */
+ for_each_cnic_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ }
}
}
@@ -2172,13 +2445,11 @@ load_error_cnic0:
#endif /* ! BNX2X_STOP_ON_ERROR */
}
-
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
int port = BP_PORT(bp);
- u32 load_code;
- int i, rc;
+ int i, rc = 0, load_code = 0;
DP(NETIF_MSG_IFUP, "Starting NIC load\n");
DP(NETIF_MSG_IFUP,
@@ -2193,15 +2464,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
- /* Set the initial link reported state to link down */
- bnx2x_acquire_phy_lock(bp);
memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
&bp->last_reported_link.link_report_flags);
- bnx2x_release_phy_lock(bp);
- /* must be called before memory allocation and HW init */
- bnx2x_ilt_set_info(bp);
+ if (IS_PF(bp))
+ /* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(bp);
/*
* Zero fastpath structures preserving invariants like napi, which are
@@ -2220,8 +2489,33 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
- if (bnx2x_alloc_mem(bp))
- return -ENOMEM;
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory\n");
+ return rc;
+ }
+ }
+
+ /* Allocate memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ LOAD_ERROR_EXIT(bp, load_error0);
+
+ /* needs to be done after alloc mem, since it self-adjusts to the amount
+ * of memory available for RSS queues
+ */
+ rc = bnx2x_alloc_fp_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate memory for fps\n");
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+ /* request pf to initialize status blocks */
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_init(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
/* As long as bnx2x_alloc_mem() may possibly update
* bp->num_queues, bnx2x_set_real_num_queues() should always
@@ -2244,98 +2538,48 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
DP(NETIF_MSG_IFUP, "napi added\n");
bnx2x_napi_enable(bp);
- /* set pf load just before approaching the MCP */
- bnx2x_set_pf_load(bp);
-
- /* Send LOAD_REQUEST command to MCP
- * Returns the type of LOAD command:
- * if it is the first port to be initialized
- * common blocks should be initialized, otherwise - not
- */
- if (!BP_NOMCP(bp)) {
- /* init fw_seq */
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-
- /* Get current FW pulse sequence */
- bp->fw_drv_pulse_wr_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
- DRV_PULSE_SEQ_MASK);
- BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
- DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
- if (!load_code) {
- BNX2X_ERR("MCP response failure, aborting\n");
- rc = -EBUSY;
- LOAD_ERROR_EXIT(bp, load_error1);
- }
- if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
- BNX2X_ERR("Driver load refused\n");
- rc = -EBUSY; /* other port in diagnostic mode */
- LOAD_ERROR_EXIT(bp, load_error1);
- }
- if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
- load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* abort nic load if version mismatch */
- if (!bnx2x_test_firmware_version(bp, true)) {
- rc = -EBUSY;
+ if (IS_PF(bp)) {
+ /* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(bp);
+
+ /* if mcp exists send load request and analyze response */
+ if (!BP_NOMCP(bp)) {
+ /* attempt to load pf */
+ rc = bnx2x_nic_load_request(bp, &load_code);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error1);
+
+ /* what did mcp say? */
+ rc = bnx2x_nic_load_analyze_req(bp, load_code);
+ if (rc) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
+ } else {
+ load_code = bnx2x_nic_load_no_mcp(bp, port);
}
- } else {
- int path = BP_PATH(bp);
-
- DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]++;
- load_count[path][1 + port]++;
- DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 1)
- load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[path][1 + port] == 1)
- load_code = FW_MSG_CODE_DRV_LOAD_PORT;
- else
- load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
- }
-
- if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
- bp->port.pmf = 1;
- /*
- * We need the barrier to ensure the ordering between the
- * writing to bp->port.pmf here and reading it from the
- * bnx2x_periodic_task().
- */
- smp_mb();
- } else
- bp->port.pmf = 0;
-
- DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
+ /* mark pmf if applicable */
+ bnx2x_nic_load_pmf(bp, load_code);
- /* Init Function state controlling object */
- bnx2x__init_func_obj(bp);
+ /* Init Function state controlling object */
+ bnx2x__init_func_obj(bp);
- /* Initialize HW */
- rc = bnx2x_init_hw(bp, load_code);
- if (rc) {
- BNX2X_ERR("HW init failed, aborting\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- LOAD_ERROR_EXIT(bp, load_error2);
+ /* Initialize HW */
+ rc = bnx2x_init_hw(bp, load_code);
+ if (rc) {
+ BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
}
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
- BNX2X_ERR("IRQs setup failed\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ BNX2X_ERR("setup irqs failed\n");
+ if (IS_PF(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
@@ -2343,78 +2587,89 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_nic_init(bp, load_code);
/* Init per-function objects */
- bnx2x_init_bp_objs(bp);
-
- if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
- (bp->common.shmem2_base)) {
- if (SHMEM2_HAS(bp, dcc_support))
- SHMEM2_WR(bp, dcc_support,
- (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
- SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
- if (SHMEM2_HAS(bp, afex_driver_support))
- SHMEM2_WR(bp, afex_driver_support,
- SHMEM_AFEX_SUPPORTED_VERSION_ONE);
- }
+ if (IS_PF(bp)) {
+ bnx2x_init_bp_objs(bp);
+ bnx2x_iov_nic_init(bp);
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
+ bnx2x_nic_load_afex_dcc(bp, load_code);
+ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- /* Set AFEX default VLAN tag to an invalid value */
- bp->afex_def_vlan_tag = -1;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
- bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
- rc = bnx2x_func_start(bp);
- if (rc) {
- BNX2X_ERR("Function start failed!\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- LOAD_ERROR_EXIT(bp, load_error3);
- }
+ /* Send LOAD_DONE command to MCP */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp,
+ DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
- /* Send LOAD_DONE command to MCP */
- if (!BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- if (!load_code) {
- BNX2X_ERR("MCP response failure, aborting\n");
- rc = -EBUSY;
+ /* setup the leading queue */
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- }
- rc = bnx2x_setup_leading(bp);
- if (rc) {
- BNX2X_ERR("Setup leading failed!\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
+ /* set up the rest of the queues */
+ for_each_nondefault_eth_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
- for_each_nondefault_eth_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ /* setup rss */
+ rc = bnx2x_init_rss_pf(bp);
if (rc) {
- BNX2X_ERR("Queue setup failed\n");
+ BNX2X_ERR("PF RSS init failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- }
- rc = bnx2x_init_rss_pf(bp);
- if (rc) {
- BNX2X_ERR("PF RSS init failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
+ } else { /* vf */
+ for_each_eth_queue(bp, i) {
+ rc = bnx2x_vfpf_setup_q(bp, i);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
}
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
/* Configure a ucast MAC */
- rc = bnx2x_set_eth_mac(bp, true);
+ if (IS_PF(bp))
+ rc = bnx2x_set_eth_mac(bp, true);
+ else /* vf */
+ rc = bnx2x_vfpf_set_mac(bp);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- if (bp->pending_max) {
+ if (IS_PF(bp) && bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
bp->pending_max = 0;
}
- if (bp->port.pmf)
- bnx2x_initial_phy_init(bp, load_mode);
+ if (bp->port.pmf) {
+ rc = bnx2x_initial_phy_init(bp, load_mode);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
/* Start fast path */
@@ -2456,8 +2711,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (CNIC_ENABLED(bp))
bnx2x_load_cnic(bp);
- /* mark driver is loaded in shmem2 */
- if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ /* mark driver is loaded in shmem2 */
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
@@ -2466,7 +2721,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
/* Wait for all pending SP commands to complete */
- if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+ if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
BNX2X_ERR("Timeout waiting for SP elements to complete\n");
bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
return -EBUSY;
@@ -2482,10 +2737,12 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#ifndef BNX2X_STOP_ON_ERROR
load_error3:
- bnx2x_int_disable_sync(bp, 1);
+ if (IS_PF(bp)) {
+ bnx2x_int_disable_sync(bp, 1);
- /* Clean queueable objects */
- bnx2x_squeeze_objects(bp);
+ /* Clean queueable objects */
+ bnx2x_squeeze_objects(bp);
+ }
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
@@ -2495,7 +2752,7 @@ load_error3:
/* Release IRQs */
bnx2x_free_irq(bp);
load_error2:
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}
@@ -2503,15 +2760,35 @@ load_error2:
bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
+
/* clear pf_load status, as it was already set */
- bnx2x_clear_pf_load(bp);
+ if (IS_PF(bp))
+ bnx2x_clear_pf_load(bp);
load_error0:
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_fw_stats_mem(bp);
bnx2x_free_mem(bp);
return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
+static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+{
+ u8 rc = 0, cos, i;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
@@ -2521,15 +2798,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
/* mark driver is unloaded in shmem2 */
- if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
}
- if ((bp->state == BNX2X_STATE_CLOSED) ||
- (bp->state == BNX2X_STATE_ERROR)) {
+ if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
+ (bp->state == BNX2X_STATE_CLOSED ||
+ bp->state == BNX2X_STATE_ERROR)) {
/* We can get here if the driver has been unloaded
* during parity error recovery and is either waiting for a
* leader to complete or for other functions to unload and
@@ -2547,8 +2825,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
return -EINVAL;
}
- /*
- * It's important to set the bp->state to the value different from
+ /* Nothing to do during unload if the previous bnx2x_nic_load()
+ * has not completed successfully - all resources are released.
+ *
+ * We can get here only after an unsuccessful ndo_* callback, during
+ * which the dev->IFF_UP flag is still on.
+ */
+ if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
+ return 0;
+
+ /* It's important to set the bp->state to the value different from
* BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
* may restart the Tx from the NAPI context (see bnx2x_tx_int()).
*/
@@ -2566,16 +2852,24 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
del_timer_sync(&bp->timer);
- /* Set ALWAYS_ALIVE bit in shmem */
- bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
-
- bnx2x_drv_pulse(bp);
+ if (IS_PF(bp)) {
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+ bnx2x_drv_pulse(bp);
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_save_statistics(bp);
+ }
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- bnx2x_save_statistics(bp);
+ /* wait till consumers catch up with producers in all queues */
+ bnx2x_drain_tx_queues(bp);
- /* Cleanup the chip if needed */
- if (unload_mode != UNLOAD_RECOVERY)
+ /* if VF, indicate to the PF that this function is going down (the PF
+ * will delete sp elements and clear initializations)
+ */
+ if (IS_VF(bp))
+ bnx2x_vfpf_close_vf(bp);
+ else if (unload_mode != UNLOAD_RECOVERY)
+ /* if this is a normal/close unload, need to clean up the chip */
bnx2x_chip_cleanup(bp, unload_mode, keep_link);
else {
/* Send the UNLOAD_REQUEST to the MCP */
@@ -2608,7 +2902,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
* At this stage no more interrupts will arrive so we may safely clean
* the queueable objects here in case they failed to get cleaned so far.
*/
- bnx2x_squeeze_objects(bp);
+ if (IS_PF(bp))
+ bnx2x_squeeze_objects(bp);
/* There should be no more pending SP commands at this stage */
bp->sp_state = 0;
@@ -2622,19 +2917,22 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
- if (CNIC_LOADED(bp)) {
+ bnx2x_free_fp_mem(bp);
+ if (CNIC_LOADED(bp))
bnx2x_free_fp_mem_cnic(bp);
- bnx2x_free_mem_cnic(bp);
- }
- bnx2x_free_mem(bp);
+ if (IS_PF(bp)) {
+ bnx2x_free_mem(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_free_mem_cnic(bp);
+ }
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
*/
- if (bnx2x_chk_parity_attn(bp, &global, false)) {
+ if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
bnx2x_set_reset_in_progress(bp);
/* Set RESET_IS_GLOBAL if needed */
@@ -2646,7 +2944,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
/* The last driver must disable a "close the gate" if there is no
* parity attention or "process kill" pending.
*/
- if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ if (IS_PF(bp) &&
+ !bnx2x_clear_pf_load(bp) &&
+ bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
@@ -2730,7 +3030,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
-
if (bnx2x_has_rx_work(fp)) {
work_done += bnx2x_rx_int(fp, budget - work_done);
@@ -2829,17 +3128,21 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
return bd_prod;
}
-static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
+#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
+static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
+ __sum16 tsum = (__force __sum16) csum;
+
if (fix > 0)
- csum = (u16) ~csum_fold(csum_sub(csum,
- csum_partial(t_header - fix, fix, 0)));
+ tsum = ~csum_fold(csum_sub((__force __wsum) csum,
+ csum_partial(t_header - fix, fix, 0)));
else if (fix < 0)
- csum = (u16) ~csum_fold(csum_add(csum,
- csum_partial(t_header, -fix, 0)));
+ tsum = ~csum_fold(csum_add((__force __wsum) csum,
+ csum_partial(t_header, -fix, 0)));
- return swab16(csum);
+ return bswab16(csum);
}
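A minimal user-space sketch, not part of this patch, of the byte swap the new bswab16() wrapper above performs; my_swab16() is a hypothetical stand-in for the kernel's swab16(), and the __force __le16 cast in bswab16() only adds a sparse endianness annotation on top of it:

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for the kernel's swab16(): swap the two bytes of a u16 */
	static uint16_t my_swab16(uint16_t x)
	{
		return (uint16_t)((x << 8) | (x >> 8));
	}

	int main(void)
	{
		uint16_t csum = 0x1234;

		/* bswab16(csum) evaluates to swab16(csum), annotated as __le16 */
		printf("0x%04x -> 0x%04x\n", csum, my_swab16(csum));
		return 0;
	}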
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
@@ -2973,23 +3276,24 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+ pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
pbd->tcp_flags = pbd_tcp_flags(skb);
if (xmit_type & XMIT_GSO_V4) {
- pbd->ip_id = swab16(ip_hdr(skb)->id);
+ pbd->ip_id = bswab16(ip_hdr(skb)->id);
pbd->tcp_pseudo_csum =
- swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
+ bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
} else
pbd->tcp_pseudo_csum =
- swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
+ bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
- pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
+ pbd->global_data |=
+ cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
/**
@@ -3003,12 +3307,12 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* 57712 related
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3016,12 +3320,11 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
- } else
- /* We support checksum offload for TCP and UDP only.
- * No need to pass the UDP header length - it's a constant.
- */
- return skb_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ }
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
@@ -3056,8 +3359,9 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
/* for now NS flag is not used in Linux */
pbd->global_data =
- (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
- ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+ cpu_to_le16(hlen |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
pbd->ip_hlen_w = (skb_transport_header(skb) -
skb_network_header(skb)) >> 1;
@@ -3074,7 +3378,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
hlen = hlen*2;
if (xmit_type & XMIT_CSUM_TCP) {
- pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+ pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
} else {
s8 fix = SKB_CS_OFF(skb); /* signed! */
@@ -3154,17 +3458,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
- netif_tx_stop_queue(txq);
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
DP(NETIF_MSG_TX_QUEUED,
- "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
+ "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
- ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+ ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
+ skb->len);
eth = (struct ethhdr *)skb->data;
@@ -3245,8 +3550,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le16(vlan_tx_tag_get(skb));
tx_start_bd->bd_flags.as_bitfield |=
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
- } else
- tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ } else {
+ /* when transmitting in a vf, start bd must hold the ethertype
+ * for fw to enforce it
+ */
+#ifndef BNX2X_STOP_ON_ERROR
+ if (IS_VF(bp)) {
+#endif
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+#ifndef BNX2X_STOP_ON_ERROR
+ } else {
+ /* used by FW for packet accounting */
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
+#endif
+ }
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3262,9 +3581,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
- if (IS_MF_SI(bp)) {
- /*
- * fill in the MAC addresses in the PBD - for local
+
+ if (IS_MF_SI(bp) || IS_VF(bp)) {
+ /* fill in the MAC addresses in the PBD - for local
* switching
*/
bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
@@ -3545,7 +3864,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
@@ -3741,6 +4059,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
} else /* if rx_ring_size specified - use it */
rx_ring_size = bp->rx_ring_size;
+ DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
+
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
@@ -3866,6 +4186,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
WARN_ON(delta < 0);
+ bnx2x_shrink_eth_fp(bp, delta);
if (CNIC_SUPPORT(bp))
/* move non eth FPs next to last eth FP
* must be done in that order
@@ -3886,7 +4207,10 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
- kfree(bp->fp->tpa_info);
+ int i;
+
+ for (i = 0; i < bp->fp_array_size; i++)
+ kfree(bp->fp[i].tpa_info);
kfree(bp->fp);
kfree(bp->sp_objs);
kfree(bp->fp_stats);
@@ -3906,18 +4230,22 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
/*
* The biggest MSI-X table we might need is as a maximum number of fast
- * path IGU SBs plus default SB (for PF).
+ * path IGU SBs plus default SB (for PF only).
*/
- msix_table_size = bp->igu_sb_cnt + 1;
+ msix_table_size = bp->igu_sb_cnt;
+ if (IS_PF(bp))
+ msix_table_size++;
+ BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
/* fp array: RSS plus CNIC related L2 queues */
fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
- BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+ bp->fp_array_size = fp_array_size;
+ BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
- fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
+ fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
- for (i = 0; i < fp_array_size; i++) {
+ for (i = 0; i < bp->fp_array_size; i++) {
fp[i].tpa_info =
kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
sizeof(struct bnx2x_agg_info), GFP_KERNEL);
@@ -3928,13 +4256,13 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
bp->fp = fp;
/* allocate sp objs */
- bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
GFP_KERNEL);
if (!bp->sp_objs)
goto alloc_err;
/* allocate fp_stats */
- bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
GFP_KERNEL);
if (!bp->fp_stats)
goto alloc_err;
@@ -4013,7 +4341,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
/*
- * The selected actived PHY is always after swapping (in case PHY
+ * The selected activated PHY is always after swapping (in case PHY
* swapping is enabled). So when swapping is enabled, we need to reverse
* the configuration
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0991534f61da..aee7671ff4c1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
/* bnx2x_cmn.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
#include "bnx2x.h"
+#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -196,6 +197,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/**
* bnx2x__link_status_update - handles link status change.
@@ -401,7 +403,7 @@ void bnx2x_set_rx_mode(struct net_device *dev);
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh().
*/
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/**
* bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
@@ -413,11 +415,11 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
* @tx_accept_flags: tx accept configuration (tx switch)
* @ramrod_flags: ramrod configuration
*/
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags);
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags);
/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
@@ -477,8 +479,6 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
*/
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
-void bnx2x_panic_dump(struct bnx2x *bp);
-
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
/* validate correct fw is loaded */
@@ -496,9 +496,44 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod,
+ u16 rx_sge_prod)
+{
+ struct ustorm_eth_rx_producers rx_prods = {0};
+ u32 i;
+
+ /* Update producers */
+ rx_prods.bd_prod = bd_prod;
+ rx_prods.cqe_prod = rx_comp_prod;
+ rx_prods.sge_prod = rx_sge_prod;
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ * This is only applicable for weak-ordered memory model archs such
+ * as IA-64. The following barrier is also mandatory since FW
+ * assumes BDs must have buffers.
+ */
+ wmb();
+
+ for (i = 0; i < sizeof(rx_prods)/4; i++)
+ REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
+ ((u32 *)&rx_prods)[i]);
+
+ mmiowb(); /* keep prod updates ordered */
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
+ fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
+
/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);
@@ -507,9 +542,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p);
/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
-
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
@@ -612,38 +644,6 @@ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
-static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 bd_prod,
- u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
-{
- struct ustorm_eth_rx_producers rx_prods = {0};
- u32 i;
-
- /* Update producers */
- rx_prods.bd_prod = bd_prod;
- rx_prods.cqe_prod = rx_comp_prod;
- rx_prods.sge_prod = rx_sge_prod;
-
- /*
- * Make sure that the BD and SGE data is updated before updating the
- * producers since FW might read the BD/SGE right after the producer
- * is updated.
- * This is only applicable for weak-ordered memory model archs such
- * as IA-64. The following barrier is also mandatory since FW will
- * assumes BDs must have buffers.
- */
- wmb();
-
- for (i = 0; i < sizeof(rx_prods)/4; i++)
- REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
-
- mmiowb(); /* keep prod updates ordered */
-
- DP(NETIF_MSG_RX_STATUS,
- "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
- fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
-}
-
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
u8 segment, u16 index, u8 op,
u8 update, u32 igu_addr)
@@ -819,7 +819,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
return;
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
sw_buf->page = NULL;
@@ -863,7 +863,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
-void bnx2x_set_int_mode(struct bnx2x *bp);
+int bnx2x_set_int_mode(struct bnx2x *bp);
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
@@ -973,7 +973,6 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
return bnx2x_func_state_change(bp, &func_params);
}
-
/**
* bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
*
@@ -982,8 +981,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
* @fw_lo: pointer to lower part
* @mac: pointer to MAC address
*/
-static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
- u8 *mac)
+static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ __le16 *fw_lo, u8 *mac)
{
((u8 *)fw_hi)[0] = mac[1];
((u8 *)fw_hi)[1] = mac[0];
@@ -1108,6 +1107,9 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
+ bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+ bnx2x_get_path_func_num(bp));
+
/* RSS configuration object */
bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
@@ -1125,15 +1127,7 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
-{
- struct bnx2x *bp = fp->bp;
-
- if (!CHIP_IS_E1x(bp))
- return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
- else
- return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
-}
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
@@ -1228,7 +1222,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
#endif
}
cnt--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
return 0;
@@ -1263,7 +1257,7 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
}
netif_addr_unlock_bh(bp->dev);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
smp_mb();
@@ -1393,4 +1387,13 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
return false;
}
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
#endif /* BNX2X_CMN_H */
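A minimal user-space sketch, assuming illustrative ring/producer names rather than driver API, of the ordering rule the bnx2x_update_rx_prod() comment above describes: descriptor data must be made visible before the producer index is published (a C11 release fence stands in for the kernel's wmb() ahead of the REG_WR() doorbell):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 16

	struct ring {
		uint64_t bd[RING_SIZE];     /* buffer descriptors (payload) */
		_Atomic uint16_t producer;  /* published producer index */
	};

	/* Write the descriptor first, then fence, then advance the producer, so
	 * a consumer that observes the new producer value also observes the
	 * completed descriptor write - the same rule bnx2x_update_rx_prod()
	 * enforces with wmb() before writing the producers.
	 */
	static void ring_post(struct ring *r, uint16_t idx, uint64_t desc)
	{
		r->bd[idx % RING_SIZE] = desc;
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(&r->producer, (uint16_t)(idx + 1),
				      memory_order_relaxed);
	}

	int main(void)
	{
		static struct ring r;

		ring_post(&r, 0, 0xabcdef);
		printf("producer %u\n",
		       (unsigned)atomic_load_explicit(&r.producer,
						      memory_order_acquire));
		return 0;
	}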
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 10bc093d2ca4..568205436a15 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
/* bnx2x_dcb.c: Broadcom Everest network driver.
*
- * Copyright 2009-2012 Broadcom Corporation
+ * Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -416,6 +416,7 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
if (bp->dcbx_port_params.pfc.enabled &&
(!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured))
/*
@@ -558,6 +559,7 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
if (!bp->dcbx_port_params.ets.enabled ||
@@ -1904,11 +1906,13 @@ static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
struct bnx2x *bp = netdev_priv(netdev);
DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+ /* Fail to set state to "enabled" if dcbx is disabled in nvram */
if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ||
(bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) {
DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n");
return 1;
}
+
bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
return 0;
}
@@ -2052,7 +2056,6 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
return;
-
if (setting) {
bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio);
bp->dcbx_config_params.admin_pfc_tx_enable = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 06c7a0435948..d153f44cf8f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
- * Copyright 2009-2012 Broadcom Corporation
+ * Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index b926f58e983b..bff5e33eaa14 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,6 +1,6 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -22,120 +22,37 @@
#ifndef BNX2X_DUMP_H
#define BNX2X_DUMP_H
+/* WaitP Definitions */
+#define DRV_DUMP_XSTORM_WAITP_ADDRESS 0x2b8a80
+#define DRV_DUMP_TSTORM_WAITP_ADDRESS 0x1b8a80
+#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80
+#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80
-/*definitions */
-#define XSTORM_WAITP_ADDR 0x2b8a80
-#define TSTORM_WAITP_ADDR 0x1b8a80
-#define USTORM_WAITP_ADDR 0x338a80
-#define CSTORM_WAITP_ADDR 0x238a80
-#define TSTORM_CAM_MODE 0x1B1440
+/* Possible Chips */
+#define DUMP_CHIP_E1 1
+#define DUMP_CHIP_E1H 2
+#define DUMP_CHIP_E2 4
+#define DUMP_CHIP_E3A0 8
+#define DUMP_CHIP_E3B0 16
+#define DUMP_PATH_0 512
+#define DUMP_PATH_1 1024
+#define NUM_PRESETS 13
+#define NUM_CHIPS 5
-#define MAX_TIMER_PENDING 200
-#define TIMER_SCAN_DONT_CARE 0xFF
-#define RI_E1 0x1
-#define RI_E1H 0x2
-#define RI_E2 0x4
-#define RI_E3 0x8
-#define RI_E3B0 0x10
-#define RI_ONLINE 0x100
-#define RI_OFFLINE 0x0
-#define RI_PATH0_DUMP 0x200
-#define RI_PATH1_DUMP 0x400
-
-#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
-#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
-#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE)
-#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E3_ONLINE (RI_E3 | RI_ONLINE)
-#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE)
-#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE)
-#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3E3B0_ONLINE \
- (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE)
-#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE)
-#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE)
-#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE)
-#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE)
-#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE)
-#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE)
-#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE2E3E3B0_OFFLINE \
- (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE
-#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE
-
-#define DBG_DMP_TRACE_BUFFER_SIZE 0x800
-#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
- ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
-
-struct dump_sign {
- u32 time_stamp;
- u32 diag_ver;
- u32 grc_dump_ver;
-};
-
-struct dump_hdr {
- u32 hdr_size; /* in dwords, excluding this field */
- struct dump_sign dump_sign;
- u32 xstorm_waitp;
- u32 tstorm_waitp;
- u32 ustorm_waitp;
- u32 cstorm_waitp;
- u16 info;
- u8 idle_chk;
- u8 reserved;
+struct dump_header {
+ u32 header_size; /* Size in DWORDs excluding this field */
+ u32 version;
+ u32 preset;
+ u32 dump_meta_data; /* OR of CHIP and PATH. */
};
+#define BNX2X_DUMP_VERSION 0x50acff01
struct reg_addr {
u32 addr;
u32 size;
- u16 info;
+ u32 chips;
+ u32 presets;
};
struct wreg_addr {
@@ -143,1005 +60,2168 @@ struct wreg_addr {
u32 size;
u32 read_regs_count;
const u32 *read_regs;
- u16 info;
+ u32 chips;
+ u32 presets;
+};
+
+#define PAGE_MODE_VALUES_E2 2
+#define PAGE_READ_REGS_E2 1
+#define PAGE_WRITE_REGS_E2 1
+static const u32 page_vals_e2[] = {0, 128};
+static const u32 page_write_regs_e2[] = {328476};
+static const struct reg_addr page_read_regs_e2[] = {
+ {0x58000, 4608, DUMP_CHIP_E2, 0x30}
+};
+
+#define PAGE_MODE_VALUES_E3 2
+#define PAGE_READ_REGS_E3 1
+#define PAGE_WRITE_REGS_E3 1
+static const u32 page_vals_e3[] = {0, 128};
+static const u32 page_write_regs_e3[] = {328476};
+static const struct reg_addr page_read_regs_e3[] = {
+ {0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30}
};
static const struct reg_addr reg_addrs[] = {
- { 0x2000, 341, RI_ALL_ONLINE },
- { 0x2800, 103, RI_ALL_ONLINE },
- { 0x3000, 287, RI_ALL_ONLINE },
- { 0x3800, 331, RI_ALL_ONLINE },
- { 0x8800, 6, RI_ALL_ONLINE },
- { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x9000, 147, RI_E2E3E3B0_ONLINE },
- { 0x924c, 1, RI_E2_ONLINE },
- { 0x9250, 16, RI_E2E3E3B0_ONLINE },
- { 0x9400, 33, RI_E2E3E3B0_ONLINE },
- { 0x9484, 5, RI_E3E3B0_ONLINE },
- { 0xa000, 27, RI_ALL_ONLINE },
- { 0xa06c, 1, RI_E1E1H_ONLINE },
- { 0xa070, 71, RI_ALL_ONLINE },
- { 0xa18c, 4, RI_E1E1H_ONLINE },
- { 0xa19c, 62, RI_ALL_ONLINE },
- { 0xa294, 2, RI_E1E1H_ONLINE },
- { 0xa29c, 2, RI_ALL_ONLINE },
- { 0xa2a4, 2, RI_E1E1HE2_ONLINE },
- { 0xa2ac, 52, RI_ALL_ONLINE },
- { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3b8, 2, RI_E3E3B0_ONLINE },
- { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa400, 40, RI_ALL_ONLINE },
- { 0xa4a0, 1, RI_E1E1HE2_ONLINE },
- { 0xa4a4, 2, RI_ALL_ONLINE },
- { 0xa4ac, 2, RI_E1E1H_ONLINE },
- { 0xa4b4, 1, RI_E1E1HE2_ONLINE },
- { 0xa4b8, 2, RI_E1E1H_ONLINE },
- { 0xa4c0, 3, RI_ALL_ONLINE },
- { 0xa4cc, 5, RI_E1E1H_ONLINE },
- { 0xa4e0, 3, RI_ALL_ONLINE },
- { 0xa4fc, 2, RI_ALL_ONLINE },
- { 0xa504, 1, RI_E1E1H_ONLINE },
- { 0xa508, 3, RI_ALL_ONLINE },
- { 0xa518, 1, RI_ALL_ONLINE },
- { 0xa520, 1, RI_ALL_ONLINE },
- { 0xa528, 1, RI_ALL_ONLINE },
- { 0xa530, 1, RI_ALL_ONLINE },
- { 0xa538, 1, RI_ALL_ONLINE },
- { 0xa540, 1, RI_ALL_ONLINE },
- { 0xa548, 1, RI_E1E1H_ONLINE },
- { 0xa550, 1, RI_E1E1H_ONLINE },
- { 0xa558, 1, RI_E1E1H_ONLINE },
- { 0xa560, 1, RI_E1E1H_ONLINE },
- { 0xa568, 1, RI_E1E1H_ONLINE },
- { 0xa570, 1, RI_ALL_ONLINE },
- { 0xa580, 1, RI_ALL_ONLINE },
- { 0xa590, 1, RI_ALL_ONLINE },
- { 0xa5a0, 1, RI_E1E1HE2_ONLINE },
- { 0xa5c0, 1, RI_ALL_ONLINE },
- { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5f8, 1, RI_E1HE2_ONLINE },
- { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE },
- { 0xa620, 6, RI_E2E3E3B0_ONLINE },
- { 0xa638, 20, RI_E2_ONLINE },
- { 0xa688, 42, RI_E2E3E3B0_ONLINE },
- { 0xa730, 1, RI_E2_ONLINE },
- { 0xa734, 2, RI_E2E3E3B0_ONLINE },
- { 0xa73c, 4, RI_E2_ONLINE },
- { 0xa74c, 5, RI_E2E3E3B0_ONLINE },
- { 0xa760, 5, RI_E2_ONLINE },
- { 0xa774, 7, RI_E2E3E3B0_ONLINE },
- { 0xa790, 15, RI_E2_ONLINE },
- { 0xa7cc, 4, RI_E2E3E3B0_ONLINE },
- { 0xa7e0, 6, RI_E3E3B0_ONLINE },
- { 0xa800, 18, RI_E2_ONLINE },
- { 0xa848, 33, RI_E2E3E3B0_ONLINE },
- { 0xa8cc, 2, RI_E3E3B0_ONLINE },
- { 0xa8d4, 4, RI_E2E3E3B0_ONLINE },
- { 0xa8e4, 1, RI_E3E3B0_ONLINE },
- { 0xa8e8, 1, RI_E2E3E3B0_ONLINE },
- { 0xa8f0, 1, RI_E2E3E3B0_ONLINE },
- { 0xa8f8, 30, RI_E3E3B0_ONLINE },
- { 0xa974, 73, RI_E3E3B0_ONLINE },
- { 0xac30, 1, RI_E3E3B0_ONLINE },
- { 0xac40, 1, RI_E3E3B0_ONLINE },
- { 0xac50, 1, RI_E3E3B0_ONLINE },
- { 0xac60, 1, RI_E3B0_ONLINE },
- { 0x10000, 9, RI_ALL_ONLINE },
- { 0x10024, 1, RI_E1E1HE2_ONLINE },
- { 0x10028, 5, RI_ALL_ONLINE },
- { 0x1003c, 6, RI_E1E1HE2_ONLINE },
- { 0x10054, 20, RI_ALL_ONLINE },
- { 0x100a4, 4, RI_E1E1HE2_ONLINE },
- { 0x100b4, 11, RI_ALL_ONLINE },
- { 0x100e0, 4, RI_E1E1HE2_ONLINE },
- { 0x100f0, 8, RI_ALL_ONLINE },
- { 0x10110, 6, RI_E1E1HE2_ONLINE },
- { 0x10128, 110, RI_ALL_ONLINE },
- { 0x102e0, 4, RI_E1E1HE2_ONLINE },
- { 0x102f0, 18, RI_ALL_ONLINE },
- { 0x10338, 20, RI_E1E1HE2_ONLINE },
- { 0x10388, 10, RI_ALL_ONLINE },
- { 0x10400, 6, RI_E1E1HE2_ONLINE },
- { 0x10418, 6, RI_ALL_ONLINE },
- { 0x10430, 10, RI_E1E1HE2_ONLINE },
- { 0x10458, 22, RI_ALL_ONLINE },
- { 0x104b0, 12, RI_E1E1HE2_ONLINE },
- { 0x104e0, 1, RI_ALL_ONLINE },
- { 0x104e8, 2, RI_ALL_ONLINE },
- { 0x104f4, 2, RI_ALL_ONLINE },
- { 0x10500, 146, RI_ALL_ONLINE },
- { 0x10750, 2, RI_E1E1HE2_ONLINE },
- { 0x10760, 2, RI_E1E1HE2_ONLINE },
- { 0x10770, 2, RI_E1E1HE2_ONLINE },
- { 0x10780, 2, RI_E1E1HE2_ONLINE },
- { 0x10790, 2, RI_ALL_ONLINE },
- { 0x107a0, 2, RI_E1E1HE2_ONLINE },
- { 0x107b0, 2, RI_E1E1HE2_ONLINE },
- { 0x107c0, 2, RI_E1E1HE2_ONLINE },
- { 0x107d0, 2, RI_E1E1HE2_ONLINE },
- { 0x107e0, 2, RI_ALL_ONLINE },
- { 0x10880, 2, RI_ALL_ONLINE },
- { 0x10900, 2, RI_ALL_ONLINE },
- { 0x16000, 1, RI_E1HE2_ONLINE },
- { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE },
- { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE },
- { 0x16090, 4, RI_E1HE2E3_ONLINE },
- { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0x160dc, 2, RI_E1HE2_ONLINE },
- { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE },
- { 0x1610c, 2, RI_E1HE2_ONLINE },
- { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE },
- { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x18010, 35, RI_E2E3E3B0_ONLINE },
- { 0x180a4, 2, RI_E2E3E3B0_ONLINE },
- { 0x180c0, 9, RI_E2E3E3B0_ONLINE },
- { 0x180e4, 1, RI_E2E3_ONLINE },
- { 0x180e8, 2, RI_E2E3E3B0_ONLINE },
- { 0x180f0, 1, RI_E2E3_ONLINE },
- { 0x180f4, 79, RI_E2E3E3B0_ONLINE },
- { 0x18230, 1, RI_E2E3_ONLINE },
- { 0x18234, 2, RI_E2E3E3B0_ONLINE },
- { 0x1823c, 1, RI_E2E3_ONLINE },
- { 0x18240, 13, RI_E2E3E3B0_ONLINE },
- { 0x18274, 1, RI_E2_ONLINE },
- { 0x18278, 81, RI_E2E3E3B0_ONLINE },
- { 0x18440, 63, RI_E2E3E3B0_ONLINE },
- { 0x18570, 42, RI_E3E3B0_ONLINE },
- { 0x18618, 25, RI_E3B0_ONLINE },
- { 0x18680, 44, RI_E3B0_ONLINE },
- { 0x18748, 12, RI_E3B0_ONLINE },
- { 0x18788, 1, RI_E3B0_ONLINE },
- { 0x1879c, 6, RI_E3B0_ONLINE },
- { 0x187c4, 51, RI_E3B0_ONLINE },
- { 0x18a00, 48, RI_E3B0_ONLINE },
- { 0x20000, 24, RI_ALL_ONLINE },
- { 0x20060, 8, RI_ALL_ONLINE },
- { 0x20080, 94, RI_ALL_ONLINE },
- { 0x201f8, 1, RI_E1E1H_ONLINE },
- { 0x201fc, 1, RI_ALL_ONLINE },
- { 0x20200, 1, RI_E1E1H_ONLINE },
- { 0x20204, 1, RI_ALL_ONLINE },
- { 0x20208, 1, RI_E1E1H_ONLINE },
- { 0x2020c, 39, RI_ALL_ONLINE },
- { 0x202c8, 1, RI_E2E3E3B0_ONLINE },
- { 0x202d8, 4, RI_E2E3E3B0_ONLINE },
- { 0x202f0, 1, RI_E3B0_ONLINE },
- { 0x20400, 2, RI_ALL_ONLINE },
- { 0x2040c, 8, RI_ALL_ONLINE },
- { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0x20480, 1, RI_ALL_ONLINE },
- { 0x20500, 1, RI_ALL_ONLINE },
- { 0x20600, 1, RI_ALL_ONLINE },
- { 0x28000, 1, RI_ALL_ONLINE },
- { 0x28004, 8191, RI_ALL_OFFLINE },
- { 0x30000, 1, RI_ALL_ONLINE },
- { 0x30004, 16383, RI_ALL_OFFLINE },
- { 0x40000, 98, RI_ALL_ONLINE },
- { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE },
- { 0x401c8, 1, RI_E1H_ONLINE },
- { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x401d4, 2, RI_E2E3E3B0_ONLINE },
- { 0x40200, 4, RI_ALL_ONLINE },
- { 0x40220, 6, RI_E2E3E3B0_ONLINE },
- { 0x40238, 8, RI_E2E3_ONLINE },
- { 0x40258, 4, RI_E2E3E3B0_ONLINE },
- { 0x40268, 2, RI_E3E3B0_ONLINE },
- { 0x40270, 17, RI_E3B0_ONLINE },
- { 0x40400, 43, RI_ALL_ONLINE },
- { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE },
- { 0x404e0, 1, RI_E2E3E3B0_ONLINE },
- { 0x40500, 2, RI_ALL_ONLINE },
- { 0x40510, 2, RI_ALL_ONLINE },
- { 0x40520, 2, RI_ALL_ONLINE },
- { 0x40530, 2, RI_ALL_ONLINE },
- { 0x40540, 2, RI_ALL_ONLINE },
- { 0x40550, 10, RI_E2E3E3B0_ONLINE },
- { 0x40610, 2, RI_E2E3E3B0_ONLINE },
- { 0x42000, 164, RI_ALL_ONLINE },
- { 0x422c0, 4, RI_E2E3E3B0_ONLINE },
- { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x422e8, 1, RI_E2E3E3B0_ONLINE },
- { 0x42400, 49, RI_ALL_ONLINE },
- { 0x424c8, 38, RI_ALL_ONLINE },
- { 0x42568, 2, RI_ALL_ONLINE },
- { 0x42640, 5, RI_E2E3E3B0_ONLINE },
- { 0x42800, 1, RI_ALL_ONLINE },
- { 0x50000, 1, RI_ALL_ONLINE },
- { 0x50004, 19, RI_ALL_ONLINE },
- { 0x50050, 8, RI_ALL_ONLINE },
- { 0x50070, 88, RI_ALL_ONLINE },
- { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE },
- { 0x50200, 2, RI_ALL_ONLINE },
- { 0x5020c, 7, RI_ALL_ONLINE },
- { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x50240, 1, RI_ALL_ONLINE },
- { 0x50280, 1, RI_ALL_ONLINE },
- { 0x50300, 1, RI_E2E3E3B0_ONLINE },
- { 0x5030c, 1, RI_E2E3E3B0_ONLINE },
- { 0x50318, 1, RI_E2E3E3B0_ONLINE },
- { 0x5031c, 1, RI_E2E3E3B0_ONLINE },
- { 0x50320, 2, RI_E2E3E3B0_ONLINE },
- { 0x50330, 1, RI_E3B0_ONLINE },
- { 0x52000, 1, RI_ALL_ONLINE },
- { 0x54000, 1, RI_ALL_ONLINE },
- { 0x54004, 3327, RI_ALL_OFFLINE },
- { 0x58000, 1, RI_ALL_ONLINE },
- { 0x58004, 8191, RI_E1E1H_OFFLINE },
- { 0x60000, 26, RI_ALL_ONLINE },
- { 0x60068, 8, RI_E1E1H_ONLINE },
- { 0x60088, 12, RI_ALL_ONLINE },
- { 0x600b8, 9, RI_E1E1H_ONLINE },
- { 0x600dc, 1, RI_ALL_ONLINE },
- { 0x600e0, 5, RI_E1E1H_ONLINE },
- { 0x600f4, 1, RI_E1E1HE2_ONLINE },
- { 0x600f8, 1, RI_E1E1H_ONLINE },
- { 0x600fc, 8, RI_ALL_ONLINE },
- { 0x6013c, 24, RI_E1H_ONLINE },
- { 0x6019c, 2, RI_E2E3E3B0_ONLINE },
- { 0x601ac, 18, RI_E2E3E3B0_ONLINE },
- { 0x60200, 1, RI_ALL_ONLINE },
- { 0x60204, 2, RI_ALL_OFFLINE },
- { 0x60210, 13, RI_E2E3E3B0_ONLINE },
- { 0x60244, 16, RI_E3B0_ONLINE },
- { 0x61000, 1, RI_ALL_ONLINE },
- { 0x61004, 511, RI_ALL_OFFLINE },
- { 0x61800, 512, RI_E3E3B0_OFFLINE },
- { 0x70000, 8, RI_ALL_ONLINE },
- { 0x70020, 8184, RI_ALL_OFFLINE },
- { 0x78000, 8192, RI_E3E3B0_OFFLINE },
- { 0x85000, 3, RI_ALL_OFFLINE },
- { 0x8501c, 7, RI_ALL_OFFLINE },
- { 0x85048, 1, RI_ALL_OFFLINE },
- { 0x85200, 32, RI_ALL_OFFLINE },
- { 0xb0000, 16384, RI_E1H_OFFLINE },
- { 0xc1000, 7, RI_ALL_ONLINE },
- { 0xc103c, 2, RI_E2E3E3B0_ONLINE },
- { 0xc1800, 2, RI_ALL_ONLINE },
- { 0xc2000, 164, RI_ALL_ONLINE },
- { 0xc22c0, 5, RI_E2E3E3B0_ONLINE },
- { 0xc22d8, 4, RI_E2E3E3B0_ONLINE },
- { 0xc2400, 49, RI_ALL_ONLINE },
- { 0xc24c8, 38, RI_ALL_ONLINE },
- { 0xc2568, 2, RI_ALL_ONLINE },
- { 0xc2600, 1, RI_ALL_ONLINE },
- { 0xc4000, 165, RI_ALL_ONLINE },
- { 0xc42d8, 2, RI_E2E3E3B0_ONLINE },
- { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0xc42fc, 1, RI_E2E3E3B0_ONLINE },
- { 0xc4400, 51, RI_ALL_ONLINE },
- { 0xc44d0, 38, RI_ALL_ONLINE },
- { 0xc4570, 2, RI_ALL_ONLINE },
- { 0xc4578, 5, RI_E2E3E3B0_ONLINE },
- { 0xc4600, 1, RI_ALL_ONLINE },
- { 0xd0000, 19, RI_ALL_ONLINE },
- { 0xd004c, 8, RI_ALL_ONLINE },
- { 0xd006c, 91, RI_ALL_ONLINE },
- { 0xd01fc, 1, RI_E2E3E3B0_ONLINE },
- { 0xd0200, 2, RI_ALL_ONLINE },
- { 0xd020c, 7, RI_ALL_ONLINE },
- { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0xd0280, 1, RI_ALL_ONLINE },
- { 0xd0300, 1, RI_ALL_ONLINE },
- { 0xd0400, 1, RI_ALL_ONLINE },
- { 0xd0818, 1, RI_E3B0_ONLINE },
- { 0xd4000, 1, RI_ALL_ONLINE },
- { 0xd4004, 2559, RI_ALL_OFFLINE },
- { 0xd8000, 1, RI_ALL_ONLINE },
- { 0xd8004, 8191, RI_ALL_OFFLINE },
- { 0xe0000, 21, RI_ALL_ONLINE },
- { 0xe0054, 8, RI_ALL_ONLINE },
- { 0xe0074, 49, RI_ALL_ONLINE },
- { 0xe0138, 1, RI_E1E1H_ONLINE },
- { 0xe013c, 35, RI_ALL_ONLINE },
- { 0xe01f4, 1, RI_E2_ONLINE },
- { 0xe01f8, 1, RI_E2E3E3B0_ONLINE },
- { 0xe0200, 2, RI_ALL_ONLINE },
- { 0xe020c, 8, RI_ALL_ONLINE },
- { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0xe0280, 1, RI_ALL_ONLINE },
- { 0xe0300, 1, RI_ALL_ONLINE },
- { 0xe0400, 1, RI_E3B0_ONLINE },
- { 0xe1000, 1, RI_ALL_ONLINE },
- { 0xe2000, 1, RI_ALL_ONLINE },
- { 0xe2004, 2047, RI_ALL_OFFLINE },
- { 0xf0000, 1, RI_ALL_ONLINE },
- { 0xf0004, 16383, RI_ALL_OFFLINE },
- { 0x101000, 12, RI_ALL_ONLINE },
- { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x101054, 3, RI_E2E3E3B0_ONLINE },
- { 0x101100, 1, RI_ALL_ONLINE },
- { 0x101800, 8, RI_ALL_ONLINE },
- { 0x102000, 18, RI_ALL_ONLINE },
- { 0x102068, 6, RI_E2E3E3B0_ONLINE },
- { 0x102080, 17, RI_ALL_ONLINE },
- { 0x1020c8, 8, RI_E1H_ONLINE },
- { 0x1020e8, 9, RI_E2E3E3B0_ONLINE },
- { 0x102400, 1, RI_ALL_ONLINE },
- { 0x103000, 26, RI_ALL_ONLINE },
- { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x1030ac, 2, RI_E2E3E3B0_ONLINE },
- { 0x1030b4, 1, RI_E2_ONLINE },
- { 0x1030b8, 7, RI_E2E3E3B0_ONLINE },
- { 0x1030d8, 8, RI_E2E3E3B0_ONLINE },
- { 0x103400, 1, RI_E2E3E3B0_ONLINE },
- { 0x103404, 135, RI_E2E3E3B0_OFFLINE },
- { 0x103800, 8, RI_ALL_ONLINE },
- { 0x104000, 63, RI_ALL_ONLINE },
- { 0x10411c, 16, RI_E2E3E3B0_ONLINE },
- { 0x104200, 17, RI_ALL_ONLINE },
- { 0x104400, 64, RI_ALL_ONLINE },
- { 0x104500, 192, RI_ALL_OFFLINE },
- { 0x104800, 64, RI_ALL_ONLINE },
- { 0x104900, 192, RI_ALL_OFFLINE },
- { 0x105000, 256, RI_ALL_ONLINE },
- { 0x105400, 768, RI_ALL_OFFLINE },
- { 0x107000, 7, RI_E2E3E3B0_ONLINE },
- { 0x10701c, 1, RI_E3E3B0_ONLINE },
- { 0x108000, 33, RI_E1E1H_ONLINE },
- { 0x1080ac, 5, RI_E1H_ONLINE },
- { 0x108100, 5, RI_E1E1H_ONLINE },
- { 0x108120, 5, RI_E1E1H_ONLINE },
- { 0x108200, 74, RI_E1E1H_ONLINE },
- { 0x108400, 74, RI_E1E1H_ONLINE },
- { 0x108800, 152, RI_E1E1H_ONLINE },
- { 0x110000, 111, RI_E2E3E3B0_ONLINE },
- { 0x1101dc, 1, RI_E3E3B0_ONLINE },
- { 0x110200, 4, RI_E2E3E3B0_ONLINE },
- { 0x120000, 2, RI_ALL_ONLINE },
- { 0x120008, 4, RI_ALL_ONLINE },
- { 0x120018, 3, RI_ALL_ONLINE },
- { 0x120024, 4, RI_ALL_ONLINE },
- { 0x120034, 3, RI_ALL_ONLINE },
- { 0x120040, 4, RI_ALL_ONLINE },
- { 0x120050, 3, RI_ALL_ONLINE },
- { 0x12005c, 4, RI_ALL_ONLINE },
- { 0x12006c, 3, RI_ALL_ONLINE },
- { 0x120078, 4, RI_ALL_ONLINE },
- { 0x120088, 3, RI_ALL_ONLINE },
- { 0x120094, 4, RI_ALL_ONLINE },
- { 0x1200a4, 3, RI_ALL_ONLINE },
- { 0x1200b0, 4, RI_ALL_ONLINE },
- { 0x1200c0, 3, RI_ALL_ONLINE },
- { 0x1200cc, 4, RI_ALL_ONLINE },
- { 0x1200dc, 3, RI_ALL_ONLINE },
- { 0x1200e8, 4, RI_ALL_ONLINE },
- { 0x1200f8, 3, RI_ALL_ONLINE },
- { 0x120104, 4, RI_ALL_ONLINE },
- { 0x120114, 1, RI_ALL_ONLINE },
- { 0x120118, 22, RI_ALL_ONLINE },
- { 0x120170, 2, RI_E1E1H_ONLINE },
- { 0x120178, 243, RI_ALL_ONLINE },
- { 0x120544, 4, RI_E1E1H_ONLINE },
- { 0x120554, 6, RI_ALL_ONLINE },
- { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205f4, 1, RI_E1HE2_ONLINE },
- { 0x1205f8, 4, RI_E2E3E3B0_ONLINE },
- { 0x120618, 1, RI_E2E3E3B0_ONLINE },
- { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE },
- { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE },
- { 0x120698, 3, RI_E2E3E3B0_ONLINE },
- { 0x1206a4, 1, RI_E2_ONLINE },
- { 0x1206a8, 1, RI_E2E3E3B0_ONLINE },
- { 0x1206b0, 75, RI_E2E3E3B0_ONLINE },
- { 0x1207dc, 1, RI_E2_ONLINE },
- { 0x1207fc, 1, RI_E2E3E3B0_ONLINE },
- { 0x12080c, 65, RI_ALL_ONLINE },
- { 0x120910, 7, RI_E2E3E3B0_ONLINE },
- { 0x120930, 9, RI_E2E3E3B0_ONLINE },
- { 0x12095c, 37, RI_E3E3B0_ONLINE },
- { 0x120a00, 2, RI_E1E1HE2_ONLINE },
- { 0x120b00, 1, RI_E3E3B0_ONLINE },
- { 0x122000, 2, RI_ALL_ONLINE },
- { 0x122008, 2046, RI_E1_OFFLINE },
- { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE },
- { 0x130000, 35, RI_E2E3E3B0_ONLINE },
- { 0x130100, 29, RI_E2E3E3B0_ONLINE },
- { 0x130180, 1, RI_E2E3E3B0_ONLINE },
- { 0x130200, 1, RI_E2E3E3B0_ONLINE },
- { 0x130280, 1, RI_E2E3E3B0_ONLINE },
- { 0x130300, 5, RI_E2E3E3B0_ONLINE },
- { 0x130380, 1, RI_E2E3E3B0_ONLINE },
- { 0x130400, 1, RI_E2E3E3B0_ONLINE },
- { 0x130480, 5, RI_E2E3E3B0_ONLINE },
- { 0x130800, 72, RI_E2E3E3B0_ONLINE },
- { 0x131000, 136, RI_E2E3E3B0_ONLINE },
- { 0x132000, 148, RI_E2E3E3B0_ONLINE },
- { 0x134000, 544, RI_E2E3E3B0_ONLINE },
- { 0x140000, 1, RI_ALL_ONLINE },
- { 0x140004, 9, RI_E1E1HE2E3_ONLINE },
- { 0x140028, 8, RI_ALL_ONLINE },
- { 0x140048, 10, RI_E1E1HE2E3_ONLINE },
- { 0x140070, 1, RI_ALL_ONLINE },
- { 0x140074, 10, RI_E1E1HE2E3_ONLINE },
- { 0x14009c, 1, RI_ALL_ONLINE },
- { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE },
- { 0x1400b4, 7, RI_ALL_ONLINE },
- { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE },
- { 0x1400f8, 2, RI_ALL_ONLINE },
- { 0x140100, 5, RI_E1E1H_ONLINE },
- { 0x140114, 5, RI_E1E1HE2E3_ONLINE },
- { 0x140128, 7, RI_ALL_ONLINE },
- { 0x140144, 9, RI_E1E1HE2E3_ONLINE },
- { 0x140168, 8, RI_ALL_ONLINE },
- { 0x140188, 3, RI_E1E1HE2E3_ONLINE },
- { 0x140194, 13, RI_ALL_ONLINE },
- { 0x140200, 6, RI_E1E1HE2E3_ONLINE },
- { 0x140260, 4, RI_E2E3_ONLINE },
- { 0x140280, 4, RI_E2E3_ONLINE },
- { 0x1402e0, 2, RI_E2E3_ONLINE },
- { 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
- { 0x1402f0, 9, RI_E2E3_ONLINE },
- { 0x140314, 44, RI_E3B0_ONLINE },
- { 0x144000, 4, RI_E1E1H_ONLINE },
- { 0x148000, 4, RI_E1E1H_ONLINE },
- { 0x14c000, 4, RI_E1E1H_ONLINE },
- { 0x150000, 4, RI_E1E1H_ONLINE },
- { 0x154000, 4, RI_E1E1H_ONLINE },
- { 0x158000, 4, RI_E1E1H_ONLINE },
- { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x15c008, 5, RI_E1H_ONLINE },
- { 0x15c020, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c040, 1, RI_E2E3_ONLINE },
- { 0x15c044, 2, RI_E2E3E3B0_ONLINE },
- { 0x15c04c, 8, RI_E2E3_ONLINE },
- { 0x15c06c, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c090, 13, RI_E2E3E3B0_ONLINE },
- { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE },
- { 0x15c128, 2, RI_E2E3_ONLINE },
- { 0x15c130, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c150, 2, RI_E3E3B0_ONLINE },
- { 0x15c158, 2, RI_E3_ONLINE },
- { 0x15c160, 149, RI_E3B0_ONLINE },
- { 0x161000, 7, RI_ALL_ONLINE },
- { 0x16103c, 2, RI_E2E3E3B0_ONLINE },
- { 0x161800, 2, RI_ALL_ONLINE },
- { 0x162000, 54, RI_E3E3B0_ONLINE },
- { 0x162200, 60, RI_E3E3B0_ONLINE },
- { 0x162400, 54, RI_E3E3B0_ONLINE },
- { 0x162600, 60, RI_E3E3B0_ONLINE },
- { 0x162800, 54, RI_E3E3B0_ONLINE },
- { 0x162a00, 60, RI_E3E3B0_ONLINE },
- { 0x162c00, 54, RI_E3E3B0_ONLINE },
- { 0x162e00, 60, RI_E3E3B0_ONLINE },
- { 0x164000, 60, RI_ALL_ONLINE },
- { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x164118, 15, RI_E2E3E3B0_ONLINE },
- { 0x164200, 1, RI_ALL_ONLINE },
- { 0x164208, 1, RI_ALL_ONLINE },
- { 0x164210, 1, RI_ALL_ONLINE },
- { 0x164218, 1, RI_ALL_ONLINE },
- { 0x164220, 1, RI_ALL_ONLINE },
- { 0x164228, 1, RI_ALL_ONLINE },
- { 0x164230, 1, RI_ALL_ONLINE },
- { 0x164238, 1, RI_ALL_ONLINE },
- { 0x164240, 1, RI_ALL_ONLINE },
- { 0x164248, 1, RI_ALL_ONLINE },
- { 0x164250, 1, RI_ALL_ONLINE },
- { 0x164258, 1, RI_ALL_ONLINE },
- { 0x164260, 1, RI_ALL_ONLINE },
- { 0x164270, 2, RI_ALL_ONLINE },
- { 0x164280, 2, RI_ALL_ONLINE },
- { 0x164800, 2, RI_ALL_ONLINE },
- { 0x165000, 2, RI_ALL_ONLINE },
- { 0x166000, 164, RI_ALL_ONLINE },
- { 0x1662cc, 7, RI_E2E3E3B0_ONLINE },
- { 0x166400, 49, RI_ALL_ONLINE },
- { 0x1664c8, 38, RI_ALL_ONLINE },
- { 0x166568, 2, RI_ALL_ONLINE },
- { 0x166570, 5, RI_E2E3E3B0_ONLINE },
- { 0x166800, 1, RI_ALL_ONLINE },
- { 0x168000, 137, RI_ALL_ONLINE },
- { 0x168224, 2, RI_E1E1H_ONLINE },
- { 0x16822c, 29, RI_ALL_ONLINE },
- { 0x1682a0, 12, RI_E1E1H_ONLINE },
- { 0x1682d0, 12, RI_ALL_ONLINE },
- { 0x168300, 2, RI_E1E1H_ONLINE },
- { 0x168308, 68, RI_ALL_ONLINE },
- { 0x168418, 2, RI_E1E1H_ONLINE },
- { 0x168420, 6, RI_ALL_ONLINE },
- { 0x168800, 19, RI_ALL_ONLINE },
- { 0x168900, 1, RI_ALL_ONLINE },
- { 0x168a00, 128, RI_ALL_ONLINE },
- { 0x16a000, 1, RI_ALL_ONLINE },
- { 0x16a004, 1535, RI_ALL_OFFLINE },
- { 0x16c000, 1, RI_ALL_ONLINE },
- { 0x16c004, 1535, RI_ALL_OFFLINE },
- { 0x16e000, 16, RI_E1H_ONLINE },
- { 0x16e040, 8, RI_E2E3E3B0_ONLINE },
- { 0x16e100, 1, RI_E1H_ONLINE },
- { 0x16e200, 2, RI_E1H_ONLINE },
- { 0x16e400, 161, RI_E1H_ONLINE },
- { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x16e68c, 12, RI_E1H_ONLINE },
- { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE },
- { 0x16e6cc, 4, RI_E1H_ONLINE },
- { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE },
- { 0x16e6e8, 5, RI_E2E3_ONLINE },
- { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE },
- { 0x16e768, 17, RI_E2E3E3B0_ONLINE },
- { 0x16e7ac, 12, RI_E3B0_ONLINE },
- { 0x170000, 24, RI_ALL_ONLINE },
- { 0x170060, 4, RI_E1E1H_ONLINE },
- { 0x170070, 65, RI_ALL_ONLINE },
- { 0x170194, 11, RI_E2E3E3B0_ONLINE },
- { 0x1701c4, 1, RI_E2E3E3B0_ONLINE },
- { 0x1701cc, 7, RI_E2E3E3B0_ONLINE },
- { 0x1701e8, 1, RI_E3E3B0_ONLINE },
- { 0x1701ec, 1, RI_E2E3E3B0_ONLINE },
- { 0x1701f4, 1, RI_E2E3E3B0_ONLINE },
- { 0x170200, 4, RI_ALL_ONLINE },
- { 0x170214, 1, RI_ALL_ONLINE },
- { 0x170218, 77, RI_E2E3E3B0_ONLINE },
- { 0x170400, 64, RI_E2E3E3B0_ONLINE },
- { 0x178000, 1, RI_ALL_ONLINE },
- { 0x180000, 61, RI_ALL_ONLINE },
- { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x180200, 58, RI_ALL_ONLINE },
- { 0x180340, 4, RI_ALL_ONLINE },
- { 0x180380, 1, RI_E2E3E3B0_ONLINE },
- { 0x180388, 1, RI_E2E3E3B0_ONLINE },
- { 0x180390, 1, RI_E2E3E3B0_ONLINE },
- { 0x180398, 1, RI_E2E3E3B0_ONLINE },
- { 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
- { 0x1803b4, 2, RI_E3E3B0_ONLINE },
- { 0x180404, 255, RI_E1E1H_OFFLINE },
- { 0x181000, 4, RI_ALL_ONLINE },
- { 0x181010, 1020, RI_ALL_OFFLINE },
- { 0x182000, 4, RI_E3E3B0_ONLINE },
- { 0x1a0000, 1, RI_ALL_ONLINE },
- { 0x1a0004, 5631, RI_ALL_OFFLINE },
- { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x1a8000, 1, RI_ALL_ONLINE },
- { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x1b0000, 1, RI_ALL_ONLINE },
- { 0x1b0004, 15, RI_E1H_OFFLINE },
- { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b0044, 239, RI_E1H_OFFLINE },
- { 0x1b0400, 1, RI_ALL_ONLINE },
- { 0x1b0404, 255, RI_E1H_OFFLINE },
- { 0x1b0800, 1, RI_ALL_ONLINE },
- { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b0c00, 1, RI_ALL_ONLINE },
- { 0x1b1000, 1, RI_ALL_ONLINE },
- { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1400, 1, RI_ALL_ONLINE },
- { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1800, 128, RI_ALL_OFFLINE },
- { 0x1b1c00, 128, RI_ALL_OFFLINE },
- { 0x1b2000, 1, RI_ALL_ONLINE },
- { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x1b8000, 1, RI_ALL_ONLINE },
- { 0x1b8040, 1, RI_ALL_ONLINE },
- { 0x1b8080, 1, RI_ALL_ONLINE },
- { 0x1b80c0, 1, RI_ALL_ONLINE },
- { 0x1b8100, 1, RI_ALL_ONLINE },
- { 0x1b8140, 1, RI_ALL_ONLINE },
- { 0x1b8180, 1, RI_ALL_ONLINE },
- { 0x1b81c0, 1, RI_ALL_ONLINE },
- { 0x1b8200, 1, RI_ALL_ONLINE },
- { 0x1b8240, 1, RI_ALL_ONLINE },
- { 0x1b8280, 1, RI_ALL_ONLINE },
- { 0x1b82c0, 1, RI_ALL_ONLINE },
- { 0x1b8300, 1, RI_ALL_ONLINE },
- { 0x1b8340, 1, RI_ALL_ONLINE },
- { 0x1b8380, 1, RI_ALL_ONLINE },
- { 0x1b83c0, 1, RI_ALL_ONLINE },
- { 0x1b8400, 1, RI_ALL_ONLINE },
- { 0x1b8440, 1, RI_ALL_ONLINE },
- { 0x1b8480, 1, RI_ALL_ONLINE },
- { 0x1b84c0, 1, RI_ALL_ONLINE },
- { 0x1b8500, 1, RI_ALL_ONLINE },
- { 0x1b8540, 1, RI_ALL_ONLINE },
- { 0x1b8580, 1, RI_ALL_ONLINE },
- { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x1b8800, 1, RI_ALL_ONLINE },
- { 0x1b8840, 1, RI_ALL_ONLINE },
- { 0x1b8880, 1, RI_ALL_ONLINE },
- { 0x1b88c0, 1, RI_ALL_ONLINE },
- { 0x1b8900, 1, RI_ALL_ONLINE },
- { 0x1b8940, 1, RI_ALL_ONLINE },
- { 0x1b8980, 1, RI_ALL_ONLINE },
- { 0x1b89c0, 1, RI_ALL_ONLINE },
- { 0x1b8a00, 1, RI_ALL_ONLINE },
- { 0x1b8a40, 1, RI_ALL_ONLINE },
- { 0x1b8a80, 1, RI_ALL_ONLINE },
- { 0x1b8ac0, 1, RI_ALL_ONLINE },
- { 0x1b8b00, 1, RI_ALL_ONLINE },
- { 0x1b8b40, 1, RI_ALL_ONLINE },
- { 0x1b8b80, 1, RI_ALL_ONLINE },
- { 0x1b8bc0, 1, RI_ALL_ONLINE },
- { 0x1b8c00, 1, RI_ALL_ONLINE },
- { 0x1b8c40, 1, RI_ALL_ONLINE },
- { 0x1b8c80, 1, RI_ALL_ONLINE },
- { 0x1b8cc0, 1, RI_ALL_ONLINE },
- { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b8d00, 1, RI_ALL_ONLINE },
- { 0x1b8d40, 1, RI_ALL_ONLINE },
- { 0x1b8d80, 1, RI_ALL_ONLINE },
- { 0x1b8dc0, 1, RI_ALL_ONLINE },
- { 0x1b8e00, 1, RI_ALL_ONLINE },
- { 0x1b8e40, 1, RI_ALL_ONLINE },
- { 0x1b8e80, 1, RI_ALL_ONLINE },
- { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x1b8fe8, 2, RI_E3E3B0_ONLINE },
- { 0x1b9000, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b9040, 3, RI_E2E3E3B0_ONLINE },
- { 0x1b905c, 1, RI_E3E3B0_ONLINE },
- { 0x1b9064, 1, RI_E3B0_ONLINE },
- { 0x1b9080, 10, RI_E3B0_ONLINE },
- { 0x1b9400, 14, RI_E2E3E3B0_OFFLINE },
- { 0x1b943c, 19, RI_E2E3E3B0_OFFLINE },
- { 0x1b9490, 10, RI_E2E3E3B0_OFFLINE },
- { 0x1c0000, 2, RI_ALL_ONLINE },
- { 0x200000, 65, RI_ALL_ONLINE },
- { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x200200, 58, RI_ALL_ONLINE },
- { 0x200340, 4, RI_ALL_ONLINE },
- { 0x200380, 1, RI_E2E3E3B0_ONLINE },
- { 0x200388, 1, RI_E2E3E3B0_ONLINE },
- { 0x200390, 1, RI_E2E3E3B0_ONLINE },
- { 0x200398, 1, RI_E2E3E3B0_ONLINE },
- { 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x200404, 255, RI_E1E1H_OFFLINE },
- { 0x202000, 4, RI_ALL_ONLINE },
- { 0x202010, 2044, RI_ALL_OFFLINE },
- { 0x204000, 4, RI_E3E3B0_ONLINE },
- { 0x220000, 1, RI_ALL_ONLINE },
- { 0x220004, 5631, RI_ALL_OFFLINE },
- { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x228000, 1, RI_ALL_ONLINE },
- { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x230000, 1, RI_ALL_ONLINE },
- { 0x230004, 15, RI_E1H_OFFLINE },
- { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x230044, 239, RI_E1H_OFFLINE },
- { 0x230400, 1, RI_ALL_ONLINE },
- { 0x230404, 255, RI_E1H_OFFLINE },
- { 0x230800, 1, RI_ALL_ONLINE },
- { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x230c00, 1, RI_ALL_ONLINE },
- { 0x231000, 1, RI_ALL_ONLINE },
- { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231400, 1, RI_ALL_ONLINE },
- { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231800, 128, RI_ALL_OFFLINE },
- { 0x231c00, 128, RI_ALL_OFFLINE },
- { 0x232000, 1, RI_ALL_ONLINE },
- { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x232404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x238000, 1, RI_ALL_ONLINE },
- { 0x238040, 1, RI_ALL_ONLINE },
- { 0x238080, 1, RI_ALL_ONLINE },
- { 0x2380c0, 1, RI_ALL_ONLINE },
- { 0x238100, 1, RI_ALL_ONLINE },
- { 0x238140, 1, RI_ALL_ONLINE },
- { 0x238180, 1, RI_ALL_ONLINE },
- { 0x2381c0, 1, RI_ALL_ONLINE },
- { 0x238200, 1, RI_ALL_ONLINE },
- { 0x238240, 1, RI_ALL_ONLINE },
- { 0x238280, 1, RI_ALL_ONLINE },
- { 0x2382c0, 1, RI_ALL_ONLINE },
- { 0x238300, 1, RI_ALL_ONLINE },
- { 0x238340, 1, RI_ALL_ONLINE },
- { 0x238380, 1, RI_ALL_ONLINE },
- { 0x2383c0, 1, RI_ALL_ONLINE },
- { 0x238400, 1, RI_ALL_ONLINE },
- { 0x238440, 1, RI_ALL_ONLINE },
- { 0x238480, 1, RI_ALL_ONLINE },
- { 0x2384c0, 1, RI_ALL_ONLINE },
- { 0x238500, 1, RI_ALL_ONLINE },
- { 0x238540, 1, RI_ALL_ONLINE },
- { 0x238580, 1, RI_ALL_ONLINE },
- { 0x2385c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x238800, 1, RI_ALL_ONLINE },
- { 0x238840, 1, RI_ALL_ONLINE },
- { 0x238880, 1, RI_ALL_ONLINE },
- { 0x2388c0, 1, RI_ALL_ONLINE },
- { 0x238900, 1, RI_ALL_ONLINE },
- { 0x238940, 1, RI_ALL_ONLINE },
- { 0x238980, 1, RI_ALL_ONLINE },
- { 0x2389c0, 1, RI_ALL_ONLINE },
- { 0x238a00, 1, RI_ALL_ONLINE },
- { 0x238a40, 1, RI_ALL_ONLINE },
- { 0x238a80, 1, RI_ALL_ONLINE },
- { 0x238ac0, 1, RI_ALL_ONLINE },
- { 0x238b00, 1, RI_ALL_ONLINE },
- { 0x238b40, 1, RI_ALL_ONLINE },
- { 0x238b80, 1, RI_ALL_ONLINE },
- { 0x238bc0, 1, RI_ALL_ONLINE },
- { 0x238c00, 1, RI_ALL_ONLINE },
- { 0x238c40, 1, RI_ALL_ONLINE },
- { 0x238c80, 1, RI_ALL_ONLINE },
- { 0x238cc0, 1, RI_ALL_ONLINE },
- { 0x238cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x238d00, 1, RI_ALL_ONLINE },
- { 0x238d40, 1, RI_ALL_ONLINE },
- { 0x238d80, 1, RI_ALL_ONLINE },
- { 0x238dc0, 1, RI_ALL_ONLINE },
- { 0x238e00, 1, RI_ALL_ONLINE },
- { 0x238e40, 1, RI_ALL_ONLINE },
- { 0x238e80, 1, RI_ALL_ONLINE },
- { 0x238e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x238fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x238fe8, 2, RI_E3E3B0_ONLINE },
- { 0x239000, 1, RI_E2E3E3B0_ONLINE },
- { 0x239040, 3, RI_E2E3E3B0_ONLINE },
- { 0x23905c, 1, RI_E3E3B0_ONLINE },
- { 0x239064, 1, RI_E3B0_ONLINE },
- { 0x239080, 10, RI_E3B0_ONLINE },
- { 0x240000, 2, RI_ALL_ONLINE },
- { 0x280000, 65, RI_ALL_ONLINE },
- { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x280200, 58, RI_ALL_ONLINE },
- { 0x280340, 4, RI_ALL_ONLINE },
- { 0x280380, 1, RI_E2E3E3B0_ONLINE },
- { 0x280388, 1, RI_E2E3E3B0_ONLINE },
- { 0x280390, 1, RI_E2E3E3B0_ONLINE },
- { 0x280398, 1, RI_E2E3E3B0_ONLINE },
- { 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x280404, 255, RI_E1E1H_OFFLINE },
- { 0x282000, 4, RI_ALL_ONLINE },
- { 0x282010, 2044, RI_ALL_OFFLINE },
- { 0x284000, 4, RI_E3E3B0_ONLINE },
- { 0x2a0000, 1, RI_ALL_ONLINE },
- { 0x2a0004, 5631, RI_ALL_OFFLINE },
- { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x2a8000, 1, RI_ALL_ONLINE },
- { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x2b0000, 1, RI_ALL_ONLINE },
- { 0x2b0004, 15, RI_E1H_OFFLINE },
- { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b0044, 239, RI_E1H_OFFLINE },
- { 0x2b0400, 1, RI_ALL_ONLINE },
- { 0x2b0404, 255, RI_E1H_OFFLINE },
- { 0x2b0800, 1, RI_ALL_ONLINE },
- { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b0c00, 1, RI_ALL_ONLINE },
- { 0x2b1000, 1, RI_ALL_ONLINE },
- { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1400, 1, RI_ALL_ONLINE },
- { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1800, 128, RI_ALL_OFFLINE },
- { 0x2b1c00, 128, RI_ALL_OFFLINE },
- { 0x2b2000, 1, RI_ALL_ONLINE },
- { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x2b8000, 1, RI_ALL_ONLINE },
- { 0x2b8040, 1, RI_ALL_ONLINE },
- { 0x2b8080, 1, RI_ALL_ONLINE },
- { 0x2b80c0, 1, RI_ALL_ONLINE },
- { 0x2b8100, 1, RI_ALL_ONLINE },
- { 0x2b8140, 1, RI_ALL_ONLINE },
- { 0x2b8180, 1, RI_ALL_ONLINE },
- { 0x2b81c0, 1, RI_ALL_ONLINE },
- { 0x2b8200, 1, RI_ALL_ONLINE },
- { 0x2b8240, 1, RI_ALL_ONLINE },
- { 0x2b8280, 1, RI_ALL_ONLINE },
- { 0x2b82c0, 1, RI_ALL_ONLINE },
- { 0x2b8300, 1, RI_ALL_ONLINE },
- { 0x2b8340, 1, RI_ALL_ONLINE },
- { 0x2b8380, 1, RI_ALL_ONLINE },
- { 0x2b83c0, 1, RI_ALL_ONLINE },
- { 0x2b8400, 1, RI_ALL_ONLINE },
- { 0x2b8440, 1, RI_ALL_ONLINE },
- { 0x2b8480, 1, RI_ALL_ONLINE },
- { 0x2b84c0, 1, RI_ALL_ONLINE },
- { 0x2b8500, 1, RI_ALL_ONLINE },
- { 0x2b8540, 1, RI_ALL_ONLINE },
- { 0x2b8580, 1, RI_ALL_ONLINE },
- { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x2b8800, 1, RI_ALL_ONLINE },
- { 0x2b8840, 1, RI_ALL_ONLINE },
- { 0x2b8880, 1, RI_ALL_ONLINE },
- { 0x2b88c0, 1, RI_ALL_ONLINE },
- { 0x2b8900, 1, RI_ALL_ONLINE },
- { 0x2b8940, 1, RI_ALL_ONLINE },
- { 0x2b8980, 1, RI_ALL_ONLINE },
- { 0x2b89c0, 1, RI_ALL_ONLINE },
- { 0x2b8a00, 1, RI_ALL_ONLINE },
- { 0x2b8a40, 1, RI_ALL_ONLINE },
- { 0x2b8a80, 1, RI_ALL_ONLINE },
- { 0x2b8ac0, 1, RI_ALL_ONLINE },
- { 0x2b8b00, 1, RI_ALL_ONLINE },
- { 0x2b8b40, 1, RI_ALL_ONLINE },
- { 0x2b8b80, 1, RI_ALL_ONLINE },
- { 0x2b8bc0, 1, RI_ALL_ONLINE },
- { 0x2b8c00, 1, RI_ALL_ONLINE },
- { 0x2b8c40, 1, RI_ALL_ONLINE },
- { 0x2b8c80, 1, RI_ALL_ONLINE },
- { 0x2b8cc0, 1, RI_ALL_ONLINE },
- { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b8d00, 1, RI_ALL_ONLINE },
- { 0x2b8d40, 1, RI_ALL_ONLINE },
- { 0x2b8d80, 1, RI_ALL_ONLINE },
- { 0x2b8dc0, 1, RI_ALL_ONLINE },
- { 0x2b8e00, 1, RI_ALL_ONLINE },
- { 0x2b8e40, 1, RI_ALL_ONLINE },
- { 0x2b8e80, 1, RI_ALL_ONLINE },
- { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x2b8fe8, 2, RI_E3E3B0_ONLINE },
- { 0x2b9000, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b9040, 3, RI_E2E3E3B0_ONLINE },
- { 0x2b905c, 1, RI_E3E3B0_ONLINE },
- { 0x2b9064, 1, RI_E3B0_ONLINE },
- { 0x2b9080, 10, RI_E3B0_ONLINE },
- { 0x2b9400, 14, RI_E2E3E3B0_ONLINE },
- { 0x2b943c, 19, RI_E2E3E3B0_ONLINE },
- { 0x2b9490, 10, RI_E2E3E3B0_ONLINE },
- { 0x2c0000, 2, RI_ALL_ONLINE },
- { 0x300000, 65, RI_ALL_ONLINE },
- { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x300200, 58, RI_ALL_ONLINE },
- { 0x300340, 4, RI_ALL_ONLINE },
- { 0x300380, 1, RI_E2E3E3B0_ONLINE },
- { 0x300388, 1, RI_E2E3E3B0_ONLINE },
- { 0x300390, 1, RI_E2E3E3B0_ONLINE },
- { 0x300398, 1, RI_E2E3E3B0_ONLINE },
- { 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x300404, 255, RI_E1E1H_OFFLINE },
- { 0x302000, 4, RI_ALL_ONLINE },
- { 0x302010, 2044, RI_ALL_OFFLINE },
- { 0x304000, 4, RI_E3E3B0_ONLINE },
- { 0x320000, 1, RI_ALL_ONLINE },
- { 0x320004, 5631, RI_ALL_OFFLINE },
- { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x328000, 1, RI_ALL_ONLINE },
- { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x330000, 1, RI_ALL_ONLINE },
- { 0x330004, 15, RI_E1H_OFFLINE },
- { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x330044, 239, RI_E1H_OFFLINE },
- { 0x330400, 1, RI_ALL_ONLINE },
- { 0x330404, 255, RI_E1H_OFFLINE },
- { 0x330800, 1, RI_ALL_ONLINE },
- { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x330c00, 1, RI_ALL_ONLINE },
- { 0x331000, 1, RI_ALL_ONLINE },
- { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331400, 1, RI_ALL_ONLINE },
- { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331800, 128, RI_ALL_OFFLINE },
- { 0x331c00, 128, RI_ALL_OFFLINE },
- { 0x332000, 1, RI_ALL_ONLINE },
- { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x332404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x338000, 1, RI_ALL_ONLINE },
- { 0x338040, 1, RI_ALL_ONLINE },
- { 0x338080, 1, RI_ALL_ONLINE },
- { 0x3380c0, 1, RI_ALL_ONLINE },
- { 0x338100, 1, RI_ALL_ONLINE },
- { 0x338140, 1, RI_ALL_ONLINE },
- { 0x338180, 1, RI_ALL_ONLINE },
- { 0x3381c0, 1, RI_ALL_ONLINE },
- { 0x338200, 1, RI_ALL_ONLINE },
- { 0x338240, 1, RI_ALL_ONLINE },
- { 0x338280, 1, RI_ALL_ONLINE },
- { 0x3382c0, 1, RI_ALL_ONLINE },
- { 0x338300, 1, RI_ALL_ONLINE },
- { 0x338340, 1, RI_ALL_ONLINE },
- { 0x338380, 1, RI_ALL_ONLINE },
- { 0x3383c0, 1, RI_ALL_ONLINE },
- { 0x338400, 1, RI_ALL_ONLINE },
- { 0x338440, 1, RI_ALL_ONLINE },
- { 0x338480, 1, RI_ALL_ONLINE },
- { 0x3384c0, 1, RI_ALL_ONLINE },
- { 0x338500, 1, RI_ALL_ONLINE },
- { 0x338540, 1, RI_ALL_ONLINE },
- { 0x338580, 1, RI_ALL_ONLINE },
- { 0x3385c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x338800, 1, RI_ALL_ONLINE },
- { 0x338840, 1, RI_ALL_ONLINE },
- { 0x338880, 1, RI_ALL_ONLINE },
- { 0x3388c0, 1, RI_ALL_ONLINE },
- { 0x338900, 1, RI_ALL_ONLINE },
- { 0x338940, 1, RI_ALL_ONLINE },
- { 0x338980, 1, RI_ALL_ONLINE },
- { 0x3389c0, 1, RI_ALL_ONLINE },
- { 0x338a00, 1, RI_ALL_ONLINE },
- { 0x338a40, 1, RI_ALL_ONLINE },
- { 0x338a80, 1, RI_ALL_ONLINE },
- { 0x338ac0, 1, RI_ALL_ONLINE },
- { 0x338b00, 1, RI_ALL_ONLINE },
- { 0x338b40, 1, RI_ALL_ONLINE },
- { 0x338b80, 1, RI_ALL_ONLINE },
- { 0x338bc0, 1, RI_ALL_ONLINE },
- { 0x338c00, 1, RI_ALL_ONLINE },
- { 0x338c40, 1, RI_ALL_ONLINE },
- { 0x338c80, 1, RI_ALL_ONLINE },
- { 0x338cc0, 1, RI_ALL_ONLINE },
- { 0x338cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x338d00, 1, RI_ALL_ONLINE },
- { 0x338d40, 1, RI_ALL_ONLINE },
- { 0x338d80, 1, RI_ALL_ONLINE },
- { 0x338dc0, 1, RI_ALL_ONLINE },
- { 0x338e00, 1, RI_ALL_ONLINE },
- { 0x338e40, 1, RI_ALL_ONLINE },
- { 0x338e80, 1, RI_ALL_ONLINE },
- { 0x338e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x338fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x338fe8, 2, RI_E3E3B0_ONLINE },
- { 0x339000, 1, RI_E2E3E3B0_ONLINE },
- { 0x339040, 3, RI_E2E3E3B0_ONLINE },
- { 0x33905c, 1, RI_E3E3B0_ONLINE },
- { 0x339064, 1, RI_E3B0_ONLINE },
- { 0x339080, 10, RI_E3B0_ONLINE },
- { 0x340000, 2, RI_ALL_ONLINE },
+ { 0x2000, 1, 0x1f, 0xfff},
+ { 0x2004, 1, 0x1f, 0x1fff},
+ { 0x2008, 25, 0x1f, 0xfff},
+ { 0x206c, 1, 0x1f, 0x1fff},
+ { 0x2070, 313, 0x1f, 0xfff},
+ { 0x2800, 103, 0x1f, 0xfff},
+ { 0x3000, 287, 0x1f, 0xfff},
+ { 0x3800, 331, 0x1f, 0xfff},
+ { 0x8800, 6, 0x1f, 0x924},
+ { 0x8818, 1, 0x1e, 0x924},
+ { 0x9000, 4, 0x1c, 0x924},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x902c, 1, 0x1c, 0x924},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9034, 13, 0x1c, 0x924},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x90a8, 98, 0x1c, 0x924},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9238, 3, 0x1c, 0x924},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9248, 1, 0x1c, 0x924},
+ { 0x924c, 1, 0x4, 0x924},
+ { 0x9250, 16, 0x1c, 0x924},
+ { 0x92a8, 2, 0x1c, 0x1fff},
+ { 0x92b4, 1, 0x1c, 0x1fff},
+ { 0x9400, 33, 0x1c, 0x924},
+ { 0x9484, 5, 0x18, 0x924},
+ { 0xa000, 27, 0x1f, 0x924},
+ { 0xa06c, 1, 0x3, 0x924},
+ { 0xa070, 2, 0x1f, 0x924},
+ { 0xa078, 1, 0x1f, 0x1fff},
+ { 0xa07c, 31, 0x1f, 0x924},
+ { 0xa0f8, 1, 0x1f, 0x1fff},
+ { 0xa0fc, 3, 0x1f, 0x924},
+ { 0xa108, 1, 0x1f, 0x1fff},
+ { 0xa10c, 3, 0x1f, 0x924},
+ { 0xa118, 1, 0x1f, 0x1fff},
+ { 0xa11c, 28, 0x1f, 0x924},
+ { 0xa18c, 4, 0x3, 0x924},
+ { 0xa19c, 3, 0x1f, 0x924},
+ { 0xa1a8, 1, 0x1f, 0x1fff},
+ { 0xa1ac, 3, 0x1f, 0x924},
+ { 0xa1b8, 1, 0x1f, 0x1fff},
+ { 0xa1bc, 54, 0x1f, 0x924},
+ { 0xa294, 2, 0x3, 0x924},
+ { 0xa29c, 2, 0x1f, 0x924},
+ { 0xa2a4, 2, 0x7, 0x924},
+ { 0xa2ac, 2, 0x1f, 0x924},
+ { 0xa2b4, 1, 0x1f, 0x1fff},
+ { 0xa2b8, 49, 0x1f, 0x924},
+ { 0xa38c, 2, 0x1f, 0x1fff},
+ { 0xa398, 1, 0x1f, 0x1fff},
+ { 0xa39c, 7, 0x1e, 0x924},
+ { 0xa3b8, 2, 0x18, 0x924},
+ { 0xa3c0, 1, 0x1e, 0x924},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa3c8, 1, 0x1e, 0x924},
+ { 0xa3d0, 1, 0x1e, 0x924},
+ { 0xa3d8, 1, 0x1e, 0x924},
+ { 0xa3e0, 1, 0x1e, 0x924},
+ { 0xa3e8, 1, 0x1e, 0x924},
+ { 0xa3f0, 1, 0x1e, 0x924},
+ { 0xa3f8, 1, 0x1e, 0x924},
+ { 0xa400, 1, 0x1f, 0x924},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa410, 7, 0x1f, 0x924},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa45c, 1, 0x1f, 0x924},
+ { 0xa460, 1, 0x1f, 0x1924},
+ { 0xa464, 15, 0x1f, 0x924},
+ { 0xa4a0, 1, 0x7, 0x924},
+ { 0xa4a4, 2, 0x1f, 0x924},
+ { 0xa4ac, 2, 0x3, 0x924},
+ { 0xa4b4, 1, 0x7, 0x924},
+ { 0xa4b8, 2, 0x3, 0x924},
+ { 0xa4c0, 3, 0x1f, 0x924},
+ { 0xa4cc, 5, 0x3, 0x924},
+ { 0xa4e0, 3, 0x1f, 0x924},
+ { 0xa4fc, 2, 0x1f, 0x924},
+ { 0xa504, 1, 0x3, 0x924},
+ { 0xa508, 3, 0x1f, 0x924},
+ { 0xa518, 1, 0x1f, 0x924},
+ { 0xa520, 1, 0x1f, 0x924},
+ { 0xa528, 1, 0x1f, 0x924},
+ { 0xa530, 1, 0x1f, 0x924},
+ { 0xa538, 1, 0x1f, 0x924},
+ { 0xa540, 1, 0x1f, 0x924},
+ { 0xa548, 1, 0x3, 0x924},
+ { 0xa550, 1, 0x3, 0x924},
+ { 0xa558, 1, 0x3, 0x924},
+ { 0xa560, 1, 0x3, 0x924},
+ { 0xa568, 1, 0x3, 0x924},
+ { 0xa570, 1, 0x1f, 0x924},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa5a0, 1, 0x7, 0x924},
+ { 0xa5c0, 1, 0x1f, 0x924},
+ { 0xa5e0, 1, 0x1e, 0x924},
+ { 0xa5e8, 1, 0x1e, 0x924},
+ { 0xa5f0, 1, 0x1e, 0x924},
+ { 0xa5f8, 1, 0x6, 0x924},
+ { 0xa5fc, 1, 0x1e, 0x924},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa614, 1, 0x1e, 0x924},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa61c, 1, 0x1e, 0x924},
+ { 0xa620, 6, 0x1c, 0x924},
+ { 0xa638, 20, 0x4, 0x924},
+ { 0xa688, 35, 0x1c, 0x924},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa718, 2, 0x1c, 0x924},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa724, 3, 0x1c, 0x924},
+ { 0xa730, 1, 0x4, 0x924},
+ { 0xa734, 2, 0x1c, 0x924},
+ { 0xa73c, 4, 0x4, 0x924},
+ { 0xa74c, 1, 0x1c, 0x924},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xa754, 3, 0x1c, 0x924},
+ { 0xa760, 5, 0x4, 0x924},
+ { 0xa774, 7, 0x1c, 0x924},
+ { 0xa790, 15, 0x4, 0x924},
+ { 0xa7cc, 4, 0x1c, 0x924},
+ { 0xa7e0, 6, 0x18, 0x924},
+ { 0xa800, 18, 0x4, 0x924},
+ { 0xa848, 33, 0x1c, 0x924},
+ { 0xa8cc, 2, 0x18, 0x924},
+ { 0xa8d4, 4, 0x1c, 0x924},
+ { 0xa8e4, 1, 0x18, 0x924},
+ { 0xa8e8, 1, 0x1c, 0x924},
+ { 0xa8f0, 1, 0x1c, 0x924},
+ { 0xa8f8, 30, 0x18, 0x924},
+ { 0xa974, 73, 0x18, 0x924},
+ { 0xac30, 1, 0x18, 0x924},
+ { 0xac40, 1, 0x18, 0x924},
+ { 0xac50, 1, 0x18, 0x924},
+ { 0xac60, 1, 0x10, 0x924},
+ { 0x10000, 9, 0x1f, 0x924},
+ { 0x10024, 1, 0x7, 0x924},
+ { 0x10028, 5, 0x1f, 0x924},
+ { 0x1003c, 6, 0x7, 0x924},
+ { 0x10054, 20, 0x1f, 0x924},
+ { 0x100a4, 4, 0x7, 0x924},
+ { 0x100b4, 11, 0x1f, 0x924},
+ { 0x100e0, 4, 0x7, 0x924},
+ { 0x100f0, 8, 0x1f, 0x924},
+ { 0x10110, 6, 0x7, 0x924},
+ { 0x10128, 110, 0x1f, 0x924},
+ { 0x102e0, 4, 0x7, 0x924},
+ { 0x102f0, 18, 0x1f, 0x924},
+ { 0x10338, 20, 0x7, 0x924},
+ { 0x10388, 10, 0x1f, 0x924},
+ { 0x103d0, 2, 0x3, 0x1fff},
+ { 0x103dc, 1, 0x3, 0x1fff},
+ { 0x10400, 6, 0x7, 0x924},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x1041c, 1, 0x1f, 0x924},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10424, 1, 0x1f, 0x924},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x1042c, 1, 0x1f, 0x924},
+ { 0x10430, 10, 0x7, 0x924},
+ { 0x10458, 2, 0x1f, 0x924},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10464, 4, 0x1f, 0x924},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x10478, 14, 0x1f, 0x924},
+ { 0x104b0, 12, 0x7, 0x924},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104e8, 1, 0x1f, 0x924},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f4, 1, 0x1f, 0x924},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10500, 2, 0x1f, 0x924},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x1050c, 9, 0x1f, 0x924},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10534, 1, 0x1f, 0x924},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x1053c, 3, 0x1f, 0x924},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x1054c, 3, 0x1f, 0x924},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x1055c, 123, 0x1f, 0x924},
+ { 0x10750, 2, 0x7, 0x924},
+ { 0x10760, 2, 0x7, 0x924},
+ { 0x10770, 2, 0x7, 0x924},
+ { 0x10780, 2, 0x7, 0x924},
+ { 0x10790, 2, 0x1f, 0x924},
+ { 0x107a0, 2, 0x7, 0x924},
+ { 0x107b0, 2, 0x7, 0x924},
+ { 0x107c0, 2, 0x7, 0x924},
+ { 0x107d0, 2, 0x7, 0x924},
+ { 0x107e0, 2, 0x1f, 0x924},
+ { 0x10880, 2, 0x1f, 0x924},
+ { 0x10900, 2, 0x1f, 0x924},
+ { 0x16000, 1, 0x6, 0x924},
+ { 0x16004, 25, 0x1e, 0x924},
+ { 0x16070, 8, 0x1e, 0x924},
+ { 0x16090, 4, 0xe, 0x924},
+ { 0x160a0, 6, 0x1e, 0x924},
+ { 0x160c0, 7, 0x1e, 0x924},
+ { 0x160dc, 2, 0x6, 0x924},
+ { 0x160e4, 6, 0x1e, 0x924},
+ { 0x160fc, 4, 0x1e, 0x1fff},
+ { 0x1610c, 2, 0x6, 0x924},
+ { 0x16114, 6, 0x1e, 0x924},
+ { 0x16140, 48, 0x1e, 0x1fff},
+ { 0x16204, 5, 0x1e, 0x924},
+ { 0x18000, 1, 0x1e, 0x924},
+ { 0x18008, 1, 0x1e, 0x924},
+ { 0x18010, 35, 0x1c, 0x924},
+ { 0x180a4, 2, 0x1c, 0x924},
+ { 0x180c0, 9, 0x1c, 0x924},
+ { 0x180e4, 1, 0xc, 0x924},
+ { 0x180e8, 2, 0x1c, 0x924},
+ { 0x180f0, 1, 0xc, 0x924},
+ { 0x180f4, 79, 0x1c, 0x924},
+ { 0x18230, 1, 0xc, 0x924},
+ { 0x18234, 2, 0x1c, 0x924},
+ { 0x1823c, 1, 0xc, 0x924},
+ { 0x18240, 13, 0x1c, 0x924},
+ { 0x18274, 1, 0x4, 0x924},
+ { 0x18278, 12, 0x1c, 0x924},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182ac, 3, 0x1c, 0x924},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x182bc, 19, 0x1c, 0x924},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x1830c, 3, 0x1c, 0x924},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x1831c, 7, 0x1c, 0x924},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x1833c, 3, 0x1c, 0x924},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x1834c, 28, 0x1c, 0x924},
+ { 0x183bc, 2, 0x1c, 0x1fff},
+ { 0x183c8, 3, 0x1c, 0x1fff},
+ { 0x183d8, 1, 0x1c, 0x1fff},
+ { 0x18440, 48, 0x1c, 0x1fff},
+ { 0x18500, 15, 0x1c, 0x924},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18574, 1, 0x18, 0x924},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1857c, 4, 0x18, 0x924},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18590, 1, 0x18, 0x924},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x18598, 32, 0x18, 0x924},
+ { 0x18618, 5, 0x10, 0x924},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x1863c, 16, 0x10, 0x924},
+ { 0x18680, 44, 0x10, 0x924},
+ { 0x18748, 12, 0x10, 0x924},
+ { 0x18788, 1, 0x10, 0x924},
+ { 0x1879c, 6, 0x10, 0x924},
+ { 0x187c4, 51, 0x10, 0x924},
+ { 0x18a00, 48, 0x10, 0x924},
+ { 0x20000, 24, 0x1f, 0x924},
+ { 0x20060, 8, 0x1f, 0x9e4},
+ { 0x20080, 94, 0x1f, 0x924},
+ { 0x201f8, 1, 0x3, 0x924},
+ { 0x201fc, 1, 0x1f, 0x924},
+ { 0x20200, 1, 0x3, 0x924},
+ { 0x20204, 1, 0x1f, 0x924},
+ { 0x20208, 1, 0x3, 0x924},
+ { 0x2020c, 4, 0x1f, 0x924},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x20248, 24, 0x1f, 0x924},
+ { 0x202b8, 2, 0x1f, 0x1fff},
+ { 0x202c4, 1, 0x1f, 0x1fff},
+ { 0x202c8, 1, 0x1c, 0x924},
+ { 0x202d8, 4, 0x1c, 0x924},
+ { 0x202f0, 1, 0x10, 0x924},
+ { 0x20400, 1, 0x1f, 0x924},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x20414, 2, 0x1f, 0x924},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x20424, 2, 0x1f, 0x924},
+ { 0x2042c, 18, 0x1e, 0x924},
+ { 0x20480, 1, 0x1f, 0x924},
+ { 0x20500, 1, 0x1f, 0x924},
+ { 0x20600, 1, 0x1f, 0x924},
+ { 0x28000, 1, 0x1f, 0x9e4},
+ { 0x28004, 255, 0x1f, 0x180},
+ { 0x28400, 1, 0x1f, 0x1c0},
+ { 0x28404, 255, 0x1f, 0x180},
+ { 0x28800, 1, 0x1f, 0x1c0},
+ { 0x28804, 255, 0x1f, 0x180},
+ { 0x28c00, 1, 0x1f, 0x1c0},
+ { 0x28c04, 255, 0x1f, 0x180},
+ { 0x29000, 1, 0x1f, 0x1c0},
+ { 0x29004, 255, 0x1f, 0x180},
+ { 0x29400, 1, 0x1f, 0x1c0},
+ { 0x29404, 255, 0x1f, 0x180},
+ { 0x29800, 1, 0x1f, 0x1c0},
+ { 0x29804, 255, 0x1f, 0x180},
+ { 0x29c00, 1, 0x1f, 0x1c0},
+ { 0x29c04, 255, 0x1f, 0x180},
+ { 0x2a000, 1, 0x1f, 0x1c0},
+ { 0x2a004, 255, 0x1f, 0x180},
+ { 0x2a400, 1, 0x1f, 0x1c0},
+ { 0x2a404, 255, 0x1f, 0x180},
+ { 0x2a800, 1, 0x1f, 0x1c0},
+ { 0x2a804, 255, 0x1f, 0x180},
+ { 0x2ac00, 1, 0x1f, 0x1c0},
+ { 0x2ac04, 255, 0x1f, 0x180},
+ { 0x2b000, 1, 0x1f, 0x1c0},
+ { 0x2b004, 255, 0x1f, 0x180},
+ { 0x2b400, 1, 0x1f, 0x1c0},
+ { 0x2b404, 255, 0x1f, 0x180},
+ { 0x2b800, 1, 0x1f, 0x1c0},
+ { 0x2b804, 255, 0x1f, 0x180},
+ { 0x2bc00, 1, 0x1f, 0x1c0},
+ { 0x2bc04, 255, 0x1f, 0x180},
+ { 0x2c000, 1, 0x1f, 0x1c0},
+ { 0x2c004, 255, 0x1f, 0x180},
+ { 0x2c400, 1, 0x1f, 0x1c0},
+ { 0x2c404, 255, 0x1f, 0x180},
+ { 0x2c800, 1, 0x1f, 0x1c0},
+ { 0x2c804, 255, 0x1f, 0x180},
+ { 0x2cc00, 1, 0x1f, 0x1c0},
+ { 0x2cc04, 255, 0x1f, 0x180},
+ { 0x2d000, 1, 0x1f, 0x1c0},
+ { 0x2d004, 255, 0x1f, 0x180},
+ { 0x2d400, 1, 0x1f, 0x1c0},
+ { 0x2d404, 255, 0x1f, 0x180},
+ { 0x2d800, 1, 0x1f, 0x1c0},
+ { 0x2d804, 255, 0x1f, 0x180},
+ { 0x2dc00, 1, 0x1f, 0x1c0},
+ { 0x2dc04, 255, 0x1f, 0x180},
+ { 0x2e000, 1, 0x1f, 0x1c0},
+ { 0x2e004, 255, 0x1f, 0x180},
+ { 0x2e400, 1, 0x1f, 0x1c0},
+ { 0x2e404, 255, 0x1f, 0x180},
+ { 0x2e800, 1, 0x1f, 0x1c0},
+ { 0x2e804, 255, 0x1f, 0x180},
+ { 0x2ec00, 1, 0x1f, 0x1c0},
+ { 0x2ec04, 255, 0x1f, 0x180},
+ { 0x2f000, 1, 0x1f, 0x1c0},
+ { 0x2f004, 255, 0x1f, 0x180},
+ { 0x2f400, 1, 0x1f, 0x1c0},
+ { 0x2f404, 255, 0x1f, 0x180},
+ { 0x2f800, 1, 0x1f, 0x1c0},
+ { 0x2f804, 255, 0x1f, 0x180},
+ { 0x2fc00, 1, 0x1f, 0x1c0},
+ { 0x2fc04, 255, 0x1f, 0x180},
+ { 0x30000, 1, 0x1f, 0x9e4},
+ { 0x30004, 255, 0x1f, 0x180},
+ { 0x30400, 1, 0x1f, 0x1c0},
+ { 0x30404, 255, 0x1f, 0x180},
+ { 0x30800, 1, 0x1f, 0x1c0},
+ { 0x30804, 255, 0x1f, 0x180},
+ { 0x30c00, 1, 0x1f, 0x1c0},
+ { 0x30c04, 255, 0x1f, 0x180},
+ { 0x31000, 1, 0x1f, 0x1c0},
+ { 0x31004, 255, 0x1f, 0x180},
+ { 0x31400, 1, 0x1f, 0x1c0},
+ { 0x31404, 255, 0x1f, 0x180},
+ { 0x31800, 1, 0x1f, 0x1c0},
+ { 0x31804, 255, 0x1f, 0x180},
+ { 0x31c00, 1, 0x1f, 0x1c0},
+ { 0x31c04, 255, 0x1f, 0x180},
+ { 0x32000, 1, 0x1f, 0x1c0},
+ { 0x32004, 255, 0x1f, 0x180},
+ { 0x32400, 1, 0x1f, 0x1c0},
+ { 0x32404, 255, 0x1f, 0x180},
+ { 0x32800, 1, 0x1f, 0x1c0},
+ { 0x32804, 255, 0x1f, 0x180},
+ { 0x32c00, 1, 0x1f, 0x1c0},
+ { 0x32c04, 255, 0x1f, 0x180},
+ { 0x33000, 1, 0x1f, 0x1c0},
+ { 0x33004, 255, 0x1f, 0x180},
+ { 0x33400, 1, 0x1f, 0x1c0},
+ { 0x33404, 255, 0x1f, 0x180},
+ { 0x33800, 1, 0x1f, 0x1c0},
+ { 0x33804, 255, 0x1f, 0x180},
+ { 0x33c00, 1, 0x1f, 0x1c0},
+ { 0x33c04, 255, 0x1f, 0x180},
+ { 0x34000, 1, 0x1f, 0x1c0},
+ { 0x34004, 255, 0x1f, 0x180},
+ { 0x34400, 1, 0x1f, 0x1c0},
+ { 0x34404, 255, 0x1f, 0x180},
+ { 0x34800, 1, 0x1f, 0x1c0},
+ { 0x34804, 255, 0x1f, 0x180},
+ { 0x34c00, 1, 0x1f, 0x1c0},
+ { 0x34c04, 255, 0x1f, 0x180},
+ { 0x35000, 1, 0x1f, 0x1c0},
+ { 0x35004, 255, 0x1f, 0x180},
+ { 0x35400, 1, 0x1f, 0x1c0},
+ { 0x35404, 255, 0x1f, 0x180},
+ { 0x35800, 1, 0x1f, 0x1c0},
+ { 0x35804, 255, 0x1f, 0x180},
+ { 0x35c00, 1, 0x1f, 0x1c0},
+ { 0x35c04, 255, 0x1f, 0x180},
+ { 0x36000, 1, 0x1f, 0x1c0},
+ { 0x36004, 255, 0x1f, 0x180},
+ { 0x36400, 1, 0x1f, 0x1c0},
+ { 0x36404, 255, 0x1f, 0x180},
+ { 0x36800, 1, 0x1f, 0x1c0},
+ { 0x36804, 255, 0x1f, 0x180},
+ { 0x36c00, 1, 0x1f, 0x1c0},
+ { 0x36c04, 255, 0x1f, 0x180},
+ { 0x37000, 1, 0x1f, 0x1c0},
+ { 0x37004, 255, 0x1f, 0x180},
+ { 0x37400, 1, 0x1f, 0x1c0},
+ { 0x37404, 255, 0x1f, 0x180},
+ { 0x37800, 1, 0x1f, 0x1c0},
+ { 0x37804, 255, 0x1f, 0x180},
+ { 0x37c00, 1, 0x1f, 0x1c0},
+ { 0x37c04, 255, 0x1f, 0x180},
+ { 0x38000, 1, 0x1f, 0x1c0},
+ { 0x38004, 255, 0x1f, 0x180},
+ { 0x38400, 1, 0x1f, 0x1c0},
+ { 0x38404, 255, 0x1f, 0x180},
+ { 0x38800, 1, 0x1f, 0x1c0},
+ { 0x38804, 255, 0x1f, 0x180},
+ { 0x38c00, 1, 0x1f, 0x1c0},
+ { 0x38c04, 255, 0x1f, 0x180},
+ { 0x39000, 1, 0x1f, 0x1c0},
+ { 0x39004, 255, 0x1f, 0x180},
+ { 0x39400, 1, 0x1f, 0x1c0},
+ { 0x39404, 255, 0x1f, 0x180},
+ { 0x39800, 1, 0x1f, 0x1c0},
+ { 0x39804, 255, 0x1f, 0x180},
+ { 0x39c00, 1, 0x1f, 0x1c0},
+ { 0x39c04, 255, 0x1f, 0x180},
+ { 0x3a000, 1, 0x1f, 0x1c0},
+ { 0x3a004, 255, 0x1f, 0x180},
+ { 0x3a400, 1, 0x1f, 0x1c0},
+ { 0x3a404, 255, 0x1f, 0x180},
+ { 0x3a800, 1, 0x1f, 0x1c0},
+ { 0x3a804, 255, 0x1f, 0x180},
+ { 0x3ac00, 1, 0x1f, 0x1c0},
+ { 0x3ac04, 255, 0x1f, 0x180},
+ { 0x3b000, 1, 0x1f, 0x1c0},
+ { 0x3b004, 255, 0x1f, 0x180},
+ { 0x3b400, 1, 0x1f, 0x1c0},
+ { 0x3b404, 255, 0x1f, 0x180},
+ { 0x3b800, 1, 0x1f, 0x1c0},
+ { 0x3b804, 255, 0x1f, 0x180},
+ { 0x3bc00, 1, 0x1f, 0x1c0},
+ { 0x3bc04, 255, 0x1f, 0x180},
+ { 0x3c000, 1, 0x1f, 0x1c0},
+ { 0x3c004, 255, 0x1f, 0x180},
+ { 0x3c400, 1, 0x1f, 0x1c0},
+ { 0x3c404, 255, 0x1f, 0x180},
+ { 0x3c800, 1, 0x1f, 0x1c0},
+ { 0x3c804, 255, 0x1f, 0x180},
+ { 0x3cc00, 1, 0x1f, 0x1c0},
+ { 0x3cc04, 255, 0x1f, 0x180},
+ { 0x3d000, 1, 0x1f, 0x1c0},
+ { 0x3d004, 255, 0x1f, 0x180},
+ { 0x3d400, 1, 0x1f, 0x1c0},
+ { 0x3d404, 255, 0x1f, 0x180},
+ { 0x3d800, 1, 0x1f, 0x1c0},
+ { 0x3d804, 255, 0x1f, 0x180},
+ { 0x3dc00, 1, 0x1f, 0x1c0},
+ { 0x3dc04, 255, 0x1f, 0x180},
+ { 0x3e000, 1, 0x1f, 0x1c0},
+ { 0x3e004, 255, 0x1f, 0x180},
+ { 0x3e400, 1, 0x1f, 0x1c0},
+ { 0x3e404, 255, 0x1f, 0x180},
+ { 0x3e800, 1, 0x1f, 0x1c0},
+ { 0x3e804, 255, 0x1f, 0x180},
+ { 0x3ec00, 1, 0x1f, 0x1c0},
+ { 0x3ec04, 255, 0x1f, 0x180},
+ { 0x3f000, 1, 0x1f, 0x1c0},
+ { 0x3f004, 255, 0x1f, 0x180},
+ { 0x3f400, 1, 0x1f, 0x1c0},
+ { 0x3f404, 255, 0x1f, 0x180},
+ { 0x3f800, 1, 0x1f, 0x1c0},
+ { 0x3f804, 255, 0x1f, 0x180},
+ { 0x3fc00, 1, 0x1f, 0x1c0},
+ { 0x3fc04, 255, 0x1f, 0x180},
+ { 0x40000, 85, 0x1f, 0x924},
+ { 0x40154, 13, 0x1f, 0xfff},
+ { 0x40198, 2, 0x1f, 0x1fff},
+ { 0x401a4, 1, 0x1f, 0x1fff},
+ { 0x401a8, 8, 0x1e, 0x924},
+ { 0x401c8, 1, 0x2, 0x924},
+ { 0x401cc, 2, 0x1e, 0x924},
+ { 0x401d4, 2, 0x1c, 0x924},
+ { 0x40200, 4, 0x1f, 0x924},
+ { 0x40220, 6, 0x1c, 0x924},
+ { 0x40238, 8, 0xc, 0x924},
+ { 0x40258, 4, 0x1c, 0x924},
+ { 0x40268, 2, 0x18, 0x924},
+ { 0x40270, 17, 0x10, 0x924},
+ { 0x40400, 43, 0x1f, 0x924},
+ { 0x404bc, 2, 0x1f, 0x1fff},
+ { 0x404c8, 1, 0x1f, 0x1fff},
+ { 0x404cc, 3, 0x1e, 0x924},
+ { 0x404e0, 1, 0x1c, 0x924},
+ { 0x40500, 2, 0x1f, 0x924},
+ { 0x40510, 2, 0x1f, 0x924},
+ { 0x40520, 2, 0x1f, 0x924},
+ { 0x40530, 2, 0x1f, 0x924},
+ { 0x40540, 2, 0x1f, 0x924},
+ { 0x40550, 10, 0x1c, 0x924},
+ { 0x40610, 2, 0x1c, 0x924},
+ { 0x42000, 164, 0x1f, 0x924},
+ { 0x422b0, 2, 0x1f, 0x1fff},
+ { 0x422bc, 1, 0x1f, 0x1fff},
+ { 0x422c0, 4, 0x1c, 0x924},
+ { 0x422d4, 5, 0x1e, 0x924},
+ { 0x422e8, 1, 0x1c, 0x924},
+ { 0x42400, 49, 0x1f, 0x924},
+ { 0x424c8, 32, 0x1f, 0x924},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x4254c, 1, 0x1f, 0x924},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42554, 1, 0x1f, 0x924},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x4255c, 1, 0x1f, 0x924},
+ { 0x42568, 2, 0x1f, 0x924},
+ { 0x42640, 5, 0x1c, 0x924},
+ { 0x42800, 1, 0x1f, 0x924},
+ { 0x50000, 1, 0x1f, 0x1fff},
+ { 0x50004, 19, 0x1f, 0x924},
+ { 0x50050, 8, 0x1f, 0x93c},
+ { 0x50070, 60, 0x1f, 0x924},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x50180, 20, 0x1f, 0x924},
+ { 0x501e0, 2, 0x1f, 0x1fff},
+ { 0x501ec, 1, 0x1f, 0x1fff},
+ { 0x501f0, 4, 0x1e, 0x924},
+ { 0x50200, 1, 0x1f, 0x924},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x50214, 2, 0x1f, 0x924},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x50220, 2, 0x1f, 0x924},
+ { 0x50228, 6, 0x1e, 0x924},
+ { 0x50240, 1, 0x1f, 0x924},
+ { 0x50280, 1, 0x1f, 0x924},
+ { 0x50300, 1, 0x1c, 0x924},
+ { 0x5030c, 1, 0x1c, 0x924},
+ { 0x50318, 1, 0x1c, 0x934},
+ { 0x5031c, 1, 0x1c, 0x924},
+ { 0x50320, 2, 0x1c, 0x934},
+ { 0x50330, 1, 0x10, 0x924},
+ { 0x52000, 1, 0x1f, 0x924},
+ { 0x54000, 1, 0x1f, 0x93c},
+ { 0x54004, 255, 0x1f, 0x30},
+ { 0x54400, 1, 0x1f, 0x38},
+ { 0x54404, 255, 0x1f, 0x30},
+ { 0x54800, 1, 0x1f, 0x38},
+ { 0x54804, 255, 0x1f, 0x30},
+ { 0x54c00, 1, 0x1f, 0x38},
+ { 0x54c04, 255, 0x1f, 0x30},
+ { 0x55000, 1, 0x1f, 0x38},
+ { 0x55004, 255, 0x1f, 0x30},
+ { 0x55400, 1, 0x1f, 0x38},
+ { 0x55404, 255, 0x1f, 0x30},
+ { 0x55800, 1, 0x1f, 0x38},
+ { 0x55804, 255, 0x1f, 0x30},
+ { 0x55c00, 1, 0x1f, 0x38},
+ { 0x55c04, 255, 0x1f, 0x30},
+ { 0x56000, 1, 0x1f, 0x38},
+ { 0x56004, 255, 0x1f, 0x30},
+ { 0x56400, 1, 0x1f, 0x38},
+ { 0x56404, 255, 0x1f, 0x30},
+ { 0x56800, 1, 0x1f, 0x38},
+ { 0x56804, 255, 0x1f, 0x30},
+ { 0x56c00, 1, 0x1f, 0x38},
+ { 0x56c04, 255, 0x1f, 0x30},
+ { 0x57000, 1, 0x1f, 0x38},
+ { 0x57004, 255, 0x1f, 0x30},
+ { 0x58000, 1, 0x1f, 0x934},
+ { 0x58004, 8191, 0x3, 0x30},
+ { 0x60000, 26, 0x1f, 0x924},
+ { 0x60068, 8, 0x3, 0x924},
+ { 0x60088, 2, 0x1f, 0x924},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x60094, 9, 0x1f, 0x924},
+ { 0x600b8, 9, 0x3, 0x924},
+ { 0x600dc, 1, 0x1f, 0x924},
+ { 0x600e0, 5, 0x3, 0x924},
+ { 0x600f4, 1, 0x7, 0x924},
+ { 0x600f8, 1, 0x3, 0x924},
+ { 0x600fc, 8, 0x1f, 0x924},
+ { 0x6012c, 2, 0x1f, 0x1fff},
+ { 0x60138, 1, 0x1f, 0x1fff},
+ { 0x6013c, 24, 0x2, 0x924},
+ { 0x6019c, 2, 0x1c, 0x924},
+ { 0x601ac, 18, 0x1c, 0x924},
+ { 0x60200, 1, 0x1f, 0xb6d},
+ { 0x60204, 2, 0x1f, 0x249},
+ { 0x60210, 13, 0x1c, 0x924},
+ { 0x60244, 16, 0x10, 0x924},
+ { 0x61000, 1, 0x1f, 0xb6d},
+ { 0x61004, 511, 0x1f, 0x249},
+ { 0x61800, 512, 0x18, 0x249},
+ { 0x70000, 8, 0x1f, 0xb6d},
+ { 0x70020, 8184, 0x1f, 0x249},
+ { 0x78000, 8192, 0x18, 0x249},
+ { 0x85000, 3, 0x1f, 0x1000},
+ { 0x8501c, 7, 0x1f, 0x1000},
+ { 0x85048, 1, 0x1f, 0x1000},
+ { 0x85200, 32, 0x1f, 0x1000},
+ { 0xa0000, 16384, 0x3, 0x1000},
+ { 0xb0000, 16384, 0x2, 0x1000},
+ { 0xc1000, 7, 0x1f, 0x924},
+ { 0xc102c, 2, 0x1f, 0x1fff},
+ { 0xc1038, 1, 0x1f, 0x1fff},
+ { 0xc103c, 2, 0x1c, 0x924},
+ { 0xc1800, 2, 0x1f, 0x924},
+ { 0xc2000, 164, 0x1f, 0x924},
+ { 0xc22b0, 2, 0x1f, 0x1fff},
+ { 0xc22bc, 1, 0x1f, 0x1fff},
+ { 0xc22c0, 5, 0x1c, 0x924},
+ { 0xc22d8, 4, 0x1c, 0x924},
+ { 0xc2400, 49, 0x1f, 0x924},
+ { 0xc24c8, 32, 0x1f, 0x924},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc254c, 1, 0x1f, 0x924},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2554, 1, 0x1f, 0x924},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc255c, 1, 0x1f, 0x924},
+ { 0xc2568, 2, 0x1f, 0x924},
+ { 0xc2600, 1, 0x1f, 0x924},
+ { 0xc4000, 165, 0x1f, 0x924},
+ { 0xc42b4, 2, 0x1f, 0x1fff},
+ { 0xc42c0, 1, 0x1f, 0x1fff},
+ { 0xc42d8, 2, 0x1c, 0x924},
+ { 0xc42e0, 7, 0x1e, 0x924},
+ { 0xc42fc, 1, 0x1c, 0x924},
+ { 0xc4400, 51, 0x1f, 0x924},
+ { 0xc44d0, 32, 0x1f, 0x924},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4554, 1, 0x1f, 0x924},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc455c, 1, 0x1f, 0x924},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xc4564, 1, 0x1f, 0x924},
+ { 0xc4570, 2, 0x1f, 0x924},
+ { 0xc4578, 5, 0x1c, 0x924},
+ { 0xc4600, 1, 0x1f, 0x924},
+ { 0xd0000, 19, 0x1f, 0x924},
+ { 0xd004c, 8, 0x1f, 0x1927},
+ { 0xd006c, 64, 0x1f, 0x924},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd018c, 19, 0x1f, 0x924},
+ { 0xd01e8, 2, 0x1f, 0x1fff},
+ { 0xd01f4, 1, 0x1f, 0x1fff},
+ { 0xd01fc, 1, 0x1c, 0x924},
+ { 0xd0200, 1, 0x1f, 0x924},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xd0218, 4, 0x1f, 0x924},
+ { 0xd0228, 18, 0x1e, 0x924},
+ { 0xd0280, 1, 0x1f, 0x924},
+ { 0xd0300, 1, 0x1f, 0x924},
+ { 0xd0400, 1, 0x1f, 0x924},
+ { 0xd0818, 1, 0x10, 0x924},
+ { 0xd4000, 1, 0x1f, 0x1927},
+ { 0xd4004, 255, 0x1f, 0x6},
+ { 0xd4400, 1, 0x1f, 0x1007},
+ { 0xd4404, 255, 0x1f, 0x6},
+ { 0xd4800, 1, 0x1f, 0x1007},
+ { 0xd4804, 255, 0x1f, 0x6},
+ { 0xd4c00, 1, 0x1f, 0x1007},
+ { 0xd4c04, 255, 0x1f, 0x6},
+ { 0xd5000, 1, 0x1f, 0x1007},
+ { 0xd5004, 255, 0x1f, 0x6},
+ { 0xd5400, 1, 0x1f, 0x1007},
+ { 0xd5404, 255, 0x1f, 0x6},
+ { 0xd5800, 1, 0x1f, 0x1007},
+ { 0xd5804, 255, 0x1f, 0x6},
+ { 0xd5c00, 1, 0x1f, 0x1007},
+ { 0xd5c04, 255, 0x1f, 0x6},
+ { 0xd6000, 1, 0x1f, 0x1007},
+ { 0xd6004, 255, 0x1f, 0x6},
+ { 0xd6400, 1, 0x1f, 0x1007},
+ { 0xd6404, 255, 0x1f, 0x6},
+ { 0xd8000, 1, 0x1f, 0x1927},
+ { 0xd8004, 255, 0x1f, 0x6},
+ { 0xd8400, 1, 0x1f, 0x1007},
+ { 0xd8404, 255, 0x1f, 0x6},
+ { 0xd8800, 1, 0x1f, 0x1007},
+ { 0xd8804, 255, 0x1f, 0x6},
+ { 0xd8c00, 1, 0x1f, 0x1007},
+ { 0xd8c04, 255, 0x1f, 0x6},
+ { 0xd9000, 1, 0x1f, 0x1007},
+ { 0xd9004, 255, 0x1f, 0x6},
+ { 0xd9400, 1, 0x1f, 0x1007},
+ { 0xd9404, 255, 0x1f, 0x6},
+ { 0xd9800, 1, 0x1f, 0x1007},
+ { 0xd9804, 255, 0x1f, 0x6},
+ { 0xd9c00, 1, 0x1f, 0x1007},
+ { 0xd9c04, 255, 0x1f, 0x6},
+ { 0xda000, 1, 0x1f, 0x1007},
+ { 0xda004, 255, 0x1f, 0x6},
+ { 0xda400, 1, 0x1f, 0x1007},
+ { 0xda404, 255, 0x1f, 0x6},
+ { 0xda800, 1, 0x1f, 0x1007},
+ { 0xda804, 255, 0x1f, 0x6},
+ { 0xdac00, 1, 0x1f, 0x1007},
+ { 0xdac04, 255, 0x1f, 0x6},
+ { 0xdb000, 1, 0x1f, 0x1007},
+ { 0xdb004, 255, 0x1f, 0x6},
+ { 0xdb400, 1, 0x1f, 0x1007},
+ { 0xdb404, 255, 0x1f, 0x6},
+ { 0xdb800, 1, 0x1f, 0x1007},
+ { 0xdb804, 255, 0x1f, 0x6},
+ { 0xdbc00, 1, 0x1f, 0x1007},
+ { 0xdbc04, 255, 0x1f, 0x6},
+ { 0xdc000, 1, 0x1f, 0x1007},
+ { 0xdc004, 255, 0x1f, 0x6},
+ { 0xdc400, 1, 0x1f, 0x1007},
+ { 0xdc404, 255, 0x1f, 0x6},
+ { 0xdc800, 1, 0x1f, 0x1007},
+ { 0xdc804, 255, 0x1f, 0x6},
+ { 0xdcc00, 1, 0x1f, 0x1007},
+ { 0xdcc04, 255, 0x1f, 0x6},
+ { 0xdd000, 1, 0x1f, 0x1007},
+ { 0xdd004, 255, 0x1f, 0x6},
+ { 0xdd400, 1, 0x1f, 0x1007},
+ { 0xdd404, 255, 0x1f, 0x6},
+ { 0xdd800, 1, 0x1f, 0x1007},
+ { 0xdd804, 255, 0x1f, 0x6},
+ { 0xddc00, 1, 0x1f, 0x1007},
+ { 0xddc04, 255, 0x1f, 0x6},
+ { 0xde000, 1, 0x1f, 0x1007},
+ { 0xde004, 255, 0x1f, 0x6},
+ { 0xde400, 1, 0x1f, 0x1007},
+ { 0xde404, 255, 0x1f, 0x6},
+ { 0xde800, 1, 0x1f, 0x1007},
+ { 0xde804, 255, 0x1f, 0x6},
+ { 0xdec00, 1, 0x1f, 0x1007},
+ { 0xdec04, 255, 0x1f, 0x6},
+ { 0xdf000, 1, 0x1f, 0x1007},
+ { 0xdf004, 255, 0x1f, 0x6},
+ { 0xdf400, 1, 0x1f, 0x1007},
+ { 0xdf404, 255, 0x1f, 0x6},
+ { 0xdf800, 1, 0x1f, 0x1007},
+ { 0xdf804, 255, 0x1f, 0x6},
+ { 0xdfc00, 1, 0x1f, 0x1007},
+ { 0xdfc04, 255, 0x1f, 0x6},
+ { 0xe0000, 21, 0x1f, 0x924},
+ { 0xe0054, 8, 0x1f, 0xf24},
+ { 0xe0074, 49, 0x1f, 0x924},
+ { 0xe0138, 1, 0x3, 0x924},
+ { 0xe013c, 6, 0x1f, 0x924},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe0174, 21, 0x1f, 0x924},
+ { 0xe01d8, 2, 0x1f, 0x1fff},
+ { 0xe01e4, 1, 0x1f, 0x1fff},
+ { 0xe01f4, 1, 0x4, 0x924},
+ { 0xe01f8, 1, 0x1c, 0x924},
+ { 0xe0200, 1, 0x1f, 0x924},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe0214, 2, 0x1f, 0x924},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0xe0224, 2, 0x1f, 0x924},
+ { 0xe022c, 18, 0x1e, 0x924},
+ { 0xe0280, 1, 0x1f, 0x924},
+ { 0xe0300, 1, 0x1f, 0x924},
+ { 0xe0400, 1, 0x10, 0x924},
+ { 0xe1000, 1, 0x1f, 0x924},
+ { 0xe2000, 1, 0x1f, 0xf24},
+ { 0xe2004, 255, 0x1f, 0xc00},
+ { 0xe2400, 1, 0x1f, 0xe00},
+ { 0xe2404, 255, 0x1f, 0xc00},
+ { 0xe2800, 1, 0x1f, 0xe00},
+ { 0xe2804, 255, 0x1f, 0xc00},
+ { 0xe2c00, 1, 0x1f, 0xe00},
+ { 0xe2c04, 255, 0x1f, 0xc00},
+ { 0xe3000, 1, 0x1f, 0xe00},
+ { 0xe3004, 255, 0x1f, 0xc00},
+ { 0xe3400, 1, 0x1f, 0xe00},
+ { 0xe3404, 255, 0x1f, 0xc00},
+ { 0xe3800, 1, 0x1f, 0xe00},
+ { 0xe3804, 255, 0x1f, 0xc00},
+ { 0xe3c00, 1, 0x1f, 0xe00},
+ { 0xe3c04, 255, 0x1f, 0xc00},
+ { 0xf0000, 1, 0x1f, 0xf24},
+ { 0xf0004, 255, 0x1f, 0xc00},
+ { 0xf0400, 1, 0x1f, 0xe00},
+ { 0xf0404, 255, 0x1f, 0xc00},
+ { 0xf0800, 1, 0x1f, 0xe00},
+ { 0xf0804, 255, 0x1f, 0xc00},
+ { 0xf0c00, 1, 0x1f, 0xe00},
+ { 0xf0c04, 255, 0x1f, 0xc00},
+ { 0xf1000, 1, 0x1f, 0xe00},
+ { 0xf1004, 255, 0x1f, 0xc00},
+ { 0xf1400, 1, 0x1f, 0xe00},
+ { 0xf1404, 255, 0x1f, 0xc00},
+ { 0xf1800, 1, 0x1f, 0xe00},
+ { 0xf1804, 255, 0x1f, 0xc00},
+ { 0xf1c00, 1, 0x1f, 0xe00},
+ { 0xf1c04, 255, 0x1f, 0xc00},
+ { 0xf2000, 1, 0x1f, 0xe00},
+ { 0xf2004, 255, 0x1f, 0xc00},
+ { 0xf2400, 1, 0x1f, 0xe00},
+ { 0xf2404, 255, 0x1f, 0xc00},
+ { 0xf2800, 1, 0x1f, 0xe00},
+ { 0xf2804, 255, 0x1f, 0xc00},
+ { 0xf2c00, 1, 0x1f, 0xe00},
+ { 0xf2c04, 255, 0x1f, 0xc00},
+ { 0xf3000, 1, 0x1f, 0xe00},
+ { 0xf3004, 255, 0x1f, 0xc00},
+ { 0xf3400, 1, 0x1f, 0xe00},
+ { 0xf3404, 255, 0x1f, 0xc00},
+ { 0xf3800, 1, 0x1f, 0xe00},
+ { 0xf3804, 255, 0x1f, 0xc00},
+ { 0xf3c00, 1, 0x1f, 0xe00},
+ { 0xf3c04, 255, 0x1f, 0xc00},
+ { 0xf4000, 1, 0x1f, 0xe00},
+ { 0xf4004, 255, 0x1f, 0xc00},
+ { 0xf4400, 1, 0x1f, 0xe00},
+ { 0xf4404, 255, 0x1f, 0xc00},
+ { 0xf4800, 1, 0x1f, 0xe00},
+ { 0xf4804, 255, 0x1f, 0xc00},
+ { 0xf4c00, 1, 0x1f, 0xe00},
+ { 0xf4c04, 255, 0x1f, 0xc00},
+ { 0xf5000, 1, 0x1f, 0xe00},
+ { 0xf5004, 255, 0x1f, 0xc00},
+ { 0xf5400, 1, 0x1f, 0xe00},
+ { 0xf5404, 255, 0x1f, 0xc00},
+ { 0xf5800, 1, 0x1f, 0xe00},
+ { 0xf5804, 255, 0x1f, 0xc00},
+ { 0xf5c00, 1, 0x1f, 0xe00},
+ { 0xf5c04, 255, 0x1f, 0xc00},
+ { 0xf6000, 1, 0x1f, 0xe00},
+ { 0xf6004, 255, 0x1f, 0xc00},
+ { 0xf6400, 1, 0x1f, 0xe00},
+ { 0xf6404, 255, 0x1f, 0xc00},
+ { 0xf6800, 1, 0x1f, 0xe00},
+ { 0xf6804, 255, 0x1f, 0xc00},
+ { 0xf6c00, 1, 0x1f, 0xe00},
+ { 0xf6c04, 255, 0x1f, 0xc00},
+ { 0xf7000, 1, 0x1f, 0xe00},
+ { 0xf7004, 255, 0x1f, 0xc00},
+ { 0xf7400, 1, 0x1f, 0xe00},
+ { 0xf7404, 255, 0x1f, 0xc00},
+ { 0xf7800, 1, 0x1f, 0xe00},
+ { 0xf7804, 255, 0x1f, 0xc00},
+ { 0xf7c00, 1, 0x1f, 0xe00},
+ { 0xf7c04, 255, 0x1f, 0xc00},
+ { 0xf8000, 1, 0x1f, 0xe00},
+ { 0xf8004, 255, 0x1f, 0xc00},
+ { 0xf8400, 1, 0x1f, 0xe00},
+ { 0xf8404, 255, 0x1f, 0xc00},
+ { 0xf8800, 1, 0x1f, 0xe00},
+ { 0xf8804, 255, 0x1f, 0xc00},
+ { 0xf8c00, 1, 0x1f, 0xe00},
+ { 0xf8c04, 255, 0x1f, 0xc00},
+ { 0xf9000, 1, 0x1f, 0xe00},
+ { 0xf9004, 255, 0x1f, 0xc00},
+ { 0xf9400, 1, 0x1f, 0xe00},
+ { 0xf9404, 255, 0x1f, 0xc00},
+ { 0xf9800, 1, 0x1f, 0xe00},
+ { 0xf9804, 255, 0x1f, 0xc00},
+ { 0xf9c00, 1, 0x1f, 0xe00},
+ { 0xf9c04, 255, 0x1f, 0xc00},
+ { 0xfa000, 1, 0x1f, 0xe00},
+ { 0xfa004, 255, 0x1f, 0xc00},
+ { 0xfa400, 1, 0x1f, 0xe00},
+ { 0xfa404, 255, 0x1f, 0xc00},
+ { 0xfa800, 1, 0x1f, 0xe00},
+ { 0xfa804, 255, 0x1f, 0xc00},
+ { 0xfac00, 1, 0x1f, 0xe00},
+ { 0xfac04, 255, 0x1f, 0xc00},
+ { 0xfb000, 1, 0x1f, 0xe00},
+ { 0xfb004, 255, 0x1f, 0xc00},
+ { 0xfb400, 1, 0x1f, 0xe00},
+ { 0xfb404, 255, 0x1f, 0xc00},
+ { 0xfb800, 1, 0x1f, 0xe00},
+ { 0xfb804, 255, 0x1f, 0xc00},
+ { 0xfbc00, 1, 0x1f, 0xe00},
+ { 0xfbc04, 255, 0x1f, 0xc00},
+ { 0xfc000, 1, 0x1f, 0xe00},
+ { 0xfc004, 255, 0x1f, 0xc00},
+ { 0xfc400, 1, 0x1f, 0xe00},
+ { 0xfc404, 255, 0x1f, 0xc00},
+ { 0xfc800, 1, 0x1f, 0xe00},
+ { 0xfc804, 255, 0x1f, 0xc00},
+ { 0xfcc00, 1, 0x1f, 0xe00},
+ { 0xfcc04, 255, 0x1f, 0xc00},
+ { 0xfd000, 1, 0x1f, 0xe00},
+ { 0xfd004, 255, 0x1f, 0xc00},
+ { 0xfd400, 1, 0x1f, 0xe00},
+ { 0xfd404, 255, 0x1f, 0xc00},
+ { 0xfd800, 1, 0x1f, 0xe00},
+ { 0xfd804, 255, 0x1f, 0xc00},
+ { 0xfdc00, 1, 0x1f, 0xe00},
+ { 0xfdc04, 255, 0x1f, 0xc00},
+ { 0xfe000, 1, 0x1f, 0xe00},
+ { 0xfe004, 255, 0x1f, 0xc00},
+ { 0xfe400, 1, 0x1f, 0xe00},
+ { 0xfe404, 255, 0x1f, 0xc00},
+ { 0xfe800, 1, 0x1f, 0xe00},
+ { 0xfe804, 255, 0x1f, 0xc00},
+ { 0xfec00, 1, 0x1f, 0xe00},
+ { 0xfec04, 255, 0x1f, 0xc00},
+ { 0xff000, 1, 0x1f, 0xe00},
+ { 0xff004, 255, 0x1f, 0xc00},
+ { 0xff400, 1, 0x1f, 0xe00},
+ { 0xff404, 255, 0x1f, 0xc00},
+ { 0xff800, 1, 0x1f, 0xe00},
+ { 0xff804, 255, 0x1f, 0xc00},
+ { 0xffc00, 1, 0x1f, 0xe00},
+ { 0xffc04, 255, 0x1f, 0xc00},
+ { 0x101000, 5, 0x1f, 0x924},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101018, 6, 0x1f, 0x924},
+ { 0x101040, 2, 0x1f, 0x1fff},
+ { 0x10104c, 1, 0x1f, 0x1fff},
+ { 0x101050, 1, 0x1e, 0x924},
+ { 0x101054, 3, 0x1c, 0x924},
+ { 0x101100, 1, 0x1f, 0x924},
+ { 0x101800, 8, 0x1f, 0x924},
+ { 0x102000, 18, 0x1f, 0x924},
+ { 0x102058, 2, 0x1f, 0x1fff},
+ { 0x102064, 1, 0x1f, 0x1fff},
+ { 0x102068, 6, 0x1c, 0x924},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x1020c0, 1, 0x1f, 0x924},
+ { 0x1020c8, 8, 0x2, 0x924},
+ { 0x1020e8, 9, 0x1c, 0x924},
+ { 0x102400, 1, 0x1f, 0x924},
+ { 0x103000, 1, 0x1f, 0x924},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x10300c, 23, 0x1f, 0x924},
+ { 0x103088, 2, 0x1f, 0x1fff},
+ { 0x103094, 1, 0x1f, 0x1fff},
+ { 0x103098, 1, 0x1e, 0x924},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030a4, 2, 0x1e, 0x924},
+ { 0x1030ac, 2, 0x1c, 0x924},
+ { 0x1030b4, 1, 0x4, 0x924},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030c0, 3, 0x1c, 0x924},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030d0, 1, 0x1c, 0x924},
+ { 0x1030d8, 2, 0x1c, 0x924},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x1030e4, 5, 0x1c, 0x924},
+ { 0x103400, 136, 0x1c, 0x1fff},
+ { 0x103800, 8, 0x1f, 0x924},
+ { 0x104000, 1, 0x1f, 0x924},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104008, 4, 0x1f, 0x924},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x10401c, 1, 0x1f, 0x924},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x104024, 6, 0x1f, 0x924},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x104040, 47, 0x1f, 0x924},
+ { 0x10410c, 2, 0x1f, 0x1fff},
+ { 0x104118, 1, 0x1f, 0x1fff},
+ { 0x10411c, 16, 0x1c, 0x924},
+ { 0x104200, 17, 0x1f, 0x924},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104500, 192, 0x1f, 0xdb6},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x104900, 192, 0x1f, 0xdb6},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x105400, 768, 0x1f, 0xdb6},
+ { 0x107000, 7, 0x1c, 0x924},
+ { 0x10701c, 1, 0x18, 0x924},
+ { 0x108000, 33, 0x3, 0x924},
+ { 0x1080ac, 5, 0x2, 0x924},
+ { 0x108100, 5, 0x3, 0x924},
+ { 0x108120, 5, 0x3, 0x924},
+ { 0x108200, 74, 0x3, 0x924},
+ { 0x108400, 74, 0x3, 0x924},
+ { 0x108800, 152, 0x3, 0x924},
+ { 0x110000, 111, 0x1c, 0x924},
+ { 0x1101cc, 2, 0x1c, 0x1fff},
+ { 0x1101d8, 1, 0x1c, 0x1fff},
+ { 0x1101dc, 1, 0x18, 0x924},
+ { 0x110200, 4, 0x1c, 0x924},
+ { 0x120000, 92, 0x1f, 0x924},
+ { 0x120170, 2, 0x3, 0x924},
+ { 0x120178, 14, 0x1f, 0x924},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x1201b8, 93, 0x1f, 0x924},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x120330, 15, 0x1f, 0x924},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120378, 36, 0x1f, 0x924},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120410, 1, 0x1f, 0x924},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120450, 10, 0x1f, 0x924},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x120480, 43, 0x1f, 0x924},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120530, 5, 0x1f, 0x924},
+ { 0x120544, 4, 0x3, 0x924},
+ { 0x120554, 4, 0x1f, 0x924},
+ { 0x120564, 2, 0x1f, 0xfff},
+ { 0x12057c, 2, 0x1f, 0x1fff},
+ { 0x120588, 3, 0x1f, 0x1fff},
+ { 0x120598, 1, 0x1f, 0x1fff},
+ { 0x12059c, 22, 0x1e, 0x924},
+ { 0x1205f4, 1, 0x6, 0x924},
+ { 0x1205f8, 4, 0x1c, 0x924},
+ { 0x120618, 1, 0x1c, 0x924},
+ { 0x12061c, 31, 0x1e, 0x924},
+ { 0x120698, 3, 0x1c, 0x924},
+ { 0x1206a4, 1, 0x4, 0x924},
+ { 0x1206a8, 1, 0x1c, 0x924},
+ { 0x1206b0, 38, 0x1c, 0x924},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x12074c, 11, 0x1c, 0x924},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120780, 23, 0x1c, 0x924},
+ { 0x1207dc, 1, 0x4, 0x924},
+ { 0x1207fc, 1, 0x1c, 0x924},
+ { 0x12080c, 2, 0x1f, 0xfff},
+ { 0x120814, 1, 0x1f, 0x924},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x12081c, 1, 0x1f, 0x924},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120824, 1, 0x1f, 0x924},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x12082c, 1, 0x1f, 0x924},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120834, 1, 0x1f, 0x924},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x12083c, 1, 0x1f, 0x924},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120844, 1, 0x1f, 0x924},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x12084c, 1, 0x1f, 0x924},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120854, 1, 0x1f, 0x924},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x12085c, 1, 0x1f, 0x924},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120864, 1, 0x1f, 0x924},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x12086c, 1, 0x1f, 0x924},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120874, 1, 0x1f, 0x924},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x12087c, 1, 0x1f, 0x924},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120884, 1, 0x1f, 0x924},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x12088c, 1, 0x1f, 0x924},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120894, 1, 0x1f, 0x924},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x12089c, 1, 0x1f, 0x924},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a4, 1, 0x1f, 0x924},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208ac, 1, 0x1f, 0x924},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b4, 1, 0x1f, 0x924},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208bc, 1, 0x1f, 0x924},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c4, 1, 0x1f, 0x924},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208cc, 1, 0x1f, 0x924},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d4, 1, 0x1f, 0x924},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208dc, 1, 0x1f, 0x924},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e4, 1, 0x1f, 0x924},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208ec, 1, 0x1f, 0x924},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f4, 1, 0x1f, 0x924},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x1208fc, 1, 0x1f, 0x924},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120904, 1, 0x1f, 0x924},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x12090c, 1, 0x1f, 0x924},
+ { 0x120910, 7, 0x1c, 0x924},
+ { 0x120930, 9, 0x1c, 0x924},
+ { 0x12095c, 37, 0x18, 0x924},
+ { 0x120a00, 2, 0x7, 0x924},
+ { 0x120b00, 1, 0x18, 0x924},
+ { 0x122000, 2, 0x1f, 0x924},
+ { 0x122008, 2046, 0x1, 0x924},
+ { 0x128000, 6144, 0x1e, 0x924},
+ { 0x130000, 1, 0x1c, 0x1fff},
+ { 0x130004, 11, 0x1c, 0x924},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x130034, 6, 0x1c, 0x924},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130058, 3, 0x1c, 0x924},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13006c, 8, 0x1c, 0x924},
+ { 0x13009c, 2, 0x1c, 0x1fff},
+ { 0x1300a8, 1, 0x1c, 0x1fff},
+ { 0x130100, 12, 0x1c, 0x924},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x130134, 14, 0x1c, 0x924},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130170, 1, 0x1c, 0x924},
+ { 0x130180, 1, 0x1c, 0x924},
+ { 0x130200, 1, 0x1c, 0x924},
+ { 0x130280, 1, 0x1c, 0x924},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130304, 4, 0x1c, 0x924},
+ { 0x130380, 1, 0x1c, 0x924},
+ { 0x130400, 1, 0x1c, 0x924},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x130484, 4, 0x1c, 0x924},
+ { 0x130800, 72, 0x1c, 0x924},
+ { 0x131000, 136, 0x1c, 0x924},
+ { 0x132000, 148, 0x1c, 0x924},
+ { 0x134000, 544, 0x1c, 0x924},
+ { 0x140000, 1, 0x1f, 0x924},
+ { 0x140004, 9, 0xf, 0x924},
+ { 0x140028, 8, 0x1f, 0x924},
+ { 0x140048, 5, 0xf, 0x924},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x140064, 3, 0xf, 0x924},
+ { 0x140070, 1, 0x1f, 0x924},
+ { 0x140074, 10, 0xf, 0x924},
+ { 0x14009c, 1, 0x1f, 0x924},
+ { 0x1400a0, 5, 0xf, 0x924},
+ { 0x1400b4, 7, 0x1f, 0x924},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400d8, 2, 0xf, 0x924},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1400e4, 5, 0xf, 0x924},
+ { 0x1400f8, 2, 0x1f, 0x924},
+ { 0x140100, 5, 0x3, 0x924},
+ { 0x140114, 5, 0xf, 0x924},
+ { 0x140128, 7, 0x1f, 0x924},
+ { 0x140144, 9, 0xf, 0x924},
+ { 0x140168, 8, 0x1f, 0x924},
+ { 0x140188, 3, 0xf, 0x924},
+ { 0x140194, 13, 0x1f, 0x924},
+ { 0x1401d8, 2, 0x1f, 0x1fff},
+ { 0x1401e4, 1, 0x1f, 0x1fff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x1402e0, 2, 0xc, 0x924},
+ { 0x1402e8, 2, 0x1c, 0x924},
+ { 0x1402f0, 9, 0xc, 0x924},
+ { 0x140314, 9, 0x10, 0x924},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140354, 7, 0x10, 0x924},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x14038c, 14, 0x10, 0x924},
+ { 0x1404b0, 14, 0x10, 0x924},
+ { 0x15c000, 2, 0x1e, 0x924},
+ { 0x15c008, 5, 0x2, 0x924},
+ { 0x15c020, 8, 0x1c, 0x924},
+ { 0x15c040, 1, 0xc, 0x924},
+ { 0x15c044, 2, 0x1c, 0x924},
+ { 0x15c04c, 8, 0xc, 0x924},
+ { 0x15c06c, 8, 0x1c, 0x924},
+ { 0x15c090, 13, 0x1c, 0x924},
+ { 0x15c0c8, 24, 0x1c, 0x924},
+ { 0x15c128, 2, 0xc, 0x924},
+ { 0x15c130, 1, 0x1c, 0x924},
+ { 0x15c138, 6, 0x1c, 0x924},
+ { 0x15c150, 2, 0x18, 0x924},
+ { 0x15c158, 2, 0x8, 0x924},
+ { 0x15c160, 23, 0x10, 0x924},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c1d4, 23, 0x10, 0x924},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x15c24c, 90, 0x10, 0x924},
+ { 0x160004, 6, 0x18, 0x924},
+ { 0x16003c, 1, 0x10, 0x924},
+ { 0x160040, 6, 0x18, 0x924},
+ { 0x16005c, 6, 0x18, 0x924},
+ { 0x160074, 1, 0x10, 0x924},
+ { 0x160078, 2, 0x18, 0x924},
+ { 0x160300, 8, 0x18, 0x924},
+ { 0x160330, 6, 0x18, 0x924},
+ { 0x160404, 6, 0x18, 0x924},
+ { 0x16043c, 1, 0x10, 0x924},
+ { 0x160440, 6, 0x18, 0x924},
+ { 0x16045c, 6, 0x18, 0x924},
+ { 0x160474, 1, 0x10, 0x924},
+ { 0x160478, 2, 0x18, 0x924},
+ { 0x160700, 8, 0x18, 0x924},
+ { 0x160730, 6, 0x18, 0x924},
+ { 0x161000, 7, 0x1f, 0x924},
+ { 0x16102c, 2, 0x1f, 0x1fff},
+ { 0x161038, 1, 0x1f, 0x1fff},
+ { 0x16103c, 2, 0x1c, 0x924},
+ { 0x161800, 2, 0x1f, 0x924},
+ { 0x162000, 54, 0x18, 0x924},
+ { 0x162200, 60, 0x18, 0x924},
+ { 0x162400, 54, 0x18, 0x924},
+ { 0x162600, 60, 0x18, 0x924},
+ { 0x162800, 54, 0x18, 0x924},
+ { 0x162a00, 60, 0x18, 0x924},
+ { 0x162c00, 54, 0x18, 0x924},
+ { 0x162e00, 60, 0x18, 0x924},
+ { 0x163000, 1, 0x18, 0x924},
+ { 0x163008, 1, 0x18, 0x924},
+ { 0x163010, 1, 0x18, 0x924},
+ { 0x163018, 1, 0x18, 0x924},
+ { 0x163020, 5, 0x18, 0x924},
+ { 0x163038, 3, 0x18, 0x924},
+ { 0x163048, 3, 0x18, 0x924},
+ { 0x163058, 1, 0x18, 0x924},
+ { 0x163060, 1, 0x18, 0x924},
+ { 0x163068, 1, 0x18, 0x924},
+ { 0x163070, 3, 0x18, 0x924},
+ { 0x163080, 1, 0x18, 0x924},
+ { 0x163088, 3, 0x18, 0x924},
+ { 0x163098, 1, 0x18, 0x924},
+ { 0x1630a0, 1, 0x18, 0x924},
+ { 0x1630a8, 1, 0x18, 0x924},
+ { 0x1630b0, 2, 0x10, 0x924},
+ { 0x1630c0, 1, 0x18, 0x924},
+ { 0x1630c8, 1, 0x18, 0x924},
+ { 0x1630d0, 1, 0x18, 0x924},
+ { 0x1630d8, 1, 0x18, 0x924},
+ { 0x1630e0, 2, 0x18, 0x924},
+ { 0x163110, 1, 0x18, 0x924},
+ { 0x163120, 2, 0x18, 0x924},
+ { 0x163420, 4, 0x18, 0x924},
+ { 0x163438, 2, 0x18, 0x924},
+ { 0x163488, 2, 0x18, 0x924},
+ { 0x163520, 2, 0x18, 0x924},
+ { 0x163800, 1, 0x18, 0x924},
+ { 0x163808, 1, 0x18, 0x924},
+ { 0x163810, 1, 0x18, 0x924},
+ { 0x163818, 1, 0x18, 0x924},
+ { 0x163820, 5, 0x18, 0x924},
+ { 0x163838, 3, 0x18, 0x924},
+ { 0x163848, 3, 0x18, 0x924},
+ { 0x163858, 1, 0x18, 0x924},
+ { 0x163860, 1, 0x18, 0x924},
+ { 0x163868, 1, 0x18, 0x924},
+ { 0x163870, 3, 0x18, 0x924},
+ { 0x163880, 1, 0x18, 0x924},
+ { 0x163888, 3, 0x18, 0x924},
+ { 0x163898, 1, 0x18, 0x924},
+ { 0x1638a0, 1, 0x18, 0x924},
+ { 0x1638a8, 1, 0x18, 0x924},
+ { 0x1638b0, 2, 0x10, 0x924},
+ { 0x1638c0, 1, 0x18, 0x924},
+ { 0x1638c8, 1, 0x18, 0x924},
+ { 0x1638d0, 1, 0x18, 0x924},
+ { 0x1638d8, 1, 0x18, 0x924},
+ { 0x1638e0, 2, 0x18, 0x924},
+ { 0x163910, 1, 0x18, 0x924},
+ { 0x163920, 2, 0x18, 0x924},
+ { 0x163c20, 4, 0x18, 0x924},
+ { 0x163c38, 2, 0x18, 0x924},
+ { 0x163c88, 2, 0x18, 0x924},
+ { 0x163d20, 2, 0x18, 0x924},
+ { 0x164000, 5, 0x1f, 0x924},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x16401c, 53, 0x1f, 0x924},
+ { 0x164100, 2, 0x1f, 0x1fff},
+ { 0x16410c, 1, 0x1f, 0x1fff},
+ { 0x164110, 2, 0x1e, 0x924},
+ { 0x164118, 15, 0x1c, 0x924},
+ { 0x164200, 1, 0x1f, 0x924},
+ { 0x164208, 1, 0x1f, 0x924},
+ { 0x164210, 1, 0x1f, 0x924},
+ { 0x164218, 1, 0x1f, 0x924},
+ { 0x164220, 1, 0x1f, 0x924},
+ { 0x164228, 1, 0x1f, 0x924},
+ { 0x164230, 1, 0x1f, 0x924},
+ { 0x164238, 1, 0x1f, 0x924},
+ { 0x164240, 1, 0x1f, 0x924},
+ { 0x164248, 1, 0x1f, 0x924},
+ { 0x164250, 1, 0x1f, 0x924},
+ { 0x164258, 1, 0x1f, 0x924},
+ { 0x164260, 1, 0x1f, 0x924},
+ { 0x164270, 2, 0x1f, 0x924},
+ { 0x164280, 2, 0x1f, 0x924},
+ { 0x164800, 2, 0x1f, 0x924},
+ { 0x165000, 2, 0x1f, 0x924},
+ { 0x166000, 164, 0x1f, 0x924},
+ { 0x1662b0, 2, 0x1f, 0x1fff},
+ { 0x1662bc, 1, 0x1f, 0x1fff},
+ { 0x1662cc, 7, 0x1c, 0x924},
+ { 0x166400, 49, 0x1f, 0x924},
+ { 0x1664c8, 32, 0x1f, 0x924},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x16654c, 1, 0x1f, 0x924},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166554, 1, 0x1f, 0x924},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x16655c, 1, 0x1f, 0x924},
+ { 0x166568, 2, 0x1f, 0x924},
+ { 0x166570, 5, 0x1c, 0x924},
+ { 0x166800, 1, 0x1f, 0x924},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168004, 1, 0x1f, 0x924},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x16800c, 1, 0x1f, 0x924},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168014, 1, 0x1f, 0x924},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x16801c, 3, 0x1f, 0x924},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168030, 10, 0x1f, 0x924},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x16807c, 106, 0x1f, 0x924},
+ { 0x168224, 2, 0x3, 0x924},
+ { 0x16822c, 3, 0x1f, 0x924},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x16823c, 25, 0x1f, 0x924},
+ { 0x1682a0, 12, 0x3, 0x924},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x1682ec, 5, 0x1f, 0x924},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x16840c, 1, 0x1f, 0x924},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168418, 2, 0x3, 0x924},
+ { 0x168420, 6, 0x1f, 0x924},
+ { 0x168448, 2, 0x1f, 0x1fff},
+ { 0x168454, 1, 0x1f, 0x1fff},
+ { 0x168800, 19, 0x1f, 0x924},
+ { 0x168900, 1, 0x1f, 0x924},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16a000, 1536, 0x1f, 0x924},
+ { 0x16c000, 1536, 0x1f, 0x924},
+ { 0x16e000, 16, 0x2, 0x924},
+ { 0x16e040, 8, 0x1c, 0x924},
+ { 0x16e100, 1, 0x2, 0x924},
+ { 0x16e200, 2, 0x2, 0xfff},
+ { 0x16e400, 1, 0x2, 0x924},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e40c, 94, 0x2, 0x924},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e69c, 8, 0x2, 0x924},
+ { 0x16e6bc, 4, 0x1e, 0x924},
+ { 0x16e6cc, 4, 0x2, 0x924},
+ { 0x16e6e0, 2, 0x1c, 0x924},
+ { 0x16e6e8, 5, 0xc, 0x924},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e70c, 1, 0x1c, 0x924},
+ { 0x16e768, 17, 0x1c, 0x924},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x170000, 24, 0x1f, 0x924},
+ { 0x170060, 4, 0x3, 0x924},
+ { 0x170070, 13, 0x1f, 0x924},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700a8, 1, 0x1f, 0x924},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700b4, 3, 0x1f, 0x924},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x1700c4, 44, 0x1f, 0x924},
+ { 0x170184, 2, 0x1f, 0x1fff},
+ { 0x170190, 1, 0x1f, 0x1fff},
+ { 0x170194, 11, 0x1c, 0x924},
+ { 0x1701c4, 1, 0x1c, 0x924},
+ { 0x1701cc, 7, 0x1c, 0x924},
+ { 0x1701e8, 1, 0x18, 0x924},
+ { 0x1701ec, 1, 0x1c, 0x924},
+ { 0x1701f4, 1, 0x1c, 0x924},
+ { 0x170200, 4, 0x1f, 0x924},
+ { 0x170214, 1, 0x1f, 0x924},
+ { 0x170218, 77, 0x1c, 0x924},
+ { 0x170400, 64, 0x1c, 0x924},
+ { 0x178000, 1, 0x1f, 0x924},
+ { 0x180000, 61, 0x1f, 0x924},
+ { 0x180114, 2, 0x1f, 0x1fff},
+ { 0x180120, 3, 0x1f, 0x1fff},
+ { 0x180130, 1, 0x1f, 0x1fff},
+ { 0x18013c, 2, 0x1e, 0x924},
+ { 0x180200, 27, 0x1f, 0x924},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x180270, 12, 0x1f, 0x924},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1802a4, 17, 0x1f, 0x924},
+ { 0x180340, 4, 0x1f, 0x924},
+ { 0x180380, 1, 0x1c, 0x924},
+ { 0x180388, 1, 0x1c, 0x924},
+ { 0x180390, 1, 0x1c, 0x924},
+ { 0x180398, 1, 0x1c, 0x924},
+ { 0x1803a0, 5, 0x1c, 0x924},
+ { 0x1803b4, 2, 0x18, 0x924},
+ { 0x180400, 256, 0x3, 0xfff},
+ { 0x181000, 4, 0x1f, 0x93c},
+ { 0x181010, 1020, 0x1f, 0x38},
+ { 0x182000, 4, 0x18, 0x924},
+ { 0x1a0000, 1, 0x1f, 0x92c},
+ { 0x1a0004, 5631, 0x1f, 0x8},
+ { 0x1a5800, 2560, 0x1e, 0x8},
+ { 0x1a8000, 1, 0x1f, 0x92c},
+ { 0x1a8004, 8191, 0x1e, 0x8},
+ { 0x1b0000, 1, 0x1f, 0x92c},
+ { 0x1b0004, 15, 0x2, 0x8},
+ { 0x1b0040, 1, 0x1e, 0x92c},
+ { 0x1b0044, 239, 0x2, 0x8},
+ { 0x1b0400, 1, 0x1f, 0x92c},
+ { 0x1b0404, 255, 0x2, 0x8},
+ { 0x1b0800, 1, 0x1f, 0x924},
+ { 0x1b0840, 1, 0x1e, 0x924},
+ { 0x1b0c00, 1, 0x1f, 0x1fff},
+ { 0x1b1000, 1, 0x1f, 0x1fff},
+ { 0x1b1040, 1, 0x1e, 0x1fff},
+ { 0x1b1400, 1, 0x1f, 0x924},
+ { 0x1b1440, 1, 0x1e, 0x924},
+ { 0x1b1480, 1, 0x1e, 0x924},
+ { 0x1b14c0, 1, 0x1e, 0x924},
+ { 0x1b1800, 128, 0x1f, 0x10},
+ { 0x1b1c00, 128, 0x1f, 0x10},
+ { 0x1b2000, 1, 0x1f, 0xdb6},
+ { 0x1b2400, 1, 0x1e, 0x92c},
+ { 0x1b2404, 5631, 0x1c, 0x8},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x1b8100, 1, 0x1f, 0x924},
+ { 0x1b8140, 1, 0x1f, 0x924},
+ { 0x1b8180, 1, 0x1f, 0x924},
+ { 0x1b81c0, 1, 0x1f, 0x924},
+ { 0x1b8200, 1, 0x1f, 0x924},
+ { 0x1b8240, 1, 0x1f, 0x924},
+ { 0x1b8280, 1, 0x1f, 0x924},
+ { 0x1b82c0, 1, 0x1f, 0x924},
+ { 0x1b8300, 1, 0x1f, 0x924},
+ { 0x1b8340, 1, 0x1f, 0x924},
+ { 0x1b8380, 1, 0x1f, 0x924},
+ { 0x1b83c0, 1, 0x1f, 0x924},
+ { 0x1b8400, 1, 0x1f, 0x924},
+ { 0x1b8440, 1, 0x1f, 0x924},
+ { 0x1b8480, 1, 0x1f, 0x924},
+ { 0x1b84c0, 1, 0x1f, 0x924},
+ { 0x1b8500, 1, 0x1f, 0x924},
+ { 0x1b8540, 1, 0x1f, 0x924},
+ { 0x1b8580, 1, 0x1f, 0x924},
+ { 0x1b85c0, 19, 0x1c, 0x924},
+ { 0x1b8800, 1, 0x1f, 0x924},
+ { 0x1b8840, 1, 0x1f, 0x924},
+ { 0x1b8880, 1, 0x1f, 0x924},
+ { 0x1b88c0, 1, 0x1f, 0x924},
+ { 0x1b8900, 1, 0x1f, 0x924},
+ { 0x1b8940, 1, 0x1f, 0x924},
+ { 0x1b8980, 1, 0x1f, 0x924},
+ { 0x1b89c0, 1, 0x1f, 0x924},
+ { 0x1b8a00, 1, 0x1f, 0x934},
+ { 0x1b8a40, 1, 0x1f, 0x924},
+ { 0x1b8a80, 1, 0x1f, 0x492},
+ { 0x1b8ac0, 1, 0x1f, 0x924},
+ { 0x1b8b00, 1, 0x1f, 0x924},
+ { 0x1b8b40, 1, 0x1f, 0x924},
+ { 0x1b8b80, 1, 0x1f, 0x924},
+ { 0x1b8bc0, 1, 0x1f, 0x924},
+ { 0x1b8c00, 1, 0x1f, 0x924},
+ { 0x1b8c40, 1, 0x1f, 0x924},
+ { 0x1b8c80, 1, 0x1f, 0x924},
+ { 0x1b8cc0, 1, 0x1f, 0x924},
+ { 0x1b8cc4, 1, 0x1c, 0x924},
+ { 0x1b8d00, 1, 0x1f, 0x924},
+ { 0x1b8d40, 1, 0x1f, 0x924},
+ { 0x1b8d80, 1, 0x1f, 0x924},
+ { 0x1b8dc0, 1, 0x1f, 0x924},
+ { 0x1b8e00, 1, 0x1f, 0x924},
+ { 0x1b8e40, 1, 0x1f, 0x924},
+ { 0x1b8e80, 1, 0x1f, 0x924},
+ { 0x1b8e84, 1, 0x1c, 0x924},
+ { 0x1b8ec0, 1, 0x1e, 0x924},
+ { 0x1b8f00, 1, 0x1e, 0x924},
+ { 0x1b8f40, 1, 0x1e, 0x924},
+ { 0x1b8f80, 1, 0x1e, 0x924},
+ { 0x1b8fc0, 1, 0x1e, 0x924},
+ { 0x1b8fd4, 5, 0x1c, 0x924},
+ { 0x1b8fe8, 2, 0x18, 0x924},
+ { 0x1b9000, 1, 0x1c, 0x924},
+ { 0x1b9040, 3, 0x1c, 0x924},
+ { 0x1b905c, 1, 0x18, 0x924},
+ { 0x1b9064, 1, 0x10, 0x924},
+ { 0x1b9080, 10, 0x10, 0x924},
+ { 0x1c0000, 2, 0x1f, 0x924},
+ { 0x200000, 65, 0x1f, 0x924},
+ { 0x200124, 2, 0x1f, 0x1fff},
+ { 0x200130, 3, 0x1f, 0x1fff},
+ { 0x200140, 1, 0x1f, 0x1fff},
+ { 0x20014c, 2, 0x1e, 0x924},
+ { 0x200200, 27, 0x1f, 0x924},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x200270, 12, 0x1f, 0x924},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x2002a4, 17, 0x1f, 0x924},
+ { 0x200340, 4, 0x1f, 0x924},
+ { 0x200380, 1, 0x1c, 0x924},
+ { 0x200388, 1, 0x1c, 0x924},
+ { 0x200390, 1, 0x1c, 0x924},
+ { 0x200398, 1, 0x1c, 0x924},
+ { 0x2003a0, 1, 0x1c, 0x924},
+ { 0x2003a8, 2, 0x1c, 0x924},
+ { 0x200400, 256, 0x3, 0xfff},
+ { 0x202000, 4, 0x1f, 0x1927},
+ { 0x202010, 2044, 0x1f, 0x1007},
+ { 0x204000, 4, 0x18, 0x924},
+ { 0x220000, 1, 0x1f, 0x925},
+ { 0x220004, 5631, 0x1f, 0x1},
+ { 0x225800, 2560, 0x1e, 0x1},
+ { 0x228000, 1, 0x1f, 0x925},
+ { 0x228004, 8191, 0x1e, 0x1},
+ { 0x230000, 1, 0x1f, 0x925},
+ { 0x230004, 15, 0x2, 0x1},
+ { 0x230040, 1, 0x1e, 0x925},
+ { 0x230044, 239, 0x2, 0x1},
+ { 0x230400, 1, 0x1f, 0x925},
+ { 0x230404, 255, 0x2, 0x1},
+ { 0x230800, 1, 0x1f, 0x924},
+ { 0x230840, 1, 0x1e, 0x924},
+ { 0x230c00, 1, 0x1f, 0x924},
+ { 0x231000, 1, 0x1f, 0x924},
+ { 0x231040, 1, 0x1e, 0x924},
+ { 0x231400, 1, 0x1f, 0x924},
+ { 0x231440, 1, 0x1e, 0x924},
+ { 0x231480, 1, 0x1e, 0x924},
+ { 0x2314c0, 1, 0x1e, 0x924},
+ { 0x231800, 128, 0x1f, 0x2},
+ { 0x231c00, 128, 0x1f, 0x2},
+ { 0x232000, 1, 0x1f, 0xdb6},
+ { 0x232400, 1, 0x1e, 0x925},
+ { 0x232404, 5631, 0x1c, 0x1},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x238100, 1, 0x1f, 0x924},
+ { 0x238140, 1, 0x1f, 0x924},
+ { 0x238180, 1, 0x1f, 0x924},
+ { 0x2381c0, 1, 0x1f, 0x924},
+ { 0x238200, 1, 0x1f, 0x924},
+ { 0x238240, 1, 0x1f, 0x924},
+ { 0x238280, 1, 0x1f, 0x924},
+ { 0x2382c0, 1, 0x1f, 0x924},
+ { 0x238300, 1, 0x1f, 0x924},
+ { 0x238340, 1, 0x1f, 0x924},
+ { 0x238380, 1, 0x1f, 0x924},
+ { 0x2383c0, 1, 0x1f, 0x924},
+ { 0x238400, 1, 0x1f, 0x924},
+ { 0x238440, 1, 0x1f, 0x924},
+ { 0x238480, 1, 0x1f, 0x924},
+ { 0x2384c0, 1, 0x1f, 0x924},
+ { 0x238500, 1, 0x1f, 0x924},
+ { 0x238540, 1, 0x1f, 0x924},
+ { 0x238580, 1, 0x1f, 0x924},
+ { 0x2385c0, 19, 0x1c, 0x924},
+ { 0x238800, 1, 0x1f, 0x924},
+ { 0x238840, 1, 0x1f, 0x924},
+ { 0x238880, 1, 0x1f, 0x924},
+ { 0x2388c0, 1, 0x1f, 0x924},
+ { 0x238900, 1, 0x1f, 0x924},
+ { 0x238940, 1, 0x1f, 0x924},
+ { 0x238980, 1, 0x1f, 0x924},
+ { 0x2389c0, 1, 0x1f, 0x924},
+ { 0x238a00, 1, 0x1f, 0x926},
+ { 0x238a40, 1, 0x1f, 0x924},
+ { 0x238a80, 1, 0x1f, 0x492},
+ { 0x238ac0, 1, 0x1f, 0x924},
+ { 0x238b00, 1, 0x1f, 0x924},
+ { 0x238b40, 1, 0x1f, 0x924},
+ { 0x238b80, 1, 0x1f, 0x924},
+ { 0x238bc0, 1, 0x1f, 0x924},
+ { 0x238c00, 1, 0x1f, 0x924},
+ { 0x238c40, 1, 0x1f, 0x924},
+ { 0x238c80, 1, 0x1f, 0x924},
+ { 0x238cc0, 1, 0x1f, 0x924},
+ { 0x238cc4, 1, 0x1c, 0x924},
+ { 0x238d00, 1, 0x1f, 0x924},
+ { 0x238d40, 1, 0x1f, 0x924},
+ { 0x238d80, 1, 0x1f, 0x924},
+ { 0x238dc0, 1, 0x1f, 0x924},
+ { 0x238e00, 1, 0x1f, 0x924},
+ { 0x238e40, 1, 0x1f, 0x924},
+ { 0x238e80, 1, 0x1f, 0x924},
+ { 0x238e84, 1, 0x1c, 0x924},
+ { 0x238ec0, 1, 0x1e, 0x924},
+ { 0x238f00, 1, 0x1e, 0x924},
+ { 0x238f40, 1, 0x1e, 0x924},
+ { 0x238f80, 1, 0x1e, 0x924},
+ { 0x238fc0, 1, 0x1e, 0x924},
+ { 0x238fd4, 5, 0x1c, 0x924},
+ { 0x238fe8, 2, 0x18, 0x924},
+ { 0x239000, 1, 0x1c, 0x924},
+ { 0x239040, 3, 0x1c, 0x924},
+ { 0x23905c, 1, 0x18, 0x924},
+ { 0x239064, 1, 0x10, 0x924},
+ { 0x239080, 10, 0x10, 0x924},
+ { 0x240000, 2, 0x1f, 0x924},
+ { 0x280000, 65, 0x1f, 0x924},
+ { 0x280124, 2, 0x1f, 0x1fff},
+ { 0x280130, 3, 0x1f, 0x1fff},
+ { 0x280140, 1, 0x1f, 0x1fff},
+ { 0x28014c, 2, 0x1e, 0x924},
+ { 0x280200, 27, 0x1f, 0x924},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x280270, 12, 0x1f, 0x924},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2802a4, 17, 0x1f, 0x924},
+ { 0x280340, 4, 0x1f, 0x924},
+ { 0x280380, 1, 0x1c, 0x924},
+ { 0x280388, 1, 0x1c, 0x924},
+ { 0x280390, 1, 0x1c, 0x924},
+ { 0x280398, 1, 0x1c, 0x924},
+ { 0x2803a0, 1, 0x1c, 0x924},
+ { 0x2803a8, 2, 0x1c, 0x924},
+ { 0x280400, 256, 0x3, 0xfff},
+ { 0x282000, 4, 0x1f, 0x9e4},
+ { 0x282010, 2044, 0x1f, 0x1c0},
+ { 0x284000, 4, 0x18, 0x924},
+ { 0x2a0000, 1, 0x1f, 0x964},
+ { 0x2a0004, 5631, 0x1f, 0x40},
+ { 0x2a5800, 2560, 0x1e, 0x40},
+ { 0x2a8000, 1, 0x1f, 0x964},
+ { 0x2a8004, 8191, 0x1e, 0x40},
+ { 0x2b0000, 1, 0x1f, 0x964},
+ { 0x2b0004, 15, 0x2, 0x40},
+ { 0x2b0040, 1, 0x1e, 0x964},
+ { 0x2b0044, 239, 0x2, 0x40},
+ { 0x2b0400, 1, 0x1f, 0x964},
+ { 0x2b0404, 255, 0x2, 0x40},
+ { 0x2b0800, 1, 0x1f, 0x924},
+ { 0x2b0840, 1, 0x1e, 0x924},
+ { 0x2b0c00, 1, 0x1f, 0x924},
+ { 0x2b1000, 1, 0x1f, 0x924},
+ { 0x2b1040, 1, 0x1e, 0x924},
+ { 0x2b1400, 1, 0x1f, 0x924},
+ { 0x2b1440, 1, 0x1e, 0x924},
+ { 0x2b1480, 1, 0x1e, 0x924},
+ { 0x2b14c0, 1, 0x1e, 0x924},
+ { 0x2b1800, 128, 0x1f, 0x80},
+ { 0x2b1c00, 128, 0x1f, 0x80},
+ { 0x2b2000, 1, 0x1f, 0xdb6},
+ { 0x2b2400, 1, 0x1e, 0x964},
+ { 0x2b2404, 5631, 0x1c, 0x40},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x2b80c0, 1, 0x1f, 0x924},
+ { 0x2b8100, 1, 0x1f, 0x924},
+ { 0x2b8140, 1, 0x1f, 0x924},
+ { 0x2b8180, 1, 0x1f, 0x924},
+ { 0x2b81c0, 1, 0x1f, 0x924},
+ { 0x2b8200, 1, 0x1f, 0x924},
+ { 0x2b8240, 1, 0x1f, 0x924},
+ { 0x2b8280, 1, 0x1f, 0x924},
+ { 0x2b82c0, 1, 0x1f, 0x924},
+ { 0x2b8300, 1, 0x1f, 0x924},
+ { 0x2b8340, 1, 0x1f, 0x924},
+ { 0x2b8380, 1, 0x1f, 0x924},
+ { 0x2b83c0, 1, 0x1f, 0x924},
+ { 0x2b8400, 1, 0x1f, 0x924},
+ { 0x2b8440, 1, 0x1f, 0x924},
+ { 0x2b8480, 1, 0x1f, 0x924},
+ { 0x2b84c0, 1, 0x1f, 0x924},
+ { 0x2b8500, 1, 0x1f, 0x924},
+ { 0x2b8540, 1, 0x1f, 0x924},
+ { 0x2b8580, 1, 0x1f, 0x924},
+ { 0x2b85c0, 19, 0x1c, 0x924},
+ { 0x2b8800, 1, 0x1f, 0x924},
+ { 0x2b8840, 1, 0x1f, 0x924},
+ { 0x2b8880, 1, 0x1f, 0x924},
+ { 0x2b88c0, 1, 0x1f, 0x924},
+ { 0x2b8900, 1, 0x1f, 0x924},
+ { 0x2b8940, 1, 0x1f, 0x924},
+ { 0x2b8980, 1, 0x1f, 0x924},
+ { 0x2b89c0, 1, 0x1f, 0x924},
+ { 0x2b8a00, 1, 0x1f, 0x9a4},
+ { 0x2b8a40, 1, 0x1f, 0x924},
+ { 0x2b8a80, 1, 0x1f, 0x492},
+ { 0x2b8ac0, 1, 0x1f, 0x924},
+ { 0x2b8b00, 1, 0x1f, 0x924},
+ { 0x2b8b40, 1, 0x1f, 0x924},
+ { 0x2b8b80, 1, 0x1f, 0x924},
+ { 0x2b8bc0, 1, 0x1f, 0x924},
+ { 0x2b8c00, 1, 0x1f, 0x924},
+ { 0x2b8c40, 1, 0x1f, 0x924},
+ { 0x2b8c80, 1, 0x1f, 0x924},
+ { 0x2b8cc0, 1, 0x1f, 0x924},
+ { 0x2b8cc4, 1, 0x1c, 0x924},
+ { 0x2b8d00, 1, 0x1f, 0x924},
+ { 0x2b8d40, 1, 0x1f, 0x924},
+ { 0x2b8d80, 1, 0x1f, 0x924},
+ { 0x2b8dc0, 1, 0x1f, 0x924},
+ { 0x2b8e00, 1, 0x1f, 0x924},
+ { 0x2b8e40, 1, 0x1f, 0x924},
+ { 0x2b8e80, 1, 0x1f, 0x924},
+ { 0x2b8e84, 1, 0x1c, 0x924},
+ { 0x2b8ec0, 1, 0x1e, 0x924},
+ { 0x2b8f00, 1, 0x1e, 0x924},
+ { 0x2b8f40, 1, 0x1e, 0x924},
+ { 0x2b8f80, 1, 0x1e, 0x924},
+ { 0x2b8fc0, 1, 0x1e, 0x924},
+ { 0x2b8fd4, 5, 0x1c, 0x924},
+ { 0x2b8fe8, 2, 0x18, 0x924},
+ { 0x2b9000, 1, 0x1c, 0x924},
+ { 0x2b9040, 3, 0x1c, 0x924},
+ { 0x2b905c, 1, 0x18, 0x924},
+ { 0x2b9064, 1, 0x10, 0x924},
+ { 0x2b9080, 10, 0x10, 0x924},
+ { 0x2c0000, 2, 0x1f, 0x1fff},
+ { 0x300000, 65, 0x1f, 0x924},
+ { 0x300124, 2, 0x1f, 0x1fff},
+ { 0x300130, 3, 0x1f, 0x1fff},
+ { 0x300140, 1, 0x1f, 0x1fff},
+ { 0x30014c, 2, 0x1e, 0x924},
+ { 0x300200, 27, 0x1f, 0x924},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x300270, 12, 0x1f, 0x924},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x3002a4, 17, 0x1f, 0x924},
+ { 0x300340, 4, 0x1f, 0x924},
+ { 0x300380, 1, 0x1c, 0x924},
+ { 0x300388, 1, 0x1c, 0x924},
+ { 0x300390, 1, 0x1c, 0x924},
+ { 0x300398, 1, 0x1c, 0x924},
+ { 0x3003a0, 1, 0x1c, 0x924},
+ { 0x3003a8, 2, 0x1c, 0x924},
+ { 0x300400, 256, 0x3, 0xfff},
+ { 0x302000, 4, 0x1f, 0xf24},
+ { 0x302010, 2044, 0x1f, 0xe00},
+ { 0x304000, 4, 0x18, 0x924},
+ { 0x320000, 1, 0x1f, 0xb24},
+ { 0x320004, 5631, 0x1f, 0x200},
+ { 0x325800, 2560, 0x1e, 0x200},
+ { 0x328000, 1, 0x1f, 0xb24},
+ { 0x328004, 8191, 0x1e, 0x200},
+ { 0x330000, 1, 0x1f, 0xb24},
+ { 0x330004, 15, 0x2, 0x200},
+ { 0x330040, 1, 0x1e, 0xb24},
+ { 0x330044, 239, 0x2, 0x200},
+ { 0x330400, 1, 0x1f, 0xb24},
+ { 0x330404, 255, 0x2, 0x200},
+ { 0x330800, 1, 0x1f, 0x924},
+ { 0x330840, 1, 0x1e, 0x924},
+ { 0x330c00, 1, 0x1f, 0x924},
+ { 0x331000, 1, 0x1f, 0x924},
+ { 0x331040, 1, 0x1e, 0x924},
+ { 0x331400, 1, 0x1f, 0x924},
+ { 0x331440, 1, 0x1e, 0x924},
+ { 0x331480, 1, 0x1e, 0x924},
+ { 0x3314c0, 1, 0x1e, 0x924},
+ { 0x331800, 128, 0x1f, 0x400},
+ { 0x331c00, 128, 0x1f, 0x400},
+ { 0x332000, 1, 0x1f, 0xdb6},
+ { 0x332400, 1, 0x1e, 0xb24},
+ { 0x332404, 5631, 0x1c, 0x200},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff},
+ { 0x338100, 1, 0x1f, 0x924},
+ { 0x338140, 1, 0x1f, 0x924},
+ { 0x338180, 1, 0x1f, 0x924},
+ { 0x3381c0, 1, 0x1f, 0x924},
+ { 0x338200, 1, 0x1f, 0x924},
+ { 0x338240, 1, 0x1f, 0x924},
+ { 0x338280, 1, 0x1f, 0x924},
+ { 0x3382c0, 1, 0x1f, 0x924},
+ { 0x338300, 1, 0x1f, 0x924},
+ { 0x338340, 1, 0x1f, 0x924},
+ { 0x338380, 1, 0x1f, 0x924},
+ { 0x3383c0, 1, 0x1f, 0x924},
+ { 0x338400, 1, 0x1f, 0x924},
+ { 0x338440, 1, 0x1f, 0x924},
+ { 0x338480, 1, 0x1f, 0x924},
+ { 0x3384c0, 1, 0x1f, 0x924},
+ { 0x338500, 1, 0x1f, 0x924},
+ { 0x338540, 1, 0x1f, 0x924},
+ { 0x338580, 1, 0x1f, 0x924},
+ { 0x3385c0, 19, 0x1c, 0x924},
+ { 0x338800, 1, 0x1f, 0x924},
+ { 0x338840, 1, 0x1f, 0x924},
+ { 0x338880, 1, 0x1f, 0x924},
+ { 0x3388c0, 1, 0x1f, 0x924},
+ { 0x338900, 1, 0x1f, 0x924},
+ { 0x338940, 1, 0x1f, 0x924},
+ { 0x338980, 1, 0x1f, 0x924},
+ { 0x3389c0, 1, 0x1f, 0x924},
+ { 0x338a00, 1, 0x1f, 0xd24},
+ { 0x338a40, 1, 0x1f, 0x924},
+ { 0x338a80, 1, 0x1f, 0x492},
+ { 0x338ac0, 1, 0x1f, 0x924},
+ { 0x338b00, 1, 0x1f, 0x924},
+ { 0x338b40, 1, 0x1f, 0x924},
+ { 0x338b80, 1, 0x1f, 0x924},
+ { 0x338bc0, 1, 0x1f, 0x924},
+ { 0x338c00, 1, 0x1f, 0x924},
+ { 0x338c40, 1, 0x1f, 0x924},
+ { 0x338c80, 1, 0x1f, 0x924},
+ { 0x338cc0, 1, 0x1f, 0x924},
+ { 0x338cc4, 1, 0x1c, 0x924},
+ { 0x338d00, 1, 0x1f, 0x924},
+ { 0x338d40, 1, 0x1f, 0x924},
+ { 0x338d80, 1, 0x1f, 0x924},
+ { 0x338dc0, 1, 0x1f, 0x924},
+ { 0x338e00, 1, 0x1f, 0x924},
+ { 0x338e40, 1, 0x1f, 0x924},
+ { 0x338e80, 1, 0x1f, 0x924},
+ { 0x338e84, 1, 0x1c, 0x924},
+ { 0x338ec0, 1, 0x1e, 0x924},
+ { 0x338f00, 1, 0x1e, 0x924},
+ { 0x338f40, 1, 0x1e, 0x924},
+ { 0x338f80, 1, 0x1e, 0x924},
+ { 0x338fc0, 1, 0x1e, 0x924},
+ { 0x338fd4, 5, 0x1c, 0x924},
+ { 0x338fe8, 2, 0x18, 0x924},
+ { 0x339000, 1, 0x1c, 0x924},
+ { 0x339040, 3, 0x1c, 0x924},
+ { 0x33905c, 1, 0x18, 0x924},
+ { 0x339064, 1, 0x10, 0x924},
+ { 0x339080, 10, 0x10, 0x924},
+ { 0x340000, 2, 0x1f, 0x924},
+ { 0x3a0000, 40960, 0x1c, 0x1000}
};
-#define REGS_COUNT ARRAY_SIZE(reg_addrs)
-static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a };
+#define REGS_COUNT ARRAY_SIZE(reg_addrs)
-static const u32 page_vals_e2[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2)
+static const struct reg_addr idle_reg_addrs[] = {
+ { 0x2104, 1, 0x1f, 0xfff},
+ { 0x2110, 2, 0x1f, 0xfff},
+ { 0x211c, 8, 0x1f, 0xfff},
+ { 0x2814, 1, 0x1f, 0xfff},
+ { 0x281c, 2, 0x1f, 0xfff},
+ { 0x2854, 1, 0x1f, 0xfff},
+ { 0x285c, 1, 0x1f, 0xfff},
+ { 0x3040, 1, 0x1f, 0xfff},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9298, 1, 0x1c, 0xfff},
+ { 0x92a8, 1, 0x1c, 0x1fff},
+ { 0xa38c, 1, 0x1f, 0x1fff},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xc09c, 1, 0x3, 0xfff},
+ { 0x103b0, 1, 0x1f, 0xfff},
+ { 0x103c0, 1, 0x1f, 0xfff},
+ { 0x103d0, 1, 0x3, 0x1fff},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x183bc, 1, 0x1c, 0x1fff},
+ { 0x183cc, 1, 0x1c, 0x1fff},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x202a8, 1, 0x1f, 0xfff},
+ { 0x202b8, 1, 0x1f, 0x1fff},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x40154, 14, 0x1f, 0xfff},
+ { 0x40198, 1, 0x1f, 0x1fff},
+ { 0x404ac, 1, 0x1f, 0xfff},
+ { 0x404bc, 1, 0x1f, 0x1fff},
+ { 0x42290, 1, 0x1f, 0xfff},
+ { 0x422a0, 1, 0x1f, 0xfff},
+ { 0x422b0, 1, 0x1f, 0x1fff},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x501d0, 1, 0x1f, 0xfff},
+ { 0x501e0, 1, 0x1f, 0x1fff},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x6011c, 1, 0x1f, 0xfff},
+ { 0x6012c, 1, 0x1f, 0x1fff},
+ { 0xc101c, 1, 0x1f, 0xfff},
+ { 0xc102c, 1, 0x1f, 0x1fff},
+ { 0xc2290, 1, 0x1f, 0xfff},
+ { 0xc22a0, 1, 0x1f, 0xfff},
+ { 0xc22b0, 1, 0x1f, 0x1fff},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc4294, 1, 0x1f, 0xfff},
+ { 0xc42a4, 1, 0x1f, 0xfff},
+ { 0xc42b4, 1, 0x1f, 0x1fff},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd01d8, 1, 0x1f, 0xfff},
+ { 0xd01e8, 1, 0x1f, 0x1fff},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe01c8, 1, 0x1f, 0xfff},
+ { 0xe01d8, 1, 0x1f, 0x1fff},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101030, 1, 0x1f, 0xfff},
+ { 0x101040, 1, 0x1f, 0x1fff},
+ { 0x102058, 1, 0x1f, 0x1fff},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x103068, 1, 0x1f, 0xfff},
+ { 0x103078, 1, 0x1f, 0xfff},
+ { 0x103088, 1, 0x1f, 0x1fff},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x1040fc, 1, 0x1f, 0xfff},
+ { 0x10410c, 1, 0x1f, 0x1fff},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x108094, 1, 0x3, 0xfff},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120564, 3, 0x1f, 0xfff},
+ { 0x12057c, 1, 0x1f, 0x1fff},
+ { 0x12058c, 1, 0x1f, 0x1fff},
+ { 0x120608, 1, 0x1e, 0xfff},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120808, 3, 0x1f, 0xfff},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13009c, 1, 0x1c, 0x1fff},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1401c8, 1, 0xf, 0xfff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x16101c, 1, 0x1f, 0xfff},
+ { 0x16102c, 1, 0x1f, 0x1fff},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x1640f0, 1, 0x1f, 0xfff},
+ { 0x166290, 1, 0x1f, 0xfff},
+ { 0x1662a0, 1, 0x1f, 0xfff},
+ { 0x1662b0, 1, 0x1f, 0x1fff},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168438, 1, 0x1f, 0xfff},
+ { 0x168448, 1, 0x1f, 0x1fff},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16e200, 128, 0x2, 0xfff},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x170174, 1, 0x1f, 0xfff},
+ { 0x170184, 1, 0x1f, 0x1fff},
+ { 0x1800f4, 1, 0x1f, 0xfff},
+ { 0x180104, 1, 0x1f, 0xfff},
+ { 0x180114, 1, 0x1f, 0x1fff},
+ { 0x180124, 1, 0x1f, 0x1fff},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x200104, 1, 0x1f, 0xfff},
+ { 0x200114, 1, 0x1f, 0xfff},
+ { 0x200124, 1, 0x1f, 0x1fff},
+ { 0x200134, 1, 0x1f, 0x1fff},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x280104, 1, 0x1f, 0xfff},
+ { 0x280114, 1, 0x1f, 0xfff},
+ { 0x280124, 1, 0x1f, 0x1fff},
+ { 0x280134, 1, 0x1f, 0x1fff},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x300104, 1, 0x1f, 0xfff},
+ { 0x300114, 1, 0x1f, 0xfff},
+ { 0x300124, 1, 0x1f, 0x1fff},
+ { 0x300134, 1, 0x1f, 0x1fff},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff}
+};
-static const u32 page_write_regs_e2[] = { 328476 };
-#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2)
+#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs)
-static const struct reg_addr page_read_regs_e2[] = {
- { 0x58000, 4608, RI_E2_ONLINE } };
-#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2)
+static const u32 read_reg_e1[] = {
+ 0x1b1000};
-static const u32 page_vals_e3[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3)
+static const struct wreg_addr wreg_addr_e1 = {
+ 0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff};
-static const u32 page_write_regs_e3[] = { 328476 };
-#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3)
+static const u32 read_reg_e1h[] = {
+ 0x1b1040, 0x1b1000};
-static const struct reg_addr page_read_regs_e3[] = {
- { 0x58000, 4608, RI_E3E3B0_ONLINE } };
-#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3)
+static const struct wreg_addr wreg_addr_e1h = {
+ 0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff};
+
+static const u32 read_reg_e2[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e2 = {
+ 0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff};
-#endif /* BNX2X_DUMP_H */
+static const u32 read_reg_e3[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3 = {
+ 0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3b0[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3b0 = {
+ 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
+
+static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
+ {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812,
+ 26247, 35655, 19074},
+ {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804,
+ 26977, 40957, 35895},
+ {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
+ 25608, 41377, 43903},
+ {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
+ 25616, 42067, 43903},
+ {45302, 17999, 34802, 44249, 18771, 35571, 45356, 31823, 48626, 45332,
+ 25679, 42482, 43903}
+};
+#endif
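
Each entry in the reg_addrs[] and idle_reg_addrs[] tables above is a { base address, length in dwords, chip mask, preset bitmap } tuple: the chip mask is matched against the DUMP_CHIP_* flags and the preset bitmap is tested bit-by-bit by IS_REG_IN_PRESET() in bnx2x_ethtool.c further down. As a minimal sketch of how such a table is consumed — the struct layout and names below are assumptions inferred from the field accesses in the ethtool code (addr, size, chips, presets), not the driver's actual header — a per-preset dump length can be computed like this:

/* Hypothetical, self-contained sketch -- not driver code.  The field names
 * mirror the accesses made by bnx2x_ethtool.c (addr, size, chips, presets);
 * the real definitions live in bnx2x_dump.h.
 */
#include <stdint.h>
#include <stddef.h>

struct reg_block {
	uint32_t addr;     /* offset of the first register in the block */
	uint32_t size;     /* number of 32-bit registers in the block */
	uint32_t chips;    /* OR of per-chip bits (E1/E1H/E2/E3A0/E3B0) */
	uint32_t presets;  /* bit (p - 1) set => block belongs to preset p */
};

/* Count the dwords one chip contributes to a single preset. */
static size_t preset_dwords(const struct reg_block *tbl, size_t entries,
			    uint32_t chip_bit, uint32_t preset)
{
	size_t i, dwords = 0;

	for (i = 0; i < entries; i++)
		if ((tbl[i].chips & chip_bit) &&
		    (tbl[i].presets & (1u << (preset - 1))))
			dwords += tbl[i].size;

	return dwords;
}

In the driver itself these counts are not recomputed at runtime: they are precomputed into the dump_num_registers[][] table above, which __bnx2x_get_preset_regs_len() simply indexes by chip and preset.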
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 277f17e3c8f8..9a674b14b403 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
/* bnx2x_ethtool.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -186,6 +186,7 @@ static const struct {
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+
static int bnx2x_get_port_type(struct bnx2x *bp)
{
int port_type;
@@ -233,7 +234,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
!(bp->flags & MF_FUNC_DIS)) {
- cmd->duplex = bp->link_vars.duplex;
+ cmd->duplex = bp->link_vars.duplex;
if (IS_MF(bp) && !BP_NOMCP(bp))
ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
@@ -399,7 +400,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
- /* Save new config in case command complete successully */
+ /* Save new config in case command complete successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
/* Get the new cfg_idx */
cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -596,29 +597,58 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
-#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
-#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
-#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
-#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
-#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+#define DUMP_ALL_PRESETS 0x1FFF
+#define DUMP_MAX_PRESETS 13
-static bool bnx2x_is_reg_online(struct bnx2x *bp,
- const struct reg_addr *reg_info)
+static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
{
if (CHIP_IS_E1(bp))
- return IS_E1_ONLINE(reg_info->info);
+ return dump_num_registers[0][preset-1];
else if (CHIP_IS_E1H(bp))
- return IS_E1H_ONLINE(reg_info->info);
+ return dump_num_registers[1][preset-1];
else if (CHIP_IS_E2(bp))
- return IS_E2_ONLINE(reg_info->info);
+ return dump_num_registers[2][preset-1];
else if (CHIP_IS_E3A0(bp))
- return IS_E3_ONLINE(reg_info->info);
+ return dump_num_registers[3][preset-1];
else if (CHIP_IS_E3B0(bp))
- return IS_E3B0_ONLINE(reg_info->info);
+ return dump_num_registers[4][preset-1];
else
- return false;
+ return 0;
+}
+
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+ u32 preset_idx;
+ int regdump_len = 0;
+
+ /* Calculate the total preset regs length */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
+ regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
+
+ return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_regs_len(bp);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
}
+#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
+#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
+#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
+#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
+#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
+
+#define IS_REG_IN_PRESET(presets, idx) \
+ ((presets & (1 << (idx-1))) == (1 << (idx-1)))
+
/******* Paged registers info selectors ********/
static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
{
@@ -680,38 +710,39 @@ static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
return 0;
}
-static int __bnx2x_get_regs_len(struct bnx2x *bp)
+static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
+ const struct reg_addr *reg_info)
{
- int num_pages = __bnx2x_get_page_reg_num(bp);
- int page_write_num = __bnx2x_get_page_write_num(bp);
- const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
- int page_read_num = __bnx2x_get_page_read_num(bp);
- int regdump_len = 0;
- int i, j, k;
-
- for (i = 0; i < REGS_COUNT; i++)
- if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
- regdump_len += reg_addrs[i].size;
-
- for (i = 0; i < num_pages; i++)
- for (j = 0; j < page_write_num; j++)
- for (k = 0; k < page_read_num; k++)
- if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
- regdump_len += page_read_addr[k].size;
-
- return regdump_len;
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(reg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(reg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(reg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(reg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(reg_info->chips);
+ else
+ return false;
}
-static int bnx2x_get_regs_len(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int regdump_len = 0;
-
- regdump_len = __bnx2x_get_regs_len(bp);
- regdump_len *= 4;
- regdump_len += sizeof(struct dump_hdr);
- return regdump_len;
+static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
+ const struct wreg_addr *wreg_info)
+{
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(wreg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(wreg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(wreg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(wreg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(wreg_info->chips);
+ else
+ return false;
}
/**
@@ -725,9 +756,10 @@ static int bnx2x_get_regs_len(struct net_device *dev)
* ("read address"). There may be more than one write address per "page" and
* more than one read address per write address.
*/
-static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
u32 i, j, k, n;
+
/* addresses of the paged registers */
const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
/* number of paged registers */
@@ -740,32 +772,100 @@ static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
/* number of read addresses */
int read_num = __bnx2x_get_page_read_num(bp);
+ u32 addr, size;
for (i = 0; i < num_pages; i++) {
for (j = 0; j < write_num; j++) {
REG_WR(bp, write_addr[j], page_addr[i]);
- for (k = 0; k < read_num; k++)
- if (bnx2x_is_reg_online(bp, &read_addr[k]))
- for (n = 0; n <
- read_addr[k].size; n++)
- *p++ = REG_RD(bp,
- read_addr[k].addr + n*4);
+
+ for (k = 0; k < read_num; k++) {
+ if (IS_REG_IN_PRESET(read_addr[k].presets,
+ preset)) {
+ size = read_addr[k].size;
+ for (n = 0; n < size; n++) {
+ addr = read_addr[k].addr + n*4;
+ *p++ = REG_RD(bp, addr);
+ }
+ }
+ }
}
}
}
-static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
- u32 i, j;
+ u32 i, j, addr;
+ const struct wreg_addr *wreg_addr_p = NULL;
+
+ if (CHIP_IS_E1(bp))
+ wreg_addr_p = &wreg_addr_e1;
+ else if (CHIP_IS_E1H(bp))
+ wreg_addr_p = &wreg_addr_e1h;
+ else if (CHIP_IS_E2(bp))
+ wreg_addr_p = &wreg_addr_e2;
+ else if (CHIP_IS_E3A0(bp))
+ wreg_addr_p = &wreg_addr_e3;
+ else if (CHIP_IS_E3B0(bp))
+ wreg_addr_p = &wreg_addr_e3b0;
+
+ /* Read the idle_chk registers */
+ for (i = 0; i < IDLE_REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
+ IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
+ for (j = 0; j < idle_reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
+ }
+ }
/* Read the regular registers */
- for (i = 0; i < REGS_COUNT; i++)
- if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ for (i = 0; i < REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
+ IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
for (j = 0; j < reg_addrs[i].size; j++)
*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+ }
+ }
+
+ /* Read the CAM registers */
+ if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
+ IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
+ for (i = 0; i < wreg_addr_p->size; i++) {
+ *p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
+
+ /* In case of wreg_addr register, read additional
+ registers from read_regs array
+ */
+ for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
+ addr = *(wreg_addr_p->read_regs);
+ *p++ = REG_RD(bp, addr + j*4);
+ }
+ }
+ }
+
+ /* Paged registers are supported in E2 & E3 only */
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
+		/* Read "paged" registers */
+ bnx2x_read_pages_regs(bp, p, preset);
+ }
- /* Read "paged" registes */
- bnx2x_read_pages_regs(bp, p);
+ return 0;
+}
+
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 preset_idx;
+
+ /* Read all registers, by reading all preset registers */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
+ /* Skip presets with IOR */
+ if ((preset_idx == 2) ||
+ (preset_idx == 5) ||
+ (preset_idx == 8) ||
+ (preset_idx == 11))
+ continue;
+ __bnx2x_get_preset_regs(bp, p, preset_idx);
+ p += __bnx2x_get_preset_regs_len(bp, preset_idx);
+ }
}
static void bnx2x_get_regs(struct net_device *dev,
@@ -773,9 +873,9 @@ static void bnx2x_get_regs(struct net_device *dev,
{
u32 *p = _p;
struct bnx2x *bp = netdev_priv(dev);
- struct dump_hdr dump_hdr = {0};
+ struct dump_header dump_hdr = {0};
- regs->version = 1;
+ regs->version = 2;
memset(p, 0, regs->len);
if (!netif_running(bp->dev))
@@ -785,53 +885,173 @@ static void bnx2x_get_regs(struct net_device *dev,
* cause false alarms by reading never written registers. We
* will re-enable parity attentions right after the dump.
*/
+
+ /* Disable parity on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_disable_blocks_parity(bp);
+
+ /* Disable parity on path 1 */
+ bnx2x_pretend_func(bp, 1);
bnx2x_disable_blocks_parity(bp);
- dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
- dump_hdr.dump_sign = dump_sign_all;
- dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
- dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
- dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
- dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
- if (CHIP_IS_E1(bp))
- dump_hdr.info = RI_E1_ONLINE;
- else if (CHIP_IS_E1H(bp))
- dump_hdr.info = RI_E1H_ONLINE;
- else if (!CHIP_IS_E1x(bp))
- dump_hdr.info = RI_E2_ONLINE |
- (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = DUMP_ALL_PRESETS;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
- memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
- p += dump_hdr.hdr_size + 1;
+	/* dump_meta_data encodes the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
/* Actually read the registers */
__bnx2x_get_regs(bp, p);
- /* Re-enable parity attentions */
+ /* Re-enable parity attentions on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Re-enable parity attentions on path 1 */
+ bnx2x_pretend_func(bp, 1);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
+}
+
+static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Use the ethtool_dump "flag" field as the dump preset index */
+ bp->dump_preset_idx = val->flag;
+ return 0;
+}
+
+static int bnx2x_get_dump_flag(struct net_device *dev,
+ struct ethtool_dump *dump)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Calculate the requested preset idx length */
+ dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
+ DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
+ bp->dump_preset_idx, dump->len);
+
+ dump->flag = ETHTOOL_GET_DUMP_DATA;
+ return 0;
+}
+
+static int bnx2x_get_dump_data(struct net_device *dev,
+ struct ethtool_dump *dump,
+ void *buffer)
+{
+ u32 *p = buffer;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct dump_header dump_hdr = {0};
+
+ memset(p, 0, dump->len);
+
+ /* Disable parity attentions as long as following dump may
+ * cause false alarms by reading never written registers. We
+ * will re-enable parity attentions right after the dump.
+ */
+
+ /* Disable parity on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_disable_blocks_parity(bp);
+
+ /* Disable parity on path 1 */
+ bnx2x_pretend_func(bp, 1);
+ bnx2x_disable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = bp->dump_preset_idx;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
+
+ DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
+
+	/* dump_meta_data encodes the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
+
+ /* Actually read the registers */
+ __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
+
+ /* Re-enable parity attentions on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Re-enable parity attentions on path 1 */
+ bnx2x_pretend_func(bp, 1);
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ return 0;
}
static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
- u8 phy_fw_ver[PHY_FW_VER_LEN];
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- phy_fw_ver[0] = '\0';
- bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
- snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
- (bp->common.bc_ver & 0xff0000) >> 16,
- (bp->common.bc_ver & 0xff00) >> 8,
- (bp->common.bc_ver & 0xff),
- ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
+
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS(bp);
@@ -861,13 +1081,13 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct bnx2x *bp = netdev_priv(dev);
if (wol->wolopts & ~WAKE_MAGIC) {
- DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n");
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
}
if (wol->wolopts & WAKE_MAGIC) {
if (bp->flags & NO_WOL_FLAG) {
- DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n");
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
}
bp->wol = 1;
@@ -890,7 +1110,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
if (capable(CAP_NET_ADMIN)) {
/* dump MCP trace */
- if (level & BNX2X_MSG_MCP)
+ if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
bnx2x_fw_dump_lvl(bp, KERN_INFO);
bp->msg_enable = level;
}
@@ -940,7 +1160,7 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
* Pf B takes the lock and proceeds to perform it's own access.
* pf A unlocks the per port lock, while pf B is still working (!).
* mcp takes the per port lock and corrupts pf B's access (and/or has it's own
- * acess corrupted by pf B).*
+ * access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
@@ -1070,7 +1290,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
/* we read nvram data in cpu order
* but ethtool sees it as an array of bytes
- * converting to big-endian will do the work */
+ * converting to big-endian will do the work
+ */
*ret_val = cpu_to_be32(val);
rc = 0;
break;
@@ -1297,7 +1518,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
val |= (*data_buf << BYTE_OFFSET(offset));
/* nvram data is returned as an array of bytes
- * convert it back to cpu order */
+ * convert it back to cpu order
+ */
val = be32_to_cpu(val);
rc = bnx2x_nvram_write_dword(bp, align_offset, val,
@@ -1509,6 +1731,10 @@ static int bnx2x_set_ringparam(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ DP(BNX2X_MSG_ETHTOOL,
+ "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
+ ering->rx_pending, ering->tx_pending);
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
DP(BNX2X_MSG_ETHTOOL,
"Handling parity error recovery. Try again later\n");
@@ -1747,7 +1973,6 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1875,7 +2100,8 @@ static int bnx2x_test_registers(struct bnx2x *bp)
hw = BNX2X_CHIP_MASK_E3;
/* Repeat the test twice:
- First by writing 0x00000000, second by writing 0xffffffff */
+ * First by writing 0x00000000, second by writing 0xffffffff
+ */
for (idx = 0; idx < 2; idx++) {
switch (idx) {
@@ -2388,8 +2614,8 @@ static void bnx2x_self_test(struct net_device *dev,
struct ethtool_test *etest, u64 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- u8 is_serdes;
- int rc;
+ u8 is_serdes, link_up;
+ int rc, cnt = 0;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
@@ -2397,6 +2623,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+
DP(BNX2X_MSG_ETHTOOL,
"Self-test command parameters: offline = %d, external_lb = %d\n",
(etest->flags & ETH_TEST_FL_OFFLINE),
@@ -2411,20 +2638,17 @@ static void bnx2x_self_test(struct net_device *dev,
}
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
-
+ link_up = bp->link_vars.link_up;
/* offline tests are not supported in MF mode */
if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
- u8 link_up;
/* save current value of input enable for TX port IF */
val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
/* disable input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
- link_up = bp->link_vars.link_up;
-
bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
rc = bnx2x_nic_load(bp, LOAD_DIAG);
if (rc) {
@@ -2486,17 +2710,19 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bnx2x_link_test(bp, is_serdes) != 0) {
+ if (link_up) {
+ cnt = 100;
+ while (bnx2x_link_test(bp, is_serdes) && --cnt)
+ msleep(20);
+ }
+
+ if (!cnt) {
if (!IS_MF(bp))
buf[6] = 1;
else
buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
-
-#ifdef BNX2X_EXTRA_DEBUG
- bnx2x_panic_dump(bp);
-#endif
}
#define IS_PORT_STAT(i) \
@@ -2753,15 +2979,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
/* For UDP either 2-tupple hash or 4-tupple hash is supported */
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
udp_rss_requested = 1;
else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
udp_rss_requested = 0;
@@ -2777,13 +3002,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- } else {
- return 0;
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
}
+ return 0;
+
case IPV4_FLOW:
case IPV6_FLOW:
/* For IP only 2-tupple hash is supported */
@@ -2791,9 +3016,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
+
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2809,9 +3034,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
+
default:
return -EINVAL;
}
@@ -2964,6 +3189,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
+ .get_dump_flag = bnx2x_get_dump_flag,
+ .get_dump_data = bnx2x_get_dump_data,
+ .set_dump = bnx2x_set_dump,
.get_wol = bnx2x_get_wol,
.set_wol = bnx2x_set_wol,
.get_msglevel = bnx2x_get_msglevel,
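
The three new ethtool_ops callbacks (.set_dump, .get_dump_flag, .get_dump_data) hook the preset mechanism into the generic ETHTOOL_SET_DUMP / ETHTOOL_GET_DUMP_FLAG / ETHTOOL_GET_DUMP_DATA commands. A rough user-space sketch of driving them over SIOCETHTOOL follows; the interface names come from <linux/ethtool.h>, while "eth0", the preset number and the error handling are placeholders, not anything mandated by this patch.

/* Hedged user-space sketch: select dump preset 3, then fetch that preset's
 * register dump through the standard SIOCETHTOOL ioctl.  Minimal error
 * handling; "eth0" and the preset value are placeholders.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct ethtool_dump set_req  = { .cmd = ETHTOOL_SET_DUMP, .flag = 3 };
	struct ethtool_dump flag_req = { .cmd = ETHTOOL_GET_DUMP_FLAG };
	struct ethtool_dump *data_req;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* 1. Store the preset index (ends up in bp->dump_preset_idx). */
	ifr.ifr_data = (void *)&set_req;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* 2. Ask how many bytes that preset's dump occupies. */
	ifr.ifr_data = (void *)&flag_req;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* 3. Read the dump itself into a buffer of the reported length. */
	data_req = calloc(1, sizeof(*data_req) + flag_req.len);
	if (!data_req)
		return 1;
	data_req->cmd = ETHTOOL_GET_DUMP_DATA;
	data_req->len = flag_req.len;
	ifr.ifr_data = (void *)data_req;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	printf("preset %u dump: %u bytes\n", set_req.flag, data_req->len);

	free(data_req);
	close(fd);
	return 0;
}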
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 60a83ad10370..e5f808377c91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -305,12 +305,10 @@
#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
-
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
-
#define ETH_NUM_OF_MCAST_BINS 256
#define ETH_NUM_OF_MCAST_ENGINES_E2 72
@@ -353,7 +351,6 @@
/* max number of slow path commands per port */
#define MAX_RAMRODS_PER_PORT 8
-
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define TIMERS_TICK_SIZE_CHIP (1e-3)
@@ -380,7 +377,6 @@
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
-
#define C_ERES_PER_PAGE \
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
@@ -391,8 +387,6 @@
#define INVALID_VNIC_ID 0xFF
-
#define UNDEF_IRO 0x80000000
-
#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 4bed52ba300d..f572ae164fce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 3369a50ac6b4..037860ecc343 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -899,6 +899,10 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
#define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800
+
#define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
#define PORT_FEATURE_EN_SIZE_SHIFT 24
#define PORT_FEATURE_WOL_ENABLED 0x01000000
@@ -3374,6 +3378,10 @@ struct regpair {
__le32 hi;
};
+struct regpair_native {
+ u32 lo;
+ u32 hi;
+};
/*
* Classify rule opcodes in E2/E3
@@ -4400,13 +4408,13 @@ struct tstorm_eth_function_common_config {
* MAC filtering configuration parameters per port in Tstorm
*/
struct tstorm_eth_mac_filter_config {
- __le32 ucast_drop_all;
- __le32 ucast_accept_all;
- __le32 mcast_drop_all;
- __le32 mcast_accept_all;
- __le32 bcast_accept_all;
- __le32 vlan_filter[2];
- __le32 unmatched_unicast;
+ u32 ucast_drop_all;
+ u32 ucast_accept_all;
+ u32 mcast_drop_all;
+ u32 mcast_accept_all;
+ u32 bcast_accept_all;
+ u32 vlan_filter[2];
+ u32 unmatched_unicast;
};
@@ -4898,7 +4906,7 @@ union event_data {
* per PF event ring data
*/
struct event_ring_data {
- struct regpair base_addr;
+ struct regpair_native base_addr;
#if defined(__BIG_ENDIAN)
u8 index_id;
u8 sb_id;
@@ -5131,7 +5139,7 @@ struct pci_entity {
* The fast-path status block meta-data, common to all chips
*/
struct hc_sb_data {
- struct regpair host_sb_addr;
+ struct regpair_native host_sb_addr;
struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
struct pci_entity p_func;
#if defined(__BIG_ENDIAN)
@@ -5145,7 +5153,7 @@ struct hc_sb_data {
u8 state;
u8 rsrv0;
#endif
- struct regpair rsrv1[2];
+ struct regpair_native rsrv1[2];
};
@@ -5163,7 +5171,7 @@ enum hc_segment {
* The fast-path status block meta-data
*/
struct hc_sp_status_block_data {
- struct regpair host_sb_addr;
+ struct regpair_native host_sb_addr;
#if defined(__BIG_ENDIAN)
u8 rsrv1;
u8 state;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c8f10f0e8a0d..76df015f486a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
/* bnx2x_init.h: Broadcom Everest network driver.
* Structures and macroes needed during the initialization.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index d755acfe7a40..8ab0dd900960 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -218,7 +218,7 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
/* gunzip_outlen is in dwords */
len = GUNZIP_OUTLEN(bp);
for (i = 0; i < len; i++)
- ((u32 *)GUNZIP_BUF(bp))[i] =
+ ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
bnx2x_write_big_buf_wb(bp, addr, len);
@@ -232,7 +232,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
u16 op_end =
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
STAGE_END)];
- union init_op *op;
+ const union init_op *op;
u32 op_idx, op_type, addr, len;
const u32 *data, *data_base;
@@ -244,7 +244,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
for (op_idx = op_start; op_idx < op_end; op_idx++) {
- op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+ op = (const union init_op *)&(INIT_OPS(bp)[op_idx]);
/* Get generic data */
op_type = op->raw.op;
addr = op->raw.offset;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 09096b43a6e9..c6da77fa9d07 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2012 Broadcom Corporation
+/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -3659,7 +3659,7 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3713,7 +3713,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
};
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overridden by 10G force */
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3854,7 +3854,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
};
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -4242,7 +4242,7 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, (3<<13));
- for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
wc_regs[i].val);
@@ -4748,6 +4748,12 @@ void bnx2x_link_status_update(struct link_params *params,
vars->link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[port].link_status));
+
+ /* Force link UP in non-LOOPBACK_EXT loopback mode(s) */
+ if (bp->link_params.loopback_mode != LOOPBACK_NONE &&
+ bp->link_params.loopback_mode != LOOPBACK_EXT)
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
if (bnx2x_eee_has_cap(params))
vars->eee_status = REG_RD(bp, params->shmem2_base +
offsetof(struct shmem2_region,
@@ -9520,7 +9526,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set);
+ for (i = 0; i < ARRAY_SIZE(reg_set);
i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -9592,7 +9598,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL, val);
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -13395,7 +13401,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
};
DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ee6e7ec85457..d25c7d79787a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2012 Broadcom Corporation
+/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 940ef859dc60..e81a747ea8ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -59,6 +59,7 @@
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
@@ -127,45 +128,66 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
+struct bnx2x_mac_vals {
+ u32 xmac_addr;
+ u32 xmac_val;
+ u32 emac_addr;
+ u32 emac_val;
+ u32 umac_addr;
+ u32 umac_val;
+ u32 bmac_addr;
+ u32 bmac_val[2];
+};
+
enum bnx2x_board_type {
BCM57710 = 0,
BCM57711,
BCM57711E,
BCM57712,
BCM57712_MF,
+ BCM57712_VF,
BCM57800,
BCM57800_MF,
+ BCM57800_VF,
BCM57810,
BCM57810_MF,
- BCM57840_O,
+ BCM57810_VF,
BCM57840_4_10,
BCM57840_2_20,
- BCM57840_MFO,
BCM57840_MF,
+ BCM57840_VF,
BCM57811,
- BCM57811_MF
+ BCM57811_MF,
+ BCM57840_O,
+ BCM57840_MFO,
+ BCM57811_VF
};
/* indexed by board_type, above */
static struct {
char *name;
} board_info[] = {
- { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
- { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
+ [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57811_VF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};
#ifndef PCI_DEVICE_ID_NX2_57710
@@ -183,12 +205,18 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_VF
+#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_VF
+#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
#endif
@@ -198,6 +226,9 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_VF
+#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
#endif
@@ -210,29 +241,41 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_VF
+#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_VF
+#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
+#endif
+
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
{ 0 }
};
@@ -335,6 +378,65 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+}
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -385,7 +487,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
-static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae,
u8 src_type, u8 dst_type)
{
@@ -401,9 +503,8 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
dmae->comp_val = DMAE_COMP_VAL;
}
-/* issue a dmae command over the init-channel and wailt for completion */
-static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
- struct dmae_command *dmae)
+/* issue a dmae command over the init-channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
@@ -681,12 +782,16 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
printk("%s", lvl);
+
+ /* dump buffer after the mark */
for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
+
+ /* dump buffer before the mark */
for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
@@ -701,7 +806,71 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
-void bnx2x_panic_dump(struct bnx2x *bp)
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /* In E1 we must use only the PCI configuration space to disable the
+ * MSI/MSI-X capability. It is forbidden to disable
+ * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
+ * use the mask register to prevent the HC from sending interrupts
+ * after we exit the function.
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_IFDOWN,
+ "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
int i;
u16 j;
@@ -711,6 +880,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
u16 start = 0, end = 0;
u8 cos;
#endif
+ if (disable_int)
+ bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
bp->eth_stats.unrecoverable_error++;
@@ -856,6 +1027,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
#ifdef BNX2X_STOP_ON_ERROR
+
+ /* event queue */
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ }
+
/* Rings */
/* Rx */
for_each_valid_rx_queue(bp, i) {
@@ -1027,8 +1209,8 @@ static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
return val;
}
-static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
- char *msg, u32 poll_cnt)
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
{
u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
if (val != 0) {
@@ -1038,7 +1220,8 @@ static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
return 0;
}
-static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+/* Common routines with VF FLR cleanup */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
/* adjust polling timeout */
if (CHIP_REV_IS_EMUL(bp))
@@ -1050,7 +1233,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
return FLR_POLL_CNT;
}
-static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
struct pbf_pN_cmd_regs cmd_regs[] = {
{0, (CHIP_IS_E3B0(bp)) ?
@@ -1125,10 +1308,9 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
-static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
- u32 poll_cnt)
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
- struct sdm_op_gen op_gen = {0};
+ u32 op_gen_command = 0;
u32 comp_addr = BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
@@ -1139,19 +1321,20 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return 1;
}
- op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
- op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
- op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
- op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+ op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
- REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
BNX2X_ERR("FW final cleanup did not succeed\n");
DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
(REG_RD(bp, comp_addr)));
- ret = 1;
+ bnx2x_panic();
+ return 1;
}
/* Zero completion for nxt FLR */
REG_WR(bp, comp_addr, 0);
@@ -1159,7 +1342,7 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return ret;
}
-static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
u16 status;
@@ -1371,26 +1554,31 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
if (msix) {
val &= ~(IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN);
if (single_msix)
val |= IGU_PF_CONF_SINGLE_ISR_EN;
} else if (msi) {
val &= ~IGU_PF_CONF_INT_LINE_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
} else {
val &= ~IGU_PF_CONF_MSI_MSIX_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_INT_LINE_EN |
+ val |= (IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
}
+ /* Clean previous status - need to configure IGU prior to ack */
+ if ((!msix) || single_msix) {
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ bnx2x_ack_int(bp);
+ }
+
+ val |= IGU_PF_CONF_FUNC_EN;
+
DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
@@ -1425,71 +1613,6 @@ void bnx2x_int_enable(struct bnx2x *bp)
bnx2x_igu_int_enable(bp);
}
-static void bnx2x_hc_int_disable(struct bnx2x *bp)
-{
- int port = BP_PORT(bp);
- u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
- u32 val = REG_RD(bp, addr);
-
- /*
- * in E1 we must use only PCI configuration space to disable
- * MSI/MSIX capablility
- * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
- */
- if (CHIP_IS_E1(bp)) {
- /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
- * Use mask register to prevent from HC sending interrupts
- * after we exit the function
- */
- REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
-
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
- } else
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-
- DP(NETIF_MSG_IFDOWN,
- "write %x to HC %d (addr 0x%x)\n",
- val, port, addr);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, addr, val);
- if (REG_RD(bp, addr) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_igu_int_disable(struct bnx2x *bp)
-{
- u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
-
- val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
- IGU_PF_CONF_INT_LINE_EN |
- IGU_PF_CONF_ATTN_BIT_EN);
-
- DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
- if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_int_disable(struct bnx2x *bp)
-{
- if (bp->common.int_block == INT_BLOCK_HC)
- bnx2x_hc_int_disable(bp);
- else
- bnx2x_igu_int_disable(bp);
-}
-
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -1575,11 +1698,11 @@ static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
}
/**
- * bnx2x_trylock_leader_lock- try to aquire a leader lock.
+ * bnx2x_trylock_leader_lock- try to acquire a leader lock.
*
* @bp: driver handle
*
- * Tries to aquire a leader lock for current engine.
+ * Tries to acquire a leader lock for current engine.
*/
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
@@ -1588,6 +1711,24 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
+static int bnx2x_schedule_sp_task(struct bnx2x *bp)
+{
+ /* Set the interrupt occurred bit for the sp-task to recognize it
+ * must ack the interrupt and transition according to the IGU
+ * state machine.
+ */
+ atomic_set(&bp->interrupt_occurred, 1);
+
+ /* The sp_task must execute only after this bit
+ * is set, otherwise we will get out of sync and miss all
+ * further interrupts. Hence, the barrier.
+ */
+ smp_wmb();
+
+ /* schedule sp_task to workqueue */
+ return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+}
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
@@ -1602,6 +1743,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
fp->index, cid, command, bp->state,
rr_cqe->ramrod_cqe.ramrod_type);
+ /* If cid is within VF range, replace the slowpath object with the
+ * one corresponding to this VF
+ */
+ if (cid >= BNX2X_FIRST_VF_CID &&
+ cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
+ bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
+
switch (command) {
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
@@ -1653,6 +1801,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
+ /* SRIOV: reschedule any 'in_progress' operations */
+ bnx2x_iov_sp_event(bp, cid, true);
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
@@ -1669,7 +1819,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
* mark pending ACK to MCP bit.
* prevent case that both bits are cleared.
* At the end of load/unload driver checks that
- * sp_state is cleaerd, and this order prevents
+ * sp_state is cleared, and this order prevents
* races
*/
smp_mb__before_clear_bit();
@@ -1678,22 +1828,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
smp_mb__after_clear_bit();
- /* schedule workqueue to send ack to MCP */
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule the sp task as mcp ack is required */
+ bnx2x_schedule_sp_task(bp);
}
return;
}
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
-{
- u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
-
- bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
- start);
-}
-
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
struct bnx2x *bp = netdev_priv(dev_instance);
@@ -1734,21 +1875,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
if (status & (mask | 0x1)) {
struct cnic_ops *c_ops = NULL;
- if (likely(bp->state == BNX2X_STATE_OPEN)) {
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops)
- c_ops->cnic_handler(bp->cnic_data,
- NULL);
- rcu_read_unlock();
- }
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops && (bp->cnic_eth_dev.drv_state &
+ CNIC_DRV_STATE_HANDLES_IRQ))
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
status &= ~mask;
}
}
if (unlikely(status & 0x1)) {
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
status &= ~0x1;
if (!status)
@@ -2448,23 +2591,55 @@ void bnx2x__link_status_update(struct bnx2x *bp)
return;
/* read updated dcb configuration */
- bnx2x_dcbx_pmf_update(bp);
-
- bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (IS_PF(bp)) {
+ bnx2x_dcbx_pmf_update(bp);
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ /* indicate link status */
+ bnx2x_link_report(bp);
- if (bp->link_vars.link_up)
+ } else { /* VF */
+ bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ bp->port.advertising[0] = bp->port.supported[0];
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = BP_PORT(bp);
+ bp->link_params.req_duplex[0] = DUPLEX_FULL;
+ bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_line_speed[0] = SPEED_10000;
+ bp->link_params.speed_cap_mask[0] = 0x7f0000;
+ bp->link_params.switch_cfg = SWITCH_CFG_10G;
+ bp->link_vars.mac_type = MAC_TYPE_BMAC;
+ bp->link_vars.line_speed = SPEED_10000;
+ bp->link_vars.link_status =
+ (LINK_STATUS_LINK_UP |
+ LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
+ bp->link_vars.link_up = 1;
+ bp->link_vars.duplex = DUPLEX_FULL;
+ bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ __bnx2x_link_report(bp);
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
- else
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* indicate link status */
- bnx2x_link_report(bp);
+ }
}
static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
u16 vlan_val, u8 allowed_prio)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_update_params *f_update_params =
&func_params.params.afex_update;
@@ -2489,7 +2664,7 @@ static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
u16 vif_index, u8 func_bit_map)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_viflists_params *update_params =
&func_params.params.afex_viflists;
int rc;
@@ -2505,7 +2680,7 @@ static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
/* set parameters according to cmd_type */
update_params->afex_vif_list_command = cmd_type;
- update_params->vif_list_index = cpu_to_le16(vif_index);
+ update_params->vif_list_index = vif_index;
update_params->func_bit_map =
(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
update_params->func_to_clear = 0;
@@ -2789,6 +2964,10 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+#ifdef BNX2X_STOP_ON_ERROR
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
+#endif
+
return flags;
}
@@ -2864,15 +3043,12 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
pause->sge_th_hi + FW_PREFETCH_CNT >
MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
- tpa_agg_size = min_t(u32,
- (min_t(u32, 8, MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ tpa_agg_size = TPA_AGG_SIZE;
max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
SGE_PAGE_SHIFT;
max_sge = ((max_sge + PAGES_PER_SGE - 1) &
(~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
- sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
- 0xffff);
+ sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
}
/* pause - not for e1 */
@@ -2917,7 +3093,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* Maximum number of simultaneous TPA aggregations for this Queue.
*
- * For PF Clients it should be the maximum avaliable number.
+ * For PF Clients it should be the maximum available number.
* VF driver(s) may want to define it to a smaller value.
*/
rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
@@ -3011,7 +3187,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
if (bp->port.pmf)
storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- /* init Event Queue */
+ /* init Event Queue - PCI bus guarantees correct endianness */
eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
eq_data.producer = bp->eq_prod;
@@ -3101,65 +3277,75 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_statistics_params *fw_fcoe_stat =
&bp->fw_stats_data->fcoe;
- ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
+ fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_ucast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_bcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_mcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
- ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
+ fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->ucast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->bcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->mcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
}
/* ask L5 driver to add data to the struct */
@@ -3630,7 +3816,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
"Please contact OEM Support for assistance\n");
/*
- * Scheudle device reset (unload)
+ * Schedule device reset (unload)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
@@ -3780,6 +3966,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_DRV_INFO_REQ)
bnx2x_handle_drv_info_req(bp);
+
+ if (val & DRV_STATUS_VF_DISABLED)
+ bnx2x_vf_handle_flr_event(bp);
+
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -4576,8 +4766,8 @@ static void bnx2x_attn_int(struct bnx2x *bp)
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
- u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
+ u32 igu_addr = bp->igu_base_addr;
+ igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
igu_addr);
}
@@ -4605,7 +4795,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
cid);
- bnx2x_panic_dump(bp);
+ bnx2x_panic_dump(bp, false);
}
bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
@@ -4647,7 +4837,8 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
/* Always push next commands out, don't wait here */
__set_bit(RAMROD_CONT, &ramrod_flags);
- switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
+ >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -4724,7 +4915,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
struct bnx2x_queue_update_params *q_update_params =
&queue_params.params.update;
- /* Send Q update command with afex vlan removal values for all Qs */
+ /* Send Q update command with afex vlan removal values for all Qs */
queue_params.cmd = BNX2X_Q_CMD_UPDATE;
/* set silent vlan removal values according to vlan mode */
@@ -4798,7 +4989,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
u8 echo;
u32 cid;
u8 opcode;
- int spqe_cnt = 0;
+ int rc, spqe_cnt = 0;
struct bnx2x_queue_sp_obj *q_obj;
struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
@@ -4826,15 +5017,27 @@ static void bnx2x_eq_int(struct bnx2x *bp)
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
-
elem = &bp->eq_ring[EQ_DESC(sw_cons)];
- cid = SW_CID(elem->message.data.cfc_del_event.cid);
- opcode = elem->message.opcode;
+ rc = bnx2x_iov_eq_sp_event(bp, elem);
+ if (!rc) {
+ DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
+ rc);
+ goto next_spqe;
+ }
+ /* elem CID originates from FW; actually LE */
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
/* handle eq element */
switch (opcode) {
+ case EVENT_RING_OPCODE_VF_PF_CHANNEL:
+ DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
+ bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+ continue;
+
case EVENT_RING_OPCODE_STAT_QUERY:
DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
"got statistics comp event %d\n",
@@ -5000,50 +5203,65 @@ next_spqe:
static void bnx2x_sp_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
- u16 status;
- status = bnx2x_update_dsb_idx(bp);
-/* if (status == 0) */
-/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+ DP(BNX2X_MSG_SP, "sp task invoked\n");
- DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
+ /* make sure the atomic interrupt_occurred has been written */
+ smp_rmb();
+ if (atomic_read(&bp->interrupt_occurred)) {
- /* HW attentions */
- if (status & BNX2X_DEF_SB_ATT_IDX) {
- bnx2x_attn_int(bp);
- status &= ~BNX2X_DEF_SB_ATT_IDX;
- }
+ /* what work needs to be performed? */
+ u16 status = bnx2x_update_dsb_idx(bp);
+
+ DP(BNX2X_MSG_SP, "status %x\n", status);
+ DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
+ atomic_set(&bp->interrupt_occurred, 0);
- /* SP events: STAT_QUERY and others */
- if (status & BNX2X_DEF_SB_IDX) {
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
if (FCOE_INIT(bp) &&
- (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
- /*
- * Prevent local bottom-halves from running as
- * we are going to change the local NAPI list.
- */
- local_bh_disable();
- napi_schedule(&bnx2x_fcoe(bp, napi));
- local_bh_enable();
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /* Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
+
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
}
- /* Handle EQ completions */
- bnx2x_eq_int(bp);
+ /* if status is non-zero then perhaps something went wrong */
+ if (unlikely(status))
+ DP(BNX2X_MSG_SP,
+ "got an unknown interrupt! (status 0x%x)\n", status);
- bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
- le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+ /* ack status block only if something was actually handled */
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
- status &= ~BNX2X_DEF_SB_IDX;
}
- if (unlikely(status))
- DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
- status);
-
- bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
- le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+ /* must be called after the EQ processing (since eq leads to sriov
+ * ramrod completion flows).
+ * This flow may have been scheduled by the arrival of a ramrod
+ * completion, or by the sriov code rescheduling itself.
+ */
+ bnx2x_iov_sp_task(bp);
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
@@ -5076,7 +5294,10 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
rcu_read_unlock();
}
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
return IRQ_HANDLED;
}
@@ -5090,7 +5311,6 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-
static void bnx2x_timer(unsigned long data)
{
struct bnx2x *bp = (struct bnx2x *) data;
@@ -5098,7 +5318,8 @@ static void bnx2x_timer(unsigned long data)
if (!netif_running(bp->dev))
return;
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
u32 drv_pulse;
u32 mcp_pulse;
@@ -5125,6 +5346,10 @@ static void bnx2x_timer(unsigned long data)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+ /* sample pf vf bulletin board for new posts from pf */
+ if (IS_VF(bp))
+ bnx2x_sample_bulletin(bp);
+
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -5267,7 +5492,7 @@ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}
-static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -5323,7 +5548,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
- /* write indecies to HW */
+ /* write indices to HW - PCI guarantees endianness of regpairs */
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
@@ -5411,6 +5636,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
bnx2x_zero_sp_sb(bp);
+ /* PCI guarantees endianness of regpairs */
sp_sb_data.state = SB_ENABLED;
sp_sb_data.host_sb_addr.lo = U64_LO(section);
sp_sb_data.host_sb_addr.hi = U64_HI(section);
@@ -5467,13 +5693,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-
/* called with netif_addr_lock_bh() */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5503,22 +5728,21 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
rc = bnx2x_config_rx_mode(bp, &ramrod_param);
if (rc < 0) {
BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
- return;
+ return rc;
}
+
+ return 0;
}
-/* called with netif_addr_lock_bh() */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
+ unsigned long *rx_accept_flags,
+ unsigned long *tx_accept_flags)
{
- unsigned long rx_mode_flags = 0, ramrod_flags = 0;
- unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
-
- if (!NO_FCOE(bp))
+ /* Clear the flags first */
+ *rx_accept_flags = 0;
+ *tx_accept_flags = 0;
- /* Configure rx_mode of FCoE Queue */
- __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-
- switch (bp->rx_mode) {
+ switch (rx_mode) {
case BNX2X_RX_MODE_NONE:
/*
* 'drop all' supersedes any accept flags that may have been
@@ -5526,25 +5750,25 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
*/
break;
case BNX2X_RX_MODE_NORMAL:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_ALLMULTI:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_PROMISC:
@@ -5552,36 +5776,57 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
- __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
if (IS_MF_SI(bp))
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
else
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
break;
default:
- BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
- return;
+ BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
+ return -EINVAL;
}
+ /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
+ return 0;
+}
+
+/* called with netif_addr_lock_bh() */
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ int rc;
+
+ if (!NO_FCOE(bp))
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+
+ rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
+ &tx_accept_flags);
+ if (rc)
+ return rc;
+
__set_bit(RAMROD_RX, &ramrod_flags);
__set_bit(RAMROD_TX, &ramrod_flags);
- bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
- tx_accept_flags, ramrod_flags);
+ return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
+ rx_accept_flags, tx_accept_flags,
+ ramrod_flags);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
@@ -5688,6 +5933,13 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
cids[cos] = fp->txdata_ptr[cos]->cid;
}
+ /* nothing more for vf to do here */
+ if (IS_VF(bp))
+ return;
+
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_update_fpsb_idx(fp);
bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
@@ -5697,13 +5949,10 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
*/
bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
- bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
- fp->fw_sb_id, fp->igu_sb_id);
-
- bnx2x_update_fpsb_idx(fp);
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
}
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
@@ -5775,17 +6024,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
for_each_eth_queue(bp, i)
bnx2x_init_eth_fp(bp, i);
+
+ /* ensure status block indices were read */
+ rmb();
+ bnx2x_init_rx_rings(bp);
+ bnx2x_init_tx_rings(bp);
+
+ if (IS_VF(bp))
+ return;
+
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
bp->common.shmem_base, bp->common.shmem2_base,
BP_PORT(bp));
- /* ensure status block indices were read */
- rmb();
bnx2x_init_def_sb(bp);
bnx2x_update_dsb_idx(bp);
- bnx2x_init_rx_rings(bp);
- bnx2x_init_tx_rings(bp);
bnx2x_init_sp_ring(bp);
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
@@ -6225,49 +6479,6 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
-static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
-{
- u32 offset = 0;
-
- if (CHIP_IS_E1(bp))
- return;
- if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
- return;
-
- switch (BP_ABS_FUNC(bp)) {
- case 0:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
- break;
- case 1:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
- break;
- case 2:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
- break;
- case 3:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
- break;
- case 4:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
- break;
- case 5:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
- break;
- case 6:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
- break;
- case 7:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
- break;
- default:
- return;
- }
-
- REG_WR(bp, offset, pretend_func_num);
- REG_RD(bp, offset);
- DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
-}
-
void bnx2x_pf_disable(struct bnx2x *bp)
{
u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
@@ -6311,7 +6522,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
/*
- * take the UNDI lock to protect undi_unload flow from accessing
+ * take the RESET lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
@@ -6441,7 +6652,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* queues with "old" ILT addresses.
* c. PF enable in the PGLC.
* d. Clear the was_error of the PF in the PGLC. (could have
- * occured while driver was down)
+ * occurred while driver was down)
* e. PF enable in the CFC (WEAK + STRONG)
* f. Timers scan enable
* 3. PF driver unload flow:
@@ -6482,7 +6693,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
/* Step 1: set zeroes to all ilt page entries with valid bit on
* Step 2: set the timers first/last ilt entry to point
* to the entire range to prevent ILT range error for 3rd/4th
- * vnic (this code assumes existance of the vnic)
+ * vnic (this code assumes existence of the vnic)
*
* both steps performed by call to bnx2x_ilt_client_init_op()
* with dummy TM client
@@ -6499,7 +6710,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
}
-
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
@@ -6524,6 +6734,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+ bnx2x_iov_init_dmae(bp);
+
/* clean the DMAE memory */
bp->dmae_ready = 1;
bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
@@ -6980,7 +7192,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
}
}
-
/* If SPIO5 is set to generate interrupts, enable it for this port */
val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
if (val & MISC_SPIO_SPIO5) {
@@ -7009,15 +7220,14 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
REG_WR_DMAE(bp, reg, wb_write, 2);
}
-static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
- u8 idu_sb_id, bool is_Pf)
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
u32 data, ctl, cnt = 100;
u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
- u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+ u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
@@ -7208,8 +7418,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
/* FLR cleanup - hmmm */
if (!CHIP_IS_E1x(bp)) {
rc = bnx2x_pf_flr_clnup(bp);
- if (rc)
+ if (rc) {
+ bnx2x_fw_dump(bp);
return rc;
+ }
}
/* set MSI reconfigure capability */
@@ -7226,12 +7438,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
ilt = BP_ILT(bp);
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+ if (IS_SRIOV(bp))
+ cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
+ cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
+
+ /* since BNX2X_FIRST_VF_CID > 0 the PF L2 CIDs precede
+ * those of the VFs, so the start line should be reset
+ */
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
bp->context[i].cxt_mapping;
ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
+
bnx2x_ilt_init_op(bp, INITOP_SET);
if (!CONFIGURE_NIC_MODE(bp)) {
@@ -7304,6 +7525,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+ bnx2x_iov_init_dq(bp);
+
bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
bnx2x_init_block(bp, BLOCK_PRS, init_phase);
bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
@@ -7512,10 +7736,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
- /* fastpath */
- bnx2x_free_fp_mem(bp);
- /* end of fastpath */
-
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
@@ -7536,69 +7756,11 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-}
-
-static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
-{
- int num_groups;
- int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
-
- /* number of queues for statistics is number of eth queues + FCoE */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
-
- /* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
- * num of queues
- */
- bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
-
-
- /* Request is built from stats_query_header and an array of
- * stats_query_cmd_group each of which contains
- * STATS_QUERY_CMD_COUNT rules. The real number or requests is
- * configured in the stats_query_header.
- */
- num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
- (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
-
- bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
- num_groups * sizeof(struct stats_query_cmd_group);
-
- /* Data for statistics requests + stats_conter
- *
- * stats_counter holds per-STORM counters that are incremented
- * when STORM has finished with the current request.
- *
- * memory for FCoE offloaded statistics are counted anyway,
- * even if they will not be sent.
- */
- bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
- sizeof(struct per_pf_stats) +
- sizeof(struct fcoe_statistics_params) +
- sizeof(struct per_queue_stats) * num_queue_stats +
- sizeof(struct stats_counter);
-
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
-
- /* Set shortcuts */
- bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
- bp->fw_stats_req_mapping = bp->fw_stats_mapping;
-
- bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
- ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
-
- bp->fw_stats_data_mapping = bp->fw_stats_mapping +
- bp->fw_stats_req_sz;
- return 0;
-alloc_mem_err:
- BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
- BNX2X_ERR("Can't allocate memory\n");
- return -ENOMEM;
+ bnx2x_iov_free_mem(bp);
}
+
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
if (!CHIP_IS_E1x(bp))
@@ -7644,10 +7806,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- /* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
- goto alloc_mem_err;
-
/* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
* functions because CDU differs in few aspects:
@@ -7676,6 +7834,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
+ if (bnx2x_iov_alloc_mem(bp))
+ goto alloc_mem_err;
+
/* Slow path ring */
BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
@@ -7683,13 +7844,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-
- /* fastpath */
- /* need to be done at the end, since it's self adjusting to amount
- * of memory available for RSS queues
- */
- if (bnx2x_alloc_fp_mem(bp))
- goto alloc_mem_err;
return 0;
alloc_mem_err:
@@ -7792,43 +7946,53 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-void bnx2x_set_int_mode(struct bnx2x *bp)
+int bnx2x_set_int_mode(struct bnx2x *bp)
{
+ int rc = 0;
+
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+ return -EINVAL;
+
switch (int_mode) {
- case INT_MODE_MSI:
+ case BNX2X_INT_MODE_MSIX:
+ /* attempt to enable msix */
+ rc = bnx2x_enable_msix(bp);
+
+ /* msix attained */
+ if (!rc)
+ return 0;
+
+ /* vfs use only msix */
+ if (rc && IS_VF(bp))
+ return rc;
+
+ /* failed to enable multiple MSI-X */
+ BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
+ bp->num_queues,
+ 1 + bp->num_cnic_queues);
+
+ /* falling through... */
+ case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
+
/* falling through... */
- case INT_MODE_INTx:
+ case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* if we can't use MSI-X we only need one fp,
- * so try to enable MSI-X with the requested number of fp's
- * and fallback to MSI or legacy INTx with one fp
- */
- if (bnx2x_enable_msix(bp) ||
- bp->flags & USING_SINGLE_MSIX_FLAG) {
- /* failed to enable multiple MSI-X */
- BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
- bp->num_queues,
- 1 + bp->num_cnic_queues);
-
- bp->num_queues = 1 + bp->num_cnic_queues;
-
- /* Try to enable MSI */
- if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
- !(bp->flags & DISABLE_MSI_FLAG))
- bnx2x_enable_msi(bp);
- }
- break;
+ BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
+ return -EINVAL;
}
+ return 0;
}
-/* must be called prioir to any HW initializations */
+/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
+ if (IS_SRIOV(bp))
+ return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
return L2_ILT_LINES(bp);
}
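/* Illustrative sketch (not part of the patch): the SR-IOV branch above sizes
 * the CDU ILT so it covers both the PF and the VF CID ranges.  The constant
 * values here are assumed purely for the arithmetic; the real definitions
 * live elsewhere in the driver headers.
 */
enum {
	EX_FIRST_VF_CID  = 256,	/* assumed: first CID handed to VFs       */
	EX_VF_CIDS       = 512,	/* assumed: CIDs reserved for all VFs     */
	EX_ILT_PAGE_CIDS = 64,	/* assumed: CIDs covered by one ILT page  */
};

static inline unsigned int ex_cid_ilt_lines_sriov(void)
{
	/* (256 + 512) / 64 = 12 ILT lines in this made-up example */
	return (EX_FIRST_VF_CID + EX_VF_CIDS) / EX_ILT_PAGE_CIDS;
}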
@@ -8211,8 +8375,8 @@ static void bnx2x_reset_func(struct bnx2x *bp)
/* SP SB */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
- SB_DISABLED);
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@ -8513,7 +8677,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
}
/* Give HW time to discard old tx messages */
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Clean all ETH MACs */
rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
@@ -8551,6 +8715,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
netif_addr_unlock_bh(bp->dev);
+ bnx2x_iov_chip_cleanup(bp);
/*
@@ -8678,7 +8843,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
(!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
(val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
} else {
- /* Prevent incomming interrupts in IGU */
+ /* Prevent incoming interrupts in IGU */
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
@@ -8936,7 +9101,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
if (pend_bits == 0)
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -8953,8 +9118,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
int cnt = 1000;
u32 val = 0;
u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
- u32 tags_63_32 = 0;
-
+ u32 tags_63_32 = 0;
/* Empty the Tetris buffer, wait for 1s */
do {
@@ -8972,7 +9136,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
(pgl_exp_rom2 == 0xffffffff) &&
(!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -9005,7 +9169,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Prepare to chip reset: */
/* MCP */
@@ -9288,8 +9452,10 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
rtnl_lock();
- if (!netif_running(bp->dev))
- goto sp_rtnl_exit;
+ if (!netif_running(bp->dev)) {
+ rtnl_unlock();
+ return;
+ }
/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
@@ -9308,7 +9474,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_parity_recover(bp);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
@@ -9322,7 +9489,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
@@ -9340,13 +9508,33 @@ sp_rtnl_not_reset:
DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
netif_device_detach(bp->dev);
bnx2x_close(bp->dev);
+ rtnl_unlock();
+ return;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set mcast vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_set_mcast(bp->dev);
}
-sp_rtnl_exit:
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+
+ /* work which needs rtnl lock not-taken (as it takes the lock itself and
+ * can be called from other contexts as well)
+ */
rtnl_unlock();
-}
-/* end of nic load/unload */
+ /* enable SR-IOV if applicable */
+ if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
+ &bp->sp_rtnl_state))
+ bnx2x_enable_sriov(bp);
+}
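/* Illustrative sketch (not part of the patch): the deferral pattern the
 * sp_rtnl task above relies on.  Work that must sleep (e.g. waiting for the
 * PF's reply on the VF-PF channel) cannot run from the atomic callers, so
 * those callers only mark a bit and kick the sp_rtnl task, which then does
 * the sleeping call in process context.  BNX2X_SP_RTNL_VFPF_MCAST and
 * bp->sp_rtnl_state are the fields this patch adds; the helper name below
 * is made up for the example.
 */
static void ex_request_mcast_config(struct bnx2x *bp)
{
	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}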
static void bnx2x_period_task(struct work_struct *work)
{
@@ -9383,49 +9571,26 @@ period_task_exit:
* Init service functions
*/
-static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
return base + (BP_ABS_FUNC(bp)) * stride;
}
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
-{
- u32 reg = bnx2x_get_pretend_reg(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Pretend to be function 0 */
- REG_WR(bp, reg, 0);
- REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
-
- /* From now we are in the "like-E1" mode */
- bnx2x_int_disable(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Restore the original function */
- REG_WR(bp, reg, BP_ABS_FUNC(bp));
- REG_RD(bp, reg);
-}
-
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
-{
- if (CHIP_IS_E1(bp))
- bnx2x_int_disable(bp);
- else
- bnx2x_undi_int_disable_e1h(bp);
-}
-
-static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+ struct bnx2x_mac_vals *vals)
{
u32 val, base_addr, offset, mask, reset_reg;
bool mac_stopped = false;
u8 port = BP_PORT(bp);
+ /* reset addresses as they also mark which values were changed */
+ vals->bmac_addr = 0;
+ vals->umac_addr = 0;
+ vals->xmac_addr = 0;
+ vals->emac_addr = 0;
+
reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
if (!CHIP_IS_E3(bp)) {
@@ -9447,14 +9612,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
*/
wb_data[0] = REG_RD(bp, base_addr + offset);
wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+ vals->bmac_addr = base_addr + offset;
+ vals->bmac_val[0] = wb_data[0];
+ vals->bmac_val[1] = wb_data[1];
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR(bp, base_addr + offset, wb_data[0]);
- REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+ REG_WR(bp, vals->bmac_addr, wb_data[0]);
+ REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
}
BNX2X_DEV_INFO("Disable emac Rx\n");
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
-
+ vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+ vals->emac_val = REG_RD(bp, vals->emac_addr);
+ REG_WR(bp, vals->emac_addr, 0);
mac_stopped = true;
} else {
if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -9465,14 +9634,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
val & ~(1 << 1));
REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
val | (1 << 1));
- REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+ vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+ vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+ REG_WR(bp, vals->xmac_addr, 0);
mac_stopped = true;
}
mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
if (mask & reset_reg) {
BNX2X_DEV_INFO("Disable umac Rx\n");
base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+ vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val = REG_RD(bp, vals->umac_addr);
+ REG_WR(bp, vals->umac_addr, 0);
mac_stopped = true;
}
}
@@ -9632,11 +9805,13 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
+ BNX2X_DEV_INFO("Path is unmarked\n");
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_test_firmware_version(bp, false);
+ rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
if (!rc) {
/* fw version is good */
@@ -9664,12 +9839,16 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
{
u32 reset_reg, tmp_reg = 0, rc;
bool prev_undi = false;
+ struct bnx2x_mac_vals mac_vals;
+
/* It is possible a previous function received 'common' answer,
* but hasn't loaded yet, therefore creating a scenario of
* multiple functions receiving 'common' on the same path.
*/
BNX2X_DEV_INFO("Common unload Flow\n");
+ memset(&mac_vals, 0, sizeof(mac_vals));
+
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
@@ -9680,12 +9859,14 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
u32 timer_count = 1000;
/* Close the MAC Rx to prevent BRB from filling up */
- bnx2x_prev_unload_close_mac(bp);
+ bnx2x_prev_unload_close_mac(bp, &mac_vals);
+
+ /* close LLH filters towards the BRB */
+ bnx2x_set_rx_filter(&bp->link_params, 0);
/* Check if the UNDI driver was previously loaded
* UNDI driver initializes CID offset for normal bell to 0x7
*/
- reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (tmp_reg == 0x7) {
@@ -9693,6 +9874,8 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
prev_undi = true;
/* clear the UNDI indication */
REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
}
/* wait until BRB is empty */
@@ -9727,6 +9910,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* No packets are in the pipeline, path is ready for reset */
bnx2x_reset_common(bp);
+ if (mac_vals.xmac_addr)
+ REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+ if (mac_vals.umac_addr)
+ REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+ if (mac_vals.emac_addr)
+ REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+ if (mac_vals.bmac_addr) {
+ REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+ REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+ }
+
rc = bnx2x_prev_mark_path(bp, prev_undi);
if (rc) {
bnx2x_prev_mcp_done(bp);
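/* Illustrative sketch (not part of the patch): the save/restore idea behind
 * struct bnx2x_mac_vals used in the hunk above.  Before the common reset the
 * MAC control registers are zeroed to close Rx; the original contents (and
 * which registers were touched) are remembered so they can be written back
 * once the reset completes.  The helper names here are invented.
 */
struct ex_saved_reg {
	u32 addr;	/* 0 means "was not touched"    */
	u32 val;	/* original contents to restore */
};

static void ex_close_and_remember(struct bnx2x *bp, struct ex_saved_reg *s,
				  u32 reg)
{
	s->addr = reg;
	s->val = REG_RD(bp, reg);
	REG_WR(bp, reg, 0);		/* close the MAC */
}

static void ex_restore(struct bnx2x *bp, const struct ex_saved_reg *s)
{
	if (s->addr)			/* restore only what was changed */
		REG_WR(bp, s->addr, s->val);
}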
@@ -9748,7 +9942,8 @@ static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
if (!CHIP_IS_E1x(bp)) {
u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ DP(BNX2X_MSG_SP,
+ "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
1 << BP_FUNC(bp));
}
@@ -9790,7 +9985,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
-
do {
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
@@ -10357,10 +10551,10 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
- mac_hi = cpu_to_be16(mac_hi);
- mac_lo = cpu_to_be32(mac_lo);
- memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
- memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+ __be16 mac_hi_be = cpu_to_be16(mac_hi);
+ __be32 mac_lo_be = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
+ memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
}
static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
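/* Illustrative worked example (not part of the patch) of what the fixed
 * bnx2x_set_mac_buf() above produces.  Shmem stores the MAC as a 16-bit "hi"
 * word and a 32-bit "lo" word; converting each to big endian and laying them
 * out back to back gives the address in wire order.  The values are made up.
 *
 *   mac_hi = 0x001b, mac_lo = 0x21aabbcc
 *
 *   cpu_to_be16(0x001b)     -> bytes 00 1b
 *   cpu_to_be32(0x21aabbcc) -> bytes 21 aa bb cc
 *
 *   mac_buf[] = 00 1b 21 aa bb cc   (i.e. 00:1b:21:aa:bb:cc)
 */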
@@ -10396,6 +10590,13 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
+ bp->flags |= NO_ISCSI_FLAG;
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
+ bp->flags |= NO_FCOE_FLAG;
+
BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
bp->link_params.speed_cap_mask[0],
@@ -10503,21 +10704,21 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
/* Port info */
bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_upper);
bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_lower);
/* Node info */
bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_upper);
bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_lower);
} else if (!IS_MF_SD(bp)) {
/*
@@ -10615,7 +10816,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
/* Zero primary MAC configuration */
memset(bp->dev->dev_addr, 0, ETH_ALEN);
- if (IS_MF_FCOE_AFEX(bp))
+ if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
/* use FIP MAC as primary MAC */
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
@@ -10678,7 +10879,6 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
}
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
@@ -10743,7 +10943,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
tout--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
@@ -11081,9 +11281,13 @@ static int bnx2x_init_bp(struct bnx2x *bp)
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
- rc = bnx2x_get_hwinfo(bp);
- if (rc)
- return rc;
+ if (IS_PF(bp)) {
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+ } else {
+ random_ether_addr(bp->dev->dev_addr);
+ }
bnx2x_set_modes_bitmap(bp);
@@ -11096,7 +11300,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* init fw_seq */
bp->fw_seq =
SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
@@ -11133,6 +11337,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+ if (IS_VF(bp))
+ bp->rx_ring_size = MAX_RX_AVAIL;
/* make sure that the numbers are in the right granularity */
bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -11161,12 +11367,18 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->cnic_base_cl_id = FP_SB_MAX_E2;
/* multiple tx priority */
- if (CHIP_IS_E1x(bp))
+ if (IS_VF(bp))
+ bp->max_cos = 1;
+ else if (CHIP_IS_E1x(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
- if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
- if (CHIP_IS_E3B0(bp))
+ else if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ else
+ BNX2X_ERR("unknown chip %x revision %x\n",
+ CHIP_NUM(bp), CHIP_REV(bp));
+ BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
/* We need at least one default status block for slow-path events,
* second status block for the L2 queue, and a third status block for
@@ -11190,6 +11402,26 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
+static int bnx2x_open_epilog(struct bnx2x *bp)
+{
+ /* Enable sriov via delayed work. This must be done via delayed work
+ * because it causes the probe of the vf devices to be run, which invokes
+ * register_netdevice, which must have the rtnl lock taken. As we are holding
+ * the lock right now, that could only work if the probe would not take
+ * the lock. However, as the probe of the vf may be called from other
+ * contexts as well (such as passthrough to vm fails) it can't assume
+ * the lock is being held for it. Using delayed work here allows the
+ * probe code to simply take the lock (i.e. wait for it to be released
+ * if it is being held).
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
+ return 0;
+}
+
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11197,6 +11429,7 @@ static int bnx2x_open(struct net_device *dev)
bool global = false;
int other_engine = BP_PATH(bp) ? 0 : 1;
bool other_load_status, load_status;
+ int rc;
bp->stats_init = true;
@@ -11204,53 +11437,57 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
- other_load_status = bnx2x_get_load_status(bp, other_engine);
- load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
-
- /*
- * If parity had happen during the unload, then attentions
+ /* If parity had happened during the unload, then attentions
* and/or RECOVERY_IN_PROGRES may still be set. In this case we
* want the first function loaded on the current engine to
* complete the recovery.
+ * Parity recovery is only relevant for PF driver.
*/
- if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
- bnx2x_chk_parity_attn(bp, &global, true))
- do {
- /*
- * If there are attentions and they are in a global
- * blocks, set the GLOBAL_RESET bit regardless whether
- * it will be this function that will complete the
- * recovery or not.
- */
- if (global)
- bnx2x_set_reset_global(bp);
+ if (IS_PF(bp)) {
+ other_load_status = bnx2x_get_load_status(bp, other_engine);
+ load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true)) {
+ do {
+ /* If there are attentions and they are in a
+ * global blocks, set the GLOBAL_RESET bit
+ * regardless whether it will be this function
+ * that will complete the recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
- /*
- * Only the first function on the current engine should
- * try to recover in open. In case of attentions in
- * global blocks only the first in the chip should try
- * to recover.
- */
- if ((!load_status &&
- (!global || !other_load_status)) &&
- bnx2x_trylock_leader_lock(bp) &&
- !bnx2x_leader_reset(bp)) {
- netdev_info(bp->dev, "Recovered in open\n");
- break;
- }
+ /* Only the first function on the current
+ * engine should try to recover in open. In case
+ * of attentions in global blocks only the first
+ * in the chip should try to recover.
+ */
+ if ((!load_status &&
+ (!global || !other_load_status)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev,
+ "Recovered in open\n");
+ break;
+ }
- /* recovery has failed... */
- bnx2x_set_power_state(bp, PCI_D3hot);
- bp->recovery_state = BNX2X_RECOVERY_FAILED;
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
- BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
- "If you still see this message after a few retries then power cycle is required.\n");
+ BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+ "If you still see this message after a few retries then power cycle is required.\n");
- return -EAGAIN;
- } while (0);
+ return -EAGAIN;
+ } while (0);
+ }
+ }
bp->recovery_state = BNX2X_RECOVERY_DONE;
- return bnx2x_nic_load(bp, LOAD_OPEN);
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+ if (rc)
+ return rc;
+ return bnx2x_open_epilog(bp);
}
/* called with rtnl_lock */
@@ -11384,7 +11621,6 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
return rc;
}
-
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
void bnx2x_set_rx_mode(struct net_device *dev)
{
@@ -11405,12 +11641,25 @@ void bnx2x_set_rx_mode(struct net_device *dev)
CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else {
- /* some multicasts */
- if (bnx2x_set_mc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ if (IS_PF(bp)) {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
- if (bnx2x_set_uc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_PROMISC;
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ } else {
+ /* configuring mcast to a vf involves sleeping (when we
+ * wait for the pf's response). Since this function is
+ * called from a non-sleepable context, we must schedule
+ * a work item for this purpose
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
}
bp->rx_mode = rx_mode;
@@ -11424,7 +11673,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
return;
}
- bnx2x_set_storm_rx_mode(bp);
+ if (IS_PF(bp)) {
+ bnx2x_set_storm_rx_mode(bp);
+ } else {
+ /* configuring rx mode to storms in a vf involves sleeping (when
+ * we wait for the pf's response). Since this function is
+ * called from a non-sleepable context, we must schedule
+ * a work item for this purpose
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
}
/* called with rtnl_lock */
@@ -11527,7 +11789,9 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_poll_controller = poll_bnx2x,
#endif
.ndo_setup_tc = bnx2x_setup_tc,
-
+#ifdef CONFIG_BNX2X_SRIOV
+ .ndo_set_vf_mac = bnx2x_set_vf_mac,
+#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
@@ -11551,10 +11815,9 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
-static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
- unsigned long board_type)
+static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+ struct net_device *dev, unsigned long board_type)
{
- struct bnx2x *bp;
int rc;
u32 pci_cfg_dword;
bool chip_is_e1x = (board_type == BCM57710 ||
@@ -11562,11 +11825,9 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
board_type == BCM57711E);
SET_NETDEV_DEV(dev, &pdev->dev);
- bp = netdev_priv(dev);
bp->dev = dev;
bp->pdev = pdev;
- bp->flags = 0;
rc = pci_enable_device(pdev);
if (rc) {
@@ -11582,9 +11843,8 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
goto err_out_disable;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&bp->pdev->dev, "Cannot find second PCI device"
- " base address, aborting\n");
+ if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11609,12 +11869,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
pci_save_state(pdev);
}
- bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (bp->pm_cap == 0) {
- dev_err(&bp->pdev->dev,
- "Cannot find power management capability, aborting\n");
- rc = -EIO;
- goto err_out_release;
+ if (IS_PF(bp)) {
+ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (bp->pm_cap == 0) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
}
if (!pci_is_pcie(pdev)) {
@@ -11646,13 +11908,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
* support Physical Device Assignment where kernel BDF maybe arbitrary
* (depending on hypervisor).
*/
- if (chip_is_e1x)
+ if (chip_is_e1x) {
bp->pf_num = PCI_FUNC(pdev->devfn);
- else {/* chip is E2/3*/
+ } else {
+ /* chip is E2/3*/
pci_read_config_dword(bp->pdev,
PCICFG_ME_REGISTER, &pci_cfg_dword);
bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
- ME_REG_ABS_PF_NUM_SHIFT);
+ ME_REG_ABS_PF_NUM_SHIFT);
}
BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
@@ -11665,25 +11928,28 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
*/
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+ if (IS_PF(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+ if (chip_is_e1x) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
- if (chip_is_e1x) {
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!chip_is_e1x)
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
- /*
- * Enable internal target-read (in case we are probed after PF FLR).
- * Must be done prior to any BAR read access. Only for 57712 and up
- */
- if (!chip_is_e1x)
- REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
-
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11734,8 +12000,9 @@ err_out:
static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
{
- u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+ u32 val = 0;
+ pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
/* return value of 1=2.5GHz 2=5GHz */
@@ -11748,7 +12015,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
struct bnx2x_fw_file_hdr *fw_hdr;
struct bnx2x_fw_file_section *sections;
u32 offset, len, num_ops;
- u16 *ops_offsets;
+ __be16 *ops_offsets;
int i;
const u8 *fw_ver;
@@ -11773,7 +12040,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Likewise for the init_ops offsets */
offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
- ops_offsets = (u16 *)(firmware->data + offset);
+ ops_offsets = (__force __be16 *)(firmware->data + offset);
num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
@@ -12000,8 +12267,12 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
int cid_count = BNX2X_L2_MAX_CID(bp);
+ if (IS_SRIOV(bp))
+ cid_count += BNX2X_VF_CIDS;
+
if (CNIC_SUPPORT(bp))
cid_count += CNIC_CID_MAX;
+
return roundup(cid_count, QM_CID_ROUND);
}
@@ -12012,10 +12283,10 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
*
*/
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt)
+ int cnic_cnt, bool is_vf)
{
- int pos;
- u16 control;
+ int pos, index;
+ u16 control = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
@@ -12023,85 +12294,114 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos)
+ if (!pos) {
+ dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
+ }
+ dev_info(&pdev->dev, "msix capability found\n");
/*
* The value in the PCI configuration space is the index of the last
* entry, namely one less than the actual size of the table, which is
* exactly what we want to return from this function: number of all SBs
* without the default SB.
+ * For VFs there is no default SB, so we return (index + 1).
*/
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
- return control & PCI_MSIX_FLAGS_QSIZE;
-}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
+ index = control & PCI_MSIX_FLAGS_QSIZE;
-static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *dev = NULL;
- struct bnx2x *bp;
- int pcie_width, pcie_speed;
- int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count, doorbell_size;
- int cnic_cnt;
- /*
- * An estimated maximum supported CoS number according to the chip
- * version.
- * We will try to roughly estimate the maximum number of CoSes this chip
- * may support in order to minimize the memory allocated for Tx
- * netdev_queue's. This number will be accurately calculated during the
- * initialization of bp->max_cos based on the chip versions AND chip
- * revision in the bnx2x_init_bp().
- */
- u8 max_cos_est = 0;
+ return is_vf ? index + 1 : index;
+}
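/* Illustrative sketch (not part of the patch): what the QSIZE field means for
 * the return value above.  The value read from the MSI-X capability is
 * "table size - 1", so e.g. control & PCI_MSIX_FLAGS_QSIZE == 16 means a
 * 17-entry table: the PF keeps 16 (everything except the default SB), while a
 * VF, which has no default SB, gets all 17.  The helper name is invented.
 */
static int ex_num_non_def_sbs(u16 control, bool is_vf)
{
	int index = control & PCI_MSIX_FLAGS_QSIZE;	/* table size - 1 */

	return is_vf ? index + 1 : index;	/* 17 vs. 16 in the example */
}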
- switch (ent->driver_data) {
+static int set_max_cos_est(int chip_id)
+{
+ switch (chip_id) {
case BCM57710:
case BCM57711:
case BCM57711E:
- max_cos_est = BNX2X_MULTI_TX_COS_E1X;
- break;
-
+ return BNX2X_MULTI_TX_COS_E1X;
case BCM57712:
case BCM57712_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
- break;
-
+ case BCM57712_VF:
+ return BNX2X_MULTI_TX_COS_E2_E3A0;
case BCM57800:
case BCM57800_MF:
+ case BCM57800_VF:
case BCM57810:
case BCM57810_MF:
- case BCM57840_O:
case BCM57840_4_10:
case BCM57840_2_20:
+ case BCM57840_O:
case BCM57840_MFO:
+ case BCM57810_VF:
case BCM57840_MF:
+ case BCM57840_VF:
case BCM57811:
case BCM57811_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
- break;
-
+ case BCM57811_VF:
+ return BNX2X_MULTI_TX_COS_E3B0;
+ return 1;
default:
- pr_err("Unknown board_type (%ld), aborting\n",
- ent->driver_data);
+ pr_err("Unknown board_type (%d), aborting\n", chip_id);
return -ENODEV;
}
+}
+
+static int set_is_vf(int chip_id)
+{
+ switch (chip_id) {
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
- cnic_cnt = 1;
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+static int bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ int pcie_width, pcie_speed;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count, doorbell_size;
+ int max_cos_est;
+ bool is_vf;
+ int cnic_cnt;
+
+ /* An estimated maximum supported CoS number according to the chip
+ * version.
+ * We will try to roughly estimate the maximum number of CoSes this chip
+ * may support in order to minimize the memory allocated for Tx
+ * netdev_queue's. This number will be accurately calculated during the
+ * initialization of bp->max_cos based on the chip versions AND chip
+ * revision in the bnx2x_init_bp().
+ */
+ max_cos_est = set_max_cos_est(ent->driver_data);
+ if (max_cos_est < 0)
+ return max_cos_est;
+ is_vf = set_is_vf(ent->driver_data);
+ cnic_cnt = is_vf ? 0 : 1;
- WARN_ON(!max_non_def_sbs);
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = max_non_def_sbs - cnic_cnt;
+ rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+
+ if (rss_count < 1)
+ return -EINVAL;
/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
rx_count = rss_count + cnic_cnt;
- /*
- * Maximum number of netdev Tx queues:
+ /* Maximum number of netdev Tx queues:
* Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
tx_count = rss_count * max_cos_est + cnic_cnt;
@@ -12113,42 +12413,55 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp = netdev_priv(dev);
+ bp->flags = 0;
+ if (is_vf)
+ bp->flags |= IS_VF_FLAG;
+
bp->igu_sb_cnt = max_non_def_sbs;
+ bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
bp->msg_enable = debug;
bp->cnic_support = cnic_cnt;
bp->cnic_probe = bnx2x_cnic_probe;
pci_set_drvdata(pdev, dev);
- rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+ rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
if (rc < 0) {
free_netdev(dev);
return rc;
}
+ BNX2X_DEV_INFO("This is a %s function\n",
+ IS_PF(bp) ? "physical" : "virtual");
BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
- BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
-
+ BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
+ tx_count, rx_count);
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
- /*
- * Map doorbels here as we need the real value of bp->max_cos which
- * is initialized in bnx2x_init_bp().
+ /* Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp() to determine the number of
+ * l2 connections.
*/
- doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
- if (doorbell_size > pci_resource_len(pdev, 2)) {
- dev_err(&bp->pdev->dev,
- "Cannot map doorbells, bar size too small, aborting\n");
- rc = -ENOMEM;
- goto init_one_exit;
+ if (IS_VF(bp)) {
+ bnx2x_vf_map_doorbells(bp);
+ rc = bnx2x_vf_pci_alloc(bp);
+ if (rc)
+ goto init_one_exit;
+ } else {
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ doorbell_size);
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -12156,8 +12469,25 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_one_exit;
}
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
+ if (rc)
+ goto init_one_exit;
+ }
+
+ /* Enable SRIOV if capability found in configuration space.
+ * Once the generic SR-IOV framework makes it in from the
+ * pci tree this will be revised, to allow dynamic control
+ * over the number of VFs. Right now, change the num of vfs
+ * param below to enable SR-IOV.
+ */
+ rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ if (rc)
+ goto init_one_exit;
+
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+ BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
/* disable FCOE L2 queue for E1x*/
if (CHIP_IS_E1x(bp))
@@ -12179,13 +12509,20 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Configure interrupt mode: try to enable MSI-X/MSI if
* needed.
*/
- bnx2x_set_int_mode(bp);
+ rc = bnx2x_set_int_mode(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot set interrupts\n");
+ goto init_one_exit;
+ }
+ BNX2X_DEV_INFO("set interrupts successfully\n");
+ /* register the net device */
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
goto init_one_exit;
}
+ BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
if (!NO_FCOE(bp)) {
@@ -12196,6 +12533,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+ BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
+ pcie_width, pcie_speed);
BNX2X_DEV_INFO(
"%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
@@ -12213,7 +12552,7 @@ init_one_exit:
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
+ if (IS_PF(bp) && bp->doorbells)
iounmap(bp->doorbells);
free_netdev(dev);
@@ -12253,25 +12592,37 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
/* Power on: we can't let PCI layer write to us while we are in D3 */
- bnx2x_set_power_state(bp, PCI_D0);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D0);
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
/* Power off */
- bnx2x_set_power_state(bp, PCI_D3hot);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D3hot);
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->sp_rtnl_task);
+ bnx2x_iov_remove_one(bp);
+
+ /* send message via vfpf channel to release the resources of this vf */
+ if (IS_VF(bp))
+ bnx2x_vfpf_release(bp);
+
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
- iounmap(bp->doorbells);
-
- bnx2x_release_firmware(bp);
+ /* for vf doorbells are part of the regview and were unmapped along with
+ * it. FW is only loaded by PF.
+ */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+ bnx2x_release_firmware(bp);
+ }
bnx2x_free_mem_bp(bp);
free_netdev(dev);
@@ -13059,4 +13410,36 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ u32 offset = BAR_USTRORM_INTMEM;
+ if (IS_VF(bp))
+ return bnx2x_vf_ustorm_prods_offset(bp, fp);
+ else if (!CHIP_IS_E1x(bp))
+ offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+
+ return offset;
+}
+
+/* called only on E1H or E2.
+ * When pretending to be PF, the pretend value is the function number 0...7
+ * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
+ * combination
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
+{
+ u32 pretend_reg;
+
+ if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
+ return -1;
+
+ /* get my own pretend register */
+ pretend_reg = bnx2x_get_pretend_reg(bp);
+ REG_WR(bp, pretend_reg, pretend_func_val);
+ REG_RD(bp, pretend_reg);
+ return 0;
+}
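/* Illustrative sketch (not part of the patch): how bnx2x_pretend_func() is
 * meant to be used -- the same pretend/restore dance the removed
 * bnx2x_undi_int_disable_e1h() did by hand.  Register accesses between the
 * two calls are attributed to the pretended function; the helper name and
 * choice of function 0 are for the example only.
 */
static void ex_do_as_function_zero(struct bnx2x *bp)
{
	if (bnx2x_pretend_func(bp, 0))
		return;			/* pretend value not supported */

	/* ... GRC accesses here are issued on behalf of function 0 ... */

	/* drop the disguise: pretend to be ourselves again */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}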
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index ddd5106ad2f9..caf1aef651eb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,6 @@
/* bnx2x_mfw_req.h: Broadcom Everest network driver.
*
- * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bc2f65b32649..791eb2d53011 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
/* bnx2x_reg.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -825,6 +825,7 @@
/* [RW 28] The value sent to CM header in the case of CFC load error. */
#define DORQ_REG_ERR_CMHEAD 0x170058
#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec
#define DORQ_REG_MODE_ACT 0x170008
/* [RW 5] The normal mode CID extraction offset. */
#define DORQ_REG_NORM_CID_OFST 0x17002c
@@ -847,6 +848,22 @@
writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
read reads this written value. */
#define DORQ_REG_RSP_INIT_CRD 0x170048
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0
+#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4
+#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT 0x1701e4
+#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8
+/* [RW 10] VF type validation mask value */
+#define DORQ_REG_VF_TYPE_MASK_0 0x170218
+/* [RW 17] VF type validation Max MCID value */
+#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8
+/* [RW 17] VF type validation Min MCID value */
+#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298
+/* [RW 10] VF type validation comp value */
+#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
+#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
@@ -859,6 +876,7 @@
#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
+#define DORQ_REG_VF_USAGE_CNT 0x170320
#define HC_REG_AGG_INT_0 0x108050
#define HC_REG_AGG_INT_1 0x108054
#define HC_REG_ATTN_BIT 0x108120
@@ -2136,6 +2154,8 @@
/* [R 32] Interrupt register #0 read */
#define NIG_REG_NIG_INT_STS_0 0x103b0
#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [RC 32] Interrupt register #0 read clear */
+#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4
/* [R 32] Legacy E1 and E1H location for parity error mask register. */
#define NIG_REG_NIG_PRTY_MASK 0x103dc
/* [RW 32] Parity mask register #0 read/write */
@@ -2571,6 +2591,7 @@
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
#define PBF_REG_DISABLE_PF 0x1402e8
+#define PBF_REG_DISABLE_VF 0x1402ec
/* [RW 18] For port 0: For each client that is subject to WFQ (the
* corresponding bit is 1); indicates to which of the credit registers this
* client is mapped. For clients which are not credit blocked; their mapping
@@ -3708,6 +3729,10 @@
#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
/* [WB 160] Used for initialization of the inbound interrupts memory */
#define PXP_REG_HST_INBOUND_INT 0x103800
+/* [RW 7] Indirect access to the permission table. The fields are : {Valid;
+ * VFID[5:0]}
+ */
+#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400
/* [RW 32] Interrupt mask register #0 read/write */
#define PXP_REG_PXP_INT_MASK_0 0x103074
#define PXP_REG_PXP_INT_MASK_1 0x103084
@@ -5966,6 +5991,7 @@
#define HW_LOCK_RESOURCE_SPIO 2
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
@@ -6305,6 +6331,15 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
+/* First VF_NUM for PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on Total Number of VFs \
+ * programmed for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits
+ */
#define PXPCS_TL_CONTROL_5 0x814
#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
@@ -6554,6 +6589,27 @@
(7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
+#define PXP_VF_ADDR_IGU_START 0
+#define PXP_VF_ADDR_IGU_SIZE 0x3000
+#define PXP_VF_ADDR_IGU_END\
+ ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000
+#define PXP_VF_ADDR_USDM_QUEUES_SIZE\
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_USDM_QUEUES_END\
+ ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600
+#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_CSDM_GLOBAL_END\
+ ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_DB_START 0x7c00
+#define PXP_VF_ADDR_DB_SIZE 0x200
+#define PXP_VF_ADDR_DB_END\
+ ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1)
+
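/* Illustrative summary (not part of the patch) of the VF BAR window the
 * constants above describe, with the end addresses worked out from the
 * start + size - 1 expressions:
 *
 *   0x0000 - 0x2fff   IGU           (PXP_VF_ADDR_IGU_*)
 *   0x3000 - ...      USDM queues   (size depends on PXP_VF_ADRR_NUM_QUEUES,
 *                                    defined outside this patch)
 *   0x7600 - ...      CSDM global   (size PXP_ADDR_REG_SIZE, likewise)
 *   0x7c00 - 0x7dff   doorbells     (PXP_VF_ADDR_DB_*)
 */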
#define MDIO_REG_BANK_CL73_IEEEB0 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 09b625e0fdaa..7306416bc90d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,6 +1,6 @@
/* bnx2x_sp.c: Broadcom Everest network driver.
*
- * Copyright (c) 2011-2012 Broadcom Corporation
+ * Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -325,7 +325,7 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
return 0;
}
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
if (bp->panic)
return -EIO;
@@ -707,7 +707,8 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
struct eth_classify_header *hdr, int rule_cnt)
{
- hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+ hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
hdr->rule_cnt = (u8)rule_cnt;
}
@@ -813,8 +814,9 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
hdr->length = 1;
hdr->offset = (u8)cam_offset;
- hdr->client_id = 0xff;
- hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+ hdr->client_id = cpu_to_le16(0xff);
+ hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
}
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
@@ -903,7 +905,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
@@ -953,7 +955,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
@@ -1407,7 +1409,7 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
/* Wait until there are no pending commands */
if (!bnx2x_exe_queue_empty(exeq))
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
else
return 0;
}
@@ -1442,7 +1444,7 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
if (cqe->message.error)
return -EINVAL;
- /* Run the next bulk of pending commands if requeted */
+ /* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) {
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
if (rc < 0)
@@ -1532,7 +1534,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
bool restore,
struct bnx2x_vlan_mac_registry_elem **re)
{
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
struct bnx2x_vlan_mac_registry_elem *reg_elem;
/* Allocate a new registry element if needed. */
@@ -1591,7 +1593,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
struct bnx2x_vlan_mac_registry_elem *reg_elem;
- int cmd;
+ enum bnx2x_vlan_mac_cmd cmd;
/*
* If DRIVER_ONLY execution is requested, cleanup a registry
@@ -2103,7 +2105,7 @@ static inline void __storm_memset_mac_filters(struct bnx2x *bp,
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
struct bnx2x_rx_mode_ramrod_params *p)
{
- /* update the bp MAC filter structure */
+ /* update the bp MAC filter structure */
u32 mask = (1 << p->cl_id);
struct tstorm_eth_mac_filter_config *mac_filters =
@@ -2166,7 +2168,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
mac_filters->unmatched_unicast & ~mask;
DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
- "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
mac_filters->bcast_accept_all);
@@ -2186,12 +2188,12 @@ static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
struct eth_classify_header *hdr,
u8 rule_cnt)
{
- hdr->echo = cid;
+ hdr->echo = cpu_to_le32(cid);
hdr->rule_cnt = rule_cnt;
}
static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
- unsigned long accept_flags,
+ unsigned long *accept_flags,
struct eth_filter_rules_cmd *cmd,
bool clear_accept_all)
{
@@ -2201,33 +2203,33 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- if (accept_flags) {
- if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
- state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
- }
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ }
- if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
- state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
- state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- }
- if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
- state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
+ state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ }
- if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
- }
- if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
- state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
}
+ if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+
/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
if (clear_accept_all) {
state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
@@ -2260,8 +2262,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_TX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
- &(data->rules[rule_idx++]), false);
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
}
/* Rx */
@@ -2272,8 +2275,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_RX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
- &(data->rules[rule_idx++]), false);
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
}
@@ -2293,9 +2297,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_TX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
- &(data->rules[rule_idx++]),
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx]),
true);
+ rule_idx++;
}
/* Rx */
@@ -2306,9 +2311,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_RX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
- &(data->rules[rule_idx++]),
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx]),
true);
+ rule_idx++;
}
}
@@ -2429,7 +2435,7 @@ static int bnx2x_mcast_wait(struct bnx2x *bp,
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
struct bnx2x_mcast_obj *o,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
int total_sz;
struct bnx2x_pending_mcast_cmd *new_cmd;
@@ -2561,7 +2567,7 @@ static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
union bnx2x_mcast_config_data *cfg_data,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *r = &o->raw;
struct eth_multicast_rules_ramrod_data *data =
@@ -2625,7 +2631,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e2(
int *rdata_idx)
{
int cur_bin, cnt = *rdata_idx;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
/* go through the registry and configure the bins from it */
for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
@@ -2657,7 +2663,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
{
struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
int cnt = *line_idx;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
link) {
@@ -2780,7 +2786,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
int *line_idx)
{
struct bnx2x_mcast_list_elem *mlist_pos;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
int cnt = *line_idx;
list_for_each_entry(mlist_pos, &p->mcast_list, link) {
@@ -2790,7 +2796,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- mlist_pos->mac);
+ mlist_pos->mac);
}
*line_idx = cnt;
@@ -2827,7 +2833,8 @@ static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
* Returns number of lines filled in the ramrod data in total.
*/
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd,
int start_cnt)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -2861,7 +2868,7 @@ static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
@@ -2930,8 +2937,9 @@ static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
struct eth_multicast_rules_ramrod_data *data =
(struct eth_multicast_rules_ramrod_data *)(r->rdata);
- data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
- (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
data->header.rule_cnt = len;
}
@@ -2965,7 +2973,7 @@ static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3051,7 +3059,7 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
/* Mark, that there is a work to do */
if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
@@ -3085,7 +3093,7 @@ static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
- mlist_pos->mac, bit);
+ mlist_pos->mac, bit);
/* bookkeeping... */
BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
@@ -3113,7 +3121,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
*/
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
int i;
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3167,7 +3175,7 @@ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
@@ -3240,7 +3248,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
union bnx2x_mcast_config_data *cfg_data,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *r = &o->raw;
struct mac_configuration_cmd *data =
@@ -3284,9 +3292,10 @@ static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
BNX2X_MAX_MULTICAST*(1 + r->func_id));
data->hdr.offset = offset;
- data->hdr.client_id = 0xff;
- data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
- (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->hdr.client_id = cpu_to_le16(0xff);
+ data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
data->hdr.length = len;
}
@@ -3309,7 +3318,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
{
struct bnx2x_mcast_mac_elem *elem;
int i = 0;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
/* go through the registry and configure the MACs from it. */
list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
@@ -3319,7 +3328,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
i++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- cfg_data.mac);
+ cfg_data.mac);
}
*rdata_idx = i;
@@ -3334,7 +3343,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
struct bnx2x_pending_mcast_cmd *cmd_pos;
struct bnx2x_mcast_mac_elem *pmac_pos;
struct bnx2x_mcast_obj *o = p->mcast_obj;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
int cnt = 0;
@@ -3355,7 +3364,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- pmac_pos->mac);
+ pmac_pos->mac);
}
break;
@@ -3458,7 +3467,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
struct bnx2x_raw_obj *raw = &o->raw;
@@ -3562,7 +3571,7 @@ static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
int bnx2x_config_mcast(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
struct bnx2x_raw_obj *r = &o->raw;
@@ -4085,8 +4094,8 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "Configuring RSS\n");
/* Set an echo field */
- data->echo = (r->cid & BNX2X_SWCID_MASK) |
- (r->state << BNX2X_SWCID_SHIFT);
+ data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (r->state << BNX2X_SWCID_SHIFT));
/* RSS mode */
if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
@@ -4237,11 +4246,16 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
unsigned long *pending = &o->pending;
/* Check that the requested transition is legal */
- if (o->check_transition(bp, o, params))
+ rc = o->check_transition(bp, o, params);
+ if (rc) {
+ BNX2X_ERR("check transition returned an error. rc %d\n", rc);
return -EINVAL;
+ }
/* Set "pending" bit */
+ DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
pending_bit = o->set_pending(o, params);
+ DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
/* Don't send a command if only driver cleanup was requested */
if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
@@ -5025,8 +5039,11 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
* Don't allow a next state transition if we are in the middle of
* the previous one.
*/
- if (o->pending)
+ if (o->pending) {
+ BNX2X_ERR("Blocking transition since pending was %lx\n",
+ o->pending);
return -EBUSY;
+ }
switch (state) {
case BNX2X_Q_STATE_RESET:
@@ -5199,6 +5216,27 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
obj->set_pending = bnx2x_queue_set_pending;
}
+/* return a queue object's logical state */
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj)
+{
+ switch (obj->state) {
+ case BNX2X_Q_STATE_ACTIVE:
+ case BNX2X_Q_STATE_MULTI_COS:
+ return BNX2X_Q_LOGICAL_STATE_ACTIVE;
+ case BNX2X_Q_STATE_RESET:
+ case BNX2X_Q_STATE_INITIALIZED:
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ case BNX2X_Q_STATE_INACTIVE:
+ case BNX2X_Q_STATE_STOPPED:
+ case BNX2X_Q_STATE_TERMINATED:
+ case BNX2X_Q_STATE_FLRED:
+ return BNX2X_Q_LOGICAL_STATE_STOPPED;
+ default:
+ return -EINVAL;
+ }
+}
+
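A minimal usage sketch of the new helper; the wrapper name vf_queue_is_up is hypothetical, and the same comparison is made directly in bnx2x_vfop_qctor() in the new bnx2x_sriov.c below:

	/* true when the queue object is in a logically-running state */
	static bool vf_queue_is_up(struct bnx2x *bp,
				   struct bnx2x_queue_sp_obj *q_obj)
	{
		return bnx2x_get_q_logical_state(bp, q_obj) ==
		       BNX2X_Q_LOGICAL_STATE_ACTIVE;
	}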
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o)
@@ -5631,9 +5669,9 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
/*
@@ -5716,21 +5754,20 @@ inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o = params->f_obj;
struct afex_vif_list_ramrod_data *rdata =
(struct afex_vif_list_ramrod_data *)o->afex_rdata;
- struct bnx2x_func_afex_viflists_params *afex_viflist_params =
+ struct bnx2x_func_afex_viflists_params *afex_vif_params =
&params->params.afex_viflists;
u64 *p_rdata = (u64 *)rdata;
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->vif_list_index = afex_viflist_params->vif_list_index;
- rdata->func_bit_map = afex_viflist_params->func_bit_map;
- rdata->afex_vif_list_command =
- afex_viflist_params->afex_vif_list_command;
- rdata->func_to_clear = afex_viflist_params->func_to_clear;
+ rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
+ rdata->func_bit_map = afex_vif_params->func_bit_map;
+ rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
+ rdata->func_to_clear = afex_vif_params->func_to_clear;
/* send in echo type of sub command */
- rdata->echo = afex_viflist_params->afex_vif_list_command;
+ rdata->echo = afex_vif_params->afex_vif_list_command;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index adbd91b1bdfc..ff907609b9fc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,6 +1,6 @@
/* bnx2x_sp.h: Broadcom Everest network driver.
*
- * Copyright (c) 2011-2012 Broadcom Corporation
+ * Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -54,7 +54,7 @@ typedef enum {
BNX2X_OBJ_TYPE_RX_TX,
} bnx2x_obj_type;
-/* Filtering states */
+/* Public slow path states */
enum {
BNX2X_FILTER_MAC_PENDING,
BNX2X_FILTER_VLAN_PENDING,
@@ -524,7 +524,7 @@ struct bnx2x_mcast_ramrod_params {
int mcast_list_len;
};
-enum {
+enum bnx2x_mcast_cmd {
BNX2X_MCAST_CMD_ADD,
BNX2X_MCAST_CMD_CONT,
BNX2X_MCAST_CMD_DEL,
@@ -573,7 +573,8 @@ struct bnx2x_mcast_obj {
* @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
*/
int (*config_mcast)(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/**
* Fills the ramrod data during the RESTORE flow.
@@ -590,11 +591,13 @@ struct bnx2x_mcast_obj {
int start_bin, int *rdata_idx);
int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
void (*set_one_rule)(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
- union bnx2x_mcast_config_data *cfg_data, int cmd);
+ union bnx2x_mcast_config_data *cfg_data,
+ enum bnx2x_mcast_cmd cmd);
/** Checks if there are more mcast MACs to be set or a previous
* command is still pending.
@@ -617,7 +620,8 @@ struct bnx2x_mcast_obj {
* feasible.
*/
int (*validate)(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/**
* Restore the values of internal counters in case of a failure.
@@ -776,6 +780,12 @@ enum bnx2x_q_state {
BNX2X_Q_STATE_MAX,
};
+/* Allowed Queue states */
+enum bnx2x_q_logical_state {
+ BNX2X_Q_LOGICAL_STATE_ACTIVE,
+ BNX2X_Q_LOGICAL_STATE_STOPPED,
+};
+
/* Allowed commands */
enum bnx2x_queue_cmd {
BNX2X_Q_CMD_INIT,
@@ -1261,6 +1271,9 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
int bnx2x_queue_state_change(struct bnx2x *bp,
struct bnx2x_queue_state_params *params);
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj);
+
/********************* VLAN-MAC ****************/
void bnx2x_init_mac_obj(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
@@ -1338,7 +1351,8 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* completions.
*/
int bnx2x_config_mcast(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/****************** CREDIT POOL ****************/
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
new file mode 100644
index 000000000000..6adfa2093581
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -0,0 +1,3198 @@
+/* bnx2x_sriov.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ *
+ */
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+/* General service functions */
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ int idx;
+
+ for_each_vf(bp, idx)
+ if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
+ break;
+ return idx;
+}
+
+static
+struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
+ return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
+}
+
+static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u8 igu_sb_id, u8 segment, u16 index, u8 op,
+ u8 update)
+{
+ /* acking a VF sb through the PF - use the GRC */
+ u32 ctl;
+ u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ u32 func_encode = vf->abs_vfid;
+ u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+ func_encode << IGU_CTRL_REG_FID_SHIFT |
+ IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ cmd_data.sb_id_and_flags, igu_addr_data);
+ REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
+ mmiowb();
+ barrier();
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ ctl, igu_addr_ctl);
+ REG_WR(bp, igu_addr_ctl, ctl);
+ mmiowb();
+ barrier();
+}
+/* VFOP - VF slow-path operation support */
+
+#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
+
+/* VFOP operations states */
+enum bnx2x_vfop_qctor_state {
+ BNX2X_VFOP_QCTOR_INIT,
+ BNX2X_VFOP_QCTOR_SETUP,
+ BNX2X_VFOP_QCTOR_INT_EN
+};
+
+enum bnx2x_vfop_qdtor_state {
+ BNX2X_VFOP_QDTOR_HALT,
+ BNX2X_VFOP_QDTOR_TERMINATE,
+ BNX2X_VFOP_QDTOR_CFCDEL,
+ BNX2X_VFOP_QDTOR_DONE
+};
+
+enum bnx2x_vfop_vlan_mac_state {
+ BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
+ BNX2X_VFOP_VLAN_MAC_CLEAR,
+ BNX2X_VFOP_VLAN_MAC_CHK_DONE,
+ BNX2X_VFOP_MAC_CONFIG_LIST,
+ BNX2X_VFOP_VLAN_CONFIG_LIST,
+ BNX2X_VFOP_VLAN_CONFIG_LIST_0
+};
+
+enum bnx2x_vfop_qsetup_state {
+ BNX2X_VFOP_QSETUP_CTOR,
+ BNX2X_VFOP_QSETUP_VLAN0,
+ BNX2X_VFOP_QSETUP_DONE
+};
+
+enum bnx2x_vfop_mcast_state {
+ BNX2X_VFOP_MCAST_DEL,
+ BNX2X_VFOP_MCAST_ADD,
+ BNX2X_VFOP_MCAST_CHK_DONE
+};
+enum bnx2x_vfop_qflr_state {
+ BNX2X_VFOP_QFLR_CLR_VLAN,
+ BNX2X_VFOP_QFLR_CLR_MAC,
+ BNX2X_VFOP_QFLR_TERMINATE,
+ BNX2X_VFOP_QFLR_DONE
+};
+
+enum bnx2x_vfop_flr_state {
+ BNX2X_VFOP_FLR_QUEUES,
+ BNX2X_VFOP_FLR_HW
+};
+
+enum bnx2x_vfop_close_state {
+ BNX2X_VFOP_CLOSE_QUEUES,
+ BNX2X_VFOP_CLOSE_HW
+};
+
+enum bnx2x_vfop_rxmode_state {
+ BNX2X_VFOP_RXMODE_CONFIG,
+ BNX2X_VFOP_RXMODE_DONE
+};
+
+enum bnx2x_vfop_qteardown_state {
+ BNX2X_VFOP_QTEARDOWN_RXMODE,
+ BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
+ BNX2X_VFOP_QTEARDOWN_CLR_MAC,
+ BNX2X_VFOP_QTEARDOWN_QDTOR,
+ BNX2X_VFOP_QTEARDOWN_DONE
+};
+
+#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
+
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->tx.sb_cq_index,
+ init_params->tx.hc_rate,
+ setup_params->flags,
+ setup_params->txq_params.traffic_type);
+}
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
+ "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->rx.sb_cq_index,
+ init_params->rx.hc_rate,
+ setup_params->gen_params.mtu,
+ rxq_params->buf_sz,
+ rxq_params->sge_buf_sz,
+ rxq_params->max_sges_pkt,
+ rxq_params->tpa_agg_sz,
+ setup_params->flags,
+ rxq_params->drop_flags,
+ rxq_params->cache_line_log);
+}
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vfop_qctor_params *p,
+ unsigned long q_type)
+{
+ struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
+ struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
+
+ /* INIT */
+
+ /* Enable host coalescing in the transition to INIT state */
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
+
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
+
+ /* FW SB ID */
+ init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+
+ /* context */
+ init_p->cxts[0] = q->cxt;
+
+ /* SETUP */
+
+ /* Setup-op general parameters */
+ setup_p->gen_params.spcl_id = vf->sp_cl_id;
+ setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+
+ /* Setup-op pause params:
+ * Nothing to do, the pause thresholds are set by default to 0 which
+ * effectively turns off the feature for this queue. We don't want
+ * one queue (VF) interfering with another queue (another VF)
+ */
+ if (vf->cfg_flags & VF_CFG_FW_FC)
+ BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
+ vf->abs_vfid);
+ /* Setup-op flags:
+ * collect statistics, zero statistics, local-switching, security,
+ * OV for Flex10, RSS and MCAST for leading
+ */
+ if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
+
+ /* for VFs, enable tx switching, bd coherency, and mac address
+ * anti-spoofing
+ */
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+
+ if (vfq_is_leading(q)) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
+ /* Setup-op rx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
+ struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
+
+ rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
+ rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
+ rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
+ }
+
+ /* Setup-op tx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
+ setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
+ setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ }
+}
+
+/* VFOP queue construction */
+static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
+ struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
+ enum bnx2x_vfop_qctor_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QCTOR_INIT:
+
+ /* has this queue already been opened? */
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ DP(BNX2X_MSG_IOV,
+ "Entered qctor but queue was already up. Aborting gracefully\n");
+ goto op_done;
+ }
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QCTOR_SETUP;
+
+ q_params->cmd = BNX2X_Q_CMD_INIT;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QCTOR_SETUP:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
+
+ /* copy pre-prepared setup params to the queue-state params */
+ vfop->op_p->qctor.qstate.params.setup =
+ vfop->op_p->qctor.prep_qsetup;
+
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QCTOR_INT_EN:
+
+ /* enable interrupts */
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
+ vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
+op_done:
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+
+ vfop->args.qctor.qid = qid;
+ vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
+ bnx2x_vfop_qctor, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue destruction */
+static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
+ struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
+ enum bnx2x_vfop_qdtor_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QDTOR_HALT:
+
+ /* has this queue already been stopped? */
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
+ DP(BNX2X_MSG_IOV,
+ "Entered qdtor but queue was already stopped. Aborting gracefully\n");
+ goto op_done;
+ }
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
+
+ q_params->cmd = BNX2X_Q_CMD_HALT;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QDTOR_TERMINATE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
+
+ q_params->cmd = BNX2X_Q_CMD_TERMINATE;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QDTOR_CFCDEL:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+ q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
+ vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QDTOR_DONE:
+ /* invalidate the context */
+ qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+ qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_queue_state_params *qstate =
+ &vf->op_params.qctor.qstate;
+
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+
+ vfop->args.qdtor.qid = qid;
+ vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
+ bnx2x_vfop_qdtor, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
+ cmd->block);
+ }
+ DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+ return -ENOMEM;
+}
+
+static void
+bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
+{
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ if (vf) {
+ if (!vf_sb_count(vf))
+ vf->igu_base_id = igu_sb_id;
+ ++vf_sb_count(vf);
+ }
+}
+
+/* VFOP MAC/VLAN helpers */
+static inline void bnx2x_vfop_credit(struct bnx2x *bp,
+ struct bnx2x_vfop *vfop,
+ struct bnx2x_vlan_mac_obj *obj)
+{
+ struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
+
+ /* update credit only if there is no error
+ * and a valid credit counter
+ */
+ if (!vfop->rc && args->credit) {
+ int cnt = 0;
+ struct list_head *pos;
+
+ list_for_each(pos, &obj->head)
+ cnt++;
+
+ atomic_set(args->credit, cnt);
+ }
+}
+
+static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
+ struct bnx2x_vfop_filter *pos,
+ struct bnx2x_vlan_mac_data *user_req)
+{
+ user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+
+ switch (pos->type) {
+ case BNX2X_VFOP_FILTER_MAC:
+ memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
+ break;
+ case BNX2X_VFOP_FILTER_VLAN:
+ user_req->u.vlan.vlan = pos->vid;
+ break;
+ default:
+ BNX2X_ERR("Invalid filter type, skipping\n");
+ return 1;
+ }
+ return 0;
+}
+
+static int
+bnx2x_vfop_config_vlan0(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
+ bool add)
+{
+ int rc;
+
+ vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+ vlan_mac->user_req.u.vlan.vlan = 0;
+
+ rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (rc == -EEXIST)
+ rc = 0;
+ return rc;
+}
+
+static int bnx2x_vfop_config_list(struct bnx2x *bp,
+ struct bnx2x_vfop_filters *filters,
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
+{
+ struct bnx2x_vfop_filter *pos, *tmp;
+ struct list_head rollback_list, *filters_list = &filters->head;
+ struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
+ int rc = 0, cnt = 0;
+
+ INIT_LIST_HEAD(&rollback_list);
+
+ list_for_each_entry_safe(pos, tmp, filters_list, link) {
+ if (bnx2x_vfop_set_user_req(bp, pos, user_req))
+ continue;
+
+ rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (rc >= 0) {
+ cnt += pos->add ? 1 : -1;
+ list_del(&pos->link);
+ list_add(&pos->link, &rollback_list);
+ rc = 0;
+ } else if (rc == -EEXIST) {
+ rc = 0;
+ } else {
+ BNX2X_ERR("Failed to add a new vlan_mac command\n");
+ break;
+ }
+ }
+
+ /* rollback if error or too many rules added */
+ if (rc || cnt > filters->add_cnt) {
+ BNX2X_ERR("error or too many rules added. Performing rollback\n");
+ list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
+ pos->add = !pos->add; /* reverse op */
+ bnx2x_vfop_set_user_req(bp, pos, user_req);
+ bnx2x_config_vlan_mac(bp, vlan_mac);
+ list_del(&pos->link);
+ }
+ cnt = 0;
+ if (!rc)
+ rc = -EINVAL;
+ }
+ filters->add_cnt = cnt;
+ return rc;
+}
+
+/* VFOP set VLAN/MAC */
+static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
+ struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
+ struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
+
+ enum bnx2x_vfop_vlan_mac_state state = vfop->state;
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ bnx2x_vfop_reset_wq(vf);
+
+ switch (state) {
+ case BNX2X_VFOP_VLAN_MAC_CLEAR:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do delete */
+ vfop->rc = obj->delete_all(bp, obj,
+ &vlan_mac->user_req.vlan_mac_flags,
+ &vlan_mac->ramrod_flags);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do config */
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (vfop->rc == -EEXIST)
+ vfop->rc = 0;
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
+ vfop->rc = !!obj->raw.check_pending(&obj->raw);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_MAC_CONFIG_LIST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do list config */
+ vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
+ if (vfop->rc)
+ goto op_err;
+
+ set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_CONFIG_LIST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
+
+ /* remove vlan0 - could be no-op */
+ vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
+ if (vfop->rc)
+ goto op_err;
+
+ /* Do the vlan list config. If this operation fails we try to
+ * restore vlan0 to keep the queue in working order
+ */
+ vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
+ if (!vfop->rc) {
+ set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ }
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
+
+ case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ if (list_empty(&obj->head))
+ /* add vlan0 */
+ vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
+op_done:
+ kfree(filters);
+ bnx2x_vfop_credit(bp, vfop, obj);
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+struct bnx2x_vfop_vlan_mac_flags {
+ bool drv_only;
+ bool dont_consume;
+ bool single_cmd;
+ bool add;
+};
+
+static void
+bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
+ struct bnx2x_vfop_vlan_mac_flags *flags)
+{
+ struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
+
+ memset(ramrod, 0, sizeof(*ramrod));
+
+ /* ramrod flags */
+ if (flags->drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
+ if (flags->single_cmd)
+ set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
+
+ /* mac_vlan flags */
+ if (flags->dont_consume)
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
+
+ /* cmd */
+ ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
+}
+
+static inline void
+bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
+ struct bnx2x_vfop_vlan_mac_flags *flags)
+{
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
+ set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
+}
+
+static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single */
+ .credit = NULL, /* consume credit */
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = false /* don't care */,
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *macs,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = macs,
+ .credit = NULL, /* consume credit */
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = false,
+ .add = false, /* don't care since only the items in the
+ * filters list affect the sp operation,
+ * not the list itself
+ */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+
+ /* set extra args */
+ filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single command */
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = false,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = add,
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+ ramrod->user_req.u.vlan.vlan = vid;
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single command */
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = false, /* don't care */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *vlans,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = vlans,
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = false,
+ .add = false, /* don't care */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
+ atomic_read(filters.credit);
+
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue setup (queue constructor + set vlan 0) */
+static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qctor.qid;
+ enum bnx2x_vfop_qsetup_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_qsetup,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QSETUP_CTOR:
+ /* init the queue ctor command */
+ vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
+ vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QSETUP_VLAN0:
+ /* skip if non-leading or FPGA/EMU */
+ if (qid)
+ goto op_done;
+
+ /* init the queue set-vlan command (for vlan 0) */
+ vfop->state = BNX2X_VFOP_QSETUP_DONE;
+ vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+op_err:
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QSETUP_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qctor.qid = qid;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
+ bnx2x_vfop_qsetup, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
+static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qx.qid;
+ enum bnx2x_vfop_qflr_state state = vfop->state;
+ struct bnx2x_queue_state_params *qstate;
+ struct bnx2x_vfop_cmd cmd;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ cmd.done = bnx2x_vfop_qflr;
+ cmd.block = false;
+
+ switch (state) {
+ case BNX2X_VFOP_QFLR_CLR_VLAN:
+ /* vlan-clear-all: driver-only, don't consume credit */
+ vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QFLR_CLR_MAC:
+ /* mac-clear-all: driver only, consume credit */
+ vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
+ vf->abs_vfid, vfop->rc);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QFLR_TERMINATE:
+ qstate = &vfop->op_p->qctor.qstate;
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ vfop->state = BNX2X_VFOP_QFLR_DONE;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
+ vf->abs_vfid, qstate->q_obj->state);
+
+ if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
+ qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
+ qstate->cmd = BNX2X_Q_CMD_TERMINATE;
+ vfop->rc = bnx2x_queue_state_change(bp, qstate);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
+ } else {
+ goto op_done;
+ }
+
+op_err:
+ BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QFLR_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qx.qid = qid;
+ bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
+ bnx2x_vfop_qflr, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP multi-casts */
+static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
+ struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
+ struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
+ enum bnx2x_vfop_mcast_state state = vfop->state;
+ int i;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_MCAST_DEL:
+ /* clear existing mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_ADD;
+ vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_MCAST_ADD:
+ if (raw->check_pending(raw))
+ goto op_pending;
+
+ if (args->mc_num) {
+ /* update mcast list on the ramrod params */
+ INIT_LIST_HEAD(&mcast->mcast_list);
+ for (i = 0; i < args->mc_num; i++)
+ list_add_tail(&(args->mc[i].link),
+ &mcast->mcast_list);
+ /* add new mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
+ vfop->rc = bnx2x_config_mcast(bp, mcast,
+ BNX2X_MCAST_CMD_ADD);
+ }
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_MCAST_CHK_DONE:
+ vfop->rc = raw->check_pending(raw) ? 1 : 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
+op_done:
+ kfree(args->mc);
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ bnx2x_mac_addr_t *mcasts,
+ int mcast_num, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = NULL;
+ size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
+ struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
+ NULL;
+
+ if (!mc_sz || mc) {
+ vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ int i;
+ struct bnx2x_mcast_ramrod_params *ramrod =
+ &vf->op_params.mcast;
+
+ /* set ramrod params */
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->mcast_obj = &vf->mcast_obj;
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY,
+ &ramrod->ramrod_flags);
+
+ /* copy mcasts pointers */
+ vfop->args.mc_list.mc_num = mcast_num;
+ vfop->args.mc_list.mc = mc;
+ for (i = 0; i < mcast_num; i++)
+ mc[i].mac = mcasts[i];
+
+ bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
+ bnx2x_vfop_mcast, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
+ cmd->block);
+ } else {
+ kfree(mc);
+ }
+ }
+ return -ENOMEM;
+}
+
+/* VFOP rx-mode */
+static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
+ enum bnx2x_vfop_rxmode_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_RXMODE_CONFIG:
+ /* next state */
+ vfop->state = BNX2X_VFOP_RXMODE_DONE;
+
+ vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_RXMODE_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_rx_mode_ramrod_params *ramrod =
+ &vf->op_params.rx_mode;
+
+ memset(ramrod, 0, sizeof(*ramrod));
+
+ /* Prepare ramrod parameters */
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata =
+ bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping =
+ bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
+ bnx2x_vfop_rxmode, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
+ * queue destructor)
+ */
+static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qx.qid;
+ enum bnx2x_vfop_qteardown_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd;
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ cmd.done = bnx2x_vfop_qdown;
+ cmd.block = false;
+
+ switch (state) {
+ case BNX2X_VFOP_QTEARDOWN_RXMODE:
+ /* Drop all */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+ vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
+ /* vlan-clear-all: don't consume credit */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
+ /* mac-clear-all: consume credit */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_QDTOR:
+ /* run the queue destruction flow */
+ DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
+ vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
+ DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
+ vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
+ DP(BNX2X_MSG_IOV, "returned from cmd\n");
+ if (vfop->rc)
+ goto op_err;
+ return;
+op_err:
+ BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, vfop->rc);
+
+ case BNX2X_VFOP_QTEARDOWN_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qx.qid = qid;
+ bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
+ bnx2x_vfop_qdown, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
+ cmd->block);
+ }
+
+ return -ENOMEM;
+}
+
+/* VF enable primitives
+ * When pretend is required, the caller is responsible
+ * for calling pretend prior to calling these routines.
+ */
+
+/* internal vf enable - until the vf is enabled internally all transactions
+ * are blocked. This routine should always be called last with pretend.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
+}
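A usage sketch of the pretend rule described above; this is the same bracket that bnx2x_vf_enable_access() applies further down (abs_vfid here stands for the VF being enabled):

	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));	/* pretend to the VF */
	bnx2x_vf_enable_internal(bp, true);			/* GRC write on its behalf */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));		/* restore our own function */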
+
+/* clears vf error in all semi blocks */
+static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
+}
+
+static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
+ u32 was_err_reg = 0;
+
+ switch (was_err_group) {
+ case 0:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
+ break;
+ case 1:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
+ break;
+ case 2:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
+ break;
+ case 3:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
+ break;
+ }
+ REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
+}
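To make the register/bit selection above concrete, a worked trace with assumed values: on path 0, abs_vfid = 40 gives was_err_group = (0 + 40) >> 5 = 1, so the clear is written to PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR with bit 40 & 0x1f = 8 set.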
+
+static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+ u32 val;
+
+ /* Set VF masks and configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
+ if (vf->cfg_flags & VF_CFG_INT_SIMD)
+ val |= IGU_VF_CONF_SINGLE_ISR_EN;
+ val &= ~IGU_VF_CONF_PARENT_MASK;
+ val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+
+ DP(BNX2X_MSG_IOV,
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
+ vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf_sb_count(vf); i++) {
+ u8 igu_sb_id = vf_igu_sb(vf, i);
+
+ /* zero prod memory */
+ REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
+
+ /* clear sb state machine */
+ bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
+ false /* VF */);
+
+ /* disable + update */
+ bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 1);
+ }
+}
+
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* set the VF-PF association in the FW */
+ storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
+ storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+
+ /* clear vf errors*/
+ bnx2x_vf_semi_clear_err(bp, abs_vfid);
+ bnx2x_vf_pglue_clear_err(bp, abs_vfid);
+
+ /* internal vf-enable - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+ DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
+ bnx2x_vf_enable_internal(bp, true);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* Reset the vf in the IGU - interrupts are still disabled */
+ bnx2x_vf_igu_reset(bp, vf);
+
+ /* pretend to enable the vf with the PBF */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ REG_WR(bp, PBF_REG_DISABLE_VF, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
+{
+ struct pci_dev *dev;
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf)
+ goto unknown_dev;
+
+ dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
+ if (dev)
+ return bnx2x_is_pcie_pending(dev);
+
+unknown_dev:
+ BNX2X_ERR("Unknown device\n");
+ return false;
+}
+
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* Wait 100ms */
+ msleep(100);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ return 0;
+}
+
+/* must be called after the number of PF queues and the number of VFs are
+ * both known
+ */
+static void
+bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+{
+ u16 vlan_count = 0;
+
+ /* will be set only during VF-ACQUIRE */
+ resc->num_rxqs = 0;
+ resc->num_txqs = 0;
+
+ /* no credit calculations for macs (just yet) */
+ resc->num_mac_filters = 1;
+
+ /* divvy up vlan rules */
+ vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
+ vlan_count = 1 << ilog2(vlan_count);
+ resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+
+ /* no real limitation */
+ resc->num_mc_filters = 0;
+
+ /* num_sbs already set */
+}
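A worked example of the vlan divvy-up above, with assumed numbers: a 96-entry vlan pool gives 1 << ilog2(96) = 64 rules, so with 8 VFs each VF is allotted 64 / 8 = 8 vlan filters.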
+
+/* FLR routines: */
+static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* reset the state variables */
+ bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ vf->state = VF_FREE;
+}
+
+static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ /* DQ usage counter */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
+ "DQ VF usage counter timed out",
+ poll_cnt);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* FW cleanup command - poll for the results */
+ if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
+ poll_cnt))
+ BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
+
+ /* verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+}
+
+static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
+ enum bnx2x_vfop_flr_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_flr,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_FLR_QUEUES:
+ /* the cleanup operations are valid if and only if the VF
+ * was first acquired.
+ */
+ if (++(qx->qid) < vf_rxq_count(vf)) {
+ vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
+ qx->qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ }
+ /* remove multicasts */
+ vfop->state = BNX2X_VFOP_FLR_HW;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
+ 0, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ case BNX2X_VFOP_FLR_HW:
+
+ /* dispatch final cleanup and wait for HW queues to flush */
+ bnx2x_vf_flr_clnup_hw(bp, vf);
+
+ /* release VF resources */
+ bnx2x_vf_free_resc(bp, vf);
+
+ /* re-open the mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ vf->flr_clnup_stage = VF_FLR_ACK;
+ bnx2x_vfop_end(bp, vf, vfop);
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+}
+
+static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ vfop_handler_t done)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ vfop->args.qx.qid = -1; /* loop */
+ bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
+ bnx2x_vfop_flr, done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
+ }
+ return -ENOMEM;
+}
+
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
+{
+ int i = prev_vf ? prev_vf->index + 1 : 0;
+ struct bnx2x_virtf *vf;
+
+ /* find next VF to cleanup */
+next_vf_to_clean:
+ for (;
+ i < BNX2X_NR_VIRTFN(bp) &&
+ (bnx2x_vf(bp, i, state) != VF_RESET ||
+ bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
+ i++)
+ ;
+
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i,
+ BNX2X_NR_VIRTFN(bp));
+
+ if (i < BNX2X_NR_VIRTFN(bp)) {
+ vf = BP_VF(bp, i);
+
+ /* lock the vf pf channel */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+ /* invoke the VF FLR SM */
+ if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
+ BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
+ vf->abs_vfid);
+
+ /* mark the VF to be ACKED and continue */
+ vf->flr_clnup_stage = VF_FLR_ACK;
+ goto next_vf_to_clean;
+ }
+ return;
+ }
+
+ /* we are done, update vf records */
+ for_each_vf(bp, i) {
+ vf = BP_VF(bp, i);
+
+ if (vf->flr_clnup_stage != VF_FLR_ACK)
+ continue;
+
+ vf->flr_clnup_stage = VF_FLR_EPILOG;
+ }
+
+ /* Acknowledge the handled VFs.
+ * We acknowledge all the VFs for which an FLR was requested, even
+ * those we never opened, since the MCP will interrupt us immediately
+ * again if we only ack some of the bits, resulting in an endless
+ * loop. This can happen for example in KVM where an 'all ones' FLR
+ * request is sometimes given by the hypervisor
+ */
+ DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+ bp->vfdb->flrd_vfs[i]);
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+ /* clear the acked bits - it would be better if the MCP implemented
+ * write-to-clear semantics
+ */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+ int i;
+
+ /* Read FLR'd VFs */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+ DP(BNX2X_MSG_MCP,
+ "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+ u32 reset = 0;
+
+ if (vf->abs_vfid < 32)
+ reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+ else
+ reset = bp->vfdb->flrd_vfs[1] &
+ (1 << (vf->abs_vfid - 32));
+
+ if (reset) {
+ /* set as reset and ready for cleanup */
+ vf->state = VF_RESET;
+ vf->flr_clnup_stage = VF_FLR_CLN;
+
+ DP(BNX2X_MSG_IOV,
+ "Initiating Final cleanup for VF %d\n",
+ vf->abs_vfid);
+ }
+ }
+
+ /* do the FLR cleanup for all marked VFs */
+ bnx2x_vf_flr_clnup(bp, NULL);
+}
+
+/* IOV global initialization routines */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* Set the DQ such that the CID reflects the abs_vfid */
+ REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+ REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+ /* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
+ * to the PF L2 queues
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
+
+ /* The VF window size is the log2 of the max number of CIDs per VF */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
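+ /* e.g. a window size of 4 would give each VF a contiguous range of
+ * 1 << 4 = 16 CIDs (illustrative value; the actual width is
+ * BNX2X_VF_CID_WND)
+ */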
+
+ /* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
+ * the PF doorbell size although the two are independent.
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
+ BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+
+ /* No security checks for now -
+ * configure single rule (out of 16) mask = 0x1, value = 0x0,
+ * CID range 0 - 0x1ffff
+ */
+ REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
+ REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
+
+ /* set the number of VF allowed doorbells to the full DQ range */
+ REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
+
+ /* set the VF doorbell threshold */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+}
+
+void bnx2x_iov_init_dmae(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
+ if (!IS_SRIOV(bp))
+ return;
+
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+}
+
+static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return dev->bus->number + ((dev->devfn + iov->offset +
+ iov->stride * vfid) >> 8);
+}
+
+static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
+}
+
+static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i, n;
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
+ u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
+ u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
+
+ size /= iov->total;
+ vf->bars[n].bar = start + size * vf->abs_vfid;
+ vf->bars[n].size = size;
+ }
+}
+
+static int bnx2x_ari_enabled(struct pci_dev *dev)
+{
+ return dev->bus->self && dev->bus->self->ari_enabled;
+}
+
+static void
+bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
+{
+ int sb_id;
+ u32 val;
+ u8 fid;
+
+ /* IGU in normal mode - read CAM */
+ for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
+ if (!(fid & IGU_FID_ENCODE_IS_PF))
+ bnx2x_vf_set_igu_info(bp, sb_id,
+ (fid & IGU_FID_VF_NUM_MASK));
+
+ DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
+ ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
+ ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
+ (fid & IGU_FID_VF_NUM_MASK)), sb_id,
+ GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
+ }
+}
+
+static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
+{
+ if (bp->vfdb) {
+ kfree(bp->vfdb->vfqs);
+ kfree(bp->vfdb->vfs);
+ kfree(bp->vfdb);
+ }
+ bp->vfdb = NULL;
+}
+
+static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ int pos;
+ struct pci_dev *dev = bp->pdev;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ BNX2X_ERR("failed to find SRIOV capability in device\n");
+ return -ENODEV;
+ }
+
+ iov->pos = pos;
+ DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
+ pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+ pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ return 0;
+}
+
+static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ u32 val;
+
+ /* read the SRIOV capability structure
+ * The fields can be read via configuration read or
+ * directly from the device (starting at offset PCICFG_OFFSET)
+ */
+ if (bnx2x_sriov_pci_cfg_info(bp, iov))
+ return -ENODEV;
+
+ /* get the number of SRIOV bars */
+ iov->nres = 0;
+
+ /* read the first_vfid */
+ val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
+ iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
+ * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
+
+ DP(BNX2X_MSG_IOV,
+ "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ BP_FUNC(bp),
+ iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
+ iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ return 0;
+}
+
+static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
+{
+ int i;
+ u8 queue_count = 0;
+
+ if (IS_SRIOV(bp))
+ for_each_vf(bp, i)
+ queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+
+ return queue_count;
+}
+
+/* must be called after PF bars are mapped */
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param)
+{
+ int err, i, qcount;
+ struct bnx2x_sriov *iov;
+ struct pci_dev *dev = bp->pdev;
+
+ bp->vfdb = NULL;
+
+ /* verify this is a PF */
+ if (IS_VF(bp))
+ return 0;
+
+ /* verify sriov capability is present in configuration space */
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
+ return 0;
+
+ /* verify chip revision */
+ if (CHIP_IS_E1x(bp))
+ return 0;
+
+ /* check if SRIOV support is turned off */
+ if (!num_vfs_param)
+ return 0;
+
+ /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
+ if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
+ BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
+ BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
+ return 0;
+ }
+
+ /* SRIOV can be enabled only with MSIX */
+ if (int_mode_param == BNX2X_INT_MODE_MSI ||
+ int_mode_param == BNX2X_INT_MODE_INTX)
+ BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+
+ err = -EIO;
+ /* verify ari is enabled */
+ if (!bnx2x_ari_enabled(bp->pdev)) {
+ BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
+ return err;
+ }
+
+ /* verify igu is in normal mode */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
+ return err;
+ }
+
+ /* allocate the vfs database */
+ bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
+ if (!bp->vfdb) {
+ BNX2X_ERR("failed to allocate vf database\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* get the sriov info - Linux already collected all the pertinent
+ * information, however the sriov structure is for the private use
+ * of the pci module. Also we want this information regardless
+ * of the hypervisor.
+ */
+ iov = &(bp->vfdb->sriov);
+ err = bnx2x_sriov_info(bp, iov);
+ if (err)
+ goto failed;
+
+ /* SR-IOV capability was enabled but there are no VFs */
+ if (iov->total == 0)
+ goto failed;
+
+ /* calculate the actual number of VFs */
+ iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+
+ /* allocate the vf array */
+ bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
+ BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
+ if (!bp->vfdb->vfs) {
+ BNX2X_ERR("failed to allocate vf array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
+ for_each_vf(bp, i) {
+ bnx2x_vf(bp, i, index) = i;
+ bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
+ bnx2x_vf(bp, i, state) = VF_FREE;
+ INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
+ mutex_init(&bnx2x_vf(bp, i, op_mutex));
+ bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+ }
+
+ /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ /* get the total queue count and allocate the global queue arrays */
+ qcount = bnx2x_iov_get_max_queue_count(bp);
+
+ /* allocate the queue arrays for all VFs */
+ bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+ if (!bp->vfdb->vfqs) {
+ BNX2X_ERR("failed to allocate vf queue array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ return 0;
+failed:
+ DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
+ __bnx2x_iov_free_vfdb(bp);
+ return err;
+}
+
+void bnx2x_iov_remove_one(struct bnx2x *bp)
+{
+ /* if SRIOV is not enabled there's nothing to do */
+ if (!IS_SRIOV(bp))
+ return;
+
+ DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
+ pci_disable_sriov(bp->pdev);
+ DP(BNX2X_MSG_IOV, "sriov disabled\n");
+
+ /* free vf database */
+ __bnx2x_iov_free_vfdb(bp);
+}
+
+void bnx2x_iov_free_mem(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* free vfs hw contexts */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = &bp->vfdb->context[i];
+ BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
+ }
+
+ BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
+ BP_VFDB(bp)->sp_dma.mapping,
+ BP_VFDB(bp)->sp_dma.size);
+
+ BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
+ BP_VF_MBX_DMA(bp)->mapping,
+ BP_VF_MBX_DMA(bp)->size);
+
+ BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
+ BP_VF_BULLETIN_DMA(bp)->mapping,
+ BP_VF_BULLETIN_DMA(bp)->size);
+}
+
+int bnx2x_iov_alloc_mem(struct bnx2x *bp)
+{
+ size_t tot_size;
+ int i, rc = 0;
+
+ if (!IS_SRIOV(bp))
+ return rc;
+
+ /* allocate vfs hw contexts */
+ tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
+ BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
+
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
+ cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
+
+ if (cxt->size) {
+ BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+ } else {
+ cxt->addr = NULL;
+ cxt->mapping = 0;
+ }
+ tot_size -= cxt->size;
+ }
+
+ /* allocate vfs ramrods dma memory - client_init and set_mac */
+ tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
+ BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ BP_VFDB(bp)->sp_dma.size = tot_size;
+
+ /* allocate mailboxes */
+ tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
+ BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ BP_VF_MBX_DMA(bp)->size = tot_size;
+
+ /* allocate local bulletin boards */
+ tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
+ BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
+ &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
+ BP_VF_BULLETIN_DMA(bp)->size = tot_size;
+
+ return 0;
+
+alloc_mem_err:
+ return -ENOMEM;
+}
+
+static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+ unsigned long q_type = 0;
+
+ set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+ set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Queue State object */
+ bnx2x_init_queue_obj(bp, &q->sp_obj,
+ cl_id, &q->cid, 1, func_id,
+ bnx2x_vf_sp(bp, vf, q_data),
+ bnx2x_vf_sp_map(bp, vf, q_data),
+ q_type);
+
+ DP(BNX2X_MSG_IOV,
+ "initialized vf %d's queue object. func id set to %d\n",
+ vf->abs_vfid, q->sp_obj.func_id);
+
+ /* mac/vlan objects are per queue, but only those
+ * that belong to the leading queue are initialized
+ */
+ if (vfq_is_leading(q)) {
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ }
+}
+
+/* called by bnx2x_nic_load */
+int bnx2x_iov_nic_init(struct bnx2x *bp)
+{
+ int vfid, qcount, i;
+
+ if (!IS_SRIOV(bp)) {
+ DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
+ return 0;
+ }
+
+ DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
+
+ /* initialize vf database */
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
+ vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
+ BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
+
+ /* init statically provisioned resources */
+ bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+
+ /* queues are initialized during VF-ACQUIRE */
+
+ /* reserve the vf vlan credit */
+ bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
+
+ vf->filter_state = 0;
+ vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ /* init mcast object - This object will be re-initialized
+ * during VF-ACQUIRE with the proper cl_id and cid.
+ * It needs to be initialized here so that it can be safely
+ * handled by a subsequent FLR flow.
+ */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
+ 0xFF, 0xFF, 0xFF,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* set the mailbox message addresses */
+ BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
+ (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
+ MBX_MSG_ALIGNED_SIZE);
+
+ BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
+ vfid * MBX_MSG_ALIGNED_SIZE;
+
+ /* Enable vf mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ }
+
+ /* Final VF init */
+ qcount = 0;
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ /* fill in the BDF and bars */
+ vf->bus = bnx2x_vf_bus(bp, i);
+ vf->devfn = bnx2x_vf_devfn(bp, i);
+ bnx2x_vf_set_bars(bp, vf);
+
+ DP(BNX2X_MSG_IOV,
+ "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
+ vf->abs_vfid, vf->bus, vf->devfn,
+ (unsigned)vf->bars[0].bar, vf->bars[0].size,
+ (unsigned)vf->bars[1].bar, vf->bars[1].size,
+ (unsigned)vf->bars[2].bar, vf->bars[2].size);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+ }
+
+ return 0;
+}
+
+/* called by bnx2x_chip_cleanup */
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return 0;
+
+ /* release all the VFs */
+ for_each_vf(bp, i)
+ bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
+
+ return 0;
+}
+
+/* called by bnx2x_init_hw_func, returns the next ilt line */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
+{
+ int i;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+ if (!IS_SRIOV(bp))
+ return line;
+
+ /* set vfs ilt lines */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
+
+ ilt->lines[line+i].page = hw_cxt->addr;
+ ilt->lines[line+i].page_mapping = hw_cxt->mapping;
+ ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
+ }
+ return line + i;
+}
+
+static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
+{
+ return ((cid >= BNX2X_FIRST_VF_CID) &&
+ ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
+}
+
+static
+void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
+ struct bnx2x_vf_queue *vfq,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+
+ /* Always push next commands out, don't wait here */
+ set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+ rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
+ &ramrod_flags);
+ break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
+ &ramrod_flags);
+ break;
+ default:
+ BNX2X_ERR("Unsupported classification command: %d\n",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
+}
+
+static
+void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ int rc;
+
+ rparam.mcast_obj = &vf->mcast_obj;
+ vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+}
+
+static
+void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ smp_mb__after_clear_bit();
+}
+
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
+{
+ struct bnx2x_virtf *vf;
+ int qidx = 0, abs_vfid;
+ u8 opcode;
+ u16 cid = 0xffff;
+
+ if (!IS_SRIOV(bp))
+ return 1;
+
+ /* first get the cid - the only events we handle here are cfc-delete
+ * and set-mac completion
+ */
+ opcode = elem->message.opcode;
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ cid = (elem->message.data.eth_event.echo &
+ BNX2X_SWCID_MASK);
+ DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ abs_vfid = elem->message.data.vf_flr_event.vf_id;
+ DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
+ abs_vfid);
+ goto get_vf;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ abs_vfid = elem->message.data.malicious_vf_event.vf_id;
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
+ abs_vfid);
+ goto get_vf;
+ default:
+ return 1;
+ }
+
+ /* check if the cid is in the VF range */
+ if (!bnx2x_iov_is_vf_cid(bp, cid)) {
+ DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
+ return 1;
+ }
+
+ /* extract vf and rxq index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
+ */
+ qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
+ abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
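+ /* e.g. (illustrative only, assuming BNX2X_VF_CID_WND is 4 and a max
+ * of 64 VFs per path): cid 0x1a7 decodes to qidx = 0x1a7 & 0xf = 7
+ * and abs_vfid = (0x1a7 >> 4) & 0x3f = 0x1a
+ */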
+get_vf:
+ vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf) {
+ BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
+ cid, abs_vfid);
+ return 0;
+ }
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
+ vf->abs_vfid, qidx);
+ vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
+ &vfq_get(vf,
+ qidx)->sp_obj,
+ BNX2X_Q_CMD_CFC_DEL);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
+ break;
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_mcast_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_filters_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
+ vf->abs_vfid);
+ /* Do nothing for now */
+ break;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
+ vf->abs_vfid);
+ /* Do nothing for now */
+ break;
+ }
+ /* SRIOV: reschedule any 'in_progress' operations */
+ bnx2x_iov_sp_event(bp, cid, false);
+
+ return 0;
+}
+
+static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
+{
+ /* extract the vf from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
+ */
+ int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+ return bnx2x_vf_by_abs_fid(bp, abs_vfid);
+}
+
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj)
+{
+ struct bnx2x_virtf *vf;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ vf = bnx2x_vf_by_cid(bp, vf_cid);
+
+ if (vf) {
+ /* extract queue index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
+ */
+ int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
+ *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
+ } else {
+ BNX2X_ERR("No vf matching cid %d\n", vf_cid);
+ }
+}
+
+void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
+{
+ struct bnx2x_virtf *vf;
+
+ /* check if the cid is in the VF range */
+ if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
+ return;
+
+ vf = bnx2x_vf_by_cid(bp, vf_cid);
+ if (vf) {
+ /* set in_progress flag */
+ atomic_set(&vf->op_in_progress, 1);
+ if (queue_work)
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ }
+}
+
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
+{
+ int i;
+ int first_queue_query_index, num_queues_req;
+ dma_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+ u8 stats_count = 0;
+ bool is_fcoe = false;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ if (!NO_FCOE(bp))
+ is_fcoe = true;
+
+ /* fcoe adds one global request and one queue request */
+ num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
+ (is_fcoe ? 0 : 1);
+
+ DP(BNX2X_MSG_IOV,
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
+
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats) +
+ num_queues_req * sizeof(struct per_queue_stats);
+
+ cur_query_entry = &bp->fw_stats_req->
+ query[first_queue_query_index + num_queues_req];
+
+ for_each_vf(bp, i) {
+ int j;
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ if (vf->state != VF_ENABLED) {
+ DP(BNX2X_MSG_IOV,
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
+ continue;
+ }
+
+ DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+ for_each_vfq(vf, j) {
+ struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+
+ /* collect stats from active queues only */
+ if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED)
+ continue;
+
+ /* create stats query entry for this queue */
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = vfq_cl_id(vf, rxq);
+ cur_query_entry->funcID =
+ cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(vf->fw_stat_map));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(vf->fw_stat_map));
+ DP(BNX2X_MSG_IOV,
+ "added address %x %x for vf %d queue %d client %d\n",
+ cur_query_entry->address.hi,
+ cur_query_entry->address.lo, cur_query_entry->funcID,
+ j, cur_query_entry->index);
+ cur_query_entry++;
+ cur_data_offset += sizeof(struct per_queue_stats);
+ stats_count++;
+ }
+ }
+ bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
+}
+
+void bnx2x_iov_sp_task(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return;
+ /* Iterate over all VFs and invoke state transition for VFs with
+ * 'in-progress' slow-path operations
+ */
+ DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ if (!list_empty(&vf->op_list_head) &&
+ atomic_read(&vf->op_in_progress)) {
+ DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
+ bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
+ }
+ }
+}
+
+static inline
+struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
+{
+ int i;
+ struct bnx2x_virtf *vf = NULL;
+
+ for_each_vf(bp, i) {
+ vf = BP_VF(bp, i);
+ if (stat_id >= vf->igu_base_id &&
+ stat_id < vf->igu_base_id + vf_sb_count(vf))
+ break;
+ }
+ return vf;
+}
+
+/* VF API helpers */
+static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
+ u8 enable)
+{
+ u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
+ u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
+
+ REG_WR(bp, reg, val);
+}
+
+static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), false);
+}
+
+static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 val;
+
+ /* clear the VF configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
+ IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
+ BNX2X_VF_MAX_QUEUES);
+}
+
+static
+int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *req_resc)
+{
+ u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+ return ((req_resc->num_rxqs <= rxq_cnt) &&
+ (req_resc->num_txqs <= txq_cnt) &&
+ (req_resc->num_sbs <= vf_sb_count(vf)) &&
+ (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+ (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc)
+{
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+ int i;
+
+ /* if state is 'acquired' the VF was not released or FLR'd, in
+ * this case the returned resources match the previously
+ * acquired resources. Verify that the requested numbers do
+ * not exceed the already acquired numbers.
+ */
+ if (vf->state == VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+ vf->abs_vfid);
+
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+ BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n",
+ vf->abs_vfid);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* Otherwise vf state must be 'free' or 'reset' */
+ if (vf->state != VF_FREE && vf->state != VF_RESET) {
+ BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+
+ /* static allocation:
+ * the global maximum numbers are fixed per VF. Fail the request if
+ * the requested numbers exceed these globals
+ */
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+ DP(BNX2X_MSG_IOV,
+ "cannot fulfill vf resource request. Placing maximal available values in response\n");
+ /* set the max resource in the vf */
+ return -ENOMEM;
+ }
+
+ /* Set resource counters - 0 request means max available */
+ vf_sb_count(vf) = resc->num_sbs;
+ vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ if (resc->num_mac_filters)
+ vf_mac_rules_cnt(vf) = resc->num_mac_filters;
+ if (resc->num_vlan_filters)
+ vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+
+ DP(BNX2X_MSG_IOV,
+ "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
+ vf_sb_count(vf), vf_rxq_count(vf),
+ vf_txq_count(vf), vf_mac_rules_cnt(vf),
+ vf_vlan_rules_cnt(vf));
+
+ /* Initialize the queues */
+ if (!vf->vfqs) {
+ DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
+ return -EINVAL;
+ }
+
+ for_each_vfq(vf, i) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, i);
+
+ if (!q) {
+ DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+ return -EINVAL;
+ }
+
+ q->index = i;
+ q->cxt = &((base_cxt + i)->eth);
+ q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
+
+ DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
+ vf->abs_vfid, i, q->index, q->cid, q->cxt);
+
+ /* init SP objects */
+ bnx2x_vfq_init(bp, vf, q);
+ }
+ vf->state = VF_ACQUIRED;
+ return 0;
+}
+
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ u16 flags = 0;
+ int i;
+
+ /* the sb resources are initialized at this point, do the
+ * FW/HW initializations
+ */
+ for_each_vf_sb(vf, i)
+ bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
+ vf_igu_sb(vf, i), vf_igu_sb(vf, i));
+
+ /* Sanity checks */
+ if (vf->state != VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+ /* FLR cleanup epilogue */
+ if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
+ return -EBUSY;
+
+ /* reset IGU VF statistics: MSIX */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
+
+ /* vf init */
+ if (vf->cfg_flags & VF_CFG_STATS)
+ flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
+
+ if (vf->cfg_flags & VF_CFG_TPA)
+ flags |= FUNC_FLG_TPA;
+
+ if (is_vf_multi(vf))
+ flags |= FUNC_FLG_RSS;
+
+ /* function setup */
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
+ func_init.fw_stat_map = vf->fw_stat_map;
+ func_init.spq_map = vf->spq_map;
+ func_init.spq_prod = 0;
+ bnx2x_func_init(bp, &func_init);
+
+ /* Enable the vf */
+ bnx2x_vf_enable_access(bp, vf->abs_vfid);
+ bnx2x_vf_enable_traffic(bp, vf);
+
+ /* queue protection table */
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), true);
+
+ vf->state = VF_ENABLED;
+
+ /* update vf bulletin board */
+ bnx2x_post_vf_bulletin(bp, vf->index);
+
+ return 0;
+}
+
+/* VFOP close (teardown the queues, delete mcasts and close HW) */
+static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
+ enum bnx2x_vfop_close_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_close,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_CLOSE_QUEUES:
+
+ if (++(qx->qid) < vf_rxq_count(vf)) {
+ vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ }
+
+ /* remove multicasts */
+ vfop->state = BNX2X_VFOP_CLOSE_HW;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_CLOSE_HW:
+
+ /* disable the interrupts */
+ DP(BNX2X_MSG_IOV, "disabling igu\n");
+ bnx2x_vf_igu_disable(bp, vf);
+
+ /* disable the VF */
+ DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+ bnx2x_vf_clr_qtbl(bp, vf);
+
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ vf->state = VF_ACQUIRED;
+ DP(BNX2X_MSG_IOV, "set state to acquired\n");
+ bnx2x_vfop_end(bp, vf, vfop);
+}
+
+int bnx2x_vfop_close_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ vfop->args.qx.qid = -1; /* loop */
+ bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
+ bnx2x_vfop_close, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VF release can be called in either of two cases: 1. the VF was
+ * acquired but not enabled; 2. the VF was enabled or was in the
+ * process of being enabled
+ */
+static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_release,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
+ vf->state == VF_FREE ? "Free" :
+ vf->state == VF_ACQUIRED ? "Acquired" :
+ vf->state == VF_ENABLED ? "Enabled" :
+ vf->state == VF_RESET ? "Reset" :
+ "Unknown");
+
+ switch (vf->state) {
+ case VF_ENABLED:
+ vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case VF_ACQUIRED:
+ DP(BNX2X_MSG_IOV, "about to free resources\n");
+ bnx2x_vf_free_resc(bp, vf);
+ DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
+ goto op_done;
+
+ case VF_FREE:
+ case VF_RESET:
+ /* do nothing */
+ goto op_done;
+ default:
+ bnx2x_vfop_default(vf->state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ bnx2x_vfop_end(bp, vf, vfop);
+}
+
+int bnx2x_vfop_release_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ bnx2x_vfop_opset(-1, /* use vf->state */
+ bnx2x_vfop_release, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VF release ~ VF close + VF release-resources
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = NULL,
+ .block = block,
+ };
+ int rc;
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+
+ rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ if (rc)
+ WARN(rc,
+ "VF[%d] Failed to allocate resources for release op- rc=%d\n",
+ vf->abs_vfid, rc);
+}
+
+static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, u32 *sbdf)
+{
+ *sbdf = vf->devfn | (vf->bus << 8);
+}
+
+static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_bar_info *bar_info)
+{
+ int n;
+
+ bar_info->nr_bars = bp->vfdb->sriov.nres;
+ for (n = 0; n < bar_info->nr_bars; n++)
+ bar_info->bars[n] = vf->bars[n];
+}
+
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv)
+{
+ /* lock the channel */
+ mutex_lock(&vf->op_mutex);
+
+ /* record the locking op */
+ vf->op_current = tlv;
+
+ /* log the lock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
+ vf->abs_vfid, tlv);
+}
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv)
+{
+ WARN(expected_tlv != vf->op_current,
+ "lock mismatch: expected %d found %d", expected_tlv,
+ vf->op_current);
+
+ /* lock the channel */
+ mutex_unlock(&vf->op_mutex);
+
+ /* log the unlock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
+ vf->abs_vfid, vf->op_current);
+
+ /* clear the locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+}
+
+void bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0;
+
+ /* disable sriov in case it is still enabled */
+ pci_disable_sriov(bp->pdev);
+ DP(BNX2X_MSG_IOV, "sriov disabled\n");
+
+ /* enable sriov */
+ DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
+ rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+ if (rc)
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ else
+ DP(BNX2X_MSG_IOV, "sriov enabled\n");
+}
+
+/* New mac for VF. Consider these cases:
+ * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
+ * supply at acquire.
+ * 2. VF has already been acquired but has not yet initialized - store in local
+ * bulletin board. mac will be posted on VF bulletin board after VF init. VF
+ * will configure this mac when it is ready.
+ * 3. VF has already initialized but has not yet set up a queue - post the new
+ * mac on VF's bulletin board right now. VF will configure this mac when it
+ * is ready.
+ * 4. VF has already set up a queue - delete any macs already configured for
+ * this queue and manually configure the new mac.
+ * In any event, once this function has been called refuse any attempts by the
+ * VF to configure any mac for itself except for this mac. In case of a race
+ * where the VF fails to see the new post on its bulletin board before sending a
+ * mac configuration request, the PF will simply fail the request and VF can try
+ * again after consulting its bulletin board
+ */
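+/* Note: this handler is presumably reached through the standard
+ * .ndo_set_vf_mac net_device op (e.g. "ip link set <pf-ifname> vf <n>
+ * mac <addr>"); the actual wiring is not shown in this file.
+ */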
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state, vfidx = queue;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* if SRIOV is disabled there is nothing to do (and somewhere, someone
+ * has erred).
+ */
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ BNX2X_ERR("mac address invalid\n");
+ return -EINVAL;
+ }
+
+ /* update PF's copy of the VF's bulletin. The PF will no longer accept
+ * mac configuration requests from the vf unless they match this mac
+ */
+ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
+ memcpy(bulletin->mac, mac, ETH_ALEN);
+
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc) {
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+ return rc;
+ }
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the mac in device on this vf's queue */
+ unsigned long flags = 0;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+
+ /* remove existing eth macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete eth macs\n");
+ return -EINVAL;
+ }
+
+ /* remove existing uc list macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete uc_list macs\n");
+ return -EINVAL;
+ }
+
+ /* configure the new mac to device */
+ __set_bit(RAMROD_COMP_WAIT, &flags);
+ bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
+ BNX2X_ETH_MAC, &flags);
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+ }
+
+ return rc;
+}
+
+/* crc is the first field in the bulletin board. compute the crc over the
+ * entire bulletin board excluding the crc field itself
+ */
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin)
+{
+ return crc32(BULLETIN_CRC_SEED,
+ ((u8 *)bulletin) + sizeof(bulletin->crc),
+ bulletin->length - sizeof(bulletin->crc));
+}
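+
+/* The consumer side (bnx2x_sample_bulletin() below) recomputes this crc over
+ * a sampled copy of the board and compares it with the posted crc field to
+ * detect a torn read taken while the PF was mid-post.
+ */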
+
+/* Check for new posts on the bulletin board */
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int attempts;
+
+ /* bulletin board hasn't changed since last sample */
+ if (bp->old_bulletin.version == bulletin.version)
+ return PFVF_BULLETIN_UNCHANGED;
+
+ /* validate crc of new bulletin board */
+ if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
+ /* sampling the structure in mid-post may result in corrupted data;
+ * validate the crc to ensure coherency.
+ */
+ for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+ bulletin = bp->pf2vf_bulletin->content;
+ if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
+ &bulletin))
+ break;
+ BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n",
+ bulletin.crc,
+ bnx2x_crc_vf_bulletin(bp, &bulletin));
+ }
+ if (attempts >= BULLETIN_ATTEMPTS) {
+ BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+ attempts);
+ return PFVF_BULLETIN_CRC_ERR;
+ }
+ }
+
+ /* the mac address in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
+ memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ /* update new mac to net device */
+ memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ }
+
+ /* copy new bulletin board to bp */
+ bp->old_bulletin = bulletin;
+
+ return PFVF_BULLETIN_UPDATED;
+}
+
+void bnx2x_vf_map_doorbells(struct bnx2x *bp)
+{
+ /* vf doorbells are embedded within the regview */
+ bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
+}
+
+int bnx2x_vf_pci_alloc(struct bnx2x *bp)
+{
+ /* allocate vf2pf mailbox for vf to pf channel */
+ BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* allocate pf 2 vf bulletin board */
+ BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+
+ return 0;
+
+alloc_mem_err:
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ return -ENOMEM;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
new file mode 100644
index 000000000000..b4050173add9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -0,0 +1,809 @@
+/* bnx2x_sriov.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ */
+#ifndef BNX2X_SRIOV_H
+#define BNX2X_SRIOV_H
+
+#include "bnx2x_vfpf.h"
+#include "bnx2x.h"
+
+enum sample_bulletin_result {
+ PFVF_BULLETIN_UNCHANGED,
+ PFVF_BULLETIN_UPDATED,
+ PFVF_BULLETIN_CRC_ERR
+};
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* The bnx2x device structure holds vfdb structure described below.
+ * The VF array is indexed by the relative vfid.
+ */
+#define BNX2X_VF_MAX_QUEUES 16
+#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8
+
+struct bnx2x_sriov {
+ u32 first_vf_in_pf;
+
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total; /* total VFs associated with the PF */
+ u16 initial; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+};
+
+/* bars */
+struct bnx2x_vf_bar {
+ u64 bar;
+ u32 size;
+};
+
+struct bnx2x_vf_bar_info {
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+ u8 nr_bars;
+};
+
+/* vf queue (used both for rx or tx) */
+struct bnx2x_vf_queue {
+ struct eth_context *cxt;
+
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
+ atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+
+ /* Queue Slow-path State object */
+ struct bnx2x_queue_sp_obj sp_obj;
+
+ u32 cid;
+ u16 index;
+ u16 sb_idx;
+};
+
+/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
+ * q-init, q-setup and SB index
+ */
+struct bnx2x_vfop_qctor_params {
+ struct bnx2x_queue_state_params qstate;
+ struct bnx2x_queue_setup_params prep_qsetup;
+};
+
+/* VFOP parameters (one copy per VF) */
+union bnx2x_vfop_params {
+ struct bnx2x_vlan_mac_ramrod_params vlan_mac;
+ struct bnx2x_rx_mode_ramrod_params rx_mode;
+ struct bnx2x_mcast_ramrod_params mcast;
+ struct bnx2x_config_rss_params rss;
+ struct bnx2x_vfop_qctor_params qctor;
+};
+
+/* forward */
+struct bnx2x_virtf;
+
+/* VFOP definitions */
+typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+struct bnx2x_vfop_cmd {
+ vfop_handler_t done;
+ bool block;
+};
+
+/* VFOP queue filters command additional arguments */
+struct bnx2x_vfop_filter {
+ struct list_head link;
+ int type;
+#define BNX2X_VFOP_FILTER_MAC 1
+#define BNX2X_VFOP_FILTER_VLAN 2
+
+ bool add;
+ u8 *mac;
+ u16 vid;
+};
+
+struct bnx2x_vfop_filters {
+ int add_cnt;
+ struct list_head head;
+ struct bnx2x_vfop_filter filters[];
+};
+
+/* transient list allocated, built and saved until it is
+ * passed to the SP-VERBs layer.
+ */
+struct bnx2x_vfop_args_mcast {
+ int mc_num;
+ struct bnx2x_mcast_list_elem *mc;
+};
+
+struct bnx2x_vfop_args_qctor {
+ int qid;
+ u16 sb_idx;
+};
+
+struct bnx2x_vfop_args_qdtor {
+ int qid;
+ struct eth_context *cxt;
+};
+
+struct bnx2x_vfop_args_defvlan {
+ int qid;
+ bool enable;
+ u16 vid;
+ u8 prio;
+};
+
+struct bnx2x_vfop_args_qx {
+ int qid;
+ bool en_add;
+};
+
+struct bnx2x_vfop_args_filters {
+ struct bnx2x_vfop_filters *multi_filter;
+ atomic_t *credit; /* non NULL means 'don't consume credit' */
+};
+
+union bnx2x_vfop_args {
+ struct bnx2x_vfop_args_mcast mc_list;
+ struct bnx2x_vfop_args_qctor qctor;
+ struct bnx2x_vfop_args_qdtor qdtor;
+ struct bnx2x_vfop_args_defvlan defvlan;
+ struct bnx2x_vfop_args_qx qx;
+ struct bnx2x_vfop_args_filters filters;
+};
+
+struct bnx2x_vfop {
+ struct list_head link;
+ int rc; /* return code */
+ int state; /* next state */
+ union bnx2x_vfop_args args; /* extra arguments */
+ union bnx2x_vfop_params *op_p; /* ramrod params */
+
+ /* state machine callbacks */
+ vfop_handler_t transition;
+ vfop_handler_t done;
+};
+
+/* vf context */
+struct bnx2x_virtf {
+ u16 cfg_flags;
+#define VF_CFG_STATS 0x0001
+#define VF_CFG_FW_FC 0x0002
+#define VF_CFG_TPA 0x0004
+#define VF_CFG_INT_SIMD 0x0008
+#define VF_CACHE_LINE 0x0010
+
+ u8 state;
+#define VF_FREE 0 /* VF ready to be acquired, holds no resc */
+#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
+#define VF_ENABLED 2 /* VF Enabled */
+#define VF_RESET 3 /* VF FLR'd, pending cleanup */
+
+ /* non-zero during flr cleanup */
+ u8 flr_clnup_stage;
+#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
+ * sans the end-wait
+ */
+#define VF_FLR_ACK 2 /* ACK flr notification */
+#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
+ * ~ 'final cleanup' end-wait
+ */
+
+ /* dma */
+ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ dma_addr_t spq_map;
+ dma_addr_t bulletin_map;
+
+ /* Allocated resources counters. Before the VF is acquired, the
+ * counters hold the following values:
+ *
+ * - xxq_count = 0 as the queues memory is not allocated yet.
+ *
+ * - sb_count = The number of status blocks configured for this VF in
+ * the IGU CAM. Initially read during probe.
+ *
+ * - xx_rules_count = The number of rules statically and equally
+ * allocated for each VF, during PF load.
+ */
+ struct vf_pf_resc_request alloc_resc;
+#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs)
+#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs)
+#define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs)
+#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
+#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
+#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
+
+ u8 sb_count; /* actual number of SBs */
+ u8 igu_base_id; /* base igu status block id */
+
+ struct bnx2x_vf_queue *vfqs;
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+
+ u8 index; /* index in the vf array */
+ u8 abs_vfid;
+ u8 sp_cl_id;
+ u32 error; /* 0 means all's-well */
+
+ /* BDF */
+ unsigned int bus;
+ unsigned int devfn;
+
+ /* bars */
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+
+ /* set-mac ramrod state 1-pending, 0-done */
+ unsigned long filter_state;
+
+ /* leading rss client id ~~ the client id of the first rxq, must be
+ * set for each txq.
+ */
+ int leading_rss;
+
+ /* MCAST object */
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* slow-path operations */
+ atomic_t op_in_progress;
+ int op_rc;
+ bool op_wait_blocking;
+ struct list_head op_list_head;
+ union bnx2x_vfop_params op_params;
+ struct mutex op_mutex; /* one vfop at a time mutex */
+ enum channel_tlvs op_current;
+};
+
+#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
+
+#define for_each_vf(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)
+
+#define for_each_vfq(vf, var) \
+ for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
+
+#define for_each_vf_sb(vf, var) \
+ for ((var) = 0; (var) < vf_sb_count(vf); (var)++)
+
+#define is_vf_multi(vf) (vf_rxq_count(vf) > 1)
+
+#define HW_VF_HANDLE(bp, abs_vfid) \
+ (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4))
+
+#define FW_PF_MAX_HANDLE 8
+
+#define FW_VF_HANDLE(abs_vfid) \
+ (abs_vfid + FW_PF_MAX_HANDLE)
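+
+/* Worked example (derived from the macros above): for abs_vfid 5 on absolute
+ * function 0, HW_VF_HANDLE() yields 0x0 | (1 << 3) | (5 << 4) = 0x58 and
+ * FW_VF_HANDLE() yields 5 + 8 = 13.
+ */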
+
+/* locking and unlocking the channel mutex */
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv);
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv);
+
+/* VF mail box (aka vf-pf channel) */
+
+/* a container for the bi-directional vf<-->pf messages.
+ * The actual response will be placed according to the offset parameter
+ * provided in the request
+ */
+
+#define MBX_MSG_ALIGN 8
+#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \
+ MBX_MSG_ALIGN))
+
+struct bnx2x_vf_mbx_msg {
+ union vfpf_tlvs req;
+ union pfvf_tlvs resp;
+};
+
+struct bnx2x_vf_mbx {
+ struct bnx2x_vf_mbx_msg *msg;
+ dma_addr_t msg_mapping;
+
+ /* VF GPA address */
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
+
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
+
+ u8 flags;
+#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
+ * more than one pending msg
+ */
+};
+
+struct bnx2x_vf_sp {
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_data;
+};
+
+struct hw_dma {
+ void *addr;
+ dma_addr_t mapping;
+ size_t size;
+};
+
+struct bnx2x_vfdb {
+#define BP_VFDB(bp) ((bp)->vfdb)
+ /* vf array */
+ struct bnx2x_virtf *vfs;
+#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)]))
+#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var)
+
+ /* queue array - for all vfs */
+ struct bnx2x_vf_queue *vfqs;
+
+ /* vf HW contexts */
+ struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
+#define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)])
+
+ /* SR-IOV information */
+ struct bnx2x_sriov sriov;
+ struct hw_dma mbx_dma;
+#define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma))
+ struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS];
+#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)]))
+
+ struct hw_dma bulletin_dma;
+#define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma))
+#define BP_VF_BULLETIN(bp, vf) \
+ (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \
+ + (vf))
+
+ struct hw_dma sp_dma;
+#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
+#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
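+
+/* e.g. bnx2x_vf_sp(bp, vf, mcast_rdata) points at this VF's slice of the
+ * shared sp_dma area holding its multicast ramrod data, and
+ * bnx2x_vf_sp_map() returns the matching DMA address; both are handed to
+ * the SP object init routines in bnx2x_sriov.c.
+ */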
+
+#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
+ u32 flrd_vfs[FLRD_VFS_DWORDS];
+};
+
+/* queue access */
+static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
+{
+ return &(vf->vfqs[index]);
+}
+
+static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
+{
+ return (vfq->index == 0);
+}
+
+/* FW ids */
+static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf->igu_base_id + sb_idx;
+}
+
+static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf_igu_sb(vf, sb_idx);
+}
+
+static inline u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vf->igu_base_id + q->index;
+}
+
+static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vfq_cl_id(vf, q);
+}
+
+static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vfq_cl_id(vf, q);
+}
+
+/* global iov routines */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
+void bnx2x_iov_remove_one(struct bnx2x *bp);
+void bnx2x_iov_free_mem(struct bnx2x *bp);
+int bnx2x_iov_alloc_mem(struct bnx2x *bp);
+int bnx2x_iov_nic_init(struct bnx2x *bp);
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp);
+void bnx2x_iov_init_dq(struct bnx2x *bp);
+void bnx2x_iov_init_dmae(struct bnx2x *bp);
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj);
+void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
+void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
+void bnx2x_iov_sp_task(struct bnx2x *bp);
+/* global vf mailbox routines */
+void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
+
+/* CORE VF API */
+typedef u8 bnx2x_mac_addr_t[ETH_ALEN];
+
+/* acquire */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc);
+/* init */
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ dma_addr_t *sb_map);
+
+/* VFOP generic helpers */
+#define bnx2x_vfop_default(state) do { \
+ BNX2X_ERR("Bad state %d\n", (state)); \
+ vfop->rc = -EINVAL; \
+ goto op_err; \
+ } while (0)
+
+enum {
+ VFOP_DONE,
+ VFOP_CONT,
+ VFOP_VERIFY_PEND,
+};
+
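+/* common epilogue for a VF operation state handler: a negative rc is an
+ * error, a positive rc means a ramrod is pending, and rc == 0 proceeds
+ * according to 'next' (done / expected-pending / reschedule the sp task)
+ */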
+#define bnx2x_vfop_finalize(vf, rc, next) do { \
+ if ((rc) < 0) \
+ goto op_err; \
+ else if ((rc) > 0) \
+ goto op_pending; \
+ else if ((next) == VFOP_DONE) \
+ goto op_done; \
+ else if ((next) == VFOP_VERIFY_PEND) \
+ BNX2X_ERR("expected pending\n"); \
+ else { \
+ DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \
+ atomic_set(&vf->op_in_progress, 1); \
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
+ return; \
+ } \
+ } while (0)
+
+#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
+ do { \
+ vfop->state = first_state; \
+ vfop->op_p = &vf->op_params; \
+ vfop->transition = trans_hndlr; \
+ vfop->done = done_hndlr; \
+ } while (0)
+
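+/* per-VF operations are kept on a stack-like list (op_list_head, protected
+ * by op_mutex): bnx2x_vfop_add() pushes a new nested op, bnx2x_vfop_cur()
+ * returns the op at the head, and bnx2x_vfop_end() pops it and propagates
+ * its rc to the parent op (or to vf->op_rc when the list becomes empty)
+ */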
+static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
+ WARN_ON(list_empty(&vf->op_list_head));
+ return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
+}
+
+static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
+
+ WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
+ if (vfop) {
+ INIT_LIST_HEAD(&vfop->link);
+ list_add(&vfop->link, &vf->op_list_head);
+ }
+ return vfop;
+}
+
+static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vfop *vfop)
+{
+ /* rc < 0 - error, otherwise set to 0 */
+ DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
+ if (vfop->rc >= 0)
+ vfop->rc = 0;
+ DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
+
+ /* unlink the current op context and propagate error code
+ * must be done before invoking the 'done()' handler
+ */
+ WARN(!mutex_is_locked(&vf->op_mutex),
+ "about to access vf op linked list but mutex was not locked!");
+ list_del(&vfop->link);
+
+ if (list_empty(&vf->op_list_head)) {
+ DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
+ vf->op_rc = vfop->rc;
+ DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+ } else {
+ struct bnx2x_vfop *cur_vfop;
+
+ DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
+ cur_vfop = bnx2x_vfop_cur(bp, vf);
+ cur_vfop->rc = vfop->rc;
+ DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+ }
+
+ /* invoke done handler */
+ if (vfop->done) {
+ DP(BNX2X_MSG_IOV, "calling done handler\n");
+ vfop->done(bp, vf);
+ } else {
+ /* there is no done handler for this operation to unlock the
+ * channel mutex; we must have gotten here from a PF-initiated VF RELEASE
+ */
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+ }
+
+ DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+
+ /* if this is the last nested op reset the wait_blocking flag
+ * to release any blocking wrappers, only after 'done()' is invoked
+ */
+ if (list_empty(&vf->op_list_head)) {
+ DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
+ vf->op_wait_blocking = false;
+ }
+
+ kfree(vfop);
+}
+
+static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ /* can take a while if any port is running */
+ int cnt = 5000;
+
+ might_sleep();
+ while (cnt--) {
+ if (vf->op_wait_blocking == false) {
+#ifdef BNX2X_STOP_ON_ERROR
+ DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
+#endif
+ return 0;
+ }
+ usleep_range(1000, 2000);
+
+ if (bp->panic)
+ return -EIO;
+ }
+
+ /* timeout! */
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+
+ return -EBUSY;
+}
+
+static inline int bnx2x_vfop_transition(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ vfop_handler_t transition,
+ bool block)
+{
+ if (block)
+ vf->op_wait_blocking = true;
+ transition(bp, vf);
+ if (block)
+ return bnx2x_vfop_wait_blocking(bp, vf);
+ return 0;
+}
+
+/* VFOP queue construction helpers */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vfop_qctor_params *p,
+ unsigned long q_type);
+int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *macs,
+ int qid, bool drv_only);
+
+int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add);
+
+int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *vlans,
+ int qid, bool drv_only);
+
+int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid);
+
+int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid);
+
+int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ bnx2x_mac_addr_t *mcasts,
+ int mcast_num, bool drv_only);
+
+int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, unsigned long accept_flags);
+
+int bnx2x_vfop_close_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
+int bnx2x_vfop_release_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
+/* VF release ~ VF close + VF release-resources
+ *
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+/* FLR routines */
+
+/* VF FLR helpers */
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
+
+/* Handles an FLR (or VF_DISABLE) notification from the MCP */
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
+
+void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
+ u16 length);
+void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length);
+void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
+
+bool bnx2x_tlv_supported(u16 tlvtype);
+
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin);
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
+
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+
+/* VF side vfpf channel functions */
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
+int bnx2x_vfpf_release(struct bnx2x *bp);
+int bnx2x_vfpf_init(struct bnx2x *bp);
+void bnx2x_vfpf_close_vf(struct bnx2x *bp);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
+int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+int bnx2x_vfpf_set_mac(struct bnx2x *bp);
+int bnx2x_vfpf_set_mcast(struct net_device *dev);
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
+
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len)
+{
+ strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+}
+
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp)
+{
+ return PXP_VF_ADDR_USDM_QUEUES_START +
+ bp->acquire_resp.resc.hw_qid[fp->index] *
+ sizeof(struct ustorm_queue_zone_data);
+}
+
+void bnx2x_vf_map_doorbells(struct bnx2x *bp);
+int bnx2x_vf_pci_alloc(struct bnx2x *bp);
+void bnx2x_enable_sriov(struct bnx2x *bp);
+static inline int bnx2x_vf_headroom(struct bnx2x *bp)
+{
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+}
+
+#else /* CONFIG_BNX2X_SRIOV */
+
+static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj) {}
+static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
+ bool queue_work) {}
+static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
+static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
+ union event_ring_elem *elem) {return 1; }
+static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
+static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
+static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
+static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {}
+static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
+static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param) {return 0; }
+static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
+static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
+ u8 tx_count, u8 rx_count) {return 0; }
+static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
+static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
+static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
+static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len) {}
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp) {return 0; }
+static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ return PFVF_BULLETIN_UNCHANGED;
+}
+
+static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 89ec0667140a..4397f8b76f2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
/* bnx2x_stats.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,7 +19,7 @@
#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
-
+#include "bnx2x_sriov.h"
/* Statistics */
@@ -79,6 +79,42 @@ static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
* Init service functions
*/
+static void bnx2x_dp_stats(struct bnx2x *bp)
+{
+ int i;
+
+ DP(BNX2X_MSG_STATS, "dumping stats:\n"
+ "fw_stats_req\n"
+ " hdr\n"
+ " cmd_num %d\n"
+ " reserved0 %d\n"
+ " drv_stats_counter %d\n"
+ " reserved1 %d\n"
+ " stats_counters_addrs %x %x\n",
+ bp->fw_stats_req->hdr.cmd_num,
+ bp->fw_stats_req->hdr.reserved0,
+ bp->fw_stats_req->hdr.drv_stats_counter,
+ bp->fw_stats_req->hdr.reserved1,
+ bp->fw_stats_req->hdr.stats_counters_addrs.hi,
+ bp->fw_stats_req->hdr.stats_counters_addrs.lo);
+
+ for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
+ DP(BNX2X_MSG_STATS,
+ "query[%d]\n"
+ " kind %d\n"
+ " index %d\n"
+ " funcID %d\n"
+ " reserved %d\n"
+ " address %x %x\n",
+ i, bp->fw_stats_req->query[i].kind,
+ bp->fw_stats_req->query[i].index,
+ bp->fw_stats_req->query[i].funcID,
+ bp->fw_stats_req->query[i].reserved,
+ bp->fw_stats_req->query[i].address.hi,
+ bp->fw_stats_req->query[i].address.lo);
+ }
+}
+
/* Post the next statistics ramrod. Protect it with the spin in
* order to ensure the strict order between statistics ramrods
* (each ramrod has a sequence number passed in a
@@ -103,7 +139,9 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
bp->fw_stats_req->hdr.drv_stats_counter);
-
+ /* adjust the ramrod to include VF queue statistics */
+ bnx2x_iov_adjust_stats_req(bp);
+ bnx2x_dp_stats(bp);
/* send FW stats ramrod */
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
@@ -174,7 +212,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
break;
}
cnt--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
return 1;
}
@@ -482,6 +520,12 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
static void bnx2x_stats_start(struct bnx2x *bp)
{
+ /* vfs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(bp))
+ return;
+
if (bp->port.pmf)
bnx2x_port_stats_init(bp);
@@ -501,6 +545,11 @@ static void bnx2x_stats_pmf_start(struct bnx2x *bp)
static void bnx2x_stats_restart(struct bnx2x *bp)
{
+ /* vfs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(bp))
+ return;
bnx2x_stats_comp(bp);
bnx2x_stats_start(bp);
}
@@ -832,19 +881,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
return 0;
}
-static int bnx2x_storm_stats_update(struct bnx2x *bp)
+static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
- struct tstorm_per_port_stats *tport =
- &bp->fw_stats_data->port.tstorm_port_statistics;
- struct tstorm_per_pf_stats *tfunc =
- &bp->fw_stats_data->pf.tstorm_pf_statistics;
- struct host_func_stats *fstats = &bp->func_stats;
- struct bnx2x_eth_stats *estats = &bp->eth_stats;
- struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
- int i;
u16 cur_stats_counter;
-
/* Make sure we use the value of the counter
* used for sending the last stats ramrod.
*/
@@ -880,6 +920,23 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
le16_to_cpu(counters->tstats_counter), bp->stats_counter);
return -EAGAIN;
}
+ return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+ struct tstorm_per_port_stats *tport =
+ &bp->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &bp->fw_stats_data->pf.tstorm_pf_statistics;
+ struct host_func_stats *fstats = &bp->func_stats;
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
+ int i;
+
+ /* vfs stat counter is managed by pf */
+ if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
+ return -EAGAIN;
estats->error_bytes_received_hi = 0;
estats->error_bytes_received_lo = 0;
@@ -953,8 +1010,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
total_broadcast_packets_received);
UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
- etherstatsoverrsizepkts);
- UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);
+ etherstatsoverrsizepkts, 32);
+ UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
SUB_EXTEND_USTAT(ucast_no_buff_pkts,
total_unicast_packets_received);
@@ -1033,15 +1090,15 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
estats->total_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
- ADD_64(estats->total_bytes_received_hi,
- le32_to_cpu(tfunc->rcv_error_bytes.hi),
- estats->total_bytes_received_lo,
- le32_to_cpu(tfunc->rcv_error_bytes.lo));
+ ADD_64_LE(estats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
- ADD_64(estats->error_bytes_received_hi,
- le32_to_cpu(tfunc->rcv_error_bytes.hi),
- estats->error_bytes_received_lo,
- le32_to_cpu(tfunc->rcv_error_bytes.lo));
+ ADD_64_LE(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
@@ -1174,23 +1231,34 @@ static void bnx2x_stats_update(struct bnx2x *bp)
if (bnx2x_edebug_stats_stopped(bp))
return;
- if (*stats_comp != DMAE_COMP_VAL)
- return;
+ if (IS_PF(bp)) {
+ if (*stats_comp != DMAE_COMP_VAL)
+ return;
- if (bp->port.pmf)
- bnx2x_hw_stats_update(bp);
+ if (bp->port.pmf)
+ bnx2x_hw_stats_update(bp);
- if (bnx2x_storm_stats_update(bp)) {
- if (bp->stats_pending++ == 3) {
- BNX2X_ERR("storm stats were not updated for 3 times\n");
- bnx2x_panic();
+ if (bnx2x_storm_stats_update(bp)) {
+ if (bp->stats_pending++ == 3) {
+ BNX2X_ERR("storm stats were not updated for 3 times\n");
+ bnx2x_panic();
+ }
+ return;
}
- return;
+ } else {
+ /* vf doesn't collect HW statistics and doesn't get completions;
+ * perform only the storm stats update
+ */
+ bnx2x_storm_stats_update(bp);
}
bnx2x_net_stats_update(bp);
bnx2x_drv_stats_update(bp);
+ /* vf is done */
+ if (IS_VF(bp))
+ return;
+
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index b4d7b26c7fe7..364e37ecbc5c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -421,16 +421,19 @@ struct bnx2x_fw_port_stats_old {
new->s); \
} while (0)
-#define UPDATE_EXTEND_TSTAT(s, t) \
+#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
do { \
- diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+ diff = le##size##_to_cpu(tclient->s) - \
+ le##size##_to_cpu(old_tclient->s); \
old_tclient->s = tclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
-#define UPDATE_EXTEND_E_TSTAT(s, t) \
+#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
+
+#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
do { \
- UPDATE_EXTEND_TSTAT(s, t); \
+ UPDATE_EXTEND_TSTAT_X(s, t, size); \
ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
} while (0)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
new file mode 100644
index 000000000000..36246129864c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -0,0 +1,1651 @@
+/* bnx2x_vfpf.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+/* place a given tlv on the tlv buffer at a given offset */
+void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
+ u16 length)
+{
+ struct channel_tlv *tl =
+ (struct channel_tlv *)(tlvs_list + offset);
+
+ tl->type = type;
+ tl->length = length;
+}
+
+/* Clear the mailbox and init the header of the first tlv */
+void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
+{
+ DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
+ type);
+
+ /* Clear mailbox */
+ memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* init type and length */
+ bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
+
+ /* init first tlv header */
+ first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+{
+ int i = 1;
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ while (tlv->type != CHANNEL_TLV_LIST_END) {
+ /* output tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+
+ /* advance to next tlv */
+ tlvs_list += tlv->length;
+
+ /* cast general tlv list pointer to channel tlv header */
+ tlv = (struct channel_tlv *)tlvs_list;
+
+ i++;
+
+ /* break condition for this loop */
+ if (i > MAX_TLVS_IN_LIST) {
+ WARN(true, "corrupt tlvs");
+ return;
+ }
+ }
+
+ /* output last tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+}
+
+/* test whether we support a tlv type */
+bool bnx2x_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static inline int bnx2x_pfvf_status_codes(int rc)
+{
+ switch (rc) {
+ case 0:
+ return PFVF_STATUS_SUCCESS;
+ case -ENOMEM:
+ return PFVF_STATUS_NO_RESOURCE;
+ default:
+ return PFVF_STATUS_FAILURE;
+ }
+}
+
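+/* send a request to the PF over the VF-PF channel: write the DMA address of
+ * the request into the CSDM VF zone, trigger the channel and poll the 'done'
+ * byte (written back by the PF) until it becomes non-zero or we time out
+ */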
+int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+{
+ struct cstorm_vf_zone_data __iomem *zone_data =
+ REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
+ int tout = 600, interval = 100; /* wait for 60 seconds */
+
+ if (*done) {
+ BNX2X_ERR("done was non zero before message to pf was sent\n");
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ /* Write message address */
+ writel(U64_LO(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
+ writel(U64_HI(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
+
+ /* make sure the address is written before FW accesses it */
+ wmb();
+
+ /* Trigger the PF FW */
+ writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+ /* Wait for PF to complete */
+ while ((tout >= 0) && (!*done)) {
+ msleep(interval);
+ tout -= 1;
+
+ /* progress indicator - HV can take its own sweet time in
+ * answering VFs...
+ */
+ DP_CONT(BNX2X_MSG_IOV, ".");
+ }
+
+ if (!*done) {
+ BNX2X_ERR("PF response has timed out\n");
+ return -EAGAIN;
+ }
+ DP(BNX2X_MSG_SP, "Got a response from PF\n");
+ return 0;
+}
+
+int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+{
+ u32 me_reg;
+ int tout = 10, interval = 100; /* Wait for 1 sec */
+
+ do {
+ /* pxp traps vf read of doorbells and returns me reg value */
+ me_reg = readl(bp->doorbells);
+ if (GOOD_ME_REG(me_reg))
+ break;
+
+ msleep(interval);
+
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
+ me_reg);
+ } while (tout-- > 0);
+
+ if (!GOOD_ME_REG(me_reg)) {
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
+ return -EINVAL;
+ }
+
+ BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+
+ *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+
+ return 0;
+}
+
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
+{
+ int rc = 0, attempts = 0;
+ struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
+ struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ u32 vf_id;
+ bool resources_acquired = false;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ return -EAGAIN;
+
+ req->vfdev_info.vf_id = vf_id;
+ req->vfdev_info.vf_os = 0;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = bp->igu_sb_cnt;
+ req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
+ req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ while (!resources_acquired) {
+ DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = bnx2x_send_msg2pf(bp,
+ &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+
+ /* PF timeout */
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to bp */
+ memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
+
+ attempts++;
+
+ /* test whether the PF accepted our request. If not, humble
+ * the request and try again.
+ */
+ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
+ DP(BNX2X_MSG_SP, "resources acquired\n");
+ resources_acquired = true;
+ } else if (bp->acquire_resp.hdr.status ==
+ PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP(BNX2X_MSG_SP,
+ "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs =
+ bp->acquire_resp.resc.num_txqs;
+ req->resc_request.num_rxqs =
+ bp->acquire_resp.resc.num_rxqs;
+ req->resc_request.num_sbs =
+ bp->acquire_resp.resc.num_sbs;
+ req->resc_request.num_mac_filters =
+ bp->acquire_resp.resc.num_mac_filters;
+ req->resc_request.num_vlan_filters =
+ bp->acquire_resp.resc.num_vlan_filters;
+ req->resc_request.num_mc_filters =
+ bp->acquire_resp.resc.num_mc_filters;
+
+ /* Clear response buffer */
+ memset(&bp->vf2pf_mbox->resp, 0,
+ sizeof(union pfvf_tlvs));
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+ bp->acquire_resp.hdr.status);
+ return -EAGAIN;
+ }
+ }
+
+ /* get HW info */
+ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
+ bp->link_params.chip_id = bp->common.chip_id;
+ bp->db_size = bp->acquire_resp.pfdev_info.db_size;
+ bp->common.int_block = INT_BLOCK_IGU;
+ bp->common.chip_port_mode = CHIP_2_PORT_MODE;
+ bp->igu_dsb_id = -1;
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->common.flash_size = 0;
+ bp->flags |=
+ NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
+ bp->igu_sb_cnt = 1;
+ bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ sizeof(bp->fw_ver));
+
+ if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
+ memcpy(bp->dev->dev_addr,
+ bp->acquire_resp.resc.current_mac_addr,
+ ETH_ALEN);
+
+ return 0;
+}
+
+int bnx2x_vfpf_release(struct bnx2x *bp)
+{
+ struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ u32 rc = 0, vf_id;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ return -EAGAIN;
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send release request */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ /* PF timeout */
+ return rc;
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF released us */
+ DP(BNX2X_MSG_SP, "vf released\n");
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/* Tell PF about SB addresses */
+int bnx2x_vfpf_init(struct bnx2x *bp)
+{
+ struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
+
+ /* status blocks */
+ for_each_eth_queue(bp, i)
+ req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
+ status_blk_mapping);
+
+ /* statistics - requests only supports single queue for now */
+ req->stats_addr = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+
+ DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
+ return 0;
+}
+
+/* CLOSE VF - opposite to INIT_VF */
+void bnx2x_vfpf_close_vf(struct bnx2x *bp)
+{
+ struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int i, rc;
+ u32 vf_id;
+
+ /* If we haven't got a valid VF id, there is no sense in
+ * continuing to send messages
+ */
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ goto free_irq;
+
+ /* Close the queues */
+ for_each_queue(bp, i)
+ bnx2x_vfpf_teardown_queue(bp, i);
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
+
+ else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
+ resp->hdr.status);
+
+free_irq:
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+}
+
+/* ask the pf to open a queue for the vf */
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+{
+ struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u16 tpa_agg_size = 0, flags = 0;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
+
+ /* select tpa mode to request */
+ if (!fp->disable_tpa) {
+ flags |= VFPF_QUEUE_FLG_TPA;
+ flags |= VFPF_QUEUE_FLG_TPA_IPV6;
+ if (fp->mode == TPA_MODE_GRO)
+ flags |= VFPF_QUEUE_FLG_TPA_GRO;
+ tpa_agg_size = TPA_AGG_SIZE;
+ }
+
+ /* calculate queue flags */
+ flags |= VFPF_QUEUE_FLG_STATS;
+ flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
+ flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
+ flags |= VFPF_QUEUE_FLG_VLAN;
+ DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+
+ /* Common */
+ req->vf_qid = fp_idx;
+ req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
+
+ /* Rx */
+ req->rxq.rcq_addr = fp->rx_comp_mapping;
+ req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+ req->rxq.rxq_addr = fp->rx_desc_mapping;
+ req->rxq.sge_addr = fp->rx_sge_mapping;
+ req->rxq.vf_sb = fp_idx;
+ req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
+ req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
+ req->rxq.mtu = bp->dev->mtu;
+ req->rxq.buf_sz = fp->rx_buf_size;
+ req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
+ req->rxq.tpa_agg_sz = tpa_agg_size;
+ req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+ req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ req->rxq.flags = flags;
+ req->rxq.drop_flags = 0;
+ req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ req->rxq.stat_id = -1; /* No stats at the moment */
+
+ /* Tx */
+ req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
+ req->txq.vf_sb = fp_idx;
+ req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
+ req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
+ req->txq.flags = flags;
+ req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
+ fp_idx);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
+ fp_idx, resp->hdr.status);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+{
+ struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
+ sizeof(*req));
+
+ req->vf_qid = qidx;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc) {
+ BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
+ rc);
+ return rc;
+ }
+
+ /* PF failed the transaction */
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
+ resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* request pf to add a mac for the vf */
+int bnx2x_vfpf_set_mac(struct bnx2x *bp)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = 0;
+ req->n_mac_vlan_filters = 1;
+ req->filters[0].flags =
+ VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;
+
+ /* sample bulletin board for new mac */
+ bnx2x_sample_bulletin(bp);
+
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ return rc;
+ }
+
+ /* failure may mean PF was configured with a new mac for us */
+ while (resp->hdr.status == PFVF_STATUS_FAILURE) {
+ DP(BNX2X_MSG_IOV,
+ "vfpf SET MAC failed. Check bulletin board for new posts\n");
+
+ /* check if bulletin board was updated */
+ if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr,
+ ETH_ALEN);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+ } else {
+ /* no new info in bulletin */
+ break;
+ }
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_vfpf_set_mcast(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i = 0;
+ struct netdev_hw_addr *ha;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return -EINVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ /* Get Rx mode requested */
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ /* We support at most PFVF_MAX_MULTICAST_PER_VF mcast
+ * addresses; bail out before overflowing req->multicast[]
+ */
+ if (i >= PFVF_MAX_MULTICAST_PER_VF) {
+ DP(NETIF_MSG_IFUP,
+ "VF supports no more than %d multicast MAC addresses\n",
+ PFVF_MAX_MULTICAST_PER_VF);
+ return -EINVAL;
+ }
+ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ bnx2x_mc_addr(ha));
+ memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
+ i++;
+ }
+
+ req->n_multicast = i;
+ req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+ return rc;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
+ resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
+{
+ int mode = bp->rx_mode;
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
+
+ switch (mode) {
+ case BNX2X_RX_MODE_NONE: /* no Rx */
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ default:
+ BNX2X_ERR("BAD rx mode (%d)\n", mode);
+ return -EINVAL;
+ }
+
+ req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+/* General service functions */
+static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
+}
+
+static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, 1);
+}
+
+static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_vf(bp, i)
+ storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
+}
+
+/* enable vf_pf mailbox (aka vf-pf-channel) */
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
+{
+ bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
+
+ /* enable the mailbox in the FW */
+ storm_memset_vf_mbx_ack(bp, abs_vfid);
+ storm_memset_vf_mbx_valid(bp, abs_vfid);
+
+ /* enable the VF access to the mailbox */
+ bnx2x_vf_enable_access(bp, abs_vfid);
+}
+
+/* this works only on !E1h */
+static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
+ dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
+ u32 vf_addr_lo, u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_ERR("Chip revision does not support VFs\n");
+ return DMAE_NOT_RDY;
+ }
+
+ if (!bp->dmae_ready) {
+ BNX2X_ERR("DMAE is not ready, can not copy\n");
+ return DMAE_NOT_RDY;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
+
+ if (from_vf) {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
+ (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
+ (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = vf_addr_lo;
+ dmae.src_addr_hi = vf_addr_hi;
+ dmae.dst_addr_lo = U64_LO(pf_addr);
+ dmae.dst_addr_hi = U64_HI(pf_addr);
+ } else {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
+ (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
+ (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = U64_LO(pf_addr);
+ dmae.src_addr_hi = U64_HI(pf_addr);
+ dmae.dst_addr_lo = vf_addr_lo;
+ dmae.dst_addr_hi = vf_addr_hi;
+ }
+ dmae.len = len32;
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);
+
+ /* issue the command and wait for completion */
+ return bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
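+/* send a response back to the VF over its mailbox: the response body (if any)
+ * is DMAE-copied to the VF first, the FW channel is acked, and only then is
+ * the header with the status/done field copied, so the VF never observes a
+ * completed header before the rest of the response is in place
+ */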
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ u64 vf_addr;
+ dma_addr_t pf_addr;
+ u16 length, type;
+ int rc;
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+
+ /* prepare response */
+ type = mbx->first_tlv.tl.type;
+ length = type == CHANNEL_TLV_ACQUIRE ?
+ sizeof(struct pfvf_acquire_resp_tlv) :
+ sizeof(struct pfvf_general_resp_tlv);
+ bnx2x_add_tlv(bp, resp, 0, type, length);
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+ bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+ bnx2x_dp_tlv_list(bp, resp);
+ DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+ mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
+ /* send response */
+ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
+ mbx->first_tlv.resp_msg_offset;
+ pf_addr = mbx->msg_mapping +
+ offsetof(struct bnx2x_vf_mbx_msg, resp);
+
+ /* copy the response body, if there is one, before the header, as the vf
+ * is sensitive to the header being written
+ */
+ if (resp->hdr.tl.length > sizeof(u64)) {
+ length = resp->hdr.tl.length - sizeof(u64);
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ length/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
+ }
+
+ /* ack the FW */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ mmiowb();
+
+ /* initiate dmae to send the response */
+ mbx->flags &= ~VF_MSG_INPROCESS;
+
+ /* copy the response header including status-done field,
+ * must be last dmae, must be after FW is acked
+ */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ sizeof(u64)/4);
+
+ /* unlock channel mutex */
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ if (rc) {
+ BNX2X_ERR("Failed to copy response status to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ return;
+
+mbx_error:
+ bnx2x_vf_release(bp, vf, false); /* non blocking */
+}
+
+static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx, int vfop_status)
+{
+ int i;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
+ struct pf_vf_resc *resc = &resp->resc;
+ u8 status = bnx2x_pfvf_status_codes(vfop_status);
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* fill in pfdev info */
+ resp->pfdev_info.chip_num = bp->common.chip_id;
+ resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+ resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
+ resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
+ /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+ bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
+ sizeof(resp->pfdev_info.fw_ver));
+
+ if (status == PFVF_STATUS_NO_RESOURCE ||
+ status == PFVF_STATUS_SUCCESS) {
+ /* set resource numbers; if status equals NO_RESOURCE these
+ * are the max possible numbers
+ */
+ resc->num_rxqs = vf_rxq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_txqs = vf_txq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_sbs = vf_sb_count(vf);
+ resc->num_mac_filters = vf_mac_rules_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+ resc->num_mc_filters = 0;
+
+ if (status == PFVF_STATUS_SUCCESS) {
+ /* fill in the allocated resources */
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
+
+ for_each_vfq(vf, i)
+ resc->hw_qid[i] =
+ vfq_qzone_id(vf, vfq_get(vf, i));
+
+ for_each_vf_sb(vf, i) {
+ resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
+ resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
+ }
+
+ /* if a mac has been set for this vf, supply it */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ memcpy(resc->current_mac_addr, bulletin->mac,
+ ETH_ALEN);
+ }
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
+ vf->abs_vfid,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.pf_cap,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters,
+ resc->num_mc_filters,
+ resp->pfdev_info.fw_ver);
+
+ DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
+ for (i = 0; i < vf_rxq_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
+ DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
+ for (i = 0; i < vf_sb_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
+ resc->hw_sbs[i].hw_sb_id,
+ resc->hw_sbs[i].sb_qid);
+ DP_CONT(BNX2X_MSG_IOV, "]\n");
+
+ /* send the response */
+ vf->op_rc = vfop_status;
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+ struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
+
+ /* log vfdev info */
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
+ vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
+ acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
+ acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
+ acquire->resc_request.num_vlan_filters,
+ acquire->resc_request.num_mc_filters);
+
+ /* acquire the resources */
+ rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
+
+ /* store address of vf's bulletin board */
+ vf->bulletin_map = acquire->bulletin_addr;
+
+ /* response */
+ bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
+}
+
+static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_init_tlv *init = &mbx->msg->req.init;
+
+ /* record ghost addresses from vf message */
+ vf->spq_map = init->spq_addr;
+ vf->fw_stat_map = init->stats_addr;
+ vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+
+ /* response */
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+/* convert MBX queue-flags to standard SP queue-flags */
+static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
+ unsigned long *sp_q_flags)
+{
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
+ __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
+ __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
+ __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
+ __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
+ __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
+ __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
+ __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+}
+
+static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ /* verify vf_qid */
+ if (setup_q->vf_qid >= vf_rxq_count(vf)) {
+ BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
+ setup_q->vf_qid, vf_rxq_count(vf));
+ vf->op_rc = -EINVAL;
+ goto response;
+ }
+
+ /* tx queues must be set up alongside rx queues, thus if the rx queue
+ * is not marked as valid there's nothing to do.
+ */
+ if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
+ unsigned long q_type = 0;
+
+ struct bnx2x_queue_init_params *init_p;
+ struct bnx2x_queue_setup_params *setup_p;
+
+ /* reinit the VF operation context */
+ memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
+ setup_p = &vf->op_params.qctor.prep_qsetup;
+ init_p = &vf->op_params.qctor.qstate.params.init;
+
+ /* activate immediately */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
+
+ if (setup_q->param_valid & VFPF_TXQ_VALID) {
+ struct bnx2x_txq_setup_params *txq_params =
+ &setup_p->txq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* save sb resource index */
+ q->sb_idx = setup_q->txq.vf_sb;
+
+ /* tx init */
+ init_p->tx.hc_rate = setup_q->txq.hc_rate;
+ init_p->tx.sb_cq_index = setup_q->txq.sb_index;
+
+ bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ &init_p->tx.flags);
+
+ /* tx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ &setup_p->flags);
+
+ /* tx setup - general, nothing */
+
+ /* tx setup - tx */
+ txq_params->dscr_map = setup_q->txq.txq_addr;
+ txq_params->sb_cq_index = setup_q->txq.sb_index;
+ txq_params->traffic_type = setup_q->txq.traffic_type;
+
+ bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+
+ if (setup_q->param_valid & VFPF_RXQ_VALID) {
+ struct bnx2x_rxq_setup_params *rxq_params =
+ &setup_p->rxq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Note: there is no support for different SBs
+ * for TX and RX
+ */
+ q->sb_idx = setup_q->rxq.vf_sb;
+
+ /* rx init */
+ init_p->rx.hc_rate = setup_q->rxq.hc_rate;
+ init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
+ bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ &init_p->rx.flags);
+
+ /* rx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ &setup_p->flags);
+
+ /* rx setup - general */
+ setup_p->gen_params.mtu = setup_q->rxq.mtu;
+
+ /* rx setup - rx */
+ rxq_params->drop_flags = setup_q->rxq.drop_flags;
+ rxq_params->dscr_map = setup_q->rxq.rxq_addr;
+ rxq_params->sge_map = setup_q->rxq.sge_addr;
+ rxq_params->rcq_map = setup_q->rxq.rcq_addr;
+ rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
+ rxq_params->buf_sz = setup_q->rxq.buf_sz;
+ rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
+ rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
+ rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
+ rxq_params->cache_line_log =
+ setup_q->rxq.cache_line_log;
+ rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+
+ bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+ /* complete the preparations */
+ bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
+
+ vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
+ if (vf->op_rc)
+ goto response;
+ return;
+ }
+response:
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+enum bnx2x_vfop_filters_state {
+ BNX2X_VFOP_MBX_Q_FILTERS_MACS,
+ BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
+ BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
+ BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
+ BNX2X_VFOP_MBX_Q_FILTERS_DONE
+};
+
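+/* build a driver-side filter list (bnx2x_vfop_filters) from the mac/vlan
+ * filters in the SET_Q_FILTERS TLV that match 'type_flag'; *pfl is left
+ * untouched (NULL) when no matching filter is found
+ */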
+static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *tlv,
+ struct bnx2x_vfop_filters **pfl,
+ u32 type_flag)
+{
+ int i, j;
+ struct bnx2x_vfop_filters *fl = NULL;
+ size_t fsz;
+
+ fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
+ sizeof(struct bnx2x_vfop_filters);
+
+ fl = kzalloc(fsz, GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fl->head);
+
+ for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
+ struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
+
+ if ((msg_filter->flags & type_flag) != type_flag)
+ continue;
+ if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+ fl->filters[j].mac = msg_filter->mac;
+ fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
+ } else {
+ fl->filters[j].vid = msg_filter->vlan_tag;
+ fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
+ }
+ fl->filters[j].add =
+ (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
+ true : false;
+ list_add_tail(&fl->filters[j++].link, &fl->head);
+ }
+ if (list_empty(&fl->head))
+ kfree(fl);
+ else
+ *pfl = fl;
+
+ return 0;
+}
+
+static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
+ struct vfpf_q_mac_vlan_filter *filter)
+{
+ DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
+ if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
+ DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
+ if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
+ DP_CONT(msglvl, ", MAC=%pM", filter->mac);
+ DP_CONT(msglvl, "\n");
+}
+
+static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ int i;
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
+ &filters->filters[i]);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
+ DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
+ for (i = 0; i < filters->n_multicast; i++)
+ DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
+}
+
+#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
+#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
+
+static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc;
+
+ struct vfpf_set_q_filters_tlv *msg =
+ &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
+
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ enum bnx2x_vfop_filters_state state = vfop->state;
+
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_mbx_qfilters,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ switch (state) {
+ case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
+
+ /* check for any vlan/mac changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build mac list */
+ struct bnx2x_vfop_filters *fl = NULL;
+
+ vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_MAC_FILTER);
+ if (vfop->rc)
+ goto op_err;
+
+ if (fl) {
+ /* set mac list */
+ rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
+ msg->vf_qid,
+ false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
+
+ /* check for any vlan/mac changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build vlan list */
+ struct bnx2x_vfop_filters *fl = NULL;
+
+ vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (vfop->rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
+ msg->vf_qid,
+ false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+ unsigned long accept = 0;
+
+ /* convert the VF-PF rx_mask to bnx2x accept flags */
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+
+ if (msg->rx_mask &
+ VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+
+ /* A packet arriving at the vf's mac should be accepted
+ * with any vlan
+ */
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+ /* set rx-mode */
+ rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
+ msg->vf_qid, accept);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+ /* set mcasts */
+ rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
+ msg->n_multicast, false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ /* fall through */
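+ /* op_done sits just ahead of the DONE case so that op_err can
+ * complete the operation through the same end path.
+ */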
+op_done:
+ case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+op_err:
+ BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+ vf->abs_vfid, msg->vf_qid, vfop->rc);
+ goto op_done;
+
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
+ bnx2x_vfop_mbx_qfilters, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ /* if a mac was already set for this VF via the set vf mac ndo, we only
+ * accept mac configurations of that mac. Why accept them at all?
+ * Because the PF may have been unable to configure the mac at the
+ * time, since the queue was not yet set up.
+ */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ /* once a mac was set by the ndo, we can only accept a single mac... */
+ if (filters->n_mac_vlan_filters > 1) {
+ BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
+ vf->abs_vfid);
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+
+ /* ...and only the mac set by the ndo */
+ if (filters->n_mac_vlan_filters == 1 &&
+ memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
+ vf->abs_vfid);
+
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+ }
+
+ /* verify vf_qid */
+ if (filters->vf_qid > vf_rxq_count(vf))
+ goto response;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
+ vf->abs_vfid,
+ filters->vf_qid);
+
+ /* print q_filter message */
+ bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
+
+ vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ goto response;
+ return;
+
+response:
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int qid = mbx->msg->req.q_op.vf_qid;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
+ vf->abs_vfid, qid);
+
+ vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
+
+ vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
+
+ vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+/* dispatch request */
+static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int i;
+
+ /* check if tlv type is known */
+ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ /* switch on the opcode */
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ bnx2x_vf_mbx_acquire(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_INIT:
+ bnx2x_vf_mbx_init_vf(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_SETUP_Q:
+ bnx2x_vf_mbx_setup_q(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_SET_Q_FILTERS:
+ bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_TEARDOWN_Q:
+ bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ bnx2x_vf_mbx_close_vf(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ bnx2x_vf_mbx_release_vf(bp, vf, mbx);
+ break;
+ }
+
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features not yet known to us. Too bad, since we can't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+ for (i = 0; i < 20; i++)
+ DP_CONT(BNX2X_MSG_IOV, "%x ",
+ mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+
+ /* test whether we can respond to the VF (do we have an address
+ * for it?)
+ */
+ if (vf->state == VF_ACQUIRED) {
+ /* mbx_resp uses the op_rc of the VF */
+ vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
+
+ /* notify the VF that we do not support this request */
+ bnx2x_vf_mbx_resp(bp, vf);
+ } else {
+ /* can't send a response since this VF is unknown to us;
+ * just unlock the channel and be done with it.
+ */
+ bnx2x_unlock_vf_pf_channel(bp, vf,
+ mbx->first_tlv.tl.type);
+ }
+ }
+}
+
+/* handle new vf-pf message */
+void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
+{
+ struct bnx2x_virtf *vf;
+ struct bnx2x_vf_mbx *mbx;
+ u8 vf_idx;
+ int rc;
+
+ DP(BNX2X_MSG_IOV,
+ "vf pf event received: vfid %d, address_hi %x, address lo %x",
+ vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
+ /* Sanity checks - consider removing later */
+
+ /* check if the vf_id is valid */
+ if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
+ BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
+ vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
+ goto mbx_done;
+ }
+ vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
+ mbx = BP_VF_MBX(bp, vf_idx);
+
+ /* verify an event is not currently being processed -
+ * debug failsafe only
+ */
+ if (mbx->flags & VF_MSG_INPROCESS) {
+ BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
+ vfpf_event->vf_id);
+ goto mbx_done;
+ }
+ vf = BP_VF(bp, vf_idx);
+
+ /* save the VF message address */
+ mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
+ mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
+ DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+ mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
+ /* dmae to get the VF request */
+ rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
+ mbx->vf_addr_hi, mbx->vf_addr_lo,
+ sizeof(union vfpf_tlvs)/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
+ goto mbx_error;
+ }
+
+ /* process the VF message header */
+ mbx->first_tlv = mbx->msg->req.first_tlv;
+
+ /* dispatch the request (will prepare the response) */
+ bnx2x_vf_mbx_request(bp, vf, mbx);
+ goto mbx_done;
+
+mbx_error:
+ bnx2x_vf_release(bp, vf, false); /* non blocking */
+mbx_done:
+ return;
+}
+
+/* propagate local bulletin board to vf */
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
+ dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
+ vf * BULLETIN_CONTENT_SIZE;
+ dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
+ int rc;
+
+ /* can only update vf after init took place */
+ if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
+ bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
+ return 0;
+
+ /* increment bulletin board version and compute crc */
+ bulletin->version++;
+ bulletin->length = BULLETIN_CONTENT_SIZE;
+ bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
+
+ /* propagate bulletin board via dmae to vm memory */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
+ bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
+ U64_LO(vf_addr), bulletin->length / 4);
+ return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
new file mode 100644
index 000000000000..bfc80baec00d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -0,0 +1,360 @@
+/* bnx2x_vfpf.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2011-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Ariel Elior <ariele@broadcom.com>
+ */
+#ifndef VF_PF_IF_H
+#define VF_PF_IF_H
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* Common definitions for all HVs */
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+};
+
+struct hw_sb_info {
+ u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
+};
+
+/* HW VF-PF channel definitions
+ * A.K.A VF-PF mailbox
+ */
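+/* A request is a chain of TLVs that the VF builds in its own DMA-able
+ * buffer; the PF copies the buffer over with DMAE (see bnx2x_vf_mbx()),
+ * handles the first TLV according to its type and writes a pfvf response
+ * back to the VF at an address derived from resp_msg_offset.
+ */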
+#define TLV_BUFFER_SIZE 1024
+#define PF_VF_BULLETIN_SIZE 512
+
+#define VFPF_QUEUE_FLG_TPA 0x0001
+#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002
+#define VFPF_QUEUE_FLG_TPA_GRO 0x0004
+#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008
+#define VFPF_QUEUE_FLG_STATS 0x0010
+#define VFPF_QUEUE_FLG_OV 0x0020
+#define VFPF_QUEUE_FLG_VLAN 0x0040
+#define VFPF_QUEUE_FLG_COS 0x0080
+#define VFPF_QUEUE_FLG_HC 0x0100
+#define VFPF_QUEUE_FLG_DHC 0x0200
+
+#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
+#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
+#define VFPF_QUEUE_DROP_TTL0 (1 << 2)
+#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3)
+
+#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing in the towel */
+#define BULLETIN_CRC_SEED 0
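+/* The VF detects a torn read by recomputing the crc of the sampled content
+ * and retrying on mismatch, giving up after BULLETIN_ATTEMPTS failures; a
+ * bumped 'version' indicates that the PF has posted new content.
+ */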
+
+enum {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 resp_msg_offset;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_general_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+ /* the following fields are for debug purposes */
+ u8 vf_id; /* ME register value */
+ u8 vf_os; /* e.g. Linux, W2K8 */
+ u8 padding[2];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ aligned_u64 bulletin_addr;
+};
+
+/* simple operation request on queue */
+struct vfpf_q_op_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 vf_qid;
+ u8 padding[3];
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 pf_cap;
+#define PFVF_CAP_RSS 0x00000001
+#define PFVF_CAP_DHC 0x00000002
+#define PFVF_CAP_TPA 0x00000004
+ char fw_ver[32];
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 padding;
+ } pfdev_info;
+ struct pf_vf_resc {
+ /* in case of status NO_RESOURCE in message hdr, pf will fill
+ * this struct with suggested amount of resources for next
+ * acquire request
+ */
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 permanent_mac_addr[ETH_ALEN];
+ u8 current_mac_addr[ETH_ALEN];
+ u8 padding[2];
+ } resc;
+};
+
+/* Init VF */
+struct vfpf_init_tlv {
+ struct vfpf_first_tlv first_tlv;
+ aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
+ aligned_u64 spq_addr;
+ aligned_u64 stats_addr;
+};
+
+/* Setup Queue */
+struct vfpf_setup_q_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_rxq_params {
+ /* physical addresses */
+ aligned_u64 rcq_addr;
+ aligned_u64 rcq_np_addr;
+ aligned_u64 rxq_addr;
+ aligned_u64 sge_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ /* rx buffer info */
+ u16 mtu;
+ u16 buf_sz;
+ u16 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+
+ /* valid iff VFPF_QUEUE_FLG_TPA */
+ u16 sge_buf_sz;
+ u16 tpa_agg_sz;
+ u8 max_sge_pkt;
+
+ u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs
+ * all the flags are turned off
+ */
+
+ u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */
+ u8 padding;
+ } rxq;
+
+ struct vf_pf_txq_params {
+ /* physical addresses */
+ aligned_u64 txq_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+ u8 traffic_type; /* see in setup_context() */
+ u8 padding;
+ } txq;
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 param_valid;
+#define VFPF_RXQ_VALID 0x01
+#define VFPF_TXQ_VALID 0x02
+ u8 padding[2];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+};
+
+/* configure queue filters */
+struct vfpf_set_q_filters_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u32 flags;
+#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01
+#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02
+#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 n_mac_vlan_filters;
+ u8 n_multicast;
+ u8 padding;
+
+#define PFVF_MAX_MAC_FILTERS 16
+#define PFVF_MAX_VLAN_FILTERS 16
+#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\
+ PFVF_MAX_VLAN_FILTERS)
+ struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS];
+
+#define PFVF_MAX_MULTICAST_PER_VF 32
+ u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN];
+
+ u32 rx_mask; /* see mask constants at the top of the file */
+};
+
+/* close VF (disable VF) */
+struct vfpf_close_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id; /* for debug */
+ u8 padding[2];
+};
+
+/* release the VF's acquired resources */
+struct vfpf_release_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id;
+ u8 padding[2];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_init_tlv init;
+ struct vfpf_close_tlv close;
+ struct vfpf_q_op_tlv q_op;
+ struct vfpf_setup_q_tlv setup_q;
+ struct vfpf_set_q_filters_tlv set_q_filters;
+ struct vfpf_release_tlv release;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+/* This structure is allocated in the VF and may be updated by the PF
+ * whenever it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A per-VF copy is maintained in the PF to prevent
+ * loss of data upon multiple updates (and to avoid read-modify-write).
+ */
+struct pf_vf_bulletin_size {
+ u8 size[PF_VF_BULLETIN_SIZE];
+};
+
+struct pf_vf_bulletin_content {
+ u32 crc; /* crc of structure to ensure it is not
+ * read mid-update
+ */
+ u16 version;
+ u16 length;
+
+ aligned_u64 valid_bitmap; /* bitmap indicating which fields
+ * hold valid values
+ */
+
+#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
+ * is available for it
+ */
+
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
+union pf_vf_bulletin {
+ struct pf_vf_bulletin_content content;
+ struct pf_vf_bulletin_size size;
+};
+
+#define MAX_TLVS_IN_LIST 50
+
+enum channel_tlvs {
+ CHANNEL_TLV_NONE,
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_INIT,
+ CHANNEL_TLV_SETUP_Q,
+ CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_TEARDOWN_Q,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_PF_RELEASE_VF,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_FLR,
+ CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_MAX
+};
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* VF_PF_IF_H */
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index df8c30d1a52c..149a3a038491 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4816,6 +4816,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
+ ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
+
return 0;
}
@@ -5136,6 +5138,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return ret;
+ ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
return 0;
}
@@ -5387,6 +5390,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
}
cnic_shutdown_rings(dev);
cp->stop_cm(dev);
+ cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -5421,11 +5425,9 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
- cdev = kzalloc(alloc_size , GFP_KERNEL);
- if (cdev == NULL) {
- netdev_err(dev, "allocate dev struct failure\n");
+ cdev = kzalloc(alloc_size, GFP_KERNEL);
+ if (cdev == NULL)
return NULL;
- }
cdev->netdev = dev;
cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 2a35436f9095..0c9367a0f57d 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -179,6 +179,7 @@ struct cnic_eth_dev {
#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
#define CNIC_DRV_STATE_NO_FCOE 0x00000010
+#define CNIC_DRV_STATE_HANDLES_IRQ 0x00000020
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 3a1c8a3cf7c9..e9b35da375cb 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2385,7 +2385,7 @@ static int sbmac_mii_probe(struct net_device *dev)
return -ENXIO;
}
- phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0,
+ phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll,
PHY_INTERFACE_MODE_GMII);
if (IS_ERR(phy_dev)) {
printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 78ea90c40e19..fdb9b5655414 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2012 Broadcom Corporation.
+ * Copyright (C) 2005-2013 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -44,6 +44,7 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -93,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 128
+#define TG3_MIN_NUM 130
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "December 03, 2012"
+#define DRV_MODULE_RELDATE "February 14, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -263,6 +264,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
TG3_DRV_DATA_FLAG_5705_10_100},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
@@ -330,6 +332,10 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -570,7 +576,9 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
tp->write32_mbox(tp, off, val);
- if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
+ (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
+ !tg3_flag(tp, ICH_WORKAROUND)))
tp->read32_mbox(tp, off);
}
@@ -580,7 +588,8 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
writel(val, mbox);
if (tg3_flag(tp, TXD_MBOX_HWBUG))
writel(val, mbox);
- if (tg3_flag(tp, MBOX_WRITE_REORDER))
+ if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
+ tg3_flag(tp, FLUSH_POSTED_WRITES))
readl(mbox);
}
@@ -609,7 +618,7 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
unsigned long flags;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
return;
@@ -634,7 +643,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
unsigned long flags;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
*val = 0;
return;
@@ -662,7 +671,7 @@ static void tg3_ape_lock_init(struct tg3 *tp)
int i;
u32 regbase, bit;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
regbase = TG3_APE_LOCK_GRANT;
else
regbase = TG3_APE_PER_LOCK_GRANT;
@@ -698,7 +707,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
switch (locknum) {
case TG3_APE_LOCK_GPIO:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
@@ -717,7 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return -EINVAL;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5761) {
req = TG3_APE_LOCK_REQ;
gnt = TG3_APE_LOCK_GRANT;
} else {
@@ -755,7 +764,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
switch (locknum) {
case TG3_APE_LOCK_GPIO:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
@@ -774,7 +783,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
gnt = TG3_APE_LOCK_GRANT;
else
gnt = TG3_APE_PER_LOCK_GRANT;
@@ -1088,7 +1097,8 @@ static void tg3_switch_clocks(struct tg3 *tp)
#define PHY_BUSY_LOOPS 5000
-static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
+ u32 *val)
{
u32 frame_val;
unsigned int loops;
@@ -1104,7 +1114,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
*val = 0x0;
- frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -1141,7 +1151,13 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
return ret;
}
-static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+ return __tg3_readphy(tp, tp->phy_addr, reg, val);
+}
+
+static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
+ u32 val)
{
u32 frame_val;
unsigned int loops;
@@ -1159,7 +1175,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
tg3_ape_lock(tp, tp->phy_ape_lock);
- frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -1194,6 +1210,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
return ret;
}
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+ return __tg3_writephy(tp, tp->phy_addr, reg, val);
+}
+
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
int err;
@@ -1283,14 +1304,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
- MII_TG3_AUXCTL_ACTL_TX_6DB)
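+/* Replaces the TG3_PHY_AUXCTL_SMDSP_ENABLE/DISABLE macro pair with a
+ * read-modify-write helper that toggles the SMDSP enable bit while always
+ * keeping the TX 6dB coding bit set.
+ */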
+static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
+{
+ u32 val;
+ int err;
-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_TX_6DB);
+ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+ else
+ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+
+ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+ return err;
+}
static int tg3_bmcr_reset(struct tg3 *tp)
{
@@ -1446,7 +1479,7 @@ static void tg3_mdio_start(struct tg3 *tp)
udelay(80);
if (tg3_flag(tp, MDIOBUS_INITED) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
}
@@ -1461,7 +1494,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->phy_addr = tp->pci_fn + 1;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
else
is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
@@ -1549,7 +1582,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tg3_flag_set(tp, MDIOBUS_INITED);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
return 0;
@@ -1766,7 +1799,12 @@ static int tg3_poll_fw(struct tg3 *tp)
int i;
u32 val;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /* We don't use firmware. */
+ return 0;
+ }
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
/* Wait up to 20ms for init done. */
for (i = 0; i < 200; i++) {
if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
@@ -1795,7 +1833,7 @@ static int tg3_poll_fw(struct tg3 *tp)
netdev_info(tp->dev, "No firmware running\n");
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
/* The 57765 A0 needs a little more
* time to do some important work.
*/
@@ -1925,7 +1963,7 @@ static void tg3_adjust_link(struct net_device *dev)
if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else if (phydev->speed == SPEED_1000 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+ tg3_asic_rev(tp) != ASIC_REV_5785)
mac_mode |= MAC_MODE_PORT_MODE_GMII;
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -1952,7 +1990,7 @@ static void tg3_adjust_link(struct net_device *dev)
udelay(40);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5785) {
if (phydev->speed == SPEED_10)
tw32(MAC_MI_STAT,
MAC_MI_STAT_10MBPS_MODE |
@@ -2001,8 +2039,8 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Attach the MAC to the PHY. */
- phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
- phydev->dev_flags, phydev->interface);
+ phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
+ tg3_adjust_link, phydev->interface);
if (IS_ERR(phydev)) {
dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
@@ -2144,7 +2182,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
MII_TG3_MISC_SHDW_SCR5_DLPTLM |
MII_TG3_MISC_SHDW_SCR5_SDTL |
MII_TG3_MISC_SHDW_SCR5_C125OE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+ if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
@@ -2223,7 +2261,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
otp = tp->phy_otp;
- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
return;
phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
@@ -2248,7 +2286,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -2284,9 +2322,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
if (!tp->setlpicnt) {
if (current_link_up == 1 &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
val = tr32(TG3_CPMU_EEE_MODE);
@@ -2299,14 +2337,14 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
u32 val;
if (tp->link_config.active_speed == SPEED_1000 &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_flag(tp, 57765_CLASS)) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
val = tr32(TG3_CPMU_EEE_MODE);
@@ -2450,7 +2488,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000,
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (err)
return err;
@@ -2471,7 +2509,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
tg3_writephy(tp, MII_CTRL1000, phy9_orig);
@@ -2504,7 +2542,7 @@ static int tg3_phy_reset(struct tg3 *tp)
u32 val, cpmuctrl;
int err;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
udelay(40);
@@ -2519,9 +2557,9 @@ static int tg3_phy_reset(struct tg3 *tp)
tg3_link_report(tp);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
err = tg3_phy_reset_5703_4_5(tp);
if (err)
return err;
@@ -2529,8 +2567,8 @@ static int tg3_phy_reset(struct tg3 *tp)
}
cpmuctrl = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
cpmuctrl = tr32(TG3_CPMU_CTRL);
if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
tw32(TG3_CPMU_CTRL,
@@ -2548,8 +2586,8 @@ static int tg3_phy_reset(struct tg3 *tp)
tw32(TG3_CPMU_CTRL, cpmuctrl);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2572,10 +2610,10 @@ static int tg3_phy_reset(struct tg3 *tp)
out:
if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x201f, 0x2aaa);
tg3_phydsp_write(tp, 0x000a, 0x0323);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
@@ -2584,14 +2622,14 @@ out:
}
if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x000a, 0x310b);
tg3_phydsp_write(tp, 0x201f, 0x9506);
tg3_phydsp_write(tp, 0x401f, 0x14e2);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
@@ -2600,7 +2638,7 @@ out:
} else
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
}
@@ -2627,11 +2665,14 @@ out:
val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
/* adjust output voltage */
tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
}
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
+ tg3_phydsp_write(tp, 0xffb, 0x4000);
+
tg3_phy_toggle_automdix(tp, 1);
tg3_phy_set_wirespeed(tp);
return 0;
@@ -2657,8 +2698,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
u32 status, shift;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
else
status = tr32(TG3_CPMU_DRV_STATUS);
@@ -2667,8 +2708,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
status &= ~(TG3_GPIO_MSG_MASK << shift);
status |= (newstat << shift);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
else
tw32(TG3_CPMU_DRV_STATUS, status);
@@ -2681,9 +2722,9 @@ static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
if (!tg3_flag(tp, IS_NIC))
return 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
return -EIO;
@@ -2706,8 +2747,8 @@ static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
u32 grc_local_ctrl;
if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+ tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)
return;
grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
@@ -2730,8 +2771,8 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
if (!tg3_flag(tp, IS_NIC))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
(GRC_LCLCTRL_GPIO_OE0 |
GRC_LCLCTRL_GPIO_OE1 |
@@ -2763,7 +2804,7 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
u32 grc_local_ctrl = 0;
/* Workaround to prevent overdrawing Amps. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
grc_local_ctrl,
@@ -2835,9 +2876,9 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_frob_aux_power_5717(tp, include_wol ?
tg3_flag(tp, WOL_ENABLE) != 0 : 0);
return;
@@ -2889,7 +2930,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
u32 val;
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
u32 serdes_cfg = tr32(MAC_SERDES_CFG);
@@ -2901,7 +2942,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
return;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_bmcr_reset(tp);
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
@@ -2940,16 +2981,16 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
/* The PHY should not be powered down on some chips because
* of bugs.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5780 &&
(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
!tp->pci_fn))
return;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
val |= CPMU_LSPD_1000MB_MACCLK_12_5;
@@ -3332,7 +3373,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
!tg3_flag(tp, 57765_PLUS))
tw32(NVRAM_ADDR, phy_addr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
!tg3_flag(tp, 5755_PLUS) &&
(tp->nvram_jedecnum == JEDEC_ST) &&
(nvram_cmd & NVRAM_CMD_FIRST)) {
@@ -3417,7 +3458,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -3435,6 +3476,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
udelay(10);
} else {
+ /*
+ * There is only an Rx CPU for the 5750 derivative in the
+ * BCM4785.
+ */
+ if (tg3_flag(tp, IS_SSB_CORE))
+ return 0;
+
for (i = 0; i < 10000; i++) {
tw32(offset + CPU_STATE, 0xffffffff);
tw32(offset + CPU_MODE, CPU_MODE_HALT);
@@ -3588,7 +3636,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
info.fw_len = tp->fw->size - 12;
info.fw_data = &fw_data[3];
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
} else {
@@ -3646,8 +3694,8 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704) {
for (i = 0; i < 12; i++) {
tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
@@ -3766,7 +3814,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_setup_phy(tp, 0);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val;
val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3808,8 +3856,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
mac_mode = MAC_MODE_PORT_MODE_MII;
mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
SPEED_100 : SPEED_10;
if (tg3_5700_link_polarity(tp, speed))
@@ -3842,8 +3889,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
}
if (!tg3_flag(tp, WOL_SPEED_100MB) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)) {
u32 base_val;
base_val = tp->pci_clock_ctrl;
@@ -3854,13 +3901,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
CLOCK_CTRL_PWRDOWN_PLL133, 40);
} else if (tg3_flag(tp, 5780_CLASS) ||
tg3_flag(tp, CPMU_PRESENT) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_asic_rev(tp) == ASIC_REV_5906) {
/* do nothing */
} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
u32 newbits1, newbits2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_ALTCLK);
@@ -3882,8 +3929,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
if (!tg3_flag(tp, 5705_PLUS)) {
u32 newbits3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_44MHZ_CORE);
@@ -3902,8 +3949,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_frob_aux_power(tp, true);
/* Workaround for unstable PLL clock */
- if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
- (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+ if ((!tg3_flag(tp, IS_SSB_CORE)) &&
+ ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
+ (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
u32 val = tr32(0x7d00);
val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
@@ -3994,8 +4042,8 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
err = tg3_writephy(tp, MII_CTRL1000, new_adv);
@@ -4009,7 +4057,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
tw32(TG3_CPMU_EEE_MODE,
tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (!err) {
u32 err2;
@@ -4024,7 +4072,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (err)
val = 0;
- switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ switch (tg3_asic_rev(tp)) {
case ASIC_REV_5717:
case ASIC_REV_57765:
case ASIC_REV_57766:
@@ -4037,12 +4085,13 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
/* Fall through */
case ASIC_REV_5720:
+ case ASIC_REV_5762:
if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
MII_TG3_DSP_CH34TP2_HIBW01);
}
- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
if (!err)
err = err2;
}
@@ -4171,8 +4220,8 @@ static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
return false;
if (tgtadv &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
@@ -4256,9 +4305,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
/* Some third-party PHYs need to be reset on link going
* down.
*/
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+ if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5705) &&
tp->link_up) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -4300,8 +4349,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
return err;
}
}
- } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
/* 5701 {A0,B0} CRC bug workaround */
tg3_writephy(tp, 0x15, 0x0a75);
tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
@@ -4318,8 +4367,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
tg3_writephy(tp, MII_TG3_IMASK, ~0);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
@@ -4423,6 +4472,15 @@ relink:
if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
tg3_phy_copper_begin(tp);
+ if (tg3_flag(tp, ROBOSWITCH)) {
+ current_link_up = 1;
+ /* FIXME: use 100 MBit/s when a BCM5325 switch is used */
+ current_speed = SPEED_1000;
+ current_duplex = DUPLEX_FULL;
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+ }
+
tg3_readphy(tp, MII_BMSR, &bmsr);
if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
@@ -4441,11 +4499,31 @@ relink:
else
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ /* In order for the 5750 core in BCM4785 chip to work properly
+ * in RGMII mode, the Led Control Register must be set up.
+ */
+ if (tg3_flag(tp, RGMII_MODE)) {
+ u32 led_ctrl = tr32(MAC_LED_CTRL);
+ led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
+
+ if (tp->link_config.active_speed == SPEED_10)
+ led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
+ else if (tp->link_config.active_speed == SPEED_100)
+ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_100MBPS_ON);
+ else if (tp->link_config.active_speed == SPEED_1000)
+ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON);
+
+ tw32(MAC_LED_CTRL, led_ctrl);
+ udelay(40);
+ }
+
tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
if (tp->link_config.active_duplex == DUPLEX_HALF)
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
if (current_link_up == 1 &&
tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -4457,7 +4535,7 @@ relink:
* ??? send/receive packets...
*/
if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
- tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
@@ -4476,7 +4554,7 @@ relink:
}
udelay(40);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
current_link_up == 1 &&
tp->link_config.active_speed == SPEED_1000 &&
(tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
@@ -4931,8 +5009,8 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
port_a = 1;
current_link_up = 0;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
workaround = 1;
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
port_a = 0;
@@ -5261,7 +5339,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
bmsr |= BMSR_LSTATUS;
else
@@ -5330,8 +5408,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
bmcr = new_bmcr;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
bmsr |= BMSR_LSTATUS;
else
@@ -5466,7 +5543,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
else
err = tg3_setup_copper_phy(tp, force_reset);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
u32 scale;
val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
@@ -5484,7 +5561,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
(6 << TX_LENGTHS_IPG_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val |= tr32(MAC_TX_LENGTHS) &
(TX_LENGTHS_JMB_FRM_LEN_MSK |
TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -5773,10 +5851,8 @@ static void tg3_dump_state(struct tg3 *tp)
u32 *regs;
regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
- if (!regs) {
- netdev_err(tp->dev, "Failed allocating register dump buffer\n");
+ if (!regs)
return;
- }
if (tg3_flag(tp, PCI_EXPRESS)) {
/* Read up to but not including private PCI registers */
@@ -6950,6 +7026,9 @@ static void tg3_poll_controller(struct net_device *dev)
int i;
struct tg3 *tp = netdev_priv(dev);
+ if (tg3_irq_sync(tp))
+ return;
+
for (i = 0; i < tp->irq_cnt; i++)
tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
@@ -7107,7 +7186,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
dma_addr_t new_addr = 0;
int ret = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ if (tg3_asic_rev(tp) != ASIC_REV_5701)
new_skb = skb_copy(skb, GFP_ATOMIC);
else {
int more_headroom = 4 - ((unsigned long)skb->data & 3);
@@ -7281,7 +7360,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
else if (tg3_flag(tp, HW_TSO_1) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
if (tcp_opt_len || iph->ihl > 5) {
int tsflags;
@@ -7437,7 +7516,7 @@ static void tg3_mac_loopback(struct tg3 *tp, bool enable)
if (tg3_flag(tp, 5705_PLUS) ||
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tg3_asic_rev(tp) == ASIC_REV_5700)
tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
}
@@ -7496,7 +7575,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
udelay(40);
if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ tg3_asic_rev(tp) == ASIC_REV_5785) {
tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
MII_TG3_FET_PTEST_FRC_TX_LINK |
MII_TG3_FET_PTEST_FRC_TX_LOCK);
@@ -7520,7 +7599,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
if (masked_phy_id == TG3_PHY_ID_BCM5401)
@@ -8198,7 +8277,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
/* Set MAX PCI retry to zero. */
val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE))
val |= PCISTATE_RETRY_SAME_DMA;
/* Allow reads and writes to the APE register and memory space. */
@@ -8270,7 +8349,7 @@ static int tg3_chip_reset(struct tg3 *tp)
*/
tg3_save_pci_state(tp);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
tg3_flag(tp, 5755_PLUS))
tw32(GRC_FASTBOOT_PC, 0);
@@ -8305,7 +8384,7 @@ static int tg3_chip_reset(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
@@ -8315,19 +8394,19 @@ static int tg3_chip_reset(struct tg3 *tp)
if (tg3_flag(tp, PCI_EXPRESS)) {
/* Force PCIe 1.0a mode */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS) &&
tr32(TG3_PCIE_PHY_TSTCTL) ==
(TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
tw32(GRC_MISC_CFG, (1 << 29));
val |= (1 << 29);
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
tw32(GRC_VCPU_EXT_CTRL,
tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -8370,7 +8449,7 @@ static int tg3_chip_reset(struct tg3 *tp)
if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
u16 val16;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
int j;
u32 cfg_val;
@@ -8411,23 +8490,33 @@ static int tg3_chip_reset(struct tg3 *tp)
val = tr32(MEMARB_MODE);
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
tg3_stop_fw(tp);
tw32(0x5000, 0x400);
}
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /*
+ * BCM4785: In order to avoid repercussions from using
+ * potentially defective internal ROM, stop the Rx RISC CPU,
+ * which is not required.
+ */
+ tg3_stop_fw(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ }
+
tw32(GRC_MODE, tp->grc_mode);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
val = tr32(0xc4);
tw32(0xc4, val | (1 << 15));
}
if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
@@ -8453,15 +8542,15 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_mdio_start(tp);
if (tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS)) {
val = tr32(0x7c00);
tw32(0x7c00, val | (1 << 25));
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5720) {
val = tr32(TG3_CPMU_CLCK_ORIDE);
tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
}
@@ -8672,7 +8761,8 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
- else if (tg3_flag(tp, 57765_CLASS))
+ else if (tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
else
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8688,7 +8778,8 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
else
@@ -8794,12 +8885,12 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787)
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
else
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
@@ -8979,9 +9070,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Enable MAC control of LPI */
if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
- tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
- TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
- TG3_CPMU_EEE_LNKIDL_UART_IDL);
+ val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+ TG3_CPMU_EEE_LNKIDL_UART_IDL;
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
+ val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
+
+ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
tw32_f(TG3_CPMU_EEE_CTRL,
TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
@@ -8991,7 +9085,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_EEEMD_LPI_IN_RX |
TG3_CPMU_EEEMD_EEE_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ if (tg3_asic_rev(tp) != ASIC_REV_5717)
val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
if (tg3_flag(tp, ENABLE_APE))
@@ -9017,7 +9111,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_legacy(tp, RESET_KIND_INIT);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
val = tr32(TG3_CPMU_CTRL);
val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
tw32(TG3_CPMU_CTRL, val);
@@ -9038,7 +9132,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_CPMU_HST_ACC, val);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
PCIE_PWR_MGMT_L1_THRESH_4MS;
@@ -9068,7 +9162,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_CLASS)) {
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
u32 grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of PL PCIE block registers. */
@@ -9083,8 +9177,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
- u32 grc_mode = tr32(GRC_MODE);
+ if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
+ u32 grc_mode;
+
+ /* Fix transmit hangs */
+ val = tr32(TG3_CPMU_PADRNG_CTL);
+ val |= TG3_CPMU_PADRNG_CTL_RDIV2;
+ tw32(TG3_CPMU_PADRNG_CTL, val);
+
+ grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of DL PCIE block registers. */
val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
@@ -9116,7 +9217,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE)) {
val = tr32(TG3PCI_PCISTATE);
val |= PCISTATE_RETRY_SAME_DMA;
@@ -9134,7 +9235,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3PCI_PCISTATE, val);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
/* Enable some hw fixes. */
val = tr32(TG3PCI_MSI_DATA);
val |= (1 << 26) | (1 << 28) | (1 << 29);
@@ -9153,14 +9254,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, 57765_PLUS)) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
if (!tg3_flag(tp, 57765_CLASS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762)
val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
+ tg3_asic_rev(tp) != ASIC_REV_5761) {
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
*/
@@ -9200,9 +9302,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Initialize MBUF/DESC pool. */
if (tg3_flag(tp, 5750_PLUS)) {
/* Do nothing. */
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ if (tg3_asic_rev(tp) == ASIC_REV_5704)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
else
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
@@ -9240,11 +9342,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->bufmgr_config.dma_high_water);
val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
tw32(BUFMGR_MODE, val);
for (i = 0; i < 2000; i++) {
@@ -9257,7 +9359,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return -ENODEV;
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
tg3_setup_rxbd_thresholds(tp);
@@ -9295,7 +9397,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
(tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -9308,7 +9410,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
val | BDINFO_FLAGS_USE_EXT_RECV);
if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
- tg3_flag(tp, 57765_CLASS))
+ tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -9350,7 +9453,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
(6 << TX_LENGTHS_IPG_SHIFT) |
(32 << TX_LENGTHS_SLOT_TIME_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val |= tr32(MAC_TX_LENGTHS) &
(TX_LENGTHS_JMB_FRM_LEN_MSK |
TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -9370,20 +9474,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717)
rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
if (tg3_flag(tp, TSO_CAPABLE) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -9394,26 +9498,43 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ tp->dma_limit = 0;
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
+ }
+ }
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
if (tg3_flag(tp, 57765_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS)) {
- val = tr32(TG3_RDMA_RSRVCTRL_REG);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ u32 tgtreg;
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tgtreg = TG3_RDMA_RSRVCTRL_REG2;
+ else
+ tgtreg = TG3_RDMA_RSRVCTRL_REG;
+
+ val = tr32(tgtreg);
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -9421,14 +9542,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
}
- tw32(TG3_RDMA_RSRVCTRL_REG,
- val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+ tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
- val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
- tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
+ u32 tgtreg;
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
+ else
+ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
+
+ val = tr32(tgtreg);
+ tw32(tgtreg, val |
TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
}
@@ -9505,7 +9633,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
if (!tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_asic_rev(tp) != ASIC_REV_5700)
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
udelay(40);
@@ -9523,11 +9651,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ if (tg3_asic_rev(tp) == ASIC_REV_5752)
gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
GRC_LCLCTRL_GPIO_OUTPUT3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (tg3_asic_rev(tp) == ASIC_REV_5755)
gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
tp->grc_local_ctrl &= ~gpio_mask;
@@ -9562,11 +9690,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
WDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
if (tg3_flag(tp, TSO_CAPABLE) &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
/* nothing */
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -9578,7 +9706,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, 5755_PLUS))
val |= WDMAC_MODE_STATUS_TAG_FIX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
val |= WDMAC_MODE_BURST_ALL_DATA;
tw32_f(WDMAC_MODE, val);
@@ -9589,10 +9717,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
&pcix_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703) {
pcix_cmd &= ~PCI_X_CMD_MAX_READ;
pcix_cmd |= PCI_X_CMD_READ_2K;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
pcix_cmd |= PCI_X_CMD_READ_2K;
}
@@ -9603,7 +9731,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(RDMAC_MODE, rdmac_mode);
udelay(40);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5719) {
for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
break;
@@ -9620,7 +9748,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (!tg3_flag(tp, 5705_PLUS))
tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
tw32(SNDDATAC_MODE,
SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
else
@@ -9643,7 +9771,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(SNDBDI_MODE, val);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
err = tg3_load_5701_a0_firmware_fix(tp);
if (err)
return err;
@@ -9658,10 +9786,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->tx_mode = TX_MODE_ENABLE;
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
tp->tx_mode &= ~val;
tp->tx_mode |= tr32(MAC_TX_MODE) & val;
@@ -9712,8 +9841,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
- !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+ if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
+ !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
/* Set drive transmission level to 1.2V */
/* only if the signal pre-emphasis bit is not set */
val = tr32(MAC_SERDES_CFG);
@@ -9721,7 +9850,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val |= 0x880;
tw32(MAC_SERDES_CFG, val);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
tw32(MAC_SERDES_CFG, 0x616000);
}
@@ -9734,14 +9863,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val = 2;
tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
/* Use hardware link auto-negotiation */
tg3_flag_set(tp, HW_AUTONEG);
}
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ tg3_asic_rev(tp) == ASIC_REV_5714) {
u32 tmp;
tmp = tr32(SERDES_RX_CTRL);
@@ -9995,9 +10124,9 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+ if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
} else {
u32 val = tr32(HOSTCC_FLOW_ATTN);
@@ -10045,10 +10174,15 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+ /* BCM4785: Flush posted writes from GbE to host memory. */
+ tr32(HOSTCC_MODE);
+ }
+
if (!tg3_flag(tp, TAGGED_STATUS)) {
/* All of this garbage is because when using non-tagged
* IRQ status the mailbox/status_block protocol the chip
@@ -10166,7 +10300,7 @@ restart_timer:
static void tg3_timer_init(struct tg3 *tp)
{
if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
!tg3_flag(tp, 57765_CLASS))
tp->timer_offset = HZ;
else
@@ -10747,7 +10881,7 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -10817,8 +10951,8 @@ static u64 tg3_calc_crc_errors(struct tg3 *tp)
struct tg3_hw_stats *hw_stats = tp->hw_stats;
if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)) {
u32 val;
if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
@@ -12342,11 +12476,12 @@ static int tg3_test_memory(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
- else if (tg3_flag(tp, 57765_CLASS))
+ else if (tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
mem_tbl = mem_tbl_57765;
else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
else if (tg3_flag(tp, 5705_PLUS))
mem_tbl = mem_tbl_5705;
@@ -12458,7 +12593,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
} else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
else if (tg3_flag(tp, HW_TSO_1) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
mss |= (TG3_TSO_TCP_OPT_LEN << 9);
} else {
base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
@@ -12644,7 +12779,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
* errata. Also, the MAC loopback test is deprecated for
* all newer ASIC revisions.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
!tg3_flag(tp, CPMU_PRESENT)) {
tg3_mac_loopback(tp, true);
@@ -12922,7 +13057,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
spin_lock_bh(&tp->lock);
- err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+ err = __tg3_readphy(tp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &mii_regval);
spin_unlock_bh(&tp->lock);
data->val_out = mii_regval;
@@ -12938,7 +13074,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
spin_lock_bh(&tp->lock);
- err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ err = __tg3_writephy(tp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
spin_unlock_bh(&tp->lock);
return err;
@@ -13129,7 +13266,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
reset_phy = 1;
err = tg3_restart_hw(tp, reset_phy);
@@ -13242,7 +13379,7 @@ static void tg3_get_nvram_info(struct tg3 *tp)
tw32(NVRAM_CFG1, nvcfg1);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
tg3_flag(tp, 5780_CLASS)) {
switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
@@ -13683,6 +13820,22 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
nvcfg1 = tr32(NVRAM_CFG1);
nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ switch (nvmpinstrp) {
+ case FLASH_5762_EEPROM_HD:
+ nvmpinstrp = FLASH_5720_EEPROM_HD;
+ break;
+ case FLASH_5762_EEPROM_LD:
+ nvmpinstrp = FLASH_5720_EEPROM_LD;
+ break;
+ }
+ }
+
switch (nvmpinstrp) {
case FLASH_5720_EEPROM_HD:
case FLASH_5720_EEPROM_LD:
@@ -13728,7 +13881,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
default:
- tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
@@ -13774,7 +13928,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
default:
- tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
@@ -13786,11 +13941,30 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tg3_nvram_get_pagesize(tp, nvcfg1);
if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ u32 val;
+
+ if (tg3_nvram_read(tp, 0, &val))
+ return;
+
+ if (val != TG3_EEPROM_MAGIC &&
+ (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
+ tg3_flag_set(tp, NO_NVRAM);
+ }
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
+ tg3_flag_clear(tp, NVRAM);
+ tg3_flag_clear(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
tw32_f(GRC_EEPROM_ADDR,
(EEPROM_ADDR_FSM_RESET |
(EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -13803,8 +13977,8 @@ static void tg3_nvram_init(struct tg3 *tp)
tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
udelay(100);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701) {
tg3_flag_set(tp, NVRAM);
if (tg3_nvram_lock(tp)) {
@@ -13817,25 +13991,26 @@ static void tg3_nvram_init(struct tg3 *tp)
tp->nvram_size = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ if (tg3_asic_rev(tp) == ASIC_REV_5752)
tg3_get_5752_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755)
tg3_get_5755_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_get_5787_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5761)
tg3_get_5761_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_CLASS))
tg3_get_57780_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
tg3_get_5717_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_get_5720_nvram_info(tp);
else
tg3_get_nvram_info(tp);
@@ -13948,7 +14123,7 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_flag_set(tp, EEPROM_WRITE_PROT);
tg3_flag_set(tp, WOL_CAP);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
tg3_flag_clear(tp, EEPROM_WRITE_PROT);
tg3_flag_set(tp, IS_NIC);
@@ -13975,13 +14150,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
ver >>= NIC_SRAM_DATA_VER_SHIFT;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
+ tg3_asic_rev(tp) != ASIC_REV_5703 &&
(ver > 0) && (ver < 0x100))
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
@@ -14029,18 +14204,16 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
/* Default to PHY_1_MODE if 0 (MAC_MODE) is
* read on some older 5700/5701 bootcode.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5701)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
break;
case SHASTA_EXT_LED_SHARED:
tp->led_ctrl = LED_CTRL_MODE_SHARED;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
break;
@@ -14051,19 +14224,19 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
case SHASTA_EXT_LED_COMBO:
tp->led_ctrl = LED_CTRL_MODE_COMBO;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
break;
}
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+ if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) &&
tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
tp->led_ctrl = LED_CTRL_MODE_PHY_2;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
@@ -14107,13 +14280,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
if ((tg3_flag(tp, 57765_PLUS) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
if (tg3_flag(tp, PCI_EXPRESS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS)) {
u32 cfg3;
@@ -14137,6 +14310,39 @@ done:
device_set_wakeup_capable(&tp->pdev->dev, false);
}
+static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+ int i, err;
+ u32 val2, off = offset * 8;
+
+ err = tg3_nvram_lock(tp);
+ if (err)
+ return err;
+
+ tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
+ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
+ APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
+ tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
+ udelay(10);
+
+ for (i = 0; i < 100; i++) {
+ val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
+ if (val2 & APE_OTP_STATUS_CMD_DONE) {
+ *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
+ break;
+ }
+ udelay(10);
+ }
+
+ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
+
+ tg3_nvram_unlock(tp);
+ if (val2 & APE_OTP_STATUS_CMD_DONE)
+ return 0;
+
+ return -EBUSY;
+}
+
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
int i;
@@ -14283,10 +14489,19 @@ static int tg3_phy_probe(struct tg3 *tp)
* subsys device table.
*/
p = tg3_lookup_by_subsys(tp);
- if (!p)
+ if (p) {
+ tp->phy_id = p->phy_id;
+ } else if (!tg3_flag(tp, IS_SSB_CORE)) {
+ /* For now we saw the IDs 0xbc050cd0,
+ * 0xbc050f80 and 0xbc050c30 on devices
+ * connected to a BCM4785 and there are
+ * probably more. Just assume that the phy is
+ * supported when it is connected to an SSB core
+ * for now.
+ */
return -ENODEV;
+ }
- tp->phy_id = p->phy_id;
if (!tp->phy_id ||
tp->phy_id == TG3_PHY_ID_BCM8002)
tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
@@ -14294,12 +14509,13 @@ static int tg3_phy_probe(struct tg3 *tp)
}
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
+ (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
+ (tg3_asic_rev(tp) == ASIC_REV_57765 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
tg3_phy_init_link_config(tp);
@@ -14409,7 +14625,7 @@ out_not_found:
return;
out_no_vpd:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
strcpy(tp->board_part_number, "BCM5717");
@@ -14417,7 +14633,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM5718");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
strcpy(tp->board_part_number, "BCM57780");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
@@ -14428,7 +14644,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57788");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
strcpy(tp->board_part_number, "BCM57761");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
@@ -14443,7 +14659,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
strcpy(tp->board_part_number, "BCM57762");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
@@ -14454,7 +14670,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57786");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
nomatch:
@@ -14676,6 +14892,8 @@ static void tg3_read_dash_ver(struct tg3 *tp)
if (tg3_flag(tp, APE_HAS_NCSI))
fwtype = "NCSI";
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
+ fwtype = "SMASH";
else
fwtype = "DASH";
@@ -14689,6 +14907,31 @@ static void tg3_read_dash_ver(struct tg3 *tp)
(apedata & APE_FW_VERSION_BLDMSK));
}
+static void tg3_read_otp_ver(struct tg3 *tp)
+{
+ u32 val, val2;
+
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ return;
+
+ if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
+ !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
+ TG3_OTP_MAGIC0_VALID(val)) {
+ u64 val64 = (u64) val << 32 | val2;
+ u32 ver = 0;
+ int i, vlen;
+
+ for (i = 0; i < 7; i++) {
+ if ((val64 & 0xff) == 0)
+ break;
+ ver = val64 & 0xff;
+ val64 >>= 8;
+ }
+ vlen = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
+ }
+}
+
static void tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
@@ -14699,6 +14942,7 @@ static void tg3_read_fw_ver(struct tg3 *tp)
if (tg3_flag(tp, NO_NVRAM)) {
strcat(tp->fw_ver, "sb");
+ tg3_read_otp_ver(tp);
return;
}
@@ -14773,7 +15017,7 @@ static struct pci_dev *tg3_find_peer(struct tg3 *tp)
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+ if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
u32 reg;
/* All devices that use the alternate
@@ -14785,7 +15029,10 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
reg = TG3PCI_GEN2_PRODID_ASICREV;
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -14807,46 +15054,47 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
/* Wrong chip ID in 5752 A0. This code can be removed later
* as A0 is not in production.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
tg3_flag_set(tp, 5717_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766)
tg3_flag_set(tp, 57765_CLASS);
- if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
+ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, 5755_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
+ tg3_asic_rev(tp) == ASIC_REV_5714)
tg3_flag_set(tp, 5780_CLASS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5906 ||
tg3_flag(tp, 5755_PLUS) ||
tg3_flag(tp, 5780_CLASS))
tg3_flag_set(tp, 5750_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
tg3_flag(tp, 5750_PLUS))
tg3_flag_set(tp, 5705_PLUS);
}
@@ -14856,13 +15104,13 @@ static bool tg3_10_100_only_device(struct tg3 *tp,
{
u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
- (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
(tp->phy_flags & TG3_PHYFLG_IS_FET))
return true;
if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
return true;
} else {
@@ -14923,8 +15171,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* enable this workaround if the 5703 is on the secondary
* bus of these ICH bridges.
*/
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
- (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
static struct tg3_dev_id {
u32 vendor;
u32 device;
@@ -14964,7 +15212,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5701) {
static struct tg3_dev_id {
u32 vendor;
u32 device;
@@ -15024,29 +15272,29 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} while (bridge);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5714)
tp->pdev_peer = tg3_find_peer(tp);
/* Determine TSO capabilities */
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
; /* Do nothing. HW bug. */
else if (tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, HW_TSO_3);
else if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_flag_set(tp, HW_TSO_2);
else if (tg3_flag(tp, 5750_PLUS)) {
tg3_flag_set(tp, HW_TSO_1);
tg3_flag_set(tp, TSO_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
- tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
+ tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
tg3_flag_clear(tp, TSO_BUG);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
tg3_flag_set(tp, TSO_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+ if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
tp->fw_needed = FIRMWARE_TG3TSO;
@@ -15068,22 +15316,22 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->fw_needed = NULL;
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSI);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
- tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+ if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5750_BX ||
+ (tg3_asic_rev(tp) == ASIC_REV_5714 &&
+ tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
tp->pdev_peer == tp->pdev))
tg3_flag_clear(tp, SUPPORT_MSI);
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_flag_set(tp, 1SHOT_MSI);
}
@@ -15099,25 +15347,26 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->rxq_max = TG3_RSS_MAX_NUM_QS;
tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
tp->txq_max = tp->irq_max - 1;
}
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
tg3_flag_set(tp, USE_JUMBO_BDFLAG);
if (!tg3_flag(tp, 5705_PLUS) ||
@@ -15135,20 +15384,19 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_flag_clear(tp, HW_TSO_2);
tg3_flag_clear(tp, TSO_CAPABLE);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
tg3_flag_set(tp, CLKREQ_BUG);
- } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
tg3_flag_set(tp, L1PLLPD_EN);
}
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
/* BCM5785 devices are effectively PCIe devices, and should
* follow PCIe codepaths, but do not have a PCIe capabilities
* section.
@@ -15181,7 +15429,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&tp->pci_cacheline_sz);
pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
&tp->pci_lat_timer);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
tp->pci_lat_timer < 64) {
tp->pci_lat_timer = 64;
pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
@@ -15191,7 +15439,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Important! -- It is critical that the PCI-X hw workaround
* situation is decided before the first MMIO register access.
*/
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
/* 5700 BX chips need to have their TX producer index
* mailboxes written twice to workaround a bug.
*/
@@ -15233,7 +15481,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCI_32BIT);
/* Chip-specific fixup from Broadcom driver */
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
(!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
@@ -15250,9 +15498,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Various workaround register access methods */
if (tg3_flag(tp, PCIX_TARGET_HWBUG))
tp->write32 = tg3_write_indirect_reg32;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
(tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
/*
* Back to back register writes can cause problems on these
* chips, the workaround is to read back all reg writes
@@ -15284,7 +15532,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pci_cmd &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tp->read32_mbox = tg3_read32_mbox_5906;
tp->write32_mbox = tg3_write32_mbox_5906;
tp->write32_tx_mbox = tg3_write32_mbox_5906;
@@ -15293,8 +15541,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tp->write32 == tg3_write_indirect_reg32 ||
(tg3_flag(tp, PCIX_MODE) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)))
tg3_flag_set(tp, SRAM_USE_CONFIG);
/* The memory arbiter has to be enabled in order for SRAM accesses
@@ -15306,7 +15554,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
tg3_flag(tp, 5780_CLASS)) {
if (tg3_flag(tp, PCIX_MODE)) {
pci_read_config_dword(tp->pdev,
@@ -15314,21 +15562,23 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&val);
tp->pci_fn = val & 0x7;
}
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
- tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
- if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
- NIC_SRAM_CPMUSTAT_SIG) {
- tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
- tp->pci_fn = tp->pci_fn ? 1 : 0;
- }
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
- if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
- NIC_SRAM_CPMUSTAT_SIG) {
+ if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
+ val = tr32(TG3_CPMU_STATUS);
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5717)
+ tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
+ else
tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
TG3_CPMU_STATUS_FSHFT_5719;
- }
+ }
+
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+ tp->write32_tx_mbox = tg3_write_flush_reg32;
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
}
/* Get eeprom hw config before calling tg3_set_power_state().
@@ -15366,18 +15616,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* It is also used as eeprom write protect on LOMs.
*/
tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
tg3_flag(tp, EEPROM_WRITE_PROT))
tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
GRC_LCLCTRL_GPIO_OUTPUT1);
/* Unused GPIO3 must be driven as output on 5752 because there
* are no pull-up resistors on unused GPIO pins.
*/
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5752)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_CLASS))
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
@@ -15391,6 +15641,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
GRC_LCLCTRL_GPIO_OUTPUT0;
}
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->grc_local_ctrl |=
+ tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
+
/* Switch out of Vaux if it is a NIC */
tg3_pwrsrc_switch_to_vmain(tp);
@@ -15401,42 +15655,42 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, JUMBO_RING_ENABLE);
/* Determine WakeOnLan speed to use. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
tg3_flag_clear(tp, WOL_SPEED_100MB);
} else {
tg3_flag_set(tp, WOL_SPEED_100MB);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ if (tg3_asic_rev(tp) == ASIC_REV_5906)
tp->phy_flags |= TG3_PHYFLG_IS_FET;
/* A few boards don't want Ethernet@WireSpeed phy feature */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
- (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
+ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
(tp->phy_flags & TG3_PHYFLG_IS_FET) ||
(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+ if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5704_AX)
tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
if (tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
+ tg3_asic_rev(tp) != ASIC_REV_57780 &&
!tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761) {
if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
@@ -15446,8 +15700,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->phy_flags |= TG3_PHYFLG_BER_BUG;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
tp->phy_otp = tg3_read_otp_phycfg(tp);
if (tp->phy_otp == 0)
tp->phy_otp = TG3_OTP_DEFAULT;
@@ -15459,20 +15713,20 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->mi_mode = MAC_MI_MODE_BASE;
tp->coalesce_mode = 0;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+ if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
+ tg3_chip_rev(tp) != CHIPREV_5700_BX)
tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
/* Set these bits to enable statistics workaround. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
tp->coalesce_mode |= HOSTCC_MODE_ATTN;
tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
tg3_flag_set(tp, USE_PHYLIB);
err = tg3_mdio_init(tp);
@@ -15481,7 +15735,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
GRC_MODE_WORD_SWAP_B2HRX_DATA |
GRC_MODE_B2HRX_ENABLE |
@@ -15501,12 +15756,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
!tg3_flag(tp, PCIX_TARGET_HWBUG)) {
- u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
-
- if (chiprevid == CHIPREV_ID_5701_A0 ||
- chiprevid == CHIPREV_ID_5701_B0 ||
- chiprevid == CHIPREV_ID_5701_B2 ||
- chiprevid == CHIPREV_ID_5701_B5) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
void __iomem *sram_base;
/* Write some dummy words into the SRAM status block
@@ -15529,13 +15782,13 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
(grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
tg3_flag_set(tp, IS_5788);
if (!tg3_flag(tp, IS_5788) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_asic_rev(tp) != ASIC_REV_5700)
tg3_flag_set(tp, TAGGED_STATUS);
if (tg3_flag(tp, TAGGED_STATUS)) {
tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
@@ -15568,7 +15821,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
} else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700)
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
else
tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
@@ -15578,7 +15831,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* change bit implementation, so we must use the
* status register in those cases.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700)
tg3_flag_set(tp, USE_LINKCHG_REG);
else
tg3_flag_clear(tp, USE_LINKCHG_REG);
@@ -15588,7 +15841,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* upon subsystem IDs.
*/
if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ tg3_asic_rev(tp) == ASIC_REV_5701 &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
tg3_flag_set(tp, USE_LINKCHG_REG);
@@ -15602,7 +15855,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -15619,9 +15872,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Increment the rx prod index on the rx std ring by at most
* 8 for these chips to workaround hw errata.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5755)
tp->rx_std_max_post = 8;
if (tg3_flag(tp, ASPM_WORKAROUND))
@@ -15643,7 +15896,6 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
addr = of_get_property(dp, "local-mac-address", &len);
if (addr && len == 6) {
memcpy(dev->dev_addr, addr, 6);
- memcpy(dev->perm_addr, dev->dev_addr, 6);
return 0;
}
return -ENODEV;
@@ -15654,7 +15906,6 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
struct net_device *dev = tp->dev;
memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
- memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
return 0;
}
#endif
@@ -15664,14 +15915,21 @@ static int tg3_get_device_address(struct tg3 *tp)
struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
int addr_ok = 0;
+ int err;
#ifdef CONFIG_SPARC
if (!tg3_get_macaddr_sparc(tp))
return 0;
#endif
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
+ if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+ return 0;
+ }
+
mac_offset = 0x7c;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
tg3_flag(tp, 5780_CLASS)) {
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
mac_offset = 0xcc;
@@ -15684,7 +15942,7 @@ static int tg3_get_device_address(struct tg3 *tp)
mac_offset = 0xcc;
if (tp->pci_fn > 1)
mac_offset += 0x18c;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
mac_offset = 0x10;
/* First try to get it from MAC address mailbox. */
@@ -15731,7 +15989,6 @@ static int tg3_get_device_address(struct tg3 *tp)
#endif
return -EINVAL;
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return 0;
}
@@ -15753,8 +16010,8 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
/* On 5703 and later chips, the boundary bits have no
* effect.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
!tg3_flag(tp, PCI_EXPRESS))
goto out;
@@ -15992,14 +16249,14 @@ static int tg3_test_dma(struct tg3 *tp)
/* DMA read watermark not used on PCIE */
tp->dma_rwctrl |= 0x00180000;
} else if (!tg3_flag(tp, PCIX_MODE)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
+ tg3_asic_rev(tp) == ASIC_REV_5750)
tp->dma_rwctrl |= 0x003f0000;
else
tp->dma_rwctrl |= 0x003f000f;
} else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
u32 read_water = 0x7;
@@ -16008,35 +16265,37 @@ static int tg3_test_dma(struct tg3 *tp)
* better performance.
*/
if (tg3_flag(tp, 40BIT_DMA_BUG) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tg3_asic_rev(tp) == ASIC_REV_5704)
tp->dma_rwctrl |= 0x8000;
else if (ccval == 0x6 || ccval == 0x7)
tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
+ if (tg3_asic_rev(tp) == ASIC_REV_5703)
read_water = 4;
/* Set bit 23 to enable PCIX hw bug fix */
tp->dma_rwctrl |=
(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
(1 << 23);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
/* 5780 always in PCIX mode */
tp->dma_rwctrl |= 0x00144000;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
/* 5714 always in PCIX mode */
tp->dma_rwctrl |= 0x00148000;
} else {
tp->dma_rwctrl |= 0x001b000f;
}
}
+ if (tg3_flag(tp, ONE_DMA_AT_ONCE))
+ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704)
tp->dma_rwctrl &= 0xfffffff0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
/* Remove this if it causes problems for some boards. */
tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
@@ -16060,8 +16319,8 @@ static int tg3_test_dma(struct tg3 *tp)
tg3_switch_clocks(tp);
#endif
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701)
goto out;
/* It is best to perform DMA test with maximum write burst size
@@ -16180,7 +16439,7 @@ static void tg3_init_bufmgr_config(struct tg3 *tp)
DEFAULT_MB_MACRX_LOW_WATER_5705;
tp->bufmgr_config.mbuf_high_water =
DEFAULT_MB_HIGH_WATER_5705;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tp->bufmgr_config.mbuf_mac_rx_low_water =
DEFAULT_MB_MACRX_LOW_WATER_5906;
tp->bufmgr_config.mbuf_high_water =
@@ -16238,6 +16497,7 @@ static char *tg3_phy_string(struct tg3 *tp)
case TG3_PHY_ID_BCM57765: return "57765";
case TG3_PHY_ID_BCM5719C: return "5719C";
case TG3_PHY_ID_BCM5720C: return "5720C";
+ case TG3_PHY_ID_BCM5762: return "5762C";
case TG3_PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
@@ -16367,12 +16627,25 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->pm_cap = pm_cap;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
+ tp->irq_sync = 1;
if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
else
tp->msg_enable = TG3_DEF_MSG_ENABLE;
+ if (pdev_is_ssb_gige_core(pdev)) {
+ tg3_flag_set(tp, IS_SSB_CORE);
+ if (ssb_gige_must_flush_posted_writes(pdev))
+ tg3_flag_set(tp, FLUSH_POSTED_WRITES);
+ if (ssb_gige_one_dma_at_once(pdev))
+ tg3_flag_set(tp, ONE_DMA_AT_ONCE);
+ if (ssb_gige_have_roboswitch(pdev))
+ tg3_flag_set(tp, ROBOSWITCH);
+ if (ssb_gige_is_rgmii(pdev))
+ tg3_flag_set(tp, RGMII_MODE);
+ }
+
/* The word/byte swap controls here control register access byte
* swapping. DMA data byte swapping is controlled in the GRC_MODE
* setting below.
@@ -16413,7 +16686,10 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
tg3_flag_set(tp, ENABLE_APE);
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
@@ -16485,7 +16761,7 @@ static int tg3_init_one(struct pci_dev *pdev,
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
- if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
if (tg3_flag(tp, 5755_PLUS))
@@ -16505,11 +16781,11 @@ static int tg3_init_one(struct pci_dev *pdev,
if (features & NETIF_F_IPV6_CSUM)
features |= NETIF_F_TSO6;
if (tg3_flag(tp, HW_TSO_3) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
features |= NETIF_F_TSO_ECN;
}
@@ -16521,14 +16797,14 @@ static int tg3_init_one(struct pci_dev *pdev,
* MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
* loopback for the remaining devices.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
!tg3_flag(tp, CPMU_PRESENT))
/* Add the loopback capability */
features |= NETIF_F_LOOPBACK;
dev->hw_features |= features;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
tg3_flag_set(tp, MAX_RXPEND_64);
@@ -16607,8 +16883,9 @@ static int tg3_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, PTP_CAPABLE);
if (tg3_flag(tp, 5717_PLUS)) {
@@ -16618,6 +16895,8 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_timer_init(tp);
+ tg3_carrier_off(tp);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
@@ -16626,7 +16905,7 @@ static int tg3_init_one(struct pci_dev *pdev,
netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
tp->board_part_number,
- tp->pci_chip_rev_id,
+ tg3_chip_rev_id(tp),
tg3_bus_string(tp, str),
dev->dev_addr);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d330e81f5793..8d7d4c2ab5d6 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2012 Broadcom Corporation.
+ * Copyright (C) 2007-2013 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -65,6 +65,9 @@
#define TG3PCI_DEVICE_TIGON3_57766 0x1686
#define TG3PCI_DEVICE_TIGON3_57786 0x16b3
#define TG3PCI_DEVICE_TIGON3_57782 0x16b7
+#define TG3PCI_DEVICE_TIGON3_5762 0x1687
+#define TG3PCI_DEVICE_TIGON3_5725 0x1643
+#define TG3PCI_DEVICE_TIGON3_5727 0x16f3
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -117,9 +120,7 @@
#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200
#define MISC_HOST_CTRL_CHIPREV 0xffff0000
#define MISC_HOST_CTRL_CHIPREV_SHIFT 16
-#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \
- (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \
- MISC_HOST_CTRL_CHIPREV_SHIFT)
+
#define CHIPREV_ID_5700_A0 0x7000
#define CHIPREV_ID_5700_A1 0x7001
#define CHIPREV_ID_5700_B0 0x7100
@@ -159,7 +160,8 @@
#define CHIPREV_ID_57765_A0 0x57785000
#define CHIPREV_ID_5719_A0 0x05719000
#define CHIPREV_ID_5720_A0 0x05720000
-#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
+#define CHIPREV_ID_5762_A0 0x05762000
+
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
#define ASIC_REV_5703 0x01
@@ -182,7 +184,7 @@
#define ASIC_REV_5719 0x5719
#define ASIC_REV_5720 0x5720
#define ASIC_REV_57766 0x57766
-#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
+#define ASIC_REV_5762 0x5762
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
#define CHIPREV_5700_CX 0x72
@@ -195,7 +197,6 @@
#define CHIPREV_5784_AX 0x57840
#define CHIPREV_5761_AX 0x57610
#define CHIPREV_57765_AX 0x577650
-#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff)
#define METAL_REV_A0 0x00
#define METAL_REV_A1 0x01
#define METAL_REV_B0 0x00
@@ -774,7 +775,7 @@
#define SG_DIG_AUTONEG_ERROR 0x00000001
#define TG3_TX_TSTAMP_LSB 0x000005c0
#define TG3_TX_TSTAMP_MSB 0x000005c4
-#define TG3_TSTAMP_MASK 0x7fffffffffffffff
+#define TG3_TSTAMP_MASK 0x7fffffffffffffffLL
/* 0x5c8 --> 0x600 unused */
#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */
#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */
@@ -1159,6 +1160,8 @@
#define CPMU_MUTEX_GNT_DRIVER 0x00001000
#define TG3_CPMU_PHY_STRAP 0x00003664
#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
+#define TG3_CPMU_PADRNG_CTL 0x00003668
+#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000
/* 0x3664 --> 0x36b0 unused */
#define TG3_CPMU_EEE_MODE 0x000036b0
@@ -1178,6 +1181,7 @@
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
+#define TG3_CPMU_EEE_LNKIDL_APE_TX_MT 0x00000002
/* 0x36c0 --> 0x36d0 unused */
#define TG3_CPMU_EEE_CTRL 0x000036d0
@@ -1400,7 +1404,10 @@
#define RDMAC_STATUS_FIFOURUN 0x00000080
#define RDMAC_STATUS_FIFOOREAD 0x00000100
#define RDMAC_STATUS_LNGREAD 0x00000200
-/* 0x4808 --> 0x4900 unused */
+/* 0x4808 --> 0x4890 unused */
+
+#define TG3_RDMA_RSRVCTRL_REG2 0x00004890
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL2 0x000048a0
#define TG3_RDMA_RSRVCTRL_REG 0x00004900
#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
@@ -1850,6 +1857,7 @@
#define FLASH_VENDOR_SST_SMALL 0x00000001
#define FLASH_VENDOR_SST_LARGE 0x02000001
#define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003
+#define NVRAM_CFG1_5762VENDOR_MASK 0x03e00003
#define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000
#define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000
#define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
@@ -1910,6 +1918,8 @@
#define FLASH_5717VENDOR_ST_45USPT 0x03400001
#define FLASH_5720_EEPROM_HD 0x00000001
#define FLASH_5720_EEPROM_LD 0x00000003
+#define FLASH_5762_EEPROM_HD 0x02000001
+#define FLASH_5762_EEPROM_LD 0x02000003
#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
@@ -2365,6 +2375,20 @@
#define APE_LOCK_REQ_DRIVER 0x00001000
#define TG3_APE_LOCK_GRANT 0x004c
#define APE_LOCK_GRANT_DRIVER 0x00001000
+#define TG3_APE_OTP_CTRL 0x00e8
+#define APE_OTP_CTRL_PROG_EN 0x200000
+#define APE_OTP_CTRL_CMD_RD 0x000000
+#define APE_OTP_CTRL_START 0x000001
+#define TG3_APE_OTP_STATUS 0x00ec
+#define APE_OTP_STATUS_CMD_DONE 0x000001
+#define TG3_APE_OTP_ADDR 0x00f0
+#define APE_OTP_ADDR_CPU_ENABLE 0x80000000
+#define TG3_APE_OTP_RD_DATA 0x00f8
+
+#define OTP_ADDRESS_MAGIC0 0x00000050
+#define TG3_OTP_MAGIC0_VALID(val) \
+ ((((val) & 0xf0000000) == 0xa0000000) ||\
+ (((val) & 0x0f000000) == 0x0a000000))
/* APE shared memory. Accessible through BAR1 */
#define TG3_APE_SHMEM_BASE 0x4000
@@ -3030,6 +3054,11 @@ enum TG3_FLAGS {
TG3_FLAG_57765_PLUS,
TG3_FLAG_57765_CLASS,
TG3_FLAG_5717_PLUS,
+ TG3_FLAG_IS_SSB_CORE,
+ TG3_FLAG_FLUSH_POSTED_WRITES,
+ TG3_FLAG_ROBOSWITCH,
+ TG3_FLAG_ONE_DMA_AT_ONCE,
+ TG3_FLAG_RGMII_MODE,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -3206,6 +3235,7 @@ struct tg3 {
#define TG3_PHY_ID_BCM57765 0x5c0d8a40
#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
#define TG3_PHY_ID_BCM5720C 0x5c0d8b60
+#define TG3_PHY_ID_BCM5762 0x85803780
#define TG3_PHY_ID_BCM5906 0xdc00ac40
#define TG3_PHY_ID_BCM8002 0x60010140
#define TG3_PHY_ID_INVALID 0xffffffff
@@ -3230,6 +3260,7 @@ struct tg3 {
(X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
(X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
(X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
+ (X) == TG3_PHY_ID_BCM5720C || (X) == TG3_PHY_ID_BCM5762 || \
(X) == TG3_PHY_ID_BCM8002)
u32 phy_flags;
@@ -3320,10 +3351,22 @@ struct tg3 {
const struct firmware *fw;
u32 fw_len; /* includes BSS */
-#if IS_ENABLED(CONFIG_HWMON)
struct device *hwmon_dev;
-#endif
bool link_up;
};
+/* Accessor macros for chip and asic attributes
+ *
+ * nb: Using static inlines equivalent to the accessor macros generates
+ * larger object code with gcc 4.7.
+ * Using statement expression macros to check tp with
+ * typecheck(struct tg3 *, tp) also creates larger objects.
+ */
+#define tg3_chip_rev_id(tp) \
+ ((tp)->pci_chip_rev_id)
+#define tg3_asic_rev(tp) \
+ ((tp)->pci_chip_rev_id >> 12)
+#define tg3_chip_rev(tp) \
+ ((tp)->pci_chip_rev_id >> 8)
+
#endif /* !(_T3_H) */
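The tg3_chip_rev_id()/tg3_asic_rev()/tg3_chip_rev() accessors added above are plain shifts of tp->pci_chip_rev_id, replacing the removed GET_CHIP_REV_ID()/GET_ASIC_REV()/GET_CHIP_REV() macros. A minimal standalone sketch of the arithmetic (not part of the patch; struct tg3 is reduced to the one field involved):

/* Illustrative userspace sketch of the accessor macros above. */
#include <stdio.h>

struct tg3 { unsigned int pci_chip_rev_id; };

#define tg3_chip_rev_id(tp) ((tp)->pci_chip_rev_id)
#define tg3_asic_rev(tp)    ((tp)->pci_chip_rev_id >> 12)
#define tg3_chip_rev(tp)    ((tp)->pci_chip_rev_id >> 8)

int main(void)
{
	struct tg3 tp = { .pci_chip_rev_id = 0x05762000 }; /* CHIPREV_ID_5762_A0 */

	/* Prints "05762000 5762 57620": full rev id, ASIC rev, chip rev. */
	printf("%08x %x %x\n", tg3_chip_rev_id(&tp),
	       tg3_asic_rev(&tp), tg3_chip_rev(&tp));
	return 0;
}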
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index ceb0de0cf62c..1194446f859a 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,6 +22,7 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
+ depends on GENERIC_HARDIRQS
select NET_CORE
select MACB
---help---
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a9b0830fb39d..79039439bfdc 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -287,7 +287,7 @@ static int macb_mii_probe(struct net_device *dev)
}
/* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
bp->phy_interface);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget)
* get notified when new packets arrive.
*/
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+
+ /* Packets received while interrupts were disabled */
+ status = macb_readl(bp, RSR);
+ if (unlikely(status))
+ napi_reschedule(napi);
}
/* TODO: Handle errors */
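The added RSR re-check handles frames that arrived while receive interrupts were disabled during the poll: if any are pending once IER is re-armed, the poll is rescheduled instead of waiting for a fresh interrupt. A toy model of that re-check-after-enable pattern (stand-in names, not the macb register API):

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>

static int rsr;            /* models the receive status register */
static int irq_enabled;
static int rescheduled;

static void poll_complete(void)
{
	irq_enabled = 1;       /* models macb_writel(bp, IER, ...)   */
	if (rsr)               /* frame landed before the re-arm?    */
		rescheduled = 1;   /* models napi_reschedule(napi)   */
}

int main(void)
{
	rsr = 1;               /* packet arrived during the gap */
	poll_complete();
	printf("rescheduled=%d\n", rescheduled);   /* prints 1 */
	return 0;
}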
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b407043ce9b0..a170065b5973 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -548,6 +548,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
return -1;
}
+ /* All frames should fit into a single buffer */
+ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+ return -1;
+
/* Check if packet has checksum already */
if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
!(ext_status & RXDESC_IP_PAYLOAD_MASK))
@@ -1459,7 +1463,6 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index c8fdeaae56c0..20d2085f61c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -131,7 +131,7 @@ static void t1_set_rxmode(struct net_device *dev)
static void link_report(struct port_info *p)
{
if (!netif_carrier_ok(p->dev))
- printk(KERN_INFO "%s: link down\n", p->dev->name);
+ netdev_info(p->dev, "link down\n");
else {
const char *s = "10Mbps";
@@ -141,9 +141,9 @@ static void link_report(struct port_info *p)
case SPEED_100: s = "100Mbps"; break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
- p->dev->name, s,
- p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ netdev_info(p->dev, "link up, %s, %s-duplex\n",
+ s, p->link_config.duplex == DUPLEX_FULL
+ ? "full" : "half");
}
}
@@ -976,19 +976,13 @@ static const struct net_device_ops cxgb_netdev_ops = {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
-
int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
const struct board_info *bi;
struct adapter *adapter = NULL;
struct port_info *pi;
- if (!version_printed) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
- DRV_VERSION);
- ++version_printed;
- }
+ pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);
err = pci_enable_device(pdev);
if (err)
@@ -1124,8 +1118,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < bi->port_number; ++i) {
err = register_netdev(adapter->port[i].dev);
if (err)
- pr_warning("%s: cannot register net device %s, skipping\n",
- pci_name(pdev), adapter->port[i].dev->name);
+ pr_warn("%s: cannot register net device %s, skipping\n",
+ pci_name(pdev), adapter->port[i].dev->name);
else {
/*
* Change the name we use for messages to the name of
@@ -1143,10 +1137,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_adapter_res;
}
- printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
- bi->desc, adapter->params.chip_revision,
- adapter->params.pci.is_pcix ? "PCIX" : "PCI",
- adapter->params.pci.speed, adapter->params.pci.width);
+ pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
+ adapter->name, bi->desc, adapter->params.chip_revision,
+ adapter->params.pci.is_pcix ? "PCIX" : "PCI",
+ adapter->params.pci.speed, adapter->params.pci.width);
/*
* Set the T1B ASIC and memory clocks.
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index d84872e88171..482976925154 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1822,8 +1822,8 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely(skb->len < ETH_HLEN ||
skb->len > dev->mtu + eth_hdr_len(skb->data))) {
- pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
- skb->len, eth_hdr_len(skb->data), dev->mtu);
+ netdev_dbg(dev, "packet size %d hdr %d mtu%d\n",
+ skb->len, eth_hdr_len(skb->data), dev->mtu);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1831,7 +1831,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol == IPPROTO_UDP) {
if (unlikely(skb_checksum_help(skb))) {
- pr_debug("%s: unable to do udp checksum\n", dev->name);
+ netdev_dbg(dev, "unable to do udp checksum\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index f15ee326d5c1..2b5e62193cea 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -29,6 +29,9 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
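With pr_fmt() defined before the includes, every pr_*() call in this file is automatically prefixed with the module name, which is why the conversions below can drop the explicit DRV_NAME prefixes from several messages. A minimal userspace model of the macro mechanics (illustrative only; the kernel's pr_err() does more than this):

#include <stdio.h>

#define KBUILD_MODNAME "cxgb3"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* ## __VA_ARGS__ follows the kernel's GNU C dialect (gcc/clang). */
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "cxgb3: cannot initialize work queue" */
	pr_err("cannot initialize work queue\n");
	return 0;
}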
@@ -153,7 +156,7 @@ struct workqueue_struct *cxgb3_wq;
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
else {
const char *s = "10Mbps";
const struct port_info *p = netdev_priv(dev);
@@ -170,8 +173,9 @@ static void link_report(struct net_device *dev)
break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
- p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ netdev_info(dev, "link up, %s, %s-duplex\n",
+ s, p->link_config.duplex == DUPLEX_FULL
+ ? "full" : "half");
}
}
@@ -318,10 +322,10 @@ void t3_os_phymod_changed(struct adapter *adap, int port_id)
const struct port_info *pi = netdev_priv(dev);
if (pi->phy.modtype == phy_modtype_none)
- printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
+ netdev_info(dev, "PHY module unplugged\n");
else
- printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
- mod_str[pi->phy.modtype]);
+ netdev_info(dev, "%s PHY module inserted\n",
+ mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
@@ -1422,8 +1426,7 @@ static int cxgb_open(struct net_device *dev)
if (is_offload(adapter) && !ofld_disable) {
err = offload_open(dev);
if (err)
- printk(KERN_WARNING
- "Could not initialize offload capabilities\n");
+ pr_warn("Could not initialize offload capabilities\n");
}
netif_set_real_num_tx_queues(dev, pi->nqsets);
@@ -3132,14 +3135,13 @@ static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
if (!test_bit(i, &adap->registered_device_map))
continue;
- printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
- dev->name, ai->desc, pi->phy.desc,
- is_offload(adap) ? "R" : "", adap->params.rev, buf,
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
+ netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
+ ai->desc, pi->phy.desc,
+ is_offload(adap) ? "R" : "", adap->params.rev, buf,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name && adap->params.vpd.mclk)
- printk(KERN_INFO
- "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
+ pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
adap->name, t3_mc7_size(&adap->cm) >> 20,
t3_mc7_size(&adap->pmtx) >> 20,
t3_mc7_size(&adap->pmrx) >> 20,
@@ -3177,24 +3179,18 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
-
int i, err, pci_using_dac = 0;
resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
struct port_info *pi;
- if (!version_printed) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
- ++version_printed;
- }
+ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
if (!cxgb3_wq) {
cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
if (!cxgb3_wq) {
- printk(KERN_ERR DRV_NAME
- ": cannot initialize work queue\n");
+ pr_err("cannot initialize work queue\n");
return -ENOMEM;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 942dace361d2..4232767862b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -30,6 +30,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
@@ -62,9 +64,8 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static void cxgb_neigh_update(struct neighbour *neigh);
-static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
- struct dst_entry *new, struct neighbour *new_neigh,
- const void *daddr);
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+ struct neighbour *neigh, const void *daddr);
static inline int offload_activated(struct t3cdev *tdev)
{
@@ -182,14 +183,17 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
struct net_device *dev = adapter->port[i];
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
- rcu_read_lock();
dev = __vlan_find_dev_deep(dev, vlan);
- rcu_read_unlock();
} else if (netif_is_bond_slave(dev)) {
- while (dev->master)
- dev = dev->master;
+ struct net_device *upper_dev;
+
+ while ((upper_dev =
+ netdev_master_upper_dev_get_rcu(dev)))
+ dev = upper_dev;
}
+ rcu_read_unlock();
return dev;
}
}
@@ -232,8 +236,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
if ((val >> S_MAXRXDATA) != 0x3f60) {
val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
val |= V_MAXRXDATA(0x3f60);
- printk(KERN_INFO
- "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
+ pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
adapter->name, val);
t3_write_reg(adapter, A_TP_PARA_REG2, val);
}
@@ -253,8 +256,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
for (i = 0; i < 4; i++)
val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
- printk(KERN_INFO
- "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
+ pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
adapter->name, val, uiip->pgsz_factor[0],
uiip->pgsz_factor[1], uiip->pgsz_factor[2],
uiip->pgsz_factor[3]);
@@ -706,8 +708,7 @@ static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_smt_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -718,8 +719,7 @@ static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -730,8 +730,7 @@ static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_rte_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -751,7 +750,7 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
t3c_tid->
ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_ACT_OPEN_RPL);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -769,7 +768,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[p->opcode] (dev, skb,
t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -787,7 +786,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -814,7 +813,7 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_PASS_ACCEPT_REQ);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -908,7 +907,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_ACT_ESTABLISH);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -954,7 +953,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[opcode] (dev, skb,
t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -970,10 +969,9 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
}
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
- cxgb_redirect(nr->old, nr->old_neigh,
- nr->new, nr->new_neigh,
+ cxgb_redirect(nr->old, nr->new, nr->neigh,
nr->daddr);
- cxgb_neigh_update(nr->new_neigh);
+ cxgb_neigh_update(nr->neigh);
break;
}
default:
@@ -991,8 +989,7 @@ static struct notifier_block nb = {
*/
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
- printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
- *skb->data);
+ pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -1010,8 +1007,8 @@ void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
if (opcode < NUM_CPL_CMDS)
cpl_handlers[opcode] = h ? h : do_bad_cpl;
else
- printk(KERN_ERR "T3C: handler registration for "
- "opcode %x failed\n", opcode);
+ pr_err("T3C: handler registration for opcode %x failed\n",
+ opcode);
}
EXPORT_SYMBOL(t3_register_cpl_handler);
@@ -1030,9 +1027,8 @@ static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
if (ret & CPL_RET_UNKNOWN_TID) {
union opcode_tid *p = cplhdr(skb);
- printk(KERN_ERR "%s: CPL message (opcode %u) had "
- "unknown TID %u\n", dev->name, opcode,
- G_TID(ntohl(p->opcode_tid)));
+ pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
+ dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
}
#endif
if (ret & CPL_RET_BUF_DONE)
@@ -1096,7 +1092,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
if (!skb) {
- printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
+ pr_err("%s: cannot allocate skb!\n", __func__);
return;
}
skb->priority = CPL_PRIORITY_CONTROL;
@@ -1111,11 +1107,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
tdev->send(tdev, skb);
}
-static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
- struct dst_entry *new, struct neighbour *new_neigh,
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+ struct neighbour *neigh,
const void *daddr)
{
- struct net_device *olddev, *newdev;
+ struct net_device *dev;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1123,29 +1119,17 @@ static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = old_neigh->dev;
- newdev = new_neigh->dev;
+ dev = neigh->dev;
- if (!is_offloading(olddev))
- return;
- if (!is_offloading(newdev)) {
- printk(KERN_WARNING "%s: Redirect to non-offload "
- "device ignored.\n", __func__);
+ if (!is_offloading(dev))
return;
- }
- tdev = dev2t3cdev(olddev);
+ tdev = dev2t3cdev(dev);
BUG_ON(!tdev);
- if (tdev != dev2t3cdev(newdev)) {
- printk(KERN_WARNING "%s: Redirect to different "
- "offload device ignored.\n", __func__);
- return;
- }
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new, newdev, daddr);
+ e = t3_l2t_get(tdev, new, dev, daddr);
if (!e) {
- printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
- __func__);
+ pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index dd901c5061b9..9d67eb794c4b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1278,7 +1278,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* update port statistics */
- if (skb->ip_summed == CHECKSUM_COMPLETE)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
qs->port_stats[SGE_PSTAT_TX_CSUM]++;
if (skb_shinfo(skb)->gso_size)
qs->port_stats[SGE_PSTAT_TSO]++;
@@ -2130,8 +2130,10 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
- if (cpl->vlan_valid)
+ if (cpl->vlan_valid) {
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
__vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
+ }
napi_gro_frags(&qs->napi);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 3dee68612c9e..c74a898fcd4f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3725,8 +3725,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
memcpy(adapter->port[i]->dev_addr, hw_addr,
ETH_ALEN);
- memcpy(adapter->port[i]->perm_addr, hw_addr,
- ETH_ALEN);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 378988b5709a..6db997c78a5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -35,6 +35,8 @@
#ifndef __CXGB4_H__
#define __CXGB4_H__
+#include "t4_hw.h"
+
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
@@ -212,6 +214,8 @@ struct tp_err_stats {
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned short tx_modq_map; /* TX modulation scheduler queue to */
+ /* channel map */
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
@@ -526,6 +530,7 @@ struct adapter {
struct net_device *port[MAX_NPORTS];
u8 chan_map[NCHAN]; /* channel -> port map */
+ u32 filter_mode;
unsigned int l2t_start;
unsigned int l2t_end;
struct l2t_data *l2t;
@@ -545,6 +550,129 @@ struct adapter {
spinlock_t stats_lock;
};
+/* Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/* Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ *
+ * Most of the following data structures are modeled on T4 capabilities.
+ * Drivers for earlier chips use the subsets which make sense for those chips.
+ * We really need to come up with a hardware-independent mechanism to
+ * represent hardware filter capabilities ...
+ */
+struct ch_filter_tuple {
+ /* Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /* Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+};
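Per the comment above, every field in ch_filter_tuple participates as a (value, mask) pair: the rule holds when ((field & mask) == value), so (0, 0) is a wildcard and a partial mask covers a range such as an IPv4 subnet. A standalone sketch of that predicate (field and type names here are illustrative, not the hardware layout):

#include <stdint.h>
#include <stdio.h>

/* One (value, mask) rule: matches when ((field & mask) == value). */
struct rule { uint32_t value, mask; };

static int rule_matches(const struct rule *r, uint32_t field)
{
	return (field & r->mask) == r->value;
}

int main(void)
{
	struct rule any    = { 0, 0 };                   /* wildcard   */
	struct rule subnet = { 0x0a000000, 0xff000000 }; /* 10.0.0.0/8 */

	printf("%d %d %d\n",
	       rule_matches(&any, 0x12345678),      /* 1: wildcard matches     */
	       rule_matches(&subnet, 0x0a010203),   /* 1: 10.1.2.3 is in /8    */
	       rule_matches(&subnet, 0xc0a80001));  /* 0: 192.168.0.1 is not   */
	return 0;
}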
+
+/* A filter ioctl command.
+ */
+struct ch_filter_specification {
+ /* Administrative fields for filter.
+ */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /* Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+
+ /* Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t rpttid:1; /* report TID in RSS hash field */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */
+ uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
+ /* 1 => TCB contains IQ ID */
+
+ /* Switch proxy/rewrite fields. An ingress packet which matches a
+ * filter with "switch" set will be looped back out as an egress
+ * packet -- potentially with some Ethernet header rewriting.
+ */
+ uint32_t eport:2; /* egress port to switch packet out */
+ uint32_t newdmac:1; /* rewrite destination MAC address */
+ uint32_t newsmac:1; /* rewrite source MAC address */
+ uint32_t newvlan:2; /* rewrite VLAN Tag */
+ uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
+ uint8_t smac[ETH_ALEN]; /* new source MAC address */
+ uint16_t vlan; /* VLAN Tag to insert */
+
+ /* Filter rule value/mask pairs.
+ */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
+
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum {
+ VLAN_NOCHANGE = 0, /* default */
+ VLAN_REMOVE,
+ VLAN_INSERT,
+ VLAN_REWRITE
+};
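Tying the pieces together, a hypothetical ch_filter_specification that drops IPv4 TCP traffic to local port 80 from any source could be initialised as sketched below. This is illustrative only; it is not part of the patch and omits the ioctl plumbing that actually submits the specification.

/* Illustrative initialisation; IPPROTO_TCP (6) written out to stay
 * self-contained. Fields left zero are (0, 0) wildcards.
 */
static const struct ch_filter_specification drop_http = {
	.hitcnts    = 1,            /* count hits in the TCB         */
	.type       = 0,            /* 0 => IPv4                     */
	.action     = FILTER_DROP,
	.val.proto  = 6,            /* TCP                           */
	.mask.proto = 0xff,         /* protocol must match exactly   */
	.val.lport  = 80,
	.mask.lport = 0xffff,       /* local port must match exactly */
};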
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -701,6 +829,12 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+
+struct fw_filter_wr;
+
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
@@ -737,6 +871,8 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
+
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 130dd9d5b493..c6c05bfef0e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -175,6 +175,30 @@ enum {
MIN_FL_ENTRIES = 16
};
+/* Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command. The use of bit-field structure elements is purely to
+ * remind ourselves of the field size limitations and save memory in the case
+ * where the filter table is large.
+ */
+struct filter_entry {
+ /* Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+
+ u32 pending:1; /* filter action is pending firmware reply */
+ u32 smtidx:8; /* Source MAC Table index for smac */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+
+ /* The filter itself. Most of this is a straight copy of information
+ * provided by the extended ioctl(). Some fields are translated to
+ * internal forms -- for instance the Ingress Queue ID passed in from
+ * the ioctl() is translated into the Absolute Ingress Queue ID.
+ */
+ struct ch_filter_specification fs;
+};
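The bit-field layout above is the memory-saving point the comment makes: the three status flags and the 8-bit SMT index pack into a single 32-bit word per entry. A small userspace comparison (not part of the patch; sizes depend on the ABI, the shown values are typical):

#include <stdint.h>
#include <stdio.h>

struct packed_flags {
	uint32_t valid:1;
	uint32_t locked:1;
	uint32_t pending:1;
	uint32_t smtidx:8;
};

struct unpacked_flags {
	uint32_t valid, locked, pending, smtidx;
};

int main(void)
{
	/* Typically prints "4 16" on common ABIs. */
	printf("%zu %zu\n", sizeof(struct packed_flags),
	       sizeof(struct unpacked_flags));
	return 0;
}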
+
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -325,6 +349,9 @@ enum {
static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+module_param(tp_vlan_pri_map, uint, 0644);
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
+
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
@@ -506,8 +533,67 @@ static int link_start(struct net_device *dev)
return ret;
}
-/*
- * Response queue handler for the FW event queue.
+/* Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+static void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+ /* If the new or old filter has loopback rewriting rules then we'll
+ * need to free any existing Layer Two Table (L2T) entries of the old
+ * filter rule. The firmware will handle freeing up any Source MAC
+ * Table (SMT) entries used for rewriting Source MAC Addresses in
+ * loopback rules.
+ */
+ if (f->l2t)
+ cxgb4_l2t_release(f->l2t);
+
+ /* The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags, l2t pointer, etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/* Handle a filter write/deletion reply.
+ */
+static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ unsigned int idx = GET_TID(rpl);
+ unsigned int nidx = idx - adap->tids.ftid_base;
+ unsigned int ret;
+ struct filter_entry *f;
+
+ if (idx >= adap->tids.ftid_base && nidx <
+ (adap->tids.nftids + adap->tids.nsftids)) {
+ idx = nidx;
+ ret = GET_TCB_COOKIE(rpl->cookie);
+ f = &adap->tids.ftid_tab[idx];
+
+ if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /* Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(adap, f);
+ } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+ dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+ idx);
+ clear_filter(adap, f);
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ } else {
+ /* Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(adap, f);
+ }
+ }
+}
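filter_rpl() first translates the hardware TID carried in the reply back into an index into ftid_tab[] and rejects anything outside the filter region. A standalone model of that bounds check (constants are made up for illustration; the real base and sizes come from adap->tids):

#include <stdio.h>

#define FTID_BASE 1000   /* illustrative hardware base for filter TIDs */
#define NFTIDS    64
#define NSFTIDS   8

/* Map a hardware TID from a firmware reply to a filter-table index,
 * returning -1 for TIDs that do not belong to the filter region.
 */
static int tid_to_findex(unsigned int tid)
{
	unsigned int nidx = tid - FTID_BASE;

	if (tid >= FTID_BASE && nidx < NFTIDS + NSFTIDS)
		return (int)nidx;
	return -1;
}

int main(void)
{
	printf("%d %d %d\n",
	       tid_to_findex(1000),    /*  0: first filter TID        */
	       tid_to_findex(1071),    /* 71: last server-filter TID  */
	       tid_to_findex(2000));   /* -1: not a filter TID        */
	return 0;
}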
+
+/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
@@ -542,6 +628,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_l2t_write_rpl *p = (void *)rsp;
do_l2t_write_rpl(q->adap, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (void *)rsp;
+
+ filter_rpl(q->adap, p);
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
@@ -983,6 +1073,148 @@ static void t4_free_mem(void *addr)
kfree(addr);
}
+/* Send a Work Request to write the filter at a specified index. We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode which will prevent any further actions against
+ * it till we get a reply from the firmware on the completion status of the
+ * request.
+ */
+static int set_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct sk_buff *skb;
+ struct fw_filter_wr *fwr;
+ unsigned int ftid;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter->l2t);
+ if (f->l2t == NULL)
+ return -EAGAIN;
+ if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
+ f->fs.eport, f->fs.dmac)) {
+ cxgb4_l2t_release(f->l2t);
+ f->l2t = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ ftid = adapter->tids.ftid_base + fidx;
+
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
+ fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+ memset(fwr, 0, sizeof(*fwr));
+
+ /* It would be nice to put most of the following in t4_hw.c but most
+ * of the work is translating the cxgbtool ch_filter_specification
+ * into the Work Request and the definition of that structure is
+ * currently in cxgbtool.h which isn't appropriate to pull into the
+ * common code. We may eventually try to come up with a more neutral
+ * filter specification structure but for now it's easiest to simply
+ * put this fairly direct code in line ...
+ */
+ fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
+ fwr->tid_to_iq =
+ htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
+ V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
+ V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
+ V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
+ V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio) |
+ V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = htons(f->fs.val.ethtype);
+ fwr->ethtypem = htons(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
+ V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
+ V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
+ V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
+ fwr->maci_to_matchtypem =
+ htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
+ V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
+ V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
+ V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
+ V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
+ V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
+ V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = htons(f->fs.val.ivlan);
+ fwr->ivlanm = htons(f->fs.mask.ivlan);
+ fwr->ovlan = htons(f->fs.val.ovlan);
+ fwr->ovlanm = htons(f->fs.mask.ovlan);
+ memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = htons(f->fs.val.lport);
+ fwr->lpm = htons(f->fs.mask.lport);
+ fwr->fp = htons(f->fs.val.fport);
+ fwr->fpm = htons(f->fs.mask.fport);
+ if (f->fs.newsmac)
+ memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
+
+/* Delete the filter at a specified index.
+ */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct sk_buff *skb;
+ struct fw_filter_wr *fwr;
+ unsigned int len, ftid;
+
+ len = sizeof(*fwr);
+ ftid = adapter->tids.ftid_base + fidx;
+
+ skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
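set_filter_wr() and del_filter_wr() share one asynchronous protocol: mark the entry pending, send the work request, and let filter_rpl() (earlier in this patch) either mark the entry valid or clear it when the firmware reply arrives. A compressed, driver-agnostic sketch of that state machine (names are illustrative, not the cxgb4 API):

#include <stdio.h>

enum fw_status { FW_ADDED, FW_DELETED, FW_FAILED };

struct entry { int pending, valid; };

static void issue_request(struct entry *e)
{
	e->pending = 1;   /* block further operations until the reply */
	/* ...build and send the firmware work request here... */
}

static void handle_reply(struct entry *e, enum fw_status st)
{
	e->pending = 0;   /* asynchronous operation completed */
	e->valid = (st == FW_ADDED);
	if (st == FW_FAILED)
		fprintf(stderr, "filter setup failed\n");
}

int main(void)
{
	struct entry e = { 0, 0 };

	issue_request(&e);
	handle_reply(&e, FW_ADDED);
	printf("pending=%d valid=%d\n", e.pending, e.valid);  /* 0 1 */
	return 0;
}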
+
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
@@ -1762,9 +1994,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
-
- return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
- c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+ struct sge_rspq *q;
+ int i;
+ int r = 0;
+
+ for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
+ q = &adap->sge.ethrxq[i].rspq;
+ r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
+ c->rx_max_coalesced_frames);
+ if (r) {
+ dev_err(&dev->dev, "failed to set coalesce %d\n", r);
+ break;
+ }
+ }
+ return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -2195,7 +2438,7 @@ int cxgb4_alloc_atid(struct tid_info *t, void *data)
if (t->afree) {
union aopen_entry *p = t->afree;
- atid = p - t->atid_tab;
+ atid = (p - t->atid_tab) + t->atid_base;
t->afree = p->next;
p->data = data;
t->atids_in_use++;
@@ -2210,7 +2453,7 @@ EXPORT_SYMBOL(cxgb4_alloc_atid);
*/
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
- union aopen_entry *p = &t->atid_tab[atid];
+ union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
spin_lock_bh(&t->atid_lock);
p->next = t->afree;
@@ -2249,8 +2492,34 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
-/*
- * Release a server TID.
+/* Allocate a server filter TID and set it to the supplied value.
+ */
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
+{
+ int stid;
+
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET) {
+ stid = find_next_zero_bit(t->stid_bmap,
+ t->nstids + t->nsftids, t->nstids);
+ if (stid < (t->nstids + t->nsftids))
+ __set_bit(stid, t->stid_bmap);
+ else
+ stid = -1;
+ } else {
+ stid = -1;
+ }
+ if (stid >= 0) {
+ t->stid_tab[stid].data = data;
+ stid += t->stid_base;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_sftid);
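cxgb4_alloc_sftid() allocates from the tail of the shared server-TID bitmap, scanning only the [nstids, nstids + nsftids) range and rebasing the chosen slot by stid_base. A standalone model of that allocation using a plain array instead of the kernel bitmap helpers (and ignoring the PF_INET-only restriction and locking):

#include <stdio.h>

#define NSTIDS    8     /* ordinary server TIDs          */
#define NSFTIDS   4     /* server-filter TIDs after them */
#define STID_BASE 100   /* hardware ID of bitmap slot 0  */

static int bmap[NSTIDS + NSFTIDS];   /* 1 = in use */

/* Allocate from the server-filter region only. */
static int alloc_sftid(void)
{
	for (int i = NSTIDS; i < NSTIDS + NSFTIDS; i++) {
		if (!bmap[i]) {
			bmap[i] = 1;
			return i + STID_BASE;   /* rebase to hardware ID */
		}
	}
	return -1;
}

int main(void)
{
	printf("%d %d\n", alloc_sftid(), alloc_sftid());   /* prints "108 109" */
	return 0;
}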
+
+/* Release a server TID.
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
@@ -2362,18 +2631,26 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
static int tid_init(struct tid_info *t)
{
size_t size;
+ unsigned int stid_bmap_size;
unsigned int natids = t->natids;
- size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+ stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
- BITS_TO_LONGS(t->nstids) * sizeof(long);
+ t->nsftids * sizeof(*t->stid_tab) +
+ stid_bmap_size * sizeof(long) +
+ t->nftids * sizeof(*t->ftid_tab) +
+ t->nsftids * sizeof(*t->ftid_tab);
+
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
return -ENOMEM;
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
- t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+ t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
+ t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
@@ -2388,7 +2665,7 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
- bitmap_zero(t->stid_bmap, t->nstids);
+ bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
return 0;
}
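
The enlarged tid_init() allocation is carved into consecutive regions, and the pointer arithmetic above only works if the regions are laid out in exactly the order they are sized. A sketch of the layout with illustrative (assumed) counts:

/*
 * Illustrative layout of the single tid_init() allocation; the counts are
 * made-up example values, not real hardware numbers:
 *
 *   tid_tab    ntids            entries  (e.g. 1024)
 *   atid_tab   natids           entries  (e.g.  128)
 *   stid_tab   nstids + nsftids entries  (e.g. 64 + 16; servers and
 *                                          server filters share one array)
 *   stid_bmap  BITS_TO_LONGS(nstids + nsftids) longs
 *   ftid_tab   nftids + nsftids entries  (normal filters, then the filter
 *                                          slots backing server filters)
 *
 * which is why the pointers are derived in the same order:
 *
 *   atid_tab  = (union aopen_entry *)&tid_tab[ntids];
 *   stid_tab  = (struct serv_entry *)&atid_tab[natids];
 *   stid_bmap = (unsigned long *)&stid_tab[nstids + nsftids];
 *   ftid_tab  = (struct filter_entry *)&stid_bmap[stid_bmap_size];
 */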
@@ -2404,7 +2681,8 @@ static int tid_init(struct tid_info *t)
* Returns <0 on error and one of the %NET_XMIT_* values on success.
*/
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, unsigned int queue)
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue)
{
unsigned int chan;
struct sk_buff *skb;
@@ -2750,6 +3028,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
{
void *handle;
struct cxgb4_lld_info lli;
+ unsigned short i;
lli.pdev = adap->pdev;
lli.l2t = adap->l2t;
@@ -2776,10 +3055,16 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
+ lli.filt_mode = adap->filter_mode;
+ /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
+ for (i = 0; i < NCHAN; i++)
+ lli.tx_modq[i] = i;
lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
lli.fw_vers = adap->params.fw_vers;
lli.dbfifo_int_thresh = dbfifo_int_thresh;
+ lli.sge_pktshift = adap->sge.pktshift;
+ lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -2999,6 +3284,126 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}
+/* Return an error number if the indicated filter isn't writable ...
+ */
+static int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Delete the filter at the specified index (if valid).  This checks for all
+ * the common problems with doing this, such as the filter being locked,
+ * currently pending in another operation, etc.
+ */
+static int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+ struct filter_entry *f;
+ int ret;
+
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ return -EINVAL;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+ if (f->valid)
+ return del_filter_wr(adapter, fidx);
+
+ return 0;
+}
+
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue, unsigned char port, unsigned char mask)
+{
+ int ret;
+ struct filter_entry *f;
+ struct adapter *adap;
+ int i;
+ u8 *val;
+
+ adap = netdev2adap(dev);
+
+ /* Adjust stid to correct filter index */
+ stid -= adap->tids.nstids;
+ stid += adap->tids.nftids;
+
+ /* Check to make sure the filter requested is writable ...
+ */
+ f = &adap->tids.ftid_tab[stid];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ /* Clear out any old resources being used by the filter before
+ * we start constructing the new filter.
+ */
+ if (f->valid)
+ clear_filter(adap, f);
+
+ /* Clear out filter specifications */
+ memset(&f->fs, 0, sizeof(struct ch_filter_specification));
+ f->fs.val.lport = cpu_to_be16(sport);
+ f->fs.mask.lport = ~0;
+ val = (u8 *)&sip;
+ if ((val[0] | val[1] | val[2] | val[3]) != 0) {
+ for (i = 0; i < 4; i++) {
+ f->fs.val.lip[i] = val[i];
+ f->fs.mask.lip[i] = ~0;
+ }
+ if (adap->filter_mode & F_PORT) {
+ f->fs.val.iport = port;
+ f->fs.mask.iport = mask;
+ }
+ }
+
+ f->fs.dirsteer = 1;
+ f->fs.iq = queue;
+ /* Mark filter as locked */
+ f->locked = 1;
+ f->fs.rpttid = 1;
+
+ ret = set_filter_wr(adap, stid);
+ if (ret) {
+ clear_filter(adap, f);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_create_server_filter);
+
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6)
+{
+ int ret;
+ struct filter_entry *f;
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+
+ /* Adjust stid to correct filter index */
+ stid -= adap->tids.nstids;
+ stid += adap->tids.nftids;
+
+ f = &adap->tids.ftid_tab[stid];
+ /* Unlock the filter */
+ f->locked = 0;
+
+ ret = delete_filter(adap, stid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_remove_server_filter);
+
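Taken together, a ULD that wants a hardware server filter allocates a server-filter TID, installs the filter, and later tears it down. A minimal usage sketch follows; the port number, ingress queue, uld_data pointer and the teardown via cxgb4_free_stid() are illustrative assumptions.

/* Minimal usage sketch: steer TCP port 8000 on any local IP to ingress
 * queue 0.  All concrete values below are made-up examples.
 */
static int example_server_filter(struct net_device *dev, void *uld_data)
{
	struct adapter *adap = netdev2adap(dev);
	int stid, ret;

	stid = cxgb4_alloc_sftid(&adap->tids, PF_INET, uld_data);
	if (stid < 0)
		return stid;

	ret = cxgb4_create_server_filter(dev, stid, htonl(INADDR_ANY),
					 htons(8000), 0 /* vlan */,
					 0 /* queue */, 0 /* port */,
					 0 /* mask */);
	if (ret) {
		cxgb4_free_stid(&adap->tids, stid, PF_INET);
		return ret;
	}

	/* ... later, when the service goes away ... */
	cxgb4_remove_server_filter(dev, stid, 0 /* queue */, false /* ipv6 */);
	cxgb4_free_stid(&adap->tids, stid, PF_INET);
	return 0;
}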
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *ns)
{
@@ -3203,7 +3608,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- c->retval_len16 = htonl(FW_LEN16(*c));
+ c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
if (ret < 0)
return ret;
@@ -3245,6 +3650,34 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
v = t4_read_reg(adap, TP_PIO_DATA);
t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ /* first 4 Tx modulation queues point to consecutive Tx channels */
+ adap->params.tp.tx_modq_map = 0xE4;
+ t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+
+ /* associate each Tx modulation queue with consecutive Tx channels */
+ v = 0x84218421;
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_HDR);
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_FIFO);
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_PCMD);
+
+#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
+ if (is_offload(adap)) {
+ t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
+ V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
+ V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ }
+
/* get basic stuff going */
return t4_early_init(adap, adap->fn);
}
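
The 0xE4 written to TP_TX_MOD_QUEUE_REQ_MAP packs one 2-bit channel number per Tx modulation queue: 0xE4 is binary 11 10 01 00, i.e. queue 0 -> channel 0 through queue 3 -> channel 3. A tiny illustrative helper (not part of the patch) that builds the same identity map:

/* Illustrative only: build the identity queue->channel map that the
 * driver writes as the constant 0xE4 (2 bits per queue, 4 queues).
 */
static inline u16 example_tx_modq_map(void)
{
	u16 map = 0;
	int q;

	for (q = 0; q < 4; q++)
		map |= q << (2 * q);	/* queue q -> channel q */

	return map;			/* 0b11100100 == 0xE4 */
}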
@@ -3397,7 +3830,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
- caps_cmd.retval_len16 =
+ caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
@@ -3422,7 +3855,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
if (ret < 0)
@@ -3497,7 +3930,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
@@ -3594,8 +4027,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
VFRES_NEQ, VFRES_NETHCTRL,
VFRES_NIQFLINT, VFRES_NIQ,
VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_GET(
- FW_PFVF_CMD_CMASK_MASK),
+ FW_PFVF_CMD_CMASK_MASK,
pfvfres_pmask(
adapter, pf, vf),
VFRES_NEXACTF,
@@ -3929,7 +4361,7 @@ static int adap_init0(struct adapter *adap)
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
@@ -4035,6 +4467,10 @@ static int adap_init0(struct adapter *adap)
for (j = 0; j < NCHAN; j++)
adap->params.tp.tx_modq[j] = j;
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->filter_mode, 1,
+ TP_VLAN_PRI_MAP);
+
adap->flags |= FW_OK;
return 0;
@@ -4661,6 +5097,17 @@ static void remove_one(struct pci_dev *pdev)
if (adapter->debugfs_root)
debugfs_remove_recursive(adapter->debugfs_root);
+ /* If we allocated filters, free up state associated with any
+ * valid filters ...
+ */
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ for (i = 0; i < (adapter->tids.nftids +
+ adapter->tids.nsftids); i++, f++)
+ if (f->valid)
+ clear_filter(adapter, f);
+ }
+
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
@@ -4694,7 +5141,7 @@ static int __init cxgb4_init_module(void)
/* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!cxgb4_debugfs_root)
- pr_warning("could not create debugfs entry, continuing\n");
+ pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 39bec73ff87c..e2bbc7f3e2de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -38,6 +38,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
#include <linux/atomic.h>
/* CPL message priority levels */
@@ -97,7 +98,9 @@ struct tid_info {
union aopen_entry *atid_tab;
unsigned int natids;
+ unsigned int atid_base;
+ struct filter_entry *ftid_tab;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -129,7 +132,7 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
stid -= t->stid_base;
- return stid < t->nstids ? t->stid_tab[stid].data : NULL;
+ return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
@@ -141,6 +144,7 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
@@ -148,8 +152,14 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, unsigned int queue);
-
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue);
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue,
+ unsigned char port, unsigned char mask);
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
@@ -221,9 +231,16 @@ struct cxgb4_lld_info {
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned short udb_density; /* # of user DB/page */
unsigned short ucq_density; /* # of user CQs/page */
+ unsigned short filt_mode; /* filter optional components */
+ unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
+ /* scheduler queue */
void __iomem *gts_reg; /* address of GTS register */
void __iomem *db_reg; /* address of kernel doorbell */
int dbfifo_int_thresh; /* doorbell fifo int threshold */
+ unsigned int sge_pktshift; /* Padding between CPL and */
+ /* packet data */
+ bool enable_fw_ofld_conn; /* Enable connection through fw */
+ /* WR */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 6ac77a62f361..29878098101e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -484,6 +484,38 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
handle_failed_resolution(adap, arpq);
}
+/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
+ * explicitly freed and, while busy, they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
+{
+ struct l2t_entry *e;
+
+ write_lock_bh(&d->lock);
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ e->state = L2T_STATE_SWITCHING;
+ atomic_set(&e->refcnt, 1);
+ spin_unlock(&e->lock);
+ }
+ write_unlock_bh(&d->lock);
+ return e;
+}
+
+/* Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr)
+{
+ e->vlan = vlan;
+ e->lport = port;
+ memcpy(e->dmac, eth_addr, ETH_ALEN);
+ return write_l2e(adap, e, 0);
+}
+
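A switching L2T entry is typically consumed by a filter that rewrites and re-injects packets out of a different port. A minimal sketch pairing the two new helpers; the VLAN tag, egress port, MAC address and the release call are illustrative assumptions.

/* Illustrative pairing of the two new switching-L2T helpers.  The VLAN
 * tag, egress port and destination MAC are made-up example values.
 */
static struct l2t_entry *example_switching_l2t(struct adapter *adap)
{
	static const u8 dmac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct l2t_entry *e;

	e = t4_l2t_alloc_switching(adap->l2t);
	if (!e)
		return NULL;

	/* Program VLAN 100, egress port 0 and the destination MAC; the
	 * entry is written to hardware inside the helper.
	 */
	if (t4_l2t_set_switching(adap, e, 100, 0, (u8 *)dmac) < 0) {
		cxgb4_l2t_release(e);	/* assumed release helper */
		return NULL;
	}

	return e;
}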
struct l2t_data *t4_init_l2t(void)
{
int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 02b31d0c6410..108c0f1fce1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -100,6 +100,9 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr);
struct l2t_data *t4_init_l2t(void);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 3ecc087d732d..fe9a2ea3588b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -508,7 +508,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= 8) {
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
q->pend_cred &= 7;
}
@@ -2082,10 +2082,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
FW_IQ_CMD_FL0FETCHRO(1) |
FW_IQ_CMD_FL0DATARO(1) |
- FW_IQ_CMD_FL0PADEN);
+ FW_IQ_CMD_FL0PADEN(1));
c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
FW_IQ_CMD_FL0FBMAX(3));
c.fl0size = htons(flsz);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 45f2bea2e929..4ce62031f62f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -109,7 +109,7 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals,
unsigned int nregs, unsigned int start_idx)
{
@@ -648,12 +648,12 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & BUSY)
+ if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
return -EBUSY;
cont = cont ? SF_CONT : 0;
lock = lock ? SF_LOCK : 0;
t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
- ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, SF_DATA);
return ret;
@@ -676,14 +676,14 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & BUSY)
+ if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
return -EBUSY;
cont = cont ? SF_CONT : 0;
lock = lock ? SF_LOCK : 0;
t4_write_reg(adapter, SF_DATA, val);
t4_write_reg(adapter, SF_OP, lock |
cont | BYTECNT(byte_cnt - 1) | OP_WR);
- return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}
/**
@@ -2252,14 +2252,14 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
t4_write_reg(adap, EPIO_REG(DATA0), mask0);
t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
return -ETIMEDOUT;
/* write CRC */
t4_write_reg(adap, EPIO_REG(DATA0), crc);
t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
return -ETIMEDOUT;
}
#undef EPIO_REG
@@ -2268,6 +2268,26 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
return 0;
}
+/* t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter.  If @qid is
+ * negative, the delete notification is suppressed.
+ */
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
#define INIT_CMD(var, cmd, rd_wr) do { \
(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
FW_CMD_REQUEST | FW_CMD_##rd_wr); \
@@ -2405,7 +2425,7 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
- c.err_to_mbasyncnot = htonl(
+ c.err_to_clearinit = htonl(
FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
@@ -2426,7 +2446,7 @@ retry:
return ret;
}
- v = ntohl(c.err_to_mbasyncnot);
+ v = ntohl(c.err_to_clearinit);
master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
if (state) {
if (v & FW_HELLO_CMD_ERR)
@@ -2774,7 +2794,7 @@ int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
- caps_cmd.retval_len16 =
+ caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
@@ -2797,7 +2817,7 @@ int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
}
@@ -3583,7 +3603,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
p->lport = j;
p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
- memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
adap->port[i]->dev_id = j;
ret = ntohl(c.u.info.lstatus_to_modtype);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index eb71b8250b91..261d17703adc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -193,8 +193,24 @@ struct work_request_hdr {
__be64 wr_lo;
};
+/* wr_hi fields */
+#define S_WR_OP 24
+#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+
#define WR_HDR struct work_request_hdr wr
+/* option 0 fields */
+#define S_MSS_IDX 60
+#define M_MSS_IDX 0xF
+#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define M_RSS_QUEUE 0x3FF
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+
struct cpl_pass_open_req {
WR_HDR;
union opcode_tid ot;
@@ -204,12 +220,14 @@ struct cpl_pass_open_req {
__be32 peer_ip;
__be64 opt0;
#define TX_CHAN(x) ((x) << 2)
+#define NO_CONG(x) ((x) << 4)
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
+#define TCAM_BYPASS(x) ((u64)(x) << 48)
#define NAGLE(x) ((u64)(x) << 49)
#define WND_SCALE(x) ((u64)(x) << 50)
#define KEEP_ALIVE(x) ((u64)(x) << 54)
@@ -247,8 +265,10 @@ struct cpl_pass_accept_rpl {
#define RSS_QUEUE_VALID (1 << 10)
#define RX_COALESCE_VALID(x) ((x) << 11)
#define RX_COALESCE(x) ((x) << 12)
+#define PACE(x) ((x) << 16)
#define TX_QUEUE(x) ((x) << 23)
#define RX_CHANNEL(x) ((x) << 26)
+#define CCTRL_ECN(x) ((x) << 27)
#define WND_SCALE_EN(x) ((x) << 28)
#define TSTAMPS_EN(x) ((x) << 29)
#define SACK_EN(x) ((x) << 30)
@@ -292,6 +312,9 @@ struct cpl_pass_establish {
union opcode_tid ot;
__be32 rsvd;
__be32 tos_stid;
+#define PASS_OPEN_TID(x) ((x) << 0)
+#define PASS_OPEN_TOS(x) ((x) << 24)
+#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
#define GET_POPEN_TID(x) ((x) & 0xffffff)
#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
__be16 mac_idx;
@@ -332,6 +355,7 @@ struct cpl_set_tcb_field {
__be16 word_cookie;
#define TCB_WORD(x) ((x) << 0)
#define TCB_COOKIE(x) ((x) << 5)
+#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
__be64 mask;
__be64 val;
};
@@ -536,6 +560,37 @@ struct cpl_rx_pkt {
__be16 err_vec;
};
+/* rx_pkt.l2info fields */
+#define S_RX_ETHHDR_LEN 0
+#define M_RX_ETHHDR_LEN 0x1F
+#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
+#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+
+#define S_RX_MACIDX 8
+#define M_RX_MACIDX 0x1FF
+#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
+#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
+
+#define S_RXF_SYN 21
+#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
+#define F_RXF_SYN V_RXF_SYN(1U)
+
+#define S_RX_CHAN 28
+#define M_RX_CHAN 0xF
+#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
+#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+
+/* rx_pkt.hdr_len fields */
+#define S_RX_TCPHDR_LEN 0
+#define M_RX_TCPHDR_LEN 0x3F
+#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
+#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+
+#define S_RX_IPHDR_LEN 6
+#define M_RX_IPHDR_LEN 0x3FF
+#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
+#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
@@ -634,6 +689,17 @@ struct cpl_fw6_msg {
/* cpl_fw6_msg.type values */
enum {
FW6_TYPE_CMD_RPL = 0,
+ FW6_TYPE_WR_RPL = 1,
+ FW6_TYPE_CQE = 2,
+ FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3,
+};
+
+struct cpl_fw6_msg_ofld_connection_wr_rpl {
+ __u64 cookie;
+ __be32 tid; /* or atid in case of active failure */
+ __u8 t_state;
+ __u8 retval;
+ __u8 rsvd[2];
};
enum {
@@ -658,6 +724,7 @@ struct ulptx_sgl {
__be32 cmd_nsge;
#define ULPTX_CMD(x) ((x) << 24)
#define ULPTX_NSGE(x) ((x) << 0)
+#define ULPTX_MORE (1U << 23)
__be32 len0;
__be64 addr0;
struct ulptx_sge_pair sge[0];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1a8b57200f6..83ec5f7844ac 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -67,7 +67,7 @@
#define QID_MASK 0xffff8000U
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
-#define DBPRIO 0x00004000U
+#define DBPRIO(x) ((x) << 14)
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
@@ -193,6 +193,12 @@
#define SGE_FL_BUFFER_SIZE1 0x1048
#define SGE_FL_BUFFER_SIZE2 0x104c
#define SGE_FL_BUFFER_SIZE3 0x1050
+#define SGE_FL_BUFFER_SIZE4 0x1054
+#define SGE_FL_BUFFER_SIZE5 0x1058
+#define SGE_FL_BUFFER_SIZE6 0x105c
+#define SGE_FL_BUFFER_SIZE7 0x1060
+#define SGE_FL_BUFFER_SIZE8 0x1064
+
#define SGE_INGRESS_RX_THRESHOLD 0x10a0
#define THRESHOLD_0_MASK 0x3f000000U
#define THRESHOLD_0_SHIFT 24
@@ -217,6 +223,17 @@
#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+#define SGE_DBFIFO_STATUS 0x10a4
+#define HP_INT_THRESH_SHIFT 28
+#define HP_INT_THRESH_MASK 0xfU
+#define HP_INT_THRESH(x) ((x) << HP_INT_THRESH_SHIFT)
+#define LP_INT_THRESH_SHIFT 12
+#define LP_INT_THRESH_MASK 0xfU
+#define LP_INT_THRESH(x) ((x) << LP_INT_THRESH_SHIFT)
+
+#define SGE_DOORBELL_CONTROL 0x10a8
+#define ENABLE_DROP (1 << 13)
+
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
@@ -277,6 +294,10 @@
#define A_SGE_CTXT_CMD 0x11fc
#define A_SGE_DBQ_CTXT_BADDR 0x1084
+#define PCIE_PF_CFG 0x40
+#define AIVEC(x) ((x) << 4)
+#define AIVEC_MASK 0x3ffU
+
#define PCIE_PF_CLI 0x44
#define PCIE_INT_CAUSE 0x3004
#define UNXSPLCPLERR 0x20000000U
@@ -322,6 +343,13 @@
#define PCIE_MEM_ACCESS_OFFSET 0x306c
#define PCIE_FW 0x30b8
+#define PCIE_FW_ERR 0x80000000U
+#define PCIE_FW_INIT 0x40000000U
+#define PCIE_FW_HALT 0x20000000U
+#define PCIE_FW_MASTER_VLD 0x00008000U
+#define PCIE_FW_MASTER(x) ((x) << 12)
+#define PCIE_FW_MASTER_MASK 0x7
+#define PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
#define RNPP 0x80000000U
@@ -432,6 +460,9 @@
#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
+#define CIM_PF_HOST_INT_ENABLE 0x288
+#define MBMSGRDYINTEN(x) ((x) << 19)
+
#define CIM_PF_HOST_INT_CAUSE 0x28c
#define MBMSGRDYINT 0x00080000U
@@ -922,7 +953,7 @@
#define SF_DATA 0x193f8
#define SF_OP 0x193fc
-#define BUSY 0x80000000U
+#define SF_BUSY 0x80000000U
#define SF_LOCK 0x00000010U
#define SF_CONT 0x00000008U
#define BYTECNT_MASK 0x00000006U
@@ -981,6 +1012,7 @@
#define I2CM 0x00000002U
#define CIM 0x00000001U
+#define PL_INT_ENABLE 0x19410
#define PL_INT_MAP0 0x19414
#define PL_RST 0x19428
#define PIORST 0x00000002U
@@ -1032,4 +1064,41 @@
#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
#define XGMAC_PORT_INT_CAUSE 0x10dc
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+
+#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+
+#define S_TX_MOD_QUEUE_REQ_MAP 0
+#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+
+#define S_TX_MODQ_WEIGHT3 24
+#define M_TX_MODQ_WEIGHT3 0xffU
+#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+
+#define S_TX_MODQ_WEIGHT2 16
+#define M_TX_MODQ_WEIGHT2 0xffU
+#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+
+#define S_TX_MODQ_WEIGHT1 8
+#define M_TX_MODQ_WEIGHT1 0xffU
+#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+
+#define S_TX_MODQ_WEIGHT0 0
+#define M_TX_MODQ_WEIGHT0 0xffU
+#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+
+#define A_TP_TX_SCHED_HDR 0x23
+
+#define A_TP_TX_SCHED_FIFO 0x24
+
+#define A_TP_TX_SCHED_PCMD 0x25
+
+#define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
+
#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a6364632b490..a0dcccd846c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -35,6 +35,45 @@
#ifndef _T4FW_INTERFACE_H_
#define _T4FW_INTERFACE_H_
+enum fw_retval {
+ FW_SUCCESS = 0, /* completed successfully */
+ FW_EPERM = 1, /* operation not permitted */
+ FW_ENOENT = 2, /* no such file or directory */
+ FW_EIO = 5, /* input/output error; hw bad */
+ FW_ENOEXEC = 8, /* exec format error; inv microcode */
+ FW_EAGAIN = 11, /* try again */
+ FW_ENOMEM = 12, /* out of memory */
+ FW_EFAULT = 14, /* bad address; fw bad */
+ FW_EBUSY = 16, /* resource busy */
+ FW_EEXIST = 17, /* file exists */
+ FW_EINVAL = 22, /* invalid argument */
+ FW_ENOSPC = 28, /* no space left on device */
+ FW_ENOSYS = 38, /* functionality not implemented */
+ FW_EPROTO = 71, /* protocol error */
+ FW_EADDRINUSE = 98, /* address already in use */
+ FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
+ FW_ENETDOWN = 100, /* network is down */
+ FW_ENETUNREACH = 101, /* network is unreachable */
+ FW_ENOBUFS = 105, /* no buffer space available */
+ FW_ETIMEDOUT = 110, /* timeout */
+ FW_EINPROGRESS = 115, /* fw internal */
+ FW_SCSI_ABORT_REQUESTED = 128, /* */
+ FW_SCSI_ABORT_TIMEDOUT = 129, /* */
+ FW_SCSI_ABORTED = 130, /* */
+ FW_SCSI_CLOSE_REQUESTED = 131, /* */
+ FW_ERR_LINK_DOWN = 132, /* */
+ FW_RDEV_NOT_READY = 133, /* */
+ FW_ERR_RDEV_LOST = 134, /* */
+ FW_ERR_RDEV_LOGO = 135, /* */
+ FW_FCOE_NO_XCHG = 136, /* */
+ FW_SCSI_RSP_ERR = 137, /* */
+ FW_ERR_RDEV_IMPL_LOGO = 138, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
+ FW_SCSI_DDP_ERR = 141, /* DDP error*/
+ FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
+};
+
#define FW_T4VF_SGE_BASE_ADDR 0x0000
#define FW_T4VF_MPS_BASE_ADDR 0x0100
#define FW_T4VF_PL_BASE_ADDR 0x0200
@@ -46,6 +85,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
+ FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
FW_CMD_WR = 0x10,
@@ -68,6 +108,7 @@ struct fw_wr_hdr {
};
#define FW_WR_OP(x) ((x) << 24)
+#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff)
#define FW_WR_ATOMIC(x) ((x) << 23)
#define FW_WR_FLUSH(x) ((x) << 22)
#define FW_WR_COMPL(x) ((x) << 21)
@@ -80,6 +121,282 @@ struct fw_wr_hdr {
#define FW_WR_LEN16(x) ((x) << 0)
#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
+#define HW_TPL_FR_MT_PR_OV_P_FC 0X327
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
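These cookie values come back in the CPL_SET_TCB_RPL that completes a filter work request; GET_TCB_COOKIE() (added in t4_msg.h above) extracts the code. A hedged decoding sketch, assuming the cookie byte carries the code in the position GET_TCB_COOKIE() expects:

/* Illustrative decoding of the filter WR completion code carried in the
 * CPL_SET_TCB_RPL cookie; the exact field placement is an assumption.
 */
static int example_decode_filter_cookie(u8 cookie)
{
	switch (GET_TCB_COOKIE(cookie)) {
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_FLT_ADDED:
		return 0;		/* filter is now installed */
	case FW_FILTER_WR_FLT_DELETED:
		return 0;		/* filter slot is free again */
	case FW_FILTER_WR_SMT_TBL_FULL:
		return -ENOMEM;		/* no room for the source MAC rewrite */
	case FW_FILTER_WR_EINVAL:
	default:
		return -EINVAL;		/* firmware rejected the request */
	}
}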
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define M_FW_FILTER_WR_TID 0xfffff
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+#define G_FW_FILTER_WR_TID(x) \
+ (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define M_FW_FILTER_WR_RQTYPE 0x1
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+#define G_FW_FILTER_WR_RQTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE)
+#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define M_FW_FILTER_WR_NOREPLY 0x1
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+#define G_FW_FILTER_WR_NOREPLY(x) \
+ (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY)
+#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U)
+
+#define S_FW_FILTER_WR_IQ 0
+#define M_FW_FILTER_WR_IQ 0x3ff
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+#define G_FW_FILTER_WR_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define M_FW_FILTER_WR_DEL_FILTER 0x1
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define G_FW_FILTER_WR_DEL_FILTER(x) \
+ (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define M_FW_FILTER_WR_RPTTID 0x1
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+#define G_FW_FILTER_WR_RPTTID(x) \
+ (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID)
+#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U)
+
+#define S_FW_FILTER_WR_DROP 24
+#define M_FW_FILTER_WR_DROP 0x1
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+#define G_FW_FILTER_WR_DROP(x) \
+ (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP)
+#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define M_FW_FILTER_WR_DIRSTEER 0x1
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+#define G_FW_FILTER_WR_DIRSTEER(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER)
+#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define M_FW_FILTER_WR_MASKHASH 0x1
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+#define G_FW_FILTER_WR_MASKHASH(x) \
+ (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH)
+#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define M_FW_FILTER_WR_DIRSTEERHASH 0x1
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+#define G_FW_FILTER_WR_DIRSTEERHASH(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH)
+#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define M_FW_FILTER_WR_LPBK 0x1
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+#define G_FW_FILTER_WR_LPBK(x) \
+ (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK)
+#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define M_FW_FILTER_WR_DMAC 0x1
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+#define G_FW_FILTER_WR_DMAC(x) \
+ (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC)
+#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U)
+
+#define S_FW_FILTER_WR_SMAC 18
+#define M_FW_FILTER_WR_SMAC 0x1
+#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
+#define G_FW_FILTER_WR_SMAC(x) \
+ (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC)
+#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define M_FW_FILTER_WR_INSVLAN 0x1
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+#define G_FW_FILTER_WR_INSVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN)
+#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define M_FW_FILTER_WR_RMVLAN 0x1
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+#define G_FW_FILTER_WR_RMVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN)
+#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define M_FW_FILTER_WR_HITCNTS 0x1
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+#define G_FW_FILTER_WR_HITCNTS(x) \
+ (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS)
+#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define M_FW_FILTER_WR_TXCHAN 0x3
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+#define G_FW_FILTER_WR_TXCHAN(x) \
+ (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define M_FW_FILTER_WR_PRIO 0x1
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+#define G_FW_FILTER_WR_PRIO(x) \
+ (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO)
+#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define M_FW_FILTER_WR_L2TIX 0xfff
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+#define G_FW_FILTER_WR_L2TIX(x) \
+ (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define M_FW_FILTER_WR_FRAG 0x1
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+#define G_FW_FILTER_WR_FRAG(x) \
+ (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG)
+#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define M_FW_FILTER_WR_FRAGM 0x1
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+#define G_FW_FILTER_WR_FRAGM(x) \
+ (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM)
+#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define M_FW_FILTER_WR_IVLAN_VLD 0x1
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+#define G_FW_FILTER_WR_IVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD)
+#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define M_FW_FILTER_WR_OVLAN_VLD 0x1
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+#define G_FW_FILTER_WR_OVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
+#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
+#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
+#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define M_FW_FILTER_WR_RX_CHAN 0x1
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+#define G_FW_FILTER_WR_RX_CHAN(x) \
+ (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
+#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define M_FW_FILTER_WR_MACI 0x1ff
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+#define G_FW_FILTER_WR_MACI(x) \
+ (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define M_FW_FILTER_WR_MACIM 0x1ff
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+#define G_FW_FILTER_WR_MACIM(x) \
+ (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define M_FW_FILTER_WR_FCOE 0x1
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+#define G_FW_FILTER_WR_FCOE(x) \
+ (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
+#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define M_FW_FILTER_WR_FCOEM 0x1
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+#define G_FW_FILTER_WR_FCOEM(x) \
+ (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
+#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)
+
+#define S_FW_FILTER_WR_PORT 9
+#define M_FW_FILTER_WR_PORT 0x7
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+#define G_FW_FILTER_WR_PORT(x) \
+ (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define M_FW_FILTER_WR_PORTM 0x7
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+#define G_FW_FILTER_WR_PORTM(x) \
+ (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define M_FW_FILTER_WR_MATCHTYPE 0x7
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+#define G_FW_FILTER_WR_MATCHTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define M_FW_FILTER_WR_MATCHTYPEM 0x7
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+#define G_FW_FILTER_WR_MATCHTYPEM(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
struct fw_ulptx_wr {
__be32 op_to_compl;
@@ -99,6 +416,108 @@ struct fw_eth_tx_pkt_wr {
__be64 r3;
};
+struct fw_ofld_connection_wr {
+ __be32 op_compl;
+ __be32 len16_pkd;
+ __u64 cookie;
+ __be64 r2;
+ __be64 r3;
+ struct fw_ofld_connection_le {
+ __be32 version_cpl;
+ __be32 filter;
+ __be32 r1;
+ __be16 lport;
+ __be16 pport;
+ union fw_ofld_connection_leip {
+ struct fw_ofld_connection_le_ipv4 {
+ __be32 pip;
+ __be32 lip;
+ __be64 r0;
+ __be64 r1;
+ __be64 r2;
+ } ipv4;
+ struct fw_ofld_connection_le_ipv6 {
+ __be64 pip_hi;
+ __be64 pip_lo;
+ __be64 lip_hi;
+ __be64 lip_lo;
+ } ipv6;
+ } u;
+ } le;
+ struct fw_ofld_connection_tcb {
+ __be32 t_state_to_astid;
+ __be16 cplrxdataack_cplpassacceptrpl;
+ __be16 rcv_adv;
+ __be32 rcv_nxt;
+ __be32 tx_max;
+ __be64 opt0;
+ __be32 opt2;
+ __be32 r1;
+ __be64 r2;
+ __be64 r3;
+ } tcb;
+};
+
+#define S_FW_OFLD_CONNECTION_WR_VERSION 31
+#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1
+#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_VERSION)
+#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \
+ M_FW_OFLD_CONNECTION_WR_VERSION)
+#define F_FW_OFLD_CONNECTION_WR_VERSION \
+ V_FW_OFLD_CONNECTION_WR_VERSION(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_CPL 30
+#define M_FW_OFLD_CONNECTION_WR_CPL 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL)
+#define G_FW_OFLD_CONNECTION_WR_CPL(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL)
+#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_T_STATE 28
+#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf
+#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)
+#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \
+ M_FW_OFLD_CONNECTION_WR_T_STATE)
+
+#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
+#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf
+#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)
+#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \
+ M_FW_OFLD_CONNECTION_WR_RCV_SCALE)
+
+#define S_FW_OFLD_CONNECTION_WR_ASTID 0
+#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff
+#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_ASTID)
+#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID)
+
+#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15
+#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
+#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \
+ M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
+#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \
+ V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14
+#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
+#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \
+ M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
+#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \
+ V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U)
+
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH,
@@ -222,6 +641,7 @@ struct fw_cmd_hdr {
#define FW_CMD_OP(x) ((x) << 24)
#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
#define FW_CMD_REQUEST (1U << 23)
+#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1)
#define FW_CMD_READ (1U << 22)
#define FW_CMD_WRITE (1U << 21)
#define FW_CMD_EXEC (1U << 20)
@@ -229,6 +649,7 @@ struct fw_cmd_hdr {
#define FW_CMD_RETVAL(x) ((x) << 8)
#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
#define FW_CMD_LEN16(x) ((x) << 0)
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
@@ -241,7 +662,8 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_TP_MIB = 0x0012,
FW_LDST_ADDRSPC_MDIO = 0x0018,
FW_LDST_ADDRSPC_MPS = 0x0020,
- FW_LDST_ADDRSPC_FUNC = 0x0028
+ FW_LDST_ADDRSPC_FUNC = 0x0028,
+ FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
};
enum fw_ldst_mps_fid {
@@ -303,6 +725,16 @@ struct fw_ldst_cmd {
__be64 data0;
__be64 data1;
} func;
+ struct fw_ldst_pcie {
+ u8 ctrl_to_fn;
+ u8 bnum;
+ u8 r;
+ u8 ext_r;
+ u8 select_naccess;
+ u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
} u;
};
@@ -312,6 +744,9 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID(x) ((x) << 15)
#define FW_LDST_CMD_CTL(x) ((x) << 0)
#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
+#define FW_LDST_CMD_LC (1U << 4)
+#define FW_LDST_CMD_NACCESS(x) ((x) << 0)
+#define FW_LDST_CMD_FN(x) ((x) << 0)
struct fw_reset_cmd {
__be32 op_to_write;
@@ -333,7 +768,7 @@ enum fw_hellow_cmd {
struct fw_hello_cmd {
__be32 op_to_write;
__be32 retval_len16;
- __be32 err_to_mbasyncnot;
+ __be32 err_to_clearinit;
#define FW_HELLO_CMD_ERR (1U << 31)
#define FW_HELLO_CMD_INIT (1U << 30)
#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
@@ -343,6 +778,7 @@ struct fw_hello_cmd {
#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
#define FW_HELLO_CMD_MBMASTER_GET(x) \
(((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
+#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23)
#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
#define FW_HELLO_CMD_CLEARINIT (1U << 16)
@@ -428,6 +864,7 @@ enum fw_caps_config_iscsi {
enum fw_caps_config_fcoe {
FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
+ FW_CAPS_CONFIG_FCOE_CTRL_OFLD = 0x00000004,
};
enum fw_memtype_cf {
@@ -440,7 +877,7 @@ enum fw_memtype_cf {
struct fw_caps_config_cmd {
__be32 op_to_write;
- __be32 retval_len16;
+ __be32 cfvalid_to_len16;
__be32 r2;
__be32 hwmbitmap;
__be16 nbmcaps;
@@ -701,8 +1138,8 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
-#define FW_IQ_CMD_FL0PADEN (1U << 2)
-#define FW_IQ_CMD_FL0PACKEN (1U << 1)
+#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2)
+#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1)
#define FW_IQ_CMD_FL0CONGEN (1U << 0)
#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
@@ -1190,6 +1627,14 @@ enum fw_port_dcb_cfg_rc {
FW_PORT_DCB_CFG_ERROR = 0x1
};
+enum fw_port_dcb_type {
+ FW_PORT_DCB_TYPE_PGID = 0x00,
+ FW_PORT_DCB_TYPE_PGRATE = 0x01,
+ FW_PORT_DCB_TYPE_PRIORATE = 0x02,
+ FW_PORT_DCB_TYPE_PFC = 0x03,
+ FW_PORT_DCB_TYPE_APP_ID = 0x04,
+};
+
struct fw_port_cmd {
__be32 op_to_portid;
__be32 action_to_len16;
@@ -1257,6 +1702,7 @@ struct fw_port_cmd {
#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
#define FW_PORT_CMD_LSTATUS (1U << 31)
+#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1)
#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
#define FW_PORT_CMD_TXPAUSE (1U << 23)
@@ -1305,6 +1751,9 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
FW_PORT_MOD_TYPE_LRM,
+ FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1,
FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 611396c4b381..68eaa9c88c7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -466,7 +466,6 @@ static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
- memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0188df705719..56b46ab2d4c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -33,6 +33,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -196,11 +198,10 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
break;
}
- printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
- dev->name, s, fc);
+ netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
} else {
netif_carrier_off(dev);
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
@@ -2465,8 +2466,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
static int cxgb4vf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int version_printed;
-
int pci_using_dac;
int err, pidx;
unsigned int pmask;
@@ -2478,10 +2477,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Print our driver banner the first time we're called to initialize a
* device.
*/
- if (version_printed == 0) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
- version_printed = 1;
- }
+ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
/*
* Initialize generic PCI device state.
@@ -2920,18 +2916,15 @@ static int __init cxgb4vf_module_init(void)
* Vet our module parameters.
*/
if (msi != MSI_MSIX && msi != MSI_MSI) {
- printk(KERN_WARNING KBUILD_MODNAME
- ": bad module parameter msi=%d; must be %d"
- " (MSI-X or MSI) or %d (MSI)\n",
- msi, MSI_MSIX, MSI_MSI);
+ pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
+ msi, MSI_MSIX, MSI_MSI);
return -EINVAL;
}
/* Debugfs support is optional, just warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
- printk(KERN_WARNING KBUILD_MODNAME ": could not create"
- " debugfs entry, continuing\n");
+ pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f16745f4b36b..9488032d6d2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -536,7 +536,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- DBPRIO |
+ DBPRIO(1) |
QID(fl->cntxt_id) |
PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
fl->pend_cred %= FL_PER_EQ_UNIT;
@@ -952,7 +952,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* Warn if we write doorbells with the wrong priority and write
* descriptors before telling HW.
*/
- WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
+ WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1));
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
QID(tq->cntxt_id) | PIDX(n));
@@ -1477,8 +1477,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
- if (pkt->vlan_ex)
+ if (pkt->vlan_ex) {
__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ rxq->stats.vlan_ex++;
+ }
ret = napi_gro_frags(&rxq->rspq.napi);
if (ret == GRO_HELD)
@@ -1501,7 +1503,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+ const struct cpl_rx_pkt *pkt = (void *)rsp;
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
@@ -2126,8 +2128,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqns_to_fl0congen =
cpu_to_be32(
FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
- FW_IQ_CMD_FL0PACKEN |
- FW_IQ_CMD_FL0PADEN);
+ FW_IQ_CMD_FL0PACKEN(1) |
+ FW_IQ_CMD_FL0PADEN(1));
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 78c55213eaf7..354cbb78ed50 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -710,8 +710,8 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 64866ff1aea0..ec1a233622c6 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -865,7 +865,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
}
memcpy(netdev->dev_addr, addr, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -1491,7 +1490,8 @@ static int enic_request_intr(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-rx-%d", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix_rq;
enic->msix[intr].devid = &enic->napi[i];
@@ -1499,20 +1499,23 @@ static int enic_request_intr(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
intr = enic_msix_wq_intr(enic, i);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-tx-%d", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix_wq;
enic->msix[intr].devid = enic;
}
intr = enic_msix_err_intr(enic);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-err", netdev->name);
enic->msix[intr].isr = enic_isr_msix_err;
enic->msix[intr].devid = enic;
intr = enic_msix_notify_intr(enic);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-notify", netdev->name);
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index c73472c369cd..8cdf02503d13 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -434,9 +434,10 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
board_info_t *dm = to_dm9000_board(dev);
- strcpy(info->driver, CARDNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, to_platform_device(dm->dev)->name);
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ sizeof(info->bus_info));
}
static u32 dm9000_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig
index 37940279ded8..68262aa57d01 100644
--- a/drivers/net/ethernet/dec/Kconfig
+++ b/drivers/net/ethernet/dec/Kconfig
@@ -17,21 +17,5 @@ config NET_VENDOR_DEC
your specific card in the following questions.
if NET_VENDOR_DEC
-
-config EWRK3
- tristate "EtherWORKS 3 (DE203, DE204, DE205) support"
- depends on ISA
- select CRC32
- ---help---
- This driver supports the DE203, DE204 and DE205 network (Ethernet)
- cards. If this is for you, say Y and read
- <file:Documentation/networking/ewrk3.txt> in the kernel source as
- well as the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ewrk3.
-
source "drivers/net/ethernet/dec/tulip/Kconfig"
-
endif # NET_VENDOR_DEC
diff --git a/drivers/net/ethernet/dec/Makefile b/drivers/net/ethernet/dec/Makefile
index 1b01ed8d42c8..32993fccbbfd 100644
--- a/drivers/net/ethernet/dec/Makefile
+++ b/drivers/net/ethernet/dec/Makefile
@@ -2,5 +2,4 @@
# Makefile for the Digital Equipment Inc. network device drivers.
#
-obj-$(CONFIG_EWRK3) += ewrk3.o
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
deleted file mode 100644
index 9f992b95eddc..000000000000
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ /dev/null
@@ -1,1961 +0,0 @@
-/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux.
-
- Written 1994 by David C. Davies.
-
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-
- This driver is written for the Digital Equipment Corporation series
- of EtherWORKS ethernet cards:
-
- DE203 Turbo (BNC)
- DE204 Turbo (TP)
- DE205 Turbo (TP BNC)
-
- The driver has been tested on a relatively busy network using the DE205
- card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s
- (7.8Mb/s) to a DECstation 5000/200.
-
- The author may be reached at davies@maniac.ultranet.com.
-
- =========================================================================
- This driver has been written substantially from scratch, although its
- inheritance of style and stack interface from 'depca.c' and in turn from
- Donald Becker's 'lance.c' should be obvious.
-
- The DE203/4/5 boards all use a new proprietary chip in place of the
- LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422).
- Use the depca.c driver in the standard distribution for the LANCE based
- cards from DIGITAL; this driver will not work with them.
-
- The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O
- only makes all the card accesses through I/O transactions and no high
- (shared) memory is used. This mode provides a >48% performance penalty
- and is deprecated in this driver, although allowed to provide initial
- setup when hardstrapped.
-
- The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is
- no point in using any mode other than the 2kB mode - their performances
- are virtually identical, although the driver has been tested in the 2kB
- and 32kB modes. I would suggest you uncomment the line:
-
- FORCE_2K_MODE;
-
- to allow the driver to configure the card as a 2kB card at your current
- base address, thus leaving more room to clutter your system box with
- other memory hungry boards.
-
- As many ISA and EISA cards can be supported under this driver as you
- wish, limited primarily by the available IRQ lines, rather than by the
- available I/O addresses (24 ISA, 16 EISA). I have checked different
- configurations of multiple depca cards and ewrk3 cards and have not
- found a problem yet (provided you have at least depca.c v0.38) ...
-
- The board IRQ setting must be at an unused IRQ which is auto-probed
- using Donald Becker's autoprobe routines. All these cards are at
- {5,10,11,15}.
-
- No 16MB memory limitation should exist with this driver as DMA is not
- used and the common memory area is in low memory on the network card (my
- current system has 20MB and I've not had problems yet).
-
- The ability to load this driver as a loadable module has been included
- and used extensively during the driver development (to save those long
- reboot sequences). To utilise this ability, you have to do 8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) edit the source code near line 1898 to reflect the I/O address and
- IRQ you're using.
- 3) compile ewrk3.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the ewrk3 configuration turned off and reboot.
- 5) insmod ewrk3.o
- [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
- [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards]
- 6) run the net startup bits for your new eth?? interface manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- Note that autoprobing is not allowed in loadable modules - the system is
- already up and running and you're messing with interrupts.
-
- To unload a module, turn off the associated interface
- 'ifconfig eth?? down' then 'rmmod ewrk3'.
-
- Promiscuous mode has been turned off in this driver, but all the
- multicast address bits have been turned on. This improved the send
- performance on a busy network by about 13%.
-
- Ioctl's have now been provided (primarily because I wanted to grab some
- packet size statistics). They are patterned after 'plipconfig.c' from a
- suggestion by Alan Cox. Using these ioctls, you can enable promiscuous
- mode, add/delete multicast addresses, change the hardware address, get
- packet size distribution statistics and muck around with the control and
- status register. I'll add others if and when the need arises.
-
- TO DO:
- ------
-
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 26-aug-94 Initial writing. ALPHA code release.
- 0.11 31-aug-94 Fixed: 2k mode memory base calc.,
- LeMAC version calc.,
- IRQ vector assignments during autoprobe.
- 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card.
- Fixed up MCA hash table algorithm.
- 0.20 4-sep-94 Added IOCTL functionality.
- 0.21 14-sep-94 Added I/O mode.
- 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0.
- 0.22 16-sep-94 Added more IOCTLs & tidied up.
- 0.23 21-sep-94 Added transmit cut through.
- 0.24 31-oct-94 Added uid checks in some ioctls.
- 0.30 1-nov-94 BETA code release.
- 0.31 5-dec-94 Added check/allocate region code.
- 0.32 16-jan-95 Broadcast packet fix.
- 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- 0.40 27-Dec-95 Rationalise MODULE and autoprobe code.
- Rewrite for portability & updated.
- ALPHA support from <jestabro@amt.tay1.dec.com>
- Added verify_area() calls in ewrk3_ioctl() from
- suggestion by <heiko@colossus.escape.de>.
- Add new multicasting code.
- 0.41 20-Jan-96 Fix IRQ set up problem reported by
- <kenneth@bbs.sas.ntu.ac.sg>.
- 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
- 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com>
- 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net>
- 0.46 10-Oct-02 Multiple NIC support when module <akropel1@rochester.rr.com>
- 0.47 18-Oct-02 ethtool support <akropel1@rochester.rr.com>
- 0.48 18-Oct-02 cli/sti removal for 2.5 <vda@port.imtp.ilyichevsk.odessa.ua>
- ioctl locking, signature search cleanup <akropel1@rochester.rr.com>
-
- =========================================================================
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ethtool.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/uaccess.h>
-
-#include "ewrk3.h"
-
-#define DRV_NAME "ewrk3"
-#define DRV_VERSION "0.48"
-
-static char version[] __initdata =
-DRV_NAME ":v" DRV_VERSION " 2002/10/18 davies@maniac.ultranet.com\n";
-
-#ifdef EWRK3_DEBUG
-static int ewrk3_debug = EWRK3_DEBUG;
-#else
-static int ewrk3_debug = 1;
-#endif
-
-#define EWRK3_NDA 0xffe0 /* No Device Address */
-
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-#ifndef EWRK3_SIGNATURE
-#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""}
-#define EWRK3_STRLEN 8
-#endif
-
-#ifndef EWRK3_RAM_BASE_ADDRESSES
-#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000}
-#endif
-
-/*
- ** Sets up the I/O area for the autoprobe.
- */
-#define EWRK3_IO_BASE 0x100 /* Start address for probe search */
-#define EWRK3_IOP_INC 0x20 /* I/O address increment */
-#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
-
-#ifndef MAX_NUM_EWRK3S
-#define MAX_NUM_EWRK3S 21
-#endif
-
-#ifndef EWRK3_EISA_IO_PORTS
-#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-#endif
-
-#ifndef MAX_EISA_SLOTS
-#define MAX_EISA_SLOTS 16
-#define EISA_SLOT_INC 0x1000
-#endif
-
-#define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */
-
-/*
- ** EtherWORKS 3 shared memory window sizes
- */
-#define IO_ONLY 0x00
-#define SHMEM_2K 0x800
-#define SHMEM_32K 0x8000
-#define SHMEM_64K 0x10000
-
-/*
- ** EtherWORKS 3 IRQ ENABLE/DISABLE
- */
-#define ENABLE_IRQs { \
- icr |= lp->irq_mask;\
- outb(icr, EWRK3_ICR); /* Enable the IRQs */\
-}
-
-#define DISABLE_IRQs { \
- icr = inb(EWRK3_ICR);\
- icr &= ~lp->irq_mask;\
- outb(icr, EWRK3_ICR); /* Disable the IRQs */\
-}
-
-/*
- ** EtherWORKS 3 START/STOP
- */
-#define START_EWRK3 { \
- csr = inb(EWRK3_CSR);\
- csr &= ~(CSR_TXD|CSR_RXD);\
- outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\
-}
-
-#define STOP_EWRK3 { \
- csr = (CSR_TXD|CSR_RXD);\
- outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\
-}
-
-/*
- ** The EtherWORKS 3 private structure
- */
-#define EWRK3_PKT_STAT_SZ 16
-#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase EWRK3_PKT_STAT_SZ */
-
-struct ewrk3_stats {
- u32 bins[EWRK3_PKT_STAT_SZ];
- u32 unicast;
- u32 multicast;
- u32 broadcast;
- u32 excessive_collisions;
- u32 tx_underruns;
- u32 excessive_underruns;
-};
-
-struct ewrk3_private {
- char adapter_name[80]; /* Name exported to /proc/ioports */
- u_long shmem_base; /* Shared memory start address */
- void __iomem *shmem;
- u_long shmem_length; /* Shared memory window length */
- struct ewrk3_stats pktStats; /* Private stats counters */
- u_char irq_mask; /* Adapter IRQ mask bits */
- u_char mPage; /* Maximum 2kB Page number */
- u_char lemac; /* Chip rev. level */
- u_char hard_strapped; /* Don't allow a full open */
- u_char txc; /* Transmit cut through */
- void __iomem *mctbl; /* Pointer to the multicast table */
- u_char led_mask; /* Used to reserve LED access for ethtool */
- spinlock_t hw_lock;
-};
-
-/*
- ** Force the EtherWORKS 3 card to be in 2kB MODE
- */
-#define FORCE_2K_MODE { \
- shmem_length = SHMEM_2K;\
- outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\
-}
-
-/*
- ** Public Functions
- */
-static int ewrk3_open(struct net_device *dev);
-static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t ewrk3_interrupt(int irq, void *dev_id);
-static int ewrk3_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops ethtool_ops_203;
-static const struct ethtool_ops ethtool_ops;
-
-/*
- ** Private functions
- */
-static int ewrk3_hw_init(struct net_device *dev, u_long iobase);
-static void ewrk3_init(struct net_device *dev);
-static int ewrk3_rx(struct net_device *dev);
-static int ewrk3_tx(struct net_device *dev);
-static void ewrk3_timeout(struct net_device *dev);
-
-static void EthwrkSignature(char *name, char *eeprom_image);
-static int DevicePresent(u_long iobase);
-static void SetMulticastFilter(struct net_device *dev);
-static int EISA_signature(char *name, s32 eisa_id);
-
-static int Read_EEPROM(u_long iobase, u_char eaddr);
-static int Write_EEPROM(short data, u_long iobase, u_char eaddr);
-static u_char get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType);
-
-static int ewrk3_probe1(struct net_device *dev, u_long iobase, int irq);
-static int isa_probe(struct net_device *dev, u_long iobase);
-static int eisa_probe(struct net_device *dev, u_long iobase);
-
-static u_char irq[MAX_NUM_EWRK3S+1] = {5, 0, 10, 3, 11, 9, 15, 12};
-
-static char name[EWRK3_STRLEN + 1];
-static int num_ewrks3s;
-
-/*
- ** Miscellaneous defines...
- */
-#define INIT_EWRK3 {\
- outb(EEPROM_INIT, EWRK3_IOPR);\
- mdelay(1);\
-}
-
-#ifndef MODULE
-struct net_device * __init ewrk3_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- err = ewrk3_probe1(dev, dev->base_addr, dev->irq);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-
-}
-#endif
-
-static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
-{
- int err;
-
- dev->base_addr = iobase;
- dev->irq = irq;
-
- /* Address PROM pattern */
- err = isa_probe(dev, iobase);
- if (err != 0)
- err = eisa_probe(dev, iobase);
-
- if (err)
- return err;
-
- err = register_netdev(dev);
- if (err)
- release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
-
- return err;
-}
-
-static const struct net_device_ops ewrk3_netdev_ops = {
- .ndo_open = ewrk3_open,
- .ndo_start_xmit = ewrk3_queue_pkt,
- .ndo_stop = ewrk3_close,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = ewrk3_ioctl,
- .ndo_tx_timeout = ewrk3_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init
-ewrk3_hw_init(struct net_device *dev, u_long iobase)
-{
- struct ewrk3_private *lp;
- int i, status = 0;
- u_long mem_start, shmem_length;
- u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
- u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
-
- /*
- ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
- ** This also disables the EISA_ENABLE bit in the EISA Control Register.
- */
- if (iobase > 0x400)
- eisa_cr = inb(EISA_CR);
- INIT_EWRK3;
-
- nicsr = inb(EWRK3_CSR);
-
- icr = inb(EWRK3_ICR);
- icr &= 0x70;
- outb(icr, EWRK3_ICR); /* Disable all the IRQs */
-
- if (nicsr != (CSR_TXD | CSR_RXD))
- return -ENXIO;
-
- /* Check that the EEPROM is alive and well and not living on Pluto... */
- for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) {
- union {
- short val;
- char c[2];
- } tmp;
-
- tmp.val = (short) Read_EEPROM(iobase, (i >> 1));
- eeprom_image[i] = tmp.c[0];
- eeprom_image[i + 1] = tmp.c[1];
- chksum += eeprom_image[i] + eeprom_image[i + 1];
- }
-
- if (chksum != 0) { /* Bad EEPROM Data! */
- printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
- return -ENXIO;
- }
-
- EthwrkSignature(name, eeprom_image);
- if (*name == '\0')
- return -ENXIO;
-
- dev->base_addr = iobase;
-
- if (iobase > 0x400) {
- outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */
- }
- lemac = eeprom_image[EEPROM_CHIPVER];
- cmr = inb(EWRK3_CMR);
-
- if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
- ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
- printk("%s: %s at %#4lx", dev->name, name, iobase);
- hard_strapped = 1;
- } else if ((iobase & 0x0fff) == EWRK3_EISA_IO_PORTS) {
- /* EISA slot address */
- printk("%s: %s at %#4lx (EISA slot %ld)",
- dev->name, name, iobase, ((iobase >> 12) & 0x0f));
- } else { /* ISA port address */
- printk("%s: %s at %#4lx", dev->name, name, iobase);
- }
-
- printk(", h/w address ");
- if (lemac != LeMAC2)
- DevicePresent(iobase); /* need after EWRK3_INIT */
- status = get_hw_addr(dev, eeprom_image, lemac);
- printk("%pM\n", dev->dev_addr);
-
- if (status) {
- printk(" which has an EEPROM CRC error.\n");
- return -ENXIO;
- }
-
- if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */
- cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
- if (eeprom_image[EEPROM_MISC0] & READ_AHEAD)
- cmr |= CMR_RA;
- if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND)
- cmr |= CMR_WB;
- if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL)
- cmr |= CMR_POLARITY;
- if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK)
- cmr |= CMR_LINK;
- if (eeprom_image[EEPROM_MISC0] & _0WS_ENA)
- cmr |= CMR_0WS;
- }
- if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM)
- cmr |= CMR_DRAM;
- outb(cmr, EWRK3_CMR);
-
- cr = inb(EWRK3_CR); /* Set up the Control Register */
- cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
- if (cr & SETUP_APD)
- cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
- cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
- cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
- outb(cr, EWRK3_CR);
-
- /*
- ** Determine the base address and window length for the EWRK3
- ** RAM from the memory base register.
- */
- mem_start = inb(EWRK3_MBR);
- shmem_length = 0;
- if (mem_start != 0) {
- if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
- mem_start *= SHMEM_64K;
- shmem_length = SHMEM_64K;
- } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
- mem_start *= SHMEM_32K;
- shmem_length = SHMEM_32K;
- } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
- mem_start = mem_start * SHMEM_2K + 0x80000;
- shmem_length = SHMEM_2K;
- } else {
- return -ENXIO;
- }
- }
- /*
- ** See the top of this source code for comments about
- ** uncommenting this line.
- */
-/* FORCE_2K_MODE; */
-
- if (hard_strapped) {
- printk(" is hard strapped.\n");
- } else if (mem_start) {
- printk(" has a %dk RAM window", (int) (shmem_length >> 10));
- printk(" at 0x%.5lx", mem_start);
- } else {
- printk(" is in I/O only mode");
- }
-
- lp = netdev_priv(dev);
- lp->shmem_base = mem_start;
- lp->shmem = ioremap(mem_start, shmem_length);
- if (!lp->shmem)
- return -ENOMEM;
- lp->shmem_length = shmem_length;
- lp->lemac = lemac;
- lp->hard_strapped = hard_strapped;
- lp->led_mask = CR_LED;
- spin_lock_init(&lp->hw_lock);
-
- lp->mPage = 64;
- if (cmr & CMR_DRAM)
- lp->mPage <<= 1; /* 2 DRAMS on module */
-
- sprintf(lp->adapter_name, "%s (%s)", name, dev->name);
-
- lp->irq_mask = ICR_TNEM | ICR_TXDM | ICR_RNEM | ICR_RXDM;
-
- if (!hard_strapped) {
- /*
- ** Enable EWRK3 board interrupts for autoprobing
- */
- icr |= ICR_IE; /* Enable interrupts */
- outb(icr, EWRK3_ICR);
-
- /* The DMA channel may be passed in on this parameter. */
- dev->dma = 0;
-
- /* To auto-IRQ we enable the initialization-done and DMA err,
- interrupts. For now we will always get a DMA error. */
- if (dev->irq < 2) {
-#ifndef MODULE
- u_char irqnum;
- unsigned long irq_mask;
-
-
- irq_mask = probe_irq_on();
-
- /*
- ** Trigger a TNE interrupt.
- */
- icr |= ICR_TNEM;
- outb(1, EWRK3_TDQ); /* Write to the TX done queue */
- outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */
-
- irqnum = irq[((icr & IRQ_SEL) >> 4)];
-
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
- if ((dev->irq) && (irqnum == dev->irq)) {
- printk(" and uses IRQ%d.\n", dev->irq);
- } else {
- if (!dev->irq) {
- printk(" and failed to detect IRQ line.\n");
- } else if ((irqnum == 1) && (lemac == LeMAC2)) {
- printk(" and an illegal IRQ line detected.\n");
- } else {
- printk(", but incorrect IRQ line detected.\n");
- }
- iounmap(lp->shmem);
- return -ENXIO;
- }
-
- DISABLE_IRQs; /* Mask all interrupts */
-
-#endif /* MODULE */
- } else {
- printk(" and requires IRQ%d.\n", dev->irq);
- }
- }
-
- if (ewrk3_debug > 1) {
- printk(version);
- }
- /* The EWRK3-specific entries in the device structure. */
- dev->netdev_ops = &ewrk3_netdev_ops;
- if (lp->adapter_name[4] == '3')
- SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
- else
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
- dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
-
- dev->mem_start = 0;
-
- return 0;
-}
-
-
-static int ewrk3_open(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int status = 0;
- u_char icr, csr;
-
- /*
- ** Stop the TX and RX...
- */
- STOP_EWRK3;
-
- if (!lp->hard_strapped) {
- if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) {
- printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq);
- status = -EAGAIN;
- } else {
-
- /*
- ** Re-initialize the EWRK3...
- */
- ewrk3_init(dev);
-
- if (ewrk3_debug > 1) {
- printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq);
- printk(" physical address: %pM\n", dev->dev_addr);
- if (lp->shmem_length == 0) {
- printk(" no shared memory, I/O only mode\n");
- } else {
- printk(" start of shared memory: 0x%08lx\n", lp->shmem_base);
- printk(" window length: 0x%04lx\n", lp->shmem_length);
- }
- printk(" # of DRAMS: %d\n", ((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
- printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
- printk(" cr: 0x%02x\n", inb(EWRK3_CR));
- printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
- printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
- printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
- }
- netif_start_queue(dev);
- /*
- ** Unmask EWRK3 board interrupts
- */
- icr = inb(EWRK3_ICR);
- ENABLE_IRQs;
-
- }
- } else {
- printk(KERN_ERR "%s: ewrk3 available for hard strapped set up only.\n", dev->name);
- printk(KERN_ERR " Run the 'ewrk3setup' utility or remove the hard straps.\n");
- return -EINVAL;
- }
-
- return status;
-}
-
-/*
- ** Initialize the EtherWORKS 3 operating conditions
- */
-static void ewrk3_init(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_char csr, page;
- u_long iobase = dev->base_addr;
- int i;
-
- /*
- ** Enable any multicasts
- */
- set_multicast_list(dev);
-
- /*
- ** Set hardware MAC address. Address is initialized from the EEPROM
- ** during startup but may have since been changed by the user.
- */
- for (i=0; i<ETH_ALEN; i++)
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
-
- /*
- ** Clean out any remaining entries in all the queues here
- */
- while (inb(EWRK3_TQ));
- while (inb(EWRK3_TDQ));
- while (inb(EWRK3_RQ));
- while (inb(EWRK3_FMQ));
-
- /*
- ** Write a clean free memory queue
- */
- for (page = 1; page < lp->mPage; page++) { /* Write the free page numbers */
- outb(page, EWRK3_FMQ); /* to the Free Memory Queue */
- }
-
- START_EWRK3; /* Enable the TX and/or RX */
-}
-
-/*
- * Transmit timeout
- */
-
-static void ewrk3_timeout(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_char icr, csr;
- u_long iobase = dev->base_addr;
-
- if (!lp->hard_strapped)
- {
- printk(KERN_WARNING"%s: transmit timed/locked out, status %04x, resetting.\n",
- dev->name, inb(EWRK3_CSR));
-
- /*
- ** Mask all board interrupts
- */
- DISABLE_IRQs;
-
- /*
- ** Stop the TX and RX...
- */
- STOP_EWRK3;
-
- ewrk3_init(dev);
-
- /*
- ** Unmask EWRK3 board interrupts
- */
- ENABLE_IRQs;
-
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
- }
-}
-
-/*
- ** Writes a socket buffer to the free page queue
- */
-static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- void __iomem *buf = NULL;
- u_char icr;
- u_char page;
-
- spin_lock_irq (&lp->hw_lock);
- DISABLE_IRQs;
-
- /* if no resources available, exit, request packet be queued */
- if (inb (EWRK3_FMQC) == 0) {
- printk (KERN_WARNING "%s: ewrk3_queue_pkt(): No free resources...\n",
- dev->name);
- printk (KERN_WARNING "%s: ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",
- dev->name, inb (EWRK3_CSR), inb (EWRK3_ICR),
- inb (EWRK3_FMQC));
- goto err_out;
- }
-
- /*
- ** Get a free page from the FMQ
- */
- if ((page = inb (EWRK3_FMQ)) >= lp->mPage) {
- printk ("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
- (u_char) page);
- goto err_out;
- }
-
-
- /*
- ** Set up shared memory window and pointer into the window
- */
- if (lp->shmem_length == IO_ONLY) {
- outb (page, EWRK3_IOPR);
- } else if (lp->shmem_length == SHMEM_2K) {
- buf = lp->shmem;
- outb (page, EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_32K) {
- buf = (((short) page << 11) & 0x7800) + lp->shmem;
- outb ((page >> 4), EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_64K) {
- buf = (((short) page << 11) & 0xf800) + lp->shmem;
- outb ((page >> 5), EWRK3_MPR);
- } else {
- printk (KERN_ERR "%s: Oops - your private data area is hosed!\n",
- dev->name);
- BUG ();
- }
-
- /*
- ** Set up the buffer control structures and copy the data from
- ** the socket buffer to the shared memory .
- */
- if (lp->shmem_length == IO_ONLY) {
- int i;
- u_char *p = skb->data;
- outb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
- outb ((char) (skb->len & 0xff), EWRK3_DATA);
- outb ((char) ((skb->len >> 8) & 0xff), EWRK3_DATA);
- outb ((char) 0x04, EWRK3_DATA);
- for (i = 0; i < skb->len; i++) {
- outb (*p++, EWRK3_DATA);
- }
- outb (page, EWRK3_TQ); /* Start sending pkt */
- } else {
- writeb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), buf); /* ctrl byte */
- buf += 1;
- writeb ((char) (skb->len & 0xff), buf); /* length (16 bit xfer) */
- buf += 1;
- if (lp->txc) {
- writeb(((skb->len >> 8) & 0xff) | XCT, buf);
- buf += 1;
- writeb (0x04, buf); /* index byte */
- buf += 1;
- writeb (0x00, (buf + skb->len)); /* Write the XCT flag */
- memcpy_toio (buf, skb->data, PRELOAD); /* Write PRELOAD bytes */
- outb (page, EWRK3_TQ); /* Start sending pkt */
- memcpy_toio (buf + PRELOAD,
- skb->data + PRELOAD,
- skb->len - PRELOAD);
- writeb (0xff, (buf + skb->len)); /* Write the XCT flag */
- } else {
- writeb ((skb->len >> 8) & 0xff, buf);
- buf += 1;
- writeb (0x04, buf); /* index byte */
- buf += 1;
- memcpy_toio (buf, skb->data, skb->len); /* Write data bytes */
- outb (page, EWRK3_TQ); /* Start sending pkt */
- }
- }
-
- ENABLE_IRQs;
- spin_unlock_irq (&lp->hw_lock);
-
- dev->stats.tx_bytes += skb->len;
- dev_kfree_skb (skb);
-
- /* Check for free resources: stop Tx queue if there are none */
- if (inb (EWRK3_FMQC) == 0)
- netif_stop_queue (dev);
-
- return NETDEV_TX_OK;
-
-err_out:
- ENABLE_IRQs;
- spin_unlock_irq (&lp->hw_lock);
- return NETDEV_TX_BUSY;
-}
-
-/*
- ** The EWRK3 interrupt handler.
- */
-static irqreturn_t ewrk3_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct ewrk3_private *lp;
- u_long iobase;
- u_char icr, cr, csr;
-
- lp = netdev_priv(dev);
- iobase = dev->base_addr;
-
- /* get the interrupt information */
- csr = inb(EWRK3_CSR);
-
- /*
- ** Mask the EWRK3 board interrupts and turn on the LED
- */
- spin_lock(&lp->hw_lock);
- DISABLE_IRQs;
-
- cr = inb(EWRK3_CR);
- cr |= lp->led_mask;
- outb(cr, EWRK3_CR);
-
- if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */
- ewrk3_rx(dev);
-
- if (csr & CSR_TNE) /* Tx interrupt (packet sent) */
- ewrk3_tx(dev);
-
- /*
- ** Now deal with the TX/RX disable flags. These are set when there
- ** are no more resources. If resources free up then enable these
- ** interrupts, otherwise mask them - failure to do this will result
- ** in the system hanging in an interrupt loop.
- */
- if (inb(EWRK3_FMQC)) { /* any resources available? */
- lp->irq_mask |= ICR_TXDM | ICR_RXDM; /* enable the interrupt source */
- csr &= ~(CSR_TXD | CSR_RXD); /* ensure restart of a stalled TX or RX */
- outb(csr, EWRK3_CSR);
- netif_wake_queue(dev);
- } else {
- lp->irq_mask &= ~(ICR_TXDM | ICR_RXDM); /* disable the interrupt source */
- }
-
- /* Unmask the EWRK3 board interrupts and turn off the LED */
- cr &= ~(lp->led_mask);
- outb(cr, EWRK3_CR);
- ENABLE_IRQs;
- spin_unlock(&lp->hw_lock);
- return IRQ_HANDLED;
-}
-
-/* Called with lp->hw_lock held */
-static int ewrk3_rx(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, status = 0;
- u_char page;
- void __iomem *buf = NULL;
-
- while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */
- if ((page = inb(EWRK3_RQ)) < lp->mPage) { /* Get next entry's buffer page */
- /*
- ** Set up shared memory window and pointer into the window
- */
- if (lp->shmem_length == IO_ONLY) {
- outb(page, EWRK3_IOPR);
- } else if (lp->shmem_length == SHMEM_2K) {
- buf = lp->shmem;
- outb(page, EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_32K) {
- buf = (((short) page << 11) & 0x7800) + lp->shmem;
- outb((page >> 4), EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_64K) {
- buf = (((short) page << 11) & 0xf800) + lp->shmem;
- outb((page >> 5), EWRK3_MPR);
- } else {
- status = -1;
- printk("%s: Oops - your private data area is hosed!\n", dev->name);
- }
-
- if (!status) {
- char rx_status;
- int pkt_len;
-
- if (lp->shmem_length == IO_ONLY) {
- rx_status = inb(EWRK3_DATA);
- pkt_len = inb(EWRK3_DATA);
- pkt_len |= ((u_short) inb(EWRK3_DATA) << 8);
- } else {
- rx_status = readb(buf);
- buf += 1;
- pkt_len = readw(buf);
- buf += 3;
- }
-
- if (!(rx_status & R_ROK)) { /* There was an error. */
- dev->stats.rx_errors++; /* Update the error stats. */
- if (rx_status & R_DBE)
- dev->stats.rx_frame_errors++;
- if (rx_status & R_CRC)
- dev->stats.rx_crc_errors++;
- if (rx_status & R_PLL)
- dev->stats.rx_fifo_errors++;
- } else {
- struct sk_buff *skb;
- skb = netdev_alloc_skb(dev,
- pkt_len + 2);
-
- if (skb != NULL) {
- unsigned char *p;
- skb_reserve(skb, 2); /* Align to 16 bytes */
- p = skb_put(skb, pkt_len);
-
- if (lp->shmem_length == IO_ONLY) {
- *p = inb(EWRK3_DATA); /* dummy read */
- for (i = 0; i < pkt_len; i++) {
- *p++ = inb(EWRK3_DATA);
- }
- } else {
- memcpy_fromio(p, buf, pkt_len);
- }
-
- for (i = 1; i < EWRK3_PKT_STAT_SZ - 1; i++) {
- if (pkt_len < i * EWRK3_PKT_BIN_SZ) {
- lp->pktStats.bins[i]++;
- i = EWRK3_PKT_STAT_SZ;
- }
- }
- p = skb->data; /* Look at the dest addr */
- if (is_multicast_ether_addr(p)) {
- if (is_broadcast_ether_addr(p)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(p,
- dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- }
- /*
- ** Notify the upper protocol layers that there is another
- ** packet to handle
- */
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
-
- /*
- ** Update stats
- */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- } else {
- printk("%s: Insufficient memory; nuking packet.\n", dev->name);
- dev->stats.rx_dropped++; /* Really, deferred. */
- break;
- }
- }
- }
- /*
- ** Return the received buffer to the free memory queue
- */
- outb(page, EWRK3_FMQ);
- } else {
- printk("ewrk3_rx(): Illegal page number, page %d\n", page);
- printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n", inb(EWRK3_CSR), inb(EWRK3_ICR), inb(EWRK3_FMQC));
- }
- }
- return status;
-}
-
-/*
-** Buffer sent - check for TX buffer errors.
-** Called with lp->hw_lock held
-*/
-static int ewrk3_tx(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char tx_status;
-
- while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */
- if (tx_status & T_VSTS) { /* The status is valid */
- if (tx_status & T_TXE) {
- dev->stats.tx_errors++;
- if (tx_status & T_NCL)
- dev->stats.tx_carrier_errors++;
- if (tx_status & T_LCL)
- dev->stats.tx_window_errors++;
- if (tx_status & T_CTU) {
- if ((tx_status & T_COLL) ^ T_XUR) {
- lp->pktStats.tx_underruns++;
- } else {
- lp->pktStats.excessive_underruns++;
- }
- } else if (tx_status & T_COLL) {
- if ((tx_status & T_COLL) ^ T_XCOLL) {
- dev->stats.collisions++;
- } else {
- lp->pktStats.excessive_collisions++;
- }
- }
- } else {
- dev->stats.tx_packets++;
- }
- }
- }
-
- return 0;
-}
-
-static int ewrk3_close(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char icr, csr;
-
- netif_stop_queue(dev);
-
- if (ewrk3_debug > 1) {
- printk("%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, inb(EWRK3_CSR));
- }
- /*
- ** We stop the EWRK3 here... mask interrupts and stop TX & RX
- */
- DISABLE_IRQs;
-
- STOP_EWRK3;
-
- /*
- ** Clean out the TX and RX queues here (note that one entry
- ** may get added to either the TXD or RX queues if the TX or RX
- ** just starts processing a packet before the STOP_EWRK3 command
- ** is received. This will be flushed in the ewrk3_open() call).
- */
- while (inb(EWRK3_TQ));
- while (inb(EWRK3_TDQ));
- while (inb(EWRK3_RQ));
-
- if (!lp->hard_strapped) {
- free_irq(dev->irq, dev);
- }
- return 0;
-}
-
-/*
- ** Set or clear the multicast filter for this adapter.
- */
-static void set_multicast_list(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char csr;
-
- csr = inb(EWRK3_CSR);
-
- if (lp->shmem_length == IO_ONLY) {
- lp->mctbl = NULL;
- } else {
- lp->mctbl = lp->shmem + PAGE0_HTE;
- }
-
- csr &= ~(CSR_PME | CSR_MCE);
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- csr |= CSR_PME;
- outb(csr, EWRK3_CSR);
- } else {
- SetMulticastFilter(dev);
- csr |= CSR_MCE;
- outb(csr, EWRK3_CSR);
- }
-}
-
-/*
- ** Calculate the hash code and update the logical address filter
- ** from a list of ethernet multicast addresses.
- ** Little endian crc one liner from Matt Thomas, DEC.
- **
- ** Note that when clearing the table, the broadcast bit must remain asserted
- ** to receive broadcast messages.
- */
-static void SetMulticastFilter(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- u_long iobase = dev->base_addr;
- int i;
- char bit, byte;
- short __iomem *p = lp->mctbl;
- u16 hashcode;
- u32 crc;
-
- spin_lock_irq(&lp->hw_lock);
-
- if (lp->shmem_length == IO_ONLY) {
- outb(0, EWRK3_IOPR);
- outw(PAGE0_HTE, EWRK3_PIR1);
- } else {
- outb(0, EWRK3_MPR);
- }
-
- if (dev->flags & IFF_ALLMULTI) {
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- if (lp->shmem_length == IO_ONLY) {
- outb(0xff, EWRK3_DATA);
- } else { /* memset didn't work here */
- writew(0xffff, p);
- p++;
- i++;
- }
- }
- } else {
- /* Clear table except for broadcast bit */
- if (lp->shmem_length == IO_ONLY) {
- for (i = 0; i < (HASH_TABLE_LEN >> 4) - 1; i++) {
- outb(0x00, EWRK3_DATA);
- }
- outb(0x80, EWRK3_DATA);
- i++; /* insert the broadcast bit */
- for (; i < (HASH_TABLE_LEN >> 3); i++) {
- outb(0x00, EWRK3_DATA);
- }
- } else {
- memset_io(lp->mctbl, 0, HASH_TABLE_LEN >> 3);
- writeb(0x80, lp->mctbl + (HASH_TABLE_LEN >> 4) - 1);
- }
-
- /* Update table */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
-
- byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
- bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
-
- if (lp->shmem_length == IO_ONLY) {
- u_char tmp;
-
- outw(PAGE0_HTE + byte, EWRK3_PIR1);
- tmp = inb(EWRK3_DATA);
- tmp |= bit;
- outw(PAGE0_HTE + byte, EWRK3_PIR1);
- outb(tmp, EWRK3_DATA);
- } else {
- writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
- }
- }
- }
-
- spin_unlock_irq(&lp->hw_lock);
-}
-
-/*
- ** ISA bus I/O device probe
- */
-static int __init isa_probe(struct net_device *dev, u_long ioaddr)
-{
- int i = num_ewrks3s, maxSlots;
- int ret = -ENODEV;
-
- u_long iobase;
-
- if (ioaddr >= 0x400)
- goto out;
-
- if (ioaddr == 0) { /* Autoprobing */
- iobase = EWRK3_IO_BASE; /* Get the first slot address */
- maxSlots = 24;
- } else { /* Probe a specific location */
- iobase = ioaddr;
- maxSlots = i + 1;
- }
-
- for (; (i < maxSlots) && (dev != NULL);
- iobase += EWRK3_IOP_INC, i++)
- {
- if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) {
- if (DevicePresent(iobase) == 0) {
- int irq = dev->irq;
- ret = ewrk3_hw_init(dev, iobase);
- if (!ret)
- break;
- dev->irq = irq;
- }
- release_region(iobase, EWRK3_TOTAL_SIZE);
- }
- }
- out:
-
- return ret;
-}
-
-/*
- ** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
- ** the motherboard.
- */
-static int __init eisa_probe(struct net_device *dev, u_long ioaddr)
-{
- int i, maxSlots;
- u_long iobase;
- int ret = -ENODEV;
-
- if (ioaddr < 0x1000)
- goto out;
-
- iobase = ioaddr;
- i = (ioaddr >> 12);
- maxSlots = i + 1;
-
- for (i = 1; (i < maxSlots) && (dev != NULL); i++, iobase += EISA_SLOT_INC) {
- if (EISA_signature(name, EISA_ID) == 0) {
- if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME) &&
- DevicePresent(iobase) == 0) {
- int irq = dev->irq;
- ret = ewrk3_hw_init(dev, iobase);
- if (!ret)
- break;
- dev->irq = irq;
- }
- release_region(iobase, EWRK3_TOTAL_SIZE);
- }
- }
-
- out:
- return ret;
-}
-
-
-/*
- ** Read the EWRK3 EEPROM using this routine
- */
-static int Read_EEPROM(u_long iobase, u_char eaddr)
-{
- int i;
-
- outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
- outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
-
- return inw(EWRK3_EPROM1); /* 16 bits data return */
-}
-
-/*
- ** Write the EWRK3 EEPROM using this routine
- */
-static int Write_EEPROM(short data, u_long iobase, u_char eaddr)
-{
- int i;
-
- outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
- outw(data, EWRK3_EPROM1); /* write data to register */
- outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
- outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */
- for (i = 0; i < 75000; i++)
- inb(EWRK3_CSR); /* wait 15msec */
- outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
-
- return 0;
-}
-
-/*
- ** Look for a particular board name in the on-board EEPROM.
- */
-static void __init EthwrkSignature(char *name, char *eeprom_image)
-{
- int i;
- char *signatures[] = EWRK3_SIGNATURE;
-
- for (i=0; *signatures[i] != '\0'; i++)
- if( !strncmp(eeprom_image+EEPROM_PNAME7, signatures[i], strlen(signatures[i])) )
- break;
-
- if (*signatures[i] != '\0') {
- memcpy(name, eeprom_image+EEPROM_PNAME7, EWRK3_STRLEN);
- name[EWRK3_STRLEN] = '\0';
- } else
- name[0] = '\0';
-}
-
-/*
- ** Look for a special sequence in the Ethernet station address PROM that
- ** is common across all EWRK3 products.
- **
- ** Search the Ethernet address ROM for the signature. Since the ROM address
- ** counter can start at an arbitrary point, the search must include the entire
- ** probe sequence length plus the (length_of_the_signature - 1).
- ** Stop the search IMMEDIATELY after the signature is found so that the
- ** PROM address counter is correctly positioned at the start of the
- ** ethernet address for later read out.
- */
-
-static int __init DevicePresent(u_long iobase)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- }
- dev;
- short sigLength;
- char data;
- int i, j, status = 0;
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
- data = inb(EWRK3_APROM);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) {
- j = 1;
- } else {
- j = 0;
- }
- }
- }
-
- if (j != sigLength) {
- status = -ENODEV; /* search failed */
- }
- return status;
-}
-
-static u_char __init get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType)
-{
- int i, j, k;
- u_short chksum;
- u_char crc, lfsr, sd, status = 0;
- u_long iobase = dev->base_addr;
- u16 tmp;
-
- if (chipType == LeMAC2) {
- for (crc = 0x6a, j = 0; j < ETH_ALEN; j++) {
- sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j];
- outb(dev->dev_addr[j], EWRK3_PAR0 + j);
- for (k = 0; k < 8; k++, sd >>= 1) {
- lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
- crc = (crc >> 1) + lfsr;
- }
- }
- if (crc != eeprom_image[EEPROM_PA_CRC])
- status = -1;
- } else {
- for (i = 0, k = 0; i < ETH_ALEN;) {
- k <<= 1;
- if (k > 0xffff)
- k -= 0xffff;
-
- k += (u_char) (tmp = inb(EWRK3_APROM));
- dev->dev_addr[i] = (u_char) tmp;
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
- i++;
- k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8);
- dev->dev_addr[i] = (u_char) tmp;
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
- i++;
-
- if (k > 0xffff)
- k -= 0xffff;
- }
- if (k == 0xffff)
- k = 0;
- chksum = inb(EWRK3_APROM);
- chksum |= (inb(EWRK3_APROM) << 8);
- if (k != chksum)
- status = -1;
- }
-
- return status;
-}
-
-/*
- ** Look for a particular board name in the EISA configuration space
- */
-static int __init EISA_signature(char *name, s32 eisa_id)
-{
- u_long i;
- char *signatures[] = EWRK3_SIGNATURE;
- char ManCode[EWRK3_STRLEN];
- union {
- s32 ID;
- char Id[4];
- } Eisa;
- int status = 0;
-
- *name = '\0';
- for (i = 0; i < 4; i++) {
- Eisa.Id[i] = inb(eisa_id + i);
- }
-
- ManCode[0] = (((Eisa.Id[0] >> 2) & 0x1f) + 0x40);
- ManCode[1] = (((Eisa.Id[1] & 0xe0) >> 5) + ((Eisa.Id[0] & 0x03) << 3) + 0x40);
- ManCode[2] = (((Eisa.Id[2] >> 4) & 0x0f) + 0x30);
- ManCode[3] = ((Eisa.Id[2] & 0x0f) + 0x30);
- ManCode[4] = (((Eisa.Id[3] >> 4) & 0x0f) + 0x30);
- ManCode[5] = '\0';
-
- for (i = 0; (*signatures[i] != '\0') && (*name == '\0'); i++) {
- if (strstr(ManCode, signatures[i]) != NULL) {
- strcpy(name, ManCode);
- status = 1;
- }
- }
-
- return status; /* return the device name string */
-}
-
-static void ewrk3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- int fwrev = Read_EEPROM(dev->base_addr, EEPROM_REVLVL);
-
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->fw_version, "%d", fwrev);
- strcpy(info->bus_info, "N/A");
- info->eedump_len = EEPROM_MAX;
-}
-
-static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- u8 cr = inb(EWRK3_CR);
-
- switch (lp->adapter_name[4]) {
- case '3': /* DE203 */
- ecmd->supported = SUPPORTED_BNC;
- ecmd->port = PORT_BNC;
- break;
-
- case '4': /* DE204 */
- ecmd->supported = SUPPORTED_TP;
- ecmd->port = PORT_TP;
- break;
-
- case '5': /* DE205 */
- ecmd->supported = SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_AUI;
- ecmd->autoneg = !(cr & CR_APD);
- /*
- ** Port is only valid if autoneg is disabled
- ** and even then we don't know if AUI is jumpered.
- */
- if (!ecmd->autoneg)
- ecmd->port = (cr & CR_PSEL) ? PORT_BNC : PORT_TP;
- break;
- }
-
- ecmd->supported |= SUPPORTED_10baseT_Half;
- ethtool_cmd_speed_set(ecmd, SPEED_10);
- ecmd->duplex = DUPLEX_HALF;
- return 0;
-}
-
-static int ewrk3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- unsigned long flags;
- u8 cr;
-
- /* DE205 is the only card with anything to set */
- if (lp->adapter_name[4] != '5')
- return -EOPNOTSUPP;
-
- /* Sanity-check parameters */
- if (ecmd->speed != SPEED_10)
- return -EINVAL;
- if (ecmd->port != PORT_TP && ecmd->port != PORT_BNC)
- return -EINVAL; /* AUI is not software-selectable */
- if (ecmd->transceiver != XCVR_INTERNAL)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF)
- return -EINVAL;
- if (ecmd->phy_address != 0)
- return -EINVAL;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- cr = inb(EWRK3_CR);
-
- /* If Autoneg is set, change to Auto Port mode */
- /* Otherwise, disable Auto Port and set port explicitly */
- if (ecmd->autoneg) {
- cr &= ~CR_APD;
- } else {
- cr |= CR_APD;
- if (ecmd->port == PORT_TP)
- cr &= ~CR_PSEL; /* Force TP */
- else
- cr |= CR_PSEL; /* Force BNC */
- }
-
- /* Commit the changes */
- outb(cr, EWRK3_CR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- return 0;
-}
-
-static u32 ewrk3_get_link(struct net_device *dev)
-{
- unsigned long iobase = dev->base_addr;
- u8 cmr = inb(EWRK3_CMR);
- /* DE203 has BNC only and link status does not apply */
- /* On DE204 this is always valid since TP is the only port. */
- /* On DE205 this reflects TP status even if BNC or AUI is selected. */
- return !(cmr & CMR_LINK);
-}
-
-static int ewrk3_set_phys_id(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- u8 cr;
-
- spin_lock_irq(&lp->hw_lock);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- /* Prevent ISR from twiddling the LED */
- lp->led_mask = 0;
- spin_unlock_irq(&lp->hw_lock);
- return 2; /* cycle on/off twice per second */
-
- case ETHTOOL_ID_ON:
- cr = inb(EWRK3_CR);
- outb(cr | CR_LED, EWRK3_CR);
- break;
-
- case ETHTOOL_ID_OFF:
- cr = inb(EWRK3_CR);
- outb(cr & ~CR_LED, EWRK3_CR);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- lp->led_mask = CR_LED;
- cr = inb(EWRK3_CR);
- outb(cr & ~CR_LED, EWRK3_CR);
- }
- spin_unlock_irq(&lp->hw_lock);
-
- return 0;
-}
-
-static const struct ethtool_ops ethtool_ops_203 = {
- .get_drvinfo = ewrk3_get_drvinfo,
- .get_settings = ewrk3_get_settings,
- .set_settings = ewrk3_set_settings,
- .set_phys_id = ewrk3_set_phys_id,
-};
-
-static const struct ethtool_ops ethtool_ops = {
- .get_drvinfo = ewrk3_get_drvinfo,
- .get_settings = ewrk3_get_settings,
- .set_settings = ewrk3_set_settings,
- .get_link = ewrk3_get_link,
- .set_phys_id = ewrk3_set_phys_id,
-};
-
-/*
- ** Perform IOCTL call functions here. Some are privileged operations and the
- ** effective uid is checked in those cases.
- */
-static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_ifru;
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- u_char csr;
- unsigned long flags;
- union ewrk3_addr {
- u_char addr[HASH_TABLE_LEN * ETH_ALEN];
- u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
- };
-
- union ewrk3_addr *tmp;
-
- /* All we handle are private IOCTLs */
- if (cmd != EWRK3IOCTL)
- return -EOPNOTSUPP;
-
- tmp = kmalloc(sizeof(union ewrk3_addr), GFP_KERNEL);
- if(tmp==NULL)
- return -ENOMEM;
-
- switch (ioc->cmd) {
- case EWRK3_GET_HWADDR: /* Get the hardware address */
- for (i = 0; i < ETH_ALEN; i++) {
- tmp->addr[i] = dev->dev_addr[i];
- }
- ioc->len = ETH_ALEN;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
-
- case EWRK3_SET_HWADDR: /* Set the hardware address */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= (CSR_TXD | CSR_RXD);
- outb(csr, EWRK3_CSR); /* Disable the TX and RX */
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN)) {
- status = -EFAULT;
- break;
- }
- spin_lock_irqsave(&lp->hw_lock, flags);
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp->addr[i];
- outb(tmp->addr[i], EWRK3_PAR0 + i);
- }
-
- csr = inb(EWRK3_CSR);
- csr &= ~(CSR_TXD | CSR_RXD); /* Enable the TX and RX */
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_SET_PROM: /* Set Promiscuous Mode */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= CSR_PME;
- csr &= ~CSR_MCE;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr &= ~CSR_PME;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_MCA: /* Get the multicast address table */
- spin_lock_irqsave(&lp->hw_lock, flags);
- if (lp->shmem_length == IO_ONLY) {
- outb(0, EWRK3_IOPR);
- outw(PAGE0_HTE, EWRK3_PIR1);
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- tmp->addr[i] = inb(EWRK3_DATA);
- }
- } else {
- outb(0, EWRK3_MPR);
- memcpy_fromio(tmp->addr, lp->shmem + PAGE0_HTE, (HASH_TABLE_LEN >> 3));
- }
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- ioc->len = (HASH_TABLE_LEN >> 3);
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
-
- break;
- case EWRK3_SET_MCA: /* Set a multicast address */
- if (capable(CAP_NET_ADMIN)) {
- if (ioc->len > HASH_TABLE_LEN) {
- status = -EINVAL;
- break;
- }
- if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) {
- status = -EFAULT;
- break;
- }
- set_multicast_list(dev);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_MCA: /* Clear all multicast addresses */
- if (capable(CAP_NET_ADMIN)) {
- set_multicast_list(dev);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_MCA_EN: /* Enable multicast addressing */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= CSR_MCE;
- csr &= ~CSR_PME;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_STATS: { /* Get the driver statistics */
- struct ewrk3_stats *tmp_stats =
- kmalloc(sizeof(lp->pktStats), GFP_KERNEL);
- if (!tmp_stats) {
- status = -ENOMEM;
- break;
- }
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- memcpy(tmp_stats, &lp->pktStats, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- ioc->len = sizeof(lp->pktStats);
- if (copy_to_user(ioc->data, tmp_stats, sizeof(lp->pktStats)))
- status = -EFAULT;
- kfree(tmp_stats);
- break;
- }
- case EWRK3_CLR_STATS: /* Zero out the driver statistics */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->hw_lock,flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_CSR: /* Get the CSR Register contents */
- tmp->addr[0] = inb(EWRK3_CSR);
- ioc->len = 1;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
- case EWRK3_SET_CSR: /* Set the CSR Register contents */
- if (capable(CAP_NET_ADMIN)) {
- if (copy_from_user(tmp->addr, ioc->data, 1)) {
- status = -EFAULT;
- break;
- }
- outb(tmp->addr[0], EWRK3_CSR);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
- if (capable(CAP_NET_ADMIN)) {
- for (i = 0; i < (EEPROM_MAX >> 1); i++) {
- tmp->val[i] = (short) Read_EEPROM(iobase, i);
- }
- i = EEPROM_MAX;
- tmp->addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */
- for (j = 0; j < ETH_ALEN; j++) {
- tmp->addr[i++] = inb(EWRK3_PAR0 + j);
- }
- ioc->len = EEPROM_MAX + 1 + ETH_ALEN;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
- if (capable(CAP_NET_ADMIN)) {
- if (copy_from_user(tmp->addr, ioc->data, EEPROM_MAX)) {
- status = -EFAULT;
- break;
- }
- for (i = 0; i < (EEPROM_MAX >> 1); i++) {
- Write_EEPROM(tmp->val[i], iobase, i);
- }
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_CMR: /* Get the CMR Register contents */
- tmp->addr[0] = inb(EWRK3_CMR);
- ioc->len = 1;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
- case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */
- if (capable(CAP_NET_ADMIN)) {
- lp->txc = 1;
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */
- if (capable(CAP_NET_ADMIN)) {
- lp->txc = 0;
- } else {
- status = -EPERM;
- }
-
- break;
- default:
- status = -EOPNOTSUPP;
- }
- kfree(tmp);
- return status;
-}
-
-#ifdef MODULE
-static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
-static int ndevs;
-static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, byte, NULL, 0);
-MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
-
-static __exit void ewrk3_exit_module(void)
-{
- int i;
-
- for( i=0; i<ndevs; i++ ) {
- struct net_device *dev = ewrk3_devs[i];
- struct ewrk3_private *lp = netdev_priv(dev);
- ewrk3_devs[i] = NULL;
- unregister_netdev(dev);
- release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
- iounmap(lp->shmem);
- free_netdev(dev);
- }
-}
-
-static __init int ewrk3_init_module(void)
-{
- int i=0;
-
- while( io[i] && irq[i] ) {
- struct net_device *dev
- = alloc_etherdev(sizeof(struct ewrk3_private));
-
- if (!dev)
- break;
-
- if (ewrk3_probe1(dev, io[i], irq[i]) != 0) {
- free_netdev(dev);
- break;
- }
-
- ewrk3_devs[ndevs++] = dev;
- i++;
- }
-
- return ndevs ? 0 : -EIO;
-}
-
-
-/* Hack for breakage in new module stuff */
-module_exit(ewrk3_exit_module);
-module_init(ewrk3_init_module);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dec/ewrk3.h b/drivers/net/ethernet/dec/ewrk3.h
deleted file mode 100644
index 8e0ee906567b..000000000000
--- a/drivers/net/ethernet/dec/ewrk3.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- Written 1994 by David C. Davies.
-
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of the
- GNU General Public License, incorporated herein by reference.
-
- The author may be reached as davies@wanton.lkg.dec.com or Digital
- Equipment Corporation, 550 King Street, Littleton MA 01460.
-
- =========================================================================
-*/
-
-/*
-** I/O Address Register Map
-*/
-#define EWRK3_CSR iobase+0x00 /* Control and Status Register */
-#define EWRK3_CR iobase+0x01 /* Control Register */
-#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */
-#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */
-#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */
-#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */
-#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */
-#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */
-#define EWRK3_RQ iobase+0x08 /* Receive Queue */
-#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */
-#define EWRK3_TQ iobase+0x0a /* Transmit Queue */
-#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */
-#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */
-#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */
-#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */
-#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */
-#define EWRK3_DATA iobase+0x10 /* Data Register */
-#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */
-#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */
-#define EWRK3_MPR iobase+0x13 /* Memory Page Register */
-#define EWRK3_MBR iobase+0x14 /* Memory Base Register */
-#define EWRK3_APROM iobase+0x15 /* Address PROM */
-#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */
-#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */
-#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */
-#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */
-#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */
-#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */
-#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */
-#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */
-#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */
-
-/*
-** Control Page Map
-*/
-#define PAGE0_FMQ 0x000 /* Free Memory Queue */
-#define PAGE0_RQ 0x080 /* Receive Queue */
-#define PAGE0_TQ 0x100 /* Transmit Queue */
-#define PAGE0_TDQ 0x180 /* Transmit Done Queue */
-#define PAGE0_HTE 0x200 /* Hash Table Entries */
-#define PAGE0_RSVD 0x240 /* RESERVED */
-#define PAGE0_USRD 0x600 /* User Data */
-
-/*
-** Control and Status Register bit definitions (EWRK3_CSR)
-*/
-#define CSR_RA 0x80 /* Runt Accept */
-#define CSR_PME 0x40 /* Promiscuous Mode Enable */
-#define CSR_MCE 0x20 /* Multicast Enable */
-#define CSR_TNE 0x08 /* TX Done Queue Not Empty */
-#define CSR_RNE 0x04 /* RX Queue Not Empty */
-#define CSR_TXD 0x02 /* TX Disable */
-#define CSR_RXD 0x01 /* RX Disable */
-
-/*
-** Control Register bit definitions (EWRK3_CR)
-*/
-#define CR_APD 0x80 /* Auto Port Disable */
-#define CR_PSEL 0x40 /* Port Select (0->TP port) */
-#define CR_LBCK 0x20 /* LoopBaCK enable */
-#define CR_FDUP 0x10 /* Full DUPlex enable */
-#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */
-#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */
-#define CR_LED 0x02 /* LED (1-> turn on) */
-
-/*
-** Interrupt Control Register bit definitions (EWRK3_ICR)
-*/
-#define ICR_IE 0x80 /* Interrupt Enable */
-#define ICR_IS 0x60 /* Interrupt Selected */
-#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */
-#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */
-#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */
-#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */
-
-/*
-** Transmit Status Register bit definitions (EWRK3_TSR)
-*/
-#define TSR_NCL 0x80 /* No Carrier Loopback */
-#define TSR_ID 0x40 /* Initially Deferred */
-#define TSR_LCL 0x20 /* Late CoLlision */
-#define TSR_ECL 0x10 /* Excessive CoLlisions */
-#define TSR_RCNTR 0x0f /* Retries CouNTeR */
-
-/*
-** I/O Page Register bit definitions (EWRK3_IOPR)
-*/
-#define EEPROM_INIT 0xc0 /* EEPROM INIT command */
-#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */
-#define EEPROM_WR 0xd0 /* EEPROM WRITE command */
-#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */
-#define EEPROM_RD 0xe0 /* EEPROM READ command */
-
-/*
-** I/O Base Register bit definitions (EWRK3_IOBR)
-*/
-#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */
-#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */
-
-/*
-** I/O Configuration/Management Register bit definitions (EWRK3_CMR)
-*/
-#define CMR_RA 0x80 /* Read Ahead */
-#define CMR_WB 0x40 /* Write Behind */
-#define CMR_LINK 0x20 /* 0->TP */
-#define CMR_POLARITY 0x10 /* Informational */
-#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */
-#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */
-#define CMR_PNP 0x04 /* Plug 'n Play */
-#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */
-#define CMR_0WS 0x01 /* Zero Wait State */
-
-/*
-** MAC Receive Status Register bit definitions
-*/
-
-#define R_ROK 0x80 /* Receive OK summary */
-#define R_IAM 0x10 /* Individual Address Match */
-#define R_MCM 0x08 /* MultiCast Match */
-#define R_DBE 0x04 /* Dribble Bit Error */
-#define R_CRC 0x02 /* CRC error */
-#define R_PLL 0x01 /* Phase Lock Lost */
-
-/*
-** MAC Transmit Control Register bit definitions
-*/
-
-#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */
-#define TCR_SED 0x20 /* Stop when Error Detected */
-#define TCR_QMODE 0x10 /* Q_MODE */
-#define TCR_LAB 0x08 /* Less Aggressive Backoff */
-#define TCR_PAD 0x04 /* PAD Runt Packets */
-#define TCR_IFC 0x02 /* Insert Frame Check */
-#define TCR_ISA 0x01 /* Insert Source Address */
-
-/*
-** MAC Transmit Status Register bit definitions
-*/
-
-#define T_VSTS 0x80 /* Valid STatuS */
-#define T_CTU 0x40 /* Cut Through Used */
-#define T_SQE 0x20 /* Signal Quality Error */
-#define T_NCL 0x10 /* No Carrier Loopback */
-#define T_LCL 0x08 /* Late Collision */
-#define T_ID 0x04 /* Initially Deferred */
-#define T_COLL 0x03 /* COLLision status */
-#define T_XCOLL 0x03 /* Excessive Collisions */
-#define T_MCOLL 0x02 /* Multiple Collisions */
-#define T_OCOLL 0x01 /* One Collision */
-#define T_NOCOLL 0x00 /* No Collisions */
-#define T_XUR 0x03 /* Excessive Underruns */
-#define T_TXE 0x7f /* TX Errors */
-
-/*
-** EISA Configuration Register bit definitions
-*/
-
-#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */
-#define EISA_CR iobase + 0x0c84 /* EISA Control Register */
-
-/*
-** EEPROM BYTES
-*/
-#define EEPROM_MEMB 0x00
-#define EEPROM_IOB 0x01
-#define EEPROM_EISA_ID0 0x02
-#define EEPROM_EISA_ID1 0x03
-#define EEPROM_EISA_ID2 0x04
-#define EEPROM_EISA_ID3 0x05
-#define EEPROM_MISC0 0x06
-#define EEPROM_MISC1 0x07
-#define EEPROM_PNAME7 0x08
-#define EEPROM_PNAME6 0x09
-#define EEPROM_PNAME5 0x0a
-#define EEPROM_PNAME4 0x0b
-#define EEPROM_PNAME3 0x0c
-#define EEPROM_PNAME2 0x0d
-#define EEPROM_PNAME1 0x0e
-#define EEPROM_PNAME0 0x0f
-#define EEPROM_SWFLAGS 0x10
-#define EEPROM_HWCAT 0x11
-#define EEPROM_NETMAN2 0x12
-#define EEPROM_REVLVL 0x13
-#define EEPROM_NETMAN0 0x14
-#define EEPROM_NETMAN1 0x15
-#define EEPROM_CHIPVER 0x16
-#define EEPROM_SETUP 0x17
-#define EEPROM_PADDR0 0x18
-#define EEPROM_PADDR1 0x19
-#define EEPROM_PADDR2 0x1a
-#define EEPROM_PADDR3 0x1b
-#define EEPROM_PADDR4 0x1c
-#define EEPROM_PADDR5 0x1d
-#define EEPROM_PA_CRC 0x1e
-#define EEPROM_CHKSUM 0x1f
-
-/*
-** EEPROM bytes for checksumming
-*/
-#define EEPROM_MAX 32 /* bytes */
-
-/*
-** EEPROM MISCELLANEOUS FLAGS
-*/
-#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */
-#define READ_AHEAD 0x0080 /* Read Ahead feature */
-#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */
-#define IRQ_SEL 0x0060 /* IRQ line selection */
-#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */
-#define ENA_16 0x0004 /* Enables 16 bit memory transfers */
-#define WRITE_BEHIND 0x0002 /* Write Behind feature */
-#define _0WS_ENA 0x0001 /* Zero Wait State Enable */
-
-/*
-** EEPROM NETWORK MANAGEMENT FLAGS
-*/
-#define NETMAN_POL 0x04 /* Polarity defeat */
-#define NETMAN_LINK 0x02 /* Link defeat */
-#define NETMAN_CCE 0x01 /* Custom Counters Enable */
-
-/*
-** EEPROM SW FLAGS
-*/
-#define SW_SQE 0x10 /* Signal Quality Error */
-#define SW_LAB 0x08 /* Less Aggressive Backoff */
-#define SW_INIT 0x04 /* Initialized */
-#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */
-#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */
-
-/*
-** EEPROM SETUP FLAGS
-*/
-#define SETUP_APD 0x80 /* AutoPort Disable */
-#define SETUP_PS 0x40 /* Port Select */
-#define SETUP_MP 0x20 /* MultiPort */
-#define SETUP_1TP 0x10 /* 1 port, TP */
-#define SETUP_1COAX 0x00 /* 1 port, Coax */
-#define SETUP_DRAM 0x02 /* Number of DRAMS on board */
-
-/*
-** EEPROM MANAGEMENT FLAGS
-*/
-#define MGMT_CCE 0x01 /* Custom Counters Enable */
-
-/*
-** EEPROM VERSIONS
-*/
-#define LeMAC 0x11
-#define LeMAC2 0x12
-
-/*
-** Miscellaneous
-*/
-
-#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */
-#define EISA_EN 0x0001 /* Enable EISA bus buffers */
-
-#define HASH_TABLE_LEN 512 /* Bits */
-
-#define XCT 0x80 /* Transmit Cut Through */
-#define PRELOAD 16 /* 4 long words */
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define EEPROM_OFFSET(a) ((u_short)((u_long)(a)))
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-#define EWRK3IOCTL SIOCDEVPRIVATE
-
-struct ewrk3_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */
-#define EWRK3_SET_HWADDR 0x02 /* Set the hardware address */
-#define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */
-#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */
-#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define EWRK3_GET_MCA 0x06 /* Get a multicast address */
-#define EWRK3_SET_MCA 0x07 /* Set a multicast address */
-#define EWRK3_CLR_MCA 0x08 /* Clear a multicast address */
-#define EWRK3_MCA_EN 0x09 /* Enable a multicast address group */
-#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */
-#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */
-#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */
-#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */
-#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */
-#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */
-#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */
-#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */
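
For reference, a minimal user-space sketch of driving the private ioctl interface defined above (struct ewrk3_ioctl plus the EWRK3_* command codes). The interface name "eth0", and the assumption that the driver reads the command block embedded in the ifreq union, are illustrative only, not taken from the removed sources.

/* Hypothetical user-space sketch: issue EWRK3_GET_HWADDR through the
 * SIOCDEVPRIVATE ioctl.  Assumes the driver reads a struct ewrk3_ioctl
 * embedded in the ifreq union (one common SIOCDEVPRIVATE pattern); the
 * interface name "eth0" is an assumption.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

#define EWRK3_GET_HWADDR 0x01

struct ewrk3_ioctl {
	unsigned short cmd;	/* Command to run */
	unsigned short len;	/* Length of the data buffer */
	unsigned char *data;	/* Pointer to the data buffer */
};

int main(void)
{
	unsigned char hwaddr[6] = { 0 };
	struct ewrk3_ioctl ioc = { .cmd = EWRK3_GET_HWADDR,
				   .len = sizeof(hwaddr),
				   .data = hwaddr };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	memcpy(&ifr.ifr_ifru, &ioc, sizeof(ioc));	/* embed the command block */

	if (ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
		printf("hwaddr: %02x:%02x:%02x:%02x:%02x:%02x\n",
		       hwaddr[0], hwaddr[1], hwaddr[2],
		       hwaddr[3], hwaddr[4], hwaddr[5]);
	close(fd);
	return 0;
}
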
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 1203be0436e2..0c37fb2cc867 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -57,8 +57,8 @@ config TULIP
be called tulip.
config TULIP_MWI
- bool "New bus configuration (EXPERIMENTAL)"
- depends on TULIP && EXPERIMENTAL
+ bool "New bus configuration"
+ depends on TULIP
---help---
This configures your Tulip card specifically for the card and
system cache line size type you are using.
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index b5afe218c31b..ee26ce78e270 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_DLINK
bool "D-Link devices"
default y
- depends on PCI || PARPORT
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -18,36 +18,6 @@ config NET_VENDOR_DLINK
if NET_VENDOR_DLINK
-config DE600
- tristate "D-Link DE600 pocket adapter support"
- depends on PARPORT
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:Documentation/networking/DLINK.txt> as well as the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, if you want to use
- this. It is possible to have several devices share a single parallel
- port and it is safe to compile the corresponding drivers into the
- kernel.
-
- To compile this driver as a module, choose M here: the module
- will be called de600.
-
-config DE620
- tristate "D-Link DE620 pocket adapter support"
- depends on PARPORT
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:Documentation/networking/DLINK.txt> as well as the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, if you want to use
- this. It is possible to have several devices share a single parallel
- port and it is safe to compile the corresponding drivers into the
- kernel.
-
- To compile this driver as a module, choose M here: the module
- will be called de620.
-
config DL2K
tristate "DL2000/TC902x-based Gigabit Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile
index c705eaa4f5b2..40085f67157b 100644
--- a/drivers/net/ethernet/dlink/Makefile
+++ b/drivers/net/ethernet/dlink/Makefile
@@ -2,7 +2,5 @@
# Makefile for the D-Link network device drivers.
#
-obj-$(CONFIG_DE600) += de600.o
-obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_DL2K) += dl2k.o
obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
deleted file mode 100644
index 414f0eea1049..000000000000
--- a/drivers/net/ethernet/dlink/de600.c
+++ /dev/null
@@ -1,529 +0,0 @@
-static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n";
-/*
- * de600.c
- *
- * Linux driver for the D-Link DE-600 Ethernet pocket adapter.
- *
- * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall
- * The Author may be reached as bj0rn@blox.se
- *
- * Based on adapter information gathered from DE600.ASM by D-Link Inc.,
- * as included on disk C in the v.2.11 of PC/TCP from FTP Software.
- * For DE600.asm:
- * Portions (C) Copyright 1990 D-Link, Inc.
- * Copyright, 1988-1992, Russell Nelson, Crynwr Software
- *
- * Adapted to the sample network driver core for linux,
- * written by: Donald Becker <becker@super.org>
- * (Now at <becker@scyld.com>)
- *
- **************************************************************/
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- **************************************************************/
-
-/* Add more time here if your adapter won't work OK: */
-#define DE600_SLOW_DOWN udelay(delay_time)
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/io.h>
-
-#include "de600.h"
-
-static bool check_lost = true;
-module_param(check_lost, bool, 0);
-MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
-
-static unsigned int delay_time = 10;
-module_param(delay_time, int, 0);
-MODULE_PARM_DESC(delay_time, "DE-600 delay on I/O in microseconds");
-
-
-/*
- * D-Link driver variables:
- */
-
-static volatile int rx_page;
-
-#define TX_PAGES 2
-static volatile int tx_fifo[TX_PAGES];
-static volatile int tx_fifo_in;
-static volatile int tx_fifo_out;
-static volatile int free_tx_pages = TX_PAGES;
-static int was_down;
-static DEFINE_SPINLOCK(de600_lock);
-
-static inline u8 de600_read_status(struct net_device *dev)
-{
- u8 status;
-
- outb_p(STATUS, DATA_PORT);
- status = inb(STATUS_PORT);
- outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT);
-
- return status;
-}
-
-static inline u8 de600_read_byte(unsigned char type, struct net_device *dev)
-{
- /* dev used by macros */
- u8 lo;
- outb_p((type), DATA_PORT);
- lo = ((unsigned char)inb(STATUS_PORT)) >> 4;
- outb_p((type) | HI_NIBBLE, DATA_PORT);
- return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo;
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1).
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- */
-
-static int de600_open(struct net_device *dev)
-{
- unsigned long flags;
- int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev);
- if (ret) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
- return ret;
- }
- spin_lock_irqsave(&de600_lock, flags);
- ret = adapter_init(dev);
- spin_unlock_irqrestore(&de600_lock, flags);
- return ret;
-}
-
-/*
- * The inverse routine to de600_open().
- */
-
-static int de600_close(struct net_device *dev)
-{
- select_nic();
- rx_page = 0;
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
- de600_put_command(0);
- select_prn();
- free_irq(DE600_IRQ, dev);
- return 0;
-}
-
-static inline void trigger_interrupt(struct net_device *dev)
-{
- de600_put_command(FLIP_IRQ);
- select_prn();
- DE600_SLOW_DOWN;
- select_nic();
- de600_put_command(0);
-}
-
-/*
- * Copy a buffer to the adapter transmit page memory.
- * Start sending.
- */
-
-static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- int transmit_from;
- int len;
- int tickssofar;
- u8 *buffer = skb->data;
- int i;
-
- if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
- tickssofar = jiffies - dev_trans_start(dev);
- if (tickssofar < HZ/20)
- return NETDEV_TX_BUSY;
- /* else */
- printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
- /* Restart the adapter. */
- spin_lock_irqsave(&de600_lock, flags);
- if (adapter_init(dev)) {
- spin_unlock_irqrestore(&de600_lock, flags);
- return NETDEV_TX_BUSY;
- }
- spin_unlock_irqrestore(&de600_lock, flags);
- }
-
- /* Start real output */
- pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages);
-
- if ((len = skb->len) < RUNT)
- len = RUNT;
-
- spin_lock_irqsave(&de600_lock, flags);
- select_nic();
- tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
- tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */
-
- if(check_lost)
- {
- /* This costs about 40 instructions per packet... */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- de600_read_byte(READ_DATA, dev);
- if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
- if (adapter_init(dev)) {
- spin_unlock_irqrestore(&de600_lock, flags);
- return NETDEV_TX_BUSY;
- }
- }
- }
-
- de600_setup_address(transmit_from, RW_ADDR);
- for (i = 0; i < skb->len ; ++i, ++buffer)
- de600_put_byte(*buffer);
- for (; i < len; ++i)
- de600_put_byte(0);
-
- if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
- dev->trans_start = jiffies;
- netif_start_queue(dev); /* allow more packets into adapter */
- /* Send page and generate a faked interrupt */
- de600_setup_address(transmit_from, TX_ADDR);
- de600_put_command(TX_ENABLE);
- }
- else {
- if (free_tx_pages)
- netif_start_queue(dev);
- else
- netif_stop_queue(dev);
- select_prn();
- }
- spin_unlock_irqrestore(&de600_lock, flags);
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-
-static irqreturn_t de600_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- u8 irq_status;
- int retrig = 0;
- int boguscount = 0;
-
- spin_lock(&de600_lock);
-
- select_nic();
- irq_status = de600_read_status(dev);
-
- do {
- pr_debug("de600_interrupt (%02X)\n", irq_status);
-
- if (irq_status & RX_GOOD)
- de600_rx_intr(dev);
- else if (!(irq_status & RX_BUSY))
- de600_put_command(RX_ENABLE);
-
- /* Any transmission in progress? */
- if (free_tx_pages < TX_PAGES)
- retrig = de600_tx_intr(dev, irq_status);
- else
- retrig = 0;
-
- irq_status = de600_read_status(dev);
- } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
- /*
- * Yeah, it _looks_ like busy waiting, smells like busy waiting
- * and I know it's not PC, but please, it will only occur once
- * in a while and then only for a loop or so (< 1ms for sure!)
- */
-
- /* Enable adapter interrupts */
- select_prn();
- if (retrig)
- trigger_interrupt(dev);
- spin_unlock(&de600_lock);
- return IRQ_HANDLED;
-}
-
-static int de600_tx_intr(struct net_device *dev, int irq_status)
-{
- /*
- * Returns 1 if tx still not done
- */
-
- /* Check if current transmission is done yet */
- if (irq_status & TX_BUSY)
- return 1; /* tx not done, try again */
-
- /* else */
- /* If last transmission OK then bump fifo index */
- if (!(irq_status & TX_FAILED16)) {
- tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
- ++free_tx_pages;
- dev->stats.tx_packets++;
- netif_wake_queue(dev);
- }
-
- /* More to send, or resend last packet? */
- if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
- dev->trans_start = jiffies;
- de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
- de600_put_command(TX_ENABLE);
- return 1;
- }
- /* else */
-
- return 0;
-}
-
-/*
- * We have a good packet, get it out of the adapter.
- */
-static void de600_rx_intr(struct net_device *dev)
-{
- struct sk_buff *skb;
- int i;
- int read_from;
- int size;
- unsigned char *buffer;
-
- /* Get size of received packet */
- size = de600_read_byte(RX_LEN, dev); /* low byte */
- size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */
- size -= 4; /* Ignore trailing 4 CRC-bytes */
-
- /* Tell adapter where to store next incoming packet, enable receiver */
- read_from = rx_page_adr();
- next_rx_page();
- de600_put_command(RX_ENABLE);
-
- if ((size < 32) || (size > 1535)) {
- printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size);
- if (size > 10000)
- adapter_init(dev);
- return;
- }
-
- skb = netdev_alloc_skb(dev, size + 2);
- if (skb == NULL) {
- printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
- return;
- }
- /* else */
-
- skb_reserve(skb,2); /* Align */
-
- /* 'skb->data' points to the start of sk_buff data area. */
- buffer = skb_put(skb,size);
-
- /* copy the packet into the buffer */
- de600_setup_address(read_from, RW_ADDR);
- for (i = size; i > 0; --i, ++buffer)
- *buffer = de600_read_byte(READ_DATA, dev);
-
- skb->protocol=eth_type_trans(skb,dev);
-
- netif_rx(skb);
-
- /* update stats */
- dev->stats.rx_packets++; /* count all receives */
- dev->stats.rx_bytes += size; /* count all received bytes */
-
- /*
- * If any worth-while packets have been received, netif_rx()
- * will work on them when we get to the tasklets.
- */
-}
-
-static const struct net_device_ops de600_netdev_ops = {
- .ndo_open = de600_open,
- .ndo_stop = de600_close,
- .ndo_start_xmit = de600_start_xmit,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static struct net_device * __init de600_probe(void)
-{
- int i;
- struct net_device *dev;
- int err;
-
- dev = alloc_etherdev(0);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
-
- if (!request_region(DE600_IO, 3, "de600")) {
- printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO);
- err = -EBUSY;
- goto out;
- }
-
- printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name);
- /* Alpha testers must have the version number to report bugs. */
- pr_debug("%s", version);
-
- /* probe for adapter */
- err = -ENODEV;
- rx_page = 0;
- select_nic();
- (void)de600_read_status(dev);
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
- if (de600_read_status(dev) & 0xf0) {
- printk(": not at I/O %#3x.\n", DATA_PORT);
- goto out1;
- }
-
- /*
- * Maybe we found one,
- * have to check if it is a D-Link DE-600 adapter...
- */
-
- /* Get the adapter ethernet address from the ROM */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
- dev->broadcast[i] = 0xff;
- }
-
- /* Check magic code */
- if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
- /* OK, install real address */
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x80;
- dev->dev_addr[2] = 0xc8;
- dev->dev_addr[3] &= 0x0f;
- dev->dev_addr[3] |= 0x70;
- } else {
- printk(" not identified in the printer port\n");
- goto out1;
- }
-
- printk(", Ethernet Address: %pM\n", dev->dev_addr);
-
- dev->netdev_ops = &de600_netdev_ops;
-
- dev->flags&=~IFF_MULTICAST;
-
- select_prn();
-
- err = register_netdev(dev);
- if (err)
- goto out1;
-
- return dev;
-
-out1:
- release_region(DE600_IO, 3);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static int adapter_init(struct net_device *dev)
-{
- int i;
-
- select_nic();
- rx_page = 0; /* used by RESET */
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
-
- /* Check if it is still there... */
- /* Get some bytes of the adapter ethernet address from the ROM */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- de600_read_byte(READ_DATA, dev);
- if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
- (de600_read_byte(READ_DATA, dev) != 0x15)) {
- /* was: if (de600_read_status(dev) & 0xf0) { */
- printk("Something has happened to the DE-600! Please check it and do a new ifconfig!\n");
- /* Goodbye, cruel world... */
- dev->flags &= ~IFF_UP;
- de600_close(dev);
- was_down = 1;
- netif_stop_queue(dev); /* Transmit busy... */
- return 1; /* failed */
- }
-
- if (was_down) {
- printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name);
- was_down = 0;
- }
-
- tx_fifo_in = 0;
- tx_fifo_out = 0;
- free_tx_pages = TX_PAGES;
-
-
- /* set the ether address. */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- for (i = 0; i < ETH_ALEN; i++)
- de600_put_byte(dev->dev_addr[i]);
-
- /* where to start saving incoming packets */
- rx_page = RX_BP | RX_BASE_PAGE;
- de600_setup_address(MEM_4K, RW_ADDR);
- /* Enable receiver */
- de600_put_command(RX_ENABLE);
- select_prn();
-
- netif_start_queue(dev);
-
- return 0; /* OK */
-}
-
-static struct net_device *de600_dev;
-
-static int __init de600_init(void)
-{
- de600_dev = de600_probe();
- if (IS_ERR(de600_dev))
- return PTR_ERR(de600_dev);
- return 0;
-}
-
-static void __exit de600_exit(void)
-{
- unregister_netdev(de600_dev);
- release_region(DE600_IO, 3);
- free_netdev(de600_dev);
-}
-
-module_init(de600_init);
-module_exit(de600_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de600.h b/drivers/net/ethernet/dlink/de600.h
deleted file mode 100644
index e80ecbabcf4e..000000000000
--- a/drivers/net/ethernet/dlink/de600.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/**************************************************
- * *
- * Definition of D-Link Ethernet Pocket adapter *
- * *
- **************************************************/
-/*
- * D-Link Ethernet pocket adapter ports
- */
-/*
- * OK, so I'm cheating, but there are an awful lot of
- * reads and writes in order to get anything in and out
- * of the DE-600 with 4 bits at a time in the parallel port,
- * so every saved instruction really helps :-)
- */
-
-#ifndef DE600_IO
-#define DE600_IO 0x378
-#endif
-
-#define DATA_PORT (DE600_IO)
-#define STATUS_PORT (DE600_IO + 1)
-#define COMMAND_PORT (DE600_IO + 2)
-
-#ifndef DE600_IRQ
-#define DE600_IRQ 7
-#endif
-/*
- * It really should look like this, and autoprobing as well...
- *
-#define DATA_PORT (dev->base_addr + 0)
-#define STATUS_PORT (dev->base_addr + 1)
-#define COMMAND_PORT (dev->base_addr + 2)
-#define DE600_IRQ dev->irq
- */
-
-/*
- * D-Link COMMAND_PORT commands
- */
-#define SELECT_NIC 0x04 /* select Network Interface Card */
-#define SELECT_PRN 0x1c /* select Printer */
-#define NML_PRN 0xec /* normal Printer situation */
-#define IRQEN 0x10 /* enable IRQ line */
-
-/*
- * D-Link STATUS_PORT
- */
-#define RX_BUSY 0x80
-#define RX_GOOD 0x40
-#define TX_FAILED16 0x10
-#define TX_BUSY 0x08
-
-/*
- * D-Link DATA_PORT commands
- * command in low 4 bits
- * data in high 4 bits
- * select current data nibble with HI_NIBBLE bit
- */
-#define WRITE_DATA 0x00 /* write memory */
-#define READ_DATA 0x01 /* read memory */
-#define STATUS 0x02 /* read status register */
-#define COMMAND 0x03 /* write command register (see COMMAND below) */
-#define NULL_COMMAND 0x04 /* null command */
-#define RX_LEN 0x05 /* read received packet length */
-#define TX_ADDR 0x06 /* set adapter transmit memory address */
-#define RW_ADDR 0x07 /* set adapter read/write memory address */
-#define HI_NIBBLE 0x08 /* read/write the high nibble of data,
- or-ed with rest of command */
-
-/*
- * command register, accessed through DATA_PORT with low bits = COMMAND
- */
-#define RX_ALL 0x01 /* PROMISCUOUS */
-#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */
-#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */
-
-#define TX_ENABLE 0x04 /* bit 2 */
-#define RX_ENABLE 0x08 /* bit 3 */
-
-#define RESET 0x80 /* set bit 7 high */
-#define STOP_RESET 0x00 /* set bit 7 low */
-
-/*
- * data to command register
- * (high 4 bits in write to DATA_PORT)
- */
-#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */
-#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */
-#define FLIP_IRQ 0x40 /* bit 6 */
-
-/*
- * D-Link adapter internal memory:
- *
- * 0-2K 1:st transmit page (send from pointer up to 2K)
- * 2-4K 2:nd transmit page (send from pointer up to 4K)
- *
- * 4-6K 1:st receive page (data from 4K upwards)
- * 6-8K 2:nd receive page (data from 6K upwards)
- *
- * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address)
- */
-#define MEM_2K 0x0800 /* 2048 */
-#define MEM_4K 0x1000 /* 4096 */
-#define MEM_6K 0x1800 /* 6144 */
-#define NODE_ADDRESS 0x2000 /* 8192 */
-
-#define RUNT 60 /* Too small Ethernet packet */
-
-/**************************************************
- * *
- * End of definition *
- * *
- **************************************************/
-
-/*
- * Index to functions, as function prototypes.
- */
-/* Routines used internally. (See "convenience macros") */
-static u8 de600_read_status(struct net_device *dev);
-static u8 de600_read_byte(unsigned char type, struct net_device *dev);
-
-/* Put in the device structure. */
-static int de600_open(struct net_device *dev);
-static int de600_close(struct net_device *dev);
-static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev);
-
-/* Dispatch from interrupts. */
-static irqreturn_t de600_interrupt(int irq, void *dev_id);
-static int de600_tx_intr(struct net_device *dev, int irq_status);
-static void de600_rx_intr(struct net_device *dev);
-
-/* Initialization */
-static void trigger_interrupt(struct net_device *dev);
-static int adapter_init(struct net_device *dev);
-
-/*
- * Convenience macros/functions for D-Link adapter
- */
-
-#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN
-#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN
-
-/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
-#define de600_put_byte(data) ( \
- outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
- outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))
-
-/*
- * The first two outb_p()'s below could perhaps be deleted if there
- * were more delay in the last two. Not certain about it yet...
- */
-#define de600_put_command(cmd) ( \
- outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
- outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
- outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \
- outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))
-
-#define de600_setup_address(addr,type) ( \
- outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \
- outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \
- outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \
- outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT))
-
-#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K))
-
-/* Flip bit, only 2 pages */
-#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)
-
-#define tx_page_adr(a) (((a) + 1) * MEM_2K)
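
A minimal host-side sketch of the nibble arithmetic behind the de600_put_byte()/de600_read_byte() helpers above: every byte crosses the printer port as two 4-bit transfers, with HI_NIBBLE selecting which half travels. No port I/O is performed; the data value is illustrative.

/* Sketch of the DE-600 nibble protocol arithmetic: a data byte is written
 * as two DATA_PORT writes (low nibble first, then the high nibble with
 * HI_NIBBLE set), and read back as two 4-bit reads in the top half of the
 * status port.  Pure host-side arithmetic, mirroring the macros above.
 */
#include <stdio.h>

#define WRITE_DATA 0x00
#define HI_NIBBLE  0x08

int main(void)
{
	unsigned char data = 0xA7;

	/* What de600_put_byte(data) would place on DATA_PORT: */
	unsigned char first  = (unsigned char)((data << 4) | WRITE_DATA);		/* low nibble in bits 7..4 */
	unsigned char second = (unsigned char)((data & 0xf0) | WRITE_DATA | HI_NIBBLE);	/* high nibble in bits 7..4 */

	/* What de600_read_byte() reassembles from two STATUS_PORT reads,
	 * the adapter returning each nibble in the top four bits: */
	unsigned char status_lo = (unsigned char)(data << 4);	/* simulated first read */
	unsigned char status_hi = (unsigned char)(data & 0xf0);	/* simulated second read */
	unsigned char rebuilt   = (unsigned char)((status_hi & 0xf0) | (status_lo >> 4));

	printf("writes: %02x %02x, rebuilt read: %02x\n", first, second, rebuilt);
	return 0;	/* prints "writes: 70 a8, rebuilt read: a7" */
}
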
diff --git a/drivers/net/ethernet/dlink/de620.c b/drivers/net/ethernet/dlink/de620.c
deleted file mode 100644
index 2e2bc60ee811..000000000000
--- a/drivers/net/ethernet/dlink/de620.c
+++ /dev/null
@@ -1,987 +0,0 @@
-/*
- * de620.c $Revision: 1.40 $ BETA
- *
- *
- * Linux driver for the D-Link DE-620 Ethernet pocket adapter.
- *
- * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
- *
- * Based on adapter information gathered from DOS packetdriver
- * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
- * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
- * Copyright, 1988, Russell Nelson, Crynwr Software
- *
- * Adapted to the sample network driver core for linux,
- * written by: Donald Becker <becker@super.org>
- * (Now at <becker@scyld.com>)
- *
- * Valuable assistance from:
- * J. Joshua Kopper <kopper@rtsg.mot.com>
- * Olav Kvittem <Olav.Kvittem@uninett.no>
- * Germano Caronni <caronni@nessie.cs.id.ethz.ch>
- * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
- *
- *****************************************************************************/
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *****************************************************************************/
-static const char version[] =
- "de620.c: $Revision: 1.40 $, Bjorn Ekwall <bj0rn@blox.se>\n";
-
-/***********************************************************************
- *
- * "Tuning" section.
- *
- * Compile-time options: (see below for descriptions)
- * -DDE620_IO=0x378 (lpt1)
- * -DDE620_IRQ=7 (lpt1)
- * -DSHUTDOWN_WHEN_LOST
- * -DCOUNT_LOOPS
- * -DLOWSPEED
- * -DREAD_DELAY
- * -DWRITE_DELAY
- */
-
-/*
- * This driver assumes that the printer port is a "normal",
- * dumb, uni-directional port!
- * If your port is "fancy" in any way, please try to set it to "normal"
- * with your BIOS setup. I have no access to machines with bi-directional
- * ports, so I can't test such a driver :-(
- * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
- *
- * There are some clones of DE620 out there, with different names.
- * If the current driver does not recognize a clone, try to change
- * the following #define to:
- *
- * #define DE620_CLONE 1
- */
-#define DE620_CLONE 0
-
-/*
- * If the adapter has problems with high speeds, enable this #define
- * otherwise full printerport speed will be attempted.
- *
- * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
- *
-#define LOWSPEED
- */
-
-#ifndef READ_DELAY
-#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
-#endif
-
-#ifndef WRITE_DELAY
-#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
-#endif
-
-/*
- * Enable this #define if you want the adapter to do a "ifconfig down" on
- * itself when we have detected that something is possibly wrong with it.
- * The default behaviour is to retry with "adapter_init()" until success.
- * This should be used for debugging purposes only.
- *
-#define SHUTDOWN_WHEN_LOST
- */
-
-#ifdef LOWSPEED
-/*
- * Enable this #define if you want to see debugging output that show how long
- * we have to wait before the DE-620 is ready for the next read/write/command.
- *
-#define COUNT_LOOPS
- */
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/io.h>
-
-/* Constant definitions for the DE-620 registers, commands and bits */
-#include "de620.h"
-
-typedef unsigned char byte;
-
-/*******************************************************
- * *
- * Definition of D-Link DE-620 Ethernet Pocket adapter *
- * See also "de620.h" *
- * *
- *******************************************************/
-#ifndef DE620_IO /* Compile-time configurable */
-#define DE620_IO 0x378
-#endif
-
-#ifndef DE620_IRQ /* Compile-time configurable */
-#define DE620_IRQ 7
-#endif
-
-#define DATA_PORT (dev->base_addr)
-#define STATUS_PORT (dev->base_addr + 1)
-#define COMMAND_PORT (dev->base_addr + 2)
-
-#define RUNT 60 /* Too small Ethernet packet */
-#define GIANT 1514 /* largest legal size packet, no fcs */
-
-/*
- * Force media with insmod:
- * insmod de620.o bnc=1
- * or
- * insmod de620.o utp=1
- *
- * Force io and/or irq with insmod:
- * insmod de620.o io=0x378 irq=7
- *
- * Make a clone skip the Ethernet-address range check:
- * insmod de620.o clone=1
- */
-static int bnc;
-static int utp;
-static int io = DE620_IO;
-static int irq = DE620_IRQ;
-static int clone = DE620_CLONE;
-
-static spinlock_t de620_lock;
-
-module_param(bnc, int, 0);
-module_param(utp, int, 0);
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(clone, int, 0);
-MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
-MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
-MODULE_PARM_DESC(io, "DE-620 I/O base address, required");
-MODULE_PARM_DESC(irq, "DE-620 IRQ number, required");
-MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
-
-/***********************************************
- * *
- * Index to functions, as function prototypes. *
- * *
- ***********************************************/
-
-/*
- * Routines used internally. (See also "convenience macros.. below")
- */
-
-/* Put in the device structure. */
-static int de620_open(struct net_device *);
-static int de620_close(struct net_device *);
-static void de620_set_multicast_list(struct net_device *);
-static int de620_start_xmit(struct sk_buff *, struct net_device *);
-
-/* Dispatch from interrupts. */
-static irqreturn_t de620_interrupt(int, void *);
-static int de620_rx_intr(struct net_device *);
-
-/* Initialization */
-static int adapter_init(struct net_device *);
-static int read_eeprom(struct net_device *);
-
-
-/*
- * D-Link driver variables:
- */
-#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX
-#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
-#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
-#define DEF_NIC_CMD IRQEN | ICEN | DS1
-
-static volatile byte NIC_Cmd;
-static volatile byte next_rx_page;
-static byte first_rx_page;
-static byte last_rx_page;
-static byte EIPRegister;
-
-static struct nic {
- byte NodeID[6];
- byte RAM_Size;
- byte Model;
- byte Media;
- byte SCR;
-} nic_data;
-
-/**********************************************************
- * *
- * Convenience macros/functions for D-Link DE-620 adapter *
- * *
- **********************************************************/
-#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
-#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT);
-
-/* Check for ready-status, and return a nibble (high 4 bits) for data input */
-#ifdef COUNT_LOOPS
-static int tot_cnt;
-#endif
-static inline byte
-de620_ready(struct net_device *dev)
-{
- byte value;
- register short int cnt = 0;
-
- while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000))
- ++cnt;
-
-#ifdef COUNT_LOOPS
- tot_cnt += cnt;
-#endif
- return value & 0xf0; /* nibble */
-}
-
-static inline void
-de620_send_command(struct net_device *dev, byte cmd)
-{
- de620_ready(dev);
- if (cmd == W_DUMMY)
- outb(NIC_Cmd, COMMAND_PORT);
-
- outb(cmd, DATA_PORT);
-
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
- de620_ready(dev);
- outb(NIC_Cmd, COMMAND_PORT);
-}
-
-static inline void
-de620_put_byte(struct net_device *dev, byte value)
-{
- /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
- de620_ready(dev);
- outb(value, DATA_PORT);
- de620_flip_ds(dev);
-}
-
-static inline byte
-de620_read_byte(struct net_device *dev)
-{
- byte value;
-
- /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
- value = de620_ready(dev); /* High nibble */
- de620_flip_ds(dev);
- value |= de620_ready(dev) >> 4; /* Low nibble */
- return value;
-}
-
-static inline void
-de620_write_block(struct net_device *dev, byte *buffer, int count, int pad)
-{
-#ifndef LOWSPEED
- byte uflip = NIC_Cmd ^ (DS0 | DS1);
- byte dflip = NIC_Cmd;
-#else /* LOWSPEED */
-#ifdef COUNT_LOOPS
- int bytes = count;
-#endif /* COUNT_LOOPS */
-#endif /* LOWSPEED */
-
-#ifdef LOWSPEED
-#ifdef COUNT_LOOPS
- tot_cnt = 0;
-#endif /* COUNT_LOOPS */
- /* No further optimization useful, the limit is in the adapter. */
- for ( ; count > 0; --count, ++buffer) {
- de620_put_byte(dev,*buffer);
- }
- for ( count = pad ; count > 0; --count, ++buffer) {
- de620_put_byte(dev, 0);
- }
- de620_send_command(dev,W_DUMMY);
-#ifdef COUNT_LOOPS
- /* trial debug output: loops per byte in de620_ready() */
- printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
-#endif /* COUNT_LOOPS */
-#else /* not LOWSPEED */
- for ( ; count > 0; count -=2) {
- outb(*buffer++, DATA_PORT);
- outb(uflip, COMMAND_PORT);
- outb(*buffer++, DATA_PORT);
- outb(dflip, COMMAND_PORT);
- }
- de620_send_command(dev,W_DUMMY);
-#endif /* LOWSPEED */
-}
-
-static inline void
-de620_read_block(struct net_device *dev, byte *data, int count)
-{
-#ifndef LOWSPEED
- byte value;
- byte uflip = NIC_Cmd ^ (DS0 | DS1);
- byte dflip = NIC_Cmd;
-#else /* LOWSPEED */
-#ifdef COUNT_LOOPS
- int bytes = count;
-
- tot_cnt = 0;
-#endif /* COUNT_LOOPS */
-#endif /* LOWSPEED */
-
-#ifdef LOWSPEED
- /* No further optimization useful, the limit is in the adapter. */
- while (count-- > 0) {
- *data++ = de620_read_byte(dev);
- de620_flip_ds(dev);
- }
-#ifdef COUNT_LOOPS
- /* trial debug output: loops per byte in de620_ready() */
- printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
-#endif /* COUNT_LOOPS */
-#else /* not LOWSPEED */
- while (count-- > 0) {
- value = inb(STATUS_PORT) & 0xf0; /* High nibble */
- outb(uflip, COMMAND_PORT);
- *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
- outb(dflip , COMMAND_PORT);
- }
-#endif /* LOWSPEED */
-}
-
-static inline void
-de620_set_delay(struct net_device *dev)
-{
- de620_ready(dev);
- outb(W_DFR, DATA_PORT);
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
-
- de620_ready(dev);
-#ifdef LOWSPEED
- outb(WRITE_DELAY, DATA_PORT);
-#else
- outb(0, DATA_PORT);
-#endif
- de620_flip_ds(dev);
-
- de620_ready(dev);
-#ifdef LOWSPEED
- outb(READ_DELAY, DATA_PORT);
-#else
- outb(0, DATA_PORT);
-#endif
- de620_flip_ds(dev);
-}
-
-static inline void
-de620_set_register(struct net_device *dev, byte reg, byte value)
-{
- de620_ready(dev);
- outb(reg, DATA_PORT);
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
-
- de620_put_byte(dev, value);
-}
-
-static inline byte
-de620_get_register(struct net_device *dev, byte reg)
-{
- byte value;
-
- de620_send_command(dev,reg);
- value = de620_read_byte(dev);
- de620_send_command(dev,W_DUMMY);
-
- return value;
-}
-
-/*********************************************************************
- *
- * Open/initialize the board.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- *
- */
-static int de620_open(struct net_device *dev)
-{
- int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev);
- if (ret) {
- printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
- return ret;
- }
-
- if (adapter_init(dev)) {
- ret = -EIO;
- goto out_free_irq;
- }
-
- netif_start_queue(dev);
- return 0;
-
-out_free_irq:
- free_irq(dev->irq, dev);
- return ret;
-}
-
-/************************************************
- *
- * The inverse routine to de620_open().
- *
- */
-
-static int de620_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- /* disable recv */
- de620_set_register(dev, W_TCR, RXOFF);
- free_irq(dev->irq, dev);
- return 0;
-}
-
-/*********************************************
- *
- * Set or clear the multicast filter for this adaptor.
- * (no real multicast implemented for the DE-620, but she can be promiscuous...)
- *
- */
-
-static void de620_set_multicast_list(struct net_device *dev)
-{
- if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
- { /* Enable promiscuous mode */
- de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
- }
- else
- { /* Disable promiscuous mode, use normal mode */
- de620_set_register(dev, W_TCR, TCR_DEF);
- }
-}
-
-/*******************************************************
- *
- * Handle timeouts on transmit
- */
-
-static void de620_timeout(struct net_device *dev)
-{
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, "network cable problem");
- /* Restart the adapter. */
- if (!adapter_init(dev)) /* maybe close it */
- netif_wake_queue(dev);
-}
-
-/*******************************************************
- *
- * Copy a buffer to the adapter transmit page memory.
- * Start sending.
- */
-static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- int len;
- byte *buffer = skb->data;
- byte using_txbuf;
-
- using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */
-
- netif_stop_queue(dev);
-
-
- if ((len = skb->len) < RUNT)
- len = RUNT;
- if (len & 1) /* send an even number of bytes */
- ++len;
-
- /* Start real output */
-
- spin_lock_irqsave(&de620_lock, flags);
- pr_debug("de620_start_xmit: len=%d, bufs 0x%02x\n",
- (int)skb->len, using_txbuf);
-
- /* select a free tx buffer. if there is one... */
- switch (using_txbuf) {
- default: /* both are free: use TXBF0 */
- case TXBF1: /* use TXBF0 */
- de620_send_command(dev,W_CR | RW0);
- using_txbuf |= TXBF0;
- break;
-
- case TXBF0: /* use TXBF1 */
- de620_send_command(dev,W_CR | RW1);
- using_txbuf |= TXBF1;
- break;
-
- case (TXBF0 | TXBF1): /* NONE!!! */
- printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
- spin_unlock_irqrestore(&de620_lock, flags);
- return NETDEV_TX_BUSY;
- }
- de620_write_block(dev, buffer, skb->len, len-skb->len);
-
- if(!(using_txbuf == (TXBF0 | TXBF1)))
- netif_wake_queue(dev);
-
- dev->stats.tx_packets++;
- spin_unlock_irqrestore(&de620_lock, flags);
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-/*****************************************************
- *
- * Handle the network interface interrupts.
- *
- */
-static irqreturn_t
-de620_interrupt(int irq_in, void *dev_id)
-{
- struct net_device *dev = dev_id;
- byte irq_status;
- int bogus_count = 0;
- int again = 0;
-
- spin_lock(&de620_lock);
-
- /* Read the status register (_not_ the status port) */
- irq_status = de620_get_register(dev, R_STS);
-
- pr_debug("de620_interrupt (%2.2X)\n", irq_status);
-
- if (irq_status & RXGOOD) {
- do {
- again = de620_rx_intr(dev);
- pr_debug("again=%d\n", again);
- }
- while (again && (++bogus_count < 100));
- }
-
- if(de620_tx_buffs(dev) != (TXBF0 | TXBF1))
- netif_wake_queue(dev);
-
- spin_unlock(&de620_lock);
- return IRQ_HANDLED;
-}
-
-/**************************************
- *
- * Get a packet from the adapter
- *
- * Send it "upstairs"
- *
- */
-static int de620_rx_intr(struct net_device *dev)
-{
- struct header_buf {
- byte status;
- byte Rx_NextPage;
- unsigned short Rx_ByteCount;
- } header_buf;
- struct sk_buff *skb;
- int size;
- byte *buffer;
- byte pagelink;
- byte curr_page;
-
- pr_debug("de620_rx_intr: next_rx_page = %d\n", next_rx_page);
-
- /* Tell the adapter that we are going to read data, and from where */
- de620_send_command(dev, W_CR | RRN);
- de620_set_register(dev, W_RSA1, next_rx_page);
- de620_set_register(dev, W_RSA0, 0);
-
- /* Deep breath, and away we goooooo */
- de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
- pr_debug("page status=0x%02x, nextpage=%d, packetsize=%d\n",
- header_buf.status, header_buf.Rx_NextPage,
- header_buf.Rx_ByteCount);
-
- /* Plausible page header? */
- pagelink = header_buf.Rx_NextPage;
- if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
- /* Ouch... Forget it! Skip all and start afresh... */
- printk(KERN_WARNING "%s: Ring overrun? Restoring...\n", dev->name);
- /* You win some, you lose some. And sometimes plenty... */
- adapter_init(dev);
- netif_wake_queue(dev);
- dev->stats.rx_over_errors++;
- return 0;
- }
-
- /* OK, this look good, so far. Let's see if it's consistent... */
- /* Let's compute the start of the next packet, based on where we are */
- pagelink = next_rx_page +
- ((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);
-
- /* Are we going to wrap around the page counter? */
- if (pagelink > last_rx_page)
- pagelink -= (last_rx_page - first_rx_page + 1);
-
- /* Is the _computed_ next page number equal to what the adapter says? */
- if (pagelink != header_buf.Rx_NextPage) {
- /* Naah, we'll skip this packet. Probably bogus data as well */
- printk(KERN_WARNING "%s: Page link out of sync! Restoring...\n", dev->name);
- next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
- de620_send_command(dev, W_DUMMY);
- de620_set_register(dev, W_NPRF, next_rx_page);
- dev->stats.rx_over_errors++;
- return 0;
- }
- next_rx_page = pagelink;
-
- size = header_buf.Rx_ByteCount - 4;
- if ((size < RUNT) || (GIANT < size)) {
- printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
- }
- else { /* Good packet? */
- skb = netdev_alloc_skb(dev, size + 2);
- if (skb == NULL) { /* Yeah, but no place to put it... */
- printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
- dev->stats.rx_dropped++;
- }
- else { /* Yep! Go get it! */
- skb_reserve(skb,2); /* Align */
- /* skb->data points to the start of sk_buff data area */
- buffer = skb_put(skb,size);
- /* copy the packet into the buffer */
- de620_read_block(dev, buffer, size);
- pr_debug("Read %d bytes\n", size);
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb); /* deliver it "upstairs" */
- /* count all receives */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += size;
- }
- }
-
- /* Let's peek ahead to see if we have read the last current packet */
- /* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
- curr_page = de620_get_register(dev, R_CPR);
- de620_set_register(dev, W_NPRF, next_rx_page);
- pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);
-
- return next_rx_page != curr_page; /* That was slightly tricky... */
-}
-
-/*********************************************
- *
- * Reset the adapter to a known state
- *
- */
-static int adapter_init(struct net_device *dev)
-{
- int i;
- static int was_down;
-
- if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
- EIPRegister = NCTL0;
- if (nic_data.Media != 1)
- EIPRegister |= NIS0; /* not BNC */
- }
- else if (nic_data.Model == 2) { /* UTP */
- EIPRegister = NCTL0 | NIS0;
- }
-
- if (utp)
- EIPRegister = NCTL0 | NIS0;
- if (bnc)
- EIPRegister = NCTL0;
-
- de620_send_command(dev, W_CR | RNOP | CLEAR);
- de620_send_command(dev, W_CR | RNOP);
-
- de620_set_register(dev, W_SCR, SCR_DEF);
- /* disable recv to wait init */
- de620_set_register(dev, W_TCR, RXOFF);
-
- /* Set the node ID in the adapter */
- for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
- de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
- }
-
- de620_set_register(dev, W_EIP, EIPRegister);
-
- next_rx_page = first_rx_page = DE620_RX_START_PAGE;
- if (nic_data.RAM_Size)
- last_rx_page = nic_data.RAM_Size - 1;
- else /* 64k RAM */
- last_rx_page = 255;
-
- de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
- de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
- de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
- de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
- de620_send_command(dev, W_DUMMY);
- de620_set_delay(dev);
-
- /* Final sanity check: Anybody out there? */
- /* Let's hope some bits from the statusregister make a good check */
-#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
-#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
- /* success: X 0 0 X 0 0 X X */
- /* ignore: EEDI RXGOOD COLS LNKS*/
-
- if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
- printk(KERN_ERR "%s: Something has happened to the DE-620! Please check it"
-#ifdef SHUTDOWN_WHEN_LOST
- " and do a new ifconfig"
-#endif
- "! (%02x)\n", dev->name, i);
-#ifdef SHUTDOWN_WHEN_LOST
- /* Goodbye, cruel world... */
- dev->flags &= ~IFF_UP;
- de620_close(dev);
-#endif
- was_down = 1;
- return 1; /* failed */
- }
- if (was_down) {
- printk(KERN_WARNING "%s: Thanks, I feel much better now!\n", dev->name);
- was_down = 0;
- }
-
- /* All OK, go ahead... */
- de620_set_register(dev, W_TCR, TCR_DEF);
-
- return 0; /* all ok */
-}
-
-static const struct net_device_ops de620_netdev_ops = {
- .ndo_open = de620_open,
- .ndo_stop = de620_close,
- .ndo_start_xmit = de620_start_xmit,
- .ndo_tx_timeout = de620_timeout,
- .ndo_set_rx_mode = de620_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/******************************************************************************
- *
- * Only start-up code below
- *
- */
-/****************************************
- *
- * Check if there is a DE-620 connected
- */
-struct net_device * __init de620_probe(int unit)
-{
- byte checkbyte = 0xa5;
- struct net_device *dev;
- int err = -ENOMEM;
- int i;
-
- dev = alloc_etherdev(0);
- if (!dev)
- goto out;
-
- spin_lock_init(&de620_lock);
-
- /*
- * This is where the base_addr and irq get set.
- * Tunable at compile-time and insmod-time
- */
- dev->base_addr = io;
- dev->irq = irq;
-
- /* allow overriding parameters on command line */
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- pr_debug("%s", version);
-
- printk(KERN_INFO "D-Link DE-620 pocket adapter");
-
- if (!request_region(dev->base_addr, 3, "de620")) {
- printk(" io 0x%3lX, which is busy.\n", dev->base_addr);
- err = -EBUSY;
- goto out1;
- }
-
- /* Initially, configure basic nibble mode, so we can read the EEPROM */
- NIC_Cmd = DEF_NIC_CMD;
- de620_set_register(dev, W_EIP, EIPRegister);
-
- /* Anybody out there? */
- de620_set_register(dev, W_CPR, checkbyte);
- checkbyte = de620_get_register(dev, R_CPR);
-
- if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
- printk(" not identified in the printer port\n");
- err = -ENODEV;
- goto out2;
- }
-
- /* else, got it! */
- dev->dev_addr[0] = nic_data.NodeID[0];
- for (i = 1; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = nic_data.NodeID[i];
- dev->broadcast[i] = 0xff;
- }
-
- printk(", Ethernet Address: %pM", dev->dev_addr);
-
- printk(" (%dk RAM,",
- (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);
-
- if (nic_data.Media == 1)
- printk(" BNC)\n");
- else
- printk(" UTP)\n");
-
- dev->netdev_ops = &de620_netdev_ops;
- dev->watchdog_timeo = HZ*2;
-
- /* base_addr and irq are already set, see above! */
-
- /* dump eeprom */
- pr_debug("\nEEPROM contents:\n"
- "RAM_Size = 0x%02X\n"
- "NodeID = %pM\n"
- "Model = %d\n"
- "Media = %d\n"
- "SCR = 0x%02x\n", nic_data.RAM_Size, nic_data.NodeID,
- nic_data.Model, nic_data.Media, nic_data.SCR);
-
- err = register_netdev(dev);
- if (err)
- goto out2;
- return dev;
-
-out2:
- release_region(dev->base_addr, 3);
-out1:
- free_netdev(dev);
-out:
- return ERR_PTR(err);
-}
-
-/**********************************
- *
- * Read info from on-board EEPROM
- *
- * Note: Bitwise serial I/O to/from the EEPROM via the status _register_!
- */
-#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
-
-static unsigned short __init ReadAWord(struct net_device *dev, int from)
-{
- unsigned short data;
- int nbits;
-
- /* cs [__~~] SET SEND STATE */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);
-
- /* Send the 9-bit address from where we want to read the 16-bit word */
- for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
- if (from & 0x0100) { /* bit set? */
- /* cs [~~~~] SEND 1 */
- /* di [~~~~] */
- /* sck [_~~_] */
- sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
- }
- else {
- /* cs [~~~~] SEND 0 */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
- }
- }
-
- /* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
- for (data = 0, nbits = 16; nbits > 0; --nbits) {
- /* cs [~~~~] SEND 0 */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
- data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
- }
- /* cs [____] RESET SEND STATE */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);
-
- return data;
-}
-
-static int __init read_eeprom(struct net_device *dev)
-{
- unsigned short wrd;
-
- /* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX:XX */
- wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */
- if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
- return -1; /* Nope, not a DE-620 */
- nic_data.NodeID[0] = wrd & 0xff;
- nic_data.NodeID[1] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */
- if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
- return -1; /* Nope, not a DE-620 */
- nic_data.NodeID[2] = wrd & 0xff;
- nic_data.NodeID[3] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */
- nic_data.NodeID[4] = wrd & 0xff;
- nic_data.NodeID[5] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */
- nic_data.RAM_Size = (wrd >> 8);
-
- wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */
- nic_data.Model = (wrd & 0xff);
-
- wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
- nic_data.Media = (wrd & 0xff);
-
- wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
- nic_data.SCR = (wrd >> 8);
-
- return 0; /* no errors */
-}
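
[Editorial aside, not part of the patch: each ReadAWord() result packs two consecutive NodeID bytes, low byte first, and the first two words are sanity-checked against the D-Link OUI 00:80:c8. A small sketch of that unpacking, assuming the words were read as shown above:]

    #include <stdint.h>
    #include <stdio.h>

    /* Unpack three 16-bit EEPROM words into a 6-byte MAC, low byte first,
     * and reject anything that does not start with the D-Link OUI 00:80:c8. */
    static int unpack_node_id(const uint16_t wrd[3], uint8_t mac[6])
    {
        int i;

        for (i = 0; i < 3; i++) {
            mac[2 * i]     = wrd[i] & 0xff;
            mac[2 * i + 1] = wrd[i] >> 8;
        }
        return (mac[0] == 0x00 && mac[1] == 0x80 && mac[2] == 0xc8) ? 0 : -1;
    }

    int main(void)
    {
        uint16_t words[3] = { 0x8000, 0x11c8, 0x3322 }; /* example EEPROM contents */
        uint8_t mac[6];

        if (unpack_node_id(words, mac) == 0)
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }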
-
-/******************************************************************************
- *
- * Loadable module skeleton
- *
- */
-#ifdef MODULE
-static struct net_device *de620_dev;
-
-int __init init_module(void)
-{
- de620_dev = de620_probe(-1);
- if (IS_ERR(de620_dev))
- return PTR_ERR(de620_dev);
- return 0;
-}
-
-void cleanup_module(void)
-{
- unregister_netdev(de620_dev);
- release_region(de620_dev->base_addr, 3);
- free_netdev(de620_dev);
-}
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de620.h b/drivers/net/ethernet/dlink/de620.h
deleted file mode 100644
index e8d9a88f4cb5..000000000000
--- a/drivers/net/ethernet/dlink/de620.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*********************************************************
- * *
- * Definition of D-Link DE-620 Ethernet Pocket adapter *
- * *
- *********************************************************/
-
-/* DE-620's CMD port Command */
-#define CS0 0x08 /* 1->0 command strobe */
-#define ICEN 0x04 /* 0=enable DL3520 host interface */
-#define DS0 0x02 /* 1->0 data strobe 0 */
-#define DS1 0x01 /* 1->0 data strobe 1 */
-
-#define WDIR 0x20 /* general 0=read 1=write */
-#define RDIR 0x00 /* (not 100% confirmed) */
-#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */
-#define PS2RDIR 0x20
-
-#define IRQEN 0x10 /* 1 = enable printer IRQ line */
-#define SELECTIN 0x08 /* 1 = select printer */
-#define INITP 0x04 /* 0 = initial printer */
-#define AUTOFEED 0x02 /* 1 = printer auto form feed */
-#define STROBE 0x01 /* 0->1 data strobe */
-
-#define RESET 0x08
-#define NIS0 0x20 /* 0 = BNC, 1 = UTP */
-#define NCTL0 0x10
-
-/* DE-620 DIC Command */
-#define W_DUMMY 0x00 /* DIC reserved command */
-#define W_CR 0x20 /* DIC write command register */
-#define W_NPR 0x40 /* DIC write Next Page Register */
-#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */
-#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */
-
-/* DE-620's STAT port bits 7-4 */
-#define EMPTY 0x80 /* 1 = receive buffer empty */
-#define INTLEVEL 0x40 /* 1 = interrupt level is high */
-#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */
-#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */
-#define READY 0x08 /* 1 = h/w ready to accept cmd/data */
-
-/* IDC 1 Command */
-#define W_RSA1 0xa0 /* write remote start address 1 */
-#define W_RSA0 0xa1 /* write remote start address 0 */
-#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */
-#define W_DFR 0xa3 /* write delay factor register */
-#define W_CPR 0xa4 /* write current page register */
-#define W_SPR 0xa5 /* write start page register */
-#define W_EPR 0xa6 /* write end page register */
-#define W_SCR 0xa7 /* write system configuration register */
-#define W_TCR 0xa8 /* write Transceiver Configuration reg */
-#define W_EIP 0xa9 /* write EEPM Interface port */
-#define W_PAR0 0xaa /* write physical address register 0 */
-#define W_PAR1 0xab /* write physical address register 1 */
-#define W_PAR2 0xac /* write physical address register 2 */
-#define W_PAR3 0xad /* write physical address register 3 */
-#define W_PAR4 0xae /* write physical address register 4 */
-#define W_PAR5 0xaf /* write physical address register 5 */
-
-/* IDC 2 Command */
-#define R_STS 0xc0 /* read status register */
-#define R_CPR 0xc1 /* read current page register */
-#define R_BPR 0xc2 /* read boundary page register */
-#define R_TDR 0xc3 /* read time domain reflectometry reg */
-
-/* STATUS Register */
-#define EEDI 0x80 /* EEPM DO pin */
-#define TXSUC 0x40 /* tx success */
-#define T16 0x20 /* tx fail 16 times */
-#define TS1 0x40 /* 0=Tx success, 1=T16 */
-#define TS0 0x20 /* 0=Tx success, 1=T16 */
-#define RXGOOD 0x10 /* rx a good packet */
-#define RXCRC 0x08 /* rx a CRC error packet */
-#define RXSHORT 0x04 /* rx a short packet */
-#define COLS 0x02 /* coaxial collision status */
-#define LNKS 0x01 /* UTP link status */
-
-/* Command Register */
-#define CLEAR 0x10 /* reset part of hardware */
-#define NOPER 0x08 /* No Operation */
-#define RNOP 0x08
-#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */
-#define RRN 0x04 /* Normal Remote Read mode */
-#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */
-#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */
-#define TXEN 0x01 /* 0->1 tx enable */
-
-/* System Configuration Register */
-#define TESTON 0x80 /* test host data transfer reliability */
-#define SLEEP 0x40 /* sleep mode */
-#if 0
-#define FASTMODE 0x04 /* fast mode for intel 82360SL fast mode */
-#define BYTEMODE 0x02 /* byte mode */
-#else
-#define FASTMODE 0x20 /* fast mode for intel 82360SL fast mode */
-#define BYTEMODE 0x10 /* byte mode */
-#endif
-#define NIBBLEMODE 0x00 /* nibble mode */
-#define IRQINV 0x08 /* turn off IRQ line inverter */
-#define IRQNML 0x00 /* turn on IRQ line inverter */
-#define INTON 0x04
-#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */
-#define AUTOTX 0x01 /* auto tx when leave RW mode */
-
-/* Transceiver Configuration Register */
-#define JABBER 0x80 /* generate jabber condition */
-#define TXSUCINT 0x40 /* enable tx success interrupt */
-#define T16INT 0x20 /* enable T16 interrupt */
-#define RXERRPKT 0x10 /* accept CRC error or short packet */
-#define EXTERNALB2 0x0C /* external loopback 2 */
-#define EXTERNALB1 0x08 /* external loopback 1 */
-#define INTERNALB 0x04 /* internal loopback */
-#define NMLOPERATE 0x00 /* normal operation */
-#define RXPBM 0x03 /* rx physical, broadcast, multicast */
-#define RXPB 0x02 /* rx physical, broadcast */
-#define RXALL 0x01 /* rx all packet */
-#define RXOFF 0x00 /* rx disable */
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1d342d37915c..110d26f4c602 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1156,9 +1156,10 @@ set_multicast (struct net_device *dev)
static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, "dl2k");
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pdev));
+
+ strlcpy(info->driver, "dl2k", sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 28fc11b2f1ea..50d9c6315930 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -530,7 +530,6 @@ static int sundance_probe1(struct pci_dev *pdev,
for (i = 0; i < 3; i++)
((__le16 *)dev->dev_addr)[i] =
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
np = netdev_priv(dev);
np->base = ioaddr;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 2c177b329c8b..f3d60eb13c3a 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -281,11 +281,11 @@ static int dnet_mii_probe(struct net_device *dev)
/* attach the mac to the phy */
if (bp->capabilities & DNET_HAS_RMII) {
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &dnet_handle_link_change, 0,
+ &dnet_handle_link_change,
PHY_INTERFACE_MODE_RMII);
} else {
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &dnet_handle_link_change, 0,
+ &dnet_handle_link_change,
PHY_INTERFACE_MODE_MII);
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index abf26c7c1d19..28ceb8414185 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,15 +34,15 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.4.161.0u"
+#define DRV_VER "4.6.62.0u"
#define DRV_NAME "be2net"
-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME "Emulex BladeEngine2"
+#define BE3_NAME "Emulex BladeEngine3"
+#define OC_NAME "Emulex OneConnect"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
@@ -190,6 +190,7 @@ struct be_eq_obj {
u8 idx; /* array index */
u16 tx_budget;
+ u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
} ____cacheline_aligned_in_smp;
@@ -616,7 +617,7 @@ static inline bool be_error(struct be_adapter *adapter)
return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
}
-static inline bool be_crit_error(struct be_adapter *adapter)
+static inline bool be_hw_error(struct be_adapter *adapter)
{
return adapter->eeh_error || adapter->hw_error;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index f2875aa47661..071aea79d218 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -93,13 +93,16 @@ static void be_mcc_notify(struct be_adapter *adapter)
* little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
+ u32 flags;
+
if (compl->flags != 0) {
- compl->flags = le32_to_cpu(compl->flags);
- BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
- return true;
- } else {
- return false;
+ flags = le32_to_cpu(compl->flags);
+ if (flags & CQE_FLAGS_VALID_MASK) {
+ compl->flags = flags;
+ return true;
+ }
}
+ return false;
}
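
[Editorial aside, not part of the patch: the rewritten check only treats a completion as new once the byte-swapped flags actually carry the valid bit, instead of asserting on it. A user-space sketch of that defensive pattern, with a hypothetical from_le32() in place of le32_to_cpu():]

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CQE_FLAGS_VALID_MASK (1u << 31) /* illustrative bit position */

    struct mcc_compl {
        uint32_t flags; /* written by hardware, little endian */
    };

    /* Stand-in for le32_to_cpu(); a no-op on little-endian hosts. */
    static uint32_t from_le32(uint32_t v) { return v; }

    /* Only claim the entry once the valid bit is really set; a non-zero but
     * not-yet-valid word means "no new completion" rather than a BUG. */
    static bool compl_is_new(struct mcc_compl *c)
    {
        uint32_t flags;

        if (c->flags == 0)
            return false;

        flags = from_le32(c->flags);
        if (!(flags & CQE_FLAGS_VALID_MASK))
            return false;

        c->flags = flags; /* cache the CPU-order value for later use */
        return true;
    }

    int main(void)
    {
        struct mcc_compl c = { .flags = CQE_FLAGS_VALID_MASK | 0x5 };
        printf("new completion: %d\n", compl_is_new(&c));
        return 0;
    }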
/* Need to reset the entire word that houses the valid bit */
@@ -298,7 +301,12 @@ void be_async_mcc_enable(struct be_adapter *adapter)
void be_async_mcc_disable(struct be_adapter *adapter)
{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
adapter->mcc_obj.rearm_cq = false;
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
}
int be_process_mcc(struct be_adapter *adapter)
@@ -3133,6 +3141,39 @@ err:
return status;
}
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ int vf_num)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_iface_list *req;
+ struct be_cmd_resp_get_iface_list *resp;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
+ wrb, NULL);
+ req->hdr.domain = vf_num + 1;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ resp = (struct be_cmd_resp_get_iface_list *)req;
+ vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d6552e19ffee..96970860c915 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -203,6 +203,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
+#define OPCODE_COMMON_GET_IFACE_LIST 194
#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
#define OPCODE_ETH_RSS_CONFIG 1
@@ -1795,6 +1796,23 @@ static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
return flags & adapter->cmd_privileges ? true : false;
}
+/************** Get IFACE LIST *******************/
+struct be_if_desc {
+ u32 if_id;
+ u32 cap_flags;
+ u32 en_flags;
+};
+
+struct be_cmd_req_get_iface_list {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_get_iface_list {
+ struct be_cmd_req_hdr hdr;
+ u32 if_cnt;
+ struct be_if_desc if_desc;
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1917,4 +1935,6 @@ extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain);
+extern int be_cmd_get_if_id(struct be_adapter *adapter,
+ struct be_vf_cfg *vf_cfg, int vf_num);
extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 00454a10f88d..76b302f30c87 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -183,12 +183,12 @@ static void be_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
- if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
- strcat(drvinfo->fw_version, " [");
- strcat(drvinfo->fw_version, fw_on_flash);
- strcat(drvinfo->fw_version, "]");
- }
+ if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN))
+ strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%s [%s]", adapter->fw_ver, fw_on_flash);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
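
[Editorial aside, not part of the patch: the ethtool change above replaces open-coded strcat() calls with a single bounded snprintf(). A stand-alone illustration of the two firmware-version cases, using strncmp where the driver uses memcmp:]

    #include <stdio.h>
    #include <string.h>

    #define FW_VER_LEN 32

    /* Format "running [on-flash]" only when the two versions differ,
     * always staying within the destination buffer. */
    static void format_fw_version(char *dst, size_t len,
                                  const char *running, const char *on_flash)
    {
        if (strncmp(running, on_flash, FW_VER_LEN) == 0)
            snprintf(dst, len, "%s", running);
        else
            snprintf(dst, len, "%s [%s]", running, on_flash);
    }

    int main(void)
    {
        char buf[FW_VER_LEN];

        format_fw_version(buf, sizeof(buf), "4.6.62.0", "4.6.60.0");
        puts(buf); /* prints: 4.6.62.0 [4.6.60.0] */
        return 0;
    }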
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index f95612b907ae..3860888ac711 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -25,7 +25,7 @@
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
static unsigned int num_vfs;
@@ -1689,15 +1689,41 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
struct be_queue_info *rxq = &rxo->q;
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
+ struct be_adapter *adapter = rxo->adapter;
+ int flush_wait = 0;
u16 tail;
- /* First cleanup pending rx completions */
- while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
- be_rx_compl_discard(rxo, rxcp);
- be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
+ /* Consume pending rx completions.
+ * Wait for the flush completion (identified by zero num_rcvd)
+ * to arrive. Notify CQ even when there are no more CQ entries
+ * for HW to flush partially coalesced CQ entries.
+ * In Lancer, there is no need to wait for flush compl.
+ */
+ for (;;) {
+ rxcp = be_rx_compl_get(rxo);
+ if (rxcp == NULL) {
+ if (lancer_chip(adapter))
+ break;
+
+ if (flush_wait++ > 10 || be_hw_error(adapter)) {
+ dev_warn(&adapter->pdev->dev,
+ "did not receive flush compl\n");
+ break;
+ }
+ be_cq_notify(adapter, rx_cq->id, true, 0);
+ mdelay(1);
+ } else {
+ be_rx_compl_discard(rxo, rxcp);
+ be_cq_notify(adapter, rx_cq->id, true, 1);
+ if (rxcp->num_rcvd == 0)
+ break;
+ }
}
- /* Then free posted rx buffer that were not used */
+ /* After cleanup, leave the CQ in unarmed state */
+ be_cq_notify(adapter, rx_cq->id, false, 0);
+
+ /* Then free posted rx buffers that were not used */
tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
page_info = get_rx_page_info(rxo, tail);
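
[Editorial aside, not part of the patch: the new RX-clean loop keeps consuming completions until the flush marker (num_rcvd == 0) shows up, re-notifying the CQ when the queue runs dry and giving up after roughly 10 retries. A simplified, self-contained model of that bounded wait, with a fake get_compl() in place of be_rx_compl_get():]

    #include <stdio.h>

    struct compl { int num_rcvd; };

    /* Fake completion source: a couple of data entries, then the flush marker. */
    static struct compl queue[] = { {2}, {1}, {0} };
    static int head;

    static struct compl *get_compl(void)
    {
        return head < 3 ? &queue[head++] : NULL;
    }

    int main(void)
    {
        int flush_wait = 0;

        for (;;) {
            struct compl *c = get_compl();

            if (!c) {
                /* nothing pending: poke the queue and retry, but only so often */
                if (flush_wait++ > 10) {
                    fprintf(stderr, "did not receive flush compl\n");
                    break;
                }
                /* in the driver this is be_cq_notify() followed by mdelay(1) */
                continue;
            }

            printf("discarding completion with %d frags\n", c->num_rcvd);
            if (c->num_rcvd == 0) /* flush marker: HW has drained the queue */
                break;
        }
        return 0;
    }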
@@ -2000,19 +2026,30 @@ static irqreturn_t be_intx(int irq, void *dev)
struct be_adapter *adapter = eqo->adapter;
int num_evts = 0;
- /* On Lancer, clear-intr bit of the EQ DB does not work.
- * INTx is de-asserted only on notifying num evts.
+ /* IRQ is not expected when NAPI is scheduled as the EQ
+ * will not be armed.
+ * But, this can happen on Lancer INTx where it takes
+ * a while to de-assert INTx or in BE2 where occasionally
+ * an interrupt may be raised even when EQ is unarmed.
+ * If NAPI is already scheduled, then counting & notifying
+ * events will orphan them.
*/
- if (lancer_chip(adapter))
+ if (napi_schedule_prep(&eqo->napi)) {
num_evts = events_get(eqo);
+ __napi_schedule(&eqo->napi);
+ if (num_evts)
+ eqo->spurious_intr = 0;
+ }
+ be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
- /* The EQ-notify may not de-assert INTx rightaway, causing
- * the ISR to be invoked again. So, return HANDLED even when
- * num_evts is zero.
+ /* Return IRQ_HANDLED only for the first spurious intr
+ * after a valid intr to stop the kernel from branding
+ * this irq as a bad one!
*/
- be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
- napi_schedule(&eqo->napi);
- return IRQ_HANDLED;
+ if (num_evts || eqo->spurious_intr++ == 0)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
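
[Editorial aside, not part of the patch: the reworked INTx handler reports IRQ_HANDLED for a genuine event or for the first empty interrupt after a valid one, so a genuinely stuck line still gets flagged by the kernel eventually. The decision logic in isolation, reduced to a per-queue counter:]

    #include <stdio.h>

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    /* One counter per event queue: reset on real work, incremented otherwise. */
    static enum irqreturn intx_decision(int num_evts, int *spurious_intr)
    {
        if (num_evts) {
            *spurious_intr = 0;
            return IRQ_HANDLED;
        }
        /* tolerate exactly one empty interrupt after a valid one */
        return (*spurious_intr)++ == 0 ? IRQ_HANDLED : IRQ_NONE;
    }

    int main(void)
    {
        int spurious = 0;

        printf("%d\n", intx_decision(3, &spurious)); /* 1: real events */
        printf("%d\n", intx_decision(0, &spurious)); /* 1: first empty intr */
        printf("%d\n", intx_decision(0, &spurious)); /* 0: now IRQ_NONE */
        return 0;
    }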
static irqreturn_t be_msix(int irq, void *dev)
@@ -2157,7 +2194,7 @@ void be_detect_error(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- if (be_crit_error(adapter))
+ if (be_hw_error(adapter))
return;
if (lancer_chip(adapter)) {
@@ -2398,13 +2435,22 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- be_async_mcc_disable(adapter);
-
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
- for_all_evt_queues(adapter, eqo, i) {
+ for_all_evt_queues(adapter, eqo, i)
napi_disable(&eqo->napi);
+
+ be_async_mcc_disable(adapter);
+
+ /* Wait for all pending tx completions to arrive so that
+ * all tx skbs are freed.
+ */
+ be_tx_compl_clean(adapter);
+
+ be_rx_qs_destroy(adapter);
+
+ for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
synchronize_irq(be_msix_vec_get(adapter, eqo));
else
@@ -2414,12 +2460,6 @@ static int be_close(struct net_device *netdev)
be_irq_unregister(adapter);
- /* Wait for all pending tx completions to arrive so that
- * all tx skbs are freed.
- */
- be_tx_compl_clean(adapter);
-
- be_rx_qs_destroy(adapter);
return 0;
}
@@ -2557,7 +2597,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
* These addresses are programmed in the ASIC by the PF and the VF driver
* queries for the MAC address during its probe.
*/
-static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
+static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
u32 vf;
int status = 0;
@@ -2586,13 +2626,34 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
return status;
}
+static int be_vfs_mac_query(struct be_adapter *adapter)
+{
+ int status, vf;
+ u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
+ bool active;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ be_cmd_get_mac_from_list(adapter, mac, &active,
+ &vf_cfg->pmac_id, 0);
+
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ vf_cfg->if_handle, 0);
+ if (status)
+ return status;
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
+ }
+ return 0;
+}
+
static void be_vf_clear(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
u32 vf;
if (be_find_vfs(adapter, ASSIGNED)) {
- dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
+ dev_warn(&adapter->pdev->dev,
+ "VFs are assigned to VMs: not disabling VFs\n");
goto done;
}
@@ -2641,21 +2702,29 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
- u32 *cap_flags, u8 domain)
+static int be_vfs_if_create(struct be_adapter *adapter)
{
- bool profile_present = false;
+ struct be_vf_cfg *vf_cfg;
+ u32 cap_flags, en_flags, vf;
int status;
- if (lancer_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, cap_flags, domain);
- if (!status)
- profile_present = true;
- }
+ cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
- if (!profile_present)
- *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (!BE3_chip(adapter))
+ be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
+
+ /* If a FW profile exists, then cap_flags are updated */
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
+ if (status)
+ goto err;
+ }
+err:
+ return status;
}
static int be_vf_setup_init(struct be_adapter *adapter)
@@ -2678,65 +2747,70 @@ static int be_vf_setup_init(struct be_adapter *adapter)
static int be_vf_setup(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
- struct device *dev = &adapter->pdev->dev;
- u32 cap_flags, en_flags, vf;
u16 def_vlan, lnk_speed;
- int status, enabled_vfs;
-
- enabled_vfs = be_find_vfs(adapter, ENABLED);
- if (enabled_vfs) {
- dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
- dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
- return 0;
- }
-
- if (num_vfs > adapter->dev_num_vfs) {
- dev_warn(dev, "Device supports %d VFs and not %d\n",
- adapter->dev_num_vfs, num_vfs);
- num_vfs = adapter->dev_num_vfs;
- }
+ int status, old_vfs, vf;
+ struct device *dev = &adapter->pdev->dev;
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- if (!status) {
- adapter->num_vfs = num_vfs;
+ old_vfs = be_find_vfs(adapter, ENABLED);
+ if (old_vfs) {
+ dev_info(dev, "%d VFs are already enabled\n", old_vfs);
+ if (old_vfs != num_vfs)
+ dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+ adapter->num_vfs = old_vfs;
} else {
- /* Platform doesn't support SRIOV though device supports it */
- dev_warn(dev, "SRIOV enable failed\n");
- return 0;
+ if (num_vfs > adapter->dev_num_vfs)
+ dev_info(dev, "Device supports %d VFs and not %d\n",
+ adapter->dev_num_vfs, num_vfs);
+ adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
+
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (status) {
+ dev_err(dev, "SRIOV enable failed\n");
+ adapter->num_vfs = 0;
+ return 0;
+ }
}
status = be_vf_setup_init(adapter);
if (status)
goto err;
- for_all_vfs(adapter, vf_cfg, vf) {
- be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
-
- en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST);
-
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ if (old_vfs) {
+ for_all_vfs(adapter, vf_cfg, vf) {
+ status = be_cmd_get_if_id(adapter, vf_cfg, vf);
+ if (status)
+ goto err;
+ }
+ } else {
+ status = be_vfs_if_create(adapter);
if (status)
goto err;
}
- if (!enabled_vfs) {
+ if (old_vfs) {
+ status = be_vfs_mac_query(adapter);
+ if (status)
+ goto err;
+ } else {
status = be_vf_eth_addr_config(adapter);
if (status)
goto err;
}
for_all_vfs(adapter, vf_cfg, vf) {
- lnk_speed = 1000;
- status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
- if (status)
- goto err;
- vf_cfg->tx_rate = lnk_speed * 10;
+ /* BE3 FW, by default, caps VF TX-rate to 100mbps.
+ * Allow full available bandwidth
+ */
+ if (BE3_chip(adapter) && !old_vfs)
+ be_cmd_set_qos(adapter, 1000, vf+1);
+
+ status = be_cmd_link_status_query(adapter, &lnk_speed,
+ NULL, vf + 1);
+ if (!status)
+ vf_cfg->tx_rate = lnk_speed;
status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle);
+ vf + 1, vf_cfg->if_handle);
if (status)
goto err;
vf_cfg->def_vid = def_vlan;
@@ -2745,6 +2819,8 @@ static int be_vf_setup(struct be_adapter *adapter)
}
return 0;
err:
+ dev_err(dev, "VF setup failed\n");
+ be_vf_clear(adapter);
return status;
}
@@ -2798,12 +2874,12 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
static void be_get_resources(struct be_adapter *adapter)
{
- int status;
+ u16 dev_num_vfs;
+ int pos, status;
bool profile_present = false;
- if (lancer_chip(adapter)) {
+ if (!BEx_chip(adapter)) {
status = be_cmd_get_func_config(adapter);
-
if (!status)
profile_present = true;
}
@@ -2859,13 +2935,21 @@ static void be_get_resources(struct be_adapter *adapter)
if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
}
+
+ pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
+ &dev_num_vfs);
+ if (BE3_chip(adapter))
+ dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
+ adapter->dev_num_vfs = dev_num_vfs;
+ }
}
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
- int pos, status;
- u16 dev_num_vfs;
+ int status;
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
&adapter->function_mode,
@@ -2883,14 +2967,6 @@ static int be_get_config(struct be_adapter *adapter)
goto err;
}
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
- &dev_num_vfs);
- if (!lancer_chip(adapter))
- dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
- adapter->dev_num_vfs = dev_num_vfs;
- }
err:
return status;
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8db1c06008de..5722bc61fa58 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -206,7 +206,7 @@ struct ethoc {
unsigned int num_rx;
unsigned int cur_rx;
- void** vma;
+ void **vma;
struct net_device *netdev;
struct napi_struct napi;
@@ -292,7 +292,7 @@ static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
struct ethoc_bd bd;
int i;
- void* vma;
+ void *vma;
dev->cur_tx = 0;
dev->dty_tx = 0;
@@ -447,8 +447,8 @@ static int ethoc_rx(struct net_device *dev, int limit)
netif_receive_skb(skb);
} else {
if (net_ratelimit())
- dev_warn(&dev->dev, "low on memory - "
- "packet dropped\n");
+ dev_warn(&dev->dev,
+ "low on memory - packet dropped\n");
dev->stats.rx_dropped++;
break;
@@ -555,9 +555,8 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
pending = ethoc_read(priv, INT_SOURCE);
pending &= mask;
- if (unlikely(pending == 0)) {
+ if (unlikely(pending == 0))
return IRQ_NONE;
- }
ethoc_ack_irq(priv, pending);
@@ -620,7 +619,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
- for (i=0; i < 5; i++) {
+ for (i = 0; i < 5; i++) {
u32 status = ethoc_read(priv, MIISTATUS);
if (!(status & MIISTATUS_BUSY)) {
u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -628,7 +627,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
ethoc_write(priv, MIICOMMAND, 0);
return data;
}
- usleep_range(100,200);
+ usleep_range(100, 200);
}
return -EBUSY;
@@ -643,14 +642,14 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
ethoc_write(priv, MIITX_DATA, val);
ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
- for (i=0; i < 5; i++) {
+ for (i = 0; i < 5; i++) {
u32 stat = ethoc_read(priv, MIISTATUS);
if (!(stat & MIISTATUS_BUSY)) {
/* reset MII command register */
ethoc_write(priv, MIICOMMAND, 0);
return 0;
}
- usleep_range(100,200);
+ usleep_range(100, 200);
}
return -EBUSY;
@@ -671,19 +670,18 @@ static int ethoc_mdio_probe(struct net_device *dev)
struct phy_device *phy;
int err;
- if (priv->phy_id != -1) {
+ if (priv->phy_id != -1)
phy = priv->mdio->phy_map[priv->phy_id];
- } else {
+ else
phy = phy_find_first(priv->mdio);
- }
if (!phy) {
dev_err(&dev->dev, "no PHY found\n");
return -ENXIO;
}
- err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
- PHY_INTERFACE_MODE_GMII);
+ err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
+ PHY_INTERFACE_MODE_GMII);
if (err) {
dev_err(&dev->dev, "could not attach to PHY\n");
return err;
@@ -771,21 +769,24 @@ static int ethoc_config(struct net_device *dev, struct ifmap *map)
return -ENOSYS;
}
-static int ethoc_set_mac_address(struct net_device *dev, void *addr)
+static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
- u8 *mac = (u8 *)addr;
-
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
+ unsigned char *mac = dev->dev_addr;
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
+}
- memcpy(dev->dev_addr, mac, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+static int ethoc_set_mac_address(struct net_device *dev, void *p)
+{
+ const struct sockaddr *addr = p;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ ethoc_do_set_mac_address(dev);
return 0;
}
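
[Editorial aside, not part of the patch: ethoc programs the station address as two words, MAC_ADDR0 holding bytes 2..5 and MAC_ADDR1 the leading two bytes. A quick sketch of that packing, with write_reg() as a stand-in for ethoc_write():]

    #include <stdint.h>
    #include <stdio.h>

    static void write_reg(const char *name, uint32_t val)
    {
        printf("%s <= 0x%08x\n", name, (unsigned)val);
    }

    /* Pack a 6-byte station address the way the ethoc registers expect it. */
    static void program_mac(const uint8_t mac[6])
    {
        write_reg("MAC_ADDR0", (uint32_t)mac[2] << 24 | (uint32_t)mac[3] << 16 |
                               (uint32_t)mac[4] << 8  | mac[5]);
        write_reg("MAC_ADDR1", (uint32_t)mac[0] << 8  | mac[1]);
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

        program_mac(mac); /* MAC_ADDR0 <= 0x5e102030, MAC_ADDR1 <= 0x00000200 */
        return 0;
    }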
@@ -1022,7 +1023,7 @@ static int ethoc_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
priv->num_tx, priv->num_rx);
- priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
+ priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
goto error;
@@ -1038,7 +1039,7 @@ static int ethoc_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
{
- const uint8_t* mac;
+ const uint8_t *mac;
mac = of_get_property(pdev->dev.of_node,
"local-mac-address",
@@ -1050,25 +1051,23 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Check that the given MAC address is valid. If it isn't, read the
- * current MAC from the controller. */
+ * current MAC from the controller.
+ */
if (!is_valid_ether_addr(netdev->dev_addr))
ethoc_get_mac_address(netdev, netdev->dev_addr);
/* Check the MAC again for validity, if it still isn't choose and
- * program a random one. */
+ * program a random one.
+ */
if (!is_valid_ether_addr(netdev->dev_addr)) {
eth_random_addr(netdev->dev_addr);
random_mac = true;
}
- ret = ethoc_set_mac_address(netdev, netdev->dev_addr);
- if (ret) {
- dev_err(&netdev->dev, "failed to set MAC address\n");
- goto error;
- }
+ ethoc_do_set_mac_address(netdev);
if (random_mac)
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
/* register MII bus */
priv->mdio = mdiobus_alloc();
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 74d749e29aab..7c361d1db94c 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -858,8 +858,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
}
phydev = phy_connect(netdev, dev_name(&phydev->dev),
- &ftgmac100_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII);
if (IS_ERR(phydev)) {
netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
@@ -955,9 +954,9 @@ static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(&netdev->dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftgmac100_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index b901a01e3fa5..b5ea8fbd8a76 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -820,9 +820,9 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(&netdev->dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 5ba6e1cbd346..6048dc8604ee 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -26,6 +26,7 @@ config FEC
ARCH_MXC || SOC_IMX28)
default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
+ select PTP_1588_CLOCK
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -92,13 +93,4 @@ config GIANFAR
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540.
-config FEC_PTP
- bool "PTP Hardware Clock (PHC)"
- depends on FEC && ARCH_MXC
- select PTP_1588_CLOCK
- default y if SOC_IMX6Q
- --help---
- Say Y here if you want to use PTP Hardware Clock (PHC) in the
- driver. Only the basic clock operations have been implemented.
-
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index d4d19b3d00ae..b7d58fe6f531 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -2,8 +2,7 @@
# Makefile for the Freescale network device drivers.
#
-obj-$(CONFIG_FEC) += fec.o
-obj-$(CONFIG_FEC_PTP) += fec_ptp.o
+obj-$(CONFIG_FEC) += fec.o fec_ptp.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 0704bcab178a..29d82cf1528e 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -67,6 +67,15 @@
#endif
#define DRIVER_NAME "fec"
+#define FEC_NAPI_WEIGHT 64
+
+/* Pause frame field and FIFO threshold */
+#define FEC_ENET_FCE (1 << 5)
+#define FEC_ENET_RSEM_V 0x84
+#define FEC_ENET_RSFL_V 16
+#define FEC_ENET_RAEM_V 0x8
+#define FEC_ENET_RAFL_V 0x8
+#define FEC_ENET_OPD_V 0xFFF0
/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC (1 << 0)
@@ -76,6 +85,8 @@
#define FEC_QUIRK_USE_GASKET (1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT (1 << 3)
+/* Controller has extended buffer descriptors */
+#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
static struct platform_device_id fec_devtype[] = {
{
@@ -93,7 +104,8 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
}, {
.name = "imx6q-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX,
}, {
/* sentinel */
}
@@ -140,7 +152,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
+#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
@@ -157,6 +169,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
@@ -190,8 +203,29 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+#define FEC_PAUSE_FLAG_AUTONEG 0x1
+#define FEC_PAUSE_FLAG_ENABLE 0x2
+
static int mii_cnt;
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+{
+ struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+ if (is_ex)
+ return (struct bufdesc *)(ex + 1);
+ else
+ return bdp + 1;
+}
+
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+{
+ struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+ if (is_ex)
+ return (struct bufdesc *)(ex - 1);
+ else
+ return bdp - 1;
+}
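
[Editorial aside, not part of the patch: because the i.MX6 ENET uses 32-byte extended descriptors that embed the classic 8-byte layout at offset 0, ring walking has to pick the right stride at run time, which is what the two helpers above do. A compact model of the same idea, with simplified descriptor layouts:]

    #include <stdio.h>

    struct bufdesc {               /* classic FEC descriptor (simplified) */
        unsigned short cbd_datlen;
        unsigned short cbd_sc;
        unsigned long  cbd_bufaddr;
    };

    struct bufdesc_ex {            /* extended descriptor: classic one plus extras */
        struct bufdesc desc;
        unsigned long  cbd_esc;
        unsigned long  cbd_bdu;
        unsigned long  ts;
    };

    /* Advance by one ring entry, whichever layout the controller is using. */
    static struct bufdesc *next_desc(struct bufdesc *bdp, int is_ex)
    {
        if (is_ex)
            return (struct bufdesc *)((struct bufdesc_ex *)bdp + 1);
        return bdp + 1;
    }

    int main(void)
    {
        struct bufdesc_ex ring[4];
        struct bufdesc *bdp = &ring[0].desc;

        bdp = next_desc(bdp, 1);
        printf("stride = %ld bytes\n", (long)((char *)bdp - (char *)ring));
        return 0;
    }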
+
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -248,7 +282,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
*/
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
- index = bdp - fep->tx_bd_base;
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@@ -280,17 +318,19 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_bdu = 0;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ if (fep->bufdesc_ex) {
+
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_bdu = 0;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
fep->hwts_tx_en)) {
- bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
+ } else {
- bdp->cbd_esc = BD_ENET_TX_INT;
+ ebdp->cbd_esc = BD_ENET_TX_INT;
+ }
}
-#endif
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -298,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
@@ -359,8 +399,12 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
- fep->hwp + FEC_X_DES_START);
+ if (fep->bufdesc_ex)
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
+ * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ else
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
+ * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
@@ -439,6 +483,25 @@ fec_restart(struct net_device *ndev, int duplex)
}
#endif
}
+
+ /* enable pause frame*/
+ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
+ ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
+ fep->phy_dev && fep->phy_dev->pause)) {
+ rcntl |= FEC_ENET_FCE;
+
+ /* set FIFO threshold parameters to reduce overrun */
+ writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
+ writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
+ writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
+ writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
+
+ /* OPD */
+ writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
+ } else {
+ rcntl &= ~FEC_ENET_FCE;
+ }
+
writel(rcntl, fep->hwp + FEC_R_CNTRL);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
@@ -448,17 +511,16 @@ fec_restart(struct net_device *ndev, int duplex)
writel(1 << 8, fep->hwp + FEC_X_WMRK);
}
-#ifdef CONFIG_FEC_PTP
- ecntl |= (1 << 4);
-#endif
+ if (fep->bufdesc_ex)
+ ecntl |= (1 << 4);
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-#ifdef CONFIG_FEC_PTP
- fec_ptp_start_cyclecounter(ndev);
-#endif
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
@@ -544,19 +606,20 @@ fec_enet_tx(struct net_device *ndev)
ndev->stats.tx_packets++;
}
-#ifdef CONFIG_FEC_PTP
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ fep->bufdesc_ex) {
struct skb_shared_hwtstamps shhwtstamps;
unsigned long flags;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
spin_lock_irqsave(&fep->tmreg_lock, flags);
shhwtstamps.hwtstamp = ns_to_ktime(
- timecounter_cyc2time(&fep->tc, bdp->ts));
+ timecounter_cyc2time(&fep->tc, ebdp->ts));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
skb_tstamp_tx(skb, &shhwtstamps);
}
-#endif
+
if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
@@ -575,7 +638,7 @@ fec_enet_tx(struct net_device *ndev)
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
/* Since we have freed up a buffer, the ring is no longer full
*/
@@ -595,8 +658,8 @@ fec_enet_tx(struct net_device *ndev)
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
-static void
-fec_enet_rx(struct net_device *ndev)
+static int
+fec_enet_rx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
@@ -606,13 +669,12 @@ fec_enet_rx(struct net_device *ndev)
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
+ int pkt_received = 0;
#ifdef CONFIG_M532x
flush_cache_all();
#endif
- spin_lock(&fep->hw_lock);
-
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
@@ -620,6 +682,10 @@ fec_enet_rx(struct net_device *ndev)
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
/* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
*/
@@ -683,23 +749,25 @@ fec_enet_rx(struct net_device *ndev)
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
skb->protocol = eth_type_trans(skb, ndev);
-#ifdef CONFIG_FEC_PTP
+
/* Get receive timestamp from the skb */
- if (fep->hwts_rx_en) {
+ if (fep->hwts_rx_en && fep->bufdesc_ex) {
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
unsigned long flags;
+ struct bufdesc_ex *ebdp =
+ (struct bufdesc_ex *)bdp;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
spin_lock_irqsave(&fep->tmreg_lock, flags);
shhwtstamps->hwtstamp = ns_to_ktime(
- timecounter_cyc2time(&fep->tc, bdp->ts));
+ timecounter_cyc2time(&fep->tc, ebdp->ts));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
-#endif
+
if (!skb_defer_rx_timestamp(skb))
- netif_rx(skb);
+ napi_gro_receive(&fep->napi, skb);
}
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -712,17 +780,19 @@ rx_processing_done:
status |= BD_ENET_RX_EMPTY;
bdp->cbd_sc = status;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
- bdp->cbd_prot = 0;
- bdp->cbd_bdu = 0;
-#endif
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
/* Update BD pointer to next entry */
if (status & BD_ENET_RX_WRAP)
bdp = fep->rx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
@@ -731,7 +801,7 @@ rx_processing_done:
}
fep->cur_rx = bdp;
- spin_unlock(&fep->hw_lock);
+ return pkt_received;
}
static irqreturn_t
@@ -748,7 +818,13 @@ fec_enet_interrupt(int irq, void *dev_id)
if (int_events & FEC_ENET_RXF) {
ret = IRQ_HANDLED;
- fec_enet_rx(ndev);
+
+ /* Disable the RX interrupt */
+ if (napi_schedule_prep(&fep->napi)) {
+ writel(FEC_RX_DISABLED_IMASK,
+ fep->hwp + FEC_IMASK);
+ __napi_schedule(&fep->napi);
+ }
}
/* Transmit OK, or non-fatal error. Update the buffer
@@ -769,10 +845,21 @@ fec_enet_interrupt(int irq, void *dev_id)
return ret;
}
+static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ int pkts = fec_enet_rx(ndev, budget);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ if (pkts < budget) {
+ napi_complete(napi);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ }
+ return pkts;
+}
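
[Editorial aside, not part of the patch: the new poll callback follows the usual NAPI contract, processing at most budget packets and re-arming the interrupt only when fewer arrive. The control flow, reduced to a self-contained sketch with a hypothetical rx_process() source:]

    #include <stdio.h>

    #define BUDGET 64

    static int pending = 150; /* pretend 150 packets are queued */

    /* Process up to 'budget' packets; returns how many were handled. */
    static int rx_process(int budget)
    {
        int done = pending < budget ? pending : budget;
        pending -= done;
        return done;
    }

    int main(void)
    {
        for (;;) {
            int pkts = rx_process(BUDGET);

            printf("polled %d packets\n", pkts);
            if (pkts < BUDGET) {
                /* work exhausted: napi_complete() and re-enable RX interrupts */
                printf("re-arming interrupt mask\n");
                break;
            }
            /* budget used up: stay in polling mode, the kernel calls poll again */
        }
        return 0;
    }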
/* ------------------------------------------------------------------------- */
-static void __inline__ fec_get_mac(struct net_device *ndev)
+static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
@@ -973,7 +1060,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
fep->phy_interface);
if (IS_ERR(phy_dev)) {
printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
@@ -981,8 +1068,10 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT)
+ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
phy_dev->supported &= PHY_GBIT_FEATURES;
+ phy_dev->supported |= SUPPORTED_Pause;
+ }
else
phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1133,17 +1222,95 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strcpy(info->driver, fep->pdev->dev.driver->name);
- strcpy(info->version, "Revision: 1.0");
- strcpy(info->bus_info, dev_name(&ndev->dev));
+ strlcpy(info->driver, fep->pdev->dev.driver->name,
+ sizeof(info->driver));
+ strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+}
+
+static int fec_enet_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->bufdesc_ex) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ if (fep->ptp_clock)
+ info->phc_index = ptp_clock_index(fep->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+ } else {
+ return ethtool_op_get_ts_info(ndev, info);
+ }
+}
+
+static void fec_enet_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
+ pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
+ pause->rx_pause = pause->tx_pause;
+}
+
+static int fec_enet_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (pause->tx_pause != pause->rx_pause) {
+ netdev_info(ndev,
+ "hardware only support enable/disable both tx and rx");
+ return -EINVAL;
+ }
+
+ fep->pause_flag = 0;
+
+ /* tx pause must be same as rx pause */
+ fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
+ fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
+
+ if (pause->rx_pause || pause->autoneg) {
+ fep->phy_dev->supported |= ADVERTISED_Pause;
+ fep->phy_dev->advertising |= ADVERTISED_Pause;
+ } else {
+ fep->phy_dev->supported &= ~ADVERTISED_Pause;
+ fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+ }
+
+ if (pause->autoneg) {
+ if (netif_running(ndev))
+ fec_stop(ndev);
+ phy_start_aneg(fep->phy_dev);
+ }
+ if (netif_running(ndev))
+ fec_restart(ndev, 0);
+
+ return 0;
}
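
[Editorial aside, not part of the patch: ethtool's pauseparam maps onto two private flag bits here, FEC_PAUSE_FLAG_ENABLE when rx/tx pause is requested and FEC_PAUSE_FLAG_AUTONEG when it should be negotiated, with the constraint that rx and tx must match. The flag bookkeeping on its own:]

    #include <stdbool.h>
    #include <stdio.h>

    #define PAUSE_FLAG_AUTONEG 0x1
    #define PAUSE_FLAG_ENABLE  0x2

    /* Fold an ethtool-style request into the driver's single flag word;
     * reject asymmetric settings the hardware cannot express. */
    static int set_pause(unsigned *flags, bool autoneg, bool rx, bool tx)
    {
        if (rx != tx)
            return -1; /* -EINVAL in the driver */

        *flags = (rx ? PAUSE_FLAG_ENABLE : 0) |
                 (autoneg ? PAUSE_FLAG_AUTONEG : 0);
        return 0;
    }

    int main(void)
    {
        unsigned flags = 0;

        if (set_pause(&flags, true, true, true) == 0)
            printf("pause_flag = 0x%x\n", flags); /* 0x3 */
        return 0;
    }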
static const struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_pauseparam = fec_enet_get_pauseparam,
+ .set_pauseparam = fec_enet_set_pauseparam,
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = fec_enet_get_ts_info,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -1157,10 +1324,9 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
-#ifdef CONFIG_FEC_PTP
- if (cmd == SIOCSHWTSTAMP)
+ if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
return fec_ptp_ioctl(ndev, rq, cmd);
-#endif
+
return phy_mii_ioctl(phydev, rq, cmd);
}
@@ -1180,7 +1346,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
bdp = fep->tx_bd_base;
@@ -1207,14 +1373,17 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
- bdp++;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap. */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
bdp = fep->tx_bd_base;
@@ -1224,14 +1393,16 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
- bdp++;
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap. */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -1243,6 +1414,8 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ napi_enable(&fep->napi);
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1444,24 +1617,31 @@ static int fec_enet_init(struct net_device *ndev)
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
- fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+ if (fep->bufdesc_ex)
+ fep->tx_bd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+ else
+ fep->tx_bd_base = cbd_base + RX_RING_SIZE;
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->netdev_ops = &fec_netdev_ops;
ndev->ethtool_ops = &fec_enet_ethtool_ops;
+ writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
/* ...and the same for transmit */
@@ -1471,11 +1651,11 @@ static int fec_enet_init(struct net_device *ndev)
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fec_restart(ndev, 0);
@@ -1509,22 +1689,25 @@ static void fec_reset_phy(struct platform_device *pdev)
msec = 1;
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+ if (!gpio_is_valid(phy_reset))
+ return;
+
err = devm_gpio_request_one(&pdev->dev, phy_reset,
GPIOF_OUT_INIT_LOW, "phy-reset");
if (err) {
- pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
+ dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
msleep(msec);
gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
-static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
+static int fec_get_phy_mode_dt(struct platform_device *pdev)
{
return -ENODEV;
}
-static inline void fec_reset_phy(struct platform_device *pdev)
+static void fec_reset_phy(struct platform_device *pdev)
{
/*
* In case of platform probe, the reset has been done
@@ -1570,10 +1753,17 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
+ /* default enable pause frame auto negotiation */
+ if (pdev->id_entry &&
+ (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
+ fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
fep->dev_id = dev_id++;
+ fep->bufdesc_ex = 0;
+
if (!fep->hwp) {
ret = -ENOMEM;
goto failed_ioremap;
@@ -1628,19 +1818,19 @@ fec_probe(struct platform_device *pdev)
goto failed_clk;
}
-#ifdef CONFIG_FEC_PTP
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+ fep->bufdesc_ex =
+ pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
if (IS_ERR(fep->clk_ptp)) {
ret = PTR_ERR(fep->clk_ptp);
- goto failed_clk;
+ fep->bufdesc_ex = 0;
}
-#endif
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
- clk_prepare_enable(fep->clk_ptp);
-#endif
+ if (!IS_ERR(fep->clk_ptp))
+ clk_prepare_enable(fep->clk_ptp);
+
reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(reg_phy)) {
ret = regulator_enable(reg_phy);
@@ -1653,6 +1843,9 @@ fec_probe(struct platform_device *pdev)
fec_reset_phy(pdev);
+ if (fep->bufdesc_ex)
+ fec_ptp_init(ndev, pdev);
+
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1668,10 +1861,6 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
-#ifdef CONFIG_FEC_PTP
- fec_ptp_init(ndev, pdev);
-#endif
-
return 0;
failed_register:
@@ -1681,9 +1870,8 @@ failed_init:
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
- clk_disable_unprepare(fep->clk_ptp);
-#endif
+ if (!IS_ERR(fep->clk_ptp))
+ clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1716,12 +1904,10 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
-#ifdef CONFIG_FEC_PTP
del_timer_sync(&fep->time_keep);
clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
-#endif
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c5a3bc1475c7..01579b8e37c4 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -13,11 +13,9 @@
#define FEC_H
/****************************************************************************/
-#ifdef CONFIG_FEC_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
@@ -50,6 +48,10 @@
#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
+#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
+#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
+#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
+#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
@@ -94,14 +96,17 @@ struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
-#ifdef CONFIG_FEC_PTP
+};
+
+struct bufdesc_ex {
+ struct bufdesc desc;
unsigned long cbd_esc;
unsigned long cbd_prot;
unsigned long cbd_bdu;
unsigned long ts;
unsigned short res0[4];
-#endif
};
+
#else
struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
@@ -203,9 +208,7 @@ struct fec_enet_private {
struct clk *clk_ipg;
struct clk *clk_ahb;
-#ifdef CONFIG_FEC_PTP
struct clk *clk_ptp;
-#endif
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -243,8 +246,11 @@ struct fec_enet_private {
int full_duplex;
struct completion mdio_done;
int irq[FEC_IRQ_NUM];
+ int bufdesc_ex;
+ int pause_flag;
+
+ struct napi_struct napi;
-#ifdef CONFIG_FEC_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
unsigned long last_overflow_check;
@@ -257,15 +263,12 @@ struct fec_enet_private {
int hwts_rx_en;
int hwts_tx_en;
struct timer_list time_keep;
-#endif
};
-#ifdef CONFIG_FEC_PTP
void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
-#endif
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 817d081d2cd8..77943a6a1b8c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/netdevice.h>
@@ -40,8 +41,8 @@
#include <asm/delay.h>
#include <asm/mpc52xx.h>
-#include <sysdev/bestcomm/bestcomm.h>
-#include <sysdev/bestcomm/fec.h>
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/fec.h>
#include "fec_mpc52xx.h"
@@ -76,10 +77,6 @@ static void mpc52xx_fec_stop(struct net_device *dev);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);
-static u8 mpc52xx_fec_mac_addr[6];
-module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
-MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe");
-
#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
static int debug = -1; /* the above default */
@@ -110,15 +107,6 @@ static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
-static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- struct mpc52xx_fec __iomem *fec = priv->fec;
-
- *(u32 *)(&mac[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16;
-}
-
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
@@ -853,6 +841,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
struct resource mem;
const u32 *prop;
int prop_size;
+ struct device_node *np = op->dev.of_node;
+ const char *mac_addr;
phys_addr_t rx_fifo;
phys_addr_t tx_fifo;
@@ -866,7 +856,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
priv->ndev = ndev;
/* Reserve FEC control zone */
- rv = of_address_to_resource(op->dev.of_node, 0, &mem);
+ rv = of_address_to_resource(np, 0, &mem);
if (rv) {
printk(KERN_ERR DRIVER_NAME ": "
"Error while parsing device node resource\n" );
@@ -919,7 +909,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Get the IRQ we need one by one */
/* Control */
- ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ ndev->irq = irq_of_parse_and_map(np, 0);
/* RX */
priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -927,11 +917,33 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* TX */
priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
- /* MAC address init */
- if (!is_zero_ether_addr(mpc52xx_fec_mac_addr))
- memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6);
- else
- mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
+ /*
+ * MAC address init:
+ *
+ * First try to read MAC address from DT
+ */
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr) {
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ } else {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ /*
+ * If the MAC address is not provided via DT, then read
+ * it back from the controller regs
+ */
+ *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ }
+
+ /*
+ * Check if the MAC address is valid; if not, get a random one
+ */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(&ndev->dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
@@ -942,20 +954,20 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
- prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
+ prop = of_get_property(np, "current-speed", &prop_size);
if (prop && (prop_size >= sizeof(u32) * 2)) {
priv->speed = prop[0];
priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
}
/* If there is a phy handle, then get the PHY node */
- priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
+ if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
@@ -970,6 +982,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* We're done ! */
dev_set_drvdata(&op->dev, ndev);
+ printk(KERN_INFO "%s: %s MAC %pM\n",
+ ndev->name, op->dev.of_node->full_name, ndev->dev_addr);
return 0;
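
The probe change above establishes a three-step MAC selection order: the device-tree property, then whatever firmware left in the paddr1/paddr2 registers, then a random address. A hedged sketch of that order; read_hw_addr() stands in for the register reads and is purely hypothetical:

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/string.h>

static void example_pick_mac(struct net_device *ndev, struct device_node *np,
			     void (*read_hw_addr)(struct net_device *))
{
	const char *mac = of_get_mac_address(np);	/* NULL if no DT property */

	if (mac)
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	else
		read_hw_addr(ndev);			/* fall back to controller regs */

	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);		/* last resort */
}
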
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index c40526c78c20..1f17ca0f2201 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -104,7 +104,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
unsigned long flags;
int inc;
- inc = 1000000000 / clk_get_rate(fep->clk_ptp);
+ inc = 1000000000 / fep->cycle_speed;
/* grab the ptp lock */
spin_lock_irqsave(&fep->tmreg_lock, flags);
@@ -363,6 +363,8 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
fep->ptp_caps.settime = fec_ptp_settime;
fep->ptp_caps.enable = fec_ptp_enable;
+ fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+
spin_lock_init(&fep->tmreg_lock);
fec_ptp_start_cyclecounter(ndev);
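
fec_ptp now caches clk_get_rate(fep->clk_ptp) in cycle_speed at init time, so restarting the cyclecounter only performs the division below. The increment is simply the tick period in nanoseconds; a throwaway example assuming a Hz-valued rate:

static unsigned int example_ptp_inc(unsigned long cycle_speed_hz)
{
	/* 1e9 ns per second divided by ticks per second = ns per tick;
	 * e.g. a 25 MHz PTP clock gives an increment of 40 ns. */
	return 1000000000UL / cycle_speed_hz;
}
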
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index e9879c5af7ba..46df28893c10 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -888,8 +888,8 @@ static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int fs_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index bffb2edd6858..4b5e8a692481 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -133,8 +133,8 @@ static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
@@ -231,7 +231,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
dma_addr_t addr;
int i, j, k;
struct gfar_private *priv = netdev_priv(ndev);
- struct device *dev = &priv->ofdev->dev;
+ struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
@@ -277,14 +277,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
- tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
- tx_queue->tx_ring_size,
- GFP_KERNEL);
- if (!tx_queue->tx_skbuff) {
- netif_err(priv, ifup, ndev,
- "Could not allocate tx_skbuff\n");
+ tx_queue->tx_skbuff =
+ kmalloc_array(tx_queue->tx_ring_size,
+ sizeof(*tx_queue->tx_skbuff),
+ GFP_KERNEL);
+ if (!tx_queue->tx_skbuff)
goto cleanup;
- }
for (k = 0; k < tx_queue->tx_ring_size; k++)
tx_queue->tx_skbuff[k] = NULL;
@@ -292,15 +290,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
- rx_queue->rx_ring_size,
- GFP_KERNEL);
-
- if (!rx_queue->rx_skbuff) {
- netif_err(priv, ifup, ndev,
- "Could not allocate rx_skbuff\n");
+ rx_queue->rx_skbuff =
+ kmalloc_array(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_skbuff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_skbuff)
goto cleanup;
- }
for (j = 0; j < rx_queue->rx_ring_size; j++)
rx_queue->rx_skbuff[j] = NULL;
@@ -349,14 +344,23 @@ static void gfar_init_mac(struct net_device *ndev)
/* Configure the coalescing support */
gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ /* set this when rx hw offload (TOE) functions are being used */
+ priv->uses_rxfcb = 0;
+
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
/* Program the RIR0 reg with the required distribution */
gfar_write(&regs->rir0, DEFAULT_RIR0);
}
- if (ndev->features & NETIF_F_RXCSUM)
+ /* Restore PROMISC mode */
+ if (ndev->flags & IFF_PROMISC)
+ rctrl |= RCTRL_PROM;
+
+ if (ndev->features & NETIF_F_RXCSUM) {
rctrl |= RCTRL_CHECKSUMMING;
+ priv->uses_rxfcb = 1;
+ }
if (priv->extended_hash) {
rctrl |= RCTRL_EXTHASH;
@@ -378,11 +382,15 @@ static void gfar_init_mac(struct net_device *ndev)
}
/* Enable HW time stamping if requested from user space */
- if (priv->hwts_rx_en)
+ if (priv->hwts_rx_en) {
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+ priv->uses_rxfcb = 1;
+ }
- if (ndev->features & NETIF_F_HW_VLAN_RX)
+ if (ndev->features & NETIF_F_HW_VLAN_RX) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ priv->uses_rxfcb = 1;
+ }
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
@@ -501,20 +509,6 @@ void unlock_tx_qs(struct gfar_private *priv)
spin_unlock(&priv->tx_queue[i]->txlock);
}
-static bool gfar_is_vlan_on(struct gfar_private *priv)
-{
- return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
- (priv->ndev->features & NETIF_F_HW_VLAN_TX);
-}
-
-/* Returns 1 if incoming frames use an FCB */
-static inline int gfar_uses_fcb(struct gfar_private *priv)
-{
- return gfar_is_vlan_on(priv) ||
- (priv->ndev->features & NETIF_F_RXCSUM) ||
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
-}
-
static void free_tx_pointers(struct gfar_private *priv)
{
int i;
@@ -540,6 +534,19 @@ static void unmap_group_regs(struct gfar_private *priv)
iounmap(priv->gfargrp[i].regs);
}
+static void free_gfar_dev(struct gfar_private *priv)
+{
+ int i, j;
+
+ for (i = 0; i < priv->num_grps; i++)
+ for (j = 0; j < GFAR_NUM_IRQS; j++) {
+ kfree(priv->gfargrp[i].irqinfo[j]);
+ priv->gfargrp[i].irqinfo[j] = NULL;
+ }
+
+ free_netdev(priv->ndev);
+}
+
static void disable_napi(struct gfar_private *priv)
{
int i;
@@ -559,40 +566,46 @@ static void enable_napi(struct gfar_private *priv)
static int gfar_parse_group(struct device_node *np,
struct gfar_private *priv, const char *model)
{
+ struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
u32 *queue_mask;
+ int i;
+
+ for (i = 0; i < GFAR_NUM_IRQS; i++) {
+ grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
+ GFP_KERNEL);
+ if (!grp->irqinfo[i])
+ return -ENOMEM;
+ }
- priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
- if (!priv->gfargrp[priv->num_grps].regs)
+ grp->regs = of_iomap(np, 0);
+ if (!grp->regs)
return -ENOMEM;
- priv->gfargrp[priv->num_grps].interruptTransmit =
- irq_of_parse_and_map(np, 0);
+ gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
/* If we aren't the FEC we have multiple interrupts */
if (model && strcasecmp(model, "FEC")) {
- priv->gfargrp[priv->num_grps].interruptReceive =
- irq_of_parse_and_map(np, 1);
- priv->gfargrp[priv->num_grps].interruptError =
- irq_of_parse_and_map(np,2);
- if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
- priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
- priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
+ gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
+ gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
+ if (gfar_irq(grp, TX)->irq == NO_IRQ ||
+ gfar_irq(grp, RX)->irq == NO_IRQ ||
+ gfar_irq(grp, ER)->irq == NO_IRQ)
return -EINVAL;
}
- priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
- priv->gfargrp[priv->num_grps].priv = priv;
- spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+ grp->grp_id = priv->num_grps;
+ grp->priv = priv;
+ spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+ grp->rx_bit_map = queue_mask ?
*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+ grp->tx_bit_map = queue_mask ?
*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
} else {
- priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
- priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+ grp->rx_bit_map = 0xFF;
+ grp->tx_bit_map = 0xFF;
}
priv->num_grps++;
@@ -645,7 +658,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return -ENOMEM;
priv = netdev_priv(dev);
- priv->node = ofdev->dev.of_node;
priv->ndev = dev;
priv->num_tx_queues = num_tx_qs;
@@ -777,7 +789,7 @@ tx_alloc_failed:
free_tx_pointers(priv);
err_grp_init:
unmap_group_regs(priv);
- free_netdev(dev);
+ free_gfar_dev(priv);
return err;
}
@@ -983,7 +995,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
- priv->node = ofdev->dev.of_node;
+ priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
spin_lock_init(&priv->bflock);
@@ -1020,8 +1032,6 @@ static int gfar_probe(struct platform_device *ofdev)
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long) regs;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
dev->mtu = 1500;
@@ -1182,15 +1192,16 @@ static int gfar_probe(struct platform_device *ofdev)
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
+ sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_tx");
- sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
+ sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_rx");
- sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
+ sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_er");
} else
- strcpy(priv->gfargrp[i].int_name_tx, dev->name);
+ strcpy(gfar_irq(grp, TX)->name, dev->name);
}
/* Initialize the filer table */
@@ -1223,7 +1234,7 @@ register_fail:
of_node_put(priv->phy_node);
if (priv->tbi_node)
of_node_put(priv->tbi_node);
- free_netdev(dev);
+ free_gfar_dev(priv);
return err;
}
@@ -1240,7 +1251,7 @@ static int gfar_remove(struct platform_device *ofdev)
unregister_netdev(priv->ndev);
unmap_group_regs(priv);
- free_netdev(priv->ndev);
+ free_gfar_dev(priv);
return 0;
}
@@ -1648,9 +1659,9 @@ void gfar_halt(struct net_device *dev)
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
- free_irq(grp->interruptError, grp);
- free_irq(grp->interruptTransmit, grp);
- free_irq(grp->interruptReceive, grp);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+ free_irq(gfar_irq(grp, RX)->irq, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
}
void stop_gfar(struct net_device *dev)
@@ -1679,7 +1690,7 @@ void stop_gfar(struct net_device *dev)
free_grp_irqs(&priv->gfargrp[i]);
} else {
for (i = 0; i < priv->num_grps; i++)
- free_irq(priv->gfargrp[i].interruptTransmit,
+ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
&priv->gfargrp[i]);
}
@@ -1698,13 +1709,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
if (!tx_queue->tx_skbuff[i])
continue;
- dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
+ dma_unmap_single(priv->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
j++) {
txbdp++;
- dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
+ dma_unmap_page(priv->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
@@ -1725,8 +1736,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
- dma_unmap_single(&priv->ofdev->dev,
- rxbdp->bufPtr, priv->rx_buffer_size,
+ dma_unmap_single(priv->dev, rxbdp->bufPtr,
+ priv->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
rx_queue->rx_skbuff[i] = NULL;
@@ -1765,7 +1776,7 @@ static void free_skb_resources(struct gfar_private *priv)
free_skb_rx_queue(rx_queue);
}
- dma_free_coherent(&priv->ofdev->dev,
+ dma_free_coherent(priv->dev,
sizeof(struct txbd8) * priv->total_tx_ring_size +
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
@@ -1854,32 +1865,34 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- if ((err = request_irq(grp->interruptError, gfar_error,
- 0, grp->int_name_er, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ gfar_irq(grp, ER)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptError);
+ gfar_irq(grp, ER)->irq);
goto err_irq_fail;
}
-
- if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
- 0, grp->int_name_tx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptTransmit);
+ gfar_irq(grp, TX)->irq);
goto tx_irq_fail;
}
-
- if ((err = request_irq(grp->interruptReceive, gfar_receive,
- 0, grp->int_name_rx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
+ gfar_irq(grp, RX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptReceive);
+ gfar_irq(grp, RX)->irq);
goto rx_irq_fail;
}
} else {
- if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
- 0, grp->int_name_tx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptTransmit);
+ gfar_irq(grp, TX)->irq);
goto err_irq_fail;
}
}
@@ -1887,9 +1900,9 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
return 0;
rx_irq_fail:
- free_irq(grp->interruptTransmit, grp);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
- free_irq(grp->interruptError, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
return err;
@@ -2143,7 +2156,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (i == nr_frags - 1)
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+ bufaddr = skb_frag_dma_map(priv->dev,
&skb_shinfo(skb)->frags[i],
0,
length,
@@ -2195,7 +2208,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_TOE);
}
- txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
+ txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
/* If time stamping is requested one additional TxBD must be set up. The
@@ -2308,10 +2321,13 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
tempval = gfar_read(&regs->rctrl);
/* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER)
+ if (tempval & RCTRL_REQ_PARSER) {
tempval |= RCTRL_PRSDEP_INIT;
- else
+ priv->uses_rxfcb = 1;
+ } else {
tempval &= ~RCTRL_PRSDEP_INIT;
+ priv->uses_rxfcb = 0;
+ }
gfar_write(&regs->rctrl, tempval);
}
@@ -2344,6 +2360,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
tempval = gfar_read(&regs->rctrl);
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
gfar_write(&regs->rctrl, tempval);
+ priv->uses_rxfcb = 1;
} else {
/* Disable VLAN tag extraction */
tempval = gfar_read(&regs->rctrl);
@@ -2367,15 +2384,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
- if (gfar_is_vlan_on(priv))
- frame_size += VLAN_HLEN;
-
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
- if (gfar_uses_fcb(priv))
+ if (priv->uses_rxfcb)
frame_size += GMAC_FCB_LEN;
frame_size += priv->padding;
@@ -2508,7 +2522,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
} else
buflen = bdp->length;
- dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, bdp->bufPtr,
buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2527,7 +2541,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_page(priv->dev, bdp->bufPtr,
bdp->length, DMA_TO_DEVICE);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2593,7 +2607,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
struct gfar_private *priv = netdev_priv(dev);
dma_addr_t buf;
- buf = dma_map_single(&priv->ofdev->dev, skb->data,
+ buf = dma_map_single(priv->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
gfar_init_rxbdp(rx_queue, bdp, buf);
}
@@ -2627,7 +2641,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
- estats->rx_trunc++;
+ atomic64_inc(&estats->rx_trunc);
return;
}
@@ -2636,20 +2650,20 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
stats->rx_length_errors++;
if (status & RXBD_LARGE)
- estats->rx_large++;
+ atomic64_inc(&estats->rx_large);
else
- estats->rx_short++;
+ atomic64_inc(&estats->rx_short);
}
if (status & RXBD_NONOCTET) {
stats->rx_frame_errors++;
- estats->rx_nonoctet++;
+ atomic64_inc(&estats->rx_nonoctet);
}
if (status & RXBD_CRCERR) {
- estats->rx_crcerr++;
+ atomic64_inc(&estats->rx_crcerr);
stats->rx_crc_errors++;
}
if (status & RXBD_OVERRUN) {
- estats->rx_overrun++;
+ atomic64_inc(&estats->rx_overrun);
stats->rx_crc_errors++;
}
}
@@ -2674,8 +2688,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi)
{
struct gfar_private *priv = netdev_priv(dev);
struct rxfcb *fcb = NULL;
@@ -2722,10 +2736,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Send the packet up the stack */
ret = napi_gro_receive(napi, skb);
- if (GRO_DROP == ret)
- priv->extra_stats.kernel_dropped++;
-
- return 0;
+ if (unlikely(GRO_DROP == ret))
+ atomic64_inc(&priv->extra_stats.kernel_dropped);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2746,7 +2758,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
- amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
+ amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2758,7 +2770,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
- dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, bdp->bufPtr,
priv->rx_buffer_size, DMA_FROM_DEVICE);
if (unlikely(!(bdp->status & RXBD_ERR) &&
@@ -2791,7 +2803,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
rx_queue->stats.rx_dropped++;
- priv->extra_stats.rx_skbmissing++;
+ atomic64_inc(&priv->extra_stats.rx_skbmissing);
}
}
@@ -3224,7 +3236,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
netif_dbg(priv, tx_err, dev,
"TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
- priv->extra_stats.tx_underrun++;
+ atomic64_inc(&priv->extra_stats.tx_underrun);
local_irq_save(flags);
lock_tx_qs(priv);
@@ -3239,7 +3251,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
if (events & IEVENT_BSY) {
dev->stats.rx_errors++;
- priv->extra_stats.rx_bsy++;
+ atomic64_inc(&priv->extra_stats.rx_bsy);
gfar_receive(irq, grp_id);
@@ -3248,19 +3260,19 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
if (events & IEVENT_BABR) {
dev->stats.rx_errors++;
- priv->extra_stats.rx_babr++;
+ atomic64_inc(&priv->extra_stats.rx_babr);
netif_dbg(priv, rx_err, dev, "babbling RX error\n");
}
if (events & IEVENT_EBERR) {
- priv->extra_stats.eberr++;
+ atomic64_inc(&priv->extra_stats.eberr);
netif_dbg(priv, rx_err, dev, "bus error\n");
}
if (events & IEVENT_RXC)
netif_dbg(priv, rx_status, dev, "control frame\n");
if (events & IEVENT_BABT) {
- priv->extra_stats.tx_babt++;
+ atomic64_inc(&priv->extra_stats.tx_babt);
netif_dbg(priv, tx_err, dev, "babbling TX error\n");
}
return IRQ_HANDLED;
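
The gianfar changes above fold the per-group interruptTransmit/Receive/Error fields and their name buffers into an array of dynamically allocated gfar_irqinfo entries reached through gfar_irq(). A sketch of the same bookkeeping with illustrative ex_* names and an arbitrary name length (the real enum, struct and macro appear in gianfar.h further down):

#include <linux/errno.h>
#include <linux/slab.h>

enum { EX_TX, EX_RX, EX_ER, EX_NUM_IRQS };

struct ex_irqinfo {
	unsigned int irq;
	char name[48];			/* illustrative size */
};

struct ex_grp {
	struct ex_irqinfo *irqinfo[EX_NUM_IRQS];
};

#define ex_irq(grp, id)	((grp)->irqinfo[id])

static int ex_alloc_irqinfo(struct ex_grp *grp)
{
	int i;

	for (i = 0; i < EX_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(*grp->irqinfo[i]), GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;	/* caller frees what was allocated */
	}
	return 0;
}
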
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 22eabc13ca99..63a28d294e20 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -627,36 +627,29 @@ struct rmon_mib
};
struct gfar_extra_stats {
- u64 kernel_dropped;
- u64 rx_large;
- u64 rx_short;
- u64 rx_nonoctet;
- u64 rx_crcerr;
- u64 rx_overrun;
- u64 rx_bsy;
- u64 rx_babr;
- u64 rx_trunc;
- u64 eberr;
- u64 tx_babt;
- u64 tx_underrun;
- u64 rx_skbmissing;
- u64 tx_timeout;
+ atomic64_t kernel_dropped;
+ atomic64_t rx_large;
+ atomic64_t rx_short;
+ atomic64_t rx_nonoctet;
+ atomic64_t rx_crcerr;
+ atomic64_t rx_overrun;
+ atomic64_t rx_bsy;
+ atomic64_t rx_babr;
+ atomic64_t rx_trunc;
+ atomic64_t eberr;
+ atomic64_t tx_babt;
+ atomic64_t tx_underrun;
+ atomic64_t rx_skbmissing;
+ atomic64_t tx_timeout;
};
#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
-#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+#define GFAR_EXTRA_STATS_LEN \
+ (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t))
-/* Number of stats in the stats structure (ignore car and cam regs)*/
+/* Number of stats exported via ethtool */
#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
-#define GFAR_INFOSTR_LEN 32
-
-struct gfar_stats {
- u64 extra[GFAR_EXTRA_STATS_LEN];
- u64 rmon[GFAR_RMON_LEN];
-};
-
-
struct gfar {
u32 tsec_id; /* 0x.000 - Controller ID register */
u32 tsec_id2; /* 0x.004 - Controller ID2 register */
@@ -937,26 +930,25 @@ struct tx_q_stats {
* @txtime: coalescing value if based on time
*/
struct gfar_priv_tx_q {
+ /* cacheline 1 */
spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
- struct sk_buff ** tx_skbuff;
- /* Buffer descriptor pointers */
- dma_addr_t tx_bd_dma_base;
struct txbd8 *tx_bd_base;
struct txbd8 *cur_tx;
- struct txbd8 *dirty_tx;
+ unsigned int num_txbdfree;
+ unsigned short skb_curtx;
+ unsigned short tx_ring_size;
struct tx_q_stats stats;
- struct net_device *dev;
struct gfar_priv_grp *grp;
- u16 skb_curtx;
- u16 skb_dirtytx;
- u16 qindex;
- unsigned int tx_ring_size;
- unsigned int num_txbdfree;
+ /* cacheline 2 */
+ struct net_device *dev;
+ struct sk_buff **tx_skbuff;
+ struct txbd8 *dirty_tx;
+ unsigned short skb_dirtytx;
+ unsigned short qindex;
/* Configuration info for the coalescing features */
- unsigned char txcoalescing;
+ unsigned int txcoalescing;
unsigned long txic;
- unsigned short txcount;
- unsigned short txtime;
+ dma_addr_t tx_bd_dma_base;
};
/*
@@ -999,18 +991,25 @@ struct gfar_priv_rx_q {
unsigned long rxic;
};
+enum gfar_irqinfo_id {
+ GFAR_TX = 0,
+ GFAR_RX = 1,
+ GFAR_ER = 2,
+ GFAR_NUM_IRQS = 3
+};
+
+struct gfar_irqinfo {
+ unsigned int irq;
+ char name[GFAR_INT_NAME_MAX];
+};
+
/**
* struct gfar_priv_grp - per group structure
* @napi: the napi poll function
* @priv: back pointer to the priv structure
* @regs: the ioremapped register space for this group
* @grp_id: group id for this group
- * @interruptTransmit: The TX interrupt number for this group
- * @interruptReceive: The RX interrupt number for this group
- * @interruptError: The ERROR interrupt number for this group
- * @int_name_tx: tx interrupt name for this group
- * @int_name_rx: rx interrupt name for this group
- * @int_name_er: er interrupt name for this group
+ * @irqinfo: TX/RX/ER irq data for this group
*/
struct gfar_priv_grp {
@@ -1019,23 +1018,20 @@ struct gfar_priv_grp {
struct gfar_private *priv;
struct gfar __iomem *regs;
unsigned int grp_id;
- unsigned long rx_bit_map;
- unsigned long tx_bit_map;
- unsigned long num_tx_queues;
unsigned long num_rx_queues;
+ unsigned long rx_bit_map;
+ /* cacheline 3 */
unsigned int rstat;
unsigned int tstat;
- unsigned int imask;
- unsigned int ievent;
- unsigned int interruptTransmit;
- unsigned int interruptReceive;
- unsigned int interruptError;
-
- char int_name_tx[GFAR_INT_NAME_MAX];
- char int_name_rx[GFAR_INT_NAME_MAX];
- char int_name_er[GFAR_INT_NAME_MAX];
+ unsigned long num_tx_queues;
+ unsigned long tx_bit_map;
+
+ struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
};
+#define gfar_irq(grp, ID) \
+ ((grp)->irqinfo[GFAR_##ID])
+
enum gfar_errata {
GFAR_ERRATA_74 = 0x01,
GFAR_ERRATA_76 = 0x02,
@@ -1053,28 +1049,65 @@ enum gfar_errata {
* the buffer descriptor determines the actual condition.
*/
struct gfar_private {
-
- /* Indicates how many tx, rx queues are enabled */
- unsigned int num_tx_queues;
unsigned int num_rx_queues;
- unsigned int num_grps;
- unsigned int mode;
- /* The total tx and rx ring size for the enabled queues */
- unsigned int total_tx_ring_size;
- unsigned int total_rx_ring_size;
-
- struct device_node *node;
+ struct device *dev;
struct net_device *ndev;
- struct platform_device *ofdev;
enum gfar_errata errata;
+ unsigned int rx_buffer_size;
+
+ u16 uses_rxfcb;
+ u16 padding;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
- struct gfar_priv_grp gfargrp[MAXGROUPS];
struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+ u32 device_flags;
+
+ unsigned int mode;
+ unsigned int num_tx_queues;
+ unsigned int num_grps;
+
+ /* Network Statistics */
+ struct gfar_extra_stats extra_stats;
+
+ /* PHY stuff */
+ phy_interface_t interface;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ /* Bitfield update lock */
+ spinlock_t bflock;
+
+ uint32_t msg_enable;
+
+ struct work_struct reset_task;
+
+ struct platform_device *ofdev;
+ unsigned char
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+ /* Wake-on-LAN enabled */
+ wol_en:1,
+ /* Enable priority based Tx scheduling in Hw */
+ prio_sched_en:1;
+
+ /* The total tx and rx ring size for the enabled queues */
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
/* RX per device parameters */
- unsigned int rx_buffer_size;
unsigned int rx_stash_size;
unsigned int rx_stash_index;
@@ -1093,39 +1126,6 @@ struct gfar_private {
unsigned int fifo_starve;
unsigned int fifo_starve_off;
- /* Bitfield update lock */
- spinlock_t bflock;
-
- phy_interface_t interface;
- struct device_node *phy_node;
- struct device_node *tbi_node;
- u32 device_flags;
- unsigned char
- extended_hash:1,
- bd_stash_en:1,
- rx_filer_enable:1,
- wol_en:1, /* Wake-on-LAN enabled */
- prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
- unsigned short padding;
-
- /* PHY stuff */
- struct phy_device *phydev;
- struct mii_bus *mii_bus;
- int oldspeed;
- int oldduplex;
- int oldlink;
-
- uint32_t msg_enable;
-
- struct work_struct reset_task;
-
- /* Network Statistics */
- struct gfar_extra_stats extra_stats;
-
- /* HW time stamping enabled flag */
- int hwts_rx_en;
- int hwts_tx_en;
-
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1138,16 +1138,16 @@ static inline int gfar_has_errata(struct gfar_private *priv,
return priv->errata & err;
}
-static inline u32 gfar_read(volatile unsigned __iomem *addr)
+static inline u32 gfar_read(unsigned __iomem *addr)
{
u32 val;
- val = in_be32(addr);
+ val = ioread32be(addr);
return val;
}
-static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
+static inline void gfar_write(unsigned __iomem *addr, u32 val)
{
- out_be32(addr, val);
+ iowrite32be(val, addr);
}
static inline void gfar_write_filer(struct gfar_private *priv,
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index ab6762caa957..75e89acf4912 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -149,20 +149,17 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
int i;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u64 *extra = (u64 *) & priv->extra_stats;
+ atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ buf[i] = atomic64_read(&extra[i]);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
- struct gfar_stats *stats = (struct gfar_stats *) buf;
-
- for (i = 0; i < GFAR_RMON_LEN; i++)
- stats->rmon[i] = (u64) gfar_read(&rmon[i]);
- for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
- stats->extra[i] = extra[i];
- } else
- for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
- buf[i] = extra[i];
+ for (; i < GFAR_STATS_LEN; i++, rmon++)
+ buf[i] = (u64) gfar_read(rmon);
+ }
}
static int gfar_sset_count(struct net_device *dev, int sset)
@@ -184,10 +181,11 @@ static int gfar_sset_count(struct net_device *dev, int sset)
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
- strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
- strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
- strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, gfar_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
@@ -715,12 +713,11 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
int j = MAX_FILER_IDX, l = 0x0;
int ret = 1;
- local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
- local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
if (!local_rqfpr || !local_rqfcr) {
- pr_err("Out of memory\n");
ret = 0;
goto err;
}
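
The extra-stats counters are now atomic64_t, so the interrupt and NAPI paths can bump them with atomic64_inc() without extra locking, while gfar_fill_stats() above walks the struct as a flat array of atomic64_t. A compact sketch of that read side, with an abbreviated ex_extra_stats standing in for the real struct:

#include <linux/atomic.h>
#include <linux/types.h>

struct ex_extra_stats {
	atomic64_t rx_trunc;
	atomic64_t rx_overrun;
	atomic64_t tx_underrun;
};

#define EX_STATS_LEN	(sizeof(struct ex_extra_stats) / sizeof(atomic64_t))

static void ex_fill_stats(struct ex_extra_stats *stats, u64 *buf)
{
	/* valid because the struct contains nothing but atomic64_t members */
	atomic64_t *extra = (atomic64_t *)stats;
	int i;

	for (i = 0; i < EX_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);
}
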
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 37b035306013..1ebf7128ec04 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -350,10 +350,10 @@ static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "QUICC ENGINE", 32);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
drvinfo->eedump_len = 0;
drvinfo->regdump_len = uec_get_regs_len(netdev);
}
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index dffee9d44fd5..6231bc02b964 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_FUJITSU
bool "Fujitsu devices"
default y
- depends on ISA || PCMCIA || ((ISA || MCA_LEGACY) && EXPERIMENTAL)
+ depends on ISA || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -17,18 +17,6 @@ config NET_VENDOR_FUJITSU
if NET_VENDOR_FUJITSU
-config AT1700
- tristate "AT1700/1720 support (EXPERIMENTAL)"
- depends on (ISA || MCA_LEGACY) && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called at1700.
-
config PCMCIA_FMVJ18X
tristate "Fujitsu FMV-J18x PCMCIA support"
depends on PCMCIA
@@ -40,15 +28,4 @@ config PCMCIA_FMVJ18X
To compile this driver as a module, choose M here: the module will be
called fmvj18x_cs. If unsure, say N.
-config ETH16I
- tristate "ICL EtherTeam 16i/32 support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called eth16i.
-
endif # NET_VENDOR_FUJITSU
diff --git a/drivers/net/ethernet/fujitsu/Makefile b/drivers/net/ethernet/fujitsu/Makefile
index 2730ae67d3aa..21561fdcc69f 100644
--- a/drivers/net/ethernet/fujitsu/Makefile
+++ b/drivers/net/ethernet/fujitsu/Makefile
@@ -2,6 +2,4 @@
# Makefile for the Fujitsu network device drivers.
#
-obj-$(CONFIG_AT1700) += at1700.o
-obj-$(CONFIG_ETH16I) += eth16i.o
obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o
diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
deleted file mode 100644
index 4b80dc4531ad..000000000000
--- a/drivers/net/ethernet/fujitsu/at1700.c
+++ /dev/null
@@ -1,791 +0,0 @@
-/* at1700.c: A network device driver for the Allied Telesis AT1700.
-
- Written 1993-98 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is a device driver for the Allied Telesis AT1700, and
- Fujitsu FMV-181/182/181A/182A/183/184/183A/184A, which are
- straight-forward Fujitsu MB86965 implementations.
-
- Modification for Fujitsu FMV-18X cards is done by Yutaka Tamiya
- (tamy@flab.fujitsu.co.jp).
-
- Sources:
- The Fujitsu MB86965 datasheet.
-
- After the initial version of this driver was written Gerry Sawkins of
- ATI provided their EEPROM configuration code header file.
- Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
-
- MCA bus (AT1720) support (now deleted) by Rene Schmit <rene@bss.lu>
-
- Bugs:
- The MB86965 has a design flaw that makes all probes unreliable. Not
- only is it difficult to detect, it also moves around in I/O space in
- response to inb()s from other device probes!
-*/
-
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-static char version[] __initdata =
- "at1700.c:v1.16 9/11/06 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#define DRV_NAME "at1700"
-
-/* Tunable parameters. */
-
-/* When to switch from the 64-entry multicast filter to Rx-all-multicast. */
-#define MC_FILTERBREAK 64
-
-/* These unusual address orders are used to verify the CONFIG register. */
-
-static int fmv18x_probe_list[] __initdata = {
- 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0
-};
-
-/*
- * ISA
- */
-
-static unsigned at1700_probe_list[] __initdata = {
- 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
-};
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-typedef unsigned char uchar;
-
-/* Information that need to be kept for each board. */
-struct net_local {
- spinlock_t lock;
- unsigned char mc_filter[8];
- uint jumpered:1; /* Set iff the board has jumper config. */
- uint tx_started:1; /* Packets are on the Tx queue. */
- uint tx_queue_ready:1; /* Tx queue is ready to be sent. */
- uint rx_started:1; /* Packets are Rxing. */
- uchar tx_queue; /* Number of packet on the Tx queue. */
- ushort tx_queue_len; /* Current length of the Tx queue. */
-};
-
-
-/* Offsets from the base address. */
-#define STATUS 0
-#define TX_STATUS 0
-#define RX_STATUS 1
-#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
-#define RX_INTR 3
-#define TX_MODE 4
-#define RX_MODE 5
-#define CONFIG_0 6 /* Misc. configuration settings. */
-#define CONFIG_1 7
-/* Run-time register bank 2 definitions. */
-#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
-#define TX_START 10
-#define COL16CNTL 11 /* Control Reg for 16 collisions */
-#define MODE13 13
-#define RX_CTRL 14
-/* Configuration registers only on the '865A/B chips. */
-#define EEPROM_Ctrl 16
-#define EEPROM_Data 17
-#define CARDSTATUS 16 /* FMV-18x Card Status */
-#define CARDSTATUS1 17 /* FMV-18x Card Status */
-#define IOCONFIG 18 /* Either read the jumper, or move the I/O. */
-#define IOCONFIG1 19
-#define SAPROM 20 /* The station address PROM, if no EEPROM. */
-#define MODE24 24
-#define RESET 31 /* Write to reset some parts of the chip. */
-#define AT1700_IO_EXTENT 32
-#define PORT_OFFSET(o) (o)
-
-
-#define TX_TIMEOUT (HZ/10)
-
-
-/* Index to functions, as function prototypes. */
-
-static int at1700_probe1(struct net_device *dev, int ioaddr);
-static int read_eeprom(long ioaddr, int location);
-static int net_open(struct net_device *dev);
-static netdev_tx_t net_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t net_interrupt(int irq, void *dev_id);
-static void net_rx(struct net_device *dev);
-static int net_close(struct net_device *dev);
-static void set_rx_mode(struct net_device *dev);
-static void net_tx_timeout (struct net_device *dev);
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
- */
-
-static int io = 0x260;
-
-static int irq;
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, NULL);
- release_region(dev->base_addr, AT1700_IO_EXTENT);
-}
-
-struct net_device * __init at1700_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- } else {
- dev->base_addr = io;
- dev->irq = irq;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = at1700_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = at1700_probe_list; *port; port++) {
- if (at1700_probe1(dev, *port) == 0)
- break;
- dev->irq = irq;
- }
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops at1700_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = net_send_packet,
- .ndo_set_rx_mode = set_rx_mode,
- .ndo_tx_timeout = net_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
- "signature", the default bit pattern after a reset. This *doesn't* work --
- there is no way to reset the bus interface without a complete power-cycle!
-
- It turns out that ATI came to the same conclusion I did: the only thing
- that can be done is checking a few bits and then diving right into an
- EEPROM read. */
-
-static int __init at1700_probe1(struct net_device *dev, int ioaddr)
-{
- static const char fmv_irqmap[4] = {3, 7, 10, 15};
- static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
- static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
- unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
- int ret = -ENODEV;
- struct net_local *lp = netdev_priv(dev);
-
- if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Resetting the chip doesn't reset the ISA interface, so don't bother.
- That means we have to be careful with the register values we probe
- for.
- */
-#ifdef notdef
- printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n",
- ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
- read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
-#endif
- /* We must check for the EEPROM-config boards first, else accessing
- IOCONFIG0 will move the board! */
- if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr &&
- read_eeprom(ioaddr, 4) == 0x0000 &&
- (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
- is_at1700 = 1;
- else if (inb(ioaddr + SAPROM ) == 0x00 &&
- inb(ioaddr + SAPROM + 1) == 0x00 &&
- inb(ioaddr + SAPROM + 2) == 0x0e)
- is_fmv18x = 1;
- else {
- goto err_out;
- }
-
- /* Reset the internal state machines. */
- outb(0, ioaddr + RESET);
-
- if (is_at1700) {
- irq = at1700_irqmap[(read_eeprom(ioaddr, 12)&0x04)
- | (read_eeprom(ioaddr, 0)>>14)];
- } else {
- /* Check PnP mode for FMV-183/184/183A/184A. */
- /* This PnP routine is very poor. IO and IRQ should be known. */
- if (inb(ioaddr + CARDSTATUS1) & 0x20) {
- irq = dev->irq;
- for (i = 0; i < 8; i++) {
- if (irq == fmv_irqmap_pnp[i])
- break;
- }
- if (i == 8) {
- goto err_out;
- }
- } else {
- if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr)
- goto err_out;
- irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
- }
- }
-
- printk("%s: %s found at %#3x, IRQ %d, address ", dev->name,
- is_at1700 ? "AT1700" : "FMV-18X", ioaddr, irq);
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- if (is_at1700) {
- for(i = 0; i < 3; i++) {
- unsigned short eeprom_val = read_eeprom(ioaddr, 4+i);
- ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val);
- }
- } else {
- for(i = 0; i < 6; i++) {
- unsigned char val = inb(ioaddr + SAPROM + i);
- dev->dev_addr[i] = val;
- }
- }
- printk("%pM", dev->dev_addr);
-
- /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
- rather than 150 ohm shielded twisted pair compensation.
- 0x0000 == auto-sense the interface
- 0x0800 == use TP interface
- 0x1800 == use coax interface
- */
- {
- const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"};
- if (is_at1700) {
- ushort setup_value = read_eeprom(ioaddr, 12);
- dev->if_port = setup_value >> 8;
- } else {
- ushort setup_value = inb(ioaddr + CARDSTATUS);
- switch (setup_value & 0x07) {
- case 0x01: /* 10base5 */
- case 0x02: /* 10base2 */
- dev->if_port = 0x18; break;
- case 0x04: /* 10baseT */
- dev->if_port = 0x08; break;
- default: /* auto-sense */
- dev->if_port = 0x00; break;
- }
- }
- printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
- }
-
- /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
- bus access, two 4K Tx queues, and disabled Tx and Rx. */
- outb(0xda, ioaddr + CONFIG_0);
-
- /* Set the station address in bank zero. */
- outb(0x00, ioaddr + CONFIG_1);
- for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + PORT_OFFSET(8 + i));
-
- /* Switch to bank 1 and set the multicast table to accept none. */
- outb(0x04, ioaddr + CONFIG_1);
- for (i = 0; i < 8; i++)
- outb(0x00, ioaddr + PORT_OFFSET(8 + i));
-
-
- /* Switch to bank 2 */
- /* Lock our I/O address, and set manual processing mode for 16 collisions. */
- outb(0x08, ioaddr + CONFIG_1);
- outb(dev->if_port, ioaddr + MODE13);
- outb(0x00, ioaddr + COL16CNTL);
-
- if (net_debug)
- printk(version);
-
- dev->netdev_ops = &at1700_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- spin_lock_init(&lp->lock);
-
- lp->jumpered = is_fmv18x;
- /* Snarf the interrupt vector now. */
- ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev);
- if (ret) {
- printk(KERN_ERR "AT1700 at %#3x is unusable due to a "
- "conflict on IRQ %d.\n",
- ioaddr, irq);
- goto err_out;
- }
-
- return 0;
-
-err_out:
- release_region(ioaddr, AT1700_IO_EXTENT);
- return ret;
-}
-
-
-/* EEPROM_Ctrl bits. */
-#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */
-#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */
-#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */
-#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */
-
-/* The EEPROM commands include the alway-set leading bit. */
-#define EE_WRITE_CMD (5 << 6)
-#define EE_READ_CMD (6 << 6)
-#define EE_ERASE_CMD (7 << 6)
-
-static int __init read_eeprom(long ioaddr, int location)
-{
- int i;
- unsigned short retval = 0;
- long ee_addr = ioaddr + EEPROM_Ctrl;
- long ee_daddr = ioaddr + EEPROM_Data;
- int read_cmd = location | EE_READ_CMD;
-
- /* Shift the read command bits out. */
- for (i = 9; i >= 0; i--) {
- short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
- outb(EE_CS, ee_addr);
- outb(dataval, ee_daddr);
- outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
- }
- outb(EE_DATA_WRITE, ee_daddr);
- for (i = 16; i > 0; i--) {
- outb(EE_CS, ee_addr);
- outb(EE_CS | EE_SHIFT_CLK, ee_addr);
- retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0);
- }
-
- /* Terminate the EEPROM access. */
- outb(EE_CS, ee_addr);
- outb(EE_SHIFT_CLK, ee_addr);
- outb(0, ee_addr);
- return retval;
-}
-
-
-
-static int net_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
- bus access, and two 4K Tx queues. */
- outb(0x5a, ioaddr + CONFIG_0);
-
- /* Powerup, switch to register bank 2, and enable the Rx and Tx. */
- outb(0xe8, ioaddr + CONFIG_1);
-
- lp->tx_started = 0;
- lp->tx_queue_ready = 1;
- lp->rx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- /* Turn on hardware Tx and Rx interrupts. */
- outb(0x82, ioaddr + TX_INTR);
- outb(0x81, ioaddr + RX_INTR);
-
- /* Enable the IRQ on boards of fmv18x it is feasible. */
- if (lp->jumpered) {
- outb(0x80, ioaddr + IOCONFIG1);
- }
-
- netif_start_queue(dev);
- return 0;
-}
-
-static void net_tx_timeout (struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- printk ("%s: transmit timed out with status %04x, %s?\n", dev->name,
- inw (ioaddr + STATUS), inb (ioaddr + TX_STATUS) & 0x80
- ? "IRQ conflict" : "network cable problem");
- printk ("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
- dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE),
- inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START),
- inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL));
- dev->stats.tx_errors++;
- /* ToDo: We should try to restart the adaptor... */
- outw(0xffff, ioaddr + MODE24);
- outw (0xffff, ioaddr + TX_STATUS);
- outb (0x5a, ioaddr + CONFIG_0);
- outb (0xe8, ioaddr + CONFIG_1);
- outw (0x8182, ioaddr + TX_INTR);
- outb (0x00, ioaddr + TX_START);
- outb (0x03, ioaddr + COL16CNTL);
-
- dev->trans_start = jiffies; /* prevent tx timeout */
-
- lp->tx_started = 0;
- lp->tx_queue_ready = 1;
- lp->rx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- netif_wake_queue(dev);
-}
-
-
-static netdev_tx_t net_send_packet (struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- short len = skb->len;
- unsigned char *buf = skb->data;
- static u8 pad[ETH_ZLEN];
-
- netif_stop_queue (dev);
-
- /* We may not start transmitting unless we finish transferring
- a packet into the Tx queue. During executing the following
- codes we possibly catch a Tx interrupt. Thus we flag off
- tx_queue_ready, so that we prevent the interrupt routine
- (net_interrupt) to start transmitting. */
- lp->tx_queue_ready = 0;
- {
- outw (length, ioaddr + DATAPORT);
- /* Packet data */
- outsw (ioaddr + DATAPORT, buf, len >> 1);
- /* Check for dribble byte */
- if (len & 1) {
- outw(skb->data[skb->len-1], ioaddr + DATAPORT);
- len++;
- }
- /* Check for packet padding */
- if (length != skb->len)
- outsw(ioaddr + DATAPORT, pad, (length - len + 1) >> 1);
-
- lp->tx_queue++;
- lp->tx_queue_len += length + 2;
- }
- lp->tx_queue_ready = 1;
-
- if (lp->tx_started == 0) {
- /* If the Tx is idle, always trigger a transmit. */
- outb (0x80 | lp->tx_queue, ioaddr + TX_START);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- netif_start_queue (dev);
- } else if (lp->tx_queue_len < 4096 - 1502)
- /* Yes, there is room for one more packet. */
- netif_start_queue (dev);
- dev_kfree_skb (skb);
-
- return NETDEV_TX_OK;
-}
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t net_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status;
- int handled = 0;
-
- if (dev == NULL) {
- printk ("at1700_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock (&lp->lock);
-
- status = inw(ioaddr + TX_STATUS);
- outw(status, ioaddr + TX_STATUS);
-
- if (net_debug > 4)
- printk("%s: Interrupt with status %04x.\n", dev->name, status);
- if (lp->rx_started == 0 &&
- (status & 0xff00 || (inb(ioaddr + RX_MODE) & 0x40) == 0)) {
- /* Got a packet(s).
- We cannot execute net_rx more than once at the same time for
- the same device. During executing net_rx, we possibly catch a
- Tx interrupt. Thus we flag on rx_started, so that we prevent
- the interrupt routine (net_interrupt) to dive into net_rx
- again. */
- handled = 1;
- lp->rx_started = 1;
- outb(0x00, ioaddr + RX_INTR); /* Disable RX intr. */
- net_rx(dev);
- outb(0x81, ioaddr + RX_INTR); /* Enable RX intr. */
- lp->rx_started = 0;
- }
- if (status & 0x00ff) {
- handled = 1;
- if (status & 0x02) {
- /* More than 16 collisions occurred */
- if (net_debug > 4)
- printk("%s: 16 Collision occur during Txing.\n", dev->name);
- /* Cancel sending a packet. */
- outb(0x03, ioaddr + COL16CNTL);
- dev->stats.collisions++;
- }
- if (status & 0x82) {
- dev->stats.tx_packets++;
- /* The Tx queue has any packets and is not being
- transferred a packet from the host, start
- transmitting. */
- if (lp->tx_queue && lp->tx_queue_ready) {
- outb(0x80 | lp->tx_queue, ioaddr + TX_START);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
- netif_wake_queue (dev);
- } else {
- lp->tx_started = 0;
- netif_wake_queue (dev);
- }
- }
- }
-
- spin_unlock (&lp->lock);
- return IRQ_RETVAL(handled);
-}
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void
-net_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int boguscount = 5;
-
- while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
- ushort status = inw(ioaddr + DATAPORT);
- ushort pkt_len = inw(ioaddr + DATAPORT);
-
- if (net_debug > 4)
- printk("%s: Rxing packet mode %02x status %04x.\n",
- dev->name, inb(ioaddr + RX_MODE), status);
-#ifndef final_version
- if (status == 0) {
- outb(0x05, ioaddr + RX_CTRL);
- break;
- }
-#endif
-
- if ((status & 0xF0) != 0x20) { /* There was an error. */
- dev->stats.rx_errors++;
- if (status & 0x08) dev->stats.rx_length_errors++;
- if (status & 0x04) dev->stats.rx_frame_errors++;
- if (status & 0x02) dev->stats.rx_crc_errors++;
- if (status & 0x01) dev->stats.rx_over_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- if (pkt_len > 1550) {
- printk("%s: The AT1700 claimed a very large packet, size %d.\n",
- dev->name, pkt_len);
- /* Prime the FIFO and then flush the packet. */
- inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
- outb(0x05, ioaddr + RX_CTRL);
- dev->stats.rx_errors++;
- break;
- }
- skb = netdev_alloc_skb(dev, pkt_len + 3);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet (len %d).\n",
- dev->name, pkt_len);
- /* Prime the FIFO and then flush the packet. */
- inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
- outb(0x05, ioaddr + RX_CTRL);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb,2);
-
- insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
- skb->protocol=eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- if (--boguscount <= 0)
- break;
- }
-
- /* If any worth-while packets have been received, dev_rint()
- has done a mark_bh(NET_BH) for us and will work on them
- when we get to the bottom-half routine. */
- {
- int i;
- for (i = 0; i < 20; i++) {
- if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
- break;
- inw(ioaddr + DATAPORT); /* dummy status read */
- outb(0x05, ioaddr + RX_CTRL);
- }
-
- if (net_debug > 5)
- printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
- dev->name, inb(ioaddr + RX_MODE), i);
- }
-}
-
-/* The inverse routine to net_open(). */
-static int net_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- /* Set configuration register 0 to disable Tx and Rx. */
- outb(0xda, ioaddr + CONFIG_0);
-
- /* No statistic counters on the chip to update. */
-
- /* Disable the IRQ on FMV-18x boards where it is feasible. */
- if (lp->jumpered)
- outb(0x00, ioaddr + IOCONFIG1);
-
- /* Power-down the chip. Green, green, green! */
- outb(0x00, ioaddr + CONFIG_1);
- return 0;
-}
-
-/*
- Set the multicast/promiscuous mode for this adaptor.
-*/
-
-static void
-set_rx_mode(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- unsigned char mc_filter[8]; /* Multicast hash filter */
- unsigned long flags;
-
- if (dev->flags & IFF_PROMISC) {
- memset(mc_filter, 0xff, sizeof(mc_filter));
- outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (netdev_mc_empty(dev)) {
- memset(mc_filter, 0x00, sizeof(mc_filter));
- outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
- } else {
- struct netdev_hw_addr *ha;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- unsigned int bit =
- ether_crc_le(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit >> 3] |= (1 << bit);
- }
- outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
- }
-
- spin_lock_irqsave (&lp->lock, flags);
- if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
- int i;
- int saved_bank = inw(ioaddr + CONFIG_0);
- /* Switch to bank 1 and set the multicast table. */
- outw((saved_bank & ~0x0C00) | 0x0480, ioaddr + CONFIG_0);
- for (i = 0; i < 8; i++)
- outb(mc_filter[i], ioaddr + PORT_OFFSET(8 + i));
- memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
- outw(saved_bank, ioaddr + CONFIG_0);
- }
- spin_unlock_irqrestore (&lp->lock, flags);
-}
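The hash computation in set_rx_mode() above maps each multicast address to one of the 64 bits of the 8-byte filter: the top six bits of the little-endian Ethernet CRC select the bit, and the upper three of those select the filter byte. Below is a minimal standalone sketch of that mapping. It reimplements the CRC the way the kernel's ether_crc_le() computes it (polynomial 0xEDB88320, LSB first) and masks the shift count to the low three bits for a well-defined C shift, so the exact expression differs slightly from the driver line above; the sample address is purely illustrative.

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Bitwise little-endian Ethernet CRC, as the kernel's ether_crc_le() computes it. */
static uint32_t ether_crc_le(int length, const unsigned char *data)
{
	uint32_t crc = 0xffffffff;

	while (length--) {
		unsigned char octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc >> 1) ^ (((crc ^ octet) & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	/* Illustrative multicast address (all-hosts group, 01:00:5e:00:00:01). */
	unsigned char addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned char mc_filter[8] = { 0 };

	/* The top six CRC bits pick one of the 64 filter bits. */
	unsigned int bit = ether_crc_le(ETH_ALEN, addr) >> 26;

	/* bit >> 3 selects the filter byte, bit & 7 the bit inside it. */
	mc_filter[bit >> 3] |= 1 << (bit & 7);

	printf("hash bit %u -> mc_filter[%u] = 0x%02x\n",
	       bit, bit >> 3, (unsigned int)mc_filter[bit >> 3]);
	return 0;
}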
-
-#ifdef MODULE
-static struct net_device *dev_at1700;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(net_debug, int, 0);
-MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
-MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
-MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
-
-static int __init at1700_module_init(void)
-{
- if (io == 0)
- printk("at1700: You should not use auto-probing with insmod!\n");
- dev_at1700 = at1700_probe(-1);
- if (IS_ERR(dev_at1700))
- return PTR_ERR(dev_at1700);
- return 0;
-}
-
-static void __exit at1700_module_exit(void)
-{
- unregister_netdev(dev_at1700);
- cleanup_card(dev_at1700);
- free_netdev(dev_at1700);
-}
-module_init(at1700_module_init);
-module_exit(at1700_module_exit);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/fujitsu/eth16i.c b/drivers/net/ethernet/fujitsu/eth16i.c
deleted file mode 100644
index a992d1f7e0d2..000000000000
--- a/drivers/net/ethernet/fujitsu/eth16i.c
+++ /dev/null
@@ -1,1483 +0,0 @@
-/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux
-
- Written 1994-1999 by Mika Kuoppala
-
- Copyright (C) 1994-1999 by Mika Kuoppala
- Based on skeleton.c and heavily on at1700.c by Donald Becker
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as miku@iki.fi
-
- This driver supports the following cards:
- - ICL EtherTeam 16i
- - ICL EtherTeam 32 EISA
- (Uses true 32 bit transfers rather than 16i compatibility mode)
-
- Example Module usage:
- insmod eth16i.o io=0x2a0 mediatype=bnc
-
- mediatype can be one of the following: bnc,tp,dix,auto,eprom
-
- 'auto' will try to autoprobe mediatype.
- 'eprom' will use whatever type is defined in the EEPROM.
-
- I have benchmarked the driver with a PII/300MHz as an ftp client
- and a 486/33MHz as an ftp server. Top speed was 1128.37 kilobytes/sec.
-
- Sources:
- - skeleton.c a sample network driver core for linux,
- written by Donald Becker <becker@scyld.com>
- - at1700.c a driver for Allied Telesis AT1700, written
- by Donald Becker.
- - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i
- written by Markku Viima
- - The Fujitsu MB86965 databook.
-
- The author thanks the following persons for their valuable assistance:
- Markku Viima (ICL)
- Ari Valve (ICL)
- Donald Becker
- Kurt Huwig <kurt@huwig.de>
-
- Revision history:
-
- Version Date Description
-
- 0.01 15.12-94 Initial version (card detection)
- 0.02 23.01-95 Interrupt is now hooked correctly
- 0.03 01.02-95 Rewrote initialization part
- 0.04 07.02-95 Base skeleton done...
- Made a few changes to signature checking
- to make it a bit reliable.
- - fixed bug in tx_buf mapping
- - fixed bug in initialization (DLC_EN
- wasn't enabled when initialization
- was done.)
- 0.05 08.02-95 If there were more than one packet to send,
- transmit was jammed due to invalid
- register write...now fixed
- 0.06 19.02-95 Rewrote interrupt handling
- 0.07 13.04-95 Wrote EEPROM read routines
- Card configuration now set according to
- data read from EEPROM
- 0.08 23.06-95 Wrote part that tries to probe used interface
- port if AUTO is selected
-
- 0.09 01.09-95 Added module support
-
- 0.10 04.09-95 Fixed receive packet allocation to work
- with kernels > 1.3.x
-
- 0.20 20.09-95 Added support for EtherTeam32 EISA
-
- 0.21 17.10-95 Removed the unnecessary extern
- init_etherdev() declaration. Some
- other cleanups.
-
- 0.22 22.02-96 Receive buffer was not flushed
- correctly when faulty packet was
- received. Now fixed.
-
- 0.23 26.02-96 Made resetting the adapter
- more reliable.
-
- 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx
-
- 0.25 22.05-96 kfree() was missing from cleanup_module.
-
- 0.26 11.06-96 Sometimes card was not found by
- check_signature(). Now made more reliable.
-
- 0.27 23.06-96 Oops. 16 consecutive collisions halted
- adapter. Now will try to retransmit
- MAX_COL_16 times before finally giving up.
-
- 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq
-
- 0.29 29.10-97 Multiple card support for module users
-
- 0.30 30.10-97 Fixed irq allocation bug.
- (request_irq moved from probe to open)
-
- 0.30a 21.08-98 Card detection made more relaxed. Driver
- had problems with some TCP/IP-PROM boots
- to find the card. Suggested by
- Kurt Huwig <kurt@huwig.de>
-
- 0.31 28.08-98 Media interface port can now be selected
- with module parameters or kernel
- boot parameters.
-
- 0.32 31.08-98 IRQ was never freed if open/close
- pair wasn't called. Now fixed.
-
- 0.33 10.09-98 When eth16i_open() was called after
- eth16i_close() chip never recovered.
- Now more shallow reset is made on
- close.
-
- 0.34 29.06-99 Fixed one bad #ifdef.
- Changed ioaddr -> io for consistency
-
- 0.35 01.07-99 transmit,-receive bytes were never
- updated in stats.
-
- Bugs:
- In some cases the media interface autoprobing code doesn't find
- the correct interface type. In that case you can
- manually choose the interface type in DOS with E16IC.EXE, the
- configuration software for EtherTeam16i and EtherTeam32 cards.
- The same applies to the IRQ setting. You cannot use a module
- parameter to configure the card's IRQ (yet).
-
- To do:
- - Real multicast support
- - Rewrite the media interface autoprobing code. It's _horrible_!
- - Possibly merge all the MB86965-specific code into an external
- module for use by eth16i.c and Donald's at1700.c
- - IRQ configuration with a module parameter. I will do
- this when I get enough information about setting the
- IRQ without the configuration utility.
-*/
-
-static char *version =
- "eth16i.c: v0.35 01-Jul-1999 Mika Kuoppala (miku@iki.fi)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-
-
-
-/* Few macros */
-#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr)))
-#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr)))
-
-/* This is the I/O address space for Etherteam 16i adapter. */
-#define ETH16I_IO_EXTENT 32
-
-/* Ticks before deciding that transmit has timed out */
-#define TX_TIMEOUT (400*HZ/1000)
-
-/* Maximum loop count when receiving packets */
-#define MAX_RX_LOOP 20
-
-/* Some interrupt masks */
-#define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */
-#define ETH16I_INTR_OFF 0x0000
-
-/* Buffers header status byte meanings */
-#define PKT_GOOD BIT(5)
-#define PKT_GOOD_RMT BIT(4)
-#define PKT_SHORT BIT(3)
-#define PKT_ALIGN_ERR BIT(2)
-#define PKT_CRC_ERR BIT(1)
-#define PKT_RX_BUF_OVERFLOW BIT(0)
-
-/* Transmit status register (DLCR0) */
-#define TX_STATUS_REG 0
-#define TX_DONE BIT(7)
-#define NET_BUSY BIT(6)
-#define TX_PKT_RCD BIT(5)
-#define CR_LOST BIT(4)
-#define TX_JABBER_ERR BIT(3)
-#define COLLISION BIT(2)
-#define COLLISIONS_16 BIT(1)
-
-/* Receive status register (DLCR1) */
-#define RX_STATUS_REG 1
-#define RX_PKT BIT(7) /* Packet received */
-#define BUS_RD_ERR BIT(6)
-#define SHORT_PKT_ERR BIT(3)
-#define ALIGN_ERR BIT(2)
-#define CRC_ERR BIT(1)
-#define RX_BUF_OVERFLOW BIT(0)
-
-/* Transmit Interrupt Enable Register (DLCR2) */
-#define TX_INTR_REG 2
-#define TX_INTR_DONE BIT(7)
-#define TX_INTR_COL BIT(2)
-#define TX_INTR_16_COL BIT(1)
-
-/* Receive Interrupt Enable Register (DLCR3) */
-#define RX_INTR_REG 3
-#define RX_INTR_RECEIVE BIT(7)
-#define RX_INTR_SHORT_PKT BIT(3)
-#define RX_INTR_CRC_ERR BIT(1)
-#define RX_INTR_BUF_OVERFLOW BIT(0)
-
-/* Transmit Mode Register (DLCR4) */
-#define TRANSMIT_MODE_REG 4
-#define LOOPBACK_CONTROL BIT(1)
-#define CONTROL_OUTPUT BIT(2)
-
-/* Receive Mode Register (DLCR5) */
-#define RECEIVE_MODE_REG 5
-#define RX_BUFFER_EMPTY BIT(6)
-#define ACCEPT_BAD_PACKETS BIT(5)
-#define RECEIVE_SHORT_ADDR BIT(4)
-#define ACCEPT_SHORT_PACKETS BIT(3)
-#define REMOTE_RESET BIT(2)
-
-#define ADDRESS_FILTER_MODE BIT(1) | BIT(0)
-#define REJECT_ALL 0
-#define ACCEPT_ALL 3
-#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */
-#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */
-
-/* Configuration Register 0 (DLCR6) */
-#define CONFIG_REG_0 6
-#define DLC_EN BIT(7)
-#define SRAM_CYCLE_TIME_100NS BIT(6)
-#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */
-#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */
-#define TBS1 BIT(3)
-#define TBS0 BIT(2)
-#define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */
-#define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */
-
-#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */
-#define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */
-#endif
-#define TX_BUF_1x2048 0
-#define TX_BUF_2x2048 1
-#define TX_BUF_2x4098 2
-#define TX_BUF_2x8192 3
-
-/* Configuration Register 1 (DLCR7) */
-#define CONFIG_REG_1 7
-#define POWERUP BIT(5)
-
-/* Transmit start register */
-#define TRANSMIT_START_REG 10
-#define TRANSMIT_START_RB 2
-#define TX_START BIT(7) /* Rest of register bit indicate*/
- /* number of packets in tx buffer*/
-/* Node ID registers (DLCR8-13) */
-#define NODE_ID_0 8
-#define NODE_ID_RB 0
-
-/* Hash Table registers (HT8-15) */
-#define HASH_TABLE_0 8
-#define HASH_TABLE_RB 1
-
-/* Buffer memory ports */
-#define BUFFER_MEM_PORT_LB 8
-#define DATAPORT BUFFER_MEM_PORT_LB
-#define BUFFER_MEM_PORT_HB 9
-
-/* 16 Collision control register (BMPR11) */
-#define COL_16_REG 11
-#define HALT_ON_16 0x00
-#define RETRANS_AND_HALT_ON_16 0x02
-
- /* Maximum number of attempts to send after 16 consecutive collisions */
-#define MAX_COL_16 10
-
-/* DMA Burst and Transceiver Mode Register (BMPR13) */
-#define TRANSCEIVER_MODE_REG 13
-#define TRANSCEIVER_MODE_RB 2
-#define IO_BASE_UNLOCK BIT(7)
-#define LOWER_SQUELCH_TRESH BIT(6)
-#define LINK_TEST_DISABLE BIT(5)
-#define AUI_SELECT BIT(4)
-#define DIS_AUTO_PORT_SEL BIT(3)
-
-/* Filter Self Receive Register (BMPR14) */
-#define FILTER_SELF_RX_REG 14
-#define SKIP_RX_PACKET BIT(2)
-#define FILTER_SELF_RECEIVE BIT(0)
-
-/* EEPROM Control Register (BMPR 16) */
-#define EEPROM_CTRL_REG 16
-
-/* EEPROM Data Register (BMPR 17) */
-#define EEPROM_DATA_REG 17
-
-/* NMC93CSx6 EEPROM Control Bits */
-#define CS_0 0x00
-#define CS_1 0x20
-#define SK_0 0x00
-#define SK_1 0x40
-#define DI_0 0x00
-#define DI_1 0x80
-
-/* NMC93CSx6 EEPROM Instructions */
-#define EEPROM_READ 0x80
-
-/* NMC93CSx6 EEPROM Addresses */
-#define E_NODEID_0 0x02
-#define E_NODEID_1 0x03
-#define E_NODEID_2 0x04
-#define E_PORT_SELECT 0x14
- #define E_PORT_BNC 0x00
- #define E_PORT_DIX 0x01
- #define E_PORT_TP 0x02
- #define E_PORT_AUTO 0x03
- #define E_PORT_FROM_EPROM 0x04
-#define E_PRODUCT_CFG 0x30
-
-
-/* Macro to slow down io between EEPROM clock transitions */
-#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0)
-
-/* Jumperless Configuration Register (BMPR19) */
-#define JUMPERLESS_CONFIG 19
-
-/* ID ROM registers, writing to them also resets some parts of chip */
-#define ID_ROM_0 24
-#define ID_ROM_7 31
-#define RESET ID_ROM_0
-
-/* This is the I/O address list to be probed when seeking the card */
-static unsigned int eth16i_portlist[] __initdata = {
- 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
-};
-
-static unsigned int eth32i_portlist[] __initdata = {
- 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000,
- 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0
-};
-
-/* This is the Interrupt lookup table for Eth16i card */
-static unsigned int eth16i_irqmap[] __initdata = { 9, 10, 5, 15, 0 };
-#define NUM_OF_ISA_IRQS 4
-
-/* This is the Interrupt lookup table for Eth32i card */
-static unsigned int eth32i_irqmap[] __initdata = { 3, 5, 7, 9, 10, 11, 12, 15, 0 };
-#define EISA_IRQ_REG 0xc89
-#define NUM_OF_EISA_IRQS 8
-
-static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 };
-
-/* Use 0 for production, 1 for verification, >2 for debug */
-#ifndef ETH16I_DEBUG
-#define ETH16I_DEBUG 0
-#endif
-static unsigned int eth16i_debug = ETH16I_DEBUG;
-
-/* Information for each board */
-
-struct eth16i_local {
- unsigned char tx_started;
- unsigned char tx_buf_busy;
- unsigned short tx_queue; /* Number of packets in transmit buffer */
- unsigned short tx_queue_len;
- unsigned int tx_buf_size;
- unsigned long open_time;
- unsigned long tx_buffered_packets;
- unsigned long tx_buffered_bytes;
- unsigned long col_16;
- spinlock_t lock;
-};
-
-/* Function prototypes */
-
-static int eth16i_probe1(struct net_device *dev, int ioaddr);
-static int eth16i_check_signature(int ioaddr);
-static int eth16i_probe_port(int ioaddr);
-static void eth16i_set_port(int ioaddr, int porttype);
-static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l);
-static int eth16i_receive_probe_packet(int ioaddr);
-static int eth16i_get_irq(int ioaddr);
-static int eth16i_read_eeprom(int ioaddr, int offset);
-static int eth16i_read_eeprom_word(int ioaddr);
-static void eth16i_eeprom_cmd(int ioaddr, unsigned char command);
-static int eth16i_open(struct net_device *dev);
-static int eth16i_close(struct net_device *dev);
-static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev);
-static void eth16i_rx(struct net_device *dev);
-static void eth16i_timeout(struct net_device *dev);
-static irqreturn_t eth16i_interrupt(int irq, void *dev_id);
-static void eth16i_reset(struct net_device *dev);
-static void eth16i_timeout(struct net_device *dev);
-static void eth16i_skip_packet(struct net_device *dev);
-static void eth16i_multicast(struct net_device *dev);
-static void eth16i_select_regbank(unsigned char regbank, int ioaddr);
-static void eth16i_initialize(struct net_device *dev, int boot);
-
-#if 0
-static int eth16i_set_irq(struct net_device *dev);
-#endif
-
-#ifdef MODULE
-static ushort eth16i_parse_mediatype(const char* s);
-#endif
-
-static char cardname[] __initdata = "ICL EtherTeam 16i/32";
-
-static int __init do_eth16i_probe(struct net_device *dev)
-{
- int i;
- int ioaddr;
- int base_addr = dev->base_addr;
-
- if(eth16i_debug > 4)
- printk(KERN_DEBUG "Probing started for %s\n", cardname);
-
- if(base_addr > 0x1ff) /* Check only single location */
- return eth16i_probe1(dev, base_addr);
- else if(base_addr != 0) /* Don't probe at all */
- return -ENXIO;
-
- /* Seek card from the ISA io address space */
- for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++)
- if(eth16i_probe1(dev, ioaddr) == 0)
- return 0;
-
- /* Seek card from the EISA io address space */
- for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++)
- if(eth16i_probe1(dev, ioaddr) == 0)
- return 0;
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init eth16i_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct eth16i_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_eth16i_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops eth16i_netdev_ops = {
- .ndo_open = eth16i_open,
- .ndo_stop = eth16i_close,
- .ndo_start_xmit = eth16i_tx,
- .ndo_set_rx_mode = eth16i_multicast,
- .ndo_tx_timeout = eth16i_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- static unsigned version_printed;
- int retval;
-
- /* Let's grab the region */
- if (!request_region(ioaddr, ETH16I_IO_EXTENT, cardname))
- return -EBUSY;
-
- /*
- The MB86985 chip has one register which holds the I/O address
- at which the chip lies. First read this register and compare it
- to our current I/O address; if they match, this could
- be our chip.
- */
-
- if(ioaddr < 0x1000) {
- if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)]
- != ioaddr) {
- retval = -ENODEV;
- goto out;
- }
- }
-
- /* Now we will go a bit deeper and try to find the chip's signature */
-
- if(eth16i_check_signature(ioaddr) != 0) {
- retval = -ENODEV;
- goto out;
- }
-
- /*
- It seems that we have found an Ethernet chip at this particular
- ioaddr. The MB86985 chip has a feature whereby reading a
- certain register increases its I/O base address to the next
- configurable slot. Now that we have found the chip, the first thing
- to do is to make sure that the chip's ioaddr will hold still here.
- */
-
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(0x00, ioaddr + TRANSCEIVER_MODE_REG);
-
- outb(0x00, ioaddr + RESET); /* Reset some parts of chip */
- BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */
-
- if( (eth16i_debug & version_printed++) == 0)
- printk(KERN_INFO "%s", version);
-
- dev->base_addr = ioaddr;
- dev->irq = eth16i_get_irq(ioaddr);
-
- /* Try to obtain interrupt vector */
-
- if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, 0, cardname, dev))) {
- printk(KERN_WARNING "%s at %#3x, but is unusable due to conflicting IRQ %d.\n",
- cardname, ioaddr, dev->irq);
- goto out;
- }
-
- printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ",
- dev->name, cardname, ioaddr, dev->irq);
-
-
- /* Now we will have to lock the chip's io address */
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(0x38, ioaddr + TRANSCEIVER_MODE_REG);
-
- eth16i_initialize(dev, 1); /* Initialize rest of the chip's registers */
-
- /* Now let's save some energy by shutting down the chip ;) */
- BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
-
- /* Initialize the device structure */
- dev->netdev_ops = &eth16i_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- spin_lock_init(&lp->lock);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, ETH16I_IO_EXTENT);
- return retval;
-}
-
-
-static void eth16i_initialize(struct net_device *dev, int boot)
-{
- int ioaddr = dev->base_addr;
- int i, node_w = 0;
- unsigned char node_byte = 0;
-
- /* Setup station address */
- eth16i_select_regbank(NODE_ID_RB, ioaddr);
- for(i = 0 ; i < 3 ; i++) {
- unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i);
- ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val);
- }
-
- for(i = 0; i < 6; i++) {
- outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i);
- if(boot) {
- printk("%02x", inb(ioaddr + NODE_ID_0 + i));
- if(i != 5)
- printk(":");
- }
- }
-
- /* Now we will set multicast addresses to accept none */
- eth16i_select_regbank(HASH_TABLE_RB, ioaddr);
- for(i = 0; i < 8; i++)
- outb(0x00, ioaddr + HASH_TABLE_0 + i);
-
- /*
- Now let's disable the transmitter and receiver, set the buffer ram
- cycle time, bus width and buffer data path width. Also we shall
- set transmit buffer size and total buffer size.
- */
-
- eth16i_select_regbank(2, ioaddr);
-
- node_byte = 0;
- node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG);
-
- if( (node_w & 0xFF00) == 0x0800)
- node_byte |= BUFFER_WIDTH_8;
-
- node_byte |= SRAM_BS1;
-
- if( (node_w & 0x00FF) == 64)
- node_byte |= SRAM_BS0;
-
- node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2);
-
- outb(node_byte, ioaddr + CONFIG_REG_0);
-
- /* We shall halt the transmitting, if 16 collisions are detected */
- outb(HALT_ON_16, ioaddr + COL_16_REG);
-
-#ifdef MODULE
- /* if_port already set by init_module() */
-#else
- dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ?
- dev->mem_start : E_PORT_FROM_EPROM;
-#endif
-
- /* Set interface port type */
- if(boot) {
- static const char * const porttype[] = {
- "BNC", "DIX", "TP", "AUTO", "FROM_EPROM"
- };
-
- switch(dev->if_port)
- {
-
- case E_PORT_FROM_EPROM:
- dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT);
- break;
-
- case E_PORT_AUTO:
- dev->if_port = eth16i_probe_port(ioaddr);
- break;
-
- case E_PORT_BNC:
- case E_PORT_TP:
- case E_PORT_DIX:
- break;
- }
-
- printk(" %s interface.\n", porttype[dev->if_port]);
-
- eth16i_set_port(ioaddr, dev->if_port);
- }
-
- /* Set Receive Mode to normal operation */
- outb(MODE_2, ioaddr + RECEIVE_MODE_REG);
-}
-
-static int eth16i_probe_port(int ioaddr)
-{
- int i;
- int retcode;
- unsigned char dummy_packet[64];
-
- /* Powerup the chip */
- outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
-
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
-
- eth16i_select_regbank(NODE_ID_RB, ioaddr);
-
- for(i = 0; i < 6; i++) {
- dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i);
- dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i);
- }
-
- dummy_packet[12] = 0x00;
- dummy_packet[13] = 0x04;
- memset(dummy_packet + 14, 0, sizeof(dummy_packet) - 14);
-
- eth16i_select_regbank(2, ioaddr);
-
- for(i = 0; i < 3; i++) {
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
- eth16i_set_port(ioaddr, i);
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "Set port number %d\n", i);
-
- retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64);
- if(retcode == 0) {
- retcode = eth16i_receive_probe_packet(ioaddr);
- if(retcode != -1) {
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "Eth16i interface port found at %d\n", i);
- return i;
- }
- }
- else {
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n");
- }
- }
-
- if( eth16i_debug > 1)
- printk(KERN_DEBUG "Using default port\n");
-
- return E_PORT_BNC;
-}
-
-static void eth16i_set_port(int ioaddr, int porttype)
-{
- unsigned short temp = 0;
-
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG);
-
- temp |= DIS_AUTO_PORT_SEL;
-
- switch(porttype) {
-
- case E_PORT_BNC :
- temp |= AUI_SELECT;
- break;
-
- case E_PORT_TP :
- break;
-
- case E_PORT_DIX :
- temp |= AUI_SELECT;
- BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT);
- break;
- }
-
- outb(temp, ioaddr + TRANSCEIVER_MODE_REG);
-
- if(eth16i_debug > 1) {
- printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG));
- printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n",
- inb(ioaddr+TRANSCEIVER_MODE_REG));
- }
-}
-
-static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
-{
- unsigned long starttime;
-
- outb(0xff, ioaddr + TX_STATUS_REG);
-
- outw(l, ioaddr + DATAPORT);
- outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1);
-
- starttime = jiffies;
- outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
-
- while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
- return -1;
- }
- }
-
- return 0;
-}
-
-static int eth16i_receive_probe_packet(int ioaddr)
-{
- unsigned long starttime;
-
- starttime = jiffies;
-
- while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n");
- starttime = jiffies;
- while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "Timeout occurred waiting receive packet\n");
- return -1;
- }
- }
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "RECEIVE_PACKET\n");
- return 0; /* Found receive packet */
- }
- }
-
- if(eth16i_debug > 1) {
- printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG));
- printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
- }
-
- return 0; /* Return success */
-}
-
-#if 0
-static int eth16i_set_irq(struct net_device* dev)
-{
- const int ioaddr = dev->base_addr;
- const int irq = dev->irq;
- int i = 0;
-
- if(ioaddr < 0x1000) {
- while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq)
- i++;
-
- if(i < NUM_OF_ISA_IRQS) {
- u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
- cbyte = (cbyte & 0x3F) | (i << 6);
- outb(cbyte, ioaddr + JUMPERLESS_CONFIG);
- return 0;
- }
- }
- else {
- printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name);
- }
-
- return -1;
-
-}
-#endif
-
-static int __init eth16i_get_irq(int ioaddr)
-{
- unsigned char cbyte;
-
- if( ioaddr < 0x1000) {
- cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
- return eth16i_irqmap[((cbyte & 0xC0) >> 6)];
- } else { /* The card is EISA, so the method of getting the IRQ is different */
- unsigned short index = 0;
- cbyte = inb(ioaddr + EISA_IRQ_REG);
- while( (cbyte & 0x01) == 0) {
- cbyte = cbyte >> 1;
- index++;
- }
- return eth32i_irqmap[index];
- }
-}
-
-static int __init eth16i_check_signature(int ioaddr)
-{
- int i;
- unsigned char creg[4] = { 0 };
-
- for(i = 0; i < 4 ; i++) {
-
- creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i);
-
- if(eth16i_debug > 1)
- printk("eth16i: read signature byte %x at %x\n",
- creg[i],
- ioaddr + TRANSMIT_MODE_REG + i);
- }
-
- creg[0] &= 0x0F; /* Mask collision cnr */
- creg[2] &= 0x7F; /* Mask DCLEN bit */
-
-#if 0
- /*
- This was removed because the card was sometimes left in a state
- from which it could not be found any more. If a stricter check
- is needed, this still has to be fixed.
- */
- if( ! ((creg[0] == 0x06) && (creg[1] == 0x41)) ) {
- if(creg[1] != 0x42)
- return -1;
- }
-#endif
-
- if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) {
- creg[2] &= 0x40;
- creg[3] &= 0x03;
-
- if( !((creg[2] == 0x40) && (creg[3] == 0x00)) )
- return -1;
- }
-
- if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0)
- return -1;
-
- if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00)
- return -1;
-
- return 0;
-}
-
-static int eth16i_read_eeprom(int ioaddr, int offset)
-{
- int data = 0;
-
- eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset);
- outb(CS_1, ioaddr + EEPROM_CTRL_REG);
- data = eth16i_read_eeprom_word(ioaddr);
- outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
-
- return data;
-}
-
-static int eth16i_read_eeprom_word(int ioaddr)
-{
- int i;
- int data = 0;
-
- for(i = 16; i > 0; i--) {
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- data = (data << 1) |
- ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0);
-
- eeprom_slow_io();
- }
-
- return data;
-}
-
-static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
-{
- int i;
-
- outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
- outb(DI_0, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- outb(DI_1, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
-
- for(i = 7; i >= 0; i--) {
- short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 );
- outb(cmd, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- }
-}
-
-static int eth16i_open(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* Powerup the chip */
- outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
-
- /* Initialize the chip */
- eth16i_initialize(dev, 0);
-
- /* Set the transmit buffer size */
- lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03];
-
- if(eth16i_debug > 0)
- printk(KERN_DEBUG "%s: transmit buffer size %d\n",
- dev->name, lp->tx_buf_size);
-
- /* Now enable Transmitter and Receiver sections */
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
-
- /* Now switch to register bank 2, for run time operation */
- eth16i_select_regbank(2, ioaddr);
-
- lp->open_time = jiffies;
- lp->tx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- /* Turn on interrupts*/
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
-
- netif_start_queue(dev);
- return 0;
-}
-
-static int eth16i_close(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- eth16i_reset(dev);
-
- /* Turn off interrupts*/
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
- netif_stop_queue(dev);
-
- lp->open_time = 0;
-
- /* Disable transmit and receive */
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
-
- /* Reset the chip */
- /* outb(0xff, ioaddr + RESET); */
- /* outw(0xffff, ioaddr + TX_STATUS_REG); */
-
- outb(0x00, ioaddr + CONFIG_REG_1);
-
- return 0;
-}
-
-static void eth16i_timeout(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- /*
- If we get here, some higher level has decided that
- we are broken. There should really be a "kick me"
- function call instead.
- */
-
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
- printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n",
- dev->name,
- inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
- "IRQ conflict" : "network cable problem");
-
- dev->trans_start = jiffies; /* prevent tx timeout */
-
- /* Let's dump all registers */
- if(eth16i_debug > 0) {
- printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
- dev->name, inb(ioaddr + 0),
- inb(ioaddr + 1), inb(ioaddr + 2),
- inb(ioaddr + 3), inb(ioaddr + 4),
- inb(ioaddr + 5),
- inb(ioaddr + 6), inb(ioaddr + 7));
-
- printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n",
- dev->name, inb(ioaddr + TRANSMIT_START_REG),
- inb(ioaddr + COL_16_REG));
- printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue);
- printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len);
- printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started);
- }
- dev->stats.tx_errors++;
- eth16i_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
- netif_wake_queue(dev);
-}
-
-static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int status = 0;
- ushort length = skb->len;
- unsigned char *buf;
- unsigned long flags;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- buf = skb->data;
-
- netif_stop_queue(dev);
-
- /* Turn off TX interrupts */
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
- /* We would be better off doing the disable_irq tricks the 3c509 does;
- that would make this suck a lot less */
-
- spin_lock_irqsave(&lp->lock, flags);
-
- if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) {
- if(eth16i_debug > 0)
- printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name);
- }
- else {
- outw(length, ioaddr + DATAPORT);
-
- if( ioaddr < 0x1000 )
- outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
- else {
- unsigned char frag = length % 4;
- outsl(ioaddr + DATAPORT, buf, length >> 2);
- if( frag != 0 ) {
- outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1);
- if( frag == 3 )
- outsw(ioaddr + DATAPORT,
- (buf + (length & 0xFFFC) + 2), 1);
- }
- }
- lp->tx_buffered_packets++;
- lp->tx_buffered_bytes = length;
- lp->tx_queue++;
- lp->tx_queue_len += length + 2;
- }
- lp->tx_buf_busy = 0;
-
- if(lp->tx_started == 0) {
- /* If the transmitter is idle, always trigger a transmit */
- outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- netif_wake_queue(dev);
- }
- else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
- /* There is still more room for one more packet in tx buffer */
- netif_wake_queue(dev);
- }
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
- /* Turn TX interrupts back on */
- /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */
- status = 0;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static void eth16i_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int boguscount = MAX_RX_LOOP;
-
- /* Loop until all packets have been read */
- while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) {
-
- /* Read status byte from receive buffer */
- ushort status = inw(ioaddr + DATAPORT);
-
- /* Get the size of the packet from receive buffer */
- ushort pkt_len = inw(ioaddr + DATAPORT);
-
- if(eth16i_debug > 4)
- printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n",
- dev->name,
- inb(ioaddr + RECEIVE_MODE_REG), status);
-
- if( !(status & PKT_GOOD) ) {
- dev->stats.rx_errors++;
-
- if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) {
- dev->stats.rx_length_errors++;
- eth16i_reset(dev);
- return;
- }
- else {
- eth16i_skip_packet(dev);
- dev->stats.rx_dropped++;
- }
- }
- else { /* Ok so now we should have a good packet */
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 3);
- if( skb == NULL ) {
- printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n",
- dev->name, pkt_len);
- eth16i_skip_packet(dev);
- dev->stats.rx_dropped++;
- break;
- }
-
- skb_reserve(skb,2);
-
- /*
- Now let's get the packet out of the buffer.
- The size is (pkt_len + 1) >> 1, because we are reading words
- and the transfer has to be even-aligned.
- */
-
- if(ioaddr < 0x1000)
- insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
- (pkt_len + 1) >> 1);
- else {
- unsigned char *buf = skb_put(skb, pkt_len);
- unsigned char frag = pkt_len % 4;
-
- insl(ioaddr + DATAPORT, buf, pkt_len >> 2);
-
- if(frag != 0) {
- unsigned short rest[2];
- rest[0] = inw( ioaddr + DATAPORT );
- if(frag == 3)
- rest[1] = inw( ioaddr + DATAPORT );
-
- memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag);
- }
- }
-
- skb->protocol=eth_type_trans(skb, dev);
-
- if( eth16i_debug > 5 ) {
- int i;
- printk(KERN_DEBUG "%s: Received packet of length %d.\n",
- dev->name, pkt_len);
- for(i = 0; i < 14; i++)
- printk(KERN_DEBUG " %02x", skb->data[i]);
- printk(KERN_DEBUG ".\n");
- }
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
-
- } /* else */
-
- if(--boguscount <= 0)
- break;
-
- } /* while */
-}
-
-static irqreturn_t eth16i_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct eth16i_local *lp;
- int ioaddr = 0, status;
- int handled = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- /* Turn off all interrupts from adapter */
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
- /* eth16i_tx won't be called */
- spin_lock(&lp->lock);
-
- status = inw(ioaddr + TX_STATUS_REG); /* Get the status */
- outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */
-
- if (status)
- handled = 1;
-
- if(eth16i_debug > 3)
- printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status);
-
- if( status & 0x7f00 ) {
-
- dev->stats.rx_errors++;
-
- if(status & (BUS_RD_ERR << 8) )
- printk(KERN_WARNING "%s: Bus read error.\n",dev->name);
- if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++;
- if(status & (ALIGN_ERR << 8) ) dev->stats.rx_frame_errors++;
- if(status & (CRC_ERR << 8) ) dev->stats.rx_crc_errors++;
- if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++;
- }
- if( status & 0x001a) {
-
- dev->stats.tx_errors++;
-
- if(status & CR_LOST) dev->stats.tx_carrier_errors++;
- if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++;
-
-#if 0
- if(status & COLLISION) {
- dev->stats.collisions +=
- ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4);
- }
-#endif
- if(status & COLLISIONS_16) {
- if(lp->col_16 < MAX_COL_16) {
- lp->col_16++;
- dev->stats.collisions++;
- /* Resume transmitting, skip failed packet */
- outb(0x02, ioaddr + COL_16_REG);
- }
- else {
- printk(KERN_WARNING "%s: bailing out due to many consecutive 16-in-a-row collisions. Network cable problem?\n", dev->name);
- }
- }
- }
-
- if( status & 0x00ff ) { /* Let's check the transmit status reg */
-
- if(status & TX_DONE) { /* The transmit has been done */
- dev->stats.tx_packets = lp->tx_buffered_packets;
- dev->stats.tx_bytes += lp->tx_buffered_bytes;
- lp->col_16 = 0;
-
- if(lp->tx_queue) { /* Are there still packets? */
- /* There are packet(s) queued, so start transmitting and also
- write how many packets are to be sent */
- outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- }
- else {
- lp->tx_started = 0;
- }
- netif_wake_queue(dev);
- }
- }
-
- if( ( status & 0x8000 ) ||
- ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) {
- eth16i_rx(dev); /* We have packet in receive buffer */
- }
-
- /* Turn interrupts back on */
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
-
- if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
- /* There is still more room for one more packet in tx buffer */
- netif_wake_queue(dev);
- }
-
- spin_unlock(&lp->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static void eth16i_skip_packet(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- inw(ioaddr + DATAPORT);
- inw(ioaddr + DATAPORT);
- inw(ioaddr + DATAPORT);
-
- outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
- while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0);
-}
-
-static void eth16i_reset(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "%s: Resetting device.\n", dev->name);
-
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
- outw(0xffff, ioaddr + TX_STATUS_REG);
- eth16i_select_regbank(2, ioaddr);
-
- lp->tx_started = 0;
- lp->tx_buf_busy = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
-}
-
-static void eth16i_multicast(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
- {
- outb(3, ioaddr + RECEIVE_MODE_REG);
- } else {
- outb(2, ioaddr + RECEIVE_MODE_REG);
- }
-}
-
-static void eth16i_select_regbank(unsigned char banknbr, int ioaddr)
-{
- unsigned char data;
-
- data = inb(ioaddr + CONFIG_REG_1);
- outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1);
-}
-
-#ifdef MODULE
-
-static ushort eth16i_parse_mediatype(const char* s)
-{
- if(!s)
- return E_PORT_FROM_EPROM;
-
- if (!strncmp(s, "bnc", 3))
- return E_PORT_BNC;
- else if (!strncmp(s, "tp", 2))
- return E_PORT_TP;
- else if (!strncmp(s, "dix", 3))
- return E_PORT_DIX;
- else if (!strncmp(s, "auto", 4))
- return E_PORT_AUTO;
- else
- return E_PORT_FROM_EPROM;
-}
-
-#define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */
-
-static struct net_device *dev_eth16i[MAX_ETH16I_CARDS];
-static int io[MAX_ETH16I_CARDS];
-#if 0
-static int irq[MAX_ETH16I_CARDS];
-#endif
-static char* mediatype[MAX_ETH16I_CARDS];
-static int debug = -1;
-
-MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>");
-MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver");
-MODULE_LICENSE("GPL");
-
-
-module_param_array(io, int, NULL, 0);
-MODULE_PARM_DESC(io, "eth16i I/O base address(es)");
-
-#if 0
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "eth16i interrupt request number");
-#endif
-
-module_param_array(mediatype, charp, NULL, 0);
-MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,eprom)");
-
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
- struct net_device *dev;
-
- for (this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
- dev = alloc_etherdev(sizeof(struct eth16i_local));
- if (!dev)
- break;
-
- dev->base_addr = io[this_dev];
-
- if(debug != -1)
- eth16i_debug = debug;
-
- if(eth16i_debug > 1)
- printk(KERN_NOTICE "eth16i(%d): interface type %s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" );
-
- dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]);
-
- if(io[this_dev] == 0) {
- if (this_dev != 0) { /* Only autoprobe 1st one */
- free_netdev(dev);
- break;
- }
-
- printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n");
- }
-
- if (do_eth16i_probe(dev) == 0) {
- dev_eth16i[found++] = dev;
- continue;
- }
- printk(KERN_WARNING "eth16i.c No Eth16i card found (i/o = 0x%x).\n",
- io[this_dev]);
- free_netdev(dev);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
- struct net_device *dev = dev_eth16i[this_dev];
-
- if (netdev_priv(dev)) {
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, ETH16I_IO_EXTENT);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c
deleted file mode 100644
index 6a5c21b82c51..000000000000
--- a/drivers/net/ethernet/i825xx/3c505.c
+++ /dev/null
@@ -1,1671 +0,0 @@
-/*
- * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505)
- * By Craig Southeren, Juha Laiho and Philip Blundell
- *
- * 3c505.c This module implements an interface to the 3Com
- * Etherlink Plus (3c505) Ethernet card. Linux device
- * driver interface reverse engineered from the Linux 3C509
- * device drivers. Some 3C505 information gleaned from
- * the Crynwr packet driver. Still this driver would not
- * be here without 3C505 technical reference provided by
- * 3Com.
- *
- * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $
- *
- * Authors: Linux 3c505 device driver by
- * Craig Southeren, <craigs@ineluki.apana.org.au>
- * Final debugging by
- * Andrew Tridgell, <tridge@nimbus.anu.edu.au>
- * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by
- * Juha Laiho, <jlaiho@ichaos.nullnet.fi>
- * Linux 3C509 driver by
- * Donald Becker, <becker@super.org>
- * (Now at <becker@scyld.com>)
- * Crynwr packet driver by
- * Krishnan Gopalan and Gregg Stefancik,
- * Clemson University Engineering Computer Operations.
- * Portions of the code have been adapted from the 3c505
- * driver for NCSA Telnet by Bruce Orchard and later
- * modified by Warren Van Houten and krus@diku.dk.
- * 3C505 technical information provided by
- * Terry Murphy, of 3Com Network Adapter Division
- * Linux 1.3.0 changes by
- * Alan Cox <Alan.Cox@linux.org>
- * More debugging, DMA support, currently maintained by
- * Philip Blundell <philb@gnu.org>
- * Multicard/soft configurable dma channel/rev 2 hardware support
- * by Christopher Collins <ccollins@pcug.org.au>
- * Ethtool support (jgarzik), 11/17/2001
- */
-
-#define DRV_NAME "3c505"
-#define DRV_VERSION "1.10a"
-
-
-/* Theory of operation:
- *
- * The 3c505 is quite an intelligent board. All communication with it is done
- * by means of Primary Command Blocks (PCBs); these are transferred using PIO
- * through the command register. The card has 256k of on-board RAM, which is
- * used to buffer received packets. It might seem at first that more buffers
- * are better, but in fact this isn't true. From my tests, it seems that
- * more than about 10 buffers are unnecessary, and there is a noticeable
- * performance hit in having more active on the card. So the majority of the
- * card's memory isn't, in fact, used. Sadly, the card only has one transmit
- * buffer and, short of loading our own firmware into it (which is what some
- * drivers resort to) there's nothing we can do about this.
- *
- * We keep up to 4 "receive packet" commands active on the board at a time.
- * When a packet comes in, so long as there is a receive command active, the
- * board will send us a "packet received" PCB and then add the data for that
- * packet to the DMA queue. If a DMA transfer is not already in progress, we
- * set one up to start uploading the data. We have to maintain a list of
- * backlogged receive packets, because the card may decide to tell us about
- * a newly-arrived packet at any time, and we may not be able to start a DMA
- * transfer immediately (ie one may already be going on). We can't NAK the
- * PCB, because then it would throw the packet away.
- *
- * Trying to send a PCB to the card at the wrong moment seems to have bad
- * effects. If we send it a transmit PCB while a receive DMA is happening,
- * it will just NAK the PCB and so we will have wasted our time. Worse, it
- * sometimes seems to interrupt the transfer. The majority of the low-level
- * code is protected by one huge semaphore -- "busy" -- which is set whenever
- * it probably isn't safe to do anything to the card. The receive routine
- * must gain a lock on "busy" before it can start a DMA transfer, and the
- * transmit routine must gain a lock before it sends the first PCB to the card.
- * The send_pcb() routine also has an internal semaphore to protect it against
- * being re-entered (which would be disastrous) -- this is needed because
- * several things can happen asynchronously (re-priming the receiver and
- * asking the card for statistics, for example). send_pcb() will also refuse
- * to talk to the card at all if a DMA upload is happening. The higher-level
- * networking code will reschedule a later retry if some part of the driver
- * is blocked. In practice, this doesn't seem to happen very often.
- */
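The backlog of received-packet PCBs described above behaves as a small circular queue: entries are appended as "packet received" PCBs arrive and removed as DMA uploads are started, with indices advancing modulo the queue size (the driver's backlog_next() helper further down uses the same wrap rule). The following is a minimal, self-contained sketch of such a ring; BACKLOG_SIZE and the per-entry fields here are illustrative assumptions, not the driver's actual bookkeeping.

#include <stdio.h>

#define BACKLOG_SIZE 4	/* illustrative; the driver's real value lives in 3c505.h */

struct rx_backlog {
	unsigned int in;	/* next free slot */
	unsigned int out;	/* oldest queued entry */
	unsigned int count;	/* number of queued entries */
	unsigned int length[BACKLOG_SIZE];	/* byte count of each pending packet */
};

/* Advance an index with wrap-around, like the driver's backlog_next(). */
static unsigned int backlog_next(unsigned int n)
{
	return (n + 1) % BACKLOG_SIZE;
}

/* Queue a newly announced packet; fails when the ring is full. */
static int backlog_add(struct rx_backlog *b, unsigned int len)
{
	if (b->count == BACKLOG_SIZE)
		return -1;
	b->length[b->in] = len;
	b->in = backlog_next(b->in);
	b->count++;
	return 0;
}

/* Take the oldest pending packet for the next DMA upload. */
static int backlog_take(struct rx_backlog *b, unsigned int *len)
{
	if (b->count == 0)
		return -1;
	*len = b->length[b->out];
	b->out = backlog_next(b->out);
	b->count--;
	return 0;
}

int main(void)
{
	struct rx_backlog b = { 0 };
	unsigned int len;

	backlog_add(&b, 60);	/* two packets announced by the card */
	backlog_add(&b, 1514);
	while (backlog_take(&b, &len) == 0)	/* drained as uploads start */
		printf("upload %u bytes\n", len);
	return 0;
}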
-
-/* This driver may now work with revision 2.x hardware, since all the read
- * operations on the HCR have been removed (we now keep our own softcopy).
- * But I don't have an old card to test it on.
- *
- * This has had the bad effect that the autoprobe routine is now a bit
- * less friendly to other devices. However, it was never very good
- * before, so I doubt it will hurt anybody.
- */
-
-/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly
- * to make it more reliable, and secondly to add DMA mode. Many things could
- * probably be done better; the concurrency protection is particularly awful.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-
-#include "3c505.h"
-
-/*********************************************************
- *
- * define debug messages here as common strings to reduce space
- *
- *********************************************************/
-
-#define timeout_msg "*** timeout at %s:%s (line %d) ***\n"
-#define TIMEOUT_MSG(lineno) \
- pr_notice(timeout_msg, __FILE__, __func__, (lineno))
-
-#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n"
-#define INVALID_PCB_MSG(len) \
- pr_notice(invalid_pcb_msg, (len), __FILE__, __func__, __LINE__)
-
-#define search_msg "%s: Looking for 3c505 adapter at address %#x..."
-
-#define stilllooking_msg "still looking..."
-
-#define found_msg "found.\n"
-
-#define notfound_msg "not found (reason = %d)\n"
-
-#define couldnot_msg "%s: 3c505 not found\n"
-
-/*********************************************************
- *
- * various other debug stuff
- *
- *********************************************************/
-
-#ifdef ELP_DEBUG
-static int elp_debug = ELP_DEBUG;
-#else
-static int elp_debug;
-#endif
-#define debug elp_debug
-
-/*
- * 0 = no messages (well, some)
- * 1 = messages when high level commands performed
- * 2 = messages when low level commands performed
- * 3 = messages when interrupts received
- */
-
-/*****************************************************************
- *
- * List of I/O-addresses we try to auto-sense
- * Last element MUST BE 0!
- *****************************************************************/
-
-static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0};
-
-/* Dma Memory related stuff */
-
-static unsigned long dma_mem_alloc(int size)
-{
- int order = get_order(size);
- return __get_dma_pages(GFP_KERNEL, order);
-}
-
-
-/*****************************************************************
- *
- * Functions for I/O (note the inline !)
- *
- *****************************************************************/
-
-static inline unsigned char inb_status(unsigned int base_addr)
-{
- return inb(base_addr + PORT_STATUS);
-}
-
-static inline int inb_command(unsigned int base_addr)
-{
- return inb(base_addr + PORT_COMMAND);
-}
-
-static inline void outb_control(unsigned char val, struct net_device *dev)
-{
- outb(val, dev->base_addr + PORT_CONTROL);
- ((elp_device *)(netdev_priv(dev)))->hcr_val = val;
-}
-
-#define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val)
-
-static inline void outb_command(unsigned char val, unsigned int base_addr)
-{
- outb(val, base_addr + PORT_COMMAND);
-}
-
-static inline unsigned int backlog_next(unsigned int n)
-{
- return (n + 1) % BACKLOG_SIZE;
-}
-
-/*****************************************************************
- *
- * useful functions for accessing the adapter
- *
- *****************************************************************/
-
-/*
- * use this routine when accessing the ASF bits as they are
- * changed asynchronously by the adapter
- */
-
-/* get adapter PCB status */
-#define GET_ASF(addr) \
- (get_status(addr)&ASF_PCB_MASK)
-
-static inline int get_status(unsigned int base_addr)
-{
- unsigned long timeout = jiffies + 10*HZ/100;
- register int stat1;
- do {
- stat1 = inb_status(base_addr);
- } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- return stat1;
-}
-
-static inline void set_hsf(struct net_device *dev, int hsf)
-{
- elp_device *adapter = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->lock, flags);
- outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-static bool start_receive(struct net_device *, pcb_struct *);
-
-static inline void adapter_reset(struct net_device *dev)
-{
- unsigned long timeout;
- elp_device *adapter = netdev_priv(dev);
- unsigned char orig_hcr = adapter->hcr_val;
-
- outb_control(0, dev);
-
- if (inb_status(dev->base_addr) & ACRF) {
- do {
- inb_command(dev->base_addr);
- timeout = jiffies + 2*HZ/100;
- while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF));
- } while (inb_status(dev->base_addr) & ACRF);
- set_hsf(dev, HSF_PCB_NAK);
- }
- outb_control(adapter->hcr_val | ATTN | DIR, dev);
- mdelay(10);
- outb_control(adapter->hcr_val & ~ATTN, dev);
- mdelay(10);
- outb_control(adapter->hcr_val | FLSH, dev);
- mdelay(10);
- outb_control(adapter->hcr_val & ~FLSH, dev);
- mdelay(10);
-
- outb_control(orig_hcr, dev);
- if (!start_receive(dev, &adapter->tx_pcb))
- pr_err("%s: start receive command failed\n", dev->name);
-}
-
-/* Check to make sure that a DMA transfer hasn't timed out. This should
- * never happen in theory, but seems to occur occasionally if the card gets
- * prodded at the wrong time.
- */
-static inline void check_3c505_dma(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
- unsigned long flags, f;
- pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name,
- adapter->current_dma.direction ? "download" : "upload",
- get_dma_residue(dev->dma));
- spin_lock_irqsave(&adapter->lock, flags);
- adapter->dmaing = 0;
- adapter->busy = 0;
-
- f=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(f);
-
- if (adapter->rx_active)
- adapter->rx_active--;
- outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
- }
-}
-
-/* Primitive functions used by send_pcb() */
-static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte)
-{
- unsigned long timeout;
- outb_command(byte, base_addr);
- for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
- if (inb_status(base_addr) & HCRE)
- return false;
- }
- pr_warning("3c505: send_pcb_slow timed out\n");
- return true;
-}
-
-static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte)
-{
- unsigned int timeout;
- outb_command(byte, base_addr);
- for (timeout = 0; timeout < 40000; timeout++) {
- if (inb_status(base_addr) & HCRE)
- return false;
- }
- pr_warning("3c505: send_pcb_fast timed out\n");
- return true;
-}
-
-/* Check to see if the receiver needs restarting, and kick it if so */
-static inline void prime_rx(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) {
- if (!start_receive(dev, &adapter->itx_pcb))
- break;
- }
-}
-
-/*****************************************************************
- *
- * send_pcb
- * Send a PCB to the adapter.
- *
- * output byte to command reg --<--+
- * wait until HCRE is non zero |
- * loop until all bytes sent -->--+
- * set HSF1 and HSF2 to 1
- * output pcb length
- * wait until ASF give ACK or NAK
- * set HSF1 and HSF2 to 0
- *
- *****************************************************************/
-
-/* This can be quite slow -- the adapter is allowed to take up to 40ms
- * to respond to the initial interrupt.
- *
- * We run initially with interrupts turned on, but with a semaphore set
- * so that nobody tries to re-enter this code. Once the first byte has
- * gone through, we turn interrupts off and then send the others (the
- * timeout is reduced to 500us).
- */
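A worked reading of the two timeout regimes described above, assuming the usual jiffies arithmetic: 5*HZ/100 is HZ/20, i.e. roughly 50 ms whatever HZ is, while the ~500 us fast path is realised as the bounded 40000-iteration busy loop in send_pcb_fast(). A minimal sketch of the deadline idiom the driver open-codes throughout (wait_for_hcre() is illustrative only, not part of the driver):

	static bool wait_for_hcre(unsigned int base_addr, unsigned long how_long)
	{
		unsigned long deadline = jiffies + how_long;	/* e.g. 5*HZ/100 == ~50 ms */

		while (time_before(jiffies, deadline)) {
			if (inb_status(base_addr) & HCRE)	/* command register empty */
				return true;
		}
		return false;	/* timed out */
	}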
-
-static bool send_pcb(struct net_device *dev, pcb_struct * pcb)
-{
- int i;
- unsigned long timeout;
- elp_device *adapter = netdev_priv(dev);
- unsigned long flags;
-
- check_3c505_dma(dev);
-
- if (adapter->dmaing && adapter->current_dma.direction == 0)
- return false;
-
- /* Avoid contention */
- if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) {
- if (elp_debug >= 3) {
- pr_debug("%s: send_pcb entered while threaded\n", dev->name);
- }
- return false;
- }
- /*
- * load each byte into the command register and
- * wait for the HCRE bit to indicate the adapter
- * has read the byte
- */
- set_hsf(dev, 0);
-
- if (send_pcb_slow(dev->base_addr, pcb->command))
- goto abort;
-
- spin_lock_irqsave(&adapter->lock, flags);
-
- if (send_pcb_fast(dev->base_addr, pcb->length))
- goto sti_abort;
-
- for (i = 0; i < pcb->length; i++) {
- if (send_pcb_fast(dev->base_addr, pcb->data.raw[i]))
- goto sti_abort;
- }
-
- outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */
- outb_command(2 + pcb->length, dev->base_addr);
-
- /* now wait for the acknowledgement */
- spin_unlock_irqrestore(&adapter->lock, flags);
-
- for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
- switch (GET_ASF(dev->base_addr)) {
- case ASF_PCB_ACK:
- adapter->send_pcb_semaphore = 0;
- return true;
-
- case ASF_PCB_NAK:
-#ifdef ELP_DEBUG
- pr_debug("%s: send_pcb got NAK\n", dev->name);
-#endif
- goto abort;
- }
- }
-
- if (elp_debug >= 1)
- pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n",
- dev->name, inb_status(dev->base_addr));
- goto abort;
-
- sti_abort:
- spin_unlock_irqrestore(&adapter->lock, flags);
- abort:
- adapter->send_pcb_semaphore = 0;
- return false;
-}
-
-
-/*****************************************************************
- *
- * receive_pcb
- * Read a PCB from the adapter
- *
- * wait for ACRF to be non-zero ---<---+
- * input a byte |
- * if ASF1 and ASF2 were not both one |
- * before byte was read, loop --->---+
- * set HSF1 and HSF2 for ack
- *
- *****************************************************************/
-
-static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
-{
- int i, j;
- int total_length;
- int stat;
- unsigned long timeout;
- unsigned long flags;
-
- elp_device *adapter = netdev_priv(dev);
-
- set_hsf(dev, 0);
-
- /* get the command code */
- timeout = jiffies + 2*HZ/100;
- while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- return false;
- }
- pcb->command = inb_command(dev->base_addr);
-
- /* read the data length */
- timeout = jiffies + 3*HZ/100;
- while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- pr_info("%s: status %02x\n", dev->name, stat);
- return false;
- }
- pcb->length = inb_command(dev->base_addr);
-
- if (pcb->length > MAX_PCB_DATA) {
- INVALID_PCB_MSG(pcb->length);
- adapter_reset(dev);
- return false;
- }
- /* read the data */
- spin_lock_irqsave(&adapter->lock, flags);
- for (i = 0; i < MAX_PCB_DATA; i++) {
- for (j = 0; j < 20000; j++) {
- stat = get_status(dev->base_addr);
- if (stat & ACRF)
- break;
- }
- pcb->data.raw[i] = inb_command(dev->base_addr);
- if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000)
- break;
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
- if (i >= MAX_PCB_DATA) {
- INVALID_PCB_MSG(i);
- return false;
- }
- if (j >= 20000) {
- TIMEOUT_MSG(__LINE__);
- return false;
- }
- /* the last "data" byte was really the length! */
- total_length = pcb->data.raw[i];
-
- /* safety check total length vs data length */
- if (total_length != (pcb->length + 2)) {
- if (elp_debug >= 2)
- pr_warning("%s: mangled PCB received\n", dev->name);
- set_hsf(dev, HSF_PCB_NAK);
- return false;
- }
-
- if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
- if (test_and_set_bit(0, (void *) &adapter->busy)) {
- if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
- set_hsf(dev, HSF_PCB_NAK);
- pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
- pcb->command = 0;
- return true;
- } else {
- pcb->command = 0xff;
- }
- }
- }
- set_hsf(dev, HSF_PCB_ACK);
- return true;
-}
-
-/******************************************************
- *
- * queue a receive command on the adapter so we will get an
- * interrupt when a packet is received.
- *
- ******************************************************/
-
-static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb)
-{
- bool status;
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: restarting receiver\n", dev->name);
- tx_pcb->command = CMD_RECEIVE_PACKET;
- tx_pcb->length = sizeof(struct Rcv_pkt);
- tx_pcb->data.rcv_pkt.buf_seg
- = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */
- tx_pcb->data.rcv_pkt.buf_len = 1600;
- tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */
- status = send_pcb(dev, tx_pcb);
- if (status)
- adapter->rx_active++;
- return status;
-}
-
-/******************************************************
- *
- * extract a packet from the adapter
- * this routine is only called from within the interrupt
- * service routine, so no cli/sti calls are needed
- * note that the length is always assumed to be even
- *
- ******************************************************/
-
-static void receive_packet(struct net_device *dev, int len)
-{
- int rlen;
- elp_device *adapter = netdev_priv(dev);
- void *target;
- struct sk_buff *skb;
- unsigned long flags;
-
- rlen = (len + 1) & ~1;
- skb = netdev_alloc_skb(dev, rlen + 2);
-
- if (!skb) {
- pr_warning("%s: memory squeeze, dropping packet\n", dev->name);
- target = adapter->dma_buffer;
- adapter->current_dma.target = NULL;
- /* FIXME: stats */
- return;
- }
-
- skb_reserve(skb, 2);
- target = skb_put(skb, rlen);
- if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
- adapter->current_dma.target = target;
- target = adapter->dma_buffer;
- } else {
- adapter->current_dma.target = NULL;
- }
-
- /* if this happens, we die */
- if (test_and_set_bit(0, (void *) &adapter->dmaing))
- pr_err("%s: rx blocked, DMA in progress, dir %d\n",
- dev->name, adapter->current_dma.direction);
-
- adapter->current_dma.direction = 0;
- adapter->current_dma.length = rlen;
- adapter->current_dma.skb = skb;
- adapter->current_dma.start_time = jiffies;
-
- outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);
-
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- clear_dma_ff(dev->dma);
- set_dma_mode(dev->dma, 0x04); /* dma read */
- set_dma_addr(dev->dma, isa_virt_to_bus(target));
- set_dma_count(dev->dma, rlen);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- if (elp_debug >= 3) {
- pr_debug("%s: rx DMA transfer started\n", dev->name);
- }
-
- if (adapter->rx_active)
- adapter->rx_active--;
-
- if (!adapter->busy)
- pr_warning("%s: receive_packet called, busy not set.\n", dev->name);
-}
-
-/******************************************************
- *
- * interrupt handler
- *
- ******************************************************/
-
-static irqreturn_t elp_interrupt(int irq, void *dev_id)
-{
- int len;
- int dlen;
- int icount = 0;
- struct net_device *dev = dev_id;
- elp_device *adapter = netdev_priv(dev);
- unsigned long timeout;
-
- spin_lock(&adapter->lock);
-
- do {
- /*
- * has a DMA transfer finished?
- */
- if (inb_status(dev->base_addr) & DONE) {
- if (!adapter->dmaing)
- pr_warning("%s: phantom DMA completed\n", dev->name);
-
- if (elp_debug >= 3)
- pr_debug("%s: %s DMA complete, status %02x\n", dev->name,
- adapter->current_dma.direction ? "tx" : "rx",
- inb_status(dev->base_addr));
-
- outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
- if (adapter->current_dma.direction) {
- dev_kfree_skb_irq(adapter->current_dma.skb);
- } else {
- struct sk_buff *skb = adapter->current_dma.skb;
- if (skb) {
- if (adapter->current_dma.target) {
- /* have already done the skb_put() */
- memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
- }
- skb->protocol = eth_type_trans(skb,dev);
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
- }
- }
- adapter->dmaing = 0;
- if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
- int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
- adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
- if (elp_debug >= 2)
- pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t);
- receive_packet(dev, t);
- } else {
- adapter->busy = 0;
- }
- } else {
- /* has one timed out? */
- check_3c505_dma(dev);
- }
-
- /*
- * receive a PCB from the adapter
- */
- timeout = jiffies + 3*HZ/100;
- while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) {
- if (receive_pcb(dev, &adapter->irx_pcb)) {
- switch (adapter->irx_pcb.command)
- {
- case 0:
- break;
- /*
- * received a packet - this must be handled fast
- */
- case 0xff:
- case CMD_RECEIVE_PACKET_COMPLETE:
- /* if the device isn't open, don't pass packets up the stack */
- if (!netif_running(dev))
- break;
- len = adapter->irx_pcb.data.rcv_resp.pkt_len;
- dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
- if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
- pr_err("%s: interrupt - packet not received correctly\n", dev->name);
- } else {
- if (elp_debug >= 3) {
- pr_debug("%s: interrupt - packet received of length %i (%i)\n",
- dev->name, len, dlen);
- }
- if (adapter->irx_pcb.command == 0xff) {
- if (elp_debug >= 2)
- pr_debug("%s: adding packet to backlog (len = %d)\n",
- dev->name, dlen);
- adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
- adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
- } else {
- receive_packet(dev, dlen);
- }
- if (elp_debug >= 3)
- pr_debug("%s: packet received\n", dev->name);
- }
- break;
-
- /*
- * 82586 configured correctly
- */
- case CMD_CONFIGURE_82586_RESPONSE:
- adapter->got[CMD_CONFIGURE_82586] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - configure response received\n", dev->name);
- break;
-
- /*
- * Adapter memory configuration
- */
- case CMD_CONFIGURE_ADAPTER_RESPONSE:
- adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Adapter memory configuration %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
- /*
- * Multicast list loading
- */
- case CMD_LOAD_MULTICAST_RESPONSE:
- adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Multicast address list loading %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
- /*
- * Station address setting
- */
- case CMD_SET_ADDRESS_RESPONSE:
- adapter->got[CMD_SET_STATION_ADDRESS] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Ethernet address setting %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
-
- /*
- * received board statistics
- */
- case CMD_NETWORK_STATISTICS_RESPONSE:
- dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
- dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
- dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
- dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
- dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
- dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
- adapter->got[CMD_NETWORK_STATISTICS] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - statistics response received\n", dev->name);
- break;
-
- /*
- * sent a packet
- */
- case CMD_TRANSMIT_PACKET_COMPLETE:
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - packet sent\n", dev->name);
- if (!netif_running(dev))
- break;
- switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
- case 0xffff:
- dev->stats.tx_aborted_errors++;
- pr_info("%s: transmit timed out, network cable problem?\n", dev->name);
- break;
- case 0xfffe:
- dev->stats.tx_fifo_errors++;
- pr_info("%s: transmit timed out, FIFO underrun\n", dev->name);
- break;
- }
- netif_wake_queue(dev);
- break;
-
- /*
- * some unknown PCB
- */
- default:
- pr_debug("%s: unknown PCB received - %2.2x\n",
- dev->name, adapter->irx_pcb.command);
- break;
- }
- } else {
- pr_warning("%s: failed to read PCB on interrupt\n", dev->name);
- adapter_reset(dev);
- }
- }
-
- } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));
-
- prime_rx(dev);
-
- /*
- * indicate no longer in interrupt routine
- */
- spin_unlock(&adapter->lock);
- return IRQ_HANDLED;
-}
-
-
-/******************************************************
- *
- * open the board
- *
- ******************************************************/
-
-static int elp_open(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- int retval;
-
- if (elp_debug >= 3)
- pr_debug("%s: request to open device\n", dev->name);
-
- /*
- * make sure we actually found the device
- */
- if (adapter == NULL) {
- pr_err("%s: Opening a non-existent physical device\n", dev->name);
- return -EAGAIN;
- }
- /*
- * disable interrupts on the board
- */
- outb_control(0, dev);
-
- /*
- * clear any pending interrupts
- */
- inb_command(dev->base_addr);
- adapter_reset(dev);
-
- /*
- * no receive PCBs active
- */
- adapter->rx_active = 0;
-
- adapter->busy = 0;
- adapter->send_pcb_semaphore = 0;
- adapter->rx_backlog.in = 0;
- adapter->rx_backlog.out = 0;
-
- spin_lock_init(&adapter->lock);
-
- /*
- * install our interrupt service routine
- */
- if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) {
- pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq);
- return retval;
- }
- if ((retval = request_dma(dev->dma, dev->name))) {
- free_irq(dev->irq, dev);
- pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma);
- return retval;
- }
- adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
- if (!adapter->dma_buffer) {
- pr_err("%s: could not allocate DMA buffer\n", dev->name);
- free_dma(dev->dma);
- free_irq(dev->irq, dev);
- return -ENOMEM;
- }
- adapter->dmaing = 0;
-
- /*
- * enable interrupts on the board
- */
- outb_control(CMDE, dev);
-
- /*
- * configure adapter memory: we need 10 multicast addresses, default==0
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 3c505 memory configuration command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
- adapter->tx_pcb.data.memconf.cmd_q = 10;
- adapter->tx_pcb.data.memconf.rcv_q = 20;
- adapter->tx_pcb.data.memconf.mcast = 10;
- adapter->tx_pcb.data.memconf.frame = 20;
- adapter->tx_pcb.data.memconf.rcv_b = 20;
- adapter->tx_pcb.data.memconf.progs = 0;
- adapter->tx_pcb.length = sizeof(struct Memconf);
- adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send memory configuration command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-
-
- /*
- * configure adapter to receive broadcast messages and wait for response
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 82586 configure command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_82586;
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
- adapter->tx_pcb.length = 2;
- adapter->got[CMD_CONFIGURE_82586] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send 82586 configure command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-
- /* enable burst-mode DMA */
- /* outb(0x1, dev->base_addr + PORT_AUXDMA); */
-
- /*
- * queue receive commands to provide buffering
- */
- prime_rx(dev);
- if (elp_debug >= 3)
- pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
-
- /*
- * device is now officially open!
- */
-
- netif_start_queue(dev);
- return 0;
-}
-
-
-/******************************************************
- *
- * send a packet to the adapter
- *
- ******************************************************/
-
-static bool send_packet(struct net_device *dev, struct sk_buff *skb)
-{
- elp_device *adapter = netdev_priv(dev);
- unsigned long target;
- unsigned long flags;
-
- /*
- * make sure the length is even and no shorter than 60 bytes
- */
- unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);
-
- if (test_and_set_bit(0, (void *) &adapter->busy)) {
- if (elp_debug >= 2)
- pr_debug("%s: transmit blocked\n", dev->name);
- return false;
- }
-
- dev->stats.tx_bytes += nlen;
-
- /*
- * send the adapter a transmit packet command. Ignore segment and offset
- * and make sure the length is even
- */
- adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
- adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
- adapter->tx_pcb.data.xmit_pkt.buf_ofs
- = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */
- adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;
-
- if (!send_pcb(dev, &adapter->tx_pcb)) {
- adapter->busy = 0;
- return false;
- }
- /* if this happens, we die */
- if (test_and_set_bit(0, (void *) &adapter->dmaing))
- pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
-
- adapter->current_dma.direction = 1;
- adapter->current_dma.start_time = jiffies;
-
- if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
- skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
- memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
- target = isa_virt_to_bus(adapter->dma_buffer);
- }
- else {
- target = isa_virt_to_bus(skb->data);
- }
- adapter->current_dma.skb = skb;
-
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- clear_dma_ff(dev->dma);
- set_dma_mode(dev->dma, 0x48); /* dma memory -> io */
- set_dma_addr(dev->dma, target);
- set_dma_count(dev->dma, nlen);
- outb_control(adapter->hcr_val | DMAE | TCEN, dev);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- if (elp_debug >= 3)
- pr_debug("%s: DMA transfer started\n", dev->name);
-
- return true;
-}
-
-/*
- * The upper layer thinks we timed out
- */
-
-static void elp_timeout(struct net_device *dev)
-{
- int stat;
-
- stat = inb_status(dev->base_addr);
- pr_warning("%s: transmit timed out, lost %s?\n", dev->name,
- (stat & ACRF) ? "interrupt" : "command");
- if (elp_debug >= 1)
- pr_debug("%s: status %#02x\n", dev->name, stat);
- dev->trans_start = jiffies; /* prevent tx timeout */
- dev->stats.tx_dropped++;
- netif_wake_queue(dev);
-}
-
-/******************************************************
- *
- * start the transmitter
- * return NETDEV_TX_OK if sent OK, else NETDEV_TX_BUSY
- *
- ******************************************************/
-
-static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- elp_device *adapter = netdev_priv(dev);
-
- spin_lock_irqsave(&adapter->lock, flags);
- check_3c505_dma(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len);
-
- netif_stop_queue(dev);
-
- /*
- * send the packet at skb->data for skb->len
- */
- if (!send_packet(dev, skb)) {
- if (elp_debug >= 2) {
- pr_debug("%s: failed to transmit packet\n", dev->name);
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
- return NETDEV_TX_BUSY;
- }
- if (elp_debug >= 3)
- pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
-
- prime_rx(dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
- netif_start_queue(dev);
- return NETDEV_TX_OK;
-}
-
-/******************************************************
- *
- * return statistics on the board
- *
- ******************************************************/
-
-static struct net_device_stats *elp_get_stats(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request for stats\n", dev->name);
-
- /* If the device is closed, just return the latest stats we have,
- - we cannot query the adapter without interrupts */
- if (!netif_running(dev))
- return &dev->stats;
-
- /* send a get statistics command to the board */
- adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
- adapter->tx_pcb.length = 0;
- adapter->got[CMD_NETWORK_STATISTICS] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send get statistics command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- return &dev->stats;
- }
- }
-
- /* statistics are now up to date */
- return &dev->stats;
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-/******************************************************
- *
- * close the board
- *
- ******************************************************/
-
-static int elp_close(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request to close device\n", dev->name);
-
- netif_stop_queue(dev);
-
- /* Someone may request the device statistics even when
- * the interface is closed. The following will update the statistics
- * structure in the driver, so we'll be able to give current statistics.
- */
- (void) elp_get_stats(dev);
-
- /*
- * disable interrupts on the board
- */
- outb_control(0, dev);
-
- /*
- * release the IRQ
- */
- free_irq(dev->irq, dev);
-
- free_dma(dev->dma);
- free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE));
-
- return 0;
-}
-
-
-/************************************************************
- *
- * Set multicast list
- * num_addrs==0: clear mc_list
- * num_addrs==-1: set promiscuous mode
- * num_addrs>0: set mc_list
- *
- ************************************************************/
-
-static void elp_set_mc_list(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i;
- unsigned long flags;
-
- if (elp_debug >= 3)
- pr_debug("%s: request to set multicast list\n", dev->name);
-
- spin_lock_irqsave(&adapter->lock, flags);
-
- if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
- /* send a "load multicast list" command to the board, max 10 addrs/cmd */
- /* if num_addrs==0 the list will be cleared */
- adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
- adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(adapter->tx_pcb.data.multicast[i++],
- ha->addr, 6);
- adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send set_multicast command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- }
- }
- if (!netdev_mc_empty(dev))
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
- else /* num_addrs == 0 */
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
- } else
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC;
- /*
- * configure adapter to receive messages (as specified above)
- * and wait for response
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 82586 configure command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_82586;
- adapter->tx_pcb.length = 2;
- adapter->got[CMD_CONFIGURE_82586] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- {
- spin_unlock_irqrestore(&adapter->lock, flags);
- pr_err("%s: couldn't send 82586 configure command\n", dev->name);
- }
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- spin_unlock_irqrestore(&adapter->lock, flags);
- while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-}
-
-/************************************************************
- *
- * A couple of tests to see if there's a 3C505 or not
- * Called only by elp_autodetect
- ************************************************************/
-
-static int __init elp_sense(struct net_device *dev)
-{
- int addr = dev->base_addr;
- const char *name = dev->name;
- byte orig_HSR;
-
- if (!request_region(addr, ELP_IO_EXTENT, "3c505"))
- return -ENODEV;
-
- orig_HSR = inb_status(addr);
-
- if (elp_debug > 0)
- pr_debug(search_msg, name, addr);
-
- if (orig_HSR == 0xff) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 1);
- goto out;
- }
-
- /* Wait for a while; the adapter may still be booting up */
- if (elp_debug > 0)
- pr_cont(stilllooking_msg);
-
- if (orig_HSR & DIR) {
- /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
- outb(0, dev->base_addr + PORT_CONTROL);
- msleep(300);
- if (inb_status(addr) & DIR) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 2);
- goto out;
- }
- } else {
- /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */
- outb(DIR, dev->base_addr + PORT_CONTROL);
- msleep(300);
- if (!(inb_status(addr) & DIR)) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 3);
- goto out;
- }
- }
- /*
- * It certainly looks like a 3c505.
- */
- if (elp_debug > 0)
- pr_cont(found_msg);
-
- return 0;
-out:
- release_region(addr, ELP_IO_EXTENT);
- return -ENODEV;
-}
-
-/*************************************************************
- *
- * Search through addr_list[] and try to find a 3C505
- * Called only by elplus_probe
- *************************************************************/
-
-static int __init elp_autodetect(struct net_device *dev)
-{
- int idx = 0;
-
- /* if base address set, then only check that address
- otherwise, run through the table */
- if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */
- if (elp_sense(dev) == 0)
- return dev->base_addr;
- } else
- while ((dev->base_addr = addr_list[idx++])) {
- if (elp_sense(dev) == 0)
- return dev->base_addr;
- }
-
- /* could not find an adapter */
- if (elp_debug > 0)
- pr_debug(couldnot_msg, dev->name);
-
- return 0; /* Because of this, the layer above will return -ENODEV */
-}
-
-static const struct net_device_ops elp_netdev_ops = {
- .ndo_open = elp_open,
- .ndo_stop = elp_close,
- .ndo_get_stats = elp_get_stats,
- .ndo_start_xmit = elp_start_xmit,
- .ndo_tx_timeout = elp_timeout,
- .ndo_set_rx_mode = elp_set_mc_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/******************************************************
- *
- * probe for an Etherlink Plus board at the specified address
- *
- ******************************************************/
-
-/* There are three situations we need to be able to detect here:
-
- * a) the card is idle
- * b) the card is still booting up
- * c) the card is stuck in a strange state (some DOS drivers do this)
- *
- * In case (a), all is well. In case (b), we wait 10 seconds to see if the
- * card finishes booting, and carry on if so. In case (c), we do a hard reset,
- * loop round, and hope for the best.
- *
- * This is all very unpleasant, but hopefully avoids the problems with the old
- * probe code (which had a 15-second delay if the card was idle, and didn't
- * work at all if it was in a weird state).
- */
-
-static int __init elplus_setup(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- int i, tries, tries1, okay;
- unsigned long timeout;
- unsigned long cookie = 0;
- int err = -ENODEV;
-
- /*
- * setup adapter structure
- */
-
- dev->base_addr = elp_autodetect(dev);
- if (!dev->base_addr)
- return -ENODEV;
-
- adapter->send_pcb_semaphore = 0;
-
- for (tries1 = 0; tries1 < 3; tries1++) {
- outb_control((adapter->hcr_val | CMDE) & ~DIR, dev);
- /* First try to write just one byte, to see if the card is
- * responding at all normally.
- */
- timeout = jiffies + 5*HZ/100;
- okay = 0;
- while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
- if ((inb_status(dev->base_addr) & HCRE)) {
- outb_command(0, dev->base_addr); /* send a spurious byte */
- timeout = jiffies + 5*HZ/100;
- while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
- if (inb_status(dev->base_addr) & HCRE)
- okay = 1;
- }
- if (!okay) {
- /* Nope, it's ignoring the command register. This means that
- * either it's still booting up, or it's died.
- */
- pr_err("%s: command register wouldn't drain, ", dev->name);
- if ((inb_status(dev->base_addr) & 7) == 3) {
- /* If the adapter status is 3, it *could* still be booting.
- * Give it the benefit of the doubt for 10 seconds.
- */
- pr_cont("assuming 3c505 still starting\n");
- timeout = jiffies + 10*HZ;
- while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7));
- if (inb_status(dev->base_addr) & 7) {
- pr_err("%s: 3c505 failed to start\n", dev->name);
- } else {
- okay = 1; /* It started */
- }
- } else {
- /* Otherwise, it must just be in a strange
- * state. We probably need to kick it.
- */
- pr_cont("3c505 is sulking\n");
- }
- }
- for (tries = 0; tries < 5 && okay; tries++) {
-
- /*
- * Try to set the Ethernet address, to make sure that the board
- * is working.
- */
- adapter->tx_pcb.command = CMD_STATION_ADDRESS;
- adapter->tx_pcb.length = 0;
- cookie = probe_irq_on();
- if (!send_pcb(dev, &adapter->tx_pcb)) {
- pr_err("%s: could not send first PCB\n", dev->name);
- probe_irq_off(cookie);
- continue;
- }
- if (!receive_pcb(dev, &adapter->rx_pcb)) {
- pr_err("%s: could not read first PCB\n", dev->name);
- probe_irq_off(cookie);
- continue;
- }
- if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
- (adapter->rx_pcb.length != 6)) {
- pr_err("%s: first PCB wrong (%d, %d)\n", dev->name,
- adapter->rx_pcb.command, adapter->rx_pcb.length);
- probe_irq_off(cookie);
- continue;
- }
- goto okay;
- }
- /* It's broken. Do a hard reset to re-initialise the board,
- * and try again.
- */
- pr_info("%s: resetting adapter\n", dev->name);
- outb_control(adapter->hcr_val | FLSH | ATTN, dev);
- outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev);
- }
- pr_err("%s: failed to initialise 3c505\n", dev->name);
- goto out;
-
- okay:
- if (dev->irq) { /* Is there a preset IRQ? */
- int rpt = probe_irq_off(cookie);
- if (dev->irq != rpt) {
- pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
- }
- /* if dev->irq == probe_irq_off(cookie), all is well */
- } else /* No preset IRQ; just use what we can detect */
- dev->irq = probe_irq_off(cookie);
- switch (dev->irq) { /* Legal, sane? */
- case 0:
- pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n",
- dev->name);
- goto out;
- case 1:
- case 6:
- case 8:
- case 13:
- pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n",
- dev->name, dev->irq);
- goto out;
- }
- /*
- * Now we have the IRQ number so we can disable the interrupts from
- * the board until the board is opened.
- */
- outb_control(adapter->hcr_val & ~CMDE, dev);
-
- /*
- * copy Ethernet address into structure
- */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i];
-
- /* find a DMA channel */
- if (!dev->dma) {
- if (dev->mem_start) {
- dev->dma = dev->mem_start & 7;
- }
- else {
- pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name);
- dev->dma = ELP_DMA;
- }
- }
-
- /*
- * print remainder of startup message
- */
- pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ",
- dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr);
- /*
- * read more information from the adapter
- */
-
- adapter->tx_pcb.command = CMD_ADAPTER_INFO;
- adapter->tx_pcb.length = 0;
- if (!send_pcb(dev, &adapter->tx_pcb) ||
- !receive_pcb(dev, &adapter->rx_pcb) ||
- (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) ||
- (adapter->rx_pcb.length != 10)) {
- pr_cont("not responding to second PCB\n");
- }
- pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers,
- adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
-
- /*
- * reconfigure the adapter memory to better suit our purposes
- */
- adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
- adapter->tx_pcb.length = 12;
- adapter->tx_pcb.data.memconf.cmd_q = 8;
- adapter->tx_pcb.data.memconf.rcv_q = 8;
- adapter->tx_pcb.data.memconf.mcast = 10;
- adapter->tx_pcb.data.memconf.frame = 10;
- adapter->tx_pcb.data.memconf.rcv_b = 10;
- adapter->tx_pcb.data.memconf.progs = 0;
- if (!send_pcb(dev, &adapter->tx_pcb) ||
- !receive_pcb(dev, &adapter->rx_pcb) ||
- (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) ||
- (adapter->rx_pcb.length != 2)) {
- pr_err("%s: could not configure adapter memory\n", dev->name);
- }
- if (adapter->rx_pcb.data.configure) {
- pr_err("%s: adapter configuration failed\n", dev->name);
- }
-
- dev->netdev_ops = &elp_netdev_ops;
- dev->watchdog_timeo = 10*HZ;
- dev->ethtool_ops = &netdev_ethtool_ops; /* local */
-
- dev->mem_start = dev->mem_end = 0;
-
- err = register_netdev(dev);
- if (err)
- goto out;
-
- return 0;
-out:
- release_region(dev->base_addr, ELP_IO_EXTENT);
- return err;
-}
-
-#ifndef MODULE
-struct net_device * __init elplus_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(elp_device));
- int err;
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = elplus_setup(dev);
- if (err) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
- return dev;
-}
-
-#else
-static struct net_device *dev_3c505[ELP_MAX_CARDS];
-static int io[ELP_MAX_CARDS];
-static int irq[ELP_MAX_CARDS];
-static int dma[ELP_MAX_CARDS];
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(dma, int, NULL, 0);
-MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)");
-MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
- struct net_device *dev = alloc_etherdev(sizeof(elp_device));
- if (!dev)
- break;
-
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (dma[this_dev]) {
- dev->dma = dma[this_dev];
- } else {
- dev->dma = ELP_DMA;
- pr_warning("3c505.c: warning, using default DMA channel,\n");
- }
- if (io[this_dev] == 0) {
- if (this_dev) {
- free_netdev(dev);
- break;
- }
- pr_notice("3c505.c: module autoprobe not recommended, give io=xx.\n");
- }
- if (elplus_setup(dev) != 0) {
- pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]);
- free_netdev(dev);
- break;
- }
- dev_3c505[this_dev] = dev;
- found++;
- }
- if (!found)
- return -ENODEV;
- return 0;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
- struct net_device *dev = dev_3c505[this_dev];
- if (dev) {
- unregister_netdev(dev);
- release_region(dev->base_addr, ELP_IO_EXTENT);
- free_netdev(dev);
- }
- }
-}
-
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/3c505.h b/drivers/net/ethernet/i825xx/3c505.h
deleted file mode 100644
index 04df2a9002b6..000000000000
--- a/drivers/net/ethernet/i825xx/3c505.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/*****************************************************************
- *
- * defines for 3Com Etherlink Plus adapter
- *
- *****************************************************************/
-
-#define ELP_DMA 6
-#define ELP_RX_PCBS 4
-#define ELP_MAX_CARDS 4
-
-/*
- * I/O register offsets
- */
-#define PORT_COMMAND 0x00 /* read/write, 8-bit */
-#define PORT_STATUS 0x02 /* read only, 8-bit */
-#define PORT_AUXDMA 0x02 /* write only, 8-bit */
-#define PORT_DATA 0x04 /* read/write, 16-bit */
-#define PORT_CONTROL 0x06 /* read/write, 8-bit */
-
-#define ELP_IO_EXTENT 0x10 /* size of used IO registers */
-
-/*
- * host control registers bits
- */
-#define ATTN 0x80 /* attention */
-#define FLSH 0x40 /* flush data register */
-#define DMAE 0x20 /* DMA enable */
-#define DIR 0x10 /* direction */
-#define TCEN 0x08 /* terminal count interrupt enable */
-#define CMDE 0x04 /* command register interrupt enable */
-#define HSF2 0x02 /* host status flag 2 */
-#define HSF1 0x01 /* host status flag 1 */
-
-/*
- * combinations of HSF flags used for PCB transmission
- */
-#define HSF_PCB_ACK HSF1
-#define HSF_PCB_NAK HSF2
-#define HSF_PCB_END (HSF2|HSF1)
-#define HSF_PCB_MASK (HSF2|HSF1)
-
-/*
- * host status register bits
- */
-#define HRDY 0x80 /* data register ready */
-#define HCRE 0x40 /* command register empty */
-#define ACRF 0x20 /* adapter command register full */
-/* #define DIR 0x10 direction - same as in control register */
-#define DONE 0x08 /* DMA done */
-#define ASF3 0x04 /* adapter status flag 3 */
-#define ASF2 0x02 /* adapter status flag 2 */
-#define ASF1 0x01 /* adapter status flag 1 */
-
-/*
- * combinations of ASF flags used for PCB reception
- */
-#define ASF_PCB_ACK ASF1
-#define ASF_PCB_NAK ASF2
-#define ASF_PCB_END (ASF2|ASF1)
-#define ASF_PCB_MASK (ASF2|ASF1)
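The 3c505.c code above polls GET_ASF() against these values; a plausible definition, stated here as an assumption since the macro actually lives in 3c505.c rather than in this header, is simply a mask of the host status register:

	/* Assumed definition of the macro used by send_pcb() and receive_pcb(). */
	#define GET_ASF(addr) (inb_status(addr) & ASF_PCB_MASK)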
-
-/*
- * host aux DMA register bits
- */
-#define DMA_BRST 0x01 /* DMA burst */
-
-/*
- * maximum amount of data allowed in a PCB
- */
-#define MAX_PCB_DATA 62
-
-/*****************************************************************
- *
- * timeout value
- * this is a rough value used for loops to stop them from
- * locking up the whole machine in the case of failure or
- * error conditions
- *
- *****************************************************************/
-
-#define TIMEOUT 300
-
-/*****************************************************************
- *
- * PCB commands
- *
- *****************************************************************/
-
-enum {
- /*
- * host PCB commands
- */
- CMD_CONFIGURE_ADAPTER_MEMORY = 0x01,
- CMD_CONFIGURE_82586 = 0x02,
- CMD_STATION_ADDRESS = 0x03,
- CMD_DMA_DOWNLOAD = 0x04,
- CMD_DMA_UPLOAD = 0x05,
- CMD_PIO_DOWNLOAD = 0x06,
- CMD_PIO_UPLOAD = 0x07,
- CMD_RECEIVE_PACKET = 0x08,
- CMD_TRANSMIT_PACKET = 0x09,
- CMD_NETWORK_STATISTICS = 0x0a,
- CMD_LOAD_MULTICAST_LIST = 0x0b,
- CMD_CLEAR_PROGRAM = 0x0c,
- CMD_DOWNLOAD_PROGRAM = 0x0d,
- CMD_EXECUTE_PROGRAM = 0x0e,
- CMD_SELF_TEST = 0x0f,
- CMD_SET_STATION_ADDRESS = 0x10,
- CMD_ADAPTER_INFO = 0x11,
- NUM_TRANSMIT_CMDS,
-
- /*
- * adapter PCB commands
- */
- CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31,
- CMD_CONFIGURE_82586_RESPONSE = 0x32,
- CMD_ADDRESS_RESPONSE = 0x33,
- CMD_DOWNLOAD_DATA_REQUEST = 0x34,
- CMD_UPLOAD_DATA_REQUEST = 0x35,
- CMD_RECEIVE_PACKET_COMPLETE = 0x38,
- CMD_TRANSMIT_PACKET_COMPLETE = 0x39,
- CMD_NETWORK_STATISTICS_RESPONSE = 0x3a,
- CMD_LOAD_MULTICAST_RESPONSE = 0x3b,
- CMD_CLEAR_PROGRAM_RESPONSE = 0x3c,
- CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d,
- CMD_EXECUTE_RESPONSE = 0x3e,
- CMD_SELF_TEST_RESPONSE = 0x3f,
- CMD_SET_ADDRESS_RESPONSE = 0x40,
- CMD_ADAPTER_INFO_RESPONSE = 0x41
-};
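One pattern worth noting in this enum: every adapter response code listed here is its host command code plus 0x30, e.g. CMD_CONFIGURE_82586 (0x02) pairs with CMD_CONFIGURE_82586_RESPONSE (0x32) and CMD_ADAPTER_INFO (0x11) with CMD_ADAPTER_INFO_RESPONSE (0x41), so a response can be matched back to its request as response == command + 0x30.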
-
-/* Definitions for the PCB data structure */
-
-/* Data units */
-typedef unsigned char byte;
-typedef unsigned short int word;
-typedef unsigned long int dword;
-
-/* Data structures */
-struct Memconf {
- word cmd_q,
- rcv_q,
- mcast,
- frame,
- rcv_b,
- progs;
-};
-
-struct Rcv_pkt {
- word buf_ofs,
- buf_seg,
- buf_len,
- timeout;
-};
-
-struct Xmit_pkt {
- word buf_ofs,
- buf_seg,
- pkt_len;
-};
-
-struct Rcv_resp {
- word buf_ofs,
- buf_seg,
- buf_len,
- pkt_len,
- timeout,
- status;
- dword timetag;
-};
-
-struct Xmit_resp {
- word buf_ofs,
- buf_seg,
- c_stat,
- status;
-};
-
-
-struct Netstat {
- dword tot_recv,
- tot_xmit;
- word err_CRC,
- err_align,
- err_res,
- err_ovrrun;
-};
-
-
-struct Selftest {
- word error;
- union {
- word ROM_cksum;
- struct {
- word ofs, seg;
- } RAM;
- word i82586;
- } failure;
-};
-
-struct Info {
- byte minor_vers,
- major_vers;
- word ROM_cksum,
- RAM_sz,
- free_ofs,
- free_seg;
-};
-
-struct Memdump {
- word size,
- off,
- seg;
-};
-
-/*
-Primary Command Block. The most important data structure. All communication
-between the host and the adapter is done with these. (Except for the actual
-Ethernet data, which has different packaging.)
-*/
-typedef struct {
- byte command;
- byte length;
- union {
- struct Memconf memconf;
- word configure;
- struct Rcv_pkt rcv_pkt;
- struct Xmit_pkt xmit_pkt;
- byte multicast[10][6];
- byte eth_addr[6];
- byte failed;
- struct Rcv_resp rcv_resp;
- struct Xmit_resp xmit_resp;
- struct Netstat netstat;
- struct Selftest selftest;
- struct Info info;
- struct Memdump memdump;
- byte raw[62];
- } data;
-} pcb_struct;
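A minimal example of filling in a pcb_struct, mirroring the 82586 configure PCB that elp_open() builds in 3c505.c above:

	pcb_struct pcb;

	pcb.command = CMD_CONFIGURE_82586;              /* host command code */
	pcb.data.configure = NO_LOOPBACK | RECV_BROAD;  /* 16-bit 'configure' union member */
	pcb.length = 2;                                 /* number of valid bytes in 'data' */
	/* send_pcb(dev, &pcb) then streams command, length and data bytes to the adapter */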
-
-/* These defines for 'configure' */
-#define RECV_STATION 0x00
-#define RECV_BROAD 0x01
-#define RECV_MULTI 0x02
-#define RECV_PROMISC 0x04
-#define NO_LOOPBACK 0x00
-#define INT_LOOPBACK 0x08
-#define EXT_LOOPBACK 0x10
-
-/*****************************************************************
- *
- * structure to hold context information for adapter
- *
- *****************************************************************/
-
-#define DMA_BUFFER_SIZE 1600
-#define BACKLOG_SIZE 4
-
-typedef struct {
- volatile short got[NUM_TRANSMIT_CMDS]; /* flags for
- command completion */
- pcb_struct tx_pcb; /* PCB for foreground sending */
- pcb_struct rx_pcb; /* PCB for foreground receiving */
- pcb_struct itx_pcb; /* PCB for background sending */
- pcb_struct irx_pcb; /* PCB for background receiving */
-
- void *dma_buffer;
-
- struct {
- unsigned int length[BACKLOG_SIZE];
- unsigned int in;
- unsigned int out;
- } rx_backlog;
-
- struct {
- unsigned int direction;
- unsigned int length;
- struct sk_buff *skb;
- void *target;
- unsigned long start_time;
- } current_dma;
-
- /* flags */
- unsigned long send_pcb_semaphore;
- unsigned long dmaing;
- unsigned long busy;
-
- unsigned int rx_active; /* number of receive PCBs */
- volatile unsigned char hcr_val; /* what we think the HCR contains */
- spinlock_t lock; /* Interrupt v tx lock */
-} elp_device;
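The rx_backlog ring above is advanced with backlog_next() in 3c505.c; its definition is not shown here, but a simple modular increment over BACKLOG_SIZE would match every use in the interrupt and receive paths (this is an assumption, not a quote of the original):

	/* Assumed helper, consistent with how rx_backlog.in/out are used in 3c505.c. */
	#define backlog_next(n) (((n) + 1) % BACKLOG_SIZE)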
diff --git a/drivers/net/ethernet/i825xx/3c507.c b/drivers/net/ethernet/i825xx/3c507.c
deleted file mode 100644
index e8984b059905..000000000000
--- a/drivers/net/ethernet/i825xx/3c507.c
+++ /dev/null
@@ -1,938 +0,0 @@
-/* 3c507.c: An EtherLink16 device driver for Linux. */
-/*
- Written 1993,1994 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
-
- Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings)
- and jrs@world.std.com (Rick Sladkey) for testing and bugfixes.
- Mark Salazar <leslie@access.digex.net> made the changes for cards with
- only 16K packet buffers.
-
- Things remaining to do:
- Verify that the tx and rx buffers don't have fencepost errors.
- Move the theory of operation and memory map documentation.
- The statistics need to be updated correctly.
-*/
-
-#define DRV_NAME "3c507"
-#define DRV_VERSION "1.10a"
-#define DRV_RELDATE "11/17/2001"
-
-static const char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
-
-/*
- Sources:
- This driver wouldn't have been written with the availability of the
- Crynwr driver source code. It provided a known-working implementation
- that filled in the gaping holes of the Intel documentation. Three cheers
- for Russ Nelson.
-
- Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
- info that the casual reader might think that it documents the i82586 :-<.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-/* use 0 for production, 1 for verification, 2..7 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-#define debug net_debug
-
-
-/*
- Details of the i82586.
-
- You'll really need the databook to understand the details of this part,
- but the outline is that the i82586 has two separate processing units.
- Both are started from a list of three configuration tables, of which only
- the last, the System Control Block (SCB), is used after reset-time. The SCB
- has the following fields:
- Status word
- Command word
- Tx/Command block addr.
- Rx block addr.
- The command word accepts the following controls for the Tx and Rx units:
- */
-
-#define CUC_START 0x0100
-#define CUC_RESUME 0x0200
-#define CUC_SUSPEND 0x0300
-#define RX_START 0x0010
-#define RX_RESUME 0x0020
-#define RX_SUSPEND 0x0030
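A minimal sketch of the SCB layout the comment above describes, assuming 16-bit little-endian fields; the driver itself reads and writes it through the iSCB_* byte offsets defined further down rather than through a C struct:

	struct scb_sketch {
		unsigned short status;      /* written by the 82586 (iSCB_STATUS) */
		unsigned short command;     /* CUC_xxx / RX_xxx controls from the host (iSCB_CMD) */
		unsigned short cbl_offset;  /* Tx/command block list offset (iSCB_CBL) */
		unsigned short rfa_offset;  /* Rx frame area offset (iSCB_RFA) */
	};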
-
-/* The Rx unit uses a list of frame descriptors and a list of data buffer
- descriptors. We use full-sized (1518 byte) data buffers, so there is
- a one-to-one pairing of frame descriptors to buffer descriptors.
-
- The Tx ("command") unit executes a list of commands that look like:
- Status word Written by the 82586 when the command is done.
- Command word Command in lower 3 bits, post-command action in upper 3
- Link word The address of the next command.
- Parameters (as needed).
-
- Some definitions related to the Command Word are:
- */
-#define CMD_EOL 0x8000 /* The last command of the list, stop. */
-#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
-#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
-
-enum commands {
- CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
- CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
-
-/* Information that needs to be kept for each board. */
-struct net_local {
- int last_restart;
- ushort rx_head;
- ushort rx_tail;
- ushort tx_head;
- ushort tx_cmd_link;
- ushort tx_reap;
- ushort tx_pkts_in_ring;
- spinlock_t lock;
- void __iomem *base;
-};
-
-/*
- Details of the EtherLink16 Implementation
- The 3c507 is a generic shared-memory i82586 implementation.
- The host can map 16K, 32K, 48K, or 64K of the 64K memory into
- 0x0[CD][08]0000, or all 64K into 0xF[02468]0000.
- */
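Two worked decodes of the MEM_CONFIG register, mirroring the decode done in el16_probe1() further down (the values are chosen purely as examples):

	/* mem_config = 0x20: bit 5 set   -> 64K window, base = 0xf00000                        */
	/* mem_config = 0x18: bit 5 clear -> size = ((0x18 & 3) + 1) << 14 = 16K,
	 *                                   base = 0x0c0000 + ((0x18 & 0x18) << 12) = 0xd8000  */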
-
-/* Offsets from the base I/O address. */
-#define SA_DATA 0 /* Station address data, or 3Com signature. */
-#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */
-#define RESET_IRQ 10 /* Reset the latched IRQ line. */
-#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */
-#define ROM_CONFIG 13
-#define MEM_CONFIG 14
-#define IRQ_CONFIG 15
-#define EL16_IO_EXTENT 16
-
-/* The ID port is used at boot-time to locate the ethercard. */
-#define ID_PORT 0x100
-
-/* Offsets to registers in the mailbox (SCB). */
-#define iSCB_STATUS 0x8
-#define iSCB_CMD 0xA
-#define iSCB_CBL 0xC /* Command BLock offset. */
-#define iSCB_RFA 0xE /* Rx Frame Area offset. */
-
-/* Since the 3c507 maps the shared memory window so that the last byte is
- at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or
- 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively.
- We can account for this by setting the 'SCB Base' entry in the ISCP table
- below for all the 16 bit offset addresses, and also adding the 'SCB Base'
- value to all 24 bit physical addresses (in the SCP table and the TX and RX
- Buffer Descriptors).
- -Mark
- */
-#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
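Worked example: with a 48K window (dev->mem_end - dev->mem_start == 0xC000), SCB_BASE = 64K - 48K = 16K = 0x4000, so offset 0 in the host window corresponds to 82586 address 0x4000 and the window's last byte lands at 82586 address 0xFFFF, exactly as the comment above requires.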
-
-/*
- What follows in 'init_words[]' is the "program" that is downloaded to the
- 82586 memory. It's mostly tables and command blocks, and starts at the
- reset address 0xfffff6. This is designed to be similar to the EtherExpress,
- thus the unusual location of the SCB at 0x0008.
-
- Even with the additional "don't care" values, doing it this way takes less
- program space than initializing the individual tables, and I feel it's much
- cleaner.
-
- The databook is particularly useless for the first two structures; I had
- to use the Crynwr driver as an example.
-
- The memory setup is as follows:
- */
-
-#define CONFIG_CMD 0x0018
-#define SET_SA_CMD 0x0024
-#define SA_OFFSET 0x002A
-#define IDLELOOP 0x30
-#define TDR_CMD 0x38
-#define TDR_TIME 0x3C
-#define DUMP_CMD 0x40
-#define DIAG_CMD 0x48
-#define SET_MC_CMD 0x4E
-#define DUMP_DATA 0x56 /* A 170 byte buffer for dump and Set-MC into. */
-
-#define TX_BUF_START 0x0100
-#define NUM_TX_BUFS 5
-#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
-
-#define RX_BUF_START 0x2000
-#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
-#define RX_BUF_END (dev->mem_end - dev->mem_start)
-
-#define TX_TIMEOUT (HZ/20)
-
-/*
- That's it: only 86 bytes to set up the beast, including every extra
- command available. The 170 byte buffer at DUMP_DATA is shared between the
- Dump command (called only by the diagnostic program) and the SetMulticastList
- command.
-
- To complete the memory setup you only have to write the station address at
- SA_OFFSET and create the Tx & Rx buffer lists.
-
- The Tx command chain and buffer list is set up as follows:
- A Tx command table, with the data buffer pointing to...
- A Tx data buffer descriptor. The packet is in a single buffer, rather than
- chaining together several smaller buffers.
- A NoOp command, which initially points to itself,
- And the packet data.
-
- A transmit is done by filling in the Tx command table and data buffer,
- re-writing the NoOp command, and finally changing the offset of the last
- command to point to the current Tx command. When the Tx command is finished,
- it jumps to the NoOp, when it loops until the next Tx command changes the
- "link offset" in the NoOp. This way the 82586 never has to go through the
- slow restart sequence.
-
- The Rx buffer list is set up in the obvious ring structure. We have enough
- memory (and low enough interrupt latency) that we can avoid the complicated
- Rx buffer linked lists by always associating a full-size Rx data buffer with
- each Rx data frame.
-
- I currently use four transmit buffers starting at TX_BUF_START (0x0100), and
- use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
-
- */
-
-static unsigned short init_words[] = {
- /* System Configuration Pointer (SCP). */
- 0x0000, /* Set bus size to 16 bits. */
- 0,0, /* pad words. */
- 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
-
- /* Intermediate System Configuration Pointer (ISCP). */
- 0x0001, /* Status word that's cleared when init is done. */
- 0x0008,0,0, /* SCB offset, (skip, skip) */
-
- /* System Control Block (SCB). */
- 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
- CONFIG_CMD, /* Command list pointer, points to Configure. */
- RX_BUF_START, /* Rx block list. */
- 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
-
- /* 0x0018: Configure command. Change to put MAC data with packet. */
- 0, CmdConfigure, /* Status, command. */
- SET_SA_CMD, /* Next command is Set Station Addr. */
- 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
- 0x2e40, /* Magic values, including MAC data location. */
- 0, /* Unused pad word. */
-
- /* 0x0024: Setup station address command. */
- 0, CmdSASetup,
- SET_MC_CMD, /* Next command. */
- 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
-
- /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
- 0, CmdNOp, IDLELOOP, 0 /* pad */,
-
- /* 0x0038: An unused Time-Domain Reflectometer command. */
- 0, CmdTDR, IDLELOOP, 0,
-
- /* 0x0040: An unused Dump State command. */
- 0, CmdDump, IDLELOOP, DUMP_DATA,
-
- /* 0x0048: An unused Diagnose command. */
- 0, CmdDiagnose, IDLELOOP,
-
- /* 0x004E: An empty set-multicast-list command. */
- 0, CmdMulticastList, IDLELOOP, 0,
-};
-
-/* Index to functions, as function prototypes. */
-
-static int el16_probe1(struct net_device *dev, int ioaddr);
-static int el16_open(struct net_device *dev);
-static netdev_tx_t el16_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t el16_interrupt(int irq, void *dev_id);
-static void el16_rx(struct net_device *dev);
-static int el16_close(struct net_device *dev);
-static void el16_tx_timeout (struct net_device *dev);
-
-static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad);
-static void init_82586_mem(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-static void init_rx_bufs(struct net_device *);
-
-static int io = 0x300;
-static int irq;
-static int mem_start;
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, (detachable devices only) allocate space for the
- device and return success.
- */
-
-struct net_device * __init el16_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
- const unsigned *port;
- int err = -ENODEV;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- mem_start = dev->mem_start & 15;
- }
-
- if (io > 0x1ff) /* Check a single specified location. */
- err = el16_probe1(dev, io);
- else if (io != 0)
- err = -ENXIO; /* Don't probe at all. */
- else {
- for (port = ports; *port; port++) {
- err = el16_probe1(dev, *port);
- if (!err)
- break;
- }
- }
-
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- free_irq(dev->irq, dev);
- iounmap(((struct net_local *)netdev_priv(dev))->base);
- release_region(dev->base_addr, EL16_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = el16_open,
- .ndo_stop = el16_close,
- .ndo_start_xmit = el16_send_packet,
- .ndo_tx_timeout = el16_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init el16_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned char init_ID_done;
- int i, irq, irqval, retval;
- struct net_local *lp;
-
- if (init_ID_done == 0) {
- ushort lrs_state = 0xff;
- /* Send the ID sequence to the ID_PORT to enable the board(s). */
- outb(0x00, ID_PORT);
- for(i = 0; i < 255; i++) {
- outb(lrs_state, ID_PORT);
- lrs_state <<= 1;
- if (lrs_state & 0x100)
- lrs_state ^= 0xe7;
- }
- outb(0x00, ID_PORT);
- init_ID_done = 1;
- }
-
- if (!request_region(ioaddr, EL16_IO_EXTENT, DRV_NAME))
- return -ENODEV;
-
- if ((inb(ioaddr) != '*') || (inb(ioaddr + 1) != '3') ||
- (inb(ioaddr + 2) != 'C') || (inb(ioaddr + 3) != 'O')) {
- retval = -ENODEV;
- goto out;
- }
-
- pr_info("%s: 3c507 at %#x,", dev->name, ioaddr);
-
- /* We should make a few more checks here, like the first three octets of
- the S.A. for the manufacturer's code. */
-
- irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
-
- irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev);
- if (irqval) {
- pr_cont("\n");
- pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
- retval = -EAGAIN;
- goto out;
- }
-
- /* We've committed to using the board, and can start filling in *dev. */
- dev->base_addr = ioaddr;
-
- outb(0x01, ioaddr + MISC_CTRL);
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
- pr_cont(" %pM", dev->dev_addr);
-
- if (mem_start)
- net_debug = mem_start & 7;
-
-#ifdef MEM_BASE
- dev->mem_start = MEM_BASE;
- dev->mem_end = dev->mem_start + 0x10000;
-#else
- {
- int base;
- int size;
- char mem_config = inb(ioaddr + MEM_CONFIG);
- if (mem_config & 0x20) {
- size = 64*1024;
- base = 0xf00000 + (mem_config & 0x08 ? 0x080000
- : ((mem_config & 3) << 17));
- } else {
- size = ((mem_config & 3) + 1) << 14;
- base = 0x0c0000 + ( (mem_config & 0x18) << 12);
- }
- dev->mem_start = base;
- dev->mem_end = base + size;
- }
-#endif
-
- dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
- dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
-
- pr_cont(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
- dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
-
- if (net_debug)
- pr_debug("%s", version);
-
- lp = netdev_priv(dev);
- spin_lock_init(&lp->lock);
- lp->base = ioremap(dev->mem_start, RX_BUF_END);
- if (!lp->base) {
- pr_err("3c507: unable to remap memory\n");
- retval = -EAGAIN;
- goto out1;
- }
-
- dev->netdev_ops = &netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->ethtool_ops = &netdev_ethtool_ops;
- dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, EL16_IO_EXTENT);
- return retval;
-}
-
-static int el16_open(struct net_device *dev)
-{
- /* Initialize the 82586 memory and start it. */
- init_82586_mem(dev);
-
- netif_start_queue(dev);
- return 0;
-}
-
-
-static void el16_tx_timeout (struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- if (net_debug > 1)
- pr_debug("%s: transmit timed out, %s? ", dev->name,
- readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" :
- "network cable problem");
- /* Try to restart the adaptor. */
- if (lp->last_restart == dev->stats.tx_packets) {
- if (net_debug > 1)
- pr_cont("Resetting board.\n");
- /* Completely reset the adaptor. */
- init_82586_mem (dev);
- lp->tx_pkts_in_ring = 0;
- } else {
- /* Issue the channel attention signal and hope it "gets better". */
- if (net_debug > 1)
- pr_cont("Kicking board.\n");
- writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD);
- outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
- lp->last_restart = dev->stats.tx_packets;
- }
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue (dev);
-}
-
-
-static netdev_tx_t el16_send_packet (struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- unsigned char *buf = skb->data;
-
- netif_stop_queue (dev);
-
- spin_lock_irqsave (&lp->lock, flags);
-
- dev->stats.tx_bytes += length;
- /* Disable the 82586's input to the interrupt line. */
- outb (0x80, ioaddr + MISC_CTRL);
-
- hardware_send_packet (dev, buf, skb->len, length - skb->len);
-
- /* Enable the 82586 interrupt input. */
- outb (0x84, ioaddr + MISC_CTRL);
-
- spin_unlock_irqrestore (&lp->lock, flags);
-
- dev_kfree_skb (skb);
-
- /* You might need to clean up and record Tx statistics here. */
-
- return NETDEV_TX_OK;
-}
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t el16_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status, boguscount = 0;
- ushort ack_cmd = 0;
- void __iomem *shmem;
-
- if (dev == NULL) {
- pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
- shmem = lp->base;
-
- spin_lock(&lp->lock);
-
- status = readw(shmem+iSCB_STATUS);
-
- if (net_debug > 4) {
- pr_debug("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
- }
-
- /* Disable the 82586's input to the interrupt line. */
- outb(0x80, ioaddr + MISC_CTRL);
-
- /* Reap the Tx packet buffers. */
- while (lp->tx_pkts_in_ring) {
- unsigned short tx_status = readw(shmem+lp->tx_reap);
- if (!(tx_status & 0x8000)) {
- if (net_debug > 5)
- pr_debug("Tx command incomplete (%#x).\n", lp->tx_reap);
- break;
- }
- /* Tx unsuccessful or some interesting status bit set. */
- if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) {
- dev->stats.tx_errors++;
- if (tx_status & 0x0600) dev->stats.tx_carrier_errors++;
- if (tx_status & 0x0100) dev->stats.tx_fifo_errors++;
- if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++;
- if (tx_status & 0x0020) dev->stats.tx_aborted_errors++;
- dev->stats.collisions += tx_status & 0xf;
- }
- dev->stats.tx_packets++;
- if (net_debug > 5)
- pr_debug("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
- lp->tx_reap += TX_BUF_SIZE;
- if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
- lp->tx_reap = TX_BUF_START;
-
- lp->tx_pkts_in_ring--;
- /* There is always more space in the Tx ring buffer now. */
- netif_wake_queue(dev);
-
- if (++boguscount > 10)
- break;
- }
-
- if (status & 0x4000) { /* Packet received. */
- if (net_debug > 5)
- pr_debug("Received packet, rx_head %04x.\n", lp->rx_head);
- el16_rx(dev);
- }
-
- /* Acknowledge the interrupt sources. */
- ack_cmd = status & 0xf000;
-
- if ((status & 0x0700) != 0x0200 && netif_running(dev)) {
- if (net_debug)
- pr_debug("%s: Command unit stopped, status %04x, restarting.\n",
- dev->name, status);
- /* If this ever occurs we should really re-write the idle loop, reset
- the Tx list, and do a complete restart of the command unit.
- For now we rely on the Tx timeout if the resume doesn't work. */
- ack_cmd |= CUC_RESUME;
- }
-
- if ((status & 0x0070) != 0x0040 && netif_running(dev)) {
- /* The Rx unit is not ready, it must be hung. Restart the receiver by
- initializing the rx buffers, and issuing an Rx start command. */
- if (net_debug)
- pr_debug("%s: Rx unit stopped, status %04x, restarting.\n",
- dev->name, status);
- init_rx_bufs(dev);
- writew(RX_BUF_START,shmem+iSCB_RFA);
- ack_cmd |= RX_START;
- }
-
- writew(ack_cmd,shmem+iSCB_CMD);
- outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
-
- /* Clear the latched interrupt. */
- outb(0, ioaddr + RESET_IRQ);
-
- /* Enable the 82586's interrupt input. */
- outb(0x84, ioaddr + MISC_CTRL);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-static int el16_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- netif_stop_queue(dev);
-
- /* Flush the Tx and disable Rx. */
- writew(RX_SUSPEND | CUC_SUSPEND,shmem+iSCB_CMD);
- outb(0, ioaddr + SIGNAL_CA);
-
- /* Disable the 82586's input to the interrupt line. */
- outb(0x80, ioaddr + MISC_CTRL);
-
- /* We always physically use the IRQ line, so we don't do free_irq(). */
-
- /* Update the statistics here. */
-
- return 0;
-}
-
-/* Initialize the Rx-block list. */
-static void init_rx_bufs(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- void __iomem *write_ptr;
- unsigned short SCB_base = SCB_BASE;
-
- int cur_rxbuf = lp->rx_head = RX_BUF_START;
-
- /* Initialize each Rx frame + data buffer. */
- do { /* While there is room for one more. */
-
- write_ptr = lp->base + cur_rxbuf;
-
- writew(0x0000,write_ptr); /* Status */
- writew(0x0000,write_ptr+=2); /* Command */
- writew(cur_rxbuf + RX_BUF_SIZE,write_ptr+=2); /* Link */
- writew(cur_rxbuf + 22,write_ptr+=2); /* Buffer offset */
- writew(0x0000,write_ptr+=2); /* Pad for dest addr. */
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2); /* Pad for source addr. */
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2); /* Pad for protocol. */
-
- writew(0x0000,write_ptr+=2); /* Buffer: Actual count */
- writew(-1,write_ptr+=2); /* Buffer: Next (none). */
- writew(cur_rxbuf + 0x20 + SCB_base,write_ptr+=2);/* Buffer: Address low */
- writew(0x0000,write_ptr+=2);
- /* Finally, the number of bytes in the buffer. */
- writew(0x8000 + RX_BUF_SIZE-0x20,write_ptr+=2);
-
- lp->rx_tail = cur_rxbuf;
- cur_rxbuf += RX_BUF_SIZE;
- } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
-
- /* Terminate the list by setting the EOL bit, and wrap the pointer to make
- the list a ring. */
- write_ptr = lp->base + lp->rx_tail + 2;
- writew(0xC000,write_ptr); /* Command, mark as last. */
- writew(lp->rx_head,write_ptr+2); /* Link */
-}
-
-static void init_82586_mem(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- /* Enable loopback to protect the wire while starting up,
- and hold the 586 in reset during the memory initialization. */
- outb(0x20, ioaddr + MISC_CTRL);
-
- /* Fix the ISCP address and base. */
- init_words[3] = SCB_BASE;
- init_words[7] = SCB_BASE;
-
- /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */
- memcpy_toio(lp->base + RX_BUF_END - 10, init_words, 10);
-
- /* Write the words at 0x0000. */
- memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
-
- /* Fill in the station address. */
- memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
-
- /* The Tx-block list is written as needed. We just set up the values. */
- lp->tx_cmd_link = IDLELOOP + 4;
- lp->tx_head = lp->tx_reap = TX_BUF_START;
-
- init_rx_bufs(dev);
-
- /* Start the 586 by releasing the reset line, but leave loopback. */
- outb(0xA0, ioaddr + MISC_CTRL);
-
- /* This was time consuming to track down: you need to give two channel
- attention signals to reliably start up the i82586. */
- outb(0, ioaddr + SIGNAL_CA);
-
- {
- int boguscnt = 50;
- while (readw(shmem+iSCB_STATUS) == 0)
- if (--boguscnt == 0) {
- pr_warning("%s: i82586 initialization timed out with status %04x, cmd %04x.\n",
- dev->name, readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD));
- break;
- }
- /* Issue channel-attn -- the 82586 won't start without it. */
- outb(0, ioaddr + SIGNAL_CA);
- }
-
- /* Disable loopback and enable interrupts. */
- outb(0x84, ioaddr + MISC_CTRL);
- if (net_debug > 4)
- pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
- readw(shmem+iSCB_STATUS));
-}
-
-static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
-{
- struct net_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- ushort tx_block = lp->tx_head;
- void __iomem *write_ptr = lp->base + tx_block;
- static char padding[ETH_ZLEN];
-
- /* Set the write pointer to the Tx block, and put out the header. */
- writew(0x0000,write_ptr); /* Tx status */
- writew(CMD_INTR|CmdTx,write_ptr+=2); /* Tx command */
- writew(tx_block+16,write_ptr+=2); /* Next command is a NoOp. */
- writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */
-
- /* Output the data buffer descriptor. */
- writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */
- writew(-1,write_ptr+=2); /* No next data buffer. */
- writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */
- writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */
-
- /* Output the Loop-back NoOp command. */
- writew(0x0000,write_ptr+=2); /* Tx status */
- writew(CmdNOp,write_ptr+=2); /* Tx command */
- writew(tx_block+16,write_ptr+=2); /* Next is myself. */
-
- /* Output the packet at the write pointer. */
- memcpy_toio(write_ptr+2, buf, length);
- if (pad)
- memcpy_toio(write_ptr+length+2, padding, pad);
-
- /* Set the old command link pointing to this send packet. */
- writew(tx_block,lp->base + lp->tx_cmd_link);
- lp->tx_cmd_link = tx_block + 20;
-
- /* Set the next free tx region. */
- lp->tx_head = tx_block + TX_BUF_SIZE;
- if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE)
- lp->tx_head = TX_BUF_START;
-
- if (net_debug > 4) {
- pr_debug("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
- dev->name, ioaddr, length, tx_block, lp->tx_head);
- }
-
- /* Grimly block further packets if there has been insufficient reaping. */
- if (++lp->tx_pkts_in_ring < NUM_TX_BUFS)
- netif_wake_queue(dev);
-}
-
-static void el16_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- void __iomem *shmem = lp->base;
- ushort rx_head = lp->rx_head;
- ushort rx_tail = lp->rx_tail;
- ushort boguscount = 10;
- short frame_status;
-
- while ((frame_status = readw(shmem+rx_head)) < 0) { /* Command complete */
- void __iomem *read_frame = lp->base + rx_head;
- ushort rfd_cmd = readw(read_frame+2);
- ushort next_rx_frame = readw(read_frame+4);
- ushort data_buffer_addr = readw(read_frame+6);
- void __iomem *data_frame = lp->base + data_buffer_addr;
- ushort pkt_len = readw(data_frame);
-
- if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22 ||
- (pkt_len & 0xC000) != 0xC000) {
- pr_err("%s: Rx frame at %#x corrupted, "
- "status %04x cmd %04x next %04x "
- "data-buf @%04x %04x.\n",
- dev->name, rx_head, frame_status, rfd_cmd,
- next_rx_frame, data_buffer_addr, pkt_len);
- } else if ((frame_status & 0x2000) == 0) {
- /* Frame Rxed, but with error. */
- dev->stats.rx_errors++;
- if (frame_status & 0x0800) dev->stats.rx_crc_errors++;
- if (frame_status & 0x0400) dev->stats.rx_frame_errors++;
- if (frame_status & 0x0200) dev->stats.rx_fifo_errors++;
- if (frame_status & 0x0100) dev->stats.rx_over_errors++;
- if (frame_status & 0x0080) dev->stats.rx_length_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- pkt_len &= 0x3fff;
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
- pr_err("%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- break;
- }
-
- skb_reserve(skb,2);
-
- /* 'skb->data' points to the start of sk_buff data area. */
- memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len);
-
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-
- /* Clear the status word and set End-of-List on the rx frame. */
- writew(0,read_frame);
- writew(0xC000,read_frame+2);
- /* Clear the end-of-list on the prev. RFD. */
- writew(0x0000,lp->base + rx_tail + 2);
-
- rx_tail = rx_head;
- rx_head = next_rx_frame;
- if (--boguscount == 0)
- break;
- }
-
- lp->rx_head = rx_head;
- lp->rx_tail = rx_tail;
-}
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-#ifdef MODULE
-static struct net_device *dev_3c507;
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "EtherLink16 I/O base address");
-MODULE_PARM_DESC(irq, "(ignored)");
-
-int __init init_module(void)
-{
- if (io == 0)
- pr_notice("3c507: You should not use auto-probing with insmod!\n");
- dev_3c507 = el16_probe(-1);
- return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0;
-}
-
-void __exit
-cleanup_module(void)
-{
- struct net_device *dev = dev_3c507;
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- iounmap(((struct net_local *)netdev_priv(dev))->base);
- release_region(dev->base_addr, EL16_IO_EXTENT);
- free_netdev(dev);
-}
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 6aa927af382c..1c54e229e3cc 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -95,9 +95,6 @@ static char version[] __initdata =
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
#define ENABLE_BVME6000_NET
#endif
-#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
-#define ENABLE_APRICOT
-#endif
#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
@@ -120,8 +117,15 @@ static char version[] __initdata =
#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY 0x00010000
-#define MACH_IS_APRICOT 0
#else
+#error 82596.c: unknown architecture
+#endif
+
+/*
+ * These were the intel versions, left here for reference. There
+ * are currently no x86 users of this legacy i82596 chip.
+ */
+#if 0
#define WSWAPrfd(x) ((struct i596_rfd *)((long)x))
#define WSWAPrbd(x) ((struct i596_rbd *)((long)x))
#define WSWAPiscp(x) ((struct i596_iscp *)((long)x))
@@ -130,7 +134,6 @@ static char version[] __initdata =
#define WSWAPtbd(x) ((struct i596_tbd *)((long)x))
#define WSWAPchar(x) ((char *)((long)x))
#define ISCP_BUSY 0x0001
-#define MACH_IS_APRICOT 1
#endif
/*
@@ -383,11 +386,6 @@ static inline void CA(struct net_device *dev)
i = *(volatile u32 *) (dev->base_addr);
}
#endif
-#ifdef ENABLE_APRICOT
- if (MACH_IS_APRICOT) {
- outw(0, (short) (dev->base_addr) + 4);
- }
-#endif
}
@@ -617,9 +615,6 @@ static void rebuild_rx_bufs(struct net_device *dev)
static int init_i596_mem(struct net_device *dev)
{
struct i596_private *lp = dev->ml_priv;
-#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
- short ioaddr = dev->base_addr;
-#endif
unsigned long flags;
MPU_PORT(dev, PORT_RESET, NULL);
@@ -653,18 +648,6 @@ static int init_i596_mem(struct net_device *dev)
MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
-#elif defined(ENABLE_APRICOT)
-
- {
- u32 scp = virt_to_bus(&lp->scp);
-
- /* change the scp address */
- outw(0, ioaddr);
- outw(0, ioaddr);
- outb(4, ioaddr + 0xf);
- outw(scp | 2, ioaddr);
- outw(scp >> 16, ioaddr);
- }
#endif
lp->last_cmd = jiffies;
@@ -677,10 +660,6 @@ static int init_i596_mem(struct net_device *dev)
if (MACH_IS_BVME6000)
lp->scp.sysbus = 0x0000004c;
#endif
-#ifdef ENABLE_APRICOT
- if (MACH_IS_APRICOT)
- lp->scp.sysbus = 0x00440000;
-#endif
lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
@@ -698,10 +677,6 @@ static int init_i596_mem(struct net_device *dev)
DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
-#if defined(ENABLE_APRICOT)
- (void) inb(ioaddr + 0x10);
- outb(4, ioaddr + 0xf);
-#endif
CA(dev);
if (wait_istat(dev,lp,1000,"initialization timed out"))
@@ -1203,43 +1178,6 @@ struct net_device * __init i82596_probe(int unit)
goto found;
}
#endif
-#ifdef ENABLE_APRICOT
- {
- int checksum = 0;
- int ioaddr = 0x300;
-
- /* this is easy: the ethernet interface can only be at 0x300 */
- /* first check nothing is already registered here */
-
- if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
- printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
- err = -EBUSY;
- goto out;
- }
-
- dev->base_addr = ioaddr;
-
- for (i = 0; i < 8; i++) {
- eth_addr[i] = inb(ioaddr + 8 + i);
- checksum += eth_addr[i];
- }
-
- /* checksum is a multiple of 0x100; got this wrong the first time:
- some machines have 0x100, some 0x200. The DOS driver doesn't
- even bother with the checksum.
- Some other boards trip the checksum... but then appear as
- ether address 0. Trap these - AC */
-
- if ((checksum % 0x100) ||
- (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
- err = -ENODEV;
- goto out1;
- }
-
- dev->irq = 10;
- goto found;
- }
-#endif
err = -ENODEV;
goto out;
@@ -1296,9 +1234,6 @@ out2:
#endif
free_page ((u32)(dev->mem_start));
out1:
-#ifdef ENABLE_APRICOT
- release_region(dev->base_addr, I596_TOTAL_SIZE);
-#endif
out:
free_netdev(dev);
return ERR_PTR(err);
@@ -1455,10 +1390,6 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
*ethirq = 3;
}
#endif
-#ifdef ENABLE_APRICOT
- (void) inb(ioaddr + 0x10);
- outb(4, ioaddr + 0xf);
-#endif
CA(dev);
DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
@@ -1589,11 +1520,6 @@ static void set_multicast_list(struct net_device *dev)
#ifdef MODULE
static struct net_device *dev_82596;
-#ifdef ENABLE_APRICOT
-module_param(irq, int, 0);
-MODULE_PARM_DESC(irq, "Apricot IRQ number");
-#endif
-
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");
@@ -1620,10 +1546,6 @@ void __exit cleanup_module(void)
IOMAP_FULL_CACHING);
#endif
free_page ((u32)(dev_82596->mem_start));
-#ifdef ENABLE_APRICOT
- /* If we don't do this, we can't re-insmod it later. */
- release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
-#endif
free_netdev(dev_82596);
}
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index 959faf7388e2..9521e68aa3b3 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -5,9 +5,7 @@
config NET_VENDOR_I825XX
bool "Intel (82586/82593/82596) devices"
default y
- depends on NET_VENDOR_INTEL && (ISA || ISA_DMA_API || ARM || \
- ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
- GSC || BVME6000 || MVME16x || EXPERIMENTAL)
+ depends on NET_VENDOR_INTEL
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -20,29 +18,6 @@ config NET_VENDOR_I825XX
if NET_VENDOR_I825XX
-config ELPLUS
- tristate "3c505 \"EtherLink Plus\" support"
- depends on ISA && ISA_DMA_API
- ---help---
- Information about this network (Ethernet) card can be found in
- <file:Documentation/networking/3c505.txt>. If you have a card of
- this type, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c505.
-
-config EL16
- tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)"
- depends on ISA && EXPERIMENTAL
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c507.
-
config ARM_ETHER1
tristate "Acorn Ether1 support"
depends on ARM && ARCH_ACORN
@@ -50,17 +25,6 @@ config ARM_ETHER1
If you have an Acorn system with one of these (AKA25) network cards,
you should say Y to this option if you wish to use it with Linux.
-config APRICOT
- tristate "Apricot Xen-II on board Ethernet"
- depends on ISA
- ---help---
- If you have a network (Ethernet) controller of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called apricot.
-
config BVME6000_NET
tristate "BVME6000 Ethernet support"
depends on BVME6000
@@ -70,33 +34,6 @@ config BVME6000_NET
in your kernel.
To compile this driver as a module, choose M here.
-config EEXPRESS
- tristate "EtherExpress 16 support"
- depends on ISA
- ---help---
- If you have an EtherExpress16 network (Ethernet) card, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that the Intel
- EtherExpress16 card used to be regarded as a very poor choice
- because the driver was very unreliable. We now have a new driver
- that should do better.
-
- To compile this driver as a module, choose M here. The module
- will be called eexpress.
-
-config EEXPRESS_PRO
- tristate "EtherExpressPro support/EtherExpress 10 (i82595) support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y. This
- driver supports Intel i82595{FX,TX} based boards. Note however
- that the EtherExpress PRO/100 Ethernet card has its own separate
- driver. Please read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called eepro.
-
config LASI_82596
tristate "Lasi ethernet"
depends on GSC
@@ -104,14 +41,6 @@ config LASI_82596
Say Y here to support the builtin Intel 82596 ethernet controller
found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
-config LP486E
- tristate "LP486E on board Ethernet"
- depends on ISA
- ---help---
- Say Y here to support the 82596-based on-board Ethernet controller
- for the Panther motherboard, which is one of the two shipped in the
- Intel Professional Workstation.
-
config MVME16x_NET
tristate "MVME16x Ethernet support"
depends on MVME16x
@@ -121,17 +50,6 @@ config MVME16x_NET
driver for this chip in your kernel.
To compile this driver as a module, choose M here.
-config NI52
- tristate "NI5210 support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ni52.
-
config SNI_82596
tristate "SNI RM ethernet"
depends on SNI_RM
@@ -148,14 +66,4 @@ config SUN3_82586
that this driver does not support 82586-based adapters on additional
VME boards.
-config ZNET
- tristate "Zenith Z-Note support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ISA_DMA_API && X86
- ---help---
- The Zenith Z-Note notebook computer has a built-in network
- (Ethernet) card, and this is the Linux driver for it. Note that the
- IBM Thinkpad 300 is compatible with the Z-Note and is also supported
- by this driver. Read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
endif # NET_VENDOR_I825XX
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
index 6adff85e8ecc..8c8dcd29c40d 100644
--- a/drivers/net/ethernet/i825xx/Makefile
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -3,15 +3,7 @@
#
obj-$(CONFIG_ARM_ETHER1) += ether1.o
-obj-$(CONFIG_EEXPRESS) += eexpress.o
-obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
-obj-$(CONFIG_ELPLUS) += 3c505.o
-obj-$(CONFIG_EL16) += 3c507.o
-obj-$(CONFIG_LP486E) += lp486e.o
-obj-$(CONFIG_NI52) += ni52.o
obj-$(CONFIG_SUN3_82586) += sun3_82586.o
-obj-$(CONFIG_ZNET) += znet.o
-obj-$(CONFIG_APRICOT) += 82596.o
obj-$(CONFIG_LASI_82596) += lasi_82596.o
obj-$(CONFIG_SNI_82596) += sni_82596.o
obj-$(CONFIG_MVME16x_NET) += 82596.o
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
deleted file mode 100644
index 7f49fd54c521..000000000000
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ /dev/null
@@ -1,1822 +0,0 @@
-/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */
-/*
- Written 1994, 1995,1996 by Bao C. Ha.
-
- Copyright (C) 1994, 1995,1996 by Bao C. Ha.
-
- This software may be used and distributed
- according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- The author may be reached at bao.ha@srs.gov
- or 418 Hastings Place, Martinez, GA 30907.
-
- Things remaining to do:
- Better record keeping of errors.
- Eliminate transmit interrupt to reduce overhead.
- Implement "concurrent processing". I won't be doing it!
-
- Bugs:
-
- If you have a problem of not detecting the 82595 during a
- reboot (warm reset), disabling the FLASH memory should fix it.
- This is a hardware compatibility problem.
-
- Versions:
- 0.13b basic ethtool support (aris, 09/13/2004)
- 0.13a in memory shortage, drop packets also in board
- (Michael Westermann <mw@microdata-pos.de>, 07/30/2002)
- 0.13 irq sharing, rewrote probe function, fixed a nasty bug in
- hardware_send_packet and a major cleanup (aris, 11/08/2001)
- 0.12d fixing a problem with single card detected as eight eth devices
- fixing a problem with sudden drop in card performance
- (chris (asdn@go2.pl), 10/29/2001)
- 0.12c fixing some problems with old cards (aris, 01/08/2001)
- 0.12b misc fixes (aris, 06/26/2000)
- 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x
- (aris (aris@conectiva.com.br), 05/19/2000)
- 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999)
- 0.11d added __initdata, __init stuff; call spin_lock_init
- in eepro_probe1. Replaced "eepro" by dev->name. Augmented
- the code protected by spin_lock in interrupt routine
- (PdP, 12/12/1998)
- 0.11c minor cleanup (PdP, RMC, 09/12/1998)
- 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module
- under 2.1.xx. Debug messages are flagged as KERN_DEBUG to
- avoid console flooding. Added locking at critical parts. Now
- the damn thing is SMP safe.
- 0.11a Attempt to get 2.1.xx support up (RMC)
- 0.11 Brian Candler added support for multiple cards. Tested as
- a module, no idea if it works when compiled into kernel.
-
- 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails
- because the irq is lost somewhere. Fixed that by moving
- request_irq and free_irq to eepro_open and eepro_close respectively.
- 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt.
- I'll need to find a way to specify an ioport other than
- the default one in the PnP case. PnP definitively sucks.
- And, yes, this is not the only reason.
- 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup;
- to use.
- 0.10b Should work now with (some) Pro/10+. At least for
- me (and my two cards) it does. _No_ guarantee for
- function with non-Pro/10+ cards! (don't have any)
- (RMC, 9/11/96)
-
- 0.10 Added support for the Etherexpress Pro/10+. The
- IRQ map was changed significantly from the old
- pro/10. The new interrupt map was provided by
- Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu).
- (BCH, 9/3/96)
-
- 0.09 Fixed a race condition in the transmit algorithm,
- which causes crashes under heavy load with fast
- pentium computers. The performance should also
- improve a bit. The size of RX buffer, and hence
- TX buffer, can also be changed via lilo or insmod.
- (BCH, 7/31/96)
-
- 0.08 Implement 32-bit I/O for the 82595TX and 82595FX
- based lan cards. Disable full-duplex mode if TPE
- is not used. (BCH, 4/8/96)
-
- 0.07a Fix a stat report which counts every packet as a
- heart-beat failure. (BCH, 6/3/95)
-
- 0.07 Modified to support all other 82595-based lan cards.
- The IRQ vector of the EtherExpress Pro will be set
- according to the value saved in the EEPROM. For other
- cards, I will do autoirq_request() to grab the next
- available interrupt vector. (BCH, 3/17/95)
-
- 0.06a,b Interim released. Minor changes in the comments and
- print out format. (BCH, 3/9/95 and 3/14/95)
-
- 0.06 First stable release that I am comfortable with. (BCH,
- 3/2/95)
-
- 0.05 Complete testing of multicast. (BCH, 2/23/95)
-
- 0.04 Adding multicast support. (BCH, 2/14/95)
-
- 0.03 First widely alpha release for public testing.
- (BCH, 2/14/95)
-
-*/
-
-static const char version[] =
- "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n";
-
-#include <linux/module.h>
-
-/*
- Sources:
-
- This driver wouldn't have been written without the availability
- of the Crynwr's Lan595 driver source code. It helps me to
- familiarize with the 82595 chipset while waiting for the Intel
- documentation. I also learned how to detect the 82595 using
- the packet driver's technique.
-
- This driver is written by cutting and pasting the skeleton.c driver
- provided by Donald Becker. I also borrowed the EEPROM routine from
- Donald Becker's 82586 driver.
-
- Datasheet for the Intel 82595 (including the TX and FX version). It
- provides just enough info that the casual reader might think that it
- documents the i82595.
-
- The User Manual for the 82595. It provides a lot of the missing
- information.
-
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/ethtool.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define DRV_NAME "eepro"
-#define DRV_VERSION "0.13c"
-
-#define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) )
-/* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */
-#define SLOW_DOWN inb(0x80)
-/* udelay(2) */
-#define compat_init_data __initdata
-enum iftype { AUI=0, BNC=1, TPE=2 };
-
-/* First, a few definitions that the brave might change. */
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int eepro_portlist[] compat_init_data =
- { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0};
-/* note: 0x300 is default, the 595FX supports ALL IO Ports
- from 0x000 to 0x3F0, some of which are reserved in PCs */
-
-/* To try the (not-really PnP Wakeup: */
-/*
-#define PnPWakeup
-*/
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 0
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-/* The number of low I/O ports used by the ethercard. */
-#define EEPRO_IO_EXTENT 16
-
-/* Different 82595 chips */
-#define LAN595 0
-#define LAN595TX 1
-#define LAN595FX 2
-#define LAN595FX_10ISA 3
-
-/* Information that need to be kept for each board. */
-struct eepro_local {
- unsigned rx_start;
- unsigned tx_start; /* start of the transmit chain */
- int tx_last; /* pointer to last packet in the transmit chain */
- unsigned tx_end; /* end of the transmit chain (plus 1) */
- int eepro; /* 1 for the EtherExpress Pro/10,
- 2 for the EtherExpress Pro/10+,
- 3 for the EtherExpress 10 (blue cards),
- 0 for other 82595-based lan cards. */
- int version; /* a flag to indicate if this is a TX or FX
- version of the 82595 chip. */
- int stepping;
-
- spinlock_t lock; /* Serializing lock */
-
- unsigned rcv_ram; /* pre-calculated space for rx */
- unsigned xmt_ram; /* pre-calculated space for tx */
- unsigned char xmt_bar;
- unsigned char xmt_lower_limit_reg;
- unsigned char xmt_upper_limit_reg;
- short xmt_lower_limit;
- short xmt_upper_limit;
- short rcv_lower_limit;
- short rcv_upper_limit;
- unsigned char eeprom_reg;
- unsigned short word[8];
-};
-
-/* The station (ethernet) address prefix, used for IDing the board. */
-#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */
-#define SA_ADDR1 0xaa
-#define SA_ADDR2 0x00
-
-#define GetBit(x,y) ((x & (1<<y))>>y)
-
-/* EEPROM Word 0: */
-#define ee_PnP 0 /* Plug 'n Play enable bit */
-#define ee_Word1 1 /* Word 1? */
-#define ee_BusWidth 2 /* 8/16 bit */
-#define ee_FlashAddr 3 /* Flash Address */
-#define ee_FlashMask 0x7 /* Mask */
-#define ee_AutoIO 6 /* */
-#define ee_reserved0 7 /* =0! */
-#define ee_Flash 8 /* Flash there? */
-#define ee_AutoNeg 9 /* Auto Negotiation enabled? */
-#define ee_IO0 10 /* IO Address LSB */
-#define ee_IO0Mask 0x /*...*/
-#define ee_IO1 15 /* IO MSB */
-
-/* EEPROM Word 1: */
-#define ee_IntSel 0 /* Interrupt */
-#define ee_IntMask 0x7
-#define ee_LI 3 /* Link Integrity 0= enabled */
-#define ee_PC 4 /* Polarity Correction 0= enabled */
-#define ee_TPE_AUI 5 /* PortSelection 1=TPE */
-#define ee_Jabber 6 /* Jabber prevention 0= enabled */
-#define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */
-#define ee_SMOUT 8 /* SMout Pin Control 0= Input */
-#define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */
-#define ee_reserved1 10 /* .. 12 =0! */
-#define ee_AltReady 13 /* Alternate Ready, 0=normal */
-#define ee_reserved2 14 /* =0! */
-#define ee_Duplex 15
-
-/* Word2,3,4: */
-#define ee_IA5 0 /*bit start for individual Addr Byte 5 */
-#define ee_IA4 8 /*bit start for individual Addr Byte 4 */
-#define ee_IA3 0 /*bit start for individual Addr Byte 3 */
-#define ee_IA2 8 /*bit start for individual Addr Byte 2 */
-#define ee_IA1 0 /*bit start for individual Addr Byte 1 */
-#define ee_IA0 8 /*bit start for individual Addr Byte 0 */
-
-/* Word 5: */
-#define ee_BNC_TPE 0 /* 0=TPE */
-#define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */
-#define ee_BootTypeMask 0x3
-#define ee_NumConn 3 /* Number of Connections 0= One or Two */
-#define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */
-#define ee_PortTPE 5
-#define ee_PortBNC 6
-#define ee_PortAUI 7
-#define ee_PowerMgt 10 /* 0= disabled */
-#define ee_CP 13 /* Concurrent Processing */
-#define ee_CPMask 0x7
-
-/* Word 6: */
-#define ee_Stepping 0 /* Stepping info */
-#define ee_StepMask 0x0F
-#define ee_BoardID 4 /* Manufacturer Board ID, reserved */
-#define ee_BoardMask 0x0FFF
-
-/* Word 7: */
-#define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */
-#define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */
-
-/*..*/
-#define ee_SIZE 0x40 /* total EEprom Size */
-#define ee_Checksum 0xBABA /* initial and final value for adding checksum */
-
-
-/* Card identification via EEprom: */
-#define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */
-#define ee_addr_id 0x11 /* Word offset for Card ID */
-#define ee_addr_SN 0x12 /* Serial Number */
-#define ee_addr_CRC_8 0x14 /* CRC over last three Bytes */
-
-
-#define ee_vendor_intel0 0x25 /* Vendor ID Intel */
-#define ee_vendor_intel1 0xD4
-#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
-#define ee_id_eepro10p1 0x31
-
-#define TX_TIMEOUT ((4*HZ)/10)
-
-/* Index to functions, as function prototypes. */
-
-static int eepro_probe1(struct net_device *dev, int autoprobe);
-static int eepro_open(struct net_device *dev);
-static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t eepro_interrupt(int irq, void *dev_id);
-static void eepro_rx(struct net_device *dev);
-static void eepro_transmit_interrupt(struct net_device *dev);
-static int eepro_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void eepro_tx_timeout (struct net_device *dev);
-
-static int read_eeprom(int ioaddr, int location, struct net_device *dev);
-static int hardware_send_packet(struct net_device *dev, void *buf, short length);
-static int eepro_grab_irq(struct net_device *dev);
-
-/*
- Details of the i82595.
-
-You will need either the datasheet or the user manual to understand what
-is going on here. The 82595 is very different from the 82586, 82593.
-
-The receive algorithm in eepro_rx() is just an implementation of the
-RCV ring structure that the Intel 82595 imposes at the hardware level.
-The receive buffer is set at 24K, and the transmit buffer is 8K. I
-am assuming that the total buffer memory is 32K, which is true for the
-Intel EtherExpress Pro/10. If it is less than that on a generic card,
-the driver will be broken.
-
-The transmit algorithm in the hardware_send_packet() is similar to the
-one in the eepro_rx(). The transmit buffer is a ring linked list.
-I just queue the next available packet to the end of the list. In my
-system, the 82595 is so fast that the list seems to always contain a
-single packet. In other systems with faster computers and more congested
- network traffic, the ring linked list should improve performance by
-allowing up to 8K worth of packets to be queued.
-
-The sizes of the receive and transmit buffers can now be changed via lilo
-or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0"
- where rx-buffer is in KB units. Modules use the parameter mem, which is
- also in KB units, for example "insmod io=io-address irq=0 mem=rx-buffer."
- The receive buffer has to be more than 3K and less than 29K. Otherwise,
- it is reset to the default of 24K, and, hence, 8K for the transmit
- buffer (transmit-buffer = 32K - receive-buffer).
-
-*/
-#define RAM_SIZE 0x8000
-
-#define RCV_HEADER 8
-#define RCV_DEFAULT_RAM 0x6000
-
-#define XMT_HEADER 8
-#define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM)
-
-#define XMT_START_PRO RCV_DEFAULT_RAM
-#define XMT_START_10 0x0000
-#define RCV_START_PRO 0x0000
-#define RCV_START_10 XMT_DEFAULT_RAM
-
-#define RCV_DONE 0x0008
-#define RX_OK 0x2000
-#define RX_ERROR 0x0d81
-
-#define TX_DONE_BIT 0x0080
-#define TX_OK 0x2000
-#define CHAIN_BIT 0x8000
-#define XMT_STATUS 0x02
-#define XMT_CHAIN 0x04
-#define XMT_COUNT 0x06
-
-#define BANK0_SELECT 0x00
-#define BANK1_SELECT 0x40
-#define BANK2_SELECT 0x80
-
-/* Bank 0 registers */
-#define COMMAND_REG 0x00 /* Register 0 */
-#define MC_SETUP 0x03
-#define XMT_CMD 0x04
-#define DIAGNOSE_CMD 0x07
-#define RCV_ENABLE_CMD 0x08
-#define RCV_DISABLE_CMD 0x0a
-#define STOP_RCV_CMD 0x0b
-#define RESET_CMD 0x0e
-#define POWER_DOWN_CMD 0x18
-#define RESUME_XMT_CMD 0x1c
-#define SEL_RESET_CMD 0x1e
-#define STATUS_REG 0x01 /* Register 1 */
-#define RX_INT 0x02
-#define TX_INT 0x04
-#define EXEC_STATUS 0x30
-#define ID_REG 0x02 /* Register 2 */
-#define R_ROBIN_BITS 0xc0 /* round robin counter */
-#define ID_REG_MASK 0x2c
-#define ID_REG_SIG 0x24
-#define AUTO_ENABLE 0x10
-#define INT_MASK_REG 0x03 /* Register 3 */
-#define RX_STOP_MASK 0x01
-#define RX_MASK 0x02
-#define TX_MASK 0x04
-#define EXEC_MASK 0x08
-#define ALL_MASK 0x0f
-#define IO_32_BIT 0x10
-#define RCV_BAR 0x04 /* The following are word (16-bit) registers */
-#define RCV_STOP 0x06
-
-#define XMT_BAR_PRO 0x0a
-#define XMT_BAR_10 0x0b
-
-#define HOST_ADDRESS_REG 0x0c
-#define IO_PORT 0x0e
-#define IO_PORT_32_BIT 0x0c
-
-/* Bank 1 registers */
-#define REG1 0x01
-#define WORD_WIDTH 0x02
-#define INT_ENABLE 0x80
-#define INT_NO_REG 0x02
-#define RCV_LOWER_LIMIT_REG 0x08
-#define RCV_UPPER_LIMIT_REG 0x09
-
-#define XMT_LOWER_LIMIT_REG_PRO 0x0a
-#define XMT_UPPER_LIMIT_REG_PRO 0x0b
-#define XMT_LOWER_LIMIT_REG_10 0x0b
-#define XMT_UPPER_LIMIT_REG_10 0x0a
-
-/* Bank 2 registers */
-#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */
-#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */
-#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */
-#define REG2 0x02
-#define PRMSC_Mode 0x01
-#define Multi_IA 0x20
-#define REG3 0x03
-#define TPE_BIT 0x04
-#define BNC_BIT 0x20
-#define REG13 0x0d
-#define FDX 0x00
-#define A_N_ENABLE 0x02
-
-#define I_ADD_REG0 0x04
-#define I_ADD_REG1 0x05
-#define I_ADD_REG2 0x06
-#define I_ADD_REG3 0x07
-#define I_ADD_REG4 0x08
-#define I_ADD_REG5 0x09
-
-#define EEPROM_REG_PRO 0x0a
-#define EEPROM_REG_10 0x0b
-
-#define EESK 0x01
-#define EECS 0x02
-#define EEDI 0x04
-#define EEDO 0x08
-
-/* do a full reset */
-#define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr)
-
-/* do a nice reset */
-#define eepro_sel_reset(ioaddr) { \
- outb(SEL_RESET_CMD, ioaddr); \
- SLOW_DOWN; \
- SLOW_DOWN; \
- }
-
-/* disable all interrupts */
-#define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG)
-
-/* clear all interrupts */
-#define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG)
-
-/* enable tx/rx */
-#define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \
- ioaddr + INT_MASK_REG)
-
-/* enable exec event interrupt */
-#define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG)
-
-/* enable rx */
-#define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr)
-
-/* disable rx */
-#define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr)
-
-/* switch bank */
-#define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr)
-#define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr)
-#define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr)
-
-/* enable interrupt line */
-#define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\
- ioaddr + REG1)
-
-/* disable interrupt line */
-#define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \
- ioaddr + REG1);
-
-/* set diagnose flag */
-#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr)
-
-/* ack for rx int */
-#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG)
-
-/* ack for tx int */
-#define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG)
-
-/* a complete sel reset */
-#define eepro_complete_selreset(ioaddr) { \
- dev->stats.tx_errors++;\
- eepro_sel_reset(ioaddr);\
- lp->tx_end = \
- lp->xmt_lower_limit;\
- lp->tx_start = lp->tx_end;\
- lp->tx_last = 0;\
- dev->trans_start = jiffies;\
- netif_wake_queue(dev);\
- eepro_en_rx(ioaddr);\
- }
-
-/* Check for a network adaptor of this type, and return '0' if one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
- */
-static int __init do_eepro_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
-#ifdef PnPWakeup
- /* XXXX for multiple cards should this only be run once? */
-
- /* Wakeup: */
- #define WakeupPort 0x279
- #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\
- 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\
- 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\
- 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43}
-
- {
- unsigned short int WS[32]=WakeupSeq;
-
- if (request_region(WakeupPort, 2, "eepro wakeup")) {
- if (net_debug>5)
- printk(KERN_DEBUG "Waking UP\n");
-
- outb_p(0,WakeupPort);
- outb_p(0,WakeupPort);
- for (i=0; i<32; i++) {
- outb_p(WS[i],WakeupPort);
- if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]);
- }
-
- release_region(WakeupPort, 2);
- } else
- printk(KERN_WARNING "PnP wakeup region busy!\n");
- }
-#endif
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return eepro_probe1(dev, 0);
-
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; eepro_portlist[i]; i++) {
- dev->base_addr = eepro_portlist[i];
- dev->irq = irq;
- if (eepro_probe1(dev, 1) == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init eepro_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_eepro_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static void __init printEEPROMInfo(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned short Word;
- int i,j;
-
- j = ee_Checksum;
- for (i = 0; i < 8; i++)
- j += lp->word[i];
- for ( ; i < ee_SIZE; i++)
- j += read_eeprom(ioaddr, i, dev);
-
- printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff);
-
- Word = lp->word[0];
- printk(KERN_DEBUG "Word0:\n");
- printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP));
- printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 );
- printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg));
- printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4);
-
- if (net_debug>4) {
- Word = lp->word[1];
- printk(KERN_DEBUG "Word1:\n");
- printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask);
- printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI));
- printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC));
- printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI));
- printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber));
- printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort));
- printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex));
- }
-
- Word = lp->word[5];
- printk(KERN_DEBUG "Word5:\n");
- printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE));
- printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn));
- printk(KERN_DEBUG " Has ");
- if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
- if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
- if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
- printk(KERN_DEBUG "port(s)\n");
-
- Word = lp->word[6];
- printk(KERN_DEBUG "Word6:\n");
- printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask);
- printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID);
-
- Word = lp->word[7];
- printk(KERN_DEBUG "Word7:\n");
- printk(KERN_DEBUG " INT to IRQ:\n");
-
- for (i=0, j=0; i<15; i++)
- if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i);
-
- printk(KERN_DEBUG "\n");
-}
-
-/* function to recalculate the limits of buffer based on rcv_ram */
-static void eepro_recalc (struct net_device *dev)
-{
- struct eepro_local * lp;
-
- lp = netdev_priv(dev);
- lp->xmt_ram = RAM_SIZE - lp->rcv_ram;
-
- if (lp->eepro == LAN595FX_10ISA) {
- lp->xmt_lower_limit = XMT_START_10;
- lp->xmt_upper_limit = (lp->xmt_ram - 2);
- lp->rcv_lower_limit = lp->xmt_ram;
- lp->rcv_upper_limit = (RAM_SIZE - 2);
- }
- else {
- lp->rcv_lower_limit = RCV_START_PRO;
- lp->rcv_upper_limit = (lp->rcv_ram - 2);
- lp->xmt_lower_limit = lp->rcv_ram;
- lp->xmt_upper_limit = (RAM_SIZE - 2);
- }
-}
-
-/* prints boot-time info */
-static void __init eepro_print_info (struct net_device *dev)
-{
- struct eepro_local * lp = netdev_priv(dev);
- int i;
- const char * ifmap[] = {"AUI", "10Base2", "10BaseT"};
-
- i = inb(dev->base_addr + ID_REG);
- printk(KERN_DEBUG " id: %#x ",i);
- printk(" io: %#x ", (unsigned)dev->base_addr);
-
- switch (lp->eepro) {
- case LAN595FX_10ISA:
- printk("%s: Intel EtherExpress 10 ISA\n at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595FX:
- printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595TX:
- printk("%s: Intel EtherExpress Pro/10 ISA at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595:
- printk("%s: Intel 82595-based lan card at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- }
-
- printk(" %pM", dev->dev_addr);
-
- if (net_debug > 3)
- printk(KERN_DEBUG ", %dK RCV buffer",
- (int)(lp->rcv_ram)/1024);
-
- if (dev->irq > 2)
- printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]);
- else
- printk(", %s.\n", ifmap[dev->if_port]);
-
- if (net_debug > 3) {
- i = lp->word[5];
- if (i & 0x2000) /* bit 13 of EEPROM word 5 */
- printk(KERN_DEBUG "%s: Concurrent Processing is "
- "enabled but not used!\n", dev->name);
- }
-
- /* Check the station address for the manufacturer's code */
- if (net_debug>3)
- printEEPROMInfo(dev);
-}
-
-static const struct ethtool_ops eepro_ethtool_ops;
-
-static const struct net_device_ops eepro_netdev_ops = {
- .ndo_open = eepro_open,
- .ndo_stop = eepro_close,
- .ndo_start_xmit = eepro_send_packet,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_tx_timeout = eepro_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* This is the real probe routine. Linux has a history of friendly device
- probes on the ISA bus. A good device probe avoids doing writes, and
- verifies that the correct device exists and functions. */
-
-static int __init eepro_probe1(struct net_device *dev, int autoprobe)
-{
- unsigned short station_addr[3], id, counter;
- int i;
- struct eepro_local *lp;
- int ioaddr = dev->base_addr;
- int err;
-
- /* Grab the region so we can find another board if autoIRQ fails. */
- if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
- if (!autoprobe)
- printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
- ioaddr);
- return -EBUSY;
- }
-
- /* Now, we are going to check for the signature of the
- ID_REG (register 2 of bank 0) */
-
- id = inb(ioaddr + ID_REG);
-
- if ((id & ID_REG_MASK) != ID_REG_SIG)
- goto exit;
-
- /* We seem to have the 82595 signature, let's
- play with its counter (last 2 bits of
- register 2 of bank 0) to be sure. */
-
- counter = id & R_ROBIN_BITS;
-
- if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40))
- goto exit;
-
- lp = netdev_priv(dev);
- memset(lp, 0, sizeof(struct eepro_local));
- lp->xmt_bar = XMT_BAR_PRO;
- lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO;
- lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO;
- lp->eeprom_reg = EEPROM_REG_PRO;
- spin_lock_init(&lp->lock);
-
- /* Now, get the ethernet hardware address from
- the EEPROM */
- station_addr[0] = read_eeprom(ioaddr, 2, dev);
-
- /* FIXME - find another way to know that we've found
- * an Etherexpress 10
- */
- if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) {
- lp->eepro = LAN595FX_10ISA;
- lp->eeprom_reg = EEPROM_REG_10;
- lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10;
- lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10;
- lp->xmt_bar = XMT_BAR_10;
- station_addr[0] = read_eeprom(ioaddr, 2, dev);
- }
-
- /* get all words at once. will be used here and for ethtool */
- for (i = 0; i < 8; i++) {
- lp->word[i] = read_eeprom(ioaddr, i, dev);
- }
- station_addr[1] = lp->word[3];
- station_addr[2] = lp->word[4];
-
- if (!lp->eepro) {
- if (lp->word[7] == ee_FX_INT2IRQ)
- lp->eepro = 2;
- else if (station_addr[2] == SA_ADDR1)
- lp->eepro = 1;
- }
-
- /* Fill in the 'dev' fields. */
- for (i=0; i < 6; i++)
- dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
-
- /* RX buffer must be more than 3K and less than 29K */
- if (dev->mem_end < 3072 || dev->mem_end > 29696)
- lp->rcv_ram = RCV_DEFAULT_RAM;
-
- /* calculate {xmt,rcv}_{lower,upper}_limit */
- eepro_recalc(dev);
-
- if (GetBit(lp->word[5], ee_BNC_TPE))
- dev->if_port = BNC;
- else
- dev->if_port = TPE;
-
- if (dev->irq < 2 && lp->eepro != 0) {
- /* Mask off INT number */
- int count = lp->word[1] & 7;
- unsigned irqMask = lp->word[7];
-
- while (count--)
- irqMask &= irqMask - 1;
-
- count = ffs(irqMask);
-
- if (count)
- dev->irq = count - 1;
-
- if (dev->irq < 2) {
- printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n");
- goto exit;
- } else if (dev->irq == 2) {
- dev->irq = 9;
- }
- }
-
- dev->netdev_ops = &eepro_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->ethtool_ops = &eepro_ethtool_ops;
-
- /* print boot time info */
- eepro_print_info(dev);
-
- /* reset 82595 */
- eepro_reset(ioaddr);
-
- err = register_netdev(dev);
- if (err)
- goto err;
- return 0;
-exit:
- err = -ENODEV;
-err:
- release_region(dev->base_addr, EEPRO_IO_EXTENT);
- return err;
-}
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine should set everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is a non-reboot way to recover if something goes wrong.
- */
-
-static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
-static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
-static int eepro_grab_irq(struct net_device *dev)
-{
- static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
- const int *irqp = irqlist;
- int temp_reg, ioaddr = dev->base_addr;
-
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- /* Enable the interrupt line. */
- eepro_en_intline(ioaddr);
-
- /* be CAREFUL, BANK 0 now */
- eepro_sw2bank0(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Let the EXEC event interrupt */
- eepro_en_intexec(ioaddr);
-
- do {
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- temp_reg = inb(ioaddr + INT_NO_REG);
- outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != EBUSY) {
- unsigned long irq_mask;
- /* Twinkle the interrupt, and check if it's seen */
- irq_mask = probe_irq_on();
-
- eepro_diag(ioaddr); /* RESET the 82595 */
- mdelay(20);
-
- if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */
- break;
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
- }
- } while (*++irqp);
-
- eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
-
- /* Disable the physical interrupt line. */
- eepro_dis_intline(ioaddr);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- /* Mask all the interrupts. */
- eepro_dis_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- return dev->irq;
-}
-
-static int eepro_open(struct net_device *dev)
-{
- unsigned short temp_reg, old8, old9;
- int irqMask;
- int i, ioaddr = dev->base_addr;
- struct eepro_local *lp = netdev_priv(dev);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name);
-
- irqMask = lp->word[7];
-
- if (lp->eepro == LAN595FX_10ISA) {
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n");
- }
- else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */
- {
- lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n");
- }
-
- else if ((dev->dev_addr[0] == SA_ADDR0 &&
- dev->dev_addr[1] == SA_ADDR1 &&
- dev->dev_addr[2] == SA_ADDR2))
- {
- lp->eepro = 1;
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n");
- } /* Yes, an Intel EtherExpress Pro/10 */
-
- else lp->eepro = 0; /* No, it is a generic 82595 lan card */
-
- /* Get the interrupt vector for the 82595 */
- if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
- printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
- return -EAGAIN;
- }
-
- if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) {
- printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
- return -EAGAIN;
- }
-
- /* Initialize the 82595. */
-
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- temp_reg = inb(ioaddr + lp->eeprom_reg);
-
- lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */
-
- if (net_debug > 3)
- printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping);
-
- if (temp_reg & 0x10) /* Check the TurnOff Enable bit */
- outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg);
- for (i=0; i < 6; i++)
- outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);
-
- temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */
- outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */
- | RCV_Discard_BadFrame, ioaddr + REG1);
-
- temp_reg = inb(ioaddr + REG2); /* Match broadcast */
- outb(temp_reg | 0x14, ioaddr + REG2);
-
- temp_reg = inb(ioaddr + REG3);
- outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */
-
- /* Set the receiving mode */
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- /* Set the interrupt vector */
- temp_reg = inb(ioaddr + INT_NO_REG);
- if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
- outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG);
- else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
-
-
- temp_reg = inb(ioaddr + INT_NO_REG);
- if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
- outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG);
- else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg);
-
-
- /* Initialize the RCV and XMT upper and lower limits */
- outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG);
- outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG);
- outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg);
- outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg);
-
- /* Enable the interrupt line. */
- eepro_en_intline(ioaddr);
-
- /* Switch back to Bank 0 */
- eepro_sw2bank0(ioaddr);
-
- /* Let RX and TX events to interrupt */
- eepro_en_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Initialize RCV */
- outw(lp->rcv_lower_limit, ioaddr + RCV_BAR);
- lp->rx_start = lp->rcv_lower_limit;
- outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP);
-
- /* Initialize XMT */
- outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar);
- lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
- lp->tx_last = 0;
-
- /* Check for the i82595TX and i82595FX */
- old8 = inb(ioaddr + 8);
- outb(~old8, ioaddr + 8);
-
- if ((temp_reg = inb(ioaddr + 8)) == old8) {
- if (net_debug > 3)
- printk(KERN_DEBUG "i82595 detected!\n");
- lp->version = LAN595;
- }
- else {
- lp->version = LAN595TX;
- outb(old8, ioaddr + 8);
- old9 = inb(ioaddr + 9);
-
- if (irqMask==ee_FX_INT2IRQ) {
- if (net_debug > 3) {
- printk(KERN_DEBUG "IrqMask: %#x\n",irqMask);
- printk(KERN_DEBUG "i82595FX detected!\n");
- }
- lp->version = LAN595FX;
- outb(old9, ioaddr + 9);
- if (dev->if_port != TPE) { /* Hopefully, this will fix the
- problem of using Pentiums and
- pro/10 w/ BNC. */
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- temp_reg = inb(ioaddr + REG13);
- /* disable the full duplex mode since it is not
- applicable with the 10Base2 cable. */
- outb(temp_reg & ~(FDX | A_N_ENABLE), REG13);
- eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */
- }
- }
- else if (net_debug > 3) {
- printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff));
- printk(KERN_DEBUG "i82595TX detected!\n");
- }
- }
-
- eepro_sel_reset(ioaddr);
-
- netif_start_queue(dev);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name);
-
- /* enabling rx */
- eepro_en_rx(ioaddr);
-
- return 0;
-}
-
-static void eepro_tx_timeout (struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* if (net_debug > 1) */
- printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
- "network cable problem");
- /* This is not a duplicate. One message for the console,
- one for the log file */
- printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
- "network cable problem");
- eepro_complete_selreset(ioaddr);
-}
-
-
-static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- unsigned long flags;
- int ioaddr = dev->base_addr;
- short length = skb->len;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- netif_stop_queue (dev);
-
- eepro_dis_int(ioaddr);
- spin_lock_irqsave(&lp->lock, flags);
-
- {
- unsigned char *buf = skb->data;
-
- if (hardware_send_packet(dev, buf, length))
- /* we won't wake queue here because we're out of space */
- dev->stats.tx_dropped++;
- else {
- dev->stats.tx_bytes+=skb->len;
- netif_wake_queue(dev);
- }
-
- }
-
- dev_kfree_skb (skb);
-
- /* You might need to clean up and record Tx statistics here. */
- /* dev->stats.tx_aborted_errors++; */
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name);
-
- eepro_en_int(ioaddr);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-
-static irqreturn_t
-eepro_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct eepro_local *lp;
- int ioaddr, status, boguscount = 20;
- int handled = 0;
-
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name);
-
- ioaddr = dev->base_addr;
-
- while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--))
- {
- handled = 1;
- if (status & RX_INT) {
- if (net_debug > 4)
- printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name);
-
- eepro_dis_int(ioaddr);
-
- /* Get the received packets */
- eepro_ack_rx(ioaddr);
- eepro_rx(dev);
-
- eepro_en_int(ioaddr);
- }
- if (status & TX_INT) {
- if (net_debug > 4)
- printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name);
-
-
- eepro_dis_int(ioaddr);
-
- /* Process the status of transmitted packets */
- eepro_ack_tx(ioaddr);
- eepro_transmit_interrupt(dev);
-
- eepro_en_int(ioaddr);
- }
- }
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name);
-
- spin_unlock(&lp->lock);
- return IRQ_RETVAL(handled);
-}
-
-static int eepro_close(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- short temp_reg;
-
- netif_stop_queue(dev);
-
- eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
-
- /* Disable the physical interrupt line. */
- temp_reg = inb(ioaddr + REG1);
- outb(temp_reg & 0x7f, ioaddr + REG1);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- /* Flush the Tx and disable Rx. */
- outb(STOP_RCV_CMD, ioaddr);
- lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
- lp->tx_last = 0;
-
- /* Mask all the interrupts. */
- eepro_dis_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Reset the 82595 */
- eepro_reset(ioaddr);
-
- /* release the interrupt */
- free_irq(dev->irq, dev);
-
- /* Update the statistics here. What statistics? */
-
- return 0;
-}
-
-/* Set or clear the multicast filter for this adaptor.
- */
-static void
-set_multicast_list(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- unsigned short mode;
- struct netdev_hw_addr *ha;
- int mc_count = netdev_mc_count(dev);
-
- if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
- {
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode | PRMSC_Mode, ioaddr + REG2);
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- }
-
- else if (mc_count == 0)
- {
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- }
-
- else
- {
- unsigned short status, *eaddrs;
- int i, boguscount = 0;
-
- /* Disable RX and TX interrupts. Necessary to avoid
- corruption of the HOST_ADDRESS_REG by interrupt
- service routines. */
- eepro_dis_int(ioaddr);
-
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode | Multi_IA, ioaddr + REG2);
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
- outw(MC_SETUP, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(6 * (mc_count + 1), ioaddr + IO_PORT);
-
- netdev_for_each_mc_addr(ha, dev) {
- eaddrs = (unsigned short *) ha->addr;
- outw(*eaddrs++, ioaddr + IO_PORT);
- outw(*eaddrs++, ioaddr + IO_PORT);
- outw(*eaddrs++, ioaddr + IO_PORT);
- }
-
- eaddrs = (unsigned short *) dev->dev_addr;
- outw(eaddrs[0], ioaddr + IO_PORT);
- outw(eaddrs[1], ioaddr + IO_PORT);
- outw(eaddrs[2], ioaddr + IO_PORT);
- outw(lp->tx_end, ioaddr + lp->xmt_bar);
- outb(MC_SETUP, ioaddr);
-
- /* Update the transmit queue */
- i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
-
- if (lp->tx_start != lp->tx_end)
- {
- /* update the next address and the chain bit in the
- last packet */
- outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
- outw(i, ioaddr + IO_PORT);
- outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
- status = inw(ioaddr + IO_PORT);
- outw(status | CHAIN_BIT, ioaddr + IO_PORT);
- lp->tx_end = i ;
- }
- else {
- lp->tx_start = lp->tx_end = i ;
- }
-
- /* Acknowledge that the MC setup is done */
- do { /* We should be doing this in the eepro_interrupt()! */
- SLOW_DOWN;
- SLOW_DOWN;
- if (inb(ioaddr + STATUS_REG) & 0x08)
- {
- i = inb(ioaddr);
- outb(0x08, ioaddr + STATUS_REG);
-
- if (i & 0x20) { /* command ABORTed */
- printk(KERN_NOTICE "%s: multicast setup failed.\n",
- dev->name);
- break;
- } else if ((i & 0x0f) == 0x03) { /* MC-Done */
- printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
- dev->name, mc_count,
- mc_count > 1 ? "es":"");
- break;
- }
- }
- } while (++boguscount < 100);
-
- /* Re-enable RX and TX interrupts */
- eepro_en_int(ioaddr);
- }
- if (lp->eepro == LAN595FX_10ISA) {
- eepro_complete_selreset(ioaddr);
- }
- else
- eepro_en_rx(ioaddr);
-}
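
The MC_SETUP block written above is simply each six-byte multicast address (plus the station address) pushed through the dataport as three 16-bit words. A standalone illustration of that packing, assuming the little-endian word order that outw() produces on x86; the helper name is illustrative, not from the driver:

#include <stdint.h>

static void mac_to_words(const uint8_t mac[6], uint16_t out[3])
{
	int i;

	for (i = 0; i < 3; i++)		/* 6 bytes -> 3 words, low byte first */
		out[i] = mac[2 * i] | (mac[2 * i + 1] << 8);
}
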
-
-/* The horrible routine to read a word from the serial EEPROM. */
-/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */
-
-/* The delay between EEPROM clock transitions. */
-#define eeprom_delay() { udelay(40); }
-#define EE_READ_CMD (6 << 6)
-
-static int
-read_eeprom(int ioaddr, int location, struct net_device *dev)
-{
- int i;
- unsigned short retval = 0;
- struct eepro_local *lp = netdev_priv(dev);
- short ee_addr = ioaddr + lp->eeprom_reg;
- int read_cmd = location | EE_READ_CMD;
- short ctrl_val = EECS ;
-
- /* XXXX - black magic */
- eepro_sw2bank1(ioaddr);
- outb(0x00, ioaddr + STATUS_REG);
- /* XXXX - black magic */
-
- eepro_sw2bank2(ioaddr);
- outb(ctrl_val, ee_addr);
-
- /* Shift the read command bits out. */
- for (i = 8; i >= 0; i--) {
- short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
- : ctrl_val;
- outb(outval, ee_addr);
- outb(outval | EESK, ee_addr); /* EEPROM clock tick. */
- eeprom_delay();
- outb(outval, ee_addr); /* Finish EEPROM a clock tick. */
- eeprom_delay();
- }
- outb(ctrl_val, ee_addr);
-
- for (i = 16; i > 0; i--) {
- outb(ctrl_val | EESK, ee_addr); eeprom_delay();
- retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
- outb(ctrl_val, ee_addr); eeprom_delay();
- }
-
- /* Terminate the EEPROM access. */
- ctrl_val &= ~EECS;
- outb(ctrl_val | EESK, ee_addr);
- eeprom_delay();
- outb(ctrl_val, ee_addr);
- eeprom_delay();
- eepro_sw2bank0(ioaddr);
- return retval;
-}
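
What read_eeprom() implements is the usual 93C46-style serial EEPROM transaction: nine command bits (a start bit, the READ opcode and six address bits) shifted out MSB first, then sixteen data bits clocked back in. A self-contained restatement of that sequence, assuming hypothetical eeprom_set_di()/eeprom_pulse_clk()/eeprom_get_do() pin helpers in place of the banked 82595 register accesses above:

/*
 * Bit-bang sketch of the same transaction.  The three pin helpers are
 * hypothetical stand-ins for driving EEDI/EESK and sampling EEDO.
 */
extern void eeprom_set_di(int bit);
extern void eeprom_pulse_clk(void);
extern int  eeprom_get_do(void);

static unsigned short eeprom_bitbang_read(int location)
{
	int read_cmd = (location & 0x3f) | (6 << 6);	/* 1 10 aaaaaa = READ */
	unsigned short val = 0;
	int i;

	for (i = 8; i >= 0; i--) {		/* nine command bits, MSB first */
		eeprom_set_di(read_cmd & (1 << i));
		eeprom_pulse_clk();
	}
	for (i = 0; i < 16; i++) {		/* sixteen data bits back */
		eeprom_pulse_clk();
		val = (val << 1) | (eeprom_get_do() ? 1 : 0);
	}
	return val;
}
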
-
-static int
-hardware_send_packet(struct net_device *dev, void *buf, short length)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- unsigned status, tx_available, last, end;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);
-
- /* determine how much of the transmit buffer space is available */
- if (lp->tx_end > lp->tx_start)
- tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
- else if (lp->tx_end < lp->tx_start)
- tx_available = lp->tx_start - lp->tx_end;
- else tx_available = lp->xmt_ram;
-
- if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
- /* No space available ??? */
- return 1;
- }
-
- last = lp->tx_end;
- end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
-
- if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */
- if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) {
- /* Arrrr!!!, must keep the xmt header together,
- several days were lost to chase this one down. */
- last = lp->xmt_lower_limit;
- end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
- }
- else end = lp->xmt_lower_limit + (end -
- lp->xmt_upper_limit + 2);
- }
-
- outw(last, ioaddr + HOST_ADDRESS_REG);
- outw(XMT_CMD, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(end, ioaddr + IO_PORT);
- outw(length, ioaddr + IO_PORT);
-
- if (lp->version == LAN595)
- outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1);
- else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
- unsigned short temp = inb(ioaddr + INT_MASK_REG);
- outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
- outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2);
- outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
- }
-
- /* A dummy read to flush the DRAM write pipeline */
- status = inw(ioaddr + IO_PORT);
-
- if (lp->tx_start == lp->tx_end) {
- outw(last, ioaddr + lp->xmt_bar);
- outb(XMT_CMD, ioaddr);
- lp->tx_start = last; /* I don't like to change tx_start here */
- }
- else {
- /* update the next address and the chain bit in the
- last packet */
-
- if (lp->tx_end != last) {
- outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
- outw(last, ioaddr + IO_PORT);
- }
-
- outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
- status = inw(ioaddr + IO_PORT);
- outw(status | CHAIN_BIT, ioaddr + IO_PORT);
-
- /* Continue the transmit command */
- outb(RESUME_XMT_CMD, ioaddr);
- }
-
- lp->tx_last = last;
- lp->tx_end = end;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name);
-
- return 0;
-}
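
The space check at the top of hardware_send_packet() is plain circular-buffer arithmetic over lp->tx_start and lp->tx_end, with start == end meaning the ring is empty. Restated as a small self-contained helper (names are illustrative, not from the driver):

static unsigned int tx_ring_free(unsigned int ram, unsigned int start,
				 unsigned int end)
{
	if (end > start)		/* used region does not wrap */
		return ram - (end - start);
	if (end < start)		/* used region wraps around */
		return start - end;
	return ram;			/* start == end: ring is empty */
}

The caller then rejects the frame when the word-aligned length plus two transmit headers does not fit in that free space, which is exactly the ">= tx_available" test above.
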
-
-static void
-eepro_rx(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- short boguscount = 20;
- short rcv_car = lp->rx_start;
- unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name);
-
- /* Set the read pointer to the start of the RCV */
- outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
-
- rcv_event = inw(ioaddr + IO_PORT);
-
- while (rcv_event == RCV_DONE) {
-
- rcv_status = inw(ioaddr + IO_PORT);
- rcv_next_frame = inw(ioaddr + IO_PORT);
- rcv_size = inw(ioaddr + IO_PORT);
-
- if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
-
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- dev->stats.rx_bytes+=rcv_size;
- rcv_size &= 0x3fff;
- skb = netdev_alloc_skb(dev, rcv_size + 5);
- if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
- lp->rx_start = rcv_next_frame;
- outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
-
- break;
- }
- skb_reserve(skb,2);
-
- if (lp->version == LAN595)
- insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1);
- else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
- unsigned short temp = inb(ioaddr + INT_MASK_REG);
- outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
- insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size),
- (rcv_size + 3) >> 2);
- outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
- }
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- }
-
- else { /* Not sure will ever reach here,
- I set the 595 to discard bad received frames */
- dev->stats.rx_errors++;
-
- if (rcv_status & 0x0100)
- dev->stats.rx_over_errors++;
-
- else if (rcv_status & 0x0400)
- dev->stats.rx_frame_errors++;
-
- else if (rcv_status & 0x0800)
- dev->stats.rx_crc_errors++;
-
- printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
- dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
- }
-
- if (rcv_status & 0x1000)
- dev->stats.rx_length_errors++;
-
- rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
- lp->rx_start = rcv_next_frame;
-
- if (--boguscount == 0)
- break;
-
- outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
- rcv_event = inw(ioaddr + IO_PORT);
-
- }
- if (rcv_car == 0)
- rcv_car = lp->rcv_upper_limit | 0xff;
-
- outw(rcv_car - 1, ioaddr + RCV_STOP);
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name);
-}
-
-static void
-eepro_transmit_interrupt(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- short boguscount = 25;
- short xmt_status;
-
- while ((lp->tx_start != lp->tx_end) && boguscount--) {
-
- outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
- xmt_status = inw(ioaddr+IO_PORT);
-
- if (!(xmt_status & TX_DONE_BIT))
- break;
-
- xmt_status = inw(ioaddr+IO_PORT);
- lp->tx_start = inw(ioaddr+IO_PORT);
-
- netif_wake_queue (dev);
-
- if (xmt_status & TX_OK)
- dev->stats.tx_packets++;
- else {
- dev->stats.tx_errors++;
- if (xmt_status & 0x0400) {
- dev->stats.tx_carrier_errors++;
- printk(KERN_DEBUG "%s: carrier error\n",
- dev->name);
- printk(KERN_DEBUG "%s: XMT status = %#x\n",
- dev->name, xmt_status);
- }
- else {
- printk(KERN_DEBUG "%s: XMT status = %#x\n",
- dev->name, xmt_status);
- printk(KERN_DEBUG "%s: XMT status = %#x\n",
- dev->name, xmt_status);
- }
- }
- if (xmt_status & 0x000f) {
- dev->stats.collisions += (xmt_status & 0x000f);
- }
-
- if ((xmt_status & 0x0040) == 0x0) {
- dev->stats.tx_heartbeat_errors++;
- }
- }
-}
-
-static int eepro_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct eepro_local *lp = netdev_priv(dev);
-
- cmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_Autoneg;
- cmd->advertising = ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_Autoneg;
-
- if (GetBit(lp->word[5], ee_PortTPE)) {
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
- }
- if (GetBit(lp->word[5], ee_PortBNC)) {
- cmd->supported |= SUPPORTED_BNC;
- cmd->advertising |= ADVERTISED_BNC;
- }
- if (GetBit(lp->word[5], ee_PortAUI)) {
- cmd->supported |= SUPPORTED_AUI;
- cmd->advertising |= ADVERTISED_AUI;
- }
-
- ethtool_cmd_speed_set(cmd, SPEED_10);
-
- if (dev->if_port == TPE && lp->word[1] & ee_Duplex) {
- cmd->duplex = DUPLEX_FULL;
- }
- else {
- cmd->duplex = DUPLEX_HALF;
- }
-
- cmd->port = dev->if_port;
- cmd->phy_address = dev->base_addr;
- cmd->transceiver = XCVR_INTERNAL;
-
- if (lp->word[0] & ee_AutoNeg) {
- cmd->autoneg = 1;
- }
-
- return 0;
-}
-
-static void eepro_ethtool_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
- "ISA 0x%lx", dev->base_addr);
-}
-
-static const struct ethtool_ops eepro_ethtool_ops = {
- .get_settings = eepro_ethtool_get_settings,
- .get_drvinfo = eepro_ethtool_get_drvinfo,
-};
-
-#ifdef MODULE
-
-#define MAX_EEPRO 8
-static struct net_device *dev_eepro[MAX_EEPRO];
-
-static int io[MAX_EEPRO] = {
- [0 ... MAX_EEPRO-1] = -1
-};
-static int irq[MAX_EEPRO];
-static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */
- [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024
-};
-static int autodetect;
-
-static int n_eepro;
-/* For linux 2.1.xx */
-
-MODULE_AUTHOR("Pascal Dupuis and others");
-MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
-MODULE_LICENSE("GPL");
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-module_param(autodetect, int, 0);
-MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
-MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
-MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int i;
- if (io[0] == -1 && autodetect == 0) {
- printk(KERN_WARNING "eepro_init_module: Probe is very dangerous in ISA boards!\n");
- printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n");
- return -ENODEV;
- }
- else if (autodetect) {
- /* if autodetect is set then we must force detection */
- for (i = 0; i < MAX_EEPRO; i++) {
- io[i] = 0;
- }
-
- printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n");
- }
-
- for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) {
- dev = alloc_etherdev(sizeof(struct eepro_local));
- if (!dev)
- break;
-
- dev->mem_end = mem[i];
- dev->base_addr = io[i];
- dev->irq = irq[i];
-
- if (do_eepro_probe(dev) == 0) {
- dev_eepro[n_eepro++] = dev;
- continue;
- }
- free_netdev(dev);
- break;
- }
-
- if (n_eepro)
- printk(KERN_INFO "%s", version);
-
- return n_eepro ? 0 : -ENODEV;
-}
-
-void __exit
-cleanup_module(void)
-{
- int i;
-
- for (i=0; i<n_eepro; i++) {
- struct net_device *dev = dev_eepro[i];
- unregister_netdev(dev);
- release_region(dev->base_addr, EEPRO_IO_EXTENT);
- free_netdev(dev);
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
deleted file mode 100644
index 7a6a2f04c5b1..000000000000
--- a/drivers/net/ethernet/i825xx/eexpress.c
+++ /dev/null
@@ -1,1661 +0,0 @@
-/* Intel EtherExpress 16 device driver for Linux
- *
- * Written by John Sullivan, 1995
- * based on original code by Donald Becker, with changes by
- * Alan Cox and Pauline Middelink.
- *
- * Support for 8-bit mode by Zoltan Szilagyi <zoltans@cs.arizona.edu>
- *
- * Many modifications, and currently maintained, by
- * Philip Blundell <philb@gnu.org>
- * Added the Compaq LTE Alan Cox <alan@lxorguk.ukuu.org.uk>
- * Added MCA support Adam Fritzler (now deleted)
- *
- * Note - this driver is experimental still - it has problems on faster
- * machines. Someone needs to sit down and go through it line by line with
- * a databook...
- */
-
-/* The EtherExpress 16 is a fairly simple card, based on a shared-memory
- * design using the i82586 Ethernet coprocessor. It bears no relationship,
- * as far as I know, to the similarly-named "EtherExpress Pro" range.
- *
- * Historically, Linux support for these cards has been very bad. However,
- * things seem to be getting better slowly.
- */
-
-/* If your card is confused about what sort of interface it has (eg it
- * persistently reports "10baseT" when none is fitted), running 'SOFTSET /BART'
- * or 'SOFTSET /LISA' from DOS seems to help.
- */
-
-/* Here's the scoop on memory mapping.
- *
- * There are three ways to access EtherExpress card memory: either using the
- * shared-memory mapping, or using PIO through the dataport, or using PIO
- * through the "shadow memory" ports.
- *
- * The shadow memory system works by having the card map some of its memory
- * as follows:
- *
- * (the low five bits of the SMPTR are ignored)
- *
- * base+0x4000..400f memory at SMPTR+0..15
- * base+0x8000..800f memory at SMPTR+16..31
- * base+0xc000..c007 dubious stuff (memory at SMPTR+16..23 apparently)
- * base+0xc008..c00f memory at 0x0008..0x000f
- *
- * This last set (the one at c008) is particularly handy because the SCB
- * lives at 0x0008. So that set of ports gives us easy random access to data
- * in the SCB without having to mess around setting up pointers and the like.
- * We always use this method to access the SCB (via the scb_xx() functions).
- *
- * Dataport access works by aiming the appropriate (read or write) pointer
- * at the first address you're interested in, and then reading or writing from
- * the dataport. The pointers auto-increment after each transfer. We use
- * this for data transfer.
- *
- * We don't use the shared-memory system because it allegedly doesn't work on
- * all cards, and because it's a bit more prone to go wrong (it's one more
- * thing to configure...).
- */
-
-/* Known bugs:
- *
- * - The card seems to want to give us two interrupts every time something
- * happens, where just one would be better.
- */
-
-/*
- *
- * Note by Zoltan Szilagyi 10-12-96:
- *
- * I've succeeded in eliminating the "CU wedged" messages, and hence the
- * lockups, which were only occurring with cards running in 8-bit mode ("force
- * 8-bit operation" in Intel's SoftSet utility). This version of the driver
- * sets the 82586 and the ASIC to 8-bit mode at startup; it also stops the
- * CU before submitting a packet for transmission, and then restarts it as soon
- * as the process of handing the packet is complete. This is definitely an
- * unnecessary slowdown if the card is running in 16-bit mode; therefore one
- * should detect 16-bit vs 8-bit mode from the EEPROM settings and act
- * accordingly. In 8-bit mode with this bugfix I'm getting about 150 K/s for
- * ftp's, which is significantly better than I get in DOS, so the overhead of
- * stopping and restarting the CU with each transmit is not prohibitive in
- * practice.
- *
- * Update by David Woodhouse 11/5/99:
- *
- * I've seen "CU wedged" messages in 16-bit mode, on the Alpha architecture.
- * I assume that this is because 16-bit accesses are actually handled as two
- * 8-bit accesses.
- */
-
-#ifdef __alpha__
-#define LOCKUP16 1
-#endif
-#ifndef LOCKUP16
-#define LOCKUP16 0
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#ifndef NET_DEBUG
-#define NET_DEBUG 4
-#endif
-
-#include "eexpress.h"
-
-#define EEXP_IO_EXTENT 16
-
-/*
- * Private data declarations
- */
-
-struct net_local
-{
- unsigned long last_tx; /* jiffies when last transmit started */
- unsigned long init_time; /* jiffies when eexp_hw_init586 called */
- unsigned short rx_first; /* first rx buf, same as RX_BUF_START */
- unsigned short rx_last; /* last rx buf */
- unsigned short rx_ptr; /* first rx buf to look at */
- unsigned short tx_head; /* next free tx buf */
- unsigned short tx_reap; /* first in-use tx buf */
- unsigned short tx_tail; /* previous tx buf to tx_head */
- unsigned short tx_link; /* last known-executing tx buf */
- unsigned short last_tx_restart; /* set to tx_link when we
- restart the CU */
- unsigned char started;
- unsigned short rx_buf_start;
- unsigned short rx_buf_end;
- unsigned short num_tx_bufs;
- unsigned short num_rx_bufs;
- unsigned char width; /* 0 for 16bit, 1 for 8bit */
- unsigned char was_promisc;
- unsigned char old_mc_count;
- spinlock_t lock;
-};
-
-/* This is the code and data that is downloaded to the EtherExpress card's
- * memory at boot time.
- */
-
-static unsigned short start_code[] = {
-/* 0x0000 */
- 0x0001, /* ISCP: busy - cleared after reset */
- 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */
-
- 0x0000,0x0000, /* SCB: status, commands */
- 0x0000,0x0000, /* links to first command block,
- first receive descriptor */
- 0x0000,0x0000, /* CRC error, alignment error counts */
- 0x0000,0x0000, /* out of resources, overrun error counts */
-
- 0x0000,0x0000, /* pad */
- 0x0000,0x0000,
-
-/* 0x20 -- start of 82586 CU program */
-#define CONF_LINK 0x20
- 0x0000,Cmd_Config,
- 0x0032, /* link to next command */
- 0x080c, /* 12 bytes follow : fifo threshold=8 */
- 0x2e40, /* don't rx bad frames
- * SRDY/ARDY => ext. sync. : preamble len=8
- * take addresses from data buffers
- * 6 bytes/address
- */
- 0x6000, /* default backoff method & priority
- * interframe spacing = 0x60 */
- 0xf200, /* slot time=0x200
- * max collision retry = 0xf */
-#define CONF_PROMISC 0x2e
- 0x0000, /* no HDLC : normal CRC : enable broadcast
- * disable promiscuous/multicast modes */
- 0x003c, /* minimum frame length = 60 octets) */
-
- 0x0000,Cmd_SetAddr,
- 0x003e, /* link to next command */
-#define CONF_HWADDR 0x38
- 0x0000,0x0000,0x0000, /* hardware address placed here */
-
- 0x0000,Cmd_MCast,
- 0x0076, /* link to next command */
-#define CONF_NR_MULTICAST 0x44
- 0x0000, /* number of bytes in multicast address(es) */
-#define CONF_MULTICAST 0x46
- 0x0000, 0x0000, 0x0000, /* some addresses */
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
-
-#define CONF_DIAG_RESULT 0x76
- 0x0000, Cmd_Diag,
- 0x007c, /* link to next command */
-
- 0x0000,Cmd_TDR|Cmd_INT,
- 0x0084,
-#define CONF_TDR_RESULT 0x82
- 0x0000,
-
- 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */
- 0x0084 /* dummy link */
-};
-
-/* maps irq number to EtherExpress magic value */
-static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
-
-/*
- * Prototypes for Linux interface
- */
-
-static int eexp_open(struct net_device *dev);
-static int eexp_close(struct net_device *dev);
-static void eexp_timeout(struct net_device *dev);
-static netdev_tx_t eexp_xmit(struct sk_buff *buf,
- struct net_device *dev);
-
-static irqreturn_t eexp_irq(int irq, void *dev_addr);
-static void eexp_set_multicast(struct net_device *dev);
-
-/*
- * Prototypes for hardware access functions
- */
-
-static void eexp_hw_rx_pio(struct net_device *dev);
-static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
- unsigned short len);
-static int eexp_hw_probe(struct net_device *dev,unsigned short ioaddr);
-static unsigned short eexp_hw_readeeprom(unsigned short ioaddr,
- unsigned char location);
-
-static unsigned short eexp_hw_lasttxstat(struct net_device *dev);
-static void eexp_hw_txrestart(struct net_device *dev);
-
-static void eexp_hw_txinit (struct net_device *dev);
-static void eexp_hw_rxinit (struct net_device *dev);
-
-static void eexp_hw_init586 (struct net_device *dev);
-static void eexp_setup_filter (struct net_device *dev);
-
-static char *eexp_ifmap[]={"AUI", "BNC", "RJ45"};
-enum eexp_iftype {AUI=0, BNC=1, TPE=2};
-
-#define STARTED_RU 2
-#define STARTED_CU 1
-
-/*
- * Primitive hardware access functions.
- */
-
-static inline unsigned short scb_status(struct net_device *dev)
-{
- return inw(dev->base_addr + 0xc008);
-}
-
-static inline unsigned short scb_rdcmd(struct net_device *dev)
-{
- return inw(dev->base_addr + 0xc00a);
-}
-
-static inline void scb_command(struct net_device *dev, unsigned short cmd)
-{
- outw(cmd, dev->base_addr + 0xc00a);
-}
-
-static inline void scb_wrcbl(struct net_device *dev, unsigned short val)
-{
- outw(val, dev->base_addr + 0xc00c);
-}
-
-static inline void scb_wrrfa(struct net_device *dev, unsigned short val)
-{
- outw(val, dev->base_addr + 0xc00e);
-}
-
-static inline void set_loopback(struct net_device *dev)
-{
- outb(inb(dev->base_addr + Config) | 2, dev->base_addr + Config);
-}
-
-static inline void clear_loopback(struct net_device *dev)
-{
- outb(inb(dev->base_addr + Config) & ~2, dev->base_addr + Config);
-}
-
-static inline unsigned short int SHADOW(short int addr)
-{
- addr &= 0x1f;
- if (addr > 0xf) addr += 0x3ff0;
- return addr + 0x4000;
-}
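
SHADOW() encodes the window layout described in the long "memory mapping" comment near the top of this file: card offsets 0..15 of the current SMPTR window are visible at base+0x4000, offsets 16..31 at base+0x8000, and only the low five bits of the offset matter. A standalone compile-and-run check of that arithmetic (sketch only, not driver code):

#include <assert.h>

static unsigned short shadow_port(short addr)	/* same arithmetic as SHADOW() */
{
	addr &= 0x1f;
	if (addr > 0xf)
		addr += 0x3ff0;
	return addr + 0x4000;
}

int main(void)
{
	assert(shadow_port(0x06) == 0x4006);	/* low half -> base+0x4000 window  */
	assert(shadow_port(0x16) == 0x8006);	/* high half -> base+0x8000 window */
	assert(shadow_port(0x36) == 0x8006);	/* only the low five bits are used */
	return 0;
}
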
-
-/*
- * Linux interface
- */
-
-/*
- * checks for presence of EtherExpress card
- */
-
-static int __init do_express_probe(struct net_device *dev)
-{
- unsigned short *port;
- static unsigned short ports[] = { 0x240,0x300,0x310,0x270,0x320,0x340,0 };
- unsigned short ioaddr = dev->base_addr;
- int dev_irq = dev->irq;
- int err;
-
- dev->if_port = 0xff; /* not set */
-
- if (ioaddr&0xfe00) {
- if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress"))
- return -EBUSY;
- err = eexp_hw_probe(dev,ioaddr);
- release_region(ioaddr, EEXP_IO_EXTENT);
- return err;
- } else if (ioaddr)
- return -ENXIO;
-
- for (port=&ports[0] ; *port ; port++ )
- {
- unsigned short sum = 0;
- int i;
- if (!request_region(*port, EEXP_IO_EXTENT, "EtherExpress"))
- continue;
- for ( i=0 ; i<4 ; i++ )
- {
- unsigned short t;
- t = inb(*port + ID_PORT);
- sum |= (t>>4) << ((t & 0x03)<<2);
- }
- if (sum==0xbaba && !eexp_hw_probe(dev,*port)) {
- release_region(*port, EEXP_IO_EXTENT);
- return 0;
- }
- release_region(*port, EEXP_IO_EXTENT);
- dev->irq = dev_irq;
- }
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init express_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_express_probe(dev);
- if (!err)
- return dev;
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-/*
- * open and initialize the adapter, ready for use
- */
-
-static int eexp_open(struct net_device *dev)
-{
- int ret;
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_open()\n", dev->name);
-#endif
-
- if (!dev->irq || !irqrmap[dev->irq])
- return -ENXIO;
-
- ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev);
- if (ret)
- return ret;
-
- if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) {
- printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
- , ioaddr);
- goto err_out1;
- }
- if (!request_region(ioaddr+0x4000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
- , ioaddr+0x4000);
- goto err_out2;
- }
- if (!request_region(ioaddr+0x8000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
- , ioaddr+0x8000);
- goto err_out3;
- }
- if (!request_region(ioaddr+0xc000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
- , ioaddr+0xc000);
- goto err_out4;
- }
-
- if (lp->width) {
- printk("%s: forcing ASIC to 8-bit mode\n", dev->name);
- outb(inb(dev->base_addr+Config)&~4, dev->base_addr+Config);
- }
-
- eexp_hw_init586(dev);
- netif_start_queue(dev);
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name);
-#endif
- return 0;
-
- err_out4:
- release_region(ioaddr+0x8000, EEXP_IO_EXTENT);
- err_out3:
- release_region(ioaddr+0x4000, EEXP_IO_EXTENT);
- err_out2:
- release_region(ioaddr, EEXP_IO_EXTENT);
- err_out1:
- free_irq(dev->irq, dev);
- return -EBUSY;
-}
-
-/*
- * close and disable the interface, leaving the 586 in reset.
- */
-
-static int eexp_close(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
-
- int irq = dev->irq;
-
- netif_stop_queue(dev);
-
- outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
- lp->started = 0;
- scb_command(dev, SCB_CUsuspend|SCB_RUsuspend);
- outb(0,ioaddr+SIGNAL_CA);
- free_irq(irq,dev);
- outb(i586_RST,ioaddr+EEPROM_Ctrl);
- release_region(ioaddr, EEXP_IO_EXTENT);
- release_region(ioaddr+0x4000, 16);
- release_region(ioaddr+0x8000, 16);
- release_region(ioaddr+0xc000, 16);
-
- return 0;
-}
-
-/*
- * This gets called when a higher level thinks we are broken. Check that
- * nothing has become jammed in the CU.
- */
-
-static void unstick_cu(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- if (lp->started)
- {
- if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
- {
- if (lp->tx_link==lp->last_tx_restart)
- {
- unsigned short boguscount=200,rsst;
- printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n",
- dev->name, scb_status(dev));
- eexp_hw_txinit(dev);
- lp->last_tx_restart = 0;
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- while (!SCB_complete(rsst=scb_status(dev)))
- {
- if (!--boguscount)
- {
- boguscount=200;
- printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n",
- dev->name,rsst);
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- }
- }
- netif_wake_queue(dev);
- }
- else
- {
- unsigned short status = scb_status(dev);
- if (SCB_CUdead(status))
- {
- unsigned short txstatus = eexp_hw_lasttxstat(dev);
- printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n",
- dev->name, status, txstatus);
- eexp_hw_txrestart(dev);
- }
- else
- {
- unsigned short txstatus = eexp_hw_lasttxstat(dev);
- if (netif_queue_stopped(dev) && !txstatus)
- {
- printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n",
- dev->name,status,txstatus);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- }
- else
- {
- printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
- }
- }
- }
- }
- }
- else
- {
- if (time_after(jiffies, lp->init_time + 10))
- {
- unsigned short status = scb_status(dev);
- printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n",
- dev->name, status);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- }
- }
-}
-
-static void eexp_timeout(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-#ifdef CONFIG_SMP
- unsigned long flags;
-#endif
- int status;
-
- disable_irq(dev->irq);
-
- /*
- * Best would be to use synchronize_irq(); spin_lock() here
- * lets make it work first..
- */
-
-#ifdef CONFIG_SMP
- spin_lock_irqsave(&lp->lock, flags);
-#endif
-
- status = scb_status(dev);
- unstick_cu(dev);
- printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name,
- (SCB_complete(status)?"lost interrupt":
- "board on fire"));
- dev->stats.tx_errors++;
- lp->last_tx = jiffies;
- if (!SCB_complete(status)) {
- scb_command(dev, SCB_CUabort);
- outb(0,dev->base_addr+SIGNAL_CA);
- }
- netif_wake_queue(dev);
-#ifdef CONFIG_SMP
- spin_unlock_irqrestore(&lp->lock, flags);
-#endif
-}
-
-/*
- * Called to transmit a packet, or to allow us to right ourselves
- * if the kernel thinks we've died.
- */
-static netdev_tx_t eexp_xmit(struct sk_buff *buf, struct net_device *dev)
-{
- short length = buf->len;
-#ifdef CONFIG_SMP
- struct net_local *lp = netdev_priv(dev);
- unsigned long flags;
-#endif
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name);
-#endif
-
- if (buf->len < ETH_ZLEN) {
- if (skb_padto(buf, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- disable_irq(dev->irq);
-
- /*
- * Best would be to use synchronize_irq(); spin_lock() here
- * lets make it work first..
- */
-
-#ifdef CONFIG_SMP
- spin_lock_irqsave(&lp->lock, flags);
-#endif
-
- {
- unsigned short *data = (unsigned short *)buf->data;
-
- dev->stats.tx_bytes += length;
-
- eexp_hw_tx_pio(dev,data,length);
- }
- dev_kfree_skb(buf);
-#ifdef CONFIG_SMP
- spin_unlock_irqrestore(&lp->lock, flags);
-#endif
- enable_irq(dev->irq);
- return NETDEV_TX_OK;
-}
-
-/*
- * Handle an EtherExpress interrupt
- * If we've finished initializing, start the RU and CU up.
- * If we've already started, reap tx buffers, handle any received packets,
- * check to make sure we've not become wedged.
- */
-
-static unsigned short eexp_start_irq(struct net_device *dev,
- unsigned short status)
-{
- unsigned short ack_cmd = SCB_ack(status);
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
- if ((dev->flags & IFF_UP) && !(lp->started & STARTED_CU)) {
- short diag_status, tdr_status;
- while (SCB_CUstat(status)==2)
- status = scb_status(dev);
-#if NET_DEBUG > 4
- printk("%s: CU went non-active (status %04x)\n",
- dev->name, status);
-#endif
-
- outw(CONF_DIAG_RESULT & ~31, ioaddr + SM_PTR);
- diag_status = inw(ioaddr + SHADOW(CONF_DIAG_RESULT));
- if (diag_status & 1<<11) {
- printk(KERN_WARNING "%s: 82586 failed self-test\n",
- dev->name);
- } else if (!(diag_status & 1<<13)) {
- printk(KERN_WARNING "%s: 82586 self-test failed to complete\n", dev->name);
- }
-
- outw(CONF_TDR_RESULT & ~31, ioaddr + SM_PTR);
- tdr_status = inw(ioaddr + SHADOW(CONF_TDR_RESULT));
- if (tdr_status & (TDR_SHORT|TDR_OPEN)) {
- printk(KERN_WARNING "%s: TDR reports cable %s at %d tick%s\n", dev->name, (tdr_status & TDR_SHORT)?"short":"broken", tdr_status & TDR_TIME, ((tdr_status & TDR_TIME) != 1) ? "s" : "");
- }
- else if (tdr_status & TDR_XCVRPROBLEM) {
- printk(KERN_WARNING "%s: TDR reports transceiver problem\n", dev->name);
- }
- else if (tdr_status & TDR_LINKOK) {
-#if NET_DEBUG > 4
- printk(KERN_DEBUG "%s: TDR reports link OK\n", dev->name);
-#endif
- } else {
- printk("%s: TDR is ga-ga (status %04x)\n", dev->name,
- tdr_status);
- }
-
- lp->started |= STARTED_CU;
- scb_wrcbl(dev, lp->tx_link);
- /* if the RU isn't running, start it now */
- if (!(lp->started & STARTED_RU)) {
- ack_cmd |= SCB_RUstart;
- scb_wrrfa(dev, lp->rx_buf_start);
- lp->rx_ptr = lp->rx_buf_start;
- lp->started |= STARTED_RU;
- }
- ack_cmd |= SCB_CUstart | 0x2000;
- }
-
- if ((dev->flags & IFF_UP) && !(lp->started & STARTED_RU) && SCB_RUstat(status)==4)
- lp->started|=STARTED_RU;
-
- return ack_cmd;
-}
-
-static void eexp_cmd_clear(struct net_device *dev)
-{
- unsigned long int oldtime = jiffies;
- while (scb_rdcmd(dev) && (time_before(jiffies, oldtime + 10)));
- if (scb_rdcmd(dev)) {
- printk("%s: command didn't clear\n", dev->name);
- }
-}
-
-static irqreturn_t eexp_irq(int dummy, void *dev_info)
-{
- struct net_device *dev = dev_info;
- struct net_local *lp;
- unsigned short ioaddr,status,ack_cmd;
- unsigned short old_read_ptr, old_write_ptr;
-
- lp = netdev_priv(dev);
- ioaddr = dev->base_addr;
-
- spin_lock(&lp->lock);
-
- old_read_ptr = inw(ioaddr+READ_PTR);
- old_write_ptr = inw(ioaddr+WRITE_PTR);
-
- outb(SIRQ_dis|irqrmap[dev->irq], ioaddr+SET_IRQ);
-
- status = scb_status(dev);
-
-#if NET_DEBUG > 4
- printk(KERN_DEBUG "%s: interrupt (status %x)\n", dev->name, status);
-#endif
-
- if (lp->started == (STARTED_CU | STARTED_RU)) {
-
- do {
- eexp_cmd_clear(dev);
-
- ack_cmd = SCB_ack(status);
- scb_command(dev, ack_cmd);
- outb(0,ioaddr+SIGNAL_CA);
-
- eexp_cmd_clear(dev);
-
- if (SCB_complete(status)) {
- if (!eexp_hw_lasttxstat(dev)) {
- printk("%s: tx interrupt but no status\n", dev->name);
- }
- }
-
- if (SCB_rxdframe(status))
- eexp_hw_rx_pio(dev);
-
- status = scb_status(dev);
- } while (status & 0xc000);
-
- if (SCB_RUdead(status))
- {
- printk(KERN_WARNING "%s: RU stopped: status %04x\n",
- dev->name,status);
-#if 0
- printk(KERN_WARNING "%s: cur_rfd=%04x, cur_rbd=%04x\n", dev->name, lp->cur_rfd, lp->cur_rbd);
- outw(lp->cur_rfd, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: [%04x]\n", dev->name, inw(ioaddr+DATAPORT));
- outw(lp->cur_rfd+6, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: rbd is %04x\n", dev->name, rbd= inw(ioaddr+DATAPORT));
- outw(rbd, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: [%04x %04x] ", dev->name, inw(ioaddr+DATAPORT), inw(ioaddr+DATAPORT));
- outw(rbd+8, ioaddr+READ_PTR);
- printk("[%04x]\n", inw(ioaddr+DATAPORT));
-#endif
- dev->stats.rx_errors++;
-#if 1
- eexp_hw_rxinit(dev);
-#else
- lp->cur_rfd = lp->first_rfd;
-#endif
- scb_wrrfa(dev, lp->rx_buf_start);
- scb_command(dev, SCB_RUstart);
- outb(0,ioaddr+SIGNAL_CA);
- }
- } else {
- if (status & 0x8000)
- ack_cmd = eexp_start_irq(dev, status);
- else
- ack_cmd = SCB_ack(status);
- scb_command(dev, ack_cmd);
- outb(0,ioaddr+SIGNAL_CA);
- }
-
- eexp_cmd_clear(dev);
-
- outb(SIRQ_en|irqrmap[dev->irq], ioaddr+SET_IRQ);
-
-#if NET_DEBUG > 6
- printk("%s: leaving eexp_irq()\n", dev->name);
-#endif
- outw(old_read_ptr, ioaddr+READ_PTR);
- outw(old_write_ptr, ioaddr+WRITE_PTR);
-
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * Hardware access functions
- */
-
-/*
- * Set the cable type to use.
- */
-
-static void eexp_hw_set_interface(struct net_device *dev)
-{
- unsigned char oldval = inb(dev->base_addr + 0x300e);
- oldval &= ~0x82;
- switch (dev->if_port) {
- case TPE:
- oldval |= 0x2;
- case BNC:
- oldval |= 0x80;
- break;
- }
- outb(oldval, dev->base_addr+0x300e);
- mdelay(20);
-}
-
-/*
- * Check all the receive buffers, and hand any received packets
- * to the upper levels. Basic sanity check on each frame
- * descriptor, though we don't bother trying to fix broken ones.
- */
-
-static void eexp_hw_rx_pio(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short rx_block = lp->rx_ptr;
- unsigned short boguscount = lp->num_rx_bufs;
- unsigned short ioaddr = dev->base_addr;
- unsigned short status;
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name);
-#endif
-
- do {
- unsigned short rfd_cmd, rx_next, pbuf, pkt_len;
-
- outw(rx_block, ioaddr + READ_PTR);
- status = inw(ioaddr + DATAPORT);
-
- if (FD_Done(status))
- {
- rfd_cmd = inw(ioaddr + DATAPORT);
- rx_next = inw(ioaddr + DATAPORT);
- pbuf = inw(ioaddr + DATAPORT);
-
- outw(pbuf, ioaddr + READ_PTR);
- pkt_len = inw(ioaddr + DATAPORT);
-
- if (rfd_cmd!=0x0000)
- {
- printk(KERN_WARNING "%s: rfd_cmd not zero:0x%04x\n",
- dev->name, rfd_cmd);
- continue;
- }
- else if (pbuf!=rx_block+0x16)
- {
- printk(KERN_WARNING "%s: rfd and rbd out of sync 0x%04x 0x%04x\n",
- dev->name, rx_block+0x16, pbuf);
- continue;
- }
- else if ((pkt_len & 0xc000)!=0xc000)
- {
- printk(KERN_WARNING "%s: EOF or F not set on received buffer (%04x)\n",
- dev->name, pkt_len & 0xc000);
- continue;
- }
- else if (!FD_OK(status))
- {
- dev->stats.rx_errors++;
- if (FD_CRC(status))
- dev->stats.rx_crc_errors++;
- if (FD_Align(status))
- dev->stats.rx_frame_errors++;
- if (FD_Resrc(status))
- dev->stats.rx_fifo_errors++;
- if (FD_DMA(status))
- dev->stats.rx_over_errors++;
- if (FD_Short(status))
- dev->stats.rx_length_errors++;
- }
- else
- {
- struct sk_buff *skb;
- pkt_len &= 0x3fff;
- skb = netdev_alloc_skb(dev, pkt_len + 16);
- if (skb == NULL)
- {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb, 2);
- outw(pbuf+10, ioaddr+READ_PTR);
- insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1);
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- outw(rx_block, ioaddr+WRITE_PTR);
- outw(0, ioaddr+DATAPORT);
- outw(0, ioaddr+DATAPORT);
- rx_block = rx_next;
- }
- } while (FD_Done(status) && boguscount--);
- lp->rx_ptr = rx_block;
-}
-
-/*
- * Hand a packet to the card for transmission
- * If we get here, we MUST have already checked
- * to make sure there is room in the transmit
- * buffer region.
- */
-
-static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
- unsigned short len)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- if (LOCKUP16 || lp->width) {
- /* Stop the CU so that there is no chance that it
- jumps off to a bogus address while we are writing the
- pointer to the next transmit packet in 8-bit mode --
- this eliminates the "CU wedged" errors in 8-bit mode.
- (Zoltan Szilagyi 10-12-96) */
- scb_command(dev, SCB_CUsuspend);
- outw(0xFFFF, ioaddr+SIGNAL_CA);
- }
-
- outw(lp->tx_head, ioaddr + WRITE_PTR);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
- outw(lp->tx_head+0x08, ioaddr + DATAPORT);
- outw(lp->tx_head+0x0e, ioaddr + DATAPORT);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
- outw(lp->tx_head+0x08, ioaddr + DATAPORT);
-
- outw(0x8000|len, ioaddr + DATAPORT);
- outw(-1, ioaddr + DATAPORT);
- outw(lp->tx_head+0x16, ioaddr + DATAPORT);
- outw(0, ioaddr + DATAPORT);
-
- outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
-
- outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
- outw(lp->tx_head, ioaddr + DATAPORT);
-
- dev->trans_start = jiffies;
- lp->tx_tail = lp->tx_head;
- if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
- lp->tx_head = TX_BUF_START;
- else
- lp->tx_head += TX_BUF_SIZE;
- if (lp->tx_head != lp->tx_reap)
- netif_wake_queue(dev);
-
- if (LOCKUP16 || lp->width) {
- /* Restart the CU so that the packet can actually
- be transmitted. (Zoltan Szilagyi 10-12-96) */
- scb_command(dev, SCB_CUresume);
- outw(0xFFFF, ioaddr+SIGNAL_CA);
- }
-
- dev->stats.tx_packets++;
- lp->last_tx = jiffies;
-}
-
-static const struct net_device_ops eexp_netdev_ops = {
- .ndo_open = eexp_open,
- .ndo_stop = eexp_close,
- .ndo_start_xmit = eexp_xmit,
- .ndo_set_rx_mode = eexp_set_multicast,
- .ndo_tx_timeout = eexp_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*
- * Sanity check the suspected EtherExpress card
- * Read hardware address, reset card, size memory and initialize buffer
- * memory pointers. These are held in netdev_priv(), in case someone has more
- * than one card in a machine.
- */
-
-static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
-{
- unsigned short hw_addr[3];
- unsigned char buswidth;
- unsigned int memory_size;
- int i;
- unsigned short xsum = 0;
- struct net_local *lp = netdev_priv(dev);
-
- printk("%s: EtherExpress 16 at %#x ",dev->name,ioaddr);
-
- outb(ASIC_RST, ioaddr+EEPROM_Ctrl);
- outb(0, ioaddr+EEPROM_Ctrl);
- udelay(500);
- outb(i586_RST, ioaddr+EEPROM_Ctrl);
-
- hw_addr[0] = eexp_hw_readeeprom(ioaddr,2);
- hw_addr[1] = eexp_hw_readeeprom(ioaddr,3);
- hw_addr[2] = eexp_hw_readeeprom(ioaddr,4);
-
- /* Standard Address or Compaq LTE Address */
- if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) ||
- (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00))))
- {
- printk(" rejected: invalid address %04x%04x%04x\n",
- hw_addr[2],hw_addr[1],hw_addr[0]);
- return -ENODEV;
- }
-
- /* Calculate the EEPROM checksum. Carry on anyway if it's bad,
- * though.
- */
- for (i = 0; i < 64; i++)
- xsum += eexp_hw_readeeprom(ioaddr, i);
- if (xsum != 0xbaba)
- printk(" (bad EEPROM xsum 0x%02x)", xsum);
-
- dev->base_addr = ioaddr;
- for ( i=0 ; i<6 ; i++ )
- dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i];
-
- {
- static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 };
- unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
-
- /* Use the IRQ from EEPROM if none was given */
- if (!dev->irq)
- dev->irq = irqmap[setupval>>13];
-
- if (dev->if_port == 0xff) {
- dev->if_port = !(setupval & 0x1000) ? AUI :
- eexp_hw_readeeprom(ioaddr,5) & 0x1 ? TPE : BNC;
- }
-
- buswidth = !((setupval & 0x400) >> 10);
- }
-
- memset(lp, 0, sizeof(struct net_local));
- spin_lock_init(&lp->lock);
-
- printk("(IRQ %d, %s connector, %d-bit bus", dev->irq,
- eexp_ifmap[dev->if_port], buswidth?8:16);
-
- if (!request_region(dev->base_addr + 0x300e, 1, "EtherExpress"))
- return -EBUSY;
-
- eexp_hw_set_interface(dev);
-
- release_region(dev->base_addr + 0x300e, 1);
-
- /* Find out how much RAM we have on the card */
- outw(0, dev->base_addr + WRITE_PTR);
- for (i = 0; i < 32768; i++)
- outw(0, dev->base_addr + DATAPORT);
-
- for (memory_size = 0; memory_size < 64; memory_size++)
- {
- outw(memory_size<<10, dev->base_addr + READ_PTR);
- if (inw(dev->base_addr+DATAPORT))
- break;
- outw(memory_size<<10, dev->base_addr + WRITE_PTR);
- outw(memory_size | 0x5000, dev->base_addr+DATAPORT);
- outw(memory_size<<10, dev->base_addr + READ_PTR);
- if (inw(dev->base_addr+DATAPORT) != (memory_size | 0x5000))
- break;
- }
-
- /* Sort out the number of buffers. We may have 16, 32, 48 or 64k
- * of RAM to play with.
- */
- lp->num_tx_bufs = 4;
- lp->rx_buf_end = 0x3ff6;
- switch (memory_size)
- {
- case 64:
- lp->rx_buf_end += 0x4000;
- case 48:
- lp->num_tx_bufs += 4;
- lp->rx_buf_end += 0x4000;
- case 32:
- lp->rx_buf_end += 0x4000;
- case 16:
- printk(", %dk RAM)\n", memory_size);
- break;
- default:
- printk(") bad memory size (%dk).\n", memory_size);
- return -ENODEV;
- break;
- }
-
- lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
- lp->width = buswidth;
-
- dev->netdev_ops = &eexp_netdev_ops;
- dev->watchdog_timeo = 2*HZ;
-
- return register_netdev(dev);
-}
-
-/*
- * Read a word from the EtherExpress on-board serial EEPROM.
- * The EEPROM contains 64 words of 16 bits.
- */
-static unsigned short __init eexp_hw_readeeprom(unsigned short ioaddr,
- unsigned char location)
-{
- unsigned short cmd = 0x180|(location&0x7f);
- unsigned short rval = 0,wval = EC_CS|i586_RST;
- int i;
-
- outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl);
- for (i=0x100 ; i ; i>>=1 )
- {
- if (cmd&i)
- wval |= EC_Wr;
- else
- wval &= ~EC_Wr;
-
- outb(wval,ioaddr+EEPROM_Ctrl);
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- }
- wval &= ~EC_Wr;
- outb(wval,ioaddr+EEPROM_Ctrl);
- for (i=0x8000 ; i ; i>>=1 )
- {
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd)
- rval |= i;
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- }
- wval &= ~EC_CS;
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- return rval;
-}
-
-/*
- * Reap tx buffers and return last transmit status.
- * if ==0 then either:
- * a) we're not transmitting anything, so why are we here?
- * b) we've died.
- * otherwise, Stat_Busy(return) means we've still got some packets
- * to transmit, Stat_Done(return) means our buffers should be empty
- * again
- */
-
-static unsigned short eexp_hw_lasttxstat(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short tx_block = lp->tx_reap;
- unsigned short status;
-
- if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap)
- return 0x0000;
-
- do
- {
- outw(tx_block & ~31, dev->base_addr + SM_PTR);
- status = inw(dev->base_addr + SHADOW(tx_block));
- if (!Stat_Done(status))
- {
- lp->tx_link = tx_block;
- return status;
- }
- else
- {
- lp->last_tx_restart = 0;
- dev->stats.collisions += Stat_NoColl(status);
- if (!Stat_OK(status))
- {
- char *whatsup = NULL;
- dev->stats.tx_errors++;
- if (Stat_Abort(status))
- dev->stats.tx_aborted_errors++;
- if (Stat_TNoCar(status)) {
- whatsup = "aborted, no carrier";
- dev->stats.tx_carrier_errors++;
- }
- if (Stat_TNoCTS(status)) {
- whatsup = "aborted, lost CTS";
- dev->stats.tx_carrier_errors++;
- }
- if (Stat_TNoDMA(status)) {
- whatsup = "FIFO underran";
- dev->stats.tx_fifo_errors++;
- }
- if (Stat_TXColl(status)) {
- whatsup = "aborted, too many collisions";
- dev->stats.tx_aborted_errors++;
- }
- if (whatsup)
- printk(KERN_INFO "%s: transmit %s\n",
- dev->name, whatsup);
- }
- else
- dev->stats.tx_packets++;
- }
- if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
- lp->tx_reap = tx_block = TX_BUF_START;
- else
- lp->tx_reap = tx_block += TX_BUF_SIZE;
- netif_wake_queue(dev);
- }
- while (lp->tx_reap != lp->tx_head);
-
- lp->tx_link = lp->tx_tail + 0x08;
-
- return status;
-}
-
-/*
- * This should never happen. It is called when some higher routine detects
- * that the CU has stopped, to try to restart it from the last packet we knew
- * we were working on, or the idle loop if we had finished for the time.
- */
-
-static void eexp_hw_txrestart(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- lp->last_tx_restart = lp->tx_link;
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
-
- {
- unsigned short boguscount=50,failcount=5;
- while (!scb_status(dev))
- {
- if (!--boguscount)
- {
- if (--failcount)
- {
- printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n", dev->name, scb_status(dev), scb_rdcmd(dev));
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- boguscount = 100;
- }
- else
- {
- printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- return;
- }
- }
- }
- }
-}
-
-/*
- * Writes down the list of transmit buffers into card memory. Each
- * entry consists of an 82586 transmit command, followed by a jump
- * pointing to itself. When we want to transmit a packet, we write
- * the data into the appropriate transmit buffer and then modify the
- * preceding jump to point at the new transmit command. This means that
- * the 586 command unit is continuously active.
- */
-
-static void eexp_hw_txinit(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short tx_block = TX_BUF_START;
- unsigned short curtbuf;
- unsigned short ioaddr = dev->base_addr;
-
- for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
- {
- outw(tx_block, ioaddr + WRITE_PTR);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
- outw(tx_block+0x08, ioaddr + DATAPORT);
- outw(tx_block+0x0e, ioaddr + DATAPORT);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
- outw(tx_block+0x08, ioaddr + DATAPORT);
-
- outw(0x8000, ioaddr + DATAPORT);
- outw(-1, ioaddr + DATAPORT);
- outw(tx_block+0x16, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
-
- tx_block += TX_BUF_SIZE;
- }
- lp->tx_head = TX_BUF_START;
- lp->tx_reap = TX_BUF_START;
- lp->tx_tail = tx_block - TX_BUF_SIZE;
- lp->tx_link = lp->tx_tail + 0x08;
- lp->rx_buf_start = tx_block;
-
-}
-
-/*
- * Write the circular list of receive buffer descriptors to card memory.
- * The end of the list isn't marked, which means that the 82586 receive
- * unit will loop until buffers become available (this avoids it giving us
- * "out of resources" messages).
- */
-
-static void eexp_hw_rxinit(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short rx_block = lp->rx_buf_start;
- unsigned short ioaddr = dev->base_addr;
-
- lp->num_rx_bufs = 0;
- lp->rx_first = lp->rx_ptr = rx_block;
- do
- {
- lp->num_rx_bufs++;
-
- outw(rx_block, ioaddr + WRITE_PTR);
-
- outw(0, ioaddr + DATAPORT); outw(0, ioaddr+DATAPORT);
- outw(rx_block + RX_BUF_SIZE, ioaddr+DATAPORT);
- outw(0xffff, ioaddr+DATAPORT);
-
- outw(0x0000, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
-
- outw(0x0000, ioaddr+DATAPORT);
- outw(rx_block + RX_BUF_SIZE + 0x16, ioaddr+DATAPORT);
- outw(rx_block + 0x20, ioaddr+DATAPORT);
- outw(0, ioaddr+DATAPORT);
- outw(RX_BUF_SIZE-0x20, ioaddr+DATAPORT);
-
- lp->rx_last = rx_block;
- rx_block += RX_BUF_SIZE;
- } while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
-
-
- /* Make first Rx frame descriptor point to first Rx buffer
- descriptor */
- outw(lp->rx_first + 6, ioaddr+WRITE_PTR);
- outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
-
- /* Close Rx frame descriptor ring */
- outw(lp->rx_last + 4, ioaddr+WRITE_PTR);
- outw(lp->rx_first, ioaddr+DATAPORT);
-
- /* Close Rx buffer descriptor ring */
- outw(lp->rx_last + 0x16 + 2, ioaddr+WRITE_PTR);
- outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
-
-}
-
-/*
- * Un-reset the 586, and start the configuration sequence. We don't wait for
- * this to finish, but allow the interrupt handler to start the CU and RU for
- * us. We can't start the receive/transmission system up before we know that
- * the hardware is configured correctly.
- */
-
-static void eexp_hw_init586(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
- int i;
-
-#if NET_DEBUG > 6
- printk("%s: eexp_hw_init586()\n", dev->name);
-#endif
-
- lp->started = 0;
-
- set_loopback(dev);
-
- outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
-
- /* Download the startup code */
- outw(lp->rx_buf_end & ~31, ioaddr + SM_PTR);
- outw(lp->width?0x0001:0x0000, ioaddr + 0x8006);
- outw(0x0000, ioaddr + 0x8008);
- outw(0x0000, ioaddr + 0x800a);
- outw(0x0000, ioaddr + 0x800c);
- outw(0x0000, ioaddr + 0x800e);
-
- for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) {
- int j;
- outw(i, ioaddr + SM_PTR);
- for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2)
- outw(start_code[(i+j)/2],
- ioaddr+0x4000+j);
- for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2)
- outw(start_code[(i+j+16)/2],
- ioaddr+0x8000+j);
- }
-
- /* Do we want promiscuous mode or multicast? */
- outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
- i = inw(ioaddr+SHADOW(CONF_PROMISC));
- outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
- ioaddr+SHADOW(CONF_PROMISC));
- lp->was_promisc = dev->flags & IFF_PROMISC;
-#if 0
- eexp_setup_filter(dev);
-#endif
-
- /* Write our hardware address */
- outw(CONF_HWADDR & ~31, ioaddr+SM_PTR);
- outw(((unsigned short *)dev->dev_addr)[0], ioaddr+SHADOW(CONF_HWADDR));
- outw(((unsigned short *)dev->dev_addr)[1],
- ioaddr+SHADOW(CONF_HWADDR+2));
- outw(((unsigned short *)dev->dev_addr)[2],
- ioaddr+SHADOW(CONF_HWADDR+4));
-
- eexp_hw_txinit(dev);
- eexp_hw_rxinit(dev);
-
- outb(0,ioaddr+EEPROM_Ctrl);
- mdelay(5);
-
- scb_command(dev, 0xf000);
- outb(0,ioaddr+SIGNAL_CA);
-
- outw(0, ioaddr+SM_PTR);
-
- {
- unsigned short rboguscount=50,rfailcount=5;
- while (inw(ioaddr+0x4000))
- {
- if (!--rboguscount)
- {
- printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n",
- dev->name);
- scb_command(dev, 0);
- outb(0,ioaddr+SIGNAL_CA);
- rboguscount = 100;
- if (!--rfailcount)
- {
- printk(KERN_WARNING "%s: i82586 not responding, giving up.\n",
- dev->name);
- return;
- }
- }
- }
- }
-
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, 0xf000|SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
-
- {
- unsigned short iboguscount=50,ifailcount=5;
- while (!scb_status(dev))
- {
- if (!--iboguscount)
- {
- if (--ifailcount)
- {
- printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n",
- dev->name, scb_status(dev), scb_rdcmd(dev));
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, 0xf000|SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- iboguscount = 100;
- }
- else
- {
- printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name);
- return;
- }
- }
- }
- }
-
- clear_loopback(dev);
- outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
-
- lp->init_time = jiffies;
-#if NET_DEBUG > 6
- printk("%s: leaving eexp_hw_init586()\n", dev->name);
-#endif
-}
-
-static void eexp_setup_filter(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- unsigned short ioaddr = dev->base_addr;
- int count = netdev_mc_count(dev);
- int i;
- if (count > 8) {
- printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
- dev->name, count);
- count = 8;
- }
-
- outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
- outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- unsigned short *data = (unsigned short *) ha->addr;
-
- if (i == count)
- break;
- outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
- outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
- outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
- outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2));
- outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR);
- outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4));
- i++;
- }
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- */
-static void
-eexp_set_multicast(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- int kick = 0, i;
- if ((dev->flags & IFF_PROMISC) != lp->was_promisc) {
- outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
- i = inw(ioaddr+SHADOW(CONF_PROMISC));
- outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
- ioaddr+SHADOW(CONF_PROMISC));
- lp->was_promisc = dev->flags & IFF_PROMISC;
- kick = 1;
- }
- if (!(dev->flags & IFF_PROMISC)) {
- eexp_setup_filter(dev);
- if (lp->old_mc_count != netdev_mc_count(dev)) {
- kick = 1;
- lp->old_mc_count = netdev_mc_count(dev);
- }
- }
- if (kick) {
- unsigned long oj;
- scb_command(dev, SCB_CUsuspend);
- outb(0, ioaddr+SIGNAL_CA);
- outb(0, ioaddr+SIGNAL_CA);
-#if 0
- printk("%s: waiting for CU to go suspended\n", dev->name);
-#endif
- oj = jiffies;
- while ((SCB_CUstat(scb_status(dev)) == 2) &&
- (time_before(jiffies, oj + 2000)));
- if (SCB_CUstat(scb_status(dev)) == 2)
- printk("%s: warning, CU didn't stop\n", dev->name);
- lp->started &= ~(STARTED_CU);
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, SCB_CUstart);
- outb(0, ioaddr+SIGNAL_CA);
- }
-}
-
-
-/*
- * MODULE stuff
- */
-
-#ifdef MODULE
-
-#define EEXP_MAX_CARDS 4 /* max number of cards to support */
-
-static struct net_device *dev_eexp[EEXP_MAX_CARDS];
-static int irq[EEXP_MAX_CARDS];
-static int io[EEXP_MAX_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)");
-MODULE_LICENSE("GPL");
-
-
-/* Ideally the user would give us io=, irq= for every card. If any parameters
- * are specified, we verify and then use them. If no parameters are given, we
- * autoprobe for one card only.
- */
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
- dev = alloc_etherdev(sizeof(struct net_local));
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (io[this_dev] == 0) {
- if (this_dev)
- break;
- printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n");
- }
- if (do_express_probe(dev) == 0) {
- dev_eexp[this_dev] = dev;
- found++;
- continue;
- }
- printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]);
- free_netdev(dev);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
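
For illustration only (the I/O addresses and IRQs below are made-up example values, not driver defaults), loading the module for two cards as described in the comment above might look like:

	insmod eexpress.o io=0x300,0x320 irq=10,11
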
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
- struct net_device *dev = dev_eexp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- free_netdev(dev);
- }
- }
-}
-#endif
-
-/*
- * Local Variables:
- * c-file-style: "linux"
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/ethernet/i825xx/eexpress.h b/drivers/net/ethernet/i825xx/eexpress.h
deleted file mode 100644
index dc9c6ea289e9..000000000000
--- a/drivers/net/ethernet/i825xx/eexpress.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * eexpress.h: Intel EtherExpress16 defines
- */
-
-/*
- * EtherExpress card register addresses
- * as offsets from the base IO region (dev->base_addr)
- */
-
-#define DATAPORT 0x0000
-#define WRITE_PTR 0x0002
-#define READ_PTR 0x0004
-#define SIGNAL_CA 0x0006
-#define SET_IRQ 0x0007
-#define SM_PTR 0x0008
-#define MEM_Dec 0x000a
-#define MEM_Ctrl 0x000b
-#define MEM_Page_Ctrl 0x000c
-#define Config 0x000d
-#define EEPROM_Ctrl 0x000e
-#define ID_PORT 0x000f
-#define MEM_ECtrl 0x000f
-
-/*
- * card register defines
- */
-
-/* SET_IRQ */
-#define SIRQ_en 0x08
-#define SIRQ_dis 0x00
-
-/* EEPROM_Ctrl */
-#define EC_Clk 0x01
-#define EC_CS 0x02
-#define EC_Wr 0x04
-#define EC_Rd 0x08
-#define ASIC_RST 0x40
-#define i586_RST 0x80
-
-#define eeprom_delay() { udelay(40); }
-
-/*
- * i82586 Memory Configuration
- */
-
-/* (System Configuration Pointer) System start up block, read after 586_RST */
-#define SCP_START 0xfff6
-
-/* Intermediate System Configuration Pointer */
-#define ISCP_START 0x0000
-
-/* System Command Block */
-#define SCB_START 0x0008
-
-/* Start of buffer region. Everything before this is used for control
- * structures and the CU configuration program. The memory layout is
- * determined in eexp_hw_probe(), once we know how much memory is
- * available on the card.
- */
-
-#define TX_BUF_START 0x0100
-
-#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f)
-#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f)
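
A quick sanity check on these two sizes, assuming the usual ETH_FRAME_LEN of 1514: the transmit buffer needs 24 + 1514 = 1538 bytes, which rounds up to the next 32-byte boundary as 1568, and the receive buffer needs 32 + 1514 = 1546 bytes, which rounds up to the same 1568.
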
-
-/*
- * SCB defines
- */
-
-/* these functions take the SCB status word and test the relevant status bit */
-#define SCB_complete(s) (((s) & 0x8000) != 0)
-#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
-#define SCB_CUdead(s) (((s) & 0x2000) != 0)
-#define SCB_RUdead(s) (((s) & 0x1000) != 0)
-#define SCB_ack(s) ((s) & 0xf000)
-
-/* Command unit status: 0=idle, 1=suspended, 2=active */
-#define SCB_CUstat(s) (((s)&0x0300)>>8)
-
-/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
-#define SCB_RUstat(s) (((s)&0x0070)>>4)
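
As a standalone illustration (not part of the driver; the status value is hypothetical), the SCB status macros above can be exercised like this:

#include <stdio.h>

#define SCB_complete(s)	(((s) & 0x8000) != 0)
#define SCB_CUstat(s)	(((s) & 0x0300) >> 8)
#define SCB_RUstat(s)	(((s) & 0x0070) >> 4)

int main(void)
{
	unsigned short status = 0x8240;	/* hypothetical example value */

	printf("command complete: %d\n", SCB_complete(status));
	printf("CU state: %d (0=idle, 1=suspended, 2=active)\n",
	       SCB_CUstat(status));
	printf("RU state: %d (0=idle, 1=suspended, 2=out of resources, 4=ready)\n",
	       SCB_RUstat(status));
	return 0;
}
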
-
-/* SCB commands */
-#define SCB_CUnop 0x0000
-#define SCB_CUstart 0x0100
-#define SCB_CUresume 0x0200
-#define SCB_CUsuspend 0x0300
-#define SCB_CUabort 0x0400
-#define SCB_resetchip 0x0080
-
-#define SCB_RUnop 0x0000
-#define SCB_RUstart 0x0010
-#define SCB_RUresume 0x0020
-#define SCB_RUsuspend 0x0030
-#define SCB_RUabort 0x0040
-
-/*
- * Command block defines
- */
-
-#define Stat_Done(s) (((s) & 0x8000) != 0)
-#define Stat_Busy(s) (((s) & 0x4000) != 0)
-#define Stat_OK(s) (((s) & 0x2000) != 0)
-#define Stat_Abort(s) (((s) & 0x1000) != 0)
-#define Stat_STFail(s) (((s) & 0x0800) != 0)
-#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
-#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
-#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
-#define Stat_TDefer(s) (((s) & 0x0080) != 0)
-#define Stat_TColl(s) (((s) & 0x0040) != 0)
-#define Stat_TXColl(s) (((s) & 0x0020) != 0)
-#define Stat_NoColl(s) ((s) & 0x000f)
-
-/* Cmd_END will end AFTER the command if this is the first
- * command block after an SCB_CUstart, but BEFORE the command
- * for all subsequent commands. Best strategy is to place
- * Cmd_INT on the last command in the sequence, followed by a
- * dummy Cmd_Nop with Cmd_END after this.
- */
-
-#define Cmd_END 0x8000
-#define Cmd_SUS 0x4000
-#define Cmd_INT 0x2000
-
-#define Cmd_Nop 0x0000
-#define Cmd_SetAddr 0x0001
-#define Cmd_Config 0x0002
-#define Cmd_MCast 0x0003
-#define Cmd_Xmit 0x0004
-#define Cmd_TDR 0x0005
-#define Cmd_Dump 0x0006
-#define Cmd_Diag 0x0007
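
A minimal standalone sketch (illustrative only, not driver code) of the strategy described in the comment above: the last real command carries Cmd_INT, and a trailing dummy NOP carries Cmd_END.

#include <stdio.h>

#define Cmd_END		0x8000
#define Cmd_INT		0x2000
#define Cmd_Nop		0x0000
#define Cmd_Xmit	0x0004

int main(void)
{
	/* last real command: raise an interrupt when the transmit completes */
	unsigned short xmit = Cmd_INT | Cmd_Xmit;
	/* dummy NOP chained after it: Cmd_END stops the command unit there */
	unsigned short stop = Cmd_END | Cmd_Nop;

	printf("transmit command word: 0x%04x\n", xmit);
	printf("terminating NOP word:  0x%04x\n", stop);
	return 0;
}
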
-
-
-/*
- * Frame Descriptor (Receive block) defines
- */
-
-#define FD_Done(s) (((s) & 0x8000) != 0)
-#define FD_Busy(s) (((s) & 0x4000) != 0)
-#define FD_OK(s) (((s) & 0x2000) != 0)
-
-#define FD_CRC(s) (((s) & 0x0800) != 0)
-#define FD_Align(s) (((s) & 0x0400) != 0)
-#define FD_Resrc(s) (((s) & 0x0200) != 0)
-#define FD_DMA(s) (((s) & 0x0100) != 0)
-#define FD_Short(s) (((s) & 0x0080) != 0)
-#define FD_NoEOF(s) (((s) & 0x0040) != 0)
-
-struct rfd_header {
- volatile unsigned long flags;
- volatile unsigned short link;
- volatile unsigned short rbd_offset;
- volatile unsigned short dstaddr1;
- volatile unsigned short dstaddr2;
- volatile unsigned short dstaddr3;
- volatile unsigned short srcaddr1;
- volatile unsigned short srcaddr2;
- volatile unsigned short srcaddr3;
- volatile unsigned short length;
-
- /* This is actually a Receive Buffer Descriptor. The way we
- * arrange memory means that an RBD always follows the RFD that
- * points to it, so they might as well be in the same structure.
- */
- volatile unsigned short actual_count;
- volatile unsigned short next_rbd;
- volatile unsigned short buf_addr1;
- volatile unsigned short buf_addr2;
- volatile unsigned short size;
-};
-
-/* Returned data from the Time Domain Reflectometer */
-
-#define TDR_LINKOK (1<<15)
-#define TDR_XCVRPROBLEM (1<<14)
-#define TDR_OPEN (1<<13)
-#define TDR_SHORT (1<<12)
-#define TDR_TIME 0x7ff
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
deleted file mode 100644
index 3735bfa53600..000000000000
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ /dev/null
@@ -1,1337 +0,0 @@
-/* Intel Professional Workstation/panther ethernet driver */
-/* lp486e.c: A panther 82596 ethernet driver for linux. */
-/*
- History and copyrights:
-
- Driver skeleton
- Written 1993 by Donald Becker.
- Copyright 1993 United States Government as represented by the Director,
- National Security Agency. This software may only be used and
- distributed according to the terms of the GNU General Public License
- as modified by SRC, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Apricot
- Written 1994 by Mark Evans.
- This driver is for the Apricot 82596 bus-master interface
-
- Modularised 12/94 Mark Evans
-
- Professional Workstation
- Derived from apricot.c by Ard van Breemen
- <ard@murphy.nl>|<ard@cstmel.hobby.nl>|<ard@cstmel.nl.eu.org>
-
- Credits:
- Thanks to Murphy Software BV for letting me write this in their time.
- Well, actually, I get paid doing this...
- (Also: see http://www.murphy.nl for murphy, and my homepage ~ard for
- more information on the Professional Workstation)
-
- Present version
- aeb@cwi.nl
-*/
-/*
- There are currently two motherboards that I know of in the
- Professional Workstation. The only one that I know in detail is the
- Intel Panther motherboard. -- ard
-*/
-/*
-The PWS is equipped with an Intel 82596. This is a very intelligent controller
-which runs its own microcode. Communication with the host processor is done
-through linked lists of commands and buffers in the host processor's memory.
-A complete description of the 82596 is available from Intel; search for
-a file called "29021806.pdf", which describes the chip itself in full.
-To use it on the PWS, some additions are needed regarding generation of
-the PORT and CA signals, and the interrupt glue needed for a PC.
-I/O map:
-PORT SIZE ACTION MEANING
-0xCB0 2 WRITE Lower 16 bits for PORT command
-0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command
-0xCB4 1 WRITE Generation of CA signal
-0xCB8 1 WRITE Clear interrupt glue
-All other communication is through memory!
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define DRV_NAME "lp486e"
-
-/* debug print flags */
-#define LOG_SRCDST 0x80000000
-#define LOG_STATINT 0x40000000
-#define LOG_STARTINT 0x20000000
-
-#define i596_debug debug
-
-static int i596_debug = 0;
-
-static const char * const medianame[] = {
- "10baseT", "AUI",
- "10baseT-FD", "AUI-FD",
-};
-
-#define LP486E_TOTAL_SIZE 16
-
-#define I596_NULL (0xffffffff)
-
-#define CMD_EOL 0x8000 /* The last command of the list, stop. */
-#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
-#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
-
-#define CMD_FLEX 0x0008 /* Enable flexible memory model */
-
-enum commands {
- CmdNOP = 0,
- CmdIASetup = 1,
- CmdConfigure = 2,
- CmdMulticastList = 3,
- CmdTx = 4,
- CmdTDR = 5,
- CmdDump = 6,
- CmdDiagnose = 7
-};
-
-#if 0
-static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList",
- "Tx", "TDR", "Dump", "Diagnose" };
-#endif
-
-/* Status word bits */
-#define STAT_CX 0x8000 /* The CU finished executing a command
- with the Interrupt bit set */
-#define STAT_FR 0x4000 /* The RU finished receiving a frame */
-#define STAT_CNA 0x2000 /* The CU left the active state */
-#define STAT_RNR 0x1000 /* The RU left the active state */
-#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
-#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended,
- 2: active, 3-7: unused */
-#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended,
- 2: no resources, 4: ready,
- 10: no resources due to no more RBDs,
- 12: no more RBDs, other: unused */
-#define STAT_T 0x0008 /* Bus throttle timers loaded */
-#define STAT_ZERO 0x0807 /* Always zero */
-
-#if 0
-static char *CUstates[8] = {
- "idle", "suspended", "active", 0, 0, 0, 0, 0
-};
-static char *RUstates[16] = {
- "idle", "suspended", "no resources", 0, "ready", 0, 0, 0,
- 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0
-};
-
-static void
-i596_out_status(int status) {
- int bad = 0;
- char *s;
-
- printk("status %4.4x:", status);
- if (status == 0xffff)
- printk(" strange..\n");
- else {
- if (status & STAT_CX)
- printk(" CU done");
- if (status & STAT_CNA)
- printk(" CU stopped");
- if (status & STAT_FR)
- printk(" got a frame");
- if (status & STAT_RNR)
- printk(" RU stopped");
- if (status & STAT_T)
- printk(" throttled");
- if (status & STAT_ZERO)
- bad = 1;
- s = CUstates[(status & STAT_CUS) >> 8];
- if (!s)
- bad = 1;
- else
- printk(" CU(%s)", s);
- s = RUstates[(status & STAT_RUS) >> 4];
- if (!s)
- bad = 1;
- else
- printk(" RU(%s)", s);
- if (bad)
- printk(" bad status");
- printk("\n");
- }
-}
-#endif
-
-/* Command word bits */
-#define ACK_CX 0x8000
-#define ACK_FR 0x4000
-#define ACK_CNA 0x2000
-#define ACK_RNR 0x1000
-
-#define CUC_START 0x0100
-#define CUC_RESUME 0x0200
-#define CUC_SUSPEND 0x0300
-#define CUC_ABORT 0x0400
-
-#define RX_START 0x0010
-#define RX_RESUME 0x0020
-#define RX_SUSPEND 0x0030
-#define RX_ABORT 0x0040
-
-typedef u32 phys_addr;
-
-static inline phys_addr
-va_to_pa(void *x) {
- return x ? virt_to_bus(x) : I596_NULL;
-}
-
-static inline void *
-pa_to_va(phys_addr x) {
- return (x == I596_NULL) ? NULL : bus_to_virt(x);
-}
-
-/* status bits for cmd */
-#define CMD_STAT_C 0x8000 /* CU command complete */
-#define CMD_STAT_B 0x4000 /* CU command in progress */
-#define CMD_STAT_OK 0x2000 /* CU command completed without errors */
-#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */
-
-struct i596_cmd { /* 8 bytes */
- unsigned short status;
- unsigned short command;
- phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */
-};
-
-#define EOF 0x8000
-#define SIZE_MASK 0x3fff
-
-struct i596_tbd {
- unsigned short size;
- unsigned short pad;
- phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
- phys_addr pa_data; /* va_to_pa(char *data) */
- struct sk_buff *skb;
-};
-
-struct tx_cmd {
- struct i596_cmd cmd;
- phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */
- unsigned short size;
- unsigned short pad;
-};
-
-/* status bits for rfd */
-#define RFD_STAT_C 0x8000 /* Frame reception complete */
-#define RFD_STAT_B 0x4000 /* Frame reception in progress */
-#define RFD_STAT_OK 0x2000 /* Frame received without errors */
-#define RFD_STATUS 0x1fff
-#define RFD_LENGTH_ERR 0x1000
-#define RFD_CRC_ERR 0x0800
-#define RFD_ALIGN_ERR 0x0400
-#define RFD_NOBUFS_ERR 0x0200
-#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */
-#define RFD_SHORT_FRAME_ERR 0x0080
-#define RFD_NOEOP_ERR 0x0040
-#define RFD_TRUNC_ERR 0x0020
-#define RFD_MULTICAST 0x0002 /* 0: destination had our address
- 1: destination was broadcast/multicast */
-#define RFD_COLLISION 0x0001
-
-/* receive frame descriptor */
-struct i596_rfd {
- unsigned short stat;
- unsigned short cmd;
- phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */
- phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */
- unsigned short count;
- unsigned short size;
- char data[1532];
-};
-
-#define RBD_EL 0x8000
-#define RBD_P 0x4000
-#define RBD_SIZEMASK 0x3fff
-#define RBD_EOF 0x8000
-#define RBD_F 0x4000
-
-/* receive buffer descriptor */
-struct i596_rbd {
- unsigned short size;
- unsigned short pad;
-	phys_addr pa_next;	/* va_to_pa(struct i596_rbd *next) */
- phys_addr pa_data; /* va_to_pa(char *data) */
-	phys_addr pa_prev;	/* va_to_pa(struct i596_rbd *prev) */
-
- /* Driver private part */
- struct sk_buff *skb;
-};
-
-#define RX_RING_SIZE 64
-#define RX_SKBSIZE (ETH_FRAME_LEN+10)
-#define RX_RBD_SIZE 32
-
-/* System Control Block - 40 bytes */
-struct i596_scb {
- u16 status; /* 0 */
- u16 command; /* 2 */
- phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */
- phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */
- u32 crc_err; /* 12 */
- u32 align_err; /* 16 */
- u32 resource_err; /* 20 */
- u32 over_err; /* 24 */
- u32 rcvdt_err; /* 28 */
- u32 short_err; /* 32 */
- u16 t_on; /* 36 */
- u16 t_off; /* 38 */
-};
-
-/* Intermediate System Configuration Pointer - 8 bytes */
-struct i596_iscp {
- u32 busy; /* 0 */
- phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */
-};
-
-/* System Configuration Pointer - 12 bytes */
-struct i596_scp {
- u32 sysbus; /* 0 */
- u32 pad; /* 4 */
- phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */
-};
-
-/* Selftest and dump results - needs 16-byte alignment */
-/*
- * The size of the dump area is 304 bytes. When the dump is executed
- * by the Port command an extra word will be appended to the dump area.
- * The extra word is a copy of the Dump status word (containing the
- * C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump]
- */
-struct i596_dump {
- u16 dump[153]; /* (304 = 130h) + 2 bytes */
-};
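
For reference, the 153 words match the comment: the 304-byte (0x130) dump area plus the appended 2-byte status word gives 306 bytes, i.e. 153 16-bit words.
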
-
-struct i596_private { /* aligned to a 16-byte boundary */
- struct i596_scp scp; /* 0 - needs 16-byte alignment */
- struct i596_iscp iscp; /* 12 */
- struct i596_scb scb; /* 20 */
- u32 dummy; /* 60 */
- struct i596_dump dump; /* 64 - needs 16-byte alignment */
-
- struct i596_cmd set_add;
- char eth_addr[8]; /* directly follows set_add */
-
- struct i596_cmd set_conf;
- char i596_config[16]; /* directly follows set_conf */
-
- struct i596_cmd tdr;
- unsigned long tdr_stat; /* directly follows tdr */
-
- int last_restart;
- struct i596_rbd *rbd_list;
- struct i596_rbd *rbd_tail;
- struct i596_rfd *rx_tail;
- struct i596_cmd *cmd_tail;
- struct i596_cmd *cmd_head;
- int cmd_backlog;
- unsigned long last_cmd;
- spinlock_t cmd_lock;
-};
-
-static char init_setup[14] = {
- 0x8E, /* length 14 bytes, prefetch on */
- 0xC8, /* default: fifo to 8, monitor off */
- 0x40, /* default: don't save bad frames (apricot.c had 0x80) */
- 0x2E, /* (default is 0x26)
- No source address insertion, 8 byte preamble */
- 0x00, /* default priority and backoff */
- 0x60, /* default interframe spacing */
- 0x00, /* default slot time LSB */
- 0xf2, /* default slot time and nr of retries */
- 0x00, /* default various bits
- (0: promiscuous mode, 1: broadcast disable,
- 2: encoding mode, 3: transmit on no CRS,
- 4: no CRC insertion, 5: CRC type,
- 6: bit stuffing, 7: padding) */
- 0x00, /* default carrier sense and collision detect */
- 0x40, /* default minimum frame length */
- 0xff, /* (default is 0xff, and that is what apricot.c has;
- elp486.c has 0xfb: Enable crc append in memory.) */
- 0x00, /* default: not full duplex */
- 0x7f /* (default is 0x3f) multi IA */
-};
-
-static int i596_open(struct net_device *dev);
-static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t i596_interrupt(int irq, void *dev_id);
-static int i596_close(struct net_device *dev);
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void print_eth(char *);
-static void set_multicast_list(struct net_device *dev);
-static void i596_tx_timeout(struct net_device *dev);
-
-static int
-i596_timeout(struct net_device *dev, char *msg, int ct) {
- struct i596_private *lp;
- int boguscnt = ct;
-
- lp = netdev_priv(dev);
- while (lp->scb.command) {
- if (--boguscnt == 0) {
- printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
- dev->name, msg,
- lp->scb.status, lp->scb.command);
- return 1;
- }
- udelay(5);
- barrier();
- }
- return 0;
-}
-
-static inline int
-init_rx_bufs(struct net_device *dev, int num) {
- struct i596_private *lp;
- struct i596_rfd *rfd;
- int i;
- // struct i596_rbd *rbd;
-
- lp = netdev_priv(dev);
- lp->scb.pa_rfd = I596_NULL;
-
- for (i = 0; i < num; i++) {
- rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL);
- if (rfd == NULL)
- break;
-
- rfd->stat = 0;
- rfd->pa_rbd = I596_NULL;
- rfd->count = 0;
- rfd->size = 1532;
- if (i == 0) {
- rfd->cmd = CMD_EOL;
- lp->rx_tail = rfd;
- } else {
- rfd->cmd = 0;
- }
- rfd->pa_next = lp->scb.pa_rfd;
- lp->scb.pa_rfd = va_to_pa(rfd);
- lp->rx_tail->pa_next = lp->scb.pa_rfd;
- }
-
-#if 0
- for (i = 0; i<RX_RBD_SIZE; i++) {
- rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL);
- if (rbd) {
- rbd->pad = 0;
- rbd->count = 0;
- rbd->skb = dev_alloc_skb(RX_SKBSIZE);
- if (!rbd->skb) {
- printk("dev_alloc_skb failed");
- }
- rbd->next = rfd->rbd;
- if (i) {
- rfd->rbd->prev = rbd;
- rbd->size = RX_SKBSIZE;
- } else {
- rbd->size = (RX_SKBSIZE | RBD_EL);
- lp->rbd_tail = rbd;
- }
-
- rfd->rbd = rbd;
- }
- }
- lp->rbd_tail->next = rfd->rbd;
-#endif
- return i;
-}
-
-static inline void
-remove_rx_bufs(struct net_device *dev) {
- struct i596_private *lp;
- struct i596_rfd *rfd;
-
- lp = netdev_priv(dev);
- lp->rx_tail->pa_next = I596_NULL;
-
- do {
- rfd = pa_to_va(lp->scb.pa_rfd);
- lp->scb.pa_rfd = rfd->pa_next;
- kfree(rfd);
- } while (rfd != lp->rx_tail);
-
- lp->rx_tail = NULL;
-
-#if 0
- for (lp->rbd_list) {
- }
-#endif
-}
-
-#define PORT_RESET 0x00 /* reset 82596 */
-#define PORT_SELFTEST 0x01 /* selftest */
-#define PORT_ALTSCP 0x02 /* alternate SCB address */
-#define PORT_DUMP 0x03 /* dump */
-
-#define IOADDR 0xcb0 /* real constant */
-#define IRQ 10 /* default IRQ - can be changed by ECU */
-
-/* The 82596 requires two 16-bit write cycles for a port command */
-static inline void
-PORT(phys_addr a, unsigned int cmd) {
- if (a & 0xf)
- printk("lp486e.c: PORT: address not aligned\n");
- outw(((a & 0xffff) | cmd), IOADDR);
- outw(((a>>16) & 0xffff), IOADDR+2);
-}
-
-static inline void
-CA(void) {
- outb(0, IOADDR+4);
- udelay(8);
-}
-
-static inline void
-CLEAR_INT(void) {
- outb(0, IOADDR+8);
-}
-
-#if 0
-/* selftest or dump */
-static void
-i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
- struct i596_private *lp = netdev_priv(dev);
- u16 *outp;
- int i, m;
-
- memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
- outp = &(lp->dump.dump[0]);
-
- PORT(va_to_pa(outp), portcmd);
- mdelay(30); /* random, unmotivated */
-
- printk("lp486e i82596 %s result:\n", cmdname);
- for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
- ;
- for (i = 0; i < m; i++) {
- printk(" %04x", lp->dump.dump[i]);
- if (i%8 == 7)
- printk("\n");
- }
- printk("\n");
-}
-#endif
-
-static int
-i596_scp_setup(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- int boguscnt;
-
- /* Setup SCP, ISCP, SCB */
- /*
- * sysbus bits:
- * only a single byte is significant - here 0x44
- * 0x80: big endian mode (details depend on stepping)
- * 0x40: 1
- * 0x20: interrupt pin is active low
- * 0x10: lock function disabled
- * 0x08: external triggering of bus throttle timers
- * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode
- * 0x01: unused
- */
- lp->scp.sysbus = 0x00440000; /* linear mode */
- lp->scp.pad = 0; /* must be zero */
- lp->scp.pa_iscp = va_to_pa(&(lp->iscp));
-
- /*
- * The CPU sets the ISCP to 1 before it gives the first CA()
- */
- lp->iscp.busy = 0x0001;
- lp->iscp.pa_scb = va_to_pa(&(lp->scb));
-
- lp->scb.command = 0;
- lp->scb.status = 0;
- lp->scb.pa_cmd = I596_NULL;
- /* lp->scb.pa_rfd has been initialised already */
-
- lp->last_cmd = jiffies;
- lp->cmd_backlog = 0;
- lp->cmd_head = NULL;
-
- /*
- * Reset the 82596.
- * We need to wait 10 systemclock cycles, and
- * 5 serial clock cycles.
- */
- PORT(0, PORT_RESET); /* address part ignored */
- udelay(100);
-
- /*
- * Before the CA signal is asserted, the default SCP address
- * (0x00fffff4) can be changed to a 16-byte aligned value
- */
- PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */
-
- /*
- * The initialization procedure begins when a
- * Channel Attention signal is asserted after a reset.
- */
-
- CA();
-
- /*
- * The ISCP busy is cleared by the 82596 after the SCB address is read.
- */
- boguscnt = 100;
- while (lp->iscp.busy) {
- if (--boguscnt == 0) {
- /* No i82596 present? */
- printk("%s: i82596 initialization timed out\n",
- dev->name);
- return 1;
- }
- udelay(5);
- barrier();
- }
- /* I find here boguscnt==100, so no delay was required. */
-
- return 0;
-}
-
-static int
-init_i596(struct net_device *dev) {
- struct i596_private *lp;
-
- if (i596_scp_setup(dev))
- return 1;
-
- lp = netdev_priv(dev);
- lp->scb.command = 0;
-
- memcpy ((void *)lp->i596_config, init_setup, 14);
- lp->set_conf.command = CmdConfigure;
- i596_add_cmd(dev, (void *)&lp->set_conf);
-
- memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
- lp->set_add.command = CmdIASetup;
- i596_add_cmd(dev, &lp->set_add);
-
- lp->tdr.command = CmdTDR;
- i596_add_cmd(dev, &lp->tdr);
-
- if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
- return 1;
-
- lp->scb.command = RX_START;
- CA();
-
- barrier();
-
- if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
- return 1;
-
- return 0;
-}
-
-/* Receive a single frame */
-static inline int
-i596_rx_one(struct net_device *dev, struct i596_private *lp,
- struct i596_rfd *rfd, int *frames) {
-
- if (rfd->stat & RFD_STAT_OK) {
- /* a good frame */
- int pkt_len = (rfd->count & 0x3fff);
- struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
-
- (*frames)++;
-
- if (rfd->cmd & CMD_EOL)
- printk("Received on EOL\n");
-
- if (skb == NULL) {
- printk ("%s: i596_rx Memory squeeze, "
- "dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return 1;
- }
-
- memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- } else {
-#if 0
- printk("Frame reception error status %04x\n",
- rfd->stat);
-#endif
- dev->stats.rx_errors++;
- if (rfd->stat & RFD_COLLISION)
- dev->stats.collisions++;
- if (rfd->stat & RFD_SHORT_FRAME_ERR)
- dev->stats.rx_length_errors++;
- if (rfd->stat & RFD_DMA_ERR)
- dev->stats.rx_over_errors++;
- if (rfd->stat & RFD_NOBUFS_ERR)
- dev->stats.rx_fifo_errors++;
- if (rfd->stat & RFD_ALIGN_ERR)
- dev->stats.rx_frame_errors++;
- if (rfd->stat & RFD_CRC_ERR)
- dev->stats.rx_crc_errors++;
- if (rfd->stat & RFD_LENGTH_ERR)
- dev->stats.rx_length_errors++;
- }
- rfd->stat = rfd->count = 0;
- return 0;
-}
-
-static int
-i596_rx(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- struct i596_rfd *rfd;
- int frames = 0;
-
- while (1) {
- rfd = pa_to_va(lp->scb.pa_rfd);
- if (!rfd) {
- printk(KERN_ERR "i596_rx: NULL rfd?\n");
- return 0;
- }
-#if 1
- if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
- printk("SF:%p-%04x\n", rfd, rfd->stat);
-#endif
- if (!(rfd->stat & RFD_STAT_C))
- break; /* next one not ready */
- if (i596_rx_one(dev, lp, rfd, &frames))
- break; /* out of memory */
- rfd->cmd = CMD_EOL;
- lp->rx_tail->cmd = 0;
- lp->rx_tail = rfd;
- lp->scb.pa_rfd = rfd->pa_next;
- barrier();
- }
-
- return frames;
-}
-
-static void
-i596_cleanup_cmd(struct net_device *dev) {
- struct i596_private *lp;
- struct i596_cmd *cmd;
-
- lp = netdev_priv(dev);
- while (lp->cmd_head) {
- cmd = lp->cmd_head;
-
- lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
- lp->cmd_backlog--;
-
- switch ((cmd->command) & 0x7) {
- case CmdTx: {
- struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
- struct i596_tbd * tx_cmd_tbd;
- tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
-
- dev_kfree_skb_any(tx_cmd_tbd->skb);
-
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)tx_cmd);
- netif_wake_queue(dev);
- break;
- }
- case CmdMulticastList: {
- // unsigned short count = *((unsigned short *) (ptr + 1));
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)cmd);
- break;
- }
- default: {
- cmd->pa_next = I596_NULL;
- break;
- }
- }
- barrier();
- }
-
- if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
- ;
-
- lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
-}
-
-static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) {
-
- if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
- ;
-
- netif_stop_queue(dev);
-
- lp->scb.command = CUC_ABORT | RX_ABORT;
- CA();
- barrier();
-
- /* wait for shutdown */
- if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
- ;
-
- i596_cleanup_cmd(dev);
- i596_rx(dev);
-
- netif_start_queue(dev);
- /*dev_kfree_skb(skb, FREE_WRITE);*/
- init_i596(dev);
-}
-
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
- struct i596_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
-
- cmd->status = 0;
- cmd->command |= (CMD_EOL | CMD_INTR);
- cmd->pa_next = I596_NULL;
-
- spin_lock_irqsave(&lp->cmd_lock, flags);
-
- if (lp->cmd_head) {
- lp->cmd_tail->pa_next = va_to_pa(cmd);
- } else {
- lp->cmd_head = cmd;
- if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
- ;
- lp->scb.pa_cmd = va_to_pa(cmd);
- lp->scb.command = CUC_START;
- CA();
- }
- lp->cmd_tail = cmd;
- lp->cmd_backlog++;
-
- lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
- spin_unlock_irqrestore(&lp->cmd_lock, flags);
-
- if (lp->cmd_backlog > 16) {
- int tickssofar = jiffies - lp->last_cmd;
- if (tickssofar < HZ/4)
- return;
-
- printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name);
- i596_reset(dev, lp, ioaddr);
- }
-}
-
-static int i596_open(struct net_device *dev)
-{
- int i;
-
- i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev);
- if (i) {
- printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
- return i;
- }
-
- if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
- printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i);
-
- if (i < 4) {
- free_irq(dev->irq, dev);
- return -EAGAIN;
- }
- netif_start_queue(dev);
- init_i596(dev);
- return 0; /* Always succeed */
-}
-
-static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
- struct tx_cmd *tx_cmd;
- short length;
-
- length = skb->len;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
- if (tx_cmd == NULL) {
- printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.tx_dropped++;
- dev_kfree_skb (skb);
- } else {
- struct i596_tbd *tx_cmd_tbd;
- tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
- tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd);
- tx_cmd_tbd->pa_next = I596_NULL;
-
- tx_cmd->cmd.command = (CMD_FLEX | CmdTx);
-
- tx_cmd->pad = 0;
- tx_cmd->size = 0;
- tx_cmd_tbd->pad = 0;
- tx_cmd_tbd->size = (EOF | length);
-
- tx_cmd_tbd->pa_data = va_to_pa (skb->data);
- tx_cmd_tbd->skb = skb;
-
- if (i596_debug & LOG_SRCDST)
- print_eth (skb->data);
-
- i596_add_cmd (dev, (struct i596_cmd *) tx_cmd);
-
- dev->stats.tx_packets++;
- }
-
- return NETDEV_TX_OK;
-}
-
-static void
-i596_tx_timeout (struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* Transmitter timeout, serious problems. */
- printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name);
- dev->stats.tx_errors++;
-
- /* Try to restart the adaptor */
- if (lp->last_restart == dev->stats.tx_packets) {
- printk ("Resetting board.\n");
-
- /* Shutdown and restart */
- i596_reset (dev, lp, ioaddr);
- } else {
- /* Issue a channel attention signal */
- printk ("Kicking board.\n");
- lp->scb.command = (CUC_START | RX_START);
- CA();
- lp->last_restart = dev->stats.tx_packets;
- }
- netif_wake_queue(dev);
-}
-
-static void print_eth(char *add)
-{
- int i;
-
- printk ("Dest ");
- for (i = 0; i < 6; i++)
- printk(" %2.2X", (unsigned char) add[i]);
- printk ("\n");
-
- printk ("Source");
- for (i = 0; i < 6; i++)
- printk(" %2.2X", (unsigned char) add[i+6]);
- printk ("\n");
-
- printk ("type %2.2X%2.2X\n",
- (unsigned char) add[12], (unsigned char) add[13]);
-}
-
-static const struct net_device_ops i596_netdev_ops = {
- .ndo_open = i596_open,
- .ndo_stop = i596_close,
- .ndo_start_xmit = i596_start_xmit,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_tx_timeout = i596_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init lp486e_probe(struct net_device *dev) {
- struct i596_private *lp;
- unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
- unsigned char *bios;
- int i, j;
- int ret = -ENOMEM;
- static int probed;
-
- if (probed)
- return -ENODEV;
- probed++;
-
- if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) {
- printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR);
- return -EBUSY;
- }
-
- lp = netdev_priv(dev);
- spin_lock_init(&lp->cmd_lock);
-
- /*
- * Do we really have this thing?
- */
- if (i596_scp_setup(dev)) {
- ret = -ENODEV;
- goto err_out_kfree;
- }
-
- dev->base_addr = IOADDR;
- dev->irq = IRQ;
-
-
- /*
- * How do we find the ethernet address? I don't know.
- * One possibility is to look at the EISA configuration area
- * [0xe8000-0xe9fff]. This contains the ethernet address
- * but not at a fixed address - things depend on setup options.
- *
- * If we find no address, or the wrong address, use
- * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6
- * with the value found in the BIOS setup.
- */
- bios = bus_to_virt(0xe8000);
- for (j = 0; j < 0x2000; j++) {
- if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) {
- printk("%s: maybe address at BIOS 0x%x:",
- dev->name, 0xe8000+j);
- for (i = 0; i < 6; i++) {
- eth_addr[i] = bios[i+j];
- printk(" %2.2X", eth_addr[i]);
- }
- printk("\n");
- }
- }
-
- printk("%s: lp486e 82596 at %#3lx, IRQ %d,",
- dev->name, dev->base_addr, dev->irq);
- for (i = 0; i < 6; i++)
- printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
- printk("\n");
-
- /* The LP486E-specific entries in the device structure. */
- dev->netdev_ops = &i596_netdev_ops;
- dev->watchdog_timeo = 5*HZ;
-
-#if 0
- /* selftest reports 0x320925ae - don't know what that means */
- i596_port_do(dev, PORT_SELFTEST, "selftest");
- i596_port_do(dev, PORT_DUMP, "dump");
-#endif
- return 0;
-
-err_out_kfree:
- release_region(IOADDR, LP486E_TOTAL_SIZE);
- return ret;
-}
-
-static inline void
-i596_handle_CU_completion(struct net_device *dev,
- struct i596_private *lp,
- unsigned short status,
- unsigned short *ack_cmdp) {
- struct i596_cmd *cmd;
- int frames_out = 0;
- int commands_done = 0;
- int cmd_val;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->cmd_lock, flags);
- cmd = lp->cmd_head;
-
- while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) {
- cmd = lp->cmd_head;
-
- lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
- lp->cmd_backlog--;
-
- commands_done++;
- cmd_val = cmd->command & 0x7;
-#if 0
- printk("finished CU %s command (%d)\n",
- CUcmdnames[cmd_val], cmd_val);
-#endif
- switch (cmd_val) {
- case CmdTx:
- {
- struct tx_cmd *tx_cmd;
- struct i596_tbd *tx_cmd_tbd;
-
- tx_cmd = (struct tx_cmd *) cmd;
- tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
-
- frames_out++;
- if (cmd->status & CMD_STAT_OK) {
- if (i596_debug)
- print_eth(pa_to_va(tx_cmd_tbd->pa_data));
- } else {
- dev->stats.tx_errors++;
- if (i596_debug)
- printk("transmission failure:%04x\n",
- cmd->status);
- if (cmd->status & 0x0020)
- dev->stats.collisions++;
- if (!(cmd->status & 0x0040))
- dev->stats.tx_heartbeat_errors++;
- if (cmd->status & 0x0400)
- dev->stats.tx_carrier_errors++;
- if (cmd->status & 0x0800)
- dev->stats.collisions++;
- if (cmd->status & 0x1000)
- dev->stats.tx_aborted_errors++;
- }
- dev_kfree_skb_irq(tx_cmd_tbd->skb);
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)tx_cmd);
- netif_wake_queue(dev);
- break;
- }
-
- case CmdMulticastList:
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)cmd);
- break;
-
- case CmdTDR:
- {
- unsigned long status = *((unsigned long *) (cmd + 1));
- if (status & 0x8000) {
- if (i596_debug)
- printk("%s: link ok.\n", dev->name);
- } else {
- if (status & 0x4000)
- printk("%s: Transceiver problem.\n",
- dev->name);
- if (status & 0x2000)
- printk("%s: Termination problem.\n",
- dev->name);
- if (status & 0x1000)
- printk("%s: Short circuit.\n",
- dev->name);
- printk("%s: Time %ld.\n",
- dev->name, status & 0x07ff);
- }
- }
- default:
- cmd->pa_next = I596_NULL;
- lp->last_cmd = jiffies;
-
- }
- barrier();
- }
-
- cmd = lp->cmd_head;
- while (cmd && (cmd != lp->cmd_tail)) {
- cmd->command &= 0x1fff;
- cmd = pa_to_va(cmd->pa_next);
- barrier();
- }
-
- if (lp->cmd_head)
- *ack_cmdp |= CUC_START;
- lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
- spin_unlock_irqrestore(&lp->cmd_lock, flags);
-}
-
-static irqreturn_t
-i596_interrupt(int irq, void *dev_instance)
-{
- struct net_device *dev = dev_instance;
- struct i596_private *lp = netdev_priv(dev);
- unsigned short status, ack_cmd = 0;
- int frames_in = 0;
-
- /*
- * The 82596 examines the command, performs the required action,
- * and then clears the SCB command word.
- */
- if (lp->scb.command && i596_timeout(dev, "interrupt", 40))
- ;
-
- /*
- * The status word indicates the status of the 82596.
- * It is modified only by the 82596.
- *
-	 * [So, we must not clear it. I often find status 0xffff,
- * which is not one of the values allowed by the docs.]
- */
- status = lp->scb.status;
-#if 0
- if (i596_debug) {
- printk("%s: i596 interrupt, ", dev->name);
- i596_out_status(status);
- }
-#endif
- /* Impossible, but it happens - perhaps when we get
- a receive interrupt but scb.pa_rfd is I596_NULL. */
- if (status == 0xffff) {
- printk("%s: i596_interrupt: got status 0xffff\n", dev->name);
- goto out;
- }
-
- ack_cmd = (status & STAT_ACK);
-
- if (status & (STAT_CX | STAT_CNA))
- i596_handle_CU_completion(dev, lp, status, &ack_cmd);
-
- if (status & (STAT_FR | STAT_RNR)) {
- /* Restart the receive unit when it got inactive somehow */
- if ((status & STAT_RNR) && netif_running(dev))
- ack_cmd |= RX_START;
-
- if (status & STAT_FR) {
- frames_in = i596_rx(dev);
- if (!frames_in)
- printk("receive frame reported, but no frames\n");
- }
- }
-
- /* acknowledge the interrupt */
- /*
- if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev))
- ack_cmd |= CUC_START;
- */
-
- if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100))
- ;
-
- lp->scb.command = ack_cmd;
-
- CLEAR_INT();
- CA();
-
- out:
- return IRQ_HANDLED;
-}
-
-static int i596_close(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
-
- netif_stop_queue(dev);
-
- if (i596_debug)
- printk("%s: Shutting down ethercard, status was %4.4x.\n",
- dev->name, lp->scb.status);
-
- lp->scb.command = (CUC_ABORT | RX_ABORT);
- CA();
-
- i596_cleanup_cmd(dev);
-
- if (lp->scb.command && i596_timeout(dev, "i596_close", 200))
- ;
-
- free_irq(dev->irq, dev);
- remove_rx_bufs(dev);
-
- return 0;
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- */
-
-static void set_multicast_list(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- struct i596_cmd *cmd;
-
- if (i596_debug > 1)
- printk ("%s: set multicast list %d\n",
- dev->name, netdev_mc_count(dev));
-
- if (!netdev_mc_empty(dev)) {
- struct netdev_hw_addr *ha;
- char *cp;
- cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
- netdev_mc_count(dev) * 6, GFP_ATOMIC);
- if (cmd == NULL) {
- printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
- return;
- }
- cmd->command = CmdMulticastList;
- *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
- cp = ((char *)(cmd + 1))+2;
- netdev_for_each_mc_addr(ha, dev) {
- memcpy(cp, ha->addr, 6);
- cp += 6;
- }
- if (i596_debug & LOG_SRCDST)
- print_eth (((char *)(cmd + 1)) + 2);
- i596_add_cmd(dev, cmd);
- } else {
- if (lp->set_conf.pa_next != I596_NULL) {
- return;
- }
- if (netdev_mc_empty(dev) &&
- !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
- lp->i596_config[8] &= ~0x01;
- } else {
- lp->i596_config[8] |= 0x01;
- }
-
- i596_add_cmd(dev, &lp->set_conf);
- }
-}
-
-MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>");
-MODULE_DESCRIPTION("Intel Panther onboard i82596 driver");
-MODULE_LICENSE("GPL");
-
-static struct net_device *dev_lp486e;
-static int full_duplex;
-static int options;
-static int io = IOADDR;
-static int irq = IRQ;
-
-module_param(debug, int, 0);
-//module_param(max_interrupt_work, int, 0);
-//module_param(reverse_probe, int, 0);
-//module_param(rx_copybreak, int, 0);
-module_param(options, int, 0);
-module_param(full_duplex, int, 0);
-
-static int __init lp486e_init_module(void) {
- int err;
- struct net_device *dev = alloc_etherdev(sizeof(struct i596_private));
- if (!dev)
- return -ENOMEM;
-
- dev->irq = irq;
- dev->base_addr = io;
- err = lp486e_probe(dev);
- if (err) {
- free_netdev(dev);
- return err;
- }
- err = register_netdev(dev);
- if (err) {
- release_region(dev->base_addr, LP486E_TOTAL_SIZE);
- free_netdev(dev);
- return err;
- }
- dev_lp486e = dev;
- full_duplex = 0;
- options = 0;
- return 0;
-}
-
-static void __exit lp486e_cleanup_module(void) {
- unregister_netdev(dev_lp486e);
- release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE);
- free_netdev(dev_lp486e);
-}
-
-module_init(lp486e_init_module);
-module_exit(lp486e_cleanup_module);
diff --git a/drivers/net/ethernet/i825xx/ni52.c b/drivers/net/ethernet/i825xx/ni52.c
deleted file mode 100644
index 272976e1bb0f..000000000000
--- a/drivers/net/ethernet/i825xx/ni52.c
+++ /dev/null
@@ -1,1346 +0,0 @@
-/*
- * net-3-driver for the NI5210 card (i82586 Ethernet chip)
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
- * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
- * [feel free to mail ....]
- *
- * when used as a module (no autoprobing!),
- * run with e.g.:
- * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000
- *
- * PLEASE REPORT YOUR PERFORMANCE EXPERIENCES TO ME!!
- *
- * If you find a bug, please report to me:
- * the kernel panic output and any kmsg from the ni52 driver,
- * the ni5210 driver version and the Linux kernel version,
- * how much shared memory (memsize) is on the netcard,
- * bootprom: yes/no, base_addr, mem_start,
- * and maybe the ni5210 card revision and the i82586 version
- *
- * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340
- * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000,
- * 0xd8000,0xcc000,0xce000,0xda000,0xdc000
- *
- * sources:
- * skeleton.c from Donald Becker
- *
- * I have also done a look in the following sources: (mail me if you need them)
- * crynwr-packet-driver by Russ Nelson
- * Garret A. Wollman's (fourth) i82586-driver for BSD
- * (before getting an i82596 (yes 596 not 586) manual, the existing drivers
- * helped me a lot to understand this tricky chip.)
- *
- * Known Problems:
- * The internal sysbus seems to be slow. So we often lose packets because of
- * overruns while receiving from a fast remote host.
- * This can slow down TCP connections. Maybe the newer ni5210 cards are
- * better. My experience is that if a machine sends at more than about
- * 500-600 KB/s, the FIFO/sysbus overflows.
- *
- * IMPORTANT NOTE:
- * On fast networks, it's a (very) good idea to have 16K shared memory. With
- * 8K, we can store only 4 receive frames, so it can (easily) happen that a
- * remote machine 'overruns' our system.
- *
- * Known i82586/card problems (I'm sure there are many more!):
- * When running in NOP mode, the i82586 sometimes seems to forget to report
- * every xmit interrupt until we restart the CU.
- * Another MAJOR bug is that the RU sometimes seems to ignore the EL bit
- * in the RBD struct, which indicates the end of the RBD queue.
- * Instead, the RU fetches another (randomly selected and
- * usually in-use) RBD and begins to fill it. (Maybe this happens only if
- * the last buffer from the previous RFD fits exactly into the queue and
- * the next RFD can't fetch an initial RBD. Does anyone know more?)
- *
- * results from ftp performance tests with Linux 1.2.5
- * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
- * sending in NOP mode: peak performance up to 530 KB/s (but it is better
- * not to run in this mode)
- */
-
-/*
- * 29.Sept.96: virt_to_bus changes for new memory scheme
- * 19.Feb.96: more Mcast changes, module support (MH)
- *
- * 18.Nov.95: Mcast changes (AC).
- *
- * 23.April.95: fixed(?) receiving problems by configuring one RFD more
- * than the number of RBDs. May cause other problems.
- * 18.April.95: Added MODULE support (MH)
- * 17.April.95: MC related changes in init586() and set_multicast_list().
- * removed use of 'jiffies' in init586() (MH)
- *
- * 19.Sep.94: Added Multicast support (not tested yet) (MH)
- *
- * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling.
- * Now, every RFD has exactly one RBD. (MH)
- *
- * 14.Sep.94: added promiscuous mode, a few cleanups (MH)
- *
- * 19.Aug.94: changed request_irq() parameter (MH)
- *
- * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH)
- *
- * 19.July.94: lotsa cleanups .. (MH)
- *
- * 17.July.94: some patches ... verified to run with 1.1.29 (MH)
- *
- * 4.July.94: patches for Linux 1.1.24 (MH)
- *
- * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
- *
- * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff,
- * too (MH)
- *
- * < 30.Sep.93: first versions
- */
-
-static int debuglevel; /* debug-printk 0: off 1: a few 2: more */
-static int automatic_resume; /* experimental .. better should be zero */
-static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
-static int fifo = 0x8; /* don't change */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include "ni52.h"
-
-#define DRV_NAME "ni52"
-
-#define DEBUG /* debug on */
-#define SYSBUSVAL 1 /* 8 Bit */
-
-#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); }
-#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); }
-#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }
-#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }
-
-#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16)))
-#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base
-#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\
- - p->memtop))
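
For intuition, here is a standalone sketch (plain user-space C, no __iomem annotations, made-up sizes) of the round trip that make32() and make16() perform: card-side 16-bit offsets are taken relative to the top of the shared-memory window, and the cast to signed short makes them reach back down into the window.

#include <stdio.h>

int main(void)
{
	static char shmem[0x4000];		/* pretend 16K shared-memory window */
	char *memtop = shmem + sizeof(shmem);

	unsigned short card_off = 0xff00;	/* hypothetical 16-bit card pointer */
	char *host = memtop + (short)card_off;			/* the make32() idea */
	unsigned short back = (unsigned short)(host - memtop);	/* the make16() idea */

	printf("0x%04x -> %ld bytes into the window -> 0x%04x\n",
	       card_off, (long)(host - shmem), back);
	return 0;
}
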
-
-/******************* how to calculate the buffers *****************************
-
- * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
- * --------------- in a different (more stable?) mode. Only in this mode is it
- * possible to configure the driver with 'NO_NOPCOMMANDS'.
-
-sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
-sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
-sizeof(rfd) = 24; sizeof(rbd) = 12;
-sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
-sizeof(nop_cmd) = 8;
-
- * if you don't know the driver, better do not change these values: */
-
-#define RECV_BUFF_SIZE 1524 /* slightly oversized */
-#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
-#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
-#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
-#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
-#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
-
-/**************************************************************************/
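
Using the sizes listed above, a rough budget check (ignoring alignment padding) for the two configurations: the fixed structures take 36 bytes; one transmit buffer with its TBD and transmit command takes 1524 + 8 + 16 = 1548 bytes, plus two NOP commands (16 bytes); each receive buffer with its RFD and RBD takes 24 + 12 + 1524 = 1560 bytes. With 4 receive buffers that is 36 + 1548 + 16 + 4*1560 = 7840 bytes, which fits in 8K of shared memory; with 9 receive buffers it is 36 + 1548 + 16 + 9*1560 = 15640 bytes, which fits in 16K.
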
-
-
-#define NI52_TOTAL_SIZE 16
-#define NI52_ADDR0 0x02
-#define NI52_ADDR1 0x07
-#define NI52_ADDR2 0x01
-
-static int ni52_probe1(struct net_device *dev, int ioaddr);
-static irqreturn_t ni52_interrupt(int irq, void *dev_id);
-static int ni52_open(struct net_device *dev);
-static int ni52_close(struct net_device *dev);
-static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *);
-static struct net_device_stats *ni52_get_stats(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void ni52_timeout(struct net_device *dev);
-
-/* helper-functions */
-static int init586(struct net_device *dev);
-static int check586(struct net_device *dev, unsigned size);
-static void alloc586(struct net_device *dev);
-static void startrecv586(struct net_device *dev);
-static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr);
-static void ni52_rcv_int(struct net_device *dev);
-static void ni52_xmt_int(struct net_device *dev);
-static void ni52_rnr_int(struct net_device *dev);
-
-struct priv {
- char __iomem *base;
- char __iomem *mapped;
- char __iomem *memtop;
- spinlock_t spinlock;
- int reset;
- struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first;
- struct scp_struct __iomem *scp;
- struct iscp_struct __iomem *iscp;
- struct scb_struct __iomem *scb;
- struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS];
-#if (NUM_XMIT_BUFFS == 1)
- struct transmit_cmd_struct __iomem *xmit_cmds[2];
- struct nop_cmd_struct __iomem *nop_cmds[2];
-#else
- struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS];
- struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS];
-#endif
- int nop_point, num_recv_buffs;
- char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS];
- int xmit_count, xmit_last;
-};
-
-/* wait for command with timeout: */
-static void wait_for_scb_cmd(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- int i;
- for (i = 0; i < 16384; i++) {
- if (readb(&p->scb->cmd_cuc) == 0)
- break;
- udelay(4);
- if (i == 16383) {
- printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",
- dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus));
- if (!p->reset) {
- p->reset = 1;
- ni_reset586();
- }
- }
- }
-}
-
-static void wait_for_scb_cmd_ruc(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- int i;
- for (i = 0; i < 16384; i++) {
- if (readb(&p->scb->cmd_ruc) == 0)
- break;
- udelay(4);
- if (i == 16383) {
- printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
- dev->name, readb(&p->scb->cmd_ruc),
- readb(&p->scb->rus));
- if (!p->reset) {
- p->reset = 1;
- ni_reset586();
- }
- }
- }
-}
-
-static void wait_for_stat_compl(void __iomem *p)
-{
- struct nop_cmd_struct __iomem *addr = p;
- int i;
- for (i = 0; i < 32767; i++) {
- if (readw(&((addr)->cmd_status)) & STAT_COMPL)
- break;
- udelay(32);
- }
-}
-
-/**********************************************
- * close device
- */
-static int ni52_close(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- ni_reset586(); /* the hard way to stop the receiver */
- netif_stop_queue(dev);
- return 0;
-}
-
-/**********************************************
- * open device
- */
-static int ni52_open(struct net_device *dev)
-{
- int ret;
-
- ni_disint();
- alloc586(dev);
- init586(dev);
- startrecv586(dev);
- ni_enaint();
-
- ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev);
- if (ret) {
- ni_reset586();
- return ret;
- }
- netif_start_queue(dev);
- return 0; /* most done by init */
-}
-
-static int check_iscp(struct net_device *dev, void __iomem *addr)
-{
- struct iscp_struct __iomem *iscp = addr;
- struct priv *p = netdev_priv(dev);
- memset_io(iscp, 0, sizeof(struct iscp_struct));
-
- writel(make24(iscp), &p->scp->iscp);
- writeb(1, &iscp->busy);
-
- ni_reset586();
- ni_attn586();
- mdelay(32); /* wait a while... */
- /* i82586 clears 'busy' after successful init */
- if (readb(&iscp->busy))
- return 0;
- return 1;
-}
-
-/**********************************************
- * Check to see if there's an 82586 out there.
- */
-static int check586(struct net_device *dev, unsigned size)
-{
- struct priv *p = netdev_priv(dev);
- int i;
-
- p->mapped = ioremap(dev->mem_start, size);
- if (!p->mapped)
- return 0;
-
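-	/* 'base' is biased so that base + SCP_DEFAULT_ADDRESS hits the last
-	 * sizeof(struct scp_struct) bytes of the window, i.e. the shared
-	 * memory sits at the top of the 82586's 24-bit address space */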
- p->base = p->mapped + size - 0x01000000;
- p->memtop = p->mapped + size;
- p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS);
- p->scb = (struct scb_struct __iomem *) p->mapped;
- p->iscp = (struct iscp_struct __iomem *)p->scp - 1;
- memset_io(p->scp, 0, sizeof(struct scp_struct));
- for (i = 0; i < sizeof(struct scp_struct); i++)
- /* memory was writeable? */
- if (readb((char __iomem *)p->scp + i))
- goto Enodev;
- writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */
- if (readb(&p->scp->sysbus) != SYSBUSVAL)
- goto Enodev;
-
- if (!check_iscp(dev, p->mapped))
- goto Enodev;
- if (!check_iscp(dev, p->iscp))
- goto Enodev;
- return 1;
-Enodev:
- iounmap(p->mapped);
- return 0;
-}
-
-/******************************************************************
- * set up the ISCP at the right place; called by ni52_probe1() and ni52_open().
- */
-static void alloc586(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- ni_reset586();
- mdelay(32);
-
- memset_io(p->iscp, 0, sizeof(struct iscp_struct));
- memset_io(p->scp , 0, sizeof(struct scp_struct));
-
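-	/* point the SCP at the ISCP, mark the ISCP busy and kick the chip;
-	 * the 82586 clears 'busy' once it has accepted the init blocks */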
- writel(make24(p->iscp), &p->scp->iscp);
- writeb(SYSBUSVAL, &p->scp->sysbus);
- writew(make16(p->scb), &p->iscp->scb_offset);
-
- writeb(1, &p->iscp->busy);
- ni_reset586();
- ni_attn586();
-
- mdelay(32);
-
- if (readb(&p->iscp->busy))
- printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);
-
- p->reset = 0;
-
- memset_io(p->scb, 0, sizeof(struct scb_struct));
-}
-
-/* set: io,irq,memstart,memend or set it when calling insmod */
-static int irq = 9;
-static int io = 0x300;
-static long memstart; /* e.g 0xd0000 */
-static long memend; /* e.g 0xd4000 */
-
-/**********************************************
- * probe the ni5210-card
- */
-struct net_device * __init ni52_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct priv));
- static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0};
- const int *port;
- struct priv *p;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- p = netdev_priv(dev);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- memstart = dev->mem_start;
- memend = dev->mem_end;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = ni52_probe1(dev, io);
- } else if (io > 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = ports; *port && ni52_probe1(dev, *port) ; port++)
- ;
- if (*port)
- goto got_it;
-#ifdef FULL_IO_PROBE
- for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8)
- ;
- if (io < 0x400)
- goto got_it;
-#endif
- err = -ENODEV;
- }
- if (err)
- goto out;
-got_it:
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- iounmap(p->mapped);
- release_region(dev->base_addr, NI52_TOTAL_SIZE);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops ni52_netdev_ops = {
- .ndo_open = ni52_open,
- .ndo_stop = ni52_close,
- .ndo_get_stats = ni52_get_stats,
- .ndo_tx_timeout = ni52_timeout,
- .ndo_start_xmit = ni52_send_packet,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init ni52_probe1(struct net_device *dev, int ioaddr)
-{
- int i, size, retval;
- struct priv *priv = netdev_priv(dev);
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
- dev->mem_start = memstart;
- dev->mem_end = memend;
-
- spin_lock_init(&priv->spinlock);
-
- if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
- return -EBUSY;
-
- if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
- !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) {
- retval = -ENODEV;
- goto out;
- }
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(dev->base_addr+i);
-
- if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 ||
- dev->dev_addr[2] != NI52_ADDR2) {
- retval = -ENODEV;
- goto out;
- }
-
- printk(KERN_INFO "%s: NI5210 found at %#3lx, ",
- dev->name, dev->base_addr);
-
- /*
- * check (or search) IO-Memory, 8K and 16K
- */
-#ifdef MODULE
- size = dev->mem_end - dev->mem_start;
- if (size != 0x2000 && size != 0x4000) {
- printk("\n");
- printk(KERN_ERR "%s: Invalid memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size);
- retval = -ENODEV;
- goto out;
- }
- if (!check586(dev, size)) {
- printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
- retval = -ENODEV;
- goto out;
- }
-#else
- if (dev->mem_start != 0) {
- /* no auto-mem-probe */
- size = 0x4000; /* check for 16K mem */
- if (!check586(dev, size)) {
- size = 0x2000; /* check for 8K mem */
- if (!check586(dev, size)) {
- printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
- retval = -ENODEV;
- goto out;
- }
- }
- } else {
- static const unsigned long memaddrs[] = {
- 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000,
- 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0
- };
- for (i = 0;; i++) {
- if (!memaddrs[i]) {
- printk(KERN_ERR "?memprobe, Can't find io-memory!\n");
- retval = -ENODEV;
- goto out;
- }
- dev->mem_start = memaddrs[i];
- size = 0x2000; /* check for 8K mem */
- if (check586(dev, size))
- /* 8K-check */
- break;
- size = 0x4000; /* check for 16K mem */
- if (check586(dev, size))
- /* 16K-check */
- break;
- }
- }
- /* set mem_end showed by 'ifconfig' */
- dev->mem_end = dev->mem_start + size;
-#endif
-
- alloc586(dev);
-
- /* set number of receive-buffs according to memsize */
- if (size == 0x2000)
- priv->num_recv_buffs = NUM_RECV_BUFFS_8;
- else
- priv->num_recv_buffs = NUM_RECV_BUFFS_16;
-
- printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
- dev->mem_start, size);
-
- if (dev->irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- ni_reset586();
- ni_attn586();
-
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
- if (!dev->irq) {
- printk("?autoirq, Failed to detect IRQ line!\n");
- retval = -EAGAIN;
- iounmap(priv->mapped);
- goto out;
- }
- printk("IRQ %d (autodetected).\n", dev->irq);
- } else {
- if (dev->irq == 2)
- dev->irq = 9;
- printk("IRQ %d (assigned and not checked!).\n", dev->irq);
- }
-
- dev->netdev_ops = &ni52_netdev_ops;
- dev->watchdog_timeo = HZ/20;
-
- return 0;
-out:
- release_region(ioaddr, NI52_TOTAL_SIZE);
- return retval;
-}
-
-/**********************************************
- * init the chip (ni52-interrupt should be disabled?!)
- * needs a correct 'allocated' memory
- */
-
-static int init586(struct net_device *dev)
-{
- void __iomem *ptr;
- int i, result = 0;
- struct priv *p = netdev_priv(dev);
- struct configure_cmd_struct __iomem *cfg_cmd;
- struct iasetup_cmd_struct __iomem *ias_cmd;
- struct tdr_cmd_struct __iomem *tdr_cmd;
- struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct netdev_hw_addr *ha;
- int num_addrs = netdev_mc_count(dev);
-
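-	/* the action commands are built one behind the other in shared
-	 * memory, starting right after the SCB */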
- ptr = p->scb + 1;
-
- cfg_cmd = ptr; /* configure-command */
- writew(0, &cfg_cmd->cmd_status);
- writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
- writew(0xFFFF, &cfg_cmd->cmd_link);
-
- /* number of cfg bytes */
- writeb(0x0a, &cfg_cmd->byte_cnt);
- /* fifo-limit (8=tx:32/rx:64) */
- writeb(fifo, &cfg_cmd->fifo);
- /* hold or discard bad recv frames (bit 7) */
- writeb(0x40, &cfg_cmd->sav_bf);
- /* addr_len |!src_insert |pre-len |loopback */
- writeb(0x2e, &cfg_cmd->adr_len);
- writeb(0x00, &cfg_cmd->priority);
- writeb(0x60, &cfg_cmd->ifs);
- writeb(0x00, &cfg_cmd->time_low);
- writeb(0xf2, &cfg_cmd->time_high);
- writeb(0x00, &cfg_cmd->promisc);
- if (dev->flags & IFF_ALLMULTI) {
- int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6;
- if (num_addrs > len) {
- printk(KERN_ERR "%s: switching to promisc. mode\n",
- dev->name);
- writeb(0x01, &cfg_cmd->promisc);
- }
- }
- if (dev->flags & IFF_PROMISC)
- writeb(0x01, &cfg_cmd->promisc);
- writeb(0x00, &cfg_cmd->carr_coll);
- writew(make16(cfg_cmd), &p->scb->cbl_offset);
- writeb(0, &p->scb->cmd_ruc);
-
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(cfg_cmd);
-
- if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
- (STAT_COMPL|STAT_OK)) {
- printk(KERN_ERR "%s: configure command failed: %x\n",
- dev->name, readw(&cfg_cmd->cmd_status));
- return 1;
- }
-
- /*
- * individual address setup
- */
-
- ias_cmd = ptr;
-
- writew(0, &ias_cmd->cmd_status);
- writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
- writew(0xffff, &ias_cmd->cmd_link);
-
- memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);
-
- writew(make16(ias_cmd), &p->scb->cbl_offset);
-
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(ias_cmd);
-
- if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
- (STAT_OK|STAT_COMPL)) {
- printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status));
- return 1;
- }
-
- /*
-	 * TDR, wire check .. e.g. no resistor, etc.
- */
-
- tdr_cmd = ptr;
-
- writew(0, &tdr_cmd->cmd_status);
- writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
- writew(0xffff, &tdr_cmd->cmd_link);
- writew(0, &tdr_cmd->status);
-
- writew(make16(tdr_cmd), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(tdr_cmd);
-
- if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL))
- printk(KERN_ERR "%s: Problems while running the TDR.\n",
- dev->name);
- else {
- udelay(16);
- result = readw(&tdr_cmd->status);
- writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
- ni_attn586(); /* ack the interrupts */
-
- if (result & TDR_LNK_OK)
- ;
- else if (result & TDR_XCVR_PRB)
- printk(KERN_ERR "%s: TDR: Transceiver problem. Check the cable(s)!\n",
- dev->name);
- else if (result & TDR_ET_OPN)
- printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n",
- dev->name, result & TDR_TIMEMASK);
- else if (result & TDR_ET_SRT) {
- /* time == 0 -> strange :-) */
- if (result & TDR_TIMEMASK)
- printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n",
- dev->name, result & TDR_TIMEMASK);
- } else
- printk(KERN_ERR "%s: TDR: Unknown status %04x\n",
- dev->name, result);
- }
-
- /*
- * Multicast setup
- */
- if (num_addrs && !(dev->flags & IFF_PROMISC)) {
- mc_cmd = ptr;
- writew(0, &mc_cmd->cmd_status);
- writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
- writew(0xffff, &mc_cmd->cmd_link);
- writew(num_addrs * 6, &mc_cmd->mc_cnt);
-
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
-
- writew(make16(mc_cmd), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
-
- wait_for_stat_compl(mc_cmd);
-
- if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK))
- != (STAT_COMPL|STAT_OK))
- printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name);
- }
-
- /*
- * alloc nop/xmit-cmds
- */
-#if (NUM_XMIT_BUFFS == 1)
- for (i = 0; i < 2; i++) {
- p->nop_cmds[i] = ptr;
- writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
- writew(0, &p->nop_cmds[i]->cmd_status);
- writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
- ptr = ptr + sizeof(struct nop_cmd_struct);
- }
-#else
- for (i = 0; i < NUM_XMIT_BUFFS; i++) {
- p->nop_cmds[i] = ptr;
- writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
- writew(0, &p->nop_cmds[i]->cmd_status);
- writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
- ptr = ptr + sizeof(struct nop_cmd_struct);
- }
-#endif
-
- ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */
-
- /*
- * alloc xmit-buffs / init xmit_cmds
- */
- for (i = 0; i < NUM_XMIT_BUFFS; i++) {
- /* Transmit cmd/buff 0 */
- p->xmit_cmds[i] = ptr;
- ptr = ptr + sizeof(struct transmit_cmd_struct);
- p->xmit_cbuffs[i] = ptr; /* char-buffs */
- ptr = ptr + XMIT_BUFF_SIZE;
- p->xmit_buffs[i] = ptr; /* TBD */
- ptr = ptr + sizeof(struct tbd_struct);
- if ((void __iomem *)ptr > (void __iomem *)p->iscp) {
- printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
- dev->name);
- return 1;
- }
- memset_io(p->xmit_cmds[i], 0,
- sizeof(struct transmit_cmd_struct));
- memset_io(p->xmit_buffs[i], 0,
- sizeof(struct tbd_struct));
- writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
- &p->xmit_cmds[i]->cmd_link);
- writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status);
- writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd);
- writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset);
- writew(0xffff, &p->xmit_buffs[i]->next);
- writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer);
- }
-
- p->xmit_count = 0;
- p->xmit_last = 0;
-#ifndef NO_NOPCOMMANDS
- p->nop_point = 0;
-#endif
-
- /*
- * 'start transmitter'
- */
-#ifndef NO_NOPCOMMANDS
- writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
-#else
- writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link);
- writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd);
-#endif
-
- /*
- * ack. interrupts
- */
- writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
- ni_attn586();
- udelay(16);
-
- ni_enaint();
-
- return 0;
-}
-
-/******************************************************
- * This is a helper routine for ni52_rnr_int() and init586().
- * It sets up the Receive Frame Area (RFA).
- */
-
-static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr)
-{
- struct rfd_struct __iomem *rfd = ptr;
- struct rbd_struct __iomem *rbd;
- int i;
- struct priv *p = netdev_priv(dev);
-
- memset_io(rfd, 0,
- sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
- p->rfd_first = rfd;
-
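-	/* link the RFDs into a ring; no RBD is attached yet
-	 * (rbd_offset = 0xffff) */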
- for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) {
- writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)),
- &rfd[i].next);
- writew(0xffff, &rfd[i].rbd_offset);
- }
- /* RU suspend */
- writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);
-
- ptr = rfd + (p->num_recv_buffs + rfdadd);
-
- rbd = ptr;
- ptr = rbd + p->num_recv_buffs;
-
- /* clr descriptors */
- memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs));
-
- for (i = 0; i < p->num_recv_buffs; i++) {
- writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
- writew(RECV_BUFF_SIZE, &rbd[i].size);
- writel(make24(ptr), &rbd[i].buffer);
- ptr = ptr + RECV_BUFF_SIZE;
- }
- p->rfd_top = p->rfd_first;
- p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
-
- writew(make16(p->rfd_first), &p->scb->rfa_offset);
- writew(make16(rbd), &p->rfd_first->rbd_offset);
-
- return ptr;
-}
-
-
-/**************************************************
- * Interrupt Handler ...
- */
-
-static irqreturn_t ni52_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- unsigned int stat;
- int cnt = 0;
- struct priv *p;
-
- p = netdev_priv(dev);
-
- if (debuglevel > 1)
- printk("I");
-
- spin_lock(&p->spinlock);
-
- wait_for_scb_cmd(dev); /* wait for last command */
-
- while ((stat = readb(&p->scb->cus) & STAT_MASK)) {
- writeb(stat, &p->scb->cmd_cuc);
- ni_attn586();
-
- if (stat & STAT_FR) /* received a frame */
- ni52_rcv_int(dev);
-
- if (stat & STAT_RNR) { /* RU went 'not ready' */
- printk("(R)");
- if (readb(&p->scb->rus) & RU_SUSPEND) {
- /* special case: RU_SUSPEND */
- wait_for_scb_cmd(dev);
- writeb(RUC_RESUME, &p->scb->cmd_ruc);
- ni_attn586();
- wait_for_scb_cmd_ruc(dev);
- } else {
- printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",
- dev->name, stat, readb(&p->scb->rus));
- ni52_rnr_int(dev);
- }
- }
-
- /* Command with I-bit set complete */
- if (stat & STAT_CX)
- ni52_xmt_int(dev);
-
-#ifndef NO_NOPCOMMANDS
- if (stat & STAT_CNA) { /* CU went 'not ready' */
- if (netif_running(dev))
- printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n",
- dev->name, stat, readb(&p->scb->cus));
- }
-#endif
-
- if (debuglevel > 1)
- printk("%d", cnt++);
-
- /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */
- wait_for_scb_cmd(dev);
- if (readb(&p->scb->cmd_cuc)) { /* timed out? */
- printk(KERN_ERR "%s: Acknowledge timed out.\n",
- dev->name);
- ni_disint();
- break;
- }
- }
- spin_unlock(&p->spinlock);
-
- if (debuglevel > 1)
- printk("i");
- return IRQ_HANDLED;
-}
-
-/*******************************************************
- * receive-interrupt
- */
-
-static void ni52_rcv_int(struct net_device *dev)
-{
- int status, cnt = 0;
- unsigned short totlen;
- struct sk_buff *skb;
- struct rbd_struct __iomem *rbd;
- struct priv *p = netdev_priv(dev);
-
- if (debuglevel > 0)
- printk("R");
-
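-	/* walk all completed RFDs, starting at rfd_top */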
- for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
- rbd = make32(readw(&p->rfd_top->rbd_offset));
- if (status & RFD_OK) { /* frame received without error? */
- totlen = readw(&rbd->status);
- if (totlen & RBD_LAST) {
- /* the first and the last buffer? */
- totlen &= RBD_MASK; /* length of this frame */
- writew(0x00, &rbd->status);
- skb = netdev_alloc_skb(dev, totlen + 2);
- if (skb != NULL) {
- skb_reserve(skb, 2);
- skb_put(skb, totlen);
- memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += totlen;
- } else
- dev->stats.rx_dropped++;
- } else {
- int rstat;
- /* free all RBD's until RBD_LAST is set */
- totlen = 0;
- while (!((rstat = readw(&rbd->status)) & RBD_LAST)) {
- totlen += rstat & RBD_MASK;
- if (!rstat) {
- printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name);
- break;
- }
- writew(0, &rbd->status);
- rbd = make32(readw(&rbd->next));
- }
- totlen += rstat & RBD_MASK;
- writew(0, &rbd->status);
- printk(KERN_ERR "%s: received oversized frame! length: %d\n",
- dev->name, totlen);
- dev->stats.rx_dropped++;
- }
- } else {/* frame !(ok), only with 'save-bad-frames' */
- printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
- dev->name, status);
- dev->stats.rx_errors++;
- }
- writeb(0, &p->rfd_top->stat_high);
- writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */
- writew(0xffff, &p->rfd_top->rbd_offset);
- writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */
- p->rfd_last = p->rfd_top;
- p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */
- writew(make16(p->rfd_top), &p->scb->rfa_offset);
-
- if (debuglevel > 0)
- printk("%d", cnt++);
- }
-
- if (automatic_resume) {
- wait_for_scb_cmd(dev);
- writeb(RUC_RESUME, &p->scb->cmd_ruc);
- ni_attn586();
- wait_for_scb_cmd_ruc(dev);
- }
-
-#ifdef WAIT_4_BUSY
- {
- int i;
- for (i = 0; i < 1024; i++) {
-			if (readb(&p->rfd_top->stat_high))
- break;
- udelay(16);
- if (i == 1023)
- printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name);
- }
- }
-#endif
- if (debuglevel > 0)
- printk("r");
-}
-
-/**********************************************************
- * handle 'Receiver went not ready'.
- */
-
-static void ni52_rnr_int(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- dev->stats.rx_errors++;
-
- wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
- writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */
- ni_attn586();
- wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */
-
- alloc_rfa(dev, p->rfd_first);
- /* maybe add a check here, before restarting the RU */
- startrecv586(dev); /* restart RU */
-
- printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n",
- dev->name, readb(&p->scb->rus));
-
-}
-
-/**********************************************************
- * handle xmit - interrupt
- */
-
-static void ni52_xmt_int(struct net_device *dev)
-{
- int status;
- struct priv *p = netdev_priv(dev);
-
- if (debuglevel > 0)
- printk("X");
-
- status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status);
- if (!(status & STAT_COMPL))
- printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
-
- if (status & STAT_OK) {
- dev->stats.tx_packets++;
- dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
- } else {
- dev->stats.tx_errors++;
- if (status & TCMD_LATECOLL) {
- printk(KERN_ERR "%s: late collision detected.\n",
- dev->name);
- dev->stats.collisions++;
- } else if (status & TCMD_NOCARRIER) {
- dev->stats.tx_carrier_errors++;
- printk(KERN_ERR "%s: no carrier detected.\n",
- dev->name);
- } else if (status & TCMD_LOSTCTS)
- printk(KERN_ERR "%s: loss of CTS detected.\n",
- dev->name);
- else if (status & TCMD_UNDERRUN) {
- dev->stats.tx_fifo_errors++;
- printk(KERN_ERR "%s: DMA underrun detected.\n",
- dev->name);
- } else if (status & TCMD_MAXCOLL) {
- printk(KERN_ERR "%s: Max. collisions exceeded.\n",
- dev->name);
- dev->stats.collisions += 16;
- }
- }
-#if (NUM_XMIT_BUFFS > 1)
- if ((++p->xmit_last) == NUM_XMIT_BUFFS)
- p->xmit_last = 0;
-#endif
- netif_wake_queue(dev);
-}
-
-/***********************************************************
- * (re)start the receiver
- */
-
-static void startrecv586(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- wait_for_scb_cmd(dev);
- wait_for_scb_cmd_ruc(dev);
- writew(make16(p->rfd_first), &p->scb->rfa_offset);
- writeb(RUC_START, &p->scb->cmd_ruc);
- ni_attn586(); /* start cmd. */
- wait_for_scb_cmd_ruc(dev);
- /* wait for accept cmd. (no timeout!!) */
-}
-
-static void ni52_timeout(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-#ifndef NO_NOPCOMMANDS
- if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */
- netif_wake_queue(dev);
-#ifdef DEBUG
- printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n",
- dev->name);
-		printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n",
-			dev->name, readw(&p->xmit_cmds[0]->cmd_status),
-			readw(&p->nop_cmds[0]->cmd_status),
-			readw(&p->nop_cmds[1]->cmd_status),
-			p->nop_point);
-#endif
- writeb(CUC_ABORT, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
- writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
-		return;
- }
-#endif
- {
-#ifdef DEBUG
- printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n",
- dev->name, readb(&p->scb->cus));
- printk(KERN_ERR "%s: command-stats: %04x %04x\n",
- dev->name,
- readw(&p->xmit_cmds[0]->cmd_status),
- readw(&p->xmit_cmds[1]->cmd_status));
- printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n",
- dev->name);
-#endif
- ni52_close(dev);
- ni52_open(dev);
- }
- dev->trans_start = jiffies; /* prevent tx timeout */
-}
-
-/******************************************************
- * send frame
- */
-
-static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- int len, i;
-#ifndef NO_NOPCOMMANDS
- int next_nop;
-#endif
- struct priv *p = netdev_priv(dev);
-
-	if (skb->len > XMIT_BUFF_SIZE) {
-		printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
- netif_stop_queue(dev);
-
- memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
- len = skb->len;
- if (len < ETH_ZLEN) {
- len = ETH_ZLEN;
- memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
- len - skb->len);
- }
-
-#if (NUM_XMIT_BUFFS == 1)
-# ifdef NO_NOPCOMMANDS
-
-#ifdef DEBUG
- if (readb(&p->scb->cus) & CU_ACTIVE) {
- printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
- printk(KERN_ERR "%s: stat: %04x %04x\n",
- dev->name, readb(&p->scb->cus),
- readw(&p->xmit_cmds[0]->cmd_status));
- }
-#endif
- writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
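-	/* the CU does not always start on the first attempt,
-	 * so retry a few times (see the check below) */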
- for (i = 0; i < 16; i++) {
- writew(0, &p->xmit_cmds[0]->cmd_status);
- wait_for_scb_cmd(dev);
- if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
- writeb(CUC_RESUME, &p->scb->cmd_cuc);
- else {
- writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- }
- ni_attn586();
- if (!i)
- dev_kfree_skb(skb);
- wait_for_scb_cmd(dev);
- /* test it, because CU sometimes doesn't start immediately */
- if (readb(&p->scb->cus) & CU_ACTIVE)
- break;
- if (readw(&p->xmit_cmds[0]->cmd_status))
- break;
- if (i == 15)
- printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
- }
-# else
- next_nop = (p->nop_point + 1) & 0x1;
- writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
- writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
- writew(make16(p->nop_cmds[next_nop]),
- &p->nop_cmds[next_nop]->cmd_link);
- writew(0, &p->xmit_cmds[0]->cmd_status);
- writew(0, &p->nop_cmds[next_nop]->cmd_status);
-
- writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
- p->nop_point = next_nop;
- dev_kfree_skb(skb);
-# endif
-#else
- writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
-	next_nop = p->xmit_count + 1;
- if (next_nop == NUM_XMIT_BUFFS)
- next_nop = 0;
- writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
- /* linkpointer of xmit-command already points to next nop cmd */
- writew(make16(p->nop_cmds[next_nop]),
- &p->nop_cmds[next_nop]->cmd_link);
- writew(0, &p->nop_cmds[next_nop]->cmd_status);
- writew(make16(p->xmit_cmds[p->xmit_count]),
- &p->nop_cmds[p->xmit_count]->cmd_link);
- p->xmit_count = next_nop;
- {
- unsigned long flags;
-		spin_lock_irqsave(&p->spinlock, flags);
- if (p->xmit_count != p->xmit_last)
- netif_wake_queue(dev);
-		spin_unlock_irqrestore(&p->spinlock, flags);
- }
- dev_kfree_skb(skb);
-#endif
- return NETDEV_TX_OK;
-}
-
-/*******************************************
- * Someone wanna have the statistics
- */
-
-static struct net_device_stats *ni52_get_stats(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- unsigned short crc, aln, rsc, ovrn;
-
- /* Get error-statistics from the ni82586 */
- crc = readw(&p->scb->crc_errs);
- writew(0, &p->scb->crc_errs);
- aln = readw(&p->scb->aln_errs);
- writew(0, &p->scb->aln_errs);
- rsc = readw(&p->scb->rsc_errs);
- writew(0, &p->scb->rsc_errs);
- ovrn = readw(&p->scb->ovrn_errs);
- writew(0, &p->scb->ovrn_errs);
-
- dev->stats.rx_crc_errors += crc;
- dev->stats.rx_fifo_errors += ovrn;
- dev->stats.rx_frame_errors += aln;
- dev->stats.rx_dropped += rsc;
-
- return &dev->stats;
-}
-
-/********************************************************
- * Set MC list ..
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- netif_stop_queue(dev);
- ni_disint();
- alloc586(dev);
- init586(dev);
- startrecv586(dev);
- ni_enaint();
- netif_wake_queue(dev);
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni52;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(memstart, long, 0);
-module_param(memend, long, 0);
-MODULE_PARM_DESC(io, "NI5210 I/O base address, required");
-MODULE_PARM_DESC(irq, "NI5210 IRQ number, required");
-MODULE_PARM_DESC(memstart, "NI5210 memory base address, required");
-MODULE_PARM_DESC(memend, "NI5210 memory end address, required");
-
-int __init init_module(void)
-{
- if (io <= 0x0 || !memend || !memstart || irq < 2) {
- printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n");
- printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
- return -ENODEV;
- }
- dev_ni52 = ni52_probe(-1);
- if (IS_ERR(dev_ni52))
- return PTR_ERR(dev_ni52);
- return 0;
-}
-
-void __exit cleanup_module(void)
-{
- struct priv *p = netdev_priv(dev_ni52);
- unregister_netdev(dev_ni52);
- iounmap(p->mapped);
- release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
- free_netdev(dev_ni52);
-}
-#endif /* MODULE */
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/ni52.h b/drivers/net/ethernet/i825xx/ni52.h
deleted file mode 100644
index 0a03b2883327..000000000000
--- a/drivers/net/ethernet/i825xx/ni52.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Intel i82586 Ethernet definitions
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
- *
- * I have done a look in the following sources:
- * crynwr-packet-driver by Russ Nelson
- * Garret A. Wollman's i82586-driver for BSD
- */
-
-
-#define NI52_RESET	0	/* writing to this address resets the i82586 */
-#define NI52_ATTENTION 1 /* channel attention, kick the 586 */
-#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */
-#define NI52_TDIS 2 /* Xmit disable */
-#define NI52_INTENA 5 /* Interrupt enable */
-#define NI52_INTDIS 4 /* Interrupt disable */
-#define NI52_MAGIC1 6 /* dunno exact function */
-#define NI52_MAGIC2 7 /* dunno exact function */
-
-#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */
-#define NI52_MAGICVAL2 0x55
-
-/*
- * where to find the System Configuration Pointer (SCP)
- */
-#define SCP_DEFAULT_ADDRESS 0xfffff4
-
-
-/*
- * System Configuration Pointer Struct
- */
-
-struct scp_struct
-{
- u16 zero_dum0; /* has to be zero */
- u8 sysbus; /* 0=16Bit,1=8Bit */
- u8 zero_dum1; /* has to be zero for 586 */
- u16 zero_dum2;
- u16 zero_dum3;
- u32 iscp; /* pointer to the iscp-block */
-};
-
-
-/*
- * Intermediate System Configuration Pointer (ISCP)
- */
-struct iscp_struct
-{
- u8 busy; /* 586 clears after successful init */
- u8 zero_dummy; /* has to be zero */
- u16 scb_offset; /* pointeroffset to the scb_base */
- u32 scb_base; /* base-address of all 16-bit offsets */
-};
-
-/*
- * System Control Block (SCB)
- */
-struct scb_struct
-{
- u8 rus;
- u8 cus;
- u8 cmd_ruc; /* command word: RU part */
- u8 cmd_cuc; /* command word: CU part & ACK */
- u16 cbl_offset; /* pointeroffset, command block list */
- u16 rfa_offset; /* pointeroffset, receive frame area */
- u16 crc_errs; /* CRC-Error counter */
-	u16 aln_errs;	/* alignment error counter */
-	u16 rsc_errs;	/* resource error counter */
-	u16 ovrn_errs;	/* overrun error counter */
-};
-
-/*
- * possible command values for the command word
- */
-#define RUC_MASK 0x0070 /* mask for RU commands */
-#define RUC_NOP 0x0000 /* NOP-command */
-#define RUC_START 0x0010 /* start RU */
-#define RUC_RESUME 0x0020 /* resume RU after suspend */
-#define RUC_SUSPEND 0x0030 /* suspend RU */
-#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
-
-#define CUC_MASK 0x07 /* mask for CU command */
-#define CUC_NOP 0x00 /* NOP-command */
-#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */
-#define CUC_RESUME 0x02 /* resume after suspend */
-#define CUC_SUSPEND 0x03 /* Suspend CU */
-#define CUC_ABORT 0x04 /* abort command operation immediately */
-
-#define ACK_MASK 0xf0 /* mask for ACK command */
-#define ACK_CX 0x80 /* acknowledges STAT_CX */
-#define ACK_FR 0x40 /* ack. STAT_FR */
-#define ACK_CNA 0x20 /* ack. STAT_CNA */
-#define ACK_RNR 0x10 /* ack. STAT_RNR */
-
-/*
- * possible status values for the status word
- */
-#define STAT_MASK 0xf0 /* mask for cause of interrupt */
-#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
-#define STAT_FR 0x40 /* RU finished receiving a frame */
-#define STAT_CNA 0x20 /* CU left active state */
-#define STAT_RNR 0x10 /* RU left ready state */
-
-#define CU_STATUS 0x7 /* CU status, 0=idle */
-#define CU_SUSPEND 0x1 /* CU is suspended */
-#define CU_ACTIVE 0x2 /* CU is active */
-
-#define RU_STATUS 0x70 /* RU status, 0=idle */
-#define RU_SUSPEND 0x10 /* RU suspended */
-#define RU_NOSPACE 0x20 /* RU no resources */
-#define RU_READY 0x40 /* RU is ready */
-
-/*
- * Receive Frame Descriptor (RFD)
- */
-struct rfd_struct
-{
- u8 stat_low; /* status word */
- u8 stat_high; /* status word */
- u8 rfd_sf; /* 82596 mode only */
- u8 last; /* Bit15,Last Frame on List / Bit14,suspend */
- u16 next; /* linkoffset to next RFD */
- u16 rbd_offset; /* pointeroffset to RBD-buffer */
- u8 dest[6]; /* ethernet-address, destination */
- u8 source[6]; /* ethernet-address, source */
- u16 length; /* 802.3 frame-length */
- u16 zero_dummy; /* dummy */
-};
-
-#define RFD_LAST 0x80 /* last: last rfd in the list */
-#define RFD_SUSP 0x40 /* last: suspend RU after */
-#define RFD_COMPL 0x80
-#define RFD_OK 0x20
-#define RFD_BUSY 0x40
-#define RFD_ERR_LEN	0x10	/* Length error (if length-checking is enabled) */
-#define RFD_ERR_CRC 0x08 /* CRC error */
-#define RFD_ERR_ALGN 0x04 /* Alignment error */
-#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
-#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
-
-#define RFD_ERR_FTS	0x0080	/* Frame too short */
-#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
-#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
-#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */
-#define RFD_COLLDET 0x0001 /* Detected collision during reception */
-
-/*
- * Receive Buffer Descriptor (RBD)
- */
-struct rbd_struct
-{
- u16 status; /* status word,number of used bytes in buff */
- u16 next; /* pointeroffset to next RBD */
- u32 buffer; /* receive buffer address pointer */
- u16 size; /* size of this buffer */
- u16 zero_dummy; /* dummy */
-};
-
-#define RBD_LAST 0x8000 /* last buffer */
-#define RBD_USED 0x4000 /* this buffer has data */
-#define RBD_MASK 0x3fff /* size-mask for length */
-
-/*
- * Statusvalues for Commands/RFD
- */
-#define STAT_COMPL 0x8000 /* status: frame/command is complete */
-#define STAT_BUSY 0x4000 /* status: frame/command is busy */
-#define STAT_OK 0x2000 /* status: frame/command is ok */
-
-/*
- * Action-Commands
- */
-#define CMD_NOP 0x0000 /* NOP */
-#define CMD_IASETUP 0x0001 /* initial address setup command */
-#define CMD_CONFIGURE 0x0002 /* configure command */
-#define CMD_MCSETUP 0x0003 /* MC setup command */
-#define CMD_XMIT 0x0004 /* transmit command */
-#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
-#define CMD_DUMP 0x0006 /* dump command */
-#define CMD_DIAGNOSE 0x0007 /* diagnose command */
-
-/*
- * Action command bits
- */
-#define CMD_LAST 0x8000 /* indicates last command in the CBL */
-#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
-#define CMD_INT 0x2000 /* generate interrupt after execution */
-
-/*
- * NOP - command
- */
-struct nop_cmd_struct
-{
- u16 cmd_status; /* status of this command */
- u16 cmd_cmd; /* the command itself (+bits) */
- u16 cmd_link; /* offsetpointer to next command */
-};
-
-/*
- * IA Setup command
- */
-struct iasetup_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u8 iaddr[6];
-};
-
-/*
- * Configure command
- */
-struct configure_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u8 byte_cnt; /* size of the config-cmd */
- u8 fifo; /* fifo/recv monitor */
- u8 sav_bf; /* save bad frames (bit7=1)*/
- u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
- u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
- u8 ifs; /* inter frame spacing */
- u8 time_low; /* slot time low */
- u8 time_high; /* slot time high(0-2) and max. retries(4-7) */
- u8 promisc; /* promisc-mode(0) , et al (1-7) */
- u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */
- u8 fram_len; /* minimal frame len */
- u8 dummy; /* dummy */
-};
-
-/*
- * Multicast Setup command
- */
-struct mcsetup_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 mc_cnt; /* number of bytes in the MC-List */
- u8 mc_list[0][6]; /* pointer to 6 bytes entries */
-};
-
-/*
- * DUMP command
- */
-struct dump_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 dump_offset; /* pointeroffset to DUMP space */
-};
-
-/*
- * transmit command
- */
-struct transmit_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 tbd_offset; /* pointeroffset to TBD */
- u8 dest[6]; /* destination address of the frame */
- u16 length; /* user defined: 802.3 length / Ether type */
-};
-
-#define TCMD_ERRMASK 0x0fa0
-#define TCMD_MAXCOLLMASK 0x000f
-#define TCMD_MAXCOLL 0x0020
-#define TCMD_HEARTBEAT 0x0040
-#define TCMD_DEFERRED 0x0080
-#define TCMD_UNDERRUN 0x0100
-#define TCMD_LOSTCTS 0x0200
-#define TCMD_NOCARRIER 0x0400
-#define TCMD_LATECOLL 0x0800
-
-struct tdr_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 status;
-};
-
-#define TDR_LNK_OK 0x8000 /* No link problem identified */
-#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
-#define TDR_ET_OPN 0x2000 /* open, no correct termination */
-#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
-#define TDR_TIMEMASK 0x07ff /* mask for the time field */
-
-/*
- * Transmit Buffer Descriptor (TBD)
- */
-struct tbd_struct
-{
- u16 size; /* size + EOF-Flag(15) */
- u16 next; /* pointeroffset to next TBD */
- u32 buffer; /* pointer to buffer */
-};
-
-#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
-
-
-
-
diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c
deleted file mode 100644
index c9479e081b8a..000000000000
--- a/drivers/net/ethernet/i825xx/znet.c
+++ /dev/null
@@ -1,928 +0,0 @@
-/* znet.c: A Zenith Z-Note ethernet driver for Linux. */
-
-/*
- Written by Donald Becker.
-
- The author may be reached as becker@scyld.com.
- This driver is based on the Linux skeleton driver. The copyright of the
- skeleton driver is held by the United States Government, as represented
- by DIRNSA, and it is released under the GPL.
-
- Thanks to Mike Hollick for alpha testing and suggestions.
-
- References:
- The Crynwr packet driver.
-
- "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992
- Intel Microcommunications Databook, Vol. 1, 1990.
- As usual with Intel, the documentation is incomplete and inaccurate.
- I had to read the Crynwr packet driver to figure out how to actually
- use the i82593, and guess at what register bits matched the loosely
- related i82586.
-
- Theory of Operation
-
- The i82593 used in the Zenith Z-Note series operates using two(!) slave
- DMA channels, one interrupt, and one 8-bit I/O port.
-
-   While there are several ways to configure the '593 DMA system, I chose the one
- that seemed commensurate with the highest system performance in the face
- of moderate interrupt latency: Both DMA channels are configured as
- recirculating ring buffers, with one channel (#0) dedicated to Rx and
- the other channel (#1) to Tx and configuration. (Note that this is
- different than the Crynwr driver, where the Tx DMA channel is initialized
- before each operation. That approach simplifies operation and Tx error
- recovery, but requires additional I/O in normal operation and precludes
- transmit buffer chaining.)
-
- Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides
- a reasonable ring size for Rx, while simplifying DMA buffer allocation --
- DMA buffers must not cross a 128K boundary. (In truth the size selection
- was influenced by my lack of '593 documentation. I thus was constrained
- to use the Crynwr '593 initialization table, which sets the Rx ring size
- to 8K.)
-
- Despite my usual low opinion about Intel-designed parts, I must admit
- that the bulk data handling of the i82593 is a good design for
- an integrated system, like a laptop, where using two slave DMA channels
- doesn't pose a problem. I still take issue with using only a single I/O
- port. In the same controlled environment there are essentially no
- limitations on I/O space, and using multiple locations would eliminate
- the need for multiple operations when looking at status registers,
- setting the Rx ring boundary, or switching to promiscuous mode.
-
- I also question Zenith's selection of the '593: one of the advertised
- advantages of earlier Intel parts was that if you figured out the magic
- initialization incantation you could use the same part on many different
- network types. Zenith's use of the "FriendlyNet" (sic) connector rather
- than an on-board transceiver leads me to believe that they were planning
- to take advantage of this. But, uhmmm, the '593 omits all but ethernet
- functionality from the serial subsystem.
- */
-
-/* 10/2002
-
-   o Resurrected for Linux 2.5+ by Marc Zyngier <maz@wild-wind.fr.eu.org> :
-
-     - Removed strange DMA snooping in znet_send_packet, which led to
- TX buffer corruption on my laptop.
- - Use init_etherdev stuff.
- - Use kmalloc-ed DMA buffers.
- - Use as few global variables as possible.
- - Use proper resources management.
- - Use wireless/i82593.h as much as possible (structure, constants)
- - Compiles as module or build-in.
- - Now survives unplugging/replugging cable.
-
- Some code was taken from wavelan_cs.
-
- Tested on a vintage Zenith Z-Note 433Lnp+. Probably broken on
- anything else. Testers (and detailed bug reports) are welcome :-).
-
- o TODO :
-
- - Properly handle multicast
- - Understand why some traffic patterns add a 1s latency...
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/i82593.h>
-
-static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n";
-
-#ifndef ZNET_DEBUG
-#define ZNET_DEBUG 1
-#endif
-static unsigned int znet_debug = ZNET_DEBUG;
-module_param (znet_debug, int, 0);
-MODULE_PARM_DESC (znet_debug, "ZNet debug level");
-MODULE_LICENSE("GPL");
-
-/* The DMA modes we need aren't in <dma.h>. */
-#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */
-#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */
-#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
-#define RX_BUF_SIZE 8192
-#define TX_BUF_SIZE 8192
-#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
-
-#define TX_TIMEOUT (HZ/10)
-
-struct znet_private {
- int rx_dma, tx_dma;
- spinlock_t lock;
- short sia_base, sia_size, io_size;
- struct i82593_conf_block i593_init;
- /* The starting, current, and end pointers for the packet buffers. */
- ushort *rx_start, *rx_cur, *rx_end;
- ushort *tx_start, *tx_cur, *tx_end;
- ushort tx_buf_len; /* Tx buffer length, in words. */
-};
-
-/* Only one can be built-in;-> */
-static struct net_device *znet_dev;
-
-#define NETIDBLK_MAGIC "NETIDBLK"
-#define NETIDBLK_MAGIC_SIZE 8
-
-struct netidblk {
- char magic[NETIDBLK_MAGIC_SIZE]; /* The magic number (string) "NETIDBLK" */
- unsigned char netid[8]; /* The physical station address */
- char nettype, globalopt;
- char vendor[8]; /* The machine vendor and product name. */
- char product[8];
- char irq1, irq2; /* Interrupts, only one is currently used. */
- char dma1, dma2;
- short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */
- short iobase1, iosize1;
- short iobase2, iosize2; /* Second iobase unused. */
- char driver_options; /* Misc. bits */
- char pad;
-};
-
-static int znet_open(struct net_device *dev);
-static netdev_tx_t znet_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t znet_interrupt(int irq, void *dev_id);
-static void znet_rx(struct net_device *dev);
-static int znet_close(struct net_device *dev);
-static void hardware_init(struct net_device *dev);
-static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset);
-static void znet_tx_timeout (struct net_device *dev);
-
-/* Request needed resources */
-static int znet_request_resources (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
-
- if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev))
- goto failed;
- if (request_dma (znet->rx_dma, "ZNet rx"))
- goto free_irq;
- if (request_dma (znet->tx_dma, "ZNet tx"))
- goto free_rx_dma;
- if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA"))
- goto free_tx_dma;
- if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O"))
- goto free_sia;
-
- return 0; /* Happy ! */
-
- free_sia:
- release_region (znet->sia_base, znet->sia_size);
- free_tx_dma:
- free_dma (znet->tx_dma);
- free_rx_dma:
- free_dma (znet->rx_dma);
- free_irq:
- free_irq (dev->irq, dev);
- failed:
- return -1;
-}
-
-static void znet_release_resources (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
-
- release_region (znet->sia_base, znet->sia_size);
- release_region (dev->base_addr, znet->io_size);
- free_dma (znet->tx_dma);
- free_dma (znet->rx_dma);
- free_irq (dev->irq, dev);
-}
-
-/* Keep the magical SIA stuff in a single function... */
-static void znet_transceiver_power (struct net_device *dev, int on)
-{
- struct znet_private *znet = netdev_priv(dev);
- unsigned char v;
-
- /* Turn on/off the 82501 SIA, using zenith-specific magic. */
- /* Select LAN control register */
- outb(0x10, znet->sia_base);
-
- if (on)
- v = inb(znet->sia_base + 1) | 0x84;
- else
- v = inb(znet->sia_base + 1) & ~0x84;
-
- outb(v, znet->sia_base+1); /* Turn on/off LAN power (bit 2). */
-}
-
-/* Init the i82593, with current promisc/mcast configuration.
- Also used from hardware_init. */
-static void znet_set_multicast_list (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- struct i82593_conf_block *cfblk = &znet->i593_init;
-
- memset(cfblk, 0x00, sizeof(struct i82593_conf_block));
-
- /* The configuration block. What an undocumented nightmare.
- The first set of values are those suggested (without explanation)
- for ethernet in the Intel 82586 databook. The rest appear to be
- completely undocumented, except for cryptic notes in the Crynwr
- packet driver. This driver uses the Crynwr values verbatim. */
-
-	/* maz : Rewritten to take advantage of the wavelan includes.
- At least we have names, not just blind values */
-
- /* Byte 0 */
- cfblk->fifo_limit = 10; /* = 16 B rx and 80 B tx fifo thresholds */
- cfblk->forgnesi = 0; /* 0=82C501, 1=AMD7992B compatibility */
- cfblk->fifo_32 = 1;
- cfblk->d6mod = 0; /* Run in i82593 advanced mode */
- cfblk->throttle_enb = 1;
-
- /* Byte 1 */
- cfblk->throttle = 8; /* Continuous w/interrupts, 128-clock DMA. */
- cfblk->cntrxint = 0; /* enable continuous mode receive interrupts */
- cfblk->contin = 1; /* enable continuous mode */
-
- /* Byte 2 */
- cfblk->addr_len = ETH_ALEN;
- cfblk->acloc = 1; /* Disable source addr insertion by i82593 */
- cfblk->preamb_len = 2; /* 8 bytes preamble */
- cfblk->loopback = 0; /* Loopback off */
-
- /* Byte 3 */
- cfblk->lin_prio = 0; /* Default priorities & backoff methods. */
- cfblk->tbofstop = 0;
- cfblk->exp_prio = 0;
- cfblk->bof_met = 0;
-
- /* Byte 4 */
- cfblk->ifrm_spc = 6; /* 96 bit times interframe spacing */
-
- /* Byte 5 */
- cfblk->slottim_low = 0; /* 512 bit times slot time (low) */
-
- /* Byte 6 */
- cfblk->slottim_hi = 2; /* 512 bit times slot time (high) */
- cfblk->max_retr = 15; /* 15 collisions retries */
-
- /* Byte 7 */
- cfblk->prmisc = ((dev->flags & IFF_PROMISC) ? 1 : 0); /* Promiscuous mode */
- cfblk->bc_dis = 0; /* Enable broadcast reception */
- cfblk->crs_1 = 0; /* Don't transmit without carrier sense */
- cfblk->nocrc_ins = 0; /* i82593 generates CRC */
- cfblk->crc_1632 = 0; /* 32-bit Autodin-II CRC */
- cfblk->crs_cdt = 0; /* CD not to be interpreted as CS */
-
- /* Byte 8 */
- cfblk->cs_filter = 0; /* CS is recognized immediately */
- cfblk->crs_src = 0; /* External carrier sense */
- cfblk->cd_filter = 0; /* CD is recognized immediately */
-
- /* Byte 9 */
- cfblk->min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length */
-
- /* Byte A */
- cfblk->lng_typ = 1; /* Type/length checks OFF */
- cfblk->lng_fld = 1; /* Disable 802.3 length field check */
- cfblk->rxcrc_xf = 1; /* Don't transfer CRC to memory */
- cfblk->artx = 1; /* Disable automatic retransmission */
- cfblk->sarec = 1; /* Disable source addr trig of CD */
- cfblk->tx_jabber = 0; /* Disable jabber jam sequence */
- cfblk->hash_1 = 1; /* Use bits 0-5 in mc address hash */
- cfblk->lbpkpol = 0; /* Loopback pin active high */
-
- /* Byte B */
- cfblk->fdx = 0; /* Disable full duplex operation */
-
- /* Byte C */
- cfblk->dummy_6 = 0x3f; /* all ones, Default multicast addresses & backoff. */
- cfblk->mult_ia = 0; /* No multiple individual addresses */
- cfblk->dis_bof = 0; /* Disable the backoff algorithm ?! */
-
- /* Byte D */
- cfblk->dummy_1 = 1; /* set to 1 */
- cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */
- cfblk->mc_all = (!netdev_mc_empty(dev) ||
- (dev->flags & IFF_ALLMULTI)); /* multicast all mode */
- cfblk->rcv_mon = 0; /* Monitor mode disabled */
- cfblk->frag_acpt = 0; /* Do not accept fragments */
- cfblk->tstrttrs = 0; /* No start transmission threshold */
-
- /* Byte E */
- cfblk->fretx = 1; /* FIFO automatic retransmission */
- cfblk->runt_eop = 0; /* drop "runt" packets */
- cfblk->hw_sw_pin = 0; /* ?? */
- cfblk->big_endn = 0; /* Big Endian ? no... */
- cfblk->syncrqs = 1; /* Synchronous DRQ deassertion... */
- cfblk->sttlen = 1; /* 6 byte status registers */
- cfblk->rx_eop = 0; /* Signal EOP on packet reception */
- cfblk->tx_eop = 0; /* Signal EOP on packet transmission */
-
- /* Byte F */
- cfblk->rbuf_size = RX_BUF_SIZE >> 12; /* Set receive buffer size */
- cfblk->rcvstop = 1; /* Enable Receive Stop Register */
-
- if (znet_debug > 2) {
- int i;
- unsigned char *c;
-
- for (i = 0, c = (char *) cfblk; i < sizeof (*cfblk); i++)
- printk ("%02X ", c[i]);
- printk ("\n");
- }
-
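-	/* queue the config block in the Tx ring (length word first) and
-	 * tell the '593 to execute a CONFIGURE command from that channel */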
- *znet->tx_cur++ = sizeof(struct i82593_conf_block);
- memcpy(znet->tx_cur, cfblk, sizeof(struct i82593_conf_block));
- znet->tx_cur += sizeof(struct i82593_conf_block)/2;
- outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
-
- /* XXX FIXME maz : Add multicast addresses here, so having a
- * multicast address configured isn't equal to IFF_ALLMULTI */
-}
-
-static const struct net_device_ops znet_netdev_ops = {
- .ndo_open = znet_open,
- .ndo_stop = znet_close,
- .ndo_start_xmit = znet_send_packet,
- .ndo_set_rx_mode = znet_set_multicast_list,
- .ndo_tx_timeout = znet_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
- BIOS area. We just scan for the signature, and pull the vital parameters
- out of the structure. */
-
-static int __init znet_probe (void)
-{
- int i;
- struct netidblk *netinfo;
- struct znet_private *znet;
- struct net_device *dev;
- char *p;
- char *plast = phys_to_virt(0x100000 - NETIDBLK_MAGIC_SIZE);
- int err = -ENOMEM;
-
- /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
- for(p = (char *)phys_to_virt(0xf0000); p <= plast; p++)
- if (*p == 'N' &&
- strncmp(p, NETIDBLK_MAGIC, NETIDBLK_MAGIC_SIZE) == 0)
- break;
-
- if (p > plast) {
- if (znet_debug > 1)
- printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
- return -ENODEV;
- }
-
- dev = alloc_etherdev(sizeof(struct znet_private));
- if (!dev)
- return -ENOMEM;
-
- znet = netdev_priv(dev);
-
- netinfo = (struct netidblk *)p;
- dev->base_addr = netinfo->iobase1;
- dev->irq = netinfo->irq1;
-
- /* The station address is in the "netidblk" at 0x0f0000. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = netinfo->netid[i];
-
- printk(KERN_INFO "%s: ZNET at %#3lx, %pM"
- ", using IRQ %d DMA %d and %d.\n",
- dev->name, dev->base_addr, dev->dev_addr,
- dev->irq, netinfo->dma1, netinfo->dma2);
-
- if (znet_debug > 1) {
- printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
- dev->name, netinfo->vendor,
- netinfo->irq1, netinfo->irq2,
- netinfo->dma1, netinfo->dma2);
- printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
- dev->name, netinfo->iobase1, netinfo->iosize1,
- netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
- }
-
- if (znet_debug > 0)
- printk(KERN_INFO "%s", version);
-
- znet->rx_dma = netinfo->dma1;
- znet->tx_dma = netinfo->dma2;
- spin_lock_init(&znet->lock);
- znet->sia_base = 0xe6; /* Magic address for the 82501 SIA */
- znet->sia_size = 2;
- /* maz: Despite the '593 being advertised above as using a
-	 * single 8-bit I/O port, this driver does many 16-bit
-	 * accesses, so set io_size accordingly */
- znet->io_size = 2;
-
- if (!(znet->rx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
- goto free_dev;
- if (!(znet->tx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
- goto free_rx;
-
- if (!dma_page_eq (znet->rx_start, znet->rx_start + (RX_BUF_SIZE/2-1)) ||
- !dma_page_eq (znet->tx_start, znet->tx_start + (TX_BUF_SIZE/2-1))) {
- printk (KERN_WARNING "tx/rx crossing DMA frontiers, giving up\n");
- goto free_tx;
- }
-
- znet->rx_end = znet->rx_start + RX_BUF_SIZE/2;
- znet->tx_buf_len = TX_BUF_SIZE/2;
- znet->tx_end = znet->tx_start + znet->tx_buf_len;
-
- /* The ZNET-specific entries in the device structure. */
- dev->netdev_ops = &znet_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- err = register_netdev(dev);
- if (err)
- goto free_tx;
- znet_dev = dev;
- return 0;
-
- free_tx:
- kfree(znet->tx_start);
- free_rx:
- kfree(znet->rx_start);
- free_dev:
- free_netdev(dev);
- return err;
-}
-
-
-static int znet_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (znet_debug > 2)
- printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
-
- /* These should never fail. You can't add devices to a sealed box! */
- if (znet_request_resources (dev)) {
- printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
- return -EBUSY;
- }
-
- znet_transceiver_power (dev, 1);
-
- /* According to the Crynwr driver we should wait 50 msec. for the
-	   LAN clock to stabilize.  My experiments indicate that the '593 can
- be initialized immediately. The delay is probably needed for the
- DC-to-DC converter to come up to full voltage, and for the oscillator
- to be spot-on at 20Mhz before transmitting.
- Until this proves to be a problem we rely on the higher layers for the
- delay and save allocating a timer entry. */
-
- /* maz : Well, I'm getting every time the following message
- * without the delay on a 486@33. This machine is much too
- * fast... :-) So maybe the Crynwr driver wasn't wrong after
-	 * all, even if the message is completely harmless on my
- * setup. */
- mdelay (50);
-
- /* This follows the packet driver's lead, and checks for success. */
- if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
- printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
- dev->name);
-
- hardware_init(dev);
- netif_start_queue (dev);
-
- return 0;
-}
-
-
-static void znet_tx_timeout (struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- ushort event, tx_status, rx_offset, state;
-
- outb (CR0_STATUS_0, ioaddr);
- event = inb (ioaddr);
- outb (CR0_STATUS_1, ioaddr);
- tx_status = inw (ioaddr);
- outb (CR0_STATUS_2, ioaddr);
- rx_offset = inw (ioaddr);
- outb (CR0_STATUS_3, ioaddr);
- state = inb (ioaddr);
- printk (KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
- " resetting.\n", dev->name, event, tx_status, rx_offset, state);
- if (tx_status == TX_LOST_CRS)
- printk (KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
- dev->name);
- outb (OP0_RESET, ioaddr);
- hardware_init (dev);
- netif_wake_queue (dev);
-}
-
-static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct znet_private *znet = netdev_priv(dev);
- unsigned long flags;
- short length = skb->len;
-
- if (znet_debug > 4)
- printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- netif_stop_queue (dev);
-
- /* Check that the part hasn't reset itself, probably from suspend. */
- outb(CR0_STATUS_0, ioaddr);
- if (inw(ioaddr) == 0x0010 &&
- inw(ioaddr) == 0x0000 &&
- inw(ioaddr) == 0x0010) {
- if (znet_debug > 1)
- printk (KERN_WARNING "%s : waking up\n", dev->name);
- hardware_init(dev);
- znet_transceiver_power (dev, 1);
- }
-
- if (1) {
- unsigned char *buf = (void *)skb->data;
- ushort *tx_link = znet->tx_cur - 1;
- ushort rnd_len = (length + 1)>>1;
-
- dev->stats.tx_bytes+=length;
-
- if (znet->tx_cur >= znet->tx_end)
- znet->tx_cur = znet->tx_start;
- *znet->tx_cur++ = length;
- if (znet->tx_cur + rnd_len + 1 > znet->tx_end) {
- int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */
- memcpy(znet->tx_cur, buf, semi_cnt);
- rnd_len -= semi_cnt>>1;
- memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt);
- znet->tx_cur = znet->tx_start + rnd_len;
- } else {
- memcpy(znet->tx_cur, buf, skb->len);
- znet->tx_cur += rnd_len;
- }
- *znet->tx_cur++ = 0;
-
- spin_lock_irqsave(&znet->lock, flags);
- {
- *tx_link = OP0_TRANSMIT | CR0_CHNL;
- /* Is this always safe to do? */
- outb(OP0_TRANSMIT | CR0_CHNL, ioaddr);
- }
- spin_unlock_irqrestore (&znet->lock, flags);
-
- netif_start_queue (dev);
-
- if (znet_debug > 4)
- printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
- }
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
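The copy path above splits an outgoing frame across the end of the on-card transmit ring: when the rounded word count would run past tx_end, the first chunk is copied up to the end of the ring and the remainder wraps to tx_start. A minimal userspace sketch of that wrap-around copy follows; RING_WORDS and the variable names are toy stand-ins, and the length word written before the payload and the zero terminator after it are omitted for brevity.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define RING_WORDS 16                    /* ring size in 16-bit words (toy value) */

static uint16_t ring[RING_WORDS];        /* stand-in for the tx DMA buffer */
static uint16_t *cur = ring;             /* stand-in for znet->tx_cur */

/* Copy 'len' bytes into the word ring, wrapping at the end the same way
 * znet_send_packet() does: whole 16-bit words are advanced, and the tail
 * of the frame wraps back to the start of the ring. */
static void ring_copy(const unsigned char *buf, int len)
{
    int rnd_words = (len + 1) >> 1;                 /* round up to words */

    if (cur + rnd_words > ring + RING_WORDS) {
        int head_bytes = (int)(ring + RING_WORDS - cur) << 1;

        memcpy(cur, buf, head_bytes);               /* fill to the end */
        memcpy(ring, buf + head_bytes, len - head_bytes);
        cur = ring + (rnd_words - (head_bytes >> 1));
    } else {
        memcpy(cur, buf, len);
        cur += rnd_words;
    }
}

int main(void)
{
    unsigned char pkt[20];

    memset(pkt, 0xAB, sizeof(pkt));
    cur = ring + RING_WORDS - 4;                    /* force a wrap */
    ring_copy(pkt, sizeof(pkt));
    printf("cur offset after wrap: %ld words\n", (long)(cur - ring));
    return 0;
}

Running the sketch with the forced wrap lands cur six words into the ring, matching the byte-versus-word arithmetic the driver performs with semi_cnt and rnd_len.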
-
-/* The ZNET interrupt handler. */
-static irqreturn_t znet_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct znet_private *znet = netdev_priv(dev);
- int ioaddr;
- int boguscnt = 20;
- int handled = 0;
-
- spin_lock (&znet->lock);
-
- ioaddr = dev->base_addr;
-
- outb(CR0_STATUS_0, ioaddr);
- do {
- ushort status = inb(ioaddr);
- if (znet_debug > 5) {
- ushort result, rx_ptr, running;
- outb(CR0_STATUS_1, ioaddr);
- result = inw(ioaddr);
- outb(CR0_STATUS_2, ioaddr);
- rx_ptr = inw(ioaddr);
- outb(CR0_STATUS_3, ioaddr);
- running = inb(ioaddr);
- printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n",
- dev->name, status, result, rx_ptr, running, boguscnt);
- }
- if ((status & SR0_INTERRUPT) == 0)
- break;
-
- handled = 1;
-
- if ((status & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE ||
- (status & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE ||
- (status & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE) {
- int tx_status;
- outb(CR0_STATUS_1, ioaddr);
- tx_status = inw(ioaddr);
- /* It's undocumented, but tx_status seems to match the i82586. */
- if (tx_status & TX_OK) {
- dev->stats.tx_packets++;
- dev->stats.collisions += tx_status & TX_NCOL_MASK;
- } else {
- if (tx_status & (TX_LOST_CTS | TX_LOST_CRS))
- dev->stats.tx_carrier_errors++;
- if (tx_status & TX_UND_RUN)
- dev->stats.tx_fifo_errors++;
- if (!(tx_status & TX_HRT_BEAT))
- dev->stats.tx_heartbeat_errors++;
- if (tx_status & TX_MAX_COL)
- dev->stats.tx_aborted_errors++;
- /* ...and the catch-all. */
- if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL))
- dev->stats.tx_errors++;
-
- /* Transceiver may be stuck if cable
- * was removed while emitting a
- * packet. Flip it off, then on to
- * reset it. This is very empirical,
- * but it seems to work. */
-
- znet_transceiver_power (dev, 0);
- znet_transceiver_power (dev, 1);
- }
- netif_wake_queue (dev);
- }
-
- if ((status & SR0_RECEPTION) ||
- (status & SR0_EVENT_MASK) == SR0_STOP_REG_HIT) {
- znet_rx(dev);
- }
- /* Clear the interrupts we've handled. */
- outb(CR0_INT_ACK, ioaddr);
- } while (boguscnt--);
-
- spin_unlock (&znet->lock);
-
- return IRQ_RETVAL(handled);
-}
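The catch-all check in the transmit-error path above, written as (tx_status | mask) != mask, is easy to misread: it simply asks whether any bit outside the recognised error bits is set. A small standalone check with a hypothetical mask value makes the intent explicit.

#include <stdio.h>

#define KNOWN_ERRS 0x00f0u        /* hypothetical union of recognised error bits */

/* Returns 1 if 'status' carries any bit not covered by KNOWN_ERRS, which is
 * what (status | KNOWN_ERRS) != KNOWN_ERRS tests; it is equivalent to
 * (status & ~KNOWN_ERRS) != 0. */
static int has_unknown_bits(unsigned int status)
{
    return (status | KNOWN_ERRS) != KNOWN_ERRS;
}

int main(void)
{
    printf("%d %d\n", has_unknown_bits(0x0040), has_unknown_bits(0x0100));
    return 0;
}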
-
-static void znet_rx(struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int boguscount = 1;
- short next_frame_end_offset = 0; /* Offset of next frame start. */
- short *cur_frame_end;
- short cur_frame_end_offset;
-
- outb(CR0_STATUS_2, ioaddr);
- cur_frame_end_offset = inw(ioaddr);
-
- if (cur_frame_end_offset == znet->rx_cur - znet->rx_start) {
- printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n",
- dev->name, cur_frame_end_offset);
- return;
- }
-
- /* Use same method as the Crynwr driver: construct a forward list in
- the same area of the backwards links we now have. This allows us to
- pass packets to the upper layers in the order they were received --
- important for fast-path sequential operations. */
- while (znet->rx_start + cur_frame_end_offset != znet->rx_cur &&
- ++boguscount < 5) {
- unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
- int count, status;
-
- if (cur_frame_end_offset < 4) {
- /* Oh no, we have a special case: the frame trailer wraps around
- the end of the ring buffer. We've saved space at the end of
- the ring buffer for just this problem. */
- memcpy(znet->rx_end, znet->rx_start, 8);
- cur_frame_end_offset += (RX_BUF_SIZE/2);
- }
- cur_frame_end = znet->rx_start + cur_frame_end_offset - 4;
-
- lo_status = *cur_frame_end++;
- hi_status = *cur_frame_end++;
- status = ((hi_status & 0xff) << 8) + (lo_status & 0xff);
- lo_cnt = *cur_frame_end++;
- hi_cnt = *cur_frame_end++;
- count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff);
-
- if (znet_debug > 5)
- printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x"
- " count %#x status %04x.\n",
- cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt,
- count, status);
- cur_frame_end[-4] = status;
- cur_frame_end[-3] = next_frame_end_offset;
- cur_frame_end[-2] = count;
- next_frame_end_offset = cur_frame_end_offset;
- cur_frame_end_offset -= ((count + 1)>>1) + 3;
- if (cur_frame_end_offset < 0)
- cur_frame_end_offset += RX_BUF_SIZE/2;
- }
-
- /* Now step forward through the list. */
- do {
- ushort *this_rfp_ptr = znet->rx_start + next_frame_end_offset;
- int status = this_rfp_ptr[-4];
- int pkt_len = this_rfp_ptr[-2];
-
- if (znet_debug > 5)
- printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x"
- " next %04x.\n", next_frame_end_offset<<1, status, pkt_len,
- this_rfp_ptr[-3]<<1);
- /* Once again we must assume that the i82586 docs apply. */
- if ( ! (status & RX_RCV_OK)) { /* There was an error. */
- dev->stats.rx_errors++;
- if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++;
- if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++;
-#if 0
- if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */
- if (status & 0x0100) dev->stats.rx_fifo_errors++;
-#else
- /* maz : Wild guess... */
- if (status & RX_OVRRUN) dev->stats.rx_over_errors++;
-#endif
- if (status & RX_SRT_FRM) dev->stats.rx_length_errors++;
- } else if (pkt_len > 1536) {
- dev->stats.rx_length_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len);
- if (skb == NULL) {
- if (znet_debug)
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- break;
- }
-
- if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) {
- int semi_cnt = (znet->rx_end - znet->rx_cur)<<1;
- memcpy(skb_put(skb,semi_cnt), znet->rx_cur, semi_cnt);
- memcpy(skb_put(skb,pkt_len-semi_cnt), znet->rx_start,
- pkt_len - semi_cnt);
- } else {
- memcpy(skb_put(skb,pkt_len), znet->rx_cur, pkt_len);
- if (znet_debug > 6) {
- unsigned int *packet = (unsigned int *) skb->data;
- printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0],
- packet[1], packet[2], packet[3]);
- }
- }
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- znet->rx_cur = this_rfp_ptr;
- if (znet->rx_cur >= znet->rx_end)
- znet->rx_cur -= RX_BUF_SIZE/2;
- update_stop_hit(ioaddr, (znet->rx_cur - znet->rx_start)<<1);
- next_frame_end_offset = this_rfp_ptr[-3];
- if (next_frame_end_offset == 0) /* Read all the frames? */
- break; /* Done for now */
- this_rfp_ptr = znet->rx_start + next_frame_end_offset;
- } while (--boguscount);
-
- /* If any worth-while packets have been received, dev_rint()
- has done a mark_bh(INET_BH) for us and will work on them
- when we get to the bottom-half routine. */
-}
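znet_rx() first walks the receive ring backwards, because each frame trailer left by the i82593 records the offset of the previous trailer, and rewrites those links so frames can then be delivered oldest-first. The idea can be shown on its own with a toy index array; the field names here are invented and frame 1 plays the role of the oldest frame, where rx_cur would sit in the driver.

#include <stdio.h>

int main(void)
{
    /* prev[i] holds the index of the frame received before frame i
     * (0 terminates), mirroring the backward links in the rx ring. */
    int prev[] = { 0, 0, 1, 2, 3 };   /* frame 4 newest, frame 1 oldest */
    int next[5] = { 0 };
    int i;

    for (i = 4; prev[i] != 0; i = prev[i])   /* build forward links */
        next[prev[i]] = i;

    for (i = 1; i != 0; i = next[i])         /* deliver oldest first */
        printf("deliver frame %d\n", i);
    return 0;
}

The two passes correspond to the two loops in znet_rx(): the first rewrites the trailers in place, the second steps forward through them handing packets to the stack in arrival order.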
-
-/* The inverse routine to znet_open(). */
-static int znet_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- netif_stop_queue (dev);
-
- outb(OP0_RESET, ioaddr); /* CMD0_RESET */
-
- if (znet_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- /* Turn off transceiver power. */
- znet_transceiver_power (dev, 0);
-
- znet_release_resources (dev);
-
- return 0;
-}
-
-static void show_dma(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
- unsigned char stat = inb (ioaddr);
- struct znet_private *znet = netdev_priv(dev);
- unsigned long flags;
- short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE;
- unsigned addr = inb(dma_port);
- short residue;
-
- addr |= inb(dma_port) << 8;
- residue = get_dma_residue(znet->tx_dma);
-
- if (znet_debug > 1) {
- flags=claim_dma_lock();
- printk(KERN_DEBUG "Stat:%02x Addr: %04x cnt:%3x\n",
- stat, addr<<1, residue);
- release_dma_lock(flags);
- }
-}
-
-/* Initialize the hardware. We have to do this when the board is open()ed
- or when we come out of suspend mode. */
-static void hardware_init(struct net_device *dev)
-{
- unsigned long flags;
- short ioaddr = dev->base_addr;
- struct znet_private *znet = netdev_priv(dev);
-
- znet->rx_cur = znet->rx_start;
- znet->tx_cur = znet->tx_start;
-
- /* Reset the chip, and start it up. */
- outb(OP0_RESET, ioaddr);
-
- flags=claim_dma_lock();
- disable_dma(znet->rx_dma); /* reset by an interrupting task. */
- clear_dma_ff(znet->rx_dma);
- set_dma_mode(znet->rx_dma, DMA_RX_MODE);
- set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start));
- set_dma_count(znet->rx_dma, RX_BUF_SIZE);
- enable_dma(znet->rx_dma);
- /* Now set up the Tx channel. */
- disable_dma(znet->tx_dma);
- clear_dma_ff(znet->tx_dma);
- set_dma_mode(znet->tx_dma, DMA_TX_MODE);
- set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start));
- set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
- enable_dma(znet->tx_dma);
- release_dma_lock(flags);
-
- if (znet_debug > 1)
- printk(KERN_DEBUG "%s: Initializing the i82593, rx buf %p tx buf %p\n",
- dev->name, znet->rx_start,znet->tx_start);
- /* Do an empty configure command, just like the Crynwr driver. This
- resets the chip to its default values. */
- *znet->tx_cur++ = 0;
- *znet->tx_cur++ = 0;
- show_dma(dev);
- outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
-
- znet_set_multicast_list (dev);
-
- *znet->tx_cur++ = 6;
- memcpy(znet->tx_cur, dev->dev_addr, 6);
- znet->tx_cur += 3;
- show_dma(dev);
- outb(OP0_IA_SETUP | CR0_CHNL, ioaddr);
- show_dma(dev);
-
- update_stop_hit(ioaddr, 8192);
- if (znet_debug > 1) printk(KERN_DEBUG "enabling Rx.\n");
- outb(OP0_RCV_ENABLE, ioaddr);
- netif_start_queue (dev);
-}
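Both channels above are programmed with the canonical ISA DMA sequence: take the DMA lock, disable the channel, clear the flip-flop, set mode, address and count, then re-enable. A kernel-style sketch of that sequence for a single channel is shown below; the channel number and buffer are placeholders, and DMA_MODE_READ stands in for the driver's DMA_RX_MODE.

#include <asm/dma.h>   /* claim_dma_lock(), disable_dma(), set_dma_mode(), ... */
#include <asm/io.h>    /* isa_virt_to_bus() */

/* Program one ISA DMA channel; the channel must be idle while its
 * registers are rewritten, hence the disable/enable bracket. */
static void isa_dma_setup(unsigned int chan, void *buf, unsigned int len)
{
    unsigned long flags = claim_dma_lock();

    disable_dma(chan);
    clear_dma_ff(chan);                 /* reset the byte-pointer flip-flop */
    set_dma_mode(chan, DMA_MODE_READ);
    set_dma_addr(chan, isa_virt_to_bus(buf));
    set_dma_count(chan, len);
    enable_dma(chan);

    release_dma_lock(flags);
}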
-
-static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
-{
- outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, ioaddr);
- if (znet_debug > 5)
- printk(KERN_DEBUG "Updating stop hit with value %02x.\n",
- (rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE);
- outb((rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE, ioaddr);
- outb(OP1_SWIT_TO_PORT_0, ioaddr);
-}
-
-static __exit void znet_cleanup (void)
-{
- if (znet_dev) {
- struct znet_private *znet = netdev_priv(znet_dev);
-
- unregister_netdev (znet_dev);
- kfree (znet->rx_start);
- kfree (znet->tx_start);
- free_netdev (znet_dev);
- }
-}
-
-module_init (znet_probe);
-module_exit (znet_cleanup);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 19b64de7124b..328f47c92e26 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -76,16 +76,16 @@ MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
"port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
"Default = 1");
@@ -1921,10 +1921,8 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
u64 hret;
ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
- if (!ehea_mcl_entry) {
- pr_err("no mem for mcl_entry\n");
+ if (!ehea_mcl_entry)
return;
- }
INIT_LIST_HEAD(&ehea_mcl_entry->list);
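The updated parameter descriptions narrow the documented queue sizes to 2^x - 1 with x in [7..14], that is 127 up to 16383 entries. A quick standalone check of whether a requested entry count matches that shape (the function name is illustrative, not from the driver):

#include <stdio.h>

/* A queue size is acceptable when it equals 2^x - 1 for some x in [7..14]. */
static int valid_queue_entries(unsigned int n)
{
    unsigned int x;

    for (x = 7; x <= 14; x++)
        if (n == (1u << x) - 1)
            return 1;
    return 0;
}

int main(void)
{
    printf("127: %d  128: %d  16383: %d\n",
           valid_queue_entries(127), valid_queue_entries(128),
           valid_queue_entries(16383));
    return 0;
}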
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 8364815c32ff..99b6c2a38dbf 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -39,26 +39,6 @@
 * hcp_* - structures, variables and functions related to Hypervisor Calls
*/
-static inline u32 get_longbusy_msecs(int long_busy_ret_code)
-{
- switch (long_busy_ret_code) {
- case H_LONG_BUSY_ORDER_1_MSEC:
- return 1;
- case H_LONG_BUSY_ORDER_10_MSEC:
- return 10;
- case H_LONG_BUSY_ORDER_100_MSEC:
- return 100;
- case H_LONG_BUSY_ORDER_1_SEC:
- return 1000;
- case H_LONG_BUSY_ORDER_10_SEC:
- return 10000;
- case H_LONG_BUSY_ORDER_100_SEC:
- return 100000;
- default:
- return 1;
- }
-}
-
/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
#define EHEA_MAX_RPAGE 512
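The helper removed here translated the hypervisor's H_LONG_BUSY_* return codes into a back-off delay; presumably an equivalent now comes from a shared PowerPC hypervisor-call header rather than a per-driver copy. The sketch below shows the retry pattern such a helper supports. issue_hcall() is a hypothetical stand-in for the caller's hcall wrapper, and the H_* macros are assumed to be provided by asm/hvcall.h.

#include <linux/delay.h>
#include <asm/hvcall.h>    /* H_BUSY, H_IS_LONG_BUSY() -- assumed to be here */

/* Hypothetical hcall wrapper returning a hypervisor status code. */
extern long issue_hcall(void *arg);

/* Retry a busy hcall, sleeping for the interval encoded in the
 * long-busy return code before each retry. */
static long issue_hcall_retry(void *arg)
{
    long rc;

    do {
        rc = issue_hcall(arg);
        if (H_IS_LONG_BUSY(rc))
            msleep(get_longbusy_msecs(rc));
    } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

    return rc;
}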
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 27f881758d16..9b03033bb557 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -64,11 +64,10 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
}
queue->queue_length = nr_of_pages * pagesize;
- queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
- if (!queue->queue_pages) {
- pr_err("no mem for queue_pages\n");
+ queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
+ GFP_KERNEL);
+ if (!queue->queue_pages)
return -ENOMEM;
- }
/*
* allocate pages for queue:
@@ -129,10 +128,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
void *vpage;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- pr_err("no mem for cq\n");
+ if (!cq)
goto out_nomem;
- }
cq->attr.max_nr_of_cqes = nr_of_cqe;
cq->attr.cq_token = cq_token;
@@ -257,10 +254,8 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
struct ehea_eq *eq;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
- if (!eq) {
- pr_err("no mem for eq\n");
+ if (!eq)
return NULL;
- }
eq->adapter = adapter;
eq->attr.type = type;
@@ -428,10 +423,8 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- pr_err("no mem for qp\n");
+ if (!qp)
return NULL;
- }
qp->adapter = adapter;
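The switch from kmalloc(n * size) to kmalloc_array(n, size, ...) matters because the open-coded multiplication can silently overflow for a large page count and hand back a buffer that is too small; kmalloc_array refuses such requests instead. The overflow check it relies on can be sketched in plain C (illustrative only, userspace malloc stands in for the kernel allocator):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Allocate n elements of 'size' bytes, failing cleanly if n * size would
 * overflow size_t -- the guarantee kmalloc_array() provides. */
static void *alloc_array(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;                       /* would overflow, refuse */
    return malloc(n * size);
}

int main(void)
{
    void *ok  = alloc_array(1024, sizeof(void *));
    void *bad = alloc_array(SIZE_MAX / 2, 16);

    printf("ok=%p bad=%p\n", ok, bad);     /* bad is NULL */
    free(ok);
    return 0;
}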
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 256bdb8e1994..4989481c19f0 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2190,11 +2190,10 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
{
struct emac_instance *dev = netdev_priv(ndev);
- strcpy(info->driver, "ibm_emac");
- strcpy(info->version, DRV_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
- dev->cell_index, dev->ofdev->dev.of_node->full_name);
+ strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
+ dev->cell_index, dev->ofdev->dev.of_node->full_name);
info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
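The strcpy/sprintf calls into the fixed-size ethtool_drvinfo fields are replaced above by bounded strlcpy/snprintf variants, which truncate rather than overrun and always leave the destination NUL-terminated. A tiny userspace demonstration of that truncation behaviour, with an illustrative 8-byte field and a local strlcpy-like helper (strlcpy itself is not in glibc):

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy-like copy: never writes past 'size' bytes and always
 * NUL-terminates when size > 0.  Illustrative only. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = (len >= size) ? size - 1 : len;
        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return len;            /* callers can detect truncation: len >= size */
}

int main(void)
{
    char field[8];

    bounded_copy(field, "ibm_emac_driver", sizeof(field));
    printf("stored: \"%s\"\n", field);     /* "ibm_ema": truncated, terminated */
    return 0;
}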
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 50ea12bfb579..1f7ecf57181e 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -528,12 +528,9 @@ static int mal_probe(struct platform_device *ofdev)
irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
- if (!mal) {
- printk(KERN_ERR
- "mal%d: out of memory allocating MAL structure!\n",
- index);
+ if (!mal)
return -ENOMEM;
- }
+
mal->index = index;
mal->ofdev = ofdev;
mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f2fdbb79837e..c859771a9902 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -637,7 +637,6 @@ static int ibmveth_open(struct net_device *netdev)
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
if (!adapter->bounce_buffer) {
- netdev_err(netdev, "unable to allocate bounce buffer\n");
rc = -ENOMEM;
goto err_out_free_irq;
}
@@ -722,9 +721,8 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
- strncpy(info->version, ibmveth_driver_version,
- sizeof(info->version) - 1);
+ strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
+ strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
index 3aff81d7989f..5119ef18953b 100644
--- a/drivers/net/ethernet/icplus/Kconfig
+++ b/drivers/net/ethernet/icplus/Kconfig
@@ -4,7 +4,7 @@
config IP1000
tristate "IP1000 Gigabit Ethernet support"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
select NET_CORE
select MII
---help---
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index ddee4060948a..05f7264c51f7 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -5,11 +5,6 @@
config NET_VENDOR_INTEL
bool "Intel devices"
default y
- depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
- ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
- GSC || BVME6000 || MVME16x || \
- (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
- EXPERIMENTAL
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -74,6 +69,7 @@ config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
+ select PTP_1588_CLOCK
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -94,6 +90,8 @@ config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
select PTP_1588_CLOCK
+ select I2C
+ select I2C_ALGOBIT
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -112,6 +110,17 @@ config IGB
To compile this driver as a module, choose M here. The module
will be called igb.
+config IGB_HWMON
+ bool "Intel(R) PCI-Express Gigabit adapters HWMON support"
+ default y
+ depends on IGB && HWMON && !(IGB=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose thermal sensor data on Intel devices.
+
+ Some of our devices contain thermal sensors, both external and internal.
+ This data is available via the hwmon sysfs interface and exposes
+ the onboard sensors.
+
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
default y
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index a59f0779e1c3..ec800b093e7e 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2928,8 +2928,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e100_phy_init(nic);
memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
- memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!eeprom_bad_csum_allow) {
netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
err = -EAGAIN;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 2b6cd02bfba0..26d9cd59ec75 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -81,68 +81,69 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#define E1000_MAX_INTR 10
+#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
-#define E1000_DEFAULT_TXD 256
-#define E1000_MAX_TXD 256
-#define E1000_MIN_TXD 48
-#define E1000_MAX_82544_TXD 4096
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 48
+#define E1000_MAX_82544_TXD 4096
-#define E1000_DEFAULT_RXD 256
-#define E1000_MAX_RXD 256
-#define E1000_MIN_RXD 48
-#define E1000_MAX_82544_RXD 4096
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 48
+#define E1000_MAX_82544_RXD 4096
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define E1000_RXBUFFER_128 128 /* Used for packet split */
-#define E1000_RXBUFFER_256 256 /* Used for packet split */
-#define E1000_RXBUFFER_512 512
-#define E1000_RXBUFFER_1024 1024
-#define E1000_RXBUFFER_2048 2048
-#define E1000_RXBUFFER_4096 4096
-#define E1000_RXBUFFER_8192 8192
-#define E1000_RXBUFFER_16384 16384
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
/* SmartSpeed delimiters */
-#define E1000_SMARTSPEED_DOWNSHIFT 3
-#define E1000_SMARTSPEED_MAX 15
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
/* Packet Buffer allocations */
-#define E1000_PBA_BYTES_SHIFT 0xA
-#define E1000_TX_HEAD_ADDR_SHIFT 7
-#define E1000_PBA_TX_MASK 0xFFFF0000
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
/* Flow Control Watermarks */
-#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
-#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
-#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define AUTO_ALL_MODES 0
-#define E1000_EEPROM_82544_APM 0x0004
-#define E1000_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct e1000_buffer {
struct sk_buff *skb;
dma_addr_t dma;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 14e30515f6aa..43462d596a4e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev,
if (hw->media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
- * and HALF_DUPLEX != DUPLEX_HALF */
-
+ * and HALF_DUPLEX != DUPLEX_HALF
+ */
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
- ETH_TP_MDI_X :
- ETH_TP_MDI);
+ ETH_TP_MDI_X : ETH_TP_MDI);
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev,
ADVERTISED_Autoneg;
else
hw->autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
ecmd->advertising = hw->autoneg_advertised;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- /*
- * If the link is not reported up to netdev, interrupts are disabled,
+ /* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
le16_to_cpus(&eeprom_buff[i]);
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
- eeprom->len);
+ eeprom->len);
kfree(eeprom_buff);
return ret_val;
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
+ /* need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
+ /* need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
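The reworded comments above describe why e1000_set_eeprom() reads the first and last words before writing: the EEPROM is accessed in 16-bit words, so a byte range with an odd start or end only changes half of a word and the untouched byte must be preserved. A small read-modify-write sketch against a toy word-granular device (names and sizes are invented):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint16_t device[4] = { 0x1122, 0x3344, 0x5566, 0x7788 };

static uint16_t dev_read(unsigned int w)              { return device[w]; }
static void     dev_write(unsigned int w, uint16_t v) { device[w] = v; }

/* Write 'len' bytes at byte 'offset' to a device that only allows
 * whole-word access: partially covered words are read first, patched,
 * then written back.  Byte order within a word follows the host here. */
static void write_bytes(unsigned int offset, const uint8_t *src, unsigned int len)
{
    unsigned int first = offset >> 1, last = (offset + len - 1) >> 1, w;
    uint16_t buf[4];

    for (w = first; w <= last; w++)
        buf[w - first] = dev_read(w);                  /* read */
    memcpy((uint8_t *)buf + (offset & 1), src, len);   /* modify */
    for (w = first; w <= last; w++)
        dev_write(w, buf[w - first]);                  /* write back */
}

int main(void)
{
    const uint8_t patch[3] = { 0xAA, 0xBB, 0xCC };

    write_bytes(1, patch, 3);                          /* odd start offset */
    printf("%04x %04x %04x %04x\n",
           device[0], device[1], device[2], device[3]);
    return 0;
}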
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+ GFP_KERNEL);
if (!txdr)
goto err_alloc_tx;
- rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+ GFP_KERNEL);
if (!rxdr)
goto err_alloc_rx;
@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
goto err_setup_tx;
/* save the new, restore the old in order to free it,
- * then restore the new back again */
+ * then restore the new back again
+ */
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (hw->mac_type >= e1000_82543) {
-
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
}
-
} else {
-
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
-
}
value = E1000_MC_TBL_SIZE;
@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
- /* NOTE: we don't test MSI interrupts here, yet */
- /* Hook up test interrupt handler just for this test */
+ /* NOTE: we don't test MSI interrupts here, yet
+ * Hook up test interrupt handler just for this test
+ */
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
- netdev))
+ netdev))
shared_int = false;
else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
if (hw->media_type == e1000_media_type_copper &&
hw->phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else {
/* Set the ILOS bit on the fiber Nic is half
- * duplex link is detected. */
+ * duplex link is detected.
+ */
stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
- 1024);
+ 1024);
if (!ret_val)
good_cnt++;
if (unlikely(++l == rxdr->count)) l = 0;
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
hw->serdes_has_link = false;
/* On some blade server designs, link establishment
- * could take as long as 2-3 minutes */
+ * could take as long as 2-3 minutes
+ */
do {
e1000_check_for_link(hw);
if (hw->serdes_has_link)
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
e_info(hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
+ * interfere with test result
+ */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
+ * so exclude FUNC_1 ports from having WoL enabled
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) {
wol->supported = 0;
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
+ * supported by this hardware
+ */
if (e1000_wol_exclusion(adapter, wol) ||
!device_can_wakeup(&adapter->pdev->dev))
return;
@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
-/* BUG_ON(i != E1000_STATS_LEN); */
+/* BUG_ON(i != E1000_STATS_LEN); */
}
static void e1000_get_strings(struct net_device *netdev, u32 stringset,
@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
-/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
- .get_drvinfo = e1000_get_drvinfo,
- .get_regs_len = e1000_get_regs_len,
- .get_regs = e1000_get_regs,
- .get_wol = e1000_get_wol,
- .set_wol = e1000_set_wol,
- .get_msglevel = e1000_get_msglevel,
- .set_msglevel = e1000_set_msglevel,
- .nway_reset = e1000_nway_reset,
- .get_link = e1000_get_link,
- .get_eeprom_len = e1000_get_eeprom_len,
- .get_eeprom = e1000_get_eeprom,
- .set_eeprom = e1000_set_eeprom,
- .get_ringparam = e1000_get_ringparam,
- .set_ringparam = e1000_set_ringparam,
- .get_pauseparam = e1000_get_pauseparam,
- .set_pauseparam = e1000_set_pauseparam,
- .self_test = e1000_diag_test,
- .get_strings = e1000_get_strings,
- .set_phys_id = e1000_set_phys_id,
- .get_ethtool_stats = e1000_get_ethtool_stats,
- .get_sset_count = e1000_get_sset_count,
- .get_coalesce = e1000_get_coalesce,
- .set_coalesce = e1000_set_coalesce,
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = e1000_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8fedd2451538..2879b9631e15 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
if (hw->phy_init_script) {
msleep(20);
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of this routine. */
+ /* Save off the current value of register 0x2F5B to be restored
+ * at the end of this routine.
+ */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
case e1000_82541:
case e1000_82541_rev_2:
/* These controllers can't ack the 64-bit write when issuing the
- * reset, so use IO-mapping as a workaround to issue the reset */
+ * reset, so use IO-mapping as a workaround to issue the reset
+ */
E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
break;
case e1000_82545_rev_3:
@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
break;
}
- /* After MAC reset, force reload of EEPROM to restore power-on settings to
- * device. Later controllers reload the EEPROM automatically, so just wait
- * for reload to complete.
+ /* After MAC reset, force reload of EEPROM to restore power-on settings
+ * to device. Later controllers reload the EEPROM automatically, so
+ * just wait for reload to complete.
*/
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
msleep(5);
}
- /* Setup the receive address. This involves initializing all of the Receive
- * Address Registers (RARs 0 - 15).
+ /* Setup the receive address. This involves initializing all of the
+ * Receive Address Registers (RARs 0 - 15).
*/
e1000_init_rx_addrs(hw);
@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* use write flush to prevent Memory Write Block (MWB) from
- * occurring when accessing our register space */
+ * occurring when accessing our register space
+ */
E1000_WRITE_FLUSH();
}
@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw)
case e1000_82546_rev_3:
break;
default:
- /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ /* Workaround for PCI-X problem when BIOS sets MMRBC
+ * incorrectly.
+ */
if (hw->bus_type == e1000_bus_type_pcix
&& e1000_pcix_get_mmrbc(hw) > 2048)
e1000_pcix_set_mmrbc(hw, 2048);
@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
ctrl_ext = er32(CTRL_EXT);
/* Relaxed ordering must be disabled to avoid a parity
- * error crash in a PCI slot. */
+ * error crash in a PCI slot.
+ */
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
}
@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ew32(FCRTL, 0);
ew32(FCRTH, 0);
} else {
- /* We need to set up the Receive Threshold high and low water marks
- * as well as (optionally) enabling the transmission of XON frames.
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
*/
if (hw->fc_send_xon) {
ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e1000_config_collision_dist(hw);
/* Check for a software override of the flow control settings, and setup
- * the device accordingly. If auto-negotiation is enabled, then software
- * will have to set the "PAUSE" bits to the correct value in the Tranmsit
- * Config Word Register (TXCW) and re-start auto-negotiation. However, if
- * auto-negotiation is disabled, then software will have to manually
- * configure the two flow control enable bits in the CTRL register.
+ * the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Tranmsit Config Word Register (TXCW) and re-start
+ * auto-negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
*
* The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames, but
- * not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we do
- * not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
*/
switch (hw->fc) {
case E1000_FC_NONE:
- /* Flow control is completely disabled by a software over-ride. */
+ /* Flow ctrl is completely disabled by a software over-ride */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case E1000_FC_RX_PAUSE:
- /* RX Flow control is enabled and TX Flow control is disabled by a
- * software over-ride. Since there really isn't a way to advertise
- * that we are capable of RX Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric RX PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
+ /* Rx Flow control is enabled and Tx Flow control is disabled by
+ * a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case E1000_FC_TX_PAUSE:
- /* TX Flow control is enabled, and RX Flow control is disabled, by a
- * software over-ride.
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case E1000_FC_FULL:
- /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
- /* Since auto-negotiation is enabled, take the link out of reset (the link
- * will be in reset, because we previously reset the chip). This will
- * restart auto-negotiation. If auto-negotiation is successful then the
- * link-up status bit will be set and the flow control enable bits (RFCE
- * and TFCE) will be set according to their negotiated value.
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
*/
e_dbg("Auto-negotiation enabled\n");
@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
hw->txcw = txcw;
msleep(1);
- /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
- * indication in the Device Status Register. Time-out if a link isn't
- * seen in 500 milliseconds seconds (Auto-negotiation should complete in
- * less than 500 milliseconds even if the other end is doing it in SW).
- * For internal serdes, we just assume a signal is present, then poll.
+ /* If we have a signal (the cable is plugged in) then poll for a
+ * "Link-Up" indication in the Device Status Register. Time-out if a
+ * link isn't seen in 500 milliseconds seconds (Auto-negotiation should
+ * complete in less than 500 milliseconds even if the other end is doing
+ * it in SW). For internal serdes, we just assume a signal is present,
+ * then poll.
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
- * e1000_check_for_link. This routine will force the link up if
- * we detect a signal. This will allow us to communicate with
- * non-autonegotiating link partners.
+ * e1000_check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
- /* With 82543, we need to force speed and duplex on the MAC equal to what
- * the PHY speed and duplex configuration is. In addition, we need to
- * perform a hardware reset on the PHY to take it out of reset.
+ /* With 82543, we need to force speed and duplex on the MAC equal to
+ * what the PHY speed and duplex configuration is. In addition, we need
+ * to perform a hardware reset on the PHY to take it out of reset.
*/
if (hw->mac_type > e1000_82543) {
ctrl |= E1000_CTRL_SLU;
@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
- * resolution as hardware default. */
+ * resolution as hardware default.
+ */
if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */
ret_val =
@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (hw->autoneg) {
/* Setup autoneg and flow control advertisement
- * and perform autonegotiation */
+ * and perform autonegotiation
+ */
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
- * depending on value from forced_speed_duplex. */
+ * depending on value from forced_speed_duplex.
+ */
e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start
+ * auto-negotiation.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* capable of RX Pause ONLY, we will advertise that we
* support both symmetric and asymmetric RX PAUSE. Later
* (in e1000_config_fc_after_link_up) we will disable the
- *hw's ability to send PAUSE frames.
+ * hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Are we forcing Full or Half Duplex? */
if (hw->forced_speed_duplex == e1000_100_full ||
hw->forced_speed_duplex == e1000_10_full) {
- /* We want to force full duplex so we SET the full duplex bits in the
- * Device and MII Control Registers.
+ /* We want to force full duplex so we SET the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
e_dbg("Full Duplex\n");
} else {
- /* We want to force half duplex so we CLEAR the full duplex bits in
- * the Device and MII Control Registers.
+ /* We want to force half duplex so we CLEAR the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed are duplex are forced.
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires
+ * MDI forced whenever speed are duplex are forced.
*/
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
ret_val =
@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
- /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ /* Wait for autoneg to complete or 4.5 seconds to expire */
for (i = PHY_FORCE_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
msleep(100);
}
if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
- /* We didn't get link. Reset the DSP and wait again for link. */
+ /* We didn't get link. Reset the DSP and wait again
+ * for link.
+ */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
- /* This loop will early-out if the link condition has been met. */
+ /* This loop will early-out if the link condition has been
+ * met
+ */
for (i = PHY_FORCE_TIME; i > 0; i--) {
if (mii_status_reg & MII_SR_LINK_STATUS)
break;
msleep(100);
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
}
if (hw->phy_type == e1000_phy_m88) {
- /* Because we reset the PHY above, we need to re-force TX_CLK in the
- * Extended PHY Specific Control Register to 25MHz clock. This value
- * defaults back to a 2.5MHz clock when the PHY is reset.
+ /* Because we reset the PHY above, we need to re-force TX_CLK in
+ * the Extended PHY Specific Control Register to 25MHz clock.
+ * This value defaults back to a 2.5MHz clock when the PHY is
+ * reset.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* In addition, because of the s/w reset above, we need to enable CRS on
- * TX. This must be set for both full and half duplex operation.
+ /* In addition, because of the s/w reset above, we need to
+ * enable CRS on Tx. This must be set for both full and half
+ * duplex operation.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
- * MAC speed/duplex configuration.*/
+ * MAC speed/duplex configuration.
+ */
if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS;
@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
* registers depending on negotiated values.
*/
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
+ &phy_data);
if (ret_val)
return ret_val;
@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
ctrl |= E1000_CTRL_SPD_1000;
else if ((phy_data & M88E1000_PSSR_SPEED) ==
- M88E1000_PSSR_100MBS)
+ M88E1000_PSSR_100MBS)
ctrl |= E1000_CTRL_SPD_100;
}
@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement Register
- * (Address 4) and the Auto_Negotiation Base Page Ability
- * Register (Address 5) to determine how flow control was
- * negotiated.
+ * (Address 4) and the Auto_Negotiation Base Page
+ * Ability Register (Address 5) to determine how flow
+ * control was negotiated.
*/
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
+ /* Two bits in the Auto Negotiation Advertisement
+ * Register (Address 4) and two bits in the Auto
+ * Negotiation Base Page Ability Register (Address 5)
+ * determine flow control for both the PHY and the link
+ * partner. The following table, taken out of the IEEE
+ * 802.3ab/D6.0 dated March 25, 1999, describes these
+ * PAUSE resolution bits and how flow control is
+ * determined based upon these settings.
* NOTE: DC = Don't Care
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 0 | DC | DC | E1000_FC_NONE
* 0 | 1 | 0 | DC | E1000_FC_NONE
* 0 | 1 | 1 | 0 | E1000_FC_NONE
@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | DC | 1 | DC | E1000_FC_FULL
*
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ /* Now we need to check if the user selected Rx
+ * ONLY of pause frames. In this case, we had
+ * to advertise FULL flow control because we
+ * could not advertise Rx ONLY. Hence, we must
+ * now check to see if we need to turn OFF the
+ * TRANSMISSION of PAUSE frames.
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
*
*/
@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
*
*/
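The tables quoted in these comments describe how the local PAUSE/ASM_DIR advertisement bits and the link partner's bits resolve into a flow-control mode. The resolution itself condenses to a few lines; the sketch below follows the table (symmetric PAUSE on both sides gives full flow control, the asymmetric combinations give Tx-only or Rx-only), using simplified names rather than the driver's registers, with wanted_rx_only mirroring the original_fc == E1000_FC_RX_PAUSE case.

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control from the advertised PAUSE/ASM_DIR bits of the
 * local device and its link partner, per the IEEE 802.3 resolution
 * table quoted in the driver comments. */
static enum fc_mode resolve_fc(int loc_pause, int loc_asm,
                               int lp_pause, int lp_asm, int wanted_rx_only)
{
    if (loc_pause && lp_pause)
        return wanted_rx_only ? FC_RX_PAUSE : FC_FULL;
    if (!loc_pause && loc_asm && lp_pause && lp_asm)
        return FC_TX_PAUSE;
    if (loc_pause && loc_asm && !lp_pause && lp_asm)
        return FC_RX_PAUSE;
    return FC_NONE;
}

int main(void)
{
    printf("%d %d %d\n",
           resolve_fc(1, 0, 1, 0, 0),    /* FC_FULL     */
           resolve_fc(0, 1, 1, 1, 0),    /* FC_TX_PAUSE */
           resolve_fc(1, 1, 0, 1, 0));   /* FC_RX_PAUSE */
    return 0;
}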
@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
+ /* Per the IEEE spec, at this point flow control should
+ * be disabled. However, we want to consider that we
+ * could be connected to a legacy switch that doesn't
+ * advertise desired flow control, but can be forced on
+ * the link partner. So if we advertised no flow
+ * control, that is what we will resolve to. If we
+ * advertised some kind of receive capability (Rx Pause
+ * Only or Full Flow Control) and the link partner
+ * advertised none, we will configure ourselves to
+ * enable Rx Flow Control only. We can do this safely
+ * for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx,
+ * no harm done since we won't be receiving any PAUSE
+ * frames anyway. If the intent on the link partner was
+ * to have flow control enabled, then by us enabling Rx
+ * only, we can at least receive pause frames and
+ * process them. This is a good idea because in most
+ * cases, since we are predominantly a server NIC, more
+ * times than not we will be asked to delay transmission
+ * of packets than asking our link partner to pause
+ * transmission of frames.
*/
else if ((hw->original_fc == E1000_FC_NONE ||
hw->original_fc == E1000_FC_TX_PAUSE) ||
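
For readers tracing the truth tables above, a standalone sketch (not the
driver's code) of how the PAUSE/ASM_DIR bits resolve to a flow-control mode;
the fc_mode enum and resolve_fc() are illustrative stand-ins for the
E1000_FC_* values and the logic in e1000_config_fc_after_link_up().

#include <stdio.h>

/* Illustrative flow-control outcomes, mirroring the driver's E1000_FC_*. */
enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control from the local and link-partner PAUSE/ASM_DIR
 * advertisement bits, following the truth tables quoted above.
 * 'requested' plays the role of hw->original_fc. */
static enum fc_mode resolve_fc(int loc_pause, int loc_asm,
			       int lp_pause, int lp_asm,
			       enum fc_mode requested)
{
	if (loc_pause && lp_pause)
		/* Both sides advertised PAUSE: full flow control, unless the
		 * user only wanted Rx and FULL was advertised on their behalf. */
		return requested == FC_RX_PAUSE ? FC_RX_PAUSE : FC_FULL;
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return FC_TX_PAUSE;		/* 0 | 1 | 1 | 1 row */
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return FC_RX_PAUSE;		/* 1 | 1 | 0 | 1 row */
	/* Partner advertised nothing usable: fall back as described above. */
	return (requested == FC_NONE || requested == FC_TX_PAUSE) ?
	       FC_NONE : FC_RX_PAUSE;
}

int main(void)
{
	/* 0 | 1 | 1 | 1 should resolve to Tx PAUSE only. */
	printf("%d\n", resolve_fc(0, 1, 1, 1, FC_FULL) == FC_TX_PAUSE);
	return 0;
}
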
@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
- /*
- * If we force link for non-auto-negotiation switch, check
+ /* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
if (phy_data & MII_SR_LINK_STATUS) {
hw->get_link_status = false;
- /* Check if there was DownShift, must be checked immediately after
- * link-up */
+ /* Check if there was DownShift; this must be checked
+ * immediately after link-up
+ */
e1000_check_downshift(hw);
/* If we are on 82544 or 82543 silicon and speed/duplex
- * are forced to 10H or 10F, then we will implement the polarity
- * reversal workaround. We disable interrupts first, and upon
- * returning, place the devices interrupt state to its previous
- * value except for the link status change interrupt which will
+ * are forced to 10H or 10F, then we will implement the
+ * polarity reversal workaround. We disable interrupts
+ * first, and upon returning, place the devices
+ * interrupt state to its previous value except for the
+ * link status change interrupt which will
* happen due to the execution of this workaround.
*/
@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
}
- /* Configure Flow Control now that Auto-Neg has completed. First, we
- * need to restore the desired flow control settings because we may
- * have had to re-autoneg with a different link partner.
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control settings
+ * because we may have had to re-autoneg with a different link
+ * partner.
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
/* At this point we know that we are on copper and we have
- * auto-negotiated link. These are conditions for checking the link
- * partner capability register. We use the link speed to determine if
- * TBI compatibility needs to be turned on or off. If the link is not
- * at gigabit speed, then TBI compatibility is not needed. If we are
- * at gigabit speed, we turn on TBI compatibility.
+ * auto-negotiated link. These are conditions for checking the
+ * link partner capability register. We use the link speed to
+ * determine if TBI compatibility needs to be turned on or off.
+ * If the link is not at gigabit speed, then TBI compatibility
+ * is not needed. If we are at gigabit speed, we turn on TBI
+ * compatibility.
*/
if (hw->tbi_compatibility_en) {
u16 speed, duplex;
@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
return ret_val;
}
if (speed != SPEED_1000) {
- /* If link speed is not set to gigabit speed, we do not need
- * to enable TBI compatibility.
+ /* If link speed is not set to gigabit speed, we
+ * do not need to enable TBI compatibility.
*/
if (hw->tbi_compatibility_on) {
- /* If we previously were in the mode, turn it off. */
+ /* If we previously were in the mode,
+ * turn it off.
+ */
rctl = er32(RCTL);
rctl &= ~E1000_RCTL_SBP;
ew32(RCTL, rctl);
hw->tbi_compatibility_on = false;
}
} else {
- /* If TBI compatibility is was previously off, turn it on. For
- * compatibility with a TBI link partner, we will store bad
- * packets. Some frames have an additional byte on the end and
+ /* If TBI compatibility was previously off,
+ * turn it on. For compatibility with a TBI link
+ * partner, we will store bad packets. Some
+ * frames have an additional byte on the end and
* will look like CRC errors to the hardware.
*/
if (!hw->tbi_compatibility_on) {
@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
*duplex = FULL_DUPLEX;
}
- /* IGP01 PHY may advertise full duplex operation after speed downgrade even
- * if it is operating at half duplex. Here we set the duplex settings to
- * match the duplex in the link partner's capabilities.
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade
+ * even if it is operating at half duplex. Here we set the duplex
+ * settings to match the duplex in the link partner's capabilities.
*/
if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
*/
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Raise the clock input to the Management Data Clock (by setting the MDC
- * bit), and then delay 10 microseconds.
+ /* Raise the clock input to the Management Data Clock (by setting the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
*/
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Lower the clock input to the Management Data Clock (by clearing the MDC
- * bit), and then delay 10 microseconds.
+ /* Lower the clock input to the Management Data Clock (by clearing the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
while (mask) {
- /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
- * then raising and lowering the Management Data Clock. A "0" is
- * shifted out to the PHY by setting the MDIO bit to "0" and then
- * raising and lowering the clock.
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to
+ * "1" and then raising and lowering the Management Data Clock.
+ * A "0" is shifted out to the PHY by setting the MDIO bit to
+ * "0" and then raising and lowering the clock.
*/
if (data & mask)
ctrl |= E1000_CTRL_MDIO;
@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
u8 i;
/* In order to read a register from the PHY, we need to shift in a total
- * of 18 bits from the PHY. The first two bit (turnaround) times are used
- * to avoid contention on the MDIO pin when a read operation is performed.
- * These two bits are ignored by us and thrown away. Bits are "shifted in"
- * by raising the input to the Management Data Clock (setting the MDC bit),
- * and then reading the value of the MDIO bit.
+ * of 18 bits from the PHY. The first two bit (turnaround) times are
+ * used to avoid contention on the MDIO pin when a read operation is
+ * performed. These two bits are ignored by us and thrown away. Bits are
+ * "shifted in" by raising the input to the Management Data Clock
+ * (setting the MDC bit), and then reading the value of the MDIO bit.
*/
ctrl = er32(CTRL);
- /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+ * input.
+ */
ctrl &= ~E1000_CTRL_MDIO_DIR;
ctrl &= ~E1000_CTRL_MDIO;
ew32(CTRL, ctrl);
E1000_WRITE_FLUSH();
- /* Raise and Lower the clock before reading in the data. This accounts for
- * the turnaround bits. The first clock occurred when we clocked out the
- * last bit of the Register Address.
+ /* Raise and Lower the clock before reading in the data. This accounts
+ * for the turnaround bits. The first clock occurred when we clocked out
+ * the last bit of the Register Address.
*/
e1000_raise_mdi_clk(hw, &ctrl);
e1000_lower_mdi_clk(hw, &ctrl);
@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
if (hw->mac_type > e1000_82543) {
/* Set up Op-code, Phy Address, and register address in the MDI
- * Control register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
+ * Control register. The MAC will take care of interfacing with
+ * the PHY to retrieve the desired data.
*/
if (hw->mac_type == e1000_ce4100) {
mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*phy_data = (u16) mdic;
}
} else {
- /* We must first send a preamble through the MDIO pin to signal the
- * beginning of an MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We must first send a preamble through the MDIO pin to signal
+ * the beginning of an MII instruction. This is done by sending
+ * 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
/* Now combine the next few fields that are required for a read
* operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine five different times. The format of
- * a MII read instruction consists of a shift out of 14 bits and is
- * defined as follows:
+ * e1000_shift_out_mdi_bits routine five different times. The
+ * format of a MII read instruction consists of a shift out of
+ * 14 bits and is defined as follows:
* <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
- * followed by a shift in of 18 bits. This first two bits shifted in
- * are TurnAround bits used to avoid contention on the MDIO pin when a
- * READ operation is performed. These two bits are thrown away
- * followed by a shift in of 16 bits which contains the desired data.
+ * followed by a shift in of 18 bits. These first two bits
+ * shifted in are TurnAround bits used to avoid contention on
+ * the MDIO pin when a READ operation is performed. These two
+ * bits are thrown away followed by a shift in of 16 bits which
+ * contains the desired data.
*/
mdic = ((reg_addr) | (phy_addr << 5) |
(PHY_OP_READ << 10) | (PHY_SOF << 12));
e1000_shift_out_mdi_bits(hw, mdic, 14);
- /* Now that we've shifted out the read command to the MII, we need to
- * "shift in" the 16-bit value (18 total bits) of the requested PHY
- * register address.
+ /* Now that we've shifted out the read command to the MII, we
+ * need to "shift in" the 16-bit value (18 total bits) of the
+ * requested PHY register address.
*/
*phy_data = e1000_shift_in_mdi_bits(hw);
}
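
The comment above spells out the 14-bit command that is bit-banged to the PHY
before 18 bits (two turnaround, sixteen data) are shifted back in. A minimal
sketch of that packing, assuming the standard clause-22 field values behind
the driver's PHY_SOF and PHY_OP_READ macros:

#include <stdint.h>
#include <stdio.h>

#define MII_SOF		0x1	/* start-of-frame "01" (assumed value) */
#define MII_OP_READ	0x2	/* read opcode "10" (assumed value) */

/* <SOF:2><Op:2><PhyAddr:5><RegAddr:5>, shifted out MSB first. */
static uint16_t mii_read_cmd(uint8_t phy_addr, uint8_t reg_addr)
{
	return (uint16_t)((reg_addr & 0x1f) |
			  ((phy_addr & 0x1f) << 5) |
			  (MII_OP_READ << 10) |
			  (MII_SOF << 12));
}

int main(void)
{
	/* Read register 1 (status) of the PHY at address 1 -> 0x1821. */
	printf("0x%04x\n", mii_read_cmd(1, 1));
	return 0;
}
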
@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
}
}
} else {
- /* We'll need to use the SW defined pins to shift the write command
- * out to the PHY. We first send a preamble to the PHY to signal the
- * beginning of the MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We'll need to use the SW defined pins to shift the write
+ * command out to the PHY. We first send a preamble to the PHY
+ * to signal the beginning of the MII instruction. This is done
+ * by sending 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
- /* Now combine the remaining required fields that will indicate a
- * write operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine for each field in the command. The
- * format of a MII write instruction is as follows:
- * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ /* Now combine the remaining required fields that will indicate
+ * a write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the
+ * command. The format of a MII write instruction is as follows:
+ * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>.
*/
mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
(PHY_OP_WRITE << 12) | (PHY_SOF << 14));
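
For the write path, the documented frame appends a turnaround field and the
16-bit data word after the register address. A companion sketch with assumed
values for PHY_SOF, PHY_OP_WRITE and PHY_TURNAROUND; packing the data into the
low half of one 32-bit shift-out is an assumption, since that step falls
outside this hunk:

#include <stdint.h>
#include <stdio.h>

#define MII_SOF		0x1	/* "01" start of frame (assumed value) */
#define MII_OP_WRITE	0x1	/* "01" write opcode (assumed value) */
#define MII_TA		0x2	/* "10" turnaround for a write (assumed value) */

/* <SOF:2><Op:2><PhyAddr:5><RegAddr:5><TA:2><Data:16>, MSB first. */
static uint32_t mii_write_frame(uint8_t phy_addr, uint8_t reg_addr,
				uint16_t data)
{
	uint32_t cmd = (uint32_t)(MII_TA |
				  ((reg_addr & 0x1f) << 2) |
				  ((phy_addr & 0x1f) << 7) |
				  (MII_OP_WRITE << 12) |
				  (MII_SOF << 14));

	return (cmd << 16) | data;
}

int main(void)
{
	printf("0x%08x\n", mii_write_frame(1, 0, 0x1340));	/* 0x50821340 */
	return 0;
}
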
@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
- /* Read the device control register and assert the E1000_CTRL_PHY_RST
- * bit. Then, take it out of reset.
+ /* Read the device control register and assert the
+ * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
* For e1000 hardware, we delay for 10ms between the assert
- * and deassert.
+ * and de-assert.
*/
ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
E1000_WRITE_FLUSH();
} else {
- /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
- * bit to put the PHY into reset. Then, take it out of reset.
+ /* Read the Extended Device Control Register, assert the
+ * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
+ * out of reset.
*/
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
/* IGP01E1000 does not need to support it. */
@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ /* Local/Remote Receiver Information are only valid at 1000
+ * Mbps
+ */
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
return ret_val;
@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
}
if (eeprom->type == e1000_eeprom_spi) {
- /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
- * 32KB (incremented by powers of 2).
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
+ * 128B to 32KB (incremented by powers of 2).
*/
/* Set to default value for initial eeprom read. */
eeprom->word_size = 64;
@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom_size =
(eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
/* 256B eeprom size was not supported in earlier hardware, so we
- * bump eeprom_size up one to ensure that "1" (which maps to 256B)
- * is never the result used in the shifting logic below. */
+ * bump eeprom_size up one to ensure that "1" (which maps to
+ * 256B) is never the result used in the shifting logic below.
+ */
if (eeprom_size)
eeprom_size++;
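
As a rough illustration of the size field described above (an enum [0..8]
covering 128 B to 32 KB in powers of two), the mapping to a count of 16-bit
words could look like the sketch below. The driver's actual shift constant is
not visible in this hunk, so spi_eeprom_words() is an assumption, not a copy:

#include <stdio.h>

/* 0 -> 64 words (128 B), 1 -> 128 words (256 B), ... 8 -> 16384 words (32 KB).
 * The hunk's "if (eeprom_size) eeprom_size++;" bump is applied beforehand on
 * parts whose encoding skipped the 256 B size. */
static unsigned int spi_eeprom_words(unsigned int eeprom_size)
{
	return 64u << eeprom_size;
}

int main(void)
{
	printf("%u words\n", spi_eeprom_words(8));	/* 16384 words = 32 KB */
	return 0;
}
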
@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
*/
static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and
+ * then wait 50 microseconds.
*/
*eecd = *eecd & ~E1000_EECD_SK;
ew32(EECD, *eecd);
@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
eecd |= E1000_EECD_DO;
}
do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK bit
+ * controls the clock input to the EEPROM). A "0" is shifted
+ * out to the EEPROM by setting "DI" to "0" and then raising and
+ * then lowering the clock.
*/
eecd &= ~E1000_EECD_DI;
@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
/* In order to read a register from the EEPROM, we need to shift 'count'
* bits in from the EEPROM. Bits are "shifted in" by raising the clock
- * input to the EEPROM (setting the SK bit), and then reading the value of
- * the "DO" bit. During this "shifting in" process the "DI" bit should
- * always be clear.
+ * input to the EEPROM (setting the SK bit), and then reading the value
+ * of the "DO" bit. During this "shifting in" process the "DI" bit
+ * should always be clear.
*/
eecd = er32(EECD);
@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_EEPROM;
/* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
- * acquired the EEPROM at this point, so any returns should release it */
+ * acquired the EEPROM at this point, so any returns should release it
+ */
if (eeprom->type == e1000_eeprom_spi) {
u16 word_in;
u8 read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
read_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
eeprom->address_bits);
- /* Read the data. The address of the eeprom internally increments with
- * each byte (spi) being read, saving on the overhead of eeprom setup
- * and tear-down. The address counter will roll over if reading beyond
- * the size of the eeprom, thus allowing the entire memory to be read
- * starting from any offset. */
+ /* Read the data. The address of the eeprom internally
+ * increments with each byte (spi) being read, saving on the
+ * overhead of eeprom setup and tear-down. The address counter
+ * will roll over if reading beyond the size of the eeprom, thus
+ * allowing the entire memory to be read starting from any
+ * offset.
+ */
for (i = 0; i < words; i++) {
word_in = e1000_shift_in_ee_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset + i),
eeprom->address_bits);
- /* Read the data. For microwire, each word requires the overhead
- * of eeprom setup and tear-down. */
+ /* Read the data. For microwire, each word requires the
+ * overhead of eeprom setup and tear-down.
+ */
data[i] = e1000_shift_in_ee_bits(hw, 16);
e1000_standby_eeprom(hw);
}
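
A small, self-contained sketch of the sequential SPI read and byte swap
discussed above, with the EEPROM replaced by a flat byte array;
spi_read_words() is an illustrative helper, not a driver function, and assumes
the first byte clocked in lands in the high half of the shifted-in word:

#include <stdint.h>
#include <stdio.h>

static void spi_read_words(const uint8_t *ee, uint16_t offset,
			   uint16_t words, uint16_t *data)
{
	for (uint16_t i = 0; i < words; i++) {
		/* The part auto-increments its internal address, so each
		 * word is just the next two bytes of the stream... */
		uint16_t word_in = (uint16_t)((ee[(offset + i) * 2] << 8) |
					      ee[(offset + i) * 2 + 1]);
		/* ...stored byte-swapped, exactly as in the hunk above. */
		data[i] = (uint16_t)((word_in >> 8) | (word_in << 8));
	}
}

int main(void)
{
	uint8_t ee[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint16_t out[2];

	spi_read_words(ee, 0, 2, out);
	printf("0x%04x 0x%04x\n", out[0], out[1]);	/* 0x3412 0x7856 */
	return 0;
}
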
@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
write_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
/* Send the data */
- /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ /* Loop to allow for up to whole page write (32 bytes) of
+ * eeprom
+ */
while (widx < words) {
u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_ee_bits(hw, word_out, 16);
widx++;
- /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
- * operation, while the smaller eeproms are capable of an 8-byte
- * PAGE WRITE operation. Break the inner loop to pass new address
+ /* Some larger eeprom sizes are capable of a 32-byte
+ * PAGE WRITE operation, while the smaller eeproms are
+ * capable of an 8-byte PAGE WRITE operation. Break the
+ * inner loop to pass new address
*/
if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
e1000_standby_eeprom(hw);
@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
/* Send the data */
e1000_shift_out_ee_bits(hw, data[words_written], 16);
- /* Toggle the CS line. This in effect tells the EEPROM to execute
- * the previous command.
+ /* Toggle the CS line. This in effect tells the EEPROM to
+ * execute the previous command.
*/
e1000_standby_eeprom(hw);
- /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
+ /* Read DO repeatedly until it is high (equal to '1'). The
+ * EEPROM will signal that the command has been completed by
+ * raising the DO signal. If DO does not go high in 10
+ * milliseconds, then error out.
*/
for (i = 0; i < 200; i++) {
eecd = er32(EECD);
@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw)
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of the
- * manageability unit */
+ * manageability unit
+ */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
E1000_WRITE_FLUSH();
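
A sketch of the clear-but-keep-manageability idea above, operating on a plain
array instead of the VFTA registers. The offset/bit split (low five bits of
the VLAN ID pick the bit, the rest pick the register) is an assumption about
the table layout; only the comparison against vfta_offset is visible here:

#include <stdint.h>
#include <stdio.h>

#define VLAN_FILTER_TBL_SIZE	128	/* 128 x 32 bits covers 4096 VLAN IDs */

static void clear_vfta(uint32_t vfta[VLAN_FILTER_TBL_SIZE],
		       uint16_t mng_vid, int keep_mng)
{
	uint32_t vfta_offset = (uint32_t)(mng_vid >> 5) & 0x7f;
	uint32_t vfta_bit_in_reg = 1u << (mng_vid & 0x1f);

	for (uint32_t offset = 0; offset < VLAN_FILTER_TBL_SIZE; offset++)
		vfta[offset] = (keep_mng && offset == vfta_offset) ?
			       vfta_bit_in_reg : 0;
}

int main(void)
{
	uint32_t vfta[VLAN_FILTER_TBL_SIZE];

	clear_vfta(vfta, 100, 1);
	printf("vfta[3] = 0x%08x\n", vfta[3]);	/* VLAN 100 -> reg 3, bit 4 */
	return 0;
}
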
@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* counters overcount this packet as a CRC error and undercount
* the packet as a good packet
*/
- /* This packet should not be counted as a CRC error. */
+ /* This packet should not be counted as a CRC error. */
stats->crcerrs--;
- /* This packet does count as a Good Packet Received. */
+ /* This packet does count as a Good Packet Received. */
stats->gprc++;
- /* Adjust the Good Octets received counters */
+ /* Adjust the Good Octets received counters */
carry_bit = 0x80000000 & stats->gorcl;
stats->gorcl += frame_len;
/* If the high bit of Gorcl (the low 32 bits of the Good Octets
@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
- * find the polarity status */
+ /* If speed is 1000 Mbps, must read the
+ * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status
+ */
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
e1000_rev_polarity_reversed :
e1000_rev_polarity_normal;
} else {
- /* For 10 Mbps, read the polarity bit in the status register. (for
- * 100 Mbps this bit is always 0) */
+ /* For 10 Mbps, read the polarity bit in the status
+ * register. (for 100 Mbps this bit is always 0)
+ */
*polarity =
(phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
e1000_rev_polarity_reversed :
@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
} else {
if (hw->dsp_config_state == e1000_dsp_config_activated) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
if (hw->ffe_config_state == e1000_ffe_config_active) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
ret_val =
@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
return ret_val;
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return E1000_SUCCESS;
/* During driver activity LPLU should not be used or it will attain link
- * from the lowest speeds starting from 10Mbps. The capability is used for
- * Dx transitions and states */
+ * from the lowest speeds starting from 10Mbps. The capability is used
+ * for Dx transitions and states
+ */
if (hw->mac_type == e1000_82541_rev_2
|| hw->mac_type == e1000_82547_rev_2) {
ret_val =
@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return ret_val;
}
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
- * Dx states where the power conservation is most important. During
- * driver activity we should enable SmartSpeed, so performance is
- * maintained. */
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
if (hw->smart_speed == e1000_smart_speed_on) {
ret_val =
e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 294da56b824c..8502c625dbef 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
* e1000_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
-
static int __init e1000_init_module(void)
{
int ret;
@@ -266,7 +265,6 @@ module_init(e1000_init_module);
* e1000_exit_module is called just before the driver is removed
* from memory.
**/
-
static void __exit e1000_exit_module(void)
{
pci_unregister_driver(&e1000_driver);
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
-
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
* e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_configure_rx(adapter);
/* call E1000_DESC_UNUSED which always leaves
* at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean */
+ * next_to_use != next_to_clean
+ */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring,
- E1000_DESC_UNUSED(ring));
+ E1000_DESC_UNUSED(ring));
}
}
@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
* The phy may be powered down to save power and turn off link when the
* driver is unloaded and wake on lan is not enabled (among others)
* *** this routine MUST be followed by a call to e1000_reset ***
- *
**/
-
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
/* Just clear the power down bit to wake the phy back up */
if (hw->media_type == e1000_media_type_copper) {
/* according to the manual, the phy will retain its
- * settings across a power-down/up cycle */
+ * settings across a power-down/up cycle
+ */
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
* The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled
* (b) AMT is active
- * (c) SoL/IDER session is active */
+ * (c) SoL/IDER session is active
+ */
if (!adapter->wol && hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0;
@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
- /*
- * Setting DOWN must be after irq_disable to prevent
+ /* Setting DOWN must be after irq_disable to prevent
* a screaming interrupt. Setting DOWN also prevents
* tasks from rescheduling.
*/
@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
- * expressed in KB. */
+ * expressed in KB.
+ */
pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /*
- * the tx fifo also stores 16 bytes of information about the tx
+ /* the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (hw->max_frame_size +
@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation */
+ * allocation, take space away from current Rx allocation
+ */
if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
break;
}
- /* if short on rx space, rx wins and must trump tx
- * adjustment or use Early Receive if available */
+ /* if short on Rx space, Rx wins and must trump Tx
+ * adjustment or use Early Receive if available
+ */
if (pba < min_rx_space)
pba = min_rx_space;
}
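
Condensed into a pure function, the packet-buffer rebalance described above
might look like the sketch below; rebalance_rx_pba() is illustrative only and
skips the frame-size rounding that produces the two minimums:

#include <stdint.h>
#include <stdio.h>

/* PBA packs the Tx allocation (KB) in the upper 16 bits and the Rx
 * allocation (KB) in the lower 16 bits.  If Tx is short of its minimum,
 * take the difference from Rx, but never let Rx drop below its own minimum. */
static uint32_t rebalance_rx_pba(uint32_t pba_reg, uint32_t min_tx_space,
				 uint32_t min_rx_space)
{
	uint32_t tx_space = pba_reg >> 16;
	uint32_t pba = pba_reg & 0xffff;

	if (tx_space < min_tx_space && (min_tx_space - tx_space) < pba)
		pba -= min_tx_space - tx_space;

	if (pba < min_rx_space)		/* short on Rx space: Rx wins */
		pba = min_rx_space;

	return pba;
}

int main(void)
{
	/* 20 KB Tx / 28 KB Rx, Tx wants 24 KB, Rx needs at least 10 KB. */
	printf("%u KB left for Rx\n",
	       rebalance_rx_pba((20u << 16) | 28u, 24, 10));	/* 24 */
	return 0;
}
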
@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
- /*
- * flow control settings:
+ /* flow control settings:
* The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which
- * can cause a loss of link at power off or driver unload */
+ * can cause a loss of link at power off or driver unload
+ */
ctrl &= ~E1000_CTRL_SWDPIN3;
ew32(CTRL, ctrl);
}
@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
static netdev_features_t e1000_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
*/
if (features & NETIF_F_HW_VLAN_RX)
features |= NETIF_F_HW_VLAN_TX;
@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /*
- * there is a workaround being applied below that limits
+ /* there is a workaround being applied below that limits
* 64-bit DMA addresses to 64-bit hardware. There are some
* 32-bit adapters that Tx hang when given 64-bit DMA addresses
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /*
- * according to DMA-API-HOWTO, coherent calls will always
+ /* according to DMA-API-HOWTO, coherent calls will always
* succeed if the set call did
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* before reading the EEPROM, reset the controller to
- * put the device in a known good starting state */
+ * put the device in a known good starting state
+ */
e1000_reset_hw(hw);
@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (e1000_validate_eeprom_checksum(hw) < 0) {
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
- /*
- * set MAC address to all zeroes to invalidate and temporary
+ /* set MAC address to all zeroes to invalidate and temporarily
* disable this device for the user. This blocks regular
* traffic while still permitting ethtool ioctls from reaching
* the hardware as well as allowing the user to run the
@@ -1123,9 +1118,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* don't block initialization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
e_err(probe, "Invalid MAC Address\n");
@@ -1170,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
- * wake on lan on a particular port */
+ * wake on lan on a particular port
+ */
switch (pdev->device) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0;
@@ -1178,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
+ * regardless of eeprom setting
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
@@ -1271,7 +1267,6 @@ err_pci_reg:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-
static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1307,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
* e1000_sw_init initializes the Adapter private data structure.
* e1000_init_hw_struct MUST be called before this function
**/
-
static int e1000_sw_init(struct e1000_adapter *adapter)
{
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1338,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time.
**/
-
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
@@ -1368,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-
static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1402,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so. */
+ * clean_rx handler before we do so.
+ */
e1000_configure(adapter);
err = e1000_request_irq(adapter);
@@ -1445,7 +1438,6 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1460,10 +1452,11 @@ static int e1000_close(struct net_device *netdev)
e1000_free_all_rx_resources(adapter);
/* kill manageability vlan ID if supported, but not if a vlan with
- * the same ID is registered on the host OS (let 8021q kill it) */
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
if ((hw->mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
@@ -1484,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
unsigned long end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory
- * write location to cross 64k boundary due to errata 23 */
+ * write location to cross 64k boundary due to errata 23
+ */
if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
@@ -1501,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
*
* Return 0 on success, negative on failure
**/
-
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *txdr)
{
@@ -1510,11 +1503,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info) {
- e_err(probe, "Unable to allocate memory for the Tx descriptor "
- "ring\n");
+ if (!txdr->buffer_info)
return -ENOMEM;
- }
/* round up to nearest 4K */
@@ -1578,7 +1568,6 @@ setup_tx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1603,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
*
* Configure the Tx unit of the MAC after a reset.
**/
-
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba;
@@ -1624,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
ew32(TDT, 0);
ew32(TDH, 0);
- adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
- adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDT : E1000_82542_TDT);
break;
}
@@ -1680,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
- * need this to apply a workaround later in the send path. */
+ * need this to apply a workaround later in the send path.
+ */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = true;
@@ -1696,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rxdr)
{
@@ -1705,11 +1695,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info) {
- e_err(probe, "Unable to allocate memory for the Rx descriptor "
- "ring\n");
+ if (!rxdr->buffer_info)
return -ENOMEM;
- }
desc_len = sizeof(struct e1000_rx_desc);
@@ -1778,7 +1765,6 @@ setup_rx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1847,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000e_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -1869,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
*
* Configure the Rx unit of the MAC after a reset.
**/
-
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba;
@@ -1902,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
+ * the Base and Length of the Rx Descriptor Ring
+ */
switch (adapter->num_rx_queues) {
case 1:
default:
@@ -1912,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
ew32(RDT, 0);
ew32(RDH, 0);
- adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
- adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDT : E1000_82542_RDT);
break;
}
@@ -1939,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
*
* Free all transmit software resources
**/
-
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -1962,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
*
* Free all transmit software resources
**/
-
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -1997,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
* @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -2033,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2049,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
*
* Free all receive software resources
**/
-
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2072,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
*
* Free all receive software resources
**/
-
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -2086,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
* @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2145,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_rx_rings - Free Rx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2205,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_set_mac(struct net_device *netdev, void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2240,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
* responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-
static void e1000_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2253,10 +2232,8 @@ static void e1000_set_rx_mode(struct net_device *netdev)
int mta_reg_count = E1000_NUM_MTA_REGISTERS;
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
- if (!mcarray) {
- e_err(probe, "memory allocation failed\n");
+ if (!mcarray)
return;
- }
/* Check for Promiscuous and All Multicast modes */
@@ -2326,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
}
/* write the hash table completely, write from bottom to avoid
- * both stupid write combining chipsets, and flushing each write */
+ * both stupid write combining chipsets, and flushing each write
+ */
for (i = mta_reg_count - 1; i >= 0 ; i--) {
- /*
- * If we are on an 82544 has an errata where writing odd
+ /* If we are on an 82544, it has an errata where writing odd
* offsets overwrites the previous even offset, but writing
* backwards over the range solves the issue by always
* writing the odd offset first
@@ -2467,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work)
bool txb2b = true;
/* update snapshot of PHY registers on LSC */
e1000_get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
ctrl = er32(CTRL);
pr_info("%s NIC Link is Up %d Mbps %s, "
@@ -2542,7 +2519,8 @@ link_up:
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
+ * (Do the reset outside of interrupt context).
+ */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
@@ -2552,8 +2530,7 @@ link_up:
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000.
*/
@@ -2668,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
+ adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
@@ -2705,10 +2680,11 @@ set_itr_now:
if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
- * increasing */
+ * increasing
+ */
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256));
}
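
A sketch of the two steps above: ramping toward a higher interrupt rate in
quarter-steps so the effective rate stays biased toward the slower bulk
setting, then converting interrupts-per-second into the ITR register's 256 ns
interval units. next_itr() and itr_reg() are illustrative names:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int next_itr(unsigned int cur_itr, unsigned int new_itr)
{
	return new_itr > cur_itr ?
	       min_u(cur_itr + (new_itr >> 2), new_itr) : new_itr;
}

static unsigned int itr_reg(unsigned int itr)
{
	return 1000000000u / (itr * 256u);
}

int main(void)
{
	unsigned int itr = next_itr(2000, 8000);	/* one step: 4000 */

	printf("itr=%u/s -> ITR reg %u\n", itr, itr_reg(itr));
	return 0;
}
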
@@ -2870,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* Workaround for Controller erratum --
* descriptor for non-tso packet in a linear SKB that follows a
* tso gets written back prematurely before the data is fully
- * DMA'd to the controller */
+ * DMA'd to the controller
+ */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
tx_ring->last_tx_tso = false;
@@ -2878,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
+ * in TSO mode. Append 4-byte sentinel desc
+ */
if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
/* work-around for errata 10 and it applies
@@ -2891,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size = 2015;
/* Workaround for potential 82544 hang in PCI-X. Avoid
- * terminating buffers within evenly-aligned dwords. */
+ * terminating buffers within evenly-aligned dwords.
+ */
if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
size > 4))
@@ -2903,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
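
The condition above only fires when a mapped buffer would end inside an
evenly-aligned dword (bit 2 of the end address clear). A standalone sketch of
the check, assuming the fix is to shave four bytes off the length and let the
next descriptor carry the remainder; the adjustment itself is outside this
hunk:

#include <stdint.h>
#include <stdio.h>

static unsigned int adjust_82544_len(uintptr_t buf, unsigned int offset,
				     unsigned int size)
{
	if (!((buf + offset + size - 1) & 4) && size > 4)
		size -= 4;
	return size;
}

int main(void)
{
	/* 0x1000 + 68 - 1 = 0x1043 ends in an even dword, so trim to 64. */
	printf("%u\n", adjust_82544_len(0x1000, 0, 68));
	return 0;
}
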
@@ -2934,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
size -= 4;
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
- * dwords. */
+ * dwords.
+ */
bufend = (unsigned long)
page_to_phys(skb_frag_page(frag));
bufend += offset + size - 1;
@@ -3003,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
+ E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
@@ -3044,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
tx_ring->next_to_use = i;
writel(i, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
mmiowb();
}
@@ -3099,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
netif_stop_queue(netdev);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again in a case another CPU has just
- * made room available. */
+ * made room available.
+ */
if (likely(E1000_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3114,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
}
static int e1000_maybe_stop_tx(struct net_device *netdev,
- struct e1000_tx_ring *tx_ring, int size)
+ struct e1000_tx_ring *tx_ring, int size)
{
if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -3138,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int tso;
unsigned int f;
- /* This goes back to the question of how to logically map a tx queue
+ /* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
- * if using multiple tx queues. If the stack breaks away from a
- * single qdisc implementation, we can look at this again. */
+ * if using multiple Tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again.
+ */
tx_ring = adapter->tx_ring;
if (unlikely(skb->len <= 0)) {
@@ -3166,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
- * drops. */
+ * drops.
+ */
if (mss) {
u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
@@ -3182,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* this hardware's requirements
* NOTE: this is a TSO only workaround
* if end byte alignment not correct move us
- * into the next dword */
- if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ * into the next dword
+ */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1)
+ & 4)
break;
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
@@ -3231,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count += nr_frags;
/* need: count + 2 desc gap to keep tail from touching
- * head, otherwise try next time */
+ * head, otherwise try next time
+ */
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
@@ -3270,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS;
count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
- nr_frags, mss);
+ nr_frags, mss);
if (count) {
netdev_sent_queue(netdev, skb->len);
@@ -3372,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
/* Print Registers */
e1000_regdump(adapter);
- /*
- * transmit dump
- */
+ /* transmit dump */
pr_info("TX Desc ring0 dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
@@ -3435,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
}
rx_ring_summary:
- /*
- * receive dump
- */
+ /* receive dump */
pr_info("\nRX Desc ring dump\n");
/* Legacy Receive Descriptor Format
@@ -3502,7 +3489,6 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-
static void e1000_tx_timeout(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3530,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the watchdog.
**/
-
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
/* only return the current stats */
@@ -3544,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3581,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
- * however with the new *_jumbo_rx* routines, jumbo receives will use
- * fragmented skbs */
+ * however with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
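
Only the 2048-byte branch of the buffer-length bucketing is visible above; a
hedged sketch of the overall selection, with the larger power-of-two buckets
assumed rather than taken from this hunk (jumbo receives go through the
fragmented-skb path mentioned in the comment):

#include <stdio.h>

static unsigned int rx_buffer_len(unsigned int max_frame)
{
	if (max_frame <= 2048)
		return 2048;
	if (max_frame <= 4096)
		return 4096;
	if (max_frame <= 8192)
		return 8192;
	return 16384;
}

int main(void)
{
	printf("%u\n", rx_buffer_len(1522));	/* standard VLAN frame -> 2048 */
	return 0;
}
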
@@ -3617,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* e1000_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-
void e1000_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3628,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@@ -3719,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC
+ */
netdev->stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
@@ -3773,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-
static irqreturn_t e1000_intr(int irq, void *data)
{
struct net_device *netdev = data;
@@ -3784,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
- /*
- * we might have caused the interrupt, but the above
+ /* we might have caused the interrupt, but the above
* read cleared it, and just in case the driver is
* down there is nothing to do so return handled
*/
@@ -3811,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
__napi_schedule(&adapter->napi);
} else {
/* this really should not happen! if it does it is basically a
- * bug, but not a hard error, so enable ints and continue */
+ * bug, but not a hard error, so enable ints and continue
+ */
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
@@ -3825,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
- struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+ napi);
int tx_clean_complete = 0, work_done = 0;
tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
@@ -3916,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
+ * check with the clearing of time_stamp and movement of i
+ */
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
- (adapter->tx_timeout_factor * HZ)) &&
+ (adapter->tx_timeout_factor * HZ)) &&
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
@@ -3963,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @csum: receive descriptor csum field
* @sk_buff: socket buffer with received data
**/
-
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
{
@@ -3999,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
* e1000_consume_page - helper function
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
+ u16 length)
{
bi->page = NULL;
skb->len += length;
@@ -4095,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4107,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window
- * too */
+ * too
+ */
if (rx_ring->rx_skb_top)
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
@@ -4123,7 +4108,7 @@ process_skb:
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
+ 0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
@@ -4141,38 +4126,42 @@ process_skb:
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
- * page */
+ * page
+ */
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
/* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
+ * copybreak to save the put_page/alloc_page
+ */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page);
- memcpy(skb_tail_pointer(skb), vaddr, length);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page */
+ * buffer_info->page
+ */
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
+ buffer_info->page, 0,
+ length);
e1000_consume_page(buffer_info, skb,
- length);
+ length);
}
}
}
/* Receive Checksum Offload XXX recompute due to CRC strip? */
e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
total_rx_bytes += (skb->len - 4); /* don't count FCS */
if (likely(!(netdev->features & NETIF_F_RXFCS)))
@@ -4214,8 +4203,7 @@ next_desc:
return cleaned;
}
-/*
- * this should improve performance for small packets with large amounts
+/* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
static void e1000_check_copybreak(struct net_device *netdev,
@@ -4319,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
+ length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock,
- flags);
+ flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4386,10 +4374,9 @@ next_desc:
* @rx_ring: pointer to receive ring structure
* @cleaned_count: number of buffers to allocate this pass
**/
-
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring, int cleaned_count)
+ struct e1000_rx_ring *rx_ring, int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -4430,7 +4417,7 @@ check_page:
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
+ buffer_info->page, 0,
buffer_info->length,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
@@ -4460,7 +4447,8 @@ check_page:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
@@ -4470,7 +4458,6 @@ check_page:
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
-
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count)
@@ -4541,8 +4528,7 @@ map_skb:
break; /* while !buffer_info->skb */
}
- /*
- * XXX if it was allocated cleanly it will never map to a
+ /* XXX if it was allocated cleanly it will never map to a
* boundary crossing
*/
@@ -4580,7 +4566,8 @@ map_skb:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, hw->hw_addr + rx_ring->rdt);
}
@@ -4590,7 +4577,6 @@ map_skb:
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
* @adapter:
**/
-
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -4603,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
- * we assume back-to-back */
+ * we assume back-to-back
+ */
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
@@ -4616,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
adapter->smartspeed++;
if (!e1000_phy_setup_autoneg(hw) &&
!e1000_read_phy_reg(hw, PHY_CTRL,
- &phy_ctrl)) {
+ &phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
e1000_write_phy_reg(hw, PHY_CTRL,
@@ -4647,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
* @ifreq:
* @cmd:
**/
-
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -4666,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @ifreq:
* @cmd:
**/
-
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
@@ -4928,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
hw->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
@@ -5131,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
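
   A poll handler of the kind this comment describes usually just runs the
   normal interrupt routine with the device IRQ masked. The sketch below is
   illustrative only; the handler name and exact shape are assumptions, not
   code from this patch (e1000_intr is the ISR shown earlier in this diff).

	/* Illustrative sketch only -- not part of this patch. */
	static void e1000_netpoll(struct net_device *netdev)
	{
		struct e1000_adapter *adapter = netdev_priv(netdev);

		disable_irq(adapter->pdev->irq);	/* keep the real ISR out */
		e1000_intr(adapter->pdev->irq, netdev);	/* run the normal handler */
		enable_irq(adapter->pdev->irq);
	}
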
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 750fc0194f37..c9cde352b1c8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
-
void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
- .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD
}}
};
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_int_delay = opt.def;
}
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
break;
case 4:
e_dev_info("%s set to simplified "
- "(2000-8000) ints mode\n", opt.name);
+ "(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
- adapter);
+ adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
- * used as control */
+ * used as control
+ */
adapter->itr_setting = adapter->itr & ~3;
break;
}
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on fiber adapters
**/
-
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on copper adapters
**/
-
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_100_HALF;
+ ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
- ADVERTISE_100_FULL |
- ADVERTISE_1000_FULL;
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL;
+ ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e73c2c355993..e0991388664c 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -32,69 +32,6 @@
#include "e1000.h"
-#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
-#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
-#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
-#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
-
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
-#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
-
-#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
-#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
-#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
-
-#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
-#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
-
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
-
-#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
-#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
-
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
-#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
-#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
-
-/* PHY Specific Control Register 2 (Page 0, Register 26) */
-#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
- /* 1=Reverse Auto-Negotiation */
-
-/* MAC Specific Control Register (Page 2, Register 21) */
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
-#define GG82563_MSCR_TX_CLK_MASK 0x0007
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
-#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
-#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
-
-#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
-
-/* DSP Distance Register (Page 5, Register 26) */
-#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
- 1 = 50-80M
- 2 = 80-110M
- 3 = 110-140M
- 4 = >140M
- */
-
-/* Kumeran Mode Control Register (Page 193, Register 16) */
-#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
-
-/* Max number of times Kumeran read/write should be validated */
-#define GG82563_MAX_KMRN_RETRY 0x5
-
-/* Power Management Control Register (Page 193, Register 20) */
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
- /* 1=Enable SERDES Electrical Idle */
-
-/* In-Band Control Register (Page 194, Register 18) */
-#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
-
/* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
@@ -111,11 +48,10 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
-static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
-static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 *data);
-static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 data);
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data);
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
/**
@@ -625,16 +561,16 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
e_dbg("GG82563 PSCR: %X\n", phy_data);
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
/* Reset the phy to commit changes. */
- phy_data |= MII_CR_RESET;
+ phy_data |= BMCR_RESET;
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -696,7 +632,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
u16 phy_data, index;
ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
@@ -774,6 +710,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
e1000_release_phy_80003es2lan(hw);
@@ -833,6 +772,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Setup link and flow control */
ret_val = mac->ops.setup_link(hw);
+ if (ret_val)
+ return ret_val;
/* Disable IBIST slave mode (far-end loopback) */
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
@@ -1006,7 +947,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
return ret_val;
/* SW Reset the PHY so all changes take effect */
- ret_val = e1000e_commit_phy(hw);
+ ret_val = hw->phy.ops.commit(hw);
if (ret_val) {
e_dbg("Error Resetting the PHY\n");
return ret_val;
@@ -1272,7 +1213,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 *data)
{
u32 kmrnctrlsta;
- s32 ret_val = 0;
+ s32 ret_val;
ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
if (ret_val)
@@ -1307,7 +1248,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 data)
{
u32 kmrnctrlsta;
- s32 ret_val = 0;
+ s32 ret_val;
ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
if (ret_val)
@@ -1331,7 +1272,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
**/
static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
/* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
@@ -1434,18 +1375,18 @@ static const struct e1000_phy_operations es2_phy_ops = {
.acquire = e1000_acquire_phy_80003es2lan,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
- .commit = e1000e_phy_sw_reset,
- .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
- .get_cfg_done = e1000_get_cfg_done_80003es2lan,
- .get_cable_length = e1000_get_cable_length_80003es2lan,
- .get_info = e1000e_get_phy_info_m88,
- .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
+ .get_cfg_done = e1000_get_cfg_done_80003es2lan,
+ .get_cable_length = e1000_get_cable_length_80003es2lan,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
.release = e1000_release_phy_80003es2lan,
- .reset = e1000e_phy_hw_reset_generic,
- .set_d0_lplu_state = NULL,
- .set_d3_lplu_state = e1000e_set_d3_lplu_state,
- .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
- .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = NULL,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
};
static const struct e1000_nvm_operations es2_nvm_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
new file mode 100644
index 000000000000..90d363b2d280
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_80003ES2LAN_H_
+#define _E1000E_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+#endif
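
   The GG82563_DSPD_CABLE_LENGTH field kept in this new header is what indexes
   the cable length table mentioned near the top of 80003es2lan.c ("lower bound
   at index, upper bound at index + 5"). A hedged sketch of that lookup is shown
   below; the table name, its size macro, and the error code are assumed from
   the surrounding driver code and are not introduced by this patch.

	/* Illustrative sketch only -- names assumed, not defined in this patch. */
	static s32 gg82563_cable_length_sketch(struct e1000_hw *hw, u16 phy_data)
	{
		struct e1000_phy_info *phy = &hw->phy;
		u16 index = phy_data & GG82563_DSPD_CABLE_LENGTH;

		if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5)
			return -E1000_ERR_PHY;

		/* entry "index" is the lower bound, "index + 5" the upper bound */
		phy->min_cable_length = e1000_gg82563_cable_length_table[index];
		phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
		return 0;
	}
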
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index c77d010d5c59..2faffbde179e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,21 +44,6 @@
#include "e1000.h"
-#define ID_LED_RESERVED_F746 0xF746
-#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_OFF1_ON2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_DEF1_DEF2))
-
-#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
-#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
-#define E1000_BASE1000T_STATUS 10
-#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
-#define E1000_RECEIVE_ERROR_COUNTER 21
-#define E1000_RECEIVE_ERROR_MAX 0xFFFF
-
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
@@ -67,9 +52,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
-static s32 e1000_setup_link_82571(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
-static void e1000_clear_vfta_82571(struct e1000_hw *hw);
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
@@ -449,13 +432,13 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
break;
case e1000_82574:
case e1000_82583:
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
if (ret_val)
return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -556,16 +539,14 @@ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
s32 i = 0;
extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
do {
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
ew32(EXTCNF_CTRL, extcnf_ctrl);
extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
break;
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
usleep_range(2000, 4000);
i++;
} while (i < MDIO_OWNERSHIP_TIMEOUT);
@@ -937,6 +918,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
@@ -1329,9 +1312,10 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
*/
vfta_offset = (hw->mng_cookie.vlan_id >>
E1000_VFTA_ENTRY_SHIFT) &
- E1000_VFTA_ENTRY_MASK;
- vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg =
+ 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
@@ -1399,7 +1383,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
{
u16 status_1kbt = 0;
u16 receive_errors = 0;
- s32 ret_val = 0;
+ s32 ret_val;
 	/* Read PHY Receive Error counter first, if it is max - all F's - then
 	 * read the Base1000T status register. If both are max then PHY is hung.
@@ -1544,13 +1528,12 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ctrl = er32(CTRL);
status = er32(STATUS);
- rxcw = er32(RXCW);
+ er32(RXCW);
/* SYNCH bit and IV bit are sticky */
udelay(10);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
-
/* Receiver is synchronized with no invalid bits. */
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
@@ -1799,6 +1782,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
+ if (ret_val)
+ return ret_val;
}
}
@@ -1812,7 +1797,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
if (hw->mac.type == e1000_82571) {
- s32 ret_val = 0;
+ s32 ret_val;
/* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
@@ -1931,7 +1916,7 @@ static const struct e1000_phy_operations e82_phy_ops_igp = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_igp,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_phy_operations e82_phy_ops_m88 = {
@@ -1940,7 +1925,7 @@ static const struct e1000_phy_operations e82_phy_ops_m88 = {
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
- .get_cfg_done = e1000e_get_cfg_done,
+ .get_cfg_done = e1000e_get_cfg_done_generic,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_m88,
@@ -1949,7 +1934,7 @@ static const struct e1000_phy_operations e82_phy_ops_m88 = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_m88,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_phy_operations e82_phy_ops_bm = {
@@ -1958,7 +1943,7 @@ static const struct e1000_phy_operations e82_phy_ops_bm = {
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
- .get_cfg_done = e1000e_get_cfg_done,
+ .get_cfg_done = e1000e_get_cfg_done_generic,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_bm2,
@@ -1967,7 +1952,7 @@ static const struct e1000_phy_operations e82_phy_ops_bm = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_bm2,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_nvm_operations e82571_nvm_ops = {
@@ -2044,6 +2029,7 @@ const struct e1000_info e1000_82574_info = {
| FLAG_HAS_MSIX
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
@@ -2065,6 +2051,7 @@ const struct e1000_info e1000_82583_info = {
.mac = e1000_82583,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
new file mode 100644
index 000000000000..85cb1a3b7cd4
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_82571_H_
+#define _E1000E_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
+
+/* Intr Throttling - RW */
+#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
+
+#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAC_MASK_82574 0x01F00000
+
+/* Manageability Operation Mode mask */
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
+
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 591b71324505..c2dcfcc10857 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -34,5 +34,5 @@ obj-$(CONFIG_E1000E) += e1000e.o
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
mac.o manage.o nvm.o phy.o \
- param.o ethtool.o netdev.o
+ param.o ethtool.o netdev.o ptp.o
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 02a12b69555f..fc3a4fe1ac71 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,25 +29,6 @@
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
-
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
@@ -86,7 +67,6 @@
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
-#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -107,6 +87,7 @@
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
@@ -115,19 +96,19 @@
/* mask to determine if packets should be dropped due to frame errors */
#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
- E1000_RXD_ERR_CE | \
- E1000_RXD_ERR_SE | \
- E1000_RXD_ERR_SEQ | \
- E1000_RXD_ERR_CXE | \
- E1000_RXD_ERR_RXE)
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
@@ -232,6 +213,7 @@
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -241,9 +223,9 @@
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-/* Bit definitions for the Management Data IO (MDIO) and Management Data
- * Clock (MDC) pins in the Device Control Register.
- */
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
@@ -259,8 +241,6 @@
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
-/* Constants used to interpret the masked PCI-X bus speed. */
-
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
@@ -273,14 +253,15 @@
#define ADVERTISE_1000_FULL 0x0020
/* 1000/H is not supported, nor spec-compliant. */
-#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
- ADVERTISE_1000_FULL)
-#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
- ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+#define E1000_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
@@ -318,6 +299,7 @@
#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* Transmit Control */
#define E1000_TCTL_EN 0x00000002 /* enable Tx */
@@ -327,8 +309,6 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
-/* Transmit Arbitration Count */
-
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -383,12 +363,23 @@
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+/* Low Power IDLE Control */
+#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */
+
/* PBA constants */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_16K 0x0010 /* 16KB */
+#define E1000_PBA_RXA_MASK 0xFFFF
+
#define E1000_PBS_16K E1000_PBA_16K
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
#define IFS_MAX 80
#define IFS_MIN 40
#define IFS_RATIO 4
@@ -408,6 +399,7 @@
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
@@ -431,11 +423,11 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -443,6 +435,7 @@
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
@@ -533,6 +526,28 @@
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000
+
+#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000
+
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
@@ -548,66 +563,6 @@
E1000_GCR_TXDSCW_NO_SNOOP | \
E1000_GCR_TXDSCR_NO_SNOOP)
-/* PHY Control Register */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
-
-/* PHY Status Register */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-
-/* Autoneg Advertisement Register */
-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-
-/* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
-
-/* Autoneg Expansion Register */
-#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
-
-/* 1000BASE-T Control Register */
-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
- /* 0=DTE device */
-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
- /* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
- /* 0=Automatic Master/Slave config */
-
-/* 1000BASE-T Status Register */
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-
-
-/* PHY 1000 MII Register/Bit Definitions */
-/* PHY Registers defined by IEEE */
-#define PHY_CONTROL 0x00 /* Control Register */
-#define PHY_STATUS 0x01 /* Status Register */
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
-
-#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
-
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
@@ -639,6 +594,10 @@
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_3GIO_3 0x001A
@@ -647,8 +606,6 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
@@ -757,9 +714,6 @@
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
- * 0=Normal 10BASE-T Rx Threshold
- */
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
/* M88E1000 PHY Specific Status Register */
@@ -795,11 +749,6 @@
/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
- ((reg) & MAX_PHY_REG_ADDRESS))
-
/* Bits...
* 15-5: page
* 4-0: register offset
@@ -846,8 +795,4 @@
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
-/* FW Semaphore */
-#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
-#define E1000_FWSM_WLOCK_MAC_SHIFT 7
-
#endif /* _E1000_DEFINES_H_ */
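
   The TSYNCRXCTL bits added to this header are what the new hardware
   timestamping support programs. As a rough, hedged illustration (using the
   driver's er32/ew32 register accessors seen elsewhere in this diff; this
   exact function is an assumption, not part of the patch):

	/* Illustrative sketch only: enable Rx timestamping of all packet types. */
	static void e1000e_rx_hwtstamp_enable_sketch(struct e1000_hw *hw)
	{
		u32 regval = er32(TSYNCRXCTL);

		regval &= ~E1000_TSYNCRXCTL_TYPE_MASK;
		regval |= E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_ALL;
		ew32(TSYNCRXCTL, regval);
	}
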
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6782a2eea1bc..fcc758138b8a 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,7 +41,11 @@
#include <linux/pci-aspm.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
-
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/mii.h>
#include "hw.h"
struct e1000_info;
@@ -75,9 +79,6 @@ struct e1000_info;
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
-/* Early Receive defines */
-#define E1000_ERT_2048 0x100
-
#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
@@ -94,70 +95,6 @@ struct e1000_info;
#define DEFAULT_JUMBO 9234
-/* BM/HV Specific Registers */
-#define BM_PORT_CTRL_PAGE 769
-
-#define PHY_UPPER_SHIFT 21
-#define BM_PHY_REG(page, reg) \
- (((reg) & MAX_PHY_REG_ADDRESS) |\
- (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
- (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
-
-/* PHY Wakeup Registers and defines */
-#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
-#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
-#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
-#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
-#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
-#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
-
-#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
-#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
-#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
-#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
-#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
-#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
-#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
-
-#define HV_STATS_PAGE 778
-#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
-#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
-#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
-#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
-#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
-#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
-#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
-#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
-#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
-#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
-#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
-#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
-#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
-#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
-
-#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
-
-/* BM PHY Copper Specific Status */
-#define BM_CS_STATUS 17
-#define BM_CS_STATUS_LINK_UP 0x0400
-#define BM_CS_STATUS_RESOLVED 0x0800
-#define BM_CS_STATUS_SPEED_MASK 0xC000
-#define BM_CS_STATUS_SPEED_1000 0x8000
-
-/* 82577 Mobile Phy Status Register */
-#define HV_M_STATUS 26
-#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
-#define HV_M_STATUS_SPEED_MASK 0x0300
-#define HV_M_STATUS_SPEED_1000 0x0200
-#define HV_M_STATUS_LINK_UP 0x0040
-
-#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
-#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
-
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
@@ -309,6 +246,8 @@ struct e1000_adapter {
struct napi_struct napi;
+ unsigned int uncorr_errors; /* uncorrectable ECC errors */
+ unsigned int corr_errors; /* correctable ECC errors */
unsigned int restart_queue;
u32 txd_cmd;
@@ -353,6 +292,7 @@ struct e1000_adapter {
u64 gorc_old;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
+ u32 rx_hwtstamp_cleared;
unsigned int rx_ps_pages;
u16 rx_ps_bsize0;
@@ -366,7 +306,7 @@ struct e1000_adapter {
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
- spinlock_t stats64_lock;
+ spinlock_t stats64_lock; /* protects statistics counters */
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
@@ -402,6 +342,16 @@ struct e1000_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct delayed_work systim_overflow_work;
+ struct sk_buff *tx_hwtstamp_skb;
+ struct work_struct tx_hwtstamp_work;
+ spinlock_t systim_lock; /* protects SYSTIML/H registers */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
};
struct e1000_info {
@@ -416,6 +366,40 @@ struct e1000_info {
const struct e1000_nvm_operations *nvm_ops;
};
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
+
+/* The system time is maintained by a 64-bit counter comprised of the 32-bit
+ * SYSTIMH and SYSTIML registers. How the counter increments (and therefore
+ * its resolution) is based on the contents of the TIMINCA register - it
+ * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0).
+ * For the best accuracy, the incperiod should be as small as possible. The
+ * incvalue is scaled by a factor as large as possible (while still fitting
+ * in bits 23:0) so that relatively small clock corrections can be made.
+ *
+ * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
+ * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
+ * bits to count nanoseconds, leaving the rest for fractional nanoseconds.
+ */
+#define INCVALUE_96MHz 125
+#define INCVALUE_SHIFT_96MHz 17
+#define INCPERIOD_SHIFT_96MHz 2
+#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz)
+
+#define INCVALUE_25MHz 40
+#define INCVALUE_SHIFT_25MHz 18
+#define INCPERIOD_25MHz 1
+
+/* Another drawback of scaling the incvalue by a large factor is the
+ * 64-bit SYSTIM register overflows more quickly. This is dealt with
+ * by simply reading the clock before it overflows.
+ *
+ * Clock ns bits Overflows after
+ * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~
+ * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs
+ * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
+ */
+#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
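+
+/* The figures in the two comment blocks above can be reproduced with a small
+ * standalone userspace sketch (not part of the patch). The constants below
+ * simply restate the 96MHz case from the defines; all names are local to
+ * this example.
+ *
+ *	#include <stdio.h>
+ *	#include <stdint.h>
+ *
+ *	int main(void)
+ *	{
+ *		// incperiod = 12 >> 2 = 3 ticks, incvalue = 125 << 17
+ *		uint32_t incperiod = 12 >> 2;
+ *		uint32_t incvalue  = 125u << 17;
+ *
+ *		// real time covered by one SYSTIM increment: incperiod / 96MHz
+ *		double ns_per_inc = incperiod * (1e9 / 96e6);   // 31.25 ns
+ *
+ *		// SYSTIM counts per ns -> 2^(17 + 2), i.e. 19 fractional bits
+ *		double counts_per_ns = incvalue / ns_per_inc;   // 524288 = 2^19
+ *
+ *		// 64 - 17 = 47 whole-ns bits; overflow after 2^(47 - 2) ns
+ *		double overflow_hrs = (double)(1ULL << (47 - 2)) / 1e9 / 3600.0;
+ *
+ *		printf("counts per ns : %.0f\n", counts_per_ns); // 524288
+ *		printf("overflow (hrs): %.2f\n", overflow_hrs);  // ~9.77
+ *		return 0;
+ *	}
+ *
+ * which is why a 4-hour E1000_SYSTIM_OVERFLOW_PERIOD comfortably beats the
+ * 9.77-hour (96MHz) and 19.55-hour (25MHz) overflow times.
+ */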
+
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
#define FLAG_HAS_FLASH (1 << 1)
@@ -431,7 +415,7 @@ struct e1000_info {
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
#define FLAG_IS_QUAD_PORT_A (1 << 12)
#define FLAG_IS_QUAD_PORT (1 << 13)
-/* reserved bit14 */
+#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
#define FLAG_APME_IN_WUC (1 << 15)
#define FLAG_APME_IN_CTRL3 (1 << 16)
#define FLAG_APME_CHECK_PORT_B (1 << 17)
@@ -447,7 +431,7 @@ struct e1000_info {
#define FLAG_MSI_ENABLED (1 << 27)
/* reserved (1 << 28) */
#define FLAG_TSO_FORCE (1 << 29)
-#define FLAG_RX_RESTART_NOW (1 << 30)
+#define FLAG_RESTART_NOW (1 << 30)
#define FLAG_MSI_TEST_FAILED (1 << 31)
#define FLAG2_CRC_STRIPPING (1 << 0)
@@ -463,6 +447,7 @@ struct e1000_info {
#define FLAG2_NO_DISABLE_RX (1 << 10)
#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
+#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -512,8 +497,6 @@ extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
-extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
-
extern const struct e1000_info e1000_82571_info;
extern const struct e1000_info e1000_82572_info;
extern const struct e1000_info e1000_82573_info;
@@ -527,138 +510,8 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
-extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-
-extern s32 e1000e_commit_phy(struct e1000_hw *hw);
-
-extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
-
-extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
-extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
-
-extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
-extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
- bool state);
-extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
-extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
-extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
-extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
-
-extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
-extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
-extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
-extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
-extern s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
-extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
-extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
-extern s32 e1000e_setup_link_generic(struct e1000_hw *hw);
-extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
-extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count);
-extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
-extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
-extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
-extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
-extern void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
-extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
-extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
-extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
-extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
-extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
-extern void e1000e_reset_adaptive(struct e1000_hw *hw);
-extern void e1000e_update_adaptive(struct e1000_hw *hw);
-
-extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
-extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
-extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
-extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
-extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
-extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
-extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
-extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
-extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
-extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success);
-extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
-extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
-extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_check_downshift(struct e1000_hw *hw);
-extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
-extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
-extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
-
-extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
-extern bool e1000_check_phy_82574(struct e1000_hw *hw);
+extern void e1000e_ptp_init(struct e1000_adapter *adapter);
+extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
@@ -685,20 +538,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
-static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
-{
- return hw->phy.ops.get_cable_length(hw);
-}
-
-extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
-extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
-extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
-extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
-extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
@@ -733,10 +573,6 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
return hw->phy.ops.get_info(hw);
}
-extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
-extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
-extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-
static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
{
return readl(hw->hw_addr + reg);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f95bc6ee1c22..2c1813737f6d 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
+#include <linux/mdio.h>
#include "e1000.h"
@@ -98,7 +99,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
E1000_STAT("tx_flow_control_xon", stats.xontxc),
E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
- E1000_STAT("rx_long_byte_count", stats.gorc),
E1000_STAT("rx_csum_offload_good", hw_csum_good),
E1000_STAT("rx_csum_offload_errors", hw_csum_err),
E1000_STAT("rx_header_split", rx_hdr_split),
@@ -108,6 +108,9 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("dropped_smbus", stats.mgpdc),
E1000_STAT("rx_dma_failed", rx_dma_failed),
E1000_STAT("tx_dma_failed", tx_dma_failed),
+ E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+ E1000_STAT("corr_ecc_errors", corr_errors),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -127,7 +130,6 @@ static int e1000_get_settings(struct net_device *netdev,
u32 speed;
if (hw->phy.media_type == e1000_media_type_copper) {
-
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -325,12 +327,12 @@ static int e1000_set_settings(struct net_device *netdev,
}
/* reset the link */
-
if (netif_running(adapter->netdev)) {
e1000e_down(adapter);
e1000e_up(adapter);
- } else
+ } else {
e1000e_reset(adapter);
+ }
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
@@ -415,7 +417,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data)
adapter->msg_enable = data;
}
-static int e1000_get_regs_len(struct net_device *netdev)
+static int e1000_get_regs_len(struct net_device __always_unused *netdev)
{
#define E1000_REGS_LEN 32 /* overestimate */
return E1000_REGS_LEN * sizeof(u32);
@@ -469,10 +471,10 @@ static void e1000_get_regs(struct net_device *netdev,
regs_buff[22] = adapter->phy_stats.receive_errors;
regs_buff[23] = regs_buff[13]; /* mdix mode */
}
- regs_buff[21] = 0; /* was idle_errors */
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
- regs_buff[24] = (u32)phy_data; /* phy local receiver status */
- regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+ regs_buff[21] = 0; /* was idle_errors */
+ e1e_rphy(hw, MII_STAT1000, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -759,8 +761,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
(test[pat] & write));
val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
if (val != (test[pat] & write & mask)) {
- e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
- reg + offset, val, (test[pat] & write & mask));
+ e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
+ reg + (offset << 2), val,
+ (test[pat] & write & mask));
*data = reg;
return 1;
}
@@ -775,7 +778,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
- e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
return 1;
@@ -883,12 +886,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
E1000_FWSM_WLOCK_MAC_SHIFT;
for (i = 0; i < mac->rar_entry_count; i++) {
- /* Cannot test write-protected SHRAL[n] registers */
- if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
- continue;
+ if (mac->type == e1000_pch_lpt) {
+ /* Cannot test write-protected SHRAL[n] registers */
+ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
+ continue;
- REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
- mask, 0xFFFFFFFF);
+ /* SHRAH[9] different than the others */
+ if (i == 10)
+ mask |= (1 << 30);
+ else
+ mask &= ~(1 << 30);
+ }
+
+ REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
+ 0xFFFFFFFF);
}
for (i = 0; i < mac->mta_reg_count; i++)
@@ -922,7 +933,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
return *data;
}
-static irqreturn_t e1000_test_intr(int irq, void *data)
+static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1272,7 +1283,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
if (hw->phy.type == e1000_phy_ife) {
/* force 100, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x6100);
+ e1e_wphy(hw, MII_BMCR, 0x6100);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = er32(CTRL);
@@ -1295,9 +1306,9 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* Auto-MDI/MDIX Off */
e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
- e1e_wphy(hw, PHY_CONTROL, 0x9140);
+ e1e_wphy(hw, MII_BMCR, 0x9140);
/* autoneg off */
- e1e_wphy(hw, PHY_CONTROL, 0x8140);
+ e1e_wphy(hw, MII_BMCR, 0x8140);
break;
case e1000_phy_gg82563:
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
@@ -1309,7 +1320,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
phy_reg |= 0x006;
e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
/* Assert SW reset for above settings to take effect */
- e1000e_commit_phy(hw);
+ hw->phy.ops.commit(hw);
mdelay(1);
/* Force Full Duplex */
e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
@@ -1343,7 +1354,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
/* Enable loopback on the PHY */
-#define I82577_PHY_LBK_CTRL 19
e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
break;
default:
@@ -1351,7 +1361,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
}
/* force 1000, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x4140);
+ e1e_wphy(hw, MII_BMCR, 0x4140);
mdelay(250);
/* Now set up the MAC to the same speed/duplex as the PHY. */
@@ -1393,7 +1403,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl = er32(CTRL);
- int link = 0;
+ int link;
/* special requirements for 82571/82572 fiber adapters */
@@ -1526,11 +1536,12 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->mac.autoneg = 1;
if (hw->phy.type == e1000_phy_gg82563)
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
- e1e_rphy(hw, PHY_CONTROL, &phy_reg);
- if (phy_reg & MII_CR_LOOPBACK) {
- phy_reg &= ~MII_CR_LOOPBACK;
- e1e_wphy(hw, PHY_CONTROL, phy_reg);
- e1000e_commit_phy(hw);
+ e1e_rphy(hw, MII_BMCR, &phy_reg);
+ if (phy_reg & BMCR_LOOPBACK) {
+ phy_reg &= ~BMCR_LOOPBACK;
+ e1e_wphy(hw, MII_BMCR, phy_reg);
+ if (hw->phy.ops.commit)
+ hw->phy.ops.commit(hw);
}
break;
}
@@ -1692,7 +1703,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
return *data;
}
-static int e1000e_get_sset_count(struct net_device *netdev, int sset)
+static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
+ int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -1955,7 +1967,7 @@ static int e1000_nway_reset(struct net_device *netdev)
}
static void e1000_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
+ struct ethtool_stats __always_unused *stats,
u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1984,8 +1996,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
}
}
-static void e1000_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+static void e1000_get_strings(struct net_device __always_unused *netdev,
+ u32 stringset, u8 *data)
{
u8 *p = data;
int i;
@@ -2005,7 +2017,8 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static int e1000_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
+ struct ethtool_rxnfc *info,
+ u32 __always_unused *rule_locs)
{
info->data = 0;
@@ -2051,6 +2064,171 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl;
+ u32 status, ret_val;
+
+ if (!(adapter->flags & FLAG_IS_ICH) ||
+ !(adapter->flags2 & FLAG2_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ cap_addr = I82579_EEE_CAPABILITY;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ lpa_addr = I82579_EEE_LP_ABILITY;
+ pcs_stat_addr = I82579_EEE_PCS_STATUS;
+ break;
+ case e1000_phy_i217:
+ cap_addr = I217_EEE_CAPABILITY;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ lpa_addr = I217_EEE_LP_ABILITY;
+ pcs_stat_addr = I217_EEE_PCS_STATUS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return -EBUSY;
+
+ /* EEE Capability */
+ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+
+ /* EEE Advertised */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ /* EEE Link Partner Advertised */
+ ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ /* EEE PCS Status */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+ if (hw->phy.type == e1000_phy_82579)
+ phy_data <<= 8;
+
+release:
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return -ENODATA;
+
+ e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl);
+ status = er32(STATUS);
+
+ /* Result of the EEE auto negotiation - there is no register that
+ * has the status of the EEE negotiation so do a best-guess based
+ * on whether both Tx and Rx LPI indications have been received or
+ * base it on the link speed, the EEE advertised speeds on both ends
+ * and the speeds on which EEE is enabled locally.
+ */
+ if (((phy_data & E1000_EEE_TX_LPI_RCVD) &&
+ (phy_data & E1000_EEE_RX_LPI_RCVD)) ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (edata->advertised & ADVERTISED_100baseT_Full) &&
+ (edata->lp_advertised & ADVERTISED_100baseT_Full) &&
+ (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) ||
+ ((status & E1000_STATUS_SPEED_1000) &&
+ (edata->advertised & ADVERTISED_1000baseT_Full) &&
+ (edata->lp_advertised & ADVERTISED_1000baseT_Full) &&
+ (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE)))
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
+ edata->tx_lpi_enabled = true;
+ edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
+
+ return 0;
+}
+
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_IS_ICH) ||
+ !(adapter->flags2 & FLAG2_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ ret_val = e1000e_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.advertised != edata->advertised) {
+ e_err("Setting EEE advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ e_err("Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) {
+ e_err("Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+
+ /* reset the link */
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+ }
+
+ return 0;
+}
+
+static int e1000e_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return 0;
+
+ info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+
+ return 0;
+}
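+
+/* For reference only (not part of the patch): the capabilities filled in by
+ * e1000e_get_ts_info() are what userspace sees through the standard
+ * ETHTOOL_GET_TS_INFO ioctl (the same data "ethtool -T <iface>" prints).
+ * A minimal query, assuming an interface named eth0, looks roughly like:
+ *
+ *	#include <stdio.h>
+ *	#include <string.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <sys/socket.h>
+ *	#include <net/if.h>
+ *	#include <linux/ethtool.h>
+ *	#include <linux/sockios.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
+ *		struct ifreq ifr;
+ *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *		memset(&ifr, 0, sizeof(ifr));
+ *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  // assumed name
+ *		ifr.ifr_data = (void *)&info;
+ *
+ *		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
+ *			perror("SIOCETHTOOL");
+ *			return 1;
+ *		}
+ *
+ *		printf("so_timestamping: 0x%x\n", info.so_timestamping);
+ *		printf("phc_index      : %d\n", info.phc_index);
+ *		printf("rx_filters     : 0x%x\n", info.rx_filters);
+ *		close(fd);
+ *		return 0;
+ *	}
+ */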
+
static const struct ethtool_ops e1000_ethtool_ops = {
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
@@ -2078,7 +2256,9 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_rxnfc = e1000_get_rxnfc,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = e1000e_get_ts_info,
+ .get_eee = e1000e_get_eee,
+ .set_eee = e1000e_set_eee,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index cf217777586c..1e6b889aee87 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,331 +29,10 @@
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
-#include <linux/types.h>
-
-struct e1000_hw;
-struct e1000_adapter;
-
+#include "regs.h"
#include "defines.h"
-enum e1e_registers {
- E1000_CTRL = 0x00000, /* Device Control - RW */
- E1000_STATUS = 0x00008, /* Device Status - RO */
- E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */
- E1000_EERD = 0x00014, /* EEPROM Read - RW */
- E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
- E1000_FLA = 0x0001C, /* Flash Access - RW */
- E1000_MDIC = 0x00020, /* MDI Control - RW */
- E1000_SCTL = 0x00024, /* SerDes Control - RW */
- E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
- E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
- E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
- E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
- E1000_FCT = 0x00030, /* Flow Control Type - RW */
- E1000_VET = 0x00038, /* VLAN Ether Type - RW */
- E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
- E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
- E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
- E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
- E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
- E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
- E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
- E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
- E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */
- E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
-#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
- E1000_RCTL = 0x00100, /* Rx Control - RW */
- E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
- E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
- E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
- E1000_TCTL = 0x00400, /* Tx Control - RW */
- E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
- E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
- E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
- E1000_LEDCTL = 0x00E00, /* LED Control - RW */
- E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
- E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
- E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
-#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
- E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
- E1000_PBS = 0x01008, /* Packet Buffer Size */
- E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
- E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
- E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
- E1000_PBA_ECC = 0x01100, /* PBA ECC Register */
- E1000_ERT = 0x02008, /* Early Rx Threshold - RW */
- E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
- E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
- E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
-/* Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL(current_rx_queue)
- */
- E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
-#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8))
- E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
-#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8))
- E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
-#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8))
- E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
-#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8))
- E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
-#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8))
- E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
- E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
-#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
- E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
-
- E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
- E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
-#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8))
- E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
-#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8))
- E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
-#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8))
- E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
-#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8))
- E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
-#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8))
- E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
- E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
-#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
- E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
- E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
-#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
- E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
- E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
- E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
- E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */
- E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */
- E1000_SCC = 0x04014, /* Single Collision Count - R/clr */
- E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */
- E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */
- E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
- E1000_COLC = 0x04028, /* Collision Count - R/clr */
- E1000_DC = 0x04030, /* Defer Count - R/clr */
- E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
- E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
- E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
- E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
- E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
- E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
- E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
- E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
- E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
- E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
- E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
- E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
- E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
- E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
- E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
- E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
- E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
- E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
- E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
- E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
- E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
- E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
- E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
- E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
- E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
- E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
- E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
- E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
- E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
- E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
- E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
- E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
- E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
- E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
- E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
- E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
- E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
- E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
- E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
- E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
- E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
- E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
- E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
- E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
- E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
- E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
- E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
- E1000_IAC = 0x04100, /* Interrupt Assertion Count */
- E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
- E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
- E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
- E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
- E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */
- E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
- E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
- E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
- E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
- E1000_RFCTL = 0x05008, /* Receive Filter Control */
- E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
- E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
-#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
-#define E1000_RA (E1000_RAL(0))
- E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
-#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
- E1000_SHRAL_PCH_LPT_BASE = 0x05408,
-#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
- E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
-#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
- E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
-#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8))
- E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
-#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8))
- E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
- E1000_WUC = 0x05800, /* Wakeup Control - RW */
- E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
- E1000_WUS = 0x05810, /* Wakeup Status - RO */
- E1000_MRQC = 0x05818, /* Multiple Receive Control - RW */
- E1000_MANC = 0x05820, /* Management Control - RW */
- E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
- E1000_HOST_IF = 0x08800, /* Host Interface */
-
- E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
- E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
- E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
-#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
- E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
- E1000_GCR = 0x05B00, /* PCI-Ex Control */
- E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
- E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
- E1000_SWSM = 0x05B50, /* SW Semaphore */
- E1000_FWSM = 0x05B54, /* FW Semaphore */
- E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
- E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */
-#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4))
- E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */
-#define E1000_RSSRK(_n) (E1000_RSSRK_BASE + ((_n) * 4))
- E1000_FFLT_DBG = 0x05F04, /* Debug Register */
- E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
-#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
-#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
- E1000_HICR = 0x08F00, /* Host Interface Control */
-};
-
-#define E1000_MAX_PHY_ADDR 4
-
-/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
-#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
-#define IGP_PAGE_SHIFT 5
-#define PHY_REG_MASK 0x1F
-
-#define BM_WUC_PAGE 800
-#define BM_WUC_ADDRESS_OPCODE 0x11
-#define BM_WUC_DATA_OPCODE 0x12
-#define BM_WUC_ENABLE_PAGE 769
-#define BM_WUC_ENABLE_REG 17
-#define BM_WUC_ENABLE_BIT (1 << 2)
-#define BM_WUC_HOST_WU_BIT (1 << 4)
-#define BM_WUC_ME_WU_BIT (1 << 5)
-
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-
-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
-#define IGP01E1000_PHY_POLARITY_MASK 0x0078
-
-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
-
-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
-
-#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
-
-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
-
-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0800
-#define IGP01E1000_PSSR_SPEED_MASK 0xC000
-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
-
-#define IGP02E1000_PHY_CHANNEL_NUM 4
-#define IGP02E1000_PHY_AGC_A 0x11B1
-#define IGP02E1000_PHY_AGC_B 0x12B1
-#define IGP02E1000_PHY_AGC_C 0x14B1
-#define IGP02E1000_PHY_AGC_D 0x18B1
-
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */
-#define IGP02E1000_AGC_LENGTH_MASK 0x7F
-#define IGP02E1000_AGC_RANGE 15
-
-/* manage.c */
-#define E1000_VFTA_ENTRY_SHIFT 5
-#define E1000_VFTA_ENTRY_MASK 0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-
-#define E1000_HICR_EN 0x01 /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define E1000_HICR_C 0x02
-#define E1000_HICR_FW_RESET_ENABLE 0x40
-#define E1000_HICR_FW_RESET 0x80
-
-#define E1000_FWSM_MODE_MASK 0xE
-#define E1000_FWSM_MODE_SHIFT 1
-
-#define E1000_MNG_IAMT_MODE 0x3
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-
-/* nvm.c */
-#define E1000_STM_OPCODE 0xDB00
-
-#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
-#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
-#define E1000_KMRNCTRLSTA_REN 0x00200000
-#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
-#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
-#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
-#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
-#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
-#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
-#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
-#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
-
-#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
-#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
-#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
-
-/* IFE PHY Extended Status Control */
-#define IFE_PESC_POLARITY_REVERSED 0x0100
-
-/* IFE PHY Special Control */
-#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
-#define IFE_PSC_FORCE_POLARITY 0x0020
-
-/* IFE PHY Special Control and LED Control */
-#define IFE_PSCL_PROBE_MODE 0x0020
-#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
-
-/* IFE PHY MDIX Control */
-#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
-#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
-
-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+struct e1000_hw;
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
@@ -373,13 +52,11 @@ enum e1e_registers {
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82574L 0x10D3
#define E1000_DEV_ID_82574LA 0x10F6
-#define E1000_DEV_ID_82583V 0x150C
-
+#define E1000_DEV_ID_82583V 0x150C
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
-
#define E1000_DEV_ID_ICH8_82567V_3 0x1501
#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
@@ -414,12 +91,12 @@ enum e1e_registers {
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
-#define E1000_REVISION_4 4
+#define E1000_REVISION_4 4
-#define E1000_FUNC_1 1
+#define E1000_FUNC_1 1
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
enum e1000_mac_type {
e1000_82571,
@@ -524,16 +201,6 @@ enum e1000_serdes_link_state {
e1000_serdes_link_forced_up
};
-/* Receive Descriptor */
-struct e1000_rx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
- __le16 special;
-};
-
/* Receive Descriptor - Extended */
union e1000_rx_desc_extended {
struct {
@@ -656,7 +323,7 @@ struct e1000_data_desc {
struct {
u8 status; /* Descriptor status */
u8 popts; /* Packet Options */
- __le16 special; /* */
+ __le16 special;
} fields;
} upper;
};
@@ -752,7 +419,7 @@ struct e1000_host_command_header {
u8 checksum;
};
-#define E1000_HI_MAX_DATA_LENGTH 252
+#define E1000_HI_MAX_DATA_LENGTH 252
struct e1000_host_command_info {
struct e1000_host_command_header command_header;
u8 command_data[E1000_HI_MAX_DATA_LENGTH];
@@ -767,13 +434,18 @@ struct e1000_host_mng_command_header {
u16 command_length;
};
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header;
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
-/* Function pointers and static data for the MAC. */
+#include "mac.h"
+#include "phy.h"
+#include "nvm.h"
+#include "manage.h"
+
+/* Function pointers for the MAC. */
struct e1000_mac_operations {
s32 (*id_led_init)(struct e1000_hw *);
s32 (*blink_led)(struct e1000_hw *);
@@ -1002,4 +674,8 @@ struct e1000_hw {
} dev_spec;
};
+#include "82571.h"
+#include "80003es2lan.h"
+#include "ich8lan.h"
+
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 976336547607..dff7bff8b8e0 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -57,147 +57,6 @@
#include "e1000.h"
-#define ICH_FLASH_GFPREG 0x0000
-#define ICH_FLASH_HSFSTS 0x0004
-#define ICH_FLASH_HSFCTL 0x0006
-#define ICH_FLASH_FADDR 0x0008
-#define ICH_FLASH_FDATA0 0x0010
-#define ICH_FLASH_PR0 0x0074
-
-#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
-#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
-#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
-#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
-#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
-
-#define ICH_CYCLE_READ 0
-#define ICH_CYCLE_WRITE 2
-#define ICH_CYCLE_ERASE 3
-
-#define FLASH_GFPREG_BASE_MASK 0x1FFF
-#define FLASH_SECTOR_ADDR_SHIFT 12
-
-#define ICH_FLASH_SEG_SIZE_256 256
-#define ICH_FLASH_SEG_SIZE_4K 4096
-#define ICH_FLASH_SEG_SIZE_8K 8192
-#define ICH_FLASH_SEG_SIZE_64K 65536
-
-
-#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
-/* FW established a valid mode */
-#define E1000_ICH_FWSM_FW_VALID 0x00008000
-
-#define E1000_ICH_MNG_IAMT_MODE 0x2
-
-#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_OFF2 << 8) | \
- (ID_LED_DEF1_ON2 << 4) | \
- (ID_LED_DEF1_DEF2))
-
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
-#define E1000_ICH_NVM_SIG_VALUE 0x80
-
-#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
-
-#define E1000_FEXTNVM_SW_CONFIG 1
-#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
-
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
-
-#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
-
-#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
-
-#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
-#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
- ((reg) & MAX_PHY_REG_ADDRESS))
-#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
-#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
-
-#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
-#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
-#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
-
-#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
-
-#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
-
-/* SMBus Control Phy Register */
-#define CV_SMB_CTRL PHY_REG(769, 23)
-#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
-
-/* SMBus Address Phy Register */
-#define HV_SMB_ADDR PHY_REG(768, 26)
-#define HV_SMB_ADDR_MASK 0x007F
-#define HV_SMB_ADDR_PEC_EN 0x0200
-#define HV_SMB_ADDR_VALID 0x0080
-#define HV_SMB_ADDR_FREQ_MASK 0x1100
-#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
-#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
-
-/* PHY Power Management Control */
-#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
-
-/* PHY Low Power Idle Control */
-#define I82579_LPI_CTRL PHY_REG(772, 20)
-#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
-#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
-
-/* EMI Registers */
-#define I82579_EMI_ADDR 0x10
-#define I82579_EMI_DATA 0x11
-#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
-#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
-#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
-#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
-#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
-#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
-
-/* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
-#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
-#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
-#define I217_CGFREG PHY_REG(772, 29)
-#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
-#define I217_MEMPWR PHY_REG(772, 26)
-#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
-
-/* Strapping Option Register - RO */
-#define E1000_STRAP 0x0000C
-#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
-#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
-#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
-#define E1000_STRAP_SMT_FREQ_SHIFT 12
-
-/* OEM Bits Phy Register */
-#define HV_OEM_BITS PHY_REG(768, 25)
-#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
-#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
-#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
-
-#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
-#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
-
-/* KMRN Mode Control */
-#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
-#define HV_KMRN_MDIO_SLOW 0x0400
-
-/* KMRN FIFO Control and Status */
-#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
-
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -252,7 +111,6 @@ union ich8_flash_protected_range {
u32 regval;
};
-static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
@@ -264,9 +122,7 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
u8 size, u16 *data);
-static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
-static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
@@ -278,7 +134,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
-static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
@@ -330,12 +186,12 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
u16 retry_count;
for (retry_count = 0; retry_count < 2; retry_count++) {
- ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
if (ret_val || (phy_reg == 0xFFFF))
continue;
phy_id = (u32)(phy_reg << 16);
- ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
if (ret_val || (phy_reg == 0xFFFF)) {
phy_id = 0;
continue;
@@ -378,10 +234,15 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
s32 ret_val;
u16 phy_reg;
+ /* Gate automatic PHY configuration by hardware on managed and
+ * non-managed 82579 and newer adapters.
+ */
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
e_dbg("Failed to initialize PHY flow\n");
- return ret_val;
+ goto out;
}
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
@@ -402,13 +263,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
- /* Gate automatic PHY configuration by hardware on
- * non-managed 82579
- */
- if ((hw->mac.type == e1000_pch2lan) &&
- !(fwsm & E1000_ICH_FWSM_FW_VALID))
- e1000_gate_hw_phy_config_ich8lan(hw, true);
-
if (e1000_phy_is_accessible_pchlan(hw)) {
if (hw->mac.type == e1000_pch_lpt) {
/* Unforce SMBus mode in PHY */
@@ -443,6 +297,15 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
ew32(FEXTNVM3, mac_reg);
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode
+ * So ensure that the MAC is also out of SMBus mode
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+ }
+
/* Toggle LANPHYPC Value bit */
mac_reg = er32(CTRL);
mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -476,6 +339,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
ret_val = e1000e_phy_hw_reset_generic(hw);
+out:
/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
@@ -495,7 +359,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
phy->addr = 1;
phy->reset_delay_us = 100;
@@ -778,68 +642,143 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
- /* Gate automatic PHY configuration by hardware on managed
- * 82579 and i217
- */
- if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
- (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
- e1000_gate_hw_phy_config_ich8lan(hw, true);
-
return 0;
}
/**
+ * __e1000_access_emi_reg_locked - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ *
+ * This helper function assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
+ else
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg_locked - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ return __e1000_access_emi_reg_locked(hw, addr, data, true);
+}
+
+/**
+ * e1000_write_emi_reg_locked - Write Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be written to the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+{
+ return __e1000_access_emi_reg_locked(hw, addr, &data, false);
+}
+
+/**
* e1000_set_eee_pchlan - Enable/disable EEE support
* @hw: pointer to the HW structure
*
- * Enable/disable EEE based on setting in dev_spec structure. The bits in
- * the LPI Control register will remain set only if/when link is up.
+ * Enable/disable EEE based on setting in dev_spec structure, the duplex of
+ * the link and the EEE capabilities of the link partner. The LPI Control
+ * register bits will remain set only if/when link is up.
**/
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
- s32 ret_val = 0;
- u16 phy_reg;
+ s32 ret_val;
+ u16 lpi_ctrl;
if ((hw->phy.type != e1000_phy_82579) &&
(hw->phy.type != e1000_phy_i217))
return 0;
- ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- if (dev_spec->eee_disable)
- phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
- else
- phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
-
- ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+ ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
if (ret_val)
- return ret_val;
+ goto release;
+
+ /* Clear bits that enable EEE in various speeds */
+ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
+
+ /* Enable EEE if not disabled by user */
+ if (!dev_spec->eee_disable) {
+ u16 lpa, pcs_status, data;
- if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
/* Save off link partner's EEE ability */
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I217_EEE_LP_ABILITY);
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto release;
+ }
+ ret_val = e1000_read_emi_reg_locked(hw, lpa,
+ &dev_spec->eee_lp_ability);
if (ret_val)
goto release;
- e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
- /* EEE is not supported in 100Half, so ignore partner's EEE
- * in 100 ability if full-duplex is not advertised.
+ /* Enable EEE only for speeds in which the link partner is
+ * EEE capable.
*/
- e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
- if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
- dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
-release:
- hw->phy.ops.release(hw);
+ if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ e1e_rphy_locked(hw, MII_LPA, &data);
+ if (data & LPA_100FULL)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ else
+ /* EEE is not supported in 100Half, so ignore
+ * partner's EEE in 100 ability if full-duplex
+ * is not advertised.
+ */
+ dev_spec->eee_lp_ability &=
+ ~I82579_EEE_100_SUPPORTED;
+ }
+
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
}
- return 0;
+ ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
}
/**
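[Editor's note] The new EMI helpers above wrap the usual indirect address/data access pattern: the target offset is written to the EMI address register (I82579_EMI_ADDR, 0x10) and the payload is then read from or written to the EMI data register (I82579_EMI_DATA, 0x11) while the PHY semaphore is held. A minimal standalone sketch of that pattern follows; phy_regs, emi_space, wphy, rphy and emi_access are illustrative stand-ins for the real locked PHY accessors, not driver symbols, and the real helpers additionally propagate the status of each locked access.

        #include <stdio.h>
        #include <stdint.h>
        #include <stdbool.h>

        #define EMI_ADDR 0x10   /* indirect address register (I82579_EMI_ADDR) */
        #define EMI_DATA 0x11   /* indirect data register    (I82579_EMI_DATA) */

        static uint16_t phy_regs[32];        /* fake PHY register file */
        static uint16_t emi_space[0x10000];  /* fake EMI-addressed space */

        /* stand-in for e1e_wphy_locked() */
        static void wphy(uint8_t reg, uint16_t val)
        {
                phy_regs[reg] = val;
                if (reg == EMI_DATA)
                        emi_space[phy_regs[EMI_ADDR]] = val;
        }

        /* stand-in for e1e_rphy_locked() */
        static uint16_t rphy(uint8_t reg)
        {
                if (reg == EMI_DATA)
                        return emi_space[phy_regs[EMI_ADDR]];
                return phy_regs[reg];
        }

        /* mirrors __e1000_access_emi_reg_locked(): program the address,
         * then touch the data register for the actual read or write */
        static void emi_access(uint16_t address, uint16_t *data, bool read)
        {
                wphy(EMI_ADDR, address);
                if (read)
                        *data = rphy(EMI_DATA);
                else
                        wphy(EMI_DATA, *data);
        }

        int main(void)
        {
                uint16_t mse = 0x0034, readback = 0;

                emi_access(0x084F, &mse, false);     /* write MSE threshold */
                emi_access(0x084F, &readback, true); /* read it back */
                printf("EMI[0x084F] = 0x%04x\n", (unsigned)readback);
                return 0;
        }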
@@ -1017,7 +956,7 @@ static DEFINE_MUTEX(nvm_mutex);
*
* Acquires the mutex for performing NVM operations.
**/
-static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
mutex_lock(&nvm_mutex);
@@ -1030,7 +969,7 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
*
* Releases the mutex used while performing NVM operations.
**/
-static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
mutex_unlock(&nvm_mutex);
}
@@ -1322,7 +1261,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
u32 strap = er32(STRAP);
u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
E1000_STRAP_SMT_FREQ_SHIFT;
- s32 ret_val = 0;
+ s32 ret_val;
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -1558,7 +1497,7 @@ release:
**/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
- s32 ret_val = 0;
+ s32 ret_val;
u32 ctrl_reg = 0;
u32 ctrl_ext = 0;
u32 reg = 0;
@@ -1727,7 +1666,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
*/
if (hw->phy.revision < 2) {
e1000e_phy_sw_reset(hw);
- ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
+ ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
}
}
@@ -1757,6 +1696,11 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
+ if (ret_val)
+ goto release;
+
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
hw->phy.ops.release(hw);
@@ -1983,22 +1927,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
/* Set MDIO slow mode before any other MDIO access */
ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
- if (ret_val)
- goto release;
/* set MSE higher to enable link to stay up when noise is high */
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
- if (ret_val)
- goto release;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
if (ret_val)
goto release;
/* drop link after 5 times MSE threshold was reached */
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
release:
hw->phy.ops.release(hw);
@@ -2172,10 +2112,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I82579_LPI_UPDATE_TIMER);
- if (!ret_val)
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
+ ret_val = e1000_write_emi_reg_locked(hw,
+ I82579_LPI_UPDATE_TIMER,
+ 0x1387);
hw->phy.ops.release(hw);
}
@@ -2219,7 +2158,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
- s32 ret_val = 0;
+ s32 ret_val;
u16 oem_reg;
ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
@@ -2277,6 +2216,8 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
@@ -2949,19 +2890,32 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
s32 ret_val;
u16 data;
+ u16 word;
+ u16 valid_csum_mask;
- /* Read 0x19 and check bit 6. If this bit is 0, the checksum
- * needs to be fixed. This bit is an indication that the NVM
- * was prepared by OEM software and did not calculate the
- * checksum...a likely scenario.
+ /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
+ * the checksum needs to be fixed. This bit is an indication that
+ * the NVM was prepared by OEM software and did not calculate
+ * the checksum...a likely scenario.
*/
- ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ word = NVM_COMPAT;
+ valid_csum_mask = NVM_COMPAT_VALID_CSUM;
+ break;
+ default:
+ word = NVM_FUTURE_INIT_WORD1;
+ valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
+ break;
+ }
+
+ ret_val = e1000_read_nvm(hw, word, 1, &data);
if (ret_val)
return ret_val;
- if (!(data & 0x40)) {
- data |= 0x40;
- ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+ if (!(data & valid_csum_mask)) {
+ data |= valid_csum_mask;
+ ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
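[Editor's note] The hunk above replaces the hard-coded word 0x19 / bit 0x40 test with a per-family selection of which NVM word carries the valid-image-checksum flag (NVM_COMPAT on pch_lpt, NVM_FUTURE_INIT_WORD1 otherwise). A standalone sketch of the resulting control flow is below; only the legacy 0x19/0x40 pair is taken from the original code, the pch_lpt word and mask are placeholders, and nvm[] plus pick_csum_word() are illustrative names.

        #include <stdio.h>
        #include <stdint.h>

        enum mac_type { MAC_OTHER, MAC_PCH_LPT };

        static uint16_t nvm[64];        /* fake NVM image, all words zero */

        /* pick the word and bit that flag a valid image checksum, per MAC
         * family; the pch_lpt offsets here are illustrative placeholders */
        static void pick_csum_word(enum mac_type type, uint16_t *word,
                                   uint16_t *mask)
        {
                if (type == MAC_PCH_LPT) {
                        *word = 0x0E;   /* stands in for NVM_COMPAT */
                        *mask = 0x0040; /* stands in for NVM_COMPAT_VALID_CSUM */
                } else {
                        *word = 0x19;   /* legacy NVM_FUTURE_INIT_WORD1 */
                        *mask = 0x0040; /* legacy valid-checksum bit */
                }
        }

        int main(void)
        {
                uint16_t word, mask;

                pick_csum_word(MAC_PCH_LPT, &word, &mask);
                if (!(nvm[word] & mask)) {
                        nvm[word] |= mask;      /* mark image checksum valid */
                        /* ...the driver then rewrites the word and updates
                         * the NVM checksum before validating it */
                        printf("fixed valid-csum bit in word 0x%02x\n",
                               (unsigned)word);
                }
                return 0;
        }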
@@ -3624,6 +3578,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_ich8lan)
reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
ew32(RFCTL, reg);
+
+ /* Enable ECC on Lynxpoint */
+ if (hw->mac.type == e1000_pch_lpt) {
+ reg = er32(PBECCSTS);
+ reg |= E1000_PBECCSTS_ECC_ENABLE;
+ ew32(PBECCSTS, reg);
+
+ reg = er32(CTRL);
+ reg |= E1000_CTRL_MEHE;
+ ew32(CTRL, reg);
+ }
}
/**
@@ -3964,8 +3929,7 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
if (ret_val)
return;
reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
- ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- reg_data);
+ e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
}
/**
@@ -4000,19 +3964,20 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if (!dev_spec->eee_disable) {
u16 eee_advert;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I217_EEE_ADVERTISEMENT);
+ ret_val =
+ e1000_read_emi_reg_locked(hw,
+ I217_EEE_ADVERTISEMENT,
+ &eee_advert);
if (ret_val)
goto release;
- e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
/* Disable LPLU if both link partners support 100BaseT
* EEE and 100Full is advertised on both ends of the
* link.
*/
- if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+ if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
(dev_spec->eee_lp_ability &
- I217_EEE_100_SUPPORTED) &&
+ I82579_EEE_100_SUPPORTED) &&
(hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
E1000_PHY_CTRL_NOND0A_LPLU);
@@ -4026,7 +3991,6 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* The SMBus release must also be disabled on LCD reset.
*/
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
-
/* Enable proxy to reset only on power good. */
e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
@@ -4287,7 +4251,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
u32 bank = 0;
u32 status;
- e1000e_get_cfg_done(hw);
+ e1000e_get_cfg_done_generic(hw);
/* Wait for indication from h/w that it has completed basic config */
if (hw->mac.type >= e1000_ich10lan) {
@@ -4416,7 +4380,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
.reset_hw = e1000_reset_hw_ich8lan,
.init_hw = e1000_init_hw_ich8lan,
.setup_link = e1000_setup_link_ich8lan,
- .setup_physical_interface= e1000_setup_copper_link_ich8lan,
+ .setup_physical_interface = e1000_setup_copper_link_ich8lan,
/* id_led_init dependent on mac type */
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
@@ -4438,7 +4402,7 @@ static const struct e1000_phy_operations ich8_phy_ops = {
static const struct e1000_nvm_operations ich8_nvm_ops = {
.acquire = e1000_acquire_nvm_ich8lan,
- .read = e1000_read_nvm_ich8lan,
+ .read = e1000_read_nvm_ich8lan,
.release = e1000_release_nvm_ich8lan,
.reload = e1000e_reload_nvm_generic,
.update = e1000_update_nvm_checksum_ich8lan,
@@ -4520,6 +4484,7 @@ const struct e1000_info e1000_pch2_info = {
.mac = e1000_pch2lan,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
@@ -4528,7 +4493,7 @@ const struct e1000_info e1000_pch2_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = DEFAULT_JUMBO,
+ .max_hw_frame_size = 9018,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -4539,6 +4504,7 @@ const struct e1000_info e1000_pch_lpt_info = {
.mac = e1000_pch_lpt,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
@@ -4547,7 +4513,7 @@ const struct e1000_info e1000_pch_lpt_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = DEFAULT_JUMBO,
+ .max_hw_frame_size = 9018,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
new file mode 100644
index 000000000000..b6d3174d7d2d
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -0,0 +1,268 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_ICH8LAN_H_
+#define _E1000E_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_PR0 0x0074
+
+/* Requires up to 10 seconds when MNG might be accessing part. */
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
+
+#define ICH_CYCLE_READ 0
+#define ICH_CYCLE_WRITE 2
+#define ICH_CYCLE_ERASE 3
+
+#define FLASH_GFPREG_BASE_MASK 0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT 12
+
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_8K 8192
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+#define E1000_ICH_MNG_IAMT_MODE 0x2
+
+#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
+#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_OFF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_VALUE 0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+
+#define E1000_FEXTNVM_SW_CONFIG 1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
+#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES 7
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+#define HV_STATS_PAGE 778
+/* Half-duplex collision counts */
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+#define HV_SMB_ADDR_FREQ_MASK 0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT 12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE 0x2000
+#define I82579_LPI_CTRL_1000_ENABLE 0x4000
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
+
+/* Extended Management Interface (EMI) Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
+#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
+#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
+#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
+#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
+#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
+#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
+#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+
+#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
+#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
+#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
+#define I217_CGFREG PHY_REG(772, 29)
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
+#define I217_MEMPWR PHY_REG(772, 26)
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
+
+/* Latency Tolerance Reporting */
+#define E1000_LTRV 0x000F8
+#define E1000_LTRV_SCALE_MAX 5
+#define E1000_LTRV_SCALE_FACTOR 5
+#define E1000_LTRV_REQ_SHIFT 15
+#define E1000_LTRV_NOSNOOP_SHIFT 16
+#define E1000_LTRV_SEND (1 << 30)
+
+/* Proprietary Latency Tolerance Reporting PCI Capability */
+#define E1000_PCI_LTR_CAP_LPT 0xA8
+
+/* OBFF Control & Threshold Defines */
+#define E1000_SVCR_OFF_EN 0x00000001
+#define E1000_SVCR_OFF_MASKINT 0x00001000
+#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
+#define E1000_SVCR_OFF_TIMER_SHIFT 16
+#define E1000_SVT_OFF_HWM_MASK 0x0000001F
+
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+#endif /* _E1000E_ICH8LAN_H_ */
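[Editor's note] Most of the PHY register defines in the new header are built with PHY_REG(page, reg), which packs a PHY page number and a 5-bit register offset into one address (page << 5 | reg). A tiny worked example, assuming the driver's MAX_PHY_REG_ADDRESS value of 0x1F:

        #include <stdio.h>

        /* MAX_PHY_REG_ADDRESS is assumed to be 0x1F, as in the driver */
        #define MAX_PHY_REG_ADDRESS 0x1F
        #define PHY_PAGE_SHIFT      5
        #define PHY_REG(page, reg)  (((page) << PHY_PAGE_SHIFT) | \
                                     ((reg) & MAX_PHY_REG_ADDRESS))

        int main(void)
        {
                /* HV_OEM_BITS lives on page 768, register 25 */
                unsigned int addr = PHY_REG(768, 25);

                printf("encoded address: %u (page %u, reg %u)\n",
                       addr, addr >> PHY_PAGE_SHIFT,
                       addr & MAX_PHY_REG_ADDRESS);
                return 0;       /* prints 24601 (768 * 32 + 25) */
        }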
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 54d9dafaf126..b78e02174601 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -165,7 +165,7 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
u32 i;
- s32 ret_val = 0;
+ s32 ret_val;
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
@@ -1021,6 +1021,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = 0;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
@@ -1052,14 +1053,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
- ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
if (ret_val)
return ret_val;
- if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) {
e_dbg("Copper PHY and Auto Neg has not completed.\n");
return ret_val;
}
@@ -1070,11 +1071,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* Page Ability Register (Address 5) to determine how
* flow control was negotiated.
*/
- ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg);
if (ret_val)
return ret_val;
- ret_val =
- e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+ ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg);
if (ret_val)
return ret_val;
@@ -1111,8 +1111,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* 1 | DC | 1 | DC | E1000_fc_full
*
*/
- if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) {
/* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx
@@ -1134,10 +1134,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*-------|---------|-------|---------|--------------------
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
*/
- else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
hw->fc.current_mode = e1000_fc_tx_pause;
e_dbg("Flow Control = Tx PAUSE frames only.\n");
}
@@ -1148,10 +1148,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*-------|---------|-------|---------|--------------------
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
*/
- else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
+ !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = Rx PAUSE frames only.\n");
} else {
@@ -1185,6 +1185,130 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
}
}
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = er32(PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ e_dbg("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = er32(PCS_ANADV);
+ pcs_lp_ability_reg = er32(PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ e_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ e_dbg("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ e_dbg("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = er32(PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ ew32(PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000e_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
return 0;
}
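[Editor's note] The SerDes branch added above resolves flow control from the local and link-partner PAUSE/ASM_DIR advertisement bits exactly as the comment table describes. The same resolution logic, reduced to a standalone sketch with plain booleans in place of the PCS_ANADV/PCS_LPAB register bits (resolve_fc and want_full are illustrative names, not driver symbols):

        #include <stdio.h>
        #include <stdbool.h>

        enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

        /* resolve negotiated flow control from local/partner PAUSE and
         * ASM_DIR, following the IEEE 802.3 pause resolution table used in
         * the driver; want_full says whether the user asked for full pause */
        static enum fc_mode resolve_fc(bool l_pause, bool l_asm,
                                       bool p_pause, bool p_asm,
                                       bool want_full)
        {
                if (l_pause && p_pause)
                        return want_full ? FC_FULL : FC_RX_PAUSE;
                if (!l_pause && l_asm && p_pause && p_asm)
                        return FC_TX_PAUSE;
                if (l_pause && l_asm && !p_pause && p_asm)
                        return FC_RX_PAUSE;
                return FC_NONE;
        }

        int main(void)
        {
                static const char *name[] = { "none", "rx pause",
                                              "tx pause", "full" };

                printf("%s\n", name[resolve_fc(true, true, true, false, true)]);  /* full */
                printf("%s\n", name[resolve_fc(false, true, true, true, true)]);  /* tx pause */
                printf("%s\n", name[resolve_fc(true, true, false, true, true)]);  /* rx pause */
                return 0;
        }

As in the driver, a symmetric PAUSE match falls back to Rx-only pause when the user did not request full flow control.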
@@ -1231,8 +1355,8 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
* Sets the speed and duplex to gigabit full duplex (the only possible option)
* for fiber/serdes links.
**/
-s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused
+ *hw, u16 *speed, u16 *duplex)
{
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
new file mode 100644
index 000000000000..a61fee404ebe
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MAC_H_
+#define _E1000E_MAC_H_
+
+s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000e_led_on_generic(struct e1000_hw *hw);
+s32 e1000e_led_off_generic(struct e1000_hw *hw);
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+s32 e1000e_setup_link_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000e_reset_adaptive(struct e1000_hw *hw);
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+void e1000e_update_adaptive(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 6dc47beb3adc..e4b0f1ef92f6 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,19 +28,6 @@
#include "e1000.h"
-enum e1000_mng_mode {
- e1000_mng_mode_none = 0,
- e1000_mng_mode_asf,
- e1000_mng_mode_pt,
- e1000_mng_mode_ipmi,
- e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG 0x20000000
-
-/* Intel(R) Active Management Technology signature */
-#define E1000_IAMT_SIGNATURE 0x544D4149
-
/**
* e1000_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
new file mode 100644
index 000000000000..326897c29ea8
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MANAGE_H_
+#define _E1000E_MANAGE_H_
+
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fbf75fdca994..a177b8b65c44 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -42,7 +42,6 @@
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
@@ -56,7 +55,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
+#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -87,20 +86,7 @@ struct e1000_reg_info {
char *name;
};
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
-
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
-
/* General Registers */
{E1000_CTRL, "CTRL"},
{E1000_STATUS, "STATUS"},
@@ -488,20 +474,87 @@ static int e1000_desc_unused(struct e1000_ring *ring)
}
/**
+ * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
+ * @adapter: board private structure
+ * @hwtstamps: time stamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * Convert the system time value stored in the RX/TXSTMP registers into a
+ * hwtstamp which can be used by the upper level time stamping functions.
+ *
+ * The 'systim_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two 32 bit registers. The first read latches the
+ * value.
+ **/
+static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ u64 ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ ns = timecounter_cyc2time(&adapter->tc, systim);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
+ * @adapter: board private structure
+ * @status: descriptor extended error and status field
+ * @skb: particular skb to include time stamp
+ *
+ * If the time stamp is valid, convert it into the timecounter ns value
+ * and store that result into the shhwtstamps structure which is passed
+ * up the network stack.
+ **/
+static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u64 rxstmp;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
+ !(status & E1000_RXDEXT_STATERR_TST) ||
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+ /* The Rx time stamp registers contain the time stamp. No other
+ * received packet will be time stamped until the Rx time stamp
+ * registers are read. Because only one packet can be time stamped
+ * at a time, the register values must belong to this packet and
+ * therefore none of the other additional attributes need to be
+ * compared.
+ */
+ rxstmp = (u64)er32(RXSTMPL);
+ rxstmp |= (u64)er32(RXSTMPH) << 32;
+ e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
+
+ adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
+}
+
+/**
* e1000_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
- * @status: descriptor status field as written by hardware
+ * @staterr: descriptor extended error and status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
* @skb: pointer to sk_buff to be indicated to stack
**/
static void e1000_receive_skb(struct e1000_adapter *adapter,
struct net_device *netdev, struct sk_buff *skb,
- u8 status, __le16 vlan)
+ u32 staterr, __le16 vlan)
{
u16 tag = le16_to_cpu(vlan);
+
+ e1000e_rx_hwtstamp(adapter, staterr, skb);
+
skb->protocol = eth_type_trans(skb, netdev);
- if (status & E1000_RXD_STAT_VP)
+ if (staterr & E1000_RXD_STAT_VP)
__vlan_hwaccel_put_tag(skb, tag);
napi_gro_receive(&adapter->napi, skb);
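[Editor's note] e1000e_rx_hwtstamp() depends on the split-register convention spelled out in its comment: reading the low half latches the value, so the two 32-bit reads yield one consistent 64-bit stamp and no other packet is stamped until the registers are read. A small sketch of assembling such a value, with a fake latch standing in for the hardware (systim, read_stmpl and read_stmph are illustrative names, not driver symbols):

        #include <stdio.h>
        #include <stdint.h>

        /* fake hardware: a free-running 64-bit SYSTIM and a latched high half */
        static uint64_t systim = 0x00000001ffffffffULL;
        static uint32_t latched_hi;

        static uint32_t read_stmpl(void)
        {
                uint32_t lo = (uint32_t)systim;

                latched_hi = (uint32_t)(systim >> 32); /* low read latches high */
                return lo;
        }

        static uint32_t read_stmph(void)
        {
                return latched_hi;
        }

        int main(void)
        {
                /* same order as the driver: low half first, then the latched high */
                uint64_t stamp = (uint64_t)read_stmpl();

                stamp |= (uint64_t)read_stmph() << 32;
                printf("raw timestamp: 0x%016llx\n", (unsigned long long)stamp);
                return 0;
        }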
@@ -765,7 +818,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
- unsigned int bufsz = 256 - 16 /* for skb_reserve */;
+ unsigned int bufsz = 256 - 16; /* for skb_reserve */
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
@@ -1050,9 +1103,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
adapter->tx_hang_recheck = false;
netif_stop_queue(netdev);
- e1e_rphy(hw, PHY_STATUS, &phy_status);
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
- e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+ e1e_rphy(hw, MII_BMSR, &phy_status);
+ e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
+ e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
@@ -1092,6 +1145,41 @@ static void e1000_print_hw_hang(struct work_struct *work)
}
/**
+ * e1000e_tx_hwtstamp_work - check for Tx time stamp
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb. The timestamp must
+ * be for this skb because only one such packet is allowed in the queue.
+ */
+static void e1000e_tx_hwtstamp_work(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
+ tx_hwtstamp_work);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!adapter->tx_hwtstamp_skb)
+ return;
+
+ if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 txstmp;
+
+ txstmp = er32(TXSTMPL);
+ txstmp |= (u64)er32(TXSTMPH) << 32;
+
+ e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
+
+ skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ } else {
+ /* reschedule to check later */
+ schedule_work(&adapter->tx_hwtstamp_work);
+ }
+}
+
+/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: Tx descriptor ring
*
@@ -1345,8 +1433,8 @@ copydone:
cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
adapter->rx_hdr_split++;
- e1000_receive_skb(adapter, netdev, skb,
- staterr, rx_desc->wb.middle.vlan);
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.middle.vlan);
next_desc:
rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
@@ -1645,7 +1733,7 @@ static void e1000e_downshift_workaround(struct work_struct *work)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr_msi(int irq, void *data)
+static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1671,13 +1759,30 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
/* disable receives */
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- adapter->flags |= FLAG_RX_RESTART_NOW;
+ adapter->flags |= FLAG_RESTART_NOW;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
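[Editor's note] The ECC handling added to both interrupt handlers splits PBECCSTS into a correctable-error count and an uncorrectable-error count before scheduling a reset. A sketch of that field extraction is below; the mask and shift values are assumptions chosen for illustration, not quoted from the driver headers.

        #include <stdio.h>

        /* assumed layout: correctable count in the low byte, uncorrectable
         * count in the next byte (illustrative values only) */
        #define CORR_ERR_CNT_MASK     0x000000FFu
        #define UNCORR_ERR_CNT_MASK   0x0000FF00u
        #define UNCORR_ERR_CNT_SHIFT  8

        int main(void)
        {
                unsigned int pbeccsts = 0x00010203u;    /* sample snapshot */
                unsigned int corr = pbeccsts & CORR_ERR_CNT_MASK;
                unsigned int uncorr = (pbeccsts & UNCORR_ERR_CNT_MASK) >>
                                      UNCORR_ERR_CNT_SHIFT;

                /* the ISR adds these to adapter->corr_errors/uncorr_errors
                 * and then schedules the reset outside interrupt context */
                printf("correctable: %u, uncorrectable: %u\n", corr, uncorr);
                return 0;
        }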
@@ -1694,7 +1799,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr(int irq, void *data)
+static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1734,13 +1839,30 @@ static irqreturn_t e1000_intr(int irq, void *data)
/* disable receives */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- adapter->flags |= FLAG_RX_RESTART_NOW;
+ adapter->flags |= FLAG_RESTART_NOW;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -1752,7 +1874,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t e1000_msix_other(int irq, void *data)
+static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1784,8 +1906,7 @@ no_link_interrupt:
return IRQ_HANDLED;
}
-
-static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
+static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1803,7 +1924,7 @@ static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
+static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1890,7 +2011,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
/* Auto-Mask Other interrupts upon ICR read */
-#define E1000_EIAC_MASK_82574 0x01F00000
ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
ctrl_ext |= E1000_CTRL_EXT_EIAME;
ew32(CTRL_EXT, ctrl_ext);
@@ -2104,6 +2224,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+ } else if (hw->mac.type == e1000_pch_lpt) {
+ ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
}
@@ -2358,9 +2480,7 @@ void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
* while increasing bulk throughput. This functionality is controlled
* by the InterruptThrottleRate module parameter.
**/
-static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
- u16 itr_setting, int packets,
- int bytes)
+static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
{
unsigned int retval = itr_setting;
@@ -2405,7 +2525,6 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
static void e1000_set_itr(struct e1000_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
u16 current_itr;
u32 new_itr = adapter->itr;
@@ -2421,18 +2540,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
+ adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
@@ -2468,10 +2585,7 @@ set_itr_now:
if (adapter->msix_entries)
adapter->rx_ring->set_itr = 1;
else
- if (new_itr)
- ew32(ITR, 1000000000 / (new_itr * 256));
- else
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, new_itr);
}
}
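[Editor's note] The open-coded write that e1000e_write_itr() now replaces shows the unit conversion: the ITR register holds the minimum gap between interrupts in 256 ns units, so a target rate in interrupts per second converts as 10^9 / (rate * 256), with 0 disabling throttling. A quick standalone sketch of that conversion (itr_reg_from_rate is an illustrative helper, not a driver function):

        #include <stdio.h>

        /* convert a target interrupt rate (ints/sec) to an ITR register
         * value; the register is the minimum inter-interrupt gap in 256 ns
         * units, and 0 turns throttling off */
        static unsigned int itr_reg_from_rate(unsigned int ints_per_sec)
        {
                if (!ints_per_sec)
                        return 0;
                return 1000000000u / (ints_per_sec * 256u);
        }

        int main(void)
        {
                printf("20000 ints/s -> ITR %u\n", itr_reg_from_rate(20000)); /* 195 */
                printf(" 8000 ints/s -> ITR %u\n", itr_reg_from_rate(8000));  /* 488 */
                return 0;
        }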
@@ -3013,7 +3127,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
/* just started the receive unit, no need to restart */
- adapter->flags &= ~FLAG_RX_RESTART_NOW;
+ adapter->flags &= ~FLAG_RESTART_NOW;
}
/**
@@ -3108,18 +3222,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rxcsum &= ~E1000_RXCSUM_TUOFL;
ew32(RXCSUM, rxcsum);
- if (adapter->hw.mac.type == e1000_pch2lan) {
- /* With jumbo frames, excessive C-state transition
- * latencies result in dropped transactions.
- */
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ /* With jumbo frames, excessive C-state transition latencies result
+ * in dropped transactions.
+ */
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ u32 lat =
+ ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
+ adapter->max_frame_size) * 8 / 1000;
+
+ if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl | 0x3);
- pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
- } else {
- pm_qos_update_request(&adapter->netdev->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
}
+
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
+ } else {
+ pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
/* Enable Receives */
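[Editor's note] The reworked jumbo-frame path above computes lat as the time, in microseconds, needed to fill the remaining Rx packet-buffer headroom at 1 Gb/s (bytes * 8 bits, divided by 1000 bits per microsecond), and uses it as the PM QoS CPU latency bound instead of the previous fixed 55 µs. A worked sketch of the arithmetic; the 32 KB Rx allocation and 9022-byte frame are sample numbers, not values from the patch.

        #include <stdio.h>

        int main(void)
        {
                unsigned int rxa_kb = 32;       /* sample Rx buffer allocation */
                unsigned int max_frame = 9022;  /* sample jumbo max frame, bytes */

                /* headroom in bytes, converted to the time (us) it takes to
                 * fill that headroom at 1 Gb/s: bits / (1000 bits per us) */
                unsigned int lat_us = (rxa_kb * 1024 - max_frame) * 8 / 1000;

                printf("allowed C-state exit latency: %u us\n", lat_us); /* 189 */
                return 0;
        }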
@@ -3308,6 +3427,241 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
}
/**
+ * e1000e_get_base_timinca - get default SYSTIM time increment attributes
+ * @adapter: board private structure
+ * @timinca: pointer to returned time increment attributes
+ *
+ * Get attributes for incrementing the System Time Register SYSTIML/H at
+ * the default base frequency, and set the cyclecounter shift value.
+ **/
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 incvalue, incperiod, shift;
+
+ /* Make sure clock is enabled on I217 before checking the frequency */
+ if ((hw->mac.type == e1000_pch_lpt) &&
+ !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
+ u32 fextnvm7 = er32(FEXTNVM7);
+
+ if (!(fextnvm7 & (1 << 0))) {
+ ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+ e1e_flush();
+ }
+ }
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ /* On I217, the clock frequency is 25MHz or 96MHz as
+ * indicated by the System Clock Frequency Indication
+ */
+ if ((hw->mac.type != e1000_pch_lpt) ||
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ /* Stable 96MHz frequency */
+ incperiod = INCPERIOD_96MHz;
+ incvalue = INCVALUE_96MHz;
+ shift = INCVALUE_SHIFT_96MHz;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ break;
+ }
+ /* fall-through */
+ case e1000_82574:
+ case e1000_82583:
+ /* Stable 25MHz frequency */
+ incperiod = INCPERIOD_25MHz;
+ incvalue = INCVALUE_25MHz;
+ shift = INCVALUE_SHIFT_25MHz;
+ adapter->cc.shift = shift;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
+ ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
+
+ return 0;
+}
+
+/**
+ * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
+ * @adapter: board private structure
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware filters.
+ * Not all combinations are supported, in particular event type has to be
+ * specified. Matching the kind of event packet is not supported, with the
+ * exception of "all V2 events regardless of level 2 or 4".
+ **/
+static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_config *config = &adapter->hwtstamp_config;
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ u32 rxmtrl = 0;
+ u16 rxudp = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return -EINVAL;
+
+ /* flags reserved for future extensions - must be zero */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ /* Also time stamps V2 L2 Path Delay Request/Response */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /* Also time stamps V2 L2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* Hardware cannot filter just V2 L4 Sync messages;
+ * fall-through to V2 (both L2 and L4) Sync.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ /* Also time stamps V2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* Hardware cannot filter just V2 L4 Delay Request messages;
+ * fall-through to V2 (both L2 and L4) Delay Request.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Also time stamps V2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ /* Hardware cannot filter just V2 L4 or L2 Event messages;
+ * fall-through to all V2 (both L2 and L4) Events.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* For V1, the hardware can only filter Sync messages or
+ * Delay Request messages but not both so fall-through to
+ * time stamp all packets.
+ */
+ case HWTSTAMP_FILTER_ALL:
+ is_l2 = true;
+ is_l4 = true;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* enable/disable Tx h/w time stamping */
+ regval = er32(TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ ew32(TSYNCTXCTL, regval);
+ if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
+ (regval & E1000_TSYNCTXCTL_ENABLED)) {
+ e_err("Timesync Tx Control register not set as expected\n");
+ return -EAGAIN;
+ }
+
+ /* enable/disable Rx h/w time stamping */
+ regval = er32(TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ ew32(TSYNCRXCTL, regval);
+ if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
+ E1000_TSYNCRXCTL_TYPE_MASK)) !=
+ (regval & (E1000_TSYNCRXCTL_ENABLED |
+ E1000_TSYNCRXCTL_TYPE_MASK))) {
+ e_err("Timesync Rx Control register not set as expected\n");
+ return -EAGAIN;
+ }
+
+ /* L2: define ethertype filter for time stamped packets */
+ if (is_l2)
+ rxmtrl |= ETH_P_1588;
+
+ /* define which PTP packets get time stamped */
+ ew32(RXMTRL, rxmtrl);
+
+ /* Filter by destination port */
+ if (is_l4) {
+ rxudp = PTP_EV_PORT;
+ cpu_to_be16s(&rxudp);
+ }
+ ew32(RXUDP, rxudp);
+
+ e1e_flush();
+
+ /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
+ er32(RXSTMPH);
+ er32(TXSTMPH);
+
+ /* Get and set the System Time Register SYSTIM base frequency */
+ ret_val = e1000e_get_base_timinca(adapter, &regval);
+ if (ret_val)
+ return ret_val;
+ ew32(TIMINCA, regval);
+
+ /* reset the ns time counter */
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+/**
* e1000_configure - configure the hardware for Rx and Tx
* @adapter: private board structure
**/
@@ -3473,14 +3827,17 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
case e1000_pch2lan:
case e1000_pch_lpt:
- fc->high_water = 0x05C20;
- fc->low_water = 0x05048;
- fc->pause_time = 0x0650;
fc->refresh_time = 0x0400;
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
- pba = 14;
- ew32(PBA, pba);
+
+ if (adapter->netdev->mtu <= ETH_DATA_LEN) {
+ fc->high_water = 0x05C20;
+ fc->low_water = 0x05048;
+ fc->pause_time = 0x0650;
+ break;
}
+
+ fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
+ fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
break;
}
@@ -3533,6 +3890,9 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
+ /* initialize systim and reset the ns time counter */
+ e1000e_config_hwtstamp(adapter);
+
if (!netif_running(adapter->netdev) &&
!test_bit(__E1000_TESTING, &adapter->state)) {
e1000_power_down_phy(adapter);
@@ -3669,6 +4029,24 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
}
/**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+ cc);
+ struct e1000_hw *hw = &adapter->hw;
+ cycle_t systim;
+
+ /* latch SYSTIMH on read of SYSTIML */
+ systim = (cycle_t)er32(SYSTIML);
+ systim |= (cycle_t)er32(SYSTIMH) << 32;
+
+ return systim;
+}
+
+/**
* e1000_sw_init - Initialize general software structures (struct e1000_adapter)
* @adapter: board private structure to initialize
*
@@ -3694,6 +4072,17 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
if (e1000_alloc_queues(adapter))
return -ENOMEM;
+ /* Setup hardware time stamping cyclecounter */
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
+ adapter->cc.read = e1000e_cyclecounter_read;
+ adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mult = 1;
+ /* cc.shift set in e1000e_get_base_timinca() */
+
+ spin_lock_init(&adapter->systim_lock);
+ INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
+ }
+
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -3706,7 +4095,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3877,10 +4266,8 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */
- if (adapter->hw.mac.type == e1000_pch2lan)
- pm_qos_add_request(&adapter->netdev->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -3988,8 +4375,7 @@ static int e1000_close(struct net_device *netdev)
!test_bit(__E1000_TESTING, &adapter->state))
e1000e_release_hw_control(adapter);
- if (adapter->hw.mac.type == e1000_pch2lan)
- pm_qos_remove_request(&adapter->netdev->pm_qos_req);
+ pm_qos_remove_request(&adapter->netdev->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
@@ -4251,6 +4637,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgptc += er32(MGTPTC);
adapter->stats.mgprc += er32(MGTPRC);
adapter->stats.mgpdc += er32(MGTPDC);
+
+ /* Correctable ECC Errors */
+ if (hw->mac.type == e1000_pch_lpt) {
+ u32 pbeccsts = er32(PBECCSTS);
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ }
}
/**
@@ -4266,14 +4662,14 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
- ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
- ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
- ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
- ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
- ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
+ ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
+ ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
+ ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
+ ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
+ ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
+ ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
+ ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
} else {
@@ -4300,9 +4696,8 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- adapter->netdev->name,
- adapter->link_speed,
+ pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name, adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
(ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
(ctrl & E1000_CTRL_RFCE) ? "Rx" :
@@ -4355,11 +4750,11 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
{
/* make sure the receive unit is started */
if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
- (adapter->flags & FLAG_RX_RESTART_NOW)) {
+ (adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
ew32(RCTL, rctl | E1000_RCTL_EN);
- adapter->flags &= ~FLAG_RX_RESTART_NOW;
+ adapter->flags &= ~FLAG_RESTART_NOW;
}
}
@@ -4435,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed,
&adapter->link_duplex);
e1000_print_link_info(adapter);
+
+ /* check if SmartSpeed worked */
+ e1000e_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev,
+ "Link Speed was downgraded by SmartSpeed\n");
+
/* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half
*/
@@ -4446,9 +4848,9 @@ static void e1000_watchdog_task(struct work_struct *work)
(adapter->link_duplex == HALF_DUPLEX)) {
u16 autoneg_exp;
- e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
+ e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
- if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
+ if (!(autoneg_exp & EXPANSION_NWAY))
e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
}
@@ -4521,15 +4923,22 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->link_speed = 0;
adapter->link_duplex = 0;
/* Link status message must follow this format */
- printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
- adapter->netdev->name);
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
- if (adapter->flags & FLAG_RX_NEEDS_RESTART)
- schedule_work(&adapter->reset_task);
+ /* The link is lost so the controller stops DMA.
+ * If there is queued Tx work that cannot be done
+ * or if on an 80003ES2LAN which requires a Rx packet
+ * buffer work-around on link down event, reset the
+ * controller to flush the Tx/Rx packet buffers.
+ * (Do the reset outside of interrupt context).
+ */
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
LINK_TIMEOUT);
@@ -4551,20 +4960,14 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
- e1000e_update_adaptive(&adapter->hw);
-
- if (!netif_carrier_ok(netdev) &&
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
- /* We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
+ if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return;
}
+ e1000e_update_adaptive(&adapter->hw);
+
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (adapter->itr_setting == 4) {
/* Symmetric Tx/Rx gets a reduced ITR=2000;
@@ -4601,6 +5004,17 @@ link_up:
if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
e1000e_check_82574_phy_workaround(adapter);
+ /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
+ if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
+ er32(RXSTMPH);
+ adapter->rx_hwtstamp_cleared++;
+ } else {
+ adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
+ }
+ }
+
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -4612,6 +5026,7 @@ link_up:
#define E1000_TX_FLAGS_TSO 0x00000004
#define E1000_TX_FLAGS_IPV4 0x00000008
#define E1000_TX_FLAGS_NO_FCS 0x00000010
+#define E1000_TX_FLAGS_HWTSTAMP 0x00000020
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
@@ -4870,6 +5285,11 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
txd_lower &= ~(E1000_TXD_CMD_IFCS);
+ if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
+ }
+
i = tx_ring->next_to_use;
do {
@@ -4918,12 +5338,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
u16 length, offset;
- if (vlan_tx_tag_present(skb)) {
- if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
- (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
- return 0;
- }
+ if (vlan_tx_tag_present(skb) &&
+ !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+ return 0;
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
return 0;
@@ -5094,7 +5513,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
nr_frags);
if (count) {
- skb_tx_timestamp(skb);
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !adapter->tx_hwtstamp_skb)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
+ adapter->tx_hwtstamp_skb = skb_get(skb);
+ schedule_work(&adapter->tx_hwtstamp_work);
+ } else {
+ skb_tx_timestamp(skb);
+ }
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
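/* Illustrative sketch (not part of the patch): SKBTX_HW_TSTAMP is set on an
 * skb when the sending application has asked for hardware Tx time stamps,
 * typically via the standard SO_TIMESTAMPING socket option shown below
 * (user-space code, error handling omitted; the time stamp is later read
 * back from the socket error queue).
 */
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int example_request_tx_hwtstamp(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
}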
@@ -5134,10 +5561,9 @@ static void e1000_reset_task(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
- (adapter->flags & FLAG_RX_RESTART_NOW))) {
+ if (!(adapter->flags & FLAG_RESTART_NOW)) {
e1000e_dump(adapter);
- e_err("Reset adapter\n");
+ e_err("Reset adapter unexpectedly\n");
}
e1000e_reinit_locked(adapter);
}
@@ -5323,6 +5749,61 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
return 0;
}
+/**
+ * e1000e_hwtstamp_ioctl - control hardware time stamping
+ * @netdev: network interface device structure
+ * @ifr: interface request
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware filters.
+ * Not all combinations are supported, in particular event type has to be
+ * specified. Matching the kind of event packet is not supported, with the
+ * exception of "all V2 events regardless of layer 2 or 4".
+ **/
+static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int ret_val;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ adapter->hwtstamp_config = config;
+
+ ret_val = e1000e_config_hwtstamp(adapter);
+ if (ret_val)
+ return ret_val;
+
+ config = adapter->hwtstamp_config;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* With V2 type filters which specify a Sync or Delay Request,
+ * Path Delay Request/Response messages are also time stamped
+ * by hardware so notify the caller the requested packets plus
+ * some others are time stamped.
+ */
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ default:
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
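/* Illustrative sketch (not part of the patch): how user space would drive the
 * new SIOCSHWTSTAMP handler through the standard <linux/net_tstamp.h>
 * interface.  The socket fd and "eth0"-style interface name are assumptions;
 * error handling omitted.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int example_enable_hwtstamp(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* on success the driver writes back the filter it actually applied */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}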
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -5330,6 +5811,8 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return e1000_mii_ioctl(netdev, ifr, cmd);
+ case SIOCSHWTSTAMP:
+ return e1000e_hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -5340,7 +5823,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
struct e1000_hw *hw = &adapter->hw;
u32 i, mac_reg;
u16 phy_reg, wuc_enable;
- int retval = 0;
+ int retval;
/* copy MAC RARs to PHY RARs */
e1000_copy_rx_addrs_to_phy_ich8lan(hw);
@@ -5554,14 +6037,21 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
+ u16 aspm_ctl = 0;
+
+ if (state & PCIE_LINK_STATE_L0S)
+ aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+ aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
+
/* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
- pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
if (pdev->bus->self)
pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
- state);
+ aspm_ctl);
}
#endif
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
@@ -5746,7 +6236,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
#ifdef CONFIG_NET_POLL_CONTROLLER
-static irqreturn_t e1000_intr_msix(int irq, void *data)
+static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5910,7 +6400,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
*/
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
-
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
@@ -6068,8 +6557,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = pci_request_selected_regions_exclusive(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM),
- e1000e_driver_name);
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ e1000e_driver_name);
if (err)
goto err_pci_reg;
@@ -6228,11 +6717,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
- netdev->perm_addr);
+ netdev->dev_addr);
err = -EIO;
goto err_eeprom;
}
@@ -6318,6 +6806,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ /* init PTP hardware clock */
+ e1000e_ptp_init(adapter);
+
e1000_print_device_info(adapter);
if (pci_dev_run_wake(pdev))
@@ -6366,6 +6857,8 @@ static void e1000_remove(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
bool down = test_bit(__E1000_DOWN, &adapter->state);
+ e1000e_ptp_remove(adapter);
+
/* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
@@ -6380,6 +6873,14 @@ static void e1000_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
+ cancel_work_sync(&adapter->tx_hwtstamp_work);
+ if (adapter->tx_hwtstamp_skb) {
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ }
+ }
+
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
@@ -6532,7 +7033,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index b6468804cb2e..84fecc268162 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -359,7 +359,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val;
+ s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
/* A check for invalid values: offset too large, too many words,
@@ -371,16 +371,18 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
return -E1000_ERR_NVM;
}
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
- ret_val = e1000_ready_nvm_eeprom(hw);
+ ret_val = nvm->ops.acquire(hw);
if (ret_val)
- goto release;
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
e1000_standby_nvm(hw);
@@ -413,12 +415,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
+ usleep_range(10000, 20000);
+ nvm->ops.release(hw);
}
- usleep_range(10000, 20000);
-release:
- nvm->ops.release(hw);
-
return ret_val;
}
@@ -464,8 +464,8 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
if (nvm_data != NVM_PBA_PTR_GUARD) {
e_dbg("NVM PBA number is not stored as string\n");
- /* we will need 11 characters to store the PBA */
- if (pba_num_size < 11) {
+ /* make sure the caller's buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
e_dbg("PBA string buffer too small\n");
return E1000_ERR_NO_SPACE;
}
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
new file mode 100644
index 000000000000..45fc69561627
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_NVM_H_
+#define _E1000E_NVM_H_
+
+s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000e_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 89d536dd7ff5..98da75dff936 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -53,8 +53,7 @@ MODULE_PARM_DESC(copybreak,
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
- static int X[E1000_MAX_NIC+1] \
- = E1000_PARAM_INIT; \
+ static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
@@ -447,8 +446,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
- if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
- && spd)
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 28b38ff37e84..0930c136aa31 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,16 +28,12 @@
#include "e1000.h"
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
-static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read, bool page_set);
static u32 e1000_get_phy_addr_for_hv_page(u32 page);
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
- u16 *data, bool read);
+ u16 *data, bool read);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
@@ -57,48 +53,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
-#define BM_PHY_REG_PAGE(offset) \
- ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
-#define BM_PHY_REG_NUM(offset) \
- ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
- (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
- ~MAX_PHY_REG_ADDRESS)))
-
-#define HV_INTC_FC_PAGE_START 768
-#define I82578_ADDR_REG 29
-#define I82577_ADDR_REG 16
-#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
-#define I82577_CTRL_REG 23
-
-/* 82577 specific PHY registers */
-#define I82577_PHY_CTRL_2 18
-#define I82577_PHY_STATUS_2 26
-#define I82577_PHY_DIAG_STATUS 31
-
-/* I82577 PHY Status 2 */
-#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
-#define I82577_PHY_STATUS2_MDIX 0x0800
-#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
-#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
-
-/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
-#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
-#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
-
-/* I82577 PHY Diagnostics Status */
-#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
-#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
-
-/* BM PHY Copper Specific Control 1 */
-#define BM_CS_CTRL1 16
-
-#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
-#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
-#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
-
/**
* e1000e_check_reset_block_generic - Check if PHY reset is blocked
* @hw: pointer to the HW structure
@@ -135,13 +89,13 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
return 0;
while (retry_count < 2) {
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
if (ret_val)
return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -645,31 +599,31 @@ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
u16 phy_data;
/* Resolve Master/Slave mode */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data);
if (ret_val)
return ret_val;
/* load defaults for future use */
- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
- ((phy_data & CR_1000T_MS_VALUE) ?
+ hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ?
+ ((phy_data & CTL1000_AS_MASTER) ?
e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto;
switch (hw->phy.ms_type) {
case e1000_ms_force_master:
- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
break;
case e1000_ms_force_slave:
- phy_data |= CR_1000T_MS_ENABLE;
- phy_data &= ~(CR_1000T_MS_VALUE);
+ phy_data |= CTL1000_ENABLE_MASTER;
+ phy_data &= ~(CTL1000_AS_MASTER);
break;
case e1000_ms_auto:
- phy_data &= ~CR_1000T_MS_ENABLE;
+ phy_data &= ~CTL1000_ENABLE_MASTER;
/* fall-through */
default:
break;
}
- return e1e_wphy(hw, PHY_1000T_CTRL, phy_data);
+ return e1e_wphy(hw, MII_CTRL1000, phy_data);
}
/**
@@ -792,7 +746,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Commit the changes. */
- ret_val = e1000e_commit_phy(hw);
+ ret_val = phy->ops.commit(hw);
if (ret_val) {
e_dbg("Error committing the PHY changes\n");
return ret_val;
@@ -848,10 +802,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
}
/* Commit the changes. */
- ret_val = e1000e_commit_phy(hw);
- if (ret_val) {
- e_dbg("Error committing the PHY changes\n");
- return ret_val;
+ if (phy->ops.commit) {
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ e_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
}
if (phy->type == e1000_phy_82578) {
@@ -895,10 +851,12 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
msleep(100);
/* disable lplu d0 during driver init */
- ret_val = e1000_set_d0_lplu_state(hw, false);
- if (ret_val) {
- e_dbg("Error Disabling LPLU D0\n");
- return ret_val;
+ if (hw->phy.ops.set_d0_lplu_state) {
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ e_dbg("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
}
/* Configure mdi-mdix settings */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
@@ -943,12 +901,12 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
return ret_val;
/* Set auto Master/Slave resolution process */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &data);
if (ret_val)
return ret_val;
- data &= ~CR_1000T_MS_ENABLE;
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+ data &= ~CTL1000_ENABLE_MASTER;
+ ret_val = e1e_wphy(hw, MII_CTRL1000, data);
if (ret_val)
return ret_val;
}
@@ -978,13 +936,13 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
phy->autoneg_advertised &= phy->autoneg_mask;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
- ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
/* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
}
@@ -1000,36 +958,35 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
- mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
- NWAY_AR_100TX_HD_CAPS |
- NWAY_AR_10T_FD_CAPS |
- NWAY_AR_10T_HD_CAPS);
- mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+ mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL |
+ ADVERTISE_100HALF |
+ ADVERTISE_10FULL | ADVERTISE_10HALF);
+ mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
e_dbg("Advertise 10mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10HALF;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
e_dbg("Advertise 10mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10FULL;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
e_dbg("Advertise 100mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100HALF;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
e_dbg("Advertise 100mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
@@ -1039,14 +996,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
/* Do we want to advertise 1000 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
e_dbg("Advertise 1000mb Full duplex\n");
- mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
}
/* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * Advertisement Register (MII_ADVERTISE) and re-start auto-
* negotiation.
*
* The possible values of the "fc" parameter are:
@@ -1064,7 +1021,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
/* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
- mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg &=
+ ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
case e1000_fc_rx_pause:
/* Rx Flow control is enabled, and Tx Flow control is
@@ -1076,34 +1034,36 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* (in e1000e_config_fc_after_link_up) we will disable the
* hw's ability to send PAUSE frames.
*/
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg |=
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
case e1000_fc_tx_pause:
/* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
- mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
- mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM;
+ mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP;
break;
case e1000_fc_full:
/* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg |=
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
default:
e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
- ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
if (phy->autoneg_mask & ADVERTISE_1000_FULL)
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg);
return ret_val;
}
@@ -1145,12 +1105,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl);
if (ret_val)
return ret_val;
- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+ phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl);
if (ret_val)
return ret_val;
@@ -1196,7 +1156,7 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
* depending on user settings.
*/
e_dbg("Forcing Speed and Duplex\n");
- ret_val = e1000_phy_force_speed_duplex(hw);
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
if (ret_val) {
e_dbg("Error Forcing Speed and Duplex\n");
return ret_val;
@@ -1237,13 +1197,13 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -1315,20 +1275,22 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
e_dbg("M88E1000 PSCR: %X\n", phy_data);
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
/* Reset the phy to commit changes. */
- ret_val = e1000e_commit_phy(hw);
- if (ret_val)
- return ret_val;
+ if (hw->phy.ops.commit) {
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ return ret_val;
+ }
if (phy->autoneg_wait_to_complete) {
e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
@@ -1406,13 +1368,13 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
u16 data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, data);
+ ret_val = e1e_wphy(hw, MII_BMCR, data);
if (ret_val)
return ret_val;
@@ -1456,13 +1418,13 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
/**
* e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
* @hw: pointer to the HW structure
- * @phy_ctrl: pointer to current value of PHY_CONTROL
+ * @phy_ctrl: pointer to current value of MII_BMCR
*
* Forces speed and duplex on the PHY by doing the following: disable flow
* control, force speed/duplex on the MAC, disable auto speed detection,
* disable auto-negotiation, configure duplex, configure speed, configure
* the collision distance, write configuration to CTRL register. The
- * caller must write to the PHY_CONTROL register for these settings to
+ * caller must write to the MII_BMCR register for these settings to
* take effect.
**/
void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
@@ -1482,29 +1444,28 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
ctrl &= ~E1000_CTRL_ASDE;
/* Disable autoneg on the phy */
- *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+ *phy_ctrl &= ~BMCR_ANENABLE;
/* Forcing Full or Half Duplex? */
if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
ctrl &= ~E1000_CTRL_FD;
- *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ *phy_ctrl &= ~BMCR_FULLDPLX;
e_dbg("Half Duplex\n");
} else {
ctrl |= E1000_CTRL_FD;
- *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ *phy_ctrl |= BMCR_FULLDPLX;
e_dbg("Full Duplex\n");
}
/* Forcing 10mb or 100mb? */
if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
ctrl |= E1000_CTRL_SPD_100;
- *phy_ctrl |= MII_CR_SPEED_100;
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ *phy_ctrl |= BMCR_SPEED100;
+ *phy_ctrl &= ~BMCR_SPEED1000;
e_dbg("Forcing 100mb\n");
} else {
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
- *phy_ctrl |= MII_CR_SPEED_10;
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100);
e_dbg("Forcing 10mb\n");
}
@@ -1745,13 +1706,13 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ if (phy_status & BMSR_ANEGCOMPLETE)
break;
msleep(100);
}
@@ -1778,21 +1739,21 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u16 i, phy_status;
for (i = 0; i < iterations; i++) {
- /* Some PHYs require the PHY_STATUS register to be read
+ /* Some PHYs require the MII_BMSR register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
udelay(usec_interval);
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & MII_SR_LINK_STATUS)
+ if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
mdelay(usec_interval/1000);
@@ -1962,21 +1923,19 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
- ret_val = e1000_get_cable_length(hw);
+ ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data);
if (ret_val)
return ret_val;
- phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (phy_data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (phy_data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
/* Set values to "undefined" */
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
@@ -2026,21 +1985,19 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- ret_val = e1000_get_cable_length(hw);
+ ret_val = phy->ops.get_cable_length(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
if (ret_val)
return ret_val;
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -2114,12 +2071,12 @@ s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_ctrl;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl);
if (ret_val)
return ret_val;
- phy_ctrl |= MII_CR_RESET;
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+ phy_ctrl |= BMCR_RESET;
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl);
if (ret_val)
return ret_val;
@@ -2166,17 +2123,17 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
phy->ops.release(hw);
- return e1000_get_phy_cfg_done(hw);
+ return phy->ops.get_cfg_done(hw);
}
/**
- * e1000e_get_cfg_done - Generic configuration done
+ * e1000e_get_cfg_done_generic - Generic configuration done
* @hw: pointer to the HW structure
*
* Generic function to wait 10 milli-seconds for configuration to complete
* and return success.
**/
-s32 e1000e_get_cfg_done(struct e1000_hw *hw)
+s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw)
{
mdelay(10);
@@ -2266,38 +2223,6 @@ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
return 0;
}
-/* Internal function pointers */
-
-/**
- * e1000_get_phy_cfg_done - Generic PHY configuration done
- * @hw: pointer to the HW structure
- *
- * Return success if silicon family did not implement a family specific
- * get_cfg_done function.
- **/
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
-{
- if (hw->phy.ops.get_cfg_done)
- return hw->phy.ops.get_cfg_done(hw);
-
- return 0;
-}
-
-/**
- * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
- * @hw: pointer to the HW structure
- *
- * When the silicon family has not implemented a forced speed/duplex
- * function for the PHY, simply return 0.
- **/
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
-{
- if (hw->phy.ops.force_speed_duplex)
- return hw->phy.ops.force_speed_duplex(hw);
-
- return 0;
-}
-
/**
* e1000e_get_phy_type_from_id - Get PHY type from id
* @phy_id: phy_id read from the phy
@@ -2549,7 +2474,6 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = 1;
if (offset > MAX_PHY_MULTI_PAGE_REG) {
-
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
page);
@@ -2672,7 +2596,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
**/
s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
- s32 ret_val = 0;
+ s32 ret_val;
/* Select Port Control Registers page */
ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
@@ -2781,9 +2705,9 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, PHY_CONTROL, &mii_reg);
- mii_reg &= ~MII_CR_POWER_DOWN;
- e1e_wphy(hw, PHY_CONTROL, mii_reg);
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg &= ~BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
}
/**
@@ -2799,50 +2723,13 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, PHY_CONTROL, &mii_reg);
- mii_reg |= MII_CR_POWER_DOWN;
- e1e_wphy(hw, PHY_CONTROL, mii_reg);
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg |= BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
usleep_range(1000, 2000);
}
/**
- * e1000e_commit_phy - Soft PHY reset
- * @hw: pointer to the HW structure
- *
- * Performs a soft PHY reset on those that apply. This is a function pointer
- * entry point called by drivers.
- **/
-s32 e1000e_commit_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.commit)
- return hw->phy.ops.commit(hw);
-
- return 0;
-}
-
-/**
- * e1000_set_d0_lplu_state - Sets low power link up state for D0
- * @hw: pointer to the HW structure
- * @active: boolean used to enable/disable lplu
- *
- * Success returns 0, Failure returns 1
- *
- * The low power link up (lplu) state is set to the power management level D0
- * and SmartSpeed is disabled when active is true, else clear lplu for D0
- * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
- * is used during Dx states where the power conservation is most important.
- * During driver activity, SmartSpeed should be enabled so performance is
- * maintained. This is a function pointer entry point called by drivers.
- **/
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
-{
- if (hw->phy.ops.set_d0_lplu_state)
- return hw->phy.ops.set_d0_lplu_state(hw, active);
-
- return 0;
-}
-
-/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -3104,8 +2991,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
{
s32 ret_val;
- u32 addr_reg = 0;
- u32 data_reg = 0;
+ u32 addr_reg;
+ u32 data_reg;
/* This takes care of the difference with desktop vs mobile phy */
addr_reg = (hw->phy.type == e1000_phy_82578) ?
@@ -3154,8 +3041,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
- e1e_rphy(hw, PHY_CONTROL, &data);
- if (data & PHY_CONTROL_LB)
+ e1e_rphy(hw, MII_BMCR, &data);
+ if (data & BMCR_LOOPBACK)
return 0;
/* check if link is up and at 1Gbps */
@@ -3173,8 +3060,9 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
msleep(200);
/* flush the packets in the fifo buffer */
- ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
- HV_MUX_DATA_CTRL_FORCE_SPEED);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL,
+ (HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED));
if (ret_val)
return ret_val;
@@ -3218,13 +3106,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -3292,17 +3180,15 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
if (ret_val)
return ret_val;
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -3333,7 +3219,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
I82577_DSTATUS_CABLE_LENGTH_SHIFT;
if (length == E1000_CABLE_LENGTH_UNDEFINED)
- ret_val = -E1000_ERR_PHY;
+ return -E1000_ERR_PHY;
phy->cable_length = length;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
new file mode 100644
index 000000000000..f4f71b9991e3
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_PHY_H_
+#define _E1000E_PHY_H_
+
+s32 e1000e_check_downshift(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000e_get_phy_id(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+#endif
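
A minimal standalone sketch (not part of the patch) of the page/register packing done by the BM_PHY_REG() family above. MAX_PHY_REG_ADDRESS and PHY_PAGE_SHIFT are assumed here to be 0x1F and 5, matching the driver's other headers; the example packs BM_WUC_PAGE/BM_WUC_ENABLE_REG into one offset and recovers both fields again:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values; in the driver these come from other e1000e headers. */
#define MAX_PHY_REG_ADDRESS	0x1F
#define PHY_PAGE_SHIFT		5
#define PHY_UPPER_SHIFT		21

#define BM_PHY_REG(page, reg) \
	(((reg) & MAX_PHY_REG_ADDRESS) | \
	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) | \
	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
#define BM_PHY_REG_PAGE(offset) \
	((uint16_t)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
#define BM_PHY_REG_NUM(offset) \
	((uint16_t)(((offset) & MAX_PHY_REG_ADDRESS) | \
		    (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) & \
		     ~MAX_PHY_REG_ADDRESS)))

int main(void)
{
	/* page 800 (BM_WUC_PAGE), register 17 (BM_WUC_ENABLE_REG) */
	uint32_t off = BM_PHY_REG(800, 17);

	/* the page lands in bits 20:5, the low register bits stay in 4:0 */
	assert(BM_PHY_REG_PAGE(off) == 800);
	assert(BM_PHY_REG_NUM(off) == 17);
	printf("offset=0x%x page=%d reg=%d\n",
	       (unsigned int)off, BM_PHY_REG_PAGE(off), BM_PHY_REG_NUM(off));
	return 0;
}
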
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
new file mode 100644
index 000000000000..b477fa53ec94
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -0,0 +1,277 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* PTP 1588 Hardware Clock (PHC)
+ * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
+ * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
+ */
+
+#include "e1000.h"
+
+/**
+ * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ struct e1000_hw *hw = &adapter->hw;
+ bool neg_adj = false;
+ u64 adjustment;
+ u32 timinca, incvalue;
+ s32 ret_val;
+
+ if ((delta > ptp->max_adj) || (delta <= -1000000000))
+ return -EINVAL;
+
+ if (delta < 0) {
+ neg_adj = true;
+ delta = -delta;
+ }
+
+ /* Get the System Time Register SYSTIM base frequency */
+ ret_val = e1000e_get_base_timinca(adapter, &timinca);
+ if (ret_val)
+ return ret_val;
+
+ incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
+
+ adjustment = incvalue;
+ adjustment *= delta;
+ adjustment = div_u64(adjustment, 1000000000);
+
+ incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
+
+ timinca &= ~E1000_TIMINCA_INCVALUE_MASK;
+ timinca |= incvalue;
+
+ ew32(TIMINCA, timinca);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ s64 now;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ now = timecounter_read(&adapter->tc);
+ now += delta;
+ timecounter_init(&adapter->tc, &adapter->cc, now);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec.
+ **/
+static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ u32 remainder;
+ u64 ns;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int e1000e_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 ns;
+
+ ns = ts->tv_sec * NSEC_PER_SEC;
+ ns += ts->tv_nsec;
+
+ /* reset the timecounter */
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_init(&adapter->tc, &adapter->cc, ns);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+static void e1000e_systim_overflow_work(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
+ systim_overflow_work.work);
+ struct e1000_hw *hw = &adapter->hw;
+ struct timespec ts;
+
+ adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts);
+
+ e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+
+ schedule_delayed_work(&adapter->systim_overflow_work,
+ E1000_SYSTIM_OVERFLOW_PERIOD);
+}
+
+static const struct ptp_clock_info e1000e_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = e1000e_phc_adjfreq,
+ .adjtime = e1000e_phc_adjtime,
+ .gettime = e1000e_phc_gettime,
+ .settime = e1000e_phc_settime,
+ .enable = e1000e_phc_enable,
+};
+
+/**
+ * e1000e_ptp_init - initialize PTP for devices which support it
+ * @adapter: board private structure
+ *
+ * This function performs the required steps for enabling PTP support.
+ * If PTP support has already been loaded it simply calls the cyclecounter
+ * init routine and exits.
+ **/
+void e1000e_ptp_init(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->ptp_clock = NULL;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ adapter->ptp_clock_info = e1000e_ptp_clock_info;
+
+ snprintf(adapter->ptp_clock_info.name,
+ sizeof(adapter->ptp_clock_info.name), "%pm",
+ adapter->netdev->perm_addr);
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ if ((hw->mac.type != e1000_pch_lpt) ||
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ adapter->ptp_clock_info.max_adj = 24000000 - 1;
+ break;
+ }
+ /* fall-through */
+ case e1000_82574:
+ case e1000_82583:
+ adapter->ptp_clock_info.max_adj = 600000000 - 1;
+ break;
+ default:
+ break;
+ }
+
+ INIT_DELAYED_WORK(&adapter->systim_overflow_work,
+ e1000e_systim_overflow_work);
+
+ schedule_delayed_work(&adapter->systim_overflow_work,
+ E1000_SYSTIM_OVERFLOW_PERIOD);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ e_err("ptp_clock_register failed\n");
+ } else {
+ e_info("registered PHC clock\n");
+ }
+}
+
+/**
+ * e1000e_ptp_remove - disable PTP device and stop the overflow check
+ * @adapter: board private structure
+ *
+ * Stop the PTP support, and cancel the delayed work.
+ **/
+void e1000e_ptp_remove(struct e1000_adapter *adapter)
+{
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ cancel_delayed_work_sync(&adapter->systim_overflow_work);
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ e_info("removed PHC\n");
+ }
+}
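
The frequency adjustment in e1000e_phc_adjfreq() above is plain fixed-point scaling: the TIMINCA increment value is changed by |delta| parts per billion and written back. A standalone sketch of the same arithmetic, with div_u64() replaced by ordinary 64-bit division; the increment value used here is an arbitrary example, not a real TIMINCA setting:

#include <stdint.h>
#include <stdio.h>

/* Scale a TIMINCA-style increment value by delta parts per billion,
 * the way e1000e_phc_adjfreq() does. */
static uint32_t scale_incvalue(uint32_t incvalue, int32_t delta_ppb)
{
	uint64_t adjustment;
	int neg = 0;

	if (delta_ppb < 0) {
		neg = 1;
		delta_ppb = -delta_ppb;
	}

	/* incvalue * ppb / 1e9, done in 64 bits to avoid overflow */
	adjustment = (uint64_t)incvalue * (uint32_t)delta_ppb / 1000000000ULL;

	return neg ? incvalue - (uint32_t)adjustment
		   : incvalue + (uint32_t)adjustment;
}

int main(void)
{
	/* speeding an example increment value up by 100 ppb */
	printf("%u\n", scale_incvalue(24000000, 100));	/* prints 24000002 */
	return 0;
}
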
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
new file mode 100644
index 000000000000..794fe1497666
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -0,0 +1,252 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_REGS_H_
+#define _E1000E_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR 0x000F0
+#define E1000_SVT 0x000F4
+#define E1000_LPIC 0x000FC /* Low Power IDLE control */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
+
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
+#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+
+#endif
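
For the per-queue descriptor macros defined in this file (E1000_RDBAL(), E1000_RDT(), E1000_TXDCTL(), ...), the bank switch at queue 4 is easy to misread. A small sketch of the same address computation, using the constants from E1000_RDBAL(_n) above:

#include <stdio.h>

/* Same layout as E1000_RDBAL(_n): legacy bank with 0x100 stride for
 * queues 0-3, alternate bank with 0x40 stride for higher queues. */
static unsigned int rdbal_offset(unsigned int n)
{
	return n < 4 ? 0x02800 + n * 0x100 : 0x0C000 + n * 0x40;
}

int main(void)
{
	unsigned int q;

	for (q = 0; q < 6; q++)
		printf("RDBAL(%u) = 0x%05X\n", q, rdbal_offset(q));
	/* 0x02800 0x02900 0x02A00 0x02B00 0x0C100 0x0C140 */
	return 0;
}
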
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 624476cfa727..f19700e285bb 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -34,4 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fdaaf2709d0a..84e7e0909def 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -33,6 +33,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/i2c.h>
#include "e1000_mac.h"
#include "e1000_82575.h"
@@ -110,184 +111,168 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
return ext_mdio;
}
-static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+/**
+ * igb_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- struct e1000_nvm_info *nvm = &hw->nvm;
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
- u32 eecd;
- s32 ret_val;
- u16 size;
- u32 ctrl_ext = 0;
+ s32 ret_val = 0;
+ u32 ctrl_ext;
- switch (hw->device_id) {
- case E1000_DEV_ID_82575EB_COPPER:
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- mac->type = e1000_82575;
- break;
- case E1000_DEV_ID_82576:
- case E1000_DEV_ID_82576_NS:
- case E1000_DEV_ID_82576_NS_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- case E1000_DEV_ID_82576_SERDES_QUAD:
- mac->type = e1000_82576;
- break;
- case E1000_DEV_ID_82580_COPPER:
- case E1000_DEV_ID_82580_FIBER:
- case E1000_DEV_ID_82580_QUAD_FIBER:
- case E1000_DEV_ID_82580_SERDES:
- case E1000_DEV_ID_82580_SGMII:
- case E1000_DEV_ID_82580_COPPER_DUAL:
- case E1000_DEV_ID_DH89XXCC_SGMII:
- case E1000_DEV_ID_DH89XXCC_SERDES:
- case E1000_DEV_ID_DH89XXCC_BACKPLANE:
- case E1000_DEV_ID_DH89XXCC_SFP:
- mac->type = e1000_82580;
- break;
- case E1000_DEV_ID_I350_COPPER:
- case E1000_DEV_ID_I350_FIBER:
- case E1000_DEV_ID_I350_SERDES:
- case E1000_DEV_ID_I350_SGMII:
- mac->type = e1000_i350;
- break;
- case E1000_DEV_ID_I210_COPPER:
- case E1000_DEV_ID_I210_COPPER_OEM1:
- case E1000_DEV_ID_I210_COPPER_IT:
- case E1000_DEV_ID_I210_FIBER:
- case E1000_DEV_ID_I210_SERDES:
- case E1000_DEV_ID_I210_SGMII:
- mac->type = e1000_i210;
- break;
- case E1000_DEV_ID_I211_COPPER:
- mac->type = e1000_i211;
- break;
- default:
- return -E1000_ERR_MAC_INIT;
- break;
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
}
- /* Set media type */
- /*
- * The 82575 uses bits 22:23 for link mode. The mode can be changed
- * based on the EEPROM. We cannot rely upon device ID. There
- * is no distinguishable difference between fiber and internal
- * SerDes mode on the 82575. There can be an external PHY attached
- * on the SGMII interface. For this, we'll set sgmii_active to true.
- */
- phy->media_type = e1000_media_type_copper;
- dev_spec->sgmii_active = false;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
ctrl_ext = rd32(E1000_CTRL_EXT);
- switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
- case E1000_CTRL_EXT_LINK_MODE_SGMII:
- dev_spec->sgmii_active = true;
- break;
- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
- hw->phy.media_type = e1000_media_type_internal_serdes;
- break;
- default:
- break;
+
+ if (igb_sgmii_active_82575(hw)) {
+ phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = igb_phy_hw_reset;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
}
- /* Set mta register count */
- mac->mta_reg_count = 128;
- /* Set rar entry count */
- switch (mac->type) {
- case e1000_82576:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ igb_reset_mdicnfg_82580(hw);
+
+ if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ phy->ops.read_reg = igb_read_phy_reg_82580;
+ phy->ops.write_reg = igb_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = igb_read_phy_reg_gs40g;
+ phy->ops.write_reg = igb_write_phy_reg_gs40g;
+ break;
+ default:
+ phy->ops.read_reg = igb_read_phy_reg_igp;
+ phy->ops.write_reg = igb_write_phy_reg_igp;
+ }
+ }
+
+ /* set lan id */
+ hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+ E1000_STATUS_FUNC_SHIFT;
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = igb_get_phy_id_82575(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID)
+ phy->ops.get_cable_length =
+ igb_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = igb_get_cable_length_m88;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
- case e1000_82580:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->ops.get_phy_info = igb_get_phy_info_igp;
+ phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break;
- case e1000_i350:
- mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.force_speed_duplex =
+ igb_phy_force_speed_duplex_82580;
+ phy->ops.get_cable_length = igb_get_cable_length_82580;
+ phy->ops.get_phy_info = igb_get_phy_info_82580;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
break;
- default:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = igb_check_polarity_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
}
- /* reset */
- if (mac->type >= e1000_82580)
- mac->ops.reset_hw = igb_reset_hw_82580;
- else
- mac->ops.reset_hw = igb_reset_hw_82575;
- if (mac->type >= e1000_i210) {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
- } else {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
- }
+out:
+ return ret_val;
+}
- /* Set if part includes ASF firmware */
- mac->asf_firmware_present = true;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid =
- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
- /* enable EEE on i350 parts and later parts */
- if (mac->type >= e1000_i350)
- dev_spec->eee_disable = false;
- else
- dev_spec->eee_disable = true;
- /* physical interface link setup */
- mac->ops.setup_physical_interface =
- (hw->phy.media_type == e1000_media_type_copper)
- ? igb_setup_copper_link_82575
- : igb_setup_serdes_link_82575;
+/**
+ * igb_init_nvm_params_82575 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(E1000_EECD);
+ u16 size;
- /* NVM initialization */
- eecd = rd32(E1000_EECD);
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
-
- /*
- * Added to a constant, "size" becomes the left-shift value
+ /* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
- /*
- * Check for invalid size
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
*/
- if ((hw->mac.type == e1000_82576) && (size > 15)) {
- pr_notice("The NVM size is not valid, defaulting to 32K\n");
+ if (size > 15)
size = 15;
- }
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
switch (nvm->override) {
case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
+ nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
+ nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
- nvm->page_size = eecd
- & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd
- & E1000_EECD_ADDR_BITS ? 16 : 8;
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
break;
}
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
- } else
+ } else {
nvm->type = e1000_nvm_flash_hw;
+ }
/* NVM Function Pointers */
switch (hw->mac.type) {
@@ -344,118 +329,176 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* if part supports SR-IOV then initialize mailbox parameters */
+ return 0;
+}
+
+/**
+ * igb_init_mac_params_82575 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
switch (mac->type) {
case e1000_82576:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ break;
+ case e1000_82580:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ break;
case e1000_i350:
- igb_init_mbx_params_pf(hw);
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
break;
}
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = igb_reset_hw_82580;
+ else
+ mac->ops.reset_hw = igb_reset_hw_82575;
- /* setup PHY parameters */
- if (phy->media_type != e1000_media_type_copper) {
- phy->type = e1000_phy_none;
- return 0;
- }
-
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- phy->reset_delay_us = 100;
-
- ctrl_ext = rd32(E1000_CTRL_EXT);
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
- /* PHY function pointers */
- if (igb_sgmii_active_82575(hw)) {
- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
} else {
- phy->ops.reset = igb_phy_hw_reset;
- ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+ mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
}
- wr32(E1000_CTRL_EXT, ctrl_ext);
- igb_reset_mdicnfg_82580(hw);
-
- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
- } else if ((hw->mac.type == e1000_82580)
- || (hw->mac.type == e1000_i350)) {
- phy->ops.read_reg = igb_read_phy_reg_82580;
- phy->ops.write_reg = igb_write_phy_reg_82580;
- } else if (hw->phy.type >= e1000_phy_i210) {
- phy->ops.read_reg = igb_read_phy_reg_gs40g;
- phy->ops.write_reg = igb_write_phy_reg_gs40g;
- } else {
- phy->ops.read_reg = igb_read_phy_reg_igp;
- phy->ops.write_reg = igb_write_phy_reg_igp;
- }
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* Set if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
+ /* enable EEE on i350 parts and later parts */
+ if (mac->type >= e1000_i350)
+ dev_spec->eee_disable = false;
+ else
+ dev_spec->eee_disable = true;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? igb_setup_copper_link_82575
+ : igb_setup_serdes_link_82575;
- /* set lan id */
- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
- E1000_STATUS_FUNC_SHIFT;
+ return 0;
+}
- /* Set phy->phy_addr and phy->id. */
- ret_val = igb_get_phy_id_82575(hw);
- if (ret_val)
- return ret_val;
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+ s32 ret_val;
+ u32 ctrl_ext = 0;
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I347AT4_E_PHY_ID:
- case M88E1112_E_PHY_ID:
- case M88E1111_I_PHY_ID:
- phy->type = e1000_phy_m88;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
+ default:
+ return -E1000_ERR_MAC_INIT;
+ break;
+ }
- if (phy->id == I347AT4_E_PHY_ID ||
- phy->id == M88E1112_E_PHY_ID)
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- else
- phy->ops.get_cable_length = igb_get_cable_length_m88;
+ /* Set media type */
+ /*
+ * The 82575 uses bits 22:23 for link mode. The mode can be changed
+ * based on the EEPROM. We cannot rely upon device ID. There
+ * is no distinguishable difference between fiber and internal
+ * SerDes mode on the 82575. There can be an external PHY attached
+ * on the SGMII interface. For this, we'll set sgmii_active to true.
+ */
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = false;
- if (phy->id == I210_I_PHY_ID) {
- phy->ops.get_cable_length =
- igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state =
- igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state =
- igb_set_d3_lplu_state_82580;
- }
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ dev_spec->sgmii_active = true;
break;
- case IGP03E1000_E_PHY_ID:
- phy->type = e1000_phy_igp_3;
- phy->ops.get_phy_info = igb_get_phy_info_igp;
- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
break;
- case I82580_I_PHY_ID:
- case I350_I_PHY_ID:
- phy->type = e1000_phy_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
- phy->ops.get_cable_length = igb_get_cable_length_82580;
- phy->ops.get_phy_info = igb_get_phy_info_82580;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ default:
break;
- case I210_I_PHY_ID:
- phy->type = e1000_phy_i210;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
- phy->ops.check_polarity = igb_check_polarity_m88;
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ }
+
+ /* mac initialization and operations */
+ ret_val = igb_init_mac_params_82575(hw);
+ if (ret_val)
+ goto out;
+
+ /* NVM initialization */
+ ret_val = igb_init_nvm_params_82575(hw);
+ if (ret_val)
+ goto out;
+
+ /* if part supports SR-IOV then initialize mailbox parameters */
+ switch (mac->type) {
+ case e1000_82576:
+ case e1000_i350:
+ igb_init_mbx_params_pf(hw);
break;
default:
- return -E1000_ERR_PHY;
+ break;
}
- return 0;
+ /* setup PHY parameters */
+ ret_val = igb_init_phy_params_82575(hw);
+
+out:
+ return ret_val;
}
/**
@@ -2302,18 +2345,157 @@ out:
return ret_val;
}
+static const u8 e1000_emc_temp_data[4] = {
+ E1000_EMC_INTERNAL_DATA,
+ E1000_EMC_DIODE1_DATA,
+ E1000_EMC_DIODE2_DATA,
+ E1000_EMC_DIODE3_DATA
+};
+static const u8 e1000_emc_therm_limit[4] = {
+ E1000_EMC_INTERNAL_THERM_LIMIT,
+ E1000_EMC_DIODE1_THERM_LIMIT,
+ E1000_EMC_DIODE2_THERM_LIMIT,
+ E1000_EMC_DIODE3_THERM_LIMIT
+};
+
+/* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ */
+s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+ data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return status;
+
+ hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+ if (num_sensors > E1000_MAX_SENSORS)
+ num_sensors = E1000_MAX_SENSORS;
+
+ for (i = 1; i < num_sensors; i++) {
+ hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+ NVM_ETS_DATA_LOC_SHIFT);
+
+ if (sensor_location != 0)
+ hw->phy.ops.read_i2c_byte(hw,
+ e1000_emc_temp_data[sensor_index],
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ &data->sensor[i].temp);
+ }
+ return status;
+}
+
+/* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map
+ * and saves off the threshold and location values into mac.thermal_sensor_data
+ */
+s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 low_thresh_delta;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 therm_limit;
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+ memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
+
+ data->sensor[0].location = 0x1;
+ data->sensor[0].caution_thresh =
+ (rd32(E1000_THHIGHTC) & 0xFF);
+ data->sensor[0].max_op_thresh =
+ (rd32(E1000_THLOWTC) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return status;
+
+ hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+ low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
+ NVM_ETS_LTHRES_DELTA_SHIFT);
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+
+ for (i = 1; i <= num_sensors; i++) {
+ hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+ NVM_ETS_DATA_LOC_SHIFT);
+ therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
+ e1000_emc_therm_limit[sensor_index],
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ therm_limit);
+
+ if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
+ data->sensor[i].location = sensor_location;
+ data->sensor[i].caution_thresh = therm_limit;
+ data->sensor[i].max_op_thresh = therm_limit -
+ low_thresh_delta;
+ }
+ }
+ return status;
+}
+
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,
.rar_set = igb_rar_set,
.read_mac_addr = igb_read_mac_addr_82575,
.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+#ifdef CONFIG_IGB_HWMON
+ .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
+ .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
+#endif
};
static struct e1000_phy_operations e1000_phy_ops_82575 = {
.acquire = igb_acquire_phy_82575,
.get_cfg_done = igb_get_cfg_done_82575,
.release = igb_release_phy_82575,
+ .write_i2c_byte = igb_write_i2c_byte,
+ .read_i2c_byte = igb_read_i2c_byte,
};
static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 44b76b3b6816..73ab41f0e032 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -32,6 +32,10 @@ extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_DEF1_DEF2 << 8) | \
@@ -260,5 +264,16 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_set_eee_i350(struct e1000_hw *);
-
+s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
+s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+
+#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define E1000_EMC_INTERNAL_DATA 0x00
+#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
+#define E1000_EMC_DIODE1_DATA 0x01
+#define E1000_EMC_DIODE1_THERM_LIMIT 0x19
+#define E1000_EMC_DIODE2_DATA 0x23
+#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
+#define E1000_EMC_DIODE3_DATA 0x2A
+#define E1000_EMC_DIODE3_THERM_LIMIT 0x30
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 45dce06eff26..7e13337d3b9d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -470,6 +470,7 @@
#define E1000_ERR_NO_SPACE 17
#define E1000_ERR_NVM_PBA_SECTION 18
#define E1000_ERR_INVM_VALUE_NOT_FOUND 19
+#define E1000_ERR_I2C 20
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
@@ -674,6 +675,18 @@
#define NVM_COMB_VER_SHFT 8
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETRACK_SHIFT 16
+#define NVM_ETS_CFG 0x003E
+#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
+#define NVM_ETS_LTHRES_DELTA_SHIFT 6
+#define NVM_ETS_TYPE_MASK 0x0038
+#define NVM_ETS_TYPE_SHIFT 3
+#define NVM_ETS_TYPE_EMC 0x000
+#define NVM_ETS_NUM_SENSORS_MASK 0x0007
+#define NVM_ETS_DATA_LOC_MASK 0x3C00
+#define NVM_ETS_DATA_LOC_SHIFT 10
+#define NVM_ETS_DATA_INDEX_MASK 0x0300
+#define NVM_ETS_DATA_INDEX_SHIFT 8
+#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
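
The NVM_ETS_* masks added above describe how igb_get_thermal_sensor_data_generic() and igb_init_thermal_sensor_thresh_generic() pick the ETS configuration word apart. A standalone sketch of decoding one hypothetical configuration word with those masks (local copies of the defines, example value only):

#include <stdint.h>
#include <stdio.h>

/* Local copies of the NVM_ETS_* masks added above. */
#define NVM_ETS_LTHRES_DELTA_MASK	0x07C0
#define NVM_ETS_LTHRES_DELTA_SHIFT	6
#define NVM_ETS_TYPE_MASK		0x0038
#define NVM_ETS_TYPE_SHIFT		3
#define NVM_ETS_TYPE_EMC		0x000
#define NVM_ETS_NUM_SENSORS_MASK	0x0007

int main(void)
{
	uint16_t ets_cfg = 0x0143;	/* hypothetical word read from NVM */

	printf("sensor type:         %d (EMC is %d)\n",
	       (ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT,
	       NVM_ETS_TYPE_EMC);
	printf("number of sensors:   %d\n",
	       ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
	printf("low-threshold delta: %d\n",
	       (ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> NVM_ETS_LTHRES_DELTA_SHIFT);
	return 0;
}
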
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index c2a51dcda550..0d5cf9c63d0d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -325,6 +325,10 @@ struct e1000_mac_operations {
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
+#ifdef CONFIG_IGB_HWMON
+ s32 (*get_thermal_sensor_data)(struct e1000_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
+#endif
};
@@ -342,6 +346,8 @@ struct e1000_phy_operations {
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
@@ -354,6 +360,19 @@ struct e1000_nvm_operations {
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
+#define E1000_MAX_SENSORS 3
+
+struct e1000_thermal_diode_data {
+ u8 location;
+ u8 temp;
+ u8 caution_thresh;
+ u8 max_op_thresh;
+};
+
+struct e1000_thermal_sensor_data {
+ struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
+};
+
struct e1000_info {
s32 (*get_invariants)(struct e1000_hw *);
struct e1000_mac_operations *mac_ops;
@@ -399,6 +418,7 @@ struct e1000_mac_info {
bool report_tx_early;
bool serdes_has_link;
bool tx_pkt_filtering;
+ struct e1000_thermal_sensor_data thermal_sensor_data;
};
struct e1000_phy_info {
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index fbcdbebb0b5f..6a42344f24f1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 1c89358a99ab..e4e1a73b7c75 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 101e6e4da97f..a5c7200b9a71 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e2b2c4b9c951..e6d6ce433261 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 5988b8958baf..38e0df350904 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index dbcfa3d5caec..c13b56d9edb2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index fbb7604db364..5b62adbe134d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 7012d458c6f7..6bfc0c43aace 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2012 Intel Corporation.
+ Copyright(c) 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index fe76004aca4e..2918c979b5bb 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index ed282f877d9a..784fd1c40989 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index e5db48594e8a..15343286082e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -75,6 +75,14 @@
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
+#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
+#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
+#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
+#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
/* IEEE 1588 TIMESYNCH */
#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
@@ -124,6 +132,14 @@
/* Split and Replication RX Control - RW */
#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT 0x08100 /* Junction Temperature */
+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
/*
* Convenience macros
*
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 17f1686ee411..d27edbc63923 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,8 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
struct igb_adapter;
@@ -137,8 +139,6 @@ struct vf_data_storage {
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -167,6 +167,17 @@ enum igb_tx_flags {
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
@@ -219,6 +230,7 @@ struct igb_ring {
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
};
+ unsigned long last_rx_timestamp;
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
@@ -272,10 +284,18 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_build_skb(ring) \
+ test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
@@ -301,6 +321,32 @@ static inline int igb_desc_unused(struct igb_ring *ring)
return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
+struct igb_i2c_client_list {
+ struct i2c_client *client;
+ struct igb_i2c_client_list *next;
+};
+
+#ifdef CONFIG_IGB_HWMON
+
+#define IGB_HWMON_TYPE_LOC 0
+#define IGB_HWMON_TYPE_TEMP 1
+#define IGB_HWMON_TYPE_CAUTION 2
+#define IGB_HWMON_TYPE_MAX 3
+
+struct hwmon_attr {
+ struct device_attribute dev_attr;
+ struct e1000_hw *hw;
+ struct e1000_thermal_diode_data *sensor;
+ char name[12];
+};
+
+struct hwmon_buff {
+ struct device *device;
+ struct hwmon_attr *hwmon_list;
+ unsigned int n_hwmon;
+};
+#endif
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -386,11 +432,22 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
char fw_version[32];
+#ifdef CONFIG_IGB_HWMON
+ struct hwmon_buff igb_hwmon_buff;
+ bool ets;
+#endif
+ struct i2c_algo_bit_data i2c_algo;
+ struct i2c_adapter i2c_adap;
+ struct igb_i2c_client_list *i2c_clients;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -449,6 +506,7 @@ extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb);
@@ -466,7 +524,10 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
-
+#ifdef CONFIG_IGB_HWMON
+extern void igb_sysfs_exit(struct igb_adapter *adapter);
+extern int igb_sysfs_init(struct igb_adapter *adapter);
+#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index bfe9208c4b18..a3830a8ba4c1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -92,6 +92,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
#define IGB_NETDEV_STAT(_net_stat) { \
@@ -1889,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(&adapter->hw);
if (hw->mac.autoneg)
- msleep(4000);
+ msleep(5000);
if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
*data = 1;
@@ -2272,12 +2274,21 @@ static int igb_get_ts_info(struct net_device *dev,
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
+ case e1000_82575:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
case e1000_82576:
case e1000_82580:
case e1000_i350:
case e1000_i210:
case e1000_i211:
info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
new file mode 100644
index 000000000000..0a9b073d0b03
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_IGB_HWMON
+/* hwmon callback functions */
+static ssize_t igb_hwmon_show_location(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ return sprintf(buf, "loc%u\n",
+ igb_attr->sensor->location);
+}
+
+static ssize_t igb_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value;
+
+ /* reset the temp field */
+ igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
+
+ value = igb_attr->sensor->temp;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->caution_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->max_op_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+ unsigned int offset, int type) {
+ int rc;
+ unsigned int n_attr;
+ struct hwmon_attr *igb_attr;
+
+ n_attr = adapter->igb_hwmon_buff.n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+
+ switch (type) {
+ case IGB_HWMON_TYPE_LOC:
+ igb_attr->dev_attr.show = igb_hwmon_show_location;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_label", offset);
+ break;
+ case IGB_HWMON_TYPE_TEMP:
+ igb_attr->dev_attr.show = igb_hwmon_show_temp;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_input", offset);
+ break;
+ case IGB_HWMON_TYPE_CAUTION:
+ igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_max", offset);
+ break;
+ case IGB_HWMON_TYPE_MAX:
+ igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_crit", offset);
+ break;
+ default:
+ rc = -EPERM;
+ return rc;
+ }
+
+ /* These are always the same regardless of type */
+ igb_attr->sensor =
+ &adapter->hw.mac.thermal_sensor_data.sensor[offset];
+ igb_attr->hw = &adapter->hw;
+ igb_attr->dev_attr.store = NULL;
+ igb_attr->dev_attr.attr.mode = S_IRUGO;
+ igb_attr->dev_attr.attr.name = igb_attr->name;
+ sysfs_attr_init(&igb_attr->dev_attr.attr);
+ rc = device_create_file(&adapter->pdev->dev,
+ &igb_attr->dev_attr);
+ if (rc == 0)
+ ++adapter->igb_hwmon_buff.n_hwmon;
+
+ return rc;
+}
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+ int i;
+
+ if (adapter == NULL)
+ return;
+
+ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
+ device_remove_file(&adapter->pdev->dev,
+ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
+ }
+
+ kfree(adapter->igb_hwmon_buff.hwmon_list);
+
+ if (adapter->igb_hwmon_buff.device)
+ hwmon_device_unregister(adapter->igb_hwmon_buff.device);
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+ igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ unsigned int i;
+ int n_attrs;
+ int rc = 0;
+
+ /* If this method isn't defined we don't support thermals */
+ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+ goto exit;
+
+ /* Don't create thermal hwmon interface if no sensors present */
+ rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+ if (rc)
+ goto exit;
+
+ /* Allocate space for max attributes
+ * max num sensors * values (loc, temp, max, caution)
+ */
+ n_attrs = E1000_MAX_SENSORS * 4;
+ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+ GFP_KERNEL);
+ if (!igb_hwmon->hwmon_list) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
+ if (IS_ERR(igb_hwmon->device)) {
+ rc = PTR_ERR(igb_hwmon->device);
+ goto err;
+ }
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+ /* Only create hwmon sysfs entries for sensors that have
+ * meaningful data.
+ */
+ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+ continue;
+
+ /* Bail if any hwmon attr struct fails to initialize */
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto err;
+ }
+
+ goto exit;
+
+err:
+ igb_sysfs_del_adapter(adapter);
+exit:
+ return rc;
+}
+#endif
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 31cfe2ec75df..ed79a1c53b59 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
+#include <linux/i2c.h>
#include "igb.h"
#define MAJ 4
@@ -68,7 +69,8 @@ char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";
+static const char igb_copyright[] =
+ "Copyright (c) 2007-2013 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -193,6 +195,7 @@ static const struct dev_pm_ops igb_pm_ops = {
};
#endif
static void igb_shutdown(struct pci_dev *);
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
@@ -234,6 +237,7 @@ static struct pci_driver igb_driver = {
.driver.pm = &igb_pm_ops,
#endif
.shutdown = igb_shutdown,
+ .sriov_configure = igb_pci_sriov_configure,
.err_handler = &igb_err_handler
};
@@ -565,6 +569,91 @@ exit:
return;
}
+/* igb_get_i2c_data - Reads the I2C SDA data bit
+ * @data: pointer to hardware structure
+ *
+ * Returns the I2C data bit value
+ */
+static int igb_get_i2c_data(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+}
+
+/* igb_set_i2c_data - Sets the I2C data bit
+ * @data: pointer to hardware structure
+ * @state: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ */
+static void igb_set_i2c_data(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ if (state)
+ i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ i2cctl &= ~E1000_I2C_DATA_OE_N;
+ i2cctl |= E1000_I2C_CLK_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+
+/* igb_set_i2c_clk - Sets the I2C SCL clock
+ * @data: pointer to hardware structure
+ * @state: state to set clock
+ *
+ * Sets the I2C clock line to state
+ */
+static void igb_set_i2c_clk(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ if (state) {
+ i2cctl |= E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ } else {
+ i2cctl &= ~E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ }
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+
+/* igb_get_i2c_clk - Gets the I2C SCL clock state
+ * @data: pointer to hardware structure
+ *
+ * Gets the I2C clock state
+ */
+static int igb_get_i2c_clk(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+}
+
+static const struct i2c_algo_bit_data igb_i2c_algo = {
+ .setsda = igb_set_i2c_data,
+ .setscl = igb_set_i2c_clk,
+ .getsda = igb_get_i2c_data,
+ .getscl = igb_get_i2c_clk,
+ .udelay = 5,
+ .timeout = 20,
+};
+
/**
* igb_get_hw_dev - return device
* used by hardware layer to print debugging information
@@ -1708,6 +1797,18 @@ void igb_reset(struct igb_adapter *adapter)
igb_force_mac_fc(hw);
igb_init_dmac(adapter, pba);
+#ifdef CONFIG_IGB_HWMON
+ /* Re-initialize the thermal sensor on i350 devices. */
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (mac->type == e1000_i350 && hw->bus.func == 0) {
+ /* If present, re-initialize the external thermal sensor
+ * interface.
+ */
+ if (adapter->ets)
+ mac->ops.init_thermal_sensor_thresh(hw);
+ }
+ }
+#endif
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
@@ -1822,6 +1923,37 @@ void igb_set_fw_version(struct igb_adapter *adapter)
return;
}
+static const struct i2c_board_info i350_sensor_info = {
+ I2C_BOARD_INFO("i350bb", 0xf8),
+};
+
+/* igb_init_i2c - Init I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static s32 igb_init_i2c(struct igb_adapter *adapter)
+{
+ s32 status = E1000_SUCCESS;
+
+ /* I2C interface supported on i350 devices */
+ if (adapter->hw.mac.type != e1000_i350)
+ return E1000_SUCCESS;
+
+ /* Initialize the i2c bus which is controlled by the registers.
+ * This bus will use the i2c_algo_bit structure that implements
+ * the protocol through toggling of the 4 bits in the register.
+ */
+ adapter->i2c_adap.owner = THIS_MODULE;
+ adapter->i2c_algo = igb_i2c_algo;
+ adapter->i2c_algo.data = adapter;
+ adapter->i2c_adap.algo_data = &adapter->i2c_algo;
+ adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
+ strlcpy(adapter->i2c_adap.name, "igb BB",
+ sizeof(adapter->i2c_adap.name));
+ status = i2c_bit_add_bus(&adapter->i2c_adap);
+ return status;
+}
+
/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -2022,9 +2154,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "NVM Read Error\n");
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
@@ -2115,6 +2246,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* reset the hardware with the new settings */
igb_reset(adapter);
+ /* Init the I2C interface */
+ err = igb_init_i2c(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init i2c interface\n");
+ goto err_eeprom;
+ }
+
/* let the f/w know that the h/w is now under the control of the
* driver. */
igb_get_hw_control(adapter);
@@ -2135,7 +2273,27 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
+#ifdef CONFIG_IGB_HWMON
+ /* Initialize the thermal sensor on i350 devices. */
+ if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
+ u16 ets_word;
+ /*
+ * Read the NVM to determine if this i350 device supports an
+ * external thermal sensor.
+ */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
+ adapter->ets = true;
+ else
+ adapter->ets = false;
+ if (igb_sysfs_init(adapter))
+ dev_err(&pdev->dev,
+ "failed to allocate sysfs resources\n");
+ } else {
+ adapter->ets = false;
+ }
+#endif
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
@@ -2176,6 +2334,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_register:
igb_release_hw_control(adapter);
+ memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
err_eeprom:
if (!igb_check_reset_block(hw))
igb_reset_phy(hw);
@@ -2196,6 +2355,111 @@ err_dma:
return err;
}
+#ifdef CONFIG_PCI_IOV
+static int igb_disable_sriov(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* reclaim resources allocated to VFs */
+ if (adapter->vf_data) {
+ /* disable iov and allow time for transactions to clear */
+ if (igb_vfs_are_assigned(adapter)) {
+ dev_warn(&pdev->dev,
+ "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ } else {
+ pci_disable_sriov(pdev);
+ msleep(500);
+ }
+
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+ wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
+ msleep(100);
+ dev_info(&pdev->dev, "IOV Disabled\n");
+
+ /* Re-enable DMA Coalescing flag since IOV is turned off */
+ adapter->flags |= IGB_FLAG_DMAC;
+ }
+
+ return 0;
+}
+
+static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int old_vfs = pci_num_vf(pdev);
+ int err = 0;
+ int i;
+
+ if (!num_vfs)
+ goto out;
+ else if (old_vfs && old_vfs == num_vfs)
+ goto out;
+ else if (old_vfs && old_vfs != num_vfs)
+ err = igb_disable_sriov(pdev);
+
+ if (err)
+ goto out;
+
+ if (num_vfs > 7) {
+ err = -EPERM;
+ goto out;
+ }
+
+ adapter->vfs_allocated_count = num_vfs;
+
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+
+ /* if allocation failed then we do not support SR-IOV */
+ if (!adapter->vf_data) {
+ adapter->vfs_allocated_count = 0;
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for VF Data Storage\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+
+ dev_info(&pdev->dev, "%d VFs allocated\n",
+ adapter->vfs_allocated_count);
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
+ igb_vf_configure(adapter, i);
+
+ /* DMA Coalescing is not supported in IOV mode. */
+ adapter->flags &= ~IGB_FLAG_DMAC;
+ goto out;
+
+err_out:
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+out:
+ return err;
+}
+
+#endif
+/*
+ * igb_remove_i2c - Cleanup I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+ /* free the adapter bus structure */
+ i2c_del_adapter(&adapter->i2c_adap);
+}
+
/**
* igb_remove - Device Removal Routine
* @pdev: PCI device information struct
@@ -2212,8 +2476,11 @@ static void igb_remove(struct pci_dev *pdev)
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_HWMON
+ igb_sysfs_exit(adapter);
+#endif
+ igb_remove_i2c(adapter);
igb_ptp_stop(adapter);
-
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
@@ -2243,23 +2510,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
- /* reclaim resources allocated to VFs */
- if (adapter->vf_data) {
- /* disable iov and allow time for transactions to clear */
- if (igb_vfs_are_assigned(adapter)) {
- dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
- } else {
- pci_disable_sriov(pdev);
- msleep(500);
- }
-
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
- wrfl();
- msleep(100);
- dev_info(&pdev->dev, "IOV Disabled\n");
- }
+ igb_disable_sriov(pdev);
#endif
iounmap(hw->hw_addr);
@@ -2290,103 +2541,22 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- int old_vfs = pci_num_vf(adapter->pdev);
- int i;
/* Virtualization features not supported on i210 family. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
return;
- if (old_vfs) {
- dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
- "max_vfs setting of %d\n", old_vfs, max_vfs);
- adapter->vfs_allocated_count = old_vfs;
- }
-
- if (!adapter->vfs_allocated_count)
- return;
-
- adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
- sizeof(struct vf_data_storage), GFP_KERNEL);
-
- /* if allocation failed then we do not support SR-IOV */
- if (!adapter->vf_data) {
- adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev, "Unable to allocate memory for VF "
- "Data Storage\n");
- goto out;
- }
-
- if (!old_vfs) {
- if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
- goto err_out;
- }
- dev_info(&pdev->dev, "%d VFs allocated\n",
- adapter->vfs_allocated_count);
- for (i = 0; i < adapter->vfs_allocated_count; i++)
- igb_vf_configure(adapter, i);
+ igb_enable_sriov(pdev, max_vfs);
+ pci_sriov_set_totalvfs(pdev, 7);
- /* DMA Coalescing is not supported in IOV mode. */
- adapter->flags &= ~IGB_FLAG_DMAC;
- goto out;
-err_out:
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- adapter->vfs_allocated_count = 0;
-out:
- return;
#endif /* CONFIG_PCI_IOV */
}
-/**
- * igb_sw_init - Initialize general software structures (struct igb_adapter)
- * @adapter: board private structure to initialize
- *
- * igb_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-static int igb_sw_init(struct igb_adapter *adapter)
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
u32 max_rss_queues;
- pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
-
- /* set default ring sizes */
- adapter->tx_ring_count = IGB_DEFAULT_TXD;
- adapter->rx_ring_count = IGB_DEFAULT_RXD;
-
- /* set default ITR values */
- adapter->rx_itr_setting = IGB_DEFAULT_ITR;
- adapter->tx_itr_setting = IGB_DEFAULT_ITR;
-
- /* set default work limits */
- adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
-
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
-
- spin_lock_init(&adapter->stats64_lock);
-#ifdef CONFIG_PCI_IOV
- switch (hw->mac.type) {
- case e1000_82576:
- case e1000_i350:
- if (max_vfs > 7) {
- dev_warn(&pdev->dev,
- "Maximum of 7 VFs per PF, using max\n");
- adapter->vfs_allocated_count = 7;
- } else
- adapter->vfs_allocated_count = max_vfs;
- break;
- default:
- break;
- }
-#endif /* CONFIG_PCI_IOV */
-
/* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
case e1000_i211:
@@ -2445,11 +2615,64 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
break;
}
+}
+
+/**
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int igb_sw_init(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGB_DEFAULT_TXD;
+ adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->stats64_lock);
+#ifdef CONFIG_PCI_IOV
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ if (max_vfs > 7) {
+ dev_warn(&pdev->dev,
+ "Maximum of 7 VFs per PF, using max\n");
+ adapter->vfs_allocated_count = 7;
+ } else
+ adapter->vfs_allocated_count = max_vfs;
+ if (adapter->vfs_allocated_count)
+ dev_warn(&pdev->dev,
+ "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
+ break;
+ default:
+ break;
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ igb_init_queue_configuration(adapter);
/* Setup and initialize a copy of the hw vlan table array */
- adapter->shadow_vfta = kzalloc(sizeof(u32) *
- E1000_VLAN_FILTER_TBL_SIZE,
- GFP_ATOMIC);
+ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+ GFP_ATOMIC);
/* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter, true)) {
@@ -3131,6 +3354,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring)
+{
+#define IGB_MAX_BUILD_SKB_SIZE \
+ (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
+ (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
+
+ /* set build_skb flag */
+ if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
+ set_ring_build_skb_enabled(rx_ring);
+ else
+ clear_ring_build_skb_enabled(rx_ring);
+}
+
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3150,8 +3387,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = adapter->rx_ring[i];
+ igb_set_rx_buffer_len(adapter, rx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+ }
}
/**
@@ -3768,6 +4008,7 @@ static void igb_watchdog_task(struct work_struct *work)
}
igb_spoof_check(adapter);
+ igb_ptp_rx_hang(adapter);
/* Reset the timer */
if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -4193,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-/*
- * The largest size we can write to the descriptor is 65535. In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
-
static void igb_tx_map(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
const u8 hdr_len)
@@ -4368,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
- /* need: 1 descriptor per page,
+ /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
- * + 1 desc for skb->data,
* + 1 desc for context descriptor,
- * otherwise try next time */
- if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+ * otherwise try next time
+ */
+ if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+ unsigned short f;
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ } else {
+ count += skb_shinfo(skb)->nr_frags;
+ }
+
+ if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
@@ -4387,12 +4631,15 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
+ skb_tx_timestamp(skb);
+
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
}
@@ -4415,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
igb_tx_map(tx_ring, first, hdr_len);
/* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -4969,7 +5216,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
unsigned char mac_addr[ETH_ALEN];
- eth_random_addr(mac_addr);
+ eth_zero_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
return 0;
@@ -5322,9 +5569,9 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- /* generate a new mac address as we were hotplug removed/added */
+ /* clear mac address as we were hotplug removed/added */
if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
- eth_random_addr(vf_mac);
+ eth_zero_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -5703,7 +5950,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
break;
/* prevent any other reads prior to eop_desc */
- rmb();
+ read_barrier_depends();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
@@ -5819,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
}
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets &&
netif_carrier_ok(tx_ring->netdev) &&
- igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -5870,6 +6118,41 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+ struct page *page,
+ unsigned int truesize)
+{
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+ /* since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
+
/**
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -5892,6 +6175,11 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
{
struct page *page = rx_buffer->page;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
unsigned char *va = page_address(page) + rx_buffer->page_offset;
@@ -5914,38 +6202,88 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+ rx_buffer->page_offset, size, truesize);
- /* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
- return false;
+ return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+}
+static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+ void *page_addr;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(NET_SKB_PAD +
+ NET_IP_ALIGN +
+ size);
+#endif
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+ /* If we spanned a buffer we have a huge mess so test for it */
+ BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
- /*
- * since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unnecessary locked operation
- */
- atomic_set(&page->_count, 2);
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+ /* Guarantee this function can be used by verifying buffer sizes */
+ BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
+ NET_IP_ALIGN +
+ IGB_TS_HDR_LEN +
+ ETH_FRAME_LEN +
+ ETH_FCS_LEN));
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
- return false;
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
- /* bump ref count on page before it is given to the stack */
- get_page(page);
+ page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
#endif
- return true;
+ /* build an skb around the page buffer */
+ skb = build_skb(page_addr, truesize);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* pull timestamp out of packet data */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ __skb_pull(skb, IGB_TS_HDR_LEN);
+ }
+
+ if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
}
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
@@ -5957,13 +6295,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
-
page = rx_buffer->page;
prefetchw(page);
@@ -6363,8 +6694,17 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
break;
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
/* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (ring_uses_build_skb(rx_ring))
+ skb = igb_build_rx_buffer(rx_ring, rx_desc);
+ else
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -6451,6 +6791,14 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ if (ring_uses_build_skb(rx_ring))
+ return NET_SKB_PAD + NET_IP_ALIGN;
+ else
+ return 0;
+}
+
/**
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -6477,7 +6825,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
+ bi->page_offset +
+ igb_rx_offset(rx_ring));
rx_desc++;
bi++;
@@ -6903,6 +7253,72 @@ static void igb_shutdown(struct pci_dev *pdev)
}
}
+#ifdef CONFIG_PCI_IOV
+static int igb_sriov_reinit(struct pci_dev *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ igb_init_queue_configuration(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ igb_open(netdev);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int igb_pci_disable_sriov(struct pci_dev *dev)
+{
+ int err = igb_disable_sriov(dev);
+
+ if (!err)
+ err = igb_sriov_reinit(dev);
+
+ return err;
+}
+
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
+{
+ int err = igb_enable_sriov(dev, num_vfs);
+
+ if (err)
+ goto out;
+
+ err = igb_sriov_reinit(dev);
+ if (!err)
+ return num_vfs;
+
+out:
+ return err;
+}
+
+#endif
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ if (num_vfs == 0)
+ return igb_pci_disable_sriov(dev);
+ else
+ return igb_pci_enable_sriov(dev, num_vfs);
+#endif
+ return 0;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
@@ -7308,4 +7724,133 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
}
}
+static DEFINE_SPINLOCK(i2c_clients_lock);
+
+/* igb_get_i2c_client - returns matching client
+ * in adapter's client list.
+ * @adapter: adapter struct
+ * @dev_addr: device address of the i2c device needed.
+ */
+static struct i2c_client *
+igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
+{
+ ulong flags;
+ struct igb_i2c_client_list *client_list;
+ struct i2c_client *client = NULL;
+ struct i2c_board_info client_info = {
+ I2C_BOARD_INFO("igb", 0x00),
+ };
+
+ spin_lock_irqsave(&i2c_clients_lock, flags);
+ client_list = adapter->i2c_clients;
+
+ /* See if we already have an i2c_client */
+ while (client_list) {
+ if (client_list->client->addr == (dev_addr >> 1)) {
+ client = client_list->client;
+ goto exit;
+ } else {
+ client_list = client_list->next;
+ }
+ }
+
+ /* no client_list found, create a new one */
+ client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
+ if (client_list == NULL)
+ goto exit;
+
+ /* dev_addr passed to us is left-shifted by 1 bit;
+ * the i2c_new_device call expects it to be flush right.
+ */
+ client_info.addr = dev_addr >> 1;
+ client_info.platform_data = adapter;
+ client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
+ if (client_list->client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device..\n");
+ goto err_no_client;
+ }
+
+ /* insert new client at head of list */
+ client_list->next = adapter->i2c_clients;
+ adapter->i2c_clients = client_list;
+
+ client = client_list->client;
+ goto exit;
+
+err_no_client:
+ kfree(client_list);
+exit:
+ spin_unlock_irqrestore(&i2c_clients_lock, flags);
+ return client;
+}
+
+/* igb_read_i2c_byte - Reads an 8-bit value over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs a byte read operation over the I2C interface at
+ * a specified device address.
+ */
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ s32 status;
+ u16 swfw_mask = 0;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+
+ status = i2c_smbus_read_byte_data(this_client, byte_offset);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status < 0)
+ return E1000_ERR_I2C;
+ else {
+ *data = status;
+ return E1000_SUCCESS;
+ }
+}
+
+/* igb_write_i2c_byte - Writes an 8-bit value over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs a byte write operation over the I2C interface at
+ * a specified device address.
+ */
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ s32 status;
+ u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+ status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status)
+ return E1000_ERR_I2C;
+ else
+ return E1000_SUCCESS;
+}
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab3429729bde..0987822359f0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/ptp_classify.h>
#include "igb.h"
@@ -70,6 +71,7 @@
*/
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
@@ -396,6 +398,15 @@ void igb_ptp_tx_work(struct work_struct *work)
if (!adapter->ptp_tx_skb)
return;
+ if (time_is_before_jiffies(adapter->ptp_tx_start +
+ IGB_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+ return;
+ }
+
tsynctxctl = rd32(E1000_TSYNCTXCTL);
if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
igb_ptp_tx_hwtstamp(adapter);
@@ -419,6 +430,51 @@ static void igb_ptp_overflow_check(struct work_struct *work)
}
/**
+ * igb_ptp_rx_hang - detect error case where Rx timestamp registers are latched
+ * @adapter: private network adapter structure
+ *
+ * This watchdog task is scheduled to detect the error case where hardware has
+ * dropped a timestamped Rx packet because the ring was full. The error is rare,
+ * but it leaves the device unable to timestamp any future packets.
+ */
+void igb_ptp_rx_hang(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *rx_ring;
+ u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
+ unsigned long rx_event;
+ int n;
+
+ if (hw->mac.type != e1000_82576)
+ return;
+
+ /* If we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(E1000_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
+ adapter->rx_hwtstamp_cleared++;
+ dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+ }
+}
+
+/**
* igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: Board private structure.
*
@@ -643,7 +699,6 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
else
wr32(E1000_ETQF(3), 0);
-#define PTP_PORT 319
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
u32 ftqf = (IPPROTO_UDP /* UDP */
@@ -652,12 +707,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
| E1000_FTQF_MASK); /* mask all inputs */
ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
- wr32(E1000_IMIR(3), htons(PTP_PORT));
+ wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
wr32(E1000_IMIREXT(3),
(E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
if (hw->mac.type == e1000_82576) {
/* enable source port check */
- wr32(E1000_SPQF(3), htons(PTP_PORT));
+ wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
}
wr32(E1000_FTQF(3), ftqf);
@@ -801,6 +856,10 @@ void igb_ptp_stop(struct igb_adapter *adapter)
}
cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
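
Both additions to igb_ptp.c above lean on the kernel's wrap-safe jiffies helpers: the Tx work item frees a stuck timestamp request once IGB_PTP_TX_TIMEOUT (HZ * 15) has elapsed, and igb_ptp_rx_hang() clears a latched Rx timestamp that nothing has consumed for 5 * HZ. A minimal userspace sketch of the wrap-safe comparison that time_after()/time_is_before_jiffies() perform; HZ and the tick values here are arbitrary stand-ins.

```c
#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe "a is later than b", the core of the kernel's time_after(). */
static bool after(unsigned long a, unsigned long b)
{
    return (long)(b - a) < 0;
}

int main(void)
{
    const unsigned long hz = 100;         /* pretend HZ == 100 */
    unsigned long start = 1000;           /* tick when the skb was queued */
    unsigned long now = start + 16 * hz;  /* 16 seconds later */

    if (after(now, start + 15 * hz))
        printf("Tx timestamp request timed out, drop the skb\n");
    return 0;
}
```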
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fdca7b672776..a1463e3d14c0 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -127,8 +127,8 @@ struct igbvf_buffer {
/* Tx */
struct {
unsigned long time_stamp;
+ union e1000_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
/* Rx */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 277f5dfe3d90..d60cd4393415 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
bool cleaned = false;
i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+ buffer_info = &tx_ring->buffer_info[i];
+ eop_desc = buffer_info->next_to_watch;
+
+ do {
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ buffer_info->next_to_watch = NULL;
- while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- rmb(); /* read buffer_info after eop_desc status */
for (cleaned = false; !cleaned; count++) {
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = buffer_info->skb;
if (skb) {
@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
i++;
if (i == tx_ring->count)
i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
}
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
- }
+
+ eop_desc = buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
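
The igbvf_clean_tx_irq() rework above stops tracking the end-of-packet descriptor by ring index and stores a pointer to it instead: cleanup bails out as soon as next_to_watch is NULL or the hardware has not yet written back the DD (descriptor done) bit, and the pointer is cleared up front so a half-cleaned packet cannot be mistaken for a hang. A simplified, self-contained sketch of that loop shape; the ring size, status flag, and bookkeeping structures are invented for illustration.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4
#define STAT_DD   0x1u              /* "descriptor done", written back by HW */

struct desc   { uint32_t status; };
struct buffer { struct desc *next_to_watch; };

int main(void)
{
    struct desc   ring[RING_SIZE] = { { STAT_DD }, { STAT_DD }, { 0 }, { 0 } };
    struct buffer buf[RING_SIZE]  = { { NULL } };
    unsigned int i = 0, count = 0;

    /* one transmitted packet spanning descriptors 0..1; ring[1] is its EOP */
    buf[0].next_to_watch = &ring[1];

    struct buffer *b = &buf[i];
    struct desc *eop = b->next_to_watch;

    while (count < RING_SIZE) {
        if (!eop)                        /* nothing pending */
            break;
        if (!(eop->status & STAT_DD))    /* hardware not finished yet */
            break;
        b->next_to_watch = NULL;         /* clear to prevent false hangs */

        for (bool cleaned = false; !cleaned; count++) {
            cleaned = (&ring[i] == eop);
            /* a real driver would unmap and free buffer i here */
            i = (i + 1) % RING_SIZE;
            b = &buf[i];
        }
        eop = b->next_to_watch;
    }
    printf("cleaned %u descriptors, next_to_clean = %u\n", count, i);
    return 0;
}
```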
@@ -1399,12 +1412,10 @@ static void igbvf_set_multi(struct net_device *netdev)
int i;
if (!netdev_mc_empty(netdev)) {
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list) {
- dev_err(&adapter->pdev->dev,
- "failed to allocate multicast filter list\n");
+ mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
+ GFP_ATOMIC);
+ if (!mta_list)
return;
- }
}
/* prepare a packed array of only addresses. */
@@ -1738,7 +1749,6 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -1964,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
context_desc->seqnum_seed = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2024,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
context_desc->mss_l4len_idx = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2064,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb,
- unsigned int first)
+ struct sk_buff *skb)
{
struct igbvf_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
@@ -2080,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
DMA_TO_DEVICE);
@@ -2103,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
@@ -2112,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
@@ -2123,7 +2127,6 @@ dma_error:
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
if (count)
count--;
@@ -2142,7 +2145,8 @@ dma_error:
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- int tx_flags, int count, u32 paylen,
+ int tx_flags, int count,
+ unsigned int first, u32 paylen,
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2192,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
* such as IA-64). */
wmb();
+ tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
/* we need this if more than one processor can write to our tail
@@ -2258,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
* count reflects descriptors mapped, if 0 then mapping error
* has occurred and we need to rewind the descriptor queue
*/
- count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+ count = igbvf_tx_map_adv(adapter, tx_ring, skb);
if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
- skb->len, hdr_len);
+ first, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else {
@@ -2736,30 +2741,24 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address."
- " Is the PF interface up?\n");
- eth_hw_addr_random(netdev);
- memcpy(adapter->hw.mac.addr, netdev->dev_addr,
- netdev->addr_len);
+ "PF still in reset state. Is the PF interface up?\n");
} else {
err = hw->mac.ops.read_mac_addr(hw);
- if (err) {
- dev_err(&pdev->dev, "Error reading MAC address\n");
- goto err_hw_init;
- }
+ if (err)
+ dev_info(&pdev->dev, "Error reading MAC address.\n");
+ else if (is_zero_ether_addr(adapter->hw.mac.addr))
+ dev_info(&pdev->dev, "MAC address not assigned by administrator.\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ netdev->addr_len);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
- netdev->dev_addr);
- err = -EIO;
- goto err_hw_init;
+ dev_info(&pdev->dev, "Assigning random MAC address.\n");
+ eth_hw_addr_random(netdev);
+ memcpy(adapter->hw.mac.addr, netdev->dev_addr,
+ netdev->addr_len);
}
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
-
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
(unsigned long) adapter);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index ae96c10251be..ea4808373435 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -500,9 +500,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
@@ -709,11 +708,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info) {
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate transmit descriptor ring memory\n");
+ if (!txdr->buffer_info)
return -ENOMEM;
- }
/* round up to nearest 4K */
@@ -798,11 +794,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info) {
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate receive descriptor ring\n");
+ if (!rxdr->buffer_info)
return -ENOMEM;
- }
/* Round up to nearest 4K */
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index f3a632bf8d96..be2989e60009 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -32,7 +32,7 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
@@ -40,4 +40,5 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
+ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8e786764c60e..a8e10cff7a89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,7 @@
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
@@ -91,21 +92,26 @@
*/
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
-#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
-#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
-#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
+enum ixgbe_tx_flags {
+ /* cmd_type flags */
+ IXGBE_TX_FLAGS_HW_VLAN = 0x01,
+ IXGBE_TX_FLAGS_TSO = 0x02,
+ IXGBE_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IXGBE_TX_FLAGS_CC = 0x08,
+ IXGBE_TX_FLAGS_IPV4 = 0x10,
+ IXGBE_TX_FLAGS_CSUM = 0x20,
+
+ /* software defined flags */
+ IXGBE_TX_FLAGS_SW_VLAN = 0x40,
+ IXGBE_TX_FLAGS_FCOE = 0x80,
+};
+
+/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
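
The replacement of the individual IXGBE_TX_FLAGS_* defines with an enum also regroups the bits by where they end up, as the comments above indicate: cmd_type flags, olinfo flags, and software-only flags. A small sketch of building and splitting such a flag word; the values simply mirror the enum above, and the two masks are invented for the illustration.

```c
#include <stdint.h>
#include <stdio.h>

enum tx_flags {
    /* cmd_type group */
    F_HW_VLAN = 0x01, F_TSO  = 0x02, F_TSTAMP = 0x04,
    /* olinfo group */
    F_CC      = 0x08, F_IPV4 = 0x10, F_CSUM   = 0x20,
    /* software only */
    F_SW_VLAN = 0x40, F_FCOE = 0x80,
};

#define CMD_TYPE_MASK (F_HW_VLAN | F_TSO | F_TSTAMP)
#define OLINFO_MASK   (F_CC | F_IPV4 | F_CSUM)

int main(void)
{
    uint32_t flags = F_TSO | F_IPV4 | F_CSUM;

    if (flags & F_TSO)
        printf("segmentation offload requested\n");
    printf("cmd_type bits 0x%02x, olinfo bits 0x%02x\n",
           (unsigned)(flags & CMD_TYPE_MASK), (unsigned)(flags & OLINFO_MASK));
    return 0;
}
```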
@@ -150,7 +156,7 @@ struct vf_macvlans {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
@@ -195,6 +201,7 @@ struct ixgbe_rx_queue_stats {
enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
@@ -224,6 +231,7 @@ struct ixgbe_ring {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
+ unsigned long last_rx_timestamp;
unsigned long state;
u8 __iomem *tail;
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -271,15 +279,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
-#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
-#endif /* IXGBE_FCOE */
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -573,11 +576,14 @@ struct ixgbe_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
unsigned long last_overflow_check;
+ unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
- int rx_hwtstamp_filter;
u32 base_incval;
/* SR-IOV */
@@ -614,6 +620,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
+ __IXGBE_READ_I2C,
};
struct ixgbe_cb {
@@ -694,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
@@ -742,15 +749,32 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb);
-extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
+extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
+static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+ return;
+
+ __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
+
+ /*
+ * Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ rx_ring->last_rx_timestamp = jiffies;
+}
+
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
+#endif
#endif /* _IXGBE_H_ */
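
The new static inline ixgbe_ptp_rx_hwtstamp() above keeps the per-packet cost to a single descriptor status test; only the rare timestamped frame drops into the out-of-line __ixgbe_ptp_rx_hwtstamp(), and jiffies is recorded so the Rx hang watchdog added in this series has something to compare against. A hedged userspace sketch of that fast-path/slow-path split; every name and type here is a stand-in, not driver API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Out-of-line worker: only reached for the rare timestamped packet. */
static void rx_hwtstamp_slow(unsigned int queue)
{
    printf("latching hardware timestamp on queue %u\n", queue);
}

/* Cheap inline check performed for every received packet. */
static inline void rx_hwtstamp(unsigned int queue, bool ts_bit_set,
                               unsigned long *last_rx_timestamp,
                               unsigned long now)
{
    if (!ts_bit_set)
        return;

    rx_hwtstamp_slow(queue);
    /* remember when a timestamp was last seen so a watchdog can spot hangs */
    *last_rx_timestamp = now;
}

int main(void)
{
    unsigned long last = 0;

    rx_hwtstamp(0, false, &last, 1000);   /* common case: nothing to do */
    rx_hwtstamp(0, true,  &last, 1001);   /* timestamped packet */
    printf("last_rx_timestamp = %lu\n", last);
    return 0;
}
```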
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 42537336110c..d0113fc97b6f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,7 +41,6 @@
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
@@ -633,15 +632,15 @@ out:
* ixgbe_setup_mac_link_82598 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if auto-negotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
+ bool autoneg = false;
s32 status = 0;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -685,20 +684,18 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true if waiting is needed to complete
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -1006,15 +1003,16 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
}
/**
- * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to read
+ * @dev_addr: address to read from
+ * @byte_offset: byte offset to read from dev_addr
* @eeprom_data: value read
*
- * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ * Performs an 8 bit read of the SFP module's data over the I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ u8 byte_offset, u8 *eeprom_data)
{
s32 status = 0;
u16 sfp_addr = 0;
@@ -1028,7 +1026,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* 0xC30D. These registers are used to talk to the SFP+
* module's EEPROM through the SDA/SCL (I2C) interface.
*/
- sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+ sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
@@ -1060,7 +1058,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
status = IXGBE_ERR_PHY;
- goto out;
}
out:
@@ -1068,6 +1065,36 @@ out:
}
/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs an 8 bit read of the SFP module's EEPROM over the I2C interface.
+ **/
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs an 8 bit read of the SFP module's SFF-8472 data over I2C.
+ **/
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, sff8472_data);
+}
+
+/**
* ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
* @hw: pointer to hardware structure
*
@@ -1300,6 +1327,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.write_reg = &ixgbe_write_phy_reg_generic,
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
.check_overtemp = &ixgbe_tn_check_overtemp,
};
@@ -1311,4 +1339,3 @@ struct ixgbe_info ixgbe_82598_info = {
.eeprom_ops = &eeprom_ops_82598,
.phy_ops = &phy_ops_82598,
};
-
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1073aea5da40..203a00c24330 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -45,21 +45,17 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
@@ -234,13 +230,13 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
* ixgbe_get_link_capabilities_82599 - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
- * @negotiation: true when autoneg or autotry is enabled
+ * @autoneg: true when autoneg or autotry is enabled
*
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
- bool *negotiation)
+ bool *autoneg)
{
s32 status = 0;
u32 autoc = 0;
@@ -251,7 +247,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
goto out;
}
@@ -268,22 +264,22 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_1G_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_10G_SERIAL:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR:
@@ -295,7 +291,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
@@ -306,12 +302,12 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_SGMII_1G_100M:
*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
default:
@@ -323,7 +319,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
}
out:
@@ -510,14 +506,12 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status = 0;
@@ -527,11 +521,11 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
u32 i = 0;
bool link_up = false;
- bool negotiation;
+ bool autoneg = false;
/* Mask off requested but non-supported speeds */
status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
- &negotiation);
+ &autoneg);
if (status != 0)
return status;
@@ -564,7 +558,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_10GB_FULL,
- autoneg,
autoneg_wait_to_complete);
if (status != 0)
return status;
@@ -617,7 +610,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_1GB_FULL,
- autoneg,
autoneg_wait_to_complete);
if (status != 0)
return status;
@@ -646,7 +638,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
highest_link_speed,
- autoneg,
autoneg_wait_to_complete);
out:
@@ -666,13 +657,12 @@ out:
* ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Implements the Intel SmartSpeed algorithm.
**/
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
s32 status = 0;
@@ -703,7 +693,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* First, try to get link with full advertisement */
hw->phy.smart_speed_active = false;
for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != 0)
goto out;
@@ -738,7 +728,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* Turn SmartSpeed on to disable KR support */
hw->phy.smart_speed_active = true;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != 0)
goto out;
@@ -764,7 +754,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* We didn't get link. Turn SmartSpeed back off. */
hw->phy.smart_speed_active = false;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
out:
@@ -778,14 +768,13 @@ out:
* ixgbe_setup_mac_link_82599 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status = 0;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -799,6 +788,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
bool got_lock = false;
+ bool autoneg = false;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -911,20 +901,18 @@ out:
* ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true if waiting is needed to complete
*
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -2253,6 +2241,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 5e68afdd502a..99e472ebaa75 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f7a0970a251c..bc3948ead6e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 9bc17c0cb972..1f2c805684dd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 1f4108ee154b..1634de8b627f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 87592b458c9c..ac780770863d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index ba835708fcac..3164f5453b8f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 4eac80d01857..05e23b80b5e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index 4dec47faeb00..a4ef07631d1e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f1e002d5fa8f..f3d68f9696ba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
#include <linux/dcbnl.h>
#include "ixgbe_dcb_82598.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
/* Callbacks for DCB netlink in the kernel */
#define BIT_DCB_MODE 0x01
@@ -301,7 +302,6 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
-#ifdef IXGBE_FCOE
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -320,7 +320,6 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
clear_bit(__IXGBE_RESETTING, &adapter->state);
}
-#endif
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
@@ -450,7 +449,6 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u8 rval = 0;
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -461,14 +459,14 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
*num = adapter->dcb_cfg.num_tcs.pfc_tcs;
break;
default:
- rval = -EINVAL;
+ return -EINVAL;
break;
}
} else {
- rval = -EINVAL;
+ return -EINVAL;
}
- return rval;
+ return 0;
}
static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
@@ -541,6 +539,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err = 0;
__u8 max_tc = 0;
+ __u8 map_chg = 0;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
@@ -550,15 +549,22 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
GFP_KERNEL);
if (!adapter->ixgbe_ieee_ets)
return -ENOMEM;
- }
- memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+ /* initialize UP2TC mappings to invalid value */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ adapter->ixgbe_ieee_ets->prio_tc[i] =
+ IEEE_8021QAZ_MAX_TCS;
+ }
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
+ if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i])
+ map_chg = 1;
}
+ memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
if (max_tc)
max_tc++;
@@ -567,6 +573,8 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (max_tc != netdev_get_num_tc(dev))
err = ixgbe_setup_tc(dev, max_tc);
+ else if (map_chg)
+ ixgbe_dcbnl_devreset(dev);
if (err)
goto err_out;
@@ -643,9 +651,11 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
return err;
err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
#ifdef IXGBE_FCOE
- if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
@@ -656,6 +666,23 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
ixgbe_dcbnl_devreset(dev);
}
#endif
+
+ /* VF devices should use default UP when available */
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == 0) {
+ int vf;
+
+ adapter->default_up = app->priority;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+
+ if (!vfinfo->pf_qos)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ app->priority, vf);
+ }
+ }
+
return 0;
}
@@ -683,6 +710,24 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
ixgbe_dcbnl_devreset(dev);
}
#endif
+ /* If the default priority is being removed, clear the VF default UP */
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == 0 && adapter->default_up == app->priority) {
+ int vf;
+ unsigned long app_mask = dcb_ieee_getapp_mask(dev, app);
+ int qos = app_mask ? find_first_bit(&app_mask, 8) : 0;
+
+ adapter->default_up = qos;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+
+ if (!vfinfo->pf_qos)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ qos, vf);
+ }
+ }
+
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 50aa546b8c7a..c5933f6dceee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -24,9 +24,6 @@
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-
-#ifdef CONFIG_DEBUG_FS
-
#include <linux/debugfs.h>
#include <linux/module.h>
@@ -277,5 +274,3 @@ void ixgbe_dbg_exit(void)
{
debugfs_remove_recursive(ixgbe_dbg_root);
}
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 326858424345..f4d2e9e3c6d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include "ixgbe.h"
+#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16
@@ -156,7 +157,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
u32 link_speed = 0;
- bool autoneg;
+ bool autoneg = false;
bool link_up;
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
@@ -333,10 +334,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
return err;
/* this sets the link speed and restarts auto-neg */
hw->mac.autotry_restart = true;
- err = hw->mac.ops.setup_link(hw, advertised, true, true);
+ err = hw->mac.ops.setup_link(hw, advertised, true);
if (err) {
e_info(probe, "setup link failed with code %d\n", err);
- hw->mac.ops.setup_link(hw, old, true, true);
+ hw->mac.ops.setup_link(hw, old, true);
}
} else {
/* in this case we currently only support 10Gb/FULL */
@@ -1040,6 +1041,9 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
p = (char *) adapter +
ixgbe_gstrings_stats[i].stat_offset;
break;
+ default:
+ data[i] = 0;
+ continue;
}
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
@@ -1096,8 +1100,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < IXGBE_TEST_LEN; i++) {
+ memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
@@ -1837,19 +1843,11 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
bool if_running = netif_running(netdev);
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
-
- e_info(hw, "offline testing starting\n");
-
- /* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
- if (ixgbe_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
int i;
for (i = 0; i < adapter->num_vfs; i++) {
@@ -1870,12 +1868,24 @@ static void ixgbe_diag_test(struct net_device *netdev,
}
}
+ /* Offline tests */
+ e_info(hw, "offline testing starting\n");
+
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
- else
- ixgbe_reset(adapter);
+ /* bringing adapter down disables SFP+ optics */
+ if (hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result
+ */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
e_info(hw, "register testing starting\n");
if (ixgbe_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1908,16 +1918,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
skip_loopback:
ixgbe_reset(adapter);
+ /* clear testing bit and return adapter to previous state */
clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
} else {
e_info(hw, "online testing starting\n");
+
+ /* if adapter is down, SFP+ optics will be disabled */
+ if (!if_running && hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+
/* Online tests */
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- /* Online tests aren't run; pass by default */
+ /* Offline tests aren't run; pass by default */
data[0] = 0;
data[1] = 0;
data[2] = 0;
@@ -1925,6 +1941,10 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
}
+
+ /* if adapter was down, ensure SFP+ optics are disabled again */
+ if (!if_running && hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
msleep_interruptible(4 * 1000);
}
@@ -2093,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- u16 tx_itr_param, rx_itr_param;
+ u16 tx_itr_param, rx_itr_param, tx_itr_prev;
bool need_reset = false;
- /* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
- return -EINVAL;
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
+ /* reject Tx specific changes in case of mixed RxTx vectors */
+ if (ec->tx_coalesce_usecs)
+ return -EINVAL;
+ tx_itr_prev = adapter->rx_itr_setting;
+ } else {
+ tx_itr_prev = adapter->tx_itr_setting;
+ }
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@@ -2125,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else
tx_itr_param = adapter->tx_itr_setting;
+ /* mixed Rx/Tx */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+
+#if IS_ENABLED(CONFIG_BQL)
+ /* detect ITR changes that require update of TXDCTL.WTHRESH */
+ if ((adapter->tx_itr_setting > 1) &&
+ (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+ if ((tx_itr_prev == 1) ||
+ (tx_itr_prev > IXGBE_100K_ITR))
+ need_reset = true;
+ } else {
+ if ((tx_itr_prev > 1) &&
+ (tx_itr_prev < IXGBE_100K_ITR))
+ need_reset = true;
+ }
+#endif
/* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter);
+ need_reset |= ixgbe_update_rsc(adapter);
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
@@ -2695,6 +2736,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
@@ -2704,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
return 0;
}
+static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
+{
+ unsigned int max_combined;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /* We only support one q_vector without MSI-X */
+ max_combined = 1;
+ } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ /* SR-IOV currently only allows one queue on the PF */
+ max_combined = 1;
+ } else if (tcs > 1) {
+ /* For DCB report channels per traffic class */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ /* 8 TC w/ 4 queues per TC */
+ max_combined = 4;
+ } else if (tcs > 4) {
+ /* 8 TC w/ 8 queues per TC */
+ max_combined = 8;
+ } else {
+ /* 4 TC w/ 16 queues per TC */
+ max_combined = 16;
+ }
+ } else if (adapter->atr_sample_rate) {
+ /* support up to 64 queues with ATR */
+ max_combined = IXGBE_MAX_FDIR_INDICES;
+ } else {
+ /* support up to 16 queues with RSS */
+ max_combined = IXGBE_MAX_RSS_INDICES;
+ }
+
+ return max_combined;
+}
+
+static void ixgbe_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* report maximum channels */
+ ch->max_combined = ixgbe_max_channels(adapter);
+
+ /* report info for other vector */
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ /* record RSS queues */
+ ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+ /* nothing else to report if RSS is disabled */
+ if (ch->combined_count == 1)
+ return;
+
+ /* we do not support ATR queueing if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return;
+
+ /* same thing goes for being DCB enabled */
+ if (netdev_get_num_tc(dev) > 1)
+ return;
+
+ /* if ATR is disabled we can exit */
+ if (!adapter->atr_sample_rate)
+ return;
+
+ /* report flow director queues as maximum channels */
+ ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
+}
+
+static int ixgbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > ixgbe_max_channels(adapter))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ adapter->ring_feature[RING_F_FDIR].limit = count;
+
+ /* cap RSS limit at 16 */
+ if (count > IXGBE_MAX_RSS_INDICES)
+ count = IXGBE_MAX_RSS_INDICES;
+ adapter->ring_feature[RING_F_RSS].limit = count;
+
+#ifdef IXGBE_FCOE
+ /* cap FCoE limit at 8 */
+ if (count > IXGBE_FCRETA_SIZE)
+ count = IXGBE_FCRETA_SIZE;
+ adapter->ring_feature[RING_F_FCOE].limit = count;
+
+#endif
+ /* use setup TC to update any traffic class queue mapping */
+ return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
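
ixgbe_set_channels() above cascades the requested combined count through the per-feature limits: the flow director limit takes the full, validated request, while the RSS and FCoE limits are capped at 16 and 8 respectively, matching the constants used in the function. A tiny sketch of that validate-then-clamp cascade, with the same numeric limits used as local stand-ins.

```c
#include <stdio.h>

#define MAX_FDIR 63   /* mirrors IXGBE_MAX_FDIR_INDICES above */
#define MAX_RSS  16   /* mirrors IXGBE_MAX_RSS_INDICES */
#define MAX_FCOE  8   /* FCoE redirection table size */

int main(void)
{
    unsigned int count = 24;                      /* requested channels */

    if (count == 0 || count > MAX_FDIR)
        return 1;                                 /* reject, like -EINVAL */

    unsigned int fdir = count;                    /* full request */
    unsigned int rss  = count > MAX_RSS  ? MAX_RSS  : count;
    unsigned int fcoe = count > MAX_FCOE ? MAX_FCOE : count;

    printf("fdir limit %u, rss limit %u, fcoe limit %u\n", fdir, rss, fcoe);
    return 0;
}
```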
+
+static int ixgbe_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status;
+ u8 sff8472_rev, addr_mode;
+ int ret_val = 0;
+ bool page_swap = false;
+
+ /* avoid concurrent i2c reads */
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+
+ /* used by the service task */
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ /* check whether the module requires address-change mode (not supported) */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have a SFP, but it does not support SFF-8472 */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have a SFP which supports a revision of SFF-8472. */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+ return ret_val;
+}
+
+static int ixgbe_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u8 databyte = 0xFF;
+ int i = 0;
+ int ret_val = 0;
+
+ /* ixgbe_get_module_info() is always called before this function, so
+ * there is no need to repeat its checks here; ee->len can be trusted
+ * to be a known value.
+ */
+
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Read the first block, SFF-8079 */
+ for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+
+ /* If the second block is requested, check if SFF-8472 is supported. */
+ if (ee->len == ETH_MODULE_SFF_8472_LEN) {
+ if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
+ return -EOPNOTSUPP;
+
+ /* Read the second block, SFF-8472 */
+ for (i = ETH_MODULE_SFF_8079_LEN;
+ i < ETH_MODULE_SFF_8472_LEN; i++) {
+ status = hw->phy.ops.read_i2c_sff8472(hw,
+ i - ETH_MODULE_SFF_8079_LEN, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ return ret_val;
+}
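
Together these two hooks back the "ethtool -m" module dump. A hedged sketch of the raw ioctl sequence a user-space tool would issue (interface name illustrative, error handling trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* illustrative: query module type and length, then dump the module EEPROM */
static int dump_module_eeprom(const char *ifname)
{
	struct ethtool_modinfo modinfo = { .cmd = ETHTOOL_GMODULEINFO };
	struct ethtool_eeprom *ee;
	struct ifreq ifr;
	unsigned int i;
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	ifr.ifr_data = (void *)&modinfo;
	err = ioctl(fd, SIOCETHTOOL, &ifr);	/* -> get_module_info */
	if (err)
		goto out;

	ee = calloc(1, sizeof(*ee) + modinfo.eeprom_len);
	if (!ee) {
		err = -1;
		goto out;
	}
	ee->cmd = ETHTOOL_GMODULEEEPROM;
	ee->len = modinfo.eeprom_len;

	ifr.ifr_data = (void *)ee;
	err = ioctl(fd, SIOCETHTOOL, &ifr);	/* -> get_module_eeprom */
	if (!err)
		for (i = 0; i < ee->len; i++)
			printf("%02x%c", ee->data[i], (i % 16 == 15) ? '\n' : ' ');
	free(ee);
out:
	close(fd);
	return err;
}
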
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@@ -2732,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 252850d9a3e0..f58db453a97e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -544,15 +544,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
skb_shinfo(skb)->gso_size);
first->bytecount += (first->gso_segs - 1) * *hdr_len;
- first->tx_flags |= IXGBE_TX_FLAGS_FSO;
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO;
}
/* set flag indicating FCOE to ixgbe_tx_map call */
- first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
+ first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;
- /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
+ /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_transport_offset(skb) +
@@ -717,10 +716,8 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
/* Extra buffer to be shared by all DDPs for HW work around */
buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
- if (!buffer) {
- e_err(drv, "failed to allocate extra DDP buffer\n");
+ if (!buffer)
return -ENOMEM;
- }
dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index bf724da99375..3a02759b5e95 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8c74f739011d..ef5f7a678ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
fcoe = &adapter->ring_feature[RING_F_FCOE];
/* limit ourselves based on feature limits */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (fcoe_i) {
@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (vmdq_i > 1 && fcoe_i) {
- /* reserve no more than number of CPUs */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
-
/* alloc queues for FCoE separately */
fcoe->indices = fcoe_i;
fcoe->offset = vmdq_i * rss_i;
@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
if (rss_i > 1 && adapter->atr_sample_rate) {
f = &adapter->ring_feature[RING_F_FDIR];
- f->indices = min_t(u16, num_online_cpus(), f->limit);
- rss_i = max_t(u16, rss_i, f->indices);
+ rss_i = f->indices = f->limit;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = -1;
+ int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
+ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ if (cpu_online(v_idx)) {
+ cpu = v_idx;
+ node = cpu_to_node(cpu);
+ }
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 20a5af6d87d0..68478d6dfa2d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -66,7 +66,7 @@ static char ixgbe_default_device_descr[] =
#define DRV_VERSION "3.11.33-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2012 Intel Corporation.";
+ "Copyright (c) 1999-2013 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -803,6 +803,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ e_warn(drv, "initiating reset due to tx timeout\n");
ixgbe_service_event_schedule(adapter);
}
}
@@ -837,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- rmb();
+ read_barrier_depends();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -850,9 +851,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
- if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
- ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -1401,6 +1399,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
/* set gso_size to avoid messing up TCP MSS */
skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
IXGBE_CB(skb)->append_cnt);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}
static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
@@ -1441,7 +1440,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+ ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -2180,10 +2179,10 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
return;
if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
- u32 autoneg;
+ u32 speed;
bool link_up = false;
- hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (link_up)
return;
@@ -2787,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/*
* set WTHRESH to encourage burst writeback, it should not be set
- * higher than 1 when ITR is 0 as it could cause false TX hangs
+ * higher than 1 when:
+ * - ITR is 0 as it could cause false TX hangs
+ * - ITR is set to > 100k int/sec and BQL is enabled
*
* In order to avoid issues WTHRESH + PTHRESH should always be equal
* to or less than the number of on chip descriptors, which is
* currently 40.
*/
+#if IS_ENABLED(CONFIG_BQL)
+ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
+#else
if (!ring->q_vector || (ring->q_vector->itr < 8))
+#endif
txdctl |= (1 << 16); /* WTHRESH = 1 */
else
txdctl |= (8 << 16); /* WTHRESH = 8 */
@@ -2814,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->atr_sample_rate = 0;
}
+ /* initialize XPS */
+ if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+
+ if (q_vector)
+ netif_set_xps_queue(adapter->netdev,
+ &q_vector->affinity_mask,
+ ring->queue_index);
+ }
+
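
The XPS initialization above steers each Tx ring toward its q_vector's CPU affinity mask; the resulting mask is visible and overridable per queue through sysfs. A small illustrative reader (interface and queue index are placeholders):

#include <stdio.h>

/* illustrative: print the CPU mask XPS assigned to Tx queue 0 of "eth0" */
int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/xps_cpus", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("xps_cpus: %s", buf);
	fclose(f);
	return 0;
}
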
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
@@ -3996,25 +4011,25 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 autoneg;
- bool negotiation, link_up = false;
+ u32 speed;
+ bool autoneg, link_up = false;
u32 ret = IXGBE_ERR_LINK_SETUP;
if (hw->mac.ops.check_link)
- ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+ ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (ret)
goto link_cfg_out;
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
- &negotiation);
+ speed = hw->phy.autoneg_advertised;
+ if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ ret = hw->mac.ops.get_link_capabilities(hw, &speed,
+ &autoneg);
if (ret)
goto link_cfg_out;
if (hw->mac.ops.setup_link)
- ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
+ ret = hw->mac.ops.setup_link(hw, speed, link_up);
link_cfg_out:
return ret;
}
@@ -4466,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- unsigned int rss;
+ unsigned int rss, fdir;
u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -4481,38 +4496,57 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
- /* Set capability flags */
+ /* Set common capability flags and settings */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82599;
+ adapter->atr_sample_rate = 20;
+ fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_FDIR].limit = fdir;
+ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef CONFIG_IXGBE_DCA
+ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+ adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+ /* Default traffic class to use for FCoE */
+ adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+#endif /* CONFIG_IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+
+ /* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+
adapter->max_q_vectors = MAX_Q_VECTORS_82598;
+ adapter->ring_feature[RING_F_FDIR].limit = 0;
+ adapter->atr_sample_rate = 0;
+ adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+ adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+ break;
+ case ixgbe_mac_82599EB:
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X540:
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- case ixgbe_mac_82599EB:
- adapter->max_q_vectors = MAX_Q_VECTORS_82599;
- adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- /* Flow Director hash filters enabled */
- adapter->atr_sample_rate = 20;
- adapter->ring_feature[RING_F_FDIR].limit =
- IXGBE_MAX_FDIR_INDICES;
- adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
-#ifdef IXGBE_FCOE
- adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-#ifdef CONFIG_IXGBE_DCB
- /* Default traffic class to use for FCoE */
- adapter->fcoe.up = IXGBE_FCOE_DEFTC;
-#endif
-#endif /* IXGBE_FCOE */
break;
default:
break;
@@ -4871,7 +4905,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -5534,6 +5568,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
break;
}
+ adapter->last_rx_ptp_check = jiffies;
+
if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
ixgbe_ptp_start_cyclecounter(adapter);
@@ -5614,6 +5650,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
*/
+ e_warn(drv, "initiating reset to clear Tx work after link loss\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
}
}
@@ -5678,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
+	/* concurrent i2c reads are not supported */
+ if (test_bit(__IXGBE_READ_I2C, &adapter->state))
+ return;
+
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@@ -5738,8 +5779,8 @@ sfp_out:
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg;
- bool negotiation;
+ u32 speed;
+ bool autoneg = false;
if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
return;
@@ -5750,11 +5791,11 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ speed = hw->phy.autoneg_advertised;
+ if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
if (hw->mac.ops.setup_link)
- hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
+ hw->mac.ops.setup_link(hw, speed, true);
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
@@ -5878,7 +5919,6 @@ static void ixgbe_service_task(struct work_struct *work)
struct ixgbe_adapter *adapter = container_of(work,
struct ixgbe_adapter,
service_task);
-
ixgbe_reset_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
@@ -5886,7 +5926,11 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_watchdog_subtask(adapter);
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
- ixgbe_ptp_overflow_check(adapter);
+
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+ ixgbe_ptp_overflow_check(adapter);
+ ixgbe_ptp_rx_hang(adapter);
+ }
ixgbe_service_event_complete(adapter);
}
@@ -5899,6 +5943,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -5941,10 +5988,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
- /* mss_l4len_id: use 1 as index for TSO */
+ /* mss_l4len_id: use 0 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
@@ -5966,12 +6012,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
- if (unlikely(skb->no_fcs))
- first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
- if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
- return;
- }
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+ !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+ return;
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
@@ -6029,30 +6072,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
type_tucmd, mss_l4len_idx);
}
-static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
+#define IXGBE_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
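
IXGBE_SET_FLAG moves a flag bit from its position in the input word to the position of the result bit using only a mask and a constant multiply or divide, so it folds to a couple of instructions when both arguments are compile-time constants. A standalone check of the arithmetic with illustrative bit positions (a local copy of the macro, not the driver's definition):

#include <assert.h>
#include <stdint.h>

#define SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((uint32_t)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((uint32_t)((_input) & (_flag)) / ((_flag) / (_result))))

int main(void)
{
	/* flag at bit 0 mapped up to bit 5, flag at bit 7 mapped down to bit 2 */
	assert(SET_FLAG(0x01, 0x01, 0x20) == 0x20);
	assert(SET_FLAG(0x80, 0x80, 0x04) == 0x04);
	/* flag clear -> result clear */
	assert(SET_FLAG(0x00, 0x01, 0x20) == 0x00);
	return 0;
}
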
+static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-
- if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
+ IXGBE_ADVTXD_DCMD_VLE);
/* set segmentation enable bits for TSO/FSO */
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
+ IXGBE_ADVTXD_DCMD_TSE);
+
+ /* set timestamp bit if present */
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
+ IXGBE_ADVTXD_MAC_TSTAMP);
/* insert frame checksum */
- if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+ cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
return cmd_type;
}
@@ -6060,36 +6105,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
u32 tx_flags, unsigned int paylen)
{
- __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
/* enable L4 checksum for TSO and TX checksum offload */
- if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_CSUM,
+ IXGBE_ADVTXD_POPTS_TXSM);
	/* enable IPv4 checksum for TSO */
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
-
- /* use index 1 context for TSO/FSO/FCOE */
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
- olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_IPV4,
+ IXGBE_ADVTXD_POPTS_IXSM);
/*
* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
*/
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TXSW)
-#endif
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_CC,
+ IXGBE_ADVTXD_CC);
- tx_desc->read.olinfo_status = olinfo_status;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
@@ -6099,22 +6135,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
const u8 hdr_len)
{
- dma_addr_t dma;
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
- __le32 cmd_type;
+ u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IXGBE_TX_DESC(tx_ring, i);
- ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
- cmd_type = ixgbe_tx_cmd_type(tx_flags);
+ ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
#ifdef IXGBE_FCOE
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6128,19 +6164,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
#endif
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
+ tx_buffer = first;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
i++;
tx_desc++;
@@ -6148,18 +6187,18 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IXGBE_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
}
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
@@ -6167,6 +6206,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
#ifdef IXGBE_FCOE
size = min_t(unsigned int, data_len, skb_frag_size(frag));
@@ -6177,22 +6217,13 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
-
- frag++;
}
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
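
cmd_type is now built once per frame with only the command bits set; because the DTALEN field in its low bits starts out clear, XOR-ing the buffer length in (cmd_type ^ size) gives the same value as OR-ing it, and the unmodified cmd_type can be reused for every descriptor of the frame. A quick standalone check of that equivalence, with stand-in constants rather than the hardware definitions:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* illustrative stand-ins: command bits live above bit 16,
	 * the length field occupies the low bits and starts as 0 */
	uint32_t cmd_type = (1u << 29) | (1u << 25);	/* e.g. DEXT | IFCS */
	uint32_t size = 1514;				/* buffer length */

	/* with the length bits clear, XOR and OR produce the same value */
	assert((cmd_type ^ size) == (cmd_type | size));
	return 0;
}
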
@@ -6353,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
-#ifdef IXGBE_FCOE
- __be16 protocol = vlan_get_protocol(skb);
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_ring_feature *f;
+ int txq;
- if (((protocol == htons(ETH_P_FCOE)) ||
- (protocol == htons(ETH_P_FIP))) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- struct ixgbe_ring_feature *f;
+ /*
+ * only execute the code below if protocol is FCoE
+ * or FIP and we have FCoE enabled on the adapter
+ */
+ switch (vlan_get_protocol(skb)) {
+ case __constant_htons(ETH_P_FCOE):
+ case __constant_htons(ETH_P_FIP):
+ adapter = netdev_priv(dev);
- f = &adapter->ring_feature[RING_F_FCOE];
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ break;
+ default:
+ return __netdev_pick_tx(dev, skb);
+ }
- while (txq >= f->indices)
- txq -= f->indices;
- txq += adapter->ring_feature[RING_F_FCOE].offset;
+ f = &adapter->ring_feature[RING_F_FCOE];
- return txq;
- }
-#endif
+ txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- while (unlikely(txq >= dev->real_num_tx_queues))
- txq -= dev->real_num_tx_queues;
- return txq;
- }
+ while (txq >= f->indices)
+ txq -= f->indices;
- return skb_tx_hash(dev, skb);
+ return txq + f->offset;
}
+#endif
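
With this rework ixgbe_select_queue is compiled only for FCoE builds: FCoE and FIP frames are folded into the FCoE ring range and everything else falls back to __netdev_pick_tx. A standalone restatement of the fold, with queue counts purely illustrative:

#include <assert.h>
#include <stdint.h>

/* illustrative: fold an arbitrary queue hint into an FCoE ring range of
 * 'indices' queues starting at 'offset', as the select_queue hook does */
static uint16_t pick_fcoe_queue(uint16_t hint, uint16_t indices, uint16_t offset)
{
	while (hint >= indices)
		hint -= indices;
	return hint + offset;
}

int main(void)
{
	/* e.g. 8 FCoE queues placed after 16 regular queues */
	assert(pick_fcoe_queue(21, 8, 16) == 21);	/* 21 mod 8 = 5, +16 */
	assert(pick_fcoe_queue(3, 8, 16) == 19);
	return 0;
}
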
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@@ -6445,6 +6478,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+
+ /* schedule check for Tx timestamp */
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ schedule_work(&adapter->ptp_tx_work);
}
#ifdef CONFIG_PCI_IOV
@@ -6453,7 +6491,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* Tx switch had been disabled.
*/
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- tx_flags |= IXGBE_TX_FLAGS_TXSW;
+ tx_flags |= IXGBE_TX_FLAGS_CC;
#endif
/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
@@ -6784,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
}
+#endif /* CONFIG_IXGBE_DCB */
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -6809,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_close(dev);
ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_DCB
if (tc) {
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
@@ -6831,15 +6871,28 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->dcb_cfg.pfc_mode_enable = false;
}
- ixgbe_init_interrupt_scheme(adapter);
ixgbe_validate_rtr(adapter, tc);
+
+#endif /* CONFIG_IXGBE_DCB */
+ ixgbe_init_interrupt_scheme(adapter);
+
if (netif_running(dev))
- ixgbe_open(dev);
+ return ixgbe_open(dev);
return 0;
}
-#endif /* CONFIG_IXGBE_DCB */
+#ifdef CONFIG_PCI_IOV
+void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+ rtnl_unlock();
+}
+
+#endif
void ixgbe_do_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -6985,7 +7038,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
{
@@ -7062,7 +7115,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
}
static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev)
+ struct net_device *dev,
+ u32 filter_mask)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
u16 mode;
@@ -7082,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
+#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
+#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -7194,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
- unsigned int indices = num_possible_cpus();
- unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7245,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_save_state(pdev);
+ if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
- if (ii->mac == ixgbe_mac_82598EB)
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_RSS_INDICES);
- else
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_FDIR_INDICES);
+ /* 8 TC w/ 4 queues per TC */
+ indices = 4 * MAX_TRAFFIC_CLASS;
+#else
+ indices = IXGBE_MAX_RSS_INDICES;
#endif
+ }
- if (ii->mac == ixgbe_mac_82598EB)
- indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
- else
- indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-
-#ifdef IXGBE_FCOE
- indices += min_t(unsigned int, num_possible_cpus(),
- IXGBE_MAX_FCOE_INDICES);
-#endif
- indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@@ -7366,7 +7411,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#ifdef CONFIG_PCI_IOV
- ixgbe_enable_sriov(adapter, ii);
+ /* SR-IOV not supported on the 82598 */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ goto skip_sriov;
+ /* Mailbox */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ ixgbe_enable_sriov(adapter);
+ pci_sriov_set_totalvfs(pdev, 63);
+skip_sriov:
#endif
netdev->features = NETIF_F_SG |
@@ -7410,13 +7463,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ unsigned int fcoe_l;
+
if (hw->mac.ops.get_device_caps) {
hw->mac.ops.get_device_caps(hw, &device_caps);
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
- adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+ fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
+ adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
netdev->features |= NETIF_F_FSO |
NETIF_F_FCOE_CRC;
@@ -7444,9 +7501,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
e_dev_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
@@ -7623,8 +7679,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- ixgbe_disable_sriov(adapter);
-
+#ifdef CONFIG_PCI_IOV
+ /*
+ * Only disable SR-IOV on unload if the user specified the now
+ * deprecated max_vfs module parameter.
+ */
+ if (max_vfs)
+ ixgbe_disable_sriov(adapter);
+#endif
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
@@ -7729,6 +7791,8 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if (vfdev) {
e_dev_err("Issuing VFLR to VF %d\n", vf);
pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+ /* Free device reference count */
+ pci_dev_put(vfdev);
}
pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -7838,6 +7902,7 @@ static struct pci_driver ixgbe_driver = {
.resume = ixgbe_resume,
#endif
.shutdown = ixgbe_shutdown,
+ .sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
};
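
The new .sriov_configure callback is invoked by the PCI core when the VF count is changed through the sriov_numvfs sysfs attribute, which replaces the deprecated max_vfs module parameter. An illustrative user-space sketch of driving it (the PCI address is a placeholder):

#include <stdio.h>

/* illustrative: write the requested VF count to sriov_numvfs, the sysfs file
 * the PCI core backs with the driver's .sriov_configure callback; writing 0
 * disables the VFs again. The PCI address below is a placeholder. */
static int set_numvfs(const char *pci_addr, int num_vfs)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/sriov_numvfs", pci_addr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", num_vfs);
	return fclose(f);
}

int main(void)
{
	return set_numvfs("0000:01:00.0", 4);	/* e.g. four VFs */
}
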
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 1f3e32b576a5..d4a64e665398 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 42dd65e6ac97..e44ff47659b5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 71659edf81aa..060d2ad2ac96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -494,11 +494,9 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
@@ -854,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
- &identifier);
+ &identifier);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
@@ -872,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* ID Module
@@ -986,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE0,
- &oui_bytes[0]);
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
vendor_oui =
@@ -1206,6 +1190,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
+ * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ sff8472_data);
+}
+
+/**
* ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to write
@@ -1293,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
+ ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msleep(100);
- ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index cc18165b4c05..886a3431cf5b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
#include "ixgbe_type.h"
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
@@ -41,6 +42,8 @@
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -51,6 +54,7 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -88,6 +92,9 @@
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+/* SFP+ SFF-8472 Compliance code */
+#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
@@ -98,7 +105,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -126,6 +132,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 1a751c9d09c4..331987d6815c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -96,15 +96,12 @@
#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL
#define IXGBE_OVERFLOW_PERIOD (HZ * 30)
+#define IXGBE_PTP_TX_TIMEOUT (HZ * 15)
#ifndef NSECS_PER_SEC
#define NSECS_PER_SEC 1000000000ULL
#endif
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
/**
* ixgbe_ptp_setup_sdp
* @hw: the hardware private structure
@@ -405,149 +402,145 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
}
}
-
/**
- * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
- * @work: structure containing information about this work task
+ * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @adapter: private adapter struct
*
- * this work function is scheduled to continue reading the timecounter
+ * this watchdog task periodically reads the timecounter
* in order to prevent missing when the system time registers wrap
- * around. This needs to be run approximately twice a minute when no
- * PTP activity is occurring.
+ * around. This needs to be run approximately twice a minute.
*/
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
- unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
+ bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
+ IXGBE_OVERFLOW_PERIOD);
struct timespec ts;
- if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
- (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
+ if (timeout) {
ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
adapter->last_overflow_check = jiffies;
}
}
/**
- * ixgbe_ptp_match - determine if this skb matches a ptp packet
- * @skb: pointer to the skb
- * @hwtstamp: pointer to the hwtstamp_config to check
- *
- * Determine whether the skb should have been timestamped, assuming the
- * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
- * should have a timestamp waiting in the registers, and 0 otherwise.
+ * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @adapter: private network adapter structure
*
- * V1 packets have to check the version type to determine whether they are
- * correct. However, we can't directly access the data because it might be
- * fragmented in the SKB, in paged memory. In order to work around this, we
- * use skb_copy_bits which will properly copy the data whether it is in the
- * paged memory fragments or not. We have to copy the IP header as well as the
- * message type.
+ * this watchdog task is scheduled to detect the error case where the hardware
+ * has dropped a timestamped Rx packet because the ring was full. The error is
+ * rare, but it leaves the device unable to timestamp any future packets.
*/
-static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
{
- struct iphdr iph;
- u8 msgtype;
- unsigned int type, offset;
-
- if (rx_filter == HWTSTAMP_FILTER_NONE)
- return 0;
-
- type = sk_run_filter(skb, ptp_filter);
-
- if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
- return type & PTP_CLASS_V2;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring *rx_ring;
+ u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ unsigned long rx_event;
+ int n;
- /* For the remaining cases actually check message type */
- switch (type) {
- case PTP_CLASS_V1_IPV4:
- skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
- offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
- break;
- case PTP_CLASS_V1_IPV6:
- offset = OFF_PTP6 + OFF_PTP_CONTROL;
- break;
- default:
- /* other cases invalid or handled above */
- return 0;
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
}
- /* Make sure our buffer is long enough */
- if (skb->len < offset)
- return 0;
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
- skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5*HZ)) {
+ IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
- switch (rx_filter) {
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
- break;
- default:
- return 0;
+ e_warn(drv, "clearing RX Timestamp hang");
}
}
/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
- * @q_vector: structure containing interrupt and ring information
- * @skb: particular skb to send timestamp with
+ * @adapter: the private adapter struct
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
-void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb)
+static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
+ struct ixgbe_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval = 0, ns;
- u32 tsynctxctl;
unsigned long flags;
- /* we cannot process timestamps on a ring without a q_vector */
- if (!q_vector || !q_vector->adapter)
- return;
-
- adapter = q_vector->adapter;
- hw = &adapter->hw;
-
- tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;
- /*
- * if TX timestamp is not valid, exit after clearing the
- * timestamp registers
- */
- if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
- return;
-
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+}
+
+/**
+ * ixgbe_ptp_tx_hwtstamp_work
+ * @work: pointer to the work struct
+ *
+ * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware
+ * timestamp has been taken for the current skb. It is necessary, because the
+ * descriptor's "done" bit does not correlate with the timestamp event.
+ */
+static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter,
+ ptp_tx_work);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IXGBE_PTP_TX_TIMEOUT);
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb */
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (timeout) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ e_warn(drv, "clearing Tx Timestamp hang");
+ return;
+ }
+
+ tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID)
+ ixgbe_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to keep checking if it's not available yet */
+ schedule_work(&adapter->ptp_tx_work);
}
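
The Tx timestamp work item above only ever runs for skbs flagged SKBTX_HW_TSTAMP, which an application requests through SO_TIMESTAMPING once the device has been configured with the SIOCSHWTSTAMP ioctl. A minimal sketch of the socket side (flag set illustrative):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* illustrative: ask the stack for hardware Tx timestamps on this socket;
 * the device itself must first be configured via SIOCSHWTSTAMP */
static int enable_hw_tx_timestamps(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
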
/**
- * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
- * @rx_desc: the rx descriptor
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
-void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
@@ -563,37 +556,17 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
adapter = q_vector->adapter;
hw = &adapter->hw;
- if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
- return;
-
+ /*
+	 * Read the tsyncrxctl register only after the caller has checked the
+	 * descriptor's timestamp bit, so the MMIO read is not taken on every packet.
+ */
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
-
- /* Check if we have a valid timestamp and make sure the skb should
- * have been timestamped */
if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
return;
- /*
- * Always read the registers, in order to clear a possible fault
- * because of stagnant RX timestamp values for a packet that never
- * reached the queue.
- */
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
- /*
- * If the timestamp bit is set in the packet's descriptor, we know the
- * timestamp belongs to this packet. No other packet can be
- * timestamped until the registers for timestamping have been read.
- * Therefor only one packet with this bit can be in the queue at a
- * time, and the rx timestamp values that were in the registers belong
- * to this packet.
- *
- * If nothing went wrong, then it should have a skb_shared_tx that we
- * can turn into a skb_shared_hwtstamps.
- */
- if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
- return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
@@ -660,11 +633,11 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
+ tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
+ tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -698,9 +671,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
return 0;
}
- /* Store filter value for later use */
- adapter->rx_hwtstamp_filter = config.rx_filter;
-
/* define ethertype filter for timestamping L2 packets */
if (is_l2)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
@@ -902,11 +872,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
return;
}
- /* initialize the ptp filter */
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
- e_dev_warn("ptp_filter_init failed\n");
-
spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
@@ -938,6 +905,12 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
ixgbe_ptp_setup_sdp(adapter);
+ cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
+
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 85cddac673ef..d44b4d21268c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,50 +44,11 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii)
+static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
- int pre_existing_vfs = 0;
-
- pre_existing_vfs = pci_num_vf(adapter->pdev);
- if (!pre_existing_vfs && !adapter->num_vfs)
- return;
-
- /* If there are pre-existing VFs then we have to force
- * use of that many because they were not deleted the last
- * time someone removed the PF driver. That would have
- * been because they were allocated to guest VMs and can't
- * be removed. Go ahead and just re-enable the old amount.
- * If the user wants to change the number of VFs they can
- * use ethtool while making sure no VFs are allocated to
- * guest VMs... i.e. the right way.
- */
- if (pre_existing_vfs) {
- adapter->num_vfs = pre_existing_vfs;
- dev_warn(&adapter->pdev->dev, "Virtual Functions already "
- "enabled for this device - Please reload all "
- "VF drivers to avoid spoofed packet errors\n");
- } else {
- int err;
- /*
- * The 82599 supports up to 64 VFs per physical function
- * but this implementation limits allocation to 63 so that
- * basic networking resources are still available to the
- * physical function. If the user requests greater thn
- * 63 VFs then it is an error - reset to default of zero.
- */
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
-
- err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
- if (err) {
- e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- adapter->num_vfs = 0;
- return;
- }
- }
adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
@@ -128,12 +89,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
kcalloc(adapter->num_vfs,
sizeof(struct vf_data_storage), GFP_KERNEL);
if (adapter->vfinfo) {
- /* Now that we're sure SR-IOV is enabled
- * and memory allocated set up the mailbox parameters
- */
- ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
-
 		/* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(adapter->num_vfs < 16)) {
@@ -157,10 +112,62 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
/* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+/* Note this function is called when the user wants to enable SR-IOV
+ * VFs using the now deprecated module parameter
+ */
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+{
+ int pre_existing_vfs = 0;
+
+ pre_existing_vfs = pci_num_vf(adapter->pdev);
+ if (!pre_existing_vfs && !adapter->num_vfs)
return;
+
+ if (!pre_existing_vfs)
+ dev_warn(&adapter->pdev->dev,
+ "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
+
+ /* If there are pre-existing VFs then we have to force
+	 * use of that many - override any module parameter value.
+ * This may result from the user unloading the PF driver
+ * while VFs were assigned to guest VMs or because the VFs
+ * have been created via the new PCI SR-IOV sysfs interface.
+ */
+ if (pre_existing_vfs) {
+ adapter->num_vfs = pre_existing_vfs;
+ dev_warn(&adapter->pdev->dev,
+ "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
+ } else {
+ int err;
+ /*
+ * The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+		 * physical function. If the user requests greater than
+ * 63 VFs then it is an error - reset to default of zero.
+ */
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ adapter->num_vfs = 0;
+ return;
+ }
}
- /* Oh oh */
+ if (!__ixgbe_enable_sriov(adapter))
+ return;
+
+ /* If we have gotten to this point then there is no memory available
+ * to manage the VF devices - print message and bail.
+ */
e_err(probe, "Unable to allocate memory for VF Data Storage - "
"SRIOV disabled\n");
ixgbe_disable_sriov(adapter);
@@ -200,11 +207,12 @@ static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
}
#endif /* #ifdef CONFIG_PCI_IOV */
-void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 gpie;
u32 vmdctl;
+ int rss;
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
@@ -219,7 +227,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
/* if SR-IOV is already disabled then there is nothing to do */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
- return;
+ return 0;
#ifdef CONFIG_PCI_IOV
/*
@@ -229,7 +237,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
*/
if (ixgbe_vfs_are_assigned(adapter)) {
e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
- return;
+ return -EPERM;
}
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
@@ -252,10 +260,94 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->ring_feature[RING_F_VMDQ].offset = 0;
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_RSS].limit = rss;
+
/* take a breather then clean up driver data */
msleep(100);
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ return 0;
+}
+
+static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
+ int err = 0;
+ int i;
+ int pre_existing_vfs = pci_num_vf(dev);
+
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ err = ixgbe_disable_sriov(adapter);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ goto out;
+
+ if (err)
+ goto err_out;
+
+ /* While the SR-IOV capability structure reports total VFs to be
+ * 64 we limit the actual number that can be allocated to 63 so
+ * that some transmit/receive resources can be reserved to the
+ * PF. The PCI bus driver already checks for other values out of
+ * range.
+ */
+ if (num_vfs > 63) {
+ err = -EPERM;
+ goto err_out;
+ }
+
+ adapter->num_vfs = num_vfs;
+
+ err = __ixgbe_enable_sriov(adapter);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(dev, (i | 0x10000000));
+
+ err = pci_enable_sriov(dev, num_vfs);
+ if (err) {
+ e_dev_warn("Failed to enable PCI sriov: %d\n", err);
+ goto err_out;
+ }
+ ixgbe_sriov_reinit(adapter);
+
+out:
+ return num_vfs;
+
+err_out:
+ return err;
+#endif
+ return 0;
+}
+
+static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
+ int err;
+ u32 current_flags = adapter->flags;
+
+ err = ixgbe_disable_sriov(adapter);
+
+ /* Only reinit if no error and state changed */
+ if (!err && current_flags != adapter->flags) {
+ /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+#ifdef CONFIG_PCI_IOV
+ ixgbe_sriov_reinit(adapter);
+#endif
+ }
+
+ return err;
+}
+
+int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return ixgbe_pci_sriov_disable(dev);
+ else
+ return ixgbe_pci_sriov_enable(dev, num_vfs);
}
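
For context: ixgbe_pci_sriov_configure() is the entry point the PCI core calls when userspace writes to the sriov_numvfs sysfs attribute. The hookup into struct pci_driver lives in ixgbe_main.c and is not part of this hunk, so the snippet below is only an illustrative sketch; example_pci_tbl, example_probe and example_remove are placeholders, not real ixgbe symbols.

static struct pci_driver example_driver = {
	.name            = "example",
	.id_table        = example_pci_tbl,           /* placeholder */
	.probe           = example_probe,             /* placeholder */
	.remove          = example_remove,            /* placeholder */
	.sriov_configure = ixgbe_pci_sriov_configure, /* new callback */
};

/* Userspace then creates or destroys VFs with, for example:
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 */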
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
@@ -447,15 +539,6 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
- u16 vid, u16 qos, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
-
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
-}
-
static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 1be1d30e4e78..4713f9fc7f46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,12 +41,20 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
-void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
#endif
+int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+ u16 vid, u16 qos, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
+}
#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 16ddf14e8ba4..d118def16f35 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9cd8a13711d3..6652e96c352d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -2822,7 +2822,7 @@ struct ixgbe_mac_operations {
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
@@ -2869,12 +2869,12 @@ struct ixgbe_phy_operations {
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
- bool);
+ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
s32 (*check_overtemp)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index c73b92993391..66c5e946284e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -72,14 +72,13 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ return hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
}
@@ -879,6 +878,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8f2070439b59..c9d0c12d6f04 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -99,6 +99,7 @@ static int ixgbevf_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = -1;
+ hw->mac.get_link_status = 1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 257357ae66c3..c3db6cd69b68 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -750,12 +750,37 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
struct ixgbevf_adapter *adapter = data;
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg;
+ bool got_ack = false;
hw->mac.get_link_status = 1;
+ if (!hw->mbx.ops.check_for_ack(hw))
+ got_ack = true;
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies);
+ if (!hw->mbx.ops.check_for_msg(hw)) {
+ hw->mbx.ops.read(hw, &msg, 1);
+
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 1));
+ adapter->link_up = false;
+ }
+
+ if (msg & IXGBE_VT_MSGTYPE_NACK)
+ dev_info(&pdev->dev,
+ "Last Request of type %2.2x to PF Nacked\n",
+ msg & 0xFF);
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
+ }
+
+ /* checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it
+ */
+ if (got_ack)
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -2095,6 +2120,9 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int i;
+ if (!adapter->link_up)
+ return;
+
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
adapter->stats.vfgprc);
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
@@ -2217,9 +2245,23 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
if (link_up) {
if (!netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
- (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- 10 : 1);
+ char *link_speed_string;
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ link_speed_string = "10 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ link_speed_string = "1 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ link_speed_string = "100 Mbps";
+ break;
+ default:
+ link_speed_string = "unknown speed";
+ break;
+ }
+ dev_info(&adapter->pdev->dev,
+ "NIC Link is Up, %s\n", link_speed_string);
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
}
@@ -2227,7 +2269,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
adapter->link_up = false;
adapter->link_speed = 0;
if (netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
}
@@ -3328,8 +3370,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init;
/* The HW MAC address was set and/or determined in sw_init */
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
if (!is_valid_ether_addr(netdev->dev_addr)) {
pr_err("invalid MAC address\n");
err = -EIO;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index bc58f1dc22f5..5409fe876a44 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -695,9 +695,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct korina_private *lp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, lp->dev->name);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index c124e67a1a1c..6a2127489af7 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -302,9 +302,9 @@ ltq_etop_hw_init(struct net_device *dev)
static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "Lantiq ETOP");
- strcpy(info->bus_info, "internal");
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
+ strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int
@@ -393,8 +393,8 @@ ltq_etop_mdio_probe(struct net_device *dev)
return -ENODEV;
}
- phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
- 0, priv->pldata->mii_mode);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &ltq_etop_mdio_link, priv->pldata->mii_mode);
if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -655,7 +655,7 @@ ltq_etop_init(struct net_device *dev)
/* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
if (random_mac)
- dev->addr_assign_type |= NET_ADDR_RANDOM;
+ dev->addr_assign_type = NET_ADDR_RANDOM;
ltq_etop_set_multicast_list(dev);
err = ltq_etop_mdio_init(dev);
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 0029934748bc..edfba9370922 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -31,6 +31,30 @@ config MV643XX_ETH
Some boards that use the Discovery chipset are the Momenco
Ocelot C and Jaguar ATX and Pegasos II.
+config MVMDIO
+ tristate "Marvell MDIO interface support"
+ ---help---
+ This driver supports the MDIO interface found in the network
+ interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
+ Dove, Armada 370 and Armada XP).
+
+ For now, this driver is only needed for the MVNETA driver
+ (used on Armada 370 and XP), but it could be used in the
+ future by the MV643XX_ETH driver.
+
+config MVNETA
+ tristate "Marvell Armada 370/XP network interface support"
+ depends on MACH_ARMADA_370_XP
+ select PHYLIB
+ select MVMDIO
+ ---help---
+ This driver supports the network interface units in the
+ Marvell ARMADA XP and ARMADA 370 SoC family.
+
+ Note that this driver is distinct from the mv643xx_eth
+ driver, which should be used for the older Marvell SoCs
+ (Dove, Orion, Discovery, Kirkwood).
+
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
depends on CPU_PXA168
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 57e3234a37ba..7f63b4aac434 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -3,6 +3,8 @@
#
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 84c13263c514..29140502b71a 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1879,12 +1879,10 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
memset(rxq->rx_desc_area, 0, size);
rxq->rx_desc_area_size = size;
- rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
- GFP_KERNEL);
- if (rxq->rx_skb == NULL) {
- netdev_err(mp->dev, "can't allocate rx skb ring\n");
+ rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+ GFP_KERNEL);
+ if (rxq->rx_skb == NULL)
goto out_free;
- }
rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
@@ -2789,7 +2787,7 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy_reset(mp);
- phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);
+ phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII);
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
new file mode 100644
index 000000000000..77b7c80262f4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for the MDIO interface of Marvell network interfaces.
+ *
+ * Since the MDIO interface of Marvell network interfaces is shared
+ * between all network interfaces, having a single driver makes it
+ * possible to handle concurrent accesses properly (you may have four Ethernet
+ * ports, but they in fact share the same SMI interface to access the
+ * MDIO bus). Moreover, this MDIO interface code is similar between
+ * the mv643xx_eth driver and the mvneta driver. For now, it is only
+ * used by the mvneta driver, but it could later be used by the
+ * mv643xx_eth driver as well.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define MVMDIO_SMI_DATA_SHIFT 0
+#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
+#define MVMDIO_SMI_PHY_REG_SHIFT 21
+#define MVMDIO_SMI_READ_OPERATION BIT(26)
+#define MVMDIO_SMI_WRITE_OPERATION 0
+#define MVMDIO_SMI_READ_VALID BIT(27)
+#define MVMDIO_SMI_BUSY BIT(28)
+
+struct orion_mdio_dev {
+ struct mutex lock;
+ void __iomem *smireg;
+};
+
+/* Wait for the SMI unit to be ready for another operation
+ */
+static int orion_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int count;
+ u32 val;
+
+ count = 0;
+ while (1) {
+ val = readl(dev->smireg);
+ if (!(val & MVMDIO_SMI_BUSY))
+ break;
+
+ if (count > 100) {
+ dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(10);
+ count++;
+ }
+
+ return 0;
+}
+
+static int orion_mdio_read(struct mii_bus *bus, int mii_id,
+ int regnum)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int count;
+ u32 val;
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0) {
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+ (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
+ MVMDIO_SMI_READ_OPERATION),
+ dev->smireg);
+
+ /* Wait for the value to become available */
+ count = 0;
+ while (1) {
+ val = readl(dev->smireg);
+ if (val & MVMDIO_SMI_READ_VALID)
+ break;
+
+ if (count > 100) {
+ dev_err(bus->parent, "Timeout when reading PHY\n");
+ mutex_unlock(&dev->lock);
+ return -ETIMEDOUT;
+ }
+
+ udelay(10);
+ count++;
+ }
+
+ mutex_unlock(&dev->lock);
+
+ return val & 0xFFFF;
+}
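
A worked example (not part of the driver) of how the SMI command word is composed from the MVMDIO_* shifts defined above, here for PHY address 0x1, register 0x2:

/* Illustrative only:
 *   (0x1 << MVMDIO_SMI_PHY_ADDR_SHIFT)  = 0x00010000
 *   (0x2 << MVMDIO_SMI_PHY_REG_SHIFT)   = 0x00400000
 *   MVMDIO_SMI_READ_OPERATION (BIT(26)) = 0x04000000
 * OR'ed together, the value written to dev->smireg is 0x04410000;
 * once MVMDIO_SMI_READ_VALID is set, the read result is the low
 * 16 bits of the register.
 */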
+
+static int orion_mdio_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0) {
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+ (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
+ MVMDIO_SMI_WRITE_OPERATION |
+ (value << MVMDIO_SMI_DATA_SHIFT)),
+ dev->smireg);
+
+ mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+static int orion_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static int orion_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct orion_mdio_dev *dev;
+ int i, ret;
+
+ bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
+ if (!bus) {
+ dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ bus->name = "orion_mdio_bus";
+ bus->read = orion_mdio_read;
+ bus->write = orion_mdio_write;
+ bus->reset = orion_mdio_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
+ dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+
+ bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bus->irq) {
+ mdiobus_free(bus);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bus->irq[i] = PHY_POLL;
+
+ dev = bus->priv;
+ dev->smireg = of_iomap(pdev->dev.of_node, 0);
+ if (!dev->smireg) {
+ dev_err(&pdev->dev, "No SMI register address given in DT\n");
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return -ENODEV;
+ }
+
+ mutex_init(&dev->lock);
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ iounmap(dev->smireg);
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static int orion_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+ mdiobus_unregister(bus);
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return 0;
+}
+
+static const struct of_device_id orion_mdio_match[] = {
+ { .compatible = "marvell,orion-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, orion_mdio_match);
+
+static struct platform_driver orion_mdio_driver = {
+ .probe = orion_mdio_probe,
+ .remove = orion_mdio_remove,
+ .driver = {
+ .name = "orion-mdio",
+ .of_match_table = orion_mdio_match,
+ },
+};
+
+module_platform_driver(orion_mdio_driver);
+
+MODULE_DESCRIPTION("Marvell MDIO interface driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
new file mode 100644
index 000000000000..cd345b8969bc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -0,0 +1,2846 @@
+/*
+ * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Rami Rosen <rosenr@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+
+/* Registers */
+#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
+#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
+#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
+#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
+#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
+#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
+#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
+#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
+#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
+#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
+#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
+#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
+#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
+#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
+#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
+#define MVNETA_PORT_RX_RESET 0x1cc0
+#define MVNETA_PORT_RX_DMA_RESET BIT(0)
+#define MVNETA_PHY_ADDR 0x2000
+#define MVNETA_PHY_ADDR_MASK 0x1f
+#define MVNETA_MBUS_RETRY 0x2010
+#define MVNETA_UNIT_INTR_CAUSE 0x2080
+#define MVNETA_UNIT_CONTROL 0x20B0
+#define MVNETA_PHY_POLLING_ENABLE BIT(1)
+#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
+#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
+#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
+#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_PORT_CONFIG 0x2400
+#define MVNETA_UNI_PROMISC_MODE BIT(0)
+#define MVNETA_DEF_RXQ(q) ((q) << 1)
+#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
+#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
+#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
+#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
+#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
+#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
+#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
+ MVNETA_DEF_RXQ_ARP(q) | \
+ MVNETA_DEF_RXQ_TCP(q) | \
+ MVNETA_DEF_RXQ_UDP(q) | \
+ MVNETA_DEF_RXQ_BPDU(q) | \
+ MVNETA_TX_UNSET_ERR_SUM | \
+ MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
+#define MVNETA_PORT_CONFIG_EXTEND 0x2404
+#define MVNETA_MAC_ADDR_LOW 0x2414
+#define MVNETA_MAC_ADDR_HIGH 0x2418
+#define MVNETA_SDMA_CONFIG 0x241c
+#define MVNETA_SDMA_BRST_SIZE_16 4
+#define MVNETA_NO_DESC_SWAP 0x0
+#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
+#define MVNETA_RX_NO_DATA_SWAP BIT(4)
+#define MVNETA_TX_NO_DATA_SWAP BIT(5)
+#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
+#define MVNETA_PORT_STATUS 0x2444
+#define MVNETA_TX_IN_PRGRS BIT(1)
+#define MVNETA_TX_FIFO_EMPTY BIT(8)
+#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+#define MVNETA_TYPE_PRIO 0x24bc
+#define MVNETA_FORCE_UNI BIT(21)
+#define MVNETA_TXQ_CMD_1 0x24e4
+#define MVNETA_TXQ_CMD 0x2448
+#define MVNETA_TXQ_DISABLE_SHIFT 8
+#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
+#define MVNETA_ACC_MODE 0x2500
+#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
+#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
+#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
+#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
+#define MVNETA_INTR_NEW_CAUSE 0x25a0
+#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
+#define MVNETA_INTR_NEW_MASK 0x25a4
+#define MVNETA_INTR_OLD_CAUSE 0x25a8
+#define MVNETA_INTR_OLD_MASK 0x25ac
+#define MVNETA_INTR_MISC_CAUSE 0x25b0
+#define MVNETA_INTR_MISC_MASK 0x25b4
+#define MVNETA_INTR_ENABLE 0x25b8
+#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
+#define MVNETA_RXQ_CMD 0x2680
+#define MVNETA_RXQ_DISABLE_SHIFT 8
+#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
+#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
+#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
+#define MVNETA_GMAC_CTRL_0 0x2c00
+#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
+#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
+#define MVNETA_GMAC_CTRL_2 0x2c08
+#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
+#define MVNETA_GMAC2_PORT_RGMII BIT(4)
+#define MVNETA_GMAC2_PORT_RESET BIT(6)
+#define MVNETA_GMAC_STATUS 0x2c10
+#define MVNETA_GMAC_LINK_UP BIT(0)
+#define MVNETA_GMAC_SPEED_1000 BIT(1)
+#define MVNETA_GMAC_SPEED_100 BIT(2)
+#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
+#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
+#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
+#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
+#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
+#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
+#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVNETA_MIB_COUNTERS_BASE 0x3080
+#define MVNETA_MIB_LATE_COLLISION 0x7c
+#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
+#define MVNETA_DA_FILT_OTH_MCAST 0x3500
+#define MVNETA_DA_FILT_UCAST_BASE 0x3600
+#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
+#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
+#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
+#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
+#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
+#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
+#define MVNETA_TXQ_SENT_DESC_SHIFT 16
+#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
+#define MVNETA_PORT_TX_RESET 0x3cf0
+#define MVNETA_PORT_TX_DMA_RESET BIT(0)
+#define MVNETA_TX_MTU 0x3e0c
+#define MVNETA_TX_TOKEN_SIZE 0x3e14
+#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
+#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
+#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+
+#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* Descriptor ring Macros */
+#define MVNETA_QUEUE_NEXT_DESC(q, index) \
+ (((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVNETA_TXDONE_COAL_PKTS 16
+#define MVNETA_RX_COAL_PKTS 32
+#define MVNETA_RX_COAL_USEC 100
+
+/* Timer */
+#define MVNETA_TX_DONE_TIMER_PERIOD 10
+
+/* Napi polling weight */
+#define MVNETA_RX_POLL_WEIGHT 64
+
+/* The two-byte Marvell header. It either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically with zeroes on
+ * the RX side. Since those two bytes sit at the front of the Ethernet
+ * header (2 + 14 bytes of Ethernet header = 16 bytes), they leave the
+ * IP header aligned on a 4-byte boundary automatically: the hardware
+ * skips those two bytes on its own.
+ */
+#define MVNETA_MH_SIZE 2
+
+#define MVNETA_VLAN_TAG_LEN 4
+
+#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
+#define MVNETA_TX_CSUM_MAX_SIZE 9800
+#define MVNETA_ACC_MODE_EXT 1
+
+/* Timeout constants */
+#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
+#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
+
+#define MVNETA_TX_MTU_MAX 0x3ffff
+
+/* Max number of Rx descriptors */
+#define MVNETA_MAX_RXD 128
+
+/* Max number of Tx descriptors */
+#define MVNETA_MAX_TXD 532
+
+/* descriptor aligned size */
+#define MVNETA_DESC_ALIGNED_SIZE 32
+
+#define MVNETA_RX_PKT_SIZE(mtu) \
+ ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
+ ETH_HLEN + ETH_FCS_LEN, \
+ MVNETA_CPU_D_CACHE_LINE_SIZE)
+
+#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
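
A worked sizing example (not part of the driver), assuming the standard 1500-byte MTU and the constants above (MVNETA_MH_SIZE = 2, MVNETA_VLAN_TAG_LEN = 4, ETH_HLEN = 14, ETH_FCS_LEN = 4):

/*   MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 *                            = ALIGN(1524, 32) = 1536 bytes
 *   MVNETA_RX_BUF_SIZE(1536) = 1536 + NET_SKB_PAD
 * where NET_SKB_PAD is architecture dependent (typically 32 or 64),
 * so each RX buffer ends up a little over 1.5 KiB.
 */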
+
+struct mvneta_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
+
+struct mvneta_port {
+ int pkt_size;
+ void __iomem *base;
+ struct mvneta_rx_queue *rxqs;
+ struct mvneta_tx_queue *txqs;
+ struct timer_list tx_done_timer;
+ struct net_device *dev;
+
+ u32 cause_rx_tx;
+ struct napi_struct napi;
+
+ /* Flags */
+ unsigned long flags;
+#define MVNETA_F_TX_DONE_TIMER_BIT 0
+
+ /* Napi weight */
+ int weight;
+
+ /* Core clock */
+ struct clk *clk;
+ u8 mcast_count[256];
+ u16 tx_ring_size;
+ u16 rx_ring_size;
+ struct mvneta_stats tx_stats;
+ struct mvneta_stats rx_stats;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ phy_interface_t phy_interface;
+ struct device_node *phy_node;
+ unsigned int link;
+ unsigned int duplex;
+ unsigned int speed;
+};
+
+/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
+ * layout of the transmit and receive DMA descriptors; the layout is
+ * therefore dictated by the hardware design
+ */
+struct mvneta_tx_desc {
+ u32 command; /* Options used by HW for packet transmitting.*/
+#define MVNETA_TX_L3_OFF_SHIFT 0
+#define MVNETA_TX_IP_HLEN_SHIFT 8
+#define MVNETA_TX_L4_UDP BIT(16)
+#define MVNETA_TX_L3_IP6 BIT(17)
+#define MVNETA_TXD_IP_CSUM BIT(18)
+#define MVNETA_TXD_Z_PAD BIT(19)
+#define MVNETA_TXD_L_DESC BIT(20)
+#define MVNETA_TXD_F_DESC BIT(21)
+#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
+ MVNETA_TXD_L_DESC | \
+ MVNETA_TXD_F_DESC)
+#define MVNETA_TX_L4_CSUM_FULL BIT(30)
+#define MVNETA_TX_L4_CSUM_NOT BIT(31)
+
+ u16 reserverd1; /* csum_l4 (for future use) */
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u32 status; /* Info about received packet */
+#define MVNETA_RXD_ERR_CRC 0x0
+#define MVNETA_RXD_ERR_SUMMARY BIT(16)
+#define MVNETA_RXD_ERR_OVERRUN BIT(17)
+#define MVNETA_RXD_ERR_LEN BIT(18)
+#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
+#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
+#define MVNETA_RXD_L3_IP4 BIT(25)
+#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
+#define MVNETA_RXD_L4_CSUM_OK BIT(30)
+
+ u16 reserved1; /* pnc_info - (for future use, PnC) */
+ u16 data_size; /* Size of received packet in bytes */
+ u32 buf_phys_addr; /* Physical address of the buffer */
+ u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ u16 reserved3; /* prefetch_cmd, for future use */
+ u16 reserved4; /* csum_l4 - (for future use, PnC) */
+ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
+ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
+};
+
+struct mvneta_tx_queue {
+ /* Number of this TX queue, in the range 0-7 */
+ u8 id;
+
+ /* Number of TX DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Number of currently used TX DMA descriptor in the
+ * descriptor ring
+ */
+ int count;
+
+ /* Array of transmitted skb */
+ struct sk_buff **tx_skb;
+
+ /* Index of last TX DMA descriptor that was inserted */
+ int txq_put_index;
+
+ /* Index of the TX DMA descriptor to be cleaned up */
+ int txq_get_index;
+
+ u32 done_pkts_coal;
+
+ /* Virtual address of the TX DMA descriptors array */
+ struct mvneta_tx_desc *descs;
+
+ /* DMA address of the TX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last TX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next TX DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+struct mvneta_rx_queue {
+ /* rx queue number, in the range 0-7 */
+ u8 id;
+
+ /* num of rx descriptors in the rx descriptor ring */
+ int size;
+
+ /* counter of times when mvneta_refill() failed */
+ int missed;
+
+ u32 pkts_coal;
+ u32 time_coal;
+
+ /* Virtual address of the RX DMA descriptors array */
+ struct mvneta_rx_desc *descs;
+
+ /* DMA address of the RX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last RX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next RX DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+static int rxq_number = 8;
+static int txq_number = 8;
+
+static int rxq_def;
+static int txq_def;
+
+#define MVNETA_DRIVER_NAME "mvneta"
+#define MVNETA_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+/* Write helper method */
+static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
+{
+ writel(data, pp->base + offset);
+}
+
+/* Read helper method */
+static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
+{
+ return readl(pp->base + offset);
+}
+
+/* Increment txq get counter */
+static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
+{
+ txq->txq_get_index++;
+ if (txq->txq_get_index == txq->size)
+ txq->txq_get_index = 0;
+}
+
+/* Increment txq put counter */
+static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
+{
+ txq->txq_put_index++;
+ if (txq->txq_put_index == txq->size)
+ txq->txq_put_index = 0;
+}
+
+
+/* Clear all MIB counters */
+static void mvneta_mib_counters_clear(struct mvneta_port *pp)
+{
+ int i;
+ u32 dummy;
+
+ /* Perform dummy reads from MIB counters */
+ for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
+ dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+}
+
+/* Get System Network Statistics */
+struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ unsigned int start;
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
+ stats->rx_packets = pp->rx_stats.packets;
+ stats->rx_bytes = pp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+
+
+ do {
+ start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
+ stats->tx_packets = pp->tx_stats.packets;
+ stats->tx_bytes = pp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ stats->tx_dropped = dev->stats.tx_dropped;
+
+ return stats;
+}
+
+/* Rx descriptors helper methods */
+
+/* Checks whether the given RX descriptor is both the first and the
+ * last descriptor for the RX packet. Each RX packet is currently
+ * received through a single RX descriptor, so a descriptor that does
+ * not have both its first and last bits set indicates an error
+ */
+static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
+{
+ return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
+ MVNETA_RXD_FIRST_LAST_DESC;
+}
+
+/* Add number of descriptors ready to receive new packets */
+static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int ndescs)
+{
+ /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
+ * be added at once
+ */
+ while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+ (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
+ MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+ ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
+ }
+
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+ (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+}
+
+/* Get number of RX descriptors occupied by received packets */
+static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
+ return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
+}
+
+/* Update num of rx desc called upon return from rx path or
+ * from mvneta_rxq_drop_pkts().
+ */
+static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int rx_done, int rx_filled)
+{
+ u32 val;
+
+ if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
+ val = rx_done |
+ (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+ return;
+ }
+
+ /* Only 255 descriptors can be added at once */
+ while ((rx_done > 0) || (rx_filled > 0)) {
+ if (rx_done <= 0xff) {
+ val = rx_done;
+ rx_done = 0;
+ } else {
+ val = 0xff;
+ rx_done -= 0xff;
+ }
+ if (rx_filled <= 0xff) {
+ val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+ rx_filled = 0;
+ } else {
+ val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+ rx_filled -= 0xff;
+ }
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+ }
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static struct mvneta_rx_desc *
+mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
+{
+ int rx_desc = rxq->next_desc_to_proc;
+
+ rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+ return rxq->descs + rx_desc;
+}
+
+/* Change maximum receive size of the port. */
+static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
+ val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
+ MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+
+/* Set rx queue offset */
+static void mvneta_rxq_offset_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int offset)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
+
+ /* Offset is in units of 8 bytes (the hardware field holds offset >> 3) */
+ val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+
+/* Tx descriptors helper methods */
+
+/* Update HW with number of TX descriptors to be sent */
+static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int pend_desc)
+{
+ u32 val;
+
+ /* Only 255 descriptors can be added at once; assume the caller
+ * processes TX descriptors in quanta of less than 256
+ */
+ val = pend_desc;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get pointer to next TX descriptor to be processed (send) by HW */
+static struct mvneta_tx_desc *
+mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
+{
+ int tx_desc = txq->next_desc_to_proc;
+
+ txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
+ return txq->descs + tx_desc;
+}
+
+/* Release the last allocated TX descriptor. Useful to handle DMA
+ * mapping failures in the TX path.
+ */
+static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
+{
+ if (txq->next_desc_to_proc == 0)
+ txq->next_desc_to_proc = txq->last_desc - 1;
+ else
+ txq->next_desc_to_proc--;
+}
+
+/* Set rxq buf size */
+static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int buf_size)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
+
+ val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
+ val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
+
+ mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
+}
+
+/* Disable buffer management (BM) */
+static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+
+
+/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
+static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ if (enable)
+ val |= MVNETA_GMAC2_PORT_RGMII;
+ else
+ val &= ~MVNETA_GMAC2_PORT_RGMII;
+
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+}
+
+/* Config SGMII port */
+static void mvneta_port_sgmii_config(struct mvneta_port *pp)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val |= MVNETA_GMAC2_PSC_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+}
+
+/* Start the Ethernet port RX and TX activity */
+static void mvneta_port_up(struct mvneta_port *pp)
+{
+ int queue;
+ u32 q_map;
+
+ /* Enable all initialized TXs. */
+ mvneta_mib_counters_clear(pp);
+ q_map = 0;
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ if (txq->descs != NULL)
+ q_map |= (1 << queue);
+ }
+ mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
+
+ /* Enable all initialized RXQs. */
+ q_map = 0;
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ if (rxq->descs != NULL)
+ q_map |= (1 << queue);
+ }
+
+ mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+}
+
+/* Stop the Ethernet port activity */
+static void mvneta_port_down(struct mvneta_port *pp)
+{
+ u32 val;
+ int count;
+
+ /* Stop Rx port activity. Check port Rx activity. */
+ val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
+
+ /* Issue stop command for active channels only */
+ if (val != 0)
+ mvreg_write(pp, MVNETA_RXQ_CMD,
+ val << MVNETA_RXQ_DISABLE_SHIFT);
+
+ /* Wait for all Rx activity to terminate. */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(pp->dev,
+ "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ val = mvreg_read(pp, MVNETA_RXQ_CMD);
+ } while (val & 0xff);
+
+ /* Stop Tx port activity. Check port Tx activity. Issue stop
+ * command for active channels only
+ */
+ val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
+
+ if (val != 0)
+ mvreg_write(pp, MVNETA_TXQ_CMD,
+ (val << MVNETA_TXQ_DISABLE_SHIFT));
+
+ /* Wait for all Tx activity to terminate. */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(pp->dev,
+ "TIMEOUT for TX stopped status=0x%08x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ /* Check TX Command reg that all Txqs are stopped */
+ val = mvreg_read(pp, MVNETA_TXQ_CMD);
+
+ } while (val & 0xff);
+
+ /* Double check to verify that TX FIFO is empty */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
+ netdev_warn(pp->dev,
+ "TX FIFO empty timeout status=0x08%x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ val = mvreg_read(pp, MVNETA_PORT_STATUS);
+ } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
+ (val & MVNETA_TX_IN_PRGRS));
+
+ udelay(200);
+}
+
+/* Enable the port by setting the port enable bit of the MAC control register */
+static void mvneta_port_enable(struct mvneta_port *pp)
+{
+ u32 val;
+
+ /* Enable port */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val |= MVNETA_GMAC0_PORT_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+/* Disable the port and wait for about 200 usec before returning */
+static void mvneta_port_disable(struct mvneta_port *pp)
+{
+ u32 val;
+
+ /* Reset the Enable bit in the Serial Control Register */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val &= ~MVNETA_GMAC0_PORT_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+
+ udelay(200);
+}
+
+/* Multicast tables methods */
+
+/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ val = 0;
+ } else {
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
+}
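
A worked example (illustrative only) of the table-entry packing used above: each byte of a filter register holds an "accept" bit in bit 0 and the target queue in bits 3:1, replicated across the four table entries packed into one 32-bit register.

/*   queue 0:  val = 0x1 | (0 << 1) = 0x01  ->  0x01010101
 *   queue 2:  val = 0x1 | (2 << 1) = 0x05  ->  0x05050505
 *   queue -1: val = 0                      ->  reject all entries
 */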
+
+/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ val = 0;
+ } else {
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xfc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
+
+}
+
+/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
+static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
+ val = 0;
+ } else {
+ memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xfc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
+}
+
+/* This method applies default settings to the NETA port:
+ * Clears interrupt Cause and Mask registers.
+ * Clears all MAC tables.
+ * Sets default values in all registers.
+ * Resets RX and TX descriptor rings.
+ * Resets PHY.
+ * This method can be called after mvneta_port_down() to return the port
+ * settings to defaults.
+ */
+static void mvneta_defaults_set(struct mvneta_port *pp)
+{
+ int cpu;
+ int queue;
+ u32 val;
+
+ /* Clear all Cause registers */
+ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+
+ /* Mask all interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
+
+ /* Enable MBUS Retry bit16 */
+ mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
+
+ /* Set CPU queue access map - all CPUs have access to all RX
+ * queues and to all TX queues
+ */
+ for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+ mvreg_write(pp, MVNETA_CPU_MAP(cpu),
+ (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
+ MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+
+ /* Reset RX and TX DMAs */
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+
+ /* Disable Legacy WRR, Disable EJP, Release from reset */
+ mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
+ for (queue = 0; queue < txq_number; queue++) {
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
+ }
+
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+
+ /* Set Port Acceleration Mode */
+ val = MVNETA_ACC_MODE_EXT;
+ mvreg_write(pp, MVNETA_ACC_MODE, val);
+
+ /* Update val of portCfg register accordingly with all RxQueue types */
+ val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
+ mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+ val = 0;
+ mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
+ mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
+
+ /* Build PORT_SDMA_CONFIG_REG */
+ val = 0;
+
+ /* Default burst size */
+ val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+ val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+
+ val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
+ MVNETA_NO_DESC_SWAP);
+
+ /* Assign port SDMA configuration */
+ mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
+
+ mvneta_set_ucast_table(pp, -1);
+ mvneta_set_special_mcast_table(pp, -1);
+ mvneta_set_other_mcast_table(pp, -1);
+
+ /* Set port interrupt enable register - default enable all */
+ mvreg_write(pp, MVNETA_INTR_ENABLE,
+ (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
+ | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+}
+
+/* Set max sizes for tx queues */
+static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
+
+{
+ u32 val, size, mtu;
+ int queue;
+
+ mtu = max_tx_size * 8;
+ if (mtu > MVNETA_TX_MTU_MAX)
+ mtu = MVNETA_TX_MTU_MAX;
+
+ /* Set MTU */
+ val = mvreg_read(pp, MVNETA_TX_MTU);
+ val &= ~MVNETA_TX_MTU_MAX;
+ val |= mtu;
+ mvreg_write(pp, MVNETA_TX_MTU, val);
+
+ /* TX token size and all TXQs token size must be larger than the MTU */
+ val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
+
+ size = val & MVNETA_TX_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
+ val |= size;
+ mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
+ }
+ for (queue = 0; queue < txq_number; queue++) {
+ val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
+
+ size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
+ val |= size;
+ mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
+ }
+ }
+}
+
+/* Set unicast address */
+static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
+ int queue)
+{
+ unsigned int unicast_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Locate the Unicast table entry */
+ last_nibble = (0xf & last_nibble);
+
+ /* offset from unicast tbl base */
+ tbl_offset = (last_nibble / 4) * 4;
+
+ /* offset within the above reg */
+ reg_offset = last_nibble % 4;
+
+ unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
+
+ if (queue == -1) {
+ /* Clear accepts frame bit at specified unicast DA tbl entry */
+ unicast_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ unicast_reg &= ~(0xff << (8 * reg_offset));
+ unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
+}
+
+/* Set mac address */
+static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
+ int queue)
+{
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ if (queue != -1) {
+ mac_l = (addr[4] << 8) | (addr[5]);
+ mac_h = (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | (addr[3] << 0);
+
+ mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
+ mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
+ }
+
+ /* Accept frames of this address */
+ mvneta_set_ucast_addr(pp, addr[5], queue);
+}
+
+/* Set the number of packets that will be received before RX interrupt
+ * will be generated by HW.
+ */
+static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, u32 value)
+{
+ mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
+ value | MVNETA_RXQ_NON_OCCUPIED(0));
+ rxq->pkts_coal = value;
+}
+
+/* Set the time delay in usec before RX interrupt will be generated by
+ * HW.
+ */
+static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, u32 value)
+{
+ u32 val;
+ unsigned long clk_rate;
+
+ clk_rate = clk_get_rate(pp->clk);
+ val = (clk_rate / 1000000) * value;
+
+ mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
+ rxq->time_coal = value;
+}
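
A worked example of the usec-to-cycles conversion above, assuming a 250 MHz core clock (the clock rate is board specific and only an assumption here):

/*   mvneta_rx_time_coal_set(pp, rxq, 100);
 *   val = (250000000 / 1000000) * 100 = 25000 clock cycles
 * programmed into MVNETA_RXQ_TIME_COAL_REG, i.e. roughly a 100 usec
 * coalescing delay before the RX interrupt fires, in addition to the
 * packet-count threshold set by mvneta_rx_pkts_coal_set().
 */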
+
+/* Set threshold for TX_DONE pkts coalescing */
+static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq, u32 value)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
+
+ val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
+ val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
+
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
+
+ txq->done_pkts_coal = value;
+}
+
+/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
+static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
+{
+ if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
+ pp->tx_done_timer.expires = jiffies +
+ msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
+ add_timer(&pp->tx_done_timer);
+ }
+}
+
+
+/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
+static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
+ u32 phys_addr, u32 cookie)
+{
+ rx_desc->buf_cookie = cookie;
+ rx_desc->buf_phys_addr = phys_addr;
+}
+
+/* Decrement sent descriptors counter */
+static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int sent_desc)
+{
+ u32 val;
+
+ /* Only 255 TX descriptors can be updated at once */
+ while (sent_desc > 0xff) {
+ val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ sent_desc = sent_desc - 0xff;
+ }
+
+ val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get number of TX descriptors already sent by HW */
+static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ u32 val;
+ int sent_desc;
+
+ val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
+ sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
+ MVNETA_TXQ_SENT_DESC_SHIFT;
+
+ return sent_desc;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ */
+static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int sent_desc;
+
+ /* Get number of sent descriptors */
+ sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
+
+ /* Decrement sent descriptors counter */
+ if (sent_desc)
+ mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
+
+ return sent_desc;
+}
+
+/* Set TXQ descriptors fields relevant for CSUM calculation */
+static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+ int ip_hdr_len, int l4_proto)
+{
+ u32 command;
+
+ /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+ * G_L4_chk, L4_type; required only for checksum
+ * calculation
+ */
+ command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
+ command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
+
+ if (l3_proto == swab16(ETH_P_IP))
+ command |= MVNETA_TXD_IP_CSUM;
+ else
+ command |= MVNETA_TX_L3_IP6;
+
+ if (l4_proto == IPPROTO_TCP)
+ command |= MVNETA_TX_L4_CSUM_FULL;
+ else if (l4_proto == IPPROTO_UDP)
+ command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
+ else
+ command |= MVNETA_TX_L4_CSUM_NOT;
+
+ return command;
+}
+
+
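+/* Worked example for the command word above (illustrative values for
+ * an untagged TCP/IPv4 frame with no IP options): callers pass
+ * l3_offs = 14 (the Ethernet header length) and ip_hdr_len = 5 (the
+ * ihl field, in 32-bit words), and the command also carries
+ * MVNETA_TXD_IP_CSUM and MVNETA_TX_L4_CSUM_FULL.
+ */
+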
+/* Display more error info */
+static void mvneta_rx_error(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc)
+{
+ u32 status = rx_desc->status;
+
+ if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
+ netdev_err(pp->dev,
+ "bad rx status %08x (buffer oversize), size=%d\n",
+ rx_desc->status, rx_desc->data_size);
+ return;
+ }
+
+ switch (status & MVNETA_RXD_ERR_CODE_MASK) {
+ case MVNETA_RXD_ERR_CRC:
+ netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_OVERRUN:
+ netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_LEN:
+ netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_RESOURCE:
+ netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ }
+}
+
+/* Handle RX checksum offload */
+static void mvneta_rx_csum(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
+ (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
+ u32 cause)
+{
+ int queue = fls(cause) - 1;
+
+ return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+}
+
+/* Free tx queue skbuffs */
+static void mvneta_txq_bufs_free(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct mvneta_tx_desc *tx_desc = txq->descs +
+ txq->txq_get_index;
+ struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+
+ mvneta_txq_inc_get(txq);
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Handle end of transmission */
+static int mvneta_txq_done(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+ int tx_done;
+
+ tx_done = mvneta_txq_sent_desc_proc(pp, txq);
+ if (tx_done == 0)
+ return tx_done;
+ mvneta_txq_bufs_free(pp, txq, tx_done);
+
+ txq->count -= tx_done;
+
+ if (netif_tx_queue_stopped(nq)) {
+ if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
+ netif_tx_wake_queue(nq);
+ }
+
+ return tx_done;
+}
+
+/* Refill processing */
+static int mvneta_rx_refill(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc)
+
+{
+ dma_addr_t phys_addr;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
+ if (!skb)
+ return -ENOMEM;
+
+ phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+ MVNETA_RX_BUF_SIZE(pp->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
+
+ return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int ip_hdr_len = 0;
+ u8 l4_proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ /* Calculate IPv4 checksum and L4 checksum */
+ ip_hdr_len = ip4h->ihl;
+ l4_proto = ip4h->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ /* Read l4_protocol from one of the IPv6 extra headers */
+ if (skb_network_header_len(skb) > 0)
+ ip_hdr_len = (skb_network_header_len(skb) >> 2);
+ l4_proto = ip6h->nexthdr;
+ } else
+ return MVNETA_TX_L4_CSUM_NOT;
+
+ return mvneta_txq_desc_csum(skb_network_offset(skb),
+ skb->protocol, ip_hdr_len, l4_proto);
+ }
+
+ return MVNETA_TX_L4_CSUM_NOT;
+}
+
+/* Returns rx queue pointer (find last set bit) according to causeRxTx
+ * value
+ */
+static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
+ u32 cause)
+{
+ int queue = fls(cause >> 8) - 1;
+
+ return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
+}
+
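+/* Worked example for the policy above: with cause = 0x0500 (RXQs 0
+ * and 2 have pending packets), cause >> 8 = 0x5 and fls() - 1 = 2,
+ * so the highest-numbered pending queue, RXQ 2, is served first.
+ */
+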
+/* Drop packets received by the RXQ and free buffers */
+static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ int rx_done, i;
+
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ for (i = 0; i < rxq->size; i++) {
+ struct mvneta_rx_desc *rx_desc = rxq->descs + i;
+ struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+ dev_kfree_skb_any(skb);
+ dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ rx_desc->data_size, DMA_FROM_DEVICE);
+ }
+
+ if (rx_done)
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+}
+
+/* Main rx processing */
+static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ struct mvneta_rx_queue *rxq)
+{
+ struct net_device *dev = pp->dev;
+ int rx_done, rx_filled;
+
+ /* Get number of received packets */
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+
+ if (rx_todo > rx_done)
+ rx_todo = rx_done;
+
+ rx_done = 0;
+ rx_filled = 0;
+
+ /* Fairness NAPI loop */
+ while (rx_done < rx_todo) {
+ struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+ struct sk_buff *skb;
+ u32 rx_status;
+ int rx_bytes, err;
+
+ prefetch(rx_desc);
+ rx_done++;
+ rx_filled++;
+ rx_status = rx_desc->status;
+ skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+ if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
+ (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+ dev->stats.rx_errors++;
+ mvneta_rx_error(pp, rx_desc);
+ mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
+ (u32)skb);
+ continue;
+ }
+
+ dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ rx_desc->data_size, DMA_FROM_DEVICE);
+
+ rx_bytes = rx_desc->data_size -
+ (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ u64_stats_update_begin(&pp->rx_stats.syncp);
+ pp->rx_stats.packets++;
+ pp->rx_stats.bytes += rx_bytes;
+ u64_stats_update_end(&pp->rx_stats.syncp);
+
+ /* Linux processing */
+ skb_reserve(skb, MVNETA_MH_SIZE);
+ skb_put(skb, rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ mvneta_rx_csum(pp, rx_desc, skb);
+
+ napi_gro_receive(&pp->napi, skb);
+
+ /* Refill processing */
+ err = mvneta_rx_refill(pp, rx_desc);
+ if (err) {
+ netdev_err(pp->dev, "Linux processing - Can't refill\n");
+ rxq->missed++;
+ rx_filled--;
+ }
+ }
+
+ /* Update rxq management counters */
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+
+ return rx_done;
+}
+
+/* Handle tx fragmentation processing */
+static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
+ struct mvneta_tx_queue *txq)
+{
+ struct mvneta_tx_desc *tx_desc;
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *addr = page_address(frag->page.p) + frag->page_offset;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = frag->size;
+
+ tx_desc->buf_phys_addr =
+ dma_map_single(pp->dev->dev.parent, addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto error;
+ }
+
+ if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+ /* Last descriptor */
+ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+ txq->tx_skb[txq->txq_put_index] = skb;
+
+ mvneta_txq_inc_put(txq);
+ } else {
+ /* Descriptor in the middle: Not First, Not Last */
+ tx_desc->command = 0;
+
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ mvneta_txq_inc_put(txq);
+ }
+ }
+
+ return 0;
+
+error:
+ /* Release all descriptors that were used to map fragments of
+ * this packet, as well as the corresponding DMA mappings
+ */
+ for (i = i - 1; i >= 0; i--) {
+ tx_desc = txq->descs + i;
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ }
+
+ return -ENOMEM;
+}
+
+/* Main tx processing */
+static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+ struct mvneta_tx_desc *tx_desc;
+ struct netdev_queue *nq;
+ int frags = 0;
+ u32 tx_cmd;
+
+ if (!netif_running(dev))
+ goto out;
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+ nq = netdev_get_tx_queue(dev, txq_def);
+
+ /* Get a descriptor for the first part of the packet */
+ tx_desc = mvneta_txq_next_desc_get(txq);
+
+ tx_cmd = mvneta_skb_tx_csum(pp, skb);
+
+ tx_desc->data_size = skb_headlen(skb);
+
+ tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ tx_desc->buf_phys_addr))) {
+ mvneta_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+
+ if (frags == 1) {
+ /* First and Last descriptor */
+ tx_cmd |= MVNETA_TXD_FLZ_DESC;
+ tx_desc->command = tx_cmd;
+ txq->tx_skb[txq->txq_put_index] = skb;
+ mvneta_txq_inc_put(txq);
+ } else {
+ /* First but not Last */
+ tx_cmd |= MVNETA_TXD_F_DESC;
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ mvneta_txq_inc_put(txq);
+ tx_desc->command = tx_cmd;
+ /* Continue with other skb fragments */
+ if (mvneta_tx_frag_process(pp, skb, txq)) {
+ dma_unmap_single(dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+ }
+
+ txq->count += frags;
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+
+ if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
+ netif_tx_stop_queue(nq);
+
+out:
+ if (frags > 0) {
+ u64_stats_update_begin(&pp->tx_stats.syncp);
+ pp->tx_stats.packets++;
+ pp->tx_stats.bytes += skb->len;
+ u64_stats_update_end(&pp->tx_stats.syncp);
+
+ } else {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
+ mvneta_txq_done(pp, txq);
+
+ /* If after calling mvneta_txq_done, count equals
+ * frags, we need to set the timer
+ */
+ if (txq->count == frags && frags > 0)
+ mvneta_add_tx_done_timer(pp);
+
+ return NETDEV_TX_OK;
+}
+
+
+/* Free tx resources, when resetting a port */
+static void mvneta_txq_done_force(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+
+{
+ int tx_done = txq->count;
+
+ mvneta_txq_bufs_free(pp, txq, tx_done);
+
+ /* reset txq */
+ txq->count = 0;
+ txq->txq_put_index = 0;
+ txq->txq_get_index = 0;
+}
+
+/* handle tx done - called from tx done timer callback */
+static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
+ int *tx_todo)
+{
+ struct mvneta_tx_queue *txq;
+ u32 tx_done = 0;
+ struct netdev_queue *nq;
+
+ *tx_todo = 0;
+ while (cause_tx_done != 0) {
+ txq = mvneta_tx_done_policy(pp, cause_tx_done);
+ if (!txq)
+ break;
+
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+ __netif_tx_lock(nq, smp_processor_id());
+
+ if (txq->count) {
+ tx_done += mvneta_txq_done(pp, txq);
+ *tx_todo += txq->count;
+ }
+
+ __netif_tx_unlock(nq);
+ cause_tx_done &= ~((1 << txq->id));
+ }
+
+ return tx_done;
+}
+
+/* Compute the CRC-8 of the specified address, using an algorithm
+ * specific to the hardware spec that differs from the generic CRC-8
+ * algorithm
+ */
+static int mvneta_addr_crc(unsigned char *addr)
+{
+ int crc = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ int j;
+
+ crc = (crc ^ addr[i]) << 8;
+ for (j = 7; j >= 0; j--) {
+ if (crc & (0x100 << j))
+ crc ^= 0x107 << j;
+ }
+ }
+
+ return crc;
+}
+
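+/* The 0x107 constant above is the reduction polynomial
+ * x^8 + x^2 + x + 1; the inner loop clears every bit above bit 7, so
+ * the result always fits in 8 bits and can directly index the 256
+ * possible Other Multicast table entries.
+ */
+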
+/* This method controls the net device special MAC multicast support.
+ * The Special Multicast Table for MAC addresses supports MAC of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table. This method sets the
+ * appropriate Special Multicast Table entry.
+ */
+static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
+ unsigned char last_byte,
+ int queue)
+{
+ unsigned int smc_table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Register offset from SMC table base */
+ tbl_offset = (last_byte / 4);
+ /* Entry offset within the above reg */
+ reg_offset = last_byte % 4;
+
+ smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
+ + tbl_offset * 4));
+
+ if (queue == -1)
+ smc_table_reg &= ~(0xff << (8 * reg_offset));
+ else {
+ smc_table_reg &= ~(0xff << (8 * reg_offset));
+ smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
+ smc_table_reg);
+}
+
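+/* Worked example for the indexing above: for the multicast address
+ * 01:00:5e:00:00:2a, last_byte = 0x2a, so tbl_offset = 10 (the
+ * register at MVNETA_DA_FILT_SPEC_MCAST + 40) and reg_offset = 2
+ * (the third byte lane within that register).
+ */
+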
+/* This method controls the network device Other MAC multicast support.
+ * The Other Multicast Table is used for multicast of another type.
+ * A CRC-8 is used as an index to the Other Multicast Table entries
+ * in the DA-Filter table.
+ * The method gets the CRC-8 value from the calling routine and
+ * sets the appropriate Other Multicast Table entry according to the
+ * specified CRC-8 value.
+ */
+static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
+ unsigned char crc8,
+ int queue)
+{
+ unsigned int omc_table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
+ reg_offset = crc8 % 4; /* Entry offset within the above reg */
+
+ omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
+
+ if (queue == -1) {
+ /* Clear accepts frame bit at specified Other DA table entry */
+ omc_table_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ omc_table_reg &= ~(0xff << (8 * reg_offset));
+ omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
+}
+
+/* The network device supports multicast using two tables:
+ * 1) Special Multicast Table for MAC addresses of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table.
+ * 2) Other Multicast Table for multicast of another type. A CRC-8 value
+ * is used as an index to the Other Multicast Table entries in the
+ * DA-Filter table.
+ */
+static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
+ int queue)
+{
+ unsigned char crc_result = 0;
+
+ if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
+ mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
+ return 0;
+ }
+
+ crc_result = mvneta_addr_crc(p_addr);
+ if (queue == -1) {
+ if (pp->mcast_count[crc_result] == 0) {
+ netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
+ crc_result);
+ return -EINVAL;
+ }
+
+ pp->mcast_count[crc_result]--;
+ if (pp->mcast_count[crc_result] != 0) {
+ netdev_info(pp->dev,
+ "After delete there are %d valid Mcast for crc8=0x%02x\n",
+ pp->mcast_count[crc_result], crc_result);
+ return -EINVAL;
+ }
+ } else
+ pp->mcast_count[crc_result]++;
+
+ mvneta_set_other_mcast_addr(pp, crc_result, queue);
+
+ return 0;
+}
+
+/* Configure the filtering mode of the Ethernet port */
+static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
+ int is_promisc)
+{
+ u32 port_cfg_reg, val;
+
+ port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
+
+ val = mvreg_read(pp, MVNETA_TYPE_PRIO);
+
+ /* Set / Clear UPM bit in port configuration register */
+ if (is_promisc) {
+ /* Accept all Unicast addresses */
+ port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
+ val |= MVNETA_FORCE_UNI;
+ mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
+ mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
+ } else {
+ /* Reject all Unicast addresses */
+ port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
+ val &= ~MVNETA_FORCE_UNI;
+ }
+
+ mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
+ mvreg_write(pp, MVNETA_TYPE_PRIO, val);
+}
+
+/* register unicast and multicast addresses */
+static void mvneta_set_rx_mode(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Accept all: Multicast + Unicast */
+ mvneta_rx_unicast_promisc_set(pp, 1);
+ mvneta_set_ucast_table(pp, rxq_def);
+ mvneta_set_special_mcast_table(pp, rxq_def);
+ mvneta_set_other_mcast_table(pp, rxq_def);
+ } else {
+ /* Accept single Unicast */
+ mvneta_rx_unicast_promisc_set(pp, 0);
+ mvneta_set_ucast_table(pp, -1);
+ mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast */
+ mvneta_set_special_mcast_table(pp, rxq_def);
+ mvneta_set_other_mcast_table(pp, rxq_def);
+ } else {
+ /* Accept only initialized multicast */
+ mvneta_set_special_mcast_table(pp, -1);
+ mvneta_set_other_mcast_table(pp, -1);
+
+ if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ mvneta_mcast_addr_set(pp, ha->addr,
+ rxq_def);
+ }
+ }
+ }
+ }
+}
+
+/* Interrupt handling - the callback for request_irq() */
+static irqreturn_t mvneta_isr(int irq, void *dev_id)
+{
+ struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+
+ /* Mask all interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+
+ napi_schedule(&pp->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* NAPI handler
+ * Bits 0 - 7 of the causeRxTx register indicate that packets were
+ * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
+ * Bits 8 - 15 of the causeRxTx register indicate that packets were
+ * received on the corresponding RXQ (Bit 8 is for RX queue 0).
+ * Each CPU has its own causeRxTx register.
+ */
+static int mvneta_poll(struct napi_struct *napi, int budget)
+{
+ int rx_done = 0;
+ u32 cause_rx_tx;
+ unsigned long flags;
+ struct mvneta_port *pp = netdev_priv(napi->dev);
+
+ if (!netif_running(pp->dev)) {
+ napi_complete(napi);
+ return rx_done;
+ }
+
+ /* Read cause register */
+ cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
+ MVNETA_RX_INTR_MASK(rxq_number);
+
+ /* For the case where the last mvneta_poll did not process all
+ * RX packets
+ */
+ cause_rx_tx |= pp->cause_rx_tx;
+ if (rxq_number > 1) {
+ while ((cause_rx_tx != 0) && (budget > 0)) {
+ int count;
+ struct mvneta_rx_queue *rxq;
+ /* get rx queue number from cause_rx_tx */
+ rxq = mvneta_rx_policy(pp, cause_rx_tx);
+ if (!rxq)
+ break;
+
+ /* process the packet in that rx queue */
+ count = mvneta_rx(pp, budget, rxq);
+ rx_done += count;
+ budget -= count;
+ if (budget > 0) {
+ /* clear the bit corresponding to this
+ * rx queue in the cause rx tx
+ * register, so that the next
+ * iteration will find the next rx
+ * queue with received packets
+ */
+ cause_rx_tx &= ~((1 << rxq->id) << 8);
+ }
+ }
+ } else {
+ rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+ budget -= rx_done;
+ }
+
+ if (budget > 0) {
+ cause_rx_tx = 0;
+ napi_complete(napi);
+ local_irq_save(flags);
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number));
+ local_irq_restore(flags);
+ }
+
+ pp->cause_rx_tx = cause_rx_tx;
+ return rx_done;
+}
+
+/* tx done timer callback */
+static void mvneta_tx_done_timer_callback(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvneta_port *pp = netdev_priv(dev);
+ int tx_done = 0, tx_todo = 0;
+
+ if (!netif_running(dev))
+ return;
+
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ tx_done = mvneta_tx_done_gbe(pp,
+ (((1 << txq_number) - 1) &
+ MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
+ &tx_todo);
+ if (tx_todo > 0)
+ mvneta_add_tx_done_timer(pp);
+}
+
+/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
+static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ int num)
+{
+ struct net_device *dev = pp->dev;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct sk_buff *skb;
+ struct mvneta_rx_desc *rx_desc;
+ unsigned long phys_addr;
+
+ skb = dev_alloc_skb(pp->pkt_size);
+ if (!skb) {
+ netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
+ __func__, rxq->id, i, num);
+ break;
+ }
+
+ rx_desc = rxq->descs + i;
+ memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
+ phys_addr = dma_map_single(dev->dev.parent, skb->head,
+ MVNETA_RX_BUF_SIZE(pp->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
+ }
+
+ /* Add this number of RX descriptors as non-occupied (ready to
+ * get packets)
+ */
+ mvneta_rxq_non_occup_desc_add(pp, rxq, i);
+
+ return i;
+}
+
+/* Free all packets pending transmit from all TXQs and reset TX port */
+static void mvneta_tx_reset(struct mvneta_port *pp)
+{
+ int queue;
+
+ /* Free the skbs in the HAL TX ring */
+ for (queue = 0; queue < txq_number; queue++)
+ mvneta_txq_done_force(pp, &pp->txqs[queue]);
+
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+}
+
+static void mvneta_rx_reset(struct mvneta_port *pp)
+{
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+
+{
+ rxq->size = pp->rx_ring_size;
+
+ /* Allocate memory for RX descriptors */
+ rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+ rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ &rxq->descs_phys, GFP_KERNEL);
+ if (rxq->descs == NULL) {
+ netdev_err(pp->dev,
+ "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
+ rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ rxq->size);
+ return -ENOMEM;
+ }
+
+ BUG_ON(rxq->descs !=
+ PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
+
+ rxq->last_desc = rxq->size - 1;
+
+ /* Set Rx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
+ mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
+
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
+
+ /* Set coalescing pkts and time */
+ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+ mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+
+ /* Fill RXQ with buffers from RX pool */
+ mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ mvneta_rxq_bm_disable(pp, rxq);
+ mvneta_rxq_fill(pp, rxq, rxq->size);
+
+ return 0;
+}
+
+/* Cleanup Rx queue */
+static void mvneta_rxq_deinit(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ mvneta_rxq_drop_pkts(pp, rxq);
+
+ if (rxq->descs)
+ dma_free_coherent(pp->dev->dev.parent,
+ rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ rxq->descs,
+ rxq->descs_phys);
+
+ rxq->descs = NULL;
+ rxq->last_desc = 0;
+ rxq->next_desc_to_proc = 0;
+ rxq->descs_phys = 0;
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ txq->size = pp->tx_ring_size;
+
+ /* Allocate memory for TX descriptors */
+ txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ &txq->descs_phys, GFP_KERNEL);
+ if (txq->descs == NULL) {
+ netdev_err(pp->dev,
+ "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
+ txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->size);
+ return -ENOMEM;
+ }
+
+ /* Make sure descriptor address is cache line size aligned */
+ BUG_ON(txq->descs !=
+ PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
+
+ txq->last_desc = txq->size - 1;
+
+ /* Set maximum bandwidth for enabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+ /* Set Tx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+ txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
+ if (txq->tx_skb == NULL) {
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+ return -ENOMEM;
+ }
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+
+ return 0;
+}
+
+/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ kfree(txq->tx_skb);
+
+ if (txq->descs)
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ txq->descs = NULL;
+ txq->last_desc = 0;
+ txq->next_desc_to_proc = 0;
+ txq->descs_phys = 0;
+
+ /* Set minimum bandwidth for disabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
+
+ /* Set Tx descriptors queue starting address and size */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
+}
+
+/* Cleanup all Tx queues */
+static void mvneta_cleanup_txqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < txq_number; queue++)
+ mvneta_txq_deinit(pp, &pp->txqs[queue]);
+}
+
+/* Cleanup all Rx queues */
+static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++)
+ mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+}
+
+
+/* Init all Rx queues */
+static int mvneta_setup_rxqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_rxqs(pp);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* Init all tx queues */
+static int mvneta_setup_txqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < txq_number; queue++) {
+ int err = mvneta_txq_init(pp, &pp->txqs[queue]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create txq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_txqs(pp);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void mvneta_start_dev(struct mvneta_port *pp)
+{
+ mvneta_max_rx_size_set(pp, pp->pkt_size);
+ mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
+
+ /* start the Rx/Tx activity */
+ mvneta_port_enable(pp);
+
+ /* Enable polling on the port */
+ napi_enable(&pp->napi);
+
+ /* Unmask interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number));
+
+ phy_start(pp->phy_dev);
+ netif_tx_start_all_queues(pp->dev);
+}
+
+static void mvneta_stop_dev(struct mvneta_port *pp)
+{
+ phy_stop(pp->phy_dev);
+
+ napi_disable(&pp->napi);
+
+ netif_carrier_off(pp->dev);
+
+ mvneta_port_down(pp);
+ netif_tx_stop_all_queues(pp->dev);
+
+ /* Stop the port activity */
+ mvneta_port_disable(pp);
+
+ /* Clear all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+
+ /* Mask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+ mvneta_tx_reset(pp);
+ mvneta_rx_reset(pp);
+}
+
+/* tx timeout callback - display a message and stop/start the network device */
+static void mvneta_tx_timeout(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ netdev_info(dev, "tx timeout\n");
+ mvneta_stop_dev(pp);
+ mvneta_start_dev(pp);
+}
+
+/* Validate the MTU; return a (possibly rounded) positive MTU or -EINVAL */
+static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
+{
+ if (mtu < 68) {
+ netdev_err(dev, "cannot change mtu to less than 68\n");
+ return -EINVAL;
+ }
+
+ /* 9676 == 9700 - 20 and rounding to 8 */
+ if (mtu > 9676) {
+ netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
+ mtu = 9676;
+ }
+
+ if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
+ netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
+ mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
+ mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
+ }
+
+ return mtu;
+}
+
+/* Change the device mtu */
+static int mvneta_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ mtu = mvneta_check_mtu_valid(dev, mtu);
+ if (mtu < 0)
+ return -EINVAL;
+
+ dev->mtu = mtu;
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* The interface is running, so we have to force a
+ * reallocation of the RXQs
+ */
+ mvneta_stop_dev(pp);
+
+ mvneta_cleanup_txqs(pp);
+ mvneta_cleanup_rxqs(pp);
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret) {
+ netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+ return ret;
+ }
+
+ mvneta_setup_txqs(pp);
+
+ mvneta_start_dev(pp);
+ mvneta_port_up(pp);
+
+ return 0;
+}
+
+/* Handle setting mac address */
+static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u8 *mac = addr + 2;
+ int i;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Remove previous address table entry */
+ mvneta_mac_addr_set(pp, dev->dev_addr, -1);
+
+ /* Set new addr in hw */
+ mvneta_mac_addr_set(pp, mac, rxq_def);
+
+ /* Set addr in the device */
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = mac[i];
+
+ return 0;
+}
+
+static void mvneta_adjust_link(struct net_device *ndev)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ struct phy_device *phydev = pp->phy_dev;
+ int status_change = 0;
+
+ if (phydev->link) {
+ if ((pp->speed != phydev->speed) ||
+ (pp->duplex != phydev->duplex)) {
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+
+ if (phydev->duplex)
+ val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (phydev->speed == SPEED_1000)
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else
+ val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+
+ pp->duplex = phydev->duplex;
+ pp->speed = phydev->speed;
+ }
+ }
+
+ if (phydev->link != pp->link) {
+ if (!phydev->link) {
+ pp->duplex = -1;
+ pp->speed = 0;
+ }
+
+ pp->link = phydev->link;
+ status_change = 1;
+ }
+
+ if (status_change) {
+ if (phydev->link) {
+ u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val |= (MVNETA_GMAC_FORCE_LINK_PASS |
+ MVNETA_GMAC_FORCE_LINK_DOWN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ mvneta_port_up(pp);
+ netdev_info(pp->dev, "link up\n");
+ } else {
+ mvneta_port_down(pp);
+ netdev_info(pp->dev, "link down\n");
+ }
+ }
+}
+
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+ struct phy_device *phy_dev;
+
+ phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
+ pp->phy_interface);
+ if (!phy_dev) {
+ netdev_err(pp->dev, "could not find the PHY\n");
+ return -ENODEV;
+ }
+
+ phy_dev->supported &= PHY_GBIT_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
+
+ pp->phy_dev = phy_dev;
+ pp->link = 0;
+ pp->duplex = 0;
+ pp->speed = 0;
+
+ return 0;
+}
+
+static void mvneta_mdio_remove(struct mvneta_port *pp)
+{
+ phy_disconnect(pp->phy_dev);
+ pp->phy_dev = NULL;
+}
+
+static int mvneta_open(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret)
+ return ret;
+
+ ret = mvneta_setup_txqs(pp);
+ if (ret)
+ goto err_cleanup_rxqs;
+
+ /* Connect to port interrupt line */
+ ret = request_irq(pp->dev->irq, mvneta_isr, 0,
+ MVNETA_DRIVER_NAME, pp);
+ if (ret) {
+ netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
+ goto err_cleanup_txqs;
+ }
+
+ /* By default, the link is down */
+ netif_carrier_off(pp->dev);
+
+ ret = mvneta_mdio_probe(pp);
+ if (ret < 0) {
+ netdev_err(dev, "cannot probe MDIO bus\n");
+ goto err_free_irq;
+ }
+
+ mvneta_start_dev(pp);
+
+ return 0;
+
+err_free_irq:
+ free_irq(pp->dev->irq, pp);
+err_cleanup_txqs:
+ mvneta_cleanup_txqs(pp);
+err_cleanup_rxqs:
+ mvneta_cleanup_rxqs(pp);
+ return ret;
+}
+
+/* Stop the port, free port interrupt line */
+static int mvneta_stop(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ mvneta_stop_dev(pp);
+ mvneta_mdio_remove(pp);
+ free_irq(dev->irq, pp);
+ mvneta_cleanup_rxqs(pp);
+ mvneta_cleanup_txqs(pp);
+ del_timer(&pp->tx_done_timer);
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ return 0;
+}
+
+/* Ethtool methods */
+
+/* Get settings (phy address, speed) for ethtools */
+int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(pp->phy_dev, cmd);
+}
+
+/* Set settings (phy address, speed) for ethtools */
+int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(pp->phy_dev, cmd);
+}
+
+/* Set interrupt coalescing for ethtools */
+static int mvneta_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ rxq->time_coal = c->rx_coalesce_usecs;
+ rxq->pkts_coal = c->rx_max_coalesced_frames;
+ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+ mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+ }
+
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ txq->done_pkts_coal = c->tx_max_coalesced_frames;
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+ }
+
+ return 0;
+}
+
+/* get coalescing for ethtools */
+static int mvneta_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
+ c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
+
+ c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
+ return 0;
+}
+
+
+static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+
+static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct mvneta_port *pp = netdev_priv(netdev);
+
+ ring->rx_max_pending = MVNETA_MAX_RXD;
+ ring->tx_max_pending = MVNETA_MAX_TXD;
+ ring->rx_pending = pp->rx_ring_size;
+ ring->tx_pending = pp->tx_ring_size;
+}
+
+static int mvneta_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
+ return -EINVAL;
+ pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
+ ring->rx_pending : MVNETA_MAX_RXD;
+ pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
+ ring->tx_pending : MVNETA_MAX_TXD;
+
+ if (netif_running(dev)) {
+ mvneta_stop(dev);
+ if (mvneta_open(dev)) {
+ netdev_err(dev,
+ "error on opening device after ring param change\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops mvneta_netdev_ops = {
+ .ndo_open = mvneta_open,
+ .ndo_stop = mvneta_stop,
+ .ndo_start_xmit = mvneta_tx,
+ .ndo_set_rx_mode = mvneta_set_rx_mode,
+ .ndo_set_mac_address = mvneta_set_mac_addr,
+ .ndo_change_mtu = mvneta_change_mtu,
+ .ndo_tx_timeout = mvneta_tx_timeout,
+ .ndo_get_stats64 = mvneta_get_stats64,
+};
+
+const struct ethtool_ops mvneta_eth_tool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = mvneta_ethtool_get_settings,
+ .set_settings = mvneta_ethtool_set_settings,
+ .set_coalesce = mvneta_ethtool_set_coalesce,
+ .get_coalesce = mvneta_ethtool_get_coalesce,
+ .get_drvinfo = mvneta_ethtool_get_drvinfo,
+ .get_ringparam = mvneta_ethtool_get_ringparam,
+ .set_ringparam = mvneta_ethtool_set_ringparam,
+};
+
+/* Initialize hw */
+static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+{
+ int queue;
+
+ /* Disable port */
+ mvneta_port_disable(pp);
+
+ /* Set port default values */
+ mvneta_defaults_set(pp);
+
+ pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
+ GFP_KERNEL);
+ if (!pp->txqs)
+ return -ENOMEM;
+
+ /* Initialize TX descriptor rings */
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ txq->id = queue;
+ txq->size = pp->tx_ring_size;
+ txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
+ }
+
+ pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
+ GFP_KERNEL);
+ if (!pp->rxqs) {
+ kfree(pp->txqs);
+ return -ENOMEM;
+ }
+
+ /* Create Rx descriptor rings */
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ rxq->id = queue;
+ rxq->size = pp->rx_ring_size;
+ rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
+ rxq->time_coal = MVNETA_RX_COAL_USEC;
+ }
+
+ return 0;
+}
+
+static void mvneta_deinit(struct mvneta_port *pp)
+{
+ kfree(pp->txqs);
+ kfree(pp->rxqs);
+}
+
+/* Platform glue: initialize decoding windows */
+static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
+ const struct mbus_dram_target_info *dram)
+{
+ u32 win_enable;
+ u32 win_protect;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
+ mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
+
+ if (i < 4)
+ mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
+ }
+
+ win_enable = 0x3f;
+ win_protect = 0;
+
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+ mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
+
+ mvreg_write(pp, MVNETA_WIN_SIZE(i),
+ (cs->size - 1) & 0xffff0000);
+
+ win_enable &= ~(1 << i);
+ win_protect |= 3 << (2 * i);
+ }
+
+ mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Power up the port */
+static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+{
+ u32 val;
+
+ /* MAC Cause register should be cleared */
+ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
+
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII)
+ mvneta_port_sgmii_config(pp);
+
+ mvneta_gmac_rgmii_set(pp, 1);
+
+ /* Cancel Port Reset */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val &= ~MVNETA_GMAC2_PORT_RESET;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+ while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+ MVNETA_GMAC2_PORT_RESET) != 0)
+ continue;
+}
+
+/* Device initialization routine */
+static int mvneta_probe(struct platform_device *pdev)
+{
+ const struct mbus_dram_target_info *dram_target_info;
+ struct device_node *dn = pdev->dev.of_node;
+ struct device_node *phy_node;
+ u32 phy_addr;
+ struct mvneta_port *pp;
+ struct net_device *dev;
+ const char *mac_addr;
+ int phy_mode;
+ int err;
+
+ /* Our multiqueue support is not complete, so for now, only
+ * allow the usage of the first RX queue
+ */
+ if (rxq_def != 0) {
+ dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
+ return -EINVAL;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq_of_parse_and_map(dn, 0);
+ if (dev->irq == 0) {
+ err = -EINVAL;
+ goto err_free_netdev;
+ }
+
+ phy_node = of_parse_phandle(dn, "phy", 0);
+ if (!phy_node) {
+ dev_err(&pdev->dev, "no associated PHY\n");
+ err = -ENODEV;
+ goto err_free_irq;
+ }
+
+ phy_mode = of_get_phy_mode(dn);
+ if (phy_mode < 0) {
+ dev_err(&pdev->dev, "incorrect phy-mode\n");
+ err = -EINVAL;
+ goto err_free_irq;
+ }
+
+ mac_addr = of_get_mac_address(dn);
+
+ if (!mac_addr || !is_valid_ether_addr(mac_addr))
+ eth_hw_addr_random(dev);
+ else
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ dev->tx_queue_len = MVNETA_MAX_TXD;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvneta_netdev_ops;
+
+ SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+
+ pp = netdev_priv(dev);
+
+ pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
+ init_timer(&pp->tx_done_timer);
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ pp->weight = MVNETA_RX_POLL_WEIGHT;
+ pp->phy_node = phy_node;
+ pp->phy_interface = phy_mode;
+
+ pp->base = of_iomap(dn, 0);
+ if (pp->base == NULL) {
+ err = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pp->clk)) {
+ err = PTR_ERR(pp->clk);
+ goto err_unmap;
+ }
+
+ clk_prepare_enable(pp->clk);
+
+ pp->tx_done_timer.data = (unsigned long)dev;
+
+ pp->tx_ring_size = MVNETA_MAX_TXD;
+ pp->rx_ring_size = MVNETA_MAX_RXD;
+
+ pp->dev = dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = mvneta_init(pp, phy_addr);
+ if (err < 0) {
+ dev_err(&pdev->dev, "can't init eth hal\n");
+ goto err_clk;
+ }
+ mvneta_port_power_up(pp, phy_mode);
+
+ dram_target_info = mv_mbus_dram_info();
+ if (dram_target_info)
+ mvneta_conf_mbus_windows(pp, dram_target_info);
+
+ netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+
+ err = register_netdev(dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register\n");
+ goto err_deinit;
+ }
+
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+ platform_set_drvdata(pdev, pp->dev);
+
+ return 0;
+
+err_deinit:
+ mvneta_deinit(pp);
+err_clk:
+ clk_disable_unprepare(pp->clk);
+err_unmap:
+ iounmap(pp->base);
+err_free_irq:
+ irq_dispose_mapping(dev->irq);
+err_free_netdev:
+ free_netdev(dev);
+ return err;
+}
+
+/* Device removal routine */
+static int mvneta_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ mvneta_deinit(pp);
+ clk_disable_unprepare(pp->clk);
+ iounmap(pp->base);
+ irq_dispose_mapping(dev->irq);
+ free_netdev(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id mvneta_match[] = {
+ { .compatible = "marvell,armada-370-neta" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvneta_match);
+
+static struct platform_driver mvneta_driver = {
+ .probe = mvneta_probe,
+ .remove = mvneta_remove,
+ .driver = {
+ .name = MVNETA_DRIVER_NAME,
+ .of_match_table = mvneta_match,
+ },
+};
+
+module_platform_driver(mvneta_driver);
+
+MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
+
+module_param(rxq_number, int, S_IRUGO);
+module_param(txq_number, int, S_IRUGO);
+
+module_param(rxq_def, int, S_IRUGO);
+module_param(txq_def, int, S_IRUGO);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 10d678d3dd01..037ed866c22f 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -627,7 +627,6 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
netif_addr_lock_bh(dev);
update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
@@ -1391,7 +1390,7 @@ static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
struct phy_device *phy = pep->phy;
ethernet_phy_reset(pep);
- phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+ phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
@@ -1444,10 +1443,10 @@ static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRIVER_NAME, 32);
- strncpy(info->version, DRIVER_VERSION, 32);
- strncpy(info->fw_version, "N/A", 32);
- strncpy(info->bus_info, "N/A", 32);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static const struct ethtool_ops pxa168_ethtool_ops = {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 5544a1fe2f94..171f4b3dda07 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3855,7 +3855,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return dev;
}
@@ -3917,10 +3916,9 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* space for skge@pci:0000:04:00.0 */
hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ strlen(pci_name(pdev)) + 1, GFP_KERNEL);
- if (!hw) {
- dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ if (!hw)
goto err_out_free_regions;
- }
+
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
hw->pdev = pdev;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3269eb38cc57..fc07ca35721b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4801,7 +4801,6 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return dev;
}
@@ -4970,10 +4969,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ strlen(pci_name(pdev)) + 1, GFP_KERNEL);
- if (!hw) {
- dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ if (!hw)
goto err_out_free_regions;
- }
hw->pdev = pdev;
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 3d1899ff1076..fdc5f23d8e9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1498,6 +1498,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
u32 reply;
u8 is_going_down = 0;
int i;
+ unsigned long flags;
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
@@ -1576,12 +1577,12 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
goto reset_slave;
}
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = cmd;
else
is_going_down = 1;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
mlx4_warn(dev, "Slave is going down aborting command(%d)"
" executing from slave:%d\n",
@@ -1597,10 +1598,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
reset_slave:
/* cleanup any slave resources */
mlx4_delete_all_resources_for_slave(dev, slave);
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/*with slave in the middle of flr, no need to clean resources again.*/
inform_slave_state:
memset(&slave_state[slave].event_eq, 0,
@@ -1755,7 +1756,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
spin_lock_init(&s_state->lock);
}
- memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index aa9c2f6cf3c0..b8d0854a7ad1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,7 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+ cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
cq->ring = ring;
cq->is_tx = mode;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 03447dad07e9..00f25b5f297f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -35,6 +35,8 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
+#include <linux/in.h>
+#include <net/ip.h>
#include "mlx4_en.h"
#include "en_port.h"
@@ -494,7 +496,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
mlx4_en_free_resources(priv);
@@ -589,7 +591,7 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
priv->prof->rss_rings = rss_rings;
@@ -664,27 +666,90 @@ static int mlx4_en_validate_flow(struct net_device *dev,
if ((cmd->fs.flow_type & FLOW_EXT)) {
if (cmd->fs.m_ext.vlan_etype ||
- !(cmd->fs.m_ext.vlan_tci == 0 ||
- cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+ !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+ 0 ||
+ (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+ cpu_to_be16(VLAN_VID_MASK)))
return -EINVAL;
+
+ if (cmd->fs.m_ext.vlan_tci) {
+ if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+
+ }
}
return 0;
}
+static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h,
+ struct mlx4_spec_list *spec_l2,
+ unsigned char *mac)
+{
+ int err = 0;
+ __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+ memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
+
+ if ((cmd->fs.flow_type & FLOW_EXT) &&
+ (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+ spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+ spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
+ }
+
+ list_add_tail(&spec_l2->list, rule_list_h);
+
+ return err;
+}
+
+static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h,
+ struct mlx4_spec_list *spec_l2,
+ __be32 ipv4_dst)
+{
+#ifdef CONFIG_INET
+ unsigned char mac[ETH_ALEN];
+
+ if (!ipv4_is_multicast(ipv4_dst)) {
+ if (cmd->fs.flow_type & FLOW_MAC_EXT)
+ memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
+ else
+ memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
+ } else {
+ ip_eth_mc_map(ipv4_dst, mac);
+ }
+
+ return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
+#else
+ return -EINVAL;
+#endif
+}
+
static int add_ip_rule(struct mlx4_en_priv *priv,
- struct ethtool_rxnfc *cmd,
- struct list_head *list_h)
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h)
{
- struct mlx4_spec_list *spec_l3;
+ int err;
+ struct mlx4_spec_list *spec_l2 = NULL;
+ struct mlx4_spec_list *spec_l3 = NULL;
struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
- spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
- if (!spec_l3) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
- return -ENOMEM;
+ spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ if (!spec_l2 || !spec_l3) {
+ err = -ENOMEM;
+ goto free_spec;
}
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
+ cmd->fs.h_u.
+ usr_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
if (l3_mask->ip4src)
@@ -695,34 +760,52 @@ static int add_ip_rule(struct mlx4_en_priv *priv,
list_add_tail(&spec_l3->list, list_h);
return 0;
+
+free_spec:
+ kfree(spec_l2);
+ kfree(spec_l3);
+ return err;
}
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
struct ethtool_rxnfc *cmd,
struct list_head *list_h, int proto)
{
- struct mlx4_spec_list *spec_l3;
- struct mlx4_spec_list *spec_l4;
+ int err;
+ struct mlx4_spec_list *spec_l2 = NULL;
+ struct mlx4_spec_list *spec_l3 = NULL;
+ struct mlx4_spec_list *spec_l4 = NULL;
struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
- spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
- spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
- if (!spec_l4 || !spec_l3) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
- kfree(spec_l3);
- kfree(spec_l4);
- return -ENOMEM;
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+ spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
+ if (!spec_l2 || !spec_l3 || !spec_l4) {
+ err = -ENOMEM;
+ goto free_spec;
}
spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
if (proto == TCP_V4_FLOW) {
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+ spec_l2,
+ cmd->fs.h_u.
+ tcp_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
} else {
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+ spec_l2,
+ cmd->fs.h_u.
+ udp_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
@@ -744,6 +827,12 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
list_add_tail(&spec_l4->list, list_h);
return 0;
+
+free_spec:
+ kfree(spec_l2);
+ kfree(spec_l3);
+ kfree(spec_l4);
+ return err;
}
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
@@ -751,43 +840,23 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
struct list_head *rule_list_h)
{
int err;
- __be64 be_mac;
struct ethhdr *eth_spec;
- struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_spec_list *spec_l2;
- __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+ struct mlx4_en_priv *priv = netdev_priv(dev);
err = mlx4_en_validate_flow(dev, cmd);
if (err)
return err;
- spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
- if (!spec_l2)
- return -ENOMEM;
-
- if (cmd->fs.flow_type & FLOW_MAC_EXT) {
- memcpy(&be_mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
- } else {
- u64 mac = priv->mac & MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
- }
-
- spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
- if ((cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) != ETHER_FLOW)
- memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
-
- if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
- spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
- spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
- }
-
- list_add_tail(&spec_l2->list, rule_list_h);
-
switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ if (!spec_l2)
+ return -ENOMEM;
+
eth_spec = &cmd->fs.h_u.ether_spec;
- memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+ mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
+ &eth_spec->h_dest[0]);
spec_l2->eth.ether_type = eth_spec->h_proto;
if (eth_spec->h_proto)
spec_l2->eth.ether_type_enable = 1;
@@ -861,6 +930,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
loc_rule->id = 0;
memset(&loc_rule->flow_spec, 0,
sizeof(struct ethtool_rx_flow_spec));
+ list_del(&loc_rule->list);
}
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) {
@@ -871,6 +941,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
loc_rule->id = reg_id;
memcpy(&loc_rule->flow_spec, &cmd->fs,
sizeof(struct ethtool_rx_flow_spec));
+ list_add_tail(&loc_rule->list, &priv->ethtool_list);
out_free_list:
list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
@@ -904,6 +975,7 @@ static int mlx4_en_flow_detach(struct net_device *dev,
}
rule->id = 0;
memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+ list_del(&rule->list);
out:
return err;
@@ -952,7 +1024,8 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
cmd->cmd == ETHTOOL_GRXCLSRULE ||
cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
- mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ (mdev->dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
return -EINVAL;
switch (cmd->cmd) {
@@ -988,7 +1061,8 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ if (mdev->dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
return -EINVAL;
switch (cmd->cmd) {
@@ -1037,7 +1111,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
mlx4_en_free_resources(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 3a2b8c65642d..b2cca58de910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -64,7 +64,7 @@ static const char mlx4_en_version[] =
/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
- "Enable RSS for incomming UDP traffic or disabled (0)");
+ "Enable RSS for incoming UDP traffic or disabled (0)");
/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv,
return i;
}
+void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
+ MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
+
+ /* Drop looped-back packets (RX filtering) only when SRIOV
+ * (multi-function) is enabled and neither the loopback selftest
+ * nor the loopback feature (flb) is active
+ */
+ if (mlx4_is_mfunc(priv->mdev->dev) &&
+ !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
+ priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;
+
+ /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
+ * is requested
+ */
+ if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
+ priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
+}
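A minimal standalone sketch of the flag selection implemented above; the struct and helper names below are illustrative, not part of the patch:

	#include <stdbool.h>

	struct lb_flags {
		bool rx_filter_needed;		/* MLX4_EN_FLAG_RX_FILTER_NEEDED */
		bool enable_hw_loopback;	/* MLX4_EN_FLAG_ENABLE_HW_LOOPBACK */
	};

	static struct lb_flags loopback_flags(bool sriov, bool loopback_feature,
					      bool selftest_running)
	{
		struct lb_flags f = { false, false };

		/* Own-MAC RX filtering is only needed when the eSwitch may loop
		 * packets back (SRIOV) and neither the loopback feature nor the
		 * loopback selftest asked for them.
		 */
		if (sriov && !loopback_feature && !selftest_running)
			f.rx_filter_needed = true;

		/* The dst MAC is placed in the TX WQE whenever eSwitch loopback
		 * may be used: SRIOV mode or the loopback selftest.
		 */
		if (sriov || selftest_running)
			f.enable_hw_loopback = true;

		return f;
	}

	int main(void)
	{
		struct lb_flags f = loopback_flags(true, false, false);

		return (f.rx_filter_needed && f.enable_hw_loopback) ? 0 : 1;
	}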
+
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
@@ -191,10 +213,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
printk_once(KERN_INFO "%s", mlx4_en_version);
- mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
- dev_err(&dev->pdev->dev, "Device struct alloc failed, "
- "aborting.\n");
err = -ENOMEM;
goto err_free_res;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7d1287f81a31..5088dc5c3d1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -132,17 +132,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
.priority = MLX4_DOMAIN_RFS,
};
int rc;
- __be64 mac;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
list_add_tail(&spec_tcp.list, &rule.list);
- mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
-
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
- memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
filter->activated = 0;
@@ -413,6 +410,235 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
return 0;
}
+static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+{
+ int i;
+ for (i = ETH_ALEN - 1; i >= 0; --i) {
+ dst_mac[i] = src_mac & 0xff;
+ src_mac >>= 8;
+ }
+ memset(&dst_mac[ETH_ALEN], 0, 2);
+}
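For reference, a minimal userspace sketch of the byte-array/u64 MAC round trip that mlx4_en_u64_to_mac() and the driver's mlx4_en_mac_to_u64() perform; the helper names and the sample address below are illustrative only:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define ETH_ALEN 6

	static uint64_t mac_to_u64(const uint8_t addr[ETH_ALEN])
	{
		uint64_t mac = 0;
		int i;

		/* addr[0] ends up in bits 47..40, addr[5] in bits 7..0 */
		for (i = 0; i < ETH_ALEN; i++)
			mac = (mac << 8) | addr[i];
		return mac;
	}

	static void u64_to_mac(uint8_t dst[ETH_ALEN], uint64_t src)
	{
		int i;

		for (i = ETH_ALEN - 1; i >= 0; i--) {
			dst[i] = src & 0xff;
			src >>= 8;
		}
	}

	int main(void)
	{
		const uint8_t burned[ETH_ALEN] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
		uint8_t out[ETH_ALEN];

		assert(mac_to_u64(burned) == 0x0002c9123456ULL);
		u64_to_mac(out, mac_to_u64(burned));
		assert(memcmp(out, burned, ETH_ALEN) == 0);
		return 0;
	}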
+
+static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
+ unsigned char *mac, int *qpn, u64 *reg_id)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int err;
+
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = *qpn;
+ memcpy(&gid[10], mac, ETH_ALEN);
+ gid[5] = priv->port;
+
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec_eth = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.port = priv->port;
+ rule.qpn = *qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ list_add_tail(&spec_eth.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (err)
+ en_warn(priv, "Failed Attaching Unicast\n");
+
+ return err;
+}
+
+static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
+ unsigned char *mac, int qpn, u64 reg_id)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = qpn;
+ memcpy(&gid[10], mac, ETH_ALEN);
+ gid[5] = priv->port;
+
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ mlx4_flow_detach(dev, reg_id);
+ break;
+ }
+ default:
+ en_err(priv, "Invalid steering mode.\n");
+ }
+}
+
+static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ struct mlx4_mac_entry *entry;
+ int index = 0;
+ int err = 0;
+ u64 reg_id;
+ int *qpn = &priv->base_qpn;
+ u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+ en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
+ priv->dev->dev_addr);
+ index = mlx4_register_mac(dev, priv->port, mac);
+ if (index < 0) {
+ err = index;
+ en_err(priv, "Failed adding MAC: %pM\n",
+ priv->dev->dev_addr);
+ return err;
+ }
+
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
+ int base_qpn = mlx4_get_base_qpn(dev, priv->port);
+ *qpn = base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
+ if (err) {
+ en_err(priv, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+ if (err)
+ goto steer_err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+ entry->reg_id = reg_id;
+
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+ return 0;
+
+alloc_err:
+ mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, priv->port, mac);
+ return err;
+}
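A small sketch of the bucket selection used by the mac_hash table above; MLX4_EN_MAC_HASH_IDX and MLX4_EN_MAC_HASH_SIZE are not visible in this hunk, so the index byte and table size below are assumptions for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define ETH_ALEN	6
	#define HASH_IDX	5	/* assumed: last byte of the address */
	#define HASH_SIZE	256	/* assumed: one bucket per byte value */

	static unsigned int mac_bucket(const uint8_t addr[ETH_ALEN])
	{
		/* No real hashing: a single byte of the MAC indexes the bucket */
		return addr[HASH_IDX] & (HASH_SIZE - 1);
	}

	int main(void)
	{
		const uint8_t port_mac[ETH_ALEN] = { 0x00, 0x02, 0xc9, 0x01, 0x02, 0x03 };

		printf("bucket %u\n", mac_bucket(port_mac));
		return 0;
	}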
+
+static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int qpn = priv->base_qpn;
+ u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+ en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+ priv->dev->dev_addr);
+ mlx4_unregister_mac(dev, priv->port, mac);
+
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+
+ mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac,
+ priv->dev->dev_addr)) {
+ en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
+ priv->port, priv->dev->dev_addr, qpn);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+ mlx4_qp_release_range(dev, qpn, 1);
+
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ break;
+ }
+ }
+ }
+}
+
+static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
+ unsigned char *new_mac, unsigned char *prev_mac)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int err = 0;
+ u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+
+ bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+ mlx4_unregister_mac(dev, priv->port,
+ prev_mac_u64);
+ hlist_del_rcu(&entry->hlist);
+ synchronize_rcu();
+ memcpy(entry->mac, new_mac, ETH_ALEN);
+ entry->reg_id = 0;
+ mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[mac_hash]);
+ mlx4_register_mac(dev, priv->port, new_mac_u64);
+ err = mlx4_en_uc_steer_add(priv, new_mac,
+ &qpn,
+ &entry->reg_id);
+ return err;
+ }
+ }
+ return -EINVAL;
+ }
+
+ return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
+}
+
u64 mlx4_en_mac_to_u64(u8 *addr)
{
u64 mac = 0;
@@ -435,7 +661,6 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
- priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
queue_work(mdev->workqueue, &priv->mac_task);
return 0;
}
@@ -450,13 +675,14 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
/* Remove old MAC and insert the new one */
- err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac);
+ err = mlx4_en_replace_mac(priv, priv->base_qpn,
+ priv->dev->dev_addr, priv->prev_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
+ memcpy(priv->prev_mac, priv->dev->dev_addr,
+ sizeof(priv->prev_mac));
} else
- en_dbg(HW, priv, "Port is down while "
- "registering mac, exiting...\n");
+ en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
mutex_unlock(&mdev->state_lock);
}
@@ -482,7 +708,6 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
if (!tmp) {
- en_err(priv, "failed to allocate multicast list\n");
mlx4_en_clear_list(dev);
return;
}
@@ -526,181 +751,153 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
}
}
if (!found) {
- new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+ new_mc = kmemdup(src_tmp,
+ sizeof(struct mlx4_en_mc_list),
GFP_KERNEL);
- if (!new_mc) {
- en_err(priv, "Failed to allocate current multicast list\n");
+ if (!new_mc)
return;
- }
- memcpy(new_mc, src_tmp,
- sizeof(struct mlx4_en_mc_list));
+
new_mc->action = MCLIST_ADD;
list_add_tail(&new_mc->list, dst);
}
}
}
-static void mlx4_en_set_multicast(struct net_device *dev)
+static void mlx4_en_set_rx_mode(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->port_up)
return;
- queue_work(priv->mdev->workqueue, &priv->mcast_task);
+ queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
-static void mlx4_en_do_set_multicast(struct work_struct *work)
+static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
+ struct mlx4_en_dev *mdev)
{
- struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- mcast_task);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct net_device *dev = priv->dev;
- struct mlx4_en_mc_list *mclist, *tmp;
- u64 mcast_addr = 0;
- u8 mc_list[16] = {0};
int err = 0;
- mutex_lock(&mdev->state_lock);
- if (!mdev->device_up) {
- en_dbg(HW, priv, "Card is not up, "
- "ignoring multicast change.\n");
- goto out;
- }
- if (!priv->port_up) {
- en_dbg(HW, priv, "Port is down, "
- "ignoring multicast change.\n");
- goto out;
- }
-
- if (!netif_carrier_ok(dev)) {
- if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
- if (priv->port_state.link_state) {
- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
- netif_carrier_on(dev);
- en_dbg(LINK, priv, "Link Up\n");
- }
- }
- }
-
- /*
- * Promsicuous mode: disable all filters
- */
-
- if (dev->flags & IFF_PROMISC) {
- if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
- if (netif_msg_rx_status(priv))
- en_warn(priv, "Entering promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_PROMISC;
-
- /* Enable promiscouos mode */
- switch (mdev->dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_DEVICE_MANAGED:
- err = mlx4_flow_steer_promisc_add(mdev->dev,
- priv->port,
- priv->base_qpn,
- MLX4_FS_PROMISC_UPLINK);
- if (err)
- en_err(priv, "Failed enabling promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- break;
-
- case MLX4_STEERING_MODE_B0:
- err = mlx4_unicast_promisc_add(mdev->dev,
- priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed enabling unicast promiscuous mode\n");
-
- /* Add the default qp number as multicast
- * promisc
- */
- if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev,
- priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed enabling multicast promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- }
- break;
-
- case MLX4_STEERING_MODE_A0:
- err = mlx4_SET_PORT_qpn_calc(mdev->dev,
- priv->port,
- priv->base_qpn,
- 1);
- if (err)
- en_err(priv, "Failed enabling promiscuous mode\n");
- break;
- }
-
- /* Disable port multicast filter (unconditionally) */
- err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
- 0, MLX4_MCAST_DISABLE);
- if (err)
- en_err(priv, "Failed disabling "
- "multicast filter\n");
-
- /* Disable port VLAN filter */
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
- en_err(priv, "Failed disabling VLAN filter\n");
- }
- goto out;
- }
-
- /*
- * Not in promiscuous mode
- */
-
- if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
if (netif_msg_rx_status(priv))
- en_warn(priv, "Leaving promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+ en_warn(priv, "Entering promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_PROMISC;
- /* Disable promiscouos mode */
+ /* Enable promiscuous mode */
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
- err = mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_UPLINK);
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_UPLINK);
if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
break;
case MLX4_STEERING_MODE_B0:
- err = mlx4_unicast_promisc_remove(mdev->dev,
- priv->base_qpn,
- priv->port);
+ err = mlx4_unicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
if (err)
- en_err(priv, "Failed disabling unicast promiscuous mode\n");
- /* Disable Multicast promisc */
- if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev,
- priv->base_qpn,
- priv->port);
+ en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+ /* Add the default qp number as multicast
+ * promisc
+ */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
if (err)
- en_err(priv, "Failed disabling multicast promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed enabling multicast promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
break;
case MLX4_STEERING_MODE_A0:
err = mlx4_SET_PORT_qpn_calc(mdev->dev,
priv->port,
- priv->base_qpn, 0);
+ priv->base_qpn,
+ 1);
if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
+ en_err(priv, "Failed enabling promiscuous mode\n");
break;
}
- /* Enable port VLAN filter */
+ /* Disable port multicast filter (unconditionally) */
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ en_err(priv, "Failed disabling multicast filter\n");
+
+ /* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
- en_err(priv, "Failed enabling VLAN filter\n");
+ en_err(priv, "Failed disabling VLAN filter\n");
}
+}
+
+static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
+ struct mlx4_en_dev *mdev)
+{
+ int err = 0;
+
+ if (netif_msg_rx_status(priv))
+ en_warn(priv, "Leaving promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling unicast promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn, 0);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ break;
+ }
+
+ /* Enable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed enabling VLAN filter\n");
+}
+
+static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
+ struct net_device *dev,
+ struct mlx4_en_dev *mdev)
+{
+ struct mlx4_en_mc_list *mclist, *tmp;
+ u64 mcast_addr = 0;
+ u8 mc_list[16] = {0};
+ int err = 0;
/* Enable/disable the multicast filter according to IFF_ALLMULTI */
if (dev->flags & IFF_ALLMULTI) {
@@ -767,9 +964,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
/* Update multicast list - we cache all addresses so they won't
* change while HW is updated holding the command semaphor */
- netif_tx_lock_bh(dev);
+ netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
- netif_tx_unlock_bh(dev);
+ netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
@@ -814,6 +1011,170 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
}
}
}
+}
+
+static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+ struct net_device *dev,
+ struct mlx4_en_dev *mdev)
+{
+ struct netdev_hw_addr *ha;
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ bool found;
+ u64 mac;
+ int err = 0;
+ struct hlist_head *bucket;
+ unsigned int i;
+ int removed = 0;
+ u32 prev_flags;
+
+ /* Note that we do not need to protect our mac_hash traversal with rcu,
+ * since all modification code is protected by mdev->state_lock
+ */
+
+ /* find what to remove */
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ found = false;
+ netdev_for_each_uc_addr(ha, dev) {
+ if (ether_addr_equal_64bits(entry->mac,
+ ha->addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ /* MAC address of the port is not in uc list */
+ if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
+ found = true;
+
+ if (!found) {
+ mac = mlx4_en_mac_to_u64(entry->mac);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ priv->base_qpn,
+ entry->reg_id);
+ mlx4_unregister_mac(mdev->dev, priv->port, mac);
+
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
+ entry->mac, priv->port);
+ ++removed;
+ }
+ }
+ }
+
+ /* if we didn't remove anything, there is no use in trying to add
+ * again once we are in a forced promisc mode state
+ */
+ if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
+ return;
+
+ prev_flags = priv->flags;
+ priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
+
+ /* find what to add */
+ netdev_for_each_uc_addr(ha, dev) {
+ found = false;
+ bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
+ hlist_for_each_entry(entry, n, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
+ ha->addr, priv->port);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ }
+ mac = mlx4_en_mac_to_u64(ha->addr);
+ memcpy(entry->mac, ha->addr, ETH_ALEN);
+ err = mlx4_register_mac(mdev->dev, priv->port, mac);
+ if (err < 0) {
+ en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
+ ha->addr, priv->port, err);
+ kfree(entry);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ }
+ err = mlx4_en_uc_steer_add(priv, ha->addr,
+ &priv->base_qpn,
+ &entry->reg_id);
+ if (err) {
+ en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
+ ha->addr, priv->port, err);
+ mlx4_unregister_mac(mdev->dev, priv->port, mac);
+ kfree(entry);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ } else {
+ unsigned int mac_hash;
+ en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
+ ha->addr, priv->port);
+ mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ hlist_add_head_rcu(&entry->hlist, bucket);
+ }
+ }
+ }
+
+ if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+ en_warn(priv, "Forcing promiscuous mode on port:%d\n",
+ priv->port);
+ } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+ en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
+ priv->port);
+ }
+}
+
+static void mlx4_en_do_set_rx_mode(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ rx_mode_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
+ goto out;
+ }
+ if (!priv->port_up) {
+ en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
+ goto out;
+ }
+
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
+ if (dev->priv_flags & IFF_UNICAST_FLT)
+ mlx4_en_do_uc_filter(priv, dev, mdev);
+
+ /* Promiscuous mode: disable all filters */
+ if ((dev->flags & IFF_PROMISC) ||
+ (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
+ mlx4_en_set_promisc_mode(priv, mdev);
+ goto out;
+ }
+
+ /* Not in promiscuous mode */
+ if (priv->flags & MLX4_EN_FLAG_PROMISC)
+ mlx4_en_clear_promisc_mode(priv, mdev);
+
+ mlx4_en_do_multicast(priv, dev, mdev);
out:
mutex_unlock(&mdev->state_lock);
}
@@ -876,9 +1237,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
- en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
- "rx_frames:%d rx_usecs:%d\n",
- priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+ en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
+ priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -959,8 +1319,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
cq->moder_time = moder_time;
err = mlx4_en_set_cq_moder(priv, cq);
if (err)
- en_err(priv, "Failed modifying moderation "
- "for cq:%d\n", ring);
+ en_err(priv, "Failed modifying moderation for cq:%d\n",
+ ring);
}
priv->last_moder_packets[ring] = rx_packets;
priv->last_moder_bytes[ring] = rx_bytes;
@@ -977,12 +1337,12 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
- if (err)
- en_dbg(HW, priv, "Could not update stats\n");
-
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
+ err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+ if (err)
+ en_dbg(HW, priv, "Could not update stats\n");
+
if (priv->port_up)
mlx4_en_auto_moderation(priv);
@@ -1039,6 +1399,9 @@ int mlx4_en_start_port(struct net_device *dev)
INIT_LIST_HEAD(&priv->mc_list);
INIT_LIST_HEAD(&priv->curr_list);
+ INIT_LIST_HEAD(&priv->ethtool_list);
+ memset(&priv->ethtool_rules[0], 0,
+ sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
@@ -1074,8 +1437,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Set qp number */
en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
- err = mlx4_get_eth_qp(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn);
+ err = mlx4_en_get_qp(priv);
if (err) {
en_err(priv, "Failed getting eth qp\n");
goto cq_err;
@@ -1138,8 +1500,8 @@ int mlx4_en_start_port(struct net_device *dev)
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err) {
- en_err(priv, "Failed setting port general configurations "
- "for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+ priv->port, err);
goto tx_err;
}
/* Set default qp number */
@@ -1167,23 +1529,16 @@ int mlx4_en_start_port(struct net_device *dev)
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
- if (mdev->dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_UPLINK);
- mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_ALL_MULTI);
- }
/* Schedule multicast task to populate multicast list */
- queue_work(mdev->workqueue, &priv->mcast_task);
+ queue_work(mdev->workqueue, &priv->rx_mode_task);
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
priv->port_up = true;
netif_tx_start_all_queues(dev);
+ netif_device_attach(dev);
+
return 0;
tx_err:
@@ -1195,7 +1550,7 @@ tx_err:
rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mlx4_en_put_qp(priv);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -1206,11 +1561,12 @@ cq_err:
}
-void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev, int detach)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_mc_list *mclist, *tmp;
+ struct ethtool_flow_id *flow, *tmp_flow;
int i;
u8 mc_list[16] = {0};
@@ -1221,12 +1577,42 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
+ if (detach)
+ netif_device_detach(dev);
netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
+ netif_tx_disable(dev);
+
/* Set port as not active */
priv->port_up = false;
+ /* Promiscuous mode */
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
+ MLX4_EN_FLAG_MC_PROMISC);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ }
+
/* Detach All multicasts */
memset(&mc_list[10], 0xff, ETH_ALEN);
mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1263,8 +1649,20 @@ void mlx4_en_stop_port(struct net_device *dev)
mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */
- mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
- mdev->mac_removed[priv->port] = 1;
+ mlx4_en_put_qp(priv);
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+ mdev->mac_removed[priv->port] = 1;
+
+ /* Remove flow steering rules for the port*/
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ ASSERT_RTNL();
+ list_for_each_entry_safe(flow, tmp_flow,
+ &priv->ethtool_list, list) {
+ mlx4_flow_detach(mdev->dev, flow->id);
+ list_del(&flow->list);
+ }
+ }
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -1284,15 +1682,12 @@ static void mlx4_en_restart(struct work_struct *work)
watchdog_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- int i;
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
- mlx4_en_stop_port(dev);
- for (i = 0; i < priv->tx_ring_num; i++)
- netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
+ mlx4_en_stop_port(dev, 1);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
}
@@ -1362,7 +1757,7 @@ static int mlx4_en_close(struct net_device *dev)
mutex_lock(&mdev->state_lock);
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 0);
netif_carrier_off(dev);
mutex_unlock(&mdev->state_lock);
@@ -1437,9 +1832,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
if (!priv->dev->rx_cpu_rmap)
goto err;
-
- INIT_LIST_HEAD(&priv->filters);
- spin_lock_init(&priv->filters_lock);
#endif
return 0;
@@ -1503,7 +1895,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed restarting port:%d\n",
@@ -1527,17 +1919,92 @@ static int mlx4_en_set_features(struct net_device *netdev,
priv->ctrl_flags &=
cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ mlx4_en_update_loopback_state(netdev, features);
+
return 0;
}
+static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 flags)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int err;
+
+ if (!mlx4_is_mfunc(mdev))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses, allow only
+ * permanent addresses if ndm_state is given
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ en_info(priv, "Add FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+static int mlx4_en_fdb_del(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int err;
+
+ if (!mlx4_is_mfunc(mdev))
+ return -EOPNOTSUPP;
+
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ en_info(priv, "Del FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+static int mlx4_en_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev, int idx)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+
+ if (mlx4_is_mfunc(mdev))
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats = mlx4_en_get_stats,
- .ndo_set_rx_mode = mlx4_en_set_multicast,
+ .ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
@@ -1552,6 +2019,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
+ .ndo_fdb_add = mlx4_en_fdb_add,
+ .ndo_fdb_del = mlx4_en_fdb_del,
+ .ndo_fdb_dump = mlx4_en_fdb_dump,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1604,10 +2074,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
goto out;
}
priv->rx_ring_num = prof->rx_ring_num;
+ priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
- INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+ INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
@@ -1617,22 +2088,35 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
+ INIT_HLIST_HEAD(&priv->mac_hash[i]);
+
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
- priv->mac = mdev->dev->caps.def_mac[priv->port];
- if (ILLEGAL_MAC(priv->mac)) {
- en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
- priv->port, priv->mac);
+
+ /* Set default MAC */
+ dev->addr_len = ETH_ALEN;
+ mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
}
+ memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
+
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
err = mlx4_en_alloc_resources(priv);
if (err)
goto out;
+#ifdef CONFIG_RFS_ACCEL
+ INIT_LIST_HEAD(&priv->filters);
+ spin_lock_init(&priv->filters_lock);
+#endif
+
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
@@ -1652,13 +2136,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
- /* Set defualt MAC */
- dev->addr_len = ETH_ALEN;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
- dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
- }
-
/*
* Set driver features
*/
@@ -1678,6 +2155,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_STEERING_MODE_DEVICE_MANAGED)
dev->hw_features |= NETIF_F_NTUPLE;
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -1691,6 +2171,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
/* Configure port */
mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f76c9671f362..ce38654bbdd0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -563,9 +563,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
- struct ethhdr *ethh;
- dma_addr_t dma;
- u64 s_mac;
+ int factor = priv->cqe_factor;
if (!priv->port_up)
return 0;
@@ -574,7 +572,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
index = cq->mcq.cons_index & ring->size_mask;
- cqe = &cq->buf[index];
+ cqe = &cq->buf[(index << factor) + factor];
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -602,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
- /* Get pointer to first fragment since we haven't skb yet and
- * cast it to ethhdr struct */
- dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
- DMA_FROM_DEVICE);
- ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].offset);
- s_mac = mlx4_en_mac_to_u64(ethh->h_source);
-
- /* If source MAC is equal to our own MAC and not performing
- * the selftest or flb disabled - drop the packet */
- if (s_mac == priv->mac &&
- !((dev->features & NETIF_F_LOOPBACK) ||
- priv->validate_loopback))
- goto next;
+ /* Drop packets that the HW looped back to us: needed only when
+ * SRIOV is enabled and neither the loopback selftest nor the
+ * loopback feature (flb) is active
+ */
+ if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
+ struct ethhdr *ethh;
+ dma_addr_t dma;
+ /* Get pointer to first fragment since we haven't
+ * skb yet and cast it to ethhdr struct
+ */
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+ DMA_FROM_DEVICE);
+ ethh = (struct ethhdr *)(page_address(frags[0].page) +
+ frags[0].offset);
+
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n;
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+
+ /* Drop the packet, since HW loopback-ed it */
+ mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac,
+ ethh->h_source)) {
+ rcu_read_unlock();
+ goto next;
+ }
+ }
+ rcu_read_unlock();
+ }
+ }
/*
* Packet is OK - process it.
@@ -709,7 +727,7 @@ next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
- cqe = &cq->buf[index];
+ cqe = &cq->buf[(index << factor) + factor];
if (++polled == budget)
goto out;
}
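A minimal sketch of the (index << factor) + factor arithmetic used above: with 64-byte CQEs the buffer is still addressed in 32-byte struct mlx4_cqe units, so the index is doubled and the reserved first half of each entry skipped; purely illustrative:

	#include <stdio.h>

	static unsigned int cqe_slot(unsigned int index, unsigned int factor)
	{
		/* slot number in 32-byte units within the CQ buffer */
		return (index << factor) + factor;
	}

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < 4; i++)
			printf("cqe %u: slot %u (32B CQE), slot %u (64B CQE)\n",
			       i, cqe_slot(i, 0), cqe_slot(i, 1));
		return 0;
	}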
@@ -834,11 +852,9 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_qp_context *context;
int err = 0;
- context = kmalloc(sizeof *context , GFP_KERNEL);
- if (!context) {
- en_err(priv, "Failed to allocate qp context\n");
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
return -ENOMEM;
- }
err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index bf2e5d3f177c..3488c6d9e6b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
priv->loopback_ok = 0;
priv->validate_loopback = 1;
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n");
@@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1f571d009155..49308cc65ee7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -294,6 +294,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
cnt++;
}
+ netdev_tx_reset_queue(ring->tx_queue);
+
if (cnt)
en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
@@ -315,12 +317,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
struct mlx4_cqe *buf = cq->buf;
u32 packets = 0;
u32 bytes = 0;
+ int factor = priv->cqe_factor;
if (!priv->port_up)
return;
index = cons_index & size_mask;
- cqe = &buf[index];
+ cqe = &buf[(index << factor) + factor];
ring_index = ring->cons & size_mask;
/* Process all completed CQEs */
@@ -349,7 +352,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
++cons_index;
index = cons_index & size_mask;
- cqe = &buf[index];
+ cqe = &buf[(index << factor) + factor];
}
@@ -514,10 +517,6 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
wmb();
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
- tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- (!!vlan_tx_tag_present(skb));
- tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -591,7 +590,21 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(ring->tx_queue);
priv->port_stats.queue_stopped++;
- return NETDEV_TX_BUSY;
+ /* If the queue was emptied between the availability check above
+ * and netif_tx_stop_queue(), it must be woken up here, otherwise
+ * it would remain stopped forever.
+ * The memory barrier makes sure ring->cons is re-read only after
+ * the queue has been stopped.
+ */
+ wmb();
+
+ if (unlikely(((int)(ring->prod - ring->cons)) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ netif_tx_wake_queue(ring->tx_queue);
+ priv->port_stats.wake_queue++;
+ } else {
+ return NETDEV_TX_BUSY;
+ }
}
/* Track current inflight packets for performance analysis */
@@ -629,10 +642,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++;
}
- /* Copy dst mac address to wqe */
- ethh = (struct ethhdr *)skb->data;
- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
+ /* Copy dst mac address to wqe. This allows loopback in eSwitch,
+ * so that VFs and PF can communicate with each other
+ */
+ ethh = (struct ethhdr *)skb->data;
+ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ }
+
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c48cf6f6529c..251ae2f93116 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -101,15 +101,21 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
mb();
}
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
{
- unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
- return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
+ /* (entry & (eq->nent - 1)) gives us a cyclic array */
+ unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
+ /* CX3 is capable of extending the EQE from 32 to 64 bytes.
+ * When this feature is enabled, the first (in the lower addresses)
+ * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
+ * contain the legacy EQE information.
+ */
+ return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
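A minimal userspace sketch of the offset arithmetic in get_eqe() above; the entry size mirrors MLX4_EQ_ENTRY_SIZE, while the page size is an assumption for illustration:

	#include <stdio.h>

	#define EQ_ENTRY_SIZE	32UL	/* MLX4_EQ_ENTRY_SIZE */
	#define PAGE_SZ		4096UL	/* assumed page size */

	static void eqe_location(unsigned int entry, unsigned int nent,
				 unsigned int eqe_factor,
				 unsigned long *page, unsigned long *off_in_page)
	{
		/* nent is a power of two, so the mask yields a cyclic index */
		unsigned long off = (entry & (nent - 1)) * (EQ_ENTRY_SIZE << eqe_factor);

		*page = off / PAGE_SZ;
		/* With 64-byte EQEs the first 32 bytes of every entry are
		 * reserved; the legacy EQE layout starts 32 bytes further in.
		 */
		*off_in_page = (off + (eqe_factor ? EQ_ENTRY_SIZE : 0)) % PAGE_SZ;
	}

	int main(void)
	{
		unsigned long page, off;
		unsigned int i;

		for (i = 126; i < 130; i++) {
			eqe_location(i, 512, 1, &page, &off);
			printf("entry %u -> page %lu, offset %lu\n", i, page, off);
		}
		return 0;
	}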
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
{
- struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
+ struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
@@ -177,7 +183,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
return;
}
- memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
s_eqe->slave_id = slave;
/* ensure all information is written before setting the ownersip bit */
wmb();
@@ -401,6 +407,7 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int i;
int err;
+ unsigned long flags;
mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
@@ -412,10 +419,10 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
slave_state[i].is_slave_going_down = 0;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/*notify the FW:*/
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
@@ -440,8 +447,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
u8 update_slave_state;
int i;
enum slave_port_gen_event gen_event;
+ unsigned long flags;
- while ((eqe = next_eqe_sw(eq))) {
+ while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -647,13 +655,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
} else
update_slave_state = 1;
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (update_slave_state) {
priv->mfunc.master.slave_state[flr_slave].active = false;
priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
}
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_flr_event_work);
break;
@@ -864,7 +872,8 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
- npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
+ /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
+ npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
@@ -966,8 +975,9 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int err;
- int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
int i;
+ /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
+ int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 4f30b99324cf..38b62c78d5da 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -110,6 +110,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[42] = "Multicast VEP steering support",
[48] = "Counters support",
[59] = "Port management change event support",
+ [61] = "64 byte EQE support",
+ [62] = "64 byte CQE support",
};
int i;
@@ -125,7 +127,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
- [3] = "Device manage flow steering support"
+ [3] = "Device manage flow steering support",
+ [4] = "Automatic mac reassignment support"
};
int i;
@@ -235,7 +238,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = dev->caps.num_ports;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
- size = 0; /* no PF behaviour is set for now */
+ size = dev->caps.function_caps; /* set PF behaviours */
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
field = 0; /* protected FMR support not available as yet */
@@ -476,6 +479,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
+#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -635,6 +639,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
+ if (field & 1<<6)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1237,6 +1244,24 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+ /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
+ *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
+ dev->caps.eqe_size = 64;
+ dev->caps.eqe_factor = 1;
+ } else {
+ dev->caps.eqe_size = 32;
+ dev->caps.eqe_factor = 0;
+ }
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
+ *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
+ dev->caps.cqe_size = 64;
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
+ } else {
+ dev->caps.cqe_size = 32;
+ }
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -1267,14 +1292,14 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* Enable Ethernet flow steering
* with udp unicast and tcp unicast
*/
- MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
INIT_HCA_FS_ETH_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
/* Enable IPoIB flow steering
* with udp unicast and tcp unicast
*/
- MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
INIT_HCA_FS_IB_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
@@ -1318,7 +1343,9 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
+ u32 dword_field;
int err;
+ u8 byte_field;
#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
@@ -1351,10 +1378,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+ MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+ if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+ param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ } else {
+ MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
+ if (byte_field & 0x8)
+ param->steering_mode = MLX4_STEERING_MODE_B0;
+ else
+ param->steering_mode = MLX4_STEERING_MODE_A0;
+ }
/* steering attributes */
- if (dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED) {
-
+ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
@@ -1370,6 +1405,13 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
}
+ /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+ MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
+ if (byte_field & 0x20) /* 64-bytes eqe enabled */
+ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
+ if (byte_field & 0x40) /* 64-bytes cqe enabled */
+ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
+
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 85abe9c11a22..3af33ff669cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -171,7 +171,8 @@ struct mlx4_init_hca_param {
u8 log_mpt_sz;
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
- u8 fs_hash_enable_bits;
+ u8 steering_mode; /* for QUERY_HCA */
+ u64 dev_cap_enabled;
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 200cc0ec8052..b9dde139dac5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -85,18 +85,24 @@ static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
-int mlx4_log_num_mgm_entry_size = 10;
+int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" of qp per mcg, for example:"
- " 10 gives 248.range: 9<="
+ " 10 gives 248.range: 7 <="
" log_num_mgm_entry_size <= 12."
- " Not in use with device managed"
- " flow steering");
+ " To activate device managed"
+ " flow steering when available, set to -1");
+
+static bool enable_64b_cqe_eqe;
+module_param(enable_64b_cqe_eqe, bool, 0444);
+MODULE_PARM_DESC(enable_64b_cqe_eqe,
+ "Enable 64 byte CQEs/EQEs when the the FW supports this");
#define HCA_GLOBAL_CAP_MASK 0
-#define PF_CONTEXT_BEHAVIOUR_MASK 0
+
+#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
@@ -275,28 +281,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
- if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
- dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
- dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
- dev->caps.fs_log_max_ucast_qp_range_size =
- dev_cap->fs_log_max_ucast_qp_range_size;
- } else {
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
- dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
- } else {
- dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
-
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
- "set to use B0 steering. Falling back to A0 steering mode.\n");
- }
- dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
- }
- mlx4_dbg(dev, "Steering mode is: %s\n",
- mlx4_steering_mode_str(dev->caps.steering_mode));
-
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -386,6 +370,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
+
+ if (!enable_64b_cqe_eqe) {
+ if (dev_cap->flags &
+ (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
+ mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
+ dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
+ dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
+ }
+ }
+
+ if ((dev->caps.flags &
+ (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
+ mlx4_is_master(dev))
+ dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
+
return 0;
}
/*The function checks if there are live vf, return the num of them*/
@@ -472,6 +471,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
}
EXPORT_SYMBOL(mlx4_is_slave_active);
+static void slave_adjust_steering_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *hca_param)
+{
+ dev->caps.steering_mode = hca_param->steering_mode;
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else
+ dev->caps.num_qp_per_mgm =
+ 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
+
+ mlx4_dbg(dev, "Steering mode is: %s\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode));
+}
+
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
int err;
@@ -599,6 +615,23 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
goto err_mem;
}
+ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
+ dev->caps.eqe_size = 64;
+ dev->caps.eqe_factor = 1;
+ } else {
+ dev->caps.eqe_size = 32;
+ dev->caps.eqe_factor = 0;
+ }
+
+ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
+ dev->caps.cqe_size = 64;
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
+ } else {
+ dev->caps.cqe_size = 32;
+ }
+
+ slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+
return 0;
err_mem:
@@ -1285,6 +1318,59 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
}
}
+static int choose_log_fs_mgm_entry_size(int qp_per_entry)
+{
+ int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
+
+ for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
+ i++) {
+ if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
+ break;
+ }
+
+ return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
+}
+
+static void choose_steering_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap)
+{
+ if (mlx4_log_num_mgm_entry_size == -1 &&
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
+ (!mlx4_is_mfunc(dev) ||
+ (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
+ choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
+ dev->oper_log_mgm_entry_size =
+ choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
+ dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+ else {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
+ "set to use B0 steering. Falling back to A0 steering mode.\n");
+ }
+ dev->oper_log_mgm_entry_size =
+ mlx4_log_num_mgm_entry_size > 0 ?
+ mlx4_log_num_mgm_entry_size :
+ MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+ }
+ mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
+ "modparam log_num_mgm_entry_size = %d\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode),
+ dev->oper_log_mgm_entry_size,
+ mlx4_log_num_mgm_entry_size);
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1324,25 +1410,11 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ choose_steering_mode(dev, &dev_cap);
+
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
- priv->fs_hash_mode = MLX4_FS_L2_HASH;
-
- switch (priv->fs_hash_mode) {
- case MLX4_FS_L2_HASH:
- init_hca.fs_hash_enable_bits = 0;
- break;
-
- case MLX4_FS_L2_L3_L4_HASH:
- /* Enable flow steering with
- * udp unicast and tcp unicast
- */
- init_hca.fs_hash_enable_bits =
- MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
- break;
- }
-
profile = default_profile;
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -1702,15 +1774,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;
if (msi_x) {
- /* In multifunction mode each function gets 2 msi-X vectors
- * one for data path completions anf the other for asynch events
- * or command completions */
- if (mlx4_is_mfunc(dev)) {
- nreq = 2;
- } else {
- nreq = min_t(int, dev->caps.num_eqs -
- dev->caps.reserved_eqs, nreq);
- }
+ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ nreq);
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
@@ -1768,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
- INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn =
- dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
- (port - 1) * (1 << log_num_mac);
+ info->base_qpn = mlx4_get_base_qpn(dev, port);
}
sprintf(info->dev_name, "mlx4_port%d", port);
@@ -1989,10 +2051,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
/* Allow large DMA segments, up to the firmware limit of 1 GB */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(&pdev->dev, "Device struct alloc failed, "
- "aborting.\n");
err = -ENOMEM;
goto err_release_regions;
}
@@ -2081,7 +2141,8 @@ slave_start:
dev->num_slaves = MLX4_MAX_NUM_SLAVES;
else {
dev->num_slaves = 0;
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init slave mfunc"
" interface, aborting.\n");
goto err_cmd;
@@ -2105,7 +2166,8 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init master mfunc"
"interface, aborting.\n");
goto err_close;
@@ -2122,6 +2184,7 @@ slave_start:
mlx4_enable_msi_x(dev);
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
+ err = -ENOSYS;
mlx4_err(dev, "INTx is not supported in multi-function mode."
" aborting.\n");
goto err_free_eq;
@@ -2416,6 +2479,17 @@ static int __init mlx4_verify_params(void)
port_type_array[0] = true;
}
+ if (mlx4_log_num_mgm_entry_size != -1 &&
+ (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
+ mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
+ pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
+ "in legal range (-1 or %d..%d)\n",
+ mlx4_log_num_mgm_entry_size,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+ return -1;
+ }
+
return 0;
}
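
For reference, choose_log_fs_mgm_entry_size() and slave_adjust_steering_mode() above both use the same sizing rule, num_qp_per_mgm = 4 * ((1 << log_entry_size) / 16 - 2), which is also where the "10 gives 248" example in the module parameter description comes from. The following standalone sketch (plain userspace C, not part of the patch) just tabulates that formula over the 7..12 range enforced by mlx4_verify_params():

#include <stdio.h>

/* num_qp_per_mgm for a given log entry size, as computed by
 * choose_log_fs_mgm_entry_size() and slave_adjust_steering_mode(). */
static int qp_per_mgm(int log_entry_size)
{
	return 4 * ((1 << log_entry_size) / 16 - 2);
}

int main(void)
{
	int n;

	/* 7..12 is the range accepted by mlx4_verify_params() above */
	for (n = 7; n <= 12; n++)
		printf("log_num_mgm_entry_size=%d -> %d QPs per MCG\n",
		       n, qp_per_mgm(n));
	/* n = 10 prints 248 (the MODULE_PARM_DESC example); n = 12 prints
	 * 1016, matching the new MLX4_MAX_QP_PER_MGM definition. */
	return 0;
}
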
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index e151c21baf2b..52685524708d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,12 +54,7 @@ struct mlx4_mgm {
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
- if (dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED)
- return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
- else
- return min((1 << mlx4_log_num_mgm_entry_size),
- MLX4_MAX_MGM_ENTRY_SIZE);
+ return 1 << dev->oper_log_mgm_entry_size;
}
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
@@ -669,7 +664,7 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
dw |= ctrl->priority << 16;
hw->ctrl = cpu_to_be32(dw);
- hw->vf_vep_port = cpu_to_be32(ctrl->port);
+ hw->port = ctrl->port;
hw->qpn = cpu_to_be32(ctrl->qpn);
}
@@ -1162,7 +1157,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
.priority = MLX4_DOMAIN_NIC,
};
- rule.allow_loopback = ~block_mcast_loopback;
+ rule.allow_loopback = !block_mcast_loopback;
rule.port = port;
rule.qpn = qp->qpn;
INIT_LIST_HEAD(&rule.list);
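
The one-character change from ~block_mcast_loopback to !block_mcast_loopback above is a logic fix: the intent is a boolean inversion ("allow loopback unless blocking was requested"), and bitwise NOT only resembles that for the inputs 0 and 1. A minimal userspace illustration follows (not mlx4 code; the exact width of the allow_loopback field is not visible in this hunk, so treat the low-bit remark as an assumption):

#include <stdio.h>

int main(void)
{
	unsigned int block[] = { 0, 1, 2 };	/* possible "block loopback" arguments */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("block=%u  ~block=0x%08x (nonzero=%d, low bit=%u)  !block=%d\n",
		       block[i], ~block[i], ~block[i] != 0,
		       ~block[i] & 1, !block[i]);
	/* ~block is nonzero for every input and its low bit is 1 for
	 * block=2, so a blocking request could still end up allowing
	 * loopback; !block is 1 exactly when blocking was not requested. */
	return 0;
}
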
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 1cf42036d7bb..ed4a6959e828 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -60,11 +60,6 @@
#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
#define MLX4_FS_NUM_MCG (1 << 17)
-enum {
- MLX4_FS_L2_HASH = 0,
- MLX4_FS_L2_L3_L4_HASH,
-};
-
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -94,8 +89,10 @@ enum {
};
enum {
- MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
- MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
+ MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
MLX4_MTT_ENTRY_PER_SEG = 8,
};
@@ -656,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context {
__be32 mcast;
};
-struct mlx4_mac_entry {
- u64 mac;
- u64 reg_id;
-};
-
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
@@ -670,7 +662,6 @@ struct mlx4_port_info {
char dev_mtu_name[16];
struct device_attribute port_mtu_attr;
struct mlx4_mac_table mac_table;
- struct radix_tree_root mac_tree;
struct mlx4_vlan_table vlan_table;
int base_qpn;
};
@@ -694,9 +685,12 @@ struct mlx4_steer {
struct mlx4_net_trans_rule_hw_ctrl {
__be32 ctrl;
- __be32 vf_vep_port;
+ u8 rsvd1;
+ u8 funcid;
+ u8 vep;
+ u8 port;
__be32 qpn;
- __be32 reserved;
+ __be32 rsvd2;
};
struct mlx4_net_trans_rule_hw_ib {
@@ -916,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
-int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 334ec483480b..c313d7e943a9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -198,7 +198,6 @@ enum cq_type {
*/
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
#define XNOR(x, y) (!(x) == !(y))
-#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
struct mlx4_en_tx_info {
@@ -427,10 +426,26 @@ struct mlx4_en_frag_info {
#endif
struct ethtool_flow_id {
+ struct list_head list;
struct ethtool_rx_flow_spec flow_spec;
u64 id;
};
+enum {
+ MLX4_EN_FLAG_PROMISC = (1 << 0),
+ MLX4_EN_FLAG_MC_PROMISC = (1 << 1),
+ /* whether we need to enable hardware loopback by putting dmac
+ * in Tx WQE
+ */
+ MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
+ /* whether we need to drop packets that the hardware looped back */
+ MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
+ MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
+};
+
+#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
+#define MLX4_EN_MAC_HASH_IDX 5
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -441,6 +456,8 @@ struct mlx4_en_priv {
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
+ /* To allow rule removal while the port is going down */
+ struct list_head ethtool_list;
unsigned long last_moder_packets[MAX_RX_RINGS];
unsigned long last_moder_tx_packets;
@@ -469,16 +486,15 @@ struct mlx4_en_priv {
int registered;
int allocated;
int stride;
- u64 mac;
+ unsigned char prev_mac[ETH_ALEN + 2];
int mac_index;
unsigned max_mtu;
int base_qpn;
+ int cqe_factor;
struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags;
u32 flags;
-#define MLX4_EN_FLAG_PROMISC 0x1
-#define MLX4_EN_FLAG_MC_PROMISC 0x2
u8 num_tx_rings_p_up;
u32 tx_ring_num;
u32 rx_ring_num;
@@ -492,7 +508,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
- struct work_struct mcast_task;
+ struct work_struct rx_mode_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
@@ -509,6 +525,7 @@ struct mlx4_en_priv {
bool wol;
struct device *ddev;
int base_tx_qpn;
+ struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
@@ -528,14 +545,24 @@ enum mlx4_en_wol {
MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
+struct mlx4_mac_entry {
+ struct hlist_node hlist;
+ unsigned char mac[ETH_ALEN + 2];
+ u64 reg_id;
+ struct rcu_head rcu;
+};
+
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
+void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features);
+
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof);
int mlx4_en_start_port(struct net_device *dev);
-void mlx4_en_stop_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev, int detach);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
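
The hunks above drop the qpn-keyed radix tree (struct mlx4_mac_entry loses its plain u64 mac) in favour of a 256-bucket hash table: MLX4_EN_MAC_HASH_SIZE is (1 << BITS_PER_BYTE) and entries now carry an hlist_node plus an rcu_head. The sketch below shows the bucketing this implies; the real lookup helpers live in en_netdev.c and are not part of this header, so the choice of mac[MLX4_EN_MAC_HASH_IDX] as the key is an assumption drawn from the constants alone.

/* Hypothetical illustration of the bucketing implied by the constants above. */
#define MAC_HASH_SIZE	(1 << 8)	/* MLX4_EN_MAC_HASH_SIZE */
#define MAC_HASH_IDX	5		/* MLX4_EN_MAC_HASH_IDX */

static unsigned int mac_hash_bucket(const unsigned char *mac)
{
	/* one byte of the address is enough to index 256 buckets */
	return mac[MAC_HASH_IDX];	/* always < MAC_HASH_SIZE */
}
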
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4c51b05efa28..719ead15e491 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u64 *reg_id)
-{
- __be64 be_mac;
- int err;
-
- mac &= MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
-
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_B0: {
- struct mlx4_qp qp;
- u8 gid[16] = {0};
-
- qp.qpn = *qpn;
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
-
- err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
- break;
- }
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
- struct mlx4_spec_list spec_eth = { {NULL} };
- __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
-
- struct mlx4_net_trans_rule rule = {
- .queue_mode = MLX4_NET_TRANS_Q_FIFO,
- .exclusive = 0,
- .allow_loopback = 1,
- .promisc_mode = MLX4_FS_PROMISC_NONE,
- .priority = MLX4_DOMAIN_NIC,
- };
-
- rule.port = port;
- rule.qpn = *qpn;
- INIT_LIST_HEAD(&rule.list);
-
- spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
- memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
- list_add_tail(&spec_eth.list, &rule.list);
-
- err = mlx4_flow_attach(dev, &rule, reg_id);
- break;
- }
- default:
- return -EINVAL;
- }
- if (err)
- mlx4_warn(dev, "Failed Attaching Unicast\n");
-
- return err;
-}
-
-static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u64 reg_id)
-{
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_B0: {
- struct mlx4_qp qp;
- u8 gid[16] = {0};
- __be64 be_mac;
-
- qp.qpn = qpn;
- mac &= MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
-
- mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
- break;
- }
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
- mlx4_flow_detach(dev, reg_id);
- break;
- }
- default:
- mlx4_err(dev, "Invalid steering mode.\n");
- }
-}
-
static int validate_index(struct mlx4_dev *dev,
struct mlx4_mac_table *table, int index)
{
@@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev,
return -EINVAL;
}
-int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_entry *entry;
- int index = 0;
- int err = 0;
- u64 reg_id;
-
- mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
- (unsigned long long) mac);
- index = mlx4_register_mac(dev, port, mac);
- if (index < 0) {
- err = index;
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
- (unsigned long long) mac);
- return err;
- }
-
- if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- *qpn = info->base_qpn + index;
- return 0;
- }
-
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- goto qp_err;
- }
-
- err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
- if (err)
- goto steer_err;
-
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto alloc_err;
- }
- entry->mac = mac;
- entry->reg_id = reg_id;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err)
- goto insert_err;
- return 0;
-
-insert_err:
- kfree(entry);
-
-alloc_err:
- mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
-
-steer_err:
- mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
- mlx4_unregister_mac(dev, port, mac);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
-
-void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_entry *entry;
-
- mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
- (unsigned long long) mac);
- mlx4_unregister_mac(dev, port, mac);
-
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
- " qpn %d\n", port,
- (unsigned long long) mac, qpn);
- mlx4_uc_steer_release(dev, port, entry->mac,
- qpn, entry->reg_id);
- mlx4_qp_release_range(dev, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- kfree(entry);
- }
- }
-}
-EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
-
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
__be64 *entries)
{
@@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
+int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
+{
+ return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ (port - 1) * (1 << dev->caps.log_num_macs);
+}
+EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
@@ -397,29 +236,13 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- struct mlx4_mac_entry *entry;
int index = qpn - info->base_qpn;
int err = 0;
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (!entry)
- return -EINVAL;
- mlx4_uc_steer_release(dev, port, entry->mac,
- qpn, entry->reg_id);
- mlx4_unregister_mac(dev, port, entry->mac);
- entry->mac = new_mac;
- entry->reg_id = 0;
- mlx4_register_mac(dev, port, new_mac);
- err = mlx4_uc_steer_add(dev, port, entry->mac,
- &qpn, &entry->reg_id);
- return err;
- }
-
/* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
@@ -439,7 +262,7 @@ out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b05705f50f0f..5997adc943d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3018,7 +3018,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
- port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
+ port = ctrl->port;
eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
/* Clear a space in the inbox for eth header */
@@ -3071,6 +3071,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
int err;
+ int qpn;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct _rule_hw *rule_header;
int header_id;
@@ -3080,13 +3081,21 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+ qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, NULL);
+ if (err) {
+ pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ return err;
+ }
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
- if (validate_eth_header_mac(slave, rule_header, rlist))
- return -EINVAL;
+ if (validate_eth_header_mac(slave, rule_header, rlist)) {
+ err = -EINVAL;
+ goto err_put;
+ }
break;
case MLX4_NET_TRANS_RULE_ID_IB:
break;
@@ -3094,14 +3103,17 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
- if (add_eth_header(dev, slave, inbox, rlist, header_id))
- return -EINVAL;
+ if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
+ err = -EINVAL;
+ goto err_put;
+ }
vhcr->in_modifier +=
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
pr_err("Corrupted mailbox.\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_put;
}
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
@@ -3109,16 +3121,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- return err;
+ goto err_put;
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to add flow steering resources.\n ");
/* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0,
- MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
}
+err_put:
+ put_res(dev, slave, qpn, RES_QP);
return err;
}
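
Two things change in mlx4_QP_FLOW_STEERING_ATTACH_wrapper() above: the target QP is pinned with get_res() before the rule is parsed, with every later exit funnelled through err_put/put_res(), and the rollback after a failed add_res_range() now issues MLX4_QP_FLOW_STEERING_DETACH instead of re-sending ATTACH. A compact sketch of that reference-handling shape, with hypothetical stand-ins for the tracker helpers:

struct rule_ctx {
	int qpn;
	int refs;
};

static int take_qp_ref(struct rule_ctx *ctx)   { ctx->refs++; return 0; }  /* like get_res(..., RES_QP, ...) */
static void drop_qp_ref(struct rule_ctx *ctx)  { ctx->refs--; }            /* like put_res(..., RES_QP) */
static int validate_rule(struct rule_ctx *ctx) { return 0; }               /* like validate_eth_header_mac() */
static int program_rule(struct rule_ctx *ctx)  { return 0; }               /* like the ATTACH firmware command */

static int attach_rule(struct rule_ctx *ctx)
{
	int err;

	err = take_qp_ref(ctx);
	if (err)
		return err;

	err = validate_rule(ctx);
	if (err)
		goto out_put;

	err = program_rule(ctx);
out_put:
	drop_qp_ref(ctx);	/* success and failure both drop the reference */
	return err;
}
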
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index b71eb39ab448..fbcb9e74d7fc 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1080,7 +1080,6 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(netdev->dev_addr, mac, netdev->addr_len);
ks8842_write_mac_addr(adapter, mac);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 286816a4e783..33bcb63d56a2 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -69,7 +69,6 @@ union ks8851_tx_hdr {
* @mii: The MII state information for the mii calls.
* @rxctrl: RX settings for @rxctrl_work.
* @tx_work: Work queue for tx packets
- * @irq_work: Work queue for servicing interrupts
* @rxctrl_work: Work queue for updating RX mode and multicast lists
* @txq: Queue of packets for transmission.
* @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
@@ -121,7 +120,6 @@ struct ks8851_net {
struct ks8851_rxctrl rxctrl;
struct work_struct tx_work;
- struct work_struct irq_work;
struct work_struct rxctrl_work;
struct sk_buff_head txq;
@@ -444,23 +442,6 @@ static void ks8851_init_mac(struct ks8851_net *ks)
}
/**
- * ks8851_irq - device interrupt handler
- * @irq: Interrupt number passed from the IRQ handler.
- * @pw: The private word passed to register_irq(), our struct ks8851_net.
- *
- * Disable the interrupt from happening again until we've processed the
- * current status by scheduling ks8851_irq_work().
- */
-static irqreturn_t ks8851_irq(int irq, void *pw)
-{
- struct ks8851_net *ks = pw;
-
- disable_irq_nosync(irq);
- schedule_work(&ks->irq_work);
- return IRQ_HANDLED;
-}
-
-/**
* ks8851_rdfifo - read data from the receive fifo
* @ks: The device state.
* @buff: The buffer address
@@ -595,19 +576,20 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
}
/**
- * ks8851_irq_work - work queue handler for dealing with interrupt requests
- * @work: The work structure that was scheduled by schedule_work()
+ * ks8851_irq - IRQ handler for dealing with interrupt requests
+ * @irq: IRQ number
+ * @_ks: cookie
*
- * This is the handler invoked when the ks8851_irq() is called to find out
- * what happened, as we cannot allow ourselves to sleep whilst waiting for
- * anything other process has the chip's lock.
+ * This handler is invoked when the IRQ line asserts to find out what happened.
+ * As we cannot allow ourselves to sleep in HARDIRQ context, this handler runs
+ * in thread context.
*
* Read the interrupt status, work out what needs to be done and then clear
* any of the interrupts that are not needed.
*/
-static void ks8851_irq_work(struct work_struct *work)
+static irqreturn_t ks8851_irq(int irq, void *_ks)
{
- struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work);
+ struct ks8851_net *ks = _ks;
unsigned status;
unsigned handled = 0;
@@ -688,7 +670,7 @@ static void ks8851_irq_work(struct work_struct *work)
if (status & IRQ_TXI)
netif_wake_queue(ks->netdev);
- enable_irq(ks->netdev->irq);
+ return IRQ_HANDLED;
}
/**
@@ -896,7 +878,6 @@ static int ks8851_net_stop(struct net_device *dev)
mutex_unlock(&ks->lock);
/* stop any outstanding work */
- flush_work(&ks->irq_work);
flush_work(&ks->tx_work);
flush_work(&ks->rxctrl_work);
@@ -1052,7 +1033,6 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
return ks8851_write_mac_addr(dev);
}
@@ -1438,7 +1418,6 @@ static int ks8851_probe(struct spi_device *spi)
spin_lock_init(&ks->statelock);
INIT_WORK(&ks->tx_work, ks8851_tx_work);
- INIT_WORK(&ks->irq_work, ks8851_irq_work);
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
/* initialise pre-made spi transfer messages */
@@ -1505,8 +1484,9 @@ static int ks8851_probe(struct spi_device *spi)
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
- ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW,
- ndev->name, ks);
+ ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ ndev->name, ks);
if (ret < 0) {
dev_err(&spi->dev, "failed to get irq\n");
goto err_irq;
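
The ks8851 changes above replace the disable_irq_nosync()/schedule_work()/enable_irq() trampoline with a threaded interrupt: request_threaded_irq() gets a NULL primary handler plus IRQF_ONESHOT, so the core keeps the level-triggered line masked until the thread function returns, and the handler may sleep while it does its SPI transfers. A minimal generic sketch of the same pattern; the my_dev names are illustrative, not from ks8851.c:

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct my_dev {				/* hypothetical driver state */
	struct mutex lock;
};

static irqreturn_t my_dev_irq_thread(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	mutex_lock(&dev->lock);		/* sleeping is fine in thread context */
	/* read and acknowledge the chip's interrupt status here */
	mutex_unlock(&dev->lock);
	return IRQ_HANDLED;
}

static int my_dev_request_irq(struct my_dev *dev, int irq, const char *name)
{
	/* NULL primary handler: the core's default just wakes the thread;
	 * IRQF_ONESHOT keeps the level-triggered line masked until
	 * my_dev_irq_thread() returns. */
	return request_threaded_irq(irq, NULL, my_dev_irq_thread,
				    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				    name, dev);
}
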
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ef8f9f92e547..a343066f7b43 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1237,7 +1237,6 @@ static int ks_set_mac_address(struct net_device *netdev, void *paddr)
struct sockaddr *addr = paddr;
u8 *da;
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
da = (u8 *)netdev->dev_addr;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 83f0ea929d3d..8ebc352bcbe6 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4761,7 +4761,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
struct ksz_dma_buf *dma_buf;
struct net_device *dev = NULL;
- spin_lock(&hw_priv->hwlock);
+ spin_lock_irq(&hw_priv->hwlock);
last = info->last;
while (info->avail < info->alloc) {
@@ -4795,7 +4795,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
info->avail++;
}
info->last = last;
- spin_unlock(&hw_priv->hwlock);
+ spin_unlock_irq(&hw_priv->hwlock);
/* Notify the network subsystem that the packet has been sent. */
if (dev)
@@ -5259,11 +5259,15 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
+ spin_lock(&hw_priv->hwlock);
+
hw_read_intr(hw, &int_enable);
/* Not our interrupt! */
- if (!int_enable)
+ if (!int_enable) {
+ spin_unlock(&hw_priv->hwlock);
return IRQ_NONE;
+ }
do {
hw_ack_intr(hw, int_enable);
@@ -5310,6 +5314,8 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
hw_ena_intr(hw);
+ spin_unlock(&hw_priv->hwlock);
+
return IRQ_HANDLED;
}
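
The ksz884x hunk above starts taking hw_priv->hwlock inside netdev_intr(), which is why transmit_cleanup() is switched from spin_lock()/spin_unlock() to the _irq variants: once an interrupt handler contends for a lock, process-context holders must keep local interrupts off or the handler can deadlock spinning on the same CPU. A self-contained sketch of the pairing, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct my_hw {				/* hypothetical driver state */
	spinlock_t lock;
	int pending;
};

/* Process context: local interrupts must stay disabled while the lock is
 * held, otherwise my_hw_intr() could spin on it on the same CPU. */
static void my_hw_cleanup(struct my_hw *hw)
{
	spin_lock_irq(&hw->lock);
	hw->pending = 0;
	spin_unlock_irq(&hw->lock);
}

/* Hard IRQ context: interrupts are already off here, plain spin_lock()
 * is enough. */
static irqreturn_t my_hw_intr(int irq, void *dev_id)
{
	struct my_hw *hw = dev_id;

	spin_lock(&hw->lock);
	hw->pending++;
	spin_unlock(&hw->lock);
	return IRQ_HANDLED;
}
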
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 8163fd0f453f..afaf0c07f37f 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_MICROCHIP
bool "Microchip devices"
default y
- depends on SPI && EXPERIMENTAL
+ depends on SPI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -20,7 +20,7 @@ if NET_VENDOR_MICROCHIP
config ENC28J60
tristate "ENC28J60 support"
- depends on SPI && EXPERIMENTAL
+ depends on SPI
select CRC32
---help---
Support for the Microchip EN28J60 ethernet chip.
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index a99456c3dd87..5d98a9f7bfc7 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -527,7 +527,6 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
return enc28j60_set_hw_macaddr(dev);
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index f8408d6e961c..4f9937e026e5 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -664,10 +664,9 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
/* copy header of running firmware from SRAM to host memory to
* validate firmware */
hdr = kmalloc(bytes, GFP_KERNEL);
- if (hdr == NULL) {
- dev_err(dev, "could not malloc firmware hdr\n");
+ if (hdr == NULL)
return -ENOMEM;
- }
+
memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
status = myri10ge_validate_firmware(mgp, hdr);
kfree(hdr);
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index f157334579fd..a100860d45e6 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -5,9 +5,6 @@
config NET_VENDOR_NATSEMI
bool "National Semi-conductor devices"
default y
- depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
- ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MIPS || \
- PCI || PCMCIA || SUPERH || XTENSA_PLATFORM_XT2000 || ZORRO
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/natsemi/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c
deleted file mode 100644
index 923e640d604c..000000000000
--- a/drivers/net/ethernet/natsemi/ibmlana.c
+++ /dev/null
@@ -1,1075 +0,0 @@
-/*
-net-3-driver for the IBM LAN Adapter/A
-
-This is an extension to the Linux operating system, and is covered by the
-same GNU General Public License that covers that work.
-
-Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
- alfred.arnold@lancom.de)
-
-This driver is based both on the SK_MCA driver, which is itself based on the
-SK_G16 and 3C523 driver.
-
-paper sources:
- 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
- Hans-Peter Messmer for the basic Microchannel stuff
-
- 'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer
- for help on Ethernet driver programming
-
- 'DP83934CVUL-20/25 MHz SONIC-T Ethernet Controller Datasheet' by National
- Semiconductor for info on the MAC chip
-
- 'LAN Technical Reference Ethernet Adapter Interface Version 1 Release 1.0
- Document Number SC30-3661-00' by IBM for info on the adapter itself
-
- Also see http://www.national.com/analog
-
-special acknowledgements to:
- - Bob Eager for helping me out with documentation from IBM
- - Jim Shorney for his endless patience with me while I was using
- him as a beta tester to trace down the address filter bug ;-)
-
- Missing things:
-
- -> set debug level via ioctl instead of compile-time switches
- -> I didn't follow the development of the 2.1.x kernels, so my
- assumptions about which things changed with which kernel version
- are probably nonsense
-
-History:
- Nov 6th, 1999
- startup from SK_MCA driver
- Dec 6th, 1999
- finally got docs about the card. A big thank you to Bob Eager!
- Dec 12th, 1999
- first packet received
- Dec 13th, 1999
- recv queue done, tcpdump works
- Dec 15th, 1999
- transmission part works
- Dec 28th, 1999
- added usage of the isa_functions for Linux 2.3 . Things should
- still work with 2.0.x....
- Jan 28th, 2000
- in Linux 2.2.13, the version.h file mysteriously didn't get
- included. Added a workaround for this. Furthermore, it now
- not only compiles as a modules ;-)
- Jan 30th, 2000
- newer kernels automatically probe more than one board, so the
- 'startslot' as a variable is also needed here
- Apr 12th, 2000
- the interrupt mask register is not set 'hard' instead of individually
- setting registers, since this seems to set bits that shouldn't be
- set
- May 21st, 2000
- reset interrupt status immediately after CAM load
- add a recovery delay after releasing the chip's reset line
- May 24th, 2000
- finally found the bug in the address filter setup - damned signed
- chars!
- June 1st, 2000
- corrected version codes, added support for the latest 2.3 changes
- Oct 28th, 2002
- cleaned up for the 2.5 tree <alan@lxorguk.ukuu.org.uk>
-
- *************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/time.h>
-#include <linux/mca.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h>
-#include <asm/io.h>
-
-#define _IBM_LANA_DRIVER_
-#include "ibmlana.h"
-
-#undef DEBUG
-
-#define DRV_NAME "ibmlana"
-
-/* ------------------------------------------------------------------------
- * global static data - not more since we can handle multiple boards and
- * have to pack all state info into the device struct!
- * ------------------------------------------------------------------------ */
-
-static char *MediaNames[Media_Count] = {
- "10BaseT", "10Base5", "Unknown", "10Base2"
-};
-
-/* ------------------------------------------------------------------------
- * private subfunctions
- * ------------------------------------------------------------------------ */
-
-#ifdef DEBUG
- /* dump all registers */
-
-static void dumpregs(struct net_device *dev)
-{
- int z;
-
- for (z = 0; z < 160; z += 2) {
- if (!(z & 15))
- printk("REGS: %04x:", z);
- printk(" %04x", inw(dev->base_addr + z));
- if ((z & 15) == 14)
- printk("\n");
- }
-}
-
-/* dump parts of shared memory - only needed during debugging */
-
-static void dumpmem(struct net_device *dev, u32 start, u32 len)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int z;
-
- printk("Address %04x:\n", start);
- for (z = 0; z < len; z++) {
- if ((z & 15) == 0)
- printk("%04x:", z);
- printk(" %02x", readb(priv->base + start + z));
- if ((z & 15) == 15)
- printk("\n");
- }
- if ((z & 15) != 0)
- printk("\n");
-}
-
-/* print exact time - ditto */
-
-static void PrTime(void)
-{
- struct timeval tv;
-
- do_gettimeofday(&tv);
- printk("%9d:%06d: ", (int) tv.tv_sec, (int) tv.tv_usec);
-}
-#endif /* DEBUG */
-
-/* deduce resources out of POS registers */
-
-static void getaddrs(struct mca_device *mdev, int *base, int *memlen,
- int *iobase, int *irq, ibmlana_medium *medium)
-{
- u_char pos0, pos1;
-
- pos0 = mca_device_read_stored_pos(mdev, 2);
- pos1 = mca_device_read_stored_pos(mdev, 3);
-
- *base = 0xc0000 + ((pos1 & 0xf0) << 9);
- *memlen = (pos1 & 0x01) ? 0x8000 : 0x4000;
- *iobase = (pos0 & 0xe0) << 7;
- switch (pos0 & 0x06) {
- case 0:
- *irq = 5;
- break;
- case 2:
- *irq = 15;
- break;
- case 4:
- *irq = 10;
- break;
- case 6:
- *irq = 11;
- break;
- }
- *medium = (pos0 & 0x18) >> 3;
-}
-
-/* wait on register value with mask and timeout */
-
-static int wait_timeout(struct net_device *dev, int regoffs, u16 mask,
- u16 value, int timeout)
-{
- unsigned long fin = jiffies + timeout;
-
- while (time_before(jiffies,fin))
- if ((inw(dev->base_addr + regoffs) & mask) == value)
- return 1;
-
- return 0;
-}
-
-
-/* reset the whole board */
-
-static void ResetBoard(struct net_device *dev)
-{
- unsigned char bcmval;
-
- /* read original board control value */
-
- bcmval = inb(dev->base_addr + BCMREG);
-
- /* set reset bit for a while */
-
- bcmval |= BCMREG_RESET;
- outb(bcmval, dev->base_addr + BCMREG);
- udelay(10);
- bcmval &= ~BCMREG_RESET;
- outb(bcmval, dev->base_addr + BCMREG);
-
- /* switch over to RAM again */
-
- bcmval |= BCMREG_RAMEN | BCMREG_RAMWIN;
- outb(bcmval, dev->base_addr + BCMREG);
-}
-
-/* calculate RAM layout & set up descriptors in RAM */
-
-static void InitDscrs(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- u32 addr, baddr, raddr;
- int z;
- tda_t tda;
- rda_t rda;
- rra_t rra;
-
- /* initialize RAM */
-
- memset_io(priv->base, 0xaa,
- dev->mem_start - dev->mem_start); /* XXX: typo? */
-
- /* setup n TX descriptors - independent of RAM size */
-
- priv->tdastart = addr = 0;
- priv->txbufstart = baddr = sizeof(tda_t) * TXBUFCNT;
- for (z = 0; z < TXBUFCNT; z++) {
- tda.status = 0;
- tda.config = 0;
- tda.length = 0;
- tda.fragcount = 1;
- tda.startlo = baddr;
- tda.starthi = 0;
- tda.fraglength = 0;
- if (z == TXBUFCNT - 1)
- tda.link = priv->tdastart;
- else
- tda.link = addr + sizeof(tda_t);
- tda.link |= 1;
- memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
- addr += sizeof(tda_t);
- baddr += PKTSIZE;
- }
-
- /* calculate how many receive buffers fit into remaining memory */
-
- priv->rxbufcnt = (dev->mem_end - dev->mem_start - baddr) / (sizeof(rra_t) + sizeof(rda_t) + PKTSIZE);
-
- /* calculate receive addresses */
-
- priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
- priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
- priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
-
- for (z = 0; z < priv->rxbufcnt; z++) {
- rra.startlo = baddr;
- rra.starthi = 0;
- rra.cntlo = PKTSIZE >> 1;
- rra.cnthi = 0;
- memcpy_toio(priv->base + raddr, &rra, sizeof(rra_t));
-
- rda.status = 0;
- rda.length = 0;
- rda.startlo = 0;
- rda.starthi = 0;
- rda.seqno = 0;
- if (z < priv->rxbufcnt - 1)
- rda.link = addr + sizeof(rda_t);
- else
- rda.link = 1;
- rda.inuse = 1;
- memcpy_toio(priv->base + addr, &rda, sizeof(rda_t));
-
- baddr += PKTSIZE;
- raddr += sizeof(rra_t);
- addr += sizeof(rda_t);
- }
-
- /* initialize current pointers */
-
- priv->nextrxdescr = 0;
- priv->lastrxdescr = priv->rxbufcnt - 1;
- priv->nexttxdescr = 0;
- priv->currtxdescr = 0;
- priv->txusedcnt = 0;
- memset(priv->txused, 0, sizeof(priv->txused));
-}
-
-/* set up Rx + Tx descriptors in SONIC */
-
-static int InitSONIC(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* set up start & end of resource area */
-
- outw(0, SONIC_URRA);
- outw(priv->rrastart, dev->base_addr + SONIC_RSA);
- outw(priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)), dev->base_addr + SONIC_REA);
- outw(priv->rrastart, dev->base_addr + SONIC_RRP);
- outw(priv->rrastart, dev->base_addr + SONIC_RWP);
-
- /* set EOBC so that only one packet goes into one buffer */
-
- outw((PKTSIZE - 4) >> 1, dev->base_addr + SONIC_EOBC);
-
- /* let SONIC read the first RRA descriptor */
-
- outw(CMDREG_RRRA, dev->base_addr + SONIC_CMDREG);
- if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_RRRA, 0, 2)) {
- printk(KERN_ERR "%s: SONIC did not respond on RRRA command - giving up.", dev->name);
- return 0;
- }
-
- /* point SONIC to the first RDA */
-
- outw(0, dev->base_addr + SONIC_URDA);
- outw(priv->rdastart, dev->base_addr + SONIC_CRDA);
-
- /* set upper half of TDA address */
-
- outw(0, dev->base_addr + SONIC_UTDA);
-
- return 1;
-}
-
-/* stop SONIC so we can reinitialize it */
-
-static void StopSONIC(struct net_device *dev)
-{
- /* disable interrupts */
-
- outb(inb(dev->base_addr + BCMREG) & (~BCMREG_IEN), dev->base_addr + BCMREG);
- outb(0, dev->base_addr + SONIC_IMREG);
-
- /* reset the SONIC */
-
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- udelay(10);
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
-}
-
-/* initialize card and SONIC for proper operation */
-
-static void putcam(camentry_t * cams, int *camcnt, char *addr)
-{
- camentry_t *pcam = cams + (*camcnt);
- u8 *uaddr = (u8 *) addr;
-
- pcam->index = *camcnt;
- pcam->addr0 = (((u16) uaddr[1]) << 8) | uaddr[0];
- pcam->addr1 = (((u16) uaddr[3]) << 8) | uaddr[2];
- pcam->addr2 = (((u16) uaddr[5]) << 8) | uaddr[4];
- (*camcnt)++;
-}
-
-static void InitBoard(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int camcnt;
- camentry_t cams[16];
- u32 cammask;
- struct netdev_hw_addr *ha;
- u16 rcrval;
-
- /* reset the SONIC */
-
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- udelay(10);
-
- /* clear all spurious interrupts */
-
- outw(inw(dev->base_addr + SONIC_ISREG), dev->base_addr + SONIC_ISREG);
-
- /* set up the SONIC's bus interface - constant for this adapter -
- must be done while the SONIC is in reset */
-
- outw(DCREG_USR1 | DCREG_USR0 | DCREG_WC1 | DCREG_DW32, dev->base_addr + SONIC_DCREG);
- outw(0, dev->base_addr + SONIC_DCREG2);
-
- /* remove reset form the SONIC */
-
- outw(0, dev->base_addr + SONIC_CMDREG);
- udelay(10);
-
- /* data sheet requires URRA to be programmed before setting up the CAM contents */
-
- outw(0, dev->base_addr + SONIC_URRA);
-
- /* program the CAM entry 0 to the device address */
-
- camcnt = 0;
- putcam(cams, &camcnt, dev->dev_addr);
-
- /* start putting the multicast addresses into the CAM list. Stop if
- it is full. */
-
- netdev_for_each_mc_addr(ha, dev) {
- putcam(cams, &camcnt, ha->addr);
- if (camcnt == 16)
- break;
- }
-
- /* calculate CAM mask */
-
- cammask = (1 << camcnt) - 1;
-
- /* feed CDA into SONIC, initialize RCR value (always get broadcasts) */
-
- memcpy_toio(priv->base, cams, sizeof(camentry_t) * camcnt);
- memcpy_toio(priv->base + (sizeof(camentry_t) * camcnt), &cammask, sizeof(cammask));
-
-#ifdef DEBUG
- printk("CAM setup:\n");
- dumpmem(dev, 0, sizeof(camentry_t) * camcnt + sizeof(cammask));
-#endif
-
- outw(0, dev->base_addr + SONIC_CAMPTR);
- outw(camcnt, dev->base_addr + SONIC_CAMCNT);
- outw(CMDREG_LCAM, dev->base_addr + SONIC_CMDREG);
- if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_LCAM, 0, 2)) {
- printk(KERN_ERR "%s:SONIC did not respond on LCAM command - giving up.", dev->name);
- return;
- } else {
- /* clear interrupt condition */
-
- outw(ISREG_LCD, dev->base_addr + SONIC_ISREG);
-
-#ifdef DEBUG
- printk("Loading CAM done, address pointers %04x:%04x\n",
- inw(dev->base_addr + SONIC_URRA),
- inw(dev->base_addr + SONIC_CAMPTR));
- {
- int z;
-
- printk("\n-->CAM: PTR %04x CNT %04x\n",
- inw(dev->base_addr + SONIC_CAMPTR),
- inw(dev->base_addr + SONIC_CAMCNT));
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- for (z = 0; z < camcnt; z++) {
- outw(z, dev->base_addr + SONIC_CAMEPTR);
- printk("Entry %d: %04x %04x %04x\n", z,
- inw(dev->base_addr + SONIC_CAMADDR0),
- inw(dev->base_addr + SONIC_CAMADDR1),
- inw(dev->base_addr + SONIC_CAMADDR2));
- }
- outw(0, dev->base_addr + SONIC_CMDREG);
- }
-#endif
- }
-
- rcrval = RCREG_BRD | RCREG_LB_NONE;
-
- /* if still multicast addresses left or ALLMULTI is set, set the multicast
- enable bit */
-
- if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
- rcrval |= RCREG_AMC;
-
- /* promiscuous mode ? */
-
- if (dev->flags & IFF_PROMISC)
- rcrval |= RCREG_PRO;
-
- /* program receive mode */
-
- outw(rcrval, dev->base_addr + SONIC_RCREG);
-#ifdef DEBUG
- printk("\nRCRVAL: %04x\n", rcrval);
-#endif
-
- /* set up descriptors in shared memory + feed them into SONIC registers */
-
- InitDscrs(dev);
- if (!InitSONIC(dev))
- return;
-
- /* reset all pending interrupts */
-
- outw(0xffff, dev->base_addr + SONIC_ISREG);
-
- /* enable transmitter + receiver interrupts */
-
- outw(CMDREG_RXEN, dev->base_addr + SONIC_CMDREG);
- outw(IMREG_PRXEN | IMREG_RBEEN | IMREG_PTXEN | IMREG_TXEREN, dev->base_addr + SONIC_IMREG);
-
- /* turn on card interrupts */
-
- outb(inb(dev->base_addr + BCMREG) | BCMREG_IEN, dev->base_addr + BCMREG);
-
-#ifdef DEBUG
- printk("Register dump after initialization:\n");
- dumpregs(dev);
-#endif
-}
-
-/* start transmission of a descriptor */
-
-static void StartTx(struct net_device *dev, int descr)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int addr;
-
- addr = priv->tdastart + (descr * sizeof(tda_t));
-
- /* put descriptor address into SONIC */
-
- outw(addr, dev->base_addr + SONIC_CTDA);
-
- /* trigger transmitter */
-
- priv->currtxdescr = descr;
- outw(CMDREG_TXP, dev->base_addr + SONIC_CMDREG);
-}
-
-/* ------------------------------------------------------------------------
- * interrupt handler(s)
- * ------------------------------------------------------------------------ */
-
-/* receive buffer area exhausted */
-
-static void irqrbe_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* point the SONIC back to the RRA start */
-
- outw(priv->rrastart, dev->base_addr + SONIC_RRP);
- outw(priv->rrastart, dev->base_addr + SONIC_RWP);
-}
-
-/* receive interrupt */
-
-static void irqrx_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- rda_t rda;
- u32 rdaaddr, lrdaaddr;
-
- /* loop until ... */
-
- while (1) {
- /* read descriptor that was next to be filled by SONIC */
-
- rdaaddr = priv->rdastart + (priv->nextrxdescr * sizeof(rda_t));
- lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
- memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
-
- /* iron out upper word halves of fields we use - SONIC will duplicate
- bits 0..15 to 16..31 */
-
- rda.status &= 0xffff;
- rda.length &= 0xffff;
- rda.startlo &= 0xffff;
-
- /* stop if the SONIC still owns it, i.e. there is no data for us */
-
- if (rda.inuse)
- break;
-
- /* good packet? */
-
- else if (rda.status & RCREG_PRX) {
- struct sk_buff *skb;
-
- /* fetch buffer */
-
- skb = netdev_alloc_skb(dev, rda.length + 2);
- if (skb == NULL)
- dev->stats.rx_dropped++;
- else {
- /* copy out data */
-
- memcpy_fromio(skb_put(skb, rda.length),
- priv->base +
- rda.startlo, rda.length);
-
- /* set up skb fields */
-
- skb->protocol = eth_type_trans(skb, dev);
- skb_checksum_none_assert(skb);
-
- /* bookkeeping */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += rda.length;
-
- /* pass to the upper layers */
- netif_rx(skb);
- }
- }
-
- /* otherwise check error status bits and increase statistics */
-
- else {
- dev->stats.rx_errors++;
- if (rda.status & RCREG_FAER)
- dev->stats.rx_frame_errors++;
- if (rda.status & RCREG_CRCR)
- dev->stats.rx_crc_errors++;
- }
-
- /* descriptor processed, will become new last descriptor in queue */
-
- rda.link = 1;
- rda.inuse = 1;
- memcpy_toio(priv->base + rdaaddr, &rda,
- sizeof(rda_t));
-
- /* set up link and EOL = 0 in currently last descriptor. Only write
- the link field since the SONIC may currently already access the
- other fields. */
-
- memcpy_toio(priv->base + lrdaaddr + 20, &rdaaddr, 4);
-
- /* advance indices */
-
- priv->lastrxdescr = priv->nextrxdescr;
- if ((++priv->nextrxdescr) >= priv->rxbufcnt)
- priv->nextrxdescr = 0;
- }
-}
-
-/* transmit interrupt */
-
-static void irqtx_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- tda_t tda;
-
- /* fetch descriptor (we forgot the size ;-) */
- memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
-
- /* update statistics */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tda.length;
-
- /* update our pointers */
- priv->txused[priv->currtxdescr] = 0;
- priv->txusedcnt--;
-
- /* if there are more descriptors present in RAM, start them */
- if (priv->txusedcnt > 0)
- StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
-
- /* tell the upper layer we can go on transmitting */
- netif_wake_queue(dev);
-}
-
-static void irqtxerr_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- tda_t tda;
-
- /* fetch descriptor to check status */
- memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
-
- /* update statistics */
- dev->stats.tx_errors++;
- if (tda.status & (TCREG_NCRS | TCREG_CRSL))
- dev->stats.tx_carrier_errors++;
- if (tda.status & TCREG_EXC)
- dev->stats.tx_aborted_errors++;
- if (tda.status & TCREG_OWC)
- dev->stats.tx_window_errors++;
- if (tda.status & TCREG_FU)
- dev->stats.tx_fifo_errors++;
-
- /* update our pointers */
- priv->txused[priv->currtxdescr] = 0;
- priv->txusedcnt--;
-
- /* if there are more descriptors present in RAM, start them */
- if (priv->txusedcnt > 0)
- StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
-
- /* tell the upper layer we can go on transmitting */
- netif_wake_queue(dev);
-}
-
-/* general interrupt entry */
-
-static irqreturn_t irq_handler(int dummy, void *device)
-{
- struct net_device *dev = device;
- u16 ival;
-
- /* in case we're not meant... */
- if (!(inb(dev->base_addr + BCMREG) & BCMREG_IPEND))
- return IRQ_NONE;
-
- /* loop through the interrupt bits until everything is clear */
- while (1) {
- ival = inw(dev->base_addr + SONIC_ISREG);
-
- if (ival & ISREG_RBE) {
- irqrbe_handler(dev);
- outw(ISREG_RBE, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_PKTRX) {
- irqrx_handler(dev);
- outw(ISREG_PKTRX, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_TXDN) {
- irqtx_handler(dev);
- outw(ISREG_TXDN, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_TXER) {
- irqtxerr_handler(dev);
- outw(ISREG_TXER, dev->base_addr + SONIC_ISREG);
- }
- break;
- }
- return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------------------
- * driver methods
- * ------------------------------------------------------------------------ */
-
-/* MCA info */
-
-#if 0 /* info available elsewhere, but this is kept for reference */
-static int ibmlana_getinfo(char *buf, int slot, void *d)
-{
- int len = 0, i;
- struct net_device *dev = (struct net_device *) d;
- ibmlana_priv *priv;
-
- /* can't say anything about an uninitialized device... */
-
- if (dev == NULL)
- return len;
- priv = netdev_priv(dev);
-
- /* print info */
-
- len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
- len += sprintf(buf + len, "I/O: %#lx\n", dev->base_addr);
- len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, dev->mem_end - 1);
- len += sprintf(buf + len, "Transceiver: %s\n", MediaNames[priv->medium]);
- len += sprintf(buf + len, "Device: %s\n", dev->name);
- len += sprintf(buf + len, "MAC address:");
- for (i = 0; i < 6; i++)
- len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
- buf[len++] = '\n';
- buf[len] = 0;
-
- return len;
-}
-#endif
-
-/* open driver. Means also initialization and start of LANCE */
-
-static int ibmlana_open(struct net_device *dev)
-{
- int result;
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* register resources - only necessary for IRQ */
-
- result = request_irq(priv->realirq, irq_handler, IRQF_SHARED,
- dev->name, dev);
- if (result != 0) {
- printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
- return result;
- }
- dev->irq = priv->realirq;
-
- /* set up the card and SONIC */
- InitBoard(dev);
-
- /* initialize operational flags */
- netif_start_queue(dev);
- return 0;
-}
-
-/* close driver. Shut down board and free allocated resources */
-
-static int ibmlana_close(struct net_device *dev)
-{
- /* turn off board */
-
- /* release resources */
- if (dev->irq != 0)
- free_irq(dev->irq, dev);
- dev->irq = 0;
- return 0;
-}
-
-/* transmit a block. */
-
-static netdev_tx_t ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int tmplen, addr;
- unsigned long flags;
- tda_t tda;
- int baddr;
-
- /* find out if there are free slots for a frame to transmit. If not,
- the upper layer is in deep desperation and we simply ignore the frame. */
-
- if (priv->txusedcnt >= TXBUFCNT) {
- dev->stats.tx_dropped++;
- goto tx_done;
- }
-
- /* copy the frame data into the next free transmit buffer - fillup missing */
- tmplen = skb->len;
- if (tmplen < 60)
- tmplen = 60;
- baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
- memcpy_toio(priv->base + baddr, skb->data, skb->len);
-
- /* copy filler into RAM - in case we're filling up...
- we're filling a bit more than necessary, but that doesn't harm
- since the buffer is far larger...
- Sorry Linus for the filler string but I couldn't resist ;-) */
-
- if (tmplen > skb->len) {
- char *fill = "NetBSD is a nice OS too! ";
- unsigned int destoffs = skb->len, l = strlen(fill);
-
- while (destoffs < tmplen) {
- memcpy_toio(priv->base + baddr + destoffs, fill, l);
- destoffs += l;
- }
- }
-
- /* set up the new frame descriptor */
- addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
- memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t));
- tda.length = tda.fraglength = tmplen;
- memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
-
- /* if there were no active descriptors, trigger the SONIC */
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->txusedcnt++;
- priv->txused[priv->nexttxdescr] = 1;
-
- /* are all transmission slots used up ? */
- if (priv->txusedcnt >= TXBUFCNT)
- netif_stop_queue(dev);
-
- if (priv->txusedcnt == 1)
- StartTx(dev, priv->nexttxdescr);
- priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-tx_done:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* switch receiver mode. */
-
-static void ibmlana_set_multicast_list(struct net_device *dev)
-{
- /* first stop the SONIC... */
- StopSONIC(dev);
- /* ...then reinit it with the new flags */
- InitBoard(dev);
-}
-
-/* ------------------------------------------------------------------------
- * hardware check
- * ------------------------------------------------------------------------ */
-
-static int ibmlana_irq;
-static int ibmlana_io;
-static int startslot; /* counts through slots when probing multiple devices */
-
-static short ibmlana_adapter_ids[] __initdata = {
- IBM_LANA_ID,
- 0x0000
-};
-
-static char *ibmlana_adapter_names[] = {
- "IBM LAN Adapter/A",
- NULL
-};
-
-
-static const struct net_device_ops ibmlana_netdev_ops = {
- .ndo_open = ibmlana_open,
- .ndo_stop = ibmlana_close,
- .ndo_start_xmit = ibmlana_tx,
- .ndo_set_rx_mode = ibmlana_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int ibmlana_init_one(struct device *kdev)
-{
- struct mca_device *mdev = to_mca_device(kdev);
- struct net_device *dev;
- int slot = mdev->slot, z, rc;
- int base = 0, irq = 0, iobase = 0, memlen = 0;
- ibmlana_priv *priv;
- ibmlana_medium medium;
-
- dev = alloc_etherdev(sizeof(ibmlana_priv));
- if (!dev)
- return -ENOMEM;
-
- dev->irq = ibmlana_irq;
- dev->base_addr = ibmlana_io;
-
- base = dev->mem_start;
- irq = dev->irq;
-
- /* deduce card addresses */
- getaddrs(mdev, &base, &memlen, &iobase, &irq, &medium);
-
- /* were we looking for something different ? */
- if (dev->irq && dev->irq != irq) {
- rc = -ENODEV;
- goto err_out;
- }
- if (dev->mem_start && dev->mem_start != base) {
- rc = -ENODEV;
- goto err_out;
- }
-
- /* announce success */
- printk(KERN_INFO "%s: IBM LAN Adapter/A found in slot %d\n", dev->name, slot + 1);
-
- /* try to obtain I/O range */
- if (!request_region(iobase, IBM_LANA_IORANGE, DRV_NAME)) {
- printk(KERN_ERR "%s: cannot allocate I/O range at %#x!\n", DRV_NAME, iobase);
- startslot = slot + 1;
- rc = -EBUSY;
- goto err_out;
- }
-
- priv = netdev_priv(dev);
- priv->slot = slot;
- priv->realirq = mca_device_transform_irq(mdev, irq);
- priv->medium = medium;
- spin_lock_init(&priv->lock);
-
- /* set base + irq for this device (irq not allocated so far) */
-
- dev->irq = 0;
- dev->mem_start = base;
- dev->mem_end = base + memlen;
- dev->base_addr = iobase;
-
- priv->base = ioremap(base, memlen);
- if (!priv->base) {
- printk(KERN_ERR "%s: cannot remap memory!\n", DRV_NAME);
- startslot = slot + 1;
- rc = -EBUSY;
- goto err_out_reg;
- }
-
- mca_device_set_name(mdev, ibmlana_adapter_names[mdev->index]);
- mca_device_set_claim(mdev, 1);
-
- /* set methods */
- dev->netdev_ops = &ibmlana_netdev_ops;
- dev->flags |= IFF_MULTICAST;
-
- /* copy out MAC address */
-
- for (z = 0; z < ETH_ALEN; z++)
- dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
-
- /* print config */
-
- printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, "
- "MAC address %pM.\n",
- dev->name, priv->realirq, dev->base_addr,
- dev->mem_start, dev->mem_end - 1,
- dev->dev_addr);
- printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]);
-
- /* reset board */
-
- ResetBoard(dev);
-
- /* next probe will start at next slot */
-
- startslot = slot + 1;
-
- rc = register_netdev(dev);
- if (rc)
- goto err_out_claimed;
-
- dev_set_drvdata(kdev, dev);
- return 0;
-
-err_out_claimed:
- mca_device_set_claim(mdev, 0);
- iounmap(priv->base);
-err_out_reg:
- release_region(iobase, IBM_LANA_IORANGE);
-err_out:
- free_netdev(dev);
- return rc;
-}
-
-static int ibmlana_remove_one(struct device *kdev)
-{
- struct mca_device *mdev = to_mca_device(kdev);
- struct net_device *dev = dev_get_drvdata(kdev);
- ibmlana_priv *priv = netdev_priv(dev);
-
- unregister_netdev(dev);
- /*DeinitBoard(dev); */
- release_region(dev->base_addr, IBM_LANA_IORANGE);
- mca_device_set_claim(mdev, 0);
- iounmap(priv->base);
- free_netdev(dev);
- return 0;
-}
-
-/* ------------------------------------------------------------------------
- * modularization support
- * ------------------------------------------------------------------------ */
-
-module_param_named(irq, ibmlana_irq, int, 0);
-module_param_named(io, ibmlana_io, int, 0);
-MODULE_PARM_DESC(irq, "IBM LAN/A IRQ number");
-MODULE_PARM_DESC(io, "IBM LAN/A I/O base address");
-MODULE_LICENSE("GPL");
-
-static struct mca_driver ibmlana_driver = {
- .id_table = ibmlana_adapter_ids,
- .driver = {
- .name = "ibmlana",
- .bus = &mca_bus_type,
- .probe = ibmlana_init_one,
- .remove = ibmlana_remove_one,
- },
-};
-
-static int __init ibmlana_init_module(void)
-{
- return mca_register_driver(&ibmlana_driver);
-}
-
-static void __exit ibmlana_cleanup_module(void)
-{
- mca_unregister_driver(&ibmlana_driver);
-}
-
-module_init(ibmlana_init_module);
-module_exit(ibmlana_cleanup_module);
diff --git a/drivers/net/ethernet/natsemi/ibmlana.h b/drivers/net/ethernet/natsemi/ibmlana.h
deleted file mode 100644
index accd5efc9c8a..000000000000
--- a/drivers/net/ethernet/natsemi/ibmlana.h
+++ /dev/null
@@ -1,278 +0,0 @@
-#ifndef _IBM_LANA_INCLUDE_
-#define _IBM_LANA_INCLUDE_
-
-#ifdef _IBM_LANA_DRIVER_
-
-/* maximum packet size */
-
-#define PKTSIZE 1524
-
-/* number of transmit buffers */
-
-#define TXBUFCNT 4
-
-/* Adapter ID's */
-#define IBM_LANA_ID 0xffe0
-
-/* media enumeration - defined in a way that it fits onto the LAN/A's
- POS registers... */
-
-typedef enum {
- Media_10BaseT, Media_10Base5,
- Media_Unknown, Media_10Base2, Media_Count
-} ibmlana_medium;
-
-/* private structure */
-
-typedef struct {
- unsigned int slot; /* MCA-Slot-# */
- int realirq; /* memorizes actual IRQ, even when
- currently not allocated */
-	ibmlana_medium medium;	/* physical connector */
- u32 tdastart, txbufstart, /* addresses */
- rrastart, rxbufstart, rdastart, rxbufcnt, txusedcnt;
- int nextrxdescr, /* next rx descriptor to be used */
- lastrxdescr, /* last free rx descriptor */
- nexttxdescr, /* last tx descriptor to be used */
- currtxdescr, /* tx descriptor currently tx'ed */
- txused[TXBUFCNT]; /* busy flags */
- void __iomem *base;
- spinlock_t lock;
-} ibmlana_priv;
-
-/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
- a full 64K I/O range... */
-
-#define IBM_LANA_IORANGE 0xa0
-
-/* Command Register: */
-
-#define SONIC_CMDREG 0x00
-#define CMDREG_HTX 0x0001 /* halt transmission */
-#define CMDREG_TXP 0x0002 /* start transmission */
-#define CMDREG_RXDIS 0x0004 /* disable receiver */
-#define CMDREG_RXEN 0x0008 /* enable receiver */
-#define CMDREG_STP 0x0010 /* stop timer */
-#define CMDREG_ST 0x0020 /* start timer */
-#define CMDREG_RST 0x0080 /* software reset */
-#define CMDREG_RRRA 0x0100 /* force SONIC to read first RRA */
-#define CMDREG_LCAM 0x0200 /* force SONIC to read CAM descrs */
-
-/* Data Configuration Register */
-
-#define SONIC_DCREG 0x02
-#define DCREG_EXBUS 0x8000 /* Extended Bus Mode */
-#define DCREG_LBR 0x2000 /* Latched Bus Retry */
-#define DCREG_PO1 0x1000 /* Programmable Outputs */
-#define DCREG_PO0 0x0800
-#define DCREG_SBUS 0x0400 /* Synchronous Bus Mode */
-#define DCREG_USR1 0x0200 /* User Definable Pins */
-#define DCREG_USR0 0x0100
-#define DCREG_WC0 0x0000 /* 0..3 Wait States */
-#define DCREG_WC1 0x0040
-#define DCREG_WC2 0x0080
-#define DCREG_WC3 0x00c0
-#define DCREG_DW16 0x0000 /* 16 bit Bus Mode */
-#define DCREG_DW32 0x0020 /* 32 bit Bus Mode */
-#define DCREG_BMS 0x0010 /* Block Mode Select */
-#define DCREG_RFT4 0x0000 /* 4/8/16/24 bytes RX Threshold */
-#define DCREG_RFT8 0x0004
-#define DCREG_RFT16 0x0008
-#define DCREG_RFT24 0x000c
-#define DCREG_TFT8 0x0000 /* 8/16/24/28 bytes TX Threshold */
-#define DCREG_TFT16 0x0001
-#define DCREG_TFT24 0x0002
-#define DCREG_TFT28 0x0003
-
-/* Receive Control Register */
-
-#define SONIC_RCREG 0x04
-#define RCREG_ERR 0x8000 /* accept damaged and collided pkts */
-#define RCREG_RNT 0x4000 /* accept packets that are < 64 */
-#define RCREG_BRD 0x2000 /* accept broadcasts */
-#define RCREG_PRO 0x1000 /* promiscuous mode */
-#define RCREG_AMC 0x0800 /* accept all multicasts */
-#define RCREG_LB_NONE 0x0000 /* no loopback */
-#define RCREG_LB_MAC 0x0200 /* MAC loopback */
-#define RCREG_LB_ENDEC 0x0400 /* ENDEC loopback */
-#define RCREG_LB_XVR 0x0600 /* Transceiver loopback */
-#define RCREG_MC 0x0100 /* Multicast received */
-#define RCREG_BC 0x0080 /* Broadcast received */
-#define RCREG_LPKT 0x0040 /* last packet in RBA */
-#define RCREG_CRS 0x0020 /* carrier sense present */
-#define RCREG_COL 0x0010 /* recv'd packet with collision */
-#define RCREG_CRCR 0x0008 /* recv'd packet with CRC error */
-#define RCREG_FAER 0x0004 /* recv'd packet with inv. framing */
-#define RCREG_LBK 0x0002 /* recv'd loopback packet */
-#define RCREG_PRX 0x0001 /* recv'd packet is OK */
-
-/* Transmit Control Register */
-
-#define SONIC_TCREG 0x06
-#define TCREG_PINT 0x8000 /* generate interrupt after TDA read */
-#define TCREG_POWC 0x4000 /* timer start out of window detect */
-#define TCREG_CRCI 0x2000 /* inhibit CRC generation */
-#define TCREG_EXDIS 0x1000 /* disable excessive deferral timer */
-#define TCREG_EXD 0x0400 /* excessive deferral occurred */
-#define TCREG_DEF 0x0200 /* single deferral occurred */
-#define TCREG_NCRS 0x0100 /* no carrier detected */
-#define TCREG_CRSL 0x0080 /* carrier lost */
-#define TCREG_EXC 0x0040 /* excessive collisions occurred */
-#define TCREG_OWC 0x0020 /* out of window collision occurred */
-#define TCREG_PMB 0x0008 /* packet monitored bad */
-#define TCREG_FU 0x0004 /* FIFO underrun */
-#define TCREG_BCM 0x0002 /* byte count mismatch of fragments */
-#define TCREG_PTX 0x0001 /* packet transmitted OK */
-
-/* Interrupt Mask Register */
-
-#define SONIC_IMREG 0x08
-#define IMREG_BREN 0x4000 /* interrupt when bus retry occurred */
-#define IMREG_HBLEN 0x2000 /* interrupt when heartbeat lost */
-#define IMREG_LCDEN 0x1000 /* interrupt when CAM loaded */
-#define IMREG_PINTEN 0x0800 /* interrupt when PINT in TDA set */
-#define IMREG_PRXEN 0x0400 /* interrupt when packet received */
-#define IMREG_PTXEN 0x0200 /* interrupt when packet was sent */
-#define IMREG_TXEREN 0x0100 /* interrupt when send failed */
-#define IMREG_TCEN 0x0080 /* interrupt when timer completed */
-#define IMREG_RDEEN 0x0040 /* interrupt when RDA exhausted */
-#define IMREG_RBEEN 0x0020 /* interrupt when RBA exhausted */
-#define IMREG_RBAEEN 0x0010 /* interrupt when RBA too short */
-#define IMREG_CRCEN 0x0008 /* interrupt when CRC counter rolls */
-#define IMREG_FAEEN 0x0004 /* interrupt when FAE counter rolls */
-#define IMREG_MPEN 0x0002 /* interrupt when MP counter rolls */
-#define IMREG_RFOEN 0x0001 /* interrupt when Rx FIFO overflows */
-
-/* Interrupt Status Register */
-
-#define SONIC_ISREG 0x0a
-#define ISREG_BR 0x4000 /* bus retry occurred */
-#define ISREG_HBL 0x2000 /* heartbeat lost */
-#define ISREG_LCD 0x1000 /* CAM loaded */
-#define ISREG_PINT 0x0800 /* PINT in TDA set */
-#define ISREG_PKTRX 0x0400 /* packet received */
-#define ISREG_TXDN 0x0200 /* packet was sent */
-#define ISREG_TXER 0x0100 /* send failed */
-#define ISREG_TC 0x0080 /* timer completed */
-#define ISREG_RDE 0x0040 /* RDA exhausted */
-#define ISREG_RBE 0x0020 /* RBA exhausted */
-#define ISREG_RBAE 0x0010 /* RBA too short for received frame */
-#define ISREG_CRC 0x0008 /* CRC counter rolls over */
-#define ISREG_FAE 0x0004 /* FAE counter rolls over */
-#define ISREG_MP 0x0002 /* MP counter rolls over */
-#define ISREG_RFO 0x0001 /* Rx FIFO overflows */
-
-#define SONIC_UTDA 0x0c /* current transmit descr address */
-#define SONIC_CTDA 0x0e
-
-#define SONIC_URDA 0x1a /* current receive descr address */
-#define SONIC_CRDA 0x1c
-
-#define SONIC_CRBA0 0x1e /* current receive buffer address */
-#define SONIC_CRBA1 0x20
-
-#define SONIC_RBWC0 0x22 /* word count in receive buffer */
-#define SONIC_RBWC1 0x24
-
-#define SONIC_EOBC 0x26 /* minimum space to be free in RBA */
-
-#define SONIC_URRA 0x28 /* upper address of CDA & Recv Area */
-
-#define SONIC_RSA 0x2a /* start of receive resource area */
-
-#define SONIC_REA 0x2c /* end of receive resource area */
-
-#define SONIC_RRP 0x2e /* resource read pointer */
-
-#define SONIC_RWP 0x30 /* resource write pointer */
-
-#define SONIC_CAMEPTR 0x42 /* CAM entry pointer */
-
-#define SONIC_CAMADDR2 0x44 /* CAM address ports */
-#define SONIC_CAMADDR1 0x46
-#define SONIC_CAMADDR0 0x48
-
-#define SONIC_CAMPTR 0x4c /* lower address of CDA */
-
-#define SONIC_CAMCNT 0x4e /* # of CAM descriptors to load */
-
-/* Data Configuration Register 2 */
-
-#define SONIC_DCREG2 0x7e
-#define DCREG2_EXPO3 0x8000 /* extended programmable outputs */
-#define DCREG2_EXPO2 0x4000
-#define DCREG2_EXPO1 0x2000
-#define DCREG2_EXPO0 0x1000
-#define DCREG2_HD 0x0800 /* heartbeat disable */
-#define DCREG2_JD 0x0200 /* jabber timer disable */
-#define DCREG2_AUTO 0x0100 /* enable AUI/TP auto selection */
-#define DCREG2_XWRAP 0x0040 /* TP transceiver loopback */
-#define DCREG2_PH 0x0010 /* HOLD request timing */
-#define DCREG2_PCM 0x0004 /* packet compress when matched */
-#define DCREG2_PCNM 0x0002 /* packet compress when not matched */
-#define DCREG2_RJCM 0x0001 /* inverse packet match via CAM */
-
-/* Board Control Register: Enable RAM, Interrupts... */
-
-#define BCMREG 0x80
-#define BCMREG_RAMEN 0x80 /* switch over to RAM */
-#define BCMREG_IPEND 0x40 /* interrupt pending ? */
-#define BCMREG_RESET 0x08 /* reset board */
-#define BCMREG_16BIT 0x04 /* adapter in 16-bit slot */
-#define BCMREG_RAMWIN 0x02 /* enable RAM window */
-#define BCMREG_IEN 0x01 /* interrupt enable */
-
-/* MAC Address PROM */
-
-#define MACADDRPROM 0x92
-
-/* structure of a CAM entry */
-
-typedef struct {
- u32 index; /* pointer into CAM area */
- u32 addr0; /* address part (bits 0..15 used) */
- u32 addr1;
- u32 addr2;
-} camentry_t;
-
-/* structure of a receive resource */
-
-typedef struct {
- u32 startlo; /* start address (bits 0..15 used) */
- u32 starthi;
- u32 cntlo; /* size in 16-bit quantities */
- u32 cnthi;
-} rra_t;
-
-/* structure of a receive descriptor */
-
-typedef struct {
- u32 status; /* packet status */
- u32 length; /* length in bytes */
- u32 startlo; /* start address */
- u32 starthi;
- u32 seqno; /* frame sequence */
- u32 link; /* pointer to next descriptor */
- /* bit 0 = EOL */
- u32 inuse; /* !=0 --> free for SONIC to write */
-} rda_t;
-
-/* structure of a transmit descriptor */
-
-typedef struct {
- u32 status; /* transmit status */
- u32 config; /* value for TCR */
- u32 length; /* total length */
- u32 fragcount; /* number of fragments */
- u32 startlo; /* start address of fragment */
- u32 starthi;
- u32 fraglength; /* length of this fragment */
- /* more address/length triplets may */
- /* follow here */
- u32 link; /* pointer to next descriptor */
- /* bit 0 = EOL */
-} tda_t;
-
-#endif /* _IBM_LANA_DRIVER_ */
-
-#endif /* _IBM_LANA_INCLUDE_ */
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index f4ad60c97eae..7a5e295588b0 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -862,9 +862,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
prev_eedata = eedata;
}
- /* Store MAC Address in perm_addr */
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
-
np = netdev_priv(dev);
np->ioaddr = ioaddr;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 7c94c089212f..bfd887382e19 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8014,7 +8014,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
/* initialize number of multicast & unicast MAC entries variables */
if (sp->device_type == XFRAME_I_DEVICE) {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index 92dd72d3f9de..f8f073880f84 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -82,9 +82,9 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct vxgedev *vdev = netdev_priv(dev);
- strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
- strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
- strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
+ strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
* vdev->no_of_vpath;
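
The fix above matters because sizeof(VXGE_DRIVER_NAME) is the size of the string literal, not of the destination field in struct ethtool_drvinfo; the bound passed to strlcpy() must always be the destination's size. A standalone userspace illustration of the difference (buffer sizes here are made up for the demo):

#include <stdio.h>

#define DRIVER_NAME "vxge"              /* stands in for VXGE_DRIVER_NAME */

struct drvinfo {                        /* stands in for ethtool_drvinfo */
	char driver[32];
	char fw_version[32];
};

int main(void)
{
	struct drvinfo info;

	/* 5 bytes vs 32 bytes: bounding a copy by the source literal only
	 * works while that literal happens to fit, and gives the wrong
	 * limit entirely once a longer string (a firmware version, say)
	 * is copied with the same pattern. */
	printf("sizeof literal = %zu, sizeof dest = %zu\n",
	       sizeof(DRIVER_NAME), sizeof(info.driver));

	snprintf(info.driver, sizeof(info.driver), "%s", DRIVER_NAME);
	printf("copied: %s\n", info.driver);
	return 0;
}
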
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 7c87105ca049..794444e09492 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -4682,7 +4682,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
	/* Store the fw version for ethtool option */
strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
- memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index cbd6a529d0c0..162da8975b05 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -878,8 +878,8 @@ static int w90p910_ether_ioctl(struct net_device *dev,
static void w90p910_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 653487dc7b52..0b8de12bcbca 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1821,6 +1821,11 @@ static int nv_alloc_rx(struct net_device *dev)
skb->data,
skb_tailroom(skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_rx_ctx->dma)) {
+ kfree_skb(skb);
+ goto packet_dropped;
+ }
np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
wmb();
@@ -1830,6 +1835,7 @@ static int nv_alloc_rx(struct net_device *dev)
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
} else {
+packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
u64_stats_update_end(&np->swstats_rx_syncp);
@@ -1856,6 +1862,11 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
skb->data,
skb_tailroom(skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_rx_ctx->dma)) {
+ kfree_skb(skb);
+ goto packet_dropped;
+ }
np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
@@ -1866,6 +1877,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
} else {
+packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
u64_stats_update_end(&np->swstats_rx_syncp);
@@ -2217,6 +2229,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_tx_ctx->dma)) {
+ /* on DMA mapping error - drop the packet */
+ kfree_skb(skb);
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ return NETDEV_TX_OK;
+ }
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2337,6 +2358,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_tx_ctx->dma)) {
+ /* on DMA mapping error - drop the packet */
+ kfree_skb(skb);
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ return NETDEV_TX_OK;
+ }
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
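
The forcedeth hunks above all add the same check: a streaming DMA mapping can fail, and pci_dma_mapping_error() has to be consulted before the address is handed to the hardware. In the transmit paths the right response is to drop the packet and return NETDEV_TX_OK, since NETDEV_TX_BUSY would only make the stack requeue a packet that will fail the same way. A condensed sketch of the pattern, reusing the field names visible in the hunks (not a drop-in replacement):

	np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset,
					     bcnt, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(np->pci_dev, np->put_tx_ctx->dma)) {
		/* nothing has been posted to the NIC yet, so the skb can be
		 * freed here; account it as a drop and tell the stack the
		 * packet was consumed */
		kfree_skb(skb);
		u64_stats_update_begin(&np->swstats_tx_syncp);
		np->stat_tx_dropped++;
		u64_stats_update_end(&np->swstats_tx_syncp);
		return NETDEV_TX_OK;
	}
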
@@ -3025,7 +3055,6 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
/* synchronized against open : rtnl_lock() held by caller */
memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
@@ -5003,6 +5032,11 @@ static int nv_loopback_test(struct net_device *dev)
test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
skb_tailroom(tx_skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ test_dma_addr)) {
+ dev_kfree_skb_any(tx_skb);
+ goto out;
+ }
pkt_data = skb_put(tx_skb, pkt_len);
for (i = 0; i < pkt_len; i++)
pkt_data[i] = (u8)(i & 0xff);
@@ -5731,9 +5765,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
"%s: set workaround bit for reversed mac addr\n",
__func__);
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->perm_addr)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 3466ca1e8f6c..c4122c86f829 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -800,7 +800,7 @@ static int lpc_mii_probe(struct net_device *ndev)
else
netdev_info(ndev, "using RMII interface\n");
phydev = phy_connect(ndev, dev_name(&phydev->dev),
- &lpc_handle_link_change, 0,
+ &lpc_handle_link_change,
lpc_phy_interface_mode(&pldat->pdev->dev));
if (IS_ERR(phydev)) {
@@ -1239,9 +1239,10 @@ static int lpc_eth_open(struct net_device *ndev)
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(ndev->dev.parent));
+ strlcpy(info->driver, MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ sizeof(info->bus_info));
}
static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index b5499198e029..921729f9c85c 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1350,10 +1350,10 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRV_NAME, sizeof(info->driver));
- strncpy(info->version, DRV_VERSION, sizeof(info->version));
- strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
info->n_stats = 0;
info->testinfo_len = 0;
info->regdump_len = 0;
@@ -1534,12 +1534,10 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac && is_valid_ether_addr(mac)) {
+ if (mac && is_valid_ether_addr(mac))
memcpy(netdev->dev_addr, mac, ETH_ALEN);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
- } else {
+ else
eth_hw_addr_random(netdev);
- }
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
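
The simplification above keeps the common probe-time idiom: take the MAC address from the device tree when one is present and valid, otherwise fall back to a random one. eth_hw_addr_random() itself marks the address as NET_ADDR_RANDOM, so no manual addr_assign_type fiddling is needed in this path. Sketch of the idiom as used here:

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac && is_valid_ether_addr(mac))
		memcpy(netdev->dev_addr, mac, ETH_ALEN);  /* address from DT */
	else
		eth_hw_addr_random(netdev);               /* random fallback */
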
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index 8f29feb35548..cbbeca3f8c5c 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -32,8 +32,8 @@ config HAMACHI
called hamachi.
config YELLOWFIN
- tristate "Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Packet Engines Yellowfin Gigabit-NIC support"
+ depends on PCI
select CRC32
---help---
Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index bf829ee30077..cac33e5f9bc2 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1808,9 +1808,10 @@ static int check_if_running(struct net_device *dev)
static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct hamachi_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fbaed4fa72fa..d28593b1fc3e 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1326,9 +1326,10 @@ static void set_rx_mode(struct net_device *dev)
static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct yellowfin_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ethtool_ops = {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 7f556a84925d..1bcaf45aa864 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -201,11 +201,8 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
adapter->mdump.md_template =
kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
- if (!adapter->mdump.md_template) {
- dev_err(&adapter->pdev->dev, "Unable to allocate memory "
- "for minidump template.\n");
+ if (!adapter->mdump.md_template)
return -ENOMEM;
- }
err = netxen_get_minidump_template(adapter);
if (err) {
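
The netxen cleanups in this and the following hunks all make the same change: the out-of-memory printk/dev_err is dropped because the slab and page allocators already log a warning (with a backtrace) when an allocation without __GFP_NOWARN fails, so a per-call message adds nothing. The resulting shape is simply:

	adapter->mdump.md_template =
		kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
	if (!adapter->mdump.md_template)
		return -ENOMEM;         /* the allocator already warned */
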
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 946160fa5843..9fbb1cdbfa47 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -670,11 +670,9 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
}
cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
- if (cur == NULL) {
- printk(KERN_ERR "%s: failed to add mac address filter\n",
- adapter->netdev->name);
+ if (cur == NULL)
return -ENOMEM;
- }
+
memcpy(cur->mac_addr, addr, ETH_ALEN);
list_add_tail(&cur->list, &adapter->mac_list);
return nx_p3_sre_macaddr_change(adapter,
@@ -2568,16 +2566,10 @@ netxen_dump_fw(struct netxen_adapter *adapter)
adapter->mdump.md_capture_size;
if (!adapter->mdump.md_capture_buff) {
adapter->mdump.md_capture_buff =
- vmalloc(adapter->mdump.md_dump_size);
- if (!adapter->mdump.md_capture_buff) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate memory for minidump "
- "capture_buffer(%d bytes).\n",
- adapter->mdump.md_dump_size);
+ vzalloc(adapter->mdump.md_dump_size);
+ if (!adapter->mdump.md_capture_buff)
return;
- }
- memset(adapter->mdump.md_capture_buff, 0,
- adapter->mdump.md_dump_size);
+
if (netxen_collect_minidump(adapter)) {
adapter->mdump.has_valid_dump = 0;
adapter->mdump.md_dump_size = 0;
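
Same idea here, with one addition: vzalloc() is vmalloc() plus zeroing, so the separate memset() of md_dump_size bytes disappears along with the failure message. Minimal form of the pattern, assuming only the fields named in the hunk:

	adapter->mdump.md_capture_buff = vzalloc(adapter->mdump.md_dump_size);
	if (!adapter->mdump.md_capture_buff)
		return;                 /* on success the buffer is already zeroed */
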
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index bc165f4d0f65..4782dcfde736 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
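
The index change above is a real fix rather than style: frag_array[0] describes the linear skb head, which appears to be unmapped with pci_unmap_single() in the partially shown block just before this loop, so the pci_unmap_page() loop over the remaining fragments has to start at 1 or the head gets unmapped a second time, and with the wrong unmap primitive. The intended overall shape, reconstructed from the hunk (illustrative, field names as shown):

	buffrag = cmd_buf->frag_array;
	if (buffrag->dma) {
		/* entry 0: the linear head, mapped with pci_map_single() */
		pci_unmap_single(adapter->pdev, buffrag->dma,
				 buffrag->length, PCI_DMA_TODEVICE);
		buffrag->dma = 0ULL;
	}
	for (j = 1; j < cmd_buf->frag_count; j++) {
		/* entries 1..n: page fragments, mapped with pci_map_page() */
		buffrag++;
		if (buffrag->dma) {
			pci_unmap_page(adapter->pdev, buffrag->dma,
				       buffrag->length, PCI_DMA_TODEVICE);
			buffrag->dma = 0ULL;
		}
	}
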
@@ -197,41 +197,33 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
struct nx_host_sds_ring *sds_ring;
struct nx_host_tx_ring *tx_ring;
struct netxen_rx_buffer *rx_buf;
- int ring, i, size;
+ int ring, i;
struct netxen_cmd_buffer *cmd_buf_arr;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- size = sizeof(struct nx_host_tx_ring);
- tx_ring = kzalloc(size, GFP_KERNEL);
- if (tx_ring == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n",
- netdev->name);
+ tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
return -ENOMEM;
- }
+
adapter->tx_ring = tx_ring;
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
- if (cmd_buf_arr == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
- netdev->name);
+ if (cmd_buf_arr == NULL)
goto err_out;
- }
+
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
- size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring);
- rds_ring = kzalloc(size, GFP_KERNEL);
- if (rds_ring == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
- netdev->name);
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct nx_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
goto err_out;
- }
+
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
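
Beyond dropping the error messages, these hunks replace open-coded "count * size" allocations with kcalloc() (and, in the qla3xxx hunk further down, kmalloc_array()): passing count and element size separately lets the allocator reject a product that would overflow instead of silently under-allocating, and kcalloc() additionally zeroes the memory. A standalone userspace analogue of the overflow point, using calloc():

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	size_t n  = SIZE_MAX / 2 + 1;   /* absurd element count, for the demo */
	size_t sz = 4;

	size_t naive = n * sz;          /* unsigned multiply wraps (to 0 here) */
	void *p = calloc(n, sz);        /* count/size form detects the overflow */

	printf("naive n*sz = %zu, calloc() -> %p\n", naive, p);
	free(p);                        /* free(NULL) is a no-op */
	return 0;
}
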
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 6098fd4adfeb..501f49207da5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -501,12 +501,11 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = *(p + 5 - i);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
return 0;
@@ -1963,10 +1962,12 @@ unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
out_err:
return -ENOMEM;
@@ -3175,11 +3176,8 @@ netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
}
cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
- if (cur == NULL) {
- printk(KERN_ERR "%s: failed to add vlan ip to list\n",
- adapter->netdev->name);
+ if (cur == NULL)
return;
- }
cur->ip_addr = ifa->ifa_address;
list_add_tail(&cur->list, &adapter->vlan_ip_list);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 67a679aaf29a..8fd38cb6d26a 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2591,13 +2591,11 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
else
qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
- qdev->lrg_buf =
- kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
- GFP_KERNEL);
- if (qdev->lrg_buf == NULL) {
- netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
+ qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
+ sizeof(struct ql_rcv_buf_cb),
+ GFP_KERNEL);
+ if (qdev->lrg_buf == NULL)
return -ENOMEM;
- }
qdev->lrg_buf_q_alloc_virt_addr =
pci_alloc_consistent(qdev->pdev,
@@ -3867,7 +3865,6 @@ static int ql3xxx_probe(struct pci_dev *pdev,
ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
}
- memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index c4b8ced83829..7722a203e388 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -6,4 +6,6 @@ obj-$(CONFIG_QLCNIC) := qlcnic.o
qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
- qlcnic_sysfs.o qlcnic_minidump.o
+ qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
+ qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
+ qlcnic_minidump.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 537902479689..11c3db6daffd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -33,11 +33,13 @@
#include <linux/if_vlan.h>
#include "qlcnic_hdr.h"
+#include "qlcnic_hw.h"
+#include "qlcnic_83xx_hw.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 29
-#define QLCNIC_LINUX_VERSIONID "5.0.29"
+#define _QLCNIC_LINUX_MINOR 1
+#define _QLCNIC_LINUX_SUBVERSION 34
+#define QLCNIC_LINUX_VERSIONID "5.1.34"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -96,7 +98,6 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
-
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -203,6 +204,7 @@ struct uni_data_desc{
/* Flash Defines and Structures */
#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FDT_LOCATION 0x3F0000
#define QLCNIC_B0_FW_IMAGE_REGION 0x74
#define QLCNIC_C0_FW_IMAGE_REGION 0x97
#define QLCNIC_BOOTLD_REGION 0X72
@@ -223,6 +225,36 @@ struct qlcnic_flt_entry {
u32 end_addr;
};
+/* Flash Descriptor Table */
+struct qlcnic_fdt {
+ u32 valid;
+ u16 ver;
+ u16 len;
+ u16 cksum;
+ u16 unused;
+ u8 model[16];
+ u16 mfg_id;
+ u16 id;
+ u8 flag;
+ u8 erase_cmd;
+ u8 alt_erase_cmd;
+ u8 write_enable_cmd;
+ u8 write_enable_bits;
+ u8 write_statusreg_cmd;
+ u8 unprotected_sec_cmd;
+ u8 read_manuf_cmd;
+ u32 block_size;
+ u32 alt_block_size;
+ u32 flash_size;
+ u32 write_enable_data;
+ u8 readid_addr_len;
+ u8 write_disable_bits;
+ u8 read_dev_id_len;
+ u8 chip_erase_cmd;
+ u16 read_timeo;
+ u8 protected_sec_cmd;
+ u8 resvd[65];
+};
/* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678
@@ -267,6 +299,12 @@ struct qlcnic_flt_entry {
extern char qlcnic_driver_name[];
+extern int qlcnic_use_msi;
+extern int qlcnic_use_msi_x;
+extern int qlcnic_auto_fw_reset;
+extern int qlcnic_load_fw_file;
+extern int qlcnic_config_npars;
+
/* Number of status descriptors to handle per interrupt */
#define MAX_STATUS_HANDLE (64)
@@ -314,6 +352,7 @@ struct qlcnic_rx_buffer {
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
+#define QLCNIC_DEV_INFO_SIZE 1
struct qlcnic_nic_intr_coalesce {
u8 type;
@@ -337,6 +376,7 @@ struct qlcnic_dump_template_hdr {
u32 sys_info[3];
u32 saved_state[16];
u32 cap_sizes[8];
+ u32 ocm_wnd_reg[16];
u32 rsvd[0];
};
@@ -396,12 +436,24 @@ struct qlcnic_hardware_context {
u16 act_pci_func;
u32 capabilities;
+ u32 capabilities2;
u32 temp;
u32 int_vec_bit;
u32 fw_hal_version;
+ u32 port_config;
struct qlcnic_hardware_ops *hw_ops;
struct qlcnic_nic_intr_coalesce coal;
struct qlcnic_fw_dump fw_dump;
+ struct qlcnic_fdt fdt;
+ struct qlc_83xx_reset reset;
+ struct qlc_83xx_idc idc;
+ struct qlc_83xx_fw_info fw_info;
+ struct qlcnic_intrpt_config *intr_tbl;
+ u32 *reg_tbl;
+ u32 *ext_reg_tbl;
+ u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
+ u32 mbox_reg[4];
+ spinlock_t mbx_lock;
};
struct qlcnic_adapter_stats {
@@ -422,6 +474,8 @@ struct qlcnic_adapter_stats {
u64 null_rxbuf;
u64 rx_dma_map_error;
u64 tx_dma_map_error;
+ u64 spurious_intr;
+ u64 mac_filter_limit_overrun;
};
/*
@@ -460,12 +514,17 @@ struct qlcnic_host_sds_ring {
} ____cacheline_internodealigned_in_smp;
struct qlcnic_host_tx_ring {
+ int irq;
+ void __iomem *crb_intr_mask;
+ char name[IFNAMSIZ+4];
u16 ctx_id;
u32 producer;
u32 sw_consumer;
u32 num_desc;
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
struct qlcnic_cmd_buffer *cmd_buf_arr;
__le32 *hw_consumer;
@@ -492,8 +551,6 @@ struct qlcnic_recv_context {
/* HW context creation */
#define QLCNIC_OS_CRB_RETRY_COUNT 4000
-#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
- (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
#define QLCNIC_CDRP_CMD_BIT 0x80000000
@@ -513,43 +570,6 @@ struct qlcnic_recv_context {
* the crb QLCNIC_CDRP_CRB_OFFSET.
*/
#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
-#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
-
-#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
-#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
-#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
-#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
-#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
-#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
-#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
-#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
-#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
-#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
-#define QLCNIC_CDRP_CMD_INTRPT_TEST 0x00000011
-#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
-#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
-#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
-#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
-#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
-#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
-#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
-#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
-#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
-
-#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
-#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
-#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
-#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
-#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
-#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
-#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
-#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
-#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
-#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
#define QLCNIC_RCODE_SUCCESS 0
#define QLCNIC_RCODE_INVALID_ARGS 6
@@ -726,6 +746,11 @@ struct qlcnic_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
};
+/* MAC Learn */
+#define NO_MAC_LEARN 0
+#define DRV_MAC_LEARN 1
+#define FDB_MAC_LEARN 2
+
#define QLCNIC_HOST_REQUEST 0x13
#define QLCNIC_REQUEST 0x14
@@ -762,7 +787,7 @@ struct qlcnic_mac_list_s {
*/
#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
-#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -779,6 +804,8 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
+#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
+#define QLCNIC_FW_CAPABILITY_2_OCBB BIT_5
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -855,7 +882,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_MSI_ENABLED 0x02
#define QLCNIC_MSIX_ENABLED 0x04
-#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_LRO_ENABLED 0x01
#define QLCNIC_LRO_DISABLED 0x00
#define QLCNIC_BRIDGE_ENABLED 0X10
#define QLCNIC_DIAG_ENABLED 0x20
@@ -887,6 +914,7 @@ struct qlcnic_ipaddr {
#define __QLCNIC_AER 5
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
+#define __QLCNIC_ELB_INPROGRESS 8
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -895,12 +923,14 @@ struct qlcnic_ipaddr {
#define QLCNIC_FILTER_AGE 80
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
+#define QLCNIC_LB_BUCKET_SIZE 32
/* QLCNIC Driver Error Code */
#define QLCNIC_FW_NOT_RESPOND 51
#define QLCNIC_TEST_IN_PROGRESS 52
#define QLCNIC_UNDEFINED_ERROR 53
#define QLCNIC_LB_CABLE_NOT_CONN 54
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
struct qlcnic_filter {
struct hlist_node fnode;
@@ -912,7 +942,8 @@ struct qlcnic_filter {
struct qlcnic_filter_hash {
struct hlist_head *fhead;
u8 fnum;
- u8 fmax;
+ u16 fmax;
+ u16 fbucket_size;
};
struct qlcnic_adapter {
@@ -934,6 +965,7 @@ struct qlcnic_adapter {
u8 max_rds_rings;
u8 max_sds_rings;
+ u8 rx_csum;
u8 portnum;
u8 fw_wait_cnt;
@@ -954,8 +986,10 @@ struct qlcnic_adapter {
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
- u8 mac_learn;
+ bool drv_mac_learn;
+ bool fdb_mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u8 flash_mfg_id;
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
@@ -969,12 +1003,17 @@ struct qlcnic_adapter {
void __iomem *isr_int_vec;
struct msix_entry *msix_entries;
+ struct workqueue_struct *qlcnic_wq;
struct delayed_work fw_work;
+ struct delayed_work idc_aen_work;
struct qlcnic_filter_hash fhash;
+ struct qlcnic_filter_hash rx_fhash;
spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
+ /* spinlock for catching rcv filters for eswitch traffic */
+ spinlock_t rx_mac_learn_lock;
u32 file_prd_off; /*File fw product offset*/
u32 fw_version;
const struct firmware *fw;
@@ -995,7 +1034,24 @@ struct qlcnic_info_le {
__le16 max_rx_ques;
__le16 min_tx_bw;
__le16 max_tx_bw;
- u8 reserved2[104];
+ __le32 op_type;
+ __le16 max_bw_reg_offset;
+ __le16 max_linkspeed_reg_offset;
+ __le32 capability1;
+ __le32 capability2;
+ __le32 capability3;
+ __le16 max_tx_mac_filters;
+ __le16 max_rx_mcast_mac_filters;
+ __le16 max_rx_ucast_mac_filters;
+ __le16 max_rx_ip_addr;
+ __le16 max_rx_lro_flow;
+ __le16 max_rx_status_rings;
+ __le16 max_rx_buf_rings;
+ __le16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ __le16 max_vports;
+ u8 reserved2[64];
} __packed;
struct qlcnic_info {
@@ -1005,12 +1061,28 @@ struct qlcnic_info {
u16 switch_mode;
u32 capabilities;
u8 max_mac_filters;
- u8 reserved1;
u16 max_mtu;
u16 max_tx_ques;
u16 max_rx_ques;
u16 min_tx_bw;
u16 max_tx_bw;
+ u32 op_type;
+ u16 max_bw_reg_offset;
+ u16 max_linkspeed_reg_offset;
+ u32 capability1;
+ u32 capability2;
+ u32 capability3;
+ u16 max_tx_mac_filters;
+ u16 max_rx_mcast_mac_filters;
+ u16 max_rx_ucast_mac_filters;
+ u16 max_rx_ip_addr;
+ u16 max_rx_lro_flow;
+ u16 max_rx_status_rings;
+ u16 max_rx_buf_rings;
+ u16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ u16 max_vports;
};
struct qlcnic_pci_info_le {
@@ -1024,7 +1096,9 @@ struct qlcnic_pci_info_le {
__le16 reserved1[2];
u8 mac[ETH_ALEN];
- u8 reserved2[106];
+ __le16 func_count;
+ u8 reserved2[104];
+
} __packed;
struct qlcnic_pci_info {
@@ -1035,6 +1109,7 @@ struct qlcnic_pci_info {
u16 tx_min_bw;
u16 tx_max_bw;
u8 mac[ETH_ALEN];
+ u16 func_count;
};
struct qlcnic_npar_info {
@@ -1266,10 +1341,8 @@ struct qlcnic_esw_statistics {
#define QLCNIC_RESET_QUIESCENT 0xadd00020
struct _cdrp_cmd {
- u32 cmd;
- u32 arg1;
- u32 arg2;
- u32 arg3;
+ u32 num;
+ u32 *arg;
};
struct qlcnic_cmd_args {
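
Replacing the four fixed argument fields with a counted array presumably lets the same descriptor carry both the short 82xx CDRP commands and the longer 83xx mailbox commands added elsewhere in this series: each command allocates exactly as many u32 arguments as it needs and frees them afterwards (see qlcnic_free_mbx_args() below). A hedged sketch of how such a descriptor is typically filled and released; the names here are invented, not the driver's helpers:

/* illustrative only */
struct example_cmd {
	u32 num;        /* number of mailbox arguments */
	u32 *arg;       /* argument array, sized per command */
};

static int example_cmd_init(struct example_cmd *cmd, u32 count)
{
	cmd->arg = kcalloc(count, sizeof(*cmd->arg), GFP_KERNEL);
	if (!cmd->arg)
		return -ENOMEM;
	cmd->num = count;
	return 0;
}

static void example_cmd_free(struct example_cmd *cmd)
{
	kfree(cmd->arg);
	cmd->arg = NULL;
	cmd->num = 0;
}
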
@@ -1279,9 +1352,6 @@ struct qlcnic_cmd_args {
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
-
-int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
-int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
@@ -1291,9 +1361,10 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
(((addr) < (high)) && ((addr) >= (low)))
#define QLCRD32(adapter, off) \
- (qlcnic_hw_read_wx_2M(adapter, off))
+ (adapter->ahw->hw_ops->read_reg)(adapter, off)
+
#define QLCWR32(adapter, off, val) \
- (qlcnic_hw_write_wx_2M(adapter, off, val))
+ adapter->ahw->hw_ops->write_reg(adapter, off, val)
int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
@@ -1306,10 +1377,6 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
#define qlcnic_phy_unlock(a) \
qlcnic_pcie_sem_unlock((a), 3)
-#define qlcnic_api_lock(a) \
- qlcnic_pcie_sem_lock((a), 5, 0)
-#define qlcnic_api_unlock(a) \
- qlcnic_pcie_sem_unlock((a), 5)
#define qlcnic_sw_lock(a) \
qlcnic_pcie_sem_lock((a), 6, 0)
#define qlcnic_sw_unlock(a) \
@@ -1324,14 +1391,13 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define MAX_CTL_CHECK 1000
-int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
-int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
/* Functions from qlcnic_init.c */
+void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
@@ -1361,54 +1427,42 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring);
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *);
+int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
-int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
-int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
-int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
-void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features);
int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
-int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
-void qlcnic_fetch_mac(u32, u32, u8, u8 *);
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
-void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
-int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
/* Functions from qlcnic_ethtool.c */
-int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
+int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
-void qlcnic_dev_request_reset(struct qlcnic_adapter *);
+int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
+int qlcnic_validate_max_rss(u8, u8);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
-
-/* Management functions */
-int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
-int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
-int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
/* eSwitch management functions */
int qlcnic_config_switch_port(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+
int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
@@ -1418,14 +1472,12 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
struct __qlcnic_esw_statistics *);
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
-extern int qlcnic_config_tso;
-int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
-void qlcnic_napi_del(struct qlcnic_adapter *adapter);
-void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
-void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd);
+
int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
void qlcnic_free_tx_rings(struct qlcnic_adapter *);
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
@@ -1433,6 +1485,9 @@ void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
void qlcnic_set_vlan_config(struct qlcnic_adapter *,
@@ -1440,6 +1495,22 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *,
void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_detach(struct qlcnic_adapter *);
+void qlcnic_teardown_intr(struct qlcnic_adapter *);
+int qlcnic_attach(struct qlcnic_adapter *);
+int __qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_restore_indev_addr(struct net_device *, unsigned long);
+
+int qlcnic_check_temp(struct qlcnic_adapter *);
+int qlcnic_init_pci_info(struct qlcnic_adapter *);
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_reset_npar_config(struct qlcnic_adapter *);
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
+void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int,
+ __le16);
/*
* QLOGIC Board information
*/
@@ -1462,6 +1533,277 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
+struct qlcnic_nic_template {
+ int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+ int (*config_led) (struct qlcnic_adapter *, u32, u32);
+ int (*start_firmware) (struct qlcnic_adapter *);
+ int (*init_driver) (struct qlcnic_adapter *);
+ void (*request_reset) (struct qlcnic_adapter *, u32);
+ void (*cancel_idc_work) (struct qlcnic_adapter *);
+ int (*napi_add)(struct qlcnic_adapter *, struct net_device *);
+ void (*napi_del)(struct qlcnic_adapter *);
+ void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
+ irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
+};
+
+/* Adapter hardware abstraction */
+struct qlcnic_hardware_ops {
+ void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ int (*read_reg) (struct qlcnic_adapter *, ulong);
+ int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
+ void (*get_ocm_win) (struct qlcnic_hardware_context *);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
+ int (*setup_intr) (struct qlcnic_adapter *, u8);
+ int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+ int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*get_func_no) (struct qlcnic_adapter *);
+ int (*api_lock) (struct qlcnic_adapter *);
+ void (*api_unlock) (struct qlcnic_adapter *);
+ void (*add_sysfs) (struct qlcnic_adapter *);
+ void (*remove_sysfs) (struct qlcnic_adapter *);
+ void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *);
+ int (*create_rx_ctx) (struct qlcnic_adapter *);
+ int (*create_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+ int (*setup_link_event) (struct qlcnic_adapter *, int);
+ int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
+ int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
+ int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
+ int (*change_macvlan) (struct qlcnic_adapter *, u8*, __le16, u8);
+ void (*napi_enable) (struct qlcnic_adapter *);
+ void (*napi_disable) (struct qlcnic_adapter *);
+ void (*config_intr_coal) (struct qlcnic_adapter *);
+ int (*config_rss) (struct qlcnic_adapter *, int);
+ int (*config_hw_lro) (struct qlcnic_adapter *, int);
+ int (*config_loopback) (struct qlcnic_adapter *, u8);
+ int (*clear_loopback) (struct qlcnic_adapter *, u8);
+ int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+ void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, __le16);
+ int (*get_board_info) (struct qlcnic_adapter *);
+};
+
+extern struct qlcnic_nic_template qlcnic_vf_ops;
+
+static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->start_firmware(adapter);
+}
+
+static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size);
+}
+
+static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
+}
+
+static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
+ ulong off)
+{
+ return adapter->ahw->hw_ops->read_reg(adapter, off);
+}
+
+static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
+ ulong off, u32 data)
+{
+ return adapter->ahw->hw_ops->write_reg(adapter, off, data);
+}
+
+static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
+ u8 *mac)
+{
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
+}
+
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+{
+ return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
+}
+
+static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 arg)
+{
+ return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg);
+}
+
+static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+}
+
+static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_func_no(adapter);
+}
+
+static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->api_lock(adapter);
+}
+
+static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->api_unlock(adapter);
+}
+
+static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->add_sysfs(adapter);
+}
+
+static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->remove_sysfs(adapter);
+}
+
+static inline void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring);
+}
+
+static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->create_rx_ctx(adapter);
+}
+
+static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr,
+ int ring)
+{
+ return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
+}
+
+static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->setup_link_event(adapter, enable);
+}
+
+static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u8 id)
+{
+ return adapter->ahw->hw_ops->get_nic_info(adapter, info, id);
+}
+
+static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *info)
+{
+ return adapter->ahw->hw_ops->get_pci_info(adapter, info);
+}
+
+static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ return adapter->ahw->hw_ops->set_nic_info(adapter, info);
+}
+
+static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
+ u8 *addr, __le16 id, u8 cmd)
+{
+ return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
+}
+
+static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ return adapter->nic_ops->napi_add(adapter, netdev);
+}
+
+static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->napi_del(adapter);
+}
+
+static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_enable(adapter);
+}
+
+static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_disable(adapter);
+}
+
+static inline void qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->config_intr_coal(adapter);
+}
+
+static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ return adapter->ahw->hw_ops->config_rss(adapter, enable);
+}
+
+static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->config_hw_lro(adapter, enable);
+}
+
+static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+ u32 mode)
+{
+ return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode);
+}
+
+static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 *addr, __le16 id)
+{
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+}
+
+static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->get_board_info(adapter);
+}
+
+static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
+{
+ adapter->nic_ops->request_reset(adapter, key);
+}
+
+static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->cancel_idc_work(adapter);
+}
+
+static inline irqreturn_t
+qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->clear_legacy_intr(adapter);
+}
+
+static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 rate)
+{
+ return adapter->nic_ops->config_led(adapter, state, rate);
+}
+
+static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
+{
+ adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
+}
+
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
writel(0, sds_ring->crb_intr_mask);
@@ -1480,12 +1822,6 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
-struct qlcnic_nic_template {
- int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
- int (*config_led) (struct qlcnic_adapter *, u32, u32);
- int (*start_firmware) (struct qlcnic_adapter *);
-};
-
#define QLCDB(adapter, lvl, _fmt, _args...) do { \
if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
printk(KERN_INFO "%s: %s: " _fmt, \
@@ -1493,6 +1829,7 @@ struct qlcnic_nic_template {
__func__, ##_args); \
} while (0)
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
@@ -1500,4 +1837,11 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
}
+static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
new file mode 100644
index 000000000000..cd5ae8813cb3
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -0,0 +1,3011 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+
+#define QLCNIC_MAX_TX_QUEUES 1
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+/* status descriptor mailbox data
+ * @phy_addr: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+ u64 phy_addr;
+ u8 rsvd1[16];
+ u16 sds_ring_size;
+ u16 rsvd2[3];
+ u16 intrpt_id;
+ u8 intrpt_val;
+ u8 rsvd3[5];
+} __packed;
+
+/* receive descriptor buffer data
+ * @phy_addr_reg: physical address of regular buffer
+ * @phy_addr_jmb: physical address of jumbo buffer
+ * @reg_ring_sz: size of regular buffer
+ * @reg_ring_len: no. of entries in regular buffer
+ * @jmb_ring_sz: size of jumbo buffer
+ * @jmb_ring_len: no. of entries in jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+ u64 phy_addr_reg;
+ u64 phy_addr_jmb;
+ u16 reg_ring_sz;
+ u16 reg_ring_len;
+ u16 jmb_ring_sz;
+ u16 jmb_ring_len;
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+ u32 reg_buf;
+ u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @ctx_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u8 state;
+ u8 num_pci_func;
+ u8 phy_port;
+ u8 vport_id;
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr: DMA address of the transmit buffer
+ * @cnsmr_index: host consumer index
+ * @size: length of the transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: source of the interrupt
+ */
+struct qlcnic_tx_mbx {
+ u64 phys_addr;
+ u64 cnsmr_index;
+ u16 size;
+ u16 intr_id;
+ u8 src;
+ u8 rsvd[3];
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+struct qlcnic_tx_mbx_out {
+ u32 host_prod;
+ u16 ctx_id;
+ u8 state;
+ u8 rsvd;
+} __packed;
+
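+/* Mailbox command descriptors: {command opcode, number of request (inbox)
+ * registers, number of response (outbox) registers} for each command.
+ */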
+static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
+ {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
+ {QLCNIC_CMD_CREATE_RX_CTX, 136, 27},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 54, 18},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 22, 12},
+ {QLCNIC_CMD_SET_MTU, 3, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+ {QLCNIC_CMD_GET_NIC_INFO, 2, 19},
+ {QLCNIC_CMD_SET_NIC_INFO, 32, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 1, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 5, 5},
+ {QLCNIC_CMD_GET_LINK_EVENT, 2, 1},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1},
+ {QLCNIC_CMD_CONFIGURE_RSS, 14, 1},
+ {QLCNIC_CMD_CONFIGURE_LED, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1},
+ {QLCNIC_CMD_GET_STATISTICS, 2, 80},
+ {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1},
+ {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
+ {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
+ {QLCNIC_CMD_IDC_ACK, 5, 1},
+ {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
+ {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
+ {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+};
+
+static const u32 qlcnic_83xx_ext_reg_tbl[] = {
+ 0x38CC, /* Global Reset */
+ 0x38F0, /* Wildcard */
+ 0x38FC, /* Informant */
+ 0x3038, /* Host MBX ctrl */
+ 0x303C, /* FW MBX ctrl */
+ 0x355C, /* BOOT LOADER ADDRESS REG */
+ 0x3560, /* BOOT LOADER SIZE REG */
+ 0x3564, /* FW IMAGE ADDR REG */
+ 0x1000, /* MBX intr enable */
+ 0x1200, /* Default Intr mask */
+ 0x1204, /* Default Interrupt ID */
+ 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */
+ 0x3784, /* QLC_83XX_IDC_DEV_STATE */
+ 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */
+ 0x378C, /* QLC_83XX_IDC_DRV_ACK */
+ 0x3790, /* QLC_83XX_IDC_CTRL */
+ 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */
+ 0x3798, /* QLC_83XX_IDC_MIN_VERSION */
+ 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */
+ 0x37A0, /* QLC_83XX_IDC_PF_0 */
+ 0x37A4, /* QLC_83XX_IDC_PF_1 */
+ 0x37A8, /* QLC_83XX_IDC_PF_2 */
+ 0x37AC, /* QLC_83XX_IDC_PF_3 */
+ 0x37B0, /* QLC_83XX_IDC_PF_4 */
+ 0x37B4, /* QLC_83XX_IDC_PF_5 */
+ 0x37B8, /* QLC_83XX_IDC_PF_6 */
+ 0x37BC, /* QLC_83XX_IDC_PF_7 */
+ 0x37C0, /* QLC_83XX_IDC_PF_8 */
+ 0x37C4, /* QLC_83XX_IDC_PF_9 */
+ 0x37C8, /* QLC_83XX_IDC_PF_10 */
+ 0x37CC, /* QLC_83XX_IDC_PF_11 */
+ 0x37D0, /* QLC_83XX_IDC_PF_12 */
+ 0x37D4, /* QLC_83XX_IDC_PF_13 */
+ 0x37D8, /* QLC_83XX_IDC_PF_14 */
+ 0x37DC, /* QLC_83XX_IDC_PF_15 */
+ 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */
+ 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */
+ 0x37F0, /* QLC_83XX_DRV_OP_MODE */
+ 0x37F4, /* QLC_83XX_VNIC_STATE */
+ 0x3868, /* QLC_83XX_DRV_LOCK */
+ 0x386C, /* QLC_83XX_DRV_UNLOCK */
+ 0x3504, /* QLC_83XX_DRV_LOCK_ID */
+ 0x34A4, /* QLC_83XX_ASIC_TEMP */
+};
+
+static const u32 qlcnic_83xx_reg_tbl[] = {
+ 0x34A8, /* PEG_HALT_STAT1 */
+ 0x34AC, /* PEG_HALT_STAT2 */
+ 0x34B0, /* FW_HEARTBEAT */
+ 0x3500, /* FLASH LOCK_ID */
+ 0x3528, /* FW_CAPABILITIES */
+ 0x3538, /* Driver active, DRV_REG0 */
+ 0x3540, /* Device state, DRV_REG1 */
+ 0x3544, /* Driver state, DRV_REG2 */
+ 0x3548, /* Driver scratch, DRV_REG3 */
+ 0x354C, /* Device partition info, DRV_REG4 */
+ 0x3524, /* Driver IDC ver, DRV_REG5 */
+ 0x3550, /* FW_VER_MAJOR */
+ 0x3554, /* FW_VER_MINOR */
+ 0x3558, /* FW_VER_SUB */
+ 0x359C, /* NPAR STATE */
+ 0x35FC, /* FW_IMG_VALID */
+ 0x3650, /* CMD_PEG_STATE */
+ 0x373C, /* RCV_PEG_STATE */
+ 0x37B4, /* ASIC TEMP */
+ 0x356C, /* FW API */
+ 0x3570, /* DRV OP MODE */
+ 0x3850, /* FLASH LOCK */
+ 0x3854, /* FLASH UNLOCK */
+};
+
+static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_83xx_mbx_op,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .add_sysfs = qlcnic_83xx_add_sysfs,
+ .remove_sysfs = qlcnic_83xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+};
+
+static struct qlcnic_nic_template qlcnic_83xx_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .request_reset = qlcnic_83xx_idc_request_reset,
+ .cancel_idc_work = qlcnic_83xx_idc_exit,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+};
+
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_83xx_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ struct pci_dev *pdev = adapter->pdev;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+ return adapter->fw_version;
+}
+
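+/* Select the indirect access window for this function: write the target
+ * address to the per-function CRB window register and read it back to
+ * confirm the window actually moved.
+ */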
+static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
+{
+ void __iomem *base;
+ u32 val;
+
+ base = adapter->ahw->pci_base0 +
+ QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func);
+ writel(addr, base);
+ val = readl(base);
+ if (val != addr)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr)
+{
+ int ret;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ret = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!ret) {
+ return QLCRDX(ahw, QLCNIC_WILDCARD);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x\n", __func__, (int)addr);
+ return -EIO;
+ }
+}
+
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ u32 data)
+{
+ int err;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!err) {
+ QLCWRX(ahw, QLCNIC_WILDCARD, data);
+ return 0;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x data = 0x%x\n",
+ __func__, (int)addr, data);
+ return err;
+ }
+}
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+{
+ int err, i, num_msix;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!num_intr)
+ num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+ num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ num_intr));
+ /* add one vector for the AEN (mailbox) interrupt on top of the ring interrupts */
+ num_msix += 1;
+ num_msix += adapter->max_drv_tx_rings;
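+ /* e.g. with 4 online CPUs and num_intr >= 4, plus one Tx ring:
+ * 4 + 1 (AEN) + 1 = 6 vectors requested
+ */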
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix;
+ else
+ num_msix = 1;
+ /* setup interrupt mapping table for fw */
+ ahw->intr_tbl = vzalloc(num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+ }
+
+ for (i = 0; i < num_msix; i++) {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ else
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+ return 0;
+}
+
+inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ writel(0, adapter->tgt_mask_reg);
+}
+
+/* Enable MSI-X and INTx interrupts */
+void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-X and INTx interrupts */
+void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
+inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+ *adapter)
+{
+ u32 mask;
+
+ /* The mailbox interrupt in MSI-X mode and the legacy interrupt share
+ * the same source register. We could be here before contexts are
+ * created, when sds_ring->crb_intr_mask has not yet been initialized,
+ * so calculate the BAR offset of the interrupt source register directly.
+ */
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+}
+
+inline void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 mask;
+
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(1, adapter->ahw->pci_base0 + mask);
+}
+
+static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+ for (i = 0; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
+}
+
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 intr_val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int retries = 0;
+
+ intr_val = readl(adapter->tgt_status_reg);
+
+ if (!QLC_83XX_VALID_INTX_BIT31(intr_val))
+ return IRQ_NONE;
+
+ if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) {
+ adapter->stats.spurious_intr++;
+ return IRQ_NONE;
+ }
+ /* The barrier is required to ensure writes to the registers */
+ wmb();
+
+ /* clear the interrupt trigger control register */
+ writel(0, adapter->isr_int_vec);
+ intr_val = readl(adapter->isr_int_vec);
+ do {
+ intr_val = readl(adapter->tgt_status_reg);
+ if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func)
+ break;
+ retries++;
+ } while (QLC_83XX_VALID_INTX_BIT30(intr_val) &&
+ (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY));
+
+ return IRQ_HANDLED;
+}
+
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ qlcnic_83xx_process_aen(adapter);
+out:
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+}
+
+irqreturn_t qlcnic_83xx_intr(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ qlcnic_83xx_poll_process_aen(adapter);
+
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ ahw->diag_cnt++;
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ return IRQ_HANDLED;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ } else {
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+ napi_schedule(&sds_ring->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+
+ if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->ahw->diag_cnt++;
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+
+ return IRQ_HANDLED;
+}
+
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 val = 0, num_msix;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix - 1;
+ else
+ num_msix = 0;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ msleep(20);
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+}
+
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ u32 val;
+ char name[32];
+ int err = 0;
+ unsigned long flags = 0;
+
+ if (!(adapter->flags & QLCNIC_MSI_ENABLED) &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ flags |= IRQF_SHARED;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ handler = qlcnic_83xx_handle_aen;
+ val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
+ snprintf(name, (IFNAMSIZ + 4),
+ "%s[%s]", "qlcnic", "aen");
+ err = request_irq(val, handler, flags, name, adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register MBX interrupt\n");
+ return err;
+ }
+ } else {
+ handler = qlcnic_83xx_intr;
+ val = adapter->msix_entries[0].vector;
+ err = request_irq(val, handler, flags, "qlcnic", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register INTx interrupt\n");
+ return err;
+ }
+ qlcnic_83xx_clear_legacy_intr_mask(adapter);
+ }
+
+ /* Enable mailbox interrupt */
+ qlcnic_83xx_enable_mbx_intrpt(adapter);
+
+ return err;
+}
+
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
+ adapter->ahw->pci_func = val & 0xf;
+}
+
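+/* Acquire the per-function hardware access semaphore: a read of the lock
+ * register returns non-zero once the semaphore is granted, after which the
+ * owning function number is recorded in the flash lock owner register.
+ */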
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val, limit = 0;
+
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func);
+ do {
+ val = readl(addr);
+ if (val) {
+ /* write the function number to register */
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ ahw->pci_func);
+ return 0;
+ }
+ usleep_range(1000, 2000);
+ } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT);
+
+ return -EIO;
+}
+
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
+ val = readl(addr);
+}
+
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int ret;
+ u32 data;
+
+ if (qlcnic_api_lock(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to acquire lock. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset);
+ qlcnic_api_unlock(adapter);
+
+ if (ret == -EIO) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+ data = ret;
+ memcpy(buf, &data, size);
+}
+
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+
+ memcpy(&data, buf, size);
+ qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data);
+}
+
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get Port Info failed\n");
+ } else {
+ if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config))
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ else
+ adapter->ahw->port_type = QLCNIC_GBE;
+
+ if (QLC_83XX_AUTONEG(adapter->ahw->port_config))
+ adapter->ahw->link_autoneg = AUTONEG_ENABLE;
+ }
+ return status;
+}
+
+void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8);
+ else
+ val = BIT_2;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+}
+
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->fw_hal_version = 2;
+ qlcnic_get_func_no(adapter);
+
+ /* Determine function privilege level */
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else {
+ adapter->nic_ops = &qlcnic_83xx_ops;
+ }
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+
+static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ dev_info(&adapter->pdev->dev,
+ "Host MBX regs(%d)\n", cmd->req.num);
+ for (i = 0; i < cmd->req.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->req.arg[i]);
+ }
+ pr_info("\n");
+ dev_info(&adapter->pdev->dev,
+ "FW MBX regs(%d)\n", cmd->rsp.num);
+ for (i = 0; i < cmd->rsp.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->rsp.arg[i]);
+ }
+ pr_info("\n");
+}
+
+/* Mailbox response for mac rcode */
+static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+{
+ u32 fw_data;
+ u8 mac_cmd_rcode;
+
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
+ return QLCNIC_RCODE_SUCCESS;
+ return 1;
+}
+
+static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ u32 data;
+ unsigned long wait_time = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ /* wait for mailbox completion */
+ do {
+ data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
+ if (++wait_time > QLCNIC_MBX_TIMEOUT) {
+ data = QLCNIC_RCODE_TIMEOUT;
+ break;
+ }
+ mdelay(1);
+ } while (!data);
+ return data;
+}
+
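+/* Issue a mailbox command: copy the request into the host mailbox registers,
+ * hand ownership to firmware through the host mailbox control register, poll
+ * the firmware mailbox control register for completion (servicing any async
+ * events that arrive in between), then collect the response and release
+ * ownership.
+ */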
+int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+ u16 opcode;
+ u8 mbx_err_code;
+ unsigned long flags;
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ opcode = LSW(cmd->req.arg[0]);
+ if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ dev_info(&adapter->pdev->dev,
+ "Mailbox cmd attempted, 0x%x\n", opcode);
+ dev_info(&adapter->pdev->dev, "Mailbox detached\n");
+ return 0;
+ }
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+
+ if (mbx_val) {
+ QLCDB(adapter, DRV,
+ "Mailbox cmd attempted, 0x%x\n", opcode);
+ QLCDB(adapter, DRV,
+ "Mailbox not available, 0x%x, collect FW dump\n",
+ mbx_val);
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ return cmd->rsp.arg[0];
+ }
+
+ /* Fill in mailbox registers */
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+
+ /* Signal FW about the impending command */
+ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+poll:
+ rsp = qlcnic_83xx_mbx_poll(adapter);
+ if (rsp != QLCNIC_RCODE_TIMEOUT) {
+ /* Get the FW response data */
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
+ qlcnic_83xx_process_aen(adapter);
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (mbx_val)
+ goto poll;
+ }
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
+ opcode = QLCNIC_MBX_RSP(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ rsp = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ rsp = qlcnic_83xx_mac_rcode(adapter);
+ if (!rsp)
+ goto out;
+ }
+ dev_err(&adapter->pdev->dev,
+ "MBX command 0x%x failed with err:0x%x\n",
+ opcode, mbx_err_code);
+ rsp = mbx_err_code;
+ qlcnic_dump_mbx(adapter, cmd);
+ break;
+ }
+ goto out;
+ }
+
+ dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
+ QLCNIC_MBX_RSP(mbx_cmd));
+ rsp = QLCNIC_RCODE_TIMEOUT;
+out:
+ /* clear fw mbx control register */
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ return rsp;
+}
+
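+/* Allocate request/response buffers for a mailbox command and build the
+ * request header in arg[0]: bits 15:0 carry the command opcode, bits 28:16
+ * the number of request registers and bits 31:29 the firmware HAL version.
+ */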
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ u32 temp;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_83xx_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ temp = adapter->ahw->fw_hal_version << 29;
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ break;
+ }
+ }
+ return 0;
+}
+
+void qlcnic_83xx_idc_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+
+ for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
+ cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "%s: Mailbox IDC ACK failed.\n", __func__);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(data[0]));
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status);
+ return;
+}
+
+void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 event[QLC_83XX_MBX_AEN_CNT];
+ int i;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ event[i] = readl(QLCNIC_MBX_FW(ahw, i));
+
+ switch (QLCNIC_MBX_RSP(event[0])) {
+
+ case QLCNIC_MBX_LINK_EVENT:
+ qlcnic_83xx_handle_link_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_COMP_EVENT:
+ qlcnic_83xx_handle_idc_comp_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_REQUEST_EVENT:
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]);
+ queue_delayed_work(adapter->qlcnic_wq,
+ &adapter->idc_aen_work, 0);
+ break;
+ case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ break;
+ case QLCNIC_MBX_SFP_INSERT_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_SFP_REMOVE_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ default:
+ dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ }
+
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
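+/* Configure status (SDS) rings beyond those already programmed by
+ * qlcnic_83xx_create_rx_ctx (which handles up to QLCNIC_MAX_RING_SETS),
+ * using the ADD_RCV_RINGS mailbox command.
+ */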
+static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
+{
+ int index, i, err, sds_mbx_size;
+ u32 *buf, intrpt_id, intr_mask;
+ u16 context_id;
+ u8 num_sds;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_add_rings_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ context_id = recv_ctx->context_id;
+ num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS);
+ ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
+
+ /* set up status rings, mbx 2-81 */
+ index = 2;
+ for (i = 8; i < adapter->max_sds_rings; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.sds_ring_size = sds->num_desc;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to add rings %d\n", err);
+ goto out;
+ }
+
+ mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
+ index = 0;
+ /* status descriptor ring */
+ for (i = 8; i < adapter->max_sds_rings; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[index];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ index++;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int i, err, index, sds_mbx_size, rds_mbx_size;
+ u8 num_sds, num_rds;
+ u32 *buf, intrpt_id, intr_mask, cap = 0;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_host_rds_ring *rds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_rds_mbx rds_mbx;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ num_rds = adapter->max_rds_rings;
+
+ if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS)
+ num_sds = adapter->max_sds_rings;
+ else
+ num_sds = QLCNIC_MAX_RING_SETS;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
+ cap = QLCNIC_CAP0_LEGACY_CONTEXT;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLC_83XX_FW_CAP_LRO_MSS;
+
+ /* set mailbox hdr and capabilities */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CREATE_RX_CTX);
+ cmd.req.arg[1] = cap;
+ cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
+ (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+ /* set up status rings, mbx 8-57/87 */
+ index = QLC_83XX_HOST_SDS_MBX_IDX;
+ for (i = 0; i < num_sds; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.sds_ring_size = sds->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+ /* set up receive rings, mbx 88-111/135 */
+ index = QLCNIC_HOST_RDS_MBX_IDX;
+ rds = &recv_ctx->rds_rings[0];
+ rds->producer = 0;
+ memset(&rds_mbx, 0, rds_mbx_size);
+ rds_mbx.phy_addr_reg = rds->phys_addr;
+ rds_mbx.reg_ring_sz = rds->dma_size;
+ rds_mbx.reg_ring_len = rds->num_desc;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->producer = 0;
+ rds_mbx.phy_addr_jmb = rds->phys_addr;
+ rds_mbx.jmb_ring_sz = rds->dma_size;
+ rds_mbx.jmb_ring_len = rds->num_desc;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &rds_mbx, rds_mbx_size);
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Rx ctx in firmware%d\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1];
+ recv_ctx->context_id = mbx_out->ctx_id;
+ recv_ctx->state = mbx_out->state;
+ recv_ctx->virt_port = mbx_out->vport_id;
+ dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ /* Receive descriptor ring */
+ /* Standard ring */
+ rds = &recv_ctx->rds_rings[0];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].reg_buf;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].jmb_buf;
+ /* status descriptor ring */
+ for (i = 0; i < num_sds; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[i];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS)
+ err = qlcnic_83xx_add_rings(adapter);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx, int ring)
+{
+ int err;
+ u16 msix_id;
+ u32 *buf, intr_mask;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_tx_mbx mbx;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Reset host resources */
+ tx->producer = 0;
+ tx->sw_consumer = 0;
+ *(tx->hw_consumer) = 0;
+
+ memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
+
+ /* setup mailbox inbox registers */
+ mbx.phys_addr = tx->phys_addr;
+ mbx.cnsmr_index = tx->hw_cons_phys_addr;
+ mbx.size = tx->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id;
+ else
+ msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ mbx.intr_id = msix_id;
+ else
+ mbx.intr_id = 0xffff;
+ mbx.src = 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
+ cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES;
+ buf = &cmd.req.arg[6];
+ memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
+ /* send the mailbox command*/
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
+ tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
+ tx->ctx_id = mbx_out->ctx_id;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
+ tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+ dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ u8 ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ /* disable and free mailbox interrupt */
+ qlcnic_83xx_free_mbx_intr(adapter);
+ adapter->ahw->loopback_state = 0;
+ adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+ int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring, err;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_83xx_disable_intr(adapter, sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+ qlcnic_detach(adapter);
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to setup mbx interrupt\n",
+ __func__);
+ goto out;
+ }
+ }
+ adapter->ahw->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+out:
+ netif_device_attach(netdev);
+}
+
+int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 beacon)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_in;
+ int i, status = 0;
+
+ if (state) {
+ /* Get LED configuration */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_CONFIG);
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get led config failed.\n");
+ goto mbx_err;
+ } else {
+ for (i = 0; i < 4; i++)
+ adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1];
+ }
+ qlcnic_free_mbx_args(&cmd);
+ /* Set LED Configuration */
+ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
+ LSW(QLC_83XX_LED_CONFIG);
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ cmd.req.arg[1] = mbx_in;
+ cmd.req.arg[2] = mbx_in;
+ cmd.req.arg[3] = mbx_in;
+ if (beacon)
+ cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Set led config failed.\n");
+ }
+mbx_err:
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+
+ } else {
+ /* Restoring default LED configuration */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
+ cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
+ cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
+ if (beacon)
+ cmd.req.arg[4] = adapter->ahw->mbox_reg[3];
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Restoring led config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+ }
+}
+
+void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int status;
+
+ if (enable) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
+ cmd.req.arg[1] = BIT_0 | BIT_31;
+ } else {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
+ cmd.req.arg[1] = BIT_0 | BIT_31;
+ }
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s in NIC IDC function event.\n",
+ (enable ? "register" : "unregister"));
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+ cmd.req.arg[1] = adapter->ahw->port_config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Set Port Config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Get Port config failed\n");
+ else
+ adapter->ahw->port_config = cmd.rsp.arg[1];
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "Setup linkevent mailbox failed\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (mode ? 1 : 0) | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "Promiscous mode config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
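+/* Run an internal or external loopback test: bring the interface into diag
+ * mode, program the requested loopback mode, wait for the link up event from
+ * firmware, run the traffic test, then restore the original configuration.
+ */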
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+
+ QLCDB(adapter, DRV, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+ "Loopback test not supported for non privilege function\n");
+ return ret;
+ }
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto fail_diag_alloc;
+
+ ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_diag_res;
+
+ /* Poll for link up event before running traffic */
+ do {
+ msleep(500);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -QLCNIC_FW_NOT_RESPOND;
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ goto free_diag_res;
+ }
+ } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+ qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+
+fail_diag_alloc:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0, loop = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status)
+ return status;
+
+ config = ahw->port_config;
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to Set Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(300);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_err(&adapter->pdev->dev,
+ "FW did not generate IDC completion AEN\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ return -EIO;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_ADD);
+ return status;
+}
+
+int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0, loop = 0;
+ u32 config = ahw->port_config;
+
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to Clear Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(300);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware didn't sent IDC completion AEN\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EIO;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_DEL);
+ return status;
+}
+
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
+ int mode)
+{
+ int err;
+ u32 temp, temp_ip;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
+ if (mode == QLCNIC_IP_UP) {
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = 1 | temp;
+ } else {
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = 2 | temp;
+ }
+
+ /*
+ * Adapter needs IP address in network byte order.
+ * But hardware mailbox registers go through writel(), hence IP address
+ * gets swapped on big endian architecture.
+ * To negate swapping of writel() on big endian architecture
+ * use swab32(value).
+ */
+
+ temp_ip = swab32(ntohl(ip));
+ memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x request\n",
+ (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
+{
+ int err;
+ u32 temp, arg1;
+ struct qlcnic_cmd_args cmd;
+ int lro_bit_mask;
+
+ lro_bit_mask = (mode ? (BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0);
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+ temp = adapter->recv_ctx->context_id << 16;
+ arg1 = lro_bit_mask | temp;
+ cmd.req.arg[1] = arg1;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "LRO config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 word;
+ struct qlcnic_cmd_args cmd;
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
+
+ /*
+ * RSS request:
+ * bits 3-0: Rsvd
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 16-31: indirection table mask
+ */
+ word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u32)(enable & 0x1) << 8) |
+ ((0x7ULL) << 16);
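+ /* e.g. enable = 1 gives word = 0x30 | 0xc0 | 0x100 | 0x70000 = 0x000701f0 */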
+ cmd.req.arg[1] = (adapter->recv_ctx->context_id);
+ cmd.req.arg[2] = word;
+ memcpy(&cmd.req.arg[4], key, sizeof(key));
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err)
+ dev_info(&adapter->pdev->dev, "RSS config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+
+}
+
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ __le16 vlan_id, u8 op)
+{
+ int err;
+ u32 *buf;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_macvlan_mbx mv;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ return err;
+ cmd.req.arg[1] = op | (1 << 8) |
+ (adapter->recv_ctx->context_id << 16);
+
+ mv.vlan = le16_to_cpu(vlan_id);
+ memcpy(&mv.mac, addr, ETH_ALEN);
+ buf = &cmd.req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "MAC-VLAN %s to CAM failed, err=%d.\n",
+ ((op == 1) ? "add " : "delete "), err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ __le16 vlan_id)
+{
+ u8 mac[ETH_ALEN];
+ memcpy(&mac, addr, ETH_ALEN);
+ qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
+}
+
+void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
+{
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ memcpy(&cmd->req.arg[2], mac, sizeof(u32));
+ memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16));
+ break;
+ }
+ cmd->req.arg[1] = type;
+}
+
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+{
+ int err, i;
+ struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
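+ /* rsp.arg[2] carries MAC bytes 0-1 and rsp.arg[1] bytes 2-5,
+ * most significant byte first
+ */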
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
+ dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ cmd.req.arg[1] = 1 | (adapter->recv_ctx->context_id << 16);
+ cmd.req.arg[3] = coal->flag;
+ temp = coal->rx_time_us << 16;
+ cmd.req.arg[2] = coal->rx_packets | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_info(&adapter->pdev->dev,
+ "Failed to send interrupt coalescence parameters\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ u8 link_status, duplex;
+
+ link_status = LSB(data[3]) & 1;
+ /* link speed */
+ adapter->ahw->link_speed = MSW(data[2]);
+ adapter->ahw->link_autoneg = MSB(MSW(data[3]));
+ adapter->ahw->module_type = MSB(LSW(data[3]));
+ duplex = LSB(MSW(data[3]));
+ if (duplex)
+ adapter->ahw->link_duplex = DUPLEX_FULL;
+ else
+ adapter->ahw->link_duplex = DUPLEX_HALF;
+ adapter->ahw->has_link_events = 1;
+ qlcnic_advert_link_change(adapter, link_status);
+}
+
+irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ unsigned long flags;
+ u32 mask, resp, event;
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ qlcnic_83xx_process_aen(adapter);
+out:
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
+{
+ int err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error, invoked by non management func\n",
+ __func__);
+ return err;
+ }
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
+ cmd.req.arg[1] = (port & 0xf) | BIT_4;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "Failed to enable eswitch%d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+
+}
+
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
+{
+ int i, err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error, invoked by non management func\n",
+ __func__);
+ return err;
+ }
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ cmd.req.arg[1] = (nic->pci_func << 16);
+ cmd.req.arg[2] = 0x1 << 16;
+ cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
+ cmd.req.arg[4] = nic->capabilities;
+ cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16);
+ cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16);
+ cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16);
+ for (i = 8; i < 32; i++)
+ cmd.req.arg[i] = 0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "Failed to set nic info%d\n",
+ err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ u32 temp;
+ u8 op = 0;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
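+ /* BIT_31 marks a request for a function other than the calling one */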
+ if (func_id != adapter->ahw->pci_func) {
+ temp = func_id << 16;
+ cmd.req.arg[1] = op | BIT_31 | temp;
+ } else {
+ cmd.req.arg[1] = adapter->ahw->pci_func << 16;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to get nic info %d\n", err);
+ goto out;
+ }
+
+ npar_info->op_type = cmd.rsp.arg[1];
+ npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF;
+ npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16;
+ npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF;
+ npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16;
+ npar_info->capabilities = cmd.rsp.arg[4];
+ npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF;
+ npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16;
+ npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF;
+ npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16;
+ npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF;
+ npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16;
+ if (cmd.rsp.arg[8] & 0x1)
+ npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1;
+ if (cmd.rsp.arg[8] & 0x10000) {
+ temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
+ npar_info->max_linkspeed_reg_offset = temp;
+ }
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ int i, err = 0, j = 0;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ adapter->ahw->act_pci_func = 0;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ pci_info->func_count = cmd.rsp.arg[1] & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: total functions = %d\n",
+ __func__, pci_info->func_count);
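+ /* Each function entry spans several response words: id/active, type/port, Tx bandwidth and MAC address */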
+ for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+ pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
+ pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ i++;
+ pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
+ if (pci_info->type == QLCNIC_TYPE_NIC)
+ adapter->ahw->act_pci_func++;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->default_port = temp;
+ i++;
+ pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->tx_max_bw = temp;
+ i = i + 2;
+ memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2);
+ i++;
+ memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
+ i = i + 3;
+
+ dev_info(&adapter->pdev->dev, "%s:\n"
+ "\tid = %d active = %d type = %d\n"
+ "\tport = %d min bw = %d max bw = %d\n"
+ "\tmac_addr = %pM\n", __func__,
+ pci_info->id, pci_info->active, pci_info->type,
+ pci_info->default_port, pci_info->tx_min_bw,
+ pci_info->tx_max_bw, pci_info->mac);
+ }
+ } else {
+ dev_err(&adapter->pdev->dev, "Failed to get PCI Info%d\n",
+ err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
+{
+ int i, index, err;
+ bool type;
+ u8 max_ints;
+ u32 val, temp;
+ struct qlcnic_cmd_args cmd;
+
+ max_ints = adapter->ahw->num_msix - 1;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+ cmd.req.arg[1] = max_ints;
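+ /* One request word per interrupt: add/delete flag, interrupt type and, for MSI-X, the vector id */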
+ for (i = 0, index = 2; i < max_ints; i++) {
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (adapter->ahw->intr_tbl[i].type << 4);
+ if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (adapter->ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[index++] = val;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure interrupts 0x%x\n", err);
+ goto out;
+ }
+
+ max_ints = cmd.rsp.arg[1];
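+ /* Each response entry is two words: status/new id and interrupt source */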
+ for (i = 0, index = 2; i < max_ints; i++, index += 2) {
+ val = cmd.rsp.arg[index];
+ if (LSB(val)) {
+ dev_info(&adapter->pdev->dev,
+ "Can't configure interrupt %d\n",
+ adapter->ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ adapter->ahw->intr_tbl[i].id = MSW(val);
+ adapter->ahw->intr_tbl[i].enabled = 1;
+ temp = cmd.rsp.arg[index + 1];
+ adapter->ahw->intr_tbl[i].src = temp;
+ } else {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter)
+{
+ int id, timeout = 0;
+ u32 status = 0;
+
+ while (status == 0) {
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+ if (status)
+ break;
+
+ if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) {
+ id = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FLASH_LOCK_OWNER);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, lock held by %d\n", __func__, id);
+ return -EIO;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum);
+ return 0;
+}
+
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter)
+{
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF);
+}
+
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
+ u32 flash_addr, u8 *p_data,
+ int count)
+{
+ int i, ret;
+ u32 word, range, flash_offset, addr = flash_addr;
+ ulong indirect_add, direct_window;
+
+ flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr));
+
+ range = flash_offset + (count * sizeof(u32));
+ /* Check if data is spread across multiple sectors */
+ if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ /* Multi sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_add);
+ if (ret == -EIO)
+ return -EIO;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+
+ if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ direct_window = QLC_83XX_FLASH_DIRECT_WINDOW;
+ /* This write is needed once for each sector */
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ direct_window,
+ (addr));
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_add);
+ if (ret == -EIO)
+ return -EIO;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+ int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+
+ do {
+ status = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_STATUS);
+ if ((status & QLC_83XX_FLASH_STATUS_READY) ==
+ QLC_83XX_FLASH_STATUS_READY)
+ break;
+
+ msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY);
+ } while (--retries);
+
+ if (!retries)
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
+{
+ int ret;
+ u32 cmd;
+ cmd = adapter->ahw->fdt.write_statusreg_cmd;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_enable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG |
+ adapter->ahw->fdt.write_statusreg_cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_disable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
+{
+ int ret, mfg_id;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
+ if (mfg_id == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ adapter->flash_mfg_id = (mfg_id & 0xFF);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
+{
+ int count, fdt_size, ret = 0;
+
+ fdt_size = sizeof(struct qlcnic_fdt);
+ count = fdt_size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ memset(&adapter->ahw->fdt, 0, fdt_size);
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
+ (u8 *)&adapter->ahw->fdt,
+ count);
+
+ qlcnic_83xx_unlock_flash(adapter);
+ return ret;
+}
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
+ u32 sector_start_addr)
+{
+ u32 reversed_addr, addr1, addr2, cmd;
+ int ret = -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write_op(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
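+ /* Build the byte-swapped sector address expected by the flash controller */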
+ addr1 = (sector_start_addr & 0xFF) << 16;
+ addr2 = (sector_start_addr & 0xFF0000) >> 16;
+ reversed_addr = addr1 | addr2;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ reversed_addr);
+ cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd;
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id)
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_ERASE_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write_op(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data)
+{
+ int ret = -EIO;
+ u32 addr1 = 0x00800000 | (addr >> 2);
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data, int count)
+{
+ u32 temp;
+ int ret = -EIO;
+
+ if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid word count\n", __func__);
+ return -EIO;
+ }
+
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL);
+
+ /* First DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_FIRST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ count--;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL);
+ /* Second to N-1 DWORD writes */
+ while (count != 1) {
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ count--;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL |
+ (addr >> 2));
+ /* Last DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS);
+ if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
+ dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
+ __func__, __LINE__);
+ /* Operation failed, clear error bit */
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL);
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
+{
+ u32 val, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+
+ /* Check if recovery need to be performed by the calling function */
+ if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) {
+ val = val & ~0x3F;
+ val = val | ((adapter->portnum << 2) |
+ QLC_83XX_NEED_DRV_LOCK_RECOVERY);
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated\n", __func__);
+ msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+ id = ((val >> 2) & 0xF);
+ if (id == adapter->portnum) {
+ val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK;
+ val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ /* Force release the lock */
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+ /* Clear recovery bits */
+ val = val & ~0x3F;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery completed\n", __func__);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: func %d to resume lock recovery process\n",
+ __func__, id);
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated by other functions\n",
+ __func__);
+ }
+}
+
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0;
+ int max_attempt = 0;
+
+ while (status == 0) {
+ status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK);
+ if (status)
+ break;
+
+ msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+ i++;
+
+ if (i == 1)
+ temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+
+ if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ if (val == temp) {
+ id = val & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: lock to be recovered from %d\n",
+ __func__, id);
+ qlcnic_83xx_recover_driver_lock(adapter);
+ i = 0;
+ max_attempt++;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+ /* Force exit from while loop after few attempts */
+ if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
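+ /* Lock acquired: increment the lock alive counter and record this function as the owner */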
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ lock_alive_counter = val >> 8;
+ lock_alive_counter++;
+ val = lock_alive_counter << 8 | adapter->portnum;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+
+ return 0;
+}
+
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 val, lock_alive_counter, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = val & 0xFF;
+ lock_alive_counter = val >> 8;
+
+ if (id != adapter->portnum)
+ dev_err(&adapter->pdev->dev,
+ "%s:Warning func %d is unlocking lock owned by %d\n",
+ __func__, adapter->portnum, id);
+
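+ /* Mark the lock as unowned (0xFF) and release it via the unlock register */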
+ val = (lock_alive_counter << 8) | 0xFF;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+}
+
+int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+ u32 *data, u32 count)
+{
+ int i, j, ret = 0;
+ u32 temp;
+
+ /* Check alignment */
+ if (addr & 0xF)
+ return -EIO;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+
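+ /* Write one 16-byte chunk per iteration through the four 32-bit data registers */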
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX)) ||
+ (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))) {
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
+ QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
+ QLCNIC_TA_WRITE_START);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLCNIC_MS_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failure */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_WARNING
+ "MS memory write failed\n");
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
+ u8 *p_data, int count)
+{
+ int i, ret;
+ u32 word, addr = flash_addr;
+ ulong indirect_addr;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr))) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_addr);
+ if (ret == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 config = 0, state;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func));
+ if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) {
+ dev_info(&adapter->pdev->dev, "link state down\n");
+ return config;
+ }
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Get Link Status Command failed: 0x%x\n", err);
+ goto out;
+ } else {
+ config = cmd.rsp.arg[1];
+ switch (QLC_83XX_CURRENT_LINK_SPEED(config)) {
+ case QLC_83XX_10M_LINK:
+ ahw->link_speed = SPEED_10;
+ break;
+ case QLC_83XX_100M_LINK:
+ ahw->link_speed = SPEED_100;
+ break;
+ case QLC_83XX_1G_LINK:
+ ahw->link_speed = SPEED_1000;
+ break;
+ case QLC_83XX_10G_LINK:
+ ahw->link_speed = SPEED_10000;
+ break;
+ default:
+ ahw->link_speed = 0;
+ break;
+ }
+ config = cmd.rsp.arg[3];
+ if (config & 1)
+ err = 1;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return config;
+}
+
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
+{
+ u32 config = 0;
+ int status = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ /* hard code until there is a way to get it from flash */
+ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+ return status;
+}
+
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ int status = 0;
+ u32 config = adapter->ahw->port_config;
+
+ if (ecmd->autoneg)
+ adapter->ahw->port_config |= BIT_15;
+
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ adapter->ahw->port_config |= BIT_8;
+ break;
+ case SPEED_100:
+ adapter->ahw->port_config |= BIT_9;
+ break;
+ case SPEED_1000:
+ adapter->ahw->port_config |= BIT_10;
+ break;
+ case SPEED_10000:
+ adapter->ahw->port_config |= BIT_11;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_info(&adapter->pdev->dev,
+ "Faild to Set Link Speed and autoneg.\n");
+ adapter->ahw->port_config = config;
+ }
+ return status;
+}
+
+static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd,
+ u64 *data, int index)
+{
+ u32 low, hi;
+ u64 val;
+
+ low = cmd->rsp.arg[index];
+ hi = cmd->rsp.arg[index + 1];
+ val = (((u64) low) | (((u64) hi) << 32));
+ *data++ = val;
+ return data;
+}
+
+static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd, u64 *data,
+ int type, int *ret)
+{
+ int err, k, total_regs;
+
+ *ret = 0;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Error in get statistics mailbox command\n");
+ *ret = -EIO;
+ return data;
+ }
+ total_regs = cmd->rsp.num;
+ switch (type) {
+ case QLC_83XX_STAT_MAC:
+ /* fill in MAC tx counters */
+ for (k = 2; k < 28; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx counters */
+ for (k += 6; k < 60; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx frame stats */
+ for (k += 6; k < 80; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_RX:
+ for (k = 2; k < 8; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < 24; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes containing RE1FBQ error data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_TX:
+ for (k = 2; k < 10; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n");
+ *ret = -EIO;
+ }
+ return data;
+}
+
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+ /* Get Tx stats */
+ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
+ cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_TX, &ret);
+ if (ret) {
+ dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
+ goto out;
+ }
+ /* Get MAC stats */
+ cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16);
+ cmd.rsp.num = QLC_83XX_MAC_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_MAC, &ret);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+ "Error getting Rx stats\n");
+ goto out;
+ }
+ /* Get Rx stats */
+ cmd.req.arg[1] = adapter->recv_ctx->context_id << 16;
+ cmd.rsp.num = QLC_83XX_RX_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_RX, &ret);
+ if (ret)
+ dev_info(&adapter->pdev->dev,
+ "Error getting Tx stats\n");
+out:
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
+{
+ u32 major, minor, sub;
+
+ major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) {
+ dev_info(&adapter->pdev->dev, "%s: Reg test failed\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+{
+ return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
+ sizeof(adapter->ahw->ext_reg_tbl)) +
+ (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(adapter->ahw->reg_tbl));
+}
+
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
+{
+ int i, j = 0;
+
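+ /* Shared registers are dumped first, followed by the 83xx extended registers */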
+ for (i = QLCNIC_DEV_INFO_SIZE + 1;
+ j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j);
+
+ for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++)
+ regs_buff[i++] = QLCRDX(adapter->ahw, j);
+ return i;
+}
+
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u32 data;
+ u16 intrpt_id, id;
+ u8 val;
+ int ret, max_sds_rings = adapter->max_sds_rings;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto fail_diag_irq;
+
+ ahw->diag_cnt = 0;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[0].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
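+ /* Ask firmware to fire a test interrupt on the chosen vector and report the result */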
+ cmd.req.arg[1] = 1;
+ cmd.req.arg[2] = intrpt_id;
+ cmd.req.arg[3] = BIT_0;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ data = cmd.rsp.arg[2];
+ id = LSW(data);
+ val = LSB(MSW(data));
+ if (id != intrpt_id)
+ dev_info(&adapter->pdev->dev,
+ "Interrupt generated: 0x%x, requested:0x%x\n",
+ id, intrpt_id);
+ if (val)
+ dev_err(&adapter->pdev->dev,
+ "Interrupt test error: 0x%x\n", val);
+ if (ret)
+ goto done;
+
+ msleep(20);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+
+fail_diag_irq:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed\n", __func__);
+ return;
+ }
+ config = ahw->port_config;
+ if (config & QLC_83XX_CFG_STD_PAUSE) {
+ if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+ pause->tx_pause = 1;
+ if (config & QLC_83XX_CFG_STD_RX_PAUSE)
+ pause->rx_pause = 1;
+ }
+
+ if (QLC_83XX_AUTONEG(config))
+ pause->autoneg = 1;
+}
+
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed.\n", __func__);
+ return status;
+ }
+ config = ahw->port_config;
+
+ if (ahw->port_type == QLCNIC_GBE) {
+ if (pause->autoneg)
+ ahw->port_config |= QLC_83XX_ENABLE_AUTONEG;
+ if (!pause->autoneg)
+ ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG;
+ } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (!(config & QLC_83XX_CFG_STD_PAUSE))
+ ahw->port_config |= QLC_83XX_CFG_STD_PAUSE;
+
+ if (pause->rx_pause && pause->tx_pause) {
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ } else if (pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE;
+ } else if (pause->tx_pause && !pause->rx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
+ } else if (!pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ }
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Set Pause Config failed.\n", __func__);
+ ahw->port_config = config;
+ }
+ return status;
+}
+
+static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_READ_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
+ return ret & 0xFF;
+}
+
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_read_flash_status_reg(adapter);
+ if (status == -EIO) {
+ dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
new file mode 100644
index 000000000000..61f81f6c84a9
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -0,0 +1,438 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_83XX_HW_H
+#define __QLCNIC_83XX_HW_H
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "qlcnic_hw.h"
+
+/* Directly mapped registers */
+#define QLC_83XX_CRB_WIN_BASE 0x3800
+#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
+#define QLC_83XX_SEM_LOCK_BASE 0x3840
+#define QLC_83XX_SEM_UNLOCK_BASE 0x3844
+#define QLC_83XX_SEM_LOCK_FUNC(f) (QLC_83XX_SEM_LOCK_BASE+((f)*8))
+#define QLC_83XX_SEM_UNLOCK_FUNC(f) (QLC_83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLC_83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLC_83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLC_83XX_LINK_SPEED_FACTOR 10
+#define QLC_83xx_FUNC_VAL(v, f) ((v) & (1 << (f * 4)))
+#define QLC_83XX_INTX_PTR 0x38C0
+#define QLC_83XX_INTX_TRGR 0x38C4
+#define QLC_83XX_INTX_MASK 0x38C8
+
+#define QLC_83XX_DRV_LOCK_WAIT_COUNTER 100
+#define QLC_83XX_DRV_LOCK_WAIT_DELAY 20
+#define QLC_83XX_NEED_DRV_LOCK_RECOVERY 1
+#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS 2
+#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3
+#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200
+#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3
+
+#define QLC_83XX_NO_NIC_RESOURCE 0x5
+#define QLC_83XX_MAC_PRESENT 0xC
+#define QLC_83XX_MAC_ABSENT 0xD
+
+
+#define QLC_83XX_FLASH_SECTOR_SIZE (64 * 1024)
+
+/* PEG status definitions */
+#define QLC_83XX_CMDPEG_COMPLETE 0xff01
+#define QLC_83XX_VALID_INTX_BIT30(val) ((val) & BIT_30)
+#define QLC_83XX_VALID_INTX_BIT31(val) ((val) & BIT_31)
+#define QLC_83XX_INTX_FUNC(val) ((val) & 0xFF)
+#define QLC_83XX_LEGACY_INTX_MAX_RETRY 100
+#define QLC_83XX_LEGACY_INTX_DELAY 4
+#define QLC_83XX_REG_DESC 1
+#define QLC_83XX_LRO_DESC 2
+#define QLC_83XX_CTRL_DESC 3
+#define QLC_83XX_FW_CAPABILITY_TSO BIT_6
+#define QLC_83XX_FW_CAP_LRO_MSS BIT_17
+#define QLC_83XX_HOST_RDS_MODE_UNIQUE 0
+#define QLC_83XX_HOST_SDS_MBX_IDX 8
+
+#define QLCNIC_HOST_RDS_MBX_IDX 88
+#define QLCNIC_MAX_RING_SETS 8
+
+/* Pause control registers */
+#define QLC_83XX_SRE_SHIM_REG 0x0D200284
+#define QLC_83XX_PORT0_THRESHOLD 0x0B2003A4
+#define QLC_83XX_PORT1_THRESHOLD 0x0B2013A4
+#define QLC_83XX_PORT0_TC_MC_REG 0x0B200388
+#define QLC_83XX_PORT1_TC_MC_REG 0x0B201388
+#define QLC_83XX_PORT0_TC_STATS 0x0B20039C
+#define QLC_83XX_PORT1_TC_STATS 0x0B20139C
+#define QLC_83XX_PORT2_IFB_THRESHOLD 0x0B200704
+#define QLC_83XX_PORT3_IFB_THRESHOLD 0x0B201704
+
+/* Peg PC status registers */
+#define QLC_83XX_CRB_PEG_NET_0 0x3400003c
+#define QLC_83XX_CRB_PEG_NET_1 0x3410003c
+#define QLC_83XX_CRB_PEG_NET_2 0x3420003c
+#define QLC_83XX_CRB_PEG_NET_3 0x3430003c
+#define QLC_83XX_CRB_PEG_NET_4 0x34b0003c
+
+/* Firmware image definitions */
+#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_83XX_BOOT_FROM_FLASH 0
+#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+
+#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+
+struct qlcnic_intrpt_config {
+ u8 type;
+ u8 enabled;
+ u16 id;
+ u32 src;
+};
+
+struct qlcnic_macvlan_mbx {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+};
+
+struct qlc_83xx_fw_info {
+ const struct firmware *fw;
+ u16 major_fw_version;
+ u8 minor_fw_version;
+ u8 sub_fw_version;
+ u8 fw_build_num;
+ u8 load_from_file;
+};
+
+struct qlc_83xx_reset {
+ struct qlc_83xx_reset_hdr *hdr;
+ int seq_index;
+ int seq_error;
+ int array_index;
+ u32 array[QLC_83XX_MAX_RESET_SEQ_ENTRIES];
+ u8 *buff;
+ u8 *stop_offset;
+ u8 *start_offset;
+ u8 *init_offset;
+ u8 seq_end;
+ u8 template_end;
+};
+
+#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
+#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_TIMESTAMP 0
+#define QLC_83XX_IDC_DURATION 1
+#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
+#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_RESET_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS 20
+#define QLC_83XX_IDC_FW_POLL_DELAY (1 * HZ)
+#define QLC_83XX_IDC_FW_FAIL_THRESH 2
+#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO 8
+#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS 16
+#define QLC_83XX_IDC_MAJOR_VERSION 1
+#define QLC_83XX_IDC_MINOR_VERSION 0
+#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
+
+struct qlcnic_adapter;
+struct qlc_83xx_idc {
+ int (*state_entry) (struct qlcnic_adapter *);
+ u64 sec_counter;
+ u64 delay;
+ unsigned long status;
+ int err_code;
+ int collect_dump;
+ u8 curr_state;
+ u8 prev_state;
+ u8 vnic_state;
+ u8 vnic_wait_limit;
+ u8 quiesce_req;
+ char **name;
+};
+
+#define QLCNIC_MBX_RSP(reg) LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
+/* Mailbox process AEN count */
+#define QLC_83XX_IDC_COMP_AEN 3
+#define QLC_83XX_MBX_AEN_CNT 5
+#define QLC_83XX_MODULE_LOADED 1
+#define QLC_83XX_MBX_READY 2
+#define QLC_83XX_MBX_AEN_ACK 3
+#define QLC_83XX_SFP_PRESENT(data) ((data) & 3)
+#define QLC_83XX_SFP_ERR(data) (((data) >> 2) & 3)
+#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F)
+#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16))
+#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10)
+#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11)
+#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0)
+#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7)
+#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3)
+#define QLC_83XX_LINK_LB(data) (((data) >> 8) & 7)
+#define QLC_83XX_LINK_FEC(data) ((data) & BIT_12)
+#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
+#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
+#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
+#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
+#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
+#define QLC_83XX_CFG_STD_TX_RX_PAUSE (3 << 20)
+#define QLC_83XX_ENABLE_AUTONEG (1 << 15)
+#define QLC_83XX_CFG_LOOPBACK_HSS (2 << 1)
+#define QLC_83XX_CFG_LOOPBACK_PHY (3 << 1)
+#define QLC_83XX_CFG_LOOPBACK_EXT (4 << 1)
+
+/* LED configuration settings */
+#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_LED_RATE 0xff
+#define QLC_83XX_LED_ACT (1 << 10)
+#define QLC_83XX_LED_MOD (0 << 13)
+#define QLC_83XX_LED_CONFIG (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \
+ QLC_83XX_LED_MOD)
+
+#define QLC_83XX_10M_LINK 1
+#define QLC_83XX_100M_LINK 2
+#define QLC_83XX_1G_LINK 3
+#define QLC_83XX_10G_LINK 4
+#define QLC_83XX_STAT_TX 3
+#define QLC_83XX_STAT_RX 2
+#define QLC_83XX_STAT_MAC 1
+#define QLC_83XX_TX_STAT_REGS 14
+#define QLC_83XX_RX_STAT_REGS 40
+#define QLC_83XX_MAC_STAT_REGS 80
+
+#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2)))
+#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2))
+#define QLC_83XX_DEFAULT_OPMODE 0x55555555
+#define QLC_83XX_PRIVLEGED_FUNC 0x1
+#define QLC_83XX_VIRTUAL_FUNC 0x2
+
+#define QLC_83XX_LB_MAX_FILTERS 2048
+#define QLC_83XX_LB_BUCKET_SIZE 256
+#define QLC_83XX_MINIMUM_VECTOR 3
+
+#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
+#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
+#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
+#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
+#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
+#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
+#define QLC_83XX_DEFAULT_MODE 0x0
+#define QLCNIC_BRDTYPE_83XX_10G 0x0083
+
+#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
+#define QLC_83XX_FLASH_SPI_CONTROL 0x2808E014
+#define QLC_83XX_FLASH_STATUS 0x42100004
+#define QLC_83XX_FLASH_CONTROL 0x42110004
+#define QLC_83XX_FLASH_ADDR 0x42110008
+#define QLC_83XX_FLASH_WRDATA 0x4211000C
+#define QLC_83XX_FLASH_RDDATA 0x42110018
+#define QLC_83XX_FLASH_DIRECT_WINDOW 0x42110030
+#define QLC_83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
+#define QLC_83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLC_83XX_FLASH_WRITE_CMD 0xdacdacda
+#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
+#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
+#define QLC_83XX_FLASH_STATUS_READY 0x6
+#define QLC_83XX_FLASH_BULK_WRITE_MIN 2
+#define QLC_83XX_FLASH_BULK_WRITE_MAX 64
+#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLC_83XX_ERASE_MODE 1
+#define QLC_83XX_WRITE_MODE 2
+#define QLC_83XX_BULK_WRITE_MODE 3
+#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG 0xFD0100
+#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG 0xFD0300
+#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL 0xFD009F
+#define QLC_83XX_FLASH_OEM_ERASE_SIG 0xFD03D8
+#define QLC_83XX_FLASH_OEM_WRITE_SIG 0xFD0101
+#define QLC_83XX_FLASH_OEM_READ_SIG 0xFD0005
+#define QLC_83XX_FLASH_ADDR_TEMP_VAL 0x00800000
+#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL 0x00800001
+#define QLC_83XX_FLASH_WRDATA_DEF 0x0
+#define QLC_83XX_FLASH_READ_CTRL 0x3F
+#define QLC_83XX_FLASH_SPI_CTRL 0x4
+#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL 0x2
+#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL 0x3D
+#define QLC_83XX_FLASH_FIRST_MS_PATTERN 0x43
+#define QLC_83XX_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLC_83XX_FLASH_LAST_MS_PATTERN 0x7D
+#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
+#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+
+/* Additional registers in 83xx */
+enum qlc_83xx_ext_regs {
+ QLCNIC_GLOBAL_RESET = 0,
+ QLCNIC_WILDCARD,
+ QLCNIC_INFORMANT,
+ QLCNIC_HOST_MBX_CTRL,
+ QLCNIC_FW_MBX_CTRL,
+ QLCNIC_BOOTLOADER_ADDR,
+ QLCNIC_BOOTLOADER_SIZE,
+ QLCNIC_FW_IMAGE_ADDR,
+ QLCNIC_MBX_INTR_ENBL,
+ QLCNIC_DEF_INT_MASK,
+ QLCNIC_DEF_INT_ID,
+ QLC_83XX_IDC_MAJ_VERSION,
+ QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DRV_PRESENCE,
+ QLC_83XX_IDC_DRV_ACK,
+ QLC_83XX_IDC_CTRL,
+ QLC_83XX_IDC_DRV_AUDIT,
+ QLC_83XX_IDC_MIN_VERSION,
+ QLC_83XX_RECOVER_DRV_LOCK,
+ QLC_83XX_IDC_PF_0,
+ QLC_83XX_IDC_PF_1,
+ QLC_83XX_IDC_PF_2,
+ QLC_83XX_IDC_PF_3,
+ QLC_83XX_IDC_PF_4,
+ QLC_83XX_IDC_PF_5,
+ QLC_83XX_IDC_PF_6,
+ QLC_83XX_IDC_PF_7,
+ QLC_83XX_IDC_PF_8,
+ QLC_83XX_IDC_PF_9,
+ QLC_83XX_IDC_PF_10,
+ QLC_83XX_IDC_PF_11,
+ QLC_83XX_IDC_PF_12,
+ QLC_83XX_IDC_PF_13,
+ QLC_83XX_IDC_PF_14,
+ QLC_83XX_IDC_PF_15,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_1,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_2,
+ QLC_83XX_DRV_OP_MODE,
+ QLC_83XX_VNIC_STATE,
+ QLC_83XX_DRV_LOCK,
+ QLC_83XX_DRV_UNLOCK,
+ QLC_83XX_DRV_LOCK_ID,
+ QLC_83XX_ASIC_TEMP,
+};
+
+/* 83xx functions */
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
+int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
+int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong);
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, __le16);
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
+int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
+void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
+ struct qlcnic_info *);
+void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_intr(int, void *);
+irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
+void qlcnic_83xx_enable_intr(struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
+ const struct pci_device_id *);
+void qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
+int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *);
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
+void qlcnic_83xx_idc_aen_work(struct work_struct *);
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int);
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
+int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
+int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
+ u32, u8 *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
+int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u8);
+int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_test_link(struct qlcnic_adapter *);
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
new file mode 100644
index 000000000000..c53832b02b3e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -0,0 +1,2054 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+/* Reset template definitions */
+#define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000
+#define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLC_83XX_RESET_SEQ_VERSION 0x0101
+
+#define QLC_83XX_OPCODE_NOP 0x0000
+#define QLC_83XX_OPCODE_WRITE_LIST 0x0001
+#define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002
+#define QLC_83XX_OPCODE_POLL_LIST 0x0004
+#define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008
+#define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010
+#define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020
+#define QLC_83XX_OPCODE_SEQ_END 0x0040
+#define QLC_83XX_OPCODE_TMPL_END 0x0080
+#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+
+/* Template header */
+struct qlc_83xx_reset_hdr {
+ u16 version;
+ u16 signature;
+ u16 size;
+ u16 entries;
+ u16 hdr_size;
+ u16 checksum;
+ u16 init_offset;
+ u16 start_offset;
+} __packed;
+
+/* Command entry header. */
+struct qlc_83xx_entry_hdr {
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+} __packed;
+
+/* Generic poll command */
+struct qlc_83xx_poll {
+ u32 mask;
+ u32 status;
+} __packed;
+
+/* Read modify write command */
+struct qlc_83xx_rmw {
+ u32 mask;
+ u32 xor_value;
+ u32 or_value;
+ u8 shl;
+ u8 shr;
+ u8 index_a;
+ u8 rsvd;
+} __packed;
+
+/* Generic command with 2 DWORD */
+struct qlc_83xx_entry {
+ u32 arg1;
+ u32 arg2;
+} __packed;
+
+/* Generic command with 4 DWORD */
+struct qlc_83xx_quad_entry {
+ u32 dr_addr;
+ u32 dr_value;
+ u32 ar_addr;
+ u32 ar_value;
+} __packed;
+
+static const char *const qlc_83xx_idc_states[] = {
+ "Unknown",
+ "Cold",
+ "Init",
+ "Ready",
+ "Need Reset",
+ "Need Quiesce",
+ "Failed",
+ "Quiesce"
+};
+
+/* Device States */
+enum qlcnic_83xx_states {
+ QLC_83XX_IDC_DEV_UNKNOWN,
+ QLC_83XX_IDC_DEV_COLD,
+ QLC_83XX_IDC_DEV_INIT,
+ QLC_83XX_IDC_DEV_READY,
+ QLC_83XX_IDC_DEV_NEED_RESET,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT,
+ QLC_83XX_IDC_DEV_FAILED,
+ QLC_83XX_IDC_DEV_QUISCENT
+};
+
+static int
+qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ if ((val & 0xFFFF))
+ return 1;
+ else
+ return 0;
+}
+
+static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter)
+{
+ u32 cur, prev;
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+
+ dev_info(&adapter->pdev->dev,
+ "current state = %s, prev state = %s\n",
+ adapter->ahw->idc.name[cur],
+ adapter->ahw->idc.name[prev]);
+}
+
+static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
+ u8 mode, int lock)
+{
+ u32 val;
+ int seconds;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = adapter->portnum & 0xf;
+ val |= mode << 7;
+ if (mode)
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ else
+ seconds = jiffies / HZ;
+
+ val |= seconds << 8;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val);
+ adapter->ahw->idc.sec_counter = jiffies / HZ;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION);
+ val = val & ~(0x3 << (adapter->portnum * 2));
+ val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2));
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val);
+}
+
+static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ val = val & ~0xFF;
+ val = val | QLC_83XX_IDC_MAJOR_VERSION;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter,
+ int status, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+
+ if (status)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ qlcnic_83xx_idc_update_minor_version(adapter);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ u8 version;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ version = val & 0xFF;
+
+ if (version != QLC_83XX_IDC_MAJOR_VERSION) {
+ dev_info(&adapter->pdev->dev,
+ "%s:mismatch. version 0x%x, expected version 0x%x\n",
+ __func__, version, QLC_83XX_IDC_MAJOR_VERSION);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
+ /* Clear graceful reset bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter,
+ int flag, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ if (flag)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter,
+ int time_limit)
+{
+ u64 seconds;
+
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ if (seconds <= time_limit)
+ return 0;
+ else
+ return -EBUSY;
+}
+
+/**
+ * qlcnic_83xx_idc_check_reset_ack_reg
+ *
+ * @adapter: adapter structure
+ *
+ * Check ACK wait limit and clear the functions which failed to ACK
+ *
+ * Return 0 if all functions have acknowledged the reset request.
+ **/
+static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter)
+{
+ int timeout;
+ u32 ack, presence, val;
+
+ timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ dev_info(&adapter->pdev->dev,
+ "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence);
+ if (!((ack & presence) == presence)) {
+ if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) {
+ /* Clear functions which failed to ACK */
+ dev_info(&adapter->pdev->dev,
+ "%s: ACK wait exceeds time limit\n", __func__);
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(ack ^ presence);
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: updated drv presence reg = 0x%x\n",
+ __func__, val);
+ qlcnic_83xx_unlock_driver(adapter);
+ return 0;
+
+ } else {
+ return 1;
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: Reset ACK received from all functions\n",
+ __func__);
+ return 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_tx_soft_reset
+ *
+ * @adapter: adapter structure
+ *
+ * Handle context deletion and recreation request from transmit routine
+ *
+ * Returns -EBUSY or Success (0)
+ *
+ **/
+static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+ qlcnic_down(adapter, netdev);
+ qlcnic_up(adapter, netdev);
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+
+ adapter->netdev->trans_start = jiffies;
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_detach_driver
+ *
+ * @adapter: adapter structure
+ * Detach net interface, stop TX and cleanup resources before the HW reset.
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
+{
+ int i;
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ /* Disable mailbox interrupt */
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
+ qlcnic_down(adapter, netdev);
+ for (i = 0; i < adapter->ahw->num_msix; i++) {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_attach_driver
+ *
+ * @adapter: adapter structure
+ *
+ * Re-attach and re-enable net interface
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+}
+
+static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ qlcnic_83xx_idc_log_state_history(adapter);
+ dev_info(&adapter->pdev->dev, "Device will enter failed state\n");
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_RESET);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_find_reset_owner_id
+ *
+ * @adapter: adapter structure
+ *
+ * NIC gets precedence over iSCSI, and iSCSI has precedence over FCoE.
+ * Within the same class, the function with the lowest PCI ID assumes ownership.
+ *
+ * Returns: reset owner id or failure indication (-EIO)
+ *
+ **/
+static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter)
+{
+ u32 reg, reg1, reg2, i, j, owner, class;
+
+ reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1);
+ reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2);
+ owner = QLCNIC_TYPE_NIC;
+ i = 0;
+ j = 0;
+ reg = reg1;
+
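+ /* Each partition info register packs one 4-bit field per function; the
+ * low two bits of each field encode the function class (NIC/iSCSI/FCoE).
+ * Scan for the first function of the highest-precedence class present.
+ */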
+ do {
+ class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3);
+ if (class == owner)
+ break;
+ if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) {
+ reg = reg2;
+ j = 0;
+ } else {
+ j++;
+ }
+
+ if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) {
+ if (owner == QLCNIC_TYPE_NIC)
+ owner = QLCNIC_TYPE_ISCSI;
+ else if (owner == QLCNIC_TYPE_ISCSI)
+ owner = QLCNIC_TYPE_FCOE;
+ else if (owner == QLCNIC_TYPE_FCOE)
+ return -EIO;
+ reg = reg1;
+ j = 0;
+ i = 0;
+ }
+ } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS);
+
+ return i;
+}
+
+static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock)
+{
+ int ret = 0;
+
+ ret = qlcnic_83xx_restart_hw(adapter);
+
+ if (ret) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, lock);
+ } else {
+ qlcnic_83xx_idc_clear_registers(adapter, lock);
+ ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock);
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "peg halt status1=0x%x\n", status);
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
+{
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
+ qlcnic_83xx_enable_mbx_intrpt(adapter);
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (qlcnic_83xx_config_intrpt(adapter, 1)) {
+ netdev_err(adapter->netdev,
+ "Failed to enable mbx intr\n");
+ return -EIO;
+ }
+ }
+
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->nic_ops->init_driver(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_attach_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ adapter->ahw->idc.quiesce_req = 0;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Perform ready state initialization; this routine is invoked only
+ * once from the READY state.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+ /* Re-attach the device if required */
+ if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_vnic_pf_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Ensure vNIC mode privileged function starts only after vNIC mode is
+ * enabled by management function.
+ * If vNIC mode is ready, start initialization.
+ *
+ * Returns: -EIO or 0
+ *
+ **/
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Privileged function waits until the mgmt function enables vNIC mode */
+ state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE);
+ if (state != QLCNIC_DEV_NPAR_OPER) {
+ if (!ahw->idc.vnic_wait_limit--) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ dev_info(&adapter->pdev->dev, "vNIC mode disabled\n");
+ return -EIO;
+
+ } else {
+ /* Perform one time initialization from ready state */
+ if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+
+ /* If the previous state is UNKNOWN, the device was already
+ * attached properly by the init routine */
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER;
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled\n");
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->idc.err_code = -EIO;
+ dev_err(&adapter->pdev->dev,
+ "%s: Device in unknown state\n", __func__);
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_cold_state_handler
+ *
+ * @adapter: adapter structure
+ *
+ * If the HW is up and running, the device enters the READY state.
+ * If a firmware image from the host needs to be loaded, the device is
+ * forced to boot from the file firmware image.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0);
+
+ if (qlcnic_load_fw_file) {
+ qlcnic_83xx_idc_restart_hw(adapter, 0);
+ } else {
+ if (qlcnic_83xx_check_hw_status(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ return -EIO;
+ } else {
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+ }
+ }
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_init_state
+ *
+ * @adapter: adapter structure
+ *
+ * The reset owner restarts the device from this state.
+ * The device enters the failed state if it remains in this
+ * state for longer than the DEV_INIT time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
+{
+ int timeout, ret = 0;
+ u32 owner;
+
+ timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (adapter->ahw->pci_func == owner)
+ ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
+ } else {
+ ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state
+ *
+ * @adapter: adapter structure
+ *
+ * Perform actions specified by the IDC protocol after monitoring the
+ * device state and events.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0;
+
+ /* Perform NIC configuration based ready state entry actions */
+ if (ahw->idc.state_entry(adapter))
+ return -EIO;
+
+ if (qlcnic_check_temp(adapter)) {
+ if (ahw->temp == QLCNIC_TEMP_PANIC) {
+ qlcnic_83xx_idc_check_fan_failure(adapter);
+ dev_err(&adapter->pdev->dev,
+ "Error: device temperature %d above limits\n",
+ adapter->ahw->temp);
+ clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ ret = qlcnic_83xx_check_heartbeat(adapter);
+ if (ret) {
+ adapter->flags |= QLCNIC_FW_HANG;
+ if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ }
+ return -EIO;
+ }
+
+ if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ /* Move to need reset state and prepare for reset */
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ return ret;
+ }
+
+ /* Check for soft reset request */
+ if (ahw->reset_context &&
+ !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ qlcnic_83xx_idc_tx_soft_reset(adapter);
+ return ret;
+ }
+
+ /* Move to need quiesce state if requested */
+ if (adapter->ahw->idc.quiesce_req) {
+ qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_need_reset_state
+ *
+ * @adapter: adapter structure
+ *
+ * Device will remain in this state until:
+ * Reset request ACKs are received from all the functions, or
+ * the wait time exceeds the maximum time limit
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
+{
+ int ret = 0;
+
+ if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ }
+
+ /* Check ACK from other functions */
+ ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Waiting for reset ACK\n", __func__);
+ return 0;
+ }
+
+ /* Transition to the INIT state and restart the HW */
+ qlcnic_83xx_idc_enter_init_state(adapter, 1);
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+ adapter->ahw->idc.err_code = -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
+ u32 state)
+{
+ u32 cur, prev, next;
+
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+ next = state;
+
+ if ((next < QLC_83XX_IDC_DEV_COLD) ||
+ (next > QLC_83XX_IDC_DEV_QUISCENT)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: curr %d, prev %d, next state %d is invalid\n",
+ __func__, cur, prev, state);
+ return 1;
+ }
+
+ if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) &&
+ (prev == QLC_83XX_IDC_DEV_UNKNOWN)) {
+ if ((next != QLC_83XX_IDC_DEV_COLD) &&
+ (next != QLC_83XX_IDC_DEV_READY)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ if (next == QLC_83XX_IDC_DEV_INIT) {
+ if ((prev != QLC_83XX_IDC_DEV_INIT) &&
+ (prev != QLC_83XX_IDC_DEV_COLD) &&
+ (prev != QLC_83XX_IDC_DEV_NEED_RESET)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
+/**
+ * qlcnic_83xx_idc_poll_dev_state
+ *
+ * @work: kernel work queue structure used to schedule the function
+ *
+ * Poll device state periodically and perform state specific
+ * actions defined by Inter Driver Communication (IDC) protocol.
+ *
+ * Returns: None
+ *
+ **/
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ u32 state;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_idc_log_state_history(adapter);
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ } else {
+ adapter->ahw->idc.curr_state = state;
+ }
+
+ switch (adapter->ahw->idc.curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ qlcnic_83xx_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ qlcnic_83xx_idc_need_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ qlcnic_83xx_idc_need_quiesce_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ qlcnic_83xx_idc_failed_state(adapter);
+ return;
+ case QLC_83XX_IDC_DEV_INIT:
+ qlcnic_83xx_idc_init_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ qlcnic_83xx_idc_quiesce_state(adapter);
+ break;
+ default:
+ qlcnic_83xx_idc_unknown_state(adapter);
+ return;
+ }
+ adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
+ qlcnic_83xx_periodic_tasks(adapter);
+
+ /* Re-schedule the function */
+ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ adapter->ahw->idc.delay);
+}
+
+static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
+{
+ u32 idc_params, val;
+
+ if (qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_IDC_FLASH_PARAM_ADDR,
+ (u8 *)&idc_params, 1)) {
+ dev_info(&adapter->pdev->dev,
+ "%s: failed to get IDC params from flash\n", __func__);
+ adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ } else {
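+ /* Flash IDC parameter word: bits 15:0 = dev init timeout,
+ * bits 31:16 = reset ACK timeout (both in seconds)
+ */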
+ adapter->dev_init_timeo = idc_params & 0xFFFF;
+ adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF);
+ }
+
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+ adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ /* Check if reset recovery is disabled */
+ if (!qlcnic_auto_fw_reset) {
+ /* Propagate do not reset request to other functions */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+}
+
+static int
+qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
+{
+ u32 state, val;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EIO;
+
+ /* Clear driver lock register */
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0);
+ if (qlcnic_83xx_idc_update_major_version(adapter, 0)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) {
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_COLD);
+ state = QLC_83XX_IDC_DEV_COLD;
+ }
+
+ adapter->ahw->idc.curr_state = state;
+ /* First to load function should cold boot the device */
+ if (state == QLC_83XX_IDC_DEV_COLD)
+ qlcnic_83xx_idc_cold_state_handler(adapter);
+
+ /* Check if reset recovery is enabled */
+ if (qlcnic_auto_fw_reset) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
+{
+ int ret = -EIO;
+
+ qlcnic_83xx_setup_idc_parameters(adapter);
+
+ if (qlcnic_83xx_get_reset_instruction_template(adapter))
+ return ret;
+
+ if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) {
+ if (qlcnic_83xx_idc_first_to_load_function_handler(adapter))
+ return -EIO;
+ } else {
+ if (qlcnic_83xx_idc_check_major_version(adapter))
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+
+ return 0;
+}
+
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter)
+{
+ int id;
+ u32 val;
+
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+
+ if (id == adapter->portnum) {
+ dev_err(&adapter->pdev->dev,
+ "%s: wait for lock recovery.. %d\n", __func__, id);
+ msleep(20);
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+ }
+
+ /* Clear driver presence bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
+{
+ u32 val;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, please retry\n", __func__);
+ return;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
+ !qlcnic_auto_fw_reset) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, device in non-reset mode\n", __func__);
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+ }
+
+ if (key == QLCNIC_FORCE_FW_RESET) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) {
+ adapter->ahw->idc.collect_dump = 1;
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+}
+
+static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
+{
+ u8 *p_cache;
+ u32 src, size;
+ u64 dest;
+ int ret = -EIO;
+
+ src = QLC_83XX_BOOTLOADER_FLASH_ADDR;
+ dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR);
+ size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE);
+
+ /* Round the copy size up to a 16-byte boundary for 128-bit MS memory writes */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ p_cache = kzalloc(size, GFP_KERNEL);
+ if (p_cache == NULL)
+ return -ENOMEM;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
+ size / sizeof(u32));
+ if (ret) {
+ kfree(p_cache);
+ return ret;
+ }
+ /* 16 byte write to MS memory */
+ ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
+ if (ret) {
+ kfree(p_cache);
+ return ret;
+ }
+ kfree(p_cache);
+
+ return ret;
+}
+
+static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
+{
+ u32 dest, *p_cache;
+ u64 addr;
+ u8 data[16];
+ size_t size;
+ int i, ret = -EIO;
+
+ dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
+ size = (adapter->ahw->fw_info.fw->size & ~0xF);
+ p_cache = (u32 *)adapter->ahw->fw_info.fw->data;
+ addr = (u64)dest;
+
+ ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
+ (u32 *)p_cache, size / 16);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "MS memory write failed\n");
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+ return -EIO;
+ }
+
+ /* Pad the trailing, unaligned bytes of the image out to a full 16-byte write */
+ if (adapter->ahw->fw_info.fw->size & 0xF) {
+ addr = dest + size;
+ for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++)
+ data[i] = adapter->ahw->fw_info.fw->data[size + i];
+ for (; i < 16; i++)
+ data[i] = 0;
+ ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "MS memory write failed\n");
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+ return -EIO;
+ }
+ }
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+
+ return 0;
+}
+
+static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
+{
+ int i, j;
+ u32 val = 0, val1 = 0, reg = 0;
+
+ val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG);
+ dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_THRESHOLD;
+ }
+ for (i = 0; i < 8; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4));
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+ }
+ for (i = 0; i < 4; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4));
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_TC_STATS;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_TC_STATS;
+ }
+ for (i = 7; i >= 0; i--) {
+ val = QLCRD32(adapter, reg);
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ QLCWR32(adapter, reg, (val | (i << 29)));
+ val = QLCRD32(adapter, reg);
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD);
+ val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD);
+ dev_info(&adapter->pdev->dev,
+ "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+ val, val1);
+}
+
+static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
+{
+ u32 reg = 0, i, j;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to acquire driver lock\n", __func__);
+ return;
+ }
+
+ qlcnic_83xx_dump_pause_control_regs(adapter);
+ QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_THRESHOLD;
+
+ for (i = 0; i < 8; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x0);
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+
+ for (i = 0; i < 4; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF);
+ }
+
+ QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0);
+ QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0);
+ dev_info(&adapter->pdev->dev,
+ "Disabled pause frames successfully on all ports\n");
+ qlcnic_83xx_unlock_driver(adapter);
+}
+
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
+{
+ u32 heartbeat, peg_status;
+ int retries, ret = -EIO;
+
+ retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+ p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != p_dev->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ if (ret) {
+ dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
+ qlcnic_83xx_disable_pause_frames(p_dev);
+ peg_status = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_HALT_STATUS1);
+ dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n", peg_status,
+ QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4));
+
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+ dev_err(&p_dev->pdev->dev,
+ "Device is being reset err code 0x00006700.\n");
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
+{
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+ u32 val;
+
+ do {
+ val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE);
+ if (val == QLC_83XX_CMDPEG_COMPLETE)
+ return 0;
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val);
+ return -EIO;
+}
+
+int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+{
+ int err;
+
+ err = qlcnic_83xx_check_cmd_peg_status(p_dev);
+ if (err)
+ return err;
+
+ err = qlcnic_83xx_check_heartbeat(p_dev);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
+ int duration, u32 mask, u32 status)
+{
+ u32 value;
+ int timeout_error;
+ u8 retries;
+
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ retries = duration / 10;
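+ /* Re-check the register roughly every duration/10 ms until it matches */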
+
+ do {
+ if ((value & mask) != status) {
+ timeout_error = 1;
+ msleep(duration / 10);
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+ if (timeout_error) {
+ p_dev->ahw->reset.seq_error++;
+ dev_err(&p_dev->pdev->dev,
+ "%s: Timeout Err, entry_num = %d\n",
+ __func__, p_dev->ahw->reset.seq_index);
+ dev_err(&p_dev->pdev->dev,
+ "0x%08x 0x%08x 0x%08x\n",
+ value, mask, status);
+ }
+
+ return timeout_error;
+}
+
+static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
+{
+ u32 sum = 0;
+ u16 *buff = (u16 *)p_dev->ahw->reset.buff;
+ int count = p_dev->ahw->reset.hdr->size / sizeof(u16);
+
+ while (count-- > 0)
+ sum += *buff++;
+
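+ /* Fold any carries back into the low 16 bits (ones'-complement style sum) */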
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ if (~sum) {
+ return 0;
+ } else {
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+ return -1;
+ }
+}
+
+int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+{
+ u8 *p_buff;
+ u32 addr, count;
+ struct qlcnic_hardware_context *ahw = p_dev->ahw;
+
+ ahw->reset.seq_error = 0;
+ ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
+ if (p_dev->ahw->reset.buff == NULL)
+ return -ENOMEM;
+
+ p_buff = p_dev->ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR;
+ count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32);
+
+ /* Copy template header from flash */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+ ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size;
+ p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32);
+
+ /* Copy rest of the template */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+
+ if (qlcnic_83xx_reset_template_checksum(p_dev))
+ return -EIO;
+ /* Get Stop, Start and Init command offsets */
+ ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset;
+ ahw->reset.start_offset = ahw->reset.buff +
+ ahw->reset.hdr->start_offset;
+ ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ return 0;
+}
+
+/* Read Write HW register command */
+static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr)
+{
+ int value;
+
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Read Modify Write HW register command */
+static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr,
+ struct qlc_83xx_rmw *p_rmw_hdr)
+{
+ int value;
+
+ if (p_rmw_hdr->index_a)
+ value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
+ else
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+
+ value &= p_rmw_hdr->mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Write HW register command */
+static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Read and Write instruction */
+static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Poll HW register command */
+static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_poll *poll;
+ int i;
+ unsigned long arg1, arg2;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, entry++)
+ qlcnic_83xx_poll_reg(p_dev, entry->arg1,
+ delay, poll->mask,
+ poll->status);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ arg1 = entry->arg1;
+ arg2 = entry->arg2;
+ if (delay) {
+ if (qlcnic_83xx_poll_reg(p_dev,
+ arg1, delay,
+ poll->mask,
+ poll->status)) {
+ qlcnic_83xx_rd_reg_indirect(p_dev,
+ arg1);
+ qlcnic_83xx_rd_reg_indirect(p_dev,
+ arg2);
+ }
+ }
+ }
+ }
+}
+
+/* Poll and write HW register command */
+static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ long delay;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr,
+ entry->dr_value);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay)
+ qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status);
+ }
+}
+
+/* Read Modify Write register command */
+static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_rmw *rmw_hdr;
+
+ rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)rmw_hdr +
+ sizeof(struct qlc_83xx_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1,
+ entry->arg2, rmw_hdr);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((u32)((long)p_hdr->delay));
+}
+
+/* Read and poll register command */
+static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ int index, i, j;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+ unsigned long addr;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay) {
+ if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status)) {
+ index = p_dev->ahw->reset.array_index;
+ addr = entry->dr_addr;
+ j = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ p_dev->ahw->reset.array[index++] = j;
+
+ if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
+ p_dev->ahw->reset.array_index = 1;
+ }
+ }
+ }
+}
+
+static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_end = 1;
+}
+
+static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.template_end = 1;
+ if (p_dev->ahw->reset.seq_error == 0)
+ dev_err(&p_dev->pdev->dev,
+ "HW restart process completed successfully.\n");
+ else
+ dev_err(&p_dev->pdev->dev,
+ "HW restart completed with timeout errors.\n");
+}
+
+/**
+* qlcnic_83xx_exec_template_cmd
+*
+* @p_dev: adapter structure
+* @p_buff: Pointer to instruction template
+*
+* Template provides instructions to stop, restart and initialize firmware.
+* These instructions are abstracted as a series of read, write and
+* poll operations on hardware registers. Register information and operation
+* specifics are not exposed to the driver. Driver reads the template from
+* flash and executes the instructions located at pre-defined offsets.
+*
+* Returns: None
+* */
+static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
+ char *p_buff)
+{
+ int index, entries;
+ struct qlc_83xx_entry_hdr *p_hdr;
+ char *entry = p_buff;
+
+ p_dev->ahw->reset.seq_end = 0;
+ p_dev->ahw->reset.template_end = 0;
+ entries = p_dev->ahw->reset.hdr->entries;
+ index = p_dev->ahw->reset.seq_index;
+
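+ /* Walk the template entries; each entry header supplies the opcode,
+ * the entry size and an optional inter-operation delay.
+ */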
+ for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qlc_83xx_entry_hdr *)entry;
+
+ switch (p_hdr->cmd) {
+ case QLC_83XX_OPCODE_NOP:
+ break;
+ case QLC_83XX_OPCODE_WRITE_LIST:
+ qlcnic_83xx_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_WRITE_LIST:
+ qlcnic_83xx_read_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_LIST:
+ qlcnic_83xx_poll_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_WRITE_LIST:
+ qlcnic_83xx_poll_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_MODIFY_WRITE:
+ qlcnic_83xx_read_modify_write(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_PAUSE:
+ qlcnic_83xx_pause(p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_END:
+ qlcnic_83xx_seq_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_TMPL_END:
+ qlcnic_83xx_template_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_POLL_READ_LIST:
+ qlcnic_83xx_poll_read_list(p_dev, p_hdr);
+ break;
+ default:
+ dev_err(&p_dev->pdev->dev,
+ "%s: Unknown opcode 0x%04x in template %d\n",
+ __func__, p_hdr->cmd, index);
+ break;
+ }
+ entry += p_hdr->size;
+ }
+ p_dev->ahw->reset.seq_index = index;
+}
+
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_index = 0;
+
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset);
+ if (p_dev->ahw->reset.template_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ if (request_firmware(&adapter->ahw->fw_info.fw,
+ QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) {
+ dev_err(&adapter->pdev->dev,
+ "No file FW image, loading flash FW image.\n");
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ } else {
+ if (qlcnic_83xx_copy_fw_file(adapter))
+ return err;
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int err = -EIO;
+
+ qlcnic_83xx_stop_hw(adapter);
+
+ /* Collect FW register dump if required */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
+ qlcnic_dump_fw(adapter);
+ qlcnic_83xx_init_hw(adapter);
+
+ if (qlcnic_83xx_copy_bootloader(adapter))
+ return err;
+ /* Boot either flash image or firmware image from host file system */
+ if (qlcnic_load_fw_file) {
+ if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ return err;
+ } else {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ }
+
+ qlcnic_83xx_start_hw(adapter);
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+* qlcnic_83xx_config_default_opmode
+*
+* @adapter: adapter structure
+*
+* Configure default driver operating mode
+*
+* Returns: Error code or Success(0)
+* */
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ } else {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_tx_ques = nic_info.max_tx_ques;
+ ahw->max_rx_ques = nic_info.max_rx_ques;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->max_mac_filters = nic_info.max_mac_filters;
+ ahw->max_mtu = nic_info.max_mtu;
+
+ if (ahw->capabilities & BIT_23)
+ ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
+ else
+ ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+
+ return ahw->nic_mode;
+}
+
+static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ ret = qlcnic_83xx_get_nic_configuration(adapter);
+ if (ret == -EIO)
+ return -EIO;
+
+ if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
+ if (qlcnic_83xx_config_vnic_opmode(adapter))
+ return -EIO;
+ } else if (ret == QLC_83XX_DEFAULT_MODE) {
+ if (qlcnic_83xx_config_default_opmode(adapter))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
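+/* A function is treated as "in use" when its presence bit is set or when
+ * bit 6 of the audit register is set (assumed here to indicate a previous
+ * unclean unload); its firmware resources are then cleaned up below.
+ */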
+#define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1))
+static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 presence_mask, audit_mask;
+ int status;
+
+ presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+
+ if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
+ cmd.req.arg[1] = BIT_31;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to clean up the function resources\n");
+ qlcnic_free_mbx_args(&cmd);
+ }
+}
+
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+ /* Initialize 83xx mailbox spinlock */
+ spin_lock_init(&ahw->mbx_lock);
+
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ qlcnic_83xx_clear_function_resources(adapter);
+
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
+ if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
+ qlcnic_83xx_read_flash_mfg_id(adapter);
+
+ if (qlcnic_83xx_idc_init(adapter))
+ return -EIO;
+
+ /* Configure default, SR-IOV or Virtual NIC mode of operation */
+ if (qlcnic_83xx_configure_opmode(adapter))
+ return -EIO;
+
+ /* Perform operating mode specific initialization */
+ if (adapter->nic_ops->init_driver(adapter))
+ return -EIO;
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ /* Periodically monitor device status */
+ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+
+ return adapter->ahw->idc.err_code;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
new file mode 100644
index 000000000000..b0c3de9ede03
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -0,0 +1,225 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ int i, ret = -EBUSY;
+ u32 data = QLCNIC_MGMT_FUNC;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return ret;
+
+ if (qlcnic_config_npars) {
+ for (i = 0; i < ahw->act_pci_func; i++) {
+ id = adapter->npars[i].pci_func;
+ if (id == ahw->pci_func)
+ continue;
+ data |= qlcnic_config_npars &
+ QLC_83XX_SET_FUNC_OPMODE(0x3, id);
+ }
+ } else {
+ data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, ahw->pci_func)) |
+ QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC,
+ ahw->pci_func);
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+/**
+ * qlcnic_83xx_init_mgmt_vnic
+ *
+ * @adapter: adapter structure
+ * The management virtual NIC sets the operational mode of the other vNICs
+ * and configures the embedded switch (ESWITCH).
+ * Returns: Success(0) or error code.
+ *
+ **/
+static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
+ if (qlcnic_init_pci_info(adapter))
+ return err;
+
+ if (qlcnic_83xx_set_vnic_opmode(adapter))
+ return err;
+
+ if (qlcnic_set_default_offload_settings(adapter))
+ return err;
+ } else {
+ if (qlcnic_reset_npar_config(adapter))
+ return err;
+ }
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ qlcnic_83xx_enable_vnic_mode(adapter, 1);
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ return 0;
+}
+
+static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_fw_version(adapter);
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return err;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_config_vnic_opmode
+ *
+ * @adapter: adapter structure
+ * Identify virtual NIC operational modes.
+ *
+ * Returns: Success(0) or error code.
+ *
+ **/
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_nic_template *nic_ops = adapter->nic_ops;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+ } else if (priv_level == QLCNIC_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+ } else if (priv_level == QLCNIC_MGMT_FUNC) {
+ ahw->op_mode = QLCNIC_MGMT_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+ } else {
+ return -EIO;
+ }
+
+ if (ahw->capabilities & BIT_23)
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ else
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+ adapter->ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 58f094ca052e..a69097c6b84d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1,12 +1,92 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
+static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 4, 1},
+ {QLCNIC_CMD_SET_MTU, 4, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 4, 1},
+ {QLCNIC_CMD_GET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_SET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 4, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
+ {QLCNIC_CMD_SET_DRV_VER, 4, 1},
+};
+
+static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
+{
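+ /* Signature layout: bits 7:0 = PCI function, bits 15:8 = FW HAL version,
+ * upper bits = 0xcafe marker
+ */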
+ return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
+ (0xcafe << 16);
+}
+
+/* Allocate mailbox registers */
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ mbx->req.arg[0] = type;
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Free up mailbox registers */
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
+{
+ kfree(cmd->req.arg);
+ cmd->req.arg = NULL;
+ kfree(cmd->rsp.arg);
+ cmd->rsp.arg = NULL;
+}
+
static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
int i;
@@ -38,193 +118,123 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
return rsp;
}
-void
-qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
+ int i;
u32 rsp;
u32 signature;
struct pci_dev *pdev = adapter->pdev;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ const char *fmt;
- signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
- adapter->ahw->fw_hal_version);
+ signature = qlcnic_get_cmd_signature(ahw);
/* Acquire semaphore before accessing CRB */
if (qlcnic_api_lock(adapter)) {
- cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
- return;
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ return cmd->rsp.arg[0];
}
QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
- QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
- QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
- QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
+ for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
+ QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
- QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));
-
+ QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
- dev_err(&pdev->dev, "CDRP response timeout.\n");
- cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
+ dev_err(&pdev->dev, "card response timeout.\n");
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
- cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- switch (cmd->rsp.cmd) {
+ cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
+ switch (cmd->rsp.arg[0]) {
case QLCNIC_RCODE_INVALID_ARGS:
- dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP invalid args: [%d]\n";
break;
case QLCNIC_RCODE_NOT_SUPPORTED:
case QLCNIC_RCODE_NOT_IMPL:
- dev_err(&pdev->dev,
- "CDRP command not supported: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command not supported: [%d]\n";
break;
case QLCNIC_RCODE_NOT_PERMITTED:
- dev_err(&pdev->dev,
- "CDRP requested action not permitted: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP requested action not permitted: [%d]\n";
break;
case QLCNIC_RCODE_INVALID:
- dev_err(&pdev->dev,
- "CDRP invalid or unknown cmd received: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP invalid or unknown cmd received: [%d]\n";
break;
case QLCNIC_RCODE_TIMEOUT:
- dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command timeout: [%d]\n";
break;
default:
- dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command failed: [%d]\n";
+ break;
}
- } else if (rsp == QLCNIC_CDRP_RSP_OK) {
- cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
- if (cmd->rsp.arg2)
- cmd->rsp.arg2 = QLCRD32(adapter,
- QLCNIC_ARG2_CRB_OFFSET);
- if (cmd->rsp.arg3)
- cmd->rsp.arg3 = QLCRD32(adapter,
- QLCNIC_ARG3_CRB_OFFSET);
- }
- if (cmd->rsp.arg1)
- cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ } else if (rsp == QLCNIC_CDRP_RSP_OK)
+ cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
+
+ for (i = 1; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));
/* Release semaphore */
qlcnic_api_unlock(adapter);
-
-}
-
-static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
-{
- uint64_t sum = 0;
- int count = temp_size / sizeof(uint32_t);
- while (count-- > 0)
- sum += *temp_buffer++;
- while (sum >> 32)
- sum = (sum & 0xFFFFFFFF) + (sum >> 32);
- return ~sum;
+ return cmd->rsp.arg[0];
}
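[Editor's note] For reference, the qlcnic_temp_checksum() helper deleted above implemented a 32-bit checksum with end-around-carry folding over the minidump template. A standalone sketch of that arithmetic, using made-up example words, is given below; embedding the returned complement in the buffer makes the whole buffer check out to zero, which matches the driver's "if (csum) fail" test.

/* Hedged reconstruction of the deleted template checksum, for illustration. */
#include <stdint.h>
#include <stdio.h>

static uint32_t temp_checksum(const uint32_t *buf, uint32_t size_bytes)
{
	uint64_t sum = 0;
	int count = size_bytes / sizeof(uint32_t);

	while (count-- > 0)
		sum += *buf++;
	while (sum >> 32)		/* fold carries back into the low 32 bits */
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return ~(uint32_t)sum;
}

int main(void)
{
	uint32_t tmpl[4] = { 0x11111111, 0x22222222, 0x33333333, 0 };

	tmpl[3] = temp_checksum(tmpl, 3 * sizeof(uint32_t));	/* embed checksum */
	printf("validates: %s\n",
	       temp_checksum(tmpl, sizeof(tmpl)) == 0 ? "yes" : "no");
	return 0;
}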
-int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
{
- int err, i;
- void *tmp_addr;
- u32 temp_size, version, csum, *template;
- __le32 *tmp_buf;
struct qlcnic_cmd_args cmd;
- struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
- dma_addr_t tmp_addr_t = 0;
-
- ahw = adapter->ahw;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
- memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
- dev_info(&adapter->pdev->dev,
- "Can't get template size %d\n", cmd.rsp.cmd);
- err = -EIO;
- return err;
- }
- temp_size = cmd.rsp.arg2;
- version = cmd.rsp.arg3;
- if (!temp_size)
- return -EIO;
+ u32 arg1, arg2, arg3;
+ char drv_string[12];
+ int err = 0;
- tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
- &tmp_addr_t, GFP_KERNEL);
- if (!tmp_addr) {
- dev_err(&adapter->pdev->dev,
- "Can't get memory for FW dump template\n");
- return -ENOMEM;
- }
- memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
- cmd.req.arg1 = LSD(tmp_addr_t);
- cmd.req.arg2 = MSD(tmp_addr_t);
- cmd.req.arg3 = temp_size;
- qlcnic_issue_cmd(adapter, &cmd);
-
- err = cmd.rsp.cmd;
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to get mini dump template header %d\n", err);
- err = -EIO;
- goto error;
- }
- tmp_tmpl = tmp_addr;
- ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
- if (!ahw->fw_dump.tmpl_hdr) {
- err = -EIO;
- goto error;
- }
- tmp_buf = tmp_addr;
- template = (u32 *) ahw->fw_dump.tmpl_hdr;
- for (i = 0; i < temp_size/sizeof(u32); i++)
- *template++ = __le32_to_cpu(*tmp_buf++);
+ memset(drv_string, 0, sizeof(drv_string));
+ snprintf(drv_string, sizeof(drv_string), "%d"".""%d"".""%d",
+ _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
+ _QLCNIC_LINUX_SUBVERSION);
- csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size);
- if (csum) {
- dev_err(&adapter->pdev->dev,
- "Template header checksum validation failed\n");
- err = -EIO;
- goto error;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER);
+ memcpy(&arg1, drv_string, sizeof(u32));
+ memcpy(&arg2, drv_string + 4, sizeof(u32));
+ memcpy(&arg3, drv_string + 8, sizeof(u32));
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ cmd.req.arg[3] = arg3;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to set driver version in firmware\n");
+ return -EIO;
}
- tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
- ahw->fw_dump.enable = 1;
-error:
- dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
- return err;
+ return 0;
}
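[Editor's note] The new qlcnic_fw_cmd_set_drv_version() above formats "major.minor.sub" into a 12-byte buffer and copies it 4 bytes at a time into the three mailbox arguments. A standalone sketch of that packing follows; the version numbers are example values, and byte order within each argument follows host endianness.

/* Hedged sketch of the driver-version string packing shown above. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	char drv_string[12];
	uint32_t arg1, arg2, arg3;

	memset(drv_string, 0, sizeof(drv_string));
	snprintf(drv_string, sizeof(drv_string), "%d.%d.%d", 5, 1, 34);

	memcpy(&arg1, drv_string, sizeof(uint32_t));
	memcpy(&arg2, drv_string + 4, sizeof(uint32_t));
	memcpy(&arg3, drv_string + 8, sizeof(uint32_t));

	printf("\"%s\" -> 0x%08x 0x%08x 0x%08x\n", drv_string,
	       (unsigned)arg1, (unsigned)arg2, (unsigned)arg3);
	return 0;
}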
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
+ int err = 0;
struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
- cmd.req.arg1 = recv_ctx->context_id;
- cmd.req.arg2 = mtu;
- cmd.req.arg3 = 0;
- if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd) {
- dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
- return -EIO;
- }
- }
+ if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
+ return err;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+ cmd.req.arg[1] = recv_ctx->context_id;
+ cmd.req.arg[2] = mtu;
- return 0;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
-static int
-qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
void *addr;
struct qlcnic_hostrq_rx_ctx *prq;
@@ -241,10 +251,10 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
u64 phys_addr;
u8 i, nrds_rings, nsds_rings;
+ u16 temp_u16;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
int err;
- u16 temp;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -278,11 +288,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
- cap |= QLCNIC_CAP0_LRO_MSS;
-
- temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
- prq->valid_field_offset = cpu_to_le16(temp);
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
prq->txrx_sds_binding = nsds_rings - 1;
prq->capabilities[0] = cpu_to_le32(cap);
@@ -328,20 +335,17 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
}
phys_addr = hostrq_phys_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = (u32) (phys_addr >> 32);
- cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
- cmd.req.arg3 = rq_size;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to create rx ctx in firmware%d\n", err);
goto out_free_rsp;
}
-
prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
@@ -372,6 +376,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
cardrsp_phys_addr);
+ qlcnic_free_mbx_args(&cmd);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
return err;
@@ -380,24 +385,24 @@ out_free_rq:
static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
+ int err;
struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = recv_ctx->context_id;
- cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
- cmd.req.arg3 = 0;
- cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd)
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+ cmd.req.arg[1] = recv_ctx->context_id;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
dev_err(&adapter->pdev->dev,
"Failed to destroy rx ctx in firmware\n");
recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
}
-static int
-qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int ring)
{
struct qlcnic_hostrq_tx_ctx *prq;
struct qlcnic_hostrq_cds_ring *prq_cds;
@@ -409,7 +414,6 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
int err;
u64 phys_addr;
dma_addr_t rq_phys_addr, rsp_phys_addr;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
/* reset host resources */
tx_ring->producer = 0;
@@ -444,9 +448,9 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
prq->host_int_crb_mode =
cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ prq->msi_index = 0;
prq->interrupt_ctl = 0;
- prq->msi_index = 0;
prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
prq_cds = &prq->cds_ring;
@@ -455,19 +459,17 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
phys_addr = rq_phys_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = (u32)(phys_addr >> 32);
- cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
- cmd.req.arg3 = rq_size;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
-
- adapter->tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
} else {
dev_err(&adapter->pdev->dev,
"Failed to create tx ctx in firmware%d\n", err);
@@ -475,76 +477,81 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
- rsp_phys_addr);
+ rsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
+ qlcnic_free_mbx_args(&cmd);
return err;
}
static void
-qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_args cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->tx_ring->ctx_id;
- cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
- cmd.req.arg3 = 0;
- cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd)
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+ cmd.req.arg[1] = tx_ring->ctx_id;
+ if (qlcnic_issue_cmd(adapter, &cmd))
dev_err(&adapter->pdev->dev,
"Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
}
int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
+ int err;
struct qlcnic_cmd_args cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = config;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
- qlcnic_issue_cmd(adapter, &cmd);
-
- return cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+ cmd.req.arg[1] = config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
void *addr;
- int err;
- int ring;
+ int err, ring;
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
+ __le32 *ptr;
struct pci_dev *pdev = adapter->pdev;
recv_ctx = adapter->recv_ctx;
- tx_ring = adapter->tx_ring;
- tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
- sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
- if (tx_ring->hw_consumer == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx consumer\n");
- return -ENOMEM;
- }
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr,
+ GFP_KERNEL);
- /* cmd desc ring */
- addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr, GFP_KERNEL);
+ if (ptr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ return -ENOMEM;
+ }
+ tx_ring->hw_consumer = ptr;
+ /* cmd desc ring */
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr,
+ GFP_KERNEL);
- if (addr == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
- err = -ENOMEM;
- goto err_out_free;
- }
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate tx desc ring\n");
+ err = -ENOMEM;
+ goto err_out_free;
+ }
- tx_ring->desc_head = addr;
+ tx_ring->desc_head = addr;
+ }
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -583,36 +590,70 @@ err_out_free:
return err;
}
-
-int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
- int err;
+ int i, err, ring;
- if (adapter->flags & QLCNIC_NEED_FLR) {
- pci_reset_function(adapter->pdev);
- adapter->flags &= ~QLCNIC_NEED_FLR;
+ if (dev->flags & QLCNIC_NEED_FLR) {
+ pci_reset_function(dev->pdev);
+ dev->flags &= ~QLCNIC_NEED_FLR;
}
- err = qlcnic_fw_cmd_create_rx_ctx(adapter);
- if (err)
- return err;
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_config_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+ }
- err = qlcnic_fw_cmd_create_tx_ctx(adapter);
- if (err) {
- qlcnic_fw_cmd_destroy_rx_ctx(adapter);
- return err;
+ err = qlcnic_fw_cmd_create_rx_ctx(dev);
+ if (err)
+ goto err_out;
+
+ for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
+ err = qlcnic_fw_cmd_create_tx_ctx(dev,
+ &dev->tx_ring[ring],
+ ring);
+ if (err) {
+ qlcnic_fw_cmd_destroy_rx_ctx(dev);
+ if (ring == 0)
+ goto err_out;
+
+ for (i = 0; i < ring; i++)
+ qlcnic_fw_cmd_destroy_tx_ctx(dev,
+ &dev->tx_ring[i]);
+
+ goto err_out;
+ }
}
- set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
return 0;
+
+err_out:
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(dev, 0);
+ }
+ return err;
}
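[Editor's note] The reworked qlcnic_fw_create_ctx() above now creates one TX context per ring and, on failure at ring N, tears down the contexts already created for rings 0..N-1. A minimal standalone sketch of that rollback pattern is shown below; the *_mock helpers are hypothetical stand-ins (the mock fails at ring 2 just to exercise the unwind path).

/* Hedged sketch of the per-ring create-with-rollback pattern used above. */
#include <stdio.h>

#define MAX_RINGS 4

static int create_tx_ctx_mock(int ring) { return ring == 2 ? -1 : 0; }
static void destroy_tx_ctx_mock(int ring) { printf("destroy ctx %d\n", ring); }

int main(void)
{
	int i, ring, err = 0;

	for (ring = 0; ring < MAX_RINGS; ring++) {
		err = create_tx_ctx_mock(ring);
		if (err) {
			for (i = 0; i < ring; i++)	/* undo earlier rings */
				destroy_tx_ctx_mock(i);
			break;
		}
		printf("created ctx %d\n", ring);
	}
	return err ? 1 : 0;
}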
void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
+ int ring;
+
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_destroy_rx_ctx(adapter);
- qlcnic_fw_cmd_destroy_tx_ctx(adapter);
-
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ qlcnic_fw_cmd_destroy_tx_ctx(adapter,
+ &adapter->tx_ring[ring]);
+
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(adapter, 0);
+ }
/* Allow dma queues to drain after context reset */
mdelay(20);
}
@@ -628,20 +669,23 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- tx_ring = adapter->tx_ring;
- if (tx_ring->hw_consumer != NULL) {
- dma_free_coherent(&adapter->pdev->dev,
- sizeof(u32),
- tx_ring->hw_consumer,
- tx_ring->hw_cons_phys_addr);
- tx_ring->hw_consumer = NULL;
- }
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->hw_consumer != NULL) {
+ dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
- if (tx_ring->desc_head != NULL) {
- dma_free_coherent(&adapter->pdev->dev,
- TX_DESC_RINGSIZE(tx_ring),
- tx_ring->desc_head, tx_ring->phys_addr);
- tx_ring->desc_head = NULL;
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head,
+ tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -670,40 +714,43 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
-/* Get MAC address of a NIC partition */
-int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
- int err;
+ int err, i;
struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
- cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
- cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
+ err = qlcnic_issue_cmd(adapter, &cmd);
- if (err == QLCNIC_RCODE_SUCCESS)
- qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
- else {
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
dev_err(&adapter->pdev->dev,
"Failed to get mac address%d\n", err);
err = -EIO;
}
-
+ qlcnic_free_mbx_args(&cmd);
return err;
}
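[Editor's note] The byte unpacking added in qlcnic_82xx_get_mac_address() above takes the MAC from two 32-bit response arguments and peels bytes most-significant first (mac[0..1] from the high word, mac[2..5] from the low word). A standalone sketch with example register values:

/* Hedged sketch of the MAC-address unpacking shown above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mac_high = 0x00000001;		/* example rsp.arg[2] */
	uint32_t mac_low  = 0x02030405;		/* example rsp.arg[1] */
	uint8_t mac[6];
	int i;

	for (i = 0; i < 2; i++)
		mac[i] = (uint8_t)(mac_high >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)
		mac[i] = (uint8_t)(mac_low >> ((5 - i) * 8));

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}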
/* Get info of a NIC partition */
-int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
- struct qlcnic_info *npar_info, u8 func_id)
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
{
int err;
dma_addr_t nic_dma_t;
- struct qlcnic_info_le *nic_info;
+ const struct qlcnic_info_le *nic_info;
void *nic_info_addr;
struct qlcnic_cmd_args cmd;
- size_t nic_size = sizeof(struct qlcnic_info_le);
+ size_t nic_size = sizeof(struct qlcnic_info_le);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
&nic_dma_t, GFP_KERNEL);
@@ -712,47 +759,39 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
- cmd.req.arg1 = MSD(nic_dma_t);
- cmd.req.arg2 = LSD(nic_dma_t);
- cmd.req.arg3 = (func_id << 16 | nic_size);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
- if (err == QLCNIC_RCODE_SUCCESS) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = (func_id << 16 | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get nic info%d\n", err);
+ err = -EIO;
+ } else {
npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+ npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
- npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
- npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
-
- dev_info(&adapter->pdev->dev,
- "phy port: %d switch_mode: %d,\n"
- "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
- "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
- npar_info->phys_port, npar_info->switch_mode,
- npar_info->max_tx_ques, npar_info->max_rx_ques,
- npar_info->min_tx_bw, npar_info->max_tx_bw,
- npar_info->max_mtu, npar_info->capabilities);
- } else {
- dev_err(&adapter->pdev->dev,
- "Failed to get nic info%d\n", err);
- err = -EIO;
}
dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
- nic_dma_t);
+ nic_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
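[Editor's note] qlcnic_82xx_get_nic_info() above (and set_nic_info below) splits the DMA handle across arg[1]/arg[2] via MSD()/LSD() and packs the function id and buffer size into arg[3]. A standalone sketch of that argument layout, with an example address rather than a real DMA handle and locally defined MSD/LSD macros, follows:

/* Hedged sketch of the arg[1..3] packing used by the NIC-info commands. */
#include <stdio.h>
#include <stdint.h>

#define MSD(x)	((uint32_t)(((uint64_t)(x)) >> 32))
#define LSD(x)	((uint32_t)((uint64_t)(x) & 0xffffffff))

int main(void)
{
	uint64_t nic_dma = 0x0000000123450000ULL;	/* example address */
	uint32_t func_id = 3, nic_size = 128;
	uint32_t arg1 = MSD(nic_dma);
	uint32_t arg2 = LSD(nic_dma);
	uint32_t arg3 = (func_id << 16 | nic_size);

	printf("arg1=0x%08x arg2=0x%08x arg3=0x%08x\n",
	       (unsigned)arg1, (unsigned)arg2, (unsigned)arg3);
	return 0;
}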
/* Configure a NIC partition */
-int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
{
int err = -EIO;
dma_addr_t nic_dma_t;
@@ -783,13 +822,11 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
- cmd.req.arg1 = MSD(nic_dma_t);
- cmd.req.arg2 = LSD(nic_dma_t);
- cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -799,12 +836,14 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
nic_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
/* Get PCI Info of a partition */
-int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
- struct qlcnic_pci_info *pci_info)
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
{
int err = 0, i;
struct qlcnic_cmd_args cmd;
@@ -821,13 +860,11 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
- cmd.req.arg1 = MSD(pci_info_dma_t);
- cmd.req.arg2 = LSD(pci_info_dma_t);
- cmd.req.arg3 = pci_size;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ cmd.req.arg[1] = MSD(pci_info_dma_t);
+ cmd.req.arg[2] = LSD(pci_info_dma_t);
+ cmd.req.arg[3] = pci_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
adapter->ahw->act_pci_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
@@ -853,6 +890,8 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
pci_info_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -871,21 +910,19 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
- cmd.req.arg1 = arg1;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
- if (err != QLCNIC_RCODE_SUCCESS) {
+ if (err != QLCNIC_RCODE_SUCCESS)
dev_err(&adapter->pdev->dev,
"Failed to configure port mirroring%d on eswitch:%d\n",
pci_func, id);
- } else {
+ else
dev_info(&adapter->pdev->dev,
"Configured eSwitch %d for port mirroring:%d\n",
id, pci_func);
- }
+ qlcnic_free_mbx_args(&cmd);
return err;
}
@@ -922,13 +959,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
- cmd.req.arg1 = arg1;
- cmd.req.arg2 = MSD(stats_dma_t);
- cmd.req.arg3 = LSD(stats_dma_t);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (!err) {
stats = stats_addr;
@@ -948,6 +983,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
stats_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -962,6 +999,9 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
void *stats_addr;
int err;
+ if (mac_stats == NULL)
+ return -ENOMEM;
+
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
&stats_dma_t, GFP_KERNEL);
if (!stats_addr) {
@@ -970,15 +1010,11 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
memset(stats_addr, 0, stats_size);
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
- cmd.req.arg1 = stats_size << 16;
- cmd.req.arg2 = MSD(stats_dma_t);
- cmd.req.arg3 = LSD(stats_dma_t);
-
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
-
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+ cmd.req.arg[1] = stats_size << 16;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (!err) {
stats = stats_addr;
mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
@@ -1000,10 +1036,16 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get mac stats failed, err=%d.\n", __func__, err);
}
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
stats_dma_t);
+
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -1064,7 +1106,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
const u8 port, const u8 rx_tx)
{
-
+ int err;
u32 arg1;
struct qlcnic_cmd_args cmd;
@@ -1087,15 +1129,16 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
arg1 |= BIT_14 | rx_tx << 15;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
- cmd.req.arg1 = arg1;
- qlcnic_issue_cmd(adapter, &cmd);
- return cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
err_ret:
- dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
- "rx_ctx=%d\n", func_esw, port, rx_tx);
+ dev_err(&adapter->pdev->dev,
+ "Invalid args func_esw %d port %d rx_ctx %d\n",
+ func_esw, port, rx_tx);
return -EIO;
}
@@ -1108,22 +1151,21 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
u8 pci_func;
pci_func = (*arg1 >> 8);
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG;
- cmd.req.arg1 = *arg1;
- cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
- qlcnic_issue_cmd(adapter, &cmd);
- *arg1 = cmd.rsp.arg1;
- *arg2 = cmd.rsp.arg2;
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
+ cmd.req.arg[1] = *arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ *arg1 = cmd.rsp.arg[1];
+ *arg2 = cmd.rsp.arg[2];
+ qlcnic_free_mbx_args(&cmd);
- if (err == QLCNIC_RCODE_SUCCESS) {
+ if (err == QLCNIC_RCODE_SUCCESS)
dev_info(&adapter->pdev->dev,
- "eSwitch port config for pci func %d\n", pci_func);
- } else {
+ "eSwitch port config for pci func %d\n", pci_func);
+ else
dev_err(&adapter->pdev->dev,
"Failed to get eswitch port config for pci func %d\n",
pci_func);
- }
return err;
}
/* Configure eSwitch port
@@ -1188,20 +1230,18 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
return err;
}
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH;
- cmd.req.arg1 = arg1;
- cmd.req.arg2 = arg2;
- qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH);
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
- err = cmd.rsp.cmd;
- if (err != QLCNIC_RCODE_SUCCESS) {
+ if (err != QLCNIC_RCODE_SUCCESS)
dev_err(&adapter->pdev->dev,
"Failed to configure eswitch pci func %d\n", pci_func);
- } else {
+ else
dev_info(&adapter->pdev->dev,
- "Configured eSwitch for pci func %d\n", pci_func);
- }
+ "Configured eSwitch for pci func %d\n", pci_func);
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 74b98110c5b4..5641f8ec49ab 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -22,42 +22,37 @@ struct qlcnic_stats {
#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+static const u32 qlcnic_fw_dump_level[] = {
+ 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
+};
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
- {"xmit_called",
- QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
- {"xmit_finished",
- QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
- {"rx_dropped",
- QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
- {"tx_dropped",
- QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
- {"csummed",
- QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
- {"rx_pkts",
- QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
- {"lro_pkts",
- QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
- {"rx_bytes",
- QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
- {"tx_bytes",
- QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
- {"lrobytes",
- QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
- {"lso_frames",
- QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
- {"xmit_on",
- QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
- {"xmit_off",
- QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+ {"xmit_called", QLC_SIZEOF(stats.xmitcalled),
+ QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
+ QLC_OFF(stats.xmitfinished)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+ {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
- QLC_OFF(stats.skb_alloc_failure)},
- {"null rxbuf",
- QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ QLC_OFF(stats.skb_alloc_failure)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
QLC_OFF(stats.rx_dma_map_error)},
{"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
QLC_OFF(stats.tx_dma_map_error)},
+ {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
+ QLC_OFF(stats.mac_filter_limit_overrun)},
+ {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ QLC_OFF(stats.spurious_intr)},
};
@@ -78,7 +73,15 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx numbytes",
};
-static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
+static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_tx_bytes",
+ "ctx_tx_pkts",
+ "ctx_tx_errors",
+ "ctx_tx_dropped_pkts",
+ "ctx_tx_num_buffers",
+};
+
+static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
"mac_tx_frames",
"mac_tx_bytes",
"mac_tx_mcast_pkts",
@@ -110,35 +113,70 @@ static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
"mac_rx_length_large",
"mac_rx_jabber",
"mac_rx_dropped",
- "mac_rx_crc_error",
+ "mac_crc_error",
"mac_align_error",
};
-#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
-#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings)
-#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
-#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_rx_bytes",
+ "ctx_rx_pkts",
+ "ctx_lro_pkt_cnt",
+ "ctx_ip_csum_error",
+ "ctx_rx_pkts_wo_ctx",
+ "ctx_rx_pkts_dropped_wo_sts",
+ "ctx_rx_osized_pkts",
+ "ctx_rx_pkts_dropped_wo_rds",
+ "ctx_rx_unexpected_mcast_pkts",
+ "ctx_invalid_mac_address",
+ "ctx_rx_rds_ring_prim_attemoted",
+ "ctx_rx_rds_ring_prim_success",
+ "ctx_num_lro_flows_added",
+ "ctx_num_lro_flows_removed",
+ "ctx_num_lro_flows_active",
+ "ctx_pkts_dropped_unknown",
+};
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Register_Test_on_offline",
"Link_Test_on_offline",
"Interrupt_Test_offline",
"Internal_Loopback_offline",
- "External_Loopback_offline"
+ "EEPROM_Test_offline"
};
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+static inline int qlcnic_82xx_statistics(void)
+{
+ return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+}
+
+static inline int qlcnic_83xx_statistics(void)
+{
+ return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+}
+
+static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_statistics();
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_statistics();
+ else
+ return -1;
+}
+
#define QLCNIC_RING_REGS_COUNT 20
#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
#define QLCNIC_MAX_EEPROM_LEN 1024
static const u32 diag_registers[] = {
- CRB_CMDPEG_STATE,
- CRB_RCVPEG_STATE,
- CRB_XG_STATE_P3P,
- CRB_FW_CAPABILITIES_1,
- ISR_INT_STATE_REG,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_FW_CAPABILITIES,
QLCNIC_CRB_DRV_ACTIVE,
QLCNIC_CRB_DEV_STATE,
QLCNIC_CRB_DRV_STATE,
@@ -148,6 +186,13 @@ static const u32 diag_registers[] = {
QLCNIC_PEG_ALIVE_COUNTER,
QLCNIC_PEG_HALT_STATUS1,
QLCNIC_PEG_HALT_STATUS2,
+ -1
+};
+
+
+static const u32 ext_diag_registers[] = {
+ CRB_XG_STATE_P3P,
+ ISR_INT_STATE_REG,
QLCNIC_CRB_PEG_NET_0+0x3c,
QLCNIC_CRB_PEG_NET_1+0x3c,
QLCNIC_CRB_PEG_NET_2+0x3c,
@@ -156,12 +201,19 @@ static const u32 diag_registers[] = {
};
#define QLCNIC_MGMT_API_VERSION 2
-#define QLCNIC_DEV_INFO_SIZE 1
-#define QLCNIC_ETHTOOL_REGS_VER 2
+#define QLCNIC_ETHTOOL_REGS_VER 3
+
static int qlcnic_get_regs_len(struct net_device *dev)
{
- return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
- QLCNIC_DEV_INFO_SIZE + 1;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 len;
+
+ if (qlcnic_83xx_check(adapter))
+ len = qlcnic_83xx_get_regs_len(adapter);
+ else
+ len = sizeof(ext_diag_registers) + sizeof(diag_registers);
+
+ return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -174,10 +226,9 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 fw_major, fw_minor, fw_build;
-
- fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
@@ -192,7 +243,10 @@ static int
qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 speed, reg;
int check_sfp_module = 0;
+ u16 pcifn = ahw->pci_func;
/* read which mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
@@ -213,9 +267,12 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->autoneg = adapter->ahw->link_autoneg;
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
- u32 val;
+ u32 val = 0;
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_get_settings(adapter);
+ else
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
- val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_1000baseT_Full;
@@ -225,6 +282,12 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_running(dev) && adapter->ahw->has_link_events) {
+ if (qlcnic_82xx_check(adapter)) {
+ reg = QLCRD32(adapter,
+ P3P_LINK_SPEED_REG(pcifn));
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ }
ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
ecmd->autoneg = adapter->ahw->link_autoneg;
ecmd->duplex = adapter->ahw->link_duplex;
@@ -294,6 +357,13 @@ skip:
ecmd->port = PORT_TP;
}
break;
+ case QLCNIC_BRDTYPE_83XX_10G:
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) && ahw->has_link_events;
+ break;
default:
dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
adapter->ahw->board_type);
@@ -321,16 +391,10 @@ skip:
return 0;
}
-static int
-qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
{
- u32 config = 0;
- u32 ret = 0;
- struct qlcnic_adapter *adapter = netdev_priv(dev);
-
- if (adapter->ahw->port_type != QLCNIC_GBE)
- return -EOPNOTSUPP;
-
+ u32 ret = 0, config = 0;
/* read which mode */
if (ecmd->duplex)
config |= 0x1;
@@ -358,6 +422,24 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EOPNOTSUPP;
else if (ret)
return -EIO;
+ return ret;
+}
+
+static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u32 ret = 0;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ahw->port_type != QLCNIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (qlcnic_83xx_check(adapter))
+ ret = qlcnic_83xx_set_settings(adapter, ecmd);
+ else
+ ret = qlcnic_set_port_config(adapter, ecmd);
+
+ if (!ret)
+ return ret;
adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
adapter->ahw->link_duplex = ecmd->duplex;
@@ -370,6 +452,19 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return dev->netdev_ops->ndo_open(dev);
}
+static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
+ u32 *regs_buff)
+{
+ int i, j = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
+ j = 0;
+ while (ext_diag_registers[j] != -1)
+ regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
+ return i;
+}
+
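[Editor's note] The new qlcnic_82xx_get_registers() above walks diag_registers[] until it hits the -1 sentinel added earlier in this patch, then appends the ext_diag_registers[] entries. A standalone sketch of that sentinel-terminated walk follows; read_reg_mock() and the offsets are hypothetical stand-ins for the QLC_SHARED_REG_RD32()/QLCRD32() reads.

/* Hedged sketch of dumping a -1-terminated register table into regs_buff[]. */
#include <stdio.h>
#include <stdint.h>

static const int32_t diag_registers_mock[] = { 0x50, 0xb0, 0x128, -1 };

static uint32_t read_reg_mock(int32_t off)
{
	return 0xdead0000u | (uint32_t)off;	/* fake register contents */
}

int main(void)
{
	uint32_t regs_buff[16];
	int i = 0, j;

	for (j = 0; diag_registers_mock[j] != -1; j++, i++)
		regs_buff[i] = read_reg_mock(diag_registers_mock[j]);

	for (j = 0; j < i; j++)
		printf("reg[%d] = 0x%08x\n", j, regs_buff[j]);
	return 0;
}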
static void
qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
@@ -377,17 +472,20 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
u32 *regs_buff = p;
- int ring, i = 0, j = 0;
+ int ring, i = 0;
memset(p, 0, qlcnic_get_regs_len(dev));
+
regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
(adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
- for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
- regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
+ if (qlcnic_82xx_check(adapter))
+ i = qlcnic_82xx_get_registers(adapter, regs_buff);
+ else
+ i = qlcnic_83xx_get_registers(adapter, regs_buff);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
@@ -415,6 +513,10 @@ static u32 qlcnic_test_link(struct net_device *dev)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 val;
+ if (qlcnic_83xx_check(adapter)) {
+ val = qlcnic_83xx_test_link(adapter);
+ return (val & 1) ? 0 : 1;
+ }
val = QLCRD32(adapter, CRB_XG_STATE_P3P);
val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
@@ -426,8 +528,10 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int offset;
- int ret;
+ int ret = -1;
+ if (qlcnic_83xx_check(adapter))
+ return 0;
if (eeprom->len == 0)
return -EINVAL;
@@ -435,8 +539,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
((adapter->pdev)->device << 16);
offset = eeprom->offset;
- ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
- eeprom->len);
+ if (qlcnic_82xx_check(adapter))
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
if (ret < 0)
return ret;
@@ -529,11 +634,11 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->tx_count != channel->max_tx)
return -EINVAL;
- err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
+ err = qlcnic_validate_max_rss(channel->max_rx, channel->rx_count);
if (err)
return err;
- err = qlcnic_set_max_rss(adapter, channel->rx_count);
+ err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
netdev_info(dev, "allocated 0x%x sds rings\n",
adapter->max_sds_rings);
return err;
@@ -547,6 +652,10 @@ qlcnic_get_pauseparam(struct net_device *netdev,
int port = adapter->ahw->physical_port;
__u32 val;
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_get_pauseparam(adapter, pause);
+ return;
+ }
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
@@ -592,6 +701,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
int port = adapter->ahw->physical_port;
__u32 val;
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_pauseparam(adapter, pause);
+
/* read mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
@@ -606,6 +718,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
val);
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
/* set autoneg */
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
switch (port) {
@@ -668,6 +781,9 @@ static int qlcnic_reg_test(struct net_device *dev)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 data_read;
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_reg_test(adapter);
+
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
@@ -675,16 +791,30 @@ static int qlcnic_reg_test(struct net_device *dev)
return 0;
}
+static int qlcnic_eeprom_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return 0;
+
+ return qlcnic_83xx_flash_test(adapter);
+}
+
static int qlcnic_get_sset_count(struct net_device *dev, int sset)
{
+ int len;
+
struct qlcnic_adapter *adapter = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
return QLCNIC_TEST_LEN;
case ETH_SS_STATS:
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
- return QLCNIC_TOTAL_STATS_LEN;
+ len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_83xx_check(adapter))
+ return len;
+ return qlcnic_82xx_statistics();
default:
return -EOPNOTSUPP;
}
@@ -693,35 +823,36 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
static int qlcnic_irq_test(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_sds_rings = adapter->max_sds_rings;
- int ret;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ int ret, max_sds_rings = adapter->max_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_interrupt_test(netdev);
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
if (ret)
- goto clear_it;
+ goto clear_diag_irq;
- adapter->ahw->diag_cnt = 0;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
- cmd.req.arg1 = adapter->ahw->pci_func;
- qlcnic_issue_cmd(adapter, &cmd);
- ret = cmd.rsp.cmd;
+ ahw->diag_cnt = 0;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ cmd.req.arg[1] = ahw->pci_func;
+ ret = qlcnic_issue_cmd(adapter, &cmd);
if (ret)
goto done;
- msleep(10);
-
- ret = !adapter->ahw->diag_cnt;
+ usleep_range(1000, 12000);
+ ret = !ahw->diag_cnt;
done:
+ qlcnic_free_mbx_args(&cmd);
qlcnic_diag_free_res(netdev, max_sds_rings);
-clear_it:
+clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -750,7 +881,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
}
-static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
@@ -761,11 +892,10 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
-
adapter->ahw->diag_cnt = 0;
qlcnic_xmit_frame(skb, adapter->netdev);
-
loop = 0;
+
do {
msleep(1);
qlcnic_process_rcv_ring_diag(sds_ring);
@@ -776,42 +906,46 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
dev_kfree_skb_any(skb);
if (!adapter->ahw->diag_cnt)
- QLCDB(adapter, DRV,
- "LB Test: packet #%d was not received\n", i + 1);
+ dev_warn(&adapter->pdev->dev,
+ "LB Test: packet #%d was not received\n",
+ i + 1);
else
cnt++;
}
if (cnt != i) {
- dev_warn(&adapter->pdev->dev, "LB Test failed\n");
- if (mode != QLCNIC_ILB_MODE) {
+ dev_err(&adapter->pdev->dev,
+ "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
+ if (mode != QLCNIC_ILB_MODE)
dev_warn(&adapter->pdev->dev,
- "WARNING: Please make sure external"
- "loopback connector is plugged in\n");
- }
+ "WARNING: Please check loopback cable\n");
return -1;
}
return 0;
}
-static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int max_sds_rings = adapter->max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
int ret;
- if (!(adapter->ahw->capabilities &
- QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
- netdev_info(netdev, "Firmware is not loopback test capable\n");
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_loopback_test(netdev, mode);
+
+ if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware do not support loopback test\n");
return -EOPNOTSUPP;
}
- QLCDB(adapter, DRV, "%s loopback test in progress\n",
- mode == QLCNIC_ILB_MODE ? "internal" : "external");
- if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
- netdev_warn(netdev, "Loopback test not supported for non "
- "privilege function\n");
+ dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+ "Loopback test not supported in nonprivileged mode\n");
return 0;
}
@@ -823,12 +957,11 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
goto clear_it;
sds_ring = &adapter->recv_ctx->sds_rings[0];
-
ret = qlcnic_set_lb_mode(adapter, mode);
if (ret)
goto free_res;
- adapter->ahw->diag_cnt = 0;
+ ahw->diag_cnt = 0;
do {
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
@@ -841,11 +974,11 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
ret = adapter->ahw->diag_cnt;
goto free_res;
}
- } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
+ } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
ret = qlcnic_do_lb_test(adapter, mode);
- qlcnic_clear_lb_mode(adapter);
+ qlcnic_clear_lb_mode(adapter, mode);
free_res:
qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -878,20 +1011,18 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
- data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
- if (data[4])
- eth_test->flags |= ETH_TEST_FL_FAILED;
- eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
- }
+
+ data[4] = qlcnic_eeprom_test(dev);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
static void
-qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int index, i, j;
+ int index, i, num_stats;
switch (stringset) {
case ETH_SS_TEST:
@@ -904,14 +1035,34 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
- for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_mac_stats_strings[j],
- ETH_GSTRING_LEN);
+ if (qlcnic_83xx_check(adapter)) {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_tx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_rx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ return;
+ } else {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
- for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
+ num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ for (i = 0; i < num_stats; index++, i++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_device_gstrings_stats[i],
ETH_GSTRING_LEN);
@@ -920,89 +1071,84 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
}
static void
-qlcnic_fill_stats(int *index, u64 *data, void *stats, int type)
+qlcnic_fill_stats(u64 *data, void *stats, int type)
{
- int ind = *index;
-
if (type == QLCNIC_MAC_STATS) {
struct qlcnic_mac_statistics *mac_stats =
(struct qlcnic_mac_statistics *)stats;
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
} else if (type == QLCNIC_ESW_STATS) {
struct __qlcnic_esw_statistics *esw_stats =
(struct __qlcnic_esw_statistics *)stats;
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->errors);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
}
-
- *index = ind;
}
-static void
-qlcnic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 * data)
+static void qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret;
-
- for (index = 0; index < QLCNIC_STATS_LEN; index++) {
- char *p =
- (char *)adapter +
- qlcnic_gstrings_stats[index].stat_offset;
- data[index] =
- (qlcnic_gstrings_stats[index].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
+ int index, ret, length, size;
+ char *p;
+
+ memset(data, 0, stats->n_stats * sizeof(u64));
+ length = QLCNIC_STATS_LEN;
+ for (index = 0; index < length; index++) {
+ p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
+ size = qlcnic_gstrings_stats[index].sizeof_stat;
+ *data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
}
- /* Retrieve MAC statistics from firmware */
- memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
- qlcnic_get_mac_stats(adapter, &mac_stats);
- qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS);
+ if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->linkup)
+ qlcnic_83xx_get_stats(adapter, data);
+ return;
+ } else {
+ /* Retrieve MAC statistics from firmware */
+ memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
+ qlcnic_get_mac_stats(adapter, &mac_stats);
+ qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ }
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
@@ -1013,14 +1159,13 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
if (ret)
return;
- qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS);
-
+ qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
return;
- qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS);
+ qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS);
}
static int qlcnic_set_led(struct net_device *dev,
@@ -1030,6 +1175,8 @@ static int qlcnic_set_led(struct net_device *dev,
int max_sds_rings = adapter->max_sds_rings;
int err = -EIO, active = 1;
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non "
"privilege function\n");
@@ -1096,6 +1243,8 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ if (qlcnic_83xx_check(adapter))
+ return;
wol->supported = 0;
wol->wolopts = 0;
@@ -1114,8 +1263,10 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (qlcnic_83xx_check(adapter))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
if (!(wol_cfg & (1 << adapter->portnum)))
@@ -1307,7 +1458,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
return 0;
}
netdev_info(netdev, "Forcing a FW dump\n");
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, val->flag);
break;
case QLCNIC_DISABLE_FW_DUMP:
if (fw_dump->enable && fw_dump->tmpl_hdr) {
@@ -1327,7 +1478,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
return 0;
case QLCNIC_FORCE_FW_RESET:
netdev_info(netdev, "Forcing a FW reset\n");
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, val->flag);
adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
return 0;
case QLCNIC_SET_QUIESCENT:
@@ -1341,8 +1492,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
netdev_err(netdev, "FW dump not supported\n");
return -ENOTSUPP;
}
- for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
- if (val->flag == FW_DUMP_LEVELS[i]) {
+ for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
+ if (val->flag == qlcnic_fw_dump_level[i]) {
fw_dump->tmpl_hdr->drv_cap_mask =
val->flag;
netdev_info(netdev, "Driver mask changed to: 0x%x\n",
@@ -1386,10 +1537,3 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_dump_data = qlcnic_get_dump_data,
.set_dump = qlcnic_set_dump,
};
-
-const struct ethtool_ops qlcnic_ethtool_failed_ops = {
- .get_settings = qlcnic_get_settings,
- .get_drvinfo = qlcnic_get_drvinfo,
- .set_msglevel = qlcnic_set_msglevel,
- .get_msglevel = qlcnic_get_msglevel,
-};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 49cc1ac4f057..44197ca1456c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include "qlcnic_hw.h"
+
/*
* The basic unit of access when reading/writing control registers.
*/
@@ -387,9 +389,6 @@ enum {
#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
-/* Lock IDs for ROM lock */
-#define ROM_LOCK_DRIVER 0x0d417340
-
/******************************************************************************
*
* Definitions specific to M25P flash
@@ -449,13 +448,10 @@ enum {
#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
-#define QLCNIC_PCI_MN_2M (0)
-#define QLCNIC_PCI_MS_2M (0x80000)
#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
#define QLCNIC_PCI_CAMQM (0x04800000UL)
#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
-#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
@@ -491,7 +487,7 @@ enum {
#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
(QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
-
+#define MAX_CTL_CHECK 1000
#define TEST_AGT_CTRL (0x00)
#define TA_CTL_START BIT_0
@@ -499,44 +495,6 @@ enum {
#define TA_CTL_WRITE BIT_2
#define TA_CTL_BUSY BIT_3
-/*
- * Register offsets for MN
- */
-#define MIU_TEST_AGT_BASE (0x90)
-
-#define MIU_TEST_AGT_ADDR_LO (0x04)
-#define MIU_TEST_AGT_ADDR_HI (0x08)
-#define MIU_TEST_AGT_WRDATA_LO (0x10)
-#define MIU_TEST_AGT_WRDATA_HI (0x14)
-#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
-#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
-#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
-#define MIU_TEST_AGT_RDDATA_LO (0x18)
-#define MIU_TEST_AGT_RDDATA_HI (0x1c)
-#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
-#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
-#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
-
-#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
-#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
-
-/*
- * Register offsets for MS
- */
-#define SIU_TEST_AGT_BASE (0x60)
-
-#define SIU_TEST_AGT_ADDR_LO (0x04)
-#define SIU_TEST_AGT_ADDR_HI (0x18)
-#define SIU_TEST_AGT_WRDATA_LO (0x08)
-#define SIU_TEST_AGT_WRDATA_HI (0x0c)
-#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
-#define SIU_TEST_AGT_RDDATA_LO (0x10)
-#define SIU_TEST_AGT_RDDATA_HI (0x14)
-#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
-
-#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
-#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
-
/* XG Link status */
#define XG_LINK_UP 0x10
#define XG_LINK_DOWN 0x20
@@ -556,9 +514,6 @@ enum {
#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
-#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
-#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
-#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
@@ -568,28 +523,17 @@ enum {
#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+#define QLCNIC_CDRP_MAX_ARGS 4
+#define QLCNIC_CDRP_ARG(i) (QLCNIC_REG(0x18 + ((i) * 4)))
+
#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
-#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
-#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
-#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
-#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
-#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
-
#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
-#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
-
-#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
-
-#define CRB_V2P_0 (QLCNIC_REG(0x290))
-#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
-#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
-#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
* CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
@@ -616,11 +560,6 @@ enum {
/* Lock IDs for PHY lock */
#define PHY_LOCK_DRIVER 0x44524956
-/* Used for PS PCI Memory access */
-#define PCIX_PS_OP_ADDR_LO (0x10000)
-/* via CRB (PS side only) */
-#define PCIX_PS_OP_ADDR_HI (0x10004)
-
#define PCIX_INT_VECTOR (0x10100)
#define PCIX_INT_MASK (0x10104)
@@ -682,17 +621,6 @@ enum {
#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
-#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
-#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
-#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
-#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
-#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
-
-#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
-#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
-#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
-#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
-#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
@@ -711,7 +639,6 @@ enum {
#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
-#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
@@ -744,6 +671,9 @@ enum {
#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
+#define QLCNIC_MAX_MC_COUNT 38
+#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
+
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
@@ -766,26 +696,13 @@ struct qlcnic_legacy_intr_set {
u32 pci_int_reg;
};
-#define QLCNIC_FW_API 0x1b216c
-#define QLCNIC_DRV_OP_MODE 0x1b2170
#define QLCNIC_MSIX_BASE 0x132110
#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
-/* FW dump defines */
-#define MIU_TEST_CTR 0x41000090
-#define MIU_TEST_ADDR_LO 0x41000094
-#define MIU_TEST_ADDR_HI 0x41000098
#define FLASH_ROM_WINDOW 0x42110030
#define FLASH_ROM_DATA 0x42150000
-
-static const u32 FW_DUMP_LEVELS[] = {
- 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
-
-static const u32 MIU_TEST_READ_DATA[] = {
- 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
-
#define QLCNIC_FW_DUMP_REG1 0x00130060
#define QLCNIC_FW_DUMP_REG2 0x001e0000
#define QLCNIC_FLASH_SEM2_LK 0x0013C010
@@ -796,7 +713,8 @@ static const u32 MIU_TEST_READ_DATA[] = {
enum {
QLCNIC_MGMT_FUNC = 0,
QLCNIC_PRIV_FUNC = 1,
- QLCNIC_NON_PRIV_FUNC = 2
+ QLCNIC_NON_PRIV_FUNC = 2,
+ QLCNIC_UNKNOWN_FUNC_MODE = 3
};
enum {
@@ -1013,6 +931,8 @@ enum {
#define QLCNIC_NIU_PROMISC_MODE 1
#define QLCNIC_NIU_ALLMULTI_MODE 2
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
struct crb_128M_2M_sub_block_map {
unsigned valid;
unsigned start_128M;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index fc48e000f35f..325e11e1ce0f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -344,28 +344,33 @@ qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
-static int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
+int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
{
u32 data;
if (qlcnic_82xx_check(adapter))
qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
- else
- return -EIO;
+ else {
+ data = qlcnic_83xx_rd_reg_indirect(adapter, addr);
+ if (data == -EIO)
+ return -EIO;
+ }
return data;
}
-static void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
{
if (qlcnic_82xx_check(adapter))
qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
}
static int
qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
- u32 i, producer, consumer;
+ u32 i, producer;
struct qlcnic_cmd_buffer *pbuf;
struct cmd_desc_type0 *cmd_desc;
struct qlcnic_host_tx_ring *tx_ring;
@@ -379,7 +384,6 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
- consumer = tx_ring->sw_consumer;
if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
@@ -402,7 +406,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
pbuf->frag_count = 0;
memcpy(&tx_ring->desc_head[producer],
- &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+ cmd_desc, sizeof(struct cmd_desc_type0));
producer = get_next_index(producer, tx_ring->num_desc);
i++;
@@ -418,9 +422,8 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
return 0;
}
-static int
-qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
- __le16 vlan_id, unsigned op)
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ __le16 vlan_id, u8 op)
{
struct qlcnic_nic_req req;
struct qlcnic_mac_req *mac_req;
@@ -443,7 +446,29 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
-static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+{
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+ int err = -EINVAL;
+
+ /* Delete MAC from the existing list */
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ list_del(&cur->list);
+ kfree(cur);
+ return err;
+ }
+ }
+ return err;
+}
+
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
struct list_head *head;
struct qlcnic_mac_list_s *cur;
@@ -456,11 +481,9 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
}
cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
- if (cur == NULL) {
- dev_err(&adapter->netdev->dev,
- "failed to add mac address filter\n");
+ if (cur == NULL)
return -ENOMEM;
- }
+
memcpy(cur->mac_addr, addr, ETH_ALEN);
if (qlcnic_sre_macaddr_change(adapter,
@@ -507,17 +530,17 @@ void qlcnic_set_multi(struct net_device *netdev)
}
send_fw_cmd:
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) {
qlcnic_alloc_lb_filters_mem(adapter);
- adapter->mac_learn = 1;
+ adapter->drv_mac_learn = true;
} else {
- adapter->mac_learn = 0;
+ adapter->drv_mac_learn = false;
}
qlcnic_nic_set_promisc(adapter, mode);
}
-int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
struct qlcnic_nic_req req;
u64 word;
@@ -556,18 +579,20 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
int i;
+ unsigned long time;
+ u8 cmd;
- for (i = 0; i < adapter->fhash.fmax; i++) {
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
-
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
- {
- if (jiffies >
- (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
qlcnic_sre_macaddr_change(adapter,
- tmp_fil->faddr, tmp_fil->vlan_id,
- tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
- QLCNIC_MAC_DEL);
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
spin_lock_bh(&adapter->mac_learn_lock);
adapter->fhash.fnum--;
hlist_del(&tmp_fil->fnode);
@@ -576,6 +601,21 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
}
}
}
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
+ head = &(adapter->rx_fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+ {
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ spin_lock_bh(&adapter->rx_mac_learn_lock);
+ adapter->rx_fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->rx_mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
}
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
@@ -584,14 +624,17 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
int i;
+ u8 cmd;
- for (i = 0; i < adapter->fhash.fmax; i++) {
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
-
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
- qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
- tmp_fil->vlan_id, tmp_fil->vlan_id ?
- QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
spin_lock_bh(&adapter->mac_learn_lock);
adapter->fhash.fnum--;
hlist_del(&tmp_fil->fnode);
@@ -621,12 +664,13 @@ static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
return rv;
}
-int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
if (qlcnic_set_fw_loopback(adapter, mode))
return -EIO;
- if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
qlcnic_set_fw_loopback(adapter, 0);
return -EIO;
}
@@ -635,11 +679,11 @@ int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
-void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
- int mode = VPORT_MISS_MODE_DROP;
struct net_device *netdev = adapter->netdev;
+ mode = VPORT_MISS_MODE_DROP;
qlcnic_set_fw_loopback(adapter, 0);
if (netdev->flags & IFF_PROMISC)
@@ -649,12 +693,13 @@ void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
qlcnic_nic_set_promisc(adapter, mode);
msleep(1000);
+ return 0;
}
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
-int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
int rv;
@@ -676,10 +721,14 @@ int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
if (rv != 0)
dev_err(&adapter->netdev->dev,
"Could not send interrupt coalescing parameters\n");
- return rv;
}
-int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+#define QLCNIC_ENABLE_IPV4_LRO 1
+#define QLCNIC_ENABLE_IPV6_LRO 2
+#define QLCNIC_NO_DEST_IPV4_CHECK (1 << 8)
+#define QLCNIC_NO_DEST_IPV6_CHECK (2 << 8)
+
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -695,7 +744,15 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64(enable);
+ word = 0;
+ if (enable) {
+ word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK;
+ if (adapter->ahw->capabilities2 & QLCNIC_FW_CAP2_HW_LRO_IPV6)
+ word |= QLCNIC_ENABLE_IPV6_LRO |
+ QLCNIC_NO_DEST_IPV6_CHECK;
+ }
+
+ req.words[0] = cpu_to_le64(word);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
@@ -735,9 +792,12 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
}
-#define RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_ENABLE_TYPE_C_RSS BIT_10
+#define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63)
+#define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL
-int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -762,13 +822,19 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
* 7-6: hash_type_ipv6
* 8: enable
* 9: use indirection table
- * 47-10: reserved
- * 63-48: indirection table mask
+ * 10: type-c rss
+ * 11: udp rss
+ * 47-12: reserved
+ * 62-48: indirection table mask
+ * 63: feature flag
*/
- word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
- ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ word = ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
((u64)(enable & 0x1) << 8) |
- ((0x7ULL) << 48);
+ ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) |
+ (u64)QLCNIC_ENABLE_TYPE_C_RSS |
+ (u64)QLCNIC_RSS_FEATURE_FLAG;
+
req.words[0] = cpu_to_le64(word);
for (i = 0; i < 5; i++)
req.words[i+1] = cpu_to_le64(key[i]);
@@ -780,7 +846,8 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
{
struct qlcnic_nic_req req;
struct qlcnic_ipaddr *ipa;
@@ -802,23 +869,19 @@ int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
dev_err(&adapter->netdev->dev,
"could not notify %s IP 0x%x reuqest\n",
(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
-
- return rv;
}
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
int rv;
-
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
req.words[0] = cpu_to_le64(enable | (enable << 8));
-
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
dev_err(&adapter->netdev->dev,
@@ -883,7 +946,8 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ qlcnic_82xx_check(adapter)) {
netdev_features_t changed = features ^ netdev->features;
features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
}
@@ -904,13 +968,15 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
if (!(changed & NETIF_F_LRO))
return 0;
- netdev->features = features ^ NETIF_F_LRO;
+ netdev->features ^= NETIF_F_LRO;
if (qlcnic_config_hw_lro(adapter, hw_lro))
return -EIO;
- if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
- return -EIO;
+ if (!hw_lro && qlcnic_82xx_check(adapter)) {
+ if (qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+ }
return 0;
}
@@ -982,8 +1048,8 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
return 0;
}
-int
-qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ u32 data)
{
unsigned long flags;
int rv;
@@ -1014,7 +1080,7 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
return -EIO;
}
-int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
unsigned long flags;
int rv;
@@ -1043,7 +1109,6 @@ int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
return -1;
}
-
void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
u32 offset)
{
@@ -1269,7 +1334,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
return ret;
}
-int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
{
int offset, board_type, magic;
struct pci_dev *pdev = adapter->pdev;
@@ -1342,7 +1407,7 @@ qlcnic_wol_supported(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
struct qlcnic_nic_req req;
int rv;
@@ -1354,7 +1419,7 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum);
req.words[1] = cpu_to_le64(state);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
@@ -1363,3 +1428,56 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
+
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ void __iomem *msix_base_addr;
+ u32 func;
+ u32 msix_base;
+
+ pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base = readl(msix_base_addr);
+ func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE;
+ adapter->ahw->pci_func = func;
+}
+
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+}
+
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
+}
+
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter)
+{
+ return qlcnic_pcie_sem_lock(adapter, 5, 0);
+}
+
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_pcie_sem_unlock(adapter, 5);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
new file mode 100644
index 000000000000..5b8749eda11f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -0,0 +1,192 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HW_H
+#define __QLCNIC_HW_H
+
+/* Common registers in 83xx and 82xx */
+enum qlcnic_regs {
+ QLCNIC_PEG_HALT_STATUS1 = 0,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_FLASH_LOCK_OWNER,
+ QLCNIC_FW_CAPABILITIES,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_FW_VERSION_MAJOR,
+ QLCNIC_FW_VERSION_MINOR,
+ QLCNIC_FW_VERSION_SUB,
+ QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_FW_IMG_VALID,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_ASIC_TEMP,
+ QLCNIC_FW_API,
+ QLCNIC_DRV_OP_MODE,
+ QLCNIC_FLASH_LOCK,
+ QLCNIC_FLASH_UNLOCK,
+};
+
+/* Read from an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_RD32(a, addr) \
+ readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Write to an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_WR32(a, addr, value) \
+ writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Read from a direct address offset from BAR0, additional registers */
+#define QLCRDX(ahw, addr) \
+ readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))
+
+/* Write to a direct address offset from BAR0, additional registers */
+#define QLCWRX(ahw, addr, value) \
+ writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])))
+
+#define QLCNIC_CMD_CONFIGURE_IP_ADDR 0x1
+#define QLCNIC_CMD_CONFIG_INTRPT 0x2
+#define QLCNIC_CMD_CREATE_RX_CTX 0x7
+#define QLCNIC_CMD_DESTROY_RX_CTX 0x8
+#define QLCNIC_CMD_CREATE_TX_CTX 0x9
+#define QLCNIC_CMD_DESTROY_TX_CTX 0xa
+#define QLCNIC_CMD_CONFIGURE_LRO 0xC
+#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING 0xD
+#define QLCNIC_CMD_GET_STATISTICS 0xF
+#define QLCNIC_CMD_INTRPT_TEST 0x11
+#define QLCNIC_CMD_SET_MTU 0x12
+#define QLCNIC_CMD_READ_PHY 0x13
+#define QLCNIC_CMD_WRITE_PHY 0x14
+#define QLCNIC_CMD_READ_HW_REG 0x15
+#define QLCNIC_CMD_GET_FLOW_CTL 0x16
+#define QLCNIC_CMD_SET_FLOW_CTL 0x17
+#define QLCNIC_CMD_READ_MAX_MTU 0x18
+#define QLCNIC_CMD_READ_MAX_LRO 0x19
+#define QLCNIC_CMD_MAC_ADDRESS 0x1f
+#define QLCNIC_CMD_GET_PCI_INFO 0x20
+#define QLCNIC_CMD_GET_NIC_INFO 0x21
+#define QLCNIC_CMD_SET_NIC_INFO 0x22
+#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY 0x24
+#define QLCNIC_CMD_TOGGLE_ESWITCH 0x25
+#define QLCNIC_CMD_GET_ESWITCH_STATUS 0x26
+#define QLCNIC_CMD_SET_PORTMIRRORING 0x27
+#define QLCNIC_CMD_CONFIGURE_ESWITCH 0x28
+#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG 0x29
+#define QLCNIC_CMD_GET_ESWITCH_STATS 0x2a
+#define QLCNIC_CMD_CONFIG_PORT 0x2e
+#define QLCNIC_CMD_TEMP_SIZE 0x2f
+#define QLCNIC_CMD_GET_TEMP_HDR 0x30
+#define QLCNIC_CMD_GET_MAC_STATS 0x37
+#define QLCNIC_CMD_SET_DRV_VER 0x38
+#define QLCNIC_CMD_CONFIGURE_RSS 0x41
+#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
+#define QLCNIC_CMD_CONFIGURE_LED 0x44
+#define QLCNIC_CMD_CONFIG_MAC_VLAN 0x45
+#define QLCNIC_CMD_GET_LINK_EVENT 0x48
+#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
+#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
+#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
+#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
+#define QLCNIC_CMD_IDC_ACK 0x63
+#define QLCNIC_CMD_SET_PORT_CONFIG 0x66
+#define QLCNIC_CMD_GET_PORT_CONFIG 0x67
+#define QLCNIC_CMD_GET_LINK_STATUS 0x68
+#define QLCNIC_CMD_SET_LED_CONFIG 0x69
+#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
+#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+
+#define QLCNIC_INTRPT_INTX 1
+#define QLCNIC_INTRPT_MSIX 3
+#define QLCNIC_INTRPT_ADD 1
+#define QLCNIC_INTRPT_DEL 2
+
+#define QLCNIC_GET_CURRENT_MAC 1
+#define QLCNIC_SET_STATION_MAC 2
+#define QLCNIC_GET_DEFAULT_MAC 3
+#define QLCNIC_GET_FAC_DEF_MAC 4
+#define QLCNIC_SET_FAC_DEF_MAC 5
+
+#define QLCNIC_MBX_LINK_EVENT 0x8001
+#define QLCNIC_MBX_COMP_EVENT 0x8100
+#define QLCNIC_MBX_REQUEST_EVENT 0x8101
+#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
+#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
+
+struct qlcnic_mailbox_metadata {
+ u32 cmd;
+ u32 in_args;
+ u32 out_args;
+};
+
+/* Mailbox ownership */
+#define QLCNIC_GET_OWNER(val) ((val) & (BIT_0 | BIT_1))
+
+#define QLCNIC_SET_OWNER 1
+#define QLCNIC_CLR_OWNER 0
+#define QLCNIC_MBX_TIMEOUT 10000
+
+#define QLCNIC_MBX_RSP_OK 1
+#define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+
+struct qlcnic_pci_info;
+struct qlcnic_info;
+struct qlcnic_cmd_args;
+struct ethtool_stats;
+struct pci_device_id;
+struct qlcnic_host_sds_ring;
+struct qlcnic_host_tx_ring;
+struct qlcnic_hardware_context;
+struct qlcnic_adapter;
+
+int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev);
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+ u64 *uaddr, __le16 vlan_id);
+void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32, int);
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *);
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *tx_ring, int);
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
+int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *);
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
+#endif /* __QLCNIC_HW_H */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index de79cde233de..d28336fc65ab 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1,15 +1,12 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/if_vlan.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
struct crb_addr_pair {
u32 addr;
@@ -166,13 +163,12 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
{
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
int ring;
recv_ctx = adapter->recv_ctx;
if (recv_ctx->rds_rings == NULL)
- goto skip_rds;
+ return;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -180,16 +176,6 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->rx_buf_arr = NULL;
}
kfree(recv_ctx->rds_rings);
-
-skip_rds:
- if (adapter->tx_ring == NULL)
- return;
-
- tx_ring = adapter->tx_ring;
- vfree(tx_ring->cmd_buf_arr);
- tx_ring->cmd_buf_arr = NULL;
- kfree(adapter->tx_ring);
- adapter->tx_ring = NULL;
}
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -197,39 +183,16 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_rx_buffer *rx_buf;
- int ring, i, size;
-
- struct qlcnic_cmd_buffer *cmd_buf_arr;
- struct net_device *netdev = adapter->netdev;
-
- size = sizeof(struct qlcnic_host_tx_ring);
- tx_ring = kzalloc(size, GFP_KERNEL);
- if (tx_ring == NULL) {
- dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
- return -ENOMEM;
- }
- adapter->tx_ring = tx_ring;
-
- tx_ring->num_desc = adapter->num_txd;
- tx_ring->txq = netdev_get_tx_queue(netdev, 0);
-
- cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
- if (cmd_buf_arr == NULL) {
- dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
- goto err_out;
- }
- tx_ring->cmd_buf_arr = cmd_buf_arr;
+ int ring, i;
recv_ctx = adapter->recv_ctx;
- size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
- rds_ring = kzalloc(size, GFP_KERNEL);
- if (rds_ring == NULL) {
- dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
goto err_out;
- }
+
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -255,11 +218,9 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
break;
}
rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
- if (rds_ring->rx_buf_arr == NULL) {
- dev_err(&netdev->dev, "Failed to allocate "
- "rx buffer ring %d\n", ring);
+ if (rds_ring->rx_buf_arr == NULL)
goto err_out;
- }
+
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
@@ -327,7 +288,6 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
long done = 0;
cond_resched();
-
while (done == 0) {
done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
done &= 2;
@@ -416,8 +376,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
u32 off;
struct pci_dev *pdev = adapter->pdev;
- QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
- QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0);
/* Halt all the individual PEGs and other blocks */
/* disable all I2Q */
@@ -482,10 +442,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
}
buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
- if (buf == NULL) {
- dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
+ if (buf == NULL)
return -ENOMEM;
- }
for (i = 0; i < n; i++) {
if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
@@ -564,8 +522,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
msleep(1);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
return 0;
}
@@ -576,7 +534,7 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
do {
- val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);
switch (val) {
case PHAN_INITIALIZE_COMPLETE:
@@ -592,7 +550,8 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
} while (--retries);
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
out_err:
dev_err(&adapter->pdev->dev, "Command Peg initialization not "
@@ -607,7 +566,7 @@ qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
do {
- val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE);
if (val == PHAN_PEG_RCV_INITIALIZED)
return 0;
@@ -638,7 +597,7 @@ qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
if (err)
return err;
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
return err;
}
@@ -649,7 +608,7 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
int timeo;
u32 val;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
val = QLC_DEV_GET_DRV(val, adapter->portnum);
if ((val & 0x3) != QLCNIC_TYPE_NIC) {
dev_err(&adapter->pdev->dev,
@@ -689,11 +648,9 @@ static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
}
entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
- flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
- if (flt_entry == NULL) {
- dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+ flt_entry = vzalloc(entry_size);
+ if (flt_entry == NULL)
return -EIO;
- }
ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
sizeof(struct qlcnic_flt_header),
@@ -1096,11 +1053,13 @@ qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
u32 heartbeat, ret = -EIO;
int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
- adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ adapter->heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
do {
msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
ret = QLCNIC_RCODE_SUCCESS;
break;
@@ -1270,7 +1229,7 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
return -EINVAL;
}
- QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6f82812d0fab..6387e0cc3ea9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
@@ -5,9 +12,6 @@
#include "qlcnic.h"
-#define QLCNIC_MAC_HASH(MAC)\
- ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
-
#define TX_ETHER_PKT 0x01
#define TX_TCP_PKT 0x02
#define TX_UDP_PKT 0x03
@@ -84,6 +88,8 @@
#define qlcnic_get_lro_sts_mss(sts_data1) \
((sts_data1 >> 32) & 0x0FFFF)
+#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
+
/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD 0x03
#define QLCNIC_RXPKT_DESC 0x04
@@ -91,18 +97,152 @@
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
+#define QLCNIC_TX_POLL_BUDGET 128
+#define QLCNIC_TCP_HDR_SIZE 20
+#define QLCNIC_TCP_TS_OPTION_SIZE 12
+#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
+#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM)
+
+#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
+
/* for status field in status_desc */
#define STATUS_CKSUM_LOOP 0
#define STATUS_CKSUM_OK 2
-static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 uaddr, __le16 vlan_id,
- struct qlcnic_host_tx_ring *tx_ring)
+#define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF)
+#define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF)
+#define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7)
+#define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF)
+#define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF)
+#define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF)
+#define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF)
+#define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF)
+#define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7)
+#define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1)
+#define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1)
+#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
+#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
+
+struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *, u16, u16);
+
+inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline u8 qlcnic_mac_hash(u64 mac)
+{
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
+}
+
+static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
+ u16 handle, u8 ring_id)
+{
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
+ return handle | (ring_id << 15);
+ else
+ return handle;
+}
+
+static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
+{
+ return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
+}
+
+void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
+ int loopback_pkt, __le16 vlan_id)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ unsigned long time;
+ u64 src_addr = 0;
+ u8 hindex, found = 0, op;
+ int ret;
+
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+
+ if (loopback_pkt) {
+ if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
+ return;
+
+ hindex = qlcnic_mac_hash(src_addr) &
+ (adapter->fhash.fbucket_size - 1);
+ head = &(adapter->rx_fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_READD_AGE * HZ + time))
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ fil->ftime = jiffies;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ fil->vlan_id = vlan_id;
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->rx_fhash.fnum++;
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ } else {
+ hindex = qlcnic_mac_hash(src_addr) &
+ (adapter->fhash.fbucket_size - 1);
+ head = &(adapter->rx_fhash.fhead[hindex]);
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ return;
+ }
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter,
+ (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ hlist_del(&(tmp_fil->fnode));
+ adapter->rx_fhash.fnum--;
+ }
+ }
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ }
+}
+
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+ __le16 vlan_id)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -128,14 +268,14 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
}
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
- struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
+ struct net_device *netdev = adapter->netdev;
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
u64 src_addr = 0;
__le16 vlan_id = 0;
u8 hindex;
@@ -143,23 +283,23 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
return;
- if (adapter->fhash.fnum >= adapter->fhash.fmax)
+ if (adapter->fhash.fnum >= adapter->fhash.fmax) {
+ adapter->stats.mac_filter_limit_overrun++;
+ netdev_info(netdev, "Can not add more than %d mac addresses\n",
+ adapter->fhash.fmax);
return;
+ }
- /* Only NPAR capable devices support vlan based learning*/
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- vlan_id = first_desc->vlan_TCI;
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+ hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
-
+ tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
- qlcnic_change_filter(adapter, src_addr, vlan_id,
- tx_ring);
+ qlcnic_change_filter(adapter, &src_addr,
+ vlan_id);
tmp_fil->ftime = jiffies;
return;
}
@@ -169,17 +309,13 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
- qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
-
+ qlcnic_change_filter(adapter, &src_addr, vlan_id);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
-
spin_lock(&adapter->mac_learn_lock);
-
hlist_add_head(&(fil->fnode), head);
adapter->fhash.fnum++;
-
spin_unlock(&adapter->mac_learn_lock);
}
@@ -474,8 +610,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
goto unwind_buff;
- if (adapter->mac_learn)
- qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+ if (adapter->drv_mac_learn)
+ qlcnic_send_filter(adapter, first_desc, skb);
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
@@ -528,8 +664,8 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
}
skb_reserve(skb, NET_IP_ALIGN);
- dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
adapter->stats.rx_dma_map_error++;
@@ -544,12 +680,13 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
}
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
+ struct qlcnic_host_rds_ring *rds_ring,
+ u8 ring_id)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
int count = 0;
- uint32_t producer;
+ uint32_t producer, handle;
struct list_head *head;
if (!spin_trylock(&rds_ring->lock))
@@ -557,7 +694,6 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
producer = rds_ring->producer;
head = &rds_ring->free_list;
-
while (!list_empty(head)) {
buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
@@ -565,28 +701,29 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
break;
}
-
count++;
list_del(&buffer->list);
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ handle = qlcnic_get_ref_handle(adapter,
+ buffer->ref_handle, ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
producer = get_next_index(producer, rds_ring->num_desc);
}
-
if (count) {
rds_ring->producer = producer;
writel((producer - 1) & (rds_ring->num_desc - 1),
rds_ring->crb_rcv_producer);
}
-
spin_unlock(&rds_ring->lock);
}
-static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int budget)
{
u32 sw_consumer, hw_consumer;
int i, done, count = 0;
@@ -594,7 +731,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
if (!spin_trylock(&adapter->tx_clean_lock))
return 1;
@@ -615,22 +751,19 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
}
-
adapter->stats.xmitfinished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
- if (++count >= MAX_STATUS_HANDLE)
+ if (++count >= budget)
break;
}
if (count && netif_running(netdev)) {
tx_ring->sw_consumer = sw_consumer;
-
smp_mb();
-
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev);
@@ -654,7 +787,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
-
spin_unlock(&adapter->tx_clean_lock);
return done;
@@ -662,16 +794,15 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
+ int tx_complete, work_done;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter;
- int tx_complete, work_done;
sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
adapter = sds_ring->adapter;
-
- tx_complete = qlcnic_process_cmd_ring(adapter);
+ tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+ budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
@@ -804,26 +935,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
}
-static struct sk_buff *
-qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring, u16 index,
- u16 cksum)
+struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
{
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
- buffer = &rds_ring->rx_buf_arr[index];
-
+ buffer = &ring->rx_buf_arr[index];
if (unlikely(buffer->skb == NULL)) {
WARN_ON(1);
return NULL;
}
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
-
if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
(cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
adapter->stats.csummed++;
@@ -832,6 +960,7 @@ qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb_checksum_none_assert(skb);
}
+
buffer->skb = NULL;
return skb;
@@ -871,8 +1000,8 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
- int index, length, cksum, pkt_offset;
- u16 vid = 0xffff;
+ int index, length, cksum, pkt_offset, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -892,6 +1021,14 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
@@ -933,10 +1070,11 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
struct tcphdr *th;
bool push, timestamp;
- int index, l2_hdr_offset, l4_hdr_offset;
- u16 lro_length, length, data_offset, vid = 0xffff;
+ int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
+ u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
u32 seq_number;
if (unlikely(ring > adapter->max_rds_rings))
@@ -961,6 +1099,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
if (timestamp)
data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
else
@@ -976,18 +1122,32 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
}
skb->protocol = eth_type_trans(skb, netdev);
- iph = (struct iphdr *)skb->data;
- th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
- length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
- iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
th->psh = push;
th->seq = htonl(seq_number);
length = skb->len;
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid);
@@ -1006,9 +1166,9 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
struct list_head *cur;
struct status_desc *desc;
struct qlcnic_rx_buffer *rxbuf;
+ int opcode, desc_cnt, count = 0;
u64 sts_data0, sts_data1;
- __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
- int opcode, ring, desc_cnt, count = 0;
+ u8 ring;
u32 consumer = sds_ring->consumer;
while (count < max) {
@@ -1020,7 +1180,6 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
opcode = qlcnic_get_sts_opcode(sts_data0);
-
switch (opcode) {
case QLCNIC_RXPKT_DESC:
case QLCNIC_OLD_RXPKT_DESC:
@@ -1040,18 +1199,16 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
default:
goto skip;
}
-
WARN_ON(desc_cnt > 1);
if (likely(rxbuf))
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
else
adapter->stats.null_rxbuf++;
-
skip:
for (; desc_cnt > 0; desc_cnt--) {
desc = &sds_ring->desc_head[consumer];
- desc->status_desc_data[0] = owner_phantom;
+ desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
consumer = get_next_index(consumer, sds_ring->num_desc);
}
count++;
@@ -1059,7 +1216,6 @@ skip:
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
-
if (!list_empty(&sds_ring->free_list[ring])) {
list_for_each(cur, &sds_ring->free_list[ring]) {
rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
@@ -1072,7 +1228,7 @@ skip:
spin_unlock(&rds_ring->lock);
}
- qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
}
if (count) {
@@ -1084,12 +1240,12 @@ skip:
}
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
int count = 0;
- u32 producer;
+ u32 producer, handle;
struct list_head *head;
producer = rds_ring->producer;
@@ -1110,7 +1266,9 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
+ ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
producer = get_next_index(producer, rds_ring->num_desc);
}
@@ -1180,7 +1338,7 @@ static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
return;
}
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
struct qlcnic_adapter *adapter = sds_ring->adapter;
struct status_desc *desc;
@@ -1217,26 +1375,8 @@ void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
writel(consumer, sds_ring->crb_sts_consumer);
}
-void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
-{
- u32 mac_low, mac_high;
- int i;
-
- mac_low = off1;
- mac_high = off2;
-
- if (alt_mac) {
- mac_low |= (mac_low >> 16) | (mac_high << 16);
- mac_high >>= 16;
- }
-
- for (i = 0; i < 2; i++)
- mac[i] = (u8)(mac_high >> ((1 - i) * 8));
- for (i = 2; i < 6; i++)
- mac[i] = (u8)(mac_low >> ((5 - i) * 8));
-}
-
-int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
{
int ring, max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1249,8 +1389,7 @@ int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
-
- if (ring == max_sds_rings - 1)
+ if (ring == adapter->max_sds_rings - 1)
netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
QLCNIC_NETDEV_WEIGHT / max_sds_rings);
else
@@ -1258,10 +1397,15 @@ int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
QLCNIC_NETDEV_WEIGHT*2);
}
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
return 0;
}
-void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1273,9 +1417,10 @@ void qlcnic_napi_del(struct qlcnic_adapter *adapter)
}
qlcnic_free_sds_rings(adapter->recv_ctx);
+ qlcnic_free_tx_rings(adapter);
}
-void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1291,7 +1436,7 @@ void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
}
}
-void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1307,3 +1452,481 @@ void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
napi_disable(&sds_ring->napi);
}
}
+
+#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
+#define QLC_83XX_LRO_LB_PKT (1ULL << 46)
+
+static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
+{
+ if (lro_pkt)
+ return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
+ else
+ return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_83xx_pktln(sts_data[0]);
+ cksum = qlcnic_83xx_csum_status(sts_data[1]);
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ bool push;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index, is_lb_pkt;
+ u16 lro_length, length, data_offset, gso_size;
+ u16 vid = 0xffff, t_vid;
+
+ if (unlikely(ring > adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index > rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
+ l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
+ l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
+ push = qlcnic_83xx_is_psh_bit(sts_data[1]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+ if (qlcnic_83xx_is_tstamp(sts_data[1]))
+ data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
+ th->psh = push;
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+ gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
+ skb_shinfo(skb)->gso_size = gso_size;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+ return buffer;
+}
+
+static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf = NULL;
+ u8 ring;
+ u64 sts_data[2];
+ int count = 0, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ break;
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+
+ switch (opcode) {
+ case QLC_83XX_REG_DESC:
+ rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
+ ring, sts_data);
+ break;
+ case QLC_83XX_LRO_DESC:
+ rxbuf = qlcnic_83xx_process_lro(adapter, ring,
+ sts_data);
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unkonwn opcode: 0x%x\n", opcode);
+ goto skip;
+ }
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+skip:
+ desc = &sds_ring->desc_head[consumer];
+ /* Reset the descriptor */
+ desc->status_desc_data[1] = 0;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ count++;
+ }
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+ }
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+ return count;
+}
+
+static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+
+ budget = QLCNIC_TX_POLL_BUDGET;
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+}
+
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_83xx_disable_intr(adapter, sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, max_sds_rings;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ max_sds_rings = adapter->max_sds_rings;
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_rx_poll,
+ QLCNIC_NETDEV_WEIGHT * 2);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_poll,
+ QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi,
+ qlcnic_83xx_msix_tx_poll,
+ QLCNIC_NETDEV_WEIGHT);
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
+ qlcnic_free_tx_rings(adapter);
+}
+
+void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ length = qlcnic_83xx_pktln(sts_data[0]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+ return;
+}
+
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data[2];
+ int ring, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ return;
+
+ ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index a7554d9aab0c..28a6d4838364 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1,24 +1,25 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
-#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#include <linux/pci.h>
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -29,28 +30,28 @@ char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
-static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
-MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
+MODULE_PARM_DESC(qlcnic_mac_learn,
+ "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
-static int qlcnic_use_msi = 1;
+int qlcnic_use_msi = 1;
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi, qlcnic_use_msi, int, 0444);
-static int qlcnic_use_msi_x = 1;
+int qlcnic_use_msi_x = 1;
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
-static int qlcnic_auto_fw_reset = 1;
+int qlcnic_auto_fw_reset = 1;
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
-static int qlcnic_load_fw_file;
+int qlcnic_load_fw_file;
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
-static int qlcnic_config_npars;
+int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
@@ -62,9 +63,6 @@ static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
-static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay);
-static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif
@@ -77,9 +75,9 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
-static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
@@ -93,15 +91,24 @@ static int qlcnic_vlan_rx_del(struct net_device *, u16);
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X)
+ return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX;
+ else
+ return 1;
+}
+
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
-
static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
{0,}
};
@@ -120,6 +127,32 @@ static const u32 msi_tgt_status[8] = {
ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};
+static const u32 qlcnic_reg_tbl[] = {
+ 0x1B20A8, /* PEG_HALT_STAT1 */
+ 0x1B20AC, /* PEG_HALT_STAT2 */
+ 0x1B20B0, /* FW_HEARTBEAT */
+ 0x1B2100, /* LOCK ID */
+ 0x1B2128, /* FW_CAPABILITIES */
+ 0x1B2138, /* drv active */
+ 0x1B2140, /* dev state */
+ 0x1B2144, /* drv state */
+ 0x1B2148, /* drv scratch */
+ 0x1B214C, /* dev partition info */
+ 0x1B2174, /* drv idc ver */
+ 0x1B2150, /* fw version major */
+ 0x1B2154, /* fw version minor */
+ 0x1B2158, /* fw version sub */
+ 0x1B219C, /* npar state */
+ 0x1B21FC, /* FW_IMG_VALID */
+ 0x1B2250, /* CMD_PEG_STATE */
+ 0x1B233C, /* RCV_PEG_STATE */
+ 0x1B23B4, /* ASIC TEMP */
+ 0x1B216C, /* FW api */
+ 0x1B2170, /* drv op mode */
+ 0x13C010, /* flash lock */
+ 0x13C014, /* flash unlock */
+};
+
static const struct qlcnic_board_info qlcnic_boards[] = {
{0x1077, 0x8020, 0x1077, 0x203,
"8200 Series Single Port 10GbE Converged Network Adapter"
@@ -143,6 +176,7 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+#define QLC_MAX_SDS_RINGS 8
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -164,35 +198,6 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
recv_ctx->sds_rings = NULL;
}
-static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
-{
- memset(&adapter->stats, 0, sizeof(adapter->stats));
-}
-
-static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
-{
- u32 control;
- int pos;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_dword(pdev, pos, &control);
- if (enable)
- control |= PCI_MSIX_FLAGS_ENABLE;
- else
- control = 0;
- pci_write_config_dword(pdev, pos, control);
- }
-}
-
-static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
-{
- int i;
-
- for (i = 0; i < count; i++)
- adapter->msix_entries[i].entry = i;
-}
-
static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
@@ -204,12 +209,11 @@ qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
return -EIO;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
netdev->dev_addr);
@@ -225,7 +229,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return -EOPNOTSUPP;
if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
+ return -EINVAL;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_detach(netdev);
@@ -243,6 +247,85 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev, const unsigned char *addr)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EOPNOTSUPP;
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return err;
+ }
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = qlcnic_nic_del_mac(adapter, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(netdev, addr);
+ else
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 flags)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ pr_info("%s: FDB e-switch is not enabled\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (ether_addr_equal(addr, adapter->mac_addr))
+ return err;
+
+ if (is_unicast_ether_addr(addr))
+ err = qlcnic_nic_add_mac(adapter, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(netdev, addr);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
+ struct net_device *netdev, int idx)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+
+ return idx;
+}
+
+static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -257,6 +340,9 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_tx_timeout = qlcnic_tx_timeout,
.ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
.ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
+ .ndo_fdb_add = qlcnic_fdb_add,
+ .ndo_fdb_del = qlcnic_fdb_del,
+ .ndo_fdb_dump = qlcnic_fdb_dump,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -267,50 +353,125 @@ static const struct net_device_ops qlcnic_netdev_failed_ops = {
};
static struct qlcnic_nic_template qlcnic_ops = {
- .config_bridged_mode = qlcnic_config_bridged_mode,
- .config_led = qlcnic_config_led,
- .start_firmware = qlcnic_start_firmware
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_82xx_config_led,
+ .start_firmware = qlcnic_82xx_start_firmware,
+ .request_reset = qlcnic_82xx_dev_request_reset,
+ .cancel_idc_work = qlcnic_82xx_cancel_idc_work,
+ .napi_add = qlcnic_82xx_napi_add,
+ .napi_del = qlcnic_82xx_napi_del,
+ .config_ipaddr = qlcnic_82xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr,
};
-static struct qlcnic_nic_template qlcnic_vf_ops = {
- .config_bridged_mode = qlcnicvf_config_bridged_mode,
- .config_led = qlcnicvf_config_led,
- .start_firmware = qlcnicvf_start_firmware
+struct qlcnic_nic_template qlcnic_vf_ops = {
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .start_firmware = qlcnicvf_start_firmware
};
-static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+static struct qlcnic_hardware_ops qlcnic_hw_ops = {
+ .read_crb = qlcnic_82xx_read_crb,
+ .write_crb = qlcnic_82xx_write_crb,
+ .read_reg = qlcnic_82xx_hw_read_wx_2M,
+ .write_reg = qlcnic_82xx_hw_write_wx_2M,
+ .get_mac_address = qlcnic_82xx_get_mac_address,
+ .setup_intr = qlcnic_82xx_setup_intr,
+ .alloc_mbx_args = qlcnic_82xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_82xx_issue_cmd,
+ .get_func_no = qlcnic_82xx_get_func_no,
+ .api_lock = qlcnic_82xx_api_lock,
+ .api_unlock = qlcnic_82xx_api_unlock,
+ .add_sysfs = qlcnic_82xx_add_sysfs,
+ .remove_sysfs = qlcnic_82xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
+ .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
+ .setup_link_event = qlcnic_82xx_linkevent_request,
+ .get_nic_info = qlcnic_82xx_get_nic_info,
+ .get_pci_info = qlcnic_82xx_get_pci_info,
+ .set_nic_info = qlcnic_82xx_set_nic_info,
+ .change_macvlan = qlcnic_82xx_sre_macaddr_change,
+ .napi_enable = qlcnic_82xx_napi_enable,
+ .napi_disable = qlcnic_82xx_napi_disable,
+ .config_intr_coal = qlcnic_82xx_config_intr_coalesce,
+ .config_rss = qlcnic_82xx_config_rss,
+ .config_hw_lro = qlcnic_82xx_config_hw_lro,
+ .config_loopback = qlcnic_82xx_set_lb_mode,
+ .clear_loopback = qlcnic_82xx_clear_lb_mode,
+ .config_promisc_mode = qlcnic_82xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_82xx_change_filter,
+ .get_board_info = qlcnic_82xx_get_board_info,
+};
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int err = -1;
+ int err = -1, i;
+ int max_tx_rings;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
adapter->max_sds_rings = 1;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
- qlcnic_set_msix_bit(pdev, 0);
if (adapter->ahw->msix_supported) {
enable_msix:
- qlcnic_init_msix_entries(adapter, num_msix);
+ for (i = 0; i < num_msix; i++)
+ adapter->msix_entries[i].entry = i;
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
if (err == 0) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
- qlcnic_set_msix_bit(pdev, 1);
-
- adapter->max_sds_rings = num_msix;
-
+ if (qlcnic_83xx_check(adapter)) {
+ adapter->ahw->num_msix = num_msix;
+ /* subtract mail box and tx ring vectors */
+ max_tx_rings = adapter->max_drv_tx_rings;
+ adapter->max_sds_rings = num_msix -
+ max_tx_rings - 1;
+ } else {
+ adapter->max_sds_rings = num_msix;
+ }
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
- }
- if (err > 0) {
- num_msix = rounddown_pow_of_two(err);
- if (num_msix)
+ } else if (err > 0) {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
+ if (qlcnic_83xx_check(adapter)) {
+ if (err < QLC_83XX_MINIMUM_VECTOR)
+ return err;
+ err -= (adapter->max_drv_tx_rings + 1);
+ num_msix = rounddown_pow_of_two(err);
+ num_msix += (adapter->max_drv_tx_rings + 1);
+ } else {
+ num_msix = rounddown_pow_of_two(err);
+ }
+
+ if (num_msix) {
+ dev_info(&pdev->dev,
+ "Trying to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
goto enable_msix;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
}
}
+
return err;
}
-static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
+ int err = 0;
u32 offset, mask_reg;
const struct qlcnic_legacy_intr_set *legacy_intrp;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -323,8 +484,10 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
offset);
dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
- return;
+ return err;
}
+ if (qlcnic_use_msi || qlcnic_use_msi_x)
+ return -EOPNOTSUPP;
legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
@@ -336,32 +499,47 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
dev_info(&pdev->dev, "using legacy interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
+ return err;
}
-static void
-qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
{
- int num_msix;
+ int num_msix, err = 0;
- if (adapter->ahw->msix_supported) {
+ if (!num_intr)
+ num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+
+ if (adapter->ahw->msix_supported)
num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- QLCNIC_DEF_NUM_STS_DESC_RINGS));
- } else
+ num_intr));
+ else
num_msix = 1;
- if (!qlcnic_enable_msix(adapter, num_msix))
- return;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM || !err)
+ return err;
- qlcnic_enable_msi_legacy(adapter);
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+
+ return -EIO;
}
-static void
-qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
if (adapter->flags & QLCNIC_MSIX_ENABLED)
pci_disable_msix(adapter->pdev);
if (adapter->flags & QLCNIC_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ if (adapter->ahw->intr_tbl) {
+ vfree(adapter->ahw->intr_tbl);
+ adapter->ahw->intr_tbl = NULL;
+ }
}
static void
@@ -371,7 +549,36 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
iounmap(adapter->ahw->pci_base0);
}
-static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_pci_info *pci_info;
+ int ret;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ switch (adapter->ahw->port_type) {
+ case QLCNIC_GBE:
+ adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ break;
+ case QLCNIC_XGBE:
+ adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+ break;
+ }
+ return 0;
+ }
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ kfree(pci_info);
+ return ret;
+}
+
+int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
int i, ret = 0, j = 0;
@@ -423,8 +630,11 @@ static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
j++;
}
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_enable_eswitch(adapter, i, 1);
+ }
kfree(pci_info);
return 0;
@@ -445,13 +655,10 @@ static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
u8 id;
- u32 ref_count;
int i, ret = 1;
u32 data = QLCNIC_MGMT_FUNC;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* If other drivers are not in use set their privilege level */
- ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
ret = qlcnic_api_lock(adapter);
if (ret)
goto err_lock;
@@ -465,40 +672,31 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
QLC_DEV_SET_DRV(0xf, id));
}
} else {
- data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
+ data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
ahw->pci_func));
}
- QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
qlcnic_api_unlock(adapter);
err_lock:
return ret;
}
-static void
-qlcnic_check_vf(struct qlcnic_adapter *adapter)
+static void qlcnic_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
{
- void __iomem *msix_base_addr;
- void __iomem *priv_op;
- u32 func;
- u32 msix_base;
u32 op_mode, priv_level;
/* Determine FW API version */
- adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
- QLCNIC_FW_API);
+ adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FW_API);
/* Find PCI function number */
- pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
- msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
- msix_base = readl(msix_base_addr);
- func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
- adapter->ahw->pci_func = func;
+ qlcnic_get_func_no(adapter);
/* Determine function privilege level */
- priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
- op_mode = readl(priv_op);
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
else
@@ -515,12 +713,16 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
}
#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
{
switch (dev_id) {
case PCI_DEVICE_ID_QLOGIC_QLE824X:
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ *bar = QLCNIC_83XX_BAR0_LENGTH;
+ break;
default:
*bar = 0;
}
@@ -531,11 +733,9 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
{
u32 offset;
void __iomem *mem_ptr0 = NULL;
- resource_size_t mem_base;
unsigned long mem_len, pci_len0 = 0, bar0_len;
/* remap phys address */
- mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
qlcnic_get_bar_length(pdev->device, &bar0_len);
@@ -552,6 +752,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
}
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
ahw->pci_base0 = mem_ptr0;
ahw->pci_len0 = pci_len0;
offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
@@ -586,19 +787,26 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
+ int err;
u32 fw_major, fw_minor, fw_build, prev_fw_version;
struct pci_dev *pdev = adapter->pdev;
- struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
prev_fw_version = adapter->fw_version;
- fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
- if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ return;
+ }
+ if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
if (fw_dump->tmpl_hdr == NULL ||
adapter->fw_version > prev_fw_version) {
if (fw_dump->tmpl_hdr)
@@ -609,8 +817,9 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
}
}
- dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
- fw_major, fw_minor, fw_build);
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
if (adapter->ahw->port_type == QLCNIC_XGBE) {
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
@@ -653,9 +862,19 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
adapter->ahw->capabilities = nic_info.capabilities;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ u32 temp;
+ temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ adapter->ahw->capabilities2 = temp;
+ }
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
adapter->ahw->max_mtu = nic_info.max_mtu;
+ /* Disable NPAR for 83XX */
+ if (qlcnic_83xx_check(adapter))
+ return err;
+
if (adapter->ahw->capabilities & BIT_6)
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
else
@@ -714,7 +933,7 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
qlcnic_set_netdev_features(adapter, esw_cfg);
}
-static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
@@ -735,14 +954,17 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- netdev_features_t features, vlan_features;
+ unsigned long features, vlan_features;
+
+ if (qlcnic_83xx_check(adapter))
+ return;
features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO);
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO);
vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
+ NETIF_F_IPV6_CSUM);
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
features |= (NETIF_F_TSO | NETIF_F_TSO6);
vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
}
@@ -752,12 +974,19 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
if (esw_cfg->offload_flags & BIT_0) {
netdev->features |= features;
- if (!(esw_cfg->offload_flags & BIT_1))
+ adapter->rx_csum = 1;
+ if (!(esw_cfg->offload_flags & BIT_1)) {
netdev->features &= ~NETIF_F_TSO;
- if (!(esw_cfg->offload_flags & BIT_2))
+ features &= ~NETIF_F_TSO;
+ }
+ if (!(esw_cfg->offload_flags & BIT_2)) {
netdev->features &= ~NETIF_F_TSO6;
+ features &= ~NETIF_F_TSO6;
+ }
} else {
netdev->features &= ~features;
+ features &= ~features;
+ adapter->rx_csum = 0;
}
netdev->vlan_features = (features & vlan_features);
@@ -766,7 +995,6 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
- void __iomem *priv_op;
u32 op_mode, priv_level;
int err = 0;
@@ -777,8 +1005,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
return 0;
- priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
- op_mode = readl(priv_op);
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (op_mode == QLC_DEV_DRV_DEFAULT)
@@ -810,7 +1037,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
return err;
}
-static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
struct qlcnic_npar_info *npar;
@@ -843,6 +1070,7 @@ static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
return 0;
}
+
static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
struct qlcnic_npar_info *npar, int pci_func)
@@ -866,7 +1094,7 @@ qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
int i, err;
struct qlcnic_npar_info *npar;
@@ -882,8 +1110,7 @@ static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
npar = &adapter->npars[i];
pci_func = npar->pci_func;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- err = qlcnic_get_nic_info(adapter,
- &nic_info, pci_func);
+ err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (err)
return err;
nic_info.min_tx_bw = npar->min_bw;
@@ -914,14 +1141,16 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
msleep(1000);
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
}
if (!npar_opt_timeo) {
dev_err(&adapter->pdev->dev,
- "Waiting for NPAR state to opertional timeout\n");
+ "Waiting for NPAR state to operational timeout\n");
return -EIO;
}
return 0;
@@ -949,8 +1178,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
return err;
}
-static int
-qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
{
int err;
@@ -990,9 +1218,8 @@ check_fw_status:
if (err)
goto err_out;
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
qlcnic_idc_debug_info(adapter, 1);
-
err = qlcnic_check_eswitch_mode(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
@@ -1010,7 +1237,7 @@ check_fw_status:
return 0;
err_out:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
dev_err(&adapter->pdev->dev, "Device state set to failed\n");
qlcnic_release_firmware(adapter);
@@ -1022,6 +1249,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
irq_handler_t handler;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
int err, ring;
unsigned long flags = 0;
@@ -1029,7 +1257,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- handler = qlcnic_tmp_intr;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_tmp_intr;
if (!QLCNIC_IS_MSI_FAMILY(adapter))
flags |= IRQF_SHARED;
@@ -1040,20 +1269,44 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
handler = qlcnic_msi_intr;
else {
flags |= IRQF_SHARED;
- handler = qlcnic_intr;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_intr;
+ else
+ handler = qlcnic_83xx_intr;
}
}
adapter->irq = netdev->irq;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
- err = request_irq(sds_ring->irq, handler,
- flags, sds_ring->name, sds_ring);
- if (err)
- return err;
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ snprintf(sds_ring->name, sizeof(int) + IFNAMSIZ,
+ "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler, flags,
+ sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+ }
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ handler = qlcnic_msix_tx_intr;
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ snprintf(tx_ring->name, sizeof(int) + IFNAMSIZ,
+ "%s[%d]", netdev->name,
+ adapter->max_sds_rings + ring);
+ err = request_irq(tx_ring->irq, handler, flags,
+ tx_ring->name, tx_ring);
+ if (err)
+ return err;
+ }
+ }
}
-
return 0;
}
@@ -1062,21 +1315,48 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- free_irq(sds_ring->irq, sds_ring);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+ }
+ if (qlcnic_83xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->irq)
+ free_irq(tx_ring->irq, tx_ring);
+ }
+ }
}
}
-static int
-__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
{
- int ring;
- u32 capab2;
+ u32 capab = 0;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (adapter->ahw->capabilities2 &
+ QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ } else {
+ capab = adapter->ahw->capabilities;
+ if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab))
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+}
+int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
struct qlcnic_host_rds_ring *rds_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1086,19 +1366,14 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return 0;
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
-
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
- capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
- if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
- adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
- }
+ qlcnic_get_lro_mss_capability(adapter);
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, rds_ring);
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
}
qlcnic_set_multi(netdev);
@@ -1123,10 +1398,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return 0;
}
-/* Usage: During resume and firmware recovery module.*/
-
-static int
-qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int err = 0;
@@ -1138,8 +1410,7 @@ qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return err;
}
-static void
-__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1171,8 +1442,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
/* Usage: During suspend and firmware recovery module */
-static void
-qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
rtnl_lock();
if (netif_running(netdev))
@@ -1181,7 +1451,7 @@ qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
}
-static int
+int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1227,8 +1497,7 @@ err_out_napi_del:
return err;
}
-static void
-qlcnic_detach(struct qlcnic_adapter *adapter)
+void qlcnic_detach(struct qlcnic_adapter *adapter)
{
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1277,21 +1546,9 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
int err = 0;
- adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
- GFP_KERNEL);
- if (!adapter->ahw) {
- dev_err(&adapter->pdev->dev,
- "Failed to allocate recv ctx resources for adapter\n");
- err = -ENOMEM;
- goto err_out;
- }
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
GFP_KERNEL);
if (!adapter->recv_ctx) {
- dev_err(&adapter->pdev->dev,
- "Failed to allocate recv ctx resources for adapter\n");
- kfree(adapter->ahw);
- adapter->ahw = NULL;
err = -ENOMEM;
goto err_out;
}
@@ -1299,6 +1556,8 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ /* clear stats */
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
return err;
}
@@ -1312,8 +1571,9 @@ static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
vfree(adapter->ahw->fw_dump.tmpl_hdr);
adapter->ahw->fw_dump.tmpl_hdr = NULL;
}
- kfree(adapter->ahw);
- adapter->ahw = NULL;
+
+ kfree(adapter->ahw->reset.buff);
+ adapter->ahw->fw_dump.tmpl_hdr = NULL;
}
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
@@ -1333,6 +1593,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->max_sds_rings = 1;
adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1349,7 +1610,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, rds_ring);
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
@@ -1387,6 +1648,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ dev_err(&adapter->pdev->dev, "%s:\n", __func__);
return 0;
}
@@ -1430,34 +1692,40 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int err;
struct pci_dev *pdev = adapter->pdev;
+ adapter->rx_csum = 1;
adapter->ahw->mc_enabled = 0;
- adapter->ahw->max_mc_count = 38;
+ adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
netdev->netdev_ops = &qlcnic_netdev_ops;
- netdev->watchdog_timeo = 5*HZ;
+ netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
qlcnic_change_mtu(netdev, netdev->mtu);
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
- netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_RX);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
- netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (pci_using_dac == 1)
- netdev->hw_features |= NETIF_F_HIGHDMA;
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
- netdev->vlan_features = netdev->hw_features;
+ if (qlcnic_vlan_tx_check(adapter))
+ netdev->features |= (NETIF_F_HW_VLAN_TX);
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
- netdev->hw_features |= NETIF_F_HW_VLAN_TX;
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
- netdev->hw_features |= NETIF_F_LRO;
-
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_LRO;
+ netdev->hw_features = netdev->features;
netdev->irq = adapter->msix_entries[0].vector;
err = register_netdev(netdev);
@@ -1485,17 +1753,61 @@ static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
return 0;
}
-static int
-qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
+void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
{
- adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
- GFP_KERNEL);
+ int ring;
+ struct qlcnic_host_tx_ring *tx_ring;
- if (adapter->msix_entries)
- return 0;
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
+ vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
+ }
+ }
+ if (adapter->tx_ring != NULL)
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, vector, index;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+
+ tx_ring = kcalloc(adapter->max_drv_tx_rings,
+ sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
+ return -ENOMEM;
- dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
- return -ENOMEM;
+ adapter->tx_ring = tx_ring;
+
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, ring);
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ qlcnic_free_tx_rings(adapter);
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->adapter = adapter;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ index = adapter->max_sds_rings + ring;
+ vector = adapter->msix_entries[index].vector;
+ tx_ring->irq = vector;
+ }
+ }
+ }
+ return 0;
}
static int
@@ -1503,9 +1815,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct qlcnic_adapter *adapter = NULL;
+ struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
- uint8_t revision_id;
- char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ u32 capab2;
+ char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
err = pci_enable_device(pdev);
if (err)
@@ -1527,10 +1840,27 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_enable_pcie_error_reporting(pdev);
+ ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
+ if (!ahw)
+ goto err_out_free_res;
+
+ if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) {
+ ahw->hw_ops = &qlcnic_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_reg_tbl;
+ } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+ qlcnic_83xx_register_map(ahw);
+ } else {
+ goto err_out_free_hw_res;
+ }
+
+ err = qlcnic_setup_pci_map(pdev, ahw);
+ if (err)
+ goto err_out_free_hw_res;
+
netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
if (!netdev) {
err = -ENOMEM;
- goto err_out_free_res;
+ goto err_out_iounmap;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -1538,15 +1868,25 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ adapter->ahw = ahw;
+
+ adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
+ if (adapter->qlcnic_wq == NULL) {
+ dev_err(&pdev->dev, "Failed to create workqueue\n");
+ goto err_out_free_netdev;
+ }
err = qlcnic_alloc_adapter_resources(adapter);
if (err)
goto err_out_free_netdev;
adapter->dev_rst_time = jiffies;
- revision_id = pdev->revision;
- adapter->ahw->revision_id = revision_id;
- adapter->mac_learn = qlcnic_mac_learn;
+ adapter->ahw->revision_id = pdev->revision;
+ if (qlcnic_mac_learn == FDB_MAC_LEARN)
+ adapter->fdb_mac_learn = true;
+ else if (qlcnic_mac_learn == DRV_MAC_LEARN)
+ adapter->drv_mac_learn = true;
+ adapter->max_drv_tx_rings = 1;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
@@ -1554,31 +1894,32 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- err = qlcnic_setup_pci_map(pdev, adapter->ahw);
- if (err)
- goto err_out_free_hw;
- qlcnic_check_vf(adapter);
-
- /* This will be reset for mezz cards */
- adapter->portnum = adapter->ahw->pci_func;
-
- err = qlcnic_get_board_info(adapter);
- if (err) {
- dev_err(&pdev->dev, "Error getting board config info.\n");
- goto err_out_iounmap;
- }
-
- err = qlcnic_setup_idc_param(adapter);
- if (err)
- goto err_out_iounmap;
+ if (qlcnic_82xx_check(adapter)) {
+ qlcnic_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
+ goto err_out_free_hw;
+ }
- adapter->flags |= QLCNIC_NEED_FLR;
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
+ goto err_out_free_hw;
- err = adapter->nic_ops->start_firmware(adapter);
- if (err) {
- dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
- "\t\tIf reboot doesn't help, try flashing the card\n");
- goto err_out_maintenance_mode;
+ adapter->flags |= QLCNIC_NEED_FLR;
+ } else if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_83xx_init(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "%s: failed\n", __func__);
+ goto err_out_free_hw;
+ }
+ } else {
+ dev_err(&pdev->dev,
+ "%s: failed. Please Reboot\n", __func__);
+ goto err_out_free_hw;
}
if (qlcnic_read_mac_addr(adapter))
@@ -1586,22 +1927,34 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adapter->portnum == 0) {
qlcnic_get_board_name(adapter, board_name);
+
pr_info("%s: %s Board Chip rev 0x%x\n",
module_name(THIS_MODULE),
board_name, adapter->ahw->revision_id);
}
+ err = qlcnic_setup_intr(adapter, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
- qlcnic_clear_stats(adapter);
-
- err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
- if (err)
- goto err_out_decr_ref;
-
- qlcnic_setup_intr(adapter);
+ if (qlcnic_83xx_check(adapter)) {
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto err_out_disable_msi;
+ }
err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
if (err)
- goto err_out_disable_msi;
+ goto err_out_disable_mbx_intr;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB)
+ qlcnic_fw_cmd_set_drv_version(adapter);
+ }
+ }
pci_set_drvdata(pdev, adapter);
@@ -1620,29 +1973,37 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- if (adapter->mac_learn)
+ if (qlcnic_get_act_pci_func(adapter))
+ goto err_out_disable_mbx_intr;
+
+ if (adapter->drv_mac_learn)
qlcnic_alloc_lb_filters_mem(adapter);
- qlcnic_create_diag_entries(adapter);
+ qlcnic_add_sysfs(adapter);
return 0;
+err_out_disable_mbx_intr:
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
err_out_disable_msi:
qlcnic_teardown_intr(adapter);
- kfree(adapter->msix_entries);
-
-err_out_decr_ref:
+ qlcnic_cancel_idc_work(adapter);
qlcnic_clr_all_drv_state(adapter, 0);
-err_out_iounmap:
- qlcnic_cleanup_pci_map(adapter);
-
err_out_free_hw:
qlcnic_free_adapter_resources(adapter);
err_out_free_netdev:
free_netdev(netdev);
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_hw_res:
+ kfree(ahw);
+
err_out_free_res:
pci_release_regions(pdev);
@@ -1650,24 +2011,13 @@ err_out_disable_pdev:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
-
-err_out_maintenance_mode:
- netdev->netdev_ops = &qlcnic_netdev_failed_ops;
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
- err = register_netdev(netdev);
- if (err) {
- dev_err(&pdev->dev, "failed to register net device\n");
- goto err_out_decr_ref;
- }
- pci_set_drvdata(pdev, adapter);
- qlcnic_create_diag_entries(adapter);
- return 0;
}
static void qlcnic_remove(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter;
struct net_device *netdev;
+ struct qlcnic_hardware_context *ahw;
adapter = pci_get_drvdata(pdev);
if (adapter == NULL)
@@ -1675,10 +2025,17 @@ static void qlcnic_remove(struct pci_dev *pdev)
netdev = adapter->netdev;
- qlcnic_cancel_fw_work(adapter);
+ qlcnic_cancel_idc_work(adapter);
+ ahw = adapter->ahw;
unregister_netdev(netdev);
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ }
+
qlcnic_detach(adapter);
if (adapter->npars != NULL)
@@ -1694,9 +2051,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_free_lb_filters_mem(adapter);
qlcnic_teardown_intr(adapter);
- kfree(adapter->msix_entries);
- qlcnic_remove_diag_entries(adapter);
+ qlcnic_remove_sysfs(adapter);
qlcnic_cleanup_pci_map(adapter);
@@ -1707,7 +2063,12 @@ static void qlcnic_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ if (adapter->qlcnic_wq) {
+ destroy_workqueue(adapter->qlcnic_wq);
+ adapter->qlcnic_wq = NULL;
+ }
qlcnic_free_adapter_resources(adapter);
+ kfree(ahw);
free_netdev(netdev);
}
static int __qlcnic_shutdown(struct pci_dev *pdev)
@@ -1718,7 +2079,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
netif_device_detach(netdev);
- qlcnic_cancel_fw_work(adapter);
+ qlcnic_cancel_idc_work(adapter);
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
@@ -1731,7 +2092,6 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
retval = pci_save_state(pdev);
if (retval)
return retval;
-
if (qlcnic_82xx_check(adapter)) {
if (qlcnic_wol_supported(adapter)) {
pci_enable_wake(pdev, PCI_D3cold, 1);
@@ -1779,7 +2139,7 @@ qlcnic_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- err = adapter->nic_ops->start_firmware(adapter);
+ err = qlcnic_start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
return err;
@@ -1802,14 +2162,8 @@ done:
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
int err;
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
- netdev_err(netdev, "Device in FAILED state\n");
- return -EIO;
- }
-
netif_carrier_off(netdev);
err = qlcnic_attach(adapter);
@@ -1837,6 +2191,7 @@ static int qlcnic_close(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
__qlcnic_down(adapter, netdev);
+
return 0;
}
@@ -1844,22 +2199,53 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
int i;
+ struct net_device *netdev = adapter->netdev;
+ u32 filter_size = 0;
+ u16 act_pci_func = 0;
if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
+ act_pci_func = adapter->ahw->act_pci_func;
spin_lock_init(&adapter->mac_learn_lock);
+ spin_lock_init(&adapter->rx_mac_learn_lock);
+
+ if (qlcnic_82xx_check(adapter)) {
+ filter_size = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
+ } else {
+ filter_size = QLC_83XX_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE;
+ }
+
+ head = kcalloc(adapter->fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
- head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
- GFP_KERNEL);
if (!head)
return;
- adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fmax = (filter_size / act_pci_func);
adapter->fhash.fhead = head;
- for (i = 0; i < adapter->fhash.fmax; i++)
+ netdev_info(netdev, "active nic func = %d, mac filter size=%d\n",
+ act_pci_func, adapter->fhash.fmax);
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++)
INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+
+ adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;
+
+ head = kcalloc(adapter->rx_fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
+
+ if (!head)
+ return;
+
+ adapter->rx_fhash.fmax = (filter_size / act_pci_func);
+ adapter->rx_fhash.fhead = head;
+
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
+ INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
}
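For intuition, the per-function split computed above under assumed values (the real QLCNIC_LB_MAX_FILTERS / QLC_83XX_LB_MAX_FILTERS constants live in the driver headers; the numbers below are illustrative only):

/* Assume filter_size = 2048 and four active PCI functions:
 *   adapter->fhash.fmax = 2048 / 4 = 512 MAC-learning filters per function,
 * and the same per-function budget is applied to the new rx_fhash table.
 */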
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
@@ -1869,16 +2255,25 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
adapter->fhash.fhead = NULL;
adapter->fhash.fmax = 0;
+
+ if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead)
+ kfree(adapter->rx_fhash.fhead);
+
+ adapter->rx_fhash.fmax = 0;
+ adapter->rx_fhash.fhead = NULL;
}
-static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+int qlcnic_check_temp(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 temp_state, temp_val, temp = 0;
int rv = 0;
+ if (qlcnic_83xx_check(adapter))
+ temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+
if (qlcnic_82xx_check(adapter))
- temp = QLCRD32(adapter, CRB_TEMP_STATE);
+ temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
temp_state = qlcnic_get_temp_state(temp);
temp_val = qlcnic_get_temp_val(temp);
@@ -1938,7 +2333,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
return stats;
}
-static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
{
u32 status;
@@ -2014,6 +2409,14 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
+{
+ struct qlcnic_host_tx_ring *tx_ring = data;
+
+ napi_schedule(&tx_ring->napi);
+ return IRQ_HANDLED;
+}
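An illustrative pairing of the per-TX-ring vector (assigned in qlcnic_alloc_tx_rings() above) with this handler; the function name and the "qlcnic-tx" label are hypothetical and shown only to make the wiring explicit, not taken from the patch:

/* Sketch: request one MSI-X vector per TX ring and route it to
 * qlcnic_msix_tx_intr(), which only schedules that ring's NAPI context.
 * Unwinding of already-requested vectors on failure is omitted.
 */
static int example_request_tx_vectors(struct qlcnic_adapter *adapter)
{
	struct qlcnic_host_tx_ring *tx_ring;
	int ring, err;

	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		err = request_irq(tx_ring->irq, qlcnic_msix_tx_intr, 0,
				  "qlcnic-tx", tx_ring);
		if (err)
			return err;
	}
	return 0;
}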
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
@@ -2040,7 +2443,7 @@ qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
val |= encoding << 7;
val |= (jiffies - adapter->dev_rst_time) << 8;
- QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
adapter->dev_rst_time = jiffies;
}
@@ -2055,14 +2458,14 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
if (qlcnic_api_lock(adapter))
return -EIO;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2077,9 +2480,9 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -EBUSY;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2094,20 +2497,22 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
if (qlcnic_api_lock(adapter))
goto err;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
if (failed) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_FAILED);
dev_info(&adapter->pdev->dev,
"Device state set to Failed. Please Reboot\n");
} else if (!(val & 0x11111111))
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_COLD);
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
err:
@@ -2122,12 +2527,13 @@ static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
int act, state, active_mask;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
- state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
- active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
+ active_mask = (~(1 << (ahw->pci_func * 4)));
act = act & active_mask;
}
@@ -2140,7 +2546,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
{
- u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+ u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
if (val != QLCNIC_DRV_IDC_VER) {
dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
@@ -2164,19 +2570,21 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (!(val & (1 << (portnum * 4)))) {
QLC_DEV_SET_REF_CNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
}
- prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
- QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_DRV_IDC_VER);
qlcnic_idc_debug_info(adapter, 0);
qlcnic_api_unlock(adapter);
return 1;
@@ -2187,15 +2595,15 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
return ret;
case QLCNIC_DEV_NEED_RESET:
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_NEED_QUISCENT:
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_QSCNT_RDY(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_FAILED:
@@ -2212,7 +2620,7 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
do {
msleep(1000);
- prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (prev_state == QLCNIC_DEV_QUISCENT)
continue;
@@ -2227,9 +2635,9 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
@@ -2248,7 +2656,7 @@ qlcnic_fwinit_work(struct work_struct *work)
if (qlcnic_api_lock(adapter))
goto err_ret;
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_QUISCENT ||
dev_state == QLCNIC_DEV_NEED_QUISCENT) {
qlcnic_api_unlock(adapter);
@@ -2277,17 +2685,19 @@ qlcnic_fwinit_work(struct work_struct *work)
if (!qlcnic_check_drv_state(adapter)) {
skip_ack_check:
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_NEED_RESET) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
- QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCDB(adapter, DRV, "Restarting fw\n");
qlcnic_idc_debug_info(adapter, 0);
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter,
+ QLCNIC_CRB_DRV_STATE, val);
}
qlcnic_api_unlock(adapter);
@@ -2313,12 +2723,12 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
wait_npar:
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
switch (dev_state) {
case QLCNIC_DEV_READY:
- if (!adapter->nic_ops->start_firmware(adapter)) {
+ if (!qlcnic_start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
adapter->fw_wait_cnt = 0;
return;
@@ -2355,7 +2765,7 @@ qlcnic_detach_work(struct work_struct *work)
} else
qlcnic_down(adapter, netdev);
- status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (status & QLCNIC_RCODE_FATAL_ERROR) {
dev_err(&adapter->pdev->dev,
@@ -2406,19 +2816,18 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
{
u32 state;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
if (state == QLCNIC_DEV_NPAR_NON_OPER)
return;
if (qlcnic_api_lock(adapter))
return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
-/*Transit to RESET state from READY state only */
-void
-qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
{
u32 state, xg_val = 0, gb_val = 0;
@@ -2433,25 +2842,22 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
dev_info(&adapter->pdev->dev, "Pause control frames disabled"
" on all ports\n");
adapter->need_fw_reset = 1;
+
if (qlcnic_api_lock(adapter))
return;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
- netdev_err(adapter->netdev,
- "Device is in FAILED state, Please Reboot\n");
- qlcnic_api_unlock(adapter);
- return;
- }
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_READY) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_NEED_RESET);
adapter->flags |= QLCNIC_FW_RESET_OWNER;
QLCDB(adapter, DRV, "NEED_RESET state set\n");
qlcnic_idc_debug_info(adapter, 0);
}
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
@@ -2462,34 +2868,22 @@ qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_OPER);
QLCDB(adapter, DRV, "NPAR operational state set\n");
qlcnic_api_unlock(adapter);
}
-static void
-qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay)
+void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
{
if (test_bit(__QLCNIC_AER, &adapter->state))
return;
INIT_DELAYED_WORK(&adapter->fw_work, func);
- queue_delayed_work(qlcnic_wq, &adapter->fw_work,
- round_jiffies_relative(delay));
-}
-
-static void
-qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
-{
- while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- msleep(10);
-
- if (!adapter->fw_work.work.func)
- return;
-
- cancel_delayed_work_sync(&adapter->fw_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work,
+ round_jiffies_relative(delay));
}
static void
@@ -2501,7 +2895,8 @@ qlcnic_attach_work(struct work_struct *work)
u32 npar_state;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
qlcnic_clr_all_drv_state(adapter, 0);
else if (npar_state != QLCNIC_DEV_NPAR_OPER)
@@ -2541,16 +2936,16 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
goto detach;
if (adapter->need_fw_reset)
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, 0);
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_NEED_RESET) {
qlcnic_set_npar_non_operational(adapter);
adapter->need_fw_reset = 1;
} else if (state == QLCNIC_DEV_NEED_QUISCENT)
goto detach;
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
adapter->heartbeat = heartbeat;
adapter->fw_fail_cnt = 0;
@@ -2570,25 +2965,25 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
adapter->flags |= QLCNIC_FW_HANG;
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, 0);
if (qlcnic_auto_fw_reset)
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+ peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
"PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
"PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
"PEG_NET_4_PC: 0x%x\n",
- QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
- QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+ peg_status,
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
- peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "
@@ -2672,17 +3067,39 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
adapter->need_fw_reset = 1;
set_bit(__QLCNIC_START_FW, &adapter->state);
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
QLCDB(adapter, DRV, "Restarting fw\n");
}
qlcnic_api_unlock(adapter);
- err = adapter->nic_ops->start_firmware(adapter);
+ err = qlcnic_start_firmware(adapter);
if (err)
return err;
qlcnic_clr_drv_state(adapter);
- qlcnic_setup_intr(adapter);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = qlcnic_setup_intr(adapter, 0);
+
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ qlcnic_clr_all_drv_state(adapter, 1);
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ goto done;
+ }
+ }
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
@@ -2724,6 +3141,12 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ }
+
qlcnic_detach(adapter);
qlcnic_teardown_intr(adapter);
@@ -2743,12 +3166,13 @@ static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
static void qlcnic_io_resume(struct pci_dev *pdev)
{
+ u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
pci_cleanup_aer_uncorrect_error_status(pdev);
-
- if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
- test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
+ &adapter->state))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
FW_POLL_DELAY);
}
@@ -2781,39 +3205,59 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
+int qlcnic_validate_max_rss(u8 max_hw, u8 val)
{
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_info(netdev, "no msix or msi support, hence no rss\n");
- return -EINVAL;
+ u32 max_allowed;
+
+ if (max_hw > QLC_MAX_SDS_RINGS) {
+ max_hw = QLC_MAX_SDS_RINGS;
+ pr_info("max rss reset to %d\n", QLC_MAX_SDS_RINGS);
}
- if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
- netdev_info(netdev, "rss_ring valid range [2 - %x] in "
- " powers of 2\n", max_hw);
+ max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+ num_online_cpus()));
+ if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
+ pr_info("rss_ring valid range [2 - %x] in powers of 2\n",
+ max_allowed);
return -EINVAL;
}
return 0;
-
}
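A worked example of the clamp introduced above, with assumed (illustrative) values:

/* Assume max_hw = 8 SDS rings and 6 CPUs online:
 *   min_t(int, 8, num_online_cpus()) -> 6
 *   rounddown_pow_of_two(6)          -> 4   (max_allowed)
 * Accepted rss_ring values are then 2 and 4; 1, 3, 6 and 8 are rejected
 * by the (val < 2), !is_power_of_2(val) and (val > max_allowed) checks.
 */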
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
{
+ int err;
struct net_device *netdev = adapter->netdev;
- int err = 0;
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
+
qlcnic_detach(adapter);
+
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
qlcnic_teardown_intr(adapter);
+ err = qlcnic_setup_intr(adapter, data);
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
- if (qlcnic_enable_msix(adapter, data)) {
- netdev_info(netdev, "failed setting max_rss; rss disabled\n");
- qlcnic_enable_msi_legacy(adapter);
+ if (qlcnic_83xx_check(adapter)) {
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ goto done;
+ }
}
if (netif_running(netdev)) {
@@ -2825,6 +3269,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
goto done;
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
+ err = len;
done:
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -2863,8 +3308,7 @@ qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
in_dev_put(indev);
}
-static void
-qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device *dev;
@@ -2872,12 +3316,14 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
qlcnic_config_indev_addr(adapter, netdev, event);
+ rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
dev = __vlan_find_dev_deep(netdev, vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
}
+ rcu_read_unlock();
}
static int qlcnic_netdev_event(struct notifier_block *this,
@@ -2945,9 +3391,11 @@ recheck:
switch (event) {
case NETDEV_UP:
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+
break;
case NETDEV_DOWN:
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+
break;
default:
break;
@@ -2965,11 +3413,10 @@ static struct notifier_block qlcnic_inetaddr_cb = {
.notifier_call = qlcnic_inetaddr_event,
};
#else
-static void
-qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
+void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
-static struct pci_error_handlers qlcnic_err_handler = {
+static const struct pci_error_handlers qlcnic_err_handler = {
.error_detected = qlcnic_io_error_detected,
.slot_reset = qlcnic_io_slot_reset,
.resume = qlcnic_io_resume,
@@ -2995,12 +3442,6 @@ static int __init qlcnic_init_module(void)
printk(KERN_INFO "%s\n", qlcnic_driver_string);
- qlcnic_wq = create_singlethread_workqueue("qlcnic");
- if (qlcnic_wq == NULL) {
- printk(KERN_ERR "qlcnic: cannot create workqueue\n");
- return -ENOMEM;
- }
-
#ifdef CONFIG_INET
register_netdevice_notifier(&qlcnic_netdev_cb);
register_inetaddr_notifier(&qlcnic_inetaddr_cb);
@@ -3012,7 +3453,6 @@ static int __init qlcnic_init_module(void)
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
- destroy_workqueue(qlcnic_wq);
}
return ret;
@@ -3022,14 +3462,12 @@ module_init(qlcnic_init_module);
static void __exit qlcnic_exit_module(void)
{
-
pci_unregister_driver(&qlcnic_driver);
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
- destroy_workqueue(qlcnic_wq);
}
module_exit(qlcnic_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 12ff29270745..abbd22c814a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1,8 +1,25 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include "qlcnic.h"
#include "qlcnic_hdr.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_hw.h"
#include <net/ip.h>
+#define QLC_83XX_MINIDUMP_FLASH 0x520000
+#define QLC_83XX_OCM_INDEX 3
+#define QLC_83XX_PCI_INDEX 0
+
+static const u32 qlcnic_ms_read_data[] = {
+ 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
+};
+
#define QLCNIC_DUMP_WCRB BIT_0
#define QLCNIC_DUMP_RWCRB BIT_1
#define QLCNIC_DUMP_ANDCRB BIT_2
@@ -102,16 +119,55 @@ struct __queue {
u8 rsvd3[2];
} __packed;
+struct __pollrd {
+ u32 sel_addr;
+ u32 read_addr;
+ u32 sel_val;
+ u16 sel_val_stride;
+ u16 no_ops;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 data_size;
+ u8 rsvd[4];
+} __packed;
+
+struct __mux2 {
+ u32 sel_addr1;
+ u32 sel_addr2;
+ u32 sel_val1;
+ u32 sel_val2;
+ u32 no_ops;
+ u32 sel_val_mask;
+ u32 read_addr;
+ u8 sel_val_stride;
+ u8 data_size;
+ u8 rsvd[2];
+} __packed;
+
+struct __pollrdmwr {
+ u32 addr1;
+ u32 addr2;
+ u32 val1;
+ u32 val2;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 mod_mask;
+ u32 data_size;
+} __packed;
+
struct qlcnic_dump_entry {
struct qlcnic_common_entry_hdr hdr;
union {
- struct __crb crb;
- struct __cache cache;
- struct __ocm ocm;
- struct __mem mem;
- struct __mux mux;
- struct __queue que;
- struct __ctrl ctrl;
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ struct __pollrdmwr pollrdmwr;
+ struct __mux2 mux2;
+ struct __pollrd pollrd;
} region;
} __packed;
@@ -131,6 +187,9 @@ enum qlcnic_minidump_opcode {
QLCNIC_DUMP_L2_ITAG = 22,
QLCNIC_DUMP_L2_DATA = 23,
QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_POLL_RD = 35,
+ QLCNIC_READ_MUX2 = 36,
+ QLCNIC_READ_POLLRDMWR = 37,
QLCNIC_DUMP_READ_ROM = 71,
QLCNIC_DUMP_READ_MEM = 72,
QLCNIC_DUMP_READ_CTRL = 98,
@@ -144,46 +203,17 @@ struct qlcnic_dump_operations {
__le32 *);
};
-static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
-{
- u32 dest;
- void __iomem *window_reg;
-
- dest = addr & 0xFFFF0000;
- window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
- writel(dest, window_reg);
- readl(window_reg);
- window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
- *data = readl(window_reg);
-}
-
-static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
-{
- u32 dest;
- void __iomem *window_reg;
-
- dest = addr & 0xFFFF0000;
- window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
- writel(dest, window_reg);
- readl(window_reg);
- window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
- writel(data, window_reg);
- readl(window_reg);
-}
-
-/* FW dump related functions */
static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i;
u32 addr, data;
struct __crb *crb = &entry->region.crb;
- void __iomem *base = adapter->ahw->pci_base0;
addr = crb->addr;
for (i = 0; i < crb->no_ops; i++) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(addr);
*buffer++ = cpu_to_le32(data);
addr += crb->stride;
@@ -195,9 +225,8 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i, k, timeout = 0;
- void __iomem *base = adapter->ahw->pci_base0;
u32 addr, data;
- u8 opcode, no_ops;
+ u8 no_ops;
struct __ctrl *ctr = &entry->region.ctrl;
struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
@@ -206,34 +235,33 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
for (i = 0; i < no_ops; i++) {
k = 0;
- opcode = 0;
for (k = 0; k < 8; k++) {
if (!(ctr->opcode & (1 << k)))
continue;
switch (1 << k) {
case QLCNIC_DUMP_WCRB:
- qlcnic_write_dump_reg(addr, base, ctr->val1);
+ qlcnic_ind_wr(adapter, addr, ctr->val1);
break;
case QLCNIC_DUMP_RWCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base, data);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_ANDCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base,
- data & ctr->val2);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data & ctr->val2));
break;
case QLCNIC_DUMP_ORCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base,
- data | ctr->val3);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data | ctr->val3));
break;
case QLCNIC_DUMP_POLLCRB:
while (timeout <= ctr->timeout) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
if ((data & ctr->val2) == ctr->val1)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout++;
}
if (timeout > ctr->timeout) {
@@ -245,7 +273,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
case QLCNIC_DUMP_RD_SAVE:
if (ctr->index_a)
addr = t_hdr->saved_state[ctr->index_a];
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
t_hdr->saved_state[ctr->index_v] = data;
break;
case QLCNIC_DUMP_WRT_SAVED:
@@ -255,7 +283,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
data = ctr->val1;
if (ctr->index_a)
addr = t_hdr->saved_state[ctr->index_a];
- qlcnic_write_dump_reg(addr, base, data);
+ qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_MOD_SAVE_ST:
data = t_hdr->saved_state[ctr->index_v];
@@ -284,12 +312,11 @@ static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
int loop;
u32 val, data = 0;
struct __mux *mux = &entry->region.mux;
- void __iomem *base = adapter->ahw->pci_base0;
val = mux->val;
for (loop = 0; loop < mux->no_ops; loop++) {
- qlcnic_write_dump_reg(mux->addr, base, val);
- qlcnic_read_dump_reg(mux->read_addr, base, &data);
+ qlcnic_ind_wr(adapter, mux->addr, val);
+ data = qlcnic_ind_rd(adapter, mux->read_addr);
*buffer++ = cpu_to_le32(val);
*buffer++ = cpu_to_le32(data);
val += mux->val_stride;
@@ -302,17 +329,16 @@ static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
{
int i, loop;
u32 cnt, addr, data, que_id = 0;
- void __iomem *base = adapter->ahw->pci_base0;
struct __queue *que = &entry->region.que;
addr = que->read_addr;
cnt = que->read_addr_cnt;
for (loop = 0; loop < que->no_ops; loop++) {
- qlcnic_write_dump_reg(que->sel_addr, base, que_id);
+ qlcnic_ind_wr(adapter, que->sel_addr, que_id);
addr = que->read_addr;
for (i = 0; i < cnt; i++) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += que->read_addr_stride;
}
@@ -344,27 +370,27 @@ static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
int i, count = 0;
u32 fl_addr, size, val, lck_val, addr;
struct __mem *rom = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
fl_addr = rom->addr;
- size = rom->size/4;
+ size = rom->size / 4;
lock_try:
- lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+ lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
if (!lck_val && count < MAX_CTL_CHECK) {
- msleep(10);
+ usleep_range(10000, 11000);
count++;
goto lock_try;
}
- writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ adapter->ahw->pci_func);
for (i = 0; i < size; i++) {
addr = fl_addr & 0xFFFF0000;
- qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
+ qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
addr = LSW(fl_addr) + FLASH_ROM_DATA;
- qlcnic_read_dump_reg(addr, base, &val);
+ val = qlcnic_ind_rd(adapter, addr);
fl_addr += 4;
*buffer++ = cpu_to_le32(val);
}
- readl(base + QLCNIC_FLASH_SEM2_ULK);
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
return rom->size;
}
@@ -373,18 +399,17 @@ static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
{
int i;
u32 cnt, val, data, addr;
- void __iomem *base = adapter->ahw->pci_base0;
struct __cache *l1 = &entry->region.cache;
val = l1->init_tag_val;
for (i = 0; i < l1->no_ops; i++) {
- qlcnic_write_dump_reg(l1->addr, base, val);
- qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+ qlcnic_ind_wr(adapter, l1->addr, val);
+ qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
addr = l1->read_addr;
cnt = l1->read_addr_num;
while (cnt) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l1->read_addr_stride;
cnt--;
@@ -400,7 +425,6 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
int i;
u32 cnt, val, data, addr;
u8 poll_mask, poll_to, time_out = 0;
- void __iomem *base = adapter->ahw->pci_base0;
struct __cache *l2 = &entry->region.cache;
val = l2->init_tag_val;
@@ -408,17 +432,17 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
poll_to = MSB(MSW(l2->ctrl_val));
for (i = 0; i < l2->no_ops; i++) {
- qlcnic_write_dump_reg(l2->addr, base, val);
+ qlcnic_ind_wr(adapter, l2->addr, val);
if (LSW(l2->ctrl_val))
- qlcnic_write_dump_reg(l2->ctrl_addr, base,
- LSW(l2->ctrl_val));
+ qlcnic_ind_wr(adapter, l2->ctrl_addr,
+ LSW(l2->ctrl_val));
if (!poll_mask)
goto skip_poll;
do {
- qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
+ data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
if (!(data & poll_mask))
break;
- msleep(1);
+ usleep_range(1000, 2000);
time_out++;
} while (time_out <= poll_to);
@@ -432,7 +456,7 @@ skip_poll:
addr = l2->read_addr;
cnt = l2->read_addr_num;
while (cnt) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l2->read_addr_stride;
cnt--;
@@ -448,7 +472,6 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
u32 addr, data, test, ret = 0;
int i, reg_read;
struct __mem *mem = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
reg_read = mem->size;
addr = mem->addr;
@@ -463,13 +486,12 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
mutex_lock(&adapter->ahw->mem_lock);
while (reg_read != 0) {
- qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
- qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
- qlcnic_write_dump_reg(MIU_TEST_CTR, base,
- TA_CTL_ENABLE | TA_CTL_START);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
for (i = 0; i < MAX_CTL_CHECK; i++) {
- qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
+ test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
if (!(test & TA_CTL_BUSY))
break;
}
@@ -482,8 +504,7 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
}
}
for (i = 0; i < 4; i++) {
- qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
- &data);
+ data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
*buffer++ = cpu_to_le32(data);
}
addr += 16;
@@ -502,48 +523,388 @@ static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
return 0;
}
-static const struct qlcnic_dump_operations fw_dump_ops[] = {
- { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
- { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
- { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
- { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
- { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
- { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
- { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
- { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
-};
-
-/* Walk the template and collect dump for each entry in the dump template */
-static int
-qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
- u32 size)
+static int qlcnic_valid_dump_entry(struct device *dev,
+ struct qlcnic_dump_entry *entry, u32 size)
{
int ret = 1;
if (size != entry->hdr.cap_size) {
- dev_info(dev,
- "Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
- entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
- dev_info(dev, "Aborting further dump capture\n");
+ dev_err(dev,
+ "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size,
+ entry->hdr.cap_size);
ret = 0;
}
return ret;
}
+static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry,
+ __le32 *buffer)
+{
+ struct __pollrdmwr *poll = &entry->region.pollrdmwr;
+ u32 data, wait_count, poll_wait, temp;
+
+ poll_wait = poll->poll_wait;
+
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((data & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
+ qlcnic_ind_wr(adapter, poll->addr2, data);
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ temp = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((temp & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ *buffer++ = cpu_to_le32(poll->addr2);
+ *buffer++ = cpu_to_le32(data);
+
+ return 2 * sizeof(u32);
+
+}
+
+static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __pollrd *pollrd = &entry->region.pollrd;
+ u32 data, wait_count, poll_wait, sel_val;
+ int i;
+
+ poll_wait = pollrd->poll_wait;
+ sel_val = pollrd->sel_val;
+
+ for (i = 0; i < pollrd->no_ops; i++) {
+ qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
+ wait_count = 0;
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
+ if ((data & pollrd->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, pollrd->read_addr);
+ *buffer++ = cpu_to_le32(sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val += pollrd->sel_val_stride;
+ }
+ return pollrd->no_ops * (2 * sizeof(u32));
+}
+
+static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __mux2 *mux2 = &entry->region.mux2;
+ u32 data;
+ u32 t_sel_val, sel_val1, sel_val2;
+ int i;
+
+ sel_val1 = mux2->sel_val1;
+ sel_val2 = mux2->sel_val2;
+
+ for (i = 0; i < mux2->no_ops; i++) {
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val1 += mux2->sel_val_stride;
+ sel_val2 += mux2->sel_val_stride;
+ }
+
+ return mux2->no_ops * (4 * sizeof(u32));
+}
+
+static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ u32 fl_addr, size;
+ struct __mem *rom = &entry->region.mem;
+
+ fl_addr = rom->addr;
+ size = rom->size / 4;
+
+ if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
+ (u8 *)buffer, size))
+ return rom->size;
+
+ return 0;
+}
+
+static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
+ {QLCNIC_READ_MUX2, qlcnic_read_mux2},
+ {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
+{
+ uint64_t sum = 0;
+ int count = temp_size / sizeof(uint32_t);
+ while (count-- > 0)
+ sum += *temp_buffer++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+ return ~sum;
+}
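A standalone sketch of the checksum rule used by qlcnic_temp_checksum(): the template's 32-bit words, including the embedded checksum field, sum (with end-around carry) to 0xFFFFFFFF when the header is intact, so the complemented result is zero. Hypothetical userspace helper, not part of the driver:

#include <stdint.h>
#include <stddef.h>

static uint32_t temp_checksum(const uint32_t *buf, size_t bytes)
{
	uint64_t sum = 0;
	size_t count = bytes / sizeof(uint32_t);

	while (count-- > 0)
		sum += *buf++;
	while (sum >> 32)			/* fold carries back in */
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return ~(uint32_t)sum;			/* 0 => template valid */
}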
+
+static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u8 *buffer, u32 size)
+{
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ buffer, size / sizeof(u32));
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return ret;
+}
+
+static int
+qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ (u8 *)&tmp_hdr, size);
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ cmd->rsp.arg[2] = tmp_hdr.size;
+ cmd->rsp.arg[3] = tmp_hdr.version;
+
+ return ret;
+}
+
+static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ u32 *version, u32 *temp_size,
+ u8 *use_flash_temp)
+{
+ int err = 0;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
+ return -ENOMEM;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
+ qlcnic_free_mbx_args(&cmd);
+ return -EIO;
+ }
+ *use_flash_temp = 1;
+ }
+
+ *temp_size = cmd.rsp.arg[2];
+ *version = cmd.rsp.arg[3];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (!(*temp_size))
+ return -EIO;
+
+ return 0;
+}
+
+static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u32 *buffer, u32 temp_size)
+{
+ int err = 0, i;
+ void *tmp_addr;
+ __le32 *tmp_buf;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t tmp_addr_t = 0;
+
+ tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+ &tmp_addr_t, GFP_KERNEL);
+ if (!tmp_addr) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get memory for FW dump template\n");
+ return -ENOMEM;
+ }
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ cmd.req.arg[1] = LSD(tmp_addr_t);
+ cmd.req.arg[2] = MSD(tmp_addr_t);
+ cmd.req.arg[3] = temp_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ tmp_buf = tmp_addr;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < temp_size / sizeof(u32); i++)
+ *buffer++ = __le32_to_cpu(*tmp_buf++);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+free_mem:
+ dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+
+ return err;
+}
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp_size = 0;
+ u32 version, csum, *tmp_buf;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_dump_template_hdr *tmpl_hdr;
+ u8 use_flash_temp = 0;
+
+ ahw = adapter->ahw;
+
+ err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
+ &use_flash_temp);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get template size %d\n", err);
+ return -EIO;
+ }
+
+ ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
+ if (!ahw->fw_dump.tmpl_hdr)
+ return -ENOMEM;
+
+ tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
+ if (use_flash_temp)
+ goto flash_temp;
+
+ err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
+
+ if (err) {
+flash_temp:
+ err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
+ temp_size);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get minidump template header %d\n",
+ err);
+ vfree(ahw->fw_dump.tmpl_hdr);
+ ahw->fw_dump.tmpl_hdr = NULL;
+ return -EIO;
+ }
+ }
+
+ csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
+
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ vfree(ahw->fw_dump.tmpl_hdr);
+ ahw->fw_dump.tmpl_hdr = NULL;
+ return -EIO;
+ }
+
+ tmpl_hdr = ahw->fw_dump.tmpl_hdr;
+ tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ ahw->fw_dump.enable = 1;
+
+ return 0;
+}
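A condensed view of the retrieval order implemented above (mailbox command first, flash fallback); a sketch for reference only, with the error handling and logging of the real function trimmed:

static int example_get_template(struct qlcnic_adapter *adapter,
				u32 *buf, u32 size, bool use_flash)
{
	int err = -EIO;

	if (!use_flash)
		err = __qlcnic_fw_cmd_get_minidump_temp(adapter, buf, size);
	if (err)
		err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)buf,
							size);
	return err;
}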
+
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
__le32 *buffer;
+ u32 ocm_window;
char mesg[64];
char *msg[] = {mesg, NULL};
int i, k, ops_cnt, ops_index, dump_size = 0;
@@ -551,12 +912,23 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
struct qlcnic_dump_entry *entry;
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+ static const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_hardware_context *ahw;
+
+ ahw = adapter->ahw;
+
+ if (!fw_dump->enable) {
+ dev_info(&adapter->pdev->dev, "Dump not enabled\n");
+ return -EIO;
+ }
if (fw_dump->clr) {
dev_info(&adapter->pdev->dev,
"Previous dump not cleared, not capturing dump\n");
return -EIO;
}
+
+ netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
/* Calculate the size for dump data area only */
for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
if (i & tmpl_hdr->drv_cap_mask)
@@ -565,20 +937,27 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
return -EIO;
fw_dump->data = vzalloc(dump_size);
- if (!fw_dump->data) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate (%d KB) for fw dump\n",
- dump_size / 1024);
+ if (!fw_dump->data)
return -ENOMEM;
- }
+
buffer = fw_dump->data;
fw_dump->size = dump_size;
no_entries = tmpl_hdr->num_entries;
- ops_cnt = ARRAY_SIZE(fw_dump_ops);
entry_offset = tmpl_hdr->offset;
tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
tmpl_hdr->sys_info[1] = adapter->fw_version;
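+ /* Select the dump handler table for the adapter family; 83xx also saves its OCM window and PCI function in the template state */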
+ if (qlcnic_82xx_check(adapter)) {
+ ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
+ fw_dump_ops = qlcnic_fw_dump_ops;
+ } else {
+ ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
+ fw_dump_ops = qlcnic_83xx_fw_dump_ops;
+ ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
+ tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ }
+
for (i = 0; i < no_entries; i++) {
entry = (void *)tmpl_hdr + entry_offset;
if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
@@ -586,6 +965,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
entry_offset += entry->hdr.offset;
continue;
}
+
/* Find the handler for this entry */
ops_index = 0;
while (ops_index < ops_cnt) {
@@ -593,16 +973,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
break;
ops_index++;
}
+
if (ops_index == ops_cnt) {
dev_info(&adapter->pdev->dev,
"Invalid entry type %d, exiting dump\n",
entry->hdr.type);
goto error;
}
+
/* Collect dump for this entry */
dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
- if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
- dump))
+ if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
@@ -617,8 +998,8 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
adapter->netdev->name);
- dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
- fw_dump->size);
+ dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
+ adapter->netdev->name, fw_dump->size);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
return 0;
@@ -627,3 +1008,21 @@ error:
vfree(fw_dump->data);
return -EINVAL;
}
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+{
+ u32 prev_version, current_version;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+ struct pci_dev *pdev = adapter->pdev;
+
+ prev_version = adapter->fw_version;
+ current_version = qlcnic_83xx_get_fw_version(adapter);
+
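+ /* Re-fetch the dump template if none is cached or the firmware version has increased */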
+ if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
+ if (fw_dump->tmpl_hdr)
+ vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev, "Supports FW dump capability\n");
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 341d37c867ff..987fb6f8adc3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1,8 +1,16 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
@@ -13,6 +21,10 @@
#include <linux/aer.h>
#include <linux/log2.h>
+#include <linux/sysfs.h>
+
+#define QLC_STATUS_UNSUPPORTED_CMD -2
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
@@ -40,7 +52,7 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev,
if (strict_strtoul(buf, 2, &new))
goto err_out;
- if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
ret = len;
err_out:
@@ -80,9 +92,7 @@ static ssize_t qlcnic_show_diag_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n",
- !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+ return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED));
}
static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
@@ -111,10 +121,11 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
const char *buf, size_t len)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- int max_sds_rings = adapter->max_sds_rings;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, max_sds_rings = adapter->max_sds_rings;
u16 beacon;
u8 b_state, b_rate;
- int err;
+ unsigned long h_beacon;
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
dev_warn(dev,
@@ -122,6 +133,41 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
return -EOPNOTSUPP;
}
+ if (qlcnic_83xx_check(adapter) &&
+ !test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ if (kstrtoul(buf, 2, &h_beacon))
+ return -EINVAL;
+
+ if (ahw->beacon_state == h_beacon)
+ return len;
+
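+ /* 83xx: program the beacon LED directly under rtnl, using __QLCNIC_LED_ENABLE to guard against concurrent users */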
+ rtnl_lock();
+ if (!ahw->beacon_state) {
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE,
+ &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+ }
+ if (h_beacon) {
+ err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
+ if (err)
+ goto beacon_err;
+ } else {
+ err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
+ if (err)
+ goto beacon_err;
+ }
+ /* set the current beacon state */
+ ahw->beacon_state = h_beacon;
+beacon_err:
+ if (!ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ rtnl_unlock();
+ return len;
+ }
+
if (len != sizeof(u16))
return QL_STATUS_INVALID_PARAM;
@@ -154,11 +200,10 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
}
err = qlcnic_config_led(adapter, b_state, b_rate);
-
- if (!err) {
+ if (!err)
err = len;
- adapter->ahw->beacon_state = b_state;
- }
+ else
+ ahw->beacon_state = b_state;
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
@@ -207,21 +252,13 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
+ qlcnic_read_crb(adapter, buf, offset, size);
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
- memcpy(buf, &qmdata, size);
- } else {
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
- }
return size;
}
@@ -231,21 +268,13 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- memcpy(&qmdata, buf, size);
- qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
- } else {
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
- }
+ qlcnic_write_crb(adapter, buf, offset, size);
return size;
}
@@ -303,33 +332,44 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
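+/* Map a PCI function number to its index in adapter->npars[]; returns -1 if it is not an active NIC function */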
+static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ int i;
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ if (adapter->npars[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -1;
+}
+
static int validate_pm_config(struct qlcnic_adapter *adapter,
struct qlcnic_pm_func_cfg *pm_cfg, int count)
{
- u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func;
- int i;
+ u8 src_pci_func, s_esw_id, d_esw_id;
+ u8 dest_pci_func;
+ int i, src_index, dest_index;
for (i = 0; i < count; i++) {
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
- if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
- dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
+ src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
- if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+ if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
- if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+ dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
+ if (dest_index < 0)
return QL_STATUS_INVALID_PARAM;
- s_esw_id = adapter->npars[src_pci_func].phy_port;
- d_esw_id = adapter->npars[dest_pci_func].phy_port;
+ s_esw_id = adapter->npars[src_index].phy_port;
+ d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
return QL_STATUS_INVALID_PARAM;
}
- return 0;
+ return 0;
}
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
@@ -342,7 +382,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u32 id, action, pci_func;
- int count, rem, i, ret;
+ int count, rem, i, ret, index;
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
@@ -350,26 +390,32 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
return QL_STATUS_INVALID_PARAM;
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
-
ret = validate_pm_config(adapter, pm_cfg, count);
+
if (ret)
return ret;
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
action = !!pm_cfg[i].action;
- id = adapter->npars[pci_func].phy_port;
- ret = qlcnic_config_port_mirroring(adapter, id, action,
- pci_func);
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ id = adapter->npars[index].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id,
+ action, pci_func);
if (ret)
return ret;
}
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
- id = adapter->npars[pci_func].phy_port;
- adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
- adapter->npars[pci_func].dest_npar = id;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ id = adapter->npars[index].phy_port;
+ adapter->npars[index].enable_pm = !!pm_cfg[i].action;
+ adapter->npars[index].dest_npar = id;
}
+
return size;
}
@@ -383,16 +429,19 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
int i;
+ u8 pci_func;
if (size != sizeof(pm_cfg))
return QL_STATUS_INVALID_PARAM;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- pm_cfg[i].action = adapter->npars[i].enable_pm;
- pm_cfg[i].dest_npar = 0;
- pm_cfg[i].pci_func = i;
+ memset(&pm_cfg, 0,
+ sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
+ pm_cfg[pci_func].dest_npar = 0;
+ pm_cfg[pci_func].pci_func = i;
}
memcpy(buf, &pm_cfg, size);
@@ -404,24 +453,33 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
{
u32 op_mode;
u8 pci_func;
- int i;
+ int i, ret;
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ if (qlcnic_82xx_check(adapter))
+ op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ else
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
- }
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
- if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
- QLCNIC_NON_PRIV_FUNC) {
+ if (qlcnic_82xx_check(adapter)) {
+ ret = QLC_DEV_GET_DRV(op_mode, pci_func);
+ } else {
+ ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ pci_func);
+ esw_cfg[i].offload_flags = 0;
+ }
+
+ if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
return QL_STATUS_INVALID_PARAM;
if (esw_cfg[i].mac_override != 1)
@@ -444,6 +502,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
return QL_STATUS_INVALID_PARAM;
}
}
+
return 0;
}
@@ -458,7 +517,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct qlcnic_esw_func_cfg *esw_cfg;
struct qlcnic_npar_info *npar;
int count, rem, i, ret;
- u8 pci_func, op_mode = 0;
+ int index;
+ u8 op_mode = 0, pci_func;
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
@@ -471,10 +531,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
return ret;
for (i = 0; i < count; i++) {
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
- }
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -503,7 +562,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- npar = &adapter->npars[pci_func];
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
npar->promisc_mode = esw_cfg[i].promisc_mode;
@@ -533,18 +593,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
- u8 i;
+ u8 i, pci_func;
if (size != sizeof(esw_cfg))
return QL_STATUS_INVALID_PARAM;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- esw_cfg[i].pci_func = i;
- if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+ memset(&esw_cfg, 0,
+ sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ esw_cfg[pci_func].pci_func = pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
+
memcpy(buf, &esw_cfg, size);
return size;
@@ -558,10 +621,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
@@ -581,7 +641,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_info nic_info;
struct qlcnic_npar_func_cfg *np_cfg;
- int i, count, rem, ret;
+ int i, count, rem, ret, index;
u8 pci_func;
count = size / sizeof(struct qlcnic_npar_func_cfg);
@@ -594,8 +654,10 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (ret)
return ret;
- for (i = 0; i < count ; i++) {
+ for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (ret)
return ret;
@@ -605,12 +667,12 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
ret = qlcnic_set_nic_info(adapter, &nic_info);
if (ret)
return ret;
- adapter->npars[i].min_bw = nic_info.min_tx_bw;
- adapter->npars[i].max_bw = nic_info.max_tx_bw;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ adapter->npars[index].min_bw = nic_info.min_tx_bw;
+ adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
return size;
-
}
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
@@ -628,8 +690,12 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
if (size != sizeof(np_cfg))
return QL_STATUS_INVALID_PARAM;
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ memset(&np_cfg, 0,
+ sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
@@ -644,6 +710,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
}
+
memcpy(buf, &np_cfg, size);
return size;
}
@@ -659,6 +726,9 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct qlcnic_esw_statistics port_stats;
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
@@ -691,6 +761,9 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct qlcnic_esw_statistics esw_stats;
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
@@ -722,6 +795,9 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
return QL_STATUS_INVALID_PARAM;
@@ -744,10 +820,14 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
char *buf, loff_t offset,
size_t size)
{
+
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (offset >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
@@ -789,7 +869,10 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return ret;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
+ memset(&pci_cfg, 0,
+ sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].port_num = pci_info[i].default_port;
@@ -797,6 +880,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
+
memcpy(buf, &pci_cfg, size);
kfree(pci_info);
return size;
@@ -897,7 +981,6 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -911,9 +994,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- return;
-
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
if (device_create_file(dev, &dev_attr_beacon))
@@ -936,7 +1016,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -945,8 +1024,6 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- return;
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_file(dev, &dev_attr_beacon);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -958,3 +1035,23 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_bin_file(dev, &bin_attr_pm_config);
device_remove_bin_file(dev, &bin_attr_esw_stats);
}
+
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
+
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f80cd975daed..b13ab544a7eb 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2920,14 +2920,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate small buffer queue control blocks.
*/
- rx_ring->sbq =
- kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue control block allocation failed.\n");
+ rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->sbq == NULL)
goto err_mem;
- }
ql_init_sbq_ring(qdev, rx_ring);
}
@@ -2948,14 +2945,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate large buffer queue control blocks.
*/
- rx_ring->lbq =
- kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue control block allocation failed.\n");
+ rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->lbq == NULL)
goto err_mem;
- }
ql_init_lbq_ring(qdev, rx_ring);
}
@@ -4572,7 +4566,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->mpi_coredump =
vmalloc(sizeof(struct ql_mpi_coredump));
if (qdev->mpi_coredump == NULL) {
- dev_err(&pdev->dev, "Coredump alloc failed.\n");
err = -ENOMEM;
goto err_out2;
}
@@ -4586,7 +4579,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
goto err_out2;
}
- memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
/* Keep local copy of current mac address. */
memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
@@ -4678,7 +4670,7 @@ static int qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
ndev->features = ndev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
diff --git a/drivers/net/ethernet/racal/Kconfig b/drivers/net/ethernet/racal/Kconfig
deleted file mode 100644
index 01969e0a9c68..000000000000
--- a/drivers/net/ethernet/racal/Kconfig
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Racal-Interlan device configuration
-#
-
-config NET_VENDOR_RACAL
- bool "Racal-Interlan (Micom) NI devices"
- default y
- depends on ISA
- ---help---
- If you have a network (Ethernet) card belonging to this class, such
- as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about NI cards. If you say Y, you will be asked for
- your specific card in the following questions.
-
-if NET_VENDOR_RACAL
-
-config NI5010
- tristate "NI5010 support (EXPERIMENTAL)"
- depends on ISA && EXPERIMENTAL && BROKEN_ON_SMP
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that this is still
- experimental code.
-
- To compile this driver as a module, choose M here. The module
- will be called ni5010.
-
-endif # NET_VENDOR_RACAL
diff --git a/drivers/net/ethernet/racal/Makefile b/drivers/net/ethernet/racal/Makefile
deleted file mode 100644
index 1e210ca1d78b..000000000000
--- a/drivers/net/ethernet/racal/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Racal-Interlan network device drivers.
-#
-
-obj-$(CONFIG_NI5010) += ni5010.o
diff --git a/drivers/net/ethernet/racal/ni5010.c b/drivers/net/ethernet/racal/ni5010.c
deleted file mode 100644
index 807982220050..000000000000
--- a/drivers/net/ethernet/racal/ni5010.c
+++ /dev/null
@@ -1,771 +0,0 @@
-/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard.
- *
- * Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * The authors may be reached as:
- * janpascal@vanbest.org andi@lisas.de
- *
- * Sources:
- * Donald Becker's "skeleton.c"
- * Crynwr ni5010 packet driver
- *
- * Changes:
- * v0.0: First test version
- * v0.1: First working version
- * v0.2:
- * v0.3->v0.90: Now demand setting io and irq when loading as module
- * 970430 v0.91: modified for Linux 2.1.14
- * v0.92: Implemented Andreas' (better) NI5010 probe
- * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB)
- * 970623 v1.00: First kernel version (AM)
- * 970814 v1.01: Added detection of onboard receive buffer size (AM)
- * 060611 v1.02: slight cleanup: email addresses, driver modernization.
- * Bugs:
- * - not SMP-safe (no locking of I/O accesses)
- * - Note that you have to patch ifconfig for the new /proc/net/dev
- * format. It gives incorrect stats otherwise.
- *
- * To do:
- * Fix all bugs :-)
- * Move some stuff to chipset_init()
- * Handle xmt errors other than collisions
- * Complete merge with Andreas' driver
- * Implement ring buffers (Is this useful? You can't squeeze
- * too many packet in a 2k buffer!)
- * Implement DMA (Again, is this useful? Some docs say DMA is
- * slower than programmed I/O)
- *
- * Compile with:
- * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ \
- * -DMODULE -c ni5010.c
- *
- * Insert with e.g.:
- * insmod ni5010.ko io=0x300 irq=5
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include "ni5010.h"
-
-static const char boardname[] = "NI5010";
-static char version[] __initdata =
- "ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n";
-
-/* bufsize_rcv == 0 means autoprobing */
-static unsigned int bufsize_rcv;
-
-#define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */
-#undef JUMPERED_DMA /* No DMA used */
-#undef FULL_IODETECT /* Only detect in portlist */
-
-#ifndef FULL_IODETECT
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int ports[] __initdata =
- { 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0 };
-#endif
-
-/* Use 0 for production, 1 for verification, >2 for debug */
-#ifndef NI5010_DEBUG
-#define NI5010_DEBUG 0
-#endif
-
-/* Information that needs to be kept for each board. */
-struct ni5010_local {
- int o_pkt_size;
- spinlock_t lock;
-};
-
-/* Index to functions, as function prototypes. */
-
-static int ni5010_probe1(struct net_device *dev, int ioaddr);
-static int ni5010_open(struct net_device *dev);
-static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t ni5010_interrupt(int irq, void *dev_id);
-static void ni5010_rx(struct net_device *dev);
-static void ni5010_timeout(struct net_device *dev);
-static int ni5010_close(struct net_device *dev);
-static void ni5010_set_multicast_list(struct net_device *dev);
-static void reset_receiver(struct net_device *dev);
-
-static int process_xmt_interrupt(struct net_device *dev);
-#define tx_done(dev) 1
-static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad);
-static void chipset_init(struct net_device *dev, int startp);
-static void dump_packet(void *buf, int len);
-static void ni5010_show_registers(struct net_device *dev);
-
-static int io;
-static int irq;
-
-struct net_device * __init ni5010_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct ni5010_local));
- int *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- }
-
- PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name));
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = ni5010_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
-#ifdef FULL_IODETECT
- for (io=0x200; io<0x400 && ni5010_probe1(dev, io) ; io+=0x20)
- ;
- if (io == 0x400)
- err = -ENODEV;
-
-#else
- for (port = ports; *port && ni5010_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
-#endif /* FULL_IODETECT */
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, NI5010_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static inline int rd_port(int ioaddr)
-{
- inb(IE_RBUF);
- return inb(IE_SAPROM);
-}
-
-static void __init trigger_irq(int ioaddr)
-{
- outb(0x00, EDLC_RESET); /* Clear EDLC hold RESET state */
- outb(0x00, IE_RESET); /* Board reset */
- outb(0x00, EDLC_XMASK); /* Disable all Xmt interrupts */
- outb(0x00, EDLC_RMASK); /* Disable all Rcv interrupt */
- outb(0xff, EDLC_XCLR); /* Clear all pending Xmt interrupts */
- outb(0xff, EDLC_RCLR); /* Clear all pending Rcv interrupts */
- /*
- * Transmit packet mode: Ignore parity, Power xcvr,
- * Enable loopback
- */
- outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
- outb(RMD_BROADCAST, EDLC_RMODE); /* Receive normal&broadcast */
- outb(XM_ALL, EDLC_XMASK); /* Enable all Xmt interrupts */
- udelay(50); /* FIXME: Necessary? */
- outb(MM_EN_XMT|MM_MUX, IE_MMODE); /* Start transmission */
-}
-
-static const struct net_device_ops ni5010_netdev_ops = {
- .ndo_open = ni5010_open,
- .ndo_stop = ni5010_close,
- .ndo_start_xmit = ni5010_send_packet,
- .ndo_set_rx_mode = ni5010_set_multicast_list,
- .ndo_tx_timeout = ni5010_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-};
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probes avoids doing writes, and
- * verifies that the correct device exists and functions.
- */
-
-static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned version_printed;
- struct ni5010_local *lp;
- int i;
- unsigned int data = 0;
- int boguscount = 40;
- int err = -ENODEV;
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- if (!request_region(ioaddr, NI5010_IO_EXTENT, boardname))
- return -EBUSY;
-
- /*
- * This is no "official" probe method, I've rather tested which
- * probe works best with my seven NI5010 cards
- * (they have very different serial numbers)
- * Suggestions or failure reports are very, very welcome !
- * But I think it is a relatively good probe method
- * since it doesn't use any "outb"
- * It should be nearly 100% reliable !
- * well-known WARNING: this probe method (like many others)
- * will hang the system if a NE2000 card region is probed !
- *
- * - Andreas
- */
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_probe1(%#3x)\n",
- dev->name, ioaddr));
-
- if (inb(ioaddr+0) == 0xff)
- goto out;
-
- while ( (rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr) &
- rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr)) != 0xff)
- {
- if (boguscount-- == 0)
- goto out;
- }
-
- PRINTK2((KERN_DEBUG "%s: I/O #1 passed!\n", dev->name));
-
- for (i=0; i<32; i++)
- if ( (data = rd_port(ioaddr)) != 0xff) break;
- if (data==0xff)
- goto out;
-
- PRINTK2((KERN_DEBUG "%s: I/O #2 passed!\n", dev->name));
-
- if ((data != SA_ADDR0) || (rd_port(ioaddr) != SA_ADDR1) ||
- (rd_port(ioaddr) != SA_ADDR2))
- goto out;
-
- for (i=0; i<4; i++)
- rd_port(ioaddr);
-
- if ( (rd_port(ioaddr) != NI5010_MAGICVAL1) ||
- (rd_port(ioaddr) != NI5010_MAGICVAL2) )
- goto out;
-
- PRINTK2((KERN_DEBUG "%s: I/O #3 passed!\n", dev->name));
-
- if (NI5010_DEBUG && version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
- printk("NI5010 ethercard probe at 0x%x: ", ioaddr);
-
- dev->base_addr = ioaddr;
-
- for (i=0; i<6; i++) {
- outw(i, IE_GP);
- dev->dev_addr[i] = inb(IE_SAPROM);
- }
- printk("%pM ", dev->dev_addr);
-
- PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));
-
-#ifdef JUMPERED_INTERRUPTS
- if (dev->irq == 0xff)
- ;
- else if (dev->irq < 2) {
- unsigned long irq_mask;
-
- PRINTK2((KERN_DEBUG "%s: I/O #5 passed!\n", dev->name));
-
- irq_mask = probe_irq_on();
- trigger_irq(ioaddr);
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
-
- PRINTK2((KERN_DEBUG "%s: I/O #6 passed!\n", dev->name));
-
- if (dev->irq == 0) {
- err = -EAGAIN;
- printk(KERN_WARNING "%s: no IRQ found!\n", dev->name);
- goto out;
- }
- PRINTK2((KERN_DEBUG "%s: I/O #7 passed!\n", dev->name));
- } else if (dev->irq == 2) {
- dev->irq = 9;
- }
-#endif /* JUMPERED_INTERRUPTS */
- PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name));
-
- /* DMA is not supported (yet?), so no use detecting it */
- lp = netdev_priv(dev);
-
- spin_lock_init(&lp->lock);
-
- PRINTK2((KERN_DEBUG "%s: I/O #10 passed!\n", dev->name));
-
-/* get the size of the onboard receive buffer
- * higher addresses than bufsize are wrapped into real buffer
- * i.e. data for offs. 0x801 is written to 0x1 with a 2K onboard buffer
- */
- if (!bufsize_rcv) {
- outb(1, IE_MMODE); /* Put Rcv buffer on system bus */
- outw(0, IE_GP); /* Point GP at start of packet */
- outb(0, IE_RBUF); /* set buffer byte 0 to 0 */
- for (i = 1; i < 0xff; i++) {
- outw(i << 8, IE_GP); /* Point GP at packet size to be tested */
- outb(i, IE_RBUF);
- outw(0x0, IE_GP); /* Point GP at start of packet */
- data = inb(IE_RBUF);
- if (data == i) break;
- }
- bufsize_rcv = i << 8;
- outw(0, IE_GP); /* Point GP at start of packet */
- outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */
- }
- printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
-
- dev->netdev_ops = &ni5010_netdev_ops;
- dev->watchdog_timeo = HZ/20;
-
- dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
-
- /* Shut up the ni5010 */
- outb(0, EDLC_RMASK); /* Mask all receive interrupts */
- outb(0, EDLC_XMASK); /* Mask all xmit interrupts */
- outb(0xff, EDLC_RCLR); /* Kill all pending rcv interrupts */
- outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */
-
- printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq);
- if (dev->dma)
- printk(" & DMA %d", dev->dma);
- printk(".\n");
- return 0;
-out:
- release_region(dev->base_addr, NI5010_IO_EXTENT);
- return err;
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- */
-
-static int ni5010_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int i;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
-
- if (request_irq(dev->irq, ni5010_interrupt, 0, boardname, dev)) {
- printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
- return -EAGAIN;
- }
- PRINTK3((KERN_DEBUG "%s: passed open() #1\n", dev->name));
- /*
- * Always allocate the DMA channel after the IRQ,
- * and clean up on failure.
- */
-#ifdef JUMPERED_DMA
- if (request_dma(dev->dma, cardname)) {
- printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma);
- free_irq(dev->irq, NULL);
- return -EAGAIN;
- }
-#endif /* JUMPERED_DMA */
-
- PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name));
- /* Reset the hardware here. Don't forget to set the station address. */
-
- outb(RS_RESET, EDLC_RESET); /* Hold up EDLC_RESET while configing board */
- outb(0, IE_RESET); /* Hardware reset of ni5010 board */
- outb(XMD_LBC, EDLC_XMODE); /* Only loopback xmits */
-
- PRINTK3((KERN_DEBUG "%s: passed open() #3\n", dev->name));
- /* Set the station address */
- for(i = 0;i < 6; i++) {
- outb(dev->dev_addr[i], EDLC_ADDR + i);
- }
-
- PRINTK3((KERN_DEBUG "%s: Initialising ni5010\n", dev->name));
- outb(0, EDLC_XMASK); /* No xmit interrupts for now */
- outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
- /* Normal packet xmit mode */
- outb(0xff, EDLC_XCLR); /* Clear all pending xmit interrupts */
- outb(RMD_BROADCAST, EDLC_RMODE);
- /* Receive broadcast and normal packets */
- reset_receiver(dev); /* Ready ni5010 for receiving packets */
-
- outb(0, EDLC_RESET); /* Un-reset the ni5010 */
-
- netif_start_queue(dev);
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-
- PRINTK((KERN_DEBUG "%s: open successful\n", dev->name));
- return 0;
-}
-
-static void reset_receiver(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK3((KERN_DEBUG "%s: resetting receiver\n", dev->name));
- outw(0, IE_GP); /* Receive packet at start of buffer */
- outb(0xff, EDLC_RCLR); /* Clear all pending rcv interrupts */
- outb(0, IE_MMODE); /* Put EDLC to rcv buffer */
- outb(MM_EN_RCV, IE_MMODE); /* Enable rcv */
- outb(0xff, EDLC_RMASK); /* Enable all rcv interrupts */
-}
-
-static void ni5010_timeout(struct net_device *dev)
-{
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
- /* Try to restart the adaptor. */
- /* FIXME: Give it a real kick here */
- chipset_init(dev, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_send_packet\n", dev->name));
-
- /*
- * Block sending
- */
-
- netif_stop_queue(dev);
- hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-static irqreturn_t ni5010_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct ni5010_local *lp;
- int ioaddr, status;
- int xmit_was_error = 0;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_interrupt\n", dev->name));
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
- status = inb(IE_ISTAT);
- PRINTK3((KERN_DEBUG "%s: IE_ISTAT = %#02x\n", dev->name, status));
-
- if ((status & IS_R_INT) == 0) ni5010_rx(dev);
-
- if ((status & IS_X_INT) == 0) {
- xmit_was_error = process_xmt_interrupt(dev);
- }
-
- if ((status & IS_DMA_INT) == 0) {
- PRINTK((KERN_DEBUG "%s: DMA complete (?)\n", dev->name));
- outb(0, IE_DMA_RST); /* Reset DMA int */
- }
-
- if (!xmit_was_error)
- reset_receiver(dev);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-
-static void dump_packet(void *buf, int len)
-{
- int i;
-
- printk(KERN_DEBUG "Packet length = %#4x\n", len);
- for (i = 0; i < len; i++){
- if (i % 16 == 0) printk(KERN_DEBUG "%#4.4x", i);
- if (i % 2 == 0) printk(" ");
- printk("%2.2x", ((unsigned char *)buf)[i]);
- if (i % 16 == 15) printk("\n");
- }
- printk("\n");
-}
-
-/* We have a good packet, get it out of the buffer. */
-static void ni5010_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- unsigned char rcv_stat;
- struct sk_buff *skb;
- int i_pkt_size;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_rx()\n", dev->name));
-
- rcv_stat = inb(EDLC_RSTAT);
- PRINTK3((KERN_DEBUG "%s: EDLC_RSTAT = %#2x\n", dev->name, rcv_stat));
-
- if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) {
- PRINTK((KERN_INFO "%s: receive error.\n", dev->name));
- dev->stats.rx_errors++;
- if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++;
- if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++;
- if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++;
- if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++;
- outb(0xff, EDLC_RCLR); /* Clear the interrupt */
- return;
- }
-
- outb(0xff, EDLC_RCLR); /* Clear the interrupt */
-
- i_pkt_size = inw(IE_RCNT);
- if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) {
- PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n",
- dev->name, i_pkt_size));
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
- return;
- }
-
- /* Malloc up new buffer. */
- skb = netdev_alloc_skb(dev, i_pkt_size + 3);
- if (skb == NULL) {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return;
- }
-
- skb_reserve(skb, 2);
-
- /* Read packet into buffer */
- outb(MM_MUX, IE_MMODE); /* Rcv buffer to system bus */
- outw(0, IE_GP); /* Seek to beginning of packet */
- insb(IE_RBUF, skb_put(skb, i_pkt_size), i_pkt_size);
-
- if (NI5010_DEBUG >= 4)
- dump_packet(skb->data, skb->len);
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += i_pkt_size;
-
- PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n",
- dev->name, i_pkt_size));
-}
-
-static int process_xmt_interrupt(struct net_device *dev)
-{
- struct ni5010_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int xmit_stat;
-
- PRINTK2((KERN_DEBUG "%s: entering process_xmt_interrupt\n", dev->name));
-
- xmit_stat = inb(EDLC_XSTAT);
- PRINTK3((KERN_DEBUG "%s: EDLC_XSTAT = %2.2x\n", dev->name, xmit_stat));
-
- outb(0, EDLC_XMASK); /* Disable xmit IRQ's */
- outb(0xff, EDLC_XCLR); /* Clear all pending xmit IRQ's */
-
- if (xmit_stat & XS_COLL){
- PRINTK((KERN_DEBUG "%s: collision detected, retransmitting\n",
- dev->name));
- outw(NI5010_BUFSIZE - lp->o_pkt_size, IE_GP);
- /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */
- outb(MM_EN_XMT | MM_MUX, IE_MMODE);
- outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */
- dev->stats.collisions++;
- return 1;
- }
-
- /* FIXME: handle other xmt error conditions */
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += lp->o_pkt_size;
- netif_wake_queue(dev);
-
- PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n",
- dev->name, lp->o_pkt_size));
-
- return 0;
-}
-
-/* The inverse routine to ni5010_open(). */
-static int ni5010_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name));
-#ifdef JUMPERED_INTERRUPTS
- free_irq(dev->irq, NULL);
-#endif
- /* Put card in held-RESET state */
- outb(0, IE_MMODE);
- outb(RS_RESET, EDLC_RESET);
-
- netif_stop_queue(dev);
-
- PRINTK((KERN_DEBUG "%s: %s closed down\n", dev->name, boardname));
- return 0;
-
-}
-
-/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
-*/
-static void ni5010_set_multicast_list(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
-
- if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
- !netdev_mc_empty(dev)) {
- outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
- PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
- } else {
- PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
- outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
- }
-}
-
-static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad)
-{
- struct ni5010_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
- unsigned int buf_offs;
-
- PRINTK2((KERN_DEBUG "%s: entering hardware_send_packet\n", dev->name));
-
- if (length > ETH_FRAME_LEN) {
- PRINTK((KERN_WARNING "%s: packet too large, not possible\n",
- dev->name));
- return;
- }
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-
- if (inb(IE_ISTAT) & IS_EN_XMT) {
- PRINTK((KERN_WARNING "%s: sending packet while already transmitting, not possible\n",
- dev->name));
- return;
- }
-
- if (NI5010_DEBUG > 3) dump_packet(buf, length);
-
- buf_offs = NI5010_BUFSIZE - length - pad;
-
- spin_lock_irqsave(&lp->lock, flags);
- lp->o_pkt_size = length + pad;
-
- outb(0, EDLC_RMASK); /* Mask all receive interrupts */
- outb(0, IE_MMODE); /* Put Xmit buffer on system bus */
- outb(0xff, EDLC_RCLR); /* Clear out pending rcv interrupts */
-
- outw(buf_offs, IE_GP); /* Point GP at start of packet */
- outsb(IE_XBUF, buf, length); /* Put data in buffer */
- while(pad--)
- outb(0, IE_XBUF);
-
- outw(buf_offs, IE_GP); /* Rewrite where packet starts */
-
- /* should work without that outb() (Crynwr used it) */
- /*outb(MM_MUX, IE_MMODE);*/ /* Xmt buffer to EDLC bus */
- outb(MM_EN_XMT | MM_MUX, IE_MMODE); /* Begin transmission */
- outb(XM_ALL, EDLC_XMASK); /* Cause interrupt after completion or fail */
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- netif_wake_queue(dev);
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-}
-
-static void chipset_init(struct net_device *dev, int startp)
-{
- /* FIXME: Move some stuff here */
- PRINTK3((KERN_DEBUG "%s: doing NOTHING in chipset_init\n", dev->name));
-}
-
-static void ni5010_show_registers(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK3((KERN_DEBUG "%s: XSTAT %#2.2x\n", dev->name, inb(EDLC_XSTAT)));
- PRINTK3((KERN_DEBUG "%s: XMASK %#2.2x\n", dev->name, inb(EDLC_XMASK)));
- PRINTK3((KERN_DEBUG "%s: RSTAT %#2.2x\n", dev->name, inb(EDLC_RSTAT)));
- PRINTK3((KERN_DEBUG "%s: RMASK %#2.2x\n", dev->name, inb(EDLC_RMASK)));
- PRINTK3((KERN_DEBUG "%s: RMODE %#2.2x\n", dev->name, inb(EDLC_RMODE)));
- PRINTK3((KERN_DEBUG "%s: XMODE %#2.2x\n", dev->name, inb(EDLC_XMODE)));
- PRINTK3((KERN_DEBUG "%s: ISTAT %#2.2x\n", dev->name, inb(IE_ISTAT)));
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni5010;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "ni5010 I/O base address");
-MODULE_PARM_DESC(irq, "ni5010 IRQ number");
-
-static int __init ni5010_init_module(void)
-{
- PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname));
- /*
- if(io <= 0 || irq == 0){
- printk(KERN_WARNING "%s: Autoprobing not allowed for modules.\n", boardname);
- printk(KERN_WARNING "%s: Set symbols 'io' and 'irq'\n", boardname);
- return -EINVAL;
- }
- */
- if (io <= 0){
- printk(KERN_WARNING "%s: Autoprobing for modules is hazardous, trying anyway..\n", boardname);
- }
-
- PRINTK2((KERN_DEBUG "%s: init_module irq=%#2x, io=%#3x\n", boardname, irq, io));
- dev_ni5010 = ni5010_probe(-1);
- if (IS_ERR(dev_ni5010))
- return PTR_ERR(dev_ni5010);
- return 0;
-}
-
-static void __exit ni5010_cleanup_module(void)
-{
- PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname));
- unregister_netdev(dev_ni5010);
- release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT);
- free_netdev(dev_ni5010);
-}
-module_init(ni5010_init_module);
-module_exit(ni5010_cleanup_module);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/racal/ni5010.h b/drivers/net/ethernet/racal/ni5010.h
deleted file mode 100644
index e10e717fcd76..000000000000
--- a/drivers/net/ethernet/racal/ni5010.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Racal-Interlan ni5010 Ethernet definitions
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * copyrights (c) 1996 by Jan-Pascal van Best (jvbest@wi.leidenuniv.nl)
- *
- * I have done a look in the following sources:
- * crynwr-packet-driver by Russ Nelson
- */
-
-#define NI5010_BUFSIZE 2048 /* number of bytes in a buffer */
-
-#define NI5010_MAGICVAL0 0x00 /* magic-values for ni5010 card */
-#define NI5010_MAGICVAL1 0x55
-#define NI5010_MAGICVAL2 0xAA
-
-#define SA_ADDR0 0x02
-#define SA_ADDR1 0x07
-#define SA_ADDR2 0x01
-
-/* The number of low I/O ports used by the ni5010 ethercard. */
-#define NI5010_IO_EXTENT 32
-
-#define PRINTK(x) if (NI5010_DEBUG) printk x
-#define PRINTK2(x) if (NI5010_DEBUG>=2) printk x
-#define PRINTK3(x) if (NI5010_DEBUG>=3) printk x
-
-/* The various IE command registers */
-#define EDLC_XSTAT (ioaddr + 0x00) /* EDLC transmit csr */
-#define EDLC_XCLR (ioaddr + 0x00) /* EDLC transmit "Clear IRQ" */
-#define EDLC_XMASK (ioaddr + 0x01) /* EDLC transmit "IRQ Masks" */
-#define EDLC_RSTAT (ioaddr + 0x02) /* EDLC receive csr */
-#define EDLC_RCLR (ioaddr + 0x02) /* EDLC receive "Clear IRQ" */
-#define EDLC_RMASK (ioaddr + 0x03) /* EDLC receive "IRQ Masks" */
-#define EDLC_XMODE (ioaddr + 0x04) /* EDLC transmit Mode */
-#define EDLC_RMODE (ioaddr + 0x05) /* EDLC receive Mode */
-#define EDLC_RESET (ioaddr + 0x06) /* EDLC RESET register */
-#define EDLC_TDR1 (ioaddr + 0x07) /* "Time Domain Reflectometry" reg1 */
-#define EDLC_ADDR (ioaddr + 0x08) /* EDLC station address, 6 bytes */
- /* 0x0E doesn't exist for r/w */
-#define EDLC_TDR2 (ioaddr + 0x0f) /* "Time Domain Reflectometry" reg2 */
-#define IE_GP (ioaddr + 0x10) /* GP pointer (word register) */
- /* 0x11 is 2nd byte of GP Pointer */
-#define IE_RCNT (ioaddr + 0x10) /* Count of bytes in rcv'd packet */
- /* 0x11 is 2nd byte of "Byte Count" */
-#define IE_MMODE (ioaddr + 0x12) /* Memory Mode register */
-#define IE_DMA_RST (ioaddr + 0x13) /* IE DMA Reset. write only */
-#define IE_ISTAT (ioaddr + 0x13) /* IE Interrupt Status. read only */
-#define IE_RBUF (ioaddr + 0x14) /* IE Receive Buffer port */
-#define IE_XBUF (ioaddr + 0x15) /* IE Transmit Buffer port */
-#define IE_SAPROM (ioaddr + 0x16) /* window on station addr prom */
-#define IE_RESET (ioaddr + 0x17) /* any write causes Board Reset */
-
-/* bits in EDLC_XSTAT, interrupt clear on write, status when read */
-#define XS_TPOK 0x80 /* transmit packet successful */
-#define XS_CS 0x40 /* carrier sense */
-#define XS_RCVD 0x20 /* transmitted packet received */
-#define XS_SHORT 0x10 /* transmission media is shorted */
-#define XS_UFLW 0x08 /* underflow. iff failed board */
-#define XS_COLL 0x04 /* collision occurred */
-#define XS_16COLL 0x02 /* 16th collision occurred */
-#define XS_PERR 0x01 /* parity error */
-
-#define XS_CLR_UFLW 0x08 /* clear underflow */
-#define XS_CLR_COLL 0x04 /* clear collision */
-#define XS_CLR_16COLL 0x02 /* clear 16th collision */
-#define XS_CLR_PERR 0x01 /* clear parity error */
-
-/* bits in EDLC_XMASK, mask/enable transmit interrupts. register is r/w */
-#define XM_TPOK 0x80 /* =1 to enable Xmt Pkt OK interrupts */
-#define XM_RCVD 0x20 /* =1 to enable Xmt Pkt Rcvd ints */
-#define XM_UFLW 0x08 /* =1 to enable Xmt Underflow ints */
-#define XM_COLL 0x04 /* =1 to enable Xmt Collision ints */
-#define XM_COLL16 0x02 /* =1 to enable Xmt 16th Coll ints */
-#define XM_PERR 0x01 /* =1 to enable Xmt Parity Error ints */
- /* note: always clear this bit */
-#define XM_ALL (XM_TPOK | XM_RCVD | XM_UFLW | XM_COLL | XM_COLL16)
-
-/* bits in EDLC_RSTAT, interrupt clear on write, status when read */
-#define RS_PKT_OK 0x80 /* received good packet */
-#define RS_RST_PKT 0x10 /* RESET packet received */
-#define RS_RUNT 0x08 /* Runt Pkt rcvd. Len < 64 Bytes */
-#define RS_ALIGN 0x04 /* Alignment error. not 8 bit aligned */
-#define RS_CRC_ERR 0x02 /* Bad CRC on rcvd pkt */
-#define RS_OFLW 0x01 /* overflow for rcv FIFO */
-#define RS_VALID_BITS ( RS_PKT_OK | RS_RST_PKT | RS_RUNT | RS_ALIGN | RS_CRC_ERR | RS_OFLW )
- /* all valid RSTAT bits */
-
-#define RS_CLR_PKT_OK 0x80 /* clear rcvd packet interrupt */
-#define RS_CLR_RST_PKT 0x10 /* clear RESET packet received */
-#define RS_CLR_RUNT 0x08 /* clear Runt Pckt received */
-#define RS_CLR_ALIGN 0x04 /* clear Alignment error */
-#define RS_CLR_CRC_ERR 0x02 /* clear CRC error */
-#define RS_CLR_OFLW 0x01 /* clear rcv FIFO Overflow */
-
-/* bits in EDLC_RMASK, mask/enable receive interrupts. register is r/w */
-#define RM_PKT_OK 0x80 /* =1 to enable rcvd good packet ints */
-#define RM_RST_PKT 0x10 /* =1 to enable RESET packet ints */
-#define RM_RUNT 0x08 /* =1 to enable Runt Pkt rcvd ints */
-#define RM_ALIGN 0x04 /* =1 to enable Alignment error ints */
-#define RM_CRC_ERR 0x02 /* =1 to enable Bad CRC error ints */
-#define RM_OFLW 0x01 /* =1 to enable overflow error ints */
-
-/* bits in EDLC_RMODE, set Receive Packet mode. register is r/w */
-#define RMD_TEST 0x80 /* =1 for Chip testing. normally 0 */
-#define RMD_ADD_SIZ 0x10 /* =1 5-byte addr match. normally 0 */
-#define RMD_EN_RUNT 0x08 /* =1 enable runt rcv. normally 0 */
-#define RMD_EN_RST 0x04 /* =1 to rcv RESET pkt. normally 0 */
-
-#define RMD_PROMISC 0x03 /* receive *all* packets. unusual */
-#define RMD_MULTICAST 0x02 /* receive multicasts too. unusual */
-#define RMD_BROADCAST 0x01 /* receive broadcasts & normal. usual */
-#define RMD_NO_PACKETS 0x00 /* don't receive any packets. unusual */
-
-/* bits in EDLC_XMODE, set Transmit Packet mode. register is r/w */
-#define XMD_COLL_CNT 0xf0 /* coll's since success. read-only */
-#define XMD_IG_PAR 0x08 /* =1 to ignore parity. ALWAYS set */
-#define XMD_T_MODE 0x04 /* =1 to power xcvr. ALWAYS set this */
-#define XMD_LBC 0x02 /* =1 for loopbakc. normally set */
-#define XMD_DIS_C 0x01 /* =1 disables contention. normally 0 */
-
-/* bits in EDLC_RESET, write only */
-#define RS_RESET 0x80 /* =1 to hold EDLC in reset state */
-
-/* bits in IE_MMODE, write only */
-#define MM_EN_DMA 0x80 /* =1 begin DMA xfer, Cplt clrs it */
-#define MM_EN_RCV 0x40 /* =1 allows Pkt rcv. clr'd by rcv */
-#define MM_EN_XMT 0x20 /* =1 begin Xmt pkt. Cplt clrs it */
-#define MM_BUS_PAGE 0x18 /* =00 ALWAYS. Used when MUX=1 */
-#define MM_NET_PAGE 0x06 /* =00 ALWAYS. Used when MUX=0 */
-#define MM_MUX 0x01 /* =1 means Rcv Buff on system bus */
- /* =0 means Xmt Buff on system bus */
-
-/* bits in IE_ISTAT, read only */
-#define IS_TDIAG 0x80 /* =1 if Diagnostic problem */
-#define IS_EN_RCV 0x20 /* =1 until frame is rcv'd cplt */
-#define IS_EN_XMT 0x10 /* =1 until frame is xmt'd cplt */
-#define IS_EN_DMA 0x08 /* =1 until DMA is cplt or aborted */
-#define IS_DMA_INT 0x04 /* =0 iff DMA done interrupt. */
-#define IS_R_INT 0x02 /* =0 iff unmasked Rcv interrupt */
-#define IS_X_INT 0x01 /* =0 iff unmasked Xmt interrupt */
-
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 63c13125db6c..5b4103db70f5 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -755,9 +755,6 @@ static void r6040_mac_address(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
-
- /* Store MAC Address in perm_addr */
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
}
static int r6040_open(struct net_device *dev)
@@ -957,9 +954,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct r6040_private *rp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(rp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1045,7 +1042,7 @@ static int r6040_mii_probe(struct net_device *dev)
}
phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(&lp->pdev->dev, "could not attach to PHY\n");
@@ -1195,9 +1192,8 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
lp->mii_bus->name = "r6040_eth_mii";
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
dev_name(&pdev->dev), card_idx);
- lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
if (!lp->mii_bus->irq) {
- dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
err = -ENOMEM;
goto err_out_mdio;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index cb6fc5a743ca..b62a32484f6a 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -577,28 +577,30 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct cp_private *cp;
+ int handled = 0;
u16 status;
if (unlikely(dev == NULL))
return IRQ_NONE;
cp = netdev_priv(dev);
+ spin_lock(&cp->lock);
+
status = cpr16(IntrStatus);
if (!status || (status == 0xFFFF))
- return IRQ_NONE;
+ goto out_unlock;
+
+ handled = 1;
netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
status, cpr8(Cmd), cpr16(CpCmd));
cpw16(IntrStatus, status & ~cp_rx_intr_mask);
- spin_lock(&cp->lock);
-
/* close possible race's with dev_close */
if (unlikely(!netif_running(dev))) {
cpw16(IntrMask, 0);
- spin_unlock(&cp->lock);
- return IRQ_HANDLED;
+ goto out_unlock;
}
if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
@@ -612,7 +614,6 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
if (status & LinkChg)
mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
- spin_unlock(&cp->lock);
if (status & PciErr) {
u16 pci_status;
@@ -625,7 +626,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
/* TODO: reset hardware */
}
- return IRQ_HANDLED;
+out_unlock:
+ spin_unlock(&cp->lock);
+
+ return IRQ_RETVAL(handled);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1945,7 +1949,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < 3; i++)
((__le16 *) (dev->dev_addr))[i] =
cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->netdev_ops = &cp_netdev_ops;
netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
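
[Editor's note] The cp_interrupt() rework above moves the spinlock acquisition to the top of the handler, tracks whether the interrupt was actually ours in a local flag, and funnels every exit through a single unlock label, so no early return can leave the lock held or misreport a shared IRQ. A compressed sketch of that handler shape, with placeholder types and a made-up register offset standing in for 8139cp's cpr16()/IntrStatus:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>
	#include <linux/io.h>
	#include <linux/types.h>

	struct example_priv {
		spinlock_t lock;
		void __iomem *regs;
	};

	/* Placeholder register read standing in for cpr16(IntrStatus). */
	static u16 example_read_status(struct example_priv *p)
	{
		return readw(p->regs + 0x3e);	/* hypothetical offset */
	}

	static irqreturn_t example_interrupt(int irq, void *dev_instance)
	{
		struct example_priv *priv = dev_instance;
		int handled = 0;
		u16 status;

		spin_lock(&priv->lock);

		status = example_read_status(priv);
		if (!status || status == 0xffff)	/* not ours, or device gone */
			goto out_unlock;

		handled = 1;
		/* ... acknowledge and service the events reported in status ... */

	out_unlock:
		spin_unlock(&priv->lock);
		return IRQ_RETVAL(handled);	/* IRQ_HANDLED only if we did work */
	}
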
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 5dc161630127..1276ac71353a 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -991,7 +991,6 @@ static int rtl8139_init_one(struct pci_dev *pdev,
for (i = 0; i < 3; i++)
((__le16 *) (dev->dev_addr))[i] =
cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* The Rtl8139-specific entries in the device structure. */
dev->netdev_ops = &rtl8139_netdev_ops;
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 5821966f9f28..783fa8b5cde7 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -34,8 +34,8 @@ config ATP
will be called atp.
config 8139CP
- tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support"
+ depends on PCI
select CRC32
select NET_CORE
select MII
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ed96f309bca8..8900398ba103 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -83,7 +83,7 @@ static const int multicast_filter_limit = 32;
#define R8169_REGS_SIZE 256
#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
-#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
+#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
@@ -450,7 +450,6 @@ enum rtl8168_registers {
#define PWM_EN (1 << 22)
#define RXDV_GATED_EN (1 << 19)
#define EARLY_TALLY_EN (1 << 16)
-#define FORCE_CLK (1 << 15) /* force clock request */
};
enum rtl_register_content {
@@ -514,7 +513,6 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
- ClkReqEn = (1 << 7), /* Clock Request Enable */
MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -535,7 +533,6 @@ enum rtl_register_content {
Spi_en = (1 << 3),
LanWake = (1 << 1), /* LanWake enable/disable */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
- ASPM_en = (1 << 0), /* ASPM enable */
/* TBICSR p.28 */
TBIReset = 0x80000000,
@@ -684,7 +681,6 @@ enum features {
RTL_FEATURE_WOL = (1 << 0),
RTL_FEATURE_MSI = (1 << 1),
RTL_FEATURE_GMII = (1 << 2),
- RTL_FEATURE_FW_LOADED = (1 << 3),
};
struct rtl8169_counters {
@@ -727,7 +723,6 @@ struct rtl8169_private {
u16 mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
- u32 dirty_rx;
u32 dirty_tx;
struct rtl8169_stats rx_stats;
struct rtl8169_stats tx_stats;
@@ -1826,8 +1821,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
if (opts2 & RxVlanTag)
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
-
- desc->opts2 = 0;
}
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2391,10 +2384,8 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
struct rtl_fw *rtl_fw = tp->rtl_fw;
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- if (!IS_ERR_OR_NULL(rtl_fw)) {
+ if (!IS_ERR_OR_NULL(rtl_fw))
rtl_phy_write_fw(tp, rtl_fw);
- tp->features |= RTL_FEATURE_FW_LOADED;
- }
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2405,31 +2396,6 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
rtl_apply_firmware(tp);
}
-static void r810x_aldps_disable(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(100);
-}
-
-static void r810x_aldps_enable(struct rtl8169_private *tp)
-{
- if (!(tp->features & RTL_FEATURE_FW_LOADED))
- return;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x8310);
-}
-
-static void r8168_aldps_enable_1(struct rtl8169_private *tp)
-{
- if (!(tp->features & RTL_FEATURE_FW_LOADED))
- return;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
-}
-
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3220,8 +3186,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
- r8168_aldps_enable_1(tp);
-
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
@@ -3296,8 +3260,6 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3305,8 +3267,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
rtl8168f_hw_phy_config(tp);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3404,8 +3364,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3491,19 +3449,21 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
rtl_apply_firmware(tp);
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
-
- r810x_aldps_enable(tp);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
/* Disable ALDPS before setting firmware */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(20);
rtl_apply_firmware(tp);
@@ -3513,8 +3473,6 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r810x_aldps_enable(tp);
}
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3527,7 +3485,9 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
rtl_apply_firmware(tp);
@@ -3535,8 +3495,6 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- r810x_aldps_enable(tp);
}
static void rtl_hw_phy_config(struct net_device *dev)
@@ -4177,7 +4135,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
- tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+ tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5053,6 +5011,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
+ rtl_disable_clock_request(pdev);
+
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
@@ -5061,8 +5021,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5087,12 +5046,13 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
+ rtl_disable_clock_request(pdev);
+
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
- RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5149,10 +5109,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
+ RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5368,9 +5326,6 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
- RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
@@ -5396,9 +5351,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
- RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5420,10 +5372,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
- RTL_W32(MISC,
- (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
@@ -5920,7 +5869,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
/* The infamous DAC f*ckup only happens at boot time */
- if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
void __iomem *ioaddr = tp->mmio_addr;
netif_info(tp, intr, dev, "disabling PCI DAC\n");
@@ -6035,10 +5984,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
unsigned int count;
cur_rx = tp->cur_rx;
- rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
- rx_left = min(rx_left, budget);
- for (; rx_left > 0; rx_left--, cur_rx++) {
+ for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
unsigned int entry = cur_rx % NUM_RX_DESC;
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
@@ -6064,8 +6011,6 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
!(status & (RxRWT | RxFOVF)) &&
(dev->features & NETIF_F_RXALL))
goto process_pkt;
-
- rtl8169_mark_to_asic(desc, rx_buf_sz);
} else {
struct sk_buff *skb;
dma_addr_t addr;
@@ -6086,16 +6031,14 @@ process_pkt:
if (unlikely(rtl8169_fragmented_frame(status))) {
dev->stats.rx_dropped++;
dev->stats.rx_length_errors++;
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- continue;
+ goto release_descriptor;
}
skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
tp, pkt_size, addr);
- rtl8169_mark_to_asic(desc, rx_buf_sz);
if (!skb) {
dev->stats.rx_dropped++;
- continue;
+ goto release_descriptor;
}
rtl8169_rx_csum(skb, status);
@@ -6111,20 +6054,15 @@ process_pkt:
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
}
-
- /* Work around for AMD plateform. */
- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- desc->opts2 = 0;
- cur_rx++;
- }
+release_descriptor:
+ desc->opts2 = 0;
+ wmb();
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
}
count = cur_rx - tp->cur_rx;
tp->cur_rx = cur_rx;
- tp->dirty_rx += count;
-
return count;
}
@@ -6948,7 +6886,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Get MAC address */
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
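
[Editor's note] In the r8169 receive path above, the per-branch rtl8169_mark_to_asic() calls collapse into one release_descriptor label that clears opts2, issues a write barrier, and only then hands the descriptor back to the NIC; the loop now walks at most NUM_RX_DESC entries from cur_rx, so the dirty_rx counter becomes dead weight and is removed. A condensed sketch of the recycling order that label enforces; the structure and ownership bit below are simplified stand-ins, not r8169's exact RxDesc layout:

	#include <linux/types.h>
	#include <asm/byteorder.h>
	#include <asm/barrier.h>

	struct example_rx_desc {
		__le32 opts1;		/* ownership bit, status, length */
		__le32 opts2;		/* VLAN tag; must be cleared before reuse */
		__le64 addr;
	};

	#define EXAMPLE_DESC_OWN	(1u << 31)	/* hypothetical bit */

	/* Give a descriptor back to the NIC: the opts2 clear must be visible
	 * before the ownership bit flips, hence the wmb() in between. */
	static void example_release_descriptor(struct example_rx_desc *desc,
					       u32 buf_sz)
	{
		desc->opts2 = 0;
		wmb();
		desc->opts1 = cpu_to_le32(EXAMPLE_DESC_OWN | buf_sz);
	}
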
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 3d705862bd7d..33e96176e4d8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -891,18 +891,16 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
- GFP_KERNEL);
+ mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
+ sizeof(*mdp->rx_skbuff), GFP_KERNEL);
if (!mdp->rx_skbuff) {
- dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
ret = -ENOMEM;
return ret;
}
- mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
- GFP_KERNEL);
+ mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
+ sizeof(*mdp->tx_skbuff), GFP_KERNEL);
if (!mdp->tx_skbuff) {
- dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
ret = -ENOMEM;
goto skb_ring_free;
}
@@ -1422,7 +1420,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
/* Try connect to PHY */
phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
- 0, mdp->phy_interface);
+ mdp->phy_interface);
if (IS_ERR(phydev)) {
dev_err(&ndev->dev, "phy_connect failed\n");
return PTR_ERR(phydev);
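
[Editor's note] Several drivers in this section (r6040, sh_eth, s6gmac, smsc911x, smsc9420, stmmac) lose one argument in their phy_connect()/phy_connect_direct() calls: the phylib flags parameter was dropped from those prototypes, so callers now pass only the net device, the PHY bus id or device, the link-adjustment callback, and the interface mode. A minimal sketch of a probe-time attach against the new signature (the callback and bus id are placeholders):

	#include <linux/phy.h>
	#include <linux/netdevice.h>
	#include <linux/err.h>

	static void example_adjust_link(struct net_device *dev)
	{
		/* react to link, speed and duplex changes reported by phylib */
	}

	static int example_attach_phy(struct net_device *dev, const char *bus_id)
	{
		struct phy_device *phydev;

		/* No flags argument any more: dev, bus id, callback, interface. */
		phydev = phy_connect(dev, bus_id, example_adjust_link,
				     PHY_INTERFACE_MODE_MII);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);

		return 0;
	}
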
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 72fc57dd084d..21683e2b1ff4 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -795,7 +795,7 @@ static inline int s6gmac_phy_start(struct net_device *dev)
struct phy_device *p = NULL;
while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
i++;
- p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
+ p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
PHY_INTERFACE_MODE_RGMII);
if (IS_ERR(p)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig
index 29f18533fdc7..11f168e46ebe 100644
--- a/drivers/net/ethernet/seeq/Kconfig
+++ b/drivers/net/ethernet/seeq/Kconfig
@@ -6,7 +6,6 @@ config NET_VENDOR_SEEQ
bool "SEEQ devices"
default y
depends on HAS_IOMEM
- depends on (ARM && ARCH_ACORN) || SGI_HAS_SEEQ || EXPERIMENTAL
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -26,17 +25,6 @@ config ARM_ETHER3
If you have an Acorn system with one of these network cards, you
should say Y to this option if you wish to use it with Linux.
-config SEEQ8005
- tristate "SEEQ8005 support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- ---help---
- This is a driver for the SEEQ 8005 network (Ethernet) card. If this
- is for you, read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called seeq8005.
-
config SGISEEQ
tristate "SGI Seeq ethernet controller support"
depends on SGI_HAS_SEEQ
diff --git a/drivers/net/ethernet/seeq/Makefile b/drivers/net/ethernet/seeq/Makefile
index 3e258a580c05..0488e99b831f 100644
--- a/drivers/net/ethernet/seeq/Makefile
+++ b/drivers/net/ethernet/seeq/Makefile
@@ -3,5 +3,4 @@
#
obj-$(CONFIG_ARM_ETHER3) += ether3.o
-obj-$(CONFIG_SEEQ8005) += seeq8005.o
obj-$(CONFIG_SGISEEQ) += sgiseeq.o
diff --git a/drivers/net/ethernet/seeq/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c
deleted file mode 100644
index d6e50de71186..000000000000
--- a/drivers/net/ethernet/seeq/seeq8005.c
+++ /dev/null
@@ -1,749 +0,0 @@
-/* seeq8005.c: A network driver for linux. */
-/*
- Based on skeleton.c,
- Written 1993-94 by Donald Becker.
- See the skeleton.c file for further copyright information.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as hamish@zot.apana.org.au
-
- This file is a network device driver for the SEEQ 8005 chipset and
- the Linux operating system.
-
-*/
-
-static const char version[] =
- "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n";
-
-/*
- Sources:
- SEEQ 8005 databook
-
- Version history:
- 1.00 Public release. cosmetic changes (no warnings now)
- 0.68 Turning per- packet,interrupt debug messages off - testing for release.
- 0.67 timing problems/bad buffer reads seem to be fixed now
- 0.63 *!@$ protocol=eth_type_trans -- now packets flow
- 0.56 Send working
- 0.48 Receive working
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "seeq8005.h"
-
-/* First, a few definitions that the brave might change. */
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int seeq8005_portlist[] __initdata =
- { 0x300, 0x320, 0x340, 0x360, 0};
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-/* Information that need to be kept for each board. */
-struct net_local {
- unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */
- long open_time; /* Useless example local info. */
-};
-
-/* The station (ethernet) address prefix, used for IDing the board. */
-#define SA_ADDR0 0x00
-#define SA_ADDR1 0x80
-#define SA_ADDR2 0x4b
-
-/* Index to functions, as function prototypes. */
-
-static int seeq8005_probe1(struct net_device *dev, int ioaddr);
-static int seeq8005_open(struct net_device *dev);
-static void seeq8005_timeout(struct net_device *dev);
-static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t seeq8005_interrupt(int irq, void *dev_id);
-static void seeq8005_rx(struct net_device *dev);
-static int seeq8005_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-
-/* Example routines you must write ;->. */
-#define tx_done(dev) (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON)
-static void hardware_send_packet(struct net_device *dev, char *buf, int length);
-extern void seeq8005_init(struct net_device *dev, int startp);
-static inline void wait_for_buffer(struct net_device *dev);
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- */
-
-static int io = 0x320;
-static int irq = 10;
-
-struct net_device * __init seeq8005_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = seeq8005_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = seeq8005_portlist; *port; port++) {
- if (seeq8005_probe1(dev, *port) == 0)
- break;
- }
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, SEEQ8005_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops seeq8005_netdev_ops = {
- .ndo_open = seeq8005_open,
- .ndo_stop = seeq8005_close,
- .ndo_start_xmit = seeq8005_send_packet,
- .ndo_tx_timeout = seeq8005_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* This is the real probe routine. Linux has a history of friendly device
- probes on the ISA bus. A good device probes avoids doing writes, and
- verifies that the correct device exists and functions. */
-
-static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned version_printed;
- int i,j;
- unsigned char SA_prom[32];
- int old_cfg1;
- int old_cfg2;
- int old_stat;
- int old_dmaar;
- int old_rear;
- int retval;
-
- if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005"))
- return -ENODEV;
-
- if (net_debug>1)
- printk("seeq8005: probing at 0x%x\n",ioaddr);
-
- old_stat = inw(SEEQ_STATUS); /* read status register */
- if (old_stat == 0xffff) {
- retval = -ENODEV;
- goto out; /* assume that 0xffff == no device */
- }
- if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */
- if (net_debug>1) {
- printk("seeq8005: reserved stat bits != 0x1800\n");
- printk(" == 0x%04x\n",old_stat);
- }
- retval = -ENODEV;
- goto out;
- }
-
- old_rear = inw(SEEQ_REA);
- if (old_rear == 0xffff) {
- outw(0,SEEQ_REA);
- if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */
- retval = -ENODEV;
- goto out;
- }
- } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */
- if (net_debug>1) {
- printk("seeq8005: unused rear bits != 0xff00\n");
- printk(" == 0x%04x\n",old_rear);
- }
- retval = -ENODEV;
- goto out;
- }
-
- old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */
- old_cfg1 = inw(SEEQ_CFG1);
- old_dmaar = inw(SEEQ_DMAAR);
-
- if (net_debug>4) {
- printk("seeq8005: stat = 0x%04x\n",old_stat);
- printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1);
- printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2);
- printk("seeq8005: raer = 0x%04x\n",old_rear);
- printk("seeq8005: dmaar= 0x%04x\n",old_dmaar);
- }
-
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */
- outw( 0, SEEQ_DMAAR); /* set starting PROM address */
- outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */
-
-
- j=0;
- for(i=0; i <32; i++) {
- j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff;
- }
-
-#if 0
- /* untested because I only have the one card */
- if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */
- if (net_debug>1) { /* check this before deciding that we have a card */
- printk("seeq8005: prom sum error\n");
- }
- outw( old_stat, SEEQ_STATUS);
- outw( old_dmaar, SEEQ_DMAAR);
- outw( old_cfg1, SEEQ_CFG1);
- retval = -ENODEV;
- goto out;
- }
-#endif
-
- outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */
- udelay(5);
- outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
-
- if (net_debug) {
- printk("seeq8005: prom sum = 0x%08x\n",j);
- for(j=0; j<32; j+=16) {
- printk("seeq8005: prom %02x: ",j);
- for(i=0;i<16;i++) {
- printk("%02x ",SA_prom[j|i]);
- }
- printk(" ");
- for(i=0;i<16;i++) {
- if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) {
- printk("%c", SA_prom[j|i]);
- } else {
- printk(" ");
- }
- }
- printk("\n");
- }
- }
-
-#if 0
- /*
- * testing the packet buffer memory doesn't work yet
- * but all other buffer accesses do
- * - fixing is not a priority
- */
- if (net_debug>1) { /* test packet buffer memory */
- printk("seeq8005: testing packet buffer ... ");
- outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0 , SEEQ_DMAAR);
- for(i=0;i<32768;i++) {
- outw(0x5a5a, SEEQ_BUFFER);
- }
- j=jiffies+HZ;
- while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && time_before(jiffies, j) )
- mb();
- outw( 0 , SEEQ_DMAAR);
- while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, j+HZ))
- mb();
- if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- j=0;
- for(i=0;i<32768;i++) {
- if (inw(SEEQ_BUFFER) != 0x5a5a)
- j++;
- }
- if (j) {
- printk("%i\n",j);
- } else {
- printk("ok.\n");
- }
- }
-#endif
-
- if (net_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
-
- /* Fill in the 'dev' fields. */
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- /* Retrieve and print the ethernet address. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = SA_prom[i+6];
- printk("%pM", dev->dev_addr);
-
- if (dev->irq == 0xff)
- ; /* Do nothing: a user-level program will set it. */
- else if (dev->irq < 2) { /* "Auto-IRQ" */
- unsigned long cookie = probe_irq_on();
-
- outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD );
-
- dev->irq = probe_irq_off(cookie);
-
- if (net_debug >= 2)
- printk(" autoirq is %d\n", dev->irq);
- } else if (dev->irq == 2)
- /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
- * or don't know which one to set.
- */
- dev->irq = 9;
-
-#if 0
- {
- int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
- if (irqval) {
- printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
- dev->irq, irqval);
- retval = -EAGAIN;
- goto out;
- }
- }
-#endif
- dev->netdev_ops = &seeq8005_netdev_ops;
- dev->watchdog_timeo = HZ/20;
- dev->flags &= ~IFF_MULTICAST;
-
- return 0;
-out:
- release_region(ioaddr, SEEQ8005_IO_EXTENT);
- return retval;
-}
-
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine should set everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is non-reboot way to recover if something goes wrong.
- */
-static int seeq8005_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-
- {
- int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
- if (irqval) {
- printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
- dev->irq, irqval);
- return -EAGAIN;
- }
- }
-
- /* Reset the hardware here. Don't forget to set the station address. */
- seeq8005_init(dev, 1);
-
- lp->open_time = jiffies;
-
- netif_start_queue(dev);
- return 0;
-}
-
-static void seeq8005_timeout(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
- /* Try to restart the adaptor. */
- seeq8005_init(dev, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- short length = skb->len;
- unsigned char *buf;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- buf = skb->data;
-
- /* Block a timer-based transmit from overlapping */
- netif_stop_queue(dev);
-
- hardware_send_packet(dev, buf, length);
- dev->stats.tx_bytes += length;
- dev_kfree_skb (skb);
- /* You might need to clean up and record Tx statistics here. */
-
- return NETDEV_TX_OK;
-}
-
-/*
- * wait_for_buffer
- *
- * This routine waits for the SEEQ chip to assert that the FIFO is ready
- * by checking for a window interrupt, and then clearing it. This has to
- * occur in the interrupt handler!
- */
-inline void wait_for_buffer(struct net_device * dev)
-{
- int ioaddr = dev->base_addr;
- unsigned long tmp;
- int status;
-
- tmp = jiffies + HZ;
- while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
- cpu_relax();
-
- if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-}
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t seeq8005_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status, boguscount = 0;
- int handled = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- status = inw(SEEQ_STATUS);
- do {
- if (net_debug >2) {
- printk("%s: int, status=0x%04x\n",dev->name,status);
- }
-
- if (status & SEEQSTAT_WINDOW_INT) {
- handled = 1;
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- if (net_debug) {
- printk("%s: window int!\n",dev->name);
- }
- }
- if (status & SEEQSTAT_TX_INT) {
- handled = 1;
- outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- dev->stats.tx_packets++;
- netif_wake_queue(dev); /* Inform upper layers. */
- }
- if (status & SEEQSTAT_RX_INT) {
- handled = 1;
- /* Got a packet(s). */
- seeq8005_rx(dev);
- }
- status = inw(SEEQ_STATUS);
- } while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ;
-
- if(net_debug>2) {
- printk("%s: eoi\n",dev->name);
- }
- return IRQ_RETVAL(handled);
-}
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void seeq8005_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int boguscount = 10;
- int pkt_hdr;
- int ioaddr = dev->base_addr;
-
- do {
- int next_packet;
- int pkt_len;
- int i;
- int status;
-
- status = inw(SEEQ_STATUS);
- outw( lp->receive_ptr, SEEQ_DMAAR);
- outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- wait_for_buffer(dev);
- next_packet = ntohs(inw(SEEQ_BUFFER));
- pkt_hdr = inw(SEEQ_BUFFER);
-
- if (net_debug>2) {
- printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
- }
-
- if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? */
- return; /* Done for now */
- }
-
- if ((pkt_hdr & SEEQPKTS_DONE)==0)
- break;
-
- if (next_packet < lp->receive_ptr) {
- pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
- } else {
- pkt_len = next_packet - lp->receive_ptr - 4;
- }
-
- if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */
- printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
- seeq8005_init(dev,1);
- return;
- }
-
- lp->receive_ptr = next_packet;
-
- if (net_debug>2) {
- printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
- }
-
- if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */
- dev->stats.rx_errors++;
- if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++;
- if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++;
- if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++;
- if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++;
- /* skip over this packet */
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
- unsigned char *buf;
-
- skb = netdev_alloc_skb(dev, pkt_len);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb, 2); /* align data on 16 byte */
- buf = skb_put(skb,pkt_len);
-
- insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
-
- if (net_debug>2) {
- char * p = buf;
- printk("%s: recv ",dev->name);
- for(i=0;i<14;i++) {
- printk("%02x ",*(p++)&0xff);
- }
- printk("\n");
- }
-
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
-
- /* If any worth-while packets have been received, netif_rx()
- has done a mark_bh(NET_BH) for us and will work on them
- when we get to the bottom-half routine. */
-}
-
-/* The inverse routine to net_open(). */
-static int seeq8005_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- lp->open_time = 0;
-
- netif_stop_queue(dev);
-
- /* Flush the Tx and disable Rx here. */
- outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
-
- free_irq(dev->irq, dev);
-
- /* Update the statistics here. */
-
- return 0;
-
-}
-
-/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
- */
-static void set_multicast_list(struct net_device *dev)
-{
-/*
- * I _could_ do up to 6 addresses here, but won't (yet?)
- */
-
-#if 0
- int ioaddr = dev->base_addr;
-/*
- * hmm, not even sure if my matching works _anyway_ - seem to be receiving
- * _everything_ . . .
- */
-
- if (num_addrs) { /* Enable promiscuous mode */
- outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1);
- dev->flags|=IFF_PROMISC;
- } else { /* Disable promiscuous mode, use normal mode */
- outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1);
- }
-#endif
-}
-
-void seeq8005_init(struct net_device *dev, int startp)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int i;
-
- outw(SEEQCFG2_RESET, SEEQ_CFG2); /* reset device */
- udelay(5);
-
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0, SEEQ_DMAAR); /* load start address into both low and high byte */
-/* wait_for_buffer(dev); */ /* I think that you only need a wait for memory buffer */
- outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
-
- for(i=0;i<6;i++) { /* set Station address */
- outb(dev->dev_addr[i], SEEQ_BUFFER);
- udelay(2);
- }
-
- outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1); /* set xmit end area pointer to 16K */
- outb( DEFAULT_TEA, SEEQ_BUFFER); /* this gives us 16K of send buffer and 48K of recv buffer */
-
- lp->receive_ptr = (DEFAULT_TEA+1)<<8; /* so we can find our packet_header */
- outw( lp->receive_ptr, SEEQ_RPR); /* Receive Pointer Register is set to recv buffer memory */
-
- outw( 0x00ff, SEEQ_REA); /* Receive Area End */
-
- if (net_debug>4) {
- printk("%s: SA0 = ",dev->name);
-
- outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0, SEEQ_DMAAR);
- outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
-
- for(i=0;i<6;i++) {
- printk("%02x ",inb(SEEQ_BUFFER));
- }
- printk("\n");
- }
-
- outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
- outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2);
- outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD);
-
- if (net_debug>4) {
- int old_cfg1;
- old_cfg1 = inw(SEEQ_CFG1);
- printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS));
- printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1);
- printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2));
- printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA));
- printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR));
-
- }
-}
-
-
-static void hardware_send_packet(struct net_device * dev, char *buf, int length)
-{
- int ioaddr = dev->base_addr;
- int status = inw(SEEQ_STATUS);
- int transmit_ptr = 0;
- unsigned long tmp;
-
- if (net_debug>4) {
- printk("%s: send 0x%04x\n",dev->name,length);
- }
-
- /* Set FIFO to writemode and set packet-buffer address */
- outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( transmit_ptr, SEEQ_DMAAR);
-
- /* output SEEQ Packet header barfage */
- outw( htons(length + 4), SEEQ_BUFFER);
- outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER );
-
- /* blat the buffer */
- outsw( SEEQ_BUFFER, buf, (length +1) >> 1);
- /* paranoia !! */
- outw( 0, SEEQ_BUFFER);
- outw( 0, SEEQ_BUFFER);
-
- /* set address of start of transmit chain */
- outw( transmit_ptr, SEEQ_TPR);
-
- /* drain FIFO */
- tmp = jiffies;
- while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && time_before(jiffies, tmp + HZ))
- mb();
-
- /* doit ! */
- outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-
-}
-
-
-#ifdef MODULE
-
-static struct net_device *dev_seeq;
-MODULE_LICENSE("GPL");
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
-MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
-
-int __init init_module(void)
-{
- dev_seeq = seeq8005_probe(-1);
- return PTR_RET(dev_seeq);
-}
-
-void __exit cleanup_module(void)
-{
- unregister_netdev(dev_seeq);
- release_region(dev_seeq->base_addr, SEEQ8005_IO_EXTENT);
- free_netdev(dev_seeq);
-}
-
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/seeq/seeq8005.h b/drivers/net/ethernet/seeq/seeq8005.h
deleted file mode 100644
index 5dfb0098c6ca..000000000000
--- a/drivers/net/ethernet/seeq/seeq8005.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * defines, etc for the seeq8005
- */
-
-/*
- * This file is distributed under GPL.
- *
- * This style and layout of this file is also copied
- * from many of the other linux network device drivers.
- */
-
-/* The number of low I/O ports used by the ethercard. */
-#define SEEQ8005_IO_EXTENT 16
-
-#define SEEQ_B (ioaddr)
-
-#define SEEQ_CMD (SEEQ_B) /* Write only */
-#define SEEQ_STATUS (SEEQ_B) /* Read only */
-#define SEEQ_CFG1 (SEEQ_B + 2)
-#define SEEQ_CFG2 (SEEQ_B + 4)
-#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */
-#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */
-#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */
-#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */
-#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */
-
-#define DEFAULT_TEA (0x3f)
-
-#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */
-#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */
-#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */
-#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? */
-#define SEEQCMD_INT_MASK (0x000f)
-
-#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */
-#define SEEQCMD_RX_INT_ACK (0x0020)
-#define SEEQCMD_TX_INT_ACK (0x0040)
-#define SEEQCMD_WINDOW_INT_ACK (0x0080)
-#define SEEQCMD_ACK_ALL (0x00f0)
-
-#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */
-#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */
-#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */
-#define SEEQCMD_SET_DMA_OFF (0x0800)
-#define SEEQCMD_SET_RX_OFF (0x1000)
-#define SEEQCMD_SET_TX_OFF (0x2000)
-#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */
-
-#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */
-#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */
-
-#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */
-#define SEEQSTAT_RX_INT_EN (0x0002)
-#define SEEQSTAT_TX_INT_EN (0x0004)
-#define SEEQSTAT_WINDOW_INT_EN (0x0008)
-
-#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */
-#define SEEQSTAT_RX_INT (0x0020)
-#define SEEQSTAT_TX_INT (0x0040)
-#define SEEQSTAT_WINDOW_INT (0x0080)
-#define SEEQSTAT_ANY_INT (0x00f0)
-
-#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */
-#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */
-#define SEEQSTAT_TX_ON (0x0400) /* TX running */
-
-#define SEEQSTAT_FIFO_FULL (0x2000)
-#define SEEQSTAT_FIFO_EMPTY (0x4000)
-#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */
-
-#define SEEQCFG1_BUFFER_MASK (0x000f) /* define what maps into the BUFFER register */
-#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */
-#define SEEQCFG1_BUFFER_MAC1 (0x0001)
-#define SEEQCFG1_BUFFER_MAC2 (0x0002)
-#define SEEQCFG1_BUFFER_MAC3 (0x0003)
-#define SEEQCFG1_BUFFER_MAC4 (0x0004)
-#define SEEQCFG1_BUFFER_MAC5 (0x0005)
-#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */
-#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */
-#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */
-#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */
-
-#define SEEQCFG1_DMA_INTVL_MASK (0x0030)
-#define SEEQCFG1_DMA_CONT (0x0000)
-#define SEEQCFG1_DMA_800ns (0x0010)
-#define SEEQCFG1_DMA_1600ns (0x0020)
-#define SEEQCFG1_DMA_3200ns (0x0030)
-
-#define SEEQCFG1_DMA_LEN_MASK (0x00c0)
-#define SEEQCFG1_DMA_LEN1 (0x0000)
-#define SEEQCFG1_DMA_LEN2 (0x0040)
-#define SEEQCFG1_DMA_LEN4 (0x0080)
-#define SEEQCFG1_DMA_LEN8 (0x00c0)
-
-#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */
-#define SEEQCFG1_MAC0_EN (0x0100)
-#define SEEQCFG1_MAC1_EN (0x0200)
-#define SEEQCFG1_MAC2_EN (0x0400)
-#define SEEQCFG1_MAC3_EN (0x0800)
-#define SEEQCFG1_MAC4_EN (0x1000)
-#define SEEQCFG1_MAC5_EN (0x2000)
-
-#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */
-#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */
-#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */
-#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */
-#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */
-
-#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD)
-
-#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */
-#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */
-
-#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */
-#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */
-#define SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */
-
-#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */
-#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */
-#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */
-#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */
-#define SEEQCFG2_XMIT_NO_CRC (0x0400) /* don't xmit CRC with each packet (user supplies it) */
-#define SEEQCFG2_LOOPBACK (0x0800)
-#define SEEQCFG2_CTRLO (0x1000)
-#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */
-
-struct seeq_pkt_hdr {
- unsigned short next; /* address of next packet header */
- unsigned char babble_int:1, /* enable int on >1514 byte packet */
- coll_int:1, /* enable int on collision */
- coll_16_int:1, /* enable int on >15 collision */
- xmit_int:1, /* enable int on success (or xmit with <15 collision) */
- unused:1,
- data_follows:1, /* if not set, process this as a header and pointer only */
- chain_cont:1, /* if set, more headers in chain only cmd bit valid in recv header */
- xmit_recv:1; /* if set, a xmit packet, else a receive packet.*/
- unsigned char status;
-};
-
-#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */
-#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */
-#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */
-#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */
-#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */
-#define SEEQPKTH_CHAIN (0x40) /* more headers follow */
-#define SEEQPKTH_XMIT (0x80)
-
-#define SEEQPKTS_BABBLE (0x0100) /* xmit only */
-#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */
-#define SEEQPKTS_COLLISION (0x0200) /* xmit only */
-#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */
-#define SEEQPKTS_COLL16 (0x0400) /* xmit only */
-#define SEEQPKTS_DRIB (0x0400) /* recv only */
-#define SEEQPKTS_SHORT (0x0800) /* recv only */
-#define SEEQPKTS_DONE (0x8000)
-#define SEEQPKTS_ANY_ERROR (0x0f00)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 0767043f44a4..3f93624fc273 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1439,7 +1439,7 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
delta = timespec_sub(*e_ts, time_now);
- efx_phc_adjtime(ptp, timespec_to_ns(&delta));
+ rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index dc171b4961e4..7ed08c32a9c5 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1565,9 +1565,9 @@ static void ioc3_get_drvinfo (struct net_device *dev,
{
struct ioc3_private *ip = netdev_priv(dev);
- strcpy (info->driver, IOC3_NAME);
- strcpy (info->version, IOC3_VERSION);
- strcpy (info->bus_info, pci_name(ip->pdev));
+ strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
+ strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}
static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/silan/Kconfig b/drivers/net/ethernet/silan/Kconfig
index ae1ce170864d..3409b3f97a1b 100644
--- a/drivers/net/ethernet/silan/Kconfig
+++ b/drivers/net/ethernet/silan/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_SILAN
bool "Silan devices"
default y
- depends on PCI && EXPERIMENTAL
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -19,8 +19,8 @@ config NET_VENDOR_SILAN
if NET_VENDOR_SILAN
config SC92031
- tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Silan SC92031 PCI Fast Ethernet Adapter driver"
+ depends on PCI
select CRC32
---help---
This is a driver for the Fast Ethernet PCI network cards based on
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index b2315324cc6d..28f7268f1b88 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1458,12 +1458,12 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mac0 = ioread32(port_base + MAC0);
mac1 = ioread32(port_base + MAC0 + 4);
- dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
- dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
- dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
- dev->dev_addr[3] = dev->perm_addr[3] = mac0;
- dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
- dev->dev_addr[5] = dev->perm_addr[5] = mac1;
+ dev->dev_addr[0] = mac0 >> 24;
+ dev->dev_addr[1] = mac0 >> 16;
+ dev->dev_addr[2] = mac0 >> 8;
+ dev->dev_addr[3] = mac0;
+ dev->dev_addr[4] = mac1 >> 8;
+ dev->dev_addr[5] = mac1;
err = register_netdev(dev);
if (err < 0)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 5bffd9749a58..efca14eaefa9 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -247,8 +247,7 @@ static const struct ethtool_ops sis900_ethtool_ops;
* @net_dev: the net device to get address for
*
* Older SiS900 and friends, use EEPROM to store MAC address.
- * MAC address is read from read_eeprom() into @net_dev->dev_addr and
- * @net_dev->perm_addr.
+ * MAC address is read from read_eeprom() into @net_dev->dev_addr.
*/
static int sis900_get_mac_addr(struct pci_dev *pci_dev,
@@ -271,9 +270,6 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
return 1;
}
@@ -284,8 +280,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
*
* SiS630E model, use APC CMOS RAM to store MAC address.
* APC CMOS RAM is accessed through ISA bridge.
- * MAC address is read into @net_dev->dev_addr and
- * @net_dev->perm_addr.
+ * MAC address is read into @net_dev->dev_addr.
*/
static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
@@ -311,9 +306,6 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
}
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -328,7 +320,7 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
*
* SiS635 model, set MAC Reload Bit to load Mac address from APC
* to rfdr. rfdr is accessed through rfcr. MAC address is read into
- * @net_dev->dev_addr and @net_dev->perm_addr.
+ * @net_dev->dev_addr.
*/
static int sis635_get_mac_addr(struct pci_dev *pci_dev,
@@ -353,9 +345,6 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
*( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
}
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
/* enable packet filtering */
sw32(rfcr, rfcrSave | RFEN);
@@ -375,7 +364,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
- * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
+ * MAC address is read into @net_dev->dev_addr.
*/
static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
@@ -395,9 +384,6 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 3; i++)
mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
rc = 1;
break;
}
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 59a6f88da867..9dd842dbb859 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1522,9 +1522,10 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void
smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strncpy(info->driver, CARDNAME, sizeof(info->driver));
- strncpy(info->version, version, sizeof(info->version));
- strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, version, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
}
static int smc911x_ethtool_nwayreset(struct net_device *dev)
@@ -2035,7 +2036,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
struct resource *res;
struct smc911x_local *lp;
- unsigned int *addr;
+ void __iomem *addr;
int ret;
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
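
[Editor's note] The smc911x probe change above swaps an unsigned int * for void __iomem *. MMIO mappings returned by ioremap() are not ordinary kernel pointers; the __iomem annotation lets sparse flag direct dereferences, and accesses are supposed to go through readl()/writel() and friends. A minimal, driver-agnostic sketch of that convention (the register offset is made up):

	#include <linux/io.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	#define EXAMPLE_ID_REG	0x50	/* hypothetical register offset */

	static int example_map_and_check(resource_size_t start, resource_size_t len)
	{
		void __iomem *base;
		u32 id;

		base = ioremap(start, len);
		if (!base)
			return -ENOMEM;

		id = readl(base + EXAMPLE_ID_REG);	/* never *(base + ...) */
		iounmap(base);

		return id ? 0 : -ENODEV;
	}
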
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 022b45bc14ff..591650a8de38 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1597,9 +1597,10 @@ smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void
smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strncpy(info->driver, CARDNAME, sizeof(info->driver));
- strncpy(info->version, version, sizeof(info->version));
- strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, version, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
}
static int smc_ethtool_nwayreset(struct net_device *dev)
@@ -2386,8 +2387,6 @@ static const struct of_device_id smc91x_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, smc91x_match);
-#else
-#define smc91x_match NULL
#endif
static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2402,7 +2401,7 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
- .of_match_table = smc91x_match,
+ .of_match_table = of_match_ptr(smc91x_match),
},
};
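
[Editor's note] Both smc91x here and smsc911x in the next diff stop open-coding the "NULL when CONFIG_OF is off" fallback for their device-tree match tables: the table stays inside #ifdef CONFIG_OF and of_match_ptr() expands either to the table or to NULL, so the platform_driver initializer needs no #else branch. A minimal sketch of the pattern (driver name and compatible string are placeholders):

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	#ifdef CONFIG_OF
	static const struct of_device_id example_dt_ids[] = {
		{ .compatible = "vendor,example-nic" },	/* hypothetical */
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, example_dt_ids);
	#endif

	static int example_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.driver	= {
			.name		= "example-nic",
			.owner		= THIS_MODULE,
			/* expands to NULL when CONFIG_OF is not set */
			.of_match_table	= of_match_ptr(example_dt_ids),
		},
	};
	module_platform_driver(example_driver);
	MODULE_LICENSE("GPL");
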
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4616bf27d515..da5cc9a3b34c 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -997,9 +997,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X",
phydev->addr, phydev->phy_id);
- ret = phy_connect_direct(dev, phydev,
- &smsc911x_phy_adjust_link, 0,
- pdata->config.phy_interface);
+ ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link,
+ pdata->config.phy_interface);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -1831,7 +1830,6 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
spin_lock_irq(&pdata->mac_lock);
@@ -2575,11 +2573,13 @@ static const struct dev_pm_ops smsc911x_pm_ops = {
#define SMSC911X_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
static const struct of_device_id smsc911x_dt_ids[] = {
{ .compatible = "smsc,lan9115", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
+#endif
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
@@ -2588,7 +2588,7 @@ static struct platform_driver smsc911x_driver = {
.name = SMSC_CHIPNAME,
.owner = THIS_MODULE,
.pm = SMSC911X_PM_OPS,
- .of_match_table = smsc911x_dt_ids,
+ .of_match_table = of_match_ptr(smsc911x_dt_ids),
},
};
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 3c586585e1b3..d457fa2d7509 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1179,7 +1179,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
phydev->phy_id);
phydev = phy_connect(dev, dev_name(&phydev->dev),
- smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -1250,12 +1250,11 @@ static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
BUG_ON(!pd->tx_ring);
- pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
- TX_RING_SIZE), GFP_KERNEL);
- if (!pd->tx_buffers) {
- smsc_warn(IFUP, "Failed to allocated tx_buffers");
+ pd->tx_buffers = kmalloc_array(TX_RING_SIZE,
+ sizeof(struct smsc9420_ring_info),
+ GFP_KERNEL);
+ if (!pd->tx_buffers)
return -ENOMEM;
- }
/* Initialize the TX Ring */
for (i = 0; i < TX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 1164930a40a5..c0ea838c78d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -26,8 +26,8 @@ config STMMAC_PLATFORM
If unsure, say N.
config STMMAC_PCI
- bool "STMMAC PCI bus support (EXPERIMENTAL)"
- depends on STMMAC_ETH && PCI && EXPERIMENTAL
+ bool "STMMAC PCI bus support"
+ depends on STMMAC_ETH && PCI
---help---
This is to select the Synopsys DWMAC available on PCI devices,
if you have a controller with this interface, say Y or M here.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 023a4fb4efa5..b05df8983be5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -127,14 +127,14 @@ static inline int stmmac_register_platform(void)
}
static inline void stmmac_unregister_platform(void)
{
- platform_driver_register(&stmmac_pltfr_driver);
+ platform_driver_unregister(&stmmac_pltfr_driver);
}
#else
static inline int stmmac_register_platform(void)
{
pr_debug("stmmac: do not register the platf driver\n");
- return -EINVAL;
+ return 0;
}
static inline void stmmac_unregister_platform(void)
{
@@ -162,7 +162,7 @@ static inline int stmmac_register_pci(void)
{
pr_debug("stmmac: do not register the PCI driver\n");
- return -EINVAL;
+ return 0;
}
static inline void stmmac_unregister_pci(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1372ce210b58..d1ac39c1b05d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -210,8 +210,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
- strcpy(info->version, DRV_MODULE_VERSION);
- info->fw_version[0] = '\0';
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int stmmac_ethtool_getsettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 542edbcd92c7..39c6c5524633 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -69,7 +69,7 @@
#undef STMMAC_XMIT_DEBUG
/*#define STMMAC_XMIT_DEBUG*/
-#ifdef STMMAC_TX_DEBUG
+#ifdef STMMAC_XMIT_DEBUG
#define TX_DBG(fmt, args...) printk(fmt, ## args)
#else
#define TX_DBG(fmt, args...) do { } while (0)
@@ -428,8 +428,7 @@ static int stmmac_init_phy(struct net_device *dev)
priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
- interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -531,17 +530,18 @@ static void init_dma_desc_rings(struct net_device *dev)
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
- priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
- priv->rx_skbuff =
- kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
priv->dma_rx =
(struct dma_desc *)dma_alloc_coherent(priv->device,
rxsize *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
- priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
- GFP_KERNEL);
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
priv->dma_tx =
(struct dma_desc *)dma_alloc_coherent(priv->device,
txsize *
@@ -2194,18 +2194,20 @@ int stmmac_restore(struct net_device *ndev)
*/
static int __init stmmac_init(void)
{
- int err_plt = 0;
- int err_pci = 0;
-
- err_plt = stmmac_register_platform();
- err_pci = stmmac_register_pci();
-
- if ((err_pci) && (err_plt)) {
- pr_err("stmmac: driver registration failed\n");
- return -EINVAL;
- }
+ int ret;
+ ret = stmmac_register_platform();
+ if (ret)
+ goto err;
+ ret = stmmac_register_pci();
+ if (ret)
+ goto err_pci;
return 0;
+err_pci:
+ stmmac_unregister_platform();
+err:
+ pr_err("stmmac: driver registration failed\n");
+ return ret;
}
static void __exit stmmac_exit(void)
@@ -2252,7 +2254,7 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "pause:", 6)) {
if (kstrtoint(opt + 6, 0, &pause))
goto err;
- } else if (!strncmp(opt, "eee_timer:", 6)) {
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0376a5e6b2bf..0b9829fe3eea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -188,8 +188,6 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_fail;
}
- priv->mii = new_bus;
-
found = 0;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
struct phy_device *phydev = new_bus->phy_map[addr];
@@ -237,8 +235,14 @@ int stmmac_mdio_register(struct net_device *ndev)
}
}
- if (!found)
+ if (!found) {
pr_warning("%s: No PHY found\n", ndev->name);
+ mdiobus_unregister(new_bus);
+ mdiobus_free(new_bus);
+ return -ENODEV;
+ }
+
+ priv->mii = new_bus;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 064eaac9616f..19b3a2567a46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -102,6 +102,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed", __func__);
+ ret = -ENODEV;
goto err_out;
}
priv->dev->irq = pdev->irq;
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index 57bfd8599679..3074aa374c6b 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -32,8 +32,8 @@ config HAPPYMEAL
will be called sunhme.
config SUNBMAC
- tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
- depends on SBUS && EXPERIMENTAL
+ tristate "Sun BigMAC 10/100baseT support"
+ depends on SBUS
select CRC32
---help---
This driver supports the "be" interface available as an Sbus option.
@@ -61,7 +61,7 @@ config SUNGEM
select SUNGEM_PHY
---help---
Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
- <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
+ <http://docs.oracle.com/cd/E19455-01/806-3985-10/806-3985-10.pdf>.
config CASSINI
tristate "Sun Cassini support"
@@ -69,7 +69,7 @@ config CASSINI
select CRC32
---help---
Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
- <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
+ <http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>.
config SUNVNET
tristate "Sun Virtual Network support"
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a0bdf0779466..e4c1c88e4c2a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -4342,7 +4342,7 @@ static int niu_alloc_rx_ring_info(struct niu *np,
{
BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
- rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
+ rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
GFP_KERNEL);
if (!rp->rxhash)
return -ENOMEM;
@@ -8366,14 +8366,12 @@ static void niu_pci_vpd_validate(struct niu *np)
return;
}
- memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
+ memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
- val8 = dev->perm_addr[5];
- dev->perm_addr[5] += np->port;
- if (dev->perm_addr[5] < val8)
- dev->perm_addr[4]++;
-
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+ val8 = dev->dev_addr[5];
+ dev->dev_addr[5] += np->port;
+ if (dev->dev_addr[5] < val8)
+ dev->dev_addr[4]++;
}
static int niu_pci_probe_sprom(struct niu *np)
@@ -8470,29 +8468,27 @@ static int niu_pci_probe_sprom(struct niu *np)
val = nr64(ESPC_MAC_ADDR0);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
- dev->perm_addr[0] = (val >> 0) & 0xff;
- dev->perm_addr[1] = (val >> 8) & 0xff;
- dev->perm_addr[2] = (val >> 16) & 0xff;
- dev->perm_addr[3] = (val >> 24) & 0xff;
+ dev->dev_addr[0] = (val >> 0) & 0xff;
+ dev->dev_addr[1] = (val >> 8) & 0xff;
+ dev->dev_addr[2] = (val >> 16) & 0xff;
+ dev->dev_addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
- dev->perm_addr[4] = (val >> 0) & 0xff;
- dev->perm_addr[5] = (val >> 8) & 0xff;
+ dev->dev_addr[4] = (val >> 0) & 0xff;
+ dev->dev_addr[5] = (val >> 8) & 0xff;
- if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
- dev->perm_addr);
+ dev->dev_addr);
return -EINVAL;
}
- val8 = dev->perm_addr[5];
- dev->perm_addr[5] += np->port;
- if (dev->perm_addr[5] < val8)
- dev->perm_addr[4]++;
-
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+ val8 = dev->dev_addr[5];
+ dev->dev_addr[5] += np->port;
+ if (dev->dev_addr[5] < val8)
+ dev->dev_addr[4]++;
val = nr64(ESPC_MOD_STR_LEN);
netif_printk(np, probe, KERN_DEBUG, np->dev,
@@ -9267,16 +9263,14 @@ static int niu_get_of_props(struct niu *np)
netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
dp->full_name, prop_len);
}
- memcpy(dev->perm_addr, mac_addr, dev->addr_len);
- if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+ memcpy(dev->dev_addr, mac_addr, dev->addr_len);
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
netdev_err(dev, "%s: OF MAC address is invalid\n",
dp->full_name);
- netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
+ netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr);
return -EINVAL;
}
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
-
model = of_get_property(dp, "model", &prop_len);
if (model)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index be82f6d13c51..5fafca065305 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1042,8 +1042,8 @@ static void bigmac_set_multicast(struct net_device *dev)
/* Ethtool support... */
static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "sunbmac");
- strcpy(info->version, "2.0");
+ strlcpy(info->driver, "sunbmac", sizeof(info->driver));
+ strlcpy(info->version, "2.0", sizeof(info->version));
}
static u32 bigmac_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 1dcee6915843..49bf3e2eb652 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -685,13 +685,14 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
- strcpy(info->driver, "sunqe");
- strcpy(info->version, "3.0");
+ strlcpy(info->driver, "sunqe", sizeof(info->driver));
+ strlcpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (regs)
- sprintf(info->bus_info, "SBUS:%d", regs->which_io);
+ snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
+ regs->which_io);
}
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index e1b895530827..289b4eefb42f 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -882,8 +882,8 @@ static int vnet_set_mac_addr(struct net_device *dev, void *p)
static void vnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static u32 vnet_get_msglevel(struct net_device *dev)
@@ -1032,8 +1032,6 @@ static struct vnet *vnet_new(const u64 *local_mac)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
vp = netdev_priv(dev);
spin_lock_init(&vp->lock);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 1e4d743ff03e..e15cc71b826d 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2179,10 +2179,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct bdx_priv *priv = netdev_priv(netdev);
- strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
- strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
- strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcat(drvinfo->bus_info, pci_name(priv->pdev),
+ strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 4426151d4ac9..de71b1ec4625 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -88,8 +88,8 @@ config TLAN
Please email feedback to <torben.mathiasen@compaq.com>.
config CPMAC
- tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && AR7
+ tristate "TI AR7 CPMAC Ethernet support"
+ depends on AR7
select PHYLIB
---help---
TI AR7 CPMAC Ethernet support
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index d9625f62b026..31bbbca341a7 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -904,10 +904,9 @@ static int cpmac_set_ringparam(struct net_device *dev,
static void cpmac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "cpmac");
- strcpy(info->version, CPMAC_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "%s", "cpmac");
+ strlcpy(info->driver, "cpmac", sizeof(info->driver));
+ strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
info->regdump_len = 0;
}
@@ -1173,8 +1172,8 @@ static int cpmac_probe(struct platform_device *pdev)
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
mdio_bus_id, phy_id);
- priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phy)) {
if (netif_msg_drv(priv))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 40aff684aa23..7e93df6585e7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -32,6 +32,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/if_vlan.h>
#include <linux/platform_data/cpsw.h>
@@ -118,6 +119,13 @@ do { \
#define TX_PRIORITY_MAPPING 0x33221100
#define CPDMA_TX_PRIORITY_MAP 0x76543210
+#define CPSW_VLAN_AWARE BIT(1)
+#define CPSW_ALE_VLAN_AWARE 1
+
+#define CPSW_FIFO_NORMAL_MODE (0 << 15)
+#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
+#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+
#define cpsw_enable_irq(priv) \
do { \
u32 i; \
@@ -250,7 +258,7 @@ struct cpsw_ss_regs {
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
- u32 flow_thresh;
+ u32 tx_in_ctl;
u32 port_vlan;
u32 tx_pri_map;
u32 cpdma_tx_pri_map;
@@ -277,6 +285,9 @@ struct cpsw_slave {
u32 mac_control;
struct cpsw_slave_data *data;
struct phy_device *phy;
+ struct net_device *ndev;
+ u32 port_vlan;
+ u32 open_stat;
};
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -315,17 +326,65 @@ struct cpsw_priv {
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
- struct cpts cpts;
+ struct cpts *cpts;
+ u32 emac_port;
};
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
-#define for_each_slave(priv, func, arg...) \
- do { \
- int idx; \
- for (idx = 0; idx < (priv)->data.slaves; idx++) \
- (func)((priv)->slaves + idx, ##arg); \
+#define for_each_slave(priv, func, arg...) \
+ do { \
+ int idx; \
+ if (priv->data.dual_emac) \
+ (func)((priv)->slaves + priv->emac_port, ##arg);\
+ else \
+ for (idx = 0; idx < (priv)->data.slaves; idx++) \
+ (func)((priv)->slaves + idx, ##arg); \
+ } while (0)
+#define cpsw_get_slave_ndev(priv, __slave_no__) \
+ (priv->slaves[__slave_no__].ndev)
+#define cpsw_get_slave_priv(priv, __slave_no__) \
+ ((priv->slaves[__slave_no__].ndev) ? \
+ netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
+
+#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
+ do { \
+ if (!priv->data.dual_emac) \
+ break; \
+ if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
+ ndev = cpsw_get_slave_ndev(priv, 0); \
+ priv = netdev_priv(ndev); \
+ skb->dev = ndev; \
+ } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
+ ndev = cpsw_get_slave_ndev(priv, 1); \
+ priv = netdev_priv(ndev); \
+ skb->dev = ndev; \
+ } \
+ } while (0)
+#define cpsw_add_mcast(priv, addr) \
+ do { \
+ if (priv->data.dual_emac) { \
+ struct cpsw_slave *slave = priv->slaves + \
+ priv->emac_port; \
+ int slave_port = cpsw_get_slave_port(priv, \
+ slave->slave_num); \
+ cpsw_ale_add_mcast(priv->ale, addr, \
+ 1 << slave_port | 1 << priv->host_port, \
+ ALE_VLAN, slave->port_vlan, 0); \
+ } else { \
+ cpsw_ale_add_mcast(priv->ale, addr, \
+ ALE_ALL_PORTS << priv->host_port, \
+ 0, 0, 0); \
+ } \
} while (0)
+static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+ else
+ return slave_num;
+}
+
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -344,8 +403,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
- ALE_ALL_PORTS << priv->host_port, 0, 0);
+ cpsw_add_mcast(priv, (u8 *)ha->addr);
}
}
}
@@ -374,9 +432,12 @@ void cpsw_tx_handler(void *token, int len, int status)
struct net_device *ndev = skb->dev;
struct cpsw_priv *priv = netdev_priv(ndev);
+ /* Check whether the queue is stopped due to stalled tx dma; if it
+ * is, restart it now that a tx descriptor has been freed
+ */
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
- cpts_tx_timestamp(&priv->cpts, skb);
+ cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -389,6 +450,8 @@ void cpsw_rx_handler(void *token, int len, int status)
struct cpsw_priv *priv = netdev_priv(ndev);
int ret = 0;
+ cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
+
/* free and bail if we are shutting down */
if (unlikely(!netif_running(ndev)) ||
unlikely(!netif_carrier_ok(ndev))) {
@@ -397,7 +460,7 @@ void cpsw_rx_handler(void *token, int len, int status)
}
if (likely(status >= 0)) {
skb_put(skb, len);
- cpts_rx_timestamp(&priv->cpts, skb);
+ cpts_rx_timestamp(priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
priv->stats.rx_bytes += len;
@@ -417,7 +480,7 @@ void cpsw_rx_handler(void *token, int len, int status)
return;
ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
}
WARN_ON(ret < 0);
}
@@ -430,37 +493,38 @@ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
cpsw_intr_disable(priv);
cpsw_disable_irq(priv);
napi_schedule(&priv->napi);
+ } else {
+ priv = cpsw_get_slave_priv(priv, 1);
+ if (likely(priv) && likely(netif_running(priv->ndev))) {
+ cpsw_intr_disable(priv);
+ cpsw_disable_irq(priv);
+ napi_schedule(&priv->napi);
+ }
}
return IRQ_HANDLED;
}
-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
-{
- if (priv->host_port == 0)
- return slave_num + 1;
- else
- return slave_num;
-}
-
static int cpsw_poll(struct napi_struct *napi, int budget)
{
struct cpsw_priv *priv = napi_to_priv(napi);
int num_tx, num_rx;
num_tx = cpdma_chan_process(priv->txch, 128);
- num_rx = cpdma_chan_process(priv->rxch, budget);
-
- if (num_rx || num_tx)
- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
- num_rx, num_tx);
+ if (num_tx)
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
napi_complete(napi);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
cpsw_enable_irq(priv);
}
+ if (num_rx || num_tx)
+ cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
+ num_rx, num_tx);
+
return num_rx;
}
@@ -559,6 +623,54 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
leader + strlen(name), val);
}
+static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
+{
+ u32 i;
+ u32 usage_count = 0;
+
+ if (!priv->data.dual_emac)
+ return 0;
+
+ for (i = 0; i < priv->data.slaves; i++)
+ if (priv->slaves[i].open_stat)
+ usage_count++;
+
+ return usage_count;
+}
+
+static inline int cpsw_tx_packet_submit(struct net_device *ndev,
+ struct cpsw_priv *priv, struct sk_buff *skb)
+{
+ if (!priv->data.dual_emac)
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 0, GFP_KERNEL);
+
+ if (ndev == cpsw_get_slave_ndev(priv, 0))
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 1, GFP_KERNEL);
+ else
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 2, GFP_KERNEL);
+}
+
+static inline void cpsw_add_dual_emac_def_ale_entries(
+ struct cpsw_priv *priv, struct cpsw_slave *slave,
+ u32 slave_port)
+{
+ u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+
+ if (priv->version == CPSW_VERSION_1)
+ slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
+ else
+ slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
+ cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan, 0);
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, slave->port_vlan);
+}
+
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
char name[32];
@@ -588,11 +700,14 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_port = cpsw_get_slave_port(priv, slave->slave_num);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << slave_port, 0, ALE_MCAST_FWD_2);
+ if (priv->data.dual_emac)
+ cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
+ else
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
- &cpsw_adjust_link, 0, slave->data->phy_if);
+ &cpsw_adjust_link, slave->data->phy_if);
if (IS_ERR(slave->phy)) {
dev_err(priv->dev, "phy %s not found on slave %d\n",
slave->data->phy_id, slave->slave_num);
@@ -604,14 +719,44 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
}
+static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
+{
+ const int vlan = priv->data.default_vlan;
+ const int port = priv->host_port;
+ u32 reg;
+ int i;
+
+ reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+
+ writel(vlan, &priv->host_port_regs->port_vlan);
+
+ for (i = 0; i < 2; i++)
+ slave_write(priv->slaves + i, vlan, reg);
+
+ cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
+ ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
+ (ALE_PORT_1 | ALE_PORT_2) << port);
+}
+
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
+ u32 control_reg;
+ u32 fifo_mode;
+
/* soft reset the controller and initialize ale */
soft_reset("cpsw", &priv->regs->soft_reset);
cpsw_ale_start(priv->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
+ cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&priv->regs->control);
+ control_reg |= CPSW_VLAN_AWARE;
+ writel(control_reg, &priv->regs->control);
+ fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+ CPSW_FIFO_NORMAL_MODE;
+ writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
__raw_writel(CPDMA_TX_PRIORITY_MAP,
@@ -621,9 +766,12 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
cpsw_ale_control_set(priv->ale, priv->host_port,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << priv->host_port, 0, ALE_MCAST_FWD_2);
+ if (!priv->data.dual_emac) {
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ 0, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+ }
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -632,7 +780,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
- cpsw_intr_disable(priv);
+ if (!cpsw_common_res_usage_state(priv))
+ cpsw_intr_disable(priv);
netif_carrier_off(ndev);
pm_runtime_get_sync(&priv->pdev->dev);
@@ -644,43 +793,55 @@ static int cpsw_ndo_open(struct net_device *ndev)
CPSW_RTL_VERSION(reg));
/* initialize host and slave ports */
- cpsw_init_host_port(priv);
+ if (!cpsw_common_res_usage_state(priv))
+ cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
- /* setup tx dma to fixed prio and zero offset */
- cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
- cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+ /* Add default VLAN */
+ if (!priv->data.dual_emac)
+ cpsw_add_default_vlan(priv);
- /* disable priority elevation and enable statistics on all ports */
- __raw_writel(0, &priv->regs->ptype);
+ if (!cpsw_common_res_usage_state(priv)) {
+ /* setup tx dma to fixed prio and zero offset */
+ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
+ cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
- /* enable statistics collection only on the host port */
- __raw_writel(0x7, &priv->regs->stat_port_en);
+ /* disable priority elevation */
+ __raw_writel(0, &priv->regs->ptype);
- if (WARN_ON(!priv->data.rx_descs))
- priv->data.rx_descs = 128;
+ /* enable statistics collection on all ports */
+ __raw_writel(0x7, &priv->regs->stat_port_en);
- for (i = 0; i < priv->data.rx_descs; i++) {
- struct sk_buff *skb;
+ if (WARN_ON(!priv->data.rx_descs))
+ priv->data.rx_descs = 128;
- ret = -ENOMEM;
- skb = netdev_alloc_skb_ip_align(priv->ndev,
- priv->rx_packet_max);
- if (!skb)
- break;
- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
- if (WARN_ON(ret < 0))
- break;
+ for (i = 0; i < priv->data.rx_descs; i++) {
+ struct sk_buff *skb;
+
+ ret = -ENOMEM;
+ skb = netdev_alloc_skb_ip_align(priv->ndev,
+ priv->rx_packet_max);
+ if (!skb)
+ break;
+ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+ skb_tailroom(skb), 0, GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+ /* continue even if we didn't manage to submit all
+ * receive descs
+ */
+ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
}
- /* continue even if we didn't manage to submit all receive descs */
- cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
napi_enable(&priv->napi);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = true;
return 0;
}
@@ -701,12 +862,17 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_stop_queue(priv->ndev);
napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
- cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
- cpdma_ctlr_stop(priv->dma);
- cpsw_ale_stop(priv->ale);
+
+ if (cpsw_common_res_usage_state(priv) <= 1) {
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_stop(priv->dma);
+ cpsw_ale_stop(priv->ale);
+ }
for_each_slave(priv, cpsw_slave_stop, priv);
pm_runtime_put_sync(&priv->pdev->dev);
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = false;
return 0;
}
@@ -724,18 +890,24 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->cpts->tx_enable)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
- ret = cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, GFP_KERNEL);
+ ret = cpsw_tx_packet_submit(ndev, priv, skb);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
}
+ /* If there are no free tx descriptors left then we need to
+ * tell the kernel to stop sending us tx frames.
+ */
+ if (unlikely(cpdma_check_free_tx_desc(priv->txch)))
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
fail:
priv->stats.tx_dropped++;
@@ -773,7 +945,7 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
u32 ts_en, seq_id;
- if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+ if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -781,10 +953,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (priv->cpts.tx_enable)
+ if (priv->cpts->tx_enable)
ts_en |= CPSW_V1_TS_TX_EN;
- if (priv->cpts.rx_enable)
+ if (priv->cpts->rx_enable)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -793,16 +965,21 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ struct cpsw_slave *slave;
u32 ctrl, mtype;
+ if (priv->data.dual_emac)
+ slave = &priv->slaves[priv->emac_port];
+ else
+ slave = &priv->slaves[priv->data.cpts_active_slave];
+
ctrl = slave_read(slave, CPSW2_CONTROL);
ctrl &= ~CTRL_ALL_TS_MASK;
- if (priv->cpts.tx_enable)
+ if (priv->cpts->tx_enable)
ctrl |= CTRL_TX_TS_BITS;
- if (priv->cpts.rx_enable)
+ if (priv->cpts->rx_enable)
ctrl |= CTRL_RX_TS_BITS;
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -815,7 +992,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct cpts *cpts = &priv->cpts;
+ struct cpts *cpts = priv->cpts;
struct hwtstamp_config cfg;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -901,7 +1078,9 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpdma_chan_start(priv->txch);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
}
static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
@@ -920,10 +1099,79 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_interrupt(ndev->irq, priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
}
#endif
+static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ int ret;
+
+ ret = cpsw_ale_add_vlan(priv->ale, vid,
+ ALE_ALL_PORTS << priv->host_port,
+ 0, ALE_ALL_PORTS << priv->host_port,
+ (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ ALE_ALL_PORTS << priv->host_port,
+ ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(priv->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ unsigned short vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ if (vid == priv->data.default_vlan)
+ return 0;
+
+ dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ return cpsw_add_vlan_ale_entry(priv, vid);
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ unsigned short vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (vid == priv->data.default_vlan)
+ return 0;
+
+ dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+ if (ret != 0)
+ return ret;
+
+ return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -938,15 +1186,18 @@ static const struct net_device_ops cpsw_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- strcpy(info->driver, "TI CPSW Driver v1.0");
- strcpy(info->version, "1.0");
- strcpy(info->bus_info, priv->pdev->name);
+
+ strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver));
+ strlcpy(info->version, "1.0", sizeof(info->version));
+ strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
}
static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -974,7 +1225,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = priv->cpts.phc_index;
+ info->phc_index = priv->cpts->phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1011,6 +1262,7 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->data = data;
slave->regs = regs + slave_reg_ofs;
slave->sliver = regs + sliver_reg_ofs;
+ slave->port_vlan = data->dual_emac_res_vlan;
}
static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -1051,12 +1303,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->cpts_clock_shift = prop;
- data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
- data->slaves, GFP_KERNEL);
- if (!data->slave_data) {
- pr_err("Could not allocate slave memory.\n");
+ data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
+ if (!data->slave_data)
return -EINVAL;
- }
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
pr_err("Missing cpdma_channels property in the DT.\n");
@@ -1093,6 +1343,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
+ if (!of_property_read_u32(node, "dual_emac", &prop))
+ data->dual_emac = prop;
+
/*
* Populate all the child nodes here...
*/
@@ -1126,6 +1379,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (mac_addr)
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+ if (data->dual_emac) {
+ if (of_property_read_u32(node, "dual_emac_res_vlan",
+ &prop)) {
+ pr_err("Missing dual_emac_res_vlan in DT.\n");
+ slave_data->dual_emac_res_vlan = i + 1;
+ pr_err("Using %d as reserved VLAN for slave %d\n",
+ slave_data->dual_emac_res_vlan, i);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
i++;
}
@@ -1136,6 +1401,79 @@ error_ret:
return ret;
}
+static int cpsw_probe_dual_emac(struct platform_device *pdev,
+ struct cpsw_priv *priv)
+{
+ struct cpsw_platform_data *data = &priv->data;
+ struct net_device *ndev;
+ struct cpsw_priv *priv_sl2;
+ int ret = 0, i;
+
+ ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ if (!ndev) {
+ pr_err("cpsw: error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv_sl2 = netdev_priv(ndev);
+ spin_lock_init(&priv_sl2->lock);
+ priv_sl2->data = *data;
+ priv_sl2->pdev = pdev;
+ priv_sl2->ndev = ndev;
+ priv_sl2->dev = &ndev->dev;
+ priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv_sl2->rx_packet_max = max(rx_packet_max, 128);
+
+ if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
+ memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
+ ETH_ALEN);
+ pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ } else {
+ random_ether_addr(priv_sl2->mac_addr);
+ pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ }
+ memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+
+ priv_sl2->slaves = priv->slaves;
+ priv_sl2->clk = priv->clk;
+
+ priv_sl2->cpsw_res = priv->cpsw_res;
+ priv_sl2->regs = priv->regs;
+ priv_sl2->host_port = priv->host_port;
+ priv_sl2->host_port_regs = priv->host_port_regs;
+ priv_sl2->wr_regs = priv->wr_regs;
+ priv_sl2->dma = priv->dma;
+ priv_sl2->txch = priv->txch;
+ priv_sl2->rxch = priv->rxch;
+ priv_sl2->ale = priv->ale;
+ priv_sl2->emac_port = 1;
+ priv->slaves[1].ndev = ndev;
+ priv_sl2->cpts = priv->cpts;
+ priv_sl2->version = priv->version;
+
+ for (i = 0; i < priv->num_irqs; i++) {
+ priv_sl2->irqs_table[i] = priv->irqs_table[i];
+ priv_sl2->num_irqs = priv->num_irqs;
+ }
+
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ret = register_netdev(ndev);
+ if (ret) {
+ pr_err("cpsw: error registering net device\n");
+ free_netdev(ndev);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
static int cpsw_probe(struct platform_device *pdev)
{
struct cpsw_platform_data *data = pdev->dev.platform_data;
@@ -1162,6 +1500,11 @@ static int cpsw_probe(struct platform_device *pdev)
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
+ priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
+ if (!priv->cpts) {
+ pr_err("error allocating cpts\n");
+ goto clean_ndev_ret;
+ }
/*
* This may be required here for child devices.
@@ -1194,6 +1537,9 @@ static int cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
+ priv->slaves[0].ndev = ndev;
+ priv->emac_port = 0;
+
priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "fck is not found\n");
@@ -1248,7 +1594,7 @@ static int cpsw_probe(struct platform_device *pdev)
switch (priv->version) {
case CPSW_VERSION_1:
priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1259,7 +1605,7 @@ static int cpsw_probe(struct platform_device *pdev)
break;
case CPSW_VERSION_2:
priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
@@ -1346,7 +1692,7 @@ static int cpsw_probe(struct platform_device *pdev)
k++;
}
- ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1361,13 +1707,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_irq_ret;
}
- if (cpts_register(&pdev->dev, &priv->cpts,
+ if (cpts_register(&pdev->dev, priv->cpts,
data->cpts_clock_mult, data->cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
priv->cpsw_res->start, ndev->irq);
+ if (priv->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(pdev, priv);
+ if (ret) {
+ cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+ goto clean_irq_ret;
+ }
+ }
+
return 0;
clean_irq_ret:
@@ -1406,7 +1760,7 @@ static int cpsw_remove(struct platform_device *pdev)
pr_info("removing device");
platform_set_drvdata(pdev, NULL);
- cpts_unregister(&priv->cpts);
+ cpts_unregister(priv->cpts);
free_irq(ndev->irq, priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0e9ccc2cf91f..7fa60d6092ed 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
return idx;
}
-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
+int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -160,6 +160,8 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
type = cpsw_ale_get_entry_type(ale_entry);
if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
cpsw_ale_get_addr(ale_entry, entry_addr);
if (memcmp(entry_addr, addr, 6) == 0)
return idx;
@@ -167,6 +169,22 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
return -ENOENT;
}
+int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) == vid)
+ return idx;
+ }
+ return -ENOENT;
+}
+
static int cpsw_ale_match_free(struct cpsw_ale *ale)
{
u32 ale_entry[ALE_ENTRY_WORDS];
@@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
return 0;
}
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
+static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
+ int flags, u16 vid)
+{
+ if (flags & ALE_VLAN) {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+ } else {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ }
+}
+
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
cpsw_ale_set_addr(ale_entry, addr);
cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
cpsw_ale_set_port_num(ale_entry, port);
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
if (idx < 0)
@@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
return 0;
}
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -ENOENT;
@@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
}
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
- int super, int mcast_state)
+ int flags, u16 vid, int mcast_state)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx, mask;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx >= 0)
cpsw_ale_read(ale, idx, ale_entry);
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
cpsw_ale_set_addr(ale_entry, addr);
- cpsw_ale_set_super(ale_entry, super);
+ cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0);
cpsw_ale_set_mcast_state(ale_entry, mcast_state);
mask = cpsw_ale_get_port_mask(ale_entry);
@@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
return 0;
}
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -EINVAL;
@@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
return 0;
}
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+
+ cpsw_ale_set_vlan_untag_force(ale_entry, untag);
+ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+ cpsw_ale_set_vlan_member_list(ale_entry, port);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ if (port_mask)
+ cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
struct ale_control_info {
const char *name;
int offset, port_offset;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 2bd09cbce522..30daa1265f0c 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -64,8 +64,14 @@ enum cpsw_ale_port_state {
};
/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
-#define ALE_SECURE 1
-#define ALE_BLOCKED 2
+#define ALE_SECURE BIT(0)
+#define ALE_BLOCKED BIT(1)
+#define ALE_SUPER BIT(2)
+#define ALE_VLAN BIT(3)
+
+#define ALE_PORT_HOST BIT(0)
+#define ALE_PORT_1 BIT(1)
+#define ALE_PORT_2 BIT(2)
#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
@@ -81,11 +87,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid);
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
- int super, int mcast_state);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
+ int flags, u16 vid, int mcast_state);
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int flags, u16 vid);
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast);
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 337766738eca..463597f919f1 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -27,8 +27,6 @@
#include <linux/uaccess.h>
#include <linux/workqueue.h>
-#include <plat/clock.h>
-
#include "cpts.h"
#ifdef CONFIG_TI_CPTS
@@ -249,8 +247,7 @@ static void cpts_clk_init(struct cpts *cpts)
cpts->refclk = NULL;
return;
}
- clk_enable(cpts->refclk);
- cpts->freq = cpts->refclk->recalc(cpts->refclk);
+ clk_prepare_enable(cpts->refclk);
}
static void cpts_clk_release(struct cpts *cpts)
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index e1bba3a496b2..fe993cdd7e23 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -120,7 +120,6 @@ struct cpts {
struct delayed_work overflow_work;
int phc_index;
struct clk *refclk;
- unsigned long freq;
struct list_head events;
struct list_head pool;
struct cpts_event pool_data[CPTS_MAX_EVENTS];
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 49956730cd8d..ee13dc78430c 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -60,6 +60,9 @@
#define CPDMA_DESC_EOQ BIT(28)
#define CPDMA_DESC_TD_COMPLETE BIT(27)
#define CPDMA_DESC_PASS_CRC BIT(26)
+#define CPDMA_DESC_TO_PORT_EN BIT(20)
+#define CPDMA_TO_PORT_SHIFT 16
+#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_TEARDOWN_VALUE 0xfffffffc
@@ -105,13 +108,13 @@ struct cpdma_ctlr {
};
struct cpdma_chan {
+ struct cpdma_desc __iomem *head, *tail;
+ void __iomem *hdp, *cp, *rxfree;
enum cpdma_state state;
struct cpdma_ctlr *ctlr;
int chan_num;
spinlock_t lock;
- struct cpdma_desc __iomem *head, *tail;
int count;
- void __iomem *hdp, *cp, *rxfree;
u32 mask;
cpdma_handler_fn handler;
enum dma_data_direction dir;
@@ -132,6 +135,14 @@ struct cpdma_chan {
#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+#define cpdma_desc_to_port(chan, mode, directed) \
+ do { \
+ if (!is_rx_chan(chan) && ((directed == 1) || \
+ (directed == 2))) \
+ mode |= (CPDMA_DESC_TO_PORT_EN | \
+ (directed << CPDMA_TO_PORT_SHIFT)); \
+ } while (0)
+
/*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other
@@ -217,17 +228,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
}
static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
unsigned long flags;
int index;
+ int desc_start;
+ int desc_end;
struct cpdma_desc __iomem *desc = NULL;
spin_lock_irqsave(&pool->lock, flags);
- index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
- num_desc, 0);
- if (index < pool->num_desc) {
+ if (is_rx) {
+ desc_start = 0;
+ desc_end = pool->num_desc/2;
+ } else {
+ desc_start = pool->num_desc/2;
+ desc_end = pool->num_desc;
+ }
+
+ index = bitmap_find_next_zero_area(pool->bitmap,
+ desc_end, desc_start, num_desc, 0);
+ if (index < desc_end) {
bitmap_set(pool->bitmap, index, num_desc);
desc = pool->iomap + pool->desc_size * index;
pool->used_desc++;
@@ -439,10 +460,8 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
if (ctlr->state != CPDMA_STATE_IDLE)
cpdma_ctlr_stop(ctlr);
- for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
- if (ctlr->channels[i])
- cpdma_chan_destroy(ctlr->channels[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ cpdma_chan_destroy(ctlr->channels[i]);
cpdma_desc_pool_destroy(ctlr->pool);
spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -473,11 +492,13 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
- dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler)
@@ -652,7 +673,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, gfp_t gfp_mask)
+ int len, int directed, gfp_t gfp_mask)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *desc;
@@ -668,7 +689,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
goto unlock_ret;
}
- desc = cpdma_desc_alloc(ctlr->pool, 1);
+ desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
if (!desc) {
chan->stats.desc_alloc_fail++;
ret = -ENOMEM;
@@ -682,6 +703,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+ cpdma_desc_to_port(chan, mode, directed);
desc_write(desc, hw_next, 0);
desc_write(desc, hw_buffer, buffer);
@@ -704,6 +726,29 @@ unlock_ret:
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ int index;
+ bool ret;
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ index = bitmap_find_next_zero_area(pool->bitmap,
+ pool->num_desc, pool->num_desc/2, 1, 0);
+
+ if (index < pool->num_desc)
+ ret = true;
+ else
+ ret = false;
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
+
static void __cpdma_chan_free(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc,
int outlen, int status)
@@ -749,7 +794,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
status = -EBUSY;
goto unlock_ret;
}
- status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
+ CPDMA_DESC_PORT_MASK);
chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
chan_write(chan, cp, desc_dma);
@@ -984,3 +1030,4 @@ unlock_ret:
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
+EXPORT_SYMBOL_GPL(cpdma_control_set);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index afa19a0c0d81..d9bcc6032fdc 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -24,6 +24,13 @@
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan) __chan_linear((chan)->chan_num)
+#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7)
+
+#define CPDMA_EOI_RX_THRESH 0x0
+#define CPDMA_EOI_RX 0x1
+#define CPDMA_EOI_TX 0x2
+#define CPDMA_EOI_MISC 0x3
+
struct cpdma_params {
struct device *dev;
void __iomem *dmaregs;
@@ -82,12 +89,13 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, gfp_t gfp_mask);
+ int len, int directed, gfp_t gfp_mask);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
enum cpdma_control {
CPDMA_CMD_IDLE, /* write-only */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2a3e2c56bc60..52c05366599a 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -120,7 +120,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_NUM_DESC (128)
-#define EMAC_DEF_TX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -342,7 +341,6 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
- atomic_t cur_tx;
const char *phy_id;
#ifdef CONFIG_OF
struct device_node *phy_node;
@@ -480,8 +478,8 @@ static void emac_dump_regs(struct emac_priv *priv)
static void emac_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, emac_version_string);
- strcpy(info->version, EMAC_MODULE_VERSION);
+ strlcpy(info->driver, emac_version_string, sizeof(info->driver));
+ strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
}
/**
@@ -1039,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
recycle:
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
WARN_ON(ret == -ENOMEM);
if (unlikely(ret < 0))
@@ -1050,10 +1048,10 @@ static void emac_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
- struct emac_priv *priv = netdev_priv(ndev);
-
- atomic_dec(&priv->cur_tx);
+ /* Check whether the queue is stopped due to stalled tx dma, if the
+ * queue is stopped then start the queue as we have free desc for tx
+ */
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
ndev->stats.tx_packets++;
@@ -1094,14 +1092,17 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
- GFP_KERNEL);
+ 0, GFP_KERNEL);
if (unlikely(ret_code != 0)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
goto fail_tx;
}
- if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+ /* If there is no more tx desc left free then we need to
+ * tell the kernel to stop sending us tx frames.
+ */
+ if (unlikely(cpdma_check_free_tx_desc(priv->txchan)))
netif_stop_queue(ndev);
return NETDEV_TX_OK;
@@ -1264,7 +1265,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
break;
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
if (WARN_ON(ret < 0))
break;
}
@@ -1600,7 +1600,7 @@ static int emac_dev_open(struct net_device *ndev)
if (priv->phy_id && *priv->phy_id) {
priv->phydev = phy_connect(ndev, priv->phy_id,
- &emac_adjust_link, 0,
+ &emac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cca25509b039..d04a622b08d4 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -320,10 +320,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
int ret, addr;
data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(dev, "failed to alloc device data\n");
+ if (!data)
return -ENOMEM;
- }
data->bus = mdiobus_alloc();
if (!data->bus) {
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 96070e9b50dc..36435499814b 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -2195,7 +2195,6 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
/* ISSUE: Note that "dev_addr" is now a pointer. */
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index e321d0b6fc88..445c0595c997 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1226,8 +1226,8 @@ int gelic_net_open(struct net_device *netdev)
void gelic_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
- strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int gelic_ether_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 9c288cd7d171..ffe519382e11 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -72,11 +72,13 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
card = netdev_priv(netdev);
/* clear and fill out info */
- memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strncpy(drvinfo->driver, spider_net_driver_name, 32);
- strncpy(drvinfo->version, VERSION, 32);
- strcpy(drvinfo->fw_version, "no information");
- strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
+ strlcpy(drvinfo->driver, spider_net_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "no information",
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(card->pdev),
+ sizeof(drvinfo->bus_info));
}
static void
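The same conversion shows up in several drivers here (gelic, spider_net, tc35815, axienet, netvsc, macvlan, ixp4xx): strcpy()/strncpy() into the fixed-size ethtool_drvinfo fields is replaced by strlcpy() with the field size, which both bounds the copy and guarantees NUL termination for long strings such as pci_name(). The driver-side memset() of the structure is dropped because, in kernels of this vintage, the ethtool core zeroes the structure before invoking the callback. A minimal sketch of the resulting pattern; "foo" and FOO_VERSION are placeholders:

static void foo_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "foo", sizeof(info->driver));
	strlcpy(info->version, FOO_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}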
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 9819349eaa1e..fe256094db35 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -633,9 +633,8 @@ static int tc_mii_probe(struct net_device *dev)
/* attach the mac to the phy */
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &tc_handle_link_change, 0,
- lp->chiptype == TC35815_TX4939 ?
- PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
+ &tc_handle_link_change,
+ lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
return PTR_ERR(phydev);
@@ -856,7 +855,6 @@ static int tc35815_init_one(struct pci_dev *pdev,
if (rc)
goto err_out;
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
dev->name,
chip_info[ent->driver_data].name,
@@ -1976,9 +1974,10 @@ tc35815_set_multicast_list(struct net_device *dev)
static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tc35815_local *lp = netdev_priv(dev);
- strcpy(info->driver, MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(lp->pci_dev));
+
+ strlcpy(info->driver, MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}
static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 7992b3e05d3d..185c721c52d7 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -417,6 +417,12 @@ enum chip_cmd_bits {
Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
+struct rhine_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
+
struct rhine_private {
/* Bit mask for configured VLAN ids */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -458,6 +464,8 @@ struct rhine_private {
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct rhine_stats rx_stats;
+ struct rhine_stats tx_stats;
u8 wolopts;
u8 tx_thresh, rx_thresh;
@@ -495,7 +503,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
-static struct net_device_stats *rhine_get_stats(struct net_device *dev);
+static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
@@ -842,7 +851,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_open = rhine_open,
.ndo_stop = rhine_close,
.ndo_start_xmit = rhine_start_tx,
- .ndo_get_stats = rhine_get_stats,
+ .ndo_get_stats64 = rhine_get_stats64,
.ndo_set_rx_mode = rhine_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -990,7 +999,6 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_info(dev, "Using random MAC address: %pM\n",
dev->dev_addr);
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* For Rhine-I/II, phy_id is loaded from EEPROM */
if (!phy_id)
@@ -1791,8 +1799,11 @@ static void rhine_tx(struct net_device *dev)
dev->stats.collisions += txstatus & 0x0F;
netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
(txstatus >> 3) & 0xF, txstatus & 0xF);
- dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
- dev->stats.tx_packets++;
+
+ u64_stats_update_begin(&rp->tx_stats.syncp);
+ rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
+ rp->tx_stats.packets++;
+ u64_stats_update_end(&rp->tx_stats.syncp);
}
/* Free the original skb. */
if (rp->tx_skbuff_dma[entry]) {
@@ -1801,7 +1812,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
- dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ dev_kfree_skb(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@@ -1924,8 +1935,11 @@ static int rhine_rx(struct net_device *dev, int limit)
if (unlikely(desc_length & DescTag))
__vlan_hwaccel_put_tag(skb, vlan_tci);
netif_receive_skb(skb);
- dev->stats.rx_bytes += pkt_len;
- dev->stats.rx_packets++;
+
+ u64_stats_update_begin(&rp->rx_stats.syncp);
+ rp->rx_stats.bytes += pkt_len;
+ rp->rx_stats.packets++;
+ u64_stats_update_end(&rp->rx_stats.syncp);
}
entry = (++rp->cur_rx) % RX_RING_SIZE;
rp->rx_head_desc = &rp->rx_ring[entry];
@@ -2010,25 +2024,37 @@ static void rhine_slow_event_task(struct work_struct *work)
if (intr_status & IntrPCIErr)
netif_warn(rp, hw, dev, "PCI error\n");
- napi_disable(&rp->napi);
- rhine_irq_disable(rp);
- /* Slow and safe. Consider __napi_schedule as a replacement ? */
- napi_enable(&rp->napi);
- napi_schedule(&rp->napi);
+ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
out_unlock:
mutex_unlock(&rp->task_lock);
}
-static struct net_device_stats *rhine_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rhine_private *rp = netdev_priv(dev);
+ unsigned int start;
spin_lock_bh(&rp->lock);
rhine_update_rx_crc_and_missed_errord(rp);
spin_unlock_bh(&rp->lock);
- return &dev->stats;
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
+ stats->rx_packets = rp->rx_stats.packets;
+ stats->rx_bytes = rp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
+ stats->tx_packets = rp->tx_stats.packets;
+ stats->tx_bytes = rp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
+
+ return stats;
}
static void rhine_set_rx_mode(struct net_device *dev)
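The via-rhine conversion from ndo_get_stats to ndo_get_stats64 uses the u64_stats_sync pattern: the hot path increments plain u64 counters inside an update_begin/update_end pair, and the (possibly 32-bit) reader retries until it observes a consistent snapshot, so no spinlock is needed per packet. A condensed sketch of both sides, assuming a structure shaped like rhine_stats above; the foo_* names are hypothetical:

#include <linux/u64_stats_sync.h>

struct foo_stats64 {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/* writer: RX/TX completion path */
static void foo_count(struct foo_stats64 *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* reader: called from ndo_get_stats64 */
static void foo_read(const struct foo_stats64 *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}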
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 352383890326..545043cc4c0b 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -570,7 +570,6 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
w5100_write_macaddr(priv);
return 0;
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 9d1d986f8d40..7cbd0e6fc6f3 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -490,7 +490,6 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
w5300_write_macaddr(priv);
return 0;
}
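Several hunks in this section (tilepro, w5100, w5300, davinci_emac, macvlan) drop the manual "ndev->addr_assign_type &= ~NET_ADDR_RANDOM" after copying a user-supplied MAC. The clearing appears redundant in this kernel generation because dev_set_mac_address() in the core marks the address as administratively set after a successful ndo_set_mac_address. A minimal sketch of the resulting callback; foo_hw_set_macaddr() is a hypothetical helper that programs the hardware:

static int foo_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* addr_assign_type is maintained by the core caller now */
	foo_hw_set_macaddr(ndev);
	return 0;
}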
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 5778a4ae1164..122d60c0481b 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -27,7 +27,7 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on (PPC32 || MICROBLAZE)
+ depends on MICROBLAZE
select PHYLIB
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index aad909d793d7..9fc2ada4c3c2 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -238,11 +238,9 @@ static int temac_dma_bd_init(struct net_device *ndev)
int i;
lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
- if (!lp->rx_skb) {
- dev_err(&ndev->dev,
- "can't allocate memory for DMA RX buffer\n");
+ if (!lp->rx_skb)
goto out;
- }
+
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
@@ -319,18 +317,10 @@ out:
* net_device_ops
*/
-static int temac_set_mac_address(struct net_device *ndev, void *address)
+static void temac_do_set_mac_address(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
-
- if (!is_valid_ether_addr(ndev->dev_addr))
- eth_hw_addr_random(ndev);
- else
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
-
/* set up unicast MAC address filter set its mac address */
mutex_lock(&lp->indirect_mutex);
temac_indirect_out32(lp, XTE_UAW0_OFFSET,
@@ -344,15 +334,26 @@ static int temac_set_mac_address(struct net_device *ndev, void *address)
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
mutex_unlock(&lp->indirect_mutex);
+}
+static int temac_init_mac_address(struct net_device *ndev, void *address)
+{
+ memcpy(ndev->dev_addr, address, ETH_ALEN);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ eth_hw_addr_random(ndev);
+ temac_do_set_mac_address(ndev);
return 0;
}
-static int netdev_set_mac_address(struct net_device *ndev, void *p)
+static int temac_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
- return temac_set_mac_address(ndev, addr->sa_data);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ temac_do_set_mac_address(ndev);
+ return 0;
}
static void temac_set_multicast_list(struct net_device *ndev)
@@ -579,7 +580,7 @@ static void temac_device_reset(struct net_device *ndev)
temac_setoptions(ndev,
lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
- temac_set_mac_address(ndev, NULL);
+ temac_do_set_mac_address(ndev);
/* Set address filter table */
temac_set_multicast_list(ndev);
@@ -938,7 +939,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
- .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1106,7 +1107,7 @@ static int temac_of_probe(struct platform_device *op)
rc = -ENODEV;
goto err_iounmap_2;
}
- temac_set_mac_address(ndev, (void *)addr);
+ temac_init_mac_address(ndev, (void *)addr);
rc = temac_mdio_setup(lp, op->dev.of_node);
if (rc)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d9f69b82cc4f..278c9db3b5b8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1124,9 +1124,8 @@ static int axienet_ethtools_set_settings(struct net_device *ndev,
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- memset(ed, 0, sizeof(struct ethtool_drvinfo));
- strcpy(ed->driver, DRIVER_NAME);
- strcpy(ed->version, DRIVER_VERSION);
+ strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}
@@ -1590,7 +1589,7 @@ static int axienet_of_probe(struct platform_device *op)
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&op->dev, "could not determine irqs\n");
ret = -ENOMEM;
goto err_iounmap_2;
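The axienet probe stops comparing against NO_IRQ, which is not defined (or not zero) on every architecture, and instead relies on irq_of_parse_and_map() returning 0 when no mapping can be created, so a non-positive value is the portable failure test. A sketch of the check as it reads after this hunk; np and lp follow the driver's own naming:

lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
/* irq_of_parse_and_map() returns 0 on failure, never a valid virq here */
if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
	dev_err(&op->dev, "could not determine irqs\n");
	return -ENODEV;
}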
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 94a1f94f74b8..98e09d0d3ce2 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1412,7 +1412,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx",
+ dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index d3ebb73277be..6958a5e87703 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -977,11 +977,12 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct port *port = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
port->firmware[0], port->firmware[1],
port->firmware[2], port->firmware[3]);
- strcpy(info->bus_info, "internal");
+ strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}
static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1450,7 +1451,7 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+ port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(port->phydev)) {
err = PTR_ERR(port->phydev);
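phy_connect() and phy_connect_direct() lose their u32 flags parameter (see the drivers/net/phy/phy_device.c hunks at the end of this section); the dev_flags already stored in the phy_device are used instead, which is why callers such as davinci_emac, tc35815 and ixp4xx_eth simply drop the 0 argument. A minimal sketch of a caller after the API change; foo_adjust_link and phy_id are placeholder names:

phydev = phy_connect(ndev, phy_id, &foo_adjust_link,
		     PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
	dev_err(&ndev->dev, "could not attach to PHY\n");
	return PTR_ERR(phydev);
}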
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 95dbcfdf131d..bf5e59687680 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -1,6 +1,6 @@
config MKISS
tristate "Serial port KISS driver"
- depends on AX25
+ depends on AX25 && TTY
select CRC16
---help---
KISS is a protocol used for the exchange of data between a computer
@@ -18,7 +18,7 @@ config MKISS
config 6PACK
tristate "Serial port 6PACK driver"
- depends on AX25
+ depends on AX25 && TTY
---help---
6pack is a transmission protocol for the data exchange between your
PC and your TNC (the Terminal Node Controller acts as a kind of
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index c2e5497397d5..02de6c891670 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -586,7 +586,8 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
static int __init bpq_init_driver(void)
{
#ifdef CONFIG_PROC_FS
- if (!proc_net_fops_create(&init_net, "bpqether", S_IRUGO, &bpq_info_fops)) {
+ if (!proc_create("bpqether", S_IRUGO, init_net.proc_net,
+ &bpq_info_fops)) {
printk(KERN_ERR
"bpq: cannot create /proc/net/bpqether entry.\n");
return -ENOENT;
@@ -610,7 +611,7 @@ static void __exit bpq_cleanup_driver(void)
unregister_netdevice_notifier(&bpq_dev_notifier);
- proc_net_remove(&init_net, "bpqether");
+ remove_proc_entry("bpqether", init_net.proc_net);
rtnl_lock();
while (!list_empty(&bpq_devices)) {
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index ce555d9ac02c..6636022a1027 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -463,13 +463,8 @@ static int __init setup_adapter(int card_base, int type, int n)
/* Initialize what is necessary for write_scc and write_scc_data */
info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
- if (!info) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
+ if (!info)
goto out;
- }
-
info->dev[0] = alloc_netdev(0, "", dev_setup);
if (!info->dev[0]) {
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 1b4a47bd32b7..bc1d52170389 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -2118,7 +2118,7 @@ static int __init scc_init_driver (void)
}
rtnl_unlock();
- proc_net_fops_create(&init_net, "z8530drv", 0, &scc_net_seq_fops);
+ proc_create("z8530drv", 0, init_net.proc_net, &scc_net_seq_fops);
return 0;
}
@@ -2173,7 +2173,7 @@ static void __exit scc_cleanup_driver(void)
if (Vector_Latch)
release_region(Vector_Latch, 1);
- proc_net_remove(&init_net, "z8530drv");
+ remove_proc_entry("z8530drv", init_net.proc_net);
}
MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index c6645f1017af..4cf8f1017aad 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1167,7 +1167,7 @@ static int __init yam_init_driver(void)
yam_timer.expires = jiffies + HZ / 100;
add_timer(&yam_timer);
- proc_net_fops_create(&init_net, "yam", S_IRUGO, &yam_info_fops);
+ proc_create("yam", S_IRUGO, init_net.proc_net, &yam_info_fops);
return 0;
error:
while (--i >= 0) {
@@ -1199,7 +1199,7 @@ static void __exit yam_cleanup_driver(void)
kfree(p);
}
- proc_net_remove(&init_net, "yam");
+ remove_proc_entry("yam", init_net.proc_net);
}
/* --------------------------------------------------------------------- */
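The hamradio drivers (bpqether, scc, yam) move off the removed proc_net_fops_create()/proc_net_remove() wrappers and call proc_create()/remove_proc_entry() directly on init_net.proc_net. A minimal module-init/exit sketch of the new style; foo and foo_fops are hypothetical:

#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static int __init foo_init(void)
{
	if (!proc_create("foo", S_IRUGO, init_net.proc_net, &foo_fops))
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	remove_proc_entry("foo", init_net.proc_net);
}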
diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig
index 95eb34fdbba7..f71515dc5beb 100644
--- a/drivers/net/hippi/Kconfig
+++ b/drivers/net/hippi/Kconfig
@@ -3,8 +3,8 @@
#
config HIPPI
- bool "HIPPI driver support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && INET && PCI
+ bool "HIPPI driver support"
+ depends on INET && PCI
---help---
HIgh Performance Parallel Interface (HIPPI) is a 800Mbit/sec and
1600Mbit/sec dual-simplex switched or point-to-point network. HIPPI
@@ -18,7 +18,7 @@ config HIPPI
if HIPPI
config ROADRUNNER
- tristate "Essential RoadRunner HIPPI PCI adapter support (EXPERIMENTAL)"
+ tristate "Essential RoadRunner HIPPI PCI adapter support"
depends on PCI
---help---
Say Y here if this is your PCI HIPPI network card.
@@ -27,7 +27,7 @@ config ROADRUNNER
will be called rrunner. If unsure, say N.
config ROADRUNNER_LARGE_RINGS
- bool "Use large TX/RX rings (EXPERIMENTAL)"
+ bool "Use large TX/RX rings"
depends on ROADRUNNER
---help---
If you say Y here, the RoadRunner driver will preallocate up to 2 MB
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 5fd6f4674326..e6fe0d80d612 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -84,7 +84,7 @@ struct hv_netvsc_packet {
};
struct netvsc_device_info {
- unsigned char mac_adr[6];
+ unsigned char mac_adr[ETH_ALEN];
bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
};
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f825a629a699..5f85205cd12b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -304,9 +304,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, HV_DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, HV_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
@@ -349,7 +349,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
struct net_device_context *ndevctx = netdev_priv(ndev);
struct hv_device *hdev = ndevctx->device_ctx;
struct sockaddr *addr = p;
- char save_adr[14];
+ char save_adr[ETH_ALEN];
unsigned char save_aatype;
int err;
@@ -498,8 +498,7 @@ static int netvsc_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Network guid */
- { VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
- 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
+ { HV_NIC_GUID, },
{ },
};
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index a4a62e170ec0..fc1687ea4a42 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -751,16 +751,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return 0;
}
-static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
-{
- return 0;
-}
-
-static int at86rf230_resume(struct spi_device *spi)
-{
- return 0;
-}
-
static int at86rf230_fill_data(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
@@ -948,8 +938,6 @@ static struct spi_driver at86rf230_driver = {
},
.probe = at86rf230_probe,
.remove = at86rf230_remove,
- .suspend = at86rf230_suspend,
- .resume = at86rf230_resume,
};
module_spi_driver(at86rf230_driver);
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 1e9cb0bbf62c..8f1c25676d44 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -372,7 +372,6 @@ static int ieee802154fake_probe(struct platform_device *pdev)
memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
dev->addr_len);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/*
* For now we'd like to emulate 2.4 GHz-only device,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 344dceb1aaf9..82164381f778 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -90,7 +90,7 @@ static void ri_tasklet(unsigned long dev)
u64_stats_update_end(&dp->tsync);
rcu_read_lock();
- skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
+ skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
if (!skb->dev) {
rcu_read_unlock();
dev_kfree_skb(skb);
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 595205406d73..2a30193d0d50 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -5,7 +5,7 @@ comment "SIR device drivers"
config IRTTY_SIR
tristate "IrTTY (uses Linux serial driver)"
- depends on IRDA
+ depends on IRDA && TTY
help
Say Y here if you want to build support for the IrTTY line
discipline. To compile it as a module, choose M here: the module
@@ -140,7 +140,7 @@ config LITELINK_DONGLE
config MA600_DONGLE
tristate "Mobile Action MA600 dongle"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the Mobile Action MA600
dongle. To compile it as a module, choose M here. The MA600 dongle
@@ -153,7 +153,7 @@ config MA600_DONGLE
config GIRBIL_DONGLE
tristate "Greenwich GIrBIL dongle"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the Greenwich GIrBIL
dongle. If you want to compile it as a module, choose M here.
@@ -164,7 +164,7 @@ config GIRBIL_DONGLE
config MCP2120_DONGLE
tristate "Microchip MCP2120"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the Microchip MCP2120
dongle. If you want to compile it as a module, choose M here.
@@ -178,7 +178,7 @@ config MCP2120_DONGLE
config OLD_BELKIN_DONGLE
tristate "Old Belkin dongle"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the Adaptec Airport 1000
and 2000 dongles. If you want to compile it as a module, choose
@@ -187,7 +187,7 @@ config OLD_BELKIN_DONGLE
config ACT200L_DONGLE
tristate "ACTiSYS IR-200L dongle"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the ACTiSYS IR-200L
dongle. If you want to compile it as a module, choose M here.
@@ -198,7 +198,7 @@ config ACT200L_DONGLE
config KINGSUN_DONGLE
tristate "KingSun/DonShine DS-620 IrDA-USB dongle"
- depends on IRDA && USB && EXPERIMENTAL
+ depends on IRDA && USB
help
Say Y or M here if you want to build support for the KingSun/DonShine
DS-620 IrDA-USB bridge device driver.
@@ -212,14 +212,14 @@ config KINGSUN_DONGLE
config EP7211_DONGLE
tristate "Cirrus Logic clps711x I/R support"
- depends on IRTTY_SIR && ARCH_CLPS711X && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
help
Say Y here if you want to build support for the Cirrus logic
EP7211 chipset's infrared module.
config KSDAZZLE_DONGLE
- tristate "KingSun Dazzle IrDA-USB dongle (EXPERIMENTAL)"
- depends on IRDA && USB && EXPERIMENTAL
+ tristate "KingSun Dazzle IrDA-USB dongle"
+ depends on IRDA && USB
help
Say Y or M here if you want to build support for the KingSun Dazzle
IrDA-USB bridge device driver.
@@ -232,8 +232,8 @@ config KSDAZZLE_DONGLE
ksdazzle-sir.
config KS959_DONGLE
- tristate "KingSun KS-959 IrDA-USB dongle (EXPERIMENTAL)"
- depends on IRDA && USB && EXPERIMENTAL
+ tristate "KingSun KS-959 IrDA-USB dongle"
+ depends on IRDA && USB
help
Say Y or M here if you want to build support for the KingSun KS-959
IrDA-USB bridge device driver.
@@ -264,8 +264,8 @@ config USB_IRDA
you will need both USB and IrDA support in your kernel...
config SIGMATEL_FIR
- tristate "SigmaTel STIr4200 bridge (EXPERIMENTAL)"
- depends on IRDA && USB && EXPERIMENTAL
+ tristate "SigmaTel STIr4200 bridge"
+ depends on IRDA && USB
select CRC32
---help---
Say Y here if you want to build support for the SigmaTel STIr4200
@@ -331,8 +331,8 @@ config SMC_IRCC_FIR
smsc-ircc2.o.
config ALI_FIR
- tristate "ALi M5123 FIR (EXPERIMENTAL)"
- depends on EXPERIMENTAL && IRDA && ISA_DMA_API
+ tristate "ALi M5123 FIR"
+ depends on IRDA && ISA_DMA_API
help
Say Y here if you want to build support for the ALi M5123 FIR
Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C,
@@ -343,8 +343,8 @@ config ALI_FIR
ali-ircc.
config VLSI_FIR
- tristate "VLSI 82C147 SIR/MIR/FIR (EXPERIMENTAL)"
- depends on EXPERIMENTAL && IRDA && PCI
+ tristate "VLSI 82C147 SIR/MIR/FIR"
+ depends on IRDA && PCI
help
Say Y here if you want to build support for the VLSI 82C147
PCI-IrDA Controller. This controller is used by the HP OmniBook 800
@@ -387,7 +387,7 @@ config PXA_FICP
config MCS_FIR
tristate "MosChip MCS7780 IrDA-USB dongle"
- depends on IRDA && USB && EXPERIMENTAL
+ depends on IRDA && USB
select CRC32
help
Say Y or M here if you want to build support for the MosChip
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 84872043b5c6..9cea451a6081 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -993,7 +993,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
/* Enable Interuupt */
self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
- /* Be ready for incomming frames */
+ /* Be ready for incoming frames */
ali_ircc_dma_receive(self); // benajmin 2000/11/8 07:46PM not complete
}
/* Go to SIR Speed */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 6e4d4b62c9a8..a41267197839 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -210,7 +210,7 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
* been received, which can now be decapsulated and delivered for
* further processing
*
- * calling context depends on underlying driver and tty->low_latency!
+ * calling context depends on underlying driver and tty->port->low_latency!
* for example (low_latency: 1 / 0):
* serial.c: uart-interrupt / softint
* usbserial: urb-complete-interrupt / softint
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 81f8f9e31db5..fcbf680c3e62 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb_orphan(skb);
+ /* Before queueing this packet to netif_rx(),
+ * make sure dst is refcounted.
+ */
+ skb_dst_force(skb);
+
skb->protocol = eth_type_trans(skb, dev);
/* it's OK to use per_cpu_ptr() because BHs are off */
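The loopback change adds skb_dst_force() before netif_rx() because the packet can sit in the backlog queue after the transmitting context has gone away; forcing the dst turns a noref (RCU-protected) entry into a properly refcounted one for the queued skb. A driver-agnostic sketch of the pattern, assuming the same transmit context as above:

skb_orphan(skb);
skb_dst_force(skb);		/* take a real refcount on a noref dst */
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);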
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 68a43fe602e7..defcd8a85744 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -29,6 +29,7 @@
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
+#include <linux/hash.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
@@ -126,6 +127,21 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
return vlan->receive(skb);
}
+static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
+{
+ return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
+}
+
+
+static unsigned int mc_hash(const struct macvlan_dev *vlan,
+ const unsigned char *addr)
+{
+ u32 val = __get_unaligned_cpu32(addr + 2);
+
+ val ^= macvlan_hash_mix(vlan);
+ return hash_32(val, MACVLAN_MC_FILTER_BITS);
+}
+
static void macvlan_broadcast(struct sk_buff *skb,
const struct macvlan_port *port,
struct net_device *src,
@@ -137,6 +153,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct sk_buff *nskb;
unsigned int i;
int err;
+ unsigned int hash;
if (skb->protocol == htons(ETH_P_PAUSE))
return;
@@ -146,6 +163,9 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (vlan->dev == src || !(vlan->mode & mode))
continue;
+ hash = mc_hash(vlan, eth->h_dest);
+ if (!test_bit(hash, vlan->mc_filter))
+ continue;
nskb = skb_clone(skb, GFP_ATOMIC);
err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE);
@@ -375,7 +395,6 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
} else {
/* Rehash and update the device filters */
@@ -406,6 +425,21 @@ static void macvlan_set_mac_lists(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
+ } else {
+ struct netdev_hw_addr *ha;
+ DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
+
+ bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
+ netdev_for_each_mc_addr(ha, dev) {
+ __set_bit(mc_hash(vlan, ha->addr), filter);
+ }
+
+ __set_bit(mc_hash(vlan, dev->broadcast), filter);
+
+ bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
+ }
dev_uc_sync(vlan->lowerdev, dev);
dev_mc_sync(vlan->lowerdev, dev);
}
@@ -565,7 +599,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int macvlan_fdb_del(struct ndmsg *ndm,
+static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
{
@@ -586,8 +620,8 @@ static int macvlan_fdb_del(struct ndmsg *ndm,
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- snprintf(drvinfo->driver, 32, "macvlan");
- snprintf(drvinfo->version, 32, "0.1");
+ strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}
static int macvlan_ethtool_get_settings(struct net_device *dev,
@@ -765,16 +799,22 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
}
+ err = netdev_upper_dev_link(lowerdev, dev);
+ if (err)
+ goto destroy_port;
+
port->count += 1;
err = register_netdevice(dev);
if (err < 0)
- goto destroy_port;
+ goto upper_dev_unlink;
list_add_tail(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
+upper_dev_unlink:
+ netdev_upper_dev_unlink(lowerdev, dev);
destroy_port:
port->count -= 1;
if (!port->count)
@@ -798,6 +838,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
list_del(&vlan->list);
unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);
@@ -822,7 +863,10 @@ static int macvlan_changelink(struct net_device *dev,
static size_t macvlan_get_size(const struct net_device *dev)
{
- return nla_total_size(4);
+ return (0
+ + nla_total_size(4) /* IFLA_MACVLAN_MODE */
+ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+ );
}
static int macvlan_fill_info(struct sk_buff *skb,
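Previously macvlan_broadcast() cloned every multicast frame to every macvlan on the port. The new per-device bitmap (mc_filter, MACVLAN_MC_FILTER_BITS wide, declared in if_macvlan.h by this series) is indexed by hash_32() over the low bytes of the destination address mixed with the device pointer, so devices sharing a lowerdev do not collide on the same buckets. The filter can produce false positives, since one bit may stand for several group addresses, but never false negatives, so subscribed traffic is never dropped. The receive-side test, as a sketch mirroring the hunk above:

hash = mc_hash(vlan, eth->h_dest);
if (!test_bit(hash, vlan->mc_filter))
	continue;	/* this macvlan did not subscribe to the group */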
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0f0f9ce3a776..97243011d319 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -742,6 +742,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (zerocopy) {
skb_shinfo(skb)->destructor_arg = m->msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
if (vlan)
macvlan_start_xmit(skb, vlan->dev);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 6989ebe2bc79..37add21a3d7d 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -269,12 +269,18 @@ static ssize_t show_remote_port(struct netconsole_target *nt, char *buf)
static ssize_t show_local_ip(struct netconsole_target *nt, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.local_ip);
+ if (nt->np.ipv6)
+ return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.local_ip.in6);
+ else
+ return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.local_ip);
}
static ssize_t show_remote_ip(struct netconsole_target *nt, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.remote_ip);
+ if (nt->np.ipv6)
+ return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.remote_ip.in6);
+ else
+ return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.remote_ip);
}
static ssize_t show_local_mac(struct netconsole_target *nt, char *buf)
@@ -410,7 +416,22 @@ static ssize_t store_local_ip(struct netconsole_target *nt,
return -EINVAL;
}
- nt->np.local_ip = in_aton(buf);
+ if (strnchr(buf, count, ':')) {
+ const char *end;
+ if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
+ if (*end && *end != '\n') {
+ printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ return -EINVAL;
+ }
+ nt->np.ipv6 = true;
+ } else
+ return -EINVAL;
+ } else {
+ if (!nt->np.ipv6) {
+ nt->np.local_ip.ip = in_aton(buf);
+ } else
+ return -EINVAL;
+ }
return strnlen(buf, count);
}
@@ -426,7 +447,22 @@ static ssize_t store_remote_ip(struct netconsole_target *nt,
return -EINVAL;
}
- nt->np.remote_ip = in_aton(buf);
+ if (strnchr(buf, count, ':')) {
+ const char *end;
+ if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
+ if (*end && *end != '\n') {
+ printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ return -EINVAL;
+ }
+ nt->np.ipv6 = true;
+ } else
+ return -EINVAL;
+ } else {
+ if (!nt->np.ipv6) {
+ nt->np.remote_ip.ip = in_aton(buf);
+ } else
+ return -EINVAL;
+ }
return strnlen(buf, count);
}
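netconsole's store_local_ip()/store_remote_ip() now accept either address family, keyed off the presence of ':' in the input: IPv6 strings go through in6_pton() and set np.ipv6, IPv4 keeps the in_aton() path, and mixing families between the local and remote endpoints is rejected. A condensed sketch of the parsing logic for one endpoint; np stands for the netpoll instance:

if (strnchr(buf, count, ':')) {
	const char *end;

	if (in6_pton(buf, count, np->local_ip.in6.s6_addr, -1, &end) <= 0 ||
	    (*end && *end != '\n'))
		return -EINVAL;		/* malformed IPv6 address */
	np->ipv6 = true;
} else {
	if (np->ipv6)
		return -EINVAL;		/* don't mix address families */
	np->local_ip.ip = in_aton(buf);
}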
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
new file mode 100644
index 000000000000..ed947dd76fbd
--- /dev/null
+++ b/drivers/net/ntb_netdev.c
@@ -0,0 +1,408 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copy
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Network Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ntb.h>
+
+#define NTB_NETDEV_VER "0.7"
+
+MODULE_DESCRIPTION(KBUILD_MODNAME);
+MODULE_VERSION(NTB_NETDEV_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+struct ntb_netdev {
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+ struct ntb_transport_qp *qp;
+};
+
+#define NTB_TX_TIMEOUT_MS 1000
+#define NTB_RXQ_SIZE 100
+
+static LIST_HEAD(dev_list);
+
+static void ntb_netdev_event_handler(void *data, int status)
+{
+ struct net_device *ndev = data;
+ struct ntb_netdev *dev = netdev_priv(ndev);
+
+ netdev_dbg(ndev, "Event %x, Link %x\n", status,
+ ntb_transport_link_query(dev->qp));
+
+ /* Currently, only link status event is supported */
+ if (status)
+ netif_carrier_on(ndev);
+ else
+ netif_carrier_off(ndev);
+}
+
+static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len)
+{
+ struct net_device *ndev = qp_data;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = data;
+ if (!skb)
+ return;
+
+ netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (netif_rx(skb) == NET_RX_DROP) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ } else {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+
+ skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+ if (!skb) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
+ return;
+ }
+
+ rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
+ if (rc) {
+ dev_kfree_skb(skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_fifo_errors++;
+ }
+}
+
+static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len)
+{
+ struct net_device *ndev = qp_data;
+ struct sk_buff *skb;
+
+ skb = data;
+ if (!skb || !ndev)
+ return;
+
+ if (len > 0) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ } else {
+ ndev->stats.tx_errors++;
+ ndev->stats.tx_aborted_errors++;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ int rc;
+
+ netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
+
+ rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
+ if (rc)
+ goto err;
+
+ return NETDEV_TX_OK;
+
+err:
+ ndev->stats.tx_dropped++;
+ ndev->stats.tx_errors++;
+ return NETDEV_TX_BUSY;
+}
+
+static int ntb_netdev_open(struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int rc, i, len;
+
+ /* Add some empty rx bufs */
+ for (i = 0; i < NTB_RXQ_SIZE; i++) {
+ skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+ ndev->mtu + ETH_HLEN);
+ if (rc == -EINVAL)
+ goto err;
+ }
+
+ netif_carrier_off(ndev);
+ ntb_transport_link_up(dev->qp);
+
+ return 0;
+
+err:
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+ return rc;
+}
+
+static int ntb_netdev_close(struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int len;
+
+ ntb_transport_link_down(dev->qp);
+
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int len, rc;
+
+ if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
+ return -EINVAL;
+
+ if (!netif_running(ndev)) {
+ ndev->mtu = new_mtu;
+ return 0;
+ }
+
+ /* Bring down the link and dispose of posted rx entries */
+ ntb_transport_link_down(dev->qp);
+
+ if (ndev->mtu < new_mtu) {
+ int i;
+
+ for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
+ dev_kfree_skb(skb);
+
+ for (; i; i--) {
+ skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+ new_mtu + ETH_HLEN);
+ if (rc) {
+ dev_kfree_skb(skb);
+ goto err;
+ }
+ }
+ }
+
+ ndev->mtu = new_mtu;
+
+ ntb_transport_link_up(dev->qp);
+
+ return 0;
+
+err:
+ ntb_transport_link_down(dev->qp);
+
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+
+ netdev_err(ndev, "Error changing MTU, device inoperable\n");
+ return rc;
+}
+
+static const struct net_device_ops ntb_netdev_ops = {
+ .ndo_open = ntb_netdev_open,
+ .ndo_stop = ntb_netdev_close,
+ .ndo_start_xmit = ntb_netdev_start_xmit,
+ .ndo_change_mtu = ntb_netdev_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static void ntb_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
+}
+
+static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = SUPPORTED_Backplane;
+ cmd->advertising = ADVERTISED_Backplane;
+ cmd->speed = SPEED_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_OTHER;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_DUMMY1;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static const struct ethtool_ops ntb_ethtool_ops = {
+ .get_drvinfo = ntb_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = ntb_get_settings,
+};
+
+static const struct ntb_queue_handlers ntb_netdev_handlers = {
+ .tx_handler = ntb_netdev_tx_handler,
+ .rx_handler = ntb_netdev_rx_handler,
+ .event_handler = ntb_netdev_event_handler,
+};
+
+static int ntb_netdev_probe(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ntb_netdev *dev;
+ int rc;
+
+ ndev = alloc_etherdev(sizeof(struct ntb_netdev));
+ if (!ndev)
+ return -ENOMEM;
+
+ dev = netdev_priv(ndev);
+ dev->ndev = ndev;
+ dev->pdev = pdev;
+ BUG_ON(!dev->pdev);
+ ndev->features = NETIF_F_HIGHDMA;
+
+ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ ndev->hw_features = ndev->features;
+ ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
+
+ random_ether_addr(ndev->perm_addr);
+ memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
+
+ ndev->netdev_ops = &ntb_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+
+ dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
+ if (!dev->qp) {
+ rc = -EIO;
+ goto err;
+ }
+
+ ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;
+
+ rc = register_netdev(ndev);
+ if (rc)
+ goto err1;
+
+ list_add(&dev->list, &dev_list);
+ dev_info(&pdev->dev, "%s created\n", ndev->name);
+ return 0;
+
+err1:
+ ntb_transport_free_queue(dev->qp);
+err:
+ free_netdev(ndev);
+ return rc;
+}
+
+static void ntb_netdev_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ntb_netdev *dev;
+
+ list_for_each_entry(dev, &dev_list, list) {
+ if (dev->pdev == pdev)
+ break;
+ }
+ if (dev == NULL)
+ return;
+
+ ndev = dev->ndev;
+
+ unregister_netdev(ndev);
+ ntb_transport_free_queue(dev->qp);
+ free_netdev(ndev);
+}
+
+static struct ntb_client ntb_netdev_client = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .probe = ntb_netdev_probe,
+ .remove = ntb_netdev_remove,
+};
+
+static int __init ntb_netdev_init_module(void)
+{
+ int rc;
+
+ rc = ntb_register_client_dev(KBUILD_MODNAME);
+ if (rc)
+ return rc;
+ return ntb_register_client(&ntb_netdev_client);
+}
+module_init(ntb_netdev_init_module);
+
+static void __exit ntb_netdev_exit_module(void)
+{
+ ntb_unregister_client(&ntb_netdev_client);
+ ntb_unregister_client_dev(KBUILD_MODNAME);
+}
+module_exit(ntb_netdev_exit_module);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 961f0b293913..450345261bd3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -4,7 +4,6 @@
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
- depends on !S390
depends on NETDEVICES
help
Ethernet controllers are usually attached to PHY
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index d5199cb4caec..b5ddd5077a80 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -36,8 +36,9 @@ MODULE_LICENSE("GPL");
/* IP101A/G - IP1001 */
#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
+#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */
+#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */
#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
-#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
@@ -138,19 +139,24 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
- /* INTR pin used: speed/link/duplex will cause an interrupt */
- c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
- if (c < 0)
- return c;
+ if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
- /* Additional delay (2ns) used to adjust RX clock phase
- * at RGMII interface */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (c < 0)
return c;
- c |= IP1001_PHASE_SEL_MASK;
+ c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
+ else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ c |= IP1001_RXPHASE_SEL;
+ else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ c |= IP1001_TXPHASE_SEL;
+
c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
if (c < 0)
return c;
@@ -167,6 +173,11 @@ static int ip101a_g_config_init(struct phy_device *phydev)
if (c < 0)
return c;
+ /* INTR pin used: speed/link/duplex will cause an interrupt */
+ c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+ if (c < 0)
+ return c;
+
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
c |= IP101A_G_APS_ON;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5d2a3f215887..22dec9c7ef05 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -353,15 +353,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
int err;
int temp;
- /* Enable Fiber/Copper auto selection */
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
-
- temp = phy_read(phydev, MII_BMCR);
- temp |= BMCR_RESET;
- phy_write(phydev, MII_BMCR, temp);
-
if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 0c9accb1c14f..e91d7d736ae2 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -53,7 +53,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
enum of_gpio_flags f;
struct mdio_mux_gpio_state *s;
- unsigned int num_gpios;
+ int num_gpios;
unsigned int n;
int r;
@@ -61,7 +61,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
return -ENODEV;
num_gpios = of_gpio_count(pdev->dev.of_node);
- if (num_gpios == 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
+ if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
return -ENODEV;
s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 044b5326459f..dc920974204e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -95,7 +95,7 @@ static struct class mdio_bus_class = {
#if IS_ENABLED(CONFIG_OF_MDIO)
/* Helper function for of_mdio_find_bus */
-static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
+static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
{
return dev->of_node == mdio_bus_np;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b983596abcbb..29934446436a 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -5,15 +5,20 @@
*
* Author: David J. Choi
*
- * Copyright (c) 2010 Micrel, Inc.
+ * Copyright (c) 2010-2013 Micrel, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
- * Support : ksz9021 1000/100/10 phy from Micrel
- * ks8001, ks8737, ks8721, ks8041, ks8051 100/10 phy
+ * Support : Micrel Phys:
+ * Giga phys: ksz9021, ksz9031
+ * 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041
+ * ksz8021, ksz8031, ksz8051,
+ * ksz8081, ksz8091,
+ * ksz8061,
+ * Switch : ksz8873, ksz886x
*/
#include <linux/kernel.h>
@@ -176,7 +181,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
- .name = "Micrel KSZ8021",
+ .name = "Micrel KSZ8021 or KSZ8031",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -225,6 +230,30 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_KSZ8081,
+ .name = "Micrel KSZ8081 or KSZ8091",
+ .phy_id_mask = 0x00fffff0,
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
+ .phy_id = PHY_ID_KSZ8061,
+ .name = "Micrel KSZ8061",
+ .phy_id_mask = 0x00fffff0,
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
@@ -238,6 +267,19 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = ksz9021_config_intr,
.driver = { .owner = THIS_MODULE, },
}, {
+ .phy_id = PHY_ID_KSZ9031,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KSZ9031 Gigabit PHY",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
+ | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = ksz9021_config_intr,
+ .driver = { .owner = THIS_MODULE, },
+}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KSZ8873MLL Switch",
@@ -247,6 +289,16 @@ static struct phy_driver ksphy_driver[] = {
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
.driver = { .owner = THIS_MODULE, },
+}, {
+ .phy_id = PHY_ID_KSZ886X,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KSZ886X Switch",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = { .owner = THIS_MODULE, },
} };
static int __init ksphy_init(void)
@@ -270,12 +322,16 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
+ { PHY_ID_KSZ9031, 0x00fffff0 },
{ PHY_ID_KSZ8001, 0x00ffffff },
{ PHY_ID_KS8737, 0x00fffff0 },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8041, 0x00fffff0 },
{ PHY_ID_KSZ8051, 0x00fffff0 },
+ { PHY_ID_KSZ8061, 0x00fffff0 },
+ { PHY_ID_KSZ8081, 0x00fffff0 },
{ PHY_ID_KSZ8873MLL, 0x00fffff0 },
+ { PHY_ID_KSZ886X, 0x00fffff0 },
{ }
};
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8af46e88a181..9930f9999561 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -416,16 +416,15 @@ static void phy_prepare_link(struct phy_device *phydev,
* @dev: the network device to connect
* @phydev: the pointer to the phy device
* @handler: callback function for state change notifications
- * @flags: PHY device's dev_flags
* @interface: PHY device's interface
*/
int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
- void (*handler)(struct net_device *), u32 flags,
+ void (*handler)(struct net_device *),
phy_interface_t interface)
{
int rc;
- rc = phy_attach_direct(dev, phydev, flags, interface);
+ rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
if (rc)
return rc;
@@ -443,7 +442,6 @@ EXPORT_SYMBOL(phy_connect_direct);
* @dev: the network device to connect
* @bus_id: the id string of the PHY device to connect
* @handler: callback function for state change notifications
- * @flags: PHY device's dev_flags
* @interface: PHY device's interface
*
* Description: Convenience function for connecting ethernet
@@ -455,7 +453,7 @@ EXPORT_SYMBOL(phy_connect_direct);
* the desired functionality.
*/
struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
- void (*handler)(struct net_device *), u32 flags,
+ void (*handler)(struct net_device *),
phy_interface_t interface)
{
struct phy_device *phydev;
@@ -471,7 +469,7 @@ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
}
phydev = to_phy_device(d);
- rc = phy_connect_direct(dev, phydev, handler, flags, interface);
+ rc = phy_connect_direct(dev, phydev, handler, interface);
if (rc)
return ERR_PTR(rc);
@@ -576,14 +574,13 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* phy_attach - attach a network device to a particular PHY device
* @dev: network device to attach
* @bus_id: Bus ID of PHY device to attach
- * @flags: PHY device's dev_flags
* @interface: PHY device's interface
*
* Description: Same as phy_attach_direct() except that a PHY bus_id
* string is passed instead of a pointer to a struct phy_device.
*/
struct phy_device *phy_attach(struct net_device *dev,
- const char *bus_id, u32 flags, phy_interface_t interface)
+ const char *bus_id, phy_interface_t interface)
{
struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
@@ -599,7 +596,7 @@ struct phy_device *phy_attach(struct net_device *dev,
}
phydev = to_phy_device(d);
- rc = phy_attach_direct(dev, phydev, flags, interface);
+ rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
if (rc)
return ERR_PTR(rc);
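A minimal caller sketch of the new three-argument phy_connect_direct() signature introduced above; the names example_mac_connect and example_adjust_link are invented for illustration. The u32 flags argument is gone, so a MAC driver now sets phydev->dev_flags before attaching, as the ax88172a conversion later in this diff also shows.

#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *ndev);	/* link-change handler */

static int example_mac_connect(struct net_device *ndev, struct phy_device *phydev)
{
	/* What used to be the u32 flags argument now lives on the phydev itself. */
	phydev->dev_flags = 0;

	return phy_connect_direct(ndev, phydev, example_adjust_link,
				  PHY_INTERFACE_MODE_MII);
}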
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 72f93470ea35..8e7af8354342 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,6 +23,8 @@
#define RTL821x_INER_INIT 0x6400
#define RTL821x_INSR 0x13
+#define RTL8211E_INER_LINK_STAT 0x10
+
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
@@ -36,7 +38,7 @@ static int rtl821x_ack_interrupt(struct phy_device *phydev)
return (err < 0) ? err : 0;
}
-static int rtl821x_config_intr(struct phy_device *phydev)
+static int rtl8211b_config_intr(struct phy_device *phydev)
{
int err;
@@ -49,28 +51,63 @@ static int rtl821x_config_intr(struct phy_device *phydev)
return err;
}
+static int rtl8211e_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, RTL821x_INER,
+ RTL8211E_INER_LINK_STAT);
+ else
+ err = phy_write(phydev, RTL821x_INER, 0);
+
+ return err;
+}
+
/* RTL8211B */
-static struct phy_driver rtl821x_driver = {
+static struct phy_driver rtl8211b_driver = {
.phy_id = 0x001cc912,
- .name = "RTL821x Gigabit Ethernet",
+ .name = "RTL8211B Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &rtl821x_ack_interrupt,
- .config_intr = &rtl821x_config_intr,
+ .config_intr = &rtl8211b_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+/* RTL8211E */
+static struct phy_driver rtl8211e_driver = {
+ .phy_id = 0x001cc915,
+ .name = "RTL8211E Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl8211e_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
};
static int __init realtek_init(void)
{
- return phy_driver_register(&rtl821x_driver);
+ int ret;
+
+ ret = phy_driver_register(&rtl8211b_driver);
+ if (ret < 0)
+ return -ENODEV;
+ return phy_driver_register(&rtl8211e_driver);
}
static void __exit realtek_exit(void)
{
- phy_driver_unregister(&rtl821x_driver);
+ phy_driver_unregister(&rtl8211b_driver);
+ phy_driver_unregister(&rtl8211e_driver);
}
module_init(realtek_init);
@@ -78,6 +115,7 @@ module_exit(realtek_exit);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc912, 0x001fffff },
+ { 0x001cc915, 0x001fffff },
{ }
};
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 41eb8ffeb53d..5c87eef40bf9 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -275,10 +275,8 @@ static int ks8995_probe(struct spi_device *spi)
pdata = spi->dev.platform_data;
ks = kzalloc(sizeof(*ks), GFP_KERNEL);
- if (!ks) {
- dev_err(&spi->dev, "no memory for private data\n");
+ if (!ks)
return -ENOMEM;
- }
mutex_init(&ks->lock);
ks->pdata = pdata;
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 872df3ef07a6..1373c6d7278d 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -82,8 +82,8 @@ config PPP_FILTER
If unsure, say N.
config PPP_MPPE
- tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
+ tristate "PPP MPPE compression (encryption)"
+ depends on PPP
select CRYPTO
select CRYPTO_SHA1
select CRYPTO_ARC4
@@ -96,8 +96,8 @@ config PPP_MPPE
configuring PPTP clients and servers to utilize this method.
config PPP_MULTILINK
- bool "PPP multilink support (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
+ bool "PPP multilink support"
+ depends on PPP
---help---
PPP multilink is a protocol (defined in RFC 1990) which allows you
to combine several (logical or physical) lines into one logical PPP
@@ -118,8 +118,8 @@ config PPPOATM
changes its encapsulation unilaterally.
config PPPOE
- tristate "PPP over Ethernet (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP
+ tristate "PPP over Ethernet"
+ depends on PPP
---help---
Support for PPP over Ethernet.
@@ -130,8 +130,8 @@ config PPPOE
the heading "Kernel mode PPPoE").
config PPTP
- tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
+ tristate "PPP over IPv4 (PPTP)"
+ depends on PPP && NET_IPGRE_DEMUX
---help---
Support for PPP over IPv4.(Point-to-Point Tunneling Protocol)
@@ -141,12 +141,13 @@ config PPTP
utilize this module.
config PPPOL2TP
- tristate "PPP over L2TP (EXPERIMENTAL)"
- depends on EXPERIMENTAL && L2TP && PPP
+ tristate "PPP over L2TP"
+ depends on L2TP && PPP
---help---
Support for PPP-over-L2TP socket family. L2TP is a protocol
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
+if TTY
config PPP_ASYNC
tristate "PPP support for async serial ports"
@@ -172,4 +173,6 @@ config PPP_SYNC_TTY
To compile this driver as a module, choose M here.
+endif # TTY
+
endif # PPP
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0b2706abe3e3..3db9131e9229 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1058,7 +1058,15 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
return stats64;
}
+static struct lock_class_key ppp_tx_busylock;
+static int ppp_dev_init(struct net_device *dev)
+{
+ dev->qdisc_tx_busylock = &ppp_tx_busylock;
+ return 0;
+}
+
static const struct net_device_ops ppp_netdev_ops = {
+ .ndo_init = ppp_dev_init,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
@@ -1805,8 +1813,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
- if (skb_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_unclone(skb, GFP_ATOMIC))
goto err;
*skb_push(skb, 2) = 0;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 20f31d0d1536..bb07ba94c3aa 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1134,7 +1134,7 @@ static __net_init int pppoe_init_net(struct net *net)
rwlock_init(&pn->hash_lock);
- pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
+ pde = proc_create("pppoe", S_IRUGO, net->proc_net, &pppoe_seq_fops);
#ifdef CONFIG_PROC_FS
if (!pde)
return -ENOMEM;
@@ -1145,7 +1145,7 @@ static __net_init int pppoe_init_net(struct net *net)
static __net_exit void pppoe_exit_net(struct net *net)
{
- proc_net_remove(net, "pppoe");
+ remove_proc_entry("pppoe", net->proc_net);
}
static struct pernet_operations pppoe_net_ops = {
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index d8b9b1e8ee02..f433b594388e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -410,10 +410,10 @@ static void rionet_get_drvinfo(struct net_device *ndev,
{
struct rionet_private *rnet = netdev_priv(ndev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "n/a");
- strcpy(info->bus_info, rnet->mport->name);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
+ strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}
static u32 rionet_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/slip/Kconfig b/drivers/net/slip/Kconfig
index 211b160e4e9c..48e68714eef3 100644
--- a/drivers/net/slip/Kconfig
+++ b/drivers/net/slip/Kconfig
@@ -4,6 +4,7 @@
config SLIP
tristate "SLIP (serial line) support"
+ depends on TTY
---help---
Say Y if you intend to use SLIP or CSLIP (compressed SLIP) to
connect to your Internet service provider or to connect to some
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 6b08bd419fba..c3011af68e91 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -1,6 +1,5 @@
menuconfig NET_TEAM
- tristate "Ethernet team driver support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Ethernet team driver support"
---help---
This allows one to create virtual interfaces that teams together
multiple ethernet devices.
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ad86660fb8f9..05c5efe84591 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -28,6 +28,7 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
+#include <generated/utsrelease.h>
#include <linux/if_team.h>
#define DRV_NAME "team"
@@ -507,6 +508,7 @@ static bool team_is_mode_set(struct team *team)
static void team_set_no_mode(struct team *team)
{
+ team->user_carrier_enabled = false;
team->mode = &__team_no_mode;
}
@@ -1054,10 +1056,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
}
}
- err = netdev_set_master(port_dev, dev);
+ err = netdev_master_upper_dev_link(port_dev, dev);
if (err) {
- netdev_err(dev, "Device %s failed to set master\n", portname);
- goto err_set_master;
+ netdev_err(dev, "Device %s failed to set upper link\n",
+ portname);
+ goto err_set_upper_link;
}
err = netdev_rx_handler_register(port_dev, team_handle_frame,
@@ -1090,9 +1093,9 @@ err_option_port_add:
netdev_rx_handler_unregister(port_dev);
err_handler_register:
- netdev_set_master(port_dev, NULL);
+ netdev_upper_dev_unlink(port_dev, dev);
-err_set_master:
+err_set_upper_link:
team_port_disable_netpoll(port);
err_enable_netpoll:
@@ -1129,18 +1132,20 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
return -ENOENT;
}
- __team_option_inst_mark_removed_port(team, port);
- __team_options_change_check(team);
- __team_option_inst_del_port(team, port);
- __team_port_change_port_removed(port);
team_port_disable(team, port);
list_del_rcu(&port->list);
netdev_rx_handler_unregister(port_dev);
- netdev_set_master(port_dev, NULL);
+ netdev_upper_dev_unlink(port_dev, dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
dev_close(port_dev);
team_port_leave(team, port);
+
+ __team_option_inst_mark_removed_port(team, port);
+ __team_options_change_check(team);
+ __team_option_inst_del_port(team, port);
+ __team_port_change_port_removed(port);
+
team_port_set_orig_dev_addr(port);
dev_set_mtu(port_dev, port->orig.mtu);
synchronize_rcu();
@@ -1399,13 +1404,11 @@ static void team_destructor(struct net_device *dev)
static int team_open(struct net_device *dev)
{
- netif_carrier_on(dev);
return 0;
}
static int team_close(struct net_device *dev)
{
- netif_carrier_off(dev);
return 0;
}
@@ -1501,7 +1504,6 @@ static int team_set_mac_address(struct net_device *dev, void *p)
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
@@ -1707,6 +1709,19 @@ static netdev_features_t team_fix_features(struct net_device *dev,
return features;
}
+static int team_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ struct team *team = netdev_priv(dev);
+
+ team->user_carrier_enabled = true;
+
+ if (new_carrier)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ return 0;
+}
+
static const struct net_device_ops team_netdev_ops = {
.ndo_init = team_init,
.ndo_uninit = team_uninit,
@@ -1729,8 +1744,24 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_add_slave = team_add_slave,
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
+ .ndo_change_carrier = team_change_carrier,
};
+/***********************
+ * ethtool interface
+ ***********************/
+
+static void team_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct ethtool_ops team_ethtool_ops = {
+ .get_drvinfo = team_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
/***********************
* rt netlink interface
@@ -1746,7 +1777,6 @@ static void team_setup_by_port(struct net_device *dev,
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
static int team_dev_type_check_change(struct net_device *dev,
@@ -1780,6 +1810,7 @@ static void team_setup(struct net_device *dev)
ether_setup(dev);
dev->netdev_ops = &team_netdev_ops;
+ dev->ethtool_ops = &team_ethtool_ops;
dev->destructor = team_destructor;
dev->tx_queue_len = 0;
dev->flags |= IFF_MULTICAST;
@@ -1941,30 +1972,6 @@ static void team_nl_team_put(struct team *team)
dev_put(team->dev);
}
-static int team_nl_send_generic(struct genl_info *info, struct team *team,
- int (*fill_func)(struct sk_buff *skb,
- struct genl_info *info,
- int flags, struct team *team))
-{
- struct sk_buff *skb;
- int err;
-
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- err = fill_func(skb, info, NLM_F_ACK, team);
- if (err < 0)
- goto err_fill;
-
- err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
- return err;
-
-err_fill:
- nlmsg_free(skb);
- return err;
-}
-
typedef int team_nl_send_func_t(struct sk_buff *skb,
struct team *team, u32 portid);
@@ -2309,16 +2316,57 @@ team_put:
return err;
}
-static int team_nl_fill_port_list_get(struct sk_buff *skb,
- u32 portid, u32 seq, int flags,
- struct team *team,
- bool fillall)
+static int team_nl_fill_one_port_get(struct sk_buff *skb,
+ struct team_port *port)
+{
+ struct nlattr *port_item;
+
+ port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+ if (!port_item)
+ goto nest_cancel;
+ if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
+ goto nest_cancel;
+ if (port->changed) {
+ if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
+ goto nest_cancel;
+ port->changed = false;
+ }
+ if ((port->removed &&
+ nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
+ (port->state.linkup &&
+ nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
+ nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
+ nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
+ goto nest_cancel;
+ nla_nest_end(skb, port_item);
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(skb, port_item);
+ return -EMSGSIZE;
+}
+
+static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
+ int flags, team_nl_send_func_t *send_func,
+ struct team_port *one_port)
{
struct nlattr *port_list;
+ struct nlmsghdr *nlh;
void *hdr;
struct team_port *port;
+ int err;
+ struct sk_buff *skb = NULL;
+ bool incomplete;
+ int i;
+
+ port = list_first_entry(&team->port_list, struct team_port, list);
+
+start_again:
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ if (err)
+ return err;
- hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags,
+ hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_PORT_LIST_GET);
if (!hdr)
return -EMSGSIZE;
@@ -2329,47 +2377,54 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
if (!port_list)
goto nla_put_failure;
- list_for_each_entry(port, &team->port_list, list) {
- struct nlattr *port_item;
+ i = 0;
+ incomplete = false;
- /* Include only changed ports if fill all mode is not on */
- if (!fillall && !port->changed)
- continue;
- port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
- if (!port_item)
- goto nla_put_failure;
- if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
- goto nla_put_failure;
- if (port->changed) {
- if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
- goto nla_put_failure;
- port->changed = false;
+ /* If one port is selected, the caller wants to send a port list
+ * containing only that port. Otherwise go through all listed ports
+ * and send them all.
+ */
+ if (one_port) {
+ err = team_nl_fill_one_port_get(skb, one_port);
+ if (err)
+ goto errout;
+ } else {
+ list_for_each_entry(port, &team->port_list, list) {
+ err = team_nl_fill_one_port_get(skb, port);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!i)
+ goto errout;
+ incomplete = true;
+ break;
+ }
+ goto errout;
+ }
+ i++;
}
- if ((port->removed &&
- nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
- (port->state.linkup &&
- nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
- nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
- nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
- goto nla_put_failure;
- nla_nest_end(skb, port_item);
}
nla_nest_end(skb, port_list);
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ if (incomplete)
+ goto start_again;
+
+send_done:
+ nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ if (err)
+ goto errout;
+ goto send_done;
+ }
+
+ return send_func(skb, team, portid);
nla_put_failure:
+ err = -EMSGSIZE;
+errout:
genlmsg_cancel(skb, hdr);
- return -EMSGSIZE;
-}
-
-static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
- struct genl_info *info, int flags,
- struct team *team)
-{
- return team_nl_fill_port_list_get(skb, info->snd_portid,
- info->snd_seq, NLM_F_ACK,
- team, true);
+ nlmsg_free(skb);
+ return err;
}
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
@@ -2382,7 +2437,8 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
if (!team)
return -EINVAL;
- err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
+ err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, team_nl_send_unicast, NULL);
team_nl_team_put(team);
@@ -2433,27 +2489,11 @@ static int team_nl_send_event_options_get(struct team *team,
sel_opt_inst_list);
}
-static int team_nl_send_event_port_list_get(struct team *team)
+static int team_nl_send_event_port_get(struct team *team,
+ struct team_port *port)
{
- struct sk_buff *skb;
- int err;
- struct net *net = dev_net(team->dev);
-
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
- if (err < 0)
- goto err_fill;
-
- err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
- GFP_KERNEL);
- return err;
-
-err_fill:
- nlmsg_free(skb);
- return err;
+ return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
+ port);
}
static int team_nl_init(void)
@@ -2526,28 +2566,53 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
port->state.duplex = 0;
send_event:
- err = team_nl_send_event_port_list_get(port->team);
+ err = team_nl_send_event_port_get(port->team, port);
if (err && err != -ESRCH)
netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
port->dev->name, err);
}
+static void __team_carrier_check(struct team *team)
+{
+ struct team_port *port;
+ bool team_linkup;
+
+ if (team->user_carrier_enabled)
+ return;
+
+ team_linkup = false;
+ list_for_each_entry(port, &team->port_list, list) {
+ if (port->linkup) {
+ team_linkup = true;
+ break;
+ }
+ }
+
+ if (team_linkup)
+ netif_carrier_on(team->dev);
+ else
+ netif_carrier_off(team->dev);
+}
+
static void __team_port_change_check(struct team_port *port, bool linkup)
{
if (port->state.linkup != linkup)
__team_port_change_send(port, linkup);
+ __team_carrier_check(port->team);
}
static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
__team_port_change_send(port, linkup);
+ __team_carrier_check(port->team);
}
static void __team_port_change_port_removed(struct team_port *port)
{
port->removed = true;
__team_port_change_send(port, false);
+ __team_carrier_check(port->team);
}
static void team_port_change_check(struct team_port *port, bool linkup)
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index 6262b4defd93..40fd3381b693 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -19,6 +19,7 @@
struct ab_priv {
struct team_port __rcu *active_port;
+ struct team_option_inst_info *ap_opt_inst_info;
};
static struct ab_priv *ab_priv(struct team *team)
@@ -54,8 +55,17 @@ drop:
static void ab_port_leave(struct team *team, struct team_port *port)
{
- if (ab_priv(team)->active_port == port)
+ if (ab_priv(team)->active_port == port) {
RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+ team_option_inst_set_change(ab_priv(team)->ap_opt_inst_info);
+ }
+}
+
+static int ab_active_port_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ ab_priv(team)->ap_opt_inst_info = info;
+ return 0;
}
static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
@@ -88,6 +98,7 @@ static const struct team_option ab_options[] = {
{
.name = "activeport",
.type = TEAM_OPTION_TYPE_U32,
+ .init = ab_active_port_init,
.getter = ab_active_port_get,
.setter = ab_active_port_set,
},
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 40b426edc9e6..b6f45c5d84d5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,11 +109,11 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
-/* 1024 is probably a high enough limit: modern hypervisors seem to support on
- * the order of 100-200 CPUs so this leaves us some breathing space if we want
- * to match a queue per guest CPU.
- */
-#define MAX_TAP_QUEUES 1024
+/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated for
+ * the netdevice fit in one page, which keeps the memory allocation reliable.
+ * TODO: increase the limit. */
+#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+#define MAX_TAP_FLOWS 4096
#define TUN_FLOW_EXPIRE (3 * HZ)
@@ -138,6 +138,8 @@ struct tun_file {
/* only used for fasnyc */
unsigned int flags;
u16 queue_index;
+ struct list_head next;
+ struct tun_struct *detached;
};
struct tun_flow_entry {
@@ -178,10 +180,13 @@ struct tun_struct {
int debug;
#endif
spinlock_t lock;
- struct kmem_cache *flow_cache;
struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
struct timer_list flow_gc_timer;
unsigned long ageing_time;
+ unsigned int numdisabled;
+ struct list_head disabled;
+ void *security;
+ u32 flow_count;
};
static inline u32 tun_hashfn(u32 rxhash)
@@ -205,8 +210,8 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
struct hlist_head *head,
u32 rxhash, u16 queue_index)
{
- struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
- GFP_ATOMIC);
+ struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
+
if (e) {
tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
rxhash, queue_index);
@@ -215,23 +220,18 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
e->queue_index = queue_index;
e->tun = tun;
hlist_add_head_rcu(&e->hash_link, head);
+ ++tun->flow_count;
}
return e;
}
-static void tun_flow_free(struct rcu_head *head)
-{
- struct tun_flow_entry *e
- = container_of(head, struct tun_flow_entry, rcu);
- kmem_cache_free(e->tun->flow_cache, e);
-}
-
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
e->rxhash, e->queue_index);
hlist_del_rcu(&e->hash_link);
- call_rcu(&e->rcu, tun_flow_free);
+ kfree_rcu(e, rcu);
+ --tun->flow_count;
}
static void tun_flow_flush(struct tun_struct *tun)
@@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
- u16 queue_index)
+ struct tun_file *tfile)
{
struct hlist_head *head;
struct tun_flow_entry *e;
unsigned long delay = tun->ageing_time;
+ u16 queue_index = tfile->queue_index;
if (!rxhash)
return;
@@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
rcu_read_lock();
- if (tun->numqueues == 1)
+ /* There is a small chance of out-of-order delivery while switching
+ * queues; not worth optimizing. */
+ if (tun->numqueues == 1 || tfile->detached)
goto unlock;
e = tun_flow_find(head, rxhash);
@@ -321,7 +324,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
e->updated = jiffies;
} else {
spin_lock_bh(&tun->lock);
- if (!tun_flow_find(head, rxhash))
+ if (!tun_flow_find(head, rxhash) &&
+ tun->flow_count < MAX_TAP_FLOWS)
tun_flow_create(tun, head, rxhash, queue_index);
if (!timer_pending(&tun->flow_gc_timer))
@@ -385,41 +389,67 @@ static void tun_set_real_num_queues(struct tun_struct *tun)
netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}
+static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
+{
+ tfile->detached = tun;
+ list_add_tail(&tfile->next, &tun->disabled);
+ ++tun->numdisabled;
+}
+
+static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
+{
+ struct tun_struct *tun = tfile->detached;
+
+ tfile->detached = NULL;
+ list_del_init(&tfile->next);
+ --tun->numdisabled;
+ return tun;
+}
+
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
struct tun_struct *tun;
struct net_device *dev;
- tun = rcu_dereference_protected(tfile->tun,
- lockdep_rtnl_is_held());
- if (tun) {
+ tun = rtnl_dereference(tfile->tun);
+
+ if (tun && !tfile->detached) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
dev = tun->dev;
rcu_assign_pointer(tun->tfiles[index],
tun->tfiles[tun->numqueues - 1]);
- rcu_assign_pointer(tfile->tun, NULL);
- ntfile = rcu_dereference_protected(tun->tfiles[index],
- lockdep_rtnl_is_held());
+ ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
--tun->numqueues;
- sock_put(&tfile->sk);
+ if (clean) {
+ rcu_assign_pointer(tfile->tun, NULL);
+ sock_put(&tfile->sk);
+ } else
+ tun_disable_queue(tun, tfile);
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
/* Drop read queue */
skb_queue_purge(&tfile->sk.sk_receive_queue);
tun_set_real_num_queues(tun);
-
- if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
- if (dev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(dev);
+ } else if (tfile->detached && clean) {
+ tun = tun_enable_queue(tfile);
+ sock_put(&tfile->sk);
}
if (clean) {
+ if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+ netif_carrier_off(tun->dev);
+
+ if (!(tun->flags & TUN_PERSIST) &&
+ tun->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(tun->dev);
+ }
+
BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
&tfile->socket.flags));
sk_release_kernel(&tfile->sk);
@@ -436,27 +466,38 @@ static void tun_detach(struct tun_file *tfile, bool clean)
static void tun_detach_all(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
- struct tun_file *tfile;
+ struct tun_file *tfile, *tmp;
int i, n = tun->numqueues;
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
wake_up_all(&tfile->wq.wait);
rcu_assign_pointer(tfile->tun, NULL);
--tun->numqueues;
}
+ list_for_each_entry(tfile, &tun->disabled, next) {
+ wake_up_all(&tfile->wq.wait);
+ rcu_assign_pointer(tfile->tun, NULL);
+ }
BUG_ON(tun->numqueues != 0);
synchronize_net();
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
/* Drop read queue */
skb_queue_purge(&tfile->sk.sk_receive_queue);
sock_put(&tfile->sk);
}
+ list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ tun_enable_queue(tfile);
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ sock_put(&tfile->sk);
+ }
+ BUG_ON(tun->numdisabled != 0);
+
+ if (tun->flags & TUN_PERSIST)
+ module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -464,8 +505,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
struct tun_file *tfile = file->private_data;
int err;
+ err = security_tun_dev_attach(tfile->socket.sk, tun->security);
+ if (err < 0)
+ goto out;
+
err = -EINVAL;
- if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
+ if (rtnl_dereference(tfile->tun) && !tfile->detached)
goto out;
err = -EBUSY;
@@ -473,7 +518,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
goto out;
err = -E2BIG;
- if (tun->numqueues == MAX_TAP_QUEUES)
+ if (!tfile->detached &&
+ tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
goto out;
err = 0;
@@ -487,9 +533,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
tfile->queue_index = tun->numqueues;
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
- sock_hold(&tfile->sk);
tun->numqueues++;
+ if (tfile->detached)
+ tun_enable_queue(tfile);
+ else
+ sock_hold(&tfile->sk);
+
tun_set_real_num_queues(tun);
/* device is allowed to go away first, so no need to hold extra
@@ -796,12 +846,6 @@ static int tun_flow_init(struct tun_struct *tun)
{
int i;
- tun->flow_cache = kmem_cache_create("tun_flow_cache",
- sizeof(struct tun_flow_entry), 0, 0,
- NULL);
- if (!tun->flow_cache)
- return -ENOMEM;
-
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
INIT_HLIST_HEAD(&tun->flows[i]);
@@ -817,10 +861,6 @@ static void tun_flow_uninit(struct tun_struct *tun)
{
del_timer_sync(&tun->flow_gc_timer);
tun_flow_flush(tun);
-
- /* Wait for completion of call_rcu()'s */
- rcu_barrier();
- kmem_cache_destroy(tun->flow_cache);
}
/* Initialize net device. */
@@ -1160,15 +1200,17 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (zerocopy) {
skb_shinfo(skb)->destructor_arg = msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
+ skb_reset_network_header(skb);
rxhash = skb_get_rxhash(skb);
netif_rx_ni(skb);
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
- tun_flow_update(tun, rxhash, tfile->queue_index);
+ tun_flow_update(tun, rxhash, tfile);
return total_len;
}
@@ -1349,7 +1391,9 @@ static void tun_free_netdev(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ BUG_ON(!(list_empty(&tun->disabled)));
tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
free_netdev(dev);
}
@@ -1523,6 +1567,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
struct net_device *dev;
int err;
+ if (tfile->detached)
+ return -EINVAL;
+
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
if (ifr->ifr_flags & IFF_TUN_EXCL)
@@ -1536,17 +1583,23 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (tun_not_capable(tun))
return -EPERM;
- err = security_tun_dev_attach(tfile->socket.sk);
+ err = security_tun_dev_open(tun->security);
if (err < 0)
return err;
err = tun_attach(tun, file);
if (err < 0)
return err;
+
+ if (tun->flags & TUN_TAP_MQ &&
+ (tun->numqueues + tun->numdisabled > 1))
+ return err;
}
else {
char *name;
unsigned long flags = 0;
+ int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+ MAX_TAP_QUEUES : 1;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@@ -1570,8 +1623,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
name = ifr->ifr_name;
dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
- tun_setup,
- MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+ tun_setup, queues, queues);
+
if (!dev)
return -ENOMEM;
@@ -1589,7 +1642,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
spin_lock_init(&tun->lock);
- security_tun_dev_post_create(&tfile->sk);
+ err = security_tun_dev_alloc_security(&tun->security);
+ if (err < 0)
+ goto err_free_dev;
tun_net_init(dev);
@@ -1601,6 +1656,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
TUN_USER_FEATURES;
dev->features = dev->hw_features;
+ INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file);
if (err < 0)
goto err_free_dev;
@@ -1613,10 +1669,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
pr_err("Failed to create tun sysfs files\n");
-
- netif_carrier_on(tun->dev);
}
+ netif_carrier_on(tun->dev);
+
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
if (ifr->ifr_flags & IFF_NO_PI)
@@ -1712,8 +1768,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
struct tun_file *tfile;
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
sk_detach_filter(tfile->socket.sk);
}
@@ -1726,8 +1781,7 @@ static int tun_attach_filter(struct tun_struct *tun)
struct tun_file *tfile;
for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (ret) {
tun_detach_filter(tun, i);
@@ -1745,8 +1799,7 @@ static void tun_set_sndbuf(struct tun_struct *tun)
int i;
for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
tfile->socket.sk->sk_sndbuf = tun->sndbuf;
}
}
@@ -1755,29 +1808,27 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
- struct net_device *dev;
int ret = 0;
rtnl_lock();
if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
- dev = __dev_get_by_name(tfile->net, ifr->ifr_name);
- if (!dev) {
+ tun = tfile->detached;
+ if (!tun) {
ret = -EINVAL;
goto unlock;
}
-
- tun = netdev_priv(dev);
- if (dev->netdev_ops != &tap_netdev_ops &&
- dev->netdev_ops != &tun_netdev_ops)
+ ret = security_tun_dev_attach_queue(tun->security);
+ if (ret < 0)
+ goto unlock;
+ ret = tun_attach(tun, file);
+ } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
+ tun = rtnl_dereference(tfile->tun);
+ if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
ret = -EINVAL;
- else if (tun_not_capable(tun))
- ret = -EPERM;
else
- ret = tun_attach(tun, file);
- } else if (ifr->ifr_flags & IFF_DETACH_QUEUE)
- __tun_detach(tfile, false);
- else
+ __tun_detach(tfile, false);
+ } else
ret = -EINVAL;
unlock:
@@ -1858,10 +1909,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
/* Disable/Enable persist mode. Keep an extra reference to the
* module to prevent the module being unprobed.
*/
- if (arg) {
+ if (arg && !(tun->flags & TUN_PERSIST)) {
tun->flags |= TUN_PERSIST;
__module_get(THIS_MODULE);
- } else {
+ }
+ if (!arg && (tun->flags & TUN_PERSIST)) {
tun->flags &= ~TUN_PERSIST;
module_put(THIS_MODULE);
}
@@ -2092,6 +2144,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
file->private_data = tfile;
set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
+ INIT_LIST_HEAD(&tfile->next);
return 0;
}
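A user-space sketch of the queue enable/disable flow added above, assuming an existing multiqueue tap device; error handling is trimmed and the interface name is arbitrary. Detaching with TUNSETQUEUE parks the file descriptor's queue on the tun's disabled list, and attaching re-enables it without reopening /dev/net/tun.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int open_mq_tap_queue(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}

	/* Park this queue: it moves onto the tun's disabled list... */
	ifr.ifr_flags = IFF_DETACH_QUEUE;
	ioctl(fd, TUNSETQUEUE, &ifr);

	/* ...and can be re-enabled later without reopening the device. */
	ifr.ifr_flags = IFF_ATTACH_QUEUE;
	ioctl(fd, TUNSETQUEUE, &ifr);

	return fd;
}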
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index ef976215b649..da92ed3797aa 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -8,8 +8,7 @@ menu "USB Network Adapters"
depends on USB && NET
config USB_CATC
- tristate "USB CATC NetMate-based Ethernet device support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "USB CATC NetMate-based Ethernet device support"
select CRC32
---help---
Say Y if you want to use one of the following 10Mbps USB Ethernet
@@ -83,8 +82,7 @@ config USB_PEGASUS
module will be called pegasus.
config USB_RTL8150
- tristate "USB RTL8150 based ethernet device support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "USB RTL8150 based ethernet device support"
select NET_CORE
select MII
help
@@ -188,7 +186,7 @@ config USB_NET_CDCETHER
config USB_NET_CDC_EEM
tristate "CDC EEM support"
- depends on USB_USBNET && EXPERIMENTAL
+ depends on USB_USBNET
help
This option supports devices conforming to the Communication Device
Class (CDC) Ethernet Emulation Model, a specification that's easy to
@@ -287,7 +285,7 @@ config USB_NET_PLUSB
tristate "Prolific PL-2301/2302/25A1 based cables"
# if the handshake/init/reset problems, from original 'plusb',
# are ever resolved ... then remove "experimental"
- depends on USB_USBNET && EXPERIMENTAL
+ depends on USB_USBNET
help
Choose this option if you're using a host-to-host cable
with one of these chips.
@@ -301,8 +299,8 @@ config USB_NET_MCS7830
adapters marketed under the DeLOCK brand.
config USB_NET_RNDIS_HOST
- tristate "Host for RNDIS and ActiveSync devices (EXPERIMENTAL)"
- depends on USB_USBNET && EXPERIMENTAL
+ tristate "Host for RNDIS and ActiveSync devices"
+ depends on USB_USBNET
select USB_NET_CDCETHER
help
This option enables hosting "Remote NDIS" USB networking links,
@@ -380,7 +378,7 @@ config USB_EPSON2888
config USB_KC2190
boolean "KT Technology KC2190 based cables (InstaNet)"
- depends on USB_NET_CDC_SUBSET && EXPERIMENTAL
+ depends on USB_NET_CDC_SUBSET
help
Choose this option if you're using a host-to-host cable
with one of these chips.
@@ -445,7 +443,7 @@ config USB_NET_QMI_WWAN
config USB_HSO
tristate "Option USB High Speed Mobile Devices"
- depends on USB && RFKILL
+ depends on USB && RFKILL && TTY
default n
help
Choose this option if you have an Option HSDPA/HSUPA card.
@@ -493,7 +491,7 @@ config USB_SIERRA_NET
config USB_VL600
tristate "LG VL600 modem dongle"
- depends on USB_NET_CDCETHER
+ depends on USB_NET_CDCETHER && TTY
select USB_ACM
help
Select this if you want to use an LG Electronics 4G/LTE usb modem
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index e889631161b8..346c032aa795 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -167,6 +167,20 @@ struct asix_data {
u8 res;
};
+struct asix_rx_fixup_info {
+ struct sk_buff *ax_skb;
+ u32 header;
+ u16 size;
+ bool split_head;
+};
+
+struct asix_common_private {
+ struct asix_rx_fixup_info rx_fixup_info;
+};
+
+/* ASIX specific flags */
+#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
+
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data);
@@ -176,7 +190,9 @@ int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
u16 index, u16 size, void *data);
-int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
+int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
+ struct asix_rx_fixup_info *rx);
+int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 50d167330d38..f7f623a5390e 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -51,49 +51,89 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
-int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
+ struct asix_rx_fixup_info *rx)
{
int offset = 0;
- while (offset + sizeof(u32) < skb->len) {
- struct sk_buff *ax_skb;
- u16 size;
- u32 header = get_unaligned_le32(skb->data + offset);
-
- offset += sizeof(u32);
-
- /* get the packet length */
- size = (u16) (header & 0x7ff);
- if (size != ((~header >> 16) & 0x07ff)) {
- netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- return 0;
+ while (offset + sizeof(u16) <= skb->len) {
+ u16 remaining = 0;
+ unsigned char *data;
+
+ if (!rx->size) {
+ if ((skb->len - offset == sizeof(u16)) ||
+ rx->split_head) {
+ if (!rx->split_head) {
+ rx->header = get_unaligned_le16(
+ skb->data + offset);
+ rx->split_head = true;
+ offset += sizeof(u16);
+ break;
+ } else {
+ rx->header |= (get_unaligned_le16(
+ skb->data + offset)
+ << 16);
+ rx->split_head = false;
+ offset += sizeof(u16);
+ }
+ } else {
+ rx->header = get_unaligned_le32(skb->data +
+ offset);
+ offset += sizeof(u32);
+ }
+
+ /* get the packet length */
+ rx->size = (u16) (rx->header & 0x7ff);
+ if (rx->size != ((~rx->header >> 16) & 0x7ff)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
+ rx->header, offset);
+ rx->size = 0;
+ return 0;
+ }
+ rx->ax_skb = netdev_alloc_skb_ip_align(dev->net,
+ rx->size);
+ if (!rx->ax_skb)
+ return 0;
}
- if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
- (size + offset > skb->len)) {
+ if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
- size);
+ rx->size);
+ kfree_skb(rx->ax_skb);
return 0;
}
- ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
- if (!ax_skb)
- return 0;
- skb_put(ax_skb, size);
- memcpy(ax_skb->data, skb->data + offset, size);
- usbnet_skb_return(dev, ax_skb);
+ if (rx->size > skb->len - offset) {
+ remaining = rx->size - (skb->len - offset);
+ rx->size = skb->len - offset;
+ }
+
+ data = skb_put(rx->ax_skb, rx->size);
+ memcpy(data, skb->data + offset, rx->size);
+ if (!remaining)
+ usbnet_skb_return(dev, rx->ax_skb);
- offset += (size + 1) & 0xfffe;
+ offset += (rx->size + 1) & 0xfffe;
+ rx->size = remaining;
}
if (skb->len != offset) {
- netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
- skb->len);
+ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
+ skb->len, offset);
return 0;
}
+
return 1;
}
+int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct asix_common_private *dp = dev->driver_priv;
+ struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;
+
+ return asix_rx_fixup_internal(dev, skb, rx);
+}
+
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
@@ -510,8 +550,8 @@ void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
- strncpy (info->version, DRIVER_VERSION, sizeof info->version);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
info->eedump_len = AX_EEPROM_LEN;
}
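A standalone sketch of the 32-bit RX header check performed in asix_rx_fixup_internal() above, using user-space types for illustration: the low 11 bits carry the packet length and bits 16-26 carry its one's complement, so a corrupted header is rejected before any data is copied.

#include <stdint.h>
#include <stdbool.h>

static bool asix_rx_header_ok(uint32_t header, uint16_t *size)
{
	uint16_t len = header & 0x7ff;

	/* Length and its inverted copy must agree, otherwise "Bad Header Length". */
	if (len != ((~header >> 16) & 0x7ff))
		return false;

	*size = len;
	return true;
}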
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 7a6e758f48e7..2205dbc8d32f 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -422,14 +422,25 @@ static const struct net_device_ops ax88772_netdev_ops = {
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret, embd_phy;
+ int ret, embd_phy, i;
u8 buf[ETH_ALEN];
u32 phyid;
usbnet_get_endpoints(dev,intf);
/* Get the MAC address */
- ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ if (dev->driver_info->data & FLAG_EEPROM_MAC) {
+ for (i = 0; i < (ETH_ALEN >> 1); i++) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i,
+ 0, 2, buf + i * 2);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
+ 0, 0, ETH_ALEN, buf);
+ }
+
if (ret < 0) {
netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
@@ -484,9 +495,19 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size = 2048;
}
+ dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
+ if (!dev->driver_priv)
+ return -ENOMEM;
+
return 0;
}
+static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ if (dev->driver_priv)
+ kfree(dev->driver_priv);
+}
+
static const struct ethtool_ops ax88178_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
@@ -818,6 +839,10 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size = 2048;
}
+ dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
+ if (!dev->driver_priv)
+ return -ENOMEM;
+
return 0;
}
@@ -864,22 +889,38 @@ static const struct driver_info hawking_uf200_info = {
static const struct driver_info ax88772_info = {
.description = "ASIX AX88772 USB 2.0 Ethernet",
.bind = ax88772_bind,
+ .unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88772_link_reset,
.reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
- .rx_fixup = asix_rx_fixup,
+ .rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
+static const struct driver_info ax88772b_info = {
+ .description = "ASIX AX88772B USB 2.0 Ethernet",
+ .bind = ax88772_bind,
+ .unbind = ax88772_unbind,
+ .status = asix_status,
+ .link_reset = ax88772_link_reset,
+ .reset = ax88772_reset,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ .data = FLAG_EEPROM_MAC,
+};
+
static const struct driver_info ax88178_info = {
.description = "ASIX AX88178 USB 2.0 Ethernet",
.bind = ax88178_bind,
+ .unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
- .rx_fixup = asix_rx_fixup,
+ .rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
@@ -953,7 +994,7 @@ static const struct usb_device_id products [] = {
}, {
// ASIX AX88772B 10/100
USB_DEVICE (0x0b95, 0x772b),
- .driver_info = (unsigned long) &ax88772_info,
+ .driver_info = (unsigned long) &ax88772b_info,
}, {
// ASIX AX88772 10/100
USB_DEVICE (0x0b95, 0x7720),
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index c8e0aa85fb8e..d012203b0f29 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -35,6 +35,7 @@ struct ax88172a_private {
u16 phy_addr;
u16 oldmode;
int use_embdphy;
+ struct asix_rx_fixup_info rx_fixup_info;
};
/* MDIO read and write wrappers for phylib */
@@ -116,7 +117,6 @@ static int ax88172a_init_mdio(struct usbnet *dev)
priv->mdio->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!priv->mdio->irq) {
- netdev_err(dev->net, "Could not allocate mdio->irq\n");
ret = -ENOMEM;
goto mfree;
}
@@ -235,10 +235,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
usbnet_get_endpoints(dev, intf);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- netdev_err(dev->net, "Could not allocate memory for private data\n");
+ if (!priv)
return -ENOMEM;
- }
+
dev->driver_priv = priv;
/* Get the MAC address */
@@ -377,7 +376,7 @@ static int ax88172a_reset(struct usbnet *dev)
priv->phydev = phy_connect(dev->net, priv->phy_name,
&ax88172a_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
netdev_err(dev->net, "Could not connect to PHY device %s\n",
priv->phy_name);
@@ -400,6 +399,14 @@ out:
}
+static int ax88172a_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct ax88172a_private *dp = dev->driver_priv;
+ struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;
+
+ return asix_rx_fixup_internal(dev, skb, rx);
+}
+
const struct driver_info ax88172a_info = {
.description = "ASIX AX88172A USB 2.0 Ethernet",
.bind = ax88172a_bind,
@@ -409,6 +416,6 @@ const struct driver_info ax88172a_info = {
.status = ax88172a_status,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
- .rx_fixup = asix_rx_fixup,
+ .rx_fixup = ax88172a_rx_fixup,
.tx_fixup = asix_tx_fixup,
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 18d9579123ea..8d5cac2d8e33 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -685,9 +685,9 @@ static void catc_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct catc *catc = netdev_priv(dev);
- strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN);
- usb_make_path (catc->usbdev, info->bus_info, sizeof info->bus_info);
+ strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index d0129827602b..57136dc1b887 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -457,12 +457,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
-static int cdc_manage_power(struct usbnet *dev, int on)
-{
- dev->intf->needs_remote_wakeup = on;
- return 0;
-}
-
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
@@ -470,7 +464,7 @@ static const struct driver_info cdc_info = {
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
- .manage_power = cdc_manage_power,
+ .manage_power = usbnet_manage_power,
};
static const struct driver_info wwan_info = {
@@ -479,7 +473,7 @@ static const struct driver_info wwan_info = {
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
- .manage_power = cdc_manage_power,
+ .manage_power = usbnet_manage_power,
};
/*-------------------------------------------------------------------------*/
@@ -487,6 +481,7 @@ static const struct driver_info wwan_info = {
#define HUAWEI_VENDOR_ID 0x12D1
#define NOVATEL_VENDOR_ID 0x1410
#define ZTE_VENDOR_ID 0x19D2
+#define DELL_VENDOR_ID 0x413C
static const struct usb_device_id products [] = {
/*
@@ -594,27 +589,36 @@ static const struct usb_device_id products [] = {
/* Novatel USB551L and MC551 - handled by qmi_wwan */
{
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = NOVATEL_VENDOR_ID,
- .idProduct = 0xB001,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0xB001, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* Novatel E362 - handled by qmi_wwan */
{
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = NOVATEL_VENDOR_ID,
- .idProduct = 0x9010,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9010, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8195, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8196, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* AnyDATA ADU960S - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 42f51c71ec1f..248d2dc765a5 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -374,6 +374,21 @@ static const struct driver_info cdc_mbim_info = {
.tx_fixup = cdc_mbim_tx_fixup,
};
+/* MBIM and NCM devices should not need a ZLP after NTBs with
+ * dwNtbOutMaxSize length. This driver_info is for the exceptional
+ * devices requiring it anyway, allowing them to be supported without
+ * forcing the performance penalty on all the sane devices.
+ */
+static const struct driver_info cdc_mbim_info_zlp = {
+ .description = "CDC MBIM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+ .rx_fixup = cdc_mbim_rx_fixup,
+ .tx_fixup = cdc_mbim_tx_fixup,
+};
+
static const struct usb_device_id mbim_devs[] = {
/* This duplicate NCM entry is intentional. MBIM devices can
* be disguised as NCM by default, and this is necessary to
@@ -385,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
+ /* Sierra Wireless MC7710 needs ZLPs */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+ },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d38bc20a60e2..4a8c25a22294 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -65,9 +65,9 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
- strncpy(info->driver, dev->driver_name, sizeof(info->driver));
- strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strncpy(info->fw_version, dev->driver_info->description,
+ strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, dev->driver_info->description,
sizeof(info->fw_version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
@@ -435,6 +435,13 @@ advance:
len -= temp;
}
+ /* some buggy devices have an IAD but no CDC Union */
+ if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
+ ctx->control = intf;
+ ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
+ dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
+ }
+
/* check if we got everything */
if ((ctx->control == NULL) || (ctx->data == NULL) ||
((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
@@ -497,7 +504,8 @@ advance:
error2:
usb_set_intfdata(ctx->control, NULL);
usb_set_intfdata(ctx->data, NULL);
- usb_driver_release_interface(driver, ctx->data);
+ if (ctx->data != ctx->control)
+ usb_driver_release_interface(driver, ctx->data);
error:
cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
dev->data[0] = 0;
@@ -568,9 +576,14 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
if ((intf->num_altsetting == 2) &&
!usb_set_interface(dev->udev,
intf->cur_altsetting->desc.bInterfaceNumber,
- CDC_NCM_COMM_ALTSETTING_MBIM) &&
- cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
- return -ENODEV;
+ CDC_NCM_COMM_ALTSETTING_MBIM)) {
+ if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ return -ENODEV;
+ else
+ usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ CDC_NCM_COMM_ALTSETTING_NCM);
+ }
#endif
/* NCM data altsetting is always 1 */
@@ -1129,19 +1142,13 @@ static void cdc_ncm_disconnect(struct usb_interface *intf)
usbnet_disconnect(intf);
}
-static int cdc_ncm_manage_power(struct usbnet *dev, int status)
-{
- dev->intf->needs_remote_wakeup = status;
- return 0;
-}
-
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
- .manage_power = cdc_ncm_manage_power,
+ .manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
@@ -1155,7 +1162,21 @@ static const struct driver_info wwan_info = {
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
- .manage_power = cdc_ncm_manage_power,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
+/* Same as wwan_info, but with FLAG_NOARP */
+static const struct driver_info wwan_noarp_info = {
+ .description = "Mobile Broadband Network Device (NO ARP)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_WWAN | FLAG_NOARP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .check_connect = cdc_ncm_check_connect,
+ .manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
@@ -1199,6 +1220,16 @@ static const struct usb_device_id cdc_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
+ /* Infineon (now Intel) HSPA Modem platform */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_noarp_info,
+ },
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3f554c1149f3..174e5ecea4cc 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -45,6 +45,12 @@
#define DM_MCAST_ADDR 0x16 /* 8 bytes */
#define DM_GPR_CTRL 0x1e
#define DM_GPR_DATA 0x1f
+#define DM_CHIP_ID 0x2c
+#define DM_MODE_CTRL 0x91 /* only on dm9620 */
+
+/* chip id values */
+#define ID_DM9601 0
+#define ID_DM9620 1
#define DM_MAX_MCAST 64
#define DM_MCAST_SIZE 8
@@ -53,7 +59,6 @@
#define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */
#define DM_TIMEOUT 1000
-
static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
{
int err;
@@ -84,32 +89,23 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- return usbnet_write_cmd(dev, DM_WRITE_REGS,
+ return usbnet_write_cmd(dev, DM_WRITE_REG,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, reg, NULL, 0);
}
-static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
- u16 length, void *data)
+static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
{
usbnet_write_cmd_async(dev, DM_WRITE_REGS,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, reg, data, length);
-}
-
-static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
-{
- netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
-
- dm_write_async_helper(dev, reg, 0, length, data);
+ 0, reg, data, length);
}
static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
- reg, value);
-
- dm_write_async_helper(dev, reg, value, 0, NULL);
+ usbnet_write_cmd_async(dev, DM_WRITE_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, reg, NULL, 0);
}
static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
@@ -122,7 +118,7 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu
dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0xc : 0x4);
for (i = 0; i < DM_TIMEOUT; i++) {
- u8 tmp;
+ u8 tmp = 0;
udelay(1);
ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
@@ -165,7 +161,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
for (i = 0; i < DM_TIMEOUT; i++) {
- u8 tmp;
+ u8 tmp = 0;
udelay(1);
ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
@@ -358,7 +354,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
- u8 mac[ETH_ALEN];
+ u8 mac[ETH_ALEN], id;
ret = usbnet_get_endpoints(dev, intf);
if (ret)
@@ -399,6 +395,24 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
__dm9601_set_mac_address(dev);
}
+ if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) {
+ netdev_err(dev->net, "Error reading chip ID\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* put dm9620 devices in dm9601 mode */
+ if (id == ID_DM9620) {
+ u8 mode;
+
+ if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) {
+ netdev_err(dev->net, "Error reading MODE_CTRL\n");
+ ret = -ENODEV;
+ goto out;
+ }
+ dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f);
+ }
+
/* power up phy */
dm_write_reg(dev, DM_GPR_CTRL, 1);
dm_write_reg(dev, DM_GPR_DATA, 0);
@@ -581,6 +595,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cd8ccb240f4b..e2dd3249b6bd 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2035,25 +2035,23 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
tty = tty_port_tty_get(&serial->port);
/* Push data to tty */
- if (tty) {
- write_length_remaining = urb->actual_length -
- serial->curr_rx_urb_offset;
- D1("data to push to tty");
- while (write_length_remaining) {
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
- tty_kref_put(tty);
- return -1;
- }
- curr_write_len = tty_insert_flip_string
- (tty, urb->transfer_buffer +
- serial->curr_rx_urb_offset,
- write_length_remaining);
- serial->curr_rx_urb_offset += curr_write_len;
- write_length_remaining -= curr_write_len;
- tty_flip_buffer_push(tty);
+ write_length_remaining = urb->actual_length -
+ serial->curr_rx_urb_offset;
+ D1("data to push to tty");
+ while (write_length_remaining) {
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ tty_kref_put(tty);
+ return -1;
}
- tty_kref_put(tty);
+ curr_write_len = tty_insert_flip_string(&serial->port,
+ urb->transfer_buffer + serial->curr_rx_urb_offset,
+ write_length_remaining);
+ serial->curr_rx_urb_offset += curr_write_len;
+ write_length_remaining -= curr_write_len;
+ tty_flip_buffer_push(&serial->port);
}
+ tty_kref_put(tty);
+
if (write_length_remaining == 0) {
serial->curr_rx_urb_offset = 0;
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
@@ -2317,10 +2315,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->rx_urb[i]->transfer_buffer_length = 0;
serial->rx_data[i] = kzalloc(serial->rx_data_length,
GFP_KERNEL);
- if (!serial->rx_data[i]) {
- dev_err(dev, "%s - Out of memory\n", __func__);
+ if (!serial->rx_data[i])
goto exit;
- }
}
/* TX, allocate urb and initialize */
@@ -2336,15 +2332,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->tx_buffer_count = 0;
serial->tx_data_length = tx_size;
serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL);
- if (!serial->tx_data) {
- dev_err(dev, "%s - Out of memory\n", __func__);
+ if (!serial->tx_data)
goto exit;
- }
+
serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL);
- if (!serial->tx_buffer) {
- dev_err(dev, "%s - Out of memory\n", __func__);
+ if (!serial->tx_buffer)
goto exit;
- }
return 0;
exit:
@@ -2580,10 +2573,8 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
}
hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE,
GFP_KERNEL);
- if (!hso_net->mux_bulk_rx_buf_pool[i]) {
- dev_err(&interface->dev, "Could not allocate rx buf\n");
+ if (!hso_net->mux_bulk_rx_buf_pool[i])
goto exit;
- }
}
hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hso_net->mux_bulk_tx_urb) {
@@ -2591,10 +2582,8 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
goto exit;
}
hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL);
- if (!hso_net->mux_bulk_tx_buf) {
- dev_err(&interface->dev, "Could not allocate tx buf\n");
+ if (!hso_net->mux_bulk_tx_buf)
goto exit;
- }
add_net_device(hso_dev);
@@ -2818,10 +2807,8 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
mux->shared_intr_buf =
kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize),
GFP_KERNEL);
- if (!mux->shared_intr_buf) {
- dev_err(&interface->dev, "Could not allocate intr buf?\n");
+ if (!mux->shared_intr_buf)
goto exit;
- }
mutex_init(&mux->shared_int_lock);
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 92c49e0a59ec..0192073e53a3 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -159,7 +159,6 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
}
memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
- memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
return status;
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index a0b5807b30d4..73051d10ead2 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -149,11 +149,9 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
DECLARE_WAITQUEUE(wait, current);
buffer = kmalloc(size, GFP_KERNEL);
- if (!buffer) {
- netif_warn(pegasus, drv, pegasus->net,
- "out of memory in %s\n", __func__);
+ if (!buffer)
return -ENOMEM;
- }
+
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
@@ -1074,8 +1072,9 @@ static void pegasus_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
pegasus_t *pegasus = netdev_priv(dev);
- strncpy(info->driver, driver_name, sizeof(info->driver) - 1);
- strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
+
+ strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
}
@@ -1096,6 +1095,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
pegasus_t *pegasus = netdev_priv(dev);
u8 reg78 = 0x04;
+ int ret;
if (wol->wolopts & ~WOL_SUPPORTED)
return -EINVAL;
@@ -1110,7 +1110,12 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else
pegasus->eth_regs[0] &= ~0x10;
pegasus->wolopts = wol->wolopts;
- return set_register(pegasus, WakeupControl, reg78);
+
+ ret = set_register(pegasus, WakeupControl, reg78);
+ if (!ret)
+ ret = device_set_wakeup_enable(&pegasus->usb->dev,
+ wol->wolopts);
+ return ret;
}
static inline void pegasus_reset_wol(struct net_device *dev)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 1ea91f4237f0..efb5c7c33a28 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* Pantech UML290, P4200 and more */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
@@ -383,8 +395,30 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Dell Wireless 5800 (Novatel E362) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* Dell Wireless 5800 V2 (Novatel E362) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8196,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* ADU960S */
+ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
@@ -419,6 +453,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
+ {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
+ {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
@@ -443,6 +479,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 4a4335833c36..cc49aac70224 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -431,7 +431,6 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
goto halt_fail_and_release;
}
memcpy(net->dev_addr, bp, ETH_ALEN);
- memcpy(net->perm_addr, bp, ETH_ALEN);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 5f39a3b225ef..a491d3a95393 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -776,9 +776,9 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
{
rtl8150_t *dev = netdev_priv(netdev);
- strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN);
- usb_make_path(dev->udev, info->bus_info, sizeof info->bus_info);
+ strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 18dd4257ab17..79ab2435d9d3 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -459,11 +459,9 @@ static void sierra_net_kevent(struct work_struct *work)
/* Query the modem for the LSI message */
buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
- if (!buf) {
- netdev_err(dev->net,
- "failed to allocate buf for LS msg\n");
+ if (!buf)
return;
- }
+
ifnum = priv->ifnum;
len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
USB_CDC_GET_ENCAPSULATED_RESPONSE,
@@ -598,8 +596,8 @@ static void sierra_net_get_drvinfo(struct net_device *net,
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strncpy(info->driver, driver_name, sizeof info->driver);
- strncpy(info->version, DRIVER_VERSION, sizeof info->version);
+ strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
static u32 sierra_net_get_link(struct net_device *net)
@@ -686,10 +684,8 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* Initialize sierra private data */
priv = kzalloc(sizeof *priv, GFP_KERNEL);
- if (!priv) {
- dev_err(&dev->udev->dev, "No memory");
+ if (!priv)
return -ENOMEM;
- }
priv->usbnet = dev;
priv->ifnum = ifacenum;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 251a3354a4b0..9abe51710f22 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1393,13 +1393,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
}
dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv),
- GFP_KERNEL);
+ GFP_KERNEL);
pdata = (struct smsc75xx_priv *)(dev->data[0]);
- if (!pdata) {
- netdev_warn(dev->net, "Unable to allocate smsc75xx_priv\n");
+ if (!pdata)
return -ENOMEM;
- }
pdata->dev = dev;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 9b736701f854..ff4fa37dfd1d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -55,6 +55,13 @@
#define FEATURE_PHY_NLP_CROSSOVER (0x02)
#define FEATURE_AUTOSUSPEND (0x04)
+#define SUSPEND_SUSPEND0 (0x01)
+#define SUSPEND_SUSPEND1 (0x02)
+#define SUSPEND_SUSPEND2 (0x04)
+#define SUSPEND_SUSPEND3 (0x08)
+#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
+ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
@@ -62,6 +69,7 @@ struct smsc95xx_priv {
u32 wolopts;
spinlock_t mac_cr_lock;
u8 features;
+ u8 suspend_flags;
};
static bool turbo_mode = true;
@@ -513,10 +521,8 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
u32 flow, afc_cfg = 0;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading AFC_CFG\n");
+ if (ret < 0)
return ret;
- }
if (duplex == DUPLEX_FULL) {
u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -541,16 +547,10 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
}
ret = smsc95xx_write_reg(dev, FLOW, flow);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing FLOW\n");
- return ret;
- }
-
- ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
if (ret < 0)
- netdev_warn(dev->net, "Error writing AFC_CFG\n");
+ return ret;
- return ret;
+ return smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
}
static int smsc95xx_link_reset(struct usbnet *dev)
@@ -564,16 +564,12 @@ static int smsc95xx_link_reset(struct usbnet *dev)
/* clear interrupt status */
ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing INT_STS\n");
+ if (ret < 0)
return ret;
- }
mii_check_media(mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -595,10 +591,8 @@ static int smsc95xx_link_reset(struct usbnet *dev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing MAC_CR\n");
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
if (ret < 0)
@@ -638,10 +632,8 @@ static int smsc95xx_set_features(struct net_device *netdev,
int ret;
ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
+ if (ret < 0)
return ret;
- }
if (features & NETIF_F_HW_CSUM)
read_buf |= Tx_COE_EN_;
@@ -654,10 +646,8 @@ static int smsc95xx_set_features(struct net_device *netdev,
read_buf &= ~Rx_COE_EN_;
ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
return 0;
@@ -800,16 +790,10 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
int ret;
ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
- return ret;
- }
-
- ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
if (ret < 0)
- netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
+ return ret;
- return ret;
+ return smsc95xx_write_reg(dev, ADDRH, addr_hi);
}
/* starts the TX path */
@@ -825,17 +809,11 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Enable Tx at SCSRs */
- ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
- if (ret < 0)
- netdev_warn(dev->net, "Failed to write TX_CFG: %d\n", ret);
-
- return ret;
+ return smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
}
/* Starts the Receive path */
@@ -843,17 +821,12 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
unsigned long flags;
- int ret;
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
pdata->mac_cr |= MAC_CR_RXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- ret = __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
- if (ret < 0)
- netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
-
- return ret;
+ return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
}
static int smsc95xx_phy_initialize(struct usbnet *dev)
@@ -910,19 +883,15 @@ static int smsc95xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
+ if (ret < 0)
return ret;
- }
timeout = 0;
do {
msleep(10);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
timeout++;
} while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
@@ -932,19 +901,15 @@ static int smsc95xx_reset(struct usbnet *dev)
}
ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
+ if (ret < 0)
return ret;
- }
timeout = 0;
do {
msleep(10);
ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
+ if (ret < 0)
return ret;
- }
timeout++;
} while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
@@ -961,10 +926,8 @@ static int smsc95xx_reset(struct usbnet *dev)
dev->net->dev_addr);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n",
read_buf);
@@ -972,16 +935,12 @@ static int smsc95xx_reset(struct usbnet *dev)
read_buf |= HW_CFG_BIR_;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
@@ -1002,42 +961,32 @@ static int smsc95xx_reset(struct usbnet *dev)
(ulong)dev->rx_urb_size);
ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net,
"Read Value from BURST_CAP after writing: 0x%08x\n",
read_buf);
ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net,
"Read Value from BULK_IN_DLY after writing: 0x%08x\n",
read_buf);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG: 0x%08x\n",
read_buf);
@@ -1051,69 +1000,51 @@ static int smsc95xx_reset(struct usbnet *dev)
read_buf |= NET_IP_ALIGN << 9;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
/* Configure GPIO pins as LED outputs */
write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
LED_GPIO_CFG_FDX_LED;
ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Init Tx */
ret = smsc95xx_write_reg(dev, FLOW, 0);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Don't need mac_cr_lock during initialisation */
ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Init Rx */
/* Set Vlan */
ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write VLAN1: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Enable or disable checksum offload engines */
ret = smsc95xx_set_features(dev->net, dev->net->features);
@@ -1131,19 +1062,15 @@ static int smsc95xx_reset(struct usbnet *dev)
}
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* enable PHY interrupts */
read_buf |= INT_EP_CTL_PHY_INT_;
ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_start_tx_path(dev);
if (ret < 0) {
@@ -1189,13 +1116,11 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
}
dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
- GFP_KERNEL);
+ GFP_KERNEL);
pdata = (struct smsc95xx_priv *)(dev->data[0]);
- if (!pdata) {
- netdev_warn(dev->net, "Unable to allocate struct smsc95xx_priv\n");
+ if (!pdata)
return -ENOMEM;
- }
spin_lock_init(&pdata->mac_cr_lock);
@@ -1213,10 +1138,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
+ if (ret < 0)
return ret;
- }
val >>= 16;
if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
@@ -1261,17 +1184,13 @@ static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
/* read to clear */
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
+ if (ret < 0)
return ret;
- }
/* enable interrupt source */
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PHY_INT_MASK\n");
+ if (ret < 0)
return ret;
- }
ret |= mask;
@@ -1287,16 +1206,12 @@ static int smsc95xx_link_ok_nopm(struct usbnet *dev)
/* first, a dummy read, needed to latch some MII phys */
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading MII_BMSR\n");
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading MII_BMSR\n");
+ if (ret < 0)
return ret;
- }
return !!(ret & BMSR_LSTATUS);
}
@@ -1308,19 +1223,15 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
int ret;
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
val |= PM_CTL_SUS_MODE_0;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
/* clear wol status */
val &= ~PM_CTL_WUPS_;
@@ -1331,15 +1242,13 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
val |= PM_CTL_WUPS_ED_;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
/* read back PM_CTRL */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0)
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND0;
return ret;
}
@@ -1360,10 +1269,8 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
/* enable energy detect power-down mode */
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n");
+ if (ret < 0)
return ret;
- }
ret |= MODE_CTRL_STS_EDPWRDOWN_;
@@ -1371,52 +1278,133 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
/* enter SUSPEND1 mode */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_1;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
/* clear wol status, enable energy detection */
val &= ~PM_CTL_WUPS_;
val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0)
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND1;
return ret;
}
static int smsc95xx_enter_suspend2(struct usbnet *dev)
{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
u32 val;
int ret;
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_2;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0)
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND2;
return ret;
}
+static int smsc95xx_enter_suspend3(struct usbnet *dev)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ u32 val;
+ int ret;
+
+ ret = smsc95xx_read_reg_nopm(dev, RX_FIFO_INF, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val & 0xFFFF) {
+ netdev_info(dev->net, "rx fifo not empty in autosuspend\n");
+ return -EBUSY;
+ }
+
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+ val |= PM_CTL_SUS_MODE_3 | PM_CTL_RES_CLR_WKP_STS;
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ return ret;
+
+ /* clear wol status */
+ val &= ~PM_CTL_WUPS_;
+ val |= PM_CTL_WUPS_WOL_;
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ return ret;
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND3;
+
+ return 0;
+}
+
+static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ int ret;
+
+ if (!netif_running(dev->net)) {
+ /* interface is ifconfig down so fully power down hw */
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n");
+ return smsc95xx_enter_suspend2(dev);
+ }
+
+ if (!link_up) {
+ /* link is down so enter EDPD mode, but only if device can
+ * reliably resume from it. This check should be redundant
+ * as current FEATURE_AUTOSUSPEND parts also support
+ * FEATURE_PHY_NLP_CROSSOVER but it's included for clarity */
+ if (!(pdata->features & FEATURE_PHY_NLP_CROSSOVER)) {
+ netdev_warn(dev->net, "EDPD not supported\n");
+ return -EBUSY;
+ }
+
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n");
+
+ /* enable PHY wakeup events in case a cable is attached */
+ ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
+ PHY_INT_MASK_ANEG_COMP_);
+ if (ret < 0) {
+ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
+ return ret;
+ }
+
+ netdev_info(dev->net, "entering SUSPEND1 mode\n");
+ return smsc95xx_enter_suspend1(dev);
+ }
+
+ /* enable PHY wakeup events so we get a remote wakeup if the cable is pulled */
+ ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
+ PHY_INT_MASK_LINK_DOWN_);
+ if (ret < 0) {
+ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
+ return ret;
+ }
+
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n");
+ return smsc95xx_enter_suspend3(dev);
+}
+
static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1424,15 +1412,35 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
u32 val, link_up;
int ret;
+ /* TODO: don't indicate this feature to usb framework if
+ * our current hardware doesn't have the capability
+ */
+ if ((message.event == PM_EVENT_AUTO_SUSPEND) &&
+ (!(pdata->features & FEATURE_AUTOSUSPEND))) {
+ netdev_warn(dev->net, "autosuspend not supported\n");
+ return -EBUSY;
+ }
+
ret = usbnet_suspend(intf, message);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_suspend error\n");
return ret;
}
+ if (pdata->suspend_flags) {
+ netdev_warn(dev->net, "error during last resume\n");
+ pdata->suspend_flags = 0;
+ }
+
/* determine if link is up using only _nopm functions */
link_up = smsc95xx_link_ok_nopm(dev);
+ if (message.event == PM_EVENT_AUTO_SUSPEND) {
+ ret = smsc95xx_autosuspend(dev, link_up);
+ goto done;
+ }
+
+ /* if we get this far we're not autosuspending */
/* if no wol options set, or if link is down and we're not waking on
* PHY activity, enter lowest power SUSPEND2 mode
*/
@@ -1442,32 +1450,24 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
/* disable energy detect (link up) & wake up events */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
goto done;
- }
val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
goto done;
- }
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
goto done;
- }
val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
goto done;
- }
ret = smsc95xx_enter_suspend2(dev);
goto done;
@@ -1565,7 +1565,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
for (i = 0; i < (wuff_filter_count * 4); i++) {
ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUFF\n");
kfree(filter_mask);
goto done;
}
@@ -1574,67 +1573,51 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
for (i = 0; i < (wuff_filter_count / 4); i++) {
ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUFF\n");
+ if (ret < 0)
goto done;
- }
}
for (i = 0; i < (wuff_filter_count / 4); i++) {
ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUFF\n");
+ if (ret < 0)
goto done;
- }
}
for (i = 0; i < (wuff_filter_count / 2); i++) {
ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUFF\n");
+ if (ret < 0)
goto done;
- }
}
/* clear any pending pattern match packet status */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
goto done;
- }
val |= WUCSR_WUFR_;
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
goto done;
- }
}
if (pdata->wolopts & WAKE_MAGIC) {
/* clear any pending magic packet status */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
goto done;
- }
val |= WUCSR_MPR_;
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
goto done;
- }
}
/* enable/disable wakeup sources */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
goto done;
- }
if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
netdev_info(dev->net, "enabling pattern match wakeup\n");
@@ -1653,17 +1636,13 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
}
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
goto done;
- }
/* enable wol wakeup source */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
goto done;
- }
val |= PM_CTL_WOL_EN_;
@@ -1672,10 +1651,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
val |= PM_CTL_ED_EN_;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
goto done;
- }
/* enable receiver to enable frame reception */
smsc95xx_start_rx_path(dev, 1);
@@ -1694,42 +1671,40 @@ static int smsc95xx_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ u8 suspend_flags = pdata->suspend_flags;
int ret;
u32 val;
BUG_ON(!dev);
- if (pdata->wolopts) {
+ netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
+
+ /* do this first to ensure it's cleared even in error case */
+ pdata->suspend_flags = 0;
+
+ if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
return ret;
- }
val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
return ret;
- }
/* clear wake-up status */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
val &= ~PM_CTL_WOL_EN_;
val |= PM_CTL_WUPS_;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
}
ret = usbnet_resume(intf);
@@ -1891,6 +1866,26 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
return skb;
}
+static int smsc95xx_manage_power(struct usbnet *dev, int on)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ dev->intf->needs_remote_wakeup = on;
+
+ if (pdata->features & FEATURE_AUTOSUSPEND)
+ return 0;
+
+ /* this chip revision doesn't support autosuspend */
+ netdev_info(dev->net, "hardware doesn't support USB autosuspend\n");
+
+ if (on)
+ usb_autopm_get_interface_no_resume(dev->intf);
+ else
+ usb_autopm_put_interface(dev->intf);
+
+ return 0;
+}
+
static const struct driver_info smsc95xx_info = {
.description = "smsc95xx USB 2.0 Ethernet",
.bind = smsc95xx_bind,
@@ -1900,6 +1895,7 @@ static const struct driver_info smsc95xx_info = {
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
+ .manage_power = smsc95xx_manage_power,
.flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
};
@@ -2007,6 +2003,7 @@ static struct usb_driver smsc95xx_driver = {
.reset_resume = smsc95xx_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
+ .supports_autosuspend = 1,
};
module_usb_driver(smsc95xx_driver);
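
To summarize the smsc95xx autosuspend policy introduced above: the suspend level now depends on runtime state rather than on WoL options alone, and pdata->suspend_flags records which level was entered so resume knows whether wake-up sources must be cleared. The helper below is only a restatement of that decision for readability, not additional driver code; the name is invented.

/* Illustrative restatement of the choice made by smsc95xx_autosuspend():
 * SUSPEND2 when the interface is down (deepest power-down), SUSPEND1 when
 * the link is down (PHY energy-detect wake), SUSPEND3 when the link is up
 * (remain wakeable via the wake sources configured above).
 */
static int pick_suspend_level(int if_running, int link_up)
{
	if (!if_running)
		return 2;	/* SUSPEND2 */
	if (!link_up)
		return 1;	/* SUSPEND1 */
	return 3;		/* SUSPEND3 */
}
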
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c04110ba677f..51f3192f3931 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
unsigned long lockflags;
size_t size = dev->rx_urb_size;
+ /* prevent rx skb allocation when error ratio is high */
+ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+ usb_free_urb(urb);
+ return -ENOLINK;
+ }
+
skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
break;
}
+ /* stop rx if packet error rate is high */
+ if (++dev->pkt_cnt > 30) {
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ } else {
+ if (state == rx_cleanup)
+ dev->pkt_err++;
+ if (dev->pkt_err > 20)
+ set_bit(EVENT_RX_KILL, &dev->flags);
+ }
+
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
@@ -719,7 +736,8 @@ int usbnet_stop (struct net_device *net)
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
- if (info->manage_power)
+ if (info->manage_power &&
+ !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
else
usb_autopm_put_interface(dev->intf);
@@ -790,18 +808,23 @@ int usbnet_open (struct net_device *net)
(dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
"simple");
+ /* reset rx error state */
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
if (info->manage_power) {
retval = info->manage_power(dev, 1);
- if (retval < 0)
- goto done_manage_power_error;
- usb_autopm_put_interface(dev->intf);
+ if (retval < 0) {
+ retval = 0;
+ set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+ } else {
+ usb_autopm_put_interface(dev->intf);
+ }
}
return retval;
-
-done_manage_power_error:
- clear_bit(EVENT_DEV_OPEN, &dev->flags);
done:
usb_autopm_put_interface(dev->intf);
done_nopm:
@@ -1102,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
- if (netif_msg_tx_err(dev)) {
- netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
- goto drop;
- } else {
- /* cdc_ncm collected packet; waits for more */
+ /* packet collected; minidriver waiting for more */
+ if (info->flags & FLAG_MULTI_PACKET)
goto not_drop;
- }
+ netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+ goto drop;
}
}
length = skb->len;
@@ -1253,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
}
}
+ /* restart RX after it was disabled due to a high error rate */
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// waiting for all pending urbs to complete?
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
@@ -1447,6 +1471,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if ((dev->driver_info->flags & FLAG_WWAN) != 0)
strcpy(net->name, "wwan%d");
+ /* devices that cannot do ARP */
+ if ((dev->driver_info->flags & FLAG_NOARP) != 0)
+ net->flags |= IFF_NOARP;
+
/* maybe the remote can't receive an Ethernet MTU */
if (net->mtu > (dev->hard_mtu - net->hard_header_len))
net->mtu = dev->hard_mtu - net->hard_header_len;
@@ -1615,6 +1643,16 @@ void usbnet_device_suggests_idle(struct usbnet *dev)
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);
+/*
+ * For devices that can do without special commands
+ */
+int usbnet_manage_power(struct usbnet *dev, int on)
+{
+ dev->intf->needs_remote_wakeup = on;
+ return 0;
+}
+EXPORT_SYMBOL(usbnet_manage_power);
+
/*-------------------------------------------------------------------------*/
static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
u16 value, u16 index, void *data, u16 size)
@@ -1775,11 +1813,8 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
}
req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!req) {
- netdev_err(dev->net, "Failed to allocate memory for %s\n",
- __func__);
+ if (!req)
goto fail_free_buf;
- }
req->bRequestType = reqtype;
req->bRequest = cmd;
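
The EVENT_RX_KILL changes above implement a small windowed circuit breaker: pkt_cnt counts frames in windows of 30, pkt_err counts the ones that ended in rx_cleanup, and once a window accumulates more than 20 errors new RX URB submission is suppressed until the bh clears the bit again. A standalone sketch of the same idea, with invented names:

#include <linux/types.h>

/* Windowed error-rate breaker mirroring the usbnet change above:
 * account outcomes in windows of 30 frames and trip a kill flag when
 * more than 20 frames of the current window failed. Illustrative only.
 */
struct rx_breaker {
	unsigned int cnt;	/* frames seen in the current window */
	unsigned int err;	/* failed frames in the current window */
	bool killed;		/* stop submitting new RX buffers */
};

static void rx_breaker_account(struct rx_breaker *b, bool failed)
{
	if (++b->cnt > 30) {
		/* window full: start over, forgiving past errors */
		b->cnt = 0;
		b->err = 0;
		return;
	}
	if (failed)
		b->err++;
	if (b->err > 20)
		b->killed = true;	/* cleared again on the next bh run */
}
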
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 95814d9747ef..07a4af0aa3dc 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -25,18 +25,15 @@
#define MIN_MTU 68 /* Min L3 MTU */
#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
-struct veth_net_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- u64 rx_dropped;
+struct pcpu_vstats {
+ u64 packets;
+ u64 bytes;
struct u64_stats_sync syncp;
};
struct veth_priv {
- struct net_device *peer;
- struct veth_net_stats __percpu *stats;
+ struct net_device __rcu *peer;
+ atomic64_t dropped;
};
/*
@@ -92,10 +89,10 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
static void veth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct veth_priv *priv;
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
- priv = netdev_priv(dev);
- data[0] = priv->peer->ifindex;
+ data[0] = peer ? peer->ifindex : 0;
}
static const struct ethtool_ops veth_ethtool_ops = {
@@ -107,50 +104,37 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_ethtool_stats = veth_get_ethtool_stats,
};
-/*
- * xmit
- */
-
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct net_device *rcv = NULL;
- struct veth_priv *priv, *rcv_priv;
- struct veth_net_stats *stats, *rcv_stats;
- int length;
-
- priv = netdev_priv(dev);
- rcv = priv->peer;
- rcv_priv = netdev_priv(rcv);
-
- stats = this_cpu_ptr(priv->stats);
- rcv_stats = this_cpu_ptr(rcv_priv->stats);
-
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *rcv;
+ int length = skb->len;
+
+ rcu_read_lock();
+ rcv = rcu_dereference(priv->peer);
+ if (unlikely(!rcv)) {
+ kfree_skb(skb);
+ goto drop;
+ }
/* don't change ip_summed == CHECKSUM_PARTIAL, as that
- will cause bad checksum on forwarded packets */
+ * will cause bad checksum on forwarded packets
+ */
if (skb->ip_summed == CHECKSUM_NONE &&
rcv->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- length = skb->len;
- if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
- goto rx_drop;
+ if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
+ struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_bytes += length;
- stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
-
- u64_stats_update_begin(&rcv_stats->syncp);
- rcv_stats->rx_bytes += length;
- rcv_stats->rx_packets++;
- u64_stats_update_end(&rcv_stats->syncp);
-
- return NETDEV_TX_OK;
-
-rx_drop:
- u64_stats_update_begin(&rcv_stats->syncp);
- rcv_stats->rx_dropped++;
- u64_stats_update_end(&rcv_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ stats->bytes += length;
+ stats->packets++;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+drop:
+ atomic64_inc(&priv->dropped);
+ }
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -158,47 +142,63 @@ rx_drop:
* general routines
*/
-static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot)
+static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int cpu;
+ result->packets = 0;
+ result->bytes = 0;
for_each_possible_cpu(cpu) {
- struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu);
- u64 rx_packets, rx_bytes, rx_dropped;
- u64 tx_packets, tx_bytes;
+ struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
+ u64 packets, bytes;
unsigned int start;
do {
start = u64_stats_fetch_begin_bh(&stats->syncp);
- rx_packets = stats->rx_packets;
- tx_packets = stats->tx_packets;
- rx_bytes = stats->rx_bytes;
- tx_bytes = stats->tx_bytes;
- rx_dropped = stats->rx_dropped;
+ packets = stats->packets;
+ bytes = stats->bytes;
} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
- tot->rx_packets += rx_packets;
- tot->tx_packets += tx_packets;
- tot->rx_bytes += rx_bytes;
- tot->tx_bytes += tx_bytes;
- tot->rx_dropped += rx_dropped;
+ result->packets += packets;
+ result->bytes += bytes;
}
+ return atomic64_read(&priv->dropped);
+}
+
+static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+ struct pcpu_vstats one;
+
+ tot->tx_dropped = veth_stats_one(&one, dev);
+ tot->tx_bytes = one.bytes;
+ tot->tx_packets = one.packets;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ if (peer) {
+ tot->rx_dropped = veth_stats_one(&one, peer);
+ tot->rx_bytes = one.bytes;
+ tot->rx_packets = one.packets;
+ }
+ rcu_read_unlock();
return tot;
}
static int veth_open(struct net_device *dev)
{
- struct veth_priv *priv;
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
- priv = netdev_priv(dev);
- if (priv->peer == NULL)
+ if (!peer)
return -ENOTCONN;
- if (priv->peer->flags & IFF_UP) {
+ if (peer->flags & IFF_UP) {
netif_carrier_on(dev);
- netif_carrier_on(priv->peer);
+ netif_carrier_on(peer);
}
return 0;
}
@@ -206,9 +206,11 @@ static int veth_open(struct net_device *dev)
static int veth_close(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
netif_carrier_off(dev);
- netif_carrier_off(priv->peer);
+ if (peer)
+ netif_carrier_off(peer);
return 0;
}
@@ -228,24 +230,16 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
- struct veth_net_stats __percpu *stats;
- struct veth_priv *priv;
-
- stats = alloc_percpu(struct veth_net_stats);
- if (stats == NULL)
+ dev->vstats = alloc_percpu(struct pcpu_vstats);
+ if (!dev->vstats)
return -ENOMEM;
- priv = netdev_priv(dev);
- priv->stats = stats;
return 0;
}
static void veth_dev_free(struct net_device *dev)
{
- struct veth_priv *priv;
-
- priv = netdev_priv(dev);
- free_percpu(priv->stats);
+ free_percpu(dev->vstats);
free_netdev(dev);
}
@@ -259,6 +253,10 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
+#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
+
static void veth_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -269,9 +267,10 @@ static void veth_setup(struct net_device *dev)
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
+ dev->features |= VETH_FEATURES;
dev->destructor = veth_dev_free;
- dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+ dev->hw_features = VETH_FEATURES;
}
/*
@@ -396,10 +395,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
*/
priv = netdev_priv(dev);
- priv->peer = peer;
+ rcu_assign_pointer(priv->peer, peer);
priv = netdev_priv(peer);
- priv->peer = dev;
+ rcu_assign_pointer(priv->peer, dev);
return 0;
err_register_dev:
@@ -420,10 +419,20 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
struct net_device *peer;
priv = netdev_priv(dev);
- peer = priv->peer;
+ peer = rtnl_dereference(priv->peer);
+ /* Note: dellink() is called from default_device_exit_batch(),
+ * before a rcu_synchronize() point. The devices are guaranteed
+ * not to be freed before one RCU grace period.
+ */
+ RCU_INIT_POINTER(priv->peer, NULL);
unregister_netdevice_queue(dev, head);
- unregister_netdevice_queue(peer, head);
+
+ if (peer) {
+ priv = netdev_priv(peer);
+ RCU_INIT_POINTER(priv->peer, NULL);
+ unregister_netdevice_queue(peer, head);
+ }
}
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
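
The veth rework above converts the peer pointer to an RCU-protected one: writers publish or clear it under RTNL (rcu_assign_pointer()/RCU_INIT_POINTER()), and readers dereference it under rcu_read_lock() and must tolerate NULL once dellink() has run. A minimal sketch of that reader-side pattern, with illustrative names:

/* Minimal reader-side sketch of the pattern veth_xmit() now uses:
 * fetch the RCU-protected peer under the read lock and bail out
 * cleanly if the pair has already been torn down. Names are invented.
 */
struct pair_priv {
	struct net_device __rcu *peer;
};

static int deliver_to_peer(struct pair_priv *priv, struct sk_buff *skb)
{
	struct net_device *peer;
	int ret = NET_RX_DROP;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);	/* NULL after dellink() */
	if (peer)
		ret = dev_forward_skb(peer, skb);
	else
		kfree_skb(skb);
	rcu_read_unlock();

	return ret;
}
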
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 68d64f0313ea..192c91c8e799 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
@@ -123,6 +124,12 @@ struct virtnet_info {
/* Is the affinity hint set for the virtqueues? */
bool affinity_hint_set;
+
+ /* Per-cpu variable to show the mapping from CPU to virtqueue */
+ int __percpu *vq_index;
+
+ /* CPU hot plug notifier */
+ struct notifier_block nb;
};
struct skb_vnet_hdr {
@@ -130,7 +137,6 @@ struct skb_vnet_hdr {
struct virtio_net_hdr hdr;
struct virtio_net_hdr_mrg_rxbuf mhdr;
};
- unsigned int num_sg;
};
struct padded_vnet_hdr {
@@ -221,6 +227,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
skb->len += size;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
*len -= size;
}
@@ -530,10 +537,10 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
err = add_recvbuf_small(rq, gfp);
oom = err == -ENOMEM;
- if (err < 0)
+ if (err)
break;
++rq->num;
- } while (err > 0);
+ } while (rq->vq->num_free);
if (unlikely(rq->num > rq->max))
rq->max = rq->num;
virtqueue_kick(rq->vq);
@@ -640,10 +647,10 @@ static int virtnet_open(struct net_device *dev)
return 0;
}
-static unsigned int free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq)
{
struct sk_buff *skb;
- unsigned int len, tot_sgs = 0;
+ unsigned int len;
struct virtnet_info *vi = sq->vq->vdev->priv;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
@@ -655,10 +662,8 @@ static unsigned int free_old_xmit_skbs(struct send_queue *sq)
stats->tx_packets++;
u64_stats_update_end(&stats->tx_syncp);
- tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
}
- return tot_sgs;
}
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
@@ -666,6 +671,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
+ unsigned num_sg;
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
@@ -704,8 +710,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
else
sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
- hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
- return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
0, skb, GFP_ATOMIC);
}
@@ -714,28 +720,20 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int qnum = skb_get_queue_mapping(skb);
struct send_queue *sq = &vi->sq[qnum];
- int capacity;
+ int err;
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
/* Try to transmit */
- capacity = xmit_skb(sq, skb);
-
- /* This can happen with OOM and indirect buffers. */
- if (unlikely(capacity < 0)) {
- if (likely(capacity == -ENOMEM)) {
- if (net_ratelimit())
- dev_warn(&dev->dev,
- "TXQ (%d) failure: out of memory\n",
- qnum);
- } else {
- dev->stats.tx_fifo_errors++;
- if (net_ratelimit())
- dev_warn(&dev->dev,
- "Unexpected TXQ (%d) failure: %d\n",
- qnum, capacity);
- }
+ err = xmit_skb(sq, skb);
+
+ /* This should not happen! */
+ if (unlikely(err)) {
+ dev->stats.tx_fifo_errors++;
+ if (net_ratelimit())
+ dev_warn(&dev->dev,
+ "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -748,12 +746,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Apparently nice girls don't return TX_BUSY; stop the queue
* before it gets out of hand. Naturally, this wastes entries. */
- if (capacity < 2+MAX_SKB_FRAGS) {
+ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- capacity += free_old_xmit_skbs(sq);
- if (capacity >= 2+MAX_SKB_FRAGS) {
+ free_old_xmit_skbs(sq);
+ if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
}
@@ -763,19 +761,77 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+/*
+ * Send command via the control virtqueue and check status. Commands
+ * supported by the hypervisor, as indicated by feature bits, should
+ * never fail unless improperly formatted.
+ */
+static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
+ struct scatterlist *data, int out, int in)
+{
+ struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
+ struct virtio_net_ctrl_hdr ctrl;
+ virtio_net_ctrl_ack status = ~0;
+ unsigned int tmp;
+ int i;
+
+ /* Caller should know better */
+ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
+ (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
+
+ out++; /* Add header */
+ in++; /* Add return status */
+
+ ctrl.class = class;
+ ctrl.cmd = cmd;
+
+ sg_init_table(sg, out + in);
+
+ sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
+ for_each_sg(data, s, out + in - 2, i)
+ sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
+ sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
+
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
+
+ virtqueue_kick(vi->cvq);
+
+ /* Spin for a response, the kick causes an ioport write, trapping
+ * into the hypervisor, so the request should be handled immediately.
+ */
+ while (!virtqueue_get_buf(vi->cvq, &tmp))
+ cpu_relax();
+
+ return status == VIRTIO_NET_OK;
+}
+
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
int ret;
+ struct sockaddr *addr = p;
+ struct scatterlist sg;
- ret = eth_mac_addr(dev, p);
+ ret = eth_prepare_mac_addr_change(dev, p);
if (ret)
return ret;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ sg_init_one(&sg, addr->sa_data, dev->addr_len);
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ &sg, 1, 0)) {
+ dev_warn(&vdev->dev,
+ "Failed to set mac address by vq command.\n");
+ return -EINVAL;
+ }
+ } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len);
+ addr->sa_data, dev->addr_len);
+ }
+
+ eth_commit_mac_addr_change(dev, p);
return 0;
}
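
The rewritten virtnet_set_mac_address() above switches from eth_mac_addr() to the split eth_prepare_mac_addr_change()/eth_commit_mac_addr_change() helpers, so the new address is only committed to dev->dev_addr after the device (control-vq command or config space write) has accepted it. A minimal sketch of that two-phase pattern, with a hypothetical example_program_hw() standing in for the device-specific step:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	static int example_program_hw(struct net_device *dev, const u8 *mac)
	{
		/* device-specific write of 'mac' would go here */
		return 0;
	}

	static int example_set_mac(struct net_device *dev, void *p)
	{
		struct sockaddr *addr = p;
		int ret;

		ret = eth_prepare_mac_addr_change(dev, addr);	/* validity checks only */
		if (ret)
			return ret;

		ret = example_program_hw(dev, addr->sa_data);
		if (ret)
			return ret;				/* dev->dev_addr untouched */

		eth_commit_mac_addr_change(dev, addr);		/* now update dev->dev_addr */
		return 0;
	}
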
@@ -829,51 +885,6 @@ static void virtnet_netpoll(struct net_device *dev)
}
#endif
-/*
- * Send command via the control virtqueue and check status. Commands
- * supported by the hypervisor, as indicated by feature bits, should
- * never fail unless improperly formated.
- */
-static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *data, int out, int in)
-{
- struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
- struct virtio_net_ctrl_hdr ctrl;
- virtio_net_ctrl_ack status = ~0;
- unsigned int tmp;
- int i;
-
- /* Caller should know better */
- BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
- (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
-
- out++; /* Add header */
- in++; /* Add return status */
-
- ctrl.class = class;
- ctrl.cmd = cmd;
-
- sg_init_table(sg, out + in);
-
- sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
- for_each_sg(data, s, out + in - 2, i)
- sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
- sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
-
- BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
-
- virtqueue_kick(vi->cvq);
-
- /*
- * Spin for a response, the kick causes an ioport write, trapping
- * into the hypervisor, so the request should be handled immediately.
- */
- while (!virtqueue_get_buf(vi->cvq, &tmp))
- cpu_relax();
-
- return status == VIRTIO_NET_OK;
-}
-
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
@@ -962,10 +973,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
mac_data = buf;
- if (!buf) {
- dev_warn(&dev->dev, "No memory for MAC address buffer\n");
+ if (!buf)
return;
- }
sg_init_table(sg, 2);
@@ -1023,32 +1032,75 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
return 0;
}
-static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
+static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
int i;
+ int cpu;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtqueue_set_affinity(vi->rq[i].vq, -1);
+ virtqueue_set_affinity(vi->sq[i].vq, -1);
+ }
+
+ vi->affinity_hint_set = false;
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ if (cpu == hcpu) {
+ *per_cpu_ptr(vi->vq_index, cpu) = -1;
+ } else {
+ *per_cpu_ptr(vi->vq_index, cpu) =
+ ++i % vi->curr_queue_pairs;
+ }
+ }
+}
+
+static void virtnet_set_affinity(struct virtnet_info *vi)
+{
+ int i;
+ int cpu;
/* In multiqueue mode, when the number of CPUs equals the number of
* queue pairs, we let each queue pair be private to one CPU by
* setting the affinity hint to eliminate contention.
*/
- if ((vi->curr_queue_pairs == 1 ||
- vi->max_queue_pairs != num_online_cpus()) && set) {
- if (vi->affinity_hint_set)
- set = false;
- else
- return;
+ if (vi->curr_queue_pairs == 1 ||
+ vi->max_queue_pairs != num_online_cpus()) {
+ virtnet_clean_affinity(vi, -1);
+ return;
}
- for (i = 0; i < vi->max_queue_pairs; i++) {
- int cpu = set ? i : -1;
+ i = 0;
+ for_each_online_cpu(cpu) {
virtqueue_set_affinity(vi->rq[i].vq, cpu);
virtqueue_set_affinity(vi->sq[i].vq, cpu);
+ *per_cpu_ptr(vi->vq_index, cpu) = i;
+ i++;
}
- if (set)
- vi->affinity_hint_set = true;
- else
- vi->affinity_hint_set = false;
+ vi->affinity_hint_set = true;
+}
+
+static int virtnet_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+ switch(action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ case CPU_DEAD:
+ virtnet_set_affinity(vi);
+ break;
+ case CPU_DOWN_PREPARE:
+ virtnet_clean_affinity(vi, (long)hcpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
}
static void virtnet_get_ringparam(struct net_device *dev,
@@ -1092,13 +1144,15 @@ static int virtnet_set_channels(struct net_device *dev,
if (queue_pairs > vi->max_queue_pairs)
return -EINVAL;
+ get_online_cpus();
err = virtnet_set_queues(vi, queue_pairs);
if (!err) {
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
- virtnet_set_affinity(vi, true);
+ virtnet_set_affinity(vi);
}
+ put_online_cpus();
return err;
}
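
virtnet_set_channels() above brackets the queue and affinity update with get_online_cpus()/put_online_cpus(), so the set of online CPUs cannot change while the per-queue affinity hints and the per-CPU vq_index map are being rewritten. A minimal sketch of that bracketing, with a hypothetical example_assign_cpu():

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static void example_assign_cpu(int cpu, int queue)
	{
		/* per-CPU bookkeeping for 'cpu' -> 'queue' would go here */
	}

	static void example_rebalance(int nqueues)
	{
		int cpu, i = 0;

		get_online_cpus();	/* block CPU hotplug while walking the mask */
		for_each_online_cpu(cpu)
			example_assign_cpu(cpu, i++ % nqueues);
		put_online_cpus();
	}
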
@@ -1137,12 +1191,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
/* To avoid contending a lock hold by a vcpu who would exit to host, select the
* txq based on the processor id.
- * TODO: handle cpu hotplug.
*/
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
+ int txq;
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (skb_rx_queue_recorded(skb)) {
+ txq = skb_get_rx_queue(skb);
+ } else {
+ txq = *__this_cpu_ptr(vi->vq_index);
+ if (txq == -1)
+ txq = 0;
+ }
while (unlikely(txq >= dev->real_num_tx_queues))
txq -= dev->real_num_tx_queues;
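
virtnet_select_queue() above replaces the raw smp_processor_id() fallback with a lookup in the per-CPU vq_index table maintained by the affinity code, falling back to queue 0 when the entry is unassigned (-1). A minimal sketch of that lookup, assuming it runs with bottom halves disabled as on the transmit path; names are hypothetical:

	#include <linux/percpu.h>

	/* Caller runs in the xmit path (BHs disabled), so the CPU cannot change. */
	static u16 example_pick_txq(int __percpu *vq_index, u16 real_num_tx_queues)
	{
		int txq = *__this_cpu_ptr(vq_index);

		if (txq == -1)		/* CPU not mapped (e.g. going offline) */
			txq = 0;

		while (txq >= real_num_tx_queues)
			txq -= real_num_tx_queues;
		return txq;
	}
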
@@ -1258,7 +1319,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
{
struct virtio_device *vdev = vi->vdev;
- virtnet_set_affinity(vi, false);
+ virtnet_clean_affinity(vi, -1);
vdev->config->del_vqs(vdev);
@@ -1381,7 +1442,10 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
- virtnet_set_affinity(vi, true);
+ get_online_cpus();
+ virtnet_set_affinity(vi);
+ put_online_cpus();
+
return 0;
err_free:
@@ -1463,6 +1527,10 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->stats == NULL)
goto free;
+ vi->vq_index = alloc_percpu(int);
+ if (vi->vq_index == NULL)
+ goto free_stats;
+
mutex_init(&vi->config_lock);
vi->config_enable = true;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1486,7 +1554,7 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_stats;
+ goto free_index;
netif_set_real_num_tx_queues(dev, 1);
netif_set_real_num_rx_queues(dev, 1);
@@ -1509,6 +1577,13 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
+ vi->nb.notifier_call = &virtnet_cpu_callback;
+ err = register_hotcpu_notifier(&vi->nb);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_recv_bufs;
+ }
+
/* Assume link up if device can't report link status,
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@ -1530,6 +1605,8 @@ free_recv_bufs:
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
+free_index:
+ free_percpu(vi->vq_index);
free_stats:
free_percpu(vi->stats);
free:
@@ -1553,6 +1630,8 @@ static void virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
+ unregister_hotcpu_notifier(&vi->nb);
+
/* Prevent config work handler from accessing the device. */
mutex_lock(&vi->config_lock);
vi->config_enable = false;
@@ -1564,6 +1643,7 @@ static void virtnet_remove(struct virtio_device *vdev)
flush_work(&vi->config_work);
+ free_percpu(vi->vq_index);
free_percpu(vi->stats);
free_netdev(vi->dev);
}
@@ -1638,6 +1718,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+ VIRTIO_NET_F_CTRL_MAC_ADDR,
};
static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index dc8913c6238c..ffb97b2a15a0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -43,11 +43,7 @@ static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
-static atomic_t devices_found;
-
-#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
-static int irq_share_mode;
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
@@ -152,10 +148,9 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
adapter->link_speed = ret >> 16;
if (ret & 1) { /* Link is up. */
- printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
- adapter->netdev->name, adapter->link_speed);
- if (!netif_carrier_ok(adapter->netdev))
- netif_carrier_on(adapter->netdev);
+ netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
+ adapter->link_speed);
+ netif_carrier_on(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -163,10 +158,8 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
adapter);
}
} else {
- printk(KERN_INFO "%s: NIC Link is Down\n",
- adapter->netdev->name);
- if (netif_carrier_ok(adapter->netdev))
- netif_carrier_off(adapter->netdev);
+ netdev_info(adapter->netdev, "NIC Link is Down\n");
+ netif_carrier_off(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -510,8 +503,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
* sizeof(struct Vmxnet3_TxDesc),
&tq->tx_ring.basePA);
if (!tq->tx_ring.base) {
- printk(KERN_ERR "%s: failed to allocate tx ring\n",
- adapter->netdev->name);
+ netdev_err(adapter->netdev, "failed to allocate tx ring\n");
goto err;
}
@@ -520,8 +512,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
sizeof(struct Vmxnet3_TxDataDesc),
&tq->data_ring.basePA);
if (!tq->data_ring.base) {
- printk(KERN_ERR "%s: failed to allocate data ring\n",
- adapter->netdev->name);
+ netdev_err(adapter->netdev, "failed to allocate data ring\n");
goto err;
}
@@ -530,8 +521,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
sizeof(struct Vmxnet3_TxCompDesc),
&tq->comp_ring.basePA);
if (!tq->comp_ring.base) {
- printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
- adapter->netdev->name);
+ netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
goto err;
}
@@ -580,15 +570,14 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
if (rbi->skb == NULL) {
- rbi->skb = dev_alloc_skb(rbi->len +
- NET_IP_ALIGN);
+ rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
+ rbi->len,
+ GFP_KERNEL);
if (unlikely(rbi->skb == NULL)) {
rq->stats.rx_buf_alloc_failure++;
break;
}
- rbi->skb->dev = adapter->netdev;
- skb_reserve(rbi->skb, NET_IP_ALIGN);
rbi->dma_addr = pci_map_single(adapter->pdev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
@@ -629,12 +618,10 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
- rq->uncommitted[ring_idx] += num_allocated;
- dev_dbg(&adapter->netdev->dev,
- "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
- "%u, uncommitted %u\n", num_allocated, ring->next2fill,
- ring->next2comp, rq->uncommitted[ring_idx]);
+ netdev_dbg(adapter->netdev,
+ "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
+ num_allocated, ring->next2fill, ring->next2comp);
/* so that the device can distinguish a full ring and an empty ring */
BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
@@ -691,7 +678,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_NONE;
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill,
le64_to_cpu(ctx->sop_txd->txd.addr),
@@ -731,7 +718,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[2] = cpu_to_le32(dw2);
gdesc->dword[3] = 0;
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
@@ -771,7 +758,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[2] = cpu_to_le32(dw2);
gdesc->dword[3] = 0;
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"txd[%u]: 0x%llu %u %u\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
@@ -871,7 +858,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
tdd = tq->data_ring.base + tq->tx_ring.next2fill;
memcpy(tdd->data, skb->data, ctx->copy_size);
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
return 1;
@@ -977,7 +964,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
tq->stats.tx_ring_full++;
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"tx queue stopped on %s, next2comp %u"
" next2fill %u\n", adapter->netdev->name,
tq->tx_ring.next2comp, tq->tx_ring.next2fill);
@@ -1060,7 +1047,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
(struct Vmxnet3_TxDesc *)ctx.sop_txd);
gdesc = ctx.sop_txd;
#endif
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
(u32)(ctx.sop_txd -
tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
@@ -1213,7 +1200,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
if (unlikely(rcd->len == 0)) {
/* Pretend the rx buffer is skipped. */
BUG_ON(!(rcd->sop && rcd->eop));
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"rxRing[%u][%u] 0 length\n",
ring_idx, idx);
goto rcd_done;
@@ -1221,7 +1208,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = false;
ctx->skb = rbi->skb;
- new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
+ new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
+ rbi->len);
if (new_skb == NULL) {
/* Skb allocation failed, do not handover this
* skb to stack. Reuse it. Drop the existing pkt
@@ -1236,11 +1224,14 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
+#ifdef VMXNET3_RSS
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
+ (adapter->netdev->features & NETIF_F_RXHASH))
+ ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
+#endif
skb_put(ctx->skb, rcd->len);
/* Immediate refill */
- new_skb->dev = adapter->netdev;
- skb_reserve(new_skb, NET_IP_ALIGN);
rbi->skb = new_skb;
rbi->dma_addr = pci_map_single(adapter->pdev,
rbi->skb->data, rbi->len,
@@ -1333,7 +1324,6 @@ rcd_done:
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
ring->next2fill);
- rq->uncommitted[ring_idx] = 0;
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
@@ -1378,7 +1368,6 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
rq->rx_ring[ring_idx].next2fill =
rq->rx_ring[ring_idx].next2comp = 0;
- rq->uncommitted[ring_idx] = 0;
}
rq->comp_ring.gen = VMXNET3_INIT_GEN;
@@ -1459,7 +1448,6 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
/* reset internal state and allocate buffers for both rings */
for (i = 0; i < 2; i++) {
rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
- rq->uncommitted[i] = 0;
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
sizeof(struct Vmxnet3_RxDesc));
@@ -1518,8 +1506,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
&rq->rx_ring[i].basePA);
if (!rq->rx_ring[i].base) {
- printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
- adapter->netdev->name, i);
+ netdev_err(adapter->netdev,
+ "failed to allocate rx ring %d\n", i);
goto err;
}
}
@@ -1528,8 +1516,7 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
&rq->comp_ring.basePA);
if (!rq->comp_ring.base) {
- printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
- adapter->netdev->name);
+ netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
goto err;
}
@@ -1821,9 +1808,10 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
adapter->rx_queue[i].name,
&(adapter->rx_queue[i]));
if (err) {
- printk(KERN_ERR "Failed to request irq for MSIX"
- ", %s, error %d\n",
- adapter->rx_queue[i].name, err);
+ netdev_err(adapter->netdev,
+ "Failed to request irq for MSIX, "
+ "%s, error %d\n",
+ adapter->rx_queue[i].name, err);
return err;
}
@@ -1852,8 +1840,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
#endif
intr->num_intrs = vector + 1;
if (err) {
- printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
- ":%d\n", adapter->netdev->name, intr->type, err);
+ netdev_err(adapter->netdev,
+ "Failed to request irq (intr type:%d), error %d\n",
+ intr->type, err);
} else {
/* Number of rx queues will not change after this */
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -1874,9 +1863,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
adapter->rx_queue[0].comp_ring.intr_idx = 0;
}
- printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
- "allocated\n", adapter->netdev->name, intr->type,
- intr->mask_mode, intr->num_intrs);
+ netdev_info(adapter->netdev,
+ "intr type %u, mode %u, %u vectors allocated\n",
+ intr->type, intr->mask_mode, intr->num_intrs);
}
return err;
@@ -2042,8 +2031,8 @@ vmxnet3_set_mc(struct net_device *netdev)
rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
new_table));
} else {
- printk(KERN_INFO "%s: failed to copy mcast list"
- ", setting ALL_MULTI\n", netdev->name);
+ netdev_info(netdev, "failed to copy mcast list"
+ ", setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
}
}
@@ -2171,6 +2160,14 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
if (adapter->rss) {
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
+ 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
+ 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
+ 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
+ 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
+ 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
+ };
+
devRead->misc.uptFeatures |= UPT1_F_RSS;
devRead->misc.numRxQueues = adapter->num_rx_queues;
rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
@@ -2180,7 +2177,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
- get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
+ memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));
+
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = ethtool_rxfh_indir_default(
i, adapter->num_rx_queues);
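
The hunks above replace the random per-boot RSS hash key with a fixed Toeplitz key, making flow-to-queue placement reproducible across driver reloads and migrations, and fill the indirection table with ethtool_rxfh_indir_default(). A minimal sketch of that table fill, with hypothetical names:

	#include <linux/ethtool.h>

	/* Spread table entries round-robin over the active RX queues. */
	static void example_fill_rss_indir(u32 *table, unsigned int table_size,
					   unsigned int num_rx_queues)
	{
		unsigned int i;

		for (i = 0; i < table_size; i++)
			table[i] = ethtool_rxfh_indir_default(i, num_rx_queues);
	}
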
@@ -2218,7 +2216,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
u32 ret;
unsigned long flags;
- dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
+ netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
" ring sizes %u %u %u\n", adapter->netdev->name,
adapter->skb_buf_size, adapter->rx_buf_per_pkt,
adapter->tx_queue[0].tx_ring.size,
@@ -2228,15 +2226,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
vmxnet3_tq_init_all(adapter);
err = vmxnet3_rq_init_all(adapter);
if (err) {
- printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
- adapter->netdev->name, err);
+ netdev_err(adapter->netdev,
+ "Failed to init rx queue error %d\n", err);
goto rq_err;
}
err = vmxnet3_request_irqs(adapter);
if (err) {
- printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
- adapter->netdev->name, err);
+ netdev_err(adapter->netdev,
+ "Failed to setup irq for error %d\n", err);
goto irq_err;
}
@@ -2253,8 +2251,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret != 0) {
- printk(KERN_ERR "Failed to activate dev %s: error %u\n",
- adapter->netdev->name, ret);
+ netdev_err(adapter->netdev,
+ "Failed to activate dev: error %u\n", ret);
err = -EINVAL;
goto activate_err;
}
@@ -2369,23 +2367,22 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
- pci_name(pdev), err);
+ dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
return err;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- printk(KERN_ERR "pci_set_consistent_dma_mask failed "
- "for adapter %s\n", pci_name(pdev));
+ dev_err(&pdev->dev,
+ "pci_set_consistent_dma_mask failed\n");
err = -EIO;
goto err_set_mask;
}
*dma64 = true;
} else {
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- printk(KERN_ERR "pci_set_dma_mask failed for adapter "
- "%s\n", pci_name(pdev));
+ dev_err(&pdev->dev,
+ "pci_set_dma_mask failed\n");
err = -EIO;
goto err_set_mask;
}
@@ -2395,8 +2392,8 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
vmxnet3_driver_name);
if (err) {
- printk(KERN_ERR "Failed to request region for adapter %s: "
- "error %d\n", pci_name(pdev), err);
+ dev_err(&pdev->dev,
+ "Failed to request region for adapter: error %d\n", err);
goto err_set_mask;
}
@@ -2406,8 +2403,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
mmio_len = pci_resource_len(pdev, 0);
adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr0) {
- printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to map bar0\n");
err = -EIO;
goto err_ioremap;
}
@@ -2416,8 +2412,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
mmio_len = pci_resource_len(pdev, 1);
adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr1) {
- printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to map bar1\n");
err = -EIO;
goto err_bar1;
}
@@ -2524,12 +2519,14 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
- printk(KERN_ERR "Could not allocate any rx"
- "queues. Aborting.\n");
+ netdev_err(adapter->netdev,
+ "Could not allocate any rx queues. "
+ "Aborting.\n");
goto queue_err;
} else {
- printk(KERN_INFO "Number of rx queues changed "
- "to : %d.\n", i);
+ netdev_info(adapter->netdev,
+ "Number of rx queues changed "
+ "to : %d.\n", i);
adapter->num_rx_queues = i;
err = 0;
break;
@@ -2642,15 +2639,17 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
- printk(KERN_ERR "%s: failed to re-create rx queues,"
- " error %d. Closing it.\n", netdev->name, err);
+ netdev_err(netdev,
+ "failed to re-create rx queues, "
+ " error %d. Closing it.\n", err);
goto out;
}
err = vmxnet3_activate_dev(adapter);
if (err) {
- printk(KERN_ERR "%s: failed to re-activate, error %d. "
- "Closing it\n", netdev->name, err);
+ netdev_err(netdev,
+ "failed to re-activate, error %d. "
+ "Closing it\n", err);
goto out;
}
}
@@ -2678,10 +2677,6 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
netdev->vlan_features = netdev->hw_features &
~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
-
- netdev_info(adapter->netdev,
- "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
- dma64 ? " highDMA" : "");
}
@@ -2724,7 +2719,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
adapter->intr.num_intrs = vectors;
return 0;
} else if (err < 0) {
- netdev_err(adapter->netdev,
+ dev_err(&adapter->netdev->dev,
"Failed to enable MSI-X, error: %d\n", err);
vectors = 0;
} else if (err < vector_threshold) {
@@ -2733,15 +2728,16 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
/* If fails to enable required number of MSI-x vectors
* try enabling minimum number of vectors required.
*/
- netdev_err(adapter->netdev,
- "Failed to enable %d MSI-X, trying %d instead\n",
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable %d MSI-X, trying %d instead\n",
vectors, vector_threshold);
vectors = vector_threshold;
}
}
- netdev_info(adapter->netdev,
- "Number of MSI-X interrupts which can be allocated are lower than min threshold required.\n");
+ dev_info(&adapter->pdev->dev,
+ "Number of MSI-X interrupts which can be allocated "
+ "is lower than min threshold required.\n");
return err;
}
@@ -2796,7 +2792,8 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
- printk(KERN_ERR "Number of rx queues : 1\n");
+ netdev_err(adapter->netdev,
+ "Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
adapter->intr.num_intrs =
VMXNET3_LINUX_MIN_MSIX_VECT;
@@ -2807,9 +2804,9 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
return;
/* If we cannot allocate MSIx vectors use only one rx queue */
- netdev_info(adapter->netdev,
- "Failed to enable MSI-X, error %d . Limiting #rx queues to 1, try MSI.\n",
- err);
+ dev_info(&adapter->pdev->dev,
+ "Failed to enable MSI-X, error %d. "
+ "Limiting #rx queues to 1, try MSI.\n", err);
adapter->intr.type = VMXNET3_IT_MSI;
}
@@ -2826,7 +2823,8 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#endif /* CONFIG_PCI_MSI */
adapter->num_rx_queues = 1;
- printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
+ dev_info(&adapter->netdev->dev,
+ "Using INTx interrupt, #Rx queues: 1.\n");
adapter->intr.type = VMXNET3_IT_INTX;
/* INT-X related setting */
@@ -2852,7 +2850,7 @@ vmxnet3_tx_timeout(struct net_device *netdev)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
- printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
+ netdev_err(adapter->netdev, "tx hang\n");
schedule_work(&adapter->work);
netif_wake_queue(adapter->netdev);
}
@@ -2872,12 +2870,12 @@ vmxnet3_reset_work(struct work_struct *data)
/* if the device is closed, we must leave it alone */
rtnl_lock();
if (netif_running(adapter->netdev)) {
- printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
+ netdev_notice(adapter->netdev, "resetting\n");
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
vmxnet3_activate_dev(adapter);
} else {
- printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
+ netdev_info(adapter->netdev, "already closed\n");
}
rtnl_unlock();
@@ -2936,8 +2934,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
- printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
- num_tx_queues, num_rx_queues);
+ dev_info(&pdev->dev,
+ "# of Tx queues : %d, # of Rx queues : %d\n",
+ num_tx_queues, num_rx_queues);
if (!netdev)
return -ENOMEM;
@@ -2952,8 +2951,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa);
if (!adapter->shared) {
- printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM;
goto err_alloc_shared;
}
@@ -2967,8 +2965,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
&adapter->queue_desc_pa);
if (!adapter->tqd_start) {
- printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM;
goto err_alloc_queue_desc;
}
@@ -2998,8 +2995,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
} else {
- printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
- " %s\n", ver, pci_name(pdev));
+ dev_err(&pdev->dev,
+ "Incompatible h/w version (0x%x) for adapter\n", ver);
err = -EBUSY;
goto err_ver;
}
@@ -3008,8 +3005,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
} else {
- printk(KERN_ERR "Incompatible upt version (0x%x) for "
- "adapter %s\n", ver, pci_name(pdev));
+ dev_err(&pdev->dev,
+ "Incompatible upt version (0x%x) for adapter\n", ver);
err = -EBUSY;
goto err_ver;
}
@@ -3017,11 +3014,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter, dma64);
- adapter->dev_number = atomic_read(&devices_found);
-
- adapter->share_intr = irq_share_mode;
- if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
- adapter->num_tx_queues != adapter->num_rx_queues)
+ if (adapter->num_tx_queues == adapter->num_rx_queues)
+ adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
+ else
adapter->share_intr = VMXNET3_INTR_DONTSHARE;
vmxnet3_alloc_intr_resources(adapter);
@@ -3030,7 +3025,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (adapter->num_rx_queues > 1 &&
adapter->intr.type == VMXNET3_IT_MSIX) {
adapter->rss = true;
- printk(KERN_INFO "RSS is enabled.\n");
+ netdev->hw_features |= NETIF_F_RXHASH;
+ netdev->features |= NETIF_F_RXHASH;
+ dev_dbg(&pdev->dev, "RSS is enabled.\n");
} else {
adapter->rss = false;
}
@@ -3061,16 +3058,15 @@ vmxnet3_probe_device(struct pci_dev *pdev,
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+ netif_carrier_off(netdev);
err = register_netdev(netdev);
if (err) {
- printk(KERN_ERR "Failed to register adapter %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to register adapter\n");
goto err_register;
}
vmxnet3_check_link(adapter, false);
- atomic_inc(&devices_found);
return 0;
err_register:
@@ -3312,7 +3308,7 @@ static struct pci_driver vmxnet3_driver = {
static int __init
vmxnet3_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
+ pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
VMXNET3_DRIVER_VERSION_REPORT);
return pci_register_driver(&vmxnet3_driver);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 587a218b2345..9bc542be2937 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -207,7 +207,7 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
- ETHTOOL_BUSINFO_LEN);
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
@@ -522,24 +522,23 @@ vmxnet3_set_ringparam(struct net_device *netdev,
if (err) {
/* failed, most likely because of OOM, try default
* size */
- printk(KERN_ERR "%s: failed to apply new sizes, try the"
- " default ones\n", netdev->name);
+ netdev_err(netdev, "failed to apply new sizes, "
+ "try the default ones\n");
err = vmxnet3_create_queues(adapter,
VMXNET3_DEF_TX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE);
if (err) {
- printk(KERN_ERR "%s: failed to create queues "
- "with default sizes. Closing it\n",
- netdev->name);
+ netdev_err(netdev, "failed to create queues "
+ "with default sizes. Closing it\n");
goto out;
}
}
err = vmxnet3_activate_dev(adapter);
if (err)
- printk(KERN_ERR "%s: failed to re-activate, error %d."
- " Closing it\n", netdev->name, err);
+ netdev_err(netdev, "failed to re-activate, error %d."
+ " Closing it\n", err);
}
out:
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index fc46a81ad538..3198384689d9 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -276,8 +276,6 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rx_ctx rx_ctx;
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
- u32 uncommitted[2]; /* # of buffers allocated since last RXPROD
- * update */
struct vmxnet3_rx_buf_info *buf_info[2];
struct Vmxnet3_RxQueueCtrl *shared;
struct vmxnet3_rq_driver_stats stats;
@@ -354,7 +352,6 @@ struct vmxnet3_adapter {
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
- int dev_number;
int share_intr;
};
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3b3fdf648ea7..9d70421cf3a0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
+#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
@@ -392,7 +393,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
}
/* Delete entry (via netlink) */
-static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
const unsigned char *addr)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -505,7 +507,8 @@ static int vxlan_join_group(struct net_device *dev)
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct sock *sk = vn->sock->sk;
struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_ifindex = vxlan->link,
};
int err;
@@ -532,7 +535,8 @@ static int vxlan_leave_group(struct net_device *dev)
int err = 0;
struct sock *sk = vn->sock->sk;
struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_ifindex = vxlan->link,
};
/* Only leave group when last vxlan is done. */
@@ -1189,6 +1193,7 @@ static void vxlan_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
spin_lock_init(&vxlan->hash_lock);
@@ -1268,6 +1273,18 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
+static void vxlan_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
+}
+
+static const struct ethtool_ops vxlan_ethtool_ops = {
+ .get_drvinfo = vxlan_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
static int vxlan_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@@ -1345,6 +1362,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
vxlan->port_max = ntohs(p->high);
}
+ SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
+
err = register_netdevice(dev);
if (!err)
hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
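
The vxlan hunks above wire up a small ethtool_ops so ethtool can report the driver name, version and link state for the virtual device, attached with SET_ETHTOOL_OPS() before register_netdevice(). A minimal sketch of that wiring for a hypothetical virtual device:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	static void example_get_drvinfo(struct net_device *dev,
					struct ethtool_drvinfo *info)
	{
		strlcpy(info->driver, "example", sizeof(info->driver));
		strlcpy(info->version, "0.1", sizeof(info->version));
	}

	static const struct ethtool_ops example_ethtool_ops = {
		.get_drvinfo	= example_get_drvinfo,
		.get_link	= ethtool_op_get_link,	/* reports netif_carrier_ok() */
	};

	static void example_setup(struct net_device *dev)
	{
		SET_ETHTOOL_OPS(dev, &example_ethtool_ops);
	}
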
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index d58431e99f73..94e234975c61 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -356,63 +356,9 @@ config SDLA
To compile this driver as a module, choose M here: the
module will be called sdla.
-# Wan router core.
-config WAN_ROUTER_DRIVERS
- tristate "WAN router drivers"
- depends on WAN_ROUTER
- ---help---
- Connect LAN to WAN via Linux box.
-
- Select driver your card and remember to say Y to "Wan Router."
- You will need the wan-tools package which is available from
- <ftp://ftp.sangoma.com/>.
-
- Note that the answer to this question won't directly affect the
- kernel except for how subordinate drivers may be built:
- saying N will just cause the configurator to skip all
- the questions about WAN router drivers.
-
- If unsure, say N.
-
-config CYCLADES_SYNC
- tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)"
- depends on WAN_ROUTER_DRIVERS && (PCI || ISA)
- ---help---
- Cyclom 2X from Cyclades Corporation <http://www.avocent.com/> is an
- intelligent multiprotocol WAN adapter with data transfer rates up to
- 512 Kbps. These cards support the X.25 and SNA related protocols.
-
- While no documentation is available at this time please grab the
- wanconfig tarball in
- <http://www.conectiva.com.br/~acme/cycsyn-devel/> (with minor changes
- to make it compile with the current wanrouter include files; efforts
- are being made to use the original package available at
- <ftp://ftp.sangoma.com/>).
-
- Feel free to contact me or the cycsyn-devel mailing list at
- <acme@conectiva.com.br> and <cycsyn-devel@bazar.conectiva.com.br> for
- additional details, I hope to have documentation available as soon as
- possible. (Cyclades Brazil is writing the Documentation).
-
- The next questions will ask you about the protocols you want the
- driver to support (for now only X.25 is supported).
-
- If you have one or more of these cards, say Y to this option.
-
- To compile this driver as a module, choose M here: the
- module will be called cyclomx.
-
-config CYCLOMX_X25
- bool "Cyclom 2X X.25 support (EXPERIMENTAL)"
- depends on CYCLADES_SYNC
- help
- Connect a Cyclom 2X card to an X.25 network.
-
- Enabling X.25 support will enlarge your kernel by about 11 kB.
-
# X.25 network drivers
config LAPBETHER
- tristate "LAPB over Ethernet driver (EXPERIMENTAL)"
+ tristate "LAPB over Ethernet driver"
depends on LAPB && X25
---help---
Driver for a pseudo device (typically called /dev/lapb0) which allows
@@ -428,8 +374,8 @@ config LAPBETHER
If unsure, say N.
config X25_ASY
- tristate "X.25 async driver (EXPERIMENTAL)"
- depends on LAPB && X25
+ tristate "X.25 async driver"
+ depends on LAPB && X25 && TTY
---help---
Send and receive X.25 frames over regular asynchronous serial
lines such as telephone lines equipped with ordinary modems.
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index df70248e2fda..c135ef47cbca 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -5,10 +5,6 @@
# Rewritten to use lists instead of if-statements.
#
-cyclomx-y := cycx_main.o
-cyclomx-$(CONFIG_CYCLOMX_X25) += cycx_x25.o
-cyclomx-objs := $(cyclomx-y)
-
obj-$(CONFIG_HDLC) += hdlc.o
obj-$(CONFIG_HDLC_RAW) += hdlc_raw.o
obj-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o
@@ -28,7 +24,6 @@ obj-$(CONFIG_LANMEDIA) += lmc/
obj-$(CONFIG_DLCI) += dlci.o
obj-$(CONFIG_SDLA) += sdla.o
-obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_SBNI) += sbni.o
obj-$(CONFIG_N2) += n2.o
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 6aed238e573e..0179cefae438 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -795,8 +795,8 @@ static ssize_t cosa_read(struct file *file,
if (mutex_lock_interruptible(&chan->rlock))
return -ERESTARTSYS;
- if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) {
- pr_info("%s: cosa_read() - OOM\n", cosa->name);
+ chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL);
+ if (chan->rxdata == NULL) {
mutex_unlock(&chan->rlock);
return -ENOMEM;
}
@@ -874,9 +874,8 @@ static ssize_t cosa_write(struct file *file,
count = COSA_MTU;
/* Allocate the buffer */
- if ((kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA)) == NULL) {
- pr_notice("%s: cosa_write() OOM - dropping packet\n",
- cosa->name);
+ kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA);
+ if (kbuf == NULL) {
up(&chan->wsem);
return -ENOMEM;
}
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
deleted file mode 100644
index 2a3ecae67a90..000000000000
--- a/drivers/net/wan/cycx_drv.c
+++ /dev/null
@@ -1,569 +0,0 @@
-/*
-* cycx_drv.c Cyclom 2X Support Module.
-*
-* This module is a library of common hardware specific
-* functions used by the Cyclades Cyclom 2X sync card.
-*
-* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-*
-* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
-*
-* Based on sdladrv.c by Gene Kozin <genek@compuserve.com>
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version
-* 2 of the License, or (at your option) any later version.
-* ============================================================================
-* 1999/11/11 acme set_current_state(TASK_INTERRUPTIBLE), code
-* cleanup
-* 1999/11/08 acme init_cyc2x deleted, doing nothing
-* 1999/11/06 acme back to read[bw], write[bw] and memcpy_to and
-* fromio to use dpmbase ioremaped
-* 1999/10/26 acme use isa_read[bw], isa_write[bw] & isa_memcpy_to
-* & fromio
-* 1999/10/23 acme cleanup to only supports cyclom2x: all the other
-* boards are no longer manufactured by cyclades,
-* if someone wants to support them... be my guest!
-* 1999/05/28 acme cycx_intack & cycx_intde gone for good
-* 1999/05/18 acme lots of unlogged work, submitting to Linus...
-* 1999/01/03 acme more judicious use of data types
-* 1999/01/03 acme judicious use of data types :>
-* cycx_inten trying to reset pending interrupts
-* from cyclom 2x - I think this isn't the way to
-* go, but for now...
-* 1999/01/02 acme cycx_intack ok, I think there's nothing to do
-* to ack an int in cycx_drv.c, only handle it in
-* cyx_isr (or in the other protocols: cyp_isr,
-* cyf_isr, when they get implemented.
-* Dec 31, 1998 acme cycx_data_boot & cycx_code_boot fixed, crossing
-* fingers to see x25_configure in cycx_x25.c
-* work... :)
-* Dec 26, 1998 acme load implementation fixed, seems to work! :)
-* cycx_2x_dpmbase_options with all the possible
-* DPM addresses (20).
-* cycx_intr implemented (test this!)
-* general code cleanup
-* Dec 8, 1998 Ivan Passos Cyclom-2X firmware load implementation.
-* Aug 8, 1998 acme Initial version.
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h> /* __init */
-#include <linux/module.h>
-#include <linux/kernel.h> /* printk(), and other useful stuff */
-#include <linux/stddef.h> /* offsetof(), etc. */
-#include <linux/errno.h> /* return codes */
-#include <linux/cycx_drv.h> /* API definitions */
-#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */
-#include <linux/delay.h> /* udelay, msleep_interruptible */
-#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */
-
-#define MOD_VERSION 0
-#define MOD_RELEASE 6
-
-MODULE_AUTHOR("Arnaldo Carvalho de Melo");
-MODULE_DESCRIPTION("Cyclom 2x Sync Card Driver");
-MODULE_LICENSE("GPL");
-
-/* Hardware-specific functions */
-static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len);
-static void cycx_bootcfg(struct cycx_hw *hw);
-
-static int reset_cyc2x(void __iomem *addr);
-static int detect_cyc2x(void __iomem *addr);
-
-/* Miscellaneous functions */
-static int get_option_index(const long *optlist, long optval);
-static u16 checksum(u8 *buf, u32 len);
-
-#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
-
-/* Global Data */
-
-/* private data */
-static const char fullname[] = "Cyclom 2X Support Module";
-static const char copyright[] =
- "(c) 1998-2003 Arnaldo Carvalho de Melo <acme@conectiva.com.br>";
-
-/* Hardware configuration options.
- * These are arrays of configuration options used by verification routines.
- * The first element of each array is its size (i.e. number of options).
- */
-static const long cyc2x_dpmbase_options[] = {
- 20,
- 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
- 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
- 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
-};
-
-static const long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
-
-/* Kernel Loadable Module Entry Points */
-/* Module 'insert' entry point.
- * o print announcement
- * o initialize static data
- *
- * Return: 0 Ok
- * < 0 error.
- * Context: process */
-
-static int __init cycx_drv_init(void)
-{
- pr_info("%s v%u.%u %s\n",
- fullname, MOD_VERSION, MOD_RELEASE, copyright);
-
- return 0;
-}
-
-/* Module 'remove' entry point.
- * o release all remaining system resources */
-static void cycx_drv_cleanup(void)
-{
-}
-
-/* Kernel APIs */
-/* Set up adapter.
- * o detect adapter type
- * o verify hardware configuration options
- * o check for hardware conflicts
- * o set up adapter shared memory
- * o test adapter memory
- * o load firmware
- * Return: 0 ok.
- * < 0 error */
-EXPORT_SYMBOL(cycx_setup);
-int cycx_setup(struct cycx_hw *hw, void *cfm, u32 len, unsigned long dpmbase)
-{
- int err;
-
- /* Verify IRQ configuration options */
- if (!get_option_index(cycx_2x_irq_options, hw->irq)) {
- pr_err("IRQ %d is invalid!\n", hw->irq);
- return -EINVAL;
- }
-
- /* Setup adapter dual-port memory window and test memory */
- if (!dpmbase) {
- pr_err("you must specify the dpm address!\n");
- return -EINVAL;
- } else if (!get_option_index(cyc2x_dpmbase_options, dpmbase)) {
- pr_err("memory address 0x%lX is invalid!\n", dpmbase);
- return -EINVAL;
- }
-
- hw->dpmbase = ioremap(dpmbase, CYCX_WINDOWSIZE);
- hw->dpmsize = CYCX_WINDOWSIZE;
-
- if (!detect_cyc2x(hw->dpmbase)) {
- pr_err("adapter Cyclom 2X not found at address 0x%lX!\n",
- dpmbase);
- return -EINVAL;
- }
-
- pr_info("found Cyclom 2X card at address 0x%lX\n", dpmbase);
-
- /* Load firmware. If loader fails then shut down adapter */
- err = load_cyc2x(hw, cfm, len);
-
- if (err)
- cycx_down(hw); /* shutdown adapter */
-
- return err;
-}
-
-EXPORT_SYMBOL(cycx_down);
-int cycx_down(struct cycx_hw *hw)
-{
- iounmap(hw->dpmbase);
- return 0;
-}
-
-/* Enable interrupt generation. */
-static void cycx_inten(struct cycx_hw *hw)
-{
- writeb(0, hw->dpmbase);
-}
-
-/* Generate an interrupt to adapter's CPU. */
-EXPORT_SYMBOL(cycx_intr);
-void cycx_intr(struct cycx_hw *hw)
-{
- writew(0, hw->dpmbase + GEN_CYCX_INTR);
-}
-
-/* Execute Adapter Command.
- * o Set exec flag.
- * o Busy-wait until flag is reset. */
-EXPORT_SYMBOL(cycx_exec);
-int cycx_exec(void __iomem *addr)
-{
- u16 i = 0;
- /* wait till addr content is zeroed */
-
- while (readw(addr)) {
- udelay(1000);
-
- if (++i > 50)
- return -1;
- }
-
- return 0;
-}
-
-/* Read absolute adapter memory.
- * Transfer data from adapter's memory to data buffer. */
-EXPORT_SYMBOL(cycx_peek);
-int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
-{
- if (len == 1)
- *(u8*)buf = readb(hw->dpmbase + addr);
- else
- memcpy_fromio(buf, hw->dpmbase + addr, len);
-
- return 0;
-}
-
-/* Write Absolute Adapter Memory.
- * Transfer data from data buffer to adapter's memory. */
-EXPORT_SYMBOL(cycx_poke);
-int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
-{
- if (len == 1)
- writeb(*(u8*)buf, hw->dpmbase + addr);
- else
- memcpy_toio(hw->dpmbase + addr, buf, len);
-
- return 0;
-}
-
-/* Hardware-Specific Functions */
-
-/* Load Aux Routines */
-/* Reset board hardware.
- return 1 if memory exists at addr and 0 if not. */
-static int memory_exists(void __iomem *addr)
-{
- int tries = 0;
-
- for (; tries < 3 ; tries++) {
- writew(TEST_PATTERN, addr + 0x10);
-
- if (readw(addr + 0x10) == TEST_PATTERN)
- if (readw(addr + 0x10) == TEST_PATTERN)
- return 1;
-
- msleep_interruptible(1 * 1000);
- }
-
- return 0;
-}
-
-/* Load reset code. */
-static void reset_load(void __iomem *addr, u8 *buffer, u32 cnt)
-{
- void __iomem *pt_code = addr + RESET_OFFSET;
- u16 i; /*, j; */
-
- for (i = 0 ; i < cnt ; i++) {
-/* for (j = 0 ; j < 50 ; j++); Delay - FIXME busy waiting... */
- writeb(*buffer++, pt_code++);
- }
-}
-
-/* Load buffer using boot interface.
- * o copy data from buffer to Cyclom-X memory
- * o wait for reset code to copy it to right portion of memory */
-static int buffer_load(void __iomem *addr, u8 *buffer, u32 cnt)
-{
- memcpy_toio(addr + DATA_OFFSET, buffer, cnt);
- writew(GEN_BOOT_DAT, addr + CMD_OFFSET);
-
- return wait_cyc(addr);
-}
-
-/* Set up entry point and kick start Cyclom-X CPU. */
-static void cycx_start(void __iomem *addr)
-{
- /* put in 0x30 offset the jump instruction to the code entry point */
- writeb(0xea, addr + 0x30);
- writeb(0x00, addr + 0x31);
- writeb(0xc4, addr + 0x32);
- writeb(0x00, addr + 0x33);
- writeb(0x00, addr + 0x34);
-
- /* cmd to start executing code */
- writew(GEN_START, addr + CMD_OFFSET);
-}
-
-/* Load and boot reset code. */
-static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len)
-{
- void __iomem *pt_start = addr + START_OFFSET;
-
- writeb(0xea, pt_start++); /* jmp to f000:3f00 */
- writeb(0x00, pt_start++);
- writeb(0xfc, pt_start++);
- writeb(0x00, pt_start++);
- writeb(0xf0, pt_start);
- reset_load(addr, code, len);
-
- /* 80186 was in hold, go */
- writeb(0, addr + START_CPU);
- msleep_interruptible(1 * 1000);
-}
-
-/* Load data.bin file through boot (reset) interface. */
-static int cycx_data_boot(void __iomem *addr, u8 *code, u32 len)
-{
- void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
- u32 i;
-
- /* boot buffer length */
- writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
- writew(GEN_DEFPAR, pt_boot_cmd);
-
- if (wait_cyc(addr) < 0)
- return -1;
-
- writew(0, pt_boot_cmd + sizeof(u16));
- writew(0x4000, pt_boot_cmd + 2 * sizeof(u16));
- writew(GEN_SET_SEG, pt_boot_cmd);
-
- if (wait_cyc(addr) < 0)
- return -1;
-
- for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
- if (buffer_load(addr, code + i,
- min_t(u32, CFM_LOAD_BUFSZ, (len - i))) < 0) {
- pr_err("Error !!\n");
- return -1;
- }
-
- return 0;
-}
-
-
-/* Load code.bin file through boot (reset) interface. */
-static int cycx_code_boot(void __iomem *addr, u8 *code, u32 len)
-{
- void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
- u32 i;
-
- /* boot buffer length */
- writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
- writew(GEN_DEFPAR, pt_boot_cmd);
-
- if (wait_cyc(addr) < 0)
- return -1;
-
- writew(0x0000, pt_boot_cmd + sizeof(u16));
- writew(0xc400, pt_boot_cmd + 2 * sizeof(u16));
- writew(GEN_SET_SEG, pt_boot_cmd);
-
- if (wait_cyc(addr) < 0)
- return -1;
-
- for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
- if (buffer_load(addr, code + i,
- min_t(u32, CFM_LOAD_BUFSZ, (len - i)))) {
- pr_err("Error !!\n");
- return -1;
- }
-
- return 0;
-}
-
-/* Load adapter from the memory image of the CYCX firmware module.
- * o verify firmware integrity and compatibility
- * o start adapter up */
-static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
-{
- int i, j;
- struct cycx_fw_header *img_hdr;
- u8 *reset_image,
- *data_image,
- *code_image;
- void __iomem *pt_cycld = hw->dpmbase + 0x400;
- u16 cksum;
-
- /* Announce */
- pr_info("firmware signature=\"%s\"\n", cfm->signature);
-
- /* Verify firmware signature */
- if (strcmp(cfm->signature, CFM_SIGNATURE)) {
- pr_err("load_cyc2x: not Cyclom-2X firmware!\n");
- return -EINVAL;
- }
-
- pr_info("firmware version=%u\n", cfm->version);
-
- /* Verify firmware module format version */
- if (cfm->version != CFM_VERSION) {
- pr_err("%s: firmware format %u rejected! Expecting %u.\n",
- __func__, cfm->version, CFM_VERSION);
- return -EINVAL;
- }
-
- /* Verify firmware module length and checksum */
- cksum = checksum((u8*)&cfm->info, sizeof(struct cycx_fw_info) +
- cfm->info.codesize);
-/*
- FIXME cfm->info.codesize is off by 2
- if (((len - sizeof(struct cycx_firmware) - 1) != cfm->info.codesize) ||
-*/
- if (cksum != cfm->checksum) {
- pr_err("%s: firmware corrupted!\n", __func__);
- pr_err(" cdsize = 0x%x (expected 0x%lx)\n",
- len - (int)sizeof(struct cycx_firmware) - 1,
- cfm->info.codesize);
- pr_err(" chksum = 0x%x (expected 0x%x)\n",
- cksum, cfm->checksum);
- return -EINVAL;
- }
-
- /* If everything is ok, set reset, data and code pointers */
- img_hdr = (struct cycx_fw_header *)&cfm->image;
-#ifdef FIRMWARE_DEBUG
- pr_info("%s: image sizes\n", __func__);
- pr_info(" reset=%lu\n", img_hdr->reset_size);
- pr_info(" data=%lu\n", img_hdr->data_size);
- pr_info(" code=%lu\n", img_hdr->code_size);
-#endif
- reset_image = ((u8 *)img_hdr) + sizeof(struct cycx_fw_header);
- data_image = reset_image + img_hdr->reset_size;
- code_image = data_image + img_hdr->data_size;
-
- /*---- Start load ----*/
- /* Announce */
- pr_info("loading firmware %s (ID=%u)...\n",
- cfm->descr[0] ? cfm->descr : "unknown firmware",
- cfm->info.codeid);
-
- for (i = 0 ; i < 5 ; i++) {
- /* Reset Cyclom hardware */
- if (!reset_cyc2x(hw->dpmbase)) {
- pr_err("dpm problem or board not found\n");
- return -EINVAL;
- }
-
- /* Load reset.bin */
- cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
- /* reset is waiting for boot */
- writew(GEN_POWER_ON, pt_cycld);
- msleep_interruptible(1 * 1000);
-
- for (j = 0 ; j < 3 ; j++)
- if (!readw(pt_cycld))
- goto reset_loaded;
- else
- msleep_interruptible(1 * 1000);
- }
-
- pr_err("reset not started\n");
- return -EINVAL;
-
-reset_loaded:
- /* Load data.bin */
- if (cycx_data_boot(hw->dpmbase, data_image, img_hdr->data_size)) {
- pr_err("cannot load data file\n");
- return -EINVAL;
- }
-
- /* Load code.bin */
- if (cycx_code_boot(hw->dpmbase, code_image, img_hdr->code_size)) {
- pr_err("cannot load code file\n");
- return -EINVAL;
- }
-
- /* Prepare boot-time configuration data */
- cycx_bootcfg(hw);
-
- /* kick-off CPU */
- cycx_start(hw->dpmbase);
-
- /* Arthur Ganzert's tip: wait a while after the firmware loading...
- Mon Apr 26 17:17:12 EST 1999 - acme */
- msleep_interruptible(7 * 1000);
- pr_info("firmware loaded!\n");
-
- /* enable interrupts */
- cycx_inten(hw);
-
- return 0;
-}
-
-/* Prepare boot-time firmware configuration data.
- * o initialize configuration data area
- From async.doc - V_3.4.0 - 07/18/1994
- - As of now, only static buffers are available to the user.
- So, the bit VD_RXDIRC must be set in 'valid'. That means that user
- wants to use the static transmission and reception buffers. */
-static void cycx_bootcfg(struct cycx_hw *hw)
-{
- /* use fixed buffers */
- writeb(FIXED_BUFFERS, hw->dpmbase + CONF_OFFSET);
-}
-
-/* Detect Cyclom 2x adapter.
- * The adapter is detected by resetting the board and checking that its
- * dual-port memory responds (see reset_cyc2x() and memory_exists()).
- * Return 1 if detected o.k. or 0 if failed.
- * Note: This test is destructive! Adapter will be left in shutdown
- * state after the test. */
-static int detect_cyc2x(void __iomem *addr)
-{
- reset_cyc2x(addr);
-
- return memory_exists(addr);
-}
-
-/* Miscellaneous */
-/* Get option's index into the options list.
- * Return option's index (1 .. N) or zero if option is invalid. */
-static int get_option_index(const long *optlist, long optval)
-{
- int i = 1;
-
- for (; i <= optlist[0]; ++i)
- if (optlist[i] == optval)
- return i;
-
- return 0;
-}
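
get_option_index() relies on the option-table convention used throughout this driver: element 0 of the array holds the number of valid entries and the options themselves occupy indices 1..N. A small standalone illustration (the sample values are made up):

#include <stdio.h>

/* Element 0 is the count; valid options live at indices 1..N. */
static const long irq_options[] = { 4, 3, 5, 10, 11 };

static int get_option_index(const long *optlist, long optval)
{
    int i;

    for (i = 1; i <= optlist[0]; ++i)
        if (optlist[i] == optval)
            return i;
    return 0;  /* not a valid option */
}

int main(void)
{
    printf("%d\n", get_option_index(irq_options, 10)); /* 3 */
    printf("%d\n", get_option_index(irq_options, 7));  /* 0 */
    return 0;
}
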
-
-/* Reset adapter's CPU. */
-static int reset_cyc2x(void __iomem *addr)
-{
- writeb(0, addr + RST_ENABLE);
- msleep_interruptible(2 * 1000);
- writeb(0, addr + RST_DISABLE);
- msleep_interruptible(2 * 1000);
-
- return memory_exists(addr);
-}
-
-/* Calculate 16-bit CRC using CCITT polynomial. */
-static u16 checksum(u8 *buf, u32 len)
-{
- u16 crc = 0;
- u16 mask, flag;
-
- for (; len; --len, ++buf)
- for (mask = 0x80; mask; mask >>= 1) {
- flag = (crc & 0x8000);
- crc <<= 1;
- crc |= ((*buf & mask) ? 1 : 0);
-
- if (flag)
- crc ^= 0x1021;
- }
-
- return crc;
-}
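
checksum() runs the CCITT polynomial 0x1021 over the firmware image one bit at a time, MSB first, starting from an all-zero register; because it never flushes the register with 16 trailing zero bits, its results generally differ from table-driven CRC-16/CCITT implementations. A standalone copy of the same bit-serial form, for experimentation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-serial CRC over polynomial x^16 + x^12 + x^5 + 1 (0x1021),
 * initial value 0, message bits fed MSB first, no final zero-flush --
 * the same formulation as the driver's checksum(). */
static uint16_t cycx_checksum(const uint8_t *buf, uint32_t len)
{
    uint16_t crc = 0;
    uint16_t mask, flag;

    for (; len; --len, ++buf)
        for (mask = 0x80; mask; mask >>= 1) {
            flag = crc & 0x8000;
            crc <<= 1;
            crc |= (*buf & mask) ? 1 : 0;
            if (flag)
                crc ^= 0x1021;
        }
    return crc;
}

int main(void)
{
    const char *msg = "123456789";

    printf("0x%04x\n", cycx_checksum((const uint8_t *)msg, strlen(msg)));
    return 0;
}
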
-
-module_init(cycx_drv_init);
-module_exit(cycx_drv_cleanup);
-
-/* End */
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
deleted file mode 100644
index 81fbbad406be..000000000000
--- a/drivers/net/wan/cycx_main.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
-* cycx_main.c Cyclades Cyclom 2X WAN Link Driver. Main module.
-*
-* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-*
-* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
-*
-* Based on sdlamain.c by Gene Kozin <genek@compuserve.com> &
-* Jaspreet Singh <jaspreet@sangoma.com>
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version
-* 2 of the License, or (at your option) any later version.
-* ============================================================================
-* Please look at the BitKeeper changelog (or whatever SCM tool ends up
-* importing the BitKeeper changelog, or replaces BitKeeper as the main
-* tool for Linux development).
-*
-* 2001/05/09 acme Fix MODULE_DESC for debug, .bss nitpicks,
-* some cleanups
-* 2000/07/13 acme remove useless #ifdef MODULE and crap
-* #if KERNEL_VERSION > blah
-* 2000/07/06 acme __exit at cyclomx_cleanup
-* 2000/04/02 acme dprintk and cycx_debug
-* module_init/module_exit
-* 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count
-* and cyclomx_close to cyclomx_mod_dec_use_count
-* 2000/01/08 acme cleanup
-* 1999/11/06 acme cycx_down back to life (it needs to be
-* called to iounmap the dpmbase)
-* 1999/08/09 acme removed references to enable_tx_int
-* use spinlocks instead of cli/sti in
-* cyclomx_set_state
-* 1999/05/19 acme works directly linked into the kernel
-* init_waitqueue_head for 2.3.* kernel
-* 1999/05/18 acme major cleanup (polling not needed), etc
-* 1998/08/28 acme minor cleanup (ioctls for firmware deleted)
-* queue_task activated
-* 1998/08/08 acme Initial version.
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/stddef.h> /* offsetof(), etc. */
-#include <linux/errno.h> /* return codes */
-#include <linux/string.h> /* inline memset(), etc. */
-#include <linux/slab.h> /* kmalloc(), kfree() */
-#include <linux/kernel.h> /* printk(), and other useful stuff */
-#include <linux/module.h> /* support for loadable modules */
-#include <linux/ioport.h> /* request_region(), release_region() */
-#include <linux/wanrouter.h> /* WAN router definitions */
-#include <linux/cyclomx.h> /* cyclomx common user API definitions */
-#include <linux/init.h> /* __init (when not using as a module) */
-#include <linux/interrupt.h>
-
-unsigned int cycx_debug;
-
-MODULE_AUTHOR("Arnaldo Carvalho de Melo");
-MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
-MODULE_LICENSE("GPL");
-module_param(cycx_debug, int, 0);
-MODULE_PARM_DESC(cycx_debug, "cyclomx debug level");
-
-/* Defines & Macros */
-
-#define CYCX_DRV_VERSION 0 /* version number */
-#define CYCX_DRV_RELEASE 11 /* release (minor version) number */
-#define CYCX_MAX_CARDS 1 /* max number of adapters */
-
-#define CONFIG_CYCX_CARDS 1
-
-/* Function Prototypes */
-
-/* WAN link driver entry points */
-static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf);
-static int cycx_wan_shutdown(struct wan_device *wandev);
-
-/* Miscellaneous functions */
-static irqreturn_t cycx_isr(int irq, void *dev_id);
-
-/* Global Data
- * Note: All data must be explicitly initialized!!!
- */
-
-/* private data */
-static const char cycx_drvname[] = "cyclomx";
-static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
-static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
- "<acme@conectiva.com.br>";
-static int cycx_ncards = CONFIG_CYCX_CARDS;
-static struct cycx_device *cycx_card_array; /* adapter data space */
-
-/* Kernel Loadable Module Entry Points */
-
-/*
- * Module 'insert' entry point.
- * o print announcement
- * o allocate adapter data space
- * o initialize static data
- * o register all cards with WAN router
- * o calibrate Cyclom 2X shared memory access delay.
- *
- * Return: 0 Ok
- * < 0 error.
- * Context: process
- */
-static int __init cycx_init(void)
-{
- int cnt, err = -ENOMEM;
-
- pr_info("%s v%u.%u %s\n",
- cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE,
- cycx_copyright);
-
- /* Verify number of cards and allocate adapter data space */
- cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
- cycx_ncards = max_t(int, cycx_ncards, 1);
- cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL);
- if (!cycx_card_array)
- goto out;
-
-
- /* Register adapters with WAN router */
- for (cnt = 0; cnt < cycx_ncards; ++cnt) {
- struct cycx_device *card = &cycx_card_array[cnt];
- struct wan_device *wandev = &card->wandev;
-
- sprintf(card->devname, "%s%d", cycx_drvname, cnt + 1);
- wandev->magic = ROUTER_MAGIC;
- wandev->name = card->devname;
- wandev->private = card;
- wandev->setup = cycx_wan_setup;
- wandev->shutdown = cycx_wan_shutdown;
- err = register_wan_device(wandev);
-
- if (err) {
- pr_err("%s registration failed with error %d!\n",
- card->devname, err);
- break;
- }
- }
-
- err = -ENODEV;
- if (!cnt) {
- kfree(cycx_card_array);
- goto out;
- }
- err = 0;
- cycx_ncards = cnt; /* adjust actual number of cards */
-out: return err;
-}
-
-/*
- * Module 'remove' entry point.
- * o unregister all adapters from the WAN router
- * o release all remaining system resources
- */
-static void __exit cycx_exit(void)
-{
- int i = 0;
-
- for (; i < cycx_ncards; ++i) {
- struct cycx_device *card = &cycx_card_array[i];
- unregister_wan_device(card->devname);
- }
-
- kfree(cycx_card_array);
-}
-
-/* WAN Device Driver Entry Points */
-/*
- * Setup/configure WAN link driver.
- * o check adapter state
- * o make sure firmware is present in configuration
- * o allocate interrupt vector
- * o setup Cyclom 2X hardware
- * o call appropriate routine to perform protocol-specific initialization
- *
- * This function is called when router handles ROUTER_SETUP IOCTL. The
- * configuration structure is in kernel memory (including extended data, if
- * any).
- */
-static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
-{
- int rc = -EFAULT;
- struct cycx_device *card;
- int irq;
-
- /* Sanity checks */
-
- if (!wandev || !wandev->private || !conf)
- goto out;
-
- card = wandev->private;
- rc = -EBUSY;
- if (wandev->state != WAN_UNCONFIGURED)
- goto out;
-
- rc = -EINVAL;
- if (!conf->data_size || !conf->data) {
- pr_err("%s: firmware not found in configuration data!\n",
- wandev->name);
- goto out;
- }
-
- if (conf->irq <= 0) {
- pr_err("%s: can't configure without IRQ!\n", wandev->name);
- goto out;
- }
-
- /* Allocate IRQ */
- irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
-
- if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
- pr_err("%s: can't reserve IRQ %d!\n", wandev->name, irq);
- goto out;
- }
-
- /* Configure hardware, load firmware, etc. */
- memset(&card->hw, 0, sizeof(card->hw));
- card->hw.irq = irq;
- card->hw.dpmsize = CYCX_WINDOWSIZE;
- card->hw.fwid = CFID_X25_2X;
- spin_lock_init(&card->lock);
- init_waitqueue_head(&card->wait_stats);
-
- rc = cycx_setup(&card->hw, conf->data, conf->data_size, conf->maddr);
- if (rc)
- goto out_irq;
-
- /* Initialize WAN device data space */
- wandev->irq = irq;
- wandev->dma = wandev->ioport = 0;
- wandev->maddr = (unsigned long)card->hw.dpmbase;
- wandev->msize = card->hw.dpmsize;
- wandev->hw_opt[2] = 0;
- wandev->hw_opt[3] = card->hw.fwid;
-
- /* Protocol-specific initialization */
- switch (card->hw.fwid) {
-#ifdef CONFIG_CYCLOMX_X25
- case CFID_X25_2X:
- rc = cycx_x25_wan_init(card, conf);
- break;
-#endif
- default:
- pr_err("%s: this firmware is not supported!\n", wandev->name);
- rc = -EINVAL;
- }
-
- if (rc) {
- cycx_down(&card->hw);
- goto out_irq;
- }
-
- rc = 0;
-out:
- return rc;
-out_irq:
- free_irq(irq, card);
- goto out;
-}
-
-/*
- * Shut down WAN link driver.
- * o shut down adapter hardware
- * o release system resources.
- *
- * This function is called by the router when device is being unregistered or
- * when it handles ROUTER_DOWN IOCTL.
- */
-static int cycx_wan_shutdown(struct wan_device *wandev)
-{
- int ret = -EFAULT;
- struct cycx_device *card;
-
- /* sanity checks */
- if (!wandev || !wandev->private)
- goto out;
-
- ret = 0;
- if (wandev->state == WAN_UNCONFIGURED)
- goto out;
-
- card = wandev->private;
- wandev->state = WAN_UNCONFIGURED;
- cycx_down(&card->hw);
- pr_info("%s: irq %d being freed!\n", wandev->name, wandev->irq);
- free_irq(wandev->irq, card);
-out: return ret;
-}
-
-/* Miscellaneous */
-/*
- * Cyclom 2X Interrupt Service Routine.
- * o acknowledge Cyclom 2X hardware interrupt.
- * o call protocol-specific interrupt service routine, if any.
- */
-static irqreturn_t cycx_isr(int irq, void *dev_id)
-{
- struct cycx_device *card = dev_id;
-
- if (card->wandev.state == WAN_UNCONFIGURED)
- goto out;
-
- if (card->in_isr) {
- pr_warn("%s: interrupt re-entrancy on IRQ %d!\n",
- card->devname, card->wandev.irq);
- goto out;
- }
-
- if (card->isr)
- card->isr(card);
- return IRQ_HANDLED;
-out:
- return IRQ_NONE;
-}
-
-/* Set WAN device state. */
-void cycx_set_state(struct cycx_device *card, int state)
-{
- unsigned long flags;
- char *string_state = NULL;
-
- spin_lock_irqsave(&card->lock, flags);
-
- if (card->wandev.state != state) {
- switch (state) {
- case WAN_CONNECTED:
- string_state = "connected!";
- break;
- case WAN_DISCONNECTED:
- string_state = "disconnected!";
- break;
- }
- pr_info("%s: link %s\n", card->devname, string_state);
- card->wandev.state = state;
- }
-
- card->state_tick = jiffies;
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-module_init(cycx_init);
-module_exit(cycx_exit);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
deleted file mode 100644
index 06f3f6309e4b..000000000000
--- a/drivers/net/wan/cycx_x25.c
+++ /dev/null
@@ -1,1602 +0,0 @@
-/*
-* cycx_x25.c Cyclom 2X WAN Link Driver. X.25 module.
-*
-* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-*
-* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
-*
-* Based on sdla_x25.c by Gene Kozin <genek@compuserve.com>
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version
-* 2 of the License, or (at your option) any later version.
-* ============================================================================
-* 2001/01/12 acme use dev_kfree_skb_irq on interrupt context
-* 2000/04/02 acme dprintk, cycx_debug
-* fixed the bug introduced in get_dev_by_lcn and
-* get_dev_by_dte_addr by the anonymous hacker
-* that converted this driver to softnet
-* 2000/01/08 acme cleanup
-* 1999/10/27 acme use ARPHRD_HWX25 so that the X.25 stack know
-* that we have a X.25 stack implemented in
-* firmware onboard
-* 1999/10/18 acme support for X.25 sockets in if_send,
-* beware: socket(AF_X25...) IS WORK IN PROGRESS,
-* TCP/IP over X.25 via wanrouter not affected,
-* working.
-* 1999/10/09 acme chan_disc renamed to chan_disconnect,
-* began adding support for X.25 sockets:
-* conf->protocol in new_if
-* 1999/10/05 acme fixed return E... to return -E...
-* 1999/08/10 acme serialized access to the card thru a spinlock
-* in x25_exec
-* 1999/08/09 acme removed per channel spinlocks
-* removed references to enable_tx_int
-* 1999/05/28 acme fixed nibble_to_byte, ackvc now properly treated
-* if_send simplified
-* 1999/05/25 acme fixed t1, t2, t21 & t23 configuration
-* use spinlocks instead of cli/sti in some points
-* 1999/05/24 acme finished the x25_get_stat function
-* 1999/05/23 acme dev->type = ARPHRD_X25 (tcpdump only works,
-* AFAICT, with ARPHRD_ETHER). This seems to be
-* needed to use socket(AF_X25)...
-* Now the config file must specify a peer media
-* address for svc channels over a crossover cable.
-* Removed hold_timeout from x25_channel_t,
-* not used.
-* A little enhancement in the DEBUG processing
-* 1999/05/22 acme go to DISCONNECTED in disconnect_confirm_intr,
-* instead of chan_disc.
-* 1999/05/16 marcelo fixed timer initialization in SVCs
-* 1999/01/05 acme x25_configure now get (most of) all
-* parameters...
-* 1999/01/05 acme pktlen now (correctly) uses log2 (value
-* configured)
-* 1999/01/03 acme judicious use of data types (u8, u16, u32, etc)
-* 1999/01/03 acme cyx_isr: reset dpmbase to acknowledge
-* indication (interrupt from cyclom 2x)
-* 1999/01/02 acme cyx_isr: first hackings...
-* 1999/01/0203 acme when initializing an array don't give less
-* elements than declared...
-* example: char send_cmd[6] = "?\xFF\x10";
-* you're gonna lose a couple of hours, 'cause your
-* brain won't admit that there's an error in the
-* above declaration... the side effect is that
-* memset is put into the unresolved symbols
-* instead of using the inline memset functions...
-* 1999/01/02 acme began chan_connect, chan_send, x25_send
-* 1998/12/31 acme x25_configure
-* this code can be compiled as non module
-* 1998/12/27 acme code cleanup
-* IPX code wiped out! let's decrease code
-* complexity for now, remember: I'm learning! :)
-* bps_to_speed_code OK
-* 1998/12/26 acme Minimal debug code cleanup
-* 1998/08/08 acme Initial version.
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define CYCLOMX_X25_DEBUG 1
-
-#include <linux/ctype.h> /* isdigit() */
-#include <linux/errno.h> /* return codes */
-#include <linux/if_arp.h> /* ARPHRD_HWX25 */
-#include <linux/kernel.h> /* printk(), and other useful stuff */
-#include <linux/module.h>
-#include <linux/string.h> /* inline memset(), etc. */
-#include <linux/sched.h>
-#include <linux/slab.h> /* kmalloc(), kfree() */
-#include <linux/stddef.h> /* offsetof(), etc. */
-#include <linux/wanrouter.h> /* WAN router definitions */
-
-#include <asm/byteorder.h> /* htons(), etc. */
-
-#include <linux/cyclomx.h> /* Cyclom 2X common user API definitions */
-#include <linux/cycx_x25.h> /* X.25 firmware API definitions */
-
-#include <net/x25device.h>
-
-/* Defines & Macros */
-#define CYCX_X25_MAX_CMD_RETRY 5
-#define CYCX_X25_CHAN_MTU 2048 /* unfragmented logical channel MTU */
-
-/* Data Structures */
-/* This is an extension of the 'struct net_device' we create for each network
- interface to keep the rest of X.25 channel-specific data. */
-struct cycx_x25_channel {
- /* This member must be first. */
- struct net_device *slave; /* WAN slave */
-
- char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
- char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */
- char *local_addr; /* local media address, ASCIIZ -
- svc thru crossover cable */
- s16 lcn; /* logical channel number/conn.req.key*/
- u8 link;
- struct timer_list timer; /* timer used for svc channel disc. */
- u16 protocol; /* ethertype, 0 - multiplexed */
- u8 svc; /* 0 - permanent, 1 - switched */
- u8 state; /* channel state */
- u8 drop_sequence; /* mark sequence for dropping */
- u32 idle_tmout; /* sec, before disconnecting */
- struct sk_buff *rx_skb; /* receive socket buffer */
- struct cycx_device *card; /* -> owner */
- struct net_device_stats ifstats;/* interface statistics */
-};
-
-/* Function Prototypes */
-/* WAN link driver entry points. These are called by the WAN router module. */
-static int cycx_wan_update(struct wan_device *wandev),
- cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
- wanif_conf_t *conf),
- cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev);
-
-/* Network device interface */
-static int cycx_netdevice_init(struct net_device *dev);
-static int cycx_netdevice_open(struct net_device *dev);
-static int cycx_netdevice_stop(struct net_device *dev);
-static int cycx_netdevice_hard_header(struct sk_buff *skb,
- struct net_device *dev, u16 type,
- const void *daddr, const void *saddr,
- unsigned len);
-static int cycx_netdevice_rebuild_header(struct sk_buff *skb);
-static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-
-static struct net_device_stats *
- cycx_netdevice_get_stats(struct net_device *dev);
-
-/* Interrupt handlers */
-static void cycx_x25_irq_handler(struct cycx_device *card),
- cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
- cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
- cycx_x25_irq_log(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_stat(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_connect_confirm(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_connect(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_disconnect(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_spurious(struct cycx_device *card,
- struct cycx_x25_cmd *cmd);
-
-/* X.25 firmware interface functions */
-static int cycx_x25_configure(struct cycx_device *card,
- struct cycx_x25_config *conf),
- cycx_x25_get_stats(struct cycx_device *card),
- cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
- int len, void *buf),
- cycx_x25_connect_response(struct cycx_device *card,
- struct cycx_x25_channel *chan),
- cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
- u8 lcn);
-
-/* channel functions */
-static int cycx_x25_chan_connect(struct net_device *dev),
- cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb);
-
-static void cycx_x25_chan_disconnect(struct net_device *dev),
- cycx_x25_chan_send_event(struct net_device *dev, u8 event);
-
-/* Miscellaneous functions */
-static void cycx_x25_set_chan_state(struct net_device *dev, u8 state),
- cycx_x25_chan_timer(unsigned long d);
-
-static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble),
- reset_timer(struct net_device *dev);
-
-static u8 bps_to_speed_code(u32 bps);
-static u8 cycx_log2(u32 n);
-
-static unsigned dec_to_uint(u8 *str, int len);
-
-static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
- s16 lcn);
-static struct net_device *
- cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte);
-
-static void cycx_x25_chan_setup(struct net_device *dev);
-
-#ifdef CYCLOMX_X25_DEBUG
-static void hex_dump(char *msg, unsigned char *p, int len);
-static void cycx_x25_dump_config(struct cycx_x25_config *conf);
-static void cycx_x25_dump_stats(struct cycx_x25_stats *stats);
-static void cycx_x25_dump_devs(struct wan_device *wandev);
-#else
-#define hex_dump(msg, p, len)
-#define cycx_x25_dump_config(conf)
-#define cycx_x25_dump_stats(stats)
-#define cycx_x25_dump_devs(wandev)
-#endif
-/* Public Functions */
-
-/* X.25 Protocol Initialization routine.
- *
- * This routine is called by the main Cyclom 2X module during setup. At this
- * point adapter is completely initialized and X.25 firmware is running.
- * o configure adapter
- * o initialize protocol-specific fields of the adapter data space.
- *
- * Return: 0 o.k.
- * < 0 failure. */
-int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf)
-{
- struct cycx_x25_config cfg;
-
- /* Verify configuration ID */
- if (conf->config_id != WANCONFIG_X25) {
- pr_info("%s: invalid configuration ID %u!\n",
- card->devname, conf->config_id);
- return -EINVAL;
- }
-
- /* Initialize protocol-specific fields */
- card->mbox = card->hw.dpmbase + X25_MBOX_OFFS;
- card->u.x.connection_keys = 0;
- spin_lock_init(&card->u.x.lock);
-
- /* Configure adapter. Here we set reasonable defaults, then parse
- * device configuration structure and set configuration options.
- * Most configuration options are verified and corrected (if
- * necessary) since we can't rely on the adapter to do so and don't
- * want it to fail either. */
- memset(&cfg, 0, sizeof(cfg));
- cfg.link = 0;
- cfg.clock = conf->clocking == WANOPT_EXTERNAL ? 8 : 55;
- cfg.speed = bps_to_speed_code(conf->bps);
- cfg.n3win = 7;
- cfg.n2win = 2;
- cfg.n2 = 5;
- cfg.nvc = 1;
- cfg.npvc = 1;
- cfg.flags = 0x02; /* default = V35 */
- cfg.t1 = 10; /* line carrier timeout */
- cfg.t2 = 29; /* tx timeout */
- cfg.t21 = 180; /* CALL timeout */
- cfg.t23 = 180; /* CLEAR timeout */
-
- /* adjust MTU */
- if (!conf->mtu || conf->mtu >= 512)
- card->wandev.mtu = 512;
- else if (conf->mtu >= 256)
- card->wandev.mtu = 256;
- else if (conf->mtu >= 128)
- card->wandev.mtu = 128;
- else
- card->wandev.mtu = 64;
-
- cfg.pktlen = cycx_log2(card->wandev.mtu);
-
- if (conf->station == WANOPT_DTE) {
- cfg.locaddr = 3; /* DTE */
- cfg.remaddr = 1; /* DCE */
- } else {
- cfg.locaddr = 1; /* DCE */
- cfg.remaddr = 3; /* DTE */
- }
-
- if (conf->interface == WANOPT_RS232)
- cfg.flags = 0; /* FIXME just reset the 2nd bit */
-
- if (conf->u.x25.hi_pvc) {
- card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, 4095);
- card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
- }
-
- if (conf->u.x25.hi_svc) {
- card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, 4095);
- card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
- }
-
- if (card->u.x.lo_pvc == 255)
- cfg.npvc = 0;
- else
- cfg.npvc = card->u.x.hi_pvc - card->u.x.lo_pvc + 1;
-
- cfg.nvc = card->u.x.hi_svc - card->u.x.lo_svc + 1 + cfg.npvc;
-
- if (conf->u.x25.hdlc_window)
- cfg.n2win = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
-
- if (conf->u.x25.pkt_window)
- cfg.n3win = min_t(unsigned int, conf->u.x25.pkt_window, 7);
-
- if (conf->u.x25.t1)
- cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
-
- if (conf->u.x25.t2)
- cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 30);
-
- if (conf->u.x25.t11_t21)
- cfg.t21 = min_t(unsigned int, conf->u.x25.t11_t21, 30);
-
- if (conf->u.x25.t13_t23)
- cfg.t23 = min_t(unsigned int, conf->u.x25.t13_t23, 30);
-
- if (conf->u.x25.n2)
- cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
-
- /* initialize adapter */
- if (cycx_x25_configure(card, &cfg))
- return -EIO;
-
- /* Initialize protocol-specific fields of adapter data space */
- card->wandev.bps = conf->bps;
- card->wandev.interface = conf->interface;
- card->wandev.clocking = conf->clocking;
- card->wandev.station = conf->station;
- card->isr = cycx_x25_irq_handler;
- card->exec = NULL;
- card->wandev.update = cycx_wan_update;
- card->wandev.new_if = cycx_wan_new_if;
- card->wandev.del_if = cycx_wan_del_if;
- card->wandev.state = WAN_DISCONNECTED;
-
- return 0;
-}
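
The configuration block passed to the firmware carries the packet length as a power-of-two exponent, so cycx_x25_wan_init() first rounds the configured MTU down to 512/256/128/64 and then stores cycx_log2() of the result in cfg.pktlen. A standalone sketch of that mapping (cycx_pick_mtu() is a hypothetical name for the rounding step):

#include <stdio.h>
#include <stdint.h>

/* Integer log base 2, as in cycx_log2(). */
static uint8_t cycx_log2(uint32_t n)
{
    uint8_t log = 0;

    while (n > 1) {
        n >>= 1;
        ++log;
    }
    return log;
}

/* Round the requested MTU down to a supported size (64..512). */
static unsigned int cycx_pick_mtu(unsigned int mtu)
{
    if (!mtu || mtu >= 512)
        return 512;
    if (mtu >= 256)
        return 256;
    if (mtu >= 128)
        return 128;
    return 64;
}

int main(void)
{
    unsigned int req[] = { 0, 100, 300, 1500 };
    int i;

    for (i = 0; i < 4; i++) {
        unsigned int mtu = cycx_pick_mtu(req[i]);

        printf("requested %4u -> mtu %3u, pktlen code %u\n",
               req[i], mtu, cycx_log2(mtu));
    }
    return 0;
}
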
-
-/* WAN Device Driver Entry Points */
-/* Update device status & statistics. */
-static int cycx_wan_update(struct wan_device *wandev)
-{
- /* sanity checks */
- if (!wandev || !wandev->private)
- return -EFAULT;
-
- if (wandev->state == WAN_UNCONFIGURED)
- return -ENODEV;
-
- cycx_x25_get_stats(wandev->private);
-
- return 0;
-}
-
-/* Create new logical channel.
- * This routine is called by the router when ROUTER_IFNEW IOCTL is being
- * handled.
- * o parse media- and hardware-specific configuration
- * o make sure that a new channel can be created
- * o allocate resources, if necessary
- * o prepare network device structure for registration.
- *
- * Return: 0 o.k.
- * < 0 failure (channel will not be created) */
-static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
- wanif_conf_t *conf)
-{
- struct cycx_device *card = wandev->private;
- struct cycx_x25_channel *chan;
- int err = 0;
-
- if (!conf->name[0] || strlen(conf->name) > WAN_IFNAME_SZ) {
- pr_info("%s: invalid interface name!\n", card->devname);
- return -EINVAL;
- }
-
- dev = alloc_netdev(sizeof(struct cycx_x25_channel), conf->name,
- cycx_x25_chan_setup);
- if (!dev)
- return -ENOMEM;
-
- chan = netdev_priv(dev);
- strcpy(chan->name, conf->name);
- chan->card = card;
- chan->link = conf->port;
- chan->protocol = conf->protocol ? ETH_P_X25 : ETH_P_IP;
- chan->rx_skb = NULL;
- /* only used for SVCs connected through a crossover cable */
- chan->local_addr = NULL;
-
- if (conf->addr[0] == '@') { /* SVC */
- int len = strlen(conf->local_addr);
-
- if (len) {
- if (len > WAN_ADDRESS_SZ) {
- pr_err("%s: %s local addr too long!\n",
- wandev->name, chan->name);
- err = -EINVAL;
- goto error;
- } else {
- chan->local_addr = kmalloc(len + 1, GFP_KERNEL);
-
- if (!chan->local_addr) {
- err = -ENOMEM;
- goto error;
- }
- }
-
- strncpy(chan->local_addr, conf->local_addr,
- WAN_ADDRESS_SZ);
- }
-
- chan->svc = 1;
- strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ);
- init_timer(&chan->timer);
- chan->timer.function = cycx_x25_chan_timer;
- chan->timer.data = (unsigned long)dev;
-
- /* Set channel timeouts (default if not specified) */
- chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
- } else if (isdigit(conf->addr[0])) { /* PVC */
- s16 lcn = dec_to_uint(conf->addr, 0);
-
- if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
- chan->lcn = lcn;
- else {
- pr_err("%s: PVC %u is out of range on interface %s!\n",
- wandev->name, lcn, chan->name);
- err = -EINVAL;
- goto error;
- }
- } else {
- pr_err("%s: invalid media address on interface %s!\n",
- wandev->name, chan->name);
- err = -EINVAL;
- goto error;
- }
-
- return 0;
-
-error:
- free_netdev(dev);
- return err;
-}
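
cycx_wan_new_if() classifies the channel purely by the first character of the configured media address: a leading '@' selects an SVC (the rest of the string is the remote X.121 address), a leading digit selects a PVC (the whole string is the LCN in decimal), and anything else is rejected. A tiny sketch of that convention, with a hypothetical parse_media_addr() helper:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

enum chan_kind { CHAN_SVC, CHAN_PVC, CHAN_BAD };

/* Hypothetical helper mirroring the address convention used above:
 * "@<dte address>" selects an SVC, "<decimal lcn>" selects a PVC. */
static enum chan_kind parse_media_addr(const char *addr, long *lcn)
{
    if (addr[0] == '@')
        return CHAN_SVC;
    if (isdigit((unsigned char)addr[0])) {
        *lcn = strtol(addr, NULL, 10);
        return CHAN_PVC;
    }
    return CHAN_BAD;
}

int main(void)
{
    long lcn = 0;

    printf("%d\n", parse_media_addr("@2345678901", &lcn));       /* 0: SVC */
    printf("%d (lcn=%ld)\n", parse_media_addr("16", &lcn), lcn); /* 1, lcn=16 */
    printf("%d\n", parse_media_addr("x25", &lcn));               /* 2: invalid */
    return 0;
}
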
-
-/* Delete logical channel. */
-static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- if (chan->svc) {
- kfree(chan->local_addr);
- if (chan->state == WAN_CONNECTED)
- del_timer(&chan->timer);
- }
-
- return 0;
-}
-
-
-/* Network Device Interface */
-
-static const struct header_ops cycx_header_ops = {
- .create = cycx_netdevice_hard_header,
- .rebuild = cycx_netdevice_rebuild_header,
-};
-
-static const struct net_device_ops cycx_netdev_ops = {
- .ndo_init = cycx_netdevice_init,
- .ndo_open = cycx_netdevice_open,
- .ndo_stop = cycx_netdevice_stop,
- .ndo_start_xmit = cycx_netdevice_hard_start_xmit,
- .ndo_get_stats = cycx_netdevice_get_stats,
-};
-
-static void cycx_x25_chan_setup(struct net_device *dev)
-{
- /* Initialize device driver entry points */
- dev->netdev_ops = &cycx_netdev_ops;
- dev->header_ops = &cycx_header_ops;
-
- /* Initialize media-specific parameters */
- dev->mtu = CYCX_X25_CHAN_MTU;
- dev->type = ARPHRD_HWX25; /* ARP h/w type */
- dev->hard_header_len = 0; /* media header length */
- dev->addr_len = 0; /* hardware address length */
-}
-
-/* Initialize Linux network interface.
- *
- * This routine is called only once for each interface, during Linux network
- * interface registration. Returning anything but zero will fail interface
- * registration. */
-static int cycx_netdevice_init(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
- struct wan_device *wandev = &card->wandev;
-
- if (!chan->svc)
- *(__be16*)dev->dev_addr = htons(chan->lcn);
-
- /* Initialize hardware parameters (just for reference) */
- dev->irq = wandev->irq;
- dev->dma = wandev->dma;
- dev->base_addr = wandev->ioport;
- dev->mem_start = (unsigned long)wandev->maddr;
- dev->mem_end = (unsigned long)(wandev->maddr +
- wandev->msize - 1);
- dev->flags |= IFF_NOARP;
-
- /* Set transmit buffer queue length */
- dev->tx_queue_len = 10;
-
- /* Initialize socket buffers */
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
-
- return 0;
-}
-
-/* Open network interface.
- * o prevent module from unloading by incrementing use count
- * o if link is disconnected then initiate connection
- *
- * Return 0 if O.k. or errno. */
-static int cycx_netdevice_open(struct net_device *dev)
-{
- if (netif_running(dev))
- return -EBUSY; /* only one open is allowed */
-
- netif_start_queue(dev);
- return 0;
-}
-
-/* Close network interface.
- * o reset flags.
- * o if there's no more open channels then disconnect physical link. */
-static int cycx_netdevice_stop(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- netif_stop_queue(dev);
-
- if (chan->state == WAN_CONNECTED || chan->state == WAN_CONNECTING)
- cycx_x25_chan_disconnect(dev);
-
- return 0;
-}
-
-/* Build media header.
- * o encapsulate packet according to encapsulation type.
- *
- * The trick here is to put packet type (Ethertype) into 'protocol' field of
- * the socket buffer, so that we don't forget it. If encapsulation fails,
- * set skb->protocol to 0 and discard packet later.
- *
- * Return: media header length. */
-static int cycx_netdevice_hard_header(struct sk_buff *skb,
- struct net_device *dev, u16 type,
- const void *daddr, const void *saddr,
- unsigned len)
-{
- skb->protocol = htons(type);
-
- return dev->hard_header_len;
-}
-
-/* Re-build media header.
- * Return: 1 physical address resolved.
- * 0 physical address not resolved */
-static int cycx_netdevice_rebuild_header(struct sk_buff *skb)
-{
- return 1;
-}
-
-/* Send a packet on a network interface.
- * o set busy flag (marks start of the transmission).
- * o check link state. If link is not up, then drop the packet.
- * o check channel status. If it's down then initiate a call.
- * o pass a packet to corresponding WAN device.
- * o free socket buffer
- *
- * Return: 0 complete (socket buffer must be freed)
- * non-0 packet may be re-transmitted (tbusy must be set)
- *
- * Notes:
- * 1. This routine is called either by the protocol stack or by the "net
- * bottom half" (with interrupts enabled).
- * 2. Setting tbusy flag will inhibit further transmit requests from the
- * protocol stack and can be used for flow control with protocol layer. */
-static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
-
- if (!chan->svc)
- chan->protocol = ntohs(skb->protocol);
-
- if (card->wandev.state != WAN_CONNECTED)
- ++chan->ifstats.tx_dropped;
- else if (chan->svc && chan->protocol &&
- chan->protocol != ntohs(skb->protocol)) {
- pr_info("%s: unsupported Ethertype 0x%04X on interface %s!\n",
- card->devname, ntohs(skb->protocol), dev->name);
- ++chan->ifstats.tx_errors;
- } else if (chan->protocol == ETH_P_IP) {
- switch (chan->state) {
- case WAN_DISCONNECTED:
- if (cycx_x25_chan_connect(dev)) {
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
- /* fall thru */
- case WAN_CONNECTED:
- reset_timer(dev);
- dev->trans_start = jiffies;
- netif_stop_queue(dev);
-
- if (cycx_x25_chan_send(dev, skb))
- return NETDEV_TX_BUSY;
-
- break;
- default:
- ++chan->ifstats.tx_dropped;
- ++card->wandev.stats.tx_dropped;
- }
- } else { /* chan->protocol == ETH_P_X25 */
- switch (skb->data[0]) {
- case X25_IFACE_DATA:
- break;
- case X25_IFACE_CONNECT:
- cycx_x25_chan_connect(dev);
- goto free_packet;
- case X25_IFACE_DISCONNECT:
- cycx_x25_chan_disconnect(dev);
- goto free_packet;
- default:
- pr_info("%s: unknown %d x25-iface request on %s!\n",
- card->devname, skb->data[0], dev->name);
- ++chan->ifstats.tx_errors;
- goto free_packet;
- }
-
- skb_pull(skb, 1); /* Remove control byte */
- reset_timer(dev);
- dev->trans_start = jiffies;
- netif_stop_queue(dev);
-
- if (cycx_x25_chan_send(dev, skb)) {
- /* prepare for future retransmissions */
- skb_push(skb, 1);
- return NETDEV_TX_BUSY;
- }
- }
-
-free_packet:
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-/* Get Ethernet-style interface statistics.
- * Return a pointer to struct net_device_stats */
-static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- return chan ? &chan->ifstats : NULL;
-}
-
-/* Interrupt Handlers */
-/* X.25 Interrupt Service Routine. */
-static void cycx_x25_irq_handler(struct cycx_device *card)
-{
- struct cycx_x25_cmd cmd;
- u16 z = 0;
-
- card->in_isr = 1;
- card->buff_int_mode_unbusy = 0;
- cycx_peek(&card->hw, X25_RXMBOX_OFFS, &cmd, sizeof(cmd));
-
- switch (cmd.command) {
- case X25_DATA_INDICATION:
- cycx_x25_irq_rx(card, &cmd);
- break;
- case X25_ACK_FROM_VC:
- cycx_x25_irq_tx(card, &cmd);
- break;
- case X25_LOG:
- cycx_x25_irq_log(card, &cmd);
- break;
- case X25_STATISTIC:
- cycx_x25_irq_stat(card, &cmd);
- break;
- case X25_CONNECT_CONFIRM:
- cycx_x25_irq_connect_confirm(card, &cmd);
- break;
- case X25_CONNECT_INDICATION:
- cycx_x25_irq_connect(card, &cmd);
- break;
- case X25_DISCONNECT_INDICATION:
- cycx_x25_irq_disconnect(card, &cmd);
- break;
- case X25_DISCONNECT_CONFIRM:
- cycx_x25_irq_disconnect_confirm(card, &cmd);
- break;
- case X25_LINE_ON:
- cycx_set_state(card, WAN_CONNECTED);
- break;
- case X25_LINE_OFF:
- cycx_set_state(card, WAN_DISCONNECTED);
- break;
- default:
- cycx_x25_irq_spurious(card, &cmd);
- break;
- }
-
- cycx_poke(&card->hw, 0, &z, sizeof(z));
- cycx_poke(&card->hw, X25_RXMBOX_OFFS, &z, sizeof(z));
- card->in_isr = 0;
-}
-
-/* Transmit interrupt handler.
- * o Release socket buffer
- * o Clear 'tbusy' flag */
-static void cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
-{
- struct net_device *dev;
- struct wan_device *wandev = &card->wandev;
- u8 lcn;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
-
- /* unbusy device and then dev_tint(); */
- dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
- if (dev) {
- card->buff_int_mode_unbusy = 1;
- netif_wake_queue(dev);
- } else
- pr_err("%s:ackvc for inexistent lcn %d\n", card->devname, lcn);
-}
-
-/* Receive interrupt handler.
- * This routine handles fragmented IP packets using M-bit according to the
- * RFC1356.
- * o map logical channel number to network interface.
- * o allocate socket buffer or append received packet to the existing one.
- * o if M-bit is reset (i.e. it's the last packet in a sequence) then
- * decapsulate packet and pass socket buffer to the protocol stack.
- *
- * Notes:
- * 1. When allocating a socket buffer, if M-bit is set then more data is
- * coming and we have to allocate buffer for the maximum IP packet size
- * expected on this channel.
- * 2. If something goes wrong and X.25 packet has to be dropped (e.g. no
- * socket buffers available) the whole packet sequence must be discarded. */
-static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev;
- struct cycx_x25_channel *chan;
- struct sk_buff *skb;
- u8 bitm, lcn;
- int pktlen = cmd->len - 5;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- cycx_peek(&card->hw, cmd->buf + 4, &bitm, sizeof(bitm));
- bitm &= 0x10;
-
- dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
- if (!dev) {
- /* Invalid channel, discard packet */
- pr_info("%s: receiving on orphaned LCN %d!\n",
- card->devname, lcn);
- return;
- }
-
- chan = netdev_priv(dev);
- reset_timer(dev);
-
- if (chan->drop_sequence) {
- if (!bitm)
- chan->drop_sequence = 0;
- else
- return;
- }
-
- if ((skb = chan->rx_skb) == NULL) {
- /* Allocate new socket buffer */
- int bufsize = bitm ? dev->mtu : pktlen;
-
- if ((skb = dev_alloc_skb((chan->protocol == ETH_P_X25 ? 1 : 0) +
- bufsize +
- dev->hard_header_len)) == NULL) {
- pr_info("%s: no socket buffers available!\n",
- card->devname);
- chan->drop_sequence = 1;
- ++chan->ifstats.rx_dropped;
- return;
- }
-
- if (chan->protocol == ETH_P_X25) /* X.25 socket layer control */
- /* 0 = data packet (dev_alloc_skb zeroed skb->data) */
- skb_put(skb, 1);
-
- skb->dev = dev;
- skb->protocol = htons(chan->protocol);
- chan->rx_skb = skb;
- }
-
- if (skb_tailroom(skb) < pktlen) {
- /* No room for the packet. Call off the whole thing! */
- dev_kfree_skb_irq(skb);
- chan->rx_skb = NULL;
-
- if (bitm)
- chan->drop_sequence = 1;
-
- pr_info("%s: unexpectedly long packet sequence on interface %s!\n",
- card->devname, dev->name);
- ++chan->ifstats.rx_length_errors;
- return;
- }
-
- /* Append packet to the socket buffer */
- cycx_peek(&card->hw, cmd->buf + 5, skb_put(skb, pktlen), pktlen);
-
- if (bitm)
- return; /* more data is coming */
-
- chan->rx_skb = NULL; /* dequeue packet */
-
- ++chan->ifstats.rx_packets;
- chan->ifstats.rx_bytes += pktlen;
-
- skb_reset_mac_header(skb);
- netif_rx(skb);
-}
-
-/* Connect interrupt handler. */
-static void cycx_x25_irq_connect(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev = NULL;
- struct cycx_x25_channel *chan;
- u8 d[32],
- loc[24],
- rem[24];
- u8 lcn, sizeloc, sizerem;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- cycx_peek(&card->hw, cmd->buf + 5, &sizeloc, sizeof(sizeloc));
- cycx_peek(&card->hw, cmd->buf + 6, d, cmd->len - 6);
-
- sizerem = sizeloc >> 4;
- sizeloc &= 0x0F;
-
- loc[0] = rem[0] = '\0';
-
- if (sizeloc)
- nibble_to_byte(d, loc, sizeloc, 0);
-
- if (sizerem)
- nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
-
- dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
- __func__, lcn, loc, rem);
-
- dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
- if (!dev) {
- /* Invalid channel, discard packet */
- pr_info("%s: connect not expected: remote %s!\n",
- card->devname, rem);
- return;
- }
-
- chan = netdev_priv(dev);
- chan->lcn = lcn;
- cycx_x25_connect_response(card, chan);
- cycx_x25_set_chan_state(dev, WAN_CONNECTED);
-}
-
-/* Connect confirm interrupt handler. */
-static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev;
- struct cycx_x25_channel *chan;
- u8 lcn, key;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
- dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
- card->devname, __func__, lcn, key);
-
- dev = cycx_x25_get_dev_by_lcn(wandev, -key);
- if (!dev) {
- /* Invalid channel, discard packet */
- clear_bit(--key, (void*)&card->u.x.connection_keys);
- pr_info("%s: connect confirm not expected: lcn %d, key=%d!\n",
- card->devname, lcn, key);
- return;
- }
-
- clear_bit(--key, (void*)&card->u.x.connection_keys);
- chan = netdev_priv(dev);
- chan->lcn = lcn;
- cycx_x25_set_chan_state(dev, WAN_CONNECTED);
-}
-
-/* Disconnect confirm interrupt handler. */
-static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev;
- u8 lcn;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
- card->devname, __func__, lcn);
- dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
- if (!dev) {
- /* Invalid channel, discard packet */
- pr_info("%s:disconnect confirm not expected!:lcn %d\n",
- card->devname, lcn);
- return;
- }
-
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
-}
-
-/* disconnect interrupt handler. */
-static void cycx_x25_irq_disconnect(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev;
- u8 lcn;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn);
-
- dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
- if (dev) {
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- cycx_x25_disconnect_response(card, chan->link, lcn);
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
- } else
- cycx_x25_disconnect_response(card, 0, lcn);
-}
-
-/* LOG interrupt handler. */
-static void cycx_x25_irq_log(struct cycx_device *card, struct cycx_x25_cmd *cmd)
-{
-#if CYCLOMX_X25_DEBUG
- char bf[20];
- u16 size, toread, link, msg_code;
- u8 code, routine;
-
- cycx_peek(&card->hw, cmd->buf, &msg_code, sizeof(msg_code));
- cycx_peek(&card->hw, cmd->buf + 2, &link, sizeof(link));
- cycx_peek(&card->hw, cmd->buf + 4, &size, sizeof(size));
- /* at most 20 bytes are available... thanks to Daniela :) */
- toread = size < 20 ? size : 20;
- cycx_peek(&card->hw, cmd->buf + 10, &bf, toread);
- cycx_peek(&card->hw, cmd->buf + 10 + toread, &code, 1);
- cycx_peek(&card->hw, cmd->buf + 10 + toread + 1, &routine, 1);
-
- pr_info("cycx_x25_irq_handler: X25_LOG (0x4500) indic.:\n");
- pr_info("cmd->buf=0x%X\n", cmd->buf);
- pr_info("Log message code=0x%X\n", msg_code);
- pr_info("Link=%d\n", link);
- pr_info("log code=0x%X\n", code);
- pr_info("log routine=0x%X\n", routine);
- pr_info("Message size=%d\n", size);
- hex_dump("Message", bf, toread);
-#endif
-}
-
-/* STATISTIC interrupt handler. */
-static void cycx_x25_irq_stat(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- cycx_peek(&card->hw, cmd->buf, &card->u.x.stats,
- sizeof(card->u.x.stats));
- hex_dump("cycx_x25_irq_stat", (unsigned char*)&card->u.x.stats,
- sizeof(card->u.x.stats));
- cycx_x25_dump_stats(&card->u.x.stats);
- wake_up_interruptible(&card->wait_stats);
-}
-
-/* Spurious interrupt handler.
- * o print a warning
- * If number of spurious interrupts exceeded some limit, then ??? */
-static void cycx_x25_irq_spurious(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- pr_info("%s: spurious interrupt (0x%X)!\n",
- card->devname, cmd->command);
-}
-#ifdef CYCLOMX_X25_DEBUG
-static void hex_dump(char *msg, unsigned char *p, int len)
-{
- print_hex_dump(KERN_INFO, msg, DUMP_PREFIX_OFFSET, 16, 1,
- p, len, true);
-}
-#endif
-
-/* Cyclom 2X Firmware-Specific Functions */
-/* Exec X.25 command. */
-static int x25_exec(struct cycx_device *card, int command, int link,
- void *d1, int len1, void *d2, int len2)
-{
- struct cycx_x25_cmd c;
- unsigned long flags;
- u32 addr = 0x1200 + 0x2E0 * link + 0x1E2;
- u8 retry = CYCX_X25_MAX_CMD_RETRY;
- int err = 0;
-
- c.command = command;
- c.link = link;
- c.len = len1 + len2;
-
- spin_lock_irqsave(&card->u.x.lock, flags);
-
- /* write command */
- cycx_poke(&card->hw, X25_MBOX_OFFS, &c, sizeof(c) - sizeof(c.buf));
-
- /* write X.25 data */
- if (d1) {
- cycx_poke(&card->hw, addr, d1, len1);
-
- if (d2) {
- if (len2 > 254) {
- u32 addr1 = 0xA00 + 0x400 * link;
-
- cycx_poke(&card->hw, addr + len1, d2, 249);
- cycx_poke(&card->hw, addr1, ((u8*)d2) + 249,
- len2 - 249);
- } else
- cycx_poke(&card->hw, addr + len1, d2, len2);
- }
- }
-
- /* generate interruption, executing command */
- cycx_intr(&card->hw);
-
- /* wait till card->mbox == 0 */
- do {
- err = cycx_exec(card->mbox);
- } while (retry-- && err);
-
- spin_unlock_irqrestore(&card->u.x.lock, flags);
-
- return err;
-}
-
-/* Configure adapter. */
-static int cycx_x25_configure(struct cycx_device *card,
- struct cycx_x25_config *conf)
-{
- struct {
- u16 nlinks;
- struct cycx_x25_config conf[2];
- } x25_cmd_conf;
-
- memset(&x25_cmd_conf, 0, sizeof(x25_cmd_conf));
- x25_cmd_conf.nlinks = 2;
- x25_cmd_conf.conf[0] = *conf;
- /* FIXME: we need to find a way in the wanrouter framework
- to configure the second link; for now let's use the same
- config as the first link, but force the interface type to
- RS232, the speed to 38400 and the clock to external */
- x25_cmd_conf.conf[1] = *conf;
- x25_cmd_conf.conf[1].link = 1;
- x25_cmd_conf.conf[1].speed = 5; /* 38400 */
- x25_cmd_conf.conf[1].clock = 8;
- x25_cmd_conf.conf[1].flags = 0; /* default = RS232 */
-
- cycx_x25_dump_config(&x25_cmd_conf.conf[0]);
- cycx_x25_dump_config(&x25_cmd_conf.conf[1]);
-
- return x25_exec(card, X25_CONFIG, 0,
- &x25_cmd_conf, sizeof(x25_cmd_conf), NULL, 0);
-}
-
-/* Get protocol statistics. */
-static int cycx_x25_get_stats(struct cycx_device *card)
-{
- /* the firmware expects 20 in the size field!!!
- thanks to Daniela */
- int err = x25_exec(card, X25_STATISTIC, 0, NULL, 20, NULL, 0);
-
- if (err)
- return err;
-
- interruptible_sleep_on(&card->wait_stats);
-
- if (signal_pending(current))
- return -EINTR;
-
- card->wandev.stats.rx_packets = card->u.x.stats.n2_rx_frames;
- card->wandev.stats.rx_over_errors = card->u.x.stats.rx_over_errors;
- card->wandev.stats.rx_crc_errors = card->u.x.stats.rx_crc_errors;
- card->wandev.stats.rx_length_errors = 0; /* not available from fw */
- card->wandev.stats.rx_frame_errors = 0; /* not available from fw */
- card->wandev.stats.rx_missed_errors = card->u.x.stats.rx_aborts;
- card->wandev.stats.rx_dropped = 0; /* not available from fw */
- card->wandev.stats.rx_errors = 0; /* not available from fw */
- card->wandev.stats.tx_packets = card->u.x.stats.n2_tx_frames;
- card->wandev.stats.tx_aborted_errors = card->u.x.stats.tx_aborts;
- card->wandev.stats.tx_dropped = 0; /* not available from fw */
- card->wandev.stats.collisions = 0; /* not available from fw */
- card->wandev.stats.tx_errors = 0; /* not available from fw */
-
- cycx_x25_dump_devs(&card->wandev);
-
- return 0;
-}
-
-/* return the number of nibbles */
-static int byte_to_nibble(u8 *s, u8 *d, char *nibble)
-{
- int i = 0;
-
- if (*nibble && *s) {
- d[i] |= *s++ - '0';
- *nibble = 0;
- ++i;
- }
-
- while (*s) {
- d[i] = (*s - '0') << 4;
- if (*(s + 1))
- d[i] |= *(s + 1) - '0';
- else {
- *nibble = 1;
- break;
- }
- ++i;
- s += 2;
- }
-
- return i;
-}
-
-static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble)
-{
- if (nibble) {
- *d++ = '0' + (*s++ & 0x0F);
- --len;
- }
-
- while (len) {
- *d++ = '0' + (*s >> 4);
-
- if (--len) {
- *d++ = '0' + (*s & 0x0F);
- --len;
- } else break;
-
- ++s;
- }
-
- *d = '\0';
-}
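
byte_to_nibble() packs a string of ASCII decimal digits (an X.121 address) two digits per byte, high nibble first, returning the number of complete bytes and flagging a trailing half byte through *nibble; nibble_to_byte() is its inverse. A standalone round trip of the two helpers, assuming digit-only input:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack ASCII digits two per byte (high nibble first).  Returns the
 * number of complete bytes written; *nibble is set when the last
 * digit only filled half a byte. */
static int byte_to_nibble(const uint8_t *s, uint8_t *d, char *nibble)
{
    int i = 0;

    if (*nibble && *s) {
        d[i] |= *s++ - '0';
        *nibble = 0;
        ++i;
    }
    while (*s) {
        d[i] = (*s - '0') << 4;
        if (*(s + 1))
            d[i] |= *(s + 1) - '0';
        else {
            *nibble = 1;
            break;
        }
        ++i;
        s += 2;
    }
    return i;
}

/* Unpack 'len' digits, optionally starting from a low nibble. */
static void nibble_to_byte(const uint8_t *s, uint8_t *d, uint8_t len, uint8_t nibble)
{
    if (nibble) {
        *d++ = '0' + (*s++ & 0x0F);
        --len;
    }
    while (len) {
        *d++ = '0' + (*s >> 4);
        if (--len) {
            *d++ = '0' + (*s & 0x0F);
            --len;
        } else
            break;
        ++s;
    }
    *d = '\0';
}

int main(void)
{
    uint8_t packed[16] = { 0 }, unpacked[32];
    char nibble = 0;
    const char *addr = "12345";

    byte_to_nibble((const uint8_t *)addr, packed, &nibble);
    nibble_to_byte(packed, unpacked, strlen(addr), 0);
    printf("%s -> %s (trailing half byte: %d)\n", addr, (char *)unpacked, nibble);
    return 0;
}
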
-
-/* Place X.25 call. */
-static int x25_place_call(struct cycx_device *card,
- struct cycx_x25_channel *chan)
-{
- int err = 0,
- len;
- char d[64],
- nibble = 0,
- mylen = chan->local_addr ? strlen(chan->local_addr) : 0,
- remotelen = strlen(chan->addr);
- u8 key;
-
- if (card->u.x.connection_keys == ~0U) {
- pr_info("%s: too many simultaneous connection requests!\n",
- card->devname);
- return -EAGAIN;
- }
-
- key = ffz(card->u.x.connection_keys);
- set_bit(key, (void*)&card->u.x.connection_keys);
- ++key;
- dprintk(1, KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
- memset(d, 0, sizeof(d));
- d[1] = key; /* user key */
- d[2] = 0x10;
- d[4] = 0x0B;
-
- len = byte_to_nibble(chan->addr, d + 6, &nibble);
-
- if (chan->local_addr)
- len += byte_to_nibble(chan->local_addr, d + 6 + len, &nibble);
-
- if (nibble)
- ++len;
-
- d[5] = mylen << 4 | remotelen;
- d[6 + len + 1] = 0xCC; /* TCP/IP over X.25, thanks to Daniela :) */
-
- if ((err = x25_exec(card, X25_CONNECT_REQUEST, chan->link,
- &d, 7 + len + 1, NULL, 0)) != 0)
- clear_bit(--key, (void*)&card->u.x.connection_keys);
- else
- chan->lcn = -key;
-
- return err;
-}
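
While a CALL REQUEST is outstanding there is no LCN yet, so x25_place_call() allocates a small connection key from the card->u.x.connection_keys bitmap (ffz() + set_bit()), stores -key in chan->lcn as a placeholder, and the connect-confirm handler later finds the device by that negative value and clears the bit. A minimal userspace sketch of the same allocator idea (key_alloc()/key_free() are hypothetical names):

#include <stdint.h>
#include <stdio.h>

static uint32_t connection_keys;  /* one bit per outstanding call */

/* Allocate the lowest free key (1..32), or 0 when all are in use. */
static int key_alloc(void)
{
    int bit;

    for (bit = 0; bit < 32; bit++)
        if (!(connection_keys & (1u << bit))) {
            connection_keys |= 1u << bit;
            return bit + 1;
        }
    return 0;
}

static void key_free(int key)
{
    connection_keys &= ~(1u << (key - 1));
}

int main(void)
{
    int k1 = key_alloc(), k2 = key_alloc();

    printf("keys %d %d\n", k1, k2);    /* 1 2 */
    key_free(k1);
    printf("next %d\n", key_alloc());  /* 1 again */
    return 0;
}
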
-
-/* Place X.25 CONNECT RESPONSE. */
-static int cycx_x25_connect_response(struct cycx_device *card,
- struct cycx_x25_channel *chan)
-{
- u8 d[8];
-
- memset(d, 0, sizeof(d));
- d[0] = d[3] = chan->lcn;
- d[2] = 0x10;
- d[4] = 0x0F;
- d[7] = 0xCC; /* TCP/IP over X.25, thanks Daniela */
-
- return x25_exec(card, X25_CONNECT_RESPONSE, chan->link, &d, 8, NULL, 0);
-}
-
-/* Place X.25 DISCONNECT RESPONSE. */
-static int cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
- u8 lcn)
-{
- char d[5];
-
- memset(d, 0, sizeof(d));
- d[0] = d[3] = lcn;
- d[2] = 0x10;
- d[4] = 0x17;
-
- return x25_exec(card, X25_DISCONNECT_RESPONSE, link, &d, 5, NULL, 0);
-}
-
-/* Clear X.25 call. */
-static int x25_clear_call(struct cycx_device *card, u8 link, u8 lcn, u8 cause,
- u8 diagn)
-{
- u8 d[7];
-
- memset(d, 0, sizeof(d));
- d[0] = d[3] = lcn;
- d[2] = 0x10;
- d[4] = 0x13;
- d[5] = cause;
- d[6] = diagn;
-
- return x25_exec(card, X25_DISCONNECT_REQUEST, link, d, 7, NULL, 0);
-}
-
-/* Send X.25 data packet. */
-static int cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
- int len, void *buf)
-{
- u8 d[] = "?\xFF\x10??";
-
- d[0] = d[3] = lcn;
- d[4] = bitm;
-
- return x25_exec(card, X25_DATA_REQUEST, link, &d, 5, buf, len);
-}
-
-/* Miscellaneous */
-/* Find network device by its channel number. */
-static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
- s16 lcn)
-{
- struct net_device *dev = wandev->dev;
- struct cycx_x25_channel *chan;
-
- while (dev) {
- chan = netdev_priv(dev);
-
- if (chan->lcn == lcn)
- break;
- dev = chan->slave;
- }
- return dev;
-}
-
-/* Find network device by its remote dte address. */
-static struct net_device *
- cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte)
-{
- struct net_device *dev = wandev->dev;
- struct cycx_x25_channel *chan;
-
- while (dev) {
- chan = netdev_priv(dev);
-
- if (!strcmp(chan->addr, dte))
- break;
- dev = chan->slave;
- }
- return dev;
-}
-
-/* Initiate connection on the logical channel.
- * o for PVC we just get channel configuration
- * o for SVCs place an X.25 call
- *
- * Return: 0 connected
- * >0 connection in progress
- * <0 failure */
-static int cycx_x25_chan_connect(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
-
- if (chan->svc) {
- if (!chan->addr[0])
- return -EINVAL; /* no destination address */
-
- dprintk(1, KERN_INFO "%s: placing X.25 call to %s...\n",
- card->devname, chan->addr);
-
- if (x25_place_call(card, chan))
- return -EIO;
-
- cycx_x25_set_chan_state(dev, WAN_CONNECTING);
- return 1;
- } else
- cycx_x25_set_chan_state(dev, WAN_CONNECTED);
-
- return 0;
-}
-
-/* Disconnect logical channel.
- * o if SVC then clear X.25 call */
-static void cycx_x25_chan_disconnect(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- if (chan->svc) {
- x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0);
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTING);
- } else
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
-}
-
-/* Called by kernel timer */
-static void cycx_x25_chan_timer(unsigned long d)
-{
- struct net_device *dev = (struct net_device *)d;
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- if (chan->state == WAN_CONNECTED)
- cycx_x25_chan_disconnect(dev);
- else
- pr_err("%s: %s for svc (%s) not connected!\n",
- chan->card->devname, __func__, dev->name);
-}
-
-/* Set logical channel state. */
-static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
- unsigned long flags;
- char *string_state = NULL;
-
- spin_lock_irqsave(&card->lock, flags);
-
- if (chan->state != state) {
- if (chan->svc && chan->state == WAN_CONNECTED)
- del_timer(&chan->timer);
-
- switch (state) {
- case WAN_CONNECTED:
- string_state = "connected!";
- *(__be16*)dev->dev_addr = htons(chan->lcn);
- netif_wake_queue(dev);
- reset_timer(dev);
-
- if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev,
- X25_IFACE_CONNECT);
-
- break;
- case WAN_CONNECTING:
- string_state = "connecting...";
- break;
- case WAN_DISCONNECTING:
- string_state = "disconnecting...";
- break;
- case WAN_DISCONNECTED:
- string_state = "disconnected!";
-
- if (chan->svc) {
- *(unsigned short*)dev->dev_addr = 0;
- chan->lcn = 0;
- }
-
- if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev,
- X25_IFACE_DISCONNECT);
-
- netif_wake_queue(dev);
- break;
- }
-
- pr_info("%s: interface %s %s\n",
- card->devname, dev->name, string_state);
- chan->state = state;
- }
-
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-/* Send packet on a logical channel.
- * The skb argument holds the transmit socket buffer. When transmission
- * is complete, release the socket buffer and reset the 'tbusy' flag.
- *
- * Return: 0 - transmission complete
- * 1 - busy
- *
- * Notes:
- * 1. If packet length is greater than MTU for this channel, we'll fragment
- * the packet into 'complete sequence' using M-bit.
- * 2. When transmission is complete, an event notification should be issued
- * to the router. */
-static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
- int bitm = 0; /* final packet */
- unsigned len = skb->len;
-
- if (skb->len > card->wandev.mtu) {
- len = card->wandev.mtu;
- bitm = 0x10; /* set M-bit (more data) */
- }
-
- if (cycx_x25_send(card, chan->link, chan->lcn, bitm, len, skb->data))
- return 1;
-
- if (bitm) {
- skb_pull(skb, len);
- return 1;
- }
-
- ++chan->ifstats.tx_packets;
- chan->ifstats.tx_bytes += len;
-
- return 0;
-}
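
cycx_x25_chan_send() is the transmit half of the M-bit fragmentation scheme (cf. the RFC 1356 handling in cycx_x25_irq_rx()): when the payload exceeds the link MTU it sends one MTU-sized piece with the M ("more data") bit set, pulls that piece off the skb and reports busy, so the stack requeues the shrinking skb until the final piece goes out with M clear. The same idea as a plain loop over a userspace buffer, with a hypothetical send_fragment() standing in for cycx_x25_send():

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical stand-in for the firmware data-request call. */
static void send_fragment(const uint8_t *data, size_t len, int more)
{
    printf("fragment of %zu bytes, M-bit=%d\n", len, more);
}

/* Split a payload into MTU-sized pieces; all but the last carry M=1. */
static void send_with_mbit(const uint8_t *data, size_t len, size_t mtu)
{
    while (len > mtu) {
        send_fragment(data, mtu, 1);
        data += mtu;
        len -= mtu;
    }
    send_fragment(data, len, 0);
}

int main(void)
{
    uint8_t payload[1200];

    memset(payload, 0xAA, sizeof(payload));
    send_with_mbit(payload, sizeof(payload), 512); /* 512 + 512 + 176 */
    return 0;
}
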
-
-/* Send event (connection, disconnection, etc) to X.25 socket layer */
-
-static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
-{
- struct sk_buff *skb;
- unsigned char *ptr;
-
- if ((skb = dev_alloc_skb(1)) == NULL) {
- pr_err("%s: out of memory\n", __func__);
- return;
- }
-
- ptr = skb_put(skb, 1);
- *ptr = event;
-
- skb->protocol = x25_type_trans(skb, dev);
- netif_rx(skb);
-}
-
-/* Convert line speed in bps to a number used by cyclom 2x code. */
-static u8 bps_to_speed_code(u32 bps)
-{
- u8 number = 0; /* defaults to the lowest (1200) speed ;> */
-
- if (bps >= 512000) number = 8;
- else if (bps >= 256000) number = 7;
- else if (bps >= 64000) number = 6;
- else if (bps >= 38400) number = 5;
- else if (bps >= 19200) number = 4;
- else if (bps >= 9600) number = 3;
- else if (bps >= 4800) number = 2;
- else if (bps >= 2400) number = 1;
-
- return number;
-}
-
-/* log base 2 */
-static u8 cycx_log2(u32 n)
-{
- u8 log = 0;
-
- if (!n)
- return 0;
-
- while (n > 1) {
- n >>= 1;
- ++log;
- }
-
- return log;
-}
-
-/* Convert decimal string to unsigned integer.
- * If len != 0 then only 'len' characters of the string are converted. */
-static unsigned dec_to_uint(u8 *str, int len)
-{
- unsigned val = 0;
-
- if (!len)
- len = strlen(str);
-
- for (; len && isdigit(*str); ++str, --len)
- val = (val * 10) + (*str - (unsigned) '0');
-
- return val;
-}
-
-static void reset_timer(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- if (chan->svc)
- mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ);
-}
-#ifdef CYCLOMX_X25_DEBUG
-static void cycx_x25_dump_config(struct cycx_x25_config *conf)
-{
- pr_info("X.25 configuration\n");
- pr_info("-----------------\n");
- pr_info("link number=%d\n", conf->link);
- pr_info("line speed=%d\n", conf->speed);
- pr_info("clock=%sternal\n", conf->clock == 8 ? "Ex" : "In");
- pr_info("# level 2 retransm.=%d\n", conf->n2);
- pr_info("level 2 window=%d\n", conf->n2win);
- pr_info("level 3 window=%d\n", conf->n3win);
- pr_info("# logical channels=%d\n", conf->nvc);
- pr_info("level 3 pkt len=%d\n", conf->pktlen);
- pr_info("my address=%d\n", conf->locaddr);
- pr_info("remote address=%d\n", conf->remaddr);
- pr_info("t1=%d seconds\n", conf->t1);
- pr_info("t2=%d seconds\n", conf->t2);
- pr_info("t21=%d seconds\n", conf->t21);
- pr_info("# PVCs=%d\n", conf->npvc);
- pr_info("t23=%d seconds\n", conf->t23);
- pr_info("flags=0x%x\n", conf->flags);
-}
-
-static void cycx_x25_dump_stats(struct cycx_x25_stats *stats)
-{
- pr_info("X.25 statistics\n");
- pr_info("--------------\n");
- pr_info("rx_crc_errors=%d\n", stats->rx_crc_errors);
- pr_info("rx_over_errors=%d\n", stats->rx_over_errors);
- pr_info("n2_tx_frames=%d\n", stats->n2_tx_frames);
- pr_info("n2_rx_frames=%d\n", stats->n2_rx_frames);
- pr_info("tx_timeouts=%d\n", stats->tx_timeouts);
- pr_info("rx_timeouts=%d\n", stats->rx_timeouts);
- pr_info("n3_tx_packets=%d\n", stats->n3_tx_packets);
- pr_info("n3_rx_packets=%d\n", stats->n3_rx_packets);
- pr_info("tx_aborts=%d\n", stats->tx_aborts);
- pr_info("rx_aborts=%d\n", stats->rx_aborts);
-}
-
-static void cycx_x25_dump_devs(struct wan_device *wandev)
-{
- struct net_device *dev = wandev->dev;
-
- pr_info("X.25 dev states\n");
- pr_info("name: addr: txoff: protocol:\n");
- pr_info("---------------------------------------\n");
-
- while(dev) {
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- pr_info("%-5.5s %-15.15s %d ETH_P_%s\n",
- chan->name, chan->addr, netif_queue_stopped(dev),
- chan->protocol == ETH_P_IP ? "IP" : "X25");
- dev = chan->slave;
- }
-}
-
-#endif /* CYCLOMX_X25_DEBUG */
-/* End */
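The comment on cycx_x25_chan_send() above describes the X.25 "complete sequence" scheme: an oversized packet is chopped into MTU-sized fragments and every fragment except the last carries the M (more data) bit. A minimal userspace sketch of that loop, assuming a hypothetical send_fragment() hook and a fixed MTU (neither is part of the removed driver):

#include <stdio.h>
#include <stddef.h>

#define MTU   1500        /* assumed channel MTU */
#define MBIT  0x10        /* 'more data follows' flag, as in the driver */

/* hypothetical per-fragment transmit hook: returns 0 on success */
typedef int (*send_fragment_t)(const unsigned char *buf, size_t len, int flags);

/* Split 'len' bytes into fragments; all but the last carry MBIT. */
static int send_complete_sequence(const unsigned char *buf, size_t len,
                                  send_fragment_t send_fragment)
{
        while (len > MTU) {
                if (send_fragment(buf, MTU, MBIT))
                        return -1;      /* link busy, caller retries later */
                buf += MTU;
                len -= MTU;
        }
        return send_fragment(buf, len, 0);      /* final fragment, M bit clear */
}

static int print_fragment(const unsigned char *buf, size_t len, int flags)
{
        (void)buf;
        return printf("frag len=%zu flags=0x%x\n", len, flags) < 0;
}

int main(void)
{
        unsigned char pkt[3200] = { 0 };

        return send_complete_sequence(pkt, sizeof(pkt), print_fragment);
}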
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 56941d6547eb..3f0c4f268751 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2448,11 +2448,9 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Allocate driver private data */
- card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
- if (card == NULL) {
- pr_err("FarSync card found but insufficient memory for driver storage\n");
+ card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL);
+ if (card == NULL)
return -ENOMEM;
- }
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 10cc7df95498..a0a932c63d0a 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -280,14 +280,13 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
if (!try_module_get(proto->module))
return -ENOSYS;
- if (size)
- if ((dev_to_hdlc(dev)->state = kmalloc(size,
- GFP_KERNEL)) == NULL) {
- netdev_warn(dev,
- "Memory squeeze on hdlc_proto_attach()\n");
+ if (size) {
+ dev_to_hdlc(dev)->state = kmalloc(size, GFP_KERNEL);
+ if (dev_to_hdlc(dev)->state == NULL) {
module_put(proto->module);
return -ENOBUFS;
}
+ }
dev_to_hdlc(dev)->proto = proto;
return 0;
}
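The attach_hdlc_protocol() cleanup above pulls an assignment out of the if() condition and makes the unwind order explicit: the module reference taken first is dropped again if the later state allocation fails. The same acquire/rollback shape in plain C, with stand-in resources invented purely for illustration:

#include <stdio.h>
#include <stdlib.h>

static int attach(size_t state_size)
{
        FILE *ref = fopen("/dev/null", "r");    /* stands in for try_module_get() */
        void *state = NULL;

        if (!ref)
                return -1;

        if (state_size) {
                state = calloc(1, state_size);
                if (!state) {
                        fclose(ref);            /* roll back the earlier step */
                        return -1;
                }
        }
        /* a real driver would hand 'state' and 'ref' over at this point */
        free(state);
        fclose(ref);
        return 0;
}

int main(void) { return attach(64); }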
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 44db8b75a531..5895f1978691 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -128,7 +128,6 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
rbuff = kmalloc(len + 4, GFP_ATOMIC);
if (xbuff == NULL || rbuff == NULL) {
- netdev_warn(dev, "unable to grow X.25 buffers, MTU change cancelled\n");
kfree(xbuff);
kfree(rbuff);
return -ENOMEM;
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index def12b38cbf7..c9c711dcd0e6 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1055,7 +1055,6 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
result = 0;
}
net_dev->addr_len = ETH_ALEN;
- memcpy(net_dev->perm_addr, ack_buf.ack_pl, ETH_ALEN);
memcpy(net_dev->dev_addr, ack_buf.ack_pl, ETH_ALEN);
error_read_mac:
d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result);
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 6650fde99e1d..9f1e947f3557 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -152,6 +152,9 @@ enum {
/* Device IDs */
USB_DEVICE_ID_I6050 = 0x0186,
USB_DEVICE_ID_I6050_2 = 0x0188,
+ USB_DEVICE_ID_I6150 = 0x07d6,
+ USB_DEVICE_ID_I6150_2 = 0x07d7,
+ USB_DEVICE_ID_I6150_3 = 0x07d9,
USB_DEVICE_ID_I6250 = 0x0187,
};
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 1d76ae855f07..48896138418f 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -156,7 +156,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
struct device *dev = i2400m_dev(i2400m);
- struct sk_buff *skb = i2400m->wake_tx_skb;
+ struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&i2400m->tx_lock, flags);
@@ -236,23 +236,26 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
void i2400m_net_wake_stop(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *wake_tx_skb;
+ unsigned long flags;
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
- /* See i2400m_hard_start_xmit(), references are taken there
- * and here we release them if the work was still
- * pending. Note we can't differentiate work not pending vs
- * never scheduled, so the NULL check does that. */
- if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
- && i2400m->wake_tx_skb != NULL) {
- unsigned long flags;
- struct sk_buff *wake_tx_skb;
- spin_lock_irqsave(&i2400m->tx_lock, flags);
- wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
- i2400m->wake_tx_skb = NULL; /* compat help */
- spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ /*
+ * See i2400m_hard_start_xmit(), references are taken there and
+ * here we release them if the packet was still pending.
+ */
+ cancel_work_sync(&i2400m->wake_tx_ws);
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ wake_tx_skb = i2400m->wake_tx_skb;
+ i2400m->wake_tx_skb = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ if (wake_tx_skb) {
i2400m_put(i2400m);
kfree_skb(wake_tx_skb);
}
+
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
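The reworked i2400m_net_wake_stop() above follows a common teardown pattern: cancel the deferred work first, then take ownership of the shared pointer under the spinlock, clear it, and only release the resource after dropping the lock. A small pthread-based sketch of the same idea (names are illustrative, not part of the driver):

#include <pthread.h>
#include <stdlib.h>

struct ctx {
        pthread_mutex_t lock;
        void *pending;          /* resource handed over by the submit path */
};

/* Detach the pending resource under the lock, release it outside the lock. */
static void stop_pending(struct ctx *c)
{
        void *p;

        pthread_mutex_lock(&c->lock);
        p = c->pending;
        c->pending = NULL;
        pthread_mutex_unlock(&c->lock);

        if (p)
                free(p);        /* safe: no lock held while releasing */
}

int main(void)
{
        struct ctx c = { PTHREAD_MUTEX_INITIALIZER, malloc(32) };

        stop_pending(&c);       /* releases the pending buffer exactly once */
        stop_pending(&c);       /* second call finds NULL and does nothing */
        return 0;
}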
@@ -288,7 +291,7 @@ int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
* and if pending, release those resources. */
result = 0;
spin_lock_irqsave(&i2400m->tx_lock, flags);
- if (!work_pending(&i2400m->wake_tx_ws)) {
+ if (!i2400m->wake_tx_skb) {
netif_stop_queue(net_dev);
i2400m_get(i2400m);
i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */
@@ -596,12 +599,12 @@ static void i2400m_get_drvinfo(struct net_device *net_dev,
{
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
- strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
- strncpy(info->fw_version,
- i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->fw_version, i2400m->fw_name ? : "",
+ sizeof(info->fw_version));
if (net_dev->dev.parent)
- strncpy(info->bus_info, dev_name(net_dev->dev.parent),
- sizeof(info->bus_info) - 1);
+ strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
+ sizeof(info->bus_info));
}
static const struct ethtool_ops i2400m_ethtool_ops = {
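The strncpy() -> strlcpy() conversions above matter because strncpy() does not guarantee NUL termination when the source fills the destination, while strlcpy() always terminates and returns the full source length. strlcpy() is not universally available in userspace libc, so this sketch carries a minimal local copy purely for illustration:

#include <stdio.h>
#include <string.h>

/* minimal strlcpy for illustration; the kernel provides its own */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len < size - 1 ? len : size - 1;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

int main(void)
{
        char a[4], b[4];

        strncpy(a, "driver", sizeof(a));        /* 'a' is NOT NUL-terminated */
        my_strlcpy(b, "driver", sizeof(b));     /* b == "dri", terminated */
        printf("%.*s / %s\n", (int)sizeof(a), a, b);
        return 0;
}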
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 37becfcc98f2..0b602951ff6b 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1346,29 +1346,22 @@ EXPORT_SYMBOL(i2400m_unknown_barker);
int i2400m_rx_setup(struct i2400m *i2400m)
{
int result = 0;
- struct device *dev = i2400m_dev(i2400m);
i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1;
if (i2400m->rx_reorder) {
unsigned itr;
- size_t size;
struct i2400m_roq_log *rd;
result = -ENOMEM;
- size = sizeof(i2400m->rx_roq[0]) * (I2400M_RO_CIN + 1);
- i2400m->rx_roq = kzalloc(size, GFP_KERNEL);
- if (i2400m->rx_roq == NULL) {
- dev_err(dev, "RX: cannot allocate %zu bytes for "
- "reorder queues\n", size);
+ i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1,
+ sizeof(i2400m->rx_roq[0]), GFP_KERNEL);
+ if (i2400m->rx_roq == NULL)
goto error_roq_alloc;
- }
- size = sizeof(*i2400m->rx_roq[0].log) * (I2400M_RO_CIN + 1);
- rd = kzalloc(size, GFP_KERNEL);
+ rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
+ GFP_KERNEL);
if (rd == NULL) {
- dev_err(dev, "RX: cannot allocate %zu bytes for "
- "reorder queues log areas\n", size);
result = -ENOMEM;
goto error_roq_log_alloc;
}
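The switch to kcalloc() in the hunk above is not just cosmetic: kcalloc(n, size, ...) fails cleanly if n * size would overflow, whereas an open-coded kzalloc(n * size, ...) can silently wrap and under-allocate. The equivalent userspace guard, written out explicitly around plain malloc(), looks roughly like this:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocate and zero n elements of 'size' bytes, refusing to overflow. */
static void *zalloc_array(size_t n, size_t size)
{
        void *p;

        if (size && n > SIZE_MAX / size)
                return NULL;            /* n * size would wrap around */
        p = malloc(n * size);
        if (p)
                memset(p, 0, n * size);
        return p;
}

int main(void)
{
        void *ok = zalloc_array(8, 16);
        void *bad = zalloc_array(SIZE_MAX, 2);  /* refused: would overflow */

        printf("%p %p\n", ok, bad);
        free(ok);
        return 0;
}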
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index d44b545f4082..fc1355d98bc6 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -199,7 +199,6 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu)
d_fnstart(4, dev, "(i2400m %p)\n", i2400mu);
buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA);
if (buf == NULL) {
- dev_err(dev, "notification: buffer allocation failed\n");
ret = -ENOMEM;
goto error_buf_alloc;
}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 713d033891e6..cd15a93d9084 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -346,9 +346,9 @@ static void i2400mu_get_drvinfo(struct net_device *net_dev,
struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
struct usb_device *udev = i2400mu->usb_dev;
- strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
- strncpy(info->fw_version,
- i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->fw_version, i2400m->fw_name ? : "",
+ sizeof(info->fw_version));
usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
}
@@ -510,6 +510,9 @@ int i2400mu_probe(struct usb_interface *iface,
switch (id->idProduct) {
case USB_DEVICE_ID_I6050:
case USB_DEVICE_ID_I6050_2:
+ case USB_DEVICE_ID_I6150:
+ case USB_DEVICE_ID_I6150_2:
+ case USB_DEVICE_ID_I6150_3:
case USB_DEVICE_ID_I6250:
i2400mu->i6050 = 1;
break;
@@ -759,6 +762,9 @@ static
struct usb_device_id i2400mu_id_table[] = {
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
{ USB_DEVICE(0x8086, 0x0181) },
{ USB_DEVICE(0x8086, 0x1403) },
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 6deaae18db57..f8f0156dff4e 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -138,7 +138,7 @@ config AIRO_CS
config PCMCIA_WL3501
tristate "Planet WL3501 PCMCIA cards"
- depends on EXPERIMENTAL && PCMCIA
+ depends on PCMCIA
select WIRELESS_EXT
select WEXT_SPY
help
@@ -148,7 +148,7 @@ config PCMCIA_WL3501
config PRISM54
tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)'
- depends on PCI && EXPERIMENTAL
+ depends on PCI
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
@@ -156,11 +156,7 @@ config PRISM54
---help---
This enables support for FullMAC PCI/Cardbus prism54 devices. This
driver is now deprecated in favor of the SoftMAC driver, p54pci.
- p54pci supports FullMAC PCI/Cardbus devices as well. For details on
- the scheduled removal of this driver on the kernel see the feature
- removal schedule:
-
- Documentation/feature-removal-schedule.txt
+ p54pci supports FullMAC PCI/Cardbus devices as well.
For more information refer to the p54 wiki:
@@ -191,7 +187,7 @@ config USB_ZD1201
config USB_NET_RNDIS_WLAN
tristate "Wireless RNDIS USB support"
- depends on USB && EXPERIMENTAL
+ depends on USB
depends on CFG80211
select USB_USBNET
select USB_NET_CDCETHER
@@ -221,7 +217,7 @@ source "drivers/net/wireless/rtl818x/Kconfig"
config ADM8211
tristate "ADMtek ADM8211 support"
- depends on MAC80211 && PCI && EXPERIMENTAL
+ depends on MAC80211 && PCI
select CRC32
select EEPROM_93CX6
---help---
@@ -261,7 +257,7 @@ config MAC80211_HWSIM
config MWL8K
tristate "Marvell 88W8xxx PCI/PCIe Wireless support"
- depends on MAC80211 && PCI && EXPERIMENTAL
+ depends on MAC80211 && PCI
---help---
This driver supports Marvell TOPDOG 802.11 wireless cards.
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 062dfdff6364..67156efe14c4 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -47,7 +47,7 @@ obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
-obj-$(CONFIG_ATH_COMMON) += ath/
+obj-$(CONFIG_ATH_CARDS) += ath/
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index 630577dd3a7a..956024a636e6 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -69,10 +69,9 @@ static int airo_probe(struct pcmcia_device *p_dev)
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) {
- printk(KERN_ERR "airo_cs: no memory for new device\n");
+ if (!local)
return -ENOMEM;
- }
+
p_dev->priv = local;
return airo_config(p_dev);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 77fa4286e5e9..5ac5f7ae2721 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2164,10 +2164,8 @@ static int at76_alloc_urbs(struct at76_priv *priv,
buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE;
priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!priv->bulk_out_buffer) {
- dev_err(&interface->dev, "cannot allocate output buffer\n");
+ if (!priv->bulk_out_buffer)
return -ENOMEM;
- }
at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__);
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1a67a4f829fe..2c02b4e84094 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -30,5 +30,6 @@ source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
+source "drivers/net/wireless/ath/wil6210/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 1e18621326dc..97b964ded2be 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
+obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 30ca0a60a64c..1d264c0f5a9b 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -240,13 +240,14 @@ static const struct ath_ops ath5k_common_ops = {
* Driver Initialization *
\***********************/
-static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+static void ath5k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath5k_hw *ah = hw->priv;
struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
- return ath_reg_notifier_apply(wiphy, request, regulatory);
+ ath_reg_notifier_apply(wiphy, request, regulatory);
}
/********************\
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index ab363f34b4df..a78afa98c650 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1613,6 +1613,10 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
+ if (WARN_ON(ee_mode < 0)) {
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+ return;
+ }
/* completed NF calibration, test threshold */
nf = ath5k_hw_read_measured_noise_floor(ah);
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 4084b1076286..e2d8b2cf19eb 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -985,6 +985,8 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
return;
ee_mode = ath5k_eeprom_mode_from_channel(channel);
+ if (WARN_ON(ee_mode < 0))
+ return;
/* Adjust power delta for channel 14 */
if (channel->center_freq == 2484)
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index 26c4b7220859..630c83db056e 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -18,7 +18,6 @@ config ATH6KL_USB
depends on ATH6KL
depends on USB
depends on CFG80211
- depends on EXPERIMENTAL
---help---
This module adds support for wireless adapters based on
Atheros AR6004 chipset running over USB. This is still under
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 5516a8ccc3c6..752ffc4f4166 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -427,6 +427,30 @@ static bool ath6kl_is_tx_pending(struct ath6kl *ar)
return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0;
}
+static void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif,
+ bool enable)
+{
+ int err;
+
+ if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
+ return;
+
+ if (vif->nw_type != INFRA_NETWORK)
+ return;
+
+ if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+ vif->ar->fw_capabilities))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
+ enable ? "enable" : "disable");
+
+ err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, enable);
+ if (err)
+ ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
+ enable ? "enable" : "disable", err);
+}
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
@@ -616,13 +640,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
vif->req_bssid, vif->ch_hint,
ar->connect_ctrl_flags, nw_subtype);
- /* disable background scan if period is 0 */
- if (sme->bg_scan_period == 0)
+ if (sme->bg_scan_period == 0) {
+ /* disable background scan if period is 0 */
sme->bg_scan_period = 0xffff;
-
- /* configure default value if not specified */
- if (sme->bg_scan_period == -1)
+ } else if (sme->bg_scan_period == -1) {
+ /* configure default value if not specified */
sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
+ }
ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
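The reordered bg_scan_period handling above maps two sentinel values onto firmware semantics: 0 means "disable" (sent as 0xffff) and -1 means "use the driver default". Restated as a standalone helper; DEFAULT_BG_SCAN_PERIOD here is only a placeholder, not the driver's actual constant:

#define DEFAULT_BG_SCAN_PERIOD 60       /* placeholder value, seconds */

static unsigned int normalize_bg_scan_period(int requested)
{
        if (requested == 0)
                return 0xffff;          /* firmware value for "disabled" */
        if (requested == -1)
                return DEFAULT_BG_SCAN_PERIOD;
        return (unsigned int)requested;
}

int main(void) { return normalize_bg_scan_period(-1) == DEFAULT_BG_SCAN_PERIOD ? 0 : 1; }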
@@ -767,7 +791,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
return;
}
@@ -778,7 +802,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
} else if (vif->sme_state == SME_CONNECTED) {
/* inform roam event to cfg80211 */
cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
@@ -1454,10 +1478,10 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return -EIO;
if (pmgmt) {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
mode.pwr_mode = REC_POWER;
} else {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
mode.pwr_mode = MAX_PERF_POWER;
}
@@ -1509,7 +1533,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
list_del(&vif->list);
spin_unlock_bh(&ar->list_lock);
- ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
ath6kl_cfg80211_vif_cleanup(vif);
@@ -1559,17 +1583,13 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
set_iface_type:
switch (type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_ADHOC:
vif->next_mode = ADHOC_NETWORK;
break;
case NL80211_IFTYPE_AP:
- vif->next_mode = AP_NETWORK;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- vif->next_mode = INFRA_NETWORK;
- break;
case NL80211_IFTYPE_P2P_GO:
vif->next_mode = AP_NETWORK;
break;
@@ -1778,14 +1798,14 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->target_stats.rx_byte) {
sinfo->rx_bytes = vif->target_stats.rx_byte;
- sinfo->filled |= STATION_INFO_RX_BYTES;
+ sinfo->filled |= STATION_INFO_RX_BYTES64;
sinfo->rx_packets = vif->target_stats.rx_pkt;
sinfo->filled |= STATION_INFO_RX_PACKETS;
}
if (vif->target_stats.tx_byte) {
sinfo->tx_bytes = vif->target_stats.tx_byte;
- sinfo->filled |= STATION_INFO_TX_BYTES;
+ sinfo->filled |= STATION_INFO_TX_BYTES64;
sinfo->tx_packets = vif->target_stats.tx_pkt;
sinfo->filled |= STATION_INFO_TX_PACKETS;
}
@@ -2673,30 +2693,6 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
return 0;
}
-void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable)
-{
- int err;
-
- if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
- return;
-
- if (vif->nw_type != INFRA_NETWORK)
- return;
-
- if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
- vif->ar->fw_capabilities))
- return;
-
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
- enable ? "enable" : "disable");
-
- err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
- vif->fw_vif_idx, enable);
- if (err)
- ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
- enable ? "enable" : "disable", err);
-}
-
static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
u8 *rsn_capab)
{
@@ -2776,9 +2772,11 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
ar->ap_mode_bkey.valid = false;
- /* TODO:
- * info->interval
- */
+ ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx,
+ info->beacon_interval);
+
+ if (ret)
+ ath6kl_warn("Failed to set beacon interval: %d\n", ret);
ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
info->dtim_period);
@@ -3492,8 +3490,8 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
ath6kl_cfg80211_stop(vif);
}
-static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ath6kl *ar = wiphy_priv(wiphy);
u32 rates[IEEE80211_NUM_BANDS];
@@ -3506,17 +3504,13 @@ static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
request->processed ? " processed" : "",
request->initiator, request->user_reg_hint_type);
- /*
- * As firmware is not able intersect regdoms, we can only listen to
- * cellular hints.
- */
if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
- return -EOPNOTSUPP;
+ return;
ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2);
if (ret) {
ath6kl_err("failed to set regdomain: %d\n", ret);
- return ret;
+ return;
}
/*
@@ -3536,10 +3530,8 @@ static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
if (ret) {
ath6kl_err("failed to start scan for a regdomain change: %d\n",
ret);
- return ret;
+ return;
}
-
- return 0;
}
static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
@@ -3563,6 +3555,37 @@ static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
return 0;
}
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready)
+{
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
+
+ netif_stop_queue(vif->ndev);
+
+ clear_bit(WLAN_ENABLED, &vif->flags);
+
+ if (wmi_ready) {
+ discon_issued = test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags);
+ ath6kl_disconnect(vif);
+ del_timer(&vif->disconnect_timer);
+
+ if (discon_issued)
+ ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+ (vif->nw_type & AP_NETWORK) ?
+ bcast_mac : vif->bssid,
+ 0, NULL, 0);
+ }
+
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
+ }
+
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
+}
+
void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
{
struct ath6kl *ar = vif->ar;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index e5e70f3a8ca8..b59becd91aea 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -61,7 +61,5 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
struct ath6kl *ath6kl_cfg80211_create(void);
void ath6kl_cfg80211_destroy(struct ath6kl *ar);
-/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */
-void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable);
#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 189d8faf8c87..61b2f98b4e77 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -940,7 +940,7 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
bool wait_fot_compltn, bool cold_reset);
void ath6kl_init_control_info(struct ath6kl_vif *vif);
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
-void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
int ath6kl_init_hw_start(struct ath6kl *ar);
int ath6kl_init_hw_stop(struct ath6kl *ar);
int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index ba6bd497b787..281390178e3d 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -509,9 +509,7 @@ static void destroy_htc_txctrl_packet(struct htc_packet *packet)
{
struct sk_buff *skb;
skb = packet->skb;
- if (skb != NULL)
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
kfree(packet);
}
@@ -969,6 +967,22 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
u16 payload_len;
int status = 0;
+ /*
+ * ar->htc_target can be NULL due to a race condition that can occur
+ * during driver initialization (we do 'ath6kl_hif_power_on' before
+ * initializing 'ar->htc_target' via 'ath6kl_htc_create').
+ * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
+ * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
+ * Thus ar->htc_target may still be NULL when ath6kl_recv_complete ->
+ * ath6kl_usb_io_comp_work runs early during initialization.
+ */
+ if (WARN_ON_ONCE(!target)) {
+ ath6kl_err("Target not yet initialized\n");
+ status = -EINVAL;
+ goto free_skb;
+ }
+
+
netdata = skb->data;
netlen = skb->len;
@@ -1054,6 +1068,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
dev_kfree_skb(skb);
skb = NULL;
+
goto free_skb;
}
@@ -1089,8 +1104,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
skb = NULL;
free_skb:
- if (skb != NULL)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
return status;
@@ -1184,7 +1198,7 @@ static void reset_endpoint_states(struct htc_target *target)
INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
INIT_LIST_HEAD(&ep->rx_bufq);
ep->target = target;
- ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
+ ep->pipe.tx_credit_flow_enabled = true;
}
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index f21fa322e5ca..5d434cf88f35 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1715,38 +1715,6 @@ void ath6kl_init_hw_restart(struct ath6kl *ar)
}
}
-/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
-void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
-{
- static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- bool discon_issued;
-
- netif_stop_queue(vif->ndev);
-
- clear_bit(WLAN_ENABLED, &vif->flags);
-
- if (wmi_ready) {
- discon_issued = test_bit(CONNECTED, &vif->flags) ||
- test_bit(CONNECT_PEND, &vif->flags);
- ath6kl_disconnect(vif);
- del_timer(&vif->disconnect_timer);
-
- if (discon_issued)
- ath6kl_disconnect_event(vif, DISCONNECT_CMD,
- (vif->nw_type & AP_NETWORK) ?
- bcast_mac : vif->bssid,
- 0, NULL, 0);
- }
-
- if (vif->scan_req) {
- cfg80211_scan_done(vif->scan_req, true);
- vif->scan_req = NULL;
- }
-
- /* need to clean up enhanced bmiss detection fw state */
- ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
-}
-
void ath6kl_stop_txrx(struct ath6kl *ar)
{
struct ath6kl_vif *vif, *tmp_vif;
@@ -1766,7 +1734,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
list_del(&vif->list);
spin_unlock_bh(&ar->list_lock);
- ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
rtnl_lock();
ath6kl_cfg80211_vif_cleanup(vif);
rtnl_unlock();
@@ -1801,8 +1769,6 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
"attempting to reset target on instance destroy\n");
ath6kl_reset_device(ar, ar->target_type, true, true);
- clear_bit(WLAN_ENABLED, &ar->flag);
-
up(&ar->sem);
}
EXPORT_SYMBOL(ath6kl_stop_txrx);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 62bcc0d5bc23..5fcd342762de 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -159,10 +159,8 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
{
- if (urb_context->skb != NULL) {
- dev_kfree_skb(urb_context->skb);
- urb_context->skb = NULL;
- }
+ dev_kfree_skb(urb_context->skb);
+ urb_context->skb = NULL;
ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 998f8b0f62fd..d76b5bd81a0d 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -751,6 +751,23 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
NO_SYNC_WMIFLAG);
}
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+ u32 beacon_intvl)
+{
+ struct sk_buff *skb;
+ struct set_beacon_int_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct set_beacon_int_cmd *) skb->data;
+
+ cmd->beacon_intvl = cpu_to_le32(beacon_intvl);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_BEACON_INT_CMDID, NO_SYNC_WMIFLAG);
+}
+
int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
{
struct sk_buff *skb;
@@ -1108,7 +1125,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
kfree(mgmt);
if (bss == NULL)
return -ENOMEM;
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
/*
* Firmware doesn't return any event when scheduled scan has
@@ -2480,16 +2497,11 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
free_cmd_skb:
/* free up any resources left over (possibly due to an error) */
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
free_data_skb:
- for (index = 0; index < num_pri_streams; index++) {
- if (data_sync_bufs[index].skb != NULL) {
- dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
- skb);
- }
- }
+ for (index = 0; index < num_pri_streams; index++)
+ dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].skb);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 98b1755e67f4..b5f226503baf 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -1660,6 +1660,10 @@ struct roam_ctrl_cmd {
u8 roam_ctrl;
} __packed;
+struct set_beacon_int_cmd {
+ __le32 beacon_intvl;
+} __packed;
+
struct set_dtim_cmd {
__le32 dtim_period;
} __packed;
@@ -2649,6 +2653,8 @@ int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+ u32 beacon_interval);
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 5fc15bf8be09..17507dc8a1e7 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,7 @@ config ATH9K_HW
tristate
config ATH9K_COMMON
tristate
+ select ATH_COMMON
config ATH9K_DFS_DEBUGFS
def_bool y
depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
@@ -17,7 +18,6 @@ config ATH9K_BTCOEX_SUPPORT
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211
- select ATH_COMMON
select ATH9K_HW
select MAC80211_LEDS
select LEDS_CLASS
@@ -56,7 +56,9 @@ config ATH9K_AHB
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K && DEBUG_FS
+ depends on ATH9K
+ select MAC80211_DEBUGFS
+ select RELAY
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 3a69804f4c16..d1ff3c246a12 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -86,29 +86,25 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "no platform data specified\n");
- ret = -EINVAL;
- goto err_out;
+ return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no memory resource found\n");
- ret = -ENXIO;
- goto err_out;
+ return -ENXIO;
}
- mem = ioremap_nocache(res->start, resource_size(res));
+ mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no IRQ resource found\n");
- ret = -ENXIO;
- goto err_iounmap;
+ return -ENXIO;
}
irq = res->start;
@@ -116,8 +112,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
- ret = -ENOMEM;
- goto err_iounmap;
+ return -ENOMEM;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -156,9 +151,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
- err_iounmap:
- iounmap(mem);
- err_out:
return ret;
}
@@ -168,12 +160,10 @@ static int ath_ahb_remove(struct platform_device *pdev)
if (hw) {
struct ath_softc *sc = hw->priv;
- void __iomem *mem = sc->mem;
ath9k_deinit_device(sc);
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
- iounmap(mem);
platform_set_drvdata(pdev, NULL);
}
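The ath_ahb_probe()/ath_ahb_remove() changes above lean on devm_ioremap_nocache(), which ties the mapping's lifetime to the struct device so the manual iounmap() calls and the error-label ladder can disappear. A loose userspace analogy (not the kernel devres API) is scope-bound cleanup via the GCC/Clang cleanup attribute:

#include <stdio.h>
#include <stdlib.h>

static void free_buf(char **p)
{
        free(*p);       /* runs automatically when the variable leaves scope */
}

static int probe_like(void)
{
        __attribute__((cleanup(free_buf))) char *buf = malloc(256);

        if (!buf)
                return -1;
        snprintf(buf, 256, "resource bound to scope, no manual free path");
        puts(buf);
        return 0;       /* buf is released here without an explicit error ladder */
}

int main(void) { return probe_like(); }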
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index e09ec40ce71a..7ecd40f07a74 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -152,7 +152,8 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->ofdmNoiseImmunityLevel,
immunityLevel, BEACON_RSSI(ah),
- aniState->rssiThrLow, aniState->rssiThrHigh);
+ ATH9K_ANI_RSSI_THR_LOW,
+ ATH9K_ANI_RSSI_THR_HIGH);
if (!scan)
aniState->ofdmNoiseImmunityLevel = immunityLevel;
@@ -173,7 +174,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
weak_sig = entry_ofdm->ofdm_weak_signal_on;
if (ah->opmode == NL80211_IFTYPE_STATION &&
- BEACON_RSSI(ah) <= aniState->rssiThrHigh)
+ BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
weak_sig = true;
if (aniState->ofdmWeakSigDetect != weak_sig)
@@ -216,11 +217,11 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->cckNoiseImmunityLevel, immunityLevel,
- BEACON_RSSI(ah), aniState->rssiThrLow,
- aniState->rssiThrHigh);
+ BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
+ ATH9K_ANI_RSSI_THR_HIGH);
if (ah->opmode == NL80211_IFTYPE_STATION &&
- BEACON_RSSI(ah) <= aniState->rssiThrLow &&
+ BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
@@ -418,9 +419,6 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
return;
aniState = &ah->curchan->ani;
- if (WARN_ON(!aniState))
- return;
-
if (!ath9k_hw_ani_read_counters(ah))
return;
@@ -489,23 +487,6 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
-void ath9k_hw_ani_setup(struct ath_hw *ah)
-{
- int i;
-
- static const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
- static const int coarseHigh[] = { -14, -14, -14, -14, -12 };
- static const int coarseLow[] = { -64, -64, -64, -64, -70 };
- static const int firpwr[] = { -78, -78, -78, -78, -80 };
-
- for (i = 0; i < 5; i++) {
- ah->totalSizeDesired[i] = totalSizeDesired[i];
- ah->coarse_high[i] = coarseHigh[i];
- ah->coarse_low[i] = coarseLow[i];
- ah->firpwr[i] = firpwr[i];
- }
-}
-
void ath9k_hw_ani_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -531,8 +512,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ani->ofdmsTurn = true;
- ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
- ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 1485bf5e3518..dddb1361039a 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -104,7 +104,6 @@ struct ath9k_ani_default {
};
struct ar5416AniState {
- struct ath9k_channel *c;
u8 noiseImmunityLevel;
u8 ofdmNoiseImmunityLevel;
u8 cckNoiseImmunityLevel;
@@ -113,15 +112,9 @@ struct ar5416AniState {
u8 spurImmunityLevel;
u8 firstepLevel;
u8 ofdmWeakSigDetect;
- u8 cckWeakSigThreshold;
u32 listenTime;
- int32_t rssiThrLow;
- int32_t rssiThrHigh;
u32 ofdmPhyErrCount;
u32 cckPhyErrCount;
- int16_t pktRssi[2];
- int16_t ofdmErrRssi[2];
- int16_t cckErrRssi[2];
struct ath9k_ani_default iniDef;
};
@@ -147,7 +140,6 @@ struct ar5416Stats {
void ath9k_enable_mib_counters(struct ath_hw *ah);
void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
-void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
index f81e7fc60a36..467ccfae2cee 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -466,7 +466,7 @@ static const u32 ar5416Bank0[][2] = {
};
static const u32 ar5416BB_RfGain[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x00009a00, 0x00000000, 0x00000000},
{0x00009a04, 0x00000040, 0x00000040},
{0x00009a08, 0x00000080, 0x00000080},
@@ -546,12 +546,12 @@ static const u32 ar5416Bank2[][2] = {
};
static const u32 ar5416Bank3[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x000098f0, 0x01400018, 0x01c00018},
};
static const u32 ar5416Bank6[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
@@ -588,7 +588,7 @@ static const u32 ar5416Bank6[][3] = {
};
static const u32 ar5416Bank6TPC[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 874186bfda41..fd69376ecc83 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -470,16 +470,15 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
{
#define ATH_ALLOC_BANK(bank, size) do { \
- bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
- if (!bank) { \
- ath_err(common, "Cannot allocate RF banks\n"); \
- return -ENOMEM; \
- } \
+ bank = devm_kzalloc(ah->dev, sizeof(u32) * size, GFP_KERNEL); \
+ if (!bank) \
+ goto error; \
} while (0);
struct ath_common *common = ath9k_hw_common(ah);
- BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ return 0;
ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
@@ -492,35 +491,12 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
return 0;
#undef ATH_ALLOC_BANK
+error:
+ ath_err(common, "Cannot allocate RF banks\n");
+ return -ENOMEM;
}
-/**
- * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
- * @ah: atheros hardware struture
- * For the external AR2133/AR5133 radios banks.
- */
-static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
-{
-#define ATH_FREE_BANK(bank) do { \
- kfree(bank); \
- bank = NULL; \
- } while (0);
-
- BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
-
- ATH_FREE_BANK(ah->analogBank0Data);
- ATH_FREE_BANK(ah->analogBank1Data);
- ATH_FREE_BANK(ah->analogBank2Data);
- ATH_FREE_BANK(ah->analogBank3Data);
- ATH_FREE_BANK(ah->analogBank6Data);
- ATH_FREE_BANK(ah->analogBank6TPCData);
- ATH_FREE_BANK(ah->analogBank7Data);
- ATH_FREE_BANK(ah->bank6Temp);
-
-#undef ATH_FREE_BANK
-}
-
/* *
* ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
* @ah: atheros hardware structure
@@ -1380,7 +1356,7 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
conf->radar_inband = 8;
}
-void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
+int ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
static const u32 ar5416_cca_regs[6] = {
@@ -1391,12 +1367,15 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
AR_PHY_CH1_EXT_CCA,
AR_PHY_CH2_EXT_CCA
};
+ int ret;
+
+ ret = ar5008_hw_rf_alloc_ext_banks(ah);
+ if (ret)
+ return ret;
priv_ops->rf_set_freq = ar5008_hw_set_channel;
priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
- priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
- priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
priv_ops->init_bb = ar5008_hw_init_bb;
@@ -1421,4 +1400,5 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
ar5008_hw_set_nf_limits(ah);
ar5008_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
+ return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
index ea4a230997ac..59524e1d4678 100644
--- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -460,7 +460,7 @@ static const u32 ar5416Common_9100[][2] = {
};
static const u32 ar5416Bank6_9100[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
@@ -497,7 +497,7 @@ static const u32 ar5416Bank6_9100[][3] = {
};
static const u32 ar5416Bank6TPC_9100[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 648da3e885e9..f053d978540e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -23,13 +23,13 @@
/* General hardware code for the A5008/AR9001/AR9002 hadware families */
-static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
+static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
if (AR_SREV_9271(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271);
INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271);
INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg);
- return;
+ return 0;
}
if (ah->config.pcie_clock_req)
@@ -102,9 +102,9 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns;
u32 *data;
- data = kmalloc(size, GFP_KERNEL);
+ data = devm_kzalloc(ah->dev, size, GFP_KERNEL);
if (!data)
- return;
+ return -ENOMEM;
memcpy(data, addac->ia_array, size);
addac->ia_array = data;
@@ -120,6 +120,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9287Common_japan_2484_cck_fir_coeff_9287_1_1);
}
+ return 0;
}
static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
@@ -409,22 +410,30 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
}
/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
-void ar9002_hw_attach_ops(struct ath_hw *ah)
+int ar9002_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+ int ret;
+
+ ret = ar9002_hw_init_mode_regs(ah);
+ if (ret)
+ return ret;
- priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
ops->config_pci_powersave = ar9002_hw_configpcipowersave;
- ar5008_hw_attach_phy_ops(ah);
+ ret = ar5008_hw_attach_phy_ops(ah);
+ if (ret)
+ return ret;
+
if (AR_SREV_9280_20_OR_LATER(ah))
ar9002_hw_attach_phy_ops(ah);
ar9002_hw_attach_calib_ops(ah);
ar9002_hw_attach_mac_ops(ah);
+ return 0;
}
void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 846dd7974eb8..f4003512d8d5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -555,14 +555,73 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
}
+static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
+ struct ath_spec_scan *param)
+{
+ u8 count;
+
+ if (!param->enabled) {
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
+ return;
+ }
+ REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+
+ if (param->short_repeat)
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+
+ /* on AR92xx, the highest bit of count will make the chip send
+ * spectral samples endlessly. Check if this really was intended,
+ * and fix otherwise.
+ */
+ count = param->count;
+ if (param->endless)
+ count = 0x80;
+ else if (count & 0x80)
+ count = 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_COUNT, count);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_PERIOD, param->period);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_FFT_PERIOD, param->fft_period);
+
+ return;
+}
+
+static void ar9002_hw_spectral_scan_trigger(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+ /* Activate spectral scan */
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE);
+}
+
+static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Poll for spectral scan complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "spectral scan wait failed\n");
+ return;
+ }
+}
+
void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
priv_ops->set_rf_regs = NULL;
- priv_ops->rf_alloc_ext_banks = NULL;
- priv_ops->rf_free_ext_banks = NULL;
priv_ops->rf_set_freq = ar9002_hw_set_channel;
priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
priv_ops->olc_init = ar9002_olc_init;
@@ -571,6 +630,9 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
ops->antdiv_comb_conf_get = ar9002_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9002_hw_antdiv_comb_conf_set;
+ ops->spectral_scan_config = ar9002_hw_spectral_scan_config;
+ ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger;
+ ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait;
ar9002_hw_set_nf_limits(ah);
}
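The comment in ar9002_hw_spectral_scan_config() above notes that on AR92xx the top bit of the sample count makes the chip stream spectral samples forever, so the driver clamps it. The clamping rule, isolated as a plain helper for clarity (a restatement of the code above, not a new API):

#include <stdbool.h>

typedef unsigned char u8;

/* 0x80 selects endless sampling; otherwise the top bit must stay clear. */
static u8 spectral_scan_count(bool endless, u8 requested)
{
        if (endless)
                return 0x80;
        if (requested & 0x80)
                return 0x7f;
        return requested;
}

int main(void) { return spectral_scan_count(false, 0xff) == 0x7f ? 0 : 1; }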
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 262e1e036fd7..db5ffada2217 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -744,6 +744,186 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
+static const u32 ar9300Modes_mixed_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x52022470, 0x52022470, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x55022490, 0x55022490, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59022492, 0x59022492, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d022692, 0x5d022692, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61022892, 0x61022892, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x65024890, 0x65024890, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x69024892, 0x69024892, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x52822470, 0x52822470, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x55822490, 0x55822490, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59822492, 0x59822492, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61822892, 0x61822892, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x65824890, 0x65824890, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x69824892, 0x69824892, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_type5_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
/* Addr allmodes */
{0x0000a000, 0x00010000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 8b0d8dcd7625..4cc13940c895 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -32,7 +32,6 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
- TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -49,7 +48,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
*/
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
- currCal->calData->calCountMax);
+ currCal->calData->calCountMax);
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
ath_dbg(common, CALIBRATE,
@@ -58,14 +57,8 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
- case TEMP_COMP_CAL:
- REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
- AR_PHY_65NM_CH0_THERM_LOCAL, 1);
- REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
- AR_PHY_65NM_CH0_THERM_START, 1);
-
- ath_dbg(common, CALIBRATE,
- "starting Temperature Compensation Calibration\n");
+ default:
+ ath_err(common, "Invalid calibration type\n");
break;
}
}
@@ -323,6 +316,14 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->enabled_cals |= TX_IQ_CAL;
+ if (AR_SREV_9485_OR_LATER(ah) && !AR_SREV_9340(ah))
+ ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
+ }
+
+ ah->supp_cals = IQ_MISMATCH_CAL;
}
/*
@@ -959,22 +960,70 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
}
+static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int i;
+
+ if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah))
+ return;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
+ }
+}
+
+static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
+{
+ u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0,
+ AR_PHY_CL_TAB_1,
+ AR_PHY_CL_TAB_2 };
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool txclcal_done = false;
+ int i, j;
+
+ if (!caldata || !(ah->enabled_cals & TX_CL_CAL))
+ return;
+
+ txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_CLC_SUCCESS);
+
+ if (caldata->done_txclcal_once) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
+ REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]),
+ caldata->tx_clcal[i][j]);
+ }
+ } else if (is_reusable && txclcal_done) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
+ caldata->tx_clcal[i][j] =
+ REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
+ }
+ caldata->done_txclcal_once = true;
+ }
+}
+
static bool ar9003_hw_init_cal(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
- bool txiqcal_done = false, txclcal_done = false;
+ bool txiqcal_done = false;
bool is_reusable = true, status = true;
- bool run_rtt_cal = false, run_agc_cal;
+ bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
- int i, j;
- u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0,
- AR_PHY_CL_TAB_1,
- AR_PHY_CL_TAB_2 };
+
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
if (rtt) {
if (!ar9003_hw_rtt_restore(ah, chan))
@@ -1012,7 +1061,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
}
}
- if (!(ah->enabled_cals & TX_IQ_CAL))
+ if ((IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan)) ||
+ !(ah->enabled_cals & TX_IQ_CAL))
goto skip_tx_iqcal;
/* Do Tx IQ Calibration */
@@ -1032,21 +1082,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
- goto skip_tx_iqcal;
- } else if (caldata && !caldata->done_txiqcal_once)
+ } else if (caldata && !caldata->done_txiqcal_once) {
run_agc_cal = true;
+ sep_iq_cal = true;
+ }
+skip_tx_iqcal:
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
ar9003_mci_init_cal_req(ah, &is_reusable);
- if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
+ if (sep_iq_cal) {
txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
}
-skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1057,14 +1108,8 @@ skip_tx_iqcal:
status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
- continue;
- ar9003_hw_manual_peak_cal(ah, i,
- IS_CHAN_2GHZ(chan));
- }
- }
+
+ ar9003_hw_do_manual_peak_cal(ah, chan);
}
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1089,31 +1134,7 @@ skip_tx_iqcal:
else if (caldata && caldata->done_txiqcal_once)
ar9003_hw_tx_iq_cal_reload(ah);
-#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
- if (caldata && (ah->enabled_cals & TX_CL_CAL)) {
- txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
- AR_PHY_AGC_CONTROL_CLC_SUCCESS);
- if (caldata->done_txclcal_once) {
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->txchainmask & (1 << i)))
- continue;
- for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
- REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]),
- caldata->tx_clcal[i][j]);
- }
- } else if (is_reusable && txclcal_done) {
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->txchainmask & (1 << i)))
- continue;
- for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
- caldata->tx_clcal[i][j] =
- REG_READ(ah,
- CL_TAB_ENTRY(cl_idx[i]));
- }
- caldata->done_txclcal_once = true;
- }
- }
-#undef CL_TAB_ENTRY
+ ar9003_hw_cl_cal_post_proc(ah, is_reusable);
if (run_rtt_cal && caldata) {
if (is_reusable) {
@@ -1131,20 +1152,10 @@ skip_tx_iqcal:
/* Initialize list pointers */
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
- ah->supp_cals = IQ_MISMATCH_CAL;
-
- if (ah->supp_cals & IQ_MISMATCH_CAL) {
- INIT_CAL(&ah->iq_caldata);
- INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
- }
- if (ah->supp_cals & TEMP_COMP_CAL) {
- INIT_CAL(&ah->tempCompCalData);
- INSERT_CAL(ah, &ah->tempCompCalData);
- ath_dbg(common, CALIBRATE,
- "enabling Temperature Compensation Calibration\n");
- }
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
/* Initialize current pointer to first element in list */
ah->cal_list_curr = ah->cal_list;
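For reference, the carrier-leak post-processing factored out above keeps the register addressing that the removed CL_TAB_ENTRY() macro encoded: entry j of a chain's closed-loop table sits 4 bytes past the previous one, starting at that chain's AR_PHY_CL_TAB_n base. A minimal standalone sketch of that layout (the example_ name is hypothetical, not part of the driver):

        /* Sketch only: address of entry 'entry' in the carrier-leak table
         * whose base register is 'reg_base' (AR_PHY_CL_TAB_0/1/2 above),
         * i.e. the same reg_base + (4 * j) that CL_TAB_ENTRY() expands to.
         */
        static u32 example_cl_tab_entry(u32 reg_base, int entry)
        {
                return reg_base + (4 * entry);
        }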
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 562186ca9b52..881e989ea470 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4586,14 +4586,14 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
return 0;
}
-static int ar9003_hw_power_control_override(struct ath_hw *ah,
- int frequency,
- int *correction,
- int *voltage, int *temperature)
+static void ar9003_hw_power_control_override(struct ath_hw *ah,
+ int frequency,
+ int *correction,
+ int *voltage, int *temperature)
{
- int tempSlope = 0;
+ int temp_slope = 0, temp_slope1 = 0, temp_slope2 = 0;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- int f[8], t[8], i;
+ int f[8], t[8], t1[3], t2[3], i;
REG_RMW(ah, AR_PHY_TPC_11_B0,
(correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
@@ -4624,38 +4624,108 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
* enable temperature compensation
* Need to use register names
*/
- if (frequency < 4000)
- tempSlope = eep->modalHeader2G.tempSlope;
- else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
- for (i = 0; i < 8; i++) {
- t[i] = eep->base_ext1.tempslopextension[i];
- f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
+ if (frequency < 4000) {
+ temp_slope = eep->modalHeader2G.tempSlope;
+ } else {
+ if (AR_SREV_9550(ah)) {
+ t[0] = eep->base_ext1.tempslopextension[2];
+ t1[0] = eep->base_ext1.tempslopextension[3];
+ t2[0] = eep->base_ext1.tempslopextension[4];
+ f[0] = 5180;
+
+ t[1] = eep->modalHeader5G.tempSlope;
+ t1[1] = eep->base_ext1.tempslopextension[0];
+ t2[1] = eep->base_ext1.tempslopextension[1];
+ f[1] = 5500;
+
+ t[2] = eep->base_ext1.tempslopextension[5];
+ t1[2] = eep->base_ext1.tempslopextension[6];
+ t2[2] = eep->base_ext1.tempslopextension[7];
+ f[2] = 5785;
+
+ temp_slope = ar9003_hw_power_interpolate(frequency,
+ f, t, 3);
+ temp_slope1 = ar9003_hw_power_interpolate(frequency,
+ f, t1, 3);
+ temp_slope2 = ar9003_hw_power_interpolate(frequency,
+ f, t2, 3);
+
+ goto tempslope;
}
- tempSlope = ar9003_hw_power_interpolate((s32) frequency,
- f, t, 8);
- } else if (eep->base_ext2.tempSlopeLow != 0) {
- t[0] = eep->base_ext2.tempSlopeLow;
- f[0] = 5180;
- t[1] = eep->modalHeader5G.tempSlope;
- f[1] = 5500;
- t[2] = eep->base_ext2.tempSlopeHigh;
- f[2] = 5785;
- tempSlope = ar9003_hw_power_interpolate((s32) frequency,
- f, t, 3);
- } else
- tempSlope = eep->modalHeader5G.tempSlope;
- REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
+ if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
+ for (i = 0; i < 8; i++) {
+ t[i] = eep->base_ext1.tempslopextension[i];
+ f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
+ }
+ temp_slope = ar9003_hw_power_interpolate((s32) frequency,
+ f, t, 8);
+ } else if (eep->base_ext2.tempSlopeLow != 0) {
+ t[0] = eep->base_ext2.tempSlopeLow;
+ f[0] = 5180;
+ t[1] = eep->modalHeader5G.tempSlope;
+ f[1] = 5500;
+ t[2] = eep->base_ext2.tempSlopeHigh;
+ f[2] = 5785;
+ temp_slope = ar9003_hw_power_interpolate((s32) frequency,
+ f, t, 3);
+ } else {
+ temp_slope = eep->modalHeader5G.tempSlope;
+ }
+ }
+
+tempslope:
+ if (AR_SREV_9550(ah)) {
+ /*
+	 * AR955x has a tempSlope register for each chain.
+	 * Check whether the temp_compensation feature is enabled.
+ */
+ if (eep->baseEepHeader.featureEnable & 0x1) {
+ if (frequency < 4000) {
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeLow);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeHigh);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope1);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope2);
+ }
+ } else {
+ /*
+ * If temp compensation is not enabled,
+ * set all registers to 0.
+ */
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ }
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM, temp_slope);
+ }
if (AR_SREV_9462_20(ah))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
- AR_PHY_TPC_19_B1_ALPHA_THERM, tempSlope);
+ AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope);
REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
temperature[0]);
-
- return 0;
}
/* Apply the recorded correction values. */
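The AR955x branch above feeds three (frequency, slope) piers at 5180, 5500 and 5785 MHz into ar9003_hw_power_interpolate() to obtain temp_slope, temp_slope1 and temp_slope2 for the current channel. The helper itself is not shown in this hunk; as used here it amounts to a piecewise-linear interpolation over the pier arrays. An illustrative two-point version, assuming that behaviour (the example_ name is hypothetical):

        /* Illustration only: linear interpolation between two bounding piers.
         * With piers at 5180 and 5500 MHz, a 5300 MHz channel would get
         * y0 + (y1 - y0) * (5300 - 5180) / (5500 - 5180).
         */
        static int example_interpolate(int target, int x0, int y0, int x1, int y1)
        {
                if (x1 == x0)
                        return y0;
                return y0 + ((y1 - y0) * (target - x0)) / (x1 - x0);
        }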
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 74fd3977feeb..a3523c969a3a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -507,28 +507,59 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_mixed_ob_db_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
+{
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_green_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_ub124_tx_gain_table_1p0);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_type5_tx_gain_table);
+ else if (AR_SREV_9300_22(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_type5_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_green_spur_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_type6_tx_gain_table);
}
+typedef void (*ath_txgain_tab)(struct ath_hw *ah);
+
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
{
- switch (ar9003_hw_get_tx_gain_idx(ah)) {
- case 0:
- default:
- ar9003_tx_gain_table_mode0(ah);
- break;
- case 1:
- ar9003_tx_gain_table_mode1(ah);
- break;
- case 2:
- ar9003_tx_gain_table_mode2(ah);
- break;
- case 3:
- ar9003_tx_gain_table_mode3(ah);
- break;
- case 4:
- ar9003_tx_gain_table_mode4(ah);
- break;
- }
+ static const ath_txgain_tab modes[] = {
+ ar9003_tx_gain_table_mode0,
+ ar9003_tx_gain_table_mode1,
+ ar9003_tx_gain_table_mode2,
+ ar9003_tx_gain_table_mode3,
+ ar9003_tx_gain_table_mode4,
+ ar9003_tx_gain_table_mode5,
+ ar9003_tx_gain_table_mode6,
+ };
+ int idx = ar9003_hw_get_tx_gain_idx(ah);
+
+ if (idx >= ARRAY_SIZE(modes))
+ idx = 0;
+
+ modes[idx](ah);
}
static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
@@ -544,7 +575,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
ar9340Common_rx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1);
+ ar9485_common_rx_gain_1_1);
else if (AR_SREV_9550(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar955x_1p0_common_rx_gain_table);
@@ -673,7 +704,7 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
- priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
+ ar9003_hw_init_mode_regs(ah);
priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
ops->config_pci_powersave = ar9003_hw_configpcipowersave;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index ce19c09fa8e8..2bf6548dd143 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -68,7 +68,7 @@ static const int m2ThreshExt_off = 127;
static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
u16 bMode, fracMode = 0, aModeRefSel = 0;
- u32 freq, channelSel = 0, reg32 = 0;
+ u32 freq, chan_frac, div, channelSel = 0, reg32 = 0;
struct chan_centers centers;
int loadSynthChannel;
@@ -77,9 +77,6 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
if (freq < 4800) { /* 2 GHz, fractional mode */
if (AR_SREV_9330(ah)) {
- u32 chan_frac;
- u32 div;
-
if (ah->is_clk_25mhz)
div = 75;
else
@@ -89,34 +86,40 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
chan_frac = (((freq * 4) % div) * 0x20000) / div;
channelSel = (channelSel << 17) | chan_frac;
} else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
- u32 chan_frac;
-
/*
- * freq_ref = 40 / (refdiva >> amoderefsel); where refdiva=1 and amoderefsel=0
+ * freq_ref = 40 / (refdiva >> amoderefsel);
+ * where refdiva=1 and amoderefsel=0
* ndiv = ((chan_mhz * 4) / 3) / freq_ref;
* chansel = int(ndiv), chanfrac = (ndiv - chansel) * 0x20000
*/
channelSel = (freq * 4) / 120;
chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
channelSel = (channelSel << 17) | chan_frac;
- } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
+ } else if (AR_SREV_9340(ah)) {
if (ah->is_clk_25mhz) {
- u32 chan_frac;
-
channelSel = (freq * 2) / 75;
chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
- } else
+ } else {
channelSel = CHANSEL_2G(freq) >> 1;
- } else
+ }
+ } else if (AR_SREV_9550(ah)) {
+ if (ah->is_clk_25mhz)
+ div = 75;
+ else
+ div = 120;
+
+ channelSel = (freq * 4) / div;
+ chan_frac = (((freq * 4) % div) * 0x20000) / div;
+ channelSel = (channelSel << 17) | chan_frac;
+ } else {
channelSel = CHANSEL_2G(freq);
+ }
/* Set to 2G mode */
bMode = 1;
} else {
if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
ah->is_clk_25mhz) {
- u32 chan_frac;
-
channelSel = freq / 75;
chan_frac = ((freq % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
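As a quick sanity check of the fractional-N arithmetic in the 2 GHz branches above (the AR9485/AR9565 path and the 40 MHz AR9550 path, where div = 120), here is the computation worked through for an assumed channel at 2437 MHz:

        /*
         * freq = 2437 MHz, div = 120 (40 MHz reference clock):
         *   freq * 4   = 9748
         *   channelSel = 9748 / 120                  = 81      (integer ndiv)
         *   chan_frac  = (9748 % 120) * 0x20000 / 120
         *              = 28 * 131072 / 120           = 30583   (0x7777)
         *   channelSel = (81 << 17) | 30583          = 0x00a27777
         */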
@@ -586,32 +589,19 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
ath9k_hw_synth_delay(ah, chan, synthDelay);
}
-static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
{
- switch (rx) {
- case 0x5:
+ if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
- case 0x3:
- case 0x1:
- case 0x2:
- case 0x7:
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
- break;
- default:
- break;
- }
+
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
- REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
- else
- REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ tx = 3;
- if (tx == 0x5) {
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- }
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
}
/*
@@ -1450,6 +1440,67 @@ set_rfmode:
return 0;
}
+static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
+ struct ath_spec_scan *param)
+{
+ u8 count;
+
+ if (!param->enabled) {
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
+ return;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+
+	/* on AR93xx and newer, count = 0 will make the chip send
+ * spectral samples endlessly. Check if this really was intended,
+ * and fix otherwise.
+ */
+ count = param->count;
+ if (param->endless)
+ count = 0;
+ else if (param->count == 0)
+ count = 1;
+
+ if (param->short_repeat)
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_COUNT, count);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_PERIOD, param->period);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_FFT_PERIOD, param->fft_period);
+
+ return;
+}
+
+static void ar9003_hw_spectral_scan_trigger(struct ath_hw *ah)
+{
+ /* Activate spectral scan */
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE);
+}
+
+static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Poll for spectral scan complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "spectral scan wait failed\n");
+ return;
+ }
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1483,6 +1534,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
+ ops->spectral_scan_config = ar9003_hw_spectral_scan_config;
+ ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger;
+ ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait;
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
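The three spectral-scan hooks installed above are reached through ath_hw_ops. A hypothetical caller (the example_ name is not part of this patch) would drive one scan roughly as follows, using only the ath_spec_scan fields and op pointers visible in this hunk:

        /* Hypothetical usage sketch, not part of this patch. */
        static void example_run_spectral_scan(struct ath_hw *ah)
        {
                struct ath_hw_ops *ops = ath9k_hw_ops(ah);
                struct ath_spec_scan param = {
                        .enabled      = true,
                        .short_repeat = true,
                        .count        = 8,      /* 0 is promoted to 1 unless endless is set */
                        .endless      = false,
                        .period       = 0xff,
                        .fft_period   = 0xf,
                };

                ops->spectral_scan_config(ah, &param); /* program the PHY registers */
                ops->spectral_scan_trigger(ah);        /* set SPECTRAL_SCAN_ACTIVE */
                ops->spectral_scan_wait(ah);           /* poll until ACTIVE clears */
        }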
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 107956298488..e71774196c01 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -1028,7 +1028,7 @@
#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
-#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
+#define AR_PHY_TPC_19_B2 (AR_SM2_BASE + 0x240)
#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
#define AR_PHY_TX_IQCAL_CORR_COEFF_B2(_i) (AR_SM2_BASE + 0x450 + ((_i) << 2))
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index f69d292bdc02..25db9215985a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -1172,6 +1172,106 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
{0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
};
+static const u32 ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03eaac5a, 0x03eaac5a},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03f330ac, 0x03f330ac},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc3f00, 0x03fc3f00},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ffc000, 0x03ffc000},
+ {0x0000a394, 0x00000444, 0x00000444, 0x00000404, 0x00000404},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x02000001, 0x02000001},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x05000003, 0x05000003},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0a000005, 0x0a000005},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0e000201, 0x0e000201},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000203, 0x11000203},
+ {0x0000a518, 0x21002220, 0x21002220, 0x14000401, 0x14000401},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x18000403, 0x18000403},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000602, 0x1b000602},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000802, 0x1f000802},
+ {0x0000a528, 0x34022225, 0x34022225, 0x21000620, 0x21000620},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x25000820, 0x25000820},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x29000822, 0x29000822},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2d000824, 0x2d000824},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x30000828, 0x30000828},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x3400082a, 0x3400082a},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38000849, 0x38000849},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b000a2c, 0x3b000a2c},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e000e2b, 0x3e000e2b},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42000e2d, 0x42000e2d},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4500124a, 0x4500124a},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4900124c, 0x4900124c},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c00126c, 0x4c00126c},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x4f00128c, 0x4f00128c},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x52001290, 0x52001290},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x02800001, 0x02800001},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x05800003, 0x05800003},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0a800005, 0x0a800005},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0e800201, 0x0e800201},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800203, 0x11800203},
+ {0x0000a598, 0x21820220, 0x21820220, 0x14800401, 0x14800401},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x18800403, 0x18800403},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800602, 0x1b800602},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800802, 0x1f800802},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x21800620, 0x21800620},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x25800820, 0x25800820},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x29800822, 0x29800822},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2d800824, 0x2d800824},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x30800828, 0x30800828},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x3480082a, 0x3480082a},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38800849, 0x38800849},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b800a2c, 0x3b800a2c},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e800e2b, 0x3e800e2b},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42800e2d, 0x42800e2d},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4580124a, 0x4580124a},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4980124c, 0x4980124c},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c80126c, 0x4c80126c},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x4f80128c, 0x4f80128c},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x52801290, 0x52801290},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404501, 0x01404501},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x01404501, 0x01404501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x03c0cf02, 0x03c0cf02},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03c0cf03, 0x03c0cf03},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04011004, 0x04011004},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x05419405, 0x05419405},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03eaac5a, 0x03eaac5a},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03f330ac, 0x03f330ac},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc3f00, 0x03fc3f00},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ffc000, 0x03ffc000},
+ {0x00016044, 0x022492db, 0x022492db, 0x022492db, 0x022492db},
+ {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000},
+ {0x00016444, 0x022492db, 0x022492db, 0x022492db, 0x022492db},
+ {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+};
+
static const u32 ar9340_1p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index a3710f3bb90c..712f415b8c08 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -260,6 +260,79 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
+static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+ {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+ {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+ {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+ {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
@@ -450,6 +523,79 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+ {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x07000203, 0x07000203},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x14000406, 0x14000406},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1800040a, 0x1800040a},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000460, 0x1c000460},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x22000463, 0x22000463},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x26000465, 0x26000465},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2e0006e0, 0x2e0006e0},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+ {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+ {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+ {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
static const u32 ar9485_1_1[][2] = {
/* Addr allmodes */
{0x0000a580, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index df97f21c52dc..ccc5b6c99add 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -23,16 +23,16 @@
static const u32 ar955x_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
- {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x06345f2a, 0x06345f2a},
- {0x000160ac, 0xa4647c00, 0xa4647c00, 0xa4646800, 0xa4646800},
- {0x000160b0, 0x01885f52, 0x01885f52, 0x04accf3a, 0x04accf3a},
- {0x00016104, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0a566f3a, 0x0a566f3a},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24647c00, 0x24647c00},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x01885f52, 0x01885f52},
+ {0x00016104, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
{0x0001610c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016140, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
- {0x00016504, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x00016504, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
{0x0001650c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016540, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
- {0x00016904, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x00016904, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
{0x0001690c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
};
@@ -69,15 +69,15 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
{0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
{0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
- {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
{0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
{0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
- {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
{0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
- {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
@@ -125,7 +125,7 @@ static const u32 ar955x_1p0_radio_core[][2] = {
{0x00016094, 0x00000000},
{0x000160a0, 0x0a108ffe},
{0x000160a4, 0x812fc370},
- {0x000160a8, 0x423c8000},
+ {0x000160a8, 0x423c8100},
{0x000160b4, 0x92480080},
{0x000160c0, 0x006db6d0},
{0x000160c4, 0x6db6db60},
@@ -134,7 +134,7 @@ static const u32 ar955x_1p0_radio_core[][2] = {
{0x00016100, 0x11999601},
{0x00016108, 0x00080010},
{0x00016144, 0x02084080},
- {0x00016148, 0x000080c0},
+ {0x00016148, 0x00008040},
{0x00016280, 0x01800804},
{0x00016284, 0x00038dc5},
{0x00016288, 0x00000000},
@@ -178,7 +178,7 @@ static const u32 ar955x_1p0_radio_core[][2] = {
{0x00016500, 0x11999601},
{0x00016508, 0x00080010},
{0x00016544, 0x02084080},
- {0x00016548, 0x000080c0},
+ {0x00016548, 0x00008040},
{0x00016780, 0x00000000},
{0x00016784, 0x00000000},
{0x00016788, 0x00400705},
@@ -218,7 +218,7 @@ static const u32 ar955x_1p0_radio_core[][2] = {
{0x00016900, 0x11999601},
{0x00016908, 0x00080010},
{0x00016944, 0x02084080},
- {0x00016948, 0x000080c0},
+ {0x00016948, 0x00008040},
{0x00016b80, 0x00000000},
{0x00016b84, 0x00000000},
{0x00016b88, 0x00400705},
@@ -245,9 +245,9 @@ static const u32 ar955x_1p0_radio_core[][2] = {
static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = {
/* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
- {0x0000a2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
- {0x0000a2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000a2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000a2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
{0x0000a2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
{0x0000a410, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050da, 0x000050da},
{0x0000a500, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000000, 0x00000000},
@@ -256,63 +256,63 @@ static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = {
{0x0000a50c, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c000006, 0x0c000006},
{0x0000a510, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x0f00000a, 0x0f00000a},
{0x0000a514, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x1300000c, 0x1300000c},
- {0x0000a518, 0x19004008, 0x19004008, 0x19004008, 0x19004008, 0x18004008, 0x18004008, 0x1700000e, 0x1700000e},
- {0x0000a51c, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1c00400a, 0x1c00400a, 0x1b000064, 0x1b000064},
- {0x0000a520, 0x230020a2, 0x230020a2, 0x210020a2, 0x210020a2, 0x200020a2, 0x200020a2, 0x1f000242, 0x1f000242},
- {0x0000a524, 0x2500006e, 0x2500006e, 0x2500006e, 0x2500006e, 0x2400006e, 0x2400006e, 0x23000229, 0x23000229},
- {0x0000a528, 0x29022221, 0x29022221, 0x28022221, 0x28022221, 0x27022221, 0x27022221, 0x270002a2, 0x270002a2},
- {0x0000a52c, 0x2d00062a, 0x2d00062a, 0x2c00062a, 0x2c00062a, 0x2a00062a, 0x2a00062a, 0x2c001203, 0x2c001203},
- {0x0000a530, 0x340220a5, 0x340220a5, 0x320220a5, 0x320220a5, 0x2f0220a5, 0x2f0220a5, 0x30001803, 0x30001803},
- {0x0000a534, 0x380022c5, 0x380022c5, 0x350022c5, 0x350022c5, 0x320022c5, 0x320022c5, 0x33000881, 0x33000881},
- {0x0000a538, 0x3b002486, 0x3b002486, 0x39002486, 0x39002486, 0x36002486, 0x36002486, 0x38001809, 0x38001809},
- {0x0000a53c, 0x3f00248a, 0x3f00248a, 0x3d00248a, 0x3d00248a, 0x3a00248a, 0x3a00248a, 0x3a000814, 0x3a000814},
- {0x0000a540, 0x4202242c, 0x4202242c, 0x4102242c, 0x4102242c, 0x3f02242c, 0x3f02242c, 0x3f001a0c, 0x3f001a0c},
- {0x0000a544, 0x490044c6, 0x490044c6, 0x460044c6, 0x460044c6, 0x420044c6, 0x420044c6, 0x43001a0e, 0x43001a0e},
- {0x0000a548, 0x4d024485, 0x4d024485, 0x4a024485, 0x4a024485, 0x46024485, 0x46024485, 0x46001812, 0x46001812},
- {0x0000a54c, 0x51044483, 0x51044483, 0x4e044483, 0x4e044483, 0x4a044483, 0x4a044483, 0x49001884, 0x49001884},
- {0x0000a550, 0x5404a40c, 0x5404a40c, 0x5204a40c, 0x5204a40c, 0x4d04a40c, 0x4d04a40c, 0x4d001e84, 0x4d001e84},
- {0x0000a554, 0x57024632, 0x57024632, 0x55024632, 0x55024632, 0x52024632, 0x52024632, 0x50001e69, 0x50001e69},
- {0x0000a558, 0x5c00a634, 0x5c00a634, 0x5900a634, 0x5900a634, 0x5600a634, 0x5600a634, 0x550006f4, 0x550006f4},
- {0x0000a55c, 0x5f026832, 0x5f026832, 0x5d026832, 0x5d026832, 0x5a026832, 0x5a026832, 0x59000ad3, 0x59000ad3},
- {0x0000a560, 0x6602b012, 0x6602b012, 0x6202b012, 0x6202b012, 0x5d02b012, 0x5d02b012, 0x5e000ad5, 0x5e000ad5},
- {0x0000a564, 0x6e02d0e1, 0x6e02d0e1, 0x6802d0e1, 0x6802d0e1, 0x6002d0e1, 0x6002d0e1, 0x61001ced, 0x61001ced},
- {0x0000a568, 0x7202b4c4, 0x7202b4c4, 0x6c02b4c4, 0x6c02b4c4, 0x6502b4c4, 0x6502b4c4, 0x660018d4, 0x660018d4},
- {0x0000a56c, 0x75007894, 0x75007894, 0x70007894, 0x70007894, 0x6b007894, 0x6b007894, 0x660018d4, 0x660018d4},
- {0x0000a570, 0x7b025c74, 0x7b025c74, 0x75025c74, 0x75025c74, 0x70025c74, 0x70025c74, 0x660018d4, 0x660018d4},
- {0x0000a574, 0x8300bcb5, 0x8300bcb5, 0x7a00bcb5, 0x7a00bcb5, 0x7600bcb5, 0x7600bcb5, 0x660018d4, 0x660018d4},
- {0x0000a578, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
- {0x0000a57c, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
+ {0x0000a518, 0x1700002b, 0x1700002b, 0x1700002b, 0x1700002b, 0x1600002b, 0x1600002b, 0x1700000e, 0x1700000e},
+ {0x0000a51c, 0x1b00002d, 0x1b00002d, 0x1b00002d, 0x1b00002d, 0x1a00002d, 0x1a00002d, 0x1b000064, 0x1b000064},
+ {0x0000a520, 0x20000031, 0x20000031, 0x1f000031, 0x1f000031, 0x1e000031, 0x1e000031, 0x1f000242, 0x1f000242},
+ {0x0000a524, 0x24000051, 0x24000051, 0x23000051, 0x23000051, 0x23000051, 0x23000051, 0x23000229, 0x23000229},
+ {0x0000a528, 0x27000071, 0x27000071, 0x27000071, 0x27000071, 0x26000071, 0x26000071, 0x270002a2, 0x270002a2},
+ {0x0000a52c, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2c001203, 0x2c001203},
+ {0x0000a530, 0x3000028c, 0x3000028c, 0x2f00028c, 0x2f00028c, 0x2e00028c, 0x2e00028c, 0x30001803, 0x30001803},
+ {0x0000a534, 0x34000290, 0x34000290, 0x33000290, 0x33000290, 0x32000290, 0x32000290, 0x33000881, 0x33000881},
+ {0x0000a538, 0x37000292, 0x37000292, 0x36000292, 0x36000292, 0x35000292, 0x35000292, 0x38001809, 0x38001809},
+ {0x0000a53c, 0x3b02028d, 0x3b02028d, 0x3a02028d, 0x3a02028d, 0x3902028d, 0x3902028d, 0x3a000814, 0x3a000814},
+ {0x0000a540, 0x3f020291, 0x3f020291, 0x3e020291, 0x3e020291, 0x3d020291, 0x3d020291, 0x3f001a0c, 0x3f001a0c},
+ {0x0000a544, 0x44020490, 0x44020490, 0x43020490, 0x43020490, 0x42020490, 0x42020490, 0x43001a0e, 0x43001a0e},
+ {0x0000a548, 0x48020492, 0x48020492, 0x47020492, 0x47020492, 0x46020492, 0x46020492, 0x46001812, 0x46001812},
+ {0x0000a54c, 0x4c020692, 0x4c020692, 0x4b020692, 0x4b020692, 0x4a020692, 0x4a020692, 0x49001884, 0x49001884},
+ {0x0000a550, 0x50020892, 0x50020892, 0x4f020892, 0x4f020892, 0x4e020892, 0x4e020892, 0x4d001e84, 0x4d001e84},
+ {0x0000a554, 0x53040891, 0x53040891, 0x53040891, 0x53040891, 0x52040891, 0x52040891, 0x50001e69, 0x50001e69},
+ {0x0000a558, 0x58040893, 0x58040893, 0x57040893, 0x57040893, 0x56040893, 0x56040893, 0x550006f4, 0x550006f4},
+ {0x0000a55c, 0x5c0408b4, 0x5c0408b4, 0x5a0408b4, 0x5a0408b4, 0x5a0408b4, 0x5a0408b4, 0x59000ad3, 0x59000ad3},
+ {0x0000a560, 0x610408b6, 0x610408b6, 0x5e0408b6, 0x5e0408b6, 0x5e0408b6, 0x5e0408b6, 0x5e000ad5, 0x5e000ad5},
+ {0x0000a564, 0x670408f6, 0x670408f6, 0x620408f6, 0x620408f6, 0x620408f6, 0x620408f6, 0x61001ced, 0x61001ced},
+ {0x0000a568, 0x6a040cf6, 0x6a040cf6, 0x66040cf6, 0x66040cf6, 0x66040cf6, 0x66040cf6, 0x660018d4, 0x660018d4},
+ {0x0000a56c, 0x6d040d76, 0x6d040d76, 0x6a040d76, 0x6a040d76, 0x6a040d76, 0x6a040d76, 0x660018d4, 0x660018d4},
+ {0x0000a570, 0x70060db6, 0x70060db6, 0x6e060db6, 0x6e060db6, 0x6e060db6, 0x6e060db6, 0x660018d4, 0x660018d4},
+ {0x0000a574, 0x730a0df6, 0x730a0df6, 0x720a0df6, 0x720a0df6, 0x720a0df6, 0x720a0df6, 0x660018d4, 0x660018d4},
+ {0x0000a578, 0x770a13f6, 0x770a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x660018d4, 0x660018d4},
+ {0x0000a57c, 0x770a13f6, 0x770a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x660018d4, 0x660018d4},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03804000, 0x03804000},
- {0x0000a610, 0x04c08c01, 0x04c08c01, 0x04808b01, 0x04808b01, 0x04808a01, 0x04808a01, 0x0300ca02, 0x0300ca02},
- {0x0000a614, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00000e04, 0x00000e04},
- {0x0000a618, 0x04010c01, 0x04010c01, 0x03c10b01, 0x03c10b01, 0x03810a01, 0x03810a01, 0x03014000, 0x03014000},
- {0x0000a61c, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x00000000, 0x00000000},
- {0x0000a620, 0x04010303, 0x04010303, 0x03c10303, 0x03c10303, 0x03810303, 0x03810303, 0x00000000, 0x00000000},
- {0x0000a624, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03014000, 0x03014000},
- {0x0000a628, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x03804c05, 0x03804c05},
- {0x0000a62c, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x0701de06, 0x0701de06},
- {0x0000a630, 0x03418000, 0x03418000, 0x03018000, 0x03018000, 0x02c18000, 0x02c18000, 0x07819c07, 0x07819c07},
- {0x0000a634, 0x03815004, 0x03815004, 0x03414f04, 0x03414f04, 0x03414e04, 0x03414e04, 0x0701dc07, 0x0701dc07},
- {0x0000a638, 0x03005302, 0x03005302, 0x02c05202, 0x02c05202, 0x02805202, 0x02805202, 0x0701dc07, 0x0701dc07},
- {0x0000a63c, 0x04c09302, 0x04c09302, 0x04809202, 0x04809202, 0x04809202, 0x04809202, 0x0701dc07, 0x0701dc07},
- {0x0000b2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
- {0x0000b2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
- {0x0000b2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a60c, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x03804000, 0x03804000},
+ {0x0000a610, 0x04008b01, 0x04008b01, 0x04008b01, 0x04008b01, 0x03c08b01, 0x03c08b01, 0x0300ca02, 0x0300ca02},
+ {0x0000a614, 0x05811403, 0x05811403, 0x05411303, 0x05411303, 0x05411303, 0x05411303, 0x00000e04, 0x00000e04},
+ {0x0000a618, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03014000, 0x03014000},
+ {0x0000a61c, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x00000000, 0x00000000},
+ {0x0000a620, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x00000000, 0x00000000},
+ {0x0000a624, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03014000, 0x03014000},
+ {0x0000a628, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03804c05, 0x03804c05},
+ {0x0000a62c, 0x06815604, 0x06815604, 0x06415504, 0x06415504, 0x06015504, 0x06015504, 0x0701de06, 0x0701de06},
+ {0x0000a630, 0x07819a05, 0x07819a05, 0x07419905, 0x07419905, 0x07019805, 0x07019805, 0x07819c07, 0x07819c07},
+ {0x0000a634, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000a638, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000a63c, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000b2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000b2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000b2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
{0x0000b2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
- {0x0000c2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
- {0x0000c2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
- {0x0000c2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000c2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000c2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000c2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
{0x0000c2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
{0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
- {0x00016048, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016048, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
{0x00016280, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01808e84, 0x01808e84},
{0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
- {0x00016448, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016448, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
{0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
- {0x00016848, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
};
static const u32 ar955x_1p0_mac_core[][2] = {
@@ -846,7 +846,7 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
- {0x0000a644, 0x3fad9d74},
+ {0x0000a644, 0xbfad9d74},
{0x0000a648, 0x0048060a},
{0x0000a64c, 0x00003c37},
{0x0000a670, 0x03020100},
@@ -1277,7 +1277,7 @@ static const u32 ar955x_1p0_modes_fast_clock[][3] = {
{0x0000801c, 0x148ec02b, 0x148ec057},
{0x00008318, 0x000044c0, 0x00008980},
{0x00009e00, 0x0372131c, 0x0372131c},
- {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a230, 0x0000400b, 0x00004016},
{0x0000a254, 0x00000898, 0x00001130},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 6e1915aee712..28fd99203f64 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -685,6 +685,82 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
+#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
+
+static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
static const u32 ar9580_1p0_soc_preamble[][2] = {
/* Addr allmodes */
{0x000040a4, 0x00a0c1c9},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 86e26a19efda..a56b2416e2f9 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -109,14 +109,11 @@ struct ath_descdma {
void *dd_desc;
dma_addr_t dd_desc_paddr;
u32 dd_desc_len;
- struct ath_buf *dd_bufptr;
};
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
int nbuf, int ndesc, bool is_tx);
-void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
- struct list_head *head);
/***********/
/* RX / TX */
@@ -317,18 +314,17 @@ struct ath_rx {
u32 *rxlink;
u32 num_pkts;
unsigned int rxfilter;
- spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
- struct ath_buf *rx_bufptr;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
struct sk_buff *frag;
+
+ u32 ampdu_ref;
};
int ath_startrecv(struct ath_softc *sc);
bool ath_stoprecv(struct ath_softc *sc);
-void ath_flushrecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
@@ -338,14 +334,12 @@ void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
-bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
-void ath_draintxq(struct ath_softc *sc,
- struct ath_txq *txq, bool retry_tx);
+bool ath_drain_all_txq(struct ath_softc *sc);
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq);
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_init(struct ath_softc *sc, int nbufs);
-void ath_tx_cleanup(struct ath_softc *sc);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
@@ -395,6 +389,7 @@ struct ath_beacon_config {
u16 bmiss_timeout;
u8 dtim_count;
bool enable_beacon;
+ bool ibss_creator;
};
struct ath_beacon {
@@ -646,7 +641,6 @@ void ath_ant_comb_update(struct ath_softc *sc);
enum sc_op_flags {
SC_OP_INVALID,
SC_OP_BEACONS,
- SC_OP_RXFLUSH,
SC_OP_ANI_RUN,
SC_OP_PRIM_STA_VIF,
SC_OP_HW_RESET,
@@ -675,6 +669,23 @@ struct ath9k_vif_iter_data {
int nadhocs; /* number of adhoc vifs */
};
+/* enum spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
+ * during a channel scan.
+ */
+enum spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+ SPECTRAL_CHANSCAN,
+};
+
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
@@ -743,6 +754,11 @@ struct ath_softc {
u8 ant_tx, ant_rx;
struct dfs_pattern_detector *dfs_detector;
u32 wow_enabled;
+ /* relay(fs) channel for spectral scan */
+ struct rchan *rfs_chan_spec_scan;
+ enum spectral_mode spectral_mode;
+ struct ath_spec_scan spec_config;
+ int scanning;
#ifdef CONFIG_PM_SLEEP
atomic_t wow_got_bmiss_intr;
@@ -751,6 +767,133 @@ struct ath_softc {
#endif
};
+#define SPECTRAL_SCAN_BITMASK 0x10
+/* Radar info packet format, used for DFS and spectral formats. */
+struct ath_radar_info {
+ u8 pulse_length_pri;
+ u8 pulse_length_ext;
+ u8 pulse_bw_info;
+} __packed;
+
+/* The HT20 spectral data has 4 bytes of additional information at its end.
+ *
+ * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: all bins max_magnitude[9:2]
+ * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_mag_info {
+ u8 all_bins[3];
+ u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_NUM_BINS 56
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data by -1/+2. This struct is for reference only.
+ */
+struct ath_ht20_fft_packet {
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+ struct ath_ht20_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
+
+/* Dynamic 20/40 mode:
+ *
+ * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: lower bins max_magnitude[9:2]
+ * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
+ * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: upper bins max_magnitude[9:2]
+ * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_40_mag_info {
+ u8 lower_bins[3];
+ u8 upper_bins[3];
+ u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_40_NUM_BINS 128
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data. This struct is for reference only.
+ */
+struct ath_ht20_40_fft_packet {
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+ struct ath_ht20_40_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+
+#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
+
+/* grabs the max magnitude from the all/upper/lower bins */
+static inline u16 spectral_max_magnitude(u8 *bins)
+{
+ return (bins[0] & 0xc0) >> 6 |
+ (bins[1] & 0xff) << 2 |
+ (bins[2] & 0x03) << 10;
+}
+
+/* return the max index from the all/upper/lower bins */
+static inline u8 spectral_max_index(u8 *bins)
+{
+ s8 m = (bins[2] & 0xfc) >> 2;
+
+ /* TODO: this still doesn't always report the right values ... */
+ if (m > 32)
+ m |= 0xe0;
+ else
+ m &= ~0xe0;
+
+ return m + 29;
+}
+
+/* return the bitmap weight from the all/upper/lower bins */
+static inline u8 spectral_bitmap_weight(u8 *bins)
+{
+ return bins[0] & 0x3f;
+}
+
+/* FFT sample format given to userspace via debugfs.
+ *
+ * Please keep the type/length at the front position and change
+ * other fields after adding another sample type
+ *
+ * TODO: this might need rework when switching to nl80211-based
+ * interface.
+ */
+enum ath_fft_sample_type {
+ ATH_FFT_SAMPLE_HT20 = 1,
+};
+
+struct fft_sample_tlv {
+ u8 type; /* see ath_fft_sample */
+ __be16 length;
+ /* type dependent data follows */
+} __packed;
+
+struct fft_sample_ht20 {
+ struct fft_sample_tlv tlv;
+
+ u8 max_exp;
+
+ __be16 freq;
+ s8 rssi;
+ s8 noise;
+
+ __be16 max_magnitude;
+ u8 max_index;
+ u8 bitmap_weight;
+
+ __be64 tsf;
+
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+} __packed;
+
void ath9k_tasklet(unsigned long data);
int ath_cabq_update(struct ath_softc *);
@@ -773,6 +916,10 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
bool ath9k_uses_beacons(int type);
+void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
+int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+ enum spectral_mode spectral_mode);
+
#ifdef CONFIG_ATH9K_PCI
int ath_pci_init(void);
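
The spectral helpers added to ath9k.h above pack a 12-bit max magnitude, a 6-bit max index and a 6-bit bitmap weight into the three trailing bytes of each FFT report. A minimal userspace sketch of the same unpacking (illustration only, not part of the patch; the sample bytes are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Byte layout, as documented in the hunk above:
     *   byte 0: {max_magnitude[1:0], bitmap_weight[5:0]}
     *   byte 1:  max_magnitude[9:2]
     *   byte 2: {max_index[5:0], max_magnitude[11:10]}
     */
    static unsigned max_magnitude(const uint8_t *bins)
    {
            return ((bins[0] & 0xc0) >> 6) |
                   ((unsigned)bins[1] << 2) |
                   ((unsigned)(bins[2] & 0x03) << 10);
    }

    static unsigned bitmap_weight(const uint8_t *bins)
    {
            return bins[0] & 0x3f;
    }

    int main(void)
    {
            /* encodes magnitude 0x5a7, weight 42, index bits 0x11 */
            uint8_t bins[3] = { 0xea, 0x69, 0x45 };

            printf("magnitude = %u, weight = %u\n",
                   max_magnitude(bins), bitmap_weight(bins));
            return 0;
    }

spectral_max_index() additionally sign-extends the 6-bit index and re-centers it with the +29 offset; the sketch leaves that part out.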
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 531fffd801a3..5f05c26d1ec4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -147,6 +147,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
bf->bf_buf_addr = 0;
+ bf->bf_mpdu = NULL;
}
skb = ieee80211_beacon_get(hw, vif);
@@ -198,7 +199,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
if (sc->nvifs > 1) {
ath_dbg(common, BEACON,
"Flushing previous cabq traffic\n");
- ath_draintxq(sc, cabq, false);
+ ath_draintxq(sc, cabq);
}
}
@@ -359,7 +360,6 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
bf = ath9k_beacon_generate(sc->hw, vif);
- WARN_ON(!bf);
if (sc->beacon.bmisscnt != 0) {
ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
@@ -407,12 +407,17 @@ void ath9k_beacon_tasklet(unsigned long data)
}
}
-static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, u32 intval)
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
+ u32 intval, bool reset_tsf)
{
struct ath_hw *ah = sc->sc_ah;
ath9k_hw_disable_interrupts(ah);
- ath9k_hw_reset_tsf(ah);
+ if (reset_tsf)
+ ath9k_hw_reset_tsf(ah);
ath9k_beaconq_config(sc);
ath9k_hw_beaconinit(ah, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
@@ -442,10 +447,12 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
else
ah->imask &= ~ATH9K_INT_SWBA;
- ath_dbg(common, BEACON, "AP nexttbtt: %u intval: %u conf_intval: %u\n",
+ ath_dbg(common, BEACON,
+ "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval);
+ ath9k_beacon_init(sc, nexttbtt, intval, true);
}
/*
@@ -586,17 +593,45 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
ath9k_reset_beacon_status(sc);
intval = TU_TO_USEC(conf->beacon_interval);
- nexttbtt = intval;
+
+ if (conf->ibss_creator) {
+ nexttbtt = intval;
+ } else {
+ u32 tbtt, offset, tsftu;
+ u64 tsf;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * sync'd TSF.
+ */
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+ offset = tsftu % conf->beacon_interval;
+ tbtt = tsftu - offset;
+ if (offset)
+ tbtt += conf->beacon_interval;
+
+ nexttbtt = TU_TO_USEC(tbtt);
+ }
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
else
ah->imask &= ~ATH9K_INT_SWBA;
- ath_dbg(common, BEACON, "IBSS nexttbtt: %u intval: %u conf_intval: %u\n",
+ ath_dbg(common, BEACON,
+ "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval);
+ ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator);
+
+ /*
+ * Set the global 'beacon has been configured' flag for the
+ * joiner case in IBSS mode.
+ */
+ if (!conf->ibss_creator && conf->enable_beacon)
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
}
bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
@@ -639,6 +674,7 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->listen_interval = 1;
cur_conf->dtim_count = 1;
+ cur_conf->ibss_creator = bss_conf->ibss_creator;
cur_conf->bmiss_timeout =
ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
@@ -666,34 +702,59 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
{
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ unsigned long flags;
+ bool skip_beacon = false;
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
set_bit(SC_OP_BEACONS, &sc->sc_flags);
- } else {
- /*
- * Take care of multiple interfaces when
- * enabling/disabling SWBA.
- */
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (!bss_conf->enable_beacon &&
- (sc->nbcnvifs <= 1)) {
- cur_conf->enable_beacon = false;
- } else if (bss_conf->enable_beacon) {
- cur_conf->enable_beacon = true;
- ath9k_cache_beacon_config(sc, bss_conf);
- }
+ return;
+
+ }
+
+ /*
+ * Take care of multiple interfaces when
+ * enabling/disabling SWBA.
+ */
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon &&
+ (sc->nbcnvifs <= 1)) {
+ cur_conf->enable_beacon = false;
+ } else if (bss_conf->enable_beacon) {
+ cur_conf->enable_beacon = true;
+ ath9k_cache_beacon_config(sc, bss_conf);
}
+ }
- if (cur_conf->beacon_interval) {
+ /*
+ * Configure the HW beacon registers only when we have a valid
+ * beacon interval.
+ */
+ if (cur_conf->beacon_interval) {
+ /*
+ * If we are joining an existing IBSS network, start beaconing
+ * only after a TSF-sync has taken place. Ensure that this
+ * happens by setting the appropriate flags.
+ */
+ if ((changed & BSS_CHANGED_IBSS) && !bss_conf->ibss_creator &&
+ bss_conf->enable_beacon) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ skip_beacon = true;
+ } else {
ath9k_set_beacon(sc);
-
- if (cur_conf->enable_beacon)
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
- else
- clear_bit(SC_OP_BEACONS, &sc->sc_flags);
}
+
+ /*
+ * Do not set the SC_OP_BEACONS flag for IBSS joiner mode
+ * here, it is done in ath9k_beacon_config_adhoc().
+ */
+ if (cur_conf->enable_beacon && !skip_beacon)
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ else
+ clear_bit(SC_OP_BEACONS, &sc->sc_flags);
}
}
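
In the IBSS joiner path of ath9k_beacon_config_adhoc() above, nexttbtt is derived from the already-synchronized TSF instead of resetting it: the TSF is converted to TU, rounded up to the next beacon-interval boundary, and converted back to microseconds. A small sketch of that arithmetic (the values are hypothetical, and the 2 TU FUDGE guard is an assumption mirroring the driver's constant):

    #include <stdint.h>
    #include <stdio.h>

    #define TU_US 1024u     /* one beacon timer unit in microseconds */
    #define FUDGE 2u        /* small guard in TU (assumed to match the driver) */

    int main(void)
    {
            uint64_t tsf_us = 12345678ull;  /* hypothetical synced TSF, in us */
            uint32_t intval_tu = 100;       /* beacon interval in TU */

            uint32_t tsftu = (uint32_t)(tsf_us >> 10) + FUDGE; /* TSF_TO_TU + FUDGE */
            uint32_t offset = tsftu % intval_tu;
            uint32_t tbtt = tsftu - offset;

            if (offset)
                    tbtt += intval_tu;      /* round up to the next TBTT boundary */

            printf("nexttbtt = %u us\n", (unsigned)(tbtt * TU_US));
            return 0;
    }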
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 13ff9edc2401..3714b971d18e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <linux/relay.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@@ -861,7 +862,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("RX-LENGTH-ERR", rx_len_err);
RXS_ERR("RX-OOM-ERR", rx_oom_err);
RXS_ERR("RX-RATE-ERR", rx_rate_err);
- RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);
RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
@@ -895,6 +895,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("RX-Bytes-All", rx_bytes_all);
RXS_ERR("RX-Beacons", rx_beacons);
RXS_ERR("RX-Frags", rx_frags);
+ RXS_ERR("RX-Spectral", rx_spectral);
if (len > size)
len = size;
@@ -966,6 +967,290 @@ static const struct file_operations fops_recv = {
.llseek = default_llseek,
};
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char *mode = "";
+ unsigned int len;
+
+ switch (sc->spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case SPECTRAL_CHANSCAN:
+ mode = "chanscan";
+ break;
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ ath9k_spectral_scan_trigger(sc->hw);
+ } else if (strncmp("background", buf, 9) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
+ ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
+ } else if (strncmp("chanscan", buf, 8) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
+ ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
+ } else if (strncmp("manual", buf, 6) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
+ ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
+ } else if (strncmp("disable", buf, 7) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
+ ath_dbg(common, CONFIG, "spectral scan: disabled\n");
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ sc->spec_config.short_repeat = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_short_repeat = {
+ .read = read_file_spectral_short_repeat,
+ .write = write_file_spectral_short_repeat,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.count = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_period = {
+ .read = read_file_spectral_period,
+ .write = write_file_spectral_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_fft_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_fft_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 15)
+ return -EINVAL;
+
+ sc->spec_config.fft_period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_fft_period = {
+ .read = read_file_spectral_fft_period,
+ .write = write_file_spectral_fft_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+void ath_debug_send_fft_sample(struct ath_softc *sc,
+ struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+ if (!sc->rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1780,6 +2065,24 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_base_eeprom);
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_modal_eeprom);
+ sc->rfs_chan_spec_scan = relay_open("spectral_scan",
+ sc->debug.debugfs_phy,
+ 262144, 4, &rfs_spec_scan_cb,
+ NULL);
+ debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_short_repeat);
+ debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_spectral_count);
+ debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_spectral_period);
+ debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_fft_period);
+
#ifdef CONFIG_ATH9K_MAC_DEBUG
debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_samps);
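
The new debugfs interface above accepts the strings "trigger", "background", "chanscan", "manual" and "disable" on spectral_scan_ctl, and streams fft_sample_tlv records through the relay channel opened as "spectral_scan". A rough userspace consumer might look like the following sketch (the debugfs paths and phy name are assumptions for a typical setup, not defined by the patch):

    #include <arpa/inet.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Assumed locations under a standard debugfs mount for phy0. */
    #define CTL  "/sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan_ctl"
    #define CHAN "/sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan0"

    struct fft_sample_tlv {         /* mirrors the header added in ath9k.h */
            uint8_t type;
            uint16_t length;        /* big endian on the wire */
    } __attribute__((packed));

    int main(void)
    {
            char buf[4096];
            int ctl = open(CTL, O_WRONLY);
            int chan;
            ssize_t n;

            if (ctl < 0)
                    return 1;
            write(ctl, "background", strlen("background"));
            write(ctl, "trigger", strlen("trigger"));       /* start sampling */
            close(ctl);

            chan = open(CHAN, O_RDONLY);
            if (chan < 0)
                    return 1;
            n = read(chan, buf, sizeof(buf));       /* 0 if nothing arrived yet */
            if (n >= (ssize_t)sizeof(struct fft_sample_tlv)) {
                    const struct fft_sample_tlv *tlv = (const void *)buf;
                    printf("sample type %u, payload %u bytes\n",
                           tlv->type, ntohs(tlv->length));
            }
            close(chan);
            return 0;
    }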
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 375c3b46411e..410d6d8f1aa7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -23,6 +23,7 @@
struct ath_txq;
struct ath_buf;
+struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
@@ -216,9 +217,9 @@ struct ath_tx_stats {
* @rx_oom_err: No. of frames dropped due to OOM issues.
* @rx_rate_err: No. of frames dropped due to rate errors.
* @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
- * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
* @rx_beacons: No. of beacons received.
 * @rx_frags: No. of rx-fragments received.
+ * @rx_spectral: No. of spectral packets received.
*/
struct ath_rx_stats {
u32 rx_pkts_all;
@@ -235,9 +236,9 @@ struct ath_rx_stats {
u32 rx_oom_err;
u32 rx_rate_err;
u32 rx_too_many_frags_err;
- u32 rx_drop_rxflush;
u32 rx_beacons;
u32 rx_frags;
+ u32 rx_spectral;
};
struct ath_stats {
@@ -323,6 +324,10 @@ void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
+
+void ath_debug_send_fft_sample(struct ath_softc *sc,
+ struct fft_sample_tlv *fft_sample);
+
#else
#define RX_STAT_INC(c) /* NOP */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index 24877b00cbf4..467b60014b7b 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -288,11 +288,11 @@ struct dfs_pattern_detector *
dfs_pattern_detector_init(enum nl80211_dfs_regions region)
{
struct dfs_pattern_detector *dpd;
+
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
- if (dpd == NULL) {
- pr_err("allocation of dfs_pattern_detector failed\n");
+ if (dpd == NULL)
return NULL;
- }
+
*dpd = default_dpd;
INIT_LIST_HEAD(&dpd->channel_detectors);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 05d5ba66cac3..716058b67557 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -280,14 +280,14 @@ err:
return ret;
}
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath9k_htc_priv *priv = hw->priv;
- return ath_reg_notifier_apply(wiphy, request,
- ath9k_hw_regulatory(priv->ah));
+ ath_reg_notifier_apply(wiphy, request,
+ ath9k_hw_regulatory(priv->ah));
}
static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
@@ -783,7 +783,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
priv->fw_version_major = be16_to_cpu(cmd_rsp.major);
priv->fw_version_minor = be16_to_cpu(cmd_rsp.minor);
- snprintf(hw->wiphy->fw_version, ETHTOOL_BUSINFO_LEN, "%d.%d",
+ snprintf(hw->wiphy->fw_version, sizeof(hw->wiphy->fw_version), "%d.%d",
priv->fw_version_major,
priv->fw_version_minor);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9c07a8fa5134..a8016d70088a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1628,7 +1628,9 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
if (!ret)
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index b6a5a08810b8..3ad1fd05c5e7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1196,20 +1196,17 @@ void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
int ath9k_rx_init(struct ath9k_htc_priv *priv)
{
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_rxbuf *rxbuf;
int i = 0;
INIT_LIST_HEAD(&priv->rx.rxbuf);
spin_lock_init(&priv->rx.rxbuflock);
for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
- rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
- if (rxbuf == NULL) {
- ath_err(common, "Unable to allocate RX buffers\n");
+ struct ath9k_htc_rxbuf *rxbuf =
+ kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+ if (rxbuf == NULL)
goto err;
- }
+
list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 4a9570dfba72..aac4a406a513 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -344,6 +344,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
skb, htc_hdr->endpoint_id,
txok);
+ } else {
+ kfree_skb(skb);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 0f2b97f6b739..14b701140b49 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -101,22 +101,6 @@ static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
}
-static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
-{
- if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks)
- return 0;
-
- return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah);
-}
-
-static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
-{
- if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks)
- return;
-
- ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah);
-}
-
static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 modesIndex)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 7cb787065913..2a2ae403e0e5 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -54,11 +54,6 @@ static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
ath9k_hw_private_ops(ah)->init_cal_settings(ah);
}
-static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
-{
- ath9k_hw_private_ops(ah)->init_mode_regs(ah);
-}
-
static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -208,7 +203,7 @@ void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
udelay(hw_delay + BASE_ACTIVATE_DELAY);
}
-void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
+void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
int column, unsigned int *writecnt)
{
int r;
@@ -554,28 +549,19 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
- ecode = ath9k_hw_rf_alloc_ext_banks(ah);
- if (ecode) {
- ath_err(ath9k_hw_common(ah),
- "Failed allocating banks for external radio\n");
- ath9k_hw_rf_free_ext_banks(ah);
- return ecode;
- }
-
- if (ah->config.enable_ani) {
- ath9k_hw_ani_setup(ah);
+ if (ah->config.enable_ani)
ath9k_hw_ani_init(ah);
- }
return 0;
}
-static void ath9k_hw_attach_ops(struct ath_hw *ah)
+static int ath9k_hw_attach_ops(struct ath_hw *ah)
{
- if (AR_SREV_9300_20_OR_LATER(ah))
- ar9003_hw_attach_ops(ah);
- else
- ar9002_hw_attach_ops(ah);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ return ar9002_hw_attach_ops(ah);
+
+ ar9003_hw_attach_ops(ah);
+ return 0;
}
/* Called for all hardware families */
@@ -611,7 +597,9 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_defaults(ah);
ath9k_hw_init_config(ah);
- ath9k_hw_attach_ops(ah);
+ r = ath9k_hw_attach_ops(ah);
+ if (r)
+ return r;
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
ath_err(common, "Couldn't wakeup chip\n");
@@ -675,8 +663,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
- ath9k_hw_init_mode_regs(ah);
-
if (!ah->is_pciexpress)
ath9k_hw_disablepcie(ah);
@@ -1153,12 +1139,9 @@ void ath9k_hw_deinit(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (common->state < ATH_HW_INITIALIZED)
- goto free_hw;
+ return;
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
-
-free_hw:
- ath9k_hw_rf_free_ext_banks(ah);
}
EXPORT_SYMBOL(ath9k_hw_deinit);
@@ -2576,12 +2559,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
rx_chainmask >>= 1;
}
- if (AR_SREV_9300_20_OR_LATER(ah)) {
- ah->enabled_cals |= TX_IQ_CAL;
- if (AR_SREV_9485_OR_LATER(ah))
- ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
- }
-
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
pCap->hw_caps |= ATH9K_HW_CAP_MCI;
@@ -2590,7 +2567,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RTT;
}
-
if (AR_SREV_9280_20_OR_LATER(ah)) {
pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE |
ATH9K_HW_WOW_PATTERN_MATCH_EXACT;
@@ -3005,13 +2981,8 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
struct ath_gen_timer *timer;
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
-
- if (timer == NULL) {
- ath_err(ath9k_hw_common(ah),
- "Failed to allocate memory for hw timer[%d]\n",
- timer_index);
+ if (timer == NULL)
return NULL;
- }
/* allocate a hardware generic timer slot */
timer_table->timers[timer_index] = timer;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 7f1a8e91c908..784e81ccb903 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -397,6 +397,7 @@ enum ath9k_int {
#define MAX_RTT_TABLE_ENTRY 6
#define MAX_IQCAL_MEASUREMENT 8
#define MAX_CL_TAB_ENTRY 16
+#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
struct ath9k_hw_cal_data {
u16 channel;
@@ -599,13 +600,10 @@ struct ath_hw_radar_conf {
* @init_cal_settings: setup types of calibrations supported
* @init_cal: starts actual calibration
*
- * @init_mode_regs: Initializes mode registers
* @init_mode_gain_regs: Initialize TX/RX gain registers
*
* @rf_set_freq: change frequency
* @spur_mitigate_freq: spur mitigation
- * @rf_alloc_ext_banks:
- * @rf_free_ext_banks:
* @set_rf_regs:
* @compute_pll_control: compute the PLL control value to use for
* AR_RTC_PLL_CONTROL for a given channel
@@ -620,7 +618,6 @@ struct ath_hw_private_ops {
void (*init_cal_settings)(struct ath_hw *ah);
bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
- void (*init_mode_regs)(struct ath_hw *ah);
void (*init_mode_gain_regs)(struct ath_hw *ah);
void (*setup_calibration)(struct ath_hw *ah,
struct ath9k_cal_list *currCal);
@@ -630,8 +627,6 @@ struct ath_hw_private_ops {
struct ath9k_channel *chan);
void (*spur_mitigate_freq)(struct ath_hw *ah,
struct ath9k_channel *chan);
- int (*rf_alloc_ext_banks)(struct ath_hw *ah);
- void (*rf_free_ext_banks)(struct ath_hw *ah);
bool (*set_rf_regs)(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 modesIndex);
@@ -661,6 +656,37 @@ struct ath_hw_private_ops {
};
/**
+ * struct ath_spec_scan - parameters for Atheros spectral scan
+ *
+ * @enabled: enable/disable spectral scan
+ * @short_repeat: controls whether the chip is in spectral scan mode
+ * for 4 usec (enabled) or 204 usec (disabled)
+ * @count: number of scan results requested. There are special meanings
+ * in some chip revisions:
+ * AR92xx: highest bit set (>=128) for endless mode
+ * (spectral scan won't stop until explicitly disabled)
+ * AR9300 and newer: 0 for endless mode
+ * @endless: true if endless mode is intended. Otherwise, count value is
+ * corrected to the next possible value.
+ * @period: time duration between successive spectral scan entry points
+ * (period*256*Tclk). Tclk = ath_common->clockrate
+ * @fft_period: PHY passes FFT frames to MAC every (fft_period+1)*4uS
+ *
+ * Note: Tclk = 40MHz or 44MHz depending upon operating mode.
+ * Typically it's 44MHz in 2/5GHz on later chips, but there's
+ * a "fast clock" check for this in 5GHz.
+ *
+ */
+struct ath_spec_scan {
+ bool enabled;
+ bool short_repeat;
+ bool endless;
+ u8 count;
+ u8 period;
+ u8 fft_period;
+};
+
+/**
* struct ath_hw_ops - callbacks used by hardware code and driver code
*
 * This structure contains callbacks designed to be used internally by
@@ -668,6 +694,10 @@ struct ath_hw_private_ops {
*
* @config_pci_powersave:
* @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ *
+ * @spectral_scan_config: set parameters for spectral scan and enable/disable it
+ * @spectral_scan_trigger: trigger a spectral scan run
+ * @spectral_scan_wait: wait for a spectral scan run to finish
*/
struct ath_hw_ops {
void (*config_pci_powersave)(struct ath_hw *ah,
@@ -688,6 +718,10 @@ struct ath_hw_ops {
void (*antdiv_comb_conf_set)(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf);
void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
+ void (*spectral_scan_config)(struct ath_hw *ah,
+ struct ath_spec_scan *param);
+ void (*spectral_scan_trigger)(struct ath_hw *ah);
+ void (*spectral_scan_wait)(struct ath_hw *ah);
};
struct ath_nf_limits {
@@ -710,6 +744,7 @@ enum ath_cal_list {
struct ath_hw {
struct ath_ops reg_ops;
+ struct device *dev;
struct ieee80211_hw *hw;
struct ath_common common;
struct ath9k_hw_version hw_version;
@@ -771,7 +806,6 @@ struct ath_hw {
struct ath9k_cal_list iq_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
- struct ath9k_cal_list tempCompCalData;
struct ath9k_cal_list *cal_list;
struct ath9k_cal_list *cal_list_last;
struct ath9k_cal_list *cal_list_curr;
@@ -830,10 +864,6 @@ struct ath_hw {
/* ANI */
u32 proc_phyerr;
u32 aniperiod;
- int totalSizeDesired[5];
- int coarse_high[5];
- int coarse_low[5];
- int firpwr[5];
enum ath9k_ani_cmd ani_function;
u32 ani_skip_count;
@@ -979,7 +1009,7 @@ void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
int hw_delay);
bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
-void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
+void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
int column, unsigned int *writecnt);
u32 ath9k_hw_reverse_bits(u32 val, u32 n);
u16 ath9k_hw_computetxtime(struct ath_hw *ah,
@@ -1066,16 +1096,17 @@ void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
int ar9003_paprd_init_table(struct ath_hw *ah);
bool ar9003_paprd_is_done(struct ath_hw *ah);
bool ar9003_is_paprd_enabled(struct ath_hw *ah);
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
/* Hardware family op attach helpers */
-void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+int ar5008_hw_attach_phy_ops(struct ath_hw *ah);
void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
-void ar9002_hw_attach_ops(struct ath_hw *ah);
+int ar9002_hw_attach_ops(struct ath_hw *ah);
void ar9003_hw_attach_ops(struct ath_hw *ah);
void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
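
The @period and @fft_period fields documented in struct ath_spec_scan above are expressed in baseband clock ticks: consecutive scan entries are period*256*Tclk apart, and the PHY hands an FFT frame to the MAC every (fft_period+1)*4 us. A quick worked calculation using the defaults that init.c programs below, assuming the typical 44 MHz clock:

    #include <stdio.h>

    int main(void)
    {
            unsigned clockrate_mhz = 44;    /* ath_common->clockrate, typical value */
            unsigned period = 0xFF;         /* defaults set in init.c below */
            unsigned fft_period = 0xF;

            /* period * 256 * Tclk, with Tclk = 1 / clockrate */
            double scan_period_us = (double)period * 256.0 / clockrate_mhz;
            /* FFT frame delivered every (fft_period + 1) * 4 us */
            unsigned fft_us = (fft_period + 1) * 4;

            printf("scan entry period ~ %.1f us, FFT frame every %u us\n",
                   scan_period_us, fft_us);
            return 0;
    }

With these defaults that works out to roughly 1.5 ms between scan entries and a 64 us FFT cadence.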
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f69ef5d48c7b..af932c9444de 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>
+#include <linux/relay.h>
#include "ath9k.h"
@@ -302,16 +303,15 @@ static void setup_ht_cap(struct ath_softc *sc,
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
- int ret;
- ret = ath_reg_notifier_apply(wiphy, request, reg);
+ ath_reg_notifier_apply(wiphy, request, reg);
/* Set tx power */
if (ah->curchan) {
@@ -321,8 +321,6 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
ath9k_ps_restore(sc);
}
-
- return ret;
}
/*
@@ -337,7 +335,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u8 *ds;
struct ath_buf *bf;
- int i, bsize, error, desc_len;
+ int i, bsize, desc_len;
ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
@@ -353,8 +351,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
if ((desc_len % 4) != 0) {
ath_err(common, "ath_desc not DWORD aligned\n");
BUG_ON((desc_len % 4) != 0);
- error = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
dd->dd_desc_len = desc_len * nbuf * ndesc;
@@ -378,12 +375,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
}
/* allocate descriptors */
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
- if (dd->dd_desc == NULL) {
- error = -ENOMEM;
- goto fail;
- }
+ dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (!dd->dd_desc)
+ return -ENOMEM;
+
ds = (u8 *) dd->dd_desc;
ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
@@ -391,12 +387,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
/* allocate buffers */
bsize = sizeof(struct ath_buf) * nbuf;
- bf = kzalloc(bsize, GFP_KERNEL);
- if (bf == NULL) {
- error = -ENOMEM;
- goto fail2;
- }
- dd->dd_bufptr = bf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
bf->bf_desc = ds;
@@ -422,12 +415,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
list_add_tail(&bf->list, head);
}
return 0;
-fail2:
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-fail:
- memset(dd, 0, sizeof(*dd));
- return error;
}
static int ath9k_init_queues(struct ath_softc *sc)
@@ -457,11 +444,13 @@ static int ath9k_init_channels_rates(struct ath_softc *sc)
ATH9K_NUM_CHANNELS);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
- channels = kmemdup(ath9k_2ghz_chantable,
+ channels = devm_kzalloc(sc->dev,
sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
if (!channels)
return -ENOMEM;
+ memcpy(channels, ath9k_2ghz_chantable,
+ sizeof(ath9k_2ghz_chantable));
sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
@@ -472,14 +461,13 @@ static int ath9k_init_channels_rates(struct ath_softc *sc)
}
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
- channels = kmemdup(ath9k_5ghz_chantable,
+ channels = devm_kzalloc(sc->dev,
sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
- if (!channels) {
- if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
- kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
+ if (!channels)
return -ENOMEM;
- }
+ memcpy(channels, ath9k_5ghz_chantable,
+ sizeof(ath9k_5ghz_chantable));
sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
@@ -509,6 +497,13 @@ static void ath9k_init_misc(struct ath_softc *sc)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
+
+ sc->spec_config.enabled = 0;
+ sc->spec_config.short_repeat = true;
+ sc->spec_config.count = 8;
+ sc->spec_config.endless = false;
+ sc->spec_config.period = 0xFF;
+ sc->spec_config.fft_period = 0xF;
}
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@@ -565,10 +560,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
int ret = 0, i;
int csz = 0;
- ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ ah = devm_kzalloc(sc->dev, sizeof(struct ath_hw), GFP_KERNEL);
if (!ah)
return -ENOMEM;
+ ah->dev = sc->dev;
ah->hw = sc->hw;
ah->hw_version.devid = devid;
ah->reg_ops.read = ath9k_ioread32;
@@ -636,7 +632,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (pdata && pdata->eeprom_name) {
ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
if (ret)
- goto err_eeprom;
+ return ret;
}
/* Initializes the hardware for all supported chipsets */
@@ -676,10 +672,6 @@ err_queues:
ath9k_hw_deinit(ah);
err_hw:
ath9k_eeprom_release(sc);
-err_eeprom:
- kfree(ah);
- sc->sc_ah = NULL;
-
return ret;
}
@@ -844,8 +836,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
/* Bring up device */
error = ath9k_init_softc(devid, sc, bus_ops);
- if (error != 0)
- goto error_init;
+ if (error)
+ return error;
ah = sc->sc_ah;
common = ath9k_hw_common(ah);
@@ -855,19 +847,19 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
ath9k_reg_notifier);
if (error)
- goto error_regd;
+ goto deinit;
reg = &common->regulatory;
/* Setup TX DMA */
error = ath_tx_init(sc, ATH_TXBUF);
if (error != 0)
- goto error_tx;
+ goto deinit;
/* Setup RX DMA */
error = ath_rx_init(sc, ATH_RXBUF);
if (error != 0)
- goto error_rx;
+ goto deinit;
ath9k_init_txpower_limits(sc);
@@ -881,19 +873,19 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
- goto error_register;
+ goto rx_cleanup;
error = ath9k_init_debug(ah);
if (error) {
ath_err(common, "Unable to create debugfs files\n");
- goto error_world;
+ goto unregister;
}
/* Handle world regulatory */
if (!ath_is_world_regd(reg)) {
error = regulatory_hint(hw->wiphy, reg->alpha2);
if (error)
- goto error_world;
+ goto unregister;
}
ath_init_leds(sc);
@@ -901,17 +893,12 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
return 0;
-error_world:
+unregister:
ieee80211_unregister_hw(hw);
-error_register:
+rx_cleanup:
ath_rx_cleanup(sc);
-error_rx:
- ath_tx_cleanup(sc);
-error_tx:
- /* Nothing */
-error_regd:
+deinit:
ath9k_deinit_softc(sc);
-error_init:
return error;
}
@@ -923,12 +910,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
{
int i = 0;
- if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
- kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
-
- if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
- kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
-
ath9k_deinit_btcoex(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
@@ -940,8 +921,11 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
sc->dfs_detector->exit(sc->dfs_detector);
ath9k_eeprom_release(sc);
- kfree(sc->sc_ah);
- sc->sc_ah = NULL;
+
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
+ relay_close(sc->rfs_chan_spec_scan);
+ sc->rfs_chan_spec_scan = NULL;
+ }
}
void ath9k_deinit_device(struct ath_softc *sc)
@@ -957,22 +941,9 @@ void ath9k_deinit_device(struct ath_softc *sc)
ieee80211_unregister_hw(hw);
ath_rx_cleanup(sc);
- ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
}
-void ath_descdma_cleanup(struct ath_softc *sc,
- struct ath_descdma *dd,
- struct list_head *head)
-{
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-
- INIT_LIST_HEAD(head);
- kfree(dd->dd_bufptr);
- memset(dd, 0, sizeof(*dd));
-}
-
/************************/
/* Module Hooks */
/************************/
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index b42be910a83d..811007ec07a7 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -605,13 +605,13 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
* reported, then decryption and MIC errors are irrelevant,
* the frame is going to be dropped either way
*/
- if (ads.ds_rxstatus8 & AR_CRCErr)
- rs->rs_status |= ATH9K_RXERR_CRC;
- else if (ads.ds_rxstatus8 & AR_PHYErr) {
+ if (ads.ds_rxstatus8 & AR_PHYErr) {
rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
rs->rs_phyerr = phyerr;
- } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+ } else if (ads.ds_rxstatus8 & AR_CRCErr)
+ rs->rs_status |= ATH9K_RXERR_CRC;
+ else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 4a745e68dd94..1ff817061ebc 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -226,7 +226,8 @@ enum ath9k_phyerr {
ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
- ATH9K_PHYERR_MAX = 37,
+ ATH9K_PHYERR_SPECTRAL = 38,
+ ATH9K_PHYERR_MAX = 39,
};
struct ath_desc {
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index be30a9af1528..6e66f9c6782b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -182,7 +182,7 @@ static void ath_restart_work(struct ath_softc *sc)
ath_start_ani(sc);
}
-static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
+static bool ath_prepare_reset(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
bool ret = true;
@@ -196,20 +196,12 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
ath9k_debug_samp_bb_mac(sc);
ath9k_hw_disable_interrupts(ah);
- if (!ath_stoprecv(sc))
+ if (!ath_drain_all_txq(sc))
ret = false;
- if (!ath_drain_all_txq(sc, retry_tx))
+ if (!ath_stoprecv(sc))
ret = false;
- if (!flush) {
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ath_rx_tasklet(sc, 1, true);
- ath_rx_tasklet(sc, 1, false);
- } else {
- ath_flushrecv(sc);
- }
-
return ret;
}
@@ -255,18 +247,17 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
return true;
}
-static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
- bool retry_tx)
+static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = NULL;
bool fastcc = true;
- bool flush = false;
int r;
__ath_cancel_work(sc);
+ tasklet_disable(&sc->intr_tq);
spin_lock_bh(&sc->sc_pcu_lock);
if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
@@ -276,11 +267,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
if (!hchan) {
fastcc = false;
- flush = true;
hchan = ah->curchan;
}
- if (!ath_prepare_reset(sc, retry_tx, flush))
+ if (!ath_prepare_reset(sc))
fastcc = false;
ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
@@ -302,6 +292,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
out:
spin_unlock_bh(&sc->sc_pcu_lock);
+ tasklet_enable(&sc->intr_tq);
+
return r;
}
@@ -319,7 +311,7 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
- r = ath_reset_internal(sc, hchan, false);
+ r = ath_reset_internal(sc, hchan);
return r;
}
@@ -328,28 +320,25 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
struct ath_node *an;
- u8 density;
an = (struct ath_node *)sta->drv_priv;
an->sc = sc;
an->sta = sta;
an->vif = vif;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- ath_tx_node_init(sc, an);
+ ath_tx_node_init(sc, an);
+
+ if (sta->ht_cap.ht_supported) {
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
- density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
- an->mpdudensity = density;
+ an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
}
}
static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- ath_tx_node_cleanup(sc, an);
+ ath_tx_node_cleanup(sc, an);
}
void ath9k_tasklet(unsigned long data)
@@ -549,23 +538,21 @@ chip_reset:
#undef SCHED_INTR
}
-static int ath_reset(struct ath_softc *sc, bool retry_tx)
+static int ath_reset(struct ath_softc *sc)
{
- int r;
+ int i, r;
ath9k_ps_wakeup(sc);
- r = ath_reset_internal(sc, NULL, retry_tx);
+ r = ath_reset_internal(sc, NULL);
- if (retry_tx) {
- int i;
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i)) {
- spin_lock_bh(&sc->tx.txq[i].axq_lock);
- ath_txq_schedule(sc, &sc->tx.txq[i]);
- spin_unlock_bh(&sc->tx.txq[i].axq_lock);
- }
- }
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ spin_lock_bh(&sc->tx.txq[i].axq_lock);
+ ath_txq_schedule(sc, &sc->tx.txq[i]);
+ spin_unlock_bh(&sc->tx.txq[i].axq_lock);
}
ath9k_ps_restore(sc);
@@ -586,7 +573,7 @@ void ath_reset_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
- ath_reset(sc, true);
+ ath_reset(sc);
}
/**********************/
@@ -804,7 +791,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
}
- ath_prepare_reset(sc, false, true);
+ ath_prepare_reset(sc);
if (sc->rx.frag) {
dev_kfree_skb_any(sc->rx.frag);
@@ -1075,6 +1062,75 @@ static void ath9k_disable_ps(struct ath_softc *sc)
ath_dbg(common, PS, "PowerSave disabled\n");
}
+void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 rxfilter;
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return;
+ }
+
+ ath9k_ps_wakeup(sc);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ ath9k_hw_setrxfilter(ah, rxfilter |
+ ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR);
+
+ /* TODO: usually this should not be necessary, but for some reason
+ * (or in some mode?) the trigger must be called after the
+ * configuration, otherwise the register will have its values reset
+ * (on my ar9220 to value 0x01002310)
+ */
+ ath9k_spectral_scan_config(hw, sc->spectral_mode);
+ ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
+ ath9k_ps_restore(sc);
+}
+
+int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+ enum spectral_mode spectral_mode)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return -1;
+ }
+
+ switch (spectral_mode) {
+ case SPECTRAL_DISABLED:
+ sc->spec_config.enabled = 0;
+ break;
+ case SPECTRAL_BACKGROUND:
+ /* send endless samples.
+ * TODO: is this really useful for "background"?
+ */
+ sc->spec_config.endless = 1;
+ sc->spec_config.enabled = 1;
+ break;
+ case SPECTRAL_CHANSCAN:
+ case SPECTRAL_MANUAL:
+ sc->spec_config.endless = 0;
+ sc->spec_config.enabled = 1;
+ break;
+ default:
+ return -1;
+ }
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config);
+ ath9k_ps_restore(sc);
+
+ sc->spectral_mode = spectral_mode;
+
+ return 0;
+}
+
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_softc *sc = hw->priv;
@@ -1188,6 +1244,11 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (old_pos >= 0)
ath_update_survey_nf(sc, old_pos);
+
+ /* perform spectral scan if requested. */
+ if (sc->scanning && sc->spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_spectral_scan_trigger(hw);
+
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -1610,7 +1671,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
ath9k_ps_restore(sc);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1729,11 +1792,11 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
if (drop) {
ath9k_ps_wakeup(sc);
spin_lock_bh(&sc->sc_pcu_lock);
- drain_txq = ath_drain_all_txq(sc, false);
+ drain_txq = ath_drain_all_txq(sc);
spin_unlock_bh(&sc->sc_pcu_lock);
if (!drain_txq)
- ath_reset(sc, false);
+ ath_reset(sc);
ath9k_ps_restore(sc);
ieee80211_wake_queues(hw);
@@ -1833,6 +1896,9 @@ static u32 fill_chainmask(u32 cap, u32 new)
static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
{
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ return true;
+
switch (val & 0x7) {
case 0x1:
case 0x3:
@@ -2238,6 +2304,19 @@ static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
}
#endif
+static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+
+ sc->scanning = 1;
+}
+
+static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+
+ sc->scanning = 0;
+}
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
@@ -2284,4 +2363,6 @@ struct ieee80211_ops ath9k_ops = {
.sta_add_debugfs = ath9k_sta_add_debugfs,
.sta_remove_debugfs = ath9k_sta_remove_debugfs,
#endif
+ .sw_scan_start = ath9k_sw_scan_start,
+ .sw_scan_complete = ath9k_sw_scan_complete,
};
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 5c02702f21e7..815bee21c19a 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -438,7 +438,7 @@ int ath_mci_setup(struct ath_softc *sc)
struct ath_mci_buf *buf = &mci->sched_buf;
int ret;
- buf->bf_addr = dma_alloc_coherent(sc->dev,
+ buf->bf_addr = dmam_alloc_coherent(sc->dev,
ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
&buf->bf_paddr, GFP_KERNEL);
@@ -474,13 +474,6 @@ void ath_mci_cleanup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
- struct ath_mci_coex *mci = &sc->mci_coex;
- struct ath_mci_buf *buf = &mci->sched_buf;
-
- if (buf->bf_addr)
- dma_free_coherent(sc->dev,
- ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
- buf->bf_addr, buf->bf_paddr);
ar9003_mci_cleanup(ah);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 7ae73fbd9136..0e0d39583837 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -147,7 +147,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- void __iomem *mem;
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
@@ -155,19 +154,19 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int ret = 0;
char hw_name[64];
- if (pci_enable_device(pdev))
+ if (pcim_enable_device(pdev))
return -EIO;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
pr_err("32-bit DMA not available\n");
- goto err_dma;
+ return ret;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
pr_err("32-bit DMA consistent DMA enable failed\n");
- goto err_dma;
+ return ret;
}
/*
@@ -203,25 +202,16 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- ret = pci_request_region(pdev, 0, "ath9k");
+ ret = pcim_iomap_regions(pdev, BIT(0), "ath9k");
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
- ret = -ENODEV;
- goto err_region;
- }
-
- mem = pci_iomap(pdev, 0, 0);
- if (!mem) {
- pr_err("PCI memory map error\n") ;
- ret = -EIO;
- goto err_iomap;
+ return -ENODEV;
}
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
- ret = -ENOMEM;
- goto err_alloc_hw;
+ return -ENOMEM;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -230,7 +220,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
- sc->mem = mem;
+ sc->mem = pcim_iomap_table(pdev)[0];
/* Will be cleared in ath9k_start() */
set_bit(SC_OP_INVALID, &sc->sc_flags);
@@ -251,7 +241,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
- hw_name, (unsigned long)mem, pdev->irq);
+ hw_name, (unsigned long)sc->mem, pdev->irq);
return 0;
@@ -259,14 +249,6 @@ err_init:
free_irq(sc->irq, sc);
err_irq:
ieee80211_free_hw(hw);
-err_alloc_hw:
- pci_iounmap(pdev, mem);
-err_iomap:
- pci_release_region(pdev, 0);
-err_region:
- /* Nothing */
-err_dma:
- pci_disable_device(pdev);
return ret;
}
@@ -274,17 +256,12 @@ static void ath_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
- void __iomem *mem = sc->mem;
if (!is_ath9k_unloaded)
sc->sc_ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(sc);
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
-
- pci_iounmap(pdev, mem);
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
}
#ifdef CONFIG_PM_SLEEP
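The probe and remove paths above shrink because the driver switches to managed (devres) helpers: pcim_enable_device(), pcim_iomap_regions() and pcim_iomap_table() register their own cleanup actions, just as the devm_kzalloc()/dmam_alloc_coherent() conversions do elsewhere in this series. A minimal sketch of the pattern (example_probe() and example_init_hw() are hypothetical, not part of ath9k):

	#include <linux/pci.h>

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		void __iomem *mem;
		int ret;

		/* The enabled device is disabled automatically on driver detach. */
		ret = pcim_enable_device(pdev);
		if (ret)
			return ret;

		/* Request and ioremap BAR 0; both are released automatically as
		 * well, so no explicit error-unwind labels are needed.
		 */
		ret = pcim_iomap_regions(pdev, BIT(0), "example");
		if (ret)
			return ret;

		mem = pcim_iomap_table(pdev)[0];

		return example_init_hw(mem);	/* hypothetical hardware setup */
	}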
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 714558d1ba78..96ac433ba7f6 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1204,7 +1204,7 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
else if (sta->ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
caps |= WLAN_RC_40_FLAG;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
caps |= WLAN_RC_SGI_FLAG;
@@ -1452,17 +1452,7 @@ static void ath_rate_free(void *priv)
static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *rate_priv;
-
- rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
- if (!rate_priv) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Unable to allocate private rc structure\n");
- return NULL;
- }
-
- return rate_priv;
+ return kzalloc(sizeof(struct ath_rate_priv), gfp);
}
static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index d4df98a938bf..ee156e543147 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/relay.h>
#include "ath9k.h"
#include "ar9003_mac.h"
@@ -180,11 +181,6 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
bf->bf_mpdu = NULL;
}
}
-
- INIT_LIST_HEAD(&sc->rx.rxbuf);
-
- kfree(sc->rx.rx_bufptr);
- sc->rx.rx_bufptr = NULL;
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
@@ -211,12 +207,11 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
ah->caps.rx_hp_qdepth);
size = sizeof(struct ath_buf) * nbufs;
- bf = kzalloc(size, GFP_KERNEL);
+ bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
if (!bf)
return -ENOMEM;
INIT_LIST_HEAD(&sc->rx.rxbuf);
- sc->rx.rx_bufptr = bf;
for (i = 0; i < nbufs; i++, bf++) {
skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
@@ -254,8 +249,6 @@ rx_init_fail:
static void ath_edma_start_recv(struct ath_softc *sc)
{
- spin_lock_bh(&sc->rx.rxbuflock);
-
ath9k_hw_rxena(sc->sc_ah);
ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
@@ -267,8 +260,6 @@ static void ath_edma_start_recv(struct ath_softc *sc)
ath_opmode_init(sc);
ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
-
- spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -285,8 +276,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
- spin_lock_init(&sc->rx.rxbuflock);
- clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
sc->sc_ah->caps.rx_status_len;
@@ -363,9 +352,6 @@ void ath_rx_cleanup(struct ath_softc *sc)
bf->bf_mpdu = NULL;
}
}
-
- if (sc->rx.rxdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}
}
@@ -447,7 +433,6 @@ int ath_startrecv(struct ath_softc *sc)
return 0;
}
- spin_lock_bh(&sc->rx.rxbuflock);
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
@@ -468,26 +453,31 @@ start_recv:
ath_opmode_init(sc);
ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
- spin_unlock_bh(&sc->rx.rxbuflock);
-
return 0;
}
+static void ath_flushrecv(struct ath_softc *sc)
+{
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_rx_tasklet(sc, 1, true);
+ ath_rx_tasklet(sc, 1, false);
+}
+
bool ath_stoprecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
bool stopped, reset = false;
- spin_lock_bh(&sc->rx.rxbuflock);
ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah, &reset);
+ ath_flushrecv(sc);
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_edma_stop_recv(sc);
else
sc->rx.rxlink = NULL;
- spin_unlock_bh(&sc->rx.rxbuflock);
if (!(ah->ah_flags & AH_UNPLUGGED) &&
unlikely(!stopped)) {
@@ -499,15 +489,6 @@ bool ath_stoprecv(struct ath_softc *sc)
return stopped && !reset;
}
-void ath_flushrecv(struct ath_softc *sc)
-{
- set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ath_rx_tasklet(sc, 1, true);
- ath_rx_tasklet(sc, 1, false);
- clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
-}
-
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
@@ -552,7 +533,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (sc->ps_flags & PS_BEACON_SYNC) {
sc->ps_flags &= ~PS_BEACON_SYNC;
ath_dbg(common, PS,
- "Reconfigure Beacon timers based on timestamp from the AP\n");
+ "Reconfigure beacon timers based on synchronized timestamp\n");
ath9k_set_beacon(sc);
}
@@ -744,6 +725,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
+ list_del(&bf->list);
if (!bf->bf_mpdu)
return bf;
@@ -1034,6 +1016,134 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
rxs->flag &= ~RX_FLAG_DECRYPTED;
}
+#ifdef CONFIG_ATH9K_DEBUGFS
+static s8 fix_rssi_inv_only(u8 rssi_val)
+{
+ if (rssi_val == 128)
+ rssi_val = 0;
+ return (s8) rssi_val;
+}
+#endif
+
+/* returns 1 if this was a spectral frame, even if not handled. */
+static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+{
+#ifdef CONFIG_ATH9K_DEBUGFS
+ struct ath_hw *ah = sc->sc_ah;
+ u8 bins[SPECTRAL_HT20_NUM_BINS];
+ u8 *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample;
+ struct ath_radar_info *radar_info;
+ struct ath_ht20_mag_info *mag_info;
+ int len = rs->rs_datalen;
+ int dc_pos;
+ u16 length, max_magnitude;
+
+ /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
+ * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
+ * yet, but this is supposed to be possible as well.
+ */
+ if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
+ rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
+ rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
+ return 0;
+
+ /* check if spectral scan bit is set. This does not have to be checked
+ * if received through a SPECTRAL phy error, but shouldn't hurt.
+ */
+ radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+ if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+ return 0;
+
+ /* Variation in the data length is possible and will be fixed later.
+ * Note that we only support HT20 for now.
+ *
+ * TODO: add HT20_40 support as well.
+ */
+ if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
+ (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
+ return 1;
+
+ fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
+ length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
+ fft_sample.tlv.length = __cpu_to_be16(length);
+
+ fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
+ fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+ fft_sample.noise = ah->noise;
+
+ switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
+ case 0:
+ /* length correct, nothing to do. */
+ memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
+ break;
+ case -1:
+ /* first byte missing, duplicate it. */
+ memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
+ bins[0] = vdata[0];
+ break;
+ case 2:
+ /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
+ memcpy(bins, vdata, 30);
+ bins[30] = vdata[31];
+ memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
+ break;
+ case 1:
+ /* MAC added 2 extra bytes AND first byte is missing. */
+ bins[0] = vdata[0];
+ memcpy(&bins[0], vdata, 30);
+ bins[31] = vdata[31];
+ memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
+ break;
+ default:
+ return 1;
+ }
+
+ /* DC value (value in the middle) is the blind spot of the spectral
+ * sample and is invalid, so interpolate it.
+ */
+ dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+ bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
+
+ /* mag data is at the end of the frame, in front of radar_info */
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+
+ /* copy raw bins without scaling them */
+ memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
+ fft_sample.max_exp = mag_info->max_exp & 0xf;
+
+ max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
+ fft_sample.max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample.tsf = __cpu_to_be64(tsf);
+
+ ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static void ath9k_apply_ampdu_details(struct ath_softc *sc,
+ struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
+{
+ if (rs->rs_isaggr) {
+ rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+
+ rxs->ampdu_reference = sc->rx.ampdu_ref;
+
+ if (!rs->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
+ sc->rx.ampdu_ref++;
+ }
+
+ if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
+ rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
+ }
+}
+
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_buf *bf;
@@ -1059,16 +1169,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
dma_type = DMA_FROM_DEVICE;
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
- spin_lock_bh(&sc->rx.rxbuflock);
tsf = ath9k_hw_gettsf64(ah);
tsf_lower = tsf & 0xffffffff;
do {
bool decrypt_error = false;
- /* If handling rx interrupt and flush is in progress => exit */
- if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
- break;
memset(&rs, 0, sizeof(rs));
if (edma)
@@ -1111,15 +1217,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
ath_debug_stat_rx(sc, &rs);
- /*
- * If we're asked to flush receive queue, directly
- * chain it back at the queue without processing it.
- */
- if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
- RX_STAT_INC(rx_drop_rxflush);
- goto requeue_drop_frag;
- }
-
memset(rxs, 0, sizeof(struct ieee80211_rx_status));
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
@@ -1131,6 +1228,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
+ if (rs.rs_status & ATH9K_RXERR_PHY) {
+ if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
+ RX_STAT_INC(rx_spectral);
+ goto requeue_drop_frag;
+ }
+ }
+
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
if (retval)
@@ -1246,6 +1350,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
ath_ant_comb_scan(sc, &rs);
+ ath9k_apply_ampdu_details(sc, &rs, rxs);
+
ieee80211_rx(hw, skb);
requeue_drop_frag:
@@ -1254,19 +1360,18 @@ requeue_drop_frag:
sc->rx.frag = NULL;
}
requeue:
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ if (flush)
+ continue;
+
if (edma) {
- list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
} else {
- list_move_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_buf_link(sc, bf);
- if (!flush)
- ath9k_hw_rxena(ah);
+ ath9k_hw_rxena(ah);
}
} while (1);
- spin_unlock_bh(&sc->rx.rxbuflock);
-
if (!(ah->imask & ATH9K_INT_RXEOL)) {
ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
ath9k_hw_set_interrupts(ah);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index ad3c82c09177..5929850649f0 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -789,6 +789,7 @@
#define AR_SREV_REVISION_9271_11 1
#define AR_SREV_VERSION_9300 0x1c0
#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
+#define AR_SREV_REVISION_9300_22 3
#define AR_SREV_VERSION_9330 0x200
#define AR_SREV_REVISION_9330_10 0
#define AR_SREV_REVISION_9330_11 1
@@ -869,6 +870,9 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
#define AR_SREV_9300_20_OR_LATER(_ah) \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300)
+#define AR_SREV_9300_22(_ah) \
+ (AR_SREV_9300(ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22))
#define AR_SREV_9330(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9330))
@@ -884,9 +888,6 @@
#define AR_SREV_9485(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
-#define AR_SREV_9485_10(_ah) \
- (AR_SREV_9485(_ah) && \
- ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
#define AR_SREV_9485_11(_ah) \
(AR_SREV_9485(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 90e48a0fafe5..89a64411b82e 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -378,7 +378,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, bool retry)
+ struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
@@ -490,7 +490,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
- } else if ((tid->state & AGGR_CLEANUP) || !retry) {
+ } else if (tid->state & AGGR_CLEANUP) {
/*
* cleanup in progress, just fail
* the un-acked sub-frames
@@ -604,6 +604,37 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
+static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+}
+
+static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_tx_status *ts, struct ath_buf *bf,
+ struct list_head *bf_head)
+{
+ bool txok, flush;
+
+ txok = !(ts->ts_status & ATH9K_TXERR_MASK);
+ flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+ txq->axq_tx_inprogress = false;
+
+ txq->axq_depth--;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth--;
+
+ if (!bf_isampdu(bf)) {
+ if (!flush)
+ ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
+ } else
+ ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
+
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
+ ath_txq_schedule(sc, txq);
+}
+
static bool ath_lookup_legacy(struct ath_buf *bf)
{
struct sk_buff *skb;
@@ -1202,7 +1233,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
* in HT IBSS when a beacon with HT-info is received after the station
* has already been added.
*/
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (sta->ht_cap.ht_supported) {
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
@@ -1331,23 +1362,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
/* Queue Management */
/********************/
-static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
- struct ath_txq *txq)
-{
- struct ath_atx_ac *ac, *ac_tmp;
- struct ath_atx_tid *tid, *tid_tmp;
-
- list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
- list_del(&ac->list);
- ac->sched = false;
- list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
- list_del(&tid->list);
- tid->sched = false;
- ath_tid_drain(sc, txq, tid);
- }
- }
-}
-
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1470,14 +1484,8 @@ int ath_cabq_update(struct ath_softc *sc)
return 0;
}
-static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
- return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-}
-
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
- struct list_head *list, bool retry_tx)
+ struct list_head *list)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@@ -1499,16 +1507,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
lastbf = bf->bf_lastbf;
list_cut_position(&bf_head, list, &lastbf->list);
-
- txq->axq_depth--;
- if (bf_is_ampdu_not_probing(bf))
- txq->axq_ampdu_depth--;
-
- if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
- retry_tx);
- else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
}
}
@@ -1518,7 +1517,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
* This assumes output has been stopped and
* we do not need to block ath_tx_tasklet.
*/
-void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
ath_txq_lock(sc, txq);
@@ -1526,8 +1525,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
int idx = txq->txq_tailidx;
while (!list_empty(&txq->txq_fifo[idx])) {
- ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
- retry_tx);
+ ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
INCR(idx, ATH_TXFIFO_DEPTH);
}
@@ -1536,16 +1534,12 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
txq->axq_link = NULL;
txq->axq_tx_inprogress = false;
- ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
-
- /* flush any pending frames if aggregation is enabled */
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
- ath_txq_drain_pending_buffers(sc, txq);
+ ath_drain_txq_list(sc, txq, &txq->axq_q);
ath_txq_unlock_complete(sc, txq);
}
-bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
+bool ath_drain_all_txq(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -1581,7 +1575,7 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
*/
txq = &sc->tx.txq[i];
txq->stopped = false;
- ath_draintxq(sc, txq, retry_tx);
+ ath_draintxq(sc, txq);
}
return !npend;
@@ -1910,8 +1904,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf;
u8 tidno;
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
- ieee80211_is_data_qos(hdr->frame_control)) {
+ if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(txctl->an, tidno);
@@ -2175,28 +2168,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
-static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_tx_status *ts, struct ath_buf *bf,
- struct list_head *bf_head)
-{
- int txok;
-
- txq->axq_depth--;
- txok = !(ts->ts_status & ATH9K_TXERR_MASK);
- txq->axq_tx_inprogress = false;
- if (bf_is_ampdu_not_probing(bf))
- txq->axq_ampdu_depth--;
-
- if (!bf_isampdu(bf)) {
- ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
- ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
- } else
- ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- ath_txq_schedule(sc, txq);
-}
-
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_hw *ah = sc->sc_ah;
@@ -2361,8 +2332,8 @@ static int ath_txstatus_setup(struct ath_softc *sc, int size)
u8 txs_len = sc->sc_ah->caps.txs_len;
dd->dd_desc_len = size * txs_len;
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
+ dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
if (!dd->dd_desc)
return -ENOMEM;
@@ -2382,14 +2353,6 @@ static int ath_tx_edma_init(struct ath_softc *sc)
return err;
}
-static void ath_tx_edma_cleanup(struct ath_softc *sc)
-{
- struct ath_descdma *dd = &sc->txsdma;
-
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-}
-
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2402,7 +2365,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
if (error != 0) {
ath_err(common,
"Failed to allocate tx descriptors: %d\n", error);
- goto err;
+ return error;
}
error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
@@ -2410,36 +2373,17 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
if (error != 0) {
ath_err(common,
"Failed to allocate beacon descriptors: %d\n", error);
- goto err;
+ return error;
}
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
error = ath_tx_edma_init(sc);
- if (error)
- goto err;
- }
-
-err:
- if (error != 0)
- ath_tx_cleanup(sc);
return error;
}
-void ath_tx_cleanup(struct ath_softc *sc)
-{
- if (sc->beacon.bdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
-
- if (sc->tx.txdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ath_tx_edma_cleanup(sc);
-}
-
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_tid *tid;
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index 13a204598766..1a796e5f69ec 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -1,6 +1,6 @@
config CARL9170
tristate "Linux Community AR9170 802.11n USB support"
- depends on USB && MAC80211 && EXPERIMENTAL
+ depends on USB && MAC80211
select ATH_COMMON
select FW_LOADER
select CRC32
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 2df17f1e49ef..25599741cd8a 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -85,20 +85,14 @@ enum carl9170_device_state {
CARL9170_STARTED,
};
-#define CARL9170_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define CARL9170_TX_USER_RATE_TRIES 3
-#define WME_AC_BE 2
-#define WME_AC_BK 3
-#define WME_AC_VI 1
-#define WME_AC_VO 0
-
#define TID_TO_WME_AC(_tid) \
- ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
- (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
- (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
- WME_AC_VO)
+ ((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
+ (((_tid) == 1) || ((_tid) == 2)) ? IEEE80211_AC_BK : \
+ (((_tid) == 4) || ((_tid) == 5)) ? IEEE80211_AC_VI : \
+ IEEE80211_AC_VO)
#define SEQ_DIFF(_start, _seq) \
(((_start) - (_seq)) & 0x0fff)
@@ -290,6 +284,7 @@ struct ar9170 {
unsigned int rx_size;
unsigned int tx_seq_table;
bool ba_filter;
+ bool disable_offload_fw;
} fw;
/* interface configuration combinations */
@@ -493,8 +488,8 @@ struct carl9170_sta_info {
bool sleeping;
atomic_t pending_frames;
unsigned int ampdu_max_len;
- struct carl9170_sta_tid __rcu *agg[CARL9170_NUM_TID];
- struct carl9170_ba_stats stats[CARL9170_NUM_TID];
+ struct carl9170_sta_tid __rcu *agg[IEEE80211_NUM_TIDS];
+ struct carl9170_ba_stats stats[IEEE80211_NUM_TIDS];
};
struct carl9170_tx_info {
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index aaebecd19e59..47d5c2e910ad 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -215,6 +215,24 @@ static int carl9170_fw_tx_sequence(struct ar9170 *ar)
return 0;
}
+static void carl9170_fw_set_if_combinations(struct ar9170 *ar,
+ u16 if_comb_types)
+{
+ if (ar->fw.vif_num < 2)
+ return;
+
+ ar->if_comb_limits[0].max = ar->fw.vif_num;
+ ar->if_comb_limits[0].types = if_comb_types;
+
+ ar->if_combs[0].num_different_channels = 1;
+ ar->if_combs[0].max_interfaces = ar->fw.vif_num;
+ ar->if_combs[0].limits = ar->if_comb_limits;
+ ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits);
+
+ ar->hw->wiphy->iface_combinations = ar->if_combs;
+ ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs);
+}
+
static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
{
const struct carl9170fw_otus_desc *otus_desc;
@@ -264,7 +282,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (!SUPP(CARL9170FW_COMMAND_CAM)) {
dev_info(&ar->udev->dev, "crypto offloading is disabled "
"by firmware.\n");
- ar->disable_offload = true;
+ ar->fw.disable_offload_fw = true;
}
if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM))
@@ -336,25 +354,24 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WLANTX_CAB)) {
if_comb_types |=
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_P2P_GO);
+
+#ifdef CONFIG_MAC80211_MESH
+ if_comb_types |=
+ BIT(NL80211_IFTYPE_MESH_POINT);
+#endif /* CONFIG_MAC80211_MESH */
}
}
- ar->if_comb_limits[0].max = ar->fw.vif_num;
- ar->if_comb_limits[0].types = if_comb_types;
-
- ar->if_combs[0].num_different_channels = 1;
- ar->if_combs[0].max_interfaces = ar->fw.vif_num;
- ar->if_combs[0].limits = ar->if_comb_limits;
- ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits);
-
- ar->hw->wiphy->iface_combinations = ar->if_combs;
- ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs);
+ carl9170_fw_set_if_combinations(ar, if_comb_types);
ar->hw->wiphy->interface_modes |= if_comb_types;
- ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /* As IBSS Encryption is software-based, IBSS RSN is supported. */
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_SUPPORTS_TDLS;
#undef SUPPORTED
return carl9170_fw_tx_sequence(ar);
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 9443c802b25b..9111d4ffc1b3 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -156,6 +156,14 @@ struct carl9170_psm {
} __packed;
#define CARL9170_PSM_SIZE 4
+/*
+ * Note: If a bit in rx_filter is set, then the
+ * frames which match that condition are
+ * FILTERED/REMOVED/DISCARDED!
+ * (This can be a bit confusing, especially
+ * because some people assume it works the exact
+ * opposite way, so watch out!)
+ */
struct carl9170_rx_filter_cmd {
__le32 rx_filter;
} __packed;
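Since a set bit asks the firmware to drop the matching frames, a caller builds the filter from the conditions it wants removed, not the ones it wants to keep. A hedged sketch, assuming the driver's carl9170_rx_filter() helper and CARL9170_RX_FILTER_* flags:

	/* e.g. let the firmware discard all data frames and frames that
	 * failed decryption, while everything else is delivered to the host.
	 */
	err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_DATA |
				     CARL9170_RX_FILTER_DECRY_FAIL);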
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index fa834c1460f0..0db874abde50 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -384,7 +384,7 @@
#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xd84)
#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xd88)
-#define AR9170_MAC_BCN_LENGTH_MAX 256
+#define AR9170_MAC_BCN_LENGTH_MAX (512 - 32)
#define AR9170_MAC_REG_BCN_STATUS (AR9170_MAC_REG_BASE + 0xd8c)
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 25a1e2f4f738..f293b3ff4756 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -358,8 +358,13 @@ static int carl9170_op_start(struct ieee80211_hw *hw)
ar->ps.last_action = jiffies;
ar->ps.last_slept = jiffies;
ar->erp_mode = CARL9170_ERP_AUTO;
- ar->rx_software_decryption = false;
- ar->disable_offload = false;
+
+ /* Set "disable hw crypto offload" whenever the module parameter
+ * nohwcrypt is true or the firmware does not support it.
+ */
+ ar->disable_offload = modparam_nohwcrypt |
+ ar->fw.disable_offload_fw;
+ ar->rx_software_decryption = ar->disable_offload;
for (i = 0; i < ar->hw->queues; i++) {
ar->queue_stop_timeout[i] = jiffies;
@@ -565,12 +570,28 @@ static int carl9170_init_interface(struct ar9170 *ar,
memcpy(common->macaddr, vif->addr, ETH_ALEN);
- if (modparam_nohwcrypt ||
- ((vif->type != NL80211_IFTYPE_STATION) &&
- (vif->type != NL80211_IFTYPE_AP))) {
- ar->rx_software_decryption = true;
- ar->disable_offload = true;
- }
+ /* We have to fall back to software crypto whenever
+ * the user chooses to participate in an IBSS. HW
+ * offload for IBSS RSN is not supported by this driver.
+ *
+ * NOTE: If the previous main interface has already
+ * disabled hw crypto offload, we have to keep this
+ * previous disable_offload setting as it was.
+ * Although ideally, we should notify mac80211 and tell
+ * it to forget about any HW crypto offload for now.
+ */
+ ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
+ (vif->type != NL80211_IFTYPE_AP));
+
+ /* While the driver supports HW offload in a single
+ * P2P client configuration, it doesn't support HW
+ * offload in the more common, concurrent P2P GO+CLIENT
+ * configuration. Hence, HW offload will always be
+ * disabled for P2P.
+ */
+ ar->disable_offload |= vif->p2p;
+
+ ar->rx_software_decryption = ar->disable_offload;
err = carl9170_set_operating_mode(ar);
return err;
@@ -580,7 +601,7 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
- struct ieee80211_vif *main_vif;
+ struct ieee80211_vif *main_vif, *old_main = NULL;
struct ar9170 *ar = hw->priv;
int vif_id = -1, err = 0;
@@ -602,6 +623,15 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
goto init;
}
+ /* Because the AR9170 HW's MAC doesn't provide full support for
+ * multiple, independent interfaces [of different operation modes],
+ * we have to select ONE main interface [main mode of HW], but we
+ * can have multiple slaves [AKA: entries in the ACK-table].
+ *
+ * The first (from HEAD/TOP) interface in the ar->vif_list is
+ * always the main intf. All following intfs in this list
+ * are considered to be slave intfs.
+ */
main_vif = carl9170_get_main_vif(ar);
if (main_vif) {
@@ -610,6 +640,18 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_STATION)
break;
+ /* P2P GO [master] use-case
+ * Because the P2P GO station is selected dynamically
+ * by all participating peers of a WIFI Direct network,
+ * the driver has to be able to change the main interface
+ * operating mode on the fly.
+ */
+ if (main_vif->p2p && vif->p2p &&
+ vif->type == NL80211_IFTYPE_AP) {
+ old_main = main_vif;
+ break;
+ }
+
err = -EBUSY;
rcu_read_unlock();
@@ -648,14 +690,41 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
vif_priv->id = vif_id;
vif_priv->enable_beacon = false;
ar->vifs++;
- list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
+ if (old_main) {
+ /* We end up here if the main interface is being replaced.
+ * Put the new main interface at the HEAD of the list and the
+ * previous interface will automatically become second in line.
+ */
+ list_add_rcu(&vif_priv->list, &ar->vif_list);
+ } else {
+ /* Add the new interface. If the list is empty, it will become the
+ * main interface, otherwise it will be a slave.
+ */
+ list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
+ }
rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
init:
- if (carl9170_get_main_vif(ar) == vif) {
+ main_vif = carl9170_get_main_vif(ar);
+
+ if (main_vif == vif) {
rcu_assign_pointer(ar->beacon_iter, vif_priv);
rcu_read_unlock();
+ if (old_main) {
+ struct carl9170_vif_info *old_main_priv =
+ (void *) old_main->drv_priv;
+ /* downgrade old main intf to slave intf.
+ * NOTE: We are no longer under rcu_read_lock.
+ * But we are still holding ar->mutex, so the
+ * vif data [id, addr] is safe.
+ */
+ err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
+ old_main->addr);
+ if (err)
+ goto unlock;
+ }
+
err = carl9170_init_interface(ar, vif);
if (err)
goto unlock;
@@ -1112,9 +1181,7 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (ar->disable_offload || !vif)
return -EOPNOTSUPP;
- /*
- * We have to fall back to software encryption, whenever
- * the user choose to participates in an IBSS or is connected
+ /* Fall back to software encryption whenever the driver is connected
* to more than one network.
*
* This is very unfortunate, because some machines cannot handle
@@ -1263,7 +1330,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
return 0;
}
- for (i = 0; i < CARL9170_NUM_TID; i++)
+ for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
RCU_INIT_POINTER(sta_info->agg[i], NULL);
sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
@@ -1287,7 +1354,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
sta_info->ht_sta = false;
rcu_read_lock();
- for (i = 0; i < CARL9170_NUM_TID; i++) {
+ for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
struct carl9170_sta_tid *tid_info;
tid_info = rcu_dereference(sta_info->agg[i]);
@@ -1394,7 +1461,9 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
rcu_read_lock();
tid_info = rcu_dereference(sta_info->agg[tid]);
if (tid_info) {
@@ -1784,7 +1853,7 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SIGNAL_DBM;
if (!modparam_noht) {
@@ -1805,10 +1874,6 @@ void *carl9170_alloc(size_t priv_size)
for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- /* As IBSS Encryption is software-based, IBSS RSN is supported. */
- hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
return ar;
err_nomem:
@@ -1916,13 +1981,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
return 0;
}
-static int carl9170_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void carl9170_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ar9170 *ar = hw->priv;
- return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
+ ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
}
int carl9170_register(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index ef4ec0da6e49..9c0b150d5b8e 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1520,35 +1520,92 @@ void carl9170_tx_scheduler(struct ar9170 *ar)
carl9170_tx(ar);
}
-int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
+/* caller has to take rcu_read_lock */
+static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
{
- struct sk_buff *skb = NULL;
struct carl9170_vif_info *cvif;
+ int i = 1;
+
+ /* The AR9170 hardware has no fancy beacon queue or any
+ * other scheduling mechanism. So, the driver has to make
+ * do by setting the two beacon timers (pretbtt and tbtt)
+ * once and then swapping the beacon address in the HW's
+ * register file each time the pretbtt fires.
+ */
+
+ cvif = rcu_dereference(ar->beacon_iter);
+ if (ar->vifs > 0 && cvif) {
+ do {
+ list_for_each_entry_continue_rcu(cvif, &ar->vif_list,
+ list) {
+ if (cvif->active && cvif->enable_beacon)
+ goto out;
+ }
+ } while (ar->beacon_enabled && i--);
+ }
+
+out:
+ rcu_assign_pointer(ar->beacon_iter, cvif);
+ return cvif;
+}
+
+static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb,
+ u32 *ht1, u32 *plcp)
+{
struct ieee80211_tx_info *txinfo;
struct ieee80211_tx_rate *rate;
- __le32 *data, *old = NULL;
- unsigned int plcp, power, chains;
- u32 word, ht1, off, addr, len;
- int i = 0, err = 0;
+ unsigned int power, chains;
+ bool ht_rate;
- rcu_read_lock();
- cvif = rcu_dereference(ar->beacon_iter);
-retry:
- if (ar->vifs == 0 || !cvif)
- goto out_unlock;
+ txinfo = IEEE80211_SKB_CB(skb);
+ rate = &txinfo->control.rates[0];
+ ht_rate = !!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS);
+ carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains);
- list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
- if (cvif->active && cvif->enable_beacon)
- goto found;
+ *ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
+ if (chains == AR9170_TX_PHY_TXCHAIN_2)
+ *ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
+ SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, *ht1, 7);
+ SET_VAL(AR9170_MAC_BCN_HT1_TPC, *ht1, power);
+ SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, *ht1, chains);
+
+ if (ht_rate) {
+ *ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ *plcp |= AR9170_MAC_BCN_HT2_SGI;
+
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ *ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
+ *plcp |= AR9170_MAC_BCN_HT2_BW40;
+ } else if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
+ *ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
+ *plcp |= AR9170_MAC_BCN_HT2_BW40;
+ }
+
+ SET_VAL(AR9170_MAC_BCN_HT2_LEN, *plcp, skb->len + FCS_LEN);
+ } else {
+ if (*plcp <= AR9170_TX_PHY_RATE_CCK_11M)
+ *plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
+ else
+ *plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
}
- if (!ar->beacon_enabled || i++)
- goto out_unlock;
+ return ht_rate;
+}
- goto retry;
+int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
+{
+ struct sk_buff *skb = NULL;
+ struct carl9170_vif_info *cvif;
+ __le32 *data, *old = NULL;
+ u32 word, ht1, plcp, off, addr, len;
+ int i = 0, err = 0;
+ bool ht_rate;
-found:
- rcu_assign_pointer(ar->beacon_iter, cvif);
+ rcu_read_lock();
+ cvif = carl9170_pick_beaconing_vif(ar);
+ if (!cvif)
+ goto out_unlock;
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
NULL, NULL);
@@ -1558,7 +1615,6 @@ found:
goto err_free;
}
- txinfo = IEEE80211_SKB_CB(skb);
spin_lock_bh(&ar->beacon_lock);
data = (__le32 *)skb->data;
if (cvif->beacon)
@@ -1588,43 +1644,14 @@ found:
goto err_unlock;
}
- ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
- rate = &txinfo->control.rates[0];
- carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains);
- if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
- if (plcp <= AR9170_TX_PHY_RATE_CCK_11M)
- plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
- else
- plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
- } else {
- ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- plcp |= AR9170_MAC_BCN_HT2_SGI;
-
- if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
- ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
- plcp |= AR9170_MAC_BCN_HT2_BW40;
- }
- if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
- ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
- plcp |= AR9170_MAC_BCN_HT2_BW40;
- }
-
- SET_VAL(AR9170_MAC_BCN_HT2_LEN, plcp, skb->len + FCS_LEN);
- }
-
- SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, ht1, 7);
- SET_VAL(AR9170_MAC_BCN_HT1_TPC, ht1, power);
- SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, ht1, chains);
- if (chains == AR9170_TX_PHY_TXCHAIN_2)
- ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
+ ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp);
carl9170_async_regwrite_begin(ar);
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
- if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS))
- carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
- else
+ if (ht_rate)
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
+ else
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
/*
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index 2ec3e9191e4d..2282847d4bb8 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
#define CARL9170FW_VERSION_YEAR 12
-#define CARL9170FW_VERSION_MONTH 7
-#define CARL9170FW_VERSION_DAY 7
-#define CARL9170FW_VERSION_GIT "1.9.6"
+#define CARL9170FW_VERSION_MONTH 12
+#define CARL9170FW_VERSION_DAY 15
+#define CARL9170FW_VERSION_GIT "1.9.7"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index d81698015bf7..ccc4c718f124 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -195,8 +195,6 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
const struct ieee80211_reg_rule *reg_rule;
struct ieee80211_channel *ch;
unsigned int i;
- u32 bandwidth = 0;
- int r;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
@@ -214,11 +212,8 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- r = freq_reg_info(wiphy,
- ch->center_freq,
- bandwidth,
- &reg_rule);
- if (r)
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (IS_ERR(reg_rule))
continue;
/*
* If 11d had a rule for this channel ensure
@@ -254,8 +249,6 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
const struct ieee80211_reg_rule *reg_rule;
- u32 bandwidth = 0;
- int r;
sband = wiphy->bands[IEEE80211_BAND_2GHZ];
if (!sband)
@@ -283,16 +276,16 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
*/
ch = &sband->channels[11]; /* CH 12 */
- r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
- if (!r) {
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
}
ch = &sband->channels[12]; /* CH 13 */
- r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
- if (!r) {
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
@@ -363,9 +356,9 @@ static u16 ath_regd_find_country_by_name(char *alpha2)
return -1;
}
-int ath_reg_notifier_apply(struct wiphy *wiphy,
- struct regulatory_request *request,
- struct ath_regulatory *reg)
+void ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg)
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
@@ -380,7 +373,7 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
* any pending requests in the queue.
*/
if (!request)
- return 0;
+ return;
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
@@ -416,8 +409,6 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
break;
}
-
- return 0;
}
EXPORT_SYMBOL(ath_reg_notifier_apply);
@@ -507,8 +498,8 @@ ath_get_regpair(int regdmn)
static int
ath_regd_init_wiphy(struct ath_regulatory *reg,
struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
const struct ieee80211_regdomain *regd;
@@ -628,8 +619,8 @@ static int __ath_regd_init(struct ath_regulatory *reg)
int
ath_regd_init(struct ath_regulatory *reg,
struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 03a8268ccf21..37f53bd8fcb1 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -252,12 +252,12 @@ enum CountryCode {
bool ath_is_world_regd(struct ath_regulatory *reg);
bool ath_is_49ghz_allowed(u16 redomain);
int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
enum ieee80211_band band);
-int ath_reg_notifier_apply(struct wiphy *wiphy,
- struct regulatory_request *request,
- struct ath_regulatory *reg);
+void ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg);
#endif
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
new file mode 100644
index 000000000000..bac3d98a0cfb
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -0,0 +1,29 @@
+config WIL6210
+ tristate "Wilocity 60g WiFi card wil6210 support"
+ depends on CFG80211
+ depends on PCI
+ default n
+ ---help---
+ This module adds support for wireless adapters based on the
+ wil6210 chip by Wilocity. It supports operation on the
+ 60 GHz band, covered by the IEEE802.11ad standard.
+
+ http://wireless.kernel.org/en/users/Drivers/wil6210
+
+ If you choose to build it as a module, it will be called
+ wil6210.
+
+config WIL6210_ISR_COR
+ bool "Use Clear-On-Read mode for ISR registers for wil6210"
+ depends on WIL6210
+ default y
+ ---help---
+ ISR registers on the wil6210 chip may operate in either
+ COR (Clear-On-Read) or W1C (Write-1-to-Clear) mode.
+ For production code, use COR (say y); it is the default
+ since it saves an extra target transaction.
+ For ISR debugging, use W1C (say n); it allows monitoring the ISR
+ registers with debugfs. If COR were used, the ISR would
+ self-clear when accessed for debug purposes, which makes
+ such monitoring impossible.
+ Say y unless you are debugging interrupts.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
new file mode 100644
index 000000000000..9396dc9fe3c5
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_WIL6210) += wil6210.o
+
+wil6210-objs := main.o
+wil6210-objs += netdev.o
+wil6210-objs += cfg80211.o
+wil6210-objs += pcie_bus.o
+wil6210-objs += debugfs.o
+wil6210-objs += wmi.o
+wil6210-objs += interrupt.o
+wil6210-objs += txrx.o
+
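+# -Werror: treat warnings as errors; __CHECK_ENDIAN__ enables sparse endianness checks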
+subdir-ccflags-y += -Werror
+subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
new file mode 100644
index 000000000000..9ecc1968262c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <net/cfg80211.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define CHAN60G(_channel, _flags) { \
+ .band = IEEE80211_BAND_60GHZ, \
+ .center_freq = 56160 + (2160 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 40, \
+}
+
+static struct ieee80211_channel wil_60ghz_channels[] = {
+ CHAN60G(1, 0),
+ CHAN60G(2, 0),
+ CHAN60G(3, 0),
+/* channel 4 not supported yet */
+};
+
+static struct ieee80211_supported_band wil_band_60ghz = {
+ .channels = wil_60ghz_channels,
+ .n_channels = ARRAY_SIZE(wil_60ghz_channels),
+ .ht_cap = {
+ .ht_supported = true,
+ .cap = 0, /* TODO */
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
+ .mcs = {
+ /* MCS 1..12 - SC PHY */
+ .rx_mask = {0xfe, 0x1f}, /* 1..12 */
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
+ },
+ },
+};
+
+static const struct ieee80211_txrx_stypes
+wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static const u32 wil_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_GCMP,
+};
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type)
+{
+ static const struct {
+ enum nl80211_iftype nl;
+ enum wmi_network_type wmi;
+ } __nl2wmi[] = {
+ {NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC},
+ {NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA},
+ {NL80211_IFTYPE_AP, WMI_NETTYPE_AP},
+ {NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */
+ };
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
+ if (__nl2wmi[i].nl == type)
+ return __nl2wmi[i].wmi;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ struct wmi_notify_req_cmd cmd = {
+ .cid = 0,
+ .interval_usec = 0,
+ };
+
+ if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
+ return -ENOENT;
+
+ /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
+ rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
+ WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+ if (rc)
+ return rc;
+
+ sinfo->generation = wil->sinfo_gen;
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->txrate.mcs = wil->stats.bf_mcs;
+ sinfo->filled |= STATION_INFO_RX_BITRATE;
+ sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+
+ if (test_bit(wil_status_fwconnected, &wil->status)) {
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = 12; /* TODO: provide real value */
+ }
+
+ return 0;
+}
+
+static int wil_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (flags)
+ wil->monitor_flags = *flags;
+ else
+ wil->monitor_flags = 0;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int wil_cfg80211_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+ struct {
+ struct wmi_start_scan_cmd cmd;
+ u16 chnl[4];
+ } __packed cmd;
+ uint i, n;
+
+ if (wil->scan_request) {
+ wil_err(wil, "Already scanning\n");
+ return -EAGAIN;
+ }
+
+ /* check we are client side */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+	/* FW doesn't support scan after a connection attempt */
+ if (test_bit(wil_status_dontscan, &wil->status)) {
+ wil_err(wil, "Scan after connect attempt not supported\n");
+ return -EBUSY;
+ }
+
+ wil->scan_request = request;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd.num_channels = 0;
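+	/* the command buffer declared above has room for at most 4 channel entries */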
+ n = min(request->n_channels, 4U);
+ for (i = 0; i < n; i++) {
+ int ch = request->channels[i]->hw_value;
+ if (ch == 0) {
+ wil_err(wil,
+ "Scan requested for unknown frequency %dMhz\n",
+ request->channels[i]->center_freq);
+ continue;
+ }
+ /* 0-based channel indexes */
+ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+ wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
+ request->channels[i]->center_freq);
+ }
+
+ return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+ cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+}
+
+static int wil_cfg80211_connect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct cfg80211_bss *bss;
+ struct wmi_connect_cmd conn;
+ const u8 *ssid_eid;
+ const u8 *rsn_eid;
+ int ch;
+ int rc = 0;
+
+ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+ sme->ssid, sme->ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ if (!bss) {
+ wil_err(wil, "Unable to find BSS\n");
+ return -ENOENT;
+ }
+
+ ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (!ssid_eid) {
+ wil_err(wil, "No SSID\n");
+ rc = -ENOENT;
+ goto out;
+ }
+
+ rsn_eid = sme->ie ?
+ cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+ NULL;
+ if (rsn_eid) {
+ if (sme->ie_len > WMI_MAX_IE_LEN) {
+ rc = -ERANGE;
+ wil_err(wil, "IE too large (%td bytes)\n",
+ sme->ie_len);
+ goto out;
+ }
+ /*
+ * For secure assoc, send:
+ * (1) WMI_DELETE_CIPHER_KEY_CMD
+ * (2) WMI_SET_APPIE_CMD
+ */
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+ goto out;
+ }
+ /* WMI_SET_APPIE_CMD */
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+ if (rc) {
+ wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+ goto out;
+ }
+ }
+
+ /* WMI_CONNECT_CMD */
+ memset(&conn, 0, sizeof(conn));
+ switch (bss->capability & 0x03) {
+ case WLAN_CAPABILITY_DMG_TYPE_AP:
+ conn.network_type = WMI_NETTYPE_INFRA;
+ break;
+ case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+ conn.network_type = WMI_NETTYPE_P2P;
+ break;
+ default:
+ wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+		rc = -EOPNOTSUPP;
+		goto out;
+ }
+ if (rsn_eid) {
+ conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+ conn.auth_mode = WMI_AUTH_WPA2_PSK;
+ conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.pairwise_crypto_len = 16;
+ } else {
+ conn.dot11_auth_mode = WMI_AUTH11_OPEN;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+
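+	/* ssid_eid[1] is the IE length byte; the SSID payload starts at ssid_eid[2] */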
+ conn.ssid_len = min_t(u8, ssid_eid[1], 32);
+ memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
+
+ ch = bss->channel->hw_value;
+ if (ch == 0) {
+ wil_err(wil, "BSS at unknown frequency %dMhz\n",
+ bss->channel->center_freq);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
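+	/* firmware expects 0-based channel indexes, as in the scan command above */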
+ conn.channel = ch - 1;
+
+ memcpy(conn.bssid, bss->bssid, 6);
+ memcpy(conn.dst_mac, bss->bssid, 6);
+ /*
+	 * FW doesn't support scan after a connection attempt
+ */
+ set_bit(wil_status_dontscan, &wil->status);
+
+ rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
+ if (rc == 0) {
+ /* Connect can take lots of time */
+ mod_timer(&wil->connect_timer,
+ jiffies + msecs_to_jiffies(2000));
+ }
+
+ out:
+ cfg80211_put_bss(wiphy, bss);
+
+ return rc;
+}
+
+static int wil_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u16 reason_code)
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+
+ return rc;
+}
+
+static int wil_cfg80211_set_channel(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wdev->preset_chandef = *chandef;
+
+ return 0;
+}
+
+static int wil_cfg80211_add_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_add_cipher_key(wil, key_index, mac_addr,
+ params->key_len, params->key);
+}
+
+static int wil_cfg80211_del_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_del_cipher_key(wil, key_index, mac_addr);
+}
+
+/* Need to be present or wiphy_new() will WARN */
+static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ return 0;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_ap_settings *info)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct ieee80211_channel *channel = info->chandef.chan;
+ struct cfg80211_beacon_data *bcon = &info->beacon;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ if (!channel) {
+ wil_err(wil, "AP: No channel???\n");
+ return -EINVAL;
+ }
+
+ wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+ channel->center_freq, info->privacy ? "secure" : "open");
+ print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+ info->ssid, info->ssid_len);
+
+ rc = wil_reset(wil);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_channel(wil, channel->hw_value);
+ if (rc)
+ return rc;
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ /* IE's */
+	/* bcon 'head' IEs are not relevant for the 60 GHz band */
+ wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+ bcon->beacon_ies);
+ wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+ bcon->proberesp_ies);
+ wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
+
+ wil->secure_pcp = info->privacy;
+
+ rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ rc = wil_rx_init(wil);
+
+ netif_carrier_on(ndev);
+
+ return rc;
+}
+
+static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ /* To stop beaconing, set BI to 0 */
+ rc = wmi_set_bcon(wil, 0, wmi_nettype);
+
+ return rc;
+}
+
+static struct cfg80211_ops wil_cfg80211_ops = {
+ .scan = wil_cfg80211_scan,
+ .connect = wil_cfg80211_connect,
+ .disconnect = wil_cfg80211_disconnect,
+ .change_virtual_intf = wil_cfg80211_change_iface,
+ .get_station = wil_cfg80211_get_station,
+ .set_monitor_channel = wil_cfg80211_set_channel,
+ .add_key = wil_cfg80211_add_key,
+ .del_key = wil_cfg80211_del_key,
+ .set_default_key = wil_cfg80211_set_default_key,
+ /* AP mode */
+ .start_ap = wil_cfg80211_start_ap,
+ .stop_ap = wil_cfg80211_stop_ap,
+};
+
+static void wil_wiphy_init(struct wiphy *wiphy)
+{
+ /* TODO: set real value */
+ wiphy->max_scan_ssids = 10;
+ wiphy->max_num_pmkids = 0 /* TODO: */;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MONITOR);
+ /* TODO: enable P2P when integrated with supplicant:
+ * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
+ */
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ dev_warn(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
+ __func__, wiphy->flags);
+ wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+
+ /* TODO: figure this out */
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = wil_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
+ wiphy->mgmt_stypes = wil_mgmt_stypes;
+}
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev)
+{
+ int rc = 0;
+ struct wireless_dev *wdev;
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ wdev->wiphy = wiphy_new(&wil_cfg80211_ops,
+ sizeof(struct wil6210_priv));
+ if (!wdev->wiphy) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ set_wiphy_dev(wdev->wiphy, dev);
+ wil_wiphy_init(wdev->wiphy);
+
+ rc = wiphy_register(wdev->wiphy);
+ if (rc < 0)
+ goto out_failed_reg;
+
+ return wdev;
+
+out_failed_reg:
+ wiphy_free(wdev->wiphy);
+out:
+ kfree(wdev);
+
+ return ERR_PTR(rc);
+}
+
+void wil_wdev_free(struct wil6210_priv *wil)
+{
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
new file mode 100644
index 000000000000..e5712f026c47
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
@@ -0,0 +1,20 @@
+#ifndef WIL_DBG_HEXDUMP_H_
+#define WIL_DBG_HEXDUMP_H_
+
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#else /* defined(CONFIG_DYNAMIC_DEBUG) */
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+#endif /* WIL_DBG_HEXDUMP_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
new file mode 100644
index 000000000000..65fc9683bfd8
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+
+/* Nasty hack. Better to have per-device instances */
+static u32 mem_addr;
+static u32 dbg_txdesc_index;
+
+static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct vring *vring)
+{
+ void __iomem *x = wmi_addr(wil, vring->hwtail);
+
+ seq_printf(s, "VRING %s = {\n", name);
+ seq_printf(s, " pa = 0x%016llx\n", (unsigned long long)vring->pa);
+ seq_printf(s, " va = 0x%p\n", vring->va);
+ seq_printf(s, " size = %d\n", vring->size);
+ seq_printf(s, " swtail = %d\n", vring->swtail);
+ seq_printf(s, " swhead = %d\n", vring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
+ if (x)
+ seq_printf(s, "0x%08x\n", ioread32(x));
+ else
+ seq_printf(s, "???\n");
+
+ if (vring->va && (vring->size < 1025)) {
+ uint i;
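+		/* one char per descriptor: 'S' - status bit 0 set, 'H' - ctx (skb) attached, 'h' - empty */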
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &vring->va[i].tx;
+ if ((i % 64) == 0 && (i != 0))
+ seq_printf(s, "\n");
+ seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
+ "S" : (vring->ctx[i] ? "H" : "h"));
+ }
+ seq_printf(s, "\n");
+ }
+ seq_printf(s, "}\n");
+}
+
+static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+{
+ uint i;
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_vring(s, wil, "rx", &wil->vring_rx);
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ struct vring *vring = &(wil->vring_tx[i]);
+ if (vring->va) {
+ char name[10];
+ snprintf(name, sizeof(name), "tx_%2d", i);
+ wil_print_vring(s, wil, name, vring);
+ }
+ }
+
+ return 0;
+}
+
+static int wil_vring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_vring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_vring = {
+ .open = wil_vring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void wil_print_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_mbox_ring r;
+ int rsize;
+ uint i;
+
+ wil_memcpy_fromio_32(&r, off, sizeof(r));
+ wil_mbox_ring_le2cpus(&r);
+ /*
+	 * we just read a memory block from the NIC. This memory may be
+ * garbage. Check validity before using it.
+ */
+ rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
+
+ seq_printf(s, "ring %s = {\n", prefix);
+ seq_printf(s, " base = 0x%08x\n", r.base);
+ seq_printf(s, " size = 0x%04x bytes -> %d entries\n", r.size, rsize);
+ seq_printf(s, " tail = 0x%08x\n", r.tail);
+ seq_printf(s, " head = 0x%08x\n", r.head);
+ seq_printf(s, " entry size = %d\n", r.entry_size);
+
+ if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
+ seq_printf(s, " ??? size is not multiple of %zd, garbage?\n",
+ sizeof(struct wil6210_mbox_ring_desc));
+ goto out;
+ }
+
+ if (!wmi_addr(wil, r.base) ||
+ !wmi_addr(wil, r.tail) ||
+ !wmi_addr(wil, r.head)) {
+ seq_printf(s, " ??? pointers are garbage?\n");
+ goto out;
+ }
+
+ for (i = 0; i < rsize; i++) {
+ struct wil6210_mbox_ring_desc d;
+ struct wil6210_mbox_hdr hdr;
+ size_t delta = i * sizeof(d);
+ void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
+
+ wil_memcpy_fromio_32(&d, x, sizeof(d));
+
+ seq_printf(s, " [%2x] %s %s%s 0x%08x", i,
+ d.sync ? "F" : "E",
+ (r.tail - r.base == delta) ? "t" : " ",
+ (r.head - r.base == delta) ? "h" : " ",
+ le32_to_cpu(d.addr));
+ if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
+ u16 len = le16_to_cpu(hdr.len);
+ seq_printf(s, " -> %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len,
+ le16_to_cpu(hdr.type), hdr.flags);
+ if (len <= MAX_MBOXITEM_SIZE) {
+ int n = 0;
+ unsigned char printbuf[16 * 3 + 2];
+ unsigned char databuf[MAX_MBOXITEM_SIZE];
+ void __iomem *src = wmi_buffer(wil, d.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ /*
+ * No need to check @src for validity -
+ * we already validated @d.addr while
+				 * reading the header
+ */
+ wil_memcpy_fromio_32(databuf, src, len);
+ while (n < len) {
+ int l = min(len - n, 16);
+ hex_dump_to_buffer(databuf + n, l,
+ 16, 1, printbuf,
+ sizeof(printbuf),
+ false);
+ seq_printf(s, " : %s\n", printbuf);
+ n += l;
+ }
+ }
+ } else {
+ seq_printf(s, "\n");
+ }
+ }
+ out:
+ seq_printf(s, "}\n");
+}
+
+static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx));
+ wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx));
+
+ return 0;
+}
+
+static int wil_mbox_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_mbox_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_mbox = {
+ .open = wil_mbox_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_debugfs_iomem_x32_set(void *data, u64 val)
+{
+ iowrite32(val, (void __iomem *)data);
+ wmb(); /* make sure write propagated to HW */
+
+ return 0;
+}
+
+static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
+{
+ *val = ioread32((void __iomem *)data);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+ wil_debugfs_iomem_x32_set, "0x%08llx\n");
+
+static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ void __iomem *value)
+{
+ return debugfs_create_file(name, mode, parent, (void * __force)value,
+ &fops_iomem_x32);
+}
+
+static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+ const char *name,
+ struct dentry *parent, u32 off)
+{
+ struct dentry *d = debugfs_create_dir(name, parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
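+	/* the byte offsets below are assumed to match the register layout of struct RGF_ICR */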
+ wil_debugfs_create_iomem_x32("ICC", S_IRUGO | S_IWUSR, d,
+ wil->csr + off);
+ wil_debugfs_create_iomem_x32("ICR", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 4);
+ wil_debugfs_create_iomem_x32("ICM", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 8);
+ wil_debugfs_create_iomem_x32("ICS", S_IWUSR, d,
+ wil->csr + off + 12);
+ wil_debugfs_create_iomem_x32("IMV", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 16);
+ wil_debugfs_create_iomem_x32("IMS", S_IWUSR, d,
+ wil->csr + off + 20);
+ wil_debugfs_create_iomem_x32("IMC", S_IWUSR, d,
+ wil->csr + off + 24);
+
+ return 0;
+}
+
+static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("CAUSE", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+ wil_debugfs_create_iomem_x32("MASK_SW", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+ wil_debugfs_create_iomem_x32("MASK_FW", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW));
+
+ return 0;
+}
+
+static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("ITR_CNT", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("TRSH", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+ wil_debugfs_create_iomem_x32("DATA", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_DATA));
+ wil_debugfs_create_iomem_x32("CTL", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+
+ return 0;
+}
+
+static int wil_memread_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+
+ if (a)
+ seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+ else
+ seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+
+ return 0;
+}
+
+static int wil_memread_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_memread_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_memread = {
+ .open = wil_memread_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_default_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum { max_count = 4096 };
+ struct debugfs_blob_wrapper *blob = file->private_data;
+ loff_t pos = *ppos;
+ size_t available = blob->size;
+ void *buf;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos >= available || !count)
+ return 0;
+
+ if (count > available - pos)
+ count = available - pos;
+ if (count > max_count)
+ count = max_count;
+
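+	/* bounce via a kernel buffer: copy_to_user() cannot read __iomem memory directly */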
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
+ pos, count);
+
+ ret = copy_to_user(user_buf, buf, count);
+ kfree(buf);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+
+ return count;
+}
+
+static const struct file_operations fops_ioblob = {
+ .read = wil_read_file_ioblob,
+ .open = wil_default_open,
+ .llseek = default_llseek,
+};
+
+static
+struct dentry *wil_debugfs_create_ioblob(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ struct debugfs_blob_wrapper *blob)
+{
+ return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+}
+/*---reset---*/
+static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ /**
+ * BUG:
+ * this code does NOT sync device state with the rest of system
+ * use with care, debug only!!!
+ */
+ rtnl_lock();
+ dev_close(ndev);
+ ndev->flags &= ~IFF_UP;
+ rtnl_unlock();
+ wil_reset(wil);
+
+ return len;
+}
+
+static const struct file_operations fops_reset = {
+ .write = wil_write_file_reset,
+ .open = wil_default_open,
+};
+/*---------Tx descriptor------------*/
+
+static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct vring *vring = &(wil->vring_tx[0]);
+
+ if (!vring->va) {
+ seq_printf(s, "No Tx VRING\n");
+ return 0;
+ }
+
+ if (dbg_txdesc_index < vring->size) {
+ volatile struct vring_tx_desc *d =
+ &(vring->va[dbg_txdesc_index].tx);
+ volatile u32 *u = (volatile u32 *)d;
+ struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+
+ seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = %p\n", skb);
+
+ if (skb) {
+ unsigned char printbuf[16 * 3 + 2];
+ int i = 0;
+ int len = skb_headlen(skb);
+ void *p = skb->data;
+
+ seq_printf(s, " len = %d\n", len);
+
+ while (i < len) {
+ int l = min(len - i, 16);
+ hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+ sizeof(printbuf), false);
+ seq_printf(s, " : %s\n", printbuf);
+ i += l;
+ }
+ }
+ seq_printf(s, "}\n");
+ } else {
+ seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
+ dbg_txdesc_index, vring->size);
+ }
+
+ return 0;
+}
+
+static int wil_txdesc_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_txdesc_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_txdesc = {
+ .open = wil_txdesc_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------beamforming------------*/
+static int wil_bf_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ seq_printf(s,
+ "TSF : 0x%016llx\n"
+ "TxMCS : %d\n"
+ "Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n",
+ wil->stats.tsf, wil->stats.bf_mcs,
+ wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+ wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+ return 0;
+}
+
+static int wil_bf_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_bf_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_bf = {
+ .open = wil_bf_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+/*---------SSID------------*/
+static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ wdev->ssid, wdev->ssid_len);
+}
+
+static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ if (*ppos != 0) {
+ wil_err(wil, "Unable to set SSID substring from [%d]\n",
+ (int)*ppos);
+ return -EINVAL;
+ }
+
+ if (count > sizeof(wdev->ssid)) {
+ wil_err(wil, "SSID too long, len = %d\n", (int)count);
+ return -EINVAL;
+ }
+ if (netif_running(ndev)) {
+ wil_err(wil, "Unable to change SSID on running interface\n");
+ return -EINVAL;
+ }
+
+ wdev->ssid_len = count;
+ return simple_write_to_buffer(wdev->ssid, wdev->ssid_len, ppos,
+ buf, count);
+}
+
+static const struct file_operations fops_ssid = {
+ .read = wil_read_file_ssid,
+ .write = wil_write_file_ssid,
+ .open = wil_default_open,
+};
+
+/*----------------*/
+int wil6210_debugfs_init(struct wil6210_priv *wil)
+{
+ struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
+ wil_to_wiphy(wil)->debugfsdir);
+
+ if (IS_ERR_OR_NULL(dbg))
+ return -ENODEV;
+
+ debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
+ debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
+ debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
+ debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+ &dbg_txdesc_index);
+ debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
+ debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
+ debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
+ &wil->secure_pcp);
+
+ wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg,
+ HOSTADDR(RGF_USER_USER_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_TX_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_TX_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_RX_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_RX_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_MISC_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_MISC_ICR));
+ wil6210_debugfs_create_pseudo_ISR(wil, dbg);
+ wil6210_debugfs_create_ITR_CNT(wil, dbg);
+
+ debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr);
+ debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
+
+ debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+
+ wil->rgf_blob.data = (void * __force)wil->csr + 0;
+ wil->rgf_blob.size = 0xa000;
+ wil_debugfs_create_ioblob("blob_rgf", S_IRUGO, dbg, &wil->rgf_blob);
+
+ wil->fw_code_blob.data = (void * __force)wil->csr + 0x40000;
+ wil->fw_code_blob.size = 0x40000;
+ wil_debugfs_create_ioblob("blob_fw_code", S_IRUGO, dbg,
+ &wil->fw_code_blob);
+
+ wil->fw_data_blob.data = (void * __force)wil->csr + 0x80000;
+ wil->fw_data_blob.size = 0x8000;
+ wil_debugfs_create_ioblob("blob_fw_data", S_IRUGO, dbg,
+ &wil->fw_data_blob);
+
+ wil->fw_peri_blob.data = (void * __force)wil->csr + 0x88000;
+ wil->fw_peri_blob.size = 0x18000;
+ wil_debugfs_create_ioblob("blob_fw_peri", S_IRUGO, dbg,
+ &wil->fw_peri_blob);
+
+ wil->uc_code_blob.data = (void * __force)wil->csr + 0xa0000;
+ wil->uc_code_blob.size = 0x10000;
+ wil_debugfs_create_ioblob("blob_uc_code", S_IRUGO, dbg,
+ &wil->uc_code_blob);
+
+ wil->uc_data_blob.data = (void * __force)wil->csr + 0xb0000;
+ wil->uc_data_blob.size = 0x4000;
+ wil_debugfs_create_ioblob("blob_uc_data", S_IRUGO, dbg,
+ &wil->uc_data_blob);
+
+ return 0;
+}
+
+void wil6210_debugfs_remove(struct wil6210_priv *wil)
+{
+ debugfs_remove_recursive(wil->debug);
+ wil->debug = NULL;
+}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
new file mode 100644
index 000000000000..dc97e7b2609c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -0,0 +1,490 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+
+/**
+ * Theory of operation:
+ *
+ * There is an ISR pseudo-cause register,
+ * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE.
+ * Its bits represent OR'ed bits from the 3 real ISR registers:
+ * TX, RX, and MISC.
+ *
+ * Registers may be configured to either "write 1 to clear" or
+ * "clear on read" mode.
+ *
+ * When handling an interrupt, one has to mask/unmask interrupts for the
+ * real ISR registers, or the hardware may malfunction.
+ *
+ */
+
+#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE
+#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
+ BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
+ ISR_MISC_MBOX_EVT | \
+ ISR_MISC_FW_ERROR)
+
+#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
+ BIT_DMA_PSEUDO_CAUSE_TX | \
+ BIT_DMA_PSEUDO_CAUSE_MISC))
+
+#if defined(CONFIG_WIL6210_ISR_COR)
+/* configure to Clear-On-Read mode */
+#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
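+	/* no-op: in Clear-On-Read mode the read itself already cleared the register */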
+}
+#else /* defined(CONFIG_WIL6210_ISR_COR) */
+/* configure to Write-1-to-Clear mode */
+#define WIL_ICR_ICC_VALUE (0UL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+ iowrite32(x, addr);
+}
+#endif /* defined(CONFIG_WIL6210_ISR_COR) */
+
+static inline u32 wil_ioread32_and_clear(void __iomem *addr)
+{
+ u32 x = ioread32(addr);
+
+ wil_icr_clear(x, addr);
+
+ return x;
+}
+
+static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+
+ clear_bit(wil_status_irqen, &wil->status);
+}
+
+static void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_TX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_RX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_MISC, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ set_bit(wil_status_irqen, &wil->status);
+
+ iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+}
+
+void wil6210_disable_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_pseudo(wil);
+}
+
+void wil6210_enable_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+
+ wil6210_unmask_irq_pseudo(wil);
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ wil6210_unmask_irq_misc(wil);
+}
+
+static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_rx(wil);
+
+ if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
+ wil_dbg_irq(wil, "RX done\n");
+ isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
+ wil_rx_handle(wil);
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ wil6210_unmask_irq_rx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_tx(wil);
+
+ if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+ uint i;
+ wil_dbg_irq(wil, "TX done\n");
+ isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+ for (i = 0; i < 24; i++) {
+ u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
+ if (isr & mask) {
+ isr &= ~mask;
+ wil_dbg_irq(wil, "TX done(%i)\n", i);
+ wil_tx_complete(wil, i);
+ }
+ }
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ wil6210_unmask_irq_tx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static void wil_notify_fw_error(struct wil6210_priv *wil)
+{
+ struct device *dev = &wil_to_ndev(wil)->dev;
+ char *envp[3] = {
+ [0] = "SOURCE=wil6210",
+ [1] = "EVENT=FW_ERROR",
+ [2] = NULL,
+ };
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+}
+
+static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: MISC\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_misc(wil);
+
+ if (isr & ISR_MISC_FW_ERROR) {
+ wil_dbg_irq(wil, "IRQ: Firmware error\n");
+ clear_bit(wil_status_fwready, &wil->status);
+ wil_notify_fw_error(wil);
+ isr &= ~ISR_MISC_FW_ERROR;
+ }
+
+ if (isr & ISR_MISC_FW_READY) {
+ wil_dbg_irq(wil, "IRQ: FW ready\n");
+ /**
+ * Actual FW ready indicated by the
+ * WMI_FW_READY_EVENTID
+ */
+ isr &= ~ISR_MISC_FW_READY;
+ }
+
+ wil->isr_misc = isr;
+
+ if (isr) {
+ return IRQ_WAKE_THREAD;
+ } else {
+ wil6210_unmask_irq_misc(wil);
+ return IRQ_HANDLED;
+ }
+}
+
+static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil->isr_misc;
+
+ wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
+
+ if (isr & ISR_MISC_MBOX_EVT) {
+ wil_dbg_irq(wil, "MBOX event\n");
+ wmi_recv_cmd(wil);
+ isr &= ~ISR_MISC_MBOX_EVT;
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+
+ wil->isr_misc = 0;
+
+ wil6210_unmask_irq_misc(wil);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * thread IRQ handler
+ */
+static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+
+ wil_dbg_irq(wil, "Thread IRQ\n");
+ /* Discover real IRQ cause */
+ if (wil->isr_misc)
+ wil6210_irq_misc_thread(irq, cookie);
+
+ wil6210_unmask_irq_pseudo(wil);
+
+ return IRQ_HANDLED;
+}
+
+/* DEBUG
+ * There is a subtle hardware bug that causes an IRQ to be raised when it
+ * should be masked. It is quite rare and hard to debug.
+ *
+ * Catch the IRQ issue if it happens and print everything we can.
+ */
+static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
+{
+ if (!test_bit(wil_status_irqen, &wil->status)) {
+ u32 icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_rx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_tx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_misc = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
+ "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
+ pseudo_cause,
+ icm_rx, icr_rx, imv_rx,
+ icm_tx, icr_tx, imv_tx,
+ icm_misc, icr_misc, imv_misc);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t wil6210_hardirq(int irq, void *cookie)
+{
+ irqreturn_t rc = IRQ_HANDLED;
+ struct wil6210_priv *wil = cookie;
+ u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+
+ /**
+ * pseudo_cause is Clear-On-Read, no need to ACK
+ */
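+	/* all-ones in the low byte typically means the device is gone (PCI read returned 0xff) */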
+ if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+ return IRQ_NONE;
+
+ /* FIXME: IRQ mask debug */
+ if (wil6210_debug_irq_mask(wil, pseudo_cause))
+ return IRQ_NONE;
+
+ wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
+
+ wil6210_mask_irq_pseudo(wil);
+
+ /* Discover real IRQ cause
+ * There are 2 possible phases for every IRQ:
+ * - hard IRQ handler called right here
+ * - threaded handler called later
+ *
+ * Hard IRQ handler reads and clears ISR.
+ *
+ * If threaded handler requested, hard IRQ handler
+ * returns IRQ_WAKE_THREAD and saves ISR register value
+ * for the threaded handler use.
+ *
+ * voting for wake thread - need at least 1 vote
+ */
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
+ (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
+ (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
+ (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ /* if thread is requested, it will unmask IRQ */
+ if (rc != IRQ_WAKE_THREAD)
+ wil6210_unmask_irq_pseudo(wil);
+
+ return rc;
+}
+
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ /*
+ * IRQ's are in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+
+ rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
+ WIL_NAME"_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
+ WIL_NAME"_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME"_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+ /* error branch */
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
+ if (rc)
+ return rc;
+
+ wil6210_enable_irq(wil);
+
+ return 0;
+}
+
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
+{
+ wil6210_disable_irq(wil);
+ free_irq(irq, wil);
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
+}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
new file mode 100644
index 000000000000..761c389586d4
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/ieee80211.h>
+#include <linux/wireless.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+
+/*
+ * Due to a hardware issue,
+ * one has to read/write to/from the NIC in 32-bit chunks;
+ * regular memcpy_fromio and siblings will
+ * not work on 64-bit platforms - they use 64-bit transactions.
+ *
+ * Force 32-bit transactions to enable the NIC on 64-bit platforms.
+ *
+ * To avoid byte swapping on big endian hosts, __raw_{read|write}l
+ * should be used - {read|write}l would swap bytes to present the
+ * little-endian PCI value in host endianness.
+ */
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count)
+{
+ u32 *d = dst;
+ const volatile u32 __iomem *s = src;
+
+	/* size_t is unsigned; the +4 bias keeps the loop from wrapping when count is not a multiple of 4 */
+ for (count += 4; count > 4; count -= 4)
+ *d++ = __raw_readl(s++);
+}
+
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count)
+{
+ volatile u32 __iomem *d = dst;
+ const u32 *s = src;
+
+ for (count += 4; count > 4; count -= 4)
+ __raw_writel(*s++, d++);
+}
+
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ uint i;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_link_off(wil);
+ clear_bit(wil_status_fwconnected, &wil->status);
+
+ switch (wdev->sme_state) {
+ case CFG80211_SME_CONNECTED:
+ cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
+ NULL, 0, GFP_KERNEL);
+ break;
+ case CFG80211_SME_CONNECTING:
+ cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
+ wil_vring_fini_tx(wil, i);
+
+ clear_bit(wil_status_dontscan, &wil->status);
+}
+
+static void wil_disconnect_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work,
+ struct wil6210_priv, disconnect_worker);
+
+ _wil6210_disconnect(wil, NULL);
+}
+
+static void wil_connect_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg_misc(wil, "Connect timeout\n");
+
+ /* reschedule to thread context - disconnect won't
+ * run from atomic context
+ */
+ schedule_work(&wil->disconnect_worker);
+}
+
+static void wil_cache_mbox_regs(struct wil6210_priv *wil)
+{
+	/* make a shadow copy of registers that should not change at run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+}
+
+int wil_priv_init(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_init(&wil->mutex);
+ mutex_init(&wil->wmi_mutex);
+
+ init_completion(&wil->wmi_ready);
+
+ wil->pending_connect_cid = -1;
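+	/* fires if the firmware does not report connection completion in time */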
+ setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+
+ INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker);
+ INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
+ INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+
+ INIT_LIST_HEAD(&wil->pending_wmi_ev);
+ spin_lock_init(&wil->wmi_ev_lock);
+
+ wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi");
+ if (!wil->wmi_wq)
+ return -EAGAIN;
+
+ wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect");
+ if (!wil->wmi_wq_conn) {
+ destroy_workqueue(wil->wmi_wq);
+ return -EAGAIN;
+ }
+
+ wil_cache_mbox_regs(wil);
+
+ return 0;
+}
+
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ del_timer_sync(&wil->connect_timer);
+ _wil6210_disconnect(wil, bssid);
+}
+
+void wil_priv_deinit(struct wil6210_priv *wil)
+{
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL);
+ wmi_event_flush(wil);
+ destroy_workqueue(wil->wmi_wq_conn);
+ destroy_workqueue(wil->wmi_wq);
+}
+
+static void wil_target_reset(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "Resetting...\n");
+
+ /* register write */
+#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
+ /* register set = read, OR, write */
+#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
+ wil->csr + HOSTADDR(a))
+
+ /* hpal_perst_from_pad_src_n_mask */
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
+ /* car_perst_rst_src_n_mask */
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+
+ W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
+ W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+
+ msleep(100);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+
+ msleep(100);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ msleep(2000);
+
+ W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
+
+ msleep(2000);
+
+ wil_dbg_misc(wil, "Reset completed\n");
+
+#undef W
+#undef S
+}
+
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
+{
+ le32_to_cpus(&r->base);
+ le16_to_cpus(&r->entry_size);
+ le16_to_cpus(&r->size);
+ le32_to_cpus(&r->tail);
+ le32_to_cpus(&r->head);
+}
+
+static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
+{
+ ulong to = msecs_to_jiffies(1000);
+ ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
+ if (0 == left) {
+ wil_err(wil, "Firmware not ready\n");
+ return -ETIME;
+ } else {
+ wil_dbg_misc(wil, "FW ready after %d ms\n",
+ jiffies_to_msecs(to-left));
+ }
+ return 0;
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+int wil_reset(struct wil6210_priv *wil)
+{
+ int rc;
+
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL);
+
+ wil6210_disable_irq(wil);
+ wil->status = 0;
+
+ wmi_event_flush(wil);
+
+ flush_workqueue(wil->wmi_wq_conn);
+ flush_workqueue(wil->wmi_wq);
+
+ /* TODO: put MAC in reset */
+ wil_target_reset(wil);
+
+ /* init after reset */
+ wil->pending_connect_cid = -1;
+ INIT_COMPLETION(wil->wmi_ready);
+
+ wil_cache_mbox_regs(wil);
+
+ /* TODO: release MAC reset */
+ wil6210_enable_irq(wil);
+
+ /* we just started MAC, wait for FW ready */
+ rc = wil_wait_for_fw_ready(wil);
+
+ return rc;
+}
+
+
+void wil_link_on(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+void wil_link_off(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+}
+
+static int __wil_up(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct ieee80211_channel *channel = wdev->preset_chandef.chan;
+ int rc;
+ int bi;
+ u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ rc = wil_reset(wil);
+ if (rc)
+ return rc;
+
+	/* FIXME Firmware works now in PBSS mode (ToDS=0, FromDS=0) */
+ wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ wil_dbg_misc(wil, "type: STATION\n");
+ bi = 0;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_AP:
+ wil_dbg_misc(wil, "type: AP\n");
+ bi = 100;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_dbg_misc(wil, "type: P2P_CLIENT\n");
+ bi = 0;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ wil_dbg_misc(wil, "type: P2P_GO\n");
+ bi = 100;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ wil_dbg_misc(wil, "type: Monitor\n");
+ bi = 0;
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Apply profile in the following order: */
+ /* SSID and channel for the AP */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (wdev->ssid_len == 0) {
+ wil_err(wil, "SSID not set\n");
+ return -EINVAL;
+ }
+ wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
+ if (channel)
+ wmi_set_channel(wil, channel->hw_value);
+ break;
+ default:
+ break;
+ }
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ /* Set up beaconing if required. */
+ rc = wmi_set_bcon(wil, bi, wmi_nettype);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ wil_rx_init(wil);
+
+ return 0;
+}
+
+int wil_up(struct wil6210_priv *wil)
+{
+ int rc;
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
+
+static int __wil_down(struct wil6210_priv *wil)
+{
+ if (wil->scan_request) {
+ cfg80211_scan_done(wil->scan_request, true);
+ wil->scan_request = NULL;
+ }
+
+ wil6210_disconnect(wil, NULL);
+ wil_rx_fini(wil);
+
+ return 0;
+}
+
+int wil_down(struct wil6210_priv *wil)
+{
+ int rc;
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_down(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
new file mode 100644
index 000000000000..8ce2e33dce20
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+
+#include "wil6210.h"
+
+static int wil_open(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ return wil_up(wil);
+}
+
+static int wil_stop(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ return wil_down(wil);
+}
+
+static const struct net_device_ops wil_netdev_ops = {
+ .ndo_open = wil_open,
+ .ndo_stop = wil_stop,
+ .ndo_start_xmit = wil_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wil6210_priv *wil;
+ struct ieee80211_channel *ch;
+ int rc = 0;
+
+ wdev = wil_cfg80211_init(dev);
+ if (IS_ERR(wdev)) {
+ dev_err(dev, "wil_cfg80211_init failed\n");
+ return wdev;
+ }
+
+ wil = wdev_to_wil(wdev);
+ wil->csr = csr;
+ wil->wdev = wdev;
+
+ rc = wil_priv_init(wil);
+ if (rc) {
+ dev_err(dev, "wil_priv_init failed\n");
+ goto out_wdev;
+ }
+
+ wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
+ /* default monitor channel */
+ ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+ cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+
+ ndev = alloc_netdev(0, "wlan%d", ether_setup);
+ if (!ndev) {
+ dev_err(dev, "alloc_netdev_mqs failed\n");
+ rc = -ENOMEM;
+ goto out_priv;
+ }
+
+ ndev->netdev_ops = &wil_netdev_ops;
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+
+ wil_link_off(wil);
+
+ return wil;
+
+ out_priv:
+ wil_priv_deinit(wil);
+
+ out_wdev:
+ wil_wdev_free(wil);
+
+ return ERR_PTR(rc);
+}
+
+void wil_if_free(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ if (!ndev)
+ return;
+
+ free_netdev(ndev);
+ wil_priv_deinit(wil);
+ wil_wdev_free(wil);
+}
+
+int wil_if_add(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ int rc;
+
+ rc = register_netdev(ndev);
+ if (rc < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
+ return rc;
+ }
+
+ wil_link_off(wil);
+
+ return 0;
+}
+
+void wil_if_remove(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ unregister_netdev(ndev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
new file mode 100644
index 000000000000..81c35c6e3832
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+
+static int use_msi = 1;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi,
+ " Use MSI interrupt: "
+ "0 - don't, 1 - (default) - single, or 3");
+
+/* Bus ops */
+static int wil_if_pcie_enable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+ int rc;
+
+ pci_set_master(pdev);
+
+ /*
+ * how many MSI interrupts to request?
+ */
+ switch (use_msi) {
+ case 3:
+ case 1:
+ case 0:
+ break;
+ default:
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n",
+ use_msi);
+ use_msi = 1;
+ }
+ wil->n_msi = use_msi;
+ if (wil->n_msi) {
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
+ rc = pci_enable_msi_block(pdev, wil->n_msi);
+ if (rc && (wil->n_msi == 3)) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ wil->n_msi = 1;
+ rc = pci_enable_msi_block(pdev, wil->n_msi);
+ }
+ if (rc) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ wil->n_msi = 0;
+ }
+ } else {
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+ }
+
+ rc = wil6210_init_irq(wil, pdev->irq);
+ if (rc)
+ goto stop_master;
+
+ /* need reset here to obtain MAC */
+ rc = wil_reset(wil);
+ if (rc)
+ goto release_irq;
+
+ return 0;
+
+ release_irq:
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ stop_master:
+ pci_clear_master(pdev);
+ return rc;
+}
+
+static int wil_if_pcie_disable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+
+ pci_clear_master(pdev);
+ /* disable and release IRQ */
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ /* TODO: disable HW */
+
+ return 0;
+}
+
+static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct wil6210_priv *wil;
+ struct device *dev = &pdev->dev;
+ void __iomem *csr;
+ int rc;
+
+ /* check HW */
+ dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+ if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
+ dev_err(&pdev->dev, "Not " WIL_NAME "? "
+ "BAR0 size is %lu while expecting %lu\n",
+ (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+ return -ENODEV;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ return -ENODEV;
+ }
+ /* rollback to err_disable_pdev */
+
+ rc = pci_request_region(pdev, 0, WIL_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_region failed\n");
+ goto err_disable_pdev;
+ }
+ /* rollback to err_release_reg */
+
+ csr = pci_ioremap_bar(pdev, 0);
+ if (!csr) {
+ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ rc = -ENODEV;
+ goto err_release_reg;
+ }
+ /* rollback to err_iounmap */
+ dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+
+ wil = wil_if_alloc(dev, csr);
+ if (IS_ERR(wil)) {
+ rc = (int)PTR_ERR(wil);
+ dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+ goto err_iounmap;
+ }
+ /* rollback to if_free */
+
+ pci_set_drvdata(pdev, wil);
+ wil->pdev = pdev;
+
+ /* FW should raise IRQ when ready */
+ rc = wil_if_pcie_enable(wil);
+ if (rc) {
+ wil_err(wil, "Enable device failed\n");
+ goto if_free;
+ }
+ /* rollback to bus_disable */
+
+ rc = wil_if_add(wil);
+ if (rc) {
+ wil_err(wil, "wil_if_add failed: %d\n", rc);
+ goto bus_disable;
+ }
+
+ wil6210_debugfs_init(wil);
+
+ /* check FW is alive */
+ wmi_echo(wil);
+
+ return 0;
+
+ bus_disable:
+ wil_if_pcie_disable(wil);
+ if_free:
+ wil_if_free(wil);
+ err_iounmap:
+ pci_iounmap(pdev, csr);
+ err_release_reg:
+ pci_release_region(pdev, 0);
+ err_disable_pdev:
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+static void wil_pcie_remove(struct pci_dev *pdev)
+{
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil6210_debugfs_remove(wil);
+ wil_if_pcie_disable(wil);
+ wil_if_remove(wil);
+ wil_if_free(wil);
+ pci_iounmap(pdev, wil->csr);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
+ { PCI_DEVICE(0x1ae9, 0x0301) },
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+
+static struct pci_driver wil6210_driver = {
+ .probe = wil_pcie_probe,
+ .remove = wil_pcie_remove,
+ .id_table = wil6210_pcie_ids,
+ .name = WIL_NAME,
+};
+
+module_pci_driver(wil6210_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
new file mode 100644
index 000000000000..d1315b442375
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
+#include <net/ieee80211_radiotap.h>
+#include <linux/if_arp.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+
+static bool rtap_include_phy_info;
+module_param(rtap_include_phy_info, bool, S_IRUGO);
+MODULE_PARM_DESC(rtap_include_phy_info,
+ " Include PHY info in the radiotap header, default - no");
+
+static inline int wil_vring_is_empty(struct vring *vring)
+{
+ return vring->swhead == vring->swtail;
+}
+
+static inline u32 wil_vring_next_tail(struct vring *vring)
+{
+ return (vring->swtail + 1) % vring->size;
+}
+
+static inline void wil_vring_advance_head(struct vring *vring, int n)
+{
+ vring->swhead = (vring->swhead + n) % vring->size;
+}
+
+static inline int wil_vring_is_full(struct vring *vring)
+{
+ return wil_vring_next_tail(vring) == vring->swhead;
+}
+/*
+ * Available space in Tx Vring
+ */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+ u32 swhead = vring->swhead;
+ u32 swtail = vring->swtail;
+ int used = (vring->size + swhead - swtail) % vring->size;
+
+ return vring->size - used - 1;
+}
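+
+/*
+ * Example: for a 128-entry ring with swhead = 5 and swtail = 120,
+ * used = (128 + 5 - 120) % 128 = 13 and avail = 128 - 13 - 1 = 114;
+ * one slot is always left unused to tell a full ring from an empty one.
+ */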
+
+static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+ uint i;
+
+ BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
+
+ vring->swhead = 0;
+ vring->swtail = 0;
+ vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+ if (!vring->ctx) {
+ vring->va = NULL;
+ return -ENOMEM;
+ }
+ /*
+ * vring->va should be aligned on its size rounded up to power of 2
+ * This is guaranteed by dma_alloc_coherent()
+ */
+ vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
+ if (!vring->va) {
+ wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
+ vring->size);
+ kfree(vring->ctx);
+ vring->ctx = NULL;
+ return -ENOMEM;
+ }
+ /* initially, all descriptors are SW owned
+ * For Tx and Rx, the ownership bit is at the same location,
+ * thus either descriptor layout can be used to set it
+ */
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &(vring->va[i].tx);
+ d->dma.status = TX_DMA_STATUS_DU;
+ }
+
+ wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
+ vring->va, (unsigned long long)vring->pa, vring->ctx);
+
+ return 0;
+}
+
+static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
+ int tx)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+
+ while (!wil_vring_is_empty(vring)) {
+ if (tx) {
+ volatile struct vring_tx_desc *d =
+ &vring->va[vring->swtail].tx;
+ dma_addr_t pa = d->dma.addr_low |
+ ((u64)d->dma.addr_high << 32);
+ struct sk_buff *skb = vring->ctx[vring->swtail];
+ if (skb) {
+ dma_unmap_single(dev, pa, d->dma.length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ vring->ctx[vring->swtail] = NULL;
+ } else {
+ dma_unmap_page(dev, pa, d->dma.length,
+ DMA_TO_DEVICE);
+ }
+ vring->swtail = wil_vring_next_tail(vring);
+ } else { /* rx */
+ volatile struct vring_rx_desc *d =
+ &vring->va[vring->swtail].rx;
+ dma_addr_t pa = d->dma.addr_low |
+ ((u64)d->dma.addr_high << 32);
+ struct sk_buff *skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, d->dma.length,
+ DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ wil_vring_advance_head(vring, 1);
+ }
+ }
+ dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
+ kfree(vring->ctx);
+ vring->pa = 0;
+ vring->va = NULL;
+ vring->ctx = NULL;
+}
+
+/**
+ * Allocate one skb for Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+ u32 i, int headroom)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = RX_BUF_LEN;
+ volatile struct vring_rx_desc *d = &(vring->va[i].rx);
+ dma_addr_t pa;
+
+ /* TODO align */
+ struct sk_buff *skb = dev_alloc_skb(sz + headroom);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, sz);
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ d->dma.addr_low = lower_32_bits(pa);
+ d->dma.addr_high = (u16)upper_32_bits(pa);
+ /* ip_length don't care */
+ /* b11 don't care */
+ /* error don't care */
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = sz;
+ vring->ctx[i] = skb;
+
+ return 0;
+}
+
+/**
+ * Adds radiotap header
+ *
+ * Any error is indicated as "Bad FCS"
+ *
+ * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
+ * - Rx descriptor: 32 bytes
+ * - Phy info
+ */
+static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
+ struct sk_buff *skb,
+ volatile struct vring_rx_desc *d)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct wil6210_rtap {
+ struct ieee80211_radiotap_header rthdr;
+ /* fields should be in the order of bits in rthdr.it_present */
+ /* flags */
+ u8 flags;
+ /* channel */
+ __le16 chnl_freq __aligned(2);
+ __le16 chnl_flags;
+ /* MCS */
+ u8 mcs_present;
+ u8 mcs_flags;
+ u8 mcs_index;
+ } __packed;
+ struct wil6210_rtap_vendor {
+ struct wil6210_rtap rtap;
+ /* vendor */
+ u8 vendor_oui[3] __aligned(2);
+ u8 vendor_ns;
+ __le16 vendor_skip;
+ u8 vendor_data[0];
+ } __packed;
+ struct wil6210_rtap_vendor *rtap_vendor;
+ int rtap_len = sizeof(struct wil6210_rtap);
+ int phy_length = 0; /* phy info header size, bytes */
+ static char phy_data[128];
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ if (rtap_include_phy_info) {
+ rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
+ /* calculate additional length */
+ if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
+ /**
+ * PHY info starts at an 8-byte boundary and consists of
+ * 8-byte lines; the last line may be partially written
+ * (HW bug), thus FW is configured to report one excessive
+ * last line. The driver skips this last line.
+ */
+ int len = min_t(int, 8 + sizeof(phy_data),
+ wil_rxdesc_phy_length(d));
+ if (len > 8) {
+ void *p = skb_tail_pointer(skb);
+ void *pa = PTR_ALIGN(p, 8);
+ if (skb_tailroom(skb) >= len + (pa - p)) {
+ phy_length = len - 8;
+ memcpy(phy_data, pa, phy_length);
+ }
+ }
+ }
+ rtap_len += phy_length;
+ }
+
+ if (skb_headroom(skb) < rtap_len &&
+ pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
+ wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ return;
+ }
+
+ rtap_vendor = (void *)skb_push(skb, rtap_len);
+ memset(rtap_vendor, 0, rtap_len);
+
+ rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
+ rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
+ (1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_MCS));
+ if (d->dma.status & RX_DMA_STATUS_ERROR)
+ rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+ rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+ rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
+
+ rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+ rtap_vendor->rtap.mcs_flags = 0;
+ rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
+
+ if (rtap_include_phy_info) {
+ rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
+ /* OUI for Wilocity 04:ce:14 */
+ rtap_vendor->vendor_oui[0] = 0x04;
+ rtap_vendor->vendor_oui[1] = 0xce;
+ rtap_vendor->vendor_oui[2] = 0x14;
+ rtap_vendor->vendor_ns = 1;
+ /* Rx descriptor + PHY data */
+ rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
+ phy_length);
+ memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
+ memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
+ phy_length);
+ }
+}
+
+/*
+ * Fast in-place swap of two 16-bit values, without a temporary
+ */
+static void wil_swap_u16(u16 *a, u16 *b)
+{
+ *a ^= *b;
+ *b ^= *a;
+ *a ^= *b;
+}
+
+static void wil_swap_ethaddr(void *data)
+{
+ struct ethhdr *eth = data;
+ u16 *s = (u16 *)eth->h_source;
+ u16 *d = (u16 *)eth->h_dest;
+
+ wil_swap_u16(s++, d++);
+ wil_swap_u16(s++, d++);
+ wil_swap_u16(s, d);
+}
+
+/**
+ * reap 1 frame from @swhead
+ *
+ * Safe to call from IRQ
+ */
+static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
+ struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+ volatile struct vring_rx_desc *d;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ unsigned int sz = RX_BUF_LEN;
+ u8 ftype;
+ u8 ds_bits;
+
+ if (wil_vring_is_empty(vring))
+ return NULL;
+
+ d = &(vring->va[vring->swhead].rx);
+ if (!(d->dma.status & RX_DMA_STATUS_DU)) {
+ /* not an error, we just reached the end of the Rx done area */
+ return NULL;
+ }
+
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ skb_trim(skb, d->dma.length);
+
+ wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+
+ /* use radiotap header only if required */
+ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
+ wil_rx_add_radiotap_header(wil, skb, d);
+
+ wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ wil_vring_advance_head(vring, 1);
+
+ /* no extra checks if in sniffer mode */
+ if (ndev->type != ARPHRD_ETHER)
+ return skb;
+ /*
+ * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
+ * The driver recognizes them by the frame type found in the Rx
+ * descriptor. If the type is not data, it is an 802.11 frame as is
+ */
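+ /* the 2-bit type from the descriptor corresponds to FC bits 3:2,
+  * hence the shift by 2 before comparing with IEEE80211_FTYPE_*
+  */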
+ ftype = wil_rxdesc_ftype(d) << 2;
+ if (ftype != IEEE80211_FTYPE_DATA) {
+ wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
+ /* TODO: process it */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (skb->len < ETH_HLEN) {
+ wil_err(wil, "Short frame, len = %d\n", skb->len);
+ /* TODO: process it (i.e. BAR) */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ ds_bits = wil_rxdesc_ds_bits(d);
+ if (ds_bits == 1) {
+ /*
+ * HW bug - in ToDS mode, i.e. Rx on AP side,
+ * addresses get swapped
+ */
+ wil_swap_ethaddr(skb->data);
+ }
+
+ return skb;
+}
+
+/**
+ * allocate and fill up to @count buffers in rx ring
+ * buffers posted at @swtail
+ */
+static int wil_rx_refill(struct wil6210_priv *wil, int count)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ u32 next_tail;
+ int rc = 0;
+ int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
+ WIL6210_RTAP_SIZE : 0;
+
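+ /* post buffers at swtail; stop after @count buffers, or when advancing
+  * swtail would make it meet swhead - one slot is always left unused
+  */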
+ for (; next_tail = wil_vring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
+ rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
+ if (rc) {
+ wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
+ rc, v->swtail);
+ break;
+ }
+ }
+ iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+
+ return rc;
+}
+
+/*
+ * Pass Rx packet to the netif. Update statistics.
+ */
+static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+{
+ int rc;
+ unsigned int len = skb->len;
+
+ skb_orphan(skb);
+
+ if (in_interrupt())
+ rc = netif_rx(skb);
+ else
+ rc = netif_rx_ni(skb);
+
+ if (likely(rc == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+}
+
+/**
+ * Process all completed skb's from Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_rx_handle(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ struct sk_buff *skb;
+
+ if (!v->va) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+ while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ skb->dev = ndev;
+ skb_reset_mac_header(skb);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+
+ } else {
+ skb->protocol = eth_type_trans(skb, ndev);
+ }
+
+ wil_netif_rx_any(skb, ndev);
+ }
+ wil_rx_refill(wil, v->size);
+}
+
+int wil_rx_init(struct wil6210_priv *wil)
+{
+ struct vring *vring = &wil->vring_rx;
+ int rc;
+
+ vring->size = WIL6210_RX_RING_SIZE;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ return rc;
+
+ rc = wmi_rx_chain_add(wil, vring);
+ if (rc)
+ goto err_free;
+
+ rc = wil_rx_refill(wil, vring->size);
+ if (rc)
+ goto err_free;
+
+ return 0;
+ err_free:
+ wil_vring_free(wil, vring, 0);
+
+ return rc;
+}
+
+void wil_rx_fini(struct wil6210_priv *wil)
+{
+ struct vring *vring = &wil->vring_rx;
+
+ if (vring->va)
+ wil_vring_free(wil, vring, 0);
+}
+
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid)
+{
+ int rc;
+ struct wmi_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
+ },
+ .ringid = id,
+ .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .mac_ctrl = 0,
+ .to_resolution = 0,
+ .agg_max_wsize = 16,
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ },
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply;
+ struct vring *vring = &wil->vring_tx[id];
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ cmd.vring_cfg.tx_sw_ring.ring_size = cpu_to_le16(vring->size);
+
+ rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto out_free;
+ }
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+ return 0;
+ out_free:
+ wil_vring_free(wil, vring, 1);
+ out:
+
+ return rc;
+}
+
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct vring *vring = &wil->vring_tx[id];
+
+ if (!vring->va)
+ return;
+
+ wil_vring_free(wil, vring, 1);
+}
+
+static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v = &wil->vring_tx[0];
+
+ if (v->va)
+ return v;
+
+ return NULL;
+}
+
+static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
+ dma_addr_t pa, u32 len)
+{
+ d->dma.addr_low = lower_32_bits(pa);
+ d->dma.addr_high = (u16)upper_32_bits(pa);
+ d->dma.ip_length = 0;
+ /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = len;
+ d->dma.d0 = 0;
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* use dst index 0 */
+ d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
+ (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+ volatile struct vring_tx_desc *d;
+ u32 swhead = vring->swhead;
+ int avail = wil_vring_avail_tx(vring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ uint f;
+ int vring_index = vring - wil->vring_tx;
+ uint i = swhead;
+ dma_addr_t pa;
+
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+
+ if (avail < vring->size/8)
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+ if (avail < 1 + nr_frags) {
+ wil_err(wil, "Tx ring full. No space for %d fragments\n",
+ 1 + nr_frags);
+ return -ENOMEM;
+ }
+ d = &(vring->va[i].tx);
+
+ /* FIXME FW can accept only unicast frames for the peer */
+ memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
+
+ pa = dma_map_single(dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
+ skb->data, (unsigned long long)pa);
+ wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ if (unlikely(dma_mapping_error(dev, pa)))
+ return -EINVAL;
+ /* 1-st segment */
+ wil_tx_desc_map(d, pa, skb_headlen(skb));
+ d->mac.d[2] |= ((nr_frags + 1) <<
+ MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ /* middle segments */
+ for (f = 0; f < nr_frags; f++) {
+ const struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[f];
+ int len = skb_frag_size(frag);
+ i = (swhead + f + 1) % vring->size;
+ d = &(vring->va[i].tx);
+ pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa)))
+ goto dma_error;
+ wil_tx_desc_map(d, pa, len);
+ vring->ctx[i] = NULL;
+ }
+ /* for the last seg only */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
+ d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+ d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+
+ wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ /* advance swhead */
+ wil_vring_advance_head(vring, nr_frags + 1);
+ wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+ iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i] = skb_get(skb);
+
+ return 0;
+ dma_error:
+ /* unmap what we have mapped */
+ /* Note: increment @f to operate with positive index */
+ for (f++; f > 0; f--) {
+ i = (swhead + f) % vring->size;
+ d = &(vring->va[i].tx);
+ d->dma.status = TX_DMA_STATUS_DU;
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ if (vring->ctx[i])
+ dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ }
+
+ return -EINVAL;
+}
+
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct vring *vring;
+ int rc;
+
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+ if (!test_bit(wil_status_fwready, &wil->status)) {
+ wil_err(wil, "FW not ready\n");
+ goto drop;
+ }
+ if (!test_bit(wil_status_fwconnected, &wil->status)) {
+ wil_err(wil, "FW not connected\n");
+ goto drop;
+ }
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ wil_err(wil, "Xmit in monitor mode not supported\n");
+ goto drop;
+ }
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ rc = wmi_tx_eapol(wil, skb);
+ } else {
+ /* find vring */
+ vring = wil_find_tx_vring(wil, skb);
+ if (!vring) {
+ wil_err(wil, "No Tx VRING available\n");
+ goto drop;
+ }
+ /* set up vring entry */
+ rc = wil_tx_vring(wil, vring, skb);
+ }
+ switch (rc) {
+ case 0:
+ /* statistics will be updated on the tx_complete */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ case -ENOMEM:
+ return NETDEV_TX_BUSY;
+ default:
+ break; /* goto drop; */
+ }
+ drop:
+ netif_tx_stop_all_queues(ndev);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+
+ return NET_XMIT_DROP;
+}
+
+/**
+ * Clean up transmitted skb's from the Tx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_tx_complete(struct wil6210_priv *wil, int ringid)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct device *dev = wil_to_dev(wil);
+ struct vring *vring = &wil->vring_tx[ringid];
+
+ if (!vring->va) {
+ wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
+ return;
+ }
+
+ wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
+
+ while (!wil_vring_is_empty(vring)) {
+ volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
+ dma_addr_t pa;
+ struct sk_buff *skb;
+ if (!(d->dma.status & TX_DMA_STATUS_DU))
+ break;
+
+ wil_dbg_txrx(wil,
+ "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ vring->swtail, d->dma.length, d->dma.status,
+ d->dma.error);
+ wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ skb = vring->ctx[vring->swtail];
+ if (skb) {
+ if (d->dma.error == 0) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ } else {
+ ndev->stats.tx_errors++;
+ }
+
+ dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ vring->ctx[vring->swtail] = NULL;
+ } else {
+ dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ }
+ d->dma.addr_low = 0;
+ d->dma.addr_high = 0;
+ d->dma.length = 0;
+ d->dma.status = TX_DMA_STATUS_DU;
+ vring->swtail = wil_vring_next_tail(vring);
+ }
+ if (wil_vring_avail_tx(vring) > vring->size/4)
+ netif_tx_wake_all_queues(wil_to_ndev(wil));
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
new file mode 100644
index 000000000000..45a61f597c5c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_H
+#define WIL6210_TXRX_H
+
+#define BUF_SW_OWNED (1)
+#define BUF_HW_OWNED (0)
+
+/* size of max. Rx packet */
+#define RX_BUF_LEN (2048)
+#define TX_BUF_LEN (2048)
+/* how many bytes to reserve for rtap header? */
+#define WIL6210_RTAP_SIZE (128)
+
+/* Tx/Rx path */
+/*
+ * Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrup_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..29 : reserved1:2
+ * bit 30 : reserved2:1
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5.. 7 : reserved0:3
+ * bit 8..13 : reserved1:6
+ * bit 14 : reserved2:1
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..31 : reserved0:10
+ * [dword 3]
+ * bit 0.. 31: ucode_cmd:32
+ */
+struct vring_tx_mac {
+ u32 d[3];
+ u32 ucode_cmd;
+} __packed;
+
+/* TX MAC Dword 0 */
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_POS 0
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_LEN 10
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_MSK 0x3FF
+
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_POS 10
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_MSK 0x400
+
+#define MAC_CFG_DESC_TX_0_STATUS_EN_POS 11
+#define MAC_CFG_DESC_TX_0_STATUS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_STATUS_EN_MSK 0x800
+
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_POS 12
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_LEN 2
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_MSK 0x3000
+
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_POS 14
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_LEN 1
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_MSK 0x4000
+
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_POS 15
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_LEN 1
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_POS 22
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_LEN 5
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_MSK 0x7C00000
+
+#define MAC_CFG_DESC_TX_0_MCS_EN_POS 27
+#define MAC_CFG_DESC_TX_0_MCS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_MCS_EN_MSK 0x8000000
+
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_POS 31
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_LEN 1
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_MSK 0x80000000
+
+/* TX MAC Dword 1 */
+#define MAC_CFG_DESC_TX_1_PKT_MODE_POS 0
+#define MAC_CFG_DESC_TX_1_PKT_MODE_LEN 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_MSK 0xF
+
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_POS 16
+#define MAC_CFG_DESC_TX_1_DST_INDEX_LEN 4
+#define MAC_CFG_DESC_TX_1_DST_INDEX_MSK 0xF0000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS 20
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_POS 21
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_LEN 2
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_MSK 0x600000
+
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_POS 23
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_MSK 0x800000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_POS 24
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_LEN 7
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_MSK 0x7F000000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_POS 31
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_MSK 0x80000000
+
+/* TX MAC Dword 2 */
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS 0
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_LEN 8
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_MSK 0xFF
+
+#define MAC_CFG_DESC_TX_2_RESERVED_POS 8
+#define MAC_CFG_DESC_TX_2_RESERVED_LEN 10
+#define MAC_CFG_DESC_TX_2_RESERVED_MSK 0x3FF00
+
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS 18
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_LEN 2
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_MSK 0xC0000
+
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS 20
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_POS 21
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_MSK 0x200000
+
+/* TX MAC Dword 3 */
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_POS 0
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_LEN 32
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_MSK 0xFFFFFFFF
+
+/* TX DMA Dword 0 */
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_POS 0
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_LEN 8
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_MSK 0xFF
+
+#define DMA_CFG_DESC_TX_0_CMD_EOP_POS 8
+#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
+
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS 11
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_LEN 2
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_MSK 0x1800
+
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS 13
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_MSK 0x2000
+
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS 14
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_MSK 0x4000
+
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS 15
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_MSK 0x8000
+
+#define DMA_CFG_DESC_TX_0_QID_POS 16
+#define DMA_CFG_DESC_TX_0_QID_LEN 5
+#define DMA_CFG_DESC_TX_0_QID_MSK 0x1F0000
+
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS 21
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_MSK 0x200000
+
+#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
+#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+
+
+#define TX_DMA_STATUS_DU BIT(0)
+
+struct vring_tx_dma {
+ u32 d0;
+ u32 addr_low;
+ u16 addr_high;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ u8 error; /* 0..2: err; 3..7: reserved; */
+ u8 status; /* 0: used; 1..7: reserved */
+ u16 length;
+} __packed;
+
+/*
+ * Rx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
+ * bit 4.. 6 : connection_id:3 :The Source index that was found during
+ * Parsing the TA. This field is used to define the source of the packet
+ * bit 7 : reserved:1
+ * bit 8.. 9 : mac_id:2 : The MAC virtual Ring number (always zero)
+ * bit 10..11 : frame_type:2 : The FC Control (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 12..15 : frame_subtype:4 : The FC Control (b7-4) - Frame Subtype
+ * bit 16..27 : seq_number:12 The received Sequence number field
+ * bit 28..31 : extended:4 extended subtype
+ * [dword 1]
+ * bit 0.. 3 : reserved
+ * bit 4.. 5 : key_id:2
+ * bit 6 : decrypt_bypass:1
+ * bit 7 : security:1
+ * bit 8.. 9 : ds_bits:2
+ * bit 10 : a_msdu_present:1 from qos header
+ * bit 11 : a_msdu_type:1 from qos header
+ * bit 12 : a_mpdu:1 part of AMPDU aggregation
+ * bit 13 : broadcast:1
+ * bit 14 : multicast:1
+ * bit 15 : reserved:1
+ * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
+ * is received from
+ * bit 21..24 : mcs:4
+ * bit 25..28 : mic_icr:4
+ * bit 29..31 : reserved:3
+ * [dword 2]
+ * bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received
+ * bit 3 : fc_protocol_ver:1 The FC Control (b0) - Protocol Version
+ * bit 4 : fc_order:1 The FC Control (b15) -Order
+ * bit 5.. 7 : qos_ack_policy:3 The QoS (b6-5) ack policy Field
+ * bit 8 : esop:1 The QoS (b4) ESOP field
+ * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
+ * bit 15 : qos_ac_constraint:1
+ * bit 16..31 : pn_15_0:16 low 2 bytes of PN
+ * [dword 3]
+ * bit 0..31 : pn_47_16:32 high 4 bytes of PN
+ */
+struct vring_rx_mac {
+ u32 d0;
+ u32 d1;
+ u16 w4;
+ u16 pn_15_0;
+ u32 pn_47_16;
+} __packed;
+
+/*
+ * Rx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length
+ * bit 8.. 9 : reserved:2
+ * bit 10 : cmd_dma_it:1
+ * bit 11..15 : reserved:5
+ * bit 16..29 : phy_info_length:14
+ * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1
+ * [dword 3]
+ * [byte 12] error
+ * [byte 13] status
+ * bit 0 : du:1
+ * bit 1 : eop:1
+ * bit 2 : error:1
+ * bit 3 : mi:1
+ * bit 4 : l3_identified:1
+ * bit 5 : l4_identified:1
+ * bit 6 : phy_info_included:1
+ * bit 7 : reserved:1
+ * [word 7] length
+ *
+ */
+
+#define RX_DMA_D0_CMD_DMA_IT BIT(10)
+
+#define RX_DMA_STATUS_DU BIT(0)
+#define RX_DMA_STATUS_ERROR BIT(2)
+#define RX_DMA_STATUS_PHY_INFO BIT(6)
+
+struct vring_rx_dma {
+ u32 d0;
+ u32 addr_low;
+ u16 addr_high;
+ u8 ip_length;
+ u8 b11;
+ u8 error;
+ u8 status;
+ u16 length;
+} __packed;
+
+struct vring_tx_desc {
+ struct vring_tx_mac mac;
+ struct vring_tx_dma dma;
+} __packed;
+
+struct vring_rx_desc {
+ struct vring_rx_mac mac;
+ struct vring_rx_dma dma;
+} __packed;
+
+union vring_desc {
+ struct vring_tx_desc tx;
+ struct vring_rx_desc rx;
+} __packed;
+
+static inline int wil_rxdesc_phy_length(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->dma.d0, 16, 29);
+}
+
+static inline int wil_rxdesc_mcs(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 21, 24);
+}
+
+static inline int wil_rxdesc_ds_bits(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 8, 9);
+}
+
+static inline int wil_rxdesc_ftype(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 10, 11);
+}
+
+#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
new file mode 100644
index 000000000000..aea961ff8f08
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_H__
+#define __WIL6210_H__
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+
+#include "dbg_hexdump.h"
+
+#define WIL_NAME "wil6210"
+
+/**
+ * extract bits [@b0:@b1] (inclusive) from the value @x
+ * @b0 must be <= @b1, otherwise the result is incorrect
+ */
+static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
+{
+ return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
+}
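+
+/* e.g. WIL_GET_BITS(0x01200000, 21, 24) == 0x9 (bits 24 and 21 are set) */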
+
+#define WIL6210_MEM_SIZE (2*1024*1024UL)
+
+#define WIL6210_RX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_MAX_TX_RINGS (24)
+
+/* Hardware definitions begin */
+
+/*
+ * Mapping
+ * RGF File | Host addr | FW addr
+ * | |
+ * user_rgf | 0x000000 | 0x880000
+ * dma_rgf | 0x001000 | 0x881000
+ * pcie_rgf | 0x002000 | 0x882000
+ * | |
+ */
+
+/* Where various structures placed in host address space */
+#define WIL6210_FW_HOST_OFF (0x880000UL)
+
+#define HOSTADDR(fwaddr) (fwaddr - WIL6210_FW_HOST_OFF)
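+
+/* e.g. RGF_USER_USER_ICR at FW address 0x880b4c is accessed through
+ * BAR0 offset HOSTADDR(0x880b4c) = 0x000b4c
+ */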
+
+/*
+ * Interrupt control registers block
+ *
+ * each interrupt controlled by the same bit in all registers
+ */
+struct RGF_ICR {
+ u32 ICC; /* Cause Control, RW: 0 - W1C, 1 - COR */
+ u32 ICR; /* Cause, W1C/COR depending on ICC */
+ u32 ICM; /* Cause masked (ICR & ~IMV), W1C/COR depending on ICC */
+ u32 ICS; /* Cause Set, WO */
+ u32 IMV; /* Mask, RW+S/C */
+ u32 IMS; /* Mask Set, write 1 to set */
+ u32 IMC; /* Mask Clear, write 1 to clear */
+} __packed;
+
+/* registers - FW addresses */
+#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
+ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
+#define RGF_USER_MAC_CPU_0 (0x8801fc)
+#define RGF_USER_USER_CPU_0 (0x8801e0)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
+
+#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
+ #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
+ #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
+ #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+
+#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
+ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
+ #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */
+#define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */
+ #define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0)
+#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
+ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
+ #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
+
+/* Interrupt moderation control */
+#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
+#define RGF_DMA_ITR_CNT_DATA (0x881c60)
+#define RGF_DMA_ITR_CNT_CRL (0x881C64)
+ #define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
+ #define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
+ #define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
+ #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+
+/* popular locations */
+#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
+#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
+ offsetof(struct RGF_ICR, ICS))
+#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
+
+/* ISR register bits */
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT(0)
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
+#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
+
+/* Hardware definitions end */
+
+struct wil6210_mbox_ring {
+ u32 base;
+ u16 entry_size; /* max. size of mbox entry, incl. all headers */
+ u16 size;
+ u32 tail;
+ u32 head;
+} __packed;
+
+struct wil6210_mbox_ring_desc {
+ __le32 sync;
+ __le32 addr;
+} __packed;
+
+/* at HOST_OFF_WIL6210_MBOX_CTL */
+struct wil6210_mbox_ctl {
+ struct wil6210_mbox_ring tx;
+ struct wil6210_mbox_ring rx;
+} __packed;
+
+struct wil6210_mbox_hdr {
+ __le16 seq;
+ __le16 len; /* payload, bytes after this header */
+ __le16 type;
+ u8 flags;
+ u8 reserved;
+} __packed;
+
+#define WIL_MBOX_HDR_TYPE_WMI (0)
+
+/* max. value for wil6210_mbox_hdr.len */
+#define MAX_MBOXITEM_SIZE (240)
+
+struct wil6210_mbox_hdr_wmi {
+ u8 reserved0[2];
+ __le16 id;
+ __le16 info1; /* bits [0..3] - device_id, rest - unused */
+ u8 reserved1[2];
+} __packed;
+
+struct pending_wmi_event {
+ struct list_head list;
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ u8 data[0];
+ } __packed event;
+};
+
+union vring_desc;
+
+struct vring {
+ dma_addr_t pa;
+ volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
+ u16 size; /* number of vring_desc elements */
+ u32 swtail;
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ void **ctx; /* void *ctx[size] - software context */
+};
+
+enum { /* for wil6210_priv.status */
+ wil_status_fwready = 0,
+ wil_status_fwconnected,
+ wil_status_dontscan,
+ wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+};
+
+struct pci_dev;
+
+struct wil6210_stats {
+ u64 tsf;
+ u32 snr;
+ u16 last_mcs_rx;
+ u16 bf_mcs; /* last BF, used for Tx */
+ u16 my_rx_sector;
+ u16 my_tx_sector;
+ u16 peer_rx_sector;
+ u16 peer_tx_sector;
+};
+
+struct wil6210_priv {
+ struct pci_dev *pdev;
+ int n_msi;
+ struct wireless_dev *wdev;
+ void __iomem *csr;
+ ulong status;
+ /* profile */
+ u32 monitor_flags;
+ u32 secure_pcp; /* create secure PCP? */
+ int sinfo_gen;
+ /* cached ISR registers */
+ u32 isr_misc;
+ /* mailbox related */
+ struct mutex wmi_mutex;
+ struct wil6210_mbox_ctl mbox_ctl;
+ struct completion wmi_ready;
+ u16 wmi_seq;
+ u16 reply_id; /**< wait for this WMI event */
+ void *reply_buf;
+ u16 reply_size;
+ struct workqueue_struct *wmi_wq; /* for deferred calls */
+ struct work_struct wmi_event_worker;
+ struct workqueue_struct *wmi_wq_conn; /* for connect worker */
+ struct work_struct wmi_connect_worker;
+ struct work_struct disconnect_worker;
+ struct timer_list connect_timer;
+ int pending_connect_cid;
+ struct list_head pending_wmi_ev;
+ /*
+ * protect pending_wmi_ev
+ * - fill in IRQ from wil6210_irq_misc,
+ * - consumed in thread by wmi_event_worker
+ */
+ spinlock_t wmi_ev_lock;
+ /* DMA related */
+ struct vring vring_rx;
+ struct vring vring_tx[WIL6210_MAX_TX_RINGS];
+ u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+ /* scan */
+ struct cfg80211_scan_request *scan_request;
+
+ struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+ /* statistics */
+ struct wil6210_stats stats;
+ /* debugfs */
+ struct dentry *debug;
+ struct debugfs_blob_wrapper fw_code_blob;
+ struct debugfs_blob_wrapper fw_data_blob;
+ struct debugfs_blob_wrapper fw_peri_blob;
+ struct debugfs_blob_wrapper uc_code_blob;
+ struct debugfs_blob_wrapper uc_data_blob;
+ struct debugfs_blob_wrapper rgf_blob;
+};
+
+#define wil_to_wiphy(i) (i->wdev->wiphy)
+#define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i)))
+#define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w))
+#define wil_to_wdev(i) (i->wdev)
+#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
+#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
+#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
+
+#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg)
+#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
+#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
+
+#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+
+#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count);
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count);
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr);
+void wil_if_free(struct wil6210_priv *wil);
+int wil_if_add(struct wil6210_priv *wil);
+void wil_if_remove(struct wil6210_priv *wil);
+int wil_priv_init(struct wil6210_priv *wil);
+void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil);
+void wil_link_on(struct wil6210_priv *wil);
+void wil_link_off(struct wil6210_priv *wil);
+int wil_up(struct wil6210_priv *wil);
+int wil_down(struct wil6210_priv *wil);
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr);
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
+void wmi_recv_cmd(struct wil6210_priv *wil);
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec);
+void wmi_connect_worker(struct work_struct *work);
+void wmi_event_worker(struct work_struct *work);
+void wmi_event_flush(struct wil6210_priv *wil);
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
+int wmi_set_channel(struct wil6210_priv *wil, int channel);
+int wmi_get_channel(struct wil6210_priv *wil, int *channel);
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr);
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key);
+int wmi_echo(struct wil6210_priv *wil);
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
+void wil6210_disable_irq(struct wil6210_priv *wil);
+void wil6210_enable_irq(struct wil6210_priv *wil);
+
+int wil6210_debugfs_init(struct wil6210_priv *wil);
+void wil6210_debugfs_remove(struct wil6210_priv *wil);
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev);
+void wil_wdev_free(struct wil6210_priv *wil);
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype);
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+
+int wil_rx_init(struct wil6210_priv *wil);
+void wil_rx_fini(struct wil6210_priv *wil);
+
+/* TX API */
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid);
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void wil_tx_complete(struct wil6210_priv *wil, int ringid);
+
+/* RX API */
+void wil_rx_handle(struct wil6210_priv *wil);
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type);
+
+#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
new file mode 100644
index 000000000000..0bb3b76b4b58
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -0,0 +1,1020 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+#include "wmi.h"
+
+/**
+ * WMI event receiving - theory of operations
+ *
+ * When the firmware is about to report a WMI event, it fills a memory area
+ * in the mailbox and raises the misc. IRQ. The threaded interrupt handler is
+ * invoked for the misc IRQ and calls @wmi_recv_cmd.
+ *
+ * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
+ * the event list @wil->pending_wmi_ev. Then, the work queue @wil->wmi_wq
+ * wakes up and handles events within @wmi_event_worker. Every event gets
+ * detached from the list, processed and deleted.
+ *
+ * The purpose of this mechanism is to release the IRQ thread; otherwise,
+ * if WMI event handling involved another WMI command flow, that second flow
+ * could never complete because the IRQ thread would be blocked.
+ */
+
+/**
+ * Addressing - theory of operations
+ *
+ * There are several buses present on the WIL6210 card.
+ * The same memory areas are visible at different addresses on
+ * the different buses. There are 3 main bus masters:
+ * - MAC CPU (ucode)
+ * - User CPU (firmware)
+ * - AHB (host)
+ *
+ * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
+ * AHB addresses starting from 0x880000
+ *
+ * Internally, the firmware uses addresses that allow faster access but
+ * are invisible from the host. To read from these addresses, an
+ * alternative AHB address must be used.
+ *
+ * Memory mapping
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xa80000 2Mb BAR0
+ * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM
+ * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH
+ */
+
+/**
+ * @fw_mapping provides the memory remapping table
+ */
+static const struct {
+ u32 from; /* linker address - from, inclusive */
+ u32 to; /* linker address - to, exclusive */
+ u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+} fw_mapping[] = {
+ {0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
+ {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
+ {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
+ {0x880000, 0x88a000, 0x880000}, /* various RGF */
+ {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
+ /*
+ * 920000..930000 ucode code RAM
+ * 930000..932000 ucode data RAM
+ */
+};
+
+/**
+ * Return the AHB address for a given firmware/ucode internal (linker) address
+ * @x - internal address
+ * If the address has no valid AHB mapping, return 0
+ */
+static u32 wmi_addr_remap(u32 x)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
+ return x + fw_mapping[i].host - fw_mapping[i].from;
+ }
+
+ return 0;
+}
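+
+/*
+ * Worked example (illustrative only): per @fw_mapping above, linker address
+ * 0x800100 falls into the 0x800000..0x808000 entry, so wmi_addr_remap()
+ * returns 0x800100 + 0x900000 - 0x800000 = 0x900100; an address such as
+ * 0x200000 matches no entry and yields 0, i.e. it has no valid AHB mapping.
+ */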
+
+/**
+ * Check address validity for WMI buffer; remap if needed
+ * @ptr - internal (linker) fw/ucode address
+ *
+ * A valid buffer must be DWORD aligned.
+ *
+ * Return the address for accessing the buffer from the host;
+ * if the buffer is not valid, return NULL.
+ */
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ u32 off;
+ u32 ptr = le32_to_cpu(ptr_);
+
+ if (ptr % 4)
+ return NULL;
+
+ ptr = wmi_addr_remap(ptr);
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+/**
+ * Check address validity
+ */
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
+{
+ u32 off;
+
+ if (ptr % 4)
+ return NULL;
+
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr)
+{
+ void __iomem *src = wmi_buffer(wil, ptr);
+ if (!src)
+ return -EINVAL;
+
+ wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
+
+ return 0;
+}
+
+static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ } __packed cmd = {
+ .hdr = {
+ .type = WIL_MBOX_HDR_TYPE_WMI,
+ .flags = 0,
+ .len = cpu_to_le16(sizeof(cmd.wmi) + len),
+ },
+ .wmi = {
+ .id = cpu_to_le16(cmdid),
+ .info1 = 0,
+ },
+ };
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
+ struct wil6210_mbox_ring_desc d_head;
+ u32 next_head;
+ void __iomem *dst;
+ void __iomem *head = wmi_addr(wil, r->head);
+ uint retry;
+
+ if (sizeof(cmd) + len > r->entry_size) {
+ wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
+ (int)(sizeof(cmd) + len), r->entry_size);
+ return -ERANGE;
+ }
+
+ might_sleep();
+
+ if (!test_bit(wil_status_fwready, &wil->status)) {
+ wil_err(wil, "FW not ready\n");
+ return -EAGAIN;
+ }
+
+ if (!head) {
+ wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
+ return -EINVAL;
+ }
+ /* read the Tx head until it is no longer busy */
+ for (retry = 5; retry > 0; retry--) {
+ wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
+ if (d_head.sync == 0)
+ break;
+ msleep(20);
+ }
+ if (d_head.sync != 0) {
+ wil_err(wil, "WMI head busy\n");
+ return -EBUSY;
+ }
+ /* next head */
+ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
+ wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+ /* wait until the FW finishes with the previous command */
+ for (retry = 5; retry > 0; retry--) {
+ r->tail = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.tail));
+ if (next_head != r->tail)
+ break;
+ msleep(20);
+ }
+ if (next_head == r->tail) {
+ wil_err(wil, "WMI ring full\n");
+ return -EBUSY;
+ }
+ dst = wmi_buffer(wil, d_head.addr);
+ if (!dst) {
+ wil_err(wil, "invalid WMI buffer: 0x%08x\n",
+ le32_to_cpu(d_head.addr));
+ return -EINVAL;
+ }
+ cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
+ /* set command */
+ wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+ sizeof(cmd), true);
+ wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
+ wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
+ /* mark entry as full */
+ iowrite32(1, wil->csr + HOSTADDR(r->head) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* advance next ptr */
+ iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.head));
+
+ /* interrupt to FW */
+ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+
+ return 0;
+}
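+
+/*
+ * Ring arithmetic sketch (illustrative, not part of the driver logic):
+ * with r->base = 0x1000, r->size = 0x40 and sizeof(d_head) = 8, a head of
+ * 0x1038 advances to 0x1000 + ((0x1038 - 0x1000 + 8) % 0x40) = 0x1000,
+ * i.e. the head wraps back to the ring base.
+ */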
+
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ int rc;
+
+ mutex_lock(&wil->wmi_mutex);
+ rc = __wmi_send(wil, cmdid, buf, len);
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+/*=== Event handlers ===*/
+static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_ready_event *evt = d;
+ u32 ver = le32_to_cpu(evt->sw_version);
+
+ wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
+ memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
+ }
+ snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
+ "%d", ver);
+}
+
+static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ wil_dbg_wmi(wil, "WMI: FW ready\n");
+
+ set_bit(wil_status_fwready, &wil->status);
+ /* reuse wmi_ready for the firmware ready indication */
+ complete(&wil->wmi_ready);
+}
+
+static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_rx_mgmt_packet_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int ch_no = data->info.channel+1;
+ u32 freq = ieee80211_channel_to_frequency(ch_no,
+ IEEE80211_BAND_60GHZ);
+ struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
+ /* TODO convert LE to CPU */
+ s32 signal = 0; /* TODO */
+ __le16 fc = rx_mgmt_frame->frame_control;
+ u32 d_len = le32_to_cpu(data->info.len);
+ u16 d_status = le16_to_cpu(data->info.status);
+
+ wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
+ data->info.channel, data->info.mcs, data->info.snr);
+ wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
+ le16_to_cpu(data->info.stype));
+ wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
+ data->info.qid, data->info.mid, data->info.cid);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
+ struct cfg80211_bss *bss;
+ u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+ u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+ u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+ const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+ size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+
+ bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
+ tsf, cap, bi, ie_buf, ie_len,
+ signal, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_wmi(wil, "Added BSS %pM\n",
+ rx_mgmt_frame->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss() failed\n");
+ }
+ }
+}
+
+static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ if (wil->scan_request) {
+ struct wmi_scan_complete_event *data = d;
+ bool aborted = (data->status != 0);
+
+ wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ cfg80211_scan_done(wil->scan_request, aborted);
+ wil->scan_request = NULL;
+ } else {
+ wil_err(wil, "SCAN_COMPLETE while not scanning\n");
+ }
+}
+
+static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_connect_event *evt = d;
+ int ch; /* channel number */
+ struct station_info sinfo;
+ u8 *assoc_req_ie, *assoc_resp_ie;
+ size_t assoc_req_ielen, assoc_resp_ielen;
+ /* capinfo(u16) + listen_interval(u16) + IEs */
+ const size_t assoc_req_ie_offset = sizeof(u16) * 2;
+ /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+ const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Connect event too short : %d bytes\n", len);
+ return;
+ }
+ if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
+ evt->assoc_resp_len) {
+ wil_err(wil,
+ "Connect event corrupted : %d != %d + %d + %d + %d\n",
+ len, (int)sizeof(*evt), evt->beacon_ie_len,
+ evt->assoc_req_len, evt->assoc_resp_len);
+ return;
+ }
+ ch = evt->channel + 1;
+ wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
+ evt->bssid, ch, evt->cid);
+ wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ evt->assoc_info, len - sizeof(*evt), true);
+
+ /* figure out the IEs */
+ assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
+ assoc_req_ie_offset];
+ assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
+ if (evt->assoc_req_len <= assoc_req_ie_offset) {
+ assoc_req_ie = NULL;
+ assoc_req_ielen = 0;
+ }
+
+ assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
+ evt->assoc_req_len +
+ assoc_resp_ie_offset];
+ assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
+ if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
+ assoc_resp_ie = NULL;
+ assoc_resp_ielen = 0;
+ }
+
+ if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+ wil_err(wil, "Not in connecting state\n");
+ return;
+ }
+ del_timer_sync(&wil->connect_timer);
+ cfg80211_connect_result(ndev, evt->bssid,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ sinfo.generation = wil->sinfo_gen++;
+
+ if (assoc_req_ie) {
+ sinfo.assoc_req_ies = assoc_req_ie;
+ sinfo.assoc_req_ies_len = assoc_req_ielen;
+ sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
+ }
+
+ cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+ }
+ set_bit(wil_status_fwconnected, &wil->status);
+
+ /* FIXME FW can transmit only ucast frames to peer */
+ /* FIXME real ring_id instead of hard coded 0 */
+ memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+
+ wil->pending_connect_cid = evt->cid;
+ queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker);
+}
+
+static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_disconnect_event *evt = d;
+
+ wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
+ evt->bssid,
+ evt->protocol_reason_status, evt->disconnect_reason);
+
+ wil->sinfo_gen++;
+
+ wil6210_disconnect(wil, evt->bssid);
+}
+
+static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_notify_req_done_event *evt = d;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Short NOTIFY event\n");
+ return;
+ }
+
+ wil->stats.tsf = le64_to_cpu(evt->tsf);
+ wil->stats.snr = le32_to_cpu(evt->snr_val);
+ wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
+ wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
+ wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
+ wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
+ wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
+ wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
+ "BF status 0x%08x SNR 0x%08x\n"
+ "Tx Tpt %d goodput %d Rx goodput %d\n"
+ "Sectors(rx:tx) my %d:%d peer %d:%d\n",
+ wil->stats.bf_mcs, wil->stats.tsf, evt->status,
+ wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+ le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
+ wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+ wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+}
+
+/*
+ * The firmware reports EAPOL frames using a WMI event.
+ * Reconstruct the Ethernet frame and deliver it via the normal Rx path.
+ */
+static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_eapol_rx_event *evt = d;
+ u16 eapol_len = le16_to_cpu(evt->eapol_len);
+ int sz = eapol_len + ETH_HLEN;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+
+ wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
+ evt->src_mac);
+
+ if (eapol_len > 196) { /* TODO: revisit size limit */
+ wil_err(wil, "EAPOL too large\n");
+ return;
+ }
+
+ skb = alloc_skb(sz, GFP_KERNEL);
+ if (!skb) {
+ wil_err(wil, "Failed to allocate skb\n");
+ return;
+ }
+ eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+ memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
+ memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+ eth->h_proto = cpu_to_be16(ETH_P_PAE);
+ memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+}
+
+static const struct {
+ int eventid;
+ void (*handler)(struct wil6210_priv *wil, int eventid,
+ void *data, int data_len);
+} wmi_evt_handlers[] = {
+ {WMI_READY_EVENTID, wmi_evt_ready},
+ {WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
+ {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
+ {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
+ {WMI_CONNECT_EVENTID, wmi_evt_connect},
+ {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
+ {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify},
+ {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
+};
+
+/*
+ * Runs in (threaded) IRQ context.
+ * Extract the WMI event from the mailbox and queue it to @wil->pending_wmi_ev;
+ * it will eventually be handled by @wmi_event_worker in the context
+ * of the "wil6210_wmi" thread.
+ */
+void wmi_recv_cmd(struct wil6210_priv *wil)
+{
+ struct wil6210_mbox_ring_desc d_tail;
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ struct pending_wmi_event *evt;
+ u8 *cmd;
+ void __iomem *src;
+ ulong flags;
+
+ for (;;) {
+ u16 len;
+
+ r->head = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail == r->head)
+ return;
+
+ /* read cmd from tail */
+ wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
+ sizeof(struct wil6210_mbox_ring_desc));
+ if (d_tail.sync == 0) {
+ wil_err(wil, "Mbox evt not owned by FW?\n");
+ return;
+ }
+
+ if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
+ wil_err(wil, "Mbox evt at 0x%08x?\n",
+ le32_to_cpu(d_tail.addr));
+ return;
+ }
+
+ len = le16_to_cpu(hdr.len);
+ src = wmi_buffer(wil, d_tail.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
+ event.wmi) + len, 4),
+ GFP_KERNEL);
+ if (!evt)
+ return;
+
+ evt->event.hdr = hdr;
+ cmd = (void *)&evt->event.wmi;
+ wil_memcpy_fromio_32(cmd, src, len);
+ /* mark entry as empty */
+ iowrite32(0, wil->csr + HOSTADDR(r->tail) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* indicate */
+ wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+ hdr.flags);
+ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ wil_dbg_wmi(wil, "WMI event 0x%04x\n",
+ evt->event.wmi.id);
+ }
+ wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+ &evt->event.hdr, sizeof(hdr) + len, true);
+
+ /* advance tail */
+ r->tail = r->base + ((r->tail - r->base +
+ sizeof(struct wil6210_mbox_ring_desc)) % r->size);
+ iowrite32(r->tail, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.tail));
+
+ /* add to the pending list */
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ list_add_tail(&evt->list, &wil->pending_wmi_ev);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ {
+ int q = queue_work(wil->wmi_wq,
+ &wil->wmi_event_worker);
+ wil_dbg_wmi(wil, "queue_work -> %d\n", q);
+ }
+ }
+}
+
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec)
+{
+ int rc;
+ int remain;
+
+ mutex_lock(&wil->wmi_mutex);
+
+ rc = __wmi_send(wil, cmdid, buf, len);
+ if (rc)
+ goto out;
+
+ wil->reply_id = reply_id;
+ wil->reply_buf = reply;
+ wil->reply_size = reply_size;
+ remain = wait_for_completion_timeout(&wil->wmi_ready,
+ msecs_to_jiffies(to_msec));
+ if (0 == remain) {
+ wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
+ cmdid, reply_id, to_msec);
+ rc = -ETIME;
+ } else {
+ wil_dbg_wmi(wil,
+ "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
+ cmdid, reply_id,
+ to_msec - jiffies_to_msecs(remain));
+ }
+ wil->reply_id = 0;
+ wil->reply_buf = NULL;
+ wil->reply_size = 0;
+ out:
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
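+
+/*
+ * Usage note (descriptive, not extra logic): a caller that expects a reply
+ * passes @reply_id/@reply/@reply_size; wmi_event_handle() then copies the
+ * matching event into @reply (up to @reply_size bytes) and completes
+ * @wil->wmi_ready, which wmi_call() waits on with the given timeout.
+ * See wmi_echo() below for a minimal caller.
+ */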
+
+int wmi_echo(struct wil6210_priv *wil)
+{
+ struct wmi_echo_cmd cmd = {
+ .value = cpu_to_le32(0x12345678),
+ };
+
+ return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
+ WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+}
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
+{
+ struct wmi_set_mac_address_cmd cmd;
+
+ memcpy(cmd.mac, addr, ETH_ALEN);
+
+ wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
+
+ return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype)
+{
+ struct wmi_bcon_ctrl_cmd cmd = {
+ .bcon_interval = cpu_to_le16(bi),
+ .network_type = wmi_nettype,
+ .disable_sec_offload = 1,
+ };
+
+ if (!wil->secure_pcp)
+ cmd.disable_sec = 1;
+
+ return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
+{
+ struct wmi_set_ssid_cmd cmd = {
+ .ssid_len = cpu_to_le32(ssid_len),
+ };
+
+ if (ssid_len > sizeof(cmd.ssid))
+ return -EINVAL;
+
+ memcpy(cmd.ssid, ssid, ssid_len);
+
+ return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_ssid_cmd cmd;
+ } __packed reply;
+ int len; /* reply.cmd.ssid_len in CPU order */
+
+ rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
+ &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ len = le32_to_cpu(reply.cmd.ssid_len);
+ if (len > sizeof(reply.cmd.ssid))
+ return -EINVAL;
+
+ *ssid_len = len;
+ memcpy(ssid, reply.cmd.ssid, len);
+
+ return 0;
+}
+
+int wmi_set_channel(struct wil6210_priv *wil, int channel)
+{
+ struct wmi_set_pcp_channel_cmd cmd = {
+ .channel = channel - 1,
+ };
+
+ return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_channel(struct wil6210_priv *wil, int *channel)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_pcp_channel_cmd cmd;
+ } __packed reply;
+
+ rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
+ WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ if (reply.cmd.channel > 3)
+ return -EINVAL;
+
+ *channel = reply.cmd.channel + 1;
+
+ return 0;
+}
+
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct wmi_eapol_tx_cmd *cmd;
+ struct ethhdr *eth;
+ u16 eapol_len = skb->len - ETH_HLEN;
+ void *eapol = skb->data + ETH_HLEN;
+ uint i;
+ int rc;
+
+ skb_set_mac_header(skb, 0);
+ eth = eth_hdr(skb);
+ wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
+ goto found_dest;
+ }
+
+ return -EINVAL;
+
+ found_dest:
+ /* find out eapol data & len */
+ cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
+ cmd->eapol_len = cpu_to_le16(eapol_len);
+ memcpy(cmd->eapol, eapol, eapol_len);
+ rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
+ kfree(cmd);
+
+ return rc;
+}
+
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr)
+{
+ struct wmi_delete_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ };
+
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key)
+{
+ struct wmi_add_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ .key_usage = WMI_KEY_USE_PAIRWISE,
+ .key_len = key_len,
+ };
+
+ if (!key || (key_len > sizeof(cmd.key)))
+ return -EINVAL;
+
+ memcpy(cmd.key, key, key_len);
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
+{
+ int rc;
+ u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+ struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->mgmt_frm_type = type;
+ /* BUG: the FW API defines ieLen as u8. Will fix in FW */
+ cmd->ie_len = cpu_to_le16(ie_len);
+ memcpy(cmd->ie_info, ie, ie_len);
+ rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
+ kfree(cmd);
+
+ return rc;
+}
+
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = WMI_RX_CHAIN_ADD,
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ .ring_mem_base = cpu_to_le64(vring->pa),
+ .ring_size = cpu_to_le16(vring->size),
+ },
+ .mid = 0, /* TODO - what is it? */
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cfg_rx_chain_done_event evt;
+ } __packed evt;
+ int rc;
+
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+ if (ch)
+ cmd.sniffer_cfg.channel = ch->hw_value - 1;
+ cmd.sniffer_cfg.phy_info_mode =
+ cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+ cmd.sniffer_cfg.phy_support =
+ cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+ ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ }
+ /* typical time for secure PCP is 840ms */
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc)
+ return rc;
+
+ vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+ wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
+ le32_to_cpu(evt.evt.status), vring->hwtail);
+
+ if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+void wmi_event_flush(struct wil6210_priv *wil)
+{
+ struct pending_wmi_event *evt, *t;
+
+ wil_dbg_wmi(wil, "%s()\n", __func__);
+
+ list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+}
+
+static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
+ if (wmi_evt_handlers[i].eventid == id) {
+ wmi_evt_handlers[i].handler(wil, id, d, len);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void wmi_event_handle(struct wil6210_priv *wil,
+ struct wil6210_mbox_hdr *hdr)
+{
+ u16 len = le16_to_cpu(hdr->len);
+
+ if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+ void *evt_data = (void *)(&wmi[1]);
+ u16 id = le16_to_cpu(wmi->id);
+ /* check if someone waits for this event */
+ if (wil->reply_id && wil->reply_id == id) {
+ if (wil->reply_buf) {
+ memcpy(wil->reply_buf, wmi,
+ min(len, wil->reply_size));
+ } else {
+ wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi));
+ }
+ wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
+ complete(&wil->wmi_ready);
+ return;
+ }
+ /* unsolicited event */
+ /* search for handler */
+ if (!wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi))) {
+ wil_err(wil, "Unhandled event 0x%04x\n", id);
+ }
+ } else {
+ wil_err(wil, "Unknown event type\n");
+ print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr, sizeof(*hdr) + len, true);
+ }
+}
+
+/*
+ * Retrieve next WMI event from the pending list
+ */
+static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct list_head *ret = NULL;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ ret = wil->pending_wmi_ev.next;
+ list_del(ret);
+ }
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Handler for the WMI events
+ */
+void wmi_event_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_event_worker);
+ struct pending_wmi_event *evt;
+ struct list_head *lh;
+
+ while ((lh = next_wmi_ev(wil)) != NULL) {
+ evt = list_entry(lh, struct pending_wmi_event, list);
+ wmi_event_handle(wil, &evt->event.hdr);
+ kfree(evt);
+ }
+}
+
+void wmi_connect_worker(struct work_struct *work)
+{
+ int rc;
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_connect_worker);
+
+ if (wil->pending_connect_cid < 0) {
+ wil_err(wil, "No connection pending\n");
+ return;
+ }
+
+ wil_dbg_wmi(wil, "Configure for connection CID %d\n",
+ wil->pending_connect_cid);
+
+ rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
+ wil->pending_connect_cid, 0);
+ wil->pending_connect_cid = -1;
+ if (rc == 0)
+ wil_link_on(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
new file mode 100644
index 000000000000..3bbf87572b07
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -0,0 +1,1116 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI) for the Wilocity
+ * MARLON 60 Gigabit wireless solution.
+ * It includes definitions of all the commands and events.
+ * Commands are messages from the host to the WM.
+ * Events are messages from the WM to the host.
+ */
+
+#ifndef __WILOCITY_WMI_H__
+#define __WILOCITY_WMI_H__
+
+/* General */
+
+#define WMI_MAC_LEN (6)
+#define WMI_PROX_RANGE_NUM (3)
+
+/* List of Commands */
+enum wmi_command_id {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_DISCONNECT_CMDID = 0x0003,
+ WMI_START_SCAN_CMDID = 0x0007,
+ WMI_SET_BSS_FILTER_CMDID = 0x0009,
+ WMI_SET_PROBED_SSID_CMDID = 0x000a,
+ WMI_SET_LISTEN_INT_CMDID = 0x000b,
+ WMI_BCON_CTRL_CMDID = 0x000f,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
+ WMI_SET_APPIE_CMDID = 0x003f,
+ WMI_GET_APPIE_CMDID = 0x0040,
+ WMI_SET_WSC_STATUS_CMDID = 0x0041,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
+ WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300,
+ WMI_MEM_READ_CMDID = 0x0800,
+ WMI_MEM_WR_CMDID = 0x0801,
+ WMI_ECHO_CMDID = 0x0803,
+ WMI_DEEP_ECHO_CMDID = 0x0804,
+ WMI_CONFIG_MAC_CMDID = 0x0805,
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
+ WMI_ADD_STATION_CMDID = 0x0807,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
+ WMI_FS_TUNE_CMDID = 0x080a,
+ WMI_CORR_MEASURE_CMDID = 0x080b,
+ WMI_TEMP_SENSE_CMDID = 0x080e,
+ WMI_DC_CALIB_CMDID = 0x080f,
+ WMI_SEND_TONE_CMDID = 0x0810,
+ WMI_IQ_TX_CALIB_CMDID = 0x0811,
+ WMI_IQ_RX_CALIB_CMDID = 0x0812,
+ WMI_SET_UCODE_IDLE_CMDID = 0x0813,
+ WMI_SET_WORK_MODE_CMDID = 0x0815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
+ WMI_MARLON_R_ACTIVATE_CMDID = 0x0817,
+ WMI_MARLON_R_READ_CMDID = 0x0818,
+ WMI_MARLON_R_WRITE_CMDID = 0x0819,
+ WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
+ MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
+ MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
+ WMI_CFG_RX_CHAIN_CMDID = 0x0820,
+ WMI_VRING_CFG_CMDID = 0x0821,
+ WMI_RX_ON_CMDID = 0x0822,
+ WMI_VRING_BA_EN_CMDID = 0x0823,
+ WMI_VRING_BA_DIS_CMDID = 0x0824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
+ WMI_RCP_DELBA_CMDID = 0x0826,
+ WMI_SET_SSID_CMDID = 0x0827,
+ WMI_GET_SSID_CMDID = 0x0828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
+ WMI_SW_TX_REQ_CMDID = 0x082b,
+ WMI_RX_OFF_CMDID = 0x082c,
+ WMI_READ_MAC_RXQ_CMDID = 0x0830,
+ WMI_READ_MAC_TXQ_CMDID = 0x0831,
+ WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
+ WMI_WRITE_MAC_TXQ_CMDID = 0x0833,
+ WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834,
+ WMI_MLME_PUSH_CMDID = 0x0835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x0836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x0837,
+ WMI_BF_SM_MGMT_CMDID = 0x0838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x0839,
+ WMI_SET_SECTORS_CMDID = 0x0849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x0851,
+ WMI_RS_MGMT_CMDID = 0x0852,
+ WMI_RF_MGMT_CMDID = 0x0853,
+ /* Performance monitoring commands */
+ WMI_BF_CTRL_CMDID = 0x0862,
+ WMI_NOTIFY_REQ_CMDID = 0x0863,
+ WMI_GET_STATUS_CMDID = 0x0864,
+ WMI_UNIT_TEST_CMDID = 0x0900,
+ WMI_HICCUP_CMDID = 0x0901,
+ WMI_FLASH_READ_CMDID = 0x0902,
+ WMI_FLASH_WRITE_CMDID = 0x0903,
+ WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
+
+ WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
+ WMI_ABORT_SCAN_CMDID = 0xf007,
+ WMI_SET_PMK_CMDID = 0xf028,
+
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041,
+ WMI_GET_PMK_CMDID = 0xf048,
+ WMI_SET_PASSPHRASE_CMDID = 0xf049,
+ WMI_SEND_ASSOC_RES_CMDID = 0xf04a,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b,
+ WMI_EAPOL_TX_CMDID = 0xf04c,
+ WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
+ WMI_FW_VER_CMDID = 0xf04e,
+};
+
+/*
+ * Commands data structures
+ */
+
+/*
+ * Frame Types
+ */
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ = 1,
+ WMI_FRAME_PROBE_RESP = 2,
+ WMI_FRAME_ASSOC_REQ = 3,
+ WMI_FRAME_ASSOC_RESP = 4,
+ WMI_NUM_MGMT_FRAME,
+};
+
+/*
+ * WMI_CONNECT_CMDID
+ */
+enum wmi_network_type {
+ WMI_NETTYPE_INFRA = 0x01,
+ WMI_NETTYPE_ADHOC = 0x02,
+ WMI_NETTYPE_ADHOC_CREATOR = 0x04,
+ WMI_NETTYPE_AP = 0x10,
+ WMI_NETTYPE_P2P = 0x20,
+ WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */
+};
+
+enum wmi_dot11_auth_mode {
+ WMI_AUTH11_OPEN = 0x01,
+ WMI_AUTH11_SHARED = 0x02,
+ WMI_AUTH11_LEAP = 0x04,
+ WMI_AUTH11_WSC = 0x08,
+};
+
+enum wmi_auth_mode {
+ WMI_AUTH_NONE = 0x01,
+ WMI_AUTH_WPA = 0x02,
+ WMI_AUTH_WPA2 = 0x04,
+ WMI_AUTH_WPA_PSK = 0x08,
+ WMI_AUTH_WPA2_PSK = 0x10,
+ WMI_AUTH_WPA_CCKM = 0x20,
+ WMI_AUTH_WPA2_CCKM = 0x40,
+};
+
+enum wmi_crypto_type {
+ WMI_CRYPT_NONE = 0x01,
+ WMI_CRYPT_WEP = 0x02,
+ WMI_CRYPT_TKIP = 0x04,
+ WMI_CRYPT_AES = 0x08,
+ WMI_CRYPT_AES_GCMP = 0x20,
+};
+
+
+enum wmi_connect_ctrl_flag_bits {
+ WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
+ WMI_CONNECT_SEND_REASSOC = 0x0002,
+ WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ WMI_CONNECT_DO_NOT_DEAUTH = 0x0080,
+};
+
+#define WMI_MAX_SSID_LEN (32)
+
+struct wmi_connect_cmd {
+ u8 network_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 pairwise_crypto_type;
+ u8 pairwise_crypto_len;
+ u8 group_crypto_type;
+ u8 group_crypto_len;
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le32 ctrl_flags;
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved1[2];
+} __packed;
+
+
+/*
+ * WMI_RECONNECT_CMDID
+ */
+struct wmi_reconnect_cmd {
+ u8 channel; /* hint */
+ u8 reserved;
+ u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
+} __packed;
+
+
+/*
+ * WMI_SET_PMK_CMDID
+ */
+
+#define WMI_MIN_KEY_INDEX (0)
+#define WMI_MAX_KEY_INDEX (3)
+#define WMI_MAX_KEY_LEN (32)
+#define WMI_PASSPHRASE_LEN (64)
+#define WMI_PMK_LEN (32)
+
+struct wmi_set_pmk_cmd {
+ u8 pmk[WMI_PMK_LEN];
+} __packed;
+
+
+/*
+ * WMI_SET_PASSPHRASE_CMDID
+ */
+struct wmi_set_passphrase_cmd {
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 passphrase[WMI_PASSPHRASE_LEN];
+ u8 ssid_len;
+ u8 passphrase_len;
+} __packed;
+
+/*
+ * WMI_ADD_CIPHER_KEY_CMDID
+ */
+enum wmi_key_usage {
+ WMI_KEY_USE_PAIRWISE = 0,
+ WMI_KEY_USE_GROUP = 1,
+ WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
+};
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+ u8 key_usage; /* enum wmi_key_usage */
+ u8 key_len;
+ u8 key_rsc[8]; /* key replay sequence counter */
+ u8 key[WMI_MAX_KEY_LEN];
+ u8 key_op_ctrl; /* Additional Key Control information */
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/*
+ * WMI_DELETE_CIPHER_KEY_CMDID
+ */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+
+/*
+ * WMI_START_SCAN_CMDID
+ *
+ * Start L1 scan operation
+ *
+ * Returned events:
+ * - WMI_RX_MGMT_PACKET_EVENTID - for every probe resp.
+ * - WMI_SCAN_COMPLETE_EVENTID
+ */
+enum wmi_scan_type {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+};
+
+struct wmi_start_scan_cmd {
+ u8 reserved[8];
+ __le32 home_dwell_time; /* Max duration in the home channel(ms) */
+ __le32 force_scan_interval; /* Time interval between scans (ms)*/
+ u8 scan_type; /* wmi_scan_type */
+ u8 num_channels; /* how many channels follow */
+ struct {
+ u8 channel;
+ u8 reserved;
+ } channel_list[0]; /* channel IDs */
+ /* 0 - 58320 MHz */
+ /* 1 - 60480 MHz */
+ /* 2 - 62640 MHz */
+} __packed;
+
+/*
+ * WMI_SET_PROBED_SSID_CMDID
+ */
+#define MAX_PROBED_SSID_INDEX (15)
+
+enum wmi_ssid_flag {
+ WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
+ WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */
+ WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */
+};
+
+struct wmi_probed_ssid_cmd {
+ u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 flag; /* enum wmi_ssid_flag */
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_APPIE_CMDID
+ * Add Application specified IE to a management frame
+ */
+struct wmi_set_appie_cmd {
+ u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ u8 reserved;
+ __le16 ie_len; /* Length of the IE to be added to MGMT frame */
+ u8 ie_info[0];
+} __packed;
+
+#define WMI_MAX_IE_LEN (1024)
+
+struct wmi_pxmt_range_cfg_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 range;
+} __packed;
+
+struct wmi_pxmt_snr2_range_cfg_cmd {
+ s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+} __packed;
+
+/*
+ * WMI_RF_MGMT_CMDID
+ */
+enum wmi_rf_mgmt_type {
+ WMI_RF_MGMT_W_DISABLE = 0,
+ WMI_RF_MGMT_W_ENABLE = 1,
+ WMI_RF_MGMT_GET_STATUS = 2,
+};
+
+struct wmi_rf_mgmt_cmd {
+ __le32 rf_mgmt_type;
+} __packed;
+
+/*
+ * WMI_SET_SSID_CMDID
+ */
+struct wmi_set_ssid_cmd {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_PCP_CHANNEL_CMDID
+ */
+struct wmi_set_pcp_channel_cmd {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_BCON_CTRL_CMDID
+ */
+struct wmi_bcon_ctrl_cmd {
+ __le16 bcon_interval;
+ __le16 frag_num;
+ __le64 ss_mask;
+ u8 network_type;
+ u8 reserved;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+} __packed;
+
+/*
+ * WMI_SW_TX_REQ_CMDID
+ */
+struct wmi_sw_tx_req_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_VRING_CFG_CMDID
+ */
+
+struct wmi_sw_ring_cfg {
+ __le64 ring_mem_base;
+ __le16 ring_size;
+ __le16 max_mpdu_size;
+} __packed;
+
+struct wmi_vring_cfg_schd {
+ __le16 priority;
+ __le16 timeslot_us;
+} __packed;
+
+enum wmi_vring_cfg_encap_trans_type {
+ WMI_VRING_ENC_TYPE_802_3 = 0,
+ WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_vring_cfg_ds_cfg {
+ WMI_VRING_DS_PBSS = 0,
+ WMI_VRING_DS_STATION = 1,
+ WMI_VRING_DS_AP = 2,
+ WMI_VRING_DS_ADDR4 = 3,
+};
+
+enum wmi_vring_cfg_nwifi_ds_trans_type {
+ WMI_NWIFI_TX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1,
+ WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2,
+};
+
+enum wmi_vring_cfg_schd_params_priority {
+ WMI_SCH_PRIO_REGULAR = 0,
+ WMI_SCH_PRIO_HIGH = 1,
+};
+
+struct wmi_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ u8 ringid; /* 0-23 vrings */
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 encap_trans_type;
+ u8 ds_cfg; /* 802.3 DS cfg */
+ u8 nwifi_ds_trans_type;
+
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+ u8 mac_ctrl;
+
+ #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
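+
+/*
+ * Illustrative packing of the cidxtid field (a sketch, not part of the WMI
+ * definitions): cid occupies bits 0..3 and tid bits 4..7, so a driver may
+ * build it as:
+ *   cidxtid = ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
+ *             ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
+ */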
+
+enum wmi_vring_cfg_cmd_action {
+ WMI_VRING_CMD_ADD = 0,
+ WMI_VRING_CMD_MODIFY = 1,
+ WMI_VRING_CMD_DELETE = 2,
+};
+
+struct wmi_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_vring_cfg vring_cfg;
+} __packed;
+
+/*
+ * WMI_VRING_BA_EN_CMDID
+ */
+struct wmi_vring_ba_en_cmd {
+ u8 ringid;
+ u8 agg_max_wsize;
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_VRING_BA_DIS_CMDID
+ */
+struct wmi_vring_ba_dis_cmd {
+ u8 ringid;
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_CMDID
+ */
+struct wmi_notify_req_cmd {
+ u8 cid;
+ u8 reserved[3];
+ __le32 interval_usec;
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_CMDID
+ */
+enum wmi_sniffer_cfg_mode {
+ WMI_SNIFFER_OFF = 0,
+ WMI_SNIFFER_ON = 1,
+};
+
+enum wmi_sniffer_cfg_phy_info_mode {
+ WMI_SNIFFER_PHY_INFO_DISABLED = 0,
+ WMI_SNIFFER_PHY_INFO_ENABLED = 1,
+};
+
+enum wmi_sniffer_cfg_phy_support {
+ WMI_SNIFFER_CP = 0,
+ WMI_SNIFFER_DP = 1,
+ WMI_SNIFFER_BOTH_PHYS = 2,
+};
+
+struct wmi_sniffer_cfg {
+ __le32 mode; /* enum wmi_sniffer_cfg_mode */
+ __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */
+ __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_cfg_rx_chain_cmd_action {
+ WMI_RX_CHAIN_ADD = 0,
+ WMI_RX_CHAIN_DEL = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_decap_trans_type {
+ WMI_DECAP_TYPE_802_3 = 0,
+ WMI_DECAP_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
+ WMI_NWIFI_RX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
+};
+
+struct wmi_cfg_rx_chain_cmd {
+ __le32 action;
+ struct wmi_sw_ring_cfg rx_sw_ring;
+ u8 mid;
+ u8 decap_trans_type;
+
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+ u8 l2_802_3_offload_ctrl;
+
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+ u8 l2_nwifi_offload_ctrl;
+
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+ u8 l3_l4_ctrl;
+
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+ u8 ring_ctrl;
+
+ __le16 prefetch_thrsh;
+ __le16 wb_thrsh;
+ __le32 itr_value;
+ __le16 host_thrsh;
+ u8 reserved[2];
+ struct wmi_sniffer_cfg sniffer_cfg;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_RESP_CMDID
+ */
+struct wmi_rcp_addba_resp_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ __le16 status_code;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_RCP_DELBA_CMDID
+ */
+struct wmi_rcp_delba_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_CMDID
+ */
+struct wmi_rcp_addba_req_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ /* ieee80211_ba_parameterset field as it was received */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ /* ieee80211_ba_seqstrl field as it was received */
+ __le16 ba_seq_ctrl;
+} __packed;
+
+/*
+ * WMI_SET_MAC_ADDRESS_CMDID
+ */
+struct wmi_set_mac_address_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+
+/*
+* WMI_EAPOL_TX_CMDID
+*/
+struct wmi_eapol_tx_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+ * WMI_ECHO_CMDID
+ *
+ * Check FW is alive
+ *
+ * WMI_DEEP_ECHO_CMDID
+ *
+ * Check FW and ucode are alive
+ *
+ * Returned event: WMI_ECHO_RSP_EVENTID
+ * same event for both commands
+ */
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+/*
+ * WMI Events
+ */
+
+/*
+ * List of Events (target to host)
+ */
+enum wmi_event_id {
+ WMI_IMM_RSP_EVENTID = 0x0000,
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100a,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100b,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_CONFIG_MAC_DONE_EVENTID = 0x1805,
+ WMI_CONFIG_PHY_DEBUG_DONE_EVENTID = 0x1806,
+ WMI_ADD_STATION_DONE_EVENTID = 0x1807,
+ WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID = 0x1808,
+ WMI_PHY_GET_STATISTICS_EVENTID = 0x1809,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180a,
+ WMI_CORR_MEASURE_DONE_EVENTID = 0x180b,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180f,
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_MARLON_R_ACTIVATE_DONE_EVENTID = 0x1817,
+ WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
+ WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
+
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_RX_ON_DONE_EVENTID = 0x1822,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
+ WMI_RX_OFF_DONE_EVENTID = 0x182c,
+
+ WMI_READ_MAC_RXQ_EVENTID = 0x1830,
+ WMI_READ_MAC_TXQ_EVENTID = 0x1831,
+ WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
+ WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
+ WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
+
+ WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+
+ /* Performance monitoring events */
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINKDOWN_EVENTID = 0x1861,
+
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+};
+
+/*
+ * Events data structures
+ */
+
+/*
+ * WMI_RF_MGMT_STATUS_EVENTID
+ */
+enum wmi_rf_status {
+ WMI_RF_ENABLED = 0,
+ WMI_RF_DISABLED_HW = 1,
+ WMI_RF_DISABLED_SW = 2,
+ WMI_RF_DISABLED_HW_SW = 3,
+};
+
+struct wmi_rf_mgmt_status_event {
+ __le32 rf_status;
+} __packed;
+
+/*
+ * WMI_GET_STATUS_DONE_EVENTID
+ */
+struct wmi_get_status_done_event {
+ __le32 is_associated;
+ u8 cid;
+ u8 reserved0[3];
+ u8 bssid[WMI_MAC_LEN];
+ u8 channel;
+ u8 reserved1;
+ u8 network_type;
+ u8 reserved2[3];
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ __le32 rf_status;
+ __le32 is_secured;
+} __packed;
+
+/*
+ * WMI_FW_VER_EVENTID
+ */
+struct wmi_fw_ver_event {
+ u8 major;
+ u8 minor;
+ __le16 subminor;
+ __le16 build;
+} __packed;
+
+/*
+* WMI_MAC_ADDR_RESP_EVENTID
+*/
+struct wmi_mac_addr_resp_event {
+ u8 mac[WMI_MAC_LEN];
+ u8 auth_mode;
+ u8 crypt_mode;
+ __le32 offload_mode;
+} __packed;
+
+/*
+* WMI_EAPOL_RX_EVENTID
+*/
+struct wmi_eapol_rx_event {
+ u8 src_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+* WMI_READY_EVENTID
+*/
+enum wmi_phy_capability {
+ WMI_11A_CAPABILITY = 1,
+ WMI_11G_CAPABILITY = 2,
+ WMI_11AG_CAPABILITY = 3,
+ WMI_11NA_CAPABILITY = 4,
+ WMI_11NG_CAPABILITY = 5,
+ WMI_11NAG_CAPABILITY = 6,
+ WMI_11AD_CAPABILITY = 7,
+ WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+};
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac[WMI_MAC_LEN];
+ u8 phy_capability; /* enum wmi_phy_capability */
+ u8 reserved;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_DONE_EVENTID
+ */
+struct wmi_notify_req_done_event {
+ __le32 status;
+ __le64 tsf;
+ __le32 snr_val;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le16 bf_mcs;
+ __le16 my_rx_sector;
+ __le16 my_tx_sector;
+ __le16 other_rx_sector;
+ __le16 other_tx_sector;
+ __le16 range;
+} __packed;
+
+/*
+ * WMI_CONNECT_EVENTID
+ */
+struct wmi_connect_event {
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le16 listen_interval;
+ __le16 beacon_interval;
+ u8 network_type;
+ u8 reserved1[3];
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 cid;
+ u8 reserved2[3];
+ u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_DISCONNECT_EVENTID
+ */
+enum wmi_disconnect_reason {
+ WMI_DIS_REASON_NO_NETWORK_AVAIL = 1,
+ WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */
+ WMI_DIS_REASON_DISCONNECT_CMD = 3,
+ WMI_DIS_REASON_BSS_DISCONNECTED = 4,
+ WMI_DIS_REASON_AUTH_FAILED = 5,
+ WMI_DIS_REASON_ASSOC_FAILED = 6,
+ WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7,
+ WMI_DIS_REASON_CSERV_DISCONNECT = 8,
+ WMI_DIS_REASON_INVALID_PROFILE = 10,
+ WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11,
+ WMI_DIS_REASON_PROFILE_MISMATCH = 12,
+ WMI_DIS_REASON_CONNECTION_EVICTED = 13,
+ WMI_DIS_REASON_IBSS_MERGE = 14,
+};
+
+struct wmi_disconnect_event {
+ __le16 protocol_reason_status; /* reason code, see 802.11 spec. */
+ u8 bssid[WMI_MAC_LEN]; /* set if known */
+ u8 disconnect_reason; /* see enum wmi_disconnect_reason */
+ u8 assoc_resp_len;
+ u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_SCAN_COMPLETE_EVENTID
+ */
+struct wmi_scan_complete_event {
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_BA_STATUS_EVENTID
+ */
+enum wmi_vring_ba_status {
+ WMI_BA_AGREED = 0,
+ WMI_BA_NON_AGREED = 1,
+};
+
+struct wmi_vring_ba_status_event {
+ __le16 status;
+ u8 reserved[2];
+ u8 ringid;
+ u8 agg_wsize;
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_DELBA_EVENTID
+ */
+struct wmi_delba_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 from_initiator;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_VRING_CFG_DONE_EVENTID
+ */
+enum wmi_vring_cfg_done_event_status {
+ WMI_VRING_CFG_SUCCESS = 0,
+ WMI_VRING_CFG_FAILURE = 1,
+};
+
+struct wmi_vring_cfg_done_event {
+ u8 ringid;
+ u8 status;
+ u8 reserved[2];
+ __le32 tx_vring_tail_ptr;
+} __packed;
+
+/*
+ * WMI_ADDBA_RESP_SENT_EVENTID
+ */
+enum wmi_rcp_addba_resp_sent_event_status {
+ WMI_ADDBA_SUCCESS = 0,
+ WMI_ADDBA_FAIL = 1,
+};
+
+struct wmi_rcp_addba_resp_sent_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 reserved;
+ __le16 status;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_EVENTID
+ */
+struct wmi_rcp_addba_req_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset as it was received */
+ __le16 ba_timeout;
+ __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as it was received */
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_DONE_EVENTID
+ */
+enum wmi_cfg_rx_chain_done_event_status {
+ WMI_CFG_RX_CHAIN_SUCCESS = 1,
+};
+
+struct wmi_cfg_rx_chain_done_event {
+ __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_WBE_LINKDOWN_EVENTID
+ */
+enum wmi_wbe_link_down_event_reason {
+ WMI_WBE_REASON_USER_REQUEST = 0,
+ WMI_WBE_REASON_RX_DISASSOC = 1,
+ WMI_WBE_REASON_BAD_PHY_LINK = 2,
+};
+
+struct wmi_wbe_link_down_event {
+ u8 cid;
+ u8 reserved[3];
+ __le32 reason;
+} __packed;
+
+/*
+ * WMI_DATA_PORT_OPEN_EVENTID
+ */
+struct wmi_data_port_open_event {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_PCP_CHANNEL_EVENTID
+ */
+struct wmi_get_pcp_channel_event {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SW_TX_COMPLETE_EVENTID
+ */
+enum wmi_sw_tx_status {
+ WMI_TX_SW_STATUS_SUCCESS = 0,
+ WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1,
+ WMI_TX_SW_STATUS_FAILED_TX = 2,
+};
+
+struct wmi_sw_tx_complete_event {
+ u8 status; /* enum wmi_sw_tx_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_SSID_EVENTID
+ */
+struct wmi_get_ssid_event {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_RX_MGMT_PACKET_EVENTID
+ */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 snr;
+ __le16 range;
+ __le16 stype;
+ __le16 status;
+ __le32 len;
+ u8 qid;
+ u8 mid;
+ u8 cid;
+ u8 channel; /* From Radio MNGR */
+} __packed;
+
+struct wmi_rx_mgmt_packet_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_ECHO_RSP_EVENTID
+ */
+struct wmi_echo_event {
+ __le32 echoed_value;
+} __packed;
+
+#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index ded03d226a71..b42930f457c2 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -79,10 +79,9 @@ static int atmel_probe(struct pcmcia_device *p_dev)
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) {
- printk(KERN_ERR "atmel_cs: no memory for new device\n");
+ if (!local)
return -ENOMEM;
- }
+
p_dev->priv = local;
return atmel_config(p_dev);
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 7a28d21ac389..287c6b670a36 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -78,8 +78,8 @@ config B43_PCMCIA
If unsure, say N.
config B43_SDIO
- bool "Broadcom 43xx SDIO device support (EXPERIMENTAL)"
- depends on B43 && SSB_SDIOHOST_POSSIBLE && EXPERIMENTAL
+ bool "Broadcom 43xx SDIO device support"
+ depends on B43 && SSB_SDIOHOST_POSSIBLE
select SSB_SDIOHOST
---help---
Broadcom 43xx device support for Soft-MAC SDIO devices.
@@ -109,8 +109,8 @@ config B43_PIO
default y
config B43_PHY_N
- bool "Support for 802.11n (N-PHY) devices (EXPERIMENTAL)"
- depends on B43 && EXPERIMENTAL
+ bool "Support for 802.11n (N-PHY) devices"
+ depends on B43
---help---
Support for the N-PHY.
@@ -130,8 +130,8 @@ config B43_PHY_LP
(802.11a support is optional, and currently disabled).
config B43_PHY_HT
- bool "Support for HT-PHY (high throughput) devices (EXPERIMENTAL)"
- depends on B43 && EXPERIMENTAL
+ bool "Support for HT-PHY (high throughput) devices"
+ depends on B43
---help---
Support for the HT-PHY.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b298e5d68be2..10e288d470e7 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -7,6 +7,7 @@
#include <linux/hw_random.h>
#include <linux/bcma/bcma.h>
#include <linux/ssb/ssb.h>
+#include <linux/completion.h>
#include <net/mac80211.h>
#include "debugfs.h"
@@ -722,6 +723,10 @@ enum b43_firmware_file_type {
struct b43_request_fw_context {
/* The device we are requesting the fw for. */
struct b43_wldev *dev;
+ /* a completion event structure needed if this call is asynchronous */
+ struct completion fw_load_complete;
+ /* a pointer to the firmware object */
+ const struct firmware *blob;
/* The type of firmware to request. */
enum b43_firmware_file_type req_type;
/* Error messages for each firmware type. */
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 315b96ed1d90..9fdd1983079c 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -169,7 +169,7 @@ struct b43_dmadesc_generic {
/* DMA engine tuning knobs */
#define B43_TXRING_SLOTS 256
-#define B43_RXRING_SLOTS 64
+#define B43_RXRING_SLOTS 256
#define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
#define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 16ab280359bd..806e34c19281 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
b43warn(wl, text);
}
+static void b43_fw_cb(const struct firmware *firmware, void *context)
+{
+ struct b43_request_fw_context *ctx = context;
+
+ ctx->blob = firmware;
+ complete(&ctx->fw_load_complete);
+}
+
int b43_do_request_fw(struct b43_request_fw_context *ctx,
const char *name,
- struct b43_firmware_file *fw)
+ struct b43_firmware_file *fw, bool async)
{
- const struct firmware *blob;
struct b43_fw_header *hdr;
u32 size;
int err;
@@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
B43_WARN_ON(1);
return -ENOSYS;
}
- err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
+ if (async) {
+ /* do this part asynchronously */
+ init_completion(&ctx->fw_load_complete);
+ err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
+ ctx->dev->dev->dev, GFP_KERNEL,
+ ctx, b43_fw_cb);
+ if (err < 0) {
+ pr_err("Unable to load firmware\n");
+ return err;
+ }
+ /* stall here until fw ready */
+ wait_for_completion(&ctx->fw_load_complete);
+ if (ctx->blob)
+ goto fw_ready;
+ /* On some ARM systems, the async request will fail, but the next sync
+ * request works. For this reason, we fall through here.
+ */
+ }
+ err = request_firmware(&ctx->blob, ctx->fwname,
+ ctx->dev->dev->dev);
if (err == -ENOENT) {
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
- "Firmware file \"%s\" not found\n", ctx->fwname);
+ "Firmware file \"%s\" not found\n",
+ ctx->fwname);
return err;
} else if (err) {
snprintf(ctx->errors[ctx->req_type],
@@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
ctx->fwname, err);
return err;
}
- if (blob->size < sizeof(struct b43_fw_header))
+fw_ready:
+ if (ctx->blob->size < sizeof(struct b43_fw_header))
goto err_format;
- hdr = (struct b43_fw_header *)(blob->data);
+ hdr = (struct b43_fw_header *)(ctx->blob->data);
switch (hdr->type) {
case B43_FW_TYPE_UCODE:
case B43_FW_TYPE_PCM:
size = be32_to_cpu(hdr->size);
- if (size != blob->size - sizeof(struct b43_fw_header))
+ if (size != ctx->blob->size - sizeof(struct b43_fw_header))
goto err_format;
/* fallthrough */
case B43_FW_TYPE_IV:
@@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
goto err_format;
}
- fw->data = blob;
+ fw->data = ctx->blob;
fw->filename = name;
fw->type = ctx->req_type;
@@ -2172,7 +2200,7 @@ err_format:
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
"Firmware file \"%s\" format error.\n", ctx->fwname);
- release_firmware(blob);
+ release_firmware(ctx->blob);
return -EPROTO;
}
@@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
goto err_no_ucode;
}
}
- err = b43_do_request_fw(ctx, filename, &fw->ucode);
+ err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
if (err)
goto err_load;
@@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
else
goto err_no_pcm;
fw->pcm_request_failed = false;
- err = b43_do_request_fw(ctx, filename, &fw->pcm);
+ err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
if (err == -ENOENT) {
/* We did not find a PCM file? Not fatal, but
* core rev <= 10 must do without hwcrypto then. */
@@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
default:
goto err_no_initvals;
}
- err = b43_do_request_fw(ctx, filename, &fw->initvals);
+ err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
if (err)
goto err_load;
@@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
default:
goto err_no_initvals;
}
- err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
+ err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
if (err)
goto err_load;
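The hunk above converts the first ucode fetch to request_firmware_nowait(), blocks on a completion until the callback fires, and falls back to a plain synchronous request_firmware() when the async path delivers no blob. Below is a minimal user-space model of that "async request, wait, then synchronous fallback" flow; it uses pthreads instead of the kernel firmware loader and every name in it is illustrative, not part of the patch.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct fw_ctx {
	pthread_mutex_t lock;
	pthread_cond_t done;
	int completed;
	char *blob;		/* NULL means the async path delivered nothing */
};

/* plays the role of the request_firmware_nowait() completion callback */
static void *async_loader(void *arg)
{
	struct fw_ctx *ctx = arg;

	usleep(10000);			/* pretend the loader is busy for a while */
	pthread_mutex_lock(&ctx->lock);
	ctx->blob = NULL;		/* simulate the async path failing */
	ctx->completed = 1;
	pthread_cond_signal(&ctx->done);
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

/* plays the role of the blocking request_firmware() fallback */
static char *sync_loader(void)
{
	return strdup("firmware-image");
}

int main(void)
{
	struct fw_ctx ctx = { .completed = 0, .blob = NULL };
	pthread_t t;

	pthread_mutex_init(&ctx.lock, NULL);
	pthread_cond_init(&ctx.done, NULL);

	/* kick off the asynchronous request, then stall until it completes */
	pthread_create(&t, NULL, async_loader, &ctx);
	pthread_mutex_lock(&ctx.lock);
	while (!ctx.completed)
		pthread_cond_wait(&ctx.done, &ctx.lock);
	pthread_mutex_unlock(&ctx.lock);
	pthread_join(t, NULL);

	/* fall back to a synchronous request if async delivered nothing */
	if (!ctx.blob)
		ctx.blob = sync_loader();

	printf("loaded: %s\n", ctx.blob);
	free(ctx.blob);
	return 0;
}
```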
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 8c684cd33529..abac25ee958d 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
struct b43_request_fw_context;
-int b43_do_request_fw(struct b43_request_fw_context *ctx,
- const char *name,
- struct b43_firmware_file *fw);
+int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
+ struct b43_firmware_file *fw, bool async);
void b43_do_release_fw(struct b43_firmware_file *fw);
#endif /* B43_MAIN_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 97d4e27bf36f..aaca60c6f575 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3226,8 +3226,6 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
{
struct nphy_gain_ctl_workaround_entry *e;
u8 phy_idx;
- u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso :
- dev->dev->bus_sprom->fem.ghz2.tr_iso;
if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11)
return &nphy_gain_ctl_wa_phy6_radio11_ghz2;
@@ -3249,6 +3247,10 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
!b43_channel_type_is_40mhz(dev->phy.channel_type))
e->cliplo_gain = 0x2d;
} else if (!ghz5 && dev->phy.rev >= 5) {
+ static const int gain_data[] = {0x0062, 0x0064, 0x006a, 0x106a,
+ 0x106c, 0x1074, 0x107c, 0x207c};
+ u8 tr_iso = dev->dev->bus_sprom->fem.ghz2.tr_iso;
+
if (ext_lna) {
e->rfseq_init[0] &= ~0x4000;
e->rfseq_init[1] &= ~0x4000;
@@ -3256,26 +3258,10 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
e->rfseq_init[3] &= ~0x4000;
e->init_gain &= ~0x4000;
}
- switch (tr_iso) {
- case 0:
- e->cliplo_gain = 0x0062;
- case 1:
- e->cliplo_gain = 0x0064;
- case 2:
- e->cliplo_gain = 0x006a;
- case 3:
- e->cliplo_gain = 0x106a;
- case 4:
- e->cliplo_gain = 0x106c;
- case 5:
- e->cliplo_gain = 0x1074;
- case 6:
- e->cliplo_gain = 0x107c;
- case 7:
- e->cliplo_gain = 0x207c;
- default:
- e->cliplo_gain = 0x106a;
- }
+ if (tr_iso > 7)
+ tr_iso = 3;
+ e->cliplo_gain = gain_data[tr_iso];
+
} else if (ghz5 && dev->phy.rev == 4 && ext_lna) {
e->rfseq_init[0] &= ~0x4000;
e->rfseq_init[1] &= ~0x4000;
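The switch statement removed above had no break statements, so every tr_iso value fell through to the default and cliplo_gain always ended up as 0x106a; the patch replaces it with a clamped table lookup. A stand-alone sketch of that clamp-and-index pattern, with the values copied from the hunk (the helper name is illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* cliplo_gain values for tr_iso 0..7, as listed in the patch */
static const uint16_t gain_data[] = {
	0x0062, 0x0064, 0x006a, 0x106a,
	0x106c, 0x1074, 0x107c, 0x207c,
};

/* out-of-range tr_iso falls back to index 3 (0x106a), like the old default */
static uint16_t cliplo_gain_for(uint8_t tr_iso)
{
	if (tr_iso > 7)
		tr_iso = 3;
	return gain_data[tr_iso];
}

int main(void)
{
	for (uint8_t iso = 0; iso < 10; iso++)
		printf("tr_iso=%u -> cliplo_gain=0x%04x\n", iso,
		       cliplo_gain_for(iso));
	return 0;
}
```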
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 1a6661a9f008..756e19fc2795 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -26,6 +26,7 @@ brcmfmac-objs += \
wl_cfg80211.o \
fwil.o \
fweh.o \
+ p2p.o \
dhd_cdc.o \
dhd_common.o \
dhd_linux.o
@@ -37,4 +38,4 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
- dhd_dbg.o
\ No newline at end of file
+ dhd_dbg.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index be35a2f99b1c..11fd1c735589 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -15,8 +15,6 @@
*/
/* ****************** SDIO CARD Interface Functions **************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index d33e5598611b..d92d373733d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -14,8 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/sdio.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index fd672bf53867..ef6f23be6d32 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -39,6 +39,7 @@
#define BRCMF_C_GET_BSSID 23
#define BRCMF_C_GET_SSID 25
#define BRCMF_C_SET_SSID 26
+#define BRCMF_C_TERMINATED 28
#define BRCMF_C_GET_CHANNEL 29
#define BRCMF_C_SET_CHANNEL 30
#define BRCMF_C_GET_SRL 31
@@ -71,6 +72,7 @@
#define BRCMF_C_SET_WSEC 134
#define BRCMF_C_GET_PHY_NOISE 135
#define BRCMF_C_GET_BSS_INFO 136
+#define BRCMF_C_SET_SCB_TIMEOUT 158
#define BRCMF_C_GET_PHYLIST 180
#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
@@ -148,6 +150,7 @@
#define BRCMF_E_REASON_MINTXRATE 9
#define BRCMF_E_REASON_TXFAIL 10
+#define BRCMF_E_REASON_LINK_BSSCFG_DIS 4
#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
#define BRCMF_E_REASON_DIRECTED_ROAM 6
#define BRCMF_E_REASON_TSPEC_REJECTED 7
@@ -374,6 +377,28 @@ struct brcmf_join_params {
struct brcmf_assoc_params_le params_le;
};
+/* scan params for extended join */
+struct brcmf_join_scan_params_le {
+ u8 scan_type; /* 0 use default, active or passive scan */
+ __le32 nprobes; /* -1 use default, nr of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the home
+ * channel between channel scans
+ */
+};
+
+/* extended join params */
+struct brcmf_ext_join_params_le {
+ struct brcmf_ssid_le ssid_le; /* {0, ""}: wildcard scan */
+ struct brcmf_join_scan_params_le scan_le;
+ struct brcmf_assoc_params_le assoc_le;
+};
+
struct brcmf_wsec_key {
u32 index; /* key index */
u32 len; /* key length */
@@ -450,6 +475,19 @@ struct brcmf_sta_info_le {
__le32 rx_decrypt_failures; /* # of packet decrypted failed */
};
+/*
+ * WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+struct brcmf_rx_mgmt_data {
+ __be16 version;
+ __be16 chanspec;
+ __be32 rssi;
+ __be32 mactime;
+ __be32 rate;
+};
+
/* Bus independent dongle command */
struct brcmf_dcmd {
uint cmd; /* common dongle cmd definition */
@@ -480,50 +518,20 @@ struct brcmf_pub {
unsigned long drv_version; /* Version of dongle-resident driver */
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- /* Additional stats for the bus level */
-
/* Multicast data packets sent to dongle */
unsigned long tx_multicast;
- /* Packets flushed due to unscheduled sendup thread */
- unsigned long rx_flushed;
- /* Number of times dpc scheduled by watchdog timer */
- unsigned long wd_dpc_sched;
-
- /* Number of flow control pkts recvd */
- unsigned long fc_packets;
-
- /* Last error return */
- int bcmerror;
-
- /* Last error from dongle */
- int dongle_error;
-
- /* Suspend disable flag flag */
- int suspend_disable_flag; /* "1" to disable all extra powersaving
- during suspend */
- int in_suspend; /* flag set to 1 when early suspend called */
- int dtim_skip; /* dtim skip , default 0 means wake each dtim */
struct brcmf_if *iflist[BRCMF_MAX_IFS];
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
- u8 macvalue[ETH_ALEN];
- atomic_t pend_8021x_cnt;
- wait_queue_head_t pend_8021x_wait;
-
struct brcmf_fweh_info fweh;
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
};
-struct bcmevent_name {
- uint event;
- const char *name;
-};
-
struct brcmf_if_event {
u8 ifidx;
u8 action;
@@ -541,9 +549,11 @@ struct brcmf_cfg80211_vif;
* @vif: points to cfg80211 specific interface information.
* @ndev: associated network device.
* @stats: interface specific network statistics.
- * @idx: interface index in device firmware.
+ * @ifidx: interface index in device firmware.
* @bssidx: index of bss associated with this interface.
* @mac_addr: assigned mac address.
+ * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
+ * @pend_8021x_wait: used for signalling change in count.
*/
struct brcmf_if {
struct brcmf_pub *drvr;
@@ -552,18 +562,13 @@ struct brcmf_if {
struct net_device_stats stats;
struct work_struct setmacaddr_work;
struct work_struct multicast_work;
- int idx;
+ int ifidx;
s32 bssidx;
u8 mac_addr[ETH_ALEN];
+ atomic_t pend_8021x_cnt;
+ wait_queue_head_t pend_8021x_wait;
};
-static inline s32 brcmf_ndev_bssidx(struct net_device *ndev)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- return ifp->bssidx;
-}
-
-extern const struct bcmevent_name bcmevent_names[];
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
@@ -576,9 +581,14 @@ extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
void *buf, uint len);
-extern int brcmf_net_attach(struct brcmf_if *ifp);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx,
- s32 bssidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
+/* Remove any protocol-specific data header. */
+extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
+ struct sk_buff *rxp);
+
+extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
+ s32 ifidx, char *name, u8 *mac_addr);
+extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
#endif /* _BRCMF_H_ */
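The brcmf_if changes above move the pending-802.1X bookkeeping from the global brcmf_pub into each interface: the transmit path bumps pend_8021x_cnt for EAPOL frames, the tx-complete path decrements it and wakes any waiter, and brcmf_netdev_wait_pend8021x() sleeps until the count reaches zero. A rough user-space model of that counter-and-wait idea, using C11 atomics and a polling timeout in place of the kernel waitqueue (all names illustrative):

```c
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

/* per-interface counter of 802.1X (EAPOL) frames still in flight */
static atomic_int pend_8021x_cnt;

static void *tx_complete(void *arg)
{
	(void)arg;
	usleep(50000);				/* pretend the bus takes a while */
	atomic_fetch_sub(&pend_8021x_cnt, 1);	/* one EAPOL frame completed */
	return NULL;
}

/* crude stand-in for wait_event_timeout(): poll until zero or ~1s elapses */
static int wait_pend_8021x(void)
{
	for (int i = 0; i < 1000; i++) {
		if (atomic_load(&pend_8021x_cnt) == 0)
			return 0;
		usleep(1000);
	}
	return -1;				/* timed out */
}

int main(void)
{
	pthread_t t;

	atomic_store(&pend_8021x_cnt, 1);	/* xmit path queued one EAPOL frame */
	pthread_create(&t, NULL, tx_complete, NULL);

	printf("wait result: %d\n", wait_pend_8021x());
	pthread_join(t, NULL);
	return 0;
}
```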
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index dd38b78a9726..ad25c3408b59 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -24,18 +24,6 @@ enum brcmf_bus_state {
BRCMF_BUS_DATA /* Ready for frame transfers */
};
-struct dngl_stats {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* packets dropped by dongle */
- unsigned long tx_dropped; /* packets dropped by dongle */
- unsigned long multicast; /* multicast packets received */
-};
-
struct brcmf_bus_dcmd {
char *name;
char *param;
@@ -72,11 +60,12 @@ struct brcmf_bus_ops {
* @drvr: public driver information.
* @state: operational state of the bus interface.
* @maxctl: maximum size for rxctl request message.
- * @drvr_up: indicates driver up/down status.
* @tx_realloc: number of tx packets realloced for headroom.
* @dstats: dongle-based statistical data.
* @align: alignment requirement for the bus.
* @dcmd_list: bus/device specific dongle initialization commands.
+ * @chip: device identifier of the dongle chip.
+ * @chiprev: revision of the dongle chip.
*/
struct brcmf_bus {
union {
@@ -87,10 +76,10 @@ struct brcmf_bus {
struct brcmf_pub *drvr;
enum brcmf_bus_state state;
uint maxctl;
- bool drvr_up;
unsigned long tx_realloc;
- struct dngl_stats dstats;
u8 align;
+ u32 chip;
+ u32 chiprev;
struct list_head dcmd_list;
struct brcmf_bus_ops *ops;
@@ -130,31 +119,18 @@ int brcmf_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
* interface functions from common layer
*/
-/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
- struct sk_buff *rxp);
-
extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct device *dev, u8 ifidx,
- struct sk_buff_head *rxlist);
-static inline void brcmf_rx_packet(struct device *dev, int ifidx,
- struct sk_buff *pkt)
-{
- struct sk_buff_head q;
-
- skb_queue_head_init(&q);
- skb_queue_tail(&q, pkt);
- brcmf_rx_frame(dev, ifidx, &q);
-}
+extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
/* Indication from bus module regarding presence/insertion of dongle. */
extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
/* Indication from bus module regarding removal/absence of dongle */
extern void brcmf_detach(struct device *dev);
-
+/* Indication from bus module that dongle should be reset */
+extern void brcmf_dev_reset(struct device *dev);
/* Indication from bus module to change flow-control state */
extern void brcmf_txflowblock(struct device *dev, bool state);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index 83923553f1ac..a2354d951dd7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -19,8 +19,6 @@
* For certain dcmd codes, the dongle interprets string data from the host.
******************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/netdevice.h>
@@ -94,8 +92,6 @@ struct brcmf_proto_bdc_header {
struct brcmf_proto {
u16 reqid;
- u8 pending;
- u32 lastcmd;
u8 bus_header[BUS_HEADER_LEN];
struct brcmf_proto_cdc_dcmd msg;
unsigned char buf[BRCMF_DCMD_MAXLEN + ROUND_UP_MARGIN];
@@ -107,7 +103,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
int len = le32_to_cpu(prot->msg.len) +
sizeof(struct brcmf_proto_cdc_dcmd);
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CDC, "Enter\n");
/* NOTE : cdc->msg.len holds the desired length of the buffer to be
* returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
@@ -125,7 +121,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
int ret;
struct brcmf_proto *prot = drvr->prot;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CDC, "Enter\n");
len += sizeof(struct brcmf_proto_cdc_dcmd);
do {
ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&prot->msg,
@@ -147,20 +143,7 @@ brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
int ret = 0, retries = 0;
u32 id, flags;
- brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(CTL, "cmd %d len %d\n", cmd, len);
-
- /* Respond "bcmerror" and "bcmerrorstr" with local cache */
- if (cmd == BRCMF_C_GET_VAR && buf) {
- if (!strcmp((char *)buf, "bcmerrorstr")) {
- strncpy((char *)buf, "bcm_error",
- BCME_STRLEN);
- goto done;
- } else if (!strcmp((char *)buf, "bcmerror")) {
- *(int *)buf = drvr->dongle_error;
- goto done;
- }
- }
+ brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
@@ -210,11 +193,8 @@ retry:
}
/* Check the ERROR flag */
- if (flags & CDC_DCMD_ERROR) {
+ if (flags & CDC_DCMD_ERROR)
ret = le32_to_cpu(msg->status);
- /* Cache error from dongle */
- drvr->dongle_error = ret;
- }
done:
return ret;
@@ -228,8 +208,7 @@ int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
int ret = 0;
u32 flags, id;
- brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(CTL, "cmd %d len %d\n", cmd, len);
+ brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
@@ -262,11 +241,8 @@ int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
}
/* Check the ERROR flag */
- if (flags & CDC_DCMD_ERROR) {
+ if (flags & CDC_DCMD_ERROR)
ret = le32_to_cpu(msg->status);
- /* Cache error from dongle */
- drvr->dongle_error = ret;
- }
done:
return ret;
@@ -287,7 +263,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
{
struct brcmf_proto_bdc_header *h;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CDC, "Enter\n");
/* Push BDC header used to convey priority for buses that don't */
@@ -305,14 +281,12 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
BDC_SET_IF_IDX(h, ifidx);
}
-int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
struct sk_buff *pktbuf)
{
struct brcmf_proto_bdc_header *h;
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_pub *drvr = bus_if->drvr;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CDC, "Enter\n");
/* Pop BDC header used to convey priority for buses that don't */
@@ -329,6 +303,14 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
return -EBADE;
}
+ /* The ifidx is the idx to map to matching netdev/ifp. When receiving
+ * events this is easy because it contains the bssidx which maps
+ * 1-on-1 to the netdev/ifp. But for data frames the ifidx is received.
+ * bssidx 1 is used for p2p0 and no data can be received or
+ * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0.
+ */
+ if (*ifidx)
+ (*ifidx)++;
if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
BDC_PROTO_VER) {
@@ -338,7 +320,7 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
}
if (h->flags & BDC_FLAG_SUM_GOOD) {
- brcmf_dbg(INFO, "%s: BDC packet received with good rx-csum, flags 0x%x\n",
+ brcmf_dbg(CDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
brcmf_ifname(drvr, *ifidx), h->flags);
pkt_set_sum_good(pktbuf, true);
}
@@ -348,6 +330,8 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
skb_pull(pktbuf, BDC_HEADER_LEN);
skb_pull(pktbuf, h->data_offset << 2);
+ if (pktbuf->len == 0)
+ return -ENODATA;
return 0;
}
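The hdrpull change above also encodes the data-path mapping described in the new comment: events carry a bssidx directly, but data frames carry the firmware ifidx, and because bssidx 1 is reserved for p2p0 (which never carries data), any non-zero ifidx maps to iflist slot ifidx + 1. A tiny sketch of that mapping (the helper name is illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* bssidx 1 is reserved for p2p0, so data frames skip over it */
static uint8_t data_ifidx_to_bssidx(uint8_t ifidx)
{
	return ifidx ? ifidx + 1 : 0;
}

int main(void)
{
	for (uint8_t ifidx = 0; ifidx < 4; ifidx++)
		printf("data ifidx %u -> iflist index %u\n",
		       ifidx, data_ifidx_to_bssidx(ifidx));
	return 0;
}
```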
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index f8b52e5b941a..4544342a0428 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -14,8 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index f2ab01cd7966..bc013cbe06f6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -18,21 +18,26 @@
#define _BRCMF_DBG_H_
/* message levels */
-#define BRCMF_TRACE_VAL 0x0002
-#define BRCMF_INFO_VAL 0x0004
-#define BRCMF_DATA_VAL 0x0008
-#define BRCMF_CTL_VAL 0x0010
-#define BRCMF_TIMER_VAL 0x0020
-#define BRCMF_HDRS_VAL 0x0040
-#define BRCMF_BYTES_VAL 0x0080
-#define BRCMF_INTR_VAL 0x0100
-#define BRCMF_GLOM_VAL 0x0200
-#define BRCMF_EVENT_VAL 0x0400
-#define BRCMF_BTA_VAL 0x0800
-#define BRCMF_FIL_VAL 0x1000
-#define BRCMF_USB_VAL 0x2000
-#define BRCMF_SCAN_VAL 0x4000
-#define BRCMF_CONN_VAL 0x8000
+#define BRCMF_TRACE_VAL 0x00000002
+#define BRCMF_INFO_VAL 0x00000004
+#define BRCMF_DATA_VAL 0x00000008
+#define BRCMF_CTL_VAL 0x00000010
+#define BRCMF_TIMER_VAL 0x00000020
+#define BRCMF_HDRS_VAL 0x00000040
+#define BRCMF_BYTES_VAL 0x00000080
+#define BRCMF_INTR_VAL 0x00000100
+#define BRCMF_GLOM_VAL 0x00000200
+#define BRCMF_EVENT_VAL 0x00000400
+#define BRCMF_BTA_VAL 0x00000800
+#define BRCMF_FIL_VAL 0x00001000
+#define BRCMF_USB_VAL 0x00002000
+#define BRCMF_SCAN_VAL 0x00004000
+#define BRCMF_CONN_VAL 0x00008000
+#define BRCMF_CDC_VAL 0x00010000
+
+/* set default print format */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Macro for error messages. net_ratelimit() is used when driver
* debugging is not selected. When debugging the driver error
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 74a616b4de8e..c06cea88df0d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -14,8 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
@@ -28,6 +26,8 @@
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
+#include "fwil_types.h"
+#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
@@ -42,6 +42,12 @@ MODULE_LICENSE("Dual BSD/GPL");
int brcmf_msg_level;
module_param(brcmf_msg_level, int, 0);
+/* P2P0 enable */
+static int brcmf_p2p_enable;
+#ifdef CONFIG_BRCMDBG
+module_param_named(p2pon, brcmf_p2p_enable, int, 0);
+MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
+#endif
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
@@ -72,9 +78,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
u32 buflen;
s32 err;
- brcmf_dbg(TRACE, "enter\n");
-
ifp = container_of(work, struct brcmf_if, multicast_work);
+
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
ndev = ifp->ndev;
/* Determine initial value of allmulti flag */
@@ -131,9 +138,10 @@ _brcmf_set_mac_address(struct work_struct *work)
struct brcmf_if *ifp;
s32 err;
- brcmf_dbg(TRACE, "enter\n");
-
ifp = container_of(work, struct brcmf_if, setmacaddr_work);
+
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
ETH_ALEN);
if (err < 0) {
@@ -162,28 +170,31 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
schedule_work(&ifp->multicast_work);
}
-static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
int ret;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
+ struct ethhdr *eh;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
- /* Reject if down */
- if (!drvr->bus_if->drvr_up ||
- (drvr->bus_if->state != BRCMF_BUS_DATA)) {
- brcmf_err("xmit rejected drvup=%d state=%d\n",
- drvr->bus_if->drvr_up,
- drvr->bus_if->state);
+ /* Can the device send data? */
+ if (drvr->bus_if->state != BRCMF_BUS_DATA) {
+ brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
netif_stop_queue(ndev);
- return -ENODEV;
+ dev_kfree_skb(skb);
+ ret = -ENODEV;
+ goto done;
}
- if (!drvr->iflist[ifp->idx]) {
- brcmf_err("bad ifidx %d\n", ifp->idx);
+ if (!drvr->iflist[ifp->bssidx]) {
+ brcmf_err("bad ifidx %d\n", ifp->bssidx);
netif_stop_queue(ndev);
- return -ENODEV;
+ dev_kfree_skb(skb);
+ ret = -ENODEV;
+ goto done;
}
/* Make sure there's enough room for any header */
@@ -191,44 +202,49 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct sk_buff *skb2;
brcmf_dbg(INFO, "%s: insufficient headroom\n",
- brcmf_ifname(drvr, ifp->idx));
+ brcmf_ifname(drvr, ifp->bssidx));
drvr->bus_if->tx_realloc++;
skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
brcmf_err("%s: skb_realloc_headroom failed\n",
- brcmf_ifname(drvr, ifp->idx));
+ brcmf_ifname(drvr, ifp->bssidx));
ret = -ENOMEM;
goto done;
}
}
- /* Update multicast statistic */
- if (skb->len >= ETH_ALEN) {
- u8 *pktdata = (u8 *)(skb->data);
- struct ethhdr *eh = (struct ethhdr *)pktdata;
-
- if (is_multicast_ether_addr(eh->h_dest))
- drvr->tx_multicast++;
- if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&drvr->pend_8021x_cnt);
+ /* validate length for ether packet */
+ if (skb->len < sizeof(*eh)) {
+ ret = -EINVAL;
+ dev_kfree_skb(skb);
+ goto done;
}
+ /* handle ethernet header */
+ eh = (struct ethhdr *)(skb->data);
+ if (is_multicast_ether_addr(eh->h_dest))
+ drvr->tx_multicast++;
+ if (ntohs(eh->h_proto) == ETH_P_PAE)
+ atomic_inc(&ifp->pend_8021x_cnt);
+
/* If the protocol uses a data header, apply it */
- brcmf_proto_hdrpush(drvr, ifp->idx, skb);
+ brcmf_proto_hdrpush(drvr, ifp->ifidx, skb);
/* Use bus module to send data frame */
ret = brcmf_bus_txdata(drvr->bus_if, skb);
done:
- if (ret)
- drvr->bus_if->dstats.tx_dropped++;
- else
- drvr->bus_if->dstats.tx_packets++;
+ if (ret) {
+ ifp->stats.tx_dropped++;
+ } else {
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += skb->len;
+ }
/* Return ok: we always eat the packet */
- return 0;
+ return NETDEV_TX_OK;
}
void brcmf_txflowblock(struct device *dev, bool state)
@@ -250,8 +266,7 @@ void brcmf_txflowblock(struct device *dev, bool state)
}
}
-void brcmf_rx_frame(struct device *dev, u8 ifidx,
- struct sk_buff_head *skb_list)
+void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
{
unsigned char *eth;
uint len;
@@ -259,12 +274,25 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx,
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ u8 ifidx;
+ int ret;
brcmf_dbg(TRACE, "Enter\n");
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, &ifidx, skb);
+ ifp = drvr->iflist[ifidx];
+
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+ ifp->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
+
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
* Linux 2.4 where 'eth_type_trans' uses the
@@ -280,21 +308,11 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx,
eth = skb->data;
len = skb->len;
- ifp = drvr->iflist[ifidx];
- if (ifp == NULL)
- ifp = drvr->iflist[0];
-
- if (!ifp || !ifp->ndev ||
- ifp->ndev->reg_state != NETREG_REGISTERED) {
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
-
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
- bus_if->dstats.multicast++;
+ ifp->stats.multicast++;
skb->data = eth;
skb->len = len;
@@ -310,8 +328,13 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx,
ifp->ndev->last_rx = jiffies;
}
- bus_if->dstats.rx_bytes += skb->len;
- bus_if->dstats.rx_packets++; /* Local count */
+ if (!(ifp->ndev->flags & IFF_UP)) {
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
+
+ ifp->stats.rx_bytes += skb->len;
+ ifp->stats.rx_packets++;
if (in_interrupt())
netif_rx(skb);
@@ -328,41 +351,36 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx,
void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
- uint ifidx;
+ u8 ifidx;
struct ethhdr *eh;
u16 type;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_if *ifp;
+
+ brcmf_proto_hdrpull(drvr, &ifidx, txp);
- brcmf_proto_hdrpull(dev, &ifidx, txp);
+ ifp = drvr->iflist[ifidx];
+ if (!ifp)
+ return;
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
if (type == ETH_P_PAE) {
- atomic_dec(&drvr->pend_8021x_cnt);
- if (waitqueue_active(&drvr->pend_8021x_wait))
- wake_up(&drvr->pend_8021x_wait);
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
}
+ if (!success)
+ ifp->stats.tx_errors++;
}
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_bus *bus_if = ifp->drvr->bus_if;
- brcmf_dbg(TRACE, "Enter\n");
-
- /* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = bus_if->dstats.rx_packets;
- ifp->stats.tx_packets = bus_if->dstats.tx_packets;
- ifp->stats.rx_bytes = bus_if->dstats.rx_bytes;
- ifp->stats.tx_bytes = bus_if->dstats.tx_bytes;
- ifp->stats.rx_errors = bus_if->dstats.rx_errors;
- ifp->stats.tx_errors = bus_if->dstats.tx_errors;
- ifp->stats.rx_dropped = bus_if->dstats.rx_dropped;
- ifp->stats.tx_dropped = bus_if->dstats.tx_dropped;
- ifp->stats.multicast = bus_if->dstats.multicast;
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
return &ifp->stats;
}
@@ -395,9 +413,11 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- sprintf(info->driver, KBUILD_MODNAME);
- sprintf(info->version, "%lu", drvr->drv_version);
- sprintf(info->bus_info, "%s", dev_name(drvr->bus_if->dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ snprintf(info->version, sizeof(info->version), "%lu",
+ drvr->drv_version);
+ strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
+ sizeof(info->bus_info));
}
static const struct ethtool_ops brcmf_ethtool_ops = {
@@ -414,7 +434,7 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
u32 toe_cmpnt, csum_dir;
int ret;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
/* all ethtool calls start with a cmd word */
if (copy_from_user(&cmd, uaddr, sizeof(u32)))
@@ -437,20 +457,14 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
sprintf(info.driver, "dhd");
strcpy(info.version, BRCMF_VERSION_STR);
}
-
- /* otherwise, require dongle to be up */
- else if (!drvr->bus_if->drvr_up) {
- brcmf_err("dongle is not up\n");
- return -ENODEV;
- }
- /* finally, report dongle driver type */
+ /* report dongle driver type */
else
sprintf(info.driver, "wl");
sprintf(info.version, "%lu", drvr->drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
return -EFAULT;
- brcmf_dbg(CTL, "given %*s, returning %s\n",
+ brcmf_dbg(TRACE, "given %*s, returning %s\n",
(int)sizeof(drvname), drvname, info.driver);
break;
@@ -517,9 +531,9 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
+ brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
- if (!drvr->iflist[ifp->idx])
+ if (!drvr->iflist[ifp->bssidx])
return -1;
if (cmd == SIOCETHTOOL)
@@ -531,17 +545,12 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
static int brcmf_netdev_stop(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
-
- brcmf_dbg(TRACE, "Enter\n");
- if (drvr->bus_if->drvr_up == 0)
- return 0;
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
brcmf_cfg80211_down(ndev);
/* Set state and stop OS transmissions */
- drvr->bus_if->drvr_up = false;
netif_stop_queue(ndev);
return 0;
@@ -555,7 +564,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
u32 toe_ol;
s32 ret = 0;
- brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
/* If bus is not ready, can't continue */
if (bus_if->state != BRCMF_BUS_DATA) {
@@ -563,25 +572,17 @@ static int brcmf_netdev_open(struct net_device *ndev)
return -EAGAIN;
}
- atomic_set(&drvr->pend_8021x_cnt, 0);
-
- memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
+ atomic_set(&ifp->pend_8021x_cnt, 0);
/* Get current TOE mode from dongle */
if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
&& (toe_ol & TOE_TX_CSUM_OL) != 0)
- drvr->iflist[ifp->idx]->ndev->features |=
- NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM;
else
- drvr->iflist[ifp->idx]->ndev->features &=
- ~NETIF_F_IP_CSUM;
-
- /* make sure RF is ready for work */
- brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
+ ndev->features &= ~NETIF_F_IP_CSUM;
/* Allow transmit calls */
netif_start_queue(ndev);
- drvr->bus_if->drvr_up = true;
if (brcmf_cfg80211_up(ndev)) {
brcmf_err("failed to bring up cfg80211\n");
return -1;
@@ -600,29 +601,18 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
-static const struct net_device_ops brcmf_netdev_ops_virt = {
- .ndo_open = brcmf_cfg80211_up,
- .ndo_stop = brcmf_cfg80211_down,
- .ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
- .ndo_start_xmit = brcmf_netdev_start_xmit,
- .ndo_set_mac_address = brcmf_netdev_set_mac_address,
- .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
-int brcmf_net_attach(struct brcmf_if *ifp)
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
+ s32 err;
- brcmf_dbg(TRACE, "ifidx %d mac %pM\n", ifp->idx, ifp->mac_addr);
+ brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+ ifp->mac_addr);
ndev = ifp->ndev;
/* set appropriate operations */
- if (!ifp->idx)
- ndev->netdev_ops = &brcmf_netdev_ops_pri;
- else
- ndev->netdev_ops = &brcmf_netdev_ops_virt;
+ ndev->netdev_ops = &brcmf_netdev_ops_pri;
ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
@@ -633,7 +623,14 @@ int brcmf_net_attach(struct brcmf_if *ifp)
/* set the mac address */
memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- if (register_netdev(ndev) != 0) {
+ INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
+
+ if (rtnl_locked)
+ err = register_netdevice(ndev);
+ else
+ err = register_netdev(ndev);
+ if (err != 0) {
brcmf_err("couldn't register the net device\n");
goto fail;
}
@@ -647,16 +644,78 @@ fail:
return -EBADE;
}
-struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
- char *name, u8 *addr_mask)
+static int brcmf_net_p2p_open(struct net_device *ndev)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ return brcmf_cfg80211_up(ndev);
+}
+
+static int brcmf_net_p2p_stop(struct net_device *ndev)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ return brcmf_cfg80211_down(ndev);
+}
+
+static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
+ struct ifreq *ifr, int cmd)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+ return 0;
+}
+
+static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_p2p = {
+ .ndo_open = brcmf_net_p2p_open,
+ .ndo_stop = brcmf_net_p2p_stop,
+ .ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
+ .ndo_start_xmit = brcmf_net_p2p_start_xmit
+};
+
+static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
+{
+ struct net_device *ndev;
+
+ brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+ ifp->mac_addr);
+ ndev = ifp->ndev;
+
+ ndev->netdev_ops = &brcmf_netdev_ops_p2p;
+
+ /* set the mac address */
+ memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+
+ if (register_netdev(ndev) != 0) {
+ brcmf_err("couldn't register the p2p net device\n");
+ goto fail;
+ }
+
+ brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+
+ return 0;
+
+fail:
+ return -EBADE;
+}
+
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+ char *name, u8 *mac_addr)
{
struct brcmf_if *ifp;
struct net_device *ndev;
- int i;
- brcmf_dbg(TRACE, "idx %d\n", ifidx);
+ brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);
- ifp = drvr->iflist[ifidx];
+ ifp = drvr->iflist[bssidx];
/*
* Delete the existing interface before overwriting it
* in case we missed the BRCMF_E_IF_DEL event.
@@ -668,7 +727,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
netif_stop_queue(ifp->ndev);
unregister_netdev(ifp->ndev);
free_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
+ drvr->iflist[bssidx] = NULL;
} else {
brcmf_err("ignore IF event\n");
return ERR_PTR(-EINVAL);
@@ -685,16 +744,15 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
ifp = netdev_priv(ndev);
ifp->ndev = ndev;
ifp->drvr = drvr;
- drvr->iflist[ifidx] = ifp;
- ifp->idx = ifidx;
+ drvr->iflist[bssidx] = ifp;
+ ifp->ifidx = ifidx;
ifp->bssidx = bssidx;
- INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
- INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
- if (addr_mask != NULL)
- for (i = 0; i < ETH_ALEN; i++)
- ifp->mac_addr[i] = drvr->mac[i] ^ addr_mask[i];
+ init_waitqueue_head(&ifp->pend_8021x_wait);
+
+ if (mac_addr != NULL)
+ memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
current->pid, ifp->ndev->name, ifp->mac_addr);
@@ -702,19 +760,18 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
return ifp;
}
-void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
struct brcmf_if *ifp;
- brcmf_dbg(TRACE, "idx %d\n", ifidx);
-
- ifp = drvr->iflist[ifidx];
+ ifp = drvr->iflist[bssidx];
if (!ifp) {
- brcmf_err("Null interface\n");
+ brcmf_err("Null interface, idx=%d\n", bssidx);
return;
}
+ brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
if (ifp->ndev) {
- if (ifidx == 0) {
+ if (bssidx == 0) {
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
rtnl_lock();
brcmf_netdev_stop(ifp->ndev);
@@ -724,12 +781,14 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
netif_stop_queue(ifp->ndev);
}
- cancel_work_sync(&ifp->setmacaddr_work);
- cancel_work_sync(&ifp->multicast_work);
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+ cancel_work_sync(&ifp->setmacaddr_work);
+ cancel_work_sync(&ifp->multicast_work);
+ }
unregister_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
- if (ifidx == 0)
+ drvr->iflist[bssidx] = NULL;
+ if (bssidx == 0)
brcmf_cfg80211_detach(drvr->config);
free_netdev(ifp->ndev);
}
@@ -769,8 +828,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
- init_waitqueue_head(&drvr->pend_8021x_wait);
-
return ret;
fail:
@@ -785,6 +842,7 @@ int brcmf_bus_start(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_if *ifp;
+ struct brcmf_if *p2p_ifp;
brcmf_dbg(TRACE, "\n");
@@ -800,6 +858,13 @@ int brcmf_bus_start(struct device *dev)
if (IS_ERR(ifp))
return PTR_ERR(ifp);
+ if (brcmf_p2p_enable)
+ p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
+ else
+ p2p_ifp = NULL;
+ if (IS_ERR(p2p_ifp))
+ p2p_ifp = NULL;
+
/* signal bus ready */
bus_if->state = BRCMF_BUS_DATA;
@@ -818,16 +883,22 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
goto fail;
- ret = brcmf_net_attach(ifp);
+ ret = brcmf_net_attach(ifp, false);
fail:
if (ret < 0) {
brcmf_err("failed: %d\n", ret);
if (drvr->config)
brcmf_cfg80211_detach(drvr->config);
- free_netdev(drvr->iflist[0]->ndev);
+ free_netdev(ifp->ndev);
drvr->iflist[0] = NULL;
+ if (p2p_ifp) {
+ free_netdev(p2p_ifp->ndev);
+ drvr->iflist[1] = NULL;
+ }
return ret;
}
+ if ((brcmf_p2p_enable) && (p2p_ifp))
+ brcmf_net_p2p_attach(p2p_ifp);
return 0;
}
@@ -845,9 +916,21 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
}
}
+void brcmf_dev_reset(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+
+ if (drvr == NULL)
+ return;
+
+ if (drvr->iflist[0])
+ brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
+}
+
void brcmf_detach(struct device *dev)
{
- int i;
+ s32 i;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@@ -866,28 +949,26 @@ void brcmf_detach(struct device *dev)
brcmf_bus_detach(drvr);
- if (drvr->prot) {
+ if (drvr->prot)
brcmf_proto_detach(drvr);
- }
brcmf_debugfs_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
}
-static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
- return atomic_read(&drvr->pend_8021x_cnt);
+ return atomic_read(&ifp->pend_8021x_cnt);
}
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
int err;
- err = wait_event_timeout(drvr->pend_8021x_wait,
- !brcmf_get_pend_8021x_cnt(drvr),
+ err = wait_event_timeout(ifp->pend_8021x_wait,
+ !brcmf_get_pend_8021x_cnt(ifp),
msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
WARN_ON(!err);
@@ -895,6 +976,16 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
return !err;
}
+/*
+ * Return the chip id and rev of the device encoded in a u32.
+ */
+u32 brcmf_get_chip_info(struct brcmf_if *ifp)
+{
+ struct brcmf_bus *bus = ifp->drvr->bus_if;
+
+ return bus->chip << 4 | bus->chiprev;
+}
+
static void brcmf_driver_init(struct work_struct *work)
{
brcmf_debugfs_init();
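brcmf_get_chip_info() above packs the dongle identity into a single u32, with the revision in the low four bits and the chip id above it; the SDIO probe code later in this diff fills in bus->chip and bus->chiprev. A small round-trip sketch of that encoding (the chip and rev values are picked arbitrarily for illustration):

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_chip_info(uint32_t chip, uint32_t chiprev)
{
	return chip << 4 | chiprev;	/* rev is assumed to fit in 4 bits */
}

int main(void)
{
	uint32_t info = pack_chip_info(0x4330, 3);

	printf("packed:  0x%x\n", (unsigned)info);
	printf("chip:    0x%x\n", (unsigned)(info >> 4));
	printf("chiprev: %u\n", (unsigned)(info & 0xf));
	return 0;
}
```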
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index cf857f1edf8c..4469321c0eb3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -14,8 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -1098,7 +1096,6 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
- bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
rd->len = 0;
@@ -1169,7 +1166,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
int errcode;
u8 doff, sfdoff;
- int ifidx = 0;
bool usechain = bus->use_rxchain;
struct brcmf_sdio_read rd_new;
@@ -1301,7 +1297,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (errcode < 0) {
brcmf_err("glom read of %d bytes failed: %d\n",
dlen, errcode);
- bus->sdiodev->bus_if->dstats.rx_errors++;
sdio_claim_host(bus->sdiodev->func[1]);
if (bus->glomerr++ < 3) {
@@ -1388,13 +1383,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
continue;
- } else if (brcmf_proto_hdrpull(bus->sdiodev->dev,
- &ifidx, pfirst) != 0) {
- brcmf_err("rx protocol error\n");
- bus->sdiodev->bus_if->dstats.rx_errors++;
- skb_unlink(pfirst, &bus->glom);
- brcmu_pkt_buf_free_skb(pfirst);
- continue;
}
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
@@ -1407,7 +1395,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
/* sent any remaining packets up */
if (bus->glom.qlen)
- brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
+ brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
bus->sdcnt.rxglomframes++;
bus->sdcnt.rxglompkts += bus->glom.qlen;
@@ -1455,10 +1443,9 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if (bus->rxblen)
buf = vzalloc(bus->rxblen);
- if (!buf) {
- brcmf_err("no memory for control frame\n");
+ if (!buf)
goto done;
- }
+
rbuf = bus->rxbuf;
pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
if (pad)
@@ -1488,7 +1475,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
rdlen, bus->sdiodev->bus_if->maxctl);
- bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
@@ -1496,7 +1482,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
- bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
@@ -1558,10 +1543,10 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt; /* Packet for event or data frames */
+ struct sk_buff_head pktlist; /* needed for bus interface */
u16 pad; /* Number of pad bytes to read */
uint rxleft = 0; /* Remaining number of frames allowed */
int sdret; /* Return code from calls */
- int ifidx = 0;
uint rxcount = 0; /* Total frames read */
struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
u8 head_read = 0;
@@ -1644,7 +1629,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (!pkt) {
/* Give up on data, request rtx of events */
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
- bus->sdiodev->bus_if->dstats.rx_dropped++;
brcmf_sdbrcm_rxfail(bus, false,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
@@ -1662,7 +1646,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("read %d bytes from channel %d failed: %d\n",
rd->len, rd->channel, sdret);
brcmu_pkt_buf_free_skb(pkt);
- bus->sdiodev->bus_if->dstats.rx_errors++;
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_rxfail(bus, true,
RETRYCHAN(rd->channel));
@@ -1760,15 +1743,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (pkt->len == 0) {
brcmu_pkt_buf_free_skb(pkt);
continue;
- } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx,
- pkt) != 0) {
- brcmf_err("rx protocol error\n");
- brcmu_pkt_buf_free_skb(pkt);
- bus->sdiodev->bus_if->dstats.rx_errors++;
- continue;
}
- brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
+ skb_queue_head_init(&pktlist);
+ skb_queue_tail(&pktlist, pkt);
+ brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
}
rxcount = maxframes - rxleft;
@@ -1954,10 +1933,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
datalen = pkt->len - SDPCM_HDRLEN;
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
- if (ret)
- bus->sdiodev->bus_if->dstats.tx_errors++;
- else
- bus->sdiodev->bus_if->dstats.tx_bytes += datalen;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -1976,8 +1951,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if (bus->sdiodev->bus_if->drvr_up &&
- (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+ if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
bus->txoff = false;
brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2724,9 +2698,10 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
* address of sdpcm_shared structure
*/
sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
(u8 *)&addr_le, 4);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2745,10 +2720,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
}
/* Read hndrte_shared structure */
- sdio_claim_host(bus->sdiodev->func[1]);
rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
sizeof(struct sdpcm_shared_le));
- sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2850,14 +2823,12 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
return 0;
- sdio_claim_host(bus->sdiodev->func[1]);
error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
sizeof(struct brcmf_trap_info));
if (error < 0)
return error;
nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
- sdio_release_host(bus->sdiodev->func[1]);
if (nbytes < 0)
return nbytes;
@@ -3322,9 +3293,6 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
int ret;
- if (bus->sdiodev->bus_if->drvr_up)
- return -EISCONN;
-
ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
@@ -3955,6 +3923,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* Assign bus interface call back */
bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
+ bus->sdiodev->bus_if->chip = bus->ci->chip;
+ bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
/* Attach to the brcmf/OS/network interface */
ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index ba0b22512f12..e9d6f91a1f2b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -189,24 +189,24 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
}
- ifp = drvr->iflist[ifevent->ifidx];
+ ifp = drvr->iflist[ifevent->bssidx];
if (ifevent->action == BRCMF_E_IF_ADD) {
brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
emsg->addr);
- ifp = brcmf_add_if(drvr, ifevent->ifidx, ifevent->bssidx,
+ ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx,
emsg->ifname, emsg->addr);
if (IS_ERR(ifp))
return;
if (!drvr->fweh.evt_handler[BRCMF_E_IF])
- err = brcmf_net_attach(ifp);
+ err = brcmf_net_attach(ifp, false);
}
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
if (ifevent->action == BRCMF_E_IF_DEL)
- brcmf_del_if(drvr, ifevent->ifidx);
+ brcmf_del_if(drvr, ifevent->bssidx);
}
/**
@@ -250,8 +250,6 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
drvr = container_of(fweh, struct brcmf_pub, fweh);
while ((event = brcmf_fweh_dequeue_event(fweh))) {
- ifp = drvr->iflist[event->ifidx];
-
brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
brcmf_fweh_event_name(event->code), event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
@@ -283,6 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
goto event_free;
}
+ ifp = drvr->iflist[emsg.bsscfgidx];
err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
event->data);
if (err) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 36901f76a3b5..8c39b51dcccf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -83,6 +83,7 @@ struct brcmf_event;
BRCMF_ENUM_DEF(MULTICAST_DECODE_ERROR, 51) \
BRCMF_ENUM_DEF(TRACE, 52) \
BRCMF_ENUM_DEF(IF, 54) \
+ BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
BRCMF_ENUM_DEF(RSSI, 56) \
BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
@@ -96,8 +97,11 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DFS_AP_RESUME, 66) \
BRCMF_ENUM_DEF(ESCAN_RESULT, 69) \
BRCMF_ENUM_DEF(ACTION_FRAME_OFF_CHAN_COMPLETE, 70) \
+ BRCMF_ENUM_DEF(PROBERESP_MSG, 71) \
+ BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
- BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74)
+ BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
+ BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index d8d8b6549dc5..8d1def935b8d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -45,9 +45,10 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
if (data != NULL)
len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
if (set)
- err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len);
+ err = brcmf_proto_cdc_set_dcmd(drvr, ifp->ifidx, cmd, data,
+ len);
else
- err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data,
+ err = brcmf_proto_cdc_query_dcmd(drvr, ifp->ifidx, cmd, data,
len);
if (err >= 0)
@@ -100,6 +101,7 @@ brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
__le32 data_le = cpu_to_le32(data);
mutex_lock(&ifp->drvr->proto_block);
+ brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data);
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
mutex_unlock(&ifp->drvr->proto_block);
@@ -116,6 +118,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
mutex_unlock(&ifp->drvr->proto_block);
*data = le32_to_cpu(data_le);
+ brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data);
return err;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
new file mode 100644
index 000000000000..0f2c83bc95dc
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWIL_TYPES_H_
+#define FWIL_TYPES_H_
+
+#include <linux/if_ether.h>
+
+
+#define BRCMF_FIL_ACTION_FRAME_SIZE 1800
+
+
+enum brcmf_fil_p2p_if_types {
+ BRCMF_FIL_P2P_IF_CLIENT,
+ BRCMF_FIL_P2P_IF_GO,
+ BRCMF_FIL_P2P_IF_DYNBCN_GO,
+ BRCMF_FIL_P2P_IF_DEV,
+};
+
+struct brcmf_fil_p2p_if_le {
+ u8 addr[ETH_ALEN];
+ __le16 type;
+ __le16 chspec;
+};
+
+struct brcmf_fil_chan_info_le {
+ __le32 hw_channel;
+ __le32 target_channel;
+ __le32 scan_channel;
+};
+
+struct brcmf_fil_action_frame_le {
+ u8 da[ETH_ALEN];
+ __le16 len;
+ __le32 packet_id;
+ u8 data[BRCMF_FIL_ACTION_FRAME_SIZE];
+};
+
+struct brcmf_fil_af_params_le {
+ __le32 channel;
+ __le32 dwell_time;
+ u8 bssid[ETH_ALEN];
+ u8 pad[2];
+ struct brcmf_fil_action_frame_le action_frame;
+};
+
+struct brcmf_fil_bss_enable_le {
+ __le32 bsscfg_idx;
+ __le32 enable;
+};
+
+#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
new file mode 100644
index 000000000000..4166e642068b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -0,0 +1,2277 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <defs.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include "fwil.h"
+#include "fwil_types.h"
+#include "p2p.h"
+#include "wl_cfg80211.h"
+
+/* parameters used for p2p escan */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
+#define BRCMF_P2P_WILDCARD_SSID "DIRECT-"
+#define BRCMF_P2P_WILDCARD_SSID_LEN (sizeof(BRCMF_P2P_WILDCARD_SSID) - 1)
+
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+ (channel == SOCIAL_CHAN_2) || \
+ (channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT 3
+#define AF_PEER_SEARCH_CNT 2
+
+#define BRCMF_SCB_TIMEOUT_VALUE 20
+
+#define P2P_VER 9 /* P2P version: 9=WiFi P2P v1.0 */
+#define P2P_PUB_AF_CATEGORY 0x04
+#define P2P_PUB_AF_ACTION 0x09
+#define P2P_AF_CATEGORY 0x7f
+#define P2P_OUI "\x50\x6F\x9A" /* P2P OUI */
+#define P2P_OUI_LEN 3 /* P2P OUI length */
+
+/* Action Frame Constants */
+#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action */
+#define DOT11_ACTION_CAT_OFF 0 /* category offset */
+#define DOT11_ACTION_ACT_OFF 1 /* action offset */
+
+#define P2P_AF_DWELL_TIME 200
+#define P2P_AF_MIN_DWELL_TIME 100
+#define P2P_AF_MED_DWELL_TIME 400
+#define P2P_AF_LONG_DWELL_TIME 1000
+#define P2P_AF_TX_MAX_RETRY 1
+#define P2P_AF_MAX_WAIT_TIME 2000
+#define P2P_INVALID_CHANNEL -1
+#define P2P_CHANNEL_SYNC_RETRY 5
+#define P2P_AF_FRM_SCAN_MAX_WAIT 1500
+#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */
+
+/* P2P Service Discovery related */
+#define P2PSD_ACTION_CATEGORY 0x04 /* Public action frame */
+#define P2PSD_ACTION_ID_GAS_IREQ 0x0a /* GAS Initial Request AF */
+#define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* GAS Initial Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */
+
+/**
+ * struct brcmf_p2p_disc_st_le - set discovery state in firmware.
+ *
+ * @state: requested discovery state (see enum brcmf_p2p_disc_state).
+ * @chspec: channel parameter for %WL_P2P_DISC_ST_LISTEN state.
+ * @dwell: dwell time in ms for %WL_P2P_DISC_ST_LISTEN state.
+ */
+struct brcmf_p2p_disc_st_le {
+ u8 state;
+ __le16 chspec;
+ __le16 dwell;
+};
+
+/**
+ * enum brcmf_p2p_disc_state - P2P discovery state values
+ *
+ * @WL_P2P_DISC_ST_SCAN: P2P discovery with wildcard SSID and P2P IE.
+ * @WL_P2P_DISC_ST_LISTEN: P2P discovery off-channel for specified time.
+ * @WL_P2P_DISC_ST_SEARCH: P2P discovery with P2P wildcard SSID and P2P IE.
+ */
+enum brcmf_p2p_disc_state {
+ WL_P2P_DISC_ST_SCAN,
+ WL_P2P_DISC_ST_LISTEN,
+ WL_P2P_DISC_ST_SEARCH
+};
+
+/**
+ * struct brcmf_p2p_scan_le - P2P specific scan request.
+ *
+ * @type: type of scan method requested (values: 'E' or 'S').
+ * @reserved: reserved (ignored).
+ * @eparams: parameters used for type 'E'.
+ * @sparams: parameters used for type 'S'.
+ */
+struct brcmf_p2p_scan_le {
+ u8 type;
+ u8 reserved[3];
+ union {
+ struct brcmf_escan_params_le eparams;
+ struct brcmf_scan_params_le sparams;
+ };
+};
+
+/**
+ * struct brcmf_p2p_pub_act_frame - WiFi P2P Public Action Frame
+ *
+ * @category: P2P_PUB_AF_CATEGORY
+ * @action: P2P_PUB_AF_ACTION
+ * @oui[3]: P2P_OUI
+ * @oui_type: OUI type - P2P_VER
+ * @subtype: OUI subtype - P2P_TYPE_*
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_pub_act_frame {
+ u8 category;
+ u8 action;
+ u8 oui[3];
+ u8 oui_type;
+ u8 subtype;
+ u8 dialog_token;
+ u8 elts[1];
+};
+
+/**
+ * struct brcmf_p2p_action_frame - WiFi P2P Action Frame
+ *
+ * @category: P2P_AF_CATEGORY
+ * @OUI[3]: OUI - P2P_OUI
+ * @type: OUI Type - P2P_VER
+ * @subtype: OUI Subtype - P2P_AF_*
+ * @dialog_token: nonzero, identifies req/resp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_action_frame {
+ u8 category;
+ u8 oui[3];
+ u8 type;
+ u8 subtype;
+ u8 dialog_token;
+ u8 elts[1];
+};
+
+/**
+ * struct brcmf_p2psd_gas_pub_act_frame - Wi-Fi GAS Public Action Frame
+ *
+ * @category: 0x04 Public Action Frame
+ * @action: 0x6c Advertisement Protocol
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @query_data[1]: Query Data (SD GAS Initial Request/Response).
+ */
+struct brcmf_p2psd_gas_pub_act_frame {
+ u8 category;
+ u8 action;
+ u8 dialog_token;
+ u8 query_data[1];
+};
+
+/**
+ * struct brcmf_config_af_params - Action Frame Parameters for tx.
+ *
+ * @mpc_onoff: to send the action frame successfully, mpc may need to be
+ * changed: 0: turn mpc off, 1: turn mpc on, (-1): do nothing.
+ * @search_channel: 1: search peer's channel to send af.
+ * @extra_listen: keep the dwell time to get af response frame.
+ */
+struct brcmf_config_af_params {
+ s32 mpc_onoff;
+ bool search_channel;
+ bool extra_listen;
+};
+
+/**
+ * brcmf_p2p_is_pub_action() - true if p2p public type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p public action type
+ */
+static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_pub_act_frame *pact_frm;
+
+ if (frame == NULL)
+ return false;
+
+ pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+ return false;
+
+ if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+ pact_frm->action == P2P_PUB_AF_ACTION &&
+ pact_frm->oui_type == P2P_VER &&
+ memcmp(pact_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+ return true;
+
+ return false;
+}
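+
+/*
+ * For illustration only: a frame whose first bytes are 0x04 0x09 0x50 0x6f
+ * 0x9a 0x09 (category, action, P2P OUI, OUI type P2P_VER) is accepted here
+ * as a P2P public action frame.
+ */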
+
+/**
+ * brcmf_p2p_is_p2p_action() - true if p2p action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p action type
+ */
+static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_action_frame *act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ act_frm = (struct brcmf_p2p_action_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+ return false;
+
+ if (act_frm->category == P2P_AF_CATEGORY &&
+ act_frm->type == P2P_VER &&
+ memcmp(act_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+ return true;
+
+ return false;
+}
+
+/**
+ * brcmf_p2p_is_gas_action() - true if p2p gas action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p gas action type
+ */
+static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+ return false;
+
+ if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+ return false;
+
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+ return true;
+
+ return false;
+}
+
+/**
+ * brcmf_p2p_print_actframe() - debug print routine.
+ *
+ * @tx: Received or to be transmitted
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Print information about the p2p action frame
+ */
+
+#ifdef DEBUG
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_pub_act_frame *pact_frm;
+ struct brcmf_p2p_action_frame *act_frm;
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+ if (!frame || frame_len <= 2)
+ return;
+
+ if (brcmf_p2p_is_pub_action(frame, frame_len)) {
+ pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ switch (pact_frm->subtype) {
+ case P2P_PAF_GON_REQ:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Req Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_GON_RSP:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Rsp Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_GON_CONF:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Confirm Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_INVITE_REQ:
+ brcmf_dbg(TRACE, "%s P2P Invitation Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_INVITE_RSP:
+ brcmf_dbg(TRACE, "%s P2P Invitation Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_DEVDIS_REQ:
+ brcmf_dbg(TRACE, "%s P2P Device Discoverability Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_DEVDIS_RSP:
+ brcmf_dbg(TRACE, "%s P2P Device Discoverability Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_PROVDIS_REQ:
+ brcmf_dbg(TRACE, "%s P2P Provision Discovery Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_PROVDIS_RSP:
+ brcmf_dbg(TRACE, "%s P2P Provision Discovery Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P Public Action Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ }
+ } else if (brcmf_p2p_is_p2p_action(frame, frame_len)) {
+ act_frm = (struct brcmf_p2p_action_frame *)frame;
+ switch (act_frm->subtype) {
+ case P2P_AF_NOTICE_OF_ABSENCE:
+ brcmf_dbg(TRACE, "%s P2P Notice of Absence Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_PRESENCE_REQ:
+ brcmf_dbg(TRACE, "%s P2P Presence Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_PRESENCE_RSP:
+ brcmf_dbg(TRACE, "%s P2P Presence Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_GO_DISC_REQ:
+ brcmf_dbg(TRACE, "%s P2P Discoverability Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P Action Frame\n",
+ (tx) ? "TX" : "RX");
+ }
+
+ } else if (brcmf_p2p_is_gas_action(frame, frame_len)) {
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ switch (sd_act_frm->action) {
+ case P2PSD_ACTION_ID_GAS_IREQ:
+ brcmf_dbg(TRACE, "%s P2P GAS Initial Request\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_IRESP:
+ brcmf_dbg(TRACE, "%s P2P GAS Initial Response\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_CREQ:
+ brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_CRESP:
+ brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P GAS Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ }
+ }
+}
+
+#else
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+}
+
+#endif
+
+
+/**
+ * brcmf_p2p_chnr_to_chspec() - convert channel number to chanspec.
+ *
+ * @channel: channel number
+ */
+static u16 brcmf_p2p_chnr_to_chspec(u16 channel)
+{
+ u16 chanspec;
+
+ chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ return chanspec;
+}
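+
+/*
+ * Example (illustrative only): for social channel 11 the chanspec above
+ * selects the 2G band with 20 MHz bandwidth and no control sideband; a
+ * channel above CH_MAX_2G_CHANNEL (e.g. 36) would select the 5G band.
+ */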
+
+
+/**
+ * brcmf_p2p_set_firmware() - prepare firmware for peer-to-peer operation.
+ *
+ * @ifp: ifp to use for iovars (primary).
+ * @p2p_mac: mac address to configure for p2p_da_override
+ */
+static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
+{
+ s32 ret = 0;
+
+ brcmf_fil_iovar_int_set(ifp, "apsta", 1);
+
+ /* In case of COB type, the firmware has a default mac address.
+ * After initializing the firmware, we have to set the current mac
+ * address to the firmware for use as the P2P device address.
+ */
+ ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac,
+ ETH_ALEN);
+ if (ret)
+ brcmf_err("failed to update device address ret %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * brcmf_p2p_generate_bss_mac() - derive mac addresses for P2P.
+ *
+ * @p2p: P2P specific data.
+ *
+ * P2P needs mac addresses for P2P device and interface. These are
+ * derived from the primary net device, ie. the permanent ethernet
+ * address of the device.
+ */
+static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+ struct brcmf_if *p2p_ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+
+ /* Generate the P2P Device Address. This consists of the device's
+ * primary MAC address with the locally administered bit set.
+ */
+ memcpy(p2p->dev_addr, pri_ifp->mac_addr, ETH_ALEN);
+ p2p->dev_addr[0] |= 0x02;
+ memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
+
+ /* Generate the P2P Interface Address. If the discovery and connection
+ * BSSCFGs need to simultaneously co-exist, then this address must be
+ * different from the P2P Device Address, but also locally administered.
+ */
+ memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
+ p2p->int_addr[4] ^= 0x80;
+}
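+
+/*
+ * Worked example with a hypothetical primary MAC of 00:90:4c:12:34:56:
+ * the P2P Device Address becomes 02:90:4c:12:34:56 (locally administered
+ * bit set) and the P2P Interface Address 02:90:4c:12:b4:56 (bit 0x80 of
+ * octet 4 toggled so it differs from the device address).
+ */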
+
+/**
+ * brcmf_p2p_scan_is_p2p_request() - is cfg80211 scan request a P2P scan.
+ *
+ * @request: the scan request as received from cfg80211.
+ *
+ * returns true if one of the ssids in the request matches the
+ * P2P wildcard ssid; otherwise returns false.
+ */
+static bool brcmf_p2p_scan_is_p2p_request(struct cfg80211_scan_request *request)
+{
+ struct cfg80211_ssid *ssids = request->ssids;
+ int i;
+
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len != BRCMF_P2P_WILDCARD_SSID_LEN)
+ continue;
+
+ brcmf_dbg(INFO, "comparing ssid \"%s\"", ssids[i].ssid);
+ if (!memcmp(BRCMF_P2P_WILDCARD_SSID, ssids[i].ssid,
+ BRCMF_P2P_WILDCARD_SSID_LEN))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * brcmf_p2p_set_discover_state() - set discover state in firmware.
+ *
+ * @ifp: low-level interface object.
+ * @state: discover state to set.
+ * @chanspec: channel parameters (for state @WL_P2P_DISC_ST_LISTEN only).
+ * @listen_ms: duration to listen (for state @WL_P2P_DISC_ST_LISTEN only).
+ */
+static s32 brcmf_p2p_set_discover_state(struct brcmf_if *ifp, u8 state,
+ u16 chanspec, u16 listen_ms)
+{
+ struct brcmf_p2p_disc_st_le discover_state;
+ s32 ret = 0;
+ brcmf_dbg(TRACE, "enter\n");
+
+ discover_state.state = state;
+ discover_state.chspec = cpu_to_le16(chanspec);
+ discover_state.dwell = cpu_to_le16(listen_ms);
+ ret = brcmf_fil_bsscfg_data_set(ifp, "p2p_state", &discover_state,
+ sizeof(discover_state));
+ return ret;
+}
+
+/**
+ * brcmf_p2p_deinit_discovery() - disable P2P device discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Resets the discovery state and disables it in firmware.
+ */
+static s32 brcmf_p2p_deinit_discovery(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ /* Set the discovery state to SCAN */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ (void)brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+
+ /* Disable P2P discovery in the firmware */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ (void)brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 0);
+
+ return 0;
+}
+
+/**
+ * brcmf_p2p_enable_discovery() - initialize and configure discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Initializes the discovery device and configures the virtual interface.
+ */
+static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 ret = 0;
+
+ brcmf_dbg(TRACE, "enter\n");
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (!vif) {
+ brcmf_err("P2P config device not available\n");
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status)) {
+ brcmf_dbg(INFO, "P2P config device already configured\n");
+ goto exit;
+ }
+
+ /* Re-initialize P2P Discovery in the firmware */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ ret = brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 1);
+ if (ret < 0) {
+ brcmf_err("set p2p_disc error\n");
+ goto exit;
+ }
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ ret = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+ if (ret < 0) {
+ brcmf_err("unable to set WL_P2P_DISC_ST_SCAN\n");
+ goto exit;
+ }
+
+ /*
+ * Set wsec to any non-zero value in the discovery bsscfg
+ * to ensure our P2P probe responses have the privacy bit
+ * set in the 802.11 WPA IE. Some peer devices may not
+ * initiate WPS with us if this bit is not set.
+ */
+ ret = brcmf_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED);
+ if (ret < 0) {
+ brcmf_err("wsec error %d\n", ret);
+ goto exit;
+ }
+
+ set_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status);
+exit:
+ return ret;
+}
+
+/**
+ * brcmf_p2p_escan() - initiate a P2P scan.
+ *
+ * @p2p: P2P specific data.
+ * @num_chans: number of channels to scan.
+ * @chanspecs: channel parameters for @num_chans channels.
+ * @search_state: P2P discover state to use.
+ * @action: scan action to pass to firmware.
+ * @bss_type: type of P2P bss.
+ */
+static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
+ u16 chanspecs[], s32 search_state, u16 action,
+ enum p2p_bss_type bss_type)
+{
+ s32 ret = 0;
+ s32 memsize = offsetof(struct brcmf_p2p_scan_le,
+ eparams.params_le.channel_list);
+ s32 nprobes;
+ s32 active;
+ u32 i;
+ u8 *memblk;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_p2p_scan_le *p2p_params;
+ struct brcmf_scan_params_le *sparams;
+ struct brcmf_ssid ssid;
+
+ memsize += num_chans * sizeof(__le16);
+ memblk = kzalloc(memsize, GFP_KERNEL);
+ if (!memblk)
+ return -ENOMEM;
+
+ vif = p2p->bss_idx[bss_type].vif;
+ if (vif == NULL) {
+ brcmf_err("no vif for bss type %d\n", bss_type);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ switch (search_state) {
+ case WL_P2P_DISC_ST_SEARCH:
+ /*
+ * If we are in SEARCH STATE, we don't need to set the SSID explicitly
+ * because the dongle uses the P2P WILDCARD SSID internally by default
+ */
+ /* use null ssid */
+ ssid.SSID_len = 0;
+ memset(ssid.SSID, 0, sizeof(ssid.SSID));
+ break;
+ case WL_P2P_DISC_ST_SCAN:
+ /*
+ * wpa_supplicant issues the p2p_find command with type social or
+ * progressive. For progressive we need to set the ssid to the
+ * P2P WILDCARD, because otherwise we would just do a broadcast
+ * scan.
+ */
+ ssid.SSID_len = BRCMF_P2P_WILDCARD_SSID_LEN;
+ memcpy(ssid.SSID, BRCMF_P2P_WILDCARD_SSID, ssid.SSID_len);
+ break;
+ default:
+ brcmf_err(" invalid search state %d\n", search_state);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ brcmf_p2p_set_discover_state(vif->ifp, search_state, 0, 0);
+
+ /*
+ * set p2p scan parameters.
+ */
+ p2p_params = (struct brcmf_p2p_scan_le *)memblk;
+ p2p_params->type = 'E';
+
+ /* determine the scan engine parameters */
+ sparams = &p2p_params->eparams.params_le;
+ sparams->bss_type = DOT11_BSSTYPE_ANY;
+ if (p2p->cfg->active_scan)
+ sparams->scan_type = 0;
+ else
+ sparams->scan_type = 1;
+
+ memset(&sparams->bssid, 0xFF, ETH_ALEN);
+ if (ssid.SSID_len)
+ memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len);
+ sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+ sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS);
+
+ /*
+ * SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan
+ * supported by the supplicant.
+ */
+ if (num_chans == SOCIAL_CHAN_CNT || num_chans == (SOCIAL_CHAN_CNT + 1))
+ active = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
+ else if (num_chans == AF_PEER_SEARCH_CNT)
+ active = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
+ else if (wl_get_vif_state_all(p2p->cfg, BRCMF_VIF_STATUS_CONNECTED))
+ active = -1;
+ else
+ active = P2PAPI_SCAN_DWELL_TIME_MS;
+
+ /* Override scan params to find a peer for a connection */
+ if (num_chans == 1) {
+ active = WL_SCAN_CONNECT_DWELL_TIME_MS;
+ /* WAR to sync with presence period of VSDB GO.
+ * send probe request more frequently
+ */
+ nprobes = active / WL_SCAN_JOIN_PROBE_INTERVAL_MS;
+ } else {
+ nprobes = active / P2PAPI_SCAN_NPROBS_TIME_MS;
+ }
+
+ if (nprobes <= 0)
+ nprobes = 1;
+
+ brcmf_dbg(INFO, "nprobes # %d, active_time %d\n", nprobes, active);
+ sparams->active_time = cpu_to_le32(active);
+ sparams->nprobes = cpu_to_le32(nprobes);
+ sparams->passive_time = cpu_to_le32(-1);
+ sparams->channel_num = cpu_to_le32(num_chans &
+ BRCMF_SCAN_PARAMS_COUNT_MASK);
+ for (i = 0; i < num_chans; i++)
+ sparams->channel_list[i] = cpu_to_le16(chanspecs[i]);
+
+ /* set the escan specific parameters */
+ p2p_params->eparams.version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+ p2p_params->eparams.action = cpu_to_le16(action);
+ p2p_params->eparams.sync_id = cpu_to_le16(0x1234);
+ /* perform p2p scan on primary device */
+ ret = brcmf_fil_bsscfg_data_set(vif->ifp, "p2p_scan", memblk, memsize);
+ if (!ret)
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &p2p->cfg->scan_status);
+exit:
+ kfree(memblk);
+ return ret;
+}
+
+/**
+ * brcmf_p2p_run_escan() - escan callback for peer-to-peer.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device for which scan is requested.
+ * @request: scan request from cfg80211.
+ * @action: scan action.
+ *
+ * Determines the P2P discovery state based on the scan request parameters and
+ * validates the channels in the request.
+ */
+static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ u16 action)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ s32 err = 0;
+ s32 search_state = WL_P2P_DISC_ST_SCAN;
+ struct brcmf_cfg80211_vif *vif;
+ struct net_device *dev = NULL;
+ int i, num_nodfs = 0;
+ u16 *chanspecs;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ if (!request) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (request->n_channels) {
+ chanspecs = kcalloc(request->n_channels, sizeof(*chanspecs),
+ GFP_KERNEL);
+ if (!chanspecs) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+ if (vif)
+ dev = vif->wdev.netdev;
+ if (request->n_channels == 3 &&
+ request->channels[0]->hw_value == SOCIAL_CHAN_1 &&
+ request->channels[1]->hw_value == SOCIAL_CHAN_2 &&
+ request->channels[2]->hw_value == SOCIAL_CHAN_3) {
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
+ } else if (dev != NULL && vif->mode == WL_MODE_AP) {
+ /* If you are already a GO, then do SEARCH only */
+ brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ } else {
+ brcmf_dbg(INFO, "P2P SCAN STATE START\n");
+ }
+
+ /*
+ * no P2P scanning on passive or DFS channels.
+ */
+ for (i = 0; i < request->n_channels; i++) {
+ struct ieee80211_channel *chan = request->channels[i];
+
+ if (chan->flags & (IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_PASSIVE_SCAN))
+ continue;
+
+ chanspecs[i] = channel_to_chanspec(chan);
+ brcmf_dbg(INFO, "%d: chan=%d, channel spec=%x\n",
+ num_nodfs, chan->hw_value, chanspecs[i]);
+ num_nodfs++;
+ }
+ err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state,
+ action, P2PAPI_BSSCFG_DEVICE);
+ }
+exit:
+ if (err)
+ brcmf_err("error (%d)\n", err);
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_find_listen_channel() - find listen channel in ie string.
+ *
+ * @ie: string of information elements.
+ * @ie_len: length of string.
+ *
+ * Scan the ie for the p2p ie and look for attribute 6 (listen channel). If
+ * available, determine the channel and return it.
+ */
+static s32 brcmf_p2p_find_listen_channel(const u8 *ie, u32 ie_len)
+{
+ u8 channel_ie[5];
+ s32 listen_channel;
+ s32 err;
+
+ err = cfg80211_get_p2p_attr(ie, ie_len,
+ IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+ channel_ie, sizeof(channel_ie));
+ if (err < 0)
+ return err;
+
+ /* listen channel subel length format: */
+ /* 3(country) + 1(op. class) + 1(chan num) */
+ listen_channel = (s32)channel_ie[3 + 1];
+
+ if (listen_channel == SOCIAL_CHAN_1 ||
+ listen_channel == SOCIAL_CHAN_2 ||
+ listen_channel == SOCIAL_CHAN_3) {
+ brcmf_dbg(INFO, "Found my Listen Channel %d\n", listen_channel);
+ return listen_channel;
+ }
+
+ return -EPERM;
+}
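+
+/*
+ * Illustrative example: a peer advertising listen channel 6 carries the
+ * channel number 0x06 in channel_ie[4] (after the 3 octet country string
+ * and 1 octet operating class), so this function returns 6.
+ */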
+
+
+/**
+ * brcmf_p2p_scan_prep() - prepare scan based on request.
+ *
+ * @wiphy: wiphy device.
+ * @request: scan request from cfg80211.
+ * @vif: vif on which scan request is to be executed.
+ *
+ * Prepare the scan appropriately for type of scan requested. Overrides the
+ * escan .run() callback for peer-to-peer scanning.
+ */
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request,
+ struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ int err = 0;
+
+ if (brcmf_p2p_scan_is_p2p_request(request)) {
+ /* find my listen channel */
+ err = brcmf_p2p_find_listen_channel(request->ie,
+ request->ie_len);
+ if (err < 0)
+ return err;
+
+ p2p->afx_hdl.my_listen_chan = err;
+
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+ err = brcmf_p2p_enable_discovery(p2p);
+ if (err)
+ return err;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
+ /* override .run_escan() callback. */
+ cfg->escan_info.run = brcmf_p2p_run_escan;
+ }
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
+ request->ie, request->ie_len);
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_discover_listen() - set firmware to discover listen state.
+ *
+ * @p2p: p2p device.
+ * @channel: channel nr for discover listen.
+ * @duration: time in ms to stay on channel.
+ *
+ */
+static s32
+brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ u16 chanspec;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (!vif) {
+ brcmf_err("Discovery is not set, so we have nothing to do\n");
+ err = -EPERM;
+ goto exit;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) {
+ brcmf_err("Previous LISTEN is not completed yet\n");
+ /* WAR: prevent cookie mismatch in wpa_supplicant; return OK */
+ goto exit;
+ }
+
+ chanspec = brcmf_p2p_chnr_to_chspec(channel);
+ err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN,
+ chanspec, (u16)duration);
+ if (!err) {
+ set_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status);
+ p2p->remain_on_channel_cookie++;
+ }
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_remain_on_channel() - put device on channel and stay there.
+ *
+ * @wiphy: wiphy device.
+ * @channel: channel to stay on.
+ * @duration: time in ms to remain on channel.
+ *
+ */
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ s32 err;
+ u16 channel_nr;
+
+ channel_nr = ieee80211_frequency_to_channel(channel->center_freq);
+ brcmf_dbg(TRACE, "Enter, channel: %d, duration ms (%d)\n", channel_nr,
+ duration);
+
+ err = brcmf_p2p_enable_discovery(p2p);
+ if (err)
+ goto exit;
+ err = brcmf_p2p_discover_listen(p2p, channel_nr, duration);
+ if (err)
+ goto exit;
+
+ memcpy(&p2p->remain_on_channel, channel, sizeof(*channel));
+ *cookie = p2p->remain_on_channel_cookie;
+ cfg80211_ready_on_channel(wdev, *cookie, channel, duration, GFP_KERNEL);
+
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_notify_listen_complete() - p2p listen has completed.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, but keeps the handler usable for the fweh
+ * event dispatcher.
+ * @data: payload of message. Not used.
+ *
+ */
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+ brcmf_dbg(TRACE, "Enter\n");
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+ &p2p->status)) {
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status)) {
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status);
+ brcmf_dbg(INFO, "Listen DONE, wake up wait_next_af\n");
+ complete(&p2p->wait_next_af);
+ }
+
+ cfg80211_remain_on_channel_expired(&ifp->vif->wdev,
+ p2p->remain_on_channel_cookie,
+ &p2p->remain_on_channel,
+ GFP_KERNEL);
+ }
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_cancel_remain_on_channel() - cancel p2p listen state.
+ *
+ * @ifp: interface control.
+ *
+ */
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp)
+{
+ if (!ifp)
+ return;
+ brcmf_p2p_set_discover_state(ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+ brcmf_p2p_notify_listen_complete(ifp, NULL, NULL);
+}
+
+
+/**
+ * brcmf_p2p_act_frm_search() - search function for action frame.
+ *
+ * @p2p: p2p device.
+ * @channel: channel on which action frame is to be transmitted.
+ *
+ * Search function to reach a common channel to send the action frame on.
+ * When channel is 0 all social channels will be used to send the af.
+ */
+static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
+{
+ s32 err;
+ u32 channel_cnt;
+ u16 *default_chan_list;
+ u32 i;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (channel)
+ channel_cnt = AF_PEER_SEARCH_CNT;
+ else
+ channel_cnt = SOCIAL_CHAN_CNT;
+ default_chan_list = kzalloc(channel_cnt * sizeof(*default_chan_list),
+ GFP_KERNEL);
+ if (default_chan_list == NULL) {
+ brcmf_err("channel list allocation failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (channel) {
+ /* insert same channel to the chan_list */
+ for (i = 0; i < channel_cnt; i++)
+ default_chan_list[i] =
+ brcmf_p2p_chnr_to_chspec(channel);
+ } else {
+ default_chan_list[0] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_1);
+ default_chan_list[1] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_2);
+ default_chan_list[2] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_3);
+ }
+ err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list,
+ WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START,
+ P2PAPI_BSSCFG_DEVICE);
+ kfree(default_chan_list);
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_afx_handler() - afx worker thread.
+ *
+ * @work: work struct embedded in struct afx_hdl.
+ *
+ */
+static void brcmf_p2p_afx_handler(struct work_struct *work)
+{
+ struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work);
+ struct brcmf_p2p_info *p2p = container_of(afx_hdl,
+ struct brcmf_p2p_info,
+ afx_hdl);
+ s32 err;
+
+ if (!afx_hdl->is_active)
+ return;
+
+ if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
+ /* 100ms ~ 300ms */
+ err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
+ 100 * (1 + (random32() % 3)));
+ else
+ err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
+
+ if (err) {
+ brcmf_err("ERROR occurred! value is (%d)\n", err);
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status))
+ complete(&afx_hdl->act_frm_scan);
+ }
+}
+
+
+/**
+ * brcmf_p2p_af_searching_channel() - search channel.
+ *
+ * @p2p: p2p device info struct.
+ *
+ */
+static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+{
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct brcmf_cfg80211_vif *pri_vif;
+ unsigned long duration;
+ s32 retry;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+
+ INIT_COMPLETION(afx_hdl->act_frm_scan);
+ set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+ afx_hdl->is_active = true;
+ afx_hdl->peer_chan = P2P_INVALID_CHANNEL;
+
+ /* Loop to wait until we find a peer's channel or the
+ * pending action frame tx is cancelled.
+ */
+ retry = 0;
+ duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
+ while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
+ (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
+ afx_hdl->is_listen = false;
+ brcmf_dbg(TRACE, "Scheduling action frame for sending.. (%d)\n",
+ retry);
+ /* search peer on peer's listen channel */
+ schedule_work(&afx_hdl->afx_work);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+ if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+ (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status)))
+ break;
+
+ if (afx_hdl->my_listen_chan) {
+ brcmf_dbg(TRACE, "Scheduling listen peer, channel=%d\n",
+ afx_hdl->my_listen_chan);
+ /* listen on my listen channel */
+ afx_hdl->is_listen = true;
+ schedule_work(&afx_hdl->afx_work);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+ duration);
+ }
+ if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+ (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status)))
+ break;
+ retry++;
+
+ /* if sta is connected or connecting, sleep for a while before
+ * retry af tx or finding a peer
+ */
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &pri_vif->sme_state) ||
+ test_bit(BRCMF_VIF_STATUS_CONNECTING, &pri_vif->sme_state))
+ msleep(P2P_DEFAULT_SLEEP_TIME_VSDB);
+ }
+
+ brcmf_dbg(TRACE, "Completed search/listen peer_chan=%d\n",
+ afx_hdl->peer_chan);
+ afx_hdl->is_active = false;
+
+ clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+
+ return afx_hdl->peer_chan;
+}
+
+
+/**
+ * brcmf_p2p_scan_finding_common_channel() - check if escan is used for finding a common channel.
+ *
+ * @cfg: common configuration struct.
+ * @bi: bss info struct, result from scan.
+ *
+ */
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_bss_info_le *bi)
+
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ u8 *ie;
+ s32 err;
+ u8 p2p_dev_addr[ETH_ALEN];
+
+ if (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))
+ return false;
+
+ if (bi == NULL) {
+ brcmf_dbg(TRACE, "ACTION FRAME SCAN Done\n");
+ if (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)
+ complete(&afx_hdl->act_frm_scan);
+ return true;
+ }
+
+ ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
+ memset(p2p_dev_addr, 0, sizeof(p2p_dev_addr));
+ err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+ IEEE80211_P2P_ATTR_DEVICE_INFO,
+ p2p_dev_addr, sizeof(p2p_dev_addr));
+ if (err < 0)
+ err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+ IEEE80211_P2P_ATTR_DEVICE_ID,
+ p2p_dev_addr, sizeof(p2p_dev_addr));
+ if ((err >= 0) &&
+ (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
+ afx_hdl->peer_chan = bi->ctl_ch ? bi->ctl_ch :
+ CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
+ brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
+ afx_hdl->tx_dst_addr, afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+ return true;
+}
+
+/**
+ * brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
+ *
+ * @cfg: common configuration struct.
+ *
+ */
+static void
+brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct net_device *ndev = cfg->escan_info.ndev;
+
+ if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
+ (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
+ test_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status))) {
+ brcmf_dbg(TRACE, "*** Wake UP ** abort actframe iovar\n");
+ /* if channel is not zero, "actframe" uses off channel scan.
+ * So abort scan for off channel completion.
+ */
+ if (p2p->af_sent_channel)
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+ } else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status)) {
+ brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
+ /* So abort scan to cancel listen */
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+ }
+}
+
+
+/**
+ * brcmf_p2p_gon_req_collision() - check for go negotiation collision.
+ *
+ * @p2p: p2p device info struct.
+ * @mac: source address of the peer's go negotiation request.
+ *
+ * Return true if the received action frame is to be dropped.
+ */
+static bool
+brcmf_p2p_gon_req_collision(struct brcmf_p2p_info *p2p, u8 *mac)
+{
+ struct brcmf_cfg80211_info *cfg = p2p->cfg;
+ struct brcmf_if *ifp;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (!test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) ||
+ !p2p->gon_req_action)
+ return false;
+
+ brcmf_dbg(TRACE, "GO Negotiation Request COLLISION !!!\n");
+ /* if sa (peer) addr is less than da (my) addr, this device processes
+ * the peer's gon request and blocks sending its own gon req.
+ * if not (sa addr > da addr), this device processes its own gon
+ * request and drops the gon req of the peer.
+ */
+ ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+ if (memcmp(mac, ifp->mac_addr, ETH_ALEN) < 0) {
+ brcmf_dbg(INFO, "Block transmit gon req !!!\n");
+ p2p->block_gon_req_tx = true;
+ /* if we are finding a common channel for sending the af,
+ * stop scanning further in order to block sending the current gon req
+ */
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status))
+ complete(&p2p->afx_hdl.act_frm_scan);
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status))
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ return false;
+ }
+
+ /* drop gon request of peer to process gon request by this device. */
+ brcmf_dbg(INFO, "Drop received gon req !!!\n");
+
+ return true;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_frame_rx() - received action frame.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, but keeps the handler usable for the fweh
+ * event dispatcher.
+ * @data: payload of message, containing action frame data.
+ *
+ */
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct wireless_dev *wdev;
+ u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data);
+ struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+ u8 *frame = (u8 *)(rxframe + 1);
+ struct brcmf_p2p_pub_act_frame *act_frm;
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+ u16 chanspec = be16_to_cpu(rxframe->chanspec);
+ struct ieee80211_mgmt *mgmt_frame;
+ s32 freq;
+ u16 mgmt_type;
+ u8 action;
+
+ /* Check if wpa_supplicant has registered for this frame */
+ brcmf_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg);
+ mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4;
+ if ((ifp->vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+ return 0;
+
+ brcmf_p2p_print_actframe(false, frame, mgmt_frame_len);
+
+ action = P2P_PAF_SUBTYPE_INVALID;
+ if (brcmf_p2p_is_pub_action(frame, mgmt_frame_len)) {
+ act_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ action = act_frm->subtype;
+ if ((action == P2P_PAF_GON_REQ) &&
+ (brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status) &&
+ (memcmp(afx_hdl->tx_dst_addr, e->addr,
+ ETH_ALEN) == 0)) {
+ afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec);
+ brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
+ afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+ return 0;
+ }
+ /* After completing GO Negotiation, roll back to mpc mode */
+ if ((action == P2P_PAF_GON_CONF) ||
+ (action == P2P_PAF_PROVDIS_RSP))
+ brcmf_set_mpc(ifp->ndev, 1);
+ if (action == P2P_PAF_GON_CONF) {
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ }
+ } else if (brcmf_p2p_is_gas_action(frame, mgmt_frame_len)) {
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ action = sd_act_frm->action;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+ (p2p->next_af_subtype == action)) {
+ brcmf_dbg(TRACE, "We got a right next frame! (%d)\n", action);
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status);
+ /* Stop waiting for next AF. */
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ }
+
+ mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) +
+ mgmt_frame_len, GFP_KERNEL);
+ if (!mgmt_frame) {
+ brcmf_err("No memory available for action frame\n");
+ return -ENOMEM;
+ }
+ memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN);
+ brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid,
+ ETH_ALEN);
+ memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
+ mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+ memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
+ mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
+
+ freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS2G(chanspec) ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ);
+ wdev = ifp->ndev->ieee80211_ptr;
+ cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
+ GFP_ATOMIC);
+
+ kfree(mgmt_frame);
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_tx_complete() - transmit action frame complete
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, but keeps the handler usable for the fweh
+ * event dispatcher.
+ * @data: not used.
+ *
+ */
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+ brcmf_dbg(INFO, "Enter: event %s, status=%d\n",
+ e->event_code == BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE ?
+ "ACTION_FRAME_OFF_CHAN_COMPLETE" : "ACTION_FRAME_COMPLETE",
+ e->status);
+
+ if (!test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status))
+ return 0;
+
+ if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
+ if (e->status == BRCMF_E_STATUS_SUCCESS)
+ set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+ &p2p->status);
+ else {
+ set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+ /* If there is no ack, we don't need to wait for
+ * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
+ */
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ }
+
+ } else {
+ complete(&p2p->send_af_done);
+ }
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_tx_action_frame() - send action frame over fil.
+ *
+ * @p2p: p2p info struct for vif.
+ * @af_params: action frame data/info.
+ *
+ * Send an action frame immediately without doing channel synchronization.
+ *
+ * This function waits for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ */
+static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+ struct brcmf_fil_af_params_le *af_params)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ s32 timeout = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ INIT_COMPLETION(p2p->send_af_done);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+ sizeof(*af_params));
+ if (err) {
+ brcmf_err(" sending action frame has failed\n");
+ goto exit;
+ }
+
+ p2p->af_sent_channel = le32_to_cpu(af_params->channel);
+ p2p->af_tx_sent_jiffies = jiffies;
+
+ timeout = wait_for_completion_timeout(&p2p->send_af_done,
+ msecs_to_jiffies(P2P_AF_MAX_WAIT_TIME));
+
+ if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) {
+ brcmf_dbg(TRACE, "TX action frame operation is success\n");
+ } else {
+ err = -EIO;
+ brcmf_dbg(TRACE, "TX action frame operation has failed\n");
+ }
+ /* clear status bit for action tx */
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_pub_af_tx() - public action frame tx routine.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @af_params: action frame data/info.
+ * @config_af_params: configuration data for action frame.
+ *
+ * Routine which transmits public type action frames.
+ */
+static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_fil_af_params_le *af_params,
+ struct brcmf_config_af_params *config_af_params)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_p2p_pub_act_frame *act_frm;
+ s32 err = 0;
+ u16 ie_len;
+
+ action_frame = &af_params->action_frame;
+ act_frm = (struct brcmf_p2p_pub_act_frame *)(action_frame->data);
+
+ config_af_params->extra_listen = true;
+
+ switch (act_frm->subtype) {
+ case P2P_PAF_GON_REQ:
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status set\n");
+ set_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ config_af_params->mpc_onoff = 0;
+ config_af_params->search_channel = true;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ p2p->gon_req_action = true;
+ /* increase dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_GON_RSP:
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time to wait for CONF frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_GON_CONF:
+ /* If we have reached GO Neg confirmation, reset the filter */
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ /* turn on mpc again if go nego is done */
+ config_af_params->mpc_onoff = 1;
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_INVITE_REQ:
+ config_af_params->search_channel = true;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_INVITE_RSP:
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_DEVDIS_REQ:
+ config_af_params->search_channel = true;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* maximize dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_LONG_DWELL_TIME);
+ break;
+ case P2P_PAF_DEVDIS_RSP:
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_PROVDIS_REQ:
+ ie_len = le16_to_cpu(action_frame->len) -
+ offsetof(struct brcmf_p2p_pub_act_frame, elts);
+ if (cfg80211_get_p2p_attr(&act_frm->elts[0], ie_len,
+ IEEE80211_P2P_ATTR_GROUP_ID,
+ NULL, 0) < 0)
+ config_af_params->search_channel = true;
+ config_af_params->mpc_onoff = 0;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_PROVDIS_RSP:
+ /* wpa_supplicant sends the go nego req right after prov disc */
+ p2p->next_af_subtype = P2P_PAF_GON_REQ;
+ /* increase dwell time to MED level */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ default:
+ brcmf_err("Unknown p2p pub act frame subtype: %d\n",
+ act_frm->subtype);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+/**
+ * brcmf_p2p_send_action_frame() - send action frame.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device to transmit on.
+ * @af_params: configuration data for action frame.
+ */
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct brcmf_fil_af_params_le *af_params)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_config_af_params config_af_params;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ u16 action_frame_len;
+ bool ack = false;
+ u8 category;
+ u8 action;
+ s32 tx_retry;
+ s32 extra_listen_time;
+ uint delta_ms;
+
+ action_frame = &af_params->action_frame;
+ action_frame_len = le16_to_cpu(action_frame->len);
+
+ brcmf_p2p_print_actframe(true, action_frame->data, action_frame_len);
+
+ /* Add the default dwell time. Dwell time to stay off-channel */
+ /* to wait for a response action frame after transmitting a */
+ /* GO Negotiation action frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_DWELL_TIME);
+
+ category = action_frame->data[DOT11_ACTION_CAT_OFF];
+ action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+ /* initialize variables */
+ p2p->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+ p2p->gon_req_action = false;
+
+ /* config parameters */
+ config_af_params.mpc_onoff = -1;
+ config_af_params.search_channel = false;
+ config_af_params.extra_listen = false;
+
+ if (brcmf_p2p_is_pub_action(action_frame->data, action_frame_len)) {
+ /* p2p public action frame process */
+ if (brcmf_p2p_pub_af_tx(cfg, af_params, &config_af_params)) {
+ /* Just send unknown subtype frame with */
+ /* default parameters. */
+ brcmf_err("P2P Public action frame, unknown subtype.\n");
+ }
+ } else if (brcmf_p2p_is_gas_action(action_frame->data,
+ action_frame_len)) {
+ /* service discovery process */
+ if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+ action == P2PSD_ACTION_ID_GAS_CREQ) {
+ /* configure service discovery query frame */
+ config_af_params.search_channel = true;
+
+ /* save next af subtype to cancel */
+ /* remaining dwell time */
+ p2p->next_af_subtype = action + 1;
+
+ af_params->dwell_time =
+ cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ } else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+ action == P2PSD_ACTION_ID_GAS_CRESP) {
+ /* configure service discovery response frame */
+ af_params->dwell_time =
+ cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ } else {
+ brcmf_err("Unknown action type: %d\n", action);
+ goto exit;
+ }
+ } else if (brcmf_p2p_is_p2p_action(action_frame->data,
+ action_frame_len)) {
+ /* do not configure anything. it will be */
+ /* sent with a default configuration */
+ } else {
+ brcmf_err("Unknown Frame: category 0x%x, action 0x%x\n",
+ category, action);
+ return false;
+ }
+
+ /* if connecting on primary iface, sleep for a while before sending
+ * af tx for VSDB
+ */
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->sme_state))
+ msleep(50);
+
+ /* if scan is ongoing, abort current scan. */
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+ brcmf_abort_scanning(cfg);
+
+ memcpy(afx_hdl->tx_dst_addr, action_frame->da, ETH_ALEN);
+
+ /* To make sure the action frame is sent successfully, turn off mpc */
+ if (config_af_params.mpc_onoff == 0)
+ brcmf_set_mpc(ndev, 0);
+
+ /* set status and destination address before sending af */
+ if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+ /* set status to cancel the remaining dwell time in rx process */
+ set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+ }
+
+ p2p->af_sent_channel = 0;
+ set_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+ /* validate channel and p2p ies */
+ if (config_af_params.search_channel &&
+ IS_P2P_SOCIAL_CHANNEL(le32_to_cpu(af_params->channel)) &&
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->saved_ie.probe_req_ie_len) {
+ afx_hdl = &p2p->afx_hdl;
+ afx_hdl->peer_listen_chan = le32_to_cpu(af_params->channel);
+
+ if (brcmf_p2p_af_searching_channel(p2p) ==
+ P2P_INVALID_CHANNEL) {
+ brcmf_err("Couldn't find peer's channel.\n");
+ goto exit;
+ }
+
+ /* Abort scan even for VSDB scenarios. Scan gets aborted in
+ * firmware but only after the check of the piggyback algorithm.
+ * To take care of the current piggyback algo, let's abort the
+ * scan here itself.
+ */
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+
+ /* update channel */
+ af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
+ }
+
+ tx_retry = 0;
+ while (!p2p->block_gon_req_tx &&
+ (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+ ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+ tx_retry++;
+ }
+ if (ack == false) {
+ brcmf_err("Failed to send Action Frame(retry %d)\n", tx_retry);
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ }
+
+exit:
+ clear_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+
+ /* WAR: sometimes the dongle does not keep the dwell time of 'actframe'.
+ * If we couldn't get the next action response frame and the dongle does
+ * not keep the dwell time, go to listen state again to get the next
+ * action response frame.
+ */
+ if (ack && config_af_params.extra_listen && !p2p->block_gon_req_tx &&
+ test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+ p2p->af_sent_channel == afx_hdl->my_listen_chan) {
+ delta_ms = jiffies_to_msecs(jiffies - p2p->af_tx_sent_jiffies);
+ if (le32_to_cpu(af_params->dwell_time) > delta_ms)
+ extra_listen_time = le32_to_cpu(af_params->dwell_time) -
+ delta_ms;
+ else
+ extra_listen_time = 0;
+ if (extra_listen_time > 50) {
+ set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status);
+ brcmf_dbg(INFO, "Wait more time! actual af time:%d, calculated extra listen:%d\n",
+ le32_to_cpu(af_params->dwell_time),
+ extra_listen_time);
+ extra_listen_time += 100;
+ if (!brcmf_p2p_discover_listen(p2p,
+ p2p->af_sent_channel,
+ extra_listen_time)) {
+ unsigned long duration;
+
+ extra_listen_time += 100;
+ duration = msecs_to_jiffies(extra_listen_time);
+ wait_for_completion_timeout(&p2p->wait_next_af,
+ duration);
+ }
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status);
+ }
+ }
+
+ if (p2p->block_gon_req_tx) {
+ /* if ack is true, the supplicant will wait more time (100ms),
+ * so we return success to get more time.
+ */
+ p2p->block_gon_req_tx = false;
+ ack = true;
+ }
+
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+ /* if all done, turn mpc on again */
+ if (config_af_params.mpc_onoff == 1)
+ brcmf_set_mpc(ndev, 1);
+
+ return ack;
+}
+
+/**
+ * brcmf_p2p_notify_rx_mgmt_p2p_probereq() - Event handler for p2p probe req.
+ *
+ * @ifp: interface pointer for which event was received.
+ * @e: event message.
+ * @data: payload of event message (probe request).
+ */
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct wireless_dev *wdev;
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+ u16 chanspec = be16_to_cpu(rxframe->chanspec);
+ u8 *mgmt_frame;
+ u32 mgmt_frame_len;
+ s32 freq;
+ u16 mgmt_type;
+
+ brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
+ e->reason);
+
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
+ (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
+ afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec);
+ brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
+ afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+
+ /* Firmware sends us two proberesponses, one for each idx. At the */
+ /* moment anything but bsscfgidx 0 is passed up to the supplicant */
+ if (e->bsscfgidx == 0)
+ return 0;
+
+ /* Filter any P2P probe reqs arriving during the GO-NEG Phase */
+ if (test_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status)) {
+ brcmf_dbg(INFO, "Filtering P2P probe_req in GO-NEG phase\n");
+ return 0;
+ }
+
+ /* Check if wpa_supplicant has registered for this frame */
+ brcmf_dbg(INFO, "vif->mgmt_rx_reg %04x\n", vif->mgmt_rx_reg);
+ mgmt_type = (IEEE80211_STYPE_PROBE_REQ & IEEE80211_FCTL_STYPE) >> 4;
+ if ((vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+ return 0;
+
+ mgmt_frame = (u8 *)(rxframe + 1);
+ mgmt_frame_len = e->datalen - sizeof(*rxframe);
+ freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS2G(chanspec) ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ);
+ wdev = ifp->ndev->ieee80211_ptr;
+ cfg80211_rx_mgmt(wdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+
+ brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
+ mgmt_frame_len, e->datalen, chanspec, freq);
+
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_attach() - attach for P2P.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ */
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_if *pri_ifp;
+ struct brcmf_if *p2p_ifp;
+ struct brcmf_cfg80211_vif *p2p_vif;
+ struct brcmf_p2p_info *p2p;
+ struct brcmf_pub *drvr;
+ s32 bssidx;
+ s32 err = 0;
+
+ p2p = &cfg->p2p;
+ p2p->cfg = cfg;
+
+ drvr = cfg->pub;
+
+ pri_ifp = drvr->iflist[0];
+ p2p_ifp = drvr->iflist[1];
+
+ p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+
+ if (p2p_ifp) {
+ p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
+ false);
+ if (IS_ERR(p2p_vif)) {
+ brcmf_err("could not create discovery vif\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ p2p_vif->ifp = p2p_ifp;
+ p2p_ifp->vif = p2p_vif;
+ p2p_vif->wdev.netdev = p2p_ifp->ndev;
+ p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
+ SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
+
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
+
+ brcmf_p2p_generate_bss_mac(p2p);
+ brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
+
+ /* Initialize P2P Discovery in the firmware */
+ err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
+ if (err < 0) {
+ brcmf_err("set p2p_disc error\n");
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+ /* obtain bsscfg index for P2P discovery */
+ err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
+ if (err < 0) {
+ brcmf_err("retrieving discover bsscfg index failed\n");
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+ /* Verify that firmware uses same bssidx as driver !! */
+ if (p2p_ifp->bssidx != bssidx) {
+ brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
+ bssidx, p2p_ifp->bssidx);
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+
+ init_completion(&p2p->send_af_done);
+ INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
+ init_completion(&p2p->afx_hdl.act_frm_scan);
+ init_completion(&p2p->wait_next_af);
+ }
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_detach() - detach P2P.
+ *
+ * @p2p: P2P specific data.
+ */
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (vif != NULL) {
+ brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+ brcmf_p2p_deinit_discovery(p2p);
+ /* remove discovery interface */
+ brcmf_free_vif(vif);
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+ }
+ /* just set it all to zero */
+ memset(p2p, 0, sizeof(*p2p));
+}
+
+/**
+ * brcmf_p2p_get_current_chanspec() - Get current operation channel.
+ *
+ * @p2p: P2P specific data.
+ * @chanspec: chanspec to be returned.
+ */
+static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
+ u16 *chanspec)
+{
+ struct brcmf_if *ifp;
+ struct brcmf_fil_chan_info_le ci;
+ s32 err;
+
+ ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+
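+ /* fall back to channel 11 if the firmware query below fails */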
+ *chanspec = 11 & WL_CHANSPEC_CHAN_MASK;
+
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci));
+ if (!err) {
+ *chanspec = le32_to_cpu(ci.hw_channel) & WL_CHANSPEC_CHAN_MASK;
+ if (*chanspec < CH_MAX_2G_CHANNEL)
+ *chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ *chanspec |= WL_CHANSPEC_BAND_5G;
+ }
+ *chanspec |= WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+}
+
+/**
+ * brcmf_p2p_ifchange() - change P2P role of the connection interface.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @if_type: interface type to change the P2P connection interface to.
+ *
+ * Returns 0 on success.
+ */
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+ enum brcmf_fil_p2p_if_types if_type)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_fil_p2p_if_le if_request;
+ s32 err;
+ u16 chanspec;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ if (!vif) {
+ brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n");
+ return -EPERM;
+ }
+ brcmf_notify_escan_complete(cfg, vif->ifp->ndev, true, true);
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+ if (!vif) {
+ brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n");
+ return -EPERM;
+ }
+ brcmf_set_mpc(vif->ifp->ndev, 0);
+
+ /* In the concurrency case the STA may already be associated on a */
+ /* particular channel, so retrieve the current channel of the primary */
+ /* interface and start the virtual interface on that channel. */
+ brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+ if_request.type = cpu_to_le16((u16)if_type);
+ if_request.chspec = cpu_to_le16(chanspec);
+ memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
+
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+ err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
+ sizeof(if_request));
+ if (err) {
+ brcmf_err("p2p_ifupd FAILED, err=%d\n", err);
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ return err;
+ }
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE,
+ msecs_to_jiffies(1500));
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ if (!err) {
+ brcmf_err("No BRCMF_E_IF_CHANGE event received\n");
+ return -EIO;
+ }
+
+ err = brcmf_fil_cmd_int_set(vif->ifp, BRCMF_C_SET_SCB_TIMEOUT,
+ BRCMF_SCB_TIMEOUT_VALUE);
+
+ return err;
+}
+
+static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p,
+ struct brcmf_if *ifp, u8 ea[ETH_ALEN],
+ enum brcmf_fil_p2p_if_types iftype)
+{
+ struct brcmf_fil_p2p_if_le if_request;
+ int err;
+ u16 chanspec;
+
+ /* we need a default channel */
+ brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+ /* fill the firmware request */
+ memcpy(if_request.addr, ea, ETH_ALEN);
+ if_request.type = cpu_to_le16((u16)iftype);
+ if_request.chspec = cpu_to_le16(chanspec);
+
+ err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
+ sizeof(if_request));
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int brcmf_p2p_disable_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+ struct net_device *pri_ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(pri_ndev);
+ u8 *addr = vif->wdev.netdev->dev_addr;
+
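+ /* p2p_ifdis is issued on the primary interface, keyed by the vif MAC address */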
+ return brcmf_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN);
+}
+
+static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+ struct net_device *pri_ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(pri_ndev);
+ u8 *addr = vif->wdev.netdev->dev_addr;
+
+ return brcmf_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN);
+}
+
+/**
+ * brcmf_p2p_add_vif() - create a new P2P virtual interface.
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ * @type: nl80211 interface type.
+ * @flags: TBD
+ * @params: TBD
+ */
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_cfg80211_vif *vif;
+ enum brcmf_fil_p2p_if_types iftype;
+ enum wl_mode mode;
+ int err;
+
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ return ERR_PTR(-EBUSY);
+
+ brcmf_dbg(INFO, "adding vif \"%s\" (type=%d)\n", name, type);
+
+ switch (type) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ iftype = BRCMF_FIL_P2P_IF_CLIENT;
+ mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ iftype = BRCMF_FIL_P2P_IF_GO;
+ mode = WL_MODE_AP;
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ vif = brcmf_alloc_vif(cfg, type, false);
+ if (IS_ERR(vif))
+ return (struct wireless_dev *)vif;
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+
+ err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
+ iftype);
+ if (err) {
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ goto fail;
+ }
+
+ /* wait for firmware event */
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
+ msecs_to_jiffies(1500));
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ if (!err) {
+ brcmf_err("timeout occurred\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ /* interface created in firmware */
+ ifp = vif->ifp;
+ if (!ifp) {
+ brcmf_err("no if pointer provided\n");
+ err = -ENOENT;
+ goto fail;
+ }
+
+ strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+ err = brcmf_net_attach(ifp, true);
+ if (err) {
+ brcmf_err("Registering netdevice failed\n");
+ goto fail;
+ }
+ cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
+ /* Disable firmware roaming for P2P interface */
+ brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+ if (iftype == BRCMF_FIL_P2P_IF_GO) {
+ /* set station timeout for p2p */
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCB_TIMEOUT,
+ BRCMF_SCB_TIMEOUT_VALUE);
+ }
+ return &ifp->vif->wdev;
+
+fail:
+ brcmf_free_vif(vif);
+ return ERR_PTR(err);
+}
+
+/**
+ * brcmf_p2p_del_vif() - delete a P2P virtual interface.
+ *
+ * @wiphy: wiphy device of interface.
+ * @wdev: wireless device of interface.
+ *
+ * TODO: not yet supported.
+ */
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_cfg80211_vif *vif;
+ unsigned long jiffie_timeout = msecs_to_jiffies(1500);
+ bool wait_for_disable = false;
+ int err;
+
+ brcmf_dbg(TRACE, "delete P2P vif\n");
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+ switch (vif->wdev.iftype) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
+ wait_for_disable = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ if (!brcmf_p2p_disable_p2p_if(vif))
+ wait_for_disable = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return -ENOTSUPP;
+ break;
+ }
+
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+ if (wait_for_disable)
+ wait_for_completion_timeout(&cfg->vif_disabled,
+ msecs_to_jiffies(500));
+
+ brcmf_vif_clear_mgmt_ies(vif);
+
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+ err = brcmf_p2p_release_p2p_if(vif);
+ if (!err) {
+ /* wait for firmware event */
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL,
+ jiffie_timeout);
+ if (!err)
+ err = -EIO;
+ else
+ err = 0;
+ }
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ brcmf_free_vif(vif);
+ p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+
+ return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
new file mode 100644
index 000000000000..6821b26224be
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef WL_CFGP2P_H_
+#define WL_CFGP2P_H_
+
+#include <net/cfg80211.h>
+
+struct brcmf_cfg80211_info;
+
+/**
+ * enum p2p_bss_type - different type of BSS configurations.
+ *
+ * @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
+ * @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_MAX: used for range checking.
+ */
+enum p2p_bss_type {
+ P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+ P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+ P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_MAX
+};
+
+/**
+ * struct p2p_bss - peer-to-peer bss related information.
+ *
+ * @vif: virtual interface of this P2P bss.
+ * @private_data: TBD
+ */
+struct p2p_bss {
+ struct brcmf_cfg80211_vif *vif;
+ void *private_data;
+};
+
+/**
+ * enum brcmf_p2p_status - P2P specific dongle status.
+ *
+ * @BRCMF_P2P_STATUS_IF_ADD: peer-to-peer vif add sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_DEL: NOT-USED?
+ * @BRCMF_P2P_STATUS_IF_DELETING: peer-to-peer vif delete sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGING: peer-to-peer vif change sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGED: peer-to-peer vif change completed on dongle.
+ * @BRCMF_P2P_STATUS_ACTION_TX_COMPLETED: action frame tx completed.
+ * @BRCMF_P2P_STATUS_ACTION_TX_NOACK: action frame tx not acked.
+ * @BRCMF_P2P_STATUS_GO_NEG_PHASE: P2P GO negotiation ongoing.
+ * @BRCMF_P2P_STATUS_DISCOVER_LISTEN: P2P listen, remaining on channel.
+ * @BRCMF_P2P_STATUS_SENDING_ACT_FRAME: In the process of sending action frame.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN: extra listen time for af tx.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME: waiting for action frame response.
+ * @BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL: search channel for AF active.
+ */
+enum brcmf_p2p_status {
+ BRCMF_P2P_STATUS_ENABLED,
+ BRCMF_P2P_STATUS_IF_ADD,
+ BRCMF_P2P_STATUS_IF_DEL,
+ BRCMF_P2P_STATUS_IF_DELETING,
+ BRCMF_P2P_STATUS_IF_CHANGING,
+ BRCMF_P2P_STATUS_IF_CHANGED,
+ BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+ BRCMF_P2P_STATUS_ACTION_TX_NOACK,
+ BRCMF_P2P_STATUS_GO_NEG_PHASE,
+ BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+ BRCMF_P2P_STATUS_SENDING_ACT_FRAME,
+ BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL
+};
+
+/**
+ * struct afx_hdl - action frame off channel storage.
+ *
+ * @afx_work: worker thread for searching channel
+ * @act_frm_scan: thread synchronizing struct.
+ * @is_active: channel searching active.
+ * @peer_chan: current channel.
+ * @is_listen: sets mode for afx worker.
+ * @my_listen_chan: own listen channel.
+ * @peer_listen_chan: remote peer's listen channel.
+ * @tx_dst_addr: mac address where tx af should be sent to.
+ */
+struct afx_hdl {
+ struct work_struct afx_work;
+ struct completion act_frm_scan;
+ bool is_active;
+ s32 peer_chan;
+ bool is_listen;
+ u16 my_listen_chan;
+ u16 peer_listen_chan;
+ u8 tx_dst_addr[ETH_ALEN];
+};
+
+/**
+ * struct brcmf_p2p_info - p2p specific driver information.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @status: status of P2P (see enum brcmf_p2p_status).
+ * @dev_addr: P2P device address.
+ * @int_addr: P2P interface address.
+ * @bss_idx: information for the P2P bss types.
+ * @listen_timer: timer for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @ssid: ssid for P2P GO.
+ * @listen_channel: channel for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @remain_on_channel: contains copy of struct used by cfg80211.
+ * @remain_on_channel_cookie: cookie counter for remain on channel cmd
+ * @next_af_subtype: expected action frame subtype.
+ * @send_af_done: indication that action frame tx is complete.
+ * @afx_hdl: action frame search handler info.
+ * @af_sent_channel: channel action frame is sent.
+ * @af_tx_sent_jiffies: jiffies time when af tx was transmitted.
+ * @wait_next_af: thread synchronizing struct.
+ * @gon_req_action: about to send GO negotiation request frame.
+ * @block_gon_req_tx: drop tx GO negotiation request frame.
+ */
+struct brcmf_p2p_info {
+ struct brcmf_cfg80211_info *cfg;
+ unsigned long status;
+ u8 dev_addr[ETH_ALEN];
+ u8 int_addr[ETH_ALEN];
+ struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+ struct timer_list listen_timer;
+ struct brcmf_ssid ssid;
+ u8 listen_channel;
+ struct ieee80211_channel remain_on_channel;
+ u32 remain_on_channel_cookie;
+ u8 next_af_subtype;
+ struct completion send_af_done;
+ struct afx_hdl afx_hdl;
+ u32 af_sent_channel;
+ unsigned long af_tx_sent_jiffies;
+ struct completion wait_next_af;
+ bool gon_req_action;
+ bool block_gon_req_tx;
+};
+
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg);
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params);
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+ enum brcmf_fil_p2p_if_types if_type);
+int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request,
+ struct brcmf_cfg80211_vif *vif);
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie);
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp);
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct brcmf_fil_af_params_le *af_params);
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_bss_info_le *bi);
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+#endif /* WL_CFGP2P_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index b1bb46c49799..14be2d5530ce 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -15,8 +15,6 @@
*/
/* ***** SDIO interface chip backplane handle functions ***** */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 914c56fe6c5f..42289e9ea886 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -354,11 +354,10 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
int i;
struct brcmf_usbreq *req, *reqs;
- reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC);
- if (reqs == NULL) {
- brcmf_err("fail to allocate memory!\n");
+ reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);
+ if (reqs == NULL)
return NULL;
- }
+
req = reqs;
for (i = 0; i < qsize; i++) {
@@ -421,10 +420,6 @@ static void brcmf_usb_tx_complete(struct urb *urb)
brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
req->skb);
brcmf_usb_del_fromq(devinfo, req);
- if (urb->status == 0)
- devinfo->bus_pub.bus->dstats.tx_packets++;
- else
- devinfo->bus_pub.bus->dstats.tx_errors++;
brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
@@ -443,30 +438,25 @@ static void brcmf_usb_rx_complete(struct urb *urb)
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
struct sk_buff *skb;
- int ifidx = 0;
+ struct sk_buff_head skbq;
brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
brcmf_usb_del_fromq(devinfo, req);
skb = req->skb;
req->skb = NULL;
- if (urb->status == 0) {
- devinfo->bus_pub.bus->dstats.rx_packets++;
- } else {
- devinfo->bus_pub.bus->dstats.rx_errors++;
+ /* zero length packets indicate usb "failure". Do not refill */
+ if (urb->status != 0 || !urb->actual_length) {
brcmu_pkt_buf_free_skb(skb);
brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
return;
}
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+ skb_queue_head_init(&skbq);
+ skb_queue_tail(&skbq, skb);
skb_put(skb, urb->actual_length);
- if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
- brcmf_err("rx protocol error\n");
- brcmu_pkt_buf_free_skb(skb);
- devinfo->bus_pub.bus->dstats.rx_errors++;
- } else
- brcmf_rx_packet(devinfo->dev, ifidx, skb);
+ brcmf_rx_frames(devinfo->dev, &skbq);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
@@ -1259,6 +1249,8 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->bus_priv.usb = bus_pub;
dev_set_drvdata(dev, bus);
bus->ops = &brcmf_usb_bus_ops;
+ bus->chip = bus_pub->devid;
+ bus->chiprev = bus_pub->chiprev;
/* Attach to the common driver interface */
ret = brcmf_attach(0, dev);
@@ -1520,10 +1512,23 @@ static void brcmf_release_fw(struct list_head *q)
}
}
+static int brcmf_usb_reset_device(struct device *dev, void *notused)
+{
+ /* device passed is the usb interface so we
+ * need to use parent here.
+ */
+ brcmf_dev_reset(dev->parent);
+ return 0;
+}
void brcmf_usb_exit(void)
{
+ struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver;
+ int ret;
+
brcmf_dbg(USB, "Enter\n");
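+ /* reset all devices bound to this driver before deregistering it */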
+ ret = driver_for_each_device(drv, NULL, NULL,
+ brcmf_usb_reset_device);
usb_deregister(&brcmf_usbdrvr);
brcmf_release_fw(&fw_image_list);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 1261a9b84e04..cecc3eff72e9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -16,8 +16,6 @@
/* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <net/cfg80211.h>
@@ -28,6 +26,8 @@
#include <brcmu_wifi.h>
#include "dhd.h"
#include "dhd_dbg.h"
+#include "fwil_types.h"
+#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
@@ -43,16 +43,13 @@
#define BRCMF_PNO_SCAN_COMPLETE 1
#define BRCMF_PNO_SCAN_INCOMPLETE 0
-#define BRCMF_IFACE_MAX_CNT 2
+#define BRCMF_IFACE_MAX_CNT 3
-#define TLV_LEN_OFF 1 /* length offset */
-#define TLV_HDR_LEN 2 /* header length */
-#define TLV_BODY_OFF 2 /* body offset */
-#define TLV_OUI_LEN 3 /* oui id length */
#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
#define WPA_OUI_TYPE 1
#define RSN_OUI "\x00\x0F\xAC" /* RSN OUI */
#define WME_OUI_TYPE 2
+#define WPS_OUI_TYPE 4
#define VS_IE_FIXED_HDR_LEN 6
#define WPA_IE_VERSION_LEN 2
@@ -78,13 +75,15 @@
#define VNDR_IE_PKTFLAG_OFFSET 8
#define VNDR_IE_VSIE_OFFSET 12
#define VNDR_IE_HDR_SIZE 12
-#define VNDR_IE_BEACON_FLAG 0x1
-#define VNDR_IE_PRBRSP_FLAG 0x2
-#define MAX_VNDR_IE_NUMBER 5
+#define VNDR_IE_PARSE_LIMIT 5
#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */
+#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
+#define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
+#define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
@@ -273,13 +272,6 @@ static const u32 __wl_cipher_suites[] = {
WLAN_CIPHER_SUITE_AES_CMAC,
};
-/* tag_ID/length/value_buffer tuple */
-struct brcmf_tlv {
- u8 id;
- u8 len;
- u8 data[1];
-};
-
/* Vendor specific ie. id = 221, oui and type defines exact ie */
struct brcmf_vs_tlv {
u8 id;
@@ -296,7 +288,7 @@ struct parsed_vndr_ie_info {
struct parsed_vndr_ies {
u32 count;
- struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+ struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
};
/* Quarter dBm units to mW
@@ -383,7 +375,7 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
return qdbm;
}
-static u16 channel_to_chanspec(struct ieee80211_channel *ch)
+u16 channel_to_chanspec(struct ieee80211_channel *ch)
{
u16 chanspec;
@@ -395,19 +387,92 @@ static u16 channel_to_chanspec(struct ieee80211_channel *ch)
else
chanspec |= WL_CHANSPEC_BAND_5G;
- if (ch->flags & IEEE80211_CHAN_NO_HT40) {
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- } else {
- chanspec |= WL_CHANSPEC_BW_40;
- if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS)
- chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
- else
- chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
- }
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
return chanspec;
}
+/* Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+{
+ struct brcmf_tlv *elt;
+ int totlen;
+
+ elt = (struct brcmf_tlv *)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
+ return elt;
+
+ elt = (struct brcmf_tlv *)((u8 *)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
+}
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+ u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return true;
+ }
+
+ if (tlvs == NULL)
+ return false;
+ /* point to the next ie */
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+
+ return false;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpaie(u8 *parse, u32 len)
+{
+ struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+ return NULL;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpsie(u8 *parse, u32 len)
+{
+ struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPS_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+ return NULL;
+}
+
+
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
struct brcmf_wsec_key_le *key_le)
{
@@ -440,11 +505,153 @@ send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
return err;
}
+static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
+ const char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
+{
+ brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return ERR_PTR(-EOPNOTSUPP);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return brcmf_p2p_add_vif(wiphy, name, type, flags, params);
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+void brcmf_set_mpc(struct net_device *ndev, int mpc)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 err = 0;
+
+ if (check_vif_up(ifp->vif)) {
+ err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
+ if (err) {
+ brcmf_err("fail to set mpc\n");
+ return;
+ }
+ brcmf_dbg(INFO, "MPC : %d\n", mpc);
+ }
+}
+
+s32
+brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort)
+{
+ struct brcmf_scan_params_le params_le;
+ struct cfg80211_scan_request *scan_request;
+ s32 err = 0;
+
+ brcmf_dbg(SCAN, "Enter\n");
+
+ /* clear scan request, because the FW abort can cause a second call */
+ /* to this function and might cause a double cfg80211_scan_done */
+ scan_request = cfg->scan_request;
+ cfg->scan_request = NULL;
+
+ if (timer_pending(&cfg->escan_timeout))
+ del_timer_sync(&cfg->escan_timeout);
+
+ if (fw_abort) {
+ /* Do a scan abort to stop the driver's scan engine */
+ brcmf_dbg(SCAN, "ABORT scan in firmware\n");
+ memset(&params_le, 0, sizeof(params_le));
+ memset(params_le.bssid, 0xFF, ETH_ALEN);
+ params_le.bss_type = DOT11_BSSTYPE_ANY;
+ params_le.scan_type = 0;
+ params_le.channel_num = cpu_to_le32(1);
+ params_le.nprobes = cpu_to_le32(1);
+ params_le.active_time = cpu_to_le32(-1);
+ params_le.passive_time = cpu_to_le32(-1);
+ params_le.home_time = cpu_to_le32(-1);
+ /* Scan is aborted by setting channel_list[0] to -1 */
+ params_le.channel_list[0] = cpu_to_le16(-1);
+ /* E-Scan (or any other type) can be aborted by SCAN */
+ err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
+ &params_le, sizeof(params_le));
+ if (err)
+ brcmf_err("Scan abort failed\n");
+ }
+ /*
+ * e-scan can be initiated by scheduled scan
+ * which takes precedence.
+ */
+ if (cfg->sched_escan) {
+ brcmf_dbg(SCAN, "scheduled scan completed\n");
+ cfg->sched_escan = false;
+ if (!aborted)
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+ brcmf_set_mpc(ndev, 1);
+ } else if (scan_request) {
+ brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
+ aborted ? "Aborted" : "Done");
+ cfg80211_scan_done(scan_request, aborted);
+ brcmf_set_mpc(ndev, 1);
+ }
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+ brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
+
+ return err;
+}
+
+static
+int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev->netdev;
+
+ /* vif event pending in firmware */
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ return -EBUSY;
+
+ if (ndev) {
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) &&
+ cfg->escan_info.ndev == ndev)
+ brcmf_notify_escan_complete(cfg, ndev, true,
+ true);
+
+ brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1);
+ }
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return -EOPNOTSUPP;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return brcmf_p2p_del_vif(wiphy, wdev);
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return -EINVAL;
+ }
+ return -EOPNOTSUPP;
+}
+
static s32
brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_vif *vif = ifp->vif;
s32 infra = 0;
@@ -464,10 +671,23 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
infra = 0;
break;
case NL80211_IFTYPE_STATION:
+ /* Ignore change for p2p IF. Unclear why supplicant does this */
+ if ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
+ (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) {
+ brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
+ /* WAR: It is unexpected to get a change of VIF for P2P
+ * IF, but it happens. The request can not be handled
+ * but returning EPERM causes a crash. Returning 0
+ * without setting ieee80211_ptr->iftype causes trace
+ * (WARN_ON) but it works with wpa_supplicant
+ */
+ return 0;
+ }
vif->mode = WL_MODE_BSS;
infra = 1;
break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
vif->mode = WL_MODE_AP;
ap = 1;
break;
@@ -477,8 +697,14 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
if (ap) {
- set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
- brcmf_dbg(INFO, "IF Type = AP\n");
+ if (type == NL80211_IFTYPE_P2P_GO) {
+ brcmf_dbg(INFO, "IF Type = P2P GO\n");
+ err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
+ }
+ if (!err) {
+ set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
+ brcmf_dbg(INFO, "IF Type = AP\n");
+ }
} else {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra);
if (err) {
@@ -497,21 +723,6 @@ done:
return err;
}
-static void brcmf_set_mpc(struct net_device *ndev, int mpc)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err = 0;
-
- if (check_vif_up(ifp->vif)) {
- err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
- if (err) {
- brcmf_err("fail to set mpc\n");
- return;
- }
- brcmf_dbg(INFO, "MPC : %d\n", mpc);
- }
-}
-
static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
struct cfg80211_scan_request *request)
{
@@ -592,69 +803,6 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
}
static s32
-brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
- bool aborted, bool fw_abort)
-{
- struct brcmf_scan_params_le params_le;
- struct cfg80211_scan_request *scan_request;
- s32 err = 0;
-
- brcmf_dbg(SCAN, "Enter\n");
-
- /* clear scan request, because the FW abort can cause a second call */
- /* to this functon and might cause a double cfg80211_scan_done */
- scan_request = cfg->scan_request;
- cfg->scan_request = NULL;
-
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
-
- if (fw_abort) {
- /* Do a scan abort to stop the driver's scan engine */
- brcmf_dbg(SCAN, "ABORT scan in firmware\n");
- memset(&params_le, 0, sizeof(params_le));
- memset(params_le.bssid, 0xFF, ETH_ALEN);
- params_le.bss_type = DOT11_BSSTYPE_ANY;
- params_le.scan_type = 0;
- params_le.channel_num = cpu_to_le32(1);
- params_le.nprobes = cpu_to_le32(1);
- params_le.active_time = cpu_to_le32(-1);
- params_le.passive_time = cpu_to_le32(-1);
- params_le.home_time = cpu_to_le32(-1);
- /* Scan is aborted by setting channel_list[0] to -1 */
- params_le.channel_list[0] = cpu_to_le16(-1);
- /* E-Scan (or anyother type) can be aborted by SCAN */
- err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
- &params_le, sizeof(params_le));
- if (err)
- brcmf_err("Scan abort failed\n");
- }
- /*
- * e-scan can be initiated by scheduled scan
- * which takes precedence.
- */
- if (cfg->sched_escan) {
- brcmf_dbg(SCAN, "scheduled scan completed\n");
- cfg->sched_escan = false;
- if (!aborted)
- cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
- brcmf_set_mpc(ndev, 1);
- } else if (scan_request) {
- brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
- aborted ? "Aborted" : "Done");
- cfg80211_scan_done(scan_request, aborted);
- brcmf_set_mpc(ndev, 1);
- }
- if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
- brcmf_err("Scan complete while device not scanning\n");
- return -EPERM;
- }
-
- return err;
-}
-
-static s32
brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
struct cfg80211_scan_request *request, u16 action)
{
@@ -705,11 +853,12 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
s32 err;
u32 passive_scan;
struct brcmf_scan_results *results;
+ struct escan_info *escan = &cfg->escan_info;
brcmf_dbg(SCAN, "Enter\n");
- cfg->escan_info.ndev = ndev;
- cfg->escan_info.wiphy = wiphy;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
+ escan->ndev = ndev;
+ escan->wiphy = wiphy;
+ escan->escan_state = WL_ESCAN_STATE_SCANNING;
passive_scan = cfg->active_scan ? 0 : 1;
err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
passive_scan);
@@ -723,7 +872,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
results->count = 0;
results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
- err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
+ err = escan->run(cfg, ndev, request, WL_ESCAN_ACTION_START);
if (err)
brcmf_set_mpc(ndev, 1);
return err;
@@ -760,6 +909,12 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
return -EAGAIN;
}
+ /* If scan req comes for p2p0, send it over primary I/F */
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) {
+ ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+ ndev = ifp->ndev;
+ }
+
/* Arm scan timeout timer */
mod_timer(&cfg->escan_timeout, jiffies +
WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
@@ -778,6 +933,11 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
cfg->scan_request = request;
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (escan_req) {
+ cfg->escan_info.run = brcmf_run_escan;
+ err = brcmf_p2p_scan_prep(wiphy, request, ifp->vif);
+ if (err)
+ goto scan_out;
+
err = brcmf_do_escan(cfg, wiphy, ndev, request);
if (err)
goto scan_out;
@@ -935,31 +1095,6 @@ static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
memset(prof, 0, sizeof(*prof));
}
-static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
- size_t *join_params_size)
-{
- u16 chanspec = 0;
-
- if (ch != 0) {
- if (ch <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-
- *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE +
- sizeof(u16);
-
- chanspec |= (ch & WL_CHANSPEC_CHAN_MASK);
- join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec);
- join_params->params_le.chanspec_num = cpu_to_le32(1);
-
- brcmf_dbg(CONN, "channel %d, chanspec %#X\n", ch, chanspec);
- }
-}
-
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
{
s32 err = 0;
@@ -990,6 +1125,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
s32 wsec = 0;
s32 bcnprd;
+ u16 chanspec;
brcmf_dbg(TRACE, "Enter\n");
if (!check_vif_up(ifp->vif))
@@ -1093,8 +1229,11 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
params->chandef.chan->center_freq);
if (params->channel_fixed) {
/* adding chanspec */
- brcmf_ch_to_chanspec(cfg->channel,
- &join_params, &join_params_size);
+ chanspec = channel_to_chanspec(params->chandef.chan);
+ join_params.params_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
+ join_params.params_le.chanspec_num = cpu_to_le32(1);
+ join_params_size += sizeof(join_params.params_le);
}
/* set channel for starter */
@@ -1157,7 +1296,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
else
val = WPA_AUTH_DISABLED;
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
if (err) {
brcmf_err("set wpa_auth failed (%d)\n", err);
return err;
@@ -1196,7 +1335,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
break;
}
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
if (err) {
brcmf_err("set auth failed (%d)\n", err);
return err;
@@ -1260,7 +1399,12 @@ brcmf_set_set_cipher(struct net_device *ndev,
}
brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval);
+ /* In case of privacy, but no security and WPS, simulate setting */
+ /* AES. WPS-2.0 allows no security */
+ if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
+ sme->privacy)
+ pval = AES_ENABLED;
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
if (err) {
brcmf_err("error (%d)\n", err);
return err;
@@ -1282,8 +1426,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
s32 err = 0;
if (sme->crypto.n_akm_suites) {
- err = brcmf_fil_iovar_int_get(netdev_priv(ndev),
- "wpa_auth", &val);
+ err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
+ "wpa_auth", &val);
if (err) {
brcmf_err("could not get wpa_auth (%d)\n", err);
return err;
@@ -1317,8 +1461,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
}
brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev),
- "wpa_auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev),
+ "wpa_auth", val);
if (err) {
brcmf_err("could not set wpa_auth (%d)\n", err);
return err;
@@ -1395,9 +1539,28 @@ brcmf_set_sharedkey(struct net_device *ndev,
return err;
}
+static
+enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
+ enum nl80211_auth_type type)
+{
+ u32 ci;
+ if (type == NL80211_AUTHTYPE_AUTOMATIC) {
+ /* shift to ignore chip revision */
+ ci = brcmf_get_chip_info(ifp) >> 4;
+ switch (ci) {
+ case 43236:
+ brcmf_dbg(CONN, "43236 WAR: use OPEN instead of AUTO\n");
+ return NL80211_AUTHTYPE_OPEN_SYSTEM;
+ default:
+ break;
+ }
+ }
+ return type;
+}
+
static s32
brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_connect_params *sme)
+ struct cfg80211_connect_params *sme)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -1405,7 +1568,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
- struct brcmf_ssid ssid;
+ struct brcmf_tlv *rsn_ie;
+ struct brcmf_vs_tlv *wpa_ie;
+ void *ie;
+ u32 ie_len;
+ struct brcmf_ext_join_params_le *ext_join_params;
+ u16 chanspec;
s32 err = 0;
@@ -1418,15 +1586,46 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
return -EOPNOTSUPP;
}
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) {
+ /* A normal (non P2P) connection request setup. */
+ ie = NULL;
+ ie_len = 0;
+ /* find the WPA_IE */
+ wpa_ie = brcmf_find_wpaie((u8 *)sme->ie, sme->ie_len);
+ if (wpa_ie) {
+ ie = wpa_ie;
+ ie_len = wpa_ie->len + TLV_HDR_LEN;
+ } else {
+ /* find the RSN_IE */
+ rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ WLAN_EID_RSN);
+ if (rsn_ie) {
+ ie = rsn_ie;
+ ie_len = rsn_ie->len + TLV_HDR_LEN;
+ }
+ }
+ brcmf_fil_iovar_data_set(ifp, "wpaie", ie, ie_len);
+ }
+
+ err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
+ sme->ie, sme->ie_len);
+ if (err)
+ brcmf_err("Set Assoc REQ IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
+
set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
if (chan) {
cfg->channel =
ieee80211_frequency_to_channel(chan->center_freq);
- brcmf_dbg(CONN, "channel (%d), center_req (%d)\n",
- cfg->channel, chan->center_freq);
- } else
+ chanspec = channel_to_chanspec(chan);
+ brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n",
+ cfg->channel, chan->center_freq, chanspec);
+ } else {
cfg->channel = 0;
+ chanspec = 0;
+ }
brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
@@ -1436,6 +1635,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
+ sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type);
err = brcmf_set_auth_type(ndev, sme);
if (err) {
brcmf_err("wl_set_auth_type failed (%d)\n", err);
@@ -1460,27 +1660,88 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
+ profile->ssid.SSID_len = min_t(u32, (u32)sizeof(profile->ssid.SSID),
+ (u32)sme->ssid_len);
+ memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
+ if (profile->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ profile->ssid.SSID[profile->ssid.SSID_len] = 0;
+ brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n", profile->ssid.SSID,
+ profile->ssid.SSID_len);
+ }
+
+ /* Join with specific BSSID and cached SSID
+ * If SSID is zero join based on BSSID only
+ */
+ join_params_size = offsetof(struct brcmf_ext_join_params_le, assoc_le) +
+ offsetof(struct brcmf_assoc_params_le, chanspec_list);
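+ /* add room for a single chanspec entry when a channel was specified */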
+ if (cfg->channel)
+ join_params_size += sizeof(u16);
+ ext_join_params = kzalloc(join_params_size, GFP_KERNEL);
+ if (ext_join_params == NULL) {
+ err = -ENOMEM;
+ goto done;
+ }
+ ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+ memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
+ profile->ssid.SSID_len);
+ /* increase dwell time to receive a probe response or detect a beacon
+ * from the target AP in a noisy environment, only during the connect command
+ */
+ ext_join_params->scan_le.active_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+ ext_join_params->scan_le.passive_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+ /* Set up join scan parameters */
+ ext_join_params->scan_le.scan_type = -1;
+ /* to sync with presence period of VSDB GO.
+ * Send probe request more frequently. Probe request will be stopped
+ * when it gets probe response from target AP/GO.
+ */
+ ext_join_params->scan_le.nprobes =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+ BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+ ext_join_params->scan_le.home_time = cpu_to_le32(-1);
+
+ if (sme->bssid)
+ memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
+ else
+ memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN);
+
+ if (cfg->channel) {
+ ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
+
+ ext_join_params->assoc_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
+ }
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
+ join_params_size);
+ kfree(ext_join_params);
+ if (!err)
+ /* This is it. join command worked, we are done */
+ goto done;
+
+ /* join command failed, fallback to set ssid */
memset(&join_params, 0, sizeof(join_params));
join_params_size = sizeof(join_params.ssid_le);
- profile->ssid.SSID_len = min_t(u32,
- sizeof(ssid.SSID), (u32)sme->ssid_len);
memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
- memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
- memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
-
- if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
- brcmf_dbg(CONN, "ssid \"%s\", len (%d)\n",
- ssid.SSID, ssid.SSID_len);
+ if (sme->bssid)
+ memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
+ else
+ memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
- brcmf_ch_to_chanspec(cfg->channel,
- &join_params, &join_params_size);
+ if (cfg->channel) {
+ join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
+ join_params.params_le.chanspec_num = cpu_to_le32(1);
+ join_params_size += sizeof(join_params.params_le);
+ }
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, join_params_size);
if (err)
- brcmf_err("WLC_SET_SSID failed (%d)\n", err);
+ brcmf_err("BRCMF_C_SET_SSID failed (%d)\n", err);
done:
if (err)
@@ -1939,7 +2200,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
/* Report the current tx rate */
- err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
if (err) {
brcmf_err("Could not get rate (%d)\n", err);
goto done;
@@ -2011,67 +2272,6 @@ done:
return err;
}
-static s32
-brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
- const u8 *addr,
- const struct cfg80211_bitrate_mask *mask)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcm_rateset_le rateset_le;
- s32 rate;
- s32 val;
- s32 err_bg;
- s32 err_a;
- u32 legacy;
- s32 err = 0;
-
- brcmf_dbg(TRACE, "Enter\n");
- if (!check_vif_up(ifp->vif))
- return -EIO;
-
- /* addr param is always NULL. ignore it */
- /* Get current rateset */
- err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CURR_RATESET,
- &rateset_le, sizeof(rateset_le));
- if (err) {
- brcmf_err("could not get current rateset (%d)\n", err);
- goto done;
- }
-
- legacy = ffs(mask->control[IEEE80211_BAND_2GHZ].legacy & 0xFFFF);
- if (!legacy)
- legacy = ffs(mask->control[IEEE80211_BAND_5GHZ].legacy &
- 0xFFFF);
-
- val = wl_g_rates[legacy - 1].bitrate * 100000;
-
- if (val < le32_to_cpu(rateset_le.count))
- /* Select rate by rateset index */
- rate = rateset_le.rates[val] & 0x7f;
- else
- /* Specified rate in bps */
- rate = val / 500000;
-
- brcmf_dbg(CONN, "rate %d mbps\n", rate / 2);
-
- /*
- *
- * Set rate override,
- * Since the is a/b/g-blind, both a/bg_rate are enforced.
- */
- err_bg = brcmf_fil_iovar_int_set(ifp, "bg_rate", rate);
- err_a = brcmf_fil_iovar_int_set(ifp, "a_rate", rate);
- if (err_bg && err_a) {
- brcmf_err("could not set fixed rate (%d) (%d)\n", err_bg,
- err_a);
- err = err_bg | err_a;
- }
-
-done:
- brcmf_dbg(TRACE, "Exit\n");
- return err;
-}
-
static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi)
{
@@ -2123,7 +2323,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
if (!bss)
return -ENOMEM;
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
return err;
}
@@ -2229,7 +2429,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
goto CleanUp;
}
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
CleanUp:
@@ -2245,78 +2445,10 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
return vif->mode == WL_MODE_IBSS;
}
-/*
- * Traverse a string of 1-byte tag/1-byte length/variable-length value
- * triples, returning a pointer to the substring whose first element
- * matches tag
- */
-static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
-{
- struct brcmf_tlv *elt;
- int totlen;
-
- elt = (struct brcmf_tlv *) buf;
- totlen = buflen;
-
- /* find tagged parameter */
- while (totlen >= TLV_HDR_LEN) {
- int len = elt->len;
-
- /* validate remaining totlen */
- if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
- return elt;
-
- elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
- totlen -= (len + TLV_HDR_LEN);
- }
-
- return NULL;
-}
-
-/* Is any of the tlvs the expected entry? If
- * not update the tlvs buffer pointer/length.
- */
-static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
- u8 *oui, u32 oui_len, u8 type)
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp)
{
- /* If the contents match the OUI and the type */
- if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
- !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
- type == ie[TLV_BODY_OFF + oui_len]) {
- return true;
- }
-
- if (tlvs == NULL)
- return false;
- /* point to the next ie */
- ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
- /* calculate the length of the rest of the buffer */
- *tlvs_len -= (int)(ie - *tlvs);
- /* update the pointer to the start of the buffer */
- *tlvs = ie;
-
- return false;
-}
-
-static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
-{
- struct brcmf_tlv *ie;
-
- while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
- WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
- return (struct brcmf_vs_tlv *)ie;
- }
- return NULL;
-}
-
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
-{
- struct net_device *ndev = cfg_to_ndev(cfg);
- struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
- struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
@@ -2372,7 +2504,7 @@ update_bss_info_out:
return err;
}
-static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
{
struct escan_info *escan = &cfg->escan_info;
@@ -2391,8 +2523,7 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
container_of(work, struct brcmf_cfg80211_info,
escan_timeout_work);
- brcmf_notify_escan_complete(cfg,
- cfg->escan_info.ndev, true, true);
+ brcmf_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
static void brcmf_escan_timeout(unsigned long data)
@@ -2469,11 +2600,6 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
- if (!cfg->scan_request) {
- brcmf_dbg(SCAN, "result without cfg80211 request\n");
- goto exit;
- }
-
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
brcmf_err("Invalid bss_count %d: ignoring\n",
escan_result_le->bss_count);
@@ -2481,6 +2607,14 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
}
bss_info_le = &escan_result_le->bss_info_le;
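+ /* let the P2P common channel search consume the result before normal handling */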
+ if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
+ goto exit;
+
+ if (!cfg->scan_request) {
+ brcmf_dbg(SCAN, "result without cfg80211 request\n");
+ goto exit;
+ }
+
bi_length = le32_to_cpu(bss_info_le->length);
if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
WL_ESCAN_RESULTS_FIXED_SIZE)) {
@@ -2519,6 +2653,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
list->count++;
} else {
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
+ goto exit;
if (cfg->scan_request) {
cfg->bss_list = (struct brcmf_scan_results *)
cfg->escan_info.escan_buf;
@@ -2527,7 +2663,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
brcmf_notify_escan_complete(cfg, ndev, aborted,
false);
} else
- brcmf_err("Unexpected scan result 0x%x\n", status);
+ brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n",
+ status);
}
exit:
return err;
@@ -3031,9 +3168,8 @@ static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
}
#endif
-static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
+static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
/* set auth */
@@ -3091,10 +3227,11 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
len = wpa_ie->len + TLV_HDR_LEN;
data = (u8 *)wpa_ie;
- offset = 0;
+ offset = TLV_HDR_LEN;
if (!is_rsn_ie)
offset += VS_IE_FIXED_HDR_LEN;
- offset += WPA_IE_VERSION_LEN;
+ else
+ offset += WPA_IE_VERSION_LEN;
/* check for multicast cipher suite */
if (offset + WPA_IE_MIN_OUI_LEN > len) {
@@ -3291,7 +3428,7 @@ brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
parsed_info->vndrie.oui[2],
parsed_info->vndrie.oui_type);
- if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
+ if (vndr_ies->count >= VNDR_IE_PARSE_LIMIT)
break;
next:
remaining_len -= (ie->len + TLV_HDR_LEN);
@@ -3325,7 +3462,6 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
return ie_len + VNDR_IE_HDR_SIZE;
}
-static
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len)
{
@@ -3357,24 +3493,28 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
if (!iovar_ie_buf)
return -ENOMEM;
curr_ie_buf = iovar_ie_buf;
- if (ifp->vif->mode == WL_MODE_AP) {
- switch (pktflag) {
- case VNDR_IE_PRBRSP_FLAG:
- mgmt_ie_buf = saved_ie->probe_res_ie;
- mgmt_ie_len = &saved_ie->probe_res_ie_len;
- mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
- break;
- case VNDR_IE_BEACON_FLAG:
- mgmt_ie_buf = saved_ie->beacon_ie;
- mgmt_ie_len = &saved_ie->beacon_ie_len;
- mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
- break;
- default:
- err = -EPERM;
- brcmf_err("not suitable type\n");
- goto exit;
- }
- } else {
+ switch (pktflag) {
+ case BRCMF_VNDR_IE_PRBREQ_FLAG:
+ mgmt_ie_buf = saved_ie->probe_req_ie;
+ mgmt_ie_len = &saved_ie->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->probe_req_ie);
+ break;
+ case BRCMF_VNDR_IE_PRBRSP_FLAG:
+ mgmt_ie_buf = saved_ie->probe_res_ie;
+ mgmt_ie_len = &saved_ie->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
+ break;
+ case BRCMF_VNDR_IE_BEACON_FLAG:
+ mgmt_ie_buf = saved_ie->beacon_ie;
+ mgmt_ie_len = &saved_ie->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
+ break;
+ case BRCMF_VNDR_IE_ASSOCREQ_FLAG:
+ mgmt_ie_buf = saved_ie->assoc_req_ie;
+ mgmt_ie_len = &saved_ie->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
+ break;
+ default:
err = -EPERM;
brcmf_err("not suitable type\n");
goto exit;
@@ -3483,6 +3623,49 @@ exit:
return err;
}
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
+{
+ s32 pktflags[] = {
+ BRCMF_VNDR_IE_PRBREQ_FLAG,
+ BRCMF_VNDR_IE_PRBRSP_FLAG,
+ BRCMF_VNDR_IE_BEACON_FLAG
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pktflags); i++)
+ brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
+
+ memset(&vif->saved_ie, 0, sizeof(vif->saved_ie));
+ return 0;
+}
+
+static s32
+brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
+ struct cfg80211_beacon_data *beacon)
+{
+ s32 err;
+
+ /* Set Beacon IEs to FW */
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_BEACON_FLAG,
+ beacon->tail, beacon->tail_len);
+ if (err) {
+ brcmf_err("Set Beacon IE Failed\n");
+ return err;
+ }
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
+
+ /* Set Probe Response IEs to FW */
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBRSP_FLAG,
+ beacon->proberesp_ies,
+ beacon->proberesp_ies_len);
+ if (err)
+ brcmf_err("Set Probe Resp IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+
+ return err;
+}
+
static s32
brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
@@ -3495,7 +3678,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_tlv *rsn_ie;
struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
- s32 bssidx = 0;
+ enum nl80211_iftype dev_role;
+ struct brcmf_fil_bss_enable_le bss_enable;
brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
cfg80211_get_chandef_type(&settings->chandef),
@@ -3505,10 +3689,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
settings->ssid, settings->ssid_len, settings->auth_type,
settings->inactivity_timeout);
- if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) {
- brcmf_err("Not in AP creation mode\n");
- return -EPERM;
- }
+ dev_role = ifp->vif->wdev.iftype;
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
@@ -3529,21 +3710,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_set_mpc(ndev, 0);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
- if (err < 0) {
- brcmf_err("BRCMF_C_DOWN error %d\n", err);
- goto exit;
- }
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
- if (err < 0) {
- brcmf_err("SET INFRA error %d\n", err);
- goto exit;
- }
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
- if (err < 0) {
- brcmf_err("setting AP mode failed %d\n", err);
- goto exit;
- }
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
@@ -3569,27 +3735,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
} else {
brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
- brcmf_configure_opensecurity(ndev, bssidx);
+ brcmf_configure_opensecurity(ifp);
}
- /* Set Beacon IEs to FW */
- err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
- VNDR_IE_BEACON_FLAG,
- settings->beacon.tail,
- settings->beacon.tail_len);
- if (err)
- brcmf_err("Set Beacon IE Failed\n");
- else
- brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
- /* Set Probe Response IEs to FW */
- err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
- VNDR_IE_PRBRSP_FLAG,
- settings->beacon.proberesp_ies,
- settings->beacon.proberesp_ies_len);
- if (err)
- brcmf_err("Set Probe Resp IE Failed\n");
- else
- brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+ brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
if (settings->beacon_interval) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
@@ -3607,22 +3756,62 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
}
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
- if (err < 0) {
- brcmf_err("BRCMF_C_UP error (%d)\n", err);
- goto exit;
+
+ if (dev_role == NL80211_IFTYPE_AP) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ goto exit;
+ }
+ brcmf_fil_iovar_int_set(ifp, "apsta", 0);
}
- memset(&join_params, 0, sizeof(join_params));
- /* join parameters starts with ssid */
- memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
- /* create softap */
- err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
- &join_params, sizeof(join_params));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
- brcmf_err("SET SSID error (%d)\n", err);
+ brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
+ if (dev_role == NL80211_IFTYPE_AP) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
+ if (err < 0) {
+ brcmf_err("setting AP mode failed %d\n", err);
+ goto exit;
+ }
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_UP error (%d)\n", err);
+ goto exit;
+ }
+
+ memset(&join_params, 0, sizeof(join_params));
+		/* join parameters start with ssid */
+ memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
+ /* create softap */
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, sizeof(join_params));
+ if (err < 0) {
+ brcmf_err("SET SSID error (%d)\n", err);
+ goto exit;
+ }
+ brcmf_dbg(TRACE, "AP mode configuration complete\n");
+ } else {
+ err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
+ sizeof(ssid_le));
+ if (err < 0) {
+ brcmf_err("setting ssid failed %d\n", err);
+ goto exit;
+ }
+ bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+ bss_enable.enable = cpu_to_le32(1);
+ err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+ sizeof(bss_enable));
+ if (err < 0) {
+ brcmf_err("bss_enable config failed %d\n", err);
+ goto exit;
+ }
+
+ brcmf_dbg(TRACE, "GO mode configuration complete\n");
+ }
clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
@@ -3636,10 +3825,11 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = -EPERM;
+ struct brcmf_fil_bss_enable_le bss_enable;
brcmf_dbg(TRACE, "Enter\n");
- if (ifp->vif->mode == WL_MODE_AP) {
+ if (ifp->vif->wdev.iftype == NL80211_IFTYPE_AP) {
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
@@ -3653,18 +3843,41 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
brcmf_err("BRCMF_C_UP error %d\n", err);
goto exit;
}
- brcmf_set_mpc(ndev, 1);
- clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
- clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+ } else {
+ bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+ bss_enable.enable = cpu_to_le32(0);
+ err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+ sizeof(bss_enable));
+ if (err < 0)
+ brcmf_err("bss_enable config failed %d\n", err);
}
+ brcmf_set_mpc(ndev, 1);
+ set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+
exit:
return err;
}
+static s32
+brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_beacon_data *info)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ err = brcmf_config_ap_mgmt_ie(ifp->vif, info);
+
+ return err;
+}
+
static int
brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
u8 *mac)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_scb_val_le scbval;
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
@@ -3674,6 +3887,8 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(TRACE, "Enter %pM\n", mac);
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+ ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
if (!check_vif_up(ifp->vif))
return -EIO;
@@ -3688,7 +3903,147 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
return err;
}
+
+static void
+brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u16 frame_type, bool reg)
+{
+ struct brcmf_if *ifp = netdev_priv(wdev->netdev);
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ u16 mgmt_type;
+
+ brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
+
+ mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+ if (reg)
+ vif->mgmt_rx_reg |= BIT(mgmt_type);
+ else
+ vif->mgmt_rx_reg &= ~BIT(mgmt_type);
+}
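/*
 * Editorial sketch (not part of the patch): vif->mgmt_rx_reg is a bitmask
 * indexed by management-frame subtype, so a receive path could decide
 * whether to forward a frame to cfg80211 roughly like this (hypothetical
 * helper, using the same stype-to-bit mapping as above):
 *
 *	static bool brcmf_mgmt_rx_wanted(struct brcmf_cfg80211_vif *vif,
 *					 u16 frame_control)
 *	{
 *		u16 stype = (frame_control & IEEE80211_FCTL_STYPE) >> 4;
 *
 *		return vif->mgmt_rx_reg & BIT(stype);
 *	}
 */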
+
+
+static int
+brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *chan, bool offchan,
+ unsigned int wait, const u8 *buf, size_t len,
+ bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ const struct ieee80211_mgmt *mgmt;
+ struct brcmf_if *ifp;
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ s32 ie_offset;
+ s32 ie_len;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_fil_af_params_le *af_params;
+ bool ack;
+ s32 chan_nr;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ *cookie = 0;
+
+ mgmt = (const struct ieee80211_mgmt *)buf;
+
+ if (!ieee80211_is_mgmt(mgmt->frame_control)) {
+ brcmf_err("Driver only allows MGMT packet type\n");
+ return -EPERM;
+ }
+
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+		/*
+		 * Right now the only reason to get a probe response is a
+		 * p2p listen response or a p2p GO operating through
+		 * wpa_supplicant. Unfortunately the probe response is sent
+		 * on the primary ndev, while the dongle wants it on the
+		 * p2p vif; since this is the only case in which a probe
+		 * response is sent, the vif is taken from cfg. Should probe
+		 * responses ever be needed for non-p2p use, the data would
+		 * have to be checked for "DIRECT-". In the future the
+		 * supplicant will use a dedicated p2p wdev for this, making
+		 * this workaround unnecessary.
+		 */
+ ie_offset = DOT11_MGMT_HDR_LEN +
+ DOT11_BCN_PRB_FIXED_LEN;
+ ie_len = len - ie_offset;
+ ifp = netdev_priv(wdev->netdev);
+ vif = ifp->vif;
+ if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
+ vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ err = brcmf_vif_set_mgmt_ie(vif,
+ BRCMF_VNDR_IE_PRBRSP_FLAG,
+ &buf[ie_offset],
+ ie_len);
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+ GFP_KERNEL);
+ } else if (ieee80211_is_action(mgmt->frame_control)) {
+ af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
+ if (af_params == NULL) {
+ brcmf_err("unable to allocate frame\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ action_frame = &af_params->action_frame;
+ /* Add the packet Id */
+ action_frame->packet_id = cpu_to_le32(*cookie);
+ /* Add BSSID */
+ memcpy(&action_frame->da[0], &mgmt->da[0], ETH_ALEN);
+ memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
+		/* Add the length, excluding the 802.11 header */
+ action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
+ /* Add the channel */
+ chan_nr = ieee80211_frequency_to_channel(chan->center_freq);
+ af_params->channel = cpu_to_le32(chan_nr);
+
+ memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
+ le16_to_cpu(action_frame->len));
+
+ brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
+ *cookie, le16_to_cpu(action_frame->len),
+ chan->center_freq);
+
+ ack = brcmf_p2p_send_action_frame(cfg, wdev->netdev,
+ af_params);
+
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
+ GFP_KERNEL);
+ kfree(af_params);
+ } else {
+ brcmf_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control);
+		brcmf_dbg_hex_dump(true, buf, len, "payload, len=%zu\n", len);
+ }
+
+exit:
+ return err;
+}
+
+
+static int
+brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif;
+ int err = 0;
+
+ brcmf_dbg(TRACE, "Enter p2p listen cancel\n");
+
+ vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (vif == NULL) {
+ brcmf_err("No p2p device available for probe response\n");
+ err = -ENODEV;
+ goto exit;
+ }
+ brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+exit:
+ return err;
+}
+
static struct cfg80211_ops wl_cfg80211_ops = {
+ .add_virtual_intf = brcmf_cfg80211_add_iface,
+ .del_virtual_intf = brcmf_cfg80211_del_iface,
.change_virtual_intf = brcmf_cfg80211_change_iface,
.scan = brcmf_cfg80211_scan,
.set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
@@ -3703,7 +4058,6 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.set_default_key = brcmf_cfg80211_config_default_key,
.set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key,
.set_power_mgmt = brcmf_cfg80211_set_power_mgmt,
- .set_bitrate_mask = brcmf_cfg80211_set_bitrate_mask,
.connect = brcmf_cfg80211_connect,
.disconnect = brcmf_cfg80211_disconnect,
.suspend = brcmf_cfg80211_suspend,
@@ -3713,28 +4067,43 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.flush_pmksa = brcmf_cfg80211_flush_pmksa,
.start_ap = brcmf_cfg80211_start_ap,
.stop_ap = brcmf_cfg80211_stop_ap,
+ .change_beacon = brcmf_cfg80211_change_beacon,
.del_station = brcmf_cfg80211_del_station,
.sched_scan_start = brcmf_cfg80211_sched_scan_start,
.sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
+ .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
+ .mgmt_tx = brcmf_cfg80211_mgmt_tx,
+ .remain_on_channel = brcmf_p2p_remain_on_channel,
+ .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
#ifdef CONFIG_NL80211_TESTMODE
.testmode_cmd = brcmf_cfg80211_testmode
#endif
};
-static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
+static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
{
- s32 err = 0;
-
- switch (mode) {
- case WL_MODE_BSS:
- return NL80211_IFTYPE_STATION;
- case WL_MODE_IBSS:
- return NL80211_IFTYPE_ADHOC;
+ switch (type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return -ENOTSUPP;
+ case NL80211_IFTYPE_ADHOC:
+ return WL_MODE_IBSS;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return WL_MODE_BSS;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ return WL_MODE_AP;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return WL_MODE_P2P;
+ case NL80211_IFTYPE_UNSPECIFIED:
default:
- return NL80211_IFTYPE_UNSPECIFIED;
+ break;
}
- return err;
+ return -EINVAL;
}
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
@@ -3746,6 +4115,56 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
}
+static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO)
+ },
+};
+static const struct ieee80211_iface_combination brcmf_iface_combos[] = {
+ {
+ .max_interfaces = BRCMF_IFACE_MAX_CNT,
+ .num_different_channels = 1, /* no multi-channel for now */
+ .n_limits = ARRAY_SIZE(brcmf_iface_limits),
+ .limits = brcmf_iface_limits
+ }
+};
+
+static const struct ieee80211_txrx_stypes
+brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ }
+};
+
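/*
 * Editorial note: each .tx/.rx field above is a bitmask of management-frame
 * subtypes, indexed as (IEEE80211_STYPE_xxx >> 4); tx = 0xffff advertises
 * that any subtype may be transmitted via mgmt_tx, while the rx masks list
 * the subtypes user space may register to receive for that interface type.
 */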
static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
{
struct wiphy *wiphy;
@@ -3758,10 +4177,16 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
}
set_wiphy_dev(wiphy, phydev);
wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+ wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+ wiphy->iface_combinations = brcmf_iface_combos;
+ wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
* it as 11a by default.
@@ -3773,10 +4198,11 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->cipher_suites = __wl_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
- wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
- * save mode
- * by default
- */
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
+ WIPHY_FLAG_OFFCHAN_TX |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->mgmt_stypes = brcmf_txrx_stypes;
+ wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
err = wiphy_register(wiphy);
if (err < 0) {
@@ -3787,31 +4213,25 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
return wiphy;
}
-static
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
- struct net_device *netdev,
- s32 mode, bool pm_block)
+ enum nl80211_iftype type,
+ bool pm_block)
{
struct brcmf_cfg80211_vif *vif;
if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
return ERR_PTR(-ENOSPC);
+ brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
+ sizeof(*vif));
vif = kzalloc(sizeof(*vif), GFP_KERNEL);
if (!vif)
return ERR_PTR(-ENOMEM);
vif->wdev.wiphy = cfg->wiphy;
- vif->wdev.netdev = netdev;
- vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode);
+ vif->wdev.iftype = type;
- if (netdev) {
- vif->ifp = netdev_priv(netdev);
- netdev->ieee80211_ptr = &vif->wdev;
- SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy));
- }
-
- vif->mode = mode;
+ vif->mode = brcmf_nl80211_iftype_to_mode(type);
vif->pm_block = pm_block;
vif->roam_off = -1;
@@ -3822,7 +4242,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
return vif;
}
-static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
{
struct brcmf_cfg80211_info *cfg;
struct wiphy *wiphy;
@@ -3896,9 +4316,9 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
conn_info->resp_ie_len = 0;
}
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp)
{
- struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
u32 req_len;
@@ -3974,9 +4394,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
brcmf_dbg(TRACE, "Enter\n");
- brcmf_get_assoc_ies(cfg);
+ brcmf_get_assoc_ies(cfg, ifp);
memcpy(profile->bssid, e->addr, ETH_ALEN);
- brcmf_update_bss_info(cfg);
+ brcmf_update_bss_info(cfg, ifp);
buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (buf == NULL) {
@@ -4031,9 +4451,11 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
&ifp->vif->sme_state)) {
if (completed) {
- brcmf_get_assoc_ies(cfg);
+ brcmf_get_assoc_ies(cfg, ifp);
memcpy(profile->bssid, e->addr, ETH_ALEN);
- brcmf_update_bss_info(cfg);
+ brcmf_update_bss_info(cfg, ifp);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state);
}
cfg80211_connect_result(ndev,
(u8 *)profile->bssid,
@@ -4044,9 +4466,6 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
completed ? WLAN_STATUS_SUCCESS :
WLAN_STATUS_AUTH_TIMEOUT,
GFP_KERNEL);
- if (completed)
- set_bit(BRCMF_VIF_STATUS_CONNECTED,
- &ifp->vif->sme_state);
brcmf_dbg(CONN, "Report connect result - connection %s\n",
completed ? "succeeded" : "failed");
}
@@ -4059,38 +4478,38 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
- s32 err = 0;
+ static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
- u32 len = e->datalen;
- static int generation;
-
struct station_info sinfo;
brcmf_dbg(CONN, "event %d, reason %d\n", event, reason);
- memset(&sinfo, 0, sizeof(sinfo));
+ if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS &&
+ ndev != cfg_to_ndev(cfg)) {
+ brcmf_dbg(CONN, "AP mode link down\n");
+ complete(&cfg->vif_disabled);
+ return 0;
+ }
- sinfo.filled = 0;
if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
- reason == BRCMF_E_STATUS_SUCCESS) {
+ (reason == BRCMF_E_STATUS_SUCCESS)) {
+ memset(&sinfo, 0, sizeof(sinfo));
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
if (!data) {
brcmf_err("No IEs present in ASSOC/REASSOC_IND");
return -EINVAL;
}
sinfo.assoc_req_ies = data;
- sinfo.assoc_req_ies_len = len;
+ sinfo.assoc_req_ies_len = e->datalen;
generation++;
sinfo.generation = generation;
- cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
+ cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL);
} else if ((event == BRCMF_E_DISASSOC_IND) ||
(event == BRCMF_E_DEAUTH_IND) ||
(event == BRCMF_E_DEAUTH)) {
- generation++;
- sinfo.generation = generation;
- cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
+ cfg80211_del_sta(ndev, e->addr, GFP_KERNEL);
}
- return err;
+ return 0;
}
static s32
@@ -4127,6 +4546,8 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
}
brcmf_link_down(ifp->vif);
brcmf_init_prof(ndev_to_prof(ndev));
+ if (ndev != cfg_to_ndev(cfg))
+ complete(&cfg->vif_disabled);
} else if (brcmf_is_nonetwork(cfg, e)) {
if (brcmf_is_ibssmode(ifp->vif))
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
@@ -4175,6 +4596,57 @@ brcmf_notify_mic_status(struct brcmf_if *ifp,
return 0;
}
+static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_if_event *ifevent = (struct brcmf_if_event *)data;
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+ struct brcmf_cfg80211_vif *vif;
+
+ brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfg %u\n",
+ ifevent->action, ifevent->flags, ifevent->ifidx,
+ ifevent->bssidx);
+
+ mutex_lock(&event->vif_event_lock);
+ event->action = ifevent->action;
+ vif = event->vif;
+
+ switch (ifevent->action) {
+ case BRCMF_E_IF_ADD:
+ /* waiting process may have timed out */
+		if (!cfg->vif_event.vif) {
+			mutex_unlock(&event->vif_event_lock);
+			return -EBADF;
+		}
+
+ ifp->vif = vif;
+ vif->ifp = ifp;
+ vif->wdev.netdev = ifp->ndev;
+ ifp->ndev->ieee80211_ptr = &vif->wdev;
+ SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
+ mutex_unlock(&event->vif_event_lock);
+ wake_up(&event->vif_wq);
+ return 0;
+
+ case BRCMF_E_IF_DEL:
+ ifp->vif = NULL;
+ mutex_unlock(&event->vif_event_lock);
+ /* event may not be upon user request */
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ wake_up(&event->vif_wq);
+ return 0;
+
+ case BRCMF_E_IF_CHANGE:
+ mutex_unlock(&event->vif_event_lock);
+ wake_up(&event->vif_wq);
+ return 0;
+
+ default:
+ mutex_unlock(&event->vif_event_lock);
+ break;
+ }
+ return -EINVAL;
+}
+
static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
{
conf->frag_threshold = (u32)-1;
@@ -4206,6 +4678,18 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
brcmf_notify_connect_status);
brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
brcmf_notify_sched_scan_results);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_IF,
+ brcmf_notify_vif_event);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_PROBEREQ_MSG,
+ brcmf_p2p_notify_rx_mgmt_p2p_probereq);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_DISC_LISTEN_COMPLETE,
+ brcmf_p2p_notify_listen_complete);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_RX,
+ brcmf_p2p_notify_action_frame_rx);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_COMPLETE,
+ brcmf_p2p_notify_action_tx_complete);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE,
+ brcmf_p2p_notify_action_tx_complete);
}
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
@@ -4261,7 +4745,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
mutex_init(&cfg->usr_sync);
brcmf_init_escan(cfg);
brcmf_init_conf(cfg->conf);
-
+ init_completion(&cfg->vif_disabled);
return err;
}
@@ -4272,6 +4756,12 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
brcmf_deinit_priv_mem(cfg);
}
+static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
+{
+ init_waitqueue_head(&event->vif_wq);
+ mutex_init(&event->vif_event_lock);
+}
+
struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev)
{
@@ -4295,25 +4785,41 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
cfg = wiphy_priv(wiphy);
cfg->wiphy = wiphy;
cfg->pub = drvr;
+ init_vif_event(&cfg->vif_event);
INIT_LIST_HEAD(&cfg->vif_list);
- vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false);
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
wiphy_free(wiphy);
return NULL;
}
+ vif->ifp = ifp;
+ vif->wdev.netdev = ndev;
+ ndev->ieee80211_ptr = &vif->wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
err = wl_init_priv(cfg);
if (err) {
brcmf_err("Failed to init iwm_priv (%d)\n", err);
goto cfg80211_attach_out;
}
-
ifp->vif = vif;
+
+ err = brcmf_p2p_attach(cfg);
+ if (err) {
+ brcmf_err("P2P initilisation failed (%d)\n", err);
+ goto cfg80211_p2p_attach_out;
+ }
+
return cfg;
+cfg80211_p2p_attach_out:
+ wl_deinit_priv(cfg);
+
cfg80211_attach_out:
brcmf_free_vif(vif);
+ wiphy_free(wiphy);
return NULL;
}
@@ -4329,9 +4835,8 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
}
static s32
-brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
+brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
__le32 roamtrigger[2];
__le32 roam_delta[2];
@@ -4382,10 +4887,9 @@ dongle_rom_out:
}
static s32
-brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+brcmf_dongle_scantime(struct brcmf_if *ifp, s32 scan_assoc_time,
s32 scan_unassoc_time, s32 scan_passive_time)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
@@ -4455,6 +4959,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
{
struct net_device *ndev;
struct wireless_dev *wdev;
+ struct brcmf_if *ifp;
s32 power_mode;
s32 err = 0;
@@ -4463,35 +4968,34 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
ndev = cfg_to_ndev(cfg);
wdev = ndev->ieee80211_ptr;
+ ifp = netdev_priv(ndev);
+
+ /* make sure RF is ready for work */
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
- brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
- WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
+ brcmf_dongle_scantime(ifp, WL_SCAN_CHANNEL_TIME,
+ WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
- err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PM,
- power_mode);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, power_mode);
if (err)
goto default_conf_out;
brcmf_dbg(INFO, "power save set to %s\n",
(power_mode ? "enabled" : "disabled"));
- err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
- WL_BEACON_TIMEOUT);
+ err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT);
if (err)
goto default_conf_out;
err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
NULL, NULL);
- if (err && err != -EINPROGRESS)
+ if (err)
goto default_conf_out;
err = brcmf_dongle_probecap(cfg);
if (err)
goto default_conf_out;
- /* -EINPROGRESS: Call commit handler */
-
-default_conf_out:
-
cfg->dongle_up = true;
+default_conf_out:
return err;
@@ -4500,8 +5004,6 @@ default_conf_out:
static s32 __brcmf_cfg80211_up(struct brcmf_if *ifp)
{
set_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
- if (ifp->idx)
- return 0;
return brcmf_config_dongle(ifp->drvr->config);
}
@@ -4556,3 +5058,57 @@ s32 brcmf_cfg80211_down(struct net_device *ndev)
return err;
}
+u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state)
+{
+ struct brcmf_cfg80211_vif *vif;
+	u32 result = 0;
+
+ list_for_each_entry(vif, &cfg->vif_list, list) {
+ if (test_bit(state, &vif->sme_state))
+ result++;
+ }
+ return result;
+}
+
+static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
+ u8 action)
+{
+ u8 evt_action;
+
+ mutex_lock(&event->vif_event_lock);
+ evt_action = event->action;
+ mutex_unlock(&event->vif_event_lock);
+ return evt_action == action;
+}
+
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+ mutex_lock(&event->vif_event_lock);
+ event->vif = vif;
+ event->action = 0;
+ mutex_unlock(&event->vif_event_lock);
+}
+
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+ bool armed;
+
+ mutex_lock(&event->vif_event_lock);
+ armed = event->vif != NULL;
+ mutex_unlock(&event->vif_event_lock);
+
+ return armed;
+}
+
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+ u8 action, ulong timeout)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+ return wait_event_timeout(event->vif_wq,
+ vif_event_equals(event, action), timeout);
+}
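/*
 * Usage sketch (illustrative only, helper name is hypothetical): a caller
 * creating a firmware interface would typically arm the event before
 * issuing the request and then wait for the BRCMF_E_IF notification:
 *
 *	brcmf_cfg80211_arm_vif_event(cfg, vif);
 *	err = brcmf_issue_if_add_request(ifp);	// hypothetical firmware call
 *	if (!err &&
 *	    !brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
 *						   msecs_to_jiffies(1500)))
 *		err = -EIO;
 */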
+
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index e4d9cc7a8e63..8b5d4989906c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -41,6 +41,38 @@
#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
#define IE_MAX_LEN 512
+/* IE TLV processing */
+#define TLV_LEN_OFF 1 /* length offset */
+#define TLV_HDR_LEN 2 /* header length */
+#define TLV_BODY_OFF 2 /* body offset */
+#define TLV_OUI_LEN 3 /* oui id length */
+
+/* 802.11 Mgmt Packet flags */
+#define BRCMF_VNDR_IE_BEACON_FLAG 0x1
+#define BRCMF_VNDR_IE_PRBRSP_FLAG 0x2
+#define BRCMF_VNDR_IE_ASSOCRSP_FLAG 0x4
+#define BRCMF_VNDR_IE_AUTHRSP_FLAG 0x8
+#define BRCMF_VNDR_IE_PRBREQ_FLAG 0x10
+#define BRCMF_VNDR_IE_ASSOCREQ_FLAG 0x20
+/* vendor IE in IW advertisement protocol ID field */
+#define BRCMF_VNDR_IE_IWAPID_FLAG 0x40
+/* allow custom IE id */
+#define BRCMF_VNDR_IE_CUSTOM_FLAG 0x100
+
+/* P2P Action Frames flags (spec ordered) */
+#define BRCMF_VNDR_IE_GONREQ_FLAG 0x001000
+#define BRCMF_VNDR_IE_GONRSP_FLAG 0x002000
+#define BRCMF_VNDR_IE_GONCFM_FLAG 0x004000
+#define BRCMF_VNDR_IE_INVREQ_FLAG 0x008000
+#define BRCMF_VNDR_IE_INVRSP_FLAG 0x010000
+#define BRCMF_VNDR_IE_DISREQ_FLAG 0x020000
+#define BRCMF_VNDR_IE_DISRSP_FLAG 0x040000
+#define BRCMF_VNDR_IE_PRDREQ_FLAG 0x080000
+#define BRCMF_VNDR_IE_PRDRSP_FLAG 0x100000
+
+#define BRCMF_VNDR_IE_P2PAF_SHIFT 12
+
+
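/*
 * Editorial note (assumption): the P2P action frame flags above are
 * spec-ordered, so a P2P public action subtype n (0 = GO negotiation
 * request, 1 = GO negotiation response, ...) maps to its flag as:
 *
 *	flag = BIT(n + BRCMF_VNDR_IE_P2PAF_SHIFT);
 *
 * e.g. n = 0 gives 0x001000, i.e. BRCMF_VNDR_IE_GONREQ_FLAG.
 */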
/**
* enum brcmf_scan_status - dongle scan status
*
@@ -52,11 +84,19 @@ enum brcmf_scan_status {
BRCMF_SCAN_STATUS_ABORT,
};
-/* wi-fi mode */
+/**
+ * enum wl_mode - driver mode of virtual interface.
+ *
+ * @WL_MODE_BSS: connects to BSS.
+ * @WL_MODE_IBSS: operate as ad-hoc.
+ * @WL_MODE_AP: operate as access-point.
+ * @WL_MODE_P2P: provide P2P discovery.
+ */
enum wl_mode {
WL_MODE_BSS,
WL_MODE_IBSS,
- WL_MODE_AP
+ WL_MODE_AP,
+ WL_MODE_P2P
};
/* dongle configuration */
@@ -108,6 +148,7 @@ struct brcmf_cfg80211_profile {
* @BRCMF_VIF_STATUS_READY: ready for operation.
* @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
 * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
+ * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
* @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
* @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
*/
@@ -115,6 +156,7 @@ enum brcmf_vif_status {
BRCMF_VIF_STATUS_READY,
BRCMF_VIF_STATUS_CONNECTING,
BRCMF_VIF_STATUS_CONNECTED,
+ BRCMF_VIF_STATUS_DISCONNECTING,
BRCMF_VIF_STATUS_AP_CREATING,
BRCMF_VIF_STATUS_AP_CREATED
};
@@ -122,16 +164,22 @@ enum brcmf_vif_status {
/**
* struct vif_saved_ie - holds saved IEs for a virtual interface.
*
+ * @probe_req_ie: IE info for probe request.
 * @probe_res_ie: IE info for probe response.
 * @beacon_ie: IE info for beacon frame.
+ * @assoc_req_ie: IE info for association request.
+ * @probe_req_ie_len: IE info length for probe request.
 * @probe_res_ie_len: IE info length for probe response.
 * @beacon_ie_len: IE info length for beacon frame.
+ * @assoc_req_ie_len: IE info length for association request.
*/
struct vif_saved_ie {
+ u8 probe_req_ie[IE_MAX_LEN];
u8 probe_res_ie[IE_MAX_LEN];
u8 beacon_ie[IE_MAX_LEN];
+ u8 assoc_req_ie[IE_MAX_LEN];
+ u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 beacon_ie_len;
+ u32 assoc_req_ie_len;
};
/**
@@ -145,6 +193,7 @@ struct vif_saved_ie {
* @sme_state: SME state using enum brcmf_vif_status bits.
* @pm_block: power-management blocked.
* @list: linked list.
+ * @mgmt_rx_reg: registered rx mgmt frame types.
*/
struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
@@ -156,6 +205,7 @@ struct brcmf_cfg80211_vif {
bool pm_block;
struct vif_saved_ie saved_ie;
struct list_head list;
+ u16 mgmt_rx_reg;
};
/* association inform */
@@ -189,6 +239,9 @@ struct escan_info {
u8 escan_buf[WL_ESCAN_BUF_SIZE];
struct wiphy *wiphy;
struct net_device *ndev;
+ s32 (*run)(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct cfg80211_scan_request *request, u16 action);
};
/**
@@ -273,10 +326,27 @@ struct brcmf_pno_scanresults_le {
};
/**
+ * struct brcmf_cfg80211_vif_event - virtual interface event information.
+ *
+ * @vif_wq: waitqueue awaiting interface event from firmware.
+ * @vif_event_lock: protects other members in this structure.
+ * @action: either add, change, or delete.
+ * @vif: virtual interface object related to the event.
+ */
+struct brcmf_cfg80211_vif_event {
+ wait_queue_head_t vif_wq;
+ struct mutex vif_event_lock;
+ u8 action;
+ struct brcmf_cfg80211_vif *vif;
+};
+
+/**
* struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
*
* @wiphy: wiphy object for cfg80211 interface.
* @conf: dongle configuration.
+ * @p2p: peer-to-peer specific information.
* @scan_request: cfg80211 scan request object.
* @usr_sync: mainly for dongle up/down synchronization.
* @bss_list: bss_list holding scanned ap information.
@@ -304,10 +374,12 @@ struct brcmf_pno_scanresults_le {
* @escan_ioctl_buf: dongle command buffer for escan commands.
* @vif_list: linked list of vif instances.
* @vif_cnt: number of vif instances.
+ * @vif_event: vif event signalling.
+ * @vif_disabled: completion to signal disabling of a vif.
*/
struct brcmf_cfg80211_info {
struct wiphy *wiphy;
struct brcmf_cfg80211_conf *conf;
+ struct brcmf_p2p_info p2p;
struct cfg80211_scan_request *scan_request;
struct mutex usr_sync;
struct brcmf_scan_results *bss_list;
@@ -335,6 +407,21 @@ struct brcmf_cfg80211_info {
u8 *escan_ioctl_buf;
struct list_head vif_list;
u8 vif_cnt;
+ struct brcmf_cfg80211_vif_event vif_event;
+ struct completion vif_disabled;
+};
+
+/**
+ * struct brcmf_tlv - tag_ID/length/value_buffer tuple.
+ *
+ * @id: tag identifier.
+ * @len: number of bytes in value buffer.
+ * @data: value buffer.
+ */
+struct brcmf_tlv {
+ u8 id;
+ u8 len;
+ u8 data[1];
};
static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
@@ -389,4 +476,26 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
s32 brcmf_cfg80211_up(struct net_device *ndev);
s32 brcmf_cfg80211_down(struct net_device *ndev);
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+ enum nl80211_iftype type,
+ bool pm_block);
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
+
+s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+ const u8 *vndr_ie_buf, u32 vndr_ie_len);
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
+struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+u16 channel_to_chanspec(struct ieee80211_channel *ch);
+u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif);
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+ u8 action, ulong timeout);
+s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort);
+void brcmf_set_mpc(struct net_device *ndev, int mpc);
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+
#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 1de94f30564f..1585cc5bf866 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -961,7 +961,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
/* if acked then clear bit and free packet */
if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
&& isset(bitmap, bindex)) {
- ini->tx_in_transit--;
ini->txretry[index] = 0;
/*
@@ -990,7 +989,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
if (retry && (ini->txretry[index] < (int)retry_limit)) {
int ret;
ini->txretry[index]++;
- ini->tx_in_transit--;
ret = brcms_c_txfifo(wlc, queue, p);
/*
* We shouldn't be out of space in the DMA
@@ -1000,7 +998,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
WARN_ONCE(ret, "queue %d out of txds\n", queue);
} else {
/* Retry timeout */
- ini->tx_in_transit--;
ieee80211_tx_info_clear_status(tx_info);
tx_info->status.ampdu_ack_len = 0;
tx_info->status.ampdu_len = 1;
@@ -1009,8 +1006,8 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
brcms_dbg_ht(wlc->hw->d11core,
- "BA Timeout, seq %d, in_transit %d\n",
- seq, ini->tx_in_transit);
+ "BA Timeout, seq %d\n",
+ seq);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
p);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index a90b72202ec5..10ee314c4229 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -183,8 +183,7 @@ static bool brcms_c_country_valid(const char *ccode)
* chars.
*/
if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A &&
- (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A &&
- ccode[2] == '\0'))
+ (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A))
return false;
/*
@@ -670,7 +669,7 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
const struct ieee80211_reg_rule *rule;
- int band, i, ret;
+ int band, i;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
sband = wiphy->bands[band];
@@ -685,9 +684,8 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- ret = freq_reg_info(wiphy, ch->center_freq,
- 0, &rule);
- if (ret)
+ rule = freq_reg_info(wiphy, ch->center_freq);
+ if (IS_ERR(rule))
continue;
if (!(rule->flags & NL80211_RRF_NO_IBSS))
@@ -703,8 +701,8 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
}
}
-static int brcms_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void brcms_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct brcms_info *wl = hw->priv;
@@ -745,8 +743,6 @@ static int brcms_reg_notifier(struct wiphy *wiphy,
if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
brcms_c_japan_ccode(request->alpha2));
-
- return 0;
}
void brcms_c_regd_init(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/brcm80211/brcmsmac/debug.h
index 796836b0f469..822781cf15d4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/debug.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012 Canonical Ltd.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 1fbd8ecbe2ea..c6451c61407a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -36,6 +36,7 @@
#include "debug.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
/* Flags we support */
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -362,8 +363,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return -EOPNOTSUPP;
}
+ spin_lock_bh(&wl->lock);
+ memcpy(wl->pub->cur_etheraddr, vif->addr, sizeof(vif->addr));
wl->mute_tx = false;
brcms_c_mute(wl->wlc, false);
+ spin_unlock_bh(&wl->lock);
return 0;
}
@@ -539,9 +543,8 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ARP_FILTER) {
/* Hardware ARP filter address list or state changed */
- brcms_err(core, "%s: arp filtering: enabled %s, count %d"
- " (implement)\n", __func__, info->arp_filter_enabled ?
- "true" : "false", info->arp_addr_cnt);
+ brcms_err(core, "%s: arp filtering: %d addresses"
+ " (implement)\n", __func__, info->arp_addr_cnt);
}
if (changed & BSS_CHANGED_QOS) {
@@ -668,7 +671,9 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
spin_lock_bh(&wl->lock);
brcms_c_ampdu_flush(wl->wlc, sta, tid);
spin_unlock_bh(&wl->lock);
@@ -708,16 +713,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
}
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+ bool result;
+
+ spin_lock_bh(&wl->lock);
+ result = brcms_c_tx_flush_completed(wl->wlc);
+ spin_unlock_bh(&wl->lock);
+ return result;
+}
+
static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
{
struct brcms_info *wl = hw->priv;
+ int ret;
no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
- /* wait for packet queue and dma fifos to run empty */
- spin_lock_bh(&wl->lock);
- brcms_c_wait_for_tx_completion(wl->wlc, drop);
- spin_unlock_bh(&wl->lock);
+ ret = wait_event_timeout(wl->tx_flush_wq,
+ brcms_tx_flush_completed(wl),
+ msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+ brcms_dbg_mac80211(wl->wlc->hw->d11core,
+ "ret=%d\n", jiffies_to_msecs(ret));
}
static const struct ieee80211_ops brcms_ops = {
@@ -772,6 +790,7 @@ void brcms_dpc(unsigned long data)
done:
spin_unlock_bh(&wl->lock);
+ wake_up(&wl->tx_flush_wq);
}
/*
@@ -1020,6 +1039,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
atomic_set(&wl->callbacks, 0);
+ init_waitqueue_head(&wl->tx_flush_wq);
+
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
@@ -1407,9 +1428,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
#endif
t->ms = ms;
t->periodic = (bool) periodic;
- t->set = true;
-
- atomic_inc(&t->wl->callbacks);
+ if (!t->set) {
+ t->set = true;
+ atomic_inc(&t->wl->callbacks);
+ }
ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
}
@@ -1608,13 +1630,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
spin_lock_bh(&wl->lock);
return blocked;
}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
- spin_unlock_bh(&wl->lock);
- msleep(ms);
- spin_lock_bh(&wl->lock);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 9358bd5ebd35..947ccacf43e6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,6 +68,8 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
+ /* tx flush */
+ wait_queue_head_t tx_flush_wq;
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
extern void brcms_free_timer(struct brcms_timer *timer);
extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
extern void brcms_fatal_error(struct brcms_info *wl);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 17594de4199e..8ef02dca8f8c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -101,8 +101,6 @@
#define DOT11_RTS_LEN 16
#define DOT11_CTS_LEN 10
#define DOT11_BA_BITMAP_LEN 128
-#define DOT11_MIN_BEACON_PERIOD 1
-#define DOT11_MAX_BEACON_PERIOD 0xFFFF
#define DOT11_MAXNUMFRAGS 16
#define DOT11_MAX_FRAG_LEN 2346
@@ -1027,7 +1025,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
static bool
brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
- bool morepending = false;
struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
@@ -1041,23 +1038,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs = &txstatus;
core = wlc_hw->d11core;
*fatal = false;
- s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
- while (!(*fatal)
- && (s1 & TXS_V)) {
- /* !give others some time to run! */
- if (n >= max_tx_num) {
- morepending = true;
- break;
- }
+ while (n < max_tx_num) {
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (s1 == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
*fatal = true;
return false;
}
- s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+ /* only process when valid */
+ if (!(s1 & TXS_V))
+ break;
+ s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1059,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
- s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+		if (*fatal)
+ return false;
n++;
}
- if (*fatal)
- return false;
-
- return morepending;
+ return n >= max_tx_num;
}
static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -2473,6 +2464,7 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
{
static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+ u8 *ethaddr = wlc_hw->wlc->pub->cur_etheraddr;
if (mute_tx) {
/* suspend tx fifos */
@@ -2482,8 +2474,7 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);
/* zero the address match register so we do not send ACKs */
- brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
- null_ether_addr);
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, null_ether_addr);
} else {
/* resume tx fifos */
brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
@@ -2492,8 +2483,7 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);
/* Restore address */
- brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
- wlc_hw->etheraddr);
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, ethaddr);
}
wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
@@ -3148,8 +3138,7 @@ void brcms_c_reset(struct brcms_c_info *wlc)
brcms_c_statsupd(wlc);
/* reset our snapshot of macstat counters */
- memset((char *)wlc->core->macstat_snapshot, 0,
- sizeof(struct macstat));
+ memset(wlc->core->macstat_snapshot, 0, sizeof(struct macstat));
brcms_b_reset(wlc->hw);
}
@@ -4062,7 +4051,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
return;
}
- memset((char *)&acp_shm, 0, sizeof(struct shm_acparams));
+ memset(&acp_shm, 0, sizeof(struct shm_acparams));
/* fill in shm ac params struct */
acp_shm.txop = params->txop;
/* convert from units of 32us to us for ucode */
@@ -4778,7 +4767,7 @@ static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
struct brcms_bss_info *bi = wlc->default_bss;
/* init default and target BSS with some sane initial values */
- memset((char *)(bi), 0, sizeof(struct brcms_bss_info));
+ memset(bi, 0, sizeof(*bi));
bi->beacon_period = BEACON_INTERVAL_DEFAULT;
/* fill the default channel as the first valid channel
@@ -5307,7 +5296,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
/* Clear rateset override */
- memset(&rs, 0, sizeof(struct brcms_c_rateset));
+ memset(&rs, 0, sizeof(rs));
switch (gmode) {
case GMODE_LEGACY_B:
@@ -5530,7 +5519,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
if (rs->count > BRCMS_NUMRATES)
return -ENOBUFS;
- memset(&internal_rs, 0, sizeof(struct brcms_c_rateset));
+ memset(&internal_rs, 0, sizeof(internal_rs));
/* Copy only legacy rateset section */
internal_rs.count = rs->count;
@@ -5556,8 +5545,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period)
{
- if (period < DOT11_MIN_BEACON_PERIOD ||
- period > DOT11_MAX_BEACON_PERIOD)
+ if (period == 0)
return -EINVAL;
wlc->default_bss->beacon_period = period;
@@ -5634,7 +5622,7 @@ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (!strcmp(wlc->modulecb[i].name, name) &&
(wlc->modulecb[i].hdl == hdl)) {
- memset(&wlc->modulecb[i], 0, sizeof(struct modulecb));
+ memset(&wlc->modulecb[i], 0, sizeof(wlc->modulecb[i]));
return 0;
}
}
@@ -6454,10 +6442,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
&& (!is_mcs_rate(rspec[k]))) {
- brcms_err(wlc->hw->d11core,
- "wl%d: %s: IEEE80211_TX_"
- "RC_MCS != is_mcs_rate(rspec)\n",
- wlc->pub->unit, __func__);
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s: IEEE80211_TX_RC_MCS != is_mcs_rate(rspec)\n",
+ wlc->pub->unit, __func__);
}
if (is_mcs_rate(rspec[k])) {
@@ -6690,11 +6677,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
(struct ofdm_phy_hdr *) rts_plcp) :
rts_plcp[0]) << 8;
} else {
- memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
- memset((char *)&txh->rts_frame, 0,
- sizeof(struct ieee80211_rts));
- memset((char *)txh->RTSPLCPFallback, 0,
- sizeof(txh->RTSPLCPFallback));
+ memset(txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
+ memset(&txh->rts_frame, 0, sizeof(struct ieee80211_rts));
+ memset(txh->RTSPLCPFallback, 0, sizeof(txh->RTSPLCPFallback));
txh->RTSDurFallback = 0;
}
@@ -6849,21 +6834,19 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
wlc->fragthresh[queue] =
(u16) newfragthresh;
} else {
- brcms_err(wlc->hw->d11core,
- "wl%d: %s txop invalid "
- "for rate %d\n",
- wlc->pub->unit, fifo_names[queue],
- rspec2rate(rspec[0]));
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s txop invalid for rate %d\n",
+ wlc->pub->unit, fifo_names[queue],
+ rspec2rate(rspec[0]));
}
if (dur > wlc->edcf_txop[ac])
- brcms_err(wlc->hw->d11core,
- "wl%d: %s: %s txop "
- "exceeded phylen %d/%d dur %d/%d\n",
- wlc->pub->unit, __func__,
- fifo_names[queue],
- phylen, wlc->fragthresh[queue],
- dur, wlc->edcf_txop[ac]);
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n",
+ wlc->pub->unit, __func__,
+ fifo_names[queue],
+ phylen, wlc->fragthresh[queue],
+ dur, wlc->edcf_txop[ac]);
}
}
@@ -7338,7 +7321,7 @@ brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
*len = hdr_len + body_len;
/* format PHY and MAC headers */
- memset((char *)buf, 0, hdr_len);
+ memset(buf, 0, hdr_len);
plcp = (struct cck_phy_hdr *) buf;
@@ -7409,9 +7392,13 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
struct brcms_bss_cfg *cfg,
bool suspend)
{
- u16 prb_resp[BCN_TMPL_LEN / 2];
+ u16 *prb_resp;
int len = BCN_TMPL_LEN;
+ prb_resp = kmalloc(BCN_TMPL_LEN, GFP_ATOMIC);
+ if (!prb_resp)
+ return;
+
/*
* write the probe response to hardware, or save in
* the config structure
@@ -7445,6 +7432,8 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
if (suspend)
brcms_c_enable_mac(wlc);
+
+ kfree(prb_resp);
}
void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
@@ -7518,25 +7507,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
return wlc->band->bandunit;
}
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
{
- int timeout = 20;
int i;
/* Kick DMA to send any pending AMPDU */
for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
if (wlc->hw->di[i])
- dma_txflush(wlc->hw->di[i]);
-
- /* wait for queue and DMA fifos to run dry */
- while (brcms_txpktpendtot(wlc) > 0) {
- brcms_msleep(wlc->wl, 1);
-
- if (--timeout == 0)
- break;
- }
+ dma_kick_tx(wlc->hw->di[i]);
- WARN_ON_ONCE(timeout == 0);
+ return !brcms_txpktpendtot(wlc);
}
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
@@ -7633,7 +7613,7 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
uint n = 0;
uint bound_limit = bound ? RXBND : -1;
- bool morepending;
+ bool morepending = false;
skb_queue_head_init(&recv_frames);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 606b534347bc..21a824232478 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1343,13 +1343,13 @@ static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
wlc_lcnphy_rx_gain_override_enable(pi, true);
wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
- usleep_range(500, 500);
+ udelay(500);
write_radio_reg(pi, RADIO_2064_REG112, 0);
if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
return false;
wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
- usleep_range(500, 500);
+ udelay(500);
write_radio_reg(pi, RADIO_2064_REG112, 0);
if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
return false;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 4fb2834f4e64..b0f14b7b8616 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
extern void brcms_c_scan_start(struct brcms_c_info *wlc);
extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
- bool drop);
extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/scb.h b/drivers/net/wireless/brcm80211/brcmsmac/scb.h
index 51c79c7239b7..3a3d73699f83 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/scb.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/scb.h
@@ -36,7 +36,6 @@
/* structure to store per-tid state for the ampdu initiator */
struct scb_ampdu_tid_ini {
- u8 tx_in_transit; /* number of pending mpdus in transit in driver */
u8 tid; /* initiator tid for easy lookup */
/* tx retry count; indexed by seq modulo */
u8 txretry[AMPDU_TX_BA_MAX_WSIZE];
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index c6ea995750db..dd9a18f8dbca 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -376,7 +376,7 @@ int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
entry = kmalloc(sizeof(struct mac_entry), GFP_KERNEL);
if (entry == NULL)
- return -1;
+ return -ENOMEM;
memcpy(entry->addr, mac, ETH_ALEN);
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 2715b101aded..91c0cb3c368e 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -137,7 +137,7 @@ config IPW2200_PROMISCUOUS
config IPW2200_QOS
bool "Enable QoS support"
- depends on IPW2200 && EXPERIMENTAL
+ depends on IPW2200
config IPW2200_DEBUG
bool "Enable full debugging output in IPW2200 module."
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index d92b21a8e597..cb066f62879d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2181,9 +2181,10 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
}
-static void send_scan_event(void *data)
+static void ipw2100_scan_event(struct work_struct *work)
{
- struct ipw2100_priv *priv = data;
+ struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv,
+ scan_event.work);
union iwreq_data wrqu;
wrqu.data.length = 0;
@@ -2191,18 +2192,6 @@ static void send_scan_event(void *data)
wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
}
-static void ipw2100_scan_event_later(struct work_struct *work)
-{
- send_scan_event(container_of(work, struct ipw2100_priv,
- scan_event_later.work));
-}
-
-static void ipw2100_scan_event_now(struct work_struct *work)
-{
- send_scan_event(container_of(work, struct ipw2100_priv,
- scan_event_now));
-}
-
static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
{
IPW_DEBUG_SCAN("scan complete\n");
@@ -2212,13 +2201,11 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
- if (!delayed_work_pending(&priv->scan_event_later))
- schedule_delayed_work(&priv->scan_event_later,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
priv->user_requested_scan = 0;
- cancel_delayed_work(&priv->scan_event_later);
- schedule_work(&priv->scan_event_now);
+ mod_delayed_work(system_wq, &priv->scan_event, 0);
}
}
@@ -4459,8 +4446,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv)
cancel_delayed_work_sync(&priv->wx_event_work);
cancel_delayed_work_sync(&priv->hang_check);
cancel_delayed_work_sync(&priv->rf_kill);
- cancel_work_sync(&priv->scan_event_now);
- cancel_delayed_work_sync(&priv->scan_event_later);
+ cancel_delayed_work_sync(&priv->scan_event);
}
static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -4478,13 +4464,10 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
return err;
}
- priv->tx_buffers =
- kmalloc(TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet),
- GFP_ATOMIC);
+ priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH,
+ sizeof(struct ipw2100_tx_packet),
+ GFP_ATOMIC);
if (!priv->tx_buffers) {
- printk(KERN_ERR DRV_NAME
- ": %s: alloc failed form tx buffers.\n",
- priv->net_dev->name);
bd_queue_free(priv, &priv->tx_queue);
return -ENOMEM;
}
@@ -6195,8 +6178,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
- INIT_WORK(&priv->scan_event_now, ipw2100_scan_event_now);
- INIT_DELAYED_WORK(&priv->scan_event_later, ipw2100_scan_event_later);
+ INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
ipw2100_irq_tasklet, (unsigned long)priv);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 5fe17cbab1f3..c6d78790cb0d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -577,8 +577,7 @@ struct ipw2100_priv {
struct delayed_work wx_event_work;
struct delayed_work hang_check;
struct delayed_work rf_kill;
- struct work_struct scan_event_now;
- struct delayed_work scan_event_later;
+ struct delayed_work scan_event;
int user_requested_scan;
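
[Editor's note: a minimal sketch, not part of the patch, of the workqueue consolidation the ipw2100 hunks above perform, assuming a kernel build context; the example_priv/example_scan_complete names are illustrative only. mod_delayed_work() with a zero delay queues the work immediately, or pulls an already-pending delayed instance forward, which is why the separate scan_event_now work_struct and the delayed_work_pending() check can both be dropped.]

#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_priv {
	struct delayed_work scan_event;
	int user_requested_scan;
};

static void example_scan_complete(struct example_priv *priv)
{
	if (!priv->user_requested_scan) {
		/* unsolicited scans: debounce events by ~4 s */
		schedule_delayed_work(&priv->scan_event,
				      round_jiffies_relative(msecs_to_jiffies(4000)));
	} else {
		priv->user_requested_scan = 0;
		/* user-requested scans: fire (or re-arm) immediately */
		mod_delayed_work(system_wq, &priv->scan_event, 0);
	}
}
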
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 844f201b7b70..d96257b79a84 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -4480,18 +4480,11 @@ static void handle_scan_event(struct ipw_priv *priv)
{
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
- if (!delayed_work_pending(&priv->scan_event))
- schedule_delayed_work(&priv->scan_event,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
- union iwreq_data wrqu;
-
priv->user_requested_scan = 0;
- cancel_delayed_work(&priv->scan_event);
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
+ mod_delayed_work(system_wq, &priv->scan_event, 0);
}
}
@@ -11327,7 +11320,6 @@ static int ipw_up(struct ipw_priv *priv)
if (!(priv->config & CFG_CUSTOM_MAC))
eeprom_parse_mac(priv, priv->mac_addr);
memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
- memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
ipw_set_geo(priv);
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index d604b4036a76..3630a41df50d 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -572,26 +572,11 @@ il3945_tx_skb(struct il_priv *il,
il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
/* Total # bytes to be transmitted */
- len = (u16) skb->len;
- tx_cmd->len = cpu_to_le16(len);
+ tx_cmd->len = cpu_to_le16((u16) skb->len);
- il_update_stats(il, true, fc, len);
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
- D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
- il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
- ieee80211_hdrlen(fc));
-
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -610,14 +595,8 @@ il3945_tx_skb(struct il_priv *il,
* within command buffer array. */
txcmd_phys =
pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
- /* we do not map meta data ... so we can safely access address to
- * provide to unmap command*/
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, len);
-
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+ goto drop_unlock;
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -626,10 +605,34 @@ il3945_tx_skb(struct il_priv *il,
phys_addr =
pci_map_single(il->pci_dev, skb->data + hdr_len, len,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+ goto drop_unlock;
+ }
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
+ if (len)
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
U32_PAD(len));
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
}
+ il_update_stats(il, true, fc, skb->len);
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+ ieee80211_hdrlen(fc));
+
/* Tell device the write idx *just past* this latest filled TFD */
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
il_txq_update_write_ptr(il, txq);
@@ -1001,12 +1004,12 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
struct list_head *element;
struct il_rx_buf *rxb;
struct page *page;
+ dma_addr_t page_dma;
unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
spin_lock_irqsave(&rxq->lock, flags);
-
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
return;
@@ -1035,26 +1038,34 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
break;
}
+ /* Get physical address of RB/SKB */
+ page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+ __free_pages(page, il->hw_params.rx_page_order);
+ break;
+ }
+
spin_lock_irqsave(&rxq->lock, flags);
+
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
+ pci_unmap_page(il->pci_dev, page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
__free_pages(page, il->hw_params.rx_page_order);
return;
}
+
element = rxq->rx_used.next;
rxb = list_entry(element, struct il_rx_buf, list);
list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
rxb->page = page;
- /* Get physical address of RB/SKB */
- rxb->page_dma =
- pci_map_page(il->pci_dev, page, 0,
- PAGE_SIZE << il->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
-
- spin_lock_irqsave(&rxq->lock, flags);
-
+ rxb->page_dma = page_dma;
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
il->alloc_rxb_page++;
@@ -1284,8 +1295,15 @@ il3945_rx_handle(struct il_priv *il)
pci_map_page(il->pci_dev, rxb->page, 0,
PAGE_SIZE << il->hw_params.
rx_page_order, PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+ if (unlikely(pci_dma_mapping_error(il->pci_dev,
+ rxb->page_dma))) {
+ __il_free_pages(il, rxb->page);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
@@ -3273,7 +3291,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
if (count) {
char *p = buffer;
- strncpy(buffer, buf, min(sizeof(buffer), count));
+ strlcpy(buffer, buf, sizeof(buffer));
channel = simple_strtoul(p, NULL, 0);
if (channel)
params.channel = channel;
@@ -3474,6 +3492,7 @@ struct ieee80211_ops il3945_mac_ops = {
.sta_add = il3945_mac_sta_add,
.sta_remove = il_mac_sta_remove,
.tx_last_beacon = il_mac_tx_last_beacon,
+ .flush = il_mac_flush,
};
static int
@@ -3548,7 +3567,8 @@ il3945_setup_mac(struct il_priv *il)
hw->vif_data_size = sizeof(struct il_vif_priv);
/* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
+ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
@@ -3557,6 +3577,8 @@ il3945_setup_mac(struct il_priv *il)
WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
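
[Editor's note: a condensed sketch, kernel context assumed and names hypothetical, of the DMA-mapping error check the iwlegacy hunks above add after each pci_map_single()/pci_map_page() call: the returned handle must be validated with pci_dma_mapping_error() before it is attached to a TFD or RBD, and the frame dropped (or the page freed) on failure.]

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb for device DMA; on failure the caller drops the frame
 * instead of handing an invalid DMA address to the hardware. */
static int example_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			      dma_addr_t *phys)
{
	*phys = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, *phys)))
		return -ENOMEM;
	return 0;
}
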
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index c3fbf6717564..7941eb3a0166 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -319,6 +319,7 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
struct list_head *element;
struct il_rx_buf *rxb;
struct page *page;
+ dma_addr_t page_dma;
unsigned long flags;
gfp_t gfp_mask = priority;
@@ -356,33 +357,35 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
return;
}
+ /* Get physical address of the RB */
+ page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+ __free_pages(page, il->hw_params.rx_page_order);
+ break;
+ }
+
spin_lock_irqsave(&rxq->lock, flags);
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
+ pci_unmap_page(il->pci_dev, page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
__free_pages(page, il->hw_params.rx_page_order);
return;
}
+
element = rxq->rx_used.next;
rxb = list_entry(element, struct il_rx_buf, list);
list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma =
- pci_map_page(il->pci_dev, page, 0,
- PAGE_SIZE << il->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
+ rxb->page = page;
+ rxb->page_dma = page_dma;
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
il->alloc_rxb_page++;
@@ -725,6 +728,16 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+ /* We know which subframes of an A-MPDU belong
+ * together since we get a single PHY response
+ * from the firmware for all of them.
+ */
+
+ rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status.ampdu_reference = il->_4965.ampdu_ref;
+ }
+
il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
&rx_status);
}
@@ -736,6 +749,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
struct il_rx_pkt *pkt = rxb_addr(rxb);
il->_4965.last_phy_res_valid = true;
+ il->_4965.ampdu_ref++;
memcpy(&il->_4965.last_phy_res, pkt->u.raw,
sizeof(struct il_rx_phy_res));
}
@@ -1779,8 +1793,7 @@ il4965_tx_skb(struct il_priv *il,
memcpy(tx_cmd->hdr, hdr, hdr_len);
/* Total # bytes to be transmitted */
- len = (u16) skb->len;
- tx_cmd->len = cpu_to_le16(len);
+ tx_cmd->len = cpu_to_le16((u16) skb->len);
if (info->control.hw_key)
il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
@@ -1790,7 +1803,6 @@ il4965_tx_skb(struct il_priv *il,
il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
- il_update_stats(il, true, fc, len);
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -1812,18 +1824,8 @@ il4965_tx_skb(struct il_priv *il,
txcmd_phys =
pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+ goto drop_unlock;
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -1832,8 +1834,24 @@ il4965_tx_skb(struct il_priv *il,
phys_addr =
pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+ goto drop_unlock;
+ }
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ if (secondlen)
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
0, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
}
scratch_phys =
@@ -1846,6 +1864,8 @@ il4965_tx_skb(struct il_priv *il,
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+ il_update_stats(il, true, fc, skb->len);
+
D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
@@ -4281,8 +4301,16 @@ il4965_rx_handle(struct il_priv *il)
pci_map_page(il->pci_dev, rxb->page, 0,
PAGE_SIZE << il->hw_params.
rx_page_order, PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+
+ if (unlikely(pci_dma_mapping_error(il->pci_dev,
+ rxb->page_dma))) {
+ __il_free_pages(il, rxb->page);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
@@ -5711,9 +5739,9 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
/* Tell mac80211 our characteristics */
hw->flags =
IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
if (il->cfg->sku & IL_SKU_N)
hw->flags |=
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
@@ -5968,7 +5996,9 @@ il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
D_HT("start Tx\n");
ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
D_HT("stop Tx\n");
ret = il4965_tx_agg_stop(il, vif, sta, tid);
if (test_bit(S_EXIT_PENDING, &il->status))
@@ -6306,6 +6336,7 @@ const struct ieee80211_ops il4965_mac_ops = {
.sta_remove = il_mac_sta_remove,
.channel_switch = il4965_mac_channel_switch,
.tx_last_beacon = il_mac_tx_last_beacon,
+ .flush = il_mac_flush,
};
static int
@@ -6553,6 +6584,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
il4965_prepare_card_hw(il);
if (!il->hw_ready) {
IL_WARN("Failed, HW not ready\n");
+ err = -EIO;
goto out_iounmap;
}
@@ -6569,9 +6601,6 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_eeprom;
- if (err)
- goto out_free_eeprom;
-
/* extract MAC Address */
il4965_eeprom_get_mac(il, il->addresses[0].addr);
D_INFO("MAC address: %pM\n", il->addresses[0].addr);
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index f3b8e91aa3dc..e8324b5e5bfe 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -1183,8 +1183,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
- WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
index 5db11714e047..91eb2d07fdb8 100644
--- a/drivers/net/wireless/iwlegacy/4965.c
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -1748,7 +1748,6 @@ static void
il4965_post_associate(struct il_priv *il)
{
struct ieee80211_vif *vif = il->vif;
- struct ieee80211_conf *conf = NULL;
int ret = 0;
if (!vif || !il->is_open)
@@ -1759,8 +1758,6 @@ il4965_post_associate(struct il_priv *il)
il_scan_cancel_timeout(il, 200);
- conf = &il->hw->conf;
-
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
il_commit_rxon(il);
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 25dd7d28d022..3b6c99400892 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -1134,8 +1134,9 @@ struct il_wep_cmd {
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
-#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70
#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
+#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7)
#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 7e16d10a7f14..e006ea831320 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1830,32 +1830,30 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
__le32 sta_flags;
- u8 mimo_ps_mode;
if (!sta || !sta_ht_inf->ht_supported)
goto done;
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
D_ASSOC("spatial multiplexing power save mode: %s\n",
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+ (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
+ (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
"disabled");
sta_flags = il->stations[idx].sta.station_flags;
sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_STATIC:
sta_flags |= STA_FLG_MIMO_DIS_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ case IEEE80211_SMPS_DYNAMIC:
sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
+ case IEEE80211_SMPS_OFF:
break;
default:
- IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
break;
}
@@ -3162,18 +3160,23 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
idx, il->cmd_queue);
}
#endif
- txq->need_update = 1;
-
- if (il->ops->txq_update_byte_cnt_tbl)
- /* Set up entry in queue's byte count circular buffer */
- il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
phys_addr =
pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
+ idx = -ENOMEM;
+ goto out;
+ }
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, fix_size);
+ txq->need_update = 1;
+
+ if (il->ops->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
+
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
U32_PAD(cmd->len));
@@ -3181,6 +3184,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
il_txq_update_write_ptr(il, txq);
+out:
spin_unlock_irqrestore(&il->hcmd_lock, flags);
return idx;
}
@@ -3958,17 +3962,21 @@ il_connection_init_rx_config(struct il_priv *il)
memset(&il->staging, 0, sizeof(il->staging));
- if (!il->vif) {
+ switch (il->iw_mode) {
+ case NL80211_IFTYPE_UNSPECIFIED:
il->staging.dev_type = RXON_DEV_TYPE_ESS;
- } else if (il->vif->type == NL80211_IFTYPE_STATION) {
+ break;
+ case NL80211_IFTYPE_STATION:
il->staging.dev_type = RXON_DEV_TYPE_ESS;
il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
- } else if (il->vif->type == NL80211_IFTYPE_ADHOC) {
+ break;
+ case NL80211_IFTYPE_ADHOC:
il->staging.dev_type = RXON_DEV_TYPE_IBSS;
il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
il->staging.filter_flags =
RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
- } else {
+ break;
+ default:
IL_ERR("Unsupported interface type %d\n", il->vif->type);
return;
}
@@ -4550,8 +4558,7 @@ out:
EXPORT_SYMBOL(il_mac_add_interface);
static void
-il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
- bool mode_change)
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
lockdep_assert_held(&il->mutex);
@@ -4560,9 +4567,7 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
il_force_scan_end(il);
}
- if (!mode_change)
- il_set_mode(il);
-
+ il_set_mode(il);
}
void
@@ -4575,8 +4580,8 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
WARN_ON(il->vif != vif);
il->vif = NULL;
-
- il_teardown_interface(il, vif, false);
+ il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
+ il_teardown_interface(il, vif);
memset(il->bssid, 0, ETH_ALEN);
D_MAC80211("leave\n");
@@ -4685,18 +4690,10 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
/* success */
- il_teardown_interface(il, vif, true);
vif->type = newtype;
vif->p2p = false;
- err = il_set_mode(il);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
+ il->iw_mode = newtype;
+ il_teardown_interface(il, vif);
err = 0;
out:
@@ -4707,6 +4704,42 @@ out:
}
EXPORT_SYMBOL(il_mac_change_interface);
+void
+il_mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ int i;
+
+ mutex_lock(&il->mutex);
+ D_MAC80211("enter\n");
+
+ if (il->txq == NULL)
+ goto out;
+
+ for (i = 0; i < il->hw_params.max_txq_num; i++) {
+ struct il_queue *q;
+
+ if (i == il->cmd_queue)
+ continue;
+
+ q = &il->txq[i].q;
+ if (q->read_ptr == q->write_ptr)
+ continue;
+
+ if (time_after(jiffies, timeout)) {
+ IL_ERR("Failed to flush queue %d\n", q->id);
+ break;
+ }
+
+ msleep(20);
+ }
+out:
+ D_MAC80211("leave\n");
+ mutex_unlock(&il->mutex);
+}
+EXPORT_SYMBOL(il_mac_flush);
+
/*
* On every watchdog tick we check (latest) time stamp. If it does not
* change during timeout period and queue is not empty we reset firmware.
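
[Editor's note: a stripped-down sketch, assuming a kernel context, of the drain-with-timeout idiom the new il_mac_flush() callback above relies on; queue_is_empty() stands in for the driver's read_ptr == write_ptr check and is not a real API.]

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Poll until the queue drains or the deadline passes. */
static bool example_wait_drained(bool (*queue_is_empty)(void *), void *q)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	while (!queue_is_empty(q)) {
		if (time_after(jiffies, timeout))
			return false;	/* give up and report a flush failure */
		msleep(20);
	}
	return true;
}
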
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index a9a569f432fb..96f2025d936e 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1356,6 +1356,7 @@ struct il_priv {
struct {
struct il_rx_phy_res last_phy_res;
bool last_phy_res_valid;
+ u32 ampdu_ref;
struct completion firmware_loading_complete;
@@ -1723,6 +1724,7 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
+void il_mac_flush(struct ieee80211_hw *hw, bool drop);
int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 5cf43236421e..ba319cba3f1e 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -43,8 +43,20 @@ config IWLWIFI
module will be called iwlwifi.
config IWLDVM
- tristate "Intel Wireless WiFi"
+ tristate "Intel Wireless WiFi DVM Firmware support"
depends on IWLWIFI
+ help
+ This is the driver supporting the DVM firmware which is
+ currently the only firmware available for existing devices.
+
+config IWLMVM
+ tristate "Intel Wireless WiFi MVM Firmware support"
+ depends on IWLWIFI
+ help
+ This is the driver supporting the MVM firmware which is
+ currently only available for 7000 series devices.
+
+ Say yes if you have such a device.
menu "Debugging Options"
depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 170ec330d2a9..6c7800044a04 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -5,8 +5,10 @@ iwlwifi-objs += iwl-drv.o
iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
+iwlwifi-objs += pcie/7000.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
@@ -15,5 +17,6 @@ ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
+obj-$(CONFIG_IWLMVM) += mvm/
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 33b3ad2e546b..41ec27cb6efe 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -338,7 +338,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap);
+ struct ieee80211_sta *sta);
static inline int iwl_sta_id(struct ieee80211_sta *sta)
{
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index de54713b680c..6468de8634b0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index 2349f393cc42..65e920cab2b7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 71ab76b2b39d..84e2c0fcfef6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1403,6 +1403,7 @@ enum {
#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
+#define AGG_TX_TRY_POS 12
#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
@@ -3695,7 +3696,7 @@ struct iwl_bt_uart_msg {
u8 frame5;
u8 frame6;
u8 frame7;
-} __attribute__((packed));
+} __packed;
struct iwl_bt_coex_profile_notif {
struct iwl_bt_uart_msg last_bt_uart_msg;
@@ -3703,7 +3704,7 @@ struct iwl_bt_coex_profile_notif {
u8 bt_traffic_load; /* 0 .. 3? */
u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
u8 reserved;
-} __attribute__((packed));
+} __packed;
#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS 0
#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK 0x1
@@ -3752,7 +3753,7 @@ enum bt_coex_prio_table_priorities {
struct iwl_bt_coex_prio_table_cmd {
u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
-} __attribute__((packed));
+} __packed;
#define IWL_BT_COEX_ENV_CLOSE 0
#define IWL_BT_COEX_ENV_OPEN 1
@@ -3764,7 +3765,7 @@ struct iwl_bt_coex_prot_env_cmd {
u8 action; /* 0 = closed, 1 = open */
u8 type; /* 0 .. 15 */
u8 reserved[2];
-} __attribute__((packed));
+} __packed;
/*
* REPLY_D3_CONFIG
@@ -3897,6 +3898,24 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
__le64 replay_ctr;
} __packed;
+#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
+
+/*
+ * REPLY_WOWLAN_GET_STATUS = 0xe5
+ */
+struct iwlagn_wowlan_status {
+ __le64 replay_ctr;
+ __le32 rekey_status;
+ __le32 wakeup_reason;
+ u8 pattern_number;
+ u8 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le16 non_qos_seq_ctr;
+ __le16 reserved2;
+ union iwlagn_all_tsc_rsc tsc_rsc;
+ __le16 reserved3;
+} __packed;
+
/*
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 5b9533eef54d..20806cae11b7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -157,7 +157,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
sram = priv->dbgfs_sram_offset & ~0x3;
/* read the first u32 from sram */
- val = iwl_read_targ_mem(priv->trans, sram);
+ val = iwl_trans_read_mem32(priv->trans, sram);
for (; len; len--) {
/* put the address at the start of every line */
@@ -176,7 +176,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
if (++offset == 4) {
sram += 4;
offset = 0;
- val = iwl_read_targ_mem(priv->trans, sram);
+ val = iwl_trans_read_mem32(priv->trans, sram);
}
/* put in extra spaces and split lines for human readability */
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 2653a891cc7e..71ea77576d22 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 8c72be3f37c1..15cca2ef9294 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index bf479f709091..33c7e15d24f5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -69,7 +69,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
/* Set led register off */
void iwlagn_led_enable(struct iwl_priv *priv)
{
- iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+ iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
}
/*
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index b02a853103d3..8749dcfe695f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 6ff46605ad4f..86ea5f4c3939 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 3163e0f38c25..323e4a33fcac 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -145,14 +145,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_SCAN_WHILE_IDLE;
+ IEEE80211_HW_WANT_MONITOR_VIF;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
@@ -206,7 +205,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
- priv->trans->ops->wowlan_suspend &&
+ priv->trans->ops->d3_suspend &&
+ priv->trans->ops->d3_resume &&
device_can_wakeup(priv->trans->dev)) {
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@@ -426,7 +426,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
if (ret)
goto error;
- iwl_trans_wowlan_suspend(priv->trans);
+ iwl_trans_d3_suspend(priv->trans);
goto out;
@@ -441,54 +441,154 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
return ret;
}
+struct iwl_resume_data {
+ struct iwl_priv *priv;
+ struct iwlagn_wowlan_status *cmd;
+ bool valid;
+};
+
+static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_resume_data *resume_data = data;
+ struct iwl_priv *priv = resume_data->priv;
+ u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+ if (len - 4 != sizeof(*resume_data->cmd)) {
+ IWL_ERR(priv, "rx wrong size data\n");
+ return true;
+ }
+ memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd));
+ resume_data->valid = true;
+
+ return true;
+}
+
static int iwlagn_mac_resume(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct ieee80211_vif *vif;
- unsigned long flags;
- u32 base, status = 0xffffffff;
- int ret = -EIO;
+ u32 base;
+ int ret;
+ enum iwl_d3_status d3_status;
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+ struct iwl_notification_wait status_wait;
+ static const u8 status_cmd[] = {
+ REPLY_WOWLAN_GET_STATUS,
+ };
+ struct iwlagn_wowlan_status status_data = {};
+ struct iwl_resume_data resume_data = {
+ .priv = priv,
+ .cmd = &status_data,
+ .valid = false,
+ };
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ const struct fw_img *img;
+#endif
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->mutex);
- iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+ /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+ vif = ctx->vif;
+
+ ret = iwl_trans_d3_resume(priv->trans, &d3_status);
+ if (ret)
+ goto out_unlock;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(priv, "Device was reset during suspend\n");
+ goto out_unlock;
+ }
base = priv->device_pointers.error_event_table;
- if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&priv->trans->reg_lock, flags);
- ret = iwl_grab_nic_access_silent(priv->trans);
- if (likely(ret == 0)) {
- iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(priv->trans);
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+ IWL_WARN(priv, "Invalid error table during resume!\n");
+ goto out_unlock;
+ }
+
+ iwl_trans_read_mem_bytes(priv->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid) {
+ IWL_INFO(priv, "error table is valid (%d, 0x%x)\n",
+ err_info.valid, err_info.error_id);
+ if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ wakeup.rfkill_release = true;
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
}
- spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
+ goto out_unlock;
+ }
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (ret == 0) {
- const struct fw_img *img;
-
- img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
- if (!priv->wowlan_sram) {
- priv->wowlan_sram =
- kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
- GFP_KERNEL);
- }
+ img = &priv->fw->img[IWL_UCODE_WOWLAN];
+ if (!priv->wowlan_sram)
+ priv->wowlan_sram =
+ kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
+ GFP_KERNEL);
+
+ if (priv->wowlan_sram)
+ iwl_trans_read_mem(priv->trans, 0x800000,
+ priv->wowlan_sram,
+ img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+#endif
+
+ /*
+ * This is very strange. The GET_STATUS command is sent but the device
+ * doesn't reply properly, it seems it doesn't close the RBD so one is
+ * always left open ... As a result, we need to send another command
+ * and have to reset the driver afterwards. As we need to switch to
+ * runtime firmware again that'll happen.
+ */
- if (priv->wowlan_sram)
- _iwl_read_targ_mem_dwords(
- priv->trans, 0x800000,
- priv->wowlan_sram,
- img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+ iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd,
+ ARRAY_SIZE(status_cmd), iwl_resume_status_fn,
+ &resume_data);
+
+ iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
+ iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);
+ /* an RBD is left open in the firmware now! */
+
+ ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5);
+ if (ret)
+ goto out_unlock;
+
+ if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) {
+ u32 reasons = le32_to_cpu(status_data.wakeup_reason);
+ struct cfg80211_wowlan_wakeup *wakeup_report;
+
+ IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons);
+
+ if (reasons) {
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET)
+ wakeup.magic_pkt = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH)
+ wakeup.pattern_idx = status_data.pattern_number;
+ if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+ IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE))
+ wakeup.disconnect = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL)
+ wakeup.gtk_rekey_failure = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ)
+ wakeup.eap_identity_req = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE)
+ wakeup.four_way_handshake = true;
+ wakeup_report = &wakeup;
+ } else {
+ wakeup_report = NULL;
}
-#endif
- }
- /* we'll clear ctx->vif during iwlagn_prepare_restart() */
- vif = ctx->vif;
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ }
priv->wowlan = false;
@@ -498,6 +598,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
+ out_unlock:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -520,9 +621,6 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw,
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
if (iwlagn_tx_skb(priv, control->sta, skb))
ieee80211_free_txskb(hw, skb);
}
@@ -679,7 +777,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
IWL_DEBUG_HT(priv, "stop Tx\n");
ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
if ((ret == 0) && (priv->agg_tids_count > 0)) {
@@ -1154,6 +1254,7 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
}
static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index faa05932efae..b9e3517652d6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -353,11 +353,8 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
- if (unlikely(!iwl_grab_nic_access(priv->trans))) {
- spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
+ if (!iwl_trans_grab_nic_access(priv->trans, false, &reg_flags))
return;
- }
/* Set starting address; reads will auto-increment */
iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
@@ -388,8 +385,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
}
}
/* Allow device to power down */
- iwl_release_nic_access(priv->trans);
- spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
+ iwl_trans_release_nic_access(priv->trans, &reg_flags);
}
static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -408,7 +404,8 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
base = priv->device_pointers.log_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
+ iwl_trans_read_mem_bytes(priv->trans, base,
+ &read, sizeof(read));
capacity = read.capacity;
mode = read.mode;
num_wraps = read.wrap_counter;
@@ -1627,7 +1624,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
/*TODO: Update dbgfs with ISR error stats obtained below */
- iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -1716,9 +1713,8 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&trans->reg_lock, reg_flags);
- if (unlikely(!iwl_grab_nic_access(trans)))
- goto out_unlock;
+ if (!iwl_trans_grab_nic_access(trans, false, &reg_flags))
+ return pos;
/* Set starting address; reads will auto-increment */
iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
@@ -1756,9 +1752,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
}
/* Allow device to power down */
- iwl_release_nic_access(trans);
-out_unlock:
- spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
+ iwl_trans_release_nic_access(trans, &reg_flags);
return pos;
}
@@ -1835,10 +1829,10 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
}
/* event log header */
- capacity = iwl_read_targ_mem(trans, base);
- mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
- num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
+ capacity = iwl_trans_read_mem32(trans, base);
+ mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32)));
+ num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32)));
+ next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32)));
if (capacity > logsize) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
@@ -1990,13 +1984,13 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
/* SKU Control */
- iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
- CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
- (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
- (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
+ iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
+ (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
+ (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
/* write radio config values to register */
if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
@@ -2008,10 +2002,11 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
priv->nvm_data->radio_cfg_dash <<
CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
- iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
- CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
- CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
+ iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH,
+ reg_val);
IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
priv->nvm_data->radio_cfg_type,
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 518cf3715809..bd69018d07a9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index a2cee7f04848..7b03e1342d47 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index f3dd0da60d8a..abe304267261 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -411,8 +411,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
* BT traffic, as they would just be disrupted by BT.
*/
if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
- IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
- priv->bt_traffic_load);
+ IWL_DEBUG_COEX(priv,
+ "BT traffic (%d), no aggregation allowed\n",
+ priv->bt_traffic_load);
return ret;
}
@@ -1288,8 +1289,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1304,7 +1304,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1344,8 +1344,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1360,7 +1359,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
rate_mask = lq_sta->active_mimo3_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1409,7 +1408,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index ad3aea8f626a..5d83cab22d62 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index cac4f37cc427..a4eed2055fdb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -790,7 +790,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx(priv->hw, skb);
+ ieee80211_rx_ni(priv->hw, skb);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 9a891e6e60e8..23be948cf162 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1545,10 +1545,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
}
- if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
- priv->beacon_ctx) {
+ if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
if (iwlagn_update_beacon(priv, vif))
- IWL_ERR(priv, "Error sending IBSS beacon\n");
+ IWL_ERR(priv, "Error updating beacon\n");
}
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 610ed2204e1f..3a4aa5239c45 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index bdba9543c351..94ef33838bc6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -77,7 +77,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
sta_id);
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
@@ -119,7 +119,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return ret;
}
@@ -173,7 +173,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap)
+ struct ieee80211_sta *sta)
{
if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
return false;
@@ -183,20 +183,11 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
return false;
#endif
- /*
- * Remainder of this function checks ht_cap, but if it's
- * NULL then we can do HT40 (special case for RXON)
- */
- if (!ht_cap)
+ /* special case for RXON */
+ if (!sta)
return true;
- if (!ht_cap->ht_supported)
- return false;
-
- if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
- return false;
-
- return true;
+ return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -205,7 +196,6 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
__le32 *flags, __le32 *mask)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- u8 mimo_ps_mode;
*mask = STA_FLG_RTS_MIMO_PROT_MSK |
STA_FLG_MIMO_DIS_MSK |
@@ -217,26 +207,24 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
if (!sta || !sta_ht_inf->ht_supported)
return;
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
-
IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
sta->addr,
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ (sta->smps_mode == IEEE80211_SMPS_STATIC) ?
"static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+ (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
"dynamic" : "disabled");
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_STATIC:
*flags |= STA_FLG_MIMO_DIS_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ case IEEE80211_SMPS_DYNAMIC:
*flags |= STA_FLG_RTS_MIMO_PROT_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
+ case IEEE80211_SMPS_OFF:
break;
default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
break;
}
@@ -246,7 +234,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
*flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
*flags |= STA_FLG_HT40_EN_MSK;
}
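The sta.c hunks above stop parsing the peer's raw HT capability bits and instead use the per-station fields mac80211 already derives: sta->bandwidth for the negotiated receive width and sta->smps_mode for spatial-multiplexing power save. A minimal sketch of the resulting checks (an illustrative condensation of the hunks, not additional driver code):

    /* HT40 TX is allowed once mac80211 reports the peer receives >= 40 MHz;
     * a NULL station is the RXON special case and is still allowed through */
    bool ht40_ok = !sta || sta->bandwidth >= IEEE80211_STA_RX_BW_40;

    /* SM power save is keyed off the enum instead of ht_cap bits */
    if (sta->smps_mode == IEEE80211_SMPS_STATIC)
            *flags |= STA_FLG_MIMO_DIS_MSK;        /* peer can only receive on one chain */
    else if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
            *flags |= STA_FLG_RTS_MIMO_PROT_MSK;   /* protect MIMO frames with RTS */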
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
index 57b918ce3b5f..dc6f965a123a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index eb864433e59d..03f9bc01c0cc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -185,10 +185,8 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
priv->thermal_throttle.ct_kill_toggle = true;
}
iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
- spin_lock_irqsave(&priv->trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(priv->trans)))
- iwl_release_nic_access(priv->trans);
- spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(priv->trans, false, &flags))
+ iwl_trans_release_nic_access(priv->trans, &flags);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -473,8 +471,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
} else {
- iwl_prepare_ct_kill_task(priv);
tt->state = old_state;
+ iwl_prepare_ct_kill_task(priv);
}
} else if (old_state == IWL_TI_CT_KILL &&
tt->state != IWL_TI_CT_KILL) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 44c7c8f30a2d..9356c4b908ca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index da21328ca8ed..6aec2df3bb27 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -231,13 +231,11 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
if (info->flags & IEEE80211_TX_CTL_AMPDU)
tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
@@ -355,8 +353,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
}
}
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
if (sta)
sta_priv = (void *)sta->drv_priv;
@@ -472,6 +468,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
WARN_ON_ONCE(is_agg &&
priv->queue_to_mac80211[txq_id] != info->hw_queue);
+ IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
+ txq_id, seq_number);
+
if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
@@ -541,9 +540,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
spin_lock_bh(&priv->sta_lock);
tid_data = &priv->tid_data[sta_id][tid];
- txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
+ txq_id = tid_data->agg.txq_id;
- switch (priv->tid_data[sta_id][tid].agg.state) {
+ switch (tid_data->agg.state) {
case IWL_EMPTYING_HW_QUEUE_ADDBA:
/*
* This can happen if the peer stops aggregation
@@ -563,9 +562,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
case IWL_AGG_ON:
break;
default:
- IWL_WARN(priv, "Stopping AGG while state not ON "
- "or starting for %d on %d (%d)\n", sta_id, tid,
- priv->tid_data[sta_id][tid].agg.state);
+ IWL_WARN(priv,
+ "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+ sta_id, tid, tid_data->agg.state);
spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -578,12 +577,11 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
sta_id, tid, txq_id);
} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
- IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
- "next_recl = %d\n",
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Can't proceed: ssn %d, next_recl = %d\n",
tid_data->agg.ssn,
tid_data->next_reclaimed);
- priv->tid_data[sta_id][tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -591,8 +589,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
tid_data->agg.ssn);
turn_off:
- agg_state = priv->tid_data[sta_id][tid].agg.state;
- priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+ agg_state = tid_data->agg.state;
+ tid_data->agg.state = IWL_AGG_OFF;
spin_unlock_bh(&priv->sta_lock);
@@ -910,6 +908,12 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
}
}
+static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & MAX_SN;
+}
+
static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
struct iwlagn_tx_resp *tx_resp)
{
@@ -944,9 +948,15 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
if (tx_resp->frame_count == 1)
return;
+ IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
+ agg->txq_id,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);
+
/* Construct bit-map of pending frames within Tx window */
for (i = 0; i < tx_resp->frame_count; i++) {
u16 fstatus = le16_to_cpu(frame_status[i].status);
+ u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;
if (status & AGG_TX_STATUS_MSK)
iwlagn_count_agg_tx_err_status(priv, fstatus);
@@ -955,11 +965,13 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
AGG_TX_STATE_ABORT_MSK))
continue;
- IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
- "try-count (0x%08x)\n",
- iwl_get_agg_tx_fail_reason(fstatus),
- fstatus & AGG_TX_STATUS_MSK,
- fstatus & AGG_TX_TRY_MSK);
+ if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
+ IWL_DEBUG_TX_REPLY(priv,
+ "%d: status %s (0x%04x), try-count (0x%01x)\n",
+ i,
+ iwl_get_agg_tx_fail_reason(fstatus),
+ fstatus & AGG_TX_STATUS_MSK,
+ retry_cnt);
}
}
@@ -990,12 +1002,6 @@ const char *iwl_get_agg_tx_fail_reason(u16 status)
}
#endif /* CONFIG_IWLWIFI_DEBUG */
-static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
-}
-
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
status &= TX_STATUS_MSK;
@@ -1079,6 +1085,8 @@ static void iwlagn_set_tx_status(struct iwl_priv *priv,
{
u16 status = le16_to_cpu(tx_resp->status.status);
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags |= iwl_tx_status_to_mac80211(status);
iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
@@ -1123,10 +1131,16 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
- if (is_agg)
+ if (is_agg) {
+ WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
+ tid >= IWL_MAX_TID_COUNT);
+ if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
+ IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
+ priv->tid_data[sta_id][tid].agg.txq_id);
iwl_rx_reply_tx_agg(priv, tx_resp);
+ }
__skb_queue_head_init(&skbs);
@@ -1213,22 +1227,41 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
freed++;
}
- WARN_ON(!is_agg && freed != 1);
+ if (tid != IWL_TID_NON_QOS) {
+ priv->tid_data[sta_id][tid].next_reclaimed =
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ }
+
+ if (!is_agg && freed != 1)
+ IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
/*
* An offchannel frame can be sent only on the AUX queue, where
* there is no aggregation (and reordering), so only a single
* skb is expected to be processed.
*/
- WARN_ON(is_offchannel_skb && freed != 1);
+ if (is_offchannel_skb && freed != 1)
+ IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
+
+ IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
+ iwl_get_tx_fail_reason(status), status);
+
+ IWL_DEBUG_TX_REPLY(priv,
+ "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame,
+ SEQ_TO_INDEX(sequence), ssn,
+ le16_to_cpu(tx_resp->seq_ctl));
}
iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
- ieee80211_tx_status(priv->hw, skb);
+ ieee80211_tx_status_ni(priv->hw, skb);
}
if (is_offchannel_skb)
@@ -1275,12 +1308,12 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
tid = ba_resp->tid;
agg = &priv->tid_data[sta_id][tid].agg;
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
if (unlikely(!agg->wait_for_ba)) {
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1294,7 +1327,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_TX_QUEUES(priv,
"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
scd_flow, sta_id, tid, agg->txq_id);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1363,11 +1396,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
}
}
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status(priv->hw, skb);
+ ieee80211_tx_status_ni(priv->hw, skb);
}
return 0;
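Two themes run through the tx.c changes above: per-station state under priv->sta_lock is now taken with the _bh lock variants, and completed frames are handed back with ieee80211_tx_status_ni(), the process-context counterpart of ieee80211_tx_status(). Both mirror the rx.c change earlier: this code now runs with BHs enabled, so it must exclude softirq users itself. A minimal sketch of the pairing, using the names from the hunks above:

    spin_lock_bh(&priv->sta_lock);          /* also excludes softirq-context users of the lock */
    /* ... reclaim frames, update priv->tid_data ... */
    spin_unlock_bh(&priv->sta_lock);

    ieee80211_tx_status_ni(priv->hw, skb);  /* wraps ieee80211_tx_status() in local_bh_disable() */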
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index c6467e5554f5..736fe9bb140e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -286,89 +286,6 @@ static int iwl_alive_notify(struct iwl_priv *priv)
return iwl_send_calib_results(priv);
}
-
-/**
- * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int iwl_verify_sec_sparse(struct iwl_priv *priv,
- const struct fw_desc *fw_desc)
-{
- __le32 *image = (__le32 *)fw_desc->data;
- u32 len = fw_desc->len;
- u32 val;
- u32 i;
-
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
- i + fw_desc->offset);
- val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image))
- return -EIO;
- }
-
- return 0;
-}
-
-static void iwl_print_mismatch_sec(struct iwl_priv *priv,
- const struct fw_desc *fw_desc)
-{
- __le32 *image = (__le32 *)fw_desc->data;
- u32 len = fw_desc->len;
- u32 val;
- u32 offs;
- int errors = 0;
-
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
- iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
- fw_desc->offset);
-
- for (offs = 0;
- offs < len && errors < 20;
- offs += sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- offs, val, le32_to_cpu(*image));
- errors++;
- }
- }
-}
-
-/**
- * iwl_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-static int iwl_verify_ucode(struct iwl_priv *priv,
- enum iwl_ucode_type ucode_type)
-{
- const struct fw_img *img = iwl_get_ucode_image(priv, ucode_type);
-
- if (!img) {
- IWL_ERR(priv, "Invalid ucode requested (%d)\n", ucode_type);
- return -EINVAL;
- }
-
- if (!iwl_verify_sec_sparse(priv, &img->sec[IWL_UCODE_SECTION_INST])) {
- IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
-
- iwl_print_mismatch_sec(priv, &img->sec[IWL_UCODE_SECTION_INST]);
- return -EIO;
-}
-
struct iwl_alive_data {
bool valid;
u8 subtype;
@@ -426,7 +343,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
alive_cmd, ARRAY_SIZE(alive_cmd),
iwl_alive_fn, &alive_data);
- ret = iwl_trans_start_fw(priv->trans, fw);
+ ret = iwl_trans_start_fw(priv->trans, fw, false);
if (ret) {
priv->cur_ucode = old_type;
iwl_remove_notification(&priv->notif_wait, &alive_wait);
@@ -450,18 +367,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
return -EIO;
}
- /*
- * This step takes a long time (60-80ms!!) and
- * WoWLAN image should be loaded quickly, so
- * skip it for WoWLAN.
- */
if (ucode_type != IWL_UCODE_WOWLAN) {
- ret = iwl_verify_ucode(priv, ucode_type);
- if (ret) {
- priv->cur_ucode = old_type;
- return ret;
- }
-
/* delay a bit to give rfkill time to run */
msleep(5);
}
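For context on the block removed above: the dropped helpers re-read the instruction image from SRAM through the indirect HBUS_TARG_MEM_{RADDR,RDAT} port, sampling one 32-bit word every 100 bytes as a post-load sanity check; the removed comment notes the step took 60-80 ms, which is why it was already skipped for the WoWLAN image. A condensed sketch of what the check did (paraphrasing the deleted code, not a new API):

    for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
            iwl_write_direct32(trans, HBUS_TARG_MEM_RADDR, i + fw_desc->offset);
            if (iwl_read32(trans, HBUS_TARG_MEM_RDAT) != le32_to_cpu(*image))
                    return -EIO;    /* SRAM word does not match the loaded image */
    }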
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7960a52f6ad4..e9975c54c276 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 864219d2136a..743b48343358 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6030,
IWL_DEVICE_FAMILY_6050,
IWL_DEVICE_FAMILY_6150,
+ IWL_DEVICE_FAMILY_7000,
};
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 34a5287dfc2f..df3463a38704 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -381,8 +381,8 @@
/* LED */
#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
-#define CSR_LED_REG_TRUN_ON (0x78)
-#define CSR_LED_REG_TRUN_OFF (0x38)
+#define CSR_LED_REG_TURN_ON (0x60)
+#define CSR_LED_REG_TURN_OFF (0x20)
/* ANA_PLL */
#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 42b20b0e83bc..8cf5db7fb5c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -116,6 +116,7 @@ do { \
#define IWL_DL_HCMD 0x00000004
#define IWL_DL_STATE 0x00000008
/* 0x000000F0 - 0x00000010 */
+#define IWL_DL_TE 0x00000020
#define IWL_DL_EEPROM 0x00000040
#define IWL_DL_RADIO 0x00000080
/* 0x00000F00 - 0x00000100 */
@@ -156,6 +157,7 @@ do { \
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a)
#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 70191ddbd8f6..8f61c717f619 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index dc7e26b2f383..9a0f45ec9e01 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index d3549f493a17..6f228bb2b844 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,8 +139,10 @@ struct iwl_drv {
#endif
};
-#define DVM_OP_MODE 0
-#define MVM_OP_MODE 1
+enum {
+ DVM_OP_MODE = 0,
+ MVM_OP_MODE = 1,
+};
/* Protects the table contents, i.e. the ops pointer & drv list */
static struct mutex iwlwifi_opmode_table_mtx;
@@ -149,8 +151,8 @@ static struct iwlwifi_opmode_table {
const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
struct list_head drv; /* list of devices using this op_mode */
} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
- { .name = "iwldvm", .ops = NULL },
- { .name = "iwlmvm", .ops = NULL },
+ [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
+ [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
};
/*
@@ -268,7 +270,7 @@ struct fw_sec_parsing {
*/
struct iwl_tlv_calib_data {
__le32 ucode_type;
- __le64 calib;
+ struct iwl_tlv_calib_ctrl calib;
} __packed;
struct iwl_firmware_pieces {
@@ -358,7 +360,11 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
ucode_type);
return -EINVAL;
}
- drv->fw.default_calib[ucode_type] = le64_to_cpu(def_calib->calib);
+ drv->fw.default_calib[ucode_type].flow_trigger =
+ def_calib->calib.flow_trigger;
+ drv->fw.default_calib[ucode_type].event_trigger =
+ def_calib->calib.event_trigger;
+
return 0;
}
@@ -959,7 +965,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
release_firmware(ucode_raw);
mutex_lock(&iwlwifi_opmode_table_mtx);
- op = &iwlwifi_opmode_table[DVM_OP_MODE];
+ if (fw->mvm_fw)
+ op = &iwlwifi_opmode_table[MVM_OP_MODE];
+ else
+ op = &iwlwifi_opmode_table[DVM_OP_MODE];
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
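The iwl-drv.c hunks above make op_mode selection data-driven: the DVM/MVM indices become an enum, the table uses designated initializers so index and name cannot drift apart, and the firmware's mvm_fw flag picks the slot. Condensed from the hunks (struct body trimmed, if/else folded into a ternary for brevity):

    enum { DVM_OP_MODE = 0, MVM_OP_MODE = 1 };

    static struct iwlwifi_opmode_table {
            const char *name;
            const struct iwl_op_mode_ops *ops;
            struct list_head drv;
    } iwlwifi_opmode_table[] = {
            [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
            [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
    };

    /* after parsing the firmware file */
    op = &iwlwifi_opmode_table[fw->mvm_fw ? MVM_OP_MODE : DVM_OP_MODE];

The same file also stops treating the default-calibration TLV as an opaque __le64 and copies its two bitmaps (flow_trigger, event_trigger) field by field, matching struct iwl_tlv_calib_ctrl added in iwl-fw.h further down.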
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 285de5f68c05..594a5c71b272 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2013 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 471986690cf0..034f2ff4f43d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -703,9 +703,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
-static int iwl_init_sband_channels(struct iwl_nvm_data *data,
- struct ieee80211_supported_band *sband,
- int n_channels, enum ieee80211_band band)
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum ieee80211_band band)
{
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
@@ -728,10 +728,10 @@ static int iwl_init_sband_channels(struct iwl_nvm_data *data,
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
{
int max_bit_rate = 0;
u8 rx_chains;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 555f0eb61d48..683fe6a8c58f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -126,4 +126,13 @@ static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
int iwl_nvm_check_version(struct iwl_nvm_data *data,
struct iwl_trans *trans);
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum ieee80211_band band);
+
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band);
+
#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index 27c7da3c6ed1..ef4806f27cf8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index 1337c9d36fee..b2588c5cbf93 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index ec48563d3c6a..f5592fb3b1ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -225,6 +225,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
+#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c)
+#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
/**
* Rx Config/Status Registers (RCSR)
@@ -257,6 +259,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
+#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR (FH_MEM_RCSR_CHNL0 + 0x8)
+#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ (FH_MEM_RCSR_CHNL0 + 0x10)
#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
@@ -410,6 +414,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
* uCode/driver must write "1" in order to clear this flag
*/
#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+#define FH_TSSR_TX_MSG_CONFIG_REG (FH_TSSR_LOWER_BOUND + 0x008)
#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index e71564053e7f..90873eca35f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index d1a86b66bc51..b545178e46e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,6 +139,19 @@ struct fw_img {
#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwl_tlv_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
/**
* struct iwl_fw - variables associated with the firmware
*
@@ -153,11 +166,12 @@ struct fw_img {
* @inst_evtlog_ptr: event log offset for runtime ucode.
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offset for runtime ucode.
+ * @mvm_fw: indicates this is MVM firmware
*/
struct iwl_fw {
u32 ucode_ver;
- char fw_version[ETHTOOL_BUSINFO_LEN];
+ char fw_version[ETHTOOL_FWVERS_LEN];
/* ucode images */
struct fw_img img[IWL_UCODE_TYPE_MAX];
@@ -168,7 +182,7 @@ struct iwl_fw {
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
- u64 default_calib[IWL_UCODE_TYPE_MAX];
+ struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
u32 phy_config;
bool mvm_fw;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index cdaff9572059..276410d82de4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -35,54 +35,6 @@
#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
-}
-
-static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
-}
-
-void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- __iwl_set_bit(trans, reg, mask);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_set_bit);
-
-void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- __iwl_clear_bit(trans, reg, mask);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_clear_bit);
-
-void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
-{
- unsigned long flags;
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
-
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
@@ -99,87 +51,14 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
}
EXPORT_SYMBOL_GPL(iwl_poll_bit);
-int iwl_grab_nic_access_silent(struct iwl_trans *trans)
-{
- int ret;
-
- lockdep_assert_held(&trans->reg_lock);
-
- /* this bit wakes up the NIC */
- __iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /*
- * These bits say the device is running, and should keep running for
- * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
- * but they do not indicate that embedded SRAM is restored yet;
- * 3945 and 4965 have volatile SRAM, and must save/restore contents
- * to/from host DRAM when sleeping/waking for power-saving.
- * Each direction takes approximately 1/4 millisecond; with this
- * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
- * series of register accesses are expected (e.g. reading Event Log),
- * to keep device from sleeping.
- *
- * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
- * SRAM is okay/restored. We don't check that here because this call
- * is just for hardware register access; but GP1 MAC_SLEEP check is a
- * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
- *
- * 5000 series and later (including 1000 series) have non-volatile SRAM,
- * and do not save/restore SRAM when power cycling.
- */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
- if (ret < 0) {
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
- return -EIO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
-
-bool iwl_grab_nic_access(struct iwl_trans *trans)
-{
- int ret = iwl_grab_nic_access_silent(trans);
- if (unlikely(ret)) {
- u32 val = iwl_read32(trans, CSR_GP_CNTRL);
- WARN_ONCE(1, "Timeout waiting for hardware access "
- "(CSR_GP_CNTRL 0x%08x)\n", val);
- return false;
- }
-
- return true;
-}
-EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
-
-void iwl_release_nic_access(struct iwl_trans *trans)
-{
- lockdep_assert_held(&trans->reg_lock);
- __iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- /*
- * Above we read the CSR_GP_CNTRL register, which will flush
- * any previous writes, but we need the write that clears the
- * MAC_ACCESS_REQ bit to be performed before any other writes
- * scheduled on different CPUs (after we drop reg_lock).
- */
- mmiowb();
-}
-EXPORT_SYMBOL_GPL(iwl_release_nic_access);
-
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
- u32 value;
+ u32 value = 0x5a5a5a5a;
unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- value = iwl_read32(trans, reg);
- iwl_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ value = iwl_read32(trans, reg);
+ iwl_trans_release_nic_access(trans, &flags);
+ }
return value;
}
@@ -189,12 +68,10 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
iwl_write32(trans, reg, value);
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_write_direct32);
@@ -230,13 +107,12 @@ static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
unsigned long flags;
- u32 val;
+ u32 val = 0x5a5a5a5a;
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- val = __iwl_read_prph(trans, ofs);
- iwl_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ val = __iwl_read_prph(trans, ofs);
+ iwl_trans_release_nic_access(trans, &flags);
+ }
return val;
}
EXPORT_SYMBOL_GPL(iwl_read_prph);
@@ -245,12 +121,10 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs, val);
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_write_prph);
@@ -258,13 +132,11 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs,
__iwl_read_prph(trans, ofs) | mask);
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
@@ -273,13 +145,11 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs,
(__iwl_read_prph(trans, ofs) & mask) | bits);
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
@@ -288,67 +158,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
unsigned long flags;
u32 val;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
val = __iwl_read_prph(trans, ofs);
__iwl_write_prph(trans, ofs, (val & ~mask));
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
-
-void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
-{
- unsigned long flags;
- int offs;
- u32 *vals = buf;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
- iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
- for (offs = 0; offs < dwords; offs++)
- vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(trans);
- }
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
-
-u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
-{
- u32 value;
-
- _iwl_read_targ_mem_dwords(trans, addr, &value, 1);
-
- return value;
-}
-EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
-
-int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
-{
- unsigned long flags;
- int offs, result = 0;
- const u32 *vals = buf;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
- iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
- for (offs = 0; offs < dwords; offs++)
- iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
- iwl_release_nic_access(trans);
- } else
- result = -EBUSY;
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
-
-int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
-{
- return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
-}
-EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
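The iwl-io.c rewrite above moves NIC wake/sleep handling behind the transport layer: instead of taking trans->reg_lock and calling iwl_grab_nic_access() directly, callers use iwl_trans_grab_nic_access()/iwl_trans_release_nic_access() with an opaque flags cookie, and reads that fail to wake the NIC return the 0x5a5a5a5a sentinel rather than stale data. The canonical pattern, as it now appears in iwl_read_direct32() and iwl_read_prph() above:

    unsigned long flags;
    u32 val = 0x5a5a5a5a;   /* sentinel if the NIC could not be woken */

    if (iwl_trans_grab_nic_access(trans, false, &flags)) {
            val = iwl_read32(trans, reg);
            iwl_trans_release_nic_access(trans, &flags);
    }
    return val;

The target-memory helpers (HBUS_TARG_MEM_* reads and writes) are dropped here as well; that access presumably goes through the transport's own memory-access hooks, which are not part of this section.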
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 48dc753e3742..fd9f5b97fff3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -51,20 +51,21 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
return val;
}
-void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+ iwl_trans_set_bits_mask(trans, reg, mask, mask);
+}
-void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
+static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+ iwl_trans_set_bits_mask(trans, reg, mask, 0);
+}
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout);
-int iwl_grab_nic_access_silent(struct iwl_trans *trans);
-bool iwl_grab_nic_access(struct iwl_trans *trans);
-void iwl_release_nic_access(struct iwl_trans *trans);
-
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
@@ -76,19 +77,4 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
-void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
-
-#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \
- do { \
- BUILD_BUG_ON((bufsize) % sizeof(u32)); \
- _iwl_read_targ_mem_dwords(trans, addr, buf, \
- (bufsize) / sizeof(u32));\
- } while (0)
-
-int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords);
-
-u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
-int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d9a86d6b2bd7..e5e3a79eae2f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index c61f2070f15a..c3affbc62cdf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 821523100cf1..c2ce764463a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
new file mode 100644
index 000000000000..a70213bdb83c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -0,0 +1,346 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
+
+/* NVM offsets (in words) definitions */
+enum wkp_nvm_offsets {
+ /* NVM HW-Section offset (in words) definitions */
+ HW_ADDR = 0x15,
+
+/* NVM SW-Section offset (in words) definitions */
+ NVM_SW_SECTION = 0x1C0,
+ NVM_VERSION = 0,
+ RADIO_CFG = 1,
+ SKU = 2,
+ N_HW_ADDRS = 3,
+ NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
+
+/* NVM calibration section offset (in words) definitions */
+ NVM_CALIB_SECTION = 0x2B8,
+ XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+};
+
+/* SKU Capabilities (actual values from NVM definition) */
+enum nvm_sku_bits {
+ NVM_SKU_CAP_BAND_24GHZ = BIT(0),
+ NVM_SKU_CAP_BAND_52GHZ = BIT(1),
+ NVM_SKU_CAP_11N_ENABLE = BIT(2),
+};
+
+/* radio config bits (actual values from NVM definition) */
+#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
+#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+/*
+ * These are the channel numbers in the order that they are stored in the NVM
+ */
+static const u8 iwl_nvm_channels[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64,
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165
+};
+
+#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define NUM_2GHZ_CHANNELS 14
+#define FIRST_2GHZ_HT_MINUS 5
+#define LAST_2GHZ_HT_PLUS 9
+#define LAST_5GHZ_HT 161
+
+
+/* rate data (static) */
+static struct ieee80211_rate iwl_cfg80211_rates[] = {
+ { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
+ { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
+ { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
+ { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
+ { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
+ { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
+ { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
+ { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
+ { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
+};
+#define RATES_24_OFFS 0
+#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
+#define RATES_52_OFFS 4
+#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
+
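The offsets above carve the single rate array into two band views: 2.4 GHz uses all 12 entries, while 5 GHz starts at index 4 and so skips the four CCK rates (1, 2, 5.5 and 11 Mbps), which are not valid there. Illustration only:

	/*
	 * 2.4 GHz: iwl_cfg80211_rates[0..11] -> 1 ... 54 Mbps (12 rates)
	 * 5 GHz:   iwl_cfg80211_rates[4..11] -> 6 ... 54 Mbps (8 rates)
	 */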
+/**
+ * enum iwl_nvm_channel_flags - channel flags in NVM
+ * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @NVM_CHANNEL_RADAR: radar detection required
+ * @NVM_CHANNEL_DFS: dynamic freq selection candidate
+ * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
+ */
+enum iwl_nvm_channel_flags {
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_DFS = BIT(7),
+ NVM_CHANNEL_WIDE = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+};
+
+#define CHECK_AND_PRINT_I(x) \
+ ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+
+static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ const __le16 * const nvm_ch_flags)
+{
+ int ch_idx;
+ int n_channels = 0;
+ struct ieee80211_channel *channel;
+ u16 ch_flags;
+ bool is_5ghz;
+
+ for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
+ ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+ if (!(ch_flags & NVM_CHANNEL_VALID)) {
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d Flags %x [%sGHz] - No traffic\n",
+ iwl_nvm_channels[ch_idx],
+ ch_flags,
+ (ch_idx >= NUM_2GHZ_CHANNELS) ?
+ "5.2" : "2.4");
+ continue;
+ }
+
+ channel = &data->channels[n_channels];
+ n_channels++;
+
+ channel->hw_value = iwl_nvm_channels[ch_idx];
+ channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ channel->center_freq =
+ ieee80211_channel_to_frequency(
+ channel->hw_value, channel->band);
+
+ /* TODO: Need to be dependent on the NVM */
+ channel->flags = IEEE80211_CHAN_NO_HT40;
+ if (ch_idx < NUM_2GHZ_CHANNELS &&
+ (ch_flags & NVM_CHANNEL_40MHZ)) {
+ if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+ if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+ } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT &&
+ (ch_flags & NVM_CHANNEL_40MHZ)) {
+ if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+ else
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+ }
+
+ if (!(ch_flags & NVM_CHANNEL_IBSS))
+ channel->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch_flags & NVM_CHANNEL_ACTIVE))
+ channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch_flags & NVM_CHANNEL_RADAR)
+ channel->flags |= IEEE80211_CHAN_RADAR;
+
+ /* Initialize regulatory-based run-time data */
+
+ /* TODO: read the real value from the NVM */
+ channel->max_power = 0;
+ is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ channel->hw_value,
+ is_5ghz ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ ch_flags,
+ channel->max_power,
+ ((ch_flags & NVM_CHANNEL_IBSS) &&
+ !(ch_flags & NVM_CHANNEL_RADAR))
+ ? "" : "not ");
+ }
+
+ return n_channels;
+}
+
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data, const __le16 *nvm_sw)
+{
+ int n_channels = iwl_init_channel_map(dev, cfg, data,
+ &nvm_sw[NVM_CHANNELS]);
+ int n_used = 0;
+ struct ieee80211_supported_band *sband;
+
+ sband = &data->bands[IEEE80211_BAND_2GHZ];
+ sband->band = IEEE80211_BAND_2GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+ sband->n_bitrates = N_RATES_24;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ sband = &data->bands[IEEE80211_BAND_5GHZ];
+ sband->band = IEEE80211_BAND_5GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+ sband->n_bitrates = N_RATES_52;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ if (n_channels != n_used)
+ IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
+ n_used, n_channels);
+}
+
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw, const __le16 *nvm_sw,
+ const __le16 *nvm_calib)
+{
+ struct iwl_nvm_data *data;
+ u8 hw_addr[ETH_ALEN];
+ u16 radio_cfg, sku;
+
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION);
+
+ radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG);
+ data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+ data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+
+ sku = le16_to_cpup(nvm_sw + SKU);
+ data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
+ data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
+ data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+ data->sku_cap_11n_enable = false;
+
+ /* check overrides (some devices have wrong NVM) */
+ if (cfg->valid_tx_ant)
+ data->valid_tx_ant = cfg->valid_tx_ant;
+ if (cfg->valid_rx_ant)
+ data->valid_rx_ant = cfg->valid_rx_ant;
+
+ if (!data->valid_tx_ant || !data->valid_rx_ant) {
+ IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
+ data->valid_tx_ant, data->valid_rx_ant);
+ kfree(data);
+ return NULL;
+ }
+
+ data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS);
+
+ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
+ data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
+
+ /* The byte order is little endian 16 bit, meaning 214365 */
+ memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN);
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+
+ iwl_init_sbands(dev, cfg, data, nvm_sw);
+
+ data->calib_version = 255; /* TODO:
+ this value will prevent some checks from
+ failing, we need to check if this
+ field is still needed, and if it is,
+ where it is in the NVM */
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(iwl_parse_nvm_data);
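To make the byte-order comment in iwl_parse_nvm_data() concrete: the NVM stores the MAC address as three little-endian 16-bit words, so each byte pair arrives swapped relative to the canonical order and the code swaps it back. A hypothetical example, not taken from real NVM contents:

	/* bytes as read from the NVM HW section */
	u8 hw_addr[ETH_ALEN] = { 0x34, 0x12, 0x78, 0x56, 0xbc, 0x9a };

	/* after the pair-wise swap the resulting address is 12:34:56:78:9a:bc */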
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
new file mode 100644
index 000000000000..b2692bd287fa
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_nvm_parse_h__
+#define __iwl_nvm_parse_h__
+
+#include "iwl-eeprom-parse.h"
+
+/**
+ * iwl_parse_nvm_data - parse NVM data and return values
+ *
+ * This function parses all NVM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_nvm_data().
+ */
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw, const __le16 *nvm_sw,
+ const __le16 *nvm_calib);
+
+#endif /* __iwl_nvm_parse_h__ */
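A minimal usage sketch of the function declared above; the section pointers and the calling context are assumptions for illustration, not part of this patch:

	/* nvm_hw, nvm_sw and nvm_calib are assumed to point at the
	 * corresponding NVM sections already read from the device.
	 */
	struct iwl_nvm_data *nvm_data;

	nvm_data = iwl_parse_nvm_data(trans->dev, cfg, nvm_hw, nvm_sw, nvm_calib);
	if (!nvm_data)
		return -ENOMEM;

	/* ... use nvm_data->hw_addr, nvm_data->bands[], ... */

	iwl_free_nvm_data(nvm_data);	/* free helper per the kernel-doc above */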
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index c8d9b9517468..4a680019e117 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,8 @@
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__
+#include <linux/debugfs.h>
+
struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
@@ -111,13 +113,13 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
 * HCMD this Rx responds to.
- * Must be atomic and called with BH disabled.
+ * This callback may sleep, it is called from a threaded IRQ handler.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
- * the radio is killed. Must be atomic.
+ * the radio is killed. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
@@ -128,8 +130,7 @@ struct iwl_cfg;
* called with BH disabled.
* @nic_config: configure NIC, called before firmware is started.
* May sleep
- * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
- * with BH disabled.
+ * @wimax_active: invoked when WiMax becomes active. May sleep
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -176,6 +177,7 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
+ might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
@@ -194,6 +196,7 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
bool state)
{
+ might_sleep();
op_mode->ops->hw_rf_kill(op_mode, state);
}
@@ -221,6 +224,7 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
+ might_sleep();
op_mode->ops->wimax_active(op_mode);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
new file mode 100644
index 000000000000..14fc8d39fc28
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -0,0 +1,514 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "iwl-phy-db.h"
+#include "iwl-debug.h"
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
+#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_TXP_CH_GROUPS 9
+
+struct iwl_phy_db_entry {
+ u16 size;
+ u8 *data;
+};
+
+/**
+ * struct iwl_phy_db - stores phy configuration and calibration data.
+ *
+ * @cfg: phy configuration.
+ * @calib_nch: non channel specific calibration data.
+ * @calib_ch: channel specific calibration data.
+ * @calib_ch_group_papd: calibration data related to papd channel group.
+ * @calib_ch_group_txp: calibration data related to tx power channel group.
+ */
+struct iwl_phy_db {
+ struct iwl_phy_db_entry cfg;
+ struct iwl_phy_db_entry calib_nch;
+ struct iwl_phy_db_entry calib_ch;
+ struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
+ struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
+
+ u32 channel_num;
+ u32 channel_size;
+
+ struct iwl_trans *trans;
+};
+
+enum iwl_phy_db_section_type {
+ IWL_PHY_DB_CFG = 1,
+ IWL_PHY_DB_CALIB_NCH,
+ IWL_PHY_DB_CALIB_CH,
+ IWL_PHY_DB_CALIB_CHG_PAPD,
+ IWL_PHY_DB_CALIB_CHG_TXP,
+ IWL_PHY_DB_MAX
+};
+
+#define PHY_DB_CMD 0x6c /* TEMP API - the actual command ID is 0x8c */
+
+/*
+ * phy db - configure operational ucode
+ */
+struct iwl_phy_db_cmd {
+ __le16 type;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+/* for parsing of tx power channel group data that comes from the firmware */
+struct iwl_phy_db_chg_txp {
+ __le32 space;
+ __le16 max_channel_idx;
+} __packed;
+
+/*
+ * phy db - Receive phy db chunk after calibrations
+ */
+struct iwl_calib_res_notif_phy_db {
+ __le16 type;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
+static inline void iwl_phy_db_test_pic(__le32 pic)
+{
+ WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
+}
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
+{
+ struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
+ GFP_KERNEL);
+
+ if (!phy_db)
+ return phy_db;
+
+ phy_db->trans = trans;
+
+ /* TODO: add default values of the phy db. */
+ return phy_db;
+}
+EXPORT_SYMBOL(iwl_phy_db_init);
+
+/*
+ * get phy db section: returns a pointer to a phy db section specified by
+ * type and channel group id.
+ */
+static struct iwl_phy_db_entry *
+iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u16 chg_id)
+{
+ if (!phy_db || type >= IWL_PHY_DB_MAX)
+ return NULL;
+
+ switch (type) {
+ case IWL_PHY_DB_CFG:
+ return &phy_db->cfg;
+ case IWL_PHY_DB_CALIB_NCH:
+ return &phy_db->calib_nch;
+ case IWL_PHY_DB_CALIB_CH:
+ return &phy_db->calib_ch;
+ case IWL_PHY_DB_CALIB_CHG_PAPD:
+ if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
+ return NULL;
+ return &phy_db->calib_ch_group_papd[chg_id];
+ case IWL_PHY_DB_CALIB_CHG_TXP:
+ if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
+ return NULL;
+ return &phy_db->calib_ch_group_txp[chg_id];
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u16 chg_id)
+{
+ struct iwl_phy_db_entry *entry =
+ iwl_phy_db_get_section(phy_db, type, chg_id);
+ if (!entry)
+ return;
+
+ kfree(entry->data);
+ entry->data = NULL;
+ entry->size = 0;
+}
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db)
+{
+ int i;
+
+ if (!phy_db)
+ return;
+
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
+ for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
+ for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+
+ kfree(phy_db);
+}
+EXPORT_SYMBOL(iwl_phy_db_free);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
+ gfp_t alloc_ctx)
+{
+ struct iwl_calib_res_notif_phy_db *phy_db_notif =
+ (struct iwl_calib_res_notif_phy_db *)pkt->data;
+ enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type);
+ u16 size = le16_to_cpu(phy_db_notif->length);
+ struct iwl_phy_db_entry *entry;
+ u16 chg_id = 0;
+
+ if (!phy_db)
+ return -EINVAL;
+
+ if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
+ type == IWL_PHY_DB_CALIB_CHG_TXP)
+ chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+
+ entry = iwl_phy_db_get_section(phy_db, type, chg_id);
+ if (!entry)
+ return -EINVAL;
+
+ kfree(entry->data);
+ entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
+ if (!entry->data) {
+ entry->size = 0;
+ return -ENOMEM;
+ }
+
+ entry->size = size;
+
+ if (type == IWL_PHY_DB_CALIB_CH) {
+ phy_db->channel_num =
+ le32_to_cpup((__le32 *)phy_db_notif->data);
+ phy_db->channel_size =
+ (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
+ }
+
+ /* Test PIC */
+ if (type != IWL_PHY_DB_CFG)
+ iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
+ (size / sizeof(__le32)) - 1));
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
+ __func__, __LINE__, type, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_phy_db_set_section);
+
+static int is_valid_channel(u16 ch_id)
+{
+ if (ch_id <= 14 ||
+ (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
+ (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
+ (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
+ return 1;
+ return 0;
+}
+
+static u8 ch_id_to_ch_index(u16 ch_id)
+{
+ if (WARN_ON(!is_valid_channel(ch_id)))
+ return 0xff;
+
+ if (ch_id <= 14)
+ return ch_id - 1;
+ if (ch_id <= 64)
+ return (ch_id + 20) / 4;
+ if (ch_id <= 140)
+ return (ch_id - 12) / 4;
+ return (ch_id - 13) / 4;
+}
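A few worked values for the index arithmetic above, for illustration: the piecewise formulas map the non-contiguous channel numbers onto a contiguous index space.

	/*
	 * ch   1 -> index  0    (ch_id - 1)
	 * ch  14 -> index 13
	 * ch  36 -> index 14    ((36 + 20) / 4)
	 * ch  64 -> index 21
	 * ch 100 -> index 22    ((100 - 12) / 4)
	 * ch 140 -> index 32
	 * ch 145 -> index 33    ((145 - 13) / 4)
	 * ch 165 -> index 38
	 */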
+
+
+static u16 channel_id_to_papd(u16 ch_id)
+{
+ if (WARN_ON(!is_valid_channel(ch_id)))
+ return 0xff;
+
+ if (1 <= ch_id && ch_id <= 14)
+ return 0;
+ if (36 <= ch_id && ch_id <= 64)
+ return 1;
+ if (100 <= ch_id && ch_id <= 140)
+ return 2;
+ return 3;
+}
+
+static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
+{
+ struct iwl_phy_db_chg_txp *txp_chg;
+ int i;
+ u8 ch_index = ch_id_to_ch_index(ch_id);
+ if (ch_index == 0xff)
+ return 0xff;
+
+ for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
+ txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
+ if (!txp_chg)
+ return 0xff;
+ /*
+ * Look for the first channel group whose max channel is
+ * higher than the wanted channel.
+ */
+ if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
+ return i;
+ }
+ return 0xff;
+}
+static
+int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
+ u32 type, u8 **data, u16 *size, u16 ch_id)
+{
+ struct iwl_phy_db_entry *entry;
+ u32 channel_num;
+ u32 channel_size;
+ u16 ch_group_id = 0;
+ u16 index;
+
+ if (!phy_db)
+ return -EINVAL;
+
+ /* find wanted channel group */
+ if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
+ ch_group_id = channel_id_to_papd(ch_id);
+ else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
+ ch_group_id = channel_id_to_txp(phy_db, ch_id);
+
+ entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
+ if (!entry)
+ return -EINVAL;
+
+ if (type == IWL_PHY_DB_CALIB_CH) {
+ index = ch_id_to_ch_index(ch_id);
+ channel_num = phy_db->channel_num;
+ channel_size = phy_db->channel_size;
+ if (index >= channel_num) {
+ IWL_ERR(phy_db->trans, "Wrong channel number %d\n",
+ ch_id);
+ return -EINVAL;
+ }
+ *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
+ *size = channel_size;
+ } else {
+ *data = entry->data;
+ *size = entry->size;
+ }
+
+ /* Test PIC */
+ if (type != IWL_PHY_DB_CFG)
+ iwl_phy_db_test_pic(*(((__le32 *)*data) +
+ (*size / sizeof(__le32)) - 1));
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
+ __func__, __LINE__, type, *size);
+
+ return 0;
+}
+
+static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
+ u16 length, void *data)
+{
+ struct iwl_phy_db_cmd phy_db_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = PHY_DB_CMD,
+ .flags = CMD_SYNC,
+ };
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sending PHY-DB hcmd of type %d, of length %d\n",
+ type, length);
+
+ /* Set phy db cmd variables */
+ phy_db_cmd.type = cpu_to_le16(type);
+ phy_db_cmd.length = cpu_to_le16(length);
+
+ /* Set hcmd variables */
+ cmd.data[0] = &phy_db_cmd;
+ cmd.len[0] = sizeof(struct iwl_phy_db_cmd);
+ cmd.data[1] = data;
+ cmd.len[1] = length;
+ cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+
+ return iwl_trans_send_cmd(phy_db->trans, &cmd);
+}
+
+static int iwl_phy_db_send_all_channel_groups(
+ struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u8 max_ch_groups)
+{
+ u16 i;
+ int err;
+ struct iwl_phy_db_entry *entry;
+
+ /* Send all the channel specific groups to operational fw */
+ for (i = 0; i < max_ch_groups; i++) {
+ entry = iwl_phy_db_get_section(phy_db,
+ type,
+ i);
+ if (!entry)
+ return -EINVAL;
+
+ /* Send the requested PHY DB section */
+ err = iwl_send_phy_db_cmd(phy_db,
+ type,
+ entry->size,
+ entry->data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Can't SEND phy_db section %d (%d), err %d",
+ type, i, err);
+ return err;
+ }
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sent PHY_DB HCMD, type = %d num = %d",
+ type, i);
+ }
+
+ return 0;
+}
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
+{
+ u8 *data = NULL;
+ u16 size = 0;
+ int err;
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sending phy db data and configuration to runtime image\n");
+
+ /* Send PHY DB CFG section */
+ err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG,
+ &data, &size, 0);
+ if (err) {
+ IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n");
+ return err;
+ }
+
+ err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send HCMD of Phy DB cfg section\n");
+ return err;
+ }
+
+ err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH,
+ &data, &size, 0);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot get Phy DB non specific channel section\n");
+ return err;
+ }
+
+ err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send HCMD of Phy DB non specific channel section\n");
+ return err;
+ }
+
+ /* Send all the PAPD channel specific data */
+ err = iwl_phy_db_send_all_channel_groups(phy_db,
+ IWL_PHY_DB_CALIB_CHG_PAPD,
+ IWL_NUM_PAPD_CH_GROUPS);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send channel specific PAPD groups");
+ return err;
+ }
+
+ /* Send all the TXP channel specific data */
+ err = iwl_phy_db_send_all_channel_groups(phy_db,
+ IWL_PHY_DB_CALIB_CHG_TXP,
+ IWL_NUM_TXP_CH_GROUPS);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send channel specific TX power groups");
+ return err;
+ }
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Finished sending phy db non channel data\n");
+ return 0;
+}
+EXPORT_SYMBOL(iwl_send_phy_db_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
new file mode 100644
index 000000000000..d0e43d96ab38
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -0,0 +1,82 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PHYDB_H__
+#define __IWL_PHYDB_H__
+
+#include <linux/types.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
+ gfp_t alloc_ctx);
+
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);
+
+#endif /* __IWL_PHYDB_H__ */
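A lifecycle sketch for the API declared above; the calling context (init vs. runtime firmware image, the pkt and err variables) is an assumption for illustration:

	struct iwl_phy_db *phy_db = iwl_phy_db_init(trans);

	if (!phy_db)
		return -ENOMEM;

	/* for each calibration result notification received while the
	 * init firmware image is running (pkt is the RX packet):
	 */
	err = iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC);

	/* once the runtime image is alive, push the stored sections to it */
	err = iwl_send_phy_db_data(phy_db);

	/* on teardown */
	iwl_phy_db_free(phy_db);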
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index c3a4bb41e533..f76e9cad7757 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,6 +97,9 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+/* Device system time */
+#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+
/**
* Tx Scheduler
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
index 81e8c7126d72..ce0c67b425ee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -466,19 +466,18 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
/* Hard-coded periphery absolute address */
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
+ if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
+ return -EIO;
+ }
iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
addr | (3 << 24));
for (i = 0; i < size; i += 4)
*(u32 *)(tst->mem.addr + i) =
iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
- iwl_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ iwl_trans_release_nic_access(trans, &flags);
} else { /* target memory (SRAM) */
- _iwl_read_targ_mem_dwords(trans, addr,
- tst->mem.addr,
- tst->mem.size / 4);
+ iwl_trans_read_mem(trans, addr, tst->mem.addr,
+ tst->mem.size / 4);
}
tst->mem.nchunks =
@@ -501,28 +500,25 @@ static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
- /* Periphery writes can be 1-3 bytes long, or DWORDs */
- if (size < 4) {
- memcpy(&val, buf, size);
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
- (addr & 0x0000FFFF) |
- ((size - 1) << 24));
- iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
- iwl_release_nic_access(trans);
- /* needed after consecutive writes w/o read */
- mmiowb();
- spin_unlock_irqrestore(&trans->reg_lock, flags);
- } else {
- if (size % 4)
- return -EINVAL;
- for (i = 0; i < size; i += 4)
- iwl_write_prph(trans, addr+i,
- *(u32 *)(buf+i));
- }
+ /* Periphery writes can be 1-3 bytes long, or DWORDs */
+ if (size < 4) {
+ memcpy(&val, buf, size);
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ return -EIO;
+ iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
+ (addr & 0x0000FFFF) |
+ ((size - 1) << 24));
+ iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ if (size % 4)
+ return -EINVAL;
+ for (i = 0; i < size; i += 4)
+ iwl_write_prph(trans, addr+i,
+ *(u32 *)(buf+i));
+ }
} else if (iwl_test_valid_hw_addr(tst, addr)) {
- _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
+ iwl_trans_write_mem(trans, addr, buf, size / 4);
} else {
return -EINVAL;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
index e13ffa8acc02..7fbf4d717caa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index 6ba211b09426..a963f45c6849 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index b76532e238c1..8c7bec6b9a0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,6 +65,7 @@
#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
+#include <linux/lockdep.h>
#include "iwl-debug.h"
#include "iwl-config.h"
@@ -193,11 +194,11 @@ struct iwl_rx_packet {
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
*/
enum CMD_MODE {
- CMD_SYNC = 0,
- CMD_ASYNC = BIT(0),
- CMD_WANT_SKB = BIT(1),
- CMD_WANT_HCMD = BIT(2),
- CMD_ON_DEMAND = BIT(3),
+ CMD_SYNC = 0,
+ CMD_ASYNC = BIT(0),
+ CMD_WANT_SKB = BIT(1),
+ CMD_WANT_HCMD = BIT(2),
+ CMD_ON_DEMAND = BIT(3),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -274,6 +275,7 @@ struct iwl_rx_cmd_buffer {
struct page *_page;
int _offset;
bool _page_stolen;
+ u32 _rx_page_order;
unsigned int truesize;
};
@@ -294,6 +296,11 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
return r->_page;
}
+static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
+{
+ __free_pages(r->_page, r->_rx_page_order);
+}
+
#define MAX_NO_RECLAIM_CMDS 6
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
@@ -308,6 +315,16 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
#define IWL_FRAME_LIMIT 64
/**
+ * enum iwl_d3_status - WoWLAN image/device status
+ * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
+ * @IWL_D3_STATUS_RESET: device was reset while suspended
+ */
+enum iwl_d3_status {
+ IWL_D3_STATUS_ALIVE,
+ IWL_D3_STATUS_RESET,
+};
+
+/**
* struct iwl_trans_config - transport configuration
*
* @op_mode: pointer to the upper layer.
@@ -321,6 +338,8 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
* @n_no_reclaim_cmds: # of commands in list
* @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
* if unset 4k will be the RX buffer size
+ * @bc_table_dword: set to true if the BC table expects the byte count to be
+ * in DWORD (as opposed to bytes)
* @queue_watchdog_timeout: time (in ms) after which queues
* are considered stuck and will trigger device restart
* @command_names: array of command names, must be 256 entries
@@ -335,6 +354,7 @@ struct iwl_trans_config {
int n_no_reclaim_cmds;
bool rx_buf_size_8k;
+ bool bc_table_dword;
unsigned int queue_watchdog_timeout;
const char **command_names;
};
@@ -360,9 +380,12 @@ struct iwl_trans;
* May sleep
* @stop_device:stops the whole device (embedded CPU put to reset)
* May sleep
- * @wowlan_suspend: put the device into the correct mode for WoWLAN during
+ * @d3_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
+ * @d3_resume: resume the device after WoWLAN, enabling the opmode to
+ * talk to the WoWLAN image to get its status. This is optional, if not
+ * implemented WoWLAN will not be supported. This callback may sleep.
* @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
* If RFkill is asserted in the middle of a SYNC host command, it must
* return -ERFKILL straight away.
@@ -387,20 +410,31 @@ struct iwl_trans;
* @read32: read a u32 register at offset ofs from the BAR
* @read_prph: read a DWORD from a periphery register
* @write_prph: write a DWORD to a periphery register
+ * @read_mem: read device's SRAM in DWORD
+ * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
+ * will be zeroed.
* @configure: configure parameters required by the transport layer from
* the op_mode. May be called several times before start_fw, can't be
* called after that.
* @set_pmi: set the power pmi state
+ * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
+ * Sleeping is not allowed between grab_nic_access and
+ * release_nic_access.
+ * @release_nic_access: let the NIC go to sleep. The "flags" parameter
+ * must be the same one that was sent before to the grab_nic_access.
+ * @set_bits_mask: set SRAM register according to value and mask.
*/
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
- int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
+ int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
+ bool run_in_rfkill);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
- void (*wowlan_suspend)(struct iwl_trans *trans);
+ void (*d3_suspend)(struct iwl_trans *trans);
+ int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -424,9 +458,19 @@ struct iwl_trans_ops {
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
+ int (*read_mem)(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
+ int (*write_mem)(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
+ bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
+ unsigned long *flags);
+ void (*release_nic_access)(struct iwl_trans *trans,
+ unsigned long *flags);
+ void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
+ u32 value);
};
/**
@@ -446,7 +490,6 @@ enum iwl_trans_state {
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @cfg - pointer to the configuration
- * @reg_lock - protect hw register access
* @dev - pointer to struct device * that represents the device
* @hw_id: a u32 with the ID of the device / subdevice.
* Set during transport allocation.
@@ -467,7 +510,6 @@ struct iwl_trans {
struct iwl_op_mode *op_mode;
const struct iwl_cfg *cfg;
enum iwl_trans_state state;
- spinlock_t reg_lock;
struct device *dev;
u32 hw_rev;
@@ -485,6 +527,10 @@ struct iwl_trans {
struct dentry *dbgfs_dir;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map sync_cmd_lockdep_map;
+#endif
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -528,13 +574,14 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw)
+ const struct fw_img *fw,
+ bool run_in_rfkill)
{
might_sleep();
WARN_ON_ONCE(!trans->rx_mpdu_cmd);
- return trans->ops->start_fw(trans, fw);
+ return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
@@ -546,19 +593,36 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
trans->state = IWL_TRANS_NO_FW;
}
-static inline void iwl_trans_wowlan_suspend(struct iwl_trans *trans)
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans)
{
might_sleep();
- trans->ops->wowlan_suspend(trans);
+ trans->ops->d3_suspend(trans);
+}
+
+static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status)
+{
+ might_sleep();
+ return trans->ops->d3_resume(trans, status);
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
+ struct iwl_host_cmd *cmd)
{
+ int ret;
+
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- return trans->ops->send_cmd(trans, cmd);
+ if (!(cmd->flags & CMD_ASYNC))
+ lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+
+ ret = trans->ops->send_cmd(trans, cmd);
+
+ if (!(cmd->flags & CMD_ASYNC))
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+
+ return ret;
}
static inline struct iwl_device_cmd *
@@ -636,7 +700,7 @@ static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
}
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
- struct dentry *dir)
+ struct dentry *dir)
{
return trans->ops->dbgfs_register(trans, dir);
}
@@ -679,15 +743,77 @@ static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
return trans->ops->write_prph(trans, ofs, val);
}
+static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ return trans->ops->read_mem(trans, addr, buf, dwords);
+}
+
+#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
+ do { \
+ if (__builtin_constant_p(bufsize)) \
+ BUILD_BUG_ON((bufsize) % sizeof(u32)); \
+ iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
+ } while (0)
+
+static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
+{
+ u32 value;
+
+ if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
+ return 0xa5a5a5a5;
+
+ return value;
+}
+
+static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ return trans->ops->write_mem(trans, addr, buf, dwords);
+}
+
+static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
+ u32 val)
+{
+ return iwl_trans_write_mem(trans, addr, &val, 1);
+}
+
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
trans->ops->set_pmi(trans, state);
}
+static inline void
+iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
+{
+ trans->ops->set_bits_mask(trans, reg, mask, value);
+}
+
+#define iwl_trans_grab_nic_access(trans, silent, flags) \
+ __cond_lock(nic_access, \
+ likely((trans)->ops->grab_nic_access(trans, silent, flags)))
+
+static inline void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
+{
+ trans->ops->release_nic_access(trans, flags);
+ __release(nic_access);
+}
+
/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
+static inline void trans_lockdep_init(struct iwl_trans *trans)
+{
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key __key;
+
+ lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+ &__key, 0);
+#endif
+}
+
#endif /* __iwl_trans_h__ */
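The grab/release pair introduced above brackets raw register access, as the iwl-test.c hunks earlier in this patch show; a condensed sketch of the pattern, for illustration only:

	unsigned long flags;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return -EIO;	/* NIC could not be woken */

	/* ... non-sleeping HBUS / periphery register accesses ... */

	iwl_trans_release_nic_access(trans, &flags);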
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
new file mode 100644
index 000000000000..807b250ec396
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_IWLMVM) += iwlmvm.o
+iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
+iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
+iwlmvm-y += scan.o time-event.o rs.o
+iwlmvm-y += power.o
+iwlmvm-y += led.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
+
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
new file mode 100644
index 000000000000..73d24aacb90a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/binding.c
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+struct iwl_mvm_iface_iterator_data {
+ struct ieee80211_vif *ignore_vif;
+ int idx;
+
+ struct iwl_mvm_phy_ctxt *phyctxt;
+
+ u16 ids[MAX_MACS_IN_BINDING];
+ u16 colors[MAX_MACS_IN_BINDING];
+};
+
+static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
+ struct iwl_mvm_iface_iterator_data *data)
+{
+ struct iwl_binding_cmd cmd;
+ struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
+ int i, ret;
+ u32 status;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+ phyctxt->color));
+ cmd.action = cpu_to_le32(action);
+ cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+ phyctxt->color));
+
+ for (i = 0; i < MAX_MACS_IN_BINDING; i++)
+ cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+ for (i = 0; i < data->idx; i++)
+ cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i],
+ data->colors[i]));
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+ sizeof(cmd), &cmd, &status);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
+ action, ret);
+ return ret;
+ }
+
+ if (status) {
+ IWL_ERR(mvm, "Binding command failed: %u\n", status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif == data->ignore_vif)
+ return;
+
+ if (mvmvif->phy_ctxt != data->phyctxt)
+ return;
+
+ if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING))
+ return;
+
+ data->ids[data->idx] = mvmvif->id;
+ data->colors[data->idx] = mvmvif->color;
+ data->idx++;
+}
+
+static int iwl_mvm_binding_update(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_phy_ctxt *phyctxt,
+ bool add)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_iface_iterator_data data = {
+ .ignore_vif = vif,
+ .phyctxt = phyctxt,
+ };
+ u32 action = FW_CTXT_ACTION_MODIFY;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_iface_iterator,
+ &data);
+
+ /*
+ * If there are no other interfaces yet we
+ * need to create a new binding.
+ */
+ if (data.idx == 0) {
+ if (add)
+ action = FW_CTXT_ACTION_ADD;
+ else
+ action = FW_CTXT_ACTION_REMOVE;
+ }
+
+ if (add) {
+ if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING))
+ return -EINVAL;
+
+ data.ids[data.idx] = mvmvif->id;
+ data.colors[data.idx] = mvmvif->color;
+ data.idx++;
+ }
+
+ return iwl_mvm_binding_cmd(mvm, action, &data);
+}
+
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
+}
+
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
new file mode 100644
index 000000000000..c64d864799cd
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -0,0 +1,955 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/cfg80211.h>
+#include <net/ipv6.h>
+#include "iwl-modparams.h"
+#include "fw-api.h"
+#include "mvm.h"
+
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (iwlwifi_mod_params.sw_crypto)
+ return;
+
+ mutex_lock(&mvm->mutex);
+
+ memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
+ memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ mvmvif->rekey_data.replay_ctr =
+ cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+ mvmvif->rekey_data.valid = true;
+
+ mutex_unlock(&mvm->mutex);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct inet6_ifaddr *ifa;
+ int idx = 0;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ mvmvif->target_ipv6_addrs[idx] = ifa->addr;
+ idx++;
+ if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ mvmvif->num_target_ipv6_addrs = idx;
+}
+#endif
+
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->tx_key_idx = idx;
+}
+
+static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
+{
+ int i;
+
+ for (i = 0; i < IWL_P1K_SIZE; i++)
+ out[i] = cpu_to_le16(p1k[i]);
+}
+
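+/*
+ * State shared with iwl_mvm_wowlan_program_keys(): collects the RSC/TSC
+ * counters and TKIP key material for the installed keys so that the caller
+ * (iwl_mvm_suspend()) can upload them to the WoWLAN firmware afterwards.
+ * The error and use_* flags tell the caller which commands need to be sent.
+ */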
+struct wowlan_key_data {
+ struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+ struct iwl_wowlan_tkip_params_cmd *tkip;
+ bool error, use_rsc_tsc, use_tkip;
+ int gtk_key_idx;
+};
+
+static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct wowlan_key_data *data = _data;
+ struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+ struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+ struct iwl_p1k_cache *rx_p1ks;
+ u8 *rx_mic_key;
+ struct ieee80211_key_seq seq;
+ u32 cur_rx_iv32 = 0;
+ u16 p1k[IWL_P1K_SIZE];
+ int ret, i;
+
+ mutex_lock(&mvm->mutex);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
+ struct {
+ struct iwl_mvm_wep_key_cmd wep_key_cmd;
+ struct iwl_mvm_wep_key wep_key;
+ } __packed wkc = {
+ .wep_key_cmd.mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .wep_key_cmd.num_keys = 1,
+ /* firmware sets STA_KEY_FLG_WEP_13BYTES */
+ .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
+ .wep_key.key_index = key->keyidx,
+ .wep_key.key_size = key->keylen,
+ };
+
+ /*
+		 * This will fail -- the key functions don't support
+		 * pairwise WEP keys. However, that's better than silently
+ * failing WoWLAN. Or maybe not?
+ */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ break;
+
+ memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
+ if (key->keyidx == mvmvif->tx_key_idx) {
+ /* TX key must be at offset 0 */
+ wkc.wep_key.key_offset = 0;
+ } else {
+ /* others start at 1 */
+ data->gtk_key_idx++;
+ wkc.wep_key.key_offset = data->gtk_key_idx;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
+ sizeof(wkc), &wkc);
+ data->error = ret != 0;
+
+ /* don't upload key again */
+ goto out_unlock;
+ }
+ default:
+ data->error = true;
+ goto out_unlock;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /*
+ * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
+ * but we also shouldn't abort suspend due to that. It does have
+ * support for the IGTK key renewal, but doesn't really use the
+ * IGTK for anything. This means we could spuriously wake up or
+ * be deauthenticated, but that was considered acceptable.
+ */
+ goto out_unlock;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+ rx_p1ks = data->tkip->rx_uni;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+ ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+ iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
+
+ memcpy(data->tkip->mic_keys.tx,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ rx_mic_key = data->tkip->mic_keys.rx_unicast;
+ } else {
+ tkip_sc =
+ data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ rx_p1ks = data->tkip->rx_multi;
+ rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+ /* wrapping isn't allowed, AP must rekey */
+ if (seq.tkip.iv32 > cur_rx_iv32)
+ cur_rx_iv32 = seq.tkip.iv32;
+ }
+
+ ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+ cur_rx_iv32, p1k);
+ iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
+ ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+ cur_rx_iv32 + 1, p1k);
+ iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+ memcpy(rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ data->use_tkip = true;
+ data->use_rsc_tsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (sta) {
+ u8 *pn = seq.ccmp.pn;
+
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ aes_tx_sc->pn = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ } else {
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 for checking the IV in the frames.
+ */
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ aes_sc->pn = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ data->use_rsc_tsc = true;
+ break;
+ }
+
+ /*
+ * The D3 firmware hardcodes the key offset 0 as the key it uses
+ * to transmit packets to the AP, i.e. the PTK.
+ */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ key->hw_key_idx = 0;
+ } else {
+ data->gtk_key_idx++;
+ key->hw_key_idx = data->gtk_key_idx;
+ }
+
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
+ data->error = ret != 0;
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .flags = CMD_SYNC,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ memcpy(&pattern_cmd->patterns[i].mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].mask_size = mask_len;
+ pattern_cmd->patterns[i].pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(pattern_cmd);
+ return err;
+}
+
+static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_proto_offload_cmd cmd = {};
+#if IS_ENABLED(CONFIG_IPV6)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int i;
+
+ if (mvmvif->num_target_ipv6_addrs) {
+ cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS);
+ memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) !=
+ sizeof(mvmvif->target_ipv6_addrs[i]));
+
+ for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++)
+ memcpy(cmd.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.target_ipv6_addr[i]));
+#endif
+
+ if (vif->bss_conf.arp_addr_cnt) {
+ cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP);
+ cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (!cmd.enabled)
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+
+struct iwl_d3_iter_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ bool error;
+};
+
+static void iwl_mvm_d3_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_d3_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return;
+
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+ return;
+
+ if (data->vif) {
+ IWL_ERR(data->mvm, "More than one managed interface active!\n");
+ data->error = true;
+ return;
+ }
+
+ data->vif = vif;
+}
+
+static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *ap_sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *ctx;
+ u8 chains_static, chains_dynamic;
+ struct cfg80211_chan_def chandef;
+ int ret, i;
+ struct iwl_binding_cmd binding_cmd = {};
+ struct iwl_time_quota_cmd quota_cmd = {};
+ u32 status;
+
+ /* add back the PHY */
+ if (WARN_ON(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(vif->chanctx_conf);
+ if (WARN_ON(!ctx)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ chandef = ctx->def;
+ chains_static = ctx->rx_chains_static;
+ chains_dynamic = ctx->rx_chains_dynamic;
+ rcu_read_unlock();
+
+ ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
+ chains_static, chains_dynamic);
+ if (ret)
+ return ret;
+
+ /* add back the MAC */
+ mvmvif->uploaded = false;
+
+ if (WARN_ON(!vif->bss_conf.assoc))
+ return -EINVAL;
+ /* hack */
+ vif->bss_conf.assoc = false;
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ vif->bss_conf.assoc = true;
+ if (ret)
+ return ret;
+
+ /* add back binding - XXX refactor? */
+ binding_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ binding_cmd.phy =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ for (i = 1; i < MAX_MACS_IN_BINDING; i++)
+ binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+ sizeof(binding_cmd), &binding_cmd,
+ &status);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
+ return ret;
+ }
+
+ if (status) {
+ IWL_ERR(mvm, "Binding command failed: %u\n", status);
+ return -EIO;
+ }
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+ if (ret)
+ return ret;
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
+
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ if (ret)
+ return ret;
+
+ /* and some quota */
+ quota_cmd.quotas[0].id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ quota_cmd.quotas[0].quota = cpu_to_le32(100);
+ quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);
+
+ for (i = 1; i < MAX_BINDINGS; i++)
+ quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+ sizeof(quota_cmd), &quota_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+
+ return 0;
+}
+
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_d3_iter_data suspend_iter_data = {
+ .mvm = mvm,
+ };
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+ struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ struct iwl_d3_manager_config d3_cfg_cmd = {};
+ struct wowlan_key_data key_data = {
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ int ret, i;
+ u16 seq;
+ u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ if (WARN_ON(!wowlan))
+ return -EINVAL;
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ old_aux_sta_id = mvm->aux_sta.sta_id;
+
+ /* see if there's only a single BSS vif and it's associated */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_iface_iterator, &suspend_iter_data);
+
+ if (suspend_iter_data.error || !suspend_iter_data.vif) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ vif = suspend_iter_data.vif;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta)) {
+ ret = -EINVAL;
+ goto out_noreset;
+ }
+
+ mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+
+ /*
+ * The D3 firmware still hardcodes the AP station ID for the
+ * BSS we're associated with as 0. Store the real STA ID here
+ * and assign 0. When we leave this function, we'll restore
+ * the original value for the resume code.
+ */
+ old_ap_sta_id = mvm_ap_sta->sta_id;
+ mvm_ap_sta->sta_id = 0;
+ mvmvif->ap_sta_id = 0;
+
+ /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
+
+ wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
+
+ /*
+	 * We know the last used seqno, and the uCode expects to know that
+	 * one as well; it will increment it before TX.
+ */
+ seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ;
+ wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
+
+ /*
+	 * For QoS counters, we store the value to use next, so subtract 0x10:
+	 * the uCode adds 0x10 *before* using the value, whereas we increment
+	 * after using it (i.e. we store the next value to use).
+ */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ seq = mvm_ap_sta->tid_data[i].seq_number;
+ seq -= 0x10;
+ wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
+ }
+
+ if (wowlan->disconnect)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ d3_cfg_cmd.wakeup_flags |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ iwl_mvm_cancel_scan(mvm);
+
+ iwl_trans_stop_device(mvm->trans);
+
+ /*
+ * Set the HW restart bit -- this is mostly true as we're
+ * going to load new firmware and reprogram that, though
+ * the reprogramming is going to be manual to avoid adding
+	 * all the MACs that aren't supported.
+ * We don't have to clear up everything though because the
+ * reprogramming is manual. When we resume, we'll actually
+ * go through a proper restart sequence again to switch
+ * back to the runtime firmware image.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ /* We reprogram keys and shouldn't allocate new key indices */
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+
+ /*
+ * The D3 firmware still hardcodes the AP station ID for the
+ * BSS we're associated with as 0. As a result, we have to move
+	 * the auxiliary station to ID 1 so that ID 0 remains free for
+	 * the AP station later.
+ * We set the sta_id to 1 here, and reset it to its previous
+ * value (that we stored above) later.
+ */
+ mvm->aux_sta.sta_id = 1;
+
+ ret = iwl_mvm_load_d3_fw(mvm);
+ if (ret)
+ goto out;
+
+ ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+ if (ret)
+ goto out;
+
+ if (!iwlwifi_mod_params.sw_crypto) {
+ /*
+ * This needs to be unlocked due to lock ordering
+ * constraints. Since we're in the suspend path
+ * that isn't really a problem though.
+ */
+ mutex_unlock(&mvm->mutex);
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_wowlan_program_keys,
+ &key_data);
+ mutex_lock(&mvm->mutex);
+ if (key_data.error) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (key_data.use_rsc_tsc) {
+ struct iwl_host_cmd rsc_tsc_cmd = {
+ .id = WOWLAN_TSC_RSC_PARAM,
+ .flags = CMD_SYNC,
+ .data[0] = key_data.rsc_tsc,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .len[0] = sizeof(*key_data.rsc_tsc),
+ };
+
+ ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (key_data.use_tkip) {
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WOWLAN_TKIP_PARAM,
+ CMD_SYNC, sizeof(tkip_cmd),
+ &tkip_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (mvmvif->rekey_data.valid) {
+ memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
+ NL80211_KCK_LEN);
+ kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
+ NL80211_KEK_LEN);
+ kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WOWLAN_KEK_KCK_MATERIAL,
+ CMD_SYNC,
+ sizeof(kek_kck_cmd),
+ &kek_kck_cmd);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
+ CMD_SYNC, sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ goto out;
+
+ ret = iwl_mvm_send_patterns(mvm, wowlan);
+ if (ret)
+ goto out;
+
+ ret = iwl_mvm_send_proto_offload(mvm, vif);
+ if (ret)
+ goto out;
+
+ /* must be last -- this switches firmware state */
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+ if (ret)
+ goto out;
+
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ iwl_trans_d3_suspend(mvm->trans);
+ out:
+ mvm->aux_sta.sta_id = old_aux_sta_id;
+ mvm_ap_sta->sta_id = old_ap_sta_id;
+ mvmvif->ap_sta_id = old_ap_sta_id;
+ out_noreset:
+ kfree(key_data.rsc_tsc);
+ if (ret < 0)
+ ieee80211_restart_hw(mvm->hw);
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 base = mvm->error_event_table;
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status *status;
+ u32 reasons;
+ int ret, len;
+ bool pkt8023 = false;
+ struct sk_buff *pkt = NULL;
+
+ iwl_trans_read_mem_bytes(mvm->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid) {
+ IWL_INFO(mvm, "error table is valid (%d)\n",
+ err_info.valid);
+ if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ wakeup.rfkill_release = true;
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+ return;
+ }
+
+ /* only for tracing for now */
+ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+ return;
+ }
+
+ /* RF-kill already asserted again... */
+ if (!cmd.resp_pkt)
+ return;
+
+ len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ goto out;
+ }
+
+ status = (void *)cmd.resp_pkt->data;
+
+ if (len - sizeof(struct iwl_cmd_header) !=
+ sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ goto out;
+ }
+
+ reasons = le32_to_cpu(status->wakeup_reasons);
+
+ if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ wakeup_report = NULL;
+ goto report;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
+ wakeup.magic_pkt = true;
+ pkt8023 = true;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
+ wakeup.pattern_idx =
+ le16_to_cpu(status->pattern_number);
+ pkt8023 = true;
+ }
+
+ if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+ wakeup.disconnect = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
+ wakeup.gtk_rekey_failure = true;
+ pkt8023 = true;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+ wakeup.rfkill_release = true;
+ pkt8023 = true;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
+ wakeup.eap_identity_req = true;
+ pkt8023 = true;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
+ wakeup.four_way_handshake = true;
+ pkt8023 = true;
+ }
+
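+	/*
+	 * pkt8023 marks wakeup reasons whose wake packet is treated as a
+	 * data frame: it is converted to 802.3 before being reported to
+	 * userspace; otherwise the raw 802.11 frame is reported as-is.
+	 */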
+ if (status->wake_packet_bufsize) {
+ u32 pktsize = le32_to_cpu(status->wake_packet_bufsize);
+ u32 pktlen = le32_to_cpu(status->wake_packet_length);
+
+ if (pkt8023) {
+ pkt = alloc_skb(pktsize, GFP_KERNEL);
+ if (!pkt)
+ goto report;
+ memcpy(skb_put(pkt, pktsize), status->wake_packet,
+ pktsize);
+ if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+ goto report;
+ wakeup.packet = pkt->data;
+ wakeup.packet_present_len = pkt->len;
+ wakeup.packet_len = pkt->len - (pktlen - pktsize);
+ wakeup.packet_80211 = false;
+ } else {
+ wakeup.packet = status->wake_packet;
+ wakeup.packet_present_len = pktsize;
+ wakeup.packet_len = pktlen;
+ wakeup.packet_80211 = true;
+ }
+ }
+
+ report:
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ kfree_skb(pkt);
+
+ out:
+ iwl_free_resp(&cmd);
+}
+
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_d3_iter_data resume_iter_data = {
+ .mvm = mvm,
+ };
+ struct ieee80211_vif *vif = NULL;
+ int ret;
+ enum iwl_d3_status d3_status;
+
+ mutex_lock(&mvm->mutex);
+
+ /* get the BSS vif pointer again */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_iface_iterator, &resume_iter_data);
+
+ if (WARN_ON(resume_iter_data.error || !resume_iter_data.vif))
+ goto out_unlock;
+
+ vif = resume_iter_data.vif;
+
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status);
+ if (ret)
+ goto out_unlock;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mvm, "Device was reset during suspend\n");
+ goto out_unlock;
+ }
+
+ iwl_mvm_query_wakeup_reasons(mvm, vif);
+
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ if (vif)
+ ieee80211_resume_disconnect(vif);
+
+ /* return 1 to reconfigure the device */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ return 1;
+}
+
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ device_set_wakeup_enable(mvm->trans->dev, enabled);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
new file mode 100644
index 000000000000..c1bdb5582126
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -0,0 +1,378 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "sta.h"
+#include "iwl-io.h"
+
+struct iwl_dbgfs_mvm_ctx {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+};
+
+static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+
+ char buf[16];
+ int buf_size, ret;
+ u32 scd_q_msk;
+
+ if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &scd_q_msk) != 1)
+ return -EINVAL;
+
+ IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct ieee80211_sta *sta;
+
+ char buf[8];
+ int buf_size, sta_id, drain, ret;
+
+ if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ ret = -ENOENT;
+ else
+ ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
+ count;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ const struct fw_img *img;
+ int ofs, len, pos = 0;
+ size_t bufsz, ret;
+ char *buf;
+ u8 *ptr;
+
+ /* default is to dump the entire data segment */
+ if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
+ mvm->dbgfs_sram_offset = 0x800000;
+ if (!mvm->ucode_loaded)
+ return -EINVAL;
+ img = &mvm->fw->img[mvm->cur_ucode];
+ mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ }
+ len = mvm->dbgfs_sram_len;
+
+ bufsz = len * 4 + 256;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ptr = kzalloc(len, GFP_KERNEL);
+ if (!ptr) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+ mvm->dbgfs_sram_offset);
+
+ iwl_trans_read_mem_bytes(mvm->trans,
+ mvm->dbgfs_sram_offset,
+ ptr, len);
+ for (ofs = 0; ofs < len; ofs += 16) {
+ pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
+ bufsz - pos, false);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+ kfree(buf);
+ kfree(ptr);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_sram_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[64];
+ int buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
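+	/*
+	 * Input is "<offset>,<len>" in hex and selects the window dumped by
+	 * the read handler; both values must be 4-byte aligned. Any other
+	 * input resets to the default (dump the whole data segment).
+	 */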
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ if ((offset & 0x3) || (len & 0x3))
+ return -EINVAL;
+ mvm->dbgfs_sram_offset = offset;
+ mvm->dbgfs_sram_len = len;
+ } else {
+ mvm->dbgfs_sram_offset = 0;
+ mvm->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct ieee80211_sta *sta;
+ char buf[400];
+ int i, pos = 0, bufsz = sizeof(buf);
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (!sta)
+ pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
+ else if (IS_ERR(sta))
+ pos += scnprintf(buf + pos, bufsz - pos, "%ld\n",
+ PTR_ERR(sta));
+ else
+ pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[8] = {};
+ int allow;
+
+ if (!mvm->ucode_loaded)
+ return -EIO;
+
+ if (copy_from_user(buf, user_buf, sizeof(buf)))
+ return -EFAULT;
+
+ if (sscanf(buf, "%d", &allow) != 1)
+ return -EINVAL;
+
+ IWL_DEBUG_POWER(mvm, "%s device power down\n",
+ allow ? "allow" : "prevent");
+
+ /*
+	 * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW supports it
+ */
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[8] = {};
+ int allow;
+
+ if (copy_from_user(buf, user_buf, sizeof(buf)))
+ return -EFAULT;
+
+ if (sscanf(buf, "%d", &allow) != 1)
+ return -EINVAL;
+
+ IWL_DEBUG_POWER(mvm, "%s device power down in d3\n",
+ allow ? "allow" : "prevent");
+
+ /*
+	 * TODO: When the WoWLAN FW alive notification happens, the driver will
+	 * send REPLY_DEBUG_CMD, setting the power_down_allow flag according to
+ * mvm->prevent_power_down_d3
+ */
+ mvm->prevent_power_down_d3 = !allow;
+
+ return count;
+}
+
+#define MVM_DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, mvm, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
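+
+/*
+ * The *_FILE_OPS macros above define the file_operations for a debugfs
+ * entry, named iwl_dbgfs_<name>_ops; MVM_DEBUGFS_ADD_FILE and
+ * MVM_DEBUGFS_ADD_FILE_VIF create the debugfs files themselves, passing
+ * the mvm (or vif) pointer as the file's private data.
+ */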
+
+/* Device wide debugfs entries */
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
+MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
+MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+{
+ char buf[100];
+
+ mvm->debugfs_dir = dbgfs_dir;
+
+ MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+
+ /*
+ * Create a symlink with mac80211. It will be removed when mac80211
+	 * exits (before the opmode exits, which removes the target).
+ */
+ snprintf(buf, 100, "../../%s/%s",
+ dbgfs_dir->d_parent->d_parent->d_name.name,
+ dbgfs_dir->d_parent->d_name.name);
+ if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
+ goto err;
+
+ return 0;
+err:
+ IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
+ return -ENOMEM;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
new file mode 100644
index 000000000000..cf6f9a02fb74
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -0,0 +1,282 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_d3_h__
+#define __fw_api_d3_h__
+
+/**
+ * enum iwl_d3_wakeup_flags - D3 manager wakeup flags
+ * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
+ */
+enum iwl_d3_wakeup_flags {
+ IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
+}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */
+
+/**
+ * struct iwl_d3_manager_config - D3 manager configuration command
+ * @min_sleep_time: minimum sleep time (in usec)
+ * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
+ *
+ * The structure is used for the D3_CONFIG_CMD command.
+ */
+struct iwl_d3_manager_config {
+ __le32 min_sleep_time;
+ __le32 wakeup_flags;
+} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */
+
+
+/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
+
+/**
+ * enum iwl_proto_offloads - enabled protocol offloads
+ * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
+ * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
+ */
+enum iwl_proto_offloads {
+ IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
+ IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
+};
+
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS 2
+
+/**
+ * struct iwl_proto_offload_cmd - ARP/NS offload configuration
+ * @enabled: enable flags
+ * @remote_ipv4_addr: remote address to answer to (or zero if all)
+ * @host_ipv4_addr: our IPv4 address to respond to queries for
+ * @arp_mac_addr: our MAC address for ARP responses
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ * for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd {
+ __le32 enabled;
+ __be32 remote_ipv4_addr;
+ __be32 host_ipv4_addr;
+ u8 arp_mac_addr[ETH_ALEN];
+ __le16 reserved1;
+
+ u8 remote_ipv6_addr[16];
+ u8 solicited_node_ipv6_addr[16];
+ u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16];
+ u8 ndp_mac_addr[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
+
+
+/*
+ * WOWLAN_PATTERNS
+ */
+#define IWL_WOWLAN_MIN_PATTERN_LEN 16
+#define IWL_WOWLAN_MAX_PATTERN_LEN 128
+
+struct iwl_wowlan_pattern {
+ u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+ u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
+ u8 mask_size;
+ u8 pattern_size;
+ __le16 reserved;
+} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */
+
+#define IWL_WOWLAN_MAX_PATTERNS 20
+
+struct iwl_wowlan_patterns_cmd {
+ __le32 n_patterns;
+ struct iwl_wowlan_pattern patterns[];
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
+
+enum iwl_wowlan_wakeup_filters {
+ IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
+ IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
+ IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
+ IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
+ IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
+ IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
+ IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7),
+ IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
+ IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
+ IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
+ /* BIT(11) reserved */
+ IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
+}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
+
+struct iwl_wowlan_config_cmd {
+ __le32 wakeup_filter;
+ __le16 non_qos_seq;
+ __le16 qos_seq[8];
+ u8 wowlan_ba_teardown_tids;
+ u8 is_11n_connection;
+} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
+
+/*
+ * WOWLAN_TSC_RSC_PARAMS
+ */
+#define IWL_NUM_RSC 16
+
+struct tkip_sc {
+ __le16 iv16;
+ __le16 pad;
+ __le32 iv32;
+} __packed; /* TKIP_SC_API_U_VER_1 */
+
+struct iwl_tkip_rsc_tsc {
+ struct tkip_sc unicast_rsc[IWL_NUM_RSC];
+ struct tkip_sc multicast_rsc[IWL_NUM_RSC];
+ struct tkip_sc tsc;
+} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */
+
+struct aes_sc {
+ __le64 pn;
+} __packed; /* TKIP_AES_SC_API_U_VER_1 */
+
+struct iwl_aes_rsc_tsc {
+ struct aes_sc unicast_rsc[IWL_NUM_RSC];
+ struct aes_sc multicast_rsc[IWL_NUM_RSC];
+ struct aes_sc tsc;
+} __packed; /* AES_TSC_RSC_API_S_VER_1 */
+
+union iwl_all_tsc_rsc {
+ struct iwl_tkip_rsc_tsc tkip;
+ struct iwl_aes_rsc_tsc aes;
+}; /* ALL_TSC_RSC_API_S_VER_2 */
+
+struct iwl_wowlan_rsc_tsc_params_cmd {
+ union iwl_all_tsc_rsc all_tsc_rsc;
+} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
+
+#define IWL_MIC_KEY_SIZE 8
+struct iwl_mic_keys {
+ u8 tx[IWL_MIC_KEY_SIZE];
+ u8 rx_unicast[IWL_MIC_KEY_SIZE];
+ u8 rx_mcast[IWL_MIC_KEY_SIZE];
+} __packed; /* MIC_KEYS_API_S_VER_1 */
+
+#define IWL_P1K_SIZE 5
+struct iwl_p1k_cache {
+ __le16 p1k[IWL_P1K_SIZE];
+} __packed;
+
+#define IWL_NUM_RX_P1K_CACHE 2
+
+struct iwl_wowlan_tkip_params_cmd {
+ struct iwl_mic_keys mic_keys;
+ struct iwl_p1k_cache tx;
+ struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+ struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
+
+#define IWL_KCK_MAX_SIZE 32
+#define IWL_KEK_MAX_SIZE 32
+
+struct iwl_wowlan_kek_kck_material_cmd {
+ u8 kck[IWL_KCK_MAX_SIZE];
+ u8 kek[IWL_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
+
+#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
+
+enum iwl_wowlan_rekey_status {
+ IWL_WOWLAN_REKEY_POST_REKEY = 0,
+ IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
+}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */
+
+enum iwl_wowlan_wakeup_reason {
+ IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0,
+ IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0),
+ IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1),
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2),
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3),
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4),
+ IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5),
+ IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6),
+ IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7),
+ IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
+}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
+
+struct iwl_wowlan_status {
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 rekey_status;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
+
+/* TODO: NetDetect API */
+
+#endif /* __fw_api_d3_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
new file mode 100644
index 000000000000..ae39b7dfda7b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -0,0 +1,369 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_mac_h__
+#define __fw_api_mac_h__
+
+/*
+ * The first MAC indices (starting from 0)
+ * are available to the driver, AUX follows
+ */
+#define MAC_INDEX_AUX 4
+#define MAC_INDEX_MIN_DRIVER 0
+#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
+
+#define AC_NUM 4 /* Number of access categories */
+
+/**
+ * enum iwl_mac_protection_flags - MAC context flags
+ * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames;
+ * this will require CCK RTS/CTS2self.
+ * RTS/CTS will protect full burst time.
+ * @MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+enum iwl_mac_protection_flags {
+ MAC_PROT_FLG_TGG_PROTECT = BIT(3),
+ MAC_PROT_FLG_HT_PROT = BIT(23),
+ MAC_PROT_FLG_FAT_PROT = BIT(24),
+ MAC_PROT_FLG_SELF_CTS_EN = BIT(30),
+};
+
+#define MAC_FLG_SHORT_SLOT BIT(4)
+#define MAC_FLG_SHORT_PREAMBLE BIT(5)
+
+/**
+ * enum iwl_mac_types - Supported MAC types
+ * @FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @FW_MAC_TYPE_IBSS: IBSS
+ * @FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @FW_MAC_TYPE_P2P_STA: P2P client
+ * @FW_MAC_TYPE_GO: P2P GO
+ * @FW_MAC_TYPE_TEST: ?
+ * @FW_MAC_TYPE_MAX: highest supported MAC type
+ */
+enum iwl_mac_types {
+ FW_MAC_TYPE_FIRST = 1,
+ FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST,
+ FW_MAC_TYPE_LISTENER,
+ FW_MAC_TYPE_PIBSS,
+ FW_MAC_TYPE_IBSS,
+ FW_MAC_TYPE_BSS_STA,
+ FW_MAC_TYPE_P2P_DEVICE,
+ FW_MAC_TYPE_P2P_STA,
+ FW_MAC_TYPE_GO,
+ FW_MAC_TYPE_TEST,
+ FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST
+}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwl_tsf_id - TSF hw timer ID
+ * @TSF_ID_A: use TSF A
+ * @TSF_ID_B: use TSF B
+ * @TSF_ID_C: use TSF C
+ * @TSF_ID_D: use TSF D
+ * @NUM_TSF_IDS: number of TSF timers available
+ */
+enum iwl_tsf_id {
+ TSF_ID_A = 0,
+ TSF_ID_B = 1,
+ TSF_ID_C = 2,
+ TSF_ID_D = 3,
+ NUM_TSF_IDS = 4,
+}; /* TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @dtim_interval: dtim transmit time in TU
+ * @dtim_reciprocal: 2^32 / dtim_interval
+ * @mcast_qid: queue ID for multicast traffic
+ * @beacon_template: beacon template ID
+ */
+struct iwl_mac_data_ap {
+ __le32 beacon_time;
+ __le64 beacon_tsf;
+ __le32 bi;
+ __le32 bi_reciprocal;
+ __le32 dtim_interval;
+ __le32 dtim_reciprocal;
+ __le32 mcast_qid;
+ __le32 beacon_template;
+} __packed; /* AP_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ */
+struct iwl_mac_data_ibss {
+ __le32 beacon_time;
+ __le64 beacon_tsf;
+ __le32 bi;
+ __le32 bi_reciprocal;
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @bi_reciprocal: 2^32 / bi , applicable only when associated
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ */
+struct iwl_mac_data_sta {
+ __le32 is_assoc;
+ __le32 dtim_time;
+ __le64 dtim_tsf;
+ __le32 bi;
+ __le32 bi_reciprocal;
+ __le32 dtim_interval;
+ __le32 dtim_reciprocal;
+ __le32 listen_interval;
+ __le32 assoc_id;
+ __le32 assoc_beacon_arrive_time;
+} __packed; /* STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwl_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicates that opportunistic PS is allowed
+ */
+struct iwl_mac_data_go {
+ struct iwl_mac_data_ap ap;
+ __le32 ctwin;
+ __le32 opp_ps_enabled;
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwl_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ */
+struct iwl_mac_data_p2p_sta {
+ struct iwl_mac_data_sta sta;
+ __le32 ctwin;
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwl_mac_data_pibss {
+ __le32 stats_interval;
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ * other channels as well. This should be set to true only when the
+ * device is discoverable and there is an active GO. Note that setting this
+ * field when not needed will increase the number of interrupts and have an
+ * effect on platform power, as this setting opens the Rx filters on
+ * all MACs.
+ */
+struct iwl_mac_data_p2p_dev {
+ __le32 is_disc_extended;
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwl_mac_filter_flags - MAC context filter flags
+ * @MAC_FILTER_IN_PROMISC: accept all data frames
+ * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ * control frames to the host
+ * @MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ * (in station mode when associated)
+ * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+enum iwl_mac_filter_flags {
+ MAC_FILTER_IN_PROMISC = BIT(0),
+ MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1),
+ MAC_FILTER_ACCEPT_GRP = BIT(2),
+ MAC_FILTER_DIS_DECRYPT = BIT(3),
+ MAC_FILTER_DIS_GRP_DECRYPT = BIT(4),
+ MAC_FILTER_IN_BEACON = BIT(6),
+ MAC_FILTER_OUT_BCAST = BIT(8),
+ MAC_FILTER_IN_CRC32 = BIT(11),
+ MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
+};
+
+/**
+ * enum iwl_mac_qos_flags - QoS flags
+ * @MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @MAC_QOS_FLG_TGN: HT is enabled
+ * @MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+enum iwl_mac_qos_flags {
+ MAC_QOS_FLG_UPDATE_EDCA = BIT(0),
+ MAC_QOS_FLG_TGN = BIT(1),
+ MAC_QOS_FLG_TXOP_TYPE = BIT(4),
+};
+
+/**
+ * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwl_qosparam_cmd.
+ *
+ * The device will automatically increase the contention window to (2*CW) + 1
+ * for each transmission retry. The device uses cw_max as a bit mask, ANDed
+ * with the new CW value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+ __le16 cw_min;
+ __le16 cw_max;
+ u8 aifsn;
+ u8 fifos_mask;
+ __le16 edca_txop;
+} __packed; /* AC_QOS_API_S_VER_2 */
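+
+/*
+ * Illustrative sketch (driver side, not part of the firmware API): the
+ * CW doubling and capping behaviour described above, with cw_max used
+ * as a bit mask on the doubled value.
+ */
+static inline u16 iwl_ac_qos_next_cw(u16 cw, u16 cw_max)
+{
+ return ((cw << 1) | 1) & cw_max;
+}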
+
+/**
+ * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @mac_type: one of FW_MAC_TYPE_*
+ * @tsf_id: TSF HW timer, one of TSF_ID_*
+ * @node_addr: MAC address
+ * @bssid_addr: BSSID
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of MAC_PROT_FLG_FLAG_*
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of MAC_FILTER_*
+ * @qos_flags: from MAC_QOS_FLG_*
+ * @ac: one iwl_mac_qos configuration for each AC
+ * @mac_specific: one of struct iwl_mac_data_*, according to mac_type
+ */
+struct iwl_mac_ctx_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+ __le32 mac_type;
+ __le32 tsf_id;
+ u8 node_addr[6];
+ __le16 reserved_for_node_addr;
+ u8 bssid_addr[6];
+ __le16 reserved_for_bssid_addr;
+ __le32 cck_rates;
+ __le32 ofdm_rates;
+ __le32 protection_flags;
+ __le32 cck_short_preamble;
+ __le32 short_slot;
+ __le32 filter_flags;
+ /* MAC_QOS_PARAM_API_S_VER_1 */
+ __le32 qos_flags;
+ struct iwl_ac_qos ac[AC_NUM+1];
+ /* MAC_CONTEXT_COMMON_DATA_API_S */
+ union {
+ struct iwl_mac_data_ap ap;
+ struct iwl_mac_data_go go;
+ struct iwl_mac_data_sta sta;
+ struct iwl_mac_data_p2p_sta p2p_sta;
+ struct iwl_mac_data_p2p_dev p2p_dev;
+ struct iwl_mac_data_pibss pibss;
+ struct iwl_mac_data_ibss ibss;
+ };
+} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
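+
+/*
+ * Usage sketch (illustrative only): adding a P2P GO MAC context.
+ * FW_CTXT_ACTION_ADD and FW_MAC_TYPE_GO are assumed to be provided by
+ * fw-api.h; mac_id_n_color, addr and ctwin are example variables.
+ *
+ * struct iwl_mac_ctx_cmd cmd = {};
+ *
+ * cmd.id_and_color = cpu_to_le32(mac_id_n_color);
+ * cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ * cmd.mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
+ * memcpy(cmd.node_addr, addr, sizeof(cmd.node_addr));
+ * cmd.filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+ * cmd.go.ctwin = cpu_to_le32(ctwin);
+ * cmd.go.opp_ps_enabled = cpu_to_le32(1);
+ */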
+
+static inline u32 iwl_mvm_reciprocal(u32 v)
+{
+ if (!v)
+ return 0;
+ return 0xFFFFFFFF / v;
+}
+
+#endif /* __fw_api_mac_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
new file mode 100644
index 000000000000..be36b7604b7f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_power_h__
+#define __fw_api_power_h__
+
+/* Power Management Commands, Responses, Notifications */
+
+/**
+ * enum iwl_power_flags - masks for power table command flags
+ * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ * '1' Driver enables PM (use rest of parameters)
+ * @POWER_FLAGS_SLEEP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ * '1' PM may sleep over DTIM until the listen interval.
+ * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW Snoozing enabled
+ * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+*/
+enum iwl_power_flags {
+ POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(0),
+ POWER_FLAGS_SLEEP_OVER_DTIM_MSK = BIT(1),
+ POWER_FLAGS_LPRX_ENA_MSK = BIT(2),
+ POWER_FLAGS_SNOOZE_ENA_MSK = BIT(3),
+ POWER_FLAGS_BT_SCO_ENA = BIT(4),
+ POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(5)
+};
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @id_and_color: MAC context identifier
+ * @action: Action on context - no action, add new,
+ * modify existing, remove
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed: 3 * DTIM
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80 dBm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: TBD
+ * @snooze_window: TBD
+ * @snooze_step: TBD
+ * @qndp_tid: TBD
+ * @uapsd_ac_flags: TBD
+ * @uapsd_max_sp: TBD
+ */
+struct iwl_powertable_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ __le16 flags;
+ u8 reserved;
+ __le16 keep_alive_seconds;
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 rx_data_timeout_uapsd;
+ __le32 tx_data_timeout_uapsd;
+ u8 lprx_rssi_threshold;
+ u8 num_skip_dtim;
+ __le16 snooze_interval;
+ __le16 snooze_window;
+ u8 snooze_step;
+ u8 qndp_tid;
+ u8 uapsd_ac_flags;
+ u8 uapsd_max_sp;
+} __packed;
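+
+/*
+ * Illustrative sketch (not part of the firmware API): enabling basic
+ * power management with sleep-over-DTIM. The keep-alive and timeout
+ * values below are example numbers only.
+ */
+static inline void iwl_power_sketch_fill(struct iwl_powertable_cmd *cmd)
+{
+ cmd->flags = cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK |
+ POWER_FLAGS_SLEEP_OVER_DTIM_MSK);
+ cmd->keep_alive_seconds = cpu_to_le16(25);
+ cmd->rx_data_timeout = cpu_to_le32(100 * 1000); /* 100 ms, in usec */
+ cmd->tx_data_timeout = cpu_to_le32(100 * 1000); /* 100 ms, in usec */
+ cmd->num_skip_dtim = 2;
+}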
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
new file mode 100644
index 000000000000..aa3474d08231
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -0,0 +1,312 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_rs_h__
+#define __fw_api_rs_h__
+
+#include "fw-api-mac.h"
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ */
+enum {
+ IWL_RATE_1M_INDEX = 0,
+ IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+ IWL_RATE_2M_INDEX,
+ IWL_RATE_5M_INDEX,
+ IWL_RATE_11M_INDEX,
+ IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+ IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX,
+ IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX,
+ IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX,
+ IWL_RATE_54M_INDEX,
+ IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
+ IWL_RATE_60M_INDEX,
+ IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+ IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
+ IWL_RATE_COUNT,
+};
+
+#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+enum {
+ IWL_RATE_6M_PLCP = 13,
+ IWL_RATE_9M_PLCP = 15,
+ IWL_RATE_12M_PLCP = 5,
+ IWL_RATE_18M_PLCP = 7,
+ IWL_RATE_24M_PLCP = 9,
+ IWL_RATE_36M_PLCP = 11,
+ IWL_RATE_48M_PLCP = 1,
+ IWL_RATE_54M_PLCP = 3,
+ IWL_RATE_1M_PLCP = 10,
+ IWL_RATE_2M_PLCP = 20,
+ IWL_RATE_5M_PLCP = 55,
+ IWL_RATE_11M_PLCP = 110,
+};
+
+/*
+ * rate_n_flags bit fields
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define RATE_MCS_VHT_POS 26
+#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS)
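+
+/*
+ * Illustrative helpers (not part of the firmware API): how the three
+ * format bits above distinguish HT, VHT and legacy CCK rates; a value
+ * with none of these bits set is a legacy OFDM rate.
+ */
+static inline bool iwl_rate_format_is_ht(u32 rate_n_flags)
+{
+ return rate_n_flags & RATE_MCS_HT_MSK;
+}
+
+static inline bool iwl_rate_format_is_vht(u32 rate_n_flags)
+{
+ return rate_n_flags & RATE_MCS_VHT_MSK;
+}
+
+static inline bool iwl_rate_format_is_cck(u32 rate_n_flags)
+{
+ return !(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)) &&
+ (rate_n_flags & RATE_MCS_CCK_MSK);
+}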
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ * 2-0: MCS rate base
+ * 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ * (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define RATE_HT_MCS_RATE_CODE_MSK 0x7
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_HT_MCS_GF_POS 10
+#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS)
+
+#define RATE_HT_MCS_INDEX_MSK 0x3f
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ * 3-0: VHT MCS (0-9)
+ * 5-4: number of streams - 1:
+ * 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2, (2) MIMO3 */
+#define RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define RATE_VHT_MCS_NSS_POS 4
+#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS)
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ * (bit 7 is 0)
+ */
+#define RATE_LEGACY_RATE_MSK 0xff
+
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define RATE_MCS_CHAN_WIDTH_POS 11
+#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS 13
+#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
+#define RATE_MCS_ANT_POS 14
+#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \
+ RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \
+ RATE_MCS_ANT_C_MSK)
+#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK
+#define RATE_MCS_ANT_NUM 3
+
+/* Bit 17-18: (0) SS, (1) SS*2 */
+#define RATE_MCS_STBC_POS 17
+#define RATE_MCS_STBC_MSK (1 << RATE_MCS_STBC_POS)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define RATE_MCS_BF_POS 19
+#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
+
+/* Bit 20: (0) ZLF is off, (1) ZLF is on */
+#define RATE_MCS_ZLF_POS 20
+#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
+#define RATE_MCS_DUP_POS 24
+#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define RATE_MCS_LDPC_POS 27
+#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
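+
+/*
+ * Illustrative sketch (not part of the firmware API): composing a
+ * rate_n_flags value for a 2-stream VHT MCS 7 at 80 MHz with a short
+ * guard interval on antenna A, using only the masks defined above.
+ */
+static inline u32 iwl_rate_sketch_vht_mcs7_2ss(void)
+{
+ return RATE_MCS_VHT_MSK |
+ (7 & RATE_VHT_MCS_RATE_CODE_MSK) |
+ (1 << RATE_VHT_MCS_NSS_POS) | /* NSS - 1 = 1, i.e. two streams */
+ RATE_MCS_CHAN_WIDTH_80 |
+ RATE_MCS_SGI_MSK |
+ RATE_MCS_ANT_A_MSK;
+}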
+
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags, only this one is available */
+#define LQ_FLAG_SET_STA_TLC_RTS_MSK BIT(0)
+
+/**
+ * struct iwl_lq_cmd - link quality command
+ * @sta_id: station to update
+ * @control: not used
+ * @flags: combination of LQ_FLAG_*
+ * @mimo_delim: the first SISO index in rs_table, which separates MIMO
+ * and SISO rates
+ * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD).
+ * Should be ANT_[ABC]
+ * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC]
+ * @initial_rate_index: first index from rs_table per AC category
+ * @agg_time_limit: aggregation max time threshold in usec/100, meaning
+ * value of 100 is one usec. Range is 100 to 8000
+ * @agg_disable_start_th: try-count threshold for starting aggregation.
+ * If a frame has higher try-count, it should not be selected for
+ * starting an aggregation sequence.
+ * @agg_frame_cnt_limit: max frame count in an aggregation.
+ * 0: no limit
+ * 1: no aggregation (one frame per aggregation)
+ * 2 - 0x3f: maximal number of frames (up to 3f == 63)
+ * @rs_table: array of rates for each TX try, each is rate_n_flags,
+ * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
+ * @bf_params: beam forming params, currently not used
+ */
+struct iwl_lq_cmd {
+ u8 sta_id;
+ u8 reserved1;
+ u16 control;
+ /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
+ u8 flags;
+ u8 mimo_delim;
+ u8 single_stream_ant_msk;
+ u8 dual_stream_ant_msk;
+ u8 initial_rate_index[AC_NUM];
+ /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
+ __le16 agg_time_limit;
+ u8 agg_disable_start_th;
+ u8 agg_frame_cnt_limit;
+ __le32 reserved2;
+ __le32 rs_table[LQ_MAX_RETRY_NUM];
+ __le32 bf_params;
+}; /* LINK_QUALITY_CMD_API_S_VER_1 */
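+
+/*
+ * Illustrative sketch (not part of the firmware API): filling the retry
+ * table with a single legacy 6 Mbps OFDM rate on antenna A for every
+ * try. A real rate-scaling implementation would build a descending
+ * rate ladder here instead.
+ */
+static inline void iwl_lq_sketch_fill_rs_table(struct iwl_lq_cmd *lq)
+{
+ int i;
+
+ for (i = 0; i < LQ_MAX_RETRY_NUM; i++)
+ lq->rs_table[i] = cpu_to_le32(RATE_MCS_ANT_A_MSK |
+ IWL_RATE_6M_PLCP);
+}
+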
+#endif /* __fw_api_rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
new file mode 100644
index 000000000000..670ac8f95e26
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -0,0 +1,561 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_scan_h__
+#define __fw_api_scan_h__
+
+#include "fw-api.h"
+
+/* Scan Commands, Responses, Notifications */
+
+/* Masks for iwl_scan_channel.type flags */
+#define SCAN_CHANNEL_TYPE_PASSIVE 0
+#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
+#define SCAN_CHANNEL_NARROW_BAND BIT(22)
+
+/* Max number of IEs for direct SSID scans in a command */
+#define PROBE_OPTION_MAX 20
+
+/**
+ * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * @channel: band is selected by iwl_scan_cmd "flags" field
+ * @tx_gain: gain for analog radio
+ * @dsp_atten: gain for DSP
+ * @active_dwell: dwell time for active scan in TU, typically 5-50
+ * @passive_dwell: dwell time for passive scan in TU, typically 20-500
+ * @type: type is broken down to these bits:
+ * bit 0: 0 = passive, 1 = active
+ * bits 1-20: SSID direct bit map. If any of these bits is set then
+ * the corresponding SSID IE is transmitted in probe request
+ * (bit i adds IE in position i to the probe request)
+ * bit 22: channel width, 0 = regular, 1 = TGj narrow channel
+ *
+ * @iteration_count:
+ * @iteration_interval:
+ * This struct is used once for each channel in the scan list.
+ * Each channel can independently select:
+ * 1) SSID for directed active scans
+ * 2) Txpower setting (for rate specified within Tx command)
+ * 3) How long to stay on-channel (behavior may be modified by quiet_time,
+ * quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * 1) If using passive_dwell (i.e. passive_dwell != 0):
+ * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2) quiet_time <= active_dwell
+ * 3) If restricting off-channel time (i.e. max_out_time !=0):
+ * passive_dwell < max_out_time
+ * active_dwell < max_out_time
+ */
+struct iwl_scan_channel {
+ __le32 type;
+ __le16 channel;
+ __le16 iteration_count;
+ __le32 iteration_interval;
+ __le16 active_dwell;
+ __le16 passive_dwell;
+} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwl_ssid_ie {
+ u8 id;
+ u8 len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+
+/**
+ * enum iwl_scan_flags - masks for scan command flags
+ * @SCAN_FLAGS_PERIODIC_SCAN:
+ * @SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX:
+ * @SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
+ * @SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
+ * @SCAN_FLAGS_FRAGMENTED_SCAN:
+ */
+enum iwl_scan_flags {
+ SCAN_FLAGS_PERIODIC_SCAN = BIT(0),
+ SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX = BIT(1),
+ SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2),
+ SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3),
+ SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4),
+};
+
+/**
+ * enum iwl_scan_type - Scan types for scan command
+ * @SCAN_TYPE_FORCED:
+ * @SCAN_TYPE_BACKGROUND:
+ * @SCAN_TYPE_OS:
+ * @SCAN_TYPE_ROAMING:
+ * @SCAN_TYPE_ACTION:
+ * @SCAN_TYPE_DISCOVERY:
+ * @SCAN_TYPE_DISCOVERY_FORCED:
+ */
+enum iwl_scan_type {
+ SCAN_TYPE_FORCED = 0,
+ SCAN_TYPE_BACKGROUND = 1,
+ SCAN_TYPE_OS = 2,
+ SCAN_TYPE_ROAMING = 3,
+ SCAN_TYPE_ACTION = 4,
+ SCAN_TYPE_DISCOVERY = 5,
+ SCAN_TYPE_DISCOVERY_FORCED = 6,
+}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
+
+/* Maximal number of channels to scan */
+#define MAX_NUM_SCAN_CHANNELS 0x24
+
+/**
+ * struct iwl_scan_cmd - scan request command
+ * ( SCAN_REQUEST_CMD = 0x80 )
+ * @len: command length in bytes
+ * @scan_flags: scan flags from SCAN_FLAGS_*
+ * @channel_count: num of channels in channel list (1 - MAX_NUM_SCAN_CHANNELS)
+ * @quiet_time: in msecs, dwell this time for active scan on quiet channels
+ * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if fewer than
+ * this number of packets were received, typically 1)
+ * @passive2active: is auto switching from passive to active allowed (0 or 1)
+ * @rxchain_sel_flags: RXON_RX_CHAIN_*
+ * @max_out_time: in usecs, max out of serving channel time
+ * @suspend_time: how long to pause scan when returning to service channel:
+ * bits 0-19: beacon interval in usecs (suspend before executing)
+ * bits 20-23: reserved
+ * bits 24-31: number of beacons (suspend between channels)
+ * @rxon_flags: RXON_FLG_*
+ * @filter_flags: RXON_FILTER_*
+ * @tx_cmd: for active scans (zero for passive), w/o payload,
+ * no RS so specify TX rate
+ * @direct_scan: direct scan SSIDs
+ * @type: one of SCAN_TYPE_*
+ * @repeats: how many times to repeat the scan
+ */
+struct iwl_scan_cmd {
+ __le16 len;
+ u8 scan_flags;
+ u8 channel_count;
+ __le16 quiet_time;
+ __le16 quiet_plcp_th;
+ __le16 passive2active;
+ __le16 rxchain_sel_flags;
+ __le32 max_out_time;
+ __le32 suspend_time;
+ /* RX_ON_FLAGS_API_S_VER_1 */
+ __le32 rxon_flags;
+ __le32 filter_flags;
+ struct iwl_tx_cmd tx_cmd;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 type;
+ __le32 repeats;
+
+ /*
+ * Probe request frame, followed by channel list.
+ *
+ * Size of probe request frame is specified by byte count in tx_cmd.
+ * Channel list follows immediately after probe request frame.
+ * Number of channels in list is specified by channel_count.
+ * Each channel in list is of type:
+ *
+ * struct iwl_scan_channel channels[0];
+ *
+ * NOTE: Only one band of channels can be scanned per pass. You
+ * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+ * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * before requesting another scan.
+ */
+ u8 data[0];
+} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
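+
+/*
+ * Illustrative sketch (not part of the firmware API): total length of a
+ * scan request, given the probe request size carried in tx_cmd and the
+ * number of channel entries appended after it.
+ */
+static inline u16 iwl_scan_sketch_cmd_len(u16 probe_req_len, u8 channel_count)
+{
+ return sizeof(struct iwl_scan_cmd) + probe_req_len +
+ channel_count * sizeof(struct iwl_scan_channel);
+}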
+
+/* Response to scan request contains only status with one of these values */
+#define SCAN_RESPONSE_OK 0x1
+#define SCAN_RESPONSE_ERROR 0x2
+
+/*
+ * SCAN_ABORT_CMD = 0x81
+ * When scan abort is requested, the command has no fields except the common
+ * header. The response contains only a status with one of these values.
+ */
+#define SCAN_ABORT_POSSIBLE 0x1
+#define SCAN_ABORT_IGNORED 0x2 /* no pending scans */
+
+/* TODO: complete documentation */
+#define SCAN_OWNER_STATUS 0x1
+#define MEASURE_OWNER_STATUS 0x2
+
+/**
+ * struct iwl_scan_start_notif - notifies start of scan in the device
+ * ( SCAN_START_NOTIFICATION = 0x82 )
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @beacon_timer: structured as follows:
+ * bits 0:19 - beacon interval in usecs
+ * bits 20:23 - reserved (0)
+ * bits 24:31 - number of beacons
+ * @channel: which channel is scanned
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @status: one of *_OWNER_STATUS
+ */
+struct iwl_scan_start_notif {
+ __le32 tsf_low;
+ __le32 tsf_high;
+ __le32 beacon_timer;
+ u8 channel;
+ u8 band;
+ u8 reserved[2];
+ __le32 status;
+} __packed; /* SCAN_START_NTF_API_S_VER_1 */
+
+/* scan results probe_status first bit indicates success */
+#define SCAN_PROBE_STATUS_OK 0
+#define SCAN_PROBE_STATUS_TX_FAILED BIT(0)
+/* error statuses combined with TX_FAILED */
+#define SCAN_PROBE_STATUS_FAIL_TTL BIT(1)
+#define SCAN_PROBE_STATUS_FAIL_BT BIT(2)
+
+/* How many statistics are gathered for each channel */
+#define SCAN_RESULTS_STATISTICS 1
+
+/**
+ * enum iwl_scan_complete_status - status codes for scan complete notifications
+ * @SCAN_COMP_STATUS_OK: scan completed successfully
+ * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
+ * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
+ * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
+ * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
+ * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
+ * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
+ * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
+ * @SCAN_COMP_STATUS_ERR_COEX: medium was lost to WiMax
+ * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
+ * (not an error!)
+ * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
+ * asked for
+ * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
+*/
+enum iwl_scan_complete_status {
+ SCAN_COMP_STATUS_OK = 0x1,
+ SCAN_COMP_STATUS_ABORT = 0x2,
+ SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
+ SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
+ SCAN_COMP_STATUS_ERR_PROBE = 0x5,
+ SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
+ SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
+ SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
+ SCAN_COMP_STATUS_ERR_COEX = 0x9,
+ SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
+ SCAN_COMP_STATUS_ITERATION_END = 0x0B,
+ SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
+};
+
+/**
+ * struct iwl_scan_results_notif - scan results for one channel
+ * ( SCAN_RESULTS_NOTIFICATION = 0x83 )
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of requests that weren't sent due to lack of time
+ * @duration: duration spent in channel, in usecs
+ * @statistics: statistics gathered for this channel
+ */
+struct iwl_scan_results_notif {
+ u8 channel;
+ u8 band;
+ u8 probe_status;
+ u8 num_probe_not_sent;
+ __le32 duration;
+ __le32 statistics[SCAN_RESULTS_STATISTICS];
+} __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */
+
+/**
+ * struct iwl_scan_complete_notif - notifies end of scanning (all channels)
+ * ( SCAN_COMPLETE_NOTIFICATION = 0x84 )
+ * @scanned_channels: number of channels scanned (and number of valid results)
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: all scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_scan_complete_notif {
+ u8 scanned_channels;
+ u8 status;
+ u8 bt_status;
+ u8 last_channel;
+ __le32 tsf_low;
+ __le32 tsf_high;
+ struct iwl_scan_results_notif results[MAX_NUM_SCAN_CHANNELS];
+} __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */
+
+/* scan offload */
+#define IWL_MAX_SCAN_CHANNELS 40
+#define IWL_SCAN_MAX_BLACKLIST_LEN 64
+#define IWL_SCAN_MAX_PROFILES 11
+#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define CAN_ABORT_STATUS 1
+
+#define IWL_FULL_SCAN_MULTIPLIER 5
+#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+
+/**
+ * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
+ * @len: command length in bytes
+ * @scan_flags: see enum iwl_scan_flags
+ * @channel_count: channels in channel list
+ * @quiet_time: dwell time, in milliseconds, on a quiet channel
+ * @quiet_plcp_th: quiet channel num of packets threshold
+ * @good_CRC_th: passive to active promotion threshold
+ * @rx_chain: RXON rx chain.
+ * @max_out_time: max time in uSec to be out of the associated channel
+ * @suspend_time: pause scan this long when returning to service channel
+ * @flags: RXON flags
+ * @filter_flags: RXON filter flags
+ * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
+ * @direct_scan: list of SSIDs for directed active scan
+ * @scan_type: see enum iwl_scan_type.
+ * @rep_count: repetition count for each scheduled scan iteration.
+ */
+struct iwl_scan_offload_cmd {
+ __le16 len;
+ u8 scan_flags;
+ u8 channel_count;
+ __le16 quiet_time;
+ __le16 quiet_plcp_th;
+ __le16 good_CRC_th;
+ __le16 rx_chain;
+ __le32 max_out_time;
+ __le32 suspend_time;
+ /* RX_ON_FLAGS_API_S_VER_1 */
+ __le32 flags;
+ __le32 filter_flags;
+ struct iwl_tx_cmd tx_cmd[2];
+ /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 scan_type;
+ __le32 rep_count;
+} __packed;
+
+enum iwl_scan_offload_channel_flags {
+ IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE = BIT(0),
+ IWL_SCAN_OFFLOAD_CHANNEL_NARROW = BIT(22),
+ IWL_SCAN_OFFLOAD_CHANNEL_FULL = BIT(24),
+ IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL = BIT(25),
+};
+
+/**
+ * iwl_scan_channel_cfg - SCAN_CHANNEL_CFG_S
+ * @type: bitmap - see enum iwl_scan_offload_channel_flags.
+ * 0: passive (0) or active (1) scan.
+ * 1-20: directed scan to i'th ssid.
+ * 22: channel width configuration - 1 for narrow.
+ * 24: full scan.
+ * 25: partial scan.
+ * @channel_number: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two iterations on one channel.
+ * @dwell_time: entry 0 - active scan, entry 1 - passive scan.
+ */
+struct iwl_scan_channel_cfg {
+ __le32 type[IWL_MAX_SCAN_CHANNELS];
+ __le16 channel_number[IWL_MAX_SCAN_CHANNELS];
+ __le16 iter_count[IWL_MAX_SCAN_CHANNELS];
+ __le32 iter_interval[IWL_MAX_SCAN_CHANNELS];
+ u8 dwell_time[IWL_MAX_SCAN_CHANNELS][2];
+} __packed;
+
+/**
+ * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
+ * @scan_cmd: scan command fixed part
+ * @channel_cfg: scan channel configuration
+ * @data: probe request frames (one per band)
+ */
+struct iwl_scan_offload_cfg {
+ struct iwl_scan_offload_cmd scan_cmd;
+ struct iwl_scan_channel_cfg channel_cfg;
+ u8 data[0];
+} __packed;
+
+/**
+ * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid: MAC address to filter out
+ * @reported_rssi: AP rssi reported to the host
+ */
+struct iwl_scan_offload_blacklist {
+ u8 ssid[ETH_ALEN];
+ u8 reported_rssi;
+ u8 reserved;
+} __packed;
+
+enum iwl_scan_offload_network_type {
+ IWL_NETWORK_TYPE_BSS = 1,
+ IWL_NETWORK_TYPE_IBSS = 2,
+ IWL_NETWORK_TYPE_ANY = 3,
+};
+
+enum iwl_scan_offload_band_selection {
+ IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4,
+ IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8,
+ IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
+};
+
+/**
+ * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index: index to ssid list in fixed part
+ * @unicast_cipher: encryption algorithm to match - bitmap
+ * @auth_alg: authentication algorithm to match - bitmap
+ * @network_type: enum iwl_scan_offload_network_type
+ * @band_selection: enum iwl_scan_offload_band_selection
+ */
+struct iwl_scan_offload_profile {
+ u8 ssid_index;
+ u8 unicast_cipher;
+ u8 auth_alg;
+ u8 network_type;
+ u8 band_selection;
+ u8 reserved[3];
+} __packed;
+
+/**
+ * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
+ * @blacklist: AP list to filter out of scan results
+ * @profiles: profiles to search for match
+ * @blacklist_len: length of blacklist
+ * @num_profiles: num of profiles in the list
+ */
+struct iwl_scan_offload_profile_cfg {
+ struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
+ struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+ u8 blacklist_len;
+ u8 num_profiles;
+ u8 reserved[2];
+} __packed;
+
+/**
+ * iwl_scan_offload_schedule - schedule of scan offload
+ * @delay: delay between iterations, in seconds.
+ * @iterations: num of scan iterations
+ * @full_scan_mul: number of partial scans before each full scan
+ */
+struct iwl_scan_offload_schedule {
+ u16 delay;
+ u8 iterations;
+ u8 full_scan_mul;
+} __packed;
+
+/*
+ * iwl_scan_offload_flags
+ *
+ * IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID: filter mode - upload every beacon or match
+ * ssid list.
+ * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
+ * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
+ * on A band.
+ */
+enum iwl_scan_offload_flags {
+ IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID = BIT(0),
+ IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
+ IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
+};
+
+/**
+ * iwl_scan_offload_req - scan offload request command
+ * @flags: bitmap - enum iwl_scan_offload_flags.
+ * @watchdog: maximum scan duration in TU.
+ * @delay: delay in seconds before first iteration.
+ * @schedule_line: scan offload schedule, for fast and regular scan.
+ */
+struct iwl_scan_offload_req {
+ __le16 flags;
+ __le16 watchdog;
+ __le16 delay;
+ __le16 reserved;
+ struct iwl_scan_offload_schedule schedule_line[2];
+} __packed;
+
+enum iwl_scan_offload_compleate_status {
+ IWL_SCAN_OFFLOAD_COMPLETED = 1,
+ IWL_SCAN_OFFLOAD_ABORTED = 2,
+};
+
+/**
+ * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: enum iwl_scan_offload_compleate_status
+ */
+struct iwl_scan_offload_complete {
+ u8 last_schedule_line;
+ u8 last_schedule_iteration;
+ u8 status;
+ u8 reserved;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
new file mode 100644
index 000000000000..0acb53dda22d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -0,0 +1,380 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_sta_h__
+#define __fw_api_sta_h__
+
+/**
+ * enum iwl_sta_flags - flags for the ADD_STA host command
+ * @STA_FLG_REDUCED_TX_PWR_CTRL:
+ * @STA_FLG_REDUCED_TX_PWR_DATA:
+ * @STA_FLG_FLG_ANT_MSK: Antenna selection
+ * @STA_FLG_PS: set if STA is in Power Save
+ * @STA_FLG_INVALID: set if STA is invalid
+ * @STA_FLG_DLP_EN: Direct Link Protocol is enabled
+ * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
+ * @STA_FLG_DRAIN_FLOW: drain flow
+ * @STA_FLG_PAN: STA is for PAN interface
+ * @STA_FLG_CLASS_AUTH:
+ * @STA_FLG_CLASS_ASSOC:
+ * @STA_FLG_RTS_MIMO_PROT:
+ * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
+ * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ * initialised by driver and can be updated by fw upon reception of
+ * action frames that can change the channel width. When cleared the fw
+ * will send all the frames in 20MHz even when FAT channel is requested.
+ * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ * driver and can be updated by fw upon reception of action frames.
+ * @STA_FLG_MFP_EN: Management Frame Protection
+ */
+enum iwl_sta_flags {
+ STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3),
+ STA_FLG_REDUCED_TX_PWR_DATA = BIT(6),
+
+ STA_FLG_FLG_ANT_A = (1 << 4),
+ STA_FLG_FLG_ANT_B = (2 << 4),
+ STA_FLG_FLG_ANT_MSK = (STA_FLG_FLG_ANT_A |
+ STA_FLG_FLG_ANT_B),
+
+ STA_FLG_PS = BIT(8),
+ STA_FLG_INVALID = BIT(9),
+ STA_FLG_DLP_EN = BIT(10),
+ STA_FLG_SET_ALL_KEYS = BIT(11),
+ STA_FLG_DRAIN_FLOW = BIT(12),
+ STA_FLG_PAN = BIT(13),
+ STA_FLG_CLASS_AUTH = BIT(14),
+ STA_FLG_CLASS_ASSOC = BIT(15),
+ STA_FLG_RTS_MIMO_PROT = BIT(17),
+
+ STA_FLG_MAX_AGG_SIZE_SHIFT = 19,
+ STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+
+ STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
+ STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+
+ STA_FLG_FAT_EN_20MHZ = (0 << 26),
+ STA_FLG_FAT_EN_40MHZ = (1 << 26),
+ STA_FLG_FAT_EN_80MHZ = (2 << 26),
+ STA_FLG_FAT_EN_160MHZ = (3 << 26),
+ STA_FLG_FAT_EN_MSK = (3 << 26),
+
+ STA_FLG_MIMO_EN_SISO = (0 << 28),
+ STA_FLG_MIMO_EN_MIMO2 = (1 << 28),
+ STA_FLG_MIMO_EN_MIMO3 = (2 << 28),
+ STA_FLG_MIMO_EN_MSK = (3 << 28),
+};
+
+/**
+ * enum iwl_sta_key_flag - key flags for the ADD_STA host command
+ * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm
+ * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ * station info array (1 - n 1X mode)
+ * @STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @STA_KEY_NOT_VALID: key is invalid
+ * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @STA_KEY_MULTICAST: set for multicast key
+ * @STA_KEY_MFP: key is used for Management Frame Protection
+ */
+enum iwl_sta_key_flag {
+ STA_KEY_FLG_NO_ENC = (0 << 0),
+ STA_KEY_FLG_WEP = (1 << 0),
+ STA_KEY_FLG_CCM = (2 << 0),
+ STA_KEY_FLG_TKIP = (3 << 0),
+ STA_KEY_FLG_CMAC = (6 << 0),
+ STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
+ STA_KEY_FLG_EN_MSK = (7 << 0),
+
+ STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+ STA_KEY_FLG_KEYID_POS = 8,
+ STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
+ STA_KEY_NOT_VALID = BIT(11),
+ STA_KEY_FLG_WEP_13BYTES = BIT(12),
+ STA_KEY_MULTICAST = BIT(14),
+ STA_KEY_MFP = BIT(15),
+};
+
+/**
+ * enum iwl_sta_modify_flag - indicate to the fw which flags are being changed
+ * @STA_MODIFY_KEY: this command modifies %key
+ * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @STA_MODIFY_TX_RATE: unused
+ * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @STA_MODIFY_PROT_TH:
+ * @STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+enum iwl_sta_modify_flag {
+ STA_MODIFY_KEY = BIT(0),
+ STA_MODIFY_TID_DISABLE_TX = BIT(1),
+ STA_MODIFY_TX_RATE = BIT(2),
+ STA_MODIFY_ADD_BA_TID = BIT(3),
+ STA_MODIFY_REMOVE_BA_TID = BIT(4),
+ STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),
+ STA_MODIFY_PROT_TH = BIT(6),
+ STA_MODIFY_QUEUES = BIT(7),
+};
+
+#define STA_MODE_MODIFY 1
+
+/**
+ * enum iwl_sta_sleep_flag - type of sleep of the station
+ * @STA_SLEEP_STATE_AWAKE:
+ * @STA_SLEEP_STATE_PS_POLL:
+ * @STA_SLEEP_STATE_UAPSD:
+ */
+enum iwl_sta_sleep_flag {
+ STA_SLEEP_STATE_AWAKE = 0,
+ STA_SLEEP_STATE_PS_POLL = BIT(0),
+ STA_SLEEP_STATE_UAPSD = BIT(1),
+};
+
+/* STA ID and color bits definitions */
+#define STA_ID_SEED (0x0f)
+#define STA_ID_POS (0)
+#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS)
+
+#define STA_COLOR_SEED (0x7)
+#define STA_COLOR_POS (4)
+#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS)
+
+#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \
+ (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
+#define STA_ID_N_COLOR_GET_ID(id_n_color) \
+ (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
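+
+/*
+ * Illustrative counterpart (not part of the firmware API) to the GET
+ * macros above: composing an id_n_color value from a station id and a
+ * color.
+ */
+#define STA_ID_N_COLOR_COMPOSE(id, color) \
+ ((((id) << STA_ID_POS) & STA_ID_MSK) | \
+ (((color) << STA_COLOR_POS) & STA_COLOR_MSK))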
+
+#define STA_KEY_MAX_NUM (16)
+#define STA_KEY_IDX_INVALID (0xff)
+#define STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWL_MAX_GLOBAL_KEYS (4)
+#define STA_KEY_LEN_WEP40 (5)
+#define STA_KEY_LEN_WEP104 (13)
+
+/**
+ * struct iwl_mvm_keyinfo - key information
+ * @key_flags: type %iwl_sta_key_flag
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ * @key_offset: key offset in the fw's key table
+ * @key: 16-byte unicast decryption key
+ * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
+ * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
+ * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
+ */
+struct iwl_mvm_keyinfo {
+ __le16 key_flags;
+ u8 tkip_rx_tsc_byte2;
+ u8 reserved1;
+ __le16 tkip_rx_ttak[5];
+ u8 key_offset;
+ u8 reserved2;
+ u8 key[16];
+ __le64 tx_secur_seq_cnt;
+ __le64 hw_tkip_mic_rx_key;
+ __le64 hw_tkip_mic_tx_key;
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: 1: modify existing, 0: add new station
+ * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
+ * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
+ * sent
+ * @mac_id_n_color: the Mac context this station belongs to
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ * alone. 1 - modify, 0 - don't change.
+ * @key: look at %iwl_mvm_keyinfo
+ * @station_flags: look at %iwl_sta_flags
+ * @station_flags_msk: which of %station_flags have changed
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ * add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ * Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ * add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ * keeps track of STA sleep state.
+ * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd {
+ u8 add_modify;
+ u8 unicast_tx_key_id;
+ u8 multicast_tx_key_id;
+ u8 reserved1;
+ __le32 mac_id_n_color;
+ u8 addr[ETH_ALEN];
+ __le16 reserved2;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved3;
+ struct iwl_mvm_keyinfo key;
+ __le32 station_flags;
+ __le32 station_flags_msk;
+ __le16 tid_disable_tx;
+ __le16 reserved4;
+ u8 add_immediate_ba_tid;
+ u8 remove_immediate_ba_tid;
+ __le16 add_immediate_ba_ssn;
+ __le16 sleep_tx_count;
+ __le16 sleep_state_flags;
+ __le16 assoc_id;
+ __le16 beamform_flags;
+ __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_5 */
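+
+/*
+ * Illustrative sketch (not part of the firmware API): modifying only the
+ * TFD queues of an existing station, leaving every other parameter
+ * untouched.
+ */
+static inline void iwl_sta_sketch_modify_queues(struct iwl_mvm_add_sta_cmd *cmd,
+ u8 sta_id, u32 queues)
+{
+ cmd->add_modify = STA_MODE_MODIFY;
+ cmd->sta_id = sta_id;
+ cmd->modify_mask = STA_MODIFY_QUEUES;
+ cmd->tfd_queue_msk = cpu_to_le32(queues);
+}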
+
+/**
+ * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
+ * @ADD_STA_SUCCESS: operation was executed successfully
+ * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that
+ * doesn't exist.
+ */
+enum iwl_mvm_add_sta_rsp_status {
+ ADD_STA_SUCCESS = 0x1,
+ ADD_STA_STATIONS_OVERLOAD = 0x2,
+ ADD_STA_IMMEDIATE_BA_FAILURE = 0x4,
+ ADD_STA_MODIFY_NON_EXISTING_STA = 0x8,
+};
+
+/**
+ * struct iwl_mvm_rm_sta_cmd - Remove a station from the fw's station table
+ * ( REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ */
+struct iwl_mvm_rm_sta_cmd {
+ u8 sta_id;
+ u8 reserved[3];
+} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwl_sta_key_flag
+ * @IGTK:
+ * @K1: IGTK master key
+ * @K2: IGTK sub key
+ * @sta_id: station ID that supports IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd {
+ __le32 ctrl_flags;
+ u8 IGTK[16];
+ u8 K1[16];
+ u8 K2[16];
+ __le32 key_id;
+ __le32 sta_id;
+ __le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+struct iwl_mvm_wep_key {
+ u8 key_index;
+ u8 key_offset;
+ __le16 reserved1;
+ u8 key_size;
+ u8 reserved2[3];
+ u8 key[16];
+} __packed;
+
+struct iwl_mvm_wep_key_cmd {
+ __le32 mac_id_n_color;
+ u8 num_keys;
+ u8 decryption_type;
+ u8 flags;
+ u8 reserved;
+ struct iwl_mvm_wep_key wep_key[0];
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+
+
+#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
new file mode 100644
index 000000000000..2677914bf0a6
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -0,0 +1,580 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_tx_h__
+#define __fw_api_tx_h__
+
+/**
+ * enum iwl_tx_flags - bitmasks for tx_flags in TX command
+ * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
+ * @TX_CMD_FLG_ACK: expect ACK from receiving station
+ * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
+ * Otherwise, use rate_n_flags from the TX command
+ * @TX_CMD_FLG_BA: this frame is a block ack
+ * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
+ * Must set TX_CMD_FLG_ACK with this flag.
+ * @TX_CMD_FLG_TXOP_PROT: protect frame with full TXOP protection
+ * @TX_CMD_FLG_VHT_NDPA: mark frame as NDPA for VHT beamformer sequence
+ * @TX_CMD_FLG_HT_NDPA: mark frame as NDPA for HT beamformer sequence
+ * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
+ * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
+ * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
+ * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
+ * @TX_CMD_FLG_NEXT_FRAME: this frame includes information of the next frame
+ * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
+ * Should be set for beacons and probe responses
+ * @TX_CMD_FLG_CALIB: activate PA TX power calibrations
+ * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
+ * @TX_CMD_FLG_AGG_START: allow this frame to start aggregation
+ * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
+ * Should be set for 26/30 length MAC headers
+ * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
+ * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
+ * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
+ * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that
+ * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
+ * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
+ * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
+ * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
+ * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
+ */
+enum iwl_tx_flags {
+ TX_CMD_FLG_PROT_REQUIRE = BIT(0),
+ TX_CMD_FLG_ACK = BIT(3),
+ TX_CMD_FLG_STA_RATE = BIT(4),
+ TX_CMD_FLG_BA = BIT(5),
+ TX_CMD_FLG_BAR = BIT(6),
+ TX_CMD_FLG_TXOP_PROT = BIT(7),
+ TX_CMD_FLG_VHT_NDPA = BIT(8),
+ TX_CMD_FLG_HT_NDPA = BIT(9),
+ TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
+ TX_CMD_FLG_BT_DIS = BIT(12),
+ TX_CMD_FLG_SEQ_CTL = BIT(13),
+ TX_CMD_FLG_MORE_FRAG = BIT(14),
+ TX_CMD_FLG_NEXT_FRAME = BIT(15),
+ TX_CMD_FLG_TSF = BIT(16),
+ TX_CMD_FLG_CALIB = BIT(17),
+ TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
+ TX_CMD_FLG_AGG_START = BIT(19),
+ TX_CMD_FLG_MH_PAD = BIT(20),
+ TX_CMD_FLG_RESP_TO_DRV = BIT(21),
+ TX_CMD_FLG_CCMP_AGG = BIT(22),
+ TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
+ TX_CMD_FLG_CTS_ONLY = BIT(24),
+ TX_CMD_FLG_DUR = BIT(25),
+ TX_CMD_FLG_FW_DROP = BIT(26),
+ TX_CMD_FLG_EXEC_PAPD = BIT(27),
+ TX_CMD_FLG_PAPD_TYPE = BIT(28),
+ TX_CMD_FLG_HCCA_CHUNK = BIT(31)
+}; /* TX_FLAGS_BITS_API_S_VER_1 */
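/*
 * Illustrative sketch, not part of this patch: how a driver might combine
 * these bits when filling the tx_flags field of struct iwl_tx_cmd (defined
 * further below). The particular flag set chosen here is hypothetical.
 */
	__le32 tx_flags = cpu_to_le32(TX_CMD_FLG_ACK |
				      TX_CMD_FLG_PROT_REQUIRE |
				      TX_CMD_FLG_STA_RATE);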
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP 0x01
+#define TX_CMD_SEC_CCM 0x02
+#define TX_CMD_SEC_TKIP 0x03
+#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
+#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
+#define TX_CMD_SEC_KEY128 0x08
+
+/* TODO: how can these values fit in a 16 bit variable??? */
+/*
+ * TX command next frame info
+ *
+ * bits 0:2 - security control (TX_CMD_SEC_*)
+ * bit 3 - immediate ACK required
+ * bit 4 - rate is taken from STA table
+ * bit 5 - frame belongs to BA stream
+ * bit 6 - immediate BA response expected
+ * bit 7 - unused
+ * bits 8:15 - Station ID
+ * bits 16:31 - rate
+ */
+#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
+#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
+#define TX_CMD_NEXT_FRAME_BA_MSK (0x20)
+#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
+#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
+#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
+#define TX_CMD_NEXT_FRAME_STA_ID_POS (8)
+#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
+#define TX_CMD_NEXT_FRAME_RATE_POS (16)
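/*
 * Illustrative sketch, not part of this patch: composing next-frame info from
 * the masks above. The security mode and station id are made up; note the
 * open TODO above about the width of the destination field.
 */
	u16 next_frame = TX_CMD_SEC_CCM |
			 TX_CMD_NEXT_FRAME_ACK_MSK |
			 (3 << TX_CMD_NEXT_FRAME_STA_ID_POS);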
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
+#define TX_CMD_LIFE_TIME_DEFAULT	2000000 /* 2000 ms */
+#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
+#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWL_DEFAULT_TX_RETRY 15
+#define IWL_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWL_RTS_DFAULT_RETRY_LIMIT 60
+#define IWL_BAR_DFAULT_RETRY_LIMIT 60
+#define IWL_LOW_RETRY_LIMIT 7
+
+/* TODO: complete documentation for try_cnt and btkill_cnt */
+/**
+ * struct iwl_tx_cmd - TX command struct to FW
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @next_frame_len: same as len, but for next frame (0 if not applicable)
+ * Used for fragmentation and bursting, but not in 11n aggregation.
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @sec_ctl: security control, TX_CMD_SEC_*
+ * @initial_rate_index: index into the rate table for initial TX attempt.
+ * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
+ * @key: security key
+ * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_*
+ * @life_time: frame life time (usecs??)
+ * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
+ *	btkill_cnt + reserved), first 32 bits. "0" disables usage.
+ * @dram_msb_ptr: upper bits of the scratch physical address
+ * @rts_retry_limit: max attempts for RTS
+ * @data_retry_limit: max attempts to send the data packet
+ * @tid_tspec: TID/tspec
+ * @pm_frame_timeout: PM TX frame timeout
+ * @driver_txop: duration of EDCA TXOP, in 32-usec units. Set this if not
+ * specified by HCCA protocol
+ *
+ * The byte count (both len and next_frame_len) includes MAC header
+ * (24/26/30/32 bytes)
+ * + 2 bytes pad if 26/30 header size
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * It does not include post-MAC padding, i.e.,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range of len: 14-2342 bytes.
+ *
+ * After the struct fields the MAC header is placed, plus any padding,
+ * and then the actual payload.
+ */
+struct iwl_tx_cmd {
+ __le16 len;
+ __le16 next_frame_len;
+ __le32 tx_flags;
+ /* DRAM_SCRATCH_API_U_VER_1 */
+ u8 try_cnt;
+ u8 btkill_cnt;
+ __le16 reserved;
+ __le32 rate_n_flags;
+ u8 sta_id;
+ u8 sec_ctl;
+ u8 initial_rate_index;
+ u8 reserved2;
+ u8 key[16];
+ __le16 next_frame_flags;
+ __le16 reserved3;
+ __le32 life_time;
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 tid_tspec;
+ __le16 pm_frame_timeout;
+ __le16 driver_txop;
+ u8 payload[0];
+ struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_3 */
+
+/*
+ * TX response related data
+ */
+
+/*
+ * enum iwl_tx_status - status that is returned by the fw after attempts to Tx
+ * @TX_STATUS_SUCCESS:
+ * @TX_STATUS_DIRECT_DONE:
+ * @TX_STATUS_POSTPONE_DELAY:
+ * @TX_STATUS_POSTPONE_FEW_BYTES:
+ * @TX_STATUS_POSTPONE_BT_PRIO:
+ * @TX_STATUS_POSTPONE_QUIET_PERIOD:
+ * @TX_STATUS_POSTPONE_CALC_TTAK:
+ * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+ * @TX_STATUS_FAIL_SHORT_LIMIT:
+ * @TX_STATUS_FAIL_LONG_LIMIT:
+ * @TX_STATUS_FAIL_UNDERRUN:
+ * @TX_STATUS_FAIL_DRAIN_FLOW:
+ * @TX_STATUS_FAIL_RFKILL_FLUSH:
+ * @TX_STATUS_FAIL_LIFE_EXPIRE:
+ * @TX_STATUS_FAIL_DEST_PS:
+ * @TX_STATUS_FAIL_HOST_ABORTED:
+ * @TX_STATUS_FAIL_BT_RETRY:
+ * @TX_STATUS_FAIL_STA_INVALID:
+ * @TX_STATUS_FAIL_FRAG_DROPPED:
+ * @TX_STATUS_FAIL_TID_DISABLE:
+ * @TX_STATUS_FAIL_FIFO_FLUSHED:
+ * @TX_STATUS_FAIL_SMALL_CF_POLL:
+ * @TX_STATUS_FAIL_FW_DROP:
+ * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
+ * STA table
+ * @TX_STATUS_INTERNAL_ABORT:
+ * @TX_MODE_MSK:
+ * @TX_MODE_NO_BURST:
+ * @TX_MODE_IN_BURST_SEQ:
+ * @TX_MODE_FIRST_IN_BURST:
+ * @TX_QUEUE_NUM_MSK:
+ *
+ * Valid only if frame_count == 1
+ * TODO: complete documentation
+ */
+enum iwl_tx_status {
+ TX_STATUS_MSK = 0x000000ff,
+ TX_STATUS_SUCCESS = 0x01,
+ TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ TX_STATUS_POSTPONE_DELAY = 0x40,
+ TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+ TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+ TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_STATUS_FAIL_UNDERRUN = 0x84,
+ TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+ TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_STATUS_FAIL_DEST_PS = 0x88,
+ TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+ TX_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+ TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
+ TX_STATUS_FAIL_FW_DROP = 0x90,
+ TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
+ TX_STATUS_INTERNAL_ABORT = 0x92,
+ TX_MODE_MSK = 0x00000f00,
+ TX_MODE_NO_BURST = 0x00000000,
+ TX_MODE_IN_BURST_SEQ = 0x00000100,
+ TX_MODE_FIRST_IN_BURST = 0x00000200,
+ TX_QUEUE_NUM_MSK = 0x0001f000,
+ TX_NARROW_BW_MSK = 0x00060000,
+ TX_NARROW_BW_1DIV2 = 0x00020000,
+ TX_NARROW_BW_1DIV4 = 0x00040000,
+ TX_NARROW_BW_1DIV8 = 0x00060000,
+};
+
+/*
+ * enum iwl_tx_agg_status - TX aggregation status
+ * @AGG_TX_STATE_STATUS_MSK:
+ * @AGG_TX_STATE_TRANSMITTED:
+ * @AGG_TX_STATE_UNDERRUN:
+ * @AGG_TX_STATE_BT_PRIO:
+ * @AGG_TX_STATE_FEW_BYTES:
+ * @AGG_TX_STATE_ABORT:
+ * @AGG_TX_STATE_LAST_SENT_TTL:
+ * @AGG_TX_STATE_LAST_SENT_TRY_CNT:
+ * @AGG_TX_STATE_LAST_SENT_BT_KILL:
+ * @AGG_TX_STATE_SCD_QUERY:
+ * @AGG_TX_STATE_TEST_BAD_CRC32:
+ * @AGG_TX_STATE_RESPONSE:
+ * @AGG_TX_STATE_DUMP_TX:
+ * @AGG_TX_STATE_DELAY_TX:
+ * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a member of a previous
+ * aggregation block). If rate scaling is used, retry count indicates the
+ * rate table entry used for all frames in the new agg.
+ * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ * this frame
+ *
+ * TODO: complete documentation
+ */
+enum iwl_tx_agg_status {
+ AGG_TX_STATE_STATUS_MSK = 0x00fff,
+ AGG_TX_STATE_TRANSMITTED = 0x000,
+ AGG_TX_STATE_UNDERRUN = 0x001,
+ AGG_TX_STATE_BT_PRIO = 0x002,
+ AGG_TX_STATE_FEW_BYTES = 0x004,
+ AGG_TX_STATE_ABORT = 0x008,
+ AGG_TX_STATE_LAST_SENT_TTL = 0x010,
+ AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
+ AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
+ AGG_TX_STATE_SCD_QUERY = 0x080,
+ AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
+ AGG_TX_STATE_RESPONSE = 0x1ff,
+ AGG_TX_STATE_DUMP_TX = 0x200,
+ AGG_TX_STATE_DELAY_TX = 0x400,
+ AGG_TX_STATE_TRY_CNT_POS = 12,
+ AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,
+};
+
+#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \
+ AGG_TX_STATE_LAST_SENT_TRY_CNT| \
+ AGG_TX_STATE_LAST_SENT_BT_KILL)
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know is that
+ * we've written the bytes to the TXE, but we know nothing about what the DSP
+ * did.
+ */
+#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \
+ AGG_TX_STATE_ABORT | \
+ AGG_TX_STATE_SCD_QUERY)
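/*
 * Illustrative sketch, not part of this patch: testing the "frame not sent"
 * condition on a hypothetical pointer st to one struct agg_tx_status (the
 * struct is defined below in this header).
 */
	bool not_sent = le16_to_cpu(st->status) & AGG_TX_STAT_FRAME_NOT_SENT;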
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for a single
+ * frame. Multiple attempts, at various bit rates, may have been made for
+ * this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
+ *	frames that used block-acknowledge. All frames were transmitted at the
+ *	same rate. Rate scaling may have been used if the first frame in this new
+ * agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered
+ * here; block-ack has not been received by the time the device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the device, rather than whether it was received successfully by
+ * the destination station.
+ */
+
+/**
+ * struct agg_tx_status - per packet TX aggregation status
+ * @status: enum iwl_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct agg_tx_status {
+ __le16 status;
+ __le16 sequence;
+} __packed;
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define TX_RES_INV_RATE_INDEX_MSK 0x80
+
+#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+
+/**
+ * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg: frame status TX_STATUS_*
+ * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
+ * follow this one, up to frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp {
+ u8 frame_count;
+ u8 bt_kill_count;
+ u8 failure_rts;
+ u8 failure_frame;
+ __le32 initial_rate;
+ __le16 wireless_media_time;
+
+ u8 pa_status;
+ u8 pa_integ_res_a[3];
+ u8 pa_integ_res_b[3];
+ u8 pa_integ_res_c[3];
+ __le16 measurement_req_id;
+ __le16 reserved;
+
+ __le32 tfd_info;
+ __le16 seq_ctl;
+ __le16 byte_cnt;
+ u8 tlc_info;
+ u8 ra_tid;
+ __le16 frame_ctrl;
+
+ struct agg_tx_status status;
+} __packed; /* TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwl_mvm_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @sta_addr_lo32: lower 32 bits of the MAC address
+ * @sta_addr_hi16: upper 16 bits of the MAC address
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @tid: tid of the session
+ * @seq_ctl:
+ * @bitmap: the bitmap of the BA notification as seen in the air
+ * @scd_flow: the tx queue this BA relates to
+ * @scd_ssn: the index of the last contiguously sent packet
+ * @txed: number of Txed frames in this batch
+ * @txed_2_done: number of Acked frames in this batch
+ */
+struct iwl_mvm_ba_notif {
+ __le32 sta_addr_lo32;
+ __le16 sta_addr_hi16;
+ __le16 reserved;
+
+ u8 sta_id;
+ u8 tid;
+ __le16 seq_ctl;
+ __le64 bitmap;
+ __le16 scd_flow;
+ __le16 scd_ssn;
+ u8 txed;
+ u8 txed_2_done;
+ __le16 reserved1;
+} __packed;
+
+/*
+ * struct iwl_mac_beacon_cmd - beacon template command
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ * mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd {
+ struct iwl_tx_cmd tx;
+ __le32 template_id;
+ __le32 tim_idx;
+ __le32 tim_size;
+ struct ieee80211_hdr frame[0];
+} __packed;
+
+/**
+ * enum iwl_dump_control - dump (flush) control flags
+ * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ * and the TFD queues are empty.
+ */
+enum iwl_dump_control {
+ DUMP_TX_FIFO_FLUSH = BIT(1),
+};
+
+/**
+ * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+ __le32 queues_ctl;
+ __le16 flush_ctl;
+ __le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & 0xfff;
+}
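/*
 * Illustrative usage sketch, not part of this patch: walk a received REPLY_TX
 * payload (tx_resp is assumed to point at the response data), print the
 * per-frame statuses and the trailing SCD SSN. Example function name and the
 * pr_debug reporting are hypothetical.
 */
static inline void example_dump_tx_resp(struct iwl_mvm_tx_resp *tx_resp)
{
	struct agg_tx_status *st = &tx_resp->status;
	int i;

	if (tx_resp->frame_count == 1)
		pr_debug("single frame, status 0x%x\n",
			 le16_to_cpu(st->status) & TX_STATUS_MSK);
	else
		for (i = 0; i < tx_resp->frame_count; i++)
			pr_debug("agg frame %d: state 0x%x seq 0x%x\n", i,
				 le16_to_cpu(st[i].status) &
					AGG_TX_STATE_STATUS_MSK,
				 le16_to_cpu(st[i].sequence));

	pr_debug("tid %d ra %d scd_ssn %u\n",
		 IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid),
		 IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid),
		 iwl_mvm_get_scd_ssn(tx_resp));
}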
+
+#endif /* __fw_api_tx_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
new file mode 100644
index 000000000000..23eebda848b0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -0,0 +1,952 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_h__
+#define __fw_api_h__
+
+#include "fw-api-rs.h"
+#include "fw-api-tx.h"
+#include "fw-api-sta.h"
+#include "fw-api-mac.h"
+#include "fw-api-power.h"
+#include "fw-api-d3.h"
+
+/* queue and FIFO numbers by usage */
+enum {
+ IWL_MVM_OFFCHANNEL_QUEUE = 8,
+ IWL_MVM_CMD_QUEUE = 9,
+ IWL_MVM_AUX_QUEUE = 15,
+ IWL_MVM_FIRST_AGG_QUEUE = 16,
+ IWL_MVM_NUM_QUEUES = 20,
+ IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
+ IWL_MVM_CMD_FIFO = 7
+};
+
+#define IWL_MVM_STATION_COUNT 16
+
+/* commands */
+enum {
+ MVM_ALIVE = 0x1,
+ REPLY_ERROR = 0x2,
+
+ INIT_COMPLETE_NOTIF = 0x4,
+
+ /* PHY context commands */
+ PHY_CONTEXT_CMD = 0x8,
+ DBG_CFG = 0x9,
+
+ /* station table */
+ ADD_STA = 0x18,
+ REMOVE_STA = 0x19,
+
+ /* TX */
+ TX_CMD = 0x1c,
+ TXPATH_FLUSH = 0x1e,
+ MGMT_MCAST_KEY = 0x1f,
+
+ /* global key */
+ WEP_KEY = 0x20,
+
+ /* MAC and Binding commands */
+ MAC_CONTEXT_CMD = 0x28,
+ TIME_EVENT_CMD = 0x29, /* both CMD and response */
+ TIME_EVENT_NOTIFICATION = 0x2a,
+ BINDING_CONTEXT_CMD = 0x2b,
+ TIME_QUOTA_CMD = 0x2c,
+
+ LQ_CMD = 0x4e,
+
+ /* Calibration */
+ TEMPERATURE_NOTIFICATION = 0x62,
+ CALIBRATION_CFG_CMD = 0x65,
+ CALIBRATION_RES_NOTIFICATION = 0x66,
+ CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
+ RADIO_VERSION_NOTIFICATION = 0x68,
+
+ /* Scan offload */
+ SCAN_OFFLOAD_REQUEST_CMD = 0x51,
+ SCAN_OFFLOAD_ABORT_CMD = 0x52,
+ SCAN_OFFLOAD_COMPLETE = 0x6D,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
+ SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+
+ /* Phy */
+ PHY_CONFIGURATION_CMD = 0x6a,
+ CALIB_RES_NOTIF_PHY_DB = 0x6b,
+ /* PHY_DB_CMD = 0x6c, */
+
+ /* Power */
+ POWER_TABLE_CMD = 0x77,
+
+ /* Scanning */
+ SCAN_REQUEST_CMD = 0x80,
+ SCAN_ABORT_CMD = 0x81,
+ SCAN_START_NOTIFICATION = 0x82,
+ SCAN_RESULTS_NOTIFICATION = 0x83,
+ SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+ /* NVM */
+ NVM_ACCESS_CMD = 0x88,
+
+ SET_CALIB_DEFAULT_CMD = 0x8e,
+
+ BEACON_TEMPLATE_CMD = 0x91,
+ TX_ANT_CONFIGURATION_CMD = 0x98,
+ STATISTICS_NOTIFICATION = 0x9d,
+
+ /* RF-KILL commands and notifications */
+ CARD_STATE_CMD = 0xa0,
+ CARD_STATE_NOTIFICATION = 0xa1,
+
+ REPLY_RX_PHY_CMD = 0xc0,
+ REPLY_RX_MPDU_CMD = 0xc1,
+ BA_NOTIF = 0xc5,
+
+ REPLY_DEBUG_CMD = 0xf0,
+ DEBUG_LOG_MSG = 0xf7,
+
+ /* D3 commands/notifications */
+ D3_CONFIG_CMD = 0xd3,
+ PROT_OFFLOAD_CONFIG_CMD = 0xd4,
+ OFFLOADS_QUERY_CMD = 0xd5,
+ REMOTE_WAKE_CONFIG_CMD = 0xd6,
+
+ /* for WoWLAN in particular */
+ WOWLAN_PATTERNS = 0xe0,
+ WOWLAN_CONFIGURATION = 0xe1,
+ WOWLAN_TSC_RSC_PARAM = 0xe2,
+ WOWLAN_TKIP_PARAM = 0xe3,
+ WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+ WOWLAN_GET_STATUSES = 0xe5,
+ WOWLAN_TX_POWER_PER_DB = 0xe6,
+
+ /* and for NetDetect */
+ NET_DETECT_CONFIG_CMD = 0x54,
+ NET_DETECT_PROFILES_QUERY_CMD = 0x56,
+ NET_DETECT_PROFILES_CMD = 0x57,
+ NET_DETECT_HOTSPOTS_CMD = 0x58,
+ NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59,
+
+ REPLY_MAX = 0xff,
+};
+
+/**
+ * struct iwl_cmd_response - generic response struct for most commands
+ * @status: status of the command asked, changes for each one
+ */
+struct iwl_cmd_response {
+ __le32 status;
+};
+
+/*
+ * struct iwl_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwl_tx_ant_cfg_cmd {
+ __le32 valid;
+} __packed;
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwl_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
+/* This enum defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
+ */
+enum iwl_calib_cfg {
+ IWL_CALIB_CFG_XTAL_IDX = BIT(0),
+ IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1),
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2),
+ IWL_CALIB_CFG_PAPD_IDX = BIT(3),
+ IWL_CALIB_CFG_TX_PWR_IDX = BIT(4),
+ IWL_CALIB_CFG_DC_IDX = BIT(5),
+ IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6),
+ IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7),
+ IWL_CALIB_CFG_TX_IQ_IDX = BIT(8),
+ IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9),
+ IWL_CALIB_CFG_RX_IQ_IDX = BIT(10),
+ IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11),
+ IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12),
+ IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13),
+ IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14),
+ IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15),
+ IWL_CALIB_CFG_DAC_IDX = BIT(16),
+ IWL_CALIB_CFG_ABS_IDX = BIT(17),
+ IWL_CALIB_CFG_AGC_IDX = BIT(18),
+};
+
+/*
+ * Phy configuration command.
+ */
+struct iwl_phy_cfg_cmd {
+ __le32 phy_cfg;
+ struct iwl_calib_ctrl calib_control;
+} __packed;
+
+#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1))
+#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3))
+#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5))
+#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7))
+#define PHY_CFG_TX_CHAIN_A BIT(8)
+#define PHY_CFG_TX_CHAIN_B BIT(9)
+#define PHY_CFG_TX_CHAIN_C BIT(10)
+#define PHY_CFG_RX_CHAIN_A BIT(12)
+#define PHY_CFG_RX_CHAIN_B BIT(13)
+#define PHY_CFG_RX_CHAIN_C BIT(14)
+
+
+/* Target of the NVM_ACCESS_CMD */
+enum {
+ NVM_ACCESS_TARGET_CACHE = 0,
+ NVM_ACCESS_TARGET_OTP = 1,
+ NVM_ACCESS_TARGET_EEPROM = 2,
+};
+
+/**
+ * struct iwl_nvm_access_cmd_ver1 - Request the device to send the NVM.
+ * @op_code: 0 - read, 1 - write.
+ * @target: NVM_ACCESS_TARGET_*. Should be 0 for read.
+ * @cache_refresh: 0 - None, 1 - NVM.
+ * @offset: offset in the nvm data.
+ * @length: of the chunk.
+ * @data: empty on read, the NVM chunk on write
+ */
+struct iwl_nvm_access_cmd_ver1 {
+ u8 op_code;
+ u8 target;
+ u8 cache_refresh;
+ u8 reserved;
+ __le16 offset;
+ __le16 length;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_nvm_access_resp_ver1 - response to NVM_ACCESS_CMD
+ * @offset: the offset in the nvm data
+ * @length: of the chunk
+ * @data: the NVM chunk returned on read, nothing on write
+ */
+struct iwl_nvm_access_resp_ver1 {
+ __le16 offset;
+ __le16 length;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_1 */
+
+/* Section types for NVM_ACCESS_CMD version 2 */
+enum {
+ NVM_SECTION_TYPE_HW = 0,
+ NVM_SECTION_TYPE_SW,
+ NVM_SECTION_TYPE_PAPD,
+ NVM_SECTION_TYPE_BT,
+ NVM_SECTION_TYPE_CALIBRATION,
+ NVM_SECTION_TYPE_PRODUCTION,
+ NVM_SECTION_TYPE_POST_FCS_CALIB,
+ NVM_NUM_OF_SECTIONS,
+};
+
+/**
+ * struct iwl_nvm_access_cmd_ver2 - Request the device to send an NVM section
+ * @op_code: 0 - read, 1 - write
+ * @target: NVM_ACCESS_TARGET_*
+ * @type: NVM_SECTION_TYPE_*
+ * @offset: offset in bytes into the section
+ * @length: in bytes, to read/write
+ * @data: if write operation, the data to write. On read it is empty
+ */
+struct iwl_nvm_access_cmd_ver2 {
+ u8 op_code;
+ u8 target;
+ __le16 type;
+ __le16 offset;
+ __le16 length;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
+ * @offset: offset in bytes into the section
+ * @length: in bytes, either how much was written or read
+ * @type: NVM_SECTION_TYPE_*
+ * @status: 0 for success, fail otherwise
+ * @data: if read operation, the data returned. Empty on write.
+ */
+struct iwl_nvm_access_resp_ver2 {
+ __le16 offset;
+ __le16 length;
+ __le16 type;
+ __le16 status;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
+
+/* MVM_ALIVE 0x1 */
+
+/* alive response is_valid values */
+#define ALIVE_RESP_UCODE_OK BIT(0)
+#define ALIVE_RESP_RFKILL BIT(1)
+
+/* alive response ver_type values */
+enum {
+ FW_TYPE_HW = 0,
+ FW_TYPE_PROT = 1,
+ FW_TYPE_AP = 2,
+ FW_TYPE_WOWLAN = 3,
+ FW_TYPE_TIMING = 4,
+ FW_TYPE_WIPAN = 5
+};
+
+/* alive response ver_subtype values */
+enum {
+ FW_SUBTYPE_FULL_FEATURE = 0,
+ FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
+ FW_SUBTYPE_REDUCED = 2,
+ FW_SUBTYPE_ALIVE_ONLY = 3,
+ FW_SUBTYPE_WOWLAN = 4,
+ FW_SUBTYPE_AP_SUBTYPE = 5,
+ FW_SUBTYPE_WIPAN = 6,
+ FW_SUBTYPE_INITIALIZE = 9
+};
+
+#define IWL_ALIVE_STATUS_ERR 0xDEAD
+#define IWL_ALIVE_STATUS_OK 0xCAFE
+
+#define IWL_ALIVE_FLG_RFKILL BIT(0)
+
+struct mvm_alive_resp {
+ __le16 status;
+ __le16 flags;
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 id;
+ u8 api_minor;
+ u8 api_major;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le16 reserved2;
+ __le32 timestamp;
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 log_event_table_ptr; /* SRAM address for event log */
+ __le32 cpu_register_ptr;
+ __le32 dbgm_config_ptr;
+ __le32 alive_counter_ptr;
+ __le32 scd_base_ptr; /* SRAM address for SCD */
+} __packed; /* ALIVE_RES_API_S_VER_1 */
+
+/* Error response/notification */
+enum {
+ FW_ERR_UNKNOWN_CMD = 0x0,
+ FW_ERR_INVALID_CMD_PARAM = 0x1,
+ FW_ERR_SERVICE = 0x2,
+ FW_ERR_ARC_MEMORY = 0x3,
+ FW_ERR_ARC_CODE = 0x4,
+ FW_ERR_WATCH_DOG = 0x5,
+ FW_ERR_WEP_GRP_KEY_INDX = 0x10,
+ FW_ERR_WEP_KEY_SIZE = 0x11,
+ FW_ERR_OBSOLETE_FUNC = 0x12,
+ FW_ERR_UNEXPECTED = 0xFE,
+ FW_ERR_FATAL = 0xFF
+};
+
+/**
+ * struct iwl_error_resp - FW error indication
+ * ( REPLY_ERROR = 0x2 )
+ * @error_type: one of FW_ERR_*
+ * @cmd_id: the command ID for which the error occurred
+ * @bad_cmd_seq_num: sequence number of the erroneous command
+ * @error_service: which service created the error, applicable only if
+ * error_type = 2, otherwise 0
+ * @timestamp: TSF in usecs.
+ */
+struct iwl_error_resp {
+ __le32 error_type;
+ u8 cmd_id;
+ u8 reserved1;
+ __le16 bad_cmd_seq_num;
+ __le32 error_service;
+ __le64 timestamp;
+} __packed;
+
+
+/* Common PHY, MAC and Bindings definitions */
+
+#define MAX_MACS_IN_BINDING (3)
+#define MAX_BINDINGS (4)
+#define AUX_BINDING_INDEX (3)
+#define MAX_PHYS (4)
+
+/* Used to extract ID and color from the context dword */
+#define FW_CTXT_ID_POS (0)
+#define FW_CTXT_ID_MSK (0xff << FW_CTXT_ID_POS)
+#define FW_CTXT_COLOR_POS (8)
+#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS)
+#define FW_CTXT_INVALID (0xffffffff)
+
+#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\
+ (_color << FW_CTXT_COLOR_POS))
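/*
 * Illustrative sketch, not part of this patch: packing and unpacking a
 * context dword with hypothetical id/color values.
 */
	u32 ctxt  = FW_CMD_ID_AND_COLOR(2, 1);	/* id 2, color 1 */
	u32 id    = (ctxt & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
	u32 color = (ctxt & FW_CTXT_COLOR_MSK) >> FW_CTXT_COLOR_POS;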
+
+/* Possible actions on PHYs, MACs and Bindings */
+enum {
+ FW_CTXT_ACTION_STUB = 0,
+ FW_CTXT_ACTION_ADD,
+ FW_CTXT_ACTION_MODIFY,
+ FW_CTXT_ACTION_REMOVE,
+ FW_CTXT_ACTION_NUM
+}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+
+/* Time Events */
+
+/* Time Event types, according to MAC type */
+enum iwl_time_event_type {
+ /* BSS Station Events */
+ TE_BSS_STA_AGGRESSIVE_ASSOC,
+ TE_BSS_STA_ASSOC,
+ TE_BSS_EAP_DHCP_PROT,
+ TE_BSS_QUIET_PERIOD,
+
+ /* P2P Device Events */
+ TE_P2P_DEVICE_DISCOVERABLE,
+ TE_P2P_DEVICE_LISTEN,
+ TE_P2P_DEVICE_ACTION_SCAN,
+ TE_P2P_DEVICE_FULL_SCAN,
+
+ /* P2P Client Events */
+ TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
+ TE_P2P_CLIENT_ASSOC,
+ TE_P2P_CLIENT_QUIET_PERIOD,
+
+ /* P2P GO Events */
+ TE_P2P_GO_ASSOC_PROT,
+ TE_P2P_GO_REPETITIVE_NOA,
+ TE_P2P_GO_CT_WINDOW,
+
+ /* WiDi Sync Events */
+ TE_WIDI_TX_SYNC,
+
+ TE_MAX
+}; /* MAC_EVENT_TYPE_API_E_VER_1 */
+
+/* Time Event dependencies: none, on another TE, or in a specific time */
+enum {
+ TE_INDEPENDENT = 0,
+ TE_DEP_OTHER = 1,
+ TE_DEP_TSF = 2,
+ TE_EVENT_SOCIOPATHIC = 4,
+}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
+/* When to send Time Event notifications and to whom (internal = FW) */
+enum {
+ TE_NOTIF_NONE = 0,
+ TE_NOTIF_HOST_START = 0x1,
+ TE_NOTIF_HOST_END = 0x2,
+ TE_NOTIF_INTERNAL_START = 0x4,
+ TE_NOTIF_INTERNAL_END = 0x8
+}; /* MAC_EVENT_ACTION_API_E_VER_1 */
+
+/*
+ * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number
+ * of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' fragments will
+ * be scheduled.
+ */
+enum {
+ TE_FRAG_NONE = 0,
+ TE_FRAG_SINGLE = 1,
+ TE_FRAG_DUAL = 2,
+ TE_FRAG_ENDLESS = 0xffffffff
+};
+
+/* Repeat the time event endlessly (until removed) */
+#define TE_REPEAT_ENDLESS (0xffffffff)
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_REPEAT_MAX_MSK (0x0fffffff)
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define TE_FRAG_MAX_MSK (0x0fffffff)
+
+/**
+ * struct iwl_time_event_cmd - configuring Time Events
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @interval_reciprocal: 2^32 / interval
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
+ * @is_present: 0 or 1, are we present or absent during the Time Event
+ * @max_frags: maximal number of fragments the Time Event can be divided into
+ * @notify: notifications using TE_NOTIF_* (whom to notify when)
+ */
+struct iwl_time_event_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 id;
+ /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+ __le32 apply_time;
+ __le32 max_delay;
+ __le32 dep_policy;
+ __le32 depends_on;
+ __le32 is_present;
+ __le32 max_frags;
+ __le32 interval;
+ __le32 interval_reciprocal;
+ __le32 duration;
+ __le32 repeat;
+ __le32 notify;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC
+ */
+struct iwl_time_event_resp {
+ __le32 status;
+ __le32 id;
+ __le32 unique_id;
+ __le32 id_and_color;
+} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_time_event_notif - notifications of time event start/stop
+ * ( TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: one of TE_NOTIF_START or TE_NOTIF_END
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwl_time_event_notif {
+ __le32 timestamp;
+ __le32 session_id;
+ __le32 unique_id;
+ __le32 id_and_color;
+ __le32 action;
+ __le32 status;
+} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+
+/* Bindings and Time Quota */
+
+/**
+ * struct iwl_binding_cmd - configuring bindings
+ * ( BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * @phy: PHY id and color which belongs to the binding
+ */
+struct iwl_binding_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* BINDING_DATA_API_S_VER_1 */
+ __le32 macs[MAX_MACS_IN_BINDING];
+ __le32 phy;
+} __packed; /* BINDING_CMD_API_S_VER_1 */
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWL_MVM_MAX_QUOTA 128
+
+/**
+ * struct iwl_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ *	remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwl_time_quota_data {
+ __le32 id_and_color;
+ __le32 quota;
+ __le32 max_duration;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ */
+struct iwl_time_quota_cmd {
+ struct iwl_time_quota_data quotas[MAX_BINDINGS];
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+
+/* PHY context */
+
+/* Supported bands */
+#define PHY_BAND_5 (0)
+#define PHY_BAND_24 (1)
+
+/* Supported channel width, vary if there is VHT support */
+#define PHY_VHT_CHANNEL_MODE20 (0x0)
+#define PHY_VHT_CHANNEL_MODE40 (0x1)
+#define PHY_VHT_CHANNEL_MODE80 (0x2)
+#define PHY_VHT_CHANNEL_MODE160 (0x3)
+
+/*
+ * Control channel position:
+ * For legacy, a set bit means upper channel, otherwise lower.
+ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq,
+ * bits-1:0 mark the distance from the center freq. For 20Mhz, offset is 0.
+ * center_freq
+ * |
+ * 40Mhz |_______|_______|
+ * 80Mhz |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code 011 010 001 000 | 100 101 110 111
+ */
+#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
+#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
+#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
+#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
+#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
+#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
+#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
+#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
+
+/*
+ * @band: PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwl_fw_channel_info {
+ u8 band;
+ u8 channel;
+ u8 width;
+ u8 ctrl_pos;
+} __packed;
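/*
 * Illustrative sketch, not part of this patch: filling a channel description
 * with the definitions above. All field values are hypothetical.
 */
	struct iwl_fw_channel_info ci = {
		.band	  = PHY_BAND_5,
		.channel  = 36,			/* hypothetical channel */
		.width	  = PHY_VHT_CHANNEL_MODE80,
		.ctrl_pos = PHY_VHT_CTRL_POS_1_BELOW,
	};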
+
+#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+ (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define PHY_RX_CHAIN_VALID_POS (1)
+#define PHY_RX_CHAIN_VALID_MSK \
+ (0x7 << PHY_RX_CHAIN_VALID_POS)
+#define PHY_RX_CHAIN_FORCE_SEL_POS (4)
+#define PHY_RX_CHAIN_FORCE_SEL_MSK \
+ (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+ (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define PHY_RX_CHAIN_CNT_POS (10)
+#define PHY_RX_CHAIN_CNT_MSK \
+ (0x3 << PHY_RX_CHAIN_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_CNT_POS (12)
+#define PHY_RX_CHAIN_MIMO_CNT_MSK \
+ (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_FORCE_POS (14)
+#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
+ (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
+
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define NUM_PHY_CTX 3
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwl_phy_context_cmd - config of the PHY context
+ * ( PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ * other value means apply new params after X usecs
+ * @tx_param_color: ???
+ * @channel_info:
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwl_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* PHY_CONTEXT_DATA_API_S_VER_1 */
+ __le32 apply_time;
+ __le32 tx_param_color;
+ struct iwl_fw_channel_info ci;
+ __le32 txchain_info;
+ __le32 rxchain_info;
+ __le32 acquisition_data;
+ __le32 dsp_cfg_flags;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
+
+#define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_AGC_IDX 1
+#define IWL_RX_INFO_RSSI_AB_IDX 2
+#define IWL_RX_INFO_RSSI_C_IDX 3
+#define IWL_OFDM_AGC_DB_MSK 0xfe00
+#define IWL_OFDM_AGC_DB_POS 9
+#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
+#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWL_OFDM_RSSI_A_POS 0
+#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
+#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWL_OFDM_RSSI_B_POS 16
+#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff
+#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00
+#define IWL_OFDM_RSSI_C_POS 0
+
+/**
+ * struct iwl_rx_phy_info - phy info
+ * (REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1:
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy_buf: for various implementations of non_cfg_phy
+ * @rate_n_flags: RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ * calculation
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwl_rx_phy_info {
+ u8 non_cfg_phy_cnt;
+ u8 cfg_phy_cnt;
+ u8 stat_id;
+ u8 reserved1;
+ __le32 system_timestamp;
+ __le64 timestamp;
+ __le32 beacon_time_stamp;
+ __le16 phy_flags;
+ __le16 channel;
+ __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
+ __le32 rate_n_flags;
+ __le32 byte_count;
+ __le16 reserved2;
+ __le16 frame_time;
+} __packed;
+
+struct iwl_rx_mpdu_res_start {
+ __le16 byte_count;
+ __le16 reserved;
+} __packed;
+
+/**
+ * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
+ * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on the 2.4 GHz band
+ * @RX_RES_PHY_FLAGS_MOD_CCK:
+ * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @RX_RES_PHY_FLAGS_NARROW_BAND:
+ * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+enum iwl_rx_phy_flags {
+ RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
+ RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
+ RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
+ RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
+ RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
+ RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
+ RX_RES_PHY_FLAGS_AGG = BIT(7),
+ RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
+ RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
+ RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
+};
+
+/**
+ * enum iwl_mvm_rx_status - written by fw for each Rx packet
+ * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
+ * @RX_MPDU_RES_STATUS_KEY_VALID:
+ * @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
+ * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ * in the driver.
+ * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
+ * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @RX_MPDU_RES_STATUS_RRF_KILL:
+ * @RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+enum iwl_mvm_rx_status {
+ RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
+ RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
+ RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
+ RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
+ RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
+ RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
+ RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
+ RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
+ RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
+ RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
+ RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
+ RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
+ RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+ RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
+ RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
+ RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
+ RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
+ RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12),
+ RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
+ RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
+ RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
+ RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
+ RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
+ RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
+ RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
+ RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
+};
+
+/**
+ * struct iwl_radio_version_notif - information on the radio version
+ * ( RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor:
+ * @radio_step:
+ * @radio_dash:
+ */
+struct iwl_radio_version_notif {
+ __le32 radio_flavor;
+ __le32 radio_step;
+ __le32 radio_dash;
+} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+enum iwl_card_state_flags {
+ CARD_ENABLED = 0x00,
+ HW_CARD_DISABLED = 0x01,
+ SW_CARD_DISABLED = 0x02,
+ CT_KILL_CARD_DISABLED = 0x04,
+ HALT_CARD_DISABLED = 0x08,
+ CARD_DISABLED_MSK = 0x0f,
+ CARD_IS_RX_ON = 0x10,
+};
+
+/**
+ * struct iwl_card_state_notif - card state notification
+ * ( CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: %iwl_card_state_flags
+ */
+struct iwl_card_state_notif {
+ __le32 flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_set_calib_default_cmd - set default value for calibration.
+ * ( SET_CALIB_DEFAULT_CMD = 0x8e )
+ * @calib_index: the calibration to set value for
+ * @length: of data
+ * @data: the value to set for the calibration result
+ */
+struct iwl_set_calib_default_cmd {
+ __le16 calib_index;
+ __le16 length;
+ u8 data[0];
+} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
+
+#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
new file mode 100644
index 000000000000..d3d959db03a9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -0,0 +1,640 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-fw.h"
+#include "iwl-debug.h"
+#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-eeprom-parse.h"
+
+#include "mvm.h"
+#include "iwl-phy-db.h"
+
+#define MVM_UCODE_ALIVE_TIMEOUT HZ
+#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
+
+#define UCODE_VALID_OK cpu_to_le32(0x1)
+
+/* Default calibration values for WkP - set to INIT image w/o running */
+static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
+ 0x00, 0x18, 0x00 };
+static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f };
+static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
+ 0x00 };
+static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
+static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
+
+struct iwl_calib_default_data {
+ u16 size;
+ void *data;
+};
+
+#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
+
+static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
+ [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
+ [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
+ [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
+ [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
+ [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
+ [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
+ [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
+};
+
+struct iwl_mvm_alive_data {
+ bool valid;
+ u32 scd_base_addr;
+};
+
+static inline const struct fw_img *
+iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
+{
+ if (ucode_type >= IWL_UCODE_TYPE_MAX)
+ return NULL;
+
+ return &mvm->fw->img[ucode_type];
+}
+
+static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
+{
+ struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
+ .valid = cpu_to_le32(valid_tx_ant),
+ };
+
+ IWL_DEBUG_HC(mvm, "select valid tx ant: %u\n", valid_tx_ant);
+ return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC,
+ sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_alive_data *alive_data = data;
+ struct mvm_alive_resp *palive;
+
+ palive = (void *)pkt->data;
+
+ mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+
+ alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
+ IWL_DEBUG_FW(mvm, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+ le16_to_cpu(palive->status), palive->ver_type,
+ palive->ver_subtype);
+
+ return true;
+}
+
+static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_phy_db *phy_db = data;
+
+ if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+ return true;
+ }
+
+ WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
+
+ return false;
+}
+
+static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
+ enum iwl_ucode_type ucode_type)
+{
+ struct iwl_notification_wait alive_wait;
+ struct iwl_mvm_alive_data alive_data;
+ const struct fw_img *fw;
+ int ret, i;
+ enum iwl_ucode_type old_type = mvm->cur_ucode;
+ static const u8 alive_cmd[] = { MVM_ALIVE };
+
+ mvm->cur_ucode = ucode_type;
+ fw = iwl_get_ucode_image(mvm, ucode_type);
+
+ mvm->ucode_loaded = false;
+
+ if (!fw)
+ return -EINVAL;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
+ alive_cmd, ARRAY_SIZE(alive_cmd),
+ iwl_alive_fn, &alive_data);
+
+ ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
+ if (ret) {
+ mvm->cur_ucode = old_type;
+ iwl_remove_notification(&mvm->notif_wait, &alive_wait);
+ return ret;
+ }
+
+ /*
+ * Some things may run in the background now, but we
+ * just wait for the ALIVE notification here.
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+ if (ret) {
+ mvm->cur_ucode = old_type;
+ return ret;
+ }
+
+ if (!alive_data.valid) {
+ IWL_ERR(mvm, "Loaded ucode is not valid!\n");
+ mvm->cur_ucode = old_type;
+ return -EIO;
+ }
+
+ iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
+ /*
+ * Note: all the queues are enabled as part of the interface
+ * initialization, but in firmware restart scenarios they
+ * could be stopped, so wake them up. In firmware restart,
+ * mac80211 will have the queues stopped as well until the
+ * reconfiguration completes. During normal startup, they
+ * will be empty.
+ */
+
+ for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
+ if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE)
+ mvm->queue_to_mac80211[i] = i;
+ else
+ mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
+ atomic_set(&mvm->queue_stop_count[i], 0);
+ }
+
+ mvm->transport_queue_stop = 0;
+
+ mvm->ucode_loaded = true;
+
+ return 0;
+}
+#define IWL_HW_REV_ID_RAINBOW 0x2
+#define IWL_PROJ_TYPE_LHP 0x5
+
+static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
+{
+ struct iwl_nvm_data *data = mvm->nvm_data;
+ /* Temp calls to static definitions, will be changed to CSR calls */
+ u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
+ u8 project_type = IWL_PROJ_TYPE_LHP;
+
+ return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
+ (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
+ (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
+}
+
+static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_phy_cfg_cmd phy_cfg_cmd;
+ enum iwl_ucode_type ucode_type = mvm->cur_ucode;
+
+ /* Set parameters */
+ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm));
+ phy_cfg_cmd.calib_control.event_trigger =
+ mvm->fw->default_calib[ucode_type].event_trigger;
+ phy_cfg_cmd.calib_control.flow_trigger =
+ mvm->fw->default_calib[ucode_type].flow_trigger;
+
+ IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
+ phy_cfg_cmd.phy_cfg);
+
+ return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC,
+ sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
+/* Starting with the new PHY DB implementation - New calibs are enabled */
+/* Value - 0x405e7 */
+#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\
+ IWL_CALIB_CFG_TEMPERATURE_IDX |\
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+ IWL_CALIB_CFG_DC_IDX |\
+ IWL_CALIB_CFG_BB_FILTER_IDX |\
+ IWL_CALIB_CFG_LO_LEAKAGE_IDX |\
+ IWL_CALIB_CFG_TX_IQ_IDX |\
+ IWL_CALIB_CFG_RX_IQ_IDX |\
+ IWL_CALIB_CFG_AGC_IDX)
+
+#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0
+
+/* Value 0x41567 */
+#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\
+ IWL_CALIB_CFG_TEMPERATURE_IDX |\
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+ IWL_CALIB_CFG_BB_FILTER_IDX |\
+ IWL_CALIB_CFG_DC_IDX |\
+ IWL_CALIB_CFG_TX_IQ_IDX |\
+ IWL_CALIB_CFG_RX_IQ_IDX |\
+ IWL_CALIB_CFG_SENSITIVITY_IDX |\
+ IWL_CALIB_CFG_AGC_IDX)
+
+#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\
+ IWL_CALIB_CFG_TEMPERATURE_IDX |\
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+ IWL_CALIB_CFG_TX_PWR_IDX |\
+ IWL_CALIB_CFG_DC_IDX |\
+ IWL_CALIB_CFG_TX_IQ_IDX |\
+ IWL_CALIB_CFG_SENSITIVITY_IDX)
+
+/*
+ * Sets the calibration trigger values that will be sent to the FW for runtime
+ * and init calibrations.
+ * The ones given in the FW TLV are not correct.
+ */
+static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
+{
+ struct iwl_tlv_calib_ctrl default_calib;
+
+ /*
+ * WkP FW TLV calib bits are wrong, overwrite them.
+ * This defines the dynamic calibrations which are implemented in the
+ * uCode, both for init (flow) calculations and event-driven calibs.
+ */
+
+ /* Init Image */
+ default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
+ default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
+
+ if (default_calib.event_trigger !=
+ mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
+ IWL_ERR(mvm,
+ "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
+ mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
+ default_calib.event_trigger);
+ if (default_calib.flow_trigger !=
+ mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
+ IWL_ERR(mvm,
+ "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
+ mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
+ default_calib.flow_trigger);
+
+ memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
+ &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
+ IWL_ERR(mvm,
+ "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
+ default_calib.event_trigger,
+ default_calib.flow_trigger);
+
+ /* Run time image */
+ default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
+ default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
+
+ if (default_calib.event_trigger !=
+ mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
+ IWL_ERR(mvm,
+ "Updating the event calib for RT image: 0x%x -> 0x%x\n",
+ mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
+ default_calib.event_trigger);
+ if (default_calib.flow_trigger !=
+ mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
+ IWL_ERR(mvm,
+ "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
+ mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
+ default_calib.flow_trigger);
+
+ memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
+ &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
+ IWL_ERR(mvm,
+ "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
+ default_calib.event_trigger,
+ default_calib.flow_trigger);
+}
+
+static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
+{
+ u8 cmd_raw[16]; /* holds the variable size commands */
+ struct iwl_set_calib_default_cmd *cmd =
+ (struct iwl_set_calib_default_cmd *)cmd_raw;
+ int ret, i;
+
+ /* Setting default values for calibrations we don't run */
+ for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) {
+ u16 cmd_len;
+
+ if (wkp_calib_default_data[i].size == 0)
+ continue;
+
+ memset(cmd_raw, 0, sizeof(cmd_raw));
+ cmd_len = wkp_calib_default_data[i].size + sizeof(*cmd);
+ cmd->calib_index = cpu_to_le16(i);
+ cmd->length = cpu_to_le16(wkp_calib_default_data[i].size);
+ if (WARN_ONCE(cmd_len > sizeof(cmd_raw),
+ "Need to enlarge cmd_raw to %d\n", cmd_len))
+ break;
+ memcpy(cmd->data, wkp_calib_default_data[i].data,
+ wkp_calib_default_data[i].size);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0,
+ sizeof(*cmd) +
+ wkp_calib_default_data[i].size,
+ cmd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait calib_wait;
+ static const u8 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ CALIB_RES_NOTIF_PHY_DB
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->init_ucode_run)
+ return 0;
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &calib_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_phy_db_entry,
+ mvm->phy_db);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
+ goto error;
+ }
+
+ if (read_nvm) {
+ /* Read nvm */
+ ret = iwl_nvm_init(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto error;
+ }
+ }
+
+ ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+ WARN_ON(ret);
+
+ /* Override the calibrations from the TLV and the fw's const defaults */
+ iwl_set_default_calib_trigger(mvm);
+
+ /* WkP doesn't have all calibrations, need to set default values */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ ret = iwl_set_default_calibrations(mvm);
+ if (ret)
+ goto error;
+ }
+
+ /*
+ * Send phy configurations command to init uCode
+ * to start the 16.0 uCode init image internal calibrations.
+ */
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+ ret);
+ goto error;
+ }
+
+ /*
+ * Some things may run in the background now, but we
+ * just wait for the calibration complete notification.
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
+ MVM_UCODE_CALIB_TIMEOUT);
+ if (!ret)
+ mvm->init_ucode_run = true;
+ goto out;
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+out:
+ if (!iwlmvm_mod_params.init_dbg) {
+ iwl_trans_stop_device(mvm->trans);
+ } else if (!mvm->nvm_data) {
+ /* we want to debug INIT and we have no NVM - fake */
+ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
+ sizeof(struct ieee80211_channel) +
+ sizeof(struct ieee80211_rate),
+ GFP_KERNEL);
+ if (!mvm->nvm_data)
+ return -ENOMEM;
+ mvm->nvm_data->valid_rx_ant = 1;
+ mvm->nvm_data->valid_tx_ant = 1;
+ mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
+ mvm->nvm_data->bands[0].n_channels = 1;
+ mvm->nvm_data->bands[0].n_bitrates = 1;
+ mvm->nvm_data->bands[0].bitrates =
+ (void *)mvm->nvm_data->channels + 1;
+ mvm->nvm_data->bands[0].bitrates->hw_value = 10;
+ }
+
+ return ret;
+}
+
+#define UCODE_CALIB_TIMEOUT (2*HZ)
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+ int ret, i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ /* If we were in RFKILL during module loading, load init ucode now */
+ if (!mvm->init_ucode_run) {
+ ret = iwl_run_init_mvm_ucode(mvm, false);
+ if (ret && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+ goto error;
+ }
+ }
+
+ if (iwlmvm_mod_params.init_dbg)
+ return 0;
+
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+ if (ret)
+ goto error;
+
+ /* Send phy db control command and then phy db calibration */
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
+ /* init the fw <-> mac80211 STA mapping */
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+ /* Add auxiliary station for scanning */
+ ret = iwl_mvm_add_aux_sta(mvm);
+ if (ret)
+ goto error;
+
+ IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+
+ return 0;
+ error:
+ iwl_trans_stop_device(mvm->trans);
+ return ret;
+}
+
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
+{
+ int ret, i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+ if (ret)
+ goto error;
+
+ /* Send phy db control command and then phy db calibration */
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
+ /* init the fw <-> mac80211 STA mapping */
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+ /* Add auxiliary station for scanning */
+ ret = iwl_mvm_add_aux_sta(mvm);
+ if (ret)
+ goto error;
+
+ return 0;
+ error:
+ iwl_trans_stop_device(mvm->trans);
+ return ret;
+}
+
+int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
+ u32 flags = le32_to_cpu(card_state_notif->flags);
+
+ IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_KILL_CARD_DISABLED) ?
+ "Reached" : "Not reached");
+
+ return 0;
+}
+
+int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_radio_version_notif *radio_version = (void *)pkt->data;
+
+ /* TODO: what to do with that? */
+ IWL_DEBUG_INFO(mvm,
+ "Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n",
+ le32_to_cpu(radio_version->radio_flavor),
+ le32_to_cpu(radio_version->radio_step),
+ le32_to_cpu(radio_version->radio_dash));
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
new file mode 100644
index 000000000000..011906e73a05
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -0,0 +1,134 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/leds.h>
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+
+/* Set led register on */
+static void iwl_mvm_led_enable(struct iwl_mvm *mvm)
+{
+ iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
+}
+
+/* Set led register off */
+static void iwl_mvm_led_disable(struct iwl_mvm *mvm)
+{
+ iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF);
+}
+
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);
+ if (brightness > 0)
+ iwl_mvm_led_enable(mvm);
+ else
+ iwl_mvm_led_disable(mvm);
+}
+
+int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+ int mode = iwlwifi_mod_params.led_mode;
+ int ret;
+
+ switch (mode) {
+ case IWL_LED_DEFAULT:
+ case IWL_LED_RF_STATE:
+ mode = IWL_LED_RF_STATE;
+ break;
+ case IWL_LED_DISABLE:
+ IWL_INFO(mvm, "Led disabled\n");
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(mvm->hw->wiphy));
+ mvm->led.brightness_set = iwl_led_brightness_set;
+ mvm->led.max_brightness = 1;
+
+ if (mode == IWL_LED_RF_STATE)
+ mvm->led.default_trigger =
+ ieee80211_get_radio_led_name(mvm->hw);
+
+ ret = led_classdev_register(mvm->trans->dev, &mvm->led);
+ if (ret) {
+ kfree(mvm->led.name);
+ IWL_INFO(mvm, "Failed to enable led\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+ if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE)
+ return;
+
+ led_classdev_unregister(&mvm->led);
+ kfree(mvm->led.name);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
new file mode 100644
index 000000000000..341dbc0237ea
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -0,0 +1,992 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "fw-api.h"
+#include "mvm.h"
+
+const u8 iwl_mvm_ac_to_tx_fifo[] = {
+ IWL_MVM_TX_FIFO_BK,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_VO,
+};
+
+struct iwl_mvm_mac_iface_iterator_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
+ unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
+ unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)];
+ enum iwl_tsf_id preferred_tsf;
+ bool found_vif;
+};
+
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 ac;
+
+ /* Iterator may already find the interface being added -- skip it */
+ if (vif == data->vif) {
+ data->found_vif = true;
+ return;
+ }
+
+ /* Mark the queues used by the vif */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ __set_bit(vif->hw_queue[ac], data->used_hw_queues);
+
+ if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ __set_bit(vif->cab_queue, data->used_hw_queues);
+
+ /*
+ * Mark MAC IDs as used by clearing the available bit, and
+ * (below) mark TSFs as used if their existing use is not
+ * compatible with the new interface type.
+ * No locking or atomic bit operations are needed since the
+ * data is on the stack of the caller function.
+ */
+ __clear_bit(mvmvif->id, data->available_mac_ids);
+
+ /*
+ * The TSF is a hardware/firmware resource, there are 4 and
+ * the driver should assign and free them as needed. However,
+ * there are cases where 2 MACs should share the same TSF ID
+ * for the purpose of clock sync, an optimization to avoid
+ * clock drift causing overlapping TBTTs/DTIMs for a GO and
+ * client in the system.
+ *
+ * The firmware will decide according to the MAC type which
+ * will be the master and slave. Clients that need to sync
+ * with a remote station will be the master, and an AP or GO
+ * will be the slave.
+ *
+ * Depending on the new interface type it can be slaved to
+ * or become the master of an existing interface.
+ */
+ switch (data->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ /*
+ * The new interface is a client, so if the existing one
+ * we're iterating is an AP, its TSF should be used to
+ * avoid drift between the new client and the existing AP;
+ * the existing AP will get drift updates from the new
+ * client context in this case.
+ */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (data->preferred_tsf == NUM_TSF_IDS &&
+ test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ /*
+ * The new interface is an AP/GO, so it should get drift
+ * updates from an existing client or use the same
+ * TSF as an existing GO. There's no drift between
+ * TSFs internally but if they used different TSFs
+ * then a new client MAC could update one of them
+ * and cause drift that way.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_AP) {
+ if (data->preferred_tsf == NUM_TSF_IDS &&
+ test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
+ }
+ break;
+ default:
+ /*
+ * For all other interface types there's no need to
+ * take drift into account. Either they're exclusive
+ * like IBSS and monitor, or we don't care much about
+ * their TSF (like P2P Device), but we won't be able
+ * to share the TSF resource.
+ */
+ break;
+ }
+
+ /*
+ * Unless we exited above, we can't share the TSF resource
+ * that the virtual interface we're iterating over is using
+ * with the new one, so clear the available bit and if this
+ * was the preferred one, reset that as well.
+ */
+ __clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
+
+ if (data->preferred_tsf == mvmvif->tsf_id)
+ data->preferred_tsf = NUM_TSF_IDS;
+}
+
+/*
+ * Get the mask of the queues used by the vif
+ */
+u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 qmask, ac;
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+
+ qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
+ BIT(vif->cab_queue) : 0;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ qmask |= BIT(vif->hw_queue[ac]);
+
+ return qmask;
+}
+
+static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_iface_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
+ .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+ /* no preference yet */
+ .preferred_tsf = NUM_TSF_IDS,
+ .used_hw_queues = {
+ BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+ BIT(IWL_MVM_AUX_QUEUE) |
+ BIT(IWL_MVM_CMD_QUEUE)
+ },
+ .found_vif = false,
+ };
+ u32 ac;
+ int ret;
+
+ /*
+ * Allocate a MAC ID and a TSF for this MAC, along with the queues
+ * and other resources.
+ */
+
+ /*
+ * Before the iterator, we start with all MAC IDs and TSFs available.
+ *
+ * During iteration, all MAC IDs are cleared that are in use by other
+ * virtual interfaces, and all TSF IDs are cleared that can't be used
+ * by this new virtual interface because they're used by an interface
+ * that can't share it with the new one.
+ * At the same time, we check if there's a preferred TSF in the case
+ * that we should share it with another interface.
+ */
+
+ /* Currently, MAC ID 0 should be used only for the managed vif */
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ __clear_bit(0, data.available_mac_ids);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_iface_iterator, &data);
+
+ /*
+ * In the case we're getting here during resume, it's similar to
+ * firmware restart, and with RESUME_ALL the iterator will find
+ * the vif being added already.
+ * We don't want to reassign any IDs in either case since doing
+ * so would probably assign different IDs (as interfaces aren't
+ * necessarily added in the same order), but the old IDs were
+ * preserved anyway, so skip ID assignment for both resume and
+ * recovery.
+ */
+ if (data.found_vif)
+ return 0;
+
+ /* Therefore, in recovery, we can't get here */
+ WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+ mvmvif->id = find_first_bit(data.available_mac_ids,
+ NUM_MAC_INDEX_DRIVER);
+ if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
+ IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ if (data.preferred_tsf != NUM_TSF_IDS)
+ mvmvif->tsf_id = data.preferred_tsf;
+ else
+ mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+ NUM_TSF_IDS);
+ if (mvmvif->tsf_id == NUM_TSF_IDS) {
+ IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ mvmvif->color = 0;
+
+ INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+ mvmvif->time_event_data.id = TE_MAX;
+
+ /* No need to allocate data queues to P2P Device MAC.*/
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
+
+ return 0;
+ }
+
+ /* Find available queues, and allocate them to the ACs */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ u8 queue = find_first_zero_bit(data.used_hw_queues,
+ IWL_MVM_FIRST_AGG_QUEUE);
+
+ if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ IWL_ERR(mvm, "Failed to allocate queue\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ __set_bit(queue, data.used_hw_queues);
+ vif->hw_queue[ac] = queue;
+ }
+
+ /* Allocate the CAB queue for softAP and GO interfaces */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ u8 queue = find_first_zero_bit(data.used_hw_queues,
+ IWL_MVM_FIRST_AGG_QUEUE);
+
+ if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ IWL_ERR(mvm, "Failed to allocate cab queue\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ vif->cab_queue = queue;
+ } else {
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ }
+
+ mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ return 0;
+
+exit_fail:
+ memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+ memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ return ret;
+}
+
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ u32 ac;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif);
+ if (ret)
+ return ret;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ iwl_trans_ac_txq_enable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_TX_FIFO_VO);
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
+ IWL_MVM_TX_FIFO_VO);
+ /* fall through */
+ default:
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ iwl_trans_ac_txq_enable(mvm->trans, vif->hw_queue[ac],
+ iwl_mvm_ac_to_tx_fifo[ac]);
+ break;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ int ac;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE);
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_trans_txq_disable(mvm->trans, vif->cab_queue);
+ /* fall through */
+ default:
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac]);
+ }
+}
+
+static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 *cck_rates, u8 *ofdm_rates)
+{
+ struct ieee80211_supported_band *sband;
+ unsigned long basic = vif->bss_conf.basic_rates;
+ int lowest_present_ofdm = 100;
+ int lowest_present_cck = 100;
+ u8 cck = 0;
+ u8 ofdm = 0;
+ int i;
+
+ sband = mvm->hw->wiphy->bands[band];
+
+ for_each_set_bit(i, &basic, BITS_PER_LONG) {
+ int hw = sband->bitrates[i].hw_value;
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+ if (lowest_present_ofdm > hw)
+ lowest_present_ofdm = hw;
+ } else {
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ cck |= BIT(hw);
+ if (lowest_present_cck > hw)
+ lowest_present_cck = hw;
+ }
+ }
+
+ /*
+ * Now we've got the basic rates as bitmaps in the ofdm and cck
+ * variables. This isn't sufficient though, as there might not
+ * be all the right rates in the bitmap. E.g. if the only basic
+ * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+ * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+ *
+ * [...] a STA responding to a received frame shall transmit
+ * its Control Response frame [...] at the highest rate in the
+ * BSSBasicRateSet parameter that is less than or equal to the
+ * rate of the immediately previous frame in the frame exchange
+ * sequence ([...]) and that is of the same modulation class
+ * ([...]) as the received frame. If no rate contained in the
+ * BSSBasicRateSet parameter meets these conditions, then the
+ * control frame sent in response to a received frame shall be
+ * transmitted at the highest mandatory rate of the PHY that is
+ * less than or equal to the rate of the received frame, and
+ * that is of the same modulation class as the received frame.
+ *
+ * As a consequence, we need to add all mandatory rates that are
+ * lower than all of the basic rates to these bitmaps.
+ */
+
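+ /*
+ * Worked example: with basic rates {5.5, 11} the loop above yields
+ * cck = {5.5, 11} and an empty ofdm bitmap; the additions below turn
+ * that into cck = {1, 2, 5.5, 11} and ofdm = {6, 12, 24}.
+ */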
+ if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
+ ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+ if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
+ ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+ /* 6M already there or needed so always add */
+ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+ /*
+ * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+ * Note, however:
+ * - if no CCK rates are basic, it must be ERP since there must
+ * be some basic rates at all, so they're OFDM => ERP PHY
+ * (or we're in 5 GHz, and the cck bitmap will never be used)
+ * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+ * - if 5.5M is basic, 1M and 2M are mandatory
+ * - if 2M is basic, 1M is mandatory
+ * - if 1M is basic, that's the only valid ACK rate.
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+ if (IWL_RATE_11M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_5M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_2M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+ *cck_rates = cck;
+ *ofdm_rates = ofdm;
+}
+
+static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *chanctx;
+ u8 cck_ack_rates, ofdm_ack_rates;
+ int i;
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd->action = cpu_to_le32(action);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
+ else
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
+ break;
+ case NL80211_IFTYPE_AP:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
+
+ memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
+ if (vif->bss_conf.bssid)
+ memcpy(cmd->bssid_addr, vif->bss_conf.bssid, ETH_ALEN);
+ else
+ eth_broadcast_addr(cmd->bssid_addr);
+
+ rcu_read_lock();
+ chanctx = rcu_dereference(vif->chanctx_conf);
+ iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
+ : IEEE80211_BAND_2GHZ,
+ &cck_ack_rates, &ofdm_ack_rates);
+ rcu_read_unlock();
+
+ cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates);
+ cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
+
+ cmd->cck_short_preamble =
+ cpu_to_le32(vif->bss_conf.use_short_preamble ?
+ MAC_FLG_SHORT_PREAMBLE : 0);
+ cmd->short_slot =
+ cpu_to_le32(vif->bss_conf.use_short_slot ?
+ MAC_FLG_SHORT_SLOT : 0);
+
+ for (i = 0; i < AC_NUM; i++) {
+ cmd->ac[i].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min);
+ cmd->ac[i].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max);
+ cmd->ac[i].aifsn = mvmvif->queue_params[i].aifs;
+ cmd->ac[i].edca_txop =
+ cpu_to_le16(mvmvif->queue_params[i].txop * 32);
+ cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
+ }
+
+ if (vif->bss_conf.qos)
+ cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+ if (vif->bss_conf.use_cts_prot)
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT |
+ MAC_PROT_FLG_SELF_CTS_EN);
+
+ /*
+ * I think that we should enable these 2 flags regardless of the HT PROT
+ * fields in the HT IE, but I am not sure. Does anyone know whom to ask?...
+ */
+ if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_HT_PROT |
+ MAC_PROT_FLG_FAT_PROT);
+ }
+
+ cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+}
+
+static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_mac_ctx_cmd *cmd)
+{
+ int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+ sizeof(*cmd), cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
+ le32_to_cpu(cmd->action), ret);
+ return ret;
+}
+
+/*
+ * Fill the specific data for mac context of type station or p2p client
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_data_sta *ctxt_sta)
+{
+ /* We need the dtim_period to set the MAC as associated */
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) {
+ u32 dtim_offs;
+
+ /*
+ * The DTIM count counts down, so when it is N that means N
+ * more beacon intervals happen until the DTIM TBTT. Therefore
+ * add this to the current time. If that ends up being in the
+ * future, the firmware will handle it.
+ *
+ * Also note that the system_timestamp (which we get here as
+ * "sync_device_ts") and TSF timestamp aren't at exactly the
+ * same offset in the frame -- the TSF is sampled at the first symbol
+ * of the TSF field, the system timestamp at signal acquisition
+ * time. This means there's an offset between them of at most
+ * a few hundred microseconds (24 * 8 bits + PLCP time gives
+ * 384us in the longest case), this is currently not relevant
+ * as the firmware wakes up around 2ms before the TBTT.
+ */
+ dtim_offs = vif->bss_conf.sync_dtim_count *
+ vif->bss_conf.beacon_int;
+ /* convert TU to usecs */
+ dtim_offs *= 1024;
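+ /*
+ * For example, a DTIM count of 2 with a 100 TU beacon interval
+ * gives dtim_offs = 200 TU = 204800 usec past the sync point.
+ */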
+
+ ctxt_sta->dtim_tsf =
+ cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
+ ctxt_sta->dtim_time =
+ cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
+
+ IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
+ le64_to_cpu(ctxt_sta->dtim_tsf),
+ le32_to_cpu(ctxt_sta->dtim_time),
+ dtim_offs);
+
+ ctxt_sta->is_assoc = cpu_to_le32(1);
+ } else {
+ ctxt_sta->is_assoc = cpu_to_le32(0);
+ }
+
+ ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ ctxt_sta->bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+ ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period);
+ ctxt_sta->dtim_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period));
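+ /*
+ * The *_reciprocal values presumably let the firmware replace a
+ * division by the interval with a multiplication; iwl_mvm_reciprocal()
+ * just supplies the pre-computed value.
+ */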
+
+ ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
+ ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ /* Fill the data specific for station mode */
+ iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ /* Fill the data specific for station mode */
+ iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta);
+
+ cmd.p2p_sta.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+ /* No other data to be filled */
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+struct iwl_mvm_go_iterator_data {
+ bool go_active;
+};
+
+static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_go_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active)
+ data->go_active = true;
+}
+
+static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct iwl_mvm_go_iterator_data data = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROMISC);
+
+ /*
+ * This flag should be set to true when the P2P Device is
+ * discoverable and there is at least one other active P2P GO. Setting
+ * this flag will allow the P2P Device to be discoverable on other
+ * channels in addition to its listen channel.
+ * Note that this flag should not be set in other cases as it opens the
+ * Rx filters on all MACs and increases the number of interrupts.
+ */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_go_iterator, &data);
+
+ cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+ struct iwl_mac_beacon_cmd *beacon_cmd,
+ u8 *beacon, u32 frame_size)
+{
+ u32 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /* The index is relative to frame start but we start looking at the
+ * variable-length part of the beacon. */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
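+ /* Each element is ID (1 byte), length (1 byte), then payload, so
+ * stepping by beacon[tim_idx + 1] + 2 skips one whole element. */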
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx+1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
+ beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
+ } else {
+ IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
+ }
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_host_cmd cmd = {
+ .id = BEACON_TEMPLATE_CMD,
+ .flags = CMD_ASYNC,
+ };
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
+ struct ieee80211_tx_info *info;
+ u32 beacon_skb_len;
+ u32 rate;
+
+ if (WARN_ON(!beacon))
+ return -EINVAL;
+
+ beacon_skb_len = beacon->len;
+
+ /* TODO: for now the beacon template id is set to be the mac context id.
+ * Might be better to handle it as another resource ... */
+ beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+ /* Set up TX command fields */
+ beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len);
+ beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id;
+ beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ beacon_cmd.tx.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS |
+ TX_CMD_FLG_TSF);
+
+ mvm->mgmt_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
+ mvm->mgmt_last_antenna_idx);
+
+ beacon_cmd.tx.rate_n_flags =
+ cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
+ RATE_MCS_ANT_POS);
+
+ info = IEEE80211_SKB_CB(beacon);
+
+ if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
+ rate = IWL_FIRST_OFDM_RATE;
+ } else {
+ rate = IWL_FIRST_CCK_RATE;
+ beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
+ }
+ beacon_cmd.tx.rate_n_flags |=
+ cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
+
+ /* Set up TX beacon command fields */
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
+ beacon->data,
+ beacon_skb_len);
+
+ /* Submit command */
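+ /* Two-fragment command: fragment 0 carries the beacon_cmd header and
+ * fragment 1 the beacon frame itself; IWL_HCMD_DFL_DUP makes the
+ * transport keep its own copy since the caller frees the skb right
+ * after. */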
+ cmd.len[0] = sizeof(beacon_cmd);
+ cmd.data[0] = &beacon_cmd;
+ cmd.dataflags[0] = 0;
+ cmd.len[1] = beacon_skb_len;
+ cmd.data[1] = beacon->data;
+ cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/* The beacon template for the AP/GO context has changed and needs update */
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *beacon;
+ int ret;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP);
+
+ beacon = ieee80211_beacon_get(mvm->hw, vif);
+ if (!beacon)
+ return -ENOMEM;
+
+ ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
+ dev_kfree_skb(beacon);
+ return ret;
+}
+
+/*
+ * Fill the specific data for mac context of type AP or P2P GO
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_data_ap *ctxt_ap)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 curr_dev_time;
+
+ ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ ctxt_ap->bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+ ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period);
+ ctxt_ap->dtim_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period));
+
+ ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+ curr_dev_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ ctxt_ap->beacon_time = cpu_to_le32(curr_dev_time);
+
+ ctxt_ap->beacon_tsf = cpu_to_le64(curr_dev_time);
+
+ /* TODO: Assume that the beacon id == mac context id */
+ ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ /* Fill the data specific for ap mode */
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ /* Fill the data specific for GO mode */
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap);
+
+ cmd.go.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
+ cmd.go.opp_ps_enabled = cpu_to_le32(!!vif->bss_conf.p2p_oppps);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u32 action)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (!vif->p2p)
+ return iwl_mvm_mac_ctxt_cmd_station(mvm, vif,
+ action);
+ else
+ return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif,
+ action);
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!vif->p2p)
+ return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
+ else
+ return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD);
+ if (ret)
+ return ret;
+
+ mvmvif->uploaded = true;
+ return 0;
+}
+
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY);
+}
+
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd;
+ int ret;
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
+ return ret;
+ }
+
+ mvmvif->uploaded = false;
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
new file mode 100644
index 000000000000..e8264e11b12d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -0,0 +1,1314 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-io.h"
+#include "mvm.h"
+#include "sta.h"
+#include "time-event.h"
+#include "iwl-eeprom-parse.h"
+#include "fw-api-scan.h"
+#include "iwl-phy-db.h"
+
+static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .limits = iwl_mvm_limits,
+ .n_limits = ARRAY_SIZE(iwl_mvm_limits),
+ },
+};
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ int num_mac, ret;
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_QUEUE_CONTROL |
+ IEEE80211_HW_WANT_MONITOR_VIF |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_TIMING_BEACON_ONLY;
+
+ hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
+ hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
+ hw->rate_control_algorithm = "iwl-mvm-rs";
+
+ /*
+ * Enable 11w if advertised by firmware and software crypto
+ * is not enabled (as the firmware will interpret some mgmt
+ * packets, so enabling it with software crypto isn't safe)
+ */
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
+ !iwlwifi_mod_params.sw_crypto)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->sta_data_size = sizeof(struct iwl_mvm_sta);
+ hw->vif_data_size = sizeof(struct iwl_mvm_vif);
+ hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwl_mvm_iface_combinations);
+
+ hw->wiphy->max_remain_on_channel_duration = 500;
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+ /* Extract MAC address */
+ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
+ hw->wiphy->addresses = mvm->addresses;
+ hw->wiphy->n_addresses = 1;
+ num_mac = mvm->nvm_data->n_hw_addrs;
+ if (num_mac > 1) {
+ memcpy(mvm->addresses[1].addr, mvm->addresses[0].addr,
+ ETH_ALEN);
+ mvm->addresses[1].addr[5]++;
+ hw->wiphy->n_addresses++;
+ }
+
+ /* we create the 24-byte 802.11 header and a max-length (2 + 32 byte) SSID element */
+ hw->wiphy->max_scan_ie_len =
+ mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+
+ if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+
+ hw->wiphy->hw_version = mvm->trans->hw_id;
+
+ if (iwlwifi_mod_params.power_save)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_P2P_GO_OPPPS;
+
+ mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+
+#ifdef CONFIG_PM_SLEEP
+ if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ mvm->trans->ops->d3_suspend &&
+ mvm->trans->ops->d3_resume &&
+ device_can_wakeup(mvm->trans->dev)) {
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE;
+ if (!iwlwifi_mod_params.sw_crypto)
+ hw->wiphy->wowlan.flags |=
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+ hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+ hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+ hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ }
+#endif
+
+ ret = iwl_mvm_leds_init(mvm);
+ if (ret)
+ return ret;
+
+ return ieee80211_register_hw(mvm->hw);
+}
+
+static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) {
+ IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n");
+ goto drop;
+ }
+
+ if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+ goto drop;
+
+ if (control->sta) {
+ if (iwl_mvm_tx_skb(mvm, skb, control->sta))
+ goto drop;
+ return;
+ }
+
+ if (iwl_mvm_tx_skb_non_sta(mvm, skb))
+ goto drop;
+ return;
+ drop:
+ ieee80211_free_txskb(hw, skb);
+}
+
+static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid,
+ u16 *ssn, u8 buf_size)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
+ sta->addr, tid, action);
+
+ if (!(mvm->nvm_data->sku_cap_11n_enable))
+ return -EACCES;
+
+ mutex_lock(&mvm->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->uploaded = false;
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ /* does this make sense at all? */
+ mvmvif->color++;
+
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ mvmvif->phy_ctxt = NULL;
+}
+
+static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+{
+ iwl_trans_stop_device(mvm->trans);
+ iwl_trans_stop_hw(mvm->trans, false);
+
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+
+ /* just in case one was running */
+ ieee80211_remain_on_channel_expired(mvm->hw);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_cleanup_iterator, mvm);
+
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+ memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+
+ ieee80211_wake_queues(mvm->hw);
+
+ mvm->vif_count = 0;
+}
+
+static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Clean up some internal and mac80211 state on restart */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_restart_cleanup(mvm);
+
+ ret = iwl_mvm_up(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ ret = iwl_mvm_update_quotas(mvm, NULL);
+ if (ret)
+ IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
+ ret);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ flush_work(&mvm->async_handlers_wk);
+
+ mutex_lock(&mvm->mutex);
+ /* async_handlers_wk is now blocked */
+
+ /*
+ * The work item could be running or queued if the
+ * ROC time event stops just as we get here.
+ */
+ cancel_work_sync(&mvm->roc_done_wk);
+
+ iwl_trans_stop_device(mvm->trans);
+ iwl_trans_stop_hw(mvm->trans, false);
+
+ iwl_mvm_async_handlers_purge(mvm);
+ /* async_handlers_list is empty and will stay empty: HW is stopped */
+
+ /* the fw is stopped, the aux sta is dead: clean up driver state */
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * The worker might have been waiting for the mutex, let it run and
+ * discover that its list is now empty.
+ */
+ cancel_work_sync(&mvm->async_handlers_wk);
+}
+
+static void iwl_mvm_pm_disable_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ int ret;
+
+ ret = iwl_mvm_power_disable(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to disable power management\n");
+}
+
+static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+
+ iwl_mvm_power_update_mode(mvm, vif);
+}
+
+static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ /*
+ * Not much to do here. The stack will not allow interface
+ * types or combinations that we didn't advertise, so we
+ * don't really have to check the types.
+ */
+
+ mutex_lock(&mvm->mutex);
+
+ /* Allocate resources for the MAC context, and add it to the fw */
+ ret = iwl_mvm_mac_ctxt_init(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * The AP binding flow can be done only after the beacon
+ * template is configured (which happens only in the mac80211
+ * start_ap() flow), and adding the broadcast station can happen
+ * only after the binding.
+ * In addition, since modifying the MAC before adding a bcast
+ * station is not allowed by the FW, delay adding the MAC context to
+ * the point where we can also add the bcast station.
+ * In short: there's not much we can do at this point, other than
+ * allocating resources :)
+ */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
+ qmask);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to allocate bcast sta\n");
+ goto out_release;
+ }
+
+ goto out_unlock;
+ }
+
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on a single MAC.
+ * Iterate and disable PM on all active interfaces.
+ * Note: the method below does not count the new interface being added
+ * at this moment.
+ */
+ mvm->vif_count++;
+ if (mvm->vif_count > 1) {
+ IWL_DEBUG_MAC80211(mvm,
+ "Disable power on existing interfaces\n");
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_pm_disable_iterator, mvm);
+ }
+
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_release;
+
+ /*
+ * Update power state on the new interface. Admittedly, based on
+ * mac80211 logic, this power update will disable power management
+ */
+ iwl_mvm_power_update_mode(mvm, vif);
+
+ /*
+ * P2P_DEVICE interface does not have a channel context assigned to it,
+ * so a dedicated PHY context is allocated to it and the corresponding
+ * MAC context is bound to it at this stage.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ mvmvif->phy_ctxt = &mvm->phy_ctxt_roc;
+
+ /*
+ * The channel used here isn't relevant as it's
+ * going to be overwritten as part of the ROC flow.
+ * For now use the first channel we have.
+ */
+ chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt,
+ &chandef, 1, 1);
+ if (ret)
+ goto out_remove_mac;
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_remove_phy;
+
+ ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
+ if (ret)
+ goto out_unbind;
+
+ /* Save a pointer to p2p device vif, so it can later be used to
+ * update the p2p device MAC when a GO is started/stopped */
+ mvm->p2p_device_vif = vif;
+ }
+
+ goto out_unlock;
+
+ out_unbind:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ out_remove_phy:
+ iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+ out_remove_mac:
+ mvmvif->phy_ctxt = NULL;
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+ out_release:
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on a single MAC.
+ * Check if only one additional interface remains after releasing the
+ * current one. Update power mode on the remaining interface.
+ */
+ mvm->vif_count--;
+ IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count == 1) {
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_update_iterator, mvm);
+ }
+ iwl_mvm_mac_ctxt_release(mvm, vif);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 tfd_msk = 0, ac;
+
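+ /* Collect the TFD queues used by this interface so they can be flushed */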
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ tfd_msk |= BIT(vif->hw_queue[ac]);
+
+ if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ tfd_msk |= BIT(vif->cab_queue);
+
+ if (tfd_msk) {
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
+ mutex_unlock(&mvm->mutex);
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ /*
+ * Flush the ROC worker which will flush the OFFCHANNEL queue.
+ * We assume here that all the packets sent to the OFFCHANNEL
+ * queue are sent in a ROC session.
+ */
+ flush_work(&mvm->roc_done_wk);
+ } else {
+ /*
+ * By now, all the AC queues are empty. The AGG queues are
+ * empty too. We already got all the Tx responses for all the
+ * packets in the queues. The drain work may have been
+ * triggered. Flush it. This work item takes the mutex, so kill
+ * it before we take it.
+ */
+ flush_work(&mvm->sta_drained_wk);
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ /*
+ * For AP/GO interface, the tear down of the resources allocated to the
+ * interface should be handled as part of the bss_info_changed flow.
+ */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
+ goto out_release;
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mvm->p2p_device_vif = NULL;
+ iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+ mvmvif->phy_ctxt = NULL;
+ }
+
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on a single MAC.
+ * Check if only one additional interface remains after removing the
+ * current one. Update power mode on the remaining interface.
+ */
+ if (mvm->vif_count)
+ mvm->vif_count--;
+ IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count == 1) {
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_update_iterator, mvm);
+ }
+
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+out_release:
+ iwl_mvm_mac_ctxt_release(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+}
+
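+/*
+ * Nothing to do here - the driver relies on channel contexts and
+ * bss_info_changed() for its configuration, so hw config changes
+ * reported through this callback are ignored.
+ */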
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
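+ /* No RX filter flags are supported for now, so clear them all */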
+ *total_flags = 0;
+}
+
+static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ /* add quota for this interface */
+ ret = iwl_mvm_update_quotas(mvm, vif);
+ if (ret) {
+ IWL_ERR(mvm, "failed to update quotas\n");
+ return;
+ }
+ } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ /* remove AP station now that the MAC is unassoc */
+ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
+ if (ret)
+ IWL_ERR(mvm, "failed to remove AP station\n");
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ /* remove quota for this interface */
+ ret = iwl_mvm_update_quotas(mvm, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to update quotas\n");
+ }
+ } else if (changes & BSS_CHANGED_DTIM_PERIOD) {
+ /*
+ * We received a beacon _after_ association so
+ * remove the session protection.
+ */
+ iwl_mvm_remove_time_event(mvm, mvmvif,
+ &mvmvif->time_event_data);
+ } else if (changes & BSS_CHANGED_PS) {
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on a single
+ * MAC. Avoid a power mode update if more than one interface
+ * is active.
+ */
+ IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count == 1) {
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
+ }
+}
+
+static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Send the beacon template */
+ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /* Add the mac context */
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /* Perform the binding */
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_remove;
+
+ mvmvif->ap_active = true;
+
+ /* Send the bcast station. At this stage the TBTT and DTIM time events
+ * are added and applied to the scheduler */
+ ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
+ if (ret)
+ goto out_unbind;
+
+ ret = iwl_mvm_update_quotas(mvm, vif);
+ if (ret)
+ goto out_rm_bcast;
+
+ /* Need to update the P2P Device MAC */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+
+ mutex_unlock(&mvm->mutex);
+ return 0;
+
+out_rm_bcast:
+ iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+out_unbind:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+out_remove:
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->ap_active = false;
+
+ /* Need to update the P2P Device MAC */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+
+ iwl_mvm_update_quotas(mvm, NULL);
+ iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ /* Need to send a new beacon template to the FW */
+ if (changes & BSS_CHANGED_BEACON) {
+ if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
+ }
+}
+
+static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
+ break;
+ default:
+ /* shouldn't happen */
+ WARN_ON_ONCE(1);
+ }
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+ ret = iwl_mvm_scan_request(mvm, vif, req);
+ else
+ ret = -EBUSY;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+
+ iwl_mvm_cancel_scan(mvm);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void
+iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* TODO: how do we tell the fw to send frames for a specific TID */
+
+ /*
+ * The fw will send an EOSP notification when the last frame is
+ * transmitted.
+ */
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames);
+}
+
+static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ if (atomic_read(&mvmsta->pending_frames) > 0)
+ ieee80211_sta_block_awake(hw, sta, true);
+ /*
+ * The fw updates the STA to be asleep. Tx packets on the Tx
+ * queues to this station will not be transmitted. The fw will
+ * send a Tx response with TX_STATUS_FAIL_DEST_PS.
+ */
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (WARN_ON(mvmsta->sta_id == IWL_INVALID_STATION))
+ break;
+ iwl_mvm_sta_modify_ps_wake(mvm, sta);
+ break;
+ default:
+ break;
+ }
+}
+
+static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
+ sta->addr, old_state, new_state);
+
+ /* this would be a mac80211 bug ... but don't crash */
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ /* if a STA is being removed, reuse its ID */
+ flush_work(&mvm->sta_drained_wk);
+
+ mutex_lock(&mvm->mutex);
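+
+ /*
+ * Only the state transitions that need firmware interaction do real
+ * work here; the other transitions simply return 0.
+ */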
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ ret = iwl_mvm_add_sta(mvm, vif, sta);
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_AUTH) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = iwl_mvm_update_sta(mvm, vif, sta);
+ if (ret == 0)
+ iwl_mvm_rs_rate_init(mvm, sta,
+ mvmvif->phy_ctxt->channel->band);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_NONE) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ ret = iwl_mvm_rm_sta(mvm, vif, sta);
+ } else {
+ ret = -EIO;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mvm->rts_threshold = value;
+
+ return 0;
+}
+
+static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->queue_params[ac] = *params;
+
+ /*
+ * No need to update right away, we'll get BSS_CHANGED_QOS.
+ * The exception is the P2P_DEVICE interface, which needs an immediate update.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+ return ret;
+ }
+ return 0;
+}
+
+static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
+ 200 + vif->bss_conf.beacon_int);
+ u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
+ 100 + vif->bss_conf.beacon_int);
+
+ if (WARN_ON_ONCE(vif->bss_conf.assoc))
+ return;
+
+ mutex_lock(&mvm->mutex);
+ /* Try really hard to protect the session and hear a beacon */
+ iwl_mvm_protect_session(mvm, vif, duration, min_duration);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ if (iwlwifi_mod_params.sw_crypto) {
+ IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall-through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE));
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /*
+ * Support for TX only, at least for now, so accept
+ * the key and do nothing else. Then mac80211 will
+ * pass it for TX but we don't have to use it for RX.
+ */
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ switch (cmd) {
+ case SET_KEY:
+ IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
+ if (ret) {
+ IWL_WARN(mvm, "set key failed\n");
+ /*
+ * can't add key for RX, but we don't need it
+ * in the device for TX so still return 0
+ */
+ ret = 0;
+ }
+
+ break;
+ case DISABLE_KEY:
+ IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
+ ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
+}
+
+
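+/*
+ * Remain-on-channel for the P2P Device interface: move the dedicated ROC
+ * PHY context to the requested channel and schedule a time event for the
+ * requested duration.
+ */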
+static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel,
+ int duration)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct cfg80211_chan_def chandef;
+ int ret;
+
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type);
+ return -EINVAL;
+ }
+
+ IWL_DEBUG_MAC80211(mvm, "enter (%d, %d)\n", channel->hw_value,
+ duration);
+
+ mutex_lock(&mvm->mutex);
+
+ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+ ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc,
+ &chandef, 1, 1);
+
+ /* Schedule the time events */
+ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration);
+
+ mutex_unlock(&mvm->mutex);
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+
+ return ret;
+}
+
+static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ IWL_DEBUG_MAC80211(mvm, "enter\n");
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_stop_p2p_roc(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+ return 0;
+}
+
+static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_MAC80211(mvm, "Add PHY context\n");
+ ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt);
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->phy_ctxt = phyctx;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ /*
+ * The AP binding flow is handled as part of the start_ap flow
+ * (in bss_info_changed).
+ */
+ ret = 0;
+ goto out_unlock;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MONITOR:
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * Setting the quota at this stage is only required for monitor
+ * interfaces. For the other types, the bss_info changed flow
+ * will handle quota settings.
+ */
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ret = iwl_mvm_update_quotas(mvm, vif);
+ if (ret)
+ goto out_remove_binding;
+ }
+
+ goto out_unlock;
+
+ out_remove_binding:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
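+ /* Undo the PHY context assignment if anything above failed */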
+ if (ret)
+ mvmvif->phy_ctxt = NULL;
+ return ret;
+}
+
+static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mutex_lock(&mvm->mutex);
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ goto out_unlock;
+
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
+ iwl_mvm_update_quotas(mvm, vif);
+ break;
+ default:
+ break;
+ }
+
+out_unlock:
+ mvmvif->phy_ctxt = NULL;
+ mutex_unlock(&mvm->mutex);
+}
+
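+/*
+ * mac80211 calls this when a station's TIM bit changes; upload a fresh
+ * beacon template so the firmware transmits the updated TIM element.
+ */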
+static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ bool set)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+ if (!mvm_sta || !mvm_sta->vif) {
+ IWL_ERR(mvm, "Station is not associated to a vif\n");
+ return -EINVAL;
+ }
+
+ return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
+}
+
+struct ieee80211_ops iwl_mvm_hw_ops = {
+ .tx = iwl_mvm_mac_tx,
+ .ampdu_action = iwl_mvm_mac_ampdu_action,
+ .start = iwl_mvm_mac_start,
+ .restart_complete = iwl_mvm_mac_restart_complete,
+ .stop = iwl_mvm_mac_stop,
+ .add_interface = iwl_mvm_mac_add_interface,
+ .remove_interface = iwl_mvm_mac_remove_interface,
+ .config = iwl_mvm_mac_config,
+ .configure_filter = iwl_mvm_configure_filter,
+ .bss_info_changed = iwl_mvm_bss_info_changed,
+ .hw_scan = iwl_mvm_mac_hw_scan,
+ .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+ .sta_state = iwl_mvm_mac_sta_state,
+ .sta_notify = iwl_mvm_mac_sta_notify,
+ .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+ .conf_tx = iwl_mvm_mac_conf_tx,
+ .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .set_key = iwl_mvm_mac_set_key,
+ .update_tkip_key = iwl_mvm_mac_update_tkip_key,
+ .remain_on_channel = iwl_mvm_roc,
+ .cancel_remain_on_channel = iwl_mvm_cancel_roc,
+
+ .add_chanctx = iwl_mvm_add_chanctx,
+ .remove_chanctx = iwl_mvm_remove_chanctx,
+ .change_chanctx = iwl_mvm_change_chanctx,
+ .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
+ .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
+
+ .start_ap = iwl_mvm_start_ap,
+ .stop_ap = iwl_mvm_stop_ap,
+
+ .set_tim = iwl_mvm_set_tim,
+
+#ifdef CONFIG_PM_SLEEP
+ /* look at d3.c */
+ .suspend = iwl_mvm_suspend,
+ .resume = iwl_mvm_resume,
+ .set_wakeup = iwl_mvm_set_wakeup,
+ .set_rekey_data = iwl_mvm_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
+#endif
+ .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
new file mode 100644
index 000000000000..4e339ccfa800
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -0,0 +1,500 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_H__
+#define __IWL_MVM_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/leds.h>
+#include <linux/in6.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+#include "iwl-notif-wait.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-test.h"
+#include "iwl-trans.h"
+#include "sta.h"
+#include "fw-api.h"
+
+#define IWL_INVALID_MAC80211_QUEUE 0xff
+#define IWL_MVM_MAX_ADDRESSES 2
+#define IWL_RSSI_OFFSET 44
+
+enum iwl_mvm_tx_fifo {
+ IWL_MVM_TX_FIFO_BK = 0,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_VO,
+};
+
+/* Placeholder */
+#define IWL_OFFCHANNEL_QUEUE 8
+#define IWL_FIRST_AMPDU_QUEUE 11
+
+extern struct ieee80211_ops iwl_mvm_hw_ops;
+/**
+ * struct iwl_mvm_mod_params - module parameters for iwlmvm
+ * @init_dbg: if true, the NIC won't be stopped if the INIT fw asserts.
+ * We still register with mac80211 so that testmode keeps working. The NIC
+ * must not be brought up after the INIT fw has asserted. This is useful
+ * for debugging the INIT fw with proprietary tools over testmode.
+ * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
+ * Save)-2(default), LP(Low Power)-3
+ */
+struct iwl_mvm_mod_params {
+ bool init_dbg;
+ int power_scheme;
+};
+extern struct iwl_mvm_mod_params iwlmvm_mod_params;
+
+struct iwl_mvm_phy_ctxt {
+ u16 id;
+ u16 color;
+
+ /*
+ * TODO: This should probably be removed. Currently here only for rate
+ * scaling algorithm
+ */
+ struct ieee80211_channel *channel;
+};
+
+struct iwl_mvm_time_event_data {
+ struct ieee80211_vif *vif;
+ struct list_head list;
+ unsigned long end_jiffies;
+ u32 duration;
+ bool running;
+ u32 uid;
+
+ /*
+ * The access to the 'id' field must be done when the
+ * mvm->time_event_lock is held, as its value is used to indicate
+ * whether the te is in the time event list (it is not when id == TE_MAX)
+ */
+ u32 id;
+};
+
+/* Power management */
+
+/**
+ * enum iwl_power_scheme
+ * @IWL_POWER_LEVEL_CAM - Continuously Active Mode
+ * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default)
+ * @IWL_POWER_LEVEL_LP - Low Power
+ */
+enum iwl_power_scheme {
+ IWL_POWER_SCHEME_CAM = 1,
+ IWL_POWER_SCHEME_BPS,
+ IWL_POWER_SCHEME_LP
+};
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+
+/**
+ * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
+ * @id: between 0 and 3
+ * @color: to solve races upon MAC addition and removal
+ * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
+ * @uploaded: indicates the MAC context has been added to the device
+ * @ap_active: indicates that ap context is configured, and that the interface
+ * should get quota etc.
+ * @queue_params: QoS params for this MAC
+ * @bcast_sta: station used for broadcast packets. Used by the following
+ * vifs: P2P_DEVICE, GO and AP.
+ * @beacon_skb: the skb used to hold the AP/GO beacon template
+ */
+struct iwl_mvm_vif {
+ u16 id;
+ u16 color;
+ u8 ap_sta_id;
+
+ bool uploaded;
+ bool ap_active;
+
+ enum iwl_tsf_id tsf_id;
+
+ /*
+ * QoS data from mac80211, need to store this here
+ * as mac80211 has a separate callback but we need
+ * to have the data for the MAC context
+ */
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct iwl_mvm_time_event_data time_event_data;
+
+ struct iwl_mvm_int_sta bcast_sta;
+
+ /*
+ * Assigned while mac80211 has the interface in a channel context,
+ * or, for P2P Device, while it exists.
+ */
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+
+#ifdef CONFIG_PM_SLEEP
+ /* WoWLAN GTK rekey data */
+ struct {
+ u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ __le64 replay_ctr;
+ bool valid;
+ } rekey_data;
+
+ int tx_key_idx;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 addresses for WoWLAN */
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS];
+ int num_target_ipv6_addrs;
+#endif
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *dbgfs_dir;
+ void *dbgfs_data;
+#endif
+};
+
+static inline struct iwl_mvm_vif *
+iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+ return (void *)vif->drv_priv;
+}
+
+enum iwl_mvm_status {
+ IWL_MVM_STATUS_HW_RFKILL,
+ IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_IN_HW_RESTART,
+};
+
+enum iwl_scan_status {
+ IWL_MVM_SCAN_NONE,
+ IWL_MVM_SCAN_OS,
+};
+
+/**
+ * struct iwl_nvm_section - describes an NVM section in memory.
+ *
+ * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
+ * and saved for later use by the driver. Not all NVM sections are saved
+ * this way, only the needed ones.
+ */
+struct iwl_nvm_section {
+ u16 length;
+ const u8 *data;
+};
+
+struct iwl_mvm {
+ /* for logger access */
+ struct device *dev;
+
+ struct iwl_trans *trans;
+ const struct iwl_fw *fw;
+ const struct iwl_cfg *cfg;
+ struct iwl_phy_db *phy_db;
+ struct ieee80211_hw *hw;
+
+ /* for protecting access to iwl_mvm */
+ struct mutex mutex;
+ struct list_head async_handlers_list;
+ spinlock_t async_handlers_lock;
+ struct work_struct async_handlers_wk;
+
+ struct work_struct roc_done_wk;
+
+ unsigned long status;
+
+ enum iwl_ucode_type cur_ucode;
+ bool ucode_loaded;
+ bool init_ucode_run;
+ u32 error_event_table;
+ u32 log_event_table;
+
+ u32 ampdu_ref;
+
+ struct iwl_notif_wait_data notif_wait;
+
+ unsigned long transport_queue_stop;
+ u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
+ atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
+
+ struct iwl_nvm_data *nvm_data;
+ /* eeprom blob for debugfs/testmode */
+ u8 *eeprom_blob;
+ size_t eeprom_blob_size;
+ /* NVM sections for 7000 family */
+ struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
+
+ /* data related to data path */
+ struct iwl_rx_phy_info last_phy_info;
+ struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+ struct work_struct sta_drained_wk;
+ unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
+
+ /* configured by mac80211 */
+ u32 rts_threshold;
+
+ /* Scan status, cmd (pre-allocated) and auxiliary station */
+ enum iwl_scan_status scan_status;
+ struct iwl_scan_cmd *scan_cmd;
+
+ /* Internal station */
+ struct iwl_mvm_int_sta aux_sta;
+
+ u8 scan_last_antenna_idx; /* to toggle TX between antennas */
+ u8 mgmt_last_antenna_idx;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool prevent_power_down_d3;
+#endif
+
+ struct iwl_mvm_phy_ctxt phy_ctxt_roc;
+
+ struct list_head time_event_list;
+ spinlock_t time_event_lock;
+
+ /*
+ * A bitmap indicating which key indices are in use. The firmware
+ * can hold 16 keys at most. Reflect this fact.
+ */
+ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ u8 vif_count;
+
+ struct led_classdev led;
+
+ struct ieee80211_vif *p2p_device_vif;
+};
+
+/* Extract MVM priv from op_mode and _hw */
+#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \
+ ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)
+
+#define IWL_MAC80211_GET_MVM(_hw) \
+ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+
+extern const u8 iwl_mvm_ac_to_tx_fifo[];
+
+struct iwl_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+};
+
+/******************
+ * MVM Methods
+ ******************/
+/* uCode */
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
+
+/* Utils */
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+ enum ieee80211_band band);
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+u8 first_antenna(u8 mask);
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+
+/* Tx / Host Commands */
+int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_host_cmd *cmd);
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+ u32 flags, u16 len, const void *data);
+int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_host_cmd *cmd,
+ u32 *status);
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+ u16 len, const void *data,
+ u32 *status);
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status);
+#else
+static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+
+/* Statistics */
+int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+/* NVM */
+int iwl_nvm_init(struct iwl_mvm *mvm);
+
+int iwl_mvm_up(struct iwl_mvm *mvm);
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+
+/*
+ * FW notifications / CMD responses handlers
+ * Convention: iwl_mvm_rx_<NAME OF THE CMD>
+ */
+int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+/* MVM PHY */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic);
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic);
+void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+
+/* MAC (virtual interface) programming */
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/* Bindings */
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+/* Quota management */
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif);
+
+/* Scanning */
+int iwl_mvm_scan_request(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+
+/* MVM debugfs */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
+int iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct dentry *dbgfs_dir);
+void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_powertable_cmd *cmd);
+#else
+static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
+ struct dentry *dbgfs_dir)
+{
+ return 0;
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+/* rate scaling */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+ u8 flags, bool init);
+
+/* power management */
+int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+int iwl_mvm_leds_init(struct iwl_mvm *mvm);
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+
+/* D3 (WoWLAN, NetDetect) */
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int iwl_mvm_resume(struct ieee80211_hw *hw);
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev);
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx);
+
+#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
new file mode 100644
index 000000000000..20016bcbdeab
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -0,0 +1,311 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-nvm-parse.h"
+
+/* list of NVM sections we are allowed/need to read */
+static const int nvm_to_read[] = {
+ NVM_SECTION_TYPE_HW,
+ NVM_SECTION_TYPE_SW,
+ NVM_SECTION_TYPE_CALIBRATION,
+ NVM_SECTION_TYPE_PRODUCTION,
+};
+
+/* used to simplify the shared operations on NVM_ACCESS_CMD versions */
+union iwl_nvm_access_cmd {
+ struct iwl_nvm_access_cmd_ver1 ver1;
+ struct iwl_nvm_access_cmd_ver2 ver2;
+};
+union iwl_nvm_access_resp {
+ struct iwl_nvm_access_resp_ver1 ver1;
+ struct iwl_nvm_access_resp_ver2 ver2;
+};
+
+static inline void iwl_nvm_fill_read_ver1(struct iwl_nvm_access_cmd_ver1 *cmd,
+ u16 offset, u16 length)
+{
+ cmd->offset = cpu_to_le16(offset);
+ cmd->length = cpu_to_le16(length);
+ cmd->cache_refresh = 1;
+}
+
+static inline void iwl_nvm_fill_read_ver2(struct iwl_nvm_access_cmd_ver2 *cmd,
+ u16 offset, u16 length, u16 section)
+{
+ cmd->offset = cpu_to_le16(offset);
+ cmd->length = cpu_to_le16(length);
+ cmd->type = cpu_to_le16(section);
+}
+
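+/*
+ * Read one chunk of an NVM section using NVM_ACCESS_CMD and copy it into
+ * 'data' at the given offset. Returns the number of bytes read on success,
+ * or a negative error code.
+ */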
+static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
+ u16 offset, u16 length, u8 *data)
+{
+ union iwl_nvm_access_cmd nvm_access_cmd;
+ union iwl_nvm_access_resp *nvm_resp;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = NVM_ACCESS_CMD,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ .data = { &nvm_access_cmd, },
+ };
+ int ret, bytes_read, offset_read;
+ u8 *resp_data;
+
+ memset(&nvm_access_cmd, 0, sizeof(nvm_access_cmd));
+
+ /* TODO: not sure family should be the decider, maybe FW version? */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_nvm_fill_read_ver2(&(nvm_access_cmd.ver2),
+ offset, length, section);
+ cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver2);
+ } else {
+ iwl_nvm_fill_read_ver1(&(nvm_access_cmd.ver1),
+ offset, length);
+ cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver1);
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(mvm, "Bad return from NVM_ACCESS_CMD (0x%08X)\n",
+ pkt->hdr.flags);
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* Extract NVM response */
+ nvm_resp = (void *)pkt->data;
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ ret = le16_to_cpu(nvm_resp->ver2.status);
+ bytes_read = le16_to_cpu(nvm_resp->ver2.length);
+ offset_read = le16_to_cpu(nvm_resp->ver2.offset);
+ resp_data = nvm_resp->ver2.data;
+ } else {
+ ret = le16_to_cpu(nvm_resp->ver1.length) <= 0;
+ bytes_read = le16_to_cpu(nvm_resp->ver1.length);
+ offset_read = le16_to_cpu(nvm_resp->ver1.offset);
+ resp_data = nvm_resp->ver1.data;
+ }
+ if (ret) {
+ IWL_ERR(mvm,
+ "NVM access command failed with status %d (device: %s)\n",
+ ret, mvm->cfg->name);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (offset_read != offset) {
+ IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
+ offset_read);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Write data to NVM */
+ memcpy(data + offset, resp_data, bytes_read);
+ ret = bytes_read;
+
+exit:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+/*
+ * Reads an NVM section completely.
+ * NICs prior to the 7000 family don't have a real NVM, but just read
+ * section 0, which is the EEPROM. Because the EEPROM read is not bounded
+ * by the uCode, in this case we need to check manually that we don't
+ * overflow and try to read more than the EEPROM size.
+ * For 7000 family NICs, we supply the maximal size we can read, and
+ * the uCode fills the response with as much data as it can,
+ * without overflowing, so no check is needed.
+ */
+static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
+ u8 *data)
+{
+ u16 length, offset = 0;
+ int ret;
+ bool old_eeprom = mvm->cfg->device_family != IWL_DEVICE_FAMILY_7000;
+
+ length = (iwlwifi_mod_params.amsdu_size_8K ? (8 * 1024) : (4 * 1024))
+ - sizeof(union iwl_nvm_access_cmd)
+ - sizeof(struct iwl_rx_packet);
+ /*
+ * If length is greater than the EEPROM size, truncate it because the uCode
+ * doesn't check it by itself, and exit the loop once that size is reached.
+ */
+ if (old_eeprom && length > mvm->cfg->base_params->eeprom_size)
+ length = mvm->cfg->base_params->eeprom_size;
+ ret = length;
+
+ /* Read the NVM until exhausted (reading less than requested) */
+ while (ret == length) {
+ ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
+ if (ret < 0) {
+ IWL_ERR(mvm,
+ "Cannot read NVM from section %d offset %d, length %d\n",
+ section, offset, length);
+ return ret;
+ }
+ offset += ret;
+ if (old_eeprom && offset == mvm->cfg->base_params->eeprom_size)
+ break;
+ }
+
+ IWL_INFO(mvm, "NVM section %d read completed\n", section);
+ return offset;
+}
+
+static struct iwl_nvm_data *
+iwl_parse_nvm_sections(struct iwl_mvm *mvm)
+{
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+ const __le16 *hw, *sw, *calib;
+
+ /* Checking for required sections */
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
+ IWL_ERR(mvm, "Can't parse empty NVM sections\n");
+ return NULL;
+ }
+
+ if (WARN_ON(!mvm->cfg))
+ return NULL;
+
+ hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
+ sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
+ calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+ return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib);
+}
+
+int iwl_nvm_init(struct iwl_mvm *mvm)
+{
+ int ret, i, section;
+ u8 *nvm_buffer, *temp;
+
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ /* TODO: find correct NVM max size for a section */
+ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+ GFP_KERNEL);
+ if (!nvm_buffer)
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ section = nvm_to_read[i];
+ /* we override the constness for initial read */
+ ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+ if (ret < 0)
+ break;
+ temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+ mvm->nvm_sections[section].data = temp;
+ mvm->nvm_sections[section].length = ret;
+ }
+ kfree(nvm_buffer);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* allocate eeprom */
+ mvm->eeprom_blob_size = mvm->cfg->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM size = %zd\n",
+ mvm->eeprom_blob_size);
+ mvm->eeprom_blob = kzalloc(mvm->eeprom_blob_size, GFP_KERNEL);
+ if (!mvm->eeprom_blob)
+ return -ENOMEM;
+
+ ret = iwl_nvm_read_section(mvm, 0, mvm->eeprom_blob);
+ if (ret != mvm->eeprom_blob_size) {
+ IWL_ERR(mvm, "Read partial NVM %d/%zd\n",
+ ret, mvm->eeprom_blob_size);
+ kfree(mvm->eeprom_blob);
+ mvm->eeprom_blob = NULL;
+ return -EINVAL;
+ }
+ }
+
+ ret = 0;
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+ else
+ mvm->nvm_data =
+ iwl_parse_eeprom_data(mvm->trans->dev,
+ mvm->cfg,
+ mvm->eeprom_blob,
+ mvm->eeprom_blob_size);
+
+ if (!mvm->nvm_data) {
+ kfree(mvm->eeprom_blob);
+ mvm->eeprom_blob = NULL;
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
new file mode 100644
index 000000000000..aa59adf87db3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -0,0 +1,682 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/module.h>
+#include <net/mac80211.h>
+
+#include "iwl-notif-wait.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-fw.h"
+#include "iwl-debug.h"
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-phy-db.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "rs.h"
+#include "fw-api-scan.h"
+#include "time-event.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
+
+#define DRV_VERSION IWLWIFI_VERSION
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct iwl_op_mode_ops iwl_mvm_ops;
+
+struct iwl_mvm_mod_params iwlmvm_mod_params = {
+ .power_scheme = IWL_POWER_SCHEME_BPS,
+ /* rest of fields are 0 by default */
+};
+
+module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
+MODULE_PARM_DESC(init_dbg,
+		 "set to true to debug an ASSERT in INIT fw (default: false)");
+module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
+MODULE_PARM_DESC(power_scheme,
+ "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
+
+/*
+ * module init and exit functions
+ */
+static int __init iwl_mvm_init(void)
+{
+ int ret;
+
+ ret = iwl_mvm_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
+
+ if (ret) {
+ pr_err("Unable to register MVM op_mode: %d\n", ret);
+ iwl_mvm_rate_control_unregister();
+ }
+
+ return ret;
+}
+module_init(iwl_mvm_init);
+
+static void __exit iwl_mvm_exit(void)
+{
+ iwl_opmode_deregister("iwlmvm");
+ iwl_mvm_rate_control_unregister();
+}
+module_exit(iwl_mvm_exit);
+
+static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+ u32 reg_val = 0;
+
+ /*
+ * We can't upload the correct value to the INIT image
+ * as we don't have nvm_data by that time.
+ *
+ * TODO: Figure out what we should do here
+ */
+ if (mvm->nvm_data) {
+ radio_cfg_type = mvm->nvm_data->radio_cfg_type;
+ radio_cfg_step = mvm->nvm_data->radio_cfg_step;
+ radio_cfg_dash = mvm->nvm_data->radio_cfg_dash;
+ } else {
+ radio_cfg_type = 0;
+ radio_cfg_step = 0;
+ radio_cfg_dash = 0;
+ }
+
+ /* SKU control */
+ reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+ reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+
+ /* radio configuration */
+ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+ reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+ reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+ WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
+ ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
+
+ /* silicon bits */
+ reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+ reg_val |= CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
+
+ iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+ reg_val);
+
+ IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
+ radio_cfg_step, radio_cfg_dash);
+
+ /*
+ * W/A : NIC is stuck in a reset state after Early PCIe power off
+ * (PCIe power is lost before PERST# is asserted), causing ME FW
+ * to lose ownership and not be able to obtain it back.
+ */
+ iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+}
+
+struct iwl_rx_handlers {
+ u8 cmd_id;
+ bool async;
+ int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+};
+
+#define RX_HANDLER(_cmd_id, _fn, _async) \
+ { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+
+/*
+ * Handlers for fw notifications
+ * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, async).
+ * This list should be in order of frequency for performance purposes.
+ *
+ * The handler can be SYNC - this means that it will be called in the Rx path
+ * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
+ * only in this case!), it should be set as ASYNC. In that case, it will be
+ * called from a worker with mvm->mutex held.
+ */
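+/*
+ * For example, a handler that must take mvm->mutex would be registered
+ * with async set to true, e.g. (hypothetical notification and handler):
+ *	RX_HANDLER(SOME_NOTIFICATION, iwl_mvm_rx_some_notif, true)
+ * All the handlers below run synchronously in the Rx path.
+ */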
+static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
+ RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
+ RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
+ RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
+ RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
+ RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+
+ RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
+ RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+
+ RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
+ RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
+
+ RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+};
+#undef RX_HANDLER
+#define CMD(x) [x] = #x
+
+static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
+ CMD(MVM_ALIVE),
+ CMD(REPLY_ERROR),
+ CMD(INIT_COMPLETE_NOTIF),
+ CMD(PHY_CONTEXT_CMD),
+ CMD(MGMT_MCAST_KEY),
+ CMD(TX_CMD),
+ CMD(TXPATH_FLUSH),
+ CMD(MAC_CONTEXT_CMD),
+ CMD(TIME_EVENT_CMD),
+ CMD(TIME_EVENT_NOTIFICATION),
+ CMD(BINDING_CONTEXT_CMD),
+ CMD(TIME_QUOTA_CMD),
+ CMD(RADIO_VERSION_NOTIFICATION),
+ CMD(SCAN_REQUEST_CMD),
+ CMD(SCAN_ABORT_CMD),
+ CMD(SCAN_START_NOTIFICATION),
+ CMD(SCAN_RESULTS_NOTIFICATION),
+ CMD(SCAN_COMPLETE_NOTIFICATION),
+ CMD(NVM_ACCESS_CMD),
+ CMD(PHY_CONFIGURATION_CMD),
+ CMD(CALIB_RES_NOTIF_PHY_DB),
+ CMD(SET_CALIB_DEFAULT_CMD),
+ CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+ CMD(ADD_STA),
+ CMD(REMOVE_STA),
+ CMD(LQ_CMD),
+ CMD(SCAN_OFFLOAD_CONFIG_CMD),
+ CMD(SCAN_OFFLOAD_REQUEST_CMD),
+ CMD(SCAN_OFFLOAD_ABORT_CMD),
+ CMD(SCAN_OFFLOAD_COMPLETE),
+ CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+ CMD(POWER_TABLE_CMD),
+ CMD(WEP_KEY),
+ CMD(REPLY_RX_PHY_CMD),
+ CMD(REPLY_RX_MPDU_CMD),
+ CMD(BEACON_TEMPLATE_CMD),
+ CMD(STATISTICS_NOTIFICATION),
+ CMD(TX_ANT_CONFIGURATION_CMD),
+ CMD(D3_CONFIG_CMD),
+ CMD(PROT_OFFLOAD_CONFIG_CMD),
+ CMD(OFFLOADS_QUERY_CMD),
+ CMD(REMOTE_WAKE_CONFIG_CMD),
+ CMD(WOWLAN_PATTERNS),
+ CMD(WOWLAN_CONFIGURATION),
+ CMD(WOWLAN_TSC_RSC_PARAM),
+ CMD(WOWLAN_TKIP_PARAM),
+ CMD(WOWLAN_KEK_KCK_MATERIAL),
+ CMD(WOWLAN_GET_STATUSES),
+ CMD(WOWLAN_TX_POWER_PER_DB),
+ CMD(NET_DETECT_CONFIG_CMD),
+ CMD(NET_DETECT_PROFILES_QUERY_CMD),
+ CMD(NET_DETECT_PROFILES_CMD),
+ CMD(NET_DETECT_HOTSPOTS_CMD),
+ CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
+};
+#undef CMD
+
+/* this forward declaration avoids having to export the function */
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+
+static struct iwl_op_mode *
+iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+ struct ieee80211_hw *hw;
+ struct iwl_op_mode *op_mode;
+ struct iwl_mvm *mvm;
+ struct iwl_trans_config trans_cfg = {};
+ static const u8 no_reclaim_cmds[] = {
+ TX_CMD,
+ };
+ int err, scan_size;
+
+ switch (cfg->device_family) {
+ case IWL_DEVICE_FAMILY_6030:
+ case IWL_DEVICE_FAMILY_6005:
+ case IWL_DEVICE_FAMILY_7000:
+ break;
+ default:
+ IWL_ERR(trans, "Trying to load mvm on an unsupported device\n");
+ return NULL;
+ }
+
+ /********************************
+ * 1. Allocating and configuring HW data
+ ********************************/
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+ sizeof(struct iwl_mvm),
+ &iwl_mvm_hw_ops);
+ if (!hw)
+ return NULL;
+
+ op_mode = hw->priv;
+ op_mode->ops = &iwl_mvm_ops;
+ op_mode->trans = trans;
+
+ mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ mvm->dev = trans->dev;
+ mvm->trans = trans;
+ mvm->cfg = cfg;
+ mvm->fw = fw;
+ mvm->hw = hw;
+
+ mutex_init(&mvm->mutex);
+ spin_lock_init(&mvm->async_handlers_lock);
+ INIT_LIST_HEAD(&mvm->time_event_list);
+ INIT_LIST_HEAD(&mvm->async_handlers_list);
+ spin_lock_init(&mvm->time_event_lock);
+
+ INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
+ INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+ INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
+
+ SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+
+ /*
+ * Populate the state variables that the transport layer needs
+ * to know about.
+ */
+ trans_cfg.op_mode = op_mode;
+ trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+ trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+ trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+
+ /* TODO: this should really be a TLV */
+ if (cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ trans_cfg.bc_table_dword = true;
+
+ if (!iwlwifi_mod_params.wd_disable)
+ trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout;
+ else
+ trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
+
+ trans_cfg.command_names = iwl_mvm_cmd_strings;
+
+ trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+ trans_cfg.cmd_fifo = IWL_MVM_CMD_FIFO;
+
+ snprintf(mvm->hw->wiphy->fw_version,
+ sizeof(mvm->hw->wiphy->fw_version),
+ "%s", fw->fw_version);
+
+ /* Configure transport layer */
+ iwl_trans_configure(mvm->trans, &trans_cfg);
+
+ trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+
+ /* set up notification wait support */
+ iwl_notification_wait_init(&mvm->notif_wait);
+
+ /* Init phy db */
+ mvm->phy_db = iwl_phy_db_init(trans);
+ if (!mvm->phy_db) {
+ IWL_ERR(mvm, "Cannot init phy_db\n");
+ goto out_free;
+ }
+
+ IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
+ mvm->cfg->name, mvm->trans->hw_rev);
+
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
+ goto out_free;
+
+ mutex_lock(&mvm->mutex);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ mutex_unlock(&mvm->mutex);
+ if (err && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
+ }
+
+	/* Stop the hw after the ALIVE and NVM have been read */
+ if (!iwlmvm_mod_params.init_dbg)
+ iwl_trans_stop_hw(mvm->trans, false);
+
+ scan_size = sizeof(struct iwl_scan_cmd) +
+ mvm->fw->ucode_capa.max_probe_length +
+ (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel));
+ mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
+ if (!mvm->scan_cmd)
+ goto out_free;
+
+ err = iwl_mvm_mac_setup_register(mvm);
+ if (err)
+ goto out_free;
+
+ err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
+ if (err)
+ goto out_unregister;
+
+ return op_mode;
+
+ out_unregister:
+ ieee80211_unregister_hw(mvm->hw);
+ out_free:
+ iwl_phy_db_free(mvm->phy_db);
+ kfree(mvm->scan_cmd);
+ kfree(mvm->eeprom_blob);
+ iwl_trans_stop_hw(trans, true);
+ ieee80211_free_hw(mvm->hw);
+ return NULL;
+}
+
+static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ int i;
+
+ iwl_mvm_leds_exit(mvm);
+
+ ieee80211_unregister_hw(mvm->hw);
+
+ kfree(mvm->scan_cmd);
+
+ iwl_trans_stop_hw(mvm->trans, true);
+
+ iwl_phy_db_free(mvm->phy_db);
+ mvm->phy_db = NULL;
+
+ kfree(mvm->eeprom_blob);
+ iwl_free_nvm_data(mvm->nvm_data);
+ for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
+ kfree(mvm->nvm_sections[i].data);
+
+ ieee80211_free_hw(mvm->hw);
+}
+
+struct iwl_async_handler_entry {
+ struct list_head list;
+ struct iwl_rx_cmd_buffer rxb;
+ int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+};
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+
+ spin_lock_bh(&mvm->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&mvm->async_handlers_lock);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ struct iwl_async_handler_entry *entry, *tmp;
+ struct list_head local_list;
+
+ INIT_LIST_HEAD(&local_list);
+
+ /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
+ mutex_lock(&mvm->mutex);
+
+ /*
+ * Sync with Rx path with a lock. Remove all the entries from this list,
+ * add them to a local one (lock free), and then handle them.
+ */
+ spin_lock_bh(&mvm->async_handlers_lock);
+ list_splice_init(&mvm->async_handlers_list, &local_list);
+ spin_unlock_bh(&mvm->async_handlers_lock);
+
+ list_for_each_entry_safe(entry, tmp, &local_list, list) {
+ if (entry->fn(mvm, &entry->rxb, NULL))
+ IWL_WARN(mvm,
+				 "returned values from ASYNC handlers are ignored\n");
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u8 i;
+
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ iwl_notification_wait_notify(&mvm->notif_wait, pkt);
+
+ for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
+ const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
+ struct iwl_async_handler_entry *entry;
+
+ if (rx_h->cmd_id != pkt->hdr.cmd)
+ continue;
+
+ if (!rx_h->async)
+ return rx_h->fn(mvm, rxb, cmd);
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ /* we can't do much... */
+ if (!entry)
+ return 0;
+
+ entry->rxb._page = rxb_steal_page(rxb);
+ entry->rxb._offset = rxb->_offset;
+ entry->rxb._rx_page_order = rxb->_rx_page_order;
+ entry->fn = rx_h->fn;
+ spin_lock(&mvm->async_handlers_lock);
+ list_add_tail(&entry->list, &mvm->async_handlers_list);
+ spin_unlock(&mvm->async_handlers_lock);
+ schedule_work(&mvm->async_handlers_wk);
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ int mq = mvm->queue_to_mac80211[queue];
+
+ if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+ return;
+
+ if (atomic_inc_return(&mvm->queue_stop_count[mq]) > 1) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "queue %d (mac80211 %d) already stopped\n",
+ queue, mq);
+ return;
+ }
+
+ set_bit(mq, &mvm->transport_queue_stop);
+ ieee80211_stop_queue(mvm->hw, mq);
+}
+
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ int mq = mvm->queue_to_mac80211[queue];
+
+ if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+ return;
+
+ if (atomic_dec_return(&mvm->queue_stop_count[mq]) > 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "queue %d (mac80211 %d) already awake\n",
+ queue, mq);
+ return;
+ }
+
+ clear_bit(mq, &mvm->transport_queue_stop);
+
+ ieee80211_wake_queue(mvm->hw, mq);
+}
+
+static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ if (state)
+ set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ else
+ clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+}
+
+static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info;
+
+ info = IEEE80211_SKB_CB(skb);
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_mvm_dump_nic_error_log(mvm);
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+
+ /*
+ * If we're restarting already, don't cycle restarts.
+ * If INIT fw asserted, it will likely fail again.
+ * If WoWLAN fw asserted, don't restart either, mac80211
+ * can't recover this since we're already half suspended.
+ */
+ if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
+ } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
+ iwlwifi_mod_params.restart_fw) {
+ /*
+ * This is a bit racy, but worst case we tell mac80211 about
+ * a stopped/aborted (sched) scan when that was already done
+ * which is not a problem. It is necessary to abort any scan
+ * here because mac80211 requires having the scan cleared
+ * before restarting.
+ * We'll reset the scan_status to NONE in restart cleanup in
+ * the next start() call from mac80211.
+ */
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_NONE:
+ break;
+ case IWL_MVM_SCAN_OS:
+ ieee80211_scan_completed(mvm->hw, true);
+ break;
+ }
+
+ ieee80211_restart_hw(mvm->hw);
+ }
+}
+
+static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+ WARN_ON(1);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops = {
+ .start = iwl_op_mode_mvm_start,
+ .stop = iwl_op_mode_mvm_stop,
+ .rx = iwl_mvm_rx_dispatch,
+ .queue_full = iwl_mvm_stop_sw_queue,
+ .queue_not_full = iwl_mvm_wake_sw_queue,
+ .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
+ .free_skb = iwl_mvm_free_skb,
+ .nic_error = iwl_mvm_nic_error,
+ .cmd_queue_full = iwl_mvm_cmd_queue_full,
+ .nic_config = iwl_mvm_nic_config,
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
new file mode 100644
index 000000000000..b428448f8ddf
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -0,0 +1,292 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+/* Maps the driver specific channel width definition to the fw values */
+static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return PHY_VHT_CHANNEL_MODE20;
+ case NL80211_CHAN_WIDTH_40:
+ return PHY_VHT_CHANNEL_MODE40;
+ case NL80211_CHAN_WIDTH_80:
+ return PHY_VHT_CHANNEL_MODE80;
+ case NL80211_CHAN_WIDTH_160:
+ return PHY_VHT_CHANNEL_MODE160;
+ default:
+ WARN(1, "Invalid channel width=%u", chandef->width);
+ return PHY_VHT_CHANNEL_MODE20;
+ }
+}
+
+/*
+ * Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+static inline u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->chan->center_freq - chandef->center_freq1) {
+ case -70:
+ return PHY_VHT_CTRL_POS_4_BELOW;
+ case -50:
+ return PHY_VHT_CTRL_POS_3_BELOW;
+ case -30:
+ return PHY_VHT_CTRL_POS_2_BELOW;
+ case -10:
+ return PHY_VHT_CTRL_POS_1_BELOW;
+ case 10:
+ return PHY_VHT_CTRL_POS_1_ABOVE;
+ case 30:
+ return PHY_VHT_CTRL_POS_2_ABOVE;
+ case 50:
+ return PHY_VHT_CTRL_POS_3_ABOVE;
+ case 70:
+ return PHY_VHT_CTRL_POS_4_ABOVE;
+ default:
+ WARN(1, "Invalid channel definition");
+ case 0:
+ /*
+ * The FW is expected to check the control channel position only
+ * when in HT/VHT and the channel width is not 20MHz. Return
+ * this value as the default one.
+ */
+ return PHY_VHT_CTRL_POS_1_BELOW;
+ }
+}
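+/*
+ * Illustrative example: for a 40 MHz channel definition with the control
+ * channel at 5180 MHz (chandef->chan->center_freq) and center_freq1 at
+ * 5190 MHz, the difference is -10 and the function above returns
+ * PHY_VHT_CTRL_POS_1_BELOW.
+ */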
+
+/*
+ * Construct the generic fields of the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
+ struct iwl_phy_context_cmd *cmd,
+ u32 action, u32 apply_time)
+{
+ memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
+ ctxt->color));
+ cmd->action = cpu_to_le32(action);
+ cmd->apply_time = cpu_to_le32(apply_time);
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+ struct iwl_phy_context_cmd *cmd,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ u8 valid_rx_chains, active_cnt, idle_cnt;
+
+ /* Set the channel info data */
+ cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+ PHY_BAND_24 : PHY_BAND_5);
+
+ cmd->ci.channel = chandef->chan->hw_value;
+ cmd->ci.width = iwl_mvm_get_channel_width(chandef);
+ cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+
+	/* Set the Rx chains */
+
+ /* TODO:
+ * Need to add on chain noise calibration limitations, and
+ * BT coex considerations.
+ */
+ valid_rx_chains = mvm->nvm_data->valid_rx_ant;
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+
+ cmd->rxchain_info = cpu_to_le32(valid_rx_chains <<
+ PHY_RX_CHAIN_VALID_POS);
+ cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+ cmd->rxchain_info |= cpu_to_le32(active_cnt <<
+ PHY_RX_CHAIN_MIMO_CNT_POS);
+
+ cmd->txchain_info = cpu_to_le32(mvm->nvm_data->valid_tx_ant);
+}
+
+/*
+ * Send a command to apply the current phy configuration. The command is sent
+ * only if something in the configuration changed: either this is the first
+ * time the phy configuration is applied, or the phy configuration changed
+ * from the previous apply.
+ */
+static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic,
+ u32 action, u32 apply_time)
+{
+ struct iwl_phy_context_cmd cmd;
+ int ret;
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+ chains_static, chains_dynamic);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+ sizeof(struct iwl_phy_context_cmd),
+ &cmd);
+ if (ret)
+ IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
+ return ret;
+}
+
+struct phy_ctx_used_data {
+ unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)];
+};
+
+static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ void *_data)
+{
+ struct phy_ctx_used_data *data = _data;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+ __set_bit(phy_ctxt->id, data->used);
+}
+
+/*
+ * Send a command to add a PHY context based on the current HW configuration.
+ */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ struct phy_ctx_used_data data = {
+ .used = { },
+ };
+
+ /*
+ * If this is a regular PHY context (not the ROC one)
+ * skip the ROC PHY context's ID.
+ */
+ if (ctxt != &mvm->phy_ctxt_roc)
+ __set_bit(mvm->phy_ctxt_roc.id, data.used);
+
+ lockdep_assert_held(&mvm->mutex);
+ ctxt->color++;
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ ieee80211_iter_chan_contexts_atomic(
+ mvm->hw, iwl_mvm_phy_ctx_used_iter, &data);
+
+ ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX);
+ if (WARN_ONCE(ctxt->id == NUM_PHY_CTX,
+ "Failed to init PHY context - no free ID!\n"))
+ return -EIO;
+ }
+
+ ctxt->channel = chandef->chan;
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_ADD, 0);
+}
+
+/*
+ * Send a command to modify the PHY context based on the current HW
+ * configuration. Note that the function does not check that the configuration
+ * changed.
+ */
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ ctxt->channel = chandef->chan;
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_MODIFY, 0);
+}
+
+/*
+ * Send a command to the FW to remove the given phy context.
+ * Once the command is sent, regardless of success or failure, the context is
+ * marked as invalid
+ */
+void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+ struct iwl_phy_context_cmd cmd;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0);
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+ sizeof(struct iwl_phy_context_cmd),
+ &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n",
+ ctxt->id);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
new file mode 100644
index 000000000000..5a92a4978795
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw-api-power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+static void iwl_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_powertable_cmd *cmd)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_channel *chan;
+ int dtimper, dtimper_msec;
+ int keep_alive;
+ bool radar_detect = false;
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd->action = cpu_to_le32(FW_CTXT_ACTION_MODIFY);
+
+ if ((!vif->bss_conf.ps) ||
+ (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM))
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+ dtimper = hw->conf.ps_dtim_period ?: 1;
+
+ /* Check if radar detection is required on current channel */
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ WARN_ON(!chanctx_conf);
+ if (chanctx_conf) {
+ chan = chanctx_conf->def.chan;
+ radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+ }
+ rcu_read_unlock();
+
+ /* Check skip over DTIM conditions */
+ if (!radar_detect && (dtimper <= 10) &&
+ (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SLEEP_OVER_DTIM_MSK);
+ cmd->num_skip_dtim = 2;
+ }
+
+ /* Check that keep alive period is at least 3 * DTIM */
+ dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+ keep_alive = max_t(int, 3 * dtimper_msec,
+ MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
+ keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
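+	/*
+	 * Illustrative example: with a DTIM period of 3 and a beacon
+	 * interval of 100, dtimper_msec is 300, so 3 * dtimper_msec is
+	 * 900, well below the 25000 ms minimum; keep_alive therefore
+	 * becomes 25 seconds.
+	 */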
+
+ cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP) {
+ /* TODO: Also for D3 (device sleep / WoWLAN) */
+ cmd->rx_data_timeout = cpu_to_le32(10);
+ cmd->tx_data_timeout = cpu_to_le32(10);
+ } else {
+ cmd->rx_data_timeout = cpu_to_le32(50);
+ cmd->tx_data_timeout = cpu_to_le32(50);
+ }
+}
+
+int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_powertable_cmd cmd = {};
+
+ if (!iwlwifi_mod_params.power_save) {
+ IWL_DEBUG_POWER(mvm, "Power management is not allowed\n");
+ return 0;
+ }
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ iwl_power_build_cmd(mvm, vif, &cmd);
+
+ IWL_DEBUG_POWER(mvm,
+ "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+ cmd.id_and_color, iwlmvm_mod_params.power_scheme,
+ le16_to_cpu(cmd.flags));
+
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+ le16_to_cpu(cmd.keep_alive_seconds));
+ IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+		IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ cmd.lprx_rssi_threshold);
+ IWL_DEBUG_POWER(mvm, "DTIMs to skip = %u\n", cmd.num_skip_dtim);
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_powertable_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!iwlwifi_mod_params.power_save) {
+ IWL_DEBUG_POWER(mvm, "Power management is not allowed\n");
+ return 0;
+ }
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY);
+
+ IWL_DEBUG_POWER(mvm,
+ "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+ cmd.id_and_color, iwlmvm_mod_params.power_scheme,
+ le16_to_cpu(cmd.flags));
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+ sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_powertable_cmd *cmd)
+{
+ iwl_power_build_cmd(mvm, vif, cmd);
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
new file mode 100644
index 000000000000..925628468146
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+struct iwl_mvm_quota_iterator_data {
+ int n_interfaces[MAX_BINDINGS];
+ int colors[MAX_BINDINGS];
+ struct ieee80211_vif *new_vif;
+};
+
+static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_quota_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 id;
+
+ /*
+	 * We'll account for the new interface (if any) below. Skip it here
+	 * so it isn't counted twice in case we're not called from within
+	 * the add_interface callback (from within that callback the new
+	 * interface won't show up in the iteration anyway).
+ */
+ if (vif == data->new_vif)
+ return;
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ /* currently, PHY ID == binding ID */
+ id = mvmvif->phy_ctxt->id;
+
+ /* need at least one binding per PHY */
+ BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
+
+ if (WARN_ON_ONCE(id >= MAX_BINDINGS))
+ return;
+
+ if (data->colors[id] < 0)
+ data->colors[id] = mvmvif->phy_ctxt->color;
+ else
+ WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->bss_conf.assoc)
+ data->n_interfaces[id]++;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (mvmvif->ap_active)
+ data->n_interfaces[id]++;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ data->n_interfaces[id]++;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (vif->bss_conf.ibss_joined)
+ data->n_interfaces[id]++;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
+{
+ struct iwl_time_quota_cmd cmd;
+ int i, idx, ret, num_active_bindings, quota, quota_rem;
+ struct iwl_mvm_quota_iterator_data data = {
+ .n_interfaces = {},
+ .colors = { -1, -1, -1, -1 },
+ .new_vif = newvif,
+ };
+
+ /* update all upon completion */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
+ BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_quota_iterator, &data);
+ if (newvif) {
+ data.new_vif = NULL;
+ iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
+ }
+
+ /*
+ * The FW's scheduling session consists of
+ * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
+ * equally between all the bindings that require quota
+ */
+ num_active_bindings = 0;
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ if (data.n_interfaces[i] > 0)
+ num_active_bindings++;
+ }
+
+ if (!num_active_bindings)
+ goto send_cmd;
+
+ quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
+ quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
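+	/*
+	 * Illustrative example: if IWL_MVM_MAX_QUOTA were 128 and three
+	 * bindings were active, each binding would get 42 fragments and
+	 * the 2 leftover fragments would go to the first binding (see the
+	 * remainder handling below).
+	 */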
+
+ for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
+ if (data.n_interfaces[i] <= 0)
+ continue;
+
+ cmd.quotas[idx].id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
+ cmd.quotas[idx].quota = cpu_to_le32(quota);
+ cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ idx++;
+ }
+
+ /* Give the remainder of the session to the first binding */
+ le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+
+send_cmd:
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
new file mode 100644
index 000000000000..56b636d9ab30
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -0,0 +1,3080 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+#define RS_NAME "iwl-mvm-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_NUMBER_TRY 1
+#define IWL_HT_NUMBER_TRY 3
+
+#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
+#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IWL_MISSED_RATE_MAX 15
+/* max time to accum history: 3 seconds */
+#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+ IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+};
+
+static const u8 ant_toggle_lookup[] = {
+ /*ANT_NONE -> */ ANT_NONE,
+ /*ANT_A -> */ ANT_B,
+ /*ANT_B -> */ ANT_C,
+ /*ANT_AB -> */ ANT_BC,
+ /*ANT_C -> */ ANT_A,
+ /*ANT_AC -> */ ANT_AB,
+ /*ANT_BC -> */ ANT_AC,
+ /*ANT_ABC -> */ ANT_ABC,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
+ IWL_RATE_SISO_##s##M_PLCP, \
+ IWL_RATE_MIMO2_##s##M_PLCP,\
+ IWL_RATE_MIMO3_##s##M_PLCP,\
+ IWL_RATE_##r##M_IEEE, \
+ IWL_RATE_##ip##M_INDEX, \
+ IWL_RATE_##in##M_INDEX, \
+ IWL_RATE_##rp##M_INDEX, \
+ IWL_RATE_##rn##M_INDEX, \
+ IWL_RATE_##pp##M_INDEX, \
+ IWL_RATE_##np##M_INDEX }
+
+/*
+ * Parameter order:
+ * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to IWL_RATE_INVALID
+ *
+ */
+static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
+ IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+ IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+ /* FIXME:RS: ^^ should be INV (legacy) */
+};
+
+static inline u8 rs_extract_rate(u32 rate_n_flags)
+{
+ /* also works for HT because bits 7:6 are zero there */
+ return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
+}
+
+static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ /* HT rate format */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = rs_extract_rate(rate_n_flags);
+
+ if (idx >= IWL_RATE_MIMO3_6M_PLCP)
+ idx = idx - IWL_RATE_MIMO3_6M_PLCP;
+ else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+ idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+
+ idx += IWL_FIRST_OFDM_RATE;
+		/* skip 9M, not supported in HT */
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx += 1;
+ if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+ return idx;
+
+ /* legacy rate format, search for match in table */
+ } else {
+ for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
+ if (iwl_rates[idx].plcp ==
+ rs_extract_rate(rate_n_flags))
+ return idx;
+ }
+
+ return -1;
+}
+
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta);
+static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index);
+#else
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
+{}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
+ {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+ {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+ {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+ {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+ {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+ {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+ {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
+ {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
+ {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
+ {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
+ {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
+ {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
+ {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+ { "1", "BPSK DSSS"},
+ { "2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ { "11", "QPSK CCK"},
+ { "6", "BPSK 1/2"},
+ { "9", "BPSK 1/2"},
+ { "12", "QPSK 1/2"},
+ { "18", "QPSK 3/4"},
+ { "24", "16QAM 1/2"},
+ { "36", "16QAM 3/4"},
+ { "48", "64QAM 2/3"},
+ { "54", "64QAM 3/4"},
+ { "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM (8)
+
+static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+ window->data = 0;
+ window->success_counter = 0;
+ window->success_ratio = IWL_INVALID_VALUE;
+ window->counter = 0;
+ window->average_tpt = IWL_INVALID_VALUE;
+ window->stamp = 0;
+}
+
+static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * Removes the old data from the statistics. All data that is older than
+ * TID_MAX_TIME_DIFF will be deleted.
+ */
+static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
+{
+ /* The oldest age we want to keep */
+ u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+ while (tl->queue_count &&
+ (tl->time_stamp < oldest_time)) {
+ tl->total -= tl->packet_count[tl->head];
+ tl->packet_count[tl->head] = 0;
+ tl->time_stamp += TID_QUEUE_CELL_SPACING;
+ tl->queue_count--;
+ tl->head++;
+ if (tl->head >= TID_QUEUE_MAX_SIZE)
+ tl->head = 0;
+ }
+}
+
+/*
+ * Increment the traffic load value for the tid and also remove
+ * any old values that have passed a certain time period.
+ */
+static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
+ struct ieee80211_hdr *hdr)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 index;
+ struct iwl_traffic_load *tl = NULL;
+ u8 tid;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ } else {
+ return IWL_MAX_TID_COUNT;
+ }
+
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
+ return IWL_MAX_TID_COUNT;
+
+ tl = &lq_data->load[tid];
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ /* Happens only for the first packet. Initialize the data */
+ if (!(tl->queue_count)) {
+ tl->total = 1;
+ tl->time_stamp = curr_time;
+ tl->queue_count = 1;
+ tl->head = 0;
+ tl->packet_count[0] = 1;
+ return IWL_MAX_TID_COUNT;
+ }
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ index = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (index >= TID_QUEUE_MAX_SIZE)
+ rs_tl_rm_old_stats(tl, curr_time);
+
+ index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
+ tl->packet_count[index] = tl->packet_count[index] + 1;
+ tl->total = tl->total + 1;
+
+ if ((index + 1) > tl->queue_count)
+ tl->queue_count = index + 1;
+
+ return tid;
+}
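+/*
+ * Illustrative example: assuming TID_QUEUE_CELL_SPACING were 50 (msec),
+ * a packet arriving roughly 120 ms after tl->time_stamp would be counted
+ * in the cell two positions after tl->head (modulo TID_QUEUE_MAX_SIZE).
+ */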
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+/**
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only;
+ * once the device starts using a fixed rate, we need to reload the module
+ * to bring back normal operation.
+ */
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta)
+{
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+ lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
+ }
+}
+#endif
+
+/*
+ * Get the traffic load value for tid.
+ */
+static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 index;
+ struct iwl_traffic_load *tl = NULL;
+
+ if (tid >= IWL_MAX_TID_COUNT)
+ return 0;
+
+ tl = &(lq_data->load[tid]);
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ if (!(tl->queue_count))
+ return 0;
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ index = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (index >= TID_QUEUE_MAX_SIZE)
+ rs_tl_rm_old_stats(tl, curr_time);
+
+ return tl->total;
+}
+
+static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_data, u8 tid,
+ struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+ u32 load;
+
+ load = rs_tl_get_load(lq_data, tid);
+
+ if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
+ IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
+ sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+			 * driver and mac80211 are out of sync;
+			 * this might be caused by reloading firmware;
+			 * stop the Tx BA session here
+ */
+ IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ } else {
+ IWL_DEBUG_HT(mvm,
+ "Aggregation not enabled for tid %d because load = %u\n",
+ tid, load);
+ }
+ return ret;
+}
+
+static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid,
+ struct iwl_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+{
+ if (tid < IWL_MAX_TID_COUNT)
+ rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta);
+ else
+ IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
+ tid, IWL_MAX_TID_COUNT);
+}
+
+static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Static helper to get the expected throughput from an iwl_scale_tbl_info;
+ * it wraps a NULL pointer check on the expected_tpt table.
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_index];
+ return 0;
+}
+
+/**
+ * rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate. window->data contains the bitmask of successful
+ * packets.
+ */
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
+{
+ struct iwl_rate_scale_data *window = NULL;
+ static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+ return -EINVAL;
+
+ /* Select window for current tx bit rate */
+ window = &(tbl->win[scale_index]);
+
+ /* Get expected throughput */
+ tpt = get_expected_tpt(tbl, scale_index);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history window; anything older isn't really relevant any more.
+ * If we have filled up the sliding window, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (window->counter >= IWL_RATE_MAX_WINDOW) {
+ /* remove earliest */
+ window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+ if (window->data & mask) {
+ window->data &= ~mask;
+ window->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ window->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ window->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ window->success_counter++;
+ window->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
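+	/* Note: success_ratio is scaled by 128, so 12800 corresponds to 100% */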
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (window->counter > 0)
+ window->success_ratio = 128 * (100 * window->success_counter)
+ / window->counter;
+ else
+ window->success_ratio = IWL_INVALID_VALUE;
+
+ fail_count = window->counter - window->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+ (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+ window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+ else
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Tag this window as having been updated */
+ window->stamp = jiffies;
+
+ return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+/* FIXME:RS:remove this function and put the flags statically in the table */
+static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl,
+ int index, u8 use_green)
+{
+ u32 rate_n_flags = 0;
+
+ if (is_legacy(tbl->lq_type)) {
+ rate_n_flags = iwl_rates[index].plcp;
+ if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+ rate_n_flags |= RATE_MCS_CCK_MSK;
+ } else if (is_Ht(tbl->lq_type)) {
+ if (index > IWL_LAST_OFDM_RATE) {
+ IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
+ index = IWL_LAST_OFDM_RATE;
+ }
+ rate_n_flags = RATE_MCS_HT_MSK;
+
+ if (is_siso(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_siso;
+ else if (is_mimo2(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_mimo2;
+ else
+ rate_n_flags |= iwl_rates[index].plcp_mimo3;
+ } else {
+ IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+ }
+
+ rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ RATE_MCS_ANT_ABC_MSK);
+
+ if (is_Ht(tbl->lq_type)) {
+ if (tbl->is_ht40)
+ rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
+ if (use_green) {
+ rate_n_flags |= RATE_HT_MCS_GF_MSK;
+ if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+ rate_n_flags &= ~RATE_MCS_SGI_MSK;
+ IWL_ERR(mvm, "GF was set with SGI:SISO\n");
+ }
+ }
+ }
+ return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct iwl_scale_tbl_info *tbl,
+ int *rate_idx)
+{
+ u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+ u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
+ u8 mcs;
+
+ memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+ *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+
+ if (*rate_idx == IWL_RATE_INVALID) {
+ *rate_idx = -1;
+ return -EINVAL;
+ }
+ tbl->is_SGI = 0; /* default legacy setup */
+ tbl->is_ht40 = 0;
+ tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+ tbl->lq_type = LQ_NONE;
+ tbl->max_search = IWL_MAX_SEARCH;
+
+ /* legacy rate format */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ if (num_of_ant == 1) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+ }
+ /* HT rate format */
+ } else {
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
+ tbl->is_ht40 = 1;
+
+ mcs = rs_extract_rate(rate_n_flags);
+
+ /* SISO */
+ if (mcs <= IWL_RATE_SISO_60M_PLCP) {
+ if (num_of_ant == 1)
+ tbl->lq_type = LQ_SISO; /*else NONE*/
+ /* MIMO2 */
+ } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
+ if (num_of_ant == 2)
+ tbl->lq_type = LQ_MIMO2;
+ /* MIMO3 */
+ } else {
+ if (num_of_ant == 3) {
+ tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+ tbl->lq_type = LQ_MIMO3;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Switch to another antenna/antennas and return 1;
+ * if no other valid antenna is found, return 0.
+ */
+static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ struct iwl_scale_tbl_info *tbl)
+{
+ u8 new_ant_type;
+
+ if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ return 0;
+
+ if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+ while ((new_ant_type != tbl->ant_type) &&
+ !rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == tbl->ant_type)
+ return 0;
+
+ tbl->ant_type = new_ant_type;
+ *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+ *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool rs_use_green(struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
+
+ bool use_green = !(sta_priv->vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && use_green;
+}
+
+/**
+ * rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, return only the basic
+ * available rates.
+ */
+static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum iwl_table_type rate_type)
+{
+ if (is_legacy(rate_type)) {
+ return lq_sta->active_legacy_rate;
+ } else {
+ if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else if (is_mimo2(rate_type))
+ return lq_sta->active_mimo2_rate;
+ else
+ return lq_sta->active_mimo3_rate;
+ }
+}
+
+static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = IWL_RATE_INVALID;
+ u8 low = IWL_RATE_INVALID;
+
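+	/* The return value packs both neighbours: high index in bits 8-15,
+	 * low index in bits 0-7 */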
+	/* For 802.11a or HT, walk to the literally adjacent rate in
+	 * the rate table */
+ if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = index - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = index + 1;
+ for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = index;
+ while (low != IWL_RATE_INVALID) {
+ low = iwl_rates[low].prev_rs;
+ if (low == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ IWL_DEBUG_RATE(mvm, "Skipping masked lower rate: %d\n", low);
+ }
+
+ high = index;
+ while (high != IWL_RATE_INVALID) {
+ high = iwl_rates[high].next_rs;
+ if (high == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ IWL_DEBUG_RATE(mvm, "Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ u8 scale_index, u8 ht_possible)
+{
+ s32 low;
+ u16 rate_mask;
+ u16 high_low;
+ u8 switch_to_legacy = 0;
+ u8 is_green = lq_sta->is_green;
+ struct iwl_mvm *mvm = lq_sta->drv;
+
+	/* Check if we need to switch from HT to legacy rates.
+	 * The assumption is that the mandatory rates (1 Mbps or 6 Mbps)
+	 * are always supported, as the spec demands. */
+ if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
+ switch_to_legacy = 1;
+ scale_index = rs_ht_to_legacy[scale_index];
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+
+ if (num_of_ant(tbl->ant_type) > 1)
+ tbl->ant_type =
+ first_antenna(mvm->nvm_data->valid_tx_ant);
+
+ tbl->is_ht40 = 0;
+ tbl->is_SGI = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ }
+
+ rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+ /* Mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ /* supp_rates has no CCK bits in A mode */
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask = (u16)(rate_mask &
+ (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ else
+ rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
+ }
+
+ /* If we switched from HT to legacy, check current rate */
+ if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
+ low = scale_index;
+ goto out;
+ }
+
+ high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
+ tbl->lq_type);
+ low = high_low & 0xff;
+
+ if (low == IWL_RATE_INVALID)
+ low = scale_index;
+
+out:
+ return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool table_type_matches(struct iwl_scale_tbl_info *a,
+ struct iwl_scale_tbl_info *b)
+{
+ return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+ (a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ int legacy_success;
+ int retries;
+ int rs_index, mac_index, i;
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_lq_cmd *table;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mac80211_rate_control_flags mac_flags;
+ u32 tx_rate;
+ struct iwl_scale_tbl_info tbl_type;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+
+ IWL_DEBUG_RATE_LIMIT(mvm,
+ "get frame ack response, update rate scale window\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ /*
+ * Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ tx_rate = le32_to_cpu(table->rs_table[0]);
+ rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, &rs_index);
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rs_index -= IWL_FIRST_OFDM_RATE;
+ mac_flags = info->status.rates[0].flags;
+ mac_index = info->status.rates[0].idx;
+ /* For HT packets, map MCS to PLCP */
+ if (mac_flags & IEEE80211_TX_RC_MCS) {
+ /* Remove # of streams */
+ mac_index &= RATE_HT_MCS_RATE_CODE_MSK;
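+		/* The HT MCS set has no 9 Mbps entry, so skip over its slot */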
+ if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+ mac_index++;
+ /*
+ * mac80211 HT index is always zero-indexed; we need to move
+ * HT OFDM rates after CCK rates in 2.4 GHz band
+ */
+ if (info->band == IEEE80211_BAND_2GHZ)
+ mac_index += IWL_FIRST_OFDM_RATE;
+ }
+ /* Here we actually compare this rate to the latest LQ command */
+ if ((mac_index < 0) ||
+ (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+ (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+ (tbl_type.ant_type != info->status.antenna) ||
+ (!!(tx_rate & RATE_MCS_HT_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+ (rs_index != mac_index)) {
+ IWL_DEBUG_RATE(mvm,
+ "initial rate %d does not match %d (0x%x)\n",
+ mac_index, rs_index, tx_rate);
+		/*
+		 * Since the rates mismatch, the last LQ command may have
+		 * failed. After IWL_MISSED_RATE_MAX mismatches, resync the
+		 * uCode with the driver.
+		 */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ /* Figure out if rate scale algorithm is in active or search table */
+ if (table_type_matches(&tbl_type,
+ &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else if (table_type_matches(
+ &tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ IWL_DEBUG_RATE(mvm, "active- lq:%x, ant:%x, SGI:%d\n",
+ tmp_tbl->lq_type, tmp_tbl->ant_type,
+ tmp_tbl->is_SGI);
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ IWL_DEBUG_RATE(mvm, "search- lq:%x, ant:%x, SGI:%d\n",
+ tmp_tbl->lq_type, tmp_tbl->ant_type,
+ tmp_tbl->is_SGI);
+ IWL_DEBUG_RATE(mvm, "actual- lq:%x, ant:%x, SGI:%d\n",
+ tbl_type.lq_type, tbl_type.ant_type,
+ tbl_type.is_SGI);
+		/*
+		 * No matching table found; bypass the data collection and
+		 * continue performing rate scaling to find the right table.
+		 */
+ rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+	 * For aggregation, all packets were transmitted at the same rate, which
+	 * is the first index in the rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ tx_rate = le32_to_cpu(table->rs_table[0]);
+ rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type,
+ &rs_index);
+ rs_collect_tx_data(curr_tbl, rs_index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+		/*
+		 * For legacy, update the frame history for each Tx retry.
+		 */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ tx_rate = le32_to_cpu(table->rs_table[i]);
+ rs_get_tbl_info_from_mcs(tx_rate, info->band,
+ &tbl_type, &rs_index);
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (table_type_matches(&tbl_type, curr_tbl))
+ tmp_tbl = curr_tbl;
+ else if (table_type_matches(&tbl_type, other_tbl))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+ rs_collect_tx_data(tmp_tbl, rs_index, 1,
+ i < retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = tx_rate;
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta && sta->supp_rates[sband->band])
+ rs_rate_scale_perform(mvm, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
+ struct iwl_lq_sta *lq_sta)
+{
+ IWL_DEBUG_RATE(mvm, "we are staying in the same table\n");
+ lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ if (is_legacy) {
+ lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ /* Used to choose among HT tables */
+ s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+ /* Check for invalid LQ type */
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Legacy rates have only one table */
+ if (is_legacy(tbl->lq_type)) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+ * status */
+ if (is_siso(tbl->lq_type) && !tbl->is_ht40)
+ ht_tbl_pointer = expected_tpt_siso20MHz;
+ else if (is_siso(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_siso40MHz;
+ else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ else if (is_mimo2(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
+ ht_tbl_pointer = expected_tpt_mimo3_20MHz;
+ else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
+ ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+
+ if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
+ tbl->expected_tpt = ht_tbl_pointer[0];
+ else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
+ tbl->expected_tpt = ht_tbl_pointer[1];
+ else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
+ tbl->expected_tpt = ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput. When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32 rs_get_best_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl, /* "search" */
+ u16 rate_mask, s8 index)
+{
+ /* "active" values */
+ struct iwl_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 active_sr = active_tbl->win[index].success_ratio;
+ s32 active_tpt = active_tbl->expected_tpt[index];
+
+ /* expected "search" throughput */
+ s32 *tpt_tbl = tbl->expected_tpt;
+
+ s32 new_rate, high, low, start_hi;
+ u16 high_low;
+ s8 rate = index;
+
+ new_rate = high = low = start_hi = IWL_RATE_INVALID;
+
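+	/* Walk the rate table up or down until a suitable starting rate is found */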
+ while (1) {
+ high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
+ tbl->lq_type);
+
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /*
+ * Lower the "search" bit rate, to give new "search" mode
+ * approximately the same throughput as "active" if:
+ *
+ * 1) "Active" mode has been working modestly well (but not
+ * great), and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above the actual
+ * measured "active" throughput (but less than expected
+ * "active" throughput under perfect conditions).
+ * OR
+ * 2) "Active" mode has been working perfectly or very well
+ * and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above expected
+ * "active" throughput (under perfect conditions).
+ */
+ if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
+ ((active_sr > IWL_RATE_DECREASE_TH) &&
+ (active_sr <= IWL_RATE_HIGH_TH) &&
+ (tpt_tbl[rate] <= active_tpt))) ||
+ ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
+ (tpt_tbl[rate] > active_tpt))) {
+ /* (2nd or later pass)
+ * If we've already tried to raise the rate, and are
+ * now trying to lower it, use the higher rate. */
+ if (start_hi != IWL_RATE_INVALID) {
+ new_rate = start_hi;
+ break;
+ }
+
+ new_rate = rate;
+
+ /* Loop again with lower rate */
+ if (low != IWL_RATE_INVALID)
+ rate = low;
+
+ /* Lower rate not available, use the original */
+ else
+ break;
+
+ /* Else try to raise the "search" rate to match "active" */
+ } else {
+ /* (2nd or later pass)
+ * If we've already tried to lower the rate, and are
+ * now trying to raise it, use the lower rate. */
+ if (new_rate != IWL_RATE_INVALID)
+ break;
+
+ /* Loop again with higher rate */
+ else if (high != IWL_RATE_INVALID) {
+ start_hi = high;
+ rate = high;
+
+ /* Higher rate not available, use the original */
+ } else {
+ new_rate = rate;
+ break;
+ }
+ }
+ }
+
+ return new_rate;
+}
+
+static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta)
+{
+ return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+
+ if (!sta->ht_cap.ht_supported)
+ return -1;
+
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 2)
+ return -1;
+
+ IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
+
+ tbl->lq_type = LQ_MIMO2;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ rate_mask = lq_sta->active_mimo2_rate;
+
+ if (iwl_is_ht40_tx_allowed(sta))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 best rate %d mask %X\n",
+ rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for MIMO3
+ */
+static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+
+ if (!sta->ht_cap.ht_supported)
+ return -1;
+
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ return -1;
+
+	/* Need three Tx chains/antennas to support MIMO3 */
+ if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 3)
+ return -1;
+
+ IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
+
+ tbl->lq_type = LQ_MIMO3;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+ rate_mask = lq_sta->active_mimo3_rate;
+
+ if (iwl_is_ht40_tx_allowed(sta))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
+ rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int rs_switch_to_siso(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ u8 is_green = lq_sta->is_green;
+ s32 rate;
+
+ if (!sta->ht_cap.ht_supported)
+ return -1;
+
+ IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
+
+ tbl->lq_type = LQ_SISO;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ rate_mask = lq_sta->active_siso_rate;
+
+ if (iwl_is_ht40_tx_allowed(sta))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ if (is_green)
+ tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
+
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(mvm, "LQ: get best rate %d mask %X\n", rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm,
+ "can not switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int rs_move_legacy_other(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ int index)
+{
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
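+	/* sz excludes the trailing win[] history array, so the memcpy()s below
+	 * copy the table set-up without the per-rate history */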
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ u8 tx_chains_num = num_of_ant(valid_tx_ant);
+ int ret;
+ u8 update_search_tbl_counter = 0;
+
+ start_action = tbl->action;
+ while (1) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_LEGACY_SWITCH_ANTENNA1:
+ case IWL_LEGACY_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
+
+ if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ /* Don't change antenna if success has been great */
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ /* Set up search table to try other antenna */
+ memcpy(search_tbl, tbl, sz);
+
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ goto out;
+ }
+ break;
+ case IWL_LEGACY_SWITCH_SISO:
+ IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to SISO\n");
+
+ /* Set up search table to try SISO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ ret = rs_switch_to_siso(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+
+ break;
+ case IWL_LEGACY_SWITCH_MIMO2_AB:
+ case IWL_LEGACY_SWITCH_MIMO2_AC:
+ case IWL_LEGACY_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
+
+ /* Set up search table to try MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+
+ case IWL_LEGACY_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
+
+ /* Set up search table to try MIMO3 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int rs_move_siso_to_other(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta, int index)
+{
+ u8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ u8 tx_chains_num = num_of_ant(valid_tx_ant);
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ while (1) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_SISO_SWITCH_ANTENNA1:
+ case IWL_SISO_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
+ if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IWL_SISO_SWITCH_MIMO2_AB:
+ case IWL_SISO_SWITCH_MIMO2_AC:
+ case IWL_SISO_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+ break;
+ case IWL_SISO_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
+
+ memcpy(search_tbl, tbl, sz);
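+			/* 11n spec: SGI is not allowed in SISO+Greenfield */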
+ if (is_green) {
+ if (!tbl->is_SGI)
+ break;
+ else
+ IWL_ERR(mvm,
+ "SGI was set in GF+SISO\n");
+ }
+ search_tbl->is_SGI = !tbl->is_SGI;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ rate_n_flags_from_tbl(mvm, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ case IWL_SISO_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta, int index)
+{
+ s8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ u8 tx_chains_num = num_of_ant(valid_tx_ant);
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ while (1) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_MIMO2_SWITCH_ANTENNA1:
+ case IWL_MIMO2_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
+
+ if (tx_chains_num <= 2)
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IWL_MIMO2_SWITCH_SISO_A:
+ case IWL_MIMO2_SWITCH_SISO_B:
+ case IWL_MIMO2_SWITCH_SISO_C:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_siso(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO2_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO2 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (dual stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ rate_n_flags_from_tbl(mvm, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+
+ case IWL_MIMO2_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO3
+ */
+static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta, int index)
+{
+ s8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ u8 tx_chains_num = num_of_ant(valid_tx_ant);
+ int ret;
+ u8 update_search_tbl_counter = 0;
+
+ start_action = tbl->action;
+ while (1) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_MIMO3_SWITCH_ANTENNA1:
+ case IWL_MIMO3_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
+
+ if (tx_chains_num <= 3)
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate,
+ search_tbl))
+ goto out;
+ break;
+ case IWL_MIMO3_SWITCH_SISO_A:
+ case IWL_MIMO3_SWITCH_SISO_B:
+ case IWL_MIMO3_SWITCH_SISO_C:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_siso(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO3_SWITCH_MIMO2_AB:
+ case IWL_MIMO3_SWITCH_MIMO2_AC:
+ case IWL_MIMO3_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
+
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO3_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+			 * modulation (three streams with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ rate_n_flags_from_tbl(mvm, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_MIMO3_SWITCH_GI)
+ tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_MIMO3_SWITCH_GI)
+ tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int i;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct iwl_mvm *mvm;
+
+ mvm = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->stay_in_tbl) {
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ IWL_RATE_SCALE_FLUSH_INTVL));
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+ (lq_sta->total_success > lq_sta->max_success_limit) ||
+ ((!lq_sta->search_better_tbl) &&
+ (lq_sta->flush_timer) && (flush_interval_passed))) {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: stay is expired %d %d %d\n",
+ lq_sta->total_failed,
+ lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+ /*
+			 * Else, if we've used this modulation mode for enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >=
+ lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ IWL_DEBUG_RATE(mvm,
+ "LQ: stay in table clear win\n");
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(
+ &(tbl->win[i]));
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (!lq_sta->stay_in_tbl) {
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&(tbl->win[i]));
+ }
+ }
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int index, u8 is_green)
+{
+ u32 rate;
+
+ /* Update uCode's rate table. */
+ rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+ rs_fill_link_cmd(mvm, lq_sta, rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int low = IWL_RATE_INVALID;
+ int high = IWL_RATE_INVALID;
+ int index;
+ int i;
+ struct iwl_rate_scale_data *window = NULL;
+ int current_tpt = IWL_INVALID_VALUE;
+ int low_tpt = IWL_INVALID_VALUE;
+ int high_tpt = IWL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct iwl_scale_tbl_info *tbl, *tbl1;
+ u16 rate_scale_index_msk = 0;
+ u8 is_green = 0;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 tid = IWL_MAX_TID_COUNT;
+ struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data;
+
+ IWL_DEBUG_RATE(mvm, "rate scale calculate new rate for skb\n");
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ /* TODO: this could probably be improved.. */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+ tid = rs_tl_add_packet(lq_sta, hdr);
+ if ((tid != IWL_MAX_TID_COUNT) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &sta_priv->tid_data[tid];
+ if (tid_data->state == IWL_AGG_OFF)
+ lq_sta->is_agg = 0;
+ else
+ lq_sta->is_agg = 1;
+ } else {
+ lq_sta->is_agg = 0;
+ }
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = rs_use_green(sta);
+ is_green = lq_sta->is_green;
+
+ /* current tx rate */
+ index = lq_sta->last_txrate_idx;
+
+ IWL_DEBUG_RATE(mvm, "Rate scale index %d for type %d\n", index,
+ tbl->lq_type);
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+ IWL_DEBUG_RATE(mvm, "mask 0x%04X\n", rate_mask);
+
+ /* mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ /* supp_rates has no CCK bits in A mode */
+ rate_scale_index_msk = (u16) (rate_mask &
+ (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ else
+ rate_scale_index_msk = (u16) (rate_mask &
+ lq_sta->supp_rates);
+
+ } else {
+ rate_scale_index_msk = rate_mask;
+ }
+
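+	/* If masking removed every rate, fall back to the unrestricted set */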
+ if (!rate_scale_index_msk)
+ rate_scale_index_msk = rate_mask;
+
+ if (!((1 << index) & rate_scale_index_msk)) {
+ IWL_ERR(mvm, "Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+			/* revert to active table if search table is not valid */
+ tbl->lq_type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ /* get "active" rate info */
+ index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+ rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history window for current rate */
+ if (!tbl->expected_tpt) {
+ IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* force user max rate if set by user */
+ if ((lq_sta->max_rate_idx != -1) &&
+ (lq_sta->max_rate_idx < index)) {
+ index = lq_sta->max_rate_idx;
+ update_lq = 1;
+ window = &(tbl->win[index]);
+ goto lq_update;
+ }
+
+ window = &(tbl->win[index]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = window->counter - window->success_counter;
+ if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+ (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: still below TH. succ=%d total=%d for index %d\n",
+ window->success_counter, window->counter, index);
+
+ /* Can't calculate this yet; not enough history */
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ rs_stay_in_table(lq_sta, false);
+
+ goto out;
+ }
+ /* Else we have enough samples; calculate estimate of
+ * actual average throughput */
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(mvm,
+ "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (window->average_tpt > lq_sta->last_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: SWITCHING TO NEW TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ if (!is_legacy(tbl->lq_type))
+ lq_sta->enable_counter = 1;
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = window->average_tpt;
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: GOING BACK TO THE OLD TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ tbl->lq_type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
+ high_low = rs_get_adjacent_rate(mvm, index, rate_scale_index_msk,
+ tbl->lq_type);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+	/* If the user set a max rate, don't allow rates above that constraint */
+ if ((lq_sta->max_rate_idx != -1) &&
+ (lq_sta->max_rate_idx < high))
+ high = IWL_RATE_INVALID;
+
+ sr = window->success_ratio;
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = window->average_tpt;
+ if (low != IWL_RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != IWL_RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ scale_action = 0;
+
+ /* Too many failures, decrease rate */
+ if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
+ IWL_DEBUG_RATE(mvm,
+ "decrease rate because of low success_ratio\n");
+ scale_action = -1;
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE)) {
+ if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != IWL_RATE_INVALID)
+ scale_action = 0;
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it! */
+ else if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt))
+ scale_action = 0;
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance. */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IWL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt &&
+ sr >= IWL_RATE_INCREASE_TH) {
+ scale_action = 1;
+ } else {
+ scale_action = 0;
+ }
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IWL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (sr >= IWL_RATE_INCREASE_TH) {
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+ ((sr > IWL_RATE_HIGH_TH) ||
+ (current_tpt > (100 * tbl->expected_tpt[low]))))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = low;
+ }
+
+ break;
+ case 1:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = high;
+ }
+
+ break;
+ case 0:
+ /* No change */
+ default:
+ break;
+ }
+
+ IWL_DEBUG_RATE(mvm,
+ "choose rate scale index %d action %d low %d high %d type %d\n",
+ index, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq)
+ rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+
+ rs_stay_in_table(lq_sta, false);
+
+ /*
+ * Search for new modulation mode if we're:
+ * 1) Not changing rates right now
+ * 2) Not just finishing up a search
+ * 3) Allowing a new search
+ */
+ if (!update_lq && !done_search &&
+ !lq_sta->stay_in_tbl && window->counter) {
+		/* Save current throughput to compare with "search" throughput */
+ lq_sta->last_tpt = current_tpt;
+
+ /* Select a new "search" modulation mode to try.
+ * If one is found, set up the new "search" table. */
+ if (is_legacy(tbl->lq_type))
+ rs_move_legacy_other(mvm, lq_sta, sta, index);
+ else if (is_siso(tbl->lq_type))
+ rs_move_siso_to_other(mvm, lq_sta, sta, index);
+ else if (is_mimo2(tbl->lq_type))
+ rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
+ else
+ rs_move_mimo3_to_other(mvm, lq_sta, sta, index);
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&(tbl->win[i]));
+
+ /* Use new "search" start rate */
+ index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+
+ IWL_DEBUG_RATE(mvm,
+ "Switch current mcs: %X index: %d\n",
+ tbl->current_rate, index);
+ rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ } else {
+ done_search = 1;
+ }
+ }
+
+ if (done_search && !lq_sta->stay_in_tbl) {
+ /* If the "active" (non-search) mode was legacy,
+ * and we've tried switching antennas,
+ * but we haven't been able to try HT modes (not available),
+ * stay with best antenna legacy modulation for a while
+ * before next round of mode comparisons. */
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ if (is_legacy(tbl1->lq_type) && !sta->ht_cap.ht_supported &&
+ lq_sta->action_counter > tbl1->max_search) {
+ IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
+ rs_set_stay_in_table(mvm, 1, lq_sta);
+ }
+
+ /* If we're in an HT mode, and all 3 mode switch actions
+ * have been tried and compared, stay in this best modulation
+ * mode for a while before next round of mode comparisons. */
+ if (lq_sta->enable_counter &&
+ (lq_sta->action_counter >= tbl1->max_search)) {
+ if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+ (tid != IWL_MAX_TID_COUNT)) {
+ tid_data = &sta_priv->tid_data[tid];
+ if (tid_data->state == IWL_AGG_OFF) {
+ IWL_DEBUG_RATE(mvm,
+ "try to aggregate tid %d\n",
+ tid);
+ rs_tl_turn_on_agg(mvm, tid,
+ lq_sta, sta);
+ }
+ }
+ rs_set_stay_in_table(mvm, 0, lq_sta);
+ }
+ }
+
+out:
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+ lq_sta->last_txrate_idx = index;
+}
+
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void rs_initialize_lq(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ enum ieee80211_band band)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int rate_idx;
+ int i;
+ u32 rate;
+ u8 use_green = rs_use_green(sta);
+ u8 active_tbl = 0;
+ u8 valid_tx_ant;
+
+ if (!sta || !lq_sta)
+ return;
+
+ i = lq_sta->last_txrate_idx;
+
+ valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ if ((i < 0) || (i >= IWL_RATE_COUNT))
+ i = 0;
+
+ rate = iwl_rates[i].plcp;
+ tbl->ant_type = first_antenna(valid_tx_ant);
+ rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+ if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
+ rate |= RATE_MCS_CCK_MSK;
+
+ rs_get_tbl_info_from_mcs(rate, band, tbl, &rate_idx);
+ if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+ rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+ rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green);
+ tbl->current_rate = rate;
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ rs_fill_link_cmd(NULL, lq_sta, rate);
+ /* TODO restore station should remember the lq cmd */
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct iwl_op_mode *op_mode __maybe_unused =
+ (struct iwl_op_mode *)mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_lq_sta *lq_sta = mvm_sta;
+ int rate_idx;
+
+ IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
+
+ /* Get max rate if user set max rate */
+ if (lq_sta) {
+ lq_sta->max_rate_idx = txrc->max_rate_idx;
+ if ((sband->band == IEEE80211_BAND_5GHZ) &&
+ (lq_sta->max_rate_idx != -1))
+ lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+ if ((lq_sta->max_rate_idx < 0) ||
+ (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
+ lq_sta->max_rate_idx = -1;
+ }
+
+	/* Treat uninitialized rate scaling data the same as non-existent. */
+ if (lq_sta && !lq_sta->drv) {
+ IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+ mvm_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, mvm_sta, txrc))
+ return;
+
+ rate_idx = lq_sta->last_txrate_idx;
+
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+		/* 6 Mbps and 9 Mbps share the same MCS index */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO3_6M_PLCP)
+ rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
+ else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
+ info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
+ info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ /* Check for invalid rates */
+ if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
+ ((sband->band == IEEE80211_BAND_5GHZ) &&
+ (rate_idx < IWL_FIRST_OFDM_RATE)))
+ rate_idx = rate_lowest_index(sband, sta);
+ /* On valid 5 GHz rate, adjust index */
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+ info->control.rates[0].flags = 0;
+ }
+ info->control.rates[0].idx = rate_idx;
+}
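
As a worked illustration of the HT index mapping in rs_get_rate() above: the driver rate index is rebased to the OFDM rates, 6 Mbps and 9 Mbps collapse onto MCS 0, and each extra spatial stream adds one block of MCS indexes. The constants and helper below are assumed stand-ins for illustration, not the driver's definitions.

#define EX_FIRST_OFDM_RATE	4	/* assumed index of 6 Mbps */
#define EX_MCS_PER_STREAM	8	/* assumed MCS indexes per spatial stream */

static int ex_ht_mcs_index(int rate_idx, int n_streams)
{
	int mcs = rate_idx - EX_FIRST_OFDM_RATE;

	if (mcs > 0)			/* 6 Mbps and 9 Mbps share MCS 0 */
		mcs--;
	return mcs + (n_streams - 1) * EX_MCS_PER_STREAM;
}
/* e.g. a MIMO2 rate two steps above 6 Mbps maps to MCS 1 + 8 = 9. */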
+
+static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+ gfp_t gfp)
+{
+ struct iwl_mvm_sta *sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
+ struct iwl_op_mode *op_mode __maybe_unused =
+ (struct iwl_op_mode *)mvm_rate;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+
+ IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+ return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum ieee80211_band band)
+{
+ int i, j;
+ struct ieee80211_hw *hw = mvm->hw;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct iwl_mvm_sta *sta_priv;
+ struct iwl_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+ unsigned long supp; /* must be unsigned long for for_each_set_bit */
+
+ sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[band];
+
+ lq_sta->lq.sta_id = sta_priv->sta_id;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->supp_rates = sta->supp_rates[sband->band];
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+
+ IWL_DEBUG_RATE(mvm,
+ "LQ: *** rate scale station global init for station %d ***\n",
+ sta_priv->sta_id);
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
+ lq_sta->max_rate_idx = -1;
+ lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+ lq_sta->is_green = rs_use_green(sta);
+ lq_sta->band = sband->band;
+ /*
+ * active legacy rates as per supported rates bitmap
+ */
+ supp = sta->supp_rates[sband->band];
+ lq_sta->active_legacy_rate = 0;
+ for_each_set_bit(i, &supp, BITS_PER_LONG)
+ lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
+
+ /*
+ * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+ * supp_rates[] does not; shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
+ lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
+ lq_sta->active_mimo3_rate &= ~((u16)0x2);
+ lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
+
+ IWL_DEBUG_RATE(mvm,
+ "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
+ lq_sta->active_siso_rate,
+ lq_sta->active_mimo2_rate,
+ lq_sta->active_mimo3_rate);
+
+ /* These values will be overridden later */
+ lq_sta->lq.single_stream_ant_msk =
+ first_antenna(mvm->nvm_data->valid_tx_ant);
+ lq_sta->lq.dual_stream_ant_msk =
+ mvm->nvm_data->valid_tx_ant &
+ ~first_antenna(mvm->nvm_data->valid_tx_ant);
+ if (!lq_sta->lq.dual_stream_ant_msk) {
+ lq_sta->lq.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(mvm->nvm_data->valid_tx_ant) == 2) {
+ lq_sta->lq.dual_stream_ant_msk =
+ mvm->nvm_data->valid_tx_ant;
+ }
+
+	/* By default, allow aggregation for all TIDs */
+ lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+ lq_sta->drv = mvm;
+
+ /* Set last_txrate_idx to lowest rate */
+ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+ lq_sta->is_agg = 0;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
+ rs_initialize_lq(mvm, sta, lq_sta, band);
+}
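
The rx_mask conversion in iwl_mvm_rs_rate_init() above is easier to see in isolation: shift the HT MCS mask left to make room for the 9 Mbps slot, keep MCS 0 in bit 0, force the 9 Mbps bit off, then align with the legacy OFDM base. A self-contained sketch, with an assumed value standing in for IWL_FIRST_OFDM_RATE:

#define EX_FIRST_OFDM_RATE 4	/* assumed for illustration */

static unsigned int ex_ht_rx_mask_to_rate_mask(unsigned char rx_mask)
{
	unsigned int rate;

	rate = rx_mask << 1;		/* make room for the 9 Mbps slot */
	rate |= rx_mask & 0x1;		/* keep MCS 0 (6 Mbps) in bit 0 */
	rate &= ~0x2u;			/* force the 9 Mbps bit off */
	return rate << EX_FIRST_OFDM_RATE;
}
/* rx_mask 0xff -> 0x1fd before the final shift: bits 0 and 2..8 set. */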
+
+static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta, u32 new_rate)
+{
+ struct iwl_scale_tbl_info tbl_type;
+ int index = 0;
+ int rate_idx;
+ int repeat_rate = 0;
+ u8 ant_toggle_cnt = 0;
+ u8 use_ht_possible = 1;
+ u8 valid_tx_ant = 0;
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+
+ /* Override starting rate (index 0) if needed for debug purposes */
+ rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Interpret new_rate (rate_n_flags) */
+ rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+ &tbl_type, &rate_idx);
+
+ /* How many times should we repeat the initial rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ ant_toggle_cnt = 1;
+ repeat_rate = IWL_NUMBER_TRY;
+ } else {
+ repeat_rate = min(IWL_HT_NUMBER_TRY,
+ LINK_QUAL_AGG_DISABLE_START_DEF - 1);
+ }
+
+ lq_cmd->mimo_delim = is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+ /* Fill 1st table entry (index 0) */
+ lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+
+ if (num_of_ant(tbl_type.ant_type) == 1)
+ lq_cmd->single_stream_ant_msk = tbl_type.ant_type;
+ else if (num_of_ant(tbl_type.ant_type) == 2)
+ lq_cmd->dual_stream_ant_msk = tbl_type.ant_type;
+ /* otherwise we don't modify the existing value */
+
+ index++;
+ repeat_rate--;
+ if (mvm)
+ valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+
+ /* Fill rest of rate table */
+ while (index < LINK_QUAL_MAX_RETRY_NUM) {
+ /* Repeat initial/next rate.
+ * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+ * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+ while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (mvm &&
+ rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+ }
+
+ /* Override next rate if needed for debug purposes */
+ rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[index] =
+ cpu_to_le32(new_rate);
+ repeat_rate--;
+ index++;
+ }
+
+ rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+ &rate_idx);
+
+ /* Indicate to uCode which entries might be MIMO.
+ * If initial rate was MIMO, this will finally end up
+ * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+ if (is_mimo(tbl_type.lq_type))
+ lq_cmd->mimo_delim = index;
+
+ /* Get next rate */
+ new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+ use_ht_possible);
+
+ /* How many times should we repeat the next rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (mvm &&
+ rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+
+ repeat_rate = IWL_NUMBER_TRY;
+ } else {
+ repeat_rate = IWL_HT_NUMBER_TRY;
+ }
+
+ /* Don't allow HT rates after next pass.
+ * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ use_ht_possible = 0;
+
+ /* Override next rate if needed for debug purposes */
+ rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+
+ index++;
+ repeat_rate--;
+ }
+
+ lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+ lq_cmd->agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
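
The retry table built by rs_fill_link_cmd() above follows a simple pattern: the initial rate is written several times, then progressively lower fallback rates fill the remaining slots. The sketch below reproduces only that fill pattern with stand-in sizes and a trivial rate-lowering step; it is not the driver's algorithm, which also handles antenna toggling and the HT-to-legacy transition.

#define EX_RETRY_SLOTS 16	/* stand-in for LINK_QUAL_MAX_RETRY_NUM */

static int ex_lower_rate(int rate)
{
	return rate > 0 ? rate - 1 : 0;	/* simplified stand-in for rs_get_lower_rate() */
}

/* repeat must be at least 1 */
static void ex_fill_retry_table(int table[EX_RETRY_SLOTS], int rate, int repeat)
{
	int index = 0;

	while (index < EX_RETRY_SLOTS) {
		int r;

		for (r = 0; r < repeat && index < EX_RETRY_SLOTS; r++)
			table[index++] = rate;	/* repeat the current rate */
		rate = ex_lower_rate(rate);	/* then fall back */
	}
}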
+
+static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void rs_free(void *mvm_rate)
+{
+ return;
+}
+
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
+ void *mvm_sta)
+{
+ struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+
+ IWL_DEBUG_RATE(mvm, "enter\n");
+ IWL_DEBUG_RATE(mvm, "leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
+{
+ struct iwl_mvm *mvm;
+ u8 valid_tx_ant;
+ u8 ant_sel_tx;
+
+ mvm = lq_sta->drv;
+ valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ if (lq_sta->dbg_fixed_rate) {
+ ant_sel_tx =
+ ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
+ >> RATE_MCS_ANT_POS);
+ if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+ *rate_n_flags = lq_sta->dbg_fixed_rate;
+ IWL_DEBUG_RATE(mvm, "Fixed rate ON\n");
+ } else {
+ lq_sta->dbg_fixed_rate = 0;
+ IWL_ERR(mvm,
+ "Invalid antenna selection 0x%X, Valid is 0x%X\n",
+ ant_sel_tx, valid_tx_ant);
+ IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+ }
+ } else {
+ IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+ }
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_mvm *mvm;
+ char buf[64];
+ size_t buf_size;
+ u32 parsed_rate;
+
+ mvm = lq_sta->drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->dbg_fixed_rate = 0;
+
+ rs_program_fix_rate(mvm, lq_sta);
+
+ return count;
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ int index = 0;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_mvm *mvm;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+ mvm = lq_sta->drv;
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
+	desc += sprintf(buff+desc, "failed=%d success=%d rate=0x%X\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc += sprintf(buff+desc, "fixed rate 0x%X\n",
+ lq_sta->dbg_fixed_rate);
+ desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+ (mvm->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (mvm->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (mvm->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ desc += sprintf(buff+desc, "lq type %s\n",
+ (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+ if (is_Ht(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " %s",
+ (is_siso(tbl->lq_type)) ? "SISO" :
+ ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
+ desc += sprintf(buff+desc, " %s",
+ (tbl->is_ht40) ? "40MHz" : "20MHz");
+ desc += sprintf(buff+desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "",
+ (lq_sta->is_agg) ? "AGG on" : "");
+ }
+ desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc += sprintf(buff+desc,
+			"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+ lq_sta->lq.flags,
+ lq_sta->lq.mimo_delim,
+ lq_sta->lq.single_stream_ant_msk,
+ lq_sta->lq.dual_stream_ant_msk);
+
+ desc += sprintf(buff+desc,
+			"agg: time_limit=%d disable_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_time_limit),
+ lq_sta->lq.agg_disable_start_th,
+ lq_sta->lq.agg_frame_cnt_limit);
+
+ desc += sprintf(buff+desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.initial_rate_index[0],
+ lq_sta->lq.initial_rate_index[1],
+ lq_sta->lq.initial_rate_index[2],
+ lq_sta->lq.initial_rate_index[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ index = iwl_hwrate_to_plcp_idx(
+ le32_to_cpu(lq_sta->lq.rs_table[i]));
+ if (is_legacy(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+ i, le32_to_cpu(lq_sta->lq.rs_table[i]),
+ iwl_rate_mcs[index].mbps);
+ } else {
+ desc += sprintf(buff+desc,
+ " rate[%d] 0x%X %smbps (%s)\n",
+ i, le32_to_cpu(lq_sta->lq.rs_table[i]),
+ iwl_rate_mcs[index].mbps,
+ iwl_rate_mcs[index].mcs);
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = rs_sta_dbgfs_scale_table_write,
+ .read = rs_sta_dbgfs_scale_table_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ desc += sprintf(buff+desc,
+ "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n"
+ "rate=0x%X\n",
+ lq_sta->active_tbl == i ? "*" : "x",
+ lq_sta->lq_info[i].lq_type,
+ lq_sta->lq_info[i].is_SGI,
+ lq_sta->lq_info[i].is_ht40,
+ lq_sta->is_green,
+ lq_sta->lq_info[i].current_rate);
+ for (j = 0; j < IWL_RATE_COUNT; j++) {
+ desc += sprintf(buff+desc,
+ "counter=%d success=%d %%=%d\n",
+ lq_sta->lq_info[i].win[j].counter,
+ lq_sta->lq_info[i].win[j].success_counter,
+ lq_sta->lq_info[i].win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = rs_sta_dbgfs_stats_table_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+ char buff[120];
+ int desc = 0;
+
+ if (is_Ht(tbl->lq_type))
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = rs_sta_dbgfs_rate_scale_data_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
+{
+ struct iwl_lq_sta *lq_sta = mvm_sta;
+ lq_sta->rs_sta_dbgfs_scale_table_file =
+ debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", S_IRUSR, dir,
+ lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", S_IRUSR, dir,
+ lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
+ lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+ debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->tx_agg_tid_en);
+}
+
+static void rs_remove_debugfs(void *mvm, void *mvm_sta)
+{
+ struct iwl_lq_sta *lq_sta = mvm_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by the driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added, we ignore it.
+ */
+static void rs_rate_init_stub(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *mvm_sta)
+{
+}
+static struct rate_control_ops rs_mvm_ops = {
+ .module = NULL,
+ .name = RS_NAME,
+ .tx_status = rs_tx_status,
+ .get_rate = rs_get_rate,
+ .rate_init = rs_rate_init_stub,
+ .alloc = rs_alloc,
+ .free = rs_free,
+ .alloc_sta = rs_alloc_sta,
+ .free_sta = rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = rs_add_debugfs,
+ .remove_sta_debugfs = rs_remove_debugfs,
+#endif
+};
+
+int iwl_mvm_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_mvm_ops);
+}
+
+void iwl_mvm_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_mvm_ops);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
new file mode 100644
index 000000000000..219c6857cc0f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -0,0 +1,393 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __rs_h__
+#define __rs_h__
+
+#include <net/mac80211.h>
+
+#include "iwl-config.h"
+
+#include "fw-api.h"
+#include "iwl-trans.h"
+
+struct iwl_rs_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+};
+
+#define IWL_RATE_60M_PLCP 3
+
+enum {
+ IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+ IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+#define LINK_QUAL_MAX_RETRY_NUM 16
+
+enum {
+ IWL_RATE_6M_INDEX_TABLE = 0,
+ IWL_RATE_9M_INDEX_TABLE,
+ IWL_RATE_12M_INDEX_TABLE,
+ IWL_RATE_18M_INDEX_TABLE,
+ IWL_RATE_24M_INDEX_TABLE,
+ IWL_RATE_36M_INDEX_TABLE,
+ IWL_RATE_48M_INDEX_TABLE,
+ IWL_RATE_54M_INDEX_TABLE,
+ IWL_RATE_1M_INDEX_TABLE,
+ IWL_RATE_2M_INDEX_TABLE,
+ IWL_RATE_5M_INDEX_TABLE,
+ IWL_RATE_11M_INDEX_TABLE,
+ IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
+#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
+#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
+#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
+#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
+#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
+#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
+#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
+#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
+#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
+#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
+#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
+
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+ IWL_RATE_SISO_6M_PLCP = 0,
+ IWL_RATE_SISO_12M_PLCP = 1,
+ IWL_RATE_SISO_18M_PLCP = 2,
+ IWL_RATE_SISO_24M_PLCP = 3,
+ IWL_RATE_SISO_36M_PLCP = 4,
+ IWL_RATE_SISO_48M_PLCP = 5,
+ IWL_RATE_SISO_54M_PLCP = 6,
+ IWL_RATE_SISO_60M_PLCP = 7,
+ IWL_RATE_MIMO2_6M_PLCP = 0x8,
+ IWL_RATE_MIMO2_12M_PLCP = 0x9,
+ IWL_RATE_MIMO2_18M_PLCP = 0xa,
+ IWL_RATE_MIMO2_24M_PLCP = 0xb,
+ IWL_RATE_MIMO2_36M_PLCP = 0xc,
+ IWL_RATE_MIMO2_48M_PLCP = 0xd,
+ IWL_RATE_MIMO2_54M_PLCP = 0xe,
+ IWL_RATE_MIMO2_60M_PLCP = 0xf,
+ IWL_RATE_MIMO3_6M_PLCP = 0x10,
+ IWL_RATE_MIMO3_12M_PLCP = 0x11,
+ IWL_RATE_MIMO3_18M_PLCP = 0x12,
+ IWL_RATE_MIMO3_24M_PLCP = 0x13,
+ IWL_RATE_MIMO3_36M_PLCP = 0x14,
+ IWL_RATE_MIMO3_48M_PLCP = 0x15,
+ IWL_RATE_MIMO3_54M_PLCP = 0x16,
+ IWL_RATE_MIMO3_60M_PLCP = 0x17,
+ IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+ IWL_RATE_6M_IEEE = 12,
+ IWL_RATE_9M_IEEE = 18,
+ IWL_RATE_12M_IEEE = 24,
+ IWL_RATE_18M_IEEE = 36,
+ IWL_RATE_24M_IEEE = 48,
+ IWL_RATE_36M_IEEE = 72,
+ IWL_RATE_48M_IEEE = 96,
+ IWL_RATE_54M_IEEE = 108,
+ IWL_RATE_60M_IEEE = 120,
+ IWL_RATE_1M_IEEE = 2,
+ IWL_RATE_2M_IEEE = 4,
+ IWL_RATE_5M_IEEE = 11,
+ IWL_RATE_11M_IEEE = 22,
+};
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+
+#define IWL_INVALID_VALUE -1
+
+#define IWL_MIN_RSSI_VAL -100
+#define IWL_MAX_RSSI_VAL 0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IWL_LEGACY_FAILURE_LIMIT 160
+#define IWL_LEGACY_SUCCESS_LIMIT 480
+#define IWL_LEGACY_TABLE_COUNT 160
+
+#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
+#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
+#define IWL_NONE_LEGACY_TABLE_COUNT 1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IWL_RS_GOOD_RATIO 12800 /* 100% */
+#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
+#define IWL_RATE_HIGH_TH 10880 /* 85% */
+#define IWL_RATE_INCREASE_TH 6400 /* 50% */
+#define IWL_RATE_DECREASE_TH 1920 /* 15% */
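
A hedged example of how these 128-scaled thresholds are used: a window's success ratio is 128 * 100 * ACKed / attempted, so 48 ACKs out of 64 attempts gives 9600 (75%), above IWL_RATE_INCREASE_TH but below IWL_RATE_HIGH_TH. The helper below is illustrative only; the driver computes this inside its window bookkeeping.

static int ex_success_ratio(int success_counter, int counter)
{
	if (!counter)
		return -1;		/* no data yet (IWL_INVALID_VALUE) */
	return (128 * 100 * success_counter) / counter;
}
/* ex_success_ratio(48, 64) == 9600, i.e. 75% on the 128-scaled scale. */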
+
+/* possible actions when in legacy mode */
+#define IWL_LEGACY_SWITCH_ANTENNA1 0
+#define IWL_LEGACY_SWITCH_ANTENNA2 1
+#define IWL_LEGACY_SWITCH_SISO 2
+#define IWL_LEGACY_SWITCH_MIMO2_AB 3
+#define IWL_LEGACY_SWITCH_MIMO2_AC 4
+#define IWL_LEGACY_SWITCH_MIMO2_BC 5
+#define IWL_LEGACY_SWITCH_MIMO3_ABC 6
+
+/* possible actions when in siso mode */
+#define IWL_SISO_SWITCH_ANTENNA1 0
+#define IWL_SISO_SWITCH_ANTENNA2 1
+#define IWL_SISO_SWITCH_MIMO2_AB 2
+#define IWL_SISO_SWITCH_MIMO2_AC 3
+#define IWL_SISO_SWITCH_MIMO2_BC 4
+#define IWL_SISO_SWITCH_GI 5
+#define IWL_SISO_SWITCH_MIMO3_ABC 6
+
+
+/* possible actions when in mimo mode */
+#define IWL_MIMO2_SWITCH_ANTENNA1 0
+#define IWL_MIMO2_SWITCH_ANTENNA2 1
+#define IWL_MIMO2_SWITCH_SISO_A 2
+#define IWL_MIMO2_SWITCH_SISO_B 3
+#define IWL_MIMO2_SWITCH_SISO_C 4
+#define IWL_MIMO2_SWITCH_GI 5
+#define IWL_MIMO2_SWITCH_MIMO3_ABC 6
+
+
+/* possible actions when in mimo3 mode */
+#define IWL_MIMO3_SWITCH_ANTENNA1 0
+#define IWL_MIMO3_SWITCH_ANTENNA2 1
+#define IWL_MIMO3_SWITCH_SISO_A 2
+#define IWL_MIMO3_SWITCH_SISO_B 3
+#define IWL_MIMO3_SWITCH_SISO_C 4
+#define IWL_MIMO3_SWITCH_MIMO2_AB 5
+#define IWL_MIMO3_SWITCH_MIMO2_AC 6
+#define IWL_MIMO3_SWITCH_MIMO2_BC 7
+#define IWL_MIMO3_SWITCH_GI 8
+
+
+#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
+
+/* FIXME: RS: add possible actions for MIMO3 */
+
+#define IWL_ACTION_LIMIT 3 /* # possible actions */
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD 0
+#define IWL_AGG_LOAD_THRESHOLD 10
+#define IWL_AGG_ALL_TID 0xff
+#define TID_QUEUE_CELL_SPACING 50	/* ms */
+#define TID_QUEUE_MAX_SIZE 20
+#define TID_ROUND_VALUE 5 /* ms */
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
+
+enum iwl_table_type {
+ LQ_NONE,
+ LQ_G, /* legacy types */
+ LQ_A,
+ LQ_SISO, /* high-throughput types */
+ LQ_MIMO2,
+ LQ_MIMO3,
+ LQ_MAX,
+};
+
+#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
+#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE 12
+
+struct iwl_rate_mcs_info {
+ char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+ unsigned long stamp;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+ enum iwl_table_type lq_type;
+ u8 ant_type;
+ u8 is_SGI; /* 1 = short guard interval */
+ u8 is_ht40; /* 1 = 40 MHz channel width */
+ u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+	u8 max_search;	/* maximum number of tables we can search */
+ s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ u32 current_rate; /* rate_n_flags, uCode API format */
+ struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+ unsigned long time_stamp; /* age of the oldest statistics */
+ u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
+ * slice */
+ u32 total; /* total num of packets during the
+ * last TID_MAX_TIME_DIFF */
+	u8 queue_count;	        /* number of queues that have
+				 * been used since the last cleanup */
+ u8 head; /* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+ u8 active_tbl; /* index of active table, range 0-1 */
+ u8 enable_counter; /* indicates HT mode */
+ u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u8 action_counter; /* # mode-switch actions tried */
+ u8 is_green;
+ enum ieee80211_band band;
+
+ /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+ u32 supp_rates;
+ u16 active_legacy_rate;
+ u16 active_siso_rate;
+ u16 active_mimo2_rate;
+ u16 active_mimo3_rate;
+ s8 max_rate_idx; /* Max rate set by user */
+ u8 missed_rate_counter;
+
+ struct iwl_lq_cmd lq;
+ struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
+ u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_scale_table_file;
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+ struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ u32 dbg_fixed_rate;
+#endif
+ struct iwl_mvm *drv;
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+ /* BT traffic this sta was last updated in */
+ u8 last_bt_traffic;
+};
+
+static inline u8 num_of_ant(u8 mask)
+{
+ return !!((mask) & ANT_A) +
+ !!((mask) & ANT_B) +
+ !!((mask) & ANT_C);
+}
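
A hedged usage note for num_of_ant(): with the usual single-bit antenna masks it simply counts active chains. The values below are assumed for illustration (ANT_A/B/C as bits 0..2), matching how the masks are combined elsewhere in this file.

enum { EX_ANT_A = 0x1, EX_ANT_B = 0x2, EX_ANT_C = 0x4 };	/* assumed values */

/* ex_num_of_ant(EX_ANT_A)            == 1  single chain     */
/* ex_num_of_ant(EX_ANT_A | EX_ANT_B) == 2  dual-stream mask */
/* ex_num_of_ant(0x7)                 == 3  all three chains */
static inline int ex_num_of_ant(unsigned char mask)
{
	return !!(mask & EX_ANT_A) + !!(mask & EX_ANT_B) + !!(mask & EX_ANT_C);
}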
+
+/* Initialize station's rate scaling information after adding station */
+extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum ieee80211_band band);
+
+/**
+ * iwl_mvm_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module. The driver can call
+ * iwl_mvm_rate_control_register in order to register the rate control
+ * callbacks with the mac80211 subsystem. This should be performed prior to
+ * calling ieee80211_register_hw.
+ */
+extern int iwl_mvm_rate_control_register(void);
+
+/**
+ * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void iwl_mvm_rate_control_unregister(void);
+
+#endif /* __rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
new file mode 100644
index 000000000000..3f40ab05bbd8
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -0,0 +1,356 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include "iwl-trans.h"
+
+#include "mvm.h"
+#include "fw-api.h"
+
+/*
+ * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
+ *
+ * Copies the phy information into mvm->last_phy_info; it will be used when
+ * the actual data arrives from the fw in the next packet.
+ */
+int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
+ mvm->ampdu_ref++;
+ return 0;
+}
+
+/*
+ * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
+ *
+ * Adds the rxb to a new skb and gives it to mac80211
+ */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct ieee80211_hdr *hdr, u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ unsigned int hdrlen, fraglen;
+
+	/* Don't use dev_alloc_skb(); we'll have enough headroom once the
+	 * ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+ /* If frame is small enough to fit in skb->head, pull it completely.
+ * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
+ * are more efficient.
+ */
+ hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
+
+ memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+ fraglen = len - hdrlen;
+
+ if (fraglen) {
+ int offset = (void *)hdr + hdrlen -
+ rxb_addr(rxb) + rxb_offset(rxb);
+
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx_ni(mvm->hw, skb);
+}
+
+/*
+ * iwl_mvm_calc_rssi - calculate the rssi in dBm
+ * @phy_info: the phy information for the incoming packet
+ */
+static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
+ struct iwl_rx_phy_info *phy_info)
+{
+ u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db;
+ u32 val;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the Digital Signal Processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's Automatic Gain Control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info.
+ */
+ val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
+ rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
+ rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
+ val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]);
+ rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS;
+
+ val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
+ agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
+
+ max_rssi = max_t(u32, rssi_a, rssi_b);
+ max_rssi = max_t(u32, max_rssi, rssi_c);
+
+ IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ rssi_a, rssi_b, rssi_c, max_rssi, agc_db);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc_db - IWL_RSSI_OFFSET;
+}
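
A hedged numeric walk-through of the formula above: with per-chain DSP values of 70, 65 and 62 and an AGC reading of 40 dB, the reported signal is 70 - 40 - offset. The offset below is a stand-in, not necessarily the driver's IWL_RSSI_OFFSET.

#define EX_RSSI_OFFSET 44	/* assumed constant for illustration */

static int ex_rssi_dbm(unsigned int rssi_a, unsigned int rssi_b,
		       unsigned int rssi_c, unsigned int agc_db)
{
	unsigned int max_rssi = rssi_a;

	if (rssi_b > max_rssi)
		max_rssi = rssi_b;
	if (rssi_c > max_rssi)
		max_rssi = rssi_c;
	return (int)max_rssi - (int)agc_db - EX_RSSI_OFFSET;
}
/* ex_rssi_dbm(70, 65, 62, 40) == -14, i.e. -14 dBm with a 44 dB offset. */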
+
+/*
+ * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
+ * @mvm: the mvm object
+ * @hdr: 80211 header
+ * @stats: status in mac80211's format
+ * @rx_pkt_status: status coming from fw
+ *
+ * returns a non-zero value if the packet should be dropped
+ */
+static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *stats,
+ u32 rx_pkt_status)
+{
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_NO_ENC)
+ return 0;
+
+ /* packet was encrypted with unknown alg */
+ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_ENC_ERR)
+ return 0;
+
+ switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
+ case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
+ /* alg is CCM: check MIC only */
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n");
+ return 0;
+
+ case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
+ /* Don't drop the frame and decrypt it in SW */
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+ return 0;
+ /* fall through if TTAK OK */
+
+ case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+
+ default:
+ IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+ }
+
+ return 0;
+}
+
+/*
+ * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
+ *
+ * Handles the actual data of the Rx packet from the fw
+ */
+int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status rx_status = {};
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_info *phy_info;
+ struct iwl_rx_mpdu_res_start *rx_res;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+ u32 rx_pkt_status;
+
+ phy_info = &mvm->last_phy_info;
+ rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
+ len = le16_to_cpu(rx_res->byte_count);
+ rx_pkt_status = le32_to_cpup((__le32 *)
+ (pkt->data + sizeof(*rx_res) + len));
+
+ memset(&rx_status, 0, sizeof(rx_status));
+
+ /*
+	 * drop the packet if HW decryption failed
+ */
+ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) {
+ IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
+ rx_pkt_status);
+ return 0;
+ }
+
+ if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
+ IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
+ phy_info->cfg_phy_cnt);
+ return 0;
+ }
+
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
+ !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
+ IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
+ return 0;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_info->timestamp);
+ rx_status.device_timestamp = le32_to_cpu(phy_info->system_timestamp);
+ rx_status.band =
+ (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
+ rx_status.band);
+ /*
+ * TSF as indicated by the fw is at INA time, but mac80211 expects the
+ * TSF at the beginning of the MPDU.
+ */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info);
+
+ IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
+ (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap,
+ * it wants an actual antenna number but I always get "7"
+ * for most legacy frames I receive indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) &
+ RX_RES_PHY_FLAGS_ANTENNA)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+ /*
+ * We know which subframes of an A-MPDU belong
+ * together since we get a single PHY response
+ * from the firmware for all of them
+ */
+ rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status.ampdu_reference = mvm->ampdu_ref;
+ }
+
+ /* Set up the HT phy flags */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status.flag |= RX_FLAG_40MHZ;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status.flag |= RX_FLAG_80MHZ;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status.flag |= RX_FLAG_160MHZ;
+ break;
+ }
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ rx_status.flag |= RX_FLAG_HT_GF;
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ rx_status.flag |= RX_FLAG_HT;
+ rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ rx_status.vht_nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status.flag |= RX_FLAG_VHT;
+ } else {
+ rx_status.rate_idx =
+ iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status.band);
+ }
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
+ rxb, &rx_status);
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
new file mode 100644
index 000000000000..9b21b92aa8d1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -0,0 +1,442 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "iwl-eeprom-parse.h"
+#include "fw-api-scan.h"
+
+#define IWL_PLCP_QUIET_THRESH 1
+#define IWL_ACTIVE_QUIET_TIME 10
+
+static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
+{
+ u16 rx_chain;
+ u8 rx_ant = mvm->nvm_data->valid_rx_ant;
+
+ rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
+ return cpu_to_le16(rx_chain);
+}
+
+static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
+{
+ if (vif->bss_conf.assoc)
+ return cpu_to_le32(200 * 1024);
+ else
+ return 0;
+}
+
+static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
+{
+ if (vif->bss_conf.assoc)
+ return cpu_to_le32(vif->bss_conf.beacon_int);
+ else
+ return 0;
+}
+
+static inline __le32
+iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
+{
+ if (req->channels[0]->band == IEEE80211_BAND_2GHZ)
+ return cpu_to_le32(PHY_BAND_24);
+ else
+ return cpu_to_le32(PHY_BAND_5);
+}
+
+static inline __le32
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
+ bool no_cck)
+{
+ u32 tx_ant;
+
+ mvm->scan_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
+ mvm->scan_last_antenna_idx);
+ tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+ if (band == IEEE80211_BAND_2GHZ && !no_cck)
+ return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
+ tx_ant);
+ else
+ return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
+}
+
+/*
+ * We insert the SSIDs in an inverted order, because the FW will
+ * invert it back. The most prioritized SSID, which is first in the
+ * request list, is not copied here, but inserted directly to the probe
+ * request.
+ */
+static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
+ struct cfg80211_scan_request *req)
+{
+ int fw_idx, req_idx;
+
+ fw_idx = 0;
+	for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--, fw_idx++) {
+ cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
+ cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
+ memcpy(cmd->direct_scan[fw_idx].ssid,
+ req->ssids[req_idx].ssid,
+ req->ssids[req_idx].ssid_len);
+ }
+}
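
A hedged illustration of the inverted ordering described above: for a request list {S0, S1, S2}, S0 (the most prioritized SSID) is placed in the probe request itself, while the command's direct-scan array receives S2 and then S1. The sketch below mirrors that copy loop on plain strings; names and types are illustrative only.

/* Sketch only: SSID 0 is handled by the probe request, not copied here. */
static void ex_fill_ssids_inverted(const char *const src[], int n,
				   const char *dst[])
{
	int fw_idx = 0, req_idx;

	for (req_idx = n - 1; req_idx > 0; req_idx--)
		dst[fw_idx++] = src[req_idx];
	/* n == 3: dst = { src[2], src[1] } */
}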
+
+/*
+ * If req->n_ssids > 0, it means we should do an active scan.
+ * In case of active scan w/o directed scan, we receive a zero-length SSID
+ * just to notify that this scan is active and not passive.
+ * In order to notify the FW of the number of SSIDs we wish to scan (including
+ * the zero-length one), we need to set the corresponding bits in chan->type,
+ * one for each SSID, and set the active bit (first).
+ */
+static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+{
+ if (band == IEEE80211_BAND_2GHZ)
+ return 30 + 3 * (n_ssids + 1);
+ return 20 + 2 * (n_ssids + 1);
+}
+
+static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+{
+ return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
+}
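
A hedged worked example of the dwell formulas above: on 2.4 GHz with two directed SSIDs (three SSIDs including the implicit wildcard) the active dwell is 30 + 3 * 3 = 39 TU and the passive dwell is 100 + 20 = 120 TU; on 5 GHz the same request gives 20 + 2 * 3 = 26 TU active and 110 TU passive. The helpers below are illustrative restatements of that arithmetic, not driver API.

/* Illustration only: same arithmetic as the two helpers above. */
static unsigned short ex_active_dwell_24ghz(int n_ssids)
{
	return 30 + 3 * (n_ssids + 1);	/* n_ssids == 2 -> 39 TU */
}

static unsigned short ex_passive_dwell_24ghz(void)
{
	return 100 + 20;		/* 120 TU on 2.4 GHz */
}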
+
+static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
+ struct cfg80211_scan_request *req)
+{
+ u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
+ u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
+ req->n_ssids);
+ struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
+ (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
+ int i;
+ __le32 chan_type_value;
+
+ if (req->n_ssids > 0)
+ chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
+ else
+ chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
+
+ for (i = 0; i < cmd->channel_count; i++) {
+ chan->channel = cpu_to_le16(req->channels[i]->hw_value);
+ if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ chan->type = chan_type_value;
+ chan->active_dwell = cpu_to_le16(active_dwell);
+ chan->passive_dwell = cpu_to_le16(passive_dwell);
+ chan->iteration_count = cpu_to_le16(1);
+ chan++;
+ }
+}
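
To make the chan->type bitmap above concrete: for three requested SSIDs the active-channel value is BIT(3 + 1) - 1 = 0xf, i.e. one bit per SSID including the zero-length wildcard, with the lowest (active) bit set. A one-line illustrative helper:

static unsigned int ex_active_chan_type(int n_ssids)
{
	return (1u << (n_ssids + 1)) - 1;	/* n_ssids == 3 -> 0xf */
}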
+
+/*
+ * Fill in probe request with the following parameters:
+ * TA is our vif HW address, which mac80211 ensures we have.
+ * Packet is broadcasted, so this is both SA and DA.
+ * The probe request IE is made out of two: first comes the most prioritized
+ * SSID if a directed scan is requested. Second comes whatever extra
+ * information was given to us as the scan request IE.
+ */
+static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
+ int n_ssids, const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len,
+ int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ eth_broadcast_addr(frame->da);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ eth_broadcast_addr(frame->bssid);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* for passive scans, no need to fill anything */
+ if (n_ssids == 0)
+ return (u16)len;
+
+ /* points to the payload of the request */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our SSID IE */
+ left -= ssid_len + 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = ssid_len;
+ if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
+ memcpy(pos, ssid, ssid_len);
+ pos += ssid_len;
+ }
+
+ len += ssid_len + 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ie && ie_len) {
+ memcpy(pos, ie, ie_len);
+ len += ie_len;
+ }
+
+ return (u16)len;
+}
+
+int iwl_mvm_scan_request(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = SCAN_REQUEST_CMD,
+ .len = { 0, },
+ .data = { mvm->scan_cmd, },
+ .flags = CMD_SYNC,
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_scan_cmd *cmd = mvm->scan_cmd;
+ int ret;
+ u32 status;
+ int ssid_len = 0;
+ u8 *ssid = NULL;
+
+ lockdep_assert_held(&mvm->mutex);
+ BUG_ON(mvm->scan_cmd == NULL);
+
+ IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
+ mvm->scan_status = IWL_MVM_SCAN_OS;
+ memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
+ mvm->fw->ucode_capa.max_probe_length +
+ (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
+
+ cmd->channel_count = (u8)req->n_channels;
+ cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
+ cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
+ cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
+ cmd->max_out_time = iwl_mvm_scan_max_out_time(vif);
+ cmd->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
+ cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+ MAC_FILTER_IN_BEACON);
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
+ else
+ cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
+
+ cmd->repeats = cpu_to_le32(1);
+
+ /*
+ * If the user asked for passive scan, don't change to active scan if
+ * you see any activity on the channel - remain passive.
+ */
+ if (req->n_ssids > 0) {
+ cmd->passive2active = cpu_to_le16(1);
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
+ } else {
+ cmd->passive2active = 0;
+ }
+
+ iwl_mvm_scan_fill_ssids(cmd, req);
+
+ cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
+ cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ cmd->tx_cmd.rate_n_flags =
+ iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
+ req->no_cck);
+
+ cmd->tx_cmd.len =
+ cpu_to_le16(iwl_mvm_fill_probe_req(
+ (struct ieee80211_mgmt *)cmd->data,
+ vif->addr,
+ req->n_ssids, ssid, ssid_len,
+ req->ie, req->ie_len,
+ mvm->fw->ucode_capa.max_probe_length));
+
+ iwl_mvm_scan_fill_channels(cmd, req);
+
+ cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
+ le16_to_cpu(cmd->tx_cmd.len) +
+ (cmd->channel_count * sizeof(struct iwl_scan_channel)));
+ hcmd.len[0] = le16_to_cpu(cmd->len);
+
+ status = SCAN_RESPONSE_OK;
+ ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
+ if (!ret && status == SCAN_RESPONSE_OK) {
+ IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+ } else {
+ /*
+ * If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
+ status, ret);
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ret = -EIO;
+ }
+ return ret;
+}
+
+int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_cmd_response *resp = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
+ le32_to_cpu(resp->status));
+ return 0;
+}
+
+int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scan_complete_notif *notif = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
+ notif->status, notif->scanned_channels);
+
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
+
+ return 0;
+}
+
+static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_scan_complete_notif *notif;
+ u32 *resp;
+
+ switch (pkt->hdr.cmd) {
+ case SCAN_ABORT_CMD:
+ resp = (void *)pkt->data;
+ if (*resp == CAN_ABORT_STATUS) {
+ IWL_DEBUG_SCAN(mvm,
+ "Scan can be aborted, wait until completion\n");
+ return false;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
+ *resp);
+ return true;
+
+ case SCAN_COMPLETE_NOTIFICATION:
+ notif = (void *)pkt->data;
+ IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
+ notif->status);
+ return true;
+
+ default:
+ WARN_ON(1);
+ return false;
+	}
+}
+
+void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
+{
+ struct iwl_notification_wait wait_scan_abort;
+ static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
+ SCAN_COMPLETE_NOTIFICATION };
+ int ret;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
+ scan_abort_notif,
+ ARRAY_SIZE(scan_abort_notif),
+ iwl_mvm_scan_abort_notif, NULL);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+ if (ret) {
+ IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
+ goto out_remove_notif;
+ }
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ);
+ if (ret)
+ IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
+
+ return;
+
+out_remove_notif:
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
new file mode 100644
index 000000000000..861a7f9f8e7f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -0,0 +1,1241 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "sta.h"
+
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
+{
+ int sta_id;
+
+ WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
+ for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
+ if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex)))
+ return sta_id;
+ return IWL_MVM_STATION_COUNT;
+}
+
+/* send station add/update command to firmware */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool update)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_add_sta_cmd add_sta_cmd;
+ int ret;
+ u32 status;
+ u32 agg_size = 0, mpdu_dens = 0;
+
+ memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
+
+ add_sta_cmd.sta_id = mvm_sta->sta_id;
+ add_sta_cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ if (!update) {
+ add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+ memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+ }
+ add_sta_cmd.add_modify = update ? 1 : 0;
+
+ /* STA_FLG_FAT_EN_MSK ? */
+ /* STA_FLG_MIMO_EN_MSK ? */
+
+ if (sta->ht_cap.ht_supported) {
+ add_sta_cmd.station_flags_msk |=
+ cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
+ STA_FLG_AGG_MPDU_DENS_MSK);
+
+ mpdu_dens = sta->ht_cap.ampdu_density;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ agg_size = sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ agg_size >>=
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ } else if (sta->ht_cap.ht_supported) {
+ agg_size = sta->ht_cap.ampdu_factor;
+ }
+
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+ &add_sta_cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "ADD_STA failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ int i, ret, sta_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ sta_id = iwl_mvm_find_free_sta_id(mvm);
+ else
+ sta_id = mvm_sta->sta_id;
+
+ if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
+ return -ENOSPC;
+
+ spin_lock_init(&mvm_sta->lock);
+
+ mvm_sta->sta_id = sta_id;
+ mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color);
+ mvm_sta->vif = vif;
+ mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+ /* HW restart, don't assume the memory has been zeroed */
+ atomic_set(&mvm_sta->pending_frames, 0);
+ mvm_sta->tid_disable_agg = 0;
+ mvm_sta->tfd_queue_msk = 0;
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
+ mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
+
+ if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
+
+ /* for HW restart - need to reset the seq_number etc... */
+ memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+ if (ret)
+ return ret;
+
+ /* The first station added is the AP, the others are TDLS STAs */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+ mvmvif->ap_sta_id = sta_id;
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+
+ return 0;
+}
+
+int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+}
+
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool drain)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
+ cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
+ mvmsta->sta_id);
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
+ mvmsta->sta_id);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Remove a station from the FW table. Before sending the command to remove
+ * the station, validate that the station is indeed known to the driver
+ * (sanity check only).
+ */
+static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
+ .sta_id = sta_id,
+ };
+ int ret;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* Note: internal stations are marked as error values */
+ if (!sta) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC,
+ sizeof(rm_sta_cmd), &rm_sta_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_sta_drained_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
+ u8 sta_id;
+
+ /*
+	 * The mutex is needed not only because of the SYNC cmd: if this work
+	 * ran concurrently with iwl_mvm_rm_sta, it could run before
+	 * iwl_mvm_rm_sta marks the station as busy and then exit.
+	 * iwl_mvm_rm_sta would then mark the station as busy, and nobody would
+	 * ever clean that up.
+ */
+ mutex_lock(&mvm->mutex);
+
+ for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
+ int ret;
+ struct ieee80211_sta *sta =
+ rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* This station is in use */
+ if (!IS_ERR(sta))
+ continue;
+
+ if (PTR_ERR(sta) == -EINVAL) {
+ IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
+ sta_id);
+ continue;
+ }
+
+ if (!sta) {
+ IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
+ sta_id);
+ continue;
+ }
+
+ WARN_ON(PTR_ERR(sta) != -EBUSY);
+ /* This station was removed and we waited until it got drained,
+ * we can now proceed and remove it.
+ */
+ ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Couldn't remove sta %d after it was drained\n",
+ sta_id);
+ continue;
+ }
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ clear_bit(sta_id, mvm->sta_drained);
+ }
+
+ mutex_unlock(&mvm->mutex);
+}
+
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id == mvm_sta->sta_id) {
+ /*
+		 * Put a non-NULL value since the fw station isn't removed yet.
+		 * It will be removed after the MAC is set as
+		 * unassociated.
+ */
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ ERR_PTR(-EINVAL));
+
+ /* flush its queues here since we are freeing mvm_sta */
+ ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+
+ /* if we are associated - we can't remove the AP STA now */
+ if (vif->bss_conf.assoc)
+ return ret;
+
+ /* unassoc - go ahead - remove the AP STA now */
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ }
+
+ /*
+	 * If there are frames pending on the AC queues for this station,
+	 * we need to wait until all of them are drained...
+ */
+ if (atomic_read(&mvm_sta->pending_frames)) {
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ ERR_PTR(-EBUSY));
+ } else {
+ ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 sta_id)
+{
+ int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ return ret;
+}
+
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
+ u32 qmask)
+{
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
+ if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
+ return -ENOSPC;
+ }
+
+ sta->tfd_queue_msk = qmask;
+
+ /* put a non-NULL value so iterating over the stations won't stop */
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
+ return 0;
+}
+
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
+{
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+ memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
+ sta->sta_id = IWL_MVM_STATION_COUNT;
+}
+
+static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ const u8 *addr,
+ u16 mac_id, u16 color)
+{
+ struct iwl_mvm_add_sta_cmd cmd;
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd));
+ cmd.sta_id = sta->sta_id;
+ cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
+ color));
+
+ cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+
+ if (addr)
+ memcpy(cmd.addr, addr, ETH_ALEN);
+
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_INFO(mvm, "Internal station added.\n");
+ return 0;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
+ status);
+ break;
+ }
+ return ret;
+}
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Add the aux station, but without any queues */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
+ MAC_INDEX_AUX, 0);
+
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+ return ret;
+}
+
+/*
+ * Send the add station command for the vif's broadcast station.
+ * Assumes that the station was already allocated.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ * @bsta: the broadcast station to add.
+ */
+int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *bsta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
+ return -ENOSPC;
+
+ return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+ mvmvif->id, mvmvif->color);
+}
+
+/* Send the FW a request to remove the station from its internal data
+ * structures, but DO NOT remove the entry from the local data structures. */
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *bsta)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+ return ret;
+}
+
+/* Allocate a new station entry for the broadcast station to the given vif,
+ * and send it to the FW.
+ * Note that each P2P mac should have its own broadcast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ * @bsta: the broadcast station to add. */
+int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *bsta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ u32 qmask;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+ mvmvif->id, mvmvif->color);
+
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, bsta);
+ return ret;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+ if (ret)
+ return ret;
+
+ iwl_mvm_dealloc_int_sta(mvm, bsta);
+ return ret;
+}
+
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u16 ssn, bool start)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.sta_id = mvm_sta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.add_immediate_ba_tid = (u8) tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
+ STA_MODIFY_REMOVE_BA_TID;
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+ break;
+ case ADD_STA_IMMEDIATE_BA_FAILURE:
+ IWL_WARN(mvm, "RX BA Session refused by fw\n");
+ ret = -ENOSPC;
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ break;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u8 queue, bool start)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (start) {
+ mvm_sta->tfd_queue_msk |= BIT(queue);
+ mvm_sta->tid_disable_agg &= ~BIT(tid);
+ } else {
+ mvm_sta->tfd_queue_msk &= ~BIT(queue);
+ mvm_sta->tid_disable_agg |= BIT(tid);
+ }
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.sta_id = mvm_sta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ break;
+ }
+
+ return ret;
+}
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+};
+
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data;
+ int txq_id;
+
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+ IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
+ mvmsta->tid_data[tid].state);
+ return -ENXIO;
+ }
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (txq_id = IWL_MVM_FIRST_AGG_QUEUE;
+ txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++)
+ if (mvm->queue_to_mac80211[txq_id] ==
+ IWL_INVALID_MAC80211_QUEUE)
+ break;
+
+ if (txq_id > IWL_MVM_LAST_AGG_QUEUE) {
+ IWL_ERR(mvm, "Failed to allocate agg queue\n");
+ return -EIO;
+ }
+
+ /* the new tx queue is still connected to the same mac80211 queue */
+ mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
+
+ spin_lock_bh(&mvmsta->lock);
+ tid_data = &mvmsta->tid_data[tid];
+ tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->txq_id = txq_id;
+ *ssn = tid_data->ssn;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
+ mvmsta->sta_id, tid, txq_id, tid_data->ssn,
+ tid_data->next_reclaimed);
+
+ if (tid_data->ssn == tid_data->next_reclaimed) {
+ tid_data->state = IWL_AGG_STARTING;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ return 0;
+}
+
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ int queue, fifo, ret;
+ u16 ssn;
+
+ buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
+
+ spin_lock_bh(&mvmsta->lock);
+ ssn = tid_data->ssn;
+ queue = tid_data->txq_id;
+ tid_data->state = IWL_AGG_ON;
+ tid_data->ssn = 0xffff;
+ spin_unlock_bh(&mvmsta->lock);
+
+ fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
+
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+
+ iwl_trans_txq_enable(mvm->trans, queue, fifo, mvmsta->sta_id, tid,
+ buf_size, ssn);
+
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ mvmsta->max_agg_bufsize =
+ min(mvmsta->max_agg_bufsize, buf_size);
+ mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+
+ if (mvm->cfg->ht_params->use_rts_for_aggregation) {
+ /*
+		 * switch to RTS/CTS if it is the preferred protection
+		 * method for HT traffic
+ */
+ mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ /*
+ * TODO: remove the TLC_RTS flag when we tear down the last
+ * AGG session (agg_tids_count in DVM)
+ */
+ }
+
+ IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
+
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
+}
+
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ u16 txq_id;
+ int err;
+
+ spin_lock_bh(&mvmsta->lock);
+
+ txq_id = tid_data->txq_id;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
+ mvmsta->sta_id, tid, txq_id, tid_data->state);
+
+ switch (tid_data->state) {
+ case IWL_AGG_ON:
+ tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "ssn = %d, next_recl = %d\n",
+ tid_data->ssn, tid_data->next_reclaimed);
+
+ /* There are still packets for this RA / TID in the HW */
+ if (tid_data->ssn != tid_data->next_reclaimed) {
+ tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
+ err = 0;
+ break;
+ }
+
+ tid_data->ssn = 0xffff;
+ iwl_trans_txq_disable(mvm->trans, txq_id);
+ /* fall through */
+ case IWL_AGG_STARTING:
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+		 * The agg session has been stopped before it was set up. This
+		 * can happen, for example, when the AddBA timer times out.
+ */
+
+ /* No barriers since we are under mutex */
+ lockdep_assert_held(&mvm->mutex);
+ mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ tid_data->state = IWL_AGG_OFF;
+ err = 0;
+ break;
+ default:
+ IWL_ERR(mvm,
+ "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+ mvmsta->sta_id, tid, tid_data->state);
+ IWL_ERR(mvm,
+ "\ttid_data->txq_id = %d\n", tid_data->txq_id);
+ err = -EINVAL;
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ return err;
+}
+
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
+{
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+
+ if (i == STA_KEY_MAX_NUM)
+ return STA_KEY_IDX_INVALID;
+
+ __set_bit(i, mvm->fw_key_table);
+
+ return i;
+}
+
+static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+
+ if (sta) {
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+ return mvm_sta->sta_id;
+ }
+
+ /*
+ * The device expects GTKs for station interfaces to be
+ * installed as GTKs for the AP station. If we have no
+	 * station ID, then use the AP's station ID.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
+ return mvmvif->ap_sta_id;
+
+ return IWL_INVALID_STATION;
+}
+
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
+ u32 cmd_flags)
+{
+ __le16 key_flags;
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret, status;
+ u16 keyidx;
+ int i;
+
+ keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK;
+ key_flags = cpu_to_le16(keyidx);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
+ cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+ memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
+ memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.key.key_offset = keyconf->hw_key_idx;
+ cmd.key.key_flags = key_flags;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_KEY;
+ cmd.sta_id = sta_id;
+
+ status = ADD_STA_SUCCESS;
+ if (cmd_flags == CMD_SYNC)
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ else
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ sizeof(cmd), &cmd);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id, bool remove_key)
+{
+ struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
+
+ /* verify the key details match the required command's expectations */
+ if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
+ (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+ (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
+ return -EINVAL;
+
+ igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
+ igtk_cmd.sta_id = cpu_to_le32(sta_id);
+
+ if (remove_key) {
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
+ } else {
+ struct ieee80211_key_seq seq;
+ const u8 *pn;
+
+ memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
+ ieee80211_aes_cmac_calculate_k1_k2(keyconf,
+ igtk_cmd.K1, igtk_cmd.K2);
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ pn = seq.aes_cmac.pn;
+ igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
+ ((u64) pn[4] << 8) |
+ ((u64) pn[3] << 16) |
+ ((u64) pn[2] << 24) |
+ ((u64) pn[1] << 32) |
+ ((u64) pn[0] << 40));
+ }
+
+ IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
+ remove_key ? "removing" : "installing",
+ igtk_cmd.sta_id);
+
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC,
+ sizeof(igtk_cmd), &igtk_cmd);
+}
+
+
+static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+
+ if (sta)
+ return sta->addr;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ u8 sta_id = mvmvif->ap_sta_id;
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ return sta->addr;
+ }
+
+
+ return NULL;
+}
+
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ bool have_key_offset)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ int ret;
+ u8 *addr, sta_id;
+ struct ieee80211_key_seq seq;
+ u16 p1k[5];
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Get the station id from the mvm local station table */
+ sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(mvm, "Failed to find station id\n");
+ return -EINVAL;
+ }
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+ goto end;
+ }
+
+ /*
+ * It is possible that the 'sta' parameter is NULL, and thus
+ * there is a need to retrieve the sta from the local station table.
+ */
+ if (!sta) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+ }
+
+ mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+ if (WARN_ON_ONCE(mvm_sta->vif != vif))
+ return -EINVAL;
+
+ if (!have_key_offset) {
+ /*
+ * The D3 firmware hardcodes the PTK offset to 0, so we have to
+ * configure it there. As a result, this workaround exists to
+ * let the caller set the key offset (hw_key_idx), see d3.c.
+ */
+ keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
+ if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+ return -ENOSPC;
+ }
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+ /* get phase 1 key from mac80211 */
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+ seq.tkip.iv32, p1k, CMD_SYNC);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+ 0, NULL, CMD_SYNC);
+ break;
+ default:
+ IWL_ERR(mvm, "Unknown cipher %x\n", keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+
+end:
+ IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+ sta->addr, ret);
+ return ret;
+}
+
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ __le16 key_flags;
+ int ret, status;
+ u8 sta_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Get the station id from the mvm local station table */
+ sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+
+ IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
+ keyconf->keyidx, sta_id);
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
+
+ ret = __test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+ if (!ret) {
+ IWL_ERR(mvm, "offset %d not used in fw key table.\n",
+ keyconf->hw_key_idx);
+ return -ENOENT;
+ }
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
+ return 0;
+ }
+
+ /*
+ * It is possible that the 'sta' parameter is NULL, and thus
+ * there is a need to retrieve the sta from the local station table,
+ * for example when a GTK is removed (where the sta_id will then be
+ * the AP ID, and no station was passed by mac80211.)
+ */
+ if (!sta) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!sta) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+ }
+
+ mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+ if (WARN_ON_ONCE(mvm_sta->vif != vif))
+ return -EINVAL;
+
+ key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+ key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.key.key_flags = key_flags;
+ cmd.key.key_offset = keyconf->hw_key_idx;
+ cmd.sta_id = sta_id;
+
+ cmd.modify_mask = STA_MODIFY_KEY;
+ cmd.add_modify = STA_MODE_MODIFY;
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+
+ if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+ return;
+
+ rcu_read_lock();
+
+ if (!sta) {
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (WARN_ON(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+
+ mvm_sta = (void *)sta->drv_priv;
+ iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+ iv32, phase1key, CMD_ASYNC);
+ rcu_read_unlock();
+}
+
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->sta_id,
+ .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+ .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ };
+ int ret;
+
+ /*
+ * Same modify mask for sleep_tx_count and sleep_state_flags but this
+ * should be fine since if we set the STA as "awake", then
+ * sleep_tx_count is not relevant.
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum ieee80211_frame_release_type reason,
+ u16 cnt)
+{
+ u16 sleep_state_flags =
+ (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
+ STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->sta_id,
+ .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+ .sleep_tx_count = cpu_to_le16(cnt),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ /*
+ * Same modify mask for sleep_tx_count and sleep_state_flags so
+ * we must set the sleep_state_flags too.
+ */
+ .sleep_state_flags = cpu_to_le16(sleep_state_flags),
+ };
+ int ret;
+
+ /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
new file mode 100644
index 000000000000..896f88ac8145
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -0,0 +1,374 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __sta_h__
+#define __sta_h__
+
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+#include <linux/wait.h>
+
+#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
+#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
+#include "rs.h"
+
+struct iwl_mvm;
+
+/**
+ * DOC: station table - introduction
+ *
+ * The station table is a list of data structures that represent the stations.
+ * In STA/P2P client mode, the driver will hold one station for the AP / GO.
+ * In GO/AP mode, the driver will have as many stations as associated clients.
+ * All these stations are reflected in the fw's station table. The driver
+ * keeps the fw's station table up to date with the ADD_STA command. Stations
+ * can be removed by the REMOVE_STA command.
+ *
+ * All the data related to a station is held in the structure %iwl_mvm_sta
+ * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area).
+ * This data includes the index of the station in the fw, per tid information
+ * (sequence numbers, Block-ack state machine, etc...). The stations are
+ * created and deleted by the %sta_state callback from %ieee80211_ops.
+ *
+ * The driver holds a map, %fw_id_to_mac_id, that allows fetching an
+ * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
+ * station index. That way, the driver is able to get the tid related data in
+ * O(1) in time sensitive paths (Tx / Tx response / BA notification). These
+ * paths are triggered by the fw, and the driver needs to get a pointer to the
+ * %ieee80211_sta structure. This map helps to get that pointer quickly.
+ */
+
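As a rough sketch of the O(1) lookup described above (not part of this patch; iwl_mvm_example_lookup() is a hypothetical helper and assumes the same headers as sta.c):

static void iwl_mvm_example_lookup(struct iwl_mvm *mvm, u8 fw_sta_id, u8 tid)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[fw_sta_id]);
	/* internal stations are stored as ERR_PTR() values, see below */
	if (IS_ERR_OR_NULL(sta)) {
		rcu_read_unlock();
		return;
	}
	mvm_sta = (void *)sta->drv_priv;
	/* the per-TID data is now reachable in O(1) */
	IWL_DEBUG_TX(mvm, "next_reclaimed for tid %d: %d\n", tid,
		     mvm_sta->tid_data[tid].next_reclaimed);
	rcu_read_unlock();
}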
+/**
+ * DOC: station table - locking
+ *
+ * As stated before, the station is created / deleted by mac80211's %sta_state
+ * callback from %ieee80211_ops, which can sleep. The next paragraph explains
+ * the locking of a single station; the following ones relate to the station
+ * table.
+ *
+ * The station holds the sequence number per tid. So this data needs to be
+ * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
+ * information (the state machine and the logic that checks if the queues
+ * were drained), so it also needs to be accessible from the Tx response flow.
+ * In short, the station needs to be accessed from sleepable context as well
+ * as from tasklets, so the station itself needs a spinlock.
+ *
+ * The writers of the %fw_id_to_mac_id map are serialized by the global mutex
+ * of the mvm op_mode. This is possible since %sta_state can sleep.
+ * The pointers in this map are RCU protected, hence we won't replace the
+ * station while we have Tx / Tx response / BA notification running.
+ *
+ * If a station is deleted while it still has packets in its A-MPDU queues,
+ * then the reclaim flow will notice that there is no station in the map for
+ * sta_id and it will dump the responses.
+ */
+
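A minimal sketch of the per-station spinlock use described above (hypothetical helper, not part of this patch; the 0x10 step assumes the shifted sequence-number storage used elsewhere in the driver):

static u16 iwl_mvm_example_next_seq(struct iwl_mvm_sta *mvm_sta, u8 tid)
{
	u16 seq;

	/* tid_data is touched from the Tx path (softIRQ) and from the
	 * Tx response flow, hence the per-station spinlock
	 */
	spin_lock_bh(&mvm_sta->lock);
	seq = mvm_sta->tid_data[tid].seq_number;
	mvm_sta->tid_data[tid].seq_number = seq + 0x10;
	spin_unlock_bh(&mvm_sta->lock);

	return seq;
}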
+/**
+ * DOC: station table - internal stations
+ *
+ * The FW needs a few internal stations that are not reflected in
+ * mac80211, such as broadcast station in AP / GO mode, or AUX sta for
+ * scanning and P2P device (during the GO negotiation).
+ * For these kinds of stations we have the %iwl_mvm_int_sta struct, which
+ * holds the data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
+ * Usually the data for these stations is static, so no locking is required,
+ * and no TID data is kept, as it is not needed either.
+ * One thing to note is that these stations have an ID in the fw, but not
+ * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id
+ * we store ERR_PTR(-EINVAL) in this mapping, and all dereferences of
+ * pointers from this mapping must check that the value is not an error
+ * or NULL.
+ *
+ * Currently there is only one auxiliary station for scanning, initialized
+ * on init.
+ */
+
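Because internal stations are reserved with ERR_PTR(-EINVAL), any walk over %fw_id_to_mac_id has to tolerate error values as well as NULL; a sketch under that assumption (hypothetical helper, not part of this patch):

static void iwl_mvm_example_for_each_sta(struct iwl_mvm *mvm)
{
	int sta_id;

	lockdep_assert_held(&mvm->mutex);

	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		struct ieee80211_sta *sta;

		sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
		/* NULL = free slot, ERR_PTR() = internal / draining station */
		if (IS_ERR_OR_NULL(sta))
			continue;
		/* ... operate on the real mac80211 station here ... */
	}
}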
+/**
+ * DOC: station table - AP Station in STA mode
+ *
+ * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
+ * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
+ * %fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
+ * the AP station from the fw before setting the MAC context as unassociated.
+ * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
+ * removed by mac80211, but the station won't be removed in the fw until the
+ * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
+ */
+
+/**
+ * DOC: station table - Drain vs. Flush
+ *
+ * Flush means that all the frames in the SCD queue are dumped regardless of
+ * the station to which they were sent. We do that when we disassociate and
+ * before we remove the STA of the AP. The flush can be done synchronously
+ * against the fw.
+ * Drain means that the fw will drop all the frames sent to a specific station.
+ * This is useful when a client (if we are IBSS / GO or AP) disassociates. In
+ * that case, we need to drain all the frames for that client from the AC queues
+ * that are shared with the other clients. Only then can we remove the STA in
+ * the fw. In order to do so, we track the non-AMPDU packets for each station.
+ * If mac80211 removes a STA and if it still has non-AMPDU packets pending in
+ * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all
+ * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped
+ * (we know about it with its Tx response), we remove the station in fw and set
+ * it as %NULL in %fw_id_to_mac_id: this is the purpose of
+ * %iwl_mvm_sta_drained_wk.
+ */
+
+/**
+ * DOC: station table - fw restart
+ *
+ * When the fw asserts, or we have any other issue that requires resetting the
+ * driver, we require mac80211 to reconfigure the driver. Since the private
+ * data of the stations is embedded in mac80211's %ieee80211_sta, that data
+ * will not be zeroed and needs to be reinitialized manually.
+ * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart, and this hints that
+ * we must not allocate a new sta_id but reuse the previous one. This means
+ * that the stations being re-added after the reset will have the same
+ * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id
+ * map, since the stations aren't in the fw any more. Internal stations that
+ * are not added by mac80211 will be re-added in the init flow that is called
+ * after the restart: mac80211 calls %iwl_mvm_mac_start, which calls
+ * %iwl_mvm_up.
+ */
+
+/**
+ * DOC: AP mode - PS
+ *
+ * When a station is asleep, the fw will set it as "asleep". All the
+ * non-aggregation frames to that station will be dropped by the fw
+ * (%TX_STATUS_FAIL_DEST_PS failure code).
+ * AMPDUs are in a separate queue that is stopped by the fw. We just need to
+ * let mac80211 know how many frames we have in these queues so that it can
+ * properly handle trigger frames.
+ * When a trigger frame is received, mac80211 tells the driver to send
+ * frames from the AMPDU queues or AC queue depending on which queues are
+ * delivery-enabled and what TID has frames to transmit (note that mac80211 has
+ * all the knowledge since all the non-agg frames are buffered / filtered, and
+ * the driver tells mac80211 about agg frames). The driver needs to tell the fw
+ * to let frames out even if the station is asleep. This is done by
+ * %iwl_mvm_sta_modify_sleep_tx_count.
+ * When we receive a frame from that station with PM bit unset, the
+ * driver needs to let the fw know that this station isn't asleep any more.
+ * This is done by %iwl_mvm_sta_modify_ps_wake.
+ *
+ * TODO - EOSP handling
+ */
+
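A hedged sketch of how the two helpers mentioned above are intended to be driven from mac80211's PS callbacks (the wiring shown is illustrative, not this patch's actual mac80211 glue):

static void example_release_buffered_frames(struct iwl_mvm *mvm,
					    struct ieee80211_sta *sta,
					    u16 num_frames)
{
	/* let the fw transmit num_frames even though the STA is asleep */
	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					  IEEE80211_FRAME_RELEASE_PSPOLL,
					  num_frames);
}

static void example_sta_woke_up(struct iwl_mvm *mvm, struct ieee80211_sta *sta)
{
	/* a frame with the PM bit unset was received from this STA */
	iwl_mvm_sta_modify_ps_wake(mvm, sta);
}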
+/**
+ * enum iwl_mvm_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ */
+enum iwl_mvm_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_STARTING,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+ IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ * This is basically (last acked packet++).
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ * we are ready to finish the Tx AGG stop / start flow.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_mvm_tid_data {
+ u16 seq_number;
+ u16 next_reclaimed;
+ /* The rest is Tx AGG related */
+ u32 rate_n_flags;
+ enum iwl_mvm_agg_state state;
+ u16 txq_id;
+ u16 ssn;
+ bool wait_for_ba;
+};
+
+/**
+ * struct iwl_mvm_sta - representation of a station in the driver
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @tfd_queue_msk: the tfd queues used by the station
+ * @mac_id_n_color: the MAC context this station is linked to
+ * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
+ * tid.
+ * @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @lock: lock to protect the whole struct. Since %tid_data is accessed from
+ *	the Tx and the Tx response flows, it needs a spinlock.
+ * @pending_frames: number of frames for this STA on the shared Tx queues.
+ * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ */
+struct iwl_mvm_sta {
+ u32 sta_id;
+ u32 tfd_queue_msk;
+ u32 mac_id_n_color;
+ u16 tid_disable_agg;
+ u8 max_agg_bufsize;
+ spinlock_t lock;
+ atomic_t pending_frames;
+ struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+ struct iwl_lq_sta lq_sta;
+ struct ieee80211_vif *vif;
+
+#ifdef CONFIG_PM_SLEEP
+ u16 last_seq_ctl;
+#endif
+};
+
+/**
+ * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
+ * broadcast)
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @tfd_queue_msk: the tfd queues used by the station
+ */
+struct iwl_mvm_int_sta {
+ u32 sta_id;
+ u32 tfd_queue_msk;
+};
+
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool update);
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 sta_id);
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ bool have_key_offset);
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+
+/* AMPDU */
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u16 ssn, bool start);
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size);
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
+ u32 qmask);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta);
+int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta);
+void iwl_mvm_sta_drained_wk(struct work_struct *wk);
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum ieee80211_frame_release_type reason,
+ u16 cnt);
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool drain);
+
+#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
new file mode 100644
index 000000000000..e437e02c7149
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -0,0 +1,519 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/jiffies.h>
+#include <net/mac80211.h>
+
+#include "iwl-notif-wait.h"
+#include "iwl-trans.h"
+#include "fw-api.h"
+#include "time-event.h"
+#include "mvm.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+/* A TimeUnit is 1024 microseconds */
+#define TU_TO_JIFFIES(_tu)	(usecs_to_jiffies((_tu) * 1024))
+#define MSEC_TO_TU(_msec)	((_msec) * 1000 / 1024)
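For orientation, the conversions work out as in this small sketch (plain arithmetic, not part of this patch):

/* 500 ms => 500 * 1000 / 1024 = 488 TU (truncated);
 * 100 TU => 102400 us, turned into jiffies by usecs_to_jiffies()
 */
u32 duration_tu = MSEC_TO_TU(500);		/* 488 */
unsigned long end = jiffies + TU_TO_JIFFIES(100);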
+
+/* For ROC use a TE type which has priority high enough to be scheduled when
+ * there is a concurrent BSS or GO/AP. Currently, use a TE type that has
+ * priority similar to the TE priority used for action scans by the FW.
+ * TODO: This needs to be changed, based on the reason for the ROC, i.e., use
+ * TE_P2P_DEVICE_DISCOVERABLE for remain on channel without mgmt skb, and use
+ * TE_P2P_DEVICE_ACTION_SCAN
+ */
+#define IWL_MVM_ROC_TE_TYPE TE_P2P_DEVICE_ACTION_SCAN
+
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ lockdep_assert_held(&mvm->time_event_lock);
+
+ if (te_data->id == TE_MAX)
+ return;
+
+ list_del(&te_data->list);
+ te_data->running = false;
+ te_data->uid = 0;
+ te_data->id = TE_MAX;
+ te_data->vif = NULL;
+}
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ synchronize_net();
+
+ /*
+ * Flush the offchannel queue -- this is called when the time
+ * event finishes or is cancelled, so that frames queued for it
+ * won't get stuck on the queue and be transmitted in the next
+ * time event.
+ * We have to send the command asynchronously since this cannot
+ * be under the mutex for locking reasons, but that's not an
+ * issue as it will have to complete before the next command is
+ * executed, and a new time event means a new command.
+ */
+ iwl_mvm_flush_tx_path(mvm, BIT(IWL_OFFCHANNEL_QUEUE), false);
+}
+
+static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
+{
+ /*
+ * First, clear the ROC_RUNNING status bit. This will cause the TX
+ * path to drop offchannel transmissions. That would also be done
+ * by mac80211, but it is racy, in particular in the case that the
+ * time event actually completed in the firmware (which is handled
+ * in iwl_mvm_te_handle_notif).
+ */
+ clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+
+ /*
+ * Of course, our status bit is just as racy as mac80211, so in
+ * addition, fire off the work struct which will drop all frames
+ * from the hardware queues that made it through the race. First
+ * it will of course synchronize the TX path to make sure that
+ * any *new* TX will be rejected.
+ */
+ schedule_work(&mvm->roc_done_wk);
+}
+
+/*
+ * Handles a FW notification for an event that is known to the driver.
+ *
+ * @mvm: the mvm component
+ * @te_data: the time event data
+ * @notif: the notification data corresponding to the time event data.
+ */
+static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_notif *notif)
+{
+ lockdep_assert_held(&mvm->time_event_lock);
+
+ IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action));
+
+ /*
+ * The FW sends the start/end time event notifications even for events
+ * that it fails to schedule. This is indicated in the status field of
+ * the notification. This happens in cases where the scheduler cannot
+ * find a schedule that can handle the event (for example requesting
+ * P2P Device discoverability while there are other higher priority
+ * events in the system).
+ */
+ WARN_ONCE(!le32_to_cpu(notif->status),
+ "Failed to schedule time event\n");
+
+ if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_END) {
+ IWL_DEBUG_TE(mvm,
+ "TE ended - current time %lu, estimated end %lu\n",
+ jiffies, te_data->end_jiffies);
+
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_roc_finished(mvm);
+ }
+
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ if (te_data->vif->type == NL80211_IFTYPE_STATION &&
+ (!te_data->vif->bss_conf.assoc ||
+ !te_data->vif->bss_conf.dtim_period)) {
+ IWL_ERR(mvm,
+ "No assocation and the time event is over already...\n");
+ ieee80211_connection_loss(te_data->vif);
+ }
+
+ iwl_mvm_te_clear_data(mvm, te_data);
+ } else if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_START) {
+ te_data->running = true;
+ te_data->end_jiffies = jiffies +
+ TU_TO_JIFFIES(te_data->duration);
+
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw);
+ }
+ } else {
+ IWL_WARN(mvm, "Got TE with unknown action\n");
+ }
+}
+
+/*
+ * The Rx handler for time event notifications
+ */
+int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_time_event_notif *notif = (void *)pkt->data;
+ struct iwl_mvm_time_event_data *te_data, *tmp;
+
+ IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action));
+
+ spin_lock_bh(&mvm->time_event_lock);
+ list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
+ if (le32_to_cpu(notif->unique_id) == te_data->uid)
+ iwl_mvm_te_handle_notif(mvm, te_data, notif);
+ }
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ return 0;
+}
+
+static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_time_event_data *te_data = data;
+ struct iwl_time_event_resp *resp;
+ int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+ if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ /* we should never get a response to another TIME_EVENT_CMD here */
+ if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
+ return false;
+
+ te_data->uid = le32_to_cpu(resp->unique_id);
+ IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+ te_data->uid);
+ return true;
+}
+
+static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_cmd *te_cmd)
+{
+ static const u8 time_event_response[] = { TIME_EVENT_CMD };
+ struct iwl_notification_wait wait_time_event;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
+ le32_to_cpu(te_cmd->duration));
+
+ spin_lock_bh(&mvm->time_event_lock);
+ if (WARN_ON(te_data->id != TE_MAX)) {
+ spin_unlock_bh(&mvm->time_event_lock);
+ return -EIO;
+ }
+ te_data->vif = vif;
+ te_data->duration = le32_to_cpu(te_cmd->duration);
+ te_data->id = le32_to_cpu(te_cmd->id);
+ list_add_tail(&te_data->list, &mvm->time_event_list);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * Use a notification wait, which really just processes the
+ * command response and doesn't wait for anything, in order
+ * to be able to process the response and get the UID inside
+ * the RX path. Using CMD_WANT_SKB doesn't work because it
+ * stores the buffer and then wakes up this thread, by which
+ * time another notification (that the time event started)
+ * might already have been processed - unsuccessfully, since
+ * the UID would not be known yet.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+ time_event_response,
+ ARRAY_SIZE(time_event_response),
+ iwl_mvm_time_event_response, te_data);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(*te_cmd), te_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
+ iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+ goto out_clear_te;
+ }
+
+ /* No need to wait for anything, so just pass 1 (0 isn't valid) */
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+ /* should never fail */
+ WARN_ON_ONCE(ret);
+
+ if (ret) {
+ out_clear_te:
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+ return ret;
+}
+
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (te_data->running &&
+ time_after(te_data->end_jiffies,
+ jiffies + TU_TO_JIFFIES(min_duration))) {
+ IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ return;
+ }
+
+ if (te_data->running) {
+ IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
+ te_data->uid,
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ /*
+ * We don't have enough time left, so cancel the current TE and
+ * issue a new one. Of course it would be better to remove the
+ * old one only when the new one is added, but we don't care if
+ * we are off channel for a bit. All we need to do is not return
+ * before we actually begin to be on the channel.
+ */
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+ time_cmd.apply_time =
+ cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+
+ time_cmd.dep_policy = TE_INDEPENDENT;
+ time_cmd.is_present = cpu_to_le32(1);
+ time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
+ time_cmd.max_delay = cpu_to_le32(500);
+ /* TODO: why do we need interval = beacon interval if it is not periodic? */
+ time_cmd.interval = cpu_to_le32(1);
+ time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
+ time_cmd.duration = cpu_to_le32(duration);
+ time_cmd.repeat = cpu_to_le32(1);
+ time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
+
+ iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+/*
+ * Explicit request to remove a time event. The removal of a time event needs to
+ * be synchronized with the flow of a time event's end notification, which also
+ * removes the time event from the op mode data structures.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_time_event_cmd time_cmd = {};
+ u32 id, uid;
+ int ret;
+
+ /*
+ * It is possible that by the time we got to this point the time
+ * event was already removed.
+ */
+ spin_lock_bh(&mvm->time_event_lock);
+
+ /* Save time event uid before clearing its data */
+ uid = te_data->uid;
+ id = te_data->id;
+
+ /*
+ * The clear_data function handles time events that were already removed
+ */
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * It is possible that by the time we try to remove it, the time event
+ * has already ended and removed. In such a case there is no need to
+ * send a removal command.
+ */
+ if (id == TE_MAX) {
+ IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
+ return;
+ }
+
+ /* When we remove a TE, the UID is to be set in the id field */
+ time_cmd.id = cpu_to_le32(uid);
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+ IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(time_cmd), &time_cmd);
+ if (WARN_ON(ret))
+ return;
+}
+
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+
+ lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+ if (te_data->running) {
+ IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Flush the done work, just in case it's still pending, so that
+ * the work it does can complete and we can accept new frames.
+ */
+ flush_work(&mvm->roc_done_wk);
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE);
+
+ time_cmd.apply_time = cpu_to_le32(0);
+ time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
+ time_cmd.is_present = cpu_to_le32(1);
+
+ time_cmd.interval = cpu_to_le32(1);
+
+ /*
+ * IWL_MVM_ROC_TE_TYPE can have lower priority than other events
+ * that are being scheduled by the driver/fw, and thus it might not be
+ * scheduled. To improve the chances of it being scheduled, allow it to
+ * be fragmented.
+ * In addition, for the same reasons, allow delaying the scheduling of
+ * the time event.
+ */
+ time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
+ time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
+ time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+ time_cmd.repeat = cpu_to_le32(1);
+ time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
+
+ return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_time_event_data *te_data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /*
+ * Iterate over the list of time events and find the time event that is
+ * associated with a P2P_DEVICE interface.
+ * This assumes that a P2P_DEVICE interface can have only a single time
+ * event at any given time and this time event corresponds to a ROC
+ * request.
+ */
+ mvmvif = NULL;
+ spin_lock_bh(&mvm->time_event_lock);
+ list_for_each_entry(te_data, &mvm->time_event_list, list) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ break;
+ }
+ }
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (!mvmvif) {
+ IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n");
+ return;
+ }
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+
+ iwl_mvm_roc_finished(mvm);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
new file mode 100644
index 000000000000..64fb57a5ab43
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -0,0 +1,214 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __time_event_h__
+#define __time_event_h__
+
+#include "fw-api.h"
+
+#include "mvm.h"
+
+/**
+ * DOC: Time Events - what is it?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by a channel switch decided within the fw.
+ */
+
+ /**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are in a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ * 1) Driver sends a TIME_EVENT_CMD to the fw
+ * 2) Driver gets the response for that command. This response contains the
+ * Unique ID (UID) of the event.
+ * 3) The fw sends notification when the event starts.
+ *
+ * Of course the API provides various options that allow the driver to control
+ * the parameters of the flow:
+ * What is the duration of the event?
+ * What is the start time of the event?
+ * Is there an end-time for the event?
+ * How much can the event be delayed?
+ * Can the event be split?
+ * If yes, what is the maximal number of chunks?
+ * etc...
+ */
+
+/**
+ * DOC: Abstraction to the driver
+ *
+ * In order to simplify the use of time events for the rest of the driver,
+ * we abstract the use of time events. This component provides the functions
+ * needed by the driver.
+ */
+
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
+#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mvm_protect_session - start / extend the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ * @duration: the duration of the session in TU.
+ * @min_duration: will start a new session if the current session will end
+ * in less than min_duration.
+ *
+ * This function can be used to start a session protection which means that the
+ * fw will stay on the channel for %duration time units (TU). This function
+ * will block (sleep) until the session starts. This function can also be used
+ * to extend a currently running session.
+ * This function is meant to be used for BSS association for example, where we
+ * want to make sure that the fw stays on the channel during the association.
+ */
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration);
+
+/**
+ * iwl_mvm_stop_session_protection - cancel the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship. If the protection is not needed any more, it should be
+ * cancelled, because the other bindings wait for the medium during that time.
+ * This function doesn't sleep.
+ */
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
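Taken together, a BSS association path might use the two entry points above roughly as follows; this is a minimal sketch assuming mvm->mutex is held by the caller, and the helper name is illustrative, not part of this patch:

static void example_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	lockdep_assert_held(&mvm->mutex);

	/* keep the fw on the channel while the association runs; a new
	 * session is issued if less than the minimum time is left */
	iwl_mvm_protect_session(mvm, vif,
				IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
				IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS);

	/* ... association happens here ... */

	/* release the medium once the protection is no longer needed */
	iwl_mvm_stop_session_protection(mvm, vif);
}
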
+/*
+ * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
+ */
+int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+/**
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is requested. It is assumed
+ * that the vif type is NL80211_IFTYPE_P2P_DEVICE
+ * @duration: the requested duration in milliseconds for the fw to be on the
+ * channel that is bound to the vif.
+ *
+ * This function can be used to issue a remain on channel session,
+ * which means that the fw will stay on the channel for the requested %duration
+ * milliseconds. The function is async, meaning that it only issues the ROC
+ * request but does not wait for it to start. Once the FW is ready to serve the
+ * ROC request, it will issue a notification to the driver that it is on the
+ * requested channel. Once the FW completes the ROC request it will issue
+ * another notification to the driver.
+ */
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration);
+
+/**
+ * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ *
+ * This function can be used to cancel an ongoing ROC session.
+ * The function is async, it will instruct the FW to stop serving the ROC
+ * session, but will not wait for the actual stopping of the session.
+ */
+void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm);
+
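A hypothetical caller of the ROC pair above could look like the sketch below (the function name and the 200 ms duration are assumptions for illustration, not part of this patch):

static int example_start_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	mutex_lock(&mvm->mutex);
	/* async: mac80211 is told we are on channel only when the fw
	 * sends the TE start notification (ieee80211_ready_on_channel) */
	ret = iwl_mvm_start_p2p_roc(mvm, vif, 200 /* ms */);
	mutex_unlock(&mvm->mutex);
	return ret;
}

/* later, when mac80211 cancels the ROC (also under mvm->mutex): */
/* iwl_mvm_stop_p2p_roc(mvm); */
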
+/**
+ * iwl_mvm_remove_time_event - general function to clean up a time event
+ * @mvm: the mvm component
+ * @mvmvif: the vif to which the time event belongs
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function can be used to cancel a time event regardless of its type.
+ * It is useful for cleaning up time events that are still running before
+ * removing an interface.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data);
+
+/**
+ * iwl_mvm_te_clear_data - remove time event from list
+ * @mvm: the mvm component
+ * @te_data: the time event data to remove
+ *
+ * This function is mostly internal, it is made available here only
+ * for firmware restart purposes.
+ */
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data);
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk);
+
+#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
new file mode 100644
index 000000000000..6b67ce3f679c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -0,0 +1,916 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+
+#include "iwl-trans.h"
+#include "iwl-eeprom-parse.h"
+#include "mvm.h"
+#include "sta.h"
+
+/*
+ * Sets most of the Tx cmd's fields
+ */
+static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, u8 sta_id)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
+ u32 len = skb->len + FCS_LEN;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ tx_flags |= TX_CMD_FLG_ACK;
+ else
+ tx_flags &= ~TX_CMD_FLG_ACK;
+
+ if (ieee80211_is_probe_resp(fc))
+ tx_flags |= TX_CMD_FLG_TSF;
+ else if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+
+ /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
+ if (info->band == IEEE80211_BAND_2GHZ &&
+ (skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+ is_multicast_ether_addr(hdr->addr1) ||
+ ieee80211_is_back_req(fc) ||
+ ieee80211_is_mgmt(fc)))
+ tx_flags |= TX_CMD_FLG_BT_DIS;
+
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ } else {
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ }
+
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+
+ /* The spec allows Action frames in A-MPDU, but we don't
+ * support it.
+ */
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+ } else {
+ tx_cmd->pm_frame_timeout = 0;
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+ if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
+ !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+ tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = cpu_to_le32(tx_flags);
+ /* Total # bytes to be transmitted */
+ tx_cmd->len = cpu_to_le16((u16)skb->len);
+ tx_cmd->next_frame_len = 0;
+ tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ tx_cmd->sta_id = sta_id;
+}
+
+/*
+ * Sets the fields in the Tx cmd that are rate related
+ */
+static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ __le16 fc)
+{
+ u32 rate_flags;
+ int rate_idx;
+ u8 rate_plcp;
+
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc)) {
+ tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
+ tx_cmd->rts_retry_limit =
+ min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
+ } else if (ieee80211_is_back_req(fc)) {
+ tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
+ } else {
+ tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
+ }
+
+ /*
+ * For data packets, rate info comes from the table inside the fw. This
+ * table is controlled by LINK_QUALITY commands.
+ */
+
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ return;
+ } else if (ieee80211_is_back_req(fc)) {
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ }
+
+ /* HT rate doesn't make sense for a non data frame */
+ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
+ "Got an HT rate for a non data frame 0x%x\n",
+ info->control.rates[0].flags);
+
+ rate_idx = info->control.rates[0].idx;
+ /* if the rate isn't a well known legacy rate, take the lowest one */
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+ rate_idx = rate_lowest_index(
+ &mvm->nvm_data->bands[info->band], sta);
+
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+
+ /* For 2.4 GHZ band, check that there is no need to remap */
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+
+ mvm->mgmt_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
+ mvm->mgmt_last_antenna_idx);
+ rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
+}
+
+/*
+ * Sets the fields in the Tx cmd that are crypto related
+ */
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+ ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
+ TX_CMD_SEC_WEP_KEY_IDX_MSK);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+ break;
+ default:
+ IWL_ERR(mvm, "Unknown encode cipher %x\n", keyconf->cipher);
+ break;
+ }
+}
+
+/*
+ * Allocates and fills the Tx cmd, and sets the driver data pointers in the skb
+ */
+static struct iwl_device_cmd *
+iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta, u8 sta_id)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_device_cmd *dev_cmd;
+ struct iwl_tx_cmd *tx_cmd;
+
+ dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
+
+ if (unlikely(!dev_cmd))
+ return NULL;
+
+ memset(dev_cmd, 0, sizeof(*dev_cmd));
+ tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+
+ if (info->control.hw_key)
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+
+ iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
+
+ iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ info->driver_data[0] = NULL;
+ info->driver_data[1] = dev_cmd;
+
+ return dev_cmd;
+}
+
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_device_cmd *dev_cmd;
+ struct iwl_tx_cmd *tx_cmd;
+ u8 sta_id;
+
+ if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
+ return -1;
+
+ if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+ (!info->control.vif ||
+ info->hw_queue != info->control.vif->cab_queue)))
+ return -1;
+
+ /*
+ * If the interface on which the frame is sent is the P2P_DEVICE
+ * or an AP/GO interface, use the broadcast station associated
+ * with it; otherwise use the AUX station.
+ */
+ if (info->control.vif &&
+ (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+ info->control.vif->type == NL80211_IFTYPE_AP)) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ sta_id = mvmvif->bcast_sta.sta_id;
+ } else {
+ sta_id = mvm->aux_sta.sta_id;
+ }
+
+ IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+ if (!dev_cmd)
+ return -1;
+
+ /* From now on, we cannot access info->control */
+ tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Builds the Tx cmd for an skb to a known station and queues it for TX
+ */
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_device_cmd *dev_cmd;
+ struct iwl_tx_cmd *tx_cmd;
+ __le16 fc;
+ u16 seq_number = 0;
+ u8 tid = IWL_MAX_TID_COUNT;
+ u8 txq_id = info->hw_queue;
+ bool is_data_qos = false, is_ampdu = false;
+
+ mvmsta = (void *)sta->drv_priv;
+ fc = hdr->frame_control;
+
+ if (WARN_ON_ONCE(!mvmsta))
+ return -1;
+
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_INVALID_STATION))
+ return -1;
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+ if (!dev_cmd)
+ goto drop;
+
+ tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ /* From now on, we cannot access info->control */
+
+ spin_lock(&mvmsta->lock);
+
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ u8 *qc = NULL;
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ goto drop_unlock_sta;
+
+ seq_number = mvmsta->tid_data[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ is_data_qos = true;
+ is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+ }
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+
+ if (is_ampdu) {
+ if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
+ goto drop_unlock_sta;
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ }
+
+ IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
+ tid, txq_id, seq_number);
+
+ /* NOTE: aggregation will need changes here (for txq id) */
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
+ goto drop_unlock_sta;
+
+ if (is_data_qos && !ieee80211_has_morefrags(fc))
+ mvmsta->tid_data[tid].seq_number = seq_number;
+
+ spin_unlock(&mvmsta->lock);
+
+ if (mvmsta->vif->type == NL80211_IFTYPE_AP &&
+ txq_id < IWL_FIRST_AMPDU_QUEUE)
+ atomic_inc(&mvmsta->pending_frames);
+
+ return 0;
+
+drop_unlock_sta:
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+drop:
+ return -1;
+}
+
+static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 tid)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ struct ieee80211_vif *vif = mvmsta->vif;
+
+ lockdep_assert_held(&mvmsta->lock);
+
+ if (tid_data->ssn != tid_data->next_reclaimed)
+ return;
+
+ switch (tid_data->state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can continue addBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
+ tid_data->state = IWL_AGG_STARTING;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can continue DELBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
+ iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+ tid_data->state = IWL_AGG_OFF;
+ /*
+ * We can't hold the mutex here, but since we are after a sequence
+ * point (the call to iwl_trans_txq_disable), we don't even need
+ * a memory barrier.
+ */
+ mvm->queue_to_mac80211[tid_data->txq_id] =
+ IWL_INVALID_MAC80211_QUEUE;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(SMALL_CF_POLL);
+ TX_STATUS_FAIL(FW_DROP);
+ TX_STATUS_FAIL(STA_COLOR_MISMATCH);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+/*
+ * Translate ucode response to mac80211 tx status control values
+ */
+static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+ info->status.antenna =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ }
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ r->flags |= IEEE80211_TX_RC_MCS;
+ r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ ieee80211_rate_set_vht(
+ r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1);
+ r->flags |= IEEE80211_TX_RC_VHT_MCS;
+ } else {
+ r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ info->band);
+ }
+}
+
+static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct ieee80211_sta *sta;
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
+ struct iwl_mvm_sta *mvmsta;
+ struct sk_buff_head skbs;
+ u8 skb_freed = 0;
+ u16 next_reclaimed, seq_ctl;
+
+ __skb_queue_head_init(&skbs);
+
+ seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+ /* we can free until ssn % q.n_bd not inclusive */
+ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ skb_freed++;
+
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+ /* inform mac80211 about what happened with the frame */
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ break;
+ case TX_STATUS_FAIL_DEST_PS:
+ info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ break;
+ default:
+ break;
+ }
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate),
+ info);
+
+ /* Single frame failure in an AMPDU queue => send BAR */
+ if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
+ !(info->flags & IEEE80211_TX_STAT_ACK)) {
+ /* there must be only one skb in the skb_list */
+ WARN_ON_ONCE(skb_freed > 1 ||
+ !skb_queue_empty(&skbs));
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ }
+
+ /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
+ if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ seq_ctl = le16_to_cpu(hdr->seq_ctrl);
+ }
+
+ ieee80211_tx_status_ni(mvm->hw, skb);
+ }
+
+ if (txq_id >= IWL_FIRST_AMPDU_QUEUE) {
+ /* If this is an aggregation queue, we use the ssn since:
+ * ssn = wifi seq_num % 256.
+ * The seq_ctl is the sequence control of the packet to which
+ * this Tx response relates. But if there is a hole in the
+ * bitmap of the BA we received, this Tx response may allow us to
+ * reclaim the hole and all the subsequent packets that were
+ * already acked. In that case, seq_ctl != ssn, and the next
+ * packet to be reclaimed will be ssn and not seq_ctl. In that
+ * case, several packets will be reclaimed even if
+ * frame_count = 1.
+ *
+ * The ssn is the index (% 256) of the latest packet that has been
+ * treated (acked / dropped) + 1.
+ */
+ next_reclaimed = ssn;
+ } else {
+ /* The next packet to be reclaimed is the one after this one */
+ next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10);
+ }
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TXQ %d status %s (0x%08x)\n\t\t\t\tinitial_rate 0x%x "
+ "retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
+ txq_id, iwl_mvm_get_tx_fail_reason(status),
+ status, le32_to_cpu(tx_resp->initial_rate),
+ tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
+ ssn, next_reclaimed, seq_ctl);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (!IS_ERR_OR_NULL(sta)) {
+ mvmsta = (void *)sta->drv_priv;
+
+ if (tid != IWL_TID_NON_QOS) {
+ struct iwl_mvm_tid_data *tid_data =
+ &mvmsta->tid_data[tid];
+
+ spin_lock_bh(&mvmsta->lock);
+ tid_data->next_reclaimed = next_reclaimed;
+ IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ iwl_mvm_check_ratid_empty(mvm, sta, tid);
+ spin_unlock_bh(&mvmsta->lock);
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ mvmsta->last_seq_ctl = seq_ctl;
+#endif
+ } else {
+ sta = NULL;
+ mvmsta = NULL;
+ }
+
+ /*
+ * If the txq is not an AMPDU queue, there is no chance we freed
+ * several skbs. Check that out...
+ * If there are no pending frames for this STA, notify mac80211 that
+ * this station can go to sleep in its STA table.
+ */
+ if (txq_id < IWL_FIRST_AMPDU_QUEUE && mvmsta &&
+ !WARN_ON(skb_freed > 1) &&
+ mvmsta->vif->type == NL80211_IFTYPE_AP &&
+ atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) {
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
+ }
+
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
+static const char *iwl_get_agg_tx_status(u16 status)
+{
+ switch (status & AGG_TX_STATE_STATUS_MSK) {
+ AGG_TX_STATE_(TRANSMITTED);
+ AGG_TX_STATE_(UNDERRUN);
+ AGG_TX_STATE_(BT_PRIO);
+ AGG_TX_STATE_(FEW_BYTES);
+ AGG_TX_STATE_(ABORT);
+ AGG_TX_STATE_(LAST_SENT_TTL);
+ AGG_TX_STATE_(LAST_SENT_TRY_CNT);
+ AGG_TX_STATE_(LAST_SENT_BT_KILL);
+ AGG_TX_STATE_(SCD_QUERY);
+ AGG_TX_STATE_(TEST_BAD_CRC32);
+ AGG_TX_STATE_(RESPONSE);
+ AGG_TX_STATE_(DUMP_TX);
+ AGG_TX_STATE_(DELAY_TX);
+ }
+
+ return "UNKNOWN";
+}
+
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct agg_tx_status *frame_status = &tx_resp->status;
+ int i;
+
+ for (i = 0; i < tx_resp->frame_count; i++) {
+ u16 fstatus = le16_to_cpu(frame_status[i].status);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
+ iwl_get_agg_tx_status(fstatus),
+ fstatus & AGG_TX_STATE_STATUS_MSK,
+ (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
+ AGG_TX_STATE_TRY_CNT_POS,
+ le16_to_cpu(frame_status[i].sequence));
+ }
+}
+#else
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ struct ieee80211_sta *sta;
+
+ if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_FIRST_AMPDU_QUEUE))
+ return;
+
+ if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
+ return;
+
+ iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ mvmsta->tid_data[tid].rate_n_flags =
+ le32_to_cpu(tx_resp->initial_rate);
+ }
+
+ rcu_read_unlock();
+}
+
+int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+
+ if (tx_resp->frame_count == 1)
+ iwl_mvm_rx_tx_cmd_single(mvm, pkt);
+ else
+ iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
+
+ return 0;
+}
+
+int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
+ struct sk_buff_head reclaimed_skbs;
+ struct iwl_mvm_tid_data *tid_data;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ int sta_id, tid, freed;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
+
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
+
+ sta_id = ba_notif->sta_id;
+ tid = ba_notif->tid;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* Reclaiming frames for a station that has been deleted ? */
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ mvmsta = (void *)sta->drv_priv;
+ tid_data = &mvmsta->tid_data[tid];
+
+ if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
+ tid_data->txq_id, tid, scd_flow)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ spin_lock_bh(&mvmsta->lock);
+
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /*
+ * Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+ iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
+ &reclaimed_skbs);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+ (u8 *)&ba_notif->sta_addr_lo32,
+ ba_notif->sta_id);
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+ ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+ (unsigned long long)le64_to_cpu(ba_notif->bitmap),
+ scd_flow, ba_resp_scd_ssn, ba_notif->txed,
+ ba_notif->txed_2_done);
+
+ tid_data->next_reclaimed = ba_resp_scd_ssn;
+
+ iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+ freed = 0;
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(1);
+
+ info = IEEE80211_SKB_CB(skb);
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ if (freed == 1) {
+ /* this is the first skb we deliver in this batch */
+ /* put the rate scaling data there */
+ info = IEEE80211_SKB_CB(skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = ba_notif->txed_2_done;
+ info->status.ampdu_len = ba_notif->txed;
+ iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags,
+ info);
+ }
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ rcu_read_unlock();
+
+ while (!skb_queue_empty(&reclaimed_skbs)) {
+ skb = __skb_dequeue(&reclaimed_skbs);
+ ieee80211_tx_status_ni(mvm->hw, skb);
+ }
+
+ return 0;
+}
+
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
+{
+ int ret;
+ struct iwl_tx_path_flush_cmd flush_cmd = {
+ .queues_ctl = cpu_to_le32(tfd_msk),
+ .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
+ };
+
+ u32 flags = sync ? CMD_SYNC : CMD_ASYNC;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+ sizeof(flush_cmd), &flush_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
new file mode 100644
index 000000000000..000e842c2edd
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -0,0 +1,472 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "iwl-io.h"
+
+#include "mvm.h"
+#include "fw-api-rs.h"
+
+/*
+ * Will return 0 even if the cmd failed when RFKILL is asserted unless
+ * CMD_WANT_SKB is set in cmd->flags.
+ */
+int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
+{
+ int ret;
+
+ /*
+ * Synchronous commands from this op-mode must hold
+ * the mutex, this ensures we don't try to send two
+ * (or more) synchronous commands at a time.
+ */
+ if (!(cmd->flags & CMD_ASYNC))
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_send_cmd(mvm->trans, cmd);
+
+ /*
+ * If the caller wants the SKB, then don't hide any problems, the
+ * caller might access the response buffer which will be NULL if
+ * the command failed.
+ */
+ if (cmd->flags & CMD_WANT_SKB)
+ return ret;
+
+ /* Silently ignore failures if RFKILL is asserted */
+ if (!ret || ret == -ERFKILL)
+ return 0;
+ return ret;
+}
+
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+ u32 flags, u16 len, const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
+ u32 *status)
+{
+ struct iwl_rx_packet *pkt;
+ struct iwl_cmd_response *resp;
+ int ret, resp_len;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /*
+ * Only synchronous commands can wait for status;
+ * we use CMD_WANT_SKB internally, so the caller can't.
+ */
+ if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
+ "cmd flags %x", cmd->flags))
+ return -EINVAL;
+
+ cmd->flags |= CMD_SYNC | CMD_WANT_SKB;
+
+ ret = iwl_trans_send_cmd(mvm->trans, cmd);
+ if (ret == -ERFKILL) {
+ /*
+ * The command failed because of RFKILL, don't update
+ * the status, leave it as success and return 0.
+ */
+ return 0;
+ } else if (ret) {
+ return ret;
+ }
+
+ pkt = cmd->resp_pkt;
+ /* Can happen if RFKILL is asserted */
+ if (!pkt) {
+ ret = 0;
+ goto out_free_resp;
+ }
+
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32_to_cpu(resp->status);
+ out_free_resp:
+ iwl_free_resp(cmd);
+ return ret;
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+ const void *data, u32 *status)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ };
+
+ return iwl_mvm_send_cmd_status(mvm, &cmd, status);
+}
+
+#define IWL_DECLARE_RATE_INFO(r) \
+ [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
+
+/*
+ * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
+ */
+static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1),
+ IWL_DECLARE_RATE_INFO(2),
+ IWL_DECLARE_RATE_INFO(5),
+ IWL_DECLARE_RATE_INFO(11),
+ IWL_DECLARE_RATE_INFO(6),
+ IWL_DECLARE_RATE_INFO(9),
+ IWL_DECLARE_RATE_INFO(12),
+ IWL_DECLARE_RATE_INFO(18),
+ IWL_DECLARE_RATE_INFO(24),
+ IWL_DECLARE_RATE_INFO(36),
+ IWL_DECLARE_RATE_INFO(48),
+ IWL_DECLARE_RATE_INFO(54),
+};
+
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+ enum ieee80211_band band)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+ int idx;
+ int band_offset = 0;
+
+ /* Legacy rate format, search for match in table */
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (fw_rate_idx_to_plcp[idx] == rate)
+ return idx - band_offset;
+
+ return -1;
+}
+
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
+{
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ return fw_rate_idx_to_plcp[rate_idx];
+}
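
To make the table lookup above concrete, an illustrative expansion (not new code):

	/* IWL_DECLARE_RATE_INFO(6) expands to
	 *	[IWL_RATE_6M_INDEX] = IWL_RATE_6M_PLCP
	 * so fw_rate_idx_to_plcp[] maps a firmware rate index to its PLCP
	 * value; on the 5 GHz band the search skips the four CCK entries,
	 * making the returned mac80211 index relative to the OFDM rates.
	 */
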
+
+int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_error_resp *err_resp = (void *)pkt->data;
+
+ IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
+ le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
+ IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
+ le16_to_cpu(err_resp->bad_cmd_seq_num),
+ le32_to_cpu(err_resp->error_service));
+ IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
+ le64_to_cpu(err_resp->timestamp));
+ return 0;
+}
+
+/*
+ * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
+ * The parameter should also be a combination of ANT_[ABC].
+ */
+u8 first_antenna(u8 mask)
+{
+ BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
+ WARN_ON_ONCE(!mask); /* ffs will return 0 if mask is zeroed */
+ return (u8)(BIT(ffs(mask) - 1)); /* ffs() is 1-based */
+}
+
+/*
+ * Toggles between TX antennas to send the probe request on.
+ * Receives the bitmask of valid TX antennas and the *index* used
+ * for the last TX, and returns the next valid *index* to use.
+ * To set it in the tx_cmd, use BIT(idx).
+ */
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
+{
+ u8 ind = last_idx;
+ int i;
+
+ for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
+ ind = (ind + 1) % RATE_MCS_ANT_NUM;
+ if (valid & BIT(ind))
+ return ind;
+ }
+
+ WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+ return last_idx;
+}
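
A hedged sketch of how the two antenna helpers above could be combined when building rate_n_flags (the last_antenna_idx pointer and the RATE_MCS_ANT_POS shift are assumptions for illustration only):

	/* Sketch: toggle the TX antenna and fold it into rate_n_flags. */
	static u32 iwl_mvm_example_ant_flags(struct iwl_mvm *mvm,
					     u8 valid_tx_ant,
					     u8 *last_antenna_idx)
	{
		*last_antenna_idx = iwl_mvm_next_antenna(mvm, valid_tx_ant,
							 *last_antenna_idx);

		/* the fw wants a bitmask here, hence BIT(idx) */
		return BIT(*last_antenna_idx) << RATE_MCS_ANT_POS;
	}
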
+
+static struct {
+ char *name;
+ u8 num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *desc_lookup(u32 num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+ if (advanced_lookup[i].num == num)
+ return advanced_lookup[i].name;
+
+ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+ return advanced_lookup[i].name;
+}
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 bcon_time; /* beacon timer */
+ u32 tsf_low; /* network timestamp function timer */
+ u32 tsf_hi; /* network timestamp function timer */
+ u32 gp1; /* GP1 timer register */
+ u32 gp2; /* GP2 timer register */
+ u32 gp3; /* GP3 timer register */
+ u32 ucode_ver; /* uCode version */
+ u32 hw_ver; /* HW Silicon version */
+ u32 brd_ver; /* HW board version */
+ u32 log_pc; /* log program counter */
+ u32 frame_ptr; /* frame pointer */
+ u32 stack_ptr; /* stack pointer */
+ u32 hcmd; /* last host command header */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
+ u32 wait_event; /* wait event() caller address */
+ u32 l2p_control; /* L2pControlField */
+ u32 l2p_duration; /* L2pDurationField */
+ u32 l2p_mhvalid; /* L2pMhValidBits */
+ u32 l2p_addr_match; /* L2pAddrMatchStat */
+ u32 lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* timestamp of the compilation date and
+ * time */
+ u32 flow_handler; /* FH read/write pointers, RX credit */
+} __packed;
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_error_event_table table;
+ u32 base;
+
+ base = mvm->error_event_table;
+ if (mvm->cur_ucode == IWL_UCODE_INIT) {
+ if (!base)
+ base = mvm->fw->init_errlog_ptr;
+ } else {
+ if (!base)
+ base = mvm->fw->inst_errlog_ptr;
+ }
+
+ if (base < 0x800000 || base >= 0x80C000) {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (mvm->cur_ucode == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+ table.data1, table.data2, table.data3,
+ table.blink1, table.blink2, table.ilink1,
+ table.ilink2, table.bcon_time, table.gp1,
+ table.gp2, table.gp3, table.ucode_ver,
+ table.hw_ver, table.brd_ver);
+ IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
+ IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+ IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+ IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+ IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+ IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+ IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+ IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+ IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
+ IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+ IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+ IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+ IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+ IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+/**
+ * iwl_mvm_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * In the special case where init is set, a callback is invoked to clear
+ * the state indicating that station creation is in progress.
+ */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+ u8 flags, bool init)
+{
+ struct iwl_host_cmd cmd = {
+ .id = LQ_CMD,
+ .len = { sizeof(struct iwl_lq_cmd), },
+ .flags = flags,
+ .data = { lq, },
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
+ return -EINVAL;
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
index f8620ecae6b4..ff3389757281 100644
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
index 244019cec3e1..e7de33128b16 100644
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
index 83ca40321ff1..5096f7c96ab6 100644
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index d4df976d4709..801ff49796dd 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/7000.c b/drivers/net/wireless/iwlwifi/pcie/7000.c
new file mode 100644
index 000000000000..6e35b2b72332
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/7000.c
@@ -0,0 +1,111 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "cfg.h"
+
+/* Highest firmware API version supported */
+#define IWL7260_UCODE_API_MAX 6
+#define IWL3160_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL7260_UCODE_API_OK 6
+#define IWL3160_UCODE_API_OK 6
+
+/* Lowest firmware API version supported */
+#define IWL7260_UCODE_API_MIN 6
+#define IWL3160_UCODE_API_MIN 6
+
+/* NVM versions */
+#define IWL7260_NVM_VERSION 0x0a1d
+#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */
+#define IWL3160_NVM_VERSION 0x709
+#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
+
+#define IWL7260_FW_PRE "iwlwifi-7260-"
+#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
+
+#define IWL3160_FW_PRE "iwlwifi-3160-"
+#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl7000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_ht_params iwl7000_ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_7000 \
+ .ucode_api_max = IWL7260_UCODE_API_MAX, \
+ .ucode_api_ok = IWL7260_UCODE_API_OK, \
+ .ucode_api_min = IWL7260_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_7000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl7000_base_params, \
+ /* TODO: .bt_params? */ \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true \
+
+
+const struct iwl_cfg iwl7260_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC7260",
+ .fw_name_pre = IWL7260_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC3160",
+ .fw_name_pre = IWL3160_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL3160_NVM_VERSION,
+ .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
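
For reference, the __stringify() expansion above makes the driver request these firmware files (with the current API_OK value of 6):

	/* IWL7260_MODULE_FIRMWARE(6) -> "iwlwifi-7260-6.ucode"
	 * IWL3160_MODULE_FIRMWARE(6) -> "iwlwifi-3160-6.ucode"
	 */
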
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
index 82152311d73b..c6f8e83c3551 100644
--- a/drivers/net/wireless/iwlwifi/pcie/cfg.h
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,5 +109,7 @@ extern const struct iwl_cfg iwl6035_2agn_cfg;
extern const struct iwl_cfg iwl105_bgn_cfg;
extern const struct iwl_cfg iwl105_bgn_d_cfg;
extern const struct iwl_cfg iwl135_bgn_cfg;
+extern const struct iwl_cfg iwl7260_2ac_cfg;
+extern const struct iwl_cfg iwl3160_ac_cfg;
#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index c2e141af353c..7bc0fb9128dd 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -255,6 +255,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+/* 7000 Series */
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)},
+
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d91d2e8c62f5..aa2a39a637dd 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -222,8 +222,6 @@ struct iwl_txq {
* @rx_replenish: work that will be called when buffers need to be allocated
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
- * @irq - the irq number for the device
- * @irq_requested: true when the irq has been requested
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
@@ -234,8 +232,10 @@ struct iwl_txq {
* @status - transport specific status flags
* @cmd_queue - command queue number
* @rx_buf_size_8k: 8 kB RX buffer size
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @rx_page_order: page order for receive buffer size
* @wd_timeout: queue watchdog timeout (jiffies)
+ * @reg_lock: protect hw register access
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
@@ -249,11 +249,8 @@ struct iwl_trans_pcie {
int ict_index;
u32 inta;
bool use_ict;
- bool irq_requested;
- struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
- unsigned int irq;
spinlock_t irq_lock;
u32 inta_mask;
u32 scd_base_addr;
@@ -279,12 +276,16 @@ struct iwl_trans_pcie {
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
bool rx_buf_size_8k;
+ bool bc_table_dword;
u32 rx_page_order;
const char **command_names;
/* queue watchdog */
unsigned long wd_timeout;
+
+ /* protect hw register */
+ spinlock_t reg_lock;
};
/**
@@ -328,7 +329,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
-void iwl_pcie_tasklet(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
@@ -359,6 +360,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+
/*****************************************************
* Error handling
******************************************************/
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index dad4c4aad91f..b0ae06d2456f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -81,10 +81,10 @@
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ * If there were enough free buffers and RX_STALLED is set it is cleared.
*
*
* Driver sequence:
@@ -214,9 +214,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
/*
* If the device isn't enabled - not need to try to add buffers...
* This can happen when we stop the device and still have an interrupt
- * pending. We stop the APM before we sync the interrupts / tasklets
- * because we have to (see comment there). On the other hand, since
- * the APM is stopped, we cannot access the HW (in particular not prph).
+ * pending. We stop the APM before we sync the interrupts because we
+ * have to (see comment there). On the other hand, since the APM is
+ * stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
@@ -436,7 +436,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
err_rb_stts:
dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd_dma = 0;
rxq->bd = NULL;
err_bd:
return -ENOMEM;
@@ -455,6 +455,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
/* Stop Rx DMA */
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ /* reset and flush pointers */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
/* Reset driver's Rx queue write index */
iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
@@ -491,7 +495,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
-
int i, err;
unsigned long flags;
@@ -518,6 +521,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
rxq->free_count = 0;
+ memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock_irqrestore(&rxq->lock, flags);
iwl_pcie_rx_replenish(trans);
@@ -545,13 +549,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}
+ cancel_work_sync(&trans_pcie->rx_replenish);
+
spin_lock_irqsave(&rxq->lock, flags);
iwl_pcie_rxq_free_rbs(trans);
spin_unlock_irqrestore(&rxq->lock, flags);
dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd_dma = 0;
rxq->bd = NULL;
if (rxq->rb_stts)
@@ -560,7 +566,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts, rxq->rb_stts_dma);
else
IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
- memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts_dma = 0;
rxq->rb_stts = NULL;
}
@@ -588,6 +594,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int index, cmd_index, err, len;
struct iwl_rx_cmd_buffer rxcb = {
._offset = offset,
+ ._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page,
._page_stolen = false,
.truesize = max_len,
@@ -789,11 +796,14 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
wake_up(&trans_pcie->wait_command_queue);
+ local_bh_disable();
iwl_op_mode_nic_error(trans->op_mode);
+ local_bh_enable();
}
-void iwl_pcie_tasklet(struct iwl_trans *trans)
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
+ struct iwl_trans *trans = dev_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
@@ -804,6 +814,8 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
u32 inta_mask;
#endif
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Ack/clear/reset pending uCode interrupts.
@@ -848,7 +860,7 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
handled |= CSR_INT_BIT_HW_ERR;
- return;
+ goto out;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -998,6 +1010,10 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
iwl_enable_rfkill_int(trans);
+
+out:
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_HANDLED;
}
/******************************************************************************
@@ -1120,7 +1136,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
+ * If we have something to service, the irq thread will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here. */
inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -1160,12 +1176,13 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
#endif
trans_pcie->inta |= inta;
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
+ /* the thread will service interrupts and re-enable them */
if (likely(inta))
- tasklet_schedule(&trans_pcie->irq_tasklet);
+ return IRQ_WAKE_THREAD;
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
+ return IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service. */
@@ -1269,9 +1286,10 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
trans_pcie->inta |= inta;
/* iwl_pcie_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+ if (likely(inta)) {
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ return IRQ_WAKE_THREAD;
+ } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta) {
/* Allow interrupt if was disabled by this handler and
* no tasklet was schedules, We should not enable interrupt,
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 35708b959ad6..17bedc50e753 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,21 +75,43 @@
#include "iwl-agn-hw.h"
#include "internal.h"
-static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
+static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
+ u32 v;
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
- iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
+static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
+{
+ if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ else
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
@@ -259,7 +281,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- iwl_pcie_set_pwr_vmain(trans);
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -435,7 +457,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw)
+ const struct fw_img *fw, bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@@ -454,7 +476,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- if (hw_rfkill)
+ if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
@@ -534,12 +556,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
iwl_enable_rfkill_int(trans);
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(trans_pcie->irq);
- tasklet_kill(&trans_pcie->irq_tasklet);
-
- cancel_work_sync(&trans_pcie->rx_replenish);
-
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
@@ -551,46 +567,87 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
clear_bit(STATUS_RFKILL, &trans_pcie->status);
}
-static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
{
/* let the ucode operate on its own */
iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
iwl_disable_interrupts(trans);
+ iwl_pcie_disable_ict(trans);
+
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * reset TX queues -- some of their registers reset during S3
+ * so if we don't reset everything here the D3 image would try
+ * to execute some invalid memory upon resume
+ */
+ iwl_trans_pcie_tx_reset(trans);
+
+ iwl_pcie_set_pwr(trans, true);
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int err;
- bool hw_rfkill;
+ u32 val;
+ int ret;
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ iwl_pcie_set_pwr(trans, false);
- if (!trans_pcie->irq_requested) {
- tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_pcie_tasklet, (unsigned long)trans);
+ val = iwl_read32(trans, CSR_RESET);
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
+ *status = IWL_D3_STATUS_RESET;
+ return 0;
+ }
- iwl_pcie_alloc_ict(trans);
+ /*
+ * Also enables interrupts - none will happen as the device doesn't
+ * know we're waking it up, only when the opmode actually tells it
+ * after this call.
+ */
+ iwl_pcie_reset_ict(trans);
- err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
- IRQF_SHARED, DRV_NAME, trans);
- if (err) {
- IWL_ERR(trans, "Error allocating IRQ %d\n",
- trans_pcie->irq);
- goto error;
- }
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret) {
+ IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
+ return ret;
+ }
- trans_pcie->irq_requested = true;
+ iwl_trans_pcie_tx_reset(trans);
+
+ ret = iwl_pcie_rx_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
+ return ret;
}
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ *status = IWL_D3_STATUS_ALIVE;
+ return 0;
+}
+
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+{
+ bool hw_rfkill;
+ int err;
+
err = iwl_pcie_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
- goto err_free_irq;
+ return err;
}
iwl_pcie_apm_init(trans);
@@ -601,15 +658,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- return err;
-
-err_free_irq:
- trans_pcie->irq_requested = false;
- free_irq(trans_pcie->irq, trans);
-error:
- iwl_pcie_free_ict(trans);
- tasklet_kill(&trans_pcie->irq_tasklet);
- return err;
+ return 0;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
@@ -703,19 +752,20 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
trans_pcie->command_names = trans_cfg->command_names;
+ trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ synchronize_irq(trans_pcie->pci_dev->irq);
+
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
- if (trans_pcie->irq_requested == true) {
- free_irq(trans_pcie->irq, trans);
- iwl_pcie_free_ict(trans);
- }
+ free_irq(trans_pcie->pci_dev->irq, trans);
+ iwl_pcie_free_ict(trans);
pci_disable_msi(trans_pcie->pci_dev);
iounmap(trans_pcie->hw_base);
@@ -751,13 +801,126 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- if (!hw_rfkill)
- iwl_enable_interrupts(trans);
-
return 0;
}
#endif /* CONFIG_PM_SLEEP */
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
+ unsigned long *flags)
+{
+ int ret;
+ struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
+ spin_lock_irqsave(&pcie_trans->reg_lock, *flags);
+
+ /* this bit wakes up the NIC */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ * 5000 series and later (including 1000 series) have non-volatile SRAM,
+ * and do not save/restore SRAM when power cycling.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (unlikely(ret < 0)) {
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ if (!silent) {
+ u32 val = iwl_read32(trans, CSR_GP_CNTRL);
+ WARN_ONCE(1,
+ "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+ val);
+ spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
+ return false;
+ }
+ }
+
+ /*
+ * Fool sparse by faking that we release the lock - sparse will
+ * track nic_access anyway.
+ */
+ __release(&pcie_trans->reg_lock);
+ return true;
+}
+
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+ unsigned long *flags)
+{
+ struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&pcie_trans->reg_lock);
+
+ /*
+ * Fool sparse by faking that we acquire the lock - sparse will
+ * track nic_access anyway.
+ */
+ __acquire(&pcie_trans->reg_lock);
+
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ /*
+ * Above we read the CSR_GP_CNTRL register, which will flush
+ * any previous writes, but we need the write that clears the
+ * MAC_ACCESS_REQ bit to be performed before any other writes
+ * scheduled on different CPUs (after we drop reg_lock).
+ */
+ mmiowb();
+ spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
+}
+
+static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ unsigned long flags;
+ int offs, ret = 0;
+ u32 *vals = buf;
+
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ unsigned long flags;
+ int offs, ret = 0;
+ u32 *vals = buf;
+
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ iwl_write32(trans, HBUS_TARG_MEM_WDAT,
+ vals ? vals[offs] : 0);
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
+}
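
The two memory accessors above plug into the transport's .read_mem/.write_mem hooks; a hedged caller sketch using the generic wrapper already referenced elsewhere in this patch (the helper name and queue argument are illustrative):

	/* Sketch: read one scheduler status dword via iwl_trans_read_mem32(). */
	static u32 iwl_example_read_scd_stts(struct iwl_trans *trans,
					     u32 scd_base_addr, int queue)
	{
		return iwl_trans_read_mem32(trans, scd_base_addr +
					    SCD_TX_STTS_QUEUE_OFFSET(queue));
	}
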
+
#define IWL_FLUSH_WAIT_MS 2000
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
@@ -767,6 +930,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
+ u32 scd_sram_addr;
+ u8 buf[16];
int ret = 0;
/* waiting for all the tx frames complete might take a while */
@@ -780,14 +945,64 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
msleep(1);
if (q->read_ptr != q->write_ptr) {
- IWL_ERR(trans, "fail to flush all tx fifo queues\n");
+ IWL_ERR(trans,
+ "fail to flush all tx fifo queues Q %d\n", cnt);
ret = -ETIMEDOUT;
break;
}
}
+
+ if (!ret)
+ return 0;
+
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
+
+ scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
+
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
+
+ if (cnt & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ cnt, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans,
+ SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
+ }
+
return ret;
}
+static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+}
+
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
@@ -1212,7 +1427,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
+ .d3_suspend = iwl_trans_pcie_d3_suspend,
+ .d3_resume = iwl_trans_pcie_d3_resume,
.send_cmd = iwl_trans_pcie_send_hcmd,
@@ -1235,8 +1451,13 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.read32 = iwl_trans_pcie_read32,
.read_prph = iwl_trans_pcie_read_prph,
.write_prph = iwl_trans_pcie_write_prph,
+ .read_mem = iwl_trans_pcie_read_mem,
+ .write_mem = iwl_trans_pcie_write_mem,
.configure = iwl_trans_pcie_configure,
.set_pmi = iwl_trans_pcie_set_pmi,
+ .grab_nic_access = iwl_trans_pcie_grab_nic_access,
+ .release_nic_access = iwl_trans_pcie_release_nic_access,
+ .set_bits_mask = iwl_trans_pcie_set_bits_mask,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1258,8 +1479,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans->ops = &trans_ops_pcie;
trans->cfg = cfg;
+ trans_lockdep_init(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
+ spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
/* W/A - seems to solve weird behavior. We need to remove this if we
@@ -1318,7 +1541,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
trans->dev = &pdev->dev;
- trans_pcie->irq = pdev->irq;
trans_pcie->pci_dev = pdev;
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
@@ -1327,7 +1549,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- spin_lock_init(&trans->reg_lock);
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
@@ -1344,8 +1565,24 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (!trans->dev_cmd_pool)
goto out_pci_disable_msi;
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
+ if (iwl_pcie_alloc_ict(trans))
+ goto out_free_cmd_pool;
+
+ if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans)) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+ goto out_free_ict;
+ }
+
return trans;
+out_free_ict:
+ iwl_pcie_free_ict(trans);
+out_free_cmd_pool:
+ kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
pci_disable_msi(pdev);
out_pci_release_regions:
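
The tasklet-to-threaded-IRQ conversion in this patch follows the standard request_threaded_irq() split; a generic, driver-agnostic sketch of the pattern (all example_* names are placeholders):

	/* Hard handler: runs in interrupt context, only acks/disables and
	 * defers the real work to the thread. */
	static irqreturn_t example_hard_isr(int irq, void *dev_id)
	{
		struct example_dev *dev = dev_id;

		if (!example_irq_pending(dev))
			return IRQ_NONE;

		example_disable_irqs(dev);	/* thread re-enables them */
		return IRQ_WAKE_THREAD;
	}

	/* Threaded handler: runs in process context and may sleep. */
	static irqreturn_t example_irq_thread(int irq, void *dev_id)
	{
		struct example_dev *dev = dev_id;

		example_service_events(dev);
		example_enable_irqs(dev);
		return IRQ_HANDLED;
	}

	/* Registered at probe time, e.g.:
	 *	request_threaded_irq(pdev->irq, example_hard_isr,
	 *			     example_irq_thread, IRQF_SHARED,
	 *			     DRV_NAME, dev);
	 */
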
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6c5b867c353a..8e9e3212fe78 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -160,7 +160,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
- iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
@@ -173,9 +173,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
u32 tbl_dw =
- iwl_read_targ_mem(trans,
- trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(i));
+ iwl_trans_read_mem32(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
if (i & 0x1)
tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
@@ -237,7 +237,10 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
break;
}
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -306,6 +309,9 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
return;
}
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
+ txq->q.write_ptr);
+
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
@@ -612,7 +618,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
if (txq->q.n_bd) {
dma_free_coherent(dev, sizeof(struct iwl_tfd) *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
- memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ txq->q.dma_addr = 0;
}
kfree(txq->entries);
@@ -638,9 +644,11 @@ static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 a;
+ int nq = trans->cfg->base_params->num_of_queues;
int chan;
u32 reg_val;
+ int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
+ SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queue are not stopped/used */
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
@@ -652,20 +660,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
WARN_ON(scd_base_addr != 0 &&
scd_base_addr != trans_pcie->scd_base_addr);
- a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
- /* reset conext data memory */
- for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- /* reset tx status memory */
- for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(
- trans->cfg->base_params->num_of_queues);
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
+ /* reset context data, TX status and translation data */
+ iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
@@ -697,6 +695,29 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+ iwl_pcie_txq_unmap(trans, txq_id);
+ txq->q.read_ptr = 0;
+ txq->q.write_ptr = 0;
+ }
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
+}
+
/*
* iwl_pcie_tx_stop - Stop all Tx DMA channels
*/
@@ -905,7 +926,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return;
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
if (txq->q.read_ptr == tfd_num)
goto out;
@@ -949,7 +970,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (iwl_queue_space(&txq->q) > txq->q.low_mark)
iwl_wake_queue(trans, txq);
out:
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
/*
@@ -1002,14 +1023,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
tbl_dw_addr = trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
- tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
+ tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
- iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
+ iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
return 0;
}
@@ -1068,9 +1089,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
@@ -1101,8 +1122,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_set_inactive(trans, txq_id);
- _iwl_write_targ_mem_dwords(trans, stts_addr,
- zero_val, ARRAY_SIZE(zero_val));
+ iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ ARRAY_SIZE(zero_val));
iwl_pcie_txq_unmap(trans, txq_id);
@@ -1350,7 +1371,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
return;
}
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->entries[cmd_index].cmd;
@@ -1384,7 +1405,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->flags = 0;
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
@@ -1642,10 +1663,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
- IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
- le16_to_cpu(dev_cmd->hdr.sequence));
- IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index ec6d5d6b452e..116f4aba08d6 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -657,7 +657,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
capa, intvl, ie, ielen,
LBS_SCAN_RSSI_TO_MBM(rssi),
GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
}
} else
lbs_deb_scan("scan response: missing BSS channel IE\n");
@@ -1444,7 +1444,7 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
done:
if (bss)
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -1766,7 +1766,7 @@ static void lbs_join_post(struct lbs_private *priv,
params->beacon_interval,
fake_ie, fake - fake_ie,
0, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
@@ -2011,7 +2011,7 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
if (bss) {
ret = lbs_ibss_join_existing(priv, params, bss);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
} else
ret = lbs_ibss_start_new(priv, params);
@@ -2081,10 +2081,8 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev)
lbs_deb_enter(LBS_DEB_CFG80211);
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- dev_err(dev, "cannot allocate wireless device\n");
+ if (!wdev)
return ERR_PTR(-ENOMEM);
- }
wdev->wiphy = wiphy_new(&lbs_cfg80211_ops, sizeof(struct lbs_private));
if (!wdev->wiphy) {
@@ -2132,6 +2130,21 @@ static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv)
lbs_deb_leave(LBS_DEB_CFG80211);
}
+static void lbs_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain "
+ "callback for domain %c%c\n", request->alpha2[0],
+ request->alpha2[1]);
+
+ memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
+ if (lbs_iface_active(priv))
+ lbs_set_11d_domain_info(priv);
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+}
/*
* This function get's called after lbs_setup_firmware() determined the
@@ -2184,24 +2197,6 @@ int lbs_cfg_register(struct lbs_private *priv)
return ret;
}
-int lbs_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
-{
- struct lbs_private *priv = wiphy_priv(wiphy);
- int ret = 0;
-
- lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain "
- "callback for domain %c%c\n", request->alpha2[0],
- request->alpha2[1]);
-
- memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
- if (lbs_iface_active(priv))
- ret = lbs_set_11d_domain_info(priv);
-
- lbs_deb_leave(LBS_DEB_CFG80211);
- return ret;
-}
-
void lbs_scan_deinit(struct lbs_private *priv)
{
lbs_deb_enter(LBS_DEB_CFG80211);
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index 558168ce634d..10995f59fe34 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -10,9 +10,6 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev);
int lbs_cfg_register(struct lbs_private *priv);
void lbs_cfg_free(struct lbs_private *priv);
-int lbs_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request);
-
void lbs_send_disconnect_notification(struct lbs_private *priv);
void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index ff9085502bea..cffdf4fbf161 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -48,6 +48,10 @@ static int channels = 1;
module_param(channels, int, 0444);
MODULE_PARM_DESC(channels, "Number of concurrent channels");
+static bool paged_rx = false;
+module_param(paged_rx, bool, 0644);
+MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -333,11 +337,11 @@ struct mac80211_hwsim_data {
int scan_chan_idx;
struct ieee80211_channel *channel;
- unsigned long beacon_int; /* in jiffies unit */
+ u64 beacon_int /* beacon interval in us */;
unsigned int rx_filter;
bool started, idle, scanning;
struct mutex mutex;
- struct timer_list beacon_timer;
+ struct tasklet_hrtimer beacon_timer;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
} ps;
@@ -357,7 +361,10 @@ struct mac80211_hwsim_data {
int power_level;
/* difference between this hw's clock and the real clock, in usecs */
- u64 tsf_offset;
+ s64 tsf_offset;
+ s64 bcn_delta;
+ /* absolute beacon transmission time. Used to cover up "tx" delay. */
+ u64 abs_bcn_ts;
};
@@ -405,15 +412,19 @@ static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static inline u64 mac80211_hwsim_get_tsf_raw(void)
+{
+ return ktime_to_us(ktime_get_real());
+}
+
static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data)
{
- struct timeval tv = ktime_to_timeval(ktime_get_real());
- u64 now = tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
+ u64 now = mac80211_hwsim_get_tsf_raw();
return cpu_to_le64(now + data->tsf_offset);
}
static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif)
{
struct mac80211_hwsim_data *data = hw->priv;
return le64_to_cpu(__mac80211_hwsim_get_tsf(data));
@@ -423,9 +434,13 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u64 tsf)
{
struct mac80211_hwsim_data *data = hw->priv;
- struct timeval tv = ktime_to_timeval(ktime_get_real());
- u64 now = tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
- data->tsf_offset = tsf - now;
+ u64 now = mac80211_hwsim_get_tsf(hw, vif);
+ u32 bcn_int = data->beacon_int;
+ s64 delta = tsf - now;
+
+ data->tsf_offset += delta;
+ /* adjust after beaconing with new timestamp at old TBTT */
+ data->bcn_delta = do_div(delta, bcn_int);
}
static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
@@ -696,7 +711,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_rx_status rx_status;
- struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
+ u64 now;
memset(&rx_status, 0, sizeof(rx_status));
rx_status.flag |= RX_FLAG_MACTIME_START;
@@ -722,11 +737,23 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
secpath_reset(skb);
nf_reset(skb);
+ /*
+ * Get absolute mactime here so all HWs RX at the "same time", and
+ * absolute TX time for beacon mactime so the timestamp matches.
+ * Giving beacons a different mactime than non-beacons looks messy, but
+ * it helps the Toffset be exact and a ~10us mactime discrepancy
+ * probably doesn't really matter.
+ */
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ now = data->abs_bcn_ts;
+ else
+ now = mac80211_hwsim_get_tsf_raw();
+
/* Copy skb to all enabled radios that are on the current frequency */
spin_lock(&hwsim_radio_lock);
list_for_each_entry(data2, &hwsim_radios, list) {
struct sk_buff *nskb;
- struct ieee80211_mgmt *mgmt;
struct tx_iter_data tx_iter_data = {
.receive = false,
.channel = chan,
@@ -755,24 +782,30 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
* reserve some space for our vendor and the normal
* radiotap header, since we're copying anyway
*/
- nskb = skb_copy_expand(skb, 64, 0, GFP_ATOMIC);
- if (nskb == NULL)
- continue;
+ if (skb->len < PAGE_SIZE && paged_rx) {
+ struct page *page = alloc_page(GFP_ATOMIC);
+
+ if (!page)
+ continue;
+
+ nskb = dev_alloc_skb(128);
+ if (!nskb) {
+ __free_page(page);
+ continue;
+ }
+
+ memcpy(page_address(page), skb->data, skb->len);
+ skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len);
+ } else {
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ if (!nskb)
+ continue;
+ }
if (mac80211_hwsim_addr_match(data2, hdr->addr1))
ack = true;
- /* set bcn timestamp relative to receiver mactime */
- rx_status.mactime =
- le64_to_cpu(__mac80211_hwsim_get_tsf(data2));
- mgmt = (struct ieee80211_mgmt *) nskb->data;
- if (ieee80211_is_beacon(mgmt->frame_control) ||
- ieee80211_is_probe_resp(mgmt->frame_control))
- mgmt->u.beacon.timestamp = cpu_to_le64(
- rx_status.mactime +
- (data->tsf_offset - data2->tsf_offset) +
- 24 * 8 * 10 / txrate->bitrate);
-
+ rx_status.mactime = now + data2->tsf_offset;
#if 0
/*
* Don't enable this code by default as the OUI 00:00:00
@@ -896,7 +929,7 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
data->started = false;
- del_timer(&data->beacon_timer);
+ tasklet_hrtimer_cancel(&data->beacon_timer);
wiphy_debug(hw->wiphy, "%s\n", __func__);
}
@@ -962,7 +995,11 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_vif *vif)
{
- struct ieee80211_hw *hw = arg;
+ struct mac80211_hwsim_data *data = arg;
+ struct ieee80211_hw *hw = data->hw;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_rate *txrate;
+ struct ieee80211_mgmt *mgmt;
struct sk_buff *skb;
hwsim_check_magic(vif);
@@ -975,26 +1012,48 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
skb = ieee80211_beacon_get(hw, vif);
if (skb == NULL)
return;
+ info = IEEE80211_SKB_CB(skb);
+ txrate = ieee80211_get_tx_rate(hw, info);
+
+ mgmt = (struct ieee80211_mgmt *) skb->data;
+ /* fake header transmission time */
+ data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw();
+ mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts +
+ data->tsf_offset +
+ 24 * 8 * 10 / txrate->bitrate);
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
}
-
-static void mac80211_hwsim_beacon(unsigned long arg)
+static enum hrtimer_restart
+mac80211_hwsim_beacon(struct hrtimer *timer)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *) arg;
- struct mac80211_hwsim_data *data = hw->priv;
+ struct mac80211_hwsim_data *data =
+ container_of(timer, struct mac80211_hwsim_data,
+ beacon_timer.timer);
+ struct ieee80211_hw *hw = data->hw;
+ u64 bcn_int = data->beacon_int;
+ ktime_t next_bcn;
if (!data->started)
- return;
+ goto out;
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
- mac80211_hwsim_beacon_tx, hw);
+ mac80211_hwsim_beacon_tx, data);
+
+ /* beacon at new TBTT + beacon interval */
+ if (data->bcn_delta) {
+ bcn_int -= data->bcn_delta;
+ data->bcn_delta = 0;
+ }
- data->beacon_timer.expires = jiffies + data->beacon_int;
- add_timer(&data->beacon_timer);
+ next_bcn = ktime_add(hrtimer_get_expires(timer),
+ ns_to_ktime(bcn_int * 1000));
+ tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS);
+out:
+ return HRTIMER_NORESTART;
}
static const char *hwsim_chantypes[] = {
@@ -1032,9 +1091,16 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
- del_timer(&data->beacon_timer);
- else
- mod_timer(&data->beacon_timer, jiffies + data->beacon_int);
+ tasklet_hrtimer_cancel(&data->beacon_timer);
+ else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
+ u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
+ u32 bcn_int = data->beacon_int;
+ u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
+
+ tasklet_hrtimer_start(&data->beacon_timer,
+ ns_to_ktime(until_tbtt * 1000),
+ HRTIMER_MODE_REL);
+ }
return 0;
}
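A tiny worked example (assumed numbers) of the until_tbtt computation used above to arm the beacon timer at the next TBTT boundary:

/* Sketch: time left in the current beacon interval, in plain user-space C.
 * In the kernel, do_div(tsf, bcn_int) returns the remainder used here. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tsf = 1000500;			/* assumed current TSF, in us */
	uint32_t bcn_int = 100 * 1024;		/* assumed 100 TU interval, in us */
	uint64_t until_tbtt = bcn_int - tsf % bcn_int;

	printf("next TBTT in %llu us\n", (unsigned long long)until_tbtt);
	return 0;
}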
@@ -1084,12 +1150,26 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_INT) {
wiphy_debug(hw->wiphy, " BCNINT: %d\n", info->beacon_int);
- data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
- if (WARN_ON(!data->beacon_int))
- data->beacon_int = 1;
- if (data->started)
- mod_timer(&data->beacon_timer,
- jiffies + data->beacon_int);
+ data->beacon_int = info->beacon_int * 1024;
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ wiphy_debug(hw->wiphy, " BCN EN: %d\n", info->enable_beacon);
+ if (data->started &&
+ !hrtimer_is_queued(&data->beacon_timer.timer) &&
+ info->enable_beacon) {
+ u64 tsf, until_tbtt;
+ u32 bcn_int;
+ if (WARN_ON(!data->beacon_int))
+ data->beacon_int = 1000 * 1024;
+ tsf = mac80211_hwsim_get_tsf(hw, vif);
+ bcn_int = data->beacon_int;
+ until_tbtt = bcn_int - do_div(tsf, bcn_int);
+ tasklet_hrtimer_start(&data->beacon_timer,
+ ns_to_ktime(until_tbtt * 1000),
+ HRTIMER_MODE_REL);
+ } else if (!info->enable_beacon)
+ tasklet_hrtimer_cancel(&data->beacon_timer);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
@@ -1292,7 +1372,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -2165,6 +2247,7 @@ static int __init init_mac80211_hwsim(void)
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
hw->sta_data_size = sizeof(struct hwsim_sta_priv);
+ hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
memcpy(data->channels_2ghz, hwsim_channels_2ghz,
sizeof(hwsim_channels_2ghz));
@@ -2370,8 +2453,9 @@ static int __init init_mac80211_hwsim(void)
data->debugfs, data,
&hwsim_fops_group);
- setup_timer(&data->beacon_timer, mac80211_hwsim_beacon,
- (unsigned long) hw);
+ tasklet_hrtimer_init(&data->beacon_timer,
+ mac80211_hwsim_beacon,
+ CLOCK_REALTIME, HRTIMER_MODE_ABS);
list_add_tail(&data->list, &hwsim_radios);
}
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
new file mode 100644
index 000000000000..cf43b3c29250
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -0,0 +1,261 @@
+/*
+ * Marvell Wireless LAN device driver: 802.11ac
+ *
+ * Copyright (C) 2013, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "ioctl.h"
+#include "fw.h"
+#include "main.h"
+#include "11ac.h"
+
+/* This function converts the 2-bit MCS map to the highest long GI
+ * VHT data rate.
+ */
+static u16
+mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
+ u8 bands, u16 mcs_map)
+{
+ u8 i, nss, max_mcs;
+ u16 max_rate = 0;
+ u32 usr_vht_cap_info = 0;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ /* tables of the MCS map to the highest data rate (in Mbps)
+ * supported for long GI
+ */
+ u16 max_rate_lgi_80MHZ[8][3] = {
+ {0x124, 0x15F, 0x186}, /* NSS = 1 */
+ {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
+ {0x36D, 0x41D, 0x492}, /* NSS = 3 */
+ {0x492, 0x57C, 0x618}, /* NSS = 4 */
+ {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
+ {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
+ {0x924, 0xAF8, 0xC30} /* NSS = 8 */
+ };
+ u16 max_rate_lgi_160MHZ[8][3] = {
+ {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
+ {0x492, 0x57C, 0x618}, /* NSS = 2 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
+ {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
+ {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
+ {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
+ {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
+ {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
+ };
+
+ if (bands & BAND_AAC)
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
+ else
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
+
+ /* find the max NSS supported */
+ nss = 0;
+ for (i = 0; i < 8; i++) {
+ max_mcs = (mcs_map >> (2 * i)) & 0x3;
+ if (max_mcs < 3)
+ nss = i;
+ }
+ max_mcs = (mcs_map >> (2 * nss)) & 0x3;
+
+ /* if max_mcs is 3, nss must be 0 (SS = 1). Thus, max mcs is MCS 9 */
+ if (max_mcs >= 3)
+ max_mcs = 2;
+
+ if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) {
+ /* support 160 MHz */
+ max_rate = max_rate_lgi_160MHZ[nss][max_mcs];
+ if (!max_rate)
+ /* MCS9 is not supported in NSS6 */
+ max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1];
+ } else {
+ max_rate = max_rate_lgi_80MHZ[nss][max_mcs];
+ if (!max_rate)
+ /* MCS9 is not supported in NSS3 */
+ max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1];
+ }
+
+ return max_rate;
+}
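As a rough, self-contained illustration (not driver code), the 16-bit VHT MCS map packs one 2-bit field per spatial stream; the loop above keeps the highest stream whose field is not 3:

/* Sketch: decode a 2-bit-per-stream VHT MCS map the way the loop above does.
 * Field values: 0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t mcs_map = 0xfffa;	/* hypothetical map: two streams, MCS 0-9 each */
	uint8_t i, nss = 0, max_mcs;

	for (i = 0; i < 8; i++) {
		max_mcs = (mcs_map >> (2 * i)) & 0x3;
		if (max_mcs < 3)
			nss = i;	/* remember the highest supported stream */
	}
	max_mcs = (mcs_map >> (2 * nss)) & 0x3;

	printf("NSS = %u, MCS field = %u\n", nss + 1, max_mcs);
	return 0;
}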
+
+static void
+mwifiex_fill_vht_cap_info(struct mwifiex_private *priv,
+ struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ if (bands & BAND_A)
+ vht_cap->vht_cap.vht_cap_info =
+ cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a);
+ else
+ vht_cap->vht_cap.vht_cap_info =
+ cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg);
+}
+
+static void
+mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u16 mcs_map_user, mcs_map_resp, mcs_map_result;
+ u16 mcs_user, mcs_resp, nss, tmp;
+
+ /* Fill VHT cap info */
+ mwifiex_fill_vht_cap_info(priv, vht_cap, bands);
+
+ /* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */
+ mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
+ mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map);
+ mcs_map_result = 0;
+
+ for (nss = 1; nss <= 8; nss++) {
+ mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
+ mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
+
+ if ((mcs_user == NO_NSS_SUPPORT) ||
+ (mcs_resp == NO_NSS_SUPPORT))
+ SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ else
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ min(mcs_user, mcs_resp));
+ }
+
+ vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
+
+ tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
+ vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp);
+
+ /* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */
+ mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support);
+ mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map);
+ mcs_map_result = 0;
+
+ for (nss = 1; nss <= 8; nss++) {
+ mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
+ mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
+ if ((mcs_user == NO_NSS_SUPPORT) ||
+ (mcs_resp == NO_NSS_SUPPORT))
+ SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ else
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ min(mcs_user, mcs_resp));
+ }
+
+ vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
+
+ tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
+ vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp);
+
+ return;
+}
+
+int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc,
+ u8 **buffer)
+{
+ struct mwifiex_ie_types_vhtcap *vht_cap;
+ struct mwifiex_ie_types_oper_mode_ntf *oper_ntf;
+ struct ieee_types_oper_mode_ntf *ieee_oper_ntf;
+ struct mwifiex_ie_types_vht_oper *vht_op;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 supp_chwd_set;
+ u32 usr_vht_cap_info;
+ int ret_len = 0;
+
+ if (bss_desc->bss_band & BAND_A)
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
+ else
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
+
+ /* VHT Capabilities IE */
+ if (bss_desc->bcn_vht_cap) {
+ vht_cap = (struct mwifiex_ie_types_vhtcap *)*buffer;
+ memset(vht_cap, 0, sizeof(*vht_cap));
+ vht_cap->header.type = cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+ vht_cap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_vht_cap));
+ memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
+ (u8 *)bss_desc->bcn_vht_cap +
+ sizeof(struct ieee_types_header),
+ le16_to_cpu(vht_cap->header.len));
+
+ mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
+ *buffer += sizeof(*vht_cap);
+ ret_len += sizeof(*vht_cap);
+ }
+
+ /* VHT Operation IE */
+ if (bss_desc->bcn_vht_oper) {
+ if (priv->bss_mode == HostCmd_BSS_MODE_IBSS) {
+ vht_op = (struct mwifiex_ie_types_vht_oper *)*buffer;
+ memset(vht_op, 0, sizeof(*vht_op));
+ vht_op->header.type =
+ cpu_to_le16(WLAN_EID_VHT_OPERATION);
+ vht_op->header.len = cpu_to_le16(sizeof(*vht_op) -
+ sizeof(struct mwifiex_ie_types_header));
+ memcpy((u8 *)vht_op +
+ sizeof(struct mwifiex_ie_types_header),
+ (u8 *)bss_desc->bcn_vht_oper +
+ sizeof(struct ieee_types_header),
+ le16_to_cpu(vht_op->header.len));
+
+ /* negotiate the channel width and central freq
+ * and keep the central freq as the peer suggests
+ */
+ supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
+
+ switch (supp_chwd_set) {
+ case 0:
+ vht_op->chan_width =
+ min_t(u8, IEEE80211_VHT_CHANWIDTH_80MHZ,
+ bss_desc->bcn_vht_oper->chan_width);
+ break;
+ case 1:
+ vht_op->chan_width =
+ min_t(u8, IEEE80211_VHT_CHANWIDTH_160MHZ,
+ bss_desc->bcn_vht_oper->chan_width);
+ break;
+ case 2:
+ vht_op->chan_width =
+ min_t(u8, IEEE80211_VHT_CHANWIDTH_80P80MHZ,
+ bss_desc->bcn_vht_oper->chan_width);
+ break;
+ default:
+ vht_op->chan_width =
+ IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ *buffer += sizeof(*vht_op);
+ ret_len += sizeof(*vht_op);
+ }
+ }
+
+ /* Operating Mode Notification IE */
+ if (bss_desc->oper_mode) {
+ ieee_oper_ntf = bss_desc->oper_mode;
+ oper_ntf = (void *)*buffer;
+ memset(oper_ntf, 0, sizeof(*oper_ntf));
+ oper_ntf->header.type = cpu_to_le16(WLAN_EID_OPMODE_NOTIF);
+ oper_ntf->header.len = cpu_to_le16(sizeof(u8));
+ oper_ntf->oper_mode = ieee_oper_ntf->oper_mode;
+ *buffer += sizeof(*oper_ntf);
+ ret_len += sizeof(*oper_ntf);
+ }
+
+ return ret_len;
+}
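The function above follows the usual mwifiex append-TLV pattern: write a header plus payload at *buffer, advance the caller's cursor, and return the number of bytes added. A stand-alone sketch of that pattern (hypothetical TLV type value and structures, not the driver's own):

/* Sketch only: append-and-advance TLV helper in plain C. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct tlv_header {
	uint16_t type;
	uint16_t len;
};

static int append_tlv(uint8_t **buffer, uint16_t type,
		      const void *payload, uint16_t len)
{
	struct tlv_header hdr = { type, len };

	memcpy(*buffer, &hdr, sizeof(hdr));
	memcpy(*buffer + sizeof(hdr), payload, len);
	*buffer += sizeof(hdr) + len;	/* move the caller's cursor forward */
	return sizeof(hdr) + len;	/* bytes appended, like ret_len above */
}

int main(void)
{
	uint8_t cmd_buf[64], *pos = cmd_buf;
	uint8_t oper_mode = 0x01;	/* hypothetical payload */
	int total;

	total = append_tlv(&pos, 0xC7 /* hypothetical type */, &oper_mode, 1);
	printf("appended %d bytes, cursor advanced %ld\n",
	       total, (long)(pos - cmd_buf));
	return 0;
}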
diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/mwifiex/11ac.h
new file mode 100644
index 000000000000..80fd1ba46200
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11ac.h
@@ -0,0 +1,26 @@
+/*
+ * Marvell Wireless LAN device driver: 802.11ac
+ *
+ * Copyright (C) 2013, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#ifndef _MWIFIEX_11AC_H_
+#define _MWIFIEX_11AC_H_
+
+int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc,
+ u8 **buffer);
+#endif /* _MWIFIEX_11AC_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 245a371f1a43..45f19716687e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -53,7 +53,9 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
sizeof(sband->ht_cap.mcs));
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
- sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ (priv->adapter->sec_chan_offset !=
+ IEEE80211_HT_PARAM_CHA_SEC_NONE)))
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
@@ -248,7 +250,8 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
* - Setting HT Tx capability and HT Tx information fields
* - Ensuring correct endian-ness
*/
-int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
+int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd, u16 cmd_action,
struct mwifiex_ds_11n_tx_cfg *txcfg)
{
struct host_cmd_ds_11n_cfg *htcfg = &cmd->params.htcfg;
@@ -258,6 +261,10 @@ int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
htcfg->action = cpu_to_le16(cmd_action);
htcfg->ht_tx_cap = cpu_to_le16(txcfg->tx_htcap);
htcfg->ht_tx_info = cpu_to_le16(txcfg->tx_htinfo);
+
+ if (priv->adapter->is_hw_11ac_capable)
+ htcfg->misc_config = cpu_to_le16(txcfg->misc_config);
+
return 0;
}
@@ -398,45 +405,6 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
}
/*
- * This function reconfigures the Tx buffer size in firmware.
- *
- * This function prepares a firmware command and issues it, if
- * the current Tx buffer size is different from the one requested.
- * Maximum configurable Tx buffer size is limited by the HT capability
- * field value.
- */
-void
-mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
-{
- u16 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_2K;
- u16 tx_buf, curr_tx_buf_size = 0;
-
- if (bss_desc->bcn_ht_cap) {
- if (le16_to_cpu(bss_desc->bcn_ht_cap->cap_info) &
- IEEE80211_HT_CAP_MAX_AMSDU)
- max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_8K;
- else
- max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_4K;
- }
-
- tx_buf = min(priv->adapter->max_tx_buf_size, max_amsdu);
-
- dev_dbg(priv->adapter->dev, "info: max_amsdu=%d, max_tx_buf=%d\n",
- max_amsdu, priv->adapter->max_tx_buf_size);
-
- if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_2K)
- curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
- else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_4K)
- curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
- else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_8K)
- curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_8K;
- if (curr_tx_buf_size != tx_buf)
- mwifiex_send_cmd_async(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
- HostCmd_ACT_GEN_SET, 0, &tx_buf);
-}
-
-/*
* This function checks if the given pointer is valid entry of
* Tx BA Stream table.
*/
@@ -531,11 +499,8 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
new_node = kzalloc(sizeof(struct mwifiex_tx_ba_stream_tbl),
GFP_ATOMIC);
- if (!new_node) {
- dev_err(priv->adapter->dev,
- "%s: failed to alloc new_node\n", __func__);
+ if (!new_node)
return;
- }
INIT_LIST_HEAD(&new_node->list);
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 46006a54a656..375db01442bf 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -28,14 +28,12 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
+int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd, u16 cmd_action,
struct mwifiex_ds_11n_tx_cfg *txcfg);
-
int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc,
u8 **buffer);
-void mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc);
void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
struct mwifiex_ie_types_htcap *);
int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 68d52cfc1ebd..af8fe6352eed 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -278,14 +278,16 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
break;
case -1:
- adapter->data_sent = false;
+ if (adapter->iface_type != MWIFIEX_PCIE)
+ adapter->data_sent = false;
dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
__func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
return 0;
case -EINPROGRESS:
- adapter->data_sent = false;
+ if (adapter->iface_type != MWIFIEX_PCIE)
+ adapter->data_sent = false;
break;
case 0:
mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 4a97acd170f7..5e796f847088 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -272,11 +272,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
}
/* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
- if (!new_node) {
- dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n",
- __func__);
+ if (!new_node)
return;
- }
INIT_LIST_HEAD(&new_node->list);
new_node->tid = tid;
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index b2e27723f801..4f614aad9ded 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -20,12 +20,12 @@ config MWIFIEX_SDIO
mwifiex_sdio.
config MWIFIEX_PCIE
- tristate "Marvell WiFi-Ex Driver for PCIE 8766"
+ tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
depends on MWIFIEX && PCI
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- 8766 chipset with PCIe interface.
+ 8766/8897 chipsets with PCIe interface.
If you choose to build it as a module, it will be called
mwifiex_pcie.
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index dd0410d2d465..97b245cbafd8 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -23,6 +23,7 @@ mwifiex-y += util.o
mwifiex-y += txrx.o
mwifiex-y += wmm.o
mwifiex-y += 11n.o
+mwifiex-y += 11ac.o
mwifiex-y += 11n_aggr.o
mwifiex-y += 11n_rxreorder.o
mwifiex-y += scan.o
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index b55badef4660..3d64613ebb29 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -121,7 +121,6 @@ info
wmm_ac_vi = <number of packets sent to device from WMM AcVi queue>
wmm_ac_be = <number of packets sent to device from WMM AcBE queue>
wmm_ac_bk = <number of packets sent to device from WMM AcBK queue>
- max_tx_buf_size = <maximum Tx buffer size>
tx_buf_size = <current Tx buffer size>
curr_tx_buf_size = <current Tx buffer size>
ps_mode = <0/1, CAM mode/PS mode>
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index a875499f8945..a44023a7bd57 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -519,8 +519,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
* - Set by user
* - Set by Country IE
*/
-static int mwifiex_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void mwifiex_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
@@ -540,8 +540,6 @@ static int mwifiex_reg_notifier(struct wiphy *wiphy,
break;
}
mwifiex_send_domain_info_cmd_fw(wiphy);
-
- return 0;
}
/*
@@ -836,6 +834,66 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return ret;
}
+static void
+mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
+ struct rate_info *rate)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ if (adapter->is_hw_11ac_capable) {
+ /* bit[1-0]: 00=LG 01=HT 10=VHT */
+ if (tx_htinfo & BIT(0)) {
+ /* HT */
+ rate->mcs = priv->tx_rate;
+ rate->flags |= RATE_INFO_FLAGS_MCS;
+ }
+ if (tx_htinfo & BIT(1)) {
+ /* VHT */
+ rate->mcs = priv->tx_rate & 0x0F;
+ rate->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ }
+
+ if (tx_htinfo & (BIT(1) | BIT(0))) {
+ /* HT or VHT */
+ switch (tx_htinfo & (BIT(3) | BIT(2))) {
+ case 0:
+ /* This will be 20MHz */
+ break;
+ case (BIT(2)):
+ rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ break;
+ case (BIT(3)):
+ rate->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
+ break;
+ case (BIT(3) | BIT(2)):
+ rate->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
+ break;
+ }
+
+ if (tx_htinfo & BIT(4))
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if ((priv->tx_rate >> 4) == 1)
+ rate->nss = 2;
+ else
+ rate->nss = 1;
+ }
+ } else {
+ /*
+ * Bit 0 in tx_htinfo indicates that current Tx rate
+ * is 11n rate. Valid MCS index values for us are 0 to 15.
+ */
+ if ((tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
+ rate->mcs = priv->tx_rate;
+ rate->flags |= RATE_INFO_FLAGS_MCS;
+ if (tx_htinfo & BIT(1))
+ rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ if (tx_htinfo & BIT(2))
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ }
+ }
+}
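As a minimal sketch (assumed input value), the 11ac tx_htinfo layout decoded above is: bits 1:0 rate format, bits 3:2 bandwidth, bit 4 short GI:

/* Sketch: decode the assumed tx_htinfo bit layout used above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t tx_htinfo = 0x1a;	/* hypothetical: VHT, 80 MHz, short GI */
	static const char * const fmt[] = { "legacy", "HT", "VHT" };
	static const int bw[] = { 20, 40, 80, 160 };

	if ((tx_htinfo & 0x3) < 3)	/* value 3 is not a defined format */
		printf("format=%s bw=%dMHz sgi=%u\n",
		       fmt[tx_htinfo & 0x3],
		       bw[(tx_htinfo >> 2) & 0x3],
		       (tx_htinfo >> 4) & 0x1);
	return 0;
}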
+
/*
* This function dumps the station information on a buffer.
*
@@ -875,20 +933,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
&priv->dtim_period);
- /*
- * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
- * MCS index values for us are 0 to 15.
- */
- if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
- sinfo->txrate.mcs = priv->tx_rate;
- sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
- /* 40MHz rate */
- if (priv->tx_htinfo & BIT(1))
- sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
- /* SGI enabled */
- if (priv->tx_htinfo & BIT(2))
- sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- }
+ mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate);
sinfo->signal_avg = priv->bcn_rssi_avg;
sinfo->rx_bytes = priv->stats.rx_bytes;
@@ -1297,20 +1342,22 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
/* Set appropriate bands */
if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
bss_cfg->band_cfg = BAND_CONFIG_BG;
+ config_bands = BAND_B | BAND_G;
- if (cfg80211_get_chandef_type(&params->chandef) ==
- NL80211_CHAN_NO_HT)
- config_bands = BAND_B | BAND_G;
- else
- config_bands = BAND_B | BAND_G | BAND_GN;
+ if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
+ config_bands |= BAND_GN;
+
+ if (params->chandef.width > NL80211_CHAN_WIDTH_40)
+ config_bands |= BAND_GAC;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
+ config_bands = BAND_A;
- if (cfg80211_get_chandef_type(&params->chandef) ==
- NL80211_CHAN_NO_HT)
- config_bands = BAND_A;
- else
- config_bands = BAND_AN | BAND_A;
+ if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
+ config_bands |= BAND_AN;
+
+ if (params->chandef.width > NL80211_CHAN_WIDTH_40)
+ config_bands |= BAND_AAC;
}
if (!((config_bands | priv->adapter->fw_bands) &
@@ -1327,6 +1374,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
}
mwifiex_set_ht_params(priv, bss_cfg, params);
+ mwifiex_set_wmm_params(priv, bss_cfg, params);
if (params->inactivity_timeout > 0) {
/* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */
@@ -1431,7 +1479,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
0, ie_buf, ie_len, 0, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
return 0;
@@ -1459,7 +1507,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
struct cfg80211_ssid req_ssid;
int ret, auth_type = 0;
struct cfg80211_bss *bss = NULL;
- u8 is_scanning_required = 0, config_bands = 0;
+ u8 is_scanning_required = 0;
memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
@@ -1478,19 +1526,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
/* disconnect before try to associate */
mwifiex_deauthenticate(priv, NULL);
- if (channel) {
- if (mode == NL80211_IFTYPE_STATION) {
- if (channel->band == IEEE80211_BAND_2GHZ)
- config_bands = BAND_B | BAND_G | BAND_GN;
- else
- config_bands = BAND_A | BAND_AN;
-
- if (!((config_bands | priv->adapter->fw_bands) &
- ~priv->adapter->fw_bands))
- priv->adapter->config_bands = config_bands;
- }
- }
-
/* As this is new association, clear locally stored
* keys and security related flags */
priv->sec_info.wpa_enabled = false;
@@ -1707,9 +1742,9 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
if (cfg80211_get_chandef_type(&params->chandef) !=
NL80211_CHAN_NO_HT)
- config_bands |= BAND_GN;
+ config_bands |= BAND_G | BAND_GN;
} else {
- if (cfg80211_get_chandef_type(&params->chandef) !=
+ if (cfg80211_get_chandef_type(&params->chandef) ==
NL80211_CHAN_NO_HT)
config_bands = BAND_A;
else
@@ -1834,10 +1869,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
GFP_KERNEL);
- if (!priv->user_scan_cfg) {
- dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
+ if (!priv->user_scan_cfg)
return -ENOMEM;
- }
priv->scan_request = request;
@@ -1895,6 +1928,79 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return 0;
}
+static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info,
+ struct mwifiex_private *priv)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u32 vht_cap = 0, cap = adapter->hw_dot_11ac_dev_cap;
+
+ vht_info->vht_supported = true;
+
+ switch (GET_VHTCAP_MAXMPDULEN(cap)) {
+ case 0x00:
+ vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
+ break;
+ case 0x01:
+ vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+ break;
+ case 0x10:
+ vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ break;
+ default:
+ dev_err(adapter->dev, "unsupported MAX MPDU len\n");
+ break;
+ }
+
+ if (ISSUPP_11ACVHTHTCVHT(cap))
+ vht_cap |= IEEE80211_VHT_CAP_HTC_VHT;
+
+ if (ISSUPP_11ACVHTTXOPPS(cap))
+ vht_cap |= IEEE80211_VHT_CAP_VHT_TXOP_PS;
+
+ if (ISSUPP_11ACMURXBEAMFORMEE(cap))
+ vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
+
+ if (ISSUPP_11ACMUTXBEAMFORMEE(cap))
+ vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ if (ISSUPP_11ACSUBEAMFORMER(cap))
+ vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+
+ if (ISSUPP_11ACSUBEAMFORMEE(cap))
+ vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+
+ if (ISSUPP_11ACRXSTBC(cap))
+ vht_cap |= IEEE80211_VHT_CAP_RXSTBC_1;
+
+ if (ISSUPP_11ACTXSTBC(cap))
+ vht_cap |= IEEE80211_VHT_CAP_TXSTBC;
+
+ if (ISSUPP_11ACSGI160(cap))
+ vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
+
+ if (ISSUPP_11ACSGI80(cap))
+ vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
+
+ if (ISSUPP_11ACLDPC(cap))
+ vht_cap |= IEEE80211_VHT_CAP_RXLDPC;
+
+ if (ISSUPP_11ACBW8080(cap))
+ vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+
+ if (ISSUPP_11ACBW160(cap))
+ vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+
+ vht_info->cap = vht_cap;
+
+ /* Update MCS support for VHT */
+ vht_info->vht_mcs.rx_mcs_map = cpu_to_le16(
+ adapter->hw_dot_11ac_mcs_support & 0xFFFF);
+ vht_info->vht_mcs.rx_highest = 0;
+ vht_info->vht_mcs.tx_mcs_map = cpu_to_le16(
+ adapter->hw_dot_11ac_mcs_support >> 16);
+ vht_info->vht_mcs.tx_highest = 0;
+}
+
/*
* This function sets up the CFG802.11 specific HT capability fields
* with default values.
@@ -2108,16 +2214,22 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->netdev = dev;
mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
+ if (adapter->is_hw_11ac_capable)
+ mwifiex_setup_vht_caps(
+ &wiphy->bands[IEEE80211_BAND_2GHZ]->vht_cap, priv);
if (adapter->config_bands & BAND_A)
mwifiex_setup_ht_caps(
&wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
+ if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable)
+ mwifiex_setup_vht_caps(
+ &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv);
+
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = priv->wdev;
dev->ieee80211_ptr->iftype = priv->bss_mode;
memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
- memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
@@ -2261,6 +2373,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+ WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index f69300f93f42..988552dece75 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -106,8 +106,8 @@ u8 *mwifiex_11d_code_2_region(u8 code)
* This function maps an index in supported rates table into
* the corresponding data rate.
*/
-u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
- u8 ht_info)
+u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
+ u8 index, u8 ht_info)
{
/*
* For every mcs_rate line, the first 8 bytes are for stream 1x1,
@@ -130,10 +130,155 @@ u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
{ 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
};
+ /* AC rates */
+ u16 ac_mcs_rate_nss1[8][10] = {
+ /* LG 160M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 160M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 80M */
+ { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
+ 0x249, 0x2BE, 0x30C },
+
+ /* SG 80M */
+ { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
+ 0x28A, 0x30C, 0x363 },
+
+ /* LG 40M */
+ { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
+ 0x10E, 0x144, 0x168 },
+
+ /* SG 40M */
+ { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
+ 0x12C, 0x168, 0x190 },
+
+ /* LG 20M */
+ { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
+
+ /* SG 20M */
+ { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
+ };
+ /* NSS2 note: the value in the table is twice the actual
+ * rate
+ */
+ u16 ac_mcs_rate_nss2[8][10] = {
+ /* LG 160M */
+ { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
+ 0x924, 0xAF8, 0xC30 },
+
+ /* SG 160M */
+ { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
+ 0xA28, 0xC30, 0xD8B },
+
+ /* LG 80M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 80M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 40M */
+ { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
+ 0x21C, 0x288, 0x2D0 },
+
+ /* SG 40M */
+ { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
+ 0x258, 0x2D0, 0x320 },
+
+ /* LG 20M */
+ { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
+ 0x138, 0x00 },
+
+ /* SG 20M */
+ { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
+ 0x15B, 0x00 },
+ };
+ u32 rate = 0;
+ u8 mcs_index = 0;
+ u8 bw = 0;
+ u8 gi = 0;
+
+ if ((ht_info & 0x3) == MWIFIEX_RATE_FORMAT_VHT) {
+ mcs_index = min(index & 0xF, 9);
+
+ /* 20M: bw=0, 40M: bw=1, 80M: bw=2, 160M: bw=3 */
+ bw = (ht_info & 0xC) >> 2;
+
+ /* LGI: gi =0, SGI: gi = 1 */
+ gi = (ht_info & 0x10) >> 4;
+
+ if ((index >> 4) == 1) /* NSS = 2 */
+ rate = ac_mcs_rate_nss2[2 * (3 - bw) + gi][mcs_index];
+ else /* NSS = 1 */
+ rate = ac_mcs_rate_nss1[2 * (3 - bw) + gi][mcs_index];
+ } else if ((ht_info & 0x3) == MWIFIEX_RATE_FORMAT_HT) {
+ /* 20M: bw=0, 40M: bw=1 */
+ bw = (ht_info & 0xC) >> 2;
+
+ /* LGI: gi =0, SGI: gi = 1 */
+ gi = (ht_info & 0x10) >> 4;
+
+ if (index == MWIFIEX_RATE_BITMAP_MCS0) {
+ if (gi == 1)
+ rate = 0x0D; /* MCS 32 SGI rate */
+ else
+ rate = 0x0C; /* MCS 32 LGI rate */
+ } else if (index < 16) {
+ if ((bw == 1) || (bw == 0))
+ rate = mcs_rate[2 * (1 - bw) + gi][index];
+ else
+ rate = mwifiex_data_rates[0];
+ } else {
+ rate = mwifiex_data_rates[0];
+ }
+ } else {
+ /* legacy (non-HT) rates */
+ if (index >= MWIFIEX_SUPPORTED_RATES_EXT)
+ index = 0;
+ rate = mwifiex_data_rates[index];
+ }
+
+ return rate;
+}
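The VHT branch above indexes its rate tables as 2 * (3 - bw) + gi, i.e. rows ordered long/short GI per bandwidth, widest first; a small sketch of that mapping (row labels are illustrative, not the real rate values):

/* Sketch: (bandwidth, guard interval) -> row index into the ac_mcs_rate
 * tables above. */
#include <stdio.h>

int main(void)
{
	static const char * const rows[8] = {
		"LGI 160MHz", "SGI 160MHz", "LGI 80MHz", "SGI 80MHz",
		"LGI 40MHz",  "SGI 40MHz",  "LGI 20MHz", "SGI 20MHz",
	};
	int bw, gi;

	for (bw = 0; bw <= 3; bw++)		/* 0=20M 1=40M 2=80M 3=160M */
		for (gi = 0; gi <= 1; gi++)	/* 0=long GI, 1=short GI */
			printf("bw=%d gi=%d -> row %d (%s)\n",
			       bw, gi, 2 * (3 - bw) + gi,
			       rows[2 * (3 - bw) + gi]);
	return 0;
}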
+
+/* This function maps an index in supported rates table into
+ * the corresponding data rate.
+ */
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv,
+ u8 index, u8 ht_info)
+{
+ /* For every mcs_rate line, the first 8 bytes are for stream 1x1,
+ * and all 16 bytes are for stream 2x2.
+ */
+ u16 mcs_rate[4][16] = {
+ /* LGI 40M */
+ { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+ 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+ /* SGI 40M */
+ { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+ 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+ /* LGI 20M */
+ { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+ 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+ /* SGI 20M */
+ { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+ 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+ };
u32 mcs_num_supp =
(priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
+ if (priv->adapter->is_hw_11ac_capable)
+ return mwifiex_index_to_acs_data_rate(priv, index, ht_info);
+
if (ht_info & BIT(0)) {
if (index == MWIFIEX_RATE_BITMAP_MCS0) {
if (ht_info & BIT(2))
@@ -269,6 +414,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
{
u32 k = 0;
struct mwifiex_adapter *adapter = priv->adapter;
+
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
switch (adapter->config_bands) {
case BAND_B:
@@ -279,6 +425,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
break;
case BAND_G:
case BAND_G | BAND_GN:
+ case BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_g\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_g,
@@ -288,7 +435,11 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_B | BAND_G:
case BAND_A | BAND_B:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
+ case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
+ case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN |
+ BAND_AAC | BAND_GAC:
case BAND_B | BAND_G | BAND_GN:
+ case BAND_B | BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_bg\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_bg,
@@ -301,14 +452,18 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
k = mwifiex_copy_rates(rates, k, supported_rates_a,
sizeof(supported_rates_a));
break;
+ case BAND_AN:
case BAND_A | BAND_AN:
+ case BAND_A | BAND_AN | BAND_AAC:
case BAND_A | BAND_G | BAND_AN | BAND_GN:
+ case BAND_A | BAND_G | BAND_AN | BAND_GN | BAND_AAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_a\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_a,
sizeof(supported_rates_a));
break;
case BAND_GN:
+ case BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_n\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_n,
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 5f438e6c2155..20a6c5555873 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -24,6 +24,7 @@
#include "main.h"
#include "wmm.h"
#include "11n.h"
+#include "11ac.h"
/*
* This function initializes a command node.
@@ -334,20 +335,15 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_array;
- u32 buf_size;
u32 i;
/* Allocate and initialize struct cmd_ctrl_node */
- buf_size = sizeof(struct cmd_ctrl_node) * MWIFIEX_NUM_OF_CMD_BUFFER;
- cmd_array = kzalloc(buf_size, GFP_KERNEL);
- if (!cmd_array) {
- dev_err(adapter->dev, "%s: failed to alloc cmd_array\n",
- __func__);
+ cmd_array = kcalloc(MWIFIEX_NUM_OF_CMD_BUFFER,
+ sizeof(struct cmd_ctrl_node), GFP_KERNEL);
+ if (!cmd_array)
return -ENOMEM;
- }
adapter->cmd_pool = cmd_array;
- memset(adapter->cmd_pool, 0, buf_size);
/* Allocate and initialize command buffers */
for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
@@ -1470,6 +1466,24 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
+ if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) {
+ adapter->is_hw_11ac_capable = true;
+
+ /* Copy 11AC cap */
+ adapter->hw_dot_11ac_dev_cap =
+ le32_to_cpu(hw_spec->dot_11ac_dev_cap);
+ adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap;
+ adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap;
+
+ /* Copy 11AC mcs */
+ adapter->hw_dot_11ac_mcs_support =
+ le32_to_cpu(hw_spec->dot_11ac_mcs_support);
+ adapter->usr_dot_11ac_mcs_support =
+ adapter->hw_dot_11ac_mcs_support;
+ } else {
+ adapter->is_hw_11ac_capable = false;
+ }
+
dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
adapter->fw_release_number);
dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 46e34aa65d1c..753b5682d53f 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -58,8 +58,6 @@ static struct mwifiex_debug_data items[] = {
item_addr(packets_out[WMM_AC_BE]), 1},
{"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
item_addr(packets_out[WMM_AC_BK]), 1},
- {"max_tx_buf_size", item_size(max_tx_buf_size),
- item_addr(max_tx_buf_size), 1},
{"tx_buf_size", item_size(tx_buf_size),
item_addr(tx_buf_size), 1},
{"curr_tx_buf_size", item_size(curr_tx_buf_size),
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e9357d87d327..e8a569aaa2e8 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -26,6 +26,7 @@
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ieee80211.h>
+#include <net/mac80211.h>
#define MWIFIEX_MAX_BSS_NUM (3)
@@ -58,6 +59,8 @@
#define MWIFIEX_RTS_MAX_VALUE (2347)
#define MWIFIEX_FRAG_MIN_VALUE (256)
#define MWIFIEX_FRAG_MAX_VALUE (2346)
+#define MWIFIEX_WMM_VERSION 0x01
+#define MWIFIEX_WMM_SUBTYPE 0x01
#define MWIFIEX_RETRY_LIMIT 14
#define MWIFIEX_SDIO_BLOCK_SIZE 256
@@ -126,4 +129,19 @@ enum mwifiex_wmm_ac_e {
WMM_AC_VI,
WMM_AC_VO
} __packed;
+
+struct ieee_types_wmm_ac_parameters {
+ u8 aci_aifsn_bitmap;
+ u8 ecw_bitmap;
+ __le16 tx_op_limit;
+} __packed;
+
+struct mwifiex_types_wmm_info {
+ u8 oui[4];
+ u8 subtype;
+ u8 version;
+ u8 qos_info;
+ u8 reserved;
+ struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
+} __packed;
#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 4dc8e2e9a889..25acb0682c56 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -49,13 +49,23 @@ struct tx_packet_hdr {
#define A_SUPPORTED_RATES 9
#define HOSTCMD_SUPPORTED_RATES 14
#define N_SUPPORTED_RATES 3
-#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN)
+#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \
+ BAND_AN | BAND_GAC | BAND_AAC)
-#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11))
+#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \
+ BIT(12) | BIT(13))
#define IS_SUPPORT_MULTI_BANDS(adapter) \
(adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
+
+/* shift bit 12 and bit 13 in fw_cap_info from the firmware to bit 13 and 14
+ * for 11ac so that bit 11 is for GN, bit 12 for AN, bit 13 for GAC, and
+ * bit 14 for AAC, in order to be compatible with the band capability
+ * defined in the driver after right shift of 8 bits.
+ */
#define GET_FW_DEFAULT_BANDS(adapter) \
- ((adapter->fw_cap_info >> 8) & ALL_802_11_BANDS)
+ (((((adapter->fw_cap_info & 0x3000) << 1) | \
+ (adapter->fw_cap_info & ~0xF000)) >> 8) & \
+ ALL_802_11_BANDS)
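A self-contained sketch of the remapping GET_FW_DEFAULT_BANDS performs, using band flag values mirroring the enum extended in ioctl.h below (the firmware word is an assumed example):

/* Sketch: move the 11ac bits (12-13) up by one so that the >> 8 lands them
 * on the GAC/AAC band flags. */
#include <stdio.h>
#include <stdint.h>

#define BAND_B		1
#define BAND_G		2
#define BAND_A		4
#define BAND_GN		8
#define BAND_AN		16
#define BAND_GAC	32
#define BAND_AAC	64
#define ALL_BANDS	(BAND_A | BAND_B | BAND_G | BAND_GN | \
			 BAND_AN | BAND_GAC | BAND_AAC)

int main(void)
{
	uint32_t fw_cap_info = 0x3f00;	/* assumed: bits 8-13 set by firmware */
	uint32_t bands = (((((fw_cap_info & 0x3000) << 1) |
			    (fw_cap_info & ~0xF000)) >> 8) & ALL_BANDS);

	printf("band mask = %#x\n", bands);	/* prints 0x6f for this input */
	return 0;
}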
#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
@@ -216,6 +226,47 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define LLC_SNAP_LEN 8
+/* HW_SPEC fw_cap_info */
+
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14)))
+
+#define GET_VHTCAP_MAXMPDULEN(vht_cap_info) (vht_cap_info & 0x3)
+#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
+#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
+#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
+ (2 * (nss - 1)))
+#define NO_NSS_SUPPORT 0x3
+
+/* HW_SPEC: HTC-VHT supported */
+#define ISSUPP_11ACVHTHTCVHT(Dot11acDevCap) (Dot11acDevCap & BIT(22))
+/* HW_SPEC: VHT TXOP PS support */
+#define ISSUPP_11ACVHTTXOPPS(Dot11acDevCap) (Dot11acDevCap & BIT(21))
+/* HW_SPEC: MU RX beamformee support */
+#define ISSUPP_11ACMURXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(20))
+/* HW_SPEC: MU TX beamformee support */
+#define ISSUPP_11ACMUTXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(19))
+/* HW_SPEC: SU Beamformee support */
+#define ISSUPP_11ACSUBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(10))
+/* HW_SPEC: SU Beamformer support */
+#define ISSUPP_11ACSUBEAMFORMER(Dot11acDevCap) (Dot11acDevCap & BIT(9))
+/* HW_SPEC: Rx STBC support */
+#define ISSUPP_11ACRXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(8))
+/* HW_SPEC: Tx STBC support */
+#define ISSUPP_11ACTXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(7))
+/* HW_SPEC: Short GI support for 160MHz BW */
+#define ISSUPP_11ACSGI160(Dot11acDevCap) (Dot11acDevCap & BIT(6))
+/* HW_SPEC: Short GI support for 80MHz BW */
+#define ISSUPP_11ACSGI80(Dot11acDevCap) (Dot11acDevCap & BIT(5))
+/* HW_SPEC: LDPC coding support */
+#define ISSUPP_11ACLDPC(Dot11acDevCap) (Dot11acDevCap & BIT(4))
+/* HW_SPEC: Channel BW 20/40/80/160/80+80 MHz support */
+#define ISSUPP_11ACBW8080(Dot11acDevCap) (Dot11acDevCap & BIT(3))
+/* HW_SPEC: Channel BW 20/40/80/160 MHz support */
+#define ISSUPP_11ACBW160(Dot11acDevCap) (Dot11acDevCap & BIT(2))
+
+#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
+#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
+
#define MOD_CLASS_HR_DSSS 0x03
#define MOD_CLASS_OFDM 0x07
#define MOD_CLASS_HT 0x08
@@ -330,6 +381,9 @@ enum P2P_MODES {
#define HOST_SLEEP_CFG_GPIO_DEF 0xff
#define HOST_SLEEP_CFG_GAP_DEF 0
+#define MWIFIEX_TIMEOUT_FOR_AP_RESP 0xfffc
+#define MWIFIEX_STATUS_CODE_AUTH_TIMEOUT 2
+
#define CMD_F_HOSTCMD (1 << 0)
#define CMD_F_CANCELED (1 << 1)
@@ -452,9 +506,22 @@ struct rxpd {
u8 rx_rate;
s8 snr;
s8 nf;
- /* Ht Info [Bit 0] RxRate format: LG=0, HT=1
+
+ /* For: Non-802.11 AC cards
+ *
+ * Ht Info [Bit 0] RxRate format: LG=0, HT=1
* [Bit 1] HT Bandwidth: BW20 = 0, BW40 = 1
- * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 */
+ * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1
+ *
+ * For: 802.11 AC cards
+ * [Bit 1] [Bit 0] RxRate format: legacy rate = 00 HT = 01 VHT = 10
+ * [Bit 3] [Bit 2] HT/VHT Bandwidth BW20 = 00 BW40 = 01
+ * BW80 = 10 BW160 = 11
+ * [Bit 4] HT/VHT Guard interval LGI = 0 SGI = 1
+ * [Bit 5] STBC support Enabled = 1
+ * [Bit 6] LDPC support Enabled = 1
+ * [Bit 7] Reserved
+ */
u8 ht_info;
u8 reserved;
} __packed;
@@ -677,7 +744,11 @@ struct host_cmd_ds_get_hw_spec {
__le32 dot_11n_dev_cap;
u8 dev_mcs_support;
__le16 mp_end_port; /* SDIO only, reserved for other interfaces */
- __le16 reserved_4;
+ __le16 mgmt_buf_count; /* mgmt IE buffer count */
+ __le32 reserved_5;
+ __le32 reserved_6;
+ __le32 dot_11ac_dev_cap;
+ __le32 dot_11ac_mcs_support;
} __packed;
struct host_cmd_ds_802_11_rssi_info {
@@ -783,6 +854,12 @@ union ieee_types_phy_param_set {
struct ieee_types_ds_param_set ds_param_set;
} __packed;
+struct ieee_types_oper_mode_ntf {
+ u8 element_id;
+ u8 len;
+ u8 oper_mode;
+} __packed;
+
struct host_cmd_ds_802_11_ad_hoc_start {
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 bss_mode;
@@ -843,11 +920,27 @@ struct host_cmd_ds_802_11_get_log {
__le32 wep_icv_err_cnt[4];
};
+/* Enumeration for rate format */
+enum _mwifiex_rate_format {
+ MWIFIEX_RATE_FORMAT_LG = 0,
+ MWIFIEX_RATE_FORMAT_HT,
+ MWIFIEX_RATE_FORMAT_VHT,
+ MWIFIEX_RATE_FORMAT_AUTO = 0xFF,
+};
+
struct host_cmd_ds_tx_rate_query {
u8 tx_rate;
- /* Ht Info [Bit 0] RxRate format: LG=0, HT=1
+ /* Tx Rate Info: For 802.11 AC cards
+ *
+ * [Bit 0-1] tx rate format: LG = 0, HT = 1, VHT = 2
+ * [Bit 2-3] HT/VHT Bandwidth: BW20 = 0, BW40 = 1, BW80 = 2, BW160 = 3
+ * [Bit 4] HT/VHT Guard Interval: LGI = 0, SGI = 1
+ *
+ * For non-802.11 AC cards
+ * Ht Info [Bit 0] RxRate format: LG=0, HT=1
* [Bit 1] HT Bandwidth: BW20 = 0, BW40 = 1
- * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 */
+ * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1
+ */
u8 ht_info;
} __packed;
@@ -1093,6 +1186,7 @@ struct host_cmd_ds_11n_cfg {
__le16 action;
__le16 ht_tx_cap;
__le16 ht_tx_info;
+ __le16 misc_config; /* Needed for 802.11AC cards only */
} __packed;
struct host_cmd_ds_txbuf_cfg {
@@ -1131,12 +1225,6 @@ struct ieee_types_vendor_header {
u8 version;
} __packed;
-struct ieee_types_wmm_ac_parameters {
- u8 aci_aifsn_bitmap;
- u8 ecw_bitmap;
- __le16 tx_op_limit;
-} __packed;
-
struct ieee_types_wmm_parameter {
/*
* WMM Parameter IE - Vendor Specific Header:
@@ -1186,6 +1274,31 @@ struct mwifiex_ie_types_htcap {
struct ieee80211_ht_cap ht_cap;
} __packed;
+struct mwifiex_ie_types_vhtcap {
+ struct mwifiex_ie_types_header header;
+ struct ieee80211_vht_cap vht_cap;
+} __packed;
+
+struct mwifiex_ie_types_oper_mode_ntf {
+ struct mwifiex_ie_types_header header;
+ u8 oper_mode;
+} __packed;
+
+/* VHT Operations IE */
+struct mwifiex_ie_types_vht_oper {
+ struct mwifiex_ie_types_header header;
+ u8 chan_width;
+ u8 chan_center_freq_1;
+ u8 chan_center_freq_2;
+ /* Basic MCS set map, each 2 bits stands for a NSS */
+ u16 basic_mcs_map;
+} __packed;
+
+struct mwifiex_ie_types_wmmcap {
+ struct mwifiex_ie_types_header header;
+ struct mwifiex_types_wmm_info wmm_info;
+} __packed;
+
struct mwifiex_ie_types_htinfo {
struct mwifiex_ie_types_header header;
struct ieee80211_ht_operation ht_oper;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 39f03ce5a5b1..e38aa9b3663d 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -39,11 +39,8 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
unsigned long flags;
bss_prio = kzalloc(sizeof(struct mwifiex_bss_prio_node), GFP_KERNEL);
- if (!bss_prio) {
- dev_err(adapter->dev, "%s: failed to alloc bss_prio\n",
- __func__);
+ if (!bss_prio)
return -ENOMEM;
- }
bss_prio->priv = priv;
INIT_LIST_HEAD(&bss_prio->list);
@@ -317,7 +314,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->pm_wakeup_fw_try = false;
- adapter->max_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
@@ -591,6 +587,12 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
return -1;
}
}
+
+ if (adapter->if_ops.init_fw_port) {
+ if (adapter->if_ops.init_fw_port(adapter))
+ return -1;
+ }
+
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta);
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 4e31c6013ebe..d85e6eb1f58a 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -20,7 +20,6 @@
#ifndef _MWIFIEX_IOCTL_H_
#define _MWIFIEX_IOCTL_H_
-#include <net/mac80211.h>
#include <net/lib80211.h>
enum {
@@ -61,6 +60,8 @@ enum {
BAND_A = 4,
BAND_GN = 8,
BAND_AN = 16,
+ BAND_GAC = 32,
+ BAND_AAC = 64,
};
#define MWIFIEX_WPA_PASSHPHRASE_LEN 64
@@ -104,9 +105,12 @@ struct mwifiex_uap_bss_param {
struct wpa_param wpa_cfg;
struct wep_key wep_cfg[NUM_WEP_KEYS];
struct ieee80211_ht_cap ht_cap;
+ struct ieee80211_vht_cap vht_cap;
u8 rates[MWIFIEX_SUPPORTED_RATES];
u32 sta_ao_timer;
u32 ps_sta_ao_timer;
+ u8 qos_info;
+ struct mwifiex_types_wmm_info wmm_info;
};
enum {
@@ -177,7 +181,6 @@ struct mwifiex_ds_tx_ba_stream_tbl {
struct mwifiex_debug_info {
u32 int_counter;
u32 packets_out[MAX_NUM_TID];
- u32 max_tx_buf_size;
u32 tx_buf_size;
u32 curr_tx_buf_size;
u32 tx_tbl_num;
@@ -272,6 +275,7 @@ struct mwifiex_ds_pm_cfg {
struct mwifiex_ds_11n_tx_cfg {
u16 tx_htcap;
u16 tx_htinfo;
+ u16 misc_config; /* Needed for 802.11AC cards only */
};
struct mwifiex_ds_11n_amsdu_aggr_ctrl {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 88664ae667ba..246aa62a4817 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -24,6 +24,7 @@
#include "main.h"
#include "wmm.h"
#include "11n.h"
+#include "11ac.h"
#define CAPINFO_MASK (~(BIT(15) | BIT(14) | BIT(12) | BIT(11) | BIT(9)))
@@ -157,8 +158,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
memset(rate1, 0, rate1_size);
- for (i = 0; rate2[i] && i < rate2_size; i++) {
- for (j = 0; tmp[j] && j < rate1_size; j++) {
+ for (i = 0; i < rate2_size && rate2[i]; i++) {
+ for (j = 0; j < rate1_size && tmp[j]; j++) {
/* Check common rate, excluding the bit for
basic rate */
if ((rate2[i] & 0x7F) == (tmp[j] & 0x7F)) {
@@ -398,8 +399,6 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
pos = (u8 *) assoc;
- mwifiex_cfg_tx_buf(priv, bss_desc);
-
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_ASSOCIATE);
/* Save so we know which BSS Desc to use in the response handler */
@@ -514,6 +513,12 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
priv->adapter->config_bands & BAND_AN))
mwifiex_cmd_append_11n_tlv(priv, bss_desc, &pos);
+ if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
+ !bss_desc->disable_11n && !bss_desc->disable_11ac &&
+ (priv->adapter->config_bands & BAND_GAC ||
+ priv->adapter->config_bands & BAND_AAC))
+ mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos);
+
/* Append vendor specific IE TLV */
mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_ASSOC, &pos);
@@ -615,23 +620,33 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct ieee_types_assoc_rsp *assoc_rsp;
struct mwifiex_bssdescriptor *bss_desc;
u8 enable_data = true;
+ u16 cap_info, status_code;
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
+ cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
+ status_code = le16_to_cpu(assoc_rsp->status_code);
+
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
sizeof(priv->assoc_rsp_buf));
memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
- if (le16_to_cpu(assoc_rsp->status_code)) {
+ if (status_code) {
priv->adapter->dbg.num_cmd_assoc_failure++;
dev_err(priv->adapter->dev,
"ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n",
- le16_to_cpu(assoc_rsp->status_code),
- le16_to_cpu(assoc_rsp->cap_info_bitmap),
- le16_to_cpu(assoc_rsp->a_id));
+ status_code, cap_info, le16_to_cpu(assoc_rsp->a_id));
+
+ if (cap_info == MWIFIEX_TIMEOUT_FOR_AP_RESP) {
+ if (status_code == MWIFIEX_STATUS_CODE_AUTH_TIMEOUT)
+ ret = WLAN_STATUS_AUTH_TIMEOUT;
+ else
+ ret = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ } else {
+ ret = status_code;
+ }
- ret = le16_to_cpu(assoc_rsp->status_code);
goto done;
}
@@ -969,6 +984,16 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
priv->adapter->config_bands);
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ if (adapter->sec_chan_offset ==
+ IEEE80211_HT_PARAM_CHA_SEC_NONE) {
+ u16 tmp_ht_cap;
+
+ tmp_ht_cap = le16_to_cpu(ht_cap->ht_cap.cap_info);
+ tmp_ht_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ tmp_ht_cap &= ~IEEE80211_HT_CAP_SGI_40;
+ ht_cap->ht_cap.cap_info = cpu_to_le16(tmp_ht_cap);
+ }
+
pos += sizeof(struct mwifiex_ie_types_htcap);
cmd_append_size += sizeof(struct mwifiex_ie_types_htcap);
@@ -1403,6 +1428,7 @@ mwifiex_band_to_radio_type(u8 band)
case BAND_A:
case BAND_AN:
case BAND_A | BAND_AN:
+ case BAND_A | BAND_AN | BAND_AAC:
return HostCmd_SCAN_RADIO_TYPE_A;
case BAND_B:
case BAND_G:
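
The mwifiex_get_common_rates() change above reorders the loop conditions so the index bound is checked before the element is dereferenced; otherwise the scan can read past the end of a rate array that has no terminating zero. A minimal sketch of the corrected pattern:

#include <stddef.h>

/* Return nonzero if 'want' appears in 'rates'; the bound is evaluated
 * before rates[i], so a table without a terminating 0 is never overread.
 */
static int rate_present(const unsigned char *rates, size_t size,
			unsigned char want)
{
	size_t i;

	for (i = 0; i < size && rates[i]; i++)
		if ((rates[i] & 0x7F) == (want & 0x7F))	/* ignore basic-rate bit */
			return 1;
	return 0;
}
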
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 1b3cfc821940..553adfb0aa81 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -295,6 +295,13 @@ struct mwifiex_bssdescriptor {
u16 bss_co_2040_offset;
u8 *bcn_ext_cap;
u16 ext_cap_offset;
+ struct ieee80211_vht_cap *bcn_vht_cap;
+ u16 vht_cap_offset;
+ struct ieee80211_vht_operation *bcn_vht_oper;
+ u16 vht_info_offset;
+ struct ieee_types_oper_mode_ntf *oper_mode;
+ u16 oper_mode_offset;
+ u8 disable_11ac;
struct ieee_types_vendor_specific *bcn_wpa_ie;
u16 wpa_offset;
struct ieee_types_generic *bcn_rsn_ie;
@@ -499,6 +506,7 @@ struct mwifiex_private {
u16 rsn_idx;
struct timer_list scan_delay_timer;
u8 ap_11n_enabled;
+ u8 ap_11ac_enabled;
u32 mgmt_frame_mask;
struct mwifiex_roc_cfg roc_cfg;
};
@@ -599,8 +607,10 @@ struct mwifiex_if_ops {
int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
+ int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
+ int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
};
struct mwifiex_adapter {
@@ -629,7 +639,6 @@ struct mwifiex_adapter {
/* spin lock for main process */
spinlock_t main_proc_lock;
u32 mwifiex_processing;
- u16 max_tx_buf_size;
u16 tx_buf_size;
u16 curr_tx_buf_size;
u32 ioport;
@@ -721,6 +730,15 @@ struct mwifiex_adapter {
u16 max_mgmt_ie_index;
u8 scan_delay_cnt;
u8 empty_tx_q_cnt;
+
+ /* 11AC */
+ u32 is_hw_11ac_capable;
+ u32 hw_dot_11ac_dev_cap;
+ u32 hw_dot_11ac_mcs_support;
+ u32 usr_dot_11ac_dev_cap_bg;
+ u32 usr_dot_11ac_dev_cap_a;
+ u32 usr_dot_11ac_mcs_support;
+
atomic_t is_tx_received;
atomic_t pending_bridged_pkts;
};
@@ -863,8 +881,10 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd);
struct mwifiex_chan_freq_power *mwifiex_get_cfp(struct mwifiex_private *priv,
u8 band, u16 channel, u32 freq);
-u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
- u8 ht_info);
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv,
+ u8 index, u8 ht_info);
+u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
+ u8 index, u8 ht_info);
u32 mwifiex_find_freq_from_band_chan(u8, u8);
int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u8 **buffer);
@@ -890,6 +910,10 @@ void mwifiex_set_ht_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params);
void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params);
+void
+mwifiex_set_wmm_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_ap_settings *params);
/*
* This function checks if the queuing is RA based or not.
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 13fbc4eb1595..4b54bcf382f3 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -39,17 +39,20 @@ static struct semaphore add_remove_card_sem;
static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter);
static int mwifiex_pcie_resume(struct pci_dev *pdev);
-/*
- * This function is called after skb allocation to update
- * "skb->cb" with physical address of data pointer.
- */
-static phys_addr_t *mwifiex_update_sk_buff_pa(struct sk_buff *skb)
+static int
+mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
+ int size, int flags)
{
- phys_addr_t *buf_pa = MWIFIEX_SKB_PACB(skb);
-
- *buf_pa = (phys_addr_t)virt_to_phys(skb->data);
+ struct pcie_service_card *card = adapter->card;
+ dma_addr_t buf_pa;
- return buf_pa;
+ buf_pa = pci_map_single(card->dev, skb->data, size, flags);
+ if (pci_dma_mapping_error(card->dev, buf_pa)) {
+ dev_err(adapter->dev, "failed to map pci memory!\n");
+ return -1;
+ }
+ memcpy(skb->cb, &buf_pa, sizeof(dma_addr_t));
+ return 0;
}
/*
@@ -59,9 +62,13 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
{
u32 *cookie_addr;
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ if (!reg->sleep_cookie)
+ return true;
- if (card->sleep_cookie) {
- cookie_addr = (u32 *)card->sleep_cookie->data;
+ if (card->sleep_cookie_vbase) {
+ cookie_addr = (u32 *)card->sleep_cookie_vbase;
dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n",
*cookie_addr);
if (*cookie_addr == FW_AWAKE_COOKIE)
@@ -91,6 +98,13 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->dev = pdev;
+ if (ent->driver_data) {
+ struct mwifiex_pcie_device *data = (void *)ent->driver_data;
+ card->pcie.firmware = data->firmware;
+ card->pcie.reg = data->reg;
+ card->pcie.blksz_fw_dl = data->blksz_fw_dl;
+ }
+
if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
MWIFIEX_PCIE)) {
pr_err("%s failed\n", __func__);
@@ -161,7 +175,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
- if (!card || card->adapter) {
+ if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
}
@@ -227,13 +241,16 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev)
return 0;
}
-#define PCIE_VENDOR_ID_MARVELL (0x11ab)
-#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
-
static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ .driver_data = (unsigned long) &mwifiex_pcie8766,
+ },
+ {
+ PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ .driver_data = (unsigned long) &mwifiex_pcie8897,
},
{},
};
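
Each PCI id entry now carries a pointer to per-chip data in .driver_data, which the probe hunk above reads back through ent->driver_data to pick the firmware image, register map and download block size. A simplified sketch of the pattern (illustrative field names and values, not the real mwifiex_pcie_device layout):

struct chip_info_sketch {
	const char *firmware;		/* firmware image name */
	unsigned int blksz_fw_dl;	/* download block size */
};

static const struct chip_info_sketch chip_a = { "chip_a_fw.bin", 256 };
/* id table entry would carry:  .driver_data = (unsigned long)&chip_a */

static int probe_sketch(unsigned long driver_data,
			struct chip_info_sketch *out)
{
	const struct chip_info_sketch *info = (const void *)driver_data;

	if (!info)
		return -1;
	*out = *info;		/* copy parameters into the card structure */
	return 0;
}
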
@@ -286,8 +303,10 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
int i = 0;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- while (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ while (reg->sleep_cookie && mwifiex_pcie_ok_to_access_hw(adapter)) {
i++;
usleep_range(10, 20);
/* 50ms max wait */
@@ -361,14 +380,246 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter)
}
/*
- * This function creates buffer descriptor ring for TX
+ * This function initializes TX buffer ring descriptors
*/
-static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
+static int mwifiex_init_txq_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ card->tx_buf_list[i] = NULL;
+ if (reg->pfu_enabled) {
+ card->txbd_ring[i] = (void *)card->txbd_ring_vbase +
+ (sizeof(*desc2) * i);
+ desc2 = card->txbd_ring[i];
+ memset(desc2, 0, sizeof(*desc2));
+ } else {
+ card->txbd_ring[i] = (void *)card->txbd_ring_vbase +
+ (sizeof(*desc) * i);
+ desc = card->txbd_ring[i];
+ memset(desc, 0, sizeof(*desc));
+ }
+ }
+
+ return 0;
+}
+
+/* This function initializes RX buffer ring descriptors. Each SKB is allocated
+ * here and, once its PCI memory is mapped, the resulting physical address is
+ * written into the PCIe RX buffer descriptor.
+ */
+static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ struct sk_buff *skb;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+ dma_addr_t buf_pa;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ /* Allocate skb here so that firmware can DMA data from it */
+ skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+ if (!skb) {
+ dev_err(adapter->dev,
+ "Unable to allocate skb for RX ring.\n");
+ kfree(card->rxbd_ring_vbase);
+ return -ENOMEM;
+ }
+
+ if (mwifiex_map_pci_memory(adapter, skb,
+ MWIFIEX_RX_DATA_BUF_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
+
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+
+ dev_dbg(adapter->dev,
+ "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+ skb, skb->len, skb->data, (u32)buf_pa,
+ (u32)((u64)buf_pa >> 32));
+
+ card->rx_buf_list[i] = skb;
+ if (reg->pfu_enabled) {
+ card->rxbd_ring[i] = (void *)card->rxbd_ring_vbase +
+ (sizeof(*desc2) * i);
+ desc2 = card->rxbd_ring[i];
+ desc2->paddr = buf_pa;
+ desc2->len = (u16)skb->len;
+ desc2->frag_len = (u16)skb->len;
+ desc2->flags = reg->ring_flag_eop | reg->ring_flag_sop;
+ desc2->offset = 0;
+ } else {
+ card->rxbd_ring[i] = (void *)(card->rxbd_ring_vbase +
+ (sizeof(*desc) * i));
+ desc = card->rxbd_ring[i];
+ desc->paddr = buf_pa;
+ desc->len = (u16)skb->len;
+ desc->flags = 0;
+ }
+ }
+
+ return 0;
+}
+
+/* This function initializes event buffer ring descriptors. Each SKB is
+ * allocated here and, once its PCI memory is mapped, the resulting physical
+ * address is written into the PCIe event buffer descriptor.
+ */
+static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct mwifiex_evt_buf_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t buf_pa;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
+ /* Allocate skb here so that firmware can DMA data from it */
+ skb = dev_alloc_skb(MAX_EVENT_SIZE);
+ if (!skb) {
+ dev_err(adapter->dev,
+ "Unable to allocate skb for EVENT buf.\n");
+ kfree(card->evtbd_ring_vbase);
+ return -ENOMEM;
+ }
+ skb_put(skb, MAX_EVENT_SIZE);
+
+ if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
+
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+
+ dev_dbg(adapter->dev,
+ "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+ skb, skb->len, skb->data, (u32)buf_pa,
+ (u32)((u64)buf_pa >> 32));
+
+ card->evt_buf_list[i] = skb;
+ card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase +
+ (sizeof(*desc) * i));
+ desc = card->evtbd_ring[i];
+ desc->paddr = buf_pa;
+ desc->len = (u16)skb->len;
+ desc->flags = 0;
+ }
+
+ return 0;
+}
+
+/* This function cleans up the TX buffer ring. Any entry in the buffer list
+ * that still holds a valid SKB is freed.
+ */
+static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ struct sk_buff *skb;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ if (reg->pfu_enabled) {
+ desc2 = card->txbd_ring[i];
+ if (card->tx_buf_list[i]) {
+ skb = card->tx_buf_list[i];
+ pci_unmap_single(card->dev, desc2->paddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ memset(desc2, 0, sizeof(*desc2));
+ } else {
+ desc = card->txbd_ring[i];
+ if (card->tx_buf_list[i]) {
+ skb = card->tx_buf_list[i];
+ pci_unmap_single(card->dev, desc->paddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ memset(desc, 0, sizeof(*desc));
+ }
+ card->tx_buf_list[i] = NULL;
+ }
+
+ return;
+}
+
+/* This function cleans up the RX buffer ring. Any entry in the buffer list
+ * that still holds a valid SKB is freed.
+ */
+static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
struct sk_buff *skb;
int i;
- phys_addr_t *buf_pa;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ if (reg->pfu_enabled) {
+ desc2 = card->rxbd_ring[i];
+ if (card->rx_buf_list[i]) {
+ skb = card->rx_buf_list[i];
+ pci_unmap_single(card->dev, desc2->paddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ memset(desc2, 0, sizeof(*desc2));
+ } else {
+ desc = card->rxbd_ring[i];
+ if (card->rx_buf_list[i]) {
+ skb = card->rx_buf_list[i];
+ pci_unmap_single(card->dev, desc->paddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ memset(desc, 0, sizeof(*desc));
+ }
+ card->rx_buf_list[i] = NULL;
+ }
+
+ return;
+}
+
+/* This function cleans up the event buffer ring. Any entry in the buffer
+ * list that still holds a valid SKB is freed.
+ */
+static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct mwifiex_evt_buf_desc *desc;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
+ desc = card->evtbd_ring[i];
+ if (card->evt_buf_list[i]) {
+ skb = card->evt_buf_list[i];
+ pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ card->evt_buf_list[i] = NULL;
+ memset(desc, 0, sizeof(*desc));
+ }
+
+ return;
+}
+
+/* This function creates buffer descriptor ring for TX
+ */
+static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
/*
* driver maintaines the write pointer and firmware maintaines the read
@@ -376,76 +627,56 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
* starts at zero with rollover bit set
*/
card->txbd_wrptr = 0;
- card->txbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+
+ if (reg->pfu_enabled)
+ card->txbd_rdptr = 0;
+ else
+ card->txbd_rdptr |= reg->tx_rollover_ind;
/* allocate shared memory for the BD ring and divide the same in to
several descriptors */
- card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_TXRX_BD;
+ if (reg->pfu_enabled)
+ card->txbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
+ else
+ card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
+
dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
card->txbd_ring_size);
- card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
+ card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
+ card->txbd_ring_size,
+ &card->txbd_ring_pbase);
if (!card->txbd_ring_vbase) {
- dev_err(adapter->dev, "Unable to alloc buffer for txbd ring\n");
+ dev_err(adapter->dev,
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->txbd_ring_size);
return -ENOMEM;
}
- card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
-
dev_dbg(adapter->dev,
"info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
- card->txbd_ring_vbase, (u32)card->txbd_ring_pbase,
+ card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
(u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size);
- for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
- card->txbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->txbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc)
- * i));
-
- /* Allocate buffer here so that firmware can DMA data from it */
- skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
- if (!skb) {
- dev_err(adapter->dev, "Unable to allocate skb for TX ring.\n");
- kfree(card->txbd_ring_vbase);
- return -ENOMEM;
- }
- buf_pa = mwifiex_update_sk_buff_pa(skb);
-
- skb_put(skb, MWIFIEX_RX_DATA_BUF_SIZE);
- dev_dbg(adapter->dev, "info: TX ring: add new skb base: %p, "
- "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
- skb, skb->data, (u32)*buf_pa,
- (u32)(((u64)*buf_pa >> 32)), skb->len);
-
- card->tx_buf_list[i] = skb;
- card->txbd_ring[i]->paddr = *buf_pa;
- card->txbd_ring[i]->len = (u16)skb->len;
- card->txbd_ring[i]->flags = 0;
- }
-
- return 0;
+ return mwifiex_init_txq_ring(adapter);
}
static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- int i;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
- if (card->tx_buf_list[i])
- dev_kfree_skb_any(card->tx_buf_list[i]);
- card->tx_buf_list[i] = NULL;
- card->txbd_ring[i]->paddr = 0;
- card->txbd_ring[i]->len = 0;
- card->txbd_ring[i]->flags = 0;
- card->txbd_ring[i] = NULL;
- }
+ mwifiex_cleanup_txq_ring(adapter);
- kfree(card->txbd_ring_vbase);
+ if (card->txbd_ring_vbase)
+ pci_free_consistent(card->dev, card->txbd_ring_size,
+ card->txbd_ring_vbase,
+ card->txbd_ring_pbase);
card->txbd_ring_size = 0;
card->txbd_wrptr = 0;
- card->txbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->txbd_rdptr = 0 | reg->tx_rollover_ind;
card->txbd_ring_vbase = NULL;
+ card->txbd_ring_pbase = 0;
return 0;
}
@@ -456,9 +687,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct sk_buff *skb;
- int i;
- phys_addr_t *buf_pa;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
/*
* driver maintaines the read pointer and firmware maintaines the write
@@ -466,19 +695,26 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
* starts at zero with rollover bit set
*/
card->rxbd_wrptr = 0;
- card->rxbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->rxbd_rdptr = reg->rx_rollover_ind;
+
+ if (reg->pfu_enabled)
+ card->rxbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
+ else
+ card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
- card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_TXRX_BD;
dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
card->rxbd_ring_size);
- card->rxbd_ring_vbase = kzalloc(card->rxbd_ring_size, GFP_KERNEL);
+ card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
+ card->rxbd_ring_size,
+ &card->rxbd_ring_pbase);
if (!card->rxbd_ring_vbase) {
- dev_err(adapter->dev, "Unable to allocate buffer for "
- "rxbd_ring.\n");
+ dev_err(adapter->dev,
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->rxbd_ring_size);
return -ENOMEM;
}
- card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
dev_dbg(adapter->dev,
"info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
@@ -486,35 +722,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
(u32)((u64)card->rxbd_ring_pbase >> 32),
card->rxbd_ring_size);
- for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
- card->rxbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->rxbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc)
- * i));
-
- /* Allocate skb here so that firmware can DMA data from it */
- skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
- if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for RX ring.\n");
- kfree(card->rxbd_ring_vbase);
- return -ENOMEM;
- }
- buf_pa = mwifiex_update_sk_buff_pa(skb);
- skb_put(skb, MWIFIEX_RX_DATA_BUF_SIZE);
-
- dev_dbg(adapter->dev, "info: RX ring: add new skb base: %p, "
- "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
- skb, skb->data, (u32)*buf_pa, (u32)((u64)*buf_pa >> 32),
- skb->len);
-
- card->rx_buf_list[i] = skb;
- card->rxbd_ring[i]->paddr = *buf_pa;
- card->rxbd_ring[i]->len = (u16)skb->len;
- card->rxbd_ring[i]->flags = 0;
- }
-
- return 0;
+ return mwifiex_init_rxq_ring(adapter);
}
/*
@@ -523,23 +731,19 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- int i;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
- if (card->rx_buf_list[i])
- dev_kfree_skb_any(card->rx_buf_list[i]);
- card->rx_buf_list[i] = NULL;
- card->rxbd_ring[i]->paddr = 0;
- card->rxbd_ring[i]->len = 0;
- card->rxbd_ring[i]->flags = 0;
- card->rxbd_ring[i] = NULL;
- }
+ mwifiex_cleanup_rxq_ring(adapter);
- kfree(card->rxbd_ring_vbase);
+ if (card->rxbd_ring_vbase)
+ pci_free_consistent(card->dev, card->rxbd_ring_size,
+ card->rxbd_ring_vbase,
+ card->rxbd_ring_pbase);
card->rxbd_ring_size = 0;
card->rxbd_wrptr = 0;
- card->rxbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->rxbd_rdptr = 0 | reg->rx_rollover_ind;
card->rxbd_ring_vbase = NULL;
+ card->rxbd_ring_pbase = 0;
return 0;
}
@@ -550,9 +754,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct sk_buff *skb;
- int i;
- phys_addr_t *buf_pa;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
/*
* driver maintaines the read pointer and firmware maintaines the write
@@ -560,19 +762,22 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
* starts at zero with rollover bit set
*/
card->evtbd_wrptr = 0;
- card->evtbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->evtbd_rdptr = reg->evt_rollover_ind;
+
+ card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) *
+ MWIFIEX_MAX_EVT_BD;
- card->evtbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_EVT_BD;
dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
card->evtbd_ring_size);
- card->evtbd_ring_vbase = kzalloc(card->evtbd_ring_size, GFP_KERNEL);
+ card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
+ card->evtbd_ring_size,
+ &card->evtbd_ring_pbase);
if (!card->evtbd_ring_vbase) {
dev_err(adapter->dev,
- "Unable to allocate buffer. Terminating download\n");
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->evtbd_ring_size);
return -ENOMEM;
}
- card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
dev_dbg(adapter->dev,
"info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
@@ -580,35 +785,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
(u32)((u64)card->evtbd_ring_pbase >> 32),
card->evtbd_ring_size);
- for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
- card->evtbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->evtbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc)
- * i));
-
- /* Allocate skb here so that firmware can DMA data from it */
- skb = dev_alloc_skb(MAX_EVENT_SIZE);
- if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for EVENT buf.\n");
- kfree(card->evtbd_ring_vbase);
- return -ENOMEM;
- }
- buf_pa = mwifiex_update_sk_buff_pa(skb);
- skb_put(skb, MAX_EVENT_SIZE);
-
- dev_dbg(adapter->dev, "info: Evt ring: add new skb. base: %p, "
- "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
- skb, skb->data, (u32)*buf_pa, (u32)((u64)*buf_pa >> 32),
- skb->len);
-
- card->evt_buf_list[i] = skb;
- card->evtbd_ring[i]->paddr = *buf_pa;
- card->evtbd_ring[i]->len = (u16)skb->len;
- card->evtbd_ring[i]->flags = 0;
- }
-
- return 0;
+ return mwifiex_pcie_init_evt_ring(adapter);
}
/*
@@ -617,23 +794,19 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- int i;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
- if (card->evt_buf_list[i])
- dev_kfree_skb_any(card->evt_buf_list[i]);
- card->evt_buf_list[i] = NULL;
- card->evtbd_ring[i]->paddr = 0;
- card->evtbd_ring[i]->len = 0;
- card->evtbd_ring[i]->flags = 0;
- card->evtbd_ring[i] = NULL;
- }
+ mwifiex_cleanup_evt_ring(adapter);
- kfree(card->evtbd_ring_vbase);
+ if (card->evtbd_ring_vbase)
+ pci_free_consistent(card->dev, card->evtbd_ring_size,
+ card->evtbd_ring_vbase,
+ card->evtbd_ring_pbase);
card->evtbd_wrptr = 0;
- card->evtbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->evtbd_rdptr = 0 | reg->evt_rollover_ind;
card->evtbd_ring_size = 0;
card->evtbd_ring_vbase = NULL;
+ card->evtbd_ring_pbase = 0;
return 0;
}
@@ -653,21 +826,12 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
"Unable to allocate skb for command response data.\n");
return -ENOMEM;
}
- mwifiex_update_sk_buff_pa(skb);
skb_put(skb, MWIFIEX_UPLD_SIZE);
- card->cmdrsp_buf = skb;
+ if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
- skb = NULL;
- /* Allocate memory for sending command to firmware */
- skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
- if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for command data.\n");
- return -ENOMEM;
- }
- mwifiex_update_sk_buff_pa(skb);
- skb_put(skb, MWIFIEX_SIZE_OF_CMD_BUFFER);
- card->cmd_buf = skb;
+ card->cmdrsp_buf = skb;
return 0;
}
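
mwifiex_map_pci_memory() replaces the old virt_to_phys() scheme with a real streaming DMA mapping: pci_map_single() on the skb data, an explicit mapping-error check, and the dma_addr_t stashed in skb->cb so the completion path can unmap it. A condensed sketch of that pattern (simplified error handling; names here are illustrative stand-ins for the driver's helper and MWIFIEX_SKB_PACB()):

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int map_skb_sketch(struct pci_dev *pdev, struct sk_buff *skb,
			  size_t size, int dir)
{
	dma_addr_t pa = pci_map_single(pdev, skb->data, size, dir);

	if (pci_dma_mapping_error(pdev, pa))
		return -ENOMEM;
	memcpy(skb->cb, &pa, sizeof(pa));	/* remembered for later unmap */
	return 0;
}

static void unmap_skb_sketch(struct pci_dev *pdev, struct sk_buff *skb,
			     size_t size, int dir)
{
	dma_addr_t pa;

	memcpy(&pa, skb->cb, sizeof(pa));	/* counterpart of the map step */
	pci_unmap_single(pdev, pa, size, dir);
}
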
@@ -678,18 +842,26 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card;
+ dma_addr_t buf_pa;
if (!adapter)
return 0;
card = adapter->card;
- if (card && card->cmdrsp_buf)
+ if (card && card->cmdrsp_buf) {
+ MWIFIEX_SKB_PACB(card->cmdrsp_buf, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
+ }
- if (card && card->cmd_buf)
+ if (card && card->cmd_buf) {
+ MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_SIZE_OF_CMD_BUFFER,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(card->cmd_buf);
-
+ }
return 0;
}
@@ -698,27 +870,19 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
{
- struct sk_buff *skb;
struct pcie_service_card *card = adapter->card;
- /* Allocate memory for sleep cookie */
- skb = dev_alloc_skb(sizeof(u32));
- if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for sleep cookie!\n");
+ card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
+ &card->sleep_cookie_pbase);
+ if (!card->sleep_cookie_vbase) {
+ dev_err(adapter->dev, "pci_alloc_consistent failed!\n");
return -ENOMEM;
}
- mwifiex_update_sk_buff_pa(skb);
- skb_put(skb, sizeof(u32));
-
/* Init val of Sleep Cookie */
- *(u32 *)skb->data = FW_AWAKE_COOKIE;
+ *(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE;
dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n",
- *((u32 *)skb->data));
-
- /* Save the sleep cookie */
- card->sleep_cookie = skb;
+ *((u32 *)card->sleep_cookie_vbase));
return 0;
}
@@ -735,86 +899,246 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
- if (card && card->sleep_cookie) {
- dev_kfree_skb_any(card->sleep_cookie);
- card->sleep_cookie = NULL;
+ if (card && card->sleep_cookie_vbase) {
+ pci_free_consistent(card->dev, sizeof(u32),
+ card->sleep_cookie_vbase,
+ card->sleep_cookie_pbase);
+ card->sleep_cookie_vbase = NULL;
}
return 0;
}
+/* This function flushes the TX buffer descriptor ring.
+ * It is also registered as the clean_pcie_ring handler and is called while
+ * cleaning up TX/RX during disconnect/BSS stop.
+ */
+static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ u32 rdptr;
+
+ /* Read the TX ring read pointer set by firmware */
+ if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
+ dev_err(adapter->dev,
+ "Flush TXBD: failed to read reg->tx_rdptr\n");
+ return -1;
+ }
+
+ if (!mwifiex_pcie_txbd_empty(card, rdptr)) {
+ card->txbd_flush = 1;
+ /* write pointer already set at last send
+ * send dnld-rdy intr again, wait for completion.
+ */
+ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
+ CPU_INTR_DNLD_RDY)) {
+ dev_err(adapter->dev,
+ "failed to assert dnld-rdy interrupt.\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
/*
- * This function sends data buffer to device
+ * This function unmaps and frees downloaded data buffer
*/
-static int
-mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb)
+static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
{
+ struct sk_buff *skb;
+ dma_addr_t buf_pa;
+ u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
struct pcie_service_card *card = adapter->card;
- u32 wrindx, rdptr;
- phys_addr_t *buf_pa;
- __le16 *tmp;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
mwifiex_pm_wakeup_card(adapter);
/* Read the TX ring read pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_TXBD_RDPTR, &rdptr)) {
+ if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
dev_err(adapter->dev,
- "SEND DATA: failed to read REG_TXBD_RDPTR\n");
+ "SEND COMP: failed to read reg->tx_rdptr\n");
return -1;
}
- wrindx = card->txbd_wrptr & MWIFIEX_TXBD_MASK;
+ dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
+ card->txbd_rdptr, rdptr);
- dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n", rdptr,
- card->txbd_wrptr);
- if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) !=
- (rdptr & MWIFIEX_TXBD_MASK)) ||
- ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
- (rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
- struct sk_buff *skb_data;
+ num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
+ /* free from previous txbd_rdptr to current txbd_rdptr */
+ while (((card->txbd_rdptr & reg->tx_mask) !=
+ (rdptr & reg->tx_mask)) ||
+ ((card->txbd_rdptr & reg->tx_rollover_ind) !=
+ (rdptr & reg->tx_rollover_ind))) {
+ wrdoneidx = (card->txbd_rdptr & reg->tx_mask) >>
+ reg->tx_start_ptr;
+
+ skb = card->tx_buf_list[wrdoneidx];
+ if (skb) {
+ dev_dbg(adapter->dev,
+ "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
+ skb, wrdoneidx);
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, skb->len,
+ PCI_DMA_TODEVICE);
+
+ unmap_count++;
+
+ if (card->txbd_flush)
+ mwifiex_write_data_complete(adapter, skb, 0,
+ -1);
+ else
+ mwifiex_write_data_complete(adapter, skb, 0, 0);
+ }
+
+ card->tx_buf_list[wrdoneidx] = NULL;
+
+ if (reg->pfu_enabled) {
+ desc2 = (void *)card->txbd_ring[wrdoneidx];
+ memset(desc2, 0, sizeof(*desc2));
+ } else {
+ desc = card->txbd_ring[wrdoneidx];
+ memset(desc, 0, sizeof(*desc));
+ }
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ card->txbd_rdptr++;
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ card->txbd_rdptr += reg->ring_tx_start_ptr;
+ break;
+ }
+
+
+ if ((card->txbd_rdptr & reg->tx_mask) == num_tx_buffs)
+ card->txbd_rdptr = ((card->txbd_rdptr &
+ reg->tx_rollover_ind) ^
+ reg->tx_rollover_ind);
+ }
+
+ if (unmap_count)
+ adapter->data_sent = false;
+
+ if (card->txbd_flush) {
+ if (mwifiex_pcie_txbd_empty(card, card->txbd_rdptr))
+ card->txbd_flush = 0;
+ else
+ mwifiex_clean_pcie_ring_buf(adapter);
+ }
+
+ return 0;
+}
+
+/* This function sends a data buffer to the device. The first 4 bytes of the
+ * payload are filled with the payload length and payload type. The payload is
+ * then mapped to PCI device memory and the TX ring pointers are advanced.
+ * The download-ready interrupt to FW is deferred if the TX ring is not full
+ * and additional payload can be accommodated.
+ */
+static int
+mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
+ struct mwifiex_tx_param *tx_param)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ u32 wrindx, num_tx_buffs, rx_val;
+ int ret;
+ dma_addr_t buf_pa;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+ __le16 *tmp;
+
+ if (!(skb->data && skb->len)) {
+ dev_err(adapter->dev, "%s(): invalid parameter <%p, %#x>\n",
+ __func__, skb->data, skb->len);
+ return -1;
+ }
+
+ if (!mwifiex_pcie_ok_to_access_hw(adapter))
+ mwifiex_pm_wakeup_card(adapter);
+
+ num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
+ dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
+ card->txbd_rdptr, card->txbd_wrptr);
+ if (mwifiex_pcie_txbd_not_full(card)) {
u8 *payload;
adapter->data_sent = true;
- skb_data = card->tx_buf_list[wrindx];
- memcpy(skb_data->data, skb->data, skb->len);
- payload = skb_data->data;
+ payload = skb->data;
tmp = (__le16 *)&payload[0];
*tmp = cpu_to_le16((u16)skb->len);
tmp = (__le16 *)&payload[2];
*tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
- skb_put(skb_data, MWIFIEX_RX_DATA_BUF_SIZE - skb_data->len);
- skb_trim(skb_data, skb->len);
- buf_pa = MWIFIEX_SKB_PACB(skb_data);
- card->txbd_ring[wrindx]->paddr = *buf_pa;
- card->txbd_ring[wrindx]->len = (u16)skb_data->len;
- card->txbd_ring[wrindx]->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
- MWIFIEX_BD_FLAG_LAST_DESC;
-
- if ((++card->txbd_wrptr & MWIFIEX_TXBD_MASK) ==
- MWIFIEX_MAX_TXRX_BD)
- card->txbd_wrptr = ((card->txbd_wrptr &
- MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
- MWIFIEX_BD_FLAG_ROLLOVER_IND);
- /* Write the TX ring write pointer in to REG_TXBD_WRPTR */
- if (mwifiex_write_reg(adapter, REG_TXBD_WRPTR,
- card->txbd_wrptr)) {
- dev_err(adapter->dev,
- "SEND DATA: failed to write REG_TXBD_WRPTR\n");
- return 0;
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len,
+ PCI_DMA_TODEVICE))
+ return -1;
+
+ wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+ card->tx_buf_list[wrindx] = skb;
+
+ if (reg->pfu_enabled) {
+ desc2 = (void *)card->txbd_ring[wrindx];
+ desc2->paddr = buf_pa;
+ desc2->len = (u16)skb->len;
+ desc2->frag_len = (u16)skb->len;
+ desc2->offset = 0;
+ desc2->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
+ MWIFIEX_BD_FLAG_LAST_DESC;
+ } else {
+ desc = card->txbd_ring[wrindx];
+ desc->paddr = buf_pa;
+ desc->len = (u16)skb->len;
+ desc->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
+ MWIFIEX_BD_FLAG_LAST_DESC;
}
- /* Send the TX ready interrupt */
- if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
- CPU_INTR_DNLD_RDY)) {
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ card->txbd_wrptr++;
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ card->txbd_wrptr += reg->ring_tx_start_ptr;
+ break;
+ }
+
+ if ((card->txbd_wrptr & reg->tx_mask) == num_tx_buffs)
+ card->txbd_wrptr = ((card->txbd_wrptr &
+ reg->tx_rollover_ind) ^
+ reg->tx_rollover_ind);
+
+ rx_val = card->rxbd_rdptr & reg->rx_wrap_mask;
+ /* Write the TX ring write pointer in to reg->tx_wrptr */
+ if (mwifiex_write_reg(adapter, reg->tx_wrptr,
+ card->txbd_wrptr | rx_val)) {
dev_err(adapter->dev,
- "SEND DATA: failed to assert door-bell intr\n");
- return -1;
+ "SEND DATA: failed to write reg->tx_wrptr\n");
+ ret = -1;
+ goto done_unmap;
+ }
+ if ((mwifiex_pcie_txbd_not_full(card)) &&
+ tx_param->next_pkt_len) {
+ /* have more packets and TxBD still can hold more */
+ dev_dbg(adapter->dev,
+ "SEND DATA: delay dnld-rdy interrupt.\n");
+ adapter->data_sent = false;
+ } else {
+ /* Send the TX ready interrupt */
+ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
+ CPU_INTR_DNLD_RDY)) {
+ dev_err(adapter->dev,
+ "SEND DATA: failed to assert dnld-rdy interrupt.\n");
+ ret = -1;
+ goto done_unmap;
+ }
}
dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: "
"%#x> and sent packet to firmware successfully\n",
- rdptr, card->txbd_wrptr);
+ card->txbd_rdptr, card->txbd_wrptr);
} else {
dev_dbg(adapter->dev,
"info: TX Ring full, can't send packets to fw\n");
@@ -827,7 +1151,17 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -EBUSY;
}
- return 0;
+ return -EINPROGRESS;
+done_unmap:
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
+ card->tx_buf_list[wrindx] = NULL;
+ if (reg->pfu_enabled)
+ memset(desc2, 0, sizeof(*desc2));
+ else
+ memset(desc, 0, sizeof(*desc));
+
+ return ret;
}
/*
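
Both the send and send-complete paths above advance ring pointers whose low bits index a descriptor and whose separate rollover bit is toggled each time the index wraps; that extra bit is what lets a full ring be told apart from an empty one when the indexes match. A simplified, standalone model of the advance step (the mask, ring size and initial value are examples, not the 8766/8897 register values, and the real driver additionally pre-sets the rollover bit on some read pointers at init):

#include <stdio.h>

#define RING_SIZE	16		/* example ring depth */
#define IDX_MASK	0x0000001f	/* low bits: descriptor index */
#define ROLLOVER_IND	0x00000020	/* toggled whenever the index wraps */

static unsigned int advance_ptr(unsigned int ptr)
{
	ptr++;
	if ((ptr & IDX_MASK) == RING_SIZE)
		ptr = (ptr & ROLLOVER_IND) ^ ROLLOVER_IND;	/* index 0, bit flipped */
	return ptr;
}

int main(void)
{
	unsigned int wrptr = 0, i;

	for (i = 0; i < RING_SIZE + 1; i++)
		wrptr = advance_ptr(wrptr);
	/* after wrapping once the index restarts from 0, but the rollover bit
	 * now differs from a freshly initialized pointer, so "index equal"
	 * alone no longer implies "ring empty" */
	printf("wrptr=0x%x\n", wrptr);
	return 0;
}
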
@@ -837,78 +1171,119 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb)
static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- u32 wrptr, rd_index;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ u32 wrptr, rd_index, tx_val;
+ dma_addr_t buf_pa;
int ret = 0;
struct sk_buff *skb_tmp = NULL;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+
+ if (!mwifiex_pcie_ok_to_access_hw(adapter))
+ mwifiex_pm_wakeup_card(adapter);
/* Read the RX ring Write pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
+ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
dev_err(adapter->dev,
- "RECV DATA: failed to read REG_TXBD_RDPTR\n");
+ "RECV DATA: failed to read reg->rx_wrptr\n");
ret = -1;
goto done;
}
+ card->rxbd_wrptr = wrptr;
- while (((wrptr & MWIFIEX_RXBD_MASK) !=
- (card->rxbd_rdptr & MWIFIEX_RXBD_MASK)) ||
- ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
- (card->rxbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+ while (((wrptr & reg->rx_mask) !=
+ (card->rxbd_rdptr & reg->rx_mask)) ||
+ ((wrptr & reg->rx_rollover_ind) ==
+ (card->rxbd_rdptr & reg->rx_rollover_ind))) {
struct sk_buff *skb_data;
u16 rx_len;
+ __le16 pkt_len;
- rd_index = card->rxbd_rdptr & MWIFIEX_RXBD_MASK;
+ rd_index = card->rxbd_rdptr & reg->rx_mask;
skb_data = card->rx_buf_list[rd_index];
+ MWIFIEX_SKB_PACB(skb_data, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ card->rx_buf_list[rd_index] = NULL;
+
/* Get data length from interface header -
- first byte is len, second byte is type */
- rx_len = *((u16 *)skb_data->data);
+ * first 2 bytes are the length, next 2 bytes are the type
+ */
+ pkt_len = *((__le16 *)skb_data->data);
+ rx_len = le16_to_cpu(pkt_len);
+ skb_put(skb_data, rx_len);
dev_dbg(adapter->dev,
"info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
card->rxbd_rdptr, wrptr, rx_len);
- skb_tmp = dev_alloc_skb(rx_len);
+ skb_pull(skb_data, INTF_HEADER_LEN);
+ mwifiex_handle_rx_packet(adapter, skb_data);
+
+ skb_tmp = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
if (!skb_tmp) {
- dev_dbg(adapter->dev,
- "info: Failed to alloc skb for RX\n");
- ret = -EBUSY;
- goto done;
+ dev_err(adapter->dev,
+ "Unable to allocate skb.\n");
+ return -ENOMEM;
}
- skb_put(skb_tmp, rx_len);
+ if (mwifiex_map_pci_memory(adapter, skb_tmp,
+ MWIFIEX_RX_DATA_BUF_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
+
+ MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
- memcpy(skb_tmp->data, skb_data->data + INTF_HEADER_LEN, rx_len);
- if ((++card->rxbd_rdptr & MWIFIEX_RXBD_MASK) ==
+ dev_dbg(adapter->dev,
+ "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
+ skb_tmp, rd_index);
+ card->rx_buf_list[rd_index] = skb_tmp;
+
+ if (reg->pfu_enabled) {
+ desc2 = (void *)card->rxbd_ring[rd_index];
+ desc2->paddr = buf_pa;
+ desc2->len = skb_tmp->len;
+ desc2->frag_len = skb_tmp->len;
+ desc2->offset = 0;
+ desc2->flags = reg->ring_flag_sop | reg->ring_flag_eop;
+ } else {
+ desc = card->rxbd_ring[rd_index];
+ desc->paddr = buf_pa;
+ desc->len = skb_tmp->len;
+ desc->flags = 0;
+ }
+
+ if ((++card->rxbd_rdptr & reg->rx_mask) ==
MWIFIEX_MAX_TXRX_BD) {
card->rxbd_rdptr = ((card->rxbd_rdptr &
- MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
- MWIFIEX_BD_FLAG_ROLLOVER_IND);
+ reg->rx_rollover_ind) ^
+ reg->rx_rollover_ind);
}
dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
card->rxbd_rdptr, wrptr);
- /* Write the RX ring read pointer in to REG_RXBD_RDPTR */
- if (mwifiex_write_reg(adapter, REG_RXBD_RDPTR,
- card->rxbd_rdptr)) {
+ tx_val = card->txbd_wrptr & reg->tx_wrap_mask;
+ /* Write the RX ring read pointer in to reg->rx_rdptr */
+ if (mwifiex_write_reg(adapter, reg->rx_rdptr,
+ card->rxbd_rdptr | tx_val)) {
dev_err(adapter->dev,
- "RECV DATA: failed to write REG_RXBD_RDPTR\n");
+ "RECV DATA: failed to write reg->rx_rdptr\n");
ret = -1;
goto done;
}
/* Read the RX ring Write pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
+ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
dev_err(adapter->dev,
- "RECV DATA: failed to read REG_TXBD_RDPTR\n");
+ "RECV DATA: failed to read reg->rx_wrptr\n");
ret = -1;
goto done;
}
dev_dbg(adapter->dev,
"info: RECV DATA: Rcvd packet from fw successfully\n");
- mwifiex_handle_rx_packet(adapter, skb_tmp);
+ card->rxbd_wrptr = wrptr;
}
done:
- if (ret && skb_tmp)
- dev_kfree_skb_any(skb_tmp);
return ret;
}
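
The receive loop above now unmaps the filled buffer, sizes it from the 2-byte length in the interface header, hands it up, and immediately maps a fresh skb into the same ring slot. A condensed sketch of that recycle step (kernel APIs as in the hunk; sizes and the helper name are illustrative):

#include <linux/pci.h>
#include <linux/skbuff.h>

#define RX_BUF_SIZE	4096	/* stands in for MWIFIEX_RX_DATA_BUF_SIZE */
#define HDR_LEN		4	/* interface header: 2-byte length + 2-byte type */

/* Unmap and size a filled RX buffer, then map a fresh skb for the same
 * ring slot. The caller hands 'filled' to the RX path and writes the new
 * bus address into the descriptor.
 */
static struct sk_buff *recycle_rx_slot(struct pci_dev *pdev,
				       struct sk_buff *filled,
				       dma_addr_t old_pa, dma_addr_t *new_pa)
{
	struct sk_buff *fresh;
	u16 rx_len;

	pci_unmap_single(pdev, old_pa, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
	rx_len = le16_to_cpu(*(__le16 *)filled->data);
	skb_put(filled, rx_len);
	skb_pull(filled, HDR_LEN);

	fresh = dev_alloc_skb(RX_BUF_SIZE);
	if (!fresh)
		return NULL;
	*new_pa = pci_map_single(pdev, fresh->data, RX_BUF_SIZE,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *new_pa)) {
		dev_kfree_skb_any(fresh);
		return NULL;
	}
	return fresh;
}
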
@@ -918,40 +1293,54 @@ done:
static int
mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
{
- phys_addr_t *buf_pa = MWIFIEX_SKB_PACB(skb);
+ dma_addr_t buf_pa;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- if (!(skb->data && skb->len && *buf_pa)) {
+ if (!(skb->data && skb->len)) {
dev_err(adapter->dev,
- "Invalid parameter in %s <%p, %#x:%x, %x>\n",
- __func__, skb->data, skb->len,
- (u32)*buf_pa, (u32)((u64)*buf_pa >> 32));
+ "Invalid parameter in %s <%p. len %d>\n",
+ __func__, skb->data, skb->len);
return -1;
}
- /* Write the lower 32bits of the physical address to scratch
- * register 0 */
- if (mwifiex_write_reg(adapter, PCIE_SCRATCH_0_REG, (u32)*buf_pa)) {
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
+ return -1;
+
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+
+ /* Write the lower 32bits of the physical address to low command
+ * address scratch register
+ */
+ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) {
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_TODEVICE);
return -1;
}
- /* Write the upper 32bits of the physical address to scratch
- * register 1 */
- if (mwifiex_write_reg(adapter, PCIE_SCRATCH_1_REG,
- (u32)((u64)*buf_pa >> 32))) {
+ /* Write the upper 32bits of the physical address to high command
+ * address scratch register
+ */
+ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
+ (u32)((u64)buf_pa >> 32))) {
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_TODEVICE);
return -1;
}
- /* Write the command length to scratch register 2 */
- if (mwifiex_write_reg(adapter, PCIE_SCRATCH_2_REG, skb->len)) {
+ /* Write the command length to cmd_size scratch register */
+ if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
dev_err(adapter->dev,
- "%s: failed to write command len to scratch reg 2\n",
+ "%s: failed to write command len to cmd_size scratch reg\n",
__func__);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_TODEVICE);
return -1;
}
@@ -960,22 +1349,43 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
dev_err(adapter->dev,
"%s: failed to assert door-bell intr\n", __func__);
+ pci_unmap_single(card->dev, buf_pa,
+ MWIFIEX_UPLD_SIZE, PCI_DMA_TODEVICE);
return -1;
}
return 0;
}
-/*
- * This function downloads commands to the device
+/* This function initializes the RX port in firmware, which in turn enables
+ * data to be received from the device before any packet is transmitted.
+ */
+static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask;
+
+ /* Write the RX ring read pointer in to reg->rx_rdptr */
+ if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
+ tx_wrap)) {
+ dev_err(adapter->dev,
+ "RECV DATA: failed to write reg->rx_rdptr\n");
+ return -1;
+ }
+ return 0;
+}
+
+/* This function downloads commands to the device
*/
static int
mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
int ret = 0;
- phys_addr_t *cmd_buf_pa;
- phys_addr_t *cmdrsp_buf_pa;
+ dma_addr_t cmd_buf_pa, cmdrsp_buf_pa;
+ u8 *payload = (u8 *)skb->data;
if (!(skb->data && skb->len)) {
dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n",
@@ -990,21 +1400,22 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -EBUSY;
}
- /* Make sure a command buffer is available */
- if (!card->cmd_buf) {
- dev_err(adapter->dev, "Command buffer not available\n");
- return -EBUSY;
- }
+ if (!mwifiex_pcie_ok_to_access_hw(adapter))
+ mwifiex_pm_wakeup_card(adapter);
adapter->cmd_sent = true;
- /* Copy the given skb in to DMA accessable shared buffer */
- skb_put(card->cmd_buf, MWIFIEX_SIZE_OF_CMD_BUFFER - card->cmd_buf->len);
- skb_trim(card->cmd_buf, skb->len);
- memcpy(card->cmd_buf->data, skb->data, skb->len);
+
+ *(__le16 *)&payload[0] = cpu_to_le16((u16)skb->len);
+ *(__le16 *)&payload[2] = cpu_to_le16(MWIFIEX_TYPE_CMD);
+
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
+ return -1;
+
+ card->cmd_buf = skb;
/* To send a command, the driver will:
1. Write the 64bit physical address of the data buffer to
- SCRATCH1 + SCRATCH0
+ cmd response address low + cmd response address high
2. Ring the door bell (i.e. set the door bell interrupt)
In response to door bell interrupt, the firmware will perform
@@ -1013,11 +1424,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
*/
if (card->cmdrsp_buf) {
- cmdrsp_buf_pa = MWIFIEX_SKB_PACB(card->cmdrsp_buf);
+ MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa);
/* Write the lower 32bits of the cmdrsp buffer physical
address */
- if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO,
- (u32)*cmdrsp_buf_pa)) {
+ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
+ (u32)cmdrsp_buf_pa)) {
dev_err(adapter->dev,
"Failed to write download cmd to boot code.\n");
ret = -1;
@@ -1025,8 +1436,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
}
/* Write the upper 32bits of the cmdrsp buffer physical
address */
- if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI,
- (u32)((u64)*cmdrsp_buf_pa >> 32))) {
+ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi,
+ (u32)((u64)cmdrsp_buf_pa >> 32))) {
dev_err(adapter->dev,
"Failed to write download cmd to boot code.\n");
ret = -1;
@@ -1034,27 +1445,29 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
}
}
- cmd_buf_pa = MWIFIEX_SKB_PACB(card->cmd_buf);
- /* Write the lower 32bits of the physical address to REG_CMD_ADDR_LO */
- if (mwifiex_write_reg(adapter, REG_CMD_ADDR_LO, (u32)*cmd_buf_pa)) {
+ MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa);
+ /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
+ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
+ (u32)cmd_buf_pa)) {
dev_err(adapter->dev,
"Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
- /* Write the upper 32bits of the physical address to REG_CMD_ADDR_HI */
- if (mwifiex_write_reg(adapter, REG_CMD_ADDR_HI,
- (u32)((u64)*cmd_buf_pa >> 32))) {
+ /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */
+ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
+ (u32)((u64)cmd_buf_pa >> 32))) {
dev_err(adapter->dev,
"Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
- /* Write the command length to REG_CMD_SIZE */
- if (mwifiex_write_reg(adapter, REG_CMD_SIZE, card->cmd_buf->len)) {
+ /* Write the command length to reg->cmd_size */
+ if (mwifiex_write_reg(adapter, reg->cmd_size,
+ card->cmd_buf->len)) {
dev_err(adapter->dev,
- "Failed to write cmd len to REG_CMD_SIZE\n");
+ "Failed to write cmd len to reg->cmd_size\n");
ret = -1;
goto done;
}
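
The command path follows the two-step sequence spelled out in the comment above: publish the command buffer's 64-bit DMA address and length through scratch registers, then ring the doorbell so firmware fetches the buffer by DMA. A standalone sketch with stand-in register offsets and a stubbed register write (none of these constants are the real chip values):

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the per-chip scratch register offsets and doorbell bit */
enum { CMD_ADDR_LO = 0xc0, CMD_ADDR_HI = 0xc4, CMD_SIZE = 0xc8,
       CPU_INT_EVENT = 0x30 };
#define CPU_INTR_DOOR_BELL	(1 << 1)

/* stub for the register write; a real driver writes to mapped iomem */
static int write_reg(uint32_t reg, uint32_t val)
{
	printf("reg 0x%02x <- 0x%08x\n", reg, val);
	return 0;
}

static int send_cmd_doorbell(uint64_t cmd_pa, uint16_t cmd_len)
{
	if (write_reg(CMD_ADDR_LO, (uint32_t)cmd_pa) ||
	    write_reg(CMD_ADDR_HI, (uint32_t)(cmd_pa >> 32)) ||
	    write_reg(CMD_SIZE, cmd_len))
		return -1;
	/* firmware DMAs the buffer once the doorbell interrupt fires */
	return write_reg(CPU_INT_EVENT, CPU_INTR_DOOR_BELL);
}

int main(void)
{
	return send_cmd_doorbell(0x12345678abcdULL, 256);
}
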
@@ -1081,18 +1494,30 @@ done:
static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
struct sk_buff *skb = card->cmdrsp_buf;
int count = 0;
+ u16 rx_len;
+ __le16 pkt_len;
+ dma_addr_t buf_pa;
dev_dbg(adapter->dev, "info: Rx CMD Response\n");
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ pkt_len = *((__le16 *)skb->data);
+ rx_len = le16_to_cpu(pkt_len);
+ skb_trim(skb, rx_len);
+ skb_pull(skb, INTF_HEADER_LEN);
+
if (!adapter->curr_cmd) {
- skb_pull(skb, INTF_HEADER_LEN);
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(adapter, skb->data,
skb->len);
- while (mwifiex_pcie_ok_to_access_hw(adapter) &&
- (count++ < 10))
+ while (reg->sleep_cookie && (count++ < 10) &&
+ mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
} else {
dev_err(adapter->dev,
@@ -1100,9 +1525,12 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
}
memcpy(adapter->upld_buf, skb->data,
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
- skb_push(skb, INTF_HEADER_LEN);
+ if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
+
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
- skb_pull(skb, INTF_HEADER_LEN);
adapter->curr_cmd->resp_skb = skb;
adapter->cmd_resp_received = true;
/* Take the pointer and set it to CMD node and will
@@ -1112,14 +1540,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
/* Clear the cmd-rsp buffer address in scratch registers. This
will prevent firmware from writing to the same response
buffer again. */
- if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO, 0)) {
+ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) {
dev_err(adapter->dev,
"cmd_done: failed to clear cmd_rsp_addr_lo\n");
return -1;
}
/* Write the upper 32bits of the cmdrsp buffer physical
address */
- if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI, 0)) {
+ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) {
dev_err(adapter->dev,
"cmd_done: failed to clear cmd_rsp_addr_hi\n");
return -1;
@@ -1136,10 +1564,23 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
+ dma_addr_t buf_pa;
+ struct sk_buff *skb_tmp;
if (skb) {
card->cmdrsp_buf = skb;
skb_push(card->cmdrsp_buf, INTF_HEADER_LEN);
+ if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
+ }
+
+ skb_tmp = card->cmd_buf;
+ if (skb_tmp) {
+ MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
+ PCI_DMA_FROMDEVICE);
+ card->cmd_buf = NULL;
}
return 0;
@@ -1151,8 +1592,14 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr, event;
+ dma_addr_t buf_pa;
+ struct mwifiex_evt_buf_desc *desc;
+
+ if (!mwifiex_pcie_ok_to_access_hw(adapter))
+ mwifiex_pm_wakeup_card(adapter);
if (adapter->event_received) {
dev_dbg(adapter->dev, "info: Event being processed, "
@@ -1166,9 +1613,9 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
}
/* Read the event ring write pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
+ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
dev_err(adapter->dev,
- "EventReady: failed to read REG_EVTBD_WRPTR\n");
+ "EventReady: failed to read reg->evt_wrptr\n");
return -1;
}
@@ -1176,20 +1623,23 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
card->evtbd_rdptr, wrptr);
if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
& MWIFIEX_EVTBD_MASK)) ||
- ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
- (card->evtbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+ ((wrptr & reg->evt_rollover_ind) ==
+ (card->evtbd_rdptr & reg->evt_rollover_ind))) {
struct sk_buff *skb_cmd;
__le16 data_len = 0;
u16 evt_len;
dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
+ MWIFIEX_SKB_PACB(skb_cmd, &buf_pa);
+ pci_unmap_single(card->dev, buf_pa, MAX_EVENT_SIZE,
+ PCI_DMA_FROMDEVICE);
+
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
card->evt_buf_list[rdptr] = NULL;
- card->evtbd_ring[rdptr]->paddr = 0;
- card->evtbd_ring[rdptr]->len = 0;
- card->evtbd_ring[rdptr]->flags = 0;
+ desc = card->evtbd_ring[rdptr];
+ memset(desc, 0, sizeof(*desc));
event = *(u32 *) &skb_cmd->data[INTF_HEADER_LEN];
adapter->event_cause = event;
@@ -1225,10 +1675,12 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
int ret = 0;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr;
- phys_addr_t *buf_pa;
+ dma_addr_t buf_pa;
+ struct mwifiex_evt_buf_desc *desc;
if (!skb)
return 0;
@@ -1240,19 +1692,25 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
}
/* Read the event ring write pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
+ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
dev_err(adapter->dev,
- "event_complete: failed to read REG_EVTBD_WRPTR\n");
+ "event_complete: failed to read reg->evt_wrptr\n");
return -1;
}
if (!card->evt_buf_list[rdptr]) {
skb_push(skb, INTF_HEADER_LEN);
+ if (mwifiex_map_pci_memory(adapter, skb,
+ MAX_EVENT_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
card->evt_buf_list[rdptr] = skb;
- buf_pa = MWIFIEX_SKB_PACB(skb);
- card->evtbd_ring[rdptr]->paddr = *buf_pa;
- card->evtbd_ring[rdptr]->len = (u16)skb->len;
- card->evtbd_ring[rdptr]->flags = 0;
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+ desc = card->evtbd_ring[rdptr];
+ desc->paddr = buf_pa;
+ desc->len = (u16)skb->len;
+ desc->flags = 0;
skb = NULL;
} else {
dev_dbg(adapter->dev,
@@ -1262,17 +1720,18 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
card->evtbd_rdptr = ((card->evtbd_rdptr &
- MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
- MWIFIEX_BD_FLAG_ROLLOVER_IND);
+ reg->evt_rollover_ind) ^
+ reg->evt_rollover_ind);
}
dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
card->evtbd_rdptr, wrptr);
- /* Write the event ring read pointer in to REG_EVTBD_RDPTR */
- if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
+ /* Write the event ring read pointer in to reg->evt_rdptr */
+ if (mwifiex_write_reg(adapter, reg->evt_rdptr,
+ card->evtbd_rdptr)) {
dev_err(adapter->dev,
- "event_complete: failed to read REG_EVTBD_RDPTR\n");
+ "event_complete: failed to read reg->evt_rdptr\n");
return -1;
}
@@ -1299,11 +1758,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct sk_buff *skb;
u32 txlen, tx_blocks = 0, tries, len;
u32 block_retry_cnt = 0;
-
- if (!adapter) {
- pr_err("adapter structure is not valid\n");
- return -1;
- }
+ dma_addr_t buf_pa;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!firmware || !firmware_len) {
dev_err(adapter->dev,
@@ -1325,7 +1782,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = -ENOMEM;
goto done;
}
- mwifiex_update_sk_buff_pa(skb);
/* Perform firmware data transfer */
do {
@@ -1336,7 +1792,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
break;
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
- ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_2_REG,
+ ret = mwifiex_read_reg(adapter, reg->cmd_size,
&len);
if (ret) {
dev_warn(adapter->dev,
@@ -1382,16 +1838,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, ".");
- tx_blocks = (txlen +
- MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD - 1) /
- MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD;
+ tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
+ card->pcie.blksz_fw_dl;
/* Copy payload to buffer */
memmove(skb->data, &firmware[offset], txlen);
}
skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
- skb_trim(skb, tx_blocks * MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD);
+ skb_trim(skb, tx_blocks * card->pcie.blksz_fw_dl);
/* Send the boot command to device */
if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
@@ -1400,6 +1855,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = -1;
goto done;
}
+
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+
/* Wait for the command done interrupt */
do {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
@@ -1407,11 +1865,17 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dev_err(adapter->dev, "%s: Failed to read "
"interrupt status during fw dnld.\n",
__func__);
+ pci_unmap_single(card->dev, buf_pa, skb->len,
+ PCI_DMA_TODEVICE);
ret = -1;
goto done;
}
} while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
CPU_INTR_DOOR_BELL);
+
+ pci_unmap_single(card->dev, buf_pa, skb->len,
+ PCI_DMA_TODEVICE);
+
offset += txlen;
} while (true);
@@ -1435,6 +1899,8 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
{
int ret = 0;
u32 firmware_stat, winner_status;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 tries;
/* Mask spurious interrupts */
@@ -1445,7 +1911,8 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
}
dev_dbg(adapter->dev, "Setting driver ready signature\n");
- if (mwifiex_write_reg(adapter, REG_DRV_READY, FIRMWARE_READY_PCIE)) {
+ if (mwifiex_write_reg(adapter, reg->drv_rdy,
+ FIRMWARE_READY_PCIE)) {
dev_err(adapter->dev,
"Failed to write driver ready signature\n");
return -1;
@@ -1453,7 +1920,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
/* Wait for firmware initialization event */
for (tries = 0; tries < poll_num; tries++) {
- if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG,
+ if (mwifiex_read_reg(adapter, reg->fw_status,
&firmware_stat))
ret = -1;
else
@@ -1470,7 +1937,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
}
if (ret) {
- if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG,
+ if (mwifiex_read_reg(adapter, reg->fw_status,
&winner_status))
ret = -1;
else if (!winner_status) {
@@ -1594,39 +2061,40 @@ exit:
static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
{
int ret;
- u32 pcie_ireg = 0;
+ u32 pcie_ireg;
unsigned long flags;
spin_lock_irqsave(&adapter->int_lock, flags);
/* Clear out unused interrupts */
- adapter->int_status &= HOST_INTR_MASK;
+ pcie_ireg = adapter->int_status;
+ adapter->int_status = 0;
spin_unlock_irqrestore(&adapter->int_lock, flags);
- while (adapter->int_status & HOST_INTR_MASK) {
- if (adapter->int_status & HOST_INTR_DNLD_DONE) {
- adapter->int_status &= ~HOST_INTR_DNLD_DONE;
- if (adapter->data_sent) {
- dev_dbg(adapter->dev, "info: DATA sent intr\n");
- adapter->data_sent = false;
- }
+ while (pcie_ireg & HOST_INTR_MASK) {
+ if (pcie_ireg & HOST_INTR_DNLD_DONE) {
+ pcie_ireg &= ~HOST_INTR_DNLD_DONE;
+ dev_dbg(adapter->dev, "info: TX DNLD Done\n");
+ ret = mwifiex_pcie_send_data_complete(adapter);
+ if (ret)
+ return ret;
}
- if (adapter->int_status & HOST_INTR_UPLD_RDY) {
- adapter->int_status &= ~HOST_INTR_UPLD_RDY;
+ if (pcie_ireg & HOST_INTR_UPLD_RDY) {
+ pcie_ireg &= ~HOST_INTR_UPLD_RDY;
dev_dbg(adapter->dev, "info: Rx DATA\n");
ret = mwifiex_pcie_process_recv_data(adapter);
if (ret)
return ret;
}
- if (adapter->int_status & HOST_INTR_EVENT_RDY) {
- adapter->int_status &= ~HOST_INTR_EVENT_RDY;
+ if (pcie_ireg & HOST_INTR_EVENT_RDY) {
+ pcie_ireg &= ~HOST_INTR_EVENT_RDY;
dev_dbg(adapter->dev, "info: Rx EVENT\n");
ret = mwifiex_pcie_process_event_ready(adapter);
if (ret)
return ret;
}
- if (adapter->int_status & HOST_INTR_CMD_DONE) {
- adapter->int_status &= ~HOST_INTR_CMD_DONE;
+ if (pcie_ireg & HOST_INTR_CMD_DONE) {
+ pcie_ireg &= ~HOST_INTR_CMD_DONE;
if (adapter->cmd_sent) {
dev_dbg(adapter->dev,
"info: CMD sent Interrupt\n");
@@ -1654,8 +2122,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
"Write register failed\n");
return -1;
}
- adapter->int_status |= pcie_ireg;
- adapter->int_status &= HOST_INTR_MASK;
}
}
@@ -1687,7 +2153,7 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
}
if (type == MWIFIEX_TYPE_DATA)
- return mwifiex_pcie_send_data(adapter, skb);
+ return mwifiex_pcie_send_data(adapter, skb, tx_param);
else if (type == MWIFIEX_TYPE_CMD)
return mwifiex_pcie_send_cmd(adapter, skb);
@@ -1709,6 +2175,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
int ret;
struct pci_dev *pdev = card->dev;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
pci_set_drvdata(pdev, card);
@@ -1739,6 +2206,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
card->pci_mmap = pci_iomap(pdev, 0, 0);
if (!card->pci_mmap) {
dev_err(adapter->dev, "iomap(0) error\n");
+ ret = -EIO;
goto err_iomap0;
}
ret = pci_request_region(pdev, 2, DRV_NAME);
@@ -1749,6 +2217,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
card->pci_mmap1 = pci_iomap(pdev, 2, 0);
if (!card->pci_mmap1) {
dev_err(adapter->dev, "iomap(2) error\n");
+ ret = -EIO;
goto err_iomap2;
}
@@ -1769,10 +2238,13 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
if (ret)
goto err_alloc_cmdbuf;
- ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
- if (ret)
- goto err_alloc_cookie;
-
+ if (reg->sleep_cookie) {
+ ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
+ if (ret)
+ goto err_alloc_cookie;
+ } else {
+ card->sleep_cookie_vbase = NULL;
+ }
return ret;
err_alloc_cookie:
@@ -1813,17 +2285,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
struct pci_dev *pdev = card->dev;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- mwifiex_pcie_delete_sleep_cookie_buf(adapter);
- mwifiex_pcie_delete_cmdrsp_buf(adapter);
- mwifiex_pcie_delete_evtbd_ring(adapter);
- mwifiex_pcie_delete_rxbd_ring(adapter);
- mwifiex_pcie_delete_txbd_ring(adapter);
- card->cmdrsp_buf = NULL;
-
- dev_dbg(adapter->dev, "Clearing driver ready signature\n");
if (user_rmmod) {
- if (mwifiex_write_reg(adapter, REG_DRV_READY, 0x00000000))
+ dev_dbg(adapter->dev, "Clearing driver ready signature\n");
+ if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
dev_err(adapter->dev,
"Failed to write driver not-ready signature\n");
}
@@ -1861,7 +2327,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
}
adapter->dev = &pdev->dev;
- strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
+ strcpy(adapter->fw_name, card->pcie.firmware);
return 0;
}
@@ -1875,10 +2341,21 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg;
if (card) {
dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__);
free_irq(card->dev->irq, card->dev);
+
+ reg = card->pcie.reg;
+ if (reg->sleep_cookie)
+ mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
+ mwifiex_pcie_delete_cmdrsp_buf(adapter);
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+ mwifiex_pcie_delete_txbd_ring(adapter);
+ card->cmdrsp_buf = NULL;
}
}
@@ -1900,6 +2377,8 @@ static struct mwifiex_if_ops pcie_ops = {
.event_complete = mwifiex_pcie_event_complete,
.update_mp_end_port = NULL,
.cleanup_mpa_buf = NULL,
+ .init_fw_port = mwifiex_pcie_init_fw_port,
+ .clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
};
/*
@@ -1912,7 +2391,7 @@ static int mwifiex_pcie_init_module(void)
{
int ret;
- pr_debug("Marvell 8766 PCIe Driver\n");
+ pr_debug("Marvell PCIe Driver\n");
sema_init(&add_remove_card_sem, 1);
@@ -1955,4 +2434,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
MODULE_VERSION(PCIE_VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/pcie8766_uapsta.bin");
+MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
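The interrupt rework in mwifiex_process_int_status() above snapshots adapter->int_status into a local under int_lock, clears the shared field, and dispatches from the copy, leaving any bits raised in the meantime to be picked up on the next pass. A minimal sketch of that snapshot-and-drain pattern, with illustrative names rather than the driver's own helpers:

	static DEFINE_SPINLOCK(example_int_lock);
	static u32 example_int_status;		/* set from the ISR */

	static void example_drain_interrupts(void)
	{
		unsigned long flags;
		u32 pending;

		spin_lock_irqsave(&example_int_lock, flags);
		pending = example_int_status;	/* grab everything posted so far */
		example_int_status = 0;		/* later events start a new batch */
		spin_unlock_irqrestore(&example_int_lock, flags);

		while (pending) {
			u32 bit = pending & -pending;	/* lowest pending source */

			pending &= ~bit;
			/* ... dispatch the handler registered for 'bit' ... */
		}
	}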
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 2f218f9a3fd3..d322ab8604ea 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -29,6 +29,11 @@
#include "main.h"
#define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
+#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+
+#define PCIE_VENDOR_ID_MARVELL (0x11ab)
+#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
+#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38)
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
@@ -57,6 +62,8 @@
#define PCIE_SCRATCH_10_REG 0xCE8
#define PCIE_SCRATCH_11_REG 0xCEC
#define PCIE_SCRATCH_12_REG 0xCF0
+#define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C
+#define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C
#define CPU_INTR_DNLD_RDY BIT(0)
#define CPU_INTR_DOOR_BELL BIT(1)
@@ -75,27 +82,14 @@
#define MWIFIEX_BD_FLAG_ROLLOVER_IND BIT(7)
#define MWIFIEX_BD_FLAG_FIRST_DESC BIT(0)
#define MWIFIEX_BD_FLAG_LAST_DESC BIT(1)
-#define REG_CMD_ADDR_LO PCIE_SCRATCH_0_REG
-#define REG_CMD_ADDR_HI PCIE_SCRATCH_1_REG
-#define REG_CMD_SIZE PCIE_SCRATCH_2_REG
-
-#define REG_CMDRSP_ADDR_LO PCIE_SCRATCH_4_REG
-#define REG_CMDRSP_ADDR_HI PCIE_SCRATCH_5_REG
-
-/* TX buffer description read pointer */
-#define REG_TXBD_RDPTR PCIE_SCRATCH_6_REG
-/* TX buffer description write pointer */
-#define REG_TXBD_WRPTR PCIE_SCRATCH_7_REG
-/* RX buffer description read pointer */
-#define REG_RXBD_RDPTR PCIE_SCRATCH_8_REG
-/* RX buffer description write pointer */
-#define REG_RXBD_WRPTR PCIE_SCRATCH_9_REG
-/* Event buffer description read pointer */
-#define REG_EVTBD_RDPTR PCIE_SCRATCH_10_REG
-/* Event buffer description write pointer */
-#define REG_EVTBD_WRPTR PCIE_SCRATCH_11_REG
-/* Driver ready signature write pointer */
-#define REG_DRV_READY PCIE_SCRATCH_12_REG
+#define MWIFIEX_BD_FLAG_SOP BIT(0)
+#define MWIFIEX_BD_FLAG_EOP BIT(1)
+#define MWIFIEX_BD_FLAG_XS_SOP BIT(2)
+#define MWIFIEX_BD_FLAG_XS_EOP BIT(3)
+#define MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND BIT(7)
+#define MWIFIEX_BD_FLAG_RX_ROLLOVER_IND BIT(10)
+#define MWIFIEX_BD_FLAG_TX_START_PTR BIT(16)
+#define MWIFIEX_BD_FLAG_TX_ROLLOVER_IND BIT(26)
/* Max retry number of command write */
#define MAX_WRITE_IOMEM_RETRY 2
@@ -104,45 +98,223 @@
/* FW awake cookie after FW ready */
#define FW_AWAKE_COOKIE (0xAA55AA55)
+struct mwifiex_pcie_card_reg {
+ u16 cmd_addr_lo;
+ u16 cmd_addr_hi;
+ u16 fw_status;
+ u16 cmd_size;
+ u16 cmdrsp_addr_lo;
+ u16 cmdrsp_addr_hi;
+ u16 tx_rdptr;
+ u16 tx_wrptr;
+ u16 rx_rdptr;
+ u16 rx_wrptr;
+ u16 evt_rdptr;
+ u16 evt_wrptr;
+ u16 drv_rdy;
+ u16 tx_start_ptr;
+ u32 tx_mask;
+ u32 tx_wrap_mask;
+ u32 rx_mask;
+ u32 rx_wrap_mask;
+ u32 tx_rollover_ind;
+ u32 rx_rollover_ind;
+ u32 evt_rollover_ind;
+ u8 ring_flag_sop;
+ u8 ring_flag_eop;
+ u8 ring_flag_xs_sop;
+ u8 ring_flag_xs_eop;
+ u32 ring_tx_start_ptr;
+ u8 pfu_enabled;
+ u8 sleep_cookie;
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = PCIE_SCRATCH_6_REG,
+ .tx_wrptr = PCIE_SCRATCH_7_REG,
+ .rx_rdptr = PCIE_SCRATCH_8_REG,
+ .rx_wrptr = PCIE_SCRATCH_9_REG,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 0,
+ .tx_mask = MWIFIEX_TXBD_MASK,
+ .tx_wrap_mask = 0,
+ .rx_mask = MWIFIEX_RXBD_MASK,
+ .rx_wrap_mask = 0,
+ .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .ring_flag_sop = 0,
+ .ring_flag_eop = 0,
+ .ring_flag_xs_sop = 0,
+ .ring_flag_xs_eop = 0,
+ .ring_tx_start_ptr = 0,
+ .pfu_enabled = 0,
+ .sleep_cookie = 1,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1,
+ .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1,
+ .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1,
+ .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 16,
+ .tx_mask = 0x03FF0000,
+ .tx_wrap_mask = 0x07FF0000,
+ .rx_mask = 0x000003FF,
+ .rx_wrap_mask = 0x000007FF,
+ .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND,
+ .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND,
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+ .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+ .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+ .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+ .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+ .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+ .pfu_enabled = 1,
+ .sleep_cookie = 0,
+};
+
+struct mwifiex_pcie_device {
+ const char *firmware;
+ const struct mwifiex_pcie_card_reg *reg;
+ u16 blksz_fw_dl;
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
+ .firmware = PCIE8766_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_8766,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
+ .firmware = PCIE8897_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_8897,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+};
+
+struct mwifiex_evt_buf_desc {
+ u64 paddr;
+ u16 len;
+ u16 flags;
+} __packed;
+
struct mwifiex_pcie_buf_desc {
u64 paddr;
u16 len;
u16 flags;
} __packed;
+struct mwifiex_pfu_buf_desc {
+ u16 flags;
+ u16 offset;
+ u16 frag_len;
+ u16 len;
+ u64 paddr;
+ u32 reserved;
+} __packed;
+
struct pcie_service_card {
struct pci_dev *dev;
struct mwifiex_adapter *adapter;
+ struct mwifiex_pcie_device pcie;
+ u8 txbd_flush;
u32 txbd_wrptr;
u32 txbd_rdptr;
u32 txbd_ring_size;
u8 *txbd_ring_vbase;
- phys_addr_t txbd_ring_pbase;
- struct mwifiex_pcie_buf_desc *txbd_ring[MWIFIEX_MAX_TXRX_BD];
+ dma_addr_t txbd_ring_pbase;
+ void *txbd_ring[MWIFIEX_MAX_TXRX_BD];
struct sk_buff *tx_buf_list[MWIFIEX_MAX_TXRX_BD];
u32 rxbd_wrptr;
u32 rxbd_rdptr;
u32 rxbd_ring_size;
u8 *rxbd_ring_vbase;
- phys_addr_t rxbd_ring_pbase;
- struct mwifiex_pcie_buf_desc *rxbd_ring[MWIFIEX_MAX_TXRX_BD];
+ dma_addr_t rxbd_ring_pbase;
+ void *rxbd_ring[MWIFIEX_MAX_TXRX_BD];
struct sk_buff *rx_buf_list[MWIFIEX_MAX_TXRX_BD];
u32 evtbd_wrptr;
u32 evtbd_rdptr;
u32 evtbd_ring_size;
u8 *evtbd_ring_vbase;
- phys_addr_t evtbd_ring_pbase;
- struct mwifiex_pcie_buf_desc *evtbd_ring[MWIFIEX_MAX_EVT_BD];
+ dma_addr_t evtbd_ring_pbase;
+ void *evtbd_ring[MWIFIEX_MAX_EVT_BD];
struct sk_buff *evt_buf_list[MWIFIEX_MAX_EVT_BD];
struct sk_buff *cmd_buf;
struct sk_buff *cmdrsp_buf;
- struct sk_buff *sleep_cookie;
+ u8 *sleep_cookie_vbase;
+ dma_addr_t sleep_cookie_pbase;
void __iomem *pci_mmap;
void __iomem *pci_mmap1;
};
+static inline int
+mwifiex_pcie_txbd_empty(struct pcie_service_card *card, u32 rdptr)
+{
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ if (((card->txbd_wrptr & reg->tx_mask) ==
+ (rdptr & reg->tx_mask)) &&
+ ((card->txbd_wrptr & reg->tx_rollover_ind) !=
+ (rdptr & reg->tx_rollover_ind)))
+ return 1;
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ if (((card->txbd_wrptr & reg->tx_mask) ==
+ (rdptr & reg->tx_mask)) &&
+ ((card->txbd_wrptr & reg->tx_rollover_ind) ==
+ (rdptr & reg->tx_rollover_ind)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+static inline int
+mwifiex_pcie_txbd_not_full(struct pcie_service_card *card)
+{
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ if (((card->txbd_wrptr & reg->tx_mask) !=
+ (card->txbd_rdptr & reg->tx_mask)) ||
+ ((card->txbd_wrptr & reg->tx_rollover_ind) !=
+ (card->txbd_rdptr & reg->tx_rollover_ind)))
+ return 1;
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ if (((card->txbd_wrptr & reg->tx_mask) !=
+ (card->txbd_rdptr & reg->tx_mask)) ||
+ ((card->txbd_wrptr & reg->tx_rollover_ind) ==
+ (card->txbd_rdptr & reg->tx_rollover_ind)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
#endif /* _MWIFIEX_PCIE_H */
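The mwifiex_pcie_txbd_empty()/mwifiex_pcie_txbd_not_full() helpers added above handle the classic ring ambiguity: when the masked read and write indices are equal, the ring is either completely empty or completely full, and only the rollover indicator tells the two apart. Which rollover relationship means which state is chip-specific (the 8766 and 8897 branches differ), hence the switch on the PCI device ID. A generic sketch of the single-rollover-bit scheme, using hypothetical constants rather than the driver's actual masks:

	#define EX_RING_SIZE	32			/* hypothetical ring depth */
	#define EX_IDX_MASK	(EX_RING_SIZE - 1)
	#define EX_ROLLOVER	0x80			/* hypothetical rollover flag */

	/* Empty: the consumer has caught up and has wrapped as often as the
	 * producer, so index and rollover bit both match.
	 */
	static inline int ex_ring_empty(u32 wr, u32 rd)
	{
		return (wr & EX_IDX_MASK) == (rd & EX_IDX_MASK) &&
		       (wr & EX_ROLLOVER) == (rd & EX_ROLLOVER);
	}

	/* Full: indices match but the producer has wrapped one more time
	 * than the consumer, so the rollover bits disagree.
	 */
	static inline int ex_ring_full(u32 wr, u32 rd)
	{
		return (wr & EX_IDX_MASK) == (rd & EX_IDX_MASK) &&
		       (wr & EX_ROLLOVER) != (rd & EX_ROLLOVER);
	}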
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9189a32b7844..bb60c2754a97 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1250,6 +1250,23 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
+ case WLAN_EID_VHT_CAPABILITY:
+ bss_entry->disable_11ac = false;
+ bss_entry->bcn_vht_cap =
+ (void *)(current_ptr +
+ sizeof(struct ieee_types_header));
+ bss_entry->vht_cap_offset =
+ (u16)((u8 *)bss_entry->bcn_vht_cap -
+ bss_entry->beacon_buf);
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ bss_entry->bcn_vht_oper =
+ (void *)(current_ptr +
+ sizeof(struct ieee_types_header));
+ bss_entry->vht_info_offset =
+ (u16)((u8 *)bss_entry->bcn_vht_oper -
+ bss_entry->beacon_buf);
+ break;
case WLAN_EID_BSS_COEX_2040:
bss_entry->bcn_bss_co_2040 = current_ptr +
sizeof(struct ieee_types_header);
@@ -1264,6 +1281,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
+ case WLAN_EID_OPMODE_NOTIF:
+ bss_entry->oper_mode =
+ (void *)(current_ptr +
+ sizeof(struct ieee_types_header));
+ bss_entry->oper_mode_offset =
+ (u16)((u8 *)bss_entry->oper_mode -
+ bss_entry->beacon_buf);
+ break;
default:
break;
}
@@ -1309,7 +1334,6 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node;
union mwifiex_scan_cmd_config_tlv *scan_cfg_out;
struct mwifiex_ie_types_chan_list_param_set *chan_list_out;
- u32 buf_size;
struct mwifiex_chan_scan_param_set *scan_chan_list;
u8 filtered_scan;
u8 scan_current_chan_only;
@@ -1332,18 +1356,16 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!scan_cfg_out) {
- dev_err(adapter->dev, "failed to alloc scan_cfg_out\n");
ret = -ENOMEM;
goto done;
}
- buf_size = sizeof(struct mwifiex_chan_scan_param_set) *
- MWIFIEX_USER_SCAN_CHAN_MAX;
- scan_chan_list = kzalloc(buf_size, GFP_KERNEL);
+ scan_chan_list = kcalloc(MWIFIEX_USER_SCAN_CHAN_MAX,
+ sizeof(struct mwifiex_chan_scan_param_set),
+ GFP_KERNEL);
if (!scan_chan_list) {
- dev_err(adapter->dev, "failed to alloc scan_chan_list\n");
kfree(scan_cfg_out);
ret = -ENOMEM;
goto done;
@@ -1461,12 +1483,9 @@ static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
unsigned long flags;
/* Allocate and fill new bss descriptor */
- bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
- GFP_KERNEL);
- if (!bss_desc) {
- dev_err(priv->adapter->dev, " failed to alloc bss_desc\n");
+ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL);
+ if (!bss_desc)
return -ENOMEM;
- }
ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
if (ret)
@@ -1485,20 +1504,26 @@ static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
priv->curr_bss_params.bss_descriptor.bcn_wapi_ie = NULL;
priv->curr_bss_params.bss_descriptor.wapi_offset = 0;
priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
- priv->curr_bss_params.bss_descriptor.ht_cap_offset =
- 0;
+ priv->curr_bss_params.bss_descriptor.ht_cap_offset = 0;
priv->curr_bss_params.bss_descriptor.bcn_ht_oper = NULL;
- priv->curr_bss_params.bss_descriptor.ht_info_offset =
- 0;
- priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 =
- NULL;
- priv->curr_bss_params.bss_descriptor.
- bss_co_2040_offset = 0;
+ priv->curr_bss_params.bss_descriptor.ht_info_offset = 0;
+ priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 = NULL;
+ priv->curr_bss_params.bss_descriptor.bss_co_2040_offset = 0;
priv->curr_bss_params.bss_descriptor.bcn_ext_cap = NULL;
priv->curr_bss_params.bss_descriptor.ext_cap_offset = 0;
priv->curr_bss_params.bss_descriptor.beacon_buf = NULL;
- priv->curr_bss_params.bss_descriptor.beacon_buf_size =
- 0;
+ priv->curr_bss_params.bss_descriptor.beacon_buf_size = 0;
+ priv->curr_bss_params.bss_descriptor.bcn_vht_cap = NULL;
+ priv->curr_bss_params.bss_descriptor.vht_cap_offset = 0;
+ priv->curr_bss_params.bss_descriptor.bcn_vht_oper = NULL;
+ priv->curr_bss_params.bss_descriptor.vht_info_offset = 0;
+ priv->curr_bss_params.bss_descriptor.oper_mode = NULL;
+ priv->curr_bss_params.bss_descriptor.oper_mode_offset = 0;
+
+ /* Disable 11ac by default. Enable it only when a VHT_CAP IE
+ * is present in the AP beacon.
+ */
+ priv->curr_bss_params.bss_descriptor.disable_11ac = true;
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc,
@@ -1563,7 +1588,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
scan_rsp->number_of_sets);
ret = -1;
- goto done;
+ goto check_next_scan;
}
bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1634,7 +1659,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (!beacon_size || beacon_size > bytes_left) {
bss_info += bytes_left;
bytes_left = 0;
- return -1;
+ ret = -1;
+ goto check_next_scan;
}
/* Initialize the current working beacon pointer for this BSS
@@ -1690,7 +1716,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(priv->adapter->dev,
"%s: bytes left < IE length\n",
__func__);
- goto done;
+ goto check_next_scan;
}
if (element_id == WLAN_EID_DS_PARAMS) {
channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1746,13 +1772,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
.mac_address, ETH_ALEN))
mwifiex_update_curr_bss_params(priv,
bss);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
}
} else {
dev_dbg(adapter->dev, "missing BSS channel IE\n");
}
}
+check_next_scan:
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1813,7 +1840,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
}
-done:
return ret;
}
@@ -1879,10 +1905,8 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
}
scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);
- if (!scan_cfg) {
- dev_err(adapter->dev, "failed to alloc scan_cfg\n");
+ if (!scan_cfg)
return -ENOMEM;
- }
scan_cfg->ssid_list = req_ssid;
scan_cfg->num_ssids = 1;
@@ -1996,11 +2020,8 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
kfree(priv->curr_bcn_buf);
priv->curr_bcn_buf = kmalloc(curr_bss->beacon_buf_size,
GFP_ATOMIC);
- if (!priv->curr_bcn_buf) {
- dev_err(priv->adapter->dev,
- "failed to alloc curr_bcn_buf\n");
+ if (!priv->curr_bcn_buf)
return;
- }
}
memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
@@ -2032,6 +2053,14 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
(curr_bss->beacon_buf +
curr_bss->ht_info_offset);
+ if (curr_bss->bcn_vht_cap)
+ curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_cap_offset);
+
+ if (curr_bss->bcn_vht_oper)
+ curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_info_offset);
+
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
(curr_bss->beacon_buf + curr_bss->bss_co_2040_offset);
@@ -2039,6 +2068,10 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
if (curr_bss->bcn_ext_cap)
curr_bss->bcn_ext_cap = curr_bss->beacon_buf +
curr_bss->ext_cap_offset;
+
+ if (curr_bss->oper_mode)
+ curr_bss->oper_mode = (void *)(curr_bss->beacon_buf +
+ curr_bss->oper_mode_offset);
}
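mwifiex_save_curr_bcn() keeps a byte offset next to every cached IE pointer and, after the beacon is copied into a fresh buffer, rebuilds each pointer as beacon_buf + offset; the new VHT and operating-mode IEs follow the same pattern. A reduced sketch of that fix-up with a hypothetical cache holding a single IE (names are illustrative):

	struct ex_bcn_cache {
		u8 *beacon_buf;				/* freshly copied beacon */
		struct ieee_types_header *vht_cap;	/* points into beacon_buf */
		u16 vht_cap_offset;			/* byte offset saved at parse time */
	};

	/* After the beacon bytes move to a new buffer, the old IE pointers
	 * reference stale memory; each one is recomputed from its offset.
	 */
	static void ex_bcn_cache_fixup(struct ex_bcn_cache *cache)
	{
		if (cache->vht_cap)
			cache->vht_cap = (void *)(cache->beacon_buf +
						  cache->vht_cap_offset);
	}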
/*
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 5a1c1d0e5599..e63f646a260e 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -332,7 +332,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
u8 *buffer, u32 pkt_len, u32 port)
{
struct sdio_mmc_card *card = adapter->card;
- int ret = -1;
+ int ret;
u8 blk_mode =
(port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
@@ -350,8 +350,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
sdio_claim_host(card->func);
- if (!sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size))
- ret = 0;
+ ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size);
sdio_release_host(card->func);
@@ -365,7 +364,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
u32 len, u32 port, u8 claim)
{
struct sdio_mmc_card *card = adapter->card;
- int ret = -1;
+ int ret;
u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
: BLOCK_MODE;
u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
@@ -376,8 +375,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
if (claim)
sdio_claim_host(card->func);
- if (!sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size))
- ret = 0;
+ ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size);
if (claim)
sdio_release_host(card->func);
@@ -718,11 +716,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* Assume that the allocated buffer is 8-byte aligned */
fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
- if (!fwbuf) {
- dev_err(adapter->dev,
- "unable to alloc buffer for FW. Terminating dnld\n");
+ if (!fwbuf)
return -ENOMEM;
- }
/* Perform firmware data transfer */
do {
@@ -1520,7 +1515,6 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL);
if (!card->mpa_tx.buf) {
- dev_err(adapter->dev, "could not alloc buffer for MP-A TX\n");
ret = -1;
goto error;
}
@@ -1529,7 +1523,6 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL);
if (!card->mpa_rx.buf) {
- dev_err(adapter->dev, "could not alloc buffer for MP-A RX\n");
ret = -1;
goto error;
}
@@ -1682,10 +1675,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
/* Allocate buffers for SDIO MP-A */
card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL);
- if (!card->mp_regs) {
- dev_err(adapter->dev, "failed to alloc mp_regs\n");
+ if (!card->mp_regs)
return -ENOMEM;
- }
ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
@@ -1752,6 +1743,8 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
static struct mmc_host *reset_host;
static void sdio_card_reset_worker(struct work_struct *work)
{
+ struct mmc_host *target = reset_host;
+
/* The actual reset operation must be run outside of driver thread.
* This is because mmc_remove_host() will cause the device to be
* instantly destroyed, and the driver then needs to end its thread,
@@ -1761,10 +1754,10 @@ static void sdio_card_reset_worker(struct work_struct *work)
*/
pr_err("Resetting card...\n");
- mmc_remove_host(reset_host);
+ mmc_remove_host(target);
/* 20ms delay is based on experiment with sdhci controller */
mdelay(20);
- mmc_add_host(reset_host);
+ mmc_add_host(target);
}
static DECLARE_WORK(card_reset_work, sdio_card_reset_worker);
@@ -1773,9 +1766,6 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
- if (work_pending(&card_reset_work))
- return;
-
reset_host = card->func->card->host;
schedule_work(&card_reset_work);
}
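In sdio_card_reset_worker() above, the shared reset_host pointer is copied into a local before the remove/add pair runs, so the slow reset sequence always acts on the host it was scheduled for even if reset_host is rewritten in the meantime; the work_pending() guard in mwifiex_sdio_card_reset() is dropped as well. A minimal sketch of that snapshot-then-act shape (names are illustrative, not from the driver):

	static struct mmc_host *ex_reset_host;	/* shared; may be rewritten */

	static void ex_reset_worker(struct work_struct *work)
	{
		struct mmc_host *target = ex_reset_host;	/* snapshot first */

		mmc_remove_host(target);
		/* brief settle time before re-probing the card */
		mdelay(20);
		mmc_add_host(target);
	}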
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 5d87195390f8..c55c5bb93134 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -931,7 +931,6 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
struct host_cmd_ds_pcie_details *host_spec =
&cmd->params.pcie_host_spec;
struct pcie_service_card *card = priv->adapter->card;
- phys_addr_t *buf_pa;
cmd->command = cpu_to_le16(HostCmd_CMD_PCIE_DESC_DETAILS);
cmd->size = cpu_to_le16(sizeof(struct
@@ -953,10 +952,11 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
host_spec->evtbd_addr_lo = (u32)(card->evtbd_ring_pbase);
host_spec->evtbd_addr_hi = (u32)(((u64)card->evtbd_ring_pbase)>>32);
host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD;
- if (card->sleep_cookie) {
- buf_pa = MWIFIEX_SKB_PACB(card->sleep_cookie);
- host_spec->sleep_cookie_addr_lo = (u32) *buf_pa;
- host_spec->sleep_cookie_addr_hi = (u32) (((u64)*buf_pa) >> 32);
+ if (card->sleep_cookie_vbase) {
+ host_spec->sleep_cookie_addr_lo =
+ (u32)(card->sleep_cookie_pbase);
+ host_spec->sleep_cookie_addr_hi =
+ (u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n",
host_spec->sleep_cookie_addr_lo);
}
@@ -1230,7 +1230,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
data_buf);
break;
case HostCmd_CMD_11N_CFG:
- ret = mwifiex_cmd_11n_cfg(cmd_ptr, cmd_action, data_buf);
+ ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action, data_buf);
break;
case HostCmd_CMD_WMM_GET_STATUS:
dev_dbg(priv->adapter->dev,
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 65c12eb3e5e7..4669f8d9389f 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -24,6 +24,7 @@
#include "main.h"
#include "wmm.h"
#include "11n.h"
+#include "11ac.h"
/*
@@ -935,9 +936,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
/ MWIFIEX_SDIO_BLOCK_SIZE)
* MWIFIEX_SDIO_BLOCK_SIZE;
adapter->curr_tx_buf_size = adapter->tx_buf_size;
- dev_dbg(adapter->dev,
- "cmd: max_tx_buf_size=%d, tx_buf_size=%d\n",
- adapter->max_tx_buf_size, adapter->tx_buf_size);
+ dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n",
+ adapter->curr_tx_buf_size);
if (adapter->if_ops.update_mp_end_port)
adapter->if_ops.update_mp_end_port(adapter,
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index cb682561c438..9f33c92c90f5 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -56,7 +56,6 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
*/
int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
- bool cancel_flag = false;
int status;
struct cmd_ctrl_node *cmd_queued;
@@ -70,14 +69,11 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
atomic_inc(&adapter->cmd_pending);
/* Wait for completion */
- wait_event_interruptible(adapter->cmd_wait_q.wait,
- *(cmd_queued->condition));
- if (!*(cmd_queued->condition))
- cancel_flag = true;
-
- if (cancel_flag) {
- mwifiex_cancel_pending_ioctl(adapter);
- dev_dbg(adapter->dev, "cmd cancel\n");
+ status = wait_event_interruptible(adapter->cmd_wait_q.wait,
+ *(cmd_queued->condition));
+ if (status) {
+ dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+ return status;
}
status = adapter->cmd_wait_q.status;
@@ -166,13 +162,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
rcu_read_lock();
ies = rcu_dereference(bss->ies);
- if (WARN_ON(!ies)) {
- /* should never happen */
- rcu_read_unlock();
- return -EINVAL;
- }
beacon_ie = kmemdup(ies->data, ies->len, GFP_ATOMIC);
beacon_ie_len = ies->len;
+ bss_desc->timestamp = ies->tsf;
rcu_read_unlock();
if (!beacon_ie) {
@@ -188,7 +180,6 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
bss_desc->cap_info_bitmap = bss->capability;
bss_desc->bss_band = bss_priv->band;
bss_desc->fw_tsf = bss_priv->fw_tsf;
- bss_desc->timestamp = bss->tsf;
if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
@@ -270,11 +261,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
- GFP_KERNEL);
- if (!bss_desc) {
- dev_err(priv->adapter->dev, " failed to alloc bss_desc\n");
+ GFP_KERNEL);
+ if (!bss_desc)
return -ENOMEM;
- }
ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
if (ret)
@@ -287,6 +276,21 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
if (ret)
goto done;
+ if (bss_desc) {
+ u8 config_bands = 0;
+
+ if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band)
+ == HostCmd_SCAN_RADIO_TYPE_BG)
+ config_bands = BAND_B | BAND_G | BAND_GN |
+ BAND_GAC;
+ else
+ config_bands = BAND_A | BAND_AN | BAND_AAC;
+
+ if (!((config_bands | adapter->fw_bands) &
+ ~adapter->fw_bands))
+ adapter->config_bands = config_bands;
+ }
+
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (ret)
goto done;
@@ -314,7 +318,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
}
if (bss)
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->adapter->wiphy, bss);
} else {
/* Adhoc mode */
/* If the requested SSID matches current SSID, return */
@@ -344,7 +348,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
" list. Joining...\n");
ret = mwifiex_adhoc_join(priv, bss_desc);
if (bss)
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->adapter->wiphy, bss);
} else {
dev_dbg(adapter->dev, "info: Network not found in "
"the list, creating adhoc with ssid = %s\n",
@@ -496,8 +500,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
return false;
}
- wait_event_interruptible(adapter->hs_activate_wait_q,
- adapter->hs_activate_wait_q_woken);
+ if (wait_event_interruptible(adapter->hs_activate_wait_q,
+ adapter->hs_activate_wait_q_woken)) {
+ dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
+ return false;
+ }
return true;
}
@@ -623,11 +630,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
}
}
buf = kzalloc(MWIFIEX_SIZE_OF_CMD_BUFFER, GFP_KERNEL);
- if (!buf) {
- dev_err(priv->adapter->dev, "%s: failed to alloc cmd buffer\n",
- __func__);
+ if (!buf)
return -ENOMEM;
- }
txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 8c80024c30ff..296faec14365 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -117,14 +117,16 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
break;
case -1:
- adapter->data_sent = false;
+ if (adapter->iface_type != MWIFIEX_PCIE)
+ adapter->data_sent = false;
dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
case -EINPROGRESS:
- adapter->data_sent = false;
+ if (adapter->iface_type != MWIFIEX_PCIE)
+ adapter->data_sent = false;
break;
case 0:
mwifiex_write_data_complete(adapter, skb, 0, ret);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 8dd72240f162..6e76a15a8950 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -219,6 +219,7 @@ void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
config->rts_threshold = 0x7FFF;
config->frag_threshold = 0x7FFF;
config->retry_limit = 0x7F;
+ config->qos_info = 0xFF;
}
/* This function parses BSS related parameters from structure
@@ -297,6 +298,38 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
return;
}
+/* This function parses WMM-related parameters from the
+ * cfg80211_ap_settings structure and updates the bss_config structure.
+ */
+void
+mwifiex_set_wmm_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_ap_settings *params)
+{
+ const u8 *vendor_ie;
+ struct ieee_types_header *wmm_ie;
+ u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
+
+ vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ params->beacon.tail,
+ params->beacon.tail_len);
+ if (vendor_ie) {
+ wmm_ie = (struct ieee_types_header *)vendor_ie;
+ memcpy(&bss_cfg->wmm_info, wmm_ie + 1,
+ sizeof(bss_cfg->wmm_info));
+ priv->wmm_enabled = 1;
+ } else {
+ memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info));
+ memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui));
+ bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE;
+ bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION;
+ priv->wmm_enabled = 0;
+ }
+
+ bss_cfg->qos_info = 0x00;
+ return;
+}
/* This function parses BSS related parameters from structure
* and prepares TLVs specific to WEP encryption.
* These TLVs are appended to command buffer.
@@ -354,6 +387,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
struct host_cmd_tlv_rates *tlv_rates;
struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
struct mwifiex_ie_types_htcap *htcap;
+ struct mwifiex_ie_types_wmmcap *wmm_cap;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
int i;
u16 cmd_size = *param_size;
@@ -507,6 +541,16 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
tlv += sizeof(struct mwifiex_ie_types_htcap);
}
+ if (bss_cfg->wmm_info.qos_info != 0xFF) {
+ wmm_cap = (struct mwifiex_ie_types_wmmcap *)tlv;
+ wmm_cap->header.type = cpu_to_le16(WLAN_EID_VENDOR_SPECIFIC);
+ wmm_cap->header.len = cpu_to_le16(sizeof(wmm_cap->wmm_info));
+ memcpy(&wmm_cap->wmm_info, &bss_cfg->wmm_info,
+ sizeof(wmm_cap->wmm_info));
+ cmd_size += sizeof(struct mwifiex_ie_types_wmmcap);
+ tlv += sizeof(struct mwifiex_ie_types_wmmcap);
+ }
+
if (bss_cfg->sta_ao_timer) {
ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
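The uAP command builder above grows the command buffer TLV by TLV: cast the cursor to the TLV struct, fill in type, length, and value, then advance both the cursor and the running cmd_size; the new WMM capability block is appended the same way as the HT capability block before it. A stripped-down sketch of one such append step (struct and function names are illustrative):

	struct ex_tlv_hdr {
		__le16 type;
		__le16 len;
	} __packed;

	/* Append one TLV at *tlv and advance both the cursor and the size. */
	static u8 *ex_append_tlv(u8 *tlv, u16 *cmd_size, u16 type,
				 const void *payload, u16 payload_len)
	{
		struct ex_tlv_hdr *hdr = (struct ex_tlv_hdr *)tlv;

		hdr->type = cpu_to_le16(type);
		hdr->len = cpu_to_le16(payload_len);
		memcpy(tlv + sizeof(*hdr), payload, payload_len);

		*cmd_size += sizeof(*hdr) + payload_len;
		return tlv + sizeof(*hdr) + payload_len;
	}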
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 63ac9f2d11ae..f90fe21e5bfd 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -672,7 +672,7 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
*len, &actual_length, timeout);
if (ret) {
dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
- ret = -1;
+ return ret;
}
*len = actual_length;
@@ -691,7 +691,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
*len, &actual_length, timeout);
if (ret) {
dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
- ret = -1;
+ return ret;
}
*len = actual_length;
@@ -786,21 +786,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
return 0;
}
-/* This function reads one block of firmware data. */
-static int mwifiex_get_fw_data(struct mwifiex_adapter *adapter,
- u32 offset, u32 len, u8 *buf)
-{
- if (!buf || !len)
- return -1;
-
- if (offset + len > adapter->firmware->size)
- return -1;
-
- memcpy(buf, adapter->firmware->data + offset, len);
-
- return 0;
-}
-
static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct mwifiex_fw_image *fw)
{
@@ -836,23 +821,14 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dlen = 0;
} else {
/* copy the header of the fw_data to get the length */
- if (firmware)
- memcpy(&fwdata->fw_hdr, &firmware[tlen],
- sizeof(struct fw_header));
- else
- mwifiex_get_fw_data(adapter, tlen,
- sizeof(struct fw_header),
- (u8 *)&fwdata->fw_hdr);
+ memcpy(&fwdata->fw_hdr, &firmware[tlen],
+ sizeof(struct fw_header));
dlen = le32_to_cpu(fwdata->fw_hdr.data_len);
dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
tlen += sizeof(struct fw_header);
- if (firmware)
- memcpy(fwdata->data, &firmware[tlen], dlen);
- else
- mwifiex_get_fw_data(adapter, tlen, dlen,
- (u8 *)fwdata->data);
+ memcpy(fwdata->data, &firmware[tlen], dlen);
fwdata->seq_num = cpu_to_le32(fw_seqnum);
tlen += dlen;
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 0982375ba3b1..21553976b550 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -91,7 +91,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
memcpy(info->packets_out,
priv->wmm.packets_out,
sizeof(priv->wmm.packets_out));
- info->max_tx_buf_size = (u32) adapter->max_tx_buf_size;
+ info->curr_tx_buf_size = (u32) adapter->curr_tx_buf_size;
info->tx_buf_size = (u32) adapter->tx_buf_size;
info->rx_tbl_num = mwifiex_get_rx_reorder_tbl(priv,
info->rx_tbl);
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index f6d36b9654a0..cb2d0582bd36 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -22,16 +22,16 @@
static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
{
- return (struct mwifiex_rxinfo *)(skb->cb + sizeof(phys_addr_t));
+ return (struct mwifiex_rxinfo *)(skb->cb + sizeof(dma_addr_t));
}
static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
{
- return (struct mwifiex_txinfo *)(skb->cb + sizeof(phys_addr_t));
+ return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
}
-static inline phys_addr_t *MWIFIEX_SKB_PACB(struct sk_buff *skb)
+static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, dma_addr_t *buf_pa)
{
- return (phys_addr_t *)skb->cb;
+ memcpy(buf_pa, skb->cb, sizeof(dma_addr_t));
}
#endif /* !_MWIFIEX_UTIL_H_ */
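MWIFIEX_SKB_PACB() now copies the DMA address out of skb->cb into a caller-supplied dma_addr_t instead of returning a pointer into the control block, which avoids treating cb as an aligned pointer and matches the switch from phys_addr_t to dma_addr_t. The store side lives where buffers are mapped in pcie.c; a hedged sketch of the get/set pair (the setter name is illustrative, not a helper this patch adds):

	/* Stash the mapped DMA address in the first bytes of skb->cb. */
	static inline void ex_skb_set_pa(struct sk_buff *skb, dma_addr_t pa)
	{
		memcpy(skb->cb, &pa, sizeof(pa));
	}

	/* Read it back later, e.g. right before pci_unmap_single(); the
	 * memcpy avoids dereferencing cb as a possibly misaligned pointer.
	 */
	static inline dma_addr_t ex_skb_get_pa(struct sk_buff *skb)
	{
		dma_addr_t pa;

		memcpy(&pa, skb->cb, sizeof(pa));
		return pa;
	}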
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 818f871ae987..32adc878041d 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -109,12 +109,9 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
struct mwifiex_ra_list_tbl *ra_list;
ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
-
- if (!ra_list) {
- dev_err(adapter->dev, "%s: failed to alloc ra_list\n",
- __func__);
+ if (!ra_list)
return NULL;
- }
+
INIT_LIST_HEAD(&ra_list->list);
skb_queue_head_init(&ra_list->skb_head);
@@ -568,6 +565,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
mwifiex_wmm_delete_all_ralist(priv);
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
+ if (priv->adapter->if_ops.clean_pcie_ring)
+ priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
@@ -1206,13 +1205,15 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
ra_list_flags);
break;
case -1:
- adapter->data_sent = false;
+ if (adapter->iface_type != MWIFIEX_PCIE)
+ adapter->data_sent = false;
dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
case -EINPROGRESS:
- adapter->data_sent = false;
+ if (adapter->iface_type != MWIFIEX_PCIE)
+ adapter->data_sent = false;
default:
break;
}
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index f221b95b90b3..091d9a64080a 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -101,6 +101,18 @@ MODULE_PARM_DESC(ap_mode_default,
#define MWL8K_MAX_TX_QUEUES (MWL8K_TX_WMM_QUEUES + MWL8K_MAX_AMPDU_QUEUES)
#define mwl8k_tx_queues(priv) (MWL8K_TX_WMM_QUEUES + (priv)->num_ampdu_queues)
+/* txpriorities map onto hw queues:
+ * each hw queue has its own txpriority.
+ */
+#define TOTAL_HW_TX_QUEUES 8
+
+/* Each HW queue can carry one AMPDU stream.
+ * However, because one of the hw queues is reserved,
+ * the maximum number of AMPDU streams that can be
+ * created is one less than the total number of tx queues.
+ */
+#define MWL8K_NUM_AMPDU_STREAMS (TOTAL_HW_TX_QUEUES - 1)
+
struct rxd_ops {
int rxd_size;
void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
@@ -160,7 +172,6 @@ struct mwl8k_ampdu_stream {
u8 tid;
u8 state;
u8 idx;
- u8 txq_idx; /* index of this stream in priv->txq */
};
struct mwl8k_priv {
@@ -202,6 +213,8 @@ struct mwl8k_priv {
int fw_mutex_depth;
struct completion *hostcmd_wait;
+ atomic_t watchdog_event_pending;
+
/* lock held over TX and TX reap */
spinlock_t tx_lock;
@@ -272,6 +285,9 @@ struct mwl8k_priv {
char *fw_pref;
char *fw_alt;
struct completion firmware_loading_complete;
+
+ /* bitmap of running BSSes */
+ u32 running_bsses;
};
#define MAX_WEP_KEY_LEN 13
@@ -318,20 +334,20 @@ struct mwl8k_sta {
#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
static const struct ieee80211_channel mwl8k_channels_24[] = {
- { .center_freq = 2412, .hw_value = 1, },
- { .center_freq = 2417, .hw_value = 2, },
- { .center_freq = 2422, .hw_value = 3, },
- { .center_freq = 2427, .hw_value = 4, },
- { .center_freq = 2432, .hw_value = 5, },
- { .center_freq = 2437, .hw_value = 6, },
- { .center_freq = 2442, .hw_value = 7, },
- { .center_freq = 2447, .hw_value = 8, },
- { .center_freq = 2452, .hw_value = 9, },
- { .center_freq = 2457, .hw_value = 10, },
- { .center_freq = 2462, .hw_value = 11, },
- { .center_freq = 2467, .hw_value = 12, },
- { .center_freq = 2472, .hw_value = 13, },
- { .center_freq = 2484, .hw_value = 14, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
};
static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -352,10 +368,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
};
static const struct ieee80211_channel mwl8k_channels_50[] = {
- { .center_freq = 5180, .hw_value = 36, },
- { .center_freq = 5200, .hw_value = 40, },
- { .center_freq = 5220, .hw_value = 44, },
- { .center_freq = 5240, .hw_value = 48, },
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
};
static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -1133,7 +1149,6 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
return -ENOMEM;
}
@@ -1426,7 +1441,6 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
return -ENOMEM;
}
@@ -1516,6 +1530,9 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
return -EBUSY;
}
+ if (atomic_read(&priv->watchdog_event_pending))
+ return 0;
+
/*
* The TX queues are stopped at this point, so this test
* doesn't need to take ->tx_lock.
@@ -1537,6 +1554,14 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
spin_unlock_bh(&priv->tx_lock);
timeout = wait_for_completion_timeout(&tx_wait,
msecs_to_jiffies(MWL8K_TX_WAIT_TIMEOUT_MS));
+
+ if (atomic_read(&priv->watchdog_event_pending)) {
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_wait = NULL;
+ spin_unlock_bh(&priv->tx_lock);
+ return 0;
+ }
+
spin_lock_bh(&priv->tx_lock);
if (timeout) {
@@ -1564,6 +1589,7 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
rc = -ETIMEDOUT;
}
+ priv->tx_wait = NULL;
spin_unlock_bh(&priv->tx_lock);
return rc;
@@ -1734,14 +1760,13 @@ mwl8k_add_stream(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 tid)
struct mwl8k_priv *priv = hw->priv;
int i;
- for (i = 0; i < priv->num_ampdu_queues; i++) {
+ for (i = 0; i < MWL8K_NUM_AMPDU_STREAMS; i++) {
stream = &priv->ampdu[i];
if (stream->state == AMPDU_NO_STREAM) {
stream->sta = sta;
stream->state = AMPDU_STREAM_NEW;
stream->tid = tid;
stream->idx = i;
- stream->txq_idx = MWL8K_TX_WMM_QUEUES + i;
wiphy_debug(hw->wiphy, "Added a new stream for %pM %d",
sta->addr, tid);
return stream;
@@ -1782,7 +1807,7 @@ mwl8k_lookup_stream(struct ieee80211_hw *hw, u8 *addr, u8 tid)
struct mwl8k_priv *priv = hw->priv;
int i;
- for (i = 0 ; i < priv->num_ampdu_queues; i++) {
+ for (i = 0; i < MWL8K_NUM_AMPDU_STREAMS; i++) {
struct mwl8k_ampdu_stream *stream;
stream = &priv->ampdu[i];
if (stream->state == AMPDU_NO_STREAM)
@@ -1829,6 +1854,13 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
tx_stats->pkts++;
}
+/* The hardware ampdu queues start from 5.
+ * txpriorities for the ampdu queues are
+ * 5 6 7 0 1 2 3 4, i.e., queue 5 is highest
+ * and queue 3 is lowest (queue 4 is reserved).
+ */
+#define BA_QUEUE 5
+
static void
mwl8k_txq_xmit(struct ieee80211_hw *hw,
int index,
@@ -1928,8 +1960,13 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
stream = mwl8k_lookup_stream(hw, sta->addr, tid);
if (stream != NULL) {
if (stream->state == AMPDU_STREAM_ACTIVE) {
- txpriority = stream->txq_idx;
- index = stream->txq_idx;
+ WARN_ON(!(qos & MWL8K_QOS_ACK_POLICY_BLOCKACK));
+ txpriority = (BA_QUEUE + stream->idx) %
+ TOTAL_HW_TX_QUEUES;
+ if (stream->idx <= 1)
+ index = stream->idx +
+ MWL8K_TX_WMM_QUEUES;
+
} else if (stream->state == AMPDU_STREAM_NEW) {
/* We get here if the driver sends us packets
* after we've initiated a stream, but before
@@ -1971,6 +2008,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
}
}
spin_unlock(&priv->stream_lock);
+ } else {
+ qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
+ qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
}
dma = pci_map_single(priv->pdev, skb->data,
@@ -2117,6 +2157,8 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
}
}
+static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable,
+ u32 bitmap);
/*
* Command processing.
@@ -2135,6 +2177,34 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
int rc;
unsigned long timeout = 0;
u8 buf[32];
+ u32 bitmap = 0;
+
+ wiphy_dbg(hw->wiphy, "Posting %s [%d]\n",
+ mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), cmd->macid);
+
+ /* Before posting firmware commands that could change the hardware
+ * characteristics, make sure that all BSSes are stopped temporarily.
+ * Re-enable these stopped BSSes after the commands complete.
+ */
+
+ rc = mwl8k_fw_lock(hw);
+ if (rc)
+ return rc;
+
+ if (priv->ap_fw && priv->running_bsses) {
+ switch (le16_to_cpu(cmd->code)) {
+ case MWL8K_CMD_SET_RF_CHANNEL:
+ case MWL8K_CMD_RADIO_CONTROL:
+ case MWL8K_CMD_RF_TX_POWER:
+ case MWL8K_CMD_TX_POWER:
+ case MWL8K_CMD_RF_ANTENNA:
+ case MWL8K_CMD_RTS_THRESHOLD:
+ case MWL8K_CMD_MIMO_CONFIG:
+ bitmap = priv->running_bsses;
+ mwl8k_enable_bsses(hw, false, bitmap);
+ break;
+ }
+ }
cmd->result = (__force __le16) 0xffff;
dma_size = le16_to_cpu(cmd->length);
@@ -2143,13 +2213,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
if (pci_dma_mapping_error(priv->pdev, dma_addr))
return -ENOMEM;
- rc = mwl8k_fw_lock(hw);
- if (rc) {
- pci_unmap_single(priv->pdev, dma_addr, dma_size,
- PCI_DMA_BIDIRECTIONAL);
- return rc;
- }
-
priv->hostcmd_wait = &cmd_wait;
iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
iowrite32(MWL8K_H2A_INT_DOORBELL,
@@ -2162,7 +2225,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
priv->hostcmd_wait = NULL;
- mwl8k_fw_unlock(hw);
pci_unmap_single(priv->pdev, dma_addr, dma_size,
PCI_DMA_BIDIRECTIONAL);
@@ -2189,6 +2251,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
ms);
}
+ if (bitmap)
+ mwl8k_enable_bsses(hw, true, bitmap);
+
+ mwl8k_fw_unlock(hw);
+
return rc;
}
@@ -2450,7 +2517,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
priv->hw_rev = cmd->hw_rev;
mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
priv->ap_macids_supported = 0x000000ff;
- priv->sta_macids_supported = 0x00000000;
+ priv->sta_macids_supported = 0x00000100;
priv->num_ampdu_queues = le32_to_cpu(cmd->num_of_ampdu_queues);
if (priv->num_ampdu_queues > MWL8K_MAX_AMPDU_QUEUES) {
wiphy_warn(hw->wiphy, "fw reported %d ampdu queues"
@@ -3469,7 +3536,10 @@ static int mwl8k_cmd_update_mac_addr(struct ieee80211_hw *hw,
mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
- mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
+ if (priv->ap_fw)
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
+ else
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
else
mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
} else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
@@ -3578,7 +3648,11 @@ static int mwl8k_cmd_get_watchdog_bitmap(struct ieee80211_hw *hw, u8 *bitmap)
return rc;
}
-#define INVALID_BA 0xAA
+#define MWL8K_WMM_QUEUE_NUMBER 3
+
+static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
+ u8 idx);
+
static void mwl8k_watchdog_ba_events(struct work_struct *work)
{
int rc;
@@ -3586,24 +3660,41 @@ static void mwl8k_watchdog_ba_events(struct work_struct *work)
struct mwl8k_ampdu_stream *streams;
struct mwl8k_priv *priv =
container_of(work, struct mwl8k_priv, watchdog_ba_handle);
+ struct ieee80211_hw *hw = priv->hw;
+ int i;
+ u32 status = 0;
+
+ mwl8k_fw_lock(hw);
rc = mwl8k_cmd_get_watchdog_bitmap(priv->hw, &bitmap);
if (rc)
- return;
+ goto done;
- if (bitmap == INVALID_BA)
- return;
+ spin_lock(&priv->stream_lock);
/* the bitmap holds hw queue numbers; map each one to its ampdu queue. */
- stream_index = bitmap - MWL8K_TX_WMM_QUEUES;
-
- BUG_ON(stream_index >= priv->num_ampdu_queues);
-
- streams = &priv->ampdu[stream_index];
-
- if (streams->state == AMPDU_STREAM_ACTIVE)
- ieee80211_stop_tx_ba_session(streams->sta, streams->tid);
+ for (i = 0; i < TOTAL_HW_TX_QUEUES; i++) {
+ if (bitmap & (1 << i)) {
+ stream_index = (i + MWL8K_WMM_QUEUE_NUMBER) %
+ TOTAL_HW_TX_QUEUES;
+ streams = &priv->ampdu[stream_index];
+ if (streams->state == AMPDU_STREAM_ACTIVE) {
+ ieee80211_stop_tx_ba_session(streams->sta,
+ streams->tid);
+ spin_unlock(&priv->stream_lock);
+ mwl8k_destroy_ba(hw, stream_index);
+ spin_lock(&priv->stream_lock);
+ }
+ }
+ }
+ spin_unlock(&priv->stream_lock);
+done:
+ atomic_dec(&priv->watchdog_event_pending);
+ status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
+ iowrite32((status | MWL8K_A2H_INT_BA_WATCHDOG),
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
+ mwl8k_fw_unlock(hw);
return;
}
@@ -3620,8 +3711,16 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int enable)
{
struct mwl8k_cmd_bss_start *cmd;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct mwl8k_priv *priv = hw->priv;
int rc;
+ if (enable && (priv->running_bsses & (1 << mwl8k_vif->macid)))
+ return 0;
+
+ if (!enable && !(priv->running_bsses & (1 << mwl8k_vif->macid)))
+ return 0;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
@@ -3633,9 +3732,31 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
+ if (!rc) {
+ if (enable)
+ priv->running_bsses |= (1 << mwl8k_vif->macid);
+ else
+ priv->running_bsses &= ~(1 << mwl8k_vif->macid);
+ }
return rc;
}
+static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable, u32 bitmap)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif, *tmp_vif;
+ struct ieee80211_vif *vif;
+
+ list_for_each_entry_safe(mwl8k_vif, tmp_vif, &priv->vif_list, list) {
+ vif = mwl8k_vif->vif;
+
+ if (!(bitmap & (1 << mwl8k_vif->macid)))
+ continue;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ mwl8k_cmd_bss_start(hw, vif, enable);
+ }
+}
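The new running_bsses field makes CMD_BSS_START idempotent: a start for a macid that is already running, or a stop for one that is not, returns early. A user-space sketch of that bookkeeping, with hypothetical helper names, is below.

#include <stdio.h>
#include <stdbool.h>

static unsigned int running_bsses;	/* one bit per macid, as in the patch */

/* True when the request would be a no-op, mirroring the early returns
 * added to mwl8k_cmd_bss_start(). */
static bool bss_start_is_noop(int macid, bool enable)
{
	bool running = running_bsses & (1u << macid);
	return enable == running;
}

static void bss_start(int macid, bool enable)
{
	if (bss_start_is_noop(macid, enable)) {
		printf("macid %d: already %s, skipping\n",
		       macid, enable ? "started" : "stopped");
		return;
	}
	/* ...the firmware command would be posted here... */
	if (enable)
		running_bsses |= 1u << macid;
	else
		running_bsses &= ~(1u << macid);
	printf("macid %d: now %s\n", macid, enable ? "started" : "stopped");
}

int main(void)
{
	bss_start(2, true);
	bss_start(2, true);	/* duplicate start is skipped */
	bss_start(2, false);
	return 0;
}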
/*
* CMD_BASTREAM.
*/
@@ -3763,7 +3884,7 @@ mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
}
static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
- struct mwl8k_ampdu_stream *stream)
+ u8 idx)
{
struct mwl8k_cmd_bastream *cmd;
@@ -3775,10 +3896,10 @@ static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_BA_DESTROY);
- cmd->destroy_params.ba_context = cpu_to_le32(stream->idx);
+ cmd->destroy_params.ba_context = cpu_to_le32(idx);
mwl8k_post_cmd(hw, &cmd->header);
- wiphy_debug(hw->wiphy, "Deleted BA stream index %d\n", stream->idx);
+ wiphy_debug(hw->wiphy, "Deleted BA stream index %d\n", idx);
kfree(cmd);
}
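Passing the stream index instead of the stream pointer lets callers copy the index under stream_lock, drop the lock around the blocking firmware command, and retake it afterwards without touching a stream that may be recycled in the meantime. A small pthread sketch of that pattern (all names here are hypothetical, not driver symbols):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stream_lock = PTHREAD_MUTEX_INITIALIZER;

struct stream { int idx; int state; };
static struct stream streams[4];

/* Stands in for mwl8k_destroy_ba(): may block, so it must be called
 * without stream_lock held. */
static void destroy_ba(int idx)
{
	printf("destroying BA stream %d (lock not held)\n", idx);
}

static void teardown(struct stream *s)
{
	pthread_mutex_lock(&stream_lock);
	int idx = s->idx;		/* copy while still protected */
	pthread_mutex_unlock(&stream_lock);

	destroy_ba(idx);		/* blocking call with the lock dropped */

	pthread_mutex_lock(&stream_lock);
	s->state = 0;			/* continue under the lock */
	pthread_mutex_unlock(&stream_lock);
}

int main(void)
{
	streams[1].idx = 1;
	streams[1].state = 2;
	teardown(&streams[1]);
	return 0;
}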
@@ -3875,7 +3996,30 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u8 *addr)
{
struct mwl8k_cmd_set_new_stn *cmd;
- int rc;
+ struct mwl8k_priv *priv = hw->priv;
+ int rc, i;
+ u8 idx;
+
+ spin_lock(&priv->stream_lock);
+ /* Destroy any active ampdu streams for this sta */
+ for (i = 0; i < MWL8K_NUM_AMPDU_STREAMS; i++) {
+ struct mwl8k_ampdu_stream *s;
+ s = &priv->ampdu[i];
+ if (s->state != AMPDU_NO_STREAM) {
+ if (memcmp(s->sta->addr, addr, ETH_ALEN) == 0) {
+ if (s->state == AMPDU_STREAM_ACTIVE) {
+ idx = s->idx;
+ spin_unlock(&priv->stream_lock);
+ mwl8k_destroy_ba(hw, idx);
+ spin_lock(&priv->stream_lock);
+ } else if (s->state == AMPDU_STREAM_NEW) {
+ mwl8k_remove_stream(hw, s);
+ }
+ }
+ }
+ }
+
+ spin_unlock(&priv->stream_lock);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
@@ -4119,8 +4263,9 @@ static int mwl8k_set_key(struct ieee80211_hw *hw,
u8 encr_type;
u8 *addr;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct mwl8k_priv *priv = hw->priv;
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION && !priv->ap_fw)
return -EOPNOTSUPP;
if (sta == NULL)
@@ -4250,9 +4395,11 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
p->amsdu_enabled = 0;
rc = mwl8k_post_cmd(hw, &cmd->header);
+ if (!rc)
+ rc = p->station_id;
kfree(cmd);
- return rc ? rc : p->station_id;
+ return rc;
}
static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
@@ -4301,6 +4448,10 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
}
if (status & MWL8K_A2H_INT_BA_WATCHDOG) {
+ iowrite32(~MWL8K_A2H_INT_BA_WATCHDOG,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
+
+ atomic_inc(&priv->watchdog_event_pending);
status &= ~MWL8K_A2H_INT_BA_WATCHDOG;
ieee80211_queue_work(hw, &priv->watchdog_ba_handle);
}
@@ -4444,6 +4595,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
priv->irq = -1;
tasklet_disable(&priv->poll_tx_task);
tasklet_disable(&priv->poll_rx_task);
+ } else {
+ ieee80211_wake_queues(hw);
}
return rc;
@@ -4518,12 +4671,18 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
if (priv->ap_fw && di->fw_image_sta) {
- /* we must load the sta fw to meet this request */
- if (!list_empty(&priv->vif_list))
- return -EBUSY;
- rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
- if (rc)
- return rc;
+ if (!list_empty(&priv->vif_list)) {
+ wiphy_warn(hw->wiphy, "AP interface is running.\n"
+ "Adding STA interface for WDS\n");
+ } else {
+ /* we must load the sta fw to
+ * meet this request.
+ */
+ rc = mwl8k_reload_firmware(hw,
+ di->fw_image_sta);
+ if (rc)
+ return rc;
+ }
}
macids_supported = priv->sta_macids_supported;
break;
@@ -4547,7 +4706,7 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
/* Set the mac address. */
mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
- if (priv->ap_fw)
+ if (vif->type == NL80211_IFTYPE_AP)
mwl8k_cmd_set_new_stn_add_self(hw, vif);
priv->macids_used |= 1 << mwl8k_vif->macid;
@@ -4572,7 +4731,7 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
- if (priv->ap_fw)
+ if (vif->type == NL80211_IFTYPE_AP)
mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
mwl8k_cmd_del_mac_addr(hw, vif, vif->addr);
@@ -4646,9 +4805,11 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (rc)
goto out;
- rc = mwl8k_cmd_set_rf_channel(hw, conf);
- if (rc)
- goto out;
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ rc = mwl8k_cmd_set_rf_channel(hw, conf);
+ if (rc)
+ goto out;
+ }
if (conf->power_level > 18)
conf->power_level = 18;
@@ -4661,12 +4822,6 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
- rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
- if (rc)
- wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
- rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
- if (rc)
- wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
} else {
rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
@@ -4724,7 +4879,8 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rcu_read_unlock();
}
- if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
+ !priv->ap_fw) {
rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
if (rc)
goto out;
@@ -4732,6 +4888,25 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rc = mwl8k_cmd_use_fixed_rate_sta(hw);
if (rc)
goto out;
+ } else {
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
+ priv->ap_fw) {
+ int idx;
+ int rate;
+
+ /* Use AP firmware specific rate command.
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
+
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -4741,13 +4916,13 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- if (changed & BSS_CHANGED_ERP_SLOT) {
+ if ((changed & BSS_CHANGED_ERP_SLOT) && !priv->ap_fw) {
rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
if (rc)
goto out;
}
- if (vif->bss_conf.assoc &&
+ if (vif->bss_conf.assoc && !priv->ap_fw &&
(changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_HT))) {
rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
@@ -4827,11 +5002,9 @@ static void
mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- struct mwl8k_priv *priv = hw->priv;
-
- if (!priv->ap_fw)
+ if (vif->type == NL80211_IFTYPE_STATION)
mwl8k_bss_info_changed_sta(hw, vif, info, changed);
- else
+ if (vif->type == NL80211_IFTYPE_AP)
mwl8k_bss_info_changed_ap(hw, vif, info, changed);
}
@@ -5092,7 +5265,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int i, rc = 0;
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_ampdu_stream *stream;
- u8 *addr = sta->addr;
+ u8 *addr = sta->addr, idx;
struct mwl8k_sta *sta_info = MWL8K_STA(sta);
if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
@@ -5170,11 +5343,14 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
if (stream) {
if (stream->state == AMPDU_STREAM_ACTIVE) {
+ idx = stream->idx;
spin_unlock(&priv->stream_lock);
- mwl8k_destroy_ba(hw, stream);
+ mwl8k_destroy_ba(hw, idx);
spin_lock(&priv->stream_lock);
}
mwl8k_remove_stream(hw, stream);
@@ -5190,8 +5366,9 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!rc)
stream->state = AMPDU_STREAM_ACTIVE;
else {
+ idx = stream->idx;
spin_unlock(&priv->stream_lock);
- mwl8k_destroy_ba(hw, stream);
+ mwl8k_destroy_ba(hw, idx);
spin_lock(&priv->stream_lock);
wiphy_debug(hw->wiphy,
"Failed adding stream for sta %pM tid %d\n",
@@ -5254,7 +5431,7 @@ enum {
MWL8366,
};
-#define MWL8K_8366_AP_FW_API 2
+#define MWL8K_8366_AP_FW_API 3
#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
@@ -5294,6 +5471,8 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, },
{ PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
{ },
};
@@ -5462,6 +5641,7 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
if (priv->rxd_ops == NULL) {
wiphy_err(hw->wiphy,
"Driver does not have AP firmware image support for this hardware\n");
+ rc = -ENOENT;
goto err_stop_firmware;
}
} else {
@@ -5471,6 +5651,7 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
priv->sniffer_enabled = false;
priv->wmm_enabled = false;
priv->pending_tx_pkts = 0;
+ atomic_set(&priv->watchdog_event_pending, 0);
rc = mwl8k_rxq_init(hw, 0);
if (rc)
@@ -5550,6 +5731,15 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
goto err_free_irq;
}
+ /* Configure Antennas */
+ rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
+ if (rc)
+ wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
+ rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+ if (rc)
+ wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
+
/* Disable interrupts */
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
@@ -5637,6 +5827,7 @@ fail:
static const struct ieee80211_iface_limit ap_if_limits[] = {
{ .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
};
static const struct ieee80211_iface_combination ap_if_comb = {
@@ -5729,6 +5920,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
if (priv->ap_macids_supported || priv->device_info->fw_image_ap) {
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
hw->wiphy->iface_combinations = &ap_if_comb;
hw->wiphy->n_iface_combinations = 1;
}
@@ -5807,6 +5999,7 @@ static int mwl8k_probe(struct pci_dev *pdev,
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
+ rc = -EIO;
goto err_iounmap;
}
@@ -5819,6 +6012,7 @@ static int mwl8k_probe(struct pci_dev *pdev,
priv->regs = pci_iomap(pdev, 2, 0x10000);
if (priv->regs == NULL) {
wiphy_err(hw->wiphy, "Cannot map device registers\n");
+ rc = -EIO;
goto err_iounmap;
}
}
@@ -5849,6 +6043,8 @@ static int mwl8k_probe(struct pci_dev *pdev,
priv->hw_restart_in_progress = false;
+ priv->running_bsses = 0;
+
return rc;
err_stop_firmware:
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 88e3ad2d1db8..38ec8d19ac29 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -853,12 +853,8 @@ void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw)
int err;
desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
- if (!desc) {
- printk(KERN_WARNING
- "%s: Can't allocate space for RX descriptor\n",
- dev->name);
+ if (!desc)
goto update_stats;
- }
rxfid = hermes_read_regn(hw, RXFID);
@@ -1336,10 +1332,9 @@ static void qbuf_scan(struct orinoco_private *priv, void *buf,
unsigned long flags;
sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
- if (!sd) {
- printk(KERN_ERR "%s: failed to alloc memory\n", __func__);
+ if (!sd)
return;
- }
+
sd->buf = buf;
sd->len = len;
sd->type = type;
@@ -1357,10 +1352,9 @@ static void qabort_scan(struct orinoco_private *priv)
unsigned long flags;
sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
- if (!sd) {
- printk(KERN_ERR "%s: failed to alloc memory\n", __func__);
+ if (!sd)
return;
- }
+
sd->len = -1; /* Abort */
spin_lock_irqsave(&priv->scan_lock, flags);
@@ -2290,7 +2284,6 @@ int orinoco_if_add(struct orinoco_private *priv,
netif_carrier_off(dev);
memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
- memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN);
dev->base_addr = base_addr;
dev->irq = irq;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 01624dcaf73e..7744f42de1ea 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -804,10 +804,15 @@ static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
static int ezusb_firmware_download(struct ezusb_priv *upriv,
struct ez_usb_fw *fw)
{
- u8 fw_buffer[FW_BUF_SIZE];
+ u8 *fw_buffer;
int retval, addr;
int variant_offset;
+ fw_buffer = kmalloc(FW_BUF_SIZE, GFP_KERNEL);
+ if (!fw_buffer) {
+ printk(KERN_ERR PFX "Out of memory for firmware buffer.\n");
+ return -ENOMEM;
+ }
/*
* This byte is 1 and should be replaced with 0. The offset is
* 0x10AD in version 0.0.6. The byte in question should follow
@@ -859,6 +864,7 @@ static int ezusb_firmware_download(struct ezusb_priv *upriv,
printk(KERN_ERR PFX "Firmware download failed, error %d\n",
retval);
exit:
+ kfree(fw_buffer);
return retval;
}
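The change above moves a large firmware scratch buffer off the kernel stack and onto the heap, freeing it on the single exit path. A minimal user-space analogue of the same shape; FW_BUF_SIZE here is an assumed size for the demo, not the driver's constant.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FW_BUF_SIZE 4096	/* assumed size, for illustration only */

static int firmware_download(const unsigned char *fw, size_t len)
{
	int retval = 0;
	unsigned char *buf;

	/* Large scratch buffers belong on the heap, not on the stack. */
	buf = malloc(FW_BUF_SIZE);
	if (!buf)
		return -1;

	size_t chunk = len < FW_BUF_SIZE ? len : FW_BUF_SIZE;
	memcpy(buf, fw, chunk);
	/* ...process or upload buf here; on error set retval and fall through... */

	free(buf);		/* single exit path releases the buffer */
	return retval;
}

int main(void)
{
	unsigned char fw[16] = { 0 };
	return firmware_download(fw, sizeof(fw));
}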
@@ -1681,7 +1687,8 @@ static int ezusb_probe(struct usb_interface *interface,
firmware.code = fw_entry->data;
}
if (firmware.size && firmware.code) {
- ezusb_firmware_download(upriv, &firmware);
+ if (ezusb_firmware_download(upriv, &firmware))
+ goto error;
} else {
err("No firmware to download");
goto error;
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 96e39edfec77..e8c5714bfd11 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -125,7 +125,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
capability, beacon_interval, ie_buf, ie_len,
signal, GFP_KERNEL);
- cfg80211_put_bss(cbss);
+ cfg80211_put_bss(wiphy, cbss);
}
void orinoco_add_extscan_result(struct orinoco_private *priv,
@@ -158,7 +158,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
capability, beacon_interval, ie, ie_len,
signal, GFP_KERNEL);
- cfg80211_put_bss(cbss);
+ cfg80211_put_bss(wiphy, cbss);
}
void orinoco_add_hostscan_results(struct orinoco_private *priv,
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index 0ec55b50798e..15ea36b51a66 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -1,6 +1,6 @@
config P54_COMMON
tristate "Softmac Prism54 support"
- depends on MAC80211 && EXPERIMENTAL
+ depends on MAC80211
select FW_LOADER
select CRC_CCITT
---help---
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 933e5d941937..57e3af8ebb4b 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -559,6 +559,7 @@ static int p54p_probe(struct pci_dev *pdev,
mem_len = pci_resource_len(pdev, 0);
if (mem_len < sizeof(struct p54p_csr)) {
dev_err(&pdev->dev, "Too short PCI resources\n");
+ err = -ENODEV;
goto err_disable_dev;
}
@@ -568,8 +569,10 @@ static int p54p_probe(struct pci_dev *pdev,
goto err_disable_dev;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto err_free_reg;
}
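Side note: on trees that provide the combined helper, the same error handling can be collapsed into a single call. A sketch only, assuming dma_set_mask_and_coherent() is available in the target kernel:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}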
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index e71c702e2eb1..b9deef66cf4b 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -47,6 +47,7 @@ static struct usb_device_id p54u_table[] = {
{USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
{USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
{USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
+ {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */
{USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
{USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
{USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
@@ -82,7 +83,9 @@ static struct usb_device_id p54u_table[] = {
{USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
{USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
+ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
{USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
{USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
{USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
@@ -101,6 +104,7 @@ static struct usb_device_id p54u_table[] = {
{USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
+ /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */
{USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
{USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
@@ -506,11 +510,8 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
return err;
tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL);
- if (!buf) {
- dev_err(&priv->udev->dev, "(p54usb) cannot allocate firmware"
- "upload buffer!\n");
+ if (!buf)
return -ENOMEM;
- }
left = block_size = min((size_t)P54U_FW_BLOCK, priv->fw->size);
strcpy(buf, p54u_firmware_upload_3887);
@@ -633,11 +634,8 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
const u8 *data;
buf = kmalloc(512, GFP_KERNEL);
- if (!buf) {
- dev_err(&priv->udev->dev, "(p54usb) firmware buffer "
- "alloc failed!\n");
+ if (!buf)
return -ENOMEM;
- }
#define P54U_WRITE(type, addr, data) \
do {\
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 4e44b1af119a..1c22b81e6ef3 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -1503,6 +1503,7 @@ static int prism54_get_auth(struct net_device *ndev,
case DOT11_AUTH_BOTH:
case DOT11_AUTH_SK:
param->value = IW_AUTH_ALG_SHARED_KEY;
+ break;
case DOT11_AUTH_NONE:
default:
param->value = 0;
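The added break keeps the shared-key cases from falling through into the default branch and overwriting param->value with 0. A tiny self-contained illustration of the bug class, using hypothetical enum names rather than the prism54 definitions:

#include <stdio.h>

enum auth { AUTH_NONE, AUTH_SK, AUTH_BOTH };

static int auth_to_value(enum auth a)
{
	int value = -1;

	switch (a) {
	case AUTH_BOTH:
	case AUTH_SK:
		value = 1;	/* shared key */
		break;		/* without this, control falls into default */
	case AUTH_NONE:
	default:
		value = 0;
		break;
	}
	return value;
}

int main(void)
{
	printf("SK -> %d, NONE -> %d\n",
	       auth_to_value(AUTH_SK), auth_to_value(AUTH_NONE));
	return 0;
}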
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index c5404cb59e08..9f19cceab487 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -123,11 +123,8 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
if (buf->mem == NULL) {
buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
- if (!buf->mem) {
- printk(KERN_WARNING
- "Error allocating management frame.\n");
+ if (!buf->mem)
return -ENOMEM;
- }
buf->size = MGMT_FRAME_SIZE;
}
if (buf->pci_addr == 0) {
@@ -356,14 +353,11 @@ islpci_mgt_receive(struct net_device *ndev)
/* Determine frame size, skipping OID_INL_TUNNEL headers. */
size = PIMFOR_HEADER_SIZE + header->length;
- frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
+ frame = kmalloc(sizeof(struct islpci_mgmtframe) + size,
GFP_ATOMIC);
- if (!frame) {
- printk(KERN_WARNING
- "%s: Out of memory, cannot handle oid 0x%08x\n",
- ndev->name, header->oid);
+ if (!frame)
continue;
- }
+
frame->ndev = ndev;
memcpy(&frame->buf, header, size);
frame->header = (pimfor_header_t *) frame->buf;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 598ca1cafb95..e7cf37f550d1 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1107,12 +1107,15 @@ static int ray_get_essid(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
+ UCHAR tmp[IW_ESSID_MAX_SIZE + 1];
/* Get the essid that was set */
memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
+ memcpy(tmp, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
+ tmp[IW_ESSID_MAX_SIZE] = '\0';
/* Push it out ! */
- wrqu->essid.length = strlen(extra);
+ wrqu->essid.length = strlen(tmp);
wrqu->essid.flags = 1; /* active */
return 0;
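The firmware stores the ESSID in a fixed-size field that is not guaranteed to be NUL-terminated, so calling strlen() directly on it can read past the buffer; copying into a one-byte-larger temporary and terminating it bounds the read. A stand-alone sketch of the fix:

#include <stdio.h>
#include <string.h>

#define IW_ESSID_MAX_SIZE 32

int main(void)
{
	/* Firmware-provided ESSID: fixed-size, not necessarily terminated. */
	char raw[IW_ESSID_MAX_SIZE];
	memset(raw, 'A', sizeof(raw));	/* worst case: every byte used */

	char tmp[IW_ESSID_MAX_SIZE + 1];
	memcpy(tmp, raw, IW_ESSID_MAX_SIZE);
	tmp[IW_ESSID_MAX_SIZE] = '\0';	/* bound the strlen() below */

	printf("essid length = %zu\n", strlen(tmp));
	return 0;
}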
@@ -1842,6 +1845,8 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
UCHAR tmp;
UCHAR cmd;
UCHAR status;
+ UCHAR memtmp[ESSID_SIZE + 1];
+
if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */
return IRQ_NONE;
@@ -1901,17 +1906,21 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
break;
case CCS_START_NETWORK:
case CCS_JOIN_NETWORK:
+ memcpy(memtmp, local->sparm.b4.a_current_ess_id,
+ ESSID_SIZE);
+ memtmp[ESSID_SIZE] = '\0';
+
if (status == CCS_COMMAND_COMPLETE) {
if (readb
(&pccs->var.start_network.net_initiated) ==
1) {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" started\n",
- local->sparm.b4.a_current_ess_id);
+ memtmp);
} else {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" joined\n",
- local->sparm.b4.a_current_ess_id);
+ memtmp);
}
memcpy_fromio(&local->bss_id,
pccs->var.start_network.bssid,
@@ -1939,12 +1948,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
if (status == CCS_START_NETWORK) {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" start failed\n",
- local->sparm.b4.a_current_ess_id);
+ memtmp);
local->timer.function = start_net;
} else {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" join failed\n",
- local->sparm.b4.a_current_ess_id);
+ memtmp);
local->timer.function = join_net;
}
add_timer(&local->timer);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index abe1d039be81..525fd7521dff 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1621,11 +1621,8 @@ static void set_multicast_list(struct usbnet *usbdev)
} else if (mc_count) {
int i = 0;
- mc_addrs = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
+ mc_addrs = kmalloc_array(mc_count, ETH_ALEN, GFP_ATOMIC);
if (!mc_addrs) {
- netdev_warn(usbdev->net,
- "couldn't alloc %d bytes of memory\n",
- mc_count * ETH_ALEN);
netif_addr_unlock_bh(usbdev->net);
return;
}
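kmalloc_array() is preferred over an open-coded kmalloc(n * size) because it rejects multiplications that would overflow. A user-space analogue of that check, shown with a 6-byte MAC address element size:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* User-space analogue of kmalloc_array(): refuse n * size if it overflows. */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* multiplication would overflow */
	return malloc(n * size);
}

int main(void)
{
	void *ok  = alloc_array(16, 6);			/* 16 MAC addresses */
	void *bad = alloc_array(SIZE_MAX / 2, 4);	/* rejected */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}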
@@ -2029,7 +2026,7 @@ static bool rndis_bss_info_update(struct usbnet *usbdev,
bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
timestamp, capability, beacon_interval, ie, ie_len, signal,
GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
return (bss != NULL);
}
@@ -2718,7 +2715,7 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
timestamp, capability, beacon_period, ie_buf, ie_len,
signal, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
}
/*
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index c7548da6573d..44d6ead43341 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -82,7 +82,6 @@ config RT2800PCI_RT33XX
config RT2800PCI_RT35XX
bool "rt2800pci - Include support for rt35xx devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
default y
---help---
This adds support for rt35xx wireless chipset family to the
@@ -92,7 +91,6 @@ config RT2800PCI_RT35XX
config RT2800PCI_RT53XX
bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
default y
---help---
This adds support for rt53xx wireless chipset family to the
@@ -101,7 +99,6 @@ config RT2800PCI_RT53XX
config RT2800PCI_RT3290
bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
default y
---help---
This adds support for rt3290 wireless chipset family to the
@@ -159,7 +156,6 @@ config RT2800USB_RT33XX
config RT2800USB_RT35XX
bool "rt2800usb - Include support for rt35xx devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
default y
---help---
This adds support for rt35xx wireless chipset family to the
@@ -168,7 +164,6 @@ config RT2800USB_RT35XX
config RT2800USB_RT53XX
bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
---help---
This adds support for rt53xx wireless chipset family to the
rt2800usb driver.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index a2d2bc2c7b3d..221beaaa83f1 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1185,8 +1185,14 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- rt2x00queue_map_txskb(entry);
-
+ if (rt2x00queue_map_txskb(entry)) {
+ ERROR(rt2x00dev, "Failed to map beacon, aborting\n");
+ goto out;
+ }
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
/*
* Write the TX descriptor for the beacon.
*/
@@ -1196,7 +1202,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
-
+out:
/*
* Enable beaconing again.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 9bea10f53f0a..39edc59e8d03 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1338,7 +1338,10 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- rt2x00queue_map_txskb(entry);
+ if (rt2x00queue_map_txskb(entry)) {
+ ERROR(rt2x00dev, "Failed to map beacon, aborting\n");
+ goto out;
+ }
/*
* Write the TX descriptor for the beacon.
@@ -1349,7 +1352,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
-
+out:
/*
* Enable beaconing again.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 197b4466a5d2..a658b4bc7da2 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -80,7 +80,7 @@ static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF3022))
return true;
- NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n");
+ WARNING(rt2x00dev, "Unknown RF chipset on rt305x\n");
return false;
}
@@ -1296,8 +1296,7 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
!(filter_flags & FIF_PSPOLL));
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
- !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 0);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
@@ -3866,6 +3865,400 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
return rfcsr24;
}
+static void rt2800_init_rfcsr_305x_soc(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x75);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x50);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x39);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x60);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x75);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x75);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x31);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x25);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
+}
+
+static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
+}
+
+static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0xf3);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x83);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x7b);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x98);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);
+}
+
+static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x33);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd2);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x5a);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0xdd);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
+}
+
+static void rt2800_init_rfcsr_3390(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x62);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x8b);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x34);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x61);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x3b);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x94);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x5c);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xb2);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xf6);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x14);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x41);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
+}
+
+static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 0, 0x70);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x81);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x4c);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0xc3);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x70);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x65);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0xac);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xd0);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x3c);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x15);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x10);
+}
+
+static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
+
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
+}
+
+static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x4d);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x8d);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x0b);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x44);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x9b);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x0c);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x94);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x3a);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x44);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xa1);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
+}
+
static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
@@ -3889,6 +4282,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
/*
* Init RF calibration.
*/
+
if (rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3907,379 +4301,35 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
}
- if (rt2x00_rt(rt2x00dev, RT3070) ||
- rt2x00_rt(rt2x00dev, RT3071) ||
- rt2x00_rt(rt2x00dev, RT3090)) {
- rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
- rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
- rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
- rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
- rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
- rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
- rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
- rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
- rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
- rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
- rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
- rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
- rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
- rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
- rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
- } else if (rt2x00_rt(rt2x00dev, RT3290)) {
- rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
- rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
- rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
- rt2800_rfcsr_write(rt2x00dev, 8, 0xf3);
- rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
- rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
- rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
- rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
- rt2800_rfcsr_write(rt2x00dev, 18, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x83);
- rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
- rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 34, 0x05);
- rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
- rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
- rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
- rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
- rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
- rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
- rt2800_rfcsr_write(rt2x00dev, 43, 0x7b);
- rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
- rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
- rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
- rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 49, 0x98);
- rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
- rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
- rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
- rt2800_rfcsr_write(rt2x00dev, 56, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 59, 0x09);
- rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
- rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);
- } else if (rt2x00_rt(rt2x00dev, RT3390)) {
- rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
- rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
- rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x62);
- rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
- rt2800_rfcsr_write(rt2x00dev, 5, 0x8b);
- rt2800_rfcsr_write(rt2x00dev, 6, 0x42);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x34);
- rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x61);
- rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
- rt2800_rfcsr_write(rt2x00dev, 12, 0x3b);
- rt2800_rfcsr_write(rt2x00dev, 13, 0xe0);
- rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
- rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
- rt2800_rfcsr_write(rt2x00dev, 16, 0xe0);
- rt2800_rfcsr_write(rt2x00dev, 17, 0x94);
- rt2800_rfcsr_write(rt2x00dev, 18, 0x5c);
- rt2800_rfcsr_write(rt2x00dev, 19, 0x4a);
- rt2800_rfcsr_write(rt2x00dev, 20, 0xb2);
- rt2800_rfcsr_write(rt2x00dev, 21, 0xf6);
- rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 23, 0x14);
- rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
- rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 28, 0x41);
- rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
- } else if (rt2x00_rt(rt2x00dev, RT3572)) {
- rt2800_rfcsr_write(rt2x00dev, 0, 0x70);
- rt2800_rfcsr_write(rt2x00dev, 1, 0x81);
- rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 4, 0x4c);
- rt2800_rfcsr_write(rt2x00dev, 5, 0x05);
- rt2800_rfcsr_write(rt2x00dev, 6, 0x4a);
- rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
- rt2800_rfcsr_write(rt2x00dev, 9, 0xc3);
- rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
- rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
- rt2800_rfcsr_write(rt2x00dev, 12, 0x70);
- rt2800_rfcsr_write(rt2x00dev, 13, 0x65);
- rt2800_rfcsr_write(rt2x00dev, 14, 0xa0);
- rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
- rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
- rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
- rt2800_rfcsr_write(rt2x00dev, 18, 0xac);
- rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
- rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
- rt2800_rfcsr_write(rt2x00dev, 21, 0xd0);
- rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 23, 0x3c);
- rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x15);
- rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x09);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x10);
- } else if (rt2800_is_305x_soc(rt2x00dev)) {
- rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
- rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
- rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x75);
- rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
- rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
- rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x50);
- rt2800_rfcsr_write(rt2x00dev, 8, 0x39);
- rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x60);
- rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
- rt2800_rfcsr_write(rt2x00dev, 12, 0x75);
- rt2800_rfcsr_write(rt2x00dev, 13, 0x75);
- rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
- rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
- rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
- rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
- rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
- rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
- rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
- rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 23, 0x31);
- rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
- rt2800_rfcsr_write(rt2x00dev, 26, 0x25);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
- rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
- rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
+ if (rt2800_is_305x_soc(rt2x00dev)) {
+ rt2800_init_rfcsr_305x_soc(rt2x00dev);
return 0;
- } else if (rt2x00_rt(rt2x00dev, RT3352)) {
- rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
- rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
- rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x18);
- rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 6, 0x33);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
- rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 10, 0xd2);
- rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
- rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
- rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 14, 0x5a);
- rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 16, 0x01);
- rt2800_rfcsr_write(rt2x00dev, 18, 0x45);
- rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
- rt2800_rfcsr_write(rt2x00dev, 28, 0x03);
- rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
- rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
- rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
- rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
- rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
- rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
- rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
- rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
- rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
- rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
- rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
- rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
- rt2800_rfcsr_write(rt2x00dev, 46, 0xdd);
- rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
- rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
- rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
- rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
- rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
- rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
- rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
- rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
- } else if (rt2x00_rt(rt2x00dev, RT5390)) {
- rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
- rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
- rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
- else
- rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
- rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
- rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
- rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
- rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
- rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
-
- rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
- rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
- else
- rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
- rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
- rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
-
- rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
- rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
- rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
- rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
- rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
-
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
- else
- rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
- rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
- rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
- rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
- rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
- rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
- else
- rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
- rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
-
- rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
- else
- rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
- rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
- rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
- rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
- rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
-
- rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
- else
- rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
- rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
- } else if (rt2x00_rt(rt2x00dev, RT5392)) {
- rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
- rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
- rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
- rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
- rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
- rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
- rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
- rt2800_rfcsr_write(rt2x00dev, 19, 0x4d);
- rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 21, 0x8d);
- rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
- rt2800_rfcsr_write(rt2x00dev, 23, 0x0b);
- rt2800_rfcsr_write(rt2x00dev, 24, 0x44);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
- rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
- rt2800_rfcsr_write(rt2x00dev, 32, 0x20);
- rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
- rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
- rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
- rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
- rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
- rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
- rt2800_rfcsr_write(rt2x00dev, 40, 0x0f);
- rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
- rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
- rt2800_rfcsr_write(rt2x00dev, 43, 0x9b);
- rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
- rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
- rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
- rt2800_rfcsr_write(rt2x00dev, 47, 0x0c);
- rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
- rt2800_rfcsr_write(rt2x00dev, 50, 0x94);
- rt2800_rfcsr_write(rt2x00dev, 51, 0x3a);
- rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
- rt2800_rfcsr_write(rt2x00dev, 53, 0x44);
- rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
- rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
- rt2800_rfcsr_write(rt2x00dev, 56, 0xa1);
- rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
- rt2800_rfcsr_write(rt2x00dev, 59, 0x07);
- rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
- rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
- rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
- rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
+ }
+
+ switch (rt2x00dev->chip.rt) {
+ case RT3070:
+ case RT3071:
+ case RT3090:
+ rt2800_init_rfcsr_30xx(rt2x00dev);
+ break;
+ case RT3290:
+ rt2800_init_rfcsr_3290(rt2x00dev);
+ break;
+ case RT3352:
+ rt2800_init_rfcsr_3352(rt2x00dev);
+ break;
+ case RT3390:
+ rt2800_init_rfcsr_3390(rt2x00dev);
+ break;
+ case RT3572:
+ rt2800_init_rfcsr_3572(rt2x00dev);
+ break;
+ case RT5390:
+ rt2800_init_rfcsr_5390(rt2x00dev);
+ break;
+ case RT5392:
+ rt2800_init_rfcsr_5392(rt2x00dev);
+ break;
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
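The per-chipset RF CSR initialisation that the removed if/else chain above performed inline is now dispatched through the switch on rt2x00dev->chip.rt. The helper bodies are not shown in this hunk; presumably they carry the removed register writes over verbatim, roughly along these lines for the RT5390 path (a sketch only, values copied from the removed block above):

    static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
    {
    	rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
    	rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
    	rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
    	rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
    	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
    		rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
    	else
    		rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
    	/* ... the remaining RFCSR 7..63 writes from the removed block,
    	 * including the other REV_RT5390F revision checks, continue
    	 * here unchanged ... */
    }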
@@ -4620,12 +4670,14 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
mutex_unlock(&rt2x00dev->csr_mutex);
}
-void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
+int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
unsigned int i;
for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8)
rt2800_efuse_read(rt2x00dev, i);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
@@ -4635,11 +4687,14 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
u16 word;
u8 *mac;
u8 default_lna_gain;
+ int retval;
/*
* Read the EEPROM.
*/
- rt2800_read_eeprom(rt2x00dev);
+ retval = rt2800_read_eeprom(rt2x00dev);
+ if (retval)
+ return retval;
/*
* Start validation of the data that has been read.
@@ -5090,8 +5145,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL;
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
/*
* Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
@@ -5484,7 +5538,9 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_START:
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index a128ceadcb3e..6ec739466db4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -43,7 +43,7 @@ struct rt2800_ops {
const unsigned int offset,
const struct rt2x00_field32 field, u32 *reg);
- void (*read_eeprom)(struct rt2x00_dev *rt2x00dev);
+ int (*read_eeprom)(struct rt2x00_dev *rt2x00dev);
bool (*hwcrypt_disabled)(struct rt2x00_dev *rt2x00dev);
int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
@@ -117,11 +117,11 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
}
-static inline void rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev)
+static inline int rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
- rt2800ops->read_eeprom(rt2x00dev);
+ return rt2800ops->read_eeprom(rt2x00dev);
}
static inline bool rt2800_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
@@ -207,7 +207,7 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev);
void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
-void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
+int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
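With the read_eeprom callback and rt2800_read_eeprom_efuse() now returning int, every caller is expected to check the result rather than continue with an unread EEPROM. The caller pattern is minimal (a sketch of probe-time usage, mirroring the rt2800_validate_eeprom() change above):

    int retval;

    retval = rt2800_read_eeprom(rt2x00dev);
    if (retval)
    	return retval;	/* abort instead of validating garbage EEPROM data */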
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 9224d874bf24..48a01aa21f1c 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -90,17 +90,22 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
}
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
-static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
+static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
+ if (!base_addr)
+ return -ENOMEM;
+
memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
iounmap(base_addr);
+ return 0;
}
#else
-static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
+static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
+ return -ENOMEM;
}
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
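The SoC path above now treats a failed ioremap() as -ENOMEM instead of passing a NULL pointer to memcpy_fromio(). The map/check/copy/unmap pattern in isolation (phys_addr, len and dest are placeholders for this sketch):

    void __iomem *base = ioremap(phys_addr, len);

    if (!base)
    	return -ENOMEM;			/* mapping failed, nothing to copy */

    memcpy_fromio(dest, base, len);	/* copy the EEPROM image out of MMIO space */
    iounmap(base);			/* always release the temporary mapping */
    return 0;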
@@ -135,7 +140,7 @@ static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg);
}
-static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
+static int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
{
struct eeprom_93cx6 eeprom;
u32 reg;
@@ -164,6 +169,8 @@ static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom,
EEPROM_SIZE / sizeof(u16));
+
+ return 0;
}
static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
@@ -171,13 +178,14 @@ static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
return rt2800_efuse_detect(rt2x00dev);
}
-static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
+static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
- rt2800_read_eeprom_efuse(rt2x00dev);
+ return rt2800_read_eeprom_efuse(rt2x00dev);
}
#else
-static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
+static inline int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
{
+ return -EOPNOTSUPP;
}
static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
@@ -185,8 +193,9 @@ static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
return 0;
}
-static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
+static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
+ return -EOPNOTSUPP;
}
#endif /* CONFIG_PCI */
@@ -970,14 +979,18 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
/*
* Device probe functions.
*/
-static void rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
+static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
+ int retval;
+
if (rt2x00_is_soc(rt2x00dev))
- rt2800pci_read_eeprom_soc(rt2x00dev);
+ retval = rt2800pci_read_eeprom_soc(rt2x00dev);
else if (rt2800pci_efuse_detect(rt2x00dev))
- rt2800pci_read_eeprom_efuse(rt2x00dev);
+ retval = rt2800pci_read_eeprom_efuse(rt2x00dev);
else
- rt2800pci_read_eeprom_pci(rt2x00dev);
+ retval = rt2800pci_read_eeprom_pci(rt2x00dev);
+
+ return retval;
}
static const struct ieee80211_ops rt2800pci_mac80211_ops = {
@@ -1139,6 +1152,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3562) },
{ PCI_DEVICE(0x1814, 0x3592) },
{ PCI_DEVICE(0x1814, 0x3593) },
+ { PCI_DEVICE(0x1814, 0x359f) },
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
{ PCI_DEVICE(0x1814, 0x5360) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 5c149b58ab46..098613ed93fb 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -540,9 +540,9 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
- WARNING(entry->queue->rt2x00dev,
- "TX status report missed for queue %d entry %d\n",
- entry->queue->qid, entry->entry_idx);
+ DEBUG(entry->queue->rt2x00dev,
+ "TX status report missed for queue %d entry %d\n",
+ entry->queue->qid, entry->entry_idx);
return TXDONE_UNKNOWN;
}
@@ -735,13 +735,17 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
/*
* Device probe functions.
*/
-static void rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
+static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
+ int retval;
+
if (rt2800_efuse_detect(rt2x00dev))
- rt2800_read_eeprom_efuse(rt2x00dev);
+ retval = rt2800_read_eeprom_efuse(rt2x00dev);
else
- rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
- EEPROM_SIZE);
+ retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
+ EEPROM_SIZE);
+
+ return retval;
}
static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -964,6 +968,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c13) },
{ USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c16) },
+ { USB_DEVICE(0x07d1, 0x3c17) },
{ USB_DEVICE(0x2001, 0x3c1b) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
@@ -1094,9 +1099,11 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x15a9, 0x0006) },
/* Sweex */
{ USB_DEVICE(0x177f, 0x0153) },
+ { USB_DEVICE(0x177f, 0x0164) },
{ USB_DEVICE(0x177f, 0x0302) },
{ USB_DEVICE(0x177f, 0x0313) },
{ USB_DEVICE(0x177f, 0x0323) },
+ { USB_DEVICE(0x177f, 0x0324) },
/* U-Media */
{ USB_DEVICE(0x157e, 0x300e) },
{ USB_DEVICE(0x157e, 0x3013) },
@@ -1111,6 +1118,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Zyxel */
{ USB_DEVICE(0x0586, 0x3416) },
{ USB_DEVICE(0x0586, 0x3418) },
+ { USB_DEVICE(0x0586, 0x341a) },
{ USB_DEVICE(0x0586, 0x341e) },
{ USB_DEVICE(0x0586, 0x343e) },
#ifdef CONFIG_RT2800USB_RT33XX
@@ -1127,6 +1135,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x8070) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0050) },
+ /* Sweex */
+ { USB_DEVICE(0x177f, 0x0163) },
+ { USB_DEVICE(0x177f, 0x0165) },
#endif
#ifdef CONFIG_RT2800USB_RT35XX
/* Allwin */
@@ -1162,6 +1173,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
#ifdef CONFIG_RT2800USB_RT53XX
/* Arcadyan */
{ USB_DEVICE(0x043e, 0x7a12) },
+ { USB_DEVICE(0x043e, 0x7a32) },
/* Azurewave */
{ USB_DEVICE(0x13d3, 0x3329) },
{ USB_DEVICE(0x13d3, 0x3365) },
@@ -1173,16 +1185,20 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x2001, 0x3c1e) },
/* LG innotek */
{ USB_DEVICE(0x043e, 0x7a22) },
+ { USB_DEVICE(0x043e, 0x7a42) },
/* Panasonic */
{ USB_DEVICE(0x04da, 0x1801) },
{ USB_DEVICE(0x04da, 0x1800) },
+ { USB_DEVICE(0x04da, 0x23f6) },
/* Philips */
{ USB_DEVICE(0x0471, 0x2104) },
+ { USB_DEVICE(0x0471, 0x2126) },
+ { USB_DEVICE(0x0471, 0x2180) },
+ { USB_DEVICE(0x0471, 0x2181) },
+ { USB_DEVICE(0x0471, 0x2182) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x5370) },
{ USB_DEVICE(0x148f, 0x5372) },
- /* Unknown */
- { USB_DEVICE(0x04da, 0x23f6) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
@@ -1203,10 +1219,15 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0b05, 0x1760) },
{ USB_DEVICE(0x0b05, 0x1761) },
{ USB_DEVICE(0x0b05, 0x1790) },
+ { USB_DEVICE(0x0b05, 0x17a7) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3262) },
{ USB_DEVICE(0x13d3, 0x3284) },
{ USB_DEVICE(0x13d3, 0x3322) },
+ { USB_DEVICE(0x13d3, 0x3340) },
+ { USB_DEVICE(0x13d3, 0x3399) },
+ { USB_DEVICE(0x13d3, 0x3400) },
+ { USB_DEVICE(0x13d3, 0x3401) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x1003) },
/* Buffalo */
@@ -1219,13 +1240,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x18c5, 0x0008) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0b) },
- { USB_DEVICE(0x07d1, 0x3c17) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1) },
+ /* EnGenius */
+ { USB_DEVICE(0x1740, 0x0600) },
+ { USB_DEVICE(0x1740, 0x0602) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800c) },
+ /* Hercules */
+ { USB_DEVICE(0x06f8, 0xe036) },
/* Huawei */
{ USB_DEVICE(0x148f, 0xf101) },
/* I-O DATA */
@@ -1252,13 +1277,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x004a) },
{ USB_DEVICE(0x0df6, 0x004d) },
{ USB_DEVICE(0x0df6, 0x0053) },
+ { USB_DEVICE(0x0df6, 0x0069) },
+ { USB_DEVICE(0x0df6, 0x006f) },
/* SMC */
{ USB_DEVICE(0x083a, 0xa512) },
{ USB_DEVICE(0x083a, 0xc522) },
{ USB_DEVICE(0x083a, 0xd522) },
{ USB_DEVICE(0x083a, 0xf511) },
- /* Zyxel */
- { USB_DEVICE(0x0586, 0x341a) },
+ /* Sweex */
+ { USB_DEVICE(0x177f, 0x0254) },
+ /* TP-LINK */
+ { USB_DEVICE(0xf201, 0x5370) },
#endif
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 0751b35ef6dc..086abb403a4f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -88,11 +88,9 @@
#define ERROR_PROBE(__msg, __args...) \
DEBUG_PRINTK_PROBE(KERN_ERR, "Error", __msg, ##__args)
#define WARNING(__dev, __msg, __args...) \
- DEBUG_PRINTK(__dev, KERN_WARNING, "Warning", __msg, ##__args)
-#define NOTICE(__dev, __msg, __args...) \
- DEBUG_PRINTK(__dev, KERN_NOTICE, "Notice", __msg, ##__args)
+ DEBUG_PRINTK_MSG(__dev, KERN_WARNING, "Warning", __msg, ##__args)
#define INFO(__dev, __msg, __args...) \
- DEBUG_PRINTK(__dev, KERN_INFO, "Info", __msg, ##__args)
+ DEBUG_PRINTK_MSG(__dev, KERN_INFO, "Info", __msg, ##__args)
#define DEBUG(__dev, __msg, __args...) \
DEBUG_PRINTK(__dev, KERN_DEBUG, "Debug", __msg, ##__args)
#define EEPROM(__dev, __msg, __args...) \
@@ -1016,6 +1014,26 @@ struct rt2x00_dev {
* Protect the interrupt mask register.
*/
spinlock_t irqmask_lock;
+
+ /*
+ * List of BlockAckReq TX entries that need driver BlockAck processing.
+ */
+ struct list_head bar_list;
+ spinlock_t bar_list_lock;
+};
+
+struct rt2x00_bar_list_entry {
+ struct list_head list;
+ struct rcu_head head;
+
+ struct queue_entry *entry;
+ int block_acked;
+
+ /* Relevant parts of the IEEE80211 BAR header */
+ __u8 ra[6];
+ __u8 ta[6];
+ __le16 control;
+ __le16 start_seq_num;
};
/*
@@ -1151,8 +1169,10 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
/**
* rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
* @entry: Pointer to &struct queue_entry
+ *
+ * Returns -ENOMEM if the mapping fails, 0 otherwise.
*/
-void rt2x00queue_map_txskb(struct queue_entry *entry);
+int rt2x00queue_map_txskb(struct queue_entry *entry);
/**
* rt2x00queue_unmap_skb - Unmap a skb from DMA.
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 4ffb6a584cd0..1031db66474a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -271,6 +271,50 @@ void rt2x00lib_dmadone(struct queue_entry *entry)
}
EXPORT_SYMBOL_GPL(rt2x00lib_dmadone);
+static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct ieee80211_bar *bar = (void *) entry->skb->data;
+ struct rt2x00_bar_list_entry *bar_entry;
+ int ret;
+
+ if (likely(!ieee80211_is_back_req(bar->frame_control)))
+ return 0;
+
+ /*
+ * Unlike all other frames, the status report for BARs does
+ * not directly come from the hardware as it is incapable of
+ * matching a BA to a previously sent BAR. The hardware will
+ * report all BARs as if they weren't acked at all.
+ *
+ * Instead the RX-path will scan for incoming BAs and set the
+ * block_acked flag if it sees one that was likely caused by
+ * a BAR from us.
+ *
+ * Remove remaining BARs here and return their status for
+ * TX done processing.
+ */
+ ret = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(bar_entry, &rt2x00dev->bar_list, list) {
+ if (bar_entry->entry != entry)
+ continue;
+
+ spin_lock_bh(&rt2x00dev->bar_list_lock);
+ /* Return whether this BAR was blockacked or not */
+ ret = bar_entry->block_acked;
+ /* Remove the BAR from our checklist */
+ list_del_rcu(&bar_entry->list);
+ spin_unlock_bh(&rt2x00dev->bar_list_lock);
+ kfree_rcu(bar_entry, head);
+
+ break;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
void rt2x00lib_txdone(struct queue_entry *entry,
struct txdone_entry_desc *txdesc)
{
@@ -324,9 +368,12 @@ void rt2x00lib_txdone(struct queue_entry *entry,
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);
/*
- * Determine if the frame has been successfully transmitted.
+ * Determine if the frame has been successfully transmitted and
+ * remove BARs from our check list while checking for their
+ * TX status.
*/
success =
+ rt2x00lib_txdone_bar_status(entry) ||
test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
test_bit(TXDONE_UNKNOWN, &txdesc->flags);
@@ -491,6 +538,50 @@ static void rt2x00lib_sleep(struct work_struct *work)
IEEE80211_CONF_CHANGE_PS);
}
+static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
+ struct rxdone_entry_desc *rxdesc)
+{
+ struct rt2x00_bar_list_entry *entry;
+ struct ieee80211_bar *ba = (void *)skb->data;
+
+ if (likely(!ieee80211_is_back(ba->frame_control)))
+ return;
+
+ if (rxdesc->size < sizeof(*ba) + FCS_LEN)
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &rt2x00dev->bar_list, list) {
+
+ if (ba->start_seq_num != entry->start_seq_num)
+ continue;
+
+#define TID_CHECK(a, b) ( \
+ ((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \
+ ((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \
+
+ if (!TID_CHECK(ba->control, entry->control))
+ continue;
+
+#undef TID_CHECK
+
+ if (compare_ether_addr(ba->ra, entry->ta))
+ continue;
+
+ if (compare_ether_addr(ba->ta, entry->ra))
+ continue;
+
+ /* Mark the BAR since we received the corresponding BA */
+ spin_lock_bh(&rt2x00dev->bar_list_lock);
+ entry->block_acked = 1;
+ spin_unlock_bh(&rt2x00dev->bar_list_lock);
+ break;
+ }
+ rcu_read_unlock();
+
+}
+
static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct rxdone_entry_desc *rxdesc)
@@ -674,6 +765,12 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc);
/*
+ * Check for incoming BlockAcks to match to the BlockAckReqs
+ * we've send out.
+ */
+ rt2x00lib_rxdone_check_ba(rt2x00dev, entry->skb, &rxdesc);
+
+ /*
* Update extra components
*/
rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
@@ -685,6 +782,14 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
* to mac80211.
*/
rx_status = IEEE80211_SKB_RXCB(entry->skb);
+
+ /* Ensure that all fields of rx_status are initialized
+ * properly. The skb->cb array was used for driver
+ * specific information, so rx_status might contain
+ * garbage.
+ */
+ memset(rx_status, 0, sizeof(*rx_status));
+
rx_status->mactime = rxdesc.timestamp;
rx_status->band = rt2x00dev->curr_band;
rx_status->freq = rt2x00dev->curr_freq;
@@ -1131,7 +1236,8 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
*/
if_limit = &rt2x00dev->if_limits_ap;
if_limit->max = rt2x00dev->ops->max_ap_intf;
- if_limit->types = BIT(NL80211_IFTYPE_AP);
+ if_limit->types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
/*
* Build up AP interface combinations structure.
@@ -1175,6 +1281,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
spin_lock_init(&rt2x00dev->irqmask_lock);
mutex_init(&rt2x00dev->csr_mutex);
+ INIT_LIST_HEAD(&rt2x00dev->bar_list);
+ spin_lock_init(&rt2x00dev->bar_list_lock);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -1339,7 +1447,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
#ifdef CONFIG_PM
int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
{
- NOTICE(rt2x00dev, "Going to sleep.\n");
+ DEBUG(rt2x00dev, "Going to sleep.\n");
/*
* Prevent mac80211 from accessing driver while suspended.
@@ -1379,7 +1487,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_suspend);
int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
{
- NOTICE(rt2x00dev, "Waking up.\n");
+ DEBUG(rt2x00dev, "Waking up.\n");
/*
* Restore/enable extra components.
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index ed7a1bb3f245..20c6eccce5aa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -731,9 +731,9 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
queue->aifs = params->aifs;
queue->txop = params->txop;
- INFO(rt2x00dev,
- "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
- queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
+ DEBUG(rt2x00dev,
+ "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
+ queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
return 0;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index e488b944a034..4d91795dc6a2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -87,24 +87,35 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
skbdesc->entry = entry;
if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
- skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
- skb->data,
- skb->len,
- DMA_FROM_DEVICE);
+ dma_addr_t skb_dma;
+
+ skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ skbdesc->skb_dma = skb_dma;
skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
}
return skb;
}
-void rt2x00queue_map_txskb(struct queue_entry *entry)
+int rt2x00queue_map_txskb(struct queue_entry *entry)
{
struct device *dev = entry->queue->rt2x00dev->dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skbdesc->skb_dma =
dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
+ return -ENOMEM;
+
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
+ return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
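rt2x00queue_map_txskb() now reports DMA mapping failures instead of handing an invalid bus address to the hardware. Stripped of the rt2x00 specifics, this is the usual dma_map_single()/dma_mapping_error() pair (dev and skb are placeholders in this sketch):

    dma_addr_t skb_dma;

    skb_dma = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(dev, skb_dma)))
    	return -ENOMEM;		/* caller drops the frame instead of queueing it */

    /* ... program skb_dma into the TX descriptor; after TX completion:
     * dma_unmap_single(dev, skb_dma, skb->len, DMA_TO_DEVICE); ... */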
@@ -343,10 +354,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
* when using more then one tx stream (>MCS7).
*/
if (sta && txdesc->u.ht.mcs > 7 &&
- ((sta->ht_cap.cap &
- IEEE80211_HT_CAP_SM_PS) >>
- IEEE80211_HT_CAP_SM_PS_SHIFT) ==
- WLAN_HT_CAP_SM_PS_DYNAMIC)
+ sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
} else {
txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
@@ -545,8 +553,9 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
/*
* Map the skb to DMA.
*/
- if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
- rt2x00queue_map_txskb(entry);
+ if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+ rt2x00queue_map_txskb(entry))
+ return -ENOMEM;
return 0;
}
@@ -582,6 +591,48 @@ static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
queue->rt2x00dev->ops->lib->kick_queue(queue);
}
+static void rt2x00queue_bar_check(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct ieee80211_bar *bar = (void *) (entry->skb->data +
+ rt2x00dev->ops->extra_tx_headroom);
+ struct rt2x00_bar_list_entry *bar_entry;
+
+ if (likely(!ieee80211_is_back_req(bar->frame_control)))
+ return;
+
+ bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);
+
+ /*
+ * If the allocation fails we still send the BAR out but simply don't
+ * track it in our bar list; as a result we will report it back to
+ * mac80211 as failed.
+ */
+ if (!bar_entry)
+ return;
+
+ bar_entry->entry = entry;
+ bar_entry->block_acked = 0;
+
+ /*
+ * Copy the relevant parts of the 802.11 BAR into our check list
+ * so that we can use RCU for lower overhead in the RX path, since
+ * sending BARs and processing the corresponding BlockAck should be
+ * the exception.
+ */
+ memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
+ memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
+ bar_entry->control = bar->control;
+ bar_entry->start_seq_num = bar->start_seq_num;
+
+ /*
+ * Insert BAR into our BAR check list.
+ */
+ spin_lock_bh(&rt2x00dev->bar_list_lock);
+ list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
+ spin_unlock_bh(&rt2x00dev->bar_list_lock);
+}
+
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
bool local)
{
@@ -680,6 +731,11 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
goto out;
}
+ /*
+ * Put BlockAckReqs into our check list for driver BA processing.
+ */
+ rt2x00queue_bar_check(entry);
+
set_bit(ENTRY_DATA_PENDING, &entry->flags);
rt2x00queue_index_inc(entry, Q_INDEX);
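Taken together, the BlockAckReq handling added in this series has three stages: rt2x00queue_bar_check() records every outgoing BAR on rt2x00dev->bar_list, rt2x00lib_rxdone_check_ba() marks an entry as block_acked when a matching BlockAck is received, and rt2x00lib_txdone_bar_status() consumes the entry to decide the status reported to mac80211. The list itself follows the standard RCU-protected list pattern; a generic sketch (entry, dev and matches() are placeholders, and kfree_rcu() relies on the rcu_head member named head, as in struct rt2x00_bar_list_entry):

    /* writer (TX path): publish a new entry */
    spin_lock_bh(&dev->bar_list_lock);
    list_add_tail_rcu(&entry->list, &dev->bar_list);
    spin_unlock_bh(&dev->bar_list_lock);

    /* reader (RX / TX-done path): traverse lockless, delete under the lock */
    rcu_read_lock();
    list_for_each_entry_rcu(entry, &dev->bar_list, list) {
    	if (!matches(entry))
    		continue;
    	spin_lock_bh(&dev->bar_list_lock);
    	list_del_rcu(&entry->list);
    	spin_unlock_bh(&dev->bar_list_lock);
    	kfree_rcu(entry, head);		/* freed after an RCU grace period */
    	break;
    }
    rcu_read_unlock();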
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
index 17d80fe556de..30332175bcd8 100644
--- a/drivers/net/wireless/rtl818x/Kconfig
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -3,7 +3,7 @@
#
config RTL8180
tristate "Realtek 8180/8185 PCI support"
- depends on MAC80211 && PCI && EXPERIMENTAL
+ depends on MAC80211 && PCI
select EEPROM_93CX6
---help---
This is a driver for RTL8180 and RTL8185 based cards.
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 21b1bbb93a7e..b6aa0c40658f 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,8 +1,26 @@
+config RTLWIFI
+ tristate "Realtek wireless card support"
+ depends on MAC80211
+ select FW_LOADER
+ ---help---
+ This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE
+ drivers. This module does nothing by itself - the various front-end
+ drivers need to be enabled to support any desired devices.
+
+ If you choose to build as a module, it'll be called rtlwifi.
+
+config RTLWIFI_DEBUG
+ bool "Debugging output for rtlwifi driver family"
+ depends on RTLWIFI
+ default y
+ ---help---
+ To use the module option that sets the dynamic-debugging level for
+ the front-end driver, this parameter must be "Y". For memory-limited
+ systems, choose "N". If in doubt, choose "Y".
+
config RTL8192CE
tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
- depends on MAC80211 && PCI
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && PCI
select RTL8192C_COMMON
---help---
This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
@@ -12,9 +30,7 @@ config RTL8192CE
config RTL8192SE
tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
- depends on MAC80211 && PCI
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && PCI
---help---
This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
wireless network adapters.
@@ -23,9 +39,7 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
- depends on MAC80211 && PCI
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && PCI
---help---
This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
wireless network adapters.
@@ -34,9 +48,7 @@ config RTL8192DE
config RTL8723AE
tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
- depends on MAC80211 && PCI && EXPERIMENTAL
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && PCI
---help---
This is the driver for Realtek RTL8723AE 802.11n PCIe
wireless network adapters.
@@ -45,9 +57,7 @@ config RTL8723AE
config RTL8192CU
tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
- depends on MAC80211 && USB
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && USB
select RTL8192C_COMMON
---help---
This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
@@ -55,16 +65,6 @@ config RTL8192CU
If you choose to build it as a module, it will be called rtl8192cu
-config RTLWIFI
- tristate
- depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE
- default m
-
-config RTLWIFI_DEBUG
- bool "Additional debugging output"
- depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE
- default y
-
config RTL8192C_COMMON
tristate
depends on RTL8192CE || RTL8192CU
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 4494d130b37c..99c5cea3fe21 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -523,8 +523,8 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
if (mac->opmode == NL80211_IFTYPE_STATION)
bw_40 = mac->bw_40;
else if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC)
- bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
if (bw_40 && sgi_40)
tcb_desc->use_shortgi = true;
@@ -634,8 +634,7 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
return;
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (!(sta->ht_cap.ht_supported) ||
- !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
return;
} else if (mac->opmode == NL80211_IFTYPE_STATION) {
if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
@@ -1004,7 +1003,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
is_tx ? "Tx" : "Rx");
if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->
+ works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies =
jiffies;
}
@@ -1014,7 +1014,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
} else if (ETH_P_ARP == ether_type) {
if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
@@ -1024,7 +1024,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
"802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index be33aa14c8af..d3ce9fbef00e 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -879,7 +879,9 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
"IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
return rtl_tx_agg_start(hw, sta, tid, ssn);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
"IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
return rtl_tx_agg_stop(hw, sta, tid);
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 3deacafdcd5e..4261e8ecc4c3 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -743,6 +743,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
done:
bufferaddress = (*((dma_addr_t *)skb->cb));
+ if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
+ return;
tmp_one = 1;
rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
HW_DESC_RXBUFF_ADDR,
@@ -1115,6 +1117,10 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
PCI_DMA_FROMDEVICE);
bufferaddress = (*((dma_addr_t *)skb->cb));
+ if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) {
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index c1e065f136ba..f9f059dadb73 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -116,9 +116,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (txrc->short_preamble)
rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (sta && (sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta && (sta->bandwidth >= IEEE80211_STA_RX_BW_40))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
} else {
if (mac->bw_40)
@@ -223,13 +222,6 @@ static void rtl_rate_init(void *ppriv,
{
}
-static void rtl_rate_update(void *ppriv,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
-{
-}
-
static void *rtl_rate_alloc(struct ieee80211_hw *hw,
struct dentry *debugfsdir)
{
@@ -275,7 +267,6 @@ static struct rate_control_ops rtl_rate_ops = {
.alloc_sta = rtl_rate_alloc_sta,
.free_sta = rtl_rate_free_sta,
.rate_init = rtl_rate_init,
- .rate_update = rtl_rate_update,
.tx_status = rtl_tx_status,
.get_rate = rtl_get_rate,
};
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index c1608cddc529..d7d0d4948b01 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -158,8 +158,6 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
const struct ieee80211_reg_rule *reg_rule;
struct ieee80211_channel *ch;
unsigned int i;
- u32 bandwidth = 0;
- int r;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
@@ -174,9 +172,8 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
(ch->flags & IEEE80211_CHAN_RADAR))
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- r = freq_reg_info(wiphy, ch->center_freq,
- bandwidth, &reg_rule);
- if (r)
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (IS_ERR(reg_rule))
continue;
/*
@@ -211,8 +208,6 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
const struct ieee80211_reg_rule *reg_rule;
- u32 bandwidth = 0;
- int r;
if (!wiphy->bands[IEEE80211_BAND_2GHZ])
return;
@@ -240,16 +235,16 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
*/
ch = &sband->channels[11]; /* CH 12 */
- r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
- if (!r) {
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
}
ch = &sband->channels[12]; /* CH 13 */
- r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
- if (!r) {
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
@@ -303,9 +298,9 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
return;
}
-static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
- struct regulatory_request *request,
- struct rtl_regulatory *reg)
+static void _rtl_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct rtl_regulatory *reg)
{
/* We always apply this */
_rtl_reg_apply_radar_flags(wiphy);
@@ -319,8 +314,6 @@ static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
_rtl_reg_apply_world_flags(wiphy, request->initiator, reg);
break;
}
-
- return 0;
}
static const struct ieee80211_regdomain *_rtl_regdomain_select(
@@ -353,9 +346,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
struct wiphy *wiphy,
- int (*reg_notifier) (struct wiphy *wiphy,
- struct regulatory_request *
- request))
+ void (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *
+ request))
{
const struct ieee80211_regdomain *regd;
@@ -384,7 +377,7 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
}
int rtl_regd_init(struct ieee80211_hw *hw,
- int (*reg_notifier) (struct wiphy *wiphy,
+ void (*reg_notifier) (struct wiphy *wiphy,
struct regulatory_request *request))
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -426,12 +419,12 @@ int rtl_regd_init(struct ieee80211_hw *hw,
return 0;
}
-int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
- return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
+ _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
}
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
index 70ef2f418a44..4e1f4f00e6e9 100644
--- a/drivers/net/wireless/rtlwifi/regd.h
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -55,7 +55,7 @@ enum country_code_type_t {
};
int rtl_regd_init(struct ieee80211_hw *hw,
- int (*reg_notifier) (struct wiphy *wiphy,
- struct regulatory_request *request));
-int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+ void (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request));
+void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
#endif
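The regulatory notifier follows the cfg80211 change that turned the notifier into a void callback; there is no longer a return value for the core to act on. The new shape of such a notifier, independent of rtlwifi (my_reg_notifier is a placeholder name; drivers normally install it in wiphy->reg_notifier before wiphy_register()):

    static void my_reg_notifier(struct wiphy *wiphy,
    			    struct regulatory_request *request)
    {
    	/* adjust channel flags / power limits for the new domain;
    	 * errors can only be logged, not returned */
    }

    	wiphy->reg_notifier = my_reg_notifier;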
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 1cdf5a271c9f..b793a659a465 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -669,7 +669,8 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
u8 thermalvalue, delta, delta_lck, delta_iqk;
long ele_a, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
- u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0;
+ u8 ofdm_index[2], ofdm_index_old[2], cck_index_old = 0;
+ s8 cck_index = 0;
int i;
bool is2t = IS_92C_SERIAL(rtlhal->version);
s8 txpwr_level[2] = {0, 0};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 1d5d3604e3e0..246e5352f2e1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -692,7 +692,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92c_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem\n");
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index d1f34f6ffbdf..1b65db7fd651 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -1846,9 +1846,9 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curshortgi_40mhz = curtxbw_40mhz &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 173424756149..b9b1a6e0b16e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -488,7 +488,7 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *praddr;
__le16 fc;
u16 type, c_fc;
- bool packet_matchbssid, packet_toself, packet_beacon;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
@@ -611,8 +611,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
dma_addr_t mapping = pci_map_single(rtlpci->pdev,
skb->data, skb->len,
PCI_DMA_TODEVICE);
+
u8 bw_40 = 0;
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
rcu_read_lock();
sta = get_sta(hw, mac->vif, mac->bssid);
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -620,8 +626,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -774,6 +779,11 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
if (firstseg)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 32ff959a0251..85b6bdb163c0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -1084,7 +1084,7 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *praddr;
__le16 fc;
u16 type, cpu_fc;
- bool packet_matchbssid, packet_toself, packet_beacon;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
hdr = (struct ieee80211_hdr *)tmp_buf;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index b7e6607e6b6d..a73a17bc56dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -76,7 +76,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
GFP_KERNEL, hw, rtl_fw_cb);
- return 0;
+ return err;
}
static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
@@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
/* RTL8188CUS-VL */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
@@ -363,9 +364,15 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+static int rtl8192cu_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return rtl_usb_probe(intf, id, &rtl92cu_hal_cfg);
+}
+
static struct usb_driver rtl8192cu_driver = {
.name = "rtl8192cu",
- .probe = rtl_usb_probe,
+ .probe = rtl8192cu_probe,
.disconnect = rtl_usb_disconnect,
.id_table = rtl8192c_usb_ids,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index fd8df233ff22..5251fb8a111e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -841,9 +841,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
u8 ofdm_index[2];
- u8 cck_index = 0;
+ s8 cck_index = 0;
u8 ofdm_index_old[2];
- u8 cck_index_old = 0;
+ s8 cck_index_old = 0;
u8 index;
int i;
bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index f4051f4f0390..aa5b42521bb4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1970,8 +1970,7 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ? 1 : 0;
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index f9f3861046c1..941080e03c06 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -452,7 +452,7 @@ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *praddr;
u16 type, cfc;
__le16 fc;
- bool packet_matchbssid, packet_toself, packet_beacon;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
hdr = (struct ieee80211_hdr *)tmp_buf;
@@ -574,8 +574,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
@@ -587,6 +586,11 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
buf_len = skb->len;
mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92d));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
@@ -740,6 +744,11 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 28526a7361f5..084e7773bce2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -2085,8 +2085,7 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index = 0;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ? 1 : 0;
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 0e9f6ebf078a..7b0a2e75b8b8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -480,7 +480,7 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *praddr;
__le16 fc;
u16 type, cfc;
- bool packet_matchbssid, packet_toself, packet_beacon;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
@@ -611,13 +611,17 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
PCI_DMA_TODEVICE);
u8 bw_40 = 0;
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
if (mac->opmode == NL80211_IFTYPE_STATION) {
bw_40 = mac->bw_40;
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -763,6 +767,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool firstseg, bool lastseg, struct sk_buff *skb)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
@@ -770,7 +775,12 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
- /* Clear all status */
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
+ /* Clear all status */
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_CMDDESC_SIZE_RTL8192S);
/* This bit indicate this packet is used for FW download. */
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
index f55b1767ef57..35cb8f83eed4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
@@ -252,7 +252,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
u16 box_reg = 0, box_extreg = 0;
u8 u1tmp;
bool isfw_rd = false;
- bool bwrite_sucess = false;
+ bool bwrite_success = false;
u8 wait_h2c_limmit = 100;
u8 wait_writeh2c_limmit = 100;
u8 boxcontent[4], boxextcontent[2];
@@ -291,7 +291,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
}
}
- while (!bwrite_sucess) {
+ while (!bwrite_success) {
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -429,7 +429,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
break;
}
- bwrite_sucess = true;
+ bwrite_success = true;
rtlhal->last_hmeboxnum = boxnum + 1;
if (rtlhal->last_hmeboxnum == 4)
@@ -512,7 +512,6 @@ static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
- u8 own;
unsigned long flags;
struct sk_buff *pskb = NULL;
@@ -525,7 +524,6 @@ static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[0];
- own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
index 887d521fe690..68c28340f791 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
@@ -1433,7 +1433,6 @@ static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
- u8 bt_retry_cnt;
u8 bt_info_original;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex] Get bt info by fw!!\n");
@@ -1445,7 +1444,6 @@ static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
"[BTCoex] c2h for btInfo not rcvd yet!!\n");
}
- bt_retry_cnt = rtlhal->hal_coex_8723.bt_retry_cnt;
bt_info_original = rtlhal->hal_coex_8723.c2h_bt_info_original;
/* when bt inquiry or page scan, we have to set h2c 0x25
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 0a8c03863fb2..9a0c71c2e15e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -703,11 +703,9 @@ static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 reg_bw_opmode;
- u32 reg_ratr, reg_prsr;
+ u32 reg_prsr;
reg_bw_opmode = BW_OPMODE_20MHZ;
- reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
@@ -1868,8 +1866,7 @@ static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ? 1 : 0;
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
@@ -2030,7 +2027,7 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ enum rf_pwrstate e_rfpowerstate_toset;
u8 u1tmp;
bool actuallyset = false;
@@ -2049,8 +2046,6 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
- cur_rfstate = ppsc->rfpwr_state;
-
rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2)&~(BIT(1)));
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index 39cc7938eedf..eafbb18dd48e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -614,17 +614,11 @@ bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int i;
- bool rtstatus = true;
u32 *radioa_array_table;
- u32 *radiob_array_table;
- u16 radioa_arraylen, radiob_arraylen;
+ u16 radioa_arraylen;
radioa_arraylen = Rtl8723ERADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8723E_RADIOA_1TARRAY;
- radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH;
- radiob_array_table = RTL8723E_RADIOB_1TARRAY;
-
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
@@ -1106,7 +1100,7 @@ u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723ae_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem\n");
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
@@ -1531,11 +1525,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
0x522, 0x550, 0x551, 0x040
};
const u32 retrycount = 2;
- u32 bbvalue;
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
-
phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
}
@@ -1712,8 +1703,7 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
long result[4][8];
u8 i, final_candidate;
bool patha_ok, pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_tmp = 0;
bool is12simular, is13simular, is23simular;
bool start_conttx = false, singletone = false;
u32 iqk_bb_reg[10] = {
@@ -1780,21 +1770,15 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
patha_ok = pathb_ok = true;
} else {
rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 18b0bc51766b..bb7cc90bafb2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -341,7 +341,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
.maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
};
-static struct pci_device_id rtl8723ae_pci_ids[] __devinitdata = {
+static struct pci_device_id rtl8723ae_pci_ids[] = {
{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723ae_hal_cfg)},
{},
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index 87331d826d73..ac081297db50 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -244,10 +244,9 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
- u8 *psaddr;
__le16 fc;
u16 type;
- bool packet_matchbssid, packet_toself, packet_beacon;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
@@ -255,7 +254,6 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
fc = hdr->frame_control;
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
- psaddr = ieee80211_get_SA(hdr);
packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
(!compare_ether_addr(mac->bssid,
@@ -387,13 +385,17 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
PCI_DMA_TODEVICE);
u8 bw_40 = 0;
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
if (mac->opmode == NL80211_IFTYPE_STATION) {
bw_40 = mac->bw_40;
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -542,6 +544,11 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
PCI_DMA_TODEVICE);
__le16 fc = hdr->frame_control;
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
if (firstseg)
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 29f0969e4ba0..156b52732f3d 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -42,8 +42,12 @@
static void usbctrl_async_callback(struct urb *urb)
{
- if (urb)
- kfree(urb->context);
+ if (urb) {
+ /* free dr */
+ kfree(urb->setup_packet);
+ /* free databuf */
+ kfree(urb->transfer_buffer);
+ }
}
static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
@@ -55,39 +59,47 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
u8 reqtype;
struct usb_ctrlrequest *dr;
struct urb *urb;
- struct rtl819x_async_write_data {
- u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
- struct usb_ctrlrequest dr;
- } *buf;
+ const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
+ u8 *databuf;
+
+ if (WARN_ON_ONCE(len > databuf_maxlen))
+ len = databuf_maxlen;
pipe = usb_sndctrlpipe(udev, 0); /* write_out */
reqtype = REALTEK_USB_VENQT_WRITE;
- buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
- if (!buf)
+ dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
+ if (!dr)
+ return -ENOMEM;
+
+ databuf = kmalloc(databuf_maxlen, GFP_ATOMIC);
+ if (!databuf) {
+ kfree(dr);
return -ENOMEM;
+ }
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- kfree(buf);
+ kfree(databuf);
+ kfree(dr);
return -ENOMEM;
}
- dr = &buf->dr;
-
dr->bRequestType = reqtype;
dr->bRequest = request;
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(len);
/* data are already in little-endian order */
- memcpy(buf, pdata, len);
+ memcpy(databuf, pdata, len);
usb_fill_control_urb(urb, udev, pipe,
- (unsigned char *)dr, buf, len,
- usbctrl_async_callback, buf);
+ (unsigned char *)dr, databuf, len,
+ usbctrl_async_callback, NULL);
rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc < 0)
- kfree(buf);
+ if (rc < 0) {
+ kfree(databuf);
+ kfree(dr);
+ }
usb_free_urb(urb);
return rc;
}
@@ -210,17 +222,16 @@ static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
u16 index = REALTEK_USB_VENQT_CMD_IDX;
int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
u8 *buffer;
- dma_addr_t dma_addr;
- wvalue = (u16)(addr&0x0000ffff);
- buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
+ wvalue = (u16)(addr & 0x0000ffff);
+ buffer = kmalloc(len, GFP_ATOMIC);
if (!buffer)
return;
memcpy(buffer, data, len);
usb_control_msg(udev, pipe, request, reqtype, wvalue,
index, buffer, len, 50);
- usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
+ kfree(buffer);
}
static void _rtl_usb_io_handler_init(struct device *dev,
@@ -543,8 +554,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
WARN_ON(skb_queue_empty(&rx_queue));
while (!skb_queue_empty(&rx_queue)) {
_skb = skb_dequeue(&rx_queue);
- _rtl_usb_rx_process_agg(hw, skb);
- ieee80211_rx_irqsafe(hw, skb);
+ _rtl_usb_rx_process_agg(hw, _skb);
+ ieee80211_rx_irqsafe(hw, _skb);
}
}
@@ -640,6 +651,7 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
"Failed to prep_rx_urb!!\n");
err = PTR_ERR(skb);
+ usb_free_urb(urb);
goto err_out;
}
@@ -825,8 +837,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
u32 ep_num;
struct urb *_urb = NULL;
struct sk_buff *_skb = NULL;
- struct sk_buff_head *skb_list;
- struct usb_anchor *urb_list;
WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
if (unlikely(IS_USB_STOP(rtlusb))) {
@@ -836,7 +846,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
}
ep_num = rtlusb->ep_map.ep_mapping[qnum];
- skb_list = &rtlusb->tx_skb_queue[ep_num];
_skb = skb;
_urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
if (unlikely(!_urb)) {
@@ -844,7 +853,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
"Can't allocate urb. Drop skb!\n");
return;
}
- urb_list = &rtlusb->tx_pending[ep_num];
_rtl_submit_tx_urb(hw, _urb);
}
@@ -941,7 +949,8 @@ static struct rtl_intf_ops rtl_usb_ops = {
};
int rtl_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id,
+ struct rtl_hal_cfg *rtl_hal_cfg)
{
int err;
struct ieee80211_hw *hw = NULL;
@@ -976,7 +985,7 @@ int rtl_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, hw);
/* init cfg & intf_ops */
rtlpriv->rtlhal.interface = INTF_USB;
- rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
+ rtlpriv->cfg = rtl_hal_cfg;
rtlpriv->intf_ops = &rtl_usb_ops;
rtl_dbgp_flag_init(hw);
/* Init IO handler */
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
index 5235136f6dd2..fb986f98d1df 100644
--- a/drivers/net/wireless/rtlwifi/usb.h
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -157,7 +157,8 @@ struct rtl_usb_priv {
int rtl_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
+ const struct usb_device_id *id,
+ struct rtl_hal_cfg *rtl92cu_hal_cfg);
void rtl_usb_disconnect(struct usb_interface *intf);
int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
int rtl_usb_resume(struct usb_interface *pusb_intf);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 21a5f4f4a135..f13258a8d995 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1702,7 +1702,7 @@ struct rtl_works {
struct rtl_debug {
u32 dbgp_type[DBGP_TYPE_MAX];
- u32 global_debuglevel;
+ int global_debuglevel;
u64 global_debugcomponents;
/* add for proc debug */
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
index be800119d0a3..cbe1e7fef61b 100644
--- a/drivers/net/wireless/ti/Kconfig
+++ b/drivers/net/wireless/ti/Kconfig
@@ -12,4 +12,13 @@ source "drivers/net/wireless/ti/wl18xx/Kconfig"
# keep last for automatic dependencies
source "drivers/net/wireless/ti/wlcore/Kconfig"
+
+config WILINK_PLATFORM_DATA
+ bool "TI WiLink platform data"
+ depends on WLCORE_SDIO || WL1251_SDIO
+ default y
+ ---help---
+ Small platform data bit needed to pass data to the sdio modules.
+
+
endif # WL_TI
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
index 4d6823983c04..af14231aeede 100644
--- a/drivers/net/wireless/ti/Makefile
+++ b/drivers/net/wireless/ti/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_WLCORE) += wlcore/
obj-$(CONFIG_WL12XX) += wl12xx/
-obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/
obj-$(CONFIG_WL1251) += wl1251/
obj-$(CONFIG_WL18XX) += wl18xx/
+
+# small builtin driver bit
+obj-$(CONFIG_WILINK_PLATFORM_DATA) += wilink_platform_data.o
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c
index 998e95895f9d..998e95895f9d 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c
+++ b/drivers/net/wireless/ti/wilink_platform_data.c
diff --git a/drivers/net/wireless/ti/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig
index 1fb65849414f..8fec4ed36ac2 100644
--- a/drivers/net/wireless/ti/wl1251/Kconfig
+++ b/drivers/net/wireless/ti/wl1251/Kconfig
@@ -1,6 +1,6 @@
menuconfig WL1251
tristate "TI wl1251 driver support"
- depends on MAC80211 && EXPERIMENTAL && GENERIC_HARDIRQS
+ depends on MAC80211 && GENERIC_HARDIRQS
select FW_LOADER
select CRC7
---help---
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 5ec50a476a69..74ae8e1c2e33 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -29,6 +29,8 @@
static int wl1251_event_scan_complete(struct wl1251 *wl,
struct event_mailbox *mbox)
{
+ int ret = 0;
+
wl1251_debug(DEBUG_EVENT, "status: 0x%x, channels: %d",
mbox->scheduled_scan_status,
mbox->scheduled_scan_channels);
@@ -37,9 +39,11 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
ieee80211_scan_completed(wl->hw, false);
wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
wl->scanning = false;
+ if (wl->hw->conf.flags & IEEE80211_CONF_IDLE)
+ ret = wl1251_ps_set_mode(wl, STATION_IDLE);
}
- return 0;
+ return ret;
}
static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index f47e8b0482ad..bbbf68cf50a7 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -623,7 +623,7 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (changed & IEEE80211_CONF_CHANGE_IDLE && !wl->scanning) {
if (conf->flags & IEEE80211_CONF_IDLE) {
ret = wl1251_ps_set_mode(wl, STATION_IDLE);
if (ret < 0)
@@ -895,11 +895,21 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (hw->conf.flags & IEEE80211_CONF_IDLE) {
+ ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
+ ret = wl1251_join(wl, wl->bss_type, wl->channel,
+ wl->beacon_int, wl->dtim_period);
+ if (ret < 0)
+ goto out_sleep;
+ }
+
skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
req->ie_len);
if (!skb) {
ret = -ENOMEM;
- goto out;
+ goto out_idle;
}
if (req->ie_len)
memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
@@ -908,11 +918,11 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
skb->len);
dev_kfree_skb(skb);
if (ret < 0)
- goto out_sleep;
+ goto out_idle;
ret = wl1251_cmd_trigger_scan_to(wl, 0);
if (ret < 0)
- goto out_sleep;
+ goto out_idle;
wl->scanning = true;
@@ -920,9 +930,13 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
req->n_channels, WL1251_SCAN_NUM_PROBES);
if (ret < 0) {
wl->scanning = false;
- goto out_sleep;
+ goto out_idle;
}
+ goto out_sleep;
+out_idle:
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ ret = wl1251_ps_set_mode(wl, STATION_IDLE);
out_sleep:
wl1251_ps_elp_sleep(wl);
diff --git a/drivers/net/wireless/ti/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
index db719f7d2692..b9e27b98bbc9 100644
--- a/drivers/net/wireless/ti/wl1251/ps.c
+++ b/drivers/net/wireless/ti/wl1251/ps.c
@@ -68,8 +68,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
unsigned long timeout, start;
u32 elp_reg;
- if (delayed_work_pending(&wl->elp_work))
- cancel_delayed_work(&wl->elp_work);
+ cancel_delayed_work(&wl->elp_work);
if (!wl->elp)
return 0;
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
index da509aa7d009..e6a24056b3c8 100644
--- a/drivers/net/wireless/ti/wl12xx/Makefile
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -1,3 +1,3 @@
-wl12xx-objs = main.o cmd.o acx.o debugfs.o
+wl12xx-objs = main.o cmd.o acx.o debugfs.o scan.o event.o
obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
index 622206241e83..7dc9f965037d 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -284,3 +284,40 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
kfree(radio_parms);
return ret;
}
+
+int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct wl12xx_cmd_channel_switch *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "cmd channel switch");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->role_id = wlvif->role_id;
+ cmd->channel = ch_switch->channel->hw_value;
+ cmd->switch_time = ch_switch->count;
+ cmd->stop_tx = ch_switch->block_tx;
+
+ /* FIXME: control from mac80211 in the future */
+ /* Enable TX on the target channel */
+ cmd->post_switch_tx_disable = 0;
+
+ ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send channel switch command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.h b/drivers/net/wireless/ti/wl12xx/cmd.h
index 140a0e8829d5..32cbad54e993 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.h
+++ b/drivers/net/wireless/ti/wl12xx/cmd.h
@@ -103,10 +103,30 @@ struct wl1271_ext_radio_parms_cmd {
u8 padding[3];
} __packed;
+struct wl12xx_cmd_channel_switch {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+
+ /* The new serving channel */
+ u8 channel;
+ /* Relative time of the serving channel switch in TBTT units */
+ u8 switch_time;
+ /* Stop the role TX, should expect it after radar detection */
+ u8 stop_tx;
+ /* The target channel tx status 1-stopped 0-open */
+ u8 post_switch_tx_disable;
+
+ u8 padding[3];
+} __packed;
+
int wl1271_cmd_general_parms(struct wl1271 *wl);
int wl128x_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
int wl128x_cmd_radio_parms(struct wl1271 *wl);
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
+int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch);
#endif /* __WL12XX_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/event.c b/drivers/net/wireless/ti/wl12xx/event.c
new file mode 100644
index 000000000000..6ac0ed751da8
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/event.c
@@ -0,0 +1,116 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "event.h"
+#include "scan.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+
+int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout)
+{
+ u32 local_event;
+
+ switch (event) {
+ case WLCORE_EVENT_ROLE_STOP_COMPLETE:
+ local_event = ROLE_STOP_COMPLETE_EVENT_ID;
+ break;
+
+ case WLCORE_EVENT_PEER_REMOVE_COMPLETE:
+ local_event = PEER_REMOVE_COMPLETE_EVENT_ID;
+ break;
+
+ default:
+ /* event not implemented */
+ return 0;
+ }
+ return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
+}
+
+int wl12xx_process_mailbox_events(struct wl1271 *wl)
+{
+ struct wl12xx_event_mailbox *mbox = wl->mbox;
+ u32 vector;
+
+
+ vector = le32_to_cpu(mbox->events_vector);
+ vector &= ~(le32_to_cpu(mbox->events_mask));
+
+ wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector);
+
+ if (vector & SCAN_COMPLETE_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT, "status: 0x%x",
+ mbox->scheduled_scan_status);
+
+ if (wl->scan_wlvif)
+ wl12xx_scan_completed(wl, wl->scan_wlvif);
+ }
+
+ if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT,
+ "PERIODIC_SCAN_REPORT_EVENT (status 0x%0x)",
+ mbox->scheduled_scan_status);
+
+ wlcore_scan_sched_scan_results(wl);
+ }
+
+ if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
+ wlcore_event_sched_scan_completed(wl,
+ mbox->scheduled_scan_status);
+ if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
+ wlcore_event_soft_gemini_sense(wl,
+ mbox->soft_gemini_sense_info);
+
+ if (vector & BSS_LOSE_EVENT_ID)
+ wlcore_event_beacon_loss(wl, 0xff);
+
+ if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
+ wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
+
+ if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
+ wlcore_event_ba_rx_constraint(wl,
+ BIT(mbox->role_id),
+ mbox->rx_ba_allowed);
+
+ if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID)
+ wlcore_event_channel_switch(wl, 0xff,
+ mbox->channel_switch_status);
+
+ if (vector & DUMMY_PACKET_EVENT_ID)
+ wlcore_event_dummy_packet(wl);
+
+ /*
+ * "TX retries exceeded" has a different meaning according to mode.
+ * In AP mode the offending station is disconnected.
+ */
+ if (vector & MAX_TX_RETRY_EVENT_ID)
+ wlcore_event_max_tx_failure(wl,
+ le16_to_cpu(mbox->sta_tx_retry_exceeded));
+
+ if (vector & INACTIVE_STA_EVENT_ID)
+ wlcore_event_inactive_sta(wl,
+ le16_to_cpu(mbox->sta_aging_status));
+
+ if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
+ wlcore_event_roc_complete(wl);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ti/wl12xx/event.h b/drivers/net/wireless/ti/wl12xx/event.h
new file mode 100644
index 000000000000..a5cc3fcd9eea
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/event.h
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_EVENT_H__
+#define __WL12XX_EVENT_H__
+
+#include "../wlcore/wlcore.h"
+
+enum {
+ MEASUREMENT_START_EVENT_ID = BIT(8),
+ MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
+ SCAN_COMPLETE_EVENT_ID = BIT(10),
+ WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11),
+ AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12),
+ RESERVED1 = BIT(13),
+ PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14),
+ ROLE_STOP_COMPLETE_EVENT_ID = BIT(15),
+ RADAR_DETECTED_EVENT_ID = BIT(16),
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
+ BSS_LOSE_EVENT_ID = BIT(18),
+ REGAINED_BSS_EVENT_ID = BIT(19),
+ MAX_TX_RETRY_EVENT_ID = BIT(20),
+ DUMMY_PACKET_EVENT_ID = BIT(21),
+ SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
+ CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23),
+ SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
+ PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
+ INACTIVE_STA_EVENT_ID = BIT(26),
+ PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27),
+ PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28),
+ PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29),
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30),
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31),
+};
+
+struct wl12xx_event_mailbox {
+ __le32 events_vector;
+ __le32 events_mask;
+ __le32 reserved_1;
+ __le32 reserved_2;
+
+ u8 number_of_scan_results;
+ u8 scan_tag;
+ u8 completed_scan_status;
+ u8 reserved_3;
+
+ u8 soft_gemini_sense_info;
+ u8 soft_gemini_protective_info;
+ s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
+ u8 change_auto_mode_timeout;
+ u8 scheduled_scan_status;
+ u8 reserved4;
+ /* tuned channel (roc) */
+ u8 roc_channel;
+
+ __le16 hlid_removed_bitmap;
+
+ /* bitmap of aged stations (by HLID) */
+ __le16 sta_aging_status;
+
+ /* bitmap of stations (by HLID) which exceeded max tx retries */
+ __le16 sta_tx_retry_exceeded;
+
+ /* discovery completed results */
+ u8 discovery_tag;
+ u8 number_of_preq_results;
+ u8 number_of_prsp_results;
+ u8 reserved_5;
+
+ /* rx ba constraint */
+ u8 role_id; /* 0xFF means any role. */
+ u8 rx_ba_allowed;
+ u8 reserved_6[2];
+
+ /* Channel switch results */
+
+ u8 channel_switch_role_id;
+ u8 channel_switch_status;
+ u8 reserved_7[2];
+
+ u8 ps_poll_delivery_failure_role_ids;
+ u8 stopped_role_ids;
+ u8 started_role_ids;
+
+ u8 reserved_8[9];
+} __packed;
+
+int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout);
+int wl12xx_process_mailbox_events(struct wl1271 *wl);
+
+#endif
+
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index e5f5f8f39144..09694e39bb14 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -38,6 +38,8 @@
#include "reg.h"
#include "cmd.h"
#include "acx.h"
+#include "scan.h"
+#include "event.h"
#include "debugfs.h"
static char *fref_param;
@@ -208,6 +210,8 @@ static struct wlcore_conf wl12xx_conf = {
.tmpl_short_retry_limit = 10,
.tmpl_long_retry_limit = 10,
.tx_watchdog_timeout = 5000,
+ .slow_link_thold = 3,
+ .fast_link_thold = 10,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -265,8 +269,10 @@ static struct wlcore_conf wl12xx_conf = {
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
- .min_dwell_time_passive = 100000,
- .max_dwell_time_passive = 100000,
+ .min_dwell_time_active_long = 25000,
+ .max_dwell_time_active_long = 50000,
+ .dwell_time_passive = 100000,
+ .dwell_time_dfs = 150000,
.num_probe_reqs = 2,
.split_scan_timeout = 50000,
},
@@ -368,6 +374,10 @@ static struct wlcore_conf wl12xx_conf = {
.increase_time = 1,
.window_size = 16,
},
+ .recovery = {
+ .bug_on_recovery = 0,
+ .no_recovery = 0,
+ },
};
static struct wl12xx_priv_conf wl12xx_default_priv_conf = {
@@ -601,9 +611,9 @@ static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
int ret;
- if (wl->chip.id != CHIP_ID_1283_PG20) {
+ if (wl->chip.id != CHIP_ID_128X_PG20) {
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
- struct wl127x_rx_mem_pool_addr rx_mem_addr;
+ struct wl12xx_priv *priv = wl->priv;
/*
* Choose the block we want to read
@@ -612,13 +622,13 @@ static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
*/
u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK;
- rx_mem_addr.addr = (mem_block << 8) +
+ priv->rx_mem_addr->addr = (mem_block << 8) +
le32_to_cpu(wl_mem_map->packet_memory_pool_start);
- rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
+ priv->rx_mem_addr->addr_extra = priv->rx_mem_addr->addr + 4;
- ret = wlcore_write(wl, WL1271_SLV_REG_DATA, &rx_mem_addr,
- sizeof(rx_mem_addr), false);
+ ret = wlcore_write(wl, WL1271_SLV_REG_DATA, priv->rx_mem_addr,
+ sizeof(*priv->rx_mem_addr), false);
if (ret < 0)
return ret;
}
@@ -631,13 +641,15 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
int ret = 0;
switch (wl->chip.id) {
- case CHIP_ID_1271_PG10:
+ case CHIP_ID_127X_PG10:
wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
wl->chip.id);
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
- WLCORE_QUIRK_TKIP_HEADER_SPACE;
+ WLCORE_QUIRK_TKIP_HEADER_SPACE |
+ WLCORE_QUIRK_START_STA_FAILS |
+ WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
@@ -646,18 +658,22 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
- wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
- WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
- WL127X_MINOR_VER);
+ wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+ WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
+ WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER,
+ WL127X_IFTYPE_MR_VER, WL127X_MAJOR_MR_VER,
+ WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER);
break;
- case CHIP_ID_1271_PG20:
+ case CHIP_ID_127X_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
- WLCORE_QUIRK_TKIP_HEADER_SPACE;
+ WLCORE_QUIRK_TKIP_HEADER_SPACE |
+ WLCORE_QUIRK_START_STA_FAILS |
+ WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->plt_fw_name = WL127X_PLT_FW_NAME;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -667,12 +683,14 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
- wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
- WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
- WL127X_MINOR_VER);
+ wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+ WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
+ WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER,
+ WL127X_IFTYPE_MR_VER, WL127X_MAJOR_MR_VER,
+ WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER);
break;
- case CHIP_ID_1283_PG20:
+ case CHIP_ID_128X_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
wl->chip.id);
wl->plt_fw_name = WL128X_PLT_FW_NAME;
@@ -682,19 +700,29 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* wl128x requires TX blocksize alignment */
wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
- WLCORE_QUIRK_TKIP_HEADER_SPACE;
-
- wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, WL128X_IFTYPE_VER,
- WL128X_MAJOR_VER, WL128X_SUBTYPE_VER,
- WL128X_MINOR_VER);
+ WLCORE_QUIRK_TKIP_HEADER_SPACE |
+ WLCORE_QUIRK_START_STA_FAILS |
+ WLCORE_QUIRK_AP_ZERO_SESSION_ID;
+
+ wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER,
+ WL128X_IFTYPE_SR_VER, WL128X_MAJOR_SR_VER,
+ WL128X_SUBTYPE_SR_VER, WL128X_MINOR_SR_VER,
+ WL128X_IFTYPE_MR_VER, WL128X_MAJOR_MR_VER,
+ WL128X_SUBTYPE_MR_VER, WL128X_MINOR_MR_VER);
break;
- case CHIP_ID_1283_PG10:
+ case CHIP_ID_128X_PG10:
default:
wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
goto out;
}
+ /* common settings */
+ wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY;
+ wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY;
+ wl->sched_scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
+ wl->sched_scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+ wl->max_channels_5 = WL12XX_MAX_CHANNELS_5GHZ;
out:
return ret;
}
@@ -1067,7 +1095,7 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
u32 clk;
int selected_clock = -1;
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
ret = wl128x_boot_clk(wl, &selected_clock);
if (ret < 0)
goto out;
@@ -1098,7 +1126,7 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
- if (wl->chip.id == CHIP_ID_1283_PG20)
+ if (wl->chip.id == CHIP_ID_128X_PG20)
clk |= ((selected_clock & 0x3) << 1) << 4;
else
clk |= (priv->ref_clock << 1) << 4;
@@ -1152,7 +1180,7 @@ static int wl12xx_pre_upload(struct wl1271 *wl)
/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
* to upload_fw) */
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
if (ret < 0)
goto out;
@@ -1219,6 +1247,23 @@ static int wl12xx_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
+ wl->event_mask = BSS_LOSE_EVENT_ID |
+ REGAINED_BSS_EVENT_ID |
+ SCAN_COMPLETE_EVENT_ID |
+ ROLE_STOP_COMPLETE_EVENT_ID |
+ RSSI_SNR_TRIGGER_0_EVENT_ID |
+ PSPOLL_DELIVERY_FAILURE_EVENT_ID |
+ SOFT_GEMINI_SENSE_EVENT_ID |
+ PERIODIC_SCAN_REPORT_EVENT_ID |
+ PERIODIC_SCAN_COMPLETE_EVENT_ID |
+ DUMMY_PACKET_EVENT_ID |
+ PEER_REMOVE_COMPLETE_EVENT_ID |
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID |
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
+ INACTIVE_STA_EVENT_ID |
+ MAX_TX_RETRY_EVENT_ID |
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID;
+
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
@@ -1261,7 +1306,7 @@ static void
wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
u32 blks, u32 spare_blks)
{
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
desc->wl128x_mem.total_mem_blocks = blks;
} else {
desc->wl127x_mem.extra_blocks = spare_blks;
@@ -1275,7 +1320,7 @@ wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
{
u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len);
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
desc->length = cpu_to_le16(aligned_len >> 2);
@@ -1339,7 +1384,7 @@ static int wl12xx_hw_init(struct wl1271 *wl)
{
int ret;
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
ret = wl128x_cmd_general_parms(wl);
@@ -1394,22 +1439,6 @@ static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
return wlvif->rate_set;
}
-static int wl12xx_identify_fw(struct wl1271 *wl)
-{
- unsigned int *fw_ver = wl->chip.fw_ver;
-
- /* Only new station firmwares support routing fw logs to the host */
- if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
- (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
- wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
-
- /* This feature is not yet supported for AP mode */
- if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
- wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
-
- return 0;
-}
-
static void wl12xx_conf_init(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
@@ -1426,7 +1455,7 @@ static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
bool supported = false;
u8 major, minor;
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
@@ -1482,7 +1511,7 @@ static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
u16 die_info;
int ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
+ if (wl->chip.id == CHIP_ID_128X_PG20)
ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1,
&die_info);
else
@@ -1589,16 +1618,46 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
return wlcore_set_key(wl, cmd, vif, sta, key_conf);
}
+static int wl12xx_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid)
+{
+ return wl1271_acx_set_ht_capabilities(wl, ht_cap, allow_ht_operation,
+ hlid);
+}
+
+static bool wl12xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ u8 thold;
+
+ if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map))
+ thold = wl->conf.tx.fast_link_thold;
+ else
+ thold = wl->conf.tx.slow_link_thold;
+
+ return lnk->allocated_pkts < thold;
+}
+
+static bool wl12xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ /* any link is good for low priority */
+ return true;
+}
+
static int wl12xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl12xx_ops = {
.setup = wl12xx_setup,
.identify_chip = wl12xx_identify_chip,
- .identify_fw = wl12xx_identify_fw,
.boot = wl12xx_boot,
.plt_init = wl12xx_plt_init,
.trigger_cmd = wl12xx_trigger_cmd,
.ack_event = wl12xx_ack_event,
+ .wait_for_event = wl12xx_wait_for_event,
+ .process_mailbox_events = wl12xx_process_mailbox_events,
.calc_tx_blocks = wl12xx_calc_tx_blocks,
.set_tx_desc_blocks = wl12xx_set_tx_desc_blocks,
.set_tx_desc_data_len = wl12xx_set_tx_desc_data_len,
@@ -1615,9 +1674,17 @@ static struct wlcore_ops wl12xx_ops = {
.set_rx_csum = NULL,
.ap_get_mimo_wide_rate_mask = NULL,
.debugfs_init = wl12xx_debugfs_add_files,
+ .scan_start = wl12xx_scan_start,
+ .scan_stop = wl12xx_scan_stop,
+ .sched_scan_start = wl12xx_sched_scan_start,
+ .sched_scan_stop = wl12xx_scan_sched_scan_stop,
.get_spare_blocks = wl12xx_get_spare_blocks,
.set_key = wl12xx_set_key,
+ .channel_switch = wl12xx_cmd_channel_switch,
.pre_pkt_send = NULL,
+ .set_peer_cap = wl12xx_set_peer_cap,
+ .lnk_high_prio = wl12xx_lnk_high_prio,
+ .lnk_low_prio = wl12xx_lnk_low_prio,
};
static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
@@ -1636,11 +1703,13 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
- struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+ struct wl12xx_platform_data *pdata = pdev_data->pdata;
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
+ wl->num_channels = 1;
wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
@@ -1693,6 +1762,10 @@ static int wl12xx_setup(struct wl1271 *wl)
wl1271_error("Invalid tcxo parameter %s", tcxo_param);
}
+ priv->rx_mem_addr = kmalloc(sizeof(*priv->rx_mem_addr), GFP_KERNEL);
+ if (!priv->rx_mem_addr)
+ return -ENOMEM;
+
return 0;
}
@@ -1703,7 +1776,8 @@ static int wl12xx_probe(struct platform_device *pdev)
int ret;
hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
- WL12XX_AGGR_BUFFER_SIZE);
+ WL12XX_AGGR_BUFFER_SIZE,
+ sizeof(struct wl12xx_event_mailbox));
if (IS_ERR(hw)) {
wl1271_error("can't allocate hw");
ret = PTR_ERR(hw);
@@ -1725,6 +1799,21 @@ out:
return ret;
}
+static int wl12xx_remove(struct platform_device *pdev)
+{
+ struct wl1271 *wl = platform_get_drvdata(pdev);
+ struct wl12xx_priv *priv;
+
+ if (!wl)
+ goto out;
+ priv = wl->priv;
+
+ kfree(priv->rx_mem_addr);
+
+out:
+ return wlcore_remove(pdev);
+}
+
static const struct platform_device_id wl12xx_id_table[] = {
{ "wl12xx", 0 },
{ } /* Terminating Entry */
@@ -1733,7 +1822,7 @@ MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
static struct platform_driver wl12xx_driver = {
.probe = wl12xx_probe,
- .remove = wlcore_remove,
+ .remove = wl12xx_remove,
.id_table = wl12xx_id_table,
.driver = {
.name = "wl12xx_driver",
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
new file mode 100644
index 000000000000..affdb3ec6225
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -0,0 +1,501 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/ieee80211.h>
+#include "scan.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/tx.h"
+
+static int wl1271_get_scan_channels(struct wl1271 *wl,
+ struct cfg80211_scan_request *req,
+ struct basic_scan_channel_params *channels,
+ enum ieee80211_band band, bool passive)
+{
+ struct conf_scan_settings *c = &wl->conf.scan;
+ int i, j;
+ u32 flags;
+
+ for (i = 0, j = 0;
+ i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
+ i++) {
+ flags = req->channels[i]->flags;
+
+ if (!test_bit(i, wl->scan.scanned_ch) &&
+ !(flags & IEEE80211_CHAN_DISABLED) &&
+ (req->channels[i]->band == band) &&
+ /*
+ * In passive scans, we scan all remaining
+ * channels, even if not marked as such.
+ * In active scans, we only scan channels not
+ * marked as passive.
+ */
+ (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
+ wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
+ req->channels[i]->band,
+ req->channels[i]->center_freq);
+ wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
+ req->channels[i]->hw_value,
+ req->channels[i]->flags);
+ wl1271_debug(DEBUG_SCAN,
+ "max_antenna_gain %d, max_power %d",
+ req->channels[i]->max_antenna_gain,
+ req->channels[i]->max_power);
+ wl1271_debug(DEBUG_SCAN, "beacon_found %d",
+ req->channels[i]->beacon_found);
+
+ if (!passive) {
+ channels[j].min_duration =
+ cpu_to_le32(c->min_dwell_time_active);
+ channels[j].max_duration =
+ cpu_to_le32(c->max_dwell_time_active);
+ } else {
+ channels[j].min_duration =
+ cpu_to_le32(c->dwell_time_passive);
+ channels[j].max_duration =
+ cpu_to_le32(c->dwell_time_passive);
+ }
+ channels[j].early_termination = 0;
+ channels[j].tx_power_att = req->channels[i]->max_power;
+ channels[j].channel = req->channels[i]->hw_value;
+
+ memset(&channels[j].bssid_lsb, 0xff, 4);
+ memset(&channels[j].bssid_msb, 0xff, 2);
+
+ /* Mark the channels we already used */
+ set_bit(i, wl->scan.scanned_ch);
+
+ j++;
+ }
+ }
+
+ return j;
+}
+
+#define WL1271_NOTHING_TO_SCAN 1
+
+static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum ieee80211_band band,
+ bool passive, u32 basic_rate)
+{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct wl1271_cmd_scan *cmd;
+ struct wl1271_cmd_trigger_scan_to *trigger;
+ int ret;
+ u16 scan_options = 0;
+
+ /* skip active scans if we don't have SSIDs */
+ if (!passive && wl->scan.req->n_ssids == 0)
+ return WL1271_NOTHING_TO_SCAN;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+ if (!cmd || !trigger) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (wl->conf.scan.split_scan_timeout)
+ scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
+
+ if (passive)
+ scan_options |= WL1271_SCAN_OPT_PASSIVE;
+
+ cmd->params.role_id = wlvif->role_id;
+
+ if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd->params.scan_options = cpu_to_le16(scan_options);
+
+ cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
+ cmd->channels,
+ band, passive);
+ if (cmd->params.n_ch == 0) {
+ ret = WL1271_NOTHING_TO_SCAN;
+ goto out;
+ }
+
+ cmd->params.tx_rate = cpu_to_le32(basic_rate);
+ cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
+ cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
+ cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
+
+ if (band == IEEE80211_BAND_2GHZ)
+ cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
+ else
+ cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
+
+ if (wl->scan.ssid_len && wl->scan.ssid) {
+ cmd->params.ssid_len = wl->scan.ssid_len;
+ memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
+ }
+
+ memcpy(cmd->addr, vif->addr, ETH_ALEN);
+
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->params.role_id, band,
+ wl->scan.ssid, wl->scan.ssid_len,
+ wl->scan.req->ie,
+ wl->scan.req->ie_len, false);
+ if (ret < 0) {
+ wl1271_error("PROBE request template failed");
+ goto out;
+ }
+
+ trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
+ ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
+ sizeof(*trigger), 0);
+ if (ret < 0) {
+ wl1271_error("trigger scan to failed for hw scan");
+ goto out;
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("SCAN failed");
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ kfree(trigger);
+ return ret;
+}
+
+int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct wl1271_cmd_header *cmd = NULL;
+ int ret = 0;
+
+ if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
+ return -EINVAL;
+
+ wl1271_debug(DEBUG_CMD, "cmd scan stop");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
+ sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("cmd stop_scan failed");
+ goto out;
+ }
+out:
+ kfree(cmd);
+ return ret;
+}
+
+void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret = 0;
+ enum ieee80211_band band;
+ u32 rate, mask;
+
+ switch (wl->scan.state) {
+ case WL1271_SCAN_STATE_IDLE:
+ break;
+
+ case WL1271_SCAN_STATE_2GHZ_ACTIVE:
+ band = IEEE80211_BAND_2GHZ;
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, wlvif, band, false, rate);
+ if (ret == WL1271_NOTHING_TO_SCAN) {
+ wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
+ wl1271_scan_stm(wl, wlvif);
+ }
+
+ break;
+
+ case WL1271_SCAN_STATE_2GHZ_PASSIVE:
+ band = IEEE80211_BAND_2GHZ;
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, wlvif, band, true, rate);
+ if (ret == WL1271_NOTHING_TO_SCAN) {
+ if (wl->enable_11a)
+ wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
+ else
+ wl->scan.state = WL1271_SCAN_STATE_DONE;
+ wl1271_scan_stm(wl, wlvif);
+ }
+
+ break;
+
+ case WL1271_SCAN_STATE_5GHZ_ACTIVE:
+ band = IEEE80211_BAND_5GHZ;
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, wlvif, band, false, rate);
+ if (ret == WL1271_NOTHING_TO_SCAN) {
+ wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
+ wl1271_scan_stm(wl, wlvif);
+ }
+
+ break;
+
+ case WL1271_SCAN_STATE_5GHZ_PASSIVE:
+ band = IEEE80211_BAND_5GHZ;
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, wlvif, band, true, rate);
+ if (ret == WL1271_NOTHING_TO_SCAN) {
+ wl->scan.state = WL1271_SCAN_STATE_DONE;
+ wl1271_scan_stm(wl, wlvif);
+ }
+
+ break;
+
+ case WL1271_SCAN_STATE_DONE:
+ wl->scan.failed = false;
+ cancel_delayed_work(&wl->scan_complete_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+ msecs_to_jiffies(0));
+ break;
+
+ default:
+ wl1271_error("invalid scan state");
+ break;
+ }
+
+ if (ret < 0) {
+ cancel_delayed_work(&wl->scan_complete_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+ msecs_to_jiffies(0));
+ }
+}
+
+static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
+ struct wlcore_scan_channels *cmd_channels)
+{
+ memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
+ memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
+ cmd->dfs = cmd_channels->dfs;
+ cmd->n_pactive_ch = cmd_channels->passive_active;
+
+ memcpy(cmd->channels_2, cmd_channels->channels_2,
+ sizeof(cmd->channels_2));
+ memcpy(cmd->channels_5, cmd_channels->channels_5,
+ sizeof(cmd->channels_2));
+ /* channels_4 are not supported, so no need to copy them */
+}
+
+int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ struct wl1271_cmd_sched_scan_config *cfg = NULL;
+ struct wlcore_scan_channels *cfg_channels = NULL;
+ struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+ int i, ret;
+ bool force_passive = !req->n_ssids;
+
+ wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ cfg->role_id = wlvif->role_id;
+ cfg->rssi_threshold = c->rssi_threshold;
+ cfg->snr_threshold = c->snr_threshold;
+ cfg->n_probe_reqs = c->num_probe_reqs;
+ /* cycles set to 0 means infinite (until manually stopped) */
+ cfg->cycles = 0;
+ /* report APs when at least 1 is found */
+ cfg->report_after = 1;
+ /* don't stop scanning automatically when something is found */
+ cfg->terminate = 0;
+ cfg->tag = WL1271_SCAN_DEFAULT_TAG;
+ /* don't filter on BSS type */
+ cfg->bss_type = SCAN_BSS_TYPE_ANY;
+ /* currently NL80211 supports only a single interval */
+ for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
+ cfg->intervals[i] = cpu_to_le32(req->interval);
+
+ cfg->ssid_len = 0;
+ ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
+ if (ret < 0)
+ goto out;
+
+ cfg->filter_type = ret;
+
+ wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
+
+ cfg_channels = kzalloc(sizeof(*cfg_channels), GFP_KERNEL);
+ if (!cfg_channels) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!wlcore_set_scan_chan_params(wl, cfg_channels, req->channels,
+ req->n_channels, req->n_ssids,
+ SCAN_TYPE_PERIODIC)) {
+ wl1271_error("scan channel list is empty");
+ ret = -EINVAL;
+ goto out;
+ }
+ wl12xx_adjust_channels(cfg, cfg_channels);
+
+ if (!force_passive && cfg->active[0]) {
+ u8 band = IEEE80211_BAND_2GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ wlvif->role_id, band,
+ req->ssids[0].ssid,
+ req->ssids[0].ssid_len,
+ ies->ie[band],
+ ies->len[band], true);
+ if (ret < 0) {
+ wl1271_error("2.4GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ if (!force_passive && cfg->active[1]) {
+ u8 band = IEEE80211_BAND_5GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ wlvif->role_id, band,
+ req->ssids[0].ssid,
+ req->ssids[0].ssid_len,
+ ies->ie[band],
+ ies->len[band], true);
+ if (ret < 0) {
+ wl1271_error("5GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
+
+ ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
+ sizeof(*cfg), 0);
+ if (ret < 0) {
+ wl1271_error("SCAN configuration failed");
+ goto out;
+ }
+out:
+ kfree(cfg_channels);
+ kfree(cfg);
+ return ret;
+}
+
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct wl1271_cmd_sched_scan_start *start;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
+
+ if (wlvif->bss_type != BSS_TYPE_STA_BSS)
+ return -EOPNOTSUPP;
+
+ if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ return -EBUSY;
+
+ start = kzalloc(sizeof(*start), GFP_KERNEL);
+ if (!start)
+ return -ENOMEM;
+
+ start->role_id = wlvif->role_id;
+ start->tag = WL1271_SCAN_DEFAULT_TAG;
+
+ ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
+ sizeof(*start), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send scan start command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(start);
+ return ret;
+}
+
+int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ int ret;
+
+ ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
+ if (ret < 0)
+ return ret;
+
+ return wl1271_scan_sched_scan_start(wl, wlvif);
+}
+
+void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct wl1271_cmd_sched_scan_stop *stop;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
+
+ /* FIXME: what to do if alloc'ing to stop fails? */
+ stop = kzalloc(sizeof(*stop), GFP_KERNEL);
+ if (!stop) {
+ wl1271_error("failed to alloc memory to send sched scan stop");
+ return;
+ }
+
+ stop->role_id = wlvif->role_id;
+ stop->tag = WL1271_SCAN_DEFAULT_TAG;
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
+ sizeof(*stop), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send sched scan stop command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(stop);
+}
+
+int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req)
+{
+ wl1271_scan_stm(wl, wlvif);
+ return 0;
+}
+
+void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ wl1271_scan_stm(wl, wlvif);
+}
diff --git a/drivers/net/wireless/ti/wl12xx/scan.h b/drivers/net/wireless/ti/wl12xx/scan.h
new file mode 100644
index 000000000000..264af7ac2785
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/scan.h
@@ -0,0 +1,140 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_SCAN_H__
+#define __WL12XX_SCAN_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/scan.h"
+
+#define WL12XX_MAX_CHANNELS_5GHZ 23
+
+struct basic_scan_params {
+ /* Scan option flags (WL1271_SCAN_OPT_*) */
+ __le16 scan_options;
+ u8 role_id;
+ /* Number of scan channels in the list (maximum 30) */
+ u8 n_ch;
+ /* This field indicates the number of probe requests to send
+ per channel for an active scan */
+ u8 n_probe_reqs;
+ u8 tid_trigger;
+ u8 ssid_len;
+ u8 use_ssid_list;
+
+ /* Rate bit field for sending the probes */
+ __le32 tx_rate;
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ /* Band to scan */
+ u8 band;
+
+ u8 scan_tag;
+ u8 padding2[2];
+} __packed;
+
+struct basic_scan_channel_params {
+ /* Duration in TU to wait for frames on a channel for active scan */
+ __le32 min_duration;
+ __le32 max_duration;
+ __le32 bssid_lsb;
+ __le16 bssid_msb;
+ u8 early_termination;
+ u8 tx_power_att;
+ u8 channel;
+ /* FW internal use only! */
+ u8 dfs_candidate;
+ u8 activity_detected;
+ u8 pad;
+} __packed;
+
+struct wl1271_cmd_scan {
+ struct wl1271_cmd_header header;
+
+ struct basic_scan_params params;
+ struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
+
+ /* src mac address */
+ u8 addr[ETH_ALEN];
+ u8 padding[2];
+} __packed;
+
+struct wl1271_cmd_sched_scan_config {
+ struct wl1271_cmd_header header;
+
+ __le32 intervals[SCAN_MAX_CYCLE_INTERVALS];
+
+ s8 rssi_threshold; /* for filtering (in dBm) */
+ s8 snr_threshold; /* for filtering (in dB) */
+
+ u8 cycles; /* maximum number of scan cycles */
+ u8 report_after; /* report when this number of results is received */
+ u8 terminate; /* stop scanning after reporting */
+
+ u8 tag;
+ u8 bss_type; /* for filtering */
+ u8 filter_type;
+
+ u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+
+ u8 n_probe_reqs; /* Number of probes requests per channel */
+
+ u8 passive[SCAN_MAX_BANDS];
+ u8 active[SCAN_MAX_BANDS];
+
+ u8 dfs;
+
+ u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
+ channels in BG band */
+ u8 role_id;
+ u8 padding[1];
+ struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+ struct conn_scan_ch_params channels_5[WL12XX_MAX_CHANNELS_5GHZ];
+ struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+} __packed;
+
+struct wl1271_cmd_sched_scan_start {
+ struct wl1271_cmd_header header;
+
+ u8 tag;
+ u8 role_id;
+ u8 padding[2];
+} __packed;
+
+struct wl1271_cmd_sched_scan_stop {
+ struct wl1271_cmd_header header;
+
+ u8 tag;
+ u8 role_id;
+ u8 padding[2];
+} __packed;
+
+int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req);
+int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+#endif
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 7182bbf6625d..d4552857480c 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -24,19 +24,37 @@
#include "conf.h"
-/* minimum FW required for driver for wl127x */
+/* WiLink 6/7 chip IDs */
+#define CHIP_ID_127X_PG10 (0x04030101)
+#define CHIP_ID_127X_PG20 (0x04030111)
+#define CHIP_ID_128X_PG10 (0x05030101)
+#define CHIP_ID_128X_PG20 (0x05030111)
+
+/* FW chip version for wl127x */
#define WL127X_CHIP_VER 6
-#define WL127X_IFTYPE_VER 3
-#define WL127X_MAJOR_VER 10
-#define WL127X_SUBTYPE_VER 2
-#define WL127X_MINOR_VER 115
+/* minimum single-role FW version for wl127x */
+#define WL127X_IFTYPE_SR_VER 3
+#define WL127X_MAJOR_SR_VER 10
+#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
+#define WL127X_MINOR_SR_VER 115
+/* minimum multi-role FW version for wl127x */
+#define WL127X_IFTYPE_MR_VER 5
+#define WL127X_MAJOR_MR_VER 7
+#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
+#define WL127X_MINOR_MR_VER 115
-/* minimum FW required for driver for wl128x */
+/* FW chip version for wl128x */
#define WL128X_CHIP_VER 7
-#define WL128X_IFTYPE_VER 3
-#define WL128X_MAJOR_VER 10
-#define WL128X_SUBTYPE_VER 2
-#define WL128X_MINOR_VER 115
+/* minimum single-role FW version for wl128x */
+#define WL128X_IFTYPE_SR_VER 3
+#define WL128X_MAJOR_SR_VER 10
+#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
+#define WL128X_MINOR_SR_VER 115
+/* minimum multi-role FW version for wl128x */
+#define WL128X_IFTYPE_MR_VER 5
+#define WL128X_MAJOR_MR_VER 7
+#define WL128X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
+#define WL128X_MINOR_MR_VER 42
#define WL12XX_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -55,6 +73,8 @@ struct wl12xx_priv {
int ref_clock;
int tcxo_clock;
+
+ struct wl127x_rx_mem_pool_addr *rx_mem_addr;
};
#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/Makefile b/drivers/net/wireless/ti/wl18xx/Makefile
index 67c098734c7f..ae2b81735785 100644
--- a/drivers/net/wireless/ti/wl18xx/Makefile
+++ b/drivers/net/wireless/ti/wl18xx/Makefile
@@ -1,3 +1,3 @@
-wl18xx-objs = main.o acx.o tx.o io.o debugfs.o
+wl18xx-objs = main.o acx.o tx.o io.o debugfs.o scan.o cmd.o event.o
obj-$(CONFIG_WL18XX) += wl18xx.o
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index 72840e23bf59..a169bb5a5dbf 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -75,7 +75,7 @@ int wl18xx_acx_set_checksum_state(struct wl1271 *wl)
acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED;
- ret = wl1271_cmd_configure(wl, ACX_CHECKSUM_CONFIG, acx, sizeof(*acx));
+ ret = wl1271_cmd_configure(wl, ACX_CSUM_CONFIG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("failed to set Tx checksum state: %d", ret);
goto out;
@@ -109,3 +109,88 @@ out:
kfree(acx);
return ret;
}
+
+int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide)
+{
+ struct wlcore_peer_ht_operation_mode *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx peer ht operation mode hlid %d bw %d",
+ hlid, wide);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->hlid = hlid;
+ acx->bandwidth = wide ? WLCORE_BANDWIDTH_40MHZ : WLCORE_BANDWIDTH_20MHZ;
+
+ ret = wl1271_cmd_configure(wl, ACX_PEER_HT_OPERATION_MODE_CFG, acx,
+ sizeof(*acx));
+
+ if (ret < 0) {
+ wl1271_warning("acx peer ht operation mode failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+
+}
+
+/*
+ * this command is basically the same as wl1271_acx_ht_capabilities,
+ * with the addition of supported rates. they should be unified in
+ * the next fw api change
+ */
+int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid)
+{
+ struct wlcore_acx_peer_cap *acx;
+ int ret = 0;
+ u32 ht_capabilites = 0;
+
+ wl1271_debug(DEBUG_ACX,
+ "acx set cap ht_supp: %d ht_cap: %d rates: 0x%x",
+ ht_cap->ht_supported, ht_cap->cap, rate_set);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (allow_ht_operation && ht_cap->ht_supported) {
+ /* no need to translate capabilities - use the spec values */
+ ht_capabilites = ht_cap->cap;
+
+ /*
+ * this bit is not employed by the spec but only by FW to
+ * indicate peer HT support
+ */
+ ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION;
+
+ /* get data from A-MPDU parameters field */
+ acx->ampdu_max_length = ht_cap->ampdu_factor;
+ acx->ampdu_min_spacing = ht_cap->ampdu_density;
+ }
+
+ acx->hlid = hlid;
+ acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+ acx->supported_rates = cpu_to_le32(rate_set);
+
+ ret = wl1271_cmd_configure(wl, ACX_PEER_CAP, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ht capabilities setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
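A minimal usage sketch for the peer-capability ACX added above, assuming the caller already holds a mac80211 station entry; the helper name and call site below are illustrative only and are not part of this patch.

static int example_set_peer_cap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct ieee80211_sta *sta, u32 rates)
{
	/* forward the spec HT capabilities and the supported rate set */
	bool allow_ht = sta->ht_cap.ht_supported;

	return wl18xx_acx_set_peer_cap(wl, &sta->ht_cap, allow_ht,
				       rates, wlvif->sta.hlid);
}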
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index e2609a6b7341..0e636def1217 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -26,7 +26,13 @@
#include "../wlcore/acx.h"
enum {
- ACX_CLEAR_STATISTICS = 0x0047,
+ ACX_NS_IPV6_FILTER = 0x0050,
+ ACX_PEER_HT_OPERATION_MODE_CFG = 0x0051,
+ ACX_CSUM_CONFIG = 0x0052,
+ ACX_SIM_CONFIG = 0x0053,
+ ACX_CLEAR_STATISTICS = 0x0054,
+ ACX_AUTO_RX_STREAMING = 0x0055,
+ ACX_PEER_CAP = 0x0056
};
/* numbers of bits the length field takes (add 1 for the actual number) */
@@ -278,10 +284,57 @@ struct wl18xx_acx_clear_statistics {
struct acx_header header;
};
+enum wlcore_bandwidth {
+ WLCORE_BANDWIDTH_20MHZ,
+ WLCORE_BANDWIDTH_40MHZ,
+};
+
+struct wlcore_peer_ht_operation_mode {
+ struct acx_header header;
+
+ u8 hlid;
+ u8 bandwidth; /* enum wlcore_bandwidth */
+ u8 padding[2];
+};
+
+/*
+ * ACX_PEER_CAP
+ * this struct is very similar to wl1271_acx_ht_capabilities, with the
+ * addition of supported rates
+ */
+struct wlcore_acx_peer_cap {
+ struct acx_header header;
+
+ /* bitmask of capability bits supported by the peer */
+ __le32 ht_capabilites;
+
+ /* rates supported by the remote peer */
+ __le32 supported_rates;
+
+ /* Indicates to which link these capabilities apply. */
+ u8 hlid;
+
+ /*
+ * This is the maximum A-MPDU length supported by the AP. The FW may not
+ * exceed this length when sending A-MPDUs
+ */
+ u8 ampdu_max_length;
+
+ /* This is the minimal spacing required when sending A-MPDUs to the AP */
+ u8 ampdu_min_spacing;
+
+ u8 padding;
+} __packed;
+
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
u32 len_field_size);
int wl18xx_acx_set_checksum_state(struct wl1271 *wl);
int wl18xx_acx_clear_statistics(struct wl1271 *wl);
+int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide);
+int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid);
#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
new file mode 100644
index 000000000000..1d1f6cc7a50a
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -0,0 +1,80 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/hw_ops.h"
+
+#include "cmd.h"
+
+int wl18xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct wl18xx_cmd_channel_switch *cmd;
+ u32 supported_rates;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "cmd channel switch");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->role_id = wlvif->role_id;
+ cmd->channel = ch_switch->channel->hw_value;
+ cmd->switch_time = ch_switch->count;
+ cmd->stop_tx = ch_switch->block_tx;
+
+ switch (ch_switch->channel->band) {
+ case IEEE80211_BAND_2GHZ:
+ cmd->band = WLCORE_BAND_2_4GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ cmd->band = WLCORE_BAND_5GHZ;
+ break;
+ default:
+ wl1271_error("invalid channel switch band: %d",
+ ch_switch->channel->band);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
+ wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
+ if (wlvif->p2p)
+ supported_rates &= ~CONF_TX_CCK_RATES;
+ cmd->local_supported_rates = cpu_to_le32(supported_rates);
+ cmd->channel_type = wlvif->channel_type;
+
+ ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send channel switch command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.h b/drivers/net/wireless/ti/wl18xx/cmd.h
new file mode 100644
index 000000000000..6687d10899ac
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/cmd.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_CMD_H__
+#define __WL18XX_CMD_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/acx.h"
+
+struct wl18xx_cmd_channel_switch {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+
+ /* The new serving channel */
+ u8 channel;
+ /* Relative time of the serving channel switch in TBTT units */
+ u8 switch_time;
+ /* Stop the role TX, should expect it after radar detection */
+ u8 stop_tx;
+
+ __le32 local_supported_rates;
+
+ u8 channel_type;
+ u8 band;
+
+ u8 padding[2];
+} __packed;
+
+int wl18xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch);
+
+#endif
diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h
index 4d426cc20274..e34302e3b51d 100644
--- a/drivers/net/wireless/ti/wl18xx/conf.h
+++ b/drivers/net/wireless/ti/wl18xx/conf.h
@@ -23,20 +23,21 @@
#define __WL18XX_CONF_H__
#define WL18XX_CONF_MAGIC 0x10e100ca
-#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0003)
+#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0006)
#define WL18XX_CONF_MASK 0x0000ffff
#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \
sizeof(struct wl18xx_priv_conf))
#define NUM_OF_CHANNELS_11_ABG 150
#define NUM_OF_CHANNELS_11_P 7
-#define WL18XX_NUM_OF_SUB_BANDS 9
#define SRF_TABLE_LEN 16
#define PIN_MUXING_SIZE 2
+#define WL18XX_TRACE_LOSS_GAPS_TX 10
+#define WL18XX_TRACE_LOSS_GAPS_RX 18
struct wl18xx_mac_and_phy_params {
u8 phy_standalone;
- u8 rdl;
+ u8 spare0;
u8 enable_clpc;
u8 enable_tx_low_pwr_on_siso_rdl;
u8 auto_detect;
@@ -69,18 +70,27 @@ struct wl18xx_mac_and_phy_params {
u8 pwr_limit_reference_11_abg;
u8 per_chan_pwr_limit_arr_11p[NUM_OF_CHANNELS_11_P];
u8 pwr_limit_reference_11p;
- u8 per_sub_band_tx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
- u8 per_sub_band_rx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
+ u8 spare1;
+ u8 per_chan_bo_mode_11_abg[13];
+ u8 per_chan_bo_mode_11_p[4];
u8 primary_clock_setting_time;
u8 clock_valid_on_wake_up;
u8 secondary_clock_setting_time;
u8 board_type;
/* enable point saturation */
u8 psat;
- /* low/medium/high Tx power in dBm */
+ /* low/medium/high Tx power in dBm for STA-HP BG */
s8 low_power_val;
s8 med_power_val;
s8 high_power_val;
+ s8 per_sub_band_tx_trace_loss[WL18XX_TRACE_LOSS_GAPS_TX];
+ s8 per_sub_band_rx_trace_loss[WL18XX_TRACE_LOSS_GAPS_RX];
+ u8 tx_rf_margin;
+ /* low/medium/high Tx power in dBm for other role */
+ s8 low_power_val_2nd;
+ s8 med_power_val_2nd;
+ s8 high_power_val_2nd;
+
u8 padding[1];
} __packed;
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
new file mode 100644
index 000000000000..c9199d7804c6
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "event.h"
+#include "scan.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+
+int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout)
+{
+ u32 local_event;
+
+ switch (event) {
+ case WLCORE_EVENT_PEER_REMOVE_COMPLETE:
+ local_event = PEER_REMOVE_COMPLETE_EVENT_ID;
+ break;
+
+ case WLCORE_EVENT_DFS_CONFIG_COMPLETE:
+ local_event = DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+ break;
+
+ default:
+ /* event not implemented */
+ return 0;
+ }
+ return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
+}
+
+int wl18xx_process_mailbox_events(struct wl1271 *wl)
+{
+ struct wl18xx_event_mailbox *mbox = wl->mbox;
+ u32 vector;
+
+ vector = le32_to_cpu(mbox->events_vector);
+ wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector);
+
+ if (vector & SCAN_COMPLETE_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT, "scan results: %d",
+ mbox->number_of_scan_results);
+
+ if (wl->scan_wlvif)
+ wl18xx_scan_completed(wl, wl->scan_wlvif);
+ }
+
+ if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT,
+ "PERIODIC_SCAN_REPORT_EVENT (results %d)",
+ mbox->number_of_sched_scan_results);
+
+ wlcore_scan_sched_scan_results(wl);
+ }
+
+ if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
+ wlcore_event_sched_scan_completed(wl, 1);
+
+ if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
+ wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
+
+ if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
+ wlcore_event_ba_rx_constraint(wl,
+ le16_to_cpu(mbox->rx_ba_role_id_bitmap),
+ le16_to_cpu(mbox->rx_ba_allowed_bitmap));
+
+ if (vector & BSS_LOSS_EVENT_ID)
+ wlcore_event_beacon_loss(wl,
+ le16_to_cpu(mbox->bss_loss_bitmap));
+
+ if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID)
+ wlcore_event_channel_switch(wl,
+ le16_to_cpu(mbox->channel_switch_role_id_bitmap),
+ true);
+
+ if (vector & DUMMY_PACKET_EVENT_ID)
+ wlcore_event_dummy_packet(wl);
+
+ /*
+ * "TX retries exceeded" has a different meaning according to mode.
+ * In AP mode the offending station is disconnected.
+ */
+ if (vector & MAX_TX_FAILURE_EVENT_ID)
+ wlcore_event_max_tx_failure(wl,
+ le32_to_cpu(mbox->tx_retry_exceeded_bitmap));
+
+ if (vector & INACTIVE_STA_EVENT_ID)
+ wlcore_event_inactive_sta(wl,
+ le32_to_cpu(mbox->inactive_sta_bitmap));
+
+ if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
+ wlcore_event_roc_complete(wl);
+
+ return 0;
+}
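A small sketch of the dispatch pattern used above: each handler runs only when its bit is set in the little-endian events vector. The helper name is hypothetical and only illustrates the test.

static bool wl18xx_event_pending(struct wl18xx_event_mailbox *mbox,
				 u32 event_bit)
{
	/* events_vector arrives little-endian from the FW mailbox */
	return !!(le32_to_cpu(mbox->events_vector) & event_bit);
}

/* example: wl18xx_event_pending(mbox, SCAN_COMPLETE_EVENT_ID) */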
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
new file mode 100644
index 000000000000..398f3d2c0a6c
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -0,0 +1,77 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_EVENT_H__
+#define __WL18XX_EVENT_H__
+
+#include "../wlcore/wlcore.h"
+
+enum {
+ SCAN_COMPLETE_EVENT_ID = BIT(8),
+ RADAR_DETECTED_EVENT_ID = BIT(9),
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(10),
+ BSS_LOSS_EVENT_ID = BIT(11),
+ MAX_TX_FAILURE_EVENT_ID = BIT(12),
+ DUMMY_PACKET_EVENT_ID = BIT(13),
+ INACTIVE_STA_EVENT_ID = BIT(14),
+ PEER_REMOVE_COMPLETE_EVENT_ID = BIT(15),
+ PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(16),
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(17),
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18),
+ DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19),
+ PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20),
+};
+
+struct wl18xx_event_mailbox {
+ __le32 events_vector;
+
+ u8 number_of_scan_results;
+ u8 number_of_sched_scan_results;
+
+ __le16 channel_switch_role_id_bitmap;
+
+ s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
+
+ /* bitmap of removed links */
+ __le32 hlid_removed_bitmap;
+
+ /* rx ba constraint */
+ __le16 rx_ba_role_id_bitmap; /* 0xfff means any role. */
+ __le16 rx_ba_allowed_bitmap;
+
+ /* bitmap of roc completed (by role id) */
+ __le16 roc_completed_bitmap;
+
+ /* bitmap of stations (by role id) with bss loss */
+ __le16 bss_loss_bitmap;
+
+ /* bitmap of stations (by HLID) which exceeded max tx retries */
+ __le32 tx_retry_exceeded_bitmap;
+
+ /* bitmap of inactive stations (by HLID) */
+ __le32 inactive_sta_bitmap;
+} __packed;
+
+int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout);
+int wl18xx_process_mailbox_events(struct wl1271 *wl);
+
+#endif
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 8d8c1f8c63b7..da3ef1b10a9c 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -34,10 +34,13 @@
#include "reg.h"
#include "conf.h"
+#include "cmd.h"
#include "acx.h"
#include "tx.h"
#include "wl18xx.h"
#include "io.h"
+#include "scan.h"
+#include "event.h"
#include "debugfs.h"
#define WL18XX_RX_CHECKSUM_MASK 0x40
@@ -334,6 +337,8 @@ static struct wlcore_conf wl18xx_conf = {
.tmpl_short_retry_limit = 10,
.tmpl_long_retry_limit = 10,
.tx_watchdog_timeout = 5000,
+ .slow_link_thold = 3,
+ .fast_link_thold = 30,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -391,8 +396,10 @@ static struct wlcore_conf wl18xx_conf = {
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
- .min_dwell_time_passive = 100000,
- .max_dwell_time_passive = 100000,
+ .min_dwell_time_active_long = 25000,
+ .max_dwell_time_active_long = 50000,
+ .dwell_time_passive = 100000,
+ .dwell_time_dfs = 150000,
.num_probe_reqs = 2,
.split_scan_timeout = 50000,
},
@@ -489,6 +496,10 @@ static struct wlcore_conf wl18xx_conf = {
.increase_time = 1,
.window_size = 16,
},
+ .recovery = {
+ .bug_on_recovery = 0,
+ .no_recovery = 0,
+ },
};
static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
@@ -501,7 +512,6 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.clock_valid_on_wake_up = 0x00,
.secondary_clock_setting_time = 0x05,
.board_type = BOARD_TYPE_HDK_18XX,
- .rdl = 0x01,
.auto_detect = 0x00,
.dedicated_fem = FEM_NONE,
.low_band_component = COMPONENT_3_WAY_SWITCH,
@@ -517,14 +527,44 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.enable_clpc = 0x00,
.enable_tx_low_pwr_on_siso_rdl = 0x00,
.rx_profile = 0x00,
- .pwr_limit_reference_11_abg = 0xc8,
+ .pwr_limit_reference_11_abg = 0x64,
+ .per_chan_pwr_limit_arr_11abg = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .pwr_limit_reference_11p = 0x64,
+ .per_chan_bo_mode_11_abg = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00 },
+ .per_chan_bo_mode_11_p = { 0x00, 0x00, 0x00, 0x00 },
+ .per_chan_pwr_limit_arr_11p = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff },
.psat = 0,
- .low_power_val = 0x00,
- .med_power_val = 0x0a,
- .high_power_val = 0x1e,
+ .low_power_val = 0x08,
+ .med_power_val = 0x12,
+ .high_power_val = 0x18,
+ .low_power_val_2nd = 0x05,
+ .med_power_val_2nd = 0x0a,
+ .high_power_val_2nd = 0x14,
.external_pa_dc2dc = 0,
- .number_of_assembled_ant2_4 = 1,
+ .number_of_assembled_ant2_4 = 2,
.number_of_assembled_ant5 = 1,
+ .tx_rf_margin = 1,
},
};
@@ -595,7 +635,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
};
/* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin"
static int wl18xx_identify_chip(struct wl1271 *wl)
{
@@ -608,15 +648,18 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
wl->sr_fw_name = WL18XX_FW_NAME;
/* wl18xx uses the same firmware for PLT */
wl->plt_fw_name = WL18XX_FW_NAME;
- wl->quirks |= WLCORE_QUIRK_NO_ELP |
- WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
+ wl->quirks |= WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN |
- WLCORE_QUIRK_TX_PAD_LAST_FRAME;
-
- wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER, WL18XX_IFTYPE_VER,
- WL18XX_MAJOR_VER, WL18XX_SUBTYPE_VER,
- WL18XX_MINOR_VER);
+ WLCORE_QUIRK_TX_PAD_LAST_FRAME |
+ WLCORE_QUIRK_REGDOMAIN_CONF |
+ WLCORE_QUIRK_DUAL_PROBE_TMPL;
+
+ wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER,
+ WL18XX_IFTYPE_VER, WL18XX_MAJOR_VER,
+ WL18XX_SUBTYPE_VER, WL18XX_MINOR_VER,
+ /* there's no separate multi-role FW */
+ 0, 0, 0, 0);
break;
case CHIP_ID_185x_PG10:
wl1271_warning("chip id 0x%x (185x PG10) is deprecated",
@@ -630,6 +673,11 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
goto out;
}
+ wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
+ wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+ wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
+ wl->sched_scan_templ_id_5 = CMD_TEMPL_PROBE_REQ_5_PERIODIC;
+ wl->max_channels_5 = WL18XX_MAX_CHANNELS_5GHZ;
out:
return ret;
}
@@ -843,6 +891,20 @@ static int wl18xx_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
+ wl->event_mask = BSS_LOSS_EVENT_ID |
+ SCAN_COMPLETE_EVENT_ID |
+ RSSI_SNR_TRIGGER_0_EVENT_ID |
+ PERIODIC_SCAN_COMPLETE_EVENT_ID |
+ PERIODIC_SCAN_REPORT_EVENT_ID |
+ DUMMY_PACKET_EVENT_ID |
+ PEER_REMOVE_COMPLETE_EVENT_ID |
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID |
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
+ INACTIVE_STA_EVENT_ID |
+ MAX_TX_FAILURE_EVENT_ID |
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID |
+ DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
@@ -964,7 +1026,7 @@ static int wl18xx_hw_init(struct wl1271 *wl)
/* (re)init private structures. Relevant on recovery as well. */
priv->last_fw_rls_idx = 0;
- priv->extra_spare_vif_count = 0;
+ priv->extra_spare_key_count = 0;
/* set the default amount of spare blocks in the bitmap */
ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE);
@@ -1022,7 +1084,12 @@ static bool wl18xx_is_mimo_supported(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
- return priv->conf.phy.number_of_assembled_ant2_4 >= 2;
+ /* only support MIMO with multiple antennas, and when SISO
+ * is not forced through config
+ */
+ return (priv->conf.phy.number_of_assembled_ant2_4 >= 2) &&
+ (priv->conf.ht.mode != HT_MODE_WIDE) &&
+ (priv->conf.ht.mode != HT_MODE_SISO20);
}
/*
@@ -1223,8 +1290,8 @@ static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
{
struct wl18xx_priv *priv = wl->priv;
- /* If we have VIFs requiring extra spare, indulge them */
- if (priv->extra_spare_vif_count)
+ /* If we have keys requiring extra spare, indulge them */
+ if (priv->extra_spare_key_count)
return WL18XX_TX_HW_EXTRA_BLOCK_SPARE;
return WL18XX_TX_HW_BLOCK_SPARE;
@@ -1236,42 +1303,48 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wl18xx_priv *priv = wl->priv;
- bool change_spare = false;
+ bool change_spare = false, special_enc;
int ret;
+ wl1271_debug(DEBUG_CRYPT, "extra spare keys before: %d",
+ priv->extra_spare_key_count);
+
+ special_enc = key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
+ key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
+
+ ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
+ if (ret < 0)
+ goto out;
+
/*
- * when adding the first or removing the last GEM/TKIP interface,
+ * when adding the first or removing the last GEM/TKIP key,
* we have to adjust the number of spare blocks.
*/
- change_spare = (key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
- key_conf->cipher == WLAN_CIPHER_SUITE_TKIP) &&
- ((priv->extra_spare_vif_count == 0 && cmd == SET_KEY) ||
- (priv->extra_spare_vif_count == 1 && cmd == DISABLE_KEY));
+ if (special_enc) {
+ if (cmd == SET_KEY) {
+ /* first key */
+ change_spare = (priv->extra_spare_key_count == 0);
+ priv->extra_spare_key_count++;
+ } else if (cmd == DISABLE_KEY) {
+ /* last key */
+ change_spare = (priv->extra_spare_key_count == 1);
+ priv->extra_spare_key_count--;
+ }
+ }
- /* no need to change spare - just regular set_key */
- if (!change_spare)
- return wlcore_set_key(wl, cmd, vif, sta, key_conf);
+ wl1271_debug(DEBUG_CRYPT, "extra spare keys after: %d",
+ priv->extra_spare_key_count);
- ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
- if (ret < 0)
+ if (!change_spare)
goto out;
/* key is now set, change the spare blocks */
- if (cmd == SET_KEY) {
+ if (priv->extra_spare_key_count)
ret = wl18xx_set_host_cfg_bitmap(wl,
WL18XX_TX_HW_EXTRA_BLOCK_SPARE);
- if (ret < 0)
- goto out;
-
- priv->extra_spare_vif_count++;
- } else {
+ else
ret = wl18xx_set_host_cfg_bitmap(wl,
WL18XX_TX_HW_BLOCK_SPARE);
- if (ret < 0)
- goto out;
-
- priv->extra_spare_vif_count--;
- }
out:
return ret;
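The rewritten set_key path above only reprograms the host config bitmap on the 0->1 and 1->0 transitions of the GEM/TKIP key count. A sketch of that counting rule in isolation, with a hypothetical helper name:

static bool wl18xx_spare_change_needed(int *key_count, bool adding)
{
	bool change;

	if (adding) {
		change = (*key_count == 0);	/* first GEM/TKIP key */
		(*key_count)++;
	} else {
		change = (*key_count == 1);	/* last GEM/TKIP key */
		(*key_count)--;
	}

	return change;
}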
@@ -1296,6 +1369,92 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
return buf_offset;
}
+static void wl18xx_sta_rc_update(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ bool wide = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update wide %d", wide);
+
+ if (!(changed & IEEE80211_RC_BW_CHANGED))
+ return;
+
+ mutex_lock(&wl->mutex);
+
+ /* sanity */
+ if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
+ goto out;
+
+ /* ignore the change before association */
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ goto out;
+
+ /*
+ * If we started out as wide, we can change the operation mode. If we
+ * thought this was a 20MHz AP, we have to reconnect
+ */
+ if (wlvif->sta.role_chan_type == NL80211_CHAN_HT40MINUS ||
+ wlvif->sta.role_chan_type == NL80211_CHAN_HT40PLUS)
+ wl18xx_acx_peer_ht_operation_mode(wl, wlvif->sta.hlid, wide);
+ else
+ ieee80211_connection_loss(wl12xx_wlvif_to_vif(wlvif));
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl18xx_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid)
+{
+ return wl18xx_acx_set_peer_cap(wl, ht_cap, allow_ht_operation,
+ rate_set, hlid);
+}
+
+static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ u8 thold;
+ struct wl18xx_fw_status_priv *status_priv =
+ (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+
+ /* suspended links are never high priority */
+ if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
+ return false;
+
+ /* the priority thresholds are taken from FW */
+ if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) &&
+ !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map))
+ thold = status_priv->tx_fast_link_prio_threshold;
+ else
+ thold = status_priv->tx_slow_link_prio_threshold;
+
+ return lnk->allocated_pkts < thold;
+}
+
+static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ u8 thold;
+ struct wl18xx_fw_status_priv *status_priv =
+ (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+
+ if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
+ thold = status_priv->tx_suspend_threshold;
+ else if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) &&
+ !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map))
+ thold = status_priv->tx_fast_stop_threshold;
+ else
+ thold = status_priv->tx_slow_stop_threshold;
+
+ return lnk->allocated_pkts < thold;
+}
+
static int wl18xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl18xx_ops = {
@@ -1305,6 +1464,8 @@ static struct wlcore_ops wl18xx_ops = {
.plt_init = wl18xx_plt_init,
.trigger_cmd = wl18xx_trigger_cmd,
.ack_event = wl18xx_ack_event,
+ .wait_for_event = wl18xx_wait_for_event,
+ .process_mailbox_events = wl18xx_process_mailbox_events,
.calc_tx_blocks = wl18xx_calc_tx_blocks,
.set_tx_desc_blocks = wl18xx_set_tx_desc_blocks,
.set_tx_desc_data_len = wl18xx_set_tx_desc_data_len,
@@ -1320,16 +1481,26 @@ static struct wlcore_ops wl18xx_ops = {
.ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask,
.get_mac = wl18xx_get_mac,
.debugfs_init = wl18xx_debugfs_add_files,
+ .scan_start = wl18xx_scan_start,
+ .scan_stop = wl18xx_scan_stop,
+ .sched_scan_start = wl18xx_sched_scan_start,
+ .sched_scan_stop = wl18xx_scan_sched_scan_stop,
.handle_static_data = wl18xx_handle_static_data,
.get_spare_blocks = wl18xx_get_spare_blocks,
.set_key = wl18xx_set_key,
+ .channel_switch = wl18xx_cmd_channel_switch,
.pre_pkt_send = wl18xx_pre_pkt_send,
+ .sta_rc_update = wl18xx_sta_rc_update,
+ .set_peer_cap = wl18xx_set_peer_cap,
+ .lnk_high_prio = wl18xx_lnk_high_prio,
+ .lnk_low_prio = wl18xx_lnk_low_prio,
};
/* HT cap appropriate for wide channels in 2Ghz */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1343,7 +1514,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
/* HT cap appropriate for wide channels in 5Ghz */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1356,7 +1528,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
/* HT cap appropriate for SISO 20 */
static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
- .cap = IEEE80211_HT_CAP_SGI_20,
+ .cap = IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1369,7 +1542,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
/* HT cap appropriate for MIMO rates in 20mhz channel */
static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
- .cap = IEEE80211_HT_CAP_SGI_20,
+ .cap = IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1387,7 +1561,8 @@ static int wl18xx_setup(struct wl1271 *wl)
wl->rtable = wl18xx_rtable;
wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
- wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+ wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
+ wl->num_channels = 2;
wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
@@ -1506,7 +1681,8 @@ static int wl18xx_probe(struct platform_device *pdev)
int ret;
hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
- WL18XX_AGGR_BUFFER_SIZE);
+ WL18XX_AGGR_BUFFER_SIZE,
+ sizeof(struct wl18xx_event_mailbox));
if (IS_ERR(hw)) {
wl1271_error("can't allocate hw");
ret = PTR_ERR(hw);
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
new file mode 100644
index 000000000000..09d944505ac0
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -0,0 +1,326 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/ieee80211.h>
+#include "scan.h"
+#include "../wlcore/debug.h"
+
+static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
+ struct wlcore_scan_channels *cmd_channels)
+{
+ memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
+ memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
+ cmd->dfs = cmd_channels->dfs;
+ cmd->passive_active = cmd_channels->passive_active;
+
+ memcpy(cmd->channels_2, cmd_channels->channels_2,
+ sizeof(cmd->channels_2));
+ memcpy(cmd->channels_5, cmd_channels->channels_5,
+ sizeof(cmd->channels_2));
+ /* channels_4 are not supported, so no need to copy them */
+}
+
+static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req)
+{
+ struct wl18xx_cmd_scan_params *cmd;
+ struct wlcore_scan_channels *cmd_channels = NULL;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->role_id = wlvif->role_id;
+
+ if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd->scan_type = SCAN_TYPE_SEARCH;
+ cmd->rssi_threshold = -127;
+ cmd->snr_threshold = 0;
+
+ cmd->bss_type = SCAN_BSS_TYPE_ANY;
+
+ cmd->ssid_from_list = 0;
+ cmd->filter = 0;
+ cmd->add_broadcast = 0;
+
+ cmd->urgency = 0;
+ cmd->protect = 0;
+
+ cmd->n_probe_reqs = wl->conf.scan.num_probe_reqs;
+ cmd->terminate_after = 0;
+
+ /* configure channels */
+ WARN_ON(req->n_ssids > 1);
+
+ cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL);
+ if (!cmd_channels) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
+ req->n_channels, req->n_ssids,
+ SCAN_TYPE_SEARCH);
+ wl18xx_adjust_channels(cmd, cmd_channels);
+
+ /*
+ * all the cycles params (except total cycles) should
+ * remain 0 for normal scan
+ */
+ cmd->total_cycles = 1;
+
+ if (req->no_cck)
+ cmd->rate = WL18XX_SCAN_RATE_6;
+
+ cmd->tag = WL1271_SCAN_DEFAULT_TAG;
+
+ if (req->n_ssids) {
+ cmd->ssid_len = req->ssids[0].ssid_len;
+ memcpy(cmd->ssid, req->ssids[0].ssid, cmd->ssid_len);
+ }
+
+ /* TODO: per-band ies? */
+ if (cmd->active[0]) {
+ u8 band = IEEE80211_BAND_2GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->role_id, band,
+ req->ssids ? req->ssids[0].ssid : NULL,
+ req->ssids ? req->ssids[0].ssid_len : 0,
+ req->ie,
+ req->ie_len,
+ false);
+ if (ret < 0) {
+ wl1271_error("2.4GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ if (cmd->active[1] || cmd->dfs) {
+ u8 band = IEEE80211_BAND_5GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->role_id, band,
+ req->ssids ? req->ssids[0].ssid : NULL,
+ req->ssids ? req->ssids[0].ssid_len : 0,
+ req->ie,
+ req->ie_len,
+ false);
+ if (ret < 0) {
+ wl1271_error("5GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("SCAN failed");
+ goto out;
+ }
+
+out:
+ kfree(cmd_channels);
+ kfree(cmd);
+ return ret;
+}
+
+void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ wl->scan.failed = false;
+ cancel_delayed_work(&wl->scan_complete_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+ msecs_to_jiffies(0));
+}
+
+static
+int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ struct wl18xx_cmd_scan_params *cmd;
+ struct wlcore_scan_channels *cmd_channels = NULL;
+ struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+ int ret;
+ int filter_type;
+
+ wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
+
+ filter_type = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
+ if (filter_type < 0)
+ return filter_type;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->role_id = wlvif->role_id;
+
+ if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd->scan_type = SCAN_TYPE_PERIODIC;
+ cmd->rssi_threshold = c->rssi_threshold;
+ cmd->snr_threshold = c->snr_threshold;
+
+ /* don't filter on BSS type */
+ cmd->bss_type = SCAN_BSS_TYPE_ANY;
+
+ cmd->ssid_from_list = 1;
+ if (filter_type == SCAN_SSID_FILTER_LIST)
+ cmd->filter = 1;
+ cmd->add_broadcast = 0;
+
+ cmd->urgency = 0;
+ cmd->protect = 0;
+
+ cmd->n_probe_reqs = c->num_probe_reqs;
+ /* don't stop scanning automatically when something is found */
+ cmd->terminate_after = 0;
+
+ cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL);
+ if (!cmd_channels) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* configure channels */
+ wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
+ req->n_channels, req->n_ssids,
+ SCAN_TYPE_PERIODIC);
+ wl18xx_adjust_channels(cmd, cmd_channels);
+
+ cmd->short_cycles_sec = 0;
+ cmd->long_cycles_sec = cpu_to_le16(req->interval);
+ cmd->short_cycles_count = 0;
+
+ cmd->total_cycles = 0;
+
+ cmd->tag = WL1271_SCAN_DEFAULT_TAG;
+
+ /* create a PERIODIC_SCAN_REPORT_EVENT whenever we've got a match */
+ cmd->report_threshold = 1;
+ cmd->terminate_on_report = 0;
+
+ if (cmd->active[0]) {
+ u8 band = IEEE80211_BAND_2GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->role_id, band,
+ req->ssids ? req->ssids[0].ssid : NULL,
+ req->ssids ? req->ssids[0].ssid_len : 0,
+ ies->ie[band],
+ ies->len[band],
+ true);
+ if (ret < 0) {
+ wl1271_error("2.4GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ if (cmd->active[1] || cmd->dfs) {
+ u8 band = IEEE80211_BAND_5GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->role_id, band,
+ req->ssids ? req->ssids[0].ssid : NULL,
+ req->ssids ? req->ssids[0].ssid_len : 0,
+ ies->ie[band],
+ ies->len[band],
+ true);
+ if (ret < 0) {
+ wl1271_error("5GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("SCAN failed");
+ goto out;
+ }
+
+out:
+ kfree(cmd_channels);
+ kfree(cmd);
+ return ret;
+}
+
+int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ return wl18xx_scan_sched_scan_config(wl, wlvif, req, ies);
+}
+
+static int __wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 scan_type)
+{
+ struct wl18xx_cmd_scan_stop *stop;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
+
+ stop = kzalloc(sizeof(*stop), GFP_KERNEL);
+ if (!stop) {
+ wl1271_error("failed to alloc memory to send sched scan stop");
+ return -ENOMEM;
+ }
+
+ stop->role_id = wlvif->role_id;
+ stop->scan_type = scan_type;
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, stop, sizeof(*stop), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send sched scan stop command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(stop);
+ return ret;
+}
+
+void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_PERIODIC);
+}
+int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req)
+{
+ return wl18xx_scan_send(wl, wlvif, req);
+}
+
+int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ return __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_SEARCH);
+}
diff --git a/drivers/net/wireless/ti/wl18xx/scan.h b/drivers/net/wireless/ti/wl18xx/scan.h
new file mode 100644
index 000000000000..eadee42689d1
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/scan.h
@@ -0,0 +1,127 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_SCAN_H__
+#define __WL18XX_SCAN_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/scan.h"
+
+struct tracking_ch_params {
+ struct conn_scan_ch_params channel;
+
+ __le32 bssid_lsb;
+ __le16 bssid_msb;
+
+ u8 padding[2];
+} __packed;
+
+/* probe request rate */
+enum
+{
+ WL18XX_SCAN_RATE_1 = 0,
+ WL18XX_SCAN_RATE_5_5 = 1,
+ WL18XX_SCAN_RATE_6 = 2,
+};
+
+#define WL18XX_MAX_CHANNELS_5GHZ 32
+
+struct wl18xx_cmd_scan_params {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 scan_type;
+
+ s8 rssi_threshold; /* for filtering (in dBm) */
+ s8 snr_threshold; /* for filtering (in dB) */
+
+ u8 bss_type; /* for filtering */
+ u8 ssid_from_list; /* use ssid from configured ssid list */
+ u8 filter; /* forward only results with matching ssids */
+
+ /*
+ * add broadcast ssid in addition to the configured ssids.
+ * the driver should add a dummy entry for it (?).
+ */
+ u8 add_broadcast;
+
+ u8 urgency;
+ u8 protect; /* ??? */
+ u8 n_probe_reqs; /* Number of probe requests per channel */
+ u8 terminate_after; /* early terminate scan operation */
+
+ u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */
+ u8 active[SCAN_MAX_BANDS]; /* number of active scan channels */
+ u8 dfs; /* number of dfs channels in 5ghz */
+ u8 passive_active; /* number of passive before active channels 2.4ghz */
+
+ __le16 short_cycles_sec;
+ __le16 long_cycles_sec;
+ u8 short_cycles_count;
+ u8 total_cycles; /* 0 - infinite */
+ u8 padding[2];
+
+ union {
+ struct {
+ struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+ struct conn_scan_ch_params channels_5[WL18XX_MAX_CHANNELS_5GHZ];
+ struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+ };
+ struct tracking_ch_params channels_tracking[WL1271_SCAN_MAX_CHANNELS];
+ };
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */
+ u8 tag;
+ u8 rate;
+
+ /* send SCAN_REPORT_EVENT in periodic scans after each cycle
+ * if number of results >= report_threshold. Must be 0 for
+ * non periodic scans
+ */
+ u8 report_threshold;
+
+ /* Should periodic scan stop after a report event was created.
+ * Must be 0 for non periodic scans.
+ */
+ u8 terminate_on_report;
+
+ u8 padding1[3];
+} __packed;
+
+struct wl18xx_cmd_scan_stop {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 scan_type;
+ u8 padding[2];
+} __packed;
+
+int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req);
+int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+#endif
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 5b1fb10d9fd7..57c694396647 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -28,6 +28,49 @@
#include "wl18xx.h"
#include "tx.h"
+static
+void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
+ struct ieee80211_tx_rate *rate)
+{
+ u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;
+
+ if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
+ wl1271_error("last Tx rate invalid: %d", fw_rate);
+ rate->idx = 0;
+ rate->flags = 0;
+ return;
+ }
+
+ if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
+ rate->idx = fw_rate;
+ rate->flags = 0;
+ } else {
+ rate->flags = IEEE80211_TX_RC_MCS;
+ rate->idx = fw_rate - CONF_HW_RATE_INDEX_MCS0;
+
+ /* SGI modifier is counted as a separate rate */
+ if (fw_rate >= CONF_HW_RATE_INDEX_MCS7_SGI)
+ (rate->idx)--;
+ if (fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
+ (rate->idx)--;
+
+ /* this also covers the 40MHz SGI case (= MCS15) */
+ if (fw_rate == CONF_HW_RATE_INDEX_MCS7_SGI ||
+ fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
+ rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ if (fw_rate > CONF_HW_RATE_INDEX_MCS7_SGI && vif) {
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
+ wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
+ /* adjustment needed for range 0-7 */
+ rate->idx -= 8;
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ }
+ }
+ }
+}
+
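A worked example of the mapping above, assuming the conf rate enum places CONF_HW_RATE_INDEX_MCS7_SGI directly after CONF_HW_RATE_INDEX_MCS7 (an ordering assumed here, not shown in this hunk):

/*
 * - fw_rate == CONF_HW_RATE_INDEX_54MBPS: reported as a legacy rate,
 *   idx = fw_rate and flags = 0.
 * - fw_rate == CONF_HW_RATE_INDEX_MCS7_SGI: flags = IEEE80211_TX_RC_MCS |
 *   IEEE80211_TX_RC_SHORT_GI, and idx is decremented once to skip the
 *   SGI entry, so mac80211 sees MCS7 with a short guard interval.
 */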
static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
{
struct ieee80211_tx_info *info;
@@ -44,7 +87,6 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
/* a zero bit indicates Tx success */
tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));
-
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
@@ -56,11 +98,13 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
/* update the TX status info */
if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
+ /*
+ * first pass info->control.vif while it's valid, and then fill out
+ * the info->status structures
+ */
+ wl18xx_get_last_tx_rate(wl, info->control.vif, &info->status.rates[0]);
- /* no real data about Tx completion */
- info->status.rates[0].idx = -1;
- info->status.rates[0].count = 0;
- info->status.rates[0].flags = 0;
+ info->status.rates[0].count = 1; /* no data about retries */
info->status.ack_signal = -1;
if (!tx_success)
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 96a1e438d677..b6739e79efcf 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
-#define WL18XX_IFTYPE_VER 2
-#define WL18XX_MAJOR_VER 0
-#define WL18XX_SUBTYPE_VER 0
-#define WL18XX_MINOR_VER 100
+#define WL18XX_IFTYPE_VER 5
+#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
+#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
+#define WL18XX_MINOR_VER 28
#define WL18XX_CMD_MAX_SIZE 740
@@ -49,8 +49,8 @@ struct wl18xx_priv {
/* Index of last released Tx desc in FW */
u8 last_fw_rls_idx;
- /* number of VIFs requiring extra spare mem-blocks */
- int extra_spare_vif_count;
+ /* number of keys requiring extra spare mem-blocks */
+ int extra_spare_key_count;
};
#define WL18XX_FW_MAX_TX_STATUS_DESC 33
@@ -68,7 +68,43 @@ struct wl18xx_fw_status_priv {
*/
u8 released_tx_desc[WL18XX_FW_MAX_TX_STATUS_DESC];
- u8 padding[2];
+ /* A bitmap representing the currently suspended links. The suspend
+ * is short lived, for multi-channel Tx requirements.
+ */
+ __le32 link_suspend_bitmap;
+
+ /* packet threshold for an "almost empty" AC,
+ * for Tx scheduling purposes
+ */
+ u8 tx_ac_threshold;
+
+ /* number of packets to queue up for a link in PS */
+ u8 tx_ps_threshold;
+
+ /* number of packets to queue up for a suspended link */
+ u8 tx_suspend_threshold;
+
+ /* Should have fewer than this number of packets in the queue of a
+ * slow link to qualify as a high-priority link
+ */
+ u8 tx_slow_link_prio_threshold;
+
+ /* Should have fewer than this number of packets in the queue of a
+ * fast link to qualify as a high-priority link
+ */
+ u8 tx_fast_link_prio_threshold;
+
+ /* Should have fewer than this number of packets in the queue of a
+ * slow link before we stop queuing up packets for it.
+ */
+ u8 tx_slow_stop_threshold;
+
+ /* Should have fewer than this number of packets in the queue of a
+ * fast link before we stop queuing up packets for it.
+ */
+ u8 tx_fast_stop_threshold;
+
+ u8 padding[3];
};
#define WL18XX_PHY_VERSION_MAX_LEN 20
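A sketch of how the new FW status thresholds above feed the Tx scheduler; it mirrors the wl18xx_lnk_high_prio()/wl18xx_lnk_low_prio() ops added in main.c, with a hypothetical helper name:

static bool example_link_below_thold(struct wl1271 *wl,
				     struct wl1271_link *lnk, bool fast_link)
{
	struct wl18xx_fw_status_priv *p =
		(struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
	u8 thold = fast_link ? p->tx_fast_link_prio_threshold :
			       p->tx_slow_link_prio_threshold;

	/* a link stays eligible while it holds fewer packets than the
	 * threshold reported by the FW */
	return lnk->allocated_pkts < thold;
}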
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index d7b907e67170..2b832825c3d4 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -33,8 +33,3 @@ config WLCORE_SDIO
If you choose to build a module, it'll be called wlcore_sdio.
Say N if unsure.
-
-config WL12XX_PLATFORM_DATA
- bool
- depends on WLCORE_SDIO != n || WL1251_SDIO != n
- default y
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index d9fba9e32130..b21398f6c3ec 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -9,7 +9,4 @@ obj-$(CONFIG_WLCORE) += wlcore.o
obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o
obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o
-# small builtin driver bit
-obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
-
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index ce108a736bd0..c79654323396 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1340,6 +1340,8 @@ out:
kfree(acx);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_acx_set_ht_capabilities);
+
int wl1271_acx_set_ht_information(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
@@ -1433,13 +1435,22 @@ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
acx->win_size = wl->conf.ht.rx_ba_win_size;
acx->ssn = ssn;
- ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
- sizeof(*acx));
+ ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx,
+ sizeof(*acx),
+ BIT(CMD_STATUS_NO_RX_BA_SESSION));
if (ret < 0) {
wl1271_warning("acx ba receiver session failed: %d", ret);
goto out;
}
+ /* sometimes we can't start the session */
+ if (ret == CMD_STATUS_NO_RX_BA_SESSION) {
+ wl1271_warning("no fw rx ba on tid %d", tid_index);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = 0;
out:
kfree(acx);
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index d03215d6b3bd..126536c6a393 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1025,7 +1025,6 @@ enum {
ACX_CONFIG_HANGOVER = 0x0042,
ACX_FEATURE_CFG = 0x0043,
ACX_PROTECTION_CFG = 0x0044,
- ACX_CHECKSUM_CONFIG = 0x0045,
};
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 375ea574eafb..77752b03f189 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -84,47 +84,57 @@ out:
static int wlcore_validate_fw_ver(struct wl1271 *wl)
{
unsigned int *fw_ver = wl->chip.fw_ver;
- unsigned int *min_ver = wl->min_fw_ver;
+ unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
+ wl->min_mr_fw_ver : wl->min_sr_fw_ver;
+ char min_fw_str[32] = "";
+ int i;
/* the chip must be exactly equal */
- if (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP])
+ if ((min_ver[FW_VER_CHIP] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP]))
goto fail;
- /* always check the next digit if all previous ones are equal */
-
- if (min_ver[FW_VER_IF_TYPE] < fw_ver[FW_VER_IF_TYPE])
- goto out;
- else if (min_ver[FW_VER_IF_TYPE] > fw_ver[FW_VER_IF_TYPE])
+ /* the firmware type must be equal */
+ if ((min_ver[FW_VER_IF_TYPE] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_IF_TYPE] != fw_ver[FW_VER_IF_TYPE]))
goto fail;
- if (min_ver[FW_VER_MAJOR] < fw_ver[FW_VER_MAJOR])
- goto out;
- else if (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR])
+ /* the project number must be equal */
+ if ((min_ver[FW_VER_SUBTYPE] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_SUBTYPE] != fw_ver[FW_VER_SUBTYPE]))
goto fail;
- if (min_ver[FW_VER_SUBTYPE] < fw_ver[FW_VER_SUBTYPE])
- goto out;
- else if (min_ver[FW_VER_SUBTYPE] > fw_ver[FW_VER_SUBTYPE])
+ /* the API version must be greater or equal */
+ if ((min_ver[FW_VER_MAJOR] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR]))
goto fail;
- if (min_ver[FW_VER_MINOR] < fw_ver[FW_VER_MINOR])
- goto out;
- else if (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])
+ /* if the API version is equal... */
+ if (((min_ver[FW_VER_MAJOR] == WLCORE_FW_VER_IGNORE) ||
+ (min_ver[FW_VER_MAJOR] == fw_ver[FW_VER_MAJOR])) &&
+ /* ...the minor must be greater or equal */
+ ((min_ver[FW_VER_MINOR] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])))
goto fail;
-out:
return 0;
fail:
- wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is outdated.\n"
- "Please use at least FW %u.%u.%u.%u.%u.\n"
- "You can get more information at:\n"
- "http://wireless.kernel.org/en/users/Drivers/wl12xx",
+ for (i = 0; i < NUM_FW_VER; i++)
+ if (min_ver[i] == WLCORE_FW_VER_IGNORE)
+ snprintf(min_fw_str, sizeof(min_fw_str),
+ "%s*.", min_fw_str);
+ else
+ snprintf(min_fw_str, sizeof(min_fw_str),
+ "%s%u.", min_fw_str, min_ver[i]);
+
+ wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
+ "Please use at least FW %s\n"
+ "You can get the latest firmwares at:\n"
+ "git://github.com/TI-OpenLink/firmwares.git",
fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE],
fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE],
- fw_ver[FW_VER_MINOR], min_ver[FW_VER_CHIP],
- min_ver[FW_VER_IF_TYPE], min_ver[FW_VER_MAJOR],
- min_ver[FW_VER_SUBTYPE], min_ver[FW_VER_MINOR]);
+ fw_ver[FW_VER_MINOR], min_fw_str);
return -EINVAL;
}
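The rewritten check above treats WLCORE_FW_VER_IGNORE as a wildcard: chip, interface type and project number must match exactly, while the API pair (major, minor) only has to be new enough. A sketch of the per-field rules, with hypothetical helper names:

static bool fw_field_matches(unsigned int min, unsigned int cur)
{
	/* exact-match fields: chip, if_type, subtype */
	return min == WLCORE_FW_VER_IGNORE || min == cur;
}

static bool fw_field_new_enough(unsigned int min, unsigned int cur)
{
	/* minimum fields: major, and minor when major is equal */
	return min == WLCORE_FW_VER_IGNORE || min <= cur;
}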
@@ -491,7 +501,7 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
if (ret < 0)
return ret;
- wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
+ wl->mbox_ptr[1] = wl->mbox_ptr[0] + wl->mbox_size;
wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
wl->mbox_ptr[0], wl->mbox_ptr[1]);
@@ -508,23 +518,6 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
*/
/* unmask required mbox events */
- wl->event_mask = BSS_LOSE_EVENT_ID |
- REGAINED_BSS_EVENT_ID |
- SCAN_COMPLETE_EVENT_ID |
- ROLE_STOP_COMPLETE_EVENT_ID |
- RSSI_SNR_TRIGGER_0_EVENT_ID |
- PSPOLL_DELIVERY_FAILURE_EVENT_ID |
- SOFT_GEMINI_SENSE_EVENT_ID |
- PERIODIC_SCAN_REPORT_EVENT_ID |
- PERIODIC_SCAN_COMPLETE_EVENT_ID |
- DUMMY_PACKET_EVENT_ID |
- PEER_REMOVE_COMPLETE_EVENT_ID |
- BA_SESSION_RX_CONSTRAINT_EVENT_ID |
- REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
- INACTIVE_STA_EVENT_ID |
- MAX_TX_RETRY_EVENT_ID |
- CHANNEL_SWITCH_COMPLETE_EVENT_ID;
-
ret = wl1271_event_unmask(wl);
if (ret < 0) {
wl1271_error("EVENT mask setting failed");
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 27f83f72a93b..6331f9e1cb39 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -48,14 +48,15 @@
* @id: command id
* @buf: buffer containing the command, must work with dma
* @len: length of the buffer
+ * Return the cmd status code on success.
*/
-int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
- size_t res_len)
+static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
+ size_t len, size_t res_len)
{
struct wl1271_cmd_header *cmd;
unsigned long timeout;
u32 intr;
- int ret = 0;
+ int ret;
u16 status;
u16 poll_count = 0;
@@ -71,7 +72,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false);
if (ret < 0)
- goto fail;
+ return ret;
/*
* TODO: we just need this because one bit is in a different
@@ -79,19 +80,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
*/
ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
if (ret < 0)
- goto fail;
+ return ret;
timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
if (ret < 0)
- goto fail;
+ return ret;
while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1271_error("command complete timeout");
- ret = -ETIMEDOUT;
- goto fail;
+ return -ETIMEDOUT;
}
poll_count++;
@@ -102,7 +102,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
if (ret < 0)
- goto fail;
+ return ret;
}
/* read back the status code of the command */
@@ -111,33 +111,66 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false);
if (ret < 0)
- goto fail;
+ return ret;
status = le16_to_cpu(cmd->status);
- if (status != CMD_STATUS_SUCCESS) {
- wl1271_error("command execute failure %d", status);
- ret = -EIO;
- goto fail;
- }
ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
WL1271_ACX_INTR_CMD_COMPLETE);
if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+/*
+ * send command to fw and return cmd status on success
+ * valid_rets contains a bitmap of allowed error codes
+ */
+int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
+ size_t res_len, unsigned long valid_rets)
+{
+ int ret = __wlcore_cmd_send(wl, id, buf, len, res_len);
+
+ if (ret < 0)
goto fail;
- return 0;
+ /* success is always a valid status */
+ valid_rets |= BIT(CMD_STATUS_SUCCESS);
+ if (ret >= MAX_COMMAND_STATUS ||
+ !test_bit(ret, &valid_rets)) {
+ wl1271_error("command execute failure %d", ret);
+ ret = -EIO;
+ goto fail;
+ }
+ return ret;
fail:
wl12xx_queue_recovery_work(wl);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_cmd_send);
+
+/*
+ * wrapper for wlcore_cmd_send that accepts only CMD_STATUS_SUCCESS
+ * return 0 on success.
+ */
+int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
+ size_t res_len)
+{
+ int ret = wlcore_cmd_send_failsafe(wl, id, buf, len, res_len, 0);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
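A usage sketch for the failsafe variant above, mirroring the ACX_BA_SESSION_RX_SETUP change in acx.c: one specific FW status code is tolerated and translated into a soft error instead of triggering recovery. The wrapper name is hypothetical.

static int example_send_tolerant(struct wl1271 *wl, u16 id,
				 void *buf, size_t len)
{
	int ret = wlcore_cmd_send_failsafe(wl, id, buf, len, 0,
					   BIT(CMD_STATUS_NO_RX_BA_SESSION));

	if (ret < 0)
		return ret;	/* I/O failure, recovery already queued */
	if (ret == CMD_STATUS_NO_RX_BA_SESSION)
		return -EBUSY;	/* FW refused, caller may retry later */

	return 0;		/* CMD_STATUS_SUCCESS */
}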
/*
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
-static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
- u32 mask, bool *timeout)
+int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
+ u32 mask, bool *timeout)
{
u32 *events_vector;
u32 event;
@@ -187,20 +220,7 @@ out:
kfree(events_vector);
return ret;
}
-
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
-{
- int ret;
- bool timeout = false;
-
- ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask, &timeout);
- if (ret != 0 || timeout) {
- wl12xx_queue_recovery_work(wl);
- return ret;
- }
-
- return 0;
-}
+EXPORT_SYMBOL_GPL(wlcore_cmd_wait_for_event_or_timeout);
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
u8 *role_id)
@@ -278,6 +298,16 @@ out:
return ret;
}
+static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
+{
+ if (wl->session_ids[hlid] >= SESSION_COUNTER_MAX)
+ wl->session_ids[hlid] = 0;
+
+ wl->session_ids[hlid]++;
+
+ return wl->session_ids[hlid];
+}
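/*
 * Editorial note (not part of the patch): the helper above hands out
 * per-hlid session ids that cycle through 1..SESSION_COUNTER_MAX and then
 * wrap back to 1 (the reset to 0 happens just before the increment, so 0
 * is never returned). A freshly re-allocated hlid therefore gets a session
 * id different from its previous incarnation until the counter fully wraps.
 */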
+
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
unsigned long flags;
@@ -285,12 +315,21 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
if (link >= WL12XX_MAX_LINKS)
return -EBUSY;
+ wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
+
/* these bits are used by op_tx */
spin_lock_irqsave(&wl->wl_lock, flags);
__set_bit(link, wl->links_map);
__set_bit(link, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /* take the last "freed packets" value from the current FW status */
+ wl->links[link].prev_freed_pkts =
+ wl->fw_status_2->counters.tx_lnk_free_pkts[link];
+ wl->links[link].wlvif = wlvif;
*hlid = link;
+
+ wl->active_link_count++;
return 0;
}
@@ -307,24 +346,21 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
__clear_bit(*hlid, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+ wl->links[*hlid].allocated_pkts = 0;
+ wl->links[*hlid].prev_freed_pkts = 0;
+ wl->links[*hlid].ba_bitmap = 0;
+ memset(wl->links[*hlid].addr, 0, ETH_ALEN);
+
/*
* At this point op_tx() will not add more packets to the queues. We
* can purge them.
*/
wl1271_tx_reset_link_queues(wl, *hlid);
+ wl->links[*hlid].wlvif = NULL;
*hlid = WL12XX_INVALID_LINK_ID;
-}
-
-static int wl12xx_get_new_session_id(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
-{
- if (wlvif->session_counter >= SESSION_COUNTER_MAX)
- wlvif->session_counter = 0;
-
- wlvif->session_counter++;
-
- return wlvif->session_counter;
+ wl->active_link_count--;
+ WARN_ON_ONCE(wl->active_link_count < 0);
}
static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
@@ -345,7 +381,9 @@ static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
}
static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+ struct wl12xx_vif *wlvif,
+ enum ieee80211_band band,
+ int channel)
{
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -359,9 +397,9 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
cmd->role_id = wlvif->dev_role_id;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (band == IEEE80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
- cmd->channel = wlvif->channel;
+ cmd->channel = channel;
if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
@@ -369,7 +407,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
goto out_free;
}
cmd->device.hlid = wlvif->dev_hlid;
- cmd->device.session = wl12xx_get_new_session_id(wl, wlvif);
+ cmd->device.session = wl->session_ids[wlvif->dev_hlid];
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -420,12 +458,6 @@ static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
goto out_free;
}
- ret = wl1271_cmd_wait_for_event(wl, ROLE_STOP_COMPLETE_EVENT_ID);
- if (ret < 0) {
- wl1271_error("cmd role stop dev event completion error");
- goto out_free;
- }
-
wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
@@ -439,6 +471,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
+ u32 supported_rates;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -459,7 +492,14 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
cmd->sta.ssid_len = wlvif->ssid_len;
memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
+
+ supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
+ wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
+ if (wlvif->p2p)
+ supported_rates &= ~CONF_TX_CCK_RATES;
+
+ cmd->sta.local_rates = cpu_to_le32(supported_rates);
+
cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
@@ -468,8 +508,14 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
goto out_free;
}
cmd->sta.hlid = wlvif->sta.hlid;
- cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
- cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
+ cmd->sta.session = wl->session_ids[wlvif->sta.hlid];
+ /*
+ * We don't have the correct remote rates at this stage. The
+ * rates will be reconfigured later, after association, if the
+ * firmware supports ACX_PEER_CAP. Otherwise, there's nothing
+ * we can do, so use all supported_rates here.
+ */
+ cmd->sta.remote_rates = cpu_to_le32(supported_rates);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
@@ -482,6 +528,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
goto err_hlid;
}
+ wlvif->sta.role_chan_type = wlvif->channel_type;
goto out_free;
err_hlid:
@@ -500,7 +547,6 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- bool timeout = false;
if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
@@ -523,17 +569,6 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
goto out_free;
}
- /*
- * Sometimes the firmware doesn't send this event, so we just
- * time out without failing. Queue recovery for other
- * failures.
- */
- ret = wl1271_cmd_wait_for_event_or_timeout(wl,
- ROLE_STOP_COMPLETE_EVENT_ID,
- &timeout);
- if (ret)
- wl12xx_queue_recovery_work(wl);
-
wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
@@ -579,12 +614,15 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
cmd->ap.global_hlid = wlvif->ap.global_hlid;
cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+ cmd->ap.global_session_id = wl->session_ids[wlvif->ap.global_hlid];
+ cmd->ap.bcast_session_id = wl->session_ids[wlvif->ap.bcast_hlid];
cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ap.dtim_interval = bss_conf->dtim_period;
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
/* FIXME: Change when adding DFS */
cmd->ap.reset_tsf = 1; /* By default reset AP TSF */
+ cmd->ap.wmm = wlvif->wmm_enabled;
cmd->channel = wlvif->channel;
cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
@@ -599,8 +637,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
}
- supported_rates = CONF_TX_AP_ENABLED_RATES | CONF_TX_MCS_RATES |
+ supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
+ if (wlvif->p2p)
+ supported_rates &= ~CONF_TX_CCK_RATES;
wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x",
supported_rates);
@@ -799,8 +839,11 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
* @id: acx id
* @buf: buffer containing acx, including all headers, must work with dma
* @len: length of buf
+ * @valid_rets: bitmap of valid cmd status codes (i.e. return values).
+ * return the cmd status on success.
*/
-int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
+int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
+ size_t len, unsigned long valid_rets)
{
struct acx_header *acx = buf;
int ret;
@@ -812,12 +855,26 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
/* payload length, does not include any headers */
acx->len = cpu_to_le16(len - sizeof(*acx));
- ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len, 0);
+ ret = wlcore_cmd_send_failsafe(wl, CMD_CONFIGURE, acx, len, 0,
+ valid_rets);
if (ret < 0) {
wl1271_warning("CONFIGURE command NOK");
return ret;
}
+ return ret;
+}
+
+/*
+ * wrapper for wlcore_cmd_configure_failsafe that accepts only CMD_STATUS_SUCCESS.
+ * return 0 on success
+ */
+int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
+{
+ int ret = wlcore_cmd_configure_failsafe(wl, id, buf, len, 0);
+
+ if (ret < 0)
+ return ret;
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_cmd_configure);
@@ -1034,8 +1091,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb;
int ret;
u32 rate;
- u16 template_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
- u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+ u16 template_id_2_4 = wl->scan_templ_id_2_4;
+ u16 template_id_5 = wl->scan_templ_id_5;
skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie_len);
@@ -1048,10 +1105,10 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
- if (!sched_scan &&
+ if (sched_scan &&
(wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
- template_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4;
- template_id_5 = CMD_TEMPL_APP_PROBE_REQ_5;
+ template_id_2_4 = wl->sched_scan_templ_id_2_4;
+ template_id_5 = wl->sched_scan_templ_id_5;
}
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
@@ -1068,6 +1125,7 @@ out:
dev_kfree_skb(skb);
return ret;
}
+EXPORT_SYMBOL_GPL(wl12xx_cmd_build_probe_req);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
@@ -1379,7 +1437,8 @@ out:
return ret;
}
-int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
+int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid)
{
struct wl12xx_cmd_set_peer_state *cmd;
int ret = 0;
@@ -1395,6 +1454,10 @@ int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
cmd->hlid = hlid;
cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
+ /* wmm param is valid only for station role */
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ cmd->wmm = wlvif->wmm_enabled;
+
ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send set peer state command");
@@ -1429,6 +1492,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->hlid = hlid;
cmd->sp_len = sta->max_sp;
cmd->wmm = sta->wme ? 1 : 0;
+ cmd->session_id = wl->session_ids[hlid];
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1490,9 +1554,10 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
goto out_free;
}
- ret = wl1271_cmd_wait_for_event_or_timeout(wl,
- PEER_REMOVE_COMPLETE_EVENT_ID,
- &timeout);
+ ret = wl->ops->wait_for_event(wl,
+ WLCORE_EVENT_PEER_REMOVE_COMPLETE,
+ &timeout);
+
/*
* We are ok with a timeout here. The event is sometimes not sent
* due to a firmware bug. In case of another error (like SDIO timeout)
@@ -1508,6 +1573,131 @@ out:
return ret;
}
+static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
+{
+ int idx = -1;
+
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ if (ch >= 8 && ch <= 16)
+ idx = ((ch-8)/4 + 18);
+ else if (ch >= 34 && ch <= 64)
+ idx = ((ch-34)/2 + 3 + 18);
+ else if (ch >= 100 && ch <= 140)
+ idx = ((ch-100)/4 + 15 + 18);
+ else if (ch >= 149 && ch <= 165)
+ idx = ((ch-149)/4 + 26 + 18);
+ else
+ idx = -1;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (ch >= 1 && ch <= 14)
+ idx = ch - 1;
+ else
+ idx = -1;
+ break;
+ default:
+ wl1271_error("get reg conf ch idx - unknown band: %d",
+ (int)band);
+ }
+
+ return idx;
+}
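/*
 * Editorial worked example (not part of the patch): with the mapping above,
 * 2.4GHz channels 1..14 occupy bits 0..13 and the 5GHz ranges start at
 * bit 18, e.g.:
 *
 *	wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_2GHZ, 11)  -> 10
 *	wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 36)  -> (36-34)/2 + 3 + 18  = 22
 *	wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 149) -> (149-149)/4 + 26 + 18 = 44
 *	wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 144) -> -1 (not representable)
 *
 * The returned index is the bit that gets set in reg_ch_conf_pending[] /
 * tmp_ch_bitmap[] below.
 */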
+
+void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
+ enum ieee80211_band band)
+{
+ int ch_bit_idx = 0;
+
+ if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
+ return;
+
+ ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
+
+ if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+ set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
+}
+
+int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL;
+ int ret = 0, i, b, ch_bit_idx;
+ struct ieee80211_channel *channel;
+ u32 tmp_ch_bitmap[2];
+ u16 ch;
+ struct wiphy *wiphy = wl->hw->wiphy;
+ struct ieee80211_supported_band *band;
+ bool timeout = false;
+
+ if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
+ return 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd reg domain config");
+
+ memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));
+
+ for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
+ band = wiphy->bands[b];
+ for (i = 0; i < band->n_channels; i++) {
+ channel = &band->channels[i];
+ ch = channel->hw_value;
+
+ if (channel->flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_PASSIVE_SCAN))
+ continue;
+
+ ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
+ if (ch_bit_idx < 0)
+ continue;
+
+ set_bit(ch_bit_idx, (long *)tmp_ch_bitmap);
+ }
+ }
+
+ tmp_ch_bitmap[0] |= wl->reg_ch_conf_pending[0];
+ tmp_ch_bitmap[1] |= wl->reg_ch_conf_pending[1];
+
+ if (!memcmp(tmp_ch_bitmap, wl->reg_ch_conf_last, sizeof(tmp_ch_bitmap)))
+ goto out;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]);
+ cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]);
+
+ wl1271_debug(DEBUG_CMD,
+ "cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x",
+ cmd->ch_bit_map1, cmd->ch_bit_map2);
+
+ ret = wl1271_cmd_send(wl, CMD_DFS_CHANNEL_CONFIG, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send reg domain dfs config");
+ goto out;
+ }
+
+ ret = wl->ops->wait_for_event(wl,
+ WLCORE_EVENT_DFS_CONFIG_COMPLETE,
+ &timeout);
+ if (ret < 0 || timeout) {
+ wl1271_error("reg domain conf %serror",
+ timeout ? "completion " : "");
+ ret = timeout ? -ETIMEDOUT : ret;
+ goto out;
+ }
+
+ memcpy(wl->reg_ch_conf_last, tmp_ch_bitmap, sizeof(tmp_ch_bitmap));
+ memset(wl->reg_ch_conf_pending, 0, sizeof(wl->reg_ch_conf_pending));
+
+out:
+ kfree(cmd);
+ return ret;
+}
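/*
 * Editorial sketch (not part of the patch): as far as can be read from this
 * series, channels used at runtime are first marked via
 * wlcore_set_pending_regdomain_ch(), and the accumulated pending bits are
 * then pushed to the firmware in a single CMD_DFS_CHANNEL_CONFIG by
 * wlcore_cmd_regdomain_config_locked() -- from hw init (see init.c below)
 * or, presumably through a wlcore_regdomain_config() wrapper, from the
 * regulatory notifier path in main.c (wl1271_reg_notify()). Roughly:
 *
 *	wlcore_set_pending_regdomain_ch(wl, channel, band);
 *	ret = wlcore_cmd_regdomain_config_locked(wl);
 *
 * (the _locked suffix suggests the caller is expected to hold wl->mutex).
 */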
+
int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
{
struct wl12xx_cmd_config_fwlog *cmd;
@@ -1593,12 +1783,12 @@ out:
}
static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- u8 role_id)
+ u8 role_id, enum ieee80211_band band, u8 channel)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
+ wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", channel, role_id);
if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
return -EINVAL;
@@ -1610,8 +1800,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
cmd->role_id = role_id;
- cmd->channel = wlvif->channel;
- switch (wlvif->band) {
+ cmd->channel = channel;
+ switch (band) {
case IEEE80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
@@ -1666,30 +1856,18 @@ out:
return ret;
}
-int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
+ enum ieee80211_band band, u8 channel)
{
int ret = 0;
- bool is_first_roc;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
- is_first_roc = (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >=
- WL12XX_MAX_ROLES);
-
- ret = wl12xx_cmd_roc(wl, wlvif, role_id);
+ ret = wl12xx_cmd_roc(wl, wlvif, role_id, band, channel);
if (ret < 0)
goto out;
- if (is_first_roc) {
- ret = wl1271_cmd_wait_for_event(wl,
- REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
- if (ret < 0) {
- wl1271_error("cmd roc event completion error");
- goto out;
- }
- }
-
__set_bit(role_id, wl->roc_map);
out:
return ret;
@@ -1719,43 +1897,7 @@ out:
return ret;
}
-int wl12xx_cmd_channel_switch(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct wl12xx_cmd_channel_switch *cmd;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "cmd channel switch");
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
-
- cmd->role_id = wlvif->role_id;
- cmd->channel = ch_switch->channel->hw_value;
- cmd->switch_time = ch_switch->count;
- cmd->stop_tx = ch_switch->block_tx;
-
- /* FIXME: control from mac80211 in the future */
- cmd->post_switch_tx_disable = 0; /* Enable TX on the target channel */
-
- ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
- if (ret < 0) {
- wl1271_error("failed to send channel switch command");
- goto out_free;
- }
-
-out_free:
- kfree(cmd);
-
-out:
- return ret;
-}
-
-int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl)
+int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_stop_channel_switch *cmd;
int ret;
@@ -1768,6 +1910,8 @@ int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl)
goto out;
}
+ cmd->role_id = wlvif->role_id;
+
ret = wl1271_cmd_send(wl, CMD_STOP_CHANNEL_SWICTH, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to stop channel switch command");
@@ -1782,7 +1926,8 @@ out:
}
/* start dev role and roc on its channel */
-int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum ieee80211_band band, int channel)
{
int ret;
@@ -1797,11 +1942,11 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (ret < 0)
goto out;
- ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+ ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
if (ret < 0)
goto out_disable;
- ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+ ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id, band, channel);
if (ret < 0)
goto out_stop;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 2409f3d71f63..fd34123047cd 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -31,6 +31,8 @@ struct acx_header;
int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len);
+int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
+ size_t res_len, unsigned long valid_rets);
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
u8 *role_id);
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
@@ -39,11 +41,14 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum ieee80211_band band, int channel);
int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
+int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
+ size_t len, unsigned long valid_rets);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 ps_mode, u16 auto_ps_timeout);
@@ -75,22 +80,30 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16);
-int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
+int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
+ enum ieee80211_band band, u8 channel);
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid);
int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
+void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
+ enum ieee80211_band band);
+int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_channel_switch *ch_switch);
-int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 *hlid);
void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
+int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
+ u32 mask, bool *timeout);
enum wl1271_commands {
CMD_INTERROGATE = 1, /* use this to read information elements */
@@ -149,8 +162,11 @@ enum wl1271_commands {
CMD_WFD_START_DISCOVERY = 45,
CMD_WFD_STOP_DISCOVERY = 46,
CMD_WFD_ATTRIBUTE_CONFIG = 47,
- CMD_NOP = 48,
- CMD_LAST_COMMAND,
+ CMD_GENERIC_CFG = 48,
+ CMD_NOP = 49,
+
+ /* start of 18xx specific commands */
+ CMD_DFS_CHANNEL_CONFIG = 60,
MAX_COMMAND_ID = 0xFFFF,
};
@@ -167,8 +183,8 @@ enum cmd_templ {
CMD_TEMPL_PS_POLL,
CMD_TEMPL_KLV,
CMD_TEMPL_DISCONNECT,
- CMD_TEMPL_APP_PROBE_REQ_2_4,
- CMD_TEMPL_APP_PROBE_REQ_5,
+ CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY,
+ CMD_TEMPL_APP_PROBE_REQ_5_LEGACY,
CMD_TEMPL_BAR, /* for firmware internal use only */
CMD_TEMPL_CTS, /*
* For CTS-to-self (FastCTS) mechanism
@@ -179,6 +195,8 @@ enum cmd_templ {
CMD_TEMPL_DEAUTH_AP,
CMD_TEMPL_TEMPORARY,
CMD_TEMPL_LINK_MEASUREMENT_REPORT,
+ CMD_TEMPL_PROBE_REQ_2_4_PERIODIC,
+ CMD_TEMPL_PROBE_REQ_5_PERIODIC,
CMD_TEMPL_MAX = 0xff
};
@@ -220,7 +238,8 @@ enum {
CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/
CMD_STATUS_TEMPLATE_OOM = 23,
CMD_STATUS_NO_RX_BA_SESSION = 24,
- MAX_COMMAND_STATUS = 0xff
+
+ MAX_COMMAND_STATUS
};
#define CMDMBOX_HEADER_LEN 4
@@ -345,7 +364,15 @@ struct wl12xx_cmd_role_start {
u8 reset_tsf;
- u8 padding_1[4];
+ /*
+ * ap supports wmm (note that there is additional
+ * per-sta wmm configuration)
+ */
+ u8 wmm;
+
+ u8 bcast_session_id;
+ u8 global_session_id;
+ u8 padding_1[1];
} __packed ap;
};
} __packed;
@@ -515,7 +542,14 @@ struct wl12xx_cmd_set_peer_state {
u8 hlid;
u8 state;
- u8 padding[2];
+
+ /*
+ * wmm is relevant for sta role only.
+ * ap role configures the per-sta wmm params in
+ * the add_peer command.
+ */
+ u8 wmm;
+ u8 padding[1];
} __packed;
struct wl12xx_cmd_roc {
@@ -558,7 +592,7 @@ struct wl12xx_cmd_add_peer {
u8 bss_index;
u8 sp_len;
u8 wmm;
- u8 padding1;
+ u8 session_id;
} __packed;
struct wl12xx_cmd_remove_peer {
@@ -597,6 +631,13 @@ enum wl12xx_fwlogger_output {
WL12XX_FWLOG_OUTPUT_HOST,
};
+struct wl12xx_cmd_regdomain_dfs_config {
+ struct wl1271_cmd_header header;
+
+ __le32 ch_bit_map1;
+ __le32 ch_bit_map2;
+} __packed;
+
struct wl12xx_cmd_config_fwlog {
struct wl1271_cmd_header header;
@@ -626,27 +667,13 @@ struct wl12xx_cmd_stop_fwlog {
struct wl1271_cmd_header header;
} __packed;
-struct wl12xx_cmd_channel_switch {
+struct wl12xx_cmd_stop_channel_switch {
struct wl1271_cmd_header header;
u8 role_id;
-
- /* The new serving channel */
- u8 channel;
- /* Relative time of the serving channel switch in TBTT units */
- u8 switch_time;
- /* Stop the role TX, should expect it after radar detection */
- u8 stop_tx;
- /* The target channel tx status 1-stopped 0-open*/
- u8 post_switch_tx_disable;
-
u8 padding[3];
} __packed;
-struct wl12xx_cmd_stop_channel_switch {
- struct wl1271_cmd_header header;
-} __packed;
-
/* Used to check radio status after calibration */
#define MAX_TLV_LENGTH 500
#define TEST_CMD_P2G_CAL 2 /* TX BiP */
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 9e40760bafe1..2b96ff821341 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -57,20 +57,49 @@ enum {
};
enum {
- CONF_HW_RATE_INDEX_1MBPS = 0,
- CONF_HW_RATE_INDEX_2MBPS = 1,
- CONF_HW_RATE_INDEX_5_5MBPS = 2,
- CONF_HW_RATE_INDEX_6MBPS = 3,
- CONF_HW_RATE_INDEX_9MBPS = 4,
- CONF_HW_RATE_INDEX_11MBPS = 5,
- CONF_HW_RATE_INDEX_12MBPS = 6,
- CONF_HW_RATE_INDEX_18MBPS = 7,
- CONF_HW_RATE_INDEX_22MBPS = 8,
- CONF_HW_RATE_INDEX_24MBPS = 9,
- CONF_HW_RATE_INDEX_36MBPS = 10,
- CONF_HW_RATE_INDEX_48MBPS = 11,
- CONF_HW_RATE_INDEX_54MBPS = 12,
- CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
+ CONF_HW_RATE_INDEX_1MBPS = 0,
+ CONF_HW_RATE_INDEX_2MBPS = 1,
+ CONF_HW_RATE_INDEX_5_5MBPS = 2,
+ CONF_HW_RATE_INDEX_11MBPS = 3,
+ CONF_HW_RATE_INDEX_6MBPS = 4,
+ CONF_HW_RATE_INDEX_9MBPS = 5,
+ CONF_HW_RATE_INDEX_12MBPS = 6,
+ CONF_HW_RATE_INDEX_18MBPS = 7,
+ CONF_HW_RATE_INDEX_24MBPS = 8,
+ CONF_HW_RATE_INDEX_36MBPS = 9,
+ CONF_HW_RATE_INDEX_48MBPS = 10,
+ CONF_HW_RATE_INDEX_54MBPS = 11,
+ CONF_HW_RATE_INDEX_MCS0 = 12,
+ CONF_HW_RATE_INDEX_MCS1 = 13,
+ CONF_HW_RATE_INDEX_MCS2 = 14,
+ CONF_HW_RATE_INDEX_MCS3 = 15,
+ CONF_HW_RATE_INDEX_MCS4 = 16,
+ CONF_HW_RATE_INDEX_MCS5 = 17,
+ CONF_HW_RATE_INDEX_MCS6 = 18,
+ CONF_HW_RATE_INDEX_MCS7 = 19,
+ CONF_HW_RATE_INDEX_MCS7_SGI = 20,
+ CONF_HW_RATE_INDEX_MCS0_40MHZ = 21,
+ CONF_HW_RATE_INDEX_MCS1_40MHZ = 22,
+ CONF_HW_RATE_INDEX_MCS2_40MHZ = 23,
+ CONF_HW_RATE_INDEX_MCS3_40MHZ = 24,
+ CONF_HW_RATE_INDEX_MCS4_40MHZ = 25,
+ CONF_HW_RATE_INDEX_MCS5_40MHZ = 26,
+ CONF_HW_RATE_INDEX_MCS6_40MHZ = 27,
+ CONF_HW_RATE_INDEX_MCS7_40MHZ = 28,
+ CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI = 29,
+
+ /* MCS8+ rates overlap with 40MHz rates */
+ CONF_HW_RATE_INDEX_MCS8 = 21,
+ CONF_HW_RATE_INDEX_MCS9 = 22,
+ CONF_HW_RATE_INDEX_MCS10 = 23,
+ CONF_HW_RATE_INDEX_MCS11 = 24,
+ CONF_HW_RATE_INDEX_MCS12 = 25,
+ CONF_HW_RATE_INDEX_MCS13 = 26,
+ CONF_HW_RATE_INDEX_MCS14 = 27,
+ CONF_HW_RATE_INDEX_MCS15 = 28,
+ CONF_HW_RATE_INDEX_MCS15_SGI = 29,
+
+ CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI,
};
#define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff
@@ -415,11 +444,11 @@ struct conf_rx_settings {
#define CONF_TX_RATE_MASK_BASIC_P2P CONF_HW_BIT_RATE_6MBPS
/*
- * Rates supported for data packets when operating as AP. Note the absence
+ * Rates supported for data packets when operating as STA/AP. Note the absence
* of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop
* one. The rate dropped is not mandatory under any operating mode.
*/
-#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
+#define CONF_TX_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \
CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \
@@ -677,6 +706,18 @@ struct conf_tx_settings {
/* Time in ms for Tx watchdog timer to expire */
u32 tx_watchdog_timeout;
+
+ /*
+ * when a slow link has this many packets pending, it becomes a low
+ * priority link, scheduling-wise
+ */
+ u8 slow_link_thold;
+
+ /*
+ * when a fast link has this many packets pending, it becomes a low
+ * priority link, scheduling-wise
+ */
+ u8 fast_link_thold;
} __packed;
enum {
@@ -1047,6 +1088,7 @@ struct conf_roam_trigger_settings {
struct conf_scan_settings {
/*
* The minimum time to wait on each channel for active scans
+ * This value will be used whenever there's a connected interface.
*
* Range: u32 tu/1000
*/
@@ -1054,24 +1096,37 @@ struct conf_scan_settings {
/*
* The maximum time to wait on each channel for active scans
+ * This value is currently used whenever there's a
+ * connected interface. It shouldn't exceed 30000 (~30ms) to avoid
+ * interfering with any VoIP traffic going on while scanning.
*
* Range: u32 tu/1000
*/
u32 max_dwell_time_active;
- /*
- * The minimum time to wait on each channel for passive scans
+ /* The minimum time to wait on each channel for active scans
+ * when it's possible to have longer scan dwell times.
+ * Currently this is used whenever we're idle on all interfaces.
+ * Longer dwell times improve detection of networks within a
+ * single scan.
*
* Range: u32 tu/1000
*/
- u32 min_dwell_time_passive;
+ u32 min_dwell_time_active_long;
- /*
- * The maximum time to wait on each channel for passive scans
+ /* The maximum time to wait on each channel for active scans
+ * when it's possible to have longer scan dwell times.
+ * See min_dwell_time_active_long
*
* Range: u32 tu/1000
*/
- u32 max_dwell_time_passive;
+ u32 max_dwell_time_active_long;
+
+ /* time to wait on the channel for passive scans (in TU/1000) */
+ u32 dwell_time_passive;
+
+ /* time to wait on the channel for DFS scans (in TU/1000) */
+ u32 dwell_time_dfs;
/*
* Number of probe requests to transmit on each active scan channel
@@ -1276,12 +1331,20 @@ struct conf_hangover_settings {
u8 window_size;
} __packed;
+struct conf_recovery_settings {
+ /* BUG() on fw recovery */
+ u8 bug_on_recovery;
+
+ /* Prevent HW recovery. FW will remain stuck. */
+ u8 no_recovery;
+} __packed;
+
/*
* The conf version consists of 4 bytes. The two MSB are the wlcore
* version, the two LSB are the lower driver's private conf
* version.
*/
-#define WLCORE_CONF_VERSION (0x0002 << 16)
+#define WLCORE_CONF_VERSION (0x0005 << 16)
#define WLCORE_CONF_MASK 0xffff0000
#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
sizeof(struct wlcore_conf))
@@ -1309,6 +1372,7 @@ struct wlcore_conf {
struct conf_fwlog fwlog;
struct conf_rate_policy_settings rate;
struct conf_hangover_settings hangover;
+ struct conf_recovery_settings recovery;
} __packed;
struct wlcore_conf_file {
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index c86bb00c2488..e70a7c864865 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -490,7 +490,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_HEX(chip.id);
DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
- DRIVER_STATE_PRINT_INT(sched_scanning);
+ DRIVER_STATE_PRINT_INT(recovery_count);
#undef DRIVER_STATE_PRINT_INT
#undef DRIVER_STATE_PRINT_LONG
@@ -560,7 +560,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
VIF_STATE_PRINT_INT(sta.hlid);
- VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
VIF_STATE_PRINT_INT(sta.basic_rate_idx);
VIF_STATE_PRINT_INT(sta.ap_rate_idx);
VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
@@ -577,6 +576,10 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
}
VIF_STATE_PRINT_INT(last_tx_hlid);
+ VIF_STATE_PRINT_INT(tx_queue_count[0]);
+ VIF_STATE_PRINT_INT(tx_queue_count[1]);
+ VIF_STATE_PRINT_INT(tx_queue_count[2]);
+ VIF_STATE_PRINT_INT(tx_queue_count[3]);
VIF_STATE_PRINT_LHEX(links_map[0]);
VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
VIF_STATE_PRINT_INT(band);
@@ -589,7 +592,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
VIF_STATE_PRINT_INT(beacon_int);
VIF_STATE_PRINT_INT(default_key);
VIF_STATE_PRINT_INT(aid);
- VIF_STATE_PRINT_INT(session_counter);
VIF_STATE_PRINT_INT(psm_entry_retry);
VIF_STATE_PRINT_INT(power_level);
VIF_STATE_PRINT_INT(rssi_thold);
@@ -993,7 +995,7 @@ static ssize_t sleep_auth_write(struct file *file,
return -EINVAL;
}
- if (value < 0 || value > WL1271_PSM_MAX) {
+ if (value > WL1271_PSM_MAX) {
wl1271_warning("sleep_auth must be between 0 and %d",
WL1271_PSM_MAX);
return -ERANGE;
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 48907054d493..70f289aa1bc6 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -29,34 +29,39 @@
#include "scan.h"
#include "wl12xx_80211.h"
-static void wl1271_event_rssi_trigger(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct event_mailbox *mbox)
+void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
{
- struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
enum nl80211_cqm_rssi_threshold_event event;
- s8 metric = mbox->rssi_snr_trigger_metric[0];
+ s8 metric = metric_arr[0];
wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
- if (metric <= wlvif->rssi_thold)
- event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
- else
- event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
-
- if (event != wlvif->last_rssi_event)
- ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
- wlvif->last_rssi_event = event;
+ /* TODO: check actual multi-role support */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (metric <= wlvif->rssi_thold)
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ if (event != wlvif->last_rssi_event)
+ ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+ wlvif->last_rssi_event = event;
+ }
}
+EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
- if (!wlvif->sta.ba_rx_bitmap)
+ u8 hlid = wlvif->sta.hlid;
+ if (!wl->links[hlid].ba_bitmap)
return;
- ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+ ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
vif->bss_conf.bssid);
} else {
u8 hlid;
@@ -74,8 +79,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
}
}
-static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
- u8 enable)
+void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
{
struct wl12xx_vif *wlvif;
@@ -87,201 +91,169 @@ static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
wl1271_recalc_rx_streaming(wl, wlvif);
}
}
-
}
+EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);
-static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
+void wlcore_event_sched_scan_completed(struct wl1271 *wl,
+ u8 status)
{
- wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
- wl1271_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector);
- wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
+ wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
+ status);
+
+ if (wl->sched_vif) {
+ ieee80211_sched_scan_stopped(wl->hw);
+ wl->sched_vif = NULL;
+ }
}
+EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);
-static int wl1271_event_process(struct wl1271 *wl)
+void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
+ unsigned long roles_bitmap,
+ unsigned long allowed_bitmap)
{
- struct event_mailbox *mbox = wl->mbox;
- struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
- u32 vector;
- bool disconnect_sta = false;
- unsigned long sta_bitmap = 0;
- int ret;
-
- wl1271_event_mbox_dump(mbox);
-
- vector = le32_to_cpu(mbox->events_vector);
- vector &= ~(le32_to_cpu(mbox->events_mask));
- wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
- if (vector & SCAN_COMPLETE_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "status: 0x%x",
- mbox->scheduled_scan_status);
-
- wl1271_scan_stm(wl, wl->scan_vif);
- }
+ wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
+ __func__, roles_bitmap, allowed_bitmap);
- if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_REPORT_EVENT "
- "(status 0x%0x)", mbox->scheduled_scan_status);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+ !test_bit(wlvif->role_id, &roles_bitmap))
+ continue;
- wl1271_scan_sched_scan_results(wl);
+ wlvif->ba_allowed = !!test_bit(wlvif->role_id,
+ &allowed_bitmap);
+ if (!wlvif->ba_allowed)
+ wl1271_stop_ba_event(wl, wlvif);
}
+}
+EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);
- if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT "
- "(status 0x%0x)", mbox->scheduled_scan_status);
- if (wl->sched_scanning) {
- ieee80211_sched_scan_stopped(wl->hw);
- wl->sched_scanning = false;
- }
- }
+void wlcore_event_channel_switch(struct wl1271 *wl,
+ unsigned long roles_bitmap,
+ bool success)
+{
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
- if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
- wl12xx_event_soft_gemini_sense(wl,
- mbox->soft_gemini_sense_info);
+ wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
+ __func__, roles_bitmap, success);
- /*
- * We are HW_MONITOR device. On beacon loss - queue
- * connection loss work. Cancel it on REGAINED event.
- */
- if (vector & BSS_LOSE_EVENT_ID) {
- /* TODO: check for multi-role */
- int delay = wl->conf.conn.synch_fail_thold *
- wl->conf.conn.bss_lose_timeout;
- wl1271_info("Beacon loss detected.");
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+ !test_bit(wlvif->role_id, &roles_bitmap))
+ continue;
- /*
- * if the work is already queued, it should take place. We
- * don't want to delay the connection loss indication
- * any more.
- */
- ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
- msecs_to_jiffies(delay));
+ if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+ &wlvif->flags))
+ continue;
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- vif = wl12xx_wlvif_to_vif(wlvif);
+ vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_cqm_rssi_notify(
- vif,
- NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
- GFP_KERNEL);
- }
+ ieee80211_chswitch_done(vif, success);
+ cancel_delayed_work(&wlvif->channel_switch_work);
}
+}
+EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
- if (vector & REGAINED_BSS_EVENT_ID) {
- /* TODO: check for multi-role */
- wl1271_info("Beacon regained.");
- cancel_delayed_work(&wl->connection_loss_work);
-
- /* sanity check - we can't lose and gain the beacon together */
- WARN(vector & BSS_LOSE_EVENT_ID,
- "Concurrent beacon loss and gain from FW");
- }
+void wlcore_event_dummy_packet(struct wl1271 *wl)
+{
+ wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
+ wl1271_tx_dummy_packet(wl);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);
- if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
- /* TODO: check actual multi-role support */
- wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- wl1271_event_rssi_trigger(wl, wlvif, mbox);
+static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+ u32 num_packets = wl->conf.tx.max_tx_retries;
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ const u8 *addr;
+ int h;
+
+ for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ bool found = false;
+ /* find the ap vif connected to this sta */
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ if (!test_bit(h, wlvif->ap.sta_hlid_map))
+ continue;
+ found = true;
+ break;
}
- }
+ if (!found)
+ continue;
- if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
- u8 role_id = mbox->role_id;
- wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
- "ba_allowed = 0x%x, role_id=%d",
- mbox->rx_ba_allowed, role_id);
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ addr = wl->links[h].addr;
- wl12xx_for_each_wlvif(wl, wlvif) {
- if (role_id != 0xff && role_id != wlvif->role_id)
- continue;
-
- wlvif->ba_allowed = !!mbox->rx_ba_allowed;
- if (!wlvif->ba_allowed)
- wl1271_stop_ba_event(wl, wlvif);
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, addr);
+ if (sta) {
+ wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
+ ieee80211_report_low_ack(sta, num_packets);
}
+ rcu_read_unlock();
}
+}
- if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
- "status = 0x%x",
- mbox->channel_switch_status);
- /*
- * That event uses for two cases:
- * 1) channel switch complete with status=0
- * 2) channel switch failed status=1
- */
-
- /* TODO: configure only the relevant vif */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- bool success;
-
- if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
- &wlvif->flags))
- continue;
-
- success = mbox->channel_switch_status ? false : true;
- vif = wl12xx_wlvif_to_vif(wlvif);
+void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+ wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
+ wlcore_disconnect_sta(wl, sta_bitmap);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);
- ieee80211_chswitch_done(vif, success);
- }
- }
+void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+ wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
+ wlcore_disconnect_sta(wl, sta_bitmap);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);
- if ((vector & DUMMY_PACKET_EVENT_ID)) {
- wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
- ret = wl1271_tx_dummy_packet(wl);
- if (ret < 0)
- return ret;
- }
+void wlcore_event_roc_complete(struct wl1271 *wl)
+{
+ wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
+ if (wl->roc_vif)
+ ieee80211_ready_on_channel(wl->hw);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);
+void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
+{
/*
- * "TX retries exceeded" has a different meaning according to mode.
- * In AP mode the offending station is disconnected.
+ * We are HW_MONITOR device. On beacon loss - queue
+ * connection loss work. Cancel it on REGAINED event.
*/
- if (vector & MAX_TX_RETRY_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
- sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
- disconnect_sta = true;
- }
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
+ int delay = wl->conf.conn.synch_fail_thold *
+ wl->conf.conn.bss_lose_timeout;
- if (vector & INACTIVE_STA_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
- sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
- disconnect_sta = true;
- }
+ wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);
- if (disconnect_sta) {
- u32 num_packets = wl->conf.tx.max_tx_retries;
- struct ieee80211_sta *sta;
- const u8 *addr;
- int h;
-
- for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
- bool found = false;
- /* find the ap vif connected to this sta */
- wl12xx_for_each_wlvif_ap(wl, wlvif) {
- if (!test_bit(h, wlvif->ap.sta_hlid_map))
- continue;
- found = true;
- break;
- }
- if (!found)
- continue;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+ !test_bit(wlvif->role_id, &roles_bitmap))
+ continue;
- vif = wl12xx_wlvif_to_vif(wlvif);
- addr = wl->links[h].addr;
+ /*
+ * if the work is already queued, it should take place.
+ * We don't want to delay the connection loss
+ * indication any more.
+ */
+ ieee80211_queue_delayed_work(wl->hw,
+ &wlvif->connection_loss_work,
+ msecs_to_jiffies(delay));
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, addr);
- if (sta) {
- wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
- ieee80211_report_low_ack(sta, num_packets);
- }
- rcu_read_unlock();
- }
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
+ GFP_KERNEL);
}
- return 0;
}
+EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
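/*
 * Editorial sketch (not part of the patch): with the helpers above exported,
 * each lower driver is expected to implement ops->process_mailbox_events()
 * (called from wl1271_event_handle() below), decode its own chip-specific
 * mailbox layout and dispatch into wlcore. The mailbox struct and the
 * EXAMPLE_*_EVENT_ID bits here are placeholders, not real definitions.
 */
static int example_process_mailbox_events(struct wl1271 *wl)
{
	struct example_event_mailbox *mbox = wl->mbox;
	u32 vector = le32_to_cpu(mbox->events_vector);

	if (vector & EXAMPLE_SOFT_GEMINI_SENSE_EVENT_ID)
		wlcore_event_soft_gemini_sense(wl, mbox->soft_gemini_sense_info);

	if (vector & EXAMPLE_RSSI_SNR_TRIGGER_0_EVENT_ID)
		wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);

	if (vector & EXAMPLE_BSS_LOSS_EVENT_ID)
		wlcore_event_beacon_loss(wl,
					 le32_to_cpu(mbox->bss_loss_bitmap));

	if (vector & EXAMPLE_DUMMY_PACKET_EVENT_ID)
		wlcore_event_dummy_packet(wl);

	return 0;
}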
int wl1271_event_unmask(struct wl1271 *wl)
{
@@ -305,12 +277,12 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
/* first we read the mbox descriptor */
ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
- sizeof(*wl->mbox), false);
+ wl->mbox_size, false);
if (ret < 0)
return ret;
/* process the descriptor */
- ret = wl1271_event_process(wl);
+ ret = wl->ops->process_mailbox_events(wl);
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/event.h b/drivers/net/wireless/ti/wlcore/event.h
index 8adf18d6c58f..acc7a59d3828 100644
--- a/drivers/net/wireless/ti/wlcore/event.h
+++ b/drivers/net/wireless/ti/wlcore/event.h
@@ -46,33 +46,17 @@ enum {
RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5),
RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6),
RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7),
- MEASUREMENT_START_EVENT_ID = BIT(8),
- MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
- SCAN_COMPLETE_EVENT_ID = BIT(10),
- WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11),
- AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12),
- RESERVED1 = BIT(13),
- PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14),
- ROLE_STOP_COMPLETE_EVENT_ID = BIT(15),
- RADAR_DETECTED_EVENT_ID = BIT(16),
- CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
- BSS_LOSE_EVENT_ID = BIT(18),
- REGAINED_BSS_EVENT_ID = BIT(19),
- MAX_TX_RETRY_EVENT_ID = BIT(20),
- DUMMY_PACKET_EVENT_ID = BIT(21),
- SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
- CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23),
- SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
- PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
- INACTIVE_STA_EVENT_ID = BIT(26),
- PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27),
- PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28),
- PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29),
- BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30),
- REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31),
+
EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
};
+/* events the driver might want to wait for */
+enum wlcore_wait_event {
+ WLCORE_EVENT_ROLE_STOP_COMPLETE,
+ WLCORE_EVENT_PEER_REMOVE_COMPLETE,
+ WLCORE_EVENT_DFS_CONFIG_COMPLETE
+};
+
enum {
EVENT_ENTER_POWER_SAVE_FAIL = 0,
EVENT_ENTER_POWER_SAVE_SUCCESS,
@@ -80,61 +64,24 @@ enum {
#define NUM_OF_RSSI_SNR_TRIGGERS 8
-struct event_mailbox {
- __le32 events_vector;
- __le32 events_mask;
- __le32 reserved_1;
- __le32 reserved_2;
-
- u8 number_of_scan_results;
- u8 scan_tag;
- u8 completed_scan_status;
- u8 reserved_3;
-
- u8 soft_gemini_sense_info;
- u8 soft_gemini_protective_info;
- s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
- u8 change_auto_mode_timeout;
- u8 scheduled_scan_status;
- u8 reserved4;
- /* tuned channel (roc) */
- u8 roc_channel;
-
- __le16 hlid_removed_bitmap;
-
- /* bitmap of aged stations (by HLID) */
- __le16 sta_aging_status;
-
- /* bitmap of stations (by HLID) which exceeded max tx retries */
- __le16 sta_tx_retry_exceeded;
-
- /* discovery completed results */
- u8 discovery_tag;
- u8 number_of_preq_results;
- u8 number_of_prsp_results;
- u8 reserved_5;
-
- /* rx ba constraint */
- u8 role_id; /* 0xFF means any role. */
- u8 rx_ba_allowed;
- u8 reserved_6[2];
-
- /* Channel switch results */
-
- u8 channel_switch_role_id;
- u8 channel_switch_status;
- u8 reserved_7[2];
-
- u8 ps_poll_delivery_failure_role_ids;
- u8 stopped_role_ids;
- u8 started_role_ids;
-
- u8 reserved_8[9];
-} __packed;
-
struct wl1271;
int wl1271_event_unmask(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
+void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable);
+void wlcore_event_sched_scan_completed(struct wl1271 *wl,
+ u8 status);
+void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
+ unsigned long roles_bitmap,
+ unsigned long allowed_bitmap);
+void wlcore_event_channel_switch(struct wl1271 *wl,
+ unsigned long roles_bitmap,
+ bool success);
+void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap);
+void wlcore_event_dummy_packet(struct wl1271 *wl);
+void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap);
+void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap);
+void wlcore_event_roc_complete(struct wl1271 *wl);
+void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr);
#endif
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 2673d783ec1e..7fd260c02a0a 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -201,4 +201,45 @@ wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
return buf_offset;
}
+static inline void
+wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ if (wl->ops->sta_rc_update)
+ wl->ops->sta_rc_update(wl, wlvif, sta, changed);
+}
+
+static inline int
+wlcore_hw_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid)
+{
+ if (wl->ops->set_peer_cap)
+ return wl->ops->set_peer_cap(wl, ht_cap, allow_ht_operation,
+ rate_set, hlid);
+
+ return 0;
+}
+
+static inline bool
+wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ if (!wl->ops->lnk_high_prio)
+ BUG_ON(1);
+
+ return wl->ops->lnk_high_prio(wl, hlid, lnk);
+}
+
+static inline bool
+wlcore_hw_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ if (!wl->ops->lnk_low_prio)
+ BUG_ON(1);
+
+ return wl->ops->lnk_low_prio(wl, hlid, lnk);
+}
+
#endif
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 32d157f62f31..5c6f11e157d9 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -41,14 +41,14 @@ int wl1271_init_templates_config(struct wl1271 *wl)
/* send empty templates for fw memory reservation */
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
- CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
+ wl->scan_templ_id_2_4, NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
- CMD_TEMPL_CFG_PROBE_REQ_5,
+ wl->scan_templ_id_5,
NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
WL1271_RATE_AUTOMATIC);
if (ret < 0)
@@ -56,14 +56,16 @@ int wl1271_init_templates_config(struct wl1271 *wl)
if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) {
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
- CMD_TEMPL_APP_PROBE_REQ_2_4, NULL,
+ wl->sched_scan_templ_id_2_4,
+ NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
- CMD_TEMPL_APP_PROBE_REQ_5, NULL,
+ wl->sched_scan_templ_id_5,
+ NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
@@ -463,7 +465,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
- supported_rates = CONF_TX_AP_ENABLED_RATES;
+ supported_rates = CONF_TX_ENABLED_RATES;
/* unconditionally enable HT rates */
supported_rates |= CONF_TX_MCS_RATES;
@@ -575,9 +577,6 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
/* Configure for power according to debugfs */
if (sta_auth != WL1271_PSM_ILLEGAL)
ret = wl1271_acx_sleep_auth(wl, sta_auth);
- /* Configure for power always on */
- else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
/* Configure for ELP power saving */
else
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
@@ -679,6 +678,10 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
+ ret = wlcore_cmd_regdomain_config_locked(wl);
+ if (ret < 0)
+ return ret;
+
/* Bluetooth WLAN coexistence */
ret = wl1271_init_pta(wl);
if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index f48530fec14f..af7d9f9b3b4d 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -105,13 +105,13 @@ static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
{
int ret;
- ret = wlcore_raw_read(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
+ ret = wlcore_raw_read(wl, addr, wl->buffer_32,
+ sizeof(*wl->buffer_32), false);
if (ret < 0)
return ret;
if (val)
- *val = le32_to_cpu(wl->buffer_32);
+ *val = le32_to_cpu(*wl->buffer_32);
return 0;
}
@@ -119,9 +119,9 @@ static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr,
u32 val)
{
- wl->buffer_32 = cpu_to_le32(val);
- return wlcore_raw_write(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
+ *wl->buffer_32 = cpu_to_le32(val);
+ return wlcore_raw_write(wl, addr, wl->buffer_32,
+ sizeof(*wl->buffer_32), false);
}
static inline int __must_check wlcore_read(struct wl1271 *wl, int addr,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ea9d8e011bc9..2c2ff3e1f849 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -56,8 +56,8 @@
#define WL1271_BOOT_RETRIES 3
static char *fwlog_param;
-static bool bug_on_recovery;
-static bool no_recovery;
+static int bug_on_recovery = -1;
+static int no_recovery = -1;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
struct ieee80211_vif *vif,
@@ -79,22 +79,22 @@ static int wl12xx_set_authorized(struct wl1271 *wl,
if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
return 0;
- ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
+ ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
if (ret < 0)
return ret;
- wl12xx_croc(wl, wlvif->role_id);
-
wl1271_info("Association completed.");
return 0;
}
-static int wl1271_reg_notify(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void wl1271_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_supported_band *band;
struct ieee80211_channel *ch;
int i;
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct wl1271 *wl = hw->priv;
band = wiphy->bands[IEEE80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++) {
@@ -108,7 +108,8 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
}
- return 0;
+ if (likely(wl->state == WLCORE_STATE_ON))
+ wlcore_regdomain_config(wl);
}
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
@@ -303,6 +304,7 @@ out:
static void wlcore_adjust_conf(struct wl1271 *wl)
{
/* Adjust settings according to optional module parameters */
+
if (fwlog_param) {
if (!strcmp(fwlog_param, "continuous")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
@@ -318,16 +320,22 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
wl1271_error("Unknown fwlog parameter %s", fwlog_param);
}
}
+
+ if (bug_on_recovery != -1)
+ wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
+
+ if (no_recovery != -1)
+ wl->conf.recovery.no_recovery = (u8) no_recovery;
}
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid, u8 tx_pkts)
{
- bool fw_ps, single_sta;
+ bool fw_ps, single_link;
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
- single_sta = (wl->active_sta_count == 1);
+ single_link = (wl->active_link_count == 1);
/*
* Wake up from high level PS if the STA is asleep with too little
@@ -338,10 +346,10 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
/*
* Start high-level PS if the STA is asleep with enough blocks in FW.
- * Make an exception if this is the only connected station. In this
- * case FW-memory congestion is not a problem.
+ * Make an exception if this is the only connected link. In this
+ * case FW-memory congestion is less of a problem.
*/
- else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
+ else if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
@@ -349,11 +357,8 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct wl_fw_status_2 *status)
{
- struct wl1271_link *lnk;
u32 cur_fw_ps_map;
- u8 hlid, cnt;
-
- /* TODO: also use link_fast_bitmap here */
+ u8 hlid;
cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
@@ -365,17 +370,9 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
- lnk = &wl->links[hlid];
- cnt = status->counters.tx_lnk_free_pkts[hlid] -
- lnk->prev_freed_pkts;
-
- lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
- lnk->allocated_pkts -= cnt;
-
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
- lnk->allocated_pkts);
- }
+ wl->links[hlid].allocated_pkts);
}
static int wlcore_fw_status(struct wl1271 *wl,
@@ -389,6 +386,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
int i;
size_t status_len;
int ret;
+ struct wl1271_link *lnk;
status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
sizeof(*status_2) + wl->fw_status_priv_len;
@@ -414,6 +412,17 @@ static int wlcore_fw_status(struct wl1271 *wl,
wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
}
+
+ for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
+ lnk = &wl->links[i];
+ /* prevent wrap-around in freed-packets counter */
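+ /*
+ * e.g. if prev_freed_pkts is 250 and the fw's u8 counter wrapped to 5,
+ * (5 - 250) & 0xff yields 11, the number of packets actually freed.
+ */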
+ lnk->allocated_pkts -=
+ (status_2->counters.tx_lnk_free_pkts[i] -
+ lnk->prev_freed_pkts) & 0xff;
+
+ lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
+ }
+
/* prevent wrap-around in total blocks counter */
if (likely(wl->tx_blocks_freed <=
le32_to_cpu(status_2->total_released_blks)))
@@ -466,6 +475,8 @@ static int wlcore_fw_status(struct wl1271 *wl,
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
(s64)le32_to_cpu(status_2->fw_localtime);
+ wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
+
return 0;
}
@@ -802,11 +813,13 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
/*
* Make sure the chip is awake and the logger isn't active.
- * Do not send a stop fwlog command if the fw is hanged.
+ * Do not send a stop fwlog command if the fw is hung or if
+ * dbgpins are used (due to some fw bug).
*/
if (wl1271_ps_elp_wakeup(wl))
goto out;
- if (!wl->watchdog_recovery)
+ if (!wl->watchdog_recovery &&
+ wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
@@ -874,7 +887,8 @@ static void wlcore_print_recovery(struct wl1271 *wl)
if (ret < 0)
return;
- wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
+ wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
+ pc, hint_sts, ++wl->recovery_count);
wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}
@@ -897,10 +911,10 @@ static void wl1271_recovery_work(struct work_struct *work)
wlcore_print_recovery(wl);
}
- BUG_ON(bug_on_recovery &&
+ BUG_ON(wl->conf.recovery.bug_on_recovery &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
- if (no_recovery) {
+ if (wl->conf.recovery.no_recovery) {
wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
goto out_unlock;
}
@@ -920,11 +934,6 @@ static void wl1271_recovery_work(struct work_struct *work)
/* Prevent spurious TX during FW restart */
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
- if (wl->sched_scanning) {
- ieee80211_sched_scan_stopped(wl->hw);
- wl->sched_scanning = false;
- }
-
/* reboot the chipset */
while (!list_empty(&wl->wlvif_list)) {
wlvif = list_first_entry(&wl->wlvif_list,
@@ -1141,7 +1150,6 @@ int wl1271_plt_stop(struct wl1271 *wl)
cancel_work_sync(&wl->recovery_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
- cancel_delayed_work_sync(&wl->connection_loss_work);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
@@ -1169,9 +1177,13 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
int q, mapping;
u8 hlid;
- if (vif)
- wlvif = wl12xx_vif_to_data(vif);
+ if (!vif) {
+ wl1271_debug(DEBUG_TX, "DROP skb with no vif");
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ wlvif = wl12xx_vif_to_data(vif);
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
@@ -1185,9 +1197,9 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
* allow these packets through.
*/
if (hlid == WL12XX_INVALID_LINK_ID ||
- (wlvif && !test_bit(hlid, wlvif->links_map)) ||
- (wlcore_is_queue_stopped(wl, q) &&
- !wlcore_is_queue_stopped_by_reason(wl, q,
+ (!test_bit(hlid, wlvif->links_map)) ||
+ (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
+ !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
ieee80211_free_txskb(hw, skb);
@@ -1199,16 +1211,17 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
wl->tx_queue_count[q]++;
+ wlvif->tx_queue_count[q]++;
/*
* The workqueue is slow to process the tx_queue and we need stop
* the queue here, otherwise the queue will get too long.
*/
- if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
- !wlcore_is_queue_stopped_by_reason(wl, q,
+ if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
+ !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
- wlcore_stop_queue_locked(wl, q,
+ wlcore_stop_queue_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
@@ -1843,11 +1856,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
- cancel_delayed_work_sync(&wl->connection_loss_work);
/* let's notify MAC80211 about the remaining pending TX frames */
- wl12xx_tx_reset(wl);
mutex_lock(&wl->mutex);
+ wl12xx_tx_reset(wl);
wl1271_power_off(wl);
/*
@@ -1870,14 +1882,17 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
wl->time_offset = 0;
wl->ap_fw_ps_map = 0;
wl->ap_ps_map = 0;
- wl->sched_scanning = false;
wl->sleep_auth = WL1271_PSM_ILLEGAL;
memset(wl->roles_map, 0, sizeof(wl->roles_map));
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
+ memset(wl->session_ids, 0, sizeof(wl->session_ids));
wl->active_sta_count = 0;
+ wl->active_link_count = 0;
/* The system link is always allocated */
+ wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
+ wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
/*
@@ -1903,6 +1918,12 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
+
+ /*
+ * FW channels must be re-calibrated after recovery,
+ * clear the last Reg-Domain channel configuration.
+ */
+ memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
static void wlcore_op_stop(struct ieee80211_hw *hw)
@@ -1918,6 +1939,71 @@ static void wlcore_op_stop(struct ieee80211_hw *hw)
mutex_unlock(&wl->mutex);
}
+static void wlcore_channel_switch_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+ int ret;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
+ wl = wlvif->wl;
+
+ wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /* check the channel switch is still ongoing */
+ if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
+ goto out;
+
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_chswitch_done(vif, false);
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl12xx_cmd_stop_channel_switch(wl, wlvif);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static void wlcore_connection_loss_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
+ wl = wlvif->wl;
+
+ wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /* Call mac80211 connection loss */
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ goto out;
+
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_connection_loss(vif);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2037,15 +2123,15 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
wl12xx_allocate_rate_policy(wl,
&wlvif->ap.ucast_rate_idx[i]);
- wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
+ wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
/*
* TODO: check if basic_rate shouldn't be
* wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
* instead (the same thing for STA above).
*/
- wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
+ wlvif->basic_rate = CONF_TX_ENABLED_RATES;
/* TODO: this seems to be used only for STA, check it */
- wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
+ wlvif->rate_set = CONF_TX_ENABLED_RATES;
}
wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
@@ -2065,6 +2151,10 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wl1271_rx_streaming_enable_work);
INIT_WORK(&wlvif->rx_streaming_disable_work,
wl1271_rx_streaming_disable_work);
+ INIT_DELAYED_WORK(&wlvif->channel_switch_work,
+ wlcore_channel_switch_work);
+ INIT_DELAYED_WORK(&wlvif->connection_loss_work,
+ wlcore_connection_loss_work);
INIT_LIST_HEAD(&wlvif->list);
setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2072,7 +2162,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
return 0;
}
-static bool wl12xx_init_fw(struct wl1271 *wl)
+static int wl12xx_init_fw(struct wl1271 *wl)
{
int retries = WL1271_BOOT_RETRIES;
bool booted = false;
@@ -2138,7 +2228,7 @@ power_off:
wl->state = WLCORE_STATE_ON;
out:
- return booted;
+ return ret;
}
static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
@@ -2198,6 +2288,81 @@ static void wl12xx_force_active_psm(struct wl1271 *wl)
}
}
+struct wlcore_hw_queue_iter_data {
+ unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
+ /* current vif */
+ struct ieee80211_vif *vif;
+ /* is the current vif among those iterated */
+ bool cur_running;
+};
+
+static void wlcore_hw_queue_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct wlcore_hw_queue_iter_data *iter_data = data;
+
+ if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+ return;
+
+ if (iter_data->cur_running || vif == iter_data->vif) {
+ iter_data->cur_running = true;
+ return;
+ }
+
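+ /*
+ * hw queues are allocated to each vif in blocks of NUM_TX_QUEUES, so
+ * the first hw queue index divided by NUM_TX_QUEUES identifies the
+ * block that vif occupies in the map.
+ */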
+ __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
+}
+
+static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct wlcore_hw_queue_iter_data iter_data = {};
+ int i, q_base;
+
+ iter_data.vif = vif;
+
+ /* mark all bits taken by active interfaces */
+ ieee80211_iterate_active_interfaces_atomic(wl->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ wlcore_hw_queue_iter, &iter_data);
+
+ /* the current vif is already running in mac80211 (resume/recovery) */
+ if (iter_data.cur_running) {
+ wlvif->hw_queue_base = vif->hw_queue[0];
+ wl1271_debug(DEBUG_MAC80211,
+ "using pre-allocated hw queue base %d",
+ wlvif->hw_queue_base);
+
+ /* interface type might have changed */
+ goto adjust_cab_queue;
+ }
+
+ q_base = find_first_zero_bit(iter_data.hw_queue_map,
+ WLCORE_NUM_MAC_ADDRESSES);
+ if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
+ return -EBUSY;
+
+ wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
+ wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
+ wlvif->hw_queue_base);
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
+ /* register hw queues in mac80211 */
+ vif->hw_queue[i] = wlvif->hw_queue_base + i;
+ }
+
+adjust_cab_queue:
+ /* the last places are reserved for cab queues per interface */
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
+ wlvif->hw_queue_base / NUM_TX_QUEUES;
+ else
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+
+ return 0;
+}
+
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -2206,7 +2371,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct vif_counter_data vif_count;
int ret = 0;
u8 role_type;
- bool booted = false;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -2244,6 +2408,10 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+ ret = wlcore_allocate_hw_queue_base(wl, wlvif);
+ if (ret < 0)
+ goto out;
+
if (wl12xx_need_fw_change(wl, vif_count, true)) {
wl12xx_force_active_psm(wl);
set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
@@ -2263,11 +2431,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
*/
memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
- booted = wl12xx_init_fw(wl);
- if (!booted) {
- ret = -EINVAL;
+ ret = wl12xx_init_fw(wl);
+ if (ret < 0)
goto out;
- }
}
ret = wl12xx_cmd_role_enable(wl, vif->addr,
@@ -2314,7 +2480,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wl1271_info("down");
if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
- wl->scan_vif == vif) {
+ wl->scan_wlvif == wlvif) {
/*
* Rearm the tx watchdog just before idling scan. This
* prevents just-finished scans from triggering the watchdog
@@ -2323,11 +2489,21 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
- wl->scan_vif = NULL;
+ wl->scan_wlvif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
}
+ if (wl->sched_vif == wlvif) {
+ ieee80211_sched_scan_stopped(wl->hw);
+ wl->sched_vif = NULL;
+ }
+
+ if (wl->roc_vif == vif) {
+ wl->roc_vif = NULL;
+ ieee80211_remain_on_channel_expired(wl->hw);
+ }
+
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
/* disable active roles */
ret = wl1271_ps_elp_wakeup(wl);
@@ -2396,9 +2572,6 @@ deinit:
/* Configure for power according to debugfs */
if (sta_auth != WL1271_PSM_ILLEGAL)
wl1271_acx_sleep_auth(wl, sta_auth);
- /* Configure for power always on */
- else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
- wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
/* Configure for ELP power saving */
else
wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
@@ -2410,6 +2583,7 @@ unlock:
del_timer_sync(&wlvif->rx_streaming_timer);
cancel_work_sync(&wlvif->rx_streaming_enable_work);
cancel_work_sync(&wlvif->rx_streaming_disable_work);
+ cancel_delayed_work_sync(&wlvif->connection_loss_work);
mutex_lock(&wl->mutex);
}
@@ -2468,8 +2642,7 @@ static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
return ret;
}
-static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- bool set_assoc)
+static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
@@ -2489,18 +2662,111 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* clear encryption type */
wlvif->encryption_type = KEY_NONE;
- if (set_assoc)
- set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
-
if (is_ibss)
ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
- else
+ else {
+ if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
+ /*
+ * TODO: this is an ugly workaround for wl12xx fw
+ * bug - we are not able to tx/rx after the first
+ * start_sta, so make dummy start+stop calls,
+ * and then call start_sta again.
+ * this should be fixed in the fw.
+ */
+ wl12xx_cmd_role_start_sta(wl, wlvif);
+ wl12xx_cmd_role_stop_sta(wl, wlvif);
+ }
+
ret = wl12xx_cmd_role_start_sta(wl, wlvif);
+ }
+
+ return ret;
+}
+
+static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
+ int offset)
+{
+ u8 ssid_len;
+ const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
+ skb->len - offset);
+
+ if (!ptr) {
+ wl1271_error("No SSID in IEs!");
+ return -ENOENT;
+ }
+
+ ssid_len = ptr[1];
+ if (ssid_len > IEEE80211_MAX_SSID_LEN) {
+ wl1271_error("SSID is too long!");
+ return -EINVAL;
+ }
+
+ wlvif->ssid_len = ssid_len;
+ memcpy(wlvif->ssid, ptr+2, ssid_len);
+ return 0;
+}
+
+static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct sk_buff *skb;
+ int ieoffset;
+
+ /* we currently only support setting the ssid from the ap probe req */
+ if (wlvif->bss_type != BSS_TYPE_STA_BSS)
+ return -EINVAL;
+
+ skb = ieee80211_ap_probereq_get(wl->hw, vif);
+ if (!skb)
+ return -EINVAL;
+
+ ieoffset = offsetof(struct ieee80211_mgmt,
+ u.probe_req.variable);
+ wl1271_ssid_set(wlvif, skb, ieoffset);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 sta_rate_set)
+{
+ int ieoffset;
+ int ret;
+
+ wlvif->aid = bss_conf->aid;
+ wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
+ wlvif->beacon_int = bss_conf->beacon_int;
+ wlvif->wmm_enabled = bss_conf->qos;
+
+ set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
+
+ /*
+ * with wl1271, we don't need to update the
+ * beacon_int and dtim_period, because the firmware
+ * updates it by itself when the first beacon is
+ * received after a join.
+ */
+ ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
if (ret < 0)
- goto out;
+ return ret;
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- goto out;
+ /*
+ * Get a template for hardware connection maintenance
+ */
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+ wlvif,
+ NULL);
+ ieoffset = offsetof(struct ieee80211_mgmt,
+ u.probe_req.variable);
+ wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
+
+ /* enable the connection monitoring feature */
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
+ if (ret < 0)
+ return ret;
/*
* The join command disable the keep-alive mode, shut down its process,
@@ -2510,35 +2776,83 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
*/
ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
if (ret < 0)
- goto out;
+ return ret;
ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
if (ret < 0)
- goto out;
+ return ret;
ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
if (ret < 0)
- goto out;
+ return ret;
ret = wl1271_acx_keep_alive_config(wl, wlvif,
wlvif->sta.klv_template_id,
ACX_KEEP_ALIVE_TPL_VALID);
if (ret < 0)
- goto out;
+ return ret;
+
+ /*
+ * The default fw psm configuration is AUTO, while mac80211 default
+ * setting is off (ACTIVE), so sync the fw with the correct value.
+ */
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ return ret;
+
+ if (sta_rate_set) {
+ wlvif->rate_set =
+ wl1271_tx_enabled_rates_get(wl,
+ sta_rate_set,
+ wlvif->band);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ }
-out:
return ret;
}
-static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
+ bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+
+ /* make sure we are associated (sta) */
+ if (sta &&
+ !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ return false;
+
+ /* make sure we are joined (ibss) */
+ if (!sta &&
+ test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
+ return false;
+
+ if (sta) {
+ /* use defaults when not associated */
+ wlvif->aid = 0;
+
+ /* free probe-request template */
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = NULL;
+
+ /* disable connection monitor features */
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
+
+ /* Disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
+ }
if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- wl12xx_cmd_stop_channel_switch(wl);
+ wl12xx_cmd_stop_channel_switch(wl, wlvif);
ieee80211_chswitch_done(vif, false);
+ cancel_delayed_work(&wlvif->channel_switch_work);
}
/* invalidate keep-alive template */
@@ -2546,17 +2860,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->sta.klv_template_id,
ACX_KEEP_ALIVE_TPL_INVALID);
- /* to stop listening to a channel, we disconnect */
- ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
- if (ret < 0)
- goto out;
-
/* reset TX security counters on a clean disconnect */
wlvif->tx_security_last_seq_lsb = 0;
wlvif->tx_security_seq = 0;
-out:
- return ret;
+ return 0;
}
static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
@@ -2565,147 +2873,10 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->rate_set = wlvif->basic_rate_set;
}
-static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- bool idle)
-{
- int ret;
- bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
-
- if (idle == cur_idle)
- return 0;
-
- if (idle) {
- /* no need to croc if we weren't busy (e.g. during boot) */
- if (wl12xx_dev_role_started(wlvif)) {
- ret = wl12xx_stop_dev(wl, wlvif);
- if (ret < 0)
- goto out;
- }
- wlvif->rate_set =
- wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- if (ret < 0)
- goto out;
- clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
- } else {
- /* The current firmware only supports sched_scan in idle */
- if (wl->sched_scanning) {
- wl1271_scan_sched_scan_stop(wl, wlvif);
- ieee80211_sched_scan_stopped(wl->hw);
- }
-
- ret = wl12xx_start_dev(wl, wlvif);
- if (ret < 0)
- goto out;
- set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
- }
-
-out:
- return ret;
-}
-
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_conf *conf, u32 changed)
{
- bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
- int channel, ret;
-
- channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
-
- /* if the channel changes while joined, join again */
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
- ((wlvif->band != conf->channel->band) ||
- (wlvif->channel != channel) ||
- (wlvif->channel_type != conf->channel_type))) {
- /* send all pending packets */
- ret = wlcore_tx_work_locked(wl);
- if (ret < 0)
- return ret;
-
- wlvif->band = conf->channel->band;
- wlvif->channel = channel;
- wlvif->channel_type = conf->channel_type;
-
- if (is_ap) {
- wl1271_set_band_rate(wl, wlvif);
- ret = wl1271_init_ap_rates(wl, wlvif);
- if (ret < 0)
- wl1271_error("AP rate policy change failed %d",
- ret);
- } else {
- /*
- * FIXME: the mac80211 should really provide a fixed
- * rate to use here. for now, just use the smallest
- * possible rate for the band as a fixed rate for
- * association frames and other control messages.
- */
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- wl1271_set_band_rate(wl, wlvif);
-
- wlvif->basic_rate =
- wl1271_tx_min_rate_get(wl,
- wlvif->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- if (ret < 0)
- wl1271_warning("rate policy for channel "
- "failed %d", ret);
-
- /*
- * change the ROC channel. do it only if we are
- * not idle. otherwise, CROC will be called
- * anyway.
- */
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
- &wlvif->flags) &&
- wl12xx_dev_role_started(wlvif) &&
- !(conf->flags & IEEE80211_CONF_IDLE)) {
- ret = wl12xx_stop_dev(wl, wlvif);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_start_dev(wl, wlvif);
- if (ret < 0)
- return ret;
- }
- }
- }
-
- if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
-
- if ((conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
- !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
-
- int ps_mode;
- char *ps_mode_str;
-
- if (wl->conf.conn.forced_ps) {
- ps_mode = STATION_POWER_SAVE_MODE;
- ps_mode_str = "forced";
- } else {
- ps_mode = STATION_AUTO_PS_MODE;
- ps_mode_str = "auto";
- }
-
- wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
-
- ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
-
- if (ret < 0)
- wl1271_warning("enter %s ps failed %d",
- ps_mode_str, ret);
-
- } else if (!(conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
-
- wl1271_debug(DEBUG_PSM, "auto ps disabled");
-
- ret = wl1271_ps_set_mode(wl, wlvif,
- STATION_ACTIVE_MODE);
- if (ret < 0)
- wl1271_warning("exit auto ps failed %d", ret);
- }
- }
+ int ret;
if (conf->power_level != wlvif->power_level) {
ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
@@ -2723,37 +2894,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
struct ieee80211_conf *conf = &hw->conf;
- int channel, ret = 0;
-
- channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+ int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
" changed 0x%x",
- channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level,
conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
changed);
- /*
- * mac80211 will go to idle nearly immediately after transmitting some
- * frames, such as the deauth. To make sure those frames reach the air,
- * wait here until the TX queue is fully flushed.
- */
- if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
- ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (conf->flags & IEEE80211_CONF_IDLE)))
- wl1271_tx_flush(wl);
-
mutex_lock(&wl->mutex);
- /* we support configuring the channel and band even while off */
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- wl->band = conf->channel->band;
- wl->channel = channel;
- wl->channel_type = conf->channel_type;
- }
-
if (changed & IEEE80211_CONF_CHANGE_POWER)
wl->power_level = conf->power_level;
@@ -3073,10 +3224,7 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
* stop the queues and flush to ensure the next packets are
* in sync with FW spare block accounting
*/
- mutex_lock(&wl->mutex);
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
- mutex_unlock(&wl->mutex);
-
wl1271_tx_flush(wl);
}
@@ -3202,6 +3350,29 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
+void wlcore_regdomain_config(struct wl1271 *wl)
+{
+ int ret;
+
+ if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
+ return;
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_cmd_regdomain_config_locked(wl);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
@@ -3241,7 +3412,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out_sleep;
}
- ret = wl1271_scan(hw->priv, vif, ssid, len, req);
+ ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
wl1271_ps_elp_sleep(wl);
out:
@@ -3254,6 +3425,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
@@ -3271,7 +3443,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
goto out;
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
- ret = wl1271_scan_stop(wl);
+ ret = wl->ops->scan_stop(wl, wlvif);
if (ret < 0)
goto out_sleep;
}
@@ -3284,7 +3456,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
- wl->scan_vif = NULL;
+ wl->scan_wlvif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
@@ -3318,15 +3490,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
+ ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
if (ret < 0)
goto out_sleep;
- ret = wl1271_scan_sched_scan_start(wl, wlvif);
- if (ret < 0)
- goto out_sleep;
-
- wl->sched_scanning = true;
+ wl->sched_vif = wlvif;
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -3353,7 +3521,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- wl1271_scan_sched_scan_stop(wl, wlvif);
+ wl->ops->sched_scan_stop(wl, wlvif);
wl1271_ps_elp_sleep(wl);
out:
@@ -3418,30 +3586,6 @@ out:
return ret;
}
-static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
- int offset)
-{
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- u8 ssid_len;
- const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
- skb->len - offset);
-
- if (!ptr) {
- wl1271_error("No SSID in IEs!");
- return -ENOENT;
- }
-
- ssid_len = ptr[1];
- if (ssid_len > IEEE80211_MAX_SSID_LEN) {
- wl1271_error("SSID is too long!");
- return -EINVAL;
- }
-
- wlvif->ssid_len = ssid_len;
- memcpy(wlvif->ssid, ptr+2, ssid_len);
- return 0;
-}
-
static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
{
int len;
@@ -3622,7 +3766,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
wl1271_debug(DEBUG_MASTER, "beacon updated");
- ret = wl1271_ssid_set(vif, beacon, ieoffset);
+ ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
@@ -3639,6 +3783,12 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
goto out;
}
+ wlvif->wmm_enabled =
+ cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ beacon->data + ieoffset,
+ beacon->len - ieoffset);
+
/*
* In case we already have a probe-resp beacon set explicitly
* by usermode, don't use the beacon data.
@@ -3692,7 +3842,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret = 0;
- if ((changed & BSS_CHANGED_BEACON_INT)) {
+ if (changed & BSS_CHANGED_BEACON_INT) {
wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
@@ -3705,7 +3855,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
}
- if ((changed & BSS_CHANGED_BEACON)) {
+ if (changed & BSS_CHANGED_BEACON) {
ret = wlcore_set_beacon_template(wl, vif, is_ap);
if (ret < 0)
goto out;
@@ -3726,7 +3876,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
- if ((changed & BSS_CHANGED_BASIC_RATES)) {
+ if (changed & BSS_CHANGED_BASIC_RATES) {
u32 rates = bss_conf->basic_rates;
wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
@@ -3757,7 +3907,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if (ret < 0)
goto out;
- if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (bss_conf->enable_beacon) {
if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
ret = wl12xx_cmd_role_start_ap(wl, wlvif);
@@ -3804,6 +3954,79 @@ out:
return;
}
+static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 sta_rate_set)
+{
+ u32 rates;
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211,
+ "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
+ bss_conf->bssid, bss_conf->aid,
+ bss_conf->beacon_int,
+ bss_conf->basic_rates, sta_rate_set);
+
+ wlvif->beacon_int = bss_conf->beacon_int;
+ rates = bss_conf->basic_rates;
+ wlvif->basic_rate_set =
+ wl1271_tx_enabled_rates_get(wl, rates,
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+
+ if (sta_rate_set)
+ wlvif->rate_set =
+ wl1271_tx_enabled_rates_get(wl,
+ sta_rate_set,
+ wlvif->band);
+
+ /* we only support sched_scan while not connected */
+ if (wl->sched_vif == wlvif)
+ wl->ops->sched_scan_stop(wl, wlvif);
+
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_build_null_data(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
+ if (ret < 0)
+ return ret;
+
+ wlcore_set_ssid(wl, wlvif);
+
+ set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+
+ return 0;
+}
+
+static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ /* revert back to minimum rates for the current band */
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ }
+
+ clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+ return 0;
+}
/* STA/IBSS mode changes */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
struct ieee80211_vif *vif,
@@ -3811,7 +4034,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
u32 changed)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- bool do_join = false, set_assoc = false;
+ bool do_join = false;
bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
bool ibss_joined = false;
u32 sta_rate_set = 0;
@@ -3832,9 +4055,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
- if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
- &wlvif->flags))
- wl1271_unjoin(wl, wlvif);
+ wlcore_unset_assoc(wl, wlvif);
+ wl12xx_cmd_role_stop_sta(wl, wlvif);
}
}
@@ -3852,13 +4074,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
do_join = true;
}
- if (changed & BSS_CHANGED_IDLE && !is_ibss) {
- ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
- if (ret < 0)
- wl1271_warning("idle mode change failed %d", ret);
- }
-
- if ((changed & BSS_CHANGED_CQM)) {
+ if (changed & BSS_CHANGED_CQM) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
@@ -3870,150 +4086,39 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if (changed & BSS_CHANGED_BSSID)
- if (!is_zero_ether_addr(bss_conf->bssid)) {
- ret = wl12xx_cmd_build_null_data(wl, wlvif);
- if (ret < 0)
- goto out;
-
- ret = wl1271_build_qos_null_data(wl, vif);
- if (ret < 0)
- goto out;
- }
-
- if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
+ if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
+ BSS_CHANGED_ASSOC)) {
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (!sta)
- goto sta_not_found;
-
- /* save the supp_rates of the ap */
- sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
- if (sta->ht_cap.ht_supported)
- sta_rate_set |=
- (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
- (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
- sta_ht_cap = sta->ht_cap;
- sta_exists = true;
-
-sta_not_found:
+ if (sta) {
+ u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
+
+ /* save the supp_rates of the ap */
+ sta_rate_set = sta->supp_rates[wlvif->band];
+ if (sta->ht_cap.ht_supported)
+ sta_rate_set |=
+ (rx_mask[0] << HW_HT_RATES_OFFSET) |
+ (rx_mask[1] << HW_MIMO_RATES_OFFSET);
+ sta_ht_cap = sta->ht_cap;
+ sta_exists = true;
+ }
+
rcu_read_unlock();
}
- if ((changed & BSS_CHANGED_ASSOC)) {
- if (bss_conf->assoc) {
- u32 rates;
- int ieoffset;
- wlvif->aid = bss_conf->aid;
- wlvif->channel_type =
- cfg80211_get_chandef_type(&bss_conf->chandef);
- wlvif->beacon_int = bss_conf->beacon_int;
- do_join = true;
- set_assoc = true;
-
- /*
- * use basic rates from AP, and determine lowest rate
- * to use with control frames.
- */
- rates = bss_conf->basic_rates;
- wlvif->basic_rate_set =
- wl1271_tx_enabled_rates_get(wl, rates,
- wlvif->band);
- wlvif->basic_rate =
- wl1271_tx_min_rate_get(wl,
- wlvif->basic_rate_set);
- if (sta_rate_set)
- wlvif->rate_set =
- wl1271_tx_enabled_rates_get(wl,
- sta_rate_set,
- wlvif->band);
- ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- if (ret < 0)
- goto out;
-
- /*
- * with wl1271, we don't need to update the
- * beacon_int and dtim_period, because the firmware
- * updates it by itself when the first beacon is
- * received after a join.
- */
- ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
+ if (changed & BSS_CHANGED_BSSID) {
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ ret = wlcore_set_bssid(wl, wlvif, bss_conf,
+ sta_rate_set);
if (ret < 0)
goto out;
- /*
- * Get a template for hardware connection maintenance
- */
- dev_kfree_skb(wlvif->probereq);
- wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
- wlvif,
- NULL);
- ieoffset = offsetof(struct ieee80211_mgmt,
- u.probe_req.variable);
- wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
-
- /* enable the connection monitoring feature */
- ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
- if (ret < 0)
- goto out;
+ /* Need to update the BSSID (for filtering etc) */
+ do_join = true;
} else {
- /* use defaults when not associated */
- bool was_assoc =
- !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
- &wlvif->flags);
- bool was_ifup =
- !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
- &wlvif->flags);
- wlvif->aid = 0;
-
- /* free probe-request template */
- dev_kfree_skb(wlvif->probereq);
- wlvif->probereq = NULL;
-
- /* revert back to minimum rates for the current band */
- wl1271_set_band_rate(wl, wlvif);
- wlvif->basic_rate =
- wl1271_tx_min_rate_get(wl,
- wlvif->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- if (ret < 0)
- goto out;
-
- /* disable connection monitor features */
- ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
-
- /* Disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
+ ret = wlcore_clear_bssid(wl, wlvif);
if (ret < 0)
goto out;
-
- /* restore the bssid filter and go to dummy bssid */
- if (was_assoc) {
- /*
- * we might have to disable roc, if there was
- * no IF_OPER_UP notification.
- */
- if (!was_ifup) {
- ret = wl12xx_croc(wl, wlvif->role_id);
- if (ret < 0)
- goto out;
- }
- /*
- * (we also need to disable roc in case of
- * roaming on the same channel. until we will
- * have a better flow...)
- */
- if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
- ret = wl12xx_croc(wl,
- wlvif->dev_role_id);
- if (ret < 0)
- goto out;
- }
-
- wl1271_unjoin(wl, wlvif);
- if (!bss_conf->idle)
- wl12xx_start_dev(wl, wlvif);
- }
}
}
@@ -4043,71 +4148,87 @@ sta_not_found:
goto out;
if (do_join) {
- ret = wl1271_join(wl, wlvif, set_assoc);
+ ret = wlcore_join(wl, wlvif);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out;
}
+ }
- /* ROC until connected (after EAPOL exchange) */
- if (!is_ibss) {
- ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ ret = wlcore_set_assoc(wl, wlvif, bss_conf,
+ sta_rate_set);
if (ret < 0)
goto out;
if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
wl12xx_set_authorized(wl, wlvif);
+ } else {
+ wlcore_unset_assoc(wl, wlvif);
}
- /*
- * stop device role if started (we might already be in
- * STA/IBSS role).
- */
- if (wl12xx_dev_role_started(wlvif)) {
- ret = wl12xx_stop_dev(wl, wlvif);
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ if ((bss_conf->ps) &&
+ test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
+ !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
+ int ps_mode;
+ char *ps_mode_str;
+
+ if (wl->conf.conn.forced_ps) {
+ ps_mode = STATION_POWER_SAVE_MODE;
+ ps_mode_str = "forced";
+ } else {
+ ps_mode = STATION_AUTO_PS_MODE;
+ ps_mode_str = "auto";
+ }
+
+ wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
+
+ ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
if (ret < 0)
- goto out;
+ wl1271_warning("enter %s ps failed %d",
+ ps_mode_str, ret);
+ } else if (!bss_conf->ps &&
+ test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
+ wl1271_debug(DEBUG_PSM, "auto ps disabled");
+
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_ACTIVE_MODE);
+ if (ret < 0)
+ wl1271_warning("exit auto ps failed %d", ret);
}
}
/* Handle new association with HT. Do this after join. */
- if (sta_exists) {
- if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
- ret = wl1271_acx_set_ht_capabilities(wl,
- &sta_ht_cap,
- true,
- wlvif->sta.hlid);
- if (ret < 0) {
- wl1271_warning("Set ht cap true failed %d",
- ret);
- goto out;
- }
+ if (sta_exists &&
+ (changed & BSS_CHANGED_HT)) {
+ bool enabled =
+ bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+
+ ret = wlcore_hw_set_peer_cap(wl,
+ &sta_ht_cap,
+ enabled,
+ wlvif->rate_set,
+ wlvif->sta.hlid);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap failed %d", ret);
+ goto out;
+
}
- /* handle new association without HT and disassociation */
- else if (changed & BSS_CHANGED_ASSOC) {
- ret = wl1271_acx_set_ht_capabilities(wl,
- &sta_ht_cap,
- false,
- wlvif->sta.hlid);
+
+ if (enabled) {
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
+ bss_conf->ht_operation_mode);
if (ret < 0) {
- wl1271_warning("Set ht cap false failed %d",
+ wl1271_warning("Set ht information failed %d",
ret);
goto out;
}
}
}
- /* Handle HT information change. Done after join. */
- if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
- ret = wl1271_acx_set_ht_information(wl, wlvif,
- bss_conf->ht_operation_mode);
- if (ret < 0) {
- wl1271_warning("Set ht information failed %d", ret);
- goto out;
- }
- }
-
/* Handle arp filtering. Done after join. */
if ((changed & BSS_CHANGED_ARP_FILTER) ||
(!is_ibss && (changed & BSS_CHANGED_QOS))) {
@@ -4115,8 +4236,7 @@ sta_not_found:
wlvif->sta.qos = bss_conf->qos;
WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
- if (bss_conf->arp_addr_cnt == 1 &&
- bss_conf->arp_filter_enabled) {
+ if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
wlvif->ip_addr = addr;
/*
* The template should have been configured only upon
@@ -4157,15 +4277,15 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
- (int)changed);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
+ wlvif->role_id, (int)changed);
/*
* make sure to cancel pending disconnections if our association
* state changed
*/
if (!is_ap && (changed & BSS_CHANGED_ASSOC))
- cancel_delayed_work_sync(&wl->connection_loss_work);
+ cancel_delayed_work_sync(&wlvif->connection_loss_work);
if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
!bss_conf->enable_beacon)
@@ -4194,6 +4314,76 @@ out:
mutex_unlock(&wl->mutex);
}
+static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
+ ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+ cfg80211_get_chandef_type(&ctx->def));
+ return 0;
+}
+
+static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
+ ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+ cfg80211_get_chandef_type(&ctx->def));
+}
+
+static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ wl1271_debug(DEBUG_MAC80211,
+ "mac80211 change chanctx %d (type %d) changed 0x%x",
+ ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+ cfg80211_get_chandef_type(&ctx->def), changed);
+}
+
+static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int channel = ieee80211_frequency_to_channel(
+ ctx->def.chan->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211,
+ "mac80211 assign chanctx (role %d) %d (type %d)",
+ wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
+
+ mutex_lock(&wl->mutex);
+
+ wlvif->band = ctx->def.chan->band;
+ wlvif->channel = channel;
+ wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
+
+ /* update default rates according to the band */
+ wl1271_set_band_rate(wl, wlvif);
+
+ mutex_unlock(&wl->mutex);
+
+ return 0;
+}
+
+static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
+ wl1271_debug(DEBUG_MAC80211,
+ "mac80211 unassign chanctx (role %d) %d (type %d)",
+ wlvif->role_id,
+ ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+ cfg80211_get_chandef_type(&ctx->def));
+
+ wl1271_tx_flush(wl);
+}
+
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
@@ -4321,8 +4511,6 @@ void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
return;
clear_bit(hlid, wlvif->ap.sta_hlid_map);
- memset(wl->links[hlid].addr, 0, ETH_ALEN);
- wl->links[hlid].ba_bitmap = 0;
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
wl12xx_free_link(wl, wlvif, &hlid);
@@ -4382,6 +4570,45 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
return ret;
}
+static void wlcore_roc_if_possible(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ if (find_first_bit(wl->roc_map,
+ WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
+ return;
+
+ if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
+ return;
+
+ wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
+}
+
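+/*
+ * Track stations that are mid-connection (added but not yet authorized).
+ * The first such station triggers a ROC so the device stays on-channel
+ * for the handshake; once the per-vif count drops back to zero, the ROC
+ * is released again.
+ */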
+static void wlcore_update_inconn_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta,
+ bool in_connection)
+{
+ if (in_connection) {
+ if (WARN_ON(wl_sta->in_connection))
+ return;
+ wl_sta->in_connection = true;
+ if (!wlvif->inconn_count++)
+ wlcore_roc_if_possible(wl, wlvif);
+ } else {
+ if (!wl_sta->in_connection)
+ return;
+
+ wl_sta->in_connection = false;
+ wlvif->inconn_count--;
+ if (WARN_ON(wlvif->inconn_count < 0))
+ return;
+
+ if (!wlvif->inconn_count)
+ if (test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
+ }
+}
+
static int wl12xx_update_sta_state(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta,
@@ -4400,8 +4627,13 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
/* Add station (AP mode) */
if (is_ap &&
old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE)
- return wl12xx_sta_add(wl, wlvif, sta);
+ new_state == IEEE80211_STA_NONE) {
+ ret = wl12xx_sta_add(wl, wlvif, sta);
+ if (ret)
+ return ret;
+
+ wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
+ }
/* Remove station (AP mode) */
if (is_ap &&
@@ -4409,35 +4641,59 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
new_state == IEEE80211_STA_NOTEXIST) {
/* must not fail */
wl12xx_sta_remove(wl, wlvif, sta);
- return 0;
+
+ wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
}
/* Authorize station (AP mode) */
if (is_ap &&
new_state == IEEE80211_STA_AUTHORIZED) {
- ret = wl12xx_cmd_set_peer_state(wl, hlid);
+ ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
if (ret < 0)
return ret;
ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
hlid);
- return ret;
+ if (ret)
+ return ret;
+
+ wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
}
/* Authorize station */
if (is_sta &&
new_state == IEEE80211_STA_AUTHORIZED) {
set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
- return wl12xx_set_authorized(wl, wlvif);
+ ret = wl12xx_set_authorized(wl, wlvif);
+ if (ret)
+ return ret;
}
if (is_sta &&
old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
- return 0;
+ clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
}
+ /* clear ROCs on failure or authorization */
+ if (is_sta &&
+ (new_state == IEEE80211_STA_AUTHORIZED ||
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ if (test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
+ }
+
+ if (is_sta &&
+ old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ if (find_first_bit(wl->roc_map,
+ WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
+ WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
+ wl12xx_roc(wl, wlvif, wlvif->role_id,
+ wlvif->band, wlvif->channel);
+ }
+ }
return 0;
}
@@ -4502,18 +4758,18 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
hlid = wlvif->sta.hlid;
- ba_bitmap = &wlvif->sta.ba_rx_bitmap;
} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
struct wl1271_station *wl_sta;
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
- ba_bitmap = &wl->links[hlid].ba_bitmap;
} else {
ret = -EINVAL;
goto out;
}
+ ba_bitmap = &wl->links[hlid].ba_bitmap;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -4575,7 +4831,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
* Falling break here on purpose for all TX AMPDU commands.
*/
case IEEE80211_AMPDU_TX_START:
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
case IEEE80211_AMPDU_TX_OPERATIONAL:
ret = -EINVAL;
break;
@@ -4665,12 +4923,23 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
/* TODO: change mac80211 to pass vif as param */
wl12xx_for_each_wlvif_sta(wl, wlvif) {
- ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
+ unsigned long delay_usec;
+
+ ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
+ if (ret)
+ goto out_sleep;
- if (!ret)
- set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+ set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+
+ /* indicate failure 5 seconds after channel switch time */
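+ /* e.g. beacon_int = 100 TU and count = 10 gives ~1.02s plus a 5s grace */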
+ delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
+ ch_switch->count;
+ ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
+ usecs_to_jiffies(delay_usec) +
+ msecs_to_jiffies(5000));
}
+out_sleep:
wl1271_ps_elp_sleep(wl);
out:
@@ -4684,6 +4953,144 @@ static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
wl1271_tx_flush(wl);
}
+static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl1271 *wl = hw->priv;
+ int channel, ret = 0;
+
+ channel = ieee80211_frequency_to_channel(chan->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
+ channel, wlvif->role_id);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /* return EBUSY if we can't ROC right now */
+ if (WARN_ON(wl->roc_vif ||
+ find_first_bit(wl->roc_map,
+ WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
+ if (ret < 0)
+ goto out_sleep;
+
+ wl->roc_vif = vif;
+ ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
+ msecs_to_jiffies(duration));
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+static int __wlcore_roc_completed(struct wl1271 *wl)
+{
+ struct wl12xx_vif *wlvif;
+ int ret;
+
+ /* already completed */
+ if (unlikely(!wl->roc_vif))
+ return 0;
+
+ wlvif = wl12xx_vif_to_data(wl->roc_vif);
+
+ if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ return -EBUSY;
+
+ ret = wl12xx_stop_dev(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ wl->roc_vif = NULL;
+
+ return 0;
+}
+
+static int wlcore_roc_completed(struct wl1271 *wl)
+{
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "roc complete");
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = __wlcore_roc_completed(wl);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
+static void wlcore_roc_complete_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ int ret;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wl = container_of(dwork, struct wl1271, roc_complete_work);
+
+ ret = wlcore_roc_completed(wl);
+ if (!ret)
+ ieee80211_remain_on_channel_expired(wl->hw);
+}
+
+static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct wl1271 *wl = hw->priv;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
+
+ /* TODO: per-vif */
+ wl1271_tx_flush(wl);
+
+ /*
+ * we can't just flush_work here, because it might deadlock
+ * (as we might get called from the same workqueue)
+ */
+ cancel_delayed_work_sync(&wl->roc_complete_work);
+ wlcore_roc_completed(wl);
+
+ return 0;
+}
+
+static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl1271 *wl = hw->priv;
+
+ wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
+}
+
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
@@ -4747,20 +5154,20 @@ static struct ieee80211_rate wl1271_rates[] = {
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
- { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
- { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
- { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
- { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
- { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
- { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
- { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
- { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
- { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
- { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
- { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
- { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
- { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
- { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
+ { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
/* can't be const, mac80211 writes to this */
@@ -4801,40 +5208,40 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
- { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
- { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
- { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
- { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
- { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
- { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
- { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
- { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
- { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
- { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
- { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
- { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
- { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
- { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
- { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
- { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
- { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
- { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
- { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
- { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
- { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
- { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
- { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
- { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
- { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
- { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
- { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
- { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
- { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
- { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
- { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
- { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
- { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
- { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
+ { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
static struct ieee80211_supported_band wl1271_band_5ghz = {
@@ -4875,6 +5282,14 @@ static const struct ieee80211_ops wl1271_ops = {
.set_bitrate_mask = wl12xx_set_bitrate_mask,
.channel_switch = wl12xx_op_channel_switch,
.flush = wlcore_op_flush,
+ .remain_on_channel = wlcore_op_remain_on_channel,
+ .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
+ .add_chanctx = wlcore_op_add_chanctx,
+ .remove_chanctx = wlcore_op_remove_chanctx,
+ .change_chanctx = wlcore_op_change_chanctx,
+ .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
+ .sta_rc_update = wlcore_op_sta_rc_update,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -5044,34 +5459,6 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
-static void wl1271_connection_loss_work(struct work_struct *work)
-{
- struct delayed_work *dwork;
- struct wl1271 *wl;
- struct ieee80211_vif *vif;
- struct wl12xx_vif *wlvif;
-
- dwork = container_of(work, struct delayed_work, work);
- wl = container_of(dwork, struct wl1271, connection_loss_work);
-
- wl1271_info("Connection loss work.");
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state != WLCORE_STATE_ON))
- goto out;
-
- /* Call mac80211 connection loss */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- goto out;
- vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_connection_loss(vif);
- }
-out:
- mutex_unlock(&wl->mutex);
-}
-
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
int i;
@@ -5117,7 +5504,7 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
ret = wl12xx_set_power_on(wl);
if (ret < 0)
- goto out;
+ return ret;
ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
if (ret < 0)
@@ -5207,10 +5594,9 @@ static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
},
};
-static const struct ieee80211_iface_combination
+static struct ieee80211_iface_combination
wlcore_iface_combinations[] = {
{
- .num_different_channels = 1,
.max_interfaces = 3,
.limits = wlcore_iface_limits,
.n_limits = ARRAY_SIZE(wlcore_iface_limits),
@@ -5219,6 +5605,7 @@ wlcore_iface_combinations[] = {
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
+ int i;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -5249,7 +5636,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_SCAN_WHILE_IDLE;
+ IEEE80211_HW_QUEUE_CONTROL;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5271,6 +5658,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
+ wl->hw->wiphy->max_remain_on_channel_duration = 5000;
+
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -5279,6 +5668,22 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
ARRAY_SIZE(wl1271_channels_5ghz) >
WL1271_MAX_CHANNELS);
/*
+ * clear channel flags from the previous usage
+ * and restore max_power & max_antenna_gain values.
+ */
+ for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
+ wl1271_band_2ghz.channels[i].flags = 0;
+ wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
+ wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
+ wl1271_band_5ghz.channels[i].flags = 0;
+ wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
+ wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
+ }
+
+ /*
* We keep local copies of the band structs because we need to
* modify them on a per-device basis.
*/
@@ -5298,7 +5703,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&wl->bands[IEEE80211_BAND_5GHZ];
- wl->hw->queues = 4;
+ /*
+	 * allow 4 AC queues + 1 cab queue per supported mac address,
+	 * plus one global offchannel Tx queue
+ */
+ wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
+
+ /* the last queue is the offchannel queue */
+ wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
wl->hw->max_rates = 1;
wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
@@ -5311,6 +5723,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
/* allowed interface combinations */
+ wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
wl->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(wlcore_iface_combinations);
@@ -5327,7 +5740,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
#define WL1271_DEFAULT_CHANNEL 0
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
+ u32 mbox_size)
{
struct ieee80211_hw *hw;
struct wl1271 *wl;
@@ -5369,9 +5783,8 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
+ INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
- INIT_DELAYED_WORK(&wl->connection_loss_work,
- wl1271_connection_loss_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -5387,14 +5800,15 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
wl->flags = 0;
wl->sg_enabled = true;
wl->sleep_auth = WL1271_PSM_ILLEGAL;
+ wl->recovery_count = 0;
wl->hw_pg_ver = -1;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
wl->platform_quirks = 0;
- wl->sched_scanning = false;
wl->system_hlid = WL12XX_SYSTEM_HLID;
wl->active_sta_count = 0;
+ wl->active_link_count = 0;
wl->fwlog_size = 0;
init_waitqueue_head(&wl->fwlog_waitq);
@@ -5434,14 +5848,24 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
goto err_dummy_packet;
}
- wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
+ wl->mbox_size = mbox_size;
+ wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
if (!wl->mbox) {
ret = -ENOMEM;
goto err_fwlog;
}
+ wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
+ if (!wl->buffer_32) {
+ ret = -ENOMEM;
+ goto err_mbox;
+ }
+
return hw;
+err_mbox:
+ kfree(wl->mbox);
+
err_fwlog:
free_page((unsigned long)wl->fwlog);
@@ -5480,6 +5904,8 @@ int wlcore_free_hw(struct wl1271 *wl)
device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+ kfree(wl->buffer_32);
+ kfree(wl->mbox);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
@@ -5536,7 +5962,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
struct wl1271 *wl = context;
struct platform_device *pdev = wl->pdev;
- struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+ struct wl12xx_platform_data *pdata = pdev_data->pdata;
unsigned long irqflags;
int ret;
@@ -5565,8 +5992,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
wl->irq = platform_get_irq(pdev, 0);
wl->platform_quirks = pdata->platform_quirks;
- wl->set_power = pdata->set_power;
- wl->if_ops = pdata->ops;
+ wl->if_ops = pdev_data->if_ops;
if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
irqflags = IRQF_TRIGGER_RISING;
@@ -5712,10 +6138,10 @@ module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
"FW logger options: continuous, ondemand, dbgpins or disable");
-module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
+module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
-module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
+module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
MODULE_LICENSE("GPL");
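For orientation, here is a minimal userspace sketch of the queue budget computed in wl1271_init_ieee80211() above. NUM_TX_QUEUES = 4, WLCORE_NUM_MAC_ADDRESSES = 2 and the per-MAC grouping shown (four AC queues followed by a cab queue) are assumptions for illustration, not values taken from this patch; only the arithmetic mirrors the code.

#include <stdio.h>

/* assumed values, for illustration only */
#define NUM_TX_QUEUES            4
#define WLCORE_NUM_MAC_ADDRESSES 2

int main(void)
{
	int queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
	int offchannel_queue = queues - 1;
	int mac;

	printf("total hw queues: %d\n", queues);
	for (mac = 0; mac < WLCORE_NUM_MAC_ADDRESSES; mac++)
		printf("mac %d: AC queues %d-%d, cab queue %d\n", mac,
		       mac * (NUM_TX_QUEUES + 1),
		       mac * (NUM_TX_QUEUES + 1) + NUM_TX_QUEUES - 1,
		       mac * (NUM_TX_QUEUES + 1) + NUM_TX_QUEUES);
	printf("offchannel queue: %d\n", offchannel_queue);
	return 0;
}

With the assumed constants this prints 11 hardware queues, with the last index reserved for off-channel Tx, which is what the two assignments above encode.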
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 4d1414a673fb..9b7b6e2e4fbc 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -151,9 +151,6 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
wl12xx_queue_recovery_work(wl);
ret = -ETIMEDOUT;
goto err;
- } else if (ret < 0) {
- wl1271_error("ELP wakeup completion error.");
- goto err;
}
}
@@ -242,11 +239,12 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
struct ieee80211_tx_info *info;
unsigned long flags;
int filtered[NUM_TX_QUEUES];
+ struct wl1271_link *lnk = &wl->links[hlid];
/* filter all frames currently in the low level queues for this hlid */
for (i = 0; i < NUM_TX_QUEUES; i++) {
filtered[i] = 0;
- while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
filtered[i]++;
if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
@@ -260,8 +258,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
}
spin_lock_irqsave(&wl->wl_lock, flags);
- for (i = 0; i < NUM_TX_QUEUES; i++)
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_queue_count[i] -= filtered[i];
+ if (lnk->wlvif)
+ lnk->wlvif->tx_queue_count[i] -= filtered[i];
+ }
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
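The ps.c hunk above keeps the owning vif's tx_queue_count in step with the global per-AC counter whenever frames are filtered out of a link's low-level queues. A minimal userspace model of that invariant follows; the array sizes and the enqueue/filter helpers are invented for illustration.

#include <stdio.h>

#define NUM_TX_QUEUES 4	/* assumed: one counter per AC */
#define NUM_VIFS      2	/* illustrative */

static unsigned global_count[NUM_TX_QUEUES];
static unsigned vif_count[NUM_VIFS][NUM_TX_QUEUES];

/* model: both counters always move together */
static void enqueue(int vif, int q) { global_count[q]++; vif_count[vif][q]++; }
static void filter(int vif, int q, unsigned n)
{
	global_count[q] -= n;
	vif_count[vif][q] -= n;
}

int main(void)
{
	int q;

	enqueue(0, 2); enqueue(0, 2); enqueue(1, 2);
	filter(0, 2, 2);	/* frames dropped for a link owned by vif 0 */

	for (q = 0; q < NUM_TX_QUEUES; q++)
		printf("q%d: global %u = vif0 %u + vif1 %u\n", q,
		       global_count[q], vif_count[0][q], vif_count[1][q]);
	return 0;
}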
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 9ee0ec6fd1db..6791a1a6afba 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -92,11 +92,16 @@ static void wl1271_rx_status(struct wl1271 *wl,
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- if (unlikely(desc_err_code == WL1271_RX_DESC_MIC_FAIL)) {
+ if (unlikely(desc_err_code & WL1271_RX_DESC_MIC_FAIL)) {
status->flag |= RX_FLAG_MMIC_ERROR;
- wl1271_warning("Michael MIC error");
+ wl1271_warning("Michael MIC error. Desc: 0x%x",
+ desc_err_code);
}
}
+
+ if (beacon)
+ wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
+ status->band);
}
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
@@ -108,7 +113,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
u8 *buf;
u8 beacon = 0;
u8 is_data = 0;
- u8 reserved = 0;
+ u8 reserved = 0, offset_to_data = 0;
u16 seq_num;
u32 pkt_data_len;
@@ -128,6 +133,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
if (rx_align == WLCORE_RX_BUF_UNALIGNED)
reserved = RX_BUF_ALIGN;
+ else if (rx_align == WLCORE_RX_BUF_PADDED)
+ offset_to_data = RX_BUF_ALIGN;
/* the data read starts with the descriptor */
desc = (struct wl1271_rx_descriptor *) data;
@@ -139,19 +146,15 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
return 0;
}
- switch (desc->status & WL1271_RX_DESC_STATUS_MASK) {
/* discard corrupted packets */
- case WL1271_RX_DESC_DRIVER_RX_Q_FAIL:
- case WL1271_RX_DESC_DECRYPT_FAIL:
- wl1271_warning("corrupted packet in RX with status: 0x%x",
- desc->status & WL1271_RX_DESC_STATUS_MASK);
- return -EINVAL;
- case WL1271_RX_DESC_SUCCESS:
- case WL1271_RX_DESC_MIC_FAIL:
- break;
- default:
- wl1271_error("invalid RX descriptor status: 0x%x",
- desc->status & WL1271_RX_DESC_STATUS_MASK);
+ if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) {
+ hdr = (void *)(data + sizeof(*desc) + offset_to_data);
+ wl1271_warning("corrupted packet in RX: status: 0x%x len: %d",
+ desc->status & WL1271_RX_DESC_STATUS_MASK,
+ pkt_data_len);
+ wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc),
+ min(pkt_data_len,
+ ieee80211_hdrlen(hdr->frame_control)));
return -EINVAL;
}
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 71eba1899915..3363f60fb7da 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -84,12 +84,11 @@
* Bits 3-5 - process_id tag (AP mode FW)
* Bits 6-7 - reserved
*/
-#define WL1271_RX_DESC_STATUS_MASK 0x03
+#define WL1271_RX_DESC_STATUS_MASK 0x07
#define WL1271_RX_DESC_SUCCESS 0x00
#define WL1271_RX_DESC_DECRYPT_FAIL 0x01
#define WL1271_RX_DESC_MIC_FAIL 0x02
-#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03
#define RX_MEM_BLOCK_MASK 0xFF
#define RX_BUF_SIZE_MASK 0xFFF00
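With the mask widened to 0x07 and the exact-match switch in rx.c replaced by bit tests, decrypt and MIC failures behave as independent flags rather than enumerated status codes. The following is a rough userspace model of the resulting checks, not the driver code itself.

#include <stdio.h>

#define WL1271_RX_DESC_STATUS_MASK   0x07
#define WL1271_RX_DESC_SUCCESS       0x00
#define WL1271_RX_DESC_DECRYPT_FAIL  0x01
#define WL1271_RX_DESC_MIC_FAIL      0x02

/* illustrative model of the new bit-wise status handling */
static void classify(unsigned char status)
{
	unsigned char err = status & WL1271_RX_DESC_STATUS_MASK;

	if (err & WL1271_RX_DESC_DECRYPT_FAIL)
		printf("0x%02x: dropped, decrypt failed\n", status);
	else if (err & WL1271_RX_DESC_MIC_FAIL)
		printf("0x%02x: delivered, MIC error flagged\n", status);
	else
		printf("0x%02x: delivered\n", status);
}

int main(void)
{
	classify(0x00);
	classify(0x01);
	classify(0x02);
	classify(0x03);	/* decrypt bit wins: treated as corrupted */
	return 0;
}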
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index d00501493dfe..f407101e525b 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -35,7 +35,6 @@ void wl1271_scan_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
- struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
int ret;
@@ -52,8 +51,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
- vif = wl->scan_vif;
- wlvif = wl12xx_vif_to_data(vif);
+ wlvif = wl->scan_wlvif;
/*
* Rearm the tx watchdog just before idling scan. This
@@ -64,7 +62,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
- wl->scan_vif = NULL;
+ wl->scan_wlvif = NULL;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
@@ -82,6 +80,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl12xx_queue_recovery_work(wl);
}
+ wlcore_cmd_regdomain_config_locked(wl);
+
ieee80211_scan_completed(wl->hw, false);
out:
@@ -89,371 +89,99 @@ out:
}
-
-static int wl1271_get_scan_channels(struct wl1271 *wl,
- struct cfg80211_scan_request *req,
- struct basic_scan_channel_params *channels,
- enum ieee80211_band band, bool passive)
-{
- struct conf_scan_settings *c = &wl->conf.scan;
- int i, j;
- u32 flags;
-
- for (i = 0, j = 0;
- i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
- i++) {
- flags = req->channels[i]->flags;
-
- if (!test_bit(i, wl->scan.scanned_ch) &&
- !(flags & IEEE80211_CHAN_DISABLED) &&
- (req->channels[i]->band == band) &&
- /*
- * In passive scans, we scan all remaining
- * channels, even if not marked as such.
- * In active scans, we only scan channels not
- * marked as passive.
- */
- (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
- wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
- req->channels[i]->band,
- req->channels[i]->center_freq);
- wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
- req->channels[i]->hw_value,
- req->channels[i]->flags);
- wl1271_debug(DEBUG_SCAN,
- "max_antenna_gain %d, max_power %d",
- req->channels[i]->max_antenna_gain,
- req->channels[i]->max_power);
- wl1271_debug(DEBUG_SCAN, "beacon_found %d",
- req->channels[i]->beacon_found);
-
- if (!passive) {
- channels[j].min_duration =
- cpu_to_le32(c->min_dwell_time_active);
- channels[j].max_duration =
- cpu_to_le32(c->max_dwell_time_active);
- } else {
- channels[j].min_duration =
- cpu_to_le32(c->min_dwell_time_passive);
- channels[j].max_duration =
- cpu_to_le32(c->max_dwell_time_passive);
- }
- channels[j].early_termination = 0;
- channels[j].tx_power_att = req->channels[i]->max_power;
- channels[j].channel = req->channels[i]->hw_value;
-
- memset(&channels[j].bssid_lsb, 0xff, 4);
- memset(&channels[j].bssid_msb, 0xff, 2);
-
- /* Mark the channels we already used */
- set_bit(i, wl->scan.scanned_ch);
-
- j++;
- }
- }
-
- return j;
-}
-
-#define WL1271_NOTHING_TO_SCAN 1
-
-static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
- enum ieee80211_band band,
- bool passive, u32 basic_rate)
+static void wlcore_started_vifs_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- struct wl1271_cmd_scan *cmd;
- struct wl1271_cmd_trigger_scan_to *trigger;
- int ret;
- u16 scan_options = 0;
-
- /* skip active scans if we don't have SSIDs */
- if (!passive && wl->scan.req->n_ssids == 0)
- return WL1271_NOTHING_TO_SCAN;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
- if (!cmd || !trigger) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (wl->conf.scan.split_scan_timeout)
- scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
-
- if (passive)
- scan_options |= WL1271_SCAN_OPT_PASSIVE;
-
- cmd->params.role_id = wlvif->role_id;
-
- if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
- ret = -EINVAL;
- goto out;
- }
-
- cmd->params.scan_options = cpu_to_le16(scan_options);
-
- cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
- cmd->channels,
- band, passive);
- if (cmd->params.n_ch == 0) {
- ret = WL1271_NOTHING_TO_SCAN;
- goto out;
- }
-
- cmd->params.tx_rate = cpu_to_le32(basic_rate);
- cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
- cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
- cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
-
- if (band == IEEE80211_BAND_2GHZ)
- cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
- else
- cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
-
- if (wl->scan.ssid_len && wl->scan.ssid) {
- cmd->params.ssid_len = wl->scan.ssid_len;
- memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
- }
-
- memcpy(cmd->addr, vif->addr, ETH_ALEN);
-
- ret = wl12xx_cmd_build_probe_req(wl, wlvif,
- cmd->params.role_id, band,
- wl->scan.ssid, wl->scan.ssid_len,
- wl->scan.req->ie,
- wl->scan.req->ie_len, false);
- if (ret < 0) {
- wl1271_error("PROBE request template failed");
- goto out;
- }
-
- trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
- ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
- sizeof(*trigger), 0);
- if (ret < 0) {
- wl1271_error("trigger scan to failed for hw scan");
- goto out;
- }
-
- wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+ int *count = (int *)data;
- ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
- if (ret < 0) {
- wl1271_error("SCAN failed");
- goto out;
- }
-
-out:
- kfree(cmd);
- kfree(trigger);
- return ret;
+ if (!vif->bss_conf.idle)
+ (*count)++;
}
-void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
+static int wlcore_count_started_vifs(struct wl1271 *wl)
{
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- int ret = 0;
- enum ieee80211_band band;
- u32 rate, mask;
-
- switch (wl->scan.state) {
- case WL1271_SCAN_STATE_IDLE:
- break;
-
- case WL1271_SCAN_STATE_2GHZ_ACTIVE:
- band = IEEE80211_BAND_2GHZ;
- mask = wlvif->bitrate_masks[band];
- if (wl->scan.req->no_cck) {
- mask &= ~CONF_TX_CCK_RATES;
- if (!mask)
- mask = CONF_TX_RATE_MASK_BASIC_P2P;
- }
- rate = wl1271_tx_min_rate_get(wl, mask);
- ret = wl1271_scan_send(wl, vif, band, false, rate);
- if (ret == WL1271_NOTHING_TO_SCAN) {
- wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
- wl1271_scan_stm(wl, vif);
- }
-
- break;
-
- case WL1271_SCAN_STATE_2GHZ_PASSIVE:
- band = IEEE80211_BAND_2GHZ;
- mask = wlvif->bitrate_masks[band];
- if (wl->scan.req->no_cck) {
- mask &= ~CONF_TX_CCK_RATES;
- if (!mask)
- mask = CONF_TX_RATE_MASK_BASIC_P2P;
- }
- rate = wl1271_tx_min_rate_get(wl, mask);
- ret = wl1271_scan_send(wl, vif, band, true, rate);
- if (ret == WL1271_NOTHING_TO_SCAN) {
- if (wl->enable_11a)
- wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
- else
- wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl, vif);
- }
-
- break;
+ int count = 0;
- case WL1271_SCAN_STATE_5GHZ_ACTIVE:
- band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, vif, band, false, rate);
- if (ret == WL1271_NOTHING_TO_SCAN) {
- wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
- wl1271_scan_stm(wl, vif);
- }
-
- break;
-
- case WL1271_SCAN_STATE_5GHZ_PASSIVE:
- band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, vif, band, true, rate);
- if (ret == WL1271_NOTHING_TO_SCAN) {
- wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl, vif);
- }
-
- break;
-
- case WL1271_SCAN_STATE_DONE:
- wl->scan.failed = false;
- cancel_delayed_work(&wl->scan_complete_work);
- ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
- msecs_to_jiffies(0));
- break;
-
- default:
- wl1271_error("invalid scan state");
- break;
- }
-
- if (ret < 0) {
- cancel_delayed_work(&wl->scan_complete_work);
- ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
- msecs_to_jiffies(0));
- }
-}
-
-int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
- const u8 *ssid, size_t ssid_len,
- struct cfg80211_scan_request *req)
-{
- /*
- * cfg80211 should guarantee that we don't get more channels
- * than what we have registered.
- */
- BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
-
- if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
- return -EBUSY;
-
- wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE;
-
- if (ssid_len && ssid) {
- wl->scan.ssid_len = ssid_len;
- memcpy(wl->scan.ssid, ssid, ssid_len);
- } else {
- wl->scan.ssid_len = 0;
- }
-
- wl->scan_vif = vif;
- wl->scan.req = req;
- memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
-
- /* we assume failure so that timeout scenarios are handled correctly */
- wl->scan.failed = true;
- ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
- msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
-
- wl1271_scan_stm(wl, vif);
-
- return 0;
-}
-
-int wl1271_scan_stop(struct wl1271 *wl)
-{
- struct wl1271_cmd_header *cmd = NULL;
- int ret = 0;
-
- if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
- return -EINVAL;
-
- wl1271_debug(DEBUG_CMD, "cmd scan stop");
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
- sizeof(*cmd), 0);
- if (ret < 0) {
- wl1271_error("cmd stop_scan failed");
- goto out;
- }
-out:
- kfree(cmd);
- return ret;
+ ieee80211_iterate_active_interfaces_atomic(wl->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ wlcore_started_vifs_iter, &count);
+ return count;
}
static int
-wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
- struct cfg80211_sched_scan_request *req,
- struct conn_scan_ch_params *channels,
- u32 band, bool radar, bool passive,
- int start, int max_channels,
- u8 *n_pactive_ch)
+wlcore_scan_get_channels(struct wl1271 *wl,
+ struct ieee80211_channel *req_channels[],
+ u32 n_channels,
+ u32 n_ssids,
+ struct conn_scan_ch_params *channels,
+ u32 band, bool radar, bool passive,
+ int start, int max_channels,
+ u8 *n_pactive_ch,
+ int scan_type)
{
- struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, j;
u32 flags;
- bool force_passive = !req->n_ssids;
- u32 min_dwell_time_active, max_dwell_time_active, delta_per_probe;
+ bool force_passive = !n_ssids;
+ u32 min_dwell_time_active, max_dwell_time_active;
u32 dwell_time_passive, dwell_time_dfs;
- if (band == IEEE80211_BAND_5GHZ)
- delta_per_probe = c->dwell_time_delta_per_probe_5;
- else
- delta_per_probe = c->dwell_time_delta_per_probe;
+ /* configure dwell times according to scan type */
+ if (scan_type == SCAN_TYPE_SEARCH) {
+ struct conf_scan_settings *c = &wl->conf.scan;
+ bool active_vif_exists = !!wlcore_count_started_vifs(wl);
+
+ min_dwell_time_active = active_vif_exists ?
+ c->min_dwell_time_active :
+ c->min_dwell_time_active_long;
+ max_dwell_time_active = active_vif_exists ?
+ c->max_dwell_time_active :
+ c->max_dwell_time_active_long;
+ dwell_time_passive = c->dwell_time_passive;
+ dwell_time_dfs = c->dwell_time_dfs;
+ } else {
+ struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+ u32 delta_per_probe;
- min_dwell_time_active = c->base_dwell_time +
- req->n_ssids * c->num_probe_reqs * delta_per_probe;
+ if (band == IEEE80211_BAND_5GHZ)
+ delta_per_probe = c->dwell_time_delta_per_probe_5;
+ else
+ delta_per_probe = c->dwell_time_delta_per_probe;
- max_dwell_time_active = min_dwell_time_active + c->max_dwell_time_delta;
+ min_dwell_time_active = c->base_dwell_time +
+ n_ssids * c->num_probe_reqs * delta_per_probe;
+ max_dwell_time_active = min_dwell_time_active +
+ c->max_dwell_time_delta;
+ dwell_time_passive = c->dwell_time_passive;
+ dwell_time_dfs = c->dwell_time_dfs;
+ }
min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000);
max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000);
- dwell_time_passive = DIV_ROUND_UP(c->dwell_time_passive, 1000);
- dwell_time_dfs = DIV_ROUND_UP(c->dwell_time_dfs, 1000);
+ dwell_time_passive = DIV_ROUND_UP(dwell_time_passive, 1000);
+ dwell_time_dfs = DIV_ROUND_UP(dwell_time_dfs, 1000);
for (i = 0, j = start;
- i < req->n_channels && j < max_channels;
+ i < n_channels && j < max_channels;
i++) {
- flags = req->channels[i]->flags;
+ flags = req_channels[i]->flags;
if (force_passive)
flags |= IEEE80211_CHAN_PASSIVE_SCAN;
- if ((req->channels[i]->band == band) &&
+ if ((req_channels[i]->band == band) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
(!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
/* if radar is set, we ignore the passive flag */
(radar ||
!!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
- req->channels[i]->band,
- req->channels[i]->center_freq);
+ req_channels[i]->band,
+ req_channels[i]->center_freq);
wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
- req->channels[i]->hw_value,
- req->channels[i]->flags);
+ req_channels[i]->hw_value,
+ req_channels[i]->flags);
wl1271_debug(DEBUG_SCAN, "max_power %d",
- req->channels[i]->max_power);
+ req_channels[i]->max_power);
wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
min_dwell_time_active,
max_dwell_time_active);
@@ -473,10 +201,11 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
channels[j].max_duration =
cpu_to_le16(max_dwell_time_active);
- channels[j].tx_power_att = req->channels[i]->max_power;
- channels[j].channel = req->channels[i]->hw_value;
+ channels[j].tx_power_att = req_channels[i]->max_power;
+ channels[j].channel = req_channels[i]->hw_value;
- if ((band == IEEE80211_BAND_2GHZ) &&
+ if (n_pactive_ch &&
+ (band == IEEE80211_BAND_2GHZ) &&
(channels[j].channel >= 12) &&
(channels[j].channel <= 14) &&
(flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
@@ -500,51 +229,80 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
return j - start;
}
-static bool
-wl1271_scan_sched_scan_channels(struct wl1271 *wl,
- struct cfg80211_sched_scan_request *req,
- struct wl1271_cmd_sched_scan_config *cfg)
+bool
+wlcore_set_scan_chan_params(struct wl1271 *wl,
+ struct wlcore_scan_channels *cfg,
+ struct ieee80211_channel *channels[],
+ u32 n_channels,
+ u32 n_ssids,
+ int scan_type)
{
u8 n_pactive_ch = 0;
cfg->passive[0] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
- IEEE80211_BAND_2GHZ,
- false, true, 0,
- MAX_CHANNELS_2GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_2,
+ IEEE80211_BAND_2GHZ,
+ false, true, 0,
+ MAX_CHANNELS_2GHZ,
+ &n_pactive_ch,
+ scan_type);
cfg->active[0] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
- IEEE80211_BAND_2GHZ,
- false, false,
- cfg->passive[0],
- MAX_CHANNELS_2GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_2,
+ IEEE80211_BAND_2GHZ,
+ false, false,
+ cfg->passive[0],
+ MAX_CHANNELS_2GHZ,
+ &n_pactive_ch,
+ scan_type);
cfg->passive[1] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
- IEEE80211_BAND_5GHZ,
- false, true, 0,
- MAX_CHANNELS_5GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_5,
+ IEEE80211_BAND_5GHZ,
+ false, true, 0,
+ wl->max_channels_5,
+ &n_pactive_ch,
+ scan_type);
cfg->dfs =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
- IEEE80211_BAND_5GHZ,
- true, true,
- cfg->passive[1],
- MAX_CHANNELS_5GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_5,
+ IEEE80211_BAND_5GHZ,
+ true, true,
+ cfg->passive[1],
+ wl->max_channels_5,
+ &n_pactive_ch,
+ scan_type);
cfg->active[1] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
- IEEE80211_BAND_5GHZ,
- false, false,
- cfg->passive[1] + cfg->dfs,
- MAX_CHANNELS_5GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_5,
+ IEEE80211_BAND_5GHZ,
+ false, false,
+ cfg->passive[1] + cfg->dfs,
+ wl->max_channels_5,
+ &n_pactive_ch,
+ scan_type);
+
/* 802.11j channels are not supported yet */
cfg->passive[2] = 0;
cfg->active[2] = 0;
- cfg->n_pactive_ch = n_pactive_ch;
+ cfg->passive_active = n_pactive_ch;
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
@@ -556,10 +314,48 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
cfg->passive[1] || cfg->active[1] || cfg->dfs ||
cfg->passive[2] || cfg->active[2];
}
+EXPORT_SYMBOL_GPL(wlcore_set_scan_chan_params);
+
+int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
+ struct cfg80211_scan_request *req)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
+ /*
+ * cfg80211 should guarantee that we don't get more channels
+ * than what we have registered.
+ */
+ BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
+
+ if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
+ return -EBUSY;
+
+ wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE;
+
+ if (ssid_len && ssid) {
+ wl->scan.ssid_len = ssid_len;
+ memcpy(wl->scan.ssid, ssid, ssid_len);
+ } else {
+ wl->scan.ssid_len = 0;
+ }
+
+ wl->scan_wlvif = wlvif;
+ wl->scan.req = req;
+ memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+
+ /* we assume failure so that timeout scenarios are handled correctly */
+ wl->scan.failed = true;
+ ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+ msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
+ wl->ops->scan_start(wl, wlvif, req);
+
+ return 0;
+}
/* Returns the scan type to be used or a negative value on error */
-static int
-wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
+int
+wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req)
{
@@ -662,160 +458,12 @@ out:
return ret;
return type;
}
+EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_ssid_list);
-int wl1271_scan_sched_scan_config(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_sched_scan_ies *ies)
-{
- struct wl1271_cmd_sched_scan_config *cfg = NULL;
- struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
- int i, ret;
- bool force_passive = !req->n_ssids;
-
- wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
-
- cfg->role_id = wlvif->role_id;
- cfg->rssi_threshold = c->rssi_threshold;
- cfg->snr_threshold = c->snr_threshold;
- cfg->n_probe_reqs = c->num_probe_reqs;
- /* cycles set to 0 it means infinite (until manually stopped) */
- cfg->cycles = 0;
- /* report APs when at least 1 is found */
- cfg->report_after = 1;
- /* don't stop scanning automatically when something is found */
- cfg->terminate = 0;
- cfg->tag = WL1271_SCAN_DEFAULT_TAG;
- /* don't filter on BSS type */
- cfg->bss_type = SCAN_BSS_TYPE_ANY;
- /* currently NL80211 supports only a single interval */
- for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
- cfg->intervals[i] = cpu_to_le32(req->interval);
-
- cfg->ssid_len = 0;
- ret = wl12xx_scan_sched_scan_ssid_list(wl, wlvif, req);
- if (ret < 0)
- goto out;
-
- cfg->filter_type = ret;
-
- wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
-
- if (!wl1271_scan_sched_scan_channels(wl, req, cfg)) {
- wl1271_error("scan channel list is empty");
- ret = -EINVAL;
- goto out;
- }
-
- if (!force_passive && cfg->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
- ret = wl12xx_cmd_build_probe_req(wl, wlvif,
- wlvif->role_id, band,
- req->ssids[0].ssid,
- req->ssids[0].ssid_len,
- ies->ie[band],
- ies->len[band], true);
- if (ret < 0) {
- wl1271_error("2.4GHz PROBE request template failed");
- goto out;
- }
- }
-
- if (!force_passive && cfg->active[1]) {
- u8 band = IEEE80211_BAND_5GHZ;
- ret = wl12xx_cmd_build_probe_req(wl, wlvif,
- wlvif->role_id, band,
- req->ssids[0].ssid,
- req->ssids[0].ssid_len,
- ies->ie[band],
- ies->len[band], true);
- if (ret < 0) {
- wl1271_error("5GHz PROBE request template failed");
- goto out;
- }
- }
-
- wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
-
- ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
- sizeof(*cfg), 0);
- if (ret < 0) {
- wl1271_error("SCAN configuration failed");
- goto out;
- }
-out:
- kfree(cfg);
- return ret;
-}
-
-int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
-{
- struct wl1271_cmd_sched_scan_start *start;
- int ret = 0;
-
- wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
-
- if (wlvif->bss_type != BSS_TYPE_STA_BSS)
- return -EOPNOTSUPP;
-
- if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
- test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
- return -EBUSY;
-
- start = kzalloc(sizeof(*start), GFP_KERNEL);
- if (!start)
- return -ENOMEM;
-
- start->role_id = wlvif->role_id;
- start->tag = WL1271_SCAN_DEFAULT_TAG;
-
- ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
- sizeof(*start), 0);
- if (ret < 0) {
- wl1271_error("failed to send scan start command");
- goto out_free;
- }
-
-out_free:
- kfree(start);
- return ret;
-}
-
-void wl1271_scan_sched_scan_results(struct wl1271 *wl)
+void wlcore_scan_sched_scan_results(struct wl1271 *wl)
{
wl1271_debug(DEBUG_SCAN, "got periodic scan results");
ieee80211_sched_scan_results(wl->hw);
}
-
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
-{
- struct wl1271_cmd_sched_scan_stop *stop;
- int ret = 0;
-
- wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
-
- /* FIXME: what to do if alloc'ing to stop fails? */
- stop = kzalloc(sizeof(*stop), GFP_KERNEL);
- if (!stop) {
- wl1271_error("failed to alloc memory to send sched scan stop");
- return;
- }
-
- stop->role_id = wlvif->role_id;
- stop->tag = WL1271_SCAN_DEFAULT_TAG;
-
- ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
- sizeof(*stop), 0);
- if (ret < 0) {
- wl1271_error("failed to send sched scan stop command");
- goto out_free;
- }
-
-out_free:
- kfree(stop);
-}
+EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_results);
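wlcore_scan_get_channels() now picks dwell times by scan type: one-shot scans (SCAN_TYPE_SEARCH) use conf.scan values, falling back to the longer variants when no interface is started, while periodic scans keep the conf.sched_scan formula of a base time plus a per-probe delta. Below is a rough userspace model of that selection; every configuration value is made up for illustration, and the DIV_ROUND_UP(x, 1000) step is mirrored as-is without assuming which unit conversion it performs.

#include <stdio.h>

struct scan_conf  { unsigned min_act, max_act, min_act_long, max_act_long; };
struct sched_conf { unsigned base, delta_per_probe, delta_per_probe_5,
		    max_delta, num_probe_reqs; };

enum { SCAN_TYPE_SEARCH = 0, SCAN_TYPE_PERIODIC = 1 };

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void dwell_times(int scan_type, int active_vif_exists, int band_5ghz,
			unsigned n_ssids, const struct scan_conf *sc,
			const struct sched_conf *pc)
{
	unsigned min_active, max_active;

	if (scan_type == SCAN_TYPE_SEARCH) {
		/* one-shot scan: longer dwell when nothing else is running */
		min_active = active_vif_exists ? sc->min_act : sc->min_act_long;
		max_active = active_vif_exists ? sc->max_act : sc->max_act_long;
	} else {
		/* periodic scan: base time plus a per-probe delta */
		unsigned delta = band_5ghz ? pc->delta_per_probe_5
					   : pc->delta_per_probe;
		min_active = pc->base + n_ssids * pc->num_probe_reqs * delta;
		max_active = min_active + pc->max_delta;
	}

	printf("min %u max %u (per-channel units)\n",
	       DIV_ROUND_UP(min_active, 1000), DIV_ROUND_UP(max_active, 1000));
}

int main(void)
{
	struct scan_conf  sc = { 7500, 30000, 25000, 50000 };
	struct sched_conf pc = { 30000, 1000, 500, 10000, 2 };

	dwell_times(SCAN_TYPE_SEARCH, 1, 0, 1, &sc, &pc);
	dwell_times(SCAN_TYPE_SEARCH, 0, 0, 1, &sc, &pc);
	dwell_times(SCAN_TYPE_PERIODIC, 0, 1, 2, &sc, &pc);
	return 0;
}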
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 29f3c8d6b046..a6ab24b5c0f9 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -26,22 +26,20 @@
#include "wlcore.h"
-int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
-int wl1271_scan_stop(struct wl1271 *wl);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
+void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl1271_scan_complete_work(struct work_struct *work);
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies);
int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl1271_scan_sched_scan_results(struct wl1271 *wl);
+void wlcore_scan_sched_scan_results(struct wl1271 *wl);
#define WL1271_SCAN_MAX_CHANNELS 24
#define WL1271_SCAN_DEFAULT_TAG 1
@@ -66,56 +64,6 @@ enum {
WL1271_SCAN_STATE_DONE
};
-struct basic_scan_params {
- /* Scan option flags (WL1271_SCAN_OPT_*) */
- __le16 scan_options;
- u8 role_id;
- /* Number of scan channels in the list (maximum 30) */
- u8 n_ch;
- /* This field indicates the number of probe requests to send
- per channel for an active scan */
- u8 n_probe_reqs;
- u8 tid_trigger;
- u8 ssid_len;
- u8 use_ssid_list;
-
- /* Rate bit field for sending the probes */
- __le32 tx_rate;
-
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- /* Band to scan */
- u8 band;
-
- u8 scan_tag;
- u8 padding2[2];
-} __packed;
-
-struct basic_scan_channel_params {
- /* Duration in TU to wait for frames on a channel for active scan */
- __le32 min_duration;
- __le32 max_duration;
- __le32 bssid_lsb;
- __le16 bssid_msb;
- u8 early_termination;
- u8 tx_power_att;
- u8 channel;
- /* FW internal use only! */
- u8 dfs_candidate;
- u8 activity_detected;
- u8 pad;
-} __packed;
-
-struct wl1271_cmd_scan {
- struct wl1271_cmd_header header;
-
- struct basic_scan_params params;
- struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
-
- /* src mac address */
- u8 addr[ETH_ALEN];
- u8 padding[2];
-} __packed;
-
struct wl1271_cmd_trigger_scan_to {
struct wl1271_cmd_header header;
@@ -123,9 +71,17 @@ struct wl1271_cmd_trigger_scan_to {
} __packed;
#define MAX_CHANNELS_2GHZ 14
-#define MAX_CHANNELS_5GHZ 23
#define MAX_CHANNELS_4GHZ 4
+/*
+ * This max value is used only for the struct definition of
+ * wlcore_scan_channels. The struct is shared by both 12xx
+ * and 18xx, which support different numbers of 5 GHz channels,
+ * so use the largest possible 5 GHz channel count to make
+ * sure it is big enough for both.
+ */
+#define MAX_CHANNELS_5GHZ 42
+
#define SCAN_MAX_CYCLE_INTERVALS 16
#define SCAN_MAX_BANDS 3
@@ -160,43 +116,6 @@ struct conn_scan_ch_params {
u8 padding[3];
} __packed;
-struct wl1271_cmd_sched_scan_config {
- struct wl1271_cmd_header header;
-
- __le32 intervals[SCAN_MAX_CYCLE_INTERVALS];
-
- s8 rssi_threshold; /* for filtering (in dBm) */
- s8 snr_threshold; /* for filtering (in dB) */
-
- u8 cycles; /* maximum number of scan cycles */
- u8 report_after; /* report when this number of results are received */
- u8 terminate; /* stop scanning after reporting */
-
- u8 tag;
- u8 bss_type; /* for filtering */
- u8 filter_type;
-
- u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */
- u8 ssid[IEEE80211_MAX_SSID_LEN];
-
- u8 n_probe_reqs; /* Number of probes requests per channel */
-
- u8 passive[SCAN_MAX_BANDS];
- u8 active[SCAN_MAX_BANDS];
-
- u8 dfs;
-
- u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
- channels in BG band */
- u8 role_id;
- u8 padding[1];
-
- struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
- struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
- struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
-} __packed;
-
-
#define SCHED_SCAN_MAX_SSIDS 16
enum {
@@ -220,21 +139,34 @@ struct wl1271_cmd_sched_scan_ssid_list {
u8 padding[2];
} __packed;
-struct wl1271_cmd_sched_scan_start {
- struct wl1271_cmd_header header;
+struct wlcore_scan_channels {
+ u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */
+ u8 active[SCAN_MAX_BANDS]; /* number of active scan channels */
+ u8 dfs; /* number of dfs channels in 5ghz */
+ u8 passive_active; /* number of passive before active channels 2.4ghz */
- u8 tag;
- u8 role_id;
- u8 padding[2];
-} __packed;
-
-struct wl1271_cmd_sched_scan_stop {
- struct wl1271_cmd_header header;
+ struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+ struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
+ struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+};
- u8 tag;
- u8 role_id;
- u8 padding[2];
-} __packed;
+enum {
+ SCAN_TYPE_SEARCH = 0,
+ SCAN_TYPE_PERIODIC = 1,
+ SCAN_TYPE_TRACKING = 2,
+};
+bool
+wlcore_set_scan_chan_params(struct wl1271 *wl,
+ struct wlcore_scan_channels *cfg,
+ struct ieee80211_channel *channels[],
+ u32 n_channels,
+ u32 n_ssids,
+ int scan_type);
+
+int
+wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req);
#endif /* __WL1271_SCAN_H__ */
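The counters in wlcore_scan_channels describe consecutive groups inside the channel arrays: judging from the fill order in wlcore_set_scan_chan_params() above, channels_5 holds its passive entries first, then the DFS entries, then the active ones. A tiny sketch of how a reader of the struct would locate each group; the counts are made-up example values.

#include <stdio.h>

/* counters as filled for the 5 GHz array (illustrative values) */
struct counts { unsigned passive_5, dfs, active_5; };

int main(void)
{
	struct counts c = { .passive_5 = 4, .dfs = 3, .active_5 = 6 };

	/* channels_5[] is filled passive first, then DFS, then active */
	printf("passive 5GHz entries: [0..%u)\n", c.passive_5);
	printf("dfs entries:          [%u..%u)\n",
	       c.passive_5, c.passive_5 + c.dfs);
	printf("active 5GHz entries:  [%u..%u)\n",
	       c.passive_5 + c.dfs, c.passive_5 + c.dfs + c.active_5);
	return 0;
}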
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 646f703ae739..29ef2492951f 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct wl12xx_platform_data *wlan_data;
+ struct wlcore_platdev_data *pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[1];
mmc_pm_flag_t mmcflags;
@@ -228,10 +228,16 @@ static int wl1271_probe(struct sdio_func *func,
if (func->num != 0x02)
return -ENODEV;
+ pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ goto out;
+
+ pdev_data->if_ops = &sdio_ops;
+
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&func->dev, "can't allocate glue\n");
- goto out;
+ goto out_free_pdev_data;
}
glue->dev = &func->dev;
@@ -242,9 +248,9 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- wlan_data = wl12xx_get_platform_data();
- if (IS_ERR(wlan_data)) {
- ret = PTR_ERR(wlan_data);
+ pdev_data->pdata = wl12xx_get_platform_data();
+ if (IS_ERR(pdev_data->pdata)) {
+ ret = PTR_ERR(pdev_data->pdata);
dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
goto out_free_glue;
}
@@ -254,9 +260,7 @@ static int wl1271_probe(struct sdio_func *func,
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- wlan_data->pwr_in_suspend = true;
-
- wlan_data->ops = &sdio_ops;
+ pdev_data->pdata->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
@@ -274,7 +278,7 @@ static int wl1271_probe(struct sdio_func *func,
else
chip_family = "wl12xx";
- glue->core = platform_device_alloc(chip_family, -1);
+ glue->core = platform_device_alloc(chip_family, PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device");
ret = -ENOMEM;
@@ -285,7 +289,7 @@ static int wl1271_probe(struct sdio_func *func,
memset(res, 0x00, sizeof(res));
- res[0].start = wlan_data->irq;
+ res[0].start = pdev_data->pdata->irq;
res[0].flags = IORESOURCE_IRQ;
res[0].name = "irq";
@@ -295,8 +299,8 @@ static int wl1271_probe(struct sdio_func *func,
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, wlan_data,
- sizeof(*wlan_data));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -315,6 +319,9 @@ out_dev_put:
out_free_glue:
kfree(glue);
+out_free_pdev_data:
+ kfree(pdev_data);
+
out:
return ret;
}
@@ -326,8 +333,7 @@ static void wl1271_remove(struct sdio_func *func)
/* Undo decrement done above in wl1271_probe */
pm_runtime_get_noresume(&func->dev);
- platform_device_del(glue->core);
- platform_device_put(glue->core);
+ platform_device_unregister(glue->core);
kfree(glue);
}
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index f06f4770ce02..e26447832683 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -270,7 +270,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
- struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
+ struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
struct spi_message m;
u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
u32 *cmd;
@@ -327,22 +327,27 @@ static struct wl1271_if_operations spi_ops = {
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
- struct wl12xx_platform_data *pdata;
+ struct wlcore_platdev_data *pdev_data;
struct resource res[1];
int ret = -ENOMEM;
- pdata = spi->dev.platform_data;
- if (!pdata) {
+ pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ goto out;
+
+ pdev_data->pdata = spi->dev.platform_data;
+ if (!pdev_data->pdata) {
dev_err(&spi->dev, "no platform data\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free_pdev_data;
}
- pdata->ops = &spi_ops;
+ pdev_data->if_ops = &spi_ops;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&spi->dev, "can't allocate glue\n");
- goto out;
+ goto out_free_pdev_data;
}
glue->dev = &spi->dev;
@@ -359,7 +364,7 @@ static int wl1271_probe(struct spi_device *spi)
goto out_free_glue;
}
- glue->core = platform_device_alloc("wl12xx", -1);
+ glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device\n");
ret = -ENOMEM;
@@ -380,7 +385,8 @@ static int wl1271_probe(struct spi_device *spi)
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -399,6 +405,10 @@ out_dev_put:
out_free_glue:
kfree(glue);
+
+out_free_pdev_data:
+ kfree(pdev_data);
+
out:
return ret;
}
@@ -407,8 +417,7 @@ static int wl1271_remove(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
- platform_device_del(glue->core);
- platform_device_put(glue->core);
+ platform_device_unregister(glue->core);
kfree(glue);
return 0;
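A plausible reading of the spi_transfer array change above is that a maximum-length write can need one more chunk than WSPI_MAX_NUM_OF_CHUNKS when the buffer length is not an exact multiple of the chunk size, and each chunk consumes a command transfer plus a data transfer. A minimal userspace sketch of that worst case; the chunk size and chunk count below are assumptions for illustration, not values taken from this patch.

#include <stdio.h>

#define CHUNK_SIZE      4092u	/* assumed WSPI_MAX_CHUNK_SIZE */
#define MAX_NUM_CHUNKS  4u	/* assumed WSPI_MAX_NUM_OF_CHUNKS */

int main(void)
{
	unsigned len = MAX_NUM_CHUNKS * CHUNK_SIZE + 100;	/* worst case */
	unsigned full = len / CHUNK_SIZE;
	unsigned partial = len % CHUNK_SIZE ? 1 : 0;
	unsigned transfers = 2 * (full + partial);	/* cmd + data per chunk */

	printf("chunks: %u full + %u partial -> %u spi_transfer entries "
	       "(array holds %u)\n",
	       full, partial, transfers, 2 * (MAX_NUM_CHUNKS + 1));
	return 0;
}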
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index a90d3cd09408..ece392c54d9c 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -104,7 +104,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid)
{
- bool fw_ps, single_sta;
+ bool fw_ps, single_link;
u8 tx_pkts;
if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
@@ -112,15 +112,15 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
tx_pkts = wl->links[hlid].allocated_pkts;
- single_sta = (wl->active_sta_count == 1);
+ single_link = (wl->active_link_count == 1);
/*
* if in FW PS and there is enough data in FW we can put the link
* into high-level PS and clean out its TX queues.
- * Make an exception if this is the only connected station. In this
- * case FW-memory congestion is not a problem.
+ * Make an exception if this is the only connected link. In this
+ * case FW-memory congestion is less of a problem.
*/
- if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
+ if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
@@ -155,21 +155,18 @@ static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, struct ieee80211_sta *sta)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-
- if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
- return wl->system_hlid;
+ struct ieee80211_tx_info *control;
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
- if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
- test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
- !ieee80211_is_auth(hdr->frame_control) &&
- !ieee80211_is_assoc_req(hdr->frame_control))
- return wlvif->sta.hlid;
- else
+ control = IEEE80211_SKB_CB(skb);
+ if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ wl1271_debug(DEBUG_TX, "tx offchannel");
return wlvif->dev_hlid;
+ }
+
+ return wlvif->sta.hlid;
}
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
@@ -224,9 +221,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
- if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
- wlvif->bss_type == BSS_TYPE_AP_BSS &&
- test_bit(hlid, wlvif->ap.sta_hlid_map))
+ if (test_bit(hlid, wl->links_map))
wl->links[hlid].allocated_pkts++;
ret = 0;
@@ -293,9 +288,14 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
} else if (wlvif) {
+ u8 session_id = wl->session_ids[hlid];
+
+ if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
+ (wlvif->bss_type == BSS_TYPE_AP_BSS))
+ session_id = 0;
+
/* configure the tx attributes */
- tx_attr = wlvif->session_counter <<
- TX_HW_ATTR_OFST_SESSION_COUNTER;
+ tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
}
desc->hlid = hlid;
@@ -452,20 +452,22 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
int i;
+ struct wl12xx_vif *wlvif;
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- if (wlcore_is_queue_stopped_by_reason(wl, i,
- WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
- wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
- /* firmware buffer has space, restart queues */
- wlcore_wake_queue(wl, i,
- WLCORE_QUEUE_STOP_REASON_WATERMARK);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
+ wlvif->tx_queue_count[i] <=
+ WL1271_TX_QUEUE_LOW_WATERMARK)
+ /* firmware buffer has space, restart queues */
+ wlcore_wake_queue(wl, wlvif, i,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
}
}
-static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
- struct sk_buff_head *queues)
+static int wlcore_select_ac(struct wl1271 *wl)
{
int i, q = -1, ac;
u32 min_pkts = 0xffffffff;
@@ -479,45 +481,60 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
*/
for (i = 0; i < NUM_TX_QUEUES; i++) {
ac = wl1271_tx_get_queue(i);
- if (!skb_queue_empty(&queues[ac]) &&
- (wl->tx_allocated_pkts[ac] < min_pkts)) {
+ if (wl->tx_queue_count[ac] &&
+ wl->tx_allocated_pkts[ac] < min_pkts) {
q = ac;
min_pkts = wl->tx_allocated_pkts[q];
}
}
- if (q == -1)
- return NULL;
-
- return &queues[q];
+ return q;
}
-static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
- struct wl1271_link *lnk)
+static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
+ struct wl1271_link *lnk, u8 q)
{
struct sk_buff *skb;
unsigned long flags;
- struct sk_buff_head *queue;
- queue = wl1271_select_queue(wl, lnk->tx_queue);
- if (!queue)
- return NULL;
-
- skb = skb_dequeue(queue);
+ skb = skb_dequeue(&lnk->tx_queue[q]);
if (skb) {
- int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
+ if (lnk->wlvif) {
+ WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
+ lnk->wlvif->tx_queue_count[q]--;
+ }
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
return skb;
}
-static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- u8 *hlid)
+static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
+ u8 hlid, u8 ac,
+ u8 *low_prio_hlid)
+{
+ struct wl1271_link *lnk = &wl->links[hlid];
+
+ if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
+ if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
+ !skb_queue_empty(&lnk->tx_queue[ac]) &&
+ wlcore_hw_lnk_low_prio(wl, hlid, lnk))
+ /* we found the first non-empty low priority queue */
+ *low_prio_hlid = hlid;
+
+ return NULL;
+ }
+
+ return wlcore_lnk_dequeue(wl, lnk, ac);
+}
+
+static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 ac, u8 *hlid,
+ u8 *low_prio_hlid)
{
struct sk_buff *skb = NULL;
int i, h, start_hlid;
@@ -533,7 +550,8 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
if (!test_bit(h, wlvif->links_map))
continue;
- skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+ skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
+ low_prio_hlid);
if (!skb)
continue;
@@ -553,42 +571,74 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
unsigned long flags;
struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
+ int ac;
+ u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;
+
+ ac = wlcore_select_ac(wl);
+ if (ac < 0)
+ goto out;
/* continue from last wlvif (round robin) */
if (wlvif) {
wl12xx_for_each_wlvif_continue(wl, wlvif) {
- skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
- if (skb) {
- wl->last_wlvif = wlvif;
- break;
- }
+ if (!wlvif->tx_queue_count[ac])
+ continue;
+
+ skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
+ &low_prio_hlid);
+ if (!skb)
+ continue;
+
+ wl->last_wlvif = wlvif;
+ break;
}
}
/* dequeue from the system HLID before the restarting wlvif list */
if (!skb) {
- skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
- *hlid = wl->system_hlid;
+ skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
+ ac, &low_prio_hlid);
+ if (skb) {
+ *hlid = wl->system_hlid;
+ wl->last_wlvif = NULL;
+ }
}
- /* do a new pass over the wlvif list */
+ /* Do a new pass over the wlvif list. But no need to continue
+ * after last_wlvif. The previous pass should have found it. */
if (!skb) {
wl12xx_for_each_wlvif(wl, wlvif) {
- skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
+ if (!wlvif->tx_queue_count[ac])
+ goto next;
+
+ skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
+ &low_prio_hlid);
if (skb) {
wl->last_wlvif = wlvif;
break;
}
- /*
- * No need to continue after last_wlvif. The previous
- * pass should have found it.
- */
+next:
if (wlvif == wl->last_wlvif)
break;
}
}
+ /* no high priority skbs found - but maybe a low priority one? */
+ if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
+ struct wl1271_link *lnk = &wl->links[low_prio_hlid];
+ skb = wlcore_lnk_dequeue(wl, lnk, ac);
+
+ WARN_ON(!skb); /* we checked this before */
+ *hlid = low_prio_hlid;
+
+ /* ensure proper round robin in the vif/link levels */
+ wl->last_wlvif = lnk->wlvif;
+ if (lnk->wlvif)
+ lnk->wlvif->last_tx_hlid = low_prio_hlid;
+
+ }
+
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
int q;
@@ -602,6 +652,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
+out:
return skb;
}
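The rewritten dequeue path above first selects an AC, then walks links looking for ones the hardware layer flags as high priority, remembering the first non-empty low-priority link as a fallback to use only when nothing high priority is pending. A compact userspace model of that scheme; the link priorities and queue contents are invented for illustration and the priority test stands in for the wlcore_hw_lnk_*_prio() hooks.

#include <stdio.h>

#define NUM_LINKS    4
#define INVALID_LINK 0xffu

/* invented example state: per-link priority and queued frame count */
static const int high_prio[NUM_LINKS] = { 0, 0, 1, 0 };
static unsigned queued[NUM_LINKS]     = { 3, 0, 0, 5 };

static int dequeue(unsigned *low_prio_fallback)
{
	int h;

	for (h = 0; h < NUM_LINKS; h++) {
		if (!high_prio[h]) {
			if (*low_prio_fallback == INVALID_LINK && queued[h])
				*low_prio_fallback = h;	/* remember fallback */
			continue;
		}
		if (queued[h]) {
			queued[h]--;
			return h;
		}
	}
	return -1;
}

int main(void)
{
	unsigned fallback = INVALID_LINK;
	int link = dequeue(&fallback);

	if (link < 0 && fallback != INVALID_LINK) {
		queued[fallback]--;	/* no high-prio frames, use fallback */
		link = fallback;
	}
	printf("dequeued from link %d\n", link);
	return 0;
}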
@@ -623,6 +674,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count[q]++;
+ if (wlvif)
+ wlvif->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -699,7 +752,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
bool has_data = false;
wlvif = NULL;
- if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+ if (!wl12xx_is_dummy_packet(wl, skb))
wlvif = wl12xx_vif_to_data(info->control.vif);
else
hlid = wl->system_hlid;
@@ -972,10 +1025,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
unsigned long flags;
struct ieee80211_tx_info *info;
int total[NUM_TX_QUEUES];
+ struct wl1271_link *lnk = &wl->links[hlid];
for (i = 0; i < NUM_TX_QUEUES; i++) {
total[i] = 0;
- while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
if (!wl12xx_is_dummy_packet(wl, skb)) {
@@ -990,8 +1044,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
}
spin_lock_irqsave(&wl->wl_lock, flags);
- for (i = 0; i < NUM_TX_QUEUES; i++)
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_queue_count[i] -= total[i];
+ if (lnk->wlvif)
+ lnk->wlvif->tx_queue_count[i] -= total[i];
+ }
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
@@ -1004,16 +1061,18 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* TX failure */
for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
- if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
+ /* this calls wl12xx_free_link */
wl1271_free_sta(wl, wlvif, i);
- else
- wlvif->sta.ba_rx_bitmap = 0;
-
- wl->links[i].allocated_pkts = 0;
- wl->links[i].prev_freed_pkts = 0;
+ } else {
+ u8 hlid = i;
+ wl12xx_free_link(wl, wlvif, &hlid);
+ }
}
wlvif->last_tx_hlid = 0;
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wlvif->tx_queue_count[i] = 0;
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
@@ -1023,7 +1082,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
struct ieee80211_tx_info *info;
/* only reset the queues if something bad happened */
- if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
+ if (wl1271_tx_total_queue_count(wl) != 0) {
for (i = 0; i < WL12XX_MAX_LINKS; i++)
wl1271_tx_reset_link_queues(wl, i);
@@ -1135,45 +1194,48 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
return BIT(__ffs(rate_set));
}
+EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
-void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
- enum wlcore_queue_stop_reason reason)
+void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue, enum wlcore_queue_stop_reason reason)
{
- bool stopped = !!wl->queue_stop_reasons[queue];
+ int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+ bool stopped = !!wl->queue_stop_reasons[hwq];
/* queue should not be stopped for this reason */
- WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));
+ WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
if (stopped)
return;
- ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+ ieee80211_stop_queue(wl->hw, hwq);
}
-void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason)
{
unsigned long flags;
spin_lock_irqsave(&wl->wl_lock, flags);
- wlcore_stop_queue_locked(wl, queue, reason);
+ wlcore_stop_queue_locked(wl, wlvif, queue, reason);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
-void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason)
{
unsigned long flags;
+ int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
spin_lock_irqsave(&wl->wl_lock, flags);
/* queue should not be clear for this reason */
- WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));
+ WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));
- if (wl->queue_stop_reasons[queue])
+ if (wl->queue_stop_reasons[hwq])
goto out;
- ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+ ieee80211_wake_queue(wl->hw, hwq);
out:
spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -1183,48 +1245,74 @@ void wlcore_stop_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason)
{
int i;
+ unsigned long flags;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- wlcore_stop_queue(wl, i, reason);
+ spin_lock_irqsave(&wl->wl_lock, flags);
+
+ /* mark all possible queues as stopped */
+ for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
+ WARN_ON_ONCE(test_and_set_bit(reason,
+ &wl->queue_stop_reasons[i]));
+
+ /* use the global version to make sure all vifs in mac80211 we don't
+ * know about are stopped.
+ */
+ ieee80211_stop_queues(wl->hw);
+
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
-EXPORT_SYMBOL_GPL(wlcore_stop_queues);
void wlcore_wake_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason)
{
int i;
+ unsigned long flags;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- wlcore_wake_queue(wl, i, reason);
+ spin_lock_irqsave(&wl->wl_lock, flags);
+
+ /* mark all possible queues as awake */
+ for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
+ WARN_ON_ONCE(!test_and_clear_bit(reason,
+ &wl->queue_stop_reasons[i]));
+
+ /* use the global version to make sure all vifs in mac80211 we don't
+ * know about are woken up.
+ */
+ ieee80211_wake_queues(wl->hw);
+
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
-EXPORT_SYMBOL_GPL(wlcore_wake_queues);
-void wlcore_reset_stopped_queues(struct wl1271 *wl)
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 queue,
+ enum wlcore_queue_stop_reason reason)
{
- int i;
unsigned long flags;
+ bool stopped;
spin_lock_irqsave(&wl->wl_lock, flags);
-
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- if (!wl->queue_stop_reasons[i])
- continue;
-
- wl->queue_stop_reasons[i] = 0;
- ieee80211_wake_queue(wl->hw,
- wl1271_tx_get_mac80211_queue(i));
- }
-
+ stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
+ reason);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return stopped;
}
-bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
- enum wlcore_queue_stop_reason reason)
+bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 queue,
+ enum wlcore_queue_stop_reason reason)
{
- return test_bit(reason, &wl->queue_stop_reasons[queue]);
+ int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+ WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock));
+ return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}
-bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
+bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue)
{
- return !!wl->queue_stop_reasons[queue];
+ int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+ WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock));
+ return !!wl->queue_stop_reasons[hwq];
}
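
As an aside, the per-vif queue handling introduced above can be illustrated with a small standalone sketch (not part of the patch; the types and names below are simplified stand-ins for the driver's wl12xx_vif and wlcore_tx_get_mac80211_queue): each vif owns a contiguous block of mac80211 hardware queues starting at hw_queue_base, an access category is translated into an offset inside that block, and queue_stop_reasons is then indexed by that per-vif hardware queue.

#include <stdio.h>

/* simplified stand-ins for the driver's AC indices and vif structure */
enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_TX_QUEUES };

struct vif_sketch {
	int hw_queue_base;	/* first mac80211 HW queue owned by this vif */
};

/* mirrors the idea of wlcore_tx_get_mac80211_queue(): AC -> per-vif HW queue */
static int get_mac80211_queue(const struct vif_sketch *vif, int ac)
{
	switch (ac) {
	case AC_VO: return vif->hw_queue_base + 0;
	case AC_VI: return vif->hw_queue_base + 1;
	case AC_BE: return vif->hw_queue_base + 2;
	case AC_BK: return vif->hw_queue_base + 3;
	default:    return vif->hw_queue_base + 2;	/* fall back to BE */
	}
}

int main(void)
{
	/* e.g. three vifs, each owning its own block of NUM_TX_QUEUES HW queues */
	struct vif_sketch vifs[] = { { 0 }, { 4 }, { 8 } };
	int i, ac;

	for (i = 0; i < 3; i++)
		for (ac = AC_VO; ac <= AC_BK; ac++)
			printf("vif %d, AC %d -> hwq %d\n",
			       i, ac, get_mac80211_queue(&vifs[i], ac));
	return 0;
}
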
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 349520d8b724..55aa4acf9105 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -207,19 +207,22 @@ static inline int wl1271_tx_get_queue(int queue)
}
}
-static inline int wl1271_tx_get_mac80211_queue(int queue)
+static inline
+int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue)
{
+ int mac_queue = wlvif->hw_queue_base;
+
switch (queue) {
case CONF_TX_AC_VO:
- return 0;
+ return mac_queue + 0;
case CONF_TX_AC_VI:
- return 1;
+ return mac_queue + 1;
case CONF_TX_AC_BE:
- return 2;
+ return mac_queue + 2;
case CONF_TX_AC_BK:
- return 3;
+ return mac_queue + 3;
default:
- return 2;
+ return mac_queue + 2;
}
}
@@ -252,20 +255,26 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length);
void wl1271_free_tx_id(struct wl1271 *wl, int id);
-void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
- enum wlcore_queue_stop_reason reason);
-void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue, enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason);
-void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason);
void wlcore_stop_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason);
void wlcore_wake_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason);
-void wlcore_reset_stopped_queues(struct wl1271 *wl);
-bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason);
-bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue);
+bool
+wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 queue,
+ enum wlcore_queue_stop_reason reason);
+bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue);
/* from main.c */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index c3884937c007..af9fecaefc30 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -37,6 +37,9 @@
*/
#define WLCORE_NUM_MAC_ADDRESSES 3
+/* wl12xx/wl18xx maximum transmission power (in dBm) */
+#define WLCORE_MAX_TXPWR 25
+
/* forward declaration */
struct wl1271_tx_hw_descr;
enum wl_rx_buf_align;
@@ -51,6 +54,9 @@ struct wlcore_ops {
int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
void *buf, size_t len);
int (*ack_event)(struct wl1271 *wl);
+ int (*wait_for_event)(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout);
+ int (*process_mailbox_events)(struct wl1271 *wl);
u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
void (*set_tx_desc_blocks)(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
@@ -82,12 +88,32 @@ struct wlcore_ops {
int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir);
int (*handle_static_data)(struct wl1271 *wl,
struct wl1271_static_data *static_data);
+ int (*scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req);
+ int (*scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+ int (*sched_scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+ void (*sched_scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem);
int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf);
+ int (*channel_switch)(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch);
u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
+ void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u32 changed);
+ int (*set_peer_cap)(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid);
+ bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk);
+ bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk);
};
enum wlcore_partitions {
@@ -157,7 +183,6 @@ struct wl1271 {
struct wl1271_if_operations *if_ops;
- void (*set_power)(bool enable);
int irq;
spinlock_t wl_lock;
@@ -202,6 +227,8 @@ struct wl1271 {
unsigned long klv_templates_map[
BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
+ u8 session_ids[WL12XX_MAX_LINKS];
+
struct list_head wlvif_list;
u8 sta_count;
@@ -227,7 +254,8 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
int tx_queue_count[NUM_TX_QUEUES];
- unsigned long queue_stop_reasons[NUM_TX_QUEUES];
+ unsigned long queue_stop_reasons[
+ NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES];
/* Frames received, not handled yet by mac80211 */
struct sk_buff_head deferred_rx_queue;
@@ -269,24 +297,30 @@ struct wl1271 {
struct work_struct recovery_work;
bool watchdog_recovery;
+ /* Reg domain last configuration */
+ u32 reg_ch_conf_last[2];
+ /* Reg domain pending configuration */
+ u32 reg_ch_conf_pending[2];
+
/* Pointer that holds DMA-friendly block for the mailbox */
- struct event_mailbox *mbox;
+ void *mbox;
/* The mbox event mask */
u32 event_mask;
/* Mailbox pointers */
+ u32 mbox_size;
u32 mbox_ptr[2];
/* Are we currently scanning */
- struct ieee80211_vif *scan_vif;
+ struct wl12xx_vif *scan_wlvif;
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
- /* Connection loss work */
- struct delayed_work connection_loss_work;
+ struct ieee80211_vif *roc_vif;
+ struct delayed_work roc_complete_work;
- bool sched_scanning;
+ struct wl12xx_vif *sched_vif;
/* The current band */
enum ieee80211_band band;
@@ -299,7 +333,7 @@ struct wl1271 {
struct wl1271_stats stats;
- __le32 buffer_32;
+ __le32 *buffer_32;
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
@@ -314,6 +348,8 @@ struct wl1271 {
bool enable_11a;
+ int recovery_count;
+
/* Most recently reported noise in dBm */
s8 noise;
@@ -333,6 +369,12 @@ struct wl1271 {
*/
struct wl1271_link links[WL12XX_MAX_LINKS];
+ /* number of currently active links */
+ int active_link_count;
+
+ /* Fast/slow links bitmap according to FW */
+ u32 fw_fast_lnk_map;
+
/* AP-mode - a bitmap of links currently in PS mode according to FW */
u32 ap_fw_ps_map;
@@ -367,6 +409,12 @@ struct wl1271 {
const char *sr_fw_name;
const char *mr_fw_name;
+ u8 scan_templ_id_2_4;
+ u8 scan_templ_id_5;
+ u8 sched_scan_templ_id_2_4;
+ u8 sched_scan_templ_id_5;
+ u8 max_channels_5;
+
/* per-chip-family private structure */
void *priv;
@@ -408,20 +456,28 @@ struct wl1271 {
/* the number of allocated MAC addresses in this chip */
int num_mac_addr;
- /* the minimum FW version required for the driver to work */
- unsigned int min_fw_ver[NUM_FW_VER];
+ /* minimum FW version required for the driver to work in single-role */
+ unsigned int min_sr_fw_ver[NUM_FW_VER];
+
+ /* minimum FW version required for the driver to work in multi-role */
+ unsigned int min_mr_fw_ver[NUM_FW_VER];
struct completion nvs_loading_complete;
+
+ /* number of concurrent channels the HW supports */
+ u32 num_channels;
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
int wlcore_remove(struct platform_device *pdev);
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
+ u32 mbox_size);
int wlcore_free_hw(struct wl1271 *wl);
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf);
+void wlcore_regdomain_config(struct wl1271 *wl);
static inline void
wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
@@ -430,16 +486,27 @@ wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
}
+/* Tell wlcore not to care about this element when checking the version */
+#define WLCORE_FW_VER_IGNORE -1
+
static inline void
wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
- unsigned int iftype, unsigned int major,
- unsigned int subtype, unsigned int minor)
+ unsigned int iftype_sr, unsigned int major_sr,
+ unsigned int subtype_sr, unsigned int minor_sr,
+ unsigned int iftype_mr, unsigned int major_mr,
+ unsigned int subtype_mr, unsigned int minor_mr)
{
- wl->min_fw_ver[FW_VER_CHIP] = chip;
- wl->min_fw_ver[FW_VER_IF_TYPE] = iftype;
- wl->min_fw_ver[FW_VER_MAJOR] = major;
- wl->min_fw_ver[FW_VER_SUBTYPE] = subtype;
- wl->min_fw_ver[FW_VER_MINOR] = minor;
+ wl->min_sr_fw_ver[FW_VER_CHIP] = chip;
+ wl->min_sr_fw_ver[FW_VER_IF_TYPE] = iftype_sr;
+ wl->min_sr_fw_ver[FW_VER_MAJOR] = major_sr;
+ wl->min_sr_fw_ver[FW_VER_SUBTYPE] = subtype_sr;
+ wl->min_sr_fw_ver[FW_VER_MINOR] = minor_sr;
+
+ wl->min_mr_fw_ver[FW_VER_CHIP] = chip;
+ wl->min_mr_fw_ver[FW_VER_IF_TYPE] = iftype_mr;
+ wl->min_mr_fw_ver[FW_VER_MAJOR] = major_mr;
+ wl->min_mr_fw_ver[FW_VER_SUBTYPE] = subtype_mr;
+ wl->min_mr_fw_ver[FW_VER_MINOR] = minor_mr;
}
/* Firmware image load chunk size */
@@ -450,6 +517,9 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0)
+/* the first start_role(sta) sometimes doesn't work on wl12xx */
+#define WLCORE_QUIRK_START_STA_FAILS BIT(1)
+
/* wl127x and SPI don't support SDIO block size alignment */
#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2)
@@ -462,9 +532,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
/* Older firmwares use an old NVS format */
#define WLCORE_QUIRK_LEGACY_NVS BIT(5)
-/* Some firmwares may not support ELP */
-#define WLCORE_QUIRK_NO_ELP BIT(6)
-
/* pad only the last frame in the aggregate buffer */
#define WLCORE_QUIRK_TX_PAD_LAST_FRAME BIT(7)
@@ -477,11 +544,11 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
/* separate probe response templates for one-shot and sched scans */
#define WLCORE_QUIRK_DUAL_PROBE_TMPL BIT(10)
-/* TODO: move to the lower drivers when all usages are abstracted */
-#define CHIP_ID_1271_PG10 (0x4030101)
-#define CHIP_ID_1271_PG20 (0x4030111)
-#define CHIP_ID_1283_PG10 (0x05030101)
-#define CHIP_ID_1283_PG20 (0x05030111)
+/* Firmware requires reg domain configuration for active calibration */
+#define WLCORE_QUIRK_REGDOMAIN_CONF BIT(11)
+
+/* The FW only supports a zero session id for AP */
+#define WLCORE_QUIRK_AP_ZERO_SESSION_ID BIT(12)
/* TODO: move all these common registers and values elsewhere */
#define HW_ACCESS_ELP_CTRL_REG 0x1FFFC
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 6678d4b18611..508f5b0f8a70 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -109,22 +109,11 @@ enum {
NUM_FW_VER
};
-#define FW_VER_CHIP_WL127X 6
-#define FW_VER_CHIP_WL128X 7
-
-#define FW_VER_IF_TYPE_STA 1
-#define FW_VER_IF_TYPE_AP 2
-
-#define FW_VER_MINOR_1_SPARE_STA_MIN 58
-#define FW_VER_MINOR_1_SPARE_AP_MIN 47
-
-#define FW_VER_MINOR_FWLOG_STA_MIN 70
-
struct wl1271_chip {
u32 id;
- char fw_ver_str[ETHTOOL_BUSINFO_LEN];
+ char fw_ver_str[ETHTOOL_FWVERS_LEN];
unsigned int fw_ver[NUM_FW_VER];
- char phy_fw_ver_str[ETHTOOL_BUSINFO_LEN];
+ char phy_fw_ver_str[ETHTOOL_FWVERS_LEN];
};
#define NUM_TX_QUEUES 4
@@ -141,7 +130,10 @@ struct wl_fw_packet_counters {
/* Cumulative counter of released Voice memory blocks */
u8 tx_voice_released_blks;
- u8 padding[3];
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
} __packed;
/* FW status registers */
@@ -214,6 +206,11 @@ struct wl1271_if_operations {
void (*set_block_size) (struct device *child, unsigned int blksz);
};
+struct wlcore_platdev_data {
+ struct wl12xx_platform_data *pdata;
+ struct wl1271_if_operations *if_ops;
+};
+
#define MAX_NUM_KEYS 14
#define MAX_KEY_SIZE 32
@@ -260,6 +257,8 @@ enum wl12xx_vif_flags {
WLVIF_FLAG_IN_USE,
};
+struct wl12xx_vif;
+
struct wl1271_link {
/* AP-mode - TX queue per AC in link */
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
@@ -272,6 +271,9 @@ struct wl1271_link {
/* bitmap of TIDs where RX BA sessions are active for this link */
u8 ba_bitmap;
+
+ /* The wlvif this link belongs to. Might be null for global links */
+ struct wl12xx_vif *wlvif;
};
#define WL1271_MAX_RX_FILTERS 5
@@ -315,6 +317,7 @@ struct wl12xx_rx_filter {
struct wl1271_station {
u8 hlid;
+ bool in_connection;
};
struct wl12xx_vif {
@@ -332,7 +335,6 @@ struct wl12xx_vif {
union {
struct {
u8 hlid;
- u8 ba_rx_bitmap;
u8 basic_rate_idx;
u8 ap_rate_idx;
@@ -341,6 +343,8 @@ struct wl12xx_vif {
u8 klv_template_id;
bool qos;
+ /* channel type we started the STA role with */
+ enum nl80211_channel_type role_chan_type;
} sta;
struct {
u8 global_hlid;
@@ -362,6 +366,9 @@ struct wl12xx_vif {
/* the hlid of the last transmitted skb */
int last_tx_hlid;
+ /* counters of packets per AC, across all links in the vif */
+ int tx_queue_count[NUM_TX_QUEUES];
+
unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
@@ -396,9 +403,6 @@ struct wl12xx_vif {
/* Our association ID */
u16 aid;
- /* Session counter for the chipset */
- int session_counter;
-
/* retry counter for PSM entries */
u8 psm_entry_retry;
@@ -416,11 +420,28 @@ struct wl12xx_vif {
bool ba_support;
bool ba_allowed;
+ bool wmm_enabled;
+
/* Rx Streaming */
struct work_struct rx_streaming_enable_work;
struct work_struct rx_streaming_disable_work;
struct timer_list rx_streaming_timer;
+ struct delayed_work channel_switch_work;
+ struct delayed_work connection_loss_work;
+
+ /* number of in connection stations */
+ int inconn_count;
+
+ /*
+ * This vif's queues are mapped to mac80211 HW queues as:
+ * VO - hw_queue_base
+ * VI - hw_queue_base + 1
+ * BE - hw_queue_base + 2
+ * BK - hw_queue_base + 3
+ */
+ int hw_queue_base;
+
/*
* This struct must be last!
* data that has to be saved across reconfigs (e.g. recovery)
@@ -443,6 +464,7 @@ struct wl12xx_vif {
static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
{
+ WARN_ON(!vif);
return (struct wl12xx_vif *)vif->drv_priv;
}
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig
index 5f809695f71a..96c8e1de0879 100644
--- a/drivers/net/wireless/zd1211rw/Kconfig
+++ b/drivers/net/wireless/zd1211rw/Kconfig
@@ -1,6 +1,6 @@
config ZD1211RW
tristate "ZyDAS ZD1211/ZD1211B USB-wireless support"
- depends on USB && MAC80211 && EXPERIMENTAL
+ depends on USB && MAC80211
select FW_LOADER
---help---
This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ef2b171e3514..7ef0b4a181e1 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -155,7 +155,6 @@ static int upload_code(struct usb_device *udev,
*/
p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL);
if (!p) {
- dev_err(&udev->dev, "out of memory\n");
r = -ENOMEM;
goto error;
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3338c4..9d7f1723dd8f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
/* Returns number of ring slots required to send an skb to the frontend */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8c338a..d98414168485 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
static void xenvif_down(struct xenvif *vif)
{
disable_irq(vif->irq);
+ del_timer_sync(&vif->credit_timeout);
xen_netbk_deschedule_xenvif(vif);
xen_netbk_remove_xenvif(vif);
}
@@ -238,6 +239,8 @@ static const struct net_device_ops xenvif_netdev_ops = {
.ndo_stop = xenvif_close,
.ndo_change_mtu = xenvif_change_mtu,
.ndo_fix_features = xenvif_fix_features,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
};
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -343,23 +346,26 @@ err:
return err;
}
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
{
struct net_device *dev = vif->dev;
- if (netif_carrier_ok(dev)) {
- rtnl_lock();
- netif_carrier_off(dev); /* discard queued packets */
- if (netif_running(dev))
- xenvif_down(vif);
- rtnl_unlock();
- xenvif_put(vif);
- }
+
+ rtnl_lock();
+ netif_carrier_off(dev); /* discard queued packets */
+ if (netif_running(dev))
+ xenvif_down(vif);
+ rtnl_unlock();
+ xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+ if (netif_carrier_ok(vif->dev))
+ xenvif_carrier_off(vif);
atomic_dec(&vif->refcnt);
wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
- del_timer_sync(&vif->credit_timeout);
-
if (vif->irq)
unbind_from_irqhandler(vif->irq, vif);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f2d6b78d901d..cd49ba949636 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
atomic_dec(&netbk->netfront_count);
}
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
do {
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- if (cons >= end)
+ if (cons == end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
xenvif_put(vif);
}
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+ netdev_err(vif->dev, "fatal error; disabling device\n");
+ xenvif_carrier_off(vif);
+ xenvif_put(vif);
+}
+
static int netbk_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
@@ -901,29 +909,33 @@ static int netbk_count_requests(struct xenvif *vif,
do {
if (frags >= work_to_do) {
- netdev_dbg(vif->dev, "Need more frags\n");
- return -frags;
+ netdev_err(vif->dev, "Need more frags\n");
+ netbk_fatal_tx_err(vif);
+ return -ENODATA;
}
if (unlikely(frags >= MAX_SKB_FRAGS)) {
- netdev_dbg(vif->dev, "Too many frags\n");
- return -frags;
+ netdev_err(vif->dev, "Too many frags\n");
+ netbk_fatal_tx_err(vif);
+ return -E2BIG;
}
memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
sizeof(*txp));
if (txp->size > first->size) {
- netdev_dbg(vif->dev, "Frags galore\n");
- return -frags;
+ netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ netbk_fatal_tx_err(vif);
+ return -EIO;
}
first->size -= txp->size;
frags++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
txp->offset, txp->size);
- return -frags;
+ netbk_fatal_tx_err(vif);
+ return -EINVAL;
}
} while ((txp++)->flags & XEN_NETTXF_more_data);
return frags;
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
pending_idx = netbk->pending_ring[index];
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page)
- return NULL;
+ goto err;
gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
}
return gop;
+err:
+ /* Unwind, freeing all pages and sending error responses. */
+ while (i-- > start) {
+ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+ XEN_NETIF_RSP_ERROR);
+ }
+ /* The head too, if necessary. */
+ if (start)
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ return NULL;
}
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
- struct xenvif *vif = pending_tx_info[pending_idx].vif;
- struct xen_netif_tx_request *txp;
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i, err, start;
/* Check status of header. */
err = gop->status;
- if (unlikely(err)) {
- pending_ring_idx_t index;
- index = pending_index(netbk->pending_prod++);
- txp = &pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
- }
+ if (unlikely(err))
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < nr_frags; i++) {
int j, newerr;
- pending_ring_idx_t index;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
continue;
}
/* Error on this fragment: respond to client with an error. */
- txp = &netbk->pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
/* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
/* Take an extra reference to offset xen_netbk_idx_release */
get_page(netbk->mmap_pages[pending_idx]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
}
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
do {
if (unlikely(work_to_do-- <= 0)) {
- netdev_dbg(vif->dev, "Missing extra info\n");
+ netdev_err(vif->dev, "Missing extra info\n");
+ netbk_fatal_tx_err(vif);
return -EBADR;
}
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
vif->tx.req_cons = ++cons;
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+ netdev_err(vif->dev, "GSO size must not be zero.\n");
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}
/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
+ /* This can sometimes happen because the test of
+ * list_empty(net_schedule_list) at the top of the
+ * loop is unlocked. Just go back and have another
+ * look.
+ */
if (!vif)
continue;
+ if (vif->tx.sring->req_prod - vif->tx.req_cons >
+ XEN_NETIF_TX_RING_SIZE) {
+ netdev_err(vif->dev,
+ "Impossible number of requests. "
+ "req_prod %d, req_cons %d, size %ld\n",
+ vif->tx.sring->req_prod, vif->tx.req_cons,
+ XEN_NETIF_TX_RING_SIZE);
+ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
- if (unlikely(work_to_do < 0)) {
- netbk_tx_err(vif, &txreq, idx);
+ if (unlikely(work_to_do < 0))
continue;
- }
}
ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
- if (unlikely(ret < 0)) {
- netbk_tx_err(vif, &txreq, idx - ret);
+ if (unlikely(ret < 0))
continue;
- }
+
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- netbk_tx_err(vif, &txreq, idx);
+ netbk_fatal_tx_err(vif);
continue;
}
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
if (netbk_set_skb_gso(vif, skb, gso)) {
+ /* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
continue;
}
}
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
xen_netbk_tx_submit(netbk);
}
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status)
{
struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
vif = pending_tx_info->vif;
- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+ make_tx_response(vif, &pending_tx_info->req, status);
index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;
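
A quick standalone illustration (not part of the patch) of why the req_prod/req_cons sanity check added above can use a plain subtraction: the shared-ring indices are free-running unsigned counters, so req_prod - req_cons yields the number of outstanding requests even after the counters wrap, and any value above the ring size means the frontend is misbehaving. RING_SIZE below is a stand-in for XEN_NETIF_TX_RING_SIZE.

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 256u	/* stand-in for XEN_NETIF_TX_RING_SIZE */

/* nonzero if the producer claims an impossible number of outstanding requests */
static int ring_request_count_invalid(uint32_t req_prod, uint32_t req_cons)
{
	/* unsigned subtraction is wrap-safe for free-running indices */
	return (req_prod - req_cons) > RING_SIZE;
}

int main(void)
{
	/* counters that wrapped past UINT32_MAX still compare correctly */
	printf("%d\n", ring_request_count_invalid(0x00000005u, 0xfffffff0u)); /* 0: 21 outstanding */
	printf("%d\n", ring_request_count_invalid(0x00001000u, 0x00000001u)); /* 1: far too many */
	return 0;
}
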
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c26e28b4bd9f..7ffa43bd7cf9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1015,29 +1015,10 @@ err:
i = xennet_fill_frags(np, skb, &tmpq);
/*
- * Truesize approximates the size of true data plus
- * any supervisor overheads. Adding hypervisor
- * overheads has been shown to significantly reduce
- * achievable bandwidth with the default receive
- * buffer size. It is therefore not wise to account
- * for it here.
- *
- * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
- * to RX_COPY_THRESHOLD + the supervisor
- * overheads. Here, we add the size of the data pulled
- * in xennet_fill_frags().
- *
- * We also adjust for any unused space in the main
- * data area by subtracting (RX_COPY_THRESHOLD -
- * len). This is especially important with drivers
- * which split incoming packets into header and data,
- * using only 66 bytes of the main data area (see the
- * e1000 driver for example.) On such systems,
- * without this last adjustement, our achievable
- * receive throughout using the standard receive
- * buffer size was cut by 25%(!!!).
- */
- skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
+ * Truesize is the actual allocation size, even if the
+ * allocation is only partially used.
+ */
+ skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
skb->len += skb->data_len;
if (rx->flags & XEN_NETRXF_csum_blank)
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ec857676c39f..e57034971ccc 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -5,19 +5,6 @@
menu "Near Field Communication (NFC) devices"
depends on NFC
-config PN544_HCI_NFC
- tristate "HCI PN544 NFC driver"
- depends on I2C && NFC_HCI && NFC_SHDLC
- select CRC_CCITT
- default n
- ---help---
- NXP PN544 i2c driver.
- This is a driver based on the SHDLC and HCI NFC kernel layers and
- will thus not work with NXP libnfc library.
-
- To compile this driver as a module, choose m here. The module will
- be called pn544_hci.
-
config NFC_PN533
tristate "NXP PN533 USB driver"
depends on USB
@@ -39,4 +26,7 @@ config NFC_WILINK
Say Y here to compile support for Texas Instrument's NFC WiLink driver
into the kernel or say M to compile it as module.
+source "drivers/nfc/pn544/Kconfig"
+source "drivers/nfc/microread/Kconfig"
+
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 36c359043f54..a189ada0926a 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -2,7 +2,8 @@
# Makefile for nfc devices
#
-obj-$(CONFIG_PN544_HCI_NFC) += pn544/
+obj-$(CONFIG_NFC_PN544) += pn544/
+obj-$(CONFIG_NFC_MICROREAD) += microread/
obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
diff --git a/drivers/nfc/microread/Kconfig b/drivers/nfc/microread/Kconfig
new file mode 100644
index 000000000000..572305be6e37
--- /dev/null
+++ b/drivers/nfc/microread/Kconfig
@@ -0,0 +1,35 @@
+config NFC_MICROREAD
+ tristate "Inside Secure microread NFC driver"
+ depends on NFC_HCI
+ select CRC_CCITT
+ default n
+ ---help---
+ This module contains the main code for Inside Secure microread
+ NFC chipsets. It implements the chipset HCI logic and hooks into
+ the NFC kernel APIs. Physical layers will register against it.
+
+ To compile this driver as a module, choose m here. The module will
+ be called microread.
+ Say N if unsure.
+
+config NFC_MICROREAD_I2C
+ tristate "NFC Microread i2c support"
+ depends on NFC_MICROREAD && I2C && NFC_SHDLC
+ ---help---
+ This module adds support for the i2c interface of adapters using
+ Inside microread chipsets. Select this if your platform is using
+ the i2c bus.
+
+ If you choose to build a module, it'll be called microread_i2c.
+ Say N if unsure.
+
+config NFC_MICROREAD_MEI
+ tristate "NFC Microread MEI support"
+ depends on NFC_MICROREAD && INTEL_MEI_BUS_NFC
+ ---help---
+ This module adds support for the mei interface of adapters using
+ Inside microread chipsets. Select this if your microread chipset
+ is handled by Intel's Management Engine Interface on your platform.
+
+ If you choose to build a module, it'll be called microread_mei.
+ Say N if unsure.
diff --git a/drivers/nfc/microread/Makefile b/drivers/nfc/microread/Makefile
new file mode 100644
index 000000000000..755c24cba253
--- /dev/null
+++ b/drivers/nfc/microread/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Microread HCI based NFC driver
+#
+
+microread_i2c-objs = i2c.o
+microread_mei-objs = mei.o
+
+obj-$(CONFIG_NFC_MICROREAD) += microread.o
+obj-$(CONFIG_NFC_MICROREAD_I2C) += microread_i2c.o
+obj-$(CONFIG_NFC_MICROREAD_MEI) += microread_mei.o
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
new file mode 100644
index 000000000000..101089495bf8
--- /dev/null
+++ b/drivers/nfc/microread/i2c.c
@@ -0,0 +1,340 @@
+/*
+ * HCI based Driver for Inside Secure microread NFC Chip - i2c layer
+ *
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "microread.h"
+
+#define MICROREAD_I2C_DRIVER_NAME "microread"
+
+#define MICROREAD_I2C_FRAME_HEADROOM 1
+#define MICROREAD_I2C_FRAME_TAILROOM 1
+
+/* framing in HCI mode */
+#define MICROREAD_I2C_LLC_LEN 1
+#define MICROREAD_I2C_LLC_CRC 1
+#define MICROREAD_I2C_LLC_LEN_CRC (MICROREAD_I2C_LLC_LEN + \
+ MICROREAD_I2C_LLC_CRC)
+#define MICROREAD_I2C_LLC_MIN_SIZE (1 + MICROREAD_I2C_LLC_LEN_CRC)
+#define MICROREAD_I2C_LLC_MAX_PAYLOAD 29
+#define MICROREAD_I2C_LLC_MAX_SIZE (MICROREAD_I2C_LLC_LEN_CRC + 1 + \
+ MICROREAD_I2C_LLC_MAX_PAYLOAD)
+
+struct microread_i2c_phy {
+ struct i2c_client *i2c_dev;
+ struct nfc_hci_dev *hdev;
+
+ int irq;
+
+ int hard_fault; /*
+ * < 0 if hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+};
+
+#define I2C_DUMP_SKB(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, (skb)->data, (skb)->len, 0); \
+} while (0)
+
+static void microread_i2c_add_len_crc(struct sk_buff *skb)
+{
+ int i;
+ u8 crc = 0;
+ int len;
+
+ len = skb->len;
+ *skb_push(skb, 1) = len;
+
+ for (i = 0; i < skb->len; i++)
+ crc = crc ^ skb->data[i];
+
+ *skb_put(skb, 1) = crc;
+}
+
+static void microread_i2c_remove_len_crc(struct sk_buff *skb)
+{
+ skb_pull(skb, MICROREAD_I2C_FRAME_HEADROOM);
+ skb_trim(skb, MICROREAD_I2C_FRAME_TAILROOM);
+}
+
+static int check_crc(struct sk_buff *skb)
+{
+ int i;
+ u8 crc = 0;
+
+ for (i = 0; i < skb->len - 1; i++)
+ crc = crc ^ skb->data[i];
+
+ if (crc != skb->data[skb->len-1]) {
+ pr_err(MICROREAD_I2C_DRIVER_NAME
+ ": CRC error 0x%x != 0x%x\n",
+ crc, skb->data[skb->len-1]);
+
+ pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int microread_i2c_enable(void *phy_id)
+{
+ return 0;
+}
+
+static void microread_i2c_disable(void *phy_id)
+{
+ return;
+}
+
+static int microread_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+ int r;
+ struct microread_i2c_phy *phy = phy_id;
+ struct i2c_client *client = phy->i2c_dev;
+
+ if (phy->hard_fault != 0)
+ return phy->hard_fault;
+
+ usleep_range(3000, 6000);
+
+ microread_i2c_add_len_crc(skb);
+
+ I2C_DUMP_SKB("i2c frame written", skb);
+
+ r = i2c_master_send(client, skb->data, skb->len);
+
+ if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ usleep_range(6000, 10000);
+ r = i2c_master_send(client, skb->data, skb->len);
+ }
+
+ if (r >= 0) {
+ if (r != skb->len)
+ r = -EREMOTEIO;
+ else
+ r = 0;
+ }
+
+ microread_i2c_remove_len_crc(skb);
+
+ return r;
+}
+
+
+static int microread_i2c_read(struct microread_i2c_phy *phy,
+ struct sk_buff **skb)
+{
+ int r;
+ u8 len;
+ u8 tmp[MICROREAD_I2C_LLC_MAX_SIZE - 1];
+ struct i2c_client *client = phy->i2c_dev;
+
+ pr_debug("%s\n", __func__);
+
+ r = i2c_master_recv(client, &len, 1);
+ if (r != 1) {
+ dev_err(&client->dev, "cannot read len byte\n");
+ return -EREMOTEIO;
+ }
+
+ if ((len < MICROREAD_I2C_LLC_MIN_SIZE) ||
+ (len > MICROREAD_I2C_LLC_MAX_SIZE)) {
+ dev_err(&client->dev, "invalid len byte\n");
+ pr_err("invalid len byte\n");
+ r = -EBADMSG;
+ goto flush;
+ }
+
+ *skb = alloc_skb(1 + len, GFP_KERNEL);
+ if (*skb == NULL) {
+ r = -ENOMEM;
+ goto flush;
+ }
+
+ *skb_put(*skb, 1) = len;
+
+ r = i2c_master_recv(client, skb_put(*skb, len), len);
+ if (r != len) {
+ kfree_skb(*skb);
+ return -EREMOTEIO;
+ }
+
+ I2C_DUMP_SKB("cc frame read", *skb);
+
+ r = check_crc(*skb);
+ if (r != 0) {
+ kfree_skb(*skb);
+ r = -EBADMSG;
+ goto flush;
+ }
+
+ skb_pull(*skb, 1);
+ skb_trim(*skb, (*skb)->len - MICROREAD_I2C_FRAME_TAILROOM);
+
+ usleep_range(3000, 6000);
+
+ return 0;
+
+flush:
+ if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
+ r = -EREMOTEIO;
+
+ usleep_range(3000, 6000);
+
+ return r;
+}
+
+static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+ struct microread_i2c_phy *phy = phy_id;
+ struct i2c_client *client;
+ struct sk_buff *skb = NULL;
+ int r;
+
+ if (!phy || irq != phy->i2c_dev->irq) {
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+ }
+
+ client = phy->i2c_dev;
+ dev_dbg(&client->dev, "IRQ\n");
+
+ if (phy->hard_fault != 0)
+ return IRQ_HANDLED;
+
+ r = microread_i2c_read(phy, &skb);
+ if (r == -EREMOTEIO) {
+ phy->hard_fault = r;
+
+ nfc_hci_recv_frame(phy->hdev, NULL);
+
+ return IRQ_HANDLED;
+ } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+ return IRQ_HANDLED;
+ }
+
+ nfc_hci_recv_frame(phy->hdev, skb);
+
+ return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops i2c_phy_ops = {
+ .write = microread_i2c_write,
+ .enable = microread_i2c_enable,
+ .disable = microread_i2c_disable,
+};
+
+static int microread_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct microread_i2c_phy *phy;
+ struct microread_nfc_platform_data *pdata =
+ dev_get_platdata(&client->dev);
+ int r;
+
+ dev_dbg(&client->dev, "client %p", client);
+
+ if (!pdata) {
+ dev_err(&client->dev, "client %p: missing platform data",
+ client);
+ return -EINVAL;
+ }
+
+ phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
+ GFP_KERNEL);
+ if (!phy) {
+ dev_err(&client->dev, "Can't allocate microread phy");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, phy);
+ phy->i2c_dev = client;
+
+ r = request_threaded_irq(client->irq, NULL, microread_i2c_irq_thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ MICROREAD_I2C_DRIVER_NAME, phy);
+ if (r) {
+ dev_err(&client->dev, "Unable to register IRQ handler");
+ return r;
+ }
+
+ r = microread_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+ MICROREAD_I2C_FRAME_HEADROOM,
+ MICROREAD_I2C_FRAME_TAILROOM,
+ MICROREAD_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
+ if (r < 0)
+ goto err_irq;
+
+ dev_info(&client->dev, "Probed");
+
+ return 0;
+
+err_irq:
+ free_irq(client->irq, phy);
+
+ return r;
+}
+
+static int microread_i2c_remove(struct i2c_client *client)
+{
+ struct microread_i2c_phy *phy = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ microread_remove(phy->hdev);
+
+ free_irq(client->irq, phy);
+
+ return 0;
+}
+
+static struct i2c_device_id microread_i2c_id[] = {
+ { MICROREAD_I2C_DRIVER_NAME, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, microread_i2c_id);
+
+static struct i2c_driver microread_i2c_driver = {
+ .driver = {
+ .name = MICROREAD_I2C_DRIVER_NAME,
+ },
+ .probe = microread_i2c_probe,
+ .remove = microread_i2c_remove,
+ .id_table = microread_i2c_id,
+};
+
+module_i2c_driver(microread_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
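
For clarity, a standalone sketch (not part of the patch) of the framing that microread_i2c_add_len_crc() and check_crc() implement on the sk_buff above: the payload is prefixed with a one-byte length (counting only the payload) and followed by a one-byte checksum obtained by XOR-ing the length byte and the payload together.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* builds [len | payload... | crc] into out[], returns the frame length */
static size_t frame_payload(const uint8_t *payload, size_t len, uint8_t *out)
{
	uint8_t crc = 0;
	size_t i;

	out[0] = (uint8_t)len;		/* length byte counts the payload only */
	memcpy(out + 1, payload, len);

	for (i = 0; i < len + 1; i++)	/* XOR over the length byte + payload */
		crc ^= out[i];
	out[len + 1] = crc;

	return len + 2;
}

int main(void)
{
	uint8_t payload[] = { 0x81, 0x02, 0x03 };
	uint8_t frame[32];
	size_t i, n = frame_payload(payload, sizeof(payload), frame);

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");			/* prints: 03 81 02 03 83 */
	return 0;
}
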
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
new file mode 100644
index 000000000000..eef38cfd812e
--- /dev/null
+++ b/drivers/nfc/microread/mei.c
@@ -0,0 +1,246 @@
+/*
+ * HCI based Driver for Inside Secure microread NFC Chip
+ *
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/mei_bus.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "microread.h"
+
+#define MICROREAD_DRIVER_NAME "microread"
+
+#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \
+ 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+
+struct mei_nfc_hdr {
+ u8 cmd;
+ u8 status;
+ u16 req_id;
+ u32 reserved;
+ u16 data_size;
+} __attribute__((packed));
+
+#define MEI_NFC_HEADER_SIZE 10
+#define MEI_NFC_MAX_HCI_PAYLOAD 300
+#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
+
+struct microread_mei_phy {
+ struct mei_device *device;
+ struct nfc_hci_dev *hdev;
+
+ int powered;
+
+ int hard_fault; /*
+ * < 0 if hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+};
+
+#define MEI_DUMP_SKB_IN(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "mei in : ", DUMP_PREFIX_OFFSET, \
+ 16, 1, (skb)->data, (skb)->len, 0); \
+} while (0)
+
+#define MEI_DUMP_SKB_OUT(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "mei out: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, (skb)->data, (skb)->len, 0); \
+} while (0)
+
+static int microread_mei_enable(void *phy_id)
+{
+ struct microread_mei_phy *phy = phy_id;
+
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+
+ phy->powered = 1;
+
+ return 0;
+}
+
+static void microread_mei_disable(void *phy_id)
+{
+ struct microread_mei_phy *phy = phy_id;
+
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+
+ phy->powered = 0;
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb
+ */
+static int microread_mei_write(void *phy_id, struct sk_buff *skb)
+{
+ struct microread_mei_phy *phy = phy_id;
+ int r;
+
+ MEI_DUMP_SKB_OUT("mei frame sent", skb);
+
+ r = mei_send(phy->device, skb->data, skb->len);
+ if (r > 0)
+ r = 0;
+
+ return r;
+}
+
+static void microread_event_cb(struct mei_device *device, u32 events,
+ void *context)
+{
+ struct microread_mei_phy *phy = context;
+
+ if (phy->hard_fault != 0)
+ return;
+
+ if (events & BIT(MEI_EVENT_RX)) {
+ struct sk_buff *skb;
+ int reply_size;
+
+ skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ);
+ if (reply_size < MEI_NFC_HEADER_SIZE) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_put(skb, reply_size);
+ skb_pull(skb, MEI_NFC_HEADER_SIZE);
+
+ MEI_DUMP_SKB_IN("mei frame read", skb);
+
+ nfc_hci_recv_frame(phy->hdev, skb);
+ }
+}
+
+static struct nfc_phy_ops mei_phy_ops = {
+ .write = microread_mei_write,
+ .enable = microread_mei_enable,
+ .disable = microread_mei_disable,
+};
+
+static int microread_mei_probe(struct mei_device *device,
+ const struct mei_id *id)
+{
+ struct microread_mei_phy *phy;
+ int r;
+
+ pr_info("Probing NFC microread\n");
+
+ phy = kzalloc(sizeof(struct microread_mei_phy), GFP_KERNEL);
+ if (!phy) {
+ pr_err("Cannot allocate memory for microread mei phy.\n");
+ return -ENOMEM;
+ }
+
+ phy->device = device;
+ mei_set_clientdata(device, phy);
+
+ r = mei_register_event_cb(device, microread_event_cb, phy);
+ if (r) {
+ pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
+ goto err_out;
+ }
+
+ r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
+ MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
+ &phy->hdev);
+ if (r < 0)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ kfree(phy);
+
+ return r;
+}
+
+static int microread_mei_remove(struct mei_device *device)
+{
+ struct microread_mei_phy *phy = mei_get_clientdata(device);
+
+ pr_info("Removing microread\n");
+
+ microread_remove(phy->hdev);
+
+ if (phy->powered)
+ microread_mei_disable(phy);
+
+ kfree(phy);
+
+ return 0;
+}
+
+static struct mei_id microread_mei_tbl[] = {
+ { MICROREAD_DRIVER_NAME, MICROREAD_UUID },
+
+ /* required last entry */
+ { }
+};
+
+MODULE_DEVICE_TABLE(mei, microread_mei_tbl);
+
+static struct mei_driver microread_driver = {
+ .id_table = microread_mei_tbl,
+ .name = MICROREAD_DRIVER_NAME,
+
+ .probe = microread_mei_probe,
+ .remove = microread_mei_remove,
+};
+
+static int microread_mei_init(void)
+{
+ int r;
+
+ pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+ r = mei_driver_register(&microread_driver);
+ if (r) {
+ pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static void microread_mei_exit(void)
+{
+ mei_driver_unregister(&microread_driver);
+}
+
+module_init(microread_mei_init);
+module_exit(microread_mei_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
new file mode 100644
index 000000000000..3420d833db17
--- /dev/null
+++ b/drivers/nfc/microread/microread.c
@@ -0,0 +1,728 @@
+/*
+ * HCI based Driver for Inside Secure microread NFC Chip
+ *
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/crc-ccitt.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "microread.h"
+
+/* Proprietary gates, events, commands and registers */
+/* Admin */
+#define MICROREAD_GATE_ID_ADM NFC_HCI_ADMIN_GATE
+#define MICROREAD_GATE_ID_MGT 0x01
+#define MICROREAD_GATE_ID_OS 0x02
+#define MICROREAD_GATE_ID_TESTRF 0x03
+#define MICROREAD_GATE_ID_LOOPBACK NFC_HCI_LOOPBACK_GATE
+#define MICROREAD_GATE_ID_IDT NFC_HCI_ID_MGMT_GATE
+#define MICROREAD_GATE_ID_LMS NFC_HCI_LINK_MGMT_GATE
+
+/* Reader */
+#define MICROREAD_GATE_ID_MREAD_GEN 0x10
+#define MICROREAD_GATE_ID_MREAD_ISO_B NFC_HCI_RF_READER_B_GATE
+#define MICROREAD_GATE_ID_MREAD_NFC_T1 0x12
+#define MICROREAD_GATE_ID_MREAD_ISO_A NFC_HCI_RF_READER_A_GATE
+#define MICROREAD_GATE_ID_MREAD_NFC_T3 0x14
+#define MICROREAD_GATE_ID_MREAD_ISO_15_3 0x15
+#define MICROREAD_GATE_ID_MREAD_ISO_15_2 0x16
+#define MICROREAD_GATE_ID_MREAD_ISO_B_3 0x17
+#define MICROREAD_GATE_ID_MREAD_BPRIME 0x18
+#define MICROREAD_GATE_ID_MREAD_ISO_A_3 0x19
+
+/* Card */
+#define MICROREAD_GATE_ID_MCARD_GEN 0x20
+#define MICROREAD_GATE_ID_MCARD_ISO_B 0x21
+#define MICROREAD_GATE_ID_MCARD_BPRIME 0x22
+#define MICROREAD_GATE_ID_MCARD_ISO_A 0x23
+#define MICROREAD_GATE_ID_MCARD_NFC_T3 0x24
+#define MICROREAD_GATE_ID_MCARD_ISO_15_3 0x25
+#define MICROREAD_GATE_ID_MCARD_ISO_15_2 0x26
+#define MICROREAD_GATE_ID_MCARD_ISO_B_2 0x27
+#define MICROREAD_GATE_ID_MCARD_ISO_CUSTOM 0x28
+#define MICROREAD_GATE_ID_SECURE_ELEMENT 0x2F
+
+/* P2P */
+#define MICROREAD_GATE_ID_P2P_GEN 0x30
+#define MICROREAD_GATE_ID_P2P_TARGET 0x31
+#define MICROREAD_PAR_P2P_TARGET_MODE 0x01
+#define MICROREAD_PAR_P2P_TARGET_GT 0x04
+#define MICROREAD_GATE_ID_P2P_INITIATOR 0x32
+#define MICROREAD_PAR_P2P_INITIATOR_GI 0x01
+#define MICROREAD_PAR_P2P_INITIATOR_GT 0x03
+
+/* Those pipes are created/opened by default in the chip */
+#define MICROREAD_PIPE_ID_LMS 0x00
+#define MICROREAD_PIPE_ID_ADMIN 0x01
+#define MICROREAD_PIPE_ID_MGT 0x02
+#define MICROREAD_PIPE_ID_OS 0x03
+#define MICROREAD_PIPE_ID_HDS_LOOPBACK 0x04
+#define MICROREAD_PIPE_ID_HDS_IDT 0x05
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_B 0x08
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_BPRIME 0x09
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_A 0x0A
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_15_3 0x0B
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_15_2 0x0C
+#define MICROREAD_PIPE_ID_HDS_MCARD_NFC_T3 0x0D
+#define MICROREAD_PIPE_ID_HDS_MCARD_ISO_B_2 0x0E
+#define MICROREAD_PIPE_ID_HDS_MCARD_CUSTOM 0x0F
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_B 0x10
+#define MICROREAD_PIPE_ID_HDS_MREAD_NFC_T1 0x11
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_A 0x12
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_15_3 0x13
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_15_2 0x14
+#define MICROREAD_PIPE_ID_HDS_MREAD_NFC_T3 0x15
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_B_3 0x16
+#define MICROREAD_PIPE_ID_HDS_MREAD_BPRIME 0x17
+#define MICROREAD_PIPE_ID_HDS_MREAD_ISO_A_3 0x18
+#define MICROREAD_PIPE_ID_HDS_MREAD_GEN 0x1B
+#define MICROREAD_PIPE_ID_HDS_STACKED_ELEMENT 0x1C
+#define MICROREAD_PIPE_ID_HDS_INSTANCES 0x1D
+#define MICROREAD_PIPE_ID_HDS_TESTRF 0x1E
+#define MICROREAD_PIPE_ID_HDS_P2P_TARGET 0x1F
+#define MICROREAD_PIPE_ID_HDS_P2P_INITIATOR 0x20
+
+/* Events */
+#define MICROREAD_EVT_MREAD_DISCOVERY_OCCURED NFC_HCI_EVT_TARGET_DISCOVERED
+#define MICROREAD_EVT_MREAD_CARD_FOUND 0x3D
+#define MICROREAD_EMCF_A_ATQA 0
+#define MICROREAD_EMCF_A_SAK 2
+#define MICROREAD_EMCF_A_LEN 3
+#define MICROREAD_EMCF_A_UID 4
+#define MICROREAD_EMCF_A3_ATQA 0
+#define MICROREAD_EMCF_A3_SAK 2
+#define MICROREAD_EMCF_A3_LEN 3
+#define MICROREAD_EMCF_A3_UID 4
+#define MICROREAD_EMCF_B_UID 0
+#define MICROREAD_EMCF_T1_ATQA 0
+#define MICROREAD_EMCF_T1_UID 4
+#define MICROREAD_EMCF_T3_UID 0
+#define MICROREAD_EVT_MREAD_DISCOVERY_START NFC_HCI_EVT_READER_REQUESTED
+#define MICROREAD_EVT_MREAD_DISCOVERY_START_SOME 0x3E
+#define MICROREAD_EVT_MREAD_DISCOVERY_STOP NFC_HCI_EVT_END_OPERATION
+#define MICROREAD_EVT_MREAD_SIM_REQUESTS 0x3F
+#define MICROREAD_EVT_MCARD_EXCHANGE NFC_HCI_EVT_TARGET_DISCOVERED
+#define MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_TO_RF 0x20
+#define MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_FROM_RF 0x21
+#define MICROREAD_EVT_MCARD_FIELD_ON 0x11
+#define MICROREAD_EVT_P2P_TARGET_ACTIVATED 0x13
+#define MICROREAD_EVT_P2P_TARGET_DEACTIVATED 0x12
+#define MICROREAD_EVT_MCARD_FIELD_OFF 0x14
+
+/* Commands */
+#define MICROREAD_CMD_MREAD_EXCHANGE 0x10
+#define MICROREAD_CMD_MREAD_SUBSCRIBE 0x3F
+
+/* Hosts IDs */
+#define MICROREAD_ELT_ID_HDS NFC_HCI_TERMINAL_HOST_ID
+#define MICROREAD_ELT_ID_SIM NFC_HCI_UICC_HOST_ID
+#define MICROREAD_ELT_ID_SE1 0x03
+#define MICROREAD_ELT_ID_SE2 0x04
+#define MICROREAD_ELT_ID_SE3 0x05
+
+static struct nfc_hci_gate microread_gates[] = {
+ {MICROREAD_GATE_ID_ADM, MICROREAD_PIPE_ID_ADMIN},
+ {MICROREAD_GATE_ID_LOOPBACK, MICROREAD_PIPE_ID_HDS_LOOPBACK},
+ {MICROREAD_GATE_ID_IDT, MICROREAD_PIPE_ID_HDS_IDT},
+ {MICROREAD_GATE_ID_LMS, MICROREAD_PIPE_ID_LMS},
+ {MICROREAD_GATE_ID_MREAD_ISO_B, MICROREAD_PIPE_ID_HDS_MREAD_ISO_B},
+ {MICROREAD_GATE_ID_MREAD_ISO_A, MICROREAD_PIPE_ID_HDS_MREAD_ISO_A},
+ {MICROREAD_GATE_ID_MREAD_ISO_A_3, MICROREAD_PIPE_ID_HDS_MREAD_ISO_A_3},
+ {MICROREAD_GATE_ID_MGT, MICROREAD_PIPE_ID_MGT},
+ {MICROREAD_GATE_ID_OS, MICROREAD_PIPE_ID_OS},
+ {MICROREAD_GATE_ID_MREAD_NFC_T1, MICROREAD_PIPE_ID_HDS_MREAD_NFC_T1},
+ {MICROREAD_GATE_ID_MREAD_NFC_T3, MICROREAD_PIPE_ID_HDS_MREAD_NFC_T3},
+ {MICROREAD_GATE_ID_P2P_TARGET, MICROREAD_PIPE_ID_HDS_P2P_TARGET},
+ {MICROREAD_GATE_ID_P2P_INITIATOR, MICROREAD_PIPE_ID_HDS_P2P_INITIATOR}
+};
+
+/* Largest headroom needed for outgoing custom commands */
+#define MICROREAD_CMDS_HEADROOM 2
+#define MICROREAD_CMD_TAILROOM 2
+
+struct microread_info {
+ struct nfc_phy_ops *phy_ops;
+ void *phy_id;
+
+ struct nfc_hci_dev *hdev;
+
+ int async_cb_type;
+ data_exchange_cb_t async_cb;
+ void *async_cb_context;
+};
+
+static int microread_open(struct nfc_hci_dev *hdev)
+{
+ struct microread_info *info = nfc_hci_get_clientdata(hdev);
+
+ return info->phy_ops->enable(info->phy_id);
+}
+
+static void microread_close(struct nfc_hci_dev *hdev)
+{
+ struct microread_info *info = nfc_hci_get_clientdata(hdev);
+
+ info->phy_ops->disable(info->phy_id);
+}
+
+static int microread_hci_ready(struct nfc_hci_dev *hdev)
+{
+ int r;
+ u8 param[4];
+
+ param[0] = 0x03;
+ r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_A,
+ MICROREAD_CMD_MREAD_SUBSCRIBE, param, 1, NULL);
+ if (r)
+ return r;
+
+ r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_A_3,
+ MICROREAD_CMD_MREAD_SUBSCRIBE, NULL, 0, NULL);
+ if (r)
+ return r;
+
+ param[0] = 0x00;
+ param[1] = 0x03;
+ param[2] = 0x00;
+ r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_B,
+ MICROREAD_CMD_MREAD_SUBSCRIBE, param, 3, NULL);
+ if (r)
+ return r;
+
+ r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_NFC_T1,
+ MICROREAD_CMD_MREAD_SUBSCRIBE, NULL, 0, NULL);
+ if (r)
+ return r;
+
+ param[0] = 0xFF;
+ param[1] = 0xFF;
+ param[2] = 0x00;
+ param[3] = 0x00;
+ r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_NFC_T3,
+ MICROREAD_CMD_MREAD_SUBSCRIBE, param, 4, NULL);
+
+ return r;
+}
+
+static int microread_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ struct microread_info *info = nfc_hci_get_clientdata(hdev);
+
+ return info->phy_ops->write(info->phy_id, skb);
+}
+
+static int microread_start_poll(struct nfc_hci_dev *hdev,
+ u32 im_protocols, u32 tm_protocols)
+{
+ int r;
+
+ u8 param[2];
+ u8 mode;
+
+ param[0] = 0x00;
+ param[1] = 0x00;
+
+ if (im_protocols & NFC_PROTO_ISO14443_MASK)
+ param[0] |= (1 << 2);
+
+ if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
+ param[0] |= 1;
+
+ if (im_protocols & NFC_PROTO_MIFARE_MASK)
+ param[1] |= 1;
+
+ if (im_protocols & NFC_PROTO_JEWEL_MASK)
+ param[0] |= (1 << 1);
+
+ if (im_protocols & NFC_PROTO_FELICA_MASK)
+ param[0] |= (1 << 5);
+
+ if (im_protocols & NFC_PROTO_NFC_DEP_MASK)
+ param[1] |= (1 << 1);
+
+ if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
+ hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
+ &hdev->gb_len);
+ if (hdev->gb == NULL || hdev->gb_len == 0) {
+ im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+ tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+ }
+ }
+
+ r = nfc_hci_send_event(hdev, MICROREAD_GATE_ID_MREAD_ISO_A,
+ MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL, 0);
+ if (r)
+ return r;
+
+ mode = 0xff;
+ r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+ MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1);
+ if (r)
+ return r;
+
+ if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_INITIATOR,
+ MICROREAD_PAR_P2P_INITIATOR_GI,
+ hdev->gb, hdev->gb_len);
+ if (r)
+ return r;
+ }
+
+ if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+ MICROREAD_PAR_P2P_TARGET_GT,
+ hdev->gb, hdev->gb_len);
+ if (r)
+ return r;
+
+ mode = 0x02;
+ r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+ MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1);
+ if (r)
+ return r;
+ }
+
+ return nfc_hci_send_event(hdev, MICROREAD_GATE_ID_MREAD_ISO_A,
+ MICROREAD_EVT_MREAD_DISCOVERY_START_SOME,
+ param, 2);
+}
+
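
To make the discovery parameter encoding above concrete, here is a small worked illustration (not part of the patch); the bit positions are exactly the ones tested in microread_start_poll():

	u8 param[2] = {0x00, 0x00};

	/* Hypothetical poll for ISO14443-A and FeliCa only */
	param[0] |= (1 << 2);	/* NFC_PROTO_ISO14443_MASK */
	param[0] |= (1 << 5);	/* NFC_PROTO_FELICA_MASK   */
	/* param sent with DISCOVERY_START_SOME is therefore {0x24, 0x00} */
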
+static int microread_dep_link_up(struct nfc_hci_dev *hdev,
+ struct nfc_target *target, u8 comm_mode,
+ u8 *gb, size_t gb_len)
+{
+ struct sk_buff *rgb_skb = NULL;
+ int r;
+
+ r = nfc_hci_get_param(hdev, target->hci_reader_gate,
+ MICROREAD_PAR_P2P_INITIATOR_GT, &rgb_skb);
+ if (r < 0)
+ return r;
+
+ if (rgb_skb->len == 0 || rgb_skb->len > NFC_GB_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ r = nfc_set_remote_general_bytes(hdev->ndev, rgb_skb->data,
+ rgb_skb->len);
+ if (r == 0)
+ r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode,
+ NFC_RF_INITIATOR);
+exit:
+ kfree_skb(rgb_skb);
+
+ return r;
+}
+
+static int microread_dep_link_down(struct nfc_hci_dev *hdev)
+{
+ return nfc_hci_send_event(hdev, MICROREAD_GATE_ID_P2P_INITIATOR,
+ MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL, 0);
+}
+
+static int microread_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
+ struct nfc_target *target)
+{
+ switch (gate) {
+ case MICROREAD_GATE_ID_P2P_INITIATOR:
+ target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ break;
+ default:
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int microread_complete_target_discovered(struct nfc_hci_dev *hdev,
+ u8 gate,
+ struct nfc_target *target)
+{
+ return 0;
+}
+
+#define MICROREAD_CB_TYPE_READER_ALL 1
+
+static void microread_im_transceive_cb(void *context, struct sk_buff *skb,
+ int err)
+{
+ struct microread_info *info = context;
+
+ switch (info->async_cb_type) {
+ case MICROREAD_CB_TYPE_READER_ALL:
+ if (err == 0) {
+ if (skb->len == 0) {
+ err = -EPROTO;
+ kfree_skb(skb);
+ info->async_cb(info->async_cb_context, NULL,
+ -EPROTO);
+ return;
+ }
+
+ if (skb->data[skb->len - 1] != 0) {
+ err = nfc_hci_result_to_errno(
+ skb->data[skb->len - 1]);
+ kfree_skb(skb);
+ info->async_cb(info->async_cb_context, NULL,
+ err);
+ return;
+ }
+
+ skb_trim(skb, skb->len - 1); /* RF Error ind. */
+ }
+ info->async_cb(info->async_cb_context, skb, err);
+ break;
+ default:
+ if (err == 0)
+ kfree_skb(skb);
+ break;
+ }
+}
+
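
The callback above assumes the Microread reader response carries a trailing status byte; a sketch of that layout, derived only from the handling in microread_im_transceive_cb() (an assumption, no datasheet was consulted):

/*
 *   [ data .............................. | status ]
 *        skb->len - 1 bytes                  1 byte
 *
 * status == 0: success, the byte is trimmed and the data goes upstream;
 * status != 0: mapped to an errno via nfc_hci_result_to_errno().
 */
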
+/*
+ * Returns:
+ * <= 0: the driver handled the data exchange
+ * 1: the driver does not handle it; please do standard processing
+ */
+static int microread_im_transceive(struct nfc_hci_dev *hdev,
+ struct nfc_target *target,
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct microread_info *info = nfc_hci_get_clientdata(hdev);
+ u8 control_bits;
+ u16 crc;
+
+ pr_info("data exchange to gate 0x%x\n", target->hci_reader_gate);
+
+ if (target->hci_reader_gate == MICROREAD_GATE_ID_P2P_INITIATOR) {
+ *skb_push(skb, 1) = 0;
+
+ return nfc_hci_send_event(hdev, target->hci_reader_gate,
+ MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_TO_RF,
+ skb->data, skb->len);
+ }
+
+ switch (target->hci_reader_gate) {
+ case MICROREAD_GATE_ID_MREAD_ISO_A:
+ control_bits = 0xCB;
+ break;
+ case MICROREAD_GATE_ID_MREAD_ISO_A_3:
+ control_bits = 0xCB;
+ break;
+ case MICROREAD_GATE_ID_MREAD_ISO_B:
+ control_bits = 0xCB;
+ break;
+ case MICROREAD_GATE_ID_MREAD_NFC_T1:
+ control_bits = 0x1B;
+
+ crc = crc_ccitt(0xffff, skb->data, skb->len);
+ crc = ~crc;
+ *skb_put(skb, 1) = crc & 0xff;
+ *skb_put(skb, 1) = crc >> 8;
+ break;
+ case MICROREAD_GATE_ID_MREAD_NFC_T3:
+ control_bits = 0xDB;
+ break;
+ default:
+ pr_info("Abort im_transceive to invalid gate 0x%x\n",
+ target->hci_reader_gate);
+ return 1;
+ }
+
+ *skb_push(skb, 1) = control_bits;
+
+ info->async_cb_type = MICROREAD_CB_TYPE_READER_ALL;
+ info->async_cb = cb;
+ info->async_cb_context = cb_context;
+
+ return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+ MICROREAD_CMD_MREAD_EXCHANGE,
+ skb->data, skb->len,
+ microread_im_transceive_cb, info);
+}
+
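
Given the return convention documented before microread_im_transceive(), the caller side can be pictured roughly as follows (hypothetical sketch of the HCI core's dispatch, not actual net/nfc/hci code):

	r = hdev->ops->im_transceive(hdev, target, skb, cb, cb_context);
	if (r <= 0)		/* the driver handled it (or failed) */
		return r;
	/* r == 1: fall back to the generic HCI data exchange path */
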
+static int microread_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ int r;
+
+ r = nfc_hci_send_event(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+ MICROREAD_EVT_MCARD_EXCHANGE,
+ skb->data, skb->len);
+
+ kfree_skb(skb);
+
+ return r;
+}
+
+static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
+ struct sk_buff *skb)
+{
+ struct nfc_target *targets;
+ int r = 0;
+
+ pr_info("target discovered to gate 0x%x\n", gate);
+
+ targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+ if (targets == NULL) {
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ targets->hci_reader_gate = gate;
+
+ switch (gate) {
+ case MICROREAD_GATE_ID_MREAD_ISO_A:
+ targets->supported_protocols =
+ nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A_SAK]);
+ targets->sens_res =
+ be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
+ targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
+ skb->data[MICROREAD_EMCF_A_LEN]);
+ targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
+ break;
+ case MICROREAD_GATE_ID_MREAD_ISO_A_3:
+ targets->supported_protocols =
+ nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A3_SAK]);
+ targets->sens_res =
+ be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
+ targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
+ skb->data[MICROREAD_EMCF_A3_LEN]);
+ targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
+ break;
+ case MICROREAD_GATE_ID_MREAD_ISO_B:
+ targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_B_UID], 4);
+ targets->nfcid1_len = 4;
+ break;
+ case MICROREAD_GATE_ID_MREAD_NFC_T1:
+ targets->supported_protocols = NFC_PROTO_JEWEL_MASK;
+ targets->sens_res =
+ le16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_T1_ATQA]);
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T1_UID], 4);
+ targets->nfcid1_len = 4;
+ break;
+ case MICROREAD_GATE_ID_MREAD_NFC_T3:
+ targets->supported_protocols = NFC_PROTO_FELICA_MASK;
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T3_UID], 8);
+ targets->nfcid1_len = 8;
+ break;
+ default:
+ pr_info("discard target discovered to gate 0x%x\n", gate);
+ goto exit_free;
+ }
+
+ r = nfc_targets_found(hdev->ndev, targets, 1);
+
+exit_free:
+ kfree(targets);
+
+exit:
+ kfree_skb(skb);
+
+ if (r)
+ pr_err("Failed to handle discovered target err=%d", r);
+}
+
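
For reference, the MICROREAD_EMCF_A_* offsets used above describe the CARD_FOUND event payload for ISO14443-A targets; the layout, reconstructed only from the defines and the parsing code (an assumption, not a documented format):

/*
 *   bytes 0-1 : ATQA (stored big endian, copied to sens_res)
 *   byte  2   : SAK  (copied to sel_res, selects the protocol)
 *   byte  3   : UID length
 *   bytes 4.. : UID  (copied to nfcid1)
 */
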
+static int microread_event_received(struct nfc_hci_dev *hdev, u8 gate,
+ u8 event, struct sk_buff *skb)
+{
+ int r;
+ u8 mode;
+
+ pr_info("Microread received event 0x%x to gate 0x%x\n", event, gate);
+
+ switch (event) {
+ case MICROREAD_EVT_MREAD_CARD_FOUND:
+ microread_target_discovered(hdev, gate, skb);
+ return 0;
+
+ case MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_FROM_RF:
+ if (skb->len < 1) {
+ kfree_skb(skb);
+ return -EPROTO;
+ }
+
+ if (skb->data[skb->len - 1]) {
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ skb_trim(skb, skb->len - 1);
+
+ r = nfc_tm_data_received(hdev->ndev, skb);
+ break;
+
+ case MICROREAD_EVT_MCARD_FIELD_ON:
+ case MICROREAD_EVT_MCARD_FIELD_OFF:
+ kfree_skb(skb);
+ return 0;
+
+ case MICROREAD_EVT_P2P_TARGET_ACTIVATED:
+ r = nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
+ NFC_COMM_PASSIVE, skb->data,
+ skb->len);
+
+ kfree_skb(skb);
+ break;
+
+ case MICROREAD_EVT_MCARD_EXCHANGE:
+ if (skb->len < 1) {
+ kfree_skb(skb);
+ return -EPROTO;
+ }
+
+ if (skb->data[skb->len-1]) {
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ skb_trim(skb, skb->len - 1);
+
+ r = nfc_tm_data_received(hdev->ndev, skb);
+ break;
+
+ case MICROREAD_EVT_P2P_TARGET_DEACTIVATED:
+ kfree_skb(skb);
+
+ mode = 0xff;
+ r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET,
+ MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1);
+ if (r)
+ break;
+
+ r = nfc_hci_send_event(hdev, gate,
+ MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL,
+ 0);
+ break;
+
+ default:
+ return 1;
+ }
+
+ return r;
+}
+
+static struct nfc_hci_ops microread_hci_ops = {
+ .open = microread_open,
+ .close = microread_close,
+ .hci_ready = microread_hci_ready,
+ .xmit = microread_xmit,
+ .start_poll = microread_start_poll,
+ .dep_link_up = microread_dep_link_up,
+ .dep_link_down = microread_dep_link_down,
+ .target_from_gate = microread_target_from_gate,
+ .complete_target_discovered = microread_complete_target_discovered,
+ .im_transceive = microread_im_transceive,
+ .tm_send = microread_tm_send,
+ .check_presence = NULL,
+ .event_received = microread_event_received,
+};
+
+int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
+ int phy_headroom, int phy_tailroom, int phy_payload,
+ struct nfc_hci_dev **hdev)
+{
+ struct microread_info *info;
+ unsigned long quirks = 0;
+ u32 protocols, se;
+ struct nfc_hci_init_data init_data;
+ int r;
+
+ info = kzalloc(sizeof(struct microread_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("Cannot allocate memory for microread_info.\n");
+ r = -ENOMEM;
+ goto err_info_alloc;
+ }
+
+ info->phy_ops = phy_ops;
+ info->phy_id = phy_id;
+
+ init_data.gate_count = ARRAY_SIZE(microread_gates);
+ memcpy(init_data.gates, microread_gates, sizeof(microread_gates));
+
+ strcpy(init_data.session_id, "MICROREA");
+
+ set_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &quirks);
+
+ protocols = NFC_PROTO_JEWEL_MASK |
+ NFC_PROTO_MIFARE_MASK |
+ NFC_PROTO_FELICA_MASK |
+ NFC_PROTO_ISO14443_MASK |
+ NFC_PROTO_ISO14443_B_MASK |
+ NFC_PROTO_NFC_DEP_MASK;
+
+ se = NFC_SE_UICC | NFC_SE_EMBEDDED;
+
+ info->hdev = nfc_hci_allocate_device(&microread_hci_ops, &init_data,
+ quirks, protocols, se, llc_name,
+ phy_headroom +
+ MICROREAD_CMDS_HEADROOM,
+ phy_tailroom +
+ MICROREAD_CMD_TAILROOM,
+ phy_payload);
+ if (!info->hdev) {
+ pr_err("Cannot allocate nfc hdev.\n");
+ r = -ENOMEM;
+ goto err_alloc_hdev;
+ }
+
+ nfc_hci_set_clientdata(info->hdev, info);
+
+ r = nfc_hci_register_device(info->hdev);
+ if (r)
+ goto err_regdev;
+
+ *hdev = info->hdev;
+
+ return 0;
+
+err_regdev:
+ nfc_hci_free_device(info->hdev);
+
+err_alloc_hdev:
+ kfree(info);
+
+err_info_alloc:
+ return r;
+}
+EXPORT_SYMBOL(microread_probe);
+
+void microread_remove(struct nfc_hci_dev *hdev)
+{
+ struct microread_info *info = nfc_hci_get_clientdata(hdev);
+
+ nfc_hci_unregister_device(hdev);
+ nfc_hci_free_device(hdev);
+ kfree(info);
+}
+EXPORT_SYMBOL(microread_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/microread/microread.h b/drivers/nfc/microread/microread.h
new file mode 100644
index 000000000000..64b447a1c5bf
--- /dev/null
+++ b/drivers/nfc/microread/microread.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LOCAL_MICROREAD_H_
+#define __LOCAL_MICROREAD_H_
+
+#include <net/nfc/hci.h>
+
+#define DRIVER_DESC "NFC driver for microread"
+
+int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
+ int phy_headroom, int phy_tailroom, int phy_payload,
+ struct nfc_hci_dev **hdev);
+
+void microread_remove(struct nfc_hci_dev *hdev);
+
+#endif /* __LOCAL_MICROREAD_H_ */
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 50b1ee41afc6..3b731acbc408 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -526,7 +526,7 @@ static int nfcwilink_probe(struct platform_device *pdev)
nfc_dev_dbg(&pdev->dev, "probe entry");
- drv = kzalloc(sizeof(struct nfcwilink), GFP_KERNEL);
+ drv = devm_kzalloc(&pdev->dev, sizeof(struct nfcwilink), GFP_KERNEL);
if (!drv) {
rc = -ENOMEM;
goto exit;
@@ -542,12 +542,13 @@ static int nfcwilink_probe(struct platform_device *pdev)
drv->ndev = nci_allocate_device(&nfcwilink_ops,
protocols,
+ NFC_SE_NONE,
NFCWILINK_HDR_LEN,
0);
if (!drv->ndev) {
nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
rc = -ENOMEM;
- goto free_exit;
+ goto exit;
}
nci_set_parent_dev(drv->ndev, &pdev->dev);
@@ -566,9 +567,6 @@ static int nfcwilink_probe(struct platform_device *pdev)
free_dev_exit:
nci_free_device(drv->ndev);
-free_exit:
- kfree(drv);
-
exit:
return rc;
}
@@ -588,8 +586,6 @@ static int nfcwilink_remove(struct platform_device *pdev)
nci_unregister_device(ndev);
nci_free_device(ndev);
- kfree(drv);
-
dev_set_drvdata(&pdev->dev, NULL);
return 0;
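
The nfcwilink changes above move the private data to a managed allocation: devm_kzalloc() ties the buffer's lifetime to the platform device, so it is released automatically on probe failure or removal, which is why the explicit kfree(drv) calls are dropped. A minimal sketch of the pattern (illustration only):

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;	/* nothing to free on any later error path */
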
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index ada681b01a17..f0f6763d67ae 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -41,11 +41,6 @@
#define SONY_VENDOR_ID 0x054c
#define PASORI_PRODUCT_ID 0x02e1
-#define PN533_QUIRKS_TYPE_A BIT(0)
-#define PN533_QUIRKS_TYPE_F BIT(1)
-#define PN533_QUIRKS_DEP BIT(2)
-#define PN533_QUIRKS_RAW_EXCHANGE BIT(3)
-
#define PN533_DEVICE_STD 0x1
#define PN533_DEVICE_PASORI 0x2
@@ -84,14 +79,18 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_LISTEN_TIME 2
/* frame definitions */
-#define PN533_NORMAL_FRAME_MAX_LEN 262 /* 6 (PREAMBLE, SOF, LEN, LCS, TFI)
- 254 (DATA)
- 2 (DCS, postamble) */
-
-#define PN533_FRAME_TAIL_SIZE 2
-#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
- PN533_FRAME_TAIL_SIZE)
-#define PN533_FRAME_ACK_SIZE (sizeof(struct pn533_frame) + 1)
+#define PN533_FRAME_HEADER_LEN (sizeof(struct pn533_frame) \
+ + 2) /* data[0] TFI, data[1] CC */
+#define PN533_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble */
+
+/*
+ * Max extended frame payload len, excluding TFI and CC
+ * which are already in PN533_FRAME_HEADER_LEN.
+ */
+#define PN533_FRAME_MAX_PAYLOAD_LEN 263
+
+#define PN533_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
+ Postamble (1) */
#define PN533_FRAME_CHECKSUM(f) (f->data[f->datalen])
#define PN533_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
@@ -105,8 +104,6 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
/* PN533 Commands */
#define PN533_FRAME_CMD(f) (f->data[1])
-#define PN533_FRAME_CMD_PARAMS_PTR(f) (&f->data[2])
-#define PN533_FRAME_CMD_PARAMS_LEN(f) (f->datalen - 2)
#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
#define PN533_CMD_RF_CONFIGURATION 0x32
@@ -120,6 +117,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
#define PN533_CMD_TG_GET_DATA 0x86
#define PN533_CMD_TG_SET_DATA 0x8e
+#define PN533_CMD_UNDEF 0xff
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -128,13 +126,12 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_MI_MASK 0x40
#define PN533_CMD_RET_SUCCESS 0x00
-/* PN533 status codes */
-#define PN533_STATUS_TARGET_RELEASED 0x29
-
struct pn533;
-typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg,
- u8 *params, int params_len);
+typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg, int status);
+
+typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
+ struct sk_buff *resp);
/* structs for pn533 commands */
@@ -222,7 +219,7 @@ struct pn533_poll_modulations {
u8 len;
};
-const struct pn533_poll_modulations poll_mod[] = {
+static const struct pn533_poll_modulations poll_mod[] = {
[PN533_POLL_MOD_106KBPS_A] = {
.data = {
.maxtg = 1,
@@ -282,11 +279,6 @@ const struct pn533_poll_modulations poll_mod[] = {
/* PN533_CMD_IN_ATR */
-struct pn533_cmd_activate_param {
- u8 tg;
- u8 next;
-} __packed;
-
struct pn533_cmd_activate_response {
u8 status;
u8 nfcid3t[10];
@@ -299,14 +291,6 @@ struct pn533_cmd_activate_response {
u8 gt[];
} __packed;
-/* PN533_CMD_IN_JUMP_FOR_DEP */
-struct pn533_cmd_jump_dep {
- u8 active;
- u8 baud;
- u8 next;
- u8 data[];
-} __packed;
-
struct pn533_cmd_jump_dep_response {
u8 status;
u8 tg;
@@ -329,32 +313,13 @@ struct pn533_cmd_jump_dep_response {
#define PN533_INIT_TARGET_RESP_ACTIVE 0x1
#define PN533_INIT_TARGET_RESP_DEP 0x4
-struct pn533_cmd_init_target {
- u8 mode;
- u8 mifare[6];
- u8 felica[18];
- u8 nfcid3[10];
- u8 gb_len;
- u8 gb[];
-} __packed;
-
-struct pn533_cmd_init_target_response {
- u8 mode;
- u8 cmd[];
-} __packed;
-
struct pn533 {
struct usb_device *udev;
struct usb_interface *interface;
struct nfc_dev *nfc_dev;
struct urb *out_urb;
- int out_maxlen;
- struct pn533_frame *out_frame;
-
struct urb *in_urb;
- int in_maxlen;
- struct pn533_frame *in_frame;
struct sk_buff_head resp_q;
@@ -365,12 +330,12 @@ struct pn533 {
struct work_struct mi_work;
struct work_struct tg_work;
struct timer_list listen_timer;
- struct pn533_frame *wq_in_frame;
int wq_in_error;
int cancel_listen;
pn533_cmd_complete_t cmd_complete;
void *cmd_complete_arg;
+ void *cmd_complete_mi_arg;
struct mutex cmd_lock;
u8 cmd;
@@ -391,16 +356,17 @@ struct pn533 {
struct list_head cmd_queue;
u8 cmd_pending;
+
+ struct pn533_frame_ops *ops;
};
struct pn533_cmd {
struct list_head queue;
- struct pn533_frame *out_frame;
- struct pn533_frame *in_frame;
- int in_frame_len;
- pn533_cmd_complete_t cmd_complete;
+ u8 cmd_code;
+ struct sk_buff *req;
+ struct sk_buff *resp;
+ int resp_len;
void *arg;
- gfp_t flags;
};
struct pn533_frame {
@@ -411,6 +377,22 @@ struct pn533_frame {
u8 data[];
} __packed;
+struct pn533_frame_ops {
+ void (*tx_frame_init)(void *frame, u8 cmd_code);
+ void (*tx_frame_finish)(void *frame);
+ void (*tx_update_payload_len)(void *frame, int len);
+ int tx_header_len;
+ int tx_tail_len;
+
+ bool (*rx_is_frame_valid)(void *frame);
+ int (*rx_frame_size)(void *frame);
+ int rx_header_len;
+ int rx_tail_len;
+
+ int max_payload_len;
+ u8 (*get_cmd_code)(void *frame);
+};
+
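
The pn533_frame_ops structure above isolates the framing details, so a different frame format could later be supported just by providing another ops instance; a purely hypothetical sketch (none of these symbols exist in the patch):

static struct pn533_frame_ops pn533_other_frame_ops = {
	.tx_frame_init		= pn533_other_tx_frame_init,
	.tx_frame_finish	= pn533_other_tx_frame_finish,
	.tx_update_payload_len	= pn533_other_tx_update_payload_len,
	.tx_header_len		= PN533_OTHER_FRAME_HEADER_LEN,
	.tx_tail_len		= PN533_OTHER_FRAME_TAIL_LEN,
	.rx_is_frame_valid	= pn533_other_rx_frame_is_valid,
	.rx_frame_size		= pn533_other_rx_frame_size,
	.rx_header_len		= PN533_OTHER_FRAME_HEADER_LEN,
	.rx_tail_len		= PN533_OTHER_FRAME_TAIL_LEN,
	.max_payload_len	= PN533_OTHER_FRAME_MAX_PAYLOAD_LEN,
	.get_cmd_code		= pn533_other_get_cmd_code,
};
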
/* The rule: value + checksum = 0 */
static inline u8 pn533_checksum(u8 value)
{
@@ -429,37 +411,21 @@ static u8 pn533_data_checksum(u8 *data, int datalen)
return pn533_checksum(sum);
}
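
The rule quoted in the context above ("value + checksum = 0") means every checksum byte is simply the two's complement of what it protects; a tiny standalone illustration (not part of the patch):

	u8 value = 0x2a;
	u8 csum  = pn533_checksum(value);	/* (u8)(~0x2a + 1) == 0xd6 */

	/* (u8)(value + csum) == 0x00, so the receiver's sum check passes */
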
-/**
- * pn533_tx_frame_ack - create a ack frame
- * @frame: The frame to be set as ack
- *
- * Ack is different type of standard frame. As a standard frame, it has
- * preamble and start_frame. However the checksum of this frame must fail,
- * i.e. datalen + datalen_checksum must NOT be zero. When the checksum test
- * fails and datalen = 0 and datalen_checksum = 0xFF, the frame is a ack.
- * After datalen_checksum field, the postamble is placed.
- */
-static void pn533_tx_frame_ack(struct pn533_frame *frame)
+static void pn533_tx_frame_init(void *_frame, u8 cmd_code)
{
- frame->preamble = 0;
- frame->start_frame = cpu_to_be16(PN533_SOF);
- frame->datalen = 0;
- frame->datalen_checksum = 0xFF;
- /* data[0] is used as postamble */
- frame->data[0] = 0;
-}
+ struct pn533_frame *frame = _frame;
-static void pn533_tx_frame_init(struct pn533_frame *frame, u8 cmd)
-{
frame->preamble = 0;
frame->start_frame = cpu_to_be16(PN533_SOF);
PN533_FRAME_IDENTIFIER(frame) = PN533_DIR_OUT;
- PN533_FRAME_CMD(frame) = cmd;
+ PN533_FRAME_CMD(frame) = cmd_code;
frame->datalen = 2;
}
-static void pn533_tx_frame_finish(struct pn533_frame *frame)
+static void pn533_tx_frame_finish(void *_frame)
{
+ struct pn533_frame *frame = _frame;
+
frame->datalen_checksum = pn533_checksum(frame->datalen);
PN533_FRAME_CHECKSUM(frame) =
@@ -468,9 +434,17 @@ static void pn533_tx_frame_finish(struct pn533_frame *frame)
PN533_FRAME_POSTAMBLE(frame) = 0;
}
-static bool pn533_rx_frame_is_valid(struct pn533_frame *frame)
+static void pn533_tx_update_payload_len(void *_frame, int len)
+{
+ struct pn533_frame *frame = _frame;
+
+ frame->datalen += len;
+}
+
+static bool pn533_rx_frame_is_valid(void *_frame)
{
u8 checksum;
+ struct pn533_frame *frame = _frame;
if (frame->start_frame != cpu_to_be16(PN533_SOF))
return false;
@@ -497,28 +471,48 @@ static bool pn533_rx_frame_is_ack(struct pn533_frame *frame)
return true;
}
-static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd)
+static inline int pn533_rx_frame_size(void *frame)
+{
+ struct pn533_frame *f = frame;
+
+ return sizeof(struct pn533_frame) + f->datalen + PN533_FRAME_TAIL_LEN;
+}
+
+static u8 pn533_get_cmd_code(void *frame)
+{
+ struct pn533_frame *f = frame;
+
+ return PN533_FRAME_CMD(f);
+}
+
+static struct pn533_frame_ops pn533_std_frame_ops = {
+ .tx_frame_init = pn533_tx_frame_init,
+ .tx_frame_finish = pn533_tx_frame_finish,
+ .tx_update_payload_len = pn533_tx_update_payload_len,
+ .tx_header_len = PN533_FRAME_HEADER_LEN,
+ .tx_tail_len = PN533_FRAME_TAIL_LEN,
+
+ .rx_is_frame_valid = pn533_rx_frame_is_valid,
+ .rx_frame_size = pn533_rx_frame_size,
+ .rx_header_len = PN533_FRAME_HEADER_LEN,
+ .rx_tail_len = PN533_FRAME_TAIL_LEN,
+
+ .max_payload_len = PN533_FRAME_MAX_PAYLOAD_LEN,
+ .get_cmd_code = pn533_get_cmd_code,
+};
+
+static bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame)
{
- return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd));
+ return (dev->ops->get_cmd_code(frame) == PN533_CMD_RESPONSE(dev->cmd));
}
static void pn533_wq_cmd_complete(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
- struct pn533_frame *in_frame;
int rc;
- in_frame = dev->wq_in_frame;
-
- if (dev->wq_in_error)
- rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL,
- dev->wq_in_error);
- else
- rc = dev->cmd_complete(dev, dev->cmd_complete_arg,
- PN533_FRAME_CMD_PARAMS_PTR(in_frame),
- PN533_FRAME_CMD_PARAMS_LEN(in_frame));
-
+ rc = dev->cmd_complete(dev, dev->cmd_complete_arg, dev->wq_in_error);
if (rc != -EINPROGRESS)
queue_work(dev->wq, &dev->cmd_work);
}
@@ -526,46 +520,46 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
static void pn533_recv_response(struct urb *urb)
{
struct pn533 *dev = urb->context;
- struct pn533_frame *in_frame;
-
- dev->wq_in_frame = NULL;
+ u8 *in_frame;
switch (urb->status) {
case 0:
- /* success */
- break;
+ break; /* success */
case -ECONNRESET:
case -ENOENT:
- case -ESHUTDOWN:
- nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
- " status: %d", urb->status);
+ nfc_dev_dbg(&dev->interface->dev,
+ "The urb has been canceled (status %d)",
+ urb->status);
dev->wq_in_error = urb->status;
goto sched_wq;
+ case -ESHUTDOWN:
default:
- nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
- " %d", urb->status);
+ nfc_dev_err(&dev->interface->dev,
+ "Urb failure (status %d)", urb->status);
dev->wq_in_error = urb->status;
goto sched_wq;
}
in_frame = dev->in_urb->transfer_buffer;
- if (!pn533_rx_frame_is_valid(in_frame)) {
+ nfc_dev_dbg(&dev->interface->dev, "Received a frame.");
+ print_hex_dump(KERN_DEBUG, "PN533 RX: ", DUMP_PREFIX_NONE, 16, 1,
+ in_frame, dev->ops->rx_frame_size(in_frame), false);
+
+ if (!dev->ops->rx_is_frame_valid(in_frame)) {
nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
dev->wq_in_error = -EIO;
goto sched_wq;
}
- if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) {
- nfc_dev_err(&dev->interface->dev, "The received frame is not "
- "response to the last command");
+ if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) {
+ nfc_dev_err(&dev->interface->dev,
+ "It it not the response to the last command");
dev->wq_in_error = -EIO;
goto sched_wq;
}
- nfc_dev_dbg(&dev->interface->dev, "Received a valid frame");
dev->wq_in_error = 0;
- dev->wq_in_frame = in_frame;
sched_wq:
queue_work(dev->wq, &dev->cmd_complete_work);
@@ -586,18 +580,18 @@ static void pn533_recv_ack(struct urb *urb)
switch (urb->status) {
case 0:
- /* success */
- break;
+ break; /* success */
case -ECONNRESET:
case -ENOENT:
- case -ESHUTDOWN:
- nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
- " status: %d", urb->status);
+ nfc_dev_dbg(&dev->interface->dev,
+ "The urb has been stopped (status %d)",
+ urb->status);
dev->wq_in_error = urb->status;
goto sched_wq;
+ case -ESHUTDOWN:
default:
- nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
- " %d", urb->status);
+ nfc_dev_err(&dev->interface->dev,
+ "Urb failure (status %d)", urb->status);
dev->wq_in_error = urb->status;
goto sched_wq;
}
@@ -610,12 +604,10 @@ static void pn533_recv_ack(struct urb *urb)
goto sched_wq;
}
- nfc_dev_dbg(&dev->interface->dev, "Received a valid ack");
-
rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
if (rc) {
- nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with"
- " result %d", rc);
+ nfc_dev_err(&dev->interface->dev,
+ "usb_submit_urb failed with result %d", rc);
dev->wq_in_error = rc;
goto sched_wq;
}
@@ -623,7 +615,6 @@ static void pn533_recv_ack(struct urb *urb)
return;
sched_wq:
- dev->wq_in_frame = NULL;
queue_work(dev->wq, &dev->cmd_complete_work);
}
@@ -636,47 +627,46 @@ static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
{
+ u8 ack[PN533_FRAME_ACK_SIZE] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+ /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- pn533_tx_frame_ack(dev->out_frame);
-
- dev->out_urb->transfer_buffer = dev->out_frame;
- dev->out_urb->transfer_buffer_length = PN533_FRAME_ACK_SIZE;
+ dev->out_urb->transfer_buffer = ack;
+ dev->out_urb->transfer_buffer_length = sizeof(ack);
rc = usb_submit_urb(dev->out_urb, flags);
return rc;
}
-static int __pn533_send_cmd_frame_async(struct pn533 *dev,
- struct pn533_frame *out_frame,
- struct pn533_frame *in_frame,
- int in_frame_len,
+static int __pn533_send_frame_async(struct pn533 *dev,
+ struct sk_buff *out,
+ struct sk_buff *in,
+ int in_len,
pn533_cmd_complete_t cmd_complete,
- void *arg, gfp_t flags)
+ void *arg)
{
int rc;
- nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x",
- PN533_FRAME_CMD(out_frame));
-
- dev->cmd = PN533_FRAME_CMD(out_frame);
+ dev->cmd = dev->ops->get_cmd_code(out->data);
dev->cmd_complete = cmd_complete;
dev->cmd_complete_arg = arg;
- dev->out_urb->transfer_buffer = out_frame;
- dev->out_urb->transfer_buffer_length =
- PN533_FRAME_SIZE(out_frame);
+ dev->out_urb->transfer_buffer = out->data;
+ dev->out_urb->transfer_buffer_length = out->len;
- dev->in_urb->transfer_buffer = in_frame;
- dev->in_urb->transfer_buffer_length = in_frame_len;
+ dev->in_urb->transfer_buffer = in->data;
+ dev->in_urb->transfer_buffer_length = in_len;
- rc = usb_submit_urb(dev->out_urb, flags);
+ print_hex_dump(KERN_DEBUG, "PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
+ rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
if (rc)
return rc;
- rc = pn533_submit_urb_for_ack(dev, flags);
+ rc = pn533_submit_urb_for_ack(dev, GFP_KERNEL);
if (rc)
goto error;
@@ -687,146 +677,325 @@ error:
return rc;
}
-static void pn533_wq_cmd(struct work_struct *work)
+static void pn533_build_cmd_frame(struct pn533 *dev, u8 cmd_code,
+ struct sk_buff *skb)
{
- struct pn533 *dev = container_of(work, struct pn533, cmd_work);
- struct pn533_cmd *cmd;
+ /* payload is already there, just update datalen */
+ int payload_len = skb->len;
+ struct pn533_frame_ops *ops = dev->ops;
- mutex_lock(&dev->cmd_lock);
- if (list_empty(&dev->cmd_queue)) {
- dev->cmd_pending = 0;
- mutex_unlock(&dev->cmd_lock);
- return;
- }
+ skb_push(skb, ops->tx_header_len);
+ skb_put(skb, ops->tx_tail_len);
- cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
+ ops->tx_frame_init(skb->data, cmd_code);
+ ops->tx_update_payload_len(skb->data, payload_len);
+ ops->tx_frame_finish(skb->data);
+}
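
Putting the init/update/finish helpers together, a standard frame for a parameter-less command such as GetFirmwareVersion (0x02) comes out as the well-known PN533 byte sequence below (shown only as an illustration, not generated by the patch):

/*
 *   00 00 FF 02 FE D4 02 2A 00
 *
 *   preamble, SoF (00 FF), LEN = 2 (TFI + CC), LCS = 0xFE,
 *   TFI = 0xD4 (host to chip), CC = 0x02, DCS = 0x2A, postamble
 */
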
- list_del(&cmd->queue);
+struct pn533_send_async_complete_arg {
+ pn533_send_async_complete_t complete_cb;
+ void *complete_cb_context;
+ struct sk_buff *resp;
+ struct sk_buff *req;
+};
- mutex_unlock(&dev->cmd_lock);
+static int pn533_send_async_complete(struct pn533 *dev, void *_arg, int status)
+{
+ struct pn533_send_async_complete_arg *arg = _arg;
- __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
- cmd->in_frame_len, cmd->cmd_complete,
- cmd->arg, cmd->flags);
+ struct sk_buff *req = arg->req;
+ struct sk_buff *resp = arg->resp;
- kfree(cmd);
+ int rc;
+
+ dev_kfree_skb(req);
+
+ if (status < 0) {
+ arg->complete_cb(dev, arg->complete_cb_context,
+ ERR_PTR(status));
+ dev_kfree_skb(resp);
+ kfree(arg);
+ return status;
+ }
+
+ skb_put(resp, dev->ops->rx_frame_size(resp->data));
+ skb_pull(resp, dev->ops->rx_header_len);
+ skb_trim(resp, resp->len - dev->ops->rx_tail_len);
+
+ rc = arg->complete_cb(dev, arg->complete_cb_context, resp);
+
+ kfree(arg);
+ return rc;
}
-static int pn533_send_cmd_frame_async(struct pn533 *dev,
- struct pn533_frame *out_frame,
- struct pn533_frame *in_frame,
- int in_frame_len,
- pn533_cmd_complete_t cmd_complete,
- void *arg, gfp_t flags)
+static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
+ struct sk_buff *req, struct sk_buff *resp,
+ int resp_len,
+ pn533_send_async_complete_t complete_cb,
+ void *complete_cb_context)
{
struct pn533_cmd *cmd;
+ struct pn533_send_async_complete_arg *arg;
int rc = 0;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x", cmd_code);
+
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+
+ arg->complete_cb = complete_cb;
+ arg->complete_cb_context = complete_cb_context;
+ arg->resp = resp;
+ arg->req = req;
+
+ pn533_build_cmd_frame(dev, cmd_code, req);
mutex_lock(&dev->cmd_lock);
if (!dev->cmd_pending) {
- rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
- in_frame_len, cmd_complete,
- arg, flags);
- if (!rc)
- dev->cmd_pending = 1;
+ rc = __pn533_send_frame_async(dev, req, resp, resp_len,
+ pn533_send_async_complete, arg);
+ if (rc)
+ goto error;
+ dev->cmd_pending = 1;
goto unlock;
}
- nfc_dev_dbg(&dev->interface->dev, "%s Queueing command", __func__);
+ nfc_dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x", __func__,
+ cmd_code);
- cmd = kzalloc(sizeof(struct pn533_cmd), flags);
+ cmd = kzalloc(sizeof(struct pn533_cmd), GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
- goto unlock;
+ goto error;
}
INIT_LIST_HEAD(&cmd->queue);
- cmd->out_frame = out_frame;
- cmd->in_frame = in_frame;
- cmd->in_frame_len = in_frame_len;
- cmd->cmd_complete = cmd_complete;
+ cmd->cmd_code = cmd_code;
+ cmd->req = req;
+ cmd->resp = resp;
+ cmd->resp_len = resp_len;
cmd->arg = arg;
- cmd->flags = flags;
list_add_tail(&cmd->queue, &dev->cmd_queue);
+ goto unlock;
+
+error:
+ kfree(arg);
unlock:
mutex_unlock(&dev->cmd_lock);
+ return rc;
+}
+
+static int pn533_send_data_async(struct pn533 *dev, u8 cmd_code,
+ struct sk_buff *req,
+ pn533_send_async_complete_t complete_cb,
+ void *complete_cb_context)
+{
+ struct sk_buff *resp;
+ int rc;
+ int resp_len = dev->ops->rx_header_len +
+ dev->ops->max_payload_len +
+ dev->ops->rx_tail_len;
+
+ resp = nfc_alloc_recv_skb(resp_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ rc = __pn533_send_async(dev, cmd_code, req, resp, resp_len, complete_cb,
+ complete_cb_context);
+ if (rc)
+ dev_kfree_skb(resp);
return rc;
}
-struct pn533_sync_cmd_response {
+static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code,
+ struct sk_buff *req,
+ pn533_send_async_complete_t complete_cb,
+ void *complete_cb_context)
+{
+ struct sk_buff *resp;
int rc;
- struct completion done;
-};
+ int resp_len = dev->ops->rx_header_len +
+ dev->ops->max_payload_len +
+ dev->ops->rx_tail_len;
+
+ resp = alloc_skb(resp_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ rc = __pn533_send_async(dev, cmd_code, req, resp, resp_len, complete_cb,
+ complete_cb_context);
+ if (rc)
+ dev_kfree_skb(resp);
+
+ return rc;
+}
-static int pn533_sync_cmd_complete(struct pn533 *dev, void *_arg,
- u8 *params, int params_len)
+/*
+ * pn533_send_cmd_direct_async
+ *
+ * The function sends a priority cmd directly to the chip, omitting the cmd
+ * queue. It's intended to be used by the chaining mechanism of received
+ * responses, where the host has to request every single chunk of data
+ * before scheduling the next cmd from the queue.
+ */
+static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code,
+ struct sk_buff *req,
+ pn533_send_async_complete_t complete_cb,
+ void *complete_cb_context)
{
- struct pn533_sync_cmd_response *arg = _arg;
+ struct pn533_send_async_complete_arg *arg;
+ struct sk_buff *resp;
+ int rc;
+ int resp_len = dev->ops->rx_header_len +
+ dev->ops->max_payload_len +
+ dev->ops->rx_tail_len;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ resp = alloc_skb(resp_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ dev_kfree_skb(resp);
+ return -ENOMEM;
+ }
+
+ arg->complete_cb = complete_cb;
+ arg->complete_cb_context = complete_cb_context;
+ arg->resp = resp;
+ arg->req = req;
+
+ pn533_build_cmd_frame(dev, cmd_code, req);
+
+ rc = __pn533_send_frame_async(dev, req, resp, resp_len,
+ pn533_send_async_complete, arg);
+ if (rc < 0) {
+ dev_kfree_skb(resp);
+ kfree(arg);
+ }
+
+ return rc;
+}
+
+static void pn533_wq_cmd(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, cmd_work);
+ struct pn533_cmd *cmd;
+
+ mutex_lock(&dev->cmd_lock);
+
+ if (list_empty(&dev->cmd_queue)) {
+ dev->cmd_pending = 0;
+ mutex_unlock(&dev->cmd_lock);
+ return;
+ }
+
+ cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
+
+ list_del(&cmd->queue);
+
+ mutex_unlock(&dev->cmd_lock);
+
+ __pn533_send_frame_async(dev, cmd->req, cmd->resp, cmd->resp_len,
+ pn533_send_async_complete, cmd->arg);
+
+ kfree(cmd);
+}
- arg->rc = 0;
+struct pn533_sync_cmd_response {
+ struct sk_buff *resp;
+ struct completion done;
+};
- if (params_len < 0) /* error */
- arg->rc = params_len;
+static int pn533_send_sync_complete(struct pn533 *dev, void *_arg,
+ struct sk_buff *resp)
+{
+ struct pn533_sync_cmd_response *arg = _arg;
+ arg->resp = resp;
complete(&arg->done);
return 0;
}
-static int pn533_send_cmd_frame_sync(struct pn533 *dev,
- struct pn533_frame *out_frame,
- struct pn533_frame *in_frame,
- int in_frame_len)
+/* pn533_send_cmd_sync
+ *
+ * Please note the req parameter is freed inside the function to
+ * limit the number of return value interpretations by the caller.
+ *
+ * 1. negative in case of error during the TX path -> req should be freed
+ *
+ * 2. negative in case of error during the RX path -> req should not be freed,
+ * as it has already been freed at the beginning of the RX path by
+ * async_complete_cb.
+ *
+ * 3. valid pointer in case of a successful RX path
+ *
+ * The caller has to check the return value with the IS_ERR macro. If the
+ * test passes, the returned pointer is valid.
+ */
+static struct sk_buff *pn533_send_cmd_sync(struct pn533 *dev, u8 cmd_code,
+ struct sk_buff *req)
{
int rc;
struct pn533_sync_cmd_response arg;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
init_completion(&arg.done);
- rc = pn533_send_cmd_frame_async(dev, out_frame, in_frame, in_frame_len,
- pn533_sync_cmd_complete, &arg, GFP_KERNEL);
- if (rc)
- return rc;
+ rc = pn533_send_cmd_async(dev, cmd_code, req,
+ pn533_send_sync_complete, &arg);
+ if (rc) {
+ dev_kfree_skb(req);
+ return ERR_PTR(rc);
+ }
wait_for_completion(&arg.done);
- return arg.rc;
+ return arg.resp;
}
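
A typical pn533_send_cmd_sync() caller follows the same pattern as pn533_activate_target_nfcdep() further down: allocate the request with pn533_alloc_skb(), append the parameter bytes, and test the result with IS_ERR(). Minimal sketch (illustration only):

	struct sk_buff *skb, *resp;

	skb = pn533_alloc_skb(dev, 2);		/* room for TG + Next */
	if (!skb)
		return -ENOMEM;

	*skb_put(skb, 1) = 1;			/* TG */
	*skb_put(skb, 1) = 0;			/* Next */

	resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_ATR, skb);
	if (IS_ERR(resp))
		return PTR_ERR(resp);		/* req already consumed */

	/* ... parse resp->data ..., then release it */
	dev_kfree_skb(resp);
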
static void pn533_send_complete(struct urb *urb)
{
struct pn533 *dev = urb->context;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
switch (urb->status) {
case 0:
- /* success */
- break;
+ break; /* success */
case -ECONNRESET:
case -ENOENT:
- case -ESHUTDOWN:
- nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
- " status: %d", urb->status);
+ nfc_dev_dbg(&dev->interface->dev,
+ "The urb has been stopped (status %d)",
+ urb->status);
break;
+ case -ESHUTDOWN:
default:
- nfc_dev_dbg(&dev->interface->dev, "Nonzero urb status received:"
- " %d", urb->status);
+ nfc_dev_err(&dev->interface->dev,
+ "Urb failure (status %d)", urb->status);
}
}
+static struct sk_buff *pn533_alloc_skb(struct pn533 *dev, unsigned int size)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(dev->ops->tx_header_len +
+ size +
+ dev->ops->tx_tail_len, GFP_KERNEL);
+
+ if (skb)
+ skb_reserve(skb, dev->ops->tx_header_len);
+
+ return skb;
+}
+
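
pn533_alloc_skb() reserves the frame header as headroom so that pn533_build_cmd_frame() can later skb_push() the header and skb_put() the tail around the caller's payload; the resulting buffer layout, sketched from the code above:

/*
 *   |<- tx_header_len ->|<---- payload ---->|<- tx_tail_len ->|
 *     reserved headroom    caller skb_put()    DCS + postamble
 */
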
struct pn533_target_type_a {
__be16 sens_res;
u8 sel_res;
@@ -867,9 +1036,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
platconf = PN533_TYPE_A_SENS_RES_PLATCONF(type_a->sens_res);
if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
- platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
- (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
- platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
+ platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
+ (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+ platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
return false;
/* Requirements 4.8.2.1, 4.8.2.3, 4.8.2.5 and 4.8.2.7 from NFC Forum */
@@ -884,7 +1053,7 @@ static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data,
{
struct pn533_target_type_a *tgt_type_a;
- tgt_type_a = (struct pn533_target_type_a *) tgt_data;
+ tgt_type_a = (struct pn533_target_type_a *)tgt_data;
if (!pn533_target_type_a_is_valid(tgt_type_a, tgt_data_len))
return -EPROTO;
@@ -942,14 +1111,13 @@ static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data,
{
struct pn533_target_felica *tgt_felica;
- tgt_felica = (struct pn533_target_felica *) tgt_data;
+ tgt_felica = (struct pn533_target_felica *)tgt_data;
if (!pn533_target_felica_is_valid(tgt_felica, tgt_data_len))
return -EPROTO;
- if (tgt_felica->nfcid2[0] == PN533_FELICA_SENSF_NFCID2_DEP_B1 &&
- tgt_felica->nfcid2[1] ==
- PN533_FELICA_SENSF_NFCID2_DEP_B2)
+ if ((tgt_felica->nfcid2[0] == PN533_FELICA_SENSF_NFCID2_DEP_B1) &&
+ (tgt_felica->nfcid2[1] == PN533_FELICA_SENSF_NFCID2_DEP_B2))
nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
else
nfc_tgt->supported_protocols = NFC_PROTO_FELICA_MASK;
@@ -979,9 +1147,9 @@ static bool pn533_target_jewel_is_valid(struct pn533_target_jewel *jewel,
platconf = PN533_TYPE_A_SENS_RES_PLATCONF(jewel->sens_res);
if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
- platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
- (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
- platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
+ platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
+ (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+ platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
return false;
return true;
@@ -992,7 +1160,7 @@ static int pn533_target_found_jewel(struct nfc_target *nfc_tgt, u8 *tgt_data,
{
struct pn533_target_jewel *tgt_jewel;
- tgt_jewel = (struct pn533_target_jewel *) tgt_data;
+ tgt_jewel = (struct pn533_target_jewel *)tgt_data;
if (!pn533_target_jewel_is_valid(tgt_jewel, tgt_data_len))
return -EPROTO;
@@ -1051,7 +1219,7 @@ static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
{
struct pn533_target_type_b *tgt_type_b;
- tgt_type_b = (struct pn533_target_type_b *) tgt_data;
+ tgt_type_b = (struct pn533_target_type_b *)tgt_data;
if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len))
return -EPROTO;
@@ -1061,50 +1229,37 @@ static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
return 0;
}
-struct pn533_poll_response {
- u8 nbtg;
- u8 tg;
- u8 target_data[];
-} __packed;
-
-static int pn533_target_found(struct pn533 *dev,
- struct pn533_poll_response *resp, int resp_len)
+static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
+ int tgdata_len)
{
- int target_data_len;
struct nfc_target nfc_tgt;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s - modulation=%d", __func__,
- dev->poll_mod_curr);
+ dev->poll_mod_curr);
- if (resp->tg != 1)
+ if (tg != 1)
return -EPROTO;
memset(&nfc_tgt, 0, sizeof(struct nfc_target));
- target_data_len = resp_len - sizeof(struct pn533_poll_response);
-
switch (dev->poll_mod_curr) {
case PN533_POLL_MOD_106KBPS_A:
- rc = pn533_target_found_type_a(&nfc_tgt, resp->target_data,
- target_data_len);
+ rc = pn533_target_found_type_a(&nfc_tgt, tgdata, tgdata_len);
break;
case PN533_POLL_MOD_212KBPS_FELICA:
case PN533_POLL_MOD_424KBPS_FELICA:
- rc = pn533_target_found_felica(&nfc_tgt, resp->target_data,
- target_data_len);
+ rc = pn533_target_found_felica(&nfc_tgt, tgdata, tgdata_len);
break;
case PN533_POLL_MOD_106KBPS_JEWEL:
- rc = pn533_target_found_jewel(&nfc_tgt, resp->target_data,
- target_data_len);
+ rc = pn533_target_found_jewel(&nfc_tgt, tgdata, tgdata_len);
break;
case PN533_POLL_MOD_847KBPS_B:
- rc = pn533_target_found_type_b(&nfc_tgt, resp->target_data,
- target_data_len);
+ rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len);
break;
default:
- nfc_dev_err(&dev->interface->dev, "Unknown current poll"
- " modulation");
+ nfc_dev_err(&dev->interface->dev,
+ "Unknown current poll modulation");
return -EPROTO;
}
@@ -1112,13 +1267,14 @@ static int pn533_target_found(struct pn533 *dev,
return rc;
if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
- nfc_dev_dbg(&dev->interface->dev, "The target found does not"
- " have the desired protocol");
+ nfc_dev_dbg(&dev->interface->dev,
+ "The Tg found doesn't have the desired protocol");
return -EAGAIN;
}
- nfc_dev_dbg(&dev->interface->dev, "Target found - supported protocols: "
- "0x%x", nfc_tgt.supported_protocols);
+ nfc_dev_dbg(&dev->interface->dev,
+ "Target found - supported protocols: 0x%x",
+ nfc_tgt.supported_protocols);
dev->tgt_available_prots = nfc_tgt.supported_protocols;
@@ -1140,7 +1296,7 @@ static void pn533_poll_reset_mod_list(struct pn533 *dev)
static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index)
{
dev->poll_mod_active[dev->poll_mod_count] =
- (struct pn533_poll_modulations *) &poll_mod[mod_index];
+ (struct pn533_poll_modulations *)&poll_mod[mod_index];
dev->poll_mod_count++;
}
@@ -1149,13 +1305,13 @@ static void pn533_poll_create_mod_list(struct pn533 *dev,
{
pn533_poll_reset_mod_list(dev);
- if (im_protocols & NFC_PROTO_MIFARE_MASK
- || im_protocols & NFC_PROTO_ISO14443_MASK
- || im_protocols & NFC_PROTO_NFC_DEP_MASK)
+ if ((im_protocols & NFC_PROTO_MIFARE_MASK) ||
+ (im_protocols & NFC_PROTO_ISO14443_MASK) ||
+ (im_protocols & NFC_PROTO_NFC_DEP_MASK))
pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A);
- if (im_protocols & NFC_PROTO_FELICA_MASK
- || im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ if (im_protocols & NFC_PROTO_FELICA_MASK ||
+ im_protocols & NFC_PROTO_NFC_DEP_MASK) {
pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA);
pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA);
}
@@ -1170,16 +1326,20 @@ static void pn533_poll_create_mod_list(struct pn533 *dev,
pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
}
-static int pn533_start_poll_complete(struct pn533 *dev, u8 *params, int params_len)
+static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
{
- struct pn533_poll_response *resp;
- int rc;
+ u8 nbtg, tg, *tgdata;
+ int rc, tgdata_len;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- resp = (struct pn533_poll_response *) params;
- if (resp->nbtg) {
- rc = pn533_target_found(dev, resp, params_len);
+ nbtg = resp->data[0];
+ tg = resp->data[1];
+ tgdata = &resp->data[2];
+ tgdata_len = resp->len - 2; /* nbtg + tg */
+
+ if (nbtg) {
+ rc = pn533_target_found(dev, tg, tgdata, tgdata_len);
/* We must stop the poll after a valid target found */
if (rc == 0) {
@@ -1191,158 +1351,134 @@ static int pn533_start_poll_complete(struct pn533 *dev, u8 *params, int params_l
return -EAGAIN;
}
-static int pn533_init_target_frame(struct pn533_frame *frame,
- u8 *gb, size_t gb_len)
+static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
{
- struct pn533_cmd_init_target *cmd;
- size_t cmd_len;
+ struct sk_buff *skb;
+ u8 *felica, *nfcid3, *gb;
+
+ u8 *gbytes = dev->gb;
+ size_t gbytes_len = dev->gb_len;
+
u8 felica_params[18] = {0x1, 0xfe, /* DEP */
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, /* random */
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0xff}; /* System code */
+
u8 mifare_params[6] = {0x1, 0x1, /* SENS_RES */
0x0, 0x0, 0x0,
0x40}; /* SEL_RES for DEP */
- cmd_len = sizeof(struct pn533_cmd_init_target) + gb_len + 1;
- cmd = kzalloc(cmd_len, GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
+ unsigned int skb_len = 36 + /* mode (1), mifare (6),
+ felica (18), nfcid3 (10), gb_len (1) */
+ gbytes_len +
+ 1; /* len Tk*/
- pn533_tx_frame_init(frame, PN533_CMD_TG_INIT_AS_TARGET);
+ skb = pn533_alloc_skb(dev, skb_len);
+ if (!skb)
+ return NULL;
/* DEP support only */
- cmd->mode |= PN533_INIT_TARGET_DEP;
+ *skb_put(skb, 1) = PN533_INIT_TARGET_DEP;
+
+ /* MIFARE params */
+ memcpy(skb_put(skb, 6), mifare_params, 6);
/* Felica params */
- memcpy(cmd->felica, felica_params, 18);
- get_random_bytes(cmd->felica + 2, 6);
+ felica = skb_put(skb, 18);
+ memcpy(felica, felica_params, 18);
+ get_random_bytes(felica + 2, 6);
/* NFCID3 */
- memset(cmd->nfcid3, 0, 10);
- memcpy(cmd->nfcid3, cmd->felica, 8);
-
- /* MIFARE params */
- memcpy(cmd->mifare, mifare_params, 6);
+ nfcid3 = skb_put(skb, 10);
+ memset(nfcid3, 0, 10);
+ memcpy(nfcid3, felica, 8);
/* General bytes */
- cmd->gb_len = gb_len;
- memcpy(cmd->gb, gb, gb_len);
+ *skb_put(skb, 1) = gbytes_len;
- /* Len Tk */
- cmd->gb[gb_len] = 0;
-
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), cmd, cmd_len);
-
- frame->datalen += cmd_len;
-
- pn533_tx_frame_finish(frame);
+ gb = skb_put(skb, gbytes_len);
+ memcpy(gb, gbytes, gbytes_len);
- kfree(cmd);
+ /* Len Tk */
+ *skb_put(skb, 1) = 0;
- return 0;
+ return skb;
}
-#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
+#define PN533_CMD_DATAEXCH_HEAD_LEN 1
#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
- u8 *params, int params_len)
+ struct sk_buff *resp)
{
- struct sk_buff *skb_resp = arg;
- struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
+ u8 status;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (params_len < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error %d when starting as a target",
- params_len);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
- return params_len;
- }
+ status = resp->data[0];
+ skb_pull(resp, sizeof(status));
- if (params_len > 0 && params[0] != 0) {
+ if (status != 0) {
nfc_tm_deactivated(dev->nfc_dev);
-
dev->tgt_mode = 0;
-
- kfree_skb(skb_resp);
+ dev_kfree_skb(resp);
return 0;
}
- skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
- skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
- skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
-
- return nfc_tm_data_received(dev->nfc_dev, skb_resp);
+ return nfc_tm_data_received(dev->nfc_dev, resp);
}
static void pn533_wq_tg_get_data(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, tg_work);
- struct pn533_frame *in_frame;
- struct sk_buff *skb_resp;
- size_t skb_resp_len;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ struct sk_buff *skb;
+ int rc;
- skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
- PN533_CMD_DATAEXCH_DATA_MAXLEN +
- PN533_FRAME_TAIL_SIZE;
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
- if (!skb_resp)
+ skb = pn533_alloc_skb(dev, 0);
+ if (!skb)
return;
- in_frame = (struct pn533_frame *)skb_resp->data;
+ rc = pn533_send_data_async(dev, PN533_CMD_TG_GET_DATA, skb,
+ pn533_tm_get_data_complete, NULL);
- pn533_tx_frame_init(dev->out_frame, PN533_CMD_TG_GET_DATA);
- pn533_tx_frame_finish(dev->out_frame);
-
- pn533_send_cmd_frame_async(dev, dev->out_frame, in_frame,
- skb_resp_len,
- pn533_tm_get_data_complete,
- skb_resp, GFP_KERNEL);
+ if (rc < 0)
+ dev_kfree_skb(skb);
return;
}
#define ATR_REQ_GB_OFFSET 17
-static int pn533_init_target_complete(struct pn533 *dev, u8 *params, int params_len)
+static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
{
- struct pn533_cmd_init_target_response *resp;
- u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb;
+ u8 mode, *cmd, comm_mode = NFC_COMM_PASSIVE, *gb;
size_t gb_len;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (params_len < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error %d when starting as a target",
- params_len);
-
- return params_len;
- }
-
- if (params_len < ATR_REQ_GB_OFFSET + 1)
+ if (resp->len < ATR_REQ_GB_OFFSET + 1)
return -EINVAL;
- resp = (struct pn533_cmd_init_target_response *) params;
+ mode = resp->data[0];
+ cmd = &resp->data[1];
- nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x param len %d\n",
- resp->mode, params_len);
+ nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
+ mode, resp->len);
- frame = resp->mode & PN533_INIT_TARGET_RESP_FRAME_MASK;
- if (frame == PN533_INIT_TARGET_RESP_ACTIVE)
+ if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) ==
+ PN533_INIT_TARGET_RESP_ACTIVE)
comm_mode = NFC_COMM_ACTIVE;
- /* Again, only DEP */
- if ((resp->mode & PN533_INIT_TARGET_RESP_DEP) == 0)
+ if ((mode & PN533_INIT_TARGET_RESP_DEP) == 0) /* Only DEP supported */
return -EOPNOTSUPP;
- gb = resp->cmd + ATR_REQ_GB_OFFSET;
- gb_len = params_len - (ATR_REQ_GB_OFFSET + 1);
+ gb = cmd + ATR_REQ_GB_OFFSET;
+ gb_len = resp->len - (ATR_REQ_GB_OFFSET + 1);
rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
comm_mode, gb, gb_len);
@@ -1353,7 +1489,6 @@ static int pn533_init_target_complete(struct pn533 *dev, u8 *params, int params_
}
dev->tgt_mode = 1;
-
queue_work(dev->wq, &dev->tg_work);
return 0;
@@ -1361,7 +1496,7 @@ static int pn533_init_target_complete(struct pn533 *dev, u8 *params, int params_
static void pn533_listen_mode_timer(unsigned long data)
{
- struct pn533 *dev = (struct pn533 *) data;
+ struct pn533 *dev = (struct pn533 *)data;
nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
@@ -1376,88 +1511,104 @@ static void pn533_listen_mode_timer(unsigned long data)
}
static int pn533_poll_complete(struct pn533 *dev, void *arg,
- u8 *params, int params_len)
+ struct sk_buff *resp)
{
struct pn533_poll_modulations *cur_mod;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (params_len == -ENOENT) {
- if (dev->poll_mod_count != 0)
- return 0;
-
- nfc_dev_err(&dev->interface->dev,
- "Polling operation has been stopped");
-
- goto stop_poll;
- }
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
- if (params_len < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error %d when running poll", params_len);
+ nfc_dev_err(&dev->interface->dev, "%s Poll complete error %d",
+ __func__, rc);
- goto stop_poll;
+ if (rc == -ENOENT) {
+ if (dev->poll_mod_count != 0)
+ return rc;
+ else
+ goto stop_poll;
+ } else if (rc < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when running poll", rc);
+ goto stop_poll;
+ }
}
cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
- if (cur_mod->len == 0) {
+ if (cur_mod->len == 0) { /* Target mode */
del_timer(&dev->listen_timer);
-
- return pn533_init_target_complete(dev, params, params_len);
- } else {
- rc = pn533_start_poll_complete(dev, params, params_len);
- if (!rc)
- return rc;
+ rc = pn533_init_target_complete(dev, resp);
+ goto done;
}
- pn533_poll_next_mod(dev);
+ /* Initiator mode */
+ rc = pn533_start_poll_complete(dev, resp);
+ if (!rc)
+ goto done;
+ pn533_poll_next_mod(dev);
queue_work(dev->wq, &dev->poll_work);
- return 0;
+done:
+ dev_kfree_skb(resp);
+ return rc;
stop_poll:
+ nfc_dev_err(&dev->interface->dev, "Polling operation has been stopped");
+
pn533_poll_reset_mod_list(dev);
dev->poll_protocols = 0;
- return 0;
+ return rc;
}
-static void pn533_build_poll_frame(struct pn533 *dev,
- struct pn533_frame *frame,
- struct pn533_poll_modulations *mod)
+static struct sk_buff *pn533_alloc_poll_in_frame(struct pn533 *dev,
+ struct pn533_poll_modulations *mod)
{
- nfc_dev_dbg(&dev->interface->dev, "mod len %d\n", mod->len);
+ struct sk_buff *skb;
- if (mod->len == 0) {
- /* Listen mode */
- pn533_init_target_frame(frame, dev->gb, dev->gb_len);
- } else {
- /* Polling mode */
- pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET);
+ skb = pn533_alloc_skb(dev, mod->len);
+ if (!skb)
+ return NULL;
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len);
- frame->datalen += mod->len;
+ memcpy(skb_put(skb, mod->len), &mod->data, mod->len);
- pn533_tx_frame_finish(frame);
- }
+ return skb;
}
static int pn533_send_poll_frame(struct pn533 *dev)
{
- struct pn533_poll_modulations *cur_mod;
+ struct pn533_poll_modulations *mod;
+ struct sk_buff *skb;
int rc;
+ u8 cmd_code;
- cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+ mod = dev->poll_mod_active[dev->poll_mod_curr];
- pn533_build_poll_frame(dev, dev->out_frame, cur_mod);
+ nfc_dev_dbg(&dev->interface->dev, "%s mod len %d\n",
+ __func__, mod->len);
- rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen, pn533_poll_complete,
- NULL, GFP_KERNEL);
- if (rc)
+ if (mod->len == 0) { /* Listen mode */
+ cmd_code = PN533_CMD_TG_INIT_AS_TARGET;
+ skb = pn533_alloc_poll_tg_frame(dev);
+ } else { /* Polling mode */
+ cmd_code = PN533_CMD_IN_LIST_PASSIVE_TARGET;
+ skb = pn533_alloc_poll_in_frame(dev, mod);
+ }
+
+ if (!skb) {
+ nfc_dev_err(&dev->interface->dev, "Failed to allocate skb.");
+ return -ENOMEM;
+ }
+
+ rc = pn533_send_cmd_async(dev, cmd_code, skb, pn533_poll_complete,
+ NULL);
+ if (rc < 0) {
+ dev_kfree_skb(skb);
nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
+ }
return rc;
}
@@ -1533,8 +1684,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
del_timer(&dev->listen_timer);
if (!dev->poll_mod_count) {
- nfc_dev_dbg(&dev->interface->dev, "Polling operation was not"
- " running");
+ nfc_dev_dbg(&dev->interface->dev,
+ "Polling operation was not running");
return;
}
@@ -1549,38 +1700,38 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
static int pn533_activate_target_nfcdep(struct pn533 *dev)
{
- struct pn533_cmd_activate_param param;
- struct pn533_cmd_activate_response *resp;
+ struct pn533_cmd_activate_response *rsp;
u16 gt_len;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ struct sk_buff *skb;
+ struct sk_buff *resp;
- pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_ATR);
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- param.tg = 1;
- param.next = 0;
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), &param,
- sizeof(struct pn533_cmd_activate_param));
- dev->out_frame->datalen += sizeof(struct pn533_cmd_activate_param);
+ skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
+ if (!skb)
+ return -ENOMEM;
- pn533_tx_frame_finish(dev->out_frame);
+ *skb_put(skb, sizeof(u8)) = 1; /* TG */
+ *skb_put(skb, sizeof(u8)) = 0; /* Next */
- rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen);
- if (rc)
- return rc;
+ resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_ATR, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
- resp = (struct pn533_cmd_activate_response *)
- PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame);
- rc = resp->status & PN533_CMD_RET_MASK;
- if (rc != PN533_CMD_RET_SUCCESS)
+ rsp = (struct pn533_cmd_activate_response *)resp->data;
+ rc = rsp->status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS) {
+ dev_kfree_skb(resp);
return -EIO;
+ }
/* ATR_RES general bytes are located at offset 16 */
- gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16;
- rc = nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len);
+ gt_len = resp->len - 16;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev, rsp->gt, gt_len);
+ dev_kfree_skb(resp);
return rc;
}
@@ -1591,38 +1742,38 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev,
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s - protocol=%u", __func__,
- protocol);
+ protocol);
if (dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev, "Cannot activate while"
- " polling");
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot activate while polling");
return -EBUSY;
}
if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev, "There is already an active"
- " target");
+ nfc_dev_err(&dev->interface->dev,
+ "There is already an active target");
return -EBUSY;
}
if (!dev->tgt_available_prots) {
- nfc_dev_err(&dev->interface->dev, "There is no available target"
- " to activate");
+ nfc_dev_err(&dev->interface->dev,
+ "There is no available target to activate");
return -EINVAL;
}
if (!(dev->tgt_available_prots & (1 << protocol))) {
- nfc_dev_err(&dev->interface->dev, "The target does not support"
- " the requested protocol %u", protocol);
+ nfc_dev_err(&dev->interface->dev,
+ "Target doesn't support requested proto %u",
+ protocol);
return -EINVAL;
}
if (protocol == NFC_PROTO_NFC_DEP) {
rc = pn533_activate_target_nfcdep(dev);
if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error %d when"
- " activating target with"
- " NFC_DEP protocol", rc);
+ nfc_dev_err(&dev->interface->dev,
+ "Activating target with DEP failed %d", rc);
return rc;
}
}
@@ -1637,8 +1788,10 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
struct nfc_target *target)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- u8 tg;
- u8 status;
+
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1649,83 +1802,69 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
}
dev->tgt_active_prot = 0;
-
skb_queue_purge(&dev->resp_q);
- pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE);
-
- tg = 1;
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), &tg, sizeof(u8));
- dev->out_frame->datalen += sizeof(u8);
+ skb = pn533_alloc_skb(dev, sizeof(u8));
+ if (!skb)
+ return;
- pn533_tx_frame_finish(dev->out_frame);
+ *skb_put(skb, 1) = 1; /* TG*/
- rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen);
- if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error when sending release"
- " command to the controller");
+ resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_RELEASE, skb);
+ if (IS_ERR(resp))
return;
- }
- status = PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame)[0];
- rc = status & PN533_CMD_RET_MASK;
+ rc = resp->data[0] & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS)
- nfc_dev_err(&dev->interface->dev, "Error 0x%x when releasing"
- " the target", rc);
+ nfc_dev_err(&dev->interface->dev,
+ "Error 0x%x when releasing the target", rc);
+ dev_kfree_skb(resp);
return;
}
static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
- u8 *params, int params_len)
+ struct sk_buff *resp)
{
- struct pn533_cmd_jump_dep_response *resp;
- struct nfc_target nfc_target;
+ struct pn533_cmd_jump_dep_response *rsp;
u8 target_gt_len;
int rc;
- struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg;
- u8 active = cmd->active;
+ u8 active = *(u8 *)arg;
kfree(arg);
- if (params_len == -ENOENT) {
- nfc_dev_dbg(&dev->interface->dev, "");
- return 0;
- }
-
- if (params_len < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error %d when bringing DEP link up",
- params_len);
- return 0;
- }
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
if (dev->tgt_available_prots &&
!(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
nfc_dev_err(&dev->interface->dev,
- "The target does not support DEP");
- return -EINVAL;
+ "The target does not support DEP");
+ rc = -EINVAL;
+ goto error;
}
- resp = (struct pn533_cmd_jump_dep_response *) params;
- rc = resp->status & PN533_CMD_RET_MASK;
+ rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+ rc = rsp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
nfc_dev_err(&dev->interface->dev,
- "Bringing DEP link up failed %d", rc);
- return 0;
+ "Bringing DEP link up failed %d", rc);
+ goto error;
}
if (!dev->tgt_available_prots) {
+ struct nfc_target nfc_target;
+
nfc_dev_dbg(&dev->interface->dev, "Creating new target");
nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
nfc_target.nfcid1_len = 10;
- memcpy(nfc_target.nfcid1, resp->nfcid3t, nfc_target.nfcid1_len);
+ memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
if (rc)
- return 0;
+ goto error;
dev->tgt_available_prots = 0;
}
@@ -1733,15 +1872,17 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
/* ATR_RES general bytes are located at offset 17 */
- target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17;
+ target_gt_len = resp->len - 17;
rc = nfc_set_remote_general_bytes(dev->nfc_dev,
- resp->gt, target_gt_len);
+ rsp->gt, target_gt_len);
if (rc == 0)
rc = nfc_dep_link_is_up(dev->nfc_dev,
- dev->nfc_dev->targets[0].idx,
- !active, NFC_RF_INITIATOR);
+ dev->nfc_dev->targets[0].idx,
+ !active, NFC_RF_INITIATOR);
- return 0;
+error:
+ dev_kfree_skb(resp);
+ return rc;
}
static int pn533_mod_to_baud(struct pn533 *dev)
@@ -1760,25 +1901,26 @@ static int pn533_mod_to_baud(struct pn533 *dev)
#define PASSIVE_DATA_LEN 5
static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
- u8 comm_mode, u8* gb, size_t gb_len)
+ u8 comm_mode, u8 *gb, size_t gb_len)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- struct pn533_cmd_jump_dep *cmd;
- u8 cmd_len, *data_ptr;
+ struct sk_buff *skb;
+ int rc, baud, skb_len;
+ u8 *next, *arg;
+
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- int rc, baud;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
if (dev->poll_mod_count) {
nfc_dev_err(&dev->interface->dev,
- "Cannot bring the DEP link up while polling");
+ "Cannot bring the DEP link up while polling");
return -EBUSY;
}
if (dev->tgt_active_prot) {
nfc_dev_err(&dev->interface->dev,
- "There is already an active target");
+ "There is already an active target");
return -EBUSY;
}
@@ -1789,43 +1931,48 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
return baud;
}
- cmd_len = sizeof(struct pn533_cmd_jump_dep) + gb_len;
+ skb_len = 3 + gb_len; /* ActPass + BR + Next */
if (comm_mode == NFC_COMM_PASSIVE)
- cmd_len += PASSIVE_DATA_LEN;
+ skb_len += PASSIVE_DATA_LEN;
- cmd = kzalloc(cmd_len, GFP_KERNEL);
- if (cmd == NULL)
+ skb = pn533_alloc_skb(dev, skb_len);
+ if (!skb)
return -ENOMEM;
- pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
+ *skb_put(skb, 1) = !comm_mode; /* ActPass */
+ *skb_put(skb, 1) = baud; /* Baud rate */
- cmd->active = !comm_mode;
- cmd->next = 0;
- cmd->baud = baud;
- data_ptr = cmd->data;
- if (comm_mode == NFC_COMM_PASSIVE && cmd->baud > 0) {
- memcpy(data_ptr, passive_data, PASSIVE_DATA_LEN);
- cmd->next |= 1;
- data_ptr += PASSIVE_DATA_LEN;
+ next = skb_put(skb, 1); /* Next */
+ *next = 0;
+
+ if (comm_mode == NFC_COMM_PASSIVE && baud > 0) {
+ memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data,
+ PASSIVE_DATA_LEN);
+ *next |= 1;
}
if (gb != NULL && gb_len > 0) {
- cmd->next |= 4; /* We have some Gi */
- memcpy(data_ptr, gb, gb_len);
+ memcpy(skb_put(skb, gb_len), gb, gb_len);
+ *next |= 4; /* We have some Gi */
} else {
- cmd->next = 0;
+ *next = 0;
}
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len);
- dev->out_frame->datalen += cmd_len;
+ arg = kmalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
- pn533_tx_frame_finish(dev->out_frame);
+ *arg = !comm_mode;
- rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen, pn533_in_dep_link_up_complete,
- cmd, GFP_KERNEL);
- if (rc < 0)
- kfree(cmd);
+ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
+ pn533_in_dep_link_up_complete, arg);
+
+ if (rc < 0) {
+ dev_kfree_skb(skb);
+ kfree(arg);
+ }
return rc;
}
@@ -1834,6 +1981,8 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
pn533_poll_reset_mod_list(dev);
if (dev->tgt_mode || dev->tgt_active_prot) {
@@ -1849,68 +1998,7 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
return 0;
}
-static int pn533_build_tx_frame(struct pn533 *dev, struct sk_buff *skb,
- bool target)
-{
- int payload_len = skb->len;
- struct pn533_frame *out_frame;
- u8 tg;
-
- nfc_dev_dbg(&dev->interface->dev, "%s - Sending %d bytes", __func__,
- payload_len);
-
- if (payload_len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
- /* TODO: Implement support to multi-part data exchange */
- nfc_dev_err(&dev->interface->dev, "Data length greater than the"
- " max allowed: %d",
- PN533_CMD_DATAEXCH_DATA_MAXLEN);
- return -ENOSYS;
- }
-
- if (target == true) {
- switch (dev->device_type) {
- case PN533_DEVICE_PASORI:
- if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
- skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
- out_frame = (struct pn533_frame *) skb->data;
- pn533_tx_frame_init(out_frame,
- PN533_CMD_IN_COMM_THRU);
-
- break;
- }
-
- default:
- skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
- out_frame = (struct pn533_frame *) skb->data;
- pn533_tx_frame_init(out_frame,
- PN533_CMD_IN_DATA_EXCHANGE);
- tg = 1;
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame),
- &tg, sizeof(u8));
- out_frame->datalen += sizeof(u8);
-
- break;
- }
-
- } else {
- skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
- out_frame = (struct pn533_frame *) skb->data;
- pn533_tx_frame_init(out_frame, PN533_CMD_TG_SET_DATA);
- }
-
-
- /* The data is already in the out_frame, just update the datalen */
- out_frame->datalen += payload_len;
-
- pn533_tx_frame_finish(out_frame);
- skb_put(skb, PN533_FRAME_TAIL_SIZE);
-
- return 0;
-}
-
struct pn533_data_exchange_arg {
- struct sk_buff *skb_resp;
- struct sk_buff *skb_out;
data_exchange_cb_t cb;
void *cb_context;
};
@@ -1920,7 +2008,7 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
struct sk_buff *skb, *tmp, *t;
unsigned int skb_len = 0, tmp_len = 0;
- nfc_dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
if (skb_queue_empty(&dev->resp_q))
return NULL;
@@ -1954,46 +2042,44 @@ out:
}
static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
- u8 *params, int params_len)
+ struct sk_buff *resp)
{
struct pn533_data_exchange_arg *arg = _arg;
- struct sk_buff *skb = NULL, *skb_resp = arg->skb_resp;
- struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
- int err = 0;
- u8 status;
- u8 cmd_ret;
+ struct sk_buff *skb;
+ int rc = 0;
+ u8 status, ret, mi;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- dev_kfree_skb(arg->skb_out);
-
- if (params_len < 0) { /* error */
- err = params_len;
- goto error;
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ goto _error;
}
- status = params[0];
+ status = resp->data[0];
+ ret = status & PN533_CMD_RET_MASK;
+ mi = status & PN533_CMD_MI_MASK;
+
+ skb_pull(resp, sizeof(status));
- cmd_ret = status & PN533_CMD_RET_MASK;
- if (cmd_ret != PN533_CMD_RET_SUCCESS) {
- nfc_dev_err(&dev->interface->dev, "PN533 reported error %d when"
- " exchanging data", cmd_ret);
- err = -EIO;
+ if (ret != PN533_CMD_RET_SUCCESS) {
+ nfc_dev_err(&dev->interface->dev,
+ "PN533 reported error %d when exchanging data",
+ ret);
+ rc = -EIO;
goto error;
}
- skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
- skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
- skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
- skb_queue_tail(&dev->resp_q, skb_resp);
+ skb_queue_tail(&dev->resp_q, resp);
- if (status & PN533_CMD_MI_MASK) {
+ if (mi) {
+ dev->cmd_complete_mi_arg = arg;
queue_work(dev->wq, &dev->mi_work);
return -EINPROGRESS;
}
skb = pn533_build_response(dev);
- if (skb == NULL)
+ if (!skb)
goto error;
arg->cb(arg->cb_context, skb, 0);
@@ -2001,11 +2087,12 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
return 0;
error:
+ dev_kfree_skb(resp);
+_error:
skb_queue_purge(&dev->resp_q);
- dev_kfree_skb(skb_resp);
- arg->cb(arg->cb_context, NULL, err);
+ arg->cb(arg->cb_context, NULL, rc);
kfree(arg);
- return 0;
+ return rc;
}
static int pn533_transceive(struct nfc_dev *nfc_dev,
@@ -2013,87 +2100,82 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
data_exchange_cb_t cb, void *cb_context)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- struct pn533_frame *out_frame, *in_frame;
- struct pn533_data_exchange_arg *arg;
- struct sk_buff *skb_resp;
- int skb_resp_len;
+ struct pn533_data_exchange_arg *arg = NULL;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (!dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev, "Cannot exchange data if"
- " there is no active target");
- rc = -EINVAL;
+ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
+ /* TODO: Implement support to multi-part data exchange */
+ nfc_dev_err(&dev->interface->dev,
+ "Data length greater than the max allowed: %d",
+ PN533_CMD_DATAEXCH_DATA_MAXLEN);
+ rc = -ENOSYS;
goto error;
}
- rc = pn533_build_tx_frame(dev, skb, true);
- if (rc)
- goto error;
-
- skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
- PN533_CMD_DATAEXCH_DATA_MAXLEN +
- PN533_FRAME_TAIL_SIZE;
-
- skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
- if (!skb_resp) {
- rc = -ENOMEM;
+ if (!dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev,
+ "Can't exchange data if there is no active target");
+ rc = -EINVAL;
goto error;
}
- in_frame = (struct pn533_frame *) skb_resp->data;
- out_frame = (struct pn533_frame *) skb->data;
-
- arg = kmalloc(sizeof(struct pn533_data_exchange_arg), GFP_KERNEL);
+ arg = kmalloc(sizeof(*arg), GFP_KERNEL);
if (!arg) {
rc = -ENOMEM;
- goto free_skb_resp;
+ goto error;
}
- arg->skb_resp = skb_resp;
- arg->skb_out = skb;
arg->cb = cb;
arg->cb_context = cb_context;
- rc = pn533_send_cmd_frame_async(dev, out_frame, in_frame, skb_resp_len,
- pn533_data_exchange_complete, arg,
- GFP_KERNEL);
- if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
- " perform data_exchange", rc);
- goto free_arg;
+ switch (dev->device_type) {
+ case PN533_DEVICE_PASORI:
+ if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
+ rc = pn533_send_data_async(dev, PN533_CMD_IN_COMM_THRU,
+ skb,
+ pn533_data_exchange_complete,
+ arg);
+
+ break;
+ }
+ default:
+ *skb_push(skb, sizeof(u8)) = 1; /*TG*/
+
+ rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
+ skb, pn533_data_exchange_complete,
+ arg);
+
+ break;
}
+ if (rc < 0) /* rc from send_async */
+ goto error;
+
return 0;
-free_arg:
- kfree(arg);
-free_skb_resp:
- kfree_skb(skb_resp);
error:
- kfree_skb(skb);
+ kfree(arg);
+ dev_kfree_skb(skb);
return rc;
}
static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
- u8 *params, int params_len)
+ struct sk_buff *resp)
{
- struct sk_buff *skb_out = arg;
+ u8 status;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- dev_kfree_skb(skb_out);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
- if (params_len < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error %d when sending data",
- params_len);
+ status = resp->data[0];
- return params_len;
- }
+ dev_kfree_skb(resp);
- if (params_len > 0 && params[0] != 0) {
+ if (status != 0) {
nfc_tm_deactivated(dev->nfc_dev);
dev->tgt_mode = 0;
@@ -2109,30 +2191,21 @@ static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- struct pn533_frame *out_frame;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- rc = pn533_build_tx_frame(dev, skb, false);
- if (rc)
- goto error;
-
- out_frame = (struct pn533_frame *) skb->data;
-
- rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
- dev->in_maxlen, pn533_tm_send_complete,
- skb, GFP_KERNEL);
- if (rc) {
+ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
nfc_dev_err(&dev->interface->dev,
- "Error %d when trying to send data", rc);
- goto error;
+ "Data length greater than the max allowed: %d",
+ PN533_CMD_DATAEXCH_DATA_MAXLEN);
+ return -ENOSYS;
}
- return 0;
-
-error:
- kfree_skb(skb);
+ rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
+ pn533_tm_send_complete, NULL);
+ if (rc < 0)
+ dev_kfree_skb(skb);
return rc;
}
@@ -2140,107 +2213,123 @@ error:
static void pn533_wq_mi_recv(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, mi_work);
- struct sk_buff *skb_cmd;
- struct pn533_data_exchange_arg *arg = dev->cmd_complete_arg;
- struct pn533_frame *out_frame, *in_frame;
- struct sk_buff *skb_resp;
- int skb_resp_len;
+
+ struct sk_buff *skb;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- /* This is a zero payload size skb */
- skb_cmd = alloc_skb(PN533_CMD_DATAEXCH_HEAD_LEN + PN533_FRAME_TAIL_SIZE,
- GFP_KERNEL);
- if (skb_cmd == NULL)
- goto error_cmd;
-
- skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN);
+ skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
+ if (!skb)
+ goto error;
- rc = pn533_build_tx_frame(dev, skb_cmd, true);
- if (rc)
- goto error_frame;
+ switch (dev->device_type) {
+ case PN533_DEVICE_PASORI:
+ if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
+ rc = pn533_send_cmd_direct_async(dev,
+ PN533_CMD_IN_COMM_THRU,
+ skb,
+ pn533_data_exchange_complete,
+ dev->cmd_complete_mi_arg);
- skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
- PN533_CMD_DATAEXCH_DATA_MAXLEN +
- PN533_FRAME_TAIL_SIZE;
- skb_resp = alloc_skb(skb_resp_len, GFP_KERNEL);
- if (!skb_resp) {
- rc = -ENOMEM;
- goto error_frame;
- }
+ break;
+ }
+ default:
+ *skb_put(skb, sizeof(u8)) = 1; /*TG*/
- in_frame = (struct pn533_frame *) skb_resp->data;
- out_frame = (struct pn533_frame *) skb_cmd->data;
+ rc = pn533_send_cmd_direct_async(dev,
+ PN533_CMD_IN_DATA_EXCHANGE,
+ skb,
+ pn533_data_exchange_complete,
+ dev->cmd_complete_mi_arg);
- arg->skb_resp = skb_resp;
- arg->skb_out = skb_cmd;
+ break;
+ }
- rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
- skb_resp_len,
- pn533_data_exchange_complete,
- dev->cmd_complete_arg, GFP_KERNEL);
- if (!rc)
+ if (rc == 0) /* success */
return;
- nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
- " perform data_exchange", rc);
-
- kfree_skb(skb_resp);
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when trying to perform data_exchange", rc);
-error_frame:
- kfree_skb(skb_cmd);
+ dev_kfree_skb(skb);
+ kfree(dev->cmd_complete_arg);
-error_cmd:
+error:
pn533_send_ack(dev, GFP_KERNEL);
-
- kfree(arg);
-
queue_work(dev->wq, &dev->cmd_work);
}
static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
u8 cfgdata_len)
{
- int rc;
- u8 *params;
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+
+ int skb_len;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- pn533_tx_frame_init(dev->out_frame, PN533_CMD_RF_CONFIGURATION);
+ skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
- params = PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame);
- params[0] = cfgitem;
- memcpy(&params[1], cfgdata, cfgdata_len);
- dev->out_frame->datalen += (1 + cfgdata_len);
+ skb = pn533_alloc_skb(dev, skb_len);
+ if (!skb)
+ return -ENOMEM;
- pn533_tx_frame_finish(dev->out_frame);
+ *skb_put(skb, sizeof(cfgitem)) = cfgitem;
+ memcpy(skb_put(skb, cfgdata_len), cfgdata, cfgdata_len);
- rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen);
+ resp = pn533_send_cmd_sync(dev, PN533_CMD_RF_CONFIGURATION, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
- return rc;
+ dev_kfree_skb(resp);
+ return 0;
+}
+
+static int pn533_get_firmware_version(struct pn533 *dev,
+ struct pn533_fw_version *fv)
+{
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+
+ skb = pn533_alloc_skb(dev, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ resp = pn533_send_cmd_sync(dev, PN533_CMD_GET_FIRMWARE_VERSION, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ fv->ic = resp->data[0];
+ fv->ver = resp->data[1];
+ fv->rev = resp->data[2];
+ fv->support = resp->data[3];
+
+ dev_kfree_skb(resp);
+ return 0;
}
static int pn533_fw_reset(struct pn533 *dev)
{
- int rc;
- u8 *params;
+ struct sk_buff *skb;
+ struct sk_buff *resp;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- pn533_tx_frame_init(dev->out_frame, 0x18);
+ skb = pn533_alloc_skb(dev, sizeof(u8));
+ if (!skb)
+ return -ENOMEM;
- params = PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame);
- params[0] = 0x1;
- dev->out_frame->datalen += 1;
+ *skb_put(skb, sizeof(u8)) = 0x1;
- pn533_tx_frame_finish(dev->out_frame);
+ resp = pn533_send_cmd_sync(dev, 0x18, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
- rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen);
+ dev_kfree_skb(resp);
- return rc;
+ return 0;
}
static struct nfc_ops pn533_nfc_ops = {
@@ -2337,7 +2426,7 @@ static int pn533_setup(struct pn533 *dev)
static int pn533_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- struct pn533_fw_version *fw_ver;
+ struct pn533_fw_version fw_ver;
struct pn533 *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
@@ -2359,41 +2448,32 @@ static int pn533_probe(struct usb_interface *interface,
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
- if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint)) {
- dev->in_maxlen = le16_to_cpu(endpoint->wMaxPacketSize);
+ if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
in_endpoint = endpoint->bEndpointAddress;
- }
- if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) {
- dev->out_maxlen =
- le16_to_cpu(endpoint->wMaxPacketSize);
+ if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
out_endpoint = endpoint->bEndpointAddress;
- }
}
if (!in_endpoint || !out_endpoint) {
- nfc_dev_err(&interface->dev, "Could not find bulk-in or"
- " bulk-out endpoint");
+ nfc_dev_err(&interface->dev,
+ "Could not find bulk-in or bulk-out endpoint");
rc = -ENODEV;
goto error;
}
- dev->in_frame = kmalloc(PN533_NORMAL_FRAME_MAX_LEN, GFP_KERNEL);
dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
- dev->out_frame = kmalloc(PN533_NORMAL_FRAME_MAX_LEN, GFP_KERNEL);
dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->in_frame || !dev->out_frame ||
- !dev->in_urb || !dev->out_urb)
+ if (!dev->in_urb || !dev->out_urb)
goto error;
usb_fill_bulk_urb(dev->in_urb, dev->udev,
- usb_rcvbulkpipe(dev->udev, in_endpoint),
- NULL, 0, NULL, dev);
+ usb_rcvbulkpipe(dev->udev, in_endpoint),
+ NULL, 0, NULL, dev);
usb_fill_bulk_urb(dev->out_urb, dev->udev,
- usb_sndbulkpipe(dev->udev, out_endpoint),
- NULL, 0,
- pn533_send_complete, dev);
+ usb_sndbulkpipe(dev->udev, out_endpoint),
+ NULL, 0, pn533_send_complete, dev);
INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
@@ -2414,18 +2494,7 @@ static int pn533_probe(struct usb_interface *interface,
usb_set_intfdata(interface, dev);
- pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION);
- pn533_tx_frame_finish(dev->out_frame);
-
- rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen);
- if (rc)
- goto destroy_wq;
-
- fw_ver = (struct pn533_fw_version *)
- PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame);
- nfc_dev_info(&dev->interface->dev, "NXP PN533 firmware ver %d.%d now"
- " attached", fw_ver->ver, fw_ver->rev);
+ dev->ops = &pn533_std_frame_ops;
dev->device_type = id->driver_info;
switch (dev->device_type) {
@@ -2444,9 +2513,21 @@ static int pn533_probe(struct usb_interface *interface,
goto destroy_wq;
}
+ memset(&fw_ver, 0, sizeof(fw_ver));
+ rc = pn533_get_firmware_version(dev, &fw_ver);
+ if (rc < 0)
+ goto destroy_wq;
+
+ nfc_dev_info(&dev->interface->dev,
+ "NXP PN533 firmware ver %d.%d now attached",
+ fw_ver.ver, fw_ver.rev);
+
+
dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
+ NFC_SE_NONE,
+ dev->ops->tx_header_len +
PN533_CMD_DATAEXCH_HEAD_LEN,
- PN533_FRAME_TAIL_SIZE);
+ dev->ops->tx_tail_len);
if (!dev->nfc_dev)
goto destroy_wq;
@@ -2472,9 +2553,7 @@ free_nfc_dev:
destroy_wq:
destroy_workqueue(dev->wq);
error:
- kfree(dev->in_frame);
usb_free_urb(dev->in_urb);
- kfree(dev->out_frame);
usb_free_urb(dev->out_urb);
kfree(dev);
return rc;
@@ -2505,9 +2584,7 @@ static void pn533_disconnect(struct usb_interface *interface)
kfree(cmd);
}
- kfree(dev->in_frame);
usb_free_urb(dev->in_urb);
- kfree(dev->out_frame);
usb_free_urb(dev->out_urb);
kfree(dev);
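
The pn533.c changes above convert the driver from the pre-allocated in_frame/out_frame buffers to per-command sk_buffs with ERR_PTR-based completion callbacks. A minimal sketch of the resulting calling convention, using only helpers visible in this patch (pn533_alloc_skb, pn533_send_cmd_async, PN533_CMD_RET_MASK); my_send() and my_complete() are illustrative names, not part of the patch:

static int my_complete(struct pn533 *dev, void *arg, struct sk_buff *resp)
{
	u8 status;

	/* On failure the callback receives an ERR_PTR, not a valid skb */
	if (IS_ERR(resp))
		return PTR_ERR(resp);

	status = resp->data[0] & PN533_CMD_RET_MASK;

	/* The completion callback owns the response skb and must free it */
	dev_kfree_skb(resp);

	return (status == PN533_CMD_RET_SUCCESS) ? 0 : -EIO;
}

static int my_send(struct pn533 *dev)
{
	struct sk_buff *skb;
	int rc;

	skb = pn533_alloc_skb(dev, sizeof(u8));
	if (!skb)
		return -ENOMEM;

	*skb_put(skb, sizeof(u8)) = 1;	/* single parameter byte (TG) */

	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_RELEASE, skb,
				  my_complete, NULL);
	if (rc < 0)	/* the submitter frees the skb only if queueing fails */
		dev_kfree_skb(skb);

	return rc;
}
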
diff --git a/drivers/nfc/pn544/Kconfig b/drivers/nfc/pn544/Kconfig
new file mode 100644
index 000000000000..c277790ac71c
--- /dev/null
+++ b/drivers/nfc/pn544/Kconfig
@@ -0,0 +1,23 @@
+config NFC_PN544
+ tristate "NXP PN544 NFC driver"
+ depends on NFC_HCI
+ select CRC_CCITT
+ default n
+ ---help---
+ NXP PN544 core driver.
+ This is a driver based on the HCI NFC kernel layers and
+ will thus not work with NXP libnfc library.
+
+ To compile this driver as a module, choose m here. The module will
+ be called pn544.
+ Say N if unsure.
+
+config NFC_PN544_I2C
+ tristate "NFC PN544 i2c support"
+ depends on NFC_PN544 && I2C && NFC_SHDLC
+ ---help---
+ This module adds support for the NXP pn544 i2c interface.
+ Select this if your platform is using the i2c bus.
+
+ If you choose to build a module, it'll be called pn544_i2c.
+	  Say N if unsure.
\ No newline at end of file
diff --git a/drivers/nfc/pn544/Makefile b/drivers/nfc/pn544/Makefile
index 725733881eb3..ac076793687d 100644
--- a/drivers/nfc/pn544/Makefile
+++ b/drivers/nfc/pn544/Makefile
@@ -2,6 +2,7 @@
# Makefile for PN544 HCI based NFC driver
#
-obj-$(CONFIG_PN544_HCI_NFC) += pn544_i2c.o
+pn544_i2c-objs = i2c.o
-pn544_i2c-y := pn544.o i2c.o
+obj-$(CONFIG_NFC_PN544) += pn544.o
+obj-$(CONFIG_NFC_PN544_I2C) += pn544_i2c.o
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 7da9071b68b6..8cf64c19f022 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -361,8 +361,8 @@ static struct nfc_phy_ops i2c_phy_ops = {
.disable = pn544_hci_i2c_disable,
};
-static int __devinit pn544_hci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pn544_hci_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct pn544_i2c_phy *phy;
struct pn544_nfc_platform_data *pdata;
@@ -376,12 +376,12 @@ static int __devinit pn544_hci_i2c_probe(struct i2c_client *client,
return -ENODEV;
}
- phy = kzalloc(sizeof(struct pn544_i2c_phy), GFP_KERNEL);
+ phy = devm_kzalloc(&client->dev, sizeof(struct pn544_i2c_phy),
+ GFP_KERNEL);
if (!phy) {
dev_err(&client->dev,
"Cannot allocate memory for pn544 i2c phy.\n");
- r = -ENOMEM;
- goto err_phy_alloc;
+ return -ENOMEM;
}
phy->i2c_dev = client;
@@ -390,20 +390,18 @@ static int __devinit pn544_hci_i2c_probe(struct i2c_client *client,
pdata = client->dev.platform_data;
if (pdata == NULL) {
dev_err(&client->dev, "No platform data\n");
- r = -EINVAL;
- goto err_pdata;
+ return -EINVAL;
}
if (pdata->request_resources == NULL) {
dev_err(&client->dev, "request_resources() missing\n");
- r = -EINVAL;
- goto err_pdata;
+ return -EINVAL;
}
r = pdata->request_resources(client);
if (r) {
dev_err(&client->dev, "Cannot get platform resources\n");
- goto err_pdata;
+ return r;
}
phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
@@ -435,14 +433,10 @@ err_rti:
if (pdata->free_resources != NULL)
pdata->free_resources();
-err_pdata:
- kfree(phy);
-
-err_phy_alloc:
return r;
}
-static __devexit int pn544_hci_i2c_remove(struct i2c_client *client)
+static int pn544_hci_i2c_remove(struct i2c_client *client)
{
struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
@@ -458,8 +452,6 @@ static __devexit int pn544_hci_i2c_remove(struct i2c_client *client)
if (pdata->free_resources)
pdata->free_resources();
- kfree(phy);
-
return 0;
}
@@ -469,32 +461,10 @@ static struct i2c_driver pn544_hci_i2c_driver = {
},
.probe = pn544_hci_i2c_probe,
.id_table = pn544_hci_i2c_id_table,
- .remove = __devexit_p(pn544_hci_i2c_remove),
+ .remove = pn544_hci_i2c_remove,
};
-static int __init pn544_hci_i2c_init(void)
-{
- int r;
-
- pr_debug(DRIVER_DESC ": %s\n", __func__);
-
- r = i2c_add_driver(&pn544_hci_i2c_driver);
- if (r) {
- pr_err(PN544_HCI_I2C_DRIVER_NAME
- ": driver registration failed\n");
- return r;
- }
-
- return 0;
-}
-
-static void __exit pn544_hci_i2c_exit(void)
-{
- i2c_del_driver(&pn544_hci_i2c_driver);
-}
-
-module_init(pn544_hci_i2c_init);
-module_exit(pn544_hci_i2c_exit);
+module_i2c_driver(pn544_hci_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
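
For reference, the module_i2c_driver() call used above generates roughly the init/exit boilerplate that the preceding hunk removes; a simplified expansion (the real macro goes through module_driver()):

static int __init pn544_hci_i2c_driver_init(void)
{
	return i2c_add_driver(&pn544_hci_i2c_driver);
}
module_init(pn544_hci_i2c_driver_init);

static void __exit pn544_hci_i2c_driver_exit(void)
{
	i2c_del_driver(&pn544_hci_i2c_driver);
}
module_exit(pn544_hci_i2c_driver_exit);
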
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index cc666de3b8e5..9c5f16e7baef 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/nfc.h>
#include <net/nfc/hci.h>
@@ -675,11 +676,17 @@ static int pn544_hci_im_transceive(struct nfc_hci_dev *hdev,
static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
{
+ int r;
+
/* Set default false for multiple information chaining */
*skb_push(skb, 1) = 0;
- return nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
- PN544_HCI_EVT_SND_DATA, skb->data, skb->len);
+ r = nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+ PN544_HCI_EVT_SND_DATA, skb->data, skb->len);
+
+ kfree_skb(skb);
+
+ return r;
}
static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
@@ -714,35 +721,40 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
return 0;
}
-static void pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
- u8 event, struct sk_buff *skb)
+/*
+ * Returns:
+ * <= 0: driver handled the event, skb consumed
+ * 1: driver does not handle the event, please do standard processing
+ */
+static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+ struct sk_buff *skb)
{
struct sk_buff *rgb_skb = NULL;
- int r = 0;
+ int r;
pr_debug("hci event %d", event);
switch (event) {
case PN544_HCI_EVT_ACTIVATED:
- if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE)
- nfc_hci_target_discovered(hdev, gate);
- else if (gate == PN544_RF_READER_NFCIP1_TARGET_GATE) {
+ if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE) {
+ r = nfc_hci_target_discovered(hdev, gate);
+ } else if (gate == PN544_RF_READER_NFCIP1_TARGET_GATE) {
r = nfc_hci_get_param(hdev, gate, PN544_DEP_ATR_REQ,
- &rgb_skb);
-
+ &rgb_skb);
if (r < 0)
goto exit;
- nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
- NFC_COMM_PASSIVE, rgb_skb->data,
- rgb_skb->len);
+ r = nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
+ NFC_COMM_PASSIVE, rgb_skb->data,
+ rgb_skb->len);
kfree_skb(rgb_skb);
+ } else {
+ r = -EINVAL;
}
-
break;
case PN544_HCI_EVT_DEACTIVATED:
- nfc_hci_send_event(hdev, gate,
- NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ r = nfc_hci_send_event(hdev, gate, NFC_HCI_EVT_END_OPERATION,
+ NULL, 0);
break;
case PN544_HCI_EVT_RCV_DATA:
if (skb->len < 2) {
@@ -757,15 +769,15 @@ static void pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
}
skb_pull(skb, 2);
- nfc_tm_data_received(hdev->ndev, skb);
-
- return;
+ return nfc_tm_data_received(hdev->ndev, skb);
default:
- break;
+ return 1;
}
exit:
kfree_skb(skb);
+
+ return r;
}
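
The comment above pn544_hci_event_received() defines a new return convention: a value <= 0 means the driver handled the event and consumed the skb, while 1 asks the core to run its standard processing. A hedged sketch of how a core-side dispatcher could honor that contract; hci_do_standard_event() is a hypothetical helper, not an API shown in this patch:

static void hci_dispatch_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
			       struct sk_buff *skb)
{
	int r = 1;	/* "not handled" unless the driver says otherwise */

	if (hdev->ops->event_received)
		r = hdev->ops->event_received(hdev, gate, event, skb);

	if (r <= 0)	/* driver handled the event and consumed the skb */
		return;

	/* hypothetical fallback to the core's default event handling */
	hci_do_standard_event(hdev, gate, event, skb);
}
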
static struct nfc_hci_ops pn544_hci_ops = {
@@ -789,7 +801,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
struct nfc_hci_dev **hdev)
{
struct pn544_hci_info *info;
- u32 protocols;
+ u32 protocols, se;
struct nfc_hci_init_data init_data;
int r;
@@ -822,8 +834,10 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
NFC_PROTO_ISO14443_B_MASK |
NFC_PROTO_NFC_DEP_MASK;
- info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
- protocols, llc_name,
+ se = NFC_SE_UICC | NFC_SE_EMBEDDED;
+
+ info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data, 0,
+ protocols, se, llc_name,
phy_headroom + PN544_CMDS_HEADROOM,
phy_tailroom, phy_payload);
if (!info->hdev) {
@@ -851,6 +865,7 @@ err_alloc_hdev:
err_info_alloc:
return r;
}
+EXPORT_SYMBOL(pn544_hci_probe);
void pn544_hci_remove(struct nfc_hci_dev *hdev)
{
@@ -860,3 +875,7 @@ void pn544_hci_remove(struct nfc_hci_dev *hdev)
nfc_hci_free_device(hdev);
kfree(info);
}
+EXPORT_SYMBOL(pn544_hci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/ntb/Kconfig b/drivers/ntb/Kconfig
new file mode 100644
index 000000000000..37ee6495acc1
--- /dev/null
+++ b/drivers/ntb/Kconfig
@@ -0,0 +1,13 @@
+config NTB
+ tristate "Intel Non-Transparent Bridge support"
+ depends on PCI
+ depends on X86_64
+ help
+ The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
+ connecting 2 systems. When configured, writes to the device's PCI
+ mapped memory will be mirrored to a buffer on the remote system. The
+ ntb Linux driver uses this point-to-point communication as a method to
+ transfer data from one system to the other.
+
+ If unsure, say N.
+
diff --git a/drivers/ntb/Makefile b/drivers/ntb/Makefile
new file mode 100644
index 000000000000..15cb59fd354e
--- /dev/null
+++ b/drivers/ntb/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NTB) += ntb.o
+
+ntb-objs := ntb_hw.o ntb_transport.o
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
new file mode 100644
index 000000000000..f802e7c92356
--- /dev/null
+++ b/drivers/ntb/ntb_hw.c
@@ -0,0 +1,1141 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "ntb_hw.h"
+#include "ntb_regs.h"
+
+#define NTB_NAME "Intel(R) PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER "0.25"
+
+MODULE_DESCRIPTION(NTB_NAME);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+enum {
+ NTB_CONN_CLASSIC = 0,
+ NTB_CONN_B2B,
+ NTB_CONN_RP,
+};
+
+enum {
+ NTB_DEV_USD = 0,
+ NTB_DEV_DSD,
+};
+
+enum {
+ SNB_HW = 0,
+ BWD_HW,
+};
+
+/* Translate memory window 0,1 to BAR 2,4 */
+#define MW_TO_BAR(mw) (mw * 2 + 2)
+
+static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
+
+/**
+ * ntb_register_event_callback() - register event callback
+ * @ndev: pointer to ntb_device instance
+ * @func: callback function to register
+ *
+ * This function registers a callback for any HW driver events such as link
+ * up/down, power management notices, etc.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_event_callback(struct ntb_device *ndev,
+ void (*func)(void *handle, enum ntb_hw_event event))
+{
+ if (ndev->event_cb)
+ return -EINVAL;
+
+ ndev->event_cb = func;
+
+ return 0;
+}
+
+/**
+ * ntb_unregister_event_callback() - unregisters the event callback
+ * @ndev: pointer to ntb_device instance
+ *
+ * This function unregisters the existing callback from transport
+ */
+void ntb_unregister_event_callback(struct ntb_device *ndev)
+{
+ ndev->event_cb = NULL;
+}
+
+/**
+ * ntb_register_db_callback() - register a callback for doorbell interrupt
+ * @ndev: pointer to ntb_device instance
+ * @idx: doorbell index to register callback, zero based
+ * @func: callback function to register
+ *
+ * This function registers a callback function for the doorbell interrupt
+ * on the primary side. The function also unmasks the doorbell so that the
+ * interrupt can be delivered.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
+ void *data, void (*func)(void *data, int db_num))
+{
+ unsigned long mask;
+
+ if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) {
+ dev_warn(&ndev->pdev->dev, "Invalid Index.\n");
+ return -EINVAL;
+ }
+
+ ndev->db_cb[idx].callback = func;
+ ndev->db_cb[idx].data = data;
+
+ /* unmask interrupt */
+ mask = readw(ndev->reg_ofs.pdb_mask);
+ clear_bit(idx * ndev->bits_per_vector, &mask);
+ writew(mask, ndev->reg_ofs.pdb_mask);
+
+ return 0;
+}
+
+/**
+ * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
+ * @ndev: pointer to ntb_device instance
+ * @idx: doorbell index to unregister the callback for, zero based
+ *
+ * This function unregisters a callback function for the doorbell interrupt
+ * on the primary side. The function will also mask the said doorbell.
+ */
+void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
+{
+ unsigned long mask;
+
+ if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
+ return;
+
+ mask = readw(ndev->reg_ofs.pdb_mask);
+ set_bit(idx * ndev->bits_per_vector, &mask);
+ writew(mask, ndev->reg_ofs.pdb_mask);
+
+ ndev->db_cb[idx].callback = NULL;
+}
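
Taken together, the registration helpers above are what an NTB transport calls before moving any data: reserve the device, hook the link-event notifier, then claim a doorbell. A minimal sketch under those assumptions; my_transport_attach(), my_event_handler() and my_db_handler() are illustrative names, not part of the patch:

static void my_db_handler(void *data, int db_num)
{
	/* data is the pointer given to ntb_register_db_callback() */
	pr_debug("doorbell %d rang\n", db_num);
}

static void my_event_handler(void *handle, enum ntb_hw_event event)
{
	if (event == NTB_EVENT_HW_LINK_UP)
		pr_info("NTB link is up\n");
}

static int my_transport_attach(struct pci_dev *pdev, void *my_transport)
{
	struct ntb_device *ndev;
	int rc;

	ndev = ntb_register_transport(pdev, my_transport);
	if (!ndev)
		return -EBUSY;

	rc = ntb_register_event_callback(ndev, my_event_handler);
	if (rc)
		goto err;

	/* claim doorbell 0 and have my_transport passed back to the handler */
	rc = ntb_register_db_callback(ndev, 0, my_transport, my_db_handler);
	if (rc)
		goto err;

	return 0;

err:
	ntb_unregister_transport(ndev);
	return rc;
}
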
+
+/**
+ * ntb_find_transport() - find the transport pointer
+ * @transport: pointer to pci device
+ *
+ * Given the pci device pointer, return the transport pointer passed in when
+ * the transport attached when it was inited.
+ *
+ * RETURNS: pointer to transport.
+ */
+void *ntb_find_transport(struct pci_dev *pdev)
+{
+ struct ntb_device *ndev = pci_get_drvdata(pdev);
+ return ndev->ntb_transport;
+}
+
+/**
+ * ntb_register_transport() - Register NTB transport with NTB HW driver
+ * @pdev: pointer to pci device
+ * @transport: transport identifier
+ *
+ * This function allows a transport to reserve the hardware driver for
+ * NTB usage.
+ *
+ * RETURNS: pointer to ntb_device, NULL on error.
+ */
+struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport)
+{
+ struct ntb_device *ndev = pci_get_drvdata(pdev);
+
+ if (ndev->ntb_transport)
+ return NULL;
+
+ ndev->ntb_transport = transport;
+ return ndev;
+}
+
+/**
+ * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
+ * @ndev - ntb_device of the transport to be freed
+ *
+ * This function unregisters the transport from the HW driver and performs any
+ * necessary cleanups.
+ */
+void ntb_unregister_transport(struct ntb_device *ndev)
+{
+ int i;
+
+ if (!ndev->ntb_transport)
+ return;
+
+ for (i = 0; i < ndev->max_cbs; i++)
+ ntb_unregister_db_callback(ndev, i);
+
+ ntb_unregister_event_callback(ndev);
+ ndev->ntb_transport = NULL;
+}
+
+/**
+ * ntb_write_local_spad() - write to the local scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. This writes over the data mirrored to the local scratchpad register
+ * by the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
+{
+ if (idx >= ndev->limits.max_spads)
+ return -EINVAL;
+
+ dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n",
+ val, idx);
+ writel(val, ndev->reg_ofs.spad_read + idx * 4);
+
+ return 0;
+}
+
+/**
+ * ntb_read_local_spad() - read from the primary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the primary (internal) side. This allows the local system to read data
+ * written and mirrored to the scratchpad register by the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
+{
+ if (idx >= ndev->limits.max_spads)
+ return -EINVAL;
+
+ *val = readl(ndev->reg_ofs.spad_write + idx * 4);
+ dev_dbg(&ndev->pdev->dev,
+ "Reading %x from local scratch pad index %d\n", *val, idx);
+
+ return 0;
+}
+
+/**
+ * ntb_write_remote_spad() - write to the secondary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. The register resides on the secondary (external) side. This allows
+ * the local system to write data to be mirrored to the remote system's
+ * scratchpad register.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
+{
+ if (idx >= ndev->limits.max_spads)
+ return -EINVAL;
+
+ dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n",
+ val, idx);
+ writel(val, ndev->reg_ofs.spad_write + idx * 4);
+
+ return 0;
+}
+
+/**
+ * ntb_read_remote_spad() - read from the primary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the primary (internal) side. This allows the local system to read the data
+ * it wrote to be mirrored on the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
+{
+ if (idx >= ndev->limits.max_spads)
+ return -EINVAL;
+
+ *val = readl(ndev->reg_ofs.spad_read + idx * 4);
+ dev_dbg(&ndev->pdev->dev,
+ "Reading %x from remote scratch pad index %d\n", *val, idx);
+
+ return 0;
+}
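
The four scratchpad accessors above pair up naturally for a simple handshake: each side writes into the peer's scratchpads and reads back what the peer mirrored into its own. A hedged sketch of one such exchange; the MY_SPAD_VERSION index and the value 1 are made up for illustration:

#define MY_SPAD_VERSION	0	/* illustrative scratchpad index */

static int my_spad_handshake(struct ntb_device *ndev)
{
	u32 peer_version;
	int rc;

	/* advertise our protocol version to the remote side */
	rc = ntb_write_remote_spad(ndev, MY_SPAD_VERSION, 1);
	if (rc)
		return rc;

	/* read what the remote side mirrored into our local scratchpad */
	rc = ntb_read_local_spad(ndev, MY_SPAD_VERSION, &peer_version);
	if (rc)
		return rc;

	return (peer_version == 1) ? 0 : -EINVAL;
}
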
+
+/**
+ * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the base virtual address of the memory window
+ * specified.
+ *
+ * RETURNS: pointer to virtual address, or NULL on error.
+ */
+void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
+{
+ if (mw > NTB_NUM_MW)
+ return NULL;
+
+ return ndev->mw[mw].vbase;
+}
+
+/**
+ * ntb_get_mw_size() - return size of NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the physical size of the memory window specified
+ *
+ * RETURNS: the size of the memory window or zero on error
+ */
+resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
+{
+ if (mw > NTB_NUM_MW)
+ return 0;
+
+ return ndev->mw[mw].bar_sz;
+}
+
+/**
+ * ntb_set_mw_addr - set the memory window address
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ * @addr: base address for data
+ *
+ * This function sets the base physical address of the memory window. This
+ * memory address is where data from the remote system will be transferred into
+ * or out of depending on how the transport is configured.
+ */
+void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
+{
+ if (mw > NTB_NUM_MW)
+ return;
+
+ dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
+ MW_TO_BAR(mw));
+
+ ndev->mw[mw].phys_addr = addr;
+
+ switch (MW_TO_BAR(mw)) {
+ case NTB_BAR_23:
+ writeq(addr, ndev->reg_ofs.sbar2_xlat);
+ break;
+ case NTB_BAR_45:
+ writeq(addr, ndev->reg_ofs.sbar4_xlat);
+ break;
+ }
+}
+
+/**
+ * ntb_ring_sdb() - Set the doorbell on the secondary/external side
+ * @ndev: pointer to ntb_device instance
+ * @db: doorbell to ring
+ *
+ * This function allows triggering of a doorbell on the secondary/external
+ * side that will initiate an interrupt on the remote host.
+ */
+void ntb_ring_sdb(struct ntb_device *ndev, unsigned int db)
+{
+ dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
+
+ if (ndev->hw_type == BWD_HW)
+ writeq((u64) 1 << db, ndev->reg_ofs.sdb);
+ else
+ writew(((1 << ndev->bits_per_vector) - 1) <<
+ (db * ndev->bits_per_vector), ndev->reg_ofs.sdb);
+}
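
Memory windows and doorbells are typically used together: the receiving side publishes a buffer with ntb_set_mw_addr(), while the sending side writes its payload through the window's virtual base and then rings a doorbell so the peer knows data is waiting. A hedged sketch of the transmit side only (buffer setup and synchronization omitted); my_mw_send() is an illustrative name:

static int my_mw_send(struct ntb_device *ndev, const void *buf, size_t len)
{
	void __iomem *mw_base = ntb_get_mw_vbase(ndev, 0);

	if (!mw_base || len > ntb_get_mw_size(ndev, 0))
		return -EINVAL;

	/* writes through the window land in the peer's published buffer */
	memcpy_toio(mw_base, buf, len);

	/* notify the peer that new data is waiting */
	ntb_ring_sdb(ndev, 0);

	return 0;
}
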
+
+static void ntb_link_event(struct ntb_device *ndev, int link_state)
+{
+ unsigned int event;
+
+ if (ndev->link_status == link_state)
+ return;
+
+ if (link_state == NTB_LINK_UP) {
+ u16 status;
+
+ dev_info(&ndev->pdev->dev, "Link Up\n");
+ ndev->link_status = NTB_LINK_UP;
+ event = NTB_EVENT_HW_LINK_UP;
+
+ if (ndev->hw_type == BWD_HW)
+ status = readw(ndev->reg_ofs.lnk_stat);
+ else {
+ int rc = pci_read_config_word(ndev->pdev,
+ SNB_LINK_STATUS_OFFSET,
+ &status);
+ if (rc)
+ return;
+ }
+ dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
+ (status & NTB_LINK_WIDTH_MASK) >> 4,
+ (status & NTB_LINK_SPEED_MASK));
+ } else {
+ dev_info(&ndev->pdev->dev, "Link Down\n");
+ ndev->link_status = NTB_LINK_DOWN;
+ event = NTB_EVENT_HW_LINK_DOWN;
+ }
+
+ /* notify the upper layer if we have an event change */
+ if (ndev->event_cb)
+ ndev->event_cb(ndev->ntb_transport, event);
+}
+
+static int ntb_link_status(struct ntb_device *ndev)
+{
+ int link_state;
+
+ if (ndev->hw_type == BWD_HW) {
+ u32 ntb_cntl;
+
+ ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+ if (ntb_cntl & BWD_CNTL_LINK_DOWN)
+ link_state = NTB_LINK_DOWN;
+ else
+ link_state = NTB_LINK_UP;
+ } else {
+ u16 status;
+ int rc;
+
+ rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
+ &status);
+ if (rc)
+ return rc;
+
+ if (status & NTB_LINK_STATUS_ACTIVE)
+ link_state = NTB_LINK_UP;
+ else
+ link_state = NTB_LINK_DOWN;
+ }
+
+ ntb_link_event(ndev, link_state);
+
+ return 0;
+}
+
+/* BWD doesn't have link status interrupt, poll on that platform */
+static void bwd_link_poll(struct work_struct *work)
+{
+ struct ntb_device *ndev = container_of(work, struct ntb_device,
+ hb_timer.work);
+ unsigned long ts = jiffies;
+
+ /* If we haven't gotten an interrupt in a while, check the BWD link
+ * status bit
+ */
+ if (ts > ndev->last_ts + NTB_HB_TIMEOUT) {
+ int rc = ntb_link_status(ndev);
+ if (rc)
+ dev_err(&ndev->pdev->dev,
+ "Error determining link status\n");
+ }
+
+ schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+}
+
+static int ntb_xeon_setup(struct ntb_device *ndev)
+{
+ int rc;
+ u8 val;
+
+ ndev->hw_type = SNB_HW;
+
+ rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &val);
+ if (rc)
+ return rc;
+
+ switch (val & SNB_PPD_CONN_TYPE) {
+ case NTB_CONN_B2B:
+ ndev->conn_type = NTB_CONN_B2B;
+ break;
+ case NTB_CONN_CLASSIC:
+ case NTB_CONN_RP:
+ default:
+ dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+ return -EINVAL;
+ }
+
+ if (val & SNB_PPD_DEV_TYPE)
+ ndev->dev_type = NTB_DEV_DSD;
+ else
+ ndev->dev_type = NTB_DEV_USD;
+
+ ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
+ ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
+ ndev->reg_ofs.sbar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
+ ndev->reg_ofs.sbar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
+ ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
+ ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_LINK_STATUS_OFFSET;
+ ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
+ ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
+
+ if (ndev->conn_type == NTB_CONN_B2B) {
+ ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
+ ndev->limits.max_spads = SNB_MAX_SPADS;
+ } else {
+ ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
+ ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS;
+ }
+
+ ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
+ ndev->limits.msix_cnt = SNB_MSIX_CNT;
+ ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
+
+ return 0;
+}
+
+static int ntb_bwd_setup(struct ntb_device *ndev)
+{
+ int rc;
+ u32 val;
+
+ ndev->hw_type = BWD_HW;
+
+ rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val);
+ if (rc)
+ return rc;
+
+ switch ((val & BWD_PPD_CONN_TYPE) >> 8) {
+ case NTB_CONN_B2B:
+ ndev->conn_type = NTB_CONN_B2B;
+ break;
+ case NTB_CONN_RP:
+ default:
+ dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+ return -EINVAL;
+ }
+
+ if (val & BWD_PPD_DEV_TYPE)
+ ndev->dev_type = NTB_DEV_DSD;
+ else
+ ndev->dev_type = NTB_DEV_USD;
+
+ /* Initiate PCI-E link training */
+ rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET,
+ val | BWD_PPD_INIT_LINK);
+ if (rc)
+ return rc;
+
+ ndev->reg_ofs.pdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+ ndev->reg_ofs.pdb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
+ ndev->reg_ofs.sbar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
+ ndev->reg_ofs.sbar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
+ ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
+ ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
+ ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
+ ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
+
+ if (ndev->conn_type == NTB_CONN_B2B) {
+ ndev->reg_ofs.sdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
+ ndev->limits.max_spads = BWD_MAX_SPADS;
+ } else {
+ ndev->reg_ofs.sdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + BWD_SPAD_OFFSET;
+ ndev->limits.max_spads = BWD_MAX_COMPAT_SPADS;
+ }
+
+ ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
+ ndev->limits.msix_cnt = BWD_MSIX_CNT;
+ ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
+
+ /* Since bwd doesn't have a link interrupt, setup a poll timer */
+ INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
+ schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+
+ return 0;
+}
+
+static int ntb_device_setup(struct ntb_device *ndev)
+{
+ int rc;
+
+ switch (ndev->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_NTB_2ND_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_RP_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_RP_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+ rc = ntb_xeon_setup(ndev);
+ break;
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
+ rc = ntb_bwd_setup(ndev);
+ break;
+ default:
+ rc = -ENODEV;
+ }
+
+ /* Enable Bus Master and Memory Space on the secondary side */
+ writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
+
+ return rc;
+}
+
+static void ntb_device_free(struct ntb_device *ndev)
+{
+ if (ndev->hw_type == BWD_HW)
+ cancel_delayed_work_sync(&ndev->hb_timer);
+}
+
+static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
+{
+ struct ntb_db_cb *db_cb = data;
+ struct ntb_device *ndev = db_cb->ndev;
+
+ dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
+ db_cb->db_num);
+
+ if (db_cb->callback)
+ db_cb->callback(db_cb->data, db_cb->db_num);
+
+ /* No need to check for the specific HB irq, any interrupt means
+ * we're connected.
+ */
+ ndev->last_ts = jiffies;
+
+ writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.pdb);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
+{
+ struct ntb_db_cb *db_cb = data;
+ struct ntb_device *ndev = db_cb->ndev;
+
+ dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
+ db_cb->db_num);
+
+ if (db_cb->callback)
+ db_cb->callback(db_cb->data, db_cb->db_num);
+
+ /* On Sandybridge, there are 16 bits in the interrupt register
+ * but only 4 vectors. So, 5 bits are assigned to the first 3
+ * vectors, with the 4th having a single bit for link
+ * interrupts.
+ */
+ writew(((1 << ndev->bits_per_vector) - 1) <<
+ (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.pdb);
+
+ return IRQ_HANDLED;
+}
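
The doorbell-to-vector split described in the comment above is easiest to see with concrete numbers. This standalone sketch (illustration only, not part of the patch) mirrors the writew() mask computation, assuming SNB_DB_BITS_PER_VEC == 5 and the link interrupt on bit 15, as defined in ntb_regs.h later in this patch:

#include <stdio.h>

int main(void)
{
	unsigned int bits_per_vector = 5;	/* SNB_DB_BITS_PER_VEC */
	unsigned int vec;

	/* first three vectors each clear a 5-bit slice of the doorbell */
	for (vec = 0; vec < 3; vec++) {
		unsigned int mask = ((1 << bits_per_vector) - 1) <<
				    (vec * bits_per_vector);
		printf("vector %u clears doorbell bits 0x%04x\n", vec, mask);
	}
	/* the 4th vector services only bit 15, the link interrupt */
	printf("vector 3 clears doorbell bits 0x%04x\n", 1u << 15);
	return 0;
}

This prints 0x001f, 0x03e0, 0x7c00 and 0x8000, i.e. the 16-bit register fully covered by 3 x 5 payload bits plus the link bit.
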
+
+/* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */
+static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
+{
+ struct ntb_device *ndev = dev;
+ int rc;
+
+ dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq);
+
+ rc = ntb_link_status(ndev);
+ if (rc)
+ dev_err(&ndev->pdev->dev, "Error determining link status\n");
+
+ /* bit 15 is always the link bit */
+ writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.pdb);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ntb_interrupt(int irq, void *dev)
+{
+ struct ntb_device *ndev = dev;
+ unsigned int i = 0;
+
+ if (ndev->hw_type == BWD_HW) {
+ u64 pdb = readq(ndev->reg_ofs.pdb);
+
+ dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %Lx\n", irq, pdb);
+
+ while (pdb) {
+ i = __ffs(pdb);
+ pdb &= pdb - 1;
+ bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
+ }
+ } else {
+ u16 pdb = readw(ndev->reg_ofs.pdb);
+
+ dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %x sdb %x\n", irq,
+ pdb, readw(ndev->reg_ofs.sdb));
+
+ if (pdb & SNB_DB_HW_LINK) {
+ xeon_event_msix_irq(irq, dev);
+ pdb &= ~SNB_DB_HW_LINK;
+ }
+
+ while (pdb) {
+ i = __ffs(pdb);
+ pdb &= pdb - 1;
+ xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ struct msix_entry *msix;
+ int msix_entries;
+ int rc, i, pos;
+ u16 val;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (!pos) {
+ rc = -EIO;
+ goto err;
+ }
+
+ rc = pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &val);
+ if (rc)
+ goto err;
+
+ msix_entries = msix_table_size(val);
+ if (msix_entries > ndev->limits.msix_cnt) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
+ GFP_KERNEL);
+ if (!ndev->msix_entries) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < msix_entries; i++)
+ ndev->msix_entries[i].entry = i;
+
+ rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
+ if (rc < 0)
+ goto err1;
+ if (rc > 0) {
+ /* On SNB, the link interrupt is always tied to the 4th vector. If
+ * we can't get all 4, then we can't use MSI-X.
+ */
+ if (ndev->hw_type != BWD_HW) {
+ rc = -EIO;
+ goto err1;
+ }
+
+ dev_warn(&pdev->dev,
+ "Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
+ rc);
+ msix_entries = rc;
+ }
+
+ for (i = 0; i < msix_entries; i++) {
+ msix = &ndev->msix_entries[i];
+ WARN_ON(!msix->vector);
+
+ /* Use the last MSI-X vector for Link status */
+ if (ndev->hw_type == BWD_HW) {
+ rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+ "ntb-callback-msix", &ndev->db_cb[i]);
+ if (rc)
+ goto err2;
+ } else {
+ if (i == msix_entries - 1) {
+ rc = request_irq(msix->vector,
+ xeon_event_msix_irq, 0,
+ "ntb-event-msix", ndev);
+ if (rc)
+ goto err2;
+ } else {
+ rc = request_irq(msix->vector,
+ xeon_callback_msix_irq, 0,
+ "ntb-callback-msix",
+ &ndev->db_cb[i]);
+ if (rc)
+ goto err2;
+ }
+ }
+ }
+
+ ndev->num_msix = msix_entries;
+ if (ndev->hw_type == BWD_HW)
+ ndev->max_cbs = msix_entries;
+ else
+ ndev->max_cbs = msix_entries - 1;
+
+ return 0;
+
+err2:
+ while (--i >= 0) {
+ msix = &ndev->msix_entries[i];
+ if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
+ free_irq(msix->vector, ndev);
+ else
+ free_irq(msix->vector, &ndev->db_cb[i]);
+ }
+ pci_disable_msix(pdev);
+err1:
+ kfree(ndev->msix_entries);
+ dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
+err:
+ ndev->num_msix = 0;
+ return rc;
+}
+
+static int ntb_setup_msi(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ int rc;
+
+ rc = pci_enable_msi(pdev);
+ if (rc)
+ return rc;
+
+ rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev);
+ if (rc) {
+ pci_disable_msi(pdev);
+ dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ntb_setup_intx(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ int rc;
+
+ pci_msi_off(pdev);
+
+ /* Verify intx is enabled */
+ pci_intx(pdev, 1);
+
+ rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx",
+ ndev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int ntb_setup_interrupts(struct ntb_device *ndev)
+{
+ int rc;
+
+ /* On BWD, disable all interrupts. On SNB, disable all but Link
+ * Interrupt. The rest will be unmasked as callbacks are registered.
+ */
+ if (ndev->hw_type == BWD_HW)
+ writeq(~0, ndev->reg_ofs.pdb_mask);
+ else
+ writew(~(1 << ndev->limits.max_db_bits),
+ ndev->reg_ofs.pdb_mask);
+
+ rc = ntb_setup_msix(ndev);
+ if (!rc)
+ goto done;
+
+ ndev->bits_per_vector = 1;
+ ndev->max_cbs = ndev->limits.max_db_bits;
+
+ rc = ntb_setup_msi(ndev);
+ if (!rc)
+ goto done;
+
+ rc = ntb_setup_intx(ndev);
+ if (rc) {
+ dev_err(&ndev->pdev->dev, "no usable interrupts\n");
+ return rc;
+ }
+
+done:
+ return 0;
+}
+
+static void ntb_free_interrupts(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+
+ /* mask interrupts */
+ if (ndev->hw_type == BWD_HW)
+ writeq(~0, ndev->reg_ofs.pdb_mask);
+ else
+ writew(~0, ndev->reg_ofs.pdb_mask);
+
+ if (ndev->num_msix) {
+ struct msix_entry *msix;
+ u32 i;
+
+ for (i = 0; i < ndev->num_msix; i++) {
+ msix = &ndev->msix_entries[i];
+ if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
+ free_irq(msix->vector, ndev);
+ else
+ free_irq(msix->vector, &ndev->db_cb[i]);
+ }
+ pci_disable_msix(pdev);
+ } else {
+ free_irq(pdev->irq, ndev);
+
+ if (pci_dev_msi_enabled(pdev))
+ pci_disable_msi(pdev);
+ }
+}
+
+static int ntb_create_callbacks(struct ntb_device *ndev)
+{
+ int i;
+
+ /* Chicken-and-egg issue. We won't know how many callbacks are necessary
+ * until we see how many MSI-X vectors we get, but these pointers need
+ * to be passed into the MSI-X register function. So, we allocate the
+ * max, knowing that they might not all be used, to work around this.
+ */
+ ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
+ sizeof(struct ntb_db_cb),
+ GFP_KERNEL);
+ if (!ndev->db_cb)
+ return -ENOMEM;
+
+ for (i = 0; i < ndev->limits.max_db_bits; i++) {
+ ndev->db_cb[i].db_num = i;
+ ndev->db_cb[i].ndev = ndev;
+ }
+
+ return 0;
+}
+
+static void ntb_free_callbacks(struct ntb_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < ndev->limits.max_db_bits; i++)
+ ntb_unregister_db_callback(ndev, i);
+
+ kfree(ndev->db_cb);
+}
+
+static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ntb_device *ndev;
+ int rc, i;
+
+ ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->pdev = pdev;
+ ndev->link_status = NTB_LINK_DOWN;
+ pci_set_drvdata(pdev, ndev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err;
+
+ pci_set_master(ndev->pdev);
+
+ rc = pci_request_selected_regions(pdev, NTB_BAR_MASK, KBUILD_MODNAME);
+ if (rc)
+ goto err1;
+
+ ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO);
+ if (!ndev->reg_base) {
+ dev_warn(&pdev->dev, "Cannot remap BAR 0\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ for (i = 0; i < NTB_NUM_MW; i++) {
+ ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
+ ndev->mw[i].vbase =
+ ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
+ ndev->mw[i].bar_sz);
+ dev_info(&pdev->dev, "MW %d size %d\n", i,
+ (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+ if (!ndev->mw[i].vbase) {
+ dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
+ MW_TO_BAR(i));
+ rc = -EIO;
+ goto err3;
+ }
+ }
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ goto err3;
+
+ dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+ }
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ goto err3;
+
+ dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
+ }
+
+ rc = ntb_device_setup(ndev);
+ if (rc)
+ goto err3;
+
+ rc = ntb_create_callbacks(ndev);
+ if (rc)
+ goto err4;
+
+ rc = ntb_setup_interrupts(ndev);
+ if (rc)
+ goto err5;
+
+ /* The scratchpad registers keep their values across rmmod/insmod, so
+ * blast them now
+ */
+ for (i = 0; i < ndev->limits.max_spads; i++) {
+ ntb_write_local_spad(ndev, i, 0);
+ ntb_write_remote_spad(ndev, i, 0);
+ }
+
+ rc = ntb_transport_init(pdev);
+ if (rc)
+ goto err6;
+
+ /* Let's bring the NTB link up */
+ writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
+ ndev->reg_ofs.lnk_cntl);
+
+ return 0;
+
+err6:
+ ntb_free_interrupts(ndev);
+err5:
+ ntb_free_callbacks(ndev);
+err4:
+ ntb_device_free(ndev);
+err3:
+ for (i--; i >= 0; i--)
+ iounmap(ndev->mw[i].vbase);
+ iounmap(ndev->reg_base);
+err2:
+ pci_release_selected_regions(pdev, NTB_BAR_MASK);
+err1:
+ pci_disable_device(pdev);
+err:
+ kfree(ndev);
+
+ dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
+ return rc;
+}
+
+static void ntb_pci_remove(struct pci_dev *pdev)
+{
+ struct ntb_device *ndev = pci_get_drvdata(pdev);
+ int i;
+ u32 ntb_cntl;
+
+ /* Bring NTB link down */
+ ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+ ntb_cntl |= NTB_LINK_DISABLE;
+ writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+
+ ntb_transport_free(ndev->ntb_transport);
+
+ ntb_free_interrupts(ndev);
+ ntb_free_callbacks(ndev);
+ ntb_device_free(ndev);
+
+ for (i = 0; i < NTB_NUM_MW; i++)
+ iounmap(ndev->mw[i].vbase);
+
+ iounmap(ndev->reg_base);
+ pci_release_selected_regions(pdev, NTB_BAR_MASK);
+ pci_disable_device(pdev);
+ kfree(ndev);
+}
+
+static struct pci_driver ntb_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ntb_pci_tbl,
+ .probe = ntb_pci_probe,
+ .remove = ntb_pci_remove,
+};
+module_pci_driver(ntb_pci_driver);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
new file mode 100644
index 000000000000..3a3038ca83e6
--- /dev/null
+++ b/drivers/ntb/ntb_hw.h
@@ -0,0 +1,181 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
+#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF 0x3726
+#define PCI_DEVICE_ID_INTEL_NTB_RP_JSF 0x3727
+#define PCI_DEVICE_ID_INTEL_NTB_RP_SNB 0x3C08
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
+#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB 0x3C0E
+#define PCI_DEVICE_ID_INTEL_NTB_2ND_SNB 0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
+
+#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
+
+#define NTB_BAR_MMIO 0
+#define NTB_BAR_23 2
+#define NTB_BAR_45 4
+#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
+ (1 << NTB_BAR_45))
+
+#define NTB_LINK_DOWN 0
+#define NTB_LINK_UP 1
+
+#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
+
+#define NTB_NUM_MW 2
+
+enum ntb_hw_event {
+ NTB_EVENT_SW_EVENT0 = 0,
+ NTB_EVENT_SW_EVENT1,
+ NTB_EVENT_SW_EVENT2,
+ NTB_EVENT_HW_ERROR,
+ NTB_EVENT_HW_LINK_UP,
+ NTB_EVENT_HW_LINK_DOWN,
+};
+
+struct ntb_mw {
+ dma_addr_t phys_addr;
+ void __iomem *vbase;
+ resource_size_t bar_sz;
+};
+
+struct ntb_db_cb {
+ void (*callback) (void *data, int db_num);
+ unsigned int db_num;
+ void *data;
+ struct ntb_device *ndev;
+};
+
+struct ntb_device {
+ struct pci_dev *pdev;
+ struct msix_entry *msix_entries;
+ void __iomem *reg_base;
+ struct ntb_mw mw[NTB_NUM_MW];
+ struct {
+ unsigned int max_spads;
+ unsigned int max_db_bits;
+ unsigned int msix_cnt;
+ } limits;
+ struct {
+ void __iomem *pdb;
+ void __iomem *pdb_mask;
+ void __iomem *sdb;
+ void __iomem *sbar2_xlat;
+ void __iomem *sbar4_xlat;
+ void __iomem *spad_write;
+ void __iomem *spad_read;
+ void __iomem *lnk_cntl;
+ void __iomem *lnk_stat;
+ void __iomem *spci_cmd;
+ } reg_ofs;
+ struct ntb_transport *ntb_transport;
+ void (*event_cb)(void *handle, enum ntb_hw_event event);
+
+ struct ntb_db_cb *db_cb;
+ unsigned char hw_type;
+ unsigned char conn_type;
+ unsigned char dev_type;
+ unsigned char num_msix;
+ unsigned char bits_per_vector;
+ unsigned char max_cbs;
+ unsigned char link_status;
+ struct delayed_work hb_timer;
+ unsigned long last_ts;
+};
+
+/**
+ * ntb_hw_link_status() - return the hardware link status
+ * @ndev: pointer to ntb_device instance
+ *
+ * Returns true if the hardware is connected to the remote system
+ *
+ * RETURNS: true or false based on the hardware link state
+ */
+static inline bool ntb_hw_link_status(struct ntb_device *ndev)
+{
+ return ndev->link_status == NTB_LINK_UP;
+}
+
+/**
+ * ntb_query_pdev() - return the pci_dev pointer
+ * @ndev: pointer to ntb_device instance
+ *
+ * Given the ntb pointer, return the pci_dev pointer for the NTB hardware device
+ *
+ * RETURNS: a pointer to the ntb pci_dev
+ */
+static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
+{
+ return ndev->pdev;
+}
+
+struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
+ void *transport);
+void ntb_unregister_transport(struct ntb_device *ndev);
+void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
+int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
+ void *data, void (*db_cb_func) (void *data,
+ int db_num));
+void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
+int ntb_register_event_callback(struct ntb_device *ndev,
+ void (*event_cb_func) (void *handle,
+ enum ntb_hw_event event));
+void ntb_unregister_event_callback(struct ntb_device *ndev);
+int ntb_get_max_spads(struct ntb_device *ndev);
+int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
+int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
+int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
+resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
+void ntb_ring_sdb(struct ntb_device *ndev, unsigned int idx);
+void *ntb_find_transport(struct pci_dev *pdev);
+
+int ntb_transport_init(struct pci_dev *pdev);
+void ntb_transport_free(void *transport);
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
new file mode 100644
index 000000000000..5bfa8c06c059
--- /dev/null
+++ b/drivers/ntb/ntb_regs.h
@@ -0,0 +1,139 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#define NTB_LINK_ENABLE 0x0000
+#define NTB_LINK_DISABLE 0x0002
+#define NTB_LINK_STATUS_ACTIVE 0x2000
+#define NTB_LINK_SPEED_MASK 0x000f
+#define NTB_LINK_WIDTH_MASK 0x03f0
+
+#define SNB_MSIX_CNT 4
+#define SNB_MAX_SPADS 16
+#define SNB_MAX_COMPAT_SPADS 8
+/* Reserve the uppermost bit for link interrupt */
+#define SNB_MAX_DB_BITS 15
+#define SNB_DB_BITS_PER_VEC 5
+
+#define SNB_DB_HW_LINK 0x8000
+
+#define SNB_PCICMD_OFFSET 0x0504
+#define SNB_DEVCTRL_OFFSET 0x0598
+#define SNB_LINK_STATUS_OFFSET 0x01A2
+
+#define SNB_PBAR2LMT_OFFSET 0x0000
+#define SNB_PBAR4LMT_OFFSET 0x0008
+#define SNB_PBAR2XLAT_OFFSET 0x0010
+#define SNB_PBAR4XLAT_OFFSET 0x0018
+#define SNB_SBAR2LMT_OFFSET 0x0020
+#define SNB_SBAR4LMT_OFFSET 0x0028
+#define SNB_SBAR2XLAT_OFFSET 0x0030
+#define SNB_SBAR4XLAT_OFFSET 0x0038
+#define SNB_SBAR0BASE_OFFSET 0x0040
+#define SNB_SBAR2BASE_OFFSET 0x0048
+#define SNB_SBAR4BASE_OFFSET 0x0050
+#define SNB_NTBCNTL_OFFSET 0x0058
+#define SNB_SBDF_OFFSET 0x005C
+#define SNB_PDOORBELL_OFFSET 0x0060
+#define SNB_PDBMSK_OFFSET 0x0062
+#define SNB_SDOORBELL_OFFSET 0x0064
+#define SNB_SDBMSK_OFFSET 0x0066
+#define SNB_USMEMMISS 0x0070
+#define SNB_SPAD_OFFSET 0x0080
+#define SNB_SPADSEMA4_OFFSET 0x00c0
+#define SNB_WCCNTRL_OFFSET 0x00e0
+#define SNB_B2B_SPAD_OFFSET 0x0100
+#define SNB_B2B_DOORBELL_OFFSET 0x0140
+#define SNB_B2B_XLAT_OFFSET 0x0144
+
+#define BWD_MSIX_CNT 34
+#define BWD_MAX_SPADS 16
+#define BWD_MAX_COMPAT_SPADS 16
+#define BWD_MAX_DB_BITS 34
+#define BWD_DB_BITS_PER_VEC 1
+
+#define BWD_PCICMD_OFFSET 0xb004
+#define BWD_MBAR23_OFFSET 0xb018
+#define BWD_MBAR45_OFFSET 0xb020
+#define BWD_DEVCTRL_OFFSET 0xb048
+#define BWD_LINK_STATUS_OFFSET 0xb052
+
+#define BWD_SBAR2XLAT_OFFSET 0x0008
+#define BWD_SBAR4XLAT_OFFSET 0x0010
+#define BWD_PDOORBELL_OFFSET 0x0020
+#define BWD_PDBMSK_OFFSET 0x0028
+#define BWD_NTBCNTL_OFFSET 0x0060
+#define BWD_EBDF_OFFSET 0x0064
+#define BWD_SPAD_OFFSET 0x0080
+#define BWD_SPADSEMA_OFFSET 0x00c0
+#define BWD_STKYSPAD_OFFSET 0x00c4
+#define BWD_PBAR2XLAT_OFFSET 0x8008
+#define BWD_PBAR4XLAT_OFFSET 0x8010
+#define BWD_B2B_DOORBELL_OFFSET 0x8020
+#define BWD_B2B_SPAD_OFFSET 0x8080
+#define BWD_B2B_SPADSEMA_OFFSET 0x80c0
+#define BWD_B2B_STKYSPAD_OFFSET 0x80c4
+
+#define NTB_CNTL_BAR23_SNOOP (1 << 2)
+#define NTB_CNTL_BAR45_SNOOP (1 << 6)
+#define BWD_CNTL_LINK_DOWN (1 << 16)
+
+#define NTB_PPD_OFFSET 0x00D4
+#define SNB_PPD_CONN_TYPE 0x0003
+#define SNB_PPD_DEV_TYPE 0x0010
+#define BWD_PPD_INIT_LINK 0x0008
+#define BWD_PPD_CONN_TYPE 0x0300
+#define BWD_PPD_DEV_TYPE 0x1000
+
+#define BWD_PBAR2XLAT_USD_ADDR 0x0000004000000000
+#define BWD_PBAR4XLAT_USD_ADDR 0x0000008000000000
+#define BWD_MBAR23_USD_ADDR 0x000000410000000C
+#define BWD_MBAR45_USD_ADDR 0x000000810000000C
+#define BWD_PBAR2XLAT_DSD_ADDR 0x0000004100000000
+#define BWD_PBAR4XLAT_DSD_ADDR 0x0000008100000000
+#define BWD_MBAR23_DSD_ADDR 0x000000400000000C
+#define BWD_MBAR45_DSD_ADDR 0x000000800000000C
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
new file mode 100644
index 000000000000..e0bdfd7f9930
--- /dev/null
+++ b/drivers/ntb/ntb_transport.c
@@ -0,0 +1,1441 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/ntb.h>
+#include "ntb_hw.h"
+
+#define NTB_TRANSPORT_VERSION 2
+
+static unsigned int transport_mtu = 0x401E;
+module_param(transport_mtu, uint, 0644);
+MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
+
+static unsigned char max_num_clients = 2;
+module_param(max_num_clients, byte, 0644);
+MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
+
+struct ntb_queue_entry {
+ /* ntb_queue list reference */
+ struct list_head entry;
+ /* pointers to data to be transferred */
+ void *cb_data;
+ void *buf;
+ unsigned int len;
+ unsigned int flags;
+};
+
+struct ntb_rx_info {
+ unsigned int entry;
+};
+
+struct ntb_transport_qp {
+ struct ntb_transport *transport;
+ struct ntb_device *ndev;
+ void *cb_data;
+
+ bool client_ready;
+ bool qp_link;
+ u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
+
+ struct ntb_rx_info __iomem *rx_info;
+ struct ntb_rx_info *remote_rx_info;
+
+ void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ struct list_head tx_free_q;
+ spinlock_t ntb_tx_free_q_lock;
+ void __iomem *tx_mw;
+ unsigned int tx_index;
+ unsigned int tx_max_entry;
+ unsigned int tx_max_frame;
+
+ void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ struct tasklet_struct rx_work;
+ struct list_head rx_pend_q;
+ struct list_head rx_free_q;
+ spinlock_t ntb_rx_pend_q_lock;
+ spinlock_t ntb_rx_free_q_lock;
+ void *rx_buff;
+ unsigned int rx_index;
+ unsigned int rx_max_entry;
+ unsigned int rx_max_frame;
+
+ void (*event_handler) (void *data, int status);
+ struct delayed_work link_work;
+ struct work_struct link_cleanup;
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_stats;
+
+ /* Stats */
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_ring_empty;
+ u64 rx_err_no_buf;
+ u64 rx_err_oflow;
+ u64 rx_err_ver;
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_ring_full;
+};
+
+struct ntb_transport_mw {
+ size_t size;
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+struct ntb_transport_client_dev {
+ struct list_head entry;
+ struct device dev;
+};
+
+struct ntb_transport {
+ struct list_head entry;
+ struct list_head client_devs;
+
+ struct ntb_device *ndev;
+ struct ntb_transport_mw mw[NTB_NUM_MW];
+ struct ntb_transport_qp *qps;
+ unsigned int max_qps;
+ unsigned long qp_bitmap;
+ bool transport_link;
+ struct delayed_work link_work;
+ struct work_struct link_cleanup;
+ struct dentry *debugfs_dir;
+};
+
+enum {
+ DESC_DONE_FLAG = 1 << 0,
+ LINK_DOWN_FLAG = 1 << 1,
+};
+
+struct ntb_payload_header {
+ unsigned int ver;
+ unsigned int len;
+ unsigned int flags;
+};
+
+enum {
+ VERSION = 0,
+ MW0_SZ,
+ MW1_SZ,
+ NUM_QPS,
+ QP_LINKS,
+ MAX_SPAD,
+};
+
+#define QP_TO_MW(qp) ((qp) % NTB_NUM_MW)
+#define NTB_QP_DEF_NUM_ENTRIES 100
+#define NTB_LINK_DOWN_TIMEOUT 10
+
+static int ntb_match_bus(struct device *dev, struct device_driver *drv)
+{
+ return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
+}
+
+static int ntb_client_probe(struct device *dev)
+{
+ const struct ntb_client *drv = container_of(dev->driver,
+ struct ntb_client, driver);
+ struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+ int rc = -EINVAL;
+
+ get_device(dev);
+ if (drv && drv->probe)
+ rc = drv->probe(pdev);
+ if (rc)
+ put_device(dev);
+
+ return rc;
+}
+
+static int ntb_client_remove(struct device *dev)
+{
+ const struct ntb_client *drv = container_of(dev->driver,
+ struct ntb_client, driver);
+ struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+
+ if (drv && drv->remove)
+ drv->remove(pdev);
+
+ put_device(dev);
+
+ return 0;
+}
+
+static struct bus_type ntb_bus_type = {
+ .name = "ntb_bus",
+ .match = ntb_match_bus,
+ .probe = ntb_client_probe,
+ .remove = ntb_client_remove,
+};
+
+static LIST_HEAD(ntb_transport_list);
+
+static int ntb_bus_init(struct ntb_transport *nt)
+{
+ if (list_empty(&ntb_transport_list)) {
+ int rc = bus_register(&ntb_bus_type);
+ if (rc)
+ return rc;
+ }
+
+ list_add(&nt->entry, &ntb_transport_list);
+
+ return 0;
+}
+
+static void ntb_bus_remove(struct ntb_transport *nt)
+{
+ struct ntb_transport_client_dev *client_dev, *cd;
+
+ list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
+ dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
+ dev_name(&client_dev->dev));
+ list_del(&client_dev->entry);
+ device_unregister(&client_dev->dev);
+ }
+
+ list_del(&nt->entry);
+
+ if (list_empty(&ntb_transport_list))
+ bus_unregister(&ntb_bus_type);
+}
+
+static void ntb_client_release(struct device *dev)
+{
+ struct ntb_transport_client_dev *client_dev;
+ client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
+
+ kfree(client_dev);
+}
+
+/**
+ * ntb_unregister_client_dev - Unregister NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Unregister an NTB client device with the NTB transport layer
+ */
+void ntb_unregister_client_dev(char *device_name)
+{
+ struct ntb_transport_client_dev *client, *cd;
+ struct ntb_transport *nt;
+
+ list_for_each_entry(nt, &ntb_transport_list, entry)
+ list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
+ if (!strncmp(dev_name(&client->dev), device_name,
+ strlen(device_name))) {
+ list_del(&client->entry);
+ device_unregister(&client->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
+
+/**
+ * ntb_register_client_dev - Register NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Register an NTB client device with the NTB transport layer
+ */
+int ntb_register_client_dev(char *device_name)
+{
+ struct ntb_transport_client_dev *client_dev;
+ struct ntb_transport *nt;
+ int rc;
+
+ if (list_empty(&ntb_transport_list))
+ return -ENODEV;
+
+ list_for_each_entry(nt, &ntb_transport_list, entry) {
+ struct device *dev;
+
+ client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
+ GFP_KERNEL);
+ if (!client_dev) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ dev = &client_dev->dev;
+
+ /* setup and register client devices */
+ dev_set_name(dev, "%s", device_name);
+ dev->bus = &ntb_bus_type;
+ dev->release = ntb_client_release;
+ dev->parent = &ntb_query_pdev(nt->ndev)->dev;
+
+ rc = device_register(dev);
+ if (rc) {
+ kfree(client_dev);
+ goto err;
+ }
+
+ list_add_tail(&client_dev->entry, &nt->client_devs);
+ }
+
+ return 0;
+
+err:
+ ntb_unregister_client_dev(device_name);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_register_client_dev);
+
+/**
+ * ntb_register_client - Register NTB client driver
+ * @drv: NTB client driver to be registered
+ *
+ * Register an NTB client driver with the NTB transport layer
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_client(struct ntb_client *drv)
+{
+ drv->driver.bus = &ntb_bus_type;
+
+ if (list_empty(&ntb_transport_list))
+ return -ENODEV;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_register_client);
+
+/**
+ * ntb_unregister_client - Unregister NTB client driver
+ * @drv: NTB client driver to be unregistered
+ *
+ * Unregister an NTB client driver from the NTB transport layer
+ */
+void ntb_unregister_client(struct ntb_client *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_unregister_client);
+
+static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *offp)
+{
+ struct ntb_transport_qp *qp;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+
+ out_count = 600;
+
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ qp = filp->private_data;
+ out_offset = 0;
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "NTB QP stats\n");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_bytes - \t%llu\n", qp->rx_bytes);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_pkts - \t%llu\n", qp->rx_pkts);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_ring_empty - %llu\n", qp->rx_ring_empty);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_err_ver - \t%llu\n", qp->rx_err_ver);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_buff - \t%p\n", qp->rx_buff);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_index - \t%u\n", qp->rx_index);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_max_entry - \t%u\n", qp->rx_max_entry);
+
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_bytes - \t%llu\n", qp->tx_bytes);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_pkts - \t%llu\n", qp->tx_pkts);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_ring_full - \t%llu\n", qp->tx_ring_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_mw - \t%p\n", qp->tx_mw);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_index - \t%u\n", qp->tx_index);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_max_entry - \t%u\n", qp->tx_max_entry);
+
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
+ "Up" : "Down");
+ if (out_offset > out_count)
+ out_offset = out_count;
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations ntb_qp_debugfs_stats = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = debugfs_read,
+};
+
+static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
+ struct list_head *list)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_add_tail(entry, list);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
+ struct list_head *list)
+{
+ struct ntb_queue_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ if (list_empty(list)) {
+ entry = NULL;
+ goto out;
+ }
+ entry = list_first_entry(list, struct ntb_queue_entry, entry);
+ list_del(&entry->entry);
+out:
+ spin_unlock_irqrestore(lock, flags);
+
+ return entry;
+}
+
+static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
+ unsigned int qp_num)
+{
+ struct ntb_transport_qp *qp = &nt->qps[qp_num];
+ unsigned int rx_size, num_qps_mw;
+ u8 mw_num = QP_TO_MW(qp_num);
+ unsigned int i;
+
+ WARN_ON(nt->mw[mw_num].virt_addr == NULL);
+
+ if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+ num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+ else
+ num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+ rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
+ qp->remote_rx_info = nt->mw[mw_num].virt_addr +
+ (qp_num / NTB_NUM_MW * rx_size);
+ rx_size -= sizeof(struct ntb_rx_info);
+
+ qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
+ qp->rx_max_frame = min(transport_mtu, rx_size);
+ qp->rx_max_entry = rx_size / qp->rx_max_frame;
+ qp->rx_index = 0;
+
+ qp->remote_rx_info->entry = qp->rx_max_entry;
+
+ /* setup the hdr offsets with 0's */
+ for (i = 0; i < qp->rx_max_entry; i++) {
+ void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
+ sizeof(struct ntb_payload_header);
+ memset(offset, 0, sizeof(struct ntb_payload_header));
+ }
+
+ qp->rx_pkts = 0;
+ qp->tx_pkts = 0;
+}
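
The carve-up performed above is easier to follow with concrete numbers. A standalone sketch (illustration only, not part of the patch) reusing the QP_TO_MW() mapping and the same split rules, with hypothetical values for max_qps and the window size:

#include <stdio.h>

#define NTB_NUM_MW	2
#define QP_TO_MW(qp)	((qp) % NTB_NUM_MW)

int main(void)
{
	unsigned int max_qps = 4;		/* hypothetical */
	unsigned int mw_size = 1024 * 1024;	/* hypothetical 1 MB window */
	unsigned int qp_num;

	for (qp_num = 0; qp_num < max_qps; qp_num++) {
		unsigned int mw_num = QP_TO_MW(qp_num);
		unsigned int num_qps_mw, rx_size, offset;

		/* same rounding as ntb_transport_setup_qp_mw() */
		if (max_qps % NTB_NUM_MW && mw_num < max_qps % NTB_NUM_MW)
			num_qps_mw = max_qps / NTB_NUM_MW + 1;
		else
			num_qps_mw = max_qps / NTB_NUM_MW;

		rx_size = mw_size / num_qps_mw;
		offset = qp_num / NTB_NUM_MW * rx_size;

		printf("qp %u -> MW%u, offset 0x%x, region size 0x%x\n",
		       qp_num, mw_num, offset, rx_size);
	}
	return 0;
}

With these numbers, qps 0 and 2 share MW0 and qps 1 and 3 share MW1, each taking half the window.
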
+
+static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
+{
+ struct ntb_transport_mw *mw = &nt->mw[num_mw];
+ struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+ /* Alloc memory for receiving data. Must be 4k aligned */
+ mw->size = ALIGN(size, 4096);
+
+ mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
+ GFP_KERNEL);
+ if (!mw->virt_addr) {
+ dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
+ (int) mw->size);
+ return -ENOMEM;
+ }
+
+ /* Notify HW the memory location of the receive buffer */
+ ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
+
+ return 0;
+}
+
+static void ntb_qp_link_cleanup(struct work_struct *work)
+{
+ struct ntb_transport_qp *qp = container_of(work,
+ struct ntb_transport_qp,
+ link_cleanup);
+ struct ntb_transport *nt = qp->transport;
+ struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+ if (qp->qp_link == NTB_LINK_DOWN) {
+ cancel_delayed_work_sync(&qp->link_work);
+ return;
+ }
+
+ if (qp->event_handler)
+ qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
+
+ dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+ qp->qp_link = NTB_LINK_DOWN;
+
+ if (nt->transport_link == NTB_LINK_UP)
+ schedule_delayed_work(&qp->link_work,
+ msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_qp_link_down(struct ntb_transport_qp *qp)
+{
+ schedule_work(&qp->link_cleanup);
+}
+
+static void ntb_transport_link_cleanup(struct work_struct *work)
+{
+ struct ntb_transport *nt = container_of(work, struct ntb_transport,
+ link_cleanup);
+ int i;
+
+ if (nt->transport_link == NTB_LINK_DOWN)
+ cancel_delayed_work_sync(&nt->link_work);
+ else
+ nt->transport_link = NTB_LINK_DOWN;
+
+ /* Pass along the info to any clients */
+ for (i = 0; i < nt->max_qps; i++)
+ if (!test_bit(i, &nt->qp_bitmap))
+ ntb_qp_link_down(&nt->qps[i]);
+
+ /* The scratchpad registers keep their values if the remote side
+ * goes down, so blast them now to give them a sane value the next
+ * time they are accessed
+ */
+ for (i = 0; i < MAX_SPAD; i++)
+ ntb_write_local_spad(nt->ndev, i, 0);
+}
+
+static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
+{
+ struct ntb_transport *nt = data;
+
+ switch (event) {
+ case NTB_EVENT_HW_LINK_UP:
+ schedule_delayed_work(&nt->link_work, 0);
+ break;
+ case NTB_EVENT_HW_LINK_DOWN:
+ schedule_work(&nt->link_cleanup);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void ntb_transport_link_work(struct work_struct *work)
+{
+ struct ntb_transport *nt = container_of(work, struct ntb_transport,
+ link_work.work);
+ struct ntb_device *ndev = nt->ndev;
+ struct pci_dev *pdev = ntb_query_pdev(ndev);
+ u32 val;
+ int rc, i;
+
+ /* send the local info */
+ rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ 0, VERSION);
+ goto out;
+ }
+
+ rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
+ goto out;
+ }
+
+ rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+ goto out;
+ }
+
+ rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ nt->max_qps, NUM_QPS);
+ goto out;
+ }
+
+ rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+ goto out;
+ }
+
+ rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ val, QP_LINKS);
+ goto out;
+ }
+
+ /* Query the remote side for its info */
+ rc = ntb_read_remote_spad(ndev, VERSION, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
+ goto out;
+ }
+
+ if (val != NTB_TRANSPORT_VERSION)
+ goto out;
+ dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+
+ rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
+ goto out;
+ }
+
+ if (val != nt->max_qps)
+ goto out;
+ dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
+
+ rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+ goto out;
+ }
+
+ if (!val)
+ goto out;
+ dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+
+ rc = ntb_set_mw(nt, 0, val);
+ if (rc)
+ goto out;
+
+ rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
+ goto out;
+ }
+
+ if (!val)
+ goto out;
+ dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+
+ rc = ntb_set_mw(nt, 1, val);
+ if (rc)
+ goto out;
+
+ nt->transport_link = NTB_LINK_UP;
+
+ for (i = 0; i < nt->max_qps; i++) {
+ struct ntb_transport_qp *qp = &nt->qps[i];
+
+ ntb_transport_setup_qp_mw(nt, i);
+
+ if (qp->client_ready == NTB_LINK_UP)
+ schedule_delayed_work(&qp->link_work, 0);
+ }
+
+ return;
+
+out:
+ if (ntb_hw_link_status(ndev))
+ schedule_delayed_work(&nt->link_work,
+ msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_qp_link_work(struct work_struct *work)
+{
+ struct ntb_transport_qp *qp = container_of(work,
+ struct ntb_transport_qp,
+ link_work.work);
+ struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct ntb_transport *nt = qp->transport;
+ int rc, val;
+
+ WARN_ON(nt->transport_link != NTB_LINK_UP);
+
+ rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+ return;
+ }
+
+ rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
+ if (rc)
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ val | 1 << qp->qp_num, QP_LINKS);
+
+ /* query remote spad for qp ready bits */
+ rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
+ if (rc)
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
+
+ dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
+
+ /* See if the remote side is up */
+ if (1 << qp->qp_num & val) {
+ qp->qp_link = NTB_LINK_UP;
+
+ dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
+ if (qp->event_handler)
+ qp->event_handler(qp->cb_data, NTB_LINK_UP);
+ } else if (nt->transport_link == NTB_LINK_UP)
+ schedule_delayed_work(&qp->link_work,
+ msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_transport_init_queue(struct ntb_transport *nt,
+ unsigned int qp_num)
+{
+ struct ntb_transport_qp *qp;
+ unsigned int num_qps_mw, tx_size;
+ u8 mw_num = QP_TO_MW(qp_num);
+
+ qp = &nt->qps[qp_num];
+ qp->qp_num = qp_num;
+ qp->transport = nt;
+ qp->ndev = nt->ndev;
+ qp->qp_link = NTB_LINK_DOWN;
+ qp->client_ready = NTB_LINK_DOWN;
+ qp->event_handler = NULL;
+
+ if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+ num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+ else
+ num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+ tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
+ qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
+ (qp_num / NTB_NUM_MW * tx_size);
+ tx_size -= sizeof(struct ntb_rx_info);
+
+ qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
+ qp->tx_max_frame = min(transport_mtu, tx_size);
+ qp->tx_max_entry = tx_size / qp->tx_max_frame;
+ qp->tx_index = 0;
+
+ if (nt->debugfs_dir) {
+ char debugfs_name[8];
+
+ snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
+ qp->debugfs_dir = debugfs_create_dir(debugfs_name,
+ nt->debugfs_dir);
+
+ qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
+ qp->debugfs_dir, qp,
+ &ntb_qp_debugfs_stats);
+ }
+
+ INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
+ INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+
+ spin_lock_init(&qp->ntb_rx_pend_q_lock);
+ spin_lock_init(&qp->ntb_rx_free_q_lock);
+ spin_lock_init(&qp->ntb_tx_free_q_lock);
+
+ INIT_LIST_HEAD(&qp->rx_pend_q);
+ INIT_LIST_HEAD(&qp->rx_free_q);
+ INIT_LIST_HEAD(&qp->tx_free_q);
+}
+
+int ntb_transport_init(struct pci_dev *pdev)
+{
+ struct ntb_transport *nt;
+ int rc, i;
+
+ nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+
+ if (debugfs_initialized())
+ nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ else
+ nt->debugfs_dir = NULL;
+
+ nt->ndev = ntb_register_transport(pdev, nt);
+ if (!nt->ndev) {
+ rc = -EIO;
+ goto err;
+ }
+
+ nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
+
+ nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
+ GFP_KERNEL);
+ if (!nt->qps) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
+
+ for (i = 0; i < nt->max_qps; i++)
+ ntb_transport_init_queue(nt, i);
+
+ INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
+ INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+
+ rc = ntb_register_event_callback(nt->ndev,
+ ntb_transport_event_callback);
+ if (rc)
+ goto err2;
+
+ INIT_LIST_HEAD(&nt->client_devs);
+ rc = ntb_bus_init(nt);
+ if (rc)
+ goto err3;
+
+ if (ntb_hw_link_status(nt->ndev))
+ schedule_delayed_work(&nt->link_work, 0);
+
+ return 0;
+
+err3:
+ ntb_unregister_event_callback(nt->ndev);
+err2:
+ kfree(nt->qps);
+err1:
+ ntb_unregister_transport(nt->ndev);
+err:
+ debugfs_remove_recursive(nt->debugfs_dir);
+ kfree(nt);
+ return rc;
+}
+
+void ntb_transport_free(void *transport)
+{
+ struct ntb_transport *nt = transport;
+ struct pci_dev *pdev;
+ int i;
+
+ nt->transport_link = NTB_LINK_DOWN;
+
+ /* verify that all the qp's are freed */
+ for (i = 0; i < nt->max_qps; i++)
+ if (!test_bit(i, &nt->qp_bitmap))
+ ntb_transport_free_queue(&nt->qps[i]);
+
+ ntb_bus_remove(nt);
+
+ cancel_delayed_work_sync(&nt->link_work);
+
+ debugfs_remove_recursive(nt->debugfs_dir);
+
+ ntb_unregister_event_callback(nt->ndev);
+
+ pdev = ntb_query_pdev(nt->ndev);
+
+ for (i = 0; i < NTB_NUM_MW; i++)
+ if (nt->mw[i].virt_addr)
+ dma_free_coherent(&pdev->dev, nt->mw[i].size,
+ nt->mw[i].virt_addr,
+ nt->mw[i].dma_addr);
+
+ kfree(nt->qps);
+ ntb_unregister_transport(nt->ndev);
+ kfree(nt);
+}
+
+static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry, void *offset)
+{
+ void *cb_data = entry->cb_data;
+ unsigned int len = entry->len;
+
+ memcpy(entry->buf, offset, entry->len);
+
+ ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+
+ if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
+ qp->rx_handler(qp, qp->cb_data, cb_data, len);
+}
+
+static int ntb_process_rxc(struct ntb_transport_qp *qp)
+{
+ struct ntb_payload_header *hdr;
+ struct ntb_queue_entry *entry;
+ void *offset;
+
+ offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
+ hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
+
+ entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ if (!entry) {
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+ "no buffer - HDR ver %u, len %d, flags %x\n",
+ hdr->ver, hdr->len, hdr->flags);
+ qp->rx_err_no_buf++;
+ return -ENOMEM;
+ }
+
+ if (!(hdr->flags & DESC_DONE_FLAG)) {
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
+ qp->rx_ring_empty++;
+ return -EAGAIN;
+ }
+
+ if (hdr->ver != (u32) qp->rx_pkts) {
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+ "qp %d: version mismatch, expected %llu - got %u\n",
+ qp->qp_num, qp->rx_pkts, hdr->ver);
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
+ qp->rx_err_ver++;
+ return -EIO;
+ }
+
+ if (hdr->flags & LINK_DOWN_FLAG) {
+ ntb_qp_link_down(qp);
+
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
+ goto out;
+ }
+
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+ "rx offset %u, ver %u - %d payload received, buf size %d\n",
+ qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+ if (hdr->len <= entry->len) {
+ entry->len = hdr->len;
+ ntb_rx_copy_task(qp, entry, offset);
+ } else {
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
+
+ qp->rx_err_oflow++;
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+ "RX overflow! Wanted %d got %d\n",
+ hdr->len, entry->len);
+ }
+
+ qp->rx_bytes += hdr->len;
+ qp->rx_pkts++;
+
+out:
+ /* Ensure that the data is fully copied out before clearing the flag */
+ wmb();
+ hdr->flags = 0;
+ iowrite32(qp->rx_index, &qp->rx_info->entry);
+
+ qp->rx_index++;
+ qp->rx_index %= qp->rx_max_entry;
+
+ return 0;
+}
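
The receive path relies on a fixed frame layout: the payload starts at the slot base and the ntb_payload_header occupies the last bytes of the slot, the same convention ntb_tx_copy_task() uses when filling the peer's window. A standalone sketch (illustration only, not part of the patch), with a hypothetical frame size and slot index:

#include <stdio.h>

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

int main(void)
{
	unsigned long rx_max_frame = 0x4000;	/* hypothetical frame size */
	unsigned int rx_index = 3;		/* hypothetical slot */
	unsigned long frame_off = rx_max_frame * rx_index;
	unsigned long hdr_off = frame_off + rx_max_frame -
				sizeof(struct ntb_payload_header);

	/* payload at the slot base, descriptor in the slot's last bytes */
	printf("frame %u: payload at 0x%lx, header at 0x%lx\n",
	       rx_index, frame_off, hdr_off);
	return 0;
}
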
+
+static void ntb_transport_rx(unsigned long data)
+{
+ struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+ int rc;
+
+ do {
+ rc = ntb_process_rxc(qp);
+ } while (!rc);
+}
+
+static void ntb_transport_rxc_db(void *data, int db_num)
+{
+ struct ntb_transport_qp *qp = data;
+
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+ __func__, db_num);
+
+ tasklet_schedule(&qp->rx_work);
+}
+
+static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry,
+ void __iomem *offset)
+{
+ struct ntb_payload_header __iomem *hdr;
+
+ memcpy_toio(offset, entry->buf, entry->len);
+
+ hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ iowrite32(entry->len, &hdr->len);
+ iowrite32((u32) qp->tx_pkts, &hdr->ver);
+
+ /* Ensure that the data is fully copied out before setting the flag */
+ wmb();
+ iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
+
+ ntb_ring_sdb(qp->ndev, qp->qp_num);
+
+ /* The entry length can only be zero if the packet is intended to be a
+ * "link down" or similar. Since no payload is being sent in these
+ * cases, there is nothing to add to the completion queue.
+ */
+ if (entry->len > 0) {
+ qp->tx_bytes += entry->len;
+
+ if (qp->tx_handler)
+ qp->tx_handler(qp, qp->cb_data, entry->cb_data,
+ entry->len);
+ }
+
+ ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
+}
+
+static int ntb_process_tx(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
+{
+ void __iomem *offset;
+
+ offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
+
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
+ qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
+ entry->buf);
+ if (qp->tx_index == qp->remote_rx_info->entry) {
+ qp->tx_ring_full++;
+ return -EAGAIN;
+ }
+
+ if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
+ if (qp->tx_handler)
+ qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
+
+ ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+ &qp->tx_free_q);
+ return 0;
+ }
+
+ ntb_tx_copy_task(qp, entry, offset);
+
+ qp->tx_index++;
+ qp->tx_index %= qp->tx_max_entry;
+
+ qp->tx_pkts++;
+
+ return 0;
+}
+
+static void ntb_send_link_down(struct ntb_transport_qp *qp)
+{
+ struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct ntb_queue_entry *entry;
+ int i, rc;
+
+ if (qp->qp_link == NTB_LINK_DOWN)
+ return;
+
+ qp->qp_link = NTB_LINK_DOWN;
+ dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+
+ for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
+ entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+ if (entry)
+ break;
+ msleep(100);
+ }
+
+ if (!entry)
+ return;
+
+ entry->cb_data = NULL;
+ entry->buf = NULL;
+ entry->len = 0;
+ entry->flags = LINK_DOWN_FLAG;
+
+ rc = ntb_process_tx(qp, entry);
+ if (rc)
+ dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
+ qp->qp_num);
+}
+
+/**
+ * ntb_transport_create_queue - Create a new NTB transport layer queue
+ * @data: pointer for callback data passed back to the handlers
+ * @pdev: PCI device on which the NTB transport resides
+ * @handlers: receive, transmit, and event callback functions
+ *
+ * Create a new NTB transport layer queue and provide the queue with a callback
+ * routine for both transmit and receive. The receive callback routine will be
+ * used to pass up data when the transport has received it on the queue. The
+ * transmit callback routine will be called when the transport has completed the
+ * transmission of the data on the queue and the data is ready to be freed.
+ *
+ * RETURNS: pointer to newly created ntb_queue, NULL on error.
+ */
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+ const struct ntb_queue_handlers *handlers)
+{
+ struct ntb_queue_entry *entry;
+ struct ntb_transport_qp *qp;
+ struct ntb_transport *nt;
+ unsigned int free_queue;
+ int rc, i;
+
+ nt = ntb_find_transport(pdev);
+ if (!nt)
+ goto err;
+
+ free_queue = ffs(nt->qp_bitmap);
+ if (!free_queue)
+ goto err;
+
+ /* decrement free_queue to make it zero based */
+ free_queue--;
+
+ clear_bit(free_queue, &nt->qp_bitmap);
+
+ qp = &nt->qps[free_queue];
+ qp->cb_data = data;
+ qp->rx_handler = handlers->rx_handler;
+ qp->tx_handler = handlers->tx_handler;
+ qp->event_handler = handlers->event_handler;
+
+ for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+ entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+ if (!entry)
+ goto err1;
+
+ ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+ &qp->rx_free_q);
+ }
+
+ for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+ entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+ if (!entry)
+ goto err2;
+
+ ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+ &qp->tx_free_q);
+ }
+
+ tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
+
+ rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
+ ntb_transport_rxc_db);
+ if (rc)
+ goto err3;
+
+ dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
+
+ return qp;
+
+err3:
+ tasklet_disable(&qp->rx_work);
+err2:
+ while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+ kfree(entry);
+err1:
+ while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ kfree(entry);
+ set_bit(free_queue, &nt->qp_bitmap);
+err:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
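
To make the kerneldoc above concrete, here is a minimal, hypothetical client sketch (not part of the patch). It shows the expected call order: create the queue with a set of handlers, post receive buffers, then signal readiness with ntb_transport_link_up(). It assumes struct ntb_queue_handlers, declared in include/linux/ntb.h and not shown in this diff, carries the rx_handler/tx_handler/event_handler pointers copied by ntb_transport_create_queue() above, and that the exported prototypes are available via the same header.

#include <linux/ntb.h>
#include <linux/pci.h>
#include <linux/slab.h>

static void demo_rx(struct ntb_transport_qp *qp, void *qp_data,
		    void *data, int len)
{
	/* 'data' is the cb pointer posted with ntb_transport_rx_enqueue();
	 * here it is the buffer itself, so simply repost it.
	 */
	ntb_transport_rx_enqueue(qp, data, data, ntb_transport_max_size(qp));
}

static void demo_tx(struct ntb_transport_qp *qp, void *qp_data,
		    void *data, int len)
{
	kfree(data);	/* transmission complete, buffer may be freed */
}

static void demo_event(void *data, int status)
{
	/* status is NTB_LINK_UP or NTB_LINK_DOWN */
}

static const struct ntb_queue_handlers demo_handlers = {
	.rx_handler = demo_rx,
	.tx_handler = demo_tx,
	.event_handler = demo_event,
};

static struct ntb_transport_qp *demo_attach(struct pci_dev *pdev)
{
	struct ntb_transport_qp *qp;
	void *buf;
	int i;

	qp = ntb_transport_create_queue(NULL, pdev, &demo_handlers);
	if (!qp)
		return NULL;

	/* post a handful of receive buffers before declaring readiness */
	for (i = 0; i < 4; i++) {
		buf = kmalloc(ntb_transport_max_size(qp), GFP_KERNEL);
		if (buf)
			ntb_transport_rx_enqueue(qp, buf, buf,
						 ntb_transport_max_size(qp));
	}

	ntb_transport_link_up(qp);
	return qp;
}
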
+
+/**
+ * ntb_transport_free_queue - Frees NTB transport queue
+ * @qp: NTB queue to be freed
+ *
+ * Frees NTB transport queue
+ */
+void ntb_transport_free_queue(struct ntb_transport_qp *qp)
+{
+ struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct ntb_queue_entry *entry;
+
+ if (!qp)
+ return;
+
+ cancel_delayed_work_sync(&qp->link_work);
+
+ ntb_unregister_db_callback(qp->ndev, qp->qp_num);
+ tasklet_disable(&qp->rx_work);
+
+ while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ kfree(entry);
+
+ while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
+ dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+ kfree(entry);
+ }
+
+ while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+ kfree(entry);
+
+ set_bit(qp->qp_num, &qp->transport->qp_bitmap);
+
+ dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
+
+/**
+ * ntb_transport_rx_remove - Dequeues enqueued rx packet
+ * @qp: NTB transport layer queue from which the buffer is dequeued
+ * @len: pointer to a variable that receives the dequeued buffer's length
+ *
+ * Dequeues unused buffers from the receive queue. Should only be used during
+ * shutdown of the qp.
+ *
+ * RETURNS: the per-buffer cb pointer of the dequeued entry, or NULL on error.
+ */
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
+{
+ struct ntb_queue_entry *entry;
+ void *buf;
+
+ if (!qp || qp->client_ready == NTB_LINK_UP)
+ return NULL;
+
+ entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ if (!entry)
+ return NULL;
+
+ buf = entry->cb_data;
+ *len = entry->len;
+
+ ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
+
+/**
+ * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that incoming packets will be copied into
+ * @len: length of the data buffer
+ *
+ * Enqueue a new receive buffer onto the transport queue into which an NTB
+ * payload can be received.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len)
+{
+ struct ntb_queue_entry *entry;
+
+ if (!qp)
+ return -EINVAL;
+
+ entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->cb_data = cb;
+ entry->buf = data;
+ entry->len = len;
+
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
+
+/**
+ * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that will be sent
+ * @len: length of the data buffer
+ *
+ * Enqueue a new transmit buffer onto the transport queue from which an NTB
+ * payload will be transmitted. This assumes that a lock is being held to
+ * serialize access to the qp.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len)
+{
+ struct ntb_queue_entry *entry;
+ int rc;
+
+ if (!qp || qp->qp_link != NTB_LINK_UP || !len)
+ return -EINVAL;
+
+ entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->cb_data = cb;
+ entry->buf = data;
+ entry->len = len;
+ entry->flags = 0;
+
+ rc = ntb_process_tx(qp, entry);
+ if (rc)
+ ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+ &qp->tx_free_q);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
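
A transmit sketch built on the enqueue contract above. my_send() and its error policy are hypothetical; the only points taken from the kernel-doc are that the caller serializes access to the qp and that the payload must fit within ntb_transport_max_size().

    /* caller is assumed to already hold the lock that serializes qp access */
    static int my_send(struct ntb_transport_qp *qp, void *data, unsigned int len)
    {
            int rc;

            if (len > ntb_transport_max_size(qp))
                    return -EMSGSIZE;       /* payload must fit in one frame */

            rc = ntb_transport_tx_enqueue(qp, data, data, len);
            if (rc == -ENOMEM)
                    pr_debug("qp %d tx ring full, retry later\n",
                             ntb_transport_qp_num(qp));

            return rc;
    }
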
+
+/**
+ * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
+ * @qp: NTB transport layer queue to be enabled
+ *
+ * Notify NTB transport layer of client readiness to use queue
+ */
+void ntb_transport_link_up(struct ntb_transport_qp *qp)
+{
+ if (!qp)
+ return;
+
+ qp->client_ready = NTB_LINK_UP;
+
+ if (qp->transport->transport_link == NTB_LINK_UP)
+ schedule_delayed_work(&qp->link_work, 0);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_up);
+
+/**
+ * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
+ * @qp: NTB transport layer queue to be disabled
+ *
+ * Notify NTB transport layer of the client's desire to no longer receive data
+ * on the specified transport queue. It is the client's responsibility to ensure
+ * all entries on the queue are purged or otherwise handled appropriately.
+ */
+void ntb_transport_link_down(struct ntb_transport_qp *qp)
+{
+ struct pci_dev *pdev;
+ int rc, val;
+
+ if (!qp)
+ return;
+
+ pdev = ntb_query_pdev(qp->ndev);
+
+ qp->client_ready = NTB_LINK_DOWN;
+
+ rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+ return;
+ }
+
+ rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
+ val & ~(1 << qp->qp_num));
+ if (rc)
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ val & ~(1 << qp->qp_num), QP_LINKS);
+
+ if (qp->qp_link == NTB_LINK_UP)
+ ntb_send_link_down(qp);
+ else
+ cancel_delayed_work_sync(&qp->link_work);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_down);
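
The two notifications above pair naturally with ntb_transport_link_query(); the sketch below shows one plausible client policy, where transmits are simply refused until both sides have brought the queue up. The policy itself is an assumption.

    static void my_client_open(struct ntb_transport_qp *qp)
    {
            /* declare readiness; traffic starts once the peer does the same */
            ntb_transport_link_up(qp);
    }

    static int my_client_xmit(struct ntb_transport_qp *qp, void *data,
                              unsigned int len)
    {
            if (!ntb_transport_link_query(qp))
                    return -ENOTCONN;       /* hypothetical: refuse until link is up */

            return my_send(qp, data, len);  /* helper from the transmit sketch above */
    }
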
+
+/**
+ * ntb_transport_link_query - Query transport link state
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query connectivity to the remote system of the NTB transport queue
+ *
+ * RETURNS: true for link up or false for link down
+ */
+bool ntb_transport_link_query(struct ntb_transport_qp *qp)
+{
+ return qp->qp_link == NTB_LINK_UP;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_query);
+
+/**
+ * ntb_transport_qp_num - Query the qp number
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query qp number of the NTB transport queue
+ *
+ * RETURNS: a zero-based number specifying the qp number
+ */
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
+{
+ return qp->qp_num;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
+
+/**
+ * ntb_transport_max_size - Query the max payload size of a qp
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query the maximum payload size permissible on the given qp
+ *
+ * RETURNS: the max payload size of a qp
+ */
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
+{
+ return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_max_size);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 0125524c08c4..04da786c84d2 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -429,7 +429,7 @@ static u64 __of_translate_address(struct device_node *dev,
goto bail;
bus = of_match_bus(parent);
- /* Cound address cells & copy address locally */
+ /* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 538e3cfad23e..321d3ef05006 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -24,38 +24,21 @@
#include <linux/slab.h>
#include <linux/proc_fs.h>
-/**
- * struct alias_prop - Alias property in 'aliases' node
- * @link: List node to link the structure in aliases_lookup list
- * @alias: Alias property name
- * @np: Pointer to device_node that the alias stands for
- * @id: Index value from end of alias name
- * @stem: Alias string without the index
- *
- * The structure represents one alias property of 'aliases' node as
- * an entry in aliases_lookup list.
- */
-struct alias_prop {
- struct list_head link;
- const char *alias;
- struct device_node *np;
- int id;
- char stem[0];
-};
+#include "of_private.h"
-static LIST_HEAD(aliases_lookup);
+LIST_HEAD(aliases_lookup);
struct device_node *of_allnodes;
EXPORT_SYMBOL(of_allnodes);
struct device_node *of_chosen;
struct device_node *of_aliases;
-static DEFINE_MUTEX(of_aliases_mutex);
+DEFINE_MUTEX(of_aliases_mutex);
/* use when traversing tree through the allnext, child, sibling,
* or parent members of struct device_node.
*/
-DEFINE_RWLOCK(devtree_lock);
+DEFINE_RAW_SPINLOCK(devtree_lock);
int of_n_addr_cells(struct device_node *np)
{
@@ -164,16 +147,14 @@ void of_node_put(struct device_node *node)
EXPORT_SYMBOL(of_node_put);
#endif /* CONFIG_OF_DYNAMIC */
-struct property *of_find_property(const struct device_node *np,
- const char *name,
- int *lenp)
+static struct property *__of_find_property(const struct device_node *np,
+ const char *name, int *lenp)
{
struct property *pp;
if (!np)
return NULL;
- read_lock(&devtree_lock);
for (pp = np->properties; pp; pp = pp->next) {
if (of_prop_cmp(pp->name, name) == 0) {
if (lenp)
@@ -181,7 +162,20 @@ struct property *of_find_property(const struct device_node *np,
break;
}
}
- read_unlock(&devtree_lock);
+
+ return pp;
+}
+
+struct property *of_find_property(const struct device_node *np,
+ const char *name,
+ int *lenp)
+{
+ struct property *pp;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ pp = __of_find_property(np, name, lenp);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return pp;
}
@@ -199,13 +193,13 @@ struct device_node *of_find_all_nodes(struct device_node *prev)
{
struct device_node *np;
- read_lock(&devtree_lock);
+ raw_spin_lock(&devtree_lock);
np = prev ? prev->allnext : of_allnodes;
for (; np != NULL; np = np->allnext)
if (of_node_get(np))
break;
of_node_put(prev);
- read_unlock(&devtree_lock);
+ raw_spin_unlock(&devtree_lock);
return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
@@ -214,8 +208,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
* Find a property with a given name for a given node
* and return the value.
*/
+static const void *__of_get_property(const struct device_node *np,
+ const char *name, int *lenp)
+{
+ struct property *pp = __of_find_property(np, name, lenp);
+
+ return pp ? pp->value : NULL;
+}
+
+/*
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
const void *of_get_property(const struct device_node *np, const char *name,
- int *lenp)
+ int *lenp)
{
struct property *pp = of_find_property(np, name, lenp);
@@ -226,13 +232,13 @@ EXPORT_SYMBOL(of_get_property);
/** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
-int of_device_is_compatible(const struct device_node *device,
- const char *compat)
+static int __of_device_is_compatible(const struct device_node *device,
+ const char *compat)
{
const char* cp;
int cplen, l;
- cp = of_get_property(device, "compatible", &cplen);
+ cp = __of_get_property(device, "compatible", &cplen);
if (cp == NULL)
return 0;
while (cplen > 0) {
@@ -245,6 +251,21 @@ int of_device_is_compatible(const struct device_node *device,
return 0;
}
+
+/** Checks if the given "compat" string matches one of the strings in
+ * the device's "compatible" property
+ */
+int of_device_is_compatible(const struct device_node *device,
+ const char *compat)
+{
+ unsigned long flags;
+ int res;
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ res = __of_device_is_compatible(device, compat);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return res;
+}
EXPORT_SYMBOL(of_device_is_compatible);
/**
@@ -269,19 +290,19 @@ int of_machine_is_compatible(const char *compat)
EXPORT_SYMBOL(of_machine_is_compatible);
/**
- * of_device_is_available - check if a device is available for use
+ * __of_device_is_available - check if a device is available for use
*
- * @device: Node to check for availability
+ * @device: Node to check for availability, with locks already held
*
* Returns 1 if the status property is absent or set to "okay" or "ok",
* 0 otherwise
*/
-int of_device_is_available(const struct device_node *device)
+static int __of_device_is_available(const struct device_node *device)
{
const char *status;
int statlen;
- status = of_get_property(device, "status", &statlen);
+ status = __of_get_property(device, "status", &statlen);
if (status == NULL)
return 1;
@@ -292,6 +313,26 @@ int of_device_is_available(const struct device_node *device)
return 0;
}
+
+/**
+ * of_device_is_available - check if a device is available for use
+ *
+ * @device: Node to check for availability
+ *
+ * Returns 1 if the status property is absent or set to "okay" or "ok",
+ * 0 otherwise
+ */
+int of_device_is_available(const struct device_node *device)
+{
+ unsigned long flags;
+ int res;
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ res = __of_device_is_available(device);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return res;
+
+}
EXPORT_SYMBOL(of_device_is_available);
/**
@@ -304,13 +345,14 @@ EXPORT_SYMBOL(of_device_is_available);
struct device_node *of_get_parent(const struct device_node *node)
{
struct device_node *np;
+ unsigned long flags;
if (!node)
return NULL;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np = of_node_get(node->parent);
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_get_parent);
@@ -329,14 +371,15 @@ EXPORT_SYMBOL(of_get_parent);
struct device_node *of_get_next_parent(struct device_node *node)
{
struct device_node *parent;
+ unsigned long flags;
if (!node)
return NULL;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
parent = of_node_get(node->parent);
of_node_put(node);
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return parent;
}
@@ -352,14 +395,15 @@ struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev)
{
struct device_node *next;
+ unsigned long flags;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
next = prev ? prev->sibling : node->child;
for (; next; next = next->sibling)
if (of_node_get(next))
break;
of_node_put(prev);
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return next;
}
EXPORT_SYMBOL(of_get_next_child);
@@ -377,16 +421,16 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
{
struct device_node *next;
- read_lock(&devtree_lock);
+ raw_spin_lock(&devtree_lock);
next = prev ? prev->sibling : node->child;
for (; next; next = next->sibling) {
- if (!of_device_is_available(next))
+ if (!__of_device_is_available(next))
continue;
if (of_node_get(next))
break;
}
of_node_put(prev);
- read_unlock(&devtree_lock);
+ raw_spin_unlock(&devtree_lock);
return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
@@ -424,14 +468,15 @@ EXPORT_SYMBOL(of_get_child_by_name);
struct device_node *of_find_node_by_path(const char *path)
{
struct device_node *np = of_allnodes;
+ unsigned long flags;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
for (; np; np = np->allnext) {
if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
&& of_node_get(np))
break;
}
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_by_path);
@@ -451,15 +496,16 @@ struct device_node *of_find_node_by_name(struct device_node *from,
const char *name)
{
struct device_node *np;
+ unsigned long flags;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext)
if (np->name && (of_node_cmp(np->name, name) == 0)
&& of_node_get(np))
break;
of_node_put(from);
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
@@ -480,15 +526,16 @@ struct device_node *of_find_node_by_type(struct device_node *from,
const char *type)
{
struct device_node *np;
+ unsigned long flags;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext)
if (np->type && (of_node_cmp(np->type, type) == 0)
&& of_node_get(np))
break;
of_node_put(from);
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_by_type);
@@ -511,18 +558,20 @@ struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compatible)
{
struct device_node *np;
+ unsigned long flags;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext) {
if (type
&& !(np->type && (of_node_cmp(np->type, type) == 0)))
continue;
- if (of_device_is_compatible(np, compatible) && of_node_get(np))
+ if (__of_device_is_compatible(np, compatible) &&
+ of_node_get(np))
break;
}
of_node_put(from);
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
@@ -544,8 +593,9 @@ struct device_node *of_find_node_with_property(struct device_node *from,
{
struct device_node *np;
struct property *pp;
+ unsigned long flags;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext) {
for (pp = np->properties; pp; pp = pp->next) {
@@ -557,20 +607,14 @@ struct device_node *of_find_node_with_property(struct device_node *from,
}
out:
of_node_put(from);
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_with_property);
-/**
- * of_match_node - Tell if an device_node has a matching of_match structure
- * @matches: array of of device match structures to search in
- * @node: the of device structure to match against
- *
- * Low level utility function used by device matching.
- */
-const struct of_device_id *of_match_node(const struct of_device_id *matches,
- const struct device_node *node)
+static
+const struct of_device_id *__of_match_node(const struct of_device_id *matches,
+ const struct device_node *node)
{
if (!matches)
return NULL;
@@ -584,14 +628,33 @@ const struct of_device_id *of_match_node(const struct of_device_id *matches,
match &= node->type
&& !strcmp(matches->type, node->type);
if (matches->compatible[0])
- match &= of_device_is_compatible(node,
- matches->compatible);
+ match &= __of_device_is_compatible(node,
+ matches->compatible);
if (match)
return matches;
matches++;
}
return NULL;
}
+
+/**
+ * of_match_node - Tell if a device_node has a matching of_match structure
+ * @matches: array of of device match structures to search in
+ * @node: the of device structure to match against
+ *
+ * Low level utility function used by device matching.
+ */
+const struct of_device_id *of_match_node(const struct of_device_id *matches,
+ const struct device_node *node)
+{
+ const struct of_device_id *match;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ match = __of_match_node(matches, node);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return match;
+}
EXPORT_SYMBOL(of_match_node);
/**
@@ -612,24 +675,27 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from,
const struct of_device_id **match)
{
struct device_node *np;
+ const struct of_device_id *m;
+ unsigned long flags;
if (match)
*match = NULL;
- read_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext) {
- if (of_match_node(matches, np) && of_node_get(np)) {
+ m = __of_match_node(matches, np);
+ if (m && of_node_get(np)) {
if (match)
- *match = matches;
+ *match = m;
break;
}
}
of_node_put(from);
- read_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
-EXPORT_SYMBOL(of_find_matching_node);
+EXPORT_SYMBOL(of_find_matching_node_and_match);
/**
* of_modalias_node - Lookup appropriate modalias for a device node
@@ -669,12 +735,12 @@ struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np;
- read_lock(&devtree_lock);
+ raw_spin_lock(&devtree_lock);
for (np = of_allnodes; np; np = np->allnext)
if (np->phandle == handle)
break;
of_node_get(np);
- read_unlock(&devtree_lock);
+ raw_spin_unlock(&devtree_lock);
return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
@@ -1025,12 +1091,13 @@ EXPORT_SYMBOL(of_parse_phandle);
* To get a device_node of the `node2' node you may call this:
* of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
*/
-int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
- const char *cells_name, int index,
- struct of_phandle_args *out_args)
+static int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name, int index,
+ struct of_phandle_args *out_args)
{
const __be32 *list, *list_end;
- int size, cur_index = 0;
+ int rc = 0, size, cur_index = 0;
uint32_t count = 0;
struct device_node *node = NULL;
phandle phandle;
@@ -1043,6 +1110,7 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
/* Loop over the phandles until all the requested entry is found */
while (list < list_end) {
+ rc = -EINVAL;
count = 0;
/*
@@ -1059,13 +1127,13 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
if (!node) {
pr_err("%s: could not find phandle\n",
np->full_name);
- break;
+ goto err;
}
if (of_property_read_u32(node, cells_name, &count)) {
pr_err("%s: could not get %s for %s\n",
np->full_name, cells_name,
node->full_name);
- break;
+ goto err;
}
/*
@@ -1075,7 +1143,7 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
if (list + count > list_end) {
pr_err("%s: arguments longer than property\n",
np->full_name);
- break;
+ goto err;
}
}
@@ -1085,9 +1153,10 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
* index matches, then fill the out_args structure and return,
* or return -ENOENT for an empty entry.
*/
+ rc = -ENOENT;
if (cur_index == index) {
if (!phandle)
- return -ENOENT;
+ goto err;
if (out_args) {
int i;
@@ -1098,6 +1167,10 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
for (i = 0; i < count; i++)
out_args->args[i] = be32_to_cpup(list++);
}
+
+ /* Found it! return success */
+ if (node)
+ of_node_put(node);
return 0;
}
@@ -1107,34 +1180,95 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
cur_index++;
}
- /* Loop exited without finding a valid entry; return an error */
+ /*
+ * Unlock node before returning result; will be one of:
+ * -ENOENT : index is for empty phandle
+ * -EINVAL : parsing error on data
+ * [1..n] : Number of phandles (count mode; when index = -1)
+ */
+ rc = index < 0 ? cur_index : -ENOENT;
+ err:
if (node)
of_node_put(node);
- return -EINVAL;
+ return rc;
+}
+
+int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
+ const char *cells_name, int index,
+ struct of_phandle_args *out_args)
+{
+ if (index < 0)
+ return -EINVAL;
+ return __of_parse_phandle_with_args(np, list_name, cells_name, index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_args);
/**
- * prom_add_property - Add a property to a node
+ * of_count_phandle_with_args() - Find the number of phandle references in a property
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * Returns the number of phandle + argument tuples within a property. It
+ * is a typical pattern to encode a list of phandle and variable
+ * arguments into a single property. The number of arguments is encoded
+ * by a property in the phandle-target node. For example, a gpios
+ * property would contain a list of GPIO specifiers consisting of a
+ * phandle and 1 or more arguments. The number of arguments is
+ * determined by the #gpio-cells property in the node pointed to by the
+ * phandle.
*/
-int prom_add_property(struct device_node *np, struct property *prop)
+int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
+ const char *cells_name)
+{
+ return __of_parse_phandle_with_args(np, list_name, cells_name, -1, NULL);
+}
+EXPORT_SYMBOL(of_count_phandle_with_args);
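
As a usage sketch, a consumer driver that already holds a device node np can size its bookkeeping with of_count_phandle_with_args() and then iterate the same list with of_parse_phandle_with_args(); the "clocks"/"#clock-cells" pairing below is only an illustrative convention, not something this patch introduces.

    struct of_phandle_args args;
    int i, count;

    count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
    if (count < 0)
            return count;   /* -ENOENT or -EINVAL, as documented above */

    for (i = 0; i < count; i++) {
            if (of_parse_phandle_with_args(np, "clocks", "#clock-cells",
                                           i, &args))
                    continue;
            /* args.np is the provider node, args.args[] holds its cells */
            of_node_put(args.np);
    }
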
+
+#if defined(CONFIG_OF_DYNAMIC)
+static int of_property_notify(int action, struct device_node *np,
+ struct property *prop)
+{
+ struct of_prop_reconfig pr;
+
+ pr.dn = np;
+ pr.prop = prop;
+ return of_reconfig_notify(action, &pr);
+}
+#else
+static int of_property_notify(int action, struct device_node *np,
+ struct property *prop)
+{
+ return 0;
+}
+#endif
+
+/**
+ * of_add_property - Add a property to a node
+ */
+int of_add_property(struct device_node *np, struct property *prop)
{
struct property **next;
unsigned long flags;
+ int rc;
+
+ rc = of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop);
+ if (rc)
+ return rc;
prop->next = NULL;
- write_lock_irqsave(&devtree_lock, flags);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
next = &np->properties;
while (*next) {
if (strcmp(prop->name, (*next)->name) == 0) {
/* duplicate ! don't insert it */
- write_unlock_irqrestore(&devtree_lock, flags);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return -1;
}
next = &(*next)->next;
}
*next = prop;
- write_unlock_irqrestore(&devtree_lock, flags);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
#ifdef CONFIG_PROC_DEVICETREE
/* try to add to proc as well if it was initialized */
@@ -1146,20 +1280,25 @@ int prom_add_property(struct device_node *np, struct property *prop)
}
/**
- * prom_remove_property - Remove a property from a node.
+ * of_remove_property - Remove a property from a node.
*
* Note that we don't actually remove it, since we have given out
* who-knows-how-many pointers to the data using get-property.
* Instead we just move the property to the "dead properties"
* list, so it won't be found any more.
*/
-int prom_remove_property(struct device_node *np, struct property *prop)
+int of_remove_property(struct device_node *np, struct property *prop)
{
struct property **next;
unsigned long flags;
int found = 0;
+ int rc;
+
+ rc = of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop);
+ if (rc)
+ return rc;
- write_lock_irqsave(&devtree_lock, flags);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
next = &np->properties;
while (*next) {
if (*next == prop) {
@@ -1172,7 +1311,7 @@ int prom_remove_property(struct device_node *np, struct property *prop)
}
next = &(*next)->next;
}
- write_unlock_irqrestore(&devtree_lock, flags);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
if (!found)
return -ENODEV;
@@ -1187,7 +1326,7 @@ int prom_remove_property(struct device_node *np, struct property *prop)
}
/*
- * prom_update_property - Update a property in a node, if the property does
+ * of_update_property - Update a property in a node, if the property does
* not exist, add it.
*
* Note that we don't actually remove it, since we have given out
@@ -1195,21 +1334,24 @@ int prom_remove_property(struct device_node *np, struct property *prop)
* Instead we just move the property to the "dead properties" list,
* and add the new property to the property list
*/
-int prom_update_property(struct device_node *np,
- struct property *newprop)
+int of_update_property(struct device_node *np, struct property *newprop)
{
struct property **next, *oldprop;
unsigned long flags;
- int found = 0;
+ int rc, found = 0;
+
+ rc = of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop);
+ if (rc)
+ return rc;
if (!newprop->name)
return -EINVAL;
oldprop = of_find_property(np, newprop->name, NULL);
if (!oldprop)
- return prom_add_property(np, newprop);
+ return of_add_property(np, newprop);
- write_lock_irqsave(&devtree_lock, flags);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
next = &np->properties;
while (*next) {
if (*next == oldprop) {
@@ -1223,7 +1365,7 @@ int prom_update_property(struct device_node *np,
}
next = &(*next)->next;
}
- write_unlock_irqrestore(&devtree_lock, flags);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
if (!found)
return -ENODEV;
@@ -1246,37 +1388,117 @@ int prom_update_property(struct device_node *np,
* device tree nodes.
*/
+static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
+
+int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&of_reconfig_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_reconfig_notifier_register);
+
+int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&of_reconfig_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
+
+int of_reconfig_notify(unsigned long action, void *p)
+{
+ int rc;
+
+ rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
+ return notifier_to_errno(rc);
+}
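
A small sketch of consuming the new notifier chain. The OF_RECONFIG_* actions, the struct of_prop_reconfig payload and the register/unregister entry points come from this patch; the handler body and where a driver would hook it up are assumptions.

    static int my_of_reconfig(struct notifier_block *nb, unsigned long action,
                              void *arg)
    {
            switch (action) {
            case OF_RECONFIG_ATTACH_NODE:
            case OF_RECONFIG_DETACH_NODE:
                    /* react to device tree nodes coming and going */
                    break;
            case OF_RECONFIG_ADD_PROPERTY:
            case OF_RECONFIG_REMOVE_PROPERTY:
            case OF_RECONFIG_UPDATE_PROPERTY:
                    /* arg is a struct of_prop_reconfig with the node and property */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_of_nb = {
            .notifier_call = my_of_reconfig,
    };

    /* typically paired in a driver's init and exit paths */
    of_reconfig_notifier_register(&my_of_nb);
    of_reconfig_notifier_unregister(&my_of_nb);
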
+
+#ifdef CONFIG_PROC_DEVICETREE
+static void of_add_proc_dt_entry(struct device_node *dn)
+{
+ struct proc_dir_entry *ent;
+
+ ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
+ if (ent)
+ proc_device_tree_add_node(dn, ent);
+}
+#else
+static void of_add_proc_dt_entry(struct device_node *dn)
+{
+ return;
+}
+#endif
+
/**
* of_attach_node - Plug a device node into the tree and global list.
*/
-void of_attach_node(struct device_node *np)
+int of_attach_node(struct device_node *np)
{
unsigned long flags;
+ int rc;
- write_lock_irqsave(&devtree_lock, flags);
+ rc = of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np);
+ if (rc)
+ return rc;
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np->sibling = np->parent->child;
np->allnext = of_allnodes;
np->parent->child = np;
of_allnodes = np;
- write_unlock_irqrestore(&devtree_lock, flags);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+ of_add_proc_dt_entry(np);
+ return 0;
}
+#ifdef CONFIG_PROC_DEVICETREE
+static void of_remove_proc_dt_entry(struct device_node *dn)
+{
+ struct device_node *parent = dn->parent;
+ struct property *prop = dn->properties;
+
+ while (prop) {
+ remove_proc_entry(prop->name, dn->pde);
+ prop = prop->next;
+ }
+
+ if (dn->pde)
+ remove_proc_entry(dn->pde->name, parent->pde);
+}
+#else
+static void of_remove_proc_dt_entry(struct device_node *dn)
+{
+ return;
+}
+#endif
+
/**
* of_detach_node - "Unplug" a node from the device tree.
*
* The caller must hold a reference to the node. The memory associated with
* the node is not freed until its refcount goes to zero.
*/
-void of_detach_node(struct device_node *np)
+int of_detach_node(struct device_node *np)
{
struct device_node *parent;
unsigned long flags;
+ int rc = 0;
+
+ rc = of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np);
+ if (rc)
+ return rc;
- write_lock_irqsave(&devtree_lock, flags);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+
+ if (of_node_check_flag(np, OF_DETACHED)) {
+ /* someone already detached it */
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return rc;
+ }
parent = np->parent;
- if (!parent)
- goto out_unlock;
+ if (!parent) {
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return rc;
+ }
if (of_allnodes == np)
of_allnodes = np->allnext;
@@ -1301,9 +1523,10 @@ void of_detach_node(struct device_node *np)
}
of_node_set_flag(np, OF_DETACHED);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-out_unlock:
- write_unlock_irqrestore(&devtree_lock, flags);
+ of_remove_proc_dt_entry(np);
+ return rc;
}
#endif /* defined(CONFIG_OF_DYNAMIC) */
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 4c74e4fc5a51..f685e55e0717 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <asm/errno.h>
+#include "of_private.h"
/**
* of_match_device - Tell if a struct device matches an of_device_id list
@@ -131,6 +132,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
const char *compat;
+ struct alias_prop *app;
int seen = 0, cplen, sl;
if ((!dev) || (!dev->of_node))
@@ -153,6 +155,17 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
seen++;
}
add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);
+
+ seen = 0;
+ mutex_lock(&of_aliases_mutex);
+ list_for_each_entry(app, &aliases_lookup, link) {
+ if (dev->of_node == app->np) {
+ add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
+ app->alias);
+ seen++;
+ }
+ }
+ mutex_unlock(&of_aliases_mutex);
}
int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index a65c39c473bf..808be06bb67e 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -488,14 +488,8 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
depth++;
pathp = (char *)p;
p = ALIGN(p + strlen(pathp) + 1, 4);
- if ((*pathp) == '/') {
- const char *lp, *np;
- for (lp = NULL, np = pathp; *np; np++)
- if ((*np) == '/')
- lp = np+1;
- if (lp != NULL)
- pathp = lp;
- }
+ if (*pathp == '/')
+ pathp = kbasename(pathp);
rc = it(p, pathp, depth, data);
if (rc != 0)
break;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 83ca06f4312b..e3a8b22ef9dd 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -157,7 +157,7 @@ struct phy_device *of_phy_connect(struct net_device *dev,
if (!phy)
return NULL;
- return phy_connect_direct(dev, phy, hndlr, flags, iface) ? NULL : phy;
+ return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy;
}
EXPORT_SYMBOL(of_phy_connect);
@@ -194,7 +194,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
sprintf(bus_id, PHY_ID_FMT, "fixed-0", be32_to_cpu(phy_id[0]));
- phy = phy_connect(dev, bus_id, hndlr, 0, iface);
+ phy = phy_connect(dev, bus_id, hndlr, iface);
return IS_ERR(phy) ? NULL : phy;
}
EXPORT_SYMBOL(of_phy_connect_fixed_link);
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
new file mode 100644
index 000000000000..ff350c8fa7ac
--- /dev/null
+++ b/drivers/of/of_private.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_OF_PRIVATE_H
+#define _LINUX_OF_PRIVATE_H
+/*
+ * Private symbols used by OF support code
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/**
+ * struct alias_prop - Alias property in 'aliases' node
+ * @link: List node to link the structure in aliases_lookup list
+ * @alias: Alias property name
+ * @np: Pointer to device_node that the alias stands for
+ * @id: Index value from end of alias name
+ * @stem: Alias string without the index
+ *
+ * The structure represents one alias property of 'aliases' node as
+ * an entry in aliases_lookup list.
+ */
+struct alias_prop {
+ struct list_head link;
+ const char *alias;
+ struct device_node *np;
+ int id;
+ char stem[0];
+};
+
+extern struct mutex of_aliases_mutex;
+extern struct list_head aliases_lookup;
+#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b80891b43816..e0a6514ab46c 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -436,6 +436,7 @@ EXPORT_SYMBOL(of_platform_bus_probe);
* of_platform_populate() - Populate platform_devices from device tree data
* @root: parent of the first level to probe or NULL for the root of the tree
* @matches: match table, NULL to use the default
+ * @lookup: auxdata table for matching id and platform_data with device nodes
* @parent: parent to hook devices from, NULL for toplevel
*
* Similar to of_platform_bus_probe(), this function walks the device tree
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index f24ffd7088d2..0eb5c38b4e07 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -2,7 +2,7 @@
* Self tests for device tree subsystem
*/
-#define pr_fmt(fmt) "### %s(): " fmt, __func__
+#define pr_fmt(fmt) "### dt-test ### " fmt
#include <linux/clk.h>
#include <linux/err.h>
@@ -16,26 +16,30 @@
static bool selftest_passed = true;
#define selftest(result, fmt, ...) { \
- selftest_passed &= (result); \
- if (!(result)) \
+ if (!(result)) { \
pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
+ selftest_passed = false; \
+ } else { \
+ pr_info("pass %s:%i\n", __FILE__, __LINE__); \
+ } \
}
static void __init of_selftest_parse_phandle_with_args(void)
{
struct device_node *np;
struct of_phandle_args args;
- int rc, i;
- bool passed_all = true;
+ int i, rc;
- pr_info("start\n");
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
if (!np) {
pr_err("missing testcase data\n");
return;
}
- for (i = 0; i < 7; i++) {
+ rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
+ selftest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
+
+ for (i = 0; i < 8; i++) {
bool passed = true;
rc = of_parse_phandle_with_args(np, "phandle-list",
"#phandle-cells", i, &args);
@@ -79,45 +83,47 @@ static void __init of_selftest_parse_phandle_with_args(void)
passed &= (args.args[0] == (i + 1));
break;
case 7:
- passed &= (rc == -EINVAL);
+ passed &= (rc == -ENOENT);
break;
default:
passed = false;
}
- if (!passed) {
- int j;
- pr_err("index %i - data error on node %s rc=%i regs=[",
- i, args.np->full_name, rc);
- for (j = 0; j < args.args_count; j++)
- printk(" %i", args.args[j]);
- printk(" ]\n");
-
- passed_all = false;
- }
+ selftest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
}
/* Check for missing list property */
rc = of_parse_phandle_with_args(np, "phandle-list-missing",
"#phandle-cells", 0, &args);
- passed_all &= (rc == -EINVAL);
+ selftest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
+ rc = of_count_phandle_with_args(np, "phandle-list-missing",
+ "#phandle-cells");
+ selftest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
/* Check for missing cells property */
rc = of_parse_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing", 0, &args);
- passed_all &= (rc == -EINVAL);
+ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ rc = of_count_phandle_with_args(np, "phandle-list",
+ "#phandle-cells-missing");
+ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for bad phandle in list */
rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells", 0, &args);
- passed_all &= (rc == -EINVAL);
+ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ rc = of_count_phandle_with_args(np, "phandle-list-bad-phandle",
+ "#phandle-cells");
+ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for incorrectly formed argument list */
rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells", 1, &args);
- passed_all &= (rc == -EINVAL);
-
- pr_info("end - %s\n", passed_all ? "PASS" : "FAIL");
+ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
+ "#phandle-cells");
+ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
static void __init of_selftest_property_match_string(void)
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
index 620264936341..592de566e72f 100644
--- a/drivers/parisc/Kconfig
+++ b/drivers/parisc/Kconfig
@@ -128,6 +128,7 @@ config SUPERIO
config CHASSIS_LCD_LED
bool "Chassis LCD and LED support"
default y
+ select VM_EVENT_COUNTERS
help
Say Y here if you want to enable support for the Heartbeat,
Disk/Network activities LEDs on some PA-RISC machines,
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index fb6a1fe21b93..9eae9834bcc7 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -430,7 +430,7 @@ static void dino_choose_irq(struct parisc_device *dev, void *ctrl)
* Cirrus 6832 Cardbus reports wrong irq on RDI Tadpole PARISC Laptop (deller@gmx.de)
* (the irqs are off-by-one, not sure yet if this is a cirrus, dino-hardware or dino-driver problem...)
*/
-static void __devinit quirk_cirrus_cardbus(struct pci_dev *dev)
+static void quirk_cirrus_cardbus(struct pci_dev *dev)
{
u8 new_irq = dev->irq - 1;
printk(KERN_INFO "PCI: Cirrus Cardbus IRQ fixup for %s, from %d to %d\n",
@@ -580,15 +580,13 @@ dino_fixup_bus(struct pci_bus *bus)
}
- DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n",
+ DBG("DEBUG %s assigning %d [%pR]\n",
dev_name(&bus->self->dev), i,
- bus->self->resource[i].start,
- bus->self->resource[i].end);
+ &bus->self->resource[i]);
WARN_ON(pci_assign_resource(bus->self, i));
- DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n",
+ DBG("DEBUG %s after assign %d [%pR]\n",
dev_name(&bus->self->dev), i,
- bus->self->resource[i].start,
- bus->self->resource[i].end);
+ &bus->self->resource[i]);
}
}
@@ -772,8 +770,7 @@ dino_bridge_init(struct dino_device *dino_dev, const char *name)
result = ccio_request_resource(dino_dev->hba.dev, &res[i]);
if (result < 0) {
printk(KERN_ERR "%s: failed to claim PCI Bus address "
- "space %d (0x%lx-0x%lx)!\n", name, i,
- (unsigned long)res[i].start, (unsigned long)res[i].end);
+ "space %d (%pR)!\n", name, i, &res[i]);
return result;
}
}
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 815db175d427..898208e4f302 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -74,10 +74,8 @@ static int hppb_probe(struct parisc_device *dev)
status = ccio_request_resource(dev, &card->mmio_region);
if(status < 0) {
- printk(KERN_ERR "%s: failed to claim HP-PB "
- "bus space (0x%08llx, 0x%08llx)\n",
- __FILE__, (unsigned long long) card->mmio_region.start,
- (unsigned long long) card->mmio_region.end);
+ printk(KERN_ERR "%s: failed to claim HP-PB bus space (%pR)\n",
+ __FILE__, &card->mmio_region);
}
return 0;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index fdd63a6a62d6..2ef7103270bb 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -34,7 +34,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/init.h> /* for __init and __devinit */
+#include <linux/init.h> /* for __init */
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/slab.h>
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 246a92f677e4..0f54ab6260df 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -212,12 +212,10 @@ pdcspath_store(struct pdcspath_entry *entry)
entry, devpath, entry->addr);
/* addr, devpath and count must be word aligned */
- if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) {
- printk(KERN_ERR "%s: an error occurred when writing to PDC.\n"
+ if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK)
+ WARN(1, KERN_ERR "%s: an error occurred when writing to PDC.\n"
"It is likely that the Stable Storage data has been corrupted.\n"
"Please check it carefully upon next reboot.\n", __func__);
- WARN_ON(1);
- }
/* kobject is already registered */
entry->ready = 2;
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 5003458980d3..ac6e8e7a02df 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -274,7 +274,7 @@ superio_init(struct pci_dev *pcidev)
else
printk(KERN_ERR PFX "USB regulator not initialized!\n");
- if (request_irq(pdev->irq, superio_interrupt, IRQF_DISABLED,
+ if (request_irq(pdev->irq, superio_interrupt, 0,
SUPERIO, (void *)sio)) {
printk(KERN_ERR PFX "could not get irq\n");
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 0e60438ebe30..24e12d4d1769 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -35,7 +35,7 @@ if PARPORT
config PARPORT_PC
tristate "PC-style hardware"
- depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \
+ depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA
---help---
You should say Y here if you have a PC-style parallel port. All
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 352f96180bc7..050773c36823 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -137,7 +137,7 @@ struct parport_operations parport_gsc_ops =
/*
* Checks for port existence, all ports support SPP MODE
*/
-static int __devinit parport_SPP_supported(struct parport *pb)
+static int parport_SPP_supported(struct parport *pb)
{
unsigned char r, w;
@@ -201,7 +201,7 @@ static int __devinit parport_SPP_supported(struct parport *pb)
* be misdetected here is rather academic.
*/
-static int __devinit parport_PS2_supported(struct parport *pb)
+static int parport_PS2_supported(struct parport *pb)
{
int ok = 0;
@@ -232,10 +232,9 @@ static int __devinit parport_PS2_supported(struct parport *pb)
/* --- Initialisation code -------------------------------- */
-struct parport *__devinit parport_gsc_probe_port (unsigned long base,
- unsigned long base_hi,
- int irq, int dma,
- struct pci_dev *dev)
+struct parport *parport_gsc_probe_port(unsigned long base,
+ unsigned long base_hi, int irq,
+ int dma, struct pci_dev *dev)
{
struct parport_gsc_private *priv;
struct parport_operations *ops;
@@ -345,9 +344,9 @@ struct parport *__devinit parport_gsc_probe_port (unsigned long base,
#define PARPORT_GSC_OFFSET 0x800
-static int __devinitdata parport_count;
+static int parport_count;
-static int __devinit parport_init_chip(struct parisc_device *dev)
+static int parport_init_chip(struct parisc_device *dev)
{
struct parport *p;
unsigned long port;
@@ -382,7 +381,7 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
return 0;
}
-static int __devexit parport_remove_chip(struct parisc_device *dev)
+static int parport_remove_chip(struct parisc_device *dev)
{
struct parport *p = dev_get_drvdata(&dev->dev);
if (p) {
@@ -415,15 +414,15 @@ static struct parisc_driver parport_driver = {
.name = "Parallel",
.id_table = parport_tbl,
.probe = parport_init_chip,
- .remove = __devexit_p(parport_remove_chip),
+ .remove = parport_remove_chip,
};
-int __devinit parport_gsc_init(void)
+int parport_gsc_init(void)
{
return register_parisc_driver(&parport_driver);
}
-static void __devexit parport_gsc_exit(void)
+static void parport_gsc_exit(void)
{
unregister_parisc_driver(&parport_driver);
}
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 5abffe58a9d2..903e1285fda0 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -953,7 +953,7 @@ static struct superio_struct *find_free_superio(void)
/* Super-IO chipset detection, Winbond, SMSC */
-static void __devinit show_parconfig_smsc37c669(int io, int key)
+static void show_parconfig_smsc37c669(int io, int key)
{
int cr1, cr4, cra, cr23, cr26, cr27;
struct superio_struct *s;
@@ -1038,7 +1038,7 @@ static void __devinit show_parconfig_smsc37c669(int io, int key)
}
-static void __devinit show_parconfig_winbond(int io, int key)
+static void show_parconfig_winbond(int io, int key)
{
int cr30, cr60, cr61, cr70, cr74, crf0;
struct superio_struct *s;
@@ -1106,8 +1106,7 @@ static void __devinit show_parconfig_winbond(int io, int key)
}
}
-static void __devinit decode_winbond(int efer, int key, int devid,
- int devrev, int oldid)
+static void decode_winbond(int efer, int key, int devid, int devrev, int oldid)
{
const char *type = "unknown";
int id, progif = 2;
@@ -1159,7 +1158,7 @@ static void __devinit decode_winbond(int efer, int key, int devid,
show_parconfig_winbond(efer, key);
}
-static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
+static void decode_smsc(int efer, int key, int devid, int devrev)
{
const char *type = "unknown";
void (*func)(int io, int key);
@@ -1193,7 +1192,7 @@ static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
}
-static void __devinit winbond_check(int io, int key)
+static void winbond_check(int io, int key)
{
int origval, devid, devrev, oldid, x_devid, x_devrev, x_oldid;
@@ -1231,7 +1230,7 @@ out:
release_region(io, 3);
}
-static void __devinit winbond_check2(int io, int key)
+static void winbond_check2(int io, int key)
{
int origval[3], devid, devrev, oldid, x_devid, x_devrev, x_oldid;
@@ -1272,7 +1271,7 @@ out:
release_region(io, 3);
}
-static void __devinit smsc_check(int io, int key)
+static void smsc_check(int io, int key)
{
int origval, id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
@@ -1316,7 +1315,7 @@ out:
}
-static void __devinit detect_and_report_winbond(void)
+static void detect_and_report_winbond(void)
{
if (verbose_probing)
printk(KERN_DEBUG "Winbond Super-IO detection, now testing ports 3F0,370,250,4E,2E ...\n");
@@ -1329,7 +1328,7 @@ static void __devinit detect_and_report_winbond(void)
winbond_check2(0x250, 0x89);
}
-static void __devinit detect_and_report_smsc(void)
+static void detect_and_report_smsc(void)
{
if (verbose_probing)
printk(KERN_DEBUG "SMSC Super-IO detection, now testing Ports 2F0, 370 ...\n");
@@ -1339,7 +1338,7 @@ static void __devinit detect_and_report_smsc(void)
smsc_check(0x370, 0x44);
}
-static void __devinit detect_and_report_it87(void)
+static void detect_and_report_it87(void)
{
u16 dev;
u8 origval, r;
@@ -1796,24 +1795,24 @@ static int parport_ECPEPP_supported(struct parport *pb)
#else /* No IEEE 1284 support */
/* Don't bother probing for modes we know we won't use. */
-static int __devinit parport_PS2_supported(struct parport *pb) { return 0; }
+static int parport_PS2_supported(struct parport *pb) { return 0; }
#ifdef CONFIG_PARPORT_PC_FIFO
static int parport_ECP_supported(struct parport *pb)
{
return 0;
}
#endif
-static int __devinit parport_EPP_supported(struct parport *pb)
+static int parport_EPP_supported(struct parport *pb)
{
return 0;
}
-static int __devinit parport_ECPEPP_supported(struct parport *pb)
+static int parport_ECPEPP_supported(struct parport *pb)
{
return 0;
}
-static int __devinit parport_ECPPS2_supported(struct parport *pb)
+static int parport_ECPPS2_supported(struct parport *pb)
{
return 0;
}
@@ -2269,9 +2268,8 @@ EXPORT_SYMBOL(parport_pc_unregister_port);
#ifdef CONFIG_PCI
/* ITE support maintained by Rich Liu <richliu@poorman.org> */
-static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
- int autodma,
- const struct parport_pc_via_data *via)
+static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
+ const struct parport_pc_via_data *via)
{
short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
u32 ite8872set;
@@ -2377,10 +2375,10 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
/* VIA 8231 support by Pavel Fedin <sonic_amiga@rambler.ru>
based on VIA 686a support code by Jeff Garzik <jgarzik@pobox.com> */
-static int __devinitdata parport_init_mode;
+static int parport_init_mode;
/* Data for two known VIA chips */
-static struct parport_pc_via_data via_686a_data __devinitdata = {
+static struct parport_pc_via_data via_686a_data = {
0x51,
0x50,
0x85,
@@ -2389,7 +2387,7 @@ static struct parport_pc_via_data via_686a_data __devinitdata = {
0xF0,
0xE6
};
-static struct parport_pc_via_data via_8231_data __devinitdata = {
+static struct parport_pc_via_data via_8231_data = {
0x45,
0x44,
0x50,
@@ -2399,9 +2397,8 @@ static struct parport_pc_via_data via_8231_data __devinitdata = {
0xF6
};
-static int __devinit sio_via_probe(struct pci_dev *pdev, int autoirq,
- int autodma,
- const struct parport_pc_via_data *via)
+static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
+ const struct parport_pc_via_data *via)
{
u8 tmp, tmp2, siofunc;
u8 ppcontrol = 0;
@@ -2575,7 +2572,7 @@ static struct parport_pc_superio {
int (*probe) (struct pci_dev *pdev, int autoirq, int autodma,
const struct parport_pc_via_data *via);
const struct parport_pc_via_data *via;
-} parport_pc_superio_info[] __devinitdata = {
+} parport_pc_superio_info[] = {
{ sio_via_probe, &via_686a_data, },
{ sio_via_probe, &via_8231_data, },
{ sio_ite_8872_probe, NULL, },
@@ -2860,7 +2857,7 @@ static int parport_pc_pci_probe(struct pci_dev *dev,
return -ENODEV;
}
-static void __devexit parport_pc_pci_remove(struct pci_dev *dev)
+static void parport_pc_pci_remove(struct pci_dev *dev)
{
struct pci_parport_data *data = pci_get_drvdata(dev);
int i;
@@ -2879,7 +2876,7 @@ static struct pci_driver parport_pc_pci_driver = {
.name = "parport_pc",
.id_table = parport_pc_pci_tbl,
.probe = parport_pc_pci_probe,
- .remove = __devexit_p(parport_pc_pci_remove),
+ .remove = parport_pc_pci_remove,
};
static int __init parport_pc_init_superio(int autoirq, int autodma)
@@ -2983,7 +2980,7 @@ static struct pnp_driver parport_pc_pnp_driver = {
static struct pnp_driver parport_pc_pnp_driver;
#endif /* CONFIG_PNP */
-static int __devinit parport_pc_platform_probe(struct platform_device *pdev)
+static int parport_pc_platform_probe(struct platform_device *pdev)
{
/* Always succeed, the actual probing is done in
* parport_pc_probe_port(). */
@@ -2999,7 +2996,7 @@ static struct platform_driver parport_pc_platform_driver = {
};
/* This is called by parport_pc_find_nonpci_ports (in asm/parport.h) */
-static int __devinit __attribute__((unused))
+static int __attribute__((unused))
parport_pc_find_isa_ports(int autoirq, int autodma)
{
int count = 0;
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 1631eeaf440e..1b8bdb7e9bf4 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -63,6 +63,7 @@ enum parport_pc_pci_cards {
timedia_9079b,
timedia_9079c,
wch_ch353_2s1p,
+ sunix_2s1p,
};
/* each element directly indexed from enum list, above */
@@ -87,7 +88,8 @@ struct parport_pc_pci {
struct parport_pc_pci *card, int failed);
};
-static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *par, int autoirq, int autodma)
+static int netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *par,
+ int autoirq, int autodma)
{
/* the rule described below doesn't hold for this device */
if (dev->device == PCI_DEVICE_ID_NETMOS_9835 &&
@@ -111,7 +113,7 @@ static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc
return 0;
}
-static struct parport_pc_pci cards[] __devinitdata = {
+static struct parport_pc_pci cards[] = {
/* titan_110l */ { 1, { { 3, -1 }, } },
/* titan_210l */ { 1, { { 3, -1 }, } },
/* netmos_9xx5_combo */ { 1, { { 2, -1 }, }, netmos_parallel_init },
@@ -147,8 +149,12 @@ static struct parport_pc_pci cards[] __devinitdata = {
/* timedia_9079b */ { 1, { { 2, 3 }, } },
/* timedia_9079c */ { 1, { { 2, 3 }, } },
/* wch_ch353_2s1p*/ { 1, { { 2, -1}, } },
+ /* sunix_2s1p */ { 1, { { 3, -1 }, } },
};
+#define PCI_VENDOR_ID_SUNIX 0x1fd4
+#define PCI_DEVICE_ID_SUNIX_1999 0x1999
+
static struct pci_device_id parport_serial_pci_tbl[] = {
/* PCI cards */
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_110L,
@@ -245,8 +251,18 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
{ 0x1409, 0x7168, 0x1409, 0xb079, 0, 0, timedia_9079a },
{ 0x1409, 0x7168, 0x1409, 0xc079, 0, 0, timedia_9079b },
{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
+
/* WCH CARDS */
{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
+
+ /*
+ * More SUNIX variations. At least one of these has part number
+ * '5079A' but subdevice 0x102. That board reports 0x0708 as
+ * its PCI Class.
+ */
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+ 0x0102, 0, 0, sunix_2s1p },
+
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
@@ -258,7 +274,7 @@ MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
* Cards not tested are marked n/t
* If you have one of these cards and it works for you, please tell me..
*/
-static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
+static struct pciserial_board pci_parport_serial_boards[] = {
[titan_110l] = {
.flags = FL_BASE1 | FL_BASE_BARS,
.num_ports = 1,
@@ -469,6 +485,12 @@ static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [sunix_2s1p] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
};
struct parport_serial_private {
@@ -479,8 +501,7 @@ struct parport_serial_private {
};
/* Register the serial port(s) of a PCI card. */
-static int __devinit serial_register (struct pci_dev *dev,
- const struct pci_device_id *id)
+static int serial_register(struct pci_dev *dev, const struct pci_device_id *id)
{
struct parport_serial_private *priv = pci_get_drvdata (dev);
struct pciserial_board *board;
@@ -501,8 +522,7 @@ static int __devinit serial_register (struct pci_dev *dev,
}
/* Register the parallel port(s) of a PCI card. */
-static int __devinit parport_register (struct pci_dev *dev,
- const struct pci_device_id *id)
+static int parport_register(struct pci_dev *dev, const struct pci_device_id *id)
{
struct parport_pc_pci *card;
struct parport_serial_private *priv = pci_get_drvdata (dev);
@@ -563,8 +583,8 @@ static int __devinit parport_register (struct pci_dev *dev,
return 0;
}
-static int __devinit parport_serial_pci_probe (struct pci_dev *dev,
- const struct pci_device_id *id)
+static int parport_serial_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct parport_serial_private *priv;
int err;
@@ -599,7 +619,7 @@ static int __devinit parport_serial_pci_probe (struct pci_dev *dev,
return 0;
}
-static void __devexit parport_serial_pci_remove (struct pci_dev *dev)
+static void parport_serial_pci_remove(struct pci_dev *dev)
{
struct parport_serial_private *priv = pci_get_drvdata (dev);
int i;
@@ -664,7 +684,7 @@ static struct pci_driver parport_serial_pci_driver = {
.name = "parport_serial",
.id_table = parport_serial_pci_tbl,
.probe = parport_serial_pci_probe,
- .remove = __devexit_p(parport_serial_pci_remove),
+ .remove = parport_serial_pci_remove,
#ifdef CONFIG_PM
.suspend = parport_serial_pci_suspend,
.resume = parport_serial_pci_resume,
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 983a2d2df659..5c4b6a1db6ca 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -265,7 +265,7 @@ static struct parport_operations parport_sunbpp_ops =
.owner = THIS_MODULE,
};
-static int __devinit bpp_probe(struct platform_device *op)
+static int bpp_probe(struct platform_device *op)
{
struct parport_operations *ops;
struct bpp_regs __iomem *regs;
@@ -330,7 +330,7 @@ out_unmap:
return err;
}
-static int __devexit bpp_remove(struct platform_device *op)
+static int bpp_remove(struct platform_device *op)
{
struct parport *p = dev_get_drvdata(&op->dev);
struct parport_operations *ops = p->ops;
@@ -367,7 +367,7 @@ static struct platform_driver bpp_sbus_driver = {
.of_match_table = bpp_match,
},
.probe = bpp_probe,
- .remove = __devexit_p(bpp_remove),
+ .remove = bpp_remove,
};
module_platform_driver(bpp_sbus_driver);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3d6d4fd1e3c5..a951c22921d1 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -734,34 +734,24 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
*/
static int acpiphp_bus_add(struct acpiphp_func *func)
{
- acpi_handle phandle;
- struct acpi_device *device, *pdevice;
+ struct acpi_device *device;
int ret_val;
- acpi_get_parent(func->handle, &phandle);
- if (acpi_bus_get_device(phandle, &pdevice)) {
- dbg("no parent device, assuming NULL\n");
- pdevice = NULL;
- }
if (!acpi_bus_get_device(func->handle, &device)) {
dbg("bus exists... trim\n");
/* this shouldn't be in here, so remove
* the bus then re-add it...
*/
- ret_val = acpi_bus_trim(device, 1);
- dbg("acpi_bus_trim return %x\n", ret_val);
+ acpi_bus_trim(device);
}
- ret_val = acpi_bus_add(&device, pdevice, func->handle,
- ACPI_BUS_TYPE_DEVICE);
- if (ret_val) {
- dbg("error adding bus, %x\n",
- -ret_val);
- goto acpiphp_bus_add_out;
- }
- ret_val = acpi_bus_start(device);
+ ret_val = acpi_bus_scan(func->handle);
+ if (!ret_val)
+ ret_val = acpi_bus_get_device(func->handle, &device);
+
+ if (ret_val)
+ dbg("error adding bus, %x\n", -ret_val);
-acpiphp_bus_add_out:
return ret_val;
}
@@ -781,11 +771,8 @@ static int acpiphp_bus_trim(acpi_handle handle)
return retval;
}
- retval = acpi_bus_trim(device, 1);
- if (retval)
- err("cannot remove from acpi list\n");
-
- return retval;
+ acpi_bus_trim(device);
+ return 0;
}
static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
@@ -1130,8 +1117,7 @@ static int acpiphp_configure_bridge (acpi_handle handle)
static void handle_bridge_insertion(acpi_handle handle, u32 type)
{
- struct acpi_device *device, *pdevice;
- acpi_handle phandle;
+ struct acpi_device *device;
if ((type != ACPI_NOTIFY_BUS_CHECK) &&
(type != ACPI_NOTIFY_DEVICE_CHECK)) {
@@ -1139,17 +1125,15 @@ static void handle_bridge_insertion(acpi_handle handle, u32 type)
return;
}
- acpi_get_parent(handle, &phandle);
- if (acpi_bus_get_device(phandle, &pdevice)) {
- dbg("no parent device, assuming NULL\n");
- pdevice = NULL;
- }
- if (acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE)) {
+ if (acpi_bus_scan(handle)) {
err("cannot add bridge to acpi list\n");
return;
}
- if (!acpiphp_configure_bridge(handle) &&
- !acpi_bus_start(device))
+ if (acpi_bus_get_device(handle, &device)) {
+ err("ACPI device object missing\n");
+ return;
+ }
+ if (!acpiphp_configure_bridge(handle))
add_bridge(handle);
else
err("cannot configure and start bridge\n");
@@ -1234,6 +1218,8 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
handle = hp_work->handle;
type = hp_work->type;
+ acpi_scan_lock_acquire();
+
if (acpi_bus_get_device(handle, &device)) {
/* This bridge must have just been physically inserted */
handle_bridge_insertion(handle, type);
@@ -1311,6 +1297,7 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
}
out:
+ acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
}
@@ -1357,6 +1344,8 @@ static void _handle_hotplug_event_func(struct work_struct *work)
func = (struct acpiphp_func *)context;
+ acpi_scan_lock_acquire();
+
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
@@ -1387,6 +1376,7 @@ static void _handle_hotplug_event_func(struct work_struct *work)
break;
}
+ acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_func */
}
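/*
 * Illustrative sketch, not part of the patch above: the pattern the hotplug
 * paths switch to here -- take the ACPI scan lock, rescan the namespace
 * below a handle with acpi_bus_scan(), then look the struct acpi_device up
 * again.  foo_rescan() is a hypothetical helper.
 */
static int foo_rescan(acpi_handle handle)
{
	struct acpi_device *adev;
	int ret;

	acpi_scan_lock_acquire();

	ret = acpi_bus_scan(handle);
	if (!ret)
		ret = acpi_bus_get_device(handle, &adev);

	acpi_scan_lock_release();
	return ret;
}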
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 26ffd3e3fb74..2c113de94323 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -44,7 +44,6 @@ extern bool pciehp_poll_mode;
extern int pciehp_poll_time;
extern bool pciehp_debug;
extern bool pciehp_force;
-extern struct workqueue_struct *pciehp_wq;
#define dbg(format, arg...) \
do { \
@@ -78,6 +77,7 @@ struct slot {
struct hotplug_slot *hotplug_slot;
struct delayed_work work; /* work for button event */
struct mutex lock;
+ struct workqueue_struct *wq;
};
struct event_info {
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 916bf4f53aba..939bd1d4b5b1 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -42,7 +42,6 @@ bool pciehp_debug;
bool pciehp_poll_mode;
int pciehp_poll_time;
bool pciehp_force;
-struct workqueue_struct *pciehp_wq;
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -340,18 +339,13 @@ static int __init pcied_init(void)
{
int retval = 0;
- pciehp_wq = alloc_workqueue("pciehp", 0, 0);
- if (!pciehp_wq)
- return -ENOMEM;
-
pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
- if (retval) {
- destroy_workqueue(pciehp_wq);
+ if (retval)
dbg("Failure to register service\n");
- }
+
return retval;
}
@@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void)
{
dbg("unload_pciehpd()\n");
pcie_port_service_unregister(&hpdriver_portdrv);
- destroy_workqueue(pciehp_wq);
info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 27f44295a657..38f018679175 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
info->p_slot = p_slot;
INIT_WORK(&info->work, interrupt_event_handler);
- queue_work(pciehp_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
return 0;
}
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
kfree(info);
goto out;
}
- queue_work(pciehp_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
out:
mutex_unlock(&p_slot->lock);
}
@@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)
if (ATTN_LED(ctrl))
pciehp_set_attention_status(p_slot, 0);
- queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
+ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
else
p_slot->state = POWERON_STATE;
- queue_work(pciehp_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
}
static void interrupt_event_handler(struct work_struct *work)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 13b2eaf7ba43..5127f3f41821 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -773,23 +773,32 @@ static void pcie_shutdown_notification(struct controller *ctrl)
static int pcie_init_slot(struct controller *ctrl)
{
struct slot *slot;
+ char name[32];
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
+ snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
+ slot->wq = alloc_workqueue(name, 0, 0);
+ if (!slot->wq)
+ goto abort;
+
slot->ctrl = ctrl;
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
ctrl->slot = slot;
return 0;
+abort:
+ kfree(slot);
+ return -ENOMEM;
}
static void pcie_cleanup_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
cancel_delayed_work(&slot->work);
- flush_workqueue(pciehp_wq);
+ destroy_workqueue(slot->wq);
kfree(slot);
}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index dee68e0698e1..7db249a25016 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -172,25 +172,6 @@ error:
return -ENOMEM;
}
-static int __init init_pci_slots(void)
-{
- struct zpci_dev *zdev;
- int device = 0;
-
- /*
- * Create a structure for each slot, and register that slot
- * with the pci_hotplug subsystem.
- */
- mutex_lock(&zpci_list_lock);
- list_for_each_entry(zdev, &zpci_list, entry) {
- init_pci_slot(zdev);
- device++;
- }
-
- mutex_unlock(&zpci_list_lock);
- return (device) ? 0 : -ENODEV;
-}
-
static void exit_pci_slot(struct zpci_dev *zdev)
{
struct list_head *tmp, *n;
@@ -205,6 +186,26 @@ static void exit_pci_slot(struct zpci_dev *zdev)
}
}
+static struct pci_hp_callback_ops hp_ops = {
+ .create_slot = init_pci_slot,
+ .remove_slot = exit_pci_slot,
+};
+
+static void __init init_pci_slots(void)
+{
+ struct zpci_dev *zdev;
+
+ /*
+ * Create a structure for each slot, and register that slot
+ * with the pci_hotplug subsystem.
+ */
+ mutex_lock(&zpci_list_lock);
+ list_for_each_entry(zdev, &zpci_list, entry) {
+ init_pci_slot(zdev);
+ }
+ mutex_unlock(&zpci_list_lock);
+}
+
static void __exit exit_pci_slots(void)
{
struct list_head *tmp, *n;
@@ -224,28 +225,19 @@ static void __exit exit_pci_slots(void)
static int __init pci_hotplug_s390_init(void)
{
- /*
- * Do specific initialization stuff for your driver here
- * like initializing your controller hardware (if any) and
- * determining the number of slots you have in the system
- * right now.
- */
-
- if (!pci_probe)
+ if (!s390_pci_probe)
return -EOPNOTSUPP;
- /* register callbacks for slot handling from arch code */
- mutex_lock(&zpci_list_lock);
- hotplug_ops.create_slot = init_pci_slot;
- hotplug_ops.remove_slot = exit_pci_slot;
- mutex_unlock(&zpci_list_lock);
- pr_info("registered hotplug slot callbacks\n");
- return init_pci_slots();
+ zpci_register_hp_ops(&hp_ops);
+ init_pci_slots();
+
+ return 0;
}
static void __exit pci_hotplug_s390_exit(void)
{
exit_pci_slots();
+ zpci_deregister_hp_ops();
}
module_init(pci_hotplug_s390_init);
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index f64ca92253da..574421bc2fa6 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -412,7 +412,6 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
if (SN_ACPI_BASE_SUPPORT() && ssdt) {
unsigned long long adr;
struct acpi_device *pdevice;
- struct acpi_device *device;
acpi_handle phandle;
acpi_handle chandle = NULL;
acpi_handle rethandle;
@@ -426,6 +425,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
pdevice = NULL;
}
+ acpi_scan_lock_acquire();
/*
* Walk the rootbus node's immediate children looking for
* the slot's device node(s). There can be more than
@@ -448,20 +448,18 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
if (ACPI_SUCCESS(ret) &&
(adr>>16) == (slot->device_num + 1)) {
- ret = acpi_bus_add(&device, pdevice, chandle,
- ACPI_BUS_TYPE_DEVICE);
+ ret = acpi_bus_scan(chandle);
if (ACPI_FAILURE(ret)) {
- printk(KERN_ERR "%s: acpi_bus_add "
+ printk(KERN_ERR "%s: acpi_bus_scan "
"failed (0x%x) for slot %d "
"func %d\n", __func__,
ret, (int)(adr>>16),
(int)(adr&0xffff));
/* try to continue on */
- } else {
- acpi_bus_start(device);
}
}
}
+ acpi_scan_lock_release();
}
/* Call the driver for the new device */
@@ -512,6 +510,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
/* Get the rootbus node pointer */
phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
+ acpi_scan_lock_acquire();
/*
* Walk the rootbus node's immediate children looking for
* the slot's device node(s). There can be more than
@@ -539,10 +538,10 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
ret = acpi_bus_get_device(chandle,
&device);
if (ACPI_SUCCESS(ret))
- acpi_bus_trim(device, 1);
+ acpi_bus_trim(device);
}
}
-
+ acpi_scan_lock_release();
}
/* Free the SN resources assigned to the Linux device.*/
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index ca64932e658b..b849f995075a 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -46,8 +46,6 @@
extern bool shpchp_poll_mode;
extern int shpchp_poll_time;
extern bool shpchp_debug;
-extern struct workqueue_struct *shpchp_wq;
-extern struct workqueue_struct *shpchp_ordered_wq;
#define dbg(format, arg...) \
do { \
@@ -91,6 +89,7 @@ struct slot {
struct list_head slot_list;
struct delayed_work work; /* work for button event */
struct mutex lock;
+ struct workqueue_struct *wq;
u8 hp_slot;
};
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index b6de307248e4..3100c52c837c 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -39,8 +39,6 @@
bool shpchp_debug;
bool shpchp_poll_mode;
int shpchp_poll_time;
-struct workqueue_struct *shpchp_wq;
-struct workqueue_struct *shpchp_ordered_wq;
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -129,6 +127,14 @@ static int init_slots(struct controller *ctrl)
slot->device = ctrl->slot_device_offset + i;
slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
+
+ snprintf(name, sizeof(name), "shpchp-%d", slot->number);
+ slot->wq = alloc_workqueue(name, 0, 0);
+ if (!slot->wq) {
+ retval = -ENOMEM;
+ goto error_info;
+ }
+
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
@@ -148,7 +154,7 @@ static int init_slots(struct controller *ctrl)
if (retval) {
ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
retval);
- goto error_info;
+ goto error_slotwq;
}
get_power_status(hotplug_slot, &info->power_status);
@@ -160,6 +166,8 @@ static int init_slots(struct controller *ctrl)
}
return 0;
+error_slotwq:
+ destroy_workqueue(slot->wq);
error_info:
kfree(info);
error_hpslot:
@@ -180,8 +188,7 @@ void cleanup_slots(struct controller *ctrl)
slot = list_entry(tmp, struct slot, slot_list);
list_del(&slot->slot_list);
cancel_delayed_work(&slot->work);
- flush_workqueue(shpchp_wq);
- flush_workqueue(shpchp_ordered_wq);
+ destroy_workqueue(slot->wq);
pci_hp_deregister(slot->hotplug_slot);
}
}
@@ -364,25 +371,12 @@ static struct pci_driver shpc_driver = {
static int __init shpcd_init(void)
{
- int retval = 0;
-
- shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
- if (!shpchp_wq)
- return -ENOMEM;
-
- shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
- if (!shpchp_ordered_wq) {
- destroy_workqueue(shpchp_wq);
- return -ENOMEM;
- }
+ int retval;
retval = pci_register_driver(&shpc_driver);
dbg("%s: pci_register_driver = %d\n", __func__, retval);
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
- if (retval) {
- destroy_workqueue(shpchp_ordered_wq);
- destroy_workqueue(shpchp_wq);
- }
+
return retval;
}
@@ -390,8 +384,6 @@ static void __exit shpcd_cleanup(void)
{
dbg("unload_shpchpd()\n");
pci_unregister_driver(&shpc_driver);
- destroy_workqueue(shpchp_ordered_wq);
- destroy_workqueue(shpchp_wq);
info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index f9b5a52e4115..58499277903a 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
info->p_slot = p_slot;
INIT_WORK(&info->work, interrupt_event_handler);
- queue_work(shpchp_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
return 0;
}
@@ -453,7 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
kfree(info);
goto out;
}
- queue_work(shpchp_ordered_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
out:
mutex_unlock(&p_slot->lock);
}
@@ -501,7 +501,7 @@ static void handle_button_press_event(struct slot *p_slot)
p_slot->hpc_ops->green_led_blink(p_slot);
p_slot->hpc_ops->set_attention_status(p_slot, 0);
- queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ);
+ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index bafd2bbcaf65..c18e5bf444fa 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -739,7 +739,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf);
/**
* pci_sriov_set_totalvfs -- reduce the TotalVFs available
* @dev: the PCI PF device
- * numvfs: number that should be used for TotalVFs supported
+ * @numvfs: number that should be used for TotalVFs supported
*
* Should be called from PF driver's probe routine with
* device's mutex held.
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 5099636a6e5f..00cc78c7aa04 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -845,6 +845,32 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
}
EXPORT_SYMBOL(pci_enable_msi_block);
+int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
+{
+ int ret, pos, nvec;
+ u16 msgctl;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+ ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+
+ if (maxvec)
+ *maxvec = ret;
+
+ do {
+ nvec = ret;
+ ret = pci_enable_msi_block(dev, nvec);
+ } while (ret > 0);
+
+ if (ret < 0)
+ return ret;
+ return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msi_block_auto);
+
void pci_msi_shutdown(struct pci_dev *dev)
{
struct msi_desc *desc;
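/*
 * Illustrative sketch, not part of the patch above: how a driver might use
 * the pci_enable_msi_block_auto() helper added here.  It starts from the
 * largest block the device advertises and lets the core retry with fewer
 * vectors until allocation succeeds, returning the number obtained or a
 * negative errno.  foo_request_vectors() is a hypothetical caller.
 */
static int foo_request_vectors(struct pci_dev *pdev)
{
	unsigned int maxvec;
	int nvec;

	nvec = pci_enable_msi_block_auto(pdev, &maxvec);
	if (nvec < 0)
		return nvec;	/* no MSI; caller may fall back to INTx */

	dev_info(&pdev->dev, "using %d of up to %u MSI vectors\n",
		 nvec, maxvec);
	/* vectors are pdev->irq ... pdev->irq + nvec - 1 */
	return nvec;
}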
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1af4008182fd..e407c61559ca 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -283,7 +283,6 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.choose_state = acpi_pci_choose_state,
- .can_wakeup = acpi_pci_can_wakeup,
.sleep_wake = acpi_pci_sleep_wake,
.run_wake = acpi_pci_run_wake,
};
@@ -321,10 +320,65 @@ static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
return 0;
}
+static void pci_acpi_setup(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+ acpi_status status;
+ acpi_handle dummy;
+
+ /*
+ * Evaluate and parse _PRT, if it exists. This code allows parsing of
+ * _PRT objects within the scope of non-bridge devices. Note that
+ * _PRTs within the scope of a PCI bridge assume the bridge's
+ * subordinate bus number.
+ *
+ * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
+ */
+ status = acpi_get_handle(handle, METHOD_NAME__PRT, &dummy);
+ if (ACPI_SUCCESS(status)) {
+ unsigned char bus;
+
+ bus = pci_dev->subordinate ?
+ pci_dev->subordinate->number : pci_dev->bus->number;
+ acpi_pci_irq_add_prt(handle, pci_domain_nr(pci_dev->bus), bus);
+ }
+
+ if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
+ return;
+
+ device_set_wakeup_capable(dev, true);
+ acpi_pci_sleep_wake(pci_dev, false);
+
+ pci_acpi_add_pm_notifier(adev, pci_dev);
+ if (adev->wakeup.flags.run_wake)
+ device_set_run_wake(dev, true);
+}
+
+static void pci_acpi_cleanup(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+
+ if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) {
+ device_set_wakeup_capable(dev, false);
+ device_set_run_wake(dev, false);
+ pci_acpi_remove_pm_notifier(adev);
+ }
+
+ if (pci_dev->subordinate)
+ acpi_pci_irq_del_prt(pci_domain_nr(pci_dev->bus),
+ pci_dev->subordinate->number);
+}
+
static struct acpi_bus_type acpi_pci_bus = {
.bus = &pci_bus_type,
.find_device = acpi_pci_find_device,
.find_bridge = acpi_pci_find_root_bridge,
+ .setup = pci_acpi_setup,
+ .cleanup = pci_acpi_cleanup,
};
static int __init acpi_pci_init(void)
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 05b78b16d20b..9c6e9bb674ec 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -422,77 +422,60 @@ static ssize_t sriov_numvfs_show(struct device *dev,
}
/*
- * num_vfs > 0; number of vfs to enable
- * num_vfs = 0; disable all vfs
+ * num_vfs > 0; number of VFs to enable
+ * num_vfs = 0; disable all VFs
*
* Note: SRIOV spec doesn't allow partial VF
- * disable, so its all or none.
+ * disable, so it's all or none.
*/
static ssize_t sriov_numvfs_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int num_vfs_enabled = 0;
- int num_vfs;
- int ret = 0;
- u16 total;
+ int ret;
+ u16 num_vfs;
- if (kstrtoint(buf, 0, &num_vfs) < 0)
- return -EINVAL;
+ ret = kstrtou16(buf, 0, &num_vfs);
+ if (ret < 0)
+ return ret;
+
+ if (num_vfs > pci_sriov_get_totalvfs(pdev))
+ return -ERANGE;
+
+ if (num_vfs == pdev->sriov->num_VFs)
+ return count; /* no change */
/* is PF driver loaded w/callback */
if (!pdev->driver || !pdev->driver->sriov_configure) {
- dev_info(&pdev->dev,
- "Driver doesn't support SRIOV configuration via sysfs\n");
+ dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
return -ENOSYS;
}
- /* if enabling vf's ... */
- total = pci_sriov_get_totalvfs(pdev);
- /* Requested VFs to enable < totalvfs and none enabled already */
- if ((num_vfs > 0) && (num_vfs <= total)) {
- if (pdev->sriov->num_VFs == 0) {
- num_vfs_enabled =
- pdev->driver->sriov_configure(pdev, num_vfs);
- if ((num_vfs_enabled >= 0) &&
- (num_vfs_enabled != num_vfs)) {
- dev_warn(&pdev->dev,
- "Only %d VFs enabled\n",
- num_vfs_enabled);
- return count;
- } else if (num_vfs_enabled < 0)
- /* error code from driver callback */
- return num_vfs_enabled;
- } else if (num_vfs == pdev->sriov->num_VFs) {
- dev_warn(&pdev->dev,
- "%d VFs already enabled; no enable action taken\n",
- num_vfs);
- return count;
- } else {
- dev_warn(&pdev->dev,
- "%d VFs already enabled. Disable before enabling %d VFs\n",
- pdev->sriov->num_VFs, num_vfs);
- return -EINVAL;
- }
+ if (num_vfs == 0) {
+ /* disable VFs */
+ ret = pdev->driver->sriov_configure(pdev, 0);
+ if (ret < 0)
+ return ret;
+ return count;
}
- /* disable vfs */
- if (num_vfs == 0) {
- if (pdev->sriov->num_VFs != 0) {
- ret = pdev->driver->sriov_configure(pdev, 0);
- return ret ? ret : count;
- } else {
- dev_warn(&pdev->dev,
- "All VFs disabled; no disable action taken\n");
- return count;
- }
+ /* enable VFs */
+ if (pdev->sriov->num_VFs) {
+ dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
+ pdev->sriov->num_VFs, num_vfs);
+ return -EBUSY;
}
- dev_err(&pdev->dev,
- "Invalid value for number of VFs to enable: %d\n", num_vfs);
+ ret = pdev->driver->sriov_configure(pdev, num_vfs);
+ if (ret < 0)
+ return ret;
- return -EINVAL;
+ if (ret != num_vfs)
+ dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
+ num_vfs, ret);
+
+ return count;
}
static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
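/*
 * Illustrative sketch, not part of the patch above: the ->sriov_configure()
 * callback that sriov_numvfs_store() calls.  It must return the number of
 * VFs actually enabled (0 when disabling) or a negative errno; the sysfs
 * code warns when that count differs from the request.  The foo_* names
 * are hypothetical.
 */
static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (num_vfs == 0) {
		pci_disable_sriov(pdev);
		return 0;
	}

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		return ret;

	return num_vfs;
}

static struct pci_driver foo_driver = {
	.name			= "foo",
	.sriov_configure	= foo_sriov_configure,
	/* .id_table, .probe and .remove omitted from this sketch */
};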
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5cb5820fae40..0c4f641b7be1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -450,7 +450,7 @@ static struct pci_platform_pm_ops *pci_platform_pm;
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
if (!ops->is_manageable || !ops->set_state || !ops->choose_state
- || !ops->sleep_wake || !ops->can_wakeup)
+ || !ops->sleep_wake)
return -EINVAL;
pci_platform_pm = ops;
return 0;
@@ -473,11 +473,6 @@ static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}
-static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
-{
- return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
-}
-
static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
return pci_platform_pm ?
@@ -1985,25 +1980,6 @@ void pci_pm_init(struct pci_dev *dev)
}
}
-/**
- * platform_pci_wakeup_init - init platform wakeup if present
- * @dev: PCI device
- *
- * Some devices don't have PCI PM caps but can still generate wakeup
- * events through platform methods (like ACPI events). If @dev supports
- * platform wakeup events, set the device flag to indicate as much. This
- * may be redundant if the device also supports PCI PM caps, but double
- * initialization should be safe in that case.
- */
-void platform_pci_wakeup_init(struct pci_dev *dev)
-{
- if (!platform_pci_can_wakeup(dev))
- return;
-
- device_set_wakeup_capable(&dev->dev, true);
- platform_pci_sleep_wake(dev, false);
-}
-
static void pci_add_saved_cap(struct pci_dev *pci_dev,
struct pci_cap_saved_state *new_cap)
{
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e8518292826f..adfd172c5b9b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -43,9 +43,6 @@ int pci_probe_reset_function(struct pci_dev *dev);
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
*
- * @can_wakeup: returns 'true' if given device is capable of waking up the
- * system from a sleeping state
- *
* @sleep_wake: enables/disables the system wake up capability of given device
*
* @run_wake: enables/disables the platform to generate run-time wake-up events
@@ -59,7 +56,6 @@ struct pci_platform_pm_ops {
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
pci_power_t (*choose_state)(struct pci_dev *dev);
- bool (*can_wakeup)(struct pci_dev *dev);
int (*sleep_wake)(struct pci_dev *dev, bool enable);
int (*run_wake)(struct pci_dev *dev, bool enable);
};
@@ -74,7 +70,6 @@ extern void pci_wakeup_bus(struct pci_bus *bus);
extern void pci_config_pm_runtime_get(struct pci_dev *dev);
extern void pci_config_pm_runtime_put(struct pci_dev *dev);
extern void pci_pm_init(struct pci_dev *dev);
-extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 6c8bc5809787..fde4a32a0295 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -82,4 +82,4 @@ endchoice
config PCIE_PME
def_bool y
- depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI
+ depends on PCIEPORTBUS && PM_RUNTIME && ACPI
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 421bbc5fee32..564d97f94b6c 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -630,6 +630,7 @@ static void aer_recover_work_func(struct work_struct *work)
continue;
}
do_recovery(pdev, entry.severity);
+ pci_dev_put(pdev);
}
}
#endif
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 3ea51736f18d..5ab14251839d 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -23,6 +23,9 @@
#include "aerdrv.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/ras.h>
+
#define AER_AGENT_RECEIVER 0
#define AER_AGENT_REQUESTER 1
#define AER_AGENT_COMPLETER 2
@@ -121,12 +124,11 @@ static const char *aer_agent_string[] = {
"Transmitter ID"
};
-static void __aer_print_error(const char *prefix,
+static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
int i, status;
const char *errmsg = NULL;
-
status = (info->status & ~info->mask);
for (i = 0; i < 32; i++) {
@@ -141,26 +143,22 @@ static void __aer_print_error(const char *prefix,
aer_uncorrectable_error_string[i] : NULL;
if (errmsg)
- printk("%s"" [%2d] %-22s%s\n", prefix, i, errmsg,
+ dev_err(&dev->dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
else
- printk("%s"" [%2d] Unknown Error Bit%s\n", prefix, i,
- info->first_error == i ? " (First)" : "");
+ dev_err(&dev->dev, " [%2d] Unknown Error Bit%s\n",
+ i, info->first_error == i ? " (First)" : "");
}
}
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
int id = ((dev->bus->number << 8) | dev->devfn);
- char prefix[44];
-
- snprintf(prefix, sizeof(prefix), "%s%s %s: ",
- (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR,
- dev_driver_string(&dev->dev), dev_name(&dev->dev));
if (info->status == 0) {
- printk("%s""PCIe Bus Error: severity=%s, type=Unaccessible, "
- "id=%04x(Unregistered Agent ID)\n", prefix,
+ dev_err(&dev->dev,
+ "PCIe Bus Error: severity=%s, type=Unaccessible, "
+ "id=%04x(Unregistered Agent ID)\n",
aer_error_severity_string[info->severity], id);
} else {
int layer, agent;
@@ -168,22 +166,24 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- printk("%s""PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
- prefix, aer_error_severity_string[info->severity],
+ dev_err(&dev->dev,
+ "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ aer_error_severity_string[info->severity],
aer_error_layer[layer], id, aer_agent_string[agent]);
- printk("%s"" device [%04x:%04x] error status/mask=%08x/%08x\n",
- prefix, dev->vendor, dev->device,
+ dev_err(&dev->dev,
+ " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ dev->vendor, dev->device,
info->status, info->mask);
- __aer_print_error(prefix, info);
+ __aer_print_error(dev, info);
if (info->tlp_header_valid) {
unsigned char *tlp = (unsigned char *) &info->tlp;
- printk("%s"" TLP Header:"
+ dev_err(&dev->dev, " TLP Header:"
" %02x%02x%02x%02x %02x%02x%02x%02x"
" %02x%02x%02x%02x %02x%02x%02x%02x\n",
- prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+ *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
*(tlp + 11), *(tlp + 10), *(tlp + 9),
*(tlp + 8), *(tlp + 15), *(tlp + 14),
@@ -192,8 +192,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
}
if (info->id && info->error_dev_num > 1 && info->id == id)
- printk("%s"" Error of this Agent(%04x) is reported first\n",
- prefix, id);
+ dev_err(&dev->dev,
+ " Error of this Agent(%04x) is reported first\n",
+ id);
+ trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
+ info->severity);
}
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
@@ -217,7 +220,7 @@ int cper_severity_to_aer(int cper_severity)
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);
-void cper_print_aer(const char *prefix, int cper_severity,
+void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
struct aer_capability_regs *aer)
{
int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
@@ -239,25 +242,27 @@ void cper_print_aer(const char *prefix, int cper_severity,
}
layer = AER_GET_LAYER_ERROR(aer_severity, status);
agent = AER_GET_AGENT(aer_severity, status);
- printk("%s""aer_status: 0x%08x, aer_mask: 0x%08x\n",
- prefix, status, mask);
+ dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
+ status, mask);
cper_print_bits(prefix, status, status_strs, status_strs_size);
- printk("%s""aer_layer=%s, aer_agent=%s\n", prefix,
+ dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
aer_error_layer[layer], aer_agent_string[agent]);
if (aer_severity != AER_CORRECTABLE)
- printk("%s""aer_uncor_severity: 0x%08x\n",
- prefix, aer->uncor_severity);
+ dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
+ aer->uncor_severity);
if (tlp_header_valid) {
const unsigned char *tlp;
tlp = (const unsigned char *)&aer->header_log;
- printk("%s""aer_tlp_header:"
+ dev_err(&dev->dev, "aer_tlp_header:"
" %02x%02x%02x%02x %02x%02x%02x%02x"
" %02x%02x%02x%02x %02x%02x%02x%02x\n",
- prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+ *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
*(tlp + 11), *(tlp + 10), *(tlp + 9),
*(tlp + 8), *(tlp + 15), *(tlp + 14),
*(tlp + 13), *(tlp + 12));
}
+ trace_aer_event(dev_name(&dev->dev), (status & ~mask),
+ aer_severity);
}
#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b52630b8eada..8474b6a4fc9b 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -771,6 +771,9 @@ void pcie_clear_aspm(struct pci_bus *bus)
{
struct pci_dev *child;
+ if (aspm_force)
+ return;
+
/*
* Clear any ASPM setup that the firmware has carried out on this bus
*/
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index d4824cb78b49..08c243ab034e 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -134,10 +134,28 @@ static int pcie_port_runtime_resume(struct device *dev)
return 0;
}
+static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
+{
+ bool *pme_poll = data;
+
+ if (pdev->pme_poll)
+ *pme_poll = true;
+ return 0;
+}
+
static int pcie_port_runtime_idle(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool pme_poll = false;
+
+ /*
+ * If any subordinate device needs pme poll, we should keep
+ * the port in D0, because we need the port in D0 to poll it.
+ */
+ pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
/* Delay for a short while to prevent too frequent suspend/resume */
- pm_schedule_suspend(dev, 10);
+ if (!pme_poll)
+ pm_schedule_suspend(dev, 10);
return -EBUSY;
}
#else
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6186f03d84f3..2dcd22d9c816 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1280,7 +1280,6 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Power Management */
pci_pm_init(dev);
- platform_pci_wakeup_init(dev);
/* Vital Product Data */
pci_vpd_pci22_init(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8f7a6344e79e..0369fb6fc1da 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2725,7 +2725,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
if (PCI_FUNC(dev->devfn))
return;
/*
- * RICOH 0xe823 SD/MMC card reader fails to recognize
+ * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
* certain types of SD/MMC cards. Lowering the SD base
* clock frequency from 200MHz to 50MHz fixes this issue.
*
@@ -2736,7 +2736,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
* 0xf9 - Key register for 0x150
* 0xfc - key register for 0xe1
*/
- if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
+ dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
pci_write_config_byte(dev, 0xf9, 0xfc);
pci_write_config_byte(dev, 0x150, 0x10);
pci_write_config_byte(dev, 0xf9, 0x00);
@@ -2763,6 +2764,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 7c0fd9252e6f..84954a726a94 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev)
static void pci_stop_dev(struct pci_dev *dev)
{
+ pci_pme_active(dev, false);
+
if (dev->is_added) {
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 8fd255f7ee40..b90f85bf5f81 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -36,8 +36,8 @@ config PCMCIA
If unsure, say Y.
config PCMCIA_LOAD_CIS
- bool "Load CIS updates from userspace (EXPERIMENTAL)"
- depends on PCMCIA && EXPERIMENTAL
+ bool "Load CIS updates from userspace"
+ depends on PCMCIA
select FW_LOADER
default y
help
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 673c14ea11e3..5292db69c426 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -484,7 +484,7 @@ static int socket_early_resume(struct pcmcia_socket *skt)
static int socket_late_resume(struct pcmcia_socket *skt)
{
- int ret;
+ int ret = 0;
mutex_lock(&skt->ops_mutex);
skt->state &= ~SOCKET_SUSPEND;
@@ -511,19 +511,31 @@ static int socket_late_resume(struct pcmcia_socket *skt)
return socket_insert(skt);
}
+ if (!(skt->state & SOCKET_CARDBUS) && (skt->callback))
+ ret = skt->callback->early_resume(skt);
+ return ret;
+}
+
+/*
+ * Finalize the resume. In case of a cardbus socket, we have
+ * to rebind the devices, as we can't be certain whether the card
+ * has been replaced or not.
+ */
+static int socket_complete_resume(struct pcmcia_socket *skt)
+{
+ int ret = 0;
#ifdef CONFIG_CARDBUS
if (skt->state & SOCKET_CARDBUS) {
/* We can't be sure the CardBus card is the same
* as the one previously inserted. Therefore, remove
* and re-add... */
cb_free(skt);
- cb_alloc(skt);
- return 0;
+ ret = cb_alloc(skt);
+ if (ret)
+ cb_free(skt);
}
#endif
- if (!(skt->state & SOCKET_CARDBUS) && (skt->callback))
- skt->callback->early_resume(skt);
- return 0;
+ return ret;
}
/*
@@ -533,11 +545,15 @@ static int socket_late_resume(struct pcmcia_socket *skt)
*/
static int socket_resume(struct pcmcia_socket *skt)
{
+ int err;
if (!(skt->state & SOCKET_SUSPEND))
return -EBUSY;
socket_early_resume(skt);
- return socket_late_resume(skt);
+ err = socket_late_resume(skt);
+ if (!err)
+ err = socket_complete_resume(skt);
+ return err;
}
static void socket_remove(struct pcmcia_socket *skt)
@@ -848,6 +864,12 @@ static int __used pcmcia_socket_dev_resume(struct device *dev)
return __pcmcia_pm_op(dev, socket_late_resume);
}
+static void __used pcmcia_socket_dev_complete(struct device *dev)
+{
+ WARN(__pcmcia_pm_op(dev, socket_complete_resume),
+ "failed to complete resume");
+}
+
static const struct dev_pm_ops pcmcia_socket_pm_ops = {
/* dev_resume may be called with IRQs enabled */
SET_SYSTEM_SLEEP_PM_OPS(NULL,
@@ -862,6 +884,7 @@ static const struct dev_pm_ops pcmcia_socket_pm_ops = {
.resume_noirq = pcmcia_socket_dev_resume_noirq,
.thaw_noirq = pcmcia_socket_dev_resume_noirq,
.restore_noirq = pcmcia_socket_dev_resume_noirq,
+ .complete = pcmcia_socket_dev_complete,
};
#define PCMCIA_SOCKET_CLASS_PM_OPS (&pcmcia_socket_pm_ops)
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 3578e1ca97a0..519c4d6003a6 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -133,8 +133,6 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
goto err_out_free_res;
}
- pci_set_drvdata(dev, &sockets[i].socket);
-
for (i = 0; i<socket_count; i++) {
sockets[i].socket.dev.parent = &dev->dev;
sockets[i].socket.ops = &i82092aa_operations;
@@ -164,14 +162,14 @@ err_out_disable:
static void i82092aa_pci_remove(struct pci_dev *dev)
{
- struct pcmcia_socket *socket = pci_get_drvdata(dev);
+ int i;
enter("i82092aa_pci_remove");
free_irq(dev->irq, i82092aa_interrupt);
- if (socket)
- pcmcia_unregister_socket(socket);
+ for (i = 0; i < socket_count; i++)
+ pcmcia_unregister_socket(&sockets[i].socket);
leave("i82092aa_pci_remove");
}
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 430a9ac56091..065704c605d5 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -369,12 +369,12 @@ static int do_validate_mem(struct pcmcia_socket *s,
}
}
- free_region(res2);
- free_region(res1);
-
dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u",
base, base+size-1, res1, res2, ret, info1, info2);
+ free_region(res2);
+ free_region(res1);
+
if ((ret) || (info1 != info2) || (info1 == 0))
return -EINVAL;
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 75806be344e5..d98a08612492 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
socket = &vrc4171_sockets[slot];
socket->csc_irq = search_nonuse_irq();
socket->io_irq = search_nonuse_irq();
+ spin_lock_init(&socket->lock);
return 0;
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index c31aeb01bb00..34f51d2d90d2 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -26,6 +26,29 @@ config DEBUG_PINCTRL
help
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
+config PINCTRL_ABX500
+ bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions"
+ depends on AB8500_CORE
+ select GENERIC_PINCONF
+ help
+ Select this to enable the ABx500 family IC GPIO driver
+
+config PINCTRL_AB8500
+ bool "AB8500 pin controller driver"
+ depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB8540
+ bool "AB8540 pin controller driver"
+ depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB9540
+ bool "AB9540 pin controller driver"
+ depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB8505
+ bool "AB8505 pin controller driver"
+ depends on PINCTRL_ABX500 && ARCH_U8500
+
config PINCTRL_AT91
bool "AT91 pinctrl driver"
depends on OF
@@ -151,6 +174,11 @@ config PINCTRL_SIRF
depends on ARCH_SIRF
select PINMUX
+config PINCTRL_SUNXI
+ bool
+ select PINMUX
+ select GENERIC_PINCONF
+
config PINCTRL_TEGRA
bool
select PINMUX
@@ -164,6 +192,10 @@ config PINCTRL_TEGRA30
bool
select PINCTRL_TEGRA
+config PINCTRL_TEGRA114
+ bool
+ select PINCTRL_TEGRA
+
config PINCTRL_U300
bool "U300 pin controller driver"
depends on ARCH_U300
@@ -181,12 +213,11 @@ config PINCTRL_COH901
config PINCTRL_SAMSUNG
bool
- depends on OF && GPIOLIB
select PINMUX
select PINCONF
-config PINCTRL_EXYNOS4
- bool "Pinctrl driver data for Exynos4 SoC"
+config PINCTRL_EXYNOS
+ bool "Pinctrl driver data for Samsung EXYNOS SoCs"
depends on OF && GPIOLIB
select PINCTRL_SAMSUNG
@@ -196,7 +227,7 @@ config PINCTRL_EXYNOS5440
select PINCONF
source "drivers/pinctrl/mvebu/Kconfig"
-
+source "drivers/pinctrl/sh-pfc/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
config PINCTRL_XWAY
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index fc4606f27dc7..f82cc5baf767 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -9,6 +9,11 @@ ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_PINCTRL) += devicetree.o
endif
obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
+obj-$(CONFIG_PINCTRL_ABX500) += pinctrl-abx500.o
+obj-$(CONFIG_PINCTRL_AB8500) += pinctrl-ab8500.o
+obj-$(CONFIG_PINCTRL_AB8540) += pinctrl-ab8540.o
+obj-$(CONFIG_PINCTRL_AB9540) += pinctrl-ab9540.o
+obj-$(CONFIG_PINCTRL_AB8505) += pinctrl-ab8505.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
@@ -30,16 +35,20 @@ obj-$(CONFIG_PINCTRL_PXA168) += pinctrl-pxa168.o
obj-$(CONFIG_PINCTRL_PXA910) += pinctrl-pxa910.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += pinctrl-sirf.o
+obj-$(CONFIG_PINCTRL_SUNXI) += pinctrl-sunxi.o
obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
+obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
-obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o
+obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PLAT_ORION) += mvebu/
+obj-$(CONFIG_ARCH_SHMOBILE) += sh-pfc/
+obj-$(CONFIG_SUPERH) += sh-pfc/
obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5cdee8669ea3..b0de6e7f1fdb 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) "pinctrl core: " fmt
#include <linux/kernel.h>
+#include <linux/kref.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -31,17 +32,6 @@
#include "pinmux.h"
#include "pinconf.h"
-/**
- * struct pinctrl_maps - a list item containing part of the mapping table
- * @node: mapping table list node
- * @maps: array of mapping table entries
- * @num_maps: the number of entries in @maps
- */
-struct pinctrl_maps {
- struct list_head node;
- struct pinctrl_map const *maps;
- unsigned num_maps;
-};
static bool pinctrl_dummy_state;
@@ -55,13 +45,8 @@ LIST_HEAD(pinctrldev_list);
static LIST_HEAD(pinctrl_list);
/* List of pinctrl maps (struct pinctrl_maps) */
-static LIST_HEAD(pinctrl_maps);
+LIST_HEAD(pinctrl_maps);
-#define for_each_maps(_maps_node_, _i_, _map_) \
- list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
- for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
- _i_ < _maps_node_->num_maps; \
- _i_++, _map_ = &_maps_node_->maps[_i_])
/**
* pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
@@ -83,6 +68,12 @@ const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
}
EXPORT_SYMBOL_GPL(pinctrl_dev_get_name);
+const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev)
+{
+ return dev_name(pctldev->dev);
+}
+EXPORT_SYMBOL_GPL(pinctrl_dev_get_devname);
+
void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev)
{
return pctldev->driver_data;
@@ -609,13 +600,16 @@ static int add_setting(struct pinctrl *p, struct pinctrl_map const *map)
setting->pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
if (setting->pctldev == NULL) {
- dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
- map->ctrl_dev_name);
kfree(setting);
+ /* Do not defer probing of hogs (circular loop) */
+ if (!strcmp(map->ctrl_dev_name, map->dev_name))
+ return -ENODEV;
/*
* OK let us guess that the driver is not there yet, and
* let's defer obtaining this pinctrl handle to later...
*/
+ dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
+ map->ctrl_dev_name);
return -EPROBE_DEFER;
}
@@ -694,13 +688,33 @@ static struct pinctrl *create_pinctrl(struct device *dev)
continue;
ret = add_setting(p, map);
- if (ret < 0) {
+ /*
+ * At this point the adding of a setting may:
+ *
+ * - Defer, if the pinctrl device is not yet available
+ * - Fail, if the pinctrl device is not yet available,
+ * AND the setting is a hog. We cannot defer that, since
+ * the hog will kick in immediately after the device
+ * is registered.
+ *
+ * If the error returned was not -EPROBE_DEFER then we
+ * accumulate the errors to see if we end up with
+ * an -EPROBE_DEFER later, as that is the worst case.
+ */
+ if (ret == -EPROBE_DEFER) {
pinctrl_put_locked(p, false);
return ERR_PTR(ret);
}
}
+ if (ret < 0) {
+ /* If some other error than deferral occurred, return here */
+ pinctrl_put_locked(p, false);
+ return ERR_PTR(ret);
+ }
- /* Add the pinmux to the global list */
+ kref_init(&p->users);
+
+ /* Add the pinctrl handle to the global list */
list_add_tail(&p->node, &pinctrl_list);
return p;
@@ -713,9 +727,17 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev)
if (WARN_ON(!dev))
return ERR_PTR(-EINVAL);
+ /*
+ * See if somebody else (such as the device core) has already
+ * obtained a handle to the pinctrl for this device. In that case,
+ * return another pointer to it.
+ */
p = find_pinctrl(dev);
- if (p != NULL)
- return ERR_PTR(-EBUSY);
+ if (p != NULL) {
+ dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
+ kref_get(&p->users);
+ return p;
+ }
return create_pinctrl(dev);
}
@@ -771,13 +793,24 @@ static void pinctrl_put_locked(struct pinctrl *p, bool inlist)
}
/**
- * pinctrl_put() - release a previously claimed pinctrl handle
+ * pinctrl_release() - release the pinctrl handle
+ * @kref: the kref in the pinctrl being released
+ */
+static void pinctrl_release(struct kref *kref)
+{
+ struct pinctrl *p = container_of(kref, struct pinctrl, users);
+
+ pinctrl_put_locked(p, true);
+}
+
+/**
+ * pinctrl_put() - decrease use count on a previously claimed pinctrl handle
* @p: the pinctrl handle to release
*/
void pinctrl_put(struct pinctrl *p)
{
mutex_lock(&pinctrl_mutex);
- pinctrl_put_locked(p, true);
+ kref_put(&p->users, pinctrl_release);
mutex_unlock(&pinctrl_mutex);
}
EXPORT_SYMBOL_GPL(pinctrl_put);
@@ -1055,6 +1088,30 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
}
}
+/**
+ * pinctrl_force_sleep() - turn a given controller device into sleep state
+ * @pctldev: pin controller device
+ */
+int pinctrl_force_sleep(struct pinctrl_dev *pctldev)
+{
+ if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
+ return pinctrl_select_state(pctldev->p, pctldev->hog_sleep);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
+
+/**
+ * pinctrl_force_default() - turn a given controller device into default state
+ * @pctldev: pin controller device
+ */
+int pinctrl_force_default(struct pinctrl_dev *pctldev)
+{
+ if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
+ return pinctrl_select_state(pctldev->p, pctldev->hog_default);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_force_default);
+
#ifdef CONFIG_DEBUG_FS
static int pinctrl_pins_show(struct seq_file *s, void *what)
@@ -1500,16 +1557,23 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pctldev->p = pinctrl_get_locked(pctldev->dev);
if (!IS_ERR(pctldev->p)) {
- struct pinctrl_state *s =
+ pctldev->hog_default =
pinctrl_lookup_state_locked(pctldev->p,
PINCTRL_STATE_DEFAULT);
- if (IS_ERR(s)) {
+ if (IS_ERR(pctldev->hog_default)) {
dev_dbg(dev, "failed to lookup the default state\n");
} else {
- if (pinctrl_select_state_locked(pctldev->p, s))
+ if (pinctrl_select_state_locked(pctldev->p,
+ pctldev->hog_default))
dev_err(dev,
"failed to select default state\n");
}
+
+ pctldev->hog_sleep =
+ pinctrl_lookup_state_locked(pctldev->p,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(pctldev->hog_sleep))
+ dev_dbg(dev, "failed to lookup the sleep state\n");
}
mutex_unlock(&pinctrl_mutex);
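/*
 * Illustrative sketch, not part of the patch above: with the kref added to
 * struct pinctrl, a second pinctrl_get() for the same device returns the
 * existing handle with its use count raised instead of -EBUSY, and every
 * pinctrl_get() must be balanced by a pinctrl_put().  struct foo and the
 * foo_* functions are hypothetical consumer code.
 */
struct foo {
	struct pinctrl *p;
	struct pinctrl_state *def;
};

static int foo_probe_pins(struct device *dev, struct foo *foo)
{
	/* may hand back a copy of a handle someone else already claimed */
	foo->p = pinctrl_get(dev);
	if (IS_ERR(foo->p))
		return PTR_ERR(foo->p);

	foo->def = pinctrl_lookup_state(foo->p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(foo->def)) {
		pinctrl_put(foo->p);
		return PTR_ERR(foo->def);
	}
	return pinctrl_select_state(foo->p, foo->def);
}

static void foo_remove_pins(struct foo *foo)
{
	/* drops one reference; the handle is freed only on the last put */
	pinctrl_put(foo->p);
}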
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 12f5694f3d5d..ee72f1f6d862 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -9,6 +9,7 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/pinctrl/pinconf.h>
@@ -30,6 +31,8 @@ struct pinctrl_gpio_range;
* @driver_data: driver data for drivers registering to the pin controller
* subsystem
* @p: result of pinctrl_get() for this device
+ * @hog_default: default state for pins hogged by this device
+ * @hog_sleep: sleep state for pins hogged by this device
* @device_root: debugfs root for this device
*/
struct pinctrl_dev {
@@ -41,6 +44,8 @@ struct pinctrl_dev {
struct module *owner;
void *driver_data;
struct pinctrl *p;
+ struct pinctrl_state *hog_default;
+ struct pinctrl_state *hog_sleep;
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
#endif
@@ -54,6 +59,7 @@ struct pinctrl_dev {
* @state: the current state
* @dt_maps: the mapping table chunks dynamically parsed from device tree for
* this device, if any
+ * @users: reference count
*/
struct pinctrl {
struct list_head node;
@@ -61,6 +67,7 @@ struct pinctrl {
struct list_head states;
struct pinctrl_state *state;
struct list_head dt_maps;
+ struct kref users;
};
/**
@@ -148,6 +155,18 @@ struct pin_desc {
#endif
};
+/**
+ * struct pinctrl_maps - a list item containing part of the mapping table
+ * @node: mapping table list node
+ * @maps: array of mapping table entries
+ * @num_maps: the number of entries in @maps
+ */
+struct pinctrl_maps {
+ struct list_head node;
+ struct pinctrl_map const *maps;
+ unsigned num_maps;
+};
+
struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin);
@@ -164,5 +183,15 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
bool dup, bool locked);
void pinctrl_unregister_map(struct pinctrl_map const *map);
+extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
+extern int pinctrl_force_default(struct pinctrl_dev *pctldev);
+
extern struct mutex pinctrl_mutex;
extern struct list_head pinctrldev_list;
+extern struct list_head pinctrl_maps;
+
+#define for_each_maps(_maps_node_, _i_, _map_) \
+ list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
+ for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
+ _i_ < _maps_node_->num_maps; \
+ _i_++, _map_ = &_maps_node_->maps[_i_])
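/*
 * Illustrative sketch, not part of the patch above: a pin controller driver
 * can use the pinctrl_force_sleep()/pinctrl_force_default() helpers declared
 * above to move its own hogged pins between the "sleep" and "default" states
 * across system suspend.  The foo_pinctrl_* functions are hypothetical.
 */
static int foo_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pinctrl_dev *pctl = platform_get_drvdata(pdev);

	return pinctrl_force_sleep(pctl);
}

static int foo_pinctrl_resume(struct platform_device *pdev)
{
	struct pinctrl_dev *pctl = platform_get_drvdata(pdev);

	return pinctrl_force_default(pctl);
}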
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index fe2d1af7cfa0..fd40a11ad645 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -141,6 +141,11 @@ static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
pctldev = find_pinctrl_by_of_node(np_pctldev);
if (pctldev)
break;
+ /* Do not defer probing of hogs (circular loop) */
+ if (np_pctldev == p->dev->of_node) {
+ of_node_put(np_pctldev);
+ return -ENODEV;
+ }
}
of_node_put(np_pctldev);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index c907647de6ad..48e21a229483 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -367,7 +367,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;
-static struct of_device_id armada_370_pinctrl_of_match[] __devinitdata = {
+static struct of_device_id armada_370_pinctrl_of_match[] = {
{ .compatible = "marvell,mv88f6710-pinctrl" },
{ },
};
@@ -382,7 +382,7 @@ static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(2, 64, 64, 2),
};
-static int __devinit armada_370_pinctrl_probe(struct platform_device *pdev)
+static int armada_370_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;
@@ -399,7 +399,7 @@ static int __devinit armada_370_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int __devexit armada_370_pinctrl_remove(struct platform_device *pdev)
+static int armada_370_pinctrl_remove(struct platform_device *pdev)
{
return mvebu_pinctrl_remove(pdev);
}
@@ -411,7 +411,7 @@ static struct platform_driver armada_370_pinctrl_driver = {
.of_match_table = of_match_ptr(armada_370_pinctrl_of_match),
},
.probe = armada_370_pinctrl_probe,
- .remove = __devexit_p(armada_370_pinctrl_remove),
+ .remove = armada_370_pinctrl_remove,
};
module_platform_driver(armada_370_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index 40bd52a46b4e..ab5dc04b3e8a 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -349,7 +349,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
static struct mvebu_pinctrl_soc_info armada_xp_pinctrl_info;
-static struct of_device_id armada_xp_pinctrl_of_match[] __devinitdata = {
+static struct of_device_id armada_xp_pinctrl_of_match[] = {
{
.compatible = "marvell,mv78230-pinctrl",
.data = (void *) V_MV78230,
@@ -394,7 +394,7 @@ static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(2, 64, 64, 3),
};
-static int __devinit armada_xp_pinctrl_probe(struct platform_device *pdev)
+static int armada_xp_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
const struct of_device_id *match =
@@ -446,7 +446,7 @@ static int __devinit armada_xp_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int __devexit armada_xp_pinctrl_remove(struct platform_device *pdev)
+static int armada_xp_pinctrl_remove(struct platform_device *pdev)
{
return mvebu_pinctrl_remove(pdev);
}
@@ -458,7 +458,7 @@ static struct platform_driver armada_xp_pinctrl_driver = {
.of_match_table = of_match_ptr(armada_xp_pinctrl_of_match),
},
.probe = armada_xp_pinctrl_probe,
- .remove = __devexit_p(armada_xp_pinctrl_remove),
+ .remove = armada_xp_pinctrl_remove,
};
module_platform_driver(armada_xp_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 40c9c3eecd94..428ea96a94d3 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -579,29 +579,32 @@ static struct mvebu_pinctrl_soc_info dove_pinctrl_info = {
static struct clk *clk;
-static struct of_device_id dove_pinctrl_of_match[] __devinitdata = {
+static struct of_device_id dove_pinctrl_of_match[] = {
{ .compatible = "marvell,dove-pinctrl", .data = &dove_pinctrl_info },
{ }
};
-static int __devinit dove_pinctrl_probe(struct platform_device *pdev)
+static int dove_pinctrl_probe(struct platform_device *pdev)
{
const struct of_device_id *match =
of_match_device(dove_pinctrl_of_match, &pdev->dev);
- pdev->dev.platform_data = match->data;
+ pdev->dev.platform_data = (void *)match->data;
/*
* General MPP Configuration Register is part of pdma registers.
* grab clk to make sure it is ticking.
*/
clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(clk))
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Unable to get pdma clock");
+ return PTR_RET(clk);
+ }
+ clk_prepare_enable(clk);
return mvebu_pinctrl_probe(pdev);
}
-static int __devexit dove_pinctrl_remove(struct platform_device *pdev)
+static int dove_pinctrl_remove(struct platform_device *pdev)
{
int ret;
@@ -618,7 +621,7 @@ static struct platform_driver dove_pinctrl_driver = {
.of_match_table = of_match_ptr(dove_pinctrl_of_match),
},
.probe = dove_pinctrl_probe,
- .remove = __devexit_p(dove_pinctrl_remove),
+ .remove = dove_pinctrl_remove,
};
module_platform_driver(dove_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index fa6ce31c94d9..cdd483df673e 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -66,9 +66,9 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(6,
- MPP_VAR_FUNCTION(0x0, "sysrst", "out", V(1, 1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "spi", "mosi", V(1, 1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "ptp", "trig", V(1, 1, 1, 1, 0, 0))),
+ MPP_VAR_FUNCTION(0x1, "sysrst", "out", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "mosi", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ptp", "trig", V(1, 1, 1, 1, 0, 0))),
MPP_MODE(7,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "pex", "rsto", V(1, 1, 1, 1, 0, 1)),
@@ -444,7 +444,7 @@ static struct mvebu_pinctrl_soc_info mv98dx4122_info = {
.ngpioranges = ARRAY_SIZE(mv88f628x_gpio_ranges),
};
-static struct of_device_id kirkwood_pinctrl_of_match[] __devinitdata = {
+static struct of_device_id kirkwood_pinctrl_of_match[] = {
{ .compatible = "marvell,88f6180-pinctrl", .data = &mv88f6180_info },
{ .compatible = "marvell,88f6190-pinctrl", .data = &mv88f6190_info },
{ .compatible = "marvell,88f6192-pinctrl", .data = &mv88f6192_info },
@@ -454,15 +454,15 @@ static struct of_device_id kirkwood_pinctrl_of_match[] __devinitdata = {
{ }
};
-static int __devinit kirkwood_pinctrl_probe(struct platform_device *pdev)
+static int kirkwood_pinctrl_probe(struct platform_device *pdev)
{
const struct of_device_id *match =
of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
- pdev->dev.platform_data = match->data;
+ pdev->dev.platform_data = (void *)match->data;
return mvebu_pinctrl_probe(pdev);
}
-static int __devexit kirkwood_pinctrl_remove(struct platform_device *pdev)
+static int kirkwood_pinctrl_remove(struct platform_device *pdev)
{
return mvebu_pinctrl_remove(pdev);
}
@@ -474,7 +474,7 @@ static struct platform_driver kirkwood_pinctrl_driver = {
.of_match_table = of_match_ptr(kirkwood_pinctrl_of_match),
},
.probe = kirkwood_pinctrl_probe,
- .remove = __devexit_p(kirkwood_pinctrl_remove),
+ .remove = kirkwood_pinctrl_remove,
};
module_platform_driver(kirkwood_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 6c44b7e8964c..c689c04a4f52 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -478,8 +478,7 @@ static struct pinctrl_ops mvebu_pinctrl_ops = {
.dt_free_map = mvebu_pinctrl_dt_free_map,
};
-static int __devinit _add_function(struct mvebu_pinctrl_function *funcs,
- const char *name)
+static int _add_function(struct mvebu_pinctrl_function *funcs, const char *name)
{
while (funcs->num_groups) {
/* function already there */
@@ -494,8 +493,8 @@ static int __devinit _add_function(struct mvebu_pinctrl_function *funcs,
return 0;
}
-static int __devinit mvebu_pinctrl_build_functions(struct platform_device *pdev,
- struct mvebu_pinctrl *pctl)
+static int mvebu_pinctrl_build_functions(struct platform_device *pdev,
+ struct mvebu_pinctrl *pctl)
{
struct mvebu_pinctrl_function *funcs;
int num = 0;
@@ -568,7 +567,7 @@ static int __devinit mvebu_pinctrl_build_functions(struct platform_device *pdev,
return 0;
}
-int __devinit mvebu_pinctrl_probe(struct platform_device *pdev)
+int mvebu_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
@@ -745,7 +744,7 @@ int __devinit mvebu_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-int __devexit mvebu_pinctrl_remove(struct platform_device *pdev)
+int mvebu_pinctrl_remove(struct platform_device *pdev)
{
struct mvebu_pinctrl *pctl = platform_get_drvdata(pdev);
pinctrl_unregister(pctl->pctldev);
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 833a36458157..06c304ac6f7d 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -41,11 +41,13 @@ struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL),
- PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_DISABLE, "input schmitt disabled", NULL),
+ PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "time units"),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector"),
+ PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL),
PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode"),
+ PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level"),
};
void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index baee2cc46a17..ac8d382a79bb 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -574,6 +574,207 @@ static const struct file_operations pinconf_groups_ops = {
.release = single_release,
};
+/* 32-bit read/write resources */
+#define MAX_NAME_LEN 16
+static char dbg_pinname[MAX_NAME_LEN]; /* shared: name of the pin */
+static char dbg_state_name[MAX_NAME_LEN]; /* shared: state of the pin */
+static u32 dbg_config; /* shared: config to be read/set for the pin & state*/
+
+static int pinconf_dbg_pinname_print(struct seq_file *s, void *d)
+{
+ if (strlen(dbg_pinname))
+ seq_printf(s, "%s\n", dbg_pinname);
+ else
+ seq_printf(s, "No pin name set\n");
+ return 0;
+}
+
+static int pinconf_dbg_pinname_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_dbg_pinname_print, inode->i_private);
+}
+
+static ssize_t pinconf_dbg_pinname_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ int err;
+
+ if (count > MAX_NAME_LEN)
+ return -EINVAL;
+
+ err = sscanf(user_buf, "%15s", dbg_pinname);
+
+ if (err != 1)
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations pinconf_dbg_pinname_fops = {
+ .open = pinconf_dbg_pinname_open,
+ .write = pinconf_dbg_pinname_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int pinconf_dbg_state_print(struct seq_file *s, void *d)
+{
+ if (strlen(dbg_state_name))
+		seq_printf(s, "%s\n", dbg_state_name);
+ else
+ seq_printf(s, "No pin state set\n");
+ return 0;
+}
+
+static int pinconf_dbg_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_dbg_state_print, inode->i_private);
+}
+
+static ssize_t pinconf_dbg_state_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ int err;
+
+ if (count > MAX_NAME_LEN)
+ return -EINVAL;
+
+ err = sscanf(user_buf, "%15s", dbg_state_name);
+
+ if (err != 1)
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations pinconf_dbg_pinstate_fops = {
+ .open = pinconf_dbg_state_open,
+ .write = pinconf_dbg_state_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * pinconf_dbg_config_print() - display the pinctrl config, as stored in the
+ * pinctrl map, of the pin/state pair selected via the debugfs entries
+ * pinconf-name and pinconf-state
+ * @s: seq_file the config is printed to
+ * @d: not used
+ */
+static int pinconf_dbg_config_print(struct seq_file *s, void *d)
+{
+ struct pinctrl_maps *maps_node;
+ struct pinctrl_map const *map;
+ struct pinctrl_dev *pctldev = NULL;
+ struct pinconf_ops *confops = NULL;
+ int i, j;
+ bool found = false;
+
+ mutex_lock(&pinctrl_mutex);
+
+ /* Parse the pinctrl map and look for the elected pin/state */
+ for_each_maps(maps_node, i, map) {
+ if (map->type != PIN_MAP_TYPE_CONFIGS_PIN)
+ continue;
+
+		if (strncmp(map->name, dbg_state_name, MAX_NAME_LEN) != 0)
+ continue;
+
+ for (j = 0; j < map->data.configs.num_configs; j++) {
+ if (0 == strncmp(map->data.configs.group_or_pin,
+ dbg_pinname, MAX_NAME_LEN)) {
+ /* We found the right pin / state, read the
+ * config and store the pctldev */
+ dbg_config = map->data.configs.configs[j];
+ pctldev = get_pinctrl_dev_from_devname
+ (map->ctrl_dev_name);
+ found = true;
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&pinctrl_mutex);
+
+ if (found) {
+ seq_printf(s, "Config of %s in state %s: 0x%08X\n", dbg_pinname,
+ dbg_state_name, dbg_config);
+
+ if (pctldev)
+ confops = pctldev->desc->confops;
+
+ if (confops && confops->pin_config_config_dbg_show)
+ confops->pin_config_config_dbg_show(pctldev,
+ s, dbg_config);
+ } else {
+ seq_printf(s, "No pin found for defined name/state\n");
+ }
+
+ return 0;
+}
+
+static int pinconf_dbg_config_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_dbg_config_print, inode->i_private);
+}
+
+/**
+ * pinconf_dbg_config_write() - overwrite the pinctrl config, in the pinctrl
+ * map, of the pin/state pair selected via the debugfs entries pinconf-name
+ * and pinconf-state
+ */
+static ssize_t pinconf_dbg_config_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long config;
+ struct pinctrl_maps *maps_node;
+ struct pinctrl_map const *map;
+ int i, j;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &config);
+
+ if (err)
+ return err;
+
+ dbg_config = config;
+
+ mutex_lock(&pinctrl_mutex);
+
+ /* Parse the pinctrl map and look for the selected pin/state */
+ for_each_maps(maps_node, i, map) {
+ if (map->type != PIN_MAP_TYPE_CONFIGS_PIN)
+ continue;
+
+		if (strncmp(map->name, dbg_state_name, MAX_NAME_LEN) != 0)
+ continue;
+
+ /* we found the right pin / state, so overwrite config */
+ for (j = 0; j < map->data.configs.num_configs; j++) {
+ if (strncmp(map->data.configs.group_or_pin, dbg_pinname,
+ MAX_NAME_LEN) == 0)
+ map->data.configs.configs[j] = dbg_config;
+ }
+ }
+
+ mutex_unlock(&pinctrl_mutex);
+
+ return count;
+}
+
+static const struct file_operations pinconf_dbg_pinconfig_fops = {
+ .open = pinconf_dbg_config_open,
+ .write = pinconf_dbg_config_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
void pinconf_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev)
{
@@ -581,6 +782,12 @@ void pinconf_init_device_debugfs(struct dentry *devroot,
devroot, pctldev, &pinconf_pins_ops);
debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
devroot, pctldev, &pinconf_groups_ops);
+ debugfs_create_file("pinconf-name", (S_IRUGO | S_IWUSR | S_IWGRP),
+ devroot, pctldev, &pinconf_dbg_pinname_fops);
+ debugfs_create_file("pinconf-state", (S_IRUGO | S_IWUSR | S_IWGRP),
+ devroot, pctldev, &pinconf_dbg_pinstate_fops);
+ debugfs_create_file("pinconf-config", (S_IRUGO | S_IWUSR | S_IWGRP),
+ devroot, pctldev, &pinconf_dbg_pinconfig_fops);
}
#endif
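
The three debugfs files added above work as a set: write a pin name to pinconf-name and a map state to pinconf-state, then read pinconf-config to see the 32-bit config stored in the pinctrl map for that pair, or write a value to patch it in place. A minimal user-space sketch, not part of the patch; the debugfs path, the pinctrl device directory and the pin name are placeholders:

#include <stdio.h>

/* Placeholder path: the real directory is <debugfs>/pinctrl/<ctrl_dev_name>/ */
#define DBG_DIR "/sys/kernel/debug/pinctrl/pinctrl-dev.0"

static void write_str(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", DBG_DIR, file);
	f = fopen(path, "w");
	if (!f)
		return;
	fprintf(f, "%s\n", val);
	fclose(f);
}

int main(void)
{
	char line[128];
	FILE *f;

	write_str("pinconf-name", "GPIO13_W17");	/* pin to inspect */
	write_str("pinconf-state", "default");		/* map state name */

	f = fopen(DBG_DIR "/pinconf-config", "r");	/* read back the config */
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}
	return 0;
}
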
diff --git a/drivers/pinctrl/pinctrl-ab8500.c b/drivers/pinctrl/pinctrl-ab8500.c
new file mode 100644
index 000000000000..3b471d87c211
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ab8500.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include "pinctrl-abx500.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define ABX500_GPIO(offset) (offset)
+
+#define AB8500_PIN_T10 ABX500_GPIO(1)
+#define AB8500_PIN_T9 ABX500_GPIO(2)
+#define AB8500_PIN_U9 ABX500_GPIO(3)
+#define AB8500_PIN_W2 ABX500_GPIO(4)
+/* hole */
+#define AB8500_PIN_Y18 ABX500_GPIO(6)
+#define AB8500_PIN_AA20 ABX500_GPIO(7)
+#define AB8500_PIN_W18 ABX500_GPIO(8)
+#define AB8500_PIN_AA19 ABX500_GPIO(9)
+#define AB8500_PIN_U17 ABX500_GPIO(10)
+#define AB8500_PIN_AA18 ABX500_GPIO(11)
+#define AB8500_PIN_U16 ABX500_GPIO(12)
+#define AB8500_PIN_W17 ABX500_GPIO(13)
+#define AB8500_PIN_F14 ABX500_GPIO(14)
+#define AB8500_PIN_B17 ABX500_GPIO(15)
+#define AB8500_PIN_F15 ABX500_GPIO(16)
+#define AB8500_PIN_P5 ABX500_GPIO(17)
+#define AB8500_PIN_R5 ABX500_GPIO(18)
+#define AB8500_PIN_U5 ABX500_GPIO(19)
+#define AB8500_PIN_T5 ABX500_GPIO(20)
+#define AB8500_PIN_H19 ABX500_GPIO(21)
+#define AB8500_PIN_G20 ABX500_GPIO(22)
+#define AB8500_PIN_G19 ABX500_GPIO(23)
+#define AB8500_PIN_T14 ABX500_GPIO(24)
+#define AB8500_PIN_R16 ABX500_GPIO(25)
+#define AB8500_PIN_M16 ABX500_GPIO(26)
+#define AB8500_PIN_J6 ABX500_GPIO(27)
+#define AB8500_PIN_K6 ABX500_GPIO(28)
+#define AB8500_PIN_G6 ABX500_GPIO(29)
+#define AB8500_PIN_H6 ABX500_GPIO(30)
+#define AB8500_PIN_F5 ABX500_GPIO(31)
+#define AB8500_PIN_G5 ABX500_GPIO(32)
+/* hole */
+#define AB8500_PIN_R17 ABX500_GPIO(34)
+#define AB8500_PIN_W15 ABX500_GPIO(35)
+#define AB8500_PIN_A17 ABX500_GPIO(36)
+#define AB8500_PIN_E15 ABX500_GPIO(37)
+#define AB8500_PIN_C17 ABX500_GPIO(38)
+#define AB8500_PIN_E16 ABX500_GPIO(39)
+#define AB8500_PIN_T19 ABX500_GPIO(40)
+#define AB8500_PIN_U19 ABX500_GPIO(41)
+#define AB8500_PIN_U2 ABX500_GPIO(42)
+
+/* indicates the highest GPIO number */
+#define AB8500_GPIO_MAX_NUMBER 42
+
+/*
+ * The names of the pins are denoted by GPIO number and ball name, even
+ * though they can be used for other things than GPIO, this is the first
+ * column in the table of the data sheet and often used on schematics and
+ * such.
+ */
+static const struct pinctrl_pin_desc ab8500_pins[] = {
+ PINCTRL_PIN(AB8500_PIN_T10, "GPIO1_T10"),
+ PINCTRL_PIN(AB8500_PIN_T9, "GPIO2_T9"),
+ PINCTRL_PIN(AB8500_PIN_U9, "GPIO3_U9"),
+ PINCTRL_PIN(AB8500_PIN_W2, "GPIO4_W2"),
+ /* hole */
+ PINCTRL_PIN(AB8500_PIN_Y18, "GPIO6_Y18"),
+ PINCTRL_PIN(AB8500_PIN_AA20, "GPIO7_AA20"),
+ PINCTRL_PIN(AB8500_PIN_W18, "GPIO8_W18"),
+ PINCTRL_PIN(AB8500_PIN_AA19, "GPIO9_AA19"),
+ PINCTRL_PIN(AB8500_PIN_U17, "GPIO10_U17"),
+ PINCTRL_PIN(AB8500_PIN_AA18, "GPIO11_AA18"),
+ PINCTRL_PIN(AB8500_PIN_U16, "GPIO12_U16"),
+ PINCTRL_PIN(AB8500_PIN_W17, "GPIO13_W17"),
+ PINCTRL_PIN(AB8500_PIN_F14, "GPIO14_F14"),
+ PINCTRL_PIN(AB8500_PIN_B17, "GPIO15_B17"),
+ PINCTRL_PIN(AB8500_PIN_F15, "GPIO16_F15"),
+ PINCTRL_PIN(AB8500_PIN_P5, "GPIO17_P5"),
+ PINCTRL_PIN(AB8500_PIN_R5, "GPIO18_R5"),
+ PINCTRL_PIN(AB8500_PIN_U5, "GPIO19_U5"),
+ PINCTRL_PIN(AB8500_PIN_T5, "GPIO20_T5"),
+ PINCTRL_PIN(AB8500_PIN_H19, "GPIO21_H19"),
+ PINCTRL_PIN(AB8500_PIN_G20, "GPIO22_G20"),
+ PINCTRL_PIN(AB8500_PIN_G19, "GPIO23_G19"),
+ PINCTRL_PIN(AB8500_PIN_T14, "GPIO24_T14"),
+ PINCTRL_PIN(AB8500_PIN_R16, "GPIO25_R16"),
+ PINCTRL_PIN(AB8500_PIN_M16, "GPIO26_M16"),
+ PINCTRL_PIN(AB8500_PIN_J6, "GPIO27_J6"),
+ PINCTRL_PIN(AB8500_PIN_K6, "GPIO28_K6"),
+ PINCTRL_PIN(AB8500_PIN_G6, "GPIO29_G6"),
+ PINCTRL_PIN(AB8500_PIN_H6, "GPIO30_H6"),
+ PINCTRL_PIN(AB8500_PIN_F5, "GPIO31_F5"),
+ PINCTRL_PIN(AB8500_PIN_G5, "GPIO32_G5"),
+ /* hole */
+ PINCTRL_PIN(AB8500_PIN_R17, "GPIO34_R17"),
+ PINCTRL_PIN(AB8500_PIN_W15, "GPIO35_W15"),
+ PINCTRL_PIN(AB8500_PIN_A17, "GPIO36_A17"),
+ PINCTRL_PIN(AB8500_PIN_E15, "GPIO37_E15"),
+ PINCTRL_PIN(AB8500_PIN_C17, "GPIO38_C17"),
+ PINCTRL_PIN(AB8500_PIN_E16, "GPIO39_E16"),
+ PINCTRL_PIN(AB8500_PIN_T19, "GPIO40_T19"),
+ PINCTRL_PIN(AB8500_PIN_U19, "GPIO41_U19"),
+ PINCTRL_PIN(AB8500_PIN_U2, "GPIO42_U2"),
+};
+
+/*
+ * Maps local GPIO offsets to local pin numbers
+ */
+static const struct abx500_pinrange ab8500_pinranges[] = {
+ ABX500_PINRANGE(1, 4, ABX500_ALT_A),
+ ABX500_PINRANGE(6, 4, ABX500_ALT_A),
+ ABX500_PINRANGE(10, 4, ABX500_DEFAULT),
+ ABX500_PINRANGE(14, 12, ABX500_ALT_A),
+ ABX500_PINRANGE(26, 1, ABX500_DEFAULT),
+ ABX500_PINRANGE(27, 6, ABX500_ALT_A),
+ ABX500_PINRANGE(34, 1, ABX500_ALT_A),
+ ABX500_PINRANGE(35, 1, ABX500_DEFAULT),
+ ABX500_PINRANGE(36, 7, ABX500_ALT_A),
+};
+
+/*
+ * Read the pin group names like this:
+ * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
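
A board that wants, say, the full YCbCr interface therefore has to combine a default-column group with an altfunction-A group, because YCbCr0-3 and YCbCr4-7 live in different columns below. A hedged sketch of such a board mapping (the device names are placeholders, not part of this patch; PIN_MAP_MUX_GROUP_DEFAULT comes from <linux/pinctrl/machine.h>):

/* Editor's illustration only: combine two groups for one "ycbcr" function */
static struct pinctrl_map ycbcr_map_sketch[] = {
	PIN_MAP_MUX_GROUP_DEFAULT("mcde.0", "pinctrl-ab8500.0",
				  "ycbcr0123_d_1", "ycbcr"),
	PIN_MAP_MUX_GROUP_DEFAULT("mcde.0", "pinctrl-ab8500.0",
				  "ycbcr4567_a_1", "ycbcr"),
};
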
+
+/* default column */
+static const unsigned sysclkreq2_d_1_pins[] = { AB8500_PIN_T10 };
+static const unsigned sysclkreq3_d_1_pins[] = { AB8500_PIN_T9 };
+static const unsigned sysclkreq4_d_1_pins[] = { AB8500_PIN_U9 };
+static const unsigned sysclkreq6_d_1_pins[] = { AB8500_PIN_W2 };
+static const unsigned ycbcr0123_d_1_pins[] = { AB8500_PIN_Y18, AB8500_PIN_AA20,
+ AB8500_PIN_W18, AB8500_PIN_AA19};
+static const unsigned gpio10_d_1_pins[] = { AB8500_PIN_U17 };
+static const unsigned gpio11_d_1_pins[] = { AB8500_PIN_AA18 };
+static const unsigned gpio12_d_1_pins[] = { AB8500_PIN_U16 };
+static const unsigned gpio13_d_1_pins[] = { AB8500_PIN_W17 };
+static const unsigned pwmout1_d_1_pins[] = { AB8500_PIN_F14 };
+static const unsigned pwmout2_d_1_pins[] = { AB8500_PIN_B17 };
+static const unsigned pwmout3_d_1_pins[] = { AB8500_PIN_F15 };
+
+/* audio data interface 1*/
+static const unsigned adi1_d_1_pins[] = { AB8500_PIN_P5, AB8500_PIN_R5,
+ AB8500_PIN_U5, AB8500_PIN_T5 };
+/* USBUICC */
+static const unsigned usbuicc_d_1_pins[] = { AB8500_PIN_H19, AB8500_PIN_G20,
+ AB8500_PIN_G19 };
+static const unsigned sysclkreq7_d_1_pins[] = { AB8500_PIN_T14 };
+static const unsigned sysclkreq8_d_1_pins[] = { AB8500_PIN_R16 };
+static const unsigned gpio26_d_1_pins[] = { AB8500_PIN_M16 };
+/* Digital microphone 1 and 2 */
+static const unsigned dmic12_d_1_pins[] = { AB8500_PIN_J6, AB8500_PIN_K6 };
+/* Digital microphone 3 and 4 */
+static const unsigned dmic34_d_1_pins[] = { AB8500_PIN_G6, AB8500_PIN_H6 };
+/* Digital microphone 5 and 6 */
+static const unsigned dmic56_d_1_pins[] = { AB8500_PIN_F5, AB8500_PIN_G5 };
+static const unsigned extcpena_d_1_pins[] = { AB8500_PIN_R17 };
+static const unsigned gpio35_d_1_pins[] = { AB8500_PIN_W15 };
+/* APE SPI */
+static const unsigned apespi_d_1_pins[] = { AB8500_PIN_A17, AB8500_PIN_E15,
+ AB8500_PIN_C17, AB8500_PIN_E16};
+/* modem SDA/SCL */
+static const unsigned modsclsda_d_1_pins[] = { AB8500_PIN_T19, AB8500_PIN_U19 };
+static const unsigned sysclkreq5_d_1_pins[] = { AB8500_PIN_U2 };
+
+/* Altfunction A column */
+static const unsigned gpio1_a_1_pins[] = { AB8500_PIN_T10 };
+static const unsigned gpio2_a_1_pins[] = { AB8500_PIN_T9 };
+static const unsigned gpio3_a_1_pins[] = { AB8500_PIN_U9 };
+static const unsigned gpio4_a_1_pins[] = { AB8500_PIN_W2 };
+static const unsigned gpio6_a_1_pins[] = { AB8500_PIN_Y18 };
+static const unsigned gpio7_a_1_pins[] = { AB8500_PIN_AA20 };
+static const unsigned gpio8_a_1_pins[] = { AB8500_PIN_W18 };
+static const unsigned gpio9_a_1_pins[] = { AB8500_PIN_AA19 };
+/* YCbCr4 YCbCr5 YCbCr6 YCbCr7*/
+static const unsigned ycbcr4567_a_1_pins[] = { AB8500_PIN_U17, AB8500_PIN_AA18,
+ AB8500_PIN_U16, AB8500_PIN_W17};
+static const unsigned gpio14_a_1_pins[] = { AB8500_PIN_F14 };
+static const unsigned gpio15_a_1_pins[] = { AB8500_PIN_B17 };
+static const unsigned gpio16_a_1_pins[] = { AB8500_PIN_F15 };
+static const unsigned gpio17_a_1_pins[] = { AB8500_PIN_P5 };
+static const unsigned gpio18_a_1_pins[] = { AB8500_PIN_R5 };
+static const unsigned gpio19_a_1_pins[] = { AB8500_PIN_U5 };
+static const unsigned gpio20_a_1_pins[] = { AB8500_PIN_T5 };
+static const unsigned gpio21_a_1_pins[] = { AB8500_PIN_H19 };
+static const unsigned gpio22_a_1_pins[] = { AB8500_PIN_G20 };
+static const unsigned gpio23_a_1_pins[] = { AB8500_PIN_G19 };
+static const unsigned gpio24_a_1_pins[] = { AB8500_PIN_T14 };
+static const unsigned gpio25_a_1_pins[] = { AB8500_PIN_R16 };
+static const unsigned gpio27_a_1_pins[] = { AB8500_PIN_J6 };
+static const unsigned gpio28_a_1_pins[] = { AB8500_PIN_K6 };
+static const unsigned gpio29_a_1_pins[] = { AB8500_PIN_G6 };
+static const unsigned gpio30_a_1_pins[] = { AB8500_PIN_H6 };
+static const unsigned gpio31_a_1_pins[] = { AB8500_PIN_F5 };
+static const unsigned gpio32_a_1_pins[] = { AB8500_PIN_G5 };
+static const unsigned gpio34_a_1_pins[] = { AB8500_PIN_R17 };
+static const unsigned gpio36_a_1_pins[] = { AB8500_PIN_A17 };
+static const unsigned gpio37_a_1_pins[] = { AB8500_PIN_E15 };
+static const unsigned gpio38_a_1_pins[] = { AB8500_PIN_C17 };
+static const unsigned gpio39_a_1_pins[] = { AB8500_PIN_E16 };
+static const unsigned gpio40_a_1_pins[] = { AB8500_PIN_T19 };
+static const unsigned gpio41_a_1_pins[] = { AB8500_PIN_U19 };
+static const unsigned gpio42_a_1_pins[] = { AB8500_PIN_U2 };
+
+/* Altfunction B column */
+static const unsigned hiqclkena_b_1_pins[] = { AB8500_PIN_U17 };
+static const unsigned usbuiccpd_b_1_pins[] = { AB8500_PIN_AA18 };
+static const unsigned i2ctrig1_b_1_pins[] = { AB8500_PIN_U16 };
+static const unsigned i2ctrig2_b_1_pins[] = { AB8500_PIN_W17 };
+
+/* Altfunction C column */
+static const unsigned usbvdat_c_1_pins[] = { AB8500_PIN_W17 };
+
+
+#define AB8500_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
+ .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
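
For reference, AB8500_PIN_GROUP(adi1_d_1, ABX500_DEFAULT) expands, via # stringification and ## token pasting, to:

	{ .name = "adi1_d_1", .pins = adi1_d_1_pins,
	  .npins = ARRAY_SIZE(adi1_d_1_pins), .altsetting = ABX500_DEFAULT }

so every entry in the table below carries its own name, pin list, pin count and the altfunction column it belongs to.
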
+static const struct abx500_pingroup ab8500_groups[] = {
+ /* default column */
+ AB8500_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(ycbcr0123_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(gpio12_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(usbuicc_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(sysclkreq7_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(sysclkreq8_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(gpio26_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(gpio35_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(apespi_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
+ AB8500_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
+ /* Altfunction A column */
+ AB8500_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio6_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio7_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio8_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio9_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(ycbcr4567_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio21_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio22_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio23_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio24_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio25_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio36_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio37_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio38_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio39_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
+ AB8500_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
+ /* Altfunction B column */
+ AB8500_PIN_GROUP(hiqclkena_b_1, ABX500_ALT_B),
+ AB8500_PIN_GROUP(usbuiccpd_b_1, ABX500_ALT_B),
+ AB8500_PIN_GROUP(i2ctrig1_b_1, ABX500_ALT_B),
+ AB8500_PIN_GROUP(i2ctrig2_b_1, ABX500_ALT_B),
+ /* Altfunction C column */
+ AB8500_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define AB8500_FUNC_GROUPS(a, b...) \
+static const char * const a##_groups[] = { b };
+
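
Likewise, AB8500_FUNC_GROUPS(adi1, "adi1_d_1") expands to:

	static const char * const adi1_groups[] = { "adi1_d_1" };

which is the group list referenced later by the matching FUNCTION(adi1) entry.
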
+AB8500_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
+ "sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1",
+ "sysclkreq7_d_1", "sysclkreq8_d_1");
+AB8500_FUNC_GROUPS(ycbcr, "ycbcr0123_d_1", "ycbcr4567_a_1");
+AB8500_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
+ "gpio6_a_1", "gpio7_a_1", "gpio8_a_1", "gpio9_a_1",
+ "gpio10_d_1", "gpio11_d_1", "gpio12_d_1", "gpio13_d_1",
+ "gpio14_a_1", "gpio15_a_1", "gpio16_a_1", "gpio17_a_1",
+ "gpio18_a_1", "gpio19_a_1", "gpio20_a_1", "gpio21_a_1",
+ "gpio22_a_1", "gpio23_a_1", "gpio24_a_1", "gpio25_a_1",
+ "gpio26_d_1", "gpio27_a_1", "gpio28_a_1", "gpio29_a_1",
+ "gpio30_a_1", "gpio31_a_1", "gpio32_a_1", "gpio34_a_1",
+ "gpio35_d_1", "gpio36_a_1", "gpio37_a_1", "gpio38_a_1",
+ "gpio39_a_1", "gpio40_a_1", "gpio41_a_1", "gpio42_a_1");
+AB8500_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
+AB8500_FUNC_GROUPS(adi1, "adi1_d_1");
+AB8500_FUNC_GROUPS(usbuicc, "usbuicc_d_1", "usbuiccpd_b_1");
+AB8500_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
+AB8500_FUNC_GROUPS(extcpena, "extcpena_d_1");
+AB8500_FUNC_GROUPS(apespi, "apespi_d_1");
+AB8500_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
+AB8500_FUNC_GROUPS(hiqclkena, "hiqclkena_b_1");
+AB8500_FUNC_GROUPS(i2ctrig, "i2ctrig1_b_1", "i2ctrig2_b_1");
+AB8500_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
+
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct abx500_function ab8500_functions[] = {
+ FUNCTION(sysclkreq),
+ FUNCTION(ycbcr),
+ FUNCTION(gpio),
+ FUNCTION(pwmout),
+ FUNCTION(adi1),
+ FUNCTION(usbuicc),
+ FUNCTION(dmic),
+ FUNCTION(extcpena),
+ FUNCTION(apespi),
+ FUNCTION(modsclsda),
+ FUNCTION(hiqclkena),
+ FUNCTION(i2ctrig),
+ FUNCTION(usbvdat),
+};
+
+/*
+ * This table translates what is in the AB8500 specification regarding the
+ * ball alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
+ * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
+ * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
+ *
+ * example :
+ *
+ * ALTERNATE_FUNCTIONS(13, 4, 3, 4, 0, 1, 2),
+ * means that pin AB8500_PIN_W17 (pin 13) supports 4 muxes (default, ALT_A,
+ * ALT_B and ALT_C), so the GPIOSEL and ALTERNATFUNC registers are used to
+ * select the mux. The ALTA, ALTB and ALTC values indicate what to write to
+ * the ALTERNATFUNC register. We need to specify these values because the
+ * SoC designers did not apply the same logic for selecting the mux across
+ * the ABx500 family.
+ *
+ * As this pin supports at least the ALT_B mux, the default mux is
+ * selected by writing 1 to the GPIOSEL bit:
+ *
+ * | GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
+ * default | 1 | 0 | 0
+ * alt_A | 0 | 0 | 0
+ * alt_B | 0 | 0 | 1
+ * alt_C | 0 | 1 | 0
+ *
+ * ALTERNATE_FUNCTIONS(8, 7, UNUSED, UNUSED),
+ * means that pin AB8500_PIN_W18 (pin 8) supports 2 muxes, so only the
+ * GPIOSEL register is used to select the mux. As this pin does not
+ * support at least the ALT_B mux, the default mux is selected by writing
+ * 0 to the GPIOSEL bit:
+ *
+ * | GPIOSEL bit=7 | alternatfunc bit2= | alternatfunc bit1=
+ * default | 0 | 0 | 0
+ * alt_A | 1 | 0 | 0
+ */
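
The two tables above boil down to a small decision rule. A sketch of that rule follows; everything in it, including the mux_setting type and its field names, is an editor's illustration, not code from this patch:

/* Editor's sketch: derive GPIOSEL / ALTERNATFUNC values for one pin */
struct mux_setting {
	int gpiosel;	/* value for the pin's GPIOSEL bit */
	int altfunc;	/* value for its ALTERNATFUNC bits, -1 if unused */
};

static struct mux_setting pick_mux(int has_altfunc_bits,
				   int alta, int altb, int altc,
				   int wanted) /* 0=default, 1=A, 2=B, 3=C */
{
	struct mux_setting s = { .gpiosel = 0, .altfunc = -1 };

	if (!has_altfunc_bits) {
		/* only two muxes: GPIOSEL alone decides, default=0, altA=1 */
		s.gpiosel = (wanted != 0);
		return s;
	}

	if (wanted == 0) {
		/* three or four muxes: default keeps GPIOSEL set to 1 */
		s.gpiosel = 1;
	} else {
		/* other muxes: clear GPIOSEL, program ALTERNATFUNC */
		s.gpiosel = 0;
		s.altfunc = (wanted == 1) ? alta : (wanted == 2) ? altb : altc;
	}
	return s;
}
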
+
+struct alternate_functions ab8500_alternate_functions[AB8500_GPIO_MAX_NUMBER + 1] = {
+ ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
+ ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/
+ ALTERNATE_FUNCTIONS(4, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3*/
+ /* bit 4 reserved */
+ ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
+ ALTERNATE_FUNCTIONS(6, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO6, altA controlled by bit 5*/
+ ALTERNATE_FUNCTIONS(7, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO7, altA controlled by bit 6*/
+ ALTERNATE_FUNCTIONS(8, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO8, altA controlled by bit 7*/
+
+ ALTERNATE_FUNCTIONS(9, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO9, altA controlled by bit 0*/
+ ALTERNATE_FUNCTIONS(10, 1, 0, UNUSED, 0, 1, 0), /* GPIO10, altA and altB controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(11, 2, 1, UNUSED, 0, 1, 0), /* GPIO11, altA and altB controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(12, 3, 2, UNUSED, 0, 1, 0), /* GPIO12, altA and altB controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(13, 4, 3, 4, 0, 1, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
+ ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(15, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(16, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
+ /*
+ * pins 17 to 20 are special case, only bit 0 is used to select
+ * alternate function for these 4 pins.
+ * bits 1 to 3 are reserved
+ */
+ ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(18, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(19, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(20, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(21, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO21, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(22, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO22, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(23, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO23, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(24, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO24, altA controlled by bit 7 */
+
+ ALTERNATE_FUNCTIONS(25, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO25, altA controlled by bit 0 */
+ /* pin 26 special case, no alternate function, bit 1 reserved */
+ ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* GPIO26 */
+ ALTERNATE_FUNCTIONS(27, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(28, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(29, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(30, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(31, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(32, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
+
+ ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
+ ALTERNATE_FUNCTIONS(34, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
+ /* pin 35 special case, no alternate function, bit 2 reserved */
+ ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* GPIO35 */
+ ALTERNATE_FUNCTIONS(36, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO36, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(37, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO37, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(38, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO38, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(39, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO39, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(40, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7 */
+
+ ALTERNATE_FUNCTIONS(41, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(42, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
+};
+
+/*
+ * Only some GPIOs are interrupt capable, and they are
+ * organized in discontiguous clusters:
+ *
+ * GPIO6 to GPIO13
+ * GPIO24 and GPIO25
+ * GPIO36 to GPIO41
+ */
+struct abx500_gpio_irq_cluster ab8500_gpio_irq_cluster[] = {
+ GPIO_IRQ_CLUSTER(6, 13, AB8500_INT_GPIO6R),
+ GPIO_IRQ_CLUSTER(24, 25, AB8500_INT_GPIO24R),
+ GPIO_IRQ_CLUSTER(36, 41, AB8500_INT_GPIO36R),
+};
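
Only a GPIO that falls inside one of these clusters can be used as an interrupt source, so a lookup merely has to scan the table. A minimal sketch; the start/end field names of the cluster struct are assumptions made for illustration:

/* Editor's sketch: check whether a GPIO belongs to an IRQ-capable cluster */
static bool ab8500_gpio_has_irq(unsigned int gpio)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ab8500_gpio_irq_cluster); i++) {
		if (gpio >= ab8500_gpio_irq_cluster[i].start &&
		    gpio <= ab8500_gpio_irq_cluster[i].end)
			return true;
	}
	return false;
}
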
+
+static struct abx500_pinctrl_soc_data ab8500_soc = {
+ .gpio_ranges = ab8500_pinranges,
+ .gpio_num_ranges = ARRAY_SIZE(ab8500_pinranges),
+ .pins = ab8500_pins,
+ .npins = ARRAY_SIZE(ab8500_pins),
+ .functions = ab8500_functions,
+ .nfunctions = ARRAY_SIZE(ab8500_functions),
+ .groups = ab8500_groups,
+ .ngroups = ARRAY_SIZE(ab8500_groups),
+ .alternate_functions = ab8500_alternate_functions,
+ .gpio_irq_cluster = ab8500_gpio_irq_cluster,
+ .ngpio_irq_cluster = ARRAY_SIZE(ab8500_gpio_irq_cluster),
+ .irq_gpio_rising_offset = AB8500_INT_GPIO6R,
+ .irq_gpio_falling_offset = AB8500_INT_GPIO6F,
+ .irq_gpio_factor = 1,
+};
+
+void abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc)
+{
+ *soc = &ab8500_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-ab8505.c b/drivers/pinctrl/pinctrl-ab8505.c
new file mode 100644
index 000000000000..3a4238e879e3
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ab8505.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include "pinctrl-abx500.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define ABX500_GPIO(offset) (offset)
+
+#define AB8505_PIN_N4 ABX500_GPIO(1)
+#define AB8505_PIN_R5 ABX500_GPIO(2)
+#define AB8505_PIN_P5 ABX500_GPIO(3)
+/* hole */
+#define AB8505_PIN_B16 ABX500_GPIO(10)
+#define AB8505_PIN_B17 ABX500_GPIO(11)
+/* hole */
+#define AB8505_PIN_D17 ABX500_GPIO(13)
+#define AB8505_PIN_C16 ABX500_GPIO(14)
+/* hole */
+#define AB8505_PIN_P2 ABX500_GPIO(17)
+#define AB8505_PIN_N3 ABX500_GPIO(18)
+#define AB8505_PIN_T1 ABX500_GPIO(19)
+#define AB8505_PIN_P3 ABX500_GPIO(20)
+/* hole */
+#define AB8505_PIN_H14 ABX500_GPIO(34)
+/* hole */
+#define AB8505_PIN_J15 ABX500_GPIO(40)
+#define AB8505_PIN_J14 ABX500_GPIO(41)
+/* hole */
+#define AB8505_PIN_L4 ABX500_GPIO(50)
+/* hole */
+#define AB8505_PIN_D16 ABX500_GPIO(52)
+#define AB8505_PIN_D15 ABX500_GPIO(53)
+
+/* indicates the highest GPIO number */
+#define AB8505_GPIO_MAX_NUMBER 53
+
+/*
+ * The names of the pins are denoted by GPIO number and ball name, even
+ * though they can be used for other things than GPIO, this is the first
+ * column in the table of the data sheet and often used on schematics and
+ * such.
+ */
+static const struct pinctrl_pin_desc ab8505_pins[] = {
+ PINCTRL_PIN(AB8505_PIN_N4, "GPIO1_N4"),
+ PINCTRL_PIN(AB8505_PIN_R5, "GPIO2_R5"),
+ PINCTRL_PIN(AB8505_PIN_P5, "GPIO3_P5"),
+/* hole */
+ PINCTRL_PIN(AB8505_PIN_B16, "GPIO10_B16"),
+ PINCTRL_PIN(AB8505_PIN_B17, "GPIO11_B17"),
+/* hole */
+ PINCTRL_PIN(AB8505_PIN_D17, "GPIO13_D17"),
+ PINCTRL_PIN(AB8505_PIN_C16, "GPIO14_C16"),
+/* hole */
+ PINCTRL_PIN(AB8505_PIN_P2, "GPIO17_P2"),
+ PINCTRL_PIN(AB8505_PIN_N3, "GPIO18_N3"),
+ PINCTRL_PIN(AB8505_PIN_T1, "GPIO19_T1"),
+ PINCTRL_PIN(AB8505_PIN_P3, "GPIO20_P3"),
+/* hole */
+ PINCTRL_PIN(AB8505_PIN_H14, "GPIO34_H14"),
+/* hole */
+ PINCTRL_PIN(AB8505_PIN_J15, "GPIO40_J15"),
+ PINCTRL_PIN(AB8505_PIN_J14, "GPIO41_J14"),
+/* hole */
+ PINCTRL_PIN(AB8505_PIN_L4, "GPIO50_L4"),
+/* hole */
+ PINCTRL_PIN(AB8505_PIN_D16, "GPIO52_D16"),
+ PINCTRL_PIN(AB8505_PIN_D15, "GPIO53_D15"),
+};
+
+/*
+ * Maps local GPIO offsets to local pin numbers
+ */
+static const struct abx500_pinrange ab8505_pinranges[] = {
+ ABX500_PINRANGE(1, 3, ABX500_ALT_A),
+ ABX500_PINRANGE(10, 2, ABX500_DEFAULT),
+ ABX500_PINRANGE(13, 1, ABX500_DEFAULT),
+ ABX500_PINRANGE(14, 1, ABX500_ALT_A),
+ ABX500_PINRANGE(17, 4, ABX500_ALT_A),
+ ABX500_PINRANGE(34, 1, ABX500_ALT_A),
+ ABX500_PINRANGE(40, 2, ABX500_ALT_A),
+ ABX500_PINRANGE(50, 1, ABX500_DEFAULT),
+ ABX500_PINRANGE(52, 2, ABX500_ALT_A),
+};
+
+/*
+ * Read the pin group names like this:
+ * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
+
+/* default column */
+static const unsigned sysclkreq2_d_1_pins[] = { AB8505_PIN_N4 };
+static const unsigned sysclkreq3_d_1_pins[] = { AB8505_PIN_R5 };
+static const unsigned sysclkreq4_d_1_pins[] = { AB8505_PIN_P5 };
+static const unsigned gpio10_d_1_pins[] = { AB8505_PIN_B16 };
+static const unsigned gpio11_d_1_pins[] = { AB8505_PIN_B17 };
+static const unsigned gpio13_d_1_pins[] = { AB8505_PIN_D17 };
+static const unsigned pwmout1_d_1_pins[] = { AB8505_PIN_C16 };
+/* audio data interface 2*/
+static const unsigned adi2_d_1_pins[] = { AB8505_PIN_P2, AB8505_PIN_N3,
+ AB8505_PIN_T1, AB8505_PIN_P3 };
+static const unsigned extcpena_d_1_pins[] = { AB8505_PIN_H14 };
+/* modem SDA/SCL */
+static const unsigned modsclsda_d_1_pins[] = { AB8505_PIN_J15, AB8505_PIN_J14 };
+static const unsigned gpio50_d_1_pins[] = { AB8505_PIN_L4 };
+static const unsigned resethw_d_1_pins[] = { AB8505_PIN_D16 };
+static const unsigned service_d_1_pins[] = { AB8505_PIN_D15 };
+
+/* Altfunction A column */
+static const unsigned gpio1_a_1_pins[] = { AB8505_PIN_N4 };
+static const unsigned gpio2_a_1_pins[] = { AB8505_PIN_R5 };
+static const unsigned gpio3_a_1_pins[] = { AB8505_PIN_P5 };
+static const unsigned hiqclkena_a_1_pins[] = { AB8505_PIN_B16 };
+static const unsigned pdmclk_a_1_pins[] = { AB8505_PIN_B17 };
+static const unsigned uarttxdata_a_1_pins[] = { AB8505_PIN_D17 };
+static const unsigned gpio14_a_1_pins[] = { AB8505_PIN_C16 };
+static const unsigned gpio17_a_1_pins[] = { AB8505_PIN_P2 };
+static const unsigned gpio18_a_1_pins[] = { AB8505_PIN_N3 };
+static const unsigned gpio19_a_1_pins[] = { AB8505_PIN_T1 };
+static const unsigned gpio20_a_1_pins[] = { AB8505_PIN_P3 };
+static const unsigned gpio34_a_1_pins[] = { AB8505_PIN_H14 };
+static const unsigned gpio40_a_1_pins[] = { AB8505_PIN_J15 };
+static const unsigned gpio41_a_1_pins[] = { AB8505_PIN_J14 };
+static const unsigned uartrxdata_a_1_pins[] = { AB8505_PIN_J14 };
+static const unsigned gpio50_a_1_pins[] = { AB8505_PIN_L4 };
+static const unsigned gpio52_a_1_pins[] = { AB8505_PIN_D16 };
+static const unsigned gpio53_a_1_pins[] = { AB8505_PIN_D15 };
+
+/* Altfunction B column */
+static const unsigned pdmdata_b_1_pins[] = { AB8505_PIN_B16 };
+static const unsigned extvibrapwm1_b_1_pins[] = { AB8505_PIN_D17 };
+static const unsigned extvibrapwm2_b_1_pins[] = { AB8505_PIN_L4 };
+
+/* Altfunction C column */
+static const unsigned usbvdat_c_1_pins[] = { AB8505_PIN_D17 };
+
+#define AB8505_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
+ .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
+static const struct abx500_pingroup ab8505_groups[] = {
+ AB8505_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(adi2_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(gpio50_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(resethw_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(service_d_1, ABX500_DEFAULT),
+ AB8505_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(hiqclkena_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(pdmclk_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(uarttxdata_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(uartrxdata_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
+ AB8505_PIN_GROUP(extvibrapwm1_b_1, ABX500_ALT_B),
+ AB8505_PIN_GROUP(extvibrapwm2_b_1, ABX500_ALT_B),
+ AB8505_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define AB8505_FUNC_GROUPS(a, b...) \
+static const char * const a##_groups[] = { b };
+
+AB8505_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
+ "sysclkreq4_d_1");
+AB8505_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1",
+ "gpio10_d_1", "gpio11_d_1", "gpio13_d_1", "gpio14_a_1",
+ "gpio17_a_1", "gpio18_a_1", "gpio19_a_1", "gpio20_a_1",
+ "gpio34_a_1", "gpio40_a_1", "gpio41_a_1", "gpio50_d_1",
+ "gpio52_a_1", "gpio53_a_1");
+AB8505_FUNC_GROUPS(pwmout, "pwmout1_d_1");
+AB8505_FUNC_GROUPS(adi2, "adi2_d_1");
+AB8505_FUNC_GROUPS(extcpena, "extcpena_d_1");
+AB8505_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
+AB8505_FUNC_GROUPS(resethw, "resethw_d_1");
+AB8505_FUNC_GROUPS(service, "service_d_1");
+AB8505_FUNC_GROUPS(hiqclkena, "hiqclkena_a_1");
+AB8505_FUNC_GROUPS(pdm, "pdmclk_a_1", "pdmdata_b_1");
+AB8505_FUNC_GROUPS(uartdata, "uarttxdata_a_1", "uartrxdata_a_1");
+AB8505_FUNC_GROUPS(extvibra, "extvibrapwm1_b_1", "extvibrapwm2_b_1");
+AB8505_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
+
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct abx500_function ab8505_functions[] = {
+ FUNCTION(sysclkreq),
+ FUNCTION(gpio),
+ FUNCTION(pwmout),
+ FUNCTION(adi2),
+ FUNCTION(extcpena),
+ FUNCTION(modsclsda),
+ FUNCTION(resethw),
+ FUNCTION(service),
+ FUNCTION(hiqclkena),
+ FUNCTION(pdm),
+ FUNCTION(uartdata),
+	FUNCTION(extvibra),
+ FUNCTION(usbvdat),
+};
+
+/*
+ * This table translates what is in the AB8505 specification regarding the
+ * ball alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
+ * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
+ * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
+ *
+ * example :
+ *
+ * ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2),
+ * means that pin AB8505_PIN_D17 (pin 13) supports 4 muxes (default, ALT_A,
+ * ALT_B and ALT_C), so the GPIOSEL and ALTERNATFUNC registers are used to
+ * select the mux. The ALTA, ALTB and ALTC values indicate what to write to
+ * the ALTERNATFUNC register. We need to specify these values because the
+ * SoC designers did not apply the same logic for selecting the mux across
+ * the ABx500 family.
+ *
+ * As this pin supports at least the ALT_B mux, the default mux is
+ * selected by writing 1 to the GPIOSEL bit:
+ *
+ * | GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
+ * default | 1 | 0 | 0
+ * alt_A | 0 | 0 | 1
+ * alt_B | 0 | 0 | 0
+ * alt_C | 0 | 1 | 0
+ *
+ * ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED),
+ * means that pin AB8505_PIN_N4 (pin 1) supports 2 muxes, so only the
+ * GPIOSEL register is used to select the mux. As this pin does not
+ * support at least the ALT_B mux, the default mux is selected by writing
+ * 0 to the GPIOSEL bit:
+ *
+ * | GPIOSEL bit=0 | alternatfunc bit2= | alternatfunc bit1=
+ * default | 0 | 0 | 0
+ * alt_A | 1 | 0 | 0
+ */
+
+struct alternate_functions ab8505_alternate_functions[AB8505_GPIO_MAX_NUMBER + 1] = {
+ ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
+ ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/
+ ALTERNATE_FUNCTIONS(4, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO4, bit 3 reserved */
+ ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5, bit 4 reserved */
+ ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6, bit 5 reserved */
+ ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7, bit 6 reserved */
+ ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8, bit 7 reserved */
+
+ ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9, bit 0 reserved */
+ ALTERNATE_FUNCTIONS(10, 1, 0, UNUSED, 1, 0, 0), /* GPIO10, altA and altB controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(11, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO11, altA controlled by bit 2 */
+	ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12, bit 3 reserved */
+ ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
+ ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(15, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO15, bit 6 reserved */
+	ALTERNATE_FUNCTIONS(16, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO16, bit 7 reserved */
+ /*
+ * pins 17 to 20 are special case, only bit 0 is used to select
+ * alternate function for these 4 pins.
+ * bits 1 to 3 are reserved
+ */
+ ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(18, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(19, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(20, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(21, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO21, bit 4 reserved */
+ ALTERNATE_FUNCTIONS(22, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO22, bit 5 reserved */
+ ALTERNATE_FUNCTIONS(23, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO23, bit 6 reserved */
+ ALTERNATE_FUNCTIONS(24, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO24, bit 7 reserved */
+
+ ALTERNATE_FUNCTIONS(25, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO25, bit 0 reserved */
+ ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26, bit 1 reserved */
+ ALTERNATE_FUNCTIONS(27, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO27, bit 2 reserved */
+ ALTERNATE_FUNCTIONS(28, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO28, bit 3 reserved */
+ ALTERNATE_FUNCTIONS(29, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO29, bit 4 reserved */
+ ALTERNATE_FUNCTIONS(30, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO30, bit 5 reserved */
+ ALTERNATE_FUNCTIONS(31, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO31, bit 6 reserved */
+ ALTERNATE_FUNCTIONS(32, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO32, bit 7 reserved */
+
+ ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33, bit 0 reserved */
+ ALTERNATE_FUNCTIONS(34, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35, bit 2 reserved */
+	ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36, bit 3 reserved */
+	ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37, bit 4 reserved */
+	ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38, bit 5 reserved */
+	ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39, bit 6 reserved */
+ ALTERNATE_FUNCTIONS(40, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7*/
+
+ ALTERNATE_FUNCTIONS(41, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(42, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO42, bit 1 reserved */
+ ALTERNATE_FUNCTIONS(43, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO43, bit 2 reserved */
+ ALTERNATE_FUNCTIONS(44, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO44, bit 3 reserved */
+ ALTERNATE_FUNCTIONS(45, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO45, bit 4 reserved */
+ ALTERNATE_FUNCTIONS(46, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO46, bit 5 reserved */
+ ALTERNATE_FUNCTIONS(47, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO47, bit 6 reserved */
+ ALTERNATE_FUNCTIONS(48, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO48, bit 7 reserved */
+
+ ALTERNATE_FUNCTIONS(49, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49, bit 0 reserved */
+ ALTERNATE_FUNCTIONS(50, 1, 2, UNUSED, 1, 0, 0), /* GPIO50, altA controlled by bit 1 */
+	ALTERNATE_FUNCTIONS(51, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO51, bit 2 reserved */
+ ALTERNATE_FUNCTIONS(52, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(53, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
+};
+
+/*
+ * For AB8505 Only some GPIOs are interrupt capable, and they are
+ * organized in discontiguous clusters:
+ *
+ * GPIO10 to GPIO11
+ * GPIO13
+ * GPIO40 and GPIO41
+ * GPIO50
+ * GPIO52 to GPIO53
+ */
+struct abx500_gpio_irq_cluster ab8505_gpio_irq_cluster[] = {
+ GPIO_IRQ_CLUSTER(10, 11, AB8500_INT_GPIO10R),
+ GPIO_IRQ_CLUSTER(13, 13, AB8500_INT_GPIO13R),
+ GPIO_IRQ_CLUSTER(40, 41, AB8500_INT_GPIO40R),
+ GPIO_IRQ_CLUSTER(50, 50, AB9540_INT_GPIO50R),
+ GPIO_IRQ_CLUSTER(52, 53, AB9540_INT_GPIO52R),
+};
+
+static struct abx500_pinctrl_soc_data ab8505_soc = {
+ .gpio_ranges = ab8505_pinranges,
+ .gpio_num_ranges = ARRAY_SIZE(ab8505_pinranges),
+ .pins = ab8505_pins,
+ .npins = ARRAY_SIZE(ab8505_pins),
+ .functions = ab8505_functions,
+ .nfunctions = ARRAY_SIZE(ab8505_functions),
+ .groups = ab8505_groups,
+ .ngroups = ARRAY_SIZE(ab8505_groups),
+ .alternate_functions = ab8505_alternate_functions,
+ .gpio_irq_cluster = ab8505_gpio_irq_cluster,
+ .ngpio_irq_cluster = ARRAY_SIZE(ab8505_gpio_irq_cluster),
+ .irq_gpio_rising_offset = AB8500_INT_GPIO6R,
+ .irq_gpio_falling_offset = AB8500_INT_GPIO6F,
+ .irq_gpio_factor = 1,
+};
+
+void
+abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc)
+{
+ *soc = &ab8505_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-ab8540.c b/drivers/pinctrl/pinctrl-ab8540.c
new file mode 100644
index 000000000000..8ee1e8d95f65
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ab8540.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include "pinctrl-abx500.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define ABX500_GPIO(offset) (offset)
+
+#define AB8540_PIN_J16 ABX500_GPIO(1)
+#define AB8540_PIN_D17 ABX500_GPIO(2)
+#define AB8540_PIN_C12 ABX500_GPIO(3)
+#define AB8540_PIN_G12 ABX500_GPIO(4)
+/* hole */
+#define AB8540_PIN_D16 ABX500_GPIO(14)
+#define AB8540_PIN_F15 ABX500_GPIO(15)
+#define AB8540_PIN_J8 ABX500_GPIO(16)
+#define AB8540_PIN_K16 ABX500_GPIO(17)
+#define AB8540_PIN_G15 ABX500_GPIO(18)
+#define AB8540_PIN_F17 ABX500_GPIO(19)
+#define AB8540_PIN_E17 ABX500_GPIO(20)
+/* hole */
+#define AB8540_PIN_AA16 ABX500_GPIO(27)
+#define AB8540_PIN_W18 ABX500_GPIO(28)
+#define AB8540_PIN_Y15 ABX500_GPIO(29)
+#define AB8540_PIN_W16 ABX500_GPIO(30)
+#define AB8540_PIN_V15 ABX500_GPIO(31)
+#define AB8540_PIN_W17 ABX500_GPIO(32)
+/* hole */
+#define AB8540_PIN_D12 ABX500_GPIO(42)
+#define AB8540_PIN_P4 ABX500_GPIO(43)
+#define AB8540_PIN_AB1 ABX500_GPIO(44)
+#define AB8540_PIN_K7 ABX500_GPIO(45)
+#define AB8540_PIN_L7 ABX500_GPIO(46)
+#define AB8540_PIN_G10 ABX500_GPIO(47)
+#define AB8540_PIN_K12 ABX500_GPIO(48)
+/* hole */
+#define AB8540_PIN_N8 ABX500_GPIO(51)
+#define AB8540_PIN_P12 ABX500_GPIO(52)
+#define AB8540_PIN_K8 ABX500_GPIO(53)
+#define AB8540_PIN_J11 ABX500_GPIO(54)
+#define AB8540_PIN_AC2 ABX500_GPIO(55)
+#define AB8540_PIN_AB2 ABX500_GPIO(56)
+
+/* indicates the highest GPIO number */
+#define AB8540_GPIO_MAX_NUMBER 56
+
+/*
+ * The names of the pins are denoted by GPIO number and ball name, even
+ * though they can be used for other things than GPIO, this is the first
+ * column in the table of the data sheet and often used on schematics and
+ * such.
+ */
+static const struct pinctrl_pin_desc ab8540_pins[] = {
+ PINCTRL_PIN(AB8540_PIN_J16, "GPIO1_J16"),
+ PINCTRL_PIN(AB8540_PIN_D17, "GPIO2_D17"),
+ PINCTRL_PIN(AB8540_PIN_C12, "GPIO3_C12"),
+ PINCTRL_PIN(AB8540_PIN_G12, "GPIO4_G12"),
+ /* hole */
+ PINCTRL_PIN(AB8540_PIN_D16, "GPIO14_D16"),
+ PINCTRL_PIN(AB8540_PIN_F15, "GPIO15_F15"),
+ PINCTRL_PIN(AB8540_PIN_J8, "GPIO16_J8"),
+ PINCTRL_PIN(AB8540_PIN_K16, "GPIO17_K16"),
+ PINCTRL_PIN(AB8540_PIN_G15, "GPIO18_G15"),
+ PINCTRL_PIN(AB8540_PIN_F17, "GPIO19_F17"),
+ PINCTRL_PIN(AB8540_PIN_E17, "GPIO20_E17"),
+ /* hole */
+ PINCTRL_PIN(AB8540_PIN_AA16, "GPIO27_AA16"),
+ PINCTRL_PIN(AB8540_PIN_W18, "GPIO28_W18"),
+ PINCTRL_PIN(AB8540_PIN_Y15, "GPIO29_Y15"),
+ PINCTRL_PIN(AB8540_PIN_W16, "GPIO30_W16"),
+ PINCTRL_PIN(AB8540_PIN_V15, "GPIO31_V15"),
+ PINCTRL_PIN(AB8540_PIN_W17, "GPIO32_W17"),
+ /* hole */
+ PINCTRL_PIN(AB8540_PIN_D12, "GPIO42_D12"),
+ PINCTRL_PIN(AB8540_PIN_P4, "GPIO43_P4"),
+ PINCTRL_PIN(AB8540_PIN_AB1, "GPIO44_AB1"),
+ PINCTRL_PIN(AB8540_PIN_K7, "GPIO45_K7"),
+ PINCTRL_PIN(AB8540_PIN_L7, "GPIO46_L7"),
+ PINCTRL_PIN(AB8540_PIN_G10, "GPIO47_G10"),
+ PINCTRL_PIN(AB8540_PIN_K12, "GPIO48_K12"),
+ /* hole */
+ PINCTRL_PIN(AB8540_PIN_N8, "GPIO51_N8"),
+ PINCTRL_PIN(AB8540_PIN_P12, "GPIO52_P12"),
+ PINCTRL_PIN(AB8540_PIN_K8, "GPIO53_K8"),
+ PINCTRL_PIN(AB8540_PIN_J11, "GPIO54_J11"),
+ PINCTRL_PIN(AB8540_PIN_AC2, "GPIO55_AC2"),
+ PINCTRL_PIN(AB8540_PIN_AB2, "GPIO56_AB2"),
+};
+
+/*
+ * Maps local GPIO offsets to local pin numbers
+ */
+static const struct abx500_pinrange ab8540_pinranges[] = {
+ ABX500_PINRANGE(1, 4, ABX500_ALT_A),
+ ABX500_PINRANGE(14, 7, ABX500_ALT_A),
+ ABX500_PINRANGE(27, 6, ABX500_ALT_A),
+ ABX500_PINRANGE(42, 7, ABX500_ALT_A),
+ ABX500_PINRANGE(51, 6, ABX500_ALT_A),
+};
+
+/*
+ * Read the pin group names like this:
+ * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
+
+/* default column */
+static const unsigned sysclkreq2_d_1_pins[] = { AB8540_PIN_J16 };
+static const unsigned sysclkreq3_d_1_pins[] = { AB8540_PIN_D17 };
+static const unsigned sysclkreq4_d_1_pins[] = { AB8540_PIN_C12 };
+static const unsigned sysclkreq6_d_1_pins[] = { AB8540_PIN_G12 };
+static const unsigned pwmout1_d_1_pins[] = { AB8540_PIN_D16 };
+static const unsigned pwmout2_d_1_pins[] = { AB8540_PIN_F15 };
+static const unsigned pwmout3_d_1_pins[] = { AB8540_PIN_J8 };
+
+/* audio data interface 1*/
+static const unsigned adi1_d_1_pins[] = { AB8540_PIN_K16, AB8540_PIN_G15,
+ AB8540_PIN_F17, AB8540_PIN_E17 };
+/* Digital microphone 1 and 2 */
+static const unsigned dmic12_d_1_pins[] = { AB8540_PIN_AA16, AB8540_PIN_W18 };
+/* Digital microphone 3 and 4 */
+static const unsigned dmic34_d_1_pins[] = { AB8540_PIN_Y15, AB8540_PIN_W16 };
+/* Digital microphone 5 and 6 */
+static const unsigned dmic56_d_1_pins[] = { AB8540_PIN_V15, AB8540_PIN_W17 };
+static const unsigned sysclkreq5_d_1_pins[] = { AB8540_PIN_D12 };
+static const unsigned batremn_d_1_pins[] = { AB8540_PIN_P4 };
+static const unsigned service_d_1_pins[] = { AB8540_PIN_AB1 };
+static const unsigned pwrctrl0_d_1_pins[] = { AB8540_PIN_K7 };
+static const unsigned pwrctrl1_d_1_pins[] = { AB8540_PIN_L7 };
+static const unsigned pwmextvibra1_d_1_pins[] = { AB8540_PIN_G10 };
+static const unsigned pwmextvibra2_d_1_pins[] = { AB8540_PIN_K12 };
+static const unsigned gpio1_vbat_d_1_pins[] = { AB8540_PIN_N8 };
+static const unsigned gpio2_vbat_d_1_pins[] = { AB8540_PIN_P12 };
+static const unsigned gpio3_vbat_d_1_pins[] = { AB8540_PIN_K8 };
+static const unsigned gpio4_vbat_d_1_pins[] = { AB8540_PIN_J11 };
+static const unsigned pdmclkdat_d_1_pins[] = { AB8540_PIN_AC2, AB8540_PIN_AB2 };
+
+/* Altfunction A column */
+static const unsigned gpio1_a_1_pins[] = { AB8540_PIN_J16 };
+static const unsigned gpio2_a_1_pins[] = { AB8540_PIN_D17 };
+static const unsigned gpio3_a_1_pins[] = { AB8540_PIN_C12 };
+static const unsigned gpio4_a_1_pins[] = { AB8540_PIN_G12 };
+static const unsigned gpio14_a_1_pins[] = { AB8540_PIN_D16 };
+static const unsigned gpio15_a_1_pins[] = { AB8540_PIN_F15 };
+static const unsigned gpio16_a_1_pins[] = { AB8540_PIN_J8 };
+static const unsigned gpio17_a_1_pins[] = { AB8540_PIN_K16 };
+static const unsigned gpio18_a_1_pins[] = { AB8540_PIN_G15 };
+static const unsigned gpio19_a_1_pins[] = { AB8540_PIN_F17 };
+static const unsigned gpio20_a_1_pins[] = { AB8540_PIN_E17 };
+static const unsigned gpio27_a_1_pins[] = { AB8540_PIN_AA16 };
+static const unsigned gpio28_a_1_pins[] = { AB8540_PIN_W18 };
+static const unsigned gpio29_a_1_pins[] = { AB8540_PIN_Y15 };
+static const unsigned gpio30_a_1_pins[] = { AB8540_PIN_W16 };
+static const unsigned gpio31_a_1_pins[] = { AB8540_PIN_V15 };
+static const unsigned gpio32_a_1_pins[] = { AB8540_PIN_W17 };
+static const unsigned gpio42_a_1_pins[] = { AB8540_PIN_D12 };
+static const unsigned gpio43_a_1_pins[] = { AB8540_PIN_P4 };
+static const unsigned gpio44_a_1_pins[] = { AB8540_PIN_AB1 };
+static const unsigned gpio45_a_1_pins[] = { AB8540_PIN_K7 };
+static const unsigned gpio46_a_1_pins[] = { AB8540_PIN_L7 };
+static const unsigned gpio47_a_1_pins[] = { AB8540_PIN_G10 };
+static const unsigned gpio48_a_1_pins[] = { AB8540_PIN_K12 };
+static const unsigned gpio51_a_1_pins[] = { AB8540_PIN_N8 };
+static const unsigned gpio52_a_1_pins[] = { AB8540_PIN_P12 };
+static const unsigned gpio53_a_1_pins[] = { AB8540_PIN_K8 };
+static const unsigned gpio54_a_1_pins[] = { AB8540_PIN_J11 };
+static const unsigned gpio55_a_1_pins[] = { AB8540_PIN_AC2 };
+static const unsigned gpio56_a_1_pins[] = { AB8540_PIN_AB2 };
+
+#define AB8540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
+ .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
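+/*
+ * For instance, AB8540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT) expands to:
+ *	{ .name = "adi1_d_1", .pins = adi1_d_1_pins,
+ *	  .npins = ARRAY_SIZE(adi1_d_1_pins), .altsetting = ABX500_DEFAULT }
+ */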
+
+static const struct abx500_pingroup ab8540_groups[] = {
+ /* default column */
+ AB8540_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(batremn_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(service_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(pwrctrl0_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(pwrctrl1_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(pwmextvibra1_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(pwmextvibra2_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(gpio1_vbat_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(gpio2_vbat_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(gpio3_vbat_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(gpio4_vbat_d_1, ABX500_DEFAULT),
+ AB8540_PIN_GROUP(pdmclkdat_d_1, ABX500_DEFAULT),
+ /* Altfunction A column */
+ AB8540_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio43_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio44_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio45_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio46_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio47_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio48_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio51_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio54_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio55_a_1, ABX500_ALT_A),
+ AB8540_PIN_GROUP(gpio56_a_1, ABX500_ALT_A),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define AB8540_FUNC_GROUPS(a, b...) \
+static const char * const a##_groups[] = { b };
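+/*
+ * For instance, AB8540_FUNC_GROUPS(adi1, "adi1_d_1") expands to:
+ *	static const char * const adi1_groups[] = { "adi1_d_1" };
+ */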
+
+AB8540_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
+ "sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1");
+AB8540_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
+ "gpio14_a_1", "gpio15_a_1", "gpio16_a_1", "gpio17_a_1",
+ "gpio18_a_1", "gpio19_a_1", "gpio20_a_1", "gpio27_a_1",
+ "gpio28_a_1", "gpio29_a_1", "gpio30_a_1", "gpio31_a_1",
+ "gpio32_a_1", "gpio42_a_1", "gpio43_a_1", "gpio44_a_1",
+ "gpio45_a_1", "gpio46_a_1", "gpio47_a_1", "gpio48_a_1",
+ "gpio51_a_1", "gpio52_a_1", "gpio53_a_1", "gpio54_a_1",
+ "gpio55_a_1", "gpio56_a_1");
+AB8540_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
+AB8540_FUNC_GROUPS(adi1, "adi1_d_1");
+AB8540_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
+AB8540_FUNC_GROUPS(batremn, "batremn_d_1");
+AB8540_FUNC_GROUPS(service, "service_d_1");
+AB8540_FUNC_GROUPS(pwrctrl, "pwrctrl0_d_1", "pwrctrl1_d_1");
+AB8540_FUNC_GROUPS(pwmextvibra, "pwmextvibra1_d_1", "pwmextvibra2_d_1");
+AB8540_FUNC_GROUPS(gpio_vbat, "gpio1_vbat_d_1", "gpio2_vbat_d_1",
+ "gpio3_vbat_d_1", "gpio4_vbat_d_1");
+AB8540_FUNC_GROUPS(pdm, "pdmclkdat_d_1");
+
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct abx500_function ab8540_functions[] = {
+ FUNCTION(sysclkreq),
+ FUNCTION(gpio),
+ FUNCTION(pwmout),
+ FUNCTION(adi1),
+ FUNCTION(dmic),
+ FUNCTION(batremn),
+ FUNCTION(service),
+ FUNCTION(pwrctrl),
+ FUNCTION(pwmextvibra),
+ FUNCTION(gpio_vbat),
+ FUNCTION(pdm),
+};
+
+/*
+ * This table translates what is in the AB8540 specification regarding the
+ * ball alternate functions (as for DB: default, ALT_A, ALT_B and ALT_C).
+ * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATEFUNC bit1,
+ * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
+ * The AB8540 only supports the DEFAULT and ALTA functions, so the
+ * ALTERNATEFUNC register is not used.
+ */
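+/*
+ * For example, ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0) below
+ * means that GPIO1 only has the DEFAULT and ALTA settings, and that ALTA
+ * is selected with bit 0 of the GPIOSEL1 register.
+ */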
+
+struct alternate_functions ab8540_alternate_functions[AB8540_GPIO_MAX_NUMBER + 1] = {
+ /* GPIOSEL1 - bit 4-7 reserved */
+ ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
+ ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(4, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
+ ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6 */
+ ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7 */
+ ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8 */
+ /* GPIOSEL2 - bit 0-4 reserved */
+ ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9 */
+ ALTERNATE_FUNCTIONS(10, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO10 */
+ ALTERNATE_FUNCTIONS(11, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO11 */
+ ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12 */
+ ALTERNATE_FUNCTIONS(13, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO13 */
+ ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(15, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(16, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
+ /* GPIOSEL3 - bit 4-7 reserved */
+ ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(18, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(19, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(20, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(21, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO21 */
+ ALTERNATE_FUNCTIONS(22, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO22 */
+ ALTERNATE_FUNCTIONS(23, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO23 */
+ ALTERNATE_FUNCTIONS(24, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO24 */
+ /* GPIOSEL4 - bit 0-1 reserved */
+ ALTERNATE_FUNCTIONS(25, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO25 */
+ ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26 */
+ ALTERNATE_FUNCTIONS(27, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(28, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(29, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(30, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(31, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(32, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
+ /* GPIOSEL5 - bit 0-7 reserved */
+ ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
+ ALTERNATE_FUNCTIONS(34, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO34 */
+ ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35 */
+ ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36 */
+ ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37 */
+ ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38 */
+ ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39 */
+ ALTERNATE_FUNCTIONS(40, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO40 */
+ /* GPIOSEL6 - bit 0 reserved */
+ ALTERNATE_FUNCTIONS(41, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO41 */
+ ALTERNATE_FUNCTIONS(42, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(43, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO43, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(44, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO44, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(45, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO45, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(46, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO46, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(47, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO47, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(48, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO48, altA controlled by bit 7 */
+ /* GPIOSEL7 - bit 0-1 reserved */
+ ALTERNATE_FUNCTIONS(49, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49 */
+ ALTERNATE_FUNCTIONS(50, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO50 */
+ ALTERNATE_FUNCTIONS(51, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO51, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(52, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(53, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(54, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO54, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(55, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO55, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(56, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO56, altA controlled by bit 7 */
+};
+
+static struct pullud ab8540_pullud = {
+ .first_pin = 51, /* GPIO1_VBAT */
+ .last_pin = 54, /* GPIO4_VBAT */
+};
+
+/*
+ * For the AB8540, only some GPIOs are interrupt capable:
+ * GPIO43 to GPIO44
+ * GPIO51 to GPIO54
+ */
+struct abx500_gpio_irq_cluster ab8540_gpio_irq_cluster[] = {
+ GPIO_IRQ_CLUSTER(43, 43, AB8540_INT_GPIO43F),
+ GPIO_IRQ_CLUSTER(44, 44, AB8540_INT_GPIO44F),
+ GPIO_IRQ_CLUSTER(51, 54, AB9540_INT_GPIO51R),
+};
+
+static struct abx500_pinctrl_soc_data ab8540_soc = {
+ .gpio_ranges = ab8540_pinranges,
+ .gpio_num_ranges = ARRAY_SIZE(ab8540_pinranges),
+ .pins = ab8540_pins,
+ .npins = ARRAY_SIZE(ab8540_pins),
+ .functions = ab8540_functions,
+ .nfunctions = ARRAY_SIZE(ab8540_functions),
+ .groups = ab8540_groups,
+ .ngroups = ARRAY_SIZE(ab8540_groups),
+ .alternate_functions = ab8540_alternate_functions,
+ .pullud = &ab8540_pullud,
+ .gpio_irq_cluster = ab8540_gpio_irq_cluster,
+ .ngpio_irq_cluster = ARRAY_SIZE(ab8540_gpio_irq_cluster),
+ .irq_gpio_rising_offset = AB8540_INT_GPIO43R,
+ .irq_gpio_falling_offset = AB8540_INT_GPIO43F,
+ .irq_gpio_factor = 2,
+};
+
+void
+abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc)
+{
+ *soc = &ab8540_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-ab9540.c b/drivers/pinctrl/pinctrl-ab9540.c
new file mode 100644
index 000000000000..7610bd012b98
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ab9540.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include "pinctrl-abx500.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define ABX500_GPIO(offset) (offset)
+
+#define AB9540_PIN_R4 ABX500_GPIO(1)
+#define AB9540_PIN_V3 ABX500_GPIO(2)
+#define AB9540_PIN_T4 ABX500_GPIO(3)
+#define AB9540_PIN_T5 ABX500_GPIO(4)
+/* hole */
+#define AB9540_PIN_B18 ABX500_GPIO(10)
+#define AB9540_PIN_C18 ABX500_GPIO(11)
+/* hole */
+#define AB9540_PIN_D18 ABX500_GPIO(13)
+#define AB9540_PIN_B19 ABX500_GPIO(14)
+#define AB9540_PIN_C19 ABX500_GPIO(15)
+#define AB9540_PIN_D19 ABX500_GPIO(16)
+#define AB9540_PIN_R3 ABX500_GPIO(17)
+#define AB9540_PIN_T2 ABX500_GPIO(18)
+#define AB9540_PIN_U2 ABX500_GPIO(19)
+#define AB9540_PIN_V2 ABX500_GPIO(20)
+#define AB9540_PIN_N17 ABX500_GPIO(21)
+#define AB9540_PIN_N16 ABX500_GPIO(22)
+#define AB9540_PIN_M19 ABX500_GPIO(23)
+#define AB9540_PIN_T3 ABX500_GPIO(24)
+#define AB9540_PIN_W2 ABX500_GPIO(25)
+/* hole */
+#define AB9540_PIN_H4 ABX500_GPIO(27)
+#define AB9540_PIN_F1 ABX500_GPIO(28)
+#define AB9540_PIN_F4 ABX500_GPIO(29)
+#define AB9540_PIN_F2 ABX500_GPIO(30)
+#define AB9540_PIN_E4 ABX500_GPIO(31)
+#define AB9540_PIN_F3 ABX500_GPIO(32)
+/* hole */
+#define AB9540_PIN_J13 ABX500_GPIO(34)
+/* hole */
+#define AB9540_PIN_L17 ABX500_GPIO(40)
+#define AB9540_PIN_L16 ABX500_GPIO(41)
+#define AB9540_PIN_W3 ABX500_GPIO(42)
+#define AB9540_PIN_N4 ABX500_GPIO(50)
+#define AB9540_PIN_G12 ABX500_GPIO(51)
+#define AB9540_PIN_E17 ABX500_GPIO(52)
+#define AB9540_PIN_D11 ABX500_GPIO(53)
+#define AB9540_PIN_M18 ABX500_GPIO(54)
+
+/* indicates the highest GPIO number */
+#define AB9540_GPIO_MAX_NUMBER 54
+
+/*
+ * The names of the pins are denoted by GPIO number and ball name, even
+ * though they can be used for other things than GPIO. This is the first
+ * column in the table of the data sheet and is often used on schematics
+ * and such.
+ */
+static const struct pinctrl_pin_desc ab9540_pins[] = {
+ PINCTRL_PIN(AB9540_PIN_R4, "GPIO1_R4"),
+ PINCTRL_PIN(AB9540_PIN_V3, "GPIO2_V3"),
+ PINCTRL_PIN(AB9540_PIN_T4, "GPIO3_T4"),
+ PINCTRL_PIN(AB9540_PIN_T5, "GPIO4_T5"),
+ /* hole */
+ PINCTRL_PIN(AB9540_PIN_B18, "GPIO10_B18"),
+ PINCTRL_PIN(AB9540_PIN_C18, "GPIO11_C18"),
+ /* hole */
+ PINCTRL_PIN(AB9540_PIN_D18, "GPIO13_D18"),
+ PINCTRL_PIN(AB9540_PIN_B19, "GPIO14_B19"),
+ PINCTRL_PIN(AB9540_PIN_C19, "GPIO15_C19"),
+ PINCTRL_PIN(AB9540_PIN_D19, "GPIO16_D19"),
+ PINCTRL_PIN(AB9540_PIN_R3, "GPIO17_R3"),
+ PINCTRL_PIN(AB9540_PIN_T2, "GPIO18_T2"),
+ PINCTRL_PIN(AB9540_PIN_U2, "GPIO19_U2"),
+ PINCTRL_PIN(AB9540_PIN_V2, "GPIO20_V2"),
+ PINCTRL_PIN(AB9540_PIN_N17, "GPIO21_N17"),
+ PINCTRL_PIN(AB9540_PIN_N16, "GPIO22_N16"),
+ PINCTRL_PIN(AB9540_PIN_M19, "GPIO23_M19"),
+ PINCTRL_PIN(AB9540_PIN_T3, "GPIO24_T3"),
+ PINCTRL_PIN(AB9540_PIN_W2, "GPIO25_W2"),
+ /* hole */
+ PINCTRL_PIN(AB9540_PIN_H4, "GPIO27_H4"),
+ PINCTRL_PIN(AB9540_PIN_F1, "GPIO28_F1"),
+ PINCTRL_PIN(AB9540_PIN_F4, "GPIO29_F4"),
+ PINCTRL_PIN(AB9540_PIN_F2, "GPIO30_F2"),
+ PINCTRL_PIN(AB9540_PIN_E4, "GPIO31_E4"),
+ PINCTRL_PIN(AB9540_PIN_F3, "GPIO32_F3"),
+ /* hole */
+ PINCTRL_PIN(AB9540_PIN_J13, "GPIO34_J13"),
+ /* hole */
+ PINCTRL_PIN(AB9540_PIN_L17, "GPIO40_L17"),
+ PINCTRL_PIN(AB9540_PIN_L16, "GPIO41_L16"),
+ PINCTRL_PIN(AB9540_PIN_W3, "GPIO42_W3"),
+ PINCTRL_PIN(AB9540_PIN_N4, "GPIO50_N4"),
+ PINCTRL_PIN(AB9540_PIN_G12, "GPIO51_G12"),
+ PINCTRL_PIN(AB9540_PIN_E17, "GPIO52_E17"),
+ PINCTRL_PIN(AB9540_PIN_D11, "GPIO53_D11"),
+ PINCTRL_PIN(AB9540_PIN_M18, "GPIO60_M18"),
+};
+
+/*
+ * Maps local GPIO offsets to local pin numbers
+ */
+static const struct abx500_pinrange ab9540_pinranges[] = {
+ ABX500_PINRANGE(1, 4, ABX500_ALT_A),
+ ABX500_PINRANGE(10, 2, ABX500_DEFAULT),
+ ABX500_PINRANGE(13, 1, ABX500_DEFAULT),
+ ABX500_PINRANGE(14, 12, ABX500_ALT_A),
+ ABX500_PINRANGE(27, 6, ABX500_ALT_A),
+ ABX500_PINRANGE(34, 1, ABX500_ALT_A),
+ ABX500_PINRANGE(40, 3, ABX500_ALT_A),
+ ABX500_PINRANGE(50, 1, ABX500_DEFAULT),
+ ABX500_PINRANGE(51, 3, ABX500_ALT_A),
+ ABX500_PINRANGE(54, 1, ABX500_DEFAULT),
+};
+
+/*
+ * Read the pin group names like this:
+ * sysclkreq2_d_1 = first group of pins for sysclkreq2 on the default function
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
+
+/* default column */
+static const unsigned sysclkreq2_d_1_pins[] = { AB9540_PIN_R4 };
+static const unsigned sysclkreq3_d_1_pins[] = { AB9540_PIN_V3 };
+static const unsigned sysclkreq4_d_1_pins[] = { AB9540_PIN_T4 };
+static const unsigned sysclkreq6_d_1_pins[] = { AB9540_PIN_T5 };
+static const unsigned gpio10_d_1_pins[] = { AB9540_PIN_B18 };
+static const unsigned gpio11_d_1_pins[] = { AB9540_PIN_C18 };
+static const unsigned gpio13_d_1_pins[] = { AB9540_PIN_D18 };
+static const unsigned pwmout1_d_1_pins[] = { AB9540_PIN_B19 };
+static const unsigned pwmout2_d_1_pins[] = { AB9540_PIN_C19 };
+static const unsigned pwmout3_d_1_pins[] = { AB9540_PIN_D19 };
+/* audio data interface 1 */
+static const unsigned adi1_d_1_pins[] = { AB9540_PIN_R3, AB9540_PIN_T2,
+ AB9540_PIN_U2, AB9540_PIN_V2 };
+/* USBUICC */
+static const unsigned usbuicc_d_1_pins[] = { AB9540_PIN_N17, AB9540_PIN_N16,
+ AB9540_PIN_M19 };
+static const unsigned sysclkreq7_d_1_pins[] = { AB9540_PIN_T3 };
+static const unsigned sysclkreq8_d_1_pins[] = { AB9540_PIN_W2 };
+/* Digital microphone 1 and 2 */
+static const unsigned dmic12_d_1_pins[] = { AB9540_PIN_H4, AB9540_PIN_F1 };
+/* Digital microphone 3 and 4 */
+static const unsigned dmic34_d_1_pins[] = { AB9540_PIN_F4, AB9540_PIN_F2 };
+/* Digital microphone 5 and 6 */
+static const unsigned dmic56_d_1_pins[] = { AB9540_PIN_E4, AB9540_PIN_F3 };
+static const unsigned extcpena_d_1_pins[] = { AB9540_PIN_J13 };
+/* modem SDA/SCL */
+static const unsigned modsclsda_d_1_pins[] = { AB9540_PIN_L17, AB9540_PIN_L16 };
+static const unsigned sysclkreq5_d_1_pins[] = { AB9540_PIN_W3 };
+static const unsigned gpio50_d_1_pins[] = { AB9540_PIN_N4 };
+static const unsigned batremn_d_1_pins[] = { AB9540_PIN_G12 };
+static const unsigned resethw_d_1_pins[] = { AB9540_PIN_E17 };
+static const unsigned service_d_1_pins[] = { AB9540_PIN_D11 };
+static const unsigned gpio60_d_1_pins[] = { AB9540_PIN_M18 };
+
+/* Altfunction A column */
+static const unsigned gpio1_a_1_pins[] = { AB9540_PIN_R4 };
+static const unsigned gpio2_a_1_pins[] = { AB9540_PIN_V3 };
+static const unsigned gpio3_a_1_pins[] = { AB9540_PIN_T4 };
+static const unsigned gpio4_a_1_pins[] = { AB9540_PIN_T5 };
+static const unsigned hiqclkena_a_1_pins[] = { AB9540_PIN_B18 };
+static const unsigned pdmclk_a_1_pins[] = { AB9540_PIN_C18 };
+static const unsigned uartdata_a_1_pins[] = { AB9540_PIN_D18, AB9540_PIN_N4 };
+static const unsigned gpio14_a_1_pins[] = { AB9540_PIN_B19 };
+static const unsigned gpio15_a_1_pins[] = { AB9540_PIN_C19 };
+static const unsigned gpio16_a_1_pins[] = { AB9540_PIN_D19 };
+static const unsigned gpio17_a_1_pins[] = { AB9540_PIN_R3 };
+static const unsigned gpio18_a_1_pins[] = { AB9540_PIN_T2 };
+static const unsigned gpio19_a_1_pins[] = { AB9540_PIN_U2 };
+static const unsigned gpio20_a_1_pins[] = { AB9540_PIN_V2 };
+static const unsigned gpio21_a_1_pins[] = { AB9540_PIN_N17 };
+static const unsigned gpio22_a_1_pins[] = { AB9540_PIN_N16 };
+static const unsigned gpio23_a_1_pins[] = { AB9540_PIN_M19 };
+static const unsigned gpio24_a_1_pins[] = { AB9540_PIN_T3 };
+static const unsigned gpio25_a_1_pins[] = { AB9540_PIN_W2 };
+static const unsigned gpio27_a_1_pins[] = { AB9540_PIN_H4 };
+static const unsigned gpio28_a_1_pins[] = { AB9540_PIN_F1 };
+static const unsigned gpio29_a_1_pins[] = { AB9540_PIN_F4 };
+static const unsigned gpio30_a_1_pins[] = { AB9540_PIN_F2 };
+static const unsigned gpio31_a_1_pins[] = { AB9540_PIN_E4 };
+static const unsigned gpio32_a_1_pins[] = { AB9540_PIN_F3 };
+static const unsigned gpio34_a_1_pins[] = { AB9540_PIN_J13 };
+static const unsigned gpio40_a_1_pins[] = { AB9540_PIN_L17 };
+static const unsigned gpio41_a_1_pins[] = { AB9540_PIN_L16 };
+static const unsigned gpio42_a_1_pins[] = { AB9540_PIN_W3 };
+static const unsigned gpio51_a_1_pins[] = { AB9540_PIN_G12 };
+static const unsigned gpio52_a_1_pins[] = { AB9540_PIN_E17 };
+static const unsigned gpio53_a_1_pins[] = { AB9540_PIN_D11 };
+static const unsigned usbuiccpd_a_1_pins[] = { AB9540_PIN_M18 };
+
+/* Altfunction B column */
+static const unsigned pdmdata_b_1_pins[] = { AB9540_PIN_B18 };
+static const unsigned pwmextvibra1_b_1_pins[] = { AB9540_PIN_D18 };
+static const unsigned pwmextvibra2_b_1_pins[] = { AB9540_PIN_N4 };
+
+/* Altfunction C column */
+static const unsigned usbvdat_c_1_pins[] = { AB9540_PIN_D18 };
+
+#define AB9540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
+ .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
+static const struct abx500_pingroup ab9540_groups[] = {
+ /* default column */
+ AB9540_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(usbuicc_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(sysclkreq7_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(sysclkreq8_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(gpio50_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(batremn_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(resethw_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(service_d_1, ABX500_DEFAULT),
+ AB9540_PIN_GROUP(gpio60_d_1, ABX500_DEFAULT),
+
+ /* Altfunction A column */
+ AB9540_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(hiqclkena_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(pdmclk_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(uartdata_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio21_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio22_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio23_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio24_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio25_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio51_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
+ AB9540_PIN_GROUP(usbuiccpd_a_1, ABX500_ALT_A),
+
+ /* Altfunction B column */
+ AB9540_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
+ AB9540_PIN_GROUP(pwmextvibra1_b_1, ABX500_ALT_B),
+ AB9540_PIN_GROUP(pwmextvibra2_b_1, ABX500_ALT_B),
+
+ /* Altfunction C column */
+ AB9540_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define AB9540_FUNC_GROUPS(a, b...) \
+static const char * const a##_groups[] = { b };
+
+AB9540_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
+ "sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1",
+ "sysclkreq7_d_1", "sysclkreq8_d_1");
+AB9540_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
+ "gpio10_d_1", "gpio11_d_1", "gpio13_d_1", "gpio14_a_1",
+ "gpio15_a_1", "gpio16_a_1", "gpio17_a_1", "gpio18_a_1",
+ "gpio19_a_1", "gpio20_a_1", "gpio21_a_1", "gpio22_a_1",
+ "gpio23_a_1", "gpio24_a_1", "gpio25_a_1", "gpio27_a_1",
+ "gpio28_a_1", "gpio29_a_1", "gpio30_a_1", "gpio31_a_1",
+ "gpio32_a_1", "gpio34_a_1", "gpio40_a_1", "gpio41_a_1",
+ "gpio42_a_1", "gpio50_d_1", "gpio51_a_1", "gpio52_a_1",
+ "gpio53_a_1", "gpio60_d_1");
+AB9540_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
+AB9540_FUNC_GROUPS(adi1, "adi1_d_1");
+AB9540_FUNC_GROUPS(usbuicc, "usbuicc_d_1", "usbuiccpd_a_1");
+AB9540_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
+AB9540_FUNC_GROUPS(extcpena, "extcpena_d_1");
+AB9540_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
+AB9540_FUNC_GROUPS(batremn, "batremn_d_1");
+AB9540_FUNC_GROUPS(resethw, "resethw_d_1");
+AB9540_FUNC_GROUPS(service, "service_d_1");
+AB9540_FUNC_GROUPS(hiqclkena, "hiqclkena_a_1");
+AB9540_FUNC_GROUPS(pdm, "pdmdata_b_1", "pdmclk_a_1");
+AB9540_FUNC_GROUPS(uartdata, "uartdata_a_1");
+AB9540_FUNC_GROUPS(pwmextvibra, "pwmextvibra1_b_1", "pwmextvibra2_b_1");
+AB9540_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
+
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct abx500_function ab9540_functions[] = {
+ FUNCTION(sysclkreq),
+ FUNCTION(gpio),
+ FUNCTION(pwmout),
+ FUNCTION(adi1),
+ FUNCTION(usbuicc),
+ FUNCTION(dmic),
+ FUNCTION(extcpena),
+ FUNCTION(modsclsda),
+ FUNCTION(batremn),
+ FUNCTION(resethw),
+ FUNCTION(service),
+ FUNCTION(hiqclkena),
+ FUNCTION(pdm),
+ FUNCTION(uartdata),
+ FUNCTION(pwmextvibra),
+ FUNCTION(usbvdat),
+};
+
+/*
+ * This table translates what is in the AB9540 specification regarding the
+ * ball alternate functions (as for DB: default, ALT_A, ALT_B and ALT_C).
+ * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATEFUNC bit1,
+ * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
+ *
+ * example :
+ *
+ * ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2),
+ * means that pin AB9540_PIN_D18 (pin 13) supports 4 muxes (default, ALT_A,
+ * ALT_B and ALT_C), so the GPIOSEL and ALTERNATEFUNC registers are used to
+ * select the mux. The ALTA, ALTB and ALTC values indicate what to write in
+ * the ALTERNATEFUNC register. We need to specify these values because the
+ * SoC designers did not apply the same mux-selection logic across the
+ * ABx500 family.
+ *
+ * As this pin supports at least the ALT_B mux, the default mux is
+ * selected by writing 1 to the GPIOSEL bit :
+ *
+ *         | GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
+ * default |       1       |          0          |          0
+ * alt_A   |       0       |          0          |          1
+ * alt_B   |       0       |          0          |          0
+ * alt_C   |       0       |          1          |          0
+ *
+ * ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED),
+ * means that pin AB9540_PIN_R4 (pin 1) supports 2 muxes, so only the GPIOSEL
+ * register is used to select the mux. As this pin does not support at least
+ * the ALT_B mux, the default mux is selected by writing 0 to the GPIOSEL
+ * bit :
+ *
+ *         | GPIOSEL bit=0 | alternatfunc bit2= | alternatfunc bit1=
+ * default |       0       |          0         |          0
+ * alt_A   |       1       |          0         |          0
+ */
+
+struct alternate_functions ab9540alternate_functions[AB9540_GPIO_MAX_NUMBER + 1] = {
+ /* GPIOSEL1 - bits 4-7 are reserved */
+ ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
+ ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(4, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
+ ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6 */
+ ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7 */
+ ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8 */
+ /* GPIOSEL2 - bits 0 and 3 are reserved */
+ ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9 */
+ ALTERNATE_FUNCTIONS(10, 1, 0, UNUSED, 1, 0, 0), /* GPIO10, altA and altB controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(11, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO11, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12 */
+ ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
+ ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(15, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(16, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
+ /*
+ * GPIOSEL3 - bits 1 to 3 are reserved.
+ * Pins 17 to 20 are a special case: only bit 0 is used to select the
+ * alternate function for these 4 pins.
+ */
+ ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(18, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(19, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(20, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(21, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO21, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(22, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO22, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(23, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO23, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(24, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO24, altA controlled by bit 7 */
+ /* GPIOSEL4 - bit 1 reserved */
+ ALTERNATE_FUNCTIONS(25, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO25, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26 */
+ ALTERNATE_FUNCTIONS(27, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(28, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(29, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(30, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
+ ALTERNATE_FUNCTIONS(31, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
+ ALTERNATE_FUNCTIONS(32, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
+ /* GPIOSEL5 - bit 0, 2-6 are reserved */
+ ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
+ ALTERNATE_FUNCTIONS(34, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35 */
+ ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36 */
+ ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37 */
+ ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38 */
+ ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39 */
+ ALTERNATE_FUNCTIONS(40, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7 */
+ /* GPIOSEL6 - bit 2-7 are reserved */
+ ALTERNATE_FUNCTIONS(41, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
+ ALTERNATE_FUNCTIONS(42, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(43, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO43 */
+ ALTERNATE_FUNCTIONS(44, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO44 */
+ ALTERNATE_FUNCTIONS(45, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO45 */
+ ALTERNATE_FUNCTIONS(46, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO46 */
+ ALTERNATE_FUNCTIONS(47, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO47 */
+ ALTERNATE_FUNCTIONS(48, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO48 */
+ /*
+ * GPIOSEL7 - bit 0 and bits 6-7 are reserved.
+ * Special case: GPIO60 is located at offset 5 of GPIOSEL7. It is not
+ * clear why the AB9540 datasheet calls it GPIO60 (GPIO54 would be the
+ * logical name), so from the SoC point of view we consider
+ * GPIO60 = GPIO54.
+ */
+ ALTERNATE_FUNCTIONS(49, 0, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49 */
+ ALTERNATE_FUNCTIONS(50, 1, 2, UNUSED, 1, 0, 0), /* GPIO50, altA and altB controlled by bit 1 */
+ ALTERNATE_FUNCTIONS(51, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO51, altA controlled by bit 2 */
+ ALTERNATE_FUNCTIONS(52, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
+ ALTERNATE_FUNCTIONS(53, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
+ ALTERNATE_FUNCTIONS(54, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO54 = GPIO60, altA controlled by bit 5 */
+};
+
+struct abx500_gpio_irq_cluster ab9540_gpio_irq_cluster[] = {
+ GPIO_IRQ_CLUSTER(10, 13, AB8500_INT_GPIO10R),
+ GPIO_IRQ_CLUSTER(24, 25, AB8500_INT_GPIO24R),
+ GPIO_IRQ_CLUSTER(40, 41, AB8500_INT_GPIO40R),
+ GPIO_IRQ_CLUSTER(50, 54, AB9540_INT_GPIO50R),
+};
+
+static struct abx500_pinctrl_soc_data ab9540_soc = {
+ .gpio_ranges = ab9540_pinranges,
+ .gpio_num_ranges = ARRAY_SIZE(ab9540_pinranges),
+ .pins = ab9540_pins,
+ .npins = ARRAY_SIZE(ab9540_pins),
+ .functions = ab9540_functions,
+ .nfunctions = ARRAY_SIZE(ab9540_functions),
+ .groups = ab9540_groups,
+ .ngroups = ARRAY_SIZE(ab9540_groups),
+ .alternate_functions = ab9540alternate_functions,
+ .gpio_irq_cluster = ab9540_gpio_irq_cluster,
+ .ngpio_irq_cluster = ARRAY_SIZE(ab9540_gpio_irq_cluster),
+ .irq_gpio_rising_offset = AB8500_INT_GPIO6R,
+ .irq_gpio_falling_offset = AB8500_INT_GPIO6F,
+ .irq_gpio_factor = 1,
+};
+
+void
+abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc)
+{
+ *soc = &ab9540_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
new file mode 100644
index 000000000000..caecdd373061
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -0,0 +1,1012 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2013
+ *
+ * Author: Patrice Chotard <patrice.chotard@st.com>
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "pinctrl-abx500.h"
+
+/*
+ * The AB9540 and AB8540 GPIO support is an extended version
+ * of the AB8500 GPIO support.
+ * The AB9540 supports an additional (7th) register so that
+ * more GPIOs may be configured and used.
+ * The AB8540 supports 4 new GPIOs (GPIOx_VBAT) that have
+ * internal pull-up and pull-down capabilities.
+ */
+
+/*
+ * GPIO registers offset
+ * Bank: 0x10
+ */
+#define AB8500_GPIO_SEL1_REG 0x00
+#define AB8500_GPIO_SEL2_REG 0x01
+#define AB8500_GPIO_SEL3_REG 0x02
+#define AB8500_GPIO_SEL4_REG 0x03
+#define AB8500_GPIO_SEL5_REG 0x04
+#define AB8500_GPIO_SEL6_REG 0x05
+#define AB9540_GPIO_SEL7_REG 0x06
+
+#define AB8500_GPIO_DIR1_REG 0x10
+#define AB8500_GPIO_DIR2_REG 0x11
+#define AB8500_GPIO_DIR3_REG 0x12
+#define AB8500_GPIO_DIR4_REG 0x13
+#define AB8500_GPIO_DIR5_REG 0x14
+#define AB8500_GPIO_DIR6_REG 0x15
+#define AB9540_GPIO_DIR7_REG 0x16
+
+#define AB8500_GPIO_OUT1_REG 0x20
+#define AB8500_GPIO_OUT2_REG 0x21
+#define AB8500_GPIO_OUT3_REG 0x22
+#define AB8500_GPIO_OUT4_REG 0x23
+#define AB8500_GPIO_OUT5_REG 0x24
+#define AB8500_GPIO_OUT6_REG 0x25
+#define AB9540_GPIO_OUT7_REG 0x26
+
+#define AB8500_GPIO_PUD1_REG 0x30
+#define AB8500_GPIO_PUD2_REG 0x31
+#define AB8500_GPIO_PUD3_REG 0x32
+#define AB8500_GPIO_PUD4_REG 0x33
+#define AB8500_GPIO_PUD5_REG 0x34
+#define AB8500_GPIO_PUD6_REG 0x35
+#define AB9540_GPIO_PUD7_REG 0x36
+
+#define AB8500_GPIO_IN1_REG 0x40
+#define AB8500_GPIO_IN2_REG 0x41
+#define AB8500_GPIO_IN3_REG 0x42
+#define AB8500_GPIO_IN4_REG 0x43
+#define AB8500_GPIO_IN5_REG 0x44
+#define AB8500_GPIO_IN6_REG 0x45
+#define AB9540_GPIO_IN7_REG 0x46
+#define AB8540_GPIO_VINSEL_REG 0x47
+#define AB8540_GPIO_PULL_UPDOWN_REG 0x48
+#define AB8500_GPIO_ALTFUN_REG 0x50
+#define AB8540_GPIO_PULL_UPDOWN_MASK 0x03
+#define AB8540_GPIO_VINSEL_MASK 0x03
+#define AB8540_GPIOX_VBAT_START 51
+#define AB8540_GPIOX_VBAT_END 54
+
+struct abx500_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctldev;
+ struct abx500_pinctrl_soc_data *soc;
+ struct gpio_chip chip;
+ struct ab8500 *parent;
+ struct mutex lock;
+ struct abx500_gpio_irq_cluster *irq_cluster;
+ int irq_cluster_size;
+};
+
+/**
+ * to_abx500_pinctrl() - get the pointer to abx500_pinctrl
+ * @chip: Member of the structure abx500_pinctrl
+ */
+static inline struct abx500_pinctrl *to_abx500_pinctrl(struct gpio_chip *chip)
+{
+ return container_of(chip, struct abx500_pinctrl, chip);
+}
+
+static int abx500_gpio_get_bit(struct gpio_chip *chip, u8 reg,
+ unsigned offset, bool *bit)
+{
+ struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+ u8 pos = offset % 8;
+ u8 val;
+ int ret;
+
+ reg += offset / 8;
+ ret = abx500_get_register_interruptible(pct->dev,
+ AB8500_MISC, reg, &val);
+
+ if (ret < 0)
+ dev_err(pct->dev,
+ "%s read reg =%x, offset=%x failed\n",
+ __func__, reg, offset);
+ else
+ *bit = !!(val & BIT(pos));
+
+ return ret;
+}
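+/*
+ * Illustration of the register/bit arithmetic above: each register covers
+ * 8 pins, so e.g. offset 10 with base AB8500_GPIO_IN1_REG (0x40) resolves
+ * to register 0x41, bit 2.
+ */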
+
+static int abx500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
+ unsigned offset, int val)
+{
+ struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+ u8 pos = offset % 8;
+ int ret;
+
+ reg += offset / 8;
+ ret = abx500_mask_and_set_register_interruptible(pct->dev,
+ AB8500_MISC, reg, BIT(pos), val << pos);
+ if (ret < 0)
+ dev_err(pct->dev, "%s write failed\n", __func__);
+
+ return ret;
+}
+
+/**
+ * abx500_gpio_get() - Get the particular GPIO value
+ * @chip: Gpio device
+ * @offset: GPIO number to read
+ */
+static int abx500_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+ bool bit;
+ int ret;
+
+ ret = abx500_gpio_get_bit(chip, AB8500_GPIO_IN1_REG,
+ offset, &bit);
+ if (ret < 0) {
+ dev_err(pct->dev, "%s failed\n", __func__);
+ return ret;
+ }
+
+ return bit;
+}
+
+static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+ int ret;
+
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
+ if (ret < 0)
+ dev_err(pct->dev, "%s write failed\n", __func__);
+}
+
+static int abx500_config_pull_updown(struct abx500_pinctrl *pct,
+ int offset, enum abx500_gpio_pull_updown val)
+{
+ u8 pos;
+ int ret;
+ struct pullud *pullud;
+
+ if (!pct->soc->pullud) {
+ dev_err(pct->dev, "%s AB chip doesn't support pull up/down feature\n",
+ __func__);
+ ret = -EPERM;
+ goto out;
+ }
+
+ pullud = pct->soc->pullud;
+
+ if ((offset < pullud->first_pin)
+ || (offset > pullud->last_pin)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pos = offset << 1;
+
+ ret = abx500_mask_and_set_register_interruptible(pct->dev,
+ AB8500_MISC, AB8540_GPIO_PULL_UPDOWN_REG,
+ AB8540_GPIO_PULL_UPDOWN_MASK << pos, val << pos);
+
+out:
+ if (ret < 0)
+ dev_err(pct->dev, "%s failed (%d)\n", __func__, ret);
+
+ return ret;
+}
+
+static int abx500_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int val)
+{
+ struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+ struct pullud *pullud = pct->soc->pullud;
+ unsigned gpio;
+ int ret;
+
+ /* set direction as output */
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
+ if (ret < 0)
+ return ret;
+
+ /* disable pull down */
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
+ if (ret < 0)
+ return ret;
+
+ /* if supported, disable both pull down and pull up */
+ gpio = offset + 1;
+ if (pullud && gpio >= pullud->first_pin && gpio <= pullud->last_pin) {
+ ret = abx500_config_pull_updown(pct,
+ gpio,
+ ABX500_GPIO_PULL_NONE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* set the output as 1 or 0 */
+ return abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
+}
+
+static int abx500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ /* set the register as input */
+ return abx500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
+}
+
+static int abx500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+ /* The AB8500 GPIO numbers are off by one */
+ int gpio = offset + 1;
+ int hwirq;
+ int i;
+
+ for (i = 0; i < pct->irq_cluster_size; i++) {
+ struct abx500_gpio_irq_cluster *cluster =
+ &pct->irq_cluster[i];
+
+ if (gpio >= cluster->start && gpio <= cluster->end) {
+ /*
+ * The ABx500 GPIOs' associated IRQs are clustered together
+ * throughout the interrupt numbers at irregular intervals.
+ * To solve this quandary, we have placed the read-in values
+ * into the cluster information table.
+ */
+ hwirq = gpio - cluster->start + cluster->to_irq;
+ return irq_create_mapping(pct->parent->domain, hwirq);
+ }
+ }
+
+ return -EINVAL;
+}
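+/*
+ * Worked example (illustrative, using ab9540_gpio_irq_cluster): GPIO25
+ * falls in GPIO_IRQ_CLUSTER(24, 25, AB8500_INT_GPIO24R), so the hardware
+ * IRQ is 25 - 24 + AB8500_INT_GPIO24R before being mapped onto the parent
+ * IRQ domain.
+ */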
+
+static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
+ unsigned gpio, int alt_setting)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+ struct alternate_functions af = pct->soc->alternate_functions[gpio];
+ int ret;
+ int val;
+ unsigned offset;
+
+ const char *modes[] = {
+ [ABX500_DEFAULT] = "default",
+ [ABX500_ALT_A] = "altA",
+ [ABX500_ALT_B] = "altB",
+ [ABX500_ALT_C] = "altC",
+ };
+
+ /* sanity check */
+ if (((alt_setting == ABX500_ALT_A) && (af.gpiosel_bit == UNUSED)) ||
+ ((alt_setting == ABX500_ALT_B) && (af.alt_bit1 == UNUSED)) ||
+ ((alt_setting == ABX500_ALT_C) && (af.alt_bit2 == UNUSED))) {
+ dev_dbg(pct->dev, "pin %d doesn't support %s mode\n", gpio,
+ modes[alt_setting]);
+ return -EINVAL;
+ }
+
+ /* on ABx5xx, there is no GPIO0, so adjust the offset */
+ offset = gpio - 1;
+
+ switch (alt_setting) {
+ case ABX500_DEFAULT:
+ /*
+ * for ABx5xx family, default mode is always selected by
+ * writing 0 to GPIOSELx register, except for pins which
+ * support at least ALT_B mode, default mode is selected
+ * by writing 1 to GPIOSELx register
+ */
+ val = 0;
+ if (af.alt_bit1 != UNUSED)
+ val++;
+
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+ offset, val);
+ break;
+
+ case ABX500_ALT_A:
+ /*
+ * for ABx5xx family, alt_a mode is always selected by
+ * writing 1 to GPIOSELx register, except for pins which
+ * support at least ALT_B mode, alt_a mode is selected
+ * by writing 0 to GPIOSELx register and 0 in ALTFUNC
+ * register
+ */
+ if (af.alt_bit1 != UNUSED) {
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+ offset, 0);
+ ret = abx500_gpio_set_bits(chip,
+ AB8500_GPIO_ALTFUN_REG,
+ af.alt_bit1,
+ !!(af.alta_val & BIT(0)));
+ if (af.alt_bit2 != UNUSED)
+ ret = abx500_gpio_set_bits(chip,
+ AB8500_GPIO_ALTFUN_REG,
+ af.alt_bit2,
+ !!(af.alta_val & BIT(1)));
+ } else
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+ offset, 1);
+ break;
+
+ case ABX500_ALT_B:
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+ offset, 0);
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
+ af.alt_bit1, !!(af.altb_val & BIT(0)));
+ if (af.alt_bit2 != UNUSED)
+ ret = abx500_gpio_set_bits(chip,
+ AB8500_GPIO_ALTFUN_REG,
+ af.alt_bit2,
+ !!(af.altb_val & BIT(1)));
+ break;
+
+ case ABX500_ALT_C:
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+ offset, 0);
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
+ af.alt_bit2, !!(af.altc_val & BIT(0)));
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
+ af.alt_bit2, !!(af.altc_val & BIT(1)));
+ break;
+
+ default:
+ dev_dbg(pct->dev, "unknow alt_setting %d\n", alt_setting);
+
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
+ unsigned gpio)
+{
+ u8 mode;
+ bool bit_mode;
+ bool alt_bit1;
+ bool alt_bit2;
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+ struct alternate_functions af = pct->soc->alternate_functions[gpio];
+ /* on ABx5xx, there is no GPIO0, so adjust the offset */
+ unsigned offset = gpio - 1;
+
+ /*
+ * If gpiosel_bit is set to UNUSED, it means this is either not a
+ * GPIO or a special case.
+ */
+ if (af.gpiosel_bit == UNUSED)
+ return ABX500_DEFAULT;
+
+ /* read GpioSelx register */
+ abx500_gpio_get_bit(chip, AB8500_GPIO_SEL1_REG + (offset / 8),
+ af.gpiosel_bit, &bit_mode);
+ mode = bit_mode;
+
+ /* sanity check */
+ if ((af.alt_bit1 < UNUSED) || (af.alt_bit1 > 7) ||
+ (af.alt_bit2 < UNUSED) || (af.alt_bit2 > 7)) {
+ dev_err(pct->dev,
+ "alt_bitX value not in correct range (-1 to 7)\n");
+ return -EINVAL;
+ }
+
+ /* if alt_bit2 is used, alt_bit1 must be used too */
+ if ((af.alt_bit2 != UNUSED) && (af.alt_bit1 == UNUSED)) {
+ dev_err(pct->dev,
+ "if alt_bit2 is used, alt_bit1 can't be unused\n");
+ return -EINVAL;
+ }
+
+ /* check if the pin uses the AlternateFunction register */
+ if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED))
+ return mode;
+ /*
+ * if pin GPIOSEL bit is set and pin supports alternate function,
+ * it means DEFAULT mode
+ */
+ if (mode)
+ return ABX500_DEFAULT;
+
+ /*
+ * The pin uses the AlternateFunction register:
+ * read the alt_bit1 value.
+ */
+ abx500_gpio_get_bit(chip, AB8500_GPIO_ALTFUN_REG,
+ af.alt_bit1, &alt_bit1);
+
+ if (af.alt_bit2 != UNUSED)
+ /* read alt_bit2 value */
+ abx500_gpio_get_bit(chip, AB8500_GPIO_ALTFUN_REG, af.alt_bit2,
+ &alt_bit2);
+ else
+ alt_bit2 = 0;
+
+ mode = (alt_bit2 << 1) + alt_bit1;
+ if (mode == af.alta_val)
+ return ABX500_ALT_A;
+ else if (mode == af.altb_val)
+ return ABX500_ALT_B;
+ else
+ return ABX500_ALT_C;
+}
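+/*
+ * Decode example (illustrative): AB9540 GPIO13 is declared as
+ * ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2). With its GPIOSEL bit cleared,
+ * mode = (alt_bit2 << 1) + alt_bit1, so a value of 1 maps to ALT_A, 0 to
+ * ALT_B and 2 to ALT_C, matching the alta/altb/altc values above.
+ */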
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+static void abx500_gpio_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned offset, unsigned gpio)
+{
+ const char *label = gpiochip_is_requested(chip, offset - 1);
+ u8 gpio_offset = offset - 1;
+ int mode = -1;
+ bool is_out;
+ bool pull;
+
+ const char *modes[] = {
+ [ABX500_DEFAULT] = "default",
+ [ABX500_ALT_A] = "altA",
+ [ABX500_ALT_B] = "altB",
+ [ABX500_ALT_C] = "altC",
+ };
+
+ abx500_gpio_get_bit(chip, AB8500_GPIO_DIR1_REG, gpio_offset, &is_out);
+ abx500_gpio_get_bit(chip, AB8500_GPIO_PUD1_REG, gpio_offset, &pull);
+
+ if (pctldev)
+ mode = abx500_get_mode(pctldev, chip, offset);
+
+ seq_printf(s, " gpio-%-3d (%-20.20s) %-3s %-9s %s",
+ gpio, label ?: "(none)",
+ is_out ? "out" : "in ",
+ is_out ?
+ (chip->get
+ ? (chip->get(chip, offset) ? "hi" : "lo")
+ : "? ")
+ : (pull ? "pull up" : "pull down"),
+ (mode < 0) ? "unknown" : modes[mode]);
+}
+
+static void abx500_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned i;
+ unsigned gpio = chip->base;
+ struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+ struct pinctrl_dev *pctldev = pct->pctldev;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ /* On AB8500, there is no GPIO0, the first is the GPIO 1 */
+ abx500_gpio_dbg_show_one(s, pctldev, chip, i + 1, gpio);
+ seq_printf(s, "\n");
+ }
+}
+
+#else
+static inline void abx500_gpio_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned offset, unsigned gpio)
+{
+}
+#define abx500_gpio_dbg_show NULL
+#endif
+
+int abx500_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ return pinctrl_request_gpio(gpio);
+}
+
+void abx500_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ pinctrl_free_gpio(gpio);
+}
+
+static struct gpio_chip abx500gpio_chip = {
+ .label = "abx500-gpio",
+ .owner = THIS_MODULE,
+ .request = abx500_gpio_request,
+ .free = abx500_gpio_free,
+ .direction_input = abx500_gpio_direction_input,
+ .get = abx500_gpio_get,
+ .direction_output = abx500_gpio_direction_output,
+ .set = abx500_gpio_set,
+ .to_irq = abx500_gpio_to_irq,
+ .dbg_show = abx500_gpio_dbg_show,
+};
+
+static int abx500_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+ return pct->soc->nfunctions;
+}
+
+static const char *abx500_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+ return pct->soc->functions[function].name;
+}
+
+static int abx500_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pct->soc->functions[function].groups;
+ *num_groups = pct->soc->functions[function].ngroups;
+
+ return 0;
+}
+
+static int abx500_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned group)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &pct->chip;
+ const struct abx500_pingroup *g;
+ int i;
+ int ret = 0;
+
+ g = &pct->soc->groups[group];
+ if (g->altsetting < 0)
+ return -EINVAL;
+
+ dev_dbg(pct->dev, "enable group %s, %u pins\n", g->name, g->npins);
+
+ for (i = 0; i < g->npins; i++) {
+ dev_dbg(pct->dev, "setting pin %d to altsetting %d\n",
+ g->pins[i], g->altsetting);
+
+ ret = abx500_set_mode(pctldev, chip, g->pins[i], g->altsetting);
+ }
+
+ return ret;
+}
+
+static void abx500_pmx_disable(struct pinctrl_dev *pctldev,
+ unsigned function, unsigned group)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+ const struct abx500_pingroup *g;
+
+ g = &pct->soc->groups[group];
+ if (g->altsetting < 0)
+ return;
+
+ /* FIXME: poke out the mux, set the pin to some default state? */
+ dev_dbg(pct->dev, "disable group %s, %u pins\n", g->name, g->npins);
+}
+
+int abx500_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+ const struct abx500_pinrange *p;
+ int ret;
+ int i;
+
+ /*
+ * Different ranges have different ways to enable GPIO function on a
+ * pin, so refer back to our local range type, where we handily define
+ * what altfunc enables GPIO for a certain pin.
+ */
+ for (i = 0; i < pct->soc->gpio_num_ranges; i++) {
+ p = &pct->soc->gpio_ranges[i];
+ if ((offset >= p->offset) &&
+ (offset < (p->offset + p->npins)))
+ break;
+ }
+
+ if (i == pct->soc->gpio_num_ranges) {
+ dev_err(pct->dev, "%s failed to locate range\n", __func__);
+ return -ENODEV;
+ }
+
+ dev_dbg(pct->dev, "enable GPIO by altfunc %d at gpio %d\n",
+ p->altfunc, offset);
+
+ ret = abx500_set_mode(pct->pctldev, &pct->chip,
+ offset, p->altfunc);
+ if (ret < 0) {
+ dev_err(pct->dev, "%s setting altfunc failed\n", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void abx500_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+}
+
+static struct pinmux_ops abx500_pinmux_ops = {
+ .get_functions_count = abx500_pmx_get_funcs_cnt,
+ .get_function_name = abx500_pmx_get_func_name,
+ .get_function_groups = abx500_pmx_get_func_groups,
+ .enable = abx500_pmx_enable,
+ .disable = abx500_pmx_disable,
+ .gpio_request_enable = abx500_gpio_request_enable,
+ .gpio_disable_free = abx500_gpio_disable_free,
+};
+
+static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+ return pct->soc->ngroups;
+}
+
+static const char *abx500_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+ return pct->soc->groups[selector].name;
+}
+
+static int abx500_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pct->soc->groups[selector].pins;
+ *num_pins = pct->soc->groups[selector].npins;
+
+ return 0;
+}
+
+static void abx500_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned offset)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &pct->chip;
+
+ abx500_gpio_dbg_show_one(s, pctldev, chip, offset,
+ chip->base + offset - 1);
+}
+
+static struct pinctrl_ops abx500_pinctrl_ops = {
+ .get_groups_count = abx500_get_groups_cnt,
+ .get_group_name = abx500_get_group_name,
+ .get_group_pins = abx500_get_group_pins,
+ .pin_dbg_show = abx500_pin_dbg_show,
+};
+
+int abx500_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *config)
+{
+ return -ENOSYS;
+}
+
+int abx500_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long config)
+{
+ struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+ struct pullud *pullud = pct->soc->pullud;
+ struct gpio_chip *chip = &pct->chip;
+ unsigned offset;
+ int ret;
+ enum pin_config_param param = pinconf_to_config_param(config);
+ enum pin_config_param argument = pinconf_to_config_argument(config);
+
+ dev_dbg(chip->dev, "pin %d [%#lx]: %s %s\n",
+ pin, config, (param == PIN_CONFIG_OUTPUT) ? "output " : "input",
+ (param == PIN_CONFIG_OUTPUT) ? (argument ? "high" : "low") :
+ (argument ? "pull up" : "pull down"));
+
+ /* on ABx500, there is no GPIO0, so adjust the offset */
+ offset = pin - 1;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ /*
+ * if argument = 1 set the pull down
+ * else clear the pull down
+ */
+ ret = abx500_gpio_direction_input(chip, offset);
+ /*
+ * Some chips only support pull down, while some actually
+ * support both pull up and pull down. Such chips have
+ * a "pullud" range specified for the pins that support
+ * both features. If the pin is not within that range, we
+ * fall back to the old bit set that only supports pull down.
+ */
+ if (pullud &&
+ pin >= pullud->first_pin &&
+ pin <= pullud->last_pin)
+ ret = abx500_config_pull_updown(pct,
+ pin,
+ argument ? ABX500_GPIO_PULL_DOWN : ABX500_GPIO_PULL_NONE);
+ else
+ /* Chip only supports pull down */
+ ret = abx500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG,
+ offset, argument ? 0 : 1);
+ break;
+
+ case PIN_CONFIG_OUTPUT:
+ ret = abx500_gpio_direction_output(chip, offset, argument);
+
+ break;
+
+ default:
+ dev_err(chip->dev, "illegal configuration requested\n");
+
+ return -EINVAL;
+ }
+
+ return ret;
+}
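+/*
+ * Note (illustrative): "config" above is the packed generic pinconf form,
+ * so a caller asking for e.g. a pull-down would typically pass something
+ * like pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_DOWN, 1).
+ */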
+
+static struct pinconf_ops abx500_pinconf_ops = {
+ .pin_config_get = abx500_pin_config_get,
+ .pin_config_set = abx500_pin_config_set,
+};
+
+static struct pinctrl_desc abx500_pinctrl_desc = {
+ .name = "pinctrl-abx500",
+ .pctlops = &abx500_pinctrl_ops,
+ .pmxops = &abx500_pinmux_ops,
+ .confops = &abx500_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int abx500_get_gpio_num(struct abx500_pinctrl_soc_data *soc)
+{
+ unsigned int lowest = 0;
+ unsigned int highest = 0;
+ unsigned int npins = 0;
+ int i;
+
+ /*
+ * Compute the number of GPIOs from the SoC GPIO range descriptors.
+ * These ranges may include "holes" but the GPIO number space shall
+ * still be homogeneous, so we need to detect and account for any
+ * such holes so that these are included in the number of GPIO pins.
+ */
+ for (i = 0; i < soc->gpio_num_ranges; i++) {
+ unsigned gstart;
+ unsigned gend;
+ const struct abx500_pinrange *p;
+
+ p = &soc->gpio_ranges[i];
+ gstart = p->offset;
+ gend = p->offset + p->npins - 1;
+
+ if (i == 0) {
+ /* First iteration, set start values */
+ lowest = gstart;
+ highest = gend;
+ } else {
+ if (gstart < lowest)
+ lowest = gstart;
+ if (gend > highest)
+ highest = gend;
+ }
+ }
+ /* this gives the absolute number of pins */
+ npins = highest - lowest + 1;
+ return npins;
+}
+
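+/*
+ * Worked example of the computation above (hypothetical ranges, not taken
+ * from a real SoC table): with ranges { offset = 1, npins = 4 } and
+ * { offset = 10, npins = 3 }, lowest = 1 and highest = 12, so the function
+ * returns 12 - 1 + 1 = 12. The hole covering pins 5..9 is deliberately
+ * counted so that the GPIO number space stays homogeneous.
+ */
+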
+static const struct of_device_id abx500_gpio_match[] = {
+ { .compatible = "stericsson,ab8500-gpio", .data = (void *)PINCTRL_AB8500, },
+ { .compatible = "stericsson,ab8505-gpio", .data = (void *)PINCTRL_AB8505, },
+ { .compatible = "stericsson,ab8540-gpio", .data = (void *)PINCTRL_AB8540, },
+ { .compatible = "stericsson,ab9540-gpio", .data = (void *)PINCTRL_AB9540, },
+ { /* sentinel */ },
+};
+
+static int abx500_gpio_probe(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *abx500_pdata =
+ dev_get_platdata(pdev->dev.parent);
+ struct abx500_gpio_platform_data *pdata = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ struct abx500_pinctrl *pct;
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+ unsigned int id = -1;
+ int ret, err;
+ int i;
+
+ if (abx500_pdata)
+ pdata = abx500_pdata->gpio;
+ if (!pdata) {
+ if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_device(abx500_gpio_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+ id = (unsigned long)match->data;
+ } else {
+ dev_err(&pdev->dev, "gpio dt and platform data missing\n");
+ return -ENODEV;
+ }
+ }
+
+ if (platid)
+ id = platid->driver_data;
+
+ pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl),
+ GFP_KERNEL);
+ if (pct == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate memory for pct\n");
+ return -ENOMEM;
+ }
+
+ pct->dev = &pdev->dev;
+ pct->parent = dev_get_drvdata(pdev->dev.parent);
+ pct->chip = abx500gpio_chip;
+ pct->chip.dev = &pdev->dev;
+ pct->chip.base = (np) ? -1 : pdata->gpio_base;
+
+ /* initialize the lock */
+ mutex_init(&pct->lock);
+
+ /* Poke in other ASIC variants here */
+ switch (id) {
+ case PINCTRL_AB8500:
+ abx500_pinctrl_ab8500_init(&pct->soc);
+ break;
+ case PINCTRL_AB8540:
+ abx500_pinctrl_ab8540_init(&pct->soc);
+ break;
+ case PINCTRL_AB9540:
+ abx500_pinctrl_ab9540_init(&pct->soc);
+ break;
+ case PINCTRL_AB8505:
+ abx500_pinctrl_ab8505_init(&pct->soc);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n",
+ (int) id);
+ mutex_destroy(&pct->lock);
+ return -EINVAL;
+ }
+
+ if (!pct->soc) {
+ dev_err(&pdev->dev, "Invalid SOC data\n");
+ mutex_destroy(&pct->lock);
+ return -EINVAL;
+ }
+
+ pct->chip.ngpio = abx500_get_gpio_num(pct->soc);
+ pct->irq_cluster = pct->soc->gpio_irq_cluster;
+ pct->irq_cluster_size = pct->soc->ngpio_irq_cluster;
+
+ ret = gpiochip_add(&pct->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
+ mutex_destroy(&pct->lock);
+ return ret;
+ }
+ dev_info(&pdev->dev, "added gpiochip\n");
+
+ abx500_pinctrl_desc.pins = pct->soc->pins;
+ abx500_pinctrl_desc.npins = pct->soc->npins;
+ pct->pctldev = pinctrl_register(&abx500_pinctrl_desc, &pdev->dev, pct);
+ if (!pct->pctldev) {
+ dev_err(&pdev->dev,
+ "could not register abx500 pinctrl driver\n");
+ ret = -EINVAL;
+ goto out_rem_chip;
+ }
+ dev_info(&pdev->dev, "registered pin controller\n");
+
+ /* We will handle a range of GPIO pins */
+ for (i = 0; i < pct->soc->gpio_num_ranges; i++) {
+ const struct abx500_pinrange *p = &pct->soc->gpio_ranges[i];
+
+ ret = gpiochip_add_pin_range(&pct->chip,
+ dev_name(&pdev->dev),
+ p->offset - 1, p->offset, p->npins);
+ if (ret < 0)
+ goto out_rem_chip;
+ }
+
+ platform_set_drvdata(pdev, pct);
+ dev_info(&pdev->dev, "initialized abx500 pinctrl driver\n");
+
+ return 0;
+
+out_rem_chip:
+ err = gpiochip_remove(&pct->chip);
+ if (err)
+ dev_info(&pdev->dev, "failed to remove gpiochip\n");
+
+ mutex_destroy(&pct->lock);
+ return ret;
+}
+
+/**
+ * abx500_gpio_remove() - remove the ABx500 GPIO driver
+ * @pdev: Platform device registered
+ */
+static int abx500_gpio_remove(struct platform_device *pdev)
+{
+ struct abx500_pinctrl *pct = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&pct->chip);
+ if (ret < 0) {
+ dev_err(pct->dev, "unable to remove gpiochip: %d\n",
+ ret);
+ return ret;
+ }
+
+ mutex_destroy(&pct->lock);
+
+ return 0;
+}
+
+static const struct platform_device_id abx500_pinctrl_id[] = {
+ { "pinctrl-ab8500", PINCTRL_AB8500 },
+ { "pinctrl-ab8540", PINCTRL_AB8540 },
+ { "pinctrl-ab9540", PINCTRL_AB9540 },
+ { "pinctrl-ab8505", PINCTRL_AB8505 },
+ { },
+};
+
+static struct platform_driver abx500_gpio_driver = {
+ .driver = {
+ .name = "abx500-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = abx500_gpio_match,
+ },
+ .probe = abx500_gpio_probe,
+ .remove = abx500_gpio_remove,
+ .id_table = abx500_pinctrl_id,
+};
+
+static int __init abx500_gpio_init(void)
+{
+ return platform_driver_register(&abx500_gpio_driver);
+}
+core_initcall(abx500_gpio_init);
+
+MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
+MODULE_DESCRIPTION("Driver allows to use AxB5xx unused pins to be used as GPIO");
+MODULE_ALIAS("platform:abx500-gpio");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/pinctrl-abx500.h
new file mode 100644
index 000000000000..eeca8f973999
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-abx500.h
@@ -0,0 +1,234 @@
+#ifndef PINCTRL_PINCTRL_ABx500_H
+#define PINCTRL_PINCTRL_ABx500_H
+
+/* Package definitions */
+#define PINCTRL_AB8500 0
+#define PINCTRL_AB8540 1
+#define PINCTRL_AB9540 2
+#define PINCTRL_AB8505 3
+
+/* pins alternate function */
+enum abx500_pin_func {
+ ABX500_DEFAULT,
+ ABX500_ALT_A,
+ ABX500_ALT_B,
+ ABX500_ALT_C,
+};
+
+/**
+ * struct abx500_function - ABx500 pinctrl mux function
+ * @name: The name of the function, exported to pinctrl core.
+ * @groups: An array of pin groups that may select this function.
+ * @ngroups: The number of entries in @groups.
+ */
+struct abx500_function {
+ const char *name;
+ const char * const *groups;
+ unsigned ngroups;
+};
+
+/**
+ * struct abx500_pingroup - describes an ABx500 pin group
+ * @name: the name of this specific pin group
+ * @pins: an array of discrete physical pins used in this group, taken
+ * from the driver-local pin enumeration space
+ * @npins: the number of pins in this group array, i.e. the number of
+ * elements in .pins so we can iterate over that array
+ * @altsetting: the altsetting to apply to all pins in this group to
+ * configure them to be used by a function
+ */
+struct abx500_pingroup {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned npins;
+ int altsetting;
+};
+
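+/*
+ * Sketch of how a per-SoC table is expected to tie groups and functions
+ * together (the names and pin numbers below are illustrative only, not
+ * taken from an existing SoC file):
+ *
+ *	static const unsigned sysclkreq2_d_1_pins[] = { 1 };
+ *	static const struct abx500_pingroup ab8500_groups[] = {
+ *		{
+ *			.name = "sysclkreq2_d_1",
+ *			.pins = sysclkreq2_d_1_pins,
+ *			.npins = ARRAY_SIZE(sysclkreq2_d_1_pins),
+ *			.altsetting = ABX500_DEFAULT,
+ *		},
+ *	};
+ *
+ * and an abx500_function would then list "sysclkreq2_d_1" in its @groups.
+ */
+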
+#define ALTERNATE_FUNCTIONS(pin, sel_bit, alt1, alt2, alta, altb, altc) \
+{ \
+ .pin_number = pin, \
+ .gpiosel_bit = sel_bit, \
+ .alt_bit1 = alt1, \
+ .alt_bit2 = alt2, \
+ .alta_val = alta, \
+ .altb_val = altb, \
+ .altc_val = altc, \
+}
+
+#define UNUSED -1
+/**
+ * struct alternate_functions - per-pin alternate function selection data
+ * @pin_number: The pin number
+ * @gpiosel_bit: Control bit in the GPIOSEL register
+ * @alt_bit1: First AlternateFunction bit used to select the
+ * alternate function
+ * @alt_bit2: Second AlternateFunction bit used to select the
+ * alternate function
+ *
+ * The following three fields are needed because the ABx500 SoC family
+ * is not coherent about how the altA, altB and altC functions are
+ * selected through the ALTERNATFUNC register:
+ * @alta_val: value to write in alternatfunc to select altA function
+ * @altb_val: value to write in alternatfunc to select altB function
+ * @altc_val: value to write in alternatfunc to select altC function
+ */
+struct alternate_functions {
+ unsigned pin_number;
+ s8 gpiosel_bit;
+ s8 alt_bit1;
+ s8 alt_bit2;
+ u8 alta_val;
+ u8 altb_val;
+ u8 altc_val;
+};
+
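+/*
+ * Illustrative use of ALTERNATE_FUNCTIONS() (the values are made up, not
+ * copied from a real SoC table):
+ *
+ *	ALTERNATE_FUNCTIONS(5, 4, 0, 1, 0, 1, 2),
+ *
+ * describes pin 5 with GPIOSEL bit 4, alternate-function bits 0 and 1,
+ * and altA/altB/altC selected by writing 0, 1 and 2 respectively to the
+ * ALTERNATFUNC register.
+ */
+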
+/**
+ * struct pullud - specific pull up/down feature
+ * @first_pin: The pin number of the first pin that supports
+ * specific pull up/down
+ * @last_pin: The pin number of the last pin that supports
+ * specific pull up/down
+ */
+struct pullud {
+ unsigned first_pin;
+ unsigned last_pin;
+};
+
+#define GPIO_IRQ_CLUSTER(a, b, c) \
+{ \
+ .start = a, \
+ .end = b, \
+ .to_irq = c, \
+}
+
+/**
+ * struct abx500_gpio_irq_cluster - indicates GPIOs which are interrupt
+ * capable
+ * @start: The pin number of the first interrupt-capable pin
+ * @end: The pin number of the last interrupt-capable pin
+ * @to_irq: The IRQs associated with the ABx500 GPIOs are clustered
+ * together throughout the interrupt numbers at irregular
+ * intervals. To cope with this, the read-in values are
+ * placed in this cluster information table
+ */
+
+struct abx500_gpio_irq_cluster {
+ int start;
+ int end;
+ int to_irq;
+};
+
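+/*
+ * Illustration of the macro above (the numbers are made up):
+ *
+ *	GPIO_IRQ_CLUSTER(6, 13, 34),
+ *
+ * records that pins 6..13 are interrupt capable and that 34 is the value
+ * kept in @to_irq for mapping this cluster onto the interrupt number space.
+ */
+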
+/**
+ * struct abx500_pinrange - map pin numbers to GPIO offsets
+ * @offset: offset into the GPIO local numberspace, incidentally
+ * identical to the offset into the local pin numberspace
+ * @npins: number of pins to map from both offsets
+ * @altfunc: altfunc setting to be used to enable GPIO on a pin in
+ * this range (may vary)
+ */
+struct abx500_pinrange {
+ unsigned int offset;
+ unsigned int npins;
+ int altfunc;
+};
+
+#define ABX500_PINRANGE(a, b, c) { .offset = a, .npins = b, .altfunc = c }
+
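+/*
+ * Illustrative only (not a real SoC range):
+ *
+ *	ABX500_PINRANGE(1, 6, ABX500_ALT_A),
+ *
+ * maps six consecutive pins starting at offset 1 and records that
+ * alternate function A is the setting that turns them into GPIOs.
+ */
+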
+/**
+ * struct abx500_pinctrl_soc_data - ABx500 pin controller per-SoC configuration
+ * @gpio_ranges: An array of GPIO ranges for this SoC
+ * @gpio_num_ranges: The number of GPIO ranges for this SoC
+ * @pins: An array describing all pins the pin controller affects.
+ * All pins which are also GPIOs must be listed first within the
+ * array, and be numbered identically to the GPIO controller's
+ * numbering.
+ * @npins: The number of entries in @pins.
+ * @functions: The functions supported on this SoC.
+ * @nfunctions: The number of entries in @functions.
+ * @groups: An array describing all pin groups this SoC supports.
+ * @ngroups: The number of entries in @groups.
+ * @alternate_functions: An array describing the pins that support alternate
+ * functions and how to select them.
+ * @pullud: A range describing the pins that have dedicated pull up/down
+ * registers.
+ * @gpio_irq_cluster: An array of interrupt-capable GPIO clusters for this SoC
+ * @ngpio_irq_cluster: The number of entries in @gpio_irq_cluster
+ * @irq_gpio_rising_offset: Interrupt offset used as base to compute specific
+ * setting strategy of the rising interrupt line
+ * @irq_gpio_falling_offset: Interrupt offset used as base to compute specific
+ * setting strategy of the falling interrupt line
+ * @irq_gpio_factor: Factor used to compute specific setting strategy of
+ * the interrupt line
+ */
+
+struct abx500_pinctrl_soc_data {
+ const struct abx500_pinrange *gpio_ranges;
+ unsigned gpio_num_ranges;
+ const struct pinctrl_pin_desc *pins;
+ unsigned npins;
+ const struct abx500_function *functions;
+ unsigned nfunctions;
+ const struct abx500_pingroup *groups;
+ unsigned ngroups;
+ struct alternate_functions *alternate_functions;
+ struct pullud *pullud;
+ struct abx500_gpio_irq_cluster *gpio_irq_cluster;
+ unsigned ngpio_irq_cluster;
+ int irq_gpio_rising_offset;
+ int irq_gpio_falling_offset;
+ int irq_gpio_factor;
+};
+
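+/*
+ * A per-SoC file is expected to provide a filled-in soc_data instance and
+ * hand it back through its init hook, roughly along these lines (sketch
+ * only; the table names are placeholders and most fields are omitted):
+ *
+ *	static struct abx500_pinctrl_soc_data ab8500_soc = {
+ *		.gpio_ranges = ab8500_pinranges,
+ *		.gpio_num_ranges = ARRAY_SIZE(ab8500_pinranges),
+ *		.pins = ab8500_pins,
+ *		.npins = ARRAY_SIZE(ab8500_pins),
+ *	};
+ *
+ *	void abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc)
+ *	{
+ *		*soc = &ab8500_soc;
+ *	}
+ */
+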
+#ifdef CONFIG_PINCTRL_AB8500
+
+void abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_AB8540
+
+void abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_AB9540
+
+void abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_AB8505
+
+void abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#endif /* PINCTRL_PINCTRL_ABx500_H */
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index c5e757157183..75933a6aa828 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -265,7 +265,7 @@ static int at91_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create mux map */
parent = of_get_parent(np);
if (!parent) {
- kfree(new_map);
+ devm_kfree(pctldev->dev, new_map);
return -EINVAL;
}
new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
@@ -792,8 +792,8 @@ static struct pinctrl_desc at91_pinctrl_desc = {
static const char *gpio_compat = "atmel,at91rm9200-gpio";
-static void __devinit at91_pinctrl_child_count(struct at91_pinctrl *info,
- struct device_node *np)
+static void at91_pinctrl_child_count(struct at91_pinctrl *info,
+ struct device_node *np)
{
struct device_node *child;
@@ -807,8 +807,8 @@ static void __devinit at91_pinctrl_child_count(struct at91_pinctrl *info,
}
}
-static int __devinit at91_pinctrl_mux_mask(struct at91_pinctrl *info,
- struct device_node *np)
+static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
+ struct device_node *np)
{
int ret = 0;
int size;
@@ -840,10 +840,9 @@ static int __devinit at91_pinctrl_mux_mask(struct at91_pinctrl *info,
return ret;
}
-static int __devinit at91_pinctrl_parse_groups(struct device_node *np,
- struct at91_pin_group *grp,
- struct at91_pinctrl *info,
- u32 index)
+static int at91_pinctrl_parse_groups(struct device_node *np,
+ struct at91_pin_group *grp,
+ struct at91_pinctrl *info, u32 index)
{
struct at91_pmx_pin *pin;
int size;
@@ -889,8 +888,8 @@ static int __devinit at91_pinctrl_parse_groups(struct device_node *np,
return 0;
}
-static int __devinit at91_pinctrl_parse_functions(struct device_node *np,
- struct at91_pinctrl *info, u32 index)
+static int at91_pinctrl_parse_functions(struct device_node *np,
+ struct at91_pinctrl *info, u32 index)
{
struct device_node *child;
struct at91_pmx_func *func;
@@ -926,14 +925,14 @@ static int __devinit at91_pinctrl_parse_functions(struct device_node *np,
return 0;
}
-static struct of_device_id at91_pinctrl_of_match[] __devinitdata = {
+static struct of_device_id at91_pinctrl_of_match[] = {
{ .compatible = "atmel,at91sam9x5-pinctrl", .data = &at91sam9x5_ops },
{ .compatible = "atmel,at91rm9200-pinctrl", .data = &at91rm9200_ops },
{ /* sentinel */ }
};
-static int __devinit at91_pinctrl_probe_dt(struct platform_device *pdev,
- struct at91_pinctrl *info)
+static int at91_pinctrl_probe_dt(struct platform_device *pdev,
+ struct at91_pinctrl *info)
{
int ret = 0;
int i, j;
@@ -999,7 +998,7 @@ static int __devinit at91_pinctrl_probe_dt(struct platform_device *pdev,
return 0;
}
-static int __devinit at91_pinctrl_probe(struct platform_device *pdev)
+static int at91_pinctrl_probe(struct platform_device *pdev)
{
struct at91_pinctrl *info;
struct pinctrl_pin_desc *pdesc;
@@ -1063,7 +1062,7 @@ err:
return ret;
}
-static int __devexit at91_pinctrl_remove(struct platform_device *pdev)
+static int at91_pinctrl_remove(struct platform_device *pdev)
{
struct at91_pinctrl *info = platform_get_drvdata(pdev);
@@ -1443,7 +1442,7 @@ static struct gpio_chip at91_gpio_template = {
.ngpio = MAX_NB_GPIO_PER_BANK,
};
-static void __devinit at91_gpio_probe_fixup(void)
+static void at91_gpio_probe_fixup(void)
{
unsigned i;
struct at91_gpio_chip *at91_gpio, *last = NULL;
@@ -1461,13 +1460,13 @@ static void __devinit at91_gpio_probe_fixup(void)
}
}
-static struct of_device_id at91_gpio_of_match[] __devinitdata = {
+static struct of_device_id at91_gpio_of_match[] = {
{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
{ /* sentinel */ }
};
-static int __devinit at91_gpio_probe(struct platform_device *pdev)
+static int at91_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct resource *res;
@@ -1504,10 +1503,9 @@ static int __devinit at91_gpio_probe(struct platform_device *pdev)
goto err;
}
- at91_chip->regbase = devm_request_and_ioremap(&pdev->dev, res);
- if (!at91_chip->regbase) {
- dev_err(&pdev->dev, "failed to map registers, ignoring.\n");
- ret = -EBUSY;
+ at91_chip->regbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(at91_chip->regbase)) {
+ ret = PTR_ERR(at91_chip->regbase);
goto err;
}
@@ -1609,7 +1607,7 @@ static struct platform_driver at91_pinctrl_driver = {
.of_match_table = of_match_ptr(at91_pinctrl_of_match),
},
.probe = at91_pinctrl_probe,
- .remove = __devexit_p(at91_pinctrl_remove),
+ .remove = at91_pinctrl_remove,
};
static int __init at91_pinctrl_init(void)
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index 0b0e9b49a1b5..4eb6d2c4e4df 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -936,7 +936,7 @@ static struct pinctrl_gpio_range bcm2835_pinctrl_gpio_range = {
.npins = BCM2835_NUM_GPIOS,
};
-static int __devinit bcm2835_pinctrl_probe(struct platform_device *pdev)
+static int bcm2835_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -959,9 +959,9 @@ static int __devinit bcm2835_pinctrl_probe(struct platform_device *pdev)
return err;
}
- pc->base = devm_request_and_ioremap(dev, &iomem);
- if (!pc->base)
- return -EADDRNOTAVAIL;
+ pc->base = devm_ioremap_resource(dev, &iomem);
+ if (IS_ERR(pc->base))
+ return PTR_ERR(pc->base);
pc->gpio_chip = bcm2835_gpio_chip;
pc->gpio_chip.dev = dev;
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index fbb37154471c..8b7e7bc2226b 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -715,11 +715,9 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- gpio->base = devm_request_and_ioremap(&pdev->dev, memres);
- if (!gpio->base) {
- dev_err(gpio->dev, "could not get remap memory\n");
- return -ENOMEM;
- }
+ gpio->base = devm_ioremap_resource(&pdev->dev, memres);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
gpio->clk = devm_clk_get(gpio->dev, NULL);
if (IS_ERR(gpio->clk)) {
diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c
index b8635f634e91..1376eb7305db 100644
--- a/drivers/pinctrl/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/pinctrl-exynos5440.c
@@ -117,7 +117,7 @@ struct exynos5440_pinctrl_priv_data {
};
/* list of all possible config options supported */
-struct pin_config {
+static struct pin_config {
char *prop_cfg;
unsigned int cfg_type;
} pcfgs[] = {
@@ -599,7 +599,7 @@ static int exynos5440_gpio_direction_output(struct gpio_chip *gc, unsigned offse
}
/* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */
-static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
+static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
struct device_node *cfg_np, unsigned int **pin_list,
unsigned int *npins)
{
@@ -630,7 +630,7 @@ static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
* Parse the information about all the available pin groups and pin functions
* from device node of the pin-controller.
*/
-static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
+static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
struct exynos5440_pinctrl_priv_data *priv)
{
struct device *dev = &pdev->dev;
@@ -723,7 +723,7 @@ static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
}
/* register the pinctrl interface with the pinctrl subsystem */
-static int __init exynos5440_pinctrl_register(struct platform_device *pdev,
+static int exynos5440_pinctrl_register(struct platform_device *pdev,
struct exynos5440_pinctrl_priv_data *priv)
{
struct device *dev = &pdev->dev;
@@ -798,7 +798,7 @@ static int __init exynos5440_pinctrl_register(struct platform_device *pdev,
}
/* register the gpiolib interface with the gpiolib subsystem */
-static int __init exynos5440_gpiolib_register(struct platform_device *pdev,
+static int exynos5440_gpiolib_register(struct platform_device *pdev,
struct exynos5440_pinctrl_priv_data *priv)
{
struct gpio_chip *gc;
@@ -831,7 +831,7 @@ static int __init exynos5440_gpiolib_register(struct platform_device *pdev,
}
/* unregister the gpiolib interface with the gpiolib subsystem */
-static int __init exynos5440_gpiolib_unregister(struct platform_device *pdev,
+static int exynos5440_gpiolib_unregister(struct platform_device *pdev,
struct exynos5440_pinctrl_priv_data *priv)
{
int ret = gpiochip_remove(priv->gc);
@@ -842,7 +842,7 @@ static int __init exynos5440_gpiolib_unregister(struct platform_device *pdev,
return 0;
}
-static int __devinit exynos5440_pinctrl_probe(struct platform_device *pdev)
+static int exynos5440_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos5440_pinctrl_priv_data *priv;
@@ -866,11 +866,9 @@ static int __devinit exynos5440_pinctrl_probe(struct platform_device *pdev)
return -ENOENT;
}
- priv->reg_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!priv->reg_base) {
- dev_err(dev, "ioremap failed\n");
- return -ENODEV;
- }
+ priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->reg_base))
+ return PTR_ERR(priv->reg_base);
ret = exynos5440_gpiolib_register(pdev, priv);
if (ret)
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index 8ed20e84cb02..af97a1f90007 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -170,7 +170,7 @@ static const unsigned pins_ntr[] = {GPIO4};
static const unsigned pins_ntr8k[] = {GPIO5};
static const unsigned pins_hrst[] = {GPIO6};
static const unsigned pins_mdio[] = {GPIO7, GPIO8};
-static const unsigned pins_bled[] = {GPIO7, GPIO10, GPIO11,
+static const unsigned pins_bled[] = {GPIO9, GPIO10, GPIO11,
GPIO12, GPIO13, GPIO14};
static const unsigned pins_asc0[] = {GPIO32, GPIO33};
static const unsigned pins_spi[] = {GPIO34, GPIO35, GPIO36};
@@ -315,6 +315,37 @@ static int falcon_pinconf_set(struct pinctrl_dev *pctrldev,
static void falcon_pinconf_dbg_show(struct pinctrl_dev *pctrldev,
struct seq_file *s, unsigned offset)
{
+ unsigned long config;
+ struct pin_desc *desc;
+
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ int port = PORT(offset);
+
+ seq_printf(s, " (port %d) mux %d -- ", port,
+ pad_r32(info->membase[port], LTQ_PADC_MUX(PORT_PIN(offset))));
+
+ config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_PULL, 0);
+ if (!falcon_pinconf_get(pctrldev, offset, &config))
+ seq_printf(s, "pull %d ",
+ (int)LTQ_PINCONF_UNPACK_ARG(config));
+
+ config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_DRIVE_CURRENT, 0);
+ if (!falcon_pinconf_get(pctrldev, offset, &config))
+ seq_printf(s, "drive-current %d ",
+ (int)LTQ_PINCONF_UNPACK_ARG(config));
+
+ config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_SLEW_RATE, 0);
+ if (!falcon_pinconf_get(pctrldev, offset, &config))
+ seq_printf(s, "slew-rate %d ",
+ (int)LTQ_PINCONF_UNPACK_ARG(config));
+
+ desc = pin_desc_get(pctrldev, offset);
+ if (desc) {
+ if (desc->gpio_owner)
+ seq_printf(s, " owner: %s", desc->gpio_owner);
+ } else {
+ seq_printf(s, " not registered");
+ }
}
static void falcon_pinconf_group_dbg_show(struct pinctrl_dev *pctrldev,
@@ -360,6 +391,8 @@ static const struct ltq_cfg_param falcon_cfg_params[] = {
static struct ltq_pinmux_info falcon_info = {
.desc = &falcon_pctrl_desc,
.apply_mux = falcon_mux_apply,
+ .params = falcon_cfg_params,
+ .num_params = ARRAY_SIZE(falcon_cfg_params),
};
@@ -398,6 +431,9 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
u32 avail;
int pins;
+ if (!of_device_is_available(np))
+ continue;
+
if (!ppdev) {
dev_err(&pdev->dev, "failed to find pad pdev\n");
continue;
@@ -411,14 +447,11 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
dev_err(&ppdev->dev, "failed to get clock\n");
return PTR_ERR(falcon_info.clk[*bank]);
}
- falcon_info.membase[*bank] =
- devm_request_and_ioremap(&pdev->dev, &res);
- if (!falcon_info.membase[*bank]) {
- dev_err(&pdev->dev,
- "Failed to remap memory for bank %d\n",
- *bank);
- return -ENOMEM;
- }
+ falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev,
+ &res);
+ if (IS_ERR(falcon_info.membase[*bank]))
+ return PTR_ERR(falcon_info.membase[*bank]);
+
avail = pad_r32(falcon_info.membase[*bank],
LTQ_PADC_AVAIL);
pins = fls(avail);
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index 131d86d7c2a5..4cebb9c6c5c5 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -425,10 +425,10 @@ static int imx_pinctrl_get_pin_id_and_mux(const struct imx_pinctrl_soc_info *inf
return 0;
}
-static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
- struct imx_pin_group *grp,
- struct imx_pinctrl_soc_info *info,
- u32 index)
+static int imx_pinctrl_parse_groups(struct device_node *np,
+ struct imx_pin_group *grp,
+ struct imx_pinctrl_soc_info *info,
+ u32 index)
{
unsigned int pin_func_id;
int ret, size;
@@ -482,8 +482,9 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
return 0;
}
-static int __devinit imx_pinctrl_parse_functions(struct device_node *np,
- struct imx_pinctrl_soc_info *info, u32 index)
+static int imx_pinctrl_parse_functions(struct device_node *np,
+ struct imx_pinctrl_soc_info *info,
+ u32 index)
{
struct device_node *child;
struct imx_pmx_func *func;
@@ -517,7 +518,7 @@ static int __devinit imx_pinctrl_parse_functions(struct device_node *np,
return 0;
}
-static int __devinit imx_pinctrl_probe_dt(struct platform_device *pdev,
+static int imx_pinctrl_probe_dt(struct platform_device *pdev,
struct imx_pinctrl_soc_info *info)
{
struct device_node *np = pdev->dev.of_node;
@@ -560,8 +561,8 @@ static int __devinit imx_pinctrl_probe_dt(struct platform_device *pdev,
return 0;
}
-int __devinit imx_pinctrl_probe(struct platform_device *pdev,
- struct imx_pinctrl_soc_info *info)
+int imx_pinctrl_probe(struct platform_device *pdev,
+ struct imx_pinctrl_soc_info *info)
{
struct imx_pinctrl *ipctl;
struct resource *res;
@@ -583,9 +584,9 @@ int __devinit imx_pinctrl_probe(struct platform_device *pdev,
if (!res)
return -ENOENT;
- ipctl->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!ipctl->base)
- return -EBUSY;
+ ipctl->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ipctl->base))
+ return PTR_ERR(ipctl->base);
imx_pinctrl_desc.name = dev_name(&pdev->dev);
imx_pinctrl_desc.pins = info->pins;
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c
index 04364f7822b7..e76d75c9d1ba 100644
--- a/drivers/pinctrl/pinctrl-imx23.c
+++ b/drivers/pinctrl/pinctrl-imx23.c
@@ -267,7 +267,7 @@ static struct mxs_pinctrl_soc_data imx23_pinctrl_data = {
.npins = ARRAY_SIZE(imx23_pins),
};
-static int __devinit imx23_pinctrl_probe(struct platform_device *pdev)
+static int imx23_pinctrl_probe(struct platform_device *pdev)
{
return mxs_pinctrl_probe(pdev, &imx23_pinctrl_data);
}
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c
index e1af2ba89004..79c9c8d296af 100644
--- a/drivers/pinctrl/pinctrl-imx28.c
+++ b/drivers/pinctrl/pinctrl-imx28.c
@@ -383,7 +383,7 @@ static struct mxs_pinctrl_soc_data imx28_pinctrl_data = {
.npins = ARRAY_SIZE(imx28_pins),
};
-static int __devinit imx28_pinctrl_probe(struct platform_device *pdev)
+static int imx28_pinctrl_probe(struct platform_device *pdev)
{
return mxs_pinctrl_probe(pdev, &imx28_pinctrl_data);
}
diff --git a/drivers/pinctrl/pinctrl-imx35.c b/drivers/pinctrl/pinctrl-imx35.c
index 1dbf5278acec..6e214110e3d5 100644
--- a/drivers/pinctrl/pinctrl-imx35.c
+++ b/drivers/pinctrl/pinctrl-imx35.c
@@ -1564,7 +1564,7 @@ static struct of_device_id imx35_pinctrl_of_match[] = {
{ /* sentinel */ }
};
-static int __devinit imx35_pinctrl_probe(struct platform_device *pdev)
+static int imx35_pinctrl_probe(struct platform_device *pdev)
{
return imx_pinctrl_probe(pdev, &imx35_pinctrl_info);
}
diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c
index 131216558a7b..9a92aaad150f 100644
--- a/drivers/pinctrl/pinctrl-imx51.c
+++ b/drivers/pinctrl/pinctrl-imx51.c
@@ -1291,7 +1291,7 @@ static struct of_device_id imx51_pinctrl_of_match[] = {
{ /* sentinel */ }
};
-static int __devinit imx51_pinctrl_probe(struct platform_device *pdev)
+static int imx51_pinctrl_probe(struct platform_device *pdev)
{
return imx_pinctrl_probe(pdev, &imx51_pinctrl_info);
}
diff --git a/drivers/pinctrl/pinctrl-imx53.c b/drivers/pinctrl/pinctrl-imx53.c
index ec4048691775..2c9c8e2334da 100644
--- a/drivers/pinctrl/pinctrl-imx53.c
+++ b/drivers/pinctrl/pinctrl-imx53.c
@@ -1371,7 +1371,7 @@ static struct imx_pin_reg imx53_pin_regs[] = {
IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 0, 0x7F8, 1), /* MX53_PAD_GPIO_8__ESAI1_TX5_RX0 */
IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 1, 0x000, 0), /* MX53_PAD_GPIO_8__GPIO1_8 */
IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 2, 0x000, 0), /* MX53_PAD_GPIO_8__EPIT2_EPITO */
- IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 3, 0x760, 3), /* MX53_PAD_GPIO_8__CAN1_RXCAN */
+ IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 3, 0x760, 2), /* MX53_PAD_GPIO_8__CAN1_RXCAN */
IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 4, 0x880, 5), /* MX53_PAD_GPIO_8__UART2_RXD_MUX */
IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 5, 0x000, 0), /* MX53_PAD_GPIO_8__FIRI_TXD */
IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 6, 0x000, 0), /* MX53_PAD_GPIO_8__SPDIF_SRCLK */
@@ -1618,7 +1618,7 @@ static struct of_device_id imx53_pinctrl_of_match[] = {
{ /* sentinel */ }
};
-static int __devinit imx53_pinctrl_probe(struct platform_device *pdev)
+static int imx53_pinctrl_probe(struct platform_device *pdev)
{
return imx_pinctrl_probe(pdev, &imx53_pinctrl_info);
}
diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/pinctrl-imx6q.c
index 844ab13c93a3..663346bb765e 100644
--- a/drivers/pinctrl/pinctrl-imx6q.c
+++ b/drivers/pinctrl/pinctrl-imx6q.c
@@ -2302,7 +2302,7 @@ static struct of_device_id imx6q_pinctrl_of_match[] = {
{ /* sentinel */ }
};
-static int __devinit imx6q_pinctrl_probe(struct platform_device *pdev)
+static int imx6q_pinctrl_probe(struct platform_device *pdev)
{
return imx_pinctrl_probe(pdev, &imx6q_pinctrl_info);
}
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index 15f501d89026..a70384611351 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -64,11 +64,13 @@ static void ltq_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, " %s", dev_name(pctldev->dev));
}
-static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+static void ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
struct pinctrl_map **map)
{
struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
+ struct property *pins = of_find_property(np, "lantiq,pins", NULL);
+ struct property *groups = of_find_property(np, "lantiq,groups", NULL);
unsigned long configs[3];
unsigned num_configs = 0;
struct property *prop;
@@ -76,8 +78,20 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
const char *function;
int ret, i;
+ if (!pins && !groups) {
+ dev_err(pctldev->dev, "%s defines neither pins nor groups\n",
+ np->name);
+ return;
+ }
+
+ if (pins && groups) {
+ dev_err(pctldev->dev, "%s defines both pins and groups\n",
+ np->name);
+ return;
+ }
+
ret = of_property_read_string(np, "lantiq,function", &function);
- if (!ret) {
+ if (groups && !ret) {
of_property_for_each_string(np, "lantiq,groups", prop, group) {
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
(*map)->name = function;
@@ -85,11 +99,6 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
(*map)->data.mux.function = function;
(*map)++;
}
- if (of_find_property(np, "lantiq,pins", NULL))
- dev_err(pctldev->dev,
- "%s mixes pins and groups settings\n",
- np->name);
- return 0;
}
for (i = 0; i < info->num_params; i++) {
@@ -103,7 +112,7 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
}
if (!num_configs)
- return -EINVAL;
+ return;
of_property_for_each_string(np, "lantiq,pins", prop, pin) {
(*map)->data.configs.configs = kmemdup(configs,
@@ -115,7 +124,16 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
(*map)->data.configs.num_configs = num_configs;
(*map)++;
}
- return 0;
+ of_property_for_each_string(np, "lantiq,groups", prop, group) {
+ (*map)->data.configs.configs = kmemdup(configs,
+ num_configs * sizeof(unsigned long),
+ GFP_KERNEL);
+ (*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ (*map)->name = group;
+ (*map)->data.configs.group_or_pin = group;
+ (*map)->data.configs.num_configs = num_configs;
+ (*map)++;
+ }
}
static int ltq_pinctrl_dt_subnode_size(struct device_node *np)
@@ -135,23 +153,19 @@ static int ltq_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct pinctrl_map *tmp;
struct device_node *np;
- int ret;
+ int max_maps = 0;
- *num_maps = 0;
for_each_child_of_node(np_config, np)
- *num_maps += ltq_pinctrl_dt_subnode_size(np);
- *map = kzalloc(*num_maps * sizeof(struct pinctrl_map), GFP_KERNEL);
+ max_maps += ltq_pinctrl_dt_subnode_size(np);
+ *map = kzalloc(max_maps * sizeof(struct pinctrl_map) * 2, GFP_KERNEL);
if (!*map)
return -ENOMEM;
tmp = *map;
- for_each_child_of_node(np_config, np) {
- ret = ltq_pinctrl_dt_subnode_to_map(pctldev, np, &tmp);
- if (ret < 0) {
- ltq_pinctrl_dt_free_map(pctldev, *map, *num_maps);
- return ret;
- }
- }
+ for_each_child_of_node(np_config, np)
+ ltq_pinctrl_dt_subnode_to_map(pctldev, np, &tmp);
+ *num_maps = ((int)(tmp - *map));
+
return 0;
}
@@ -280,7 +294,7 @@ static int ltq_pmx_gpio_request_enable(struct pinctrl_dev *pctrldev,
unsigned pin)
{
struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
- int mfp = match_mfp(info, pin + (range->id * 32));
+ int mfp = match_mfp(info, pin);
int pin_func;
if (mfp < 0) {
diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h
index 4419d32a0ade..6d07f0238532 100644
--- a/drivers/pinctrl/pinctrl-lantiq.h
+++ b/drivers/pinctrl/pinctrl-lantiq.h
@@ -34,6 +34,7 @@ enum ltq_pinconf_param {
LTQ_PINCONF_PARAM_OPEN_DRAIN,
LTQ_PINCONF_PARAM_DRIVE_CURRENT,
LTQ_PINCONF_PARAM_SLEW_RATE,
+ LTQ_PINCONF_PARAM_OUTPUT,
};
struct ltq_cfg_param {
diff --git a/drivers/pinctrl/pinctrl-mmp2.c b/drivers/pinctrl/pinctrl-mmp2.c
index 4fbb3db3f1c1..4afa56a3a51d 100644
--- a/drivers/pinctrl/pinctrl-mmp2.c
+++ b/drivers/pinctrl/pinctrl-mmp2.c
@@ -686,7 +686,7 @@ static struct pxa3xx_pinmux_info mmp2_info = {
.ds_shift = MMP2_DS_SHIFT,
};
-static int __devinit mmp2_pinmux_probe(struct platform_device *pdev)
+static int mmp2_pinmux_probe(struct platform_device *pdev)
{
return pxa3xx_pinctrl_register(pdev, &mmp2_info);
}
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index 180f16379ec1..23af9f1f9c35 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -146,7 +146,7 @@ free:
static void mxs_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
- int i;
+ u32 i;
for (i = 0; i < num_maps; i++) {
if (map[i].type == PIN_MAP_TYPE_MUX_GROUP)
@@ -203,7 +203,7 @@ static int mxs_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned selector,
void __iomem *reg;
u8 bank, shift;
u16 pin;
- int i;
+ u32 i;
for (i = 0; i < g->npins; i++) {
bank = PINID_TO_BANK(g->pins[i]);
@@ -256,7 +256,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
void __iomem *reg;
u8 ma, vol, pull, bank, shift;
u16 pin;
- int i;
+ u32 i;
ma = CONFIG_TO_MA(config);
vol = CONFIG_TO_VOL(config);
@@ -335,9 +335,9 @@ static struct pinctrl_desc mxs_pinctrl_desc = {
.owner = THIS_MODULE,
};
-static int __devinit mxs_pinctrl_parse_group(struct platform_device *pdev,
- struct device_node *np, int idx,
- const char **out_name)
+static int mxs_pinctrl_parse_group(struct platform_device *pdev,
+ struct device_node *np, int idx,
+ const char **out_name)
{
struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
struct mxs_group *g = &d->soc->groups[idx];
@@ -345,8 +345,7 @@ static int __devinit mxs_pinctrl_parse_group(struct platform_device *pdev,
const char *propname = "fsl,pinmux-ids";
char *group;
int length = strlen(np->name) + SUFFIX_LEN;
- int i;
- u32 val;
+ u32 val, i;
group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
if (!group)
@@ -384,8 +383,8 @@ static int __devinit mxs_pinctrl_parse_group(struct platform_device *pdev,
return 0;
}
-static int __devinit mxs_pinctrl_probe_dt(struct platform_device *pdev,
- struct mxs_pinctrl_data *d)
+static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
+ struct mxs_pinctrl_data *d)
{
struct mxs_pinctrl_soc_data *soc = d->soc;
struct device_node *np = pdev->dev.of_node;
@@ -476,8 +475,8 @@ static int __devinit mxs_pinctrl_probe_dt(struct platform_device *pdev,
return 0;
}
-int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
- struct mxs_pinctrl_soc_data *soc)
+int mxs_pinctrl_probe(struct platform_device *pdev,
+ struct mxs_pinctrl_soc_data *soc)
{
struct device_node *np = pdev->dev.of_node;
struct mxs_pinctrl_data *d;
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 7d88ae352119..30b4da91ef7e 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -1251,8 +1251,7 @@ static const struct nmk_pinctrl_soc_data nmk_db8500_soc = {
.prcm_gpiocr_registers = db8500_prcm_gpiocr_regs,
};
-void __devinit
-nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
+void nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
{
*soc = &nmk_db8500_soc;
}
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8540.c b/drivers/pinctrl/pinctrl-nomadik-db8540.c
index bb6a4016322a..d7ba5443bae0 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8540.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8540.c
@@ -1260,8 +1260,7 @@ static const struct nmk_pinctrl_soc_data nmk_db8540_soc = {
.prcm_gpiocr_registers = db8540_prcm_gpiocr_regs,
};
-void __devinit
-nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
+void nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
{
*soc = &nmk_db8540_soc;
}
diff --git a/drivers/pinctrl/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/pinctrl-nomadik-stn8815.c
index 7d432c3bc359..924a3393fa82 100644
--- a/drivers/pinctrl/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/pinctrl-nomadik-stn8815.c
@@ -350,8 +350,7 @@ static const struct nmk_pinctrl_soc_data nmk_stn8815_soc = {
.ngroups = ARRAY_SIZE(nmk_stn8815_groups),
};
-void __devinit
-nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
+void nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
{
*soc = &nmk_stn8815_soc;
}
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index ef66f98e9202..36d20293de5c 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -25,6 +25,8 @@
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
@@ -32,8 +34,8 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/pinctrl-nomadik.h>
#include <asm/mach/irq.h>
-#include <mach/irqs.h>
#include "pinctrl-nomadik.h"
+#include "core.h"
/*
* The GPIO module in the Nomadik family of Systems-on-Chip is an
@@ -216,7 +218,7 @@ nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
u32 falling = nmk_chip->fimsc & BIT(offset);
u32 rising = nmk_chip->rimsc & BIT(offset);
int gpio = nmk_chip->chip.base + offset;
- int irq = NOMADIK_GPIO_TO_IRQ(gpio);
+ int irq = irq_find_mapping(nmk_chip->domain, offset);
struct irq_data *d = irq_get_irq_data(irq);
if (!rising && !falling)
@@ -259,6 +261,9 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
const struct prcm_gpiocr_altcx_pin_desc *pin_desc;
const u16 *gpiocr_regs;
+ if (!npct->prcm_base)
+ return;
+
if (alt_num > PRCM_IDX_GPIOCR_ALTC_MAX) {
dev_err(npct->dev, "PRCM GPIOCR: alternate-C%i is invalid\n",
alt_num);
@@ -673,7 +678,7 @@ int nmk_gpio_set_mode(int gpio, int gpio_mode)
}
EXPORT_SYMBOL(nmk_gpio_set_mode);
-static int nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
+static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
{
int i;
u16 reg;
@@ -682,6 +687,9 @@ static int nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
const struct prcm_gpiocr_altcx_pin_desc *pin_desc;
const u16 *gpiocr_regs;
+ if (!npct->prcm_base)
+ return NMK_GPIO_ALT_C;
+
for (i = 0; i < npct->soc->npins_altcx; i++) {
if (npct->soc->altcx_pins[i].pin == gpio)
break;
@@ -1306,7 +1314,7 @@ const struct irq_domain_ops nmk_gpio_irq_simple_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static int __devinit nmk_gpio_probe(struct platform_device *dev)
+static int nmk_gpio_probe(struct platform_device *dev)
{
struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
struct device_node *np = dev->dev.of_node;
@@ -1335,8 +1343,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
dev_err(&dev->dev, "gpio-bank property not found\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP;
@@ -1344,41 +1351,29 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
}
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto out;
- }
+ if (!res)
+ return -ENOENT;
irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- ret = irq;
- goto out;
- }
+ if (irq < 0)
+ return irq;
secondary_irq = platform_get_irq(dev, 1);
- if (secondary_irq >= 0 && !pdata->get_secondary_status) {
- ret = -EINVAL;
- goto out;
- }
+ if (secondary_irq >= 0 && !pdata->get_secondary_status)
+ return -EINVAL;
- base = devm_request_and_ioremap(&dev->dev, res);
- if (!base) {
- ret = -ENOMEM;
- goto out;
- }
+ base = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
clk = devm_clk_get(&dev->dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto out;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
clk_prepare(clk);
nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL);
- if (!nmk_chip) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!nmk_chip)
+ return -ENOMEM;
/*
* The virt address in nmk_chip->addr is in the nomadik register space,
@@ -1412,7 +1407,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
ret = gpiochip_add(&nmk_chip->chip);
if (ret)
- goto out;
+ return ret;
BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
@@ -1421,14 +1416,15 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, nmk_chip);
if (!np)
- irq_start = NOMADIK_GPIO_TO_IRQ(pdata->first_gpio);
+ irq_start = pdata->first_irq;
nmk_chip->domain = irq_domain_add_simple(np,
NMK_GPIO_PER_CHIP, irq_start,
&nmk_gpio_irq_simple_ops, nmk_chip);
if (!nmk_chip->domain) {
dev_err(&dev->dev, "failed to create irqdomain\n");
- ret = -ENOSYS;
- goto out;
+ /* Just do this, no matter if it fails */
+ ret = gpiochip_remove(&nmk_chip->chip);
+ return -ENOSYS;
}
nmk_gpio_init_irq(nmk_chip);
@@ -1436,12 +1432,6 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
dev_info(&dev->dev, "at address %p\n", nmk_chip->addr);
return 0;
-
-out:
- dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
- pdata->first_gpio, pdata->first_gpio+31);
-
- return ret;
}
static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev)
@@ -1502,11 +1492,285 @@ static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
}
+static void nmk_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps)
+{
+ int i;
+
+ for (i = 0; i < num_maps; i++)
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ kfree(map);
+}
+
+static int nmk_dt_reserve_map(struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, unsigned reserve)
+{
+ unsigned old_num = *reserved_maps;
+ unsigned new_num = *num_maps + reserve;
+ struct pinctrl_map *new_map;
+
+ if (old_num >= new_num)
+ return 0;
+
+ new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+
+ memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
+
+ *map = new_map;
+ *reserved_maps = new_num;
+
+ return 0;
+}
+
+static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, const char *group,
+ const char *function)
+{
+ if (*num_maps == *reserved_maps)
+ return -ENOSPC;
+
+ (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)[*num_maps].data.mux.group = group;
+ (*map)[*num_maps].data.mux.function = function;
+ (*num_maps)++;
+
+ return 0;
+}
+
+static int nmk_dt_add_map_configs(struct pinctrl_map **map,
+ unsigned *reserved_maps,
+ unsigned *num_maps, const char *group,
+ unsigned long *configs, unsigned num_configs)
+{
+ unsigned long *dup_configs;
+
+ if (*num_maps == *reserved_maps)
+ return -ENOSPC;
+
+ dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
+ GFP_KERNEL);
+ if (!dup_configs)
+ return -ENOMEM;
+
+ (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN;
+
+ (*map)[*num_maps].data.configs.group_or_pin = group;
+ (*map)[*num_maps].data.configs.configs = dup_configs;
+ (*map)[*num_maps].data.configs.num_configs = num_configs;
+ (*num_maps)++;
+
+ return 0;
+}
+
+#define NMK_CONFIG_PIN(x,y) { .property = x, .config = y, }
+#define NMK_CONFIG_PIN_ARRAY(x,y) { .property = x, .choice = y, \
+ .size = ARRAY_SIZE(y), }
+
+static const unsigned long nmk_pin_input_modes[] = {
+ PIN_INPUT_NOPULL,
+ PIN_INPUT_PULLUP,
+ PIN_INPUT_PULLDOWN,
+};
+
+static const unsigned long nmk_pin_output_modes[] = {
+ PIN_OUTPUT_LOW,
+ PIN_OUTPUT_HIGH,
+ PIN_DIR_OUTPUT,
+};
+
+static const unsigned long nmk_pin_sleep_modes[] = {
+ PIN_SLEEPMODE_DISABLED,
+ PIN_SLEEPMODE_ENABLED,
+};
+
+static const unsigned long nmk_pin_sleep_input_modes[] = {
+ PIN_SLPM_INPUT_NOPULL,
+ PIN_SLPM_INPUT_PULLUP,
+ PIN_SLPM_INPUT_PULLDOWN,
+ PIN_SLPM_DIR_INPUT,
+};
+
+static const unsigned long nmk_pin_sleep_output_modes[] = {
+ PIN_SLPM_OUTPUT_LOW,
+ PIN_SLPM_OUTPUT_HIGH,
+ PIN_SLPM_DIR_OUTPUT,
+};
+
+static const unsigned long nmk_pin_sleep_wakeup_modes[] = {
+ PIN_SLPM_WAKEUP_DISABLE,
+ PIN_SLPM_WAKEUP_ENABLE,
+};
+
+static const unsigned long nmk_pin_gpio_modes[] = {
+ PIN_GPIOMODE_DISABLED,
+ PIN_GPIOMODE_ENABLED,
+};
+
+static const unsigned long nmk_pin_sleep_pdis_modes[] = {
+ PIN_SLPM_PDIS_DISABLED,
+ PIN_SLPM_PDIS_ENABLED,
+};
+
+struct nmk_cfg_param {
+ const char *property;
+ unsigned long config;
+ const unsigned long *choice;
+ int size;
+};
+
+static const struct nmk_cfg_param nmk_cfg_params[] = {
+ NMK_CONFIG_PIN_ARRAY("ste,input", nmk_pin_input_modes),
+ NMK_CONFIG_PIN_ARRAY("ste,output", nmk_pin_output_modes),
+ NMK_CONFIG_PIN_ARRAY("ste,sleep", nmk_pin_sleep_modes),
+ NMK_CONFIG_PIN_ARRAY("ste,sleep-input", nmk_pin_sleep_input_modes),
+ NMK_CONFIG_PIN_ARRAY("ste,sleep-output", nmk_pin_sleep_output_modes),
+ NMK_CONFIG_PIN_ARRAY("ste,sleep-wakeup", nmk_pin_sleep_wakeup_modes),
+ NMK_CONFIG_PIN_ARRAY("ste,gpio", nmk_pin_gpio_modes),
+ NMK_CONFIG_PIN_ARRAY("ste,sleep-pull-disable", nmk_pin_sleep_pdis_modes),
+};
+
+static int nmk_dt_pin_config(int index, int val, unsigned long *config)
+{
+ int ret = 0;
+
+ if (nmk_cfg_params[index].choice == NULL)
+ *config = nmk_cfg_params[index].config;
+ else {
+ /* test if out of range */
+ if (val < nmk_cfg_params[index].size) {
+ *config = nmk_cfg_params[index].config |
+ nmk_cfg_params[index].choice[val];
+ }
+ }
+ return ret;
+}
+
+static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char *pin_name)
+{
+ int i, pin_number;
+ struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
+
+ if (sscanf((char *)pin_name, "GPIO%d", &pin_number) == 1)
+ for (i = 0; i < npct->soc->npins; i++)
+ if (npct->soc->pins[i].number == pin_number)
+ return npct->soc->pins[i].name;
+ return NULL;
+}
+
+static bool nmk_pinctrl_dt_get_config(struct device_node *np,
+ unsigned long *configs)
+{
+ bool has_config = 0;
+ unsigned long cfg = 0;
+ int i, val, ret;
+
+ for (i = 0; i < ARRAY_SIZE(nmk_cfg_params); i++) {
+ ret = of_property_read_u32(np,
+ nmk_cfg_params[i].property, &val);
+ if (ret != -EINVAL) {
+ if (nmk_dt_pin_config(i, val, &cfg) == 0) {
+ *configs |= cfg;
+ has_config = 1;
+ }
+ }
+ }
+
+ return has_config;
+}
+
+int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned *reserved_maps,
+ unsigned *num_maps)
+{
+ int ret;
+ const char *function = NULL;
+ unsigned long configs = 0;
+ bool has_config = 0;
+ unsigned reserve = 0;
+ struct property *prop;
+ const char *group, *gpio_name;
+ struct device_node *np_config;
+
+ ret = of_property_read_string(np, "ste,function", &function);
+ if (ret >= 0)
+ reserve = 1;
+
+ has_config = nmk_pinctrl_dt_get_config(np, &configs);
+
+ np_config = of_parse_phandle(np, "ste,config", 0);
+ if (np_config)
+ has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+
+ ret = of_property_count_strings(np, "ste,pins");
+ if (ret < 0)
+ goto exit;
+
+ if (has_config)
+ reserve++;
+
+ reserve *= ret;
+
+ ret = nmk_dt_reserve_map(map, reserved_maps, num_maps, reserve);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_string(np, "ste,pins", prop, group) {
+ if (function) {
+ ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
+ group, function);
+ if (ret < 0)
+ goto exit;
+ }
+ if (has_config) {
+ gpio_name = nmk_find_pin_name(pctldev, group);
+
+ ret = nmk_dt_add_map_configs(map, reserved_maps, num_maps,
+ gpio_name, &configs, 1);
+ if (ret < 0)
+ goto exit;
+ }
+
+ }
+exit:
+ return ret;
+}
+
+int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ unsigned reserved_maps;
+ struct device_node *np;
+ int ret;
+
+ reserved_maps = 0;
+ *map = NULL;
+ *num_maps = 0;
+
+ for_each_child_of_node(np_config, np) {
+ ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps, num_maps);
+ if (ret < 0) {
+ nmk_pinctrl_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static struct pinctrl_ops nmk_pinctrl_ops = {
.get_groups_count = nmk_get_groups_cnt,
.get_group_name = nmk_get_group_name,
.get_group_pins = nmk_get_group_pins,
.pin_dbg_show = nmk_pin_dbg_show,
+ .dt_node_to_map = nmk_pinctrl_dt_node_to_map,
+ .dt_free_map = nmk_pinctrl_dt_free_map,
};
static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
@@ -1840,16 +2104,43 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
static const struct of_device_id nmk_pinctrl_match[] = {
{
- .compatible = "stericsson,nmk_pinctrl",
+ .compatible = "stericsson,nmk-pinctrl-stn8815",
+ .data = (void *)PINCTRL_NMK_STN8815,
+ },
+ {
+ .compatible = "stericsson,nmk-pinctrl",
.data = (void *)PINCTRL_NMK_DB8500,
},
{},
};
-static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
+static int nmk_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct nmk_pinctrl *npct;
+
+ npct = platform_get_drvdata(pdev);
+ if (!npct)
+ return -EINVAL;
+
+ return pinctrl_force_sleep(npct->pctl);
+}
+
+static int nmk_pinctrl_resume(struct platform_device *pdev)
+{
+ struct nmk_pinctrl *npct;
+
+ npct = platform_get_drvdata(pdev);
+ if (!npct)
+ return -EINVAL;
+
+ return pinctrl_force_default(npct->pctl);
+}
+
+static int nmk_pinctrl_probe(struct platform_device *pdev)
{
const struct platform_device_id *platid = platform_get_device_id(pdev);
struct device_node *np = pdev->dev.of_node;
+ struct device_node *prcm_np;
struct nmk_pinctrl *npct;
struct resource *res;
unsigned int version = 0;
@@ -1878,18 +2169,26 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
if (version == PINCTRL_NMK_DB8540)
nmk_pinctrl_db8540_init(&npct->soc);
+ if (np) {
+ prcm_np = of_parse_phandle(np, "prcm", 0);
+ if (prcm_np)
+ npct->prcm_base = of_iomap(prcm_np, 0);
+ }
+
+ /* Allow platform passed information to over-write DT. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
+ if (res)
npct->prcm_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (!npct->prcm_base) {
- dev_err(&pdev->dev,
- "failed to ioremap PRCM registers\n");
- return -ENOMEM;
+ if (!npct->prcm_base) {
+ if (version == PINCTRL_NMK_STN8815) {
+ dev_info(&pdev->dev,
+ "No PRCM base, "
+ "assuming no ALT-Cx control is available\n");
+ } else {
+ dev_err(&pdev->dev, "missing PRCM base address\n");
+ return -EINVAL;
}
- } else {
- dev_info(&pdev->dev,
- "No PRCM base, assume no ALT-Cx control is available\n");
}
/*
@@ -1954,6 +2253,10 @@ static struct platform_driver nmk_pinctrl_driver = {
},
.probe = nmk_pinctrl_probe,
.id_table = nmk_pinctrl_id,
+#ifdef CONFIG_PM
+ .suspend = nmk_pinctrl_suspend,
+ .resume = nmk_pinctrl_resume,
+#endif
};
static int __init nmk_gpio_init(void)
diff --git a/drivers/pinctrl/pinctrl-pxa168.c b/drivers/pinctrl/pinctrl-pxa168.c
index cb771e4a6355..d9cd2b457484 100644
--- a/drivers/pinctrl/pinctrl-pxa168.c
+++ b/drivers/pinctrl/pinctrl-pxa168.c
@@ -615,7 +615,7 @@ static struct pxa3xx_pinmux_info pxa168_info = {
.ds_shift = PXA168_DS_SHIFT,
};
-static int __devinit pxa168_pinmux_probe(struct platform_device *pdev)
+static int pxa168_pinmux_probe(struct platform_device *pdev)
{
return pxa3xx_pinctrl_register(pdev, &pxa168_info);
}
diff --git a/drivers/pinctrl/pinctrl-pxa3xx.c b/drivers/pinctrl/pinctrl-pxa3xx.c
index 51f8a388b917..1f49bb02a6af 100644
--- a/drivers/pinctrl/pinctrl-pxa3xx.c
+++ b/drivers/pinctrl/pinctrl-pxa3xx.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -187,9 +188,9 @@ int pxa3xx_pinctrl_register(struct platform_device *pdev,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
- info->virt_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!info->virt_base)
- return -ENOMEM;
+ info->virt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->virt_base))
+ return PTR_ERR(info->virt_base);
info->pctrl = pinctrl_register(desc, &pdev->dev, info);
if (!info->pctrl) {
dev_err(&pdev->dev, "failed to register PXA pinmux driver\n");
diff --git a/drivers/pinctrl/pinctrl-pxa910.c b/drivers/pinctrl/pinctrl-pxa910.c
index 5fecd221b830..a2f917b847fb 100644
--- a/drivers/pinctrl/pinctrl-pxa910.c
+++ b/drivers/pinctrl/pinctrl-pxa910.c
@@ -971,7 +971,7 @@ static struct pxa3xx_pinmux_info pxa910_info = {
.ds_shift = PXA910_DS_SHIFT,
};
-static int __devinit pxa910_pinmux_probe(struct platform_device *pdev)
+static int pxa910_pinmux_probe(struct platform_device *pdev)
{
return pxa3xx_pinctrl_register(pdev, &pxa910_info);
}
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 8f31b656c4e9..f206df175656 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -37,7 +37,7 @@
#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
/* list of all possible config options supported */
-struct pin_config {
+static struct pin_config {
char *prop_cfg;
unsigned int cfg_type;
} pcfgs[] = {
@@ -549,9 +549,11 @@ static int samsung_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
* Parse the pin names listed in the 'samsung,pins' property and convert it
* into a list of gpio numbers are create a pin group from it.
*/
-static int __devinit samsung_pinctrl_parse_dt_pins(struct platform_device *pdev,
- struct device_node *cfg_np, struct pinctrl_desc *pctl,
- unsigned int **pin_list, unsigned int *npins)
+static int samsung_pinctrl_parse_dt_pins(struct platform_device *pdev,
+ struct device_node *cfg_np,
+ struct pinctrl_desc *pctl,
+ unsigned int **pin_list,
+ unsigned int *npins)
{
struct device *dev = &pdev->dev;
struct property *prop;
@@ -596,8 +598,8 @@ static int __devinit samsung_pinctrl_parse_dt_pins(struct platform_device *pdev,
* from device node of the pin-controller. A pin group is formed with all
* the pins listed in the "samsung,pins" property.
*/
-static int __devinit samsung_pinctrl_parse_dt(struct platform_device *pdev,
- struct samsung_pinctrl_drv_data *drvdata)
+static int samsung_pinctrl_parse_dt(struct platform_device *pdev,
+ struct samsung_pinctrl_drv_data *drvdata)
{
struct device *dev = &pdev->dev;
struct device_node *dev_np = dev->of_node;
@@ -691,8 +693,8 @@ static int __devinit samsung_pinctrl_parse_dt(struct platform_device *pdev,
}
/* register the pinctrl interface with the pinctrl subsystem */
-static int __devinit samsung_pinctrl_register(struct platform_device *pdev,
- struct samsung_pinctrl_drv_data *drvdata)
+static int samsung_pinctrl_register(struct platform_device *pdev,
+ struct samsung_pinctrl_drv_data *drvdata)
{
struct pinctrl_desc *ctrldesc = &drvdata->pctl;
struct pinctrl_pin_desc *pindesc, *pdesc;
@@ -714,7 +716,6 @@ static int __devinit samsung_pinctrl_register(struct platform_device *pdev,
}
ctrldesc->pins = pindesc;
ctrldesc->npins = drvdata->ctrl->nr_pins;
- ctrldesc->npins = drvdata->ctrl->nr_pins;
/* dynamically populate the pin number and pin name for pindesc */
for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
@@ -778,8 +779,8 @@ static const struct gpio_chip samsung_gpiolib_chip = {
};
/* register the gpiolib interface with the gpiolib subsystem */
-static int __devinit samsung_gpiolib_register(struct platform_device *pdev,
- struct samsung_pinctrl_drv_data *drvdata)
+static int samsung_gpiolib_register(struct platform_device *pdev,
+ struct samsung_pinctrl_drv_data *drvdata)
{
struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
struct samsung_pin_bank *bank = ctrl->pin_banks;
@@ -816,8 +817,8 @@ fail:
}
/* unregister the gpiolib interface with the gpiolib subsystem */
-static int __devinit samsung_gpiolib_unregister(struct platform_device *pdev,
- struct samsung_pinctrl_drv_data *drvdata)
+static int samsung_gpiolib_unregister(struct platform_device *pdev,
+ struct samsung_pinctrl_drv_data *drvdata)
{
struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
struct samsung_pin_bank *bank = ctrl->pin_banks;
@@ -881,7 +882,7 @@ static struct samsung_pin_ctrl *samsung_pinctrl_get_soc_data(
return ctrl;
}
-static int __devinit samsung_pinctrl_probe(struct platform_device *pdev)
+static int samsung_pinctrl_probe(struct platform_device *pdev)
{
struct samsung_pinctrl_drv_data *drvdata;
struct device *dev = &pdev->dev;
@@ -915,11 +916,9 @@ static int __devinit samsung_pinctrl_probe(struct platform_device *pdev)
return -ENOENT;
}
- drvdata->virt_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!drvdata->virt_base) {
- dev_err(dev, "ioremap failed\n");
- return -ENODEV;
- }
+ drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drvdata->virt_base))
+ return PTR_ERR(drvdata->virt_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res)
@@ -945,9 +944,9 @@ static int __devinit samsung_pinctrl_probe(struct platform_device *pdev)
}
static const struct of_device_id samsung_pinctrl_dt_match[] = {
- { .compatible = "samsung,pinctrl-exynos4210",
+ { .compatible = "samsung,exynos4210-pinctrl",
.data = (void *)exynos4210_pin_ctrl },
- { .compatible = "samsung,pinctrl-exynos4x12",
+ { .compatible = "samsung,exynos4x12-pinctrl",
.data = (void *)exynos4x12_pin_ctrl },
{},
};
diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h
index 5addfd16e3cc..e2d4e67f7e88 100644
--- a/drivers/pinctrl/pinctrl-samsung.h
+++ b/drivers/pinctrl/pinctrl-samsung.h
@@ -104,7 +104,7 @@ struct samsung_pinctrl_drv_data;
/**
* struct samsung_pin_bank: represent a controller pin-bank.
- * @reg_offset: starting offset of the pin-bank registers.
+ * @pctl_offset: starting offset of the pin-bank registers.
* @pin_base: starting pin number of the bank.
* @nr_pins: number of pins included in this bank.
* @func_width: width of the function selector bit field.
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 79642831bba2..5c32e880bcb2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -30,7 +30,6 @@
#define PCS_MUX_BITS_NAME "pinctrl-single,bits"
#define PCS_REG_NAME_LEN ((sizeof(unsigned long) * 2) + 1)
#define PCS_OFF_DISABLED ~0U
-#define PCS_MAX_GPIO_VALUES 2
/**
* struct pcs_pingroup - pingroups for a function
@@ -78,16 +77,6 @@ struct pcs_function {
};
/**
- * struct pcs_gpio_range - pinctrl gpio range
- * @range: subrange of the GPIO number space
- * @gpio_func: gpio function value in the pinmux register
- */
-struct pcs_gpio_range {
- struct pinctrl_gpio_range range;
- int gpio_func;
-};
-
-/**
* struct pcs_data - wrapper for data needed by pinctrl framework
* @pa: pindesc array
* @cur: index to current element
@@ -414,26 +403,9 @@ static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
}
static int pcs_request_gpio(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range, unsigned pin)
+ struct pinctrl_gpio_range *range, unsigned offset)
{
- struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
- struct pcs_gpio_range *gpio = NULL;
- int end, mux_bytes;
- unsigned data;
-
- gpio = container_of(range, struct pcs_gpio_range, range);
- end = range->pin_base + range->npins - 1;
- if (pin < range->pin_base || pin > end) {
- dev_err(pctldev->dev,
- "pin %d isn't in the range of %d to %d\n",
- pin, range->pin_base, end);
- return -EINVAL;
- }
- mux_bytes = pcs->width / BITS_PER_BYTE;
- data = pcs->read(pcs->base + pin * mux_bytes) & ~pcs->fmask;
- data |= gpio->gpio_func;
- pcs->write(data, pcs->base + pin * mux_bytes);
- return 0;
+ return -ENOTSUPP;
}
static struct pinmux_ops pcs_pinmux_ops = {
@@ -493,7 +465,7 @@ static struct pinconf_ops pcs_pinconf_ops = {
* @pcs: pcs driver instance
* @offset: register offset from base
*/
-static int __devinit pcs_add_pin(struct pcs_device *pcs, unsigned offset)
+static int pcs_add_pin(struct pcs_device *pcs, unsigned offset)
{
struct pinctrl_pin_desc *pin;
struct pcs_name *pn;
@@ -526,7 +498,7 @@ static int __devinit pcs_add_pin(struct pcs_device *pcs, unsigned offset)
* If your hardware needs holes in the address space, then just set
* up multiple driver instances.
*/
-static int __devinit pcs_allocate_pin_table(struct pcs_device *pcs)
+static int pcs_allocate_pin_table(struct pcs_device *pcs)
{
int mux_bytes, nr_pins, i;
@@ -907,51 +879,7 @@ static void pcs_free_resources(struct pcs_device *pcs)
static struct of_device_id pcs_of_match[];
-static int __devinit pcs_add_gpio_range(struct device_node *node,
- struct pcs_device *pcs)
-{
- struct pcs_gpio_range *gpio;
- struct device_node *child;
- struct resource r;
- const char name[] = "pinctrl-single";
- u32 gpiores[PCS_MAX_GPIO_VALUES];
- int ret, i = 0, mux_bytes = 0;
-
- for_each_child_of_node(node, child) {
- ret = of_address_to_resource(child, 0, &r);
- if (ret < 0)
- continue;
- memset(gpiores, 0, sizeof(u32) * PCS_MAX_GPIO_VALUES);
- ret = of_property_read_u32_array(child, "pinctrl-single,gpio",
- gpiores, PCS_MAX_GPIO_VALUES);
- if (ret < 0)
- continue;
- gpio = devm_kzalloc(pcs->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio) {
- dev_err(pcs->dev, "failed to allocate pcs gpio\n");
- return -ENOMEM;
- }
- gpio->range.name = devm_kzalloc(pcs->dev, sizeof(name),
- GFP_KERNEL);
- if (!gpio->range.name) {
- dev_err(pcs->dev, "failed to allocate range name\n");
- return -ENOMEM;
- }
- memcpy((char *)gpio->range.name, name, sizeof(name));
-
- gpio->range.id = i++;
- gpio->range.base = gpiores[0];
- gpio->gpio_func = gpiores[1];
- mux_bytes = pcs->width / BITS_PER_BYTE;
- gpio->range.pin_base = (r.start - pcs->res->start) / mux_bytes;
- gpio->range.npins = (r.end - r.start) / mux_bytes + 1;
-
- pinctrl_add_gpio_range(pcs->pctl, &gpio->range);
- }
- return 0;
-}
-
-static int __devinit pcs_probe(struct platform_device *pdev)
+static int pcs_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
@@ -1047,10 +975,6 @@ static int __devinit pcs_probe(struct platform_device *pdev)
goto free;
}
- ret = pcs_add_gpio_range(np, pcs);
- if (ret < 0)
- goto free;
-
dev_info(pcs->dev, "%i pins at pa %p size %u\n",
pcs->desc.npins, pcs->base, pcs->size);
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index a4f0c5e487d5..d02498b30c6e 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1246,7 +1246,23 @@ static void __iomem *sirfsoc_rsc_of_iomap(void)
return of_iomap(np, 0);
}
-static int __devinit sirfsoc_pinmux_probe(struct platform_device *pdev)
+static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE)
+ return -EINVAL;
+
+ if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[1];
+
+ return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE;
+}
+
+static int sirfsoc_pinmux_probe(struct platform_device *pdev)
{
int ret;
struct sirfsoc_pmx *spmx;
@@ -1663,7 +1679,45 @@ const struct irq_domain_ops sirfsoc_gpio_irq_simple_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static int __devinit sirfsoc_gpio_probe(struct device_node *np)
+static void sirfsoc_gpio_set_pullup(const u32 *pullups)
+{
+ int i, n;
+ const unsigned long *p = (const unsigned long *)pullups;
+
+ for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
+ n = find_first_bit(p + i, BITS_PER_LONG);
+ while (n < BITS_PER_LONG) {
+ u32 offset = SIRFSOC_GPIO_CTRL(i, n);
+ u32 val = readl(sgpio_bank[i].chip.regs + offset);
+ val |= SIRFSOC_GPIO_CTL_PULL_MASK;
+ val |= SIRFSOC_GPIO_CTL_PULL_HIGH;
+ writel(val, sgpio_bank[i].chip.regs + offset);
+
+ n = find_next_bit(p + i, BITS_PER_LONG, n + 1);
+ }
+ }
+}
+
+static void sirfsoc_gpio_set_pulldown(const u32 *pulldowns)
+{
+ int i, n;
+ const unsigned long *p = (const unsigned long *)pulldowns;
+
+ for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
+ n = find_first_bit(p + i, BITS_PER_LONG);
+ while (n < BITS_PER_LONG) {
+ u32 offset = SIRFSOC_GPIO_CTRL(i, n);
+ u32 val = readl(sgpio_bank[i].chip.regs + offset);
+ val |= SIRFSOC_GPIO_CTL_PULL_MASK;
+ val &= ~SIRFSOC_GPIO_CTL_PULL_HIGH;
+ writel(val, sgpio_bank[i].chip.regs + offset);
+
+ n = find_next_bit(p + i, BITS_PER_LONG, n + 1);
+ }
+ }
+}
+
+static int sirfsoc_gpio_probe(struct device_node *np)
{
int i, err = 0;
struct sirfsoc_gpio_bank *bank;
@@ -1671,6 +1725,8 @@ static int __devinit sirfsoc_gpio_probe(struct device_node *np)
struct platform_device *pdev;
bool is_marco = false;
+ u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS];
+
pdev = of_find_device_by_node(np);
if (!pdev)
return -ENODEV;
@@ -1696,6 +1752,8 @@ static int __devinit sirfsoc_gpio_probe(struct device_node *np)
bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;
bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);
bank->chip.gc.of_node = np;
+ bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate;
+ bank->chip.gc.of_gpio_n_cells = 2;
bank->chip.regs = regs;
bank->id = i;
bank->is_marco = is_marco;
@@ -1726,6 +1784,14 @@ static int __devinit sirfsoc_gpio_probe(struct device_node *np)
irq_set_handler_data(bank->parent_irq, bank);
}
+ if (!of_property_read_u32_array(np, "sirf,pullups", pullups,
+ SIRFSOC_GPIO_NO_OF_BANKS))
+ sirfsoc_gpio_set_pullup(pullups);
+
+ if (!of_property_read_u32_array(np, "sirf,pulldowns", pulldowns,
+ SIRFSOC_GPIO_NO_OF_BANKS))
+ sirfsoc_gpio_set_pulldown(pulldowns);
+
return 0;
out:
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
new file mode 100644
index 000000000000..80b11e3415bc
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -0,0 +1,1505 @@
+/*
+ * Allwinner A1X SoCs pinctrl driver.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun4i_a10_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* CTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* DTR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* DSR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* DCD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* RING */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0")), /* RX */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+};
+
+static const struct sunxi_desc_pin sun5i_a13_pins[] = {
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+};
+
+static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
+ .pins = sun4i_a10_pins,
+ .npins = ARRAY_SIZE(sun4i_a10_pins),
+};
+
+static const struct sunxi_pinctrl_desc sun5i_a13_pinctrl_data = {
+ .pins = sun5i_a13_pins,
+ .npins = ARRAY_SIZE(sun5i_a13_pins),
+};
+
+static struct sunxi_pinctrl_group *
+sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group)
+{
+ int i;
+
+ for (i = 0; i < pctl->ngroups; i++) {
+ struct sunxi_pinctrl_group *grp = pctl->groups + i;
+
+ if (!strcmp(grp->name, group))
+ return grp;
+ }
+
+ return NULL;
+}
+
+static struct sunxi_pinctrl_function *
+sunxi_pinctrl_find_function_by_name(struct sunxi_pinctrl *pctl,
+ const char *name)
+{
+ struct sunxi_pinctrl_function *func = pctl->functions;
+ int i;
+
+ for (i = 0; i < pctl->nfunctions; i++) {
+ if (!func[i].name)
+ break;
+
+ if (!strcmp(func[i].name, name))
+ return func + i;
+ }
+
+ return NULL;
+}
+
+static struct sunxi_desc_function *
+sunxi_pinctrl_desc_find_function_by_name(struct sunxi_pinctrl *pctl,
+ const char *pin_name,
+ const char *func_name)
+{
+ int i;
+
+ for (i = 0; i < pctl->desc->npins; i++) {
+ const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+
+ if (!strcmp(pin->pin.name, pin_name)) {
+ struct sunxi_desc_function *func = pin->functions;
+
+ while (func->name) {
+ if (!strcmp(func->name, func_name))
+ return func;
+
+ func++;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int sunxi_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->ngroups;
+}
+
+static const char *sunxi_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->groups[group].name;
+}
+
+static int sunxi_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = (unsigned *)&pctl->groups[group].pin;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *node,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long *pinconfig;
+ struct property *prop;
+ const char *function;
+ const char *group;
+ int ret, nmaps, i = 0;
+ u32 val;
+
+ *map = NULL;
+ *num_maps = 0;
+
+ ret = of_property_read_string(node, "allwinner,function", &function);
+ if (ret) {
+ dev_err(pctl->dev,
+ "missing allwinner,function property in node %s\n",
+ node->name);
+ return -EINVAL;
+ }
+
+ nmaps = of_property_count_strings(node, "allwinner,pins") * 2;
+ if (nmaps < 0) {
+ dev_err(pctl->dev,
+ "missing allwinner,pins property in node %s\n",
+ node->name);
+ return -EINVAL;
+ }
+
+ *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
+	if (!*map)
+ return -ENOMEM;
+
+ of_property_for_each_string(node, "allwinner,pins", prop, group) {
+ struct sunxi_pinctrl_group *grp =
+ sunxi_pinctrl_find_group_by_name(pctl, group);
+ int j = 0, configlen = 0;
+
+ if (!grp) {
+ dev_err(pctl->dev, "unknown pin %s", group);
+ continue;
+ }
+
+ if (!sunxi_pinctrl_desc_find_function_by_name(pctl,
+ grp->name,
+ function)) {
+ dev_err(pctl->dev, "unsupported function %s on pin %s",
+ function, group);
+ continue;
+ }
+
+ (*map)[i].type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)[i].data.mux.group = group;
+ (*map)[i].data.mux.function = function;
+
+ i++;
+
+ (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ (*map)[i].data.configs.group_or_pin = group;
+
+ if (of_find_property(node, "allwinner,drive", NULL))
+ configlen++;
+ if (of_find_property(node, "allwinner,pull", NULL))
+ configlen++;
+
+ pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
+
+ if (!of_property_read_u32(node, "allwinner,drive", &val)) {
+ u16 strength = (val + 1) * 10;
+ pinconfig[j++] =
+ pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH,
+ strength);
+ }
+
+ if (!of_property_read_u32(node, "allwinner,pull", &val)) {
+ enum pin_config_param pull = PIN_CONFIG_END;
+ if (val == 1)
+ pull = PIN_CONFIG_BIAS_PULL_UP;
+ else if (val == 2)
+ pull = PIN_CONFIG_BIAS_PULL_DOWN;
+ pinconfig[j++] = pinconf_to_config_packed(pull, 0);
+ }
+
+ (*map)[i].data.configs.configs = pinconfig;
+ (*map)[i].data.configs.num_configs = configlen;
+
+ i++;
+ }
+
+ *num_maps = nmaps;
+
+ return 0;
+}
+
+static void sunxi_pctrl_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned num_maps)
+{
+ int i;
+
+ for (i = 0; i < num_maps; i++) {
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
+ kfree(map[i].data.configs.configs);
+ }
+
+ kfree(map);
+}
+
+static struct pinctrl_ops sunxi_pctrl_ops = {
+ .dt_node_to_map = sunxi_pctrl_dt_node_to_map,
+ .dt_free_map = sunxi_pctrl_dt_free_map,
+ .get_groups_count = sunxi_pctrl_get_groups_count,
+ .get_group_name = sunxi_pctrl_get_group_name,
+ .get_group_pins = sunxi_pctrl_get_group_pins,
+};
+
+static int sunxi_pconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned group,
+ unsigned long *config)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *config = pctl->groups[group].config;
+
+ return 0;
+}
+
+static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned group,
+ unsigned long config)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct sunxi_pinctrl_group *g = &pctl->groups[group];
+ u32 val, mask;
+ u16 strength;
+ u8 dlevel;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ strength = pinconf_to_config_argument(config);
+ if (strength > 40)
+ return -EINVAL;
+ /*
+ * We convert from mA to what the register expects:
+ * 0: 10mA
+ * 1: 20mA
+ * 2: 30mA
+ * 3: 40mA
+ */
+ dlevel = strength / 10 - 1;
+ val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
+ mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
+ writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
+ pctl->membase + sunxi_dlevel_reg(g->pin));
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = readl(pctl->membase + sunxi_pull_reg(g->pin));
+ mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
+ writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
+ pctl->membase + sunxi_pull_reg(g->pin));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = readl(pctl->membase + sunxi_pull_reg(g->pin));
+ mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
+ writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
+ pctl->membase + sunxi_pull_reg(g->pin));
+ break;
+ default:
+ break;
+ }
+
+ /* cache the config value */
+ g->config = config;
+
+ return 0;
+}
+
+static struct pinconf_ops sunxi_pconf_ops = {
+ .pin_config_group_get = sunxi_pconf_group_get,
+ .pin_config_group_set = sunxi_pconf_group_set,
+};
+
+static int sunxi_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->nfunctions;
+}
+
+static const char *sunxi_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->functions[function].name;
+}
+
+static int sunxi_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->functions[function].groups;
+ *num_groups = pctl->functions[function].ngroups;
+
+ return 0;
+}
+
+static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ u8 config)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ u32 val = readl(pctl->membase + sunxi_mux_reg(pin));
+ u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
+ writel((val & ~mask) | config << sunxi_mux_offset(pin),
+ pctl->membase + sunxi_mux_reg(pin));
+}
+
+static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct sunxi_pinctrl_group *g = pctl->groups + group;
+ struct sunxi_pinctrl_function *func = pctl->functions + function;
+ struct sunxi_desc_function *desc =
+ sunxi_pinctrl_desc_find_function_by_name(pctl,
+ g->name,
+ func->name);
+
+ if (!desc)
+ return -EINVAL;
+
+ sunxi_pmx_set(pctldev, g->pin, desc->muxval);
+
+ return 0;
+}
+
+static int
+sunxi_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset,
+ bool input)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct sunxi_desc_function *desc;
+ char pin_name[SUNXI_PIN_NAME_MAX_LEN];
+ const char *func;
+ u8 bank, pin;
+ int ret;
+
+ bank = (offset) / PINS_PER_BANK;
+ pin = (offset) % PINS_PER_BANK;
+
+ ret = sprintf(pin_name, "P%c%d", 'A' + bank, pin);
+ if (!ret)
+ goto error;
+
+ if (input)
+ func = "gpio_in";
+ else
+ func = "gpio_out";
+
+ desc = sunxi_pinctrl_desc_find_function_by_name(pctl,
+ pin_name,
+ func);
+ if (!desc) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ sunxi_pmx_set(pctldev, offset, desc->muxval);
+
+ ret = 0;
+
+error:
+ return ret;
+}
+
+static struct pinmux_ops sunxi_pmx_ops = {
+ .get_functions_count = sunxi_pmx_get_funcs_cnt,
+ .get_function_name = sunxi_pmx_get_func_name,
+ .get_function_groups = sunxi_pmx_get_func_groups,
+ .enable = sunxi_pmx_enable,
+ .gpio_set_direction = sunxi_pmx_gpio_set_direction,
+};
+
+static struct pinctrl_desc sunxi_pctrl_desc = {
+ .confops = &sunxi_pconf_ops,
+ .pctlops = &sunxi_pctrl_ops,
+ .pmxops = &sunxi_pmx_ops,
+};
+
+static int sunxi_pinctrl_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void sunxi_pinctrl_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
+
+ u32 reg = sunxi_data_reg(offset);
+ u8 index = sunxi_data_offset(offset);
+ u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+
+ return val;
+}
+
+static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
+ u32 reg = sunxi_data_reg(offset);
+ u8 index = sunxi_data_offset(offset);
+
+ writel((value & DATA_PINS_MASK) << index, pctl->membase + reg);
+}
+
+static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ int pin, base;
+
+ base = PINS_PER_BANK * gpiospec->args[0];
+ pin = base + gpiospec->args[1];
+
+ if (pin > (gc->base + gc->ngpio))
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[2];
+
+ return pin;
+}
+
+static struct gpio_chip sunxi_pinctrl_gpio_chip = {
+ .owner = THIS_MODULE,
+ .request = sunxi_pinctrl_gpio_request,
+ .free = sunxi_pinctrl_gpio_free,
+ .direction_input = sunxi_pinctrl_gpio_direction_input,
+ .direction_output = sunxi_pinctrl_gpio_direction_output,
+ .get = sunxi_pinctrl_gpio_get,
+ .set = sunxi_pinctrl_gpio_set,
+ .of_xlate = sunxi_pinctrl_gpio_of_xlate,
+ .of_gpio_n_cells = 3,
+ .can_sleep = 0,
+};
+
+static struct of_device_id sunxi_pinctrl_match[] = {
+ { .compatible = "allwinner,sun4i-a10-pinctrl", .data = (void *)&sun4i_a10_pinctrl_data },
+ { .compatible = "allwinner,sun5i-a13-pinctrl", .data = (void *)&sun5i_a13_pinctrl_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunxi_pinctrl_match);
+
+static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
+ const char *name)
+{
+ struct sunxi_pinctrl_function *func = pctl->functions;
+
+ while (func->name) {
+ /* function already there */
+ if (strcmp(func->name, name) == 0) {
+ func->ngroups++;
+ return -EEXIST;
+ }
+ func++;
+ }
+
+ func->name = name;
+ func->ngroups = 1;
+
+ pctl->nfunctions++;
+
+ return 0;
+}
+
+static int sunxi_pinctrl_build_state(struct platform_device *pdev)
+{
+ struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev);
+ int i;
+
+ pctl->ngroups = pctl->desc->npins;
+
+ /* Allocate groups */
+ pctl->groups = devm_kzalloc(&pdev->dev,
+ pctl->ngroups * sizeof(*pctl->groups),
+ GFP_KERNEL);
+ if (!pctl->groups)
+ return -ENOMEM;
+
+ for (i = 0; i < pctl->desc->npins; i++) {
+ const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+ struct sunxi_pinctrl_group *group = pctl->groups + i;
+
+ group->name = pin->pin.name;
+ group->pin = pin->pin.number;
+ }
+
+ /*
+ * We suppose that we won't have any more functions than pins,
+ * we'll reallocate that later anyway
+ */
+ pctl->functions = devm_kzalloc(&pdev->dev,
+ pctl->desc->npins * sizeof(*pctl->functions),
+ GFP_KERNEL);
+ if (!pctl->functions)
+ return -ENOMEM;
+
+ /* Count functions and their associated groups */
+ for (i = 0; i < pctl->desc->npins; i++) {
+ const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+ struct sunxi_desc_function *func = pin->functions;
+
+ while (func->name) {
+ sunxi_pinctrl_add_function(pctl, func->name);
+ func++;
+ }
+ }
+
+ pctl->functions = krealloc(pctl->functions,
+ pctl->nfunctions * sizeof(*pctl->functions),
+ GFP_KERNEL);
+
+ for (i = 0; i < pctl->desc->npins; i++) {
+ const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+ struct sunxi_desc_function *func = pin->functions;
+
+ while (func->name) {
+ struct sunxi_pinctrl_function *func_item;
+ const char **func_grp;
+
+ func_item = sunxi_pinctrl_find_function_by_name(pctl,
+ func->name);
+ if (!func_item)
+ return -EINVAL;
+
+ if (!func_item->groups) {
+ func_item->groups =
+ devm_kzalloc(&pdev->dev,
+ func_item->ngroups * sizeof(*func_item->groups),
+ GFP_KERNEL);
+ if (!func_item->groups)
+ return -ENOMEM;
+ }
+
+ func_grp = func_item->groups;
+ while (*func_grp)
+ func_grp++;
+
+ *func_grp = pin->pin.name;
+ func++;
+ }
+ }
+
+ return 0;
+}
+
+static int sunxi_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *device;
+ struct pinctrl_pin_desc *pins;
+ struct sunxi_pinctrl *pctl;
+ int i, ret, last_pin;
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, pctl);
+
+ pctl->membase = of_iomap(node, 0);
+ if (!pctl->membase)
+ return -ENOMEM;
+
+ device = of_match_device(sunxi_pinctrl_match, &pdev->dev);
+ if (!device)
+ return -ENODEV;
+
+ pctl->desc = (struct sunxi_pinctrl_desc *)device->data;
+
+ ret = sunxi_pinctrl_build_state(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "dt probe failed: %d\n", ret);
+ return ret;
+ }
+
+ pins = devm_kzalloc(&pdev->dev,
+ pctl->desc->npins * sizeof(*pins),
+ GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < pctl->desc->npins; i++)
+ pins[i] = pctl->desc->pins[i].pin;
+
+ sunxi_pctrl_desc.name = dev_name(&pdev->dev);
+ sunxi_pctrl_desc.owner = THIS_MODULE;
+ sunxi_pctrl_desc.pins = pins;
+ sunxi_pctrl_desc.npins = pctl->desc->npins;
+ pctl->dev = &pdev->dev;
+ pctl->pctl_dev = pinctrl_register(&sunxi_pctrl_desc,
+ &pdev->dev, pctl);
+ if (!pctl->pctl_dev) {
+ dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
+ return -EINVAL;
+ }
+
+ pctl->chip = devm_kzalloc(&pdev->dev, sizeof(*pctl->chip), GFP_KERNEL);
+ if (!pctl->chip) {
+ ret = -ENOMEM;
+ goto pinctrl_error;
+ }
+
+ last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number;
+ pctl->chip = &sunxi_pinctrl_gpio_chip;
+ pctl->chip->ngpio = round_up(last_pin, PINS_PER_BANK);
+ pctl->chip->label = dev_name(&pdev->dev);
+ pctl->chip->dev = &pdev->dev;
+ pctl->chip->base = 0;
+
+ ret = gpiochip_add(pctl->chip);
+ if (ret)
+ goto pinctrl_error;
+
+ for (i = 0; i < pctl->desc->npins; i++) {
+ const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+
+ ret = gpiochip_add_pin_range(pctl->chip, dev_name(&pdev->dev),
+ pin->pin.number,
+ pin->pin.number, 1);
+ if (ret)
+ goto gpiochip_error;
+ }
+
+ dev_info(&pdev->dev, "initialized sunXi PIO driver\n");
+
+ return 0;
+
+gpiochip_error:
+ ret = gpiochip_remove(pctl->chip);
+pinctrl_error:
+ pinctrl_unregister(pctl->pctl_dev);
+ return ret;
+}
+
+static struct platform_driver sunxi_pinctrl_driver = {
+ .probe = sunxi_pinctrl_probe,
+ .driver = {
+ .name = "sunxi-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = sunxi_pinctrl_match,
+ },
+};
+module_platform_driver(sunxi_pinctrl_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A1X pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
new file mode 100644
index 000000000000..e921621059ce
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -0,0 +1,478 @@
+/*
+ * Allwinner A1X SoCs pinctrl driver.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PINCTRL_SUNXI_H
+#define __PINCTRL_SUNXI_H
+
+#include <linux/kernel.h>
+
+#define PA_BASE 0
+#define PB_BASE 32
+#define PC_BASE 64
+#define PD_BASE 96
+#define PE_BASE 128
+#define PF_BASE 160
+#define PG_BASE 192
+#define PH_BASE 224
+#define PI_BASE 256
+
+#define SUNXI_PINCTRL_PIN_PA0 PINCTRL_PIN(PA_BASE + 0, "PA0")
+#define SUNXI_PINCTRL_PIN_PA1 PINCTRL_PIN(PA_BASE + 1, "PA1")
+#define SUNXI_PINCTRL_PIN_PA2 PINCTRL_PIN(PA_BASE + 2, "PA2")
+#define SUNXI_PINCTRL_PIN_PA3 PINCTRL_PIN(PA_BASE + 3, "PA3")
+#define SUNXI_PINCTRL_PIN_PA4 PINCTRL_PIN(PA_BASE + 4, "PA4")
+#define SUNXI_PINCTRL_PIN_PA5 PINCTRL_PIN(PA_BASE + 5, "PA5")
+#define SUNXI_PINCTRL_PIN_PA6 PINCTRL_PIN(PA_BASE + 6, "PA6")
+#define SUNXI_PINCTRL_PIN_PA7 PINCTRL_PIN(PA_BASE + 7, "PA7")
+#define SUNXI_PINCTRL_PIN_PA8 PINCTRL_PIN(PA_BASE + 8, "PA8")
+#define SUNXI_PINCTRL_PIN_PA9 PINCTRL_PIN(PA_BASE + 9, "PA9")
+#define SUNXI_PINCTRL_PIN_PA10 PINCTRL_PIN(PA_BASE + 10, "PA10")
+#define SUNXI_PINCTRL_PIN_PA11 PINCTRL_PIN(PA_BASE + 11, "PA11")
+#define SUNXI_PINCTRL_PIN_PA12 PINCTRL_PIN(PA_BASE + 12, "PA12")
+#define SUNXI_PINCTRL_PIN_PA13 PINCTRL_PIN(PA_BASE + 13, "PA13")
+#define SUNXI_PINCTRL_PIN_PA14 PINCTRL_PIN(PA_BASE + 14, "PA14")
+#define SUNXI_PINCTRL_PIN_PA15 PINCTRL_PIN(PA_BASE + 15, "PA15")
+#define SUNXI_PINCTRL_PIN_PA16 PINCTRL_PIN(PA_BASE + 16, "PA16")
+#define SUNXI_PINCTRL_PIN_PA17 PINCTRL_PIN(PA_BASE + 17, "PA17")
+#define SUNXI_PINCTRL_PIN_PA18 PINCTRL_PIN(PA_BASE + 18, "PA18")
+#define SUNXI_PINCTRL_PIN_PA19 PINCTRL_PIN(PA_BASE + 19, "PA19")
+#define SUNXI_PINCTRL_PIN_PA20 PINCTRL_PIN(PA_BASE + 20, "PA20")
+#define SUNXI_PINCTRL_PIN_PA21 PINCTRL_PIN(PA_BASE + 21, "PA21")
+#define SUNXI_PINCTRL_PIN_PA22 PINCTRL_PIN(PA_BASE + 22, "PA22")
+#define SUNXI_PINCTRL_PIN_PA23 PINCTRL_PIN(PA_BASE + 23, "PA23")
+#define SUNXI_PINCTRL_PIN_PA24 PINCTRL_PIN(PA_BASE + 24, "PA24")
+#define SUNXI_PINCTRL_PIN_PA25 PINCTRL_PIN(PA_BASE + 25, "PA25")
+#define SUNXI_PINCTRL_PIN_PA26 PINCTRL_PIN(PA_BASE + 26, "PA26")
+#define SUNXI_PINCTRL_PIN_PA27 PINCTRL_PIN(PA_BASE + 27, "PA27")
+#define SUNXI_PINCTRL_PIN_PA28 PINCTRL_PIN(PA_BASE + 28, "PA28")
+#define SUNXI_PINCTRL_PIN_PA29 PINCTRL_PIN(PA_BASE + 29, "PA29")
+#define SUNXI_PINCTRL_PIN_PA30 PINCTRL_PIN(PA_BASE + 30, "PA30")
+#define SUNXI_PINCTRL_PIN_PA31 PINCTRL_PIN(PA_BASE + 31, "PA31")
+
+#define SUNXI_PINCTRL_PIN_PB0 PINCTRL_PIN(PB_BASE + 0, "PB0")
+#define SUNXI_PINCTRL_PIN_PB1 PINCTRL_PIN(PB_BASE + 1, "PB1")
+#define SUNXI_PINCTRL_PIN_PB2 PINCTRL_PIN(PB_BASE + 2, "PB2")
+#define SUNXI_PINCTRL_PIN_PB3 PINCTRL_PIN(PB_BASE + 3, "PB3")
+#define SUNXI_PINCTRL_PIN_PB4 PINCTRL_PIN(PB_BASE + 4, "PB4")
+#define SUNXI_PINCTRL_PIN_PB5 PINCTRL_PIN(PB_BASE + 5, "PB5")
+#define SUNXI_PINCTRL_PIN_PB6 PINCTRL_PIN(PB_BASE + 6, "PB6")
+#define SUNXI_PINCTRL_PIN_PB7 PINCTRL_PIN(PB_BASE + 7, "PB7")
+#define SUNXI_PINCTRL_PIN_PB8 PINCTRL_PIN(PB_BASE + 8, "PB8")
+#define SUNXI_PINCTRL_PIN_PB9 PINCTRL_PIN(PB_BASE + 9, "PB9")
+#define SUNXI_PINCTRL_PIN_PB10 PINCTRL_PIN(PB_BASE + 10, "PB10")
+#define SUNXI_PINCTRL_PIN_PB11 PINCTRL_PIN(PB_BASE + 11, "PB11")
+#define SUNXI_PINCTRL_PIN_PB12 PINCTRL_PIN(PB_BASE + 12, "PB12")
+#define SUNXI_PINCTRL_PIN_PB13 PINCTRL_PIN(PB_BASE + 13, "PB13")
+#define SUNXI_PINCTRL_PIN_PB14 PINCTRL_PIN(PB_BASE + 14, "PB14")
+#define SUNXI_PINCTRL_PIN_PB15 PINCTRL_PIN(PB_BASE + 15, "PB15")
+#define SUNXI_PINCTRL_PIN_PB16 PINCTRL_PIN(PB_BASE + 16, "PB16")
+#define SUNXI_PINCTRL_PIN_PB17 PINCTRL_PIN(PB_BASE + 17, "PB17")
+#define SUNXI_PINCTRL_PIN_PB18 PINCTRL_PIN(PB_BASE + 18, "PB18")
+#define SUNXI_PINCTRL_PIN_PB19 PINCTRL_PIN(PB_BASE + 19, "PB19")
+#define SUNXI_PINCTRL_PIN_PB20 PINCTRL_PIN(PB_BASE + 20, "PB20")
+#define SUNXI_PINCTRL_PIN_PB21 PINCTRL_PIN(PB_BASE + 21, "PB21")
+#define SUNXI_PINCTRL_PIN_PB22 PINCTRL_PIN(PB_BASE + 22, "PB22")
+#define SUNXI_PINCTRL_PIN_PB23 PINCTRL_PIN(PB_BASE + 23, "PB23")
+#define SUNXI_PINCTRL_PIN_PB24 PINCTRL_PIN(PB_BASE + 24, "PB24")
+#define SUNXI_PINCTRL_PIN_PB25 PINCTRL_PIN(PB_BASE + 25, "PB25")
+#define SUNXI_PINCTRL_PIN_PB26 PINCTRL_PIN(PB_BASE + 26, "PB26")
+#define SUNXI_PINCTRL_PIN_PB27 PINCTRL_PIN(PB_BASE + 27, "PB27")
+#define SUNXI_PINCTRL_PIN_PB28 PINCTRL_PIN(PB_BASE + 28, "PB28")
+#define SUNXI_PINCTRL_PIN_PB29 PINCTRL_PIN(PB_BASE + 29, "PB29")
+#define SUNXI_PINCTRL_PIN_PB30 PINCTRL_PIN(PB_BASE + 30, "PB30")
+#define SUNXI_PINCTRL_PIN_PB31 PINCTRL_PIN(PB_BASE + 31, "PB31")
+
+#define SUNXI_PINCTRL_PIN_PC0 PINCTRL_PIN(PC_BASE + 0, "PC0")
+#define SUNXI_PINCTRL_PIN_PC1 PINCTRL_PIN(PC_BASE + 1, "PC1")
+#define SUNXI_PINCTRL_PIN_PC2 PINCTRL_PIN(PC_BASE + 2, "PC2")
+#define SUNXI_PINCTRL_PIN_PC3 PINCTRL_PIN(PC_BASE + 3, "PC3")
+#define SUNXI_PINCTRL_PIN_PC4 PINCTRL_PIN(PC_BASE + 4, "PC4")
+#define SUNXI_PINCTRL_PIN_PC5 PINCTRL_PIN(PC_BASE + 5, "PC5")
+#define SUNXI_PINCTRL_PIN_PC6 PINCTRL_PIN(PC_BASE + 6, "PC6")
+#define SUNXI_PINCTRL_PIN_PC7 PINCTRL_PIN(PC_BASE + 7, "PC7")
+#define SUNXI_PINCTRL_PIN_PC8 PINCTRL_PIN(PC_BASE + 8, "PC8")
+#define SUNXI_PINCTRL_PIN_PC9 PINCTRL_PIN(PC_BASE + 9, "PC9")
+#define SUNXI_PINCTRL_PIN_PC10 PINCTRL_PIN(PC_BASE + 10, "PC10")
+#define SUNXI_PINCTRL_PIN_PC11 PINCTRL_PIN(PC_BASE + 11, "PC11")
+#define SUNXI_PINCTRL_PIN_PC12 PINCTRL_PIN(PC_BASE + 12, "PC12")
+#define SUNXI_PINCTRL_PIN_PC13 PINCTRL_PIN(PC_BASE + 13, "PC13")
+#define SUNXI_PINCTRL_PIN_PC14 PINCTRL_PIN(PC_BASE + 14, "PC14")
+#define SUNXI_PINCTRL_PIN_PC15 PINCTRL_PIN(PC_BASE + 15, "PC15")
+#define SUNXI_PINCTRL_PIN_PC16 PINCTRL_PIN(PC_BASE + 16, "PC16")
+#define SUNXI_PINCTRL_PIN_PC17 PINCTRL_PIN(PC_BASE + 17, "PC17")
+#define SUNXI_PINCTRL_PIN_PC18 PINCTRL_PIN(PC_BASE + 18, "PC18")
+#define SUNXI_PINCTRL_PIN_PC19 PINCTRL_PIN(PC_BASE + 19, "PC19")
+#define SUNXI_PINCTRL_PIN_PC20 PINCTRL_PIN(PC_BASE + 20, "PC20")
+#define SUNXI_PINCTRL_PIN_PC21 PINCTRL_PIN(PC_BASE + 21, "PC21")
+#define SUNXI_PINCTRL_PIN_PC22 PINCTRL_PIN(PC_BASE + 22, "PC22")
+#define SUNXI_PINCTRL_PIN_PC23 PINCTRL_PIN(PC_BASE + 23, "PC23")
+#define SUNXI_PINCTRL_PIN_PC24 PINCTRL_PIN(PC_BASE + 24, "PC24")
+#define SUNXI_PINCTRL_PIN_PC25 PINCTRL_PIN(PC_BASE + 25, "PC25")
+#define SUNXI_PINCTRL_PIN_PC26 PINCTRL_PIN(PC_BASE + 26, "PC26")
+#define SUNXI_PINCTRL_PIN_PC27 PINCTRL_PIN(PC_BASE + 27, "PC27")
+#define SUNXI_PINCTRL_PIN_PC28 PINCTRL_PIN(PC_BASE + 28, "PC28")
+#define SUNXI_PINCTRL_PIN_PC29 PINCTRL_PIN(PC_BASE + 29, "PC29")
+#define SUNXI_PINCTRL_PIN_PC30 PINCTRL_PIN(PC_BASE + 30, "PC30")
+#define SUNXI_PINCTRL_PIN_PC31 PINCTRL_PIN(PC_BASE + 31, "PC31")
+
+#define SUNXI_PINCTRL_PIN_PD0 PINCTRL_PIN(PD_BASE + 0, "PD0")
+#define SUNXI_PINCTRL_PIN_PD1 PINCTRL_PIN(PD_BASE + 1, "PD1")
+#define SUNXI_PINCTRL_PIN_PD2 PINCTRL_PIN(PD_BASE + 2, "PD2")
+#define SUNXI_PINCTRL_PIN_PD3 PINCTRL_PIN(PD_BASE + 3, "PD3")
+#define SUNXI_PINCTRL_PIN_PD4 PINCTRL_PIN(PD_BASE + 4, "PD4")
+#define SUNXI_PINCTRL_PIN_PD5 PINCTRL_PIN(PD_BASE + 5, "PD5")
+#define SUNXI_PINCTRL_PIN_PD6 PINCTRL_PIN(PD_BASE + 6, "PD6")
+#define SUNXI_PINCTRL_PIN_PD7 PINCTRL_PIN(PD_BASE + 7, "PD7")
+#define SUNXI_PINCTRL_PIN_PD8 PINCTRL_PIN(PD_BASE + 8, "PD8")
+#define SUNXI_PINCTRL_PIN_PD9 PINCTRL_PIN(PD_BASE + 9, "PD9")
+#define SUNXI_PINCTRL_PIN_PD10 PINCTRL_PIN(PD_BASE + 10, "PD10")
+#define SUNXI_PINCTRL_PIN_PD11 PINCTRL_PIN(PD_BASE + 11, "PD11")
+#define SUNXI_PINCTRL_PIN_PD12 PINCTRL_PIN(PD_BASE + 12, "PD12")
+#define SUNXI_PINCTRL_PIN_PD13 PINCTRL_PIN(PD_BASE + 13, "PD13")
+#define SUNXI_PINCTRL_PIN_PD14 PINCTRL_PIN(PD_BASE + 14, "PD14")
+#define SUNXI_PINCTRL_PIN_PD15 PINCTRL_PIN(PD_BASE + 15, "PD15")
+#define SUNXI_PINCTRL_PIN_PD16 PINCTRL_PIN(PD_BASE + 16, "PD16")
+#define SUNXI_PINCTRL_PIN_PD17 PINCTRL_PIN(PD_BASE + 17, "PD17")
+#define SUNXI_PINCTRL_PIN_PD18 PINCTRL_PIN(PD_BASE + 18, "PD18")
+#define SUNXI_PINCTRL_PIN_PD19 PINCTRL_PIN(PD_BASE + 19, "PD19")
+#define SUNXI_PINCTRL_PIN_PD20 PINCTRL_PIN(PD_BASE + 20, "PD20")
+#define SUNXI_PINCTRL_PIN_PD21 PINCTRL_PIN(PD_BASE + 21, "PD21")
+#define SUNXI_PINCTRL_PIN_PD22 PINCTRL_PIN(PD_BASE + 22, "PD22")
+#define SUNXI_PINCTRL_PIN_PD23 PINCTRL_PIN(PD_BASE + 23, "PD23")
+#define SUNXI_PINCTRL_PIN_PD24 PINCTRL_PIN(PD_BASE + 24, "PD24")
+#define SUNXI_PINCTRL_PIN_PD25 PINCTRL_PIN(PD_BASE + 25, "PD25")
+#define SUNXI_PINCTRL_PIN_PD26 PINCTRL_PIN(PD_BASE + 26, "PD26")
+#define SUNXI_PINCTRL_PIN_PD27 PINCTRL_PIN(PD_BASE + 27, "PD27")
+#define SUNXI_PINCTRL_PIN_PD28 PINCTRL_PIN(PD_BASE + 28, "PD28")
+#define SUNXI_PINCTRL_PIN_PD29 PINCTRL_PIN(PD_BASE + 29, "PD29")
+#define SUNXI_PINCTRL_PIN_PD30 PINCTRL_PIN(PD_BASE + 30, "PD30")
+#define SUNXI_PINCTRL_PIN_PD31 PINCTRL_PIN(PD_BASE + 31, "PD31")
+
+#define SUNXI_PINCTRL_PIN_PE0 PINCTRL_PIN(PE_BASE + 0, "PE0")
+#define SUNXI_PINCTRL_PIN_PE1 PINCTRL_PIN(PE_BASE + 1, "PE1")
+#define SUNXI_PINCTRL_PIN_PE2 PINCTRL_PIN(PE_BASE + 2, "PE2")
+#define SUNXI_PINCTRL_PIN_PE3 PINCTRL_PIN(PE_BASE + 3, "PE3")
+#define SUNXI_PINCTRL_PIN_PE4 PINCTRL_PIN(PE_BASE + 4, "PE4")
+#define SUNXI_PINCTRL_PIN_PE5 PINCTRL_PIN(PE_BASE + 5, "PE5")
+#define SUNXI_PINCTRL_PIN_PE6 PINCTRL_PIN(PE_BASE + 6, "PE6")
+#define SUNXI_PINCTRL_PIN_PE7 PINCTRL_PIN(PE_BASE + 7, "PE7")
+#define SUNXI_PINCTRL_PIN_PE8 PINCTRL_PIN(PE_BASE + 8, "PE8")
+#define SUNXI_PINCTRL_PIN_PE9 PINCTRL_PIN(PE_BASE + 9, "PE9")
+#define SUNXI_PINCTRL_PIN_PE10 PINCTRL_PIN(PE_BASE + 10, "PE10")
+#define SUNXI_PINCTRL_PIN_PE11 PINCTRL_PIN(PE_BASE + 11, "PE11")
+#define SUNXI_PINCTRL_PIN_PE12 PINCTRL_PIN(PE_BASE + 12, "PE12")
+#define SUNXI_PINCTRL_PIN_PE13 PINCTRL_PIN(PE_BASE + 13, "PE13")
+#define SUNXI_PINCTRL_PIN_PE14 PINCTRL_PIN(PE_BASE + 14, "PE14")
+#define SUNXI_PINCTRL_PIN_PE15 PINCTRL_PIN(PE_BASE + 15, "PE15")
+#define SUNXI_PINCTRL_PIN_PE16 PINCTRL_PIN(PE_BASE + 16, "PE16")
+#define SUNXI_PINCTRL_PIN_PE17 PINCTRL_PIN(PE_BASE + 17, "PE17")
+#define SUNXI_PINCTRL_PIN_PE18 PINCTRL_PIN(PE_BASE + 18, "PE18")
+#define SUNXI_PINCTRL_PIN_PE19 PINCTRL_PIN(PE_BASE + 19, "PE19")
+#define SUNXI_PINCTRL_PIN_PE20 PINCTRL_PIN(PE_BASE + 20, "PE20")
+#define SUNXI_PINCTRL_PIN_PE21 PINCTRL_PIN(PE_BASE + 21, "PE21")
+#define SUNXI_PINCTRL_PIN_PE22 PINCTRL_PIN(PE_BASE + 22, "PE22")
+#define SUNXI_PINCTRL_PIN_PE23 PINCTRL_PIN(PE_BASE + 23, "PE23")
+#define SUNXI_PINCTRL_PIN_PE24 PINCTRL_PIN(PE_BASE + 24, "PE24")
+#define SUNXI_PINCTRL_PIN_PE25 PINCTRL_PIN(PE_BASE + 25, "PE25")
+#define SUNXI_PINCTRL_PIN_PE26 PINCTRL_PIN(PE_BASE + 26, "PE26")
+#define SUNXI_PINCTRL_PIN_PE27 PINCTRL_PIN(PE_BASE + 27, "PE27")
+#define SUNXI_PINCTRL_PIN_PE28 PINCTRL_PIN(PE_BASE + 28, "PE28")
+#define SUNXI_PINCTRL_PIN_PE29 PINCTRL_PIN(PE_BASE + 29, "PE29")
+#define SUNXI_PINCTRL_PIN_PE30 PINCTRL_PIN(PE_BASE + 30, "PE30")
+#define SUNXI_PINCTRL_PIN_PE31 PINCTRL_PIN(PE_BASE + 31, "PE31")
+
+#define SUNXI_PINCTRL_PIN_PF0 PINCTRL_PIN(PF_BASE + 0, "PF0")
+#define SUNXI_PINCTRL_PIN_PF1 PINCTRL_PIN(PF_BASE + 1, "PF1")
+#define SUNXI_PINCTRL_PIN_PF2 PINCTRL_PIN(PF_BASE + 2, "PF2")
+#define SUNXI_PINCTRL_PIN_PF3 PINCTRL_PIN(PF_BASE + 3, "PF3")
+#define SUNXI_PINCTRL_PIN_PF4 PINCTRL_PIN(PF_BASE + 4, "PF4")
+#define SUNXI_PINCTRL_PIN_PF5 PINCTRL_PIN(PF_BASE + 5, "PF5")
+#define SUNXI_PINCTRL_PIN_PF6 PINCTRL_PIN(PF_BASE + 6, "PF6")
+#define SUNXI_PINCTRL_PIN_PF7 PINCTRL_PIN(PF_BASE + 7, "PF7")
+#define SUNXI_PINCTRL_PIN_PF8 PINCTRL_PIN(PF_BASE + 8, "PF8")
+#define SUNXI_PINCTRL_PIN_PF9 PINCTRL_PIN(PF_BASE + 9, "PF9")
+#define SUNXI_PINCTRL_PIN_PF10 PINCTRL_PIN(PF_BASE + 10, "PF10")
+#define SUNXI_PINCTRL_PIN_PF11 PINCTRL_PIN(PF_BASE + 11, "PF11")
+#define SUNXI_PINCTRL_PIN_PF12 PINCTRL_PIN(PF_BASE + 12, "PF12")
+#define SUNXI_PINCTRL_PIN_PF13 PINCTRL_PIN(PF_BASE + 13, "PF13")
+#define SUNXI_PINCTRL_PIN_PF14 PINCTRL_PIN(PF_BASE + 14, "PF14")
+#define SUNXI_PINCTRL_PIN_PF15 PINCTRL_PIN(PF_BASE + 15, "PF15")
+#define SUNXI_PINCTRL_PIN_PF16 PINCTRL_PIN(PF_BASE + 16, "PF16")
+#define SUNXI_PINCTRL_PIN_PF17 PINCTRL_PIN(PF_BASE + 17, "PF17")
+#define SUNXI_PINCTRL_PIN_PF18 PINCTRL_PIN(PF_BASE + 18, "PF18")
+#define SUNXI_PINCTRL_PIN_PF19 PINCTRL_PIN(PF_BASE + 19, "PF19")
+#define SUNXI_PINCTRL_PIN_PF20 PINCTRL_PIN(PF_BASE + 20, "PF20")
+#define SUNXI_PINCTRL_PIN_PF21 PINCTRL_PIN(PF_BASE + 21, "PF21")
+#define SUNXI_PINCTRL_PIN_PF22 PINCTRL_PIN(PF_BASE + 22, "PF22")
+#define SUNXI_PINCTRL_PIN_PF23 PINCTRL_PIN(PF_BASE + 23, "PF23")
+#define SUNXI_PINCTRL_PIN_PF24 PINCTRL_PIN(PF_BASE + 24, "PF24")
+#define SUNXI_PINCTRL_PIN_PF25 PINCTRL_PIN(PF_BASE + 25, "PF25")
+#define SUNXI_PINCTRL_PIN_PF26 PINCTRL_PIN(PF_BASE + 26, "PF26")
+#define SUNXI_PINCTRL_PIN_PF27 PINCTRL_PIN(PF_BASE + 27, "PF27")
+#define SUNXI_PINCTRL_PIN_PF28 PINCTRL_PIN(PF_BASE + 28, "PF28")
+#define SUNXI_PINCTRL_PIN_PF29 PINCTRL_PIN(PF_BASE + 29, "PF29")
+#define SUNXI_PINCTRL_PIN_PF30 PINCTRL_PIN(PF_BASE + 30, "PF30")
+#define SUNXI_PINCTRL_PIN_PF31 PINCTRL_PIN(PF_BASE + 31, "PF31")
+
+#define SUNXI_PINCTRL_PIN_PG0 PINCTRL_PIN(PG_BASE + 0, "PG0")
+#define SUNXI_PINCTRL_PIN_PG1 PINCTRL_PIN(PG_BASE + 1, "PG1")
+#define SUNXI_PINCTRL_PIN_PG2 PINCTRL_PIN(PG_BASE + 2, "PG2")
+#define SUNXI_PINCTRL_PIN_PG3 PINCTRL_PIN(PG_BASE + 3, "PG3")
+#define SUNXI_PINCTRL_PIN_PG4 PINCTRL_PIN(PG_BASE + 4, "PG4")
+#define SUNXI_PINCTRL_PIN_PG5 PINCTRL_PIN(PG_BASE + 5, "PG5")
+#define SUNXI_PINCTRL_PIN_PG6 PINCTRL_PIN(PG_BASE + 6, "PG6")
+#define SUNXI_PINCTRL_PIN_PG7 PINCTRL_PIN(PG_BASE + 7, "PG7")
+#define SUNXI_PINCTRL_PIN_PG8 PINCTRL_PIN(PG_BASE + 8, "PG8")
+#define SUNXI_PINCTRL_PIN_PG9 PINCTRL_PIN(PG_BASE + 9, "PG9")
+#define SUNXI_PINCTRL_PIN_PG10 PINCTRL_PIN(PG_BASE + 10, "PG10")
+#define SUNXI_PINCTRL_PIN_PG11 PINCTRL_PIN(PG_BASE + 11, "PG11")
+#define SUNXI_PINCTRL_PIN_PG12 PINCTRL_PIN(PG_BASE + 12, "PG12")
+#define SUNXI_PINCTRL_PIN_PG13 PINCTRL_PIN(PG_BASE + 13, "PG13")
+#define SUNXI_PINCTRL_PIN_PG14 PINCTRL_PIN(PG_BASE + 14, "PG14")
+#define SUNXI_PINCTRL_PIN_PG15 PINCTRL_PIN(PG_BASE + 15, "PG15")
+#define SUNXI_PINCTRL_PIN_PG16 PINCTRL_PIN(PG_BASE + 16, "PG16")
+#define SUNXI_PINCTRL_PIN_PG17 PINCTRL_PIN(PG_BASE + 17, "PG17")
+#define SUNXI_PINCTRL_PIN_PG18 PINCTRL_PIN(PG_BASE + 18, "PG18")
+#define SUNXI_PINCTRL_PIN_PG19 PINCTRL_PIN(PG_BASE + 19, "PG19")
+#define SUNXI_PINCTRL_PIN_PG20 PINCTRL_PIN(PG_BASE + 20, "PG20")
+#define SUNXI_PINCTRL_PIN_PG21 PINCTRL_PIN(PG_BASE + 21, "PG21")
+#define SUNXI_PINCTRL_PIN_PG22 PINCTRL_PIN(PG_BASE + 22, "PG22")
+#define SUNXI_PINCTRL_PIN_PG23 PINCTRL_PIN(PG_BASE + 23, "PG23")
+#define SUNXI_PINCTRL_PIN_PG24 PINCTRL_PIN(PG_BASE + 24, "PG24")
+#define SUNXI_PINCTRL_PIN_PG25 PINCTRL_PIN(PG_BASE + 25, "PG25")
+#define SUNXI_PINCTRL_PIN_PG26 PINCTRL_PIN(PG_BASE + 26, "PG26")
+#define SUNXI_PINCTRL_PIN_PG27 PINCTRL_PIN(PG_BASE + 27, "PG27")
+#define SUNXI_PINCTRL_PIN_PG28 PINCTRL_PIN(PG_BASE + 28, "PG28")
+#define SUNXI_PINCTRL_PIN_PG29 PINCTRL_PIN(PG_BASE + 29, "PG29")
+#define SUNXI_PINCTRL_PIN_PG30 PINCTRL_PIN(PG_BASE + 30, "PG30")
+#define SUNXI_PINCTRL_PIN_PG31 PINCTRL_PIN(PG_BASE + 31, "PG31")
+
+#define SUNXI_PINCTRL_PIN_PH0 PINCTRL_PIN(PH_BASE + 0, "PH0")
+#define SUNXI_PINCTRL_PIN_PH1 PINCTRL_PIN(PH_BASE + 1, "PH1")
+#define SUNXI_PINCTRL_PIN_PH2 PINCTRL_PIN(PH_BASE + 2, "PH2")
+#define SUNXI_PINCTRL_PIN_PH3 PINCTRL_PIN(PH_BASE + 3, "PH3")
+#define SUNXI_PINCTRL_PIN_PH4 PINCTRL_PIN(PH_BASE + 4, "PH4")
+#define SUNXI_PINCTRL_PIN_PH5 PINCTRL_PIN(PH_BASE + 5, "PH5")
+#define SUNXI_PINCTRL_PIN_PH6 PINCTRL_PIN(PH_BASE + 6, "PH6")
+#define SUNXI_PINCTRL_PIN_PH7 PINCTRL_PIN(PH_BASE + 7, "PH7")
+#define SUNXI_PINCTRL_PIN_PH8 PINCTRL_PIN(PH_BASE + 8, "PH8")
+#define SUNXI_PINCTRL_PIN_PH9 PINCTRL_PIN(PH_BASE + 9, "PH9")
+#define SUNXI_PINCTRL_PIN_PH10 PINCTRL_PIN(PH_BASE + 10, "PH10")
+#define SUNXI_PINCTRL_PIN_PH11 PINCTRL_PIN(PH_BASE + 11, "PH11")
+#define SUNXI_PINCTRL_PIN_PH12 PINCTRL_PIN(PH_BASE + 12, "PH12")
+#define SUNXI_PINCTRL_PIN_PH13 PINCTRL_PIN(PH_BASE + 13, "PH13")
+#define SUNXI_PINCTRL_PIN_PH14 PINCTRL_PIN(PH_BASE + 14, "PH14")
+#define SUNXI_PINCTRL_PIN_PH15 PINCTRL_PIN(PH_BASE + 15, "PH15")
+#define SUNXI_PINCTRL_PIN_PH16 PINCTRL_PIN(PH_BASE + 16, "PH16")
+#define SUNXI_PINCTRL_PIN_PH17 PINCTRL_PIN(PH_BASE + 17, "PH17")
+#define SUNXI_PINCTRL_PIN_PH18 PINCTRL_PIN(PH_BASE + 18, "PH18")
+#define SUNXI_PINCTRL_PIN_PH19 PINCTRL_PIN(PH_BASE + 19, "PH19")
+#define SUNXI_PINCTRL_PIN_PH20 PINCTRL_PIN(PH_BASE + 20, "PH20")
+#define SUNXI_PINCTRL_PIN_PH21 PINCTRL_PIN(PH_BASE + 21, "PH21")
+#define SUNXI_PINCTRL_PIN_PH22 PINCTRL_PIN(PH_BASE + 22, "PH22")
+#define SUNXI_PINCTRL_PIN_PH23 PINCTRL_PIN(PH_BASE + 23, "PH23")
+#define SUNXI_PINCTRL_PIN_PH24 PINCTRL_PIN(PH_BASE + 24, "PH24")
+#define SUNXI_PINCTRL_PIN_PH25 PINCTRL_PIN(PH_BASE + 25, "PH25")
+#define SUNXI_PINCTRL_PIN_PH26 PINCTRL_PIN(PH_BASE + 26, "PH26")
+#define SUNXI_PINCTRL_PIN_PH27 PINCTRL_PIN(PH_BASE + 27, "PH27")
+#define SUNXI_PINCTRL_PIN_PH28 PINCTRL_PIN(PH_BASE + 28, "PH28")
+#define SUNXI_PINCTRL_PIN_PH29 PINCTRL_PIN(PH_BASE + 29, "PH29")
+#define SUNXI_PINCTRL_PIN_PH30 PINCTRL_PIN(PH_BASE + 30, "PH30")
+#define SUNXI_PINCTRL_PIN_PH31 PINCTRL_PIN(PH_BASE + 31, "PH31")
+
+#define SUNXI_PINCTRL_PIN_PI0 PINCTRL_PIN(PI_BASE + 0, "PI0")
+#define SUNXI_PINCTRL_PIN_PI1 PINCTRL_PIN(PI_BASE + 1, "PI1")
+#define SUNXI_PINCTRL_PIN_PI2 PINCTRL_PIN(PI_BASE + 2, "PI2")
+#define SUNXI_PINCTRL_PIN_PI3 PINCTRL_PIN(PI_BASE + 3, "PI3")
+#define SUNXI_PINCTRL_PIN_PI4 PINCTRL_PIN(PI_BASE + 4, "PI4")
+#define SUNXI_PINCTRL_PIN_PI5 PINCTRL_PIN(PI_BASE + 5, "PI5")
+#define SUNXI_PINCTRL_PIN_PI6 PINCTRL_PIN(PI_BASE + 6, "PI6")
+#define SUNXI_PINCTRL_PIN_PI7 PINCTRL_PIN(PI_BASE + 7, "PI7")
+#define SUNXI_PINCTRL_PIN_PI8 PINCTRL_PIN(PI_BASE + 8, "PI8")
+#define SUNXI_PINCTRL_PIN_PI9 PINCTRL_PIN(PI_BASE + 9, "PI9")
+#define SUNXI_PINCTRL_PIN_PI10 PINCTRL_PIN(PI_BASE + 10, "PI10")
+#define SUNXI_PINCTRL_PIN_PI11 PINCTRL_PIN(PI_BASE + 11, "PI11")
+#define SUNXI_PINCTRL_PIN_PI12 PINCTRL_PIN(PI_BASE + 12, "PI12")
+#define SUNXI_PINCTRL_PIN_PI13 PINCTRL_PIN(PI_BASE + 13, "PI13")
+#define SUNXI_PINCTRL_PIN_PI14 PINCTRL_PIN(PI_BASE + 14, "PI14")
+#define SUNXI_PINCTRL_PIN_PI15 PINCTRL_PIN(PI_BASE + 15, "PI15")
+#define SUNXI_PINCTRL_PIN_PI16 PINCTRL_PIN(PI_BASE + 16, "PI16")
+#define SUNXI_PINCTRL_PIN_PI17 PINCTRL_PIN(PI_BASE + 17, "PI17")
+#define SUNXI_PINCTRL_PIN_PI18 PINCTRL_PIN(PI_BASE + 18, "PI18")
+#define SUNXI_PINCTRL_PIN_PI19 PINCTRL_PIN(PI_BASE + 19, "PI19")
+#define SUNXI_PINCTRL_PIN_PI20 PINCTRL_PIN(PI_BASE + 20, "PI20")
+#define SUNXI_PINCTRL_PIN_PI21 PINCTRL_PIN(PI_BASE + 21, "PI21")
+#define SUNXI_PINCTRL_PIN_PI22 PINCTRL_PIN(PI_BASE + 22, "PI22")
+#define SUNXI_PINCTRL_PIN_PI23 PINCTRL_PIN(PI_BASE + 23, "PI23")
+#define SUNXI_PINCTRL_PIN_PI24 PINCTRL_PIN(PI_BASE + 24, "PI24")
+#define SUNXI_PINCTRL_PIN_PI25 PINCTRL_PIN(PI_BASE + 25, "PI25")
+#define SUNXI_PINCTRL_PIN_PI26 PINCTRL_PIN(PI_BASE + 26, "PI26")
+#define SUNXI_PINCTRL_PIN_PI27 PINCTRL_PIN(PI_BASE + 27, "PI27")
+#define SUNXI_PINCTRL_PIN_PI28 PINCTRL_PIN(PI_BASE + 28, "PI28")
+#define SUNXI_PINCTRL_PIN_PI29 PINCTRL_PIN(PI_BASE + 29, "PI29")
+#define SUNXI_PINCTRL_PIN_PI30 PINCTRL_PIN(PI_BASE + 30, "PI30")
+#define SUNXI_PINCTRL_PIN_PI31 PINCTRL_PIN(PI_BASE + 31, "PI31")
+
+#define SUNXI_PIN_NAME_MAX_LEN 5
+
+#define BANK_MEM_SIZE 0x24
+#define MUX_REGS_OFFSET 0x0
+#define DATA_REGS_OFFSET 0x10
+#define DLEVEL_REGS_OFFSET 0x14
+#define PULL_REGS_OFFSET 0x1c
+
+#define PINS_PER_BANK 32
+#define MUX_PINS_PER_REG 8
+#define MUX_PINS_BITS 4
+#define MUX_PINS_MASK 0x0f
+#define DATA_PINS_PER_REG 32
+#define DATA_PINS_BITS 1
+#define DATA_PINS_MASK 0x01
+#define DLEVEL_PINS_PER_REG 16
+#define DLEVEL_PINS_BITS 2
+#define DLEVEL_PINS_MASK 0x03
+#define PULL_PINS_PER_REG 16
+#define PULL_PINS_BITS 2
+#define PULL_PINS_MASK 0x03
+
+struct sunxi_desc_function {
+ const char *name;
+ u8 muxval;
+};
+
+struct sunxi_desc_pin {
+ struct pinctrl_pin_desc pin;
+ struct sunxi_desc_function *functions;
+};
+
+struct sunxi_pinctrl_desc {
+ const struct sunxi_desc_pin *pins;
+ int npins;
+ struct pinctrl_gpio_range *ranges;
+ int nranges;
+};
+
+struct sunxi_pinctrl_function {
+ const char *name;
+ const char **groups;
+ unsigned ngroups;
+};
+
+struct sunxi_pinctrl_group {
+ const char *name;
+ unsigned long config;
+ unsigned pin;
+};
+
+struct sunxi_pinctrl {
+ void __iomem *membase;
+ struct gpio_chip *chip;
+ struct sunxi_pinctrl_desc *desc;
+ struct device *dev;
+ struct sunxi_pinctrl_function *functions;
+ unsigned nfunctions;
+ struct sunxi_pinctrl_group *groups;
+ unsigned ngroups;
+ struct pinctrl_dev *pctl_dev;
+};
+
+#define SUNXI_PIN(_pin, ...) \
+ { \
+ .pin = _pin, \
+ .functions = (struct sunxi_desc_function[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+#define SUNXI_FUNCTION(_val, _name) \
+ { \
+ .name = _name, \
+ .muxval = _val, \
+ }
+
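+/*
+ * Illustrative (hypothetical) use of the two macros above; the real
+ * pin tables come from the SoC-specific pin description data:
+ *
+ *	static const struct sunxi_desc_pin example_pins[] = {
+ *		SUNXI_PIN(SUNXI_PINCTRL_PIN_PA0,
+ *			  SUNXI_FUNCTION(0x0, "gpio_in"),
+ *			  SUNXI_FUNCTION(0x1, "gpio_out")),
+ *	};
+ */
+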
+/*
+ * The sunXi PIO registers are organized as follows:
+ * 0x00 - 0x0c Muxing values.
+ * 8 pins per register, each pin having a 4-bit value
+ * 0x10 Pin values
+ * 32 bits per register, each pin corresponding to one bit
+ * 0x14 - 0x18 Drive level
+ * 16 pins per register, each pin having a 2-bit value
+ * 0x1c - 0x20 Pull-Up values
+ * 16 pins per register, each pin having a 2-bit value
+ *
+ * This is for the first bank. Each bank will have the same layout,
+ * with an offset being a multiple of 0x24.
+ *
+ * The following functions calculate, from the pin number, the
+ * register and the bit offset that we should access.
+ */
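+/*
+ * Worked example (values derived from the helpers below): pin PG9 is
+ * number 192 + 9 = 201, i.e. bank 6, whose registers start at
+ * 6 * 0x24 = 0xd8. Its mux value is bits [7:4] of the register at
+ * 0xd8 + 0x00 + (9 / 8) * 4 = 0xdc, its data value is bit 9 of the
+ * register at 0xd8 + 0x10 = 0xe8, and its pull-up setting is bits
+ * [19:18] of the register at 0xd8 + 0x1c = 0xf4.
+ */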
+static inline u32 sunxi_mux_reg(u16 pin)
+{
+ u8 bank = pin / PINS_PER_BANK;
+ u32 offset = bank * BANK_MEM_SIZE;
+ offset += MUX_REGS_OFFSET;
+ offset += pin % PINS_PER_BANK / MUX_PINS_PER_REG * 0x04;
+ return round_down(offset, 4);
+}
+
+static inline u32 sunxi_mux_offset(u16 pin)
+{
+ u32 pin_num = pin % MUX_PINS_PER_REG;
+ return pin_num * MUX_PINS_BITS;
+}
+
+static inline u32 sunxi_data_reg(u16 pin)
+{
+ u8 bank = pin / PINS_PER_BANK;
+ u32 offset = bank * BANK_MEM_SIZE;
+ offset += DATA_REGS_OFFSET;
+ offset += pin % PINS_PER_BANK / DATA_PINS_PER_REG * 0x04;
+ return round_down(offset, 4);
+}
+
+static inline u32 sunxi_data_offset(u16 pin)
+{
+ u32 pin_num = pin % DATA_PINS_PER_REG;
+ return pin_num * DATA_PINS_BITS;
+}
+
+static inline u32 sunxi_dlevel_reg(u16 pin)
+{
+ u8 bank = pin / PINS_PER_BANK;
+ u32 offset = bank * BANK_MEM_SIZE;
+ offset += DLEVEL_REGS_OFFSET;
+ offset += pin % PINS_PER_BANK / DLEVEL_PINS_PER_REG * 0x04;
+ return round_down(offset, 4);
+}
+
+static inline u32 sunxi_dlevel_offset(u16 pin)
+{
+ u32 pin_num = pin % DLEVEL_PINS_PER_REG;
+ return pin_num * DLEVEL_PINS_BITS;
+}
+
+static inline u32 sunxi_pull_reg(u16 pin)
+{
+ u8 bank = pin / PINS_PER_BANK;
+ u32 offset = bank * BANK_MEM_SIZE;
+ offset += PULL_REGS_OFFSET;
+ offset += pin % PINS_PER_BANK / PULL_PINS_PER_REG * 0x04;
+ return round_down(offset, 4);
+}
+
+static inline u32 sunxi_pull_offset(u16 pin)
+{
+ u32 pin_num = pin % PULL_PINS_PER_REG;
+ return pin_num * PULL_PINS_BITS;
+}
+
+#endif /* __PINCTRL_SUNXI_H */
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index e356b0380fa7..f195d77a3572 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -201,6 +201,7 @@ static const struct cfg_param {
{"nvidia,open-drain", TEGRA_PINCONF_PARAM_OPEN_DRAIN},
{"nvidia,lock", TEGRA_PINCONF_PARAM_LOCK},
{"nvidia,io-reset", TEGRA_PINCONF_PARAM_IORESET},
+ {"nvidia,rcv-sel", TEGRA_PINCONF_PARAM_RCV_SEL},
{"nvidia,high-speed-mode", TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE},
{"nvidia,schmitt", TEGRA_PINCONF_PARAM_SCHMITT},
{"nvidia,low-power-mode", TEGRA_PINCONF_PARAM_LOW_POWER_MODE},
@@ -208,6 +209,7 @@ static const struct cfg_param {
{"nvidia,pull-up-strength", TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH},
{"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING},
{"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING},
+ {"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE},
};
static int tegra_pinctrl_dt_subnode_to_map(struct device *dev,
@@ -450,6 +452,12 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
*bit = g->ioreset_bit;
*width = 1;
break;
+ case TEGRA_PINCONF_PARAM_RCV_SEL:
+ *bank = g->rcv_sel_bank;
+ *reg = g->rcv_sel_reg;
+ *bit = g->rcv_sel_bit;
+ *width = 1;
+ break;
case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE:
*bank = g->drv_bank;
*reg = g->drv_reg;
@@ -492,6 +500,12 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
*bit = g->slwr_bit;
*width = g->slwr_width;
break;
+ case TEGRA_PINCONF_PARAM_DRIVE_TYPE:
+ *bank = g->drvtype_bank;
+ *reg = g->drvtype_reg;
+ *bit = g->drvtype_bit;
+ *width = 2;
+ break;
default:
dev_err(pmx->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
@@ -687,7 +701,7 @@ static struct pinctrl_desc tegra_pinctrl_desc = {
.owner = THIS_MODULE,
};
-int __devinit tegra_pinctrl_probe(struct platform_device *pdev,
+int tegra_pinctrl_probe(struct platform_device *pdev,
const struct tegra_pinctrl_soc_data *soc_data)
{
struct tegra_pmx *pmx;
diff --git a/drivers/pinctrl/pinctrl-tegra.h b/drivers/pinctrl/pinctrl-tegra.h
index 62e380965c68..817f7061dc4c 100644
--- a/drivers/pinctrl/pinctrl-tegra.h
+++ b/drivers/pinctrl/pinctrl-tegra.h
@@ -30,6 +30,8 @@ enum tegra_pinconf_param {
/* argument: Boolean */
TEGRA_PINCONF_PARAM_IORESET,
/* argument: Boolean */
+ TEGRA_PINCONF_PARAM_RCV_SEL,
+ /* argument: Boolean */
TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE,
/* argument: Boolean */
TEGRA_PINCONF_PARAM_SCHMITT,
@@ -43,6 +45,8 @@ enum tegra_pinconf_param {
TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING,
 /* argument: Integer, range is HW-dependent */
TEGRA_PINCONF_PARAM_SLEW_RATE_RISING,
+ /* argument: Integer, range is HW-dependent */
+ TEGRA_PINCONF_PARAM_DRIVE_TYPE,
};
enum tegra_pinconf_pull {
@@ -95,6 +99,9 @@ struct tegra_function {
* @ioreset_reg: IO reset register offset. -1 if unsupported.
* @ioreset_bank: IO reset register bank. 0 if unsupported.
* @ioreset_bit: IO reset register bit. 0 if unsupported.
+ * @rcv_sel_reg: Receiver select offset. -1 if unsupported.
+ * @rcv_sel_bank: Receiver select bank. 0 if unsupported.
+ * @rcv_sel_bit: Receiver select bit. 0 if unsupported.
* @drv_reg: Drive fields register offset. -1 if unsupported.
* This register contains the hsm, schmitt, lpmd, drvdn,
* drvup, slwr, and slwf parameters.
@@ -110,6 +117,9 @@ struct tegra_function {
* @slwr_width: Slew Rising field width. 0 if unsupported.
* @slwf_bit: Slew Falling register bit. 0 if unsupported.
* @slwf_width: Slew Falling field width. 0 if unsupported.
+ * @drvtype_reg: Drive type fields register offset. -1 if unsupported.
+ * @drvtype_bank: Drive type fields register bank. 0 if unsupported.
+ * @drvtype_bit: Drive type register bit. 0 if unsupported.
*
* A representation of a group of pins (possibly just one pin) in the Tegra
* pin controller. Each group allows some parameter or parameters to be
@@ -131,15 +141,19 @@ struct tegra_pingroup {
s16 odrain_reg;
s16 lock_reg;
s16 ioreset_reg;
+ s16 rcv_sel_reg;
s16 drv_reg;
+ s16 drvtype_reg;
u32 mux_bank:2;
u32 pupd_bank:2;
u32 tri_bank:2;
u32 einput_bank:2;
u32 odrain_bank:2;
u32 ioreset_bank:2;
+ u32 rcv_sel_bank:2;
u32 lock_bank:2;
u32 drv_bank:2;
+ u32 drvtype_bank:2;
u32 mux_bit:5;
u32 pupd_bit:5;
u32 tri_bit:5;
@@ -147,6 +161,7 @@ struct tegra_pingroup {
u32 odrain_bit:5;
u32 lock_bit:5;
u32 ioreset_bit:5;
+ u32 rcv_sel_bit:5;
u32 hsm_bit:5;
u32 schmitt_bit:5;
u32 lpmd_bit:5;
@@ -154,6 +169,7 @@ struct tegra_pingroup {
u32 drvup_bit:5;
u32 slwr_bit:5;
u32 slwf_bit:5;
+ u32 drvtype_bit:5;
u32 drvdn_width:6;
u32 drvup_width:6;
u32 slwr_width:6;
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
new file mode 100644
index 000000000000..622c4854977e
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -0,0 +1,2769 @@
+/*
+ * Pinctrl data and driver for the NVIDIA Tegra114 pinmux
+ *
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Pritesh Raithatha <praithatha@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-tegra.h"
+
+/*
+ * Most pins affected by the pinmux can also be GPIOs. Define these first.
+ * These must match how the GPIO driver names/numbers its pins.
+ */
+#define _GPIO(offset) (offset)
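+/* e.g. TEGRA_PIN_DAP2_FS_PA2 below is _GPIO(2), matching GPIO number 2 in the GPIO driver */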
+
+#define TEGRA_PIN_CLK_32K_OUT_PA0 _GPIO(0)
+#define TEGRA_PIN_UART3_CTS_N_PA1 _GPIO(1)
+#define TEGRA_PIN_DAP2_FS_PA2 _GPIO(2)
+#define TEGRA_PIN_DAP2_SCLK_PA3 _GPIO(3)
+#define TEGRA_PIN_DAP2_DIN_PA4 _GPIO(4)
+#define TEGRA_PIN_DAP2_DOUT_PA5 _GPIO(5)
+#define TEGRA_PIN_SDMMC3_CLK_PA6 _GPIO(6)
+#define TEGRA_PIN_SDMMC3_CMD_PA7 _GPIO(7)
+#define TEGRA_PIN_GMI_A17_PB0 _GPIO(8)
+#define TEGRA_PIN_GMI_A18_PB1 _GPIO(9)
+#define TEGRA_PIN_SDMMC3_DAT3_PB4 _GPIO(12)
+#define TEGRA_PIN_SDMMC3_DAT2_PB5 _GPIO(13)
+#define TEGRA_PIN_SDMMC3_DAT1_PB6 _GPIO(14)
+#define TEGRA_PIN_SDMMC3_DAT0_PB7 _GPIO(15)
+#define TEGRA_PIN_UART3_RTS_N_PC0 _GPIO(16)
+#define TEGRA_PIN_UART2_TXD_PC2 _GPIO(18)
+#define TEGRA_PIN_UART2_RXD_PC3 _GPIO(19)
+#define TEGRA_PIN_GEN1_I2C_SCL_PC4 _GPIO(20)
+#define TEGRA_PIN_GEN1_I2C_SDA_PC5 _GPIO(21)
+#define TEGRA_PIN_GMI_WP_N_PC7 _GPIO(23)
+#define TEGRA_PIN_GMI_AD0_PG0 _GPIO(48)
+#define TEGRA_PIN_GMI_AD1_PG1 _GPIO(49)
+#define TEGRA_PIN_GMI_AD2_PG2 _GPIO(50)
+#define TEGRA_PIN_GMI_AD3_PG3 _GPIO(51)
+#define TEGRA_PIN_GMI_AD4_PG4 _GPIO(52)
+#define TEGRA_PIN_GMI_AD5_PG5 _GPIO(53)
+#define TEGRA_PIN_GMI_AD6_PG6 _GPIO(54)
+#define TEGRA_PIN_GMI_AD7_PG7 _GPIO(55)
+#define TEGRA_PIN_GMI_AD8_PH0 _GPIO(56)
+#define TEGRA_PIN_GMI_AD9_PH1 _GPIO(57)
+#define TEGRA_PIN_GMI_AD10_PH2 _GPIO(58)
+#define TEGRA_PIN_GMI_AD11_PH3 _GPIO(59)
+#define TEGRA_PIN_GMI_AD12_PH4 _GPIO(60)
+#define TEGRA_PIN_GMI_AD13_PH5 _GPIO(61)
+#define TEGRA_PIN_GMI_AD14_PH6 _GPIO(62)
+#define TEGRA_PIN_GMI_AD15_PH7 _GPIO(63)
+#define TEGRA_PIN_GMI_WR_N_PI0 _GPIO(64)
+#define TEGRA_PIN_GMI_OE_N_PI1 _GPIO(65)
+#define TEGRA_PIN_GMI_CS6_N_PI3 _GPIO(67)
+#define TEGRA_PIN_GMI_RST_N_PI4 _GPIO(68)
+#define TEGRA_PIN_GMI_IORDY_PI5 _GPIO(69)
+#define TEGRA_PIN_GMI_CS7_N_PI6 _GPIO(70)
+#define TEGRA_PIN_GMI_WAIT_PI7 _GPIO(71)
+#define TEGRA_PIN_GMI_CS0_N_PJ0 _GPIO(72)
+#define TEGRA_PIN_GMI_CS1_N_PJ2 _GPIO(74)
+#define TEGRA_PIN_GMI_DQS_P_PJ3 _GPIO(75)
+#define TEGRA_PIN_UART2_CTS_N_PJ5 _GPIO(77)
+#define TEGRA_PIN_UART2_RTS_N_PJ6 _GPIO(78)
+#define TEGRA_PIN_GMI_A16_PJ7 _GPIO(79)
+#define TEGRA_PIN_GMI_ADV_N_PK0 _GPIO(80)
+#define TEGRA_PIN_GMI_CLK_PK1 _GPIO(81)
+#define TEGRA_PIN_GMI_CS4_N_PK2 _GPIO(82)
+#define TEGRA_PIN_GMI_CS2_N_PK3 _GPIO(83)
+#define TEGRA_PIN_GMI_CS3_N_PK4 _GPIO(84)
+#define TEGRA_PIN_SPDIF_OUT_PK5 _GPIO(85)
+#define TEGRA_PIN_SPDIF_IN_PK6 _GPIO(86)
+#define TEGRA_PIN_GMI_A19_PK7 _GPIO(87)
+#define TEGRA_PIN_DAP1_FS_PN0 _GPIO(104)
+#define TEGRA_PIN_DAP1_DIN_PN1 _GPIO(105)
+#define TEGRA_PIN_DAP1_DOUT_PN2 _GPIO(106)
+#define TEGRA_PIN_DAP1_SCLK_PN3 _GPIO(107)
+#define TEGRA_PIN_USB_VBUS_EN0_PN4 _GPIO(108)
+#define TEGRA_PIN_USB_VBUS_EN1_PN5 _GPIO(109)
+#define TEGRA_PIN_HDMI_INT_PN7 _GPIO(111)
+#define TEGRA_PIN_ULPI_DATA7_PO0 _GPIO(112)
+#define TEGRA_PIN_ULPI_DATA0_PO1 _GPIO(113)
+#define TEGRA_PIN_ULPI_DATA1_PO2 _GPIO(114)
+#define TEGRA_PIN_ULPI_DATA2_PO3 _GPIO(115)
+#define TEGRA_PIN_ULPI_DATA3_PO4 _GPIO(116)
+#define TEGRA_PIN_ULPI_DATA4_PO5 _GPIO(117)
+#define TEGRA_PIN_ULPI_DATA5_PO6 _GPIO(118)
+#define TEGRA_PIN_ULPI_DATA6_PO7 _GPIO(119)
+#define TEGRA_PIN_DAP3_FS_PP0 _GPIO(120)
+#define TEGRA_PIN_DAP3_DIN_PP1 _GPIO(121)
+#define TEGRA_PIN_DAP3_DOUT_PP2 _GPIO(122)
+#define TEGRA_PIN_DAP3_SCLK_PP3 _GPIO(123)
+#define TEGRA_PIN_DAP4_FS_PP4 _GPIO(124)
+#define TEGRA_PIN_DAP4_DIN_PP5 _GPIO(125)
+#define TEGRA_PIN_DAP4_DOUT_PP6 _GPIO(126)
+#define TEGRA_PIN_DAP4_SCLK_PP7 _GPIO(127)
+#define TEGRA_PIN_KB_COL0_PQ0 _GPIO(128)
+#define TEGRA_PIN_KB_COL1_PQ1 _GPIO(129)
+#define TEGRA_PIN_KB_COL2_PQ2 _GPIO(130)
+#define TEGRA_PIN_KB_COL3_PQ3 _GPIO(131)
+#define TEGRA_PIN_KB_COL4_PQ4 _GPIO(132)
+#define TEGRA_PIN_KB_COL5_PQ5 _GPIO(133)
+#define TEGRA_PIN_KB_COL6_PQ6 _GPIO(134)
+#define TEGRA_PIN_KB_COL7_PQ7 _GPIO(135)
+#define TEGRA_PIN_KB_ROW0_PR0 _GPIO(136)
+#define TEGRA_PIN_KB_ROW1_PR1 _GPIO(137)
+#define TEGRA_PIN_KB_ROW2_PR2 _GPIO(138)
+#define TEGRA_PIN_KB_ROW3_PR3 _GPIO(139)
+#define TEGRA_PIN_KB_ROW4_PR4 _GPIO(140)
+#define TEGRA_PIN_KB_ROW5_PR5 _GPIO(141)
+#define TEGRA_PIN_KB_ROW6_PR6 _GPIO(142)
+#define TEGRA_PIN_KB_ROW7_PR7 _GPIO(143)
+#define TEGRA_PIN_KB_ROW8_PS0 _GPIO(144)
+#define TEGRA_PIN_KB_ROW9_PS1 _GPIO(145)
+#define TEGRA_PIN_KB_ROW10_PS2 _GPIO(146)
+#define TEGRA_PIN_GEN2_I2C_SCL_PT5 _GPIO(157)
+#define TEGRA_PIN_GEN2_I2C_SDA_PT6 _GPIO(158)
+#define TEGRA_PIN_SDMMC4_CMD_PT7 _GPIO(159)
+#define TEGRA_PIN_PU0 _GPIO(160)
+#define TEGRA_PIN_PU1 _GPIO(161)
+#define TEGRA_PIN_PU2 _GPIO(162)
+#define TEGRA_PIN_PU3 _GPIO(163)
+#define TEGRA_PIN_PU4 _GPIO(164)
+#define TEGRA_PIN_PU5 _GPIO(165)
+#define TEGRA_PIN_PU6 _GPIO(166)
+#define TEGRA_PIN_PV0 _GPIO(168)
+#define TEGRA_PIN_PV1 _GPIO(169)
+#define TEGRA_PIN_SDMMC3_CD_N_PV2 _GPIO(170)
+#define TEGRA_PIN_SDMMC1_WP_N_PV3 _GPIO(171)
+#define TEGRA_PIN_DDC_SCL_PV4 _GPIO(172)
+#define TEGRA_PIN_DDC_SDA_PV5 _GPIO(173)
+#define TEGRA_PIN_GPIO_W2_AUD_PW2 _GPIO(178)
+#define TEGRA_PIN_GPIO_W3_AUD_PW3 _GPIO(179)
+#define TEGRA_PIN_CLK1_OUT_PW4 _GPIO(180)
+#define TEGRA_PIN_CLK2_OUT_PW5 _GPIO(181)
+#define TEGRA_PIN_UART3_TXD_PW6 _GPIO(182)
+#define TEGRA_PIN_UART3_RXD_PW7 _GPIO(183)
+#define TEGRA_PIN_DVFS_PWM_PX0 _GPIO(184)
+#define TEGRA_PIN_GPIO_X1_AUD_PX1 _GPIO(185)
+#define TEGRA_PIN_DVFS_CLK_PX2 _GPIO(186)
+#define TEGRA_PIN_GPIO_X3_AUD_PX3 _GPIO(187)
+#define TEGRA_PIN_GPIO_X4_AUD_PX4 _GPIO(188)
+#define TEGRA_PIN_GPIO_X5_AUD_PX5 _GPIO(189)
+#define TEGRA_PIN_GPIO_X6_AUD_PX6 _GPIO(190)
+#define TEGRA_PIN_GPIO_X7_AUD_PX7 _GPIO(191)
+#define TEGRA_PIN_ULPI_CLK_PY0 _GPIO(192)
+#define TEGRA_PIN_ULPI_DIR_PY1 _GPIO(193)
+#define TEGRA_PIN_ULPI_NXT_PY2 _GPIO(194)
+#define TEGRA_PIN_ULPI_STP_PY3 _GPIO(195)
+#define TEGRA_PIN_SDMMC1_DAT3_PY4 _GPIO(196)
+#define TEGRA_PIN_SDMMC1_DAT2_PY5 _GPIO(197)
+#define TEGRA_PIN_SDMMC1_DAT1_PY6 _GPIO(198)
+#define TEGRA_PIN_SDMMC1_DAT0_PY7 _GPIO(199)
+#define TEGRA_PIN_SDMMC1_CLK_PZ0 _GPIO(200)
+#define TEGRA_PIN_SDMMC1_CMD_PZ1 _GPIO(201)
+#define TEGRA_PIN_SYS_CLK_REQ_PZ5 _GPIO(205)
+#define TEGRA_PIN_PWR_I2C_SCL_PZ6 _GPIO(206)
+#define TEGRA_PIN_PWR_I2C_SDA_PZ7 _GPIO(207)
+#define TEGRA_PIN_SDMMC4_DAT0_PAA0 _GPIO(208)
+#define TEGRA_PIN_SDMMC4_DAT1_PAA1 _GPIO(209)
+#define TEGRA_PIN_SDMMC4_DAT2_PAA2 _GPIO(210)
+#define TEGRA_PIN_SDMMC4_DAT3_PAA3 _GPIO(211)
+#define TEGRA_PIN_SDMMC4_DAT4_PAA4 _GPIO(212)
+#define TEGRA_PIN_SDMMC4_DAT5_PAA5 _GPIO(213)
+#define TEGRA_PIN_SDMMC4_DAT6_PAA6 _GPIO(214)
+#define TEGRA_PIN_SDMMC4_DAT7_PAA7 _GPIO(215)
+#define TEGRA_PIN_PBB0 _GPIO(216)
+#define TEGRA_PIN_CAM_I2C_SCL_PBB1 _GPIO(217)
+#define TEGRA_PIN_CAM_I2C_SDA_PBB2 _GPIO(218)
+#define TEGRA_PIN_PBB3 _GPIO(219)
+#define TEGRA_PIN_PBB4 _GPIO(220)
+#define TEGRA_PIN_PBB5 _GPIO(221)
+#define TEGRA_PIN_PBB6 _GPIO(222)
+#define TEGRA_PIN_PBB7 _GPIO(223)
+#define TEGRA_PIN_CAM_MCLK_PCC0 _GPIO(224)
+#define TEGRA_PIN_PCC1 _GPIO(225)
+#define TEGRA_PIN_PCC2 _GPIO(226)
+#define TEGRA_PIN_SDMMC4_CLK_PCC4 _GPIO(228)
+#define TEGRA_PIN_CLK2_REQ_PCC5 _GPIO(229)
+#define TEGRA_PIN_CLK3_OUT_PEE0 _GPIO(240)
+#define TEGRA_PIN_CLK3_REQ_PEE1 _GPIO(241)
+#define TEGRA_PIN_CLK1_REQ_PEE2 _GPIO(242)
+#define TEGRA_PIN_HDMI_CEC_PEE3 _GPIO(243)
+#define TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4 _GPIO(244)
+#define TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 _GPIO(245)
+
+/* All non-GPIO pins follow */
+#define NUM_GPIOS (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
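+/* e.g. TEGRA_PIN_CORE_PWR_REQ below is _PIN(0) = 245 + 1 = 246 */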
+
+/* Non-GPIO pins */
+#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
+#define TEGRA_PIN_CPU_PWR_REQ _PIN(1)
+#define TEGRA_PIN_PWR_INT_N _PIN(2)
+#define TEGRA_PIN_RESET_OUT_N _PIN(3)
+#define TEGRA_PIN_OWR _PIN(4)
+
+static const struct pinctrl_pin_desc tegra114_pins[] = {
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_SCLK_PA3, "DAP2_SCLK PA3"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DIN_PA4, "DAP2_DIN PA4"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DOUT_PA5, "DAP2_DOUT PA5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_PA6, "SDMMC3_CLK PA6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CMD_PA7, "SDMMC3_CMD PA7"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_A17_PB0, "GMI_A17 PB0"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_A18_PB1, "GMI_A18 PB1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT3_PB4, "SDMMC3_DAT3 PB4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT2_PB5, "SDMMC3_DAT2 PB5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT1_PB6, "SDMMC3_DAT1 PB6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT0_PB7, "SDMMC3_DAT0 PB7"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RTS_N_PC0, "UART3_RTS_N PC0"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_TXD_PC2, "UART2_TXD PC2"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RXD_PC3, "UART2_RXD PC3"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SCL_PC4, "GEN1_I2C_SCL PC4"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SDA_PC5, "GEN1_I2C_SDA PC5"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_WP_N_PC7, "GMI_WP_N PC7"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD0_PG0, "GMI_AD0 PG0"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD1_PG1, "GMI_AD1 PG1"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD2_PG2, "GMI_AD2 PG2"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD3_PG3, "GMI_AD3 PG3"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD4_PG4, "GMI_AD4 PG4"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD5_PG5, "GMI_AD5 PG5"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD6_PG6, "GMI_AD6 PG6"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD7_PG7, "GMI_AD7 PG7"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD8_PH0, "GMI_AD8 PH0"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD9_PH1, "GMI_AD9 PH1"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD10_PH2, "GMI_AD10 PH2"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD11_PH3, "GMI_AD11 PH3"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD12_PH4, "GMI_AD12 PH4"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD13_PH5, "GMI_AD13 PH5"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD14_PH6, "GMI_AD14 PH6"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_AD15_PH7, "GMI_AD15 PH7"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_WR_N_PI0, "GMI_WR_N PI0"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_OE_N_PI1, "GMI_OE_N PI1"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CS6_N_PI3, "GMI_CS6_N PI3"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_RST_N_PI4, "GMI_RST_N PI4"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_IORDY_PI5, "GMI_IORDY PI5"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CS7_N_PI6, "GMI_CS7_N PI6"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_WAIT_PI7, "GMI_WAIT PI7"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CS0_N_PJ0, "GMI_CS0_N PJ0"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CS1_N_PJ2, "GMI_CS1_N PJ2"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_DQS_P_PJ3, "GMI_DQS_P PJ3"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_CTS_N_PJ5, "UART2_CTS_N PJ5"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RTS_N_PJ6, "UART2_RTS_N PJ6"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_A16_PJ7, "GMI_A16 PJ7"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_ADV_N_PK0, "GMI_ADV_N PK0"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CLK_PK1, "GMI_CLK PK1"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CS4_N_PK2, "GMI_CS4_N PK2"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CS2_N_PK3, "GMI_CS2_N PK3"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CS3_N_PK4, "GMI_CS3_N PK4"),
+ PINCTRL_PIN(TEGRA_PIN_SPDIF_OUT_PK5, "SPDIF_OUT PK5"),
+ PINCTRL_PIN(TEGRA_PIN_SPDIF_IN_PK6, "SPDIF_IN PK6"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_A19_PK7, "GMI_A19 PK7"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_FS_PN0, "DAP1_FS PN0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DIN_PN1, "DAP1_DIN PN1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DOUT_PN2, "DAP1_DOUT PN2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_SCLK_PN3, "DAP1_SCLK PN3"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN0_PN4, "USB_VBUS_EN0 PN4"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN1_PN5, "USB_VBUS_EN1 PN5"),
+ PINCTRL_PIN(TEGRA_PIN_HDMI_INT_PN7, "HDMI_INT PN7"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA7_PO0, "ULPI_DATA7 PO0"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA0_PO1, "ULPI_DATA0 PO1"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA1_PO2, "ULPI_DATA1 PO2"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA2_PO3, "ULPI_DATA2 PO3"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA3_PO4, "ULPI_DATA3 PO4"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA4_PO5, "ULPI_DATA4 PO5"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA5_PO6, "ULPI_DATA5 PO6"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA6_PO7, "ULPI_DATA6 PO7"),
+ PINCTRL_PIN(TEGRA_PIN_DAP3_FS_PP0, "DAP3_FS PP0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP3_DIN_PP1, "DAP3_DIN PP1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP3_DOUT_PP2, "DAP3_DOUT PP2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP3_SCLK_PP3, "DAP3_SCLK PP3"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_FS_PP4, "DAP4_FS PP4"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DIN_PP5, "DAP4_DIN PP5"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DOUT_PP6, "DAP4_DOUT PP6"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_SCLK_PP7, "DAP4_SCLK PP7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL0_PQ0, "KB_COL0 PQ0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL1_PQ1, "KB_COL1 PQ1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL2_PQ2, "KB_COL2 PQ2"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL3_PQ3, "KB_COL3 PQ3"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL4_PQ4, "KB_COL4 PQ4"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL5_PQ5, "KB_COL5 PQ5"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL6_PQ6, "KB_COL6 PQ6"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL7_PQ7, "KB_COL7 PQ7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW0_PR0, "KB_ROW0 PR0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW1_PR1, "KB_ROW1 PR1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW2_PR2, "KB_ROW2 PR2"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW3_PR3, "KB_ROW3 PR3"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW4_PR4, "KB_ROW4 PR4"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW5_PR5, "KB_ROW5 PR5"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW6_PR6, "KB_ROW6 PR6"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW7_PR7, "KB_ROW7 PR7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
+ PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"),
+ PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"),
+ PINCTRL_PIN(TEGRA_PIN_PU0, "PU0"),
+ PINCTRL_PIN(TEGRA_PIN_PU1, "PU1"),
+ PINCTRL_PIN(TEGRA_PIN_PU2, "PU2"),
+ PINCTRL_PIN(TEGRA_PIN_PU3, "PU3"),
+ PINCTRL_PIN(TEGRA_PIN_PU4, "PU4"),
+ PINCTRL_PIN(TEGRA_PIN_PU5, "PU5"),
+ PINCTRL_PIN(TEGRA_PIN_PU6, "PU6"),
+ PINCTRL_PIN(TEGRA_PIN_PV0, "PV0"),
+ PINCTRL_PIN(TEGRA_PIN_PV1, "PV1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CD_N_PV2, "SDMMC3_CD_N PV2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_WP_N_PV3, "SDMMC1_WP_N PV3"),
+ PINCTRL_PIN(TEGRA_PIN_DDC_SCL_PV4, "DDC_SCL PV4"),
+ PINCTRL_PIN(TEGRA_PIN_DDC_SDA_PV5, "DDC_SDA PV5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_W2_AUD_PW2, "GPIO_W2_AUD PW2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_W3_AUD_PW3, "GPIO_W3_AUD PW3"),
+ PINCTRL_PIN(TEGRA_PIN_CLK1_OUT_PW4, "CLK1_OUT PW4"),
+ PINCTRL_PIN(TEGRA_PIN_CLK2_OUT_PW5, "CLK2_OUT PW5"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_TXD_PW6, "UART3_TXD PW6"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RXD_PW7, "UART3_RXD PW7"),
+ PINCTRL_PIN(TEGRA_PIN_DVFS_PWM_PX0, "DVFS_PWM PX0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X1_AUD_PX1, "GPIO_X1_AUD PX1"),
+ PINCTRL_PIN(TEGRA_PIN_DVFS_CLK_PX2, "DVFS_CLK PX2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X3_AUD_PX3, "GPIO_X3_AUD PX3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X4_AUD_PX4, "GPIO_X4_AUD PX4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X5_AUD_PX5, "GPIO_X5_AUD PX5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X6_AUD_PX6, "GPIO_X6_AUD PX6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X7_AUD_PX7, "GPIO_X7_AUD PX7"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_CLK_PY0, "ULPI_CLK PY0"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DIR_PY1, "ULPI_DIR PY1"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_NXT_PY2, "ULPI_NXT PY2"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_STP_PY3, "ULPI_STP PY3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT3_PY4, "SDMMC1_DAT3 PY4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT2_PY5, "SDMMC1_DAT2 PY5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT1_PY6, "SDMMC1_DAT1 PY6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT0_PY7, "SDMMC1_DAT0 PY7"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CLK_PZ0, "SDMMC1_CLK PZ0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CMD_PZ1, "SDMMC1_CMD PZ1"),
+ PINCTRL_PIN(TEGRA_PIN_SYS_CLK_REQ_PZ5, "SYS_CLK_REQ PZ5"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SCL_PZ6, "PWR_I2C_SCL PZ6"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SDA_PZ7, "PWR_I2C_SDA PZ7"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT0_PAA0, "SDMMC4_DAT0 PAA0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT1_PAA1, "SDMMC4_DAT1 PAA1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT2_PAA2, "SDMMC4_DAT2 PAA2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT3_PAA3, "SDMMC4_DAT3 PAA3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT4_PAA4, "SDMMC4_DAT4 PAA4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT5_PAA5, "SDMMC4_DAT5 PAA5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT6_PAA6, "SDMMC4_DAT6 PAA6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT7_PAA7, "SDMMC4_DAT7 PAA7"),
+ PINCTRL_PIN(TEGRA_PIN_PBB0, "PBB0"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SCL_PBB1, "CAM_I2C_SCL PBB1"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SDA_PBB2, "CAM_I2C_SDA PBB2"),
+ PINCTRL_PIN(TEGRA_PIN_PBB3, "PBB3"),
+ PINCTRL_PIN(TEGRA_PIN_PBB4, "PBB4"),
+ PINCTRL_PIN(TEGRA_PIN_PBB5, "PBB5"),
+ PINCTRL_PIN(TEGRA_PIN_PBB6, "PBB6"),
+ PINCTRL_PIN(TEGRA_PIN_PBB7, "PBB7"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_MCLK_PCC0, "CAM_MCLK PCC0"),
+ PINCTRL_PIN(TEGRA_PIN_PCC1, "PCC1"),
+ PINCTRL_PIN(TEGRA_PIN_PCC2, "PCC2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_CLK_PCC4, "SDMMC4_CLK PCC4"),
+ PINCTRL_PIN(TEGRA_PIN_CLK2_REQ_PCC5, "CLK2_REQ PCC5"),
+ PINCTRL_PIN(TEGRA_PIN_CLK3_OUT_PEE0, "CLK3_OUT PEE0"),
+ PINCTRL_PIN(TEGRA_PIN_CLK3_REQ_PEE1, "CLK3_REQ PEE1"),
+ PINCTRL_PIN(TEGRA_PIN_CLK1_REQ_PEE2, "CLK1_REQ PEE2"),
+ PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4, "SDMMC3_CLK_LB_OUT PEE4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
+ PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
+ PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
+};
+
+static const unsigned clk_32k_out_pa0_pins[] = {
+ TEGRA_PIN_CLK_32K_OUT_PA0,
+};
+
+static const unsigned uart3_cts_n_pa1_pins[] = {
+ TEGRA_PIN_UART3_CTS_N_PA1,
+};
+
+static const unsigned dap2_fs_pa2_pins[] = {
+ TEGRA_PIN_DAP2_FS_PA2,
+};
+
+static const unsigned dap2_sclk_pa3_pins[] = {
+ TEGRA_PIN_DAP2_SCLK_PA3,
+};
+
+static const unsigned dap2_din_pa4_pins[] = {
+ TEGRA_PIN_DAP2_DIN_PA4,
+};
+
+static const unsigned dap2_dout_pa5_pins[] = {
+ TEGRA_PIN_DAP2_DOUT_PA5,
+};
+
+static const unsigned sdmmc3_clk_pa6_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_PA6,
+};
+
+static const unsigned sdmmc3_cmd_pa7_pins[] = {
+ TEGRA_PIN_SDMMC3_CMD_PA7,
+};
+
+static const unsigned gmi_a17_pb0_pins[] = {
+ TEGRA_PIN_GMI_A17_PB0,
+};
+
+static const unsigned gmi_a18_pb1_pins[] = {
+ TEGRA_PIN_GMI_A18_PB1,
+};
+
+static const unsigned sdmmc3_dat3_pb4_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT3_PB4,
+};
+
+static const unsigned sdmmc3_dat2_pb5_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT2_PB5,
+};
+
+static const unsigned sdmmc3_dat1_pb6_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT1_PB6,
+};
+
+static const unsigned sdmmc3_dat0_pb7_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT0_PB7,
+};
+
+static const unsigned uart3_rts_n_pc0_pins[] = {
+ TEGRA_PIN_UART3_RTS_N_PC0,
+};
+
+static const unsigned uart2_txd_pc2_pins[] = {
+ TEGRA_PIN_UART2_TXD_PC2,
+};
+
+static const unsigned uart2_rxd_pc3_pins[] = {
+ TEGRA_PIN_UART2_RXD_PC3,
+};
+
+static const unsigned gen1_i2c_scl_pc4_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SCL_PC4,
+};
+
+static const unsigned gen1_i2c_sda_pc5_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SDA_PC5,
+};
+
+static const unsigned gmi_wp_n_pc7_pins[] = {
+ TEGRA_PIN_GMI_WP_N_PC7,
+};
+
+static const unsigned gmi_ad0_pg0_pins[] = {
+ TEGRA_PIN_GMI_AD0_PG0,
+};
+
+static const unsigned gmi_ad1_pg1_pins[] = {
+ TEGRA_PIN_GMI_AD1_PG1,
+};
+
+static const unsigned gmi_ad2_pg2_pins[] = {
+ TEGRA_PIN_GMI_AD2_PG2,
+};
+
+static const unsigned gmi_ad3_pg3_pins[] = {
+ TEGRA_PIN_GMI_AD3_PG3,
+};
+
+static const unsigned gmi_ad4_pg4_pins[] = {
+ TEGRA_PIN_GMI_AD4_PG4,
+};
+
+static const unsigned gmi_ad5_pg5_pins[] = {
+ TEGRA_PIN_GMI_AD5_PG5,
+};
+
+static const unsigned gmi_ad6_pg6_pins[] = {
+ TEGRA_PIN_GMI_AD6_PG6,
+};
+
+static const unsigned gmi_ad7_pg7_pins[] = {
+ TEGRA_PIN_GMI_AD7_PG7,
+};
+
+static const unsigned gmi_ad8_ph0_pins[] = {
+ TEGRA_PIN_GMI_AD8_PH0,
+};
+
+static const unsigned gmi_ad9_ph1_pins[] = {
+ TEGRA_PIN_GMI_AD9_PH1,
+};
+
+static const unsigned gmi_ad10_ph2_pins[] = {
+ TEGRA_PIN_GMI_AD10_PH2,
+};
+
+static const unsigned gmi_ad11_ph3_pins[] = {
+ TEGRA_PIN_GMI_AD11_PH3,
+};
+
+static const unsigned gmi_ad12_ph4_pins[] = {
+ TEGRA_PIN_GMI_AD12_PH4,
+};
+
+static const unsigned gmi_ad13_ph5_pins[] = {
+ TEGRA_PIN_GMI_AD13_PH5,
+};
+
+static const unsigned gmi_ad14_ph6_pins[] = {
+ TEGRA_PIN_GMI_AD14_PH6,
+};
+
+static const unsigned gmi_ad15_ph7_pins[] = {
+ TEGRA_PIN_GMI_AD15_PH7,
+};
+
+static const unsigned gmi_wr_n_pi0_pins[] = {
+ TEGRA_PIN_GMI_WR_N_PI0,
+};
+
+static const unsigned gmi_oe_n_pi1_pins[] = {
+ TEGRA_PIN_GMI_OE_N_PI1,
+};
+
+static const unsigned gmi_cs6_n_pi3_pins[] = {
+ TEGRA_PIN_GMI_CS6_N_PI3,
+};
+
+static const unsigned gmi_rst_n_pi4_pins[] = {
+ TEGRA_PIN_GMI_RST_N_PI4,
+};
+
+static const unsigned gmi_iordy_pi5_pins[] = {
+ TEGRA_PIN_GMI_IORDY_PI5,
+};
+
+static const unsigned gmi_cs7_n_pi6_pins[] = {
+ TEGRA_PIN_GMI_CS7_N_PI6,
+};
+
+static const unsigned gmi_wait_pi7_pins[] = {
+ TEGRA_PIN_GMI_WAIT_PI7,
+};
+
+static const unsigned gmi_cs0_n_pj0_pins[] = {
+ TEGRA_PIN_GMI_CS0_N_PJ0,
+};
+
+static const unsigned gmi_cs1_n_pj2_pins[] = {
+ TEGRA_PIN_GMI_CS1_N_PJ2,
+};
+
+static const unsigned gmi_dqs_p_pj3_pins[] = {
+ TEGRA_PIN_GMI_DQS_P_PJ3,
+};
+
+static const unsigned uart2_cts_n_pj5_pins[] = {
+ TEGRA_PIN_UART2_CTS_N_PJ5,
+};
+
+static const unsigned uart2_rts_n_pj6_pins[] = {
+ TEGRA_PIN_UART2_RTS_N_PJ6,
+};
+
+static const unsigned gmi_a16_pj7_pins[] = {
+ TEGRA_PIN_GMI_A16_PJ7,
+};
+
+static const unsigned gmi_adv_n_pk0_pins[] = {
+ TEGRA_PIN_GMI_ADV_N_PK0,
+};
+
+static const unsigned gmi_clk_pk1_pins[] = {
+ TEGRA_PIN_GMI_CLK_PK1,
+};
+
+static const unsigned gmi_cs4_n_pk2_pins[] = {
+ TEGRA_PIN_GMI_CS4_N_PK2,
+};
+
+static const unsigned gmi_cs2_n_pk3_pins[] = {
+ TEGRA_PIN_GMI_CS2_N_PK3,
+};
+
+static const unsigned gmi_cs3_n_pk4_pins[] = {
+ TEGRA_PIN_GMI_CS3_N_PK4,
+};
+
+static const unsigned spdif_out_pk5_pins[] = {
+ TEGRA_PIN_SPDIF_OUT_PK5,
+};
+
+static const unsigned spdif_in_pk6_pins[] = {
+ TEGRA_PIN_SPDIF_IN_PK6,
+};
+
+static const unsigned gmi_a19_pk7_pins[] = {
+ TEGRA_PIN_GMI_A19_PK7,
+};
+
+static const unsigned dap1_fs_pn0_pins[] = {
+ TEGRA_PIN_DAP1_FS_PN0,
+};
+
+static const unsigned dap1_din_pn1_pins[] = {
+ TEGRA_PIN_DAP1_DIN_PN1,
+};
+
+static const unsigned dap1_dout_pn2_pins[] = {
+ TEGRA_PIN_DAP1_DOUT_PN2,
+};
+
+static const unsigned dap1_sclk_pn3_pins[] = {
+ TEGRA_PIN_DAP1_SCLK_PN3,
+};
+
+static const unsigned usb_vbus_en0_pn4_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN0_PN4,
+};
+
+static const unsigned usb_vbus_en1_pn5_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN1_PN5,
+};
+
+static const unsigned hdmi_int_pn7_pins[] = {
+ TEGRA_PIN_HDMI_INT_PN7,
+};
+
+static const unsigned ulpi_data7_po0_pins[] = {
+ TEGRA_PIN_ULPI_DATA7_PO0,
+};
+
+static const unsigned ulpi_data0_po1_pins[] = {
+ TEGRA_PIN_ULPI_DATA0_PO1,
+};
+
+static const unsigned ulpi_data1_po2_pins[] = {
+ TEGRA_PIN_ULPI_DATA1_PO2,
+};
+
+static const unsigned ulpi_data2_po3_pins[] = {
+ TEGRA_PIN_ULPI_DATA2_PO3,
+};
+
+static const unsigned ulpi_data3_po4_pins[] = {
+ TEGRA_PIN_ULPI_DATA3_PO4,
+};
+
+static const unsigned ulpi_data4_po5_pins[] = {
+ TEGRA_PIN_ULPI_DATA4_PO5,
+};
+
+static const unsigned ulpi_data5_po6_pins[] = {
+ TEGRA_PIN_ULPI_DATA5_PO6,
+};
+
+static const unsigned ulpi_data6_po7_pins[] = {
+ TEGRA_PIN_ULPI_DATA6_PO7,
+};
+
+static const unsigned dap3_fs_pp0_pins[] = {
+ TEGRA_PIN_DAP3_FS_PP0,
+};
+
+static const unsigned dap3_din_pp1_pins[] = {
+ TEGRA_PIN_DAP3_DIN_PP1,
+};
+
+static const unsigned dap3_dout_pp2_pins[] = {
+ TEGRA_PIN_DAP3_DOUT_PP2,
+};
+
+static const unsigned dap3_sclk_pp3_pins[] = {
+ TEGRA_PIN_DAP3_SCLK_PP3,
+};
+
+static const unsigned dap4_fs_pp4_pins[] = {
+ TEGRA_PIN_DAP4_FS_PP4,
+};
+
+static const unsigned dap4_din_pp5_pins[] = {
+ TEGRA_PIN_DAP4_DIN_PP5,
+};
+
+static const unsigned dap4_dout_pp6_pins[] = {
+ TEGRA_PIN_DAP4_DOUT_PP6,
+};
+
+static const unsigned dap4_sclk_pp7_pins[] = {
+ TEGRA_PIN_DAP4_SCLK_PP7,
+};
+
+static const unsigned kb_col0_pq0_pins[] = {
+ TEGRA_PIN_KB_COL0_PQ0,
+};
+
+static const unsigned kb_col1_pq1_pins[] = {
+ TEGRA_PIN_KB_COL1_PQ1,
+};
+
+static const unsigned kb_col2_pq2_pins[] = {
+ TEGRA_PIN_KB_COL2_PQ2,
+};
+
+static const unsigned kb_col3_pq3_pins[] = {
+ TEGRA_PIN_KB_COL3_PQ3,
+};
+
+static const unsigned kb_col4_pq4_pins[] = {
+ TEGRA_PIN_KB_COL4_PQ4,
+};
+
+static const unsigned kb_col5_pq5_pins[] = {
+ TEGRA_PIN_KB_COL5_PQ5,
+};
+
+static const unsigned kb_col6_pq6_pins[] = {
+ TEGRA_PIN_KB_COL6_PQ6,
+};
+
+static const unsigned kb_col7_pq7_pins[] = {
+ TEGRA_PIN_KB_COL7_PQ7,
+};
+
+static const unsigned kb_row0_pr0_pins[] = {
+ TEGRA_PIN_KB_ROW0_PR0,
+};
+
+static const unsigned kb_row1_pr1_pins[] = {
+ TEGRA_PIN_KB_ROW1_PR1,
+};
+
+static const unsigned kb_row2_pr2_pins[] = {
+ TEGRA_PIN_KB_ROW2_PR2,
+};
+
+static const unsigned kb_row3_pr3_pins[] = {
+ TEGRA_PIN_KB_ROW3_PR3,
+};
+
+static const unsigned kb_row4_pr4_pins[] = {
+ TEGRA_PIN_KB_ROW4_PR4,
+};
+
+static const unsigned kb_row5_pr5_pins[] = {
+ TEGRA_PIN_KB_ROW5_PR5,
+};
+
+static const unsigned kb_row6_pr6_pins[] = {
+ TEGRA_PIN_KB_ROW6_PR6,
+};
+
+static const unsigned kb_row7_pr7_pins[] = {
+ TEGRA_PIN_KB_ROW7_PR7,
+};
+
+static const unsigned kb_row8_ps0_pins[] = {
+ TEGRA_PIN_KB_ROW8_PS0,
+};
+
+static const unsigned kb_row9_ps1_pins[] = {
+ TEGRA_PIN_KB_ROW9_PS1,
+};
+
+static const unsigned kb_row10_ps2_pins[] = {
+ TEGRA_PIN_KB_ROW10_PS2,
+};
+
+static const unsigned gen2_i2c_scl_pt5_pins[] = {
+ TEGRA_PIN_GEN2_I2C_SCL_PT5,
+};
+
+static const unsigned gen2_i2c_sda_pt6_pins[] = {
+ TEGRA_PIN_GEN2_I2C_SDA_PT6,
+};
+
+static const unsigned sdmmc4_cmd_pt7_pins[] = {
+ TEGRA_PIN_SDMMC4_CMD_PT7,
+};
+
+static const unsigned pu0_pins[] = {
+ TEGRA_PIN_PU0,
+};
+
+static const unsigned pu1_pins[] = {
+ TEGRA_PIN_PU1,
+};
+
+static const unsigned pu2_pins[] = {
+ TEGRA_PIN_PU2,
+};
+
+static const unsigned pu3_pins[] = {
+ TEGRA_PIN_PU3,
+};
+
+static const unsigned pu4_pins[] = {
+ TEGRA_PIN_PU4,
+};
+
+static const unsigned pu5_pins[] = {
+ TEGRA_PIN_PU5,
+};
+
+static const unsigned pu6_pins[] = {
+ TEGRA_PIN_PU6,
+};
+
+static const unsigned pv0_pins[] = {
+ TEGRA_PIN_PV0,
+};
+
+static const unsigned pv1_pins[] = {
+ TEGRA_PIN_PV1,
+};
+
+static const unsigned sdmmc3_cd_n_pv2_pins[] = {
+ TEGRA_PIN_SDMMC3_CD_N_PV2,
+};
+
+static const unsigned sdmmc1_wp_n_pv3_pins[] = {
+ TEGRA_PIN_SDMMC1_WP_N_PV3,
+};
+
+static const unsigned ddc_scl_pv4_pins[] = {
+ TEGRA_PIN_DDC_SCL_PV4,
+};
+
+static const unsigned ddc_sda_pv5_pins[] = {
+ TEGRA_PIN_DDC_SDA_PV5,
+};
+
+static const unsigned gpio_w2_aud_pw2_pins[] = {
+ TEGRA_PIN_GPIO_W2_AUD_PW2,
+};
+
+static const unsigned gpio_w3_aud_pw3_pins[] = {
+ TEGRA_PIN_GPIO_W3_AUD_PW3,
+};
+
+static const unsigned clk1_out_pw4_pins[] = {
+ TEGRA_PIN_CLK1_OUT_PW4,
+};
+
+static const unsigned clk2_out_pw5_pins[] = {
+ TEGRA_PIN_CLK2_OUT_PW5,
+};
+
+static const unsigned uart3_txd_pw6_pins[] = {
+ TEGRA_PIN_UART3_TXD_PW6,
+};
+
+static const unsigned uart3_rxd_pw7_pins[] = {
+ TEGRA_PIN_UART3_RXD_PW7,
+};
+
+static const unsigned dvfs_pwm_px0_pins[] = {
+ TEGRA_PIN_DVFS_PWM_PX0,
+};
+
+static const unsigned gpio_x1_aud_px1_pins[] = {
+ TEGRA_PIN_GPIO_X1_AUD_PX1,
+};
+
+static const unsigned dvfs_clk_px2_pins[] = {
+ TEGRA_PIN_DVFS_CLK_PX2,
+};
+
+static const unsigned gpio_x3_aud_px3_pins[] = {
+ TEGRA_PIN_GPIO_X3_AUD_PX3,
+};
+
+static const unsigned gpio_x4_aud_px4_pins[] = {
+ TEGRA_PIN_GPIO_X4_AUD_PX4,
+};
+
+static const unsigned gpio_x5_aud_px5_pins[] = {
+ TEGRA_PIN_GPIO_X5_AUD_PX5,
+};
+
+static const unsigned gpio_x6_aud_px6_pins[] = {
+ TEGRA_PIN_GPIO_X6_AUD_PX6,
+};
+
+static const unsigned gpio_x7_aud_px7_pins[] = {
+ TEGRA_PIN_GPIO_X7_AUD_PX7,
+};
+
+static const unsigned ulpi_clk_py0_pins[] = {
+ TEGRA_PIN_ULPI_CLK_PY0,
+};
+
+static const unsigned ulpi_dir_py1_pins[] = {
+ TEGRA_PIN_ULPI_DIR_PY1,
+};
+
+static const unsigned ulpi_nxt_py2_pins[] = {
+ TEGRA_PIN_ULPI_NXT_PY2,
+};
+
+static const unsigned ulpi_stp_py3_pins[] = {
+ TEGRA_PIN_ULPI_STP_PY3,
+};
+
+static const unsigned sdmmc1_dat3_py4_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT3_PY4,
+};
+
+static const unsigned sdmmc1_dat2_py5_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT2_PY5,
+};
+
+static const unsigned sdmmc1_dat1_py6_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT1_PY6,
+};
+
+static const unsigned sdmmc1_dat0_py7_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT0_PY7,
+};
+
+static const unsigned sdmmc1_clk_pz0_pins[] = {
+ TEGRA_PIN_SDMMC1_CLK_PZ0,
+};
+
+static const unsigned sdmmc1_cmd_pz1_pins[] = {
+ TEGRA_PIN_SDMMC1_CMD_PZ1,
+};
+
+static const unsigned sys_clk_req_pz5_pins[] = {
+ TEGRA_PIN_SYS_CLK_REQ_PZ5,
+};
+
+static const unsigned pwr_i2c_scl_pz6_pins[] = {
+ TEGRA_PIN_PWR_I2C_SCL_PZ6,
+};
+
+static const unsigned pwr_i2c_sda_pz7_pins[] = {
+ TEGRA_PIN_PWR_I2C_SDA_PZ7,
+};
+
+static const unsigned sdmmc4_dat0_paa0_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT0_PAA0,
+};
+
+static const unsigned sdmmc4_dat1_paa1_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT1_PAA1,
+};
+
+static const unsigned sdmmc4_dat2_paa2_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT2_PAA2,
+};
+
+static const unsigned sdmmc4_dat3_paa3_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT3_PAA3,
+};
+
+static const unsigned sdmmc4_dat4_paa4_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT4_PAA4,
+};
+
+static const unsigned sdmmc4_dat5_paa5_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT5_PAA5,
+};
+
+static const unsigned sdmmc4_dat6_paa6_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT6_PAA6,
+};
+
+static const unsigned sdmmc4_dat7_paa7_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT7_PAA7,
+};
+
+static const unsigned pbb0_pins[] = {
+ TEGRA_PIN_PBB0,
+};
+
+static const unsigned cam_i2c_scl_pbb1_pins[] = {
+ TEGRA_PIN_CAM_I2C_SCL_PBB1,
+};
+
+static const unsigned cam_i2c_sda_pbb2_pins[] = {
+ TEGRA_PIN_CAM_I2C_SDA_PBB2,
+};
+
+static const unsigned pbb3_pins[] = {
+ TEGRA_PIN_PBB3,
+};
+
+static const unsigned pbb4_pins[] = {
+ TEGRA_PIN_PBB4,
+};
+
+static const unsigned pbb5_pins[] = {
+ TEGRA_PIN_PBB5,
+};
+
+static const unsigned pbb6_pins[] = {
+ TEGRA_PIN_PBB6,
+};
+
+static const unsigned pbb7_pins[] = {
+ TEGRA_PIN_PBB7,
+};
+
+static const unsigned cam_mclk_pcc0_pins[] = {
+ TEGRA_PIN_CAM_MCLK_PCC0,
+};
+
+static const unsigned pcc1_pins[] = {
+ TEGRA_PIN_PCC1,
+};
+
+static const unsigned pcc2_pins[] = {
+ TEGRA_PIN_PCC2,
+};
+
+static const unsigned sdmmc4_clk_pcc4_pins[] = {
+ TEGRA_PIN_SDMMC4_CLK_PCC4,
+};
+
+static const unsigned clk2_req_pcc5_pins[] = {
+ TEGRA_PIN_CLK2_REQ_PCC5,
+};
+
+static const unsigned clk3_out_pee0_pins[] = {
+ TEGRA_PIN_CLK3_OUT_PEE0,
+};
+
+static const unsigned clk3_req_pee1_pins[] = {
+ TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+static const unsigned clk1_req_pee2_pins[] = {
+ TEGRA_PIN_CLK1_REQ_PEE2,
+};
+
+static const unsigned hdmi_cec_pee3_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PEE3,
+};
+
+static const unsigned sdmmc3_clk_lb_out_pee4_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4,
+};
+
+static const unsigned sdmmc3_clk_lb_in_pee5_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
+};
+
+static const unsigned core_pwr_req_pins[] = {
+ TEGRA_PIN_CORE_PWR_REQ,
+};
+
+static const unsigned cpu_pwr_req_pins[] = {
+ TEGRA_PIN_CPU_PWR_REQ,
+};
+
+static const unsigned owr_pins[] = {
+ TEGRA_PIN_OWR,
+};
+
+static const unsigned pwr_int_n_pins[] = {
+ TEGRA_PIN_PWR_INT_N,
+};
+
+static const unsigned reset_out_n_pins[] = {
+ TEGRA_PIN_RESET_OUT_N,
+};
+
+static const unsigned drive_ao1_pins[] = {
+ TEGRA_PIN_KB_ROW0_PR0,
+ TEGRA_PIN_KB_ROW1_PR1,
+ TEGRA_PIN_KB_ROW2_PR2,
+ TEGRA_PIN_KB_ROW3_PR3,
+ TEGRA_PIN_KB_ROW4_PR4,
+ TEGRA_PIN_KB_ROW5_PR5,
+ TEGRA_PIN_KB_ROW6_PR6,
+ TEGRA_PIN_KB_ROW7_PR7,
+ TEGRA_PIN_PWR_I2C_SCL_PZ6,
+ TEGRA_PIN_PWR_I2C_SDA_PZ7,
+};
+
+static const unsigned drive_ao2_pins[] = {
+ TEGRA_PIN_CLK_32K_OUT_PA0,
+ TEGRA_PIN_KB_COL0_PQ0,
+ TEGRA_PIN_KB_COL1_PQ1,
+ TEGRA_PIN_KB_COL2_PQ2,
+ TEGRA_PIN_KB_COL3_PQ3,
+ TEGRA_PIN_KB_COL4_PQ4,
+ TEGRA_PIN_KB_COL5_PQ5,
+ TEGRA_PIN_KB_COL6_PQ6,
+ TEGRA_PIN_KB_COL7_PQ7,
+ TEGRA_PIN_KB_ROW8_PS0,
+ TEGRA_PIN_KB_ROW9_PS1,
+ TEGRA_PIN_KB_ROW10_PS2,
+ TEGRA_PIN_SYS_CLK_REQ_PZ5,
+ TEGRA_PIN_CORE_PWR_REQ,
+ TEGRA_PIN_CPU_PWR_REQ,
+ TEGRA_PIN_RESET_OUT_N,
+};
+
+static const unsigned drive_at1_pins[] = {
+ TEGRA_PIN_GMI_AD8_PH0,
+ TEGRA_PIN_GMI_AD9_PH1,
+ TEGRA_PIN_GMI_AD10_PH2,
+ TEGRA_PIN_GMI_AD11_PH3,
+ TEGRA_PIN_GMI_AD12_PH4,
+ TEGRA_PIN_GMI_AD13_PH5,
+ TEGRA_PIN_GMI_AD14_PH6,
+ TEGRA_PIN_GMI_AD15_PH7,
+
+ TEGRA_PIN_GMI_IORDY_PI5,
+ TEGRA_PIN_GMI_CS7_N_PI6,
+};
+
+static const unsigned drive_at2_pins[] = {
+ TEGRA_PIN_GMI_AD0_PG0,
+ TEGRA_PIN_GMI_AD1_PG1,
+ TEGRA_PIN_GMI_AD2_PG2,
+ TEGRA_PIN_GMI_AD3_PG3,
+ TEGRA_PIN_GMI_AD4_PG4,
+ TEGRA_PIN_GMI_AD5_PG5,
+ TEGRA_PIN_GMI_AD6_PG6,
+ TEGRA_PIN_GMI_AD7_PG7,
+
+ TEGRA_PIN_GMI_WR_N_PI0,
+ TEGRA_PIN_GMI_OE_N_PI1,
+ TEGRA_PIN_GMI_CS6_N_PI3,
+ TEGRA_PIN_GMI_RST_N_PI4,
+ TEGRA_PIN_GMI_WAIT_PI7,
+
+ TEGRA_PIN_GMI_DQS_P_PJ3,
+
+ TEGRA_PIN_GMI_ADV_N_PK0,
+ TEGRA_PIN_GMI_CLK_PK1,
+ TEGRA_PIN_GMI_CS4_N_PK2,
+ TEGRA_PIN_GMI_CS2_N_PK3,
+ TEGRA_PIN_GMI_CS3_N_PK4,
+};
+
+static const unsigned drive_at3_pins[] = {
+ TEGRA_PIN_GMI_WP_N_PC7,
+ TEGRA_PIN_GMI_CS0_N_PJ0,
+};
+
+static const unsigned drive_at4_pins[] = {
+ TEGRA_PIN_GMI_A17_PB0,
+ TEGRA_PIN_GMI_A18_PB1,
+ TEGRA_PIN_GMI_CS1_N_PJ2,
+ TEGRA_PIN_GMI_A16_PJ7,
+ TEGRA_PIN_GMI_A19_PK7,
+};
+
+static const unsigned drive_at5_pins[] = {
+ TEGRA_PIN_GEN2_I2C_SCL_PT5,
+ TEGRA_PIN_GEN2_I2C_SDA_PT6,
+};
+
+static const unsigned drive_cdev1_pins[] = {
+ TEGRA_PIN_CLK1_OUT_PW4,
+ TEGRA_PIN_CLK1_REQ_PEE2,
+};
+
+static const unsigned drive_cdev2_pins[] = {
+ TEGRA_PIN_CLK2_OUT_PW5,
+ TEGRA_PIN_CLK2_REQ_PCC5,
+ TEGRA_PIN_SDMMC1_WP_N_PV3,
+};
+
+static const unsigned drive_dap1_pins[] = {
+ TEGRA_PIN_DAP1_FS_PN0,
+ TEGRA_PIN_DAP1_DIN_PN1,
+ TEGRA_PIN_DAP1_DOUT_PN2,
+ TEGRA_PIN_DAP1_SCLK_PN3,
+};
+
+static const unsigned drive_dap2_pins[] = {
+ TEGRA_PIN_DAP2_FS_PA2,
+ TEGRA_PIN_DAP2_SCLK_PA3,
+ TEGRA_PIN_DAP2_DIN_PA4,
+ TEGRA_PIN_DAP2_DOUT_PA5,
+};
+
+static const unsigned drive_dap3_pins[] = {
+ TEGRA_PIN_DAP3_FS_PP0,
+ TEGRA_PIN_DAP3_DIN_PP1,
+ TEGRA_PIN_DAP3_DOUT_PP2,
+ TEGRA_PIN_DAP3_SCLK_PP3,
+};
+
+static const unsigned drive_dap4_pins[] = {
+ TEGRA_PIN_DAP4_FS_PP4,
+ TEGRA_PIN_DAP4_DIN_PP5,
+ TEGRA_PIN_DAP4_DOUT_PP6,
+ TEGRA_PIN_DAP4_SCLK_PP7,
+};
+
+static const unsigned drive_dbg_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SCL_PC4,
+ TEGRA_PIN_GEN1_I2C_SDA_PC5,
+ TEGRA_PIN_PU0,
+ TEGRA_PIN_PU1,
+ TEGRA_PIN_PU2,
+ TEGRA_PIN_PU3,
+ TEGRA_PIN_PU4,
+ TEGRA_PIN_PU5,
+ TEGRA_PIN_PU6,
+};
+
+static const unsigned drive_sdio3_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_PA6,
+ TEGRA_PIN_SDMMC3_CMD_PA7,
+ TEGRA_PIN_SDMMC3_DAT3_PB4,
+ TEGRA_PIN_SDMMC3_DAT2_PB5,
+ TEGRA_PIN_SDMMC3_DAT1_PB6,
+ TEGRA_PIN_SDMMC3_DAT0_PB7,
+ TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4,
+ TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
+};
+
+static const unsigned drive_spi_pins[] = {
+ TEGRA_PIN_DVFS_PWM_PX0,
+ TEGRA_PIN_GPIO_X1_AUD_PX1,
+ TEGRA_PIN_DVFS_CLK_PX2,
+ TEGRA_PIN_GPIO_X3_AUD_PX3,
+ TEGRA_PIN_GPIO_X4_AUD_PX4,
+ TEGRA_PIN_GPIO_X5_AUD_PX5,
+ TEGRA_PIN_GPIO_X6_AUD_PX6,
+ TEGRA_PIN_GPIO_X7_AUD_PX7,
+ TEGRA_PIN_GPIO_W2_AUD_PW2,
+ TEGRA_PIN_GPIO_W3_AUD_PW3,
+};
+
+static const unsigned drive_uaa_pins[] = {
+ TEGRA_PIN_ULPI_DATA0_PO1,
+ TEGRA_PIN_ULPI_DATA1_PO2,
+ TEGRA_PIN_ULPI_DATA2_PO3,
+ TEGRA_PIN_ULPI_DATA3_PO4,
+};
+
+static const unsigned drive_uab_pins[] = {
+ TEGRA_PIN_ULPI_DATA7_PO0,
+ TEGRA_PIN_ULPI_DATA4_PO5,
+ TEGRA_PIN_ULPI_DATA5_PO6,
+ TEGRA_PIN_ULPI_DATA6_PO7,
+ TEGRA_PIN_PV0,
+ TEGRA_PIN_PV1,
+};
+
+static const unsigned drive_uart2_pins[] = {
+ TEGRA_PIN_UART2_TXD_PC2,
+ TEGRA_PIN_UART2_RXD_PC3,
+ TEGRA_PIN_UART2_CTS_N_PJ5,
+ TEGRA_PIN_UART2_RTS_N_PJ6,
+};
+
+static const unsigned drive_uart3_pins[] = {
+ TEGRA_PIN_UART3_CTS_N_PA1,
+ TEGRA_PIN_UART3_RTS_N_PC0,
+ TEGRA_PIN_UART3_TXD_PW6,
+ TEGRA_PIN_UART3_RXD_PW7,
+};
+
+static const unsigned drive_sdio1_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT3_PY4,
+ TEGRA_PIN_SDMMC1_DAT2_PY5,
+ TEGRA_PIN_SDMMC1_DAT1_PY6,
+ TEGRA_PIN_SDMMC1_DAT0_PY7,
+ TEGRA_PIN_SDMMC1_CLK_PZ0,
+ TEGRA_PIN_SDMMC1_CMD_PZ1,
+};
+
+static const unsigned drive_ddc_pins[] = {
+ TEGRA_PIN_DDC_SCL_PV4,
+ TEGRA_PIN_DDC_SDA_PV5,
+};
+
+static const unsigned drive_gma_pins[] = {
+ TEGRA_PIN_SDMMC4_CLK_PCC4,
+ TEGRA_PIN_SDMMC4_CMD_PT7,
+ TEGRA_PIN_SDMMC4_DAT0_PAA0,
+ TEGRA_PIN_SDMMC4_DAT1_PAA1,
+ TEGRA_PIN_SDMMC4_DAT2_PAA2,
+ TEGRA_PIN_SDMMC4_DAT3_PAA3,
+ TEGRA_PIN_SDMMC4_DAT4_PAA4,
+ TEGRA_PIN_SDMMC4_DAT5_PAA5,
+ TEGRA_PIN_SDMMC4_DAT6_PAA6,
+ TEGRA_PIN_SDMMC4_DAT7_PAA7,
+};
+
+static const unsigned drive_gme_pins[] = {
+ TEGRA_PIN_PBB0,
+ TEGRA_PIN_CAM_I2C_SCL_PBB1,
+ TEGRA_PIN_CAM_I2C_SDA_PBB2,
+ TEGRA_PIN_PBB3,
+ TEGRA_PIN_PCC2,
+};
+
+static const unsigned drive_gmf_pins[] = {
+ TEGRA_PIN_PBB4,
+ TEGRA_PIN_PBB5,
+ TEGRA_PIN_PBB6,
+ TEGRA_PIN_PBB7,
+};
+
+static const unsigned drive_gmg_pins[] = {
+ TEGRA_PIN_CAM_MCLK_PCC0,
+};
+
+static const unsigned drive_gmh_pins[] = {
+ TEGRA_PIN_PCC1,
+};
+
+static const unsigned drive_owr_pins[] = {
+ TEGRA_PIN_SDMMC3_CD_N_PV2,
+};
+
+static const unsigned drive_uda_pins[] = {
+ TEGRA_PIN_ULPI_CLK_PY0,
+ TEGRA_PIN_ULPI_DIR_PY1,
+ TEGRA_PIN_ULPI_NXT_PY2,
+ TEGRA_PIN_ULPI_STP_PY3,
+};
+
+static const unsigned drive_dev3_pins[] = {
+ TEGRA_PIN_CLK3_OUT_PEE0,
+ TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+enum tegra_mux {
+ TEGRA_MUX_BLINK,
+ TEGRA_MUX_CEC,
+ TEGRA_MUX_CLDVFS,
+ TEGRA_MUX_CLK12,
+ TEGRA_MUX_CPU,
+ TEGRA_MUX_DAP,
+ TEGRA_MUX_DAP1,
+ TEGRA_MUX_DAP2,
+ TEGRA_MUX_DEV3,
+ TEGRA_MUX_DISPLAYA,
+ TEGRA_MUX_DISPLAYA_ALT,
+ TEGRA_MUX_DISPLAYB,
+ TEGRA_MUX_DTV,
+ TEGRA_MUX_EMC_DLL,
+ TEGRA_MUX_EXTPERIPH1,
+ TEGRA_MUX_EXTPERIPH2,
+ TEGRA_MUX_EXTPERIPH3,
+ TEGRA_MUX_GMI,
+ TEGRA_MUX_GMI_ALT,
+ TEGRA_MUX_HDA,
+ TEGRA_MUX_HSI,
+ TEGRA_MUX_I2C1,
+ TEGRA_MUX_I2C2,
+ TEGRA_MUX_I2C3,
+ TEGRA_MUX_I2C4,
+ TEGRA_MUX_I2CPWR,
+ TEGRA_MUX_I2S0,
+ TEGRA_MUX_I2S1,
+ TEGRA_MUX_I2S2,
+ TEGRA_MUX_I2S3,
+ TEGRA_MUX_I2S4,
+ TEGRA_MUX_IRDA,
+ TEGRA_MUX_KBC,
+ TEGRA_MUX_NAND,
+ TEGRA_MUX_NAND_ALT,
+ TEGRA_MUX_OWR,
+ TEGRA_MUX_PMI,
+ TEGRA_MUX_PWM0,
+ TEGRA_MUX_PWM1,
+ TEGRA_MUX_PWM2,
+ TEGRA_MUX_PWM3,
+ TEGRA_MUX_PWRON,
+ TEGRA_MUX_RESET_OUT_N,
+ TEGRA_MUX_RSVD1,
+ TEGRA_MUX_RSVD2,
+ TEGRA_MUX_RSVD3,
+ TEGRA_MUX_RSVD4,
+ TEGRA_MUX_SDMMC1,
+ TEGRA_MUX_SDMMC2,
+ TEGRA_MUX_SDMMC3,
+ TEGRA_MUX_SDMMC4,
+ TEGRA_MUX_SOC,
+ TEGRA_MUX_SPDIF,
+ TEGRA_MUX_SPI1,
+ TEGRA_MUX_SPI2,
+ TEGRA_MUX_SPI3,
+ TEGRA_MUX_SPI4,
+ TEGRA_MUX_SPI5,
+ TEGRA_MUX_SPI6,
+ TEGRA_MUX_SYSCLK,
+ TEGRA_MUX_TRACE,
+ TEGRA_MUX_UARTA,
+ TEGRA_MUX_UARTB,
+ TEGRA_MUX_UARTC,
+ TEGRA_MUX_UARTD,
+ TEGRA_MUX_ULPI,
+ TEGRA_MUX_USB,
+ TEGRA_MUX_VGP1,
+ TEGRA_MUX_VGP2,
+ TEGRA_MUX_VGP3,
+ TEGRA_MUX_VGP4,
+ TEGRA_MUX_VGP5,
+ TEGRA_MUX_VGP6,
+ TEGRA_MUX_VI,
+ TEGRA_MUX_VI_ALT1,
+ TEGRA_MUX_VI_ALT3,
+};
+
+static const char * const blink_groups[] = {
+ "clk_32k_out_pa0",
+};
+
+static const char * const cec_groups[] = {
+ "hdmi_cec_pee3",
+};
+
+static const char * const cldvfs_groups[] = {
+ "gmi_ad9_ph1",
+ "gmi_ad10_ph2",
+ "kb_row7_pr7",
+ "kb_row8_ps0",
+ "dvfs_pwm_px0",
+ "dvfs_clk_px2",
+};
+
+static const char * const clk12_groups[] = {
+ "sdmmc1_wp_n_pv3",
+ "sdmmc1_clk_pz0",
+};
+
+static const char * const cpu_groups[] = {
+ "cpu_pwr_req",
+};
+
+static const char * const dap_groups[] = {
+ "clk1_req_pee2",
+ "clk2_req_pcc5",
+};
+
+static const char * const dap1_groups[] = {
+ "clk1_req_pee2",
+};
+
+static const char * const dap2_groups[] = {
+ "clk1_out_pw4",
+ "gpio_x4_aud_px4",
+};
+
+static const char * const dev3_groups[] = {
+ "clk3_req_pee1",
+};
+
+static const char * const displaya_groups[] = {
+ "dap3_fs_pp0",
+ "dap3_din_pp1",
+ "dap3_dout_pp2",
+ "dap3_sclk_pp3",
+ "uart3_rts_n_pc0",
+ "pu3",
+ "pu4",
+ "pu5",
+ "pbb3",
+ "pbb4",
+ "pbb5",
+ "pbb6",
+ "kb_row3_pr3",
+ "kb_row4_pr4",
+ "kb_row5_pr5",
+ "kb_row6_pr6",
+ "kb_col3_pq3",
+ "sdmmc3_dat2_pb5",
+};
+
+static const char * const displaya_alt_groups[] = {
+ "kb_row6_pr6",
+};
+
+static const char * const displayb_groups[] = {
+ "dap3_fs_pp0",
+ "dap3_din_pp1",
+ "dap3_dout_pp2",
+ "dap3_sclk_pp3",
+ "pu3",
+ "pu4",
+ "pu5",
+ "pu6",
+ "pbb3",
+ "pbb4",
+ "pbb5",
+ "pbb6",
+ "kb_row3_pr3",
+ "kb_row4_pr4",
+ "kb_row5_pr5",
+ "kb_row6_pr6",
+ "sdmmc3_dat3_pb4",
+};
+
+static const char * const dtv_groups[] = {
+ "uart3_cts_n_pa1",
+ "uart3_rts_n_pc0",
+ "dap4_fs_pp4",
+ "dap4_dout_pp6",
+ "gmi_wait_pi7",
+ "gmi_ad8_ph0",
+ "gmi_ad14_ph6",
+ "gmi_ad15_ph7",
+};
+
+static const char * const emc_dll_groups[] = {
+ "kb_col0_pq0",
+ "kb_col1_pq1",
+};
+
+static const char * const extperiph1_groups[] = {
+ "clk1_out_pw4",
+};
+
+static const char * const extperiph2_groups[] = {
+ "clk2_out_pw5",
+};
+
+static const char * const extperiph3_groups[] = {
+ "clk3_out_pee0",
+};
+
+static const char * const gmi_groups[] = {
+ "gmi_wp_n_pc7",
+
+ "gmi_ad0_pg0",
+ "gmi_ad1_pg1",
+ "gmi_ad2_pg2",
+ "gmi_ad3_pg3",
+ "gmi_ad4_pg4",
+ "gmi_ad5_pg5",
+ "gmi_ad6_pg6",
+ "gmi_ad7_pg7",
+ "gmi_ad8_ph0",
+ "gmi_ad9_ph1",
+ "gmi_ad10_ph2",
+ "gmi_ad11_ph3",
+ "gmi_ad12_ph4",
+ "gmi_ad13_ph5",
+ "gmi_ad14_ph6",
+ "gmi_ad15_ph7",
+ "gmi_wr_n_pi0",
+ "gmi_oe_n_pi1",
+ "gmi_cs6_n_pi3",
+ "gmi_rst_n_pi4",
+ "gmi_iordy_pi5",
+ "gmi_cs7_n_pi6",
+ "gmi_wait_pi7",
+ "gmi_cs0_n_pj0",
+ "gmi_cs1_n_pj2",
+ "gmi_dqs_p_pj3",
+ "gmi_adv_n_pk0",
+ "gmi_clk_pk1",
+ "gmi_cs4_n_pk2",
+ "gmi_cs2_n_pk3",
+ "gmi_cs3_n_pk4",
+ "gmi_a16_pj7",
+ "gmi_a17_pb0",
+ "gmi_a18_pb1",
+ "gmi_a19_pk7",
+ "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6",
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat5_paa5",
+ "sdmmc4_dat6_paa6",
+ "sdmmc4_dat7_paa7",
+ "sdmmc4_clk_pcc4",
+ "sdmmc4_cmd_pt7",
+ "dap1_fs_pn0",
+ "dap1_din_pn1",
+ "dap1_dout_pn2",
+ "dap1_sclk_pn3",
+};
+
+static const char * const gmi_alt_groups[] = {
+ "gmi_wp_n_pc7",
+ "gmi_cs3_n_pk4",
+ "gmi_a16_pj7",
+};
+
+static const char * const hda_groups[] = {
+ "dap1_fs_pn0",
+ "dap1_din_pn1",
+ "dap1_dout_pn2",
+ "dap1_sclk_pn3",
+ "dap2_fs_pa2",
+ "dap2_sclk_pa3",
+ "dap2_din_pa4",
+ "dap2_dout_pa5",
+};
+
+static const char * const hsi_groups[] = {
+ "ulpi_data0_po1",
+ "ulpi_data1_po2",
+ "ulpi_data2_po3",
+ "ulpi_data3_po4",
+ "ulpi_data4_po5",
+ "ulpi_data5_po6",
+ "ulpi_data6_po7",
+ "ulpi_data7_po0",
+};
+
+static const char * const i2c1_groups[] = {
+ "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5",
+ "gpio_w2_aud_pw2",
+ "gpio_w3_aud_pw3",
+};
+
+static const char * const i2c2_groups[] = {
+ "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6",
+};
+
+static const char * const i2c3_groups[] = {
+ "cam_i2c_scl_pbb1",
+ "cam_i2c_sda_pbb2",
+};
+
+static const char * const i2c4_groups[] = {
+ "ddc_scl_pv4",
+ "ddc_sda_pv5",
+};
+
+static const char * const i2cpwr_groups[] = {
+ "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7",
+};
+
+static const char * const i2s0_groups[] = {
+ "dap1_fs_pn0",
+ "dap1_din_pn1",
+ "dap1_dout_pn2",
+ "dap1_sclk_pn3",
+};
+
+static const char * const i2s1_groups[] = {
+ "dap2_fs_pa2",
+ "dap2_sclk_pa3",
+ "dap2_din_pa4",
+ "dap2_dout_pa5",
+};
+
+static const char * const i2s2_groups[] = {
+ "dap3_fs_pp0",
+ "dap3_din_pp1",
+ "dap3_dout_pp2",
+ "dap3_sclk_pp3",
+};
+
+static const char * const i2s3_groups[] = {
+ "dap4_fs_pp4",
+ "dap4_din_pp5",
+ "dap4_dout_pp6",
+ "dap4_sclk_pp7",
+};
+
+static const char * const i2s4_groups[] = {
+ "pcc1",
+ "pbb0",
+ "pbb7",
+ "pcc2",
+};
+
+static const char * const irda_groups[] = {
+ "uart2_rxd_pc3",
+ "uart2_txd_pc2",
+};
+
+static const char * const kbc_groups[] = {
+ "kb_row0_pr0",
+ "kb_row1_pr1",
+ "kb_row2_pr2",
+ "kb_row3_pr3",
+ "kb_row4_pr4",
+ "kb_row5_pr5",
+ "kb_row6_pr6",
+ "kb_row7_pr7",
+ "kb_row8_ps0",
+ "kb_row9_ps1",
+ "kb_row10_ps2",
+ "kb_col0_pq0",
+ "kb_col1_pq1",
+ "kb_col2_pq2",
+ "kb_col3_pq3",
+ "kb_col4_pq4",
+ "kb_col5_pq5",
+ "kb_col6_pq6",
+ "kb_col7_pq7",
+};
+
+static const char * const nand_groups[] = {
+ "gmi_wp_n_pc7",
+ "gmi_wait_pi7",
+ "gmi_adv_n_pk0",
+ "gmi_clk_pk1",
+ "gmi_cs0_n_pj0",
+ "gmi_cs1_n_pj2",
+ "gmi_cs2_n_pk3",
+ "gmi_cs3_n_pk4",
+ "gmi_cs4_n_pk2",
+ "gmi_cs6_n_pi3",
+ "gmi_cs7_n_pi6",
+ "gmi_ad0_pg0",
+ "gmi_ad1_pg1",
+ "gmi_ad2_pg2",
+ "gmi_ad3_pg3",
+ "gmi_ad4_pg4",
+ "gmi_ad5_pg5",
+ "gmi_ad6_pg6",
+ "gmi_ad7_pg7",
+ "gmi_ad8_ph0",
+ "gmi_ad9_ph1",
+ "gmi_ad10_ph2",
+ "gmi_ad11_ph3",
+ "gmi_ad12_ph4",
+ "gmi_ad13_ph5",
+ "gmi_ad14_ph6",
+ "gmi_ad15_ph7",
+ "gmi_wr_n_pi0",
+ "gmi_oe_n_pi1",
+ "gmi_dqs_p_pj3",
+ "gmi_rst_n_pi4",
+};
+
+static const char * const nand_alt_groups[] = {
+ "gmi_cs6_n_pi3",
+ "gmi_cs7_n_pi6",
+ "gmi_rst_n_pi4",
+};
+
+static const char * const owr_groups[] = {
+ "pu0",
+ "kb_col4_pq4",
+ "owr",
+ "sdmmc3_cd_n_pv2",
+};
+
+static const char * const pmi_groups[] = {
+ "pwr_int_n",
+};
+
+static const char * const pwm0_groups[] = {
+ "sdmmc1_dat2_py5",
+ "uart3_rts_n_pc0",
+ "pu3",
+ "gmi_ad8_ph0",
+ "sdmmc3_dat3_pb4",
+};
+
+static const char * const pwm1_groups[] = {
+ "sdmmc1_dat1_py6",
+ "pu4",
+ "gmi_ad9_ph1",
+ "sdmmc3_dat2_pb5",
+};
+
+static const char * const pwm2_groups[] = {
+ "pu5",
+ "gmi_ad10_ph2",
+ "kb_col3_pq3",
+ "sdmmc3_dat1_pb6",
+};
+
+static const char * const pwm3_groups[] = {
+ "pu6",
+ "gmi_ad11_ph3",
+ "sdmmc3_cmd_pa7",
+};
+
+static const char * const pwron_groups[] = {
+ "core_pwr_req",
+};
+
+static const char * const reset_out_n_groups[] = {
+ "reset_out_n",
+};
+
+static const char * const rsvd1_groups[] = {
+ "pv1",
+ "hdmi_int_pn7",
+ "pu1",
+ "pu2",
+ "gmi_wp_n_pc7",
+ "gmi_adv_n_pk0",
+ "gmi_cs0_n_pj0",
+ "gmi_cs1_n_pj2",
+ "gmi_ad0_pg0",
+ "gmi_ad1_pg1",
+ "gmi_ad2_pg2",
+ "gmi_ad3_pg3",
+ "gmi_ad4_pg4",
+ "gmi_ad5_pg5",
+ "gmi_ad6_pg6",
+ "gmi_ad7_pg7",
+ "gmi_wr_n_pi0",
+ "gmi_oe_n_pi1",
+ "gpio_x4_aud_px4",
+ "gpio_x5_aud_px5",
+ "gpio_x7_aud_px7",
+
+ "reset_out_n",
+};
+
+static const char * const rsvd2_groups[] = {
+ "pv0",
+ "pv1",
+ "sdmmc1_dat0_py7",
+ "clk2_out_pw5",
+ "clk2_req_pcc5",
+ "hdmi_int_pn7",
+ "ddc_scl_pv4",
+ "ddc_sda_pv5",
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+ "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5",
+ "dap4_fs_pp4",
+ "dap4_din_pp5",
+ "dap4_dout_pp6",
+ "dap4_sclk_pp7",
+ "clk3_out_pee0",
+ "clk3_req_pee1",
+ "gmi_iordy_pi5",
+ "gmi_a17_pb0",
+ "gmi_a18_pb1",
+ "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6",
+ "sdmmc4_clk_pcc4",
+ "sdmmc4_cmd_pt7",
+ "sdmmc4_dat7_paa7",
+ "pcc1",
+ "pbb7",
+ "pcc2",
+ "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7",
+ "kb_row0_pr0",
+ "kb_row1_pr1",
+ "kb_row2_pr2",
+ "kb_row7_pr7",
+ "kb_row8_ps0",
+ "kb_row9_ps1",
+ "kb_row10_ps2",
+ "kb_col1_pq1",
+ "kb_col2_pq2",
+ "kb_col5_pq5",
+ "kb_col6_pq6",
+ "kb_col7_pq7",
+ "sys_clk_req_pz5",
+ "core_pwr_req",
+ "cpu_pwr_req",
+ "pwr_int_n",
+ "owr",
+ "spdif_out_pk5",
+ "gpio_x1_aud_px1",
+ "sdmmc3_clk_pa6",
+ "sdmmc3_dat0_pb7",
+ "gpio_w2_aud_pw2",
+ "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5",
+ "sdmmc3_clk_lb_out_pee4",
+ "sdmmc3_clk_lb_in_pee5",
+ "reset_out_n",
+};
+
+static const char * const rsvd3_groups[] = {
+ "pv0",
+ "pv1",
+ "sdmmc1_clk_pz0",
+ "clk2_out_pw5",
+ "clk2_req_pcc5",
+ "hdmi_int_pn7",
+ "ddc_scl_pv4",
+ "ddc_sda_pv5",
+ "uart2_rts_n_pj6",
+ "uart2_cts_n_pj5",
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+ "pu0",
+ "pu1",
+ "pu2",
+ "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5",
+ "dap4_din_pp5",
+ "dap4_sclk_pp7",
+ "clk3_out_pee0",
+ "clk3_req_pee1",
+ "pcc1",
+ "cam_i2c_scl_pbb1",
+ "cam_i2c_sda_pbb2",
+ "pbb7",
+ "pcc2",
+ "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7",
+ "kb_row0_pr0",
+ "kb_row1_pr1",
+ "kb_row2_pr2",
+ "kb_row3_pr3",
+ "kb_row9_ps1",
+ "kb_row10_ps2",
+ "clk_32k_out_pa0",
+ "sys_clk_req_pz5",
+ "core_pwr_req",
+ "cpu_pwr_req",
+ "pwr_int_n",
+ "owr",
+ "clk1_req_pee2",
+ "clk1_out_pw4",
+ "spdif_out_pk5",
+ "spdif_in_pk6",
+ "dap2_fs_pa2",
+ "dap2_sclk_pa3",
+ "dap2_din_pa4",
+ "dap2_dout_pa5",
+ "dvfs_pwm_px0",
+ "gpio_x1_aud_px1",
+ "gpio_x3_aud_px3",
+ "dvfs_clk_px2",
+ "sdmmc3_clk_pa6",
+ "sdmmc3_dat0_pb7",
+ "hdmi_cec_pee3",
+ "sdmmc3_cd_n_pv2",
+ "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5",
+ "sdmmc3_clk_lb_out_pee4",
+ "sdmmc3_clk_lb_in_pee5",
+ "reset_out_n",
+};
+
+static const char * const rsvd4_groups[] = {
+ "pv0",
+ "pv1",
+ "sdmmc1_clk_pz0",
+ "clk2_out_pw5",
+ "clk2_req_pcc5",
+ "hdmi_int_pn7",
+ "ddc_scl_pv4",
+ "ddc_sda_pv5",
+ "pu0",
+ "pu1",
+ "pu2",
+ "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5",
+ "dap4_fs_pp4",
+ "dap4_din_pp5",
+ "dap4_dout_pp6",
+ "dap4_sclk_pp7",
+ "clk3_out_pee0",
+ "clk3_req_pee1",
+ "gmi_ad0_pg0",
+ "gmi_ad1_pg1",
+ "gmi_ad2_pg2",
+ "gmi_ad3_pg3",
+ "gmi_ad4_pg4",
+ "gmi_ad12_ph4",
+ "gmi_ad13_ph5",
+ "gmi_rst_n_pi4",
+ "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6",
+ "sdmmc4_clk_pcc4",
+ "sdmmc4_cmd_pt7",
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat5_paa5",
+ "sdmmc4_dat6_paa6",
+ "sdmmc4_dat7_paa7",
+ "cam_mclk_pcc0",
+ "pcc1",
+ "cam_i2c_scl_pbb1",
+ "cam_i2c_sda_pbb2",
+ "pbb3",
+ "pbb4",
+ "pbb5",
+ "pbb6",
+ "pbb7",
+ "pcc2",
+ "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7",
+ "kb_row0_pr0",
+ "kb_row1_pr1",
+ "kb_row2_pr2",
+ "kb_col2_pq2",
+ "kb_col5_pq5",
+ "kb_col6_pq6",
+ "kb_col7_pq7",
+ "clk_32k_out_pa0",
+ "sys_clk_req_pz5",
+ "core_pwr_req",
+ "cpu_pwr_req",
+ "pwr_int_n",
+ "owr",
+ "dap1_fs_pn0",
+ "dap1_din_pn1",
+ "dap1_dout_pn2",
+ "dap1_sclk_pn3",
+ "clk1_req_pee2",
+ "clk1_out_pw4",
+ "spdif_in_pk6",
+ "spdif_out_pk5",
+ "dap2_fs_pa2",
+ "dap2_sclk_pa3",
+ "dap2_din_pa4",
+ "dap2_dout_pa5",
+ "dvfs_pwm_px0",
+ "gpio_x1_aud_px1",
+ "gpio_x3_aud_px3",
+ "dvfs_clk_px2",
+ "gpio_x5_aud_px5",
+ "gpio_x6_aud_px6",
+ "gpio_x7_aud_px7",
+ "sdmmc3_cd_n_pv2",
+ "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5",
+ "sdmmc3_clk_lb_in_pee5",
+ "sdmmc3_clk_lb_out_pee4",
+};
+
+static const char * const sdmmc1_groups[] = {
+ "sdmmc1_clk_pz0",
+ "sdmmc1_cmd_pz1",
+ "sdmmc1_dat3_py4",
+ "sdmmc1_dat2_py5",
+ "sdmmc1_dat1_py6",
+ "sdmmc1_dat0_py7",
+ "uart3_cts_n_pa1",
+ "kb_col5_pq5",
+ "sdmmc1_wp_n_pv3",
+};
+
+static const char * const sdmmc2_groups[] = {
+ "gmi_iordy_pi5",
+ "gmi_clk_pk1",
+ "gmi_cs2_n_pk3",
+ "gmi_cs3_n_pk4",
+ "gmi_cs7_n_pi6",
+ "gmi_ad12_ph4",
+ "gmi_ad13_ph5",
+ "gmi_ad14_ph6",
+ "gmi_ad15_ph7",
+ "gmi_dqs_p_pj3",
+};
+
+static const char * const sdmmc3_groups[] = {
+ "kb_col4_pq4",
+ "sdmmc3_clk_pa6",
+ "sdmmc3_cmd_pa7",
+ "sdmmc3_dat0_pb7",
+ "sdmmc3_dat1_pb6",
+ "sdmmc3_dat2_pb5",
+ "sdmmc3_dat3_pb4",
+ "hdmi_cec_pee3",
+ "sdmmc3_cd_n_pv2",
+ "sdmmc3_clk_lb_in_pee5",
+ "sdmmc3_clk_lb_out_pee4",
+};
+
+static const char * const sdmmc4_groups[] = {
+ "sdmmc4_clk_pcc4",
+ "sdmmc4_cmd_pt7",
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat5_paa5",
+ "sdmmc4_dat6_paa6",
+ "sdmmc4_dat7_paa7",
+};
+
+static const char * const soc_groups[] = {
+ "gmi_cs1_n_pj2",
+ "gmi_oe_n_pi1",
+ "clk_32k_out_pa0",
+ "hdmi_cec_pee3",
+};
+
+static const char * const spdif_groups[] = {
+ "sdmmc1_cmd_pz1",
+ "sdmmc1_dat3_py4",
+ "uart2_rxd_pc3",
+ "uart2_txd_pc2",
+ "spdif_in_pk6",
+ "spdif_out_pk5",
+};
+
+static const char * const spi1_groups[] = {
+ "ulpi_clk_py0",
+ "ulpi_dir_py1",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3",
+ "gpio_x3_aud_px3",
+ "gpio_x4_aud_px4",
+ "gpio_x5_aud_px5",
+ "gpio_x6_aud_px6",
+ "gpio_x7_aud_px7",
+ "gpio_w3_aud_pw3",
+};
+
+static const char * const spi2_groups[] = {
+ "ulpi_data4_po5",
+ "ulpi_data5_po6",
+ "ulpi_data6_po7",
+ "ulpi_data7_po0",
+ "kb_row4_pr4",
+ "kb_row5_pr5",
+ "kb_col0_pq0",
+ "kb_col1_pq1",
+ "kb_col2_pq2",
+ "kb_col6_pq6",
+ "kb_col7_pq7",
+ "gpio_x4_aud_px4",
+ "gpio_x5_aud_px5",
+ "gpio_x6_aud_px6",
+ "gpio_x7_aud_px7",
+ "gpio_w2_aud_pw2",
+ "gpio_w3_aud_pw3",
+};
+
+static const char * const spi3_groups[] = {
+ "ulpi_data0_po1",
+ "ulpi_data1_po2",
+ "ulpi_data2_po3",
+ "ulpi_data3_po4",
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat5_paa5",
+ "sdmmc4_dat6_paa6",
+ "sdmmc3_clk_pa6",
+ "sdmmc3_cmd_pa7",
+ "sdmmc3_dat0_pb7",
+ "sdmmc3_dat1_pb6",
+ "sdmmc3_dat2_pb5",
+ "sdmmc3_dat3_pb4",
+};
+
+static const char * const spi4_groups[] = {
+ "sdmmc1_cmd_pz1",
+ "sdmmc1_dat3_py4",
+ "sdmmc1_dat2_py5",
+ "sdmmc1_dat1_py6",
+ "sdmmc1_dat0_py7",
+ "uart2_rxd_pc3",
+ "uart2_txd_pc2",
+ "uart2_rts_n_pj6",
+ "uart2_cts_n_pj5",
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+ "uart3_cts_n_pa1",
+ "gmi_wait_pi7",
+ "gmi_cs6_n_pi3",
+ "gmi_ad5_pg5",
+ "gmi_ad6_pg6",
+ "gmi_ad7_pg7",
+ "gmi_a19_pk7",
+ "gmi_wr_n_pi0",
+ "sdmmc1_wp_n_pv3",
+};
+
+static const char * const spi5_groups[] = {
+ "ulpi_clk_py0",
+ "ulpi_dir_py1",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3",
+ "dap3_fs_pp0",
+ "dap3_din_pp1",
+ "dap3_dout_pp2",
+ "dap3_sclk_pp3",
+};
+
+static const char * const spi6_groups[] = {
+ "dvfs_pwm_px0",
+ "gpio_x1_aud_px1",
+ "gpio_x3_aud_px3",
+ "dvfs_clk_px2",
+ "gpio_x6_aud_px6",
+ "gpio_w2_aud_pw2",
+ "gpio_w3_aud_pw3",
+};
+
+static const char * const sysclk_groups[] = {
+ "sys_clk_req_pz5",
+};
+
+static const char * const trace_groups[] = {
+ "gmi_iordy_pi5",
+ "gmi_adv_n_pk0",
+ "gmi_clk_pk1",
+ "gmi_cs2_n_pk3",
+ "gmi_cs4_n_pk2",
+ "gmi_a16_pj7",
+ "gmi_a17_pb0",
+ "gmi_a18_pb1",
+ "gmi_a19_pk7",
+ "gmi_dqs_p_pj3",
+};
+
+static const char * const uarta_groups[] = {
+ "ulpi_data0_po1",
+ "ulpi_data1_po2",
+ "ulpi_data2_po3",
+ "ulpi_data3_po4",
+ "ulpi_data4_po5",
+ "ulpi_data5_po6",
+ "ulpi_data6_po7",
+ "ulpi_data7_po0",
+ "sdmmc1_cmd_pz1",
+ "sdmmc1_dat3_py4",
+ "sdmmc1_dat2_py5",
+ "sdmmc1_dat1_py6",
+ "sdmmc1_dat0_py7",
+ "uart2_rxd_pc3",
+ "uart2_txd_pc2",
+ "uart2_rts_n_pj6",
+ "uart2_cts_n_pj5",
+ "pu0",
+ "pu1",
+ "pu2",
+ "pu3",
+ "pu4",
+ "pu5",
+ "pu6",
+ "kb_row7_pr7",
+ "kb_row8_ps0",
+ "kb_row9_ps1",
+ "kb_row10_ps2",
+ "kb_col3_pq3",
+ "kb_col4_pq4",
+ "sdmmc3_cmd_pa7",
+ "sdmmc3_dat1_pb6",
+ "sdmmc1_wp_n_pv3",
+};
+
+static const char * const uartb_groups[] = {
+ "uart2_rts_n_pj6",
+ "uart2_cts_n_pj5",
+};
+
+static const char * const uartc_groups[] = {
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+ "uart3_cts_n_pa1",
+ "uart3_rts_n_pc0",
+};
+
+static const char * const uartd_groups[] = {
+ "ulpi_clk_py0",
+ "ulpi_dir_py1",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3",
+ "gmi_a16_pj7",
+ "gmi_a17_pb0",
+ "gmi_a18_pb1",
+ "gmi_a19_pk7",
+};
+
+static const char * const ulpi_groups[] = {
+ "ulpi_data0_po1",
+ "ulpi_data1_po2",
+ "ulpi_data2_po3",
+ "ulpi_data3_po4",
+ "ulpi_data4_po5",
+ "ulpi_data5_po6",
+ "ulpi_data6_po7",
+ "ulpi_data7_po0",
+ "ulpi_clk_py0",
+ "ulpi_dir_py1",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3",
+};
+
+static const char * const usb_groups[] = {
+ "pv0",
+ "pu6",
+ "gmi_cs0_n_pj0",
+ "gmi_cs4_n_pk2",
+ "gmi_ad11_ph3",
+ "kb_col0_pq0",
+ "spdif_in_pk6",
+ "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5",
+};
+
+static const char * const vgp1_groups[] = {
+ "cam_i2c_scl_pbb1",
+};
+
+static const char * const vgp2_groups[] = {
+ "cam_i2c_sda_pbb2",
+};
+
+static const char * const vgp3_groups[] = {
+ "pbb3",
+};
+
+static const char * const vgp4_groups[] = {
+ "pbb4",
+};
+
+static const char * const vgp5_groups[] = {
+ "pbb5",
+};
+
+static const char * const vgp6_groups[] = {
+ "pbb6",
+};
+
+static const char * const vi_groups[] = {
+ "cam_mclk_pcc0",
+ "pbb0",
+};
+
+static const char * const vi_alt1_groups[] = {
+ "cam_mclk_pcc0",
+ "pbb0",
+};
+
+static const char * const vi_alt3_groups[] = {
+ "cam_mclk_pcc0",
+ "pbb0",
+};
+
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct tegra_function tegra114_functions[] = {
+ FUNCTION(blink),
+ FUNCTION(cec),
+ FUNCTION(cldvfs),
+ FUNCTION(clk12),
+ FUNCTION(cpu),
+ FUNCTION(dap),
+ FUNCTION(dap1),
+ FUNCTION(dap2),
+ FUNCTION(dev3),
+ FUNCTION(displaya),
+ FUNCTION(displaya_alt),
+ FUNCTION(displayb),
+ FUNCTION(dtv),
+ FUNCTION(emc_dll),
+ FUNCTION(extperiph1),
+ FUNCTION(extperiph2),
+ FUNCTION(extperiph3),
+ FUNCTION(gmi),
+ FUNCTION(gmi_alt),
+ FUNCTION(hda),
+ FUNCTION(hsi),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(i2c4),
+ FUNCTION(i2cpwr),
+ FUNCTION(i2s0),
+ FUNCTION(i2s1),
+ FUNCTION(i2s2),
+ FUNCTION(i2s3),
+ FUNCTION(i2s4),
+ FUNCTION(irda),
+ FUNCTION(kbc),
+ FUNCTION(nand),
+ FUNCTION(nand_alt),
+ FUNCTION(owr),
+ FUNCTION(pmi),
+ FUNCTION(pwm0),
+ FUNCTION(pwm1),
+ FUNCTION(pwm2),
+ FUNCTION(pwm3),
+ FUNCTION(pwron),
+ FUNCTION(reset_out_n),
+ FUNCTION(rsvd1),
+ FUNCTION(rsvd2),
+ FUNCTION(rsvd3),
+ FUNCTION(rsvd4),
+ FUNCTION(sdmmc1),
+ FUNCTION(sdmmc2),
+ FUNCTION(sdmmc3),
+ FUNCTION(sdmmc4),
+ FUNCTION(soc),
+ FUNCTION(spdif),
+ FUNCTION(spi1),
+ FUNCTION(spi2),
+ FUNCTION(spi3),
+ FUNCTION(spi4),
+ FUNCTION(spi5),
+ FUNCTION(spi6),
+ FUNCTION(sysclk),
+ FUNCTION(trace),
+ FUNCTION(uarta),
+ FUNCTION(uartb),
+ FUNCTION(uartc),
+ FUNCTION(uartd),
+ FUNCTION(ulpi),
+ FUNCTION(usb),
+ FUNCTION(vgp1),
+ FUNCTION(vgp2),
+ FUNCTION(vgp3),
+ FUNCTION(vgp4),
+ FUNCTION(vgp5),
+ FUNCTION(vgp6),
+ FUNCTION(vi),
+ FUNCTION(vi_alt1),
+ FUNCTION(vi_alt3),
+};
+
+#define DRV_PINGROUP_REG_START 0x868 /* bank 0 */
+#define PINGROUP_REG_START 0x3000 /* bank 1 */
+
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_START)
+#define PINGROUP_REG_N(r) -1
+
+#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
+ }, \
+ .func_safe = TEGRA_MUX_##f_safe, \
+ .mux_reg = PINGROUP_REG_Y(r), \
+ .mux_bank = 1, \
+ .mux_bit = 0, \
+ .pupd_reg = PINGROUP_REG_Y(r), \
+ .pupd_bank = 1, \
+ .pupd_bit = 2, \
+ .tri_reg = PINGROUP_REG_Y(r), \
+ .tri_bank = 1, \
+ .tri_bit = 4, \
+ .einput_reg = PINGROUP_REG_Y(r), \
+ .einput_bank = 1, \
+ .einput_bit = 5, \
+ .odrain_reg = PINGROUP_REG_##od(r), \
+ .odrain_bank = 1, \
+ .odrain_bit = 6, \
+ .lock_reg = PINGROUP_REG_Y(r), \
+ .lock_bank = 1, \
+ .lock_bit = 7, \
+ .ioreset_reg = PINGROUP_REG_##ior(r), \
+ .ioreset_bank = 1, \
+ .ioreset_bit = 8, \
+ .rcv_sel_reg = PINGROUP_REG_##rcv_sel(r), \
+ .rcv_sel_bank = 1, \
+ .rcv_sel_bit = 9, \
+ .drv_reg = -1, \
+ .drvtype_reg = -1, \
+ }
+
+#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_START)
+#define DRV_PINGROUP_DVRTYPE_N(r) -1
+
+#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
+ drvdn_b, drvdn_w, drvup_b, drvup_w, \
+ slwr_b, slwr_w, slwf_b, slwf_w, \
+ drvtype) \
+ { \
+ .name = "drive_" #pg_name, \
+ .pins = drive_##pg_name##_pins, \
+ .npins = ARRAY_SIZE(drive_##pg_name##_pins), \
+ .mux_reg = -1, \
+ .pupd_reg = -1, \
+ .tri_reg = -1, \
+ .einput_reg = -1, \
+ .odrain_reg = -1, \
+ .lock_reg = -1, \
+ .ioreset_reg = -1, \
+ .rcv_sel_reg = -1, \
+ .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r), \
+ .drv_bank = 0, \
+ .hsm_bit = hsm_b, \
+ .schmitt_bit = schmitt_b, \
+ .lpmd_bit = lpmd_b, \
+ .drvdn_bit = drvdn_b, \
+ .drvdn_width = drvdn_w, \
+ .drvup_bit = drvup_b, \
+ .drvup_width = drvup_w, \
+ .slwr_bit = slwr_b, \
+ .slwr_width = slwr_w, \
+ .slwf_bit = slwf_b, \
+ .slwf_width = slwf_w, \
+ .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r), \
+ .drvtype_bank = 0, \
+ .drvtype_bit = 6, \
+ }
+
+static const struct tegra_pingroup tegra114_groups[] = {
+ /* pg_name, f0, f1, f2, f3, safe, r, od, ior, rcv_sel */
+ /* FIXME: Fill in correct data in safe column */
+ PINGROUP(ulpi_data0_po1, SPI3, HSI, UARTA, ULPI, ULPI, 0x3000, N, N, N),
+ PINGROUP(ulpi_data1_po2, SPI3, HSI, UARTA, ULPI, ULPI, 0x3004, N, N, N),
+ PINGROUP(ulpi_data2_po3, SPI3, HSI, UARTA, ULPI, ULPI, 0x3008, N, N, N),
+ PINGROUP(ulpi_data3_po4, SPI3, HSI, UARTA, ULPI, ULPI, 0x300c, N, N, N),
+ PINGROUP(ulpi_data4_po5, SPI2, HSI, UARTA, ULPI, ULPI, 0x3010, N, N, N),
+ PINGROUP(ulpi_data5_po6, SPI2, HSI, UARTA, ULPI, ULPI, 0x3014, N, N, N),
+ PINGROUP(ulpi_data6_po7, SPI2, HSI, UARTA, ULPI, ULPI, 0x3018, N, N, N),
+ PINGROUP(ulpi_data7_po0, SPI2, HSI, UARTA, ULPI, ULPI, 0x301c, N, N, N),
+ PINGROUP(ulpi_clk_py0, SPI1, SPI5, UARTD, ULPI, ULPI, 0x3020, N, N, N),
+ PINGROUP(ulpi_dir_py1, SPI1, SPI5, UARTD, ULPI, ULPI, 0x3024, N, N, N),
+ PINGROUP(ulpi_nxt_py2, SPI1, SPI5, UARTD, ULPI, ULPI, 0x3028, N, N, N),
+ PINGROUP(ulpi_stp_py3, SPI1, SPI5, UARTD, ULPI, ULPI, 0x302c, N, N, N),
+ PINGROUP(dap3_fs_pp0, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x3030, N, N, N),
+ PINGROUP(dap3_din_pp1, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x3034, N, N, N),
+ PINGROUP(dap3_dout_pp2, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x3038, N, N, N),
+ PINGROUP(dap3_sclk_pp3, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x303c, N, N, N),
+ PINGROUP(pv0, USB, RSVD2, RSVD3, RSVD4, RSVD4, 0x3040, N, N, N),
+ PINGROUP(pv1, RSVD1, RSVD2, RSVD3, RSVD4, RSVD4, 0x3044, N, N, N),
+ PINGROUP(sdmmc1_clk_pz0, SDMMC1, CLK12, RSVD3, RSVD4, RSVD4, 0x3048, N, N, N),
+ PINGROUP(sdmmc1_cmd_pz1, SDMMC1, SPDIF, SPI4, UARTA, SDMMC1, 0x304c, N, N, N),
+ PINGROUP(sdmmc1_dat3_py4, SDMMC1, SPDIF, SPI4, UARTA, SDMMC1, 0x3050, N, N, N),
+ PINGROUP(sdmmc1_dat2_py5, SDMMC1, PWM0, SPI4, UARTA, SDMMC1, 0x3054, N, N, N),
+ PINGROUP(sdmmc1_dat1_py6, SDMMC1, PWM1, SPI4, UARTA, SDMMC1, 0x3058, N, N, N),
+ PINGROUP(sdmmc1_dat0_py7, SDMMC1, RSVD2, SPI4, UARTA, RSVD2, 0x305c, N, N, N),
+ PINGROUP(clk2_out_pw5, EXTPERIPH2, RSVD2, RSVD3, RSVD4, RSVD4, 0x3068, N, N, N),
+ PINGROUP(clk2_req_pcc5, DAP, RSVD2, RSVD3, RSVD4, RSVD4, 0x306c, N, N, N),
+ PINGROUP(hdmi_int_pn7, RSVD1, RSVD2, RSVD3, RSVD4, RSVD4, 0x3110, N, N, Y),
+ PINGROUP(ddc_scl_pv4, I2C4, RSVD2, RSVD3, RSVD4, RSVD4, 0x3114, N, N, Y),
+ PINGROUP(ddc_sda_pv5, I2C4, RSVD2, RSVD3, RSVD4, RSVD4, 0x3118, N, N, Y),
+ PINGROUP(uart2_rxd_pc3, IRDA, SPDIF, UARTA, SPI4, IRDA, 0x3164, N, N, N),
+ PINGROUP(uart2_txd_pc2, IRDA, SPDIF, UARTA, SPI4, IRDA, 0x3168, N, N, N),
+ PINGROUP(uart2_rts_n_pj6, UARTA, UARTB, RSVD3, SPI4, RSVD3, 0x316c, N, N, N),
+ PINGROUP(uart2_cts_n_pj5, UARTA, UARTB, RSVD3, SPI4, RSVD3, 0x3170, N, N, N),
+ PINGROUP(uart3_txd_pw6, UARTC, RSVD2, RSVD3, SPI4, RSVD3, 0x3174, N, N, N),
+ PINGROUP(uart3_rxd_pw7, UARTC, RSVD2, RSVD3, SPI4, RSVD3, 0x3178, N, N, N),
+ PINGROUP(uart3_cts_n_pa1, UARTC, SDMMC1, DTV, SPI4, UARTC, 0x317c, N, N, N),
+ PINGROUP(uart3_rts_n_pc0, UARTC, PWM0, DTV, DISPLAYA, UARTC, 0x3180, N, N, N),
+ PINGROUP(pu0, OWR, UARTA, RSVD3, RSVD4, RSVD4, 0x3184, N, N, N),
+ PINGROUP(pu1, RSVD1, UARTA, RSVD3, RSVD4, RSVD4, 0x3188, N, N, N),
+ PINGROUP(pu2, RSVD1, UARTA, RSVD3, RSVD4, RSVD4, 0x318c, N, N, N),
+ PINGROUP(pu3, PWM0, UARTA, DISPLAYA, DISPLAYB, PWM0, 0x3190, N, N, N),
+ PINGROUP(pu4, PWM1, UARTA, DISPLAYA, DISPLAYB, PWM1, 0x3194, N, N, N),
+ PINGROUP(pu5, PWM2, UARTA, DISPLAYA, DISPLAYB, PWM2, 0x3198, N, N, N),
+ PINGROUP(pu6, PWM3, UARTA, USB, DISPLAYB, PWM3, 0x319c, N, N, N),
+ PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, RSVD4, 0x31a0, Y, N, N),
+ PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, RSVD4, 0x31a4, Y, N, N),
+ PINGROUP(dap4_fs_pp4, I2S3, RSVD2, DTV, RSVD4, RSVD4, 0x31a8, N, N, N),
+ PINGROUP(dap4_din_pp5, I2S3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31ac, N, N, N),
+ PINGROUP(dap4_dout_pp6, I2S3, RSVD2, DTV, RSVD4, RSVD4, 0x31b0, N, N, N),
+ PINGROUP(dap4_sclk_pp7, I2S3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31b4, N, N, N),
+ PINGROUP(clk3_out_pee0, EXTPERIPH3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31b8, N, N, N),
+ PINGROUP(clk3_req_pee1, DEV3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31bc, N, N, N),
+ PINGROUP(gmi_wp_n_pc7, RSVD1, NAND, GMI, GMI_ALT, RSVD1, 0x31c0, N, N, N),
+ PINGROUP(gmi_iordy_pi5, SDMMC2, RSVD2, GMI, TRACE, RSVD2, 0x31c4, N, N, N),
+ PINGROUP(gmi_wait_pi7, SPI4, NAND, GMI, DTV, NAND, 0x31c8, N, N, N),
+ PINGROUP(gmi_adv_n_pk0, RSVD1, NAND, GMI, TRACE, RSVD1, 0x31cc, N, N, N),
+ PINGROUP(gmi_clk_pk1, SDMMC2, NAND, GMI, TRACE, GMI, 0x31d0, N, N, N),
+ PINGROUP(gmi_cs0_n_pj0, RSVD1, NAND, GMI, USB, RSVD1, 0x31d4, N, N, N),
+ PINGROUP(gmi_cs1_n_pj2, RSVD1, NAND, GMI, SOC, RSVD1, 0x31d8, N, N, N),
+ PINGROUP(gmi_cs2_n_pk3, SDMMC2, NAND, GMI, TRACE, GMI, 0x31dc, N, N, N),
+ PINGROUP(gmi_cs3_n_pk4, SDMMC2, NAND, GMI, GMI_ALT, GMI, 0x31e0, N, N, N),
+ PINGROUP(gmi_cs4_n_pk2, USB, NAND, GMI, TRACE, GMI, 0x31e4, N, N, N),
+ PINGROUP(gmi_cs6_n_pi3, NAND, NAND_ALT, GMI, SPI4, NAND, 0x31e8, N, N, N),
+ PINGROUP(gmi_cs7_n_pi6, NAND, NAND_ALT, GMI, SDMMC2, NAND, 0x31ec, N, N, N),
+ PINGROUP(gmi_ad0_pg0, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31f0, N, N, N),
+ PINGROUP(gmi_ad1_pg1, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31f4, N, N, N),
+ PINGROUP(gmi_ad2_pg2, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31f8, N, N, N),
+ PINGROUP(gmi_ad3_pg3, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31fc, N, N, N),
+ PINGROUP(gmi_ad4_pg4, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3200, N, N, N),
+ PINGROUP(gmi_ad5_pg5, RSVD1, NAND, GMI, SPI4, RSVD1, 0x3204, N, N, N),
+ PINGROUP(gmi_ad6_pg6, RSVD1, NAND, GMI, SPI4, RSVD1, 0x3208, N, N, N),
+ PINGROUP(gmi_ad7_pg7, RSVD1, NAND, GMI, SPI4, RSVD1, 0x320c, N, N, N),
+ PINGROUP(gmi_ad8_ph0, PWM0, NAND, GMI, DTV, GMI, 0x3210, N, N, N),
+ PINGROUP(gmi_ad9_ph1, PWM1, NAND, GMI, CLDVFS, GMI, 0x3214, N, N, N),
+ PINGROUP(gmi_ad10_ph2, PWM2, NAND, GMI, CLDVFS, GMI, 0x3218, N, N, N),
+ PINGROUP(gmi_ad11_ph3, PWM3, NAND, GMI, USB, GMI, 0x321c, N, N, N),
+ PINGROUP(gmi_ad12_ph4, SDMMC2, NAND, GMI, RSVD4, RSVD4, 0x3220, N, N, N),
+ PINGROUP(gmi_ad13_ph5, SDMMC2, NAND, GMI, RSVD4, RSVD4, 0x3224, N, N, N),
+ PINGROUP(gmi_ad14_ph6, SDMMC2, NAND, GMI, DTV, GMI, 0x3228, N, N, N),
+ PINGROUP(gmi_ad15_ph7, SDMMC2, NAND, GMI, DTV, GMI, 0x322c, N, N, N),
+ PINGROUP(gmi_a16_pj7, UARTD, TRACE, GMI, GMI_ALT, GMI, 0x3230, N, N, N),
+ PINGROUP(gmi_a17_pb0, UARTD, RSVD2, GMI, TRACE, RSVD2, 0x3234, N, N, N),
+ PINGROUP(gmi_a18_pb1, UARTD, RSVD2, GMI, TRACE, RSVD2, 0x3238, N, N, N),
+ PINGROUP(gmi_a19_pk7, UARTD, SPI4, GMI, TRACE, GMI, 0x323c, N, N, N),
+ PINGROUP(gmi_wr_n_pi0, RSVD1, NAND, GMI, SPI4, RSVD1, 0x3240, N, N, N),
+ PINGROUP(gmi_oe_n_pi1, RSVD1, NAND, GMI, SOC, RSVD1, 0x3244, N, N, N),
+ PINGROUP(gmi_dqs_p_pj3, SDMMC2, NAND, GMI, TRACE, NAND, 0x3248, N, N, N),
+ PINGROUP(gmi_rst_n_pi4, NAND, NAND_ALT, GMI, RSVD4, RSVD4, 0x324c, N, N, N),
+ PINGROUP(gen2_i2c_scl_pt5, I2C2, RSVD2, GMI, RSVD4, RSVD4, 0x3250, Y, N, N),
+ PINGROUP(gen2_i2c_sda_pt6, I2C2, RSVD2, GMI, RSVD4, RSVD4, 0x3254, Y, N, N),
+ PINGROUP(sdmmc4_clk_pcc4, SDMMC4, RSVD2, GMI, RSVD4, RSVD4, 0x3258, N, Y, N),
+ PINGROUP(sdmmc4_cmd_pt7, SDMMC4, RSVD2, GMI, RSVD4, RSVD4, 0x325c, N, Y, N),
+ PINGROUP(sdmmc4_dat0_paa0, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3260, N, Y, N),
+ PINGROUP(sdmmc4_dat1_paa1, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3264, N, Y, N),
+ PINGROUP(sdmmc4_dat2_paa2, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3268, N, Y, N),
+ PINGROUP(sdmmc4_dat3_paa3, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x326c, N, Y, N),
+ PINGROUP(sdmmc4_dat4_paa4, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3270, N, Y, N),
+ PINGROUP(sdmmc4_dat5_paa5, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3274, N, Y, N),
+ PINGROUP(sdmmc4_dat6_paa6, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3278, N, Y, N),
+ PINGROUP(sdmmc4_dat7_paa7, SDMMC4, RSVD2, GMI, RSVD4, RSVD4, 0x327c, N, Y, N),
+ PINGROUP(cam_mclk_pcc0, VI, VI_ALT1, VI_ALT3, RSVD4, RSVD4, 0x3284, N, N, N),
+ PINGROUP(pcc1, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x3288, N, N, N),
+ PINGROUP(pbb0, I2S4, VI, VI_ALT1, VI_ALT3, I2S4, 0x328c, N, N, N),
+ PINGROUP(cam_i2c_scl_pbb1, VGP1, I2C3, RSVD3, RSVD4, RSVD4, 0x3290, Y, N, N),
+ PINGROUP(cam_i2c_sda_pbb2, VGP2, I2C3, RSVD3, RSVD4, RSVD4, 0x3294, Y, N, N),
+ PINGROUP(pbb3, VGP3, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x3298, N, N, N),
+ PINGROUP(pbb4, VGP4, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x329c, N, N, N),
+ PINGROUP(pbb5, VGP5, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x32a0, N, N, N),
+ PINGROUP(pbb6, VGP6, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x32a4, N, N, N),
+ PINGROUP(pbb7, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32a8, N, N, N),
+ PINGROUP(pcc2, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32ac, N, N, N),
+ PINGROUP(pwr_i2c_scl_pz6, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b4, Y, N, N),
+ PINGROUP(pwr_i2c_sda_pz7, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b8, Y, N, N),
+ PINGROUP(kb_row0_pr0, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32bc, N, N, N),
+ PINGROUP(kb_row1_pr1, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32c0, N, N, N),
+ PINGROUP(kb_row2_pr2, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32c4, N, N, N),
+ PINGROUP(kb_row3_pr3, KBC, DISPLAYA, RSVD3, DISPLAYB, RSVD3, 0x32c8, N, N, N),
+ PINGROUP(kb_row4_pr4, KBC, DISPLAYA, SPI2, DISPLAYB, KBC, 0x32cc, N, N, N),
+ PINGROUP(kb_row5_pr5, KBC, DISPLAYA, SPI2, DISPLAYB, KBC, 0x32d0, N, N, N),
+ PINGROUP(kb_row6_pr6, KBC, DISPLAYA, DISPLAYA_ALT, DISPLAYB, KBC, 0x32d4, N, N, N),
+ PINGROUP(kb_row7_pr7, KBC, RSVD2, CLDVFS, UARTA, RSVD2, 0x32d8, N, N, N),
+ PINGROUP(kb_row8_ps0, KBC, RSVD2, CLDVFS, UARTA, RSVD2, 0x32dc, N, N, N),
+ PINGROUP(kb_row9_ps1, KBC, RSVD2, RSVD3, UARTA, RSVD3, 0x32e0, N, N, N),
+ PINGROUP(kb_row10_ps2, KBC, RSVD2, RSVD3, UARTA, RSVD3, 0x32e4, N, N, N),
+ PINGROUP(kb_col0_pq0, KBC, USB, SPI2, EMC_DLL, KBC, 0x32fc, N, N, N),
+ PINGROUP(kb_col1_pq1, KBC, RSVD2, SPI2, EMC_DLL, RSVD2, 0x3300, N, N, N),
+ PINGROUP(kb_col2_pq2, KBC, RSVD2, SPI2, RSVD4, RSVD2, 0x3304, N, N, N),
+ PINGROUP(kb_col3_pq3, KBC, DISPLAYA, PWM2, UARTA, KBC, 0x3308, N, N, N),
+ PINGROUP(kb_col4_pq4, KBC, OWR, SDMMC3, UARTA, KBC, 0x330c, N, N, N),
+ PINGROUP(kb_col5_pq5, KBC, RSVD2, SDMMC1, RSVD4, RSVD4, 0x3310, N, N, N),
+ PINGROUP(kb_col6_pq6, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x3314, N, N, N),
+ PINGROUP(kb_col7_pq7, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x3318, N, N, N),
+ PINGROUP(clk_32k_out_pa0, BLINK, SOC, RSVD3, RSVD4, RSVD4, 0x331c, N, N, N),
+ PINGROUP(sys_clk_req_pz5, SYSCLK, RSVD2, RSVD3, RSVD4, RSVD4, 0x3320, N, N, N),
+ PINGROUP(core_pwr_req, PWRON, RSVD2, RSVD3, RSVD4, RSVD4, 0x3324, N, N, N),
+ PINGROUP(cpu_pwr_req, CPU, RSVD2, RSVD3, RSVD4, RSVD4, 0x3328, N, N, N),
+ PINGROUP(pwr_int_n, PMI, RSVD2, RSVD3, RSVD4, RSVD4, 0x332c, N, N, N),
+ PINGROUP(owr, OWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x3334, N, N, Y),
+ PINGROUP(dap1_fs_pn0, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3338, N, N, N),
+ PINGROUP(dap1_din_pn1, I2S0, HDA, GMI, RSVD4, RSVD4, 0x333c, N, N, N),
+ PINGROUP(dap1_dout_pn2, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3340, N, N, N),
+ PINGROUP(dap1_sclk_pn3, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3344, N, N, N),
+ PINGROUP(clk1_req_pee2, DAP, DAP1, RSVD3, RSVD4, RSVD4, 0x3348, N, N, N),
+ PINGROUP(clk1_out_pw4, EXTPERIPH1, DAP2, RSVD3, RSVD4, RSVD4, 0x334c, N, N, N),
+ PINGROUP(spdif_in_pk6, SPDIF, USB, RSVD3, RSVD4, RSVD4, 0x3350, N, N, N),
+ PINGROUP(spdif_out_pk5, SPDIF, RSVD2, RSVD3, RSVD4, RSVD4, 0x3354, N, N, N),
+ PINGROUP(dap2_fs_pa2, I2S1, HDA, RSVD3, RSVD4, RSVD4, 0x3358, N, N, N),
+ PINGROUP(dap2_din_pa4, I2S1, HDA, RSVD3, RSVD4, RSVD4, 0x335c, N, N, N),
+ PINGROUP(dap2_dout_pa5, I2S1, HDA, RSVD3, RSVD4, RSVD4, 0x3360, N, N, N),
+ PINGROUP(dap2_sclk_pa3, I2S1, HDA, RSVD3, RSVD4, RSVD4, 0x3364, N, N, N),
+ PINGROUP(dvfs_pwm_px0, SPI6, CLDVFS, RSVD3, RSVD4, RSVD4, 0x3368, N, N, N),
+ PINGROUP(gpio_x1_aud_px1, SPI6, RSVD2, RSVD3, RSVD4, RSVD4, 0x336c, N, N, N),
+ PINGROUP(gpio_x3_aud_px3, SPI6, SPI1, RSVD3, RSVD4, RSVD4, 0x3370, N, N, N),
+ PINGROUP(dvfs_clk_px2, SPI6, CLDVFS, RSVD3, RSVD4, RSVD4, 0x3374, N, N, N),
+ PINGROUP(gpio_x4_aud_px4, RSVD1, SPI1, SPI2, DAP2, RSVD1, 0x3378, N, N, N),
+ PINGROUP(gpio_x5_aud_px5, RSVD1, SPI1, SPI2, RSVD4, RSVD1, 0x337c, N, N, N),
+ PINGROUP(gpio_x6_aud_px6, SPI6, SPI1, SPI2, RSVD4, RSVD4, 0x3380, N, N, N),
+ PINGROUP(gpio_x7_aud_px7, RSVD1, SPI1, SPI2, RSVD4, RSVD4, 0x3384, N, N, N),
+ PINGROUP(sdmmc3_clk_pa6, SDMMC3, RSVD2, RSVD3, SPI3, RSVD3, 0x3390, N, N, N),
+ PINGROUP(sdmmc3_cmd_pa7, SDMMC3, PWM3, UARTA, SPI3, SDMMC3, 0x3394, N, N, N),
+ PINGROUP(sdmmc3_dat0_pb7, SDMMC3, RSVD2, RSVD3, SPI3, RSVD3, 0x3398, N, N, N),
+ PINGROUP(sdmmc3_dat1_pb6, SDMMC3, PWM2, UARTA, SPI3, SDMMC3, 0x339c, N, N, N),
+ PINGROUP(sdmmc3_dat2_pb5, SDMMC3, PWM1, DISPLAYA, SPI3, SDMMC3, 0x33a0, N, N, N),
+ PINGROUP(sdmmc3_dat3_pb4, SDMMC3, PWM0, DISPLAYB, SPI3, SDMMC3, 0x33a4, N, N, N),
+ PINGROUP(hdmi_cec_pee3, CEC, SDMMC3, RSVD3, SOC, RSVD3, 0x33e0, Y, N, N),
+ PINGROUP(sdmmc1_wp_n_pv3, SDMMC1, CLK12, SPI4, UARTA, SDMMC1, 0x33e4, N, N, N),
+ PINGROUP(sdmmc3_cd_n_pv2, SDMMC3, OWR, RSVD3, RSVD4, RSVD4, 0x33e8, N, N, N),
+ PINGROUP(gpio_w2_aud_pw2, SPI6, RSVD2, SPI2, I2C1, RSVD2, 0x33ec, N, N, N),
+ PINGROUP(gpio_w3_aud_pw3, SPI6, SPI1, SPI2, I2C1, SPI6, 0x33f0, N, N, N),
+ PINGROUP(usb_vbus_en0_pn4, USB, RSVD2, RSVD3, RSVD4, RSVD4, 0x33f4, Y, N, N),
+ PINGROUP(usb_vbus_en1_pn5, USB, RSVD2, RSVD3, RSVD4, RSVD4, 0x33f8, Y, N, N),
+ PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, RSVD4, 0x33fc, N, N, N),
+ PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, RSVD4, 0x3400, N, N, N),
+ PINGROUP(reset_out_n, RSVD1, RSVD2, RSVD3, RESET_OUT_N, RSVD3, 0x3408, N, N, N),
+
+ /* pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w, drvtype */
+ DRV_PINGROUP(ao1, 0x868, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao2, 0x86c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(at1, 0x870, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at2, 0x874, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at3, 0x878, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at4, 0x87c, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at5, 0x880, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cdev1, 0x884, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cdev2, 0x888, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap1, 0x890, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap2, 0x894, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap3, 0x898, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap4, 0x89c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dbg, 0x8a0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(sdio3, 0x8b0, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
+ DRV_PINGROUP(spi, 0x8b4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uaa, 0x8b8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uab, 0x8bc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uart2, 0x8c0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uart3, 0x8c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(sdio1, 0x8ec, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ddc, 0x8fc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gma, 0x900, 2, 3, 4, 14, 5, 20, 5, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(gme, 0x910, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmf, 0x914, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmg, 0x918, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmh, 0x91c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(owr, 0x920, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uda, 0x924, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+};
+
+static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
+ .ngpios = NUM_GPIOS,
+ .pins = tegra114_pins,
+ .npins = ARRAY_SIZE(tegra114_pins),
+ .functions = tegra114_functions,
+ .nfunctions = ARRAY_SIZE(tegra114_functions),
+ .groups = tegra114_groups,
+ .ngroups = ARRAY_SIZE(tegra114_groups),
+};
+
+static int tegra114_pinctrl_probe(struct platform_device *pdev)
+{
+ return tegra_pinctrl_probe(pdev, &tegra114_pinctrl);
+}
+
+static struct of_device_id tegra114_pinctrl_of_match[] = {
+ { .compatible = "nvidia,tegra114-pinmux", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra114_pinctrl_of_match);
+
+static struct platform_driver tegra114_pinctrl_driver = {
+ .driver = {
+ .name = "tegra114-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra114_pinctrl_of_match,
+ },
+ .probe = tegra114_pinctrl_probe,
+ .remove = tegra_pinctrl_remove,
+};
+module_platform_driver(tegra114_pinctrl_driver);
+
+MODULE_ALIAS("platform:tegra114-pinctrl");
+MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/pinctrl-tegra20.c
index 1524bfd66602..fcfb7d012c5b 100644
--- a/drivers/pinctrl/pinctrl-tegra20.c
+++ b/drivers/pinctrl/pinctrl-tegra20.c
@@ -2624,7 +2624,9 @@ static const struct tegra_function tegra20_functions[] = {
.odrain_reg = -1, \
.lock_reg = -1, \
.ioreset_reg = -1, \
+ .rcv_sel_reg = -1, \
.drv_reg = -1, \
+ .drvtype_reg = -1, \
}
/* Pin groups with only pull up and pull down control */
@@ -2642,7 +2644,9 @@ static const struct tegra_function tegra20_functions[] = {
.odrain_reg = -1, \
.lock_reg = -1, \
.ioreset_reg = -1, \
+ .rcv_sel_reg = -1, \
.drv_reg = -1, \
+ .drvtype_reg = -1, \
}
/* Pin groups for drive strength registers (configurable version) */
@@ -2660,6 +2664,7 @@ static const struct tegra_function tegra20_functions[] = {
.odrain_reg = -1, \
.lock_reg = -1, \
.ioreset_reg = -1, \
+ .rcv_sel_reg = -1, \
.drv_reg = ((r) - PINGROUP_REG_A), \
.drv_bank = 3, \
.hsm_bit = hsm_b, \
@@ -2673,6 +2678,7 @@ static const struct tegra_function tegra20_functions[] = {
.slwr_width = slwr_w, \
.slwf_bit = slwf_b, \
.slwf_width = slwf_w, \
+ .drvtype_reg = -1, \
}
/* Pin groups for drive strength registers (simple version) */
@@ -2856,7 +2862,7 @@ static const struct tegra_pinctrl_soc_data tegra20_pinctrl = {
.ngroups = ARRAY_SIZE(tegra20_groups),
};
-static int __devinit tegra20_pinctrl_probe(struct platform_device *pdev)
+static int tegra20_pinctrl_probe(struct platform_device *pdev)
{
return tegra_pinctrl_probe(pdev, &tegra20_pinctrl);
}
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index cf579ebf346f..2300deba25bd 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -3384,7 +3384,9 @@ static const struct tegra_function tegra30_functions[] = {
.ioreset_reg = PINGROUP_REG_##ior(r), \
.ioreset_bank = 1, \
.ioreset_bit = 8, \
+ .rcv_sel_reg = -1, \
.drv_reg = -1, \
+ .drvtype_reg = -1, \
}
#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
@@ -3401,6 +3403,7 @@ static const struct tegra_function tegra30_functions[] = {
.odrain_reg = -1, \
.lock_reg = -1, \
.ioreset_reg = -1, \
+ .rcv_sel_reg = -1, \
.drv_reg = ((r) - DRV_PINGROUP_REG_A), \
.drv_bank = 0, \
.hsm_bit = hsm_b, \
@@ -3414,6 +3417,7 @@ static const struct tegra_function tegra30_functions[] = {
.slwr_width = slwr_w, \
.slwf_bit = slwf_b, \
.slwf_width = slwf_w, \
+ .drvtype_reg = -1, \
}
static const struct tegra_pingroup tegra30_groups[] = {
@@ -3722,7 +3726,7 @@ static const struct tegra_pinctrl_soc_data tegra30_pinctrl = {
.ngroups = ARRAY_SIZE(tegra30_groups),
};
-static int __devinit tegra30_pinctrl_probe(struct platform_device *pdev)
+static int tegra30_pinctrl_probe(struct platform_device *pdev)
{
return tegra_pinctrl_probe(pdev, &tegra30_pinctrl);
}
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 8c039ad22baf..2b5772550836 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1062,7 +1062,7 @@ static struct pinctrl_desc u300_pmx_desc = {
.owner = THIS_MODULE,
};
-static int __devinit u300_pmx_probe(struct platform_device *pdev)
+static int u300_pmx_probe(struct platform_device *pdev)
{
struct u300_pmx *upmx;
struct resource *res;
@@ -1078,9 +1078,9 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- upmx->virtbase = devm_request_and_ioremap(&pdev->dev, res);
- if (!upmx->virtbase)
- return -ENOMEM;
+ upmx->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(upmx->virtbase))
+ return PTR_ERR(upmx->virtbase);
upmx->pctl = pinctrl_register(&u300_pmx_desc, &pdev->dev, upmx);
if (!upmx->pctl) {
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index ad90984ec500..068224efa6fa 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -9,6 +9,7 @@
* Copyright (C) 2012 John Crispin <blogic@openwrt.org>
*/
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -441,17 +442,17 @@ static int xway_pinconf_get(struct pinctrl_dev *pctldev,
if (port == PORT3)
reg = GPIO3_OD;
else
- reg = GPIO_OD(port);
+ reg = GPIO_OD(pin);
*config = LTQ_PINCONF_PACK(param,
- !!gpio_getbit(info->membase[0], reg, PORT_PIN(port)));
+ !gpio_getbit(info->membase[0], reg, PORT_PIN(pin)));
break;
case LTQ_PINCONF_PARAM_PULL:
if (port == PORT3)
reg = GPIO3_PUDEN;
else
- reg = GPIO_PUDEN(port);
- if (!gpio_getbit(info->membase[0], reg, PORT_PIN(port))) {
+ reg = GPIO_PUDEN(pin);
+ if (!gpio_getbit(info->membase[0], reg, PORT_PIN(pin))) {
*config = LTQ_PINCONF_PACK(param, 0);
break;
}
@@ -459,13 +460,18 @@ static int xway_pinconf_get(struct pinctrl_dev *pctldev,
if (port == PORT3)
reg = GPIO3_PUDSEL;
else
- reg = GPIO_PUDSEL(port);
- if (!gpio_getbit(info->membase[0], reg, PORT_PIN(port)))
+ reg = GPIO_PUDSEL(pin);
+ if (!gpio_getbit(info->membase[0], reg, PORT_PIN(pin)))
*config = LTQ_PINCONF_PACK(param, 2);
else
*config = LTQ_PINCONF_PACK(param, 1);
break;
+ case LTQ_PINCONF_PARAM_OUTPUT:
+ reg = GPIO_DIR(pin);
+ *config = LTQ_PINCONF_PACK(param,
+ gpio_getbit(info->membase[0], reg, PORT_PIN(pin)));
+ break;
default:
dev_err(pctldev->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
@@ -488,33 +494,44 @@ static int xway_pinconf_set(struct pinctrl_dev *pctldev,
if (port == PORT3)
reg = GPIO3_OD;
else
- reg = GPIO_OD(port);
- gpio_setbit(info->membase[0], reg, PORT_PIN(port));
+ reg = GPIO_OD(pin);
+ if (arg == 0)
+ gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
+ else
+ gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
break;
case LTQ_PINCONF_PARAM_PULL:
if (port == PORT3)
reg = GPIO3_PUDEN;
else
- reg = GPIO_PUDEN(port);
+ reg = GPIO_PUDEN(pin);
if (arg == 0) {
- gpio_clearbit(info->membase[0], reg, PORT_PIN(port));
+ gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
break;
}
- gpio_setbit(info->membase[0], reg, PORT_PIN(port));
+ gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
if (port == PORT3)
reg = GPIO3_PUDSEL;
else
- reg = GPIO_PUDSEL(port);
+ reg = GPIO_PUDSEL(pin);
if (arg == 1)
- gpio_clearbit(info->membase[0], reg, PORT_PIN(port));
+ gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
else if (arg == 2)
- gpio_setbit(info->membase[0], reg, PORT_PIN(port));
+ gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
else
dev_err(pctldev->dev, "Invalid pull value %d\n", arg);
break;
+ case LTQ_PINCONF_PARAM_OUTPUT:
+ reg = GPIO_DIR(pin);
+ if (arg == 0)
+ gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
+ else
+ gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
+ break;
+
default:
dev_err(pctldev->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
@@ -522,9 +539,24 @@ static int xway_pinconf_set(struct pinctrl_dev *pctldev,
return 0;
}
+int xway_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long config)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
+ int i, ret = 0;
+
+ for (i = 0; i < info->grps[selector].npins && !ret; i++)
+ ret = xway_pinconf_set(pctldev,
+ info->grps[selector].pins[i], config);
+
+ return ret;
+}
+
static struct pinconf_ops xway_pinconf_ops = {
.pin_config_get = xway_pinconf_get,
.pin_config_set = xway_pinconf_set,
+ .pin_config_group_set = xway_pinconf_group_set,
};
static struct pinctrl_desc xway_pctrl_desc = {
@@ -558,6 +590,7 @@ static inline int xway_mux_apply(struct pinctrl_dev *pctrldev,
static const struct ltq_cfg_param xway_cfg_params[] = {
{"lantiq,pull", LTQ_PINCONF_PARAM_PULL},
{"lantiq,open-drain", LTQ_PINCONF_PARAM_OPEN_DRAIN},
+ {"lantiq,output", LTQ_PINCONF_PARAM_OUTPUT},
};
static struct ltq_pinmux_info xway_info = {
@@ -674,7 +707,7 @@ static const struct of_device_id xway_match[] = {
};
MODULE_DEVICE_TABLE(of, xway_match);
-static int __devinit pinmux_xway_probe(struct platform_device *pdev)
+static int pinmux_xway_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct pinctrl_xway_soc *xway_soc;
@@ -687,11 +720,9 @@ static int __devinit pinmux_xway_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get resource\n");
return -ENOENT;
}
- xway_info.membase[0] = devm_request_and_ioremap(&pdev->dev, res);
- if (!xway_info.membase[0]) {
- dev_err(&pdev->dev, "Failed to remap resource\n");
- return -ENOMEM;
- }
+ xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xway_info.membase[0]))
+ return PTR_ERR(xway_info.membase[0]);
match = of_match_device(xway_match, &pdev->dev);
if (match)
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
new file mode 100644
index 000000000000..c3340f54d2ad
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -0,0 +1,116 @@
+#
+# Renesas SH and SH Mobile PINCTRL drivers
+#
+
+if ARCH_SHMOBILE || SUPERH
+
+config PINCTRL_SH_PFC
+ # XXX move off the gpio dependency
+ depends on GENERIC_GPIO
+ select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
+ select PINMUX
+ select PINCONF
+ def_bool y
+ help
+ This enables pin control drivers for SH and SH Mobile platforms
+
+config GPIO_SH_PFC
+ bool "SuperH PFC GPIO support"
+ depends on PINCTRL_SH_PFC && GPIOLIB
+ help
+ This enables support for GPIOs within the SoC's pin function
+ controller.
+
+config PINCTRL_PFC_R8A7740
+ def_bool y
+ depends on ARCH_R8A7740
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A7779
+ def_bool y
+ depends on ARCH_R8A7779
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7203
+ def_bool y
+ depends on CPU_SUBTYPE_SH7203
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7264
+ def_bool y
+ depends on CPU_SUBTYPE_SH7264
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7269
+ def_bool y
+ depends on CPU_SUBTYPE_SH7269
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7372
+ def_bool y
+ depends on ARCH_SH7372
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH73A0
+ def_bool y
+ depends on ARCH_SH73A0
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7720
+ def_bool y
+ depends on CPU_SUBTYPE_SH7720
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7722
+ def_bool y
+ depends on CPU_SUBTYPE_SH7722
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7723
+ def_bool y
+ depends on CPU_SUBTYPE_SH7723
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7724
+ def_bool y
+ depends on CPU_SUBTYPE_SH7724
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7734
+ def_bool y
+ depends on CPU_SUBTYPE_SH7734
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7757
+ def_bool y
+ depends on CPU_SUBTYPE_SH7757
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7785
+ def_bool y
+ depends on CPU_SUBTYPE_SH7785
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7786
+ def_bool y
+ depends on CPU_SUBTYPE_SH7786
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SHX3
+ def_bool y
+ depends on CPU_SUBTYPE_SHX3
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+endif
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
new file mode 100644
index 000000000000..e8b9562c47e1
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -0,0 +1,21 @@
+sh-pfc-objs = core.o pinctrl.o
+ifeq ($(CONFIG_GPIO_SH_PFC),y)
+sh-pfc-objs += gpio.o
+endif
+obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
+obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
+obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
+obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
+obj-$(CONFIG_PINCTRL_PFC_SH7372) += pfc-sh7372.o
+obj-$(CONFIG_PINCTRL_PFC_SH73A0) += pfc-sh73a0.o
+obj-$(CONFIG_PINCTRL_PFC_SH7720) += pfc-sh7720.o
+obj-$(CONFIG_PINCTRL_PFC_SH7722) += pfc-sh7722.o
+obj-$(CONFIG_PINCTRL_PFC_SH7723) += pfc-sh7723.o
+obj-$(CONFIG_PINCTRL_PFC_SH7724) += pfc-sh7724.o
+obj-$(CONFIG_PINCTRL_PFC_SH7734) += pfc-sh7734.o
+obj-$(CONFIG_PINCTRL_PFC_SH7757) += pfc-sh7757.o
+obj-$(CONFIG_PINCTRL_PFC_SH7785) += pfc-sh7785.o
+obj-$(CONFIG_PINCTRL_PFC_SH7786) += pfc-sh7786.o
+obj-$(CONFIG_PINCTRL_PFC_SHX3) += pfc-shx3.o
diff --git a/drivers/sh/pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 68169373c98b..970ddff2b0b6 100644
--- a/drivers/sh/pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -8,78 +8,61 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-#define pr_fmt(fmt) "sh_pfc " KBUILD_MODNAME ": " fmt
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sh_pfc.h>
-#include <linux/module.h>
+#define DRV_NAME "sh-pfc"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pinctrl/machine.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
-static struct sh_pfc *sh_pfc __read_mostly;
-
-static inline bool sh_pfc_initialized(void)
-{
- return !!sh_pfc;
-}
-
-static void pfc_iounmap(struct sh_pfc *pfc)
-{
- int k;
-
- for (k = 0; k < pfc->num_resources; k++)
- if (pfc->window[k].virt)
- iounmap(pfc->window[k].virt);
-
- kfree(pfc->window);
- pfc->window = NULL;
-}
+#include "core.h"
-static int pfc_ioremap(struct sh_pfc *pfc)
+static int sh_pfc_ioremap(struct sh_pfc *pfc, struct platform_device *pdev)
{
struct resource *res;
int k;
- if (!pfc->num_resources)
+ if (pdev->num_resources == 0) {
+ pfc->num_windows = 0;
return 0;
+ }
- pfc->window = kzalloc(pfc->num_resources * sizeof(*pfc->window),
- GFP_NOWAIT);
+ pfc->window = devm_kzalloc(pfc->dev, pdev->num_resources *
+ sizeof(*pfc->window), GFP_NOWAIT);
if (!pfc->window)
- goto err1;
+ return -ENOMEM;
- for (k = 0; k < pfc->num_resources; k++) {
- res = pfc->resource + k;
+ pfc->num_windows = pdev->num_resources;
+
+ for (k = 0, res = pdev->resource; k < pdev->num_resources; k++, res++) {
WARN_ON(resource_type(res) != IORESOURCE_MEM);
pfc->window[k].phys = res->start;
pfc->window[k].size = resource_size(res);
- pfc->window[k].virt = ioremap_nocache(res->start,
- resource_size(res));
+ pfc->window[k].virt = devm_ioremap_nocache(pfc->dev, res->start,
+ resource_size(res));
if (!pfc->window[k].virt)
- goto err2;
+ return -ENOMEM;
}
return 0;
-
-err2:
- pfc_iounmap(pfc);
-err1:
- return -1;
}
-static void __iomem *pfc_phys_to_virt(struct sh_pfc *pfc,
- unsigned long address)
+static void __iomem *sh_pfc_phys_to_virt(struct sh_pfc *pfc,
+ unsigned long address)
{
- struct pfc_window *window;
+ struct sh_pfc_window *window;
int k;
/* scan through physical windows and convert address */
- for (k = 0; k < pfc->num_resources; k++) {
+ for (k = 0; k < pfc->num_windows; k++) {
window = pfc->window + k;
if (address < window->phys)
@@ -95,7 +78,7 @@ static void __iomem *pfc_phys_to_virt(struct sh_pfc *pfc,
return (void __iomem *)address;
}
-static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
+static int sh_pfc_enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
{
if (enum_id < r->begin)
return 0;
@@ -106,8 +89,8 @@ static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
return 1;
}
-static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width)
+static unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width)
{
switch (reg_width) {
case 8:
@@ -122,9 +105,8 @@ static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
return 0;
}
-static void gpio_write_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width,
- unsigned long data)
+static void sh_pfc_write_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width, unsigned long data)
{
switch (reg_width) {
case 8:
@@ -150,9 +132,8 @@ int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos)
pr_debug("read_bit: addr = %lx, pos = %ld, "
"r_width = %ld\n", dr->reg, pos, dr->reg_width);
- return (gpio_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
+ return (sh_pfc_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
}
-EXPORT_SYMBOL_GPL(sh_pfc_read_bit);
void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
unsigned long value)
@@ -170,20 +151,19 @@ void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
else
clear_bit(pos, &dr->reg_shadow);
- gpio_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
+ sh_pfc_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
}
-EXPORT_SYMBOL_GPL(sh_pfc_write_bit);
-
-static void config_reg_helper(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
- unsigned long in_pos,
- void __iomem **mapped_regp,
- unsigned long *maskp,
- unsigned long *posp)
+
+static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long in_pos,
+ void __iomem **mapped_regp,
+ unsigned long *maskp,
+ unsigned long *posp)
{
int k;
- *mapped_regp = pfc_phys_to_virt(pfc, crp->reg);
+ *mapped_regp = sh_pfc_phys_to_virt(pfc, crp->reg);
if (crp->field_width) {
*maskp = (1 << crp->field_width) - 1;
@@ -196,30 +176,30 @@ static void config_reg_helper(struct sh_pfc *pfc,
}
}
-static int read_config_reg(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
- unsigned long field)
+static int sh_pfc_read_config_reg(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field)
{
void __iomem *mapped_reg;
unsigned long mask, pos;
- config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
+ sh_pfc_config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
pr_debug("read_reg: addr = %lx, field = %ld, "
"r_width = %ld, f_width = %ld\n",
crp->reg, field, crp->reg_width, crp->field_width);
- return (gpio_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
+ return (sh_pfc_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
}
-static void write_config_reg(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
- unsigned long field, unsigned long value)
+static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field, unsigned long value)
{
void __iomem *mapped_reg;
unsigned long mask, pos, data;
- config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
+ sh_pfc_config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
"r_width = %ld, f_width = %ld\n",
@@ -228,34 +208,35 @@ static void write_config_reg(struct sh_pfc *pfc,
mask = ~(mask << pos);
value = value << pos;
- data = gpio_read_raw_reg(mapped_reg, crp->reg_width);
+ data = sh_pfc_read_raw_reg(mapped_reg, crp->reg_width);
data &= mask;
data |= value;
- if (pfc->unlock_reg)
- gpio_write_raw_reg(pfc_phys_to_virt(pfc, pfc->unlock_reg),
- 32, ~data);
+ if (pfc->info->unlock_reg)
+ sh_pfc_write_raw_reg(
+ sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
+ ~data);
- gpio_write_raw_reg(mapped_reg, crp->reg_width, data);
+ sh_pfc_write_raw_reg(mapped_reg, crp->reg_width, data);
}
-static int setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
+static int sh_pfc_setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
{
- struct pinmux_gpio *gpiop = &pfc->gpios[gpio];
+ struct pinmux_gpio *gpiop = &pfc->info->gpios[gpio];
struct pinmux_data_reg *data_reg;
int k, n;
- if (!enum_in_range(gpiop->enum_id, &pfc->data))
+ if (!sh_pfc_enum_in_range(gpiop->enum_id, &pfc->info->data))
return -1;
k = 0;
while (1) {
- data_reg = pfc->data_regs + k;
+ data_reg = pfc->info->data_regs + k;
if (!data_reg->reg_width)
break;
- data_reg->mapped_reg = pfc_phys_to_virt(pfc, data_reg->reg);
+ data_reg->mapped_reg = sh_pfc_phys_to_virt(pfc, data_reg->reg);
for (n = 0; n < data_reg->reg_width; n++) {
if (data_reg->enum_ids[n] == gpiop->enum_id) {
@@ -274,23 +255,23 @@ static int setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
return -1;
}
-static void setup_data_regs(struct sh_pfc *pfc)
+static void sh_pfc_setup_data_regs(struct sh_pfc *pfc)
{
struct pinmux_data_reg *drp;
int k;
- for (k = pfc->first_gpio; k <= pfc->last_gpio; k++)
- setup_data_reg(pfc, k);
+ for (k = pfc->info->first_gpio; k <= pfc->info->last_gpio; k++)
+ sh_pfc_setup_data_reg(pfc, k);
k = 0;
while (1) {
- drp = pfc->data_regs + k;
+ drp = pfc->info->data_regs + k;
if (!drp->reg_width)
break;
- drp->reg_shadow = gpio_read_raw_reg(drp->mapped_reg,
- drp->reg_width);
+ drp->reg_shadow = sh_pfc_read_raw_reg(drp->mapped_reg,
+ drp->reg_width);
k++;
}
}
@@ -298,24 +279,22 @@ static void setup_data_regs(struct sh_pfc *pfc)
int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
struct pinmux_data_reg **drp, int *bitp)
{
- struct pinmux_gpio *gpiop = &pfc->gpios[gpio];
+ struct pinmux_gpio *gpiop = &pfc->info->gpios[gpio];
int k, n;
- if (!enum_in_range(gpiop->enum_id, &pfc->data))
+ if (!sh_pfc_enum_in_range(gpiop->enum_id, &pfc->info->data))
return -1;
k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
- *drp = pfc->data_regs + k;
+ *drp = pfc->info->data_regs + k;
*bitp = n;
return 0;
}
-EXPORT_SYMBOL_GPL(sh_pfc_get_data_reg);
-static int get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
- struct pinmux_cfg_reg **crp,
- int *fieldp, int *valuep,
- unsigned long **cntp)
+static int sh_pfc_get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
+ struct pinmux_cfg_reg **crp, int *fieldp,
+ int *valuep, unsigned long **cntp)
{
struct pinmux_cfg_reg *config_reg;
unsigned long r_width, f_width, curr_width, ncomb;
@@ -323,7 +302,7 @@ static int get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
k = 0;
while (1) {
- config_reg = pfc->cfg_regs + k;
+ config_reg = pfc->info->cfg_regs + k;
r_width = config_reg->reg_width;
f_width = config_reg->field_width;
@@ -361,12 +340,12 @@ static int get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
pinmux_enum_t *enum_idp)
{
- pinmux_enum_t enum_id = pfc->gpios[gpio].enum_id;
- pinmux_enum_t *data = pfc->gpio_data;
+ pinmux_enum_t enum_id = pfc->info->gpios[gpio].enum_id;
+ pinmux_enum_t *data = pfc->info->gpio_data;
int k;
- if (!enum_in_range(enum_id, &pfc->data)) {
- if (!enum_in_range(enum_id, &pfc->mark)) {
+ if (!sh_pfc_enum_in_range(enum_id, &pfc->info->data)) {
+ if (!sh_pfc_enum_in_range(enum_id, &pfc->info->mark)) {
pr_err("non data/mark enum_id for gpio %d\n", gpio);
return -1;
}
@@ -377,7 +356,7 @@ int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
return pos + 1;
}
- for (k = 0; k < pfc->gpio_data_size; k++) {
+ for (k = 0; k < pfc->info->gpio_data_size; k++) {
if (data[k] == enum_id) {
*enum_idp = data[k + 1];
return k + 1;
@@ -387,7 +366,6 @@ int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
return -1;
}
-EXPORT_SYMBOL_GPL(sh_pfc_gpio_to_enum);
int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
int cfg_mode)
@@ -405,19 +383,19 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
break;
case PINMUX_TYPE_OUTPUT:
- range = &pfc->output;
+ range = &pfc->info->output;
break;
case PINMUX_TYPE_INPUT:
- range = &pfc->input;
+ range = &pfc->info->input;
break;
case PINMUX_TYPE_INPUT_PULLUP:
- range = &pfc->input_pu;
+ range = &pfc->info->input_pu;
break;
case PINMUX_TYPE_INPUT_PULLDOWN:
- range = &pfc->input_pd;
+ range = &pfc->info->input_pd;
break;
default:
@@ -437,7 +415,7 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
break;
/* first check if this is a function enum */
- in_range = enum_in_range(enum_id, &pfc->function);
+ in_range = sh_pfc_enum_in_range(enum_id, &pfc->info->function);
if (!in_range) {
/* not a function enum */
if (range) {
@@ -449,7 +427,7 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
* for this case we only allow function enums
* and the enums that match the other range.
*/
- in_range = enum_in_range(enum_id, range);
+ in_range = sh_pfc_enum_in_range(enum_id, range);
/*
* special case pass through for fixed
@@ -474,19 +452,19 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
if (!in_range)
continue;
- if (get_config_reg(pfc, enum_id, &cr,
- &field, &value, &cntp) != 0)
+ if (sh_pfc_get_config_reg(pfc, enum_id, &cr,
+ &field, &value, &cntp) != 0)
goto out_err;
switch (cfg_mode) {
case GPIO_CFG_DRYRUN:
if (!*cntp ||
- (read_config_reg(pfc, cr, field) != value))
+ (sh_pfc_read_config_reg(pfc, cr, field) != value))
continue;
break;
case GPIO_CFG_REQ:
- write_config_reg(pfc, cr, field, value);
+ sh_pfc_write_config_reg(pfc, cr, field, value);
*cntp = *cntp + 1;
break;
@@ -500,11 +478,11 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
out_err:
return -1;
}
-EXPORT_SYMBOL_GPL(sh_pfc_config_gpio);
-int register_sh_pfc(struct sh_pfc *pfc)
+static int sh_pfc_probe(struct platform_device *pdev)
{
- int (*initroutine)(struct sh_pfc *) = NULL;
+ struct sh_pfc_soc_info *info;
+ struct sh_pfc *pfc;
int ret;
/*
@@ -512,61 +490,146 @@ int register_sh_pfc(struct sh_pfc *pfc)
*/
BUILD_BUG_ON(PINMUX_FLAG_TYPE > ((1 << PINMUX_FLAG_DBIT_SHIFT) - 1));
- if (sh_pfc)
- return -EBUSY;
+ info = pdev->id_entry->driver_data
+ ? (void *)pdev->id_entry->driver_data : pdev->dev.platform_data;
+ if (info == NULL)
+ return -ENODEV;
+
+ pfc = devm_kzalloc(&pdev->dev, sizeof(*pfc), GFP_KERNEL);
+ if (pfc == NULL)
+ return -ENOMEM;
+
+ pfc->info = info;
+ pfc->dev = &pdev->dev;
- ret = pfc_ioremap(pfc);
+ ret = sh_pfc_ioremap(pfc, pdev);
if (unlikely(ret < 0))
return ret;
spin_lock_init(&pfc->lock);
pinctrl_provide_dummies();
- setup_data_regs(pfc);
-
- sh_pfc = pfc;
+ sh_pfc_setup_data_regs(pfc);
/*
* Initialize pinctrl bindings first
*/
- initroutine = symbol_request(sh_pfc_register_pinctrl);
- if (initroutine) {
- ret = (*initroutine)(pfc);
- symbol_put_addr(initroutine);
-
- if (unlikely(ret != 0))
- goto err;
- } else {
- pr_err("failed to initialize pinctrl bindings\n");
- goto err;
- }
+ ret = sh_pfc_register_pinctrl(pfc);
+ if (unlikely(ret != 0))
+ return ret;
+#ifdef CONFIG_GPIO_SH_PFC
/*
* Then the GPIO chip
*/
- initroutine = symbol_request(sh_pfc_register_gpiochip);
- if (initroutine) {
- ret = (*initroutine)(pfc);
- symbol_put_addr(initroutine);
-
+ ret = sh_pfc_register_gpiochip(pfc);
+ if (unlikely(ret != 0)) {
/*
* If the GPIO chip fails to come up we still leave the
* PFC state as it is, given that there are already
* extant users of it that have succeeded by this point.
*/
- if (unlikely(ret != 0)) {
- pr_notice("failed to init GPIO chip, ignoring...\n");
- ret = 0;
- }
+ pr_notice("failed to init GPIO chip, ignoring...\n");
}
+#endif
+
+ platform_set_drvdata(pdev, pfc);
- pr_info("%s support registered\n", pfc->name);
+ pr_info("%s support registered\n", info->name);
return 0;
+}
-err:
- pfc_iounmap(pfc);
- sh_pfc = NULL;
+static int sh_pfc_remove(struct platform_device *pdev)
+{
+ struct sh_pfc *pfc = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_GPIO_SH_PFC
+ sh_pfc_unregister_gpiochip(pfc);
+#endif
+ sh_pfc_unregister_pinctrl(pfc);
+
+ platform_set_drvdata(pdev, NULL);
- return ret;
+ return 0;
}
+
+static const struct platform_device_id sh_pfc_id_table[] = {
+#ifdef CONFIG_PINCTRL_PFC_R8A7740
+ { "pfc-r8a7740", (kernel_ulong_t)&r8a7740_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7779
+ { "pfc-r8a7779", (kernel_ulong_t)&r8a7779_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7203
+ { "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7264
+ { "pfc-sh7264", (kernel_ulong_t)&sh7264_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7269
+ { "pfc-sh7269", (kernel_ulong_t)&sh7269_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7372
+ { "pfc-sh7372", (kernel_ulong_t)&sh7372_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH73A0
+ { "pfc-sh73a0", (kernel_ulong_t)&sh73a0_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7720
+ { "pfc-sh7720", (kernel_ulong_t)&sh7720_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7722
+ { "pfc-sh7722", (kernel_ulong_t)&sh7722_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7723
+ { "pfc-sh7723", (kernel_ulong_t)&sh7723_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7724
+ { "pfc-sh7724", (kernel_ulong_t)&sh7724_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7734
+ { "pfc-sh7734", (kernel_ulong_t)&sh7734_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7757
+ { "pfc-sh7757", (kernel_ulong_t)&sh7757_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7785
+ { "pfc-sh7785", (kernel_ulong_t)&sh7785_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7786
+ { "pfc-sh7786", (kernel_ulong_t)&sh7786_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SHX3
+ { "pfc-shx3", (kernel_ulong_t)&shx3_pinmux_info },
+#endif
+ { "sh-pfc", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, sh_pfc_id_table);
+
+static struct platform_driver sh_pfc_driver = {
+ .probe = sh_pfc_probe,
+ .remove = sh_pfc_remove,
+ .id_table = sh_pfc_id_table,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_pfc_init(void)
+{
+ return platform_driver_register(&sh_pfc_driver);
+}
+postcore_initcall(sh_pfc_init);
+
+static void __exit sh_pfc_exit(void)
+{
+ platform_driver_unregister(&sh_pfc_driver);
+}
+module_exit(sh_pfc_exit);
+
+MODULE_AUTHOR("Magnus Damm, Paul Mundt, Laurent Pinchart");
+MODULE_DESCRIPTION("Pin Control and GPIO driver for SuperH pin function controller");
+MODULE_LICENSE("GPL v2");
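
[Editor's note] With register_sh_pfc() gone, each PFC instance is now expected to be instantiated as a platform device whose name matches an entry in sh_pfc_id_table above. A minimal sketch of what the corresponding machine setup code could look like follows; the device name, base address and window size are illustrative placeholders and are not taken from this patch.

	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <linux/kernel.h>
	#include <linux/platform_device.h>

	/* Hypothetical board/SoC setup code, not part of this diff: registering
	 * a device named after an id_table entry lets sh_pfc_probe() pick up
	 * the matching sh_pfc_soc_info via driver_data and ioremap the given
	 * memory resource. */
	static struct resource pfc_resources[] = {
		DEFINE_RES_MEM(0xe6050000, 0x8000),	/* example window only */
	};

	static struct platform_device pfc_device = {
		.name		= "pfc-sh7372",		/* must match sh_pfc_id_table */
		.id		= -1,
		.resource	= pfc_resources,
		.num_resources	= ARRAY_SIZE(pfc_resources),
	};

	static int __init board_pinmux_init(void)
	{
		return platform_device_register(&pfc_device);
	}
	arch_initcall(board_pinmux_init);
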
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
new file mode 100644
index 000000000000..ba7c33c33599
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -0,0 +1,72 @@
+/*
+ * SuperH Pin Function Controller support.
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __SH_PFC_CORE_H__
+#define __SH_PFC_CORE_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "sh_pfc.h"
+
+struct sh_pfc_window {
+ phys_addr_t phys;
+ void __iomem *virt;
+ unsigned long size;
+};
+
+struct sh_pfc_chip;
+struct sh_pfc_pinctrl;
+
+struct sh_pfc {
+ struct device *dev;
+ struct sh_pfc_soc_info *info;
+ spinlock_t lock;
+
+ unsigned int num_windows;
+ struct sh_pfc_window *window;
+
+ struct sh_pfc_chip *gpio;
+ struct sh_pfc_pinctrl *pinctrl;
+};
+
+int sh_pfc_register_gpiochip(struct sh_pfc *pfc);
+int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc);
+
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
+int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc);
+
+int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos);
+void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
+ unsigned long value);
+int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
+ struct pinmux_data_reg **drp, int *bitp);
+int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
+ pinmux_enum_t *enum_idp);
+int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
+ int cfg_mode);
+
+extern struct sh_pfc_soc_info r8a7740_pinmux_info;
+extern struct sh_pfc_soc_info r8a7779_pinmux_info;
+extern struct sh_pfc_soc_info sh7203_pinmux_info;
+extern struct sh_pfc_soc_info sh7264_pinmux_info;
+extern struct sh_pfc_soc_info sh7269_pinmux_info;
+extern struct sh_pfc_soc_info sh7372_pinmux_info;
+extern struct sh_pfc_soc_info sh73a0_pinmux_info;
+extern struct sh_pfc_soc_info sh7720_pinmux_info;
+extern struct sh_pfc_soc_info sh7722_pinmux_info;
+extern struct sh_pfc_soc_info sh7723_pinmux_info;
+extern struct sh_pfc_soc_info sh7724_pinmux_info;
+extern struct sh_pfc_soc_info sh7734_pinmux_info;
+extern struct sh_pfc_soc_info sh7757_pinmux_info;
+extern struct sh_pfc_soc_info sh7785_pinmux_info;
+extern struct sh_pfc_soc_info sh7786_pinmux_info;
+extern struct sh_pfc_soc_info shx3_pinmux_info;
+
+#endif /* __SH_PFC_CORE_H__ */
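
[Editor's note] The externs above are the hook for per-SoC data files such as the pfc-r8a7740.c added below. As a rough skeleton only (assuming the sh_pfc_soc_info fields referenced from core.c; the names and tables here are illustrative and not from this patch), a new SoC would provide something like:

	#include "sh_pfc.h"

	/* Hypothetical per-SoC data file skeleton; real tables are built with
	 * the PINMUX_* helper macros as in pfc-r8a7740.c below. */
	static struct pinmux_cfg_reg pinmux_config_regs[] = {
		/* function-select register descriptions; zero reg_width terminates */
		{ },
	};

	static struct pinmux_data_reg pinmux_data_regs[] = {
		/* data register descriptions; zero reg_width terminates */
		{ },
	};

	struct sh_pfc_soc_info example_pinmux_info = {
		.name		= "example_pfc",
		.cfg_regs	= pinmux_config_regs,
		.data_regs	= pinmux_data_regs,
		/* .gpios, .gpio_data, first/last_gpio, ranges, unlock_reg, ... */
	};
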
diff --git a/drivers/sh/pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 038fa071382a..a535075c8b69 100644
--- a/drivers/sh/pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -8,16 +8,18 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-#define pr_fmt(fmt) "sh_pfc " KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
+#define pr_fmt(fmt) KBUILD_MODNAME " gpio: " fmt
+
+#include <linux/device.h>
#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
+#include <linux/init.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/sh_pfc.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
struct sh_pfc_chip {
struct sh_pfc *pfc;
@@ -49,7 +51,7 @@ static void sh_gpio_set_value(struct sh_pfc *pfc, unsigned gpio, int value)
struct pinmux_data_reg *dr = NULL;
int bit = 0;
- if (!pfc || sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ if (sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
BUG();
else
sh_pfc_write_bit(dr, bit, value);
@@ -60,7 +62,7 @@ static int sh_gpio_get_value(struct sh_pfc *pfc, unsigned gpio)
struct pinmux_data_reg *dr = NULL;
int bit = 0;
- if (!pfc || sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ if (sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
return -EINVAL;
return sh_pfc_read_bit(dr, bit);
@@ -103,11 +105,11 @@ static int sh_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
if (pos <= 0 || !enum_id)
break;
- for (i = 0; i < pfc->gpio_irq_size; i++) {
- enum_ids = pfc->gpio_irq[i].enum_ids;
+ for (i = 0; i < pfc->info->gpio_irq_size; i++) {
+ enum_ids = pfc->info->gpio_irq[i].enum_ids;
for (k = 0; enum_ids[k]; k++) {
if (enum_ids[k] == enum_id)
- return pfc->gpio_irq[i].irq;
+ return pfc->info->gpio_irq[i].irq;
}
}
}
@@ -128,12 +130,12 @@ static void sh_pfc_gpio_setup(struct sh_pfc_chip *chip)
gc->set = sh_gpio_set;
gc->to_irq = sh_gpio_to_irq;
- WARN_ON(pfc->first_gpio != 0); /* needs testing */
+ WARN_ON(pfc->info->first_gpio != 0); /* needs testing */
- gc->label = pfc->name;
+ gc->label = pfc->info->name;
gc->owner = THIS_MODULE;
- gc->base = pfc->first_gpio;
- gc->ngpio = (pfc->last_gpio - pfc->first_gpio) + 1;
+ gc->base = pfc->info->first_gpio;
+ gc->ngpio = (pfc->info->last_gpio - pfc->info->first_gpio) + 1;
}
int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
@@ -141,7 +143,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
struct sh_pfc_chip *chip;
int ret;
- chip = kzalloc(sizeof(struct sh_pfc_chip), GFP_KERNEL);
+ chip = devm_kzalloc(pfc->dev, sizeof(*chip), GFP_KERNEL);
if (unlikely(!chip))
return -ENOMEM;
@@ -151,90 +153,26 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
ret = gpiochip_add(&chip->gpio_chip);
if (unlikely(ret < 0))
- kfree(chip);
-
- pr_info("%s handling gpio %d -> %d\n",
- pfc->name, pfc->first_gpio, pfc->last_gpio);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(sh_pfc_register_gpiochip);
-
-static int sh_pfc_gpio_match(struct gpio_chip *gc, void *data)
-{
- return !!strstr(gc->label, data);
-}
-
-static int __devinit sh_pfc_gpio_probe(struct platform_device *pdev)
-{
- struct sh_pfc_chip *chip;
- struct gpio_chip *gc;
-
- gc = gpiochip_find("_pfc", sh_pfc_gpio_match);
- if (unlikely(!gc)) {
- pr_err("Cant find gpio chip\n");
- return -ENODEV;
- }
+ return ret;
- chip = gpio_to_pfc_chip(gc);
- platform_set_drvdata(pdev, chip);
+ pfc->gpio = chip;
- pr_info("attaching to GPIO chip %s\n", chip->pfc->name);
+ pr_info("%s handling gpio %d -> %d\n",
+ pfc->info->name, pfc->info->first_gpio,
+ pfc->info->last_gpio);
return 0;
}
-static int __devexit sh_pfc_gpio_remove(struct platform_device *pdev)
+int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc)
{
- struct sh_pfc_chip *chip = platform_get_drvdata(pdev);
+ struct sh_pfc_chip *chip = pfc->gpio;
int ret;
ret = gpiochip_remove(&chip->gpio_chip);
if (unlikely(ret < 0))
return ret;
- kfree(chip);
+ pfc->gpio = NULL;
return 0;
}
-
-static struct platform_driver sh_pfc_gpio_driver = {
- .probe = sh_pfc_gpio_probe,
- .remove = __devexit_p(sh_pfc_gpio_remove),
- .driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- },
-};
-
-static struct platform_device sh_pfc_gpio_device = {
- .name = KBUILD_MODNAME,
- .id = -1,
-};
-
-static int __init sh_pfc_gpio_init(void)
-{
- int rc;
-
- rc = platform_driver_register(&sh_pfc_gpio_driver);
- if (likely(!rc)) {
- rc = platform_device_register(&sh_pfc_gpio_device);
- if (unlikely(rc))
- platform_driver_unregister(&sh_pfc_gpio_driver);
- }
-
- return rc;
-}
-
-static void __exit sh_pfc_gpio_exit(void)
-{
- platform_device_unregister(&sh_pfc_gpio_device);
- platform_driver_unregister(&sh_pfc_gpio_driver);
-}
-
-module_init(sh_pfc_gpio_init);
-module_exit(sh_pfc_gpio_exit);
-
-MODULE_AUTHOR("Magnus Damm, Paul Mundt");
-MODULE_DESCRIPTION("GPIO driver for SuperH pin function controller");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:pfc-gpio");
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
new file mode 100644
index 000000000000..214788c4a606
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -0,0 +1,2612 @@
+/*
+ * R8A7740 processor support
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <mach/r8a7740.h>
+#include <mach/irqs.h>
+
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
+ PORT_10(fn, pfx##10, sfx), PORT_90(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##20, sfx), \
+ PORT_1(fn, pfx##210, sfx), PORT_1(fn, pfx##211, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ /* PORT0_DATA -> PORT211_DATA */
+ PINMUX_DATA_BEGIN,
+ PORT_ALL(DATA),
+ PINMUX_DATA_END,
+
+ /* PORT0_IN -> PORT211_IN */
+ PINMUX_INPUT_BEGIN,
+ PORT_ALL(IN),
+ PINMUX_INPUT_END,
+
+ /* PORT0_IN_PU -> PORT211_IN_PU */
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PORT_ALL(IN_PU),
+ PINMUX_INPUT_PULLUP_END,
+
+ /* PORT0_IN_PD -> PORT211_IN_PD */
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PORT_ALL(IN_PD),
+ PINMUX_INPUT_PULLDOWN_END,
+
+ /* PORT0_OUT -> PORT211_OUT */
+ PINMUX_OUTPUT_BEGIN,
+ PORT_ALL(OUT),
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT211_FN_IN */
+ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT211_FN_OUT */
+ PORT_ALL(FN0), /* PORT0_FN0 -> PORT211_FN0 */
+ PORT_ALL(FN1), /* PORT0_FN1 -> PORT211_FN1 */
+ PORT_ALL(FN2), /* PORT0_FN2 -> PORT211_FN2 */
+ PORT_ALL(FN3), /* PORT0_FN3 -> PORT211_FN3 */
+ PORT_ALL(FN4), /* PORT0_FN4 -> PORT211_FN4 */
+ PORT_ALL(FN5), /* PORT0_FN5 -> PORT211_FN5 */
+ PORT_ALL(FN6), /* PORT0_FN6 -> PORT211_FN6 */
+ PORT_ALL(FN7), /* PORT0_FN7 -> PORT211_FN7 */
+
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_30_0, MSEL1CR_30_1,
+ MSEL1CR_29_0, MSEL1CR_29_1,
+ MSEL1CR_28_0, MSEL1CR_28_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_26_0, MSEL1CR_26_1,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ MSEL1CR_9_0, MSEL1CR_9_1,
+ MSEL1CR_7_0, MSEL1CR_7_1,
+ MSEL1CR_6_0, MSEL1CR_6_1,
+ MSEL1CR_5_0, MSEL1CR_5_1,
+ MSEL1CR_4_0, MSEL1CR_4_1,
+ MSEL1CR_3_0, MSEL1CR_3_1,
+ MSEL1CR_2_0, MSEL1CR_2_1,
+ MSEL1CR_0_0, MSEL1CR_0_1,
+
+ MSEL3CR_15_0, MSEL3CR_15_1, /* Trace / Debug ? */
+ MSEL3CR_6_0, MSEL3CR_6_1,
+
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ MSEL4CR_6_0, MSEL4CR_6_1,
+ MSEL4CR_4_0, MSEL4CR_4_1,
+ MSEL4CR_1_0, MSEL4CR_1_1,
+
+ MSEL5CR_31_0, MSEL5CR_31_1, /* irq/fiq output */
+ MSEL5CR_30_0, MSEL5CR_30_1,
+ MSEL5CR_29_0, MSEL5CR_29_1,
+ MSEL5CR_27_0, MSEL5CR_27_1,
+ MSEL5CR_25_0, MSEL5CR_25_1,
+ MSEL5CR_23_0, MSEL5CR_23_1,
+ MSEL5CR_21_0, MSEL5CR_21_1,
+ MSEL5CR_19_0, MSEL5CR_19_1,
+ MSEL5CR_17_0, MSEL5CR_17_1,
+ MSEL5CR_15_0, MSEL5CR_15_1,
+ MSEL5CR_14_0, MSEL5CR_14_1,
+ MSEL5CR_13_0, MSEL5CR_13_1,
+ MSEL5CR_12_0, MSEL5CR_12_1,
+ MSEL5CR_11_0, MSEL5CR_11_1,
+ MSEL5CR_10_0, MSEL5CR_10_1,
+ MSEL5CR_8_0, MSEL5CR_8_1,
+ MSEL5CR_7_0, MSEL5CR_7_1,
+ MSEL5CR_6_0, MSEL5CR_6_1,
+ MSEL5CR_5_0, MSEL5CR_5_1,
+ MSEL5CR_4_0, MSEL5CR_4_1,
+ MSEL5CR_3_0, MSEL5CR_3_1,
+ MSEL5CR_2_0, MSEL5CR_2_1,
+ MSEL5CR_0_0, MSEL5CR_0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ /* IRQ */
+ IRQ0_PORT2_MARK, IRQ0_PORT13_MARK,
+ IRQ1_MARK,
+ IRQ2_PORT11_MARK, IRQ2_PORT12_MARK,
+ IRQ3_PORT10_MARK, IRQ3_PORT14_MARK,
+ IRQ4_PORT15_MARK, IRQ4_PORT172_MARK,
+ IRQ5_PORT0_MARK, IRQ5_PORT1_MARK,
+ IRQ6_PORT121_MARK, IRQ6_PORT173_MARK,
+ IRQ7_PORT120_MARK, IRQ7_PORT209_MARK,
+ IRQ8_MARK,
+ IRQ9_PORT118_MARK, IRQ9_PORT210_MARK,
+ IRQ10_MARK,
+ IRQ11_MARK,
+ IRQ12_PORT42_MARK, IRQ12_PORT97_MARK,
+ IRQ13_PORT64_MARK, IRQ13_PORT98_MARK,
+ IRQ14_PORT63_MARK, IRQ14_PORT99_MARK,
+ IRQ15_PORT62_MARK, IRQ15_PORT100_MARK,
+ IRQ16_PORT68_MARK, IRQ16_PORT211_MARK,
+ IRQ17_MARK,
+ IRQ18_MARK,
+ IRQ19_MARK,
+ IRQ20_MARK,
+ IRQ21_MARK,
+ IRQ22_MARK,
+ IRQ23_MARK,
+ IRQ24_MARK,
+ IRQ25_MARK,
+ IRQ26_PORT58_MARK, IRQ26_PORT81_MARK,
+ IRQ27_PORT57_MARK, IRQ27_PORT168_MARK,
+ IRQ28_PORT56_MARK, IRQ28_PORT169_MARK,
+ IRQ29_PORT50_MARK, IRQ29_PORT170_MARK,
+ IRQ30_PORT49_MARK, IRQ30_PORT171_MARK,
+ IRQ31_PORT41_MARK, IRQ31_PORT167_MARK,
+
+ /* Function */
+
+ /* DBGT */
+ DBGMDT2_MARK, DBGMDT1_MARK, DBGMDT0_MARK,
+ DBGMD10_MARK, DBGMD11_MARK, DBGMD20_MARK,
+ DBGMD21_MARK,
+
+ /* FSI-A */
+ FSIAISLD_PORT0_MARK, /* FSIAISLD Port 0/5 */
+ FSIAISLD_PORT5_MARK,
+ FSIASPDIF_PORT9_MARK, /* FSIASPDIF Port 9/18 */
+ FSIASPDIF_PORT18_MARK,
+ FSIAOSLD1_MARK, FSIAOSLD2_MARK, FSIAOLR_MARK,
+ FSIAOBT_MARK, FSIAOSLD_MARK, FSIAOMC_MARK,
+ FSIACK_MARK, FSIAILR_MARK, FSIAIBT_MARK,
+
+ /* FSI-B */
+ FSIBCK_MARK,
+
+ /* FMSI */
+ FMSISLD_PORT1_MARK, /* FMSISLD Port 1/6 */
+ FMSISLD_PORT6_MARK,
+ FMSIILR_MARK, FMSIIBT_MARK, FMSIOLR_MARK, FMSIOBT_MARK,
+ FMSICK_MARK, FMSOILR_MARK, FMSOIBT_MARK, FMSOOLR_MARK,
+ FMSOOBT_MARK, FMSOSLD_MARK, FMSOCK_MARK,
+
+ /* SCIFA0 */
+ SCIFA0_SCK_MARK, SCIFA0_CTS_MARK, SCIFA0_RTS_MARK,
+ SCIFA0_RXD_MARK, SCIFA0_TXD_MARK,
+
+ /* SCIFA1 */
+ SCIFA1_CTS_MARK, SCIFA1_SCK_MARK, SCIFA1_RXD_MARK,
+ SCIFA1_TXD_MARK, SCIFA1_RTS_MARK,
+
+ /* SCIFA2 */
+ SCIFA2_SCK_PORT22_MARK, /* SCIFA2_SCK Port 22/199 */
+ SCIFA2_SCK_PORT199_MARK,
+ SCIFA2_RXD_MARK, SCIFA2_TXD_MARK,
+ SCIFA2_CTS_MARK, SCIFA2_RTS_MARK,
+
+ /* SCIFA3 */
+ SCIFA3_RTS_PORT105_MARK, /* MSEL5CR_8_0 */
+ SCIFA3_SCK_PORT116_MARK,
+ SCIFA3_CTS_PORT117_MARK,
+ SCIFA3_RXD_PORT174_MARK,
+ SCIFA3_TXD_PORT175_MARK,
+
+ SCIFA3_RTS_PORT161_MARK, /* MSEL5CR_8_1 */
+ SCIFA3_SCK_PORT158_MARK,
+ SCIFA3_CTS_PORT162_MARK,
+ SCIFA3_RXD_PORT159_MARK,
+ SCIFA3_TXD_PORT160_MARK,
+
+ /* SCIFA4 */
+ SCIFA4_RXD_PORT12_MARK, /* MSEL5CR[12:11] = 00 */
+ SCIFA4_TXD_PORT13_MARK,
+
+ SCIFA4_RXD_PORT204_MARK, /* MSEL5CR[12:11] = 01 */
+ SCIFA4_TXD_PORT203_MARK,
+
+ SCIFA4_RXD_PORT94_MARK, /* MSEL5CR[12:11] = 10 */
+ SCIFA4_TXD_PORT93_MARK,
+
+ SCIFA4_SCK_PORT21_MARK, /* SCIFA4_SCK Port 21/205 */
+ SCIFA4_SCK_PORT205_MARK,
+
+ /* SCIFA5 */
+ SCIFA5_TXD_PORT20_MARK, /* MSEL5CR[15:14] = 00 */
+ SCIFA5_RXD_PORT10_MARK,
+
+ SCIFA5_RXD_PORT207_MARK, /* MSEL5CR[15:14] = 01 */
+ SCIFA5_TXD_PORT208_MARK,
+
+ SCIFA5_TXD_PORT91_MARK, /* MSEL5CR[15:14] = 10 */
+ SCIFA5_RXD_PORT92_MARK,
+
+ SCIFA5_SCK_PORT23_MARK, /* SCIFA5_SCK Port 23/206 */
+ SCIFA5_SCK_PORT206_MARK,
+
+ /* SCIFA6 */
+ SCIFA6_SCK_MARK, SCIFA6_RXD_MARK, SCIFA6_TXD_MARK,
+
+ /* SCIFA7 */
+ SCIFA7_TXD_MARK, SCIFA7_RXD_MARK,
+
+ /* SCIFAB */
+ SCIFB_SCK_PORT190_MARK, /* MSEL5CR_17_0 */
+ SCIFB_RXD_PORT191_MARK,
+ SCIFB_TXD_PORT192_MARK,
+ SCIFB_RTS_PORT186_MARK,
+ SCIFB_CTS_PORT187_MARK,
+
+ SCIFB_SCK_PORT2_MARK, /* MSEL5CR_17_1 */
+ SCIFB_RXD_PORT3_MARK,
+ SCIFB_TXD_PORT4_MARK,
+ SCIFB_RTS_PORT172_MARK,
+ SCIFB_CTS_PORT173_MARK,
+
+ /* LCD0 */
+ LCDC0_SELECT_MARK,
+
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
+ LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
+ LCD0_D16_MARK, LCD0_D17_MARK,
+ LCD0_DON_MARK, LCD0_VCPWC_MARK, LCD0_VEPWC_MARK,
+ LCD0_DCK_MARK, LCD0_VSYN_MARK, /* for RGB */
+ LCD0_HSYN_MARK, LCD0_DISP_MARK, /* for RGB */
+ LCD0_WR_MARK, LCD0_RD_MARK, /* for SYS */
+ LCD0_CS_MARK, LCD0_RS_MARK, /* for SYS */
+
+ LCD0_D21_PORT158_MARK, LCD0_D23_PORT159_MARK, /* MSEL5CR_6_1 */
+ LCD0_D22_PORT160_MARK, LCD0_D20_PORT161_MARK,
+ LCD0_D19_PORT162_MARK, LCD0_D18_PORT163_MARK,
+ LCD0_LCLK_PORT165_MARK,
+
+ LCD0_D18_PORT40_MARK, LCD0_D22_PORT0_MARK, /* MSEL5CR_6_0 */
+ LCD0_D23_PORT1_MARK, LCD0_D21_PORT2_MARK,
+ LCD0_D20_PORT3_MARK, LCD0_D19_PORT4_MARK,
+ LCD0_LCLK_PORT102_MARK,
+
+ /* LCD1 */
+ LCDC1_SELECT_MARK,
+
+ LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
+ LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
+ LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK,
+ LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK,
+ LCD1_D16_MARK, LCD1_D17_MARK, LCD1_D18_MARK, LCD1_D19_MARK,
+ LCD1_D20_MARK, LCD1_D21_MARK, LCD1_D22_MARK, LCD1_D23_MARK,
+ LCD1_DON_MARK, LCD1_VCPWC_MARK,
+ LCD1_LCLK_MARK, LCD1_VEPWC_MARK,
+
+ LCD1_DCK_MARK, LCD1_VSYN_MARK, /* for RGB */
+ LCD1_HSYN_MARK, LCD1_DISP_MARK, /* for RGB */
+ LCD1_RS_MARK, LCD1_CS_MARK, /* for SYS */
+ LCD1_RD_MARK, LCD1_WR_MARK, /* for SYS */
+
+ /* RSPI */
+ RSPI_SSL0_A_MARK, RSPI_SSL1_A_MARK, RSPI_SSL2_A_MARK,
+ RSPI_SSL3_A_MARK, RSPI_CK_A_MARK, RSPI_MOSI_A_MARK,
+ RSPI_MISO_A_MARK,
+
+ /* VIO CKO */
+ VIO_CKO1_MARK, /* needs fixup */
+ VIO_CKO2_MARK,
+ VIO_CKO_1_MARK,
+ VIO_CKO_MARK,
+
+ /* VIO0 */
+ VIO0_D0_MARK, VIO0_D1_MARK, VIO0_D2_MARK, VIO0_D3_MARK,
+ VIO0_D4_MARK, VIO0_D5_MARK, VIO0_D6_MARK, VIO0_D7_MARK,
+ VIO0_D8_MARK, VIO0_D9_MARK, VIO0_D10_MARK, VIO0_D11_MARK,
+ VIO0_D12_MARK, VIO0_VD_MARK, VIO0_HD_MARK, VIO0_CLK_MARK,
+ VIO0_FIELD_MARK,
+
+ VIO0_D13_PORT26_MARK, /* MSEL5CR_27_0 */
+ VIO0_D14_PORT25_MARK,
+ VIO0_D15_PORT24_MARK,
+
+ VIO0_D13_PORT22_MARK, /* MSEL5CR_27_1 */
+ VIO0_D14_PORT95_MARK,
+ VIO0_D15_PORT96_MARK,
+
+ /* VIO1 */
+ VIO1_D0_MARK, VIO1_D1_MARK, VIO1_D2_MARK, VIO1_D3_MARK,
+ VIO1_D4_MARK, VIO1_D5_MARK, VIO1_D6_MARK, VIO1_D7_MARK,
+ VIO1_VD_MARK, VIO1_HD_MARK, VIO1_CLK_MARK, VIO1_FIELD_MARK,
+
+ /* TPU0 */
+ TPU0TO0_MARK, TPU0TO1_MARK, TPU0TO3_MARK,
+ TPU0TO2_PORT66_MARK, /* TPU0TO2 Port 66/202 */
+ TPU0TO2_PORT202_MARK,
+
+ /* SSP1 0 */
+ STP0_IPD0_MARK, STP0_IPD1_MARK, STP0_IPD2_MARK, STP0_IPD3_MARK,
+ STP0_IPD4_MARK, STP0_IPD5_MARK, STP0_IPD6_MARK, STP0_IPD7_MARK,
+ STP0_IPEN_MARK, STP0_IPCLK_MARK, STP0_IPSYNC_MARK,
+
+ /* SSP1 1 */
+ STP1_IPD1_MARK, STP1_IPD2_MARK, STP1_IPD3_MARK, STP1_IPD4_MARK,
+ STP1_IPD5_MARK, STP1_IPD6_MARK, STP1_IPD7_MARK, STP1_IPCLK_MARK,
+ STP1_IPSYNC_MARK,
+
+ STP1_IPD0_PORT186_MARK, /* MSEL5CR_23_0 */
+ STP1_IPEN_PORT187_MARK,
+
+ STP1_IPD0_PORT194_MARK, /* MSEL5CR_23_1 */
+ STP1_IPEN_PORT193_MARK,
+
+ /* SIM */
+ SIM_RST_MARK, SIM_CLK_MARK,
+ SIM_D_PORT22_MARK, /* SIM_D Port 22/199 */
+ SIM_D_PORT199_MARK,
+
+ /* SDHI0 */
+ SDHI0_D0_MARK, SDHI0_D1_MARK, SDHI0_D2_MARK, SDHI0_D3_MARK,
+ SDHI0_CD_MARK, SDHI0_WP_MARK, SDHI0_CMD_MARK, SDHI0_CLK_MARK,
+
+ /* SDHI1 */
+ SDHI1_D0_MARK, SDHI1_D1_MARK, SDHI1_D2_MARK, SDHI1_D3_MARK,
+ SDHI1_CD_MARK, SDHI1_WP_MARK, SDHI1_CMD_MARK, SDHI1_CLK_MARK,
+
+ /* SDHI2 */
+ SDHI2_D0_MARK, SDHI2_D1_MARK, SDHI2_D2_MARK, SDHI2_D3_MARK,
+ SDHI2_CLK_MARK, SDHI2_CMD_MARK,
+
+ SDHI2_CD_PORT24_MARK, /* MSEL5CR_19_0 */
+ SDHI2_WP_PORT25_MARK,
+
+ SDHI2_WP_PORT177_MARK, /* MSEL5CR_19_1 */
+ SDHI2_CD_PORT202_MARK,
+
+ /* MSIOF2 */
+ MSIOF2_TXD_MARK, MSIOF2_RXD_MARK, MSIOF2_TSCK_MARK,
+ MSIOF2_SS2_MARK, MSIOF2_TSYNC_MARK, MSIOF2_SS1_MARK,
+ MSIOF2_MCK1_MARK, MSIOF2_MCK0_MARK, MSIOF2_RSYNC_MARK,
+ MSIOF2_RSCK_MARK,
+
+ /* KEYSC */
+ KEYIN4_MARK, KEYIN5_MARK, KEYIN6_MARK, KEYIN7_MARK,
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+ KEYOUT4_MARK, KEYOUT5_MARK, KEYOUT6_MARK, KEYOUT7_MARK,
+
+ KEYIN0_PORT43_MARK, /* MSEL4CR_18_0 */
+ KEYIN1_PORT44_MARK,
+ KEYIN2_PORT45_MARK,
+ KEYIN3_PORT46_MARK,
+
+ KEYIN0_PORT58_MARK, /* MSEL4CR_18_1 */
+ KEYIN1_PORT57_MARK,
+ KEYIN2_PORT56_MARK,
+ KEYIN3_PORT55_MARK,
+
+ /* VOU */
+ DV_D0_MARK, DV_D1_MARK, DV_D2_MARK, DV_D3_MARK,
+ DV_D4_MARK, DV_D5_MARK, DV_D6_MARK, DV_D7_MARK,
+ DV_D8_MARK, DV_D9_MARK, DV_D10_MARK, DV_D11_MARK,
+ DV_D12_MARK, DV_D13_MARK, DV_D14_MARK, DV_D15_MARK,
+ DV_CLK_MARK, DV_VSYNC_MARK, DV_HSYNC_MARK,
+
+ /* MEMC */
+ MEMC_AD0_MARK, MEMC_AD1_MARK, MEMC_AD2_MARK, MEMC_AD3_MARK,
+ MEMC_AD4_MARK, MEMC_AD5_MARK, MEMC_AD6_MARK, MEMC_AD7_MARK,
+ MEMC_AD8_MARK, MEMC_AD9_MARK, MEMC_AD10_MARK, MEMC_AD11_MARK,
+ MEMC_AD12_MARK, MEMC_AD13_MARK, MEMC_AD14_MARK, MEMC_AD15_MARK,
+ MEMC_CS0_MARK, MEMC_INT_MARK, MEMC_NWE_MARK, MEMC_NOE_MARK,
+
+ MEMC_CS1_MARK, /* MSEL4CR_6_0 */
+ MEMC_ADV_MARK,
+ MEMC_WAIT_MARK,
+ MEMC_BUSCLK_MARK,
+
+ MEMC_A1_MARK, /* MSEL4CR_6_1 */
+ MEMC_DREQ0_MARK,
+ MEMC_DREQ1_MARK,
+ MEMC_A0_MARK,
+
+ /* MMC */
+ MMC0_D0_PORT68_MARK, MMC0_D1_PORT69_MARK, MMC0_D2_PORT70_MARK,
+ MMC0_D3_PORT71_MARK, MMC0_D4_PORT72_MARK, MMC0_D5_PORT73_MARK,
+ MMC0_D6_PORT74_MARK, MMC0_D7_PORT75_MARK, MMC0_CLK_PORT66_MARK,
+ MMC0_CMD_PORT67_MARK, /* MSEL4CR_15_0 */
+
+ MMC1_D0_PORT149_MARK, MMC1_D1_PORT148_MARK, MMC1_D2_PORT147_MARK,
+ MMC1_D3_PORT146_MARK, MMC1_D4_PORT145_MARK, MMC1_D5_PORT144_MARK,
+ MMC1_D6_PORT143_MARK, MMC1_D7_PORT142_MARK, MMC1_CLK_PORT103_MARK,
+ MMC1_CMD_PORT104_MARK, /* MSEL4CR_15_1 */
+
+ /* MSIOF0 */
+ MSIOF0_SS1_MARK, MSIOF0_SS2_MARK, MSIOF0_RXD_MARK,
+ MSIOF0_TXD_MARK, MSIOF0_MCK0_MARK, MSIOF0_MCK1_MARK,
+ MSIOF0_RSYNC_MARK, MSIOF0_RSCK_MARK, MSIOF0_TSCK_MARK,
+ MSIOF0_TSYNC_MARK,
+
+ /* MSIOF1 */
+ MSIOF1_RSCK_MARK, MSIOF1_RSYNC_MARK,
+ MSIOF1_MCK0_MARK, MSIOF1_MCK1_MARK,
+
+ MSIOF1_SS2_PORT116_MARK, MSIOF1_SS1_PORT117_MARK,
+ MSIOF1_RXD_PORT118_MARK, MSIOF1_TXD_PORT119_MARK,
+ MSIOF1_TSYNC_PORT120_MARK,
+ MSIOF1_TSCK_PORT121_MARK, /* MSEL4CR_10_0 */
+
+ MSIOF1_SS1_PORT67_MARK, MSIOF1_TSCK_PORT72_MARK,
+ MSIOF1_TSYNC_PORT73_MARK, MSIOF1_TXD_PORT74_MARK,
+ MSIOF1_RXD_PORT75_MARK,
+ MSIOF1_SS2_PORT202_MARK, /* MSEL4CR_10_1 */
+
+ /* GPIO */
+ GPO0_MARK, GPI0_MARK, GPO1_MARK, GPI1_MARK,
+
+ /* USB0 */
+ USB0_OCI_MARK, USB0_PPON_MARK, VBUS_MARK,
+
+ /* USB1 */
+ USB1_OCI_MARK, USB1_PPON_MARK,
+
+ /* BBIF1 */
+ BBIF1_RXD_MARK, BBIF1_TXD_MARK, BBIF1_TSYNC_MARK,
+ BBIF1_TSCK_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK,
+ BBIF1_FLOW_MARK, BBIF1_RX_FLOW_N_MARK,
+
+ /* BBIF2 */
+ BBIF2_TXD2_PORT5_MARK, /* MSEL5CR_0_0 */
+ BBIF2_RXD2_PORT60_MARK,
+ BBIF2_TSYNC2_PORT6_MARK,
+ BBIF2_TSCK2_PORT59_MARK,
+
+ BBIF2_RXD2_PORT90_MARK, /* MSEL5CR_0_1 */
+ BBIF2_TXD2_PORT183_MARK,
+ BBIF2_TSCK2_PORT89_MARK,
+ BBIF2_TSYNC2_PORT184_MARK,
+
+ /* BSC / FLCTL / PCMCIA */
+ CS0_MARK, CS2_MARK, CS4_MARK,
+ CS5B_MARK, CS6A_MARK,
+ CS5A_PORT105_MARK, /* CS5A PORT 19/105 */
+ CS5A_PORT19_MARK,
+ IOIS16_MARK, /* ? */
+
+ A0_MARK, A1_MARK, A2_MARK, A3_MARK,
+ A4_FOE_MARK, /* share with FLCTL */
+ A5_FCDE_MARK, /* share with FLCTL */
+ A6_MARK, A7_MARK, A8_MARK, A9_MARK,
+ A10_MARK, A11_MARK, A12_MARK, A13_MARK,
+ A14_MARK, A15_MARK, A16_MARK, A17_MARK,
+ A18_MARK, A19_MARK, A20_MARK, A21_MARK,
+ A22_MARK, A23_MARK, A24_MARK, A25_MARK,
+ A26_MARK,
+
+ D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, /* share with FLCTL */
+ D3_NAF3_MARK, D4_NAF4_MARK, D5_NAF5_MARK, /* share with FLCTL */
+ D6_NAF6_MARK, D7_NAF7_MARK, D8_NAF8_MARK, /* share with FLCTL */
+ D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK, /* share with FLCTL */
+ D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, /* share with FLCTL */
+ D15_NAF15_MARK, /* share with FLCTL */
+ D16_MARK, D17_MARK, D18_MARK, D19_MARK,
+ D20_MARK, D21_MARK, D22_MARK, D23_MARK,
+ D24_MARK, D25_MARK, D26_MARK, D27_MARK,
+ D28_MARK, D29_MARK, D30_MARK, D31_MARK,
+
+ WE0_FWE_MARK, /* share with FLCTL */
+ WE1_MARK,
+ WE2_ICIORD_MARK, /* share with PCMCIA */
+ WE3_ICIOWR_MARK, /* share with PCMCIA */
+ CKO_MARK, BS_MARK, RDWR_MARK,
+ RD_FSC_MARK, /* share with FLCTL */
+ WAIT_PORT177_MARK, /* WAIT Port 90/177 */
+ WAIT_PORT90_MARK,
+
+ FCE0_MARK, FCE1_MARK, FRB_MARK, /* FLCTL */
+
+ /* IRDA */
+ IRDA_FIRSEL_MARK, IRDA_IN_MARK, IRDA_OUT_MARK,
+
+ /* ATAPI */
+ IDE_D0_MARK, IDE_D1_MARK, IDE_D2_MARK, IDE_D3_MARK,
+ IDE_D4_MARK, IDE_D5_MARK, IDE_D6_MARK, IDE_D7_MARK,
+ IDE_D8_MARK, IDE_D9_MARK, IDE_D10_MARK, IDE_D11_MARK,
+ IDE_D12_MARK, IDE_D13_MARK, IDE_D14_MARK, IDE_D15_MARK,
+ IDE_A0_MARK, IDE_A1_MARK, IDE_A2_MARK, IDE_CS0_MARK,
+ IDE_CS1_MARK, IDE_IOWR_MARK, IDE_IORD_MARK, IDE_IORDY_MARK,
+ IDE_INT_MARK, IDE_RST_MARK, IDE_DIRECTION_MARK,
+ IDE_EXBUF_ENB_MARK, IDE_IODACK_MARK, IDE_IODREQ_MARK,
+
+ /* RMII */
+ RMII_CRS_DV_MARK, RMII_RX_ER_MARK, RMII_RXD0_MARK,
+ RMII_RXD1_MARK, RMII_TX_EN_MARK, RMII_TXD0_MARK,
+ RMII_MDC_MARK, RMII_TXD1_MARK, RMII_MDIO_MARK,
+ RMII_REF50CK_MARK, /* for RMII */
+ RMII_REF125CK_MARK, /* for GMII */
+
+ /* GEther */
+ ET_TX_CLK_MARK, ET_TX_EN_MARK, ET_ETXD0_MARK, ET_ETXD1_MARK,
+ ET_ETXD2_MARK, ET_ETXD3_MARK,
+ ET_ETXD4_MARK, ET_ETXD5_MARK, /* for GEther */
+ ET_ETXD6_MARK, ET_ETXD7_MARK, /* for GEther */
+ ET_COL_MARK, ET_TX_ER_MARK, ET_RX_CLK_MARK, ET_RX_DV_MARK,
+ ET_ERXD0_MARK, ET_ERXD1_MARK, ET_ERXD2_MARK, ET_ERXD3_MARK,
+ ET_ERXD4_MARK, ET_ERXD5_MARK, /* for GEther */
+ ET_ERXD6_MARK, ET_ERXD7_MARK, /* for GEther */
+ ET_RX_ER_MARK, ET_CRS_MARK, ET_MDC_MARK, ET_MDIO_MARK,
+ ET_LINK_MARK, ET_PHY_INT_MARK, ET_WOL_MARK, ET_GTX_CLK_MARK,
+
+ /* DMA0 */
+ DREQ0_MARK, DACK0_MARK,
+
+ /* DMA1 */
+ DREQ1_MARK, DACK1_MARK,
+
+ /* SYSC */
+ RESETOUTS_MARK, RESETP_PULLUP_MARK, RESETP_PLAIN_MARK,
+
+ /* IRREM */
+ IROUT_MARK,
+
+ /* SDENC */
+ SDENC_CPG_MARK, SDENC_DV_CLKI_MARK,
+
+ /* HDMI */
+ HDMI_HPD_MARK, HDMI_CEC_MARK,
+
+ /* DEBUG */
+ EDEBGREQ_PULLUP_MARK, /* for JTAG */
+ EDEBGREQ_PULLDOWN_MARK,
+
+ TRACEAUD_FROM_VIO_MARK, /* for TRACE/AUD */
+ TRACEAUD_FROM_LCDC0_MARK,
+ TRACEAUD_FROM_MEMC_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* specify valid pin states for each pin in GPIO mode */
+
+ /* I/O and Pull U/D */
+ PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1),
+ PORT_DATA_IO_PD(2), PORT_DATA_IO_PD(3),
+ PORT_DATA_IO_PD(4), PORT_DATA_IO_PD(5),
+ PORT_DATA_IO_PD(6), PORT_DATA_IO(7),
+ PORT_DATA_IO(8), PORT_DATA_IO(9),
+
+ PORT_DATA_IO_PD(10), PORT_DATA_IO_PD(11),
+ PORT_DATA_IO_PD(12), PORT_DATA_IO_PU_PD(13),
+ PORT_DATA_IO_PD(14), PORT_DATA_IO_PD(15),
+ PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17),
+ PORT_DATA_IO(18), PORT_DATA_IO_PU(19),
+
+ PORT_DATA_IO_PU_PD(20), PORT_DATA_IO_PD(21),
+ PORT_DATA_IO_PU_PD(22), PORT_DATA_IO(23),
+ PORT_DATA_IO_PU(24), PORT_DATA_IO_PU(25),
+ PORT_DATA_IO_PU(26), PORT_DATA_IO_PU(27),
+ PORT_DATA_IO_PU(28), PORT_DATA_IO_PU(29),
+
+ PORT_DATA_IO_PU(30), PORT_DATA_IO_PD(31),
+ PORT_DATA_IO_PD(32), PORT_DATA_IO_PD(33),
+ PORT_DATA_IO_PD(34), PORT_DATA_IO_PU(35),
+ PORT_DATA_IO_PU(36), PORT_DATA_IO_PD(37),
+ PORT_DATA_IO_PU(38), PORT_DATA_IO_PD(39),
+
+ PORT_DATA_IO_PU_PD(40), PORT_DATA_IO_PD(41),
+ PORT_DATA_IO_PD(42), PORT_DATA_IO_PU_PD(43),
+ PORT_DATA_IO_PU_PD(44), PORT_DATA_IO_PU_PD(45),
+ PORT_DATA_IO_PU_PD(46), PORT_DATA_IO_PU_PD(47),
+ PORT_DATA_IO_PU_PD(48), PORT_DATA_IO_PU_PD(49),
+
+ PORT_DATA_IO_PU_PD(50), PORT_DATA_IO_PD(51),
+ PORT_DATA_IO_PD(52), PORT_DATA_IO_PD(53),
+ PORT_DATA_IO_PD(54), PORT_DATA_IO_PU_PD(55),
+ PORT_DATA_IO_PU_PD(56), PORT_DATA_IO_PU_PD(57),
+ PORT_DATA_IO_PU_PD(58), PORT_DATA_IO_PU_PD(59),
+
+ PORT_DATA_IO_PU_PD(60), PORT_DATA_IO_PD(61),
+ PORT_DATA_IO_PD(62), PORT_DATA_IO_PD(63),
+ PORT_DATA_IO_PD(64), PORT_DATA_IO_PD(65),
+ PORT_DATA_IO_PU_PD(66), PORT_DATA_IO_PU_PD(67),
+ PORT_DATA_IO_PU_PD(68), PORT_DATA_IO_PU_PD(69),
+
+ PORT_DATA_IO_PU_PD(70), PORT_DATA_IO_PU_PD(71),
+ PORT_DATA_IO_PU_PD(72), PORT_DATA_IO_PU_PD(73),
+ PORT_DATA_IO_PU_PD(74), PORT_DATA_IO_PU_PD(75),
+ PORT_DATA_IO_PU_PD(76), PORT_DATA_IO_PU_PD(77),
+ PORT_DATA_IO_PU_PD(78), PORT_DATA_IO_PU_PD(79),
+
+ PORT_DATA_IO_PU_PD(80), PORT_DATA_IO_PU_PD(81),
+ PORT_DATA_IO(82), PORT_DATA_IO_PU_PD(83),
+ PORT_DATA_IO(84), PORT_DATA_IO_PD(85),
+ PORT_DATA_IO_PD(86), PORT_DATA_IO_PD(87),
+ PORT_DATA_IO_PD(88), PORT_DATA_IO_PD(89),
+
+ PORT_DATA_IO_PD(90), PORT_DATA_IO_PU_PD(91),
+ PORT_DATA_IO_PU_PD(92), PORT_DATA_IO_PU_PD(93),
+ PORT_DATA_IO_PU_PD(94), PORT_DATA_IO_PU_PD(95),
+ PORT_DATA_IO_PU_PD(96), PORT_DATA_IO_PU_PD(97),
+ PORT_DATA_IO_PU_PD(98), PORT_DATA_IO_PU_PD(99),
+
+ PORT_DATA_IO_PU_PD(100), PORT_DATA_IO(101),
+ PORT_DATA_IO_PU(102), PORT_DATA_IO_PU_PD(103),
+ PORT_DATA_IO_PU(104), PORT_DATA_IO_PU(105),
+ PORT_DATA_IO_PU_PD(106), PORT_DATA_IO(107),
+ PORT_DATA_IO(108), PORT_DATA_IO(109),
+
+ PORT_DATA_IO(110), PORT_DATA_IO(111),
+ PORT_DATA_IO(112), PORT_DATA_IO(113),
+ PORT_DATA_IO_PU_PD(114), PORT_DATA_IO(115),
+ PORT_DATA_IO_PD(116), PORT_DATA_IO_PD(117),
+ PORT_DATA_IO_PD(118), PORT_DATA_IO_PD(119),
+
+ PORT_DATA_IO_PD(120), PORT_DATA_IO_PD(121),
+ PORT_DATA_IO_PD(122), PORT_DATA_IO_PD(123),
+ PORT_DATA_IO_PD(124), PORT_DATA_IO(125),
+ PORT_DATA_IO(126), PORT_DATA_IO(127),
+ PORT_DATA_IO(128), PORT_DATA_IO(129),
+
+ PORT_DATA_IO(130), PORT_DATA_IO(131),
+ PORT_DATA_IO(132), PORT_DATA_IO(133),
+ PORT_DATA_IO(134), PORT_DATA_IO(135),
+ PORT_DATA_IO(136), PORT_DATA_IO(137),
+ PORT_DATA_IO(138), PORT_DATA_IO(139),
+
+ PORT_DATA_IO(140), PORT_DATA_IO(141),
+ PORT_DATA_IO_PU(142), PORT_DATA_IO_PU(143),
+ PORT_DATA_IO_PU(144), PORT_DATA_IO_PU(145),
+ PORT_DATA_IO_PU(146), PORT_DATA_IO_PU(147),
+ PORT_DATA_IO_PU(148), PORT_DATA_IO_PU(149),
+
+ PORT_DATA_IO_PU(150), PORT_DATA_IO_PU(151),
+ PORT_DATA_IO_PU(152), PORT_DATA_IO_PU(153),
+ PORT_DATA_IO_PU(154), PORT_DATA_IO_PU(155),
+ PORT_DATA_IO_PU(156), PORT_DATA_IO_PU(157),
+ PORT_DATA_IO_PD(158), PORT_DATA_IO_PD(159),
+
+ PORT_DATA_IO_PU_PD(160), PORT_DATA_IO_PD(161),
+ PORT_DATA_IO_PD(162), PORT_DATA_IO_PD(163),
+ PORT_DATA_IO_PD(164), PORT_DATA_IO_PD(165),
+ PORT_DATA_IO_PU(166), PORT_DATA_IO_PU(167),
+ PORT_DATA_IO_PU(168), PORT_DATA_IO_PU(169),
+
+ PORT_DATA_IO_PU(170), PORT_DATA_IO_PU(171),
+ PORT_DATA_IO_PD(172), PORT_DATA_IO_PD(173),
+ PORT_DATA_IO_PD(174), PORT_DATA_IO_PD(175),
+ PORT_DATA_IO_PU(176), PORT_DATA_IO_PU_PD(177),
+ PORT_DATA_IO_PU(178), PORT_DATA_IO_PD(179),
+
+ PORT_DATA_IO_PD(180), PORT_DATA_IO_PU(181),
+ PORT_DATA_IO_PU(182), PORT_DATA_IO(183),
+ PORT_DATA_IO_PD(184), PORT_DATA_IO_PD(185),
+ PORT_DATA_IO_PD(186), PORT_DATA_IO_PD(187),
+ PORT_DATA_IO_PD(188), PORT_DATA_IO_PD(189),
+
+ PORT_DATA_IO_PD(190), PORT_DATA_IO_PD(191),
+ PORT_DATA_IO_PD(192), PORT_DATA_IO_PU_PD(193),
+ PORT_DATA_IO_PU_PD(194), PORT_DATA_IO_PD(195),
+ PORT_DATA_IO_PU_PD(196), PORT_DATA_IO_PD(197),
+ PORT_DATA_IO_PU_PD(198), PORT_DATA_IO_PU_PD(199),
+
+ PORT_DATA_IO_PU_PD(200), PORT_DATA_IO_PU(201),
+ PORT_DATA_IO_PU_PD(202), PORT_DATA_IO(203),
+ PORT_DATA_IO_PU_PD(204), PORT_DATA_IO_PU_PD(205),
+ PORT_DATA_IO_PU_PD(206), PORT_DATA_IO_PU_PD(207),
+ PORT_DATA_IO_PU_PD(208), PORT_DATA_IO_PD(209),
+
+ PORT_DATA_IO_PD(210), PORT_DATA_IO_PD(211),
+
+ /* Port0 */
+ PINMUX_DATA(DBGMDT2_MARK, PORT0_FN1),
+ PINMUX_DATA(FSIAISLD_PORT0_MARK, PORT0_FN2, MSEL5CR_3_0),
+ PINMUX_DATA(FSIAOSLD1_MARK, PORT0_FN3),
+ PINMUX_DATA(LCD0_D22_PORT0_MARK, PORT0_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(SCIFA7_RXD_MARK, PORT0_FN6),
+ PINMUX_DATA(LCD1_D4_MARK, PORT0_FN7),
+ PINMUX_DATA(IRQ5_PORT0_MARK, PORT0_FN0, MSEL1CR_5_0),
+
+ /* Port1 */
+ PINMUX_DATA(DBGMDT1_MARK, PORT1_FN1),
+ PINMUX_DATA(FMSISLD_PORT1_MARK, PORT1_FN2, MSEL5CR_5_0),
+ PINMUX_DATA(FSIAOSLD2_MARK, PORT1_FN3),
+ PINMUX_DATA(LCD0_D23_PORT1_MARK, PORT1_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(SCIFA7_TXD_MARK, PORT1_FN6),
+ PINMUX_DATA(LCD1_D3_MARK, PORT1_FN7),
+ PINMUX_DATA(IRQ5_PORT1_MARK, PORT1_FN0, MSEL1CR_5_1),
+
+ /* Port2 */
+ PINMUX_DATA(DBGMDT0_MARK, PORT2_FN1),
+ PINMUX_DATA(SCIFB_SCK_PORT2_MARK, PORT2_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(LCD0_D21_PORT2_MARK, PORT2_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(LCD1_D2_MARK, PORT2_FN7),
+ PINMUX_DATA(IRQ0_PORT2_MARK, PORT2_FN0, MSEL1CR_0_1),
+
+ /* Port3 */
+ PINMUX_DATA(DBGMD21_MARK, PORT3_FN1),
+ PINMUX_DATA(SCIFB_RXD_PORT3_MARK, PORT3_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(LCD0_D20_PORT3_MARK, PORT3_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(LCD1_D1_MARK, PORT3_FN7),
+
+ /* Port4 */
+ PINMUX_DATA(DBGMD20_MARK, PORT4_FN1),
+ PINMUX_DATA(SCIFB_TXD_PORT4_MARK, PORT4_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(LCD0_D19_PORT4_MARK, PORT4_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(LCD1_D0_MARK, PORT4_FN7),
+
+ /* Port5 */
+ PINMUX_DATA(DBGMD11_MARK, PORT5_FN1),
+ PINMUX_DATA(BBIF2_TXD2_PORT5_MARK, PORT5_FN2, MSEL5CR_0_0),
+ PINMUX_DATA(FSIAISLD_PORT5_MARK, PORT5_FN4, MSEL5CR_3_1),
+ PINMUX_DATA(RSPI_SSL0_A_MARK, PORT5_FN6),
+ PINMUX_DATA(LCD1_VCPWC_MARK, PORT5_FN7),
+
+ /* Port6 */
+ PINMUX_DATA(DBGMD10_MARK, PORT6_FN1),
+ PINMUX_DATA(BBIF2_TSYNC2_PORT6_MARK, PORT6_FN2, MSEL5CR_0_0),
+ PINMUX_DATA(FMSISLD_PORT6_MARK, PORT6_FN4, MSEL5CR_5_1),
+ PINMUX_DATA(RSPI_SSL1_A_MARK, PORT6_FN6),
+ PINMUX_DATA(LCD1_VEPWC_MARK, PORT6_FN7),
+
+ /* Port7 */
+ PINMUX_DATA(FSIAOLR_MARK, PORT7_FN1),
+
+ /* Port8 */
+ PINMUX_DATA(FSIAOBT_MARK, PORT8_FN1),
+
+ /* Port9 */
+ PINMUX_DATA(FSIAOSLD_MARK, PORT9_FN1),
+ PINMUX_DATA(FSIASPDIF_PORT9_MARK, PORT9_FN2, MSEL5CR_4_0),
+
+ /* Port10 */
+ PINMUX_DATA(FSIAOMC_MARK, PORT10_FN1),
+ PINMUX_DATA(SCIFA5_RXD_PORT10_MARK, PORT10_FN3, MSEL5CR_14_0, MSEL5CR_15_0),
+ PINMUX_DATA(IRQ3_PORT10_MARK, PORT10_FN0, MSEL1CR_3_0),
+
+ /* Port11 */
+ PINMUX_DATA(FSIACK_MARK, PORT11_FN1),
+ PINMUX_DATA(FSIBCK_MARK, PORT11_FN2),
+ PINMUX_DATA(IRQ2_PORT11_MARK, PORT11_FN0, MSEL1CR_2_0),
+
+ /* Port12 */
+ PINMUX_DATA(FSIAILR_MARK, PORT12_FN1),
+ PINMUX_DATA(SCIFA4_RXD_PORT12_MARK, PORT12_FN2, MSEL5CR_12_0, MSEL5CR_11_0),
+ PINMUX_DATA(LCD1_RS_MARK, PORT12_FN6),
+ PINMUX_DATA(LCD1_DISP_MARK, PORT12_FN7),
+ PINMUX_DATA(IRQ2_PORT12_MARK, PORT12_FN0, MSEL1CR_2_1),
+
+ /* Port13 */
+ PINMUX_DATA(FSIAIBT_MARK, PORT13_FN1),
+ PINMUX_DATA(SCIFA4_TXD_PORT13_MARK, PORT13_FN2, MSEL5CR_12_0, MSEL5CR_11_0),
+ PINMUX_DATA(LCD1_RD_MARK, PORT13_FN7),
+ PINMUX_DATA(IRQ0_PORT13_MARK, PORT13_FN0, MSEL1CR_0_0),
+
+ /* Port14 */
+ PINMUX_DATA(FMSOILR_MARK, PORT14_FN1),
+ PINMUX_DATA(FMSIILR_MARK, PORT14_FN2),
+ PINMUX_DATA(VIO_CKO1_MARK, PORT14_FN3),
+ PINMUX_DATA(LCD1_D23_MARK, PORT14_FN7),
+ PINMUX_DATA(IRQ3_PORT14_MARK, PORT14_FN0, MSEL1CR_3_1),
+
+ /* Port15 */
+ PINMUX_DATA(FMSOIBT_MARK, PORT15_FN1),
+ PINMUX_DATA(FMSIIBT_MARK, PORT15_FN2),
+ PINMUX_DATA(VIO_CKO2_MARK, PORT15_FN3),
+ PINMUX_DATA(LCD1_D22_MARK, PORT15_FN7),
+ PINMUX_DATA(IRQ4_PORT15_MARK, PORT15_FN0, MSEL1CR_4_0),
+
+ /* Port16 */
+ PINMUX_DATA(FMSOOLR_MARK, PORT16_FN1),
+ PINMUX_DATA(FMSIOLR_MARK, PORT16_FN2),
+
+ /* Port17 */
+ PINMUX_DATA(FMSOOBT_MARK, PORT17_FN1),
+ PINMUX_DATA(FMSIOBT_MARK, PORT17_FN2),
+
+ /* Port18 */
+ PINMUX_DATA(FMSOSLD_MARK, PORT18_FN1),
+ PINMUX_DATA(FSIASPDIF_PORT18_MARK, PORT18_FN2, MSEL5CR_4_1),
+
+ /* Port19 */
+ PINMUX_DATA(FMSICK_MARK, PORT19_FN1),
+ PINMUX_DATA(CS5A_PORT19_MARK, PORT19_FN7, MSEL5CR_2_1),
+ PINMUX_DATA(IRQ10_MARK, PORT19_FN0),
+
+ /* Port20 */
+ PINMUX_DATA(FMSOCK_MARK, PORT20_FN1),
+ PINMUX_DATA(SCIFA5_TXD_PORT20_MARK, PORT20_FN3, MSEL5CR_15_0, MSEL5CR_14_0),
+ PINMUX_DATA(IRQ1_MARK, PORT20_FN0),
+
+ /* Port21 */
+ PINMUX_DATA(SCIFA1_CTS_MARK, PORT21_FN1),
+ PINMUX_DATA(SCIFA4_SCK_PORT21_MARK, PORT21_FN2, MSEL5CR_10_0),
+ PINMUX_DATA(TPU0TO1_MARK, PORT21_FN4),
+ PINMUX_DATA(VIO1_FIELD_MARK, PORT21_FN5),
+ PINMUX_DATA(STP0_IPD5_MARK, PORT21_FN6),
+ PINMUX_DATA(LCD1_D10_MARK, PORT21_FN7),
+
+ /* Port22 */
+ PINMUX_DATA(SCIFA2_SCK_PORT22_MARK, PORT22_FN1, MSEL5CR_7_0),
+ PINMUX_DATA(SIM_D_PORT22_MARK, PORT22_FN4, MSEL5CR_21_0),
+ PINMUX_DATA(VIO0_D13_PORT22_MARK, PORT22_FN7, MSEL5CR_27_1),
+
+ /* Port23 */
+ PINMUX_DATA(SCIFA1_RTS_MARK, PORT23_FN1),
+ PINMUX_DATA(SCIFA5_SCK_PORT23_MARK, PORT23_FN3, MSEL5CR_13_0),
+ PINMUX_DATA(TPU0TO0_MARK, PORT23_FN4),
+ PINMUX_DATA(VIO_CKO_1_MARK, PORT23_FN5),
+ PINMUX_DATA(STP0_IPD2_MARK, PORT23_FN6),
+ PINMUX_DATA(LCD1_D7_MARK, PORT23_FN7),
+
+ /* Port24 */
+ PINMUX_DATA(VIO0_D15_PORT24_MARK, PORT24_FN1, MSEL5CR_27_0),
+ PINMUX_DATA(VIO1_D7_MARK, PORT24_FN5),
+ PINMUX_DATA(SCIFA6_SCK_MARK, PORT24_FN6),
+ PINMUX_DATA(SDHI2_CD_PORT24_MARK, PORT24_FN7, MSEL5CR_19_0),
+
+ /* Port25 */
+ PINMUX_DATA(VIO0_D14_PORT25_MARK, PORT25_FN1, MSEL5CR_27_0),
+ PINMUX_DATA(VIO1_D6_MARK, PORT25_FN5),
+ PINMUX_DATA(SCIFA6_RXD_MARK, PORT25_FN6),
+ PINMUX_DATA(SDHI2_WP_PORT25_MARK, PORT25_FN7, MSEL5CR_19_0),
+
+ /* Port26 */
+ PINMUX_DATA(VIO0_D13_PORT26_MARK, PORT26_FN1, MSEL5CR_27_0),
+ PINMUX_DATA(VIO1_D5_MARK, PORT26_FN5),
+ PINMUX_DATA(SCIFA6_TXD_MARK, PORT26_FN6),
+
+ /* Port27 - Port39 Function */
+ PINMUX_DATA(VIO0_D7_MARK, PORT27_FN1),
+ PINMUX_DATA(VIO0_D6_MARK, PORT28_FN1),
+ PINMUX_DATA(VIO0_D5_MARK, PORT29_FN1),
+ PINMUX_DATA(VIO0_D4_MARK, PORT30_FN1),
+ PINMUX_DATA(VIO0_D3_MARK, PORT31_FN1),
+ PINMUX_DATA(VIO0_D2_MARK, PORT32_FN1),
+ PINMUX_DATA(VIO0_D1_MARK, PORT33_FN1),
+ PINMUX_DATA(VIO0_D0_MARK, PORT34_FN1),
+ PINMUX_DATA(VIO0_CLK_MARK, PORT35_FN1),
+ PINMUX_DATA(VIO_CKO_MARK, PORT36_FN1),
+ PINMUX_DATA(VIO0_HD_MARK, PORT37_FN1),
+ PINMUX_DATA(VIO0_FIELD_MARK, PORT38_FN1),
+ PINMUX_DATA(VIO0_VD_MARK, PORT39_FN1),
+
+ /* Port38 IRQ */
+ PINMUX_DATA(IRQ25_MARK, PORT38_FN0),
+
+ /* Port40 */
+ PINMUX_DATA(LCD0_D18_PORT40_MARK, PORT40_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(RSPI_CK_A_MARK, PORT40_FN6),
+ PINMUX_DATA(LCD1_LCLK_MARK, PORT40_FN7),
+
+ /* Port41 */
+ PINMUX_DATA(LCD0_D17_MARK, PORT41_FN1),
+ PINMUX_DATA(MSIOF2_SS1_MARK, PORT41_FN2),
+ PINMUX_DATA(IRQ31_PORT41_MARK, PORT41_FN0, MSEL1CR_31_1),
+
+ /* Port42 */
+ PINMUX_DATA(LCD0_D16_MARK, PORT42_FN1),
+ PINMUX_DATA(MSIOF2_MCK1_MARK, PORT42_FN2),
+ PINMUX_DATA(IRQ12_PORT42_MARK, PORT42_FN0, MSEL1CR_12_1),
+
+ /* Port43 */
+ PINMUX_DATA(LCD0_D15_MARK, PORT43_FN1),
+ PINMUX_DATA(MSIOF2_MCK0_MARK, PORT43_FN2),
+ PINMUX_DATA(KEYIN0_PORT43_MARK, PORT43_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(DV_D15_MARK, PORT43_FN6),
+
+ /* Port44 */
+ PINMUX_DATA(LCD0_D14_MARK, PORT44_FN1),
+ PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT44_FN2),
+ PINMUX_DATA(KEYIN1_PORT44_MARK, PORT44_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(DV_D14_MARK, PORT44_FN6),
+
+ /* Port45 */
+ PINMUX_DATA(LCD0_D13_MARK, PORT45_FN1),
+ PINMUX_DATA(MSIOF2_RSCK_MARK, PORT45_FN2),
+ PINMUX_DATA(KEYIN2_PORT45_MARK, PORT45_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(DV_D13_MARK, PORT45_FN6),
+
+ /* Port46 */
+ PINMUX_DATA(LCD0_D12_MARK, PORT46_FN1),
+ PINMUX_DATA(KEYIN3_PORT46_MARK, PORT46_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(DV_D12_MARK, PORT46_FN6),
+
+ /* Port47 */
+ PINMUX_DATA(LCD0_D11_MARK, PORT47_FN1),
+ PINMUX_DATA(KEYIN4_MARK, PORT47_FN3),
+ PINMUX_DATA(DV_D11_MARK, PORT47_FN6),
+
+ /* Port48 */
+ PINMUX_DATA(LCD0_D10_MARK, PORT48_FN1),
+ PINMUX_DATA(KEYIN5_MARK, PORT48_FN3),
+ PINMUX_DATA(DV_D10_MARK, PORT48_FN6),
+
+ /* Port49 */
+ PINMUX_DATA(LCD0_D9_MARK, PORT49_FN1),
+ PINMUX_DATA(KEYIN6_MARK, PORT49_FN3),
+ PINMUX_DATA(DV_D9_MARK, PORT49_FN6),
+ PINMUX_DATA(IRQ30_PORT49_MARK, PORT49_FN0, MSEL1CR_30_1),
+
+ /* Port50 */
+ PINMUX_DATA(LCD0_D8_MARK, PORT50_FN1),
+ PINMUX_DATA(KEYIN7_MARK, PORT50_FN3),
+ PINMUX_DATA(DV_D8_MARK, PORT50_FN6),
+ PINMUX_DATA(IRQ29_PORT50_MARK, PORT50_FN0, MSEL1CR_29_1),
+
+ /* Port51 */
+ PINMUX_DATA(LCD0_D7_MARK, PORT51_FN1),
+ PINMUX_DATA(KEYOUT0_MARK, PORT51_FN3),
+ PINMUX_DATA(DV_D7_MARK, PORT51_FN6),
+
+ /* Port52 */
+ PINMUX_DATA(LCD0_D6_MARK, PORT52_FN1),
+ PINMUX_DATA(KEYOUT1_MARK, PORT52_FN3),
+ PINMUX_DATA(DV_D6_MARK, PORT52_FN6),
+
+ /* Port53 */
+ PINMUX_DATA(LCD0_D5_MARK, PORT53_FN1),
+ PINMUX_DATA(KEYOUT2_MARK, PORT53_FN3),
+ PINMUX_DATA(DV_D5_MARK, PORT53_FN6),
+
+ /* Port54 */
+ PINMUX_DATA(LCD0_D4_MARK, PORT54_FN1),
+ PINMUX_DATA(KEYOUT3_MARK, PORT54_FN3),
+ PINMUX_DATA(DV_D4_MARK, PORT54_FN6),
+
+ /* Port55 */
+ PINMUX_DATA(LCD0_D3_MARK, PORT55_FN1),
+ PINMUX_DATA(KEYOUT4_MARK, PORT55_FN3),
+ PINMUX_DATA(KEYIN3_PORT55_MARK, PORT55_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(DV_D3_MARK, PORT55_FN6),
+
+ /* Port56 */
+ PINMUX_DATA(LCD0_D2_MARK, PORT56_FN1),
+ PINMUX_DATA(KEYOUT5_MARK, PORT56_FN3),
+ PINMUX_DATA(KEYIN2_PORT56_MARK, PORT56_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(DV_D2_MARK, PORT56_FN6),
+ PINMUX_DATA(IRQ28_PORT56_MARK, PORT56_FN0, MSEL1CR_28_1),
+
+ /* Port57 */
+ PINMUX_DATA(LCD0_D1_MARK, PORT57_FN1),
+ PINMUX_DATA(KEYOUT6_MARK, PORT57_FN3),
+ PINMUX_DATA(KEYIN1_PORT57_MARK, PORT57_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(DV_D1_MARK, PORT57_FN6),
+ PINMUX_DATA(IRQ27_PORT57_MARK, PORT57_FN0, MSEL1CR_27_1),
+
+ /* Port58 */
+ PINMUX_DATA(LCD0_D0_MARK, PORT58_FN1),
+ PINMUX_DATA(KEYOUT7_MARK, PORT58_FN3),
+ PINMUX_DATA(KEYIN0_PORT58_MARK, PORT58_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(DV_D0_MARK, PORT58_FN6),
+ PINMUX_DATA(IRQ26_PORT58_MARK, PORT58_FN0, MSEL1CR_26_1),
+
+ /* Port59 */
+ PINMUX_DATA(LCD0_VCPWC_MARK, PORT59_FN1),
+ PINMUX_DATA(BBIF2_TSCK2_PORT59_MARK, PORT59_FN2, MSEL5CR_0_0),
+ PINMUX_DATA(RSPI_MOSI_A_MARK, PORT59_FN6),
+
+ /* Port60 */
+ PINMUX_DATA(LCD0_VEPWC_MARK, PORT60_FN1),
+ PINMUX_DATA(BBIF2_RXD2_PORT60_MARK, PORT60_FN2, MSEL5CR_0_0),
+ PINMUX_DATA(RSPI_MISO_A_MARK, PORT60_FN6),
+
+ /* Port61 */
+ PINMUX_DATA(LCD0_DON_MARK, PORT61_FN1),
+ PINMUX_DATA(MSIOF2_TXD_MARK, PORT61_FN2),
+
+ /* Port62 */
+ PINMUX_DATA(LCD0_DCK_MARK, PORT62_FN1),
+ PINMUX_DATA(LCD0_WR_MARK, PORT62_FN4),
+ PINMUX_DATA(DV_CLK_MARK, PORT62_FN6),
+ PINMUX_DATA(IRQ15_PORT62_MARK, PORT62_FN0, MSEL1CR_15_1),
+
+ /* Port63 */
+ PINMUX_DATA(LCD0_VSYN_MARK, PORT63_FN1),
+ PINMUX_DATA(DV_VSYNC_MARK, PORT63_FN6),
+ PINMUX_DATA(IRQ14_PORT63_MARK, PORT63_FN0, MSEL1CR_14_1),
+
+ /* Port64 */
+ PINMUX_DATA(LCD0_HSYN_MARK, PORT64_FN1),
+ PINMUX_DATA(LCD0_CS_MARK, PORT64_FN4),
+ PINMUX_DATA(DV_HSYNC_MARK, PORT64_FN6),
+ PINMUX_DATA(IRQ13_PORT64_MARK, PORT64_FN0, MSEL1CR_13_1),
+
+ /* Port65 */
+ PINMUX_DATA(LCD0_DISP_MARK, PORT65_FN1),
+ PINMUX_DATA(MSIOF2_TSCK_MARK, PORT65_FN2),
+ PINMUX_DATA(LCD0_RS_MARK, PORT65_FN4),
+
+ /* Port66 */
+ PINMUX_DATA(MEMC_INT_MARK, PORT66_FN1),
+ PINMUX_DATA(TPU0TO2_PORT66_MARK, PORT66_FN3, MSEL5CR_25_0),
+ PINMUX_DATA(MMC0_CLK_PORT66_MARK, PORT66_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(SDHI1_CLK_MARK, PORT66_FN6),
+
+ /* Port67 - Port73 Function1 */
+ PINMUX_DATA(MEMC_CS0_MARK, PORT67_FN1),
+ PINMUX_DATA(MEMC_AD8_MARK, PORT68_FN1),
+ PINMUX_DATA(MEMC_AD9_MARK, PORT69_FN1),
+ PINMUX_DATA(MEMC_AD10_MARK, PORT70_FN1),
+ PINMUX_DATA(MEMC_AD11_MARK, PORT71_FN1),
+ PINMUX_DATA(MEMC_AD12_MARK, PORT72_FN1),
+ PINMUX_DATA(MEMC_AD13_MARK, PORT73_FN1),
+
+ /* Port67 - Port73 Function2 */
+ PINMUX_DATA(MSIOF1_SS1_PORT67_MARK, PORT67_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PORT68_FN2),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT69_FN2),
+ PINMUX_DATA(MSIOF1_MCK0_MARK, PORT70_FN2),
+ PINMUX_DATA(MSIOF1_MCK1_MARK, PORT71_FN2),
+ PINMUX_DATA(MSIOF1_TSCK_PORT72_MARK, PORT72_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_TSYNC_PORT73_MARK, PORT73_FN2, MSEL4CR_10_1),
+
+ /* Port67 - Port73 Function4 */
+ PINMUX_DATA(MMC0_CMD_PORT67_MARK, PORT67_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D0_PORT68_MARK, PORT68_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D1_PORT69_MARK, PORT69_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D2_PORT70_MARK, PORT70_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D3_PORT71_MARK, PORT71_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D4_PORT72_MARK, PORT72_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D5_PORT73_MARK, PORT73_FN4, MSEL4CR_15_0),
+
+ /* Port67 - Port73 Function6 */
+ PINMUX_DATA(SDHI1_CMD_MARK, PORT67_FN6),
+ PINMUX_DATA(SDHI1_D0_MARK, PORT68_FN6),
+ PINMUX_DATA(SDHI1_D1_MARK, PORT69_FN6),
+ PINMUX_DATA(SDHI1_D2_MARK, PORT70_FN6),
+ PINMUX_DATA(SDHI1_D3_MARK, PORT71_FN6),
+ PINMUX_DATA(SDHI1_CD_MARK, PORT72_FN6),
+ PINMUX_DATA(SDHI1_WP_MARK, PORT73_FN6),
+
+ /* Port67 - Port71 IRQ */
+ PINMUX_DATA(IRQ20_MARK, PORT67_FN0),
+ PINMUX_DATA(IRQ16_PORT68_MARK, PORT68_FN0, MSEL1CR_16_0),
+ PINMUX_DATA(IRQ17_MARK, PORT69_FN0),
+ PINMUX_DATA(IRQ18_MARK, PORT70_FN0),
+ PINMUX_DATA(IRQ19_MARK, PORT71_FN0),
+
+ /* Port74 */
+ PINMUX_DATA(MEMC_AD14_MARK, PORT74_FN1),
+ PINMUX_DATA(MSIOF1_TXD_PORT74_MARK, PORT74_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MMC0_D6_PORT74_MARK, PORT74_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(STP1_IPD7_MARK, PORT74_FN6),
+ PINMUX_DATA(LCD1_D21_MARK, PORT74_FN7),
+
+ /* Port75 */
+ PINMUX_DATA(MEMC_AD15_MARK, PORT75_FN1),
+ PINMUX_DATA(MSIOF1_RXD_PORT75_MARK, PORT75_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MMC0_D7_PORT75_MARK, PORT75_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(STP1_IPD6_MARK, PORT75_FN6),
+ PINMUX_DATA(LCD1_D20_MARK, PORT75_FN7),
+
+ /* Port76 - Port80 Function */
+ PINMUX_DATA(SDHI0_CMD_MARK, PORT76_FN1),
+ PINMUX_DATA(SDHI0_D0_MARK, PORT77_FN1),
+ PINMUX_DATA(SDHI0_D1_MARK, PORT78_FN1),
+ PINMUX_DATA(SDHI0_D2_MARK, PORT79_FN1),
+ PINMUX_DATA(SDHI0_D3_MARK, PORT80_FN1),
+
+ /* Port81 */
+ PINMUX_DATA(SDHI0_CD_MARK, PORT81_FN1),
+ PINMUX_DATA(IRQ26_PORT81_MARK, PORT81_FN0, MSEL1CR_26_0),
+
+ /* Port82 - Port88 Function */
+ PINMUX_DATA(SDHI0_CLK_MARK, PORT82_FN1),
+ PINMUX_DATA(SDHI0_WP_MARK, PORT83_FN1),
+ PINMUX_DATA(RESETOUTS_MARK, PORT84_FN1),
+ PINMUX_DATA(USB0_PPON_MARK, PORT85_FN1),
+ PINMUX_DATA(USB0_OCI_MARK, PORT86_FN1),
+ PINMUX_DATA(USB1_PPON_MARK, PORT87_FN1),
+ PINMUX_DATA(USB1_OCI_MARK, PORT88_FN1),
+
+ /* Port89 */
+ PINMUX_DATA(DREQ0_MARK, PORT89_FN1),
+ PINMUX_DATA(BBIF2_TSCK2_PORT89_MARK, PORT89_FN2, MSEL5CR_0_1),
+ PINMUX_DATA(RSPI_SSL3_A_MARK, PORT89_FN6),
+
+ /* Port90 */
+ PINMUX_DATA(DACK0_MARK, PORT90_FN1),
+ PINMUX_DATA(BBIF2_RXD2_PORT90_MARK, PORT90_FN2, MSEL5CR_0_1),
+ PINMUX_DATA(RSPI_SSL2_A_MARK, PORT90_FN6),
+ PINMUX_DATA(WAIT_PORT90_MARK, PORT90_FN7, MSEL5CR_2_1),
+
+ /* Port91 */
+ PINMUX_DATA(MEMC_AD0_MARK, PORT91_FN1),
+ PINMUX_DATA(BBIF1_RXD_MARK, PORT91_FN2),
+ PINMUX_DATA(SCIFA5_TXD_PORT91_MARK, PORT91_FN3, MSEL5CR_15_1, MSEL5CR_14_0),
+ PINMUX_DATA(LCD1_D5_MARK, PORT91_FN7),
+
+ /* Port92 */
+ PINMUX_DATA(MEMC_AD1_MARK, PORT92_FN1),
+ PINMUX_DATA(BBIF1_TSYNC_MARK, PORT92_FN2),
+ PINMUX_DATA(SCIFA5_RXD_PORT92_MARK, PORT92_FN3, MSEL5CR_15_1, MSEL5CR_14_0),
+ PINMUX_DATA(STP0_IPD1_MARK, PORT92_FN6),
+ PINMUX_DATA(LCD1_D6_MARK, PORT92_FN7),
+
+ /* Port93 */
+ PINMUX_DATA(MEMC_AD2_MARK, PORT93_FN1),
+ PINMUX_DATA(BBIF1_TSCK_MARK, PORT93_FN2),
+ PINMUX_DATA(SCIFA4_TXD_PORT93_MARK, PORT93_FN3, MSEL5CR_12_1, MSEL5CR_11_0),
+ PINMUX_DATA(STP0_IPD3_MARK, PORT93_FN6),
+ PINMUX_DATA(LCD1_D8_MARK, PORT93_FN7),
+
+ /* Port94 */
+ PINMUX_DATA(MEMC_AD3_MARK, PORT94_FN1),
+ PINMUX_DATA(BBIF1_TXD_MARK, PORT94_FN2),
+ PINMUX_DATA(SCIFA4_RXD_PORT94_MARK, PORT94_FN3, MSEL5CR_12_1, MSEL5CR_11_0),
+ PINMUX_DATA(STP0_IPD4_MARK, PORT94_FN6),
+ PINMUX_DATA(LCD1_D9_MARK, PORT94_FN7),
+
+ /* Port95 */
+ PINMUX_DATA(MEMC_CS1_MARK, PORT95_FN1, MSEL4CR_6_0),
+ PINMUX_DATA(MEMC_A1_MARK, PORT95_FN1, MSEL4CR_6_1),
+
+ PINMUX_DATA(SCIFA2_CTS_MARK, PORT95_FN2),
+ PINMUX_DATA(SIM_RST_MARK, PORT95_FN4),
+ PINMUX_DATA(VIO0_D14_PORT95_MARK, PORT95_FN7, MSEL5CR_27_1),
+ PINMUX_DATA(IRQ22_MARK, PORT95_FN0),
+
+ /* Port96 */
+ PINMUX_DATA(MEMC_ADV_MARK, PORT96_FN1, MSEL4CR_6_0),
+ PINMUX_DATA(MEMC_DREQ0_MARK, PORT96_FN1, MSEL4CR_6_1),
+
+ PINMUX_DATA(SCIFA2_RTS_MARK, PORT96_FN2),
+ PINMUX_DATA(SIM_CLK_MARK, PORT96_FN4),
+ PINMUX_DATA(VIO0_D15_PORT96_MARK, PORT96_FN7, MSEL5CR_27_1),
+ PINMUX_DATA(IRQ23_MARK, PORT96_FN0),
+
+ /* Port97 */
+ PINMUX_DATA(MEMC_AD4_MARK, PORT97_FN1),
+ PINMUX_DATA(BBIF1_RSCK_MARK, PORT97_FN2),
+ PINMUX_DATA(LCD1_CS_MARK, PORT97_FN6),
+ PINMUX_DATA(LCD1_HSYN_MARK, PORT97_FN7),
+ PINMUX_DATA(IRQ12_PORT97_MARK, PORT97_FN0, MSEL1CR_12_0),
+
+ /* Port98 */
+ PINMUX_DATA(MEMC_AD5_MARK, PORT98_FN1),
+ PINMUX_DATA(BBIF1_RSYNC_MARK, PORT98_FN2),
+ PINMUX_DATA(LCD1_VSYN_MARK, PORT98_FN7),
+ PINMUX_DATA(IRQ13_PORT98_MARK, PORT98_FN0, MSEL1CR_13_0),
+
+ /* Port99 */
+ PINMUX_DATA(MEMC_AD6_MARK, PORT99_FN1),
+ PINMUX_DATA(BBIF1_FLOW_MARK, PORT99_FN2),
+ PINMUX_DATA(LCD1_WR_MARK, PORT99_FN6),
+ PINMUX_DATA(LCD1_DCK_MARK, PORT99_FN7),
+ PINMUX_DATA(IRQ14_PORT99_MARK, PORT99_FN0, MSEL1CR_14_0),
+
+ /* Port100 */
+ PINMUX_DATA(MEMC_AD7_MARK, PORT100_FN1),
+ PINMUX_DATA(BBIF1_RX_FLOW_N_MARK, PORT100_FN2),
+ PINMUX_DATA(LCD1_DON_MARK, PORT100_FN7),
+ PINMUX_DATA(IRQ15_PORT100_MARK, PORT100_FN0, MSEL1CR_15_0),
+
+ /* Port101 */
+ PINMUX_DATA(FCE0_MARK, PORT101_FN1),
+
+ /* Port102 */
+ PINMUX_DATA(FRB_MARK, PORT102_FN1),
+ PINMUX_DATA(LCD0_LCLK_PORT102_MARK, PORT102_FN4, MSEL5CR_6_0),
+
+ /* Port103 */
+ PINMUX_DATA(CS5B_MARK, PORT103_FN1),
+ PINMUX_DATA(FCE1_MARK, PORT103_FN2),
+ PINMUX_DATA(MMC1_CLK_PORT103_MARK, PORT103_FN3, MSEL4CR_15_1),
+
+ /* Port104 */
+ PINMUX_DATA(CS6A_MARK, PORT104_FN1),
+ PINMUX_DATA(MMC1_CMD_PORT104_MARK, PORT104_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(IRQ11_MARK, PORT104_FN0),
+
+ /* Port105 */
+ PINMUX_DATA(CS5A_PORT105_MARK, PORT105_FN1, MSEL5CR_2_0),
+ PINMUX_DATA(SCIFA3_RTS_PORT105_MARK, PORT105_FN4, MSEL5CR_8_0),
+
+ /* Port106 */
+ PINMUX_DATA(IOIS16_MARK, PORT106_FN1),
+ PINMUX_DATA(IDE_EXBUF_ENB_MARK, PORT106_FN6),
+
+ /* Port107 - Port115 Function */
+ PINMUX_DATA(WE3_ICIOWR_MARK, PORT107_FN1),
+ PINMUX_DATA(WE2_ICIORD_MARK, PORT108_FN1),
+ PINMUX_DATA(CS0_MARK, PORT109_FN1),
+ PINMUX_DATA(CS2_MARK, PORT110_FN1),
+ PINMUX_DATA(CS4_MARK, PORT111_FN1),
+ PINMUX_DATA(WE1_MARK, PORT112_FN1),
+ PINMUX_DATA(WE0_FWE_MARK, PORT113_FN1),
+ PINMUX_DATA(RDWR_MARK, PORT114_FN1),
+ PINMUX_DATA(RD_FSC_MARK, PORT115_FN1),
+
+ /* Port116 */
+ PINMUX_DATA(A25_MARK, PORT116_FN1),
+ PINMUX_DATA(MSIOF0_SS2_MARK, PORT116_FN2),
+ PINMUX_DATA(MSIOF1_SS2_PORT116_MARK, PORT116_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(SCIFA3_SCK_PORT116_MARK, PORT116_FN4, MSEL5CR_8_0),
+ PINMUX_DATA(GPO1_MARK, PORT116_FN5),
+
+ /* Port117 */
+ PINMUX_DATA(A24_MARK, PORT117_FN1),
+ PINMUX_DATA(MSIOF0_SS1_MARK, PORT117_FN2),
+ PINMUX_DATA(MSIOF1_SS1_PORT117_MARK, PORT117_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(SCIFA3_CTS_PORT117_MARK, PORT117_FN4, MSEL5CR_8_0),
+ PINMUX_DATA(GPO0_MARK, PORT117_FN5),
+
+ /* Port118 */
+ PINMUX_DATA(A23_MARK, PORT118_FN1),
+ PINMUX_DATA(MSIOF0_MCK1_MARK, PORT118_FN2),
+ PINMUX_DATA(MSIOF1_RXD_PORT118_MARK, PORT118_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(GPI1_MARK, PORT118_FN5),
+ PINMUX_DATA(IRQ9_PORT118_MARK, PORT118_FN0, MSEL1CR_9_0),
+
+ /* Port119 */
+ PINMUX_DATA(A22_MARK, PORT119_FN1),
+ PINMUX_DATA(MSIOF0_MCK0_MARK, PORT119_FN2),
+ PINMUX_DATA(MSIOF1_TXD_PORT119_MARK, PORT119_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(GPI0_MARK, PORT119_FN5),
+ PINMUX_DATA(IRQ8_MARK, PORT119_FN0),
+
+ /* Port120 */
+ PINMUX_DATA(A21_MARK, PORT120_FN1),
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT120_FN2),
+ PINMUX_DATA(MSIOF1_TSYNC_PORT120_MARK, PORT120_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(IRQ7_PORT120_MARK, PORT120_FN0, MSEL1CR_7_1),
+
+ /* Port121 */
+ PINMUX_DATA(A20_MARK, PORT121_FN1),
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PORT121_FN2),
+ PINMUX_DATA(MSIOF1_TSCK_PORT121_MARK, PORT121_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(IRQ6_PORT121_MARK, PORT121_FN0, MSEL1CR_6_0),
+
+ /* Port122 */
+ PINMUX_DATA(A19_MARK, PORT122_FN1),
+ PINMUX_DATA(MSIOF0_RXD_MARK, PORT122_FN2),
+
+ /* Port123 */
+ PINMUX_DATA(A18_MARK, PORT123_FN1),
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PORT123_FN2),
+
+ /* Port124 */
+ PINMUX_DATA(A17_MARK, PORT124_FN1),
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT124_FN2),
+
+ /* Port125 - Port141 Function */
+ PINMUX_DATA(A16_MARK, PORT125_FN1),
+ PINMUX_DATA(A15_MARK, PORT126_FN1),
+ PINMUX_DATA(A14_MARK, PORT127_FN1),
+ PINMUX_DATA(A13_MARK, PORT128_FN1),
+ PINMUX_DATA(A12_MARK, PORT129_FN1),
+ PINMUX_DATA(A11_MARK, PORT130_FN1),
+ PINMUX_DATA(A10_MARK, PORT131_FN1),
+ PINMUX_DATA(A9_MARK, PORT132_FN1),
+ PINMUX_DATA(A8_MARK, PORT133_FN1),
+ PINMUX_DATA(A7_MARK, PORT134_FN1),
+ PINMUX_DATA(A6_MARK, PORT135_FN1),
+ PINMUX_DATA(A5_FCDE_MARK, PORT136_FN1),
+ PINMUX_DATA(A4_FOE_MARK, PORT137_FN1),
+ PINMUX_DATA(A3_MARK, PORT138_FN1),
+ PINMUX_DATA(A2_MARK, PORT139_FN1),
+ PINMUX_DATA(A1_MARK, PORT140_FN1),
+ PINMUX_DATA(CKO_MARK, PORT141_FN1),
+
+ /* Port142 - Port157 Function1 */
+ PINMUX_DATA(D15_NAF15_MARK, PORT142_FN1),
+ PINMUX_DATA(D14_NAF14_MARK, PORT143_FN1),
+ PINMUX_DATA(D13_NAF13_MARK, PORT144_FN1),
+ PINMUX_DATA(D12_NAF12_MARK, PORT145_FN1),
+ PINMUX_DATA(D11_NAF11_MARK, PORT146_FN1),
+ PINMUX_DATA(D10_NAF10_MARK, PORT147_FN1),
+ PINMUX_DATA(D9_NAF9_MARK, PORT148_FN1),
+ PINMUX_DATA(D8_NAF8_MARK, PORT149_FN1),
+ PINMUX_DATA(D7_NAF7_MARK, PORT150_FN1),
+ PINMUX_DATA(D6_NAF6_MARK, PORT151_FN1),
+ PINMUX_DATA(D5_NAF5_MARK, PORT152_FN1),
+ PINMUX_DATA(D4_NAF4_MARK, PORT153_FN1),
+ PINMUX_DATA(D3_NAF3_MARK, PORT154_FN1),
+ PINMUX_DATA(D2_NAF2_MARK, PORT155_FN1),
+ PINMUX_DATA(D1_NAF1_MARK, PORT156_FN1),
+ PINMUX_DATA(D0_NAF0_MARK, PORT157_FN1),
+
+ /* Port142 - Port149 Function3 */
+ PINMUX_DATA(MMC1_D7_PORT142_MARK, PORT142_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D6_PORT143_MARK, PORT143_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D5_PORT144_MARK, PORT144_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D4_PORT145_MARK, PORT145_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D3_PORT146_MARK, PORT146_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D2_PORT147_MARK, PORT147_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D1_PORT148_MARK, PORT148_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D0_PORT149_MARK, PORT149_FN3, MSEL4CR_15_1),
+
+ /* Port158 */
+ PINMUX_DATA(D31_MARK, PORT158_FN1),
+ PINMUX_DATA(SCIFA3_SCK_PORT158_MARK, PORT158_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(RMII_REF125CK_MARK, PORT158_FN3),
+ PINMUX_DATA(LCD0_D21_PORT158_MARK, PORT158_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IRDA_FIRSEL_MARK, PORT158_FN5),
+ PINMUX_DATA(IDE_D15_MARK, PORT158_FN6),
+
+ /* Port159 */
+ PINMUX_DATA(D30_MARK, PORT159_FN1),
+ PINMUX_DATA(SCIFA3_RXD_PORT159_MARK, PORT159_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(RMII_REF50CK_MARK, PORT159_FN3),
+ PINMUX_DATA(LCD0_D23_PORT159_MARK, PORT159_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IDE_D14_MARK, PORT159_FN6),
+
+ /* Port160 */
+ PINMUX_DATA(D29_MARK, PORT160_FN1),
+ PINMUX_DATA(SCIFA3_TXD_PORT160_MARK, PORT160_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(LCD0_D22_PORT160_MARK, PORT160_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(VIO1_HD_MARK, PORT160_FN5),
+ PINMUX_DATA(IDE_D13_MARK, PORT160_FN6),
+
+ /* Port161 */
+ PINMUX_DATA(D28_MARK, PORT161_FN1),
+ PINMUX_DATA(SCIFA3_RTS_PORT161_MARK, PORT161_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(ET_RX_DV_MARK, PORT161_FN3),
+ PINMUX_DATA(LCD0_D20_PORT161_MARK, PORT161_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IRDA_IN_MARK, PORT161_FN5),
+ PINMUX_DATA(IDE_D12_MARK, PORT161_FN6),
+
+ /* Port162 */
+ PINMUX_DATA(D27_MARK, PORT162_FN1),
+ PINMUX_DATA(SCIFA3_CTS_PORT162_MARK, PORT162_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(LCD0_D19_PORT162_MARK, PORT162_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IRDA_OUT_MARK, PORT162_FN5),
+ PINMUX_DATA(IDE_D11_MARK, PORT162_FN6),
+
+ /* Port163 */
+ PINMUX_DATA(D26_MARK, PORT163_FN1),
+ PINMUX_DATA(MSIOF2_SS2_MARK, PORT163_FN2),
+ PINMUX_DATA(ET_COL_MARK, PORT163_FN3),
+ PINMUX_DATA(LCD0_D18_PORT163_MARK, PORT163_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IROUT_MARK, PORT163_FN5),
+ PINMUX_DATA(IDE_D10_MARK, PORT163_FN6),
+
+ /* Port164 */
+ PINMUX_DATA(D25_MARK, PORT164_FN1),
+ PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT164_FN2),
+ PINMUX_DATA(ET_PHY_INT_MARK, PORT164_FN3),
+ PINMUX_DATA(LCD0_RD_MARK, PORT164_FN4),
+ PINMUX_DATA(IDE_D9_MARK, PORT164_FN6),
+
+ /* Port165 */
+ PINMUX_DATA(D24_MARK, PORT165_FN1),
+ PINMUX_DATA(MSIOF2_RXD_MARK, PORT165_FN2),
+ PINMUX_DATA(LCD0_LCLK_PORT165_MARK, PORT165_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IDE_D8_MARK, PORT165_FN6),
+
+ /* Port166 - Port171 Function1 */
+ PINMUX_DATA(D21_MARK, PORT166_FN1),
+ PINMUX_DATA(D20_MARK, PORT167_FN1),
+ PINMUX_DATA(D19_MARK, PORT168_FN1),
+ PINMUX_DATA(D18_MARK, PORT169_FN1),
+ PINMUX_DATA(D17_MARK, PORT170_FN1),
+ PINMUX_DATA(D16_MARK, PORT171_FN1),
+
+ /* Port166 - Port171 Function3 */
+ PINMUX_DATA(ET_ETXD5_MARK, PORT166_FN3),
+ PINMUX_DATA(ET_ETXD4_MARK, PORT167_FN3),
+ PINMUX_DATA(ET_ETXD3_MARK, PORT168_FN3),
+ PINMUX_DATA(ET_ETXD2_MARK, PORT169_FN3),
+ PINMUX_DATA(ET_ETXD1_MARK, PORT170_FN3),
+ PINMUX_DATA(ET_ETXD0_MARK, PORT171_FN3),
+
+ /* Port166 - Port171 Function6 */
+ PINMUX_DATA(IDE_D5_MARK, PORT166_FN6),
+ PINMUX_DATA(IDE_D4_MARK, PORT167_FN6),
+ PINMUX_DATA(IDE_D3_MARK, PORT168_FN6),
+ PINMUX_DATA(IDE_D2_MARK, PORT169_FN6),
+ PINMUX_DATA(IDE_D1_MARK, PORT170_FN6),
+ PINMUX_DATA(IDE_D0_MARK, PORT171_FN6),
+
+ /* Port167 - Port171 IRQ */
+ PINMUX_DATA(IRQ31_PORT167_MARK, PORT167_FN0, MSEL1CR_31_0),
+ PINMUX_DATA(IRQ27_PORT168_MARK, PORT168_FN0, MSEL1CR_27_0),
+ PINMUX_DATA(IRQ28_PORT169_MARK, PORT169_FN0, MSEL1CR_28_0),
+ PINMUX_DATA(IRQ29_PORT170_MARK, PORT170_FN0, MSEL1CR_29_0),
+ PINMUX_DATA(IRQ30_PORT171_MARK, PORT171_FN0, MSEL1CR_30_0),
+
+ /* Port172 */
+ PINMUX_DATA(D23_MARK, PORT172_FN1),
+ PINMUX_DATA(SCIFB_RTS_PORT172_MARK, PORT172_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(ET_ETXD7_MARK, PORT172_FN3),
+ PINMUX_DATA(IDE_D7_MARK, PORT172_FN6),
+ PINMUX_DATA(IRQ4_PORT172_MARK, PORT172_FN0, MSEL1CR_4_1),
+
+ /* Port173 */
+ PINMUX_DATA(D22_MARK, PORT173_FN1),
+ PINMUX_DATA(SCIFB_CTS_PORT173_MARK, PORT173_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(ET_ETXD6_MARK, PORT173_FN3),
+ PINMUX_DATA(IDE_D6_MARK, PORT173_FN6),
+ PINMUX_DATA(IRQ6_PORT173_MARK, PORT173_FN0, MSEL1CR_6_1),
+
+ /* Port174 */
+ PINMUX_DATA(A26_MARK, PORT174_FN1),
+ PINMUX_DATA(MSIOF0_TXD_MARK, PORT174_FN2),
+ PINMUX_DATA(ET_RX_CLK_MARK, PORT174_FN3),
+ PINMUX_DATA(SCIFA3_RXD_PORT174_MARK, PORT174_FN4, MSEL5CR_8_0),
+
+ /* Port175 */
+ PINMUX_DATA(A0_MARK, PORT175_FN1),
+ PINMUX_DATA(BS_MARK, PORT175_FN2),
+ PINMUX_DATA(ET_WOL_MARK, PORT175_FN3),
+ PINMUX_DATA(SCIFA3_TXD_PORT175_MARK, PORT175_FN4, MSEL5CR_8_0),
+
+ /* Port176 */
+ PINMUX_DATA(ET_GTX_CLK_MARK, PORT176_FN3),
+
+ /* Port177 */
+ PINMUX_DATA(WAIT_PORT177_MARK, PORT177_FN1, MSEL5CR_2_0),
+ PINMUX_DATA(ET_LINK_MARK, PORT177_FN3),
+ PINMUX_DATA(IDE_IOWR_MARK, PORT177_FN6),
+ PINMUX_DATA(SDHI2_WP_PORT177_MARK, PORT177_FN7, MSEL5CR_19_1),
+
+ /* Port178 */
+ PINMUX_DATA(VIO0_D12_MARK, PORT178_FN1),
+ PINMUX_DATA(VIO1_D4_MARK, PORT178_FN5),
+ PINMUX_DATA(IDE_IORD_MARK, PORT178_FN6),
+
+ /* Port179 */
+ PINMUX_DATA(VIO0_D11_MARK, PORT179_FN1),
+ PINMUX_DATA(VIO1_D3_MARK, PORT179_FN5),
+ PINMUX_DATA(IDE_IORDY_MARK, PORT179_FN6),
+
+ /* Port180 */
+ PINMUX_DATA(VIO0_D10_MARK, PORT180_FN1),
+ PINMUX_DATA(TPU0TO3_MARK, PORT180_FN4),
+ PINMUX_DATA(VIO1_D2_MARK, PORT180_FN5),
+ PINMUX_DATA(IDE_INT_MARK, PORT180_FN6),
+ PINMUX_DATA(IRQ24_MARK, PORT180_FN0),
+
+ /* Port181 */
+ PINMUX_DATA(VIO0_D9_MARK, PORT181_FN1),
+ PINMUX_DATA(VIO1_D1_MARK, PORT181_FN5),
+ PINMUX_DATA(IDE_RST_MARK, PORT181_FN6),
+
+ /* Port182 */
+ PINMUX_DATA(VIO0_D8_MARK, PORT182_FN1),
+ PINMUX_DATA(VIO1_D0_MARK, PORT182_FN5),
+ PINMUX_DATA(IDE_DIRECTION_MARK, PORT182_FN6),
+
+ /* Port183 */
+ PINMUX_DATA(DREQ1_MARK, PORT183_FN1),
+ PINMUX_DATA(BBIF2_TXD2_PORT183_MARK, PORT183_FN2, MSEL5CR_0_1),
+ PINMUX_DATA(ET_TX_EN_MARK, PORT183_FN3),
+
+ /* Port184 */
+ PINMUX_DATA(DACK1_MARK, PORT184_FN1),
+ PINMUX_DATA(BBIF2_TSYNC2_PORT184_MARK, PORT184_FN2, MSEL5CR_0_1),
+ PINMUX_DATA(ET_TX_CLK_MARK, PORT184_FN3),
+
+ /* Port185 - Port192 Function1 */
+ PINMUX_DATA(SCIFA1_SCK_MARK, PORT185_FN1),
+ PINMUX_DATA(SCIFB_RTS_PORT186_MARK, PORT186_FN1, MSEL5CR_17_0),
+ PINMUX_DATA(SCIFB_CTS_PORT187_MARK, PORT187_FN1, MSEL5CR_17_0),
+ PINMUX_DATA(SCIFA0_SCK_MARK, PORT188_FN1),
+ PINMUX_DATA(SCIFB_SCK_PORT190_MARK, PORT190_FN1, MSEL5CR_17_0),
+ PINMUX_DATA(SCIFB_RXD_PORT191_MARK, PORT191_FN1, MSEL5CR_17_0),
+ PINMUX_DATA(SCIFB_TXD_PORT192_MARK, PORT192_FN1, MSEL5CR_17_0),
+
+ /* Port185 - Port192 Function3 */
+ PINMUX_DATA(ET_ERXD0_MARK, PORT185_FN3),
+ PINMUX_DATA(ET_ERXD1_MARK, PORT186_FN3),
+ PINMUX_DATA(ET_ERXD2_MARK, PORT187_FN3),
+ PINMUX_DATA(ET_ERXD3_MARK, PORT188_FN3),
+ PINMUX_DATA(ET_ERXD4_MARK, PORT189_FN3),
+ PINMUX_DATA(ET_ERXD5_MARK, PORT190_FN3),
+ PINMUX_DATA(ET_ERXD6_MARK, PORT191_FN3),
+ PINMUX_DATA(ET_ERXD7_MARK, PORT192_FN3),
+
+ /* Port185 - Port192 Function6 */
+ PINMUX_DATA(STP1_IPCLK_MARK, PORT185_FN6),
+ PINMUX_DATA(STP1_IPD0_PORT186_MARK, PORT186_FN6, MSEL5CR_23_0),
+ PINMUX_DATA(STP1_IPEN_PORT187_MARK, PORT187_FN6, MSEL5CR_23_0),
+ PINMUX_DATA(STP1_IPSYNC_MARK, PORT188_FN6),
+ PINMUX_DATA(STP0_IPCLK_MARK, PORT189_FN6),
+ PINMUX_DATA(STP0_IPD0_MARK, PORT190_FN6),
+ PINMUX_DATA(STP0_IPEN_MARK, PORT191_FN6),
+ PINMUX_DATA(STP0_IPSYNC_MARK, PORT192_FN6),
+
+ /* Port193 */
+ PINMUX_DATA(SCIFA0_CTS_MARK, PORT193_FN1),
+ PINMUX_DATA(RMII_CRS_DV_MARK, PORT193_FN3),
+ PINMUX_DATA(STP1_IPEN_PORT193_MARK, PORT193_FN6, MSEL5CR_23_1), /* ? */
+ PINMUX_DATA(LCD1_D17_MARK, PORT193_FN7),
+
+ /* Port194 */
+ PINMUX_DATA(SCIFA0_RTS_MARK, PORT194_FN1),
+ PINMUX_DATA(RMII_RX_ER_MARK, PORT194_FN3),
+ PINMUX_DATA(STP1_IPD0_PORT194_MARK, PORT194_FN6, MSEL5CR_23_1), /* ? */
+ PINMUX_DATA(LCD1_D16_MARK, PORT194_FN7),
+
+ /* Port195 */
+ PINMUX_DATA(SCIFA1_RXD_MARK, PORT195_FN1),
+ PINMUX_DATA(RMII_RXD0_MARK, PORT195_FN3),
+ PINMUX_DATA(STP1_IPD3_MARK, PORT195_FN6),
+ PINMUX_DATA(LCD1_D15_MARK, PORT195_FN7),
+
+ /* Port196 */
+ PINMUX_DATA(SCIFA1_TXD_MARK, PORT196_FN1),
+ PINMUX_DATA(RMII_RXD1_MARK, PORT196_FN3),
+ PINMUX_DATA(STP1_IPD2_MARK, PORT196_FN6),
+ PINMUX_DATA(LCD1_D14_MARK, PORT196_FN7),
+
+ /* Port197 */
+ PINMUX_DATA(SCIFA0_RXD_MARK, PORT197_FN1),
+ PINMUX_DATA(VIO1_CLK_MARK, PORT197_FN5),
+ PINMUX_DATA(STP1_IPD5_MARK, PORT197_FN6),
+ PINMUX_DATA(LCD1_D19_MARK, PORT197_FN7),
+
+ /* Port198 */
+ PINMUX_DATA(SCIFA0_TXD_MARK, PORT198_FN1),
+ PINMUX_DATA(VIO1_VD_MARK, PORT198_FN5),
+ PINMUX_DATA(STP1_IPD4_MARK, PORT198_FN6),
+ PINMUX_DATA(LCD1_D18_MARK, PORT198_FN7),
+
+ /* Port199 */
+ PINMUX_DATA(MEMC_NWE_MARK, PORT199_FN1),
+ PINMUX_DATA(SCIFA2_SCK_PORT199_MARK, PORT199_FN2, MSEL5CR_7_1),
+ PINMUX_DATA(RMII_TX_EN_MARK, PORT199_FN3),
+ PINMUX_DATA(SIM_D_PORT199_MARK, PORT199_FN4, MSEL5CR_21_1),
+ PINMUX_DATA(STP1_IPD1_MARK, PORT199_FN6),
+ PINMUX_DATA(LCD1_D13_MARK, PORT199_FN7),
+
+ /* Port200 */
+ PINMUX_DATA(MEMC_NOE_MARK, PORT200_FN1),
+ PINMUX_DATA(SCIFA2_RXD_MARK, PORT200_FN2),
+ PINMUX_DATA(RMII_TXD0_MARK, PORT200_FN3),
+ PINMUX_DATA(STP0_IPD7_MARK, PORT200_FN6),
+ PINMUX_DATA(LCD1_D12_MARK, PORT200_FN7),
+
+ /* Port201 */
+ PINMUX_DATA(MEMC_WAIT_MARK, PORT201_FN1, MSEL4CR_6_0),
+ PINMUX_DATA(MEMC_DREQ1_MARK, PORT201_FN1, MSEL4CR_6_1),
+
+ PINMUX_DATA(SCIFA2_TXD_MARK, PORT201_FN2),
+ PINMUX_DATA(RMII_TXD1_MARK, PORT201_FN3),
+ PINMUX_DATA(STP0_IPD6_MARK, PORT201_FN6),
+ PINMUX_DATA(LCD1_D11_MARK, PORT201_FN7),
+
+ /* Port202 */
+ PINMUX_DATA(MEMC_BUSCLK_MARK, PORT202_FN1, MSEL4CR_6_0),
+ PINMUX_DATA(MEMC_A0_MARK, PORT202_FN1, MSEL4CR_6_1),
+
+ PINMUX_DATA(MSIOF1_SS2_PORT202_MARK, PORT202_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(RMII_MDC_MARK, PORT202_FN3),
+ PINMUX_DATA(TPU0TO2_PORT202_MARK, PORT202_FN4, MSEL5CR_25_1),
+ PINMUX_DATA(IDE_CS0_MARK, PORT202_FN6),
+ PINMUX_DATA(SDHI2_CD_PORT202_MARK, PORT202_FN7, MSEL5CR_19_1),
+ PINMUX_DATA(IRQ21_MARK, PORT202_FN0),
+
+ /* Port203 - Port208 Function1 */
+ PINMUX_DATA(SDHI2_CLK_MARK, PORT203_FN1),
+ PINMUX_DATA(SDHI2_CMD_MARK, PORT204_FN1),
+ PINMUX_DATA(SDHI2_D0_MARK, PORT205_FN1),
+ PINMUX_DATA(SDHI2_D1_MARK, PORT206_FN1),
+ PINMUX_DATA(SDHI2_D2_MARK, PORT207_FN1),
+ PINMUX_DATA(SDHI2_D3_MARK, PORT208_FN1),
+
+ /* Port203 - Port208 Function3 */
+ PINMUX_DATA(ET_TX_ER_MARK, PORT203_FN3),
+ PINMUX_DATA(ET_RX_ER_MARK, PORT204_FN3),
+ PINMUX_DATA(ET_CRS_MARK, PORT205_FN3),
+ PINMUX_DATA(ET_MDC_MARK, PORT206_FN3),
+ PINMUX_DATA(ET_MDIO_MARK, PORT207_FN3),
+ PINMUX_DATA(RMII_MDIO_MARK, PORT208_FN3),
+
+ /* Port203 - Port208 Function6 */
+ PINMUX_DATA(IDE_A2_MARK, PORT203_FN6),
+ PINMUX_DATA(IDE_A1_MARK, PORT204_FN6),
+ PINMUX_DATA(IDE_A0_MARK, PORT205_FN6),
+ PINMUX_DATA(IDE_IODACK_MARK, PORT206_FN6),
+ PINMUX_DATA(IDE_IODREQ_MARK, PORT207_FN6),
+ PINMUX_DATA(IDE_CS1_MARK, PORT208_FN6),
+
+ /* Port203 - Port208 Function7 */
+ PINMUX_DATA(SCIFA4_TXD_PORT203_MARK, PORT203_FN7, MSEL5CR_12_0, MSEL5CR_11_1),
+ PINMUX_DATA(SCIFA4_RXD_PORT204_MARK, PORT204_FN7, MSEL5CR_12_0, MSEL5CR_11_1),
+ PINMUX_DATA(SCIFA4_SCK_PORT205_MARK, PORT205_FN7, MSEL5CR_10_1),
+ PINMUX_DATA(SCIFA5_SCK_PORT206_MARK, PORT206_FN7, MSEL5CR_13_1),
+ PINMUX_DATA(SCIFA5_RXD_PORT207_MARK, PORT207_FN7, MSEL5CR_15_0, MSEL5CR_14_1),
+ PINMUX_DATA(SCIFA5_TXD_PORT208_MARK, PORT208_FN7, MSEL5CR_15_0, MSEL5CR_14_1),
+
+ /* Port209 */
+ PINMUX_DATA(VBUS_MARK, PORT209_FN1),
+ PINMUX_DATA(IRQ7_PORT209_MARK, PORT209_FN0, MSEL1CR_7_0),
+
+ /* Port210 */
+ PINMUX_DATA(IRQ9_PORT210_MARK, PORT210_FN0, MSEL1CR_9_1),
+ PINMUX_DATA(HDMI_HPD_MARK, PORT210_FN1),
+
+ /* Port211 */
+ PINMUX_DATA(IRQ16_PORT211_MARK, PORT211_FN0, MSEL1CR_16_1),
+ PINMUX_DATA(HDMI_CEC_MARK, PORT211_FN1),
+
+ /* LCDC select */
+ PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0),
+ PINMUX_DATA(LCDC1_SELECT_MARK, MSEL3CR_6_1),
+
+ /* SDENC */
+ PINMUX_DATA(SDENC_CPG_MARK, MSEL4CR_19_0),
+ PINMUX_DATA(SDENC_DV_CLKI_MARK, MSEL4CR_19_1),
+
+ /* SYSC */
+ PINMUX_DATA(RESETP_PULLUP_MARK, MSEL4CR_4_0),
+ PINMUX_DATA(RESETP_PLAIN_MARK, MSEL4CR_4_1),
+
+ /* DEBUG */
+ PINMUX_DATA(EDEBGREQ_PULLDOWN_MARK, MSEL4CR_1_0),
+ PINMUX_DATA(EDEBGREQ_PULLUP_MARK, MSEL4CR_1_1),
+
+ PINMUX_DATA(TRACEAUD_FROM_VIO_MARK, MSEL5CR_30_0, MSEL5CR_29_0),
+ PINMUX_DATA(TRACEAUD_FROM_LCDC0_MARK, MSEL5CR_30_0, MSEL5CR_29_1),
+ PINMUX_DATA(TRACEAUD_FROM_MEMC_MARK, MSEL5CR_30_1, MSEL5CR_29_0),
+};
+
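+/*
+ * Pin function GPIOs: GPIO_PORT_ALL() provides one GPIO per port, and each
+ * GPIO_FN() entry exposes a single pin function through the same GPIO
+ * numbering. Entries are grouped by peripheral in the comments below.
+ */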
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* PORT */
+ GPIO_PORT_ALL(),
+
+ /* IRQ */
+ GPIO_FN(IRQ0_PORT2), GPIO_FN(IRQ0_PORT13),
+ GPIO_FN(IRQ1),
+ GPIO_FN(IRQ2_PORT11), GPIO_FN(IRQ2_PORT12),
+ GPIO_FN(IRQ3_PORT10), GPIO_FN(IRQ3_PORT14),
+ GPIO_FN(IRQ4_PORT15), GPIO_FN(IRQ4_PORT172),
+ GPIO_FN(IRQ5_PORT0), GPIO_FN(IRQ5_PORT1),
+ GPIO_FN(IRQ6_PORT121), GPIO_FN(IRQ6_PORT173),
+ GPIO_FN(IRQ7_PORT120), GPIO_FN(IRQ7_PORT209),
+ GPIO_FN(IRQ8),
+ GPIO_FN(IRQ9_PORT118), GPIO_FN(IRQ9_PORT210),
+ GPIO_FN(IRQ10),
+ GPIO_FN(IRQ11),
+ GPIO_FN(IRQ12_PORT42), GPIO_FN(IRQ12_PORT97),
+ GPIO_FN(IRQ13_PORT64), GPIO_FN(IRQ13_PORT98),
+ GPIO_FN(IRQ14_PORT63), GPIO_FN(IRQ14_PORT99),
+ GPIO_FN(IRQ15_PORT62), GPIO_FN(IRQ15_PORT100),
+ GPIO_FN(IRQ16_PORT68), GPIO_FN(IRQ16_PORT211),
+ GPIO_FN(IRQ17),
+ GPIO_FN(IRQ18),
+ GPIO_FN(IRQ19),
+ GPIO_FN(IRQ20),
+ GPIO_FN(IRQ21),
+ GPIO_FN(IRQ22),
+ GPIO_FN(IRQ23),
+ GPIO_FN(IRQ24),
+ GPIO_FN(IRQ25),
+ GPIO_FN(IRQ26_PORT58), GPIO_FN(IRQ26_PORT81),
+ GPIO_FN(IRQ27_PORT57), GPIO_FN(IRQ27_PORT168),
+ GPIO_FN(IRQ28_PORT56), GPIO_FN(IRQ28_PORT169),
+ GPIO_FN(IRQ29_PORT50), GPIO_FN(IRQ29_PORT170),
+ GPIO_FN(IRQ30_PORT49), GPIO_FN(IRQ30_PORT171),
+ GPIO_FN(IRQ31_PORT41), GPIO_FN(IRQ31_PORT167),
+
+ /* Function */
+
+ /* DBGT */
+ GPIO_FN(DBGMDT2), GPIO_FN(DBGMDT1), GPIO_FN(DBGMDT0),
+ GPIO_FN(DBGMD10), GPIO_FN(DBGMD11), GPIO_FN(DBGMD20),
+ GPIO_FN(DBGMD21),
+
+ /* FSI-A */
+ GPIO_FN(FSIAISLD_PORT0), /* FSIAISLD Port 0/5 */
+ GPIO_FN(FSIAISLD_PORT5),
+ GPIO_FN(FSIASPDIF_PORT9), /* FSIASPDIF Port 9/18 */
+ GPIO_FN(FSIASPDIF_PORT18),
+ GPIO_FN(FSIAOSLD1), GPIO_FN(FSIAOSLD2), GPIO_FN(FSIAOLR),
+ GPIO_FN(FSIAOBT), GPIO_FN(FSIAOSLD), GPIO_FN(FSIAOMC),
+ GPIO_FN(FSIACK), GPIO_FN(FSIAILR), GPIO_FN(FSIAIBT),
+
+ /* FSI-B */
+ GPIO_FN(FSIBCK),
+
+ /* FMSI */
+ GPIO_FN(FMSISLD_PORT1), /* FMSISLD Port 1/6 */
+ GPIO_FN(FMSISLD_PORT6),
+ GPIO_FN(FMSIILR), GPIO_FN(FMSIIBT), GPIO_FN(FMSIOLR),
+ GPIO_FN(FMSIOBT), GPIO_FN(FMSICK), GPIO_FN(FMSOILR),
+ GPIO_FN(FMSOIBT), GPIO_FN(FMSOOLR), GPIO_FN(FMSOOBT),
+ GPIO_FN(FMSOSLD), GPIO_FN(FMSOCK),
+
+ /* SCIFA0 */
+ GPIO_FN(SCIFA0_SCK), GPIO_FN(SCIFA0_CTS), GPIO_FN(SCIFA0_RTS),
+ GPIO_FN(SCIFA0_RXD), GPIO_FN(SCIFA0_TXD),
+
+ /* SCIFA1 */
+ GPIO_FN(SCIFA1_CTS), GPIO_FN(SCIFA1_SCK),
+ GPIO_FN(SCIFA1_RXD), GPIO_FN(SCIFA1_TXD), GPIO_FN(SCIFA1_RTS),
+
+ /* SCIFA2 */
+ GPIO_FN(SCIFA2_SCK_PORT22), /* SCIFA2_SCK Port 22/199 */
+ GPIO_FN(SCIFA2_SCK_PORT199),
+ GPIO_FN(SCIFA2_RXD), GPIO_FN(SCIFA2_TXD),
+ GPIO_FN(SCIFA2_CTS), GPIO_FN(SCIFA2_RTS),
+
+ /* SCIFA3 */
+ GPIO_FN(SCIFA3_RTS_PORT105), /* MSEL5CR_8_0 */
+ GPIO_FN(SCIFA3_SCK_PORT116),
+ GPIO_FN(SCIFA3_CTS_PORT117),
+ GPIO_FN(SCIFA3_RXD_PORT174),
+ GPIO_FN(SCIFA3_TXD_PORT175),
+
+ GPIO_FN(SCIFA3_RTS_PORT161), /* MSEL5CR_8_1 */
+ GPIO_FN(SCIFA3_SCK_PORT158),
+ GPIO_FN(SCIFA3_CTS_PORT162),
+ GPIO_FN(SCIFA3_RXD_PORT159),
+ GPIO_FN(SCIFA3_TXD_PORT160),
+
+ /* SCIFA4 */
+ GPIO_FN(SCIFA4_RXD_PORT12), /* MSEL5CR[12:11] = 00 */
+ GPIO_FN(SCIFA4_TXD_PORT13),
+
+ GPIO_FN(SCIFA4_RXD_PORT204), /* MSEL5CR[12:11] = 01 */
+ GPIO_FN(SCIFA4_TXD_PORT203),
+
+ GPIO_FN(SCIFA4_RXD_PORT94), /* MSEL5CR[12:11] = 10 */
+ GPIO_FN(SCIFA4_TXD_PORT93),
+
+ GPIO_FN(SCIFA4_SCK_PORT21), /* SCIFA4_SCK Port 21/205 */
+ GPIO_FN(SCIFA4_SCK_PORT205),
+
+ /* SCIFA5 */
+ GPIO_FN(SCIFA5_TXD_PORT20), /* MSEL5CR[15:14] = 00 */
+ GPIO_FN(SCIFA5_RXD_PORT10),
+
+ GPIO_FN(SCIFA5_RXD_PORT207), /* MSEL5CR[15:14] = 01 */
+ GPIO_FN(SCIFA5_TXD_PORT208),
+
+ GPIO_FN(SCIFA5_TXD_PORT91), /* MSEL5CR[15:14] = 10 */
+ GPIO_FN(SCIFA5_RXD_PORT92),
+
+ GPIO_FN(SCIFA5_SCK_PORT23), /* SCIFA5_SCK Port 23/206 */
+ GPIO_FN(SCIFA5_SCK_PORT206),
+
+ /* SCIFA6 */
+ GPIO_FN(SCIFA6_SCK), GPIO_FN(SCIFA6_RXD), GPIO_FN(SCIFA6_TXD),
+
+ /* SCIFA7 */
+ GPIO_FN(SCIFA7_TXD), GPIO_FN(SCIFA7_RXD),
+
+	/* SCIFB */
+ GPIO_FN(SCIFB_SCK_PORT190), /* MSEL5CR_17_0 */
+ GPIO_FN(SCIFB_RXD_PORT191),
+ GPIO_FN(SCIFB_TXD_PORT192),
+ GPIO_FN(SCIFB_RTS_PORT186),
+ GPIO_FN(SCIFB_CTS_PORT187),
+
+ GPIO_FN(SCIFB_SCK_PORT2), /* MSEL5CR_17_1 */
+ GPIO_FN(SCIFB_RXD_PORT3),
+ GPIO_FN(SCIFB_TXD_PORT4),
+ GPIO_FN(SCIFB_RTS_PORT172),
+ GPIO_FN(SCIFB_CTS_PORT173),
+
+ /* LCD0 */
+ GPIO_FN(LCD0_D0), GPIO_FN(LCD0_D1), GPIO_FN(LCD0_D2),
+ GPIO_FN(LCD0_D3), GPIO_FN(LCD0_D4), GPIO_FN(LCD0_D5),
+ GPIO_FN(LCD0_D6), GPIO_FN(LCD0_D7), GPIO_FN(LCD0_D8),
+ GPIO_FN(LCD0_D9), GPIO_FN(LCD0_D10), GPIO_FN(LCD0_D11),
+ GPIO_FN(LCD0_D12), GPIO_FN(LCD0_D13), GPIO_FN(LCD0_D14),
+ GPIO_FN(LCD0_D15), GPIO_FN(LCD0_D16), GPIO_FN(LCD0_D17),
+ GPIO_FN(LCD0_DON), GPIO_FN(LCD0_VCPWC), GPIO_FN(LCD0_VEPWC),
+ GPIO_FN(LCD0_DCK), GPIO_FN(LCD0_VSYN),
+ GPIO_FN(LCD0_HSYN), GPIO_FN(LCD0_DISP),
+ GPIO_FN(LCD0_WR), GPIO_FN(LCD0_RD),
+ GPIO_FN(LCD0_CS), GPIO_FN(LCD0_RS),
+
+ GPIO_FN(LCD0_D18_PORT163), GPIO_FN(LCD0_D19_PORT162),
+ GPIO_FN(LCD0_D20_PORT161), GPIO_FN(LCD0_D21_PORT158),
+ GPIO_FN(LCD0_D22_PORT160), GPIO_FN(LCD0_D23_PORT159),
+ GPIO_FN(LCD0_LCLK_PORT165), /* MSEL5CR_6_1 */
+
+ GPIO_FN(LCD0_D18_PORT40), GPIO_FN(LCD0_D19_PORT4),
+ GPIO_FN(LCD0_D20_PORT3), GPIO_FN(LCD0_D21_PORT2),
+ GPIO_FN(LCD0_D22_PORT0), GPIO_FN(LCD0_D23_PORT1),
+ GPIO_FN(LCD0_LCLK_PORT102), /* MSEL5CR_6_0 */
+
+ /* LCD1 */
+ GPIO_FN(LCD1_D0), GPIO_FN(LCD1_D1), GPIO_FN(LCD1_D2),
+ GPIO_FN(LCD1_D3), GPIO_FN(LCD1_D4), GPIO_FN(LCD1_D5),
+ GPIO_FN(LCD1_D6), GPIO_FN(LCD1_D7), GPIO_FN(LCD1_D8),
+ GPIO_FN(LCD1_D9), GPIO_FN(LCD1_D10), GPIO_FN(LCD1_D11),
+ GPIO_FN(LCD1_D12), GPIO_FN(LCD1_D13), GPIO_FN(LCD1_D14),
+ GPIO_FN(LCD1_D15), GPIO_FN(LCD1_D16), GPIO_FN(LCD1_D17),
+ GPIO_FN(LCD1_D18), GPIO_FN(LCD1_D19), GPIO_FN(LCD1_D20),
+ GPIO_FN(LCD1_D21), GPIO_FN(LCD1_D22), GPIO_FN(LCD1_D23),
+ GPIO_FN(LCD1_RS), GPIO_FN(LCD1_RD), GPIO_FN(LCD1_CS),
+ GPIO_FN(LCD1_WR), GPIO_FN(LCD1_DCK), GPIO_FN(LCD1_DON),
+ GPIO_FN(LCD1_VCPWC), GPIO_FN(LCD1_LCLK), GPIO_FN(LCD1_HSYN),
+ GPIO_FN(LCD1_VSYN), GPIO_FN(LCD1_VEPWC), GPIO_FN(LCD1_DISP),
+
+ /* RSPI */
+ GPIO_FN(RSPI_SSL0_A), GPIO_FN(RSPI_SSL1_A), GPIO_FN(RSPI_SSL2_A),
+ GPIO_FN(RSPI_SSL3_A), GPIO_FN(RSPI_CK_A), GPIO_FN(RSPI_MOSI_A),
+ GPIO_FN(RSPI_MISO_A),
+
+ /* VIO CKO */
+ GPIO_FN(VIO_CKO1),
+ GPIO_FN(VIO_CKO2),
+ GPIO_FN(VIO_CKO_1),
+ GPIO_FN(VIO_CKO),
+
+ /* VIO0 */
+ GPIO_FN(VIO0_D0), GPIO_FN(VIO0_D1), GPIO_FN(VIO0_D2),
+ GPIO_FN(VIO0_D3), GPIO_FN(VIO0_D4), GPIO_FN(VIO0_D5),
+ GPIO_FN(VIO0_D6), GPIO_FN(VIO0_D7), GPIO_FN(VIO0_D8),
+ GPIO_FN(VIO0_D9), GPIO_FN(VIO0_D10), GPIO_FN(VIO0_D11),
+ GPIO_FN(VIO0_D12), GPIO_FN(VIO0_VD), GPIO_FN(VIO0_HD),
+ GPIO_FN(VIO0_CLK), GPIO_FN(VIO0_FIELD),
+
+ GPIO_FN(VIO0_D13_PORT26), /* MSEL5CR_27_0 */
+ GPIO_FN(VIO0_D14_PORT25),
+ GPIO_FN(VIO0_D15_PORT24),
+
+ GPIO_FN(VIO0_D13_PORT22), /* MSEL5CR_27_1 */
+ GPIO_FN(VIO0_D14_PORT95),
+ GPIO_FN(VIO0_D15_PORT96),
+
+ /* VIO1 */
+ GPIO_FN(VIO1_D0), GPIO_FN(VIO1_D1), GPIO_FN(VIO1_D2),
+ GPIO_FN(VIO1_D3), GPIO_FN(VIO1_D4), GPIO_FN(VIO1_D5),
+ GPIO_FN(VIO1_D6), GPIO_FN(VIO1_D7), GPIO_FN(VIO1_VD),
+ GPIO_FN(VIO1_HD), GPIO_FN(VIO1_CLK), GPIO_FN(VIO1_FIELD),
+
+ /* TPU0 */
+ GPIO_FN(TPU0TO0), GPIO_FN(TPU0TO1), GPIO_FN(TPU0TO3),
+ GPIO_FN(TPU0TO2_PORT66), /* TPU0TO2 Port 66/202 */
+ GPIO_FN(TPU0TO2_PORT202),
+
+ /* SSP1 0 */
+ GPIO_FN(STP0_IPD0), GPIO_FN(STP0_IPD1), GPIO_FN(STP0_IPD2),
+ GPIO_FN(STP0_IPD3), GPIO_FN(STP0_IPD4), GPIO_FN(STP0_IPD5),
+ GPIO_FN(STP0_IPD6), GPIO_FN(STP0_IPD7), GPIO_FN(STP0_IPEN),
+ GPIO_FN(STP0_IPCLK), GPIO_FN(STP0_IPSYNC),
+
+ /* SSP1 1 */
+ GPIO_FN(STP1_IPD1), GPIO_FN(STP1_IPD2), GPIO_FN(STP1_IPD3),
+ GPIO_FN(STP1_IPD4), GPIO_FN(STP1_IPD5), GPIO_FN(STP1_IPD6),
+ GPIO_FN(STP1_IPD7), GPIO_FN(STP1_IPCLK), GPIO_FN(STP1_IPSYNC),
+
+ GPIO_FN(STP1_IPD0_PORT186), /* MSEL5CR_23_0 */
+ GPIO_FN(STP1_IPEN_PORT187),
+
+ GPIO_FN(STP1_IPD0_PORT194), /* MSEL5CR_23_1 */
+ GPIO_FN(STP1_IPEN_PORT193),
+
+ /* SIM */
+ GPIO_FN(SIM_RST), GPIO_FN(SIM_CLK),
+ GPIO_FN(SIM_D_PORT22), /* SIM_D Port 22/199 */
+ GPIO_FN(SIM_D_PORT199),
+
+ /* SDHI0 */
+ GPIO_FN(SDHI0_D0), GPIO_FN(SDHI0_D1), GPIO_FN(SDHI0_D2),
+ GPIO_FN(SDHI0_D3), GPIO_FN(SDHI0_CD), GPIO_FN(SDHI0_WP),
+ GPIO_FN(SDHI0_CMD), GPIO_FN(SDHI0_CLK),
+
+ /* SDHI1 */
+ GPIO_FN(SDHI1_D0), GPIO_FN(SDHI1_D1), GPIO_FN(SDHI1_D2),
+ GPIO_FN(SDHI1_D3), GPIO_FN(SDHI1_CD), GPIO_FN(SDHI1_WP),
+ GPIO_FN(SDHI1_CMD), GPIO_FN(SDHI1_CLK),
+
+ /* SDHI2 */
+ GPIO_FN(SDHI2_D0), GPIO_FN(SDHI2_D1), GPIO_FN(SDHI2_D2),
+ GPIO_FN(SDHI2_D3), GPIO_FN(SDHI2_CLK), GPIO_FN(SDHI2_CMD),
+
+ GPIO_FN(SDHI2_CD_PORT24), /* MSEL5CR_19_0 */
+ GPIO_FN(SDHI2_WP_PORT25),
+
+ GPIO_FN(SDHI2_WP_PORT177), /* MSEL5CR_19_1 */
+ GPIO_FN(SDHI2_CD_PORT202),
+
+ /* MSIOF2 */
+ GPIO_FN(MSIOF2_TXD), GPIO_FN(MSIOF2_RXD), GPIO_FN(MSIOF2_TSCK),
+ GPIO_FN(MSIOF2_SS2), GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_SS1),
+ GPIO_FN(MSIOF2_MCK1), GPIO_FN(MSIOF2_MCK0), GPIO_FN(MSIOF2_RSYNC),
+ GPIO_FN(MSIOF2_RSCK),
+
+ /* KEYSC */
+ GPIO_FN(KEYIN4), GPIO_FN(KEYIN5),
+ GPIO_FN(KEYIN6), GPIO_FN(KEYIN7),
+ GPIO_FN(KEYOUT0), GPIO_FN(KEYOUT1), GPIO_FN(KEYOUT2),
+ GPIO_FN(KEYOUT3), GPIO_FN(KEYOUT4), GPIO_FN(KEYOUT5),
+ GPIO_FN(KEYOUT6), GPIO_FN(KEYOUT7),
+
+ GPIO_FN(KEYIN0_PORT43), /* MSEL4CR_18_0 */
+ GPIO_FN(KEYIN1_PORT44),
+ GPIO_FN(KEYIN2_PORT45),
+ GPIO_FN(KEYIN3_PORT46),
+
+ GPIO_FN(KEYIN0_PORT58), /* MSEL4CR_18_1 */
+ GPIO_FN(KEYIN1_PORT57),
+ GPIO_FN(KEYIN2_PORT56),
+ GPIO_FN(KEYIN3_PORT55),
+
+ /* VOU */
+ GPIO_FN(DV_D0), GPIO_FN(DV_D1), GPIO_FN(DV_D2),
+ GPIO_FN(DV_D3), GPIO_FN(DV_D4), GPIO_FN(DV_D5),
+ GPIO_FN(DV_D6), GPIO_FN(DV_D7), GPIO_FN(DV_D8),
+ GPIO_FN(DV_D9), GPIO_FN(DV_D10), GPIO_FN(DV_D11),
+ GPIO_FN(DV_D12), GPIO_FN(DV_D13), GPIO_FN(DV_D14),
+ GPIO_FN(DV_D15), GPIO_FN(DV_CLK),
+ GPIO_FN(DV_VSYNC), GPIO_FN(DV_HSYNC),
+
+ /* MEMC */
+ GPIO_FN(MEMC_AD0), GPIO_FN(MEMC_AD1), GPIO_FN(MEMC_AD2),
+ GPIO_FN(MEMC_AD3), GPIO_FN(MEMC_AD4), GPIO_FN(MEMC_AD5),
+ GPIO_FN(MEMC_AD6), GPIO_FN(MEMC_AD7), GPIO_FN(MEMC_AD8),
+ GPIO_FN(MEMC_AD9), GPIO_FN(MEMC_AD10), GPIO_FN(MEMC_AD11),
+ GPIO_FN(MEMC_AD12), GPIO_FN(MEMC_AD13), GPIO_FN(MEMC_AD14),
+ GPIO_FN(MEMC_AD15), GPIO_FN(MEMC_CS0), GPIO_FN(MEMC_INT),
+ GPIO_FN(MEMC_NWE), GPIO_FN(MEMC_NOE), GPIO_FN(MEMC_CS1),
+ GPIO_FN(MEMC_A1), GPIO_FN(MEMC_ADV), GPIO_FN(MEMC_DREQ0),
+ GPIO_FN(MEMC_WAIT), GPIO_FN(MEMC_DREQ1), GPIO_FN(MEMC_BUSCLK),
+ GPIO_FN(MEMC_A0),
+
+ /* MMC */
+ GPIO_FN(MMC0_D0_PORT68), GPIO_FN(MMC0_D1_PORT69),
+ GPIO_FN(MMC0_D2_PORT70), GPIO_FN(MMC0_D3_PORT71),
+ GPIO_FN(MMC0_D4_PORT72), GPIO_FN(MMC0_D5_PORT73),
+ GPIO_FN(MMC0_D6_PORT74), GPIO_FN(MMC0_D7_PORT75),
+ GPIO_FN(MMC0_CLK_PORT66),
+ GPIO_FN(MMC0_CMD_PORT67), /* MSEL4CR_15_0 */
+
+ GPIO_FN(MMC1_D0_PORT149), GPIO_FN(MMC1_D1_PORT148),
+ GPIO_FN(MMC1_D2_PORT147), GPIO_FN(MMC1_D3_PORT146),
+ GPIO_FN(MMC1_D4_PORT145), GPIO_FN(MMC1_D5_PORT144),
+ GPIO_FN(MMC1_D6_PORT143), GPIO_FN(MMC1_D7_PORT142),
+ GPIO_FN(MMC1_CLK_PORT103),
+ GPIO_FN(MMC1_CMD_PORT104), /* MSEL4CR_15_1 */
+
+ /* MSIOF0 */
+ GPIO_FN(MSIOF0_SS1), GPIO_FN(MSIOF0_SS2), GPIO_FN(MSIOF0_RXD),
+ GPIO_FN(MSIOF0_TXD), GPIO_FN(MSIOF0_MCK0), GPIO_FN(MSIOF0_MCK1),
+ GPIO_FN(MSIOF0_RSYNC), GPIO_FN(MSIOF0_RSCK), GPIO_FN(MSIOF0_TSCK),
+ GPIO_FN(MSIOF0_TSYNC),
+
+ /* MSIOF1 */
+ GPIO_FN(MSIOF1_RSCK), GPIO_FN(MSIOF1_RSYNC),
+ GPIO_FN(MSIOF1_MCK0), GPIO_FN(MSIOF1_MCK1),
+
+ GPIO_FN(MSIOF1_SS2_PORT116), GPIO_FN(MSIOF1_SS1_PORT117),
+ GPIO_FN(MSIOF1_RXD_PORT118), GPIO_FN(MSIOF1_TXD_PORT119),
+ GPIO_FN(MSIOF1_TSYNC_PORT120),
+ GPIO_FN(MSIOF1_TSCK_PORT121), /* MSEL4CR_10_0 */
+
+ GPIO_FN(MSIOF1_SS1_PORT67), GPIO_FN(MSIOF1_TSCK_PORT72),
+ GPIO_FN(MSIOF1_TSYNC_PORT73), GPIO_FN(MSIOF1_TXD_PORT74),
+ GPIO_FN(MSIOF1_RXD_PORT75),
+ GPIO_FN(MSIOF1_SS2_PORT202), /* MSEL4CR_10_1 */
+
+ /* GPIO */
+ GPIO_FN(GPO0), GPIO_FN(GPI0),
+ GPIO_FN(GPO1), GPIO_FN(GPI1),
+
+ /* USB0 */
+ GPIO_FN(USB0_OCI), GPIO_FN(USB0_PPON), GPIO_FN(VBUS),
+
+ /* USB1 */
+ GPIO_FN(USB1_OCI), GPIO_FN(USB1_PPON),
+
+ /* BBIF1 */
+ GPIO_FN(BBIF1_RXD), GPIO_FN(BBIF1_TXD), GPIO_FN(BBIF1_TSYNC),
+ GPIO_FN(BBIF1_TSCK), GPIO_FN(BBIF1_RSCK), GPIO_FN(BBIF1_RSYNC),
+ GPIO_FN(BBIF1_FLOW), GPIO_FN(BBIF1_RX_FLOW_N),
+
+ /* BBIF2 */
+ GPIO_FN(BBIF2_TXD2_PORT5), /* MSEL5CR_0_0 */
+ GPIO_FN(BBIF2_RXD2_PORT60),
+ GPIO_FN(BBIF2_TSYNC2_PORT6),
+ GPIO_FN(BBIF2_TSCK2_PORT59),
+
+ GPIO_FN(BBIF2_RXD2_PORT90), /* MSEL5CR_0_1 */
+ GPIO_FN(BBIF2_TXD2_PORT183),
+ GPIO_FN(BBIF2_TSCK2_PORT89),
+ GPIO_FN(BBIF2_TSYNC2_PORT184),
+
+ /* BSC / FLCTL / PCMCIA */
+ GPIO_FN(CS0), GPIO_FN(CS2), GPIO_FN(CS4),
+ GPIO_FN(CS5B), GPIO_FN(CS6A),
+ GPIO_FN(CS5A_PORT105), /* CS5A PORT 19/105 */
+ GPIO_FN(CS5A_PORT19),
+ GPIO_FN(IOIS16), /* ? */
+
+ GPIO_FN(A0), GPIO_FN(A1), GPIO_FN(A2), GPIO_FN(A3),
+ GPIO_FN(A4_FOE), GPIO_FN(A5_FCDE), /* share with FLCTL */
+ GPIO_FN(A6), GPIO_FN(A7), GPIO_FN(A8), GPIO_FN(A9),
+ GPIO_FN(A10), GPIO_FN(A11), GPIO_FN(A12), GPIO_FN(A13),
+ GPIO_FN(A14), GPIO_FN(A15), GPIO_FN(A16), GPIO_FN(A17),
+ GPIO_FN(A18), GPIO_FN(A19), GPIO_FN(A20), GPIO_FN(A21),
+ GPIO_FN(A22), GPIO_FN(A23), GPIO_FN(A24), GPIO_FN(A25),
+ GPIO_FN(A26),
+
+ GPIO_FN(D0_NAF0), GPIO_FN(D1_NAF1), /* share with FLCTL */
+ GPIO_FN(D2_NAF2), GPIO_FN(D3_NAF3), /* share with FLCTL */
+ GPIO_FN(D4_NAF4), GPIO_FN(D5_NAF5), /* share with FLCTL */
+ GPIO_FN(D6_NAF6), GPIO_FN(D7_NAF7), /* share with FLCTL */
+ GPIO_FN(D8_NAF8), GPIO_FN(D9_NAF9), /* share with FLCTL */
+ GPIO_FN(D10_NAF10), GPIO_FN(D11_NAF11), /* share with FLCTL */
+ GPIO_FN(D12_NAF12), GPIO_FN(D13_NAF13), /* share with FLCTL */
+ GPIO_FN(D14_NAF14), GPIO_FN(D15_NAF15), /* share with FLCTL */
+ GPIO_FN(D16), GPIO_FN(D17), GPIO_FN(D18), GPIO_FN(D19),
+ GPIO_FN(D20), GPIO_FN(D21), GPIO_FN(D22), GPIO_FN(D23),
+ GPIO_FN(D24), GPIO_FN(D25), GPIO_FN(D26), GPIO_FN(D27),
+ GPIO_FN(D28), GPIO_FN(D29), GPIO_FN(D30), GPIO_FN(D31),
+
+ GPIO_FN(WE0_FWE), /* share with FLCTL */
+ GPIO_FN(WE1),
+ GPIO_FN(WE2_ICIORD), /* share with PCMCIA */
+ GPIO_FN(WE3_ICIOWR), /* share with PCMCIA */
+ GPIO_FN(CKO), GPIO_FN(BS), GPIO_FN(RDWR),
+ GPIO_FN(RD_FSC), /* share with FLCTL */
+ GPIO_FN(WAIT_PORT177), /* WAIT Port 90/177 */
+ GPIO_FN(WAIT_PORT90),
+
+ GPIO_FN(FCE0), GPIO_FN(FCE1), GPIO_FN(FRB), /* FLCTL */
+
+ /* IRDA */
+ GPIO_FN(IRDA_FIRSEL), GPIO_FN(IRDA_IN), GPIO_FN(IRDA_OUT),
+
+ /* ATAPI */
+ GPIO_FN(IDE_D0), GPIO_FN(IDE_D1), GPIO_FN(IDE_D2),
+ GPIO_FN(IDE_D3), GPIO_FN(IDE_D4), GPIO_FN(IDE_D5),
+ GPIO_FN(IDE_D6), GPIO_FN(IDE_D7), GPIO_FN(IDE_D8),
+ GPIO_FN(IDE_D9), GPIO_FN(IDE_D10), GPIO_FN(IDE_D11),
+ GPIO_FN(IDE_D12), GPIO_FN(IDE_D13), GPIO_FN(IDE_D14),
+ GPIO_FN(IDE_D15), GPIO_FN(IDE_A0), GPIO_FN(IDE_A1),
+ GPIO_FN(IDE_A2), GPIO_FN(IDE_CS0), GPIO_FN(IDE_CS1),
+ GPIO_FN(IDE_IOWR), GPIO_FN(IDE_IORD), GPIO_FN(IDE_IORDY),
+ GPIO_FN(IDE_INT), GPIO_FN(IDE_RST), GPIO_FN(IDE_DIRECTION),
+ GPIO_FN(IDE_EXBUF_ENB), GPIO_FN(IDE_IODACK), GPIO_FN(IDE_IODREQ),
+
+ /* RMII */
+ GPIO_FN(RMII_CRS_DV), GPIO_FN(RMII_RX_ER), GPIO_FN(RMII_RXD0),
+ GPIO_FN(RMII_RXD1), GPIO_FN(RMII_TX_EN), GPIO_FN(RMII_TXD0),
+ GPIO_FN(RMII_MDC), GPIO_FN(RMII_TXD1), GPIO_FN(RMII_MDIO),
+ GPIO_FN(RMII_REF50CK), GPIO_FN(RMII_REF125CK), /* for GMII */
+
+ /* GEther */
+ GPIO_FN(ET_TX_CLK), GPIO_FN(ET_TX_EN), GPIO_FN(ET_ETXD0),
+ GPIO_FN(ET_ETXD1), GPIO_FN(ET_ETXD2), GPIO_FN(ET_ETXD3),
+ GPIO_FN(ET_ETXD4), GPIO_FN(ET_ETXD5), /* for GEther */
+ GPIO_FN(ET_ETXD6), GPIO_FN(ET_ETXD7), /* for GEther */
+ GPIO_FN(ET_COL), GPIO_FN(ET_TX_ER), GPIO_FN(ET_RX_CLK),
+ GPIO_FN(ET_RX_DV), GPIO_FN(ET_ERXD0), GPIO_FN(ET_ERXD1),
+ GPIO_FN(ET_ERXD2), GPIO_FN(ET_ERXD3),
+ GPIO_FN(ET_ERXD4), GPIO_FN(ET_ERXD5), /* for GEther */
+ GPIO_FN(ET_ERXD6), GPIO_FN(ET_ERXD7), /* for GEther */
+ GPIO_FN(ET_RX_ER), GPIO_FN(ET_CRS), GPIO_FN(ET_MDC),
+ GPIO_FN(ET_MDIO), GPIO_FN(ET_LINK), GPIO_FN(ET_PHY_INT),
+ GPIO_FN(ET_WOL), GPIO_FN(ET_GTX_CLK),
+
+ /* DMA0 */
+ GPIO_FN(DREQ0), GPIO_FN(DACK0),
+
+ /* DMA1 */
+ GPIO_FN(DREQ1), GPIO_FN(DACK1),
+
+ /* SYSC */
+ GPIO_FN(RESETOUTS),
+
+ /* IRREM */
+ GPIO_FN(IROUT),
+
+ /* LCDC */
+ GPIO_FN(LCDC0_SELECT),
+ GPIO_FN(LCDC1_SELECT),
+
+ /* SDENC */
+ GPIO_FN(SDENC_CPG),
+ GPIO_FN(SDENC_DV_CLKI),
+
+ /* HDMI */
+ GPIO_FN(HDMI_HPD),
+ GPIO_FN(HDMI_CEC),
+
+ /* SYSC */
+ GPIO_FN(RESETP_PULLUP),
+ GPIO_FN(RESETP_PLAIN),
+
+ /* DEBUG */
+ GPIO_FN(EDEBGREQ_PULLDOWN),
+ GPIO_FN(EDEBGREQ_PULLUP),
+
+ GPIO_FN(TRACEAUD_FROM_VIO),
+ GPIO_FN(TRACEAUD_FROM_LCDC0),
+ GPIO_FN(TRACEAUD_FROM_MEMC),
+};
+
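+/*
+ * Per-port configuration registers: one byte-wide PORTnCR register per port.
+ * Note that the register base differs between the port banks (0xe60500xx,
+ * 0xe60510xx, 0xe60520xx and 0xe60530xx below), so the addresses are not
+ * strictly contiguous.
+ */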
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ PORTCR(0, 0xe6050000), /* PORT0CR */
+ PORTCR(1, 0xe6050001), /* PORT1CR */
+ PORTCR(2, 0xe6050002), /* PORT2CR */
+ PORTCR(3, 0xe6050003), /* PORT3CR */
+ PORTCR(4, 0xe6050004), /* PORT4CR */
+ PORTCR(5, 0xe6050005), /* PORT5CR */
+ PORTCR(6, 0xe6050006), /* PORT6CR */
+ PORTCR(7, 0xe6050007), /* PORT7CR */
+ PORTCR(8, 0xe6050008), /* PORT8CR */
+ PORTCR(9, 0xe6050009), /* PORT9CR */
+ PORTCR(10, 0xe605000a), /* PORT10CR */
+ PORTCR(11, 0xe605000b), /* PORT11CR */
+ PORTCR(12, 0xe605000c), /* PORT12CR */
+ PORTCR(13, 0xe605000d), /* PORT13CR */
+ PORTCR(14, 0xe605000e), /* PORT14CR */
+ PORTCR(15, 0xe605000f), /* PORT15CR */
+ PORTCR(16, 0xe6050010), /* PORT16CR */
+ PORTCR(17, 0xe6050011), /* PORT17CR */
+ PORTCR(18, 0xe6050012), /* PORT18CR */
+ PORTCR(19, 0xe6050013), /* PORT19CR */
+ PORTCR(20, 0xe6050014), /* PORT20CR */
+ PORTCR(21, 0xe6050015), /* PORT21CR */
+ PORTCR(22, 0xe6050016), /* PORT22CR */
+ PORTCR(23, 0xe6050017), /* PORT23CR */
+ PORTCR(24, 0xe6050018), /* PORT24CR */
+ PORTCR(25, 0xe6050019), /* PORT25CR */
+ PORTCR(26, 0xe605001a), /* PORT26CR */
+ PORTCR(27, 0xe605001b), /* PORT27CR */
+ PORTCR(28, 0xe605001c), /* PORT28CR */
+ PORTCR(29, 0xe605001d), /* PORT29CR */
+ PORTCR(30, 0xe605001e), /* PORT30CR */
+ PORTCR(31, 0xe605001f), /* PORT31CR */
+ PORTCR(32, 0xe6050020), /* PORT32CR */
+ PORTCR(33, 0xe6050021), /* PORT33CR */
+ PORTCR(34, 0xe6050022), /* PORT34CR */
+ PORTCR(35, 0xe6050023), /* PORT35CR */
+ PORTCR(36, 0xe6050024), /* PORT36CR */
+ PORTCR(37, 0xe6050025), /* PORT37CR */
+ PORTCR(38, 0xe6050026), /* PORT38CR */
+ PORTCR(39, 0xe6050027), /* PORT39CR */
+ PORTCR(40, 0xe6050028), /* PORT40CR */
+ PORTCR(41, 0xe6050029), /* PORT41CR */
+ PORTCR(42, 0xe605002a), /* PORT42CR */
+ PORTCR(43, 0xe605002b), /* PORT43CR */
+ PORTCR(44, 0xe605002c), /* PORT44CR */
+ PORTCR(45, 0xe605002d), /* PORT45CR */
+ PORTCR(46, 0xe605002e), /* PORT46CR */
+ PORTCR(47, 0xe605002f), /* PORT47CR */
+ PORTCR(48, 0xe6050030), /* PORT48CR */
+ PORTCR(49, 0xe6050031), /* PORT49CR */
+ PORTCR(50, 0xe6050032), /* PORT50CR */
+ PORTCR(51, 0xe6050033), /* PORT51CR */
+ PORTCR(52, 0xe6050034), /* PORT52CR */
+ PORTCR(53, 0xe6050035), /* PORT53CR */
+ PORTCR(54, 0xe6050036), /* PORT54CR */
+ PORTCR(55, 0xe6050037), /* PORT55CR */
+ PORTCR(56, 0xe6050038), /* PORT56CR */
+ PORTCR(57, 0xe6050039), /* PORT57CR */
+ PORTCR(58, 0xe605003a), /* PORT58CR */
+ PORTCR(59, 0xe605003b), /* PORT59CR */
+ PORTCR(60, 0xe605003c), /* PORT60CR */
+ PORTCR(61, 0xe605003d), /* PORT61CR */
+ PORTCR(62, 0xe605003e), /* PORT62CR */
+ PORTCR(63, 0xe605003f), /* PORT63CR */
+ PORTCR(64, 0xe6050040), /* PORT64CR */
+ PORTCR(65, 0xe6050041), /* PORT65CR */
+ PORTCR(66, 0xe6050042), /* PORT66CR */
+ PORTCR(67, 0xe6050043), /* PORT67CR */
+ PORTCR(68, 0xe6050044), /* PORT68CR */
+ PORTCR(69, 0xe6050045), /* PORT69CR */
+ PORTCR(70, 0xe6050046), /* PORT70CR */
+ PORTCR(71, 0xe6050047), /* PORT71CR */
+ PORTCR(72, 0xe6050048), /* PORT72CR */
+ PORTCR(73, 0xe6050049), /* PORT73CR */
+ PORTCR(74, 0xe605004a), /* PORT74CR */
+ PORTCR(75, 0xe605004b), /* PORT75CR */
+ PORTCR(76, 0xe605004c), /* PORT76CR */
+ PORTCR(77, 0xe605004d), /* PORT77CR */
+ PORTCR(78, 0xe605004e), /* PORT78CR */
+ PORTCR(79, 0xe605004f), /* PORT79CR */
+ PORTCR(80, 0xe6050050), /* PORT80CR */
+ PORTCR(81, 0xe6050051), /* PORT81CR */
+ PORTCR(82, 0xe6050052), /* PORT82CR */
+ PORTCR(83, 0xe6050053), /* PORT83CR */
+
+ PORTCR(84, 0xe6051054), /* PORT84CR */
+ PORTCR(85, 0xe6051055), /* PORT85CR */
+ PORTCR(86, 0xe6051056), /* PORT86CR */
+ PORTCR(87, 0xe6051057), /* PORT87CR */
+ PORTCR(88, 0xe6051058), /* PORT88CR */
+ PORTCR(89, 0xe6051059), /* PORT89CR */
+ PORTCR(90, 0xe605105a), /* PORT90CR */
+ PORTCR(91, 0xe605105b), /* PORT91CR */
+ PORTCR(92, 0xe605105c), /* PORT92CR */
+ PORTCR(93, 0xe605105d), /* PORT93CR */
+ PORTCR(94, 0xe605105e), /* PORT94CR */
+ PORTCR(95, 0xe605105f), /* PORT95CR */
+ PORTCR(96, 0xe6051060), /* PORT96CR */
+ PORTCR(97, 0xe6051061), /* PORT97CR */
+ PORTCR(98, 0xe6051062), /* PORT98CR */
+ PORTCR(99, 0xe6051063), /* PORT99CR */
+ PORTCR(100, 0xe6051064), /* PORT100CR */
+ PORTCR(101, 0xe6051065), /* PORT101CR */
+ PORTCR(102, 0xe6051066), /* PORT102CR */
+ PORTCR(103, 0xe6051067), /* PORT103CR */
+ PORTCR(104, 0xe6051068), /* PORT104CR */
+ PORTCR(105, 0xe6051069), /* PORT105CR */
+ PORTCR(106, 0xe605106a), /* PORT106CR */
+ PORTCR(107, 0xe605106b), /* PORT107CR */
+ PORTCR(108, 0xe605106c), /* PORT108CR */
+ PORTCR(109, 0xe605106d), /* PORT109CR */
+ PORTCR(110, 0xe605106e), /* PORT110CR */
+ PORTCR(111, 0xe605106f), /* PORT111CR */
+ PORTCR(112, 0xe6051070), /* PORT112CR */
+ PORTCR(113, 0xe6051071), /* PORT113CR */
+ PORTCR(114, 0xe6051072), /* PORT114CR */
+
+ PORTCR(115, 0xe6052073), /* PORT115CR */
+ PORTCR(116, 0xe6052074), /* PORT116CR */
+ PORTCR(117, 0xe6052075), /* PORT117CR */
+ PORTCR(118, 0xe6052076), /* PORT118CR */
+ PORTCR(119, 0xe6052077), /* PORT119CR */
+ PORTCR(120, 0xe6052078), /* PORT120CR */
+ PORTCR(121, 0xe6052079), /* PORT121CR */
+ PORTCR(122, 0xe605207a), /* PORT122CR */
+ PORTCR(123, 0xe605207b), /* PORT123CR */
+ PORTCR(124, 0xe605207c), /* PORT124CR */
+ PORTCR(125, 0xe605207d), /* PORT125CR */
+ PORTCR(126, 0xe605207e), /* PORT126CR */
+ PORTCR(127, 0xe605207f), /* PORT127CR */
+ PORTCR(128, 0xe6052080), /* PORT128CR */
+ PORTCR(129, 0xe6052081), /* PORT129CR */
+ PORTCR(130, 0xe6052082), /* PORT130CR */
+ PORTCR(131, 0xe6052083), /* PORT131CR */
+ PORTCR(132, 0xe6052084), /* PORT132CR */
+ PORTCR(133, 0xe6052085), /* PORT133CR */
+ PORTCR(134, 0xe6052086), /* PORT134CR */
+ PORTCR(135, 0xe6052087), /* PORT135CR */
+ PORTCR(136, 0xe6052088), /* PORT136CR */
+ PORTCR(137, 0xe6052089), /* PORT137CR */
+ PORTCR(138, 0xe605208a), /* PORT138CR */
+ PORTCR(139, 0xe605208b), /* PORT139CR */
+ PORTCR(140, 0xe605208c), /* PORT140CR */
+ PORTCR(141, 0xe605208d), /* PORT141CR */
+ PORTCR(142, 0xe605208e), /* PORT142CR */
+ PORTCR(143, 0xe605208f), /* PORT143CR */
+ PORTCR(144, 0xe6052090), /* PORT144CR */
+ PORTCR(145, 0xe6052091), /* PORT145CR */
+ PORTCR(146, 0xe6052092), /* PORT146CR */
+ PORTCR(147, 0xe6052093), /* PORT147CR */
+ PORTCR(148, 0xe6052094), /* PORT148CR */
+ PORTCR(149, 0xe6052095), /* PORT149CR */
+ PORTCR(150, 0xe6052096), /* PORT150CR */
+ PORTCR(151, 0xe6052097), /* PORT151CR */
+ PORTCR(152, 0xe6052098), /* PORT152CR */
+ PORTCR(153, 0xe6052099), /* PORT153CR */
+ PORTCR(154, 0xe605209a), /* PORT154CR */
+ PORTCR(155, 0xe605209b), /* PORT155CR */
+ PORTCR(156, 0xe605209c), /* PORT156CR */
+ PORTCR(157, 0xe605209d), /* PORT157CR */
+ PORTCR(158, 0xe605209e), /* PORT158CR */
+ PORTCR(159, 0xe605209f), /* PORT159CR */
+ PORTCR(160, 0xe60520a0), /* PORT160CR */
+ PORTCR(161, 0xe60520a1), /* PORT161CR */
+ PORTCR(162, 0xe60520a2), /* PORT162CR */
+ PORTCR(163, 0xe60520a3), /* PORT163CR */
+ PORTCR(164, 0xe60520a4), /* PORT164CR */
+ PORTCR(165, 0xe60520a5), /* PORT165CR */
+ PORTCR(166, 0xe60520a6), /* PORT166CR */
+ PORTCR(167, 0xe60520a7), /* PORT167CR */
+ PORTCR(168, 0xe60520a8), /* PORT168CR */
+ PORTCR(169, 0xe60520a9), /* PORT169CR */
+ PORTCR(170, 0xe60520aa), /* PORT170CR */
+ PORTCR(171, 0xe60520ab), /* PORT171CR */
+ PORTCR(172, 0xe60520ac), /* PORT172CR */
+ PORTCR(173, 0xe60520ad), /* PORT173CR */
+ PORTCR(174, 0xe60520ae), /* PORT174CR */
+ PORTCR(175, 0xe60520af), /* PORT175CR */
+ PORTCR(176, 0xe60520b0), /* PORT176CR */
+ PORTCR(177, 0xe60520b1), /* PORT177CR */
+ PORTCR(178, 0xe60520b2), /* PORT178CR */
+ PORTCR(179, 0xe60520b3), /* PORT179CR */
+ PORTCR(180, 0xe60520b4), /* PORT180CR */
+ PORTCR(181, 0xe60520b5), /* PORT181CR */
+ PORTCR(182, 0xe60520b6), /* PORT182CR */
+ PORTCR(183, 0xe60520b7), /* PORT183CR */
+ PORTCR(184, 0xe60520b8), /* PORT184CR */
+ PORTCR(185, 0xe60520b9), /* PORT185CR */
+ PORTCR(186, 0xe60520ba), /* PORT186CR */
+ PORTCR(187, 0xe60520bb), /* PORT187CR */
+ PORTCR(188, 0xe60520bc), /* PORT188CR */
+ PORTCR(189, 0xe60520bd), /* PORT189CR */
+ PORTCR(190, 0xe60520be), /* PORT190CR */
+ PORTCR(191, 0xe60520bf), /* PORT191CR */
+ PORTCR(192, 0xe60520c0), /* PORT192CR */
+ PORTCR(193, 0xe60520c1), /* PORT193CR */
+ PORTCR(194, 0xe60520c2), /* PORT194CR */
+ PORTCR(195, 0xe60520c3), /* PORT195CR */
+ PORTCR(196, 0xe60520c4), /* PORT196CR */
+ PORTCR(197, 0xe60520c5), /* PORT197CR */
+ PORTCR(198, 0xe60520c6), /* PORT198CR */
+ PORTCR(199, 0xe60520c7), /* PORT199CR */
+ PORTCR(200, 0xe60520c8), /* PORT200CR */
+ PORTCR(201, 0xe60520c9), /* PORT201CR */
+ PORTCR(202, 0xe60520ca), /* PORT202CR */
+ PORTCR(203, 0xe60520cb), /* PORT203CR */
+ PORTCR(204, 0xe60520cc), /* PORT204CR */
+ PORTCR(205, 0xe60520cd), /* PORT205CR */
+ PORTCR(206, 0xe60520ce), /* PORT206CR */
+ PORTCR(207, 0xe60520cf), /* PORT207CR */
+ PORTCR(208, 0xe60520d0), /* PORT208CR */
+ PORTCR(209, 0xe60520d1), /* PORT209CR */
+
+ PORTCR(210, 0xe60530d2), /* PORT210CR */
+ PORTCR(211, 0xe60530d3), /* PORT211CR */
+
+ { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1) {
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_30_0, MSEL1CR_30_1,
+ MSEL1CR_29_0, MSEL1CR_29_1,
+ MSEL1CR_28_0, MSEL1CR_28_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_26_0, MSEL1CR_26_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ 0, 0, 0, 0,
+ MSEL1CR_9_0, MSEL1CR_9_1,
+ 0, 0,
+ MSEL1CR_7_0, MSEL1CR_7_1,
+ MSEL1CR_6_0, MSEL1CR_6_1,
+ MSEL1CR_5_0, MSEL1CR_5_1,
+ MSEL1CR_4_0, MSEL1CR_4_1,
+ MSEL1CR_3_0, MSEL1CR_3_1,
+ MSEL1CR_2_0, MSEL1CR_2_1,
+ 0, 0,
+ MSEL1CR_0_0, MSEL1CR_0_1,
+ }
+ },
+	{ PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL3CR_15_0, MSEL3CR_15_1,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL3CR_6_0, MSEL3CR_6_1,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+	{ PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ 0, 0, 0, 0,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ 0, 0, 0, 0, 0, 0,
+ MSEL4CR_6_0, MSEL4CR_6_1,
+ 0, 0,
+ MSEL4CR_4_0, MSEL4CR_4_1,
+ 0, 0, 0, 0,
+ MSEL4CR_1_0, MSEL4CR_1_1,
+ 0, 0,
+ }
+ },
+	{ PINMUX_CFG_REG("MSEL5CR", 0xe6058028, 32, 1) {
+ MSEL5CR_31_0, MSEL5CR_31_1,
+ MSEL5CR_30_0, MSEL5CR_30_1,
+ MSEL5CR_29_0, MSEL5CR_29_1,
+ 0, 0,
+ MSEL5CR_27_0, MSEL5CR_27_1,
+ 0, 0,
+ MSEL5CR_25_0, MSEL5CR_25_1,
+ 0, 0,
+ MSEL5CR_23_0, MSEL5CR_23_1,
+ 0, 0,
+ MSEL5CR_21_0, MSEL5CR_21_1,
+ 0, 0,
+ MSEL5CR_19_0, MSEL5CR_19_1,
+ 0, 0,
+ MSEL5CR_17_0, MSEL5CR_17_1,
+ 0, 0,
+ MSEL5CR_15_0, MSEL5CR_15_1,
+ MSEL5CR_14_0, MSEL5CR_14_1,
+ MSEL5CR_13_0, MSEL5CR_13_1,
+ MSEL5CR_12_0, MSEL5CR_12_1,
+ MSEL5CR_11_0, MSEL5CR_11_1,
+ MSEL5CR_10_0, MSEL5CR_10_1,
+ 0, 0,
+ MSEL5CR_8_0, MSEL5CR_8_1,
+ MSEL5CR_7_0, MSEL5CR_7_1,
+ MSEL5CR_6_0, MSEL5CR_6_1,
+ MSEL5CR_5_0, MSEL5CR_5_1,
+ MSEL5CR_4_0, MSEL5CR_4_1,
+ MSEL5CR_3_0, MSEL5CR_3_1,
+ MSEL5CR_2_0, MSEL5CR_2_1,
+ 0, 0,
+ MSEL5CR_0_0, MSEL5CR_0_1,
+ }
+ },
+ { },
+};
+
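+/*
+ * Port data registers: each 32-bit register lists its ports from the most
+ * significant bit down; bit positions without a corresponding port are
+ * filled with 0.
+ */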
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054800, 32) {
+ PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+ PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+ PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+ PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+ PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+ PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+ PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
+ },
+ { PINMUX_DATA_REG("PORTL063_032DR", 0xe6054804, 32) {
+ PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
+ PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
+ PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
+ PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
+ PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
+ PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
+ PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
+ },
+ { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054808, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+ PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+ PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+ PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
+ },
+ { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055808, 32) {
+ PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
+ PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
+ PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PORTD127_096DR", 0xe605580c, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+ PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+ PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+ PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR127_096DR", 0xe605680C, 32) {
+ PORT127_DATA, PORT126_DATA, PORT125_DATA, PORT124_DATA,
+ PORT123_DATA, PORT122_DATA, PORT121_DATA, PORT120_DATA,
+ PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+ PORT115_DATA, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056810, 32) {
+ PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
+ PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
+ PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
+ PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
+ PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
+ PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
+ PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056814, 32) {
+ PORT191_DATA, PORT190_DATA, PORT189_DATA, PORT188_DATA,
+ PORT187_DATA, PORT186_DATA, PORT185_DATA, PORT184_DATA,
+ PORT183_DATA, PORT182_DATA, PORT181_DATA, PORT180_DATA,
+ PORT179_DATA, PORT178_DATA, PORT177_DATA, PORT176_DATA,
+ PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA,
+ PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
+ PORT167_DATA, PORT166_DATA, PORT165_DATA, PORT164_DATA,
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056818, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, PORT209_DATA, PORT208_DATA,
+ PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
+ PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
+ PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
+ PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
+ },
+ { PINMUX_DATA_REG("PORTU223_192DR", 0xe6057818, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PORT211_DATA, PORT210_DATA, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { },
+};
+
+static struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(evt2irq(0x0200), PORT2_FN0, PORT13_FN0), /* IRQ0A */
+ PINMUX_IRQ(evt2irq(0x0220), PORT20_FN0), /* IRQ1A */
+ PINMUX_IRQ(evt2irq(0x0240), PORT11_FN0, PORT12_FN0), /* IRQ2A */
+ PINMUX_IRQ(evt2irq(0x0260), PORT10_FN0, PORT14_FN0), /* IRQ3A */
+ PINMUX_IRQ(evt2irq(0x0280), PORT15_FN0, PORT172_FN0), /* IRQ4A */
+ PINMUX_IRQ(evt2irq(0x02A0), PORT0_FN0, PORT1_FN0), /* IRQ5A */
+ PINMUX_IRQ(evt2irq(0x02C0), PORT121_FN0, PORT173_FN0), /* IRQ6A */
+ PINMUX_IRQ(evt2irq(0x02E0), PORT120_FN0, PORT209_FN0), /* IRQ7A */
+ PINMUX_IRQ(evt2irq(0x0300), PORT119_FN0), /* IRQ8A */
+ PINMUX_IRQ(evt2irq(0x0320), PORT118_FN0, PORT210_FN0), /* IRQ9A */
+ PINMUX_IRQ(evt2irq(0x0340), PORT19_FN0), /* IRQ10A */
+ PINMUX_IRQ(evt2irq(0x0360), PORT104_FN0), /* IRQ11A */
+ PINMUX_IRQ(evt2irq(0x0380), PORT42_FN0, PORT97_FN0), /* IRQ12A */
+ PINMUX_IRQ(evt2irq(0x03A0), PORT64_FN0, PORT98_FN0), /* IRQ13A */
+ PINMUX_IRQ(evt2irq(0x03C0), PORT63_FN0, PORT99_FN0), /* IRQ14A */
+ PINMUX_IRQ(evt2irq(0x03E0), PORT62_FN0, PORT100_FN0), /* IRQ15A */
+ PINMUX_IRQ(evt2irq(0x3200), PORT68_FN0, PORT211_FN0), /* IRQ16A */
+ PINMUX_IRQ(evt2irq(0x3220), PORT69_FN0), /* IRQ17A */
+ PINMUX_IRQ(evt2irq(0x3240), PORT70_FN0), /* IRQ18A */
+ PINMUX_IRQ(evt2irq(0x3260), PORT71_FN0), /* IRQ19A */
+ PINMUX_IRQ(evt2irq(0x3280), PORT67_FN0), /* IRQ20A */
+ PINMUX_IRQ(evt2irq(0x32A0), PORT202_FN0), /* IRQ21A */
+ PINMUX_IRQ(evt2irq(0x32C0), PORT95_FN0), /* IRQ22A */
+ PINMUX_IRQ(evt2irq(0x32E0), PORT96_FN0), /* IRQ23A */
+ PINMUX_IRQ(evt2irq(0x3300), PORT180_FN0), /* IRQ24A */
+ PINMUX_IRQ(evt2irq(0x3320), PORT38_FN0), /* IRQ25A */
+ PINMUX_IRQ(evt2irq(0x3340), PORT58_FN0, PORT81_FN0), /* IRQ26A */
+ PINMUX_IRQ(evt2irq(0x3360), PORT57_FN0, PORT168_FN0), /* IRQ27A */
+ PINMUX_IRQ(evt2irq(0x3380), PORT56_FN0, PORT169_FN0), /* IRQ28A */
+ PINMUX_IRQ(evt2irq(0x33A0), PORT50_FN0, PORT170_FN0), /* IRQ29A */
+ PINMUX_IRQ(evt2irq(0x33C0), PORT49_FN0, PORT171_FN0), /* IRQ30A */
+ PINMUX_IRQ(evt2irq(0x33E0), PORT41_FN0, PORT167_FN0), /* IRQ31A */
+};
+
+struct sh_pfc_soc_info r8a7740_pinmux_info = {
+ .name = "r8a7740_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN,
+ PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN,
+ PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
+ PINMUX_INPUT_PULLUP_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN,
+ PINMUX_INPUT_PULLDOWN_END },
+ .output = { PINMUX_OUTPUT_BEGIN,
+ PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN,
+ PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN,
+ PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PORT0,
+ .last_gpio = GPIO_FN_TRACEAUD_FROM_MEMC,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+
+ .gpio_irq = pinmux_irqs,
+ .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
new file mode 100644
index 000000000000..13feaa0c0eb7
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -0,0 +1,2624 @@
+/*
+ * r8a7779 processor support - PFC hardware block
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2011 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <mach/r8a7779.h>
+
+#include "sh_pfc.h"
+
+#define CPU_32_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##2, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_1(fn, pfx##31, sfx)
+
+#define CPU_32_PORT6(fn, pfx, sfx) \
+ PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
+ PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
+ PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
+ PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
+ PORT_1(fn, pfx##8, sfx)
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ CPU_32_PORT(fn, pfx##_0_, sfx), \
+ CPU_32_PORT(fn, pfx##_1_, sfx), \
+ CPU_32_PORT(fn, pfx##_2_, sfx), \
+ CPU_32_PORT(fn, pfx##_3_, sfx), \
+ CPU_32_PORT(fn, pfx##_4_, sfx), \
+ CPU_32_PORT(fn, pfx##_5_, sfx), \
+ CPU_32_PORT6(fn, pfx##_6_, sfx)
+
+#define _GP_GPIO(pfx, sfx) PINMUX_GPIO(GPIO_GP##pfx, GP##pfx##_DATA)
+#define _GP_DATA(pfx, sfx) PINMUX_DATA(GP##pfx##_DATA, GP##pfx##_FN, \
+ GP##pfx##_IN, GP##pfx##_OUT)
+
+#define _GP_INOUTSEL(pfx, sfx) GP##pfx##_IN, GP##pfx##_OUT
+#define _GP_INDT(pfx, sfx) GP##pfx##_DATA
+
+#define GP_ALL(str) CPU_ALL_PORT(_PORT_ALL, GP, str)
+#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, , unused)
+#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, , unused)
+
+#define PORT_10_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##9, sfx), PORT_1(fn, pfx##8, sfx), \
+ PORT_1(fn, pfx##7, sfx), PORT_1(fn, pfx##6, sfx), \
+ PORT_1(fn, pfx##5, sfx), PORT_1(fn, pfx##4, sfx), \
+ PORT_1(fn, pfx##3, sfx), PORT_1(fn, pfx##2, sfx), \
+ PORT_1(fn, pfx##1, sfx), PORT_1(fn, pfx##0, sfx)
+
+#define CPU_32_PORT_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##31, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_10_REV(fn, pfx##2, sfx), PORT_10_REV(fn, pfx##1, sfx), \
+ PORT_10_REV(fn, pfx, sfx)
+
+#define GP_INOUTSEL(bank) CPU_32_PORT_REV(_GP_INOUTSEL, _##bank##_, unused)
+#define GP_INDT(bank) CPU_32_PORT_REV(_GP_INDT, _##bank##_, unused)
+
+#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
+#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
+ FN_##ipsr, FN_##fn)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA), /* GP_0_0_DATA -> GP_6_8_DATA */
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ GP_ALL(IN), /* GP_0_0_IN -> GP_6_8_IN */
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ GP_ALL(OUT), /* GP_0_0_OUT -> GP_6_8_OUT */
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN), /* GP_0_0_FN -> GP_6_8_FN */
+
+ /* GPSR0 */
+ FN_AVS1, FN_AVS2, FN_IP0_7_6, FN_A17,
+ FN_A18, FN_A19, FN_IP0_9_8, FN_IP0_11_10,
+ FN_IP0_13_12, FN_IP0_15_14, FN_IP0_18_16, FN_IP0_22_19,
+ FN_IP0_24_23, FN_IP0_25, FN_IP0_27_26, FN_IP1_1_0,
+ FN_IP1_3_2, FN_IP1_6_4, FN_IP1_10_7, FN_IP1_14_11,
+ FN_IP1_18_15, FN_IP0_5_3, FN_IP0_30_28, FN_IP2_18_16,
+ FN_IP2_21_19, FN_IP2_30_28, FN_IP3_2_0, FN_IP3_11_9,
+ FN_IP3_14_12, FN_IP3_22_21, FN_IP3_26_24, FN_IP3_31_29,
+
+ /* GPSR1 */
+ FN_IP4_1_0, FN_IP4_4_2, FN_IP4_7_5, FN_IP4_10_8,
+ FN_IP4_11, FN_IP4_12, FN_IP4_13, FN_IP4_14,
+ FN_IP4_15, FN_IP4_16, FN_IP4_19_17, FN_IP4_22_20,
+ FN_IP4_23, FN_IP4_24, FN_IP4_25, FN_IP4_26,
+ FN_IP4_27, FN_IP4_28, FN_IP4_31_29, FN_IP5_2_0,
+ FN_IP5_3, FN_IP5_4, FN_IP5_5, FN_IP5_6,
+ FN_IP5_7, FN_IP5_8, FN_IP5_10_9, FN_IP5_12_11,
+ FN_IP5_14_13, FN_IP5_16_15, FN_IP5_20_17, FN_IP5_23_21,
+
+ /* GPSR2 */
+ FN_IP5_27_24, FN_IP8_20, FN_IP8_22_21, FN_IP8_24_23,
+ FN_IP8_27_25, FN_IP8_30_28, FN_IP9_1_0, FN_IP9_3_2,
+ FN_IP9_4, FN_IP9_5, FN_IP9_6, FN_IP9_7,
+ FN_IP9_9_8, FN_IP9_11_10, FN_IP9_13_12, FN_IP9_15_14,
+ FN_IP9_18_16, FN_IP9_21_19, FN_IP9_23_22, FN_IP9_25_24,
+ FN_IP9_27_26, FN_IP9_29_28, FN_IP10_2_0, FN_IP10_5_3,
+ FN_IP10_8_6, FN_IP10_11_9, FN_IP10_14_12, FN_IP10_17_15,
+ FN_IP10_20_18, FN_IP10_23_21, FN_IP10_25_24, FN_IP10_28_26,
+
+ /* GPSR3 */
+ FN_IP10_31_29, FN_IP11_2_0, FN_IP11_5_3, FN_IP11_8_6,
+ FN_IP11_11_9, FN_IP11_14_12, FN_IP11_17_15, FN_IP11_20_18,
+ FN_IP11_23_21, FN_IP11_26_24, FN_IP11_29_27, FN_IP12_2_0,
+ FN_IP12_5_3, FN_IP12_8_6, FN_IP12_11_9, FN_IP12_14_12,
+ FN_IP12_17_15, FN_IP7_16_15, FN_IP7_18_17, FN_IP7_28_27,
+ FN_IP7_30_29, FN_IP7_20_19, FN_IP7_22_21, FN_IP7_24_23,
+ FN_IP7_26_25, FN_IP1_20_19, FN_IP1_22_21, FN_IP1_24_23,
+ FN_IP5_28, FN_IP5_30_29, FN_IP6_1_0, FN_IP6_3_2,
+
+ /* GPSR4 */
+ FN_IP6_5_4, FN_IP6_7_6, FN_IP6_8, FN_IP6_11_9,
+ FN_IP6_14_12, FN_IP6_17_15, FN_IP6_19_18, FN_IP6_22_20,
+ FN_IP6_24_23, FN_IP6_26_25, FN_IP6_30_29, FN_IP7_1_0,
+ FN_IP7_3_2, FN_IP7_6_4, FN_IP7_9_7, FN_IP7_12_10,
+ FN_IP7_14_13, FN_IP2_7_4, FN_IP2_11_8, FN_IP2_15_12,
+ FN_IP1_28_25, FN_IP2_3_0, FN_IP8_3_0, FN_IP8_7_4,
+ FN_IP8_11_8, FN_IP8_15_12, FN_USB_PENC0, FN_USB_PENC1,
+ FN_IP0_2_0, FN_IP8_17_16, FN_IP8_18, FN_IP8_19,
+
+ /* GPSR5 */
+ FN_A1, FN_A2, FN_A3, FN_A4,
+ FN_A5, FN_A6, FN_A7, FN_A8,
+ FN_A9, FN_A10, FN_A11, FN_A12,
+ FN_A13, FN_A14, FN_A15, FN_A16,
+ FN_RD, FN_WE0, FN_WE1, FN_EX_WAIT0,
+ FN_IP3_23, FN_IP3_27, FN_IP3_28, FN_IP2_22,
+ FN_IP2_23, FN_IP2_24, FN_IP2_25, FN_IP2_26,
+ FN_IP2_27, FN_IP3_3, FN_IP3_4, FN_IP3_5,
+
+ /* GPSR6 */
+ FN_IP3_6, FN_IP3_7, FN_IP3_8, FN_IP3_15,
+ FN_IP3_16, FN_IP3_17, FN_IP3_18, FN_IP3_19,
+ FN_IP3_20,
+
+ /* IPSR0 */
+ FN_RD_WR, FN_FWE, FN_ATAG0, FN_VI1_R7,
+ FN_HRTS1, FN_RX4_C,
+ FN_CS1_A26, FN_HSPI_TX2, FN_SDSELF_B,
+ FN_CS0, FN_HSPI_CS2_B,
+ FN_CLKOUT, FN_TX3C_IRDA_TX_C, FN_PWM0_B,
+ FN_A25, FN_SD1_WP, FN_MMC0_D5, FN_FD5,
+ FN_HSPI_RX2, FN_VI1_R3, FN_TX5_B, FN_SSI_SDATA7_B,
+ FN_CTS0_B,
+ FN_A24, FN_SD1_CD, FN_MMC0_D4, FN_FD4,
+ FN_HSPI_CS2, FN_VI1_R2, FN_SSI_WS78_B,
+ FN_A23, FN_FCLE, FN_HSPI_CLK2, FN_VI1_R1,
+ FN_A22, FN_RX5_D, FN_HSPI_RX2_B, FN_VI1_R0,
+ FN_A21, FN_SCK5_D, FN_HSPI_CLK2_B,
+ FN_A20, FN_TX5_D, FN_HSPI_TX2_B,
+ FN_A0, FN_SD1_DAT3, FN_MMC0_D3, FN_FD3,
+ FN_BS, FN_SD1_DAT2, FN_MMC0_D2, FN_FD2,
+ FN_ATADIR0, FN_SDSELF, FN_HCTS1, FN_TX4_C,
+ FN_USB_PENC2, FN_SCK0, FN_PWM1, FN_PWMFSW0,
+ FN_SCIF_CLK, FN_TCLK0_C,
+
+ /* IPSR1 */
+ FN_EX_CS0, FN_RX3_C_IRDA_RX_C, FN_MMC0_D6,
+ FN_FD6, FN_EX_CS1, FN_MMC0_D7, FN_FD7,
+ FN_EX_CS2, FN_SD1_CLK, FN_MMC0_CLK, FN_FALE,
+ FN_ATACS00, FN_EX_CS3, FN_SD1_CMD, FN_MMC0_CMD,
+ FN_FRE, FN_ATACS10, FN_VI1_R4, FN_RX5_B,
+ FN_HSCK1, FN_SSI_SDATA8_B, FN_RTS0_B_TANS_B, FN_SSI_SDATA9,
+ FN_EX_CS4, FN_SD1_DAT0, FN_MMC0_D0, FN_FD0,
+ FN_ATARD0, FN_VI1_R5, FN_SCK5_B, FN_HTX1,
+ FN_TX2_E, FN_TX0_B, FN_SSI_SCK9, FN_EX_CS5,
+ FN_SD1_DAT1, FN_MMC0_D1, FN_FD1, FN_ATAWR0,
+ FN_VI1_R6, FN_HRX1, FN_RX2_E, FN_RX0_B,
+ FN_SSI_WS9, FN_MLB_CLK, FN_PWM2, FN_SCK4,
+ FN_MLB_SIG, FN_PWM3, FN_TX4, FN_MLB_DAT,
+ FN_PWM4, FN_RX4, FN_HTX0, FN_TX1,
+ FN_SDATA, FN_CTS0_C, FN_SUB_TCK, FN_CC5_STATE2,
+ FN_CC5_STATE10, FN_CC5_STATE18, FN_CC5_STATE26, FN_CC5_STATE34,
+
+ /* IPSR2 */
+ FN_HRX0, FN_RX1, FN_SCKZ, FN_RTS0_C_TANS_C,
+ FN_SUB_TDI, FN_CC5_STATE3, FN_CC5_STATE11, FN_CC5_STATE19,
+ FN_CC5_STATE27, FN_CC5_STATE35, FN_HSCK0, FN_SCK1,
+ FN_MTS, FN_PWM5, FN_SCK0_C, FN_SSI_SDATA9_B,
+ FN_SUB_TDO, FN_CC5_STATE0, FN_CC5_STATE8, FN_CC5_STATE16,
+ FN_CC5_STATE24, FN_CC5_STATE32, FN_HCTS0, FN_CTS1,
+ FN_STM, FN_PWM0_D, FN_RX0_C, FN_SCIF_CLK_C,
+ FN_SUB_TRST, FN_TCLK1_B, FN_CC5_OSCOUT, FN_HRTS0,
+ FN_RTS1_TANS, FN_MDATA, FN_TX0_C, FN_SUB_TMS,
+ FN_CC5_STATE1, FN_CC5_STATE9, FN_CC5_STATE17, FN_CC5_STATE25,
+ FN_CC5_STATE33, FN_DU0_DR0, FN_LCDOUT0, FN_DREQ0,
+ FN_GPS_CLK_B, FN_AUDATA0, FN_TX5_C, FN_DU0_DR1,
+ FN_LCDOUT1, FN_DACK0, FN_DRACK0, FN_GPS_SIGN_B,
+ FN_AUDATA1, FN_RX5_C, FN_DU0_DR2, FN_LCDOUT2,
+ FN_DU0_DR3, FN_LCDOUT3, FN_DU0_DR4, FN_LCDOUT4,
+ FN_DU0_DR5, FN_LCDOUT5, FN_DU0_DR6, FN_LCDOUT6,
+ FN_DU0_DR7, FN_LCDOUT7, FN_DU0_DG0, FN_LCDOUT8,
+ FN_DREQ1, FN_SCL2, FN_AUDATA2,
+
+ /* IPSR3 */
+ FN_DU0_DG1, FN_LCDOUT9, FN_DACK1, FN_SDA2,
+ FN_AUDATA3, FN_DU0_DG2, FN_LCDOUT10, FN_DU0_DG3,
+ FN_LCDOUT11, FN_DU0_DG4, FN_LCDOUT12, FN_DU0_DG5,
+ FN_LCDOUT13, FN_DU0_DG6, FN_LCDOUT14, FN_DU0_DG7,
+ FN_LCDOUT15, FN_DU0_DB0, FN_LCDOUT16, FN_EX_WAIT1,
+ FN_SCL1, FN_TCLK1, FN_AUDATA4, FN_DU0_DB1,
+ FN_LCDOUT17, FN_EX_WAIT2, FN_SDA1, FN_GPS_MAG_B,
+ FN_AUDATA5, FN_SCK5_C, FN_DU0_DB2, FN_LCDOUT18,
+ FN_DU0_DB3, FN_LCDOUT19, FN_DU0_DB4, FN_LCDOUT20,
+ FN_DU0_DB5, FN_LCDOUT21, FN_DU0_DB6, FN_LCDOUT22,
+ FN_DU0_DB7, FN_LCDOUT23, FN_DU0_DOTCLKIN, FN_QSTVA_QVS,
+ FN_TX3_D_IRDA_TX_D, FN_SCL3_B, FN_DU0_DOTCLKOUT0, FN_QCLK,
+ FN_DU0_DOTCLKOUT1, FN_QSTVB_QVE, FN_RX3_D_IRDA_RX_D, FN_SDA3_B,
+ FN_SDA2_C, FN_DACK0_B, FN_DRACK0_B, FN_DU0_EXHSYNC_DU0_HSYNC,
+ FN_QSTH_QHS, FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE,
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CAN1_TX,
+ FN_TX2_C, FN_SCL2_C, FN_REMOCON,
+
+ /* IPSR4 */
+ FN_DU0_DISP, FN_QPOLA, FN_CAN_CLK_C, FN_SCK2_C,
+ FN_DU0_CDE, FN_QPOLB, FN_CAN1_RX, FN_RX2_C,
+ FN_DREQ0_B, FN_SSI_SCK78_B, FN_SCK0_B, FN_DU1_DR0,
+ FN_VI2_DATA0_VI2_B0, FN_PWM6, FN_SD3_CLK, FN_TX3_E_IRDA_TX_E,
+ FN_AUDCK, FN_PWMFSW0_B, FN_DU1_DR1, FN_VI2_DATA1_VI2_B1,
+ FN_PWM0, FN_SD3_CMD, FN_RX3_E_IRDA_RX_E, FN_AUDSYNC,
+ FN_CTS0_D, FN_DU1_DR2, FN_VI2_G0, FN_DU1_DR3,
+ FN_VI2_G1, FN_DU1_DR4, FN_VI2_G2, FN_DU1_DR5,
+ FN_VI2_G3, FN_DU1_DR6, FN_VI2_G4, FN_DU1_DR7,
+ FN_VI2_G5, FN_DU1_DG0, FN_VI2_DATA2_VI2_B2, FN_SCL1_B,
+ FN_SD3_DAT2, FN_SCK3_E, FN_AUDATA6, FN_TX0_D,
+ FN_DU1_DG1, FN_VI2_DATA3_VI2_B3, FN_SDA1_B, FN_SD3_DAT3,
+ FN_SCK5, FN_AUDATA7, FN_RX0_D, FN_DU1_DG2,
+ FN_VI2_G6, FN_DU1_DG3, FN_VI2_G7, FN_DU1_DG4,
+ FN_VI2_R0, FN_DU1_DG5, FN_VI2_R1, FN_DU1_DG6,
+ FN_VI2_R2, FN_DU1_DG7, FN_VI2_R3, FN_DU1_DB0,
+ FN_VI2_DATA4_VI2_B4, FN_SCL2_B, FN_SD3_DAT0, FN_TX5,
+ FN_SCK0_D,
+
+ /* IPSR5 */
+ FN_DU1_DB1, FN_VI2_DATA5_VI2_B5, FN_SDA2_B, FN_SD3_DAT1,
+ FN_RX5, FN_RTS0_D_TANS_D, FN_DU1_DB2, FN_VI2_R4,
+ FN_DU1_DB3, FN_VI2_R5, FN_DU1_DB4, FN_VI2_R6,
+ FN_DU1_DB5, FN_VI2_R7, FN_DU1_DB6, FN_SCL2_D,
+ FN_DU1_DB7, FN_SDA2_D, FN_DU1_DOTCLKIN, FN_VI2_CLKENB,
+ FN_HSPI_CS1, FN_SCL1_D, FN_DU1_DOTCLKOUT, FN_VI2_FIELD,
+ FN_SDA1_D, FN_DU1_EXHSYNC_DU1_HSYNC, FN_VI2_HSYNC,
+ FN_VI3_HSYNC, FN_DU1_EXVSYNC_DU1_VSYNC, FN_VI2_VSYNC, FN_VI3_VSYNC,
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_VI2_CLK, FN_TX3_B_IRDA_TX_B,
+ FN_SD3_CD, FN_HSPI_TX1, FN_VI1_CLKENB, FN_VI3_CLKENB,
+ FN_AUDIO_CLKC, FN_TX2_D, FN_SPEEDIN, FN_GPS_SIGN_D,
+ FN_DU1_DISP, FN_VI2_DATA6_VI2_B6, FN_TCLK0, FN_QSTVA_B_QVS_B,
+ FN_HSPI_CLK1, FN_SCK2_D, FN_AUDIO_CLKOUT_B, FN_GPS_MAG_D,
+ FN_DU1_CDE, FN_VI2_DATA7_VI2_B7, FN_RX3_B_IRDA_RX_B,
+ FN_SD3_WP, FN_HSPI_RX1, FN_VI1_FIELD, FN_VI3_FIELD,
+ FN_AUDIO_CLKOUT, FN_RX2_D, FN_GPS_CLK_C, FN_GPS_CLK_D,
+ FN_AUDIO_CLKA, FN_CAN_TXCLK, FN_AUDIO_CLKB, FN_USB_OVC2,
+ FN_CAN_DEBUGOUT0, FN_MOUT0,
+
+ /* IPSR6 */
+ FN_SSI_SCK0129, FN_CAN_DEBUGOUT1, FN_MOUT1, FN_SSI_WS0129,
+ FN_CAN_DEBUGOUT2, FN_MOUT2, FN_SSI_SDATA0, FN_CAN_DEBUGOUT3,
+ FN_MOUT5, FN_SSI_SDATA1, FN_CAN_DEBUGOUT4, FN_MOUT6,
+ FN_SSI_SDATA2, FN_CAN_DEBUGOUT5, FN_SSI_SCK34, FN_CAN_DEBUGOUT6,
+ FN_CAN0_TX_B, FN_IERX, FN_SSI_SCK9_C, FN_SSI_WS34,
+ FN_CAN_DEBUGOUT7, FN_CAN0_RX_B, FN_IETX, FN_SSI_WS9_C,
+ FN_SSI_SDATA3, FN_PWM0_C, FN_CAN_DEBUGOUT8, FN_CAN_CLK_B,
+ FN_IECLK, FN_SCIF_CLK_B, FN_TCLK0_B, FN_SSI_SDATA4,
+ FN_CAN_DEBUGOUT9, FN_SSI_SDATA9_C, FN_SSI_SCK5, FN_ADICLK,
+ FN_CAN_DEBUGOUT10, FN_SCK3, FN_TCLK0_D, FN_SSI_WS5,
+ FN_ADICS_SAMP, FN_CAN_DEBUGOUT11, FN_TX3_IRDA_TX, FN_SSI_SDATA5,
+ FN_ADIDATA, FN_CAN_DEBUGOUT12, FN_RX3_IRDA_RX, FN_SSI_SCK6,
+ FN_ADICHS0, FN_CAN0_TX, FN_IERX_B,
+
+ /* IPSR7 */
+ FN_SSI_WS6, FN_ADICHS1, FN_CAN0_RX, FN_IETX_B,
+ FN_SSI_SDATA6, FN_ADICHS2, FN_CAN_CLK, FN_IECLK_B,
+ FN_SSI_SCK78, FN_CAN_DEBUGOUT13, FN_IRQ0_B, FN_SSI_SCK9_B,
+ FN_HSPI_CLK1_C, FN_SSI_WS78, FN_CAN_DEBUGOUT14, FN_IRQ1_B,
+ FN_SSI_WS9_B, FN_HSPI_CS1_C, FN_SSI_SDATA7, FN_CAN_DEBUGOUT15,
+ FN_IRQ2_B, FN_TCLK1_C, FN_HSPI_TX1_C, FN_SSI_SDATA8,
+ FN_VSP, FN_IRQ3_B, FN_HSPI_RX1_C, FN_SD0_CLK,
+ FN_ATACS01, FN_SCK1_B, FN_SD0_CMD, FN_ATACS11,
+ FN_TX1_B, FN_CC5_TDO, FN_SD0_DAT0, FN_ATADIR1,
+ FN_RX1_B, FN_CC5_TRST, FN_SD0_DAT1, FN_ATAG1,
+ FN_SCK2_B, FN_CC5_TMS, FN_SD0_DAT2, FN_ATARD1,
+ FN_TX2_B, FN_CC5_TCK, FN_SD0_DAT3, FN_ATAWR1,
+ FN_RX2_B, FN_CC5_TDI, FN_SD0_CD, FN_DREQ2,
+ FN_RTS1_B_TANS_B, FN_SD0_WP, FN_DACK2, FN_CTS1_B,
+
+ /* IPSR8 */
+ FN_HSPI_CLK0, FN_CTS0, FN_USB_OVC0, FN_AD_CLK,
+ FN_CC5_STATE4, FN_CC5_STATE12, FN_CC5_STATE20, FN_CC5_STATE28,
+ FN_CC5_STATE36, FN_HSPI_CS0, FN_RTS0_TANS, FN_USB_OVC1,
+ FN_AD_DI, FN_CC5_STATE5, FN_CC5_STATE13, FN_CC5_STATE21,
+ FN_CC5_STATE29, FN_CC5_STATE37, FN_HSPI_TX0, FN_TX0,
+ FN_CAN_DEBUG_HW_TRIGGER, FN_AD_DO, FN_CC5_STATE6, FN_CC5_STATE14,
+ FN_CC5_STATE22, FN_CC5_STATE30, FN_CC5_STATE38, FN_HSPI_RX0,
+ FN_RX0, FN_CAN_STEP0, FN_AD_NCS, FN_CC5_STATE7,
+ FN_CC5_STATE15, FN_CC5_STATE23, FN_CC5_STATE31, FN_CC5_STATE39,
+ FN_FMCLK, FN_RDS_CLK, FN_PCMOE, FN_BPFCLK,
+ FN_PCMWE, FN_FMIN, FN_RDS_DATA, FN_VI0_CLK,
+ FN_MMC1_CLK, FN_VI0_CLKENB, FN_TX1_C, FN_HTX1_B,
+ FN_MT1_SYNC, FN_VI0_FIELD, FN_RX1_C, FN_HRX1_B,
+ FN_VI0_HSYNC, FN_VI0_DATA0_B_VI0_B0_B, FN_CTS1_C, FN_TX4_D,
+ FN_MMC1_CMD, FN_HSCK1_B, FN_VI0_VSYNC, FN_VI0_DATA1_B_VI0_B1_B,
+ FN_RTS1_C_TANS_C, FN_RX4_D, FN_PWMFSW0_C,
+
+ /* IPSR9 */
+ FN_VI0_DATA0_VI0_B0, FN_HRTS1_B, FN_MT1_VCXO, FN_VI0_DATA1_VI0_B1,
+ FN_HCTS1_B, FN_MT1_PWM, FN_VI0_DATA2_VI0_B2, FN_MMC1_D0,
+ FN_VI0_DATA3_VI0_B3, FN_MMC1_D1, FN_VI0_DATA4_VI0_B4, FN_MMC1_D2,
+ FN_VI0_DATA5_VI0_B5, FN_MMC1_D3, FN_VI0_DATA6_VI0_B6, FN_MMC1_D4,
+ FN_ARM_TRACEDATA_0, FN_VI0_DATA7_VI0_B7, FN_MMC1_D5,
+ FN_ARM_TRACEDATA_1, FN_VI0_G0, FN_SSI_SCK78_C, FN_IRQ0,
+ FN_ARM_TRACEDATA_2, FN_VI0_G1, FN_SSI_WS78_C, FN_IRQ1,
+ FN_ARM_TRACEDATA_3, FN_VI0_G2, FN_ETH_TXD1, FN_MMC1_D6,
+ FN_ARM_TRACEDATA_4, FN_TS_SPSYNC0, FN_VI0_G3, FN_ETH_CRS_DV,
+ FN_MMC1_D7, FN_ARM_TRACEDATA_5, FN_TS_SDAT0, FN_VI0_G4,
+ FN_ETH_TX_EN, FN_SD2_DAT0_B, FN_ARM_TRACEDATA_6, FN_VI0_G5,
+ FN_ETH_RX_ER, FN_SD2_DAT1_B, FN_ARM_TRACEDATA_7, FN_VI0_G6,
+ FN_ETH_RXD0, FN_SD2_DAT2_B, FN_ARM_TRACEDATA_8, FN_VI0_G7,
+ FN_ETH_RXD1, FN_SD2_DAT3_B, FN_ARM_TRACEDATA_9,
+
+ /* IPSR10 */
+ FN_VI0_R0, FN_SSI_SDATA7_C, FN_SCK1_C, FN_DREQ1_B,
+ FN_ARM_TRACEDATA_10, FN_DREQ0_C, FN_VI0_R1, FN_SSI_SDATA8_C,
+ FN_DACK1_B, FN_ARM_TRACEDATA_11, FN_DACK0_C, FN_DRACK0_C,
+ FN_VI0_R2, FN_ETH_LINK, FN_SD2_CLK_B, FN_IRQ2,
+ FN_ARM_TRACEDATA_12, FN_VI0_R3, FN_ETH_MAGIC, FN_SD2_CMD_B,
+ FN_IRQ3, FN_ARM_TRACEDATA_13, FN_VI0_R4, FN_ETH_REFCLK,
+ FN_SD2_CD_B, FN_HSPI_CLK1_B, FN_ARM_TRACEDATA_14, FN_MT1_CLK,
+ FN_TS_SCK0, FN_VI0_R5, FN_ETH_TXD0, FN_SD2_WP_B, FN_HSPI_CS1_B,
+ FN_ARM_TRACEDATA_15, FN_MT1_D, FN_TS_SDEN0, FN_VI0_R6,
+ FN_ETH_MDC, FN_DREQ2_C, FN_HSPI_TX1_B, FN_TRACECLK,
+ FN_MT1_BEN, FN_PWMFSW0_D, FN_VI0_R7, FN_ETH_MDIO,
+ FN_DACK2_C, FN_HSPI_RX1_B, FN_SCIF_CLK_D, FN_TRACECTL,
+ FN_MT1_PEN, FN_VI1_CLK, FN_SIM_D, FN_SDA3,
+ FN_VI1_HSYNC, FN_VI3_CLK, FN_SSI_SCK4, FN_GPS_SIGN_C,
+ FN_PWMFSW0_E, FN_VI1_VSYNC, FN_AUDIO_CLKOUT_C, FN_SSI_WS4,
+ FN_SIM_CLK, FN_GPS_MAG_C, FN_SPV_TRST, FN_SCL3,
+
+ /* IPSR11 */
+ FN_VI1_DATA0_VI1_B0, FN_SD2_DAT0, FN_SIM_RST, FN_SPV_TCK,
+ FN_ADICLK_B, FN_VI1_DATA1_VI1_B1, FN_SD2_DAT1, FN_MT0_CLK,
+ FN_SPV_TMS, FN_ADICS_B_SAMP_B, FN_VI1_DATA2_VI1_B2, FN_SD2_DAT2,
+ FN_MT0_D, FN_SPVTDI, FN_ADIDATA_B, FN_VI1_DATA3_VI1_B3,
+ FN_SD2_DAT3, FN_MT0_BEN, FN_SPV_TDO, FN_ADICHS0_B,
+ FN_VI1_DATA4_VI1_B4, FN_SD2_CLK, FN_MT0_PEN, FN_SPA_TRST,
+ FN_HSPI_CLK1_D, FN_ADICHS1_B, FN_VI1_DATA5_VI1_B5, FN_SD2_CMD,
+ FN_MT0_SYNC, FN_SPA_TCK, FN_HSPI_CS1_D, FN_ADICHS2_B,
+ FN_VI1_DATA6_VI1_B6, FN_SD2_CD, FN_MT0_VCXO, FN_SPA_TMS,
+ FN_HSPI_TX1_D, FN_VI1_DATA7_VI1_B7, FN_SD2_WP, FN_MT0_PWM,
+ FN_SPA_TDI, FN_HSPI_RX1_D, FN_VI1_G0, FN_VI3_DATA0,
+ FN_DU1_DOTCLKOUT1, FN_TS_SCK1, FN_DREQ2_B, FN_TX2,
+ FN_SPA_TDO, FN_HCTS0_B, FN_VI1_G1, FN_VI3_DATA1,
+ FN_SSI_SCK1, FN_TS_SDEN1, FN_DACK2_B, FN_RX2, FN_HRTS0_B,
+
+ /* IPSR12 */
+ FN_VI1_G2, FN_VI3_DATA2, FN_SSI_WS1, FN_TS_SPSYNC1,
+ FN_SCK2, FN_HSCK0_B, FN_VI1_G3, FN_VI3_DATA3,
+ FN_SSI_SCK2, FN_TS_SDAT1, FN_SCL1_C, FN_HTX0_B,
+ FN_VI1_G4, FN_VI3_DATA4, FN_SSI_WS2, FN_SDA1_C,
+ FN_SIM_RST_B, FN_HRX0_B, FN_VI1_G5, FN_VI3_DATA5,
+ FN_GPS_CLK, FN_FSE, FN_TX4_B, FN_SIM_D_B,
+ FN_VI1_G6, FN_VI3_DATA6, FN_GPS_SIGN, FN_FRB,
+ FN_RX4_B, FN_SIM_CLK_B, FN_VI1_G7, FN_VI3_DATA7,
+ FN_GPS_MAG, FN_FCE, FN_SCK4_B,
+
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2,
+ FN_SEL_SCIF3_3, FN_SEL_SCIF3_4,
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2,
+ FN_SEL_SCIF2_3, FN_SEL_SCIF2_4,
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2,
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1, FN_SEL_SSI9_2,
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2,
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1, FN_SEL_SSI7_2,
+ FN_SEL_VI0_0, FN_SEL_VI0_1,
+ FN_SEL_SD2_0, FN_SEL_SD2_1,
+ FN_SEL_INT3_0, FN_SEL_INT3_1,
+ FN_SEL_INT2_0, FN_SEL_INT2_1,
+ FN_SEL_INT1_0, FN_SEL_INT1_1,
+ FN_SEL_INT0_0, FN_SEL_INT0_1,
+ FN_SEL_IE_0, FN_SEL_IE_1,
+ FN_SEL_EXBUS2_0, FN_SEL_EXBUS2_1, FN_SEL_EXBUS2_2,
+ FN_SEL_EXBUS1_0, FN_SEL_EXBUS1_1,
+ FN_SEL_EXBUS0_0, FN_SEL_EXBUS0_1, FN_SEL_EXBUS0_2,
+
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1, FN_SEL_TMU1_2,
+ FN_SEL_TMU0_0, FN_SEL_TMU0_1, FN_SEL_TMU0_2, FN_SEL_TMU0_3,
+ FN_SEL_SCIF_0, FN_SEL_SCIF_1, FN_SEL_SCIF_2, FN_SEL_SCIF_3,
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2,
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1,
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1,
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
+ FN_SEL_PWMFSW_0, FN_SEL_PWMFSW_1, FN_SEL_PWMFSW_2,
+ FN_SEL_PWMFSW_3, FN_SEL_PWMFSW_4,
+ FN_SEL_ADI_0, FN_SEL_ADI_1,
+ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3,
+ FN_SEL_SIM_0, FN_SEL_SIM_1,
+ FN_SEL_HSPI2_0, FN_SEL_HSPI2_1,
+ FN_SEL_HSPI1_0, FN_SEL_HSPI1_1, FN_SEL_HSPI1_2, FN_SEL_HSPI1_3,
+ FN_SEL_I2C3_0, FN_SEL_I2C3_1,
+ FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
+ FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ AVS1_MARK, AVS2_MARK, A17_MARK, A18_MARK,
+ A19_MARK,
+
+ RD_WR_MARK, FWE_MARK, ATAG0_MARK, VI1_R7_MARK,
+ HRTS1_MARK, RX4_C_MARK,
+ CS1_A26_MARK, HSPI_TX2_MARK, SDSELF_B_MARK,
+ CS0_MARK, HSPI_CS2_B_MARK,
+ CLKOUT_MARK, TX3C_IRDA_TX_C_MARK, PWM0_B_MARK,
+ A25_MARK, SD1_WP_MARK, MMC0_D5_MARK, FD5_MARK,
+ HSPI_RX2_MARK, VI1_R3_MARK, TX5_B_MARK, SSI_SDATA7_B_MARK, CTS0_B_MARK,
+ A24_MARK, SD1_CD_MARK, MMC0_D4_MARK, FD4_MARK,
+ HSPI_CS2_MARK, VI1_R2_MARK, SSI_WS78_B_MARK,
+ A23_MARK, FCLE_MARK, HSPI_CLK2_MARK, VI1_R1_MARK,
+ A22_MARK, RX5_D_MARK, HSPI_RX2_B_MARK, VI1_R0_MARK,
+ A21_MARK, SCK5_D_MARK, HSPI_CLK2_B_MARK,
+ A20_MARK, TX5_D_MARK, HSPI_TX2_B_MARK,
+ A0_MARK, SD1_DAT3_MARK, MMC0_D3_MARK, FD3_MARK,
+ BS_MARK, SD1_DAT2_MARK, MMC0_D2_MARK, FD2_MARK,
+ ATADIR0_MARK, SDSELF_MARK, HCTS1_MARK, TX4_C_MARK,
+ USB_PENC2_MARK, SCK0_MARK, PWM1_MARK, PWMFSW0_MARK,
+ SCIF_CLK_MARK, TCLK0_C_MARK,
+
+ EX_CS0_MARK, RX3_C_IRDA_RX_C_MARK, MMC0_D6_MARK,
+ FD6_MARK, EX_CS1_MARK, MMC0_D7_MARK, FD7_MARK,
+ EX_CS2_MARK, SD1_CLK_MARK, MMC0_CLK_MARK, FALE_MARK,
+ ATACS00_MARK, EX_CS3_MARK, SD1_CMD_MARK, MMC0_CMD_MARK,
+ FRE_MARK, ATACS10_MARK, VI1_R4_MARK, RX5_B_MARK,
+ HSCK1_MARK, SSI_SDATA8_B_MARK, RTS0_B_TANS_B_MARK, SSI_SDATA9_MARK,
+ EX_CS4_MARK, SD1_DAT0_MARK, MMC0_D0_MARK, FD0_MARK,
+ ATARD0_MARK, VI1_R5_MARK, SCK5_B_MARK, HTX1_MARK,
+ TX2_E_MARK, TX0_B_MARK, SSI_SCK9_MARK, EX_CS5_MARK,
+ SD1_DAT1_MARK, MMC0_D1_MARK, FD1_MARK, ATAWR0_MARK,
+ VI1_R6_MARK, HRX1_MARK, RX2_E_MARK, RX0_B_MARK,
+ SSI_WS9_MARK, MLB_CLK_MARK, PWM2_MARK, SCK4_MARK,
+ MLB_SIG_MARK, PWM3_MARK, TX4_MARK, MLB_DAT_MARK,
+ PWM4_MARK, RX4_MARK, HTX0_MARK, TX1_MARK,
+ SDATA_MARK, CTS0_C_MARK, SUB_TCK_MARK, CC5_STATE2_MARK,
+ CC5_STATE10_MARK, CC5_STATE18_MARK, CC5_STATE26_MARK, CC5_STATE34_MARK,
+
+ HRX0_MARK, RX1_MARK, SCKZ_MARK, RTS0_C_TANS_C_MARK,
+ SUB_TDI_MARK, CC5_STATE3_MARK, CC5_STATE11_MARK, CC5_STATE19_MARK,
+ CC5_STATE27_MARK, CC5_STATE35_MARK, HSCK0_MARK, SCK1_MARK,
+ MTS_MARK, PWM5_MARK, SCK0_C_MARK, SSI_SDATA9_B_MARK,
+ SUB_TDO_MARK, CC5_STATE0_MARK, CC5_STATE8_MARK, CC5_STATE16_MARK,
+ CC5_STATE24_MARK, CC5_STATE32_MARK, HCTS0_MARK, CTS1_MARK,
+ STM_MARK, PWM0_D_MARK, RX0_C_MARK, SCIF_CLK_C_MARK,
+ SUB_TRST_MARK, TCLK1_B_MARK, CC5_OSCOUT_MARK, HRTS0_MARK,
+ RTS1_TANS_MARK, MDATA_MARK, TX0_C_MARK, SUB_TMS_MARK,
+ CC5_STATE1_MARK, CC5_STATE9_MARK, CC5_STATE17_MARK, CC5_STATE25_MARK,
+ CC5_STATE33_MARK, DU0_DR0_MARK, LCDOUT0_MARK, DREQ0_MARK,
+ GPS_CLK_B_MARK, AUDATA0_MARK, TX5_C_MARK, DU0_DR1_MARK,
+ LCDOUT1_MARK, DACK0_MARK, DRACK0_MARK, GPS_SIGN_B_MARK,
+ AUDATA1_MARK, RX5_C_MARK, DU0_DR2_MARK, LCDOUT2_MARK,
+ DU0_DR3_MARK, LCDOUT3_MARK, DU0_DR4_MARK, LCDOUT4_MARK,
+ DU0_DR5_MARK, LCDOUT5_MARK, DU0_DR6_MARK, LCDOUT6_MARK,
+ DU0_DR7_MARK, LCDOUT7_MARK, DU0_DG0_MARK, LCDOUT8_MARK,
+ DREQ1_MARK, SCL2_MARK, AUDATA2_MARK,
+
+ DU0_DG1_MARK, LCDOUT9_MARK, DACK1_MARK, SDA2_MARK,
+ AUDATA3_MARK, DU0_DG2_MARK, LCDOUT10_MARK, DU0_DG3_MARK,
+ LCDOUT11_MARK, DU0_DG4_MARK, LCDOUT12_MARK, DU0_DG5_MARK,
+ LCDOUT13_MARK, DU0_DG6_MARK, LCDOUT14_MARK, DU0_DG7_MARK,
+ LCDOUT15_MARK, DU0_DB0_MARK, LCDOUT16_MARK, EX_WAIT1_MARK,
+ SCL1_MARK, TCLK1_MARK, AUDATA4_MARK, DU0_DB1_MARK,
+ LCDOUT17_MARK, EX_WAIT2_MARK, SDA1_MARK, GPS_MAG_B_MARK,
+ AUDATA5_MARK, SCK5_C_MARK, DU0_DB2_MARK, LCDOUT18_MARK,
+ DU0_DB3_MARK, LCDOUT19_MARK, DU0_DB4_MARK, LCDOUT20_MARK,
+ DU0_DB5_MARK, LCDOUT21_MARK, DU0_DB6_MARK, LCDOUT22_MARK,
+ DU0_DB7_MARK, LCDOUT23_MARK, DU0_DOTCLKIN_MARK, QSTVA_QVS_MARK,
+ TX3_D_IRDA_TX_D_MARK, SCL3_B_MARK, DU0_DOTCLKOUT0_MARK, QCLK_MARK,
+ DU0_DOTCLKOUT1_MARK, QSTVB_QVE_MARK, RX3_D_IRDA_RX_D_MARK, SDA3_B_MARK,
+ SDA2_C_MARK, DACK0_B_MARK, DRACK0_B_MARK, DU0_EXHSYNC_DU0_HSYNC_MARK,
+ QSTH_QHS_MARK, DU0_EXVSYNC_DU0_VSYNC_MARK, QSTB_QHE_MARK,
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK, QCPV_QDE_MARK, CAN1_TX_MARK,
+ TX2_C_MARK, SCL2_C_MARK, REMOCON_MARK,
+
+ DU0_DISP_MARK, QPOLA_MARK, CAN_CLK_C_MARK, SCK2_C_MARK,
+ DU0_CDE_MARK, QPOLB_MARK, CAN1_RX_MARK, RX2_C_MARK,
+ DREQ0_B_MARK, SSI_SCK78_B_MARK, SCK0_B_MARK, DU1_DR0_MARK,
+ VI2_DATA0_VI2_B0_MARK, PWM6_MARK, SD3_CLK_MARK, TX3_E_IRDA_TX_E_MARK,
+ AUDCK_MARK, PWMFSW0_B_MARK, DU1_DR1_MARK, VI2_DATA1_VI2_B1_MARK,
+ PWM0_MARK, SD3_CMD_MARK, RX3_E_IRDA_RX_E_MARK, AUDSYNC_MARK,
+ CTS0_D_MARK, DU1_DR2_MARK, VI2_G0_MARK, DU1_DR3_MARK,
+ VI2_G1_MARK, DU1_DR4_MARK, VI2_G2_MARK, DU1_DR5_MARK,
+ VI2_G3_MARK, DU1_DR6_MARK, VI2_G4_MARK, DU1_DR7_MARK,
+ VI2_G5_MARK, DU1_DG0_MARK, VI2_DATA2_VI2_B2_MARK, SCL1_B_MARK,
+ SD3_DAT2_MARK, SCK3_E_MARK, AUDATA6_MARK, TX0_D_MARK,
+ DU1_DG1_MARK, VI2_DATA3_VI2_B3_MARK, SDA1_B_MARK, SD3_DAT3_MARK,
+ SCK5_MARK, AUDATA7_MARK, RX0_D_MARK, DU1_DG2_MARK,
+ VI2_G6_MARK, DU1_DG3_MARK, VI2_G7_MARK, DU1_DG4_MARK,
+ VI2_R0_MARK, DU1_DG5_MARK, VI2_R1_MARK, DU1_DG6_MARK,
+ VI2_R2_MARK, DU1_DG7_MARK, VI2_R3_MARK, DU1_DB0_MARK,
+ VI2_DATA4_VI2_B4_MARK, SCL2_B_MARK, SD3_DAT0_MARK, TX5_MARK,
+ SCK0_D_MARK,
+
+ DU1_DB1_MARK, VI2_DATA5_VI2_B5_MARK, SDA2_B_MARK, SD3_DAT1_MARK,
+ RX5_MARK, RTS0_D_TANS_D_MARK, DU1_DB2_MARK, VI2_R4_MARK,
+ DU1_DB3_MARK, VI2_R5_MARK, DU1_DB4_MARK, VI2_R6_MARK,
+ DU1_DB5_MARK, VI2_R7_MARK, DU1_DB6_MARK, SCL2_D_MARK,
+ DU1_DB7_MARK, SDA2_D_MARK, DU1_DOTCLKIN_MARK, VI2_CLKENB_MARK,
+ HSPI_CS1_MARK, SCL1_D_MARK, DU1_DOTCLKOUT_MARK, VI2_FIELD_MARK,
+ SDA1_D_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK, VI2_HSYNC_MARK,
+ VI3_HSYNC_MARK, DU1_EXVSYNC_DU1_VSYNC_MARK, VI2_VSYNC_MARK,
+ VI3_VSYNC_MARK, DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK, VI2_CLK_MARK,
+ TX3_B_IRDA_TX_B_MARK, SD3_CD_MARK, HSPI_TX1_MARK, VI1_CLKENB_MARK,
+ VI3_CLKENB_MARK, AUDIO_CLKC_MARK, TX2_D_MARK, SPEEDIN_MARK,
+ GPS_SIGN_D_MARK, DU1_DISP_MARK, VI2_DATA6_VI2_B6_MARK, TCLK0_MARK,
+ QSTVA_B_QVS_B_MARK, HSPI_CLK1_MARK, SCK2_D_MARK, AUDIO_CLKOUT_B_MARK,
+ GPS_MAG_D_MARK, DU1_CDE_MARK, VI2_DATA7_VI2_B7_MARK,
+ RX3_B_IRDA_RX_B_MARK, SD3_WP_MARK, HSPI_RX1_MARK, VI1_FIELD_MARK,
+ VI3_FIELD_MARK, AUDIO_CLKOUT_MARK, RX2_D_MARK, GPS_CLK_C_MARK,
+ GPS_CLK_D_MARK, AUDIO_CLKA_MARK, CAN_TXCLK_MARK, AUDIO_CLKB_MARK,
+ USB_OVC2_MARK, CAN_DEBUGOUT0_MARK, MOUT0_MARK,
+
+ SSI_SCK0129_MARK, CAN_DEBUGOUT1_MARK, MOUT1_MARK, SSI_WS0129_MARK,
+ CAN_DEBUGOUT2_MARK, MOUT2_MARK, SSI_SDATA0_MARK, CAN_DEBUGOUT3_MARK,
+ MOUT5_MARK, SSI_SDATA1_MARK, CAN_DEBUGOUT4_MARK, MOUT6_MARK,
+ SSI_SDATA2_MARK, CAN_DEBUGOUT5_MARK, SSI_SCK34_MARK,
+ CAN_DEBUGOUT6_MARK, CAN0_TX_B_MARK, IERX_MARK, SSI_SCK9_C_MARK,
+ SSI_WS34_MARK, CAN_DEBUGOUT7_MARK, CAN0_RX_B_MARK, IETX_MARK,
+ SSI_WS9_C_MARK, SSI_SDATA3_MARK, PWM0_C_MARK, CAN_DEBUGOUT8_MARK,
+ CAN_CLK_B_MARK, IECLK_MARK, SCIF_CLK_B_MARK, TCLK0_B_MARK,
+ SSI_SDATA4_MARK, CAN_DEBUGOUT9_MARK, SSI_SDATA9_C_MARK, SSI_SCK5_MARK,
+ ADICLK_MARK, CAN_DEBUGOUT10_MARK, SCK3_MARK, TCLK0_D_MARK,
+ SSI_WS5_MARK, ADICS_SAMP_MARK, CAN_DEBUGOUT11_MARK, TX3_IRDA_TX_MARK,
+ SSI_SDATA5_MARK, ADIDATA_MARK, CAN_DEBUGOUT12_MARK, RX3_IRDA_RX_MARK,
+ SSI_SCK6_MARK, ADICHS0_MARK, CAN0_TX_MARK, IERX_B_MARK,
+
+ SSI_WS6_MARK, ADICHS1_MARK, CAN0_RX_MARK, IETX_B_MARK,
+ SSI_SDATA6_MARK, ADICHS2_MARK, CAN_CLK_MARK, IECLK_B_MARK,
+ SSI_SCK78_MARK, CAN_DEBUGOUT13_MARK, IRQ0_B_MARK, SSI_SCK9_B_MARK,
+ HSPI_CLK1_C_MARK, SSI_WS78_MARK, CAN_DEBUGOUT14_MARK, IRQ1_B_MARK,
+ SSI_WS9_B_MARK, HSPI_CS1_C_MARK, SSI_SDATA7_MARK, CAN_DEBUGOUT15_MARK,
+ IRQ2_B_MARK, TCLK1_C_MARK, HSPI_TX1_C_MARK, SSI_SDATA8_MARK,
+ VSP_MARK, IRQ3_B_MARK, HSPI_RX1_C_MARK, SD0_CLK_MARK,
+ ATACS01_MARK, SCK1_B_MARK, SD0_CMD_MARK, ATACS11_MARK,
+ TX1_B_MARK, CC5_TDO_MARK, SD0_DAT0_MARK, ATADIR1_MARK,
+ RX1_B_MARK, CC5_TRST_MARK, SD0_DAT1_MARK, ATAG1_MARK,
+ SCK2_B_MARK, CC5_TMS_MARK, SD0_DAT2_MARK, ATARD1_MARK,
+ TX2_B_MARK, CC5_TCK_MARK, SD0_DAT3_MARK, ATAWR1_MARK,
+ RX2_B_MARK, CC5_TDI_MARK, SD0_CD_MARK, DREQ2_MARK,
+ RTS1_B_TANS_B_MARK, SD0_WP_MARK, DACK2_MARK, CTS1_B_MARK,
+
+ HSPI_CLK0_MARK, CTS0_MARK, USB_OVC0_MARK, AD_CLK_MARK,
+ CC5_STATE4_MARK, CC5_STATE12_MARK, CC5_STATE20_MARK, CC5_STATE28_MARK,
+ CC5_STATE36_MARK, HSPI_CS0_MARK, RTS0_TANS_MARK, USB_OVC1_MARK,
+ AD_DI_MARK, CC5_STATE5_MARK, CC5_STATE13_MARK, CC5_STATE21_MARK,
+ CC5_STATE29_MARK, CC5_STATE37_MARK, HSPI_TX0_MARK, TX0_MARK,
+ CAN_DEBUG_HW_TRIGGER_MARK, AD_DO_MARK, CC5_STATE6_MARK,
+ CC5_STATE14_MARK, CC5_STATE22_MARK, CC5_STATE30_MARK,
+ CC5_STATE38_MARK, HSPI_RX0_MARK, RX0_MARK, CAN_STEP0_MARK,
+ AD_NCS_MARK, CC5_STATE7_MARK, CC5_STATE15_MARK, CC5_STATE23_MARK,
+ CC5_STATE31_MARK, CC5_STATE39_MARK, FMCLK_MARK, RDS_CLK_MARK,
+ PCMOE_MARK, BPFCLK_MARK, PCMWE_MARK, FMIN_MARK, RDS_DATA_MARK,
+ VI0_CLK_MARK, MMC1_CLK_MARK, VI0_CLKENB_MARK, TX1_C_MARK, HTX1_B_MARK,
+ MT1_SYNC_MARK, VI0_FIELD_MARK, RX1_C_MARK, HRX1_B_MARK,
+ VI0_HSYNC_MARK, VI0_DATA0_B_VI0_B0_B_MARK, CTS1_C_MARK, TX4_D_MARK,
+ MMC1_CMD_MARK, HSCK1_B_MARK, VI0_VSYNC_MARK, VI0_DATA1_B_VI0_B1_B_MARK,
+ RTS1_C_TANS_C_MARK, RX4_D_MARK, PWMFSW0_C_MARK,
+
+ VI0_DATA0_VI0_B0_MARK, HRTS1_B_MARK, MT1_VCXO_MARK,
+ VI0_DATA1_VI0_B1_MARK, HCTS1_B_MARK, MT1_PWM_MARK,
+ VI0_DATA2_VI0_B2_MARK, MMC1_D0_MARK, VI0_DATA3_VI0_B3_MARK,
+ MMC1_D1_MARK, VI0_DATA4_VI0_B4_MARK, MMC1_D2_MARK,
+ VI0_DATA5_VI0_B5_MARK, MMC1_D3_MARK, VI0_DATA6_VI0_B6_MARK,
+ MMC1_D4_MARK, ARM_TRACEDATA_0_MARK, VI0_DATA7_VI0_B7_MARK,
+ MMC1_D5_MARK, ARM_TRACEDATA_1_MARK, VI0_G0_MARK, SSI_SCK78_C_MARK,
+ IRQ0_MARK, ARM_TRACEDATA_2_MARK, VI0_G1_MARK, SSI_WS78_C_MARK,
+ IRQ1_MARK, ARM_TRACEDATA_3_MARK, VI0_G2_MARK, ETH_TXD1_MARK,
+ MMC1_D6_MARK, ARM_TRACEDATA_4_MARK, TS_SPSYNC0_MARK, VI0_G3_MARK,
+ ETH_CRS_DV_MARK, MMC1_D7_MARK, ARM_TRACEDATA_5_MARK, TS_SDAT0_MARK,
+ VI0_G4_MARK, ETH_TX_EN_MARK, SD2_DAT0_B_MARK, ARM_TRACEDATA_6_MARK,
+ VI0_G5_MARK, ETH_RX_ER_MARK, SD2_DAT1_B_MARK, ARM_TRACEDATA_7_MARK,
+ VI0_G6_MARK, ETH_RXD0_MARK, SD2_DAT2_B_MARK, ARM_TRACEDATA_8_MARK,
+ VI0_G7_MARK, ETH_RXD1_MARK, SD2_DAT3_B_MARK, ARM_TRACEDATA_9_MARK,
+
+ VI0_R0_MARK, SSI_SDATA7_C_MARK, SCK1_C_MARK, DREQ1_B_MARK,
+ ARM_TRACEDATA_10_MARK, DREQ0_C_MARK, VI0_R1_MARK, SSI_SDATA8_C_MARK,
+ DACK1_B_MARK, ARM_TRACEDATA_11_MARK, DACK0_C_MARK, DRACK0_C_MARK,
+ VI0_R2_MARK, ETH_LINK_MARK, SD2_CLK_B_MARK, IRQ2_MARK,
+ ARM_TRACEDATA_12_MARK, VI0_R3_MARK, ETH_MAGIC_MARK, SD2_CMD_B_MARK,
+ IRQ3_MARK, ARM_TRACEDATA_13_MARK, VI0_R4_MARK, ETH_REFCLK_MARK,
+ SD2_CD_B_MARK, HSPI_CLK1_B_MARK, ARM_TRACEDATA_14_MARK, MT1_CLK_MARK,
+ TS_SCK0_MARK, VI0_R5_MARK, ETH_TXD0_MARK, SD2_WP_B_MARK,
+ HSPI_CS1_B_MARK, ARM_TRACEDATA_15_MARK, MT1_D_MARK, TS_SDEN0_MARK,
+ VI0_R6_MARK, ETH_MDC_MARK, DREQ2_C_MARK, HSPI_TX1_B_MARK,
+ TRACECLK_MARK, MT1_BEN_MARK, PWMFSW0_D_MARK, VI0_R7_MARK,
+ ETH_MDIO_MARK, DACK2_C_MARK, HSPI_RX1_B_MARK, SCIF_CLK_D_MARK,
+ TRACECTL_MARK, MT1_PEN_MARK, VI1_CLK_MARK, SIM_D_MARK, SDA3_MARK,
+ VI1_HSYNC_MARK, VI3_CLK_MARK, SSI_SCK4_MARK, GPS_SIGN_C_MARK,
+ PWMFSW0_E_MARK, VI1_VSYNC_MARK, AUDIO_CLKOUT_C_MARK, SSI_WS4_MARK,
+ SIM_CLK_MARK, GPS_MAG_C_MARK, SPV_TRST_MARK, SCL3_MARK,
+
+ VI1_DATA0_VI1_B0_MARK, SD2_DAT0_MARK, SIM_RST_MARK, SPV_TCK_MARK,
+ ADICLK_B_MARK, VI1_DATA1_VI1_B1_MARK, SD2_DAT1_MARK, MT0_CLK_MARK,
+ SPV_TMS_MARK, ADICS_B_SAMP_B_MARK, VI1_DATA2_VI1_B2_MARK,
+ SD2_DAT2_MARK, MT0_D_MARK, SPVTDI_MARK, ADIDATA_B_MARK,
+ VI1_DATA3_VI1_B3_MARK, SD2_DAT3_MARK, MT0_BEN_MARK, SPV_TDO_MARK,
+ ADICHS0_B_MARK, VI1_DATA4_VI1_B4_MARK, SD2_CLK_MARK, MT0_PEN_MARK,
+ SPA_TRST_MARK, HSPI_CLK1_D_MARK, ADICHS1_B_MARK,
+ VI1_DATA5_VI1_B5_MARK, SD2_CMD_MARK, MT0_SYNC_MARK, SPA_TCK_MARK,
+ HSPI_CS1_D_MARK, ADICHS2_B_MARK, VI1_DATA6_VI1_B6_MARK, SD2_CD_MARK,
+ MT0_VCXO_MARK, SPA_TMS_MARK, HSPI_TX1_D_MARK, VI1_DATA7_VI1_B7_MARK,
+ SD2_WP_MARK, MT0_PWM_MARK, SPA_TDI_MARK, HSPI_RX1_D_MARK,
+ VI1_G0_MARK, VI3_DATA0_MARK, DU1_DOTCLKOUT1_MARK, TS_SCK1_MARK,
+ DREQ2_B_MARK, TX2_MARK, SPA_TDO_MARK, HCTS0_B_MARK,
+ VI1_G1_MARK, VI3_DATA1_MARK, SSI_SCK1_MARK, TS_SDEN1_MARK,
+ DACK2_B_MARK, RX2_MARK, HRTS0_B_MARK,
+
+ VI1_G2_MARK, VI3_DATA2_MARK, SSI_WS1_MARK, TS_SPSYNC1_MARK,
+ SCK2_MARK, HSCK0_B_MARK, VI1_G3_MARK, VI3_DATA3_MARK,
+ SSI_SCK2_MARK, TS_SDAT1_MARK, SCL1_C_MARK, HTX0_B_MARK,
+ VI1_G4_MARK, VI3_DATA4_MARK, SSI_WS2_MARK, SDA1_C_MARK,
+ SIM_RST_B_MARK, HRX0_B_MARK, VI1_G5_MARK, VI3_DATA5_MARK,
+ GPS_CLK_MARK, FSE_MARK, TX4_B_MARK, SIM_D_B_MARK,
+ VI1_G6_MARK, VI3_DATA6_MARK, GPS_SIGN_MARK, FRB_MARK,
+ RX4_B_MARK, SIM_CLK_B_MARK, VI1_G7_MARK, VI3_DATA7_MARK,
+ GPS_MAG_MARK, FCE_MARK, SCK4_B_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
+
+ PINMUX_DATA(AVS1_MARK, FN_AVS1),
+	PINMUX_DATA(AVS2_MARK, FN_AVS2),
+ PINMUX_DATA(A17_MARK, FN_A17),
+ PINMUX_DATA(A18_MARK, FN_A18),
+ PINMUX_DATA(A19_MARK, FN_A19),
+
+ PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCK0, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP0_2_0, PWM1),
+ PINMUX_IPSR_MODSEL_DATA(IP0_2_0, PWMFSW0, SEL_PWMFSW_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCIF_CLK, SEL_SCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_2_0, TCLK0_C, SEL_TMU0_2),
+ PINMUX_IPSR_DATA(IP0_5_3, BS),
+ PINMUX_IPSR_DATA(IP0_5_3, SD1_DAT2),
+ PINMUX_IPSR_DATA(IP0_5_3, MMC0_D2),
+ PINMUX_IPSR_DATA(IP0_5_3, FD2),
+ PINMUX_IPSR_DATA(IP0_5_3, ATADIR0),
+ PINMUX_IPSR_DATA(IP0_5_3, SDSELF),
+ PINMUX_IPSR_MODSEL_DATA(IP0_5_3, HCTS1, SEL_HSCIF1_0),
+ PINMUX_IPSR_DATA(IP0_5_3, TX4_C),
+ PINMUX_IPSR_DATA(IP0_7_6, A0),
+ PINMUX_IPSR_DATA(IP0_7_6, SD1_DAT3),
+ PINMUX_IPSR_DATA(IP0_7_6, MMC0_D3),
+ PINMUX_IPSR_DATA(IP0_7_6, FD3),
+ PINMUX_IPSR_DATA(IP0_9_8, A20),
+ PINMUX_IPSR_DATA(IP0_9_8, TX5_D),
+ PINMUX_IPSR_DATA(IP0_9_8, HSPI_TX2_B),
+ PINMUX_IPSR_DATA(IP0_11_10, A21),
+ PINMUX_IPSR_MODSEL_DATA(IP0_11_10, SCK5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_MODSEL_DATA(IP0_11_10, HSPI_CLK2_B, SEL_HSPI2_1),
+ PINMUX_IPSR_DATA(IP0_13_12, A22),
+ PINMUX_IPSR_MODSEL_DATA(IP0_13_12, RX5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_MODSEL_DATA(IP0_13_12, HSPI_RX2_B, SEL_HSPI2_1),
+ PINMUX_IPSR_DATA(IP0_13_12, VI1_R0),
+ PINMUX_IPSR_DATA(IP0_15_14, A23),
+ PINMUX_IPSR_DATA(IP0_15_14, FCLE),
+ PINMUX_IPSR_MODSEL_DATA(IP0_15_14, HSPI_CLK2, SEL_HSPI2_0),
+ PINMUX_IPSR_DATA(IP0_15_14, VI1_R1),
+ PINMUX_IPSR_DATA(IP0_18_16, A24),
+ PINMUX_IPSR_DATA(IP0_18_16, SD1_CD),
+ PINMUX_IPSR_DATA(IP0_18_16, MMC0_D4),
+ PINMUX_IPSR_DATA(IP0_18_16, FD4),
+ PINMUX_IPSR_MODSEL_DATA(IP0_18_16, HSPI_CS2, SEL_HSPI2_0),
+ PINMUX_IPSR_DATA(IP0_18_16, VI1_R2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_18_16, SSI_WS78_B, SEL_SSI7_1),
+ PINMUX_IPSR_DATA(IP0_22_19, A25),
+ PINMUX_IPSR_DATA(IP0_22_19, SD1_WP),
+ PINMUX_IPSR_DATA(IP0_22_19, MMC0_D5),
+ PINMUX_IPSR_DATA(IP0_22_19, FD5),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_19, HSPI_RX2, SEL_HSPI2_0),
+ PINMUX_IPSR_DATA(IP0_22_19, VI1_R3),
+ PINMUX_IPSR_DATA(IP0_22_19, TX5_B),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_19, SSI_SDATA7_B, SEL_SSI7_1),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_19, CTS0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_DATA(IP0_24_23, CLKOUT),
+ PINMUX_IPSR_DATA(IP0_24_23, TX3C_IRDA_TX_C),
+ PINMUX_IPSR_DATA(IP0_24_23, PWM0_B),
+ PINMUX_IPSR_DATA(IP0_25, CS0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_25, HSPI_CS2_B, SEL_HSPI2_1),
+ PINMUX_IPSR_DATA(IP0_27_26, CS1_A26),
+ PINMUX_IPSR_DATA(IP0_27_26, HSPI_TX2),
+ PINMUX_IPSR_DATA(IP0_27_26, SDSELF_B),
+ PINMUX_IPSR_DATA(IP0_30_28, RD_WR),
+ PINMUX_IPSR_DATA(IP0_30_28, FWE),
+ PINMUX_IPSR_DATA(IP0_30_28, ATAG0),
+ PINMUX_IPSR_DATA(IP0_30_28, VI1_R7),
+ PINMUX_IPSR_MODSEL_DATA(IP0_30_28, HRTS1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_30_28, RX4_C, SEL_SCIF4_2),
+
+ PINMUX_IPSR_DATA(IP1_1_0, EX_CS0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_1_0, RX3_C_IRDA_RX_C, SEL_SCIF3_2),
+ PINMUX_IPSR_DATA(IP1_1_0, MMC0_D6),
+ PINMUX_IPSR_DATA(IP1_1_0, FD6),
+ PINMUX_IPSR_DATA(IP1_3_2, EX_CS1),
+ PINMUX_IPSR_DATA(IP1_3_2, MMC0_D7),
+ PINMUX_IPSR_DATA(IP1_3_2, FD7),
+ PINMUX_IPSR_DATA(IP1_6_4, EX_CS2),
+ PINMUX_IPSR_DATA(IP1_6_4, SD1_CLK),
+ PINMUX_IPSR_DATA(IP1_6_4, MMC0_CLK),
+ PINMUX_IPSR_DATA(IP1_6_4, FALE),
+ PINMUX_IPSR_DATA(IP1_6_4, ATACS00),
+ PINMUX_IPSR_DATA(IP1_10_7, EX_CS3),
+ PINMUX_IPSR_DATA(IP1_10_7, SD1_CMD),
+ PINMUX_IPSR_DATA(IP1_10_7, MMC0_CMD),
+ PINMUX_IPSR_DATA(IP1_10_7, FRE),
+ PINMUX_IPSR_DATA(IP1_10_7, ATACS10),
+ PINMUX_IPSR_DATA(IP1_10_7, VI1_R4),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, RX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, HSCK1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, SSI_SDATA8_B, SEL_SSI8_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, RTS0_B_TANS_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, SSI_SDATA9, SEL_SSI9_0),
+ PINMUX_IPSR_DATA(IP1_14_11, EX_CS4),
+ PINMUX_IPSR_DATA(IP1_14_11, SD1_DAT0),
+ PINMUX_IPSR_DATA(IP1_14_11, MMC0_D0),
+ PINMUX_IPSR_DATA(IP1_14_11, FD0),
+ PINMUX_IPSR_DATA(IP1_14_11, ATARD0),
+ PINMUX_IPSR_DATA(IP1_14_11, VI1_R5),
+ PINMUX_IPSR_MODSEL_DATA(IP1_14_11, SCK5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_DATA(IP1_14_11, HTX1),
+ PINMUX_IPSR_DATA(IP1_14_11, TX2_E),
+ PINMUX_IPSR_DATA(IP1_14_11, TX0_B),
+ PINMUX_IPSR_MODSEL_DATA(IP1_14_11, SSI_SCK9, SEL_SSI9_0),
+ PINMUX_IPSR_DATA(IP1_18_15, EX_CS5),
+ PINMUX_IPSR_DATA(IP1_18_15, SD1_DAT1),
+ PINMUX_IPSR_DATA(IP1_18_15, MMC0_D1),
+ PINMUX_IPSR_DATA(IP1_18_15, FD1),
+ PINMUX_IPSR_DATA(IP1_18_15, ATAWR0),
+ PINMUX_IPSR_DATA(IP1_18_15, VI1_R6),
+ PINMUX_IPSR_MODSEL_DATA(IP1_18_15, HRX1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_18_15, RX2_E, SEL_SCIF2_4),
+ PINMUX_IPSR_MODSEL_DATA(IP1_18_15, RX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_18_15, SSI_WS9, SEL_SSI9_0),
+ PINMUX_IPSR_DATA(IP1_20_19, MLB_CLK),
+ PINMUX_IPSR_DATA(IP1_20_19, PWM2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_20_19, SCK4, SEL_SCIF4_0),
+ PINMUX_IPSR_DATA(IP1_22_21, MLB_SIG),
+ PINMUX_IPSR_DATA(IP1_22_21, PWM3),
+ PINMUX_IPSR_DATA(IP1_22_21, TX4),
+ PINMUX_IPSR_DATA(IP1_24_23, MLB_DAT),
+ PINMUX_IPSR_DATA(IP1_24_23, PWM4),
+ PINMUX_IPSR_MODSEL_DATA(IP1_24_23, RX4, SEL_SCIF4_0),
+ PINMUX_IPSR_DATA(IP1_28_25, HTX0),
+ PINMUX_IPSR_DATA(IP1_28_25, TX1),
+ PINMUX_IPSR_DATA(IP1_28_25, SDATA),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_25, CTS0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_DATA(IP1_28_25, SUB_TCK),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE2),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE10),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE18),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE26),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE34),
+
+ PINMUX_IPSR_MODSEL_DATA(IP2_3_0, HRX0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_3_0, RX1, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP2_3_0, SCKZ),
+ PINMUX_IPSR_MODSEL_DATA(IP2_3_0, RTS0_C_TANS_C, SEL_SCIF0_2),
+ PINMUX_IPSR_DATA(IP2_3_0, SUB_TDI),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE3),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE11),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE19),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE27),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE35),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_4, HSCK0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SCK1, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP2_7_4, MTS),
+ PINMUX_IPSR_DATA(IP2_7_4, PWM5),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SCK0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SSI_SDATA9_B, SEL_SSI9_1),
+ PINMUX_IPSR_DATA(IP2_7_4, SUB_TDO),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE0),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE8),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE16),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE24),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE32),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, HCTS0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, CTS1, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP2_11_8, STM),
+ PINMUX_IPSR_DATA(IP2_11_8, PWM0_D),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, RX0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, SCIF_CLK_C, SEL_SCIF_2),
+ PINMUX_IPSR_DATA(IP2_11_8, SUB_TRST),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, TCLK1_B, SEL_TMU1_1),
+ PINMUX_IPSR_DATA(IP2_11_8, CC5_OSCOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP2_15_12, HRTS0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_15_12, RTS1_TANS, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP2_15_12, MDATA),
+ PINMUX_IPSR_DATA(IP2_15_12, TX0_C),
+ PINMUX_IPSR_DATA(IP2_15_12, SUB_TMS),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE1),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE9),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE17),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE25),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE33),
+ PINMUX_IPSR_DATA(IP2_18_16, DU0_DR0),
+ PINMUX_IPSR_DATA(IP2_18_16, LCDOUT0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DREQ0, SEL_EXBUS0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, GPS_CLK_B, SEL_GPS_1),
+ PINMUX_IPSR_DATA(IP2_18_16, AUDATA0),
+ PINMUX_IPSR_DATA(IP2_18_16, TX5_C),
+ PINMUX_IPSR_DATA(IP2_21_19, DU0_DR1),
+ PINMUX_IPSR_DATA(IP2_21_19, LCDOUT1),
+ PINMUX_IPSR_DATA(IP2_21_19, DACK0),
+ PINMUX_IPSR_DATA(IP2_21_19, DRACK0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_21_19, GPS_SIGN_B, SEL_GPS_1),
+ PINMUX_IPSR_DATA(IP2_21_19, AUDATA1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_21_19, RX5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_DATA(IP2_22, DU0_DR2),
+ PINMUX_IPSR_DATA(IP2_22, LCDOUT2),
+ PINMUX_IPSR_DATA(IP2_23, DU0_DR3),
+ PINMUX_IPSR_DATA(IP2_23, LCDOUT3),
+ PINMUX_IPSR_DATA(IP2_24, DU0_DR4),
+ PINMUX_IPSR_DATA(IP2_24, LCDOUT4),
+ PINMUX_IPSR_DATA(IP2_25, DU0_DR5),
+ PINMUX_IPSR_DATA(IP2_25, LCDOUT5),
+ PINMUX_IPSR_DATA(IP2_26, DU0_DR6),
+ PINMUX_IPSR_DATA(IP2_26, LCDOUT6),
+ PINMUX_IPSR_DATA(IP2_27, DU0_DR7),
+ PINMUX_IPSR_DATA(IP2_27, LCDOUT7),
+ PINMUX_IPSR_DATA(IP2_30_28, DU0_DG0),
+ PINMUX_IPSR_DATA(IP2_30_28, LCDOUT8),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, DREQ1, SEL_EXBUS1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, SCL2, SEL_I2C2_0),
+ PINMUX_IPSR_DATA(IP2_30_28, AUDATA2),
+
+ PINMUX_IPSR_DATA(IP3_2_0, DU0_DG1),
+ PINMUX_IPSR_DATA(IP3_2_0, LCDOUT9),
+ PINMUX_IPSR_DATA(IP3_2_0, DACK1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_2_0, SDA2, SEL_I2C2_0),
+ PINMUX_IPSR_DATA(IP3_2_0, AUDATA3),
+ PINMUX_IPSR_DATA(IP3_3, DU0_DG2),
+ PINMUX_IPSR_DATA(IP3_3, LCDOUT10),
+ PINMUX_IPSR_DATA(IP3_4, DU0_DG3),
+ PINMUX_IPSR_DATA(IP3_4, LCDOUT11),
+ PINMUX_IPSR_DATA(IP3_5, DU0_DG4),
+ PINMUX_IPSR_DATA(IP3_5, LCDOUT12),
+ PINMUX_IPSR_DATA(IP3_6, DU0_DG5),
+ PINMUX_IPSR_DATA(IP3_6, LCDOUT13),
+ PINMUX_IPSR_DATA(IP3_7, DU0_DG6),
+ PINMUX_IPSR_DATA(IP3_7, LCDOUT14),
+ PINMUX_IPSR_DATA(IP3_8, DU0_DG7),
+ PINMUX_IPSR_DATA(IP3_8, LCDOUT15),
+ PINMUX_IPSR_DATA(IP3_11_9, DU0_DB0),
+ PINMUX_IPSR_DATA(IP3_11_9, LCDOUT16),
+ PINMUX_IPSR_DATA(IP3_11_9, EX_WAIT1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SCL1, SEL_I2C1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, TCLK1, SEL_TMU1_0),
+ PINMUX_IPSR_DATA(IP3_11_9, AUDATA4),
+ PINMUX_IPSR_DATA(IP3_14_12, DU0_DB1),
+ PINMUX_IPSR_DATA(IP3_14_12, LCDOUT17),
+ PINMUX_IPSR_DATA(IP3_14_12, EX_WAIT2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SDA1, SEL_I2C1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, GPS_MAG_B, SEL_GPS_1),
+ PINMUX_IPSR_DATA(IP3_14_12, AUDATA5),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SCK5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_DATA(IP3_15, DU0_DB2),
+ PINMUX_IPSR_DATA(IP3_15, LCDOUT18),
+ PINMUX_IPSR_DATA(IP3_16, DU0_DB3),
+ PINMUX_IPSR_DATA(IP3_16, LCDOUT19),
+ PINMUX_IPSR_DATA(IP3_17, DU0_DB4),
+ PINMUX_IPSR_DATA(IP3_17, LCDOUT20),
+ PINMUX_IPSR_DATA(IP3_18, DU0_DB5),
+ PINMUX_IPSR_DATA(IP3_18, LCDOUT21),
+ PINMUX_IPSR_DATA(IP3_19, DU0_DB6),
+ PINMUX_IPSR_DATA(IP3_19, LCDOUT22),
+ PINMUX_IPSR_DATA(IP3_20, DU0_DB7),
+ PINMUX_IPSR_DATA(IP3_20, LCDOUT23),
+ PINMUX_IPSR_DATA(IP3_22_21, DU0_DOTCLKIN),
+ PINMUX_IPSR_DATA(IP3_22_21, QSTVA_QVS),
+ PINMUX_IPSR_DATA(IP3_22_21, TX3_D_IRDA_TX_D),
+ PINMUX_IPSR_MODSEL_DATA(IP3_22_21, SCL3_B, SEL_I2C3_1),
+ PINMUX_IPSR_DATA(IP3_23, DU0_DOTCLKOUT0),
+ PINMUX_IPSR_DATA(IP3_23, QCLK),
+ PINMUX_IPSR_DATA(IP3_26_24, DU0_DOTCLKOUT1),
+ PINMUX_IPSR_DATA(IP3_26_24, QSTVB_QVE),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, RX3_D_IRDA_RX_D, SEL_SCIF3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SDA3_B, SEL_I2C3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SDA2_C, SEL_I2C2_2),
+ PINMUX_IPSR_DATA(IP3_26_24, DACK0_B),
+ PINMUX_IPSR_DATA(IP3_26_24, DRACK0_B),
+ PINMUX_IPSR_DATA(IP3_27, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_DATA(IP3_27, QSTH_QHS),
+ PINMUX_IPSR_DATA(IP3_28, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_DATA(IP3_28, QSTB_QHE),
+ PINMUX_IPSR_DATA(IP3_31_29, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+ PINMUX_IPSR_DATA(IP3_31_29, QCPV_QDE),
+ PINMUX_IPSR_DATA(IP3_31_29, CAN1_TX),
+ PINMUX_IPSR_DATA(IP3_31_29, TX2_C),
+ PINMUX_IPSR_MODSEL_DATA(IP3_31_29, SCL2_C, SEL_I2C2_2),
+ PINMUX_IPSR_DATA(IP3_31_29, REMOCON),
+
+ PINMUX_IPSR_DATA(IP4_1_0, DU0_DISP),
+ PINMUX_IPSR_DATA(IP4_1_0, QPOLA),
+ PINMUX_IPSR_MODSEL_DATA(IP4_1_0, CAN_CLK_C, SEL_CANCLK_2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCK2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_DATA(IP4_4_2, DU0_CDE),
+ PINMUX_IPSR_DATA(IP4_4_2, QPOLB),
+ PINMUX_IPSR_DATA(IP4_4_2, CAN1_RX),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, RX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, DREQ0_B, SEL_EXBUS0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SSI_SCK78_B, SEL_SSI7_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SCK0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_DATA(IP4_7_5, DU1_DR0),
+ PINMUX_IPSR_DATA(IP4_7_5, VI2_DATA0_VI2_B0),
+ PINMUX_IPSR_DATA(IP4_7_5, PWM6),
+ PINMUX_IPSR_DATA(IP4_7_5, SD3_CLK),
+ PINMUX_IPSR_DATA(IP4_7_5, TX3_E_IRDA_TX_E),
+ PINMUX_IPSR_DATA(IP4_7_5, AUDCK),
+ PINMUX_IPSR_MODSEL_DATA(IP4_7_5, PWMFSW0_B, SEL_PWMFSW_1),
+ PINMUX_IPSR_DATA(IP4_10_8, DU1_DR1),
+ PINMUX_IPSR_DATA(IP4_10_8, VI2_DATA1_VI2_B1),
+ PINMUX_IPSR_DATA(IP4_10_8, PWM0),
+ PINMUX_IPSR_DATA(IP4_10_8, SD3_CMD),
+ PINMUX_IPSR_MODSEL_DATA(IP4_10_8, RX3_E_IRDA_RX_E, SEL_SCIF3_4),
+ PINMUX_IPSR_DATA(IP4_10_8, AUDSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP4_10_8, CTS0_D, SEL_SCIF0_3),
+ PINMUX_IPSR_DATA(IP4_11, DU1_DR2),
+ PINMUX_IPSR_DATA(IP4_11, VI2_G0),
+ PINMUX_IPSR_DATA(IP4_12, DU1_DR3),
+ PINMUX_IPSR_DATA(IP4_12, VI2_G1),
+ PINMUX_IPSR_DATA(IP4_13, DU1_DR4),
+ PINMUX_IPSR_DATA(IP4_13, VI2_G2),
+ PINMUX_IPSR_DATA(IP4_14, DU1_DR5),
+ PINMUX_IPSR_DATA(IP4_14, VI2_G3),
+ PINMUX_IPSR_DATA(IP4_15, DU1_DR6),
+ PINMUX_IPSR_DATA(IP4_15, VI2_G4),
+ PINMUX_IPSR_DATA(IP4_16, DU1_DR7),
+ PINMUX_IPSR_DATA(IP4_16, VI2_G5),
+ PINMUX_IPSR_DATA(IP4_19_17, DU1_DG0),
+ PINMUX_IPSR_DATA(IP4_19_17, VI2_DATA2_VI2_B2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_19_17, SCL1_B, SEL_I2C1_1),
+ PINMUX_IPSR_DATA(IP4_19_17, SD3_DAT2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_19_17, SCK3_E, SEL_SCIF3_4),
+ PINMUX_IPSR_DATA(IP4_19_17, AUDATA6),
+ PINMUX_IPSR_DATA(IP4_19_17, TX0_D),
+ PINMUX_IPSR_DATA(IP4_22_20, DU1_DG1),
+ PINMUX_IPSR_DATA(IP4_22_20, VI2_DATA3_VI2_B3),
+ PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SDA1_B, SEL_I2C1_1),
+ PINMUX_IPSR_DATA(IP4_22_20, SD3_DAT3),
+ PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SCK5, SEL_SCIF5_0),
+ PINMUX_IPSR_DATA(IP4_22_20, AUDATA7),
+ PINMUX_IPSR_MODSEL_DATA(IP4_22_20, RX0_D, SEL_SCIF0_3),
+ PINMUX_IPSR_DATA(IP4_23, DU1_DG2),
+ PINMUX_IPSR_DATA(IP4_23, VI2_G6),
+ PINMUX_IPSR_DATA(IP4_24, DU1_DG3),
+ PINMUX_IPSR_DATA(IP4_24, VI2_G7),
+ PINMUX_IPSR_DATA(IP4_25, DU1_DG4),
+ PINMUX_IPSR_DATA(IP4_25, VI2_R0),
+ PINMUX_IPSR_DATA(IP4_26, DU1_DG5),
+ PINMUX_IPSR_DATA(IP4_26, VI2_R1),
+ PINMUX_IPSR_DATA(IP4_27, DU1_DG6),
+ PINMUX_IPSR_DATA(IP4_27, VI2_R2),
+ PINMUX_IPSR_DATA(IP4_28, DU1_DG7),
+ PINMUX_IPSR_DATA(IP4_28, VI2_R3),
+ PINMUX_IPSR_DATA(IP4_31_29, DU1_DB0),
+ PINMUX_IPSR_DATA(IP4_31_29, VI2_DATA4_VI2_B4),
+ PINMUX_IPSR_MODSEL_DATA(IP4_31_29, SCL2_B, SEL_I2C2_1),
+ PINMUX_IPSR_DATA(IP4_31_29, SD3_DAT0),
+ PINMUX_IPSR_DATA(IP4_31_29, TX5),
+ PINMUX_IPSR_MODSEL_DATA(IP4_31_29, SCK0_D, SEL_SCIF0_3),
+
+ PINMUX_IPSR_DATA(IP5_2_0, DU1_DB1),
+ PINMUX_IPSR_DATA(IP5_2_0, VI2_DATA5_VI2_B5),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, SDA2_B, SEL_I2C2_1),
+ PINMUX_IPSR_DATA(IP5_2_0, SD3_DAT1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RX5, SEL_SCIF5_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RTS0_D_TANS_D, SEL_SCIF0_3),
+ PINMUX_IPSR_DATA(IP5_3, DU1_DB2),
+ PINMUX_IPSR_DATA(IP5_3, VI2_R4),
+ PINMUX_IPSR_DATA(IP5_4, DU1_DB3),
+ PINMUX_IPSR_DATA(IP5_4, VI2_R5),
+ PINMUX_IPSR_DATA(IP5_5, DU1_DB4),
+ PINMUX_IPSR_DATA(IP5_5, VI2_R6),
+ PINMUX_IPSR_DATA(IP5_6, DU1_DB5),
+ PINMUX_IPSR_DATA(IP5_6, VI2_R7),
+ PINMUX_IPSR_DATA(IP5_7, DU1_DB6),
+ PINMUX_IPSR_MODSEL_DATA(IP5_7, SCL2_D, SEL_I2C2_3),
+ PINMUX_IPSR_DATA(IP5_8, DU1_DB7),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8, SDA2_D, SEL_I2C2_3),
+ PINMUX_IPSR_DATA(IP5_10_9, DU1_DOTCLKIN),
+ PINMUX_IPSR_DATA(IP5_10_9, VI2_CLKENB),
+ PINMUX_IPSR_MODSEL_DATA(IP5_10_9, HSPI_CS1, SEL_HSPI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_10_9, SCL1_D, SEL_I2C1_3),
+ PINMUX_IPSR_DATA(IP5_12_11, DU1_DOTCLKOUT),
+ PINMUX_IPSR_DATA(IP5_12_11, VI2_FIELD),
+ PINMUX_IPSR_MODSEL_DATA(IP5_12_11, SDA1_D, SEL_I2C1_3),
+ PINMUX_IPSR_DATA(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_IPSR_DATA(IP5_14_13, VI2_HSYNC),
+ PINMUX_IPSR_DATA(IP5_14_13, VI3_HSYNC),
+ PINMUX_IPSR_DATA(IP5_16_15, DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_IPSR_DATA(IP5_16_15, VI2_VSYNC),
+ PINMUX_IPSR_DATA(IP5_16_15, VI3_VSYNC),
+ PINMUX_IPSR_DATA(IP5_20_17, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_IPSR_DATA(IP5_20_17, VI2_CLK),
+ PINMUX_IPSR_DATA(IP5_20_17, TX3_B_IRDA_TX_B),
+ PINMUX_IPSR_DATA(IP5_20_17, SD3_CD),
+ PINMUX_IPSR_DATA(IP5_20_17, HSPI_TX1),
+ PINMUX_IPSR_DATA(IP5_20_17, VI1_CLKENB),
+ PINMUX_IPSR_DATA(IP5_20_17, VI3_CLKENB),
+ PINMUX_IPSR_DATA(IP5_20_17, AUDIO_CLKC),
+ PINMUX_IPSR_DATA(IP5_20_17, TX2_D),
+ PINMUX_IPSR_DATA(IP5_20_17, SPEEDIN),
+ PINMUX_IPSR_MODSEL_DATA(IP5_20_17, GPS_SIGN_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP5_23_21, DU1_DISP),
+ PINMUX_IPSR_DATA(IP5_23_21, VI2_DATA6_VI2_B6),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_21, TCLK0, SEL_TMU0_0),
+ PINMUX_IPSR_DATA(IP5_23_21, QSTVA_B_QVS_B),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_21, HSPI_CLK1, SEL_HSPI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_21, SCK2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_DATA(IP5_23_21, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_21, GPS_MAG_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP5_27_24, DU1_CDE),
+ PINMUX_IPSR_DATA(IP5_27_24, VI2_DATA7_VI2_B7),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, RX3_B_IRDA_RX_B, SEL_SCIF3_1),
+ PINMUX_IPSR_DATA(IP5_27_24, SD3_WP),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, HSPI_RX1, SEL_HSPI1_0),
+ PINMUX_IPSR_DATA(IP5_27_24, VI1_FIELD),
+ PINMUX_IPSR_DATA(IP5_27_24, VI3_FIELD),
+ PINMUX_IPSR_DATA(IP5_27_24, AUDIO_CLKOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, RX2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, GPS_CLK_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, GPS_CLK_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP5_28, AUDIO_CLKA),
+ PINMUX_IPSR_DATA(IP5_28, CAN_TXCLK),
+ PINMUX_IPSR_DATA(IP5_30_29, AUDIO_CLKB),
+ PINMUX_IPSR_DATA(IP5_30_29, USB_OVC2),
+ PINMUX_IPSR_DATA(IP5_30_29, CAN_DEBUGOUT0),
+ PINMUX_IPSR_DATA(IP5_30_29, MOUT0),
+
+ PINMUX_IPSR_DATA(IP6_1_0, SSI_SCK0129),
+ PINMUX_IPSR_DATA(IP6_1_0, CAN_DEBUGOUT1),
+ PINMUX_IPSR_DATA(IP6_1_0, MOUT1),
+ PINMUX_IPSR_DATA(IP6_3_2, SSI_WS0129),
+ PINMUX_IPSR_DATA(IP6_3_2, CAN_DEBUGOUT2),
+ PINMUX_IPSR_DATA(IP6_3_2, MOUT2),
+ PINMUX_IPSR_DATA(IP6_5_4, SSI_SDATA0),
+ PINMUX_IPSR_DATA(IP6_5_4, CAN_DEBUGOUT3),
+ PINMUX_IPSR_DATA(IP6_5_4, MOUT5),
+ PINMUX_IPSR_DATA(IP6_7_6, SSI_SDATA1),
+ PINMUX_IPSR_DATA(IP6_7_6, CAN_DEBUGOUT4),
+ PINMUX_IPSR_DATA(IP6_7_6, MOUT6),
+ PINMUX_IPSR_DATA(IP6_8, SSI_SDATA2),
+ PINMUX_IPSR_DATA(IP6_8, CAN_DEBUGOUT5),
+ PINMUX_IPSR_DATA(IP6_11_9, SSI_SCK34),
+ PINMUX_IPSR_DATA(IP6_11_9, CAN_DEBUGOUT6),
+ PINMUX_IPSR_DATA(IP6_11_9, CAN0_TX_B),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_9, IERX, SEL_IE_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_9, SSI_SCK9_C, SEL_SSI9_2),
+ PINMUX_IPSR_DATA(IP6_14_12, SSI_WS34),
+ PINMUX_IPSR_DATA(IP6_14_12, CAN_DEBUGOUT7),
+ PINMUX_IPSR_MODSEL_DATA(IP6_14_12, CAN0_RX_B, SEL_CAN0_1),
+ PINMUX_IPSR_DATA(IP6_14_12, IETX),
+ PINMUX_IPSR_MODSEL_DATA(IP6_14_12, SSI_WS9_C, SEL_SSI9_2),
+ PINMUX_IPSR_DATA(IP6_17_15, SSI_SDATA3),
+ PINMUX_IPSR_DATA(IP6_17_15, PWM0_C),
+ PINMUX_IPSR_DATA(IP6_17_15, CAN_DEBUGOUT8),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_15, CAN_CLK_B, SEL_CANCLK_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_15, IECLK, SEL_IE_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_15, SCIF_CLK_B, SEL_SCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_15, TCLK0_B, SEL_TMU0_1),
+ PINMUX_IPSR_DATA(IP6_19_18, SSI_SDATA4),
+ PINMUX_IPSR_DATA(IP6_19_18, CAN_DEBUGOUT9),
+ PINMUX_IPSR_MODSEL_DATA(IP6_19_18, SSI_SDATA9_C, SEL_SSI9_2),
+ PINMUX_IPSR_DATA(IP6_22_20, SSI_SCK5),
+ PINMUX_IPSR_DATA(IP6_22_20, ADICLK),
+ PINMUX_IPSR_DATA(IP6_22_20, CAN_DEBUGOUT10),
+ PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCK3, SEL_SCIF3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_22_20, TCLK0_D, SEL_TMU0_3),
+ PINMUX_IPSR_DATA(IP6_24_23, SSI_WS5),
+ PINMUX_IPSR_MODSEL_DATA(IP6_24_23, ADICS_SAMP, SEL_ADI_0),
+ PINMUX_IPSR_DATA(IP6_24_23, CAN_DEBUGOUT11),
+ PINMUX_IPSR_DATA(IP6_24_23, TX3_IRDA_TX),
+ PINMUX_IPSR_DATA(IP6_26_25, SSI_SDATA5),
+ PINMUX_IPSR_MODSEL_DATA(IP6_26_25, ADIDATA, SEL_ADI_0),
+ PINMUX_IPSR_DATA(IP6_26_25, CAN_DEBUGOUT12),
+ PINMUX_IPSR_MODSEL_DATA(IP6_26_25, RX3_IRDA_RX, SEL_SCIF3_0),
+ PINMUX_IPSR_DATA(IP6_30_29, SSI_SCK6),
+ PINMUX_IPSR_DATA(IP6_30_29, ADICHS0),
+ PINMUX_IPSR_DATA(IP6_30_29, CAN0_TX),
+ PINMUX_IPSR_MODSEL_DATA(IP6_30_29, IERX_B, SEL_IE_1),
+
+ PINMUX_IPSR_DATA(IP7_1_0, SSI_WS6),
+ PINMUX_IPSR_DATA(IP7_1_0, ADICHS1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_1_0, CAN0_RX, SEL_CAN0_0),
+ PINMUX_IPSR_DATA(IP7_1_0, IETX_B),
+ PINMUX_IPSR_DATA(IP7_3_2, SSI_SDATA6),
+ PINMUX_IPSR_DATA(IP7_3_2, ADICHS2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_3_2, CAN_CLK, SEL_CANCLK_0),
+ PINMUX_IPSR_MODSEL_DATA(IP7_3_2, IECLK_B, SEL_IE_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_6_4, SSI_SCK78, SEL_SSI7_0),
+ PINMUX_IPSR_DATA(IP7_6_4, CAN_DEBUGOUT13),
+ PINMUX_IPSR_MODSEL_DATA(IP7_6_4, IRQ0_B, SEL_INT0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_6_4, SSI_SCK9_B, SEL_SSI9_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_6_4, HSPI_CLK1_C, SEL_HSPI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_9_7, SSI_WS78, SEL_SSI7_0),
+ PINMUX_IPSR_DATA(IP7_9_7, CAN_DEBUGOUT14),
+ PINMUX_IPSR_MODSEL_DATA(IP7_9_7, IRQ1_B, SEL_INT1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_9_7, SSI_WS9_B, SEL_SSI9_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_9_7, HSPI_CS1_C, SEL_HSPI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_12_10, SSI_SDATA7, SEL_SSI7_0),
+ PINMUX_IPSR_DATA(IP7_12_10, CAN_DEBUGOUT15),
+ PINMUX_IPSR_MODSEL_DATA(IP7_12_10, IRQ2_B, SEL_INT2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_12_10, TCLK1_C, SEL_TMU1_2),
+ PINMUX_IPSR_DATA(IP7_12_10, HSPI_TX1_C),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_13, SSI_SDATA8, SEL_SSI8_0),
+ PINMUX_IPSR_DATA(IP7_14_13, VSP),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_13, IRQ3_B, SEL_INT3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_13, HSPI_RX1_C, SEL_HSPI1_2),
+ PINMUX_IPSR_DATA(IP7_16_15, SD0_CLK),
+ PINMUX_IPSR_DATA(IP7_16_15, ATACS01),
+ PINMUX_IPSR_MODSEL_DATA(IP7_16_15, SCK1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP7_18_17, SD0_CMD),
+ PINMUX_IPSR_DATA(IP7_18_17, ATACS11),
+ PINMUX_IPSR_DATA(IP7_18_17, TX1_B),
+ PINMUX_IPSR_DATA(IP7_18_17, CC5_TDO),
+ PINMUX_IPSR_DATA(IP7_20_19, SD0_DAT0),
+ PINMUX_IPSR_DATA(IP7_20_19, ATADIR1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_19, RX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP7_20_19, CC5_TRST),
+ PINMUX_IPSR_DATA(IP7_22_21, SD0_DAT1),
+ PINMUX_IPSR_DATA(IP7_22_21, ATAG1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_22_21, SCK2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_DATA(IP7_22_21, CC5_TMS),
+ PINMUX_IPSR_DATA(IP7_24_23, SD0_DAT2),
+ PINMUX_IPSR_DATA(IP7_24_23, ATARD1),
+ PINMUX_IPSR_DATA(IP7_24_23, TX2_B),
+ PINMUX_IPSR_DATA(IP7_24_23, CC5_TCK),
+ PINMUX_IPSR_DATA(IP7_26_25, SD0_DAT3),
+ PINMUX_IPSR_DATA(IP7_26_25, ATAWR1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_25, RX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_DATA(IP7_26_25, CC5_TDI),
+ PINMUX_IPSR_DATA(IP7_28_27, SD0_CD),
+ PINMUX_IPSR_MODSEL_DATA(IP7_28_27, DREQ2, SEL_EXBUS2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP7_28_27, RTS1_B_TANS_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP7_30_29, SD0_WP),
+ PINMUX_IPSR_DATA(IP7_30_29, DACK2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_30_29, CTS1_B, SEL_SCIF1_1),
+
+ PINMUX_IPSR_DATA(IP8_3_0, HSPI_CLK0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_3_0, CTS0, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP8_3_0, USB_OVC0),
+ PINMUX_IPSR_DATA(IP8_3_0, AD_CLK),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE4),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE12),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE20),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE28),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE36),
+ PINMUX_IPSR_DATA(IP8_7_4, HSPI_CS0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_7_4, RTS0_TANS, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP8_7_4, USB_OVC1),
+ PINMUX_IPSR_DATA(IP8_7_4, AD_DI),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE5),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE13),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE21),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE29),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE37),
+ PINMUX_IPSR_DATA(IP8_11_8, HSPI_TX0),
+ PINMUX_IPSR_DATA(IP8_11_8, TX0),
+ PINMUX_IPSR_DATA(IP8_11_8, CAN_DEBUG_HW_TRIGGER),
+ PINMUX_IPSR_DATA(IP8_11_8, AD_DO),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE6),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE14),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE22),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE30),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE38),
+ PINMUX_IPSR_DATA(IP8_15_12, HSPI_RX0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_15_12, RX0, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP8_15_12, CAN_STEP0),
+ PINMUX_IPSR_DATA(IP8_15_12, AD_NCS),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE7),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE15),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE23),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE31),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE39),
+ PINMUX_IPSR_DATA(IP8_17_16, FMCLK),
+ PINMUX_IPSR_DATA(IP8_17_16, RDS_CLK),
+ PINMUX_IPSR_DATA(IP8_17_16, PCMOE),
+ PINMUX_IPSR_DATA(IP8_18, BPFCLK),
+ PINMUX_IPSR_DATA(IP8_18, PCMWE),
+ PINMUX_IPSR_DATA(IP8_19, FMIN),
+ PINMUX_IPSR_DATA(IP8_19, RDS_DATA),
+ PINMUX_IPSR_DATA(IP8_20, VI0_CLK),
+ PINMUX_IPSR_DATA(IP8_20, MMC1_CLK),
+ PINMUX_IPSR_DATA(IP8_22_21, VI0_CLKENB),
+ PINMUX_IPSR_DATA(IP8_22_21, TX1_C),
+ PINMUX_IPSR_DATA(IP8_22_21, HTX1_B),
+ PINMUX_IPSR_DATA(IP8_22_21, MT1_SYNC),
+ PINMUX_IPSR_DATA(IP8_24_23, VI0_FIELD),
+ PINMUX_IPSR_MODSEL_DATA(IP8_24_23, RX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_24_23, HRX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_DATA(IP8_27_25, VI0_HSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_25, VI0_DATA0_B_VI0_B0_B, SEL_VI0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_25, CTS1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_DATA(IP8_27_25, TX4_D),
+ PINMUX_IPSR_DATA(IP8_27_25, MMC1_CMD),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_25, HSCK1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_DATA(IP8_30_28, VI0_VSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, VI0_DATA1_B_VI0_B1_B, SEL_VI0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, RTS1_C_TANS_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, RX4_D, SEL_SCIF4_3),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, PWMFSW0_C, SEL_PWMFSW_2),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI0_DATA0_VI0_B0, SEL_VI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, HRTS1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_DATA(IP9_1_0, MT1_VCXO),
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI0_DATA1_VI0_B1, SEL_VI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, HCTS1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_DATA(IP9_3_2, MT1_PWM),
+ PINMUX_IPSR_DATA(IP9_4, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_DATA(IP9_4, MMC1_D0),
+ PINMUX_IPSR_DATA(IP9_5, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_DATA(IP9_5, MMC1_D1),
+ PINMUX_IPSR_DATA(IP9_6, VI0_DATA4_VI0_B4),
+ PINMUX_IPSR_DATA(IP9_6, MMC1_D2),
+ PINMUX_IPSR_DATA(IP9_7, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_DATA(IP9_7, MMC1_D3),
+ PINMUX_IPSR_DATA(IP9_9_8, VI0_DATA6_VI0_B6),
+ PINMUX_IPSR_DATA(IP9_9_8, MMC1_D4),
+ PINMUX_IPSR_DATA(IP9_9_8, ARM_TRACEDATA_0),
+ PINMUX_IPSR_DATA(IP9_11_10, VI0_DATA7_VI0_B7),
+ PINMUX_IPSR_DATA(IP9_11_10, MMC1_D5),
+ PINMUX_IPSR_DATA(IP9_11_10, ARM_TRACEDATA_1),
+ PINMUX_IPSR_DATA(IP9_13_12, VI0_G0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, SSI_SCK78_C, SEL_SSI7_2),
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, IRQ0, SEL_INT0_0),
+ PINMUX_IPSR_DATA(IP9_13_12, ARM_TRACEDATA_2),
+ PINMUX_IPSR_DATA(IP9_15_14, VI0_G1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, SSI_WS78_C, SEL_SSI7_2),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, IRQ1, SEL_INT1_0),
+ PINMUX_IPSR_DATA(IP9_15_14, ARM_TRACEDATA_3),
+ PINMUX_IPSR_DATA(IP9_18_16, VI0_G2),
+ PINMUX_IPSR_DATA(IP9_18_16, ETH_TXD1),
+ PINMUX_IPSR_DATA(IP9_18_16, MMC1_D6),
+ PINMUX_IPSR_DATA(IP9_18_16, ARM_TRACEDATA_4),
+ PINMUX_IPSR_DATA(IP9_18_16, TS_SPSYNC0),
+ PINMUX_IPSR_DATA(IP9_21_19, VI0_G3),
+ PINMUX_IPSR_DATA(IP9_21_19, ETH_CRS_DV),
+ PINMUX_IPSR_DATA(IP9_21_19, MMC1_D7),
+ PINMUX_IPSR_DATA(IP9_21_19, ARM_TRACEDATA_5),
+ PINMUX_IPSR_DATA(IP9_21_19, TS_SDAT0),
+ PINMUX_IPSR_DATA(IP9_23_22, VI0_G4),
+ PINMUX_IPSR_DATA(IP9_23_22, ETH_TX_EN),
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SD2_DAT0_B, SEL_SD2_1),
+ PINMUX_IPSR_DATA(IP9_23_22, ARM_TRACEDATA_6),
+ PINMUX_IPSR_DATA(IP9_25_24, VI0_G5),
+ PINMUX_IPSR_DATA(IP9_25_24, ETH_RX_ER),
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SD2_DAT1_B, SEL_SD2_1),
+ PINMUX_IPSR_DATA(IP9_25_24, ARM_TRACEDATA_7),
+ PINMUX_IPSR_DATA(IP9_27_26, VI0_G6),
+ PINMUX_IPSR_DATA(IP9_27_26, ETH_RXD0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SD2_DAT2_B, SEL_SD2_1),
+ PINMUX_IPSR_DATA(IP9_27_26, ARM_TRACEDATA_8),
+ PINMUX_IPSR_DATA(IP9_29_28, VI0_G7),
+ PINMUX_IPSR_DATA(IP9_29_28, ETH_RXD1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_29_28, SD2_DAT3_B, SEL_SD2_1),
+ PINMUX_IPSR_DATA(IP9_29_28, ARM_TRACEDATA_9),
+
+ PINMUX_IPSR_DATA(IP10_2_0, VI0_R0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SSI_SDATA7_C, SEL_SSI7_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCK1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, DREQ1_B, SEL_EXBUS1_0),
+ PINMUX_IPSR_DATA(IP10_2_0, ARM_TRACEDATA_10),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, DREQ0_C, SEL_EXBUS0_2),
+ PINMUX_IPSR_DATA(IP10_5_3, VI0_R1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SSI_SDATA8_C, SEL_SSI8_2),
+ PINMUX_IPSR_DATA(IP10_5_3, DACK1_B),
+ PINMUX_IPSR_DATA(IP10_5_3, ARM_TRACEDATA_11),
+ PINMUX_IPSR_DATA(IP10_5_3, DACK0_C),
+ PINMUX_IPSR_DATA(IP10_5_3, DRACK0_C),
+ PINMUX_IPSR_DATA(IP10_8_6, VI0_R2),
+ PINMUX_IPSR_DATA(IP10_8_6, ETH_LINK),
+ PINMUX_IPSR_DATA(IP10_8_6, SD2_CLK_B),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, IRQ2, SEL_INT2_0),
+ PINMUX_IPSR_DATA(IP10_8_6, ARM_TRACEDATA_12),
+ PINMUX_IPSR_DATA(IP10_11_9, VI0_R3),
+ PINMUX_IPSR_DATA(IP10_11_9, ETH_MAGIC),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SD2_CMD_B, SEL_SD2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, IRQ3, SEL_INT3_0),
+ PINMUX_IPSR_DATA(IP10_11_9, ARM_TRACEDATA_13),
+ PINMUX_IPSR_DATA(IP10_14_12, VI0_R4),
+ PINMUX_IPSR_DATA(IP10_14_12, ETH_REFCLK),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SD2_CD_B, SEL_SD2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, HSPI_CLK1_B, SEL_HSPI1_1),
+ PINMUX_IPSR_DATA(IP10_14_12, ARM_TRACEDATA_14),
+ PINMUX_IPSR_DATA(IP10_14_12, MT1_CLK),
+ PINMUX_IPSR_DATA(IP10_14_12, TS_SCK0),
+ PINMUX_IPSR_DATA(IP10_17_15, VI0_R5),
+ PINMUX_IPSR_DATA(IP10_17_15, ETH_TXD0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_17_15, SD2_WP_B, SEL_SD2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_17_15, HSPI_CS1_B, SEL_HSPI1_1),
+ PINMUX_IPSR_DATA(IP10_17_15, ARM_TRACEDATA_15),
+ PINMUX_IPSR_DATA(IP10_17_15, MT1_D),
+ PINMUX_IPSR_DATA(IP10_17_15, TS_SDEN0),
+ PINMUX_IPSR_DATA(IP10_20_18, VI0_R6),
+ PINMUX_IPSR_DATA(IP10_20_18, ETH_MDC),
+ PINMUX_IPSR_MODSEL_DATA(IP10_20_18, DREQ2_C, SEL_EXBUS2_2),
+ PINMUX_IPSR_DATA(IP10_20_18, HSPI_TX1_B),
+ PINMUX_IPSR_DATA(IP10_20_18, TRACECLK),
+ PINMUX_IPSR_DATA(IP10_20_18, MT1_BEN),
+ PINMUX_IPSR_MODSEL_DATA(IP10_20_18, PWMFSW0_D, SEL_PWMFSW_3),
+ PINMUX_IPSR_DATA(IP10_23_21, VI0_R7),
+ PINMUX_IPSR_DATA(IP10_23_21, ETH_MDIO),
+ PINMUX_IPSR_DATA(IP10_23_21, DACK2_C),
+ PINMUX_IPSR_MODSEL_DATA(IP10_23_21, HSPI_RX1_B, SEL_HSPI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_23_21, SCIF_CLK_D, SEL_SCIF_3),
+ PINMUX_IPSR_DATA(IP10_23_21, TRACECTL),
+ PINMUX_IPSR_DATA(IP10_23_21, MT1_PEN),
+ PINMUX_IPSR_DATA(IP10_25_24, VI1_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP10_25_24, SIM_D, SEL_SIM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_25_24, SDA3, SEL_I2C3_0),
+ PINMUX_IPSR_DATA(IP10_28_26, VI1_HSYNC),
+ PINMUX_IPSR_DATA(IP10_28_26, VI3_CLK),
+ PINMUX_IPSR_DATA(IP10_28_26, SSI_SCK4),
+ PINMUX_IPSR_MODSEL_DATA(IP10_28_26, GPS_SIGN_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_28_26, PWMFSW0_E, SEL_PWMFSW_4),
+ PINMUX_IPSR_DATA(IP10_31_29, VI1_VSYNC),
+ PINMUX_IPSR_DATA(IP10_31_29, AUDIO_CLKOUT_C),
+ PINMUX_IPSR_DATA(IP10_31_29, SSI_WS4),
+ PINMUX_IPSR_DATA(IP10_31_29, SIM_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP10_31_29, GPS_MAG_C, SEL_GPS_2),
+ PINMUX_IPSR_DATA(IP10_31_29, SPV_TRST),
+ PINMUX_IPSR_MODSEL_DATA(IP10_31_29, SCL3, SEL_I2C3_0),
+
+ PINMUX_IPSR_DATA(IP11_2_0, VI1_DATA0_VI1_B0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SD2_DAT0, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_2_0, SIM_RST),
+ PINMUX_IPSR_DATA(IP11_2_0, SPV_TCK),
+ PINMUX_IPSR_DATA(IP11_2_0, ADICLK_B),
+ PINMUX_IPSR_DATA(IP11_5_3, VI1_DATA1_VI1_B1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SD2_DAT1, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_5_3, MT0_CLK),
+ PINMUX_IPSR_DATA(IP11_5_3, SPV_TMS),
+ PINMUX_IPSR_MODSEL_DATA(IP11_5_3, ADICS_B_SAMP_B, SEL_ADI_1),
+ PINMUX_IPSR_DATA(IP11_8_6, VI1_DATA2_VI1_B2),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SD2_DAT2, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_8_6, MT0_D),
+ PINMUX_IPSR_DATA(IP11_8_6, SPVTDI),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, ADIDATA_B, SEL_ADI_1),
+ PINMUX_IPSR_DATA(IP11_11_9, VI1_DATA3_VI1_B3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_9, SD2_DAT3, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_11_9, MT0_BEN),
+ PINMUX_IPSR_DATA(IP11_11_9, SPV_TDO),
+ PINMUX_IPSR_DATA(IP11_11_9, ADICHS0_B),
+ PINMUX_IPSR_DATA(IP11_14_12, VI1_DATA4_VI1_B4),
+ PINMUX_IPSR_DATA(IP11_14_12, SD2_CLK),
+ PINMUX_IPSR_DATA(IP11_14_12, MT0_PEN),
+ PINMUX_IPSR_DATA(IP11_14_12, SPA_TRST),
+ PINMUX_IPSR_MODSEL_DATA(IP11_14_12, HSPI_CLK1_D, SEL_HSPI1_3),
+ PINMUX_IPSR_DATA(IP11_14_12, ADICHS1_B),
+ PINMUX_IPSR_DATA(IP11_17_15, VI1_DATA5_VI1_B5),
+ PINMUX_IPSR_MODSEL_DATA(IP11_17_15, SD2_CMD, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_17_15, MT0_SYNC),
+ PINMUX_IPSR_DATA(IP11_17_15, SPA_TCK),
+ PINMUX_IPSR_MODSEL_DATA(IP11_17_15, HSPI_CS1_D, SEL_HSPI1_3),
+ PINMUX_IPSR_DATA(IP11_17_15, ADICHS2_B),
+ PINMUX_IPSR_DATA(IP11_20_18, VI1_DATA6_VI1_B6),
+ PINMUX_IPSR_MODSEL_DATA(IP11_20_18, SD2_CD, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_20_18, MT0_VCXO),
+ PINMUX_IPSR_DATA(IP11_20_18, SPA_TMS),
+ PINMUX_IPSR_DATA(IP11_20_18, HSPI_TX1_D),
+ PINMUX_IPSR_DATA(IP11_23_21, VI1_DATA7_VI1_B7),
+ PINMUX_IPSR_MODSEL_DATA(IP11_23_21, SD2_WP, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_23_21, MT0_PWM),
+ PINMUX_IPSR_DATA(IP11_23_21, SPA_TDI),
+ PINMUX_IPSR_MODSEL_DATA(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3),
+ PINMUX_IPSR_DATA(IP11_26_24, VI1_G0),
+ PINMUX_IPSR_DATA(IP11_26_24, VI3_DATA0),
+ PINMUX_IPSR_DATA(IP11_26_24, DU1_DOTCLKOUT1),
+ PINMUX_IPSR_DATA(IP11_26_24, TS_SCK1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_26_24, DREQ2_B, SEL_EXBUS2_1),
+ PINMUX_IPSR_DATA(IP11_26_24, TX2),
+ PINMUX_IPSR_DATA(IP11_26_24, SPA_TDO),
+ PINMUX_IPSR_MODSEL_DATA(IP11_26_24, HCTS0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_DATA(IP11_29_27, VI1_G1),
+ PINMUX_IPSR_DATA(IP11_29_27, VI3_DATA1),
+ PINMUX_IPSR_DATA(IP11_29_27, SSI_SCK1),
+ PINMUX_IPSR_DATA(IP11_29_27, TS_SDEN1),
+ PINMUX_IPSR_DATA(IP11_29_27, DACK2_B),
+ PINMUX_IPSR_MODSEL_DATA(IP11_29_27, RX2, SEL_SCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_29_27, HRTS0_B, SEL_HSCIF0_1),
+
+ PINMUX_IPSR_DATA(IP12_2_0, VI1_G2),
+ PINMUX_IPSR_DATA(IP12_2_0, VI3_DATA2),
+ PINMUX_IPSR_DATA(IP12_2_0, SSI_WS1),
+ PINMUX_IPSR_DATA(IP12_2_0, TS_SPSYNC1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_2_0, SCK2, SEL_SCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP12_2_0, HSCK0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_DATA(IP12_5_3, VI1_G3),
+ PINMUX_IPSR_DATA(IP12_5_3, VI3_DATA3),
+ PINMUX_IPSR_DATA(IP12_5_3, SSI_SCK2),
+ PINMUX_IPSR_DATA(IP12_5_3, TS_SDAT1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_5_3, SCL1_C, SEL_I2C1_2),
+ PINMUX_IPSR_DATA(IP12_5_3, HTX0_B),
+ PINMUX_IPSR_DATA(IP12_8_6, VI1_G4),
+ PINMUX_IPSR_DATA(IP12_8_6, VI3_DATA4),
+ PINMUX_IPSR_DATA(IP12_8_6, SSI_WS2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_8_6, SDA1_C, SEL_I2C1_2),
+ PINMUX_IPSR_DATA(IP12_8_6, SIM_RST_B),
+ PINMUX_IPSR_MODSEL_DATA(IP12_8_6, HRX0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_DATA(IP12_11_9, VI1_G5),
+ PINMUX_IPSR_DATA(IP12_11_9, VI3_DATA5),
+ PINMUX_IPSR_MODSEL_DATA(IP12_11_9, GPS_CLK, SEL_GPS_0),
+ PINMUX_IPSR_DATA(IP12_11_9, FSE),
+ PINMUX_IPSR_DATA(IP12_11_9, TX4_B),
+ PINMUX_IPSR_MODSEL_DATA(IP12_11_9, SIM_D_B, SEL_SIM_1),
+ PINMUX_IPSR_DATA(IP12_14_12, VI1_G6),
+ PINMUX_IPSR_DATA(IP12_14_12, VI3_DATA6),
+ PINMUX_IPSR_MODSEL_DATA(IP12_14_12, GPS_SIGN, SEL_GPS_0),
+ PINMUX_IPSR_DATA(IP12_14_12, FRB),
+ PINMUX_IPSR_MODSEL_DATA(IP12_14_12, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_DATA(IP12_14_12, SIM_CLK_B),
+ PINMUX_IPSR_DATA(IP12_17_15, VI1_G7),
+ PINMUX_IPSR_DATA(IP12_17_15, VI3_DATA7),
+ PINMUX_IPSR_MODSEL_DATA(IP12_17_15, GPS_MAG, SEL_GPS_0),
+ PINMUX_IPSR_DATA(IP12_17_15, FCE),
+ PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SCK4_B, SEL_SCIF4_1),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ PINMUX_GPIO_GP_ALL(),
+ GPIO_FN(AVS1), GPIO_FN(AVS2), GPIO_FN(A17), GPIO_FN(A18),
+ GPIO_FN(A19),
+
+ /* IPSR0 */
+ GPIO_FN(USB_PENC2), GPIO_FN(SCK0), GPIO_FN(PWM1), GPIO_FN(PWMFSW0),
+ GPIO_FN(SCIF_CLK), GPIO_FN(TCLK0_C), GPIO_FN(BS), GPIO_FN(SD1_DAT2),
+ GPIO_FN(MMC0_D2), GPIO_FN(FD2), GPIO_FN(ATADIR0), GPIO_FN(SDSELF),
+ GPIO_FN(HCTS1), GPIO_FN(TX4_C), GPIO_FN(A0), GPIO_FN(SD1_DAT3),
+ GPIO_FN(MMC0_D3), GPIO_FN(FD3), GPIO_FN(A20), GPIO_FN(TX5_D),
+ GPIO_FN(HSPI_TX2_B), GPIO_FN(A21), GPIO_FN(SCK5_D),
+ GPIO_FN(HSPI_CLK2_B), GPIO_FN(A22), GPIO_FN(RX5_D),
+ GPIO_FN(HSPI_RX2_B), GPIO_FN(VI1_R0), GPIO_FN(A23), GPIO_FN(FCLE),
+ GPIO_FN(HSPI_CLK2), GPIO_FN(VI1_R1), GPIO_FN(A24), GPIO_FN(SD1_CD),
+ GPIO_FN(MMC0_D4), GPIO_FN(FD4), GPIO_FN(HSPI_CS2), GPIO_FN(VI1_R2),
+ GPIO_FN(SSI_WS78_B), GPIO_FN(A25), GPIO_FN(SD1_WP), GPIO_FN(MMC0_D5),
+ GPIO_FN(FD5), GPIO_FN(HSPI_RX2), GPIO_FN(VI1_R3), GPIO_FN(TX5_B),
+ GPIO_FN(SSI_SDATA7_B), GPIO_FN(CTS0_B), GPIO_FN(CLKOUT),
+ GPIO_FN(TX3C_IRDA_TX_C), GPIO_FN(PWM0_B), GPIO_FN(CS0),
+ GPIO_FN(HSPI_CS2_B), GPIO_FN(CS1_A26), GPIO_FN(HSPI_TX2),
+ GPIO_FN(SDSELF_B), GPIO_FN(RD_WR), GPIO_FN(FWE), GPIO_FN(ATAG0),
+ GPIO_FN(VI1_R7), GPIO_FN(HRTS1), GPIO_FN(RX4_C),
+
+ /* IPSR1 */
+ GPIO_FN(EX_CS0), GPIO_FN(RX3_C_IRDA_RX_C), GPIO_FN(MMC0_D6),
+ GPIO_FN(FD6), GPIO_FN(EX_CS1), GPIO_FN(MMC0_D7), GPIO_FN(FD7),
+ GPIO_FN(EX_CS2), GPIO_FN(SD1_CLK), GPIO_FN(MMC0_CLK), GPIO_FN(FALE),
+ GPIO_FN(ATACS00), GPIO_FN(EX_CS3), GPIO_FN(SD1_CMD), GPIO_FN(MMC0_CMD),
+ GPIO_FN(FRE), GPIO_FN(ATACS10), GPIO_FN(VI1_R4), GPIO_FN(RX5_B),
+ GPIO_FN(HSCK1), GPIO_FN(SSI_SDATA8_B), GPIO_FN(RTS0_B_TANS_B),
+ GPIO_FN(SSI_SDATA9), GPIO_FN(EX_CS4), GPIO_FN(SD1_DAT0),
+ GPIO_FN(MMC0_D0), GPIO_FN(FD0), GPIO_FN(ATARD0), GPIO_FN(VI1_R5),
+ GPIO_FN(SCK5_B), GPIO_FN(HTX1), GPIO_FN(TX2_E), GPIO_FN(TX0_B),
+ GPIO_FN(SSI_SCK9), GPIO_FN(EX_CS5), GPIO_FN(SD1_DAT1),
+ GPIO_FN(MMC0_D1), GPIO_FN(FD1), GPIO_FN(ATAWR0), GPIO_FN(VI1_R6),
+ GPIO_FN(HRX1), GPIO_FN(RX2_E), GPIO_FN(RX0_B), GPIO_FN(SSI_WS9),
+ GPIO_FN(MLB_CLK), GPIO_FN(PWM2), GPIO_FN(SCK4), GPIO_FN(MLB_SIG),
+ GPIO_FN(PWM3), GPIO_FN(TX4), GPIO_FN(MLB_DAT), GPIO_FN(PWM4),
+ GPIO_FN(RX4), GPIO_FN(HTX0), GPIO_FN(TX1), GPIO_FN(SDATA),
+ GPIO_FN(CTS0_C), GPIO_FN(SUB_TCK), GPIO_FN(CC5_STATE2),
+ GPIO_FN(CC5_STATE10), GPIO_FN(CC5_STATE18), GPIO_FN(CC5_STATE26),
+ GPIO_FN(CC5_STATE34),
+
+ /* IPSR2 */
+ GPIO_FN(HRX0), GPIO_FN(RX1), GPIO_FN(SCKZ), GPIO_FN(RTS0_C_TANS_C),
+ GPIO_FN(SUB_TDI), GPIO_FN(CC5_STATE3), GPIO_FN(CC5_STATE11),
+ GPIO_FN(CC5_STATE19), GPIO_FN(CC5_STATE27), GPIO_FN(CC5_STATE35),
+ GPIO_FN(HSCK0), GPIO_FN(SCK1), GPIO_FN(MTS), GPIO_FN(PWM5),
+ GPIO_FN(SCK0_C), GPIO_FN(SSI_SDATA9_B), GPIO_FN(SUB_TDO),
+ GPIO_FN(CC5_STATE0), GPIO_FN(CC5_STATE8), GPIO_FN(CC5_STATE16),
+ GPIO_FN(CC5_STATE24), GPIO_FN(CC5_STATE32), GPIO_FN(HCTS0),
+ GPIO_FN(CTS1), GPIO_FN(STM), GPIO_FN(PWM0_D), GPIO_FN(RX0_C),
+ GPIO_FN(SCIF_CLK_C), GPIO_FN(SUB_TRST), GPIO_FN(TCLK1_B),
+ GPIO_FN(CC5_OSCOUT), GPIO_FN(HRTS0), GPIO_FN(RTS1_TANS),
+ GPIO_FN(MDATA), GPIO_FN(TX0_C), GPIO_FN(SUB_TMS), GPIO_FN(CC5_STATE1),
+ GPIO_FN(CC5_STATE9), GPIO_FN(CC5_STATE17), GPIO_FN(CC5_STATE25),
+ GPIO_FN(CC5_STATE33), GPIO_FN(DU0_DR0), GPIO_FN(LCDOUT0),
+ GPIO_FN(DREQ0), GPIO_FN(GPS_CLK_B), GPIO_FN(AUDATA0),
+ GPIO_FN(TX5_C), GPIO_FN(DU0_DR1), GPIO_FN(LCDOUT1), GPIO_FN(DACK0),
+ GPIO_FN(DRACK0), GPIO_FN(GPS_SIGN_B), GPIO_FN(AUDATA1), GPIO_FN(RX5_C),
+ GPIO_FN(DU0_DR2), GPIO_FN(LCDOUT2), GPIO_FN(DU0_DR3), GPIO_FN(LCDOUT3),
+ GPIO_FN(DU0_DR4), GPIO_FN(LCDOUT4), GPIO_FN(DU0_DR5), GPIO_FN(LCDOUT5),
+ GPIO_FN(DU0_DR6), GPIO_FN(LCDOUT6), GPIO_FN(DU0_DR7), GPIO_FN(LCDOUT7),
+ GPIO_FN(DU0_DG0), GPIO_FN(LCDOUT8), GPIO_FN(DREQ1), GPIO_FN(SCL2),
+ GPIO_FN(AUDATA2),
+
+ /* IPSR3 */
+ GPIO_FN(DU0_DG1), GPIO_FN(LCDOUT9), GPIO_FN(DACK1), GPIO_FN(SDA2),
+ GPIO_FN(AUDATA3), GPIO_FN(DU0_DG2), GPIO_FN(LCDOUT10),
+ GPIO_FN(DU0_DG3), GPIO_FN(LCDOUT11), GPIO_FN(DU0_DG4),
+ GPIO_FN(LCDOUT12), GPIO_FN(DU0_DG5), GPIO_FN(LCDOUT13),
+ GPIO_FN(DU0_DG6), GPIO_FN(LCDOUT14), GPIO_FN(DU0_DG7),
+ GPIO_FN(LCDOUT15), GPIO_FN(DU0_DB0), GPIO_FN(LCDOUT16),
+ GPIO_FN(EX_WAIT1), GPIO_FN(SCL1), GPIO_FN(TCLK1), GPIO_FN(AUDATA4),
+ GPIO_FN(DU0_DB1), GPIO_FN(LCDOUT17), GPIO_FN(EX_WAIT2), GPIO_FN(SDA1),
+ GPIO_FN(GPS_MAG_B), GPIO_FN(AUDATA5), GPIO_FN(SCK5_C),
+ GPIO_FN(DU0_DB2), GPIO_FN(LCDOUT18), GPIO_FN(DU0_DB3),
+ GPIO_FN(LCDOUT19), GPIO_FN(DU0_DB4), GPIO_FN(LCDOUT20),
+ GPIO_FN(DU0_DB5), GPIO_FN(LCDOUT21), GPIO_FN(DU0_DB6),
+ GPIO_FN(LCDOUT22), GPIO_FN(DU0_DB7), GPIO_FN(LCDOUT23),
+ GPIO_FN(DU0_DOTCLKIN), GPIO_FN(QSTVA_QVS), GPIO_FN(TX3_D_IRDA_TX_D),
+ GPIO_FN(SCL3_B), GPIO_FN(DU0_DOTCLKOUT0), GPIO_FN(QCLK),
+ GPIO_FN(DU0_DOTCLKOUT1), GPIO_FN(QSTVB_QVE), GPIO_FN(RX3_D_IRDA_RX_D),
+ GPIO_FN(SDA3_B), GPIO_FN(SDA2_C), GPIO_FN(DACK0_B), GPIO_FN(DRACK0_B),
+ GPIO_FN(DU0_EXHSYNC_DU0_HSYNC), GPIO_FN(QSTH_QHS),
+ GPIO_FN(DU0_EXVSYNC_DU0_VSYNC), GPIO_FN(QSTB_QHE),
+ GPIO_FN(DU0_EXODDF_DU0_ODDF_DISP_CDE), GPIO_FN(QCPV_QDE),
+ GPIO_FN(CAN1_TX), GPIO_FN(TX2_C), GPIO_FN(SCL2_C), GPIO_FN(REMOCON),
+
+ /* IPSR4 */
+ GPIO_FN(DU0_DISP), GPIO_FN(QPOLA), GPIO_FN(CAN_CLK_C), GPIO_FN(SCK2_C),
+ GPIO_FN(DU0_CDE), GPIO_FN(QPOLB), GPIO_FN(CAN1_RX), GPIO_FN(RX2_C),
+ GPIO_FN(DREQ0_B), GPIO_FN(SSI_SCK78_B), GPIO_FN(SCK0_B),
+ GPIO_FN(DU1_DR0), GPIO_FN(VI2_DATA0_VI2_B0), GPIO_FN(PWM6),
+ GPIO_FN(SD3_CLK), GPIO_FN(TX3_E_IRDA_TX_E), GPIO_FN(AUDCK),
+ GPIO_FN(PWMFSW0_B), GPIO_FN(DU1_DR1), GPIO_FN(VI2_DATA1_VI2_B1),
+ GPIO_FN(PWM0), GPIO_FN(SD3_CMD), GPIO_FN(RX3_E_IRDA_RX_E),
+ GPIO_FN(AUDSYNC), GPIO_FN(CTS0_D), GPIO_FN(DU1_DR2), GPIO_FN(VI2_G0),
+ GPIO_FN(DU1_DR3), GPIO_FN(VI2_G1), GPIO_FN(DU1_DR4), GPIO_FN(VI2_G2),
+ GPIO_FN(DU1_DR5), GPIO_FN(VI2_G3), GPIO_FN(DU1_DR6), GPIO_FN(VI2_G4),
+ GPIO_FN(DU1_DR7), GPIO_FN(VI2_G5), GPIO_FN(DU1_DG0),
+ GPIO_FN(VI2_DATA2_VI2_B2), GPIO_FN(SCL1_B), GPIO_FN(SD3_DAT2),
+ GPIO_FN(SCK3_E), GPIO_FN(AUDATA6), GPIO_FN(TX0_D), GPIO_FN(DU1_DG1),
+ GPIO_FN(VI2_DATA3_VI2_B3), GPIO_FN(SDA1_B), GPIO_FN(SD3_DAT3),
+ GPIO_FN(SCK5), GPIO_FN(AUDATA7), GPIO_FN(RX0_D), GPIO_FN(DU1_DG2),
+ GPIO_FN(VI2_G6), GPIO_FN(DU1_DG3), GPIO_FN(VI2_G7), GPIO_FN(DU1_DG4),
+ GPIO_FN(VI2_R0), GPIO_FN(DU1_DG5), GPIO_FN(VI2_R1), GPIO_FN(DU1_DG6),
+ GPIO_FN(VI2_R2), GPIO_FN(DU1_DG7), GPIO_FN(VI2_R3), GPIO_FN(DU1_DB0),
+ GPIO_FN(VI2_DATA4_VI2_B4), GPIO_FN(SCL2_B), GPIO_FN(SD3_DAT0),
+ GPIO_FN(TX5), GPIO_FN(SCK0_D),
+
+ /* IPSR5 */
+ GPIO_FN(DU1_DB1), GPIO_FN(VI2_DATA5_VI2_B5), GPIO_FN(SDA2_B),
+ GPIO_FN(SD3_DAT1), GPIO_FN(RX5), GPIO_FN(RTS0_D_TANS_D),
+ GPIO_FN(DU1_DB2), GPIO_FN(VI2_R4), GPIO_FN(DU1_DB3), GPIO_FN(VI2_R5),
+ GPIO_FN(DU1_DB4), GPIO_FN(VI2_R6), GPIO_FN(DU1_DB5), GPIO_FN(VI2_R7),
+ GPIO_FN(DU1_DB6), GPIO_FN(SCL2_D), GPIO_FN(DU1_DB7), GPIO_FN(SDA2_D),
+ GPIO_FN(DU1_DOTCLKIN), GPIO_FN(VI2_CLKENB), GPIO_FN(HSPI_CS1),
+ GPIO_FN(SCL1_D), GPIO_FN(DU1_DOTCLKOUT), GPIO_FN(VI2_FIELD),
+ GPIO_FN(SDA1_D), GPIO_FN(DU1_EXHSYNC_DU1_HSYNC), GPIO_FN(VI2_HSYNC),
+ GPIO_FN(VI3_HSYNC), GPIO_FN(DU1_EXVSYNC_DU1_VSYNC), GPIO_FN(VI2_VSYNC),
+ GPIO_FN(VI3_VSYNC), GPIO_FN(DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ GPIO_FN(VI2_CLK), GPIO_FN(TX3_B_IRDA_TX_B), GPIO_FN(SD3_CD),
+ GPIO_FN(HSPI_TX1), GPIO_FN(VI1_CLKENB), GPIO_FN(VI3_CLKENB),
+ GPIO_FN(AUDIO_CLKC), GPIO_FN(TX2_D), GPIO_FN(SPEEDIN),
+ GPIO_FN(GPS_SIGN_D), GPIO_FN(DU1_DISP), GPIO_FN(VI2_DATA6_VI2_B6),
+ GPIO_FN(TCLK0), GPIO_FN(QSTVA_B_QVS_B), GPIO_FN(HSPI_CLK1),
+ GPIO_FN(SCK2_D), GPIO_FN(AUDIO_CLKOUT_B), GPIO_FN(GPS_MAG_D),
+ GPIO_FN(DU1_CDE), GPIO_FN(VI2_DATA7_VI2_B7), GPIO_FN(RX3_B_IRDA_RX_B),
+ GPIO_FN(SD3_WP), GPIO_FN(HSPI_RX1), GPIO_FN(VI1_FIELD),
+ GPIO_FN(VI3_FIELD), GPIO_FN(AUDIO_CLKOUT), GPIO_FN(RX2_D),
+ GPIO_FN(GPS_CLK_C), GPIO_FN(GPS_CLK_D), GPIO_FN(AUDIO_CLKA),
+ GPIO_FN(CAN_TXCLK), GPIO_FN(AUDIO_CLKB), GPIO_FN(USB_OVC2),
+ GPIO_FN(CAN_DEBUGOUT0), GPIO_FN(MOUT0),
+
+ /* IPSR6 */
+ GPIO_FN(SSI_SCK0129), GPIO_FN(CAN_DEBUGOUT1), GPIO_FN(MOUT1),
+ GPIO_FN(SSI_WS0129), GPIO_FN(CAN_DEBUGOUT2), GPIO_FN(MOUT2),
+ GPIO_FN(SSI_SDATA0), GPIO_FN(CAN_DEBUGOUT3), GPIO_FN(MOUT5),
+ GPIO_FN(SSI_SDATA1), GPIO_FN(CAN_DEBUGOUT4), GPIO_FN(MOUT6),
+ GPIO_FN(SSI_SDATA2), GPIO_FN(CAN_DEBUGOUT5), GPIO_FN(SSI_SCK34),
+ GPIO_FN(CAN_DEBUGOUT6), GPIO_FN(CAN0_TX_B), GPIO_FN(IERX),
+ GPIO_FN(SSI_SCK9_C), GPIO_FN(SSI_WS34), GPIO_FN(CAN_DEBUGOUT7),
+ GPIO_FN(CAN0_RX_B), GPIO_FN(IETX), GPIO_FN(SSI_WS9_C),
+ GPIO_FN(SSI_SDATA3), GPIO_FN(PWM0_C), GPIO_FN(CAN_DEBUGOUT8),
+ GPIO_FN(CAN_CLK_B), GPIO_FN(IECLK), GPIO_FN(SCIF_CLK_B),
+ GPIO_FN(TCLK0_B), GPIO_FN(SSI_SDATA4), GPIO_FN(CAN_DEBUGOUT9),
+ GPIO_FN(SSI_SDATA9_C), GPIO_FN(SSI_SCK5), GPIO_FN(ADICLK),
+ GPIO_FN(CAN_DEBUGOUT10), GPIO_FN(SCK3), GPIO_FN(TCLK0_D),
+ GPIO_FN(SSI_WS5), GPIO_FN(ADICS_SAMP), GPIO_FN(CAN_DEBUGOUT11),
+ GPIO_FN(TX3_IRDA_TX), GPIO_FN(SSI_SDATA5), GPIO_FN(ADIDATA),
+ GPIO_FN(CAN_DEBUGOUT12), GPIO_FN(RX3_IRDA_RX), GPIO_FN(SSI_SCK6),
+ GPIO_FN(ADICHS0), GPIO_FN(CAN0_TX), GPIO_FN(IERX_B),
+
+ /* IPSR7 */
+ GPIO_FN(SSI_WS6), GPIO_FN(ADICHS1), GPIO_FN(CAN0_RX), GPIO_FN(IETX_B),
+ GPIO_FN(SSI_SDATA6), GPIO_FN(ADICHS2), GPIO_FN(CAN_CLK),
+ GPIO_FN(IECLK_B), GPIO_FN(SSI_SCK78), GPIO_FN(CAN_DEBUGOUT13),
+ GPIO_FN(IRQ0_B), GPIO_FN(SSI_SCK9_B), GPIO_FN(HSPI_CLK1_C),
+ GPIO_FN(SSI_WS78), GPIO_FN(CAN_DEBUGOUT14), GPIO_FN(IRQ1_B),
+ GPIO_FN(SSI_WS9_B), GPIO_FN(HSPI_CS1_C), GPIO_FN(SSI_SDATA7),
+ GPIO_FN(CAN_DEBUGOUT15), GPIO_FN(IRQ2_B), GPIO_FN(TCLK1_C),
+ GPIO_FN(HSPI_TX1_C), GPIO_FN(SSI_SDATA8), GPIO_FN(VSP),
+ GPIO_FN(IRQ3_B), GPIO_FN(HSPI_RX1_C), GPIO_FN(SD0_CLK),
+ GPIO_FN(ATACS01), GPIO_FN(SCK1_B), GPIO_FN(SD0_CMD), GPIO_FN(ATACS11),
+ GPIO_FN(TX1_B), GPIO_FN(CC5_TDO), GPIO_FN(SD0_DAT0), GPIO_FN(ATADIR1),
+ GPIO_FN(RX1_B), GPIO_FN(CC5_TRST), GPIO_FN(SD0_DAT1), GPIO_FN(ATAG1),
+ GPIO_FN(SCK2_B), GPIO_FN(CC5_TMS), GPIO_FN(SD0_DAT2), GPIO_FN(ATARD1),
+ GPIO_FN(TX2_B), GPIO_FN(CC5_TCK), GPIO_FN(SD0_DAT3), GPIO_FN(ATAWR1),
+ GPIO_FN(RX2_B), GPIO_FN(CC5_TDI), GPIO_FN(SD0_CD), GPIO_FN(DREQ2),
+ GPIO_FN(RTS1_B_TANS_B), GPIO_FN(SD0_WP), GPIO_FN(DACK2),
+ GPIO_FN(CTS1_B),
+
+ /* IPSR8 */
+ GPIO_FN(HSPI_CLK0), GPIO_FN(CTS0), GPIO_FN(USB_OVC0), GPIO_FN(AD_CLK),
+ GPIO_FN(CC5_STATE4), GPIO_FN(CC5_STATE12), GPIO_FN(CC5_STATE20),
+ GPIO_FN(CC5_STATE28), GPIO_FN(CC5_STATE36), GPIO_FN(HSPI_CS0),
+ GPIO_FN(RTS0_TANS), GPIO_FN(USB_OVC1), GPIO_FN(AD_DI),
+ GPIO_FN(CC5_STATE5), GPIO_FN(CC5_STATE13), GPIO_FN(CC5_STATE21),
+ GPIO_FN(CC5_STATE29), GPIO_FN(CC5_STATE37), GPIO_FN(HSPI_TX0),
+ GPIO_FN(TX0), GPIO_FN(CAN_DEBUG_HW_TRIGGER), GPIO_FN(AD_DO),
+ GPIO_FN(CC5_STATE6), GPIO_FN(CC5_STATE14), GPIO_FN(CC5_STATE22),
+ GPIO_FN(CC5_STATE30), GPIO_FN(CC5_STATE38), GPIO_FN(HSPI_RX0),
+ GPIO_FN(RX0), GPIO_FN(CAN_STEP0), GPIO_FN(AD_NCS), GPIO_FN(CC5_STATE7),
+ GPIO_FN(CC5_STATE15), GPIO_FN(CC5_STATE23), GPIO_FN(CC5_STATE31),
+ GPIO_FN(CC5_STATE39), GPIO_FN(FMCLK), GPIO_FN(RDS_CLK), GPIO_FN(PCMOE),
+ GPIO_FN(BPFCLK), GPIO_FN(PCMWE), GPIO_FN(FMIN), GPIO_FN(RDS_DATA),
+ GPIO_FN(VI0_CLK), GPIO_FN(MMC1_CLK), GPIO_FN(VI0_CLKENB),
+ GPIO_FN(TX1_C), GPIO_FN(HTX1_B), GPIO_FN(MT1_SYNC),
+ GPIO_FN(VI0_FIELD), GPIO_FN(RX1_C), GPIO_FN(HRX1_B),
+ GPIO_FN(VI0_HSYNC), GPIO_FN(VI0_DATA0_B_VI0_B0_B), GPIO_FN(CTS1_C),
+ GPIO_FN(TX4_D), GPIO_FN(MMC1_CMD), GPIO_FN(HSCK1_B),
+ GPIO_FN(VI0_VSYNC), GPIO_FN(VI0_DATA1_B_VI0_B1_B),
+ GPIO_FN(RTS1_C_TANS_C), GPIO_FN(RX4_D), GPIO_FN(PWMFSW0_C),
+
+ /* IPSR9 */
+ GPIO_FN(VI0_DATA0_VI0_B0), GPIO_FN(HRTS1_B), GPIO_FN(MT1_VCXO),
+ GPIO_FN(VI0_DATA1_VI0_B1), GPIO_FN(HCTS1_B), GPIO_FN(MT1_PWM),
+ GPIO_FN(VI0_DATA2_VI0_B2), GPIO_FN(MMC1_D0), GPIO_FN(VI0_DATA3_VI0_B3),
+ GPIO_FN(MMC1_D1), GPIO_FN(VI0_DATA4_VI0_B4), GPIO_FN(MMC1_D2),
+ GPIO_FN(VI0_DATA5_VI0_B5), GPIO_FN(MMC1_D3), GPIO_FN(VI0_DATA6_VI0_B6),
+ GPIO_FN(MMC1_D4), GPIO_FN(ARM_TRACEDATA_0), GPIO_FN(VI0_DATA7_VI0_B7),
+ GPIO_FN(MMC1_D5), GPIO_FN(ARM_TRACEDATA_1), GPIO_FN(VI0_G0),
+ GPIO_FN(SSI_SCK78_C), GPIO_FN(IRQ0), GPIO_FN(ARM_TRACEDATA_2),
+ GPIO_FN(VI0_G1), GPIO_FN(SSI_WS78_C), GPIO_FN(IRQ1),
+ GPIO_FN(ARM_TRACEDATA_3), GPIO_FN(VI0_G2), GPIO_FN(ETH_TXD1),
+ GPIO_FN(MMC1_D6), GPIO_FN(ARM_TRACEDATA_4), GPIO_FN(TS_SPSYNC0),
+ GPIO_FN(VI0_G3), GPIO_FN(ETH_CRS_DV), GPIO_FN(MMC1_D7),
+ GPIO_FN(ARM_TRACEDATA_5), GPIO_FN(TS_SDAT0), GPIO_FN(VI0_G4),
+ GPIO_FN(ETH_TX_EN), GPIO_FN(SD2_DAT0_B), GPIO_FN(ARM_TRACEDATA_6),
+ GPIO_FN(VI0_G5), GPIO_FN(ETH_RX_ER), GPIO_FN(SD2_DAT1_B),
+ GPIO_FN(ARM_TRACEDATA_7), GPIO_FN(VI0_G6), GPIO_FN(ETH_RXD0),
+ GPIO_FN(SD2_DAT2_B), GPIO_FN(ARM_TRACEDATA_8), GPIO_FN(VI0_G7),
+ GPIO_FN(ETH_RXD1), GPIO_FN(SD2_DAT3_B), GPIO_FN(ARM_TRACEDATA_9),
+
+ /* IPSR10 */
+ GPIO_FN(VI0_R0), GPIO_FN(SSI_SDATA7_C), GPIO_FN(SCK1_C),
+ GPIO_FN(DREQ1_B), GPIO_FN(ARM_TRACEDATA_10), GPIO_FN(DREQ0_C),
+ GPIO_FN(VI0_R1), GPIO_FN(SSI_SDATA8_C), GPIO_FN(DACK1_B),
+ GPIO_FN(ARM_TRACEDATA_11), GPIO_FN(DACK0_C), GPIO_FN(DRACK0_C),
+ GPIO_FN(VI0_R2), GPIO_FN(ETH_LINK), GPIO_FN(SD2_CLK_B), GPIO_FN(IRQ2),
+ GPIO_FN(ARM_TRACEDATA_12), GPIO_FN(VI0_R3), GPIO_FN(ETH_MAGIC),
+ GPIO_FN(SD2_CMD_B), GPIO_FN(IRQ3), GPIO_FN(ARM_TRACEDATA_13),
+ GPIO_FN(VI0_R4), GPIO_FN(ETH_REFCLK), GPIO_FN(SD2_CD_B),
+ GPIO_FN(HSPI_CLK1_B), GPIO_FN(ARM_TRACEDATA_14), GPIO_FN(MT1_CLK),
+ GPIO_FN(TS_SCK0), GPIO_FN(VI0_R5), GPIO_FN(ETH_TXD0),
+ GPIO_FN(SD2_WP_B), GPIO_FN(HSPI_CS1_B), GPIO_FN(ARM_TRACEDATA_15),
+ GPIO_FN(MT1_D), GPIO_FN(TS_SDEN0), GPIO_FN(VI0_R6), GPIO_FN(ETH_MDC),
+ GPIO_FN(DREQ2_C), GPIO_FN(HSPI_TX1_B), GPIO_FN(TRACECLK),
+ GPIO_FN(MT1_BEN), GPIO_FN(PWMFSW0_D), GPIO_FN(VI0_R7),
+ GPIO_FN(ETH_MDIO), GPIO_FN(DACK2_C), GPIO_FN(HSPI_RX1_B),
+ GPIO_FN(SCIF_CLK_D), GPIO_FN(TRACECTL), GPIO_FN(MT1_PEN),
+ GPIO_FN(VI1_CLK), GPIO_FN(SIM_D), GPIO_FN(SDA3), GPIO_FN(VI1_HSYNC),
+ GPIO_FN(VI3_CLK), GPIO_FN(SSI_SCK4), GPIO_FN(GPS_SIGN_C),
+ GPIO_FN(PWMFSW0_E), GPIO_FN(VI1_VSYNC), GPIO_FN(AUDIO_CLKOUT_C),
+ GPIO_FN(SSI_WS4), GPIO_FN(SIM_CLK), GPIO_FN(GPS_MAG_C),
+ GPIO_FN(SPV_TRST), GPIO_FN(SCL3),
+
+ /* IPSR11 */
+ GPIO_FN(VI1_DATA0_VI1_B0), GPIO_FN(SD2_DAT0), GPIO_FN(SIM_RST),
+ GPIO_FN(SPV_TCK), GPIO_FN(ADICLK_B), GPIO_FN(VI1_DATA1_VI1_B1),
+ GPIO_FN(SD2_DAT1), GPIO_FN(MT0_CLK), GPIO_FN(SPV_TMS),
+ GPIO_FN(ADICS_B_SAMP_B), GPIO_FN(VI1_DATA2_VI1_B2), GPIO_FN(SD2_DAT2),
+ GPIO_FN(MT0_D), GPIO_FN(SPVTDI), GPIO_FN(ADIDATA_B),
+ GPIO_FN(VI1_DATA3_VI1_B3), GPIO_FN(SD2_DAT3), GPIO_FN(MT0_BEN),
+ GPIO_FN(SPV_TDO), GPIO_FN(ADICHS0_B), GPIO_FN(VI1_DATA4_VI1_B4),
+ GPIO_FN(SD2_CLK), GPIO_FN(MT0_PEN), GPIO_FN(SPA_TRST),
+ GPIO_FN(HSPI_CLK1_D), GPIO_FN(ADICHS1_B), GPIO_FN(VI1_DATA5_VI1_B5),
+ GPIO_FN(SD2_CMD), GPIO_FN(MT0_SYNC), GPIO_FN(SPA_TCK),
+ GPIO_FN(HSPI_CS1_D), GPIO_FN(ADICHS2_B), GPIO_FN(VI1_DATA6_VI1_B6),
+ GPIO_FN(SD2_CD), GPIO_FN(MT0_VCXO), GPIO_FN(SPA_TMS),
+ GPIO_FN(HSPI_TX1_D), GPIO_FN(VI1_DATA7_VI1_B7), GPIO_FN(SD2_WP),
+ GPIO_FN(MT0_PWM), GPIO_FN(SPA_TDI), GPIO_FN(HSPI_RX1_D),
+ GPIO_FN(VI1_G0), GPIO_FN(VI3_DATA0), GPIO_FN(DU1_DOTCLKOUT1),
+ GPIO_FN(TS_SCK1), GPIO_FN(DREQ2_B), GPIO_FN(TX2), GPIO_FN(SPA_TDO),
+ GPIO_FN(HCTS0_B), GPIO_FN(VI1_G1), GPIO_FN(VI3_DATA1),
+ GPIO_FN(SSI_SCK1), GPIO_FN(TS_SDEN1), GPIO_FN(DACK2_B), GPIO_FN(RX2),
+ GPIO_FN(HRTS0_B),
+
+ /* IPSR12 */
+ GPIO_FN(VI1_G2), GPIO_FN(VI3_DATA2), GPIO_FN(SSI_WS1),
+ GPIO_FN(TS_SPSYNC1), GPIO_FN(SCK2), GPIO_FN(HSCK0_B), GPIO_FN(VI1_G3),
+ GPIO_FN(VI3_DATA3), GPIO_FN(SSI_SCK2), GPIO_FN(TS_SDAT1),
+ GPIO_FN(SCL1_C), GPIO_FN(HTX0_B), GPIO_FN(VI1_G4), GPIO_FN(VI3_DATA4),
+ GPIO_FN(SSI_WS2), GPIO_FN(SDA1_C), GPIO_FN(SIM_RST_B),
+ GPIO_FN(HRX0_B), GPIO_FN(VI1_G5), GPIO_FN(VI3_DATA5),
+ GPIO_FN(GPS_CLK), GPIO_FN(FSE), GPIO_FN(TX4_B), GPIO_FN(SIM_D_B),
+ GPIO_FN(VI1_G6), GPIO_FN(VI3_DATA6), GPIO_FN(GPS_SIGN), GPIO_FN(FRB),
+ GPIO_FN(RX4_B), GPIO_FN(SIM_CLK_B), GPIO_FN(VI1_G7),
+ GPIO_FN(VI3_DATA7), GPIO_FN(GPS_MAG), GPIO_FN(FCE), GPIO_FN(SCK4_B),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xfffc0004, 32, 1) {
+ GP_0_31_FN, FN_IP3_31_29,
+ GP_0_30_FN, FN_IP3_26_24,
+ GP_0_29_FN, FN_IP3_22_21,
+ GP_0_28_FN, FN_IP3_14_12,
+ GP_0_27_FN, FN_IP3_11_9,
+ GP_0_26_FN, FN_IP3_2_0,
+ GP_0_25_FN, FN_IP2_30_28,
+ GP_0_24_FN, FN_IP2_21_19,
+ GP_0_23_FN, FN_IP2_18_16,
+ GP_0_22_FN, FN_IP0_30_28,
+ GP_0_21_FN, FN_IP0_5_3,
+ GP_0_20_FN, FN_IP1_18_15,
+ GP_0_19_FN, FN_IP1_14_11,
+ GP_0_18_FN, FN_IP1_10_7,
+ GP_0_17_FN, FN_IP1_6_4,
+ GP_0_16_FN, FN_IP1_3_2,
+ GP_0_15_FN, FN_IP1_1_0,
+ GP_0_14_FN, FN_IP0_27_26,
+ GP_0_13_FN, FN_IP0_25,
+ GP_0_12_FN, FN_IP0_24_23,
+ GP_0_11_FN, FN_IP0_22_19,
+ GP_0_10_FN, FN_IP0_18_16,
+ GP_0_9_FN, FN_IP0_15_14,
+ GP_0_8_FN, FN_IP0_13_12,
+ GP_0_7_FN, FN_IP0_11_10,
+ GP_0_6_FN, FN_IP0_9_8,
+ GP_0_5_FN, FN_A19,
+ GP_0_4_FN, FN_A18,
+ GP_0_3_FN, FN_A17,
+ GP_0_2_FN, FN_IP0_7_6,
+ GP_0_1_FN, FN_AVS2,
+ GP_0_0_FN, FN_AVS1 }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xfffc0008, 32, 1) {
+ GP_1_31_FN, FN_IP5_23_21,
+ GP_1_30_FN, FN_IP5_20_17,
+ GP_1_29_FN, FN_IP5_16_15,
+ GP_1_28_FN, FN_IP5_14_13,
+ GP_1_27_FN, FN_IP5_12_11,
+ GP_1_26_FN, FN_IP5_10_9,
+ GP_1_25_FN, FN_IP5_8,
+ GP_1_24_FN, FN_IP5_7,
+ GP_1_23_FN, FN_IP5_6,
+ GP_1_22_FN, FN_IP5_5,
+ GP_1_21_FN, FN_IP5_4,
+ GP_1_20_FN, FN_IP5_3,
+ GP_1_19_FN, FN_IP5_2_0,
+ GP_1_18_FN, FN_IP4_31_29,
+ GP_1_17_FN, FN_IP4_28,
+ GP_1_16_FN, FN_IP4_27,
+ GP_1_15_FN, FN_IP4_26,
+ GP_1_14_FN, FN_IP4_25,
+ GP_1_13_FN, FN_IP4_24,
+ GP_1_12_FN, FN_IP4_23,
+ GP_1_11_FN, FN_IP4_22_20,
+ GP_1_10_FN, FN_IP4_19_17,
+ GP_1_9_FN, FN_IP4_16,
+ GP_1_8_FN, FN_IP4_15,
+ GP_1_7_FN, FN_IP4_14,
+ GP_1_6_FN, FN_IP4_13,
+ GP_1_5_FN, FN_IP4_12,
+ GP_1_4_FN, FN_IP4_11,
+ GP_1_3_FN, FN_IP4_10_8,
+ GP_1_2_FN, FN_IP4_7_5,
+ GP_1_1_FN, FN_IP4_4_2,
+ GP_1_0_FN, FN_IP4_1_0 }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xfffc000c, 32, 1) {
+ GP_2_31_FN, FN_IP10_28_26,
+ GP_2_30_FN, FN_IP10_25_24,
+ GP_2_29_FN, FN_IP10_23_21,
+ GP_2_28_FN, FN_IP10_20_18,
+ GP_2_27_FN, FN_IP10_17_15,
+ GP_2_26_FN, FN_IP10_14_12,
+ GP_2_25_FN, FN_IP10_11_9,
+ GP_2_24_FN, FN_IP10_8_6,
+ GP_2_23_FN, FN_IP10_5_3,
+ GP_2_22_FN, FN_IP10_2_0,
+ GP_2_21_FN, FN_IP9_29_28,
+ GP_2_20_FN, FN_IP9_27_26,
+ GP_2_19_FN, FN_IP9_25_24,
+ GP_2_18_FN, FN_IP9_23_22,
+ GP_2_17_FN, FN_IP9_21_19,
+ GP_2_16_FN, FN_IP9_18_16,
+ GP_2_15_FN, FN_IP9_15_14,
+ GP_2_14_FN, FN_IP9_13_12,
+ GP_2_13_FN, FN_IP9_11_10,
+ GP_2_12_FN, FN_IP9_9_8,
+ GP_2_11_FN, FN_IP9_7,
+ GP_2_10_FN, FN_IP9_6,
+ GP_2_9_FN, FN_IP9_5,
+ GP_2_8_FN, FN_IP9_4,
+ GP_2_7_FN, FN_IP9_3_2,
+ GP_2_6_FN, FN_IP9_1_0,
+ GP_2_5_FN, FN_IP8_30_28,
+ GP_2_4_FN, FN_IP8_27_25,
+ GP_2_3_FN, FN_IP8_24_23,
+ GP_2_2_FN, FN_IP8_22_21,
+ GP_2_1_FN, FN_IP8_20,
+ GP_2_0_FN, FN_IP5_27_24 }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xfffc0010, 32, 1) {
+ GP_3_31_FN, FN_IP6_3_2,
+ GP_3_30_FN, FN_IP6_1_0,
+ GP_3_29_FN, FN_IP5_30_29,
+ GP_3_28_FN, FN_IP5_28,
+ GP_3_27_FN, FN_IP1_24_23,
+ GP_3_26_FN, FN_IP1_22_21,
+ GP_3_25_FN, FN_IP1_20_19,
+ GP_3_24_FN, FN_IP7_26_25,
+ GP_3_23_FN, FN_IP7_24_23,
+ GP_3_22_FN, FN_IP7_22_21,
+ GP_3_21_FN, FN_IP7_20_19,
+ GP_3_20_FN, FN_IP7_30_29,
+ GP_3_19_FN, FN_IP7_28_27,
+ GP_3_18_FN, FN_IP7_18_17,
+ GP_3_17_FN, FN_IP7_16_15,
+ GP_3_16_FN, FN_IP12_17_15,
+ GP_3_15_FN, FN_IP12_14_12,
+ GP_3_14_FN, FN_IP12_11_9,
+ GP_3_13_FN, FN_IP12_8_6,
+ GP_3_12_FN, FN_IP12_5_3,
+ GP_3_11_FN, FN_IP12_2_0,
+ GP_3_10_FN, FN_IP11_29_27,
+ GP_3_9_FN, FN_IP11_26_24,
+ GP_3_8_FN, FN_IP11_23_21,
+ GP_3_7_FN, FN_IP11_20_18,
+ GP_3_6_FN, FN_IP11_17_15,
+ GP_3_5_FN, FN_IP11_14_12,
+ GP_3_4_FN, FN_IP11_11_9,
+ GP_3_3_FN, FN_IP11_8_6,
+ GP_3_2_FN, FN_IP11_5_3,
+ GP_3_1_FN, FN_IP11_2_0,
+ GP_3_0_FN, FN_IP10_31_29 }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xfffc0014, 32, 1) {
+ GP_4_31_FN, FN_IP8_19,
+ GP_4_30_FN, FN_IP8_18,
+ GP_4_29_FN, FN_IP8_17_16,
+ GP_4_28_FN, FN_IP0_2_0,
+ GP_4_27_FN, FN_USB_PENC1,
+ GP_4_26_FN, FN_USB_PENC0,
+ GP_4_25_FN, FN_IP8_15_12,
+ GP_4_24_FN, FN_IP8_11_8,
+ GP_4_23_FN, FN_IP8_7_4,
+ GP_4_22_FN, FN_IP8_3_0,
+ GP_4_21_FN, FN_IP2_3_0,
+ GP_4_20_FN, FN_IP1_28_25,
+ GP_4_19_FN, FN_IP2_15_12,
+ GP_4_18_FN, FN_IP2_11_8,
+ GP_4_17_FN, FN_IP2_7_4,
+ GP_4_16_FN, FN_IP7_14_13,
+ GP_4_15_FN, FN_IP7_12_10,
+ GP_4_14_FN, FN_IP7_9_7,
+ GP_4_13_FN, FN_IP7_6_4,
+ GP_4_12_FN, FN_IP7_3_2,
+ GP_4_11_FN, FN_IP7_1_0,
+ GP_4_10_FN, FN_IP6_30_29,
+ GP_4_9_FN, FN_IP6_26_25,
+ GP_4_8_FN, FN_IP6_24_23,
+ GP_4_7_FN, FN_IP6_22_20,
+ GP_4_6_FN, FN_IP6_19_18,
+ GP_4_5_FN, FN_IP6_17_15,
+ GP_4_4_FN, FN_IP6_14_12,
+ GP_4_3_FN, FN_IP6_11_9,
+ GP_4_2_FN, FN_IP6_8,
+ GP_4_1_FN, FN_IP6_7_6,
+ GP_4_0_FN, FN_IP6_5_4 }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xfffc0018, 32, 1) {
+ GP_5_31_FN, FN_IP3_5,
+ GP_5_30_FN, FN_IP3_4,
+ GP_5_29_FN, FN_IP3_3,
+ GP_5_28_FN, FN_IP2_27,
+ GP_5_27_FN, FN_IP2_26,
+ GP_5_26_FN, FN_IP2_25,
+ GP_5_25_FN, FN_IP2_24,
+ GP_5_24_FN, FN_IP2_23,
+ GP_5_23_FN, FN_IP2_22,
+ GP_5_22_FN, FN_IP3_28,
+ GP_5_21_FN, FN_IP3_27,
+ GP_5_20_FN, FN_IP3_23,
+ GP_5_19_FN, FN_EX_WAIT0,
+ GP_5_18_FN, FN_WE1,
+ GP_5_17_FN, FN_WE0,
+ GP_5_16_FN, FN_RD,
+ GP_5_15_FN, FN_A16,
+ GP_5_14_FN, FN_A15,
+ GP_5_13_FN, FN_A14,
+ GP_5_12_FN, FN_A13,
+ GP_5_11_FN, FN_A12,
+ GP_5_10_FN, FN_A11,
+ GP_5_9_FN, FN_A10,
+ GP_5_8_FN, FN_A9,
+ GP_5_7_FN, FN_A8,
+ GP_5_6_FN, FN_A7,
+ GP_5_5_FN, FN_A6,
+ GP_5_4_FN, FN_A5,
+ GP_5_3_FN, FN_A4,
+ GP_5_2_FN, FN_A3,
+ GP_5_1_FN, FN_A2,
+ GP_5_0_FN, FN_A1 }
+ },
+ { PINMUX_CFG_REG("GPSR6", 0xfffc001c, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_6_8_FN, FN_IP3_20,
+ GP_6_7_FN, FN_IP3_19,
+ GP_6_6_FN, FN_IP3_18,
+ GP_6_5_FN, FN_IP3_17,
+ GP_6_4_FN, FN_IP3_16,
+ GP_6_3_FN, FN_IP3_15,
+ GP_6_2_FN, FN_IP3_8,
+ GP_6_1_FN, FN_IP3_7,
+ GP_6_0_FN, FN_IP3_6 }
+ },
+
+ { PINMUX_CFG_REG_VAR("IPSR0", 0xfffc0020, 32,
+ 1, 3, 2, 1, 2, 4, 3, 2, 2, 2, 2, 2, 3, 3) {
+ /* IP0_31 [1] */
+ 0, 0,
+ /* IP0_30_28 [3] */
+ FN_RD_WR, FN_FWE, FN_ATAG0, FN_VI1_R7,
+ FN_HRTS1, FN_RX4_C, 0, 0,
+ /* IP0_27_26 [2] */
+ FN_CS1_A26, FN_HSPI_TX2, FN_SDSELF_B, 0,
+ /* IP0_25 [1] */
+ FN_CS0, FN_HSPI_CS2_B,
+ /* IP0_24_23 [2] */
+ FN_CLKOUT, FN_TX3C_IRDA_TX_C, FN_PWM0_B, 0,
+ /* IP0_22_19 [4] */
+ FN_A25, FN_SD1_WP, FN_MMC0_D5, FN_FD5,
+ FN_HSPI_RX2, FN_VI1_R3, FN_TX5_B, FN_SSI_SDATA7_B,
+ FN_CTS0_B, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP0_18_16 [3] */
+ FN_A24, FN_SD1_CD, FN_MMC0_D4, FN_FD4,
+ FN_HSPI_CS2, FN_VI1_R2, FN_SSI_WS78_B, 0,
+ /* IP0_15_14 [2] */
+ FN_A23, FN_FCLE, FN_HSPI_CLK2, FN_VI1_R1,
+ /* IP0_13_12 [2] */
+ FN_A22, FN_RX5_D, FN_HSPI_RX2_B, FN_VI1_R0,
+ /* IP0_11_10 [2] */
+ FN_A21, FN_SCK5_D, FN_HSPI_CLK2_B, 0,
+ /* IP0_9_8 [2] */
+ FN_A20, FN_TX5_D, FN_HSPI_TX2_B, 0,
+ /* IP0_7_6 [2] */
+ FN_A0, FN_SD1_DAT3, FN_MMC0_D3, FN_FD3,
+ /* IP0_5_3 [3] */
+ FN_BS, FN_SD1_DAT2, FN_MMC0_D2, FN_FD2,
+ FN_ATADIR0, FN_SDSELF, FN_HCTS1, FN_TX4_C,
+ /* IP0_2_0 [3] */
+ FN_USB_PENC2, FN_SCK0, FN_PWM1, FN_PWMFSW0,
+ FN_SCIF_CLK, FN_TCLK0_C, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR1", 0xfffc0024, 32,
+ 3, 4, 2, 2, 2, 4, 4, 4, 3, 2, 2) {
+ /* IP1_31_29 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_28_25 [4] */
+ FN_HTX0, FN_TX1, FN_SDATA, FN_CTS0_C,
+ FN_SUB_TCK, FN_CC5_STATE2, FN_CC5_STATE10, FN_CC5_STATE18,
+ FN_CC5_STATE26, FN_CC5_STATE34, 0, 0,
+ 0, 0, 0, 0,
+ /* IP1_24_23 [2] */
+ FN_MLB_DAT, FN_PWM4, FN_RX4, 0,
+ /* IP1_22_21 [2] */
+ FN_MLB_SIG, FN_PWM3, FN_TX4, 0,
+ /* IP1_20_19 [2] */
+ FN_MLB_CLK, FN_PWM2, FN_SCK4, 0,
+ /* IP1_18_15 [4] */
+ FN_EX_CS5, FN_SD1_DAT1, FN_MMC0_D1, FN_FD1,
+ FN_ATAWR0, FN_VI1_R6, FN_HRX1, FN_RX2_E,
+ FN_RX0_B, FN_SSI_WS9, 0, 0,
+ 0, 0, 0, 0,
+ /* IP1_14_11 [4] */
+ FN_EX_CS4, FN_SD1_DAT0, FN_MMC0_D0, FN_FD0,
+ FN_ATARD0, FN_VI1_R5, FN_SCK5_B, FN_HTX1,
+ FN_TX2_E, FN_TX0_B, FN_SSI_SCK9, 0,
+ 0, 0, 0, 0,
+ /* IP1_10_7 [4] */
+ FN_EX_CS3, FN_SD1_CMD, FN_MMC0_CMD, FN_FRE,
+ FN_ATACS10, FN_VI1_R4, FN_RX5_B, FN_HSCK1,
+ FN_SSI_SDATA8_B, FN_RTS0_B_TANS_B, FN_SSI_SDATA9, 0,
+ 0, 0, 0, 0,
+ /* IP1_6_4 [3] */
+ FN_EX_CS2, FN_SD1_CLK, FN_MMC0_CLK, FN_FALE,
+ FN_ATACS00, 0, 0, 0,
+ /* IP1_3_2 [2] */
+ FN_EX_CS1, FN_MMC0_D7, FN_FD7, 0,
+ /* IP1_1_0 [2] */
+ FN_EX_CS0, FN_RX3_C_IRDA_RX_C, FN_MMC0_D6, FN_FD6 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR2", 0xfffc0028, 32,
+ 1, 3, 1, 1, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4) {
+ /* IP2_31 [1] */
+ 0, 0,
+ /* IP2_30_28 [3] */
+ FN_DU0_DG0, FN_LCDOUT8, FN_DREQ1, FN_SCL2,
+ FN_AUDATA2, 0, 0, 0,
+ /* IP2_27 [1] */
+ FN_DU0_DR7, FN_LCDOUT7,
+ /* IP2_26 [1] */
+ FN_DU0_DR6, FN_LCDOUT6,
+ /* IP2_25 [1] */
+ FN_DU0_DR5, FN_LCDOUT5,
+ /* IP2_24 [1] */
+ FN_DU0_DR4, FN_LCDOUT4,
+ /* IP2_23 [1] */
+ FN_DU0_DR3, FN_LCDOUT3,
+ /* IP2_22 [1] */
+ FN_DU0_DR2, FN_LCDOUT2,
+ /* IP2_21_19 [3] */
+ FN_DU0_DR1, FN_LCDOUT1, FN_DACK0, FN_DRACK0,
+ FN_GPS_SIGN_B, FN_AUDATA1, FN_RX5_C, 0,
+ /* IP2_18_16 [3] */
+ FN_DU0_DR0, FN_LCDOUT0, FN_DREQ0, FN_GPS_CLK_B,
+ FN_AUDATA0, FN_TX5_C, 0, 0,
+ /* IP2_15_12 [4] */
+ FN_HRTS0, FN_RTS1_TANS, FN_MDATA, FN_TX0_C,
+ FN_SUB_TMS, FN_CC5_STATE1, FN_CC5_STATE9, FN_CC5_STATE17,
+ FN_CC5_STATE25, FN_CC5_STATE33, 0, 0,
+ 0, 0, 0, 0,
+ /* IP2_11_8 [4] */
+ FN_HCTS0, FN_CTS1, FN_STM, FN_PWM0_D,
+ FN_RX0_C, FN_SCIF_CLK_C, FN_SUB_TRST, FN_TCLK1_B,
+ FN_CC5_OSCOUT, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP2_7_4 [4] */
+ FN_HSCK0, FN_SCK1, FN_MTS, FN_PWM5,
+ FN_SCK0_C, FN_SSI_SDATA9_B, FN_SUB_TDO, FN_CC5_STATE0,
+ FN_CC5_STATE8, FN_CC5_STATE16, FN_CC5_STATE24, FN_CC5_STATE32,
+ 0, 0, 0, 0,
+ /* IP2_3_0 [4] */
+ FN_HRX0, FN_RX1, FN_SCKZ, FN_RTS0_C_TANS_C,
+ FN_SUB_TDI, FN_CC5_STATE3, FN_CC5_STATE11, FN_CC5_STATE19,
+ FN_CC5_STATE27, FN_CC5_STATE35, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR3", 0xfffc002c, 32,
+ 3, 1, 1, 3, 1, 2, 1, 1, 1, 1, 1,
+ 1, 3, 3, 1, 1, 1, 1, 1, 1, 3) {
+ /* IP3_31_29 [3] */
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CAN1_TX, FN_TX2_C,
+ FN_SCL2_C, FN_REMOCON, 0, 0,
+ /* IP3_28 [1] */
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE,
+ /* IP3_27 [1] */
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_QSTH_QHS,
+ /* IP3_26_24 [3] */
+ FN_DU0_DOTCLKOUT1, FN_QSTVB_QVE, FN_RX3_D_IRDA_RX_D, FN_SDA3_B,
+ FN_SDA2_C, FN_DACK0_B, FN_DRACK0_B, 0,
+ /* IP3_23 [1] */
+ FN_DU0_DOTCLKOUT0, FN_QCLK,
+ /* IP3_22_21 [2] */
+ FN_DU0_DOTCLKIN, FN_QSTVA_QVS, FN_TX3_D_IRDA_TX_D, FN_SCL3_B,
+ /* IP3_20 [1] */
+ FN_DU0_DB7, FN_LCDOUT23,
+ /* IP3_19 [1] */
+ FN_DU0_DB6, FN_LCDOUT22,
+ /* IP3_18 [1] */
+ FN_DU0_DB5, FN_LCDOUT21,
+ /* IP3_17 [1] */
+ FN_DU0_DB4, FN_LCDOUT20,
+ /* IP3_16 [1] */
+ FN_DU0_DB3, FN_LCDOUT19,
+ /* IP3_15 [1] */
+ FN_DU0_DB2, FN_LCDOUT18,
+ /* IP3_14_12 [3] */
+ FN_DU0_DB1, FN_LCDOUT17, FN_EX_WAIT2, FN_SDA1,
+ FN_GPS_MAG_B, FN_AUDATA5, FN_SCK5_C, 0,
+ /* IP3_11_9 [3] */
+ FN_DU0_DB0, FN_LCDOUT16, FN_EX_WAIT1, FN_SCL1,
+ FN_TCLK1, FN_AUDATA4, 0, 0,
+ /* IP3_8 [1] */
+ FN_DU0_DG7, FN_LCDOUT15,
+ /* IP3_7 [1] */
+ FN_DU0_DG6, FN_LCDOUT14,
+ /* IP3_6 [1] */
+ FN_DU0_DG5, FN_LCDOUT13,
+ /* IP3_5 [1] */
+ FN_DU0_DG4, FN_LCDOUT12,
+ /* IP3_4 [1] */
+ FN_DU0_DG3, FN_LCDOUT11,
+ /* IP3_3 [1] */
+ FN_DU0_DG2, FN_LCDOUT10,
+ /* IP3_2_0 [3] */
+ FN_DU0_DG1, FN_LCDOUT9, FN_DACK1, FN_SDA2,
+ FN_AUDATA3, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR4", 0xfffc0030, 32,
+ 3, 1, 1, 1, 1, 1, 1, 3, 3,
+ 1, 1, 1, 1, 1, 1, 3, 3, 3, 2) {
+ /* IP4_31_29 [3] */
+ FN_DU1_DB0, FN_VI2_DATA4_VI2_B4, FN_SCL2_B, FN_SD3_DAT0,
+ FN_TX5, FN_SCK0_D, 0, 0,
+ /* IP4_28 [1] */
+ FN_DU1_DG7, FN_VI2_R3,
+ /* IP4_27 [1] */
+ FN_DU1_DG6, FN_VI2_R2,
+ /* IP4_26 [1] */
+ FN_DU1_DG5, FN_VI2_R1,
+ /* IP4_25 [1] */
+ FN_DU1_DG4, FN_VI2_R0,
+ /* IP4_24 [1] */
+ FN_DU1_DG3, FN_VI2_G7,
+ /* IP4_23 [1] */
+ FN_DU1_DG2, FN_VI2_G6,
+ /* IP4_22_20 [3] */
+ FN_DU1_DG1, FN_VI2_DATA3_VI2_B3, FN_SDA1_B, FN_SD3_DAT3,
+ FN_SCK5, FN_AUDATA7, FN_RX0_D, 0,
+ /* IP4_19_17 [3] */
+ FN_DU1_DG0, FN_VI2_DATA2_VI2_B2, FN_SCL1_B, FN_SD3_DAT2,
+ FN_SCK3_E, FN_AUDATA6, FN_TX0_D, 0,
+ /* IP4_16 [1] */
+ FN_DU1_DR7, FN_VI2_G5,
+ /* IP4_15 [1] */
+ FN_DU1_DR6, FN_VI2_G4,
+ /* IP4_14 [1] */
+ FN_DU1_DR5, FN_VI2_G3,
+ /* IP4_13 [1] */
+ FN_DU1_DR4, FN_VI2_G2,
+ /* IP4_12 [1] */
+ FN_DU1_DR3, FN_VI2_G1,
+ /* IP4_11 [1] */
+ FN_DU1_DR2, FN_VI2_G0,
+ /* IP4_10_8 [3] */
+ FN_DU1_DR1, FN_VI2_DATA1_VI2_B1, FN_PWM0, FN_SD3_CMD,
+ FN_RX3_E_IRDA_RX_E, FN_AUDSYNC, FN_CTS0_D, 0,
+ /* IP4_7_5 [3] */
+ FN_DU1_DR0, FN_VI2_DATA0_VI2_B0, FN_PWM6, FN_SD3_CLK,
+ FN_TX3_E_IRDA_TX_E, FN_AUDCK, FN_PWMFSW0_B, 0,
+ /* IP4_4_2 [3] */
+ FN_DU0_CDE, FN_QPOLB, FN_CAN1_RX, FN_RX2_C,
+ FN_DREQ0_B, FN_SSI_SCK78_B, FN_SCK0_B, 0,
+ /* IP4_1_0 [2] */
+ FN_DU0_DISP, FN_QPOLA, FN_CAN_CLK_C, FN_SCK2_C }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR5", 0xfffc0034, 32,
+ 1, 2, 1, 4, 3, 4, 2, 2,
+ 2, 2, 1, 1, 1, 1, 1, 1, 3) {
+ /* IP5_31 [1] */
+ 0, 0,
+ /* IP5_30_29 [2] */
+ FN_AUDIO_CLKB, FN_USB_OVC2, FN_CAN_DEBUGOUT0, FN_MOUT0,
+ /* IP5_28 [1] */
+ FN_AUDIO_CLKA, FN_CAN_TXCLK,
+ /* IP5_27_24 [4] */
+ FN_DU1_CDE, FN_VI2_DATA7_VI2_B7, FN_RX3_B_IRDA_RX_B, FN_SD3_WP,
+ FN_HSPI_RX1, FN_VI1_FIELD, FN_VI3_FIELD, FN_AUDIO_CLKOUT,
+ FN_RX2_D, FN_GPS_CLK_C, FN_GPS_CLK_D, 0,
+ 0, 0, 0, 0,
+ /* IP5_23_21 [3] */
+ FN_DU1_DISP, FN_VI2_DATA6_VI2_B6, FN_TCLK0, FN_QSTVA_B_QVS_B,
+ FN_HSPI_CLK1, FN_SCK2_D, FN_AUDIO_CLKOUT_B, FN_GPS_MAG_D,
+ /* IP5_20_17 [4] */
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_VI2_CLK, FN_TX3_B_IRDA_TX_B,
+ FN_SD3_CD, FN_HSPI_TX1, FN_VI1_CLKENB, FN_VI3_CLKENB,
+ FN_AUDIO_CLKC, FN_TX2_D, FN_SPEEDIN, FN_GPS_SIGN_D, 0,
+ 0, 0, 0, 0,
+ /* IP5_16_15 [2] */
+ FN_DU1_EXVSYNC_DU1_VSYNC, FN_VI2_VSYNC, FN_VI3_VSYNC, 0,
+ /* IP5_14_13 [2] */
+ FN_DU1_EXHSYNC_DU1_HSYNC, FN_VI2_HSYNC, FN_VI3_HSYNC, 0,
+ /* IP5_12_11 [2] */
+ FN_DU1_DOTCLKOUT, FN_VI2_FIELD, FN_SDA1_D, 0,
+ /* IP5_10_9 [2] */
+ FN_DU1_DOTCLKIN, FN_VI2_CLKENB, FN_HSPI_CS1, FN_SCL1_D,
+ /* IP5_8 [1] */
+ FN_DU1_DB7, FN_SDA2_D,
+ /* IP5_7 [1] */
+ FN_DU1_DB6, FN_SCL2_D,
+ /* IP5_6 [1] */
+ FN_DU1_DB5, FN_VI2_R7,
+ /* IP5_5 [1] */
+ FN_DU1_DB4, FN_VI2_R6,
+ /* IP5_4 [1] */
+ FN_DU1_DB3, FN_VI2_R5,
+ /* IP5_3 [1] */
+ FN_DU1_DB2, FN_VI2_R4,
+ /* IP5_2_0 [3] */
+ FN_DU1_DB1, FN_VI2_DATA5_VI2_B5, FN_SDA2_B, FN_SD3_DAT1,
+ FN_RX5, FN_RTS0_D_TANS_D, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR6", 0xfffc0038, 32,
+ 1, 2, 2, 2, 2, 3, 2, 3, 3, 3, 1, 2, 2, 2, 2) {
+ /* IP6_31 [1] */
+ 0, 0,
+ /* IP6_30_29 [2] */
+ FN_SSI_SCK6, FN_ADICHS0, FN_CAN0_TX, FN_IERX_B,
+ /* IP6_28_27 [2] */
+ 0, 0, 0, 0,
+ /* IP6_26_25 [2] */
+ FN_SSI_SDATA5, FN_ADIDATA, FN_CAN_DEBUGOUT12, FN_RX3_IRDA_RX,
+ /* IP6_24_23 [2] */
+ FN_SSI_WS5, FN_ADICS_SAMP, FN_CAN_DEBUGOUT11, FN_TX3_IRDA_TX,
+ /* IP6_22_20 [3] */
+ FN_SSI_SCK5, FN_ADICLK, FN_CAN_DEBUGOUT10, FN_SCK3,
+ FN_TCLK0_D, 0, 0, 0,
+ /* IP6_19_18 [2] */
+ FN_SSI_SDATA4, FN_CAN_DEBUGOUT9, FN_SSI_SDATA9_C, 0,
+ /* IP6_17_15 [3] */
+ FN_SSI_SDATA3, FN_PWM0_C, FN_CAN_DEBUGOUT8, FN_CAN_CLK_B,
+ FN_IECLK, FN_SCIF_CLK_B, FN_TCLK0_B, 0,
+ /* IP6_14_12 [3] */
+ FN_SSI_WS34, FN_CAN_DEBUGOUT7, FN_CAN0_RX_B, FN_IETX,
+ FN_SSI_WS9_C, 0, 0, 0,
+ /* IP6_11_9 [3] */
+ FN_SSI_SCK34, FN_CAN_DEBUGOUT6, FN_CAN0_TX_B, FN_IERX,
+ FN_SSI_SCK9_C, 0, 0, 0,
+ /* IP6_8 [1] */
+ FN_SSI_SDATA2, FN_CAN_DEBUGOUT5,
+ /* IP6_7_6 [2] */
+ FN_SSI_SDATA1, FN_CAN_DEBUGOUT4, FN_MOUT6, 0,
+ /* IP6_5_4 [2] */
+ FN_SSI_SDATA0, FN_CAN_DEBUGOUT3, FN_MOUT5, 0,
+ /* IP6_3_2 [2] */
+ FN_SSI_WS0129, FN_CAN_DEBUGOUT2, FN_MOUT2, 0,
+ /* IP6_1_0 [2] */
+ FN_SSI_SCK0129, FN_CAN_DEBUGOUT1, FN_MOUT1, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xfffc003c, 32,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2) {
+ /* IP7_31 [1] */
+ 0, 0,
+ /* IP7_30_29 [2] */
+ FN_SD0_WP, FN_DACK2, FN_CTS1_B, 0,
+ /* IP7_28_27 [2] */
+ FN_SD0_CD, FN_DREQ2, FN_RTS1_B_TANS_B, 0,
+ /* IP7_26_25 [2] */
+ FN_SD0_DAT3, FN_ATAWR1, FN_RX2_B, FN_CC5_TDI,
+ /* IP7_24_23 [2] */
+ FN_SD0_DAT2, FN_ATARD1, FN_TX2_B, FN_CC5_TCK,
+ /* IP7_22_21 [2] */
+ FN_SD0_DAT1, FN_ATAG1, FN_SCK2_B, FN_CC5_TMS,
+ /* IP7_20_19 [2] */
+ FN_SD0_DAT0, FN_ATADIR1, FN_RX1_B, FN_CC5_TRST,
+ /* IP7_18_17 [2] */
+ FN_SD0_CMD, FN_ATACS11, FN_TX1_B, FN_CC5_TDO,
+ /* IP7_16_15 [2] */
+ FN_SD0_CLK, FN_ATACS01, FN_SCK1_B, 0,
+ /* IP7_14_13 [2] */
+ FN_SSI_SDATA8, FN_VSP, FN_IRQ3_B, FN_HSPI_RX1_C,
+ /* IP7_12_10 [3] */
+ FN_SSI_SDATA7, FN_CAN_DEBUGOUT15, FN_IRQ2_B, FN_TCLK1_C,
+ FN_HSPI_TX1_C, 0, 0, 0,
+ /* IP7_9_7 [3] */
+ FN_SSI_WS78, FN_CAN_DEBUGOUT14, FN_IRQ1_B, FN_SSI_WS9_B,
+ FN_HSPI_CS1_C, 0, 0, 0,
+ /* IP7_6_4 [3] */
+ FN_SSI_SCK78, FN_CAN_DEBUGOUT13, FN_IRQ0_B, FN_SSI_SCK9_B,
+ FN_HSPI_CLK1_C, 0, 0, 0,
+ /* IP7_3_2 [2] */
+ FN_SSI_SDATA6, FN_ADICHS2, FN_CAN_CLK, FN_IECLK_B,
+ /* IP7_1_0 [2] */
+ FN_SSI_WS6, FN_ADICHS1, FN_CAN0_RX, FN_IETX_B }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR8", 0xfffc0040, 32,
+ 1, 3, 3, 2, 2, 1, 1, 1, 2, 4, 4, 4, 4) {
+ /* IP8_31 [1] */
+ 0, 0,
+ /* IP8_30_28 [3] */
+ FN_VI0_VSYNC, FN_VI0_DATA1_B_VI0_B1_B, FN_RTS1_C_TANS_C, FN_RX4_D,
+ FN_PWMFSW0_C, 0, 0, 0,
+ /* IP8_27_25 [3] */
+ FN_VI0_HSYNC, FN_VI0_DATA0_B_VI0_B0_B, FN_CTS1_C, FN_TX4_D,
+ FN_MMC1_CMD, FN_HSCK1_B, 0, 0,
+ /* IP8_24_23 [2] */
+ FN_VI0_FIELD, FN_RX1_C, FN_HRX1_B, 0,
+ /* IP8_22_21 [2] */
+ FN_VI0_CLKENB, FN_TX1_C, FN_HTX1_B, FN_MT1_SYNC,
+ /* IP8_20 [1] */
+ FN_VI0_CLK, FN_MMC1_CLK,
+ /* IP8_19 [1] */
+ FN_FMIN, FN_RDS_DATA,
+ /* IP8_18 [1] */
+ FN_BPFCLK, FN_PCMWE,
+ /* IP8_17_16 [2] */
+ FN_FMCLK, FN_RDS_CLK, FN_PCMOE, 0,
+ /* IP8_15_12 [4] */
+ FN_HSPI_RX0, FN_RX0, FN_CAN_STEP0, FN_AD_NCS,
+ FN_CC5_STATE7, FN_CC5_STATE15, FN_CC5_STATE23, FN_CC5_STATE31,
+ FN_CC5_STATE39, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP8_11_8 [4] */
+ FN_HSPI_TX0, FN_TX0, FN_CAN_DEBUG_HW_TRIGGER, FN_AD_DO,
+ FN_CC5_STATE6, FN_CC5_STATE14, FN_CC5_STATE22, FN_CC5_STATE30,
+ FN_CC5_STATE38, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP8_7_4 [4] */
+ FN_HSPI_CS0, FN_RTS0_TANS, FN_USB_OVC1, FN_AD_DI,
+ FN_CC5_STATE5, FN_CC5_STATE13, FN_CC5_STATE21, FN_CC5_STATE29,
+ FN_CC5_STATE37, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP8_3_0 [4] */
+ FN_HSPI_CLK0, FN_CTS0, FN_USB_OVC0, FN_AD_CLK,
+ FN_CC5_STATE4, FN_CC5_STATE12, FN_CC5_STATE20, FN_CC5_STATE28,
+ FN_CC5_STATE36, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR9", 0xfffc0044, 32,
+ 2, 2, 2, 2, 2, 3, 3, 2, 2,
+ 2, 2, 1, 1, 1, 1, 2, 2) {
+ /* IP9_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP9_29_28 [2] */
+ FN_VI0_G7, FN_ETH_RXD1, FN_SD2_DAT3_B, FN_ARM_TRACEDATA_9,
+ /* IP9_27_26 [2] */
+ FN_VI0_G6, FN_ETH_RXD0, FN_SD2_DAT2_B, FN_ARM_TRACEDATA_8,
+ /* IP9_25_24 [2] */
+ FN_VI0_G5, FN_ETH_RX_ER, FN_SD2_DAT1_B, FN_ARM_TRACEDATA_7,
+ /* IP9_23_22 [2] */
+ FN_VI0_G4, FN_ETH_TX_EN, FN_SD2_DAT0_B, FN_ARM_TRACEDATA_6,
+ /* IP9_21_19 [3] */
+ FN_VI0_G3, FN_ETH_CRS_DV, FN_MMC1_D7, FN_ARM_TRACEDATA_5,
+ FN_TS_SDAT0, 0, 0, 0,
+ /* IP9_18_16 [3] */
+ FN_VI0_G2, FN_ETH_TXD1, FN_MMC1_D6, FN_ARM_TRACEDATA_4,
+ FN_TS_SPSYNC0, 0, 0, 0,
+ /* IP9_15_14 [2] */
+ FN_VI0_G1, FN_SSI_WS78_C, FN_IRQ1, FN_ARM_TRACEDATA_3,
+ /* IP9_13_12 [2] */
+ FN_VI0_G0, FN_SSI_SCK78_C, FN_IRQ0, FN_ARM_TRACEDATA_2,
+ /* IP9_11_10 [2] */
+ FN_VI0_DATA7_VI0_B7, FN_MMC1_D5, FN_ARM_TRACEDATA_1, 0,
+ /* IP9_9_8 [2] */
+ FN_VI0_DATA6_VI0_B6, FN_MMC1_D4, FN_ARM_TRACEDATA_0, 0,
+ /* IP9_7 [1] */
+ FN_VI0_DATA5_VI0_B5, FN_MMC1_D3,
+ /* IP9_6 [1] */
+ FN_VI0_DATA4_VI0_B4, FN_MMC1_D2,
+ /* IP9_5 [1] */
+ FN_VI0_DATA3_VI0_B3, FN_MMC1_D1,
+ /* IP9_4 [1] */
+ FN_VI0_DATA2_VI0_B2, FN_MMC1_D0,
+ /* IP9_3_2 [2] */
+ FN_VI0_DATA1_VI0_B1, FN_HCTS1_B, FN_MT1_PWM, 0,
+ /* IP9_1_0 [2] */
+ FN_VI0_DATA0_VI0_B0, FN_HRTS1_B, FN_MT1_VCXO, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR10", 0xfffc0048, 32,
+ 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP10_31_29 [3] */
+ FN_VI1_VSYNC, FN_AUDIO_CLKOUT_C, FN_SSI_WS4, FN_SIM_CLK,
+ FN_GPS_MAG_C, FN_SPV_TRST, FN_SCL3, 0,
+ /* IP10_28_26 [3] */
+ FN_VI1_HSYNC, FN_VI3_CLK, FN_SSI_SCK4, FN_GPS_SIGN_C,
+ FN_PWMFSW0_E, 0, 0, 0,
+ /* IP10_25_24 [2] */
+ FN_VI1_CLK, FN_SIM_D, FN_SDA3, 0,
+ /* IP10_23_21 [3] */
+ FN_VI0_R7, FN_ETH_MDIO, FN_DACK2_C, FN_HSPI_RX1_B,
+ FN_SCIF_CLK_D, FN_TRACECTL, FN_MT1_PEN, 0,
+ /* IP10_20_18 [3] */
+ FN_VI0_R6, FN_ETH_MDC, FN_DREQ2_C, FN_HSPI_TX1_B,
+ FN_TRACECLK, FN_MT1_BEN, FN_PWMFSW0_D, 0,
+ /* IP10_17_15 [3] */
+ FN_VI0_R5, FN_ETH_TXD0, FN_SD2_WP_B, FN_HSPI_CS1_B,
+ FN_ARM_TRACEDATA_15, FN_MT1_D, FN_TS_SDEN0, 0,
+ /* IP10_14_12 [3] */
+ FN_VI0_R4, FN_ETH_REFCLK, FN_SD2_CD_B, FN_HSPI_CLK1_B,
+ FN_ARM_TRACEDATA_14, FN_MT1_CLK, FN_TS_SCK0, 0,
+ /* IP10_11_9 [3] */
+ FN_VI0_R3, FN_ETH_MAGIC, FN_SD2_CMD_B, FN_IRQ3,
+ FN_ARM_TRACEDATA_13, 0, 0, 0,
+ /* IP10_8_6 [3] */
+ FN_VI0_R2, FN_ETH_LINK, FN_SD2_CLK_B, FN_IRQ2,
+ FN_ARM_TRACEDATA_12, 0, 0, 0,
+ /* IP10_5_3 [3] */
+ FN_VI0_R1, FN_SSI_SDATA8_C, FN_DACK1_B, FN_ARM_TRACEDATA_11,
+ FN_DACK0_C, FN_DRACK0_C, 0, 0,
+ /* IP10_2_0 [3] */
+ FN_VI0_R0, FN_SSI_SDATA7_C, FN_SCK1_C, FN_DREQ1_B,
+ FN_ARM_TRACEDATA_10, FN_DREQ0_C, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR11", 0xfffc004c, 32,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP11_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP11_29_27 [3] */
+ FN_VI1_G1, FN_VI3_DATA1, FN_SSI_SCK1, FN_TS_SDEN1,
+ FN_DACK2_B, FN_RX2, FN_HRTS0_B, 0,
+ /* IP11_26_24 [3] */
+ FN_VI1_G0, FN_VI3_DATA0, FN_DU1_DOTCLKOUT1, FN_TS_SCK1,
+ FN_DREQ2_B, FN_TX2, FN_SPA_TDO, FN_HCTS0_B,
+ /* IP11_23_21 [3] */
+ FN_VI1_DATA7_VI1_B7, FN_SD2_WP, FN_MT0_PWM, FN_SPA_TDI,
+ FN_HSPI_RX1_D, 0, 0, 0,
+ /* IP11_20_18 [3] */
+ FN_VI1_DATA6_VI1_B6, FN_SD2_CD, FN_MT0_VCXO, FN_SPA_TMS,
+ FN_HSPI_TX1_D, 0, 0, 0,
+ /* IP11_17_15 [3] */
+ FN_VI1_DATA5_VI1_B5, FN_SD2_CMD, FN_MT0_SYNC, FN_SPA_TCK,
+ FN_HSPI_CS1_D, FN_ADICHS2_B, 0, 0,
+ /* IP11_14_12 [3] */
+ FN_VI1_DATA4_VI1_B4, FN_SD2_CLK, FN_MT0_PEN, FN_SPA_TRST,
+ FN_HSPI_CLK1_D, FN_ADICHS1_B, 0, 0,
+ /* IP11_11_9 [3] */
+ FN_VI1_DATA3_VI1_B3, FN_SD2_DAT3, FN_MT0_BEN, FN_SPV_TDO,
+ FN_ADICHS0_B, 0, 0, 0,
+ /* IP11_8_6 [3] */
+ FN_VI1_DATA2_VI1_B2, FN_SD2_DAT2, FN_MT0_D, FN_SPVTDI,
+ FN_ADIDATA_B, 0, 0, 0,
+ /* IP11_5_3 [3] */
+ FN_VI1_DATA1_VI1_B1, FN_SD2_DAT1, FN_MT0_CLK, FN_SPV_TMS,
+ FN_ADICS_B_SAMP_B, 0, 0, 0,
+ /* IP11_2_0 [3] */
+ FN_VI1_DATA0_VI1_B0, FN_SD2_DAT0, FN_SIM_RST, FN_SPV_TCK,
+ FN_ADICLK_B, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR12", 0xfffc0050, 32,
+ 4, 4, 4, 2, 3, 3, 3, 3, 3, 3) {
+ /* IP12_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_23_20 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_19_18 [2] */
+ 0, 0, 0, 0,
+ /* IP12_17_15 [3] */
+ FN_VI1_G7, FN_VI3_DATA7, FN_GPS_MAG, FN_FCE,
+ FN_SCK4_B, 0, 0, 0,
+ /* IP12_14_12 [3] */
+ FN_VI1_G6, FN_VI3_DATA6, FN_GPS_SIGN, FN_FRB,
+ FN_RX4_B, FN_SIM_CLK_B, 0, 0,
+ /* IP12_11_9 [3] */
+ FN_VI1_G5, FN_VI3_DATA5, FN_GPS_CLK, FN_FSE,
+ FN_TX4_B, FN_SIM_D_B, 0, 0,
+ /* IP12_8_6 [3] */
+ FN_VI1_G4, FN_VI3_DATA4, FN_SSI_WS2, FN_SDA1_C,
+ FN_SIM_RST_B, FN_HRX0_B, 0, 0,
+ /* IP12_5_3 [3] */
+ FN_VI1_G3, FN_VI3_DATA3, FN_SSI_SCK2, FN_TS_SDAT1,
+ FN_SCL1_C, FN_HTX0_B, 0, 0,
+ /* IP12_2_0 [3] */
+ FN_VI1_G2, FN_VI3_DATA2, FN_SSI_WS1, FN_TS_SPSYNC1,
+ FN_SCK2, FN_HSCK0_B, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL", 0xfffc0090, 32,
+ 2, 2, 3, 3, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 2, 1, 2) {
+ /* SEL_SCIF5 [2] */
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ /* SEL_SCIF4 [2] */
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ /* SEL_SCIF3 [3] */
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
+ FN_SEL_SCIF3_4, 0, 0, 0,
+ /* SEL_SCIF2 [3] */
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2, FN_SEL_SCIF2_3,
+ FN_SEL_SCIF2_4, 0, 0, 0,
+ /* SEL_SCIF1 [2] */
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, 0,
+ /* SEL_SCIF0 [2] */
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ /* SEL_SSI9 [2] */
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1, FN_SEL_SSI9_2, 0,
+ /* SEL_SSI8 [2] */
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2, 0,
+ /* SEL_SSI7 [2] */
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1, FN_SEL_SSI7_2, 0,
+ /* SEL_VI0 [1] */
+ FN_SEL_VI0_0, FN_SEL_VI0_1,
+ /* SEL_SD2 [1] */
+ FN_SEL_SD2_0, FN_SEL_SD2_1,
+ /* SEL_INT3 [1] */
+ FN_SEL_INT3_0, FN_SEL_INT3_1,
+ /* SEL_INT2 [1] */
+ FN_SEL_INT2_0, FN_SEL_INT2_1,
+ /* SEL_INT1 [1] */
+ FN_SEL_INT1_0, FN_SEL_INT1_1,
+ /* SEL_INT0 [1] */
+ FN_SEL_INT0_0, FN_SEL_INT0_1,
+ /* SEL_IE [1] */
+ FN_SEL_IE_0, FN_SEL_IE_1,
+ /* SEL_EXBUS2 [2] */
+ FN_SEL_EXBUS2_0, FN_SEL_EXBUS2_1, FN_SEL_EXBUS2_2, 0,
+ /* SEL_EXBUS1 [1] */
+ FN_SEL_EXBUS1_0, FN_SEL_EXBUS1_1,
+ /* SEL_EXBUS0 [2] */
+ FN_SEL_EXBUS0_0, FN_SEL_EXBUS0_1, FN_SEL_EXBUS0_2, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xfffc0094, 32,
+ 2, 2, 2, 2, 1, 1, 1, 3, 1,
+ 2, 2, 2, 2, 1, 1, 2, 1, 2, 2) {
+ /* SEL_TMU1 [2] */
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1, FN_SEL_TMU1_2, 0,
+ /* SEL_TMU0 [2] */
+ FN_SEL_TMU0_0, FN_SEL_TMU0_1, FN_SEL_TMU0_2, FN_SEL_TMU0_3,
+ /* SEL_SCIF [2] */
+ FN_SEL_SCIF_0, FN_SEL_SCIF_1, FN_SEL_SCIF_2, FN_SEL_SCIF_3,
+ /* SEL_CANCLK [2] */
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2, 0,
+ /* SEL_CAN0 [1] */
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1,
+ /* SEL_HSCIF1 [1] */
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1,
+ /* SEL_HSCIF0 [1] */
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
+ /* SEL_PWMFSW [3] */
+ FN_SEL_PWMFSW_0, FN_SEL_PWMFSW_1, FN_SEL_PWMFSW_2,
+ FN_SEL_PWMFSW_3, FN_SEL_PWMFSW_4, 0, 0, 0,
+ /* SEL_ADI [1] */
+ FN_SEL_ADI_0, FN_SEL_ADI_1,
+ /* [2] */
+ 0, 0, 0, 0,
+ /* [2] */
+ 0, 0, 0, 0,
+ /* [2] */
+ 0, 0, 0, 0,
+ /* SEL_GPS [2] */
+ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3,
+ /* SEL_SIM [1] */
+ FN_SEL_SIM_0, FN_SEL_SIM_1,
+ /* SEL_HSPI2 [1] */
+ FN_SEL_HSPI2_0, FN_SEL_HSPI2_1,
+ /* SEL_HSPI1 [2] */
+ FN_SEL_HSPI1_0, FN_SEL_HSPI1_1, FN_SEL_HSPI1_2, FN_SEL_HSPI1_3,
+ /* SEL_I2C3 [1] */
+ FN_SEL_I2C3_0, FN_SEL_I2C3_1,
+ /* SEL_I2C2 [2] */
+ FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
+ /* SEL_I2C1 [2] */
+ FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3 }
+ },
+ { PINMUX_CFG_REG("INOUTSEL0", 0xffc40004, 32, 1) { GP_INOUTSEL(0) } },
+ { PINMUX_CFG_REG("INOUTSEL1", 0xffc41004, 32, 1) { GP_INOUTSEL(1) } },
+ { PINMUX_CFG_REG("INOUTSEL2", 0xffc42004, 32, 1) { GP_INOUTSEL(2) } },
+ { PINMUX_CFG_REG("INOUTSEL3", 0xffc43004, 32, 1) { GP_INOUTSEL(3) } },
+ { PINMUX_CFG_REG("INOUTSEL4", 0xffc44004, 32, 1) { GP_INOUTSEL(4) } },
+ { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1) { GP_INOUTSEL(5) } },
+ { PINMUX_CFG_REG("INOUTSEL6", 0xffc46004, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_6_8_IN, GP_6_8_OUT,
+ GP_6_7_IN, GP_6_7_OUT,
+ GP_6_6_IN, GP_6_6_OUT,
+ GP_6_5_IN, GP_6_5_OUT,
+ GP_6_4_IN, GP_6_4_OUT,
+ GP_6_3_IN, GP_6_3_OUT,
+ GP_6_2_IN, GP_6_2_OUT,
+ GP_6_1_IN, GP_6_1_OUT,
+ GP_6_0_IN, GP_6_0_OUT, }
+ },
+ { },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("INDT0", 0xffc40008, 32) { GP_INDT(0) } },
+ { PINMUX_DATA_REG("INDT1", 0xffc41008, 32) { GP_INDT(1) } },
+ { PINMUX_DATA_REG("INDT2", 0xffc42008, 32) { GP_INDT(2) } },
+ { PINMUX_DATA_REG("INDT3", 0xffc43008, 32) { GP_INDT(3) } },
+ { PINMUX_DATA_REG("INDT4", 0xffc44008, 32) { GP_INDT(4) } },
+ { PINMUX_DATA_REG("INDT5", 0xffc45008, 32) { GP_INDT(5) } },
+ { PINMUX_DATA_REG("INDT6", 0xffc46008, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, GP_6_8_DATA,
+ GP_6_7_DATA, GP_6_6_DATA, GP_6_5_DATA, GP_6_4_DATA,
+ GP_6_3_DATA, GP_6_2_DATA, GP_6_1_DATA, GP_6_0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info r8a7779_pinmux_info = {
+ .name = "r8a7779_pfc",
+
+ .unlock_reg = 0xfffc0000, /* PMMR */
+
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_GP_0_0,
+ .last_gpio = GPIO_FN_SCK4_B,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
new file mode 100644
index 000000000000..01b425dfd162
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
@@ -0,0 +1,1592 @@
+/*
+ * SH7203 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7203.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC14_DATA, PC13_DATA, PC12_DATA,
+ PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
+ PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF30_DATA, PF29_DATA, PF28_DATA,
+ PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA,
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ FORCE_IN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB11_IN, PB10_IN, PB9_IN, PB8_IN,
+ PC14_IN, PC13_IN, PC12_IN,
+ PC11_IN, PC10_IN, PC9_IN, PC8_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD15_IN, PD14_IN, PD13_IN, PD12_IN,
+ PD11_IN, PD10_IN, PD9_IN, PD8_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE15_IN, PE14_IN, PE13_IN, PE12_IN,
+ PE11_IN, PE10_IN, PE9_IN, PE8_IN,
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF30_IN, PF29_IN, PF28_IN,
+ PF27_IN, PF26_IN, PF25_IN, PF24_IN,
+ PF23_IN, PF22_IN, PF21_IN, PF20_IN,
+ PF19_IN, PF18_IN, PF17_IN, PF16_IN,
+ PF15_IN, PF14_IN, PF13_IN, PF12_IN,
+ PF11_IN, PF10_IN, PF9_IN, PF8_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ FORCE_OUT,
+ PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT,
+ PC14_OUT, PC13_OUT, PC12_OUT,
+ PC11_OUT, PC10_OUT, PC9_OUT, PC8_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT,
+ PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE15_OUT, PE14_OUT, PE13_OUT, PE12_OUT,
+ PE11_OUT, PE10_OUT, PE9_OUT, PE8_OUT,
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF30_OUT, PF29_OUT, PF28_OUT,
+ PF27_OUT, PF26_OUT, PF25_OUT, PF24_OUT,
+ PF23_OUT, PF22_OUT, PF21_OUT, PF20_OUT,
+ PF19_OUT, PF18_OUT, PF17_OUT, PF16_OUT,
+ PF15_OUT, PF14_OUT, PF13_OUT, PF12_OUT,
+ PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PB11_IOR_IN, PB11_IOR_OUT,
+ PB10_IOR_IN, PB10_IOR_OUT,
+ PB9_IOR_IN, PB9_IOR_OUT,
+ PB8_IOR_IN, PB8_IOR_OUT,
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+ PB11MD_0, PB11MD_1,
+ PB10MD_0, PB10MD_1,
+ PB9MD_00, PB9MD_01, PB9MD_10,
+ PB8MD_00, PB8MD_01, PB8MD_10,
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+ PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
+
+ PB12IRQ_00, PB12IRQ_01, PB12IRQ_10,
+
+ PC14MD_0, PC14MD_1,
+ PC13MD_0, PC13MD_1,
+ PC12MD_0, PC12MD_1,
+ PC11MD_00, PC11MD_01, PC11MD_10,
+ PC10MD_00, PC10MD_01, PC10MD_10,
+ PC9MD_0, PC9MD_1,
+ PC8MD_0, PC8MD_1,
+ PC7MD_0, PC7MD_1,
+ PC6MD_0, PC6MD_1,
+ PC5MD_0, PC5MD_1,
+ PC4MD_0, PC4MD_1,
+ PC3MD_0, PC3MD_1,
+ PC2MD_0, PC2MD_1,
+ PC1MD_0, PC1MD_1,
+ PC0MD_00, PC0MD_01, PC0MD_10,
+
+ PD15MD_000, PD15MD_001, PD15MD_010, PD15MD_100, PD15MD_101,
+ PD14MD_000, PD14MD_001, PD14MD_010, PD14MD_101,
+ PD13MD_000, PD13MD_001, PD13MD_010, PD13MD_100, PD13MD_101,
+ PD12MD_000, PD12MD_001, PD12MD_010, PD12MD_100, PD12MD_101,
+ PD11MD_000, PD11MD_001, PD11MD_010, PD11MD_100, PD11MD_101,
+ PD10MD_000, PD10MD_001, PD10MD_010, PD10MD_100, PD10MD_101,
+ PD9MD_000, PD9MD_001, PD9MD_010, PD9MD_100, PD9MD_101,
+ PD8MD_000, PD8MD_001, PD8MD_010, PD8MD_100, PD8MD_101,
+ PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101,
+ PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101,
+ PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101,
+ PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101,
+ PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101,
+ PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101,
+ PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101,
+ PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101,
+
+ PE15MD_00, PE15MD_01, PE15MD_11,
+ PE14MD_00, PE14MD_01, PE14MD_11,
+ PE13MD_00, PE13MD_11,
+ PE12MD_00, PE12MD_11,
+ PE11MD_000, PE11MD_001, PE11MD_010, PE11MD_100,
+ PE10MD_000, PE10MD_001, PE10MD_010, PE10MD_100,
+ PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11,
+ PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
+ PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100,
+ PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100,
+ PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100,
+ PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100,
+ PE3MD_00, PE3MD_01, PE3MD_11,
+ PE2MD_00, PE2MD_01, PE2MD_11,
+ PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11,
+ PE0MD_000, PE0MD_001, PE0MD_011, PE0MD_100,
+
+ PF30MD_0, PF30MD_1,
+ PF29MD_0, PF29MD_1,
+ PF28MD_0, PF28MD_1,
+ PF27MD_0, PF27MD_1,
+ PF26MD_0, PF26MD_1,
+ PF25MD_0, PF25MD_1,
+ PF24MD_0, PF24MD_1,
+ PF23MD_00, PF23MD_01, PF23MD_10,
+ PF22MD_00, PF22MD_01, PF22MD_10,
+ PF21MD_00, PF21MD_01, PF21MD_10,
+ PF20MD_00, PF20MD_01, PF20MD_10,
+ PF19MD_00, PF19MD_01, PF19MD_10,
+ PF18MD_00, PF18MD_01, PF18MD_10,
+ PF17MD_00, PF17MD_01, PF17MD_10,
+ PF16MD_00, PF16MD_01, PF16MD_10,
+ PF15MD_00, PF15MD_01, PF15MD_10,
+ PF14MD_00, PF14MD_01, PF14MD_10,
+ PF13MD_00, PF13MD_01, PF13MD_10,
+ PF12MD_00, PF12MD_01, PF12MD_10,
+ PF11MD_00, PF11MD_01, PF11MD_10,
+ PF10MD_00, PF10MD_01, PF10MD_10,
+ PF9MD_00, PF9MD_01, PF9MD_10,
+ PF8MD_00, PF8MD_01, PF8MD_10,
+ PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
+ PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11,
+ PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11,
+ PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
+ PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
+ PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11,
+ PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11,
+ PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ PINT7_PB_MARK, PINT6_PB_MARK, PINT5_PB_MARK, PINT4_PB_MARK,
+ PINT3_PB_MARK, PINT2_PB_MARK, PINT1_PB_MARK, PINT0_PB_MARK,
+ PINT7_PD_MARK, PINT6_PD_MARK, PINT5_PD_MARK, PINT4_PD_MARK,
+ PINT3_PD_MARK, PINT2_PD_MARK, PINT1_PD_MARK, PINT0_PD_MARK,
+ IRQ7_PB_MARK, IRQ6_PB_MARK, IRQ5_PB_MARK, IRQ4_PB_MARK,
+ IRQ3_PB_MARK, IRQ2_PB_MARK, IRQ1_PB_MARK, IRQ0_PB_MARK,
+ IRQ7_PD_MARK, IRQ6_PD_MARK, IRQ5_PD_MARK, IRQ4_PD_MARK,
+ IRQ3_PD_MARK, IRQ2_PD_MARK, IRQ1_PD_MARK, IRQ0_PD_MARK,
+ IRQ7_PE_MARK, IRQ6_PE_MARK, IRQ5_PE_MARK, IRQ4_PE_MARK,
+ IRQ3_PE_MARK, IRQ2_PE_MARK, IRQ1_PE_MARK, IRQ0_PE_MARK,
+ WDTOVF_MARK, IRQOUT_MARK, REFOUT_MARK, IRQOUT_REFOUT_MARK,
+ UBCTRG_MARK,
+ CTX1_MARK, CRX1_MARK, CTX0_MARK, CTX0_CTX1_MARK,
+ CRX0_MARK, CRX0_CRX1_MARK,
+ SDA3_MARK, SCL3_MARK,
+ SDA2_MARK, SCL2_MARK,
+ SDA1_MARK, SCL1_MARK,
+ SDA0_MARK, SCL0_MARK,
+ TEND0_PD_MARK, TEND0_PE_MARK, DACK0_PD_MARK, DACK0_PE_MARK,
+ DREQ0_PD_MARK, DREQ0_PE_MARK, TEND1_PD_MARK, TEND1_PE_MARK,
+ DACK1_PD_MARK, DACK1_PE_MARK, DREQ1_PD_MARK, DREQ1_PE_MARK,
+ DACK2_MARK, DREQ2_MARK, DACK3_MARK, DREQ3_MARK,
+ ADTRG_PD_MARK, ADTRG_PE_MARK,
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ A21_MARK, CS4_MARK, MRES_MARK, BS_MARK,
+ IOIS16_MARK, CS1_MARK, CS6_CE1B_MARK, CE2B_MARK,
+ CS5_CE1A_MARK, CE2A_MARK, FRAME_MARK, WAIT_MARK,
+ RDWR_MARK, CKE_MARK, CASU_MARK, BREQ_MARK,
+ RASU_MARK, BACK_MARK, CASL_MARK, RASL_MARK,
+ WE3_DQMUU_AH_ICIO_WR_MARK, WE2_DQMUL_ICIORD_MARK,
+ WE1_DQMLU_WE_MARK, WE0_DQMLL_MARK,
+ CS3_MARK, CS2_MARK, A1_MARK, A0_MARK, CS7_MARK,
+ TIOC4D_MARK, TIOC4C_MARK, TIOC4B_MARK, TIOC4A_MARK,
+ TIOC3D_MARK, TIOC3C_MARK, TIOC3B_MARK, TIOC3A_MARK,
+ TIOC2B_MARK, TIOC1B_MARK, TIOC2A_MARK, TIOC1A_MARK,
+ TIOC0D_MARK, TIOC0C_MARK, TIOC0B_MARK, TIOC0A_MARK,
+ TCLKD_PD_MARK, TCLKC_PD_MARK, TCLKB_PD_MARK, TCLKA_PD_MARK,
+ TCLKD_PF_MARK, TCLKC_PF_MARK, TCLKB_PF_MARK, TCLKA_PF_MARK,
+ SCS0_PD_MARK, SSO0_PD_MARK, SSI0_PD_MARK, SSCK0_PD_MARK,
+ SCS0_PF_MARK, SSO0_PF_MARK, SSI0_PF_MARK, SSCK0_PF_MARK,
+ SCS1_PD_MARK, SSO1_PD_MARK, SSI1_PD_MARK, SSCK1_PD_MARK,
+ SCS1_PF_MARK, SSO1_PF_MARK, SSI1_PF_MARK, SSCK1_PF_MARK,
+ TXD0_MARK, RXD0_MARK, SCK0_MARK,
+ TXD1_MARK, RXD1_MARK, SCK1_MARK,
+ TXD2_MARK, RXD2_MARK, SCK2_MARK,
+ RTS3_MARK, CTS3_MARK, TXD3_MARK,
+ RXD3_MARK, SCK3_MARK,
+ AUDIO_CLK_MARK,
+ SSIDATA3_MARK, SSIWS3_MARK, SSISCK3_MARK,
+ SSIDATA2_MARK, SSIWS2_MARK, SSISCK2_MARK,
+ SSIDATA1_MARK, SSIWS1_MARK, SSISCK1_MARK,
+ SSIDATA0_MARK, SSIWS0_MARK, SSISCK0_MARK,
+ FCE_MARK, FRB_MARK,
+ NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK,
+ FSC_MARK, FOE_MARK, FCDE_MARK, FWE_MARK,
+ LCD_VEPWC_MARK, LCD_VCPWC_MARK, LCD_CLK_MARK, LCD_FLM_MARK,
+ LCD_M_DISP_MARK, LCD_CL2_MARK, LCD_CL1_MARK, LCD_DON_MARK,
+ LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
+ LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
+ LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
+ LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA */
+ PINMUX_DATA(PA7_DATA, PA7_IN),
+ PINMUX_DATA(PA6_DATA, PA6_IN),
+ PINMUX_DATA(PA5_DATA, PA5_IN),
+ PINMUX_DATA(PA4_DATA, PA4_IN),
+ PINMUX_DATA(PA3_DATA, PA3_IN),
+ PINMUX_DATA(PA2_DATA, PA2_IN),
+ PINMUX_DATA(PA1_DATA, PA1_IN),
+ PINMUX_DATA(PA0_DATA, PA0_IN),
+
+ /* PB */
+ PINMUX_DATA(PB12_DATA, PB12MD_00, FORCE_OUT),
+ PINMUX_DATA(WDTOVF_MARK, PB12MD_01),
+ PINMUX_DATA(IRQOUT_MARK, PB12MD_10, PB12IRQ_00),
+ PINMUX_DATA(REFOUT_MARK, PB12MD_10, PB12IRQ_01),
+ PINMUX_DATA(IRQOUT_REFOUT_MARK, PB12MD_10, PB12IRQ_10),
+ PINMUX_DATA(UBCTRG_MARK, PB12MD_11),
+
+ PINMUX_DATA(PB11_DATA, PB11MD_0, PB11_IN, PB11_OUT),
+ PINMUX_DATA(CTX1_MARK, PB11MD_1),
+
+ PINMUX_DATA(PB10_DATA, PB10MD_0, PB10_IN, PB10_OUT),
+ PINMUX_DATA(CRX1_MARK, PB10MD_1),
+
+ PINMUX_DATA(PB9_DATA, PB9MD_00, PB9_IN, PB9_OUT),
+ PINMUX_DATA(CTX0_MARK, PB9MD_01),
+ PINMUX_DATA(CTX0_CTX1_MARK, PB9MD_10),
+
+ PINMUX_DATA(PB8_DATA, PB8MD_00, PB8_IN, PB8_OUT),
+ PINMUX_DATA(CRX0_MARK, PB8MD_01),
+ PINMUX_DATA(CRX0_CRX1_MARK, PB8MD_10),
+
+ PINMUX_DATA(PB7_DATA, PB7MD_00, FORCE_IN),
+ PINMUX_DATA(SDA3_MARK, PB7MD_01),
+ PINMUX_DATA(PINT7_PB_MARK, PB7MD_10),
+ PINMUX_DATA(IRQ7_PB_MARK, PB7MD_11),
+
+ PINMUX_DATA(PB6_DATA, PB6MD_00, FORCE_IN),
+ PINMUX_DATA(SCL3_MARK, PB6MD_01),
+ PINMUX_DATA(PINT6_PB_MARK, PB6MD_10),
+ PINMUX_DATA(IRQ6_PB_MARK, PB6MD_11),
+
+ PINMUX_DATA(PB5_DATA, PB5MD_00, FORCE_IN),
+ PINMUX_DATA(SDA2_MARK, PB5MD_01),
+ PINMUX_DATA(PINT5_PB_MARK, PB5MD_10),
+ PINMUX_DATA(IRQ5_PB_MARK, PB5MD_11),
+
+ PINMUX_DATA(PB4_DATA, PB4MD_00, FORCE_IN),
+ PINMUX_DATA(SCL2_MARK, PB4MD_01),
+ PINMUX_DATA(PINT4_PB_MARK, PB4MD_10),
+ PINMUX_DATA(IRQ4_PB_MARK, PB4MD_11),
+
+ PINMUX_DATA(PB3_DATA, PB3MD_00, FORCE_IN),
+ PINMUX_DATA(SDA1_MARK, PB3MD_01),
+ PINMUX_DATA(PINT3_PB_MARK, PB3MD_10),
+ PINMUX_DATA(IRQ3_PB_MARK, PB3MD_11),
+
+ PINMUX_DATA(PB2_DATA, PB2MD_00, FORCE_IN),
+ PINMUX_DATA(SCL1_MARK, PB2MD_01),
+ PINMUX_DATA(PINT2_PB_MARK, PB2MD_10),
+ PINMUX_DATA(IRQ2_PB_MARK, PB2MD_11),
+
+ PINMUX_DATA(PB1_DATA, PB1MD_00, FORCE_IN),
+ PINMUX_DATA(SDA0_MARK, PB1MD_01),
+ PINMUX_DATA(PINT1_PB_MARK, PB1MD_10),
+ PINMUX_DATA(IRQ1_PB_MARK, PB1MD_11),
+
+ PINMUX_DATA(PB0_DATA, PB0MD_00, FORCE_IN),
+ PINMUX_DATA(SCL0_MARK, PB0MD_01),
+ PINMUX_DATA(PINT0_PB_MARK, PB0MD_10),
+ PINMUX_DATA(IRQ0_PB_MARK, PB0MD_11),
+
+ /* PC */
+ PINMUX_DATA(PC14_DATA, PC14MD_0, PC14_IN, PC14_OUT),
+ PINMUX_DATA(WAIT_MARK, PC14MD_1),
+
+ PINMUX_DATA(PC13_DATA, PC13MD_0, PC13_IN, PC13_OUT),
+ PINMUX_DATA(RDWR_MARK, PC13MD_1),
+
+ PINMUX_DATA(PC12_DATA, PC12MD_0, PC12_IN, PC12_OUT),
+ PINMUX_DATA(CKE_MARK, PC12MD_1),
+
+ PINMUX_DATA(PC11_DATA, PC11MD_00, PC11_IN, PC11_OUT),
+ PINMUX_DATA(CASU_MARK, PC11MD_01),
+ PINMUX_DATA(BREQ_MARK, PC11MD_10),
+
+ PINMUX_DATA(PC10_DATA, PC10MD_00, PC10_IN, PC10_OUT),
+ PINMUX_DATA(RASU_MARK, PC10MD_01),
+ PINMUX_DATA(BACK_MARK, PC10MD_10),
+
+ PINMUX_DATA(PC9_DATA, PC9MD_0, PC9_IN, PC9_OUT),
+ PINMUX_DATA(CASL_MARK, PC9MD_1),
+
+ PINMUX_DATA(PC8_DATA, PC8MD_0, PC8_IN, PC8_OUT),
+ PINMUX_DATA(RASL_MARK, PC8MD_1),
+
+ PINMUX_DATA(PC7_DATA, PC7MD_0, PC7_IN, PC7_OUT),
+ PINMUX_DATA(WE3_DQMUU_AH_ICIO_WR_MARK, PC7MD_1),
+
+ PINMUX_DATA(PC6_DATA, PC6MD_0, PC6_IN, PC6_OUT),
+ PINMUX_DATA(WE2_DQMUL_ICIORD_MARK, PC6MD_1),
+
+ PINMUX_DATA(PC5_DATA, PC5MD_0, PC5_IN, PC5_OUT),
+ PINMUX_DATA(WE1_DQMLU_WE_MARK, PC5MD_1),
+
+ PINMUX_DATA(PC4_DATA, PC4MD_0, PC4_IN, PC4_OUT),
+ PINMUX_DATA(WE0_DQMLL_MARK, PC4MD_1),
+
+ PINMUX_DATA(PC3_DATA, PC3MD_0, PC3_IN, PC3_OUT),
+ PINMUX_DATA(CS3_MARK, PC3MD_1),
+
+ PINMUX_DATA(PC2_DATA, PC2MD_0, PC2_IN, PC2_OUT),
+ PINMUX_DATA(CS2_MARK, PC2MD_1),
+
+ PINMUX_DATA(PC1_DATA, PC1MD_0, PC1_IN, PC1_OUT),
+ PINMUX_DATA(A1_MARK, PC1MD_1),
+
+ PINMUX_DATA(PC0_DATA, PC0MD_00, PC0_IN, PC0_OUT),
+ PINMUX_DATA(A0_MARK, PC0MD_01),
+ PINMUX_DATA(CS7_MARK, PC0MD_10),
+
+ /* PD */
+ PINMUX_DATA(PD15_DATA, PD15MD_000, PD15_IN, PD15_OUT),
+ PINMUX_DATA(D31_MARK, PD15MD_001),
+ PINMUX_DATA(PINT7_PD_MARK, PD15MD_010),
+ PINMUX_DATA(ADTRG_PD_MARK, PD15MD_100),
+ PINMUX_DATA(TIOC4D_MARK, PD15MD_101),
+
+ PINMUX_DATA(PD14_DATA, PD14MD_000, PD14_IN, PD14_OUT),
+ PINMUX_DATA(D30_MARK, PD14MD_001),
+ PINMUX_DATA(PINT6_PD_MARK, PD14MD_010),
+ PINMUX_DATA(TIOC4C_MARK, PD14MD_101),
+
+ PINMUX_DATA(PD13_DATA, PD13MD_000, PD13_IN, PD13_OUT),
+ PINMUX_DATA(D29_MARK, PD13MD_001),
+ PINMUX_DATA(PINT5_PD_MARK, PD13MD_010),
+ PINMUX_DATA(TEND1_PD_MARK, PD13MD_100),
+ PINMUX_DATA(TIOC4B_MARK, PD13MD_101),
+
+ PINMUX_DATA(PD12_DATA, PD12MD_000, PD12_IN, PD12_OUT),
+ PINMUX_DATA(D28_MARK, PD12MD_001),
+ PINMUX_DATA(PINT4_PD_MARK, PD12MD_010),
+ PINMUX_DATA(DACK1_PD_MARK, PD12MD_100),
+ PINMUX_DATA(TIOC4A_MARK, PD12MD_101),
+
+ PINMUX_DATA(PD11_DATA, PD11MD_000, PD11_IN, PD11_OUT),
+ PINMUX_DATA(D27_MARK, PD11MD_001),
+ PINMUX_DATA(PINT3_PD_MARK, PD11MD_010),
+ PINMUX_DATA(DREQ1_PD_MARK, PD11MD_100),
+ PINMUX_DATA(TIOC3D_MARK, PD11MD_101),
+
+ PINMUX_DATA(PD10_DATA, PD10MD_000, PD10_IN, PD10_OUT),
+ PINMUX_DATA(D26_MARK, PD10MD_001),
+ PINMUX_DATA(PINT2_PD_MARK, PD10MD_010),
+ PINMUX_DATA(TEND0_PD_MARK, PD10MD_100),
+ PINMUX_DATA(TIOC3C_MARK, PD10MD_101),
+
+ PINMUX_DATA(PD9_DATA, PD9MD_000, PD9_IN, PD9_OUT),
+ PINMUX_DATA(D25_MARK, PD9MD_001),
+ PINMUX_DATA(PINT1_PD_MARK, PD9MD_010),
+ PINMUX_DATA(DACK0_PD_MARK, PD9MD_100),
+ PINMUX_DATA(TIOC3B_MARK, PD9MD_101),
+
+ PINMUX_DATA(PD8_DATA, PD8MD_000, PD8_IN, PD8_OUT),
+ PINMUX_DATA(D24_MARK, PD8MD_001),
+ PINMUX_DATA(PINT0_PD_MARK, PD8MD_010),
+ PINMUX_DATA(DREQ0_PD_MARK, PD8MD_100),
+ PINMUX_DATA(TIOC3A_MARK, PD8MD_101),
+
+ PINMUX_DATA(PD7_DATA, PD7MD_000, PD7_IN, PD7_OUT),
+ PINMUX_DATA(D23_MARK, PD7MD_001),
+ PINMUX_DATA(IRQ7_PD_MARK, PD7MD_010),
+ PINMUX_DATA(SCS1_PD_MARK, PD7MD_011),
+ PINMUX_DATA(TCLKD_PD_MARK, PD7MD_100),
+ PINMUX_DATA(TIOC2B_MARK, PD7MD_101),
+
+ PINMUX_DATA(PD6_DATA, PD6MD_000, PD6_IN, PD6_OUT),
+ PINMUX_DATA(D22_MARK, PD6MD_001),
+ PINMUX_DATA(IRQ6_PD_MARK, PD6MD_010),
+ PINMUX_DATA(SSO1_PD_MARK, PD6MD_011),
+ PINMUX_DATA(TCLKC_PD_MARK, PD6MD_100),
+ PINMUX_DATA(TIOC2A_MARK, PD6MD_101),
+
+ PINMUX_DATA(PD5_DATA, PD5MD_000, PD5_IN, PD5_OUT),
+ PINMUX_DATA(D21_MARK, PD5MD_001),
+ PINMUX_DATA(IRQ5_PD_MARK, PD5MD_010),
+ PINMUX_DATA(SSI1_PD_MARK, PD5MD_011),
+ PINMUX_DATA(TCLKB_PD_MARK, PD5MD_100),
+ PINMUX_DATA(TIOC1B_MARK, PD5MD_101),
+
+ PINMUX_DATA(PD4_DATA, PD4MD_000, PD4_IN, PD4_OUT),
+ PINMUX_DATA(D20_MARK, PD4MD_001),
+ PINMUX_DATA(IRQ4_PD_MARK, PD4MD_010),
+ PINMUX_DATA(SSCK1_PD_MARK, PD4MD_011),
+ PINMUX_DATA(TCLKA_PD_MARK, PD4MD_100),
+ PINMUX_DATA(TIOC1A_MARK, PD4MD_101),
+
+ PINMUX_DATA(PD3_DATA, PD3MD_000, PD3_IN, PD3_OUT),
+ PINMUX_DATA(D19_MARK, PD3MD_001),
+ PINMUX_DATA(IRQ3_PD_MARK, PD3MD_010),
+ PINMUX_DATA(SCS0_PD_MARK, PD3MD_011),
+ PINMUX_DATA(DACK3_MARK, PD3MD_100),
+ PINMUX_DATA(TIOC0D_MARK, PD3MD_101),
+
+ PINMUX_DATA(PD2_DATA, PD2MD_000, PD2_IN, PD2_OUT),
+ PINMUX_DATA(D18_MARK, PD2MD_001),
+ PINMUX_DATA(IRQ2_PD_MARK, PD2MD_010),
+ PINMUX_DATA(SSO0_PD_MARK, PD2MD_011),
+ PINMUX_DATA(DREQ3_MARK, PD2MD_100),
+ PINMUX_DATA(TIOC0C_MARK, PD2MD_101),
+
+ PINMUX_DATA(PD1_DATA, PD1MD_000, PD1_IN, PD1_OUT),
+ PINMUX_DATA(D17_MARK, PD1MD_001),
+ PINMUX_DATA(IRQ1_PD_MARK, PD1MD_010),
+ PINMUX_DATA(SSI0_PD_MARK, PD1MD_011),
+ PINMUX_DATA(DACK2_MARK, PD1MD_100),
+ PINMUX_DATA(TIOC0B_MARK, PD1MD_101),
+
+ PINMUX_DATA(PD0_DATA, PD0MD_000, PD0_IN, PD0_OUT),
+ PINMUX_DATA(D16_MARK, PD0MD_001),
+ PINMUX_DATA(IRQ0_PD_MARK, PD0MD_010),
+ PINMUX_DATA(SSCK0_PD_MARK, PD0MD_011),
+ PINMUX_DATA(DREQ2_MARK, PD0MD_100),
+ PINMUX_DATA(TIOC0A_MARK, PD0MD_101),
+
+ /* PE */
+ PINMUX_DATA(PE15_DATA, PE15MD_00, PE15_IN, PE15_OUT),
+ PINMUX_DATA(IOIS16_MARK, PE15MD_01),
+ PINMUX_DATA(RTS3_MARK, PE15MD_11),
+
+ PINMUX_DATA(PE14_DATA, PE14MD_00, PE14_IN, PE14_OUT),
+ PINMUX_DATA(CS1_MARK, PE14MD_01),
+ PINMUX_DATA(CTS3_MARK, PE14MD_11),
+
+ PINMUX_DATA(PE13_DATA, PE13MD_00, PE13_IN, PE13_OUT),
+ PINMUX_DATA(TXD3_MARK, PE13MD_11),
+
+ PINMUX_DATA(PE12_DATA, PE12MD_00, PE12_IN, PE12_OUT),
+ PINMUX_DATA(RXD3_MARK, PE12MD_11),
+
+ PINMUX_DATA(PE11_DATA, PE11MD_000, PE11_IN, PE11_OUT),
+ PINMUX_DATA(CS6_CE1B_MARK, PE11MD_001),
+ PINMUX_DATA(IRQ7_PE_MARK, PE11MD_010),
+ PINMUX_DATA(TEND1_PE_MARK, PE11MD_100),
+
+ PINMUX_DATA(PE10_DATA, PE10MD_000, PE10_IN, PE10_OUT),
+ PINMUX_DATA(CE2B_MARK, PE10MD_001),
+ PINMUX_DATA(IRQ6_PE_MARK, PE10MD_010),
+ PINMUX_DATA(TEND0_PE_MARK, PE10MD_100),
+
+ PINMUX_DATA(PE9_DATA, PE9MD_00, PE9_IN, PE9_OUT),
+ PINMUX_DATA(CS5_CE1A_MARK, PE9MD_01),
+ PINMUX_DATA(IRQ5_PE_MARK, PE9MD_10),
+ PINMUX_DATA(SCK3_MARK, PE9MD_11),
+
+ PINMUX_DATA(PE8_DATA, PE8MD_00, PE8_IN, PE8_OUT),
+ PINMUX_DATA(CE2A_MARK, PE8MD_01),
+ PINMUX_DATA(IRQ4_PE_MARK, PE8MD_10),
+ PINMUX_DATA(SCK2_MARK, PE8MD_11),
+
+ PINMUX_DATA(PE7_DATA, PE7MD_000, PE7_IN, PE7_OUT),
+ PINMUX_DATA(FRAME_MARK, PE7MD_001),
+ PINMUX_DATA(IRQ3_PE_MARK, PE7MD_010),
+ PINMUX_DATA(TXD2_MARK, PE7MD_011),
+ PINMUX_DATA(DACK1_PE_MARK, PE7MD_100),
+
+ PINMUX_DATA(PE6_DATA, PE6MD_000, PE6_IN, PE6_OUT),
+ PINMUX_DATA(A25_MARK, PE6MD_001),
+ PINMUX_DATA(IRQ2_PE_MARK, PE6MD_010),
+ PINMUX_DATA(RXD2_MARK, PE6MD_011),
+ PINMUX_DATA(DREQ1_PE_MARK, PE6MD_100),
+
+ PINMUX_DATA(PE5_DATA, PE5MD_000, PE5_IN, PE5_OUT),
+ PINMUX_DATA(A24_MARK, PE5MD_001),
+ PINMUX_DATA(IRQ1_PE_MARK, PE5MD_010),
+ PINMUX_DATA(TXD1_MARK, PE5MD_011),
+ PINMUX_DATA(DACK0_PE_MARK, PE5MD_100),
+
+ PINMUX_DATA(PE4_DATA, PE4MD_000, PE4_IN, PE4_OUT),
+ PINMUX_DATA(A23_MARK, PE4MD_001),
+ PINMUX_DATA(IRQ0_PE_MARK, PE4MD_010),
+ PINMUX_DATA(RXD1_MARK, PE4MD_011),
+ PINMUX_DATA(DREQ0_PE_MARK, PE4MD_100),
+
+ PINMUX_DATA(PE3_DATA, PE3MD_00, PE3_IN, PE3_OUT),
+ PINMUX_DATA(A22_MARK, PE3MD_01),
+ PINMUX_DATA(SCK1_MARK, PE3MD_11),
+
+ PINMUX_DATA(PE2_DATA, PE2MD_00, PE2_IN, PE2_OUT),
+ PINMUX_DATA(A21_MARK, PE2MD_01),
+ PINMUX_DATA(SCK0_MARK, PE2MD_11),
+
+ PINMUX_DATA(PE1_DATA, PE1MD_00, PE1_IN, PE1_OUT),
+ PINMUX_DATA(CS4_MARK, PE1MD_01),
+ PINMUX_DATA(MRES_MARK, PE1MD_10),
+ PINMUX_DATA(TXD0_MARK, PE1MD_11),
+
+ PINMUX_DATA(PE0_DATA, PE0MD_000, PE0_IN, PE0_OUT),
+ PINMUX_DATA(BS_MARK, PE0MD_001),
+ PINMUX_DATA(RXD0_MARK, PE0MD_011),
+ PINMUX_DATA(ADTRG_PE_MARK, PE0MD_100),
+
+ /* PF */
+ PINMUX_DATA(PF30_DATA, PF30MD_0, PF30_IN, PF30_OUT),
+ PINMUX_DATA(AUDIO_CLK_MARK, PF30MD_1),
+
+ PINMUX_DATA(PF29_DATA, PF29MD_0, PF29_IN, PF29_OUT),
+ PINMUX_DATA(SSIDATA3_MARK, PF29MD_1),
+
+ PINMUX_DATA(PF28_DATA, PF28MD_0, PF28_IN, PF28_OUT),
+ PINMUX_DATA(SSIWS3_MARK, PF28MD_1),
+
+ PINMUX_DATA(PF27_DATA, PF27MD_0, PF27_IN, PF27_OUT),
+ PINMUX_DATA(SSISCK3_MARK, PF27MD_1),
+
+ PINMUX_DATA(PF26_DATA, PF26MD_0, PF26_IN, PF26_OUT),
+ PINMUX_DATA(SSIDATA2_MARK, PF26MD_1),
+
+ PINMUX_DATA(PF25_DATA, PF25MD_0, PF25_IN, PF25_OUT),
+ PINMUX_DATA(SSIWS2_MARK, PF25MD_1),
+
+ PINMUX_DATA(PF24_DATA, PF24MD_0, PF24_IN, PF24_OUT),
+ PINMUX_DATA(SSISCK2_MARK, PF24MD_1),
+
+ PINMUX_DATA(PF23_DATA, PF23MD_00, PF23_IN, PF23_OUT),
+ PINMUX_DATA(SSIDATA1_MARK, PF23MD_01),
+ PINMUX_DATA(LCD_VEPWC_MARK, PF23MD_10),
+
+ PINMUX_DATA(PF22_DATA, PF22MD_00, PF22_IN, PF22_OUT),
+ PINMUX_DATA(SSIWS1_MARK, PF22MD_01),
+ PINMUX_DATA(LCD_VCPWC_MARK, PF22MD_10),
+
+ PINMUX_DATA(PF21_DATA, PF21MD_00, PF21_IN, PF21_OUT),
+ PINMUX_DATA(SSISCK1_MARK, PF21MD_01),
+ PINMUX_DATA(LCD_CLK_MARK, PF21MD_10),
+
+ PINMUX_DATA(PF20_DATA, PF20MD_00, PF20_IN, PF20_OUT),
+ PINMUX_DATA(SSIDATA0_MARK, PF20MD_01),
+ PINMUX_DATA(LCD_FLM_MARK, PF20MD_10),
+
+ PINMUX_DATA(PF19_DATA, PF19MD_00, PF19_IN, PF19_OUT),
+ PINMUX_DATA(SSIWS0_MARK, PF19MD_01),
+ PINMUX_DATA(LCD_M_DISP_MARK, PF19MD_10),
+
+ PINMUX_DATA(PF18_DATA, PF18MD_00, PF18_IN, PF18_OUT),
+ PINMUX_DATA(SSISCK0_MARK, PF18MD_01),
+ PINMUX_DATA(LCD_CL2_MARK, PF18MD_10),
+
+ PINMUX_DATA(PF17_DATA, PF17MD_00, PF17_IN, PF17_OUT),
+ PINMUX_DATA(FCE_MARK, PF17MD_01),
+ PINMUX_DATA(LCD_CL1_MARK, PF17MD_10),
+
+ PINMUX_DATA(PF16_DATA, PF16MD_00, PF16_IN, PF16_OUT),
+ PINMUX_DATA(FRB_MARK, PF16MD_01),
+ PINMUX_DATA(LCD_DON_MARK, PF16MD_10),
+
+ PINMUX_DATA(PF15_DATA, PF15MD_00, PF15_IN, PF15_OUT),
+ PINMUX_DATA(NAF7_MARK, PF15MD_01),
+ PINMUX_DATA(LCD_DATA15_MARK, PF15MD_10),
+
+ PINMUX_DATA(PF14_DATA, PF14MD_00, PF14_IN, PF14_OUT),
+ PINMUX_DATA(NAF6_MARK, PF14MD_01),
+ PINMUX_DATA(LCD_DATA14_MARK, PF14MD_10),
+
+ PINMUX_DATA(PF13_DATA, PF13MD_00, PF13_IN, PF13_OUT),
+ PINMUX_DATA(NAF5_MARK, PF13MD_01),
+ PINMUX_DATA(LCD_DATA13_MARK, PF13MD_10),
+
+ PINMUX_DATA(PF12_DATA, PF12MD_00, PF12_IN, PF12_OUT),
+ PINMUX_DATA(NAF4_MARK, PF12MD_01),
+ PINMUX_DATA(LCD_DATA12_MARK, PF12MD_10),
+
+ PINMUX_DATA(PF11_DATA, PF11MD_00, PF11_IN, PF11_OUT),
+ PINMUX_DATA(NAF3_MARK, PF11MD_01),
+ PINMUX_DATA(LCD_DATA11_MARK, PF11MD_10),
+
+ PINMUX_DATA(PF10_DATA, PF10MD_00, PF10_IN, PF10_OUT),
+ PINMUX_DATA(NAF2_MARK, PF10MD_01),
+ PINMUX_DATA(LCD_DATA10_MARK, PF10MD_10),
+
+ PINMUX_DATA(PF9_DATA, PF9MD_00, PF9_IN, PF9_OUT),
+ PINMUX_DATA(NAF1_MARK, PF9MD_01),
+ PINMUX_DATA(LCD_DATA9_MARK, PF9MD_10),
+
+ PINMUX_DATA(PF8_DATA, PF8MD_00, PF8_IN, PF8_OUT),
+ PINMUX_DATA(NAF0_MARK, PF8MD_01),
+ PINMUX_DATA(LCD_DATA8_MARK, PF8MD_10),
+
+ PINMUX_DATA(PF7_DATA, PF7MD_00, PF7_IN, PF7_OUT),
+ PINMUX_DATA(FSC_MARK, PF7MD_01),
+ PINMUX_DATA(LCD_DATA7_MARK, PF7MD_10),
+ PINMUX_DATA(SCS1_PF_MARK, PF7MD_11),
+
+ PINMUX_DATA(PF6_DATA, PF6MD_00, PF6_IN, PF6_OUT),
+ PINMUX_DATA(FOE_MARK, PF6MD_01),
+ PINMUX_DATA(LCD_DATA6_MARK, PF6MD_10),
+ PINMUX_DATA(SSO1_PF_MARK, PF6MD_11),
+
+ PINMUX_DATA(PF5_DATA, PF5MD_00, PF5_IN, PF5_OUT),
+ PINMUX_DATA(FCDE_MARK, PF5MD_01),
+ PINMUX_DATA(LCD_DATA5_MARK, PF5MD_10),
+ PINMUX_DATA(SSI1_PF_MARK, PF5MD_11),
+
+ PINMUX_DATA(PF4_DATA, PF4MD_00, PF4_IN, PF4_OUT),
+ PINMUX_DATA(FWE_MARK, PF4MD_01),
+ PINMUX_DATA(LCD_DATA4_MARK, PF4MD_10),
+ PINMUX_DATA(SSCK1_PF_MARK, PF4MD_11),
+
+ PINMUX_DATA(PF3_DATA, PF3MD_00, PF3_IN, PF3_OUT),
+ PINMUX_DATA(TCLKD_PF_MARK, PF3MD_01),
+ PINMUX_DATA(LCD_DATA3_MARK, PF3MD_10),
+ PINMUX_DATA(SCS0_PF_MARK, PF3MD_11),
+
+ PINMUX_DATA(PF2_DATA, PF2MD_00, PF2_IN, PF2_OUT),
+ PINMUX_DATA(TCLKC_PF_MARK, PF2MD_01),
+ PINMUX_DATA(LCD_DATA2_MARK, PF2MD_10),
+ PINMUX_DATA(SSO0_PF_MARK, PF2MD_11),
+
+ PINMUX_DATA(PF1_DATA, PF1MD_00, PF1_IN, PF1_OUT),
+ PINMUX_DATA(TCLKB_PF_MARK, PF1MD_01),
+ PINMUX_DATA(LCD_DATA1_MARK, PF1MD_10),
+ PINMUX_DATA(SSI0_PF_MARK, PF1MD_11),
+
+ PINMUX_DATA(PF0_DATA, PF0MD_00, PF0_IN, PF0_OUT),
+ PINMUX_DATA(TCLKA_PF_MARK, PF0MD_01),
+ PINMUX_DATA(LCD_DATA0_MARK, PF0MD_10),
+ PINMUX_DATA(SSCK0_PF_MARK, PF0MD_11),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB12, PB12_DATA),
+ PINMUX_GPIO(GPIO_PB11, PB11_DATA),
+ PINMUX_GPIO(GPIO_PB10, PB10_DATA),
+ PINMUX_GPIO(GPIO_PB9, PB9_DATA),
+ PINMUX_GPIO(GPIO_PB8, PB8_DATA),
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC14, PC14_DATA),
+ PINMUX_GPIO(GPIO_PC13, PC13_DATA),
+ PINMUX_GPIO(GPIO_PC12, PC12_DATA),
+ PINMUX_GPIO(GPIO_PC11, PC11_DATA),
+ PINMUX_GPIO(GPIO_PC10, PC10_DATA),
+ PINMUX_GPIO(GPIO_PC9, PC9_DATA),
+ PINMUX_GPIO(GPIO_PC8, PC8_DATA),
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD15, PD15_DATA),
+ PINMUX_GPIO(GPIO_PD14, PD14_DATA),
+ PINMUX_GPIO(GPIO_PD13, PD13_DATA),
+ PINMUX_GPIO(GPIO_PD12, PD12_DATA),
+ PINMUX_GPIO(GPIO_PD11, PD11_DATA),
+ PINMUX_GPIO(GPIO_PD10, PD10_DATA),
+ PINMUX_GPIO(GPIO_PD9, PD9_DATA),
+ PINMUX_GPIO(GPIO_PD8, PD8_DATA),
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE15, PE15_DATA),
+ PINMUX_GPIO(GPIO_PE14, PE14_DATA),
+ PINMUX_GPIO(GPIO_PE13, PE13_DATA),
+ PINMUX_GPIO(GPIO_PE12, PE12_DATA),
+ PINMUX_GPIO(GPIO_PE11, PE11_DATA),
+ PINMUX_GPIO(GPIO_PE10, PE10_DATA),
+ PINMUX_GPIO(GPIO_PE9, PE9_DATA),
+ PINMUX_GPIO(GPIO_PE8, PE8_DATA),
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF30, PF30_DATA),
+ PINMUX_GPIO(GPIO_PF29, PF29_DATA),
+ PINMUX_GPIO(GPIO_PF28, PF28_DATA),
+ PINMUX_GPIO(GPIO_PF27, PF27_DATA),
+ PINMUX_GPIO(GPIO_PF26, PF26_DATA),
+ PINMUX_GPIO(GPIO_PF25, PF25_DATA),
+ PINMUX_GPIO(GPIO_PF24, PF24_DATA),
+ PINMUX_GPIO(GPIO_PF23, PF23_DATA),
+ PINMUX_GPIO(GPIO_PF22, PF22_DATA),
+ PINMUX_GPIO(GPIO_PF21, PF21_DATA),
+ PINMUX_GPIO(GPIO_PF20, PF20_DATA),
+ PINMUX_GPIO(GPIO_PF19, PF19_DATA),
+ PINMUX_GPIO(GPIO_PF18, PF18_DATA),
+ PINMUX_GPIO(GPIO_PF17, PF17_DATA),
+ PINMUX_GPIO(GPIO_PF16, PF16_DATA),
+ PINMUX_GPIO(GPIO_PF15, PF15_DATA),
+ PINMUX_GPIO(GPIO_PF14, PF14_DATA),
+ PINMUX_GPIO(GPIO_PF13, PF13_DATA),
+ PINMUX_GPIO(GPIO_PF12, PF12_DATA),
+ PINMUX_GPIO(GPIO_PF11, PF11_DATA),
+ PINMUX_GPIO(GPIO_PF10, PF10_DATA),
+ PINMUX_GPIO(GPIO_PF9, PF9_DATA),
+ PINMUX_GPIO(GPIO_PF8, PF8_DATA),
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_PINT7_PB, PINT7_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PB, PINT6_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PB, PINT5_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PB, PINT4_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PB, PINT3_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PB, PINT2_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PB, PINT1_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PB, PINT0_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT7_PD, PINT7_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PD, PINT6_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PD, PINT5_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PD, PINT4_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PD, PINT3_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PD, PINT2_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PD, PINT1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PD, PINT0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PB, IRQ7_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PB, IRQ6_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PB, IRQ5_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PB, IRQ4_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PB, IRQ3_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PB, IRQ2_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PB, IRQ1_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PB, IRQ0_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PD, IRQ7_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PD, IRQ6_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PD, IRQ5_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PD, IRQ4_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PD, IRQ3_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PD, IRQ2_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PD, IRQ1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PD, IRQ0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PE, IRQ7_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PE, IRQ6_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PE, IRQ5_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PE, IRQ4_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PE, IRQ3_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK),
+
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT_REFOUT, IRQOUT_REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_UBCTRG, UBCTRG_MARK),
+
+ /* CAN */
+ PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0_CTX1, CTX0_CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+
+ /* IIC3 */
+ PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_TEND0_PD, TEND0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0_PE, TEND0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0_PD, DACK0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0_PE, DACK0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0_PD, DREQ0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0_PE, DREQ0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1_PD, TEND1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1_PE, TEND1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1_PD, DACK1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1_PE, DACK1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1_PD, DREQ1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1_PE, DREQ1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_ADTRG_PD, ADTRG_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG_PE, ADTRG_PE_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_MRES, MRES_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6_CE1B, CS6_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5_CE1A, CS5_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_FRAME, FRAME_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_CASU, CASU_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_RASU, RASU_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_CASL, CASL_MARK),
+ PINMUX_GPIO(GPIO_FN_RASL, RASL_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_DQMUU_AH_ICIO_WR, WE3_DQMUU_AH_ICIO_WR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_DQMUL_ICIORD, WE2_DQMUL_ICIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1_DQMLU_WE, WE1_DQMLU_WE_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0_DQMLL, WE0_DQMLL_MARK),
+ PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS7, CS7_MARK),
+
+ /* TMU */
+ PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD_PD, TCLKD_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC_PD, TCLKC_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB_PD, TCLKB_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA_PD, TCLKA_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD_PF, TCLKD_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC_PF, TCLKC_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB_PF, TCLKB_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA_PF, TCLKA_PF_MARK),
+
+ /* SSU */
+ PINMUX_GPIO(GPIO_FN_SCS0_PD, SCS0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO0_PD, SSO0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_PD, SSI0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK0_PD, SSCK0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS0_PF, SCS0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO0_PF, SSO0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_PF, SSI0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK0_PF, SSCK0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS1_PD, SCS1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO1_PD, SSO1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_PD, SSI1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK1_PD, SSCK1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS1_PF, SCS1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO1_PF, SSO1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_PF, SSI1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK1_PF, SSCK1_PF_MARK),
+
+ /* SCIF */
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+
+ /* SSI */
+ PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA0, SSIDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PB11_IN, PB11_OUT,
+ PB10_IN, PB10_OUT,
+ PB9_IN, PB9_OUT,
+ PB8_IN, PB8_OUT,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4) {
+ PB11MD_0, PB11MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB10MD_0, PB10MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB9MD_00, PB9MD_01, PB9MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB8MD_00, PB8MD_01, PB8MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4) {
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4) {
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1) {
+ 0, 0,
+ PC14_IN, PC14_OUT,
+ PC13_IN, PC13_OUT,
+ PC12_IN, PC12_OUT,
+ PC11_IN, PC11_OUT,
+ PC10_IN, PC10_OUT,
+ PC9_IN, PC9_OUT,
+ PC8_IN, PC8_OUT,
+ PC7_IN, PC7_OUT,
+ PC6_IN, PC6_OUT,
+ PC5_IN, PC5_OUT,
+ PC4_IN, PC4_OUT,
+ PC3_IN, PC3_OUT,
+ PC2_IN, PC2_OUT,
+ PC1_IN, PC1_OUT,
+ PC0_IN, PC0_OUT }
+ },
+ { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC14MD_0, PC14MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC13MD_0, PC13MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC12MD_0, PC12MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4) {
+ PC11MD_00, PC11MD_01, PC11MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC10MD_00, PC10MD_01, PC10MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC9MD_0, PC9MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC8MD_0, PC8MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4) {
+ PC7MD_0, PC7MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC6MD_0, PC6MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC5MD_0, PC5MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC4MD_0, PC4MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4) {
+ PC3MD_0, PC3MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC2MD_0, PC2MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC1MD_0, PC1MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC0MD_00, PC0MD_01, PC0MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1) {
+ PD15_IN, PD15_OUT,
+ PD14_IN, PD14_OUT,
+ PD13_IN, PD13_OUT,
+ PD12_IN, PD12_OUT,
+ PD11_IN, PD11_OUT,
+ PD10_IN, PD10_OUT,
+ PD9_IN, PD9_OUT,
+ PD8_IN, PD8_OUT,
+ PD7_IN, PD7_OUT,
+ PD6_IN, PD6_OUT,
+ PD5_IN, PD5_OUT,
+ PD4_IN, PD4_OUT,
+ PD3_IN, PD3_OUT,
+ PD2_IN, PD2_OUT,
+ PD1_IN, PD1_OUT,
+ PD0_IN, PD0_OUT }
+ },
+ { PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4) {
+ PD15MD_000, PD15MD_001, PD15MD_010, 0,
+ PD15MD_100, PD15MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD14MD_000, PD14MD_001, PD14MD_010, 0,
+ 0, PD14MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD13MD_000, PD13MD_001, PD13MD_010, 0,
+ PD13MD_100, PD13MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD12MD_000, PD12MD_001, PD12MD_010, 0,
+ PD12MD_100, PD12MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4) {
+ PD11MD_000, PD11MD_001, PD11MD_010, 0,
+ PD11MD_100, PD11MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD10MD_000, PD10MD_001, PD10MD_010, 0,
+ PD10MD_100, PD10MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD9MD_000, PD9MD_001, PD9MD_010, 0,
+ PD9MD_100, PD9MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD8MD_000, PD8MD_001, PD8MD_010, 0,
+ PD8MD_100, PD8MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4) {
+ PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011,
+ PD7MD_100, PD7MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011,
+ PD6MD_100, PD6MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011,
+ PD5MD_100, PD5MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011,
+ PD4MD_100, PD4MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4) {
+ PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011,
+ PD3MD_100, PD3MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011,
+ PD2MD_100, PD2MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011,
+ PD1MD_100, PD1MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011,
+ PD0MD_100, PD0MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1) {
+ PE15_IN, PE15_OUT,
+ PE14_IN, PE14_OUT,
+ PE13_IN, PE13_OUT,
+ PE12_IN, PE12_OUT,
+ PE11_IN, PE11_OUT,
+ PE10_IN, PE10_OUT,
+ PE9_IN, PE9_OUT,
+ PE8_IN, PE8_OUT,
+ PE7_IN, PE7_OUT,
+ PE6_IN, PE6_OUT,
+ PE5_IN, PE5_OUT,
+ PE4_IN, PE4_OUT,
+ PE3_IN, PE3_OUT,
+ PE2_IN, PE2_OUT,
+ PE1_IN, PE1_OUT,
+ PE0_IN, PE0_OUT }
+ },
+ { PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4) {
+ PE15MD_00, PE15MD_01, 0, PE15MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE14MD_00, PE14MD_01, 0, PE14MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE13MD_00, 0, 0, PE13MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE12MD_00, 0, 0, PE12MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4) {
+ PE11MD_000, PE11MD_001, PE11MD_010, 0,
+ PE11MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE10MD_000, PE10MD_001, PE10MD_010, 0,
+ PE10MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4) {
+ PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011,
+ PE7MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011,
+ PE6MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011,
+ PE5MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011,
+ PE4MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4) {
+ PE3MD_00, PE3MD_01, 0, PE3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE2MD_00, PE2MD_01, 0, PE2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE0MD_000, PE0MD_001, 0, PE0MD_011,
+ PE0MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1) {
+ 0, 0,
+ PF30_IN, PF30_OUT,
+ PF29_IN, PF29_OUT,
+ PF28_IN, PF28_OUT,
+ PF27_IN, PF27_OUT,
+ PF26_IN, PF26_OUT,
+ PF25_IN, PF25_OUT,
+ PF24_IN, PF24_OUT,
+ PF23_IN, PF23_OUT,
+ PF22_IN, PF22_OUT,
+ PF21_IN, PF21_OUT,
+ PF20_IN, PF20_OUT,
+ PF19_IN, PF19_OUT,
+ PF18_IN, PF18_OUT,
+ PF17_IN, PF17_OUT,
+ PF16_IN, PF16_OUT }
+ },
+ { PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1) {
+ PF15_IN, PF15_OUT,
+ PF14_IN, PF14_OUT,
+ PF13_IN, PF13_OUT,
+ PF12_IN, PF12_OUT,
+ PF11_IN, PF11_OUT,
+ PF10_IN, PF10_OUT,
+ PF9_IN, PF9_OUT,
+ PF8_IN, PF8_OUT,
+ PF7_IN, PF7_OUT,
+ PF6_IN, PF6_OUT,
+ PF5_IN, PF5_OUT,
+ PF4_IN, PF4_OUT,
+ PF3_IN, PF3_OUT,
+ PF2_IN, PF2_OUT,
+ PF1_IN, PF1_OUT,
+ PF0_IN, PF0_OUT }
+ },
+ { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF30MD_0, PF30MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF29MD_0, PF29MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF28MD_0, PF28MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4) {
+ PF27MD_0, PF27MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF26MD_0, PF26MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF25MD_0, PF25MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF24MD_0, PF24MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4) {
+ PF23MD_00, PF23MD_01, PF23MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF22MD_00, PF22MD_01, PF22MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF21MD_00, PF21MD_01, PF21MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF20MD_00, PF20MD_01, PF20MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4) {
+ PF19MD_00, PF19MD_01, PF19MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF18MD_00, PF18MD_01, PF18MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF17MD_00, PF17MD_01, PF17MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF16MD_00, PF16MD_01, PF16MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4) {
+ PF15MD_00, PF15MD_01, PF15MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF14MD_00, PF14MD_01, PF14MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF13MD_00, PF13MD_01, PF13MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF12MD_00, PF12MD_01, PF12MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4) {
+ PF11MD_00, PF11MD_01, PF11MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF10MD_00, PF10MD_01, PF10MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF9MD_00, PF9MD_01, PF9MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF8MD_00, PF8MD_01, PF8MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4) {
+ PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4) {
+ PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16) {
+ 0, 0, 0, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDRL", 0xfffe3902, 16) {
+ 0, PC14_DATA, PC13_DATA, PC12_DATA,
+ PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16) {
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16) {
+ PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
+ PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16) {
+ 0, PF30_DATA, PF29_DATA, PF28_DATA,
+ PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA }
+ },
+ { PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16) {
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7203_pinmux_info = {
+ .name = "sh7203_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_LCD_DATA0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
new file mode 100644
index 000000000000..2ba5639dcf34
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -0,0 +1,2131 @@
+/*
+ * SH7264 Pinmux
+ *
+ * Copyright (C) 2012 Renesas Electronics Europe Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7264.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ /* Port A */
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ /* Port B */
+ PB22_DATA, PB21_DATA, PB20_DATA,
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA,
+ PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA,
+ /* Port C */
+ PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ /* Port D */
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ /* Port E */
+ PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ /* Port F */
+ PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ /* Port G */
+ PG24_DATA,
+ PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA,
+ PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
+ PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ /* Port H */
+ /* NOTE - Port H does not have a Data Register, but PH Data is
+ connected to PH Port Register */
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ /* Port I - not on device */
+ /* Port J */
+ PJ12_DATA,
+ PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA,
+ /* Port K */
+ PK12_DATA,
+ PK11_DATA, PK10_DATA, PK9_DATA, PK8_DATA,
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ FORCE_IN,
+ /* Port A */
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ /* Port B */
+ PB22_IN, PB21_IN, PB20_IN,
+ PB19_IN, PB18_IN, PB17_IN, PB16_IN,
+ PB15_IN, PB14_IN, PB13_IN, PB12_IN,
+ PB11_IN, PB10_IN, PB9_IN, PB8_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN,
+ /* Port C */
+ PC10_IN, PC9_IN, PC8_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ /* Port D */
+ PD15_IN, PD14_IN, PD13_IN, PD12_IN,
+ PD11_IN, PD10_IN, PD9_IN, PD8_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ /* Port E */
+ PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ /* Port F */
+ PF12_IN,
+ PF11_IN, PF10_IN, PF9_IN, PF8_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ /* Port G */
+ PG24_IN,
+ PG23_IN, PG22_IN, PG21_IN, PG20_IN,
+ PG19_IN, PG18_IN, PG17_IN, PG16_IN,
+ PG15_IN, PG14_IN, PG13_IN, PG12_IN,
+ PG11_IN, PG10_IN, PG9_IN, PG8_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+ /* Port H - Port H does not have a Data Register */
+ /* Port I - not on device */
+ /* Port J */
+ PJ12_IN,
+ PJ11_IN, PJ10_IN, PJ9_IN, PJ8_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN,
+ /* Port K */
+ PK12_IN,
+ PK11_IN, PK10_IN, PK9_IN, PK8_IN,
+ PK7_IN, PK6_IN, PK5_IN, PK4_IN,
+ PK3_IN, PK2_IN, PK1_IN, PK0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ FORCE_OUT,
+ /* Port A */
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ /* Port B */
+ PB22_OUT, PB21_OUT, PB20_OUT,
+ PB19_OUT, PB18_OUT, PB17_OUT, PB16_OUT,
+ PB15_OUT, PB14_OUT, PB13_OUT, PB12_OUT,
+ PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT,
+ /* Port C */
+ PC10_OUT, PC9_OUT, PC8_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ /* Port D */
+ PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT,
+ PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ /* Port E */
+ PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ /* Port F */
+ PF12_OUT,
+ PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ /* Port G */
+ PG24_OUT,
+ PG23_OUT, PG22_OUT, PG21_OUT, PG20_OUT,
+ PG19_OUT, PG18_OUT, PG17_OUT, PG16_OUT,
+ PG15_OUT, PG14_OUT, PG13_OUT, PG12_OUT,
+ PG11_OUT, PG10_OUT, PG9_OUT, PG8_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+ /* Port H - Port H does not have a Data Register */
+ /* Port I - not on device */
+ /* Port J */
+ PJ12_OUT,
+ PJ11_OUT, PJ10_OUT, PJ9_OUT, PJ8_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT,
+ /* Port K */
+ PK12_OUT,
+ PK11_OUT, PK10_OUT, PK9_OUT, PK8_OUT,
+ PK7_OUT, PK6_OUT, PK5_OUT, PK4_OUT,
+ PK3_OUT, PK2_OUT, PK1_OUT, PK0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ /* Port A */
+ PA3_IOR_IN, PA3_IOR_OUT,
+ PA2_IOR_IN, PA2_IOR_OUT,
+ PA1_IOR_IN, PA1_IOR_OUT,
+ PA0_IOR_IN, PA0_IOR_OUT,
+
+ /* Port B */
+ PB11_IOR_IN, PB11_IOR_OUT,
+ PB10_IOR_IN, PB10_IOR_OUT,
+ PB9_IOR_IN, PB9_IOR_OUT,
+ PB8_IOR_IN, PB8_IOR_OUT,
+
+ PB22MD_00, PB22MD_01, PB22MD_10,
+ PB21MD_0, PB21MD_1,
+ PB20MD_0, PB20MD_1,
+ PB19MD_00, PB19MD_01, PB19MD_10, PB19MD_11,
+ PB18MD_00, PB18MD_01, PB18MD_10, PB18MD_11,
+ PB17MD_00, PB17MD_01, PB17MD_10, PB17MD_11,
+ PB16MD_00, PB16MD_01, PB16MD_10, PB16MD_11,
+ PB15MD_00, PB15MD_01, PB15MD_10, PB15MD_11,
+ PB14MD_00, PB14MD_01, PB14MD_10, PB14MD_11,
+ PB13MD_00, PB13MD_01, PB13MD_10, PB13MD_11,
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+ PB11MD_00, PB11MD_01, PB11MD_10, PB11MD_11,
+ PB10MD_00, PB10MD_01, PB10MD_10, PB10MD_11,
+ PB9MD_00, PB9MD_01, PB9MD_10, PB9MD_11,
+ PB8MD_00, PB8MD_01, PB8MD_10, PB8MD_11,
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+ PB3MD_0, PB3MD_1,
+ PB2MD_0, PB2MD_1,
+ PB1MD_0, PB1MD_1,
+
+ /* Port C */
+ PC14_IOR_IN, PC14_IOR_OUT,
+ PC13_IOR_IN, PC13_IOR_OUT,
+ PC12_IOR_IN, PC12_IOR_OUT,
+ PC11_IOR_IN, PC11_IOR_OUT,
+ PC10_IOR_IN, PC10_IOR_OUT,
+ PC9_IOR_IN, PC9_IOR_OUT,
+ PC8_IOR_IN, PC8_IOR_OUT,
+ PC7_IOR_IN, PC7_IOR_OUT,
+ PC6_IOR_IN, PC6_IOR_OUT,
+ PC5_IOR_IN, PC5_IOR_OUT,
+ PC4_IOR_IN, PC4_IOR_OUT,
+ PC3_IOR_IN, PC3_IOR_OUT,
+ PC2_IOR_IN, PC2_IOR_OUT,
+ PC1_IOR_IN, PC1_IOR_OUT,
+ PC0_IOR_IN, PC0_IOR_OUT,
+
+ PC10MD_0, PC10MD_1,
+ PC9MD_0, PC9MD_1,
+ PC8MD_00, PC8MD_01, PC8MD_10, PC8MD_11,
+ PC7MD_00, PC7MD_01, PC7MD_10, PC7MD_11,
+ PC6MD_00, PC6MD_01, PC6MD_10, PC6MD_11,
+ PC5MD_00, PC5MD_01, PC5MD_10, PC5MD_11,
+ PC4MD_0, PC4MD_1,
+ PC3MD_0, PC3MD_1,
+ PC2MD_0, PC2MD_1,
+ PC1MD_0, PC1MD_1,
+ PC0MD_0, PC0MD_1,
+
+ /* Port D */
+ PD15_IOR_IN, PD15_IOR_OUT,
+ PD14_IOR_IN, PD14_IOR_OUT,
+ PD13_IOR_IN, PD13_IOR_OUT,
+ PD12_IOR_IN, PD12_IOR_OUT,
+ PD11_IOR_IN, PD11_IOR_OUT,
+ PD10_IOR_IN, PD10_IOR_OUT,
+ PD9_IOR_IN, PD9_IOR_OUT,
+ PD8_IOR_IN, PD8_IOR_OUT,
+ PD7_IOR_IN, PD7_IOR_OUT,
+ PD6_IOR_IN, PD6_IOR_OUT,
+ PD5_IOR_IN, PD5_IOR_OUT,
+ PD4_IOR_IN, PD4_IOR_OUT,
+ PD3_IOR_IN, PD3_IOR_OUT,
+ PD2_IOR_IN, PD2_IOR_OUT,
+ PD1_IOR_IN, PD1_IOR_OUT,
+ PD0_IOR_IN, PD0_IOR_OUT,
+
+ PD15MD_00, PD15MD_01, PD15MD_10, PD15MD_11,
+ PD14MD_00, PD14MD_01, PD14MD_10, PD14MD_11,
+ PD13MD_00, PD13MD_01, PD13MD_10, PD13MD_11,
+ PD12MD_00, PD12MD_01, PD12MD_10, PD12MD_11,
+ PD11MD_00, PD11MD_01, PD11MD_10, PD11MD_11,
+ PD10MD_00, PD10MD_01, PD10MD_10, PD10MD_11,
+ PD9MD_00, PD9MD_01, PD9MD_10, PD9MD_11,
+ PD8MD_00, PD8MD_01, PD8MD_10, PD8MD_11,
+ PD7MD_00, PD7MD_01, PD7MD_10, PD7MD_11,
+ PD6MD_00, PD6MD_01, PD6MD_10, PD6MD_11,
+ PD5MD_00, PD5MD_01, PD5MD_10, PD5MD_11,
+ PD4MD_00, PD4MD_01, PD4MD_10, PD4MD_11,
+ PD3MD_00, PD3MD_01, PD3MD_10, PD3MD_11,
+ PD2MD_00, PD2MD_01, PD2MD_10, PD2MD_11,
+ PD1MD_00, PD1MD_01, PD1MD_10, PD1MD_11,
+ PD0MD_00, PD0MD_01, PD0MD_10, PD0MD_11,
+
+ /* Port E */
+ PE5_IOR_IN, PE5_IOR_OUT,
+ PE4_IOR_IN, PE4_IOR_OUT,
+ PE3_IOR_IN, PE3_IOR_OUT,
+ PE2_IOR_IN, PE2_IOR_OUT,
+ PE1_IOR_IN, PE1_IOR_OUT,
+ PE0_IOR_IN, PE0_IOR_OUT,
+
+ PE5MD_00, PE5MD_01, PE5MD_10, PE5MD_11,
+ PE4MD_00, PE4MD_01, PE4MD_10, PE4MD_11,
+ PE3MD_00, PE3MD_01, PE3MD_10, PE3MD_11,
+ PE2MD_00, PE2MD_01, PE2MD_10, PE2MD_11,
+ PE1MD_000, PE1MD_001, PE1MD_010, PE1MD_011,
+ PE1MD_100, PE1MD_101, PE1MD_110, PE1MD_111,
+ PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11,
+
+ /* Port F */
+ PF12_IOR_IN, PF12_IOR_OUT,
+ PF11_IOR_IN, PF11_IOR_OUT,
+ PF10_IOR_IN, PF10_IOR_OUT,
+ PF9_IOR_IN, PF9_IOR_OUT,
+ PF8_IOR_IN, PF8_IOR_OUT,
+ PF7_IOR_IN, PF7_IOR_OUT,
+ PF6_IOR_IN, PF6_IOR_OUT,
+ PF5_IOR_IN, PF5_IOR_OUT,
+ PF4_IOR_IN, PF4_IOR_OUT,
+ PF3_IOR_IN, PF3_IOR_OUT,
+ PF2_IOR_IN, PF2_IOR_OUT,
+ PF1_IOR_IN, PF1_IOR_OUT,
+ PF0_IOR_IN, PF0_IOR_OUT,
+
+ PF12MD_000, PF12MD_001, PF12MD_010, PF12MD_011,
+ PF12MD_100, PF12MD_101, PF12MD_110, PF12MD_111,
+ PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
+ PF11MD_100, PF11MD_101, PF11MD_110, PF11MD_111,
+ PF10MD_000, PF10MD_001, PF10MD_010, PF10MD_011,
+ PF10MD_100, PF10MD_101, PF10MD_110, PF10MD_111,
+ PF9MD_000, PF9MD_001, PF9MD_010, PF9MD_011,
+ PF9MD_100, PF9MD_101, PF9MD_110, PF9MD_111,
+ PF8MD_00, PF8MD_01, PF8MD_10, PF8MD_11,
+ PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
+ PF7MD_100, PF7MD_101, PF7MD_110, PF7MD_111,
+ PF6MD_000, PF6MD_001, PF6MD_010, PF6MD_011,
+ PF6MD_100, PF6MD_101, PF6MD_110, PF6MD_111,
+ PF5MD_000, PF5MD_001, PF5MD_010, PF5MD_011,
+ PF5MD_100, PF5MD_101, PF5MD_110, PF5MD_111,
+ PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
+ PF4MD_100, PF4MD_101, PF4MD_110, PF4MD_111,
+ PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
+ PF3MD_100, PF3MD_101, PF3MD_110, PF3MD_111,
+ PF2MD_000, PF2MD_001, PF2MD_010, PF2MD_011,
+ PF2MD_100, PF2MD_101, PF2MD_110, PF2MD_111,
+ PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
+ PF1MD_100, PF1MD_101, PF1MD_110, PF1MD_111,
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
+ PF0MD_100, PF0MD_101, PF0MD_110, PF0MD_111,
+
+ /* Port G */
+ PG24_IOR_IN, PG24_IOR_OUT,
+ PG23_IOR_IN, PG23_IOR_OUT,
+ PG22_IOR_IN, PG22_IOR_OUT,
+ PG21_IOR_IN, PG21_IOR_OUT,
+ PG20_IOR_IN, PG20_IOR_OUT,
+ PG19_IOR_IN, PG19_IOR_OUT,
+ PG18_IOR_IN, PG18_IOR_OUT,
+ PG17_IOR_IN, PG17_IOR_OUT,
+ PG16_IOR_IN, PG16_IOR_OUT,
+ PG15_IOR_IN, PG15_IOR_OUT,
+ PG14_IOR_IN, PG14_IOR_OUT,
+ PG13_IOR_IN, PG13_IOR_OUT,
+ PG12_IOR_IN, PG12_IOR_OUT,
+ PG11_IOR_IN, PG11_IOR_OUT,
+ PG10_IOR_IN, PG10_IOR_OUT,
+ PG9_IOR_IN, PG9_IOR_OUT,
+ PG8_IOR_IN, PG8_IOR_OUT,
+ PG7_IOR_IN, PG7_IOR_OUT,
+ PG6_IOR_IN, PG6_IOR_OUT,
+ PG5_IOR_IN, PG5_IOR_OUT,
+ PG4_IOR_IN, PG4_IOR_OUT,
+ PG3_IOR_IN, PG3_IOR_OUT,
+ PG2_IOR_IN, PG2_IOR_OUT,
+ PG1_IOR_IN, PG1_IOR_OUT,
+ PG0_IOR_IN, PG0_IOR_OUT,
+
+ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11,
+ PG23MD_00, PG23MD_01, PG23MD_10, PG23MD_11,
+ PG22MD_00, PG22MD_01, PG22MD_10, PG22MD_11,
+ PG21MD_00, PG21MD_01, PG21MD_10, PG21MD_11,
+ PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
+ PG20MD_100, PG20MD_101, PG20MD_110, PG20MD_111,
+ PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
+ PG19MD_100, PG19MD_101, PG19MD_110, PG19MD_111,
+ PG18MD_000, PG18MD_001, PG18MD_010, PG18MD_011,
+ PG18MD_100, PG18MD_101, PG18MD_110, PG18MD_111,
+ PG17MD_000, PG17MD_001, PG17MD_010, PG17MD_011,
+ PG17MD_100, PG17MD_101, PG17MD_110, PG17MD_111,
+ PG16MD_000, PG16MD_001, PG16MD_010, PG16MD_011,
+ PG16MD_100, PG16MD_101, PG16MD_110, PG16MD_111,
+ PG15MD_000, PG15MD_001, PG15MD_010, PG15MD_011,
+ PG15MD_100, PG15MD_101, PG15MD_110, PG15MD_111,
+ PG14MD_000, PG14MD_001, PG14MD_010, PG14MD_011,
+ PG14MD_100, PG14MD_101, PG14MD_110, PG14MD_111,
+ PG13MD_000, PG13MD_001, PG13MD_010, PG13MD_011,
+ PG13MD_100, PG13MD_101, PG13MD_110, PG13MD_111,
+ PG12MD_000, PG12MD_001, PG12MD_010, PG12MD_011,
+ PG12MD_100, PG12MD_101, PG12MD_110, PG12MD_111,
+ PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
+ PG11MD_100, PG11MD_101, PG11MD_110, PG11MD_111,
+ PG10MD_000, PG10MD_001, PG10MD_010, PG10MD_011,
+ PG10MD_100, PG10MD_101, PG10MD_110, PG10MD_111,
+ PG9MD_000, PG9MD_001, PG9MD_010, PG9MD_011,
+ PG9MD_100, PG9MD_101, PG9MD_110, PG9MD_111,
+ PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
+ PG8MD_100, PG8MD_101, PG8MD_110, PG8MD_111,
+ PG7MD_00, PG7MD_01, PG7MD_10, PG7MD_11,
+ PG6MD_00, PG6MD_01, PG6MD_10, PG6MD_11,
+ PG5MD_00, PG5MD_01, PG5MD_10, PG5MD_11,
+ PG4MD_00, PG4MD_01, PG4MD_10, PG4MD_11,
+ PG3MD_00, PG3MD_01, PG3MD_10, PG3MD_11,
+ PG2MD_00, PG2MD_01, PG2MD_10, PG2MD_11,
+ PG1MD_00, PG1MD_01, PG1MD_10, PG1MD_11,
+ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
+ PG0MD_100, PG0MD_101, PG0MD_110, PG0MD_111,
+
+ /* Port H */
+ PH7MD_0, PH7MD_1,
+ PH6MD_0, PH6MD_1,
+ PH5MD_0, PH5MD_1,
+ PH4MD_0, PH4MD_1,
+ PH3MD_0, PH3MD_1,
+ PH2MD_0, PH2MD_1,
+ PH1MD_0, PH1MD_1,
+ PH0MD_0, PH0MD_1,
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PJ11_IOR_IN, PJ11_IOR_OUT,
+ PJ10_IOR_IN, PJ10_IOR_OUT,
+ PJ9_IOR_IN, PJ9_IOR_OUT,
+ PJ8_IOR_IN, PJ8_IOR_OUT,
+ PJ7_IOR_IN, PJ7_IOR_OUT,
+ PJ6_IOR_IN, PJ6_IOR_OUT,
+ PJ5_IOR_IN, PJ5_IOR_OUT,
+ PJ4_IOR_IN, PJ4_IOR_OUT,
+ PJ3_IOR_IN, PJ3_IOR_OUT,
+ PJ2_IOR_IN, PJ2_IOR_OUT,
+ PJ1_IOR_IN, PJ1_IOR_OUT,
+ PJ0_IOR_IN, PJ0_IOR_OUT,
+
+ PJ11MD_00, PJ11MD_01, PJ11MD_10, PJ11MD_11,
+ PJ10MD_00, PJ10MD_01, PJ10MD_10, PJ10MD_11,
+ PJ9MD_00, PJ9MD_01, PJ9MD_10, PJ9MD_11,
+ PJ8MD_00, PJ8MD_01, PJ8MD_10, PJ8MD_11,
+ PJ7MD_00, PJ7MD_01, PJ7MD_10, PJ7MD_11,
+ PJ6MD_00, PJ6MD_01, PJ6MD_10, PJ6MD_11,
+ PJ5MD_00, PJ5MD_01, PJ5MD_10, PJ5MD_11,
+ PJ4MD_00, PJ4MD_01, PJ4MD_10, PJ4MD_11,
+ PJ3MD_00, PJ3MD_01, PJ3MD_10, PJ3MD_11,
+ PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
+ PJ2MD_100, PJ2MD_101, PJ2MD_110, PJ2MD_111,
+ PJ1MD_000, PJ1MD_001, PJ1MD_010, PJ1MD_011,
+ PJ1MD_100, PJ1MD_101, PJ1MD_110, PJ1MD_111,
+ PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
+ PJ0MD_100, PJ0MD_101, PJ0MD_110, PJ0MD_111,
+
+ /* Port K */
+ PK11_IOR_IN, PK11_IOR_OUT,
+ PK10_IOR_IN, PK10_IOR_OUT,
+ PK9_IOR_IN, PK9_IOR_OUT,
+ PK8_IOR_IN, PK8_IOR_OUT,
+ PK7_IOR_IN, PK7_IOR_OUT,
+ PK6_IOR_IN, PK6_IOR_OUT,
+ PK5_IOR_IN, PK5_IOR_OUT,
+ PK4_IOR_IN, PK4_IOR_OUT,
+ PK3_IOR_IN, PK3_IOR_OUT,
+ PK2_IOR_IN, PK2_IOR_OUT,
+ PK1_IOR_IN, PK1_IOR_OUT,
+ PK0_IOR_IN, PK0_IOR_OUT,
+
+ PK11MD_00, PK11MD_01, PK11MD_10, PK11MD_11,
+ PK10MD_00, PK10MD_01, PK10MD_10, PK10MD_11,
+ PK9MD_00, PK9MD_01, PK9MD_10, PK9MD_11,
+ PK8MD_00, PK8MD_01, PK8MD_10, PK8MD_11,
+ PK7MD_00, PK7MD_01, PK7MD_10, PK7MD_11,
+ PK6MD_00, PK6MD_01, PK6MD_10, PK6MD_11,
+ PK5MD_00, PK5MD_01, PK5MD_10, PK5MD_11,
+ PK4MD_00, PK4MD_01, PK4MD_10, PK4MD_11,
+ PK3MD_00, PK3MD_01, PK3MD_10, PK3MD_11,
+ PK2MD_00, PK2MD_01, PK2MD_10, PK2MD_11,
+ PK1MD_00, PK1MD_01, PK1MD_10, PK1MD_11,
+ PK0MD_00, PK0MD_01, PK0MD_10, PK0MD_11,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* Port A */
+
+ /* Port B */
+
+ /* Port C */
+
+ /* Port D */
+
+ /* Port E */
+
+ /* Port F */
+
+ /* Port G */
+
+ /* Port H */
+ PHAN7_MARK, PHAN6_MARK, PHAN5_MARK, PHAN4_MARK,
+ PHAN3_MARK, PHAN2_MARK, PHAN1_MARK, PHAN0_MARK,
+
+ /* Port I - not on device */
+
+ /* Port J */
+
+ /* Port K */
+
+ IRQ7_PC_MARK, IRQ6_PC_MARK, IRQ5_PC_MARK, IRQ4_PC_MARK,
+ IRQ3_PG_MARK, IRQ2_PG_MARK, IRQ1_PJ_MARK, IRQ0_PJ_MARK,
+ IRQ3_PE_MARK, IRQ2_PE_MARK, IRQ1_PE_MARK, IRQ0_PE_MARK,
+
+ PINT7_PG_MARK, PINT6_PG_MARK, PINT5_PG_MARK, PINT4_PG_MARK,
+ PINT3_PG_MARK, PINT2_PG_MARK, PINT1_PG_MARK, PINT0_PG_MARK,
+
+ SD_CD_MARK, SD_D0_MARK, SD_D1_MARK, SD_D2_MARK, SD_D3_MARK,
+ SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
+ CRX0_MARK, CRX1_MARK,
+ CTX0_MARK, CTX1_MARK,
+
+ PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
+ PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
+ PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
+ PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
+ IERXD_MARK, IETXD_MARK,
+ CRX0_CRX1_MARK,
+ WDTOVF_MARK,
+
+ CRX0X1_MARK,
+
+ /* DMAC */
+ TEND0_MARK, DACK0_MARK, DREQ0_MARK,
+ TEND1_MARK, DACK1_MARK, DREQ1_MARK,
+
+ /* ADC */
+ ADTRG_MARK,
+
+ /* BSC */
+ A25_MARK, A24_MARK,
+ A23_MARK, A22_MARK, A21_MARK, A20_MARK,
+ A19_MARK, A18_MARK, A17_MARK, A16_MARK,
+ A15_MARK, A14_MARK, A13_MARK, A12_MARK,
+ A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+ A7_MARK, A6_MARK, A5_MARK, A4_MARK,
+ A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ D7_MARK, D6_MARK, D5_MARK, D4_MARK,
+ D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+ BS_MARK,
+ CS4_MARK, CS3_MARK, CS2_MARK, CS1_MARK, CS0_MARK,
+ CS6CE1B_MARK, CS5CE1A_MARK,
+ CE2A_MARK, CE2B_MARK,
+ RD_MARK, RDWR_MARK,
+ ICIOWRAH_MARK,
+ ICIORD_MARK,
+ WE1DQMUWE_MARK,
+ WE0DQML_MARK,
+ RAS_MARK, CAS_MARK, CKE_MARK,
+ WAIT_MARK, BREQ_MARK, BACK_MARK, IOIS16_MARK,
+
+ /* TMU */
+ TIOC0A_MARK, TIOC0B_MARK, TIOC0C_MARK, TIOC0D_MARK,
+ TIOC1A_MARK, TIOC1B_MARK,
+ TIOC2A_MARK, TIOC2B_MARK,
+ TIOC3A_MARK, TIOC3B_MARK, TIOC3C_MARK, TIOC3D_MARK,
+ TIOC4A_MARK, TIOC4B_MARK, TIOC4C_MARK, TIOC4D_MARK,
+ TCLKA_MARK, TCLKB_MARK, TCLKC_MARK, TCLKD_MARK,
+
+ /* SCIF */
+ SCK0_MARK, SCK1_MARK, SCK2_MARK, SCK3_MARK,
+ RXD0_MARK, RXD1_MARK, RXD2_MARK, RXD3_MARK,
+ TXD0_MARK, TXD1_MARK, TXD2_MARK, TXD3_MARK,
+ RXD4_MARK, RXD5_MARK, RXD6_MARK, RXD7_MARK,
+ TXD4_MARK, TXD5_MARK, TXD6_MARK, TXD7_MARK,
+ RTS1_MARK, RTS3_MARK,
+ CTS1_MARK, CTS3_MARK,
+
+ /* RSPI */
+ RSPCK0_MARK, RSPCK1_MARK,
+ MOSI0_MARK, MOSI1_MARK,
+ MISO0_PF12_MARK, MISO1_MARK, MISO1_PG19_MARK,
+ SSL00_MARK, SSL10_MARK,
+
+ /* IIC3 */
+ SCL0_MARK, SCL1_MARK, SCL2_MARK,
+ SDA0_MARK, SDA1_MARK, SDA2_MARK,
+
+ /* SSI */
+ SSISCK0_MARK,
+ SSIWS0_MARK,
+ SSITXD0_MARK,
+ SSIRXD0_MARK,
+ SSIWS1_MARK, SSIWS2_MARK, SSIWS3_MARK,
+ SSISCK1_MARK, SSISCK2_MARK, SSISCK3_MARK,
+ SSIDATA1_MARK, SSIDATA2_MARK, SSIDATA3_MARK,
+ AUDIO_CLK_MARK,
+
+ /* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
+ SIOFTXD_MARK, SIOFRXD_MARK, SIOFSYNC_MARK, SIOFSCK_MARK,
+
+ /* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
+ SPDIF_IN_MARK, SPDIF_OUT_MARK,
+
+ /* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
+ FCE_MARK,
+ FRB_MARK,
+
+ /* VDC3 */
+ DV_CLK_MARK,
+ DV_VSYNC_MARK, DV_HSYNC_MARK,
+ DV_DATA7_MARK, DV_DATA6_MARK, DV_DATA5_MARK, DV_DATA4_MARK,
+ DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK,
+ LCD_CLK_MARK, LCD_EXTCLK_MARK,
+ LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK,
+ LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
+ LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
+ LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
+ LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+ LCD_M_DISP_MARK,
+ PINMUX_MARK_END,
+};
+
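+/*
+ * Pin function table.  Each PINMUX_DATA(id, cfgs...) entry lists the mode
+ * and I/O register enum values that have to be selected to get the pin state
+ * or function named by its first argument (e.g. TIOC2A on PC9 means
+ * programming PC9MD_1); entries whose first argument is a *_DATA id describe
+ * the plain GPIO configuration of that pin.
+ */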
+static pinmux_enum_t pinmux_data[] = {
+
+ /* Port A */
+ PINMUX_DATA(PA3_DATA, PA3_IN),
+ PINMUX_DATA(PA2_DATA, PA2_IN),
+ PINMUX_DATA(PA1_DATA, PA1_IN),
+ PINMUX_DATA(PA0_DATA, PA0_IN),
+
+ /* Port B */
+ PINMUX_DATA(PB22_DATA, PB22MD_00, PB22_IN, PB22_OUT),
+ PINMUX_DATA(A22_MARK, PB22MD_01),
+ PINMUX_DATA(CS4_MARK, PB22MD_10),
+
+ PINMUX_DATA(PB21_DATA, PB21MD_0, PB21_IN, PB21_OUT),
+ PINMUX_DATA(A21_MARK, PB21MD_1),
+ PINMUX_DATA(A20_MARK, PB20MD_1),
+ PINMUX_DATA(A19_MARK, PB19MD_01),
+ PINMUX_DATA(A18_MARK, PB18MD_01),
+ PINMUX_DATA(A17_MARK, PB17MD_01),
+ PINMUX_DATA(A16_MARK, PB16MD_01),
+ PINMUX_DATA(A15_MARK, PB15MD_01),
+ PINMUX_DATA(A14_MARK, PB14MD_01),
+ PINMUX_DATA(A13_MARK, PB13MD_01),
+ PINMUX_DATA(A12_MARK, PB12MD_01),
+ PINMUX_DATA(A11_MARK, PB11MD_01),
+ PINMUX_DATA(A10_MARK, PB10MD_01),
+ PINMUX_DATA(A9_MARK, PB9MD_01),
+ PINMUX_DATA(A8_MARK, PB8MD_01),
+ PINMUX_DATA(A7_MARK, PB7MD_01),
+ PINMUX_DATA(A6_MARK, PB6MD_01),
+ PINMUX_DATA(A5_MARK, PB5MD_01),
+ PINMUX_DATA(A4_MARK, PB4MD_01),
+ PINMUX_DATA(A3_MARK, PB3MD_1),
+ PINMUX_DATA(A2_MARK, PB2MD_1),
+ PINMUX_DATA(A1_MARK, PB1MD_1),
+
+ /* Port C */
+ PINMUX_DATA(PC10_DATA, PC10MD_0),
+ PINMUX_DATA(TIOC2B_MARK, PC10MD_1),
+ PINMUX_DATA(PC9_DATA, PC9MD_0),
+ PINMUX_DATA(TIOC2A_MARK, PC9MD_1),
+ PINMUX_DATA(PC8_DATA, PC8MD_00),
+ PINMUX_DATA(CS3_MARK, PC8MD_01),
+ PINMUX_DATA(TIOC4D_MARK, PC8MD_10),
+ PINMUX_DATA(IRQ7_PC_MARK, PC8MD_11),
+ PINMUX_DATA(PC7_DATA, PC7MD_00),
+ PINMUX_DATA(CKE_MARK, PC7MD_01),
+ PINMUX_DATA(TIOC4C_MARK, PC7MD_10),
+ PINMUX_DATA(IRQ6_PC_MARK, PC7MD_11),
+ PINMUX_DATA(PC6_DATA, PC6MD_00),
+ PINMUX_DATA(CAS_MARK, PC6MD_01),
+ PINMUX_DATA(TIOC4B_MARK, PC6MD_10),
+ PINMUX_DATA(IRQ5_PC_MARK, PC6MD_11),
+ PINMUX_DATA(PC5_DATA, PC5MD_00),
+ PINMUX_DATA(RAS_MARK, PC5MD_01),
+ PINMUX_DATA(TIOC4A_MARK, PC5MD_10),
+ PINMUX_DATA(IRQ4_PC_MARK, PC5MD_11),
+ PINMUX_DATA(PC4_DATA, PC4MD_0),
+ PINMUX_DATA(WE1DQMUWE_MARK, PC4MD_1),
+ PINMUX_DATA(PC3_DATA, PC3MD_0),
+ PINMUX_DATA(WE0DQML_MARK, PC3MD_1),
+ PINMUX_DATA(PC2_DATA, PC2MD_0),
+ PINMUX_DATA(RDWR_MARK, PC2MD_1),
+ PINMUX_DATA(PC1_DATA, PC1MD_0),
+ PINMUX_DATA(RD_MARK, PC1MD_1),
+ PINMUX_DATA(PC0_DATA, PC0MD_0),
+ PINMUX_DATA(CS0_MARK, PC0MD_1),
+
+ /* Port D */
+ PINMUX_DATA(D15_MARK, PD15MD_01),
+ PINMUX_DATA(D14_MARK, PD14MD_01),
+ PINMUX_DATA(D13_MARK, PD13MD_01),
+ PINMUX_DATA(D12_MARK, PD12MD_01),
+ PINMUX_DATA(D11_MARK, PD11MD_01),
+ PINMUX_DATA(D10_MARK, PD10MD_01),
+ PINMUX_DATA(D9_MARK, PD9MD_01),
+ PINMUX_DATA(D8_MARK, PD8MD_01),
+ PINMUX_DATA(D7_MARK, PD7MD_01),
+ PINMUX_DATA(D6_MARK, PD6MD_01),
+ PINMUX_DATA(D5_MARK, PD5MD_01),
+ PINMUX_DATA(D4_MARK, PD4MD_01),
+ PINMUX_DATA(D3_MARK, PD3MD_01),
+ PINMUX_DATA(D2_MARK, PD2MD_01),
+ PINMUX_DATA(D1_MARK, PD1MD_01),
+ PINMUX_DATA(D0_MARK, PD0MD_01),
+
+ /* Port E */
+ PINMUX_DATA(PE5_DATA, PE5MD_00),
+ PINMUX_DATA(SDA2_MARK, PE5MD_01),
+ PINMUX_DATA(DV_HSYNC_MARK, PE5MD_11),
+
+ PINMUX_DATA(PE4_DATA, PE4MD_00),
+ PINMUX_DATA(SCL2_MARK, PE4MD_01),
+ PINMUX_DATA(DV_VSYNC_MARK, PE4MD_11),
+
+ PINMUX_DATA(PE3_DATA, PE3MD_00),
+ PINMUX_DATA(SDA1_MARK, PE3MD_01),
+ PINMUX_DATA(IRQ3_PE_MARK, PE3MD_11),
+
+ PINMUX_DATA(PE2_DATA, PE2MD_00),
+ PINMUX_DATA(SCL1_MARK, PE2MD_01),
+ PINMUX_DATA(IRQ2_PE_MARK, PE2MD_11),
+
+ PINMUX_DATA(PE1_DATA, PE1MD_000),
+ PINMUX_DATA(SDA0_MARK, PE1MD_001),
+ PINMUX_DATA(IOIS16_MARK, PE1MD_010),
+ PINMUX_DATA(IRQ1_PE_MARK, PE1MD_011),
+ PINMUX_DATA(TCLKA_MARK, PE1MD_100),
+ PINMUX_DATA(ADTRG_MARK, PE1MD_101),
+
+ PINMUX_DATA(PE0_DATA, PE0MD_00),
+ PINMUX_DATA(SCL0_MARK, PE0MD_01),
+ PINMUX_DATA(AUDIO_CLK_MARK, PE0MD_10),
+ PINMUX_DATA(IRQ0_PE_MARK, PE0MD_11),
+
+ /* Port F */
+ PINMUX_DATA(PF12_DATA, PF12MD_000),
+ PINMUX_DATA(BS_MARK, PF12MD_001),
+ PINMUX_DATA(MISO0_PF12_MARK, PF12MD_011),
+ PINMUX_DATA(TIOC3D_MARK, PF12MD_100),
+ PINMUX_DATA(SPDIF_OUT_MARK, PF12MD_101),
+
+ PINMUX_DATA(PF11_DATA, PF11MD_000),
+ PINMUX_DATA(A25_MARK, PF11MD_001),
+ PINMUX_DATA(SSIDATA3_MARK, PF11MD_010),
+ PINMUX_DATA(MOSI0_MARK, PF11MD_011),
+ PINMUX_DATA(TIOC3C_MARK, PF11MD_100),
+ PINMUX_DATA(SPDIF_IN_MARK, PF11MD_101),
+
+ PINMUX_DATA(PF10_DATA, PF10MD_000),
+ PINMUX_DATA(A24_MARK, PF10MD_001),
+ PINMUX_DATA(SSIWS3_MARK, PF10MD_010),
+ PINMUX_DATA(SSL00_MARK, PF10MD_011),
+ PINMUX_DATA(TIOC3B_MARK, PF10MD_100),
+ PINMUX_DATA(FCE_MARK, PF10MD_101),
+
+ PINMUX_DATA(PF9_DATA, PF9MD_000),
+ PINMUX_DATA(A23_MARK, PF9MD_001),
+ PINMUX_DATA(SSISCK3_MARK, PF9MD_010),
+ PINMUX_DATA(RSPCK0_MARK, PF9MD_011),
+ PINMUX_DATA(TIOC3A_MARK, PF9MD_100),
+ PINMUX_DATA(FRB_MARK, PF9MD_101),
+
+ PINMUX_DATA(PF8_DATA, PF8MD_00),
+ PINMUX_DATA(CE2B_MARK, PF8MD_01),
+ PINMUX_DATA(SSIDATA3_MARK, PF8MD_10),
+ PINMUX_DATA(DV_CLK_MARK, PF8MD_11),
+
+ PINMUX_DATA(PF7_DATA, PF7MD_000),
+ PINMUX_DATA(CE2A_MARK, PF7MD_001),
+ PINMUX_DATA(SSIWS3_MARK, PF7MD_010),
+ PINMUX_DATA(DV_DATA7_MARK, PF7MD_011),
+ PINMUX_DATA(TCLKD_MARK, PF7MD_100),
+
+ PINMUX_DATA(PF6_DATA, PF6MD_000),
+ PINMUX_DATA(CS6CE1B_MARK, PF6MD_001),
+ PINMUX_DATA(SSISCK3_MARK, PF6MD_010),
+ PINMUX_DATA(DV_DATA6_MARK, PF6MD_011),
+ PINMUX_DATA(TCLKB_MARK, PF6MD_100),
+
+ PINMUX_DATA(PF5_DATA, PF5MD_000),
+ PINMUX_DATA(CS5CE1A_MARK, PF5MD_001),
+ PINMUX_DATA(SSIDATA2_MARK, PF5MD_010),
+ PINMUX_DATA(DV_DATA5_MARK, PF5MD_011),
+ PINMUX_DATA(TCLKC_MARK, PF5MD_100),
+
+ PINMUX_DATA(PF4_DATA, PF4MD_000),
+ PINMUX_DATA(ICIOWRAH_MARK, PF4MD_001),
+ PINMUX_DATA(SSIWS2_MARK, PF4MD_010),
+ PINMUX_DATA(DV_DATA4_MARK, PF4MD_011),
+ PINMUX_DATA(TXD3_MARK, PF4MD_100),
+
+ PINMUX_DATA(PF3_DATA, PF3MD_000),
+ PINMUX_DATA(ICIORD_MARK, PF3MD_001),
+ PINMUX_DATA(SSISCK2_MARK, PF3MD_010),
+ PINMUX_DATA(DV_DATA3_MARK, PF3MD_011),
+ PINMUX_DATA(RXD3_MARK, PF3MD_100),
+
+ PINMUX_DATA(PF2_DATA, PF2MD_000),
+ PINMUX_DATA(BACK_MARK, PF2MD_001),
+ PINMUX_DATA(SSIDATA1_MARK, PF2MD_010),
+ PINMUX_DATA(DV_DATA2_MARK, PF2MD_011),
+ PINMUX_DATA(TXD2_MARK, PF2MD_100),
+ PINMUX_DATA(DACK0_MARK, PF2MD_101),
+
+ PINMUX_DATA(PF1_DATA, PF1MD_000),
+ PINMUX_DATA(BREQ_MARK, PF1MD_001),
+ PINMUX_DATA(SSIWS1_MARK, PF1MD_010),
+ PINMUX_DATA(DV_DATA1_MARK, PF1MD_011),
+ PINMUX_DATA(RXD2_MARK, PF1MD_100),
+ PINMUX_DATA(DREQ0_MARK, PF1MD_101),
+
+ PINMUX_DATA(PF0_DATA, PF0MD_000),
+ PINMUX_DATA(WAIT_MARK, PF0MD_001),
+ PINMUX_DATA(SSISCK1_MARK, PF0MD_010),
+ PINMUX_DATA(DV_DATA0_MARK, PF0MD_011),
+ PINMUX_DATA(SCK2_MARK, PF0MD_100),
+ PINMUX_DATA(TEND0_MARK, PF0MD_101),
+
+ /* Port G */
+ PINMUX_DATA(PG24_DATA, PG24MD_00),
+ PINMUX_DATA(MOSI0_MARK, PG24MD_01),
+ PINMUX_DATA(TIOC0D_MARK, PG24MD_10),
+
+ PINMUX_DATA(PG23_DATA, PG23MD_00),
+ PINMUX_DATA(MOSI1_MARK, PG23MD_01),
+ PINMUX_DATA(TIOC0C_MARK, PG23MD_10),
+
+ PINMUX_DATA(PG22_DATA, PG22MD_00),
+ PINMUX_DATA(SSL10_MARK, PG22MD_01),
+ PINMUX_DATA(TIOC0B_MARK, PG22MD_10),
+
+ PINMUX_DATA(PG21_DATA, PG21MD_00),
+ PINMUX_DATA(RSPCK1_MARK, PG21MD_01),
+ PINMUX_DATA(TIOC0A_MARK, PG21MD_10),
+
+ PINMUX_DATA(PG20_DATA, PG20MD_000),
+ PINMUX_DATA(LCD_EXTCLK_MARK, PG20MD_001),
+ PINMUX_DATA(MISO1_MARK, PG20MD_011),
+ PINMUX_DATA(TXD7_MARK, PG20MD_100),
+
+ PINMUX_DATA(PG19_DATA, PG19MD_000),
+ PINMUX_DATA(LCD_CLK_MARK, PG19MD_001),
+ PINMUX_DATA(TIOC2B_MARK, PG19MD_010),
+ PINMUX_DATA(MISO1_PG19_MARK, PG19MD_011),
+ PINMUX_DATA(RXD7_MARK, PG19MD_100),
+
+ PINMUX_DATA(PG18_DATA, PG18MD_000),
+ PINMUX_DATA(LCD_DE_MARK, PG18MD_001),
+ PINMUX_DATA(TIOC2A_MARK, PG18MD_010),
+ PINMUX_DATA(SSL10_MARK, PG18MD_011),
+ PINMUX_DATA(TXD6_MARK, PG18MD_100),
+
+ PINMUX_DATA(PG17_DATA, PG17MD_000),
+ PINMUX_DATA(LCD_HSYNC_MARK, PG17MD_001),
+ PINMUX_DATA(TIOC1B_MARK, PG17MD_010),
+ PINMUX_DATA(RSPCK1_MARK, PG17MD_011),
+ PINMUX_DATA(RXD6_MARK, PG17MD_100),
+
+ PINMUX_DATA(PG16_DATA, PG16MD_000),
+ PINMUX_DATA(LCD_VSYNC_MARK, PG16MD_001),
+ PINMUX_DATA(TIOC1A_MARK, PG16MD_010),
+ PINMUX_DATA(TXD3_MARK, PG16MD_011),
+ PINMUX_DATA(CTS1_MARK, PG16MD_100),
+
+ PINMUX_DATA(PG15_DATA, PG15MD_000),
+ PINMUX_DATA(LCD_DATA15_MARK, PG15MD_001),
+ PINMUX_DATA(TIOC0D_MARK, PG15MD_010),
+ PINMUX_DATA(RXD3_MARK, PG15MD_011),
+ PINMUX_DATA(RTS1_MARK, PG15MD_100),
+
+ PINMUX_DATA(PG14_DATA, PG14MD_000),
+ PINMUX_DATA(LCD_DATA14_MARK, PG14MD_001),
+ PINMUX_DATA(TIOC0C_MARK, PG14MD_010),
+ PINMUX_DATA(SCK1_MARK, PG14MD_100),
+
+ PINMUX_DATA(PG13_DATA, PG13MD_000),
+ PINMUX_DATA(LCD_DATA13_MARK, PG13MD_001),
+ PINMUX_DATA(TIOC0B_MARK, PG13MD_010),
+ PINMUX_DATA(TXD1_MARK, PG13MD_100),
+
+ PINMUX_DATA(PG12_DATA, PG12MD_000),
+ PINMUX_DATA(LCD_DATA12_MARK, PG12MD_001),
+ PINMUX_DATA(TIOC0A_MARK, PG12MD_010),
+ PINMUX_DATA(RXD1_MARK, PG12MD_100),
+
+ PINMUX_DATA(PG11_DATA, PG11MD_000),
+ PINMUX_DATA(LCD_DATA11_MARK, PG11MD_001),
+ PINMUX_DATA(SSITXD0_MARK, PG11MD_010),
+ PINMUX_DATA(IRQ3_PG_MARK, PG11MD_011),
+ PINMUX_DATA(TXD5_MARK, PG11MD_100),
+ PINMUX_DATA(SIOFTXD_MARK, PG11MD_101),
+
+ PINMUX_DATA(PG10_DATA, PG10MD_000),
+ PINMUX_DATA(LCD_DATA10_MARK, PG10MD_001),
+ PINMUX_DATA(SSIRXD0_MARK, PG10MD_010),
+ PINMUX_DATA(IRQ2_PG_MARK, PG10MD_011),
+ PINMUX_DATA(RXD5_MARK, PG10MD_100),
+ PINMUX_DATA(SIOFRXD_MARK, PG10MD_101),
+
+ PINMUX_DATA(PG9_DATA, PG9MD_000),
+ PINMUX_DATA(LCD_DATA9_MARK, PG9MD_001),
+ PINMUX_DATA(SSIWS0_MARK, PG9MD_010),
+ PINMUX_DATA(TXD4_MARK, PG9MD_100),
+ PINMUX_DATA(SIOFSYNC_MARK, PG9MD_101),
+
+ PINMUX_DATA(PG8_DATA, PG8MD_000),
+ PINMUX_DATA(LCD_DATA8_MARK, PG8MD_001),
+ PINMUX_DATA(SSISCK0_MARK, PG8MD_010),
+ PINMUX_DATA(RXD4_MARK, PG8MD_100),
+ PINMUX_DATA(SIOFSCK_MARK, PG8MD_101),
+
+ PINMUX_DATA(PG7_DATA, PG7MD_00),
+ PINMUX_DATA(LCD_DATA7_MARK, PG7MD_01),
+ PINMUX_DATA(SD_CD_MARK, PG7MD_10),
+ PINMUX_DATA(PINT7_PG_MARK, PG7MD_11),
+
+ PINMUX_DATA(PG6_DATA, PG6MD_00),
+ PINMUX_DATA(LCD_DATA6_MARK, PG6MD_01),
+ PINMUX_DATA(SD_WP_MARK, PG6MD_10),
+ PINMUX_DATA(PINT6_PG_MARK, PG6MD_11),
+
+ PINMUX_DATA(PG5_DATA, PG5MD_00),
+ PINMUX_DATA(LCD_DATA5_MARK, PG5MD_01),
+ PINMUX_DATA(SD_D1_MARK, PG5MD_10),
+ PINMUX_DATA(PINT5_PG_MARK, PG5MD_11),
+
+ PINMUX_DATA(PG4_DATA, PG4MD_00),
+ PINMUX_DATA(LCD_DATA4_MARK, PG4MD_01),
+ PINMUX_DATA(SD_D0_MARK, PG4MD_10),
+ PINMUX_DATA(PINT4_PG_MARK, PG4MD_11),
+
+ PINMUX_DATA(PG3_DATA, PG3MD_00),
+ PINMUX_DATA(LCD_DATA3_MARK, PG3MD_01),
+ PINMUX_DATA(SD_CLK_MARK, PG3MD_10),
+ PINMUX_DATA(PINT3_PG_MARK, PG3MD_11),
+
+ PINMUX_DATA(PG2_DATA, PG2MD_00),
+ PINMUX_DATA(LCD_DATA2_MARK, PG2MD_01),
+ PINMUX_DATA(SD_CMD_MARK, PG2MD_10),
+ PINMUX_DATA(PINT2_PG_MARK, PG2MD_11),
+
+ PINMUX_DATA(PG1_DATA, PG1MD_00),
+ PINMUX_DATA(LCD_DATA1_MARK, PG1MD_01),
+ PINMUX_DATA(SD_D3_MARK, PG1MD_10),
+ PINMUX_DATA(PINT1_PG_MARK, PG1MD_11),
+
+ PINMUX_DATA(PG0_DATA, PG0MD_000),
+ PINMUX_DATA(LCD_DATA0_MARK, PG0MD_001),
+ PINMUX_DATA(SD_D2_MARK, PG0MD_010),
+ PINMUX_DATA(PINT0_PG_MARK, PG0MD_011),
+ PINMUX_DATA(WDTOVF_MARK, PG0MD_100),
+
+ /* Port H */
+ PINMUX_DATA(PH7_DATA, PH7MD_0),
+ PINMUX_DATA(PHAN7_MARK, PH7MD_1),
+
+ PINMUX_DATA(PH6_DATA, PH6MD_0),
+ PINMUX_DATA(PHAN6_MARK, PH6MD_1),
+
+ PINMUX_DATA(PH5_DATA, PH5MD_0),
+ PINMUX_DATA(PHAN5_MARK, PH5MD_1),
+
+ PINMUX_DATA(PH4_DATA, PH4MD_0),
+ PINMUX_DATA(PHAN4_MARK, PH4MD_1),
+
+ PINMUX_DATA(PH3_DATA, PH3MD_0),
+ PINMUX_DATA(PHAN3_MARK, PH3MD_1),
+
+ PINMUX_DATA(PH2_DATA, PH2MD_0),
+ PINMUX_DATA(PHAN2_MARK, PH2MD_1),
+
+ PINMUX_DATA(PH1_DATA, PH1MD_0),
+ PINMUX_DATA(PHAN1_MARK, PH1MD_1),
+
+ PINMUX_DATA(PH0_DATA, PH0MD_0),
+ PINMUX_DATA(PHAN0_MARK, PH0MD_1),
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PINMUX_DATA(PJ11_DATA, PJ11MD_00),
+ PINMUX_DATA(PWM2H_MARK, PJ11MD_01),
+ PINMUX_DATA(DACK1_MARK, PJ11MD_10),
+
+ PINMUX_DATA(PJ10_DATA, PJ10MD_00),
+ PINMUX_DATA(PWM2G_MARK, PJ10MD_01),
+ PINMUX_DATA(DREQ1_MARK, PJ10MD_10),
+
+ PINMUX_DATA(PJ9_DATA, PJ9MD_00),
+ PINMUX_DATA(PWM2F_MARK, PJ9MD_01),
+ PINMUX_DATA(TEND1_MARK, PJ9MD_10),
+
+ PINMUX_DATA(PJ8_DATA, PJ8MD_00),
+ PINMUX_DATA(PWM2E_MARK, PJ8MD_01),
+ PINMUX_DATA(RTS3_MARK, PJ8MD_10),
+
+ PINMUX_DATA(PJ7_DATA, PJ7MD_00),
+ PINMUX_DATA(TIOC1B_MARK, PJ7MD_01),
+ PINMUX_DATA(CTS3_MARK, PJ7MD_10),
+
+ PINMUX_DATA(PJ6_DATA, PJ6MD_00),
+ PINMUX_DATA(TIOC1A_MARK, PJ6MD_01),
+ PINMUX_DATA(SCK3_MARK, PJ6MD_10),
+
+ PINMUX_DATA(PJ5_DATA, PJ5MD_00),
+ PINMUX_DATA(IERXD_MARK, PJ5MD_01),
+ PINMUX_DATA(TXD3_MARK, PJ5MD_10),
+
+ PINMUX_DATA(PJ4_DATA, PJ4MD_00),
+ PINMUX_DATA(IETXD_MARK, PJ4MD_01),
+ PINMUX_DATA(RXD3_MARK, PJ4MD_10),
+
+ PINMUX_DATA(PJ3_DATA, PJ3MD_00),
+ PINMUX_DATA(CRX1_MARK, PJ3MD_01),
+ PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
+ PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
+
+ PINMUX_DATA(PJ2_DATA, PJ2MD_000),
+ PINMUX_DATA(CTX1_MARK, PJ2MD_001),
+ PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
+ PINMUX_DATA(CS2_MARK, PJ2MD_011),
+ PINMUX_DATA(SCK0_MARK, PJ2MD_100),
+ PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
+
+ PINMUX_DATA(PJ1_DATA, PJ1MD_000),
+ PINMUX_DATA(CRX0_MARK, PJ1MD_001),
+ PINMUX_DATA(IERXD_MARK, PJ1MD_010),
+ PINMUX_DATA(IRQ0_PJ_MARK, PJ1MD_011),
+ PINMUX_DATA(RXD0_MARK, PJ1MD_100),
+
+ PINMUX_DATA(PJ0_DATA, PJ0MD_000),
+ PINMUX_DATA(CTX0_MARK, PJ0MD_001),
+ PINMUX_DATA(IETXD_MARK, PJ0MD_010),
+ PINMUX_DATA(CS1_MARK, PJ0MD_011),
+ PINMUX_DATA(TXD0_MARK, PJ0MD_100),
+ PINMUX_DATA(A0_MARK, PJ0MD_101),
+
+ /* Port K */
+ PINMUX_DATA(PK11_DATA, PK11MD_00),
+ PINMUX_DATA(PWM2D_MARK, PK11MD_01),
+ PINMUX_DATA(SSITXD0_MARK, PK11MD_10),
+
+ PINMUX_DATA(PK10_DATA, PK10MD_00),
+ PINMUX_DATA(PWM2C_MARK, PK10MD_01),
+ PINMUX_DATA(SSIRXD0_MARK, PK10MD_10),
+
+ PINMUX_DATA(PK9_DATA, PK9MD_00),
+ PINMUX_DATA(PWM2B_MARK, PK9MD_01),
+ PINMUX_DATA(SSIWS0_MARK, PK9MD_10),
+
+ PINMUX_DATA(PK8_DATA, PK8MD_00),
+ PINMUX_DATA(PWM2A_MARK, PK8MD_01),
+ PINMUX_DATA(SSISCK0_MARK, PK8MD_10),
+
+ PINMUX_DATA(PK7_DATA, PK7MD_00),
+ PINMUX_DATA(PWM1H_MARK, PK7MD_01),
+ PINMUX_DATA(SD_CD_MARK, PK7MD_10),
+
+ PINMUX_DATA(PK6_DATA, PK6MD_00),
+ PINMUX_DATA(PWM1G_MARK, PK6MD_01),
+ PINMUX_DATA(SD_WP_MARK, PK6MD_10),
+
+ PINMUX_DATA(PK5_DATA, PK5MD_00),
+ PINMUX_DATA(PWM1F_MARK, PK5MD_01),
+ PINMUX_DATA(SD_D1_MARK, PK5MD_10),
+
+ PINMUX_DATA(PK4_DATA, PK4MD_00),
+ PINMUX_DATA(PWM1E_MARK, PK4MD_01),
+ PINMUX_DATA(SD_D0_MARK, PK4MD_10),
+
+ PINMUX_DATA(PK3_DATA, PK3MD_00),
+ PINMUX_DATA(PWM1D_MARK, PK3MD_01),
+ PINMUX_DATA(SD_CLK_MARK, PK3MD_10),
+
+ PINMUX_DATA(PK2_DATA, PK2MD_00),
+ PINMUX_DATA(PWM1C_MARK, PK2MD_01),
+ PINMUX_DATA(SD_CMD_MARK, PK2MD_10),
+
+ PINMUX_DATA(PK1_DATA, PK1MD_00),
+ PINMUX_DATA(PWM1B_MARK, PK1MD_01),
+ PINMUX_DATA(SD_D3_MARK, PK1MD_10),
+
+ PINMUX_DATA(PK0_DATA, PK0MD_00),
+ PINMUX_DATA(PWM1A_MARK, PK0MD_01),
+ PINMUX_DATA(SD_D2_MARK, PK0MD_10),
+};
+
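+/*
+ * GPIO table.  PINMUX_GPIO(gpio, id) binds each GPIO_P* / GPIO_FN_* number
+ * from <cpu/sh7264.h> to its enum ID above, which is how the sh_pfc core
+ * looks up a pin or peripheral function by GPIO number.
+ */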
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* Port A */
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* Port B */
+ PINMUX_GPIO(GPIO_PB22, PB22_DATA),
+ PINMUX_GPIO(GPIO_PB21, PB21_DATA),
+ PINMUX_GPIO(GPIO_PB20, PB20_DATA),
+ PINMUX_GPIO(GPIO_PB19, PB19_DATA),
+ PINMUX_GPIO(GPIO_PB18, PB18_DATA),
+ PINMUX_GPIO(GPIO_PB17, PB17_DATA),
+ PINMUX_GPIO(GPIO_PB16, PB16_DATA),
+ PINMUX_GPIO(GPIO_PB15, PB15_DATA),
+ PINMUX_GPIO(GPIO_PB14, PB14_DATA),
+ PINMUX_GPIO(GPIO_PB13, PB13_DATA),
+ PINMUX_GPIO(GPIO_PB12, PB12_DATA),
+ PINMUX_GPIO(GPIO_PB11, PB11_DATA),
+ PINMUX_GPIO(GPIO_PB10, PB10_DATA),
+ PINMUX_GPIO(GPIO_PB9, PB9_DATA),
+ PINMUX_GPIO(GPIO_PB8, PB8_DATA),
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+
+ /* Port C */
+ PINMUX_GPIO(GPIO_PC10, PC10_DATA),
+ PINMUX_GPIO(GPIO_PC9, PC9_DATA),
+ PINMUX_GPIO(GPIO_PC8, PC8_DATA),
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* Port D */
+ PINMUX_GPIO(GPIO_PD15, PD15_DATA),
+ PINMUX_GPIO(GPIO_PD14, PD14_DATA),
+ PINMUX_GPIO(GPIO_PD13, PD13_DATA),
+ PINMUX_GPIO(GPIO_PD12, PD12_DATA),
+ PINMUX_GPIO(GPIO_PD11, PD11_DATA),
+ PINMUX_GPIO(GPIO_PD10, PD10_DATA),
+ PINMUX_GPIO(GPIO_PD9, PD9_DATA),
+ PINMUX_GPIO(GPIO_PD8, PD8_DATA),
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* Port E */
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* Port F */
+ PINMUX_GPIO(GPIO_PF12, PF12_DATA),
+ PINMUX_GPIO(GPIO_PF11, PF11_DATA),
+ PINMUX_GPIO(GPIO_PF10, PF10_DATA),
+ PINMUX_GPIO(GPIO_PF9, PF9_DATA),
+ PINMUX_GPIO(GPIO_PF8, PF8_DATA),
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* Port G */
+ PINMUX_GPIO(GPIO_PG24, PG24_DATA),
+ PINMUX_GPIO(GPIO_PG23, PG23_DATA),
+ PINMUX_GPIO(GPIO_PG22, PG22_DATA),
+ PINMUX_GPIO(GPIO_PG21, PG21_DATA),
+ PINMUX_GPIO(GPIO_PG20, PG20_DATA),
+ PINMUX_GPIO(GPIO_PG19, PG19_DATA),
+ PINMUX_GPIO(GPIO_PG18, PG18_DATA),
+ PINMUX_GPIO(GPIO_PG17, PG17_DATA),
+ PINMUX_GPIO(GPIO_PG16, PG16_DATA),
+ PINMUX_GPIO(GPIO_PG15, PG15_DATA),
+ PINMUX_GPIO(GPIO_PG14, PG14_DATA),
+ PINMUX_GPIO(GPIO_PG13, PG13_DATA),
+ PINMUX_GPIO(GPIO_PG12, PG12_DATA),
+ PINMUX_GPIO(GPIO_PG11, PG11_DATA),
+ PINMUX_GPIO(GPIO_PG10, PG10_DATA),
+ PINMUX_GPIO(GPIO_PG9, PG9_DATA),
+ PINMUX_GPIO(GPIO_PG8, PG8_DATA),
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* Port H - Port H does not have a Data Register */
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PINMUX_GPIO(GPIO_PJ11, PJ11_DATA),
+ PINMUX_GPIO(GPIO_PJ10, PJ10_DATA),
+ PINMUX_GPIO(GPIO_PJ9, PJ9_DATA),
+ PINMUX_GPIO(GPIO_PJ8, PJ8_DATA),
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+ PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+
+ /* Port K */
+ PINMUX_GPIO(GPIO_PK11, PK11_DATA),
+ PINMUX_GPIO(GPIO_PK10, PK10_DATA),
+ PINMUX_GPIO(GPIO_PK9, PK9_DATA),
+ PINMUX_GPIO(GPIO_PK8, PK8_DATA),
+ PINMUX_GPIO(GPIO_PK7, PK7_DATA),
+ PINMUX_GPIO(GPIO_PK6, PK6_DATA),
+ PINMUX_GPIO(GPIO_PK5, PK5_DATA),
+ PINMUX_GPIO(GPIO_PK4, PK4_DATA),
+ PINMUX_GPIO(GPIO_PK3, PK3_DATA),
+ PINMUX_GPIO(GPIO_PK2, PK2_DATA),
+ PINMUX_GPIO(GPIO_PK1, PK1_DATA),
+ PINMUX_GPIO(GPIO_PK0, PK0_DATA),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_PINT7_PG, PINT7_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PG, PINT6_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PG, PINT5_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PG, PINT4_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PG, PINT3_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PG, PINT2_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PG, PINT1_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PG, PINT0_PG_MARK),
+
+ PINMUX_GPIO(GPIO_FN_IRQ7_PC, IRQ7_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PC, IRQ6_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PC, IRQ5_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PC, IRQ4_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PG, IRQ3_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PG, IRQ2_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PJ, IRQ1_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PJ, IRQ0_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PE, IRQ3_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK),
+
+ /* WDT */
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+
+ /* CAN */
+ PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
+ PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
+ PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
+ PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
+ PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
+ PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
+ PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
+ PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
+ PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+ PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
+ PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
+ PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
+ PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
+ PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
+ PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6CE1B, CS6CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5CE1A, CS5CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_ICIOWRAH, ICIOWRAH_MARK),
+ PINMUX_GPIO(GPIO_FN_ICIORD, ICIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1DQMUWE, WE1DQMUWE_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0DQML, WE0DQML_MARK),
+ PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+
+ /* TMU */
+ PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD, TCLKD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC, TCLKC_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB, TCLKB_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA, TCLKA_MARK),
+
+ /* SCIF */
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD5, TXD5_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD5, RXD5_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD6, TXD6_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD6, RXD6_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD7, TXD7_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD7, RXD7_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS1, RTS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS1, CTS1_MARK),
+
+ /* RSPI */
+ PINMUX_GPIO(GPIO_FN_RSPCK0, RSPCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI0, MOSI0_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO0_PF12, MISO0_PF12_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO1, MISO1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL00, SSL00_MARK),
+ PINMUX_GPIO(GPIO_FN_RSPCK1, RSPCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI1, MOSI1_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO1_PG19, MISO1_PG19_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL10, SSL10_MARK),
+
+ /* IIC3 */
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+
+ /* SSI */
+ PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSITXD0, SSITXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIRXD0, SSIRXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+
+ /* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
+ PINMUX_GPIO(GPIO_FN_SIOFTXD, SIOFTXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFRXD, SIOFRXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFSYNC, SIOFSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFSCK, SIOFSCK_MARK),
+
+ /* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
+ PINMUX_GPIO(GPIO_FN_SPDIF_IN, SPDIF_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_SPDIF_OUT, SPDIF_OUT_MARK),
+
+ /* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* VDC3 */
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+
+ PINMUX_GPIO(GPIO_FN_DV_DATA7, DV_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA6, DV_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA5, DV_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA4, DV_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA3, DV_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA2, DV_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA1, DV_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA0, DV_DATA0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_EXTCLK, LCD_EXTCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VSYNC, LCD_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+};
+
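+/*
+ * Config registers.  PINMUX_CFG_REG(name, address, reg_width, field_width)
+ * splits a reg_width-bit register into equally sized fields, listed MSB
+ * field first; each field is followed by 2^field_width enum IDs, one per
+ * possible field value, with 0 (PINMUX_RESERVED) marking unused values.
+ */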
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PA3_IN, PA3_OUT,
+ PA2_IN, PA2_OUT,
+ PA1_IN, PA1_OUT,
+ PA0_IN, PA0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PB22MD_00, PB22MD_01, PB22MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PB21MD_0, PB21MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB20MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+
+ },
+ { PINMUX_CFG_REG("PBCR4", 0xfffe3826, 16, 4) {
+ 0, PB19MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB18MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB17MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB16MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR3", 0xfffe3828, 16, 4) {
+ 0, PB15MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB14MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB13MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB12MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR2", 0xfffe382a, 16, 4) {
+ 0, PB11MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB10MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB9MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB8MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR1", 0xfffe382c, 16, 4) {
+ 0, PB7MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB6MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB5MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB4MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4) {
+ 0, PB3MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB2MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB1MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ PB22_IN, PB22_OUT,
+ PB21_IN, PB21_OUT,
+ PB20_IN, PB20_OUT,
+ PB19_IN, PB19_OUT,
+ PB18_IN, PB18_OUT,
+ PB17_IN, PB17_OUT,
+ PB16_IN, PB16_OUT }
+ },
+
+ { PINMUX_CFG_REG("PBIOR0", 0xfffe3832, 16, 1) {
+ PB15_IN, PB15_OUT,
+ PB14_IN, PB14_OUT,
+ PB13_IN, PB13_OUT,
+ PB12_IN, PB12_OUT,
+ PB11_IN, PB11_OUT,
+ PB10_IN, PB10_OUT,
+ PB9_IN, PB9_OUT,
+ PB8_IN, PB8_OUT,
+ PB7_IN, PB7_OUT,
+ PB6_IN, PB6_OUT,
+ PB5_IN, PB5_OUT,
+ PB4_IN, PB4_OUT,
+ PB3_IN, PB3_OUT,
+ PB2_IN, PB2_OUT,
+ PB1_IN, PB1_OUT,
+ 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC10MD_0, PC10MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC9MD_0, PC9MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC8MD_00, PC8MD_01, PC8MD_10, PC8MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR1", 0xfffe384c, 16, 4) {
+ PC7MD_00, PC7MD_01, PC7MD_10, PC7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC6MD_00, PC6MD_01, PC6MD_10, PC6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC5MD_00, PC5MD_01, PC5MD_10, PC5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC4MD_0, PC4MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR0", 0xfffe384e, 16, 4) {
+ PC3MD_0, PC3MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC2MD_0, PC2MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC1MD_0, PC1MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC0MD_0, PC0MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ PC10_IN, PC10_OUT,
+ PC9_IN, PC9_OUT,
+ PC8_IN, PC8_OUT,
+ PC7_IN, PC7_OUT,
+ PC6_IN, PC6_OUT,
+ PC5_IN, PC5_OUT,
+ PC4_IN, PC4_OUT,
+ PC3_IN, PC3_OUT,
+ PC2_IN, PC2_OUT,
+ PC1_IN, PC1_OUT,
+ PC0_IN, PC0_OUT
+ }
+ },
+
+ { PINMUX_CFG_REG("PDCR3", 0xfffe3868, 16, 4) {
+ 0, PD15MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD14MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD13MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD12MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR2", 0xfffe386a, 16, 4) {
+ 0, PD11MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD10MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD9MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD8MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR1", 0xfffe386c, 16, 4) {
+ 0, PD7MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD6MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD5MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD4MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR0", 0xfffe386e, 16, 4) {
+ 0, PD3MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD2MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD1MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD0MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PDIOR0", 0xfffe3872, 16, 1) {
+ PD15_IN, PD15_OUT,
+ PD14_IN, PD14_OUT,
+ PD13_IN, PD13_OUT,
+ PD12_IN, PD12_OUT,
+ PD11_IN, PD11_OUT,
+ PD10_IN, PD10_OUT,
+ PD9_IN, PD9_OUT,
+ PD8_IN, PD8_OUT,
+ PD7_IN, PD7_OUT,
+ PD6_IN, PD6_OUT,
+ PD5_IN, PD5_OUT,
+ PD4_IN, PD4_OUT,
+ PD3_IN, PD3_OUT,
+ PD2_IN, PD2_OUT,
+ PD1_IN, PD1_OUT,
+ PD0_IN, PD0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE5MD_00, PE5MD_01, 0, PE5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE4MD_00, PE4MD_01, 0, PE4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PECR0", 0xfffe388e, 16, 4) {
+ PE3MD_00, PE3MD_01, 0, PE3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE2MD_00, PE2MD_01, 0, PE2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE1MD_000, PE1MD_001, PE1MD_010, PE1MD_011,
+ PE1MD_100, PE1MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PE5_IN, PE5_OUT,
+ PE4_IN, PE4_OUT,
+ PE3_IN, PE3_OUT,
+ PE2_IN, PE2_OUT,
+ PE1_IN, PE1_OUT,
+ PE0_IN, PE0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) {
+ PF12MD_000, PF12MD_001, 0, PF12MD_011,
+ PF12MD_100, PF12MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFCR2", 0xfffe38aa, 16, 4) {
+ PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
+ PF11MD_100, PF11MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF10MD_000, PF10MD_001, PF10MD_010, PF10MD_011,
+ PF10MD_100, PF10MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF9MD_000, PF9MD_001, PF9MD_010, PF9MD_011,
+ PF9MD_100, PF9MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF8MD_00, PF8MD_01, PF8MD_10, PF8MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFCR1", 0xfffe38ac, 16, 4) {
+ PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
+ PF7MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF6MD_000, PF6MD_001, PF6MD_010, PF6MD_011,
+ PF6MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF5MD_000, PF5MD_001, PF5MD_010, PF5MD_011,
+ PF5MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
+ PF4MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFCR0", 0xfffe38ae, 16, 4) {
+ PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
+ PF3MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF2MD_000, PF2MD_001, PF2MD_010, PF2MD_011,
+ PF2MD_100, PF2MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
+ PF1MD_100, PF1MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
+ PF0MD_100, PF0MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+ }
+ },
+
+ { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) {
+ 0, 0, 0, 0, 0, 0,
+ PF12_IN, PF12_OUT,
+ PF11_IN, PF11_OUT,
+ PF10_IN, PF10_OUT,
+ PF9_IN, PF9_OUT,
+ PF8_IN, PF8_OUT,
+ PF7_IN, PF7_OUT,
+ PF6_IN, PF6_OUT,
+ PF5_IN, PF5_OUT,
+ PF4_IN, PF4_OUT,
+ PF3_IN, PF3_OUT,
+ PF2_IN, PF2_OUT,
+ PF1_IN, PF1_OUT,
+ PF0_IN, PF0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PGCR7", 0xfffe38c0, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
+ PG0MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR5", 0xfffe38c4, 16, 4) {
+ PG23MD_00, PG23MD_01, PG23MD_10, PG23MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG22MD_00, PG22MD_01, PG22MD_10, PG22MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG21MD_00, PG21MD_01, PG21MD_10, PG21MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
+ PG20MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4) {
+ PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
+ PG19MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG18MD_000, PG18MD_001, PG18MD_010, PG18MD_011,
+ PG18MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG17MD_000, PG17MD_001, PG17MD_010, PG17MD_011,
+ PG17MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG16MD_000, PG16MD_001, PG16MD_010, PG16MD_011,
+ PG16MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR3", 0xfffe38c8, 16, 4) {
+ PG15MD_000, PG15MD_001, PG15MD_010, PG15MD_011,
+ PG15MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG14MD_000, PG14MD_001, PG14MD_010, 0,
+ PG14MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG13MD_000, PG13MD_001, PG13MD_010, 0,
+ PG13MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG12MD_000, PG12MD_001, PG12MD_010, 0,
+ PG12MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR2", 0xfffe38ca, 16, 4) {
+ PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
+ PG11MD_100, PG11MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG10MD_000, PG10MD_001, PG10MD_010, PG10MD_011,
+ PG10MD_100, PG10MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG9MD_000, PG9MD_001, PG9MD_010, PG9MD_011,
+ PG9MD_100, PG9MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
+ PG8MD_100, PG8MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR1", 0xfffe38cc, 16, 4) {
+ PG7MD_00, PG7MD_01, PG7MD_10, PG7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG6MD_00, PG6MD_01, PG6MD_10, PG6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG5MD_00, PG5MD_01, PG5MD_10, PG5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG4MD_00, PG4MD_01, PG4MD_10, PG4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4) {
+ PG3MD_00, PG3MD_01, PG3MD_10, PG3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG2MD_00, PG2MD_01, PG2MD_10, PG2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG1MD_00, PG1MD_01, PG1MD_10, PG1MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ PG24_IN, PG24_OUT,
+ PG23_IN, PG23_OUT,
+ PG22_IN, PG22_OUT,
+ PG21_IN, PG21_OUT,
+ PG20_IN, PG20_OUT,
+ PG19_IN, PG19_OUT,
+ PG18_IN, PG18_OUT,
+ PG17_IN, PG17_OUT,
+ PG16_IN, PG16_OUT }
+ },
+
+ { PINMUX_CFG_REG("PGIOR0", 0xfffe38d2, 16, 1) {
+ PG15_IN, PG15_OUT,
+ PG14_IN, PG14_OUT,
+ PG13_IN, PG13_OUT,
+ PG12_IN, PG12_OUT,
+ PG11_IN, PG11_OUT,
+ PG10_IN, PG10_OUT,
+ PG9_IN, PG9_OUT,
+ PG8_IN, PG8_OUT,
+ PG7_IN, PG7_OUT,
+ PG6_IN, PG6_OUT,
+ PG5_IN, PG5_OUT,
+ PG4_IN, PG4_OUT,
+ PG3_IN, PG3_OUT,
+ PG2_IN, PG2_OUT,
+ PG1_IN, PG1_OUT,
+ PG0_IN, PG0_OUT
+ }
+ },
+
+ { PINMUX_CFG_REG("PHCR1", 0xfffe38ec, 16, 4) {
+ PH7MD_0, PH7MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH6MD_0, PH6MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH5MD_0, PH5MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH4MD_0, PH4MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PHCR0", 0xfffe38ee, 16, 4) {
+ PH3MD_0, PH3MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH2MD_0, PH2MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH1MD_0, PH1MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH0MD_0, PH0MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PJCR2", 0xfffe390a, 16, 4) {
+ PJ11MD_00, PJ11MD_01, PJ11MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ10MD_00, PJ10MD_01, PJ10MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ9MD_00, PJ9MD_01, PJ9MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ8MD_00, PJ8MD_01, PJ8MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR1", 0xfffe390c, 16, 4) {
+ PJ7MD_00, PJ7MD_01, PJ7MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ6MD_00, PJ6MD_01, PJ6MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ5MD_00, PJ5MD_01, PJ5MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ4MD_00, PJ4MD_01, PJ4MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR0", 0xfffe390e, 16, 4) {
+ PJ3MD_00, PJ3MD_01, PJ3MD_10, PJ3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
+ PJ2MD_100, PJ2MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ1MD_000, PJ1MD_001, PJ1MD_010, PJ1MD_011,
+ PJ1MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
+ PJ0MD_100, PJ0MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJIOR0", 0xfffe3912, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ11_IN, PJ11_OUT,
+ PJ10_IN, PJ10_OUT,
+ PJ9_IN, PJ9_OUT,
+ PJ8_IN, PJ8_OUT,
+ PJ7_IN, PJ7_OUT,
+ PJ6_IN, PJ6_OUT,
+ PJ5_IN, PJ5_OUT,
+ PJ4_IN, PJ4_OUT,
+ PJ3_IN, PJ3_OUT,
+ PJ2_IN, PJ2_OUT,
+ PJ1_IN, PJ1_OUT,
+ PJ0_IN, PJ0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PKCR2", 0xfffe392a, 16, 4) {
+ PK11MD_00, PK11MD_01, PK11MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK10MD_00, PK10MD_01, PK10MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK9MD_00, PK9MD_01, PK9MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK8MD_00, PK8MD_01, PK8MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PKCR1", 0xfffe392c, 16, 4) {
+ PK7MD_00, PK7MD_01, PK7MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK6MD_00, PK6MD_01, PK6MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK5MD_00, PK5MD_01, PK5MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK4MD_00, PK4MD_01, PK4MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PKCR0", 0xfffe392e, 16, 4) {
+ PK3MD_00, PK3MD_01, PK3MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK2MD_00, PK2MD_01, PK2MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK1MD_00, PK1MD_01, PK1MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK0MD_00, PK0MD_01, PK0MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PKIOR0", 0xfffe3932, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK11_IN, PK11_OUT,
+ PK10_IN, PK10_OUT,
+ PK9_IN, PK9_OUT,
+ PK8_IN, PK8_OUT,
+ PK7_IN, PK7_OUT,
+ PK6_IN, PK6_OUT,
+ PK5_IN, PK5_OUT,
+ PK4_IN, PK4_OUT,
+ PK3_IN, PK3_OUT,
+ PK2_IN, PK2_OUT,
+ PK1_IN, PK1_OUT,
+ PK0_IN, PK0_OUT }
+ },
+ {}
+};
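[Editorial note, not part of the patch] For orientation, an entry such as PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4) describes a 16-bit register at 0xfffe38c6 split into four 4-bit mode fields, and the brace-enclosed list supplies one enum slot per possible field value (2^4 = 16 slots per field, 0 meaning reserved), with the fields listed from the highest-numbered pin down. The sketch below models that indexing only; the struct and helper names are hypothetical and not part of the sh-pfc core, and the assumption that field 0 maps to the most significant bits is taken from the ordering of the tables above.

	struct cfg_reg_model {
		const char *name;		/* e.g. "PGCR4" */
		unsigned long addr;		/* e.g. 0xfffe38c6 */
		unsigned int reg_width;		/* e.g. 16 bits */
		unsigned int field_width;	/* e.g. 4 bits per mode field */
		const unsigned short *enum_ids;	/* (reg_width / field_width) * (1 << field_width) slots */
	};

	/* Enum selected when @value is written into field @field, with
	 * field 0 assumed to occupy the most significant bits. */
	static unsigned short cfg_reg_enum(const struct cfg_reg_model *r,
					   unsigned int field, unsigned int value)
	{
		return r->enum_ids[field * (1u << r->field_width) + value];
	}

Under that reading, writing 0b001 into the top field of PGCR4 would select PG19MD_001 from the table above.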
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR1", 0xfffe3814, 16) {
+ 0, 0, 0, 0, 0, 0, 0, PA3_DATA,
+ 0, 0, 0, 0, 0, 0, 0, PA2_DATA }
+ },
+
+ { PINMUX_DATA_REG("PADR0", 0xfffe3816, 16) {
+ 0, 0, 0, 0, 0, 0, 0, PA1_DATA,
+ 0, 0, 0, 0, 0, 0, 0, PA0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PBDR1", 0xfffe3834, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB22_DATA, PB21_DATA, PB20_DATA,
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA }
+ },
+
+ { PINMUX_DATA_REG("PBDR0", 0xfffe3836, 16) {
+ PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, 0 }
+ },
+
+ { PINMUX_DATA_REG("PCDR0", 0xfffe3856, 16) {
+ 0, 0, 0, 0,
+ 0, PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PDDR0", 0xfffe3876, 16) {
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PEDR0", 0xfffe3896, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PFDR0", 0xfffe38b6, 16) {
+ 0, 0, 0, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PGDR1", 0xfffe38d4, 16) {
+ 0, 0, 0, 0, 0, 0, 0, PG24_DATA,
+ PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA }
+ },
+
+ { PINMUX_DATA_REG("PGDR0", 0xfffe38d6, 16) {
+ PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
+ PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR0", 0xfffe3916, 16) {
+ 0, 0, 0, PJ12_DATA,
+ PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR0", 0xfffe3936, 16) {
+ 0, 0, 0, PK12_DATA,
+ PK11_DATA, PK10_DATA, PK9_DATA, PK8_DATA,
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA }
+ },
+ { }
+};
+
+struct sh_pfc_soc_info sh7264_pinmux_info = {
+ .name = "sh7264_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA3,
+ .last_gpio = GPIO_FN_LCD_M_DISP,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
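[Editorial note, not part of the patch] The sh7264_pinmux_info structure above is what the sh-pfc core consumes; at the board level, functions described by these tables were typically selected through the legacy GPIO-number interface. The fragment below is only a usage sketch under that assumption: the board file, the initcall, and the choice of GPIO_FN_TXD0 (standing in for whichever GPIO_FN_* enum a board actually needs) are hypothetical.

	#include <linux/gpio.h>
	#include <linux/init.h>
	#include <cpu/sh7264.h>

	static int __init board_pinmux_init(void)
	{
		/* Requesting a function GPIO programs the matching PxCRn
		 * mode field; function GPIOs carry no direction of their
		 * own, so no gpio_direction_*() call is needed. */
		return gpio_request(GPIO_FN_TXD0, NULL);
	}
	device_initcall(board_pinmux_init);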
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
new file mode 100644
index 000000000000..b1b5d6d4ad76
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -0,0 +1,2834 @@
+/*
+ * SH7269 Pinmux
+ *
+ * Copyright (C) 2012 Renesas Electronics Europe Ltd
+ * Copyright (C) 2012 Phil Edworthy
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7269.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ /* Port A */
+ PA1_DATA, PA0_DATA,
+ /* Port B */
+ PB22_DATA, PB21_DATA, PB20_DATA,
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA,
+ PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA,
+ /* Port C */
+ PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ /* Port D */
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ /* Port E */
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ /* Port F */
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA,
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ /* Port G */
+ PG27_DATA, PG26_DATA, PG25_DATA, PG24_DATA,
+ PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA,
+ PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
+ PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ /* Port H */
+ /* NOTE: Port H does not have a data register; PH data is read
+  * through the PH port register instead.
+  */
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ /* Port I - not on device */
+ /* Port J */
+ PJ31_DATA, PJ30_DATA, PJ29_DATA, PJ28_DATA,
+ PJ27_DATA, PJ26_DATA, PJ25_DATA, PJ24_DATA,
+ PJ23_DATA, PJ22_DATA, PJ21_DATA, PJ20_DATA,
+ PJ19_DATA, PJ18_DATA, PJ17_DATA, PJ16_DATA,
+ PJ15_DATA, PJ14_DATA, PJ13_DATA, PJ12_DATA,
+ PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ FORCE_IN,
+ /* Port A */
+ PA1_IN, PA0_IN,
+ /* Port B */
+ PB22_IN, PB21_IN, PB20_IN,
+ PB19_IN, PB18_IN, PB17_IN, PB16_IN,
+ PB15_IN, PB14_IN, PB13_IN, PB12_IN,
+ PB11_IN, PB10_IN, PB9_IN, PB8_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN,
+ /* Port C */
+ PC8_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ /* Port D */
+ PD15_IN, PD14_IN, PD13_IN, PD12_IN,
+ PD11_IN, PD10_IN, PD9_IN, PD8_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ /* Port E */
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ /* Port F */
+ PF23_IN, PF22_IN, PF21_IN, PF20_IN,
+ PF19_IN, PF18_IN, PF17_IN, PF16_IN,
+ PF15_IN, PF14_IN, PF13_IN, PF12_IN,
+ PF11_IN, PF10_IN, PF9_IN, PF8_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ /* Port G */
+ PG27_IN, PG26_IN, PG25_IN, PG24_IN,
+ PG23_IN, PG22_IN, PG21_IN, PG20_IN,
+ PG19_IN, PG18_IN, PG17_IN, PG16_IN,
+ PG15_IN, PG14_IN, PG13_IN, PG12_IN,
+ PG11_IN, PG10_IN, PG9_IN, PG8_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+ /* Port H - Port H does not have a Data Register */
+ /* Port I - not on device */
+ /* Port J */
+ PJ31_IN, PJ30_IN, PJ29_IN, PJ28_IN,
+ PJ27_IN, PJ26_IN, PJ25_IN, PJ24_IN,
+ PJ23_IN, PJ22_IN, PJ21_IN, PJ20_IN,
+ PJ19_IN, PJ18_IN, PJ17_IN, PJ16_IN,
+ PJ15_IN, PJ14_IN, PJ13_IN, PJ12_IN,
+ PJ11_IN, PJ10_IN, PJ9_IN, PJ8_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ FORCE_OUT,
+ /* Port A */
+ PA1_OUT, PA0_OUT,
+ /* Port B */
+ PB22_OUT, PB21_OUT, PB20_OUT,
+ PB19_OUT, PB18_OUT, PB17_OUT, PB16_OUT,
+ PB15_OUT, PB14_OUT, PB13_OUT, PB12_OUT,
+ PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT,
+ /* Port C */
+ PC8_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ /* Port D */
+ PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT,
+ PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ /* Port E */
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ /* Port F */
+ PF23_OUT, PF22_OUT, PF21_OUT, PF20_OUT,
+ PF19_OUT, PF18_OUT, PF17_OUT, PF16_OUT,
+ PF15_OUT, PF14_OUT, PF13_OUT, PF12_OUT,
+ PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ /* Port G */
+ PG27_OUT, PG26_OUT, PG25_OUT, PG24_OUT,
+ PG23_OUT, PG22_OUT, PG21_OUT, PG20_OUT,
+ PG19_OUT, PG18_OUT, PG17_OUT, PG16_OUT,
+ PG15_OUT, PG14_OUT, PG13_OUT, PG12_OUT,
+ PG11_OUT, PG10_OUT, PG9_OUT, PG8_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+ /* Port H - Port H does not have a Data Register */
+ /* Port I - not on device */
+ /* Port J */
+ PJ31_OUT, PJ30_OUT, PJ29_OUT, PJ28_OUT,
+ PJ27_OUT, PJ26_OUT, PJ25_OUT, PJ24_OUT,
+ PJ23_OUT, PJ22_OUT, PJ21_OUT, PJ20_OUT,
+ PJ19_OUT, PJ18_OUT, PJ17_OUT, PJ16_OUT,
+ PJ15_OUT, PJ14_OUT, PJ13_OUT, PJ12_OUT,
+ PJ11_OUT, PJ10_OUT, PJ9_OUT, PJ8_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ /* Port A */
+ PA1_IOR_IN, PA1_IOR_OUT,
+ PA0_IOR_IN, PA0_IOR_OUT,
+
+ /* Port B */
+ PB22_IOR_IN, PB22_IOR_OUT,
+ PB21_IOR_IN, PB21_IOR_OUT,
+ PB20_IOR_IN, PB20_IOR_OUT,
+ PB19_IOR_IN, PB19_IOR_OUT,
+ PB18_IOR_IN, PB18_IOR_OUT,
+ PB17_IOR_IN, PB17_IOR_OUT,
+ PB16_IOR_IN, PB16_IOR_OUT,
+
+ PB15_IOR_IN, PB15_IOR_OUT,
+ PB14_IOR_IN, PB14_IOR_OUT,
+ PB13_IOR_IN, PB13_IOR_OUT,
+ PB12_IOR_IN, PB12_IOR_OUT,
+ PB11_IOR_IN, PB11_IOR_OUT,
+ PB10_IOR_IN, PB10_IOR_OUT,
+ PB9_IOR_IN, PB9_IOR_OUT,
+ PB8_IOR_IN, PB8_IOR_OUT,
+
+ PB7_IOR_IN, PB7_IOR_OUT,
+ PB6_IOR_IN, PB6_IOR_OUT,
+ PB5_IOR_IN, PB5_IOR_OUT,
+ PB4_IOR_IN, PB4_IOR_OUT,
+ PB3_IOR_IN, PB3_IOR_OUT,
+ PB2_IOR_IN, PB2_IOR_OUT,
+ PB1_IOR_IN, PB1_IOR_OUT,
+ PB0_IOR_IN, PB0_IOR_OUT,
+
+ PB22MD_000, PB22MD_001, PB22MD_010, PB22MD_011,
+ PB22MD_100, PB22MD_101, PB22MD_110, PB22MD_111,
+ PB21MD_00, PB21MD_01, PB21MD_10, PB21MD_11,
+ PB20MD_000, PB20MD_001, PB20MD_010, PB20MD_011,
+ PB20MD_100, PB20MD_101, PB20MD_110, PB20MD_111,
+ PB19MD_000, PB19MD_001, PB19MD_010, PB19MD_011,
+ PB19MD_100, PB19MD_101, PB19MD_110, PB19MD_111,
+ PB18MD_000, PB18MD_001, PB18MD_010, PB18MD_011,
+ PB18MD_100, PB18MD_101, PB18MD_110, PB18MD_111,
+ PB17MD_000, PB17MD_001, PB17MD_010, PB17MD_011,
+ PB17MD_100, PB17MD_101, PB17MD_110, PB17MD_111,
+ PB16MD_000, PB16MD_001, PB16MD_010, PB16MD_011,
+ PB16MD_100, PB16MD_101, PB16MD_110, PB16MD_111,
+ PB15MD_000, PB15MD_001, PB15MD_010, PB15MD_011,
+ PB15MD_100, PB15MD_101, PB15MD_110, PB15MD_111,
+ PB14MD_000, PB14MD_001, PB14MD_010, PB14MD_011,
+ PB14MD_100, PB14MD_101, PB14MD_110, PB14MD_111,
+ PB13MD_000, PB13MD_001, PB13MD_010, PB13MD_011,
+ PB13MD_100, PB13MD_101, PB13MD_110, PB13MD_111,
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+
+ PB11MD_00, PB11MD_01, PB11MD_10, PB11MD_11,
+ PB10MD_00, PB10MD_01, PB10MD_10, PB10MD_11,
+ PB9MD_00, PB9MD_01, PB9MD_10, PB9MD_11,
+ PB8MD_00, PB8MD_01, PB8MD_10, PB8MD_11,
+
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+
+ /* Port C */
+ PC8_IOR_IN, PC8_IOR_OUT,
+ PC7_IOR_IN, PC7_IOR_OUT,
+ PC6_IOR_IN, PC6_IOR_OUT,
+ PC5_IOR_IN, PC5_IOR_OUT,
+ PC4_IOR_IN, PC4_IOR_OUT,
+ PC3_IOR_IN, PC3_IOR_OUT,
+ PC2_IOR_IN, PC2_IOR_OUT,
+ PC1_IOR_IN, PC1_IOR_OUT,
+ PC0_IOR_IN, PC0_IOR_OUT,
+
+ PC8MD_000, PC8MD_001, PC8MD_010, PC8MD_011,
+ PC8MD_100, PC8MD_101, PC8MD_110, PC8MD_111,
+ PC7MD_000, PC7MD_001, PC7MD_010, PC7MD_011,
+ PC7MD_100, PC7MD_101, PC7MD_110, PC7MD_111,
+ PC6MD_000, PC6MD_001, PC6MD_010, PC6MD_011,
+ PC6MD_100, PC6MD_101, PC6MD_110, PC6MD_111,
+ PC5MD_000, PC5MD_001, PC5MD_010, PC5MD_011,
+ PC5MD_100, PC5MD_101, PC5MD_110, PC5MD_111,
+ PC4MD_00, PC4MD_01, PC4MD_10, PC4MD_11,
+
+ PC3MD_00, PC3MD_01, PC3MD_10, PC3MD_11,
+ PC2MD_00, PC2MD_01, PC2MD_10, PC2MD_11,
+ PC1MD_0, PC1MD_1,
+ PC0MD_0, PC0MD_1,
+
+ /* Port D */
+ PD15_IOR_IN, PD15_IOR_OUT,
+ PD14_IOR_IN, PD14_IOR_OUT,
+ PD13_IOR_IN, PD13_IOR_OUT,
+ PD12_IOR_IN, PD12_IOR_OUT,
+ PD11_IOR_IN, PD11_IOR_OUT,
+ PD10_IOR_IN, PD10_IOR_OUT,
+ PD9_IOR_IN, PD9_IOR_OUT,
+ PD8_IOR_IN, PD8_IOR_OUT,
+ PD7_IOR_IN, PD7_IOR_OUT,
+ PD6_IOR_IN, PD6_IOR_OUT,
+ PD5_IOR_IN, PD5_IOR_OUT,
+ PD4_IOR_IN, PD4_IOR_OUT,
+ PD3_IOR_IN, PD3_IOR_OUT,
+ PD2_IOR_IN, PD2_IOR_OUT,
+ PD1_IOR_IN, PD1_IOR_OUT,
+ PD0_IOR_IN, PD0_IOR_OUT,
+
+ PD15MD_00, PD15MD_01, PD15MD_10, PD15MD_11,
+ PD14MD_00, PD14MD_01, PD14MD_10, PD14MD_11,
+ PD13MD_00, PD13MD_01, PD13MD_10, PD13MD_11,
+ PD12MD_00, PD12MD_01, PD12MD_10, PD12MD_11,
+
+ PD11MD_00, PD11MD_01, PD11MD_10, PD11MD_11,
+ PD10MD_00, PD10MD_01, PD10MD_10, PD10MD_11,
+ PD9MD_00, PD9MD_01, PD9MD_10, PD9MD_11,
+ PD8MD_00, PD8MD_01, PD8MD_10, PD8MD_11,
+
+ PD7MD_00, PD7MD_01, PD7MD_10, PD7MD_11,
+ PD6MD_00, PD6MD_01, PD6MD_10, PD6MD_11,
+ PD5MD_00, PD5MD_01, PD5MD_10, PD5MD_11,
+ PD4MD_00, PD4MD_01, PD4MD_10, PD4MD_11,
+
+ PD3MD_00, PD3MD_01, PD3MD_10, PD3MD_11,
+ PD2MD_00, PD2MD_01, PD2MD_10, PD2MD_11,
+ PD1MD_00, PD1MD_01, PD1MD_10, PD1MD_11,
+ PD0MD_00, PD0MD_01, PD0MD_10, PD0MD_11,
+
+ /* Port E */
+ PE7_IOR_IN, PE7_IOR_OUT,
+ PE6_IOR_IN, PE6_IOR_OUT,
+ PE5_IOR_IN, PE5_IOR_OUT,
+ PE4_IOR_IN, PE4_IOR_OUT,
+ PE3_IOR_IN, PE3_IOR_OUT,
+ PE2_IOR_IN, PE2_IOR_OUT,
+ PE1_IOR_IN, PE1_IOR_OUT,
+ PE0_IOR_IN, PE0_IOR_OUT,
+
+ PE7MD_00, PE7MD_01, PE7MD_10, PE7MD_11,
+ PE6MD_00, PE6MD_01, PE6MD_10, PE6MD_11,
+ PE5MD_00, PE5MD_01, PE5MD_10, PE5MD_11,
+ PE4MD_00, PE4MD_01, PE4MD_10, PE4MD_11,
+
+ PE3MD_000, PE3MD_001, PE3MD_010, PE3MD_011,
+ PE3MD_100, PE3MD_101, PE3MD_110, PE3MD_111,
+ PE2MD_000, PE2MD_001, PE2MD_010, PE2MD_011,
+ PE2MD_100, PE2MD_101, PE2MD_110, PE2MD_111,
+ PE1MD_000, PE1MD_001, PE1MD_010, PE1MD_011,
+ PE1MD_100, PE1MD_101, PE1MD_110, PE1MD_111,
+ PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11,
+
+ /* Port F */
+ PF23_IOR_IN, PF23_IOR_OUT,
+ PF22_IOR_IN, PF22_IOR_OUT,
+ PF21_IOR_IN, PF21_IOR_OUT,
+ PF20_IOR_IN, PF20_IOR_OUT,
+ PF19_IOR_IN, PF19_IOR_OUT,
+ PF18_IOR_IN, PF18_IOR_OUT,
+ PF17_IOR_IN, PF17_IOR_OUT,
+ PF16_IOR_IN, PF16_IOR_OUT,
+ PF15_IOR_IN, PF15_IOR_OUT,
+ PF14_IOR_IN, PF14_IOR_OUT,
+ PF13_IOR_IN, PF13_IOR_OUT,
+ PF12_IOR_IN, PF12_IOR_OUT,
+ PF11_IOR_IN, PF11_IOR_OUT,
+ PF10_IOR_IN, PF10_IOR_OUT,
+ PF9_IOR_IN, PF9_IOR_OUT,
+ PF8_IOR_IN, PF8_IOR_OUT,
+ PF7_IOR_IN, PF7_IOR_OUT,
+ PF6_IOR_IN, PF6_IOR_OUT,
+ PF5_IOR_IN, PF5_IOR_OUT,
+ PF4_IOR_IN, PF4_IOR_OUT,
+ PF3_IOR_IN, PF3_IOR_OUT,
+ PF2_IOR_IN, PF2_IOR_OUT,
+ PF1_IOR_IN, PF1_IOR_OUT,
+ PF0_IOR_IN, PF0_IOR_OUT,
+
+ PF23MD_000, PF23MD_001, PF23MD_010, PF23MD_011,
+ PF23MD_100, PF23MD_101, PF23MD_110, PF23MD_111,
+ PF22MD_000, PF22MD_001, PF22MD_010, PF22MD_011,
+ PF22MD_100, PF22MD_101, PF22MD_110, PF22MD_111,
+ PF21MD_000, PF21MD_001, PF21MD_010, PF21MD_011,
+ PF21MD_100, PF21MD_101, PF21MD_110, PF21MD_111,
+ PF20MD_000, PF20MD_001, PF20MD_010, PF20MD_011,
+ PF20MD_100, PF20MD_101, PF20MD_110, PF20MD_111,
+
+ PF19MD_000, PF19MD_001, PF19MD_010, PF19MD_011,
+ PF19MD_100, PF19MD_101, PF19MD_110, PF19MD_111,
+ PF18MD_000, PF18MD_001, PF18MD_010, PF18MD_011,
+ PF18MD_100, PF18MD_101, PF18MD_110, PF18MD_111,
+ PF17MD_000, PF17MD_001, PF17MD_010, PF17MD_011,
+ PF17MD_100, PF17MD_101, PF17MD_110, PF17MD_111,
+ PF16MD_000, PF16MD_001, PF16MD_010, PF16MD_011,
+ PF16MD_100, PF16MD_101, PF16MD_110, PF16MD_111,
+
+ PF15MD_000, PF15MD_001, PF15MD_010, PF15MD_011,
+ PF15MD_100, PF15MD_101, PF15MD_110, PF15MD_111,
+ PF14MD_000, PF14MD_001, PF14MD_010, PF14MD_011,
+ PF14MD_100, PF14MD_101, PF14MD_110, PF14MD_111,
+ PF13MD_000, PF13MD_001, PF13MD_010, PF13MD_011,
+ PF13MD_100, PF13MD_101, PF13MD_110, PF13MD_111,
+ PF12MD_000, PF12MD_001, PF12MD_010, PF12MD_011,
+ PF12MD_100, PF12MD_101, PF12MD_110, PF12MD_111,
+
+ PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
+ PF11MD_100, PF11MD_101, PF11MD_110, PF11MD_111,
+ PF10MD_000, PF10MD_001, PF10MD_010, PF10MD_011,
+ PF10MD_100, PF10MD_101, PF10MD_110, PF10MD_111,
+ PF9MD_000, PF9MD_001, PF9MD_010, PF9MD_011,
+ PF9MD_100, PF9MD_101, PF9MD_110, PF9MD_111,
+ PF8MD_000, PF8MD_001, PF8MD_010, PF8MD_011,
+ PF8MD_100, PF8MD_101, PF8MD_110, PF8MD_111,
+
+ PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
+ PF7MD_100, PF7MD_101, PF7MD_110, PF7MD_111,
+ PF6MD_000, PF6MD_001, PF6MD_010, PF6MD_011,
+ PF6MD_100, PF6MD_101, PF6MD_110, PF6MD_111,
+ PF5MD_000, PF5MD_001, PF5MD_010, PF5MD_011,
+ PF5MD_100, PF5MD_101, PF5MD_110, PF5MD_111,
+ PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
+ PF4MD_100, PF4MD_101, PF4MD_110, PF4MD_111,
+
+ PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
+ PF3MD_100, PF3MD_101, PF3MD_110, PF3MD_111,
+ PF2MD_000, PF2MD_001, PF2MD_010, PF2MD_011,
+ PF2MD_100, PF2MD_101, PF2MD_110, PF2MD_111,
+ PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
+ PF1MD_100, PF1MD_101, PF1MD_110, PF1MD_111,
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
+ PF0MD_100, PF0MD_101, PF0MD_110, PF0MD_111,
+
+ /* Port G */
+ PG27_IOR_IN, PG27_IOR_OUT,
+ PG26_IOR_IN, PG26_IOR_OUT,
+ PG25_IOR_IN, PG25_IOR_OUT,
+ PG24_IOR_IN, PG24_IOR_OUT,
+ PG23_IOR_IN, PG23_IOR_OUT,
+ PG22_IOR_IN, PG22_IOR_OUT,
+ PG21_IOR_IN, PG21_IOR_OUT,
+ PG20_IOR_IN, PG20_IOR_OUT,
+ PG19_IOR_IN, PG19_IOR_OUT,
+ PG18_IOR_IN, PG18_IOR_OUT,
+ PG17_IOR_IN, PG17_IOR_OUT,
+ PG16_IOR_IN, PG16_IOR_OUT,
+ PG15_IOR_IN, PG15_IOR_OUT,
+ PG14_IOR_IN, PG14_IOR_OUT,
+ PG13_IOR_IN, PG13_IOR_OUT,
+ PG12_IOR_IN, PG12_IOR_OUT,
+ PG11_IOR_IN, PG11_IOR_OUT,
+ PG10_IOR_IN, PG10_IOR_OUT,
+ PG9_IOR_IN, PG9_IOR_OUT,
+ PG8_IOR_IN, PG8_IOR_OUT,
+ PG7_IOR_IN, PG7_IOR_OUT,
+ PG6_IOR_IN, PG6_IOR_OUT,
+ PG5_IOR_IN, PG5_IOR_OUT,
+ PG4_IOR_IN, PG4_IOR_OUT,
+ PG3_IOR_IN, PG3_IOR_OUT,
+ PG2_IOR_IN, PG2_IOR_OUT,
+ PG1_IOR_IN, PG1_IOR_OUT,
+ PG0_IOR_IN, PG0_IOR_OUT,
+
+ PG27MD_00, PG27MD_01, PG27MD_10, PG27MD_11,
+ PG26MD_00, PG26MD_01, PG26MD_10, PG26MD_11,
+ PG25MD_00, PG25MD_01, PG25MD_10, PG25MD_11,
+ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11,
+
+ PG23MD_000, PG23MD_001, PG23MD_010, PG23MD_011,
+ PG23MD_100, PG23MD_101, PG23MD_110, PG23MD_111,
+ PG22MD_000, PG22MD_001, PG22MD_010, PG22MD_011,
+ PG22MD_100, PG22MD_101, PG22MD_110, PG22MD_111,
+ PG21MD_000, PG21MD_001, PG21MD_010, PG21MD_011,
+ PG21MD_100, PG21MD_101, PG21MD_110, PG21MD_111,
+ PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
+ PG20MD_100, PG20MD_101, PG20MD_110, PG20MD_111,
+
+ PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
+ PG19MD_100, PG19MD_101, PG19MD_110, PG19MD_111,
+ PG18MD_000, PG18MD_001, PG18MD_010, PG18MD_011,
+ PG18MD_100, PG18MD_101, PG18MD_110, PG18MD_111,
+ PG17MD_00, PG17MD_01, PG17MD_10, PG17MD_11,
+ PG16MD_00, PG16MD_01, PG16MD_10, PG16MD_11,
+
+ PG15MD_00, PG15MD_01, PG15MD_10, PG15MD_11,
+ PG14MD_00, PG14MD_01, PG14MD_10, PG14MD_11,
+ PG13MD_00, PG13MD_01, PG13MD_10, PG13MD_11,
+ PG12MD_00, PG12MD_01, PG12MD_10, PG12MD_11,
+
+ PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
+ PG11MD_100, PG11MD_101, PG11MD_110, PG11MD_111,
+ PG10MD_000, PG10MD_001, PG10MD_010, PG10MD_011,
+ PG10MD_100, PG10MD_101, PG10MD_110, PG10MD_111,
+ PG9MD_000, PG9MD_001, PG9MD_010, PG9MD_011,
+ PG9MD_100, PG9MD_101, PG9MD_110, PG9MD_111,
+ PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
+ PG8MD_100, PG8MD_101, PG8MD_110, PG8MD_111,
+
+ PG7MD_000, PG7MD_001, PG7MD_010, PG7MD_011,
+ PG7MD_100, PG7MD_101, PG7MD_110, PG7MD_111,
+ PG6MD_000, PG6MD_001, PG6MD_010, PG6MD_011,
+ PG6MD_100, PG6MD_101, PG6MD_110, PG6MD_111,
+ PG5MD_000, PG5MD_001, PG5MD_010, PG5MD_011,
+ PG5MD_100, PG5MD_101, PG5MD_110, PG5MD_111,
+ PG4MD_000, PG4MD_001, PG4MD_010, PG4MD_011,
+ PG4MD_100, PG4MD_101, PG4MD_110, PG4MD_111,
+
+ PG3MD_000, PG3MD_001, PG3MD_010, PG3MD_011,
+ PG3MD_100, PG3MD_101, PG3MD_110, PG3MD_111,
+ PG2MD_000, PG2MD_001, PG2MD_010, PG2MD_011,
+ PG2MD_100, PG2MD_101, PG2MD_110, PG2MD_111,
+ PG1MD_000, PG1MD_001, PG1MD_010, PG1MD_011,
+ PG1MD_100, PG1MD_101, PG1MD_110, PG1MD_111,
+ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
+ PG0MD_100, PG0MD_101, PG0MD_110, PG0MD_111,
+
+ /* Port H */
+ PH7MD_00, PH7MD_01, PH7MD_10, PH7MD_11,
+ PH6MD_00, PH6MD_01, PH6MD_10, PH6MD_11,
+ PH5MD_00, PH5MD_01, PH5MD_10, PH5MD_11,
+ PH4MD_00, PH4MD_01, PH4MD_10, PH4MD_11,
+
+ PH3MD_00, PH3MD_01, PH3MD_10, PH3MD_11,
+ PH2MD_00, PH2MD_01, PH2MD_10, PH2MD_11,
+ PH1MD_00, PH1MD_01, PH1MD_10, PH1MD_11,
+ PH0MD_00, PH0MD_01, PH0MD_10, PH0MD_11,
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PJ31_IOR_IN, PJ31_IOR_OUT,
+ PJ30_IOR_IN, PJ30_IOR_OUT,
+ PJ29_IOR_IN, PJ29_IOR_OUT,
+ PJ28_IOR_IN, PJ28_IOR_OUT,
+ PJ27_IOR_IN, PJ27_IOR_OUT,
+ PJ26_IOR_IN, PJ26_IOR_OUT,
+ PJ25_IOR_IN, PJ25_IOR_OUT,
+ PJ24_IOR_IN, PJ24_IOR_OUT,
+ PJ23_IOR_IN, PJ23_IOR_OUT,
+ PJ22_IOR_IN, PJ22_IOR_OUT,
+ PJ21_IOR_IN, PJ21_IOR_OUT,
+ PJ20_IOR_IN, PJ20_IOR_OUT,
+ PJ19_IOR_IN, PJ19_IOR_OUT,
+ PJ18_IOR_IN, PJ18_IOR_OUT,
+ PJ17_IOR_IN, PJ17_IOR_OUT,
+ PJ16_IOR_IN, PJ16_IOR_OUT,
+ PJ15_IOR_IN, PJ15_IOR_OUT,
+ PJ14_IOR_IN, PJ14_IOR_OUT,
+ PJ13_IOR_IN, PJ13_IOR_OUT,
+ PJ12_IOR_IN, PJ12_IOR_OUT,
+ PJ11_IOR_IN, PJ11_IOR_OUT,
+ PJ10_IOR_IN, PJ10_IOR_OUT,
+ PJ9_IOR_IN, PJ9_IOR_OUT,
+ PJ8_IOR_IN, PJ8_IOR_OUT,
+ PJ7_IOR_IN, PJ7_IOR_OUT,
+ PJ6_IOR_IN, PJ6_IOR_OUT,
+ PJ5_IOR_IN, PJ5_IOR_OUT,
+ PJ4_IOR_IN, PJ4_IOR_OUT,
+ PJ3_IOR_IN, PJ3_IOR_OUT,
+ PJ2_IOR_IN, PJ2_IOR_OUT,
+ PJ1_IOR_IN, PJ1_IOR_OUT,
+ PJ0_IOR_IN, PJ0_IOR_OUT,
+
+ PJ31MD_0, PJ31MD_1,
+ PJ30MD_000, PJ30MD_001, PJ30MD_010, PJ30MD_011,
+ PJ30MD_100, PJ30MD_101, PJ30MD_110, PJ30MD_111,
+ PJ29MD_000, PJ29MD_001, PJ29MD_010, PJ29MD_011,
+ PJ29MD_100, PJ29MD_101, PJ29MD_110, PJ29MD_111,
+ PJ28MD_000, PJ28MD_001, PJ28MD_010, PJ28MD_011,
+ PJ28MD_100, PJ28MD_101, PJ28MD_110, PJ28MD_111,
+
+ PJ27MD_000, PJ27MD_001, PJ27MD_010, PJ27MD_011,
+ PJ27MD_100, PJ27MD_101, PJ27MD_110, PJ27MD_111,
+ PJ26MD_000, PJ26MD_001, PJ26MD_010, PJ26MD_011,
+ PJ26MD_100, PJ26MD_101, PJ26MD_110, PJ26MD_111,
+ PJ25MD_000, PJ25MD_001, PJ25MD_010, PJ25MD_011,
+ PJ25MD_100, PJ25MD_101, PJ25MD_110, PJ25MD_111,
+ PJ24MD_000, PJ24MD_001, PJ24MD_010, PJ24MD_011,
+ PJ24MD_100, PJ24MD_101, PJ24MD_110, PJ24MD_111,
+
+ PJ23MD_000, PJ23MD_001, PJ23MD_010, PJ23MD_011,
+ PJ23MD_100, PJ23MD_101, PJ23MD_110, PJ23MD_111,
+ PJ22MD_000, PJ22MD_001, PJ22MD_010, PJ22MD_011,
+ PJ22MD_100, PJ22MD_101, PJ22MD_110, PJ22MD_111,
+ PJ21MD_000, PJ21MD_001, PJ21MD_010, PJ21MD_011,
+ PJ21MD_100, PJ21MD_101, PJ21MD_110, PJ21MD_111,
+ PJ20MD_000, PJ20MD_001, PJ20MD_010, PJ20MD_011,
+ PJ20MD_100, PJ20MD_101, PJ20MD_110, PJ20MD_111,
+
+ PJ19MD_000, PJ19MD_001, PJ19MD_010, PJ19MD_011,
+ PJ19MD_100, PJ19MD_101, PJ19MD_110, PJ19MD_111,
+ PJ18MD_000, PJ18MD_001, PJ18MD_010, PJ18MD_011,
+ PJ18MD_100, PJ18MD_101, PJ18MD_110, PJ18MD_111,
+ PJ17MD_000, PJ17MD_001, PJ17MD_010, PJ17MD_011,
+ PJ17MD_100, PJ17MD_101, PJ17MD_110, PJ17MD_111,
+ PJ16MD_000, PJ16MD_001, PJ16MD_010, PJ16MD_011,
+ PJ16MD_100, PJ16MD_101, PJ16MD_110, PJ16MD_111,
+
+ PJ15MD_000, PJ15MD_001, PJ15MD_010, PJ15MD_011,
+ PJ15MD_100, PJ15MD_101, PJ15MD_110, PJ15MD_111,
+ PJ14MD_000, PJ14MD_001, PJ14MD_010, PJ14MD_011,
+ PJ14MD_100, PJ14MD_101, PJ14MD_110, PJ14MD_111,
+ PJ13MD_000, PJ13MD_001, PJ13MD_010, PJ13MD_011,
+ PJ13MD_100, PJ13MD_101, PJ13MD_110, PJ13MD_111,
+ PJ12MD_000, PJ12MD_001, PJ12MD_010, PJ12MD_011,
+ PJ12MD_100, PJ12MD_101, PJ12MD_110, PJ12MD_111,
+
+ PJ11MD_000, PJ11MD_001, PJ11MD_010, PJ11MD_011,
+ PJ11MD_100, PJ11MD_101, PJ11MD_110, PJ11MD_111,
+ PJ10MD_000, PJ10MD_001, PJ10MD_010, PJ10MD_011,
+ PJ10MD_100, PJ10MD_101, PJ10MD_110, PJ10MD_111,
+ PJ9MD_000, PJ9MD_001, PJ9MD_010, PJ9MD_011,
+ PJ9MD_100, PJ9MD_101, PJ9MD_110, PJ9MD_111,
+ PJ8MD_000, PJ8MD_001, PJ8MD_010, PJ8MD_011,
+ PJ8MD_100, PJ8MD_101, PJ8MD_110, PJ8MD_111,
+
+ PJ7MD_000, PJ7MD_001, PJ7MD_010, PJ7MD_011,
+ PJ7MD_100, PJ7MD_101, PJ7MD_110, PJ7MD_111,
+ PJ6MD_000, PJ6MD_001, PJ6MD_010, PJ6MD_011,
+ PJ6MD_100, PJ6MD_101, PJ6MD_110, PJ6MD_111,
+ PJ5MD_000, PJ5MD_001, PJ5MD_010, PJ5MD_011,
+ PJ5MD_100, PJ5MD_101, PJ5MD_110, PJ5MD_111,
+ PJ4MD_000, PJ4MD_001, PJ4MD_010, PJ4MD_011,
+ PJ4MD_100, PJ4MD_101, PJ4MD_110, PJ4MD_111,
+
+ PJ3MD_000, PJ3MD_001, PJ3MD_010, PJ3MD_011,
+ PJ3MD_100, PJ3MD_101, PJ3MD_110, PJ3MD_111,
+ PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
+ PJ2MD_100, PJ2MD_101, PJ2MD_110, PJ2MD_111,
+ PJ1MD_000, PJ1MD_001, PJ1MD_010, PJ1MD_011,
+ PJ1MD_100, PJ1MD_101, PJ1MD_110, PJ1MD_111,
+ PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
+ PJ0MD_100, PJ0MD_101, PJ0MD_110, PJ0MD_111,
+
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* Port H */
+ PHAN7_MARK, PHAN6_MARK, PHAN5_MARK, PHAN4_MARK,
+ PHAN3_MARK, PHAN2_MARK, PHAN1_MARK, PHAN0_MARK,
+
+ /* IRQs */
+ IRQ7_PG_MARK, IRQ6_PG_MARK, IRQ5_PG_MARK, IRQ4_PG_MARK,
+ IRQ3_PG_MARK, IRQ2_PG_MARK, IRQ1_PG_MARK, IRQ0_PG_MARK,
+ IRQ7_PF_MARK, IRQ6_PF_MARK, IRQ5_PF_MARK, IRQ4_PF_MARK,
+ IRQ3_PJ_MARK, IRQ2_PJ_MARK, IRQ1_PJ_MARK, IRQ0_PJ_MARK,
+ IRQ1_PC_MARK, IRQ0_PC_MARK,
+
+ PINT7_PG_MARK, PINT6_PG_MARK, PINT5_PG_MARK, PINT4_PG_MARK,
+ PINT3_PG_MARK, PINT2_PG_MARK, PINT1_PG_MARK, PINT0_PG_MARK,
+ PINT7_PH_MARK, PINT6_PH_MARK, PINT5_PH_MARK, PINT4_PH_MARK,
+ PINT3_PH_MARK, PINT2_PH_MARK, PINT1_PH_MARK, PINT0_PH_MARK,
+ PINT7_PJ_MARK, PINT6_PJ_MARK, PINT5_PJ_MARK, PINT4_PJ_MARK,
+ PINT3_PJ_MARK, PINT2_PJ_MARK, PINT1_PJ_MARK, PINT0_PJ_MARK,
+
+ /* SD */
+ SD_D0_MARK, SD_D1_MARK, SD_D2_MARK, SD_D3_MARK,
+ SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK, SD_CD_MARK,
+
+ /* MMC */
+ MMC_D0_MARK, MMC_D1_MARK, MMC_D2_MARK, MMC_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK, MMC_D6_MARK, MMC_D7_MARK,
+ MMC_CLK_MARK, MMC_CMD_MARK, MMC_CD_MARK,
+
+ /* PWM */
+ PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
+ PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
+ PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
+ PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
+
+ /* IEBus */
+ IERXD_MARK, IETXD_MARK,
+
+ /* WDT */
+ WDTOVF_MARK,
+
+ /* DMAC */
+ TEND0_MARK, DACK0_MARK, DREQ0_MARK,
+ TEND1_MARK, DACK1_MARK, DREQ1_MARK,
+
+ /* ADC */
+ ADTRG_MARK,
+
+ /* BSC */
+ A25_MARK, A24_MARK,
+ A23_MARK, A22_MARK, A21_MARK, A20_MARK,
+ A19_MARK, A18_MARK, A17_MARK, A16_MARK,
+ A15_MARK, A14_MARK, A13_MARK, A12_MARK,
+ A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+ A7_MARK, A6_MARK, A5_MARK, A4_MARK,
+ A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ D7_MARK, D6_MARK, D5_MARK, D4_MARK,
+ D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+ BS_MARK,
+ CS4_MARK, CS3_MARK, CS2_MARK, CS1_MARK, CS0_MARK,
+ CS5CE1A_MARK,
+ CE2A_MARK, CE2B_MARK,
+ RD_MARK, RDWR_MARK,
+ WE3ICIOWRAHDQMUU_MARK,
+ WE2ICIORDDQMUL_MARK,
+ WE1DQMUWE_MARK,
+ WE0DQML_MARK,
+ RAS_MARK, CAS_MARK, CKE_MARK,
+ WAIT_MARK, BREQ_MARK, BACK_MARK, IOIS16_MARK,
+
+ /* MTU2 */
+ TIOC0A_MARK, TIOC0B_MARK, TIOC0C_MARK, TIOC0D_MARK,
+ TIOC1A_MARK, TIOC1B_MARK,
+ TIOC2A_MARK, TIOC2B_MARK,
+ TIOC3A_MARK, TIOC3B_MARK, TIOC3C_MARK, TIOC3D_MARK,
+ TIOC4A_MARK, TIOC4B_MARK, TIOC4C_MARK, TIOC4D_MARK,
+ TCLKA_MARK, TCLKB_MARK, TCLKC_MARK, TCLKD_MARK,
+
+ /* SCIF */
+ SCK0_MARK, RXD0_MARK, TXD0_MARK,
+ SCK1_MARK, RXD1_MARK, TXD1_MARK, RTS1_MARK, CTS1_MARK,
+ SCK2_MARK, RXD2_MARK, TXD2_MARK,
+ SCK3_MARK, RXD3_MARK, TXD3_MARK,
+ SCK4_MARK, RXD4_MARK, TXD4_MARK,
+ SCK5_MARK, RXD5_MARK, TXD5_MARK, RTS5_MARK, CTS5_MARK,
+ SCK6_MARK, RXD6_MARK, TXD6_MARK,
+ SCK7_MARK, RXD7_MARK, TXD7_MARK, RTS7_MARK, CTS7_MARK,
+
+ /* RSPI */
+ MISO0_PB20_MARK, MOSI0_PB19_MARK, SSL00_PB18_MARK, RSPCK0_PB17_MARK,
+ MISO0_PJ19_MARK, MOSI0_PJ18_MARK, SSL00_PJ17_MARK, RSPCK0_PJ16_MARK,
+ MISO1_MARK, MOSI1_MARK, SSL10_MARK, RSPCK1_MARK,
+
+ /* IIC3 */
+ SCL0_MARK, SDA0_MARK,
+ SCL1_MARK, SDA1_MARK,
+ SCL2_MARK, SDA2_MARK,
+ SCL3_MARK, SDA3_MARK,
+
+ /* SSI */
+ SSISCK0_MARK, SSIWS0_MARK, SSITXD0_MARK, SSIRXD0_MARK,
+ SSISCK1_MARK, SSIWS1_MARK, SSIDATA1_MARK,
+ SSISCK2_MARK, SSIWS2_MARK, SSIDATA2_MARK,
+ SSISCK3_MARK, SSIWS3_MARK, SSIDATA3_MARK,
+ SSISCK4_MARK, SSIWS4_MARK, SSIDATA4_MARK,
+ SSISCK5_MARK, SSIWS5_MARK, SSIDATA5_MARK,
+ AUDIO_CLK_MARK,
+ AUDIO_XOUT_MARK,
+
+ /* SIOF (shares AUDIO_CLK with SSI) */
+ SIOFTXD_MARK, SIOFRXD_MARK, SIOFSYNC_MARK, SIOFSCK_MARK,
+
+ /* SPDIF (shares AUDIO_CLK with SSI) */
+ SPDIF_IN_MARK, SPDIF_OUT_MARK,
+ SPDIF_IN_PJ24_MARK, SPDIF_OUT_PJ25_MARK,
+
+ /* NANDFMC (controller is not available in boot mode 0) */
+ FCE_MARK,
+ FRB_MARK,
+
+ /* CAN */
+ CRX0_MARK, CTX0_MARK,
+ CRX1_MARK, CTX1_MARK,
+ CRX2_MARK, CTX2_MARK,
+ CRX0_CRX1_MARK,
+ CRX0_CRX1_CRX2_MARK,
+ CTX0CTX1CTX2_MARK,
+ CRX1_PJ22_MARK, CTX1_PJ23_MARK,
+ CRX2_PJ20_MARK, CTX2_PJ21_MARK,
+ CRX0CRX1_PJ22_MARK,
+ CRX0CRX1CRX2_PJ20_MARK,
+
+ /* VDC */
+ DV_CLK_MARK,
+ DV_VSYNC_MARK, DV_HSYNC_MARK,
+ DV_DATA23_MARK, DV_DATA22_MARK, DV_DATA21_MARK, DV_DATA20_MARK,
+ DV_DATA19_MARK, DV_DATA18_MARK, DV_DATA17_MARK, DV_DATA16_MARK,
+ DV_DATA15_MARK, DV_DATA14_MARK, DV_DATA13_MARK, DV_DATA12_MARK,
+ DV_DATA11_MARK, DV_DATA10_MARK, DV_DATA9_MARK, DV_DATA8_MARK,
+ DV_DATA7_MARK, DV_DATA6_MARK, DV_DATA5_MARK, DV_DATA4_MARK,
+ DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK,
+ LCD_CLK_MARK, LCD_EXTCLK_MARK,
+ LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK,
+ LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK,
+ LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK,
+ LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK,
+ LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK,
+ LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK,
+ LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK,
+ LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK,
+ LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK,
+ LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK,
+ LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK,
+ LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK,
+ LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK,
+ LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK,
+ LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK,
+ LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK,
+ LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK,
+ LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK,
+ LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK,
+ LCD_M_DISP_MARK,
+ PINMUX_MARK_END,
+};
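[Editorial note, not part of the patch] The enum above is partitioned into ranges by the PINMUX_DATA/INPUT/OUTPUT/FUNCTION/MARK BEGIN and END markers, and the SoC info structure (as seen for SH7264 earlier) hands exactly those ranges to the sh-pfc core. Purely as an illustration of how such a range is interpreted, a hypothetical range check might look like this; the helper is not something the driver defines:

	static inline int sh7269_id_is_mark(pinmux_enum_t id)
	{
		/* True for ids in the function-mark range delimited above. */
		return id > PINMUX_MARK_BEGIN && id < PINMUX_MARK_END;
	}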
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* Port A */
+ PINMUX_DATA(PA1_DATA, PA1_IN),
+ PINMUX_DATA(PA0_DATA, PA0_IN),
+
+ /* Port B */
+ PINMUX_DATA(PB22_DATA, PB22MD_000, PB22_IN, PB22_OUT),
+ PINMUX_DATA(A22_MARK, PB22MD_001),
+ PINMUX_DATA(CTX2_MARK, PB22MD_010),
+ PINMUX_DATA(IETXD_MARK, PB22MD_011),
+ PINMUX_DATA(CS4_MARK, PB22MD_100),
+
+ PINMUX_DATA(PB21_DATA, PB21MD_00, PB21_IN, PB21_OUT),
+ PINMUX_DATA(A21_MARK, PB21MD_01),
+ PINMUX_DATA(CRX2_MARK, PB21MD_10),
+ PINMUX_DATA(IERXD_MARK, PB21MD_11),
+
+ PINMUX_DATA(A20_MARK, PB20MD_001),
+ PINMUX_DATA(A19_MARK, PB19MD_001),
+ PINMUX_DATA(A18_MARK, PB18MD_001),
+ PINMUX_DATA(A17_MARK, PB17MD_001),
+ PINMUX_DATA(A16_MARK, PB16MD_001),
+ PINMUX_DATA(A15_MARK, PB15MD_001),
+ PINMUX_DATA(A14_MARK, PB14MD_001),
+ PINMUX_DATA(A13_MARK, PB13MD_001),
+ PINMUX_DATA(A12_MARK, PB12MD_01),
+ PINMUX_DATA(A11_MARK, PB11MD_01),
+ PINMUX_DATA(A10_MARK, PB10MD_01),
+ PINMUX_DATA(A9_MARK, PB9MD_01),
+ PINMUX_DATA(A8_MARK, PB8MD_01),
+ PINMUX_DATA(A7_MARK, PB7MD_01),
+ PINMUX_DATA(A6_MARK, PB6MD_01),
+ PINMUX_DATA(A5_MARK, PB5MD_01),
+ PINMUX_DATA(A4_MARK, PB4MD_01),
+ PINMUX_DATA(A3_MARK, PB3MD_01),
+ PINMUX_DATA(A2_MARK, PB2MD_01),
+ PINMUX_DATA(A1_MARK, PB1MD_01),
+
+ /* Port C */
+ PINMUX_DATA(PC8_DATA, PC8MD_000),
+ PINMUX_DATA(CS3_MARK, PC8MD_001),
+ PINMUX_DATA(TXD7_MARK, PC8MD_010),
+ PINMUX_DATA(CTX1_MARK, PC8MD_011),
+
+ PINMUX_DATA(PC7_DATA, PC7MD_000),
+ PINMUX_DATA(CKE_MARK, PC7MD_001),
+ PINMUX_DATA(RXD7_MARK, PC7MD_010),
+ PINMUX_DATA(CRX1_MARK, PC7MD_011),
+ PINMUX_DATA(CRX0_CRX1_MARK, PC7MD_100),
+ PINMUX_DATA(IRQ1_PC_MARK, PC7MD_101),
+
+ PINMUX_DATA(PC6_DATA, PC6MD_000),
+ PINMUX_DATA(CAS_MARK, PC6MD_001),
+ PINMUX_DATA(SCK7_MARK, PC6MD_010),
+ PINMUX_DATA(CTX0_MARK, PC6MD_011),
+
+ PINMUX_DATA(PC5_DATA, PC5MD_000),
+ PINMUX_DATA(RAS_MARK, PC5MD_001),
+ PINMUX_DATA(CRX0_MARK, PC5MD_011),
+ PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
+ PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
+
+ PINMUX_DATA(PC4_DATA, PC4MD_00),
+ PINMUX_DATA(WE1DQMUWE_MARK, PC4MD_01),
+ PINMUX_DATA(TXD6_MARK, PC4MD_10),
+
+ PINMUX_DATA(PC3_DATA, PC3MD_00),
+ PINMUX_DATA(WE0DQML_MARK, PC3MD_01),
+ PINMUX_DATA(RXD6_MARK, PC3MD_10),
+
+ PINMUX_DATA(PC2_DATA, PC2MD_00),
+ PINMUX_DATA(RDWR_MARK, PC2MD_01),
+ PINMUX_DATA(SCK5_MARK, PC2MD_10),
+
+ PINMUX_DATA(PC1_DATA, PC1MD_0),
+ PINMUX_DATA(RD_MARK, PC1MD_1),
+
+ PINMUX_DATA(PC0_DATA, PC0MD_0),
+ PINMUX_DATA(CS0_MARK, PC0MD_1),
+
+ /* Port D */
+ PINMUX_DATA(D15_MARK, PD15MD_01),
+ PINMUX_DATA(D14_MARK, PD14MD_01),
+
+ PINMUX_DATA(PD13_DATA, PD13MD_00),
+ PINMUX_DATA(D13_MARK, PD13MD_01),
+ PINMUX_DATA(PWM2F_MARK, PD13MD_10),
+
+ PINMUX_DATA(PD12_DATA, PD12MD_00),
+ PINMUX_DATA(D12_MARK, PD12MD_01),
+ PINMUX_DATA(PWM2E_MARK, PD12MD_10),
+
+ PINMUX_DATA(D11_MARK, PD11MD_01),
+ PINMUX_DATA(D10_MARK, PD10MD_01),
+ PINMUX_DATA(D9_MARK, PD9MD_01),
+ PINMUX_DATA(D8_MARK, PD8MD_01),
+ PINMUX_DATA(D7_MARK, PD7MD_01),
+ PINMUX_DATA(D6_MARK, PD6MD_01),
+ PINMUX_DATA(D5_MARK, PD5MD_01),
+ PINMUX_DATA(D4_MARK, PD4MD_01),
+ PINMUX_DATA(D3_MARK, PD3MD_01),
+ PINMUX_DATA(D2_MARK, PD2MD_01),
+ PINMUX_DATA(D1_MARK, PD1MD_01),
+ PINMUX_DATA(D0_MARK, PD0MD_01),
+
+ /* Port E */
+ PINMUX_DATA(PE7_DATA, PE7MD_00),
+ PINMUX_DATA(SDA3_MARK, PE7MD_01),
+ PINMUX_DATA(RXD7_MARK, PE7MD_10),
+
+ PINMUX_DATA(PE6_DATA, PE6MD_00),
+ PINMUX_DATA(SCL3_MARK, PE6MD_01),
+ PINMUX_DATA(RXD6_MARK, PE6MD_10),
+
+ PINMUX_DATA(PE5_DATA, PE5MD_00),
+ PINMUX_DATA(SDA2_MARK, PE5MD_01),
+ PINMUX_DATA(RXD5_MARK, PE5MD_10),
+ PINMUX_DATA(DV_HSYNC_MARK, PE5MD_11),
+
+ PINMUX_DATA(PE4_DATA, PE4MD_00),
+ PINMUX_DATA(SCL2_MARK, PE4MD_01),
+ PINMUX_DATA(DV_VSYNC_MARK, PE4MD_11),
+
+ PINMUX_DATA(PE3_DATA, PE3MD_000),
+ PINMUX_DATA(SDA1_MARK, PE3MD_001),
+ PINMUX_DATA(TCLKD_MARK, PE3MD_010),
+ PINMUX_DATA(ADTRG_MARK, PE3MD_011),
+ PINMUX_DATA(DV_HSYNC_MARK, PE3MD_100),
+
+ PINMUX_DATA(PE2_DATA, PE2MD_000),
+ PINMUX_DATA(SCL1_MARK, PE2MD_001),
+ PINMUX_DATA(TCLKC_MARK, PE2MD_010),
+ PINMUX_DATA(IOIS16_MARK, PE2MD_011),
+ PINMUX_DATA(DV_VSYNC_MARK, PE2MD_100),
+
+ PINMUX_DATA(PE1_DATA, PE1MD_000),
+ PINMUX_DATA(SDA0_MARK, PE1MD_001),
+ PINMUX_DATA(TCLKB_MARK, PE1MD_010),
+ PINMUX_DATA(AUDIO_CLK_MARK, PE1MD_011),
+ PINMUX_DATA(DV_CLK_MARK, PE1MD_100),
+
+ PINMUX_DATA(PE0_DATA, PE0MD_00),
+ PINMUX_DATA(SCL0_MARK, PE0MD_01),
+ PINMUX_DATA(TCLKA_MARK, PE0MD_10),
+ PINMUX_DATA(LCD_EXTCLK_MARK, PE0MD_11),
+
+ /* Port F */
+ PINMUX_DATA(PF23_DATA, PF23MD_000),
+ PINMUX_DATA(SD_D2_MARK, PF23MD_001),
+ PINMUX_DATA(TXD3_MARK, PF23MD_100),
+ PINMUX_DATA(MMC_D2_MARK, PF23MD_101),
+
+ PINMUX_DATA(PF22_DATA, PF22MD_000),
+ PINMUX_DATA(SD_D3_MARK, PF22MD_001),
+ PINMUX_DATA(RXD3_MARK, PF22MD_100),
+ PINMUX_DATA(MMC_D3_MARK, PF22MD_101),
+
+ PINMUX_DATA(PF21_DATA, PF21MD_000),
+ PINMUX_DATA(SD_CMD_MARK, PF21MD_001),
+ PINMUX_DATA(SCK3_MARK, PF21MD_100),
+ PINMUX_DATA(MMC_CMD_MARK, PF21MD_101),
+
+ PINMUX_DATA(PF20_DATA, PF20MD_000),
+ PINMUX_DATA(SD_CLK_MARK, PF20MD_001),
+ PINMUX_DATA(SSIDATA3_MARK, PF20MD_010),
+ PINMUX_DATA(MMC_CLK_MARK, PF20MD_101),
+
+ PINMUX_DATA(PF19_DATA, PF19MD_000),
+ PINMUX_DATA(SD_D0_MARK, PF19MD_001),
+ PINMUX_DATA(SSIWS3_MARK, PF19MD_010),
+ PINMUX_DATA(IRQ7_PF_MARK, PF19MD_100),
+ PINMUX_DATA(MMC_D0_MARK, PF19MD_101),
+
+ PINMUX_DATA(PF18_DATA, PF18MD_000),
+ PINMUX_DATA(SD_D1_MARK, PF18MD_001),
+ PINMUX_DATA(SSISCK3_MARK, PF18MD_010),
+ PINMUX_DATA(IRQ6_PF_MARK, PF18MD_100),
+ PINMUX_DATA(MMC_D1_MARK, PF18MD_101),
+
+ PINMUX_DATA(PF17_DATA, PF17MD_000),
+ PINMUX_DATA(SD_WP_MARK, PF17MD_001),
+ PINMUX_DATA(FRB_MARK, PF17MD_011),
+ PINMUX_DATA(IRQ5_PF_MARK, PF17MD_100),
+
+ PINMUX_DATA(PF16_DATA, PF16MD_000),
+ PINMUX_DATA(SD_CD_MARK, PF16MD_001),
+ PINMUX_DATA(FCE_MARK, PF16MD_011),
+ PINMUX_DATA(IRQ4_PF_MARK, PF16MD_100),
+ PINMUX_DATA(MMC_CD_MARK, PF16MD_101),
+
+ PINMUX_DATA(PF15_DATA, PF15MD_000),
+ PINMUX_DATA(A0_MARK, PF15MD_001),
+ PINMUX_DATA(SSIDATA2_MARK, PF15MD_010),
+ PINMUX_DATA(WDTOVF_MARK, PF15MD_011),
+ PINMUX_DATA(TXD2_MARK, PF15MD_100),
+
+ PINMUX_DATA(PF14_DATA, PF14MD_000),
+ PINMUX_DATA(A25_MARK, PF14MD_001),
+ PINMUX_DATA(SSIWS2_MARK, PF14MD_010),
+ PINMUX_DATA(RXD2_MARK, PF14MD_100),
+
+ PINMUX_DATA(PF13_DATA, PF13MD_000),
+ PINMUX_DATA(A24_MARK, PF13MD_001),
+ PINMUX_DATA(SSISCK2_MARK, PF13MD_010),
+ PINMUX_DATA(SCK2_MARK, PF13MD_100),
+
+ PINMUX_DATA(PF12_DATA, PF12MD_000),
+ PINMUX_DATA(SSIDATA1_MARK, PF12MD_010),
+ PINMUX_DATA(DV_DATA12_MARK, PF12MD_011),
+ PINMUX_DATA(TXD1_MARK, PF12MD_100),
+ PINMUX_DATA(MMC_D7_MARK, PF12MD_101),
+
+ PINMUX_DATA(PF11_DATA, PF11MD_000),
+ PINMUX_DATA(SSIWS1_MARK, PF11MD_010),
+ PINMUX_DATA(DV_DATA2_MARK, PF11MD_011),
+ PINMUX_DATA(RXD1_MARK, PF11MD_100),
+ PINMUX_DATA(MMC_D6_MARK, PF11MD_101),
+
+ PINMUX_DATA(PF10_DATA, PF10MD_000),
+ PINMUX_DATA(CS1_MARK, PF10MD_001),
+ PINMUX_DATA(SSISCK1_MARK, PF10MD_010),
+ PINMUX_DATA(DV_DATA1_MARK, PF10MD_011),
+ PINMUX_DATA(SCK1_MARK, PF10MD_100),
+ PINMUX_DATA(MMC_D5_MARK, PF10MD_101),
+
+ PINMUX_DATA(PF9_DATA, PF9MD_000),
+ PINMUX_DATA(BS_MARK, PF9MD_001),
+ PINMUX_DATA(DV_DATA0_MARK, PF9MD_011),
+ PINMUX_DATA(SCK0_MARK, PF9MD_100),
+ PINMUX_DATA(MMC_D4_MARK, PF9MD_101),
+ PINMUX_DATA(RTS1_MARK, PF9MD_110),
+
+ PINMUX_DATA(PF8_DATA, PF8MD_000),
+ PINMUX_DATA(A23_MARK, PF8MD_001),
+ PINMUX_DATA(TXD0_MARK, PF8MD_100),
+
+ PINMUX_DATA(PF7_DATA, PF7MD_000),
+ PINMUX_DATA(SSIRXD0_MARK, PF7MD_010),
+ PINMUX_DATA(RXD0_MARK, PF7MD_100),
+ PINMUX_DATA(CTS1_MARK, PF7MD_110),
+
+ PINMUX_DATA(PF6_DATA, PF6MD_000),
+ PINMUX_DATA(CE2A_MARK, PF6MD_001),
+ PINMUX_DATA(SSITXD0_MARK, PF6MD_010),
+
+ PINMUX_DATA(PF5_DATA, PF5MD_000),
+ PINMUX_DATA(SSIWS0_MARK, PF5MD_010),
+
+ PINMUX_DATA(PF4_DATA, PF4MD_000),
+ PINMUX_DATA(CS5CE1A_MARK, PF4MD_001),
+ PINMUX_DATA(SSISCK0_MARK, PF4MD_010),
+
+ PINMUX_DATA(PF3_DATA, PF3MD_000),
+ PINMUX_DATA(CS2_MARK, PF3MD_001),
+ PINMUX_DATA(MISO1_MARK, PF3MD_011),
+ PINMUX_DATA(TIOC4D_MARK, PF3MD_100),
+
+ PINMUX_DATA(PF2_DATA, PF2MD_000),
+ PINMUX_DATA(WAIT_MARK, PF2MD_001),
+ PINMUX_DATA(MOSI1_MARK, PF2MD_011),
+ PINMUX_DATA(TIOC4C_MARK, PF2MD_100),
+ PINMUX_DATA(TEND0_MARK, PF2MD_101),
+
+ PINMUX_DATA(PF1_DATA, PF1MD_000),
+ PINMUX_DATA(BACK_MARK, PF1MD_001),
+ PINMUX_DATA(SSL10_MARK, PF1MD_011),
+ PINMUX_DATA(TIOC4B_MARK, PF1MD_100),
+ PINMUX_DATA(DACK0_MARK, PF1MD_101),
+
+ PINMUX_DATA(PF0_DATA, PF0MD_000),
+ PINMUX_DATA(BREQ_MARK, PF0MD_001),
+ PINMUX_DATA(RSPCK1_MARK, PF0MD_011),
+ PINMUX_DATA(TIOC4A_MARK, PF0MD_100),
+ PINMUX_DATA(DREQ0_MARK, PF0MD_101),
+
+ /* Port G */
+ PINMUX_DATA(PG27_DATA, PG27MD_00),
+ PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10),
+ PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11),
+ PINMUX_DATA(LCD_DE_MARK, PG27MD_11),
+
+ PINMUX_DATA(PG26_DATA, PG26MD_00),
+ PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10),
+ PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10),
+
+ PINMUX_DATA(PG25_DATA, PG25MD_00),
+ PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10),
+ PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10),
+
+ PINMUX_DATA(PG24_DATA, PG24MD_00),
+ PINMUX_DATA(LCD_CLK_MARK, PG24MD_10),
+
+ PINMUX_DATA(PG23_DATA, PG23MD_000),
+ PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010),
+ PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011),
+ PINMUX_DATA(TXD5_MARK, PG23MD_100),
+
+ PINMUX_DATA(PG22_DATA, PG22MD_000),
+ PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010),
+ PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011),
+ PINMUX_DATA(RXD5_MARK, PG22MD_100),
+
+ PINMUX_DATA(PG21_DATA, PG21MD_000),
+ PINMUX_DATA(DV_DATA7_MARK, PG21MD_001),
+ PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010),
+ PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011),
+ PINMUX_DATA(TXD4_MARK, PG21MD_100),
+
+ PINMUX_DATA(PG20_DATA, PG20MD_000),
+ PINMUX_DATA(DV_DATA6_MARK, PG20MD_001),
+ PINMUX_DATA(LCD_DATA20_PG20_MARK, PG20MD_010),
+ PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011),
+ PINMUX_DATA(RXD4_MARK, PG20MD_100),
+
+ PINMUX_DATA(PG19_DATA, PG19MD_000),
+ PINMUX_DATA(DV_DATA5_MARK, PG19MD_001),
+ PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010),
+ PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011),
+ PINMUX_DATA(SCK5_MARK, PG19MD_100),
+
+ PINMUX_DATA(PG18_DATA, PG18MD_000),
+ PINMUX_DATA(DV_DATA4_MARK, PG18MD_001),
+ PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010),
+ PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011),
+ PINMUX_DATA(SCK4_MARK, PG18MD_100),
+
+ /* TODO: the hardware manual shows PG17 as 3 bits wide in the register
+  * diagram but 2 bits in the description; we're going with 2 bits.
+  */
+ PINMUX_DATA(PG17_DATA, PG17MD_00),
+ PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01),
+ PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10),
+
+ /* TODO: the hardware manual shows PG16 as 3 bits wide in the register
+  * diagram but 2 bits in the description; we're going with 2 bits.
+  */
+ PINMUX_DATA(PG16_DATA, PG16MD_00),
+ PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01),
+ PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10),
+
+ PINMUX_DATA(PG15_DATA, PG15MD_00),
+ PINMUX_DATA(D31_MARK, PG15MD_01),
+ PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10),
+ PINMUX_DATA(PINT7_PG_MARK, PG15MD_11),
+
+ PINMUX_DATA(PG14_DATA, PG14MD_00),
+ PINMUX_DATA(D30_MARK, PG14MD_01),
+ PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10),
+ PINMUX_DATA(PINT6_PG_MARK, PG14MD_11),
+
+ PINMUX_DATA(PG13_DATA, PG13MD_00),
+ PINMUX_DATA(D29_MARK, PG13MD_01),
+ PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10),
+ PINMUX_DATA(PINT5_PG_MARK, PG13MD_11),
+
+ PINMUX_DATA(PG12_DATA, PG12MD_00),
+ PINMUX_DATA(D28_MARK, PG12MD_01),
+ PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10),
+ PINMUX_DATA(PINT4_PG_MARK, PG12MD_11),
+
+ PINMUX_DATA(PG11_DATA, PG11MD_000),
+ PINMUX_DATA(D27_MARK, PG11MD_001),
+ PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010),
+ PINMUX_DATA(PINT3_PG_MARK, PG11MD_011),
+ PINMUX_DATA(TIOC3D_MARK, PG11MD_100),
+
+ PINMUX_DATA(PG10_DATA, PG10MD_000),
+ PINMUX_DATA(D26_MARK, PG10MD_001),
+ PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010),
+ PINMUX_DATA(PINT2_PG_MARK, PG10MD_011),
+ PINMUX_DATA(TIOC3C_MARK, PG10MD_100),
+
+ PINMUX_DATA(PG9_DATA, PG9MD_000),
+ PINMUX_DATA(D25_MARK, PG9MD_001),
+ PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010),
+ PINMUX_DATA(PINT1_PG_MARK, PG9MD_011),
+ PINMUX_DATA(TIOC3B_MARK, PG9MD_100),
+
+ PINMUX_DATA(PG8_DATA, PG8MD_000),
+ PINMUX_DATA(D24_MARK, PG8MD_001),
+ PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010),
+ PINMUX_DATA(PINT0_PG_MARK, PG8MD_011),
+ PINMUX_DATA(TIOC3A_MARK, PG8MD_100),
+
+ PINMUX_DATA(PG7_DATA, PG7MD_000),
+ PINMUX_DATA(D23_MARK, PG7MD_001),
+ PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010),
+ PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011),
+ PINMUX_DATA(TIOC2B_MARK, PG7MD_100),
+
+ PINMUX_DATA(PG6_DATA, PG6MD_000),
+ PINMUX_DATA(D22_MARK, PG6MD_001),
+ PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010),
+ PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011),
+ PINMUX_DATA(TIOC2A_MARK, PG6MD_100),
+
+ PINMUX_DATA(PG5_DATA, PG5MD_000),
+ PINMUX_DATA(D21_MARK, PG5MD_001),
+ PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010),
+ PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011),
+ PINMUX_DATA(TIOC1B_MARK, PG5MD_100),
+
+ PINMUX_DATA(PG4_DATA, PG4MD_000),
+ PINMUX_DATA(D20_MARK, PG4MD_001),
+ PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010),
+ PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011),
+ PINMUX_DATA(TIOC1A_MARK, PG4MD_100),
+
+ PINMUX_DATA(PG3_DATA, PG3MD_000),
+ PINMUX_DATA(D19_MARK, PG3MD_001),
+ PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010),
+ PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011),
+ PINMUX_DATA(TIOC0D_MARK, PG3MD_100),
+
+ PINMUX_DATA(PG2_DATA, PG2MD_000),
+ PINMUX_DATA(D18_MARK, PG2MD_001),
+ PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010),
+ PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011),
+ PINMUX_DATA(TIOC0C_MARK, PG2MD_100),
+
+ PINMUX_DATA(PG1_DATA, PG1MD_000),
+ PINMUX_DATA(D17_MARK, PG1MD_001),
+ PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010),
+ PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011),
+ PINMUX_DATA(TIOC0B_MARK, PG1MD_100),
+
+ PINMUX_DATA(PG0_DATA, PG0MD_000),
+ PINMUX_DATA(D16_MARK, PG0MD_001),
+ PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010),
+ PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011),
+ PINMUX_DATA(TIOC0A_MARK, PG0MD_100),
+
+ /* Port H */
+ PINMUX_DATA(PH7_DATA, PH7MD_00),
+ PINMUX_DATA(PHAN7_MARK, PH7MD_01),
+ PINMUX_DATA(PINT7_PH_MARK, PH7MD_10),
+
+ PINMUX_DATA(PH6_DATA, PH6MD_00),
+ PINMUX_DATA(PHAN6_MARK, PH6MD_01),
+ PINMUX_DATA(PINT6_PH_MARK, PH6MD_10),
+
+ PINMUX_DATA(PH5_DATA, PH5MD_00),
+ PINMUX_DATA(PHAN5_MARK, PH5MD_01),
+ PINMUX_DATA(PINT5_PH_MARK, PH5MD_10),
+ PINMUX_DATA(LCD_EXTCLK_MARK, PH5MD_11),
+
+ PINMUX_DATA(PH4_DATA, PH4MD_00),
+ PINMUX_DATA(PHAN4_MARK, PH4MD_01),
+ PINMUX_DATA(PINT4_PH_MARK, PH4MD_10),
+
+ PINMUX_DATA(PH3_DATA, PH3MD_00),
+ PINMUX_DATA(PHAN3_MARK, PH3MD_01),
+ PINMUX_DATA(PINT3_PH_MARK, PH3MD_10),
+
+ PINMUX_DATA(PH2_DATA, PH2MD_00),
+ PINMUX_DATA(PHAN2_MARK, PH2MD_01),
+ PINMUX_DATA(PINT2_PH_MARK, PH2MD_10),
+
+ PINMUX_DATA(PH1_DATA, PH1MD_00),
+ PINMUX_DATA(PHAN1_MARK, PH1MD_01),
+ PINMUX_DATA(PINT1_PH_MARK, PH1MD_10),
+
+ PINMUX_DATA(PH0_DATA, PH0MD_00),
+ PINMUX_DATA(PHAN0_MARK, PH0MD_01),
+ PINMUX_DATA(PINT0_PH_MARK, PH0MD_10),
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PINMUX_DATA(PJ31_DATA, PJ31MD_0),
+ PINMUX_DATA(DV_CLK_MARK, PJ31MD_1),
+
+ PINMUX_DATA(PJ30_DATA, PJ30MD_000),
+ PINMUX_DATA(SSIDATA5_MARK, PJ30MD_010),
+ PINMUX_DATA(TIOC2B_MARK, PJ30MD_100),
+ PINMUX_DATA(IETXD_MARK, PJ30MD_101),
+
+ PINMUX_DATA(PJ29_DATA, PJ29MD_000),
+ PINMUX_DATA(SSIWS5_MARK, PJ29MD_010),
+ PINMUX_DATA(TIOC2A_MARK, PJ29MD_100),
+ PINMUX_DATA(IERXD_MARK, PJ29MD_101),
+
+ PINMUX_DATA(PJ28_DATA, PJ28MD_000),
+ PINMUX_DATA(SSISCK5_MARK, PJ28MD_010),
+ PINMUX_DATA(TIOC1B_MARK, PJ28MD_100),
+ PINMUX_DATA(RTS7_MARK, PJ28MD_101),
+
+ PINMUX_DATA(PJ27_DATA, PJ27MD_000),
+ PINMUX_DATA(TIOC1A_MARK, PJ27MD_100),
+ PINMUX_DATA(CTS7_MARK, PJ27MD_101),
+
+ PINMUX_DATA(PJ26_DATA, PJ26MD_000),
+ PINMUX_DATA(SSIDATA4_MARK, PJ26MD_010),
+ PINMUX_DATA(LCD_TCON5_MARK, PJ26MD_011),
+ PINMUX_DATA(TXD7_MARK, PJ26MD_101),
+
+ PINMUX_DATA(PJ25_DATA, PJ25MD_000),
+ PINMUX_DATA(SSIWS4_MARK, PJ25MD_010),
+ PINMUX_DATA(LCD_TCON4_MARK, PJ25MD_011),
+ PINMUX_DATA(SPDIF_OUT_MARK, PJ25MD_100),
+ PINMUX_DATA(RXD7_MARK, PJ25MD_101),
+
+ PINMUX_DATA(PJ24_DATA, PJ24MD_000),
+ PINMUX_DATA(SSISCK4_MARK, PJ24MD_010),
+ PINMUX_DATA(LCD_TCON3_MARK, PJ24MD_011),
+ PINMUX_DATA(SPDIF_IN_MARK, PJ24MD_100),
+ PINMUX_DATA(SCK7_MARK, PJ24MD_101),
+
+ PINMUX_DATA(PJ23_DATA, PJ23MD_000),
+ PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001),
+ PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
+ PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
+ PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
+ PINMUX_DATA(CTX1_MARK, PJ23MD_101),
+
+ PINMUX_DATA(PJ22_DATA, PJ22MD_000),
+ PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
+ PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
+ PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
+ PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
+ PINMUX_DATA(CRX1_MARK, PJ22MD_101),
+ PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
+
+ PINMUX_DATA(PJ21_DATA, PJ21MD_000),
+ PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
+ PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
+ PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
+ PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
+ PINMUX_DATA(CTX2_MARK, PJ21MD_101),
+
+ PINMUX_DATA(PJ20_DATA, PJ20MD_000),
+ PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
+ PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
+ PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
+ PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
+ PINMUX_DATA(CRX2_MARK, PJ20MD_101),
+ PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
+
+ PINMUX_DATA(PJ19_DATA, PJ19MD_000),
+ PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
+ PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010),
+ PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011),
+ PINMUX_DATA(TIOC0D_MARK, PJ19MD_100),
+ PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101),
+ PINMUX_DATA(AUDIO_XOUT_MARK, PJ19MD_110),
+
+ PINMUX_DATA(PJ18_DATA, PJ18MD_000),
+ PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001),
+ PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010),
+ PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011),
+ PINMUX_DATA(TIOC0C_MARK, PJ18MD_100),
+ PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101),
+
+ PINMUX_DATA(PJ17_DATA, PJ17MD_000),
+ PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001),
+ PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010),
+ PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011),
+ PINMUX_DATA(TIOC0B_MARK, PJ17MD_100),
+ PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101),
+
+ PINMUX_DATA(PJ16_DATA, PJ16MD_000),
+ PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001),
+ PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010),
+ PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011),
+ PINMUX_DATA(TIOC0A_MARK, PJ16MD_100),
+ PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101),
+
+ PINMUX_DATA(PJ15_DATA, PJ15MD_000),
+ PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001),
+ PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010),
+ PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011),
+ PINMUX_DATA(PWM2H_MARK, PJ15MD_100),
+ PINMUX_DATA(TXD7_MARK, PJ15MD_101),
+
+ PINMUX_DATA(PJ14_DATA, PJ14MD_000),
+ PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001),
+ PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010),
+ PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011),
+ PINMUX_DATA(PWM2G_MARK, PJ14MD_100),
+ PINMUX_DATA(TXD6_MARK, PJ14MD_101),
+
+ PINMUX_DATA(PJ13_DATA, PJ13MD_000),
+ PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001),
+ PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010),
+ PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011),
+ PINMUX_DATA(PWM2F_MARK, PJ13MD_100),
+ PINMUX_DATA(TXD5_MARK, PJ13MD_101),
+
+ PINMUX_DATA(PJ12_DATA, PJ12MD_000),
+ PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001),
+ PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010),
+ PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011),
+ PINMUX_DATA(PWM2E_MARK, PJ12MD_100),
+ PINMUX_DATA(SCK7_MARK, PJ12MD_101),
+
+ PINMUX_DATA(PJ11_DATA, PJ11MD_000),
+ PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001),
+ PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010),
+ PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011),
+ PINMUX_DATA(PWM2D_MARK, PJ11MD_100),
+ PINMUX_DATA(SCK6_MARK, PJ11MD_101),
+
+ PINMUX_DATA(PJ10_DATA, PJ10MD_000),
+ PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001),
+ PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010),
+ PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011),
+ PINMUX_DATA(PWM2C_MARK, PJ10MD_100),
+ PINMUX_DATA(SCK5_MARK, PJ10MD_101),
+
+ PINMUX_DATA(PJ9_DATA, PJ9MD_000),
+ PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001),
+ PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010),
+ PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011),
+ PINMUX_DATA(PWM2B_MARK, PJ9MD_100),
+ PINMUX_DATA(RTS5_MARK, PJ9MD_101),
+
+ PINMUX_DATA(PJ8_DATA, PJ8MD_000),
+ PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001),
+ PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010),
+ PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011),
+ PINMUX_DATA(PWM2A_MARK, PJ8MD_100),
+ PINMUX_DATA(CTS5_MARK, PJ8MD_101),
+
+ PINMUX_DATA(PJ7_DATA, PJ7MD_000),
+ PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001),
+ PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010),
+ PINMUX_DATA(SD_D2_MARK, PJ7MD_011),
+ PINMUX_DATA(PWM1H_MARK, PJ7MD_100),
+
+ PINMUX_DATA(PJ6_DATA, PJ6MD_000),
+ PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001),
+ PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010),
+ PINMUX_DATA(SD_D3_MARK, PJ6MD_011),
+ PINMUX_DATA(PWM1G_MARK, PJ6MD_100),
+
+ PINMUX_DATA(PJ5_DATA, PJ5MD_000),
+ PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001),
+ PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010),
+ PINMUX_DATA(SD_CMD_MARK, PJ5MD_011),
+ PINMUX_DATA(PWM1F_MARK, PJ5MD_100),
+
+ PINMUX_DATA(PJ4_DATA, PJ4MD_000),
+ PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001),
+ PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010),
+ PINMUX_DATA(SD_CLK_MARK, PJ4MD_011),
+ PINMUX_DATA(PWM1E_MARK, PJ4MD_100),
+
+ PINMUX_DATA(PJ3_DATA, PJ3MD_000),
+ PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001),
+ PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010),
+ PINMUX_DATA(SD_D0_MARK, PJ3MD_011),
+ PINMUX_DATA(PWM1D_MARK, PJ3MD_100),
+
+ PINMUX_DATA(PJ2_DATA, PJ2MD_000),
+ PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001),
+ PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010),
+ PINMUX_DATA(SD_D1_MARK, PJ2MD_011),
+ PINMUX_DATA(PWM1C_MARK, PJ2MD_100),
+
+ PINMUX_DATA(PJ1_DATA, PJ1MD_000),
+ PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001),
+ PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010),
+ PINMUX_DATA(SD_WP_MARK, PJ1MD_011),
+ PINMUX_DATA(PWM1B_MARK, PJ1MD_100),
+
+ PINMUX_DATA(PJ0_DATA, PJ0MD_000),
+ PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001),
+ PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010),
+ PINMUX_DATA(SD_CD_MARK, PJ0MD_011),
+ PINMUX_DATA(PWM1A_MARK, PJ0MD_100),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* Port A */
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* Port B */
+ PINMUX_GPIO(GPIO_PB22, PB22_DATA),
+ PINMUX_GPIO(GPIO_PB21, PB21_DATA),
+ PINMUX_GPIO(GPIO_PB20, PB20_DATA),
+ PINMUX_GPIO(GPIO_PB19, PB19_DATA),
+ PINMUX_GPIO(GPIO_PB18, PB18_DATA),
+ PINMUX_GPIO(GPIO_PB17, PB17_DATA),
+ PINMUX_GPIO(GPIO_PB16, PB16_DATA),
+ PINMUX_GPIO(GPIO_PB15, PB15_DATA),
+ PINMUX_GPIO(GPIO_PB14, PB14_DATA),
+ PINMUX_GPIO(GPIO_PB13, PB13_DATA),
+ PINMUX_GPIO(GPIO_PB12, PB12_DATA),
+ PINMUX_GPIO(GPIO_PB11, PB11_DATA),
+ PINMUX_GPIO(GPIO_PB10, PB10_DATA),
+ PINMUX_GPIO(GPIO_PB9, PB9_DATA),
+ PINMUX_GPIO(GPIO_PB8, PB8_DATA),
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+
+ /* Port C */
+ PINMUX_GPIO(GPIO_PC8, PC8_DATA),
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* Port D */
+ PINMUX_GPIO(GPIO_PD15, PD15_DATA),
+ PINMUX_GPIO(GPIO_PD14, PD14_DATA),
+ PINMUX_GPIO(GPIO_PD13, PD13_DATA),
+ PINMUX_GPIO(GPIO_PD12, PD12_DATA),
+ PINMUX_GPIO(GPIO_PD11, PD11_DATA),
+ PINMUX_GPIO(GPIO_PD10, PD10_DATA),
+ PINMUX_GPIO(GPIO_PD9, PD9_DATA),
+ PINMUX_GPIO(GPIO_PD8, PD8_DATA),
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* Port E */
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* Port F */
+ PINMUX_GPIO(GPIO_PF23, PF23_DATA),
+ PINMUX_GPIO(GPIO_PF22, PF22_DATA),
+ PINMUX_GPIO(GPIO_PF21, PF21_DATA),
+ PINMUX_GPIO(GPIO_PF20, PF20_DATA),
+ PINMUX_GPIO(GPIO_PF19, PF19_DATA),
+ PINMUX_GPIO(GPIO_PF18, PF18_DATA),
+ PINMUX_GPIO(GPIO_PF17, PF17_DATA),
+ PINMUX_GPIO(GPIO_PF16, PF16_DATA),
+ PINMUX_GPIO(GPIO_PF15, PF15_DATA),
+ PINMUX_GPIO(GPIO_PF14, PF14_DATA),
+ PINMUX_GPIO(GPIO_PF13, PF13_DATA),
+ PINMUX_GPIO(GPIO_PF12, PF12_DATA),
+ PINMUX_GPIO(GPIO_PF11, PF11_DATA),
+ PINMUX_GPIO(GPIO_PF10, PF10_DATA),
+ PINMUX_GPIO(GPIO_PF9, PF9_DATA),
+ PINMUX_GPIO(GPIO_PF8, PF8_DATA),
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* Port G */
+ PINMUX_GPIO(GPIO_PG27, PG27_DATA),
+ PINMUX_GPIO(GPIO_PG26, PG26_DATA),
+ PINMUX_GPIO(GPIO_PG25, PG25_DATA),
+ PINMUX_GPIO(GPIO_PG24, PG24_DATA),
+ PINMUX_GPIO(GPIO_PG23, PG23_DATA),
+ PINMUX_GPIO(GPIO_PG22, PG22_DATA),
+ PINMUX_GPIO(GPIO_PG21, PG21_DATA),
+ PINMUX_GPIO(GPIO_PG20, PG20_DATA),
+ PINMUX_GPIO(GPIO_PG19, PG19_DATA),
+ PINMUX_GPIO(GPIO_PG18, PG18_DATA),
+ PINMUX_GPIO(GPIO_PG17, PG17_DATA),
+ PINMUX_GPIO(GPIO_PG16, PG16_DATA),
+ PINMUX_GPIO(GPIO_PG15, PG15_DATA),
+ PINMUX_GPIO(GPIO_PG14, PG14_DATA),
+ PINMUX_GPIO(GPIO_PG13, PG13_DATA),
+ PINMUX_GPIO(GPIO_PG12, PG12_DATA),
+ PINMUX_GPIO(GPIO_PG11, PG11_DATA),
+ PINMUX_GPIO(GPIO_PG10, PG10_DATA),
+ PINMUX_GPIO(GPIO_PG9, PG9_DATA),
+ PINMUX_GPIO(GPIO_PG8, PG8_DATA),
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* Port H - does not have a Data Register */
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PINMUX_GPIO(GPIO_PJ31, PJ31_DATA),
+ PINMUX_GPIO(GPIO_PJ30, PJ30_DATA),
+ PINMUX_GPIO(GPIO_PJ29, PJ29_DATA),
+ PINMUX_GPIO(GPIO_PJ28, PJ28_DATA),
+ PINMUX_GPIO(GPIO_PJ27, PJ27_DATA),
+ PINMUX_GPIO(GPIO_PJ26, PJ26_DATA),
+ PINMUX_GPIO(GPIO_PJ25, PJ25_DATA),
+ PINMUX_GPIO(GPIO_PJ24, PJ24_DATA),
+ PINMUX_GPIO(GPIO_PJ23, PJ23_DATA),
+ PINMUX_GPIO(GPIO_PJ22, PJ22_DATA),
+ PINMUX_GPIO(GPIO_PJ21, PJ21_DATA),
+ PINMUX_GPIO(GPIO_PJ20, PJ20_DATA),
+ PINMUX_GPIO(GPIO_PJ19, PJ19_DATA),
+ PINMUX_GPIO(GPIO_PJ18, PJ18_DATA),
+ PINMUX_GPIO(GPIO_PJ17, PJ17_DATA),
+ PINMUX_GPIO(GPIO_PJ16, PJ16_DATA),
+ PINMUX_GPIO(GPIO_PJ15, PJ15_DATA),
+ PINMUX_GPIO(GPIO_PJ14, PJ14_DATA),
+ PINMUX_GPIO(GPIO_PJ13, PJ13_DATA),
+ PINMUX_GPIO(GPIO_PJ12, PJ12_DATA),
+ PINMUX_GPIO(GPIO_PJ11, PJ11_DATA),
+ PINMUX_GPIO(GPIO_PJ10, PJ10_DATA),
+ PINMUX_GPIO(GPIO_PJ9, PJ9_DATA),
+ PINMUX_GPIO(GPIO_PJ8, PJ8_DATA),
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+ PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_IRQ7_PG, IRQ7_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PG, IRQ6_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PG, IRQ5_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PG, IRQ4_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PG, IRQ3_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PG, IRQ2_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PG, IRQ1_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PG, IRQ0_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PF, IRQ7_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PF, IRQ6_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PF, IRQ5_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PF, IRQ4_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PJ, IRQ3_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PJ, IRQ2_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PJ, IRQ1_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PJ, IRQ0_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PC, IRQ1_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PC, IRQ0_PC_MARK),
+
+ PINMUX_GPIO(GPIO_FN_PINT7_PG, PINT7_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PG, PINT6_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PG, PINT5_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PG, PINT4_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PG, PINT3_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PG, PINT2_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PG, PINT1_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PG, PINT0_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT7_PH, PINT7_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PH, PINT6_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PH, PINT5_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PH, PINT4_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PH, PINT3_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PH, PINT2_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PH, PINT1_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PH, PINT0_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT7_PJ, PINT7_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PJ, PINT6_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PJ, PINT5_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PJ, PINT4_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PJ, PINT3_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PJ, PINT2_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PJ, PINT1_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PJ, PINT0_PJ_MARK),
+
+ /* WDT */
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+
+ /* CAN */
+ PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1_CRX2, CRX0_CRX1_CRX2_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
+ PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
+ PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
+ PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
+ PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
+ PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
+ PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
+ PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
+ PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+ PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
+ PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
+ PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
+ PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
+ PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
+ PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5CE1A, CS5CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3ICIOWRAHDQMUU, WE3ICIOWRAHDQMUU_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2ICIORDDQMUL, WE2ICIORDDQMUL_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1DQMUWE, WE1DQMUWE_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0DQML, WE0DQML_MARK),
+ PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+
+ /* TMU */
+ PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD, TCLKD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC, TCLKC_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB, TCLKB_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA, TCLKA_MARK),
+
+ /* SCIF */
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS1, RTS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS1, CTS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK5, SCK5_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD5, TXD5_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD5, RXD5_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS5, RTS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS5, CTS5_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK6, SCK6_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD6, TXD6_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD6, RXD6_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK7, SCK7_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD7, TXD7_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD7, RXD7_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS7, RTS7_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS7, CTS7_MARK),
+
+ /* RSPI */
+ PINMUX_GPIO(GPIO_FN_RSPCK0_PJ16, RSPCK0_PJ16_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL00_PJ17, SSL00_PJ17_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI0_PJ18, MOSI0_PJ18_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO0_PJ19, MISO0_PJ19_MARK),
+ PINMUX_GPIO(GPIO_FN_RSPCK0_PB17, RSPCK0_PB17_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL00_PB18, SSL00_PB18_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI0_PB19, MOSI0_PB19_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO0_PB20, MISO0_PB20_MARK),
+ PINMUX_GPIO(GPIO_FN_RSPCK1, RSPCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI1, MOSI1_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO1, MISO1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL10, SSL10_MARK),
+
+ /* IIC3 */
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+
+ /* SSI */
+ PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSITXD0, SSITXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIRXD0, SSIRXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDIO_XOUT, AUDIO_XOUT_MARK),
+
+ /* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
+ PINMUX_GPIO(GPIO_FN_SIOFTXD, SIOFTXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFRXD, SIOFRXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFSYNC, SIOFSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFSCK, SIOFSCK_MARK),
+
+ /* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
+ PINMUX_GPIO(GPIO_FN_SPDIF_IN, SPDIF_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_SPDIF_OUT, SPDIF_OUT_MARK),
+
+ /* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* VDC3 */
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+
+ PINMUX_GPIO(GPIO_FN_DV_DATA23, DV_DATA23_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA22, DV_DATA22_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA21, DV_DATA21_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA20, DV_DATA20_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA19, DV_DATA19_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA18, DV_DATA18_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA17, DV_DATA17_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA16, DV_DATA16_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA15, DV_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA14, DV_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA13, DV_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA12, DV_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA11, DV_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA10, DV_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA9, DV_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA8, DV_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA7, DV_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA6, DV_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA5, DV_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA4, DV_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA3, DV_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA2, DV_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA1, DV_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA0, DV_DATA0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_EXTCLK, LCD_EXTCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VSYNC, LCD_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ /* "name" addr register_size Field_Width */
+
+ /* where Field_Width is 1 for single mode registers or 4 for up to 16
+ mode registers and modes are described in ascending order [0..15] */
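+
+ /* A reading aid, inferred from the entries below: "PAIOR0" uses
+ Field_Width 1, so each of its 16 single-bit fields lists two states
+ (e.g. PA1_IN, PA1_OUT); "PBCR5" uses Field_Width 4, so each of its
+ four 4-bit fields lists 2^4 = 16 candidate modes, highest-order field
+ first, with unsupported slots padded with 0 */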
+
+ { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, PA1_IN, PA1_OUT,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, PA0_IN, PA0_OUT }
+ },
+ { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB22MD_000, PB22MD_001, PB22MD_010, PB22MD_011,
+ PB22MD_100, PB22MD_101, PB22MD_110, PB22MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB21MD_00, PB21MD_01, PB21MD_10, PB21MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB20MD_000, PB20MD_001, PB20MD_010, PB20MD_011,
+ PB20MD_100, PB20MD_101, PB20MD_110, PB20MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR4", 0xfffe3826, 16, 4) {
+ PB19MD_000, PB19MD_001, PB19MD_010, PB19MD_011,
+ PB19MD_100, PB19MD_101, PB19MD_110, PB19MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB18MD_000, PB18MD_001, PB18MD_010, PB18MD_011,
+ PB18MD_100, PB18MD_101, PB18MD_110, PB18MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB17MD_000, PB17MD_001, PB17MD_010, PB17MD_011,
+ PB17MD_100, PB17MD_101, PB17MD_110, PB17MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB16MD_000, PB16MD_001, PB16MD_010, PB16MD_011,
+ PB16MD_100, PB16MD_101, PB16MD_110, PB16MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR3", 0xfffe3828, 16, 4) {
+ PB15MD_000, PB15MD_001, PB15MD_010, PB15MD_011,
+ PB15MD_100, PB15MD_101, PB15MD_110, PB15MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB14MD_000, PB14MD_001, PB14MD_010, PB14MD_011,
+ PB14MD_100, PB14MD_101, PB14MD_110, PB14MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB13MD_000, PB13MD_001, PB13MD_010, PB13MD_011,
+ PB13MD_100, PB13MD_101, PB13MD_110, PB13MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR2", 0xfffe382a, 16, 4) {
+ PB11MD_00, PB11MD_01, PB11MD_10, PB11MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB10MD_00, PB10MD_01, PB10MD_10, PB10MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB9MD_00, PB9MD_01, PB9MD_10, PB9MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB8MD_00, PB8MD_01, PB8MD_10, PB8MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR1", 0xfffe382c, 16, 4) {
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4) {
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ PB22_IN, PB22_OUT,
+ PB21_IN, PB21_OUT,
+ PB20_IN, PB20_OUT,
+ PB19_IN, PB19_OUT,
+ PB18_IN, PB18_OUT,
+ PB17_IN, PB17_OUT,
+ PB16_IN, PB16_OUT }
+ },
+ { PINMUX_CFG_REG("PBIOR0", 0xfffe3832, 16, 1) {
+ PB15_IN, PB15_OUT,
+ PB14_IN, PB14_OUT,
+ PB13_IN, PB13_OUT,
+ PB12_IN, PB12_OUT,
+ PB11_IN, PB11_OUT,
+ PB10_IN, PB10_OUT,
+ PB9_IN, PB9_OUT,
+ PB8_IN, PB8_OUT,
+ PB7_IN, PB7_OUT,
+ PB6_IN, PB6_OUT,
+ PB5_IN, PB5_OUT,
+ PB4_IN, PB4_OUT,
+ PB3_IN, PB3_OUT,
+ PB2_IN, PB2_OUT,
+ PB1_IN, PB1_OUT,
+ 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC8MD_000, PC8MD_001, PC8MD_010, PC8MD_011,
+ PC8MD_100, PC8MD_101, PC8MD_110, PC8MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR1", 0xfffe384c, 16, 4) {
+ PC7MD_000, PC7MD_001, PC7MD_010, PC7MD_011,
+ PC7MD_100, PC7MD_101, PC7MD_110, PC7MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC6MD_000, PC6MD_001, PC6MD_010, PC6MD_011,
+ PC6MD_100, PC6MD_101, PC6MD_110, PC6MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC5MD_000, PC5MD_001, PC5MD_010, PC5MD_011,
+ PC5MD_100, PC5MD_101, PC5MD_110, PC5MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC4MD_00, PC4MD_01, PC4MD_10, PC4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR0", 0xfffe384e, 16, 4) {
+ PC3MD_00, PC3MD_01, PC3MD_10, PC3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC2MD_00, PC2MD_01, PC2MD_10, PC2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC1MD_0, PC1MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC0MD_0, PC0MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ PC8_IN, PC8_OUT,
+ PC7_IN, PC7_OUT,
+ PC6_IN, PC6_OUT,
+ PC5_IN, PC5_OUT,
+ PC4_IN, PC4_OUT,
+ PC3_IN, PC3_OUT,
+ PC2_IN, PC2_OUT,
+ PC1_IN, PC1_OUT,
+ PC0_IN, PC0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PDCR3", 0xfffe3868, 16, 4) {
+ PD15MD_00, PD15MD_01, PD15MD_10, PD15MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD14MD_00, PD14MD_01, PD14MD_10, PD14MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD13MD_00, PD13MD_01, PD13MD_10, PD13MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD12MD_00, PD12MD_01, PD12MD_10, PD12MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR2", 0xfffe386a, 16, 4) {
+ PD11MD_00, PD11MD_01, PD11MD_10, PD11MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD10MD_00, PD10MD_01, PD10MD_10, PD10MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD9MD_00, PD9MD_01, PD9MD_10, PD9MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD8MD_00, PD8MD_01, PD8MD_10, PD8MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR1", 0xfffe386c, 16, 4) {
+ PD7MD_00, PD7MD_01, PD7MD_10, PD7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD6MD_00, PD6MD_01, PD6MD_10, PD6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD5MD_00, PD5MD_01, PD5MD_10, PD5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD4MD_00, PD4MD_01, PD4MD_10, PD4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR0", 0xfffe386e, 16, 4) {
+ PD3MD_00, PD3MD_01, PD3MD_10, PD3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD2MD_00, PD2MD_01, PD2MD_10, PD2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD1MD_00, PD1MD_01, PD1MD_10, PD1MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD0MD_00, PD0MD_01, PD0MD_10, PD0MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PDIOR0", 0xfffe3872, 16, 1) {
+ PD15_IN, PD15_OUT,
+ PD14_IN, PD14_OUT,
+ PD13_IN, PD13_OUT,
+ PD12_IN, PD12_OUT,
+ PD11_IN, PD11_OUT,
+ PD10_IN, PD10_OUT,
+ PD9_IN, PD9_OUT,
+ PD8_IN, PD8_OUT,
+ PD7_IN, PD7_OUT,
+ PD6_IN, PD6_OUT,
+ PD5_IN, PD5_OUT,
+ PD4_IN, PD4_OUT,
+ PD3_IN, PD3_OUT,
+ PD2_IN, PD2_OUT,
+ PD1_IN, PD1_OUT,
+ PD0_IN, PD0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4) {
+ PE7MD_00, PE7MD_01, PE7MD_10, PE7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE6MD_00, PE6MD_01, PE6MD_10, PE6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE5MD_00, PE5MD_01, PE5MD_10, PE5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE4MD_00, PE4MD_01, PE4MD_10, PE4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECR0", 0xfffe388e, 16, 4) {
+ PE3MD_000, PE3MD_001, PE3MD_010, PE3MD_011,
+ PE3MD_100, PE3MD_101, PE3MD_110, PE3MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE2MD_000, PE2MD_001, PE2MD_010, PE2MD_011,
+ PE2MD_100, PE2MD_101, PE2MD_110, PE2MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE1MD_000, PE1MD_001, PE1MD_010, PE1MD_011,
+ PE1MD_100, PE1MD_101, PE1MD_110, PE1MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE7_IN, PE7_OUT,
+ PE6_IN, PE6_OUT,
+ PE5_IN, PE5_OUT,
+ PE4_IN, PE4_OUT,
+ PE3_IN, PE3_OUT,
+ PE2_IN, PE2_OUT,
+ PE1_IN, PE1_OUT,
+ PE0_IN, PE0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PFCR6", 0xfffe38a2, 16, 4) {
+ PF23MD_000, PF23MD_001, PF23MD_010, PF23MD_011,
+ PF23MD_100, PF23MD_101, PF23MD_110, PF23MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF22MD_000, PF22MD_001, PF22MD_010, PF22MD_011,
+ PF22MD_100, PF22MD_101, PF22MD_110, PF22MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF21MD_000, PF21MD_001, PF21MD_010, PF21MD_011,
+ PF21MD_100, PF21MD_101, PF21MD_110, PF21MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF20MD_000, PF20MD_001, PF20MD_010, PF20MD_011,
+ PF20MD_100, PF20MD_101, PF20MD_110, PF20MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR5", 0xfffe38a4, 16, 4) {
+ PF19MD_000, PF19MD_001, PF19MD_010, PF19MD_011,
+ PF19MD_100, PF19MD_101, PF19MD_110, PF19MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF18MD_000, PF18MD_001, PF18MD_010, PF18MD_011,
+ PF18MD_100, PF18MD_101, PF18MD_110, PF18MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF17MD_000, PF17MD_001, PF17MD_010, PF17MD_011,
+ PF17MD_100, PF17MD_101, PF17MD_110, PF17MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF16MD_000, PF16MD_001, PF16MD_010, PF16MD_011,
+ PF16MD_100, PF16MD_101, PF16MD_110, PF16MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR4", 0xfffe38a6, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF15MD_000, PF15MD_001, PF15MD_010, PF15MD_011,
+ PF15MD_100, PF15MD_101, PF15MD_110, PF15MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF14MD_000, PF14MD_001, PF14MD_010, PF14MD_011,
+ PF14MD_100, PF14MD_101, PF14MD_110, PF14MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF13MD_000, PF13MD_001, PF13MD_010, PF13MD_011,
+ PF13MD_100, PF13MD_101, PF13MD_110, PF13MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF12MD_000, PF12MD_001, PF12MD_010, PF12MD_011,
+ PF12MD_100, PF12MD_101, PF12MD_110, PF12MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR2", 0xfffe38aa, 16, 4) {
+ PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
+ PF11MD_100, PF11MD_101, PF11MD_110, PF11MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF10MD_000, PF10MD_001, PF10MD_010, PF10MD_011,
+ PF10MD_100, PF10MD_101, PF10MD_110, PF10MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF9MD_000, PF9MD_001, PF9MD_010, PF9MD_011,
+ PF9MD_100, PF9MD_101, PF9MD_110, PF9MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF8MD_000, PF8MD_001, PF8MD_010, PF8MD_011,
+ PF8MD_100, PF8MD_101, PF8MD_110, PF8MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR1", 0xfffe38ac, 16, 4) {
+ PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
+ PF7MD_100, PF7MD_101, PF7MD_110, PF7MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF6MD_000, PF6MD_001, PF6MD_010, PF6MD_011,
+ PF6MD_100, PF6MD_101, PF6MD_110, PF6MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF5MD_000, PF5MD_001, PF5MD_010, PF5MD_011,
+ PF5MD_100, PF5MD_101, PF5MD_110, PF5MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
+ PF4MD_100, PF4MD_101, PF4MD_110, PF4MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR0", 0xfffe38ae, 16, 4) {
+ PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
+ PF3MD_100, PF3MD_101, PF3MD_110, PF3MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF2MD_000, PF2MD_001, PF2MD_010, PF2MD_011,
+ PF2MD_100, PF2MD_101, PF2MD_110, PF2MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
+ PF1MD_100, PF1MD_101, PF1MD_110, PF1MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
+ PF0MD_100, PF0MD_101, PF0MD_110, PF0MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFIOR1", 0xfffe38b0, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF23_IN, PF23_OUT,
+ PF22_IN, PF22_OUT,
+ PF21_IN, PF21_OUT,
+ PF20_IN, PF20_OUT,
+ PF19_IN, PF19_OUT,
+ PF18_IN, PF18_OUT,
+ PF17_IN, PF17_OUT,
+ PF16_IN, PF16_OUT }
+ },
+ { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) {
+ PF15_IN, PF15_OUT,
+ PF14_IN, PF14_OUT,
+ PF13_IN, PF13_OUT,
+ PF12_IN, PF12_OUT,
+ PF11_IN, PF11_OUT,
+ PF10_IN, PF10_OUT,
+ PF9_IN, PF9_OUT,
+ PF8_IN, PF8_OUT,
+ PF7_IN, PF7_OUT,
+ PF6_IN, PF6_OUT,
+ PF5_IN, PF5_OUT,
+ PF4_IN, PF4_OUT,
+ PF3_IN, PF3_OUT,
+ PF2_IN, PF2_OUT,
+ PF1_IN, PF1_OUT,
+ PF0_IN, PF0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4) {
+ PG27MD_00, PG27MD_01, PG27MD_10, PG27MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG26MD_00, PG26MD_01, PG26MD_10, PG26MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG25MD_00, PG25MD_01, PG25MD_10, PG25MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR5", 0xfffe38c4, 16, 4) {
+ PG23MD_000, PG23MD_001, PG23MD_010, PG23MD_011,
+ PG23MD_100, PG23MD_101, PG23MD_110, PG23MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG22MD_000, PG22MD_001, PG22MD_010, PG22MD_011,
+ PG22MD_100, PG22MD_101, PG22MD_110, PG22MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG21MD_000, PG21MD_001, PG21MD_010, PG21MD_011,
+ PG21MD_100, PG21MD_101, PG21MD_110, PG21MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
+ PG20MD_100, PG20MD_101, PG20MD_110, PG20MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4) {
+ PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
+ PG19MD_100, PG19MD_101, PG19MD_110, PG19MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG18MD_000, PG18MD_001, PG18MD_010, PG18MD_011,
+ PG18MD_100, PG18MD_101, PG18MD_110, PG18MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG17MD_00, PG17MD_01, PG17MD_10, PG17MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG16MD_00, PG16MD_01, PG16MD_10, PG16MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR3", 0xfffe38c8, 16, 4) {
+ PG15MD_00, PG15MD_01, PG15MD_10, PG15MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG14MD_00, PG14MD_01, PG14MD_10, PG14MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG13MD_00, PG13MD_01, PG13MD_10, PG13MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG12MD_00, PG12MD_01, PG12MD_10, PG12MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR2", 0xfffe38ca, 16, 4) {
+ PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
+ PG11MD_100, PG11MD_101, PG11MD_110, PG11MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG10MD_000, PG10MD_001, PG10MD_010, PG10MD_011,
+ PG10MD_100, PG10MD_101, PG10MD_110, PG10MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG9MD_000, PG9MD_001, PG9MD_010, PG9MD_011,
+ PG9MD_100, PG9MD_101, PG9MD_110, PG9MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
+ PG8MD_100, PG8MD_101, PG8MD_110, PG8MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR1", 0xfffe38cc, 16, 4) {
+ PG7MD_000, PG7MD_001, PG7MD_010, PG7MD_011,
+ PG7MD_100, PG7MD_101, PG7MD_110, PG7MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG6MD_000, PG6MD_001, PG6MD_010, PG6MD_011,
+ PG6MD_100, PG6MD_101, PG6MD_110, PG6MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG5MD_000, PG5MD_001, PG5MD_010, PG5MD_011,
+ PG5MD_100, PG5MD_101, PG5MD_110, PG5MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG4MD_000, PG4MD_001, PG4MD_010, PG4MD_011,
+ PG4MD_100, PG4MD_101, PG4MD_110, PG4MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4) {
+ PG3MD_000, PG3MD_001, PG3MD_010, PG3MD_011,
+ PG3MD_100, PG3MD_101, PG3MD_110, PG3MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG2MD_000, PG2MD_001, PG2MD_010, PG2MD_011,
+ PG2MD_100, PG2MD_101, PG2MD_110, PG2MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG1MD_000, PG1MD_001, PG1MD_010, PG1MD_011,
+ PG1MD_100, PG1MD_101, PG1MD_110, PG1MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
+ PG0MD_100, PG0MD_101, PG0MD_110, PG0MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG27_IN, PG27_OUT,
+ PG26_IN, PG26_OUT,
+ PG25_IN, PG25_OUT,
+ PG24_IN, PG24_OUT,
+ PG23_IN, PG23_OUT,
+ PG22_IN, PG22_OUT,
+ PG21_IN, PG21_OUT,
+ PG20_IN, PG20_OUT,
+ PG19_IN, PG19_OUT,
+ PG18_IN, PG18_OUT,
+ PG17_IN, PG17_OUT,
+ PG16_IN, PG16_OUT }
+ },
+ { PINMUX_CFG_REG("PGIOR0", 0xfffe38d2, 16, 1) {
+ PG15_IN, PG15_OUT,
+ PG14_IN, PG14_OUT,
+ PG13_IN, PG13_OUT,
+ PG12_IN, PG12_OUT,
+ PG11_IN, PG11_OUT,
+ PG10_IN, PG10_OUT,
+ PG9_IN, PG9_OUT,
+ PG8_IN, PG8_OUT,
+ PG7_IN, PG7_OUT,
+ PG6_IN, PG6_OUT,
+ PG5_IN, PG5_OUT,
+ PG4_IN, PG4_OUT,
+ PG3_IN, PG3_OUT,
+ PG2_IN, PG2_OUT,
+ PG1_IN, PG1_OUT,
+ PG0_IN, PG0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PHCR1", 0xfffe38ec, 16, 4) {
+ PH7MD_00, PH7MD_01, PH7MD_10, PH7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH6MD_00, PH6MD_01, PH6MD_10, PH6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH5MD_00, PH5MD_01, PH5MD_10, PH5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH4MD_00, PH4MD_01, PH4MD_10, PH4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PHCR0", 0xfffe38ee, 16, 4) {
+ PH3MD_00, PH3MD_01, PH3MD_10, PH3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH2MD_00, PH2MD_01, PH2MD_10, PH2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH1MD_00, PH1MD_01, PH1MD_10, PH1MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH0MD_00, PH0MD_01, PH0MD_10, PH0MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PJCR7", 0xfffe3900, 16, 4) {
+ PJ31MD_0, PJ31MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ30MD_000, PJ30MD_001, PJ30MD_010, PJ30MD_011,
+ PJ30MD_100, PJ30MD_101, PJ30MD_110, PJ30MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ29MD_000, PJ29MD_001, PJ29MD_010, PJ29MD_011,
+ PJ29MD_100, PJ29MD_101, PJ29MD_110, PJ29MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ28MD_000, PJ28MD_001, PJ28MD_010, PJ28MD_011,
+ PJ28MD_100, PJ28MD_101, PJ28MD_110, PJ28MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR6", 0xfffe3902, 16, 4) {
+ PJ27MD_000, PJ27MD_001, PJ27MD_010, PJ27MD_011,
+ PJ27MD_100, PJ27MD_101, PJ27MD_110, PJ27MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ26MD_000, PJ26MD_001, PJ26MD_010, PJ26MD_011,
+ PJ26MD_100, PJ26MD_101, PJ26MD_110, PJ26MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ25MD_000, PJ25MD_001, PJ25MD_010, PJ25MD_011,
+ PJ25MD_100, PJ25MD_101, PJ25MD_110, PJ25MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ24MD_000, PJ24MD_001, PJ24MD_010, PJ24MD_011,
+ PJ24MD_100, PJ24MD_101, PJ24MD_110, PJ24MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR5", 0xfffe3904, 16, 4) {
+ PJ23MD_000, PJ23MD_001, PJ23MD_010, PJ23MD_011,
+ PJ23MD_100, PJ23MD_101, PJ23MD_110, PJ23MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ22MD_000, PJ22MD_001, PJ22MD_010, PJ22MD_011,
+ PJ22MD_100, PJ22MD_101, PJ22MD_110, PJ22MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ21MD_000, PJ21MD_001, PJ21MD_010, PJ21MD_011,
+ PJ21MD_100, PJ21MD_101, PJ21MD_110, PJ21MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ20MD_000, PJ20MD_001, PJ20MD_010, PJ20MD_011,
+ PJ20MD_100, PJ20MD_101, PJ20MD_110, PJ20MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR4", 0xfffe3906, 16, 4) {
+ PJ19MD_000, PJ19MD_001, PJ19MD_010, PJ19MD_011,
+ PJ19MD_100, PJ19MD_101, PJ19MD_110, PJ19MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ18MD_000, PJ18MD_001, PJ18MD_010, PJ18MD_011,
+ PJ18MD_100, PJ18MD_101, PJ18MD_110, PJ18MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ17MD_000, PJ17MD_001, PJ17MD_010, PJ17MD_011,
+ PJ17MD_100, PJ17MD_101, PJ17MD_110, PJ17MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ16MD_000, PJ16MD_001, PJ16MD_010, PJ16MD_011,
+ PJ16MD_100, PJ16MD_101, PJ16MD_110, PJ16MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR3", 0xfffe3908, 16, 4) {
+ PJ15MD_000, PJ15MD_001, PJ15MD_010, PJ15MD_011,
+ PJ15MD_100, PJ15MD_101, PJ15MD_110, PJ15MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ14MD_000, PJ14MD_001, PJ14MD_010, PJ14MD_011,
+ PJ14MD_100, PJ14MD_101, PJ14MD_110, PJ14MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ13MD_000, PJ13MD_001, PJ13MD_010, PJ13MD_011,
+ PJ13MD_100, PJ13MD_101, PJ13MD_110, PJ13MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ12MD_000, PJ12MD_001, PJ12MD_010, PJ12MD_011,
+ PJ12MD_100, PJ12MD_101, PJ12MD_110, PJ12MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR2", 0xfffe390a, 16, 4) {
+ PJ11MD_000, PJ11MD_001, PJ11MD_010, PJ11MD_011,
+ PJ11MD_100, PJ11MD_101, PJ11MD_110, PJ11MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ10MD_000, PJ10MD_001, PJ10MD_010, PJ10MD_011,
+ PJ10MD_100, PJ10MD_101, PJ10MD_110, PJ10MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ9MD_000, PJ9MD_001, PJ9MD_010, PJ9MD_011,
+ PJ9MD_100, PJ9MD_101, PJ9MD_110, PJ9MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ8MD_000, PJ8MD_001, PJ8MD_010, PJ8MD_011,
+ PJ8MD_100, PJ8MD_101, PJ8MD_110, PJ8MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR1", 0xfffe390c, 16, 4) {
+ PJ7MD_000, PJ7MD_001, PJ7MD_010, PJ7MD_011,
+ PJ7MD_100, PJ7MD_101, PJ7MD_110, PJ7MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ6MD_000, PJ6MD_001, PJ6MD_010, PJ6MD_011,
+ PJ6MD_100, PJ6MD_101, PJ6MD_110, PJ6MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ5MD_000, PJ5MD_001, PJ5MD_010, PJ5MD_011,
+ PJ5MD_100, PJ5MD_101, PJ5MD_110, PJ5MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ4MD_000, PJ4MD_001, PJ4MD_010, PJ4MD_011,
+ PJ4MD_100, PJ4MD_101, PJ4MD_110, PJ4MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR0", 0xfffe390e, 16, 4) {
+ PJ3MD_000, PJ3MD_001, PJ3MD_010, PJ3MD_011,
+ PJ3MD_100, PJ3MD_101, PJ3MD_110, PJ3MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
+ PJ2MD_100, PJ2MD_101, PJ2MD_110, PJ2MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ1MD_000, PJ1MD_001, PJ1MD_010, PJ1MD_011,
+ PJ1MD_100, PJ1MD_101, PJ1MD_110, PJ1MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
+ PJ0MD_100, PJ0MD_101, PJ0MD_110, PJ0MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PJIOR1", 0xfffe3910, 16, 1) {
+ PJ31_IN, PJ31_OUT,
+ PJ30_IN, PJ30_OUT,
+ PJ29_IN, PJ29_OUT,
+ PJ28_IN, PJ28_OUT,
+ PJ27_IN, PJ27_OUT,
+ PJ26_IN, PJ26_OUT,
+ PJ25_IN, PJ25_OUT,
+ PJ24_IN, PJ24_OUT,
+ PJ23_IN, PJ23_OUT,
+ PJ22_IN, PJ22_OUT,
+ PJ21_IN, PJ21_OUT,
+ PJ20_IN, PJ20_OUT,
+ PJ19_IN, PJ19_OUT,
+ PJ18_IN, PJ18_OUT,
+ PJ17_IN, PJ17_OUT,
+ PJ16_IN, PJ16_OUT }
+ },
+ { PINMUX_CFG_REG("PJIOR0", 0xfffe3912, 16, 1) {
+ PJ15_IN, PJ15_OUT,
+ PJ14_IN, PJ14_OUT,
+ PJ13_IN, PJ13_OUT,
+ PJ12_IN, PJ12_OUT,
+ PJ11_IN, PJ11_OUT,
+ PJ10_IN, PJ10_OUT,
+ PJ9_IN, PJ9_OUT,
+ PJ8_IN, PJ8_OUT,
+ PJ7_IN, PJ7_OUT,
+ PJ6_IN, PJ6_OUT,
+ PJ5_IN, PJ5_OUT,
+ PJ4_IN, PJ4_OUT,
+ PJ3_IN, PJ3_OUT,
+ PJ2_IN, PJ2_OUT,
+ PJ1_IN, PJ1_OUT,
+ PJ0_IN, PJ0_OUT }
+ },
+
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR0", 0xfffe3816, 16) {
+ 0, 0, 0, 0, 0, 0, 0, PA1_DATA,
+ 0, 0, 0, 0, 0, 0, 0, PA0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PBDR1", 0xfffe3834, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB22_DATA, PB21_DATA, PB20_DATA,
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR0", 0xfffe3836, 16) {
+ PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, 0 }
+ },
+
+ { PINMUX_DATA_REG("PCDR0", 0xfffe3856, 16) {
+ 0, 0, 0, 0,
+ 0, 0, 0, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PDDR0", 0xfffe3876, 16) {
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PEDR0", 0xfffe3896, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PFDR1", 0xfffe38b4, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR0", 0xfffe38b6, 16) {
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PGDR1", 0xfffe38d4, 16) {
+ 0, 0, 0, 0,
+ PG27_DATA, PG26_DATA, PG25_DATA, PG24_DATA,
+ PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR0", 0xfffe38d6, 16) {
+ PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
+ PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PJDR1", 0xfffe3914, 16) {
+ PJ31_DATA, PJ30_DATA, PJ29_DATA, PJ28_DATA,
+ PJ27_DATA, PJ26_DATA, PJ25_DATA, PJ24_DATA,
+ PJ23_DATA, PJ22_DATA, PJ21_DATA, PJ20_DATA,
+ PJ19_DATA, PJ18_DATA, PJ17_DATA, PJ16_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR0", 0xfffe3916, 16) {
+ PJ15_DATA, PJ14_DATA, PJ13_DATA, PJ12_DATA,
+ PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ },
+
+ { }
+};
+
+struct sh_pfc_soc_info sh7269_pinmux_info = {
+ .name = "sh7269_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA1,
+ .last_gpio = GPIO_FN_LCD_M_DISP,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
new file mode 100644
index 000000000000..d44e7f02069b
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -0,0 +1,1658 @@
+/*
+ * sh7372 processor support - PFC hardware block
+ *
+ * Copyright (C) 2010 Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on
+ * sh7367 processor support - PFC hardware block
+ * Copyright (C) 2010 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <mach/irqs.h>
+#include <mach/sh7372.h>
+
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
+ PORT_10(fn, pfx##10, sfx), PORT_10(fn, pfx##11, sfx), \
+ PORT_10(fn, pfx##12, sfx), PORT_10(fn, pfx##13, sfx), \
+ PORT_10(fn, pfx##14, sfx), PORT_10(fn, pfx##15, sfx), \
+ PORT_10(fn, pfx##16, sfx), PORT_10(fn, pfx##17, sfx), \
+ PORT_10(fn, pfx##18, sfx), PORT_1(fn, pfx##190, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ /* PORT0_DATA -> PORT190_DATA */
+ PINMUX_DATA_BEGIN,
+ PORT_ALL(DATA),
+ PINMUX_DATA_END,
+
+ /* PORT0_IN -> PORT190_IN */
+ PINMUX_INPUT_BEGIN,
+ PORT_ALL(IN),
+ PINMUX_INPUT_END,
+
+ /* PORT0_IN_PU -> PORT190_IN_PU */
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PORT_ALL(IN_PU),
+ PINMUX_INPUT_PULLUP_END,
+
+ /* PORT0_IN_PD -> PORT190_IN_PD */
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PORT_ALL(IN_PD),
+ PINMUX_INPUT_PULLDOWN_END,
+
+ /* PORT0_OUT -> PORT190_OUT */
+ PINMUX_OUTPUT_BEGIN,
+ PORT_ALL(OUT),
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT190_FN_IN */
+ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT190_FN_OUT */
+ PORT_ALL(FN0), /* PORT0_FN0 -> PORT190_FN0 */
+ PORT_ALL(FN1), /* PORT0_FN1 -> PORT190_FN1 */
+ PORT_ALL(FN2), /* PORT0_FN2 -> PORT190_FN2 */
+ PORT_ALL(FN3), /* PORT0_FN3 -> PORT190_FN3 */
+ PORT_ALL(FN4), /* PORT0_FN4 -> PORT190_FN4 */
+ PORT_ALL(FN5), /* PORT0_FN5 -> PORT190_FN5 */
+ PORT_ALL(FN6), /* PORT0_FN6 -> PORT190_FN6 */
+ PORT_ALL(FN7), /* PORT0_FN7 -> PORT190_FN7 */
+
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_30_0, MSEL1CR_30_1,
+ MSEL1CR_29_0, MSEL1CR_29_1,
+ MSEL1CR_28_0, MSEL1CR_28_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_26_0, MSEL1CR_26_1,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ MSEL1CR_9_0, MSEL1CR_9_1,
+ MSEL1CR_8_0, MSEL1CR_8_1,
+ MSEL1CR_7_0, MSEL1CR_7_1,
+ MSEL1CR_6_0, MSEL1CR_6_1,
+ MSEL1CR_4_0, MSEL1CR_4_1,
+ MSEL1CR_3_0, MSEL1CR_3_1,
+ MSEL1CR_2_0, MSEL1CR_2_1,
+ MSEL1CR_0_0, MSEL1CR_0_1,
+
+ MSEL3CR_27_0, MSEL3CR_27_1,
+ MSEL3CR_26_0, MSEL3CR_26_1,
+ MSEL3CR_21_0, MSEL3CR_21_1,
+ MSEL3CR_20_0, MSEL3CR_20_1,
+ MSEL3CR_15_0, MSEL3CR_15_1,
+ MSEL3CR_9_0, MSEL3CR_9_1,
+ MSEL3CR_6_0, MSEL3CR_6_1,
+
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ MSEL4CR_17_0, MSEL4CR_17_1,
+ MSEL4CR_16_0, MSEL4CR_16_1,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ MSEL4CR_14_0, MSEL4CR_14_1,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ MSEL4CR_6_0, MSEL4CR_6_1,
+ MSEL4CR_4_0, MSEL4CR_4_1,
+ MSEL4CR_1_0, MSEL4CR_1_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ /* IRQ */
+ IRQ0_6_MARK, IRQ0_162_MARK, IRQ1_MARK, IRQ2_4_MARK,
+ IRQ2_5_MARK, IRQ3_8_MARK, IRQ3_16_MARK, IRQ4_17_MARK,
+ IRQ4_163_MARK, IRQ5_MARK, IRQ6_39_MARK, IRQ6_164_MARK,
+ IRQ7_40_MARK, IRQ7_167_MARK, IRQ8_41_MARK, IRQ8_168_MARK,
+ IRQ9_42_MARK, IRQ9_169_MARK, IRQ10_MARK, IRQ11_MARK,
+ IRQ12_80_MARK, IRQ12_137_MARK, IRQ13_81_MARK, IRQ13_145_MARK,
+ IRQ14_82_MARK, IRQ14_146_MARK, IRQ15_83_MARK, IRQ15_147_MARK,
+ IRQ16_84_MARK, IRQ16_170_MARK, IRQ17_MARK, IRQ18_MARK,
+ IRQ19_MARK, IRQ20_MARK, IRQ21_MARK, IRQ22_MARK,
+ IRQ23_MARK, IRQ24_MARK, IRQ25_MARK, IRQ26_121_MARK,
+ IRQ26_172_MARK, IRQ27_122_MARK, IRQ27_180_MARK, IRQ28_123_MARK,
+ IRQ28_181_MARK, IRQ29_129_MARK, IRQ29_182_MARK, IRQ30_130_MARK,
+ IRQ30_183_MARK, IRQ31_138_MARK, IRQ31_184_MARK,
+
+ /* MSIOF0 */
+ MSIOF0_TSYNC_MARK, MSIOF0_TSCK_MARK, MSIOF0_RXD_MARK,
+ MSIOF0_RSCK_MARK, MSIOF0_RSYNC_MARK, MSIOF0_MCK0_MARK,
+ MSIOF0_MCK1_MARK, MSIOF0_SS1_MARK, MSIOF0_SS2_MARK,
+ MSIOF0_TXD_MARK,
+
+ /* MSIOF1 */
+ MSIOF1_TSCK_39_MARK, MSIOF1_TSYNC_40_MARK,
+ MSIOF1_TSCK_88_MARK, MSIOF1_TSYNC_89_MARK,
+ MSIOF1_TXD_41_MARK, MSIOF1_RXD_42_MARK,
+ MSIOF1_TXD_90_MARK, MSIOF1_RXD_91_MARK,
+ MSIOF1_SS1_43_MARK, MSIOF1_SS2_44_MARK,
+ MSIOF1_SS1_92_MARK, MSIOF1_SS2_93_MARK,
+ MSIOF1_RSCK_MARK, MSIOF1_RSYNC_MARK,
+ MSIOF1_MCK0_MARK, MSIOF1_MCK1_MARK,
+
+ /* MSIOF2 */
+ MSIOF2_RSCK_MARK, MSIOF2_RSYNC_MARK, MSIOF2_MCK0_MARK,
+ MSIOF2_MCK1_MARK, MSIOF2_SS1_MARK, MSIOF2_SS2_MARK,
+ MSIOF2_TSYNC_MARK, MSIOF2_TSCK_MARK, MSIOF2_RXD_MARK,
+ MSIOF2_TXD_MARK,
+
+ /* BBIF1 */
+ BBIF1_RXD_MARK, BBIF1_TSYNC_MARK, BBIF1_TSCK_MARK,
+ BBIF1_TXD_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK,
+ BBIF1_FLOW_MARK, BB_RX_FLOW_N_MARK,
+
+ /* BBIF2 */
+ BBIF2_TSCK1_MARK, BBIF2_TSYNC1_MARK,
+ BBIF2_TXD1_MARK, BBIF2_RXD_MARK,
+
+ /* FSI */
+ FSIACK_MARK, FSIBCK_MARK, FSIAILR_MARK, FSIAIBT_MARK,
+ FSIAISLD_MARK, FSIAOMC_MARK, FSIAOLR_MARK, FSIAOBT_MARK,
+ FSIAOSLD_MARK, FSIASPDIF_11_MARK, FSIASPDIF_15_MARK,
+
+ /* FMSI */
+ FMSOCK_MARK, FMSOOLR_MARK, FMSIOLR_MARK, FMSOOBT_MARK,
+ FMSIOBT_MARK, FMSOSLD_MARK, FMSOILR_MARK, FMSIILR_MARK,
+ FMSOIBT_MARK, FMSIIBT_MARK, FMSISLD_MARK, FMSICK_MARK,
+
+ /* SCIFA0 */
+ SCIFA0_TXD_MARK, SCIFA0_RXD_MARK, SCIFA0_SCK_MARK,
+ SCIFA0_RTS_MARK, SCIFA0_CTS_MARK,
+
+ /* SCIFA1 */
+ SCIFA1_TXD_MARK, SCIFA1_RXD_MARK, SCIFA1_SCK_MARK,
+ SCIFA1_RTS_MARK, SCIFA1_CTS_MARK,
+
+ /* SCIFA2 */
+ SCIFA2_CTS1_MARK, SCIFA2_RTS1_MARK, SCIFA2_TXD1_MARK,
+ SCIFA2_RXD1_MARK, SCIFA2_SCK1_MARK,
+
+ /* SCIFA3 */
+ SCIFA3_CTS_43_MARK, SCIFA3_CTS_140_MARK, SCIFA3_RTS_44_MARK,
+ SCIFA3_RTS_141_MARK, SCIFA3_SCK_MARK, SCIFA3_TXD_MARK,
+ SCIFA3_RXD_MARK,
+
+ /* SCIFA4 */
+ SCIFA4_RXD_MARK, SCIFA4_TXD_MARK,
+
+ /* SCIFA5 */
+ SCIFA5_RXD_MARK, SCIFA5_TXD_MARK,
+
+ /* SCIFB */
+ SCIFB_SCK_MARK, SCIFB_RTS_MARK, SCIFB_CTS_MARK,
+ SCIFB_TXD_MARK, SCIFB_RXD_MARK,
+
+ /* CEU */
+ VIO_HD_MARK, VIO_CKO1_MARK, VIO_CKO2_MARK, VIO_VD_MARK,
+ VIO_CLK_MARK, VIO_FIELD_MARK, VIO_CKO_MARK,
+ VIO_D0_MARK, VIO_D1_MARK, VIO_D2_MARK, VIO_D3_MARK,
+ VIO_D4_MARK, VIO_D5_MARK, VIO_D6_MARK, VIO_D7_MARK,
+ VIO_D8_MARK, VIO_D9_MARK, VIO_D10_MARK, VIO_D11_MARK,
+ VIO_D12_MARK, VIO_D13_MARK, VIO_D14_MARK, VIO_D15_MARK,
+
+ /* USB0 */
+ IDIN_0_MARK, EXTLP_0_MARK, OVCN2_0_MARK, PWEN_0_MARK,
+ OVCN_0_MARK, VBUS0_0_MARK,
+
+ /* USB1 */
+ IDIN_1_18_MARK, IDIN_1_113_MARK,
+ PWEN_1_115_MARK, PWEN_1_138_MARK,
+ OVCN_1_114_MARK, OVCN_1_162_MARK,
+ EXTLP_1_MARK, OVCN2_1_MARK,
+ VBUS0_1_MARK,
+
+ /* GPIO */
+ GPI0_MARK, GPI1_MARK, GPO0_MARK, GPO1_MARK,
+
+ /* BSC */
+ BS_MARK, WE1_MARK,
+ CKO_MARK, WAIT_MARK, RDWR_MARK,
+
+ A0_MARK, A1_MARK, A2_MARK, A3_MARK,
+ A6_MARK, A7_MARK, A8_MARK, A9_MARK,
+ A10_MARK, A11_MARK, A12_MARK, A13_MARK,
+ A14_MARK, A15_MARK, A16_MARK, A17_MARK,
+ A18_MARK, A19_MARK, A20_MARK, A21_MARK,
+ A22_MARK, A23_MARK, A24_MARK, A25_MARK,
+ A26_MARK,
+
+ CS0_MARK, CS2_MARK, CS4_MARK,
+ CS5A_MARK, CS5B_MARK, CS6A_MARK,
+
+ /* BSC/FLCTL */
+ RD_FSC_MARK, WE0_FWE_MARK, A4_FOE_MARK, A5_FCDE_MARK,
+ D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, D3_NAF3_MARK,
+ D4_NAF4_MARK, D5_NAF5_MARK, D6_NAF6_MARK, D7_NAF7_MARK,
+ D8_NAF8_MARK, D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK,
+ D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, D15_NAF15_MARK,
+
+ /* MMCIF(1) */
+ MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
+ MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK,
+ MMCCMD0_MARK, MMCCLK0_MARK,
+
+ /* MMCIF(2) */
+ MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
+ MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK,
+ MMCCLK1_MARK, MMCCMD1_MARK,
+
+ /* SPU2 */
+ VINT_I_MARK,
+
+ /* FLCTL */
+ FCE1_MARK, FCE0_MARK, FRB_MARK,
+
+ /* HSI */
+ GP_RX_FLAG_MARK, GP_RX_DATA_MARK, GP_TX_READY_MARK,
+ GP_RX_WAKE_MARK, MP_TX_FLAG_MARK, MP_TX_DATA_MARK,
+ MP_RX_READY_MARK, MP_TX_WAKE_MARK,
+
+ /* MFI */
+ MFIv6_MARK,
+ MFIv4_MARK,
+
+ MEMC_CS0_MARK, MEMC_BUSCLK_MEMC_A0_MARK,
+ MEMC_CS1_MEMC_A1_MARK, MEMC_ADV_MEMC_DREQ0_MARK,
+ MEMC_WAIT_MEMC_DREQ1_MARK, MEMC_NOE_MARK,
+ MEMC_NWE_MARK, MEMC_INT_MARK,
+
+ MEMC_AD0_MARK, MEMC_AD1_MARK, MEMC_AD2_MARK,
+ MEMC_AD3_MARK, MEMC_AD4_MARK, MEMC_AD5_MARK,
+ MEMC_AD6_MARK, MEMC_AD7_MARK, MEMC_AD8_MARK,
+ MEMC_AD9_MARK, MEMC_AD10_MARK, MEMC_AD11_MARK,
+ MEMC_AD12_MARK, MEMC_AD13_MARK, MEMC_AD14_MARK,
+ MEMC_AD15_MARK,
+
+ /* SIM */
+ SIM_RST_MARK, SIM_CLK_MARK, SIM_D_MARK,
+
+ /* TPU */
+ TPU0TO0_MARK, TPU0TO1_MARK,
+ TPU0TO2_93_MARK, TPU0TO2_99_MARK,
+ TPU0TO3_MARK,
+
+ /* I2C2 */
+ I2C_SCL2_MARK, I2C_SDA2_MARK,
+
+ /* I2C3(1) */
+ I2C_SCL3_MARK, I2C_SDA3_MARK,
+
+ /* I2C3(2) */
+ I2C_SCL3S_MARK, I2C_SDA3S_MARK,
+
+ /* I2C4(1) */
+ I2C_SCL4_MARK, I2C_SDA4_MARK,
+
+ /* I2C4(2) */
+ I2C_SCL4S_MARK, I2C_SDA4S_MARK,
+
+ /* KEYSC */
+ KEYOUT0_MARK, KEYIN0_121_MARK, KEYIN0_136_MARK,
+ KEYOUT1_MARK, KEYIN1_122_MARK, KEYIN1_135_MARK,
+ KEYOUT2_MARK, KEYIN2_123_MARK, KEYIN2_134_MARK,
+ KEYOUT3_MARK, KEYIN3_124_MARK, KEYIN3_133_MARK,
+ KEYOUT4_MARK, KEYIN4_MARK,
+ KEYOUT5_MARK, KEYIN5_MARK,
+ KEYOUT6_MARK, KEYIN6_MARK,
+ KEYOUT7_MARK, KEYIN7_MARK,
+
+ /* LCDC */
+ LCDC0_SELECT_MARK,
+ LCDC1_SELECT_MARK,
+ LCDHSYN_MARK, LCDCS_MARK, LCDVSYN_MARK, LCDDCK_MARK,
+ LCDWR_MARK, LCDRD_MARK, LCDDISP_MARK, LCDRS_MARK,
+ LCDLCLK_MARK, LCDDON_MARK,
+
+ LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
+ LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
+ LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
+ LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
+ LCDD16_MARK, LCDD17_MARK, LCDD18_MARK, LCDD19_MARK,
+ LCDD20_MARK, LCDD21_MARK, LCDD22_MARK, LCDD23_MARK,
+
+ /* IRDA */
+ IRDA_OUT_MARK, IRDA_IN_MARK, IRDA_FIRSEL_MARK,
+ IROUT_139_MARK, IROUT_140_MARK,
+
+ /* TSIF1 */
+ TS0_1SELECT_MARK,
+ TS0_2SELECT_MARK,
+ TS1_1SELECT_MARK,
+ TS1_2SELECT_MARK,
+
+ TS_SPSYNC1_MARK, TS_SDAT1_MARK,
+ TS_SDEN1_MARK, TS_SCK1_MARK,
+
+ /* TSIF2 */
+ TS_SPSYNC2_MARK, TS_SDAT2_MARK,
+ TS_SDEN2_MARK, TS_SCK2_MARK,
+
+ /* HDMI */
+ HDMI_HPD_MARK, HDMI_CEC_MARK,
+
+ /* SDHI0 */
+ SDHICLK0_MARK, SDHICD0_MARK,
+ SDHICMD0_MARK, SDHIWP0_MARK,
+ SDHID0_0_MARK, SDHID0_1_MARK,
+ SDHID0_2_MARK, SDHID0_3_MARK,
+
+ /* SDHI1 */
+ SDHICLK1_MARK, SDHICMD1_MARK, SDHID1_0_MARK,
+ SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK,
+
+ /* SDHI2 */
+ SDHICLK2_MARK, SDHICMD2_MARK, SDHID2_0_MARK,
+ SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK,
+
+ /* SDENC */
+ SDENC_CPG_MARK,
+ SDENC_DV_CLKI_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* specify valid pin states for each pin in GPIO mode */
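+ /* naming shorthand, inferred from the state enums above: I = input
+ only, O = output only, IO = bidirectional; the _PU, _PD and _PU_PD
+ suffixes add the corresponding pull-up and/or pull-down input states */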
+ PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1),
+ PORT_DATA_O(2), PORT_DATA_I_PD(3),
+ PORT_DATA_I_PD(4), PORT_DATA_I_PD(5),
+ PORT_DATA_IO_PU_PD(6), PORT_DATA_I_PD(7),
+ PORT_DATA_IO_PD(8), PORT_DATA_O(9),
+
+ PORT_DATA_O(10), PORT_DATA_O(11),
+ PORT_DATA_IO_PU_PD(12), PORT_DATA_IO_PD(13),
+ PORT_DATA_IO_PD(14), PORT_DATA_O(15),
+ PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17),
+ PORT_DATA_I_PD(18), PORT_DATA_IO(19),
+
+ PORT_DATA_IO(20), PORT_DATA_IO(21),
+ PORT_DATA_IO(22), PORT_DATA_IO(23),
+ PORT_DATA_IO(24), PORT_DATA_IO(25),
+ PORT_DATA_IO(26), PORT_DATA_IO(27),
+ PORT_DATA_IO(28), PORT_DATA_IO(29),
+
+ PORT_DATA_IO(30), PORT_DATA_IO(31),
+ PORT_DATA_IO(32), PORT_DATA_IO(33),
+ PORT_DATA_IO(34), PORT_DATA_IO(35),
+ PORT_DATA_IO(36), PORT_DATA_IO(37),
+ PORT_DATA_IO(38), PORT_DATA_IO(39),
+
+ PORT_DATA_IO(40), PORT_DATA_IO(41),
+ PORT_DATA_IO(42), PORT_DATA_IO(43),
+ PORT_DATA_IO(44), PORT_DATA_IO(45),
+ PORT_DATA_IO_PU(46), PORT_DATA_IO_PU(47),
+ PORT_DATA_IO_PU(48), PORT_DATA_IO_PU(49),
+
+ PORT_DATA_IO_PU(50), PORT_DATA_IO_PU(51),
+ PORT_DATA_IO_PU(52), PORT_DATA_IO_PU(53),
+ PORT_DATA_IO_PU(54), PORT_DATA_IO_PU(55),
+ PORT_DATA_IO_PU(56), PORT_DATA_IO_PU(57),
+ PORT_DATA_IO_PU(58), PORT_DATA_IO_PU(59),
+
+ PORT_DATA_IO_PU(60), PORT_DATA_IO_PU(61),
+ PORT_DATA_IO(62), PORT_DATA_O(63),
+ PORT_DATA_O(64), PORT_DATA_IO_PU(65),
+ PORT_DATA_O(66), PORT_DATA_IO_PU(67), /*66?*/
+ PORT_DATA_O(68), PORT_DATA_IO(69),
+
+ PORT_DATA_IO(70), PORT_DATA_IO(71),
+ PORT_DATA_O(72), PORT_DATA_I_PU(73),
+ PORT_DATA_I_PU_PD(74), PORT_DATA_IO_PU_PD(75),
+ PORT_DATA_IO_PU_PD(76), PORT_DATA_IO_PU_PD(77),
+ PORT_DATA_IO_PU_PD(78), PORT_DATA_IO_PU_PD(79),
+
+ PORT_DATA_IO_PU_PD(80), PORT_DATA_IO_PU_PD(81),
+ PORT_DATA_IO_PU_PD(82), PORT_DATA_IO_PU_PD(83),
+ PORT_DATA_IO_PU_PD(84), PORT_DATA_IO_PU_PD(85),
+ PORT_DATA_IO_PU_PD(86), PORT_DATA_IO_PU_PD(87),
+ PORT_DATA_IO_PU_PD(88), PORT_DATA_IO_PU_PD(89),
+
+ PORT_DATA_IO_PU_PD(90), PORT_DATA_IO_PU_PD(91),
+ PORT_DATA_IO_PU_PD(92), PORT_DATA_IO_PU_PD(93),
+ PORT_DATA_IO_PU_PD(94), PORT_DATA_IO_PU_PD(95),
+ PORT_DATA_IO_PU(96), PORT_DATA_IO_PU_PD(97),
+ PORT_DATA_IO_PU_PD(98), PORT_DATA_O(99), /*99?*/
+
+ PORT_DATA_IO_PD(100), PORT_DATA_IO_PD(101),
+ PORT_DATA_IO_PD(102), PORT_DATA_IO_PD(103),
+ PORT_DATA_IO_PD(104), PORT_DATA_IO_PD(105),
+ PORT_DATA_IO_PU(106), PORT_DATA_IO_PU(107),
+ PORT_DATA_IO_PU(108), PORT_DATA_IO_PU(109),
+
+ PORT_DATA_IO_PU(110), PORT_DATA_IO_PU(111),
+ PORT_DATA_IO_PD(112), PORT_DATA_IO_PD(113),
+ PORT_DATA_IO_PU(114), PORT_DATA_IO_PU(115),
+ PORT_DATA_IO_PU(116), PORT_DATA_IO_PU(117),
+ PORT_DATA_IO_PU(118), PORT_DATA_IO_PU(119),
+
+ PORT_DATA_IO_PU(120), PORT_DATA_IO_PD(121),
+ PORT_DATA_IO_PD(122), PORT_DATA_IO_PD(123),
+ PORT_DATA_IO_PD(124), PORT_DATA_IO_PD(125),
+ PORT_DATA_IO_PD(126), PORT_DATA_IO_PD(127),
+ PORT_DATA_IO_PD(128), PORT_DATA_IO_PU_PD(129),
+
+ PORT_DATA_IO_PU_PD(130), PORT_DATA_IO_PU_PD(131),
+ PORT_DATA_IO_PU_PD(132), PORT_DATA_IO_PU_PD(133),
+ PORT_DATA_IO_PU_PD(134), PORT_DATA_IO_PU_PD(135),
+ PORT_DATA_IO_PD(136), PORT_DATA_IO_PD(137),
+ PORT_DATA_IO_PD(138), PORT_DATA_IO_PD(139),
+
+ PORT_DATA_IO_PD(140), PORT_DATA_IO_PD(141),
+ PORT_DATA_IO_PD(142), PORT_DATA_IO_PU_PD(143),
+ PORT_DATA_IO_PD(144), PORT_DATA_IO_PD(145),
+ PORT_DATA_IO_PD(146), PORT_DATA_IO_PD(147),
+ PORT_DATA_IO_PD(148), PORT_DATA_IO_PD(149),
+
+ PORT_DATA_IO_PD(150), PORT_DATA_IO_PD(151),
+ PORT_DATA_IO_PU_PD(152), PORT_DATA_I_PD(153),
+ PORT_DATA_IO_PU_PD(154), PORT_DATA_I_PD(155),
+ PORT_DATA_IO_PD(156), PORT_DATA_IO_PD(157),
+ PORT_DATA_I_PD(158), PORT_DATA_IO_PD(159),
+
+ PORT_DATA_O(160), PORT_DATA_IO_PD(161),
+ PORT_DATA_IO_PD(162), PORT_DATA_IO_PD(163),
+ PORT_DATA_I_PD(164), PORT_DATA_IO_PD(165),
+ PORT_DATA_I_PD(166), PORT_DATA_I_PD(167),
+ PORT_DATA_I_PD(168), PORT_DATA_I_PD(169),
+
+ PORT_DATA_I_PD(170), PORT_DATA_O(171),
+ PORT_DATA_IO_PU_PD(172), PORT_DATA_IO_PU_PD(173),
+ PORT_DATA_IO_PU_PD(174), PORT_DATA_IO_PU_PD(175),
+ PORT_DATA_IO_PU_PD(176), PORT_DATA_IO_PU_PD(177),
+ PORT_DATA_IO_PU_PD(178), PORT_DATA_O(179),
+
+ PORT_DATA_IO_PU_PD(180), PORT_DATA_IO_PU_PD(181),
+ PORT_DATA_IO_PU_PD(182), PORT_DATA_IO_PU_PD(183),
+ PORT_DATA_IO_PU_PD(184), PORT_DATA_O(185),
+ PORT_DATA_IO_PU_PD(186), PORT_DATA_IO_PU_PD(187),
+ PORT_DATA_IO_PU_PD(188), PORT_DATA_IO_PU_PD(189),
+
+ PORT_DATA_IO_PU_PD(190),
+
+ /* IRQ */
+ PINMUX_DATA(IRQ0_6_MARK, PORT6_FN0, MSEL1CR_0_0),
+ PINMUX_DATA(IRQ0_162_MARK, PORT162_FN0, MSEL1CR_0_1),
+ PINMUX_DATA(IRQ1_MARK, PORT12_FN0),
+ PINMUX_DATA(IRQ2_4_MARK, PORT4_FN0, MSEL1CR_2_0),
+ PINMUX_DATA(IRQ2_5_MARK, PORT5_FN0, MSEL1CR_2_1),
+ PINMUX_DATA(IRQ3_8_MARK, PORT8_FN0, MSEL1CR_3_0),
+ PINMUX_DATA(IRQ3_16_MARK, PORT16_FN0, MSEL1CR_3_1),
+ PINMUX_DATA(IRQ4_17_MARK, PORT17_FN0, MSEL1CR_4_0),
+ PINMUX_DATA(IRQ4_163_MARK, PORT163_FN0, MSEL1CR_4_1),
+ PINMUX_DATA(IRQ5_MARK, PORT18_FN0),
+ PINMUX_DATA(IRQ6_39_MARK, PORT39_FN0, MSEL1CR_6_0),
+ PINMUX_DATA(IRQ6_164_MARK, PORT164_FN0, MSEL1CR_6_1),
+ PINMUX_DATA(IRQ7_40_MARK, PORT40_FN0, MSEL1CR_7_1),
+ PINMUX_DATA(IRQ7_167_MARK, PORT167_FN0, MSEL1CR_7_0),
+ PINMUX_DATA(IRQ8_41_MARK, PORT41_FN0, MSEL1CR_8_1),
+ PINMUX_DATA(IRQ8_168_MARK, PORT168_FN0, MSEL1CR_8_0),
+ PINMUX_DATA(IRQ9_42_MARK, PORT42_FN0, MSEL1CR_9_0),
+ PINMUX_DATA(IRQ9_169_MARK, PORT169_FN0, MSEL1CR_9_1),
+ PINMUX_DATA(IRQ10_MARK, PORT65_FN0, MSEL1CR_9_1),
+ PINMUX_DATA(IRQ11_MARK, PORT67_FN0),
+ PINMUX_DATA(IRQ12_80_MARK, PORT80_FN0, MSEL1CR_12_0),
+ PINMUX_DATA(IRQ12_137_MARK, PORT137_FN0, MSEL1CR_12_1),
+ PINMUX_DATA(IRQ13_81_MARK, PORT81_FN0, MSEL1CR_13_0),
+ PINMUX_DATA(IRQ13_145_MARK, PORT145_FN0, MSEL1CR_13_1),
+ PINMUX_DATA(IRQ14_82_MARK, PORT82_FN0, MSEL1CR_14_0),
+ PINMUX_DATA(IRQ14_146_MARK, PORT146_FN0, MSEL1CR_14_1),
+ PINMUX_DATA(IRQ15_83_MARK, PORT83_FN0, MSEL1CR_15_0),
+ PINMUX_DATA(IRQ15_147_MARK, PORT147_FN0, MSEL1CR_15_1),
+ PINMUX_DATA(IRQ16_84_MARK, PORT84_FN0, MSEL1CR_16_0),
+ PINMUX_DATA(IRQ16_170_MARK, PORT170_FN0, MSEL1CR_16_1),
+ PINMUX_DATA(IRQ17_MARK, PORT85_FN0),
+ PINMUX_DATA(IRQ18_MARK, PORT86_FN0),
+ PINMUX_DATA(IRQ19_MARK, PORT87_FN0),
+ PINMUX_DATA(IRQ20_MARK, PORT92_FN0),
+ PINMUX_DATA(IRQ21_MARK, PORT93_FN0),
+ PINMUX_DATA(IRQ22_MARK, PORT94_FN0),
+ PINMUX_DATA(IRQ23_MARK, PORT95_FN0),
+ PINMUX_DATA(IRQ24_MARK, PORT112_FN0),
+ PINMUX_DATA(IRQ25_MARK, PORT119_FN0),
+ PINMUX_DATA(IRQ26_121_MARK, PORT121_FN0, MSEL1CR_26_1),
+ PINMUX_DATA(IRQ26_172_MARK, PORT172_FN0, MSEL1CR_26_0),
+ PINMUX_DATA(IRQ27_122_MARK, PORT122_FN0, MSEL1CR_27_1),
+ PINMUX_DATA(IRQ27_180_MARK, PORT180_FN0, MSEL1CR_27_0),
+ PINMUX_DATA(IRQ28_123_MARK, PORT123_FN0, MSEL1CR_28_1),
+ PINMUX_DATA(IRQ28_181_MARK, PORT181_FN0, MSEL1CR_28_0),
+ PINMUX_DATA(IRQ29_129_MARK, PORT129_FN0, MSEL1CR_29_1),
+ PINMUX_DATA(IRQ29_182_MARK, PORT182_FN0, MSEL1CR_29_0),
+ PINMUX_DATA(IRQ30_130_MARK, PORT130_FN0, MSEL1CR_30_1),
+ PINMUX_DATA(IRQ30_183_MARK, PORT183_FN0, MSEL1CR_30_0),
+ PINMUX_DATA(IRQ31_138_MARK, PORT138_FN0, MSEL1CR_31_1),
+ PINMUX_DATA(IRQ31_184_MARK, PORT184_FN0, MSEL1CR_31_0),
+
+ /* Function 1 */
+ PINMUX_DATA(BBIF2_TSCK1_MARK, PORT0_FN1),
+ PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT1_FN1),
+ PINMUX_DATA(BBIF2_TXD1_MARK, PORT2_FN1),
+ PINMUX_DATA(BBIF2_RXD_MARK, PORT3_FN1),
+ PINMUX_DATA(FSIACK_MARK, PORT4_FN1),
+ PINMUX_DATA(FSIAILR_MARK, PORT5_FN1),
+ PINMUX_DATA(FSIAIBT_MARK, PORT6_FN1),
+ PINMUX_DATA(FSIAISLD_MARK, PORT7_FN1),
+ PINMUX_DATA(FSIAOMC_MARK, PORT8_FN1),
+ PINMUX_DATA(FSIAOLR_MARK, PORT9_FN1),
+ PINMUX_DATA(FSIAOBT_MARK, PORT10_FN1),
+ PINMUX_DATA(FSIAOSLD_MARK, PORT11_FN1),
+ PINMUX_DATA(FMSOCK_MARK, PORT12_FN1),
+ PINMUX_DATA(FMSOOLR_MARK, PORT13_FN1),
+ PINMUX_DATA(FMSOOBT_MARK, PORT14_FN1),
+ PINMUX_DATA(FMSOSLD_MARK, PORT15_FN1),
+ PINMUX_DATA(FMSOILR_MARK, PORT16_FN1),
+ PINMUX_DATA(FMSOIBT_MARK, PORT17_FN1),
+ PINMUX_DATA(FMSISLD_MARK, PORT18_FN1),
+ PINMUX_DATA(A0_MARK, PORT19_FN1),
+ PINMUX_DATA(A1_MARK, PORT20_FN1),
+ PINMUX_DATA(A2_MARK, PORT21_FN1),
+ PINMUX_DATA(A3_MARK, PORT22_FN1),
+ PINMUX_DATA(A4_FOE_MARK, PORT23_FN1),
+ PINMUX_DATA(A5_FCDE_MARK, PORT24_FN1),
+ PINMUX_DATA(A6_MARK, PORT25_FN1),
+ PINMUX_DATA(A7_MARK, PORT26_FN1),
+ PINMUX_DATA(A8_MARK, PORT27_FN1),
+ PINMUX_DATA(A9_MARK, PORT28_FN1),
+ PINMUX_DATA(A10_MARK, PORT29_FN1),
+ PINMUX_DATA(A11_MARK, PORT30_FN1),
+ PINMUX_DATA(A12_MARK, PORT31_FN1),
+ PINMUX_DATA(A13_MARK, PORT32_FN1),
+ PINMUX_DATA(A14_MARK, PORT33_FN1),
+ PINMUX_DATA(A15_MARK, PORT34_FN1),
+ PINMUX_DATA(A16_MARK, PORT35_FN1),
+ PINMUX_DATA(A17_MARK, PORT36_FN1),
+ PINMUX_DATA(A18_MARK, PORT37_FN1),
+ PINMUX_DATA(A19_MARK, PORT38_FN1),
+ PINMUX_DATA(A20_MARK, PORT39_FN1),
+ PINMUX_DATA(A21_MARK, PORT40_FN1),
+ PINMUX_DATA(A22_MARK, PORT41_FN1),
+ PINMUX_DATA(A23_MARK, PORT42_FN1),
+ PINMUX_DATA(A24_MARK, PORT43_FN1),
+ PINMUX_DATA(A25_MARK, PORT44_FN1),
+ PINMUX_DATA(A26_MARK, PORT45_FN1),
+ PINMUX_DATA(D0_NAF0_MARK, PORT46_FN1),
+ PINMUX_DATA(D1_NAF1_MARK, PORT47_FN1),
+ PINMUX_DATA(D2_NAF2_MARK, PORT48_FN1),
+ PINMUX_DATA(D3_NAF3_MARK, PORT49_FN1),
+ PINMUX_DATA(D4_NAF4_MARK, PORT50_FN1),
+ PINMUX_DATA(D5_NAF5_MARK, PORT51_FN1),
+ PINMUX_DATA(D6_NAF6_MARK, PORT52_FN1),
+ PINMUX_DATA(D7_NAF7_MARK, PORT53_FN1),
+ PINMUX_DATA(D8_NAF8_MARK, PORT54_FN1),
+ PINMUX_DATA(D9_NAF9_MARK, PORT55_FN1),
+ PINMUX_DATA(D10_NAF10_MARK, PORT56_FN1),
+ PINMUX_DATA(D11_NAF11_MARK, PORT57_FN1),
+ PINMUX_DATA(D12_NAF12_MARK, PORT58_FN1),
+ PINMUX_DATA(D13_NAF13_MARK, PORT59_FN1),
+ PINMUX_DATA(D14_NAF14_MARK, PORT60_FN1),
+ PINMUX_DATA(D15_NAF15_MARK, PORT61_FN1),
+ PINMUX_DATA(CS0_MARK, PORT62_FN1),
+ PINMUX_DATA(CS2_MARK, PORT63_FN1),
+ PINMUX_DATA(CS4_MARK, PORT64_FN1),
+ PINMUX_DATA(CS5A_MARK, PORT65_FN1),
+ PINMUX_DATA(CS5B_MARK, PORT66_FN1),
+ PINMUX_DATA(CS6A_MARK, PORT67_FN1),
+ PINMUX_DATA(FCE0_MARK, PORT68_FN1),
+ PINMUX_DATA(RD_FSC_MARK, PORT69_FN1),
+ PINMUX_DATA(WE0_FWE_MARK, PORT70_FN1),
+ PINMUX_DATA(WE1_MARK, PORT71_FN1),
+ PINMUX_DATA(CKO_MARK, PORT72_FN1),
+ PINMUX_DATA(FRB_MARK, PORT73_FN1),
+ PINMUX_DATA(WAIT_MARK, PORT74_FN1),
+ PINMUX_DATA(RDWR_MARK, PORT75_FN1),
+ PINMUX_DATA(MEMC_AD0_MARK, PORT76_FN1),
+ PINMUX_DATA(MEMC_AD1_MARK, PORT77_FN1),
+ PINMUX_DATA(MEMC_AD2_MARK, PORT78_FN1),
+ PINMUX_DATA(MEMC_AD3_MARK, PORT79_FN1),
+ PINMUX_DATA(MEMC_AD4_MARK, PORT80_FN1),
+ PINMUX_DATA(MEMC_AD5_MARK, PORT81_FN1),
+ PINMUX_DATA(MEMC_AD6_MARK, PORT82_FN1),
+ PINMUX_DATA(MEMC_AD7_MARK, PORT83_FN1),
+ PINMUX_DATA(MEMC_AD8_MARK, PORT84_FN1),
+ PINMUX_DATA(MEMC_AD9_MARK, PORT85_FN1),
+ PINMUX_DATA(MEMC_AD10_MARK, PORT86_FN1),
+ PINMUX_DATA(MEMC_AD11_MARK, PORT87_FN1),
+ PINMUX_DATA(MEMC_AD12_MARK, PORT88_FN1),
+ PINMUX_DATA(MEMC_AD13_MARK, PORT89_FN1),
+ PINMUX_DATA(MEMC_AD14_MARK, PORT90_FN1),
+ PINMUX_DATA(MEMC_AD15_MARK, PORT91_FN1),
+ PINMUX_DATA(MEMC_CS0_MARK, PORT92_FN1),
+ PINMUX_DATA(MEMC_BUSCLK_MEMC_A0_MARK, PORT93_FN1),
+ PINMUX_DATA(MEMC_CS1_MEMC_A1_MARK, PORT94_FN1),
+ PINMUX_DATA(MEMC_ADV_MEMC_DREQ0_MARK, PORT95_FN1),
+ PINMUX_DATA(MEMC_WAIT_MEMC_DREQ1_MARK, PORT96_FN1),
+ PINMUX_DATA(MEMC_NOE_MARK, PORT97_FN1),
+ PINMUX_DATA(MEMC_NWE_MARK, PORT98_FN1),
+ PINMUX_DATA(MEMC_INT_MARK, PORT99_FN1),
+ PINMUX_DATA(VIO_VD_MARK, PORT100_FN1),
+ PINMUX_DATA(VIO_HD_MARK, PORT101_FN1),
+ PINMUX_DATA(VIO_D0_MARK, PORT102_FN1),
+ PINMUX_DATA(VIO_D1_MARK, PORT103_FN1),
+ PINMUX_DATA(VIO_D2_MARK, PORT104_FN1),
+ PINMUX_DATA(VIO_D3_MARK, PORT105_FN1),
+ PINMUX_DATA(VIO_D4_MARK, PORT106_FN1),
+ PINMUX_DATA(VIO_D5_MARK, PORT107_FN1),
+ PINMUX_DATA(VIO_D6_MARK, PORT108_FN1),
+ PINMUX_DATA(VIO_D7_MARK, PORT109_FN1),
+ PINMUX_DATA(VIO_D8_MARK, PORT110_FN1),
+ PINMUX_DATA(VIO_D9_MARK, PORT111_FN1),
+ PINMUX_DATA(VIO_D10_MARK, PORT112_FN1),
+ PINMUX_DATA(VIO_D11_MARK, PORT113_FN1),
+ PINMUX_DATA(VIO_D12_MARK, PORT114_FN1),
+ PINMUX_DATA(VIO_D13_MARK, PORT115_FN1),
+ PINMUX_DATA(VIO_D14_MARK, PORT116_FN1),
+ PINMUX_DATA(VIO_D15_MARK, PORT117_FN1),
+ PINMUX_DATA(VIO_CLK_MARK, PORT118_FN1),
+ PINMUX_DATA(VIO_FIELD_MARK, PORT119_FN1),
+ PINMUX_DATA(VIO_CKO_MARK, PORT120_FN1),
+ PINMUX_DATA(LCDD0_MARK, PORT121_FN1),
+ PINMUX_DATA(LCDD1_MARK, PORT122_FN1),
+ PINMUX_DATA(LCDD2_MARK, PORT123_FN1),
+ PINMUX_DATA(LCDD3_MARK, PORT124_FN1),
+ PINMUX_DATA(LCDD4_MARK, PORT125_FN1),
+ PINMUX_DATA(LCDD5_MARK, PORT126_FN1),
+ PINMUX_DATA(LCDD6_MARK, PORT127_FN1),
+ PINMUX_DATA(LCDD7_MARK, PORT128_FN1),
+ PINMUX_DATA(LCDD8_MARK, PORT129_FN1),
+ PINMUX_DATA(LCDD9_MARK, PORT130_FN1),
+ PINMUX_DATA(LCDD10_MARK, PORT131_FN1),
+ PINMUX_DATA(LCDD11_MARK, PORT132_FN1),
+ PINMUX_DATA(LCDD12_MARK, PORT133_FN1),
+ PINMUX_DATA(LCDD13_MARK, PORT134_FN1),
+ PINMUX_DATA(LCDD14_MARK, PORT135_FN1),
+ PINMUX_DATA(LCDD15_MARK, PORT136_FN1),
+ PINMUX_DATA(LCDD16_MARK, PORT137_FN1),
+ PINMUX_DATA(LCDD17_MARK, PORT138_FN1),
+ PINMUX_DATA(LCDD18_MARK, PORT139_FN1),
+ PINMUX_DATA(LCDD19_MARK, PORT140_FN1),
+ PINMUX_DATA(LCDD20_MARK, PORT141_FN1),
+ PINMUX_DATA(LCDD21_MARK, PORT142_FN1),
+ PINMUX_DATA(LCDD22_MARK, PORT143_FN1),
+ PINMUX_DATA(LCDD23_MARK, PORT144_FN1),
+ PINMUX_DATA(LCDHSYN_MARK, PORT145_FN1),
+ PINMUX_DATA(LCDVSYN_MARK, PORT146_FN1),
+ PINMUX_DATA(LCDDCK_MARK, PORT147_FN1),
+ PINMUX_DATA(LCDRD_MARK, PORT148_FN1),
+ PINMUX_DATA(LCDDISP_MARK, PORT149_FN1),
+ PINMUX_DATA(LCDLCLK_MARK, PORT150_FN1),
+ PINMUX_DATA(LCDDON_MARK, PORT151_FN1),
+ PINMUX_DATA(SCIFA0_TXD_MARK, PORT152_FN1),
+ PINMUX_DATA(SCIFA0_RXD_MARK, PORT153_FN1),
+ PINMUX_DATA(SCIFA1_TXD_MARK, PORT154_FN1),
+ PINMUX_DATA(SCIFA1_RXD_MARK, PORT155_FN1),
+ PINMUX_DATA(TS_SPSYNC1_MARK, PORT156_FN1),
+ PINMUX_DATA(TS_SDAT1_MARK, PORT157_FN1),
+ PINMUX_DATA(TS_SDEN1_MARK, PORT158_FN1),
+ PINMUX_DATA(TS_SCK1_MARK, PORT159_FN1),
+ PINMUX_DATA(TPU0TO0_MARK, PORT160_FN1),
+ PINMUX_DATA(TPU0TO1_MARK, PORT161_FN1),
+ PINMUX_DATA(SCIFB_SCK_MARK, PORT162_FN1),
+ PINMUX_DATA(SCIFB_RTS_MARK, PORT163_FN1),
+ PINMUX_DATA(SCIFB_CTS_MARK, PORT164_FN1),
+ PINMUX_DATA(SCIFB_TXD_MARK, PORT165_FN1),
+ PINMUX_DATA(SCIFB_RXD_MARK, PORT166_FN1),
+ PINMUX_DATA(VBUS0_0_MARK, PORT167_FN1),
+ PINMUX_DATA(VBUS0_1_MARK, PORT168_FN1),
+ PINMUX_DATA(HDMI_HPD_MARK, PORT169_FN1),
+ PINMUX_DATA(HDMI_CEC_MARK, PORT170_FN1),
+ PINMUX_DATA(SDHICLK0_MARK, PORT171_FN1),
+ PINMUX_DATA(SDHICD0_MARK, PORT172_FN1),
+ PINMUX_DATA(SDHID0_0_MARK, PORT173_FN1),
+ PINMUX_DATA(SDHID0_1_MARK, PORT174_FN1),
+ PINMUX_DATA(SDHID0_2_MARK, PORT175_FN1),
+ PINMUX_DATA(SDHID0_3_MARK, PORT176_FN1),
+ PINMUX_DATA(SDHICMD0_MARK, PORT177_FN1),
+ PINMUX_DATA(SDHIWP0_MARK, PORT178_FN1),
+ PINMUX_DATA(SDHICLK1_MARK, PORT179_FN1),
+ PINMUX_DATA(SDHID1_0_MARK, PORT180_FN1),
+ PINMUX_DATA(SDHID1_1_MARK, PORT181_FN1),
+ PINMUX_DATA(SDHID1_2_MARK, PORT182_FN1),
+ PINMUX_DATA(SDHID1_3_MARK, PORT183_FN1),
+ PINMUX_DATA(SDHICMD1_MARK, PORT184_FN1),
+ PINMUX_DATA(SDHICLK2_MARK, PORT185_FN1),
+ PINMUX_DATA(SDHID2_0_MARK, PORT186_FN1),
+ PINMUX_DATA(SDHID2_1_MARK, PORT187_FN1),
+ PINMUX_DATA(SDHID2_2_MARK, PORT188_FN1),
+ PINMUX_DATA(SDHID2_3_MARK, PORT189_FN1),
+ PINMUX_DATA(SDHICMD2_MARK, PORT190_FN1),
+
+ /* Function 2 */
+ PINMUX_DATA(FSIBCK_MARK, PORT4_FN2),
+ PINMUX_DATA(SCIFA4_RXD_MARK, PORT5_FN2),
+ PINMUX_DATA(SCIFA4_TXD_MARK, PORT6_FN2),
+ PINMUX_DATA(SCIFA5_RXD_MARK, PORT8_FN2),
+ PINMUX_DATA(FSIASPDIF_11_MARK, PORT11_FN2),
+ PINMUX_DATA(SCIFA5_TXD_MARK, PORT12_FN2),
+ PINMUX_DATA(FMSIOLR_MARK, PORT13_FN2),
+ PINMUX_DATA(FMSIOBT_MARK, PORT14_FN2),
+ PINMUX_DATA(FSIASPDIF_15_MARK, PORT15_FN2),
+ PINMUX_DATA(FMSIILR_MARK, PORT16_FN2),
+ PINMUX_DATA(FMSIIBT_MARK, PORT17_FN2),
+ PINMUX_DATA(BS_MARK, PORT19_FN2),
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT36_FN2),
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PORT37_FN2),
+ PINMUX_DATA(MSIOF0_RXD_MARK, PORT38_FN2),
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PORT39_FN2),
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT40_FN2),
+ PINMUX_DATA(MSIOF0_MCK0_MARK, PORT41_FN2),
+ PINMUX_DATA(MSIOF0_MCK1_MARK, PORT42_FN2),
+ PINMUX_DATA(MSIOF0_SS1_MARK, PORT43_FN2),
+ PINMUX_DATA(MSIOF0_SS2_MARK, PORT44_FN2),
+ PINMUX_DATA(MSIOF0_TXD_MARK, PORT45_FN2),
+ PINMUX_DATA(FMSICK_MARK, PORT65_FN2),
+ PINMUX_DATA(FCE1_MARK, PORT66_FN2),
+ PINMUX_DATA(BBIF1_RXD_MARK, PORT76_FN2),
+ PINMUX_DATA(BBIF1_TSYNC_MARK, PORT77_FN2),
+ PINMUX_DATA(BBIF1_TSCK_MARK, PORT78_FN2),
+ PINMUX_DATA(BBIF1_TXD_MARK, PORT79_FN2),
+ PINMUX_DATA(BBIF1_RSCK_MARK, PORT80_FN2),
+ PINMUX_DATA(BBIF1_RSYNC_MARK, PORT81_FN2),
+ PINMUX_DATA(BBIF1_FLOW_MARK, PORT82_FN2),
+ PINMUX_DATA(BB_RX_FLOW_N_MARK, PORT83_FN2),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PORT84_FN2),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT85_FN2),
+ PINMUX_DATA(MSIOF1_MCK0_MARK, PORT86_FN2),
+ PINMUX_DATA(MSIOF1_MCK1_MARK, PORT87_FN2),
+ PINMUX_DATA(MSIOF1_TSCK_88_MARK, PORT88_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_TSYNC_89_MARK, PORT89_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_TXD_90_MARK, PORT90_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_RXD_91_MARK, PORT91_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_SS1_92_MARK, PORT92_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_SS2_93_MARK, PORT93_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(SCIFA2_CTS1_MARK, PORT94_FN2),
+ PINMUX_DATA(SCIFA2_RTS1_MARK, PORT95_FN2),
+ PINMUX_DATA(SCIFA2_TXD1_MARK, PORT96_FN2),
+ PINMUX_DATA(SCIFA2_RXD1_MARK, PORT97_FN2),
+ PINMUX_DATA(SCIFA2_SCK1_MARK, PORT98_FN2),
+ PINMUX_DATA(I2C_SCL2_MARK, PORT110_FN2),
+ PINMUX_DATA(I2C_SDA2_MARK, PORT111_FN2),
+ PINMUX_DATA(I2C_SCL3_MARK, PORT114_FN2, MSEL4CR_16_1),
+ PINMUX_DATA(I2C_SDA3_MARK, PORT115_FN2, MSEL4CR_16_1),
+ PINMUX_DATA(I2C_SCL4_MARK, PORT116_FN2, MSEL4CR_17_1),
+ PINMUX_DATA(I2C_SDA4_MARK, PORT117_FN2, MSEL4CR_17_1),
+ PINMUX_DATA(MSIOF2_RSCK_MARK, PORT134_FN2),
+ PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT135_FN2),
+ PINMUX_DATA(MSIOF2_MCK0_MARK, PORT136_FN2),
+ PINMUX_DATA(MSIOF2_MCK1_MARK, PORT137_FN2),
+ PINMUX_DATA(MSIOF2_SS1_MARK, PORT138_FN2),
+ PINMUX_DATA(MSIOF2_SS2_MARK, PORT139_FN2),
+ PINMUX_DATA(SCIFA3_CTS_140_MARK, PORT140_FN2, MSEL3CR_9_1),
+ PINMUX_DATA(SCIFA3_RTS_141_MARK, PORT141_FN2),
+ PINMUX_DATA(SCIFA3_SCK_MARK, PORT142_FN2),
+ PINMUX_DATA(SCIFA3_TXD_MARK, PORT143_FN2),
+ PINMUX_DATA(SCIFA3_RXD_MARK, PORT144_FN2),
+ PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT148_FN2),
+ PINMUX_DATA(MSIOF2_TSCK_MARK, PORT149_FN2),
+ PINMUX_DATA(MSIOF2_RXD_MARK, PORT150_FN2),
+ PINMUX_DATA(MSIOF2_TXD_MARK, PORT151_FN2),
+ PINMUX_DATA(SCIFA0_SCK_MARK, PORT156_FN2),
+ PINMUX_DATA(SCIFA0_RTS_MARK, PORT157_FN2),
+ PINMUX_DATA(SCIFA0_CTS_MARK, PORT158_FN2),
+ PINMUX_DATA(SCIFA1_SCK_MARK, PORT159_FN2),
+ PINMUX_DATA(SCIFA1_RTS_MARK, PORT160_FN2),
+ PINMUX_DATA(SCIFA1_CTS_MARK, PORT161_FN2),
+
+ /* Function 3 */
+ PINMUX_DATA(VIO_CKO1_MARK, PORT16_FN3),
+ PINMUX_DATA(VIO_CKO2_MARK, PORT17_FN3),
+ PINMUX_DATA(IDIN_1_18_MARK, PORT18_FN3, MSEL4CR_14_1),
+ PINMUX_DATA(MSIOF1_TSCK_39_MARK, PORT39_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_TSYNC_40_MARK, PORT40_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_TXD_41_MARK, PORT41_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_RXD_42_MARK, PORT42_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_SS1_43_MARK, PORT43_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_SS2_44_MARK, PORT44_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MMCD1_0_MARK, PORT54_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_1_MARK, PORT55_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_2_MARK, PORT56_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_3_MARK, PORT57_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_4_MARK, PORT58_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_5_MARK, PORT59_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_6_MARK, PORT60_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_7_MARK, PORT61_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(VINT_I_MARK, PORT65_FN3),
+ PINMUX_DATA(MMCCLK1_MARK, PORT66_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCCMD1_MARK, PORT67_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(TPU0TO2_93_MARK, PORT93_FN3),
+ PINMUX_DATA(TPU0TO2_99_MARK, PORT99_FN3),
+ PINMUX_DATA(TPU0TO3_MARK, PORT112_FN3),
+ PINMUX_DATA(IDIN_0_MARK, PORT113_FN3),
+ PINMUX_DATA(EXTLP_0_MARK, PORT114_FN3),
+ PINMUX_DATA(OVCN2_0_MARK, PORT115_FN3),
+ PINMUX_DATA(PWEN_0_MARK, PORT116_FN3),
+ PINMUX_DATA(OVCN_0_MARK, PORT117_FN3),
+ PINMUX_DATA(KEYOUT7_MARK, PORT121_FN3),
+ PINMUX_DATA(KEYOUT6_MARK, PORT122_FN3),
+ PINMUX_DATA(KEYOUT5_MARK, PORT123_FN3),
+ PINMUX_DATA(KEYOUT4_MARK, PORT124_FN3),
+ PINMUX_DATA(KEYOUT3_MARK, PORT125_FN3),
+ PINMUX_DATA(KEYOUT2_MARK, PORT126_FN3),
+ PINMUX_DATA(KEYOUT1_MARK, PORT127_FN3),
+ PINMUX_DATA(KEYOUT0_MARK, PORT128_FN3),
+ PINMUX_DATA(KEYIN7_MARK, PORT129_FN3),
+ PINMUX_DATA(KEYIN6_MARK, PORT130_FN3),
+ PINMUX_DATA(KEYIN5_MARK, PORT131_FN3),
+ PINMUX_DATA(KEYIN4_MARK, PORT132_FN3),
+ PINMUX_DATA(KEYIN3_133_MARK, PORT133_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(KEYIN2_134_MARK, PORT134_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(KEYIN1_135_MARK, PORT135_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(KEYIN0_136_MARK, PORT136_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(TS_SPSYNC2_MARK, PORT137_FN3),
+ PINMUX_DATA(IROUT_139_MARK, PORT139_FN3),
+ PINMUX_DATA(IRDA_OUT_MARK, PORT140_FN3),
+ PINMUX_DATA(IRDA_IN_MARK, PORT141_FN3),
+ PINMUX_DATA(IRDA_FIRSEL_MARK, PORT142_FN3),
+ PINMUX_DATA(TS_SDAT2_MARK, PORT145_FN3),
+ PINMUX_DATA(TS_SDEN2_MARK, PORT146_FN3),
+ PINMUX_DATA(TS_SCK2_MARK, PORT147_FN3),
+
+ /* Function 4 */
+ PINMUX_DATA(SCIFA3_CTS_43_MARK, PORT43_FN4, MSEL3CR_9_0),
+ PINMUX_DATA(SCIFA3_RTS_44_MARK, PORT44_FN4),
+ PINMUX_DATA(GP_RX_FLAG_MARK, PORT76_FN4),
+ PINMUX_DATA(GP_RX_DATA_MARK, PORT77_FN4),
+ PINMUX_DATA(GP_TX_READY_MARK, PORT78_FN4),
+ PINMUX_DATA(GP_RX_WAKE_MARK, PORT79_FN4),
+ PINMUX_DATA(MP_TX_FLAG_MARK, PORT80_FN4),
+ PINMUX_DATA(MP_TX_DATA_MARK, PORT81_FN4),
+ PINMUX_DATA(MP_RX_READY_MARK, PORT82_FN4),
+ PINMUX_DATA(MP_TX_WAKE_MARK, PORT83_FN4),
+ PINMUX_DATA(MMCD0_0_MARK, PORT84_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_1_MARK, PORT85_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_2_MARK, PORT86_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_3_MARK, PORT87_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_4_MARK, PORT88_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_5_MARK, PORT89_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_6_MARK, PORT90_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_7_MARK, PORT91_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCCMD0_MARK, PORT92_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(SIM_RST_MARK, PORT94_FN4),
+ PINMUX_DATA(SIM_CLK_MARK, PORT95_FN4),
+ PINMUX_DATA(SIM_D_MARK, PORT98_FN4),
+ PINMUX_DATA(MMCCLK0_MARK, PORT99_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(IDIN_1_113_MARK, PORT113_FN4, MSEL4CR_14_0),
+ PINMUX_DATA(OVCN_1_114_MARK, PORT114_FN4, MSEL4CR_14_0),
+ PINMUX_DATA(PWEN_1_115_MARK, PORT115_FN4),
+ PINMUX_DATA(EXTLP_1_MARK, PORT116_FN4),
+ PINMUX_DATA(OVCN2_1_MARK, PORT117_FN4),
+ PINMUX_DATA(KEYIN0_121_MARK, PORT121_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(KEYIN1_122_MARK, PORT122_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(KEYIN2_123_MARK, PORT123_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(KEYIN3_124_MARK, PORT124_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(PWEN_1_138_MARK, PORT138_FN4),
+ PINMUX_DATA(IROUT_140_MARK, PORT140_FN4),
+ PINMUX_DATA(LCDCS_MARK, PORT145_FN4),
+ PINMUX_DATA(LCDWR_MARK, PORT147_FN4),
+ PINMUX_DATA(LCDRS_MARK, PORT149_FN4),
+ PINMUX_DATA(OVCN_1_162_MARK, PORT162_FN4, MSEL4CR_14_1),
+
+ /* Function 5 */
+ PINMUX_DATA(GPI0_MARK, PORT41_FN5),
+ PINMUX_DATA(GPI1_MARK, PORT42_FN5),
+ PINMUX_DATA(GPO0_MARK, PORT43_FN5),
+ PINMUX_DATA(GPO1_MARK, PORT44_FN5),
+ PINMUX_DATA(I2C_SCL3S_MARK, PORT137_FN5, MSEL4CR_16_0),
+ PINMUX_DATA(I2C_SDA3S_MARK, PORT145_FN5, MSEL4CR_16_0),
+ PINMUX_DATA(I2C_SCL4S_MARK, PORT146_FN5, MSEL4CR_17_0),
+ PINMUX_DATA(I2C_SDA4S_MARK, PORT147_FN5, MSEL4CR_17_0),
+
+ /* Function select */
+ PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0),
+ PINMUX_DATA(LCDC1_SELECT_MARK, MSEL3CR_6_1),
+
+ PINMUX_DATA(TS0_1SELECT_MARK, MSEL3CR_21_0, MSEL3CR_20_0),
+ PINMUX_DATA(TS0_2SELECT_MARK, MSEL3CR_21_0, MSEL3CR_20_1),
+ PINMUX_DATA(TS1_1SELECT_MARK, MSEL3CR_27_0, MSEL3CR_26_0),
+ PINMUX_DATA(TS1_2SELECT_MARK, MSEL3CR_27_0, MSEL3CR_26_1),
+
+ PINMUX_DATA(SDENC_CPG_MARK, MSEL4CR_19_0),
+ PINMUX_DATA(SDENC_DV_CLKI_MARK, MSEL4CR_19_1),
+
+ PINMUX_DATA(MFIv6_MARK, MSEL4CR_6_0),
+ PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* PORT */
+ GPIO_PORT_ALL(),
+
+ /* IRQ */
+ GPIO_FN(IRQ0_6), GPIO_FN(IRQ0_162), GPIO_FN(IRQ1),
+ GPIO_FN(IRQ2_4), GPIO_FN(IRQ2_5), GPIO_FN(IRQ3_8),
+ GPIO_FN(IRQ3_16), GPIO_FN(IRQ4_17), GPIO_FN(IRQ4_163),
+ GPIO_FN(IRQ5), GPIO_FN(IRQ6_39), GPIO_FN(IRQ6_164),
+ GPIO_FN(IRQ7_40), GPIO_FN(IRQ7_167), GPIO_FN(IRQ8_41),
+ GPIO_FN(IRQ8_168), GPIO_FN(IRQ9_42), GPIO_FN(IRQ9_169),
+ GPIO_FN(IRQ10), GPIO_FN(IRQ11), GPIO_FN(IRQ12_80),
+ GPIO_FN(IRQ12_137), GPIO_FN(IRQ13_81), GPIO_FN(IRQ13_145),
+ GPIO_FN(IRQ14_82), GPIO_FN(IRQ14_146), GPIO_FN(IRQ15_83),
+ GPIO_FN(IRQ15_147), GPIO_FN(IRQ16_84), GPIO_FN(IRQ16_170),
+ GPIO_FN(IRQ17), GPIO_FN(IRQ18), GPIO_FN(IRQ19),
+ GPIO_FN(IRQ20), GPIO_FN(IRQ21), GPIO_FN(IRQ22),
+ GPIO_FN(IRQ23), GPIO_FN(IRQ24), GPIO_FN(IRQ25),
+ GPIO_FN(IRQ26_121), GPIO_FN(IRQ26_172), GPIO_FN(IRQ27_122),
+ GPIO_FN(IRQ27_180), GPIO_FN(IRQ28_123), GPIO_FN(IRQ28_181),
+ GPIO_FN(IRQ29_129), GPIO_FN(IRQ29_182), GPIO_FN(IRQ30_130),
+ GPIO_FN(IRQ30_183), GPIO_FN(IRQ31_138), GPIO_FN(IRQ31_184),
+
+ /* MSIOF0 */
+ GPIO_FN(MSIOF0_TSYNC), GPIO_FN(MSIOF0_TSCK), GPIO_FN(MSIOF0_RXD),
+ GPIO_FN(MSIOF0_RSCK), GPIO_FN(MSIOF0_RSYNC), GPIO_FN(MSIOF0_MCK0),
+ GPIO_FN(MSIOF0_MCK1), GPIO_FN(MSIOF0_SS1), GPIO_FN(MSIOF0_SS2),
+ GPIO_FN(MSIOF0_TXD),
+
+ /* MSIOF1 */
+ GPIO_FN(MSIOF1_TSCK_39), GPIO_FN(MSIOF1_TSCK_88),
+ GPIO_FN(MSIOF1_TSYNC_40), GPIO_FN(MSIOF1_TSYNC_89),
+ GPIO_FN(MSIOF1_TXD_41), GPIO_FN(MSIOF1_TXD_90),
+ GPIO_FN(MSIOF1_RXD_42), GPIO_FN(MSIOF1_RXD_91),
+ GPIO_FN(MSIOF1_SS1_43), GPIO_FN(MSIOF1_SS1_92),
+ GPIO_FN(MSIOF1_SS2_44), GPIO_FN(MSIOF1_SS2_93),
+ GPIO_FN(MSIOF1_RSCK), GPIO_FN(MSIOF1_RSYNC),
+ GPIO_FN(MSIOF1_MCK0), GPIO_FN(MSIOF1_MCK1),
+
+ /* MSIOF2 */
+ GPIO_FN(MSIOF2_RSCK), GPIO_FN(MSIOF2_RSYNC), GPIO_FN(MSIOF2_MCK0),
+ GPIO_FN(MSIOF2_MCK1), GPIO_FN(MSIOF2_SS1), GPIO_FN(MSIOF2_SS2),
+ GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_TSCK), GPIO_FN(MSIOF2_RXD),
+ GPIO_FN(MSIOF2_TXD),
+
+ /* BBIF1 */
+ GPIO_FN(BBIF1_RXD), GPIO_FN(BBIF1_TSYNC), GPIO_FN(BBIF1_TSCK),
+ GPIO_FN(BBIF1_TXD), GPIO_FN(BBIF1_RSCK), GPIO_FN(BBIF1_RSYNC),
+ GPIO_FN(BBIF1_FLOW), GPIO_FN(BB_RX_FLOW_N),
+
+ /* BBIF2 */
+ GPIO_FN(BBIF2_TSCK1), GPIO_FN(BBIF2_TSYNC1),
+ GPIO_FN(BBIF2_TXD1), GPIO_FN(BBIF2_RXD),
+
+ /* FSI */
+ GPIO_FN(FSIACK), GPIO_FN(FSIBCK), GPIO_FN(FSIAILR),
+ GPIO_FN(FSIAIBT), GPIO_FN(FSIAISLD), GPIO_FN(FSIAOMC),
+ GPIO_FN(FSIAOLR), GPIO_FN(FSIAOBT), GPIO_FN(FSIAOSLD),
+ GPIO_FN(FSIASPDIF_11), GPIO_FN(FSIASPDIF_15),
+
+ /* FMSI */
+ GPIO_FN(FMSOCK), GPIO_FN(FMSOOLR), GPIO_FN(FMSIOLR),
+ GPIO_FN(FMSOOBT), GPIO_FN(FMSIOBT), GPIO_FN(FMSOSLD),
+ GPIO_FN(FMSOILR), GPIO_FN(FMSIILR), GPIO_FN(FMSOIBT),
+ GPIO_FN(FMSIIBT), GPIO_FN(FMSISLD), GPIO_FN(FMSICK),
+
+ /* SCIFA0 */
+ GPIO_FN(SCIFA0_TXD), GPIO_FN(SCIFA0_RXD), GPIO_FN(SCIFA0_SCK),
+ GPIO_FN(SCIFA0_RTS), GPIO_FN(SCIFA0_CTS),
+
+ /* SCIFA1 */
+ GPIO_FN(SCIFA1_TXD), GPIO_FN(SCIFA1_RXD), GPIO_FN(SCIFA1_SCK),
+ GPIO_FN(SCIFA1_RTS), GPIO_FN(SCIFA1_CTS),
+
+ /* SCIFA2 */
+ GPIO_FN(SCIFA2_CTS1), GPIO_FN(SCIFA2_RTS1), GPIO_FN(SCIFA2_TXD1),
+ GPIO_FN(SCIFA2_RXD1), GPIO_FN(SCIFA2_SCK1),
+
+ /* SCIFA3 */
+ GPIO_FN(SCIFA3_CTS_43), GPIO_FN(SCIFA3_CTS_140),
+ GPIO_FN(SCIFA3_RTS_44), GPIO_FN(SCIFA3_RTS_141),
+ GPIO_FN(SCIFA3_SCK), GPIO_FN(SCIFA3_TXD),
+ GPIO_FN(SCIFA3_RXD),
+
+ /* SCIFA4 */
+ GPIO_FN(SCIFA4_RXD), GPIO_FN(SCIFA4_TXD),
+
+ /* SCIFA5 */
+ GPIO_FN(SCIFA5_RXD), GPIO_FN(SCIFA5_TXD),
+
+ /* SCIFB */
+ GPIO_FN(SCIFB_SCK), GPIO_FN(SCIFB_RTS), GPIO_FN(SCIFB_CTS),
+ GPIO_FN(SCIFB_TXD), GPIO_FN(SCIFB_RXD),
+
+ /* CEU */
+ GPIO_FN(VIO_HD), GPIO_FN(VIO_CKO1), GPIO_FN(VIO_CKO2),
+ GPIO_FN(VIO_VD), GPIO_FN(VIO_CLK), GPIO_FN(VIO_FIELD),
+ GPIO_FN(VIO_CKO), GPIO_FN(VIO_D0), GPIO_FN(VIO_D1),
+ GPIO_FN(VIO_D2), GPIO_FN(VIO_D3), GPIO_FN(VIO_D4),
+ GPIO_FN(VIO_D5), GPIO_FN(VIO_D6), GPIO_FN(VIO_D7),
+ GPIO_FN(VIO_D8), GPIO_FN(VIO_D9), GPIO_FN(VIO_D10),
+ GPIO_FN(VIO_D11), GPIO_FN(VIO_D12), GPIO_FN(VIO_D13),
+ GPIO_FN(VIO_D14), GPIO_FN(VIO_D15),
+
+ /* USB0 */
+ GPIO_FN(IDIN_0), GPIO_FN(EXTLP_0), GPIO_FN(OVCN2_0),
+ GPIO_FN(PWEN_0), GPIO_FN(OVCN_0), GPIO_FN(VBUS0_0),
+
+ /* USB1 */
+ GPIO_FN(IDIN_1_18), GPIO_FN(IDIN_1_113),
+ GPIO_FN(OVCN_1_114), GPIO_FN(OVCN_1_162),
+ GPIO_FN(PWEN_1_115), GPIO_FN(PWEN_1_138),
+ GPIO_FN(EXTLP_1), GPIO_FN(OVCN2_1),
+ GPIO_FN(VBUS0_1),
+
+ /* GPIO */
+ GPIO_FN(GPI0), GPIO_FN(GPI1), GPIO_FN(GPO0), GPIO_FN(GPO1),
+
+ /* BSC */
+ GPIO_FN(BS), GPIO_FN(WE1), GPIO_FN(CKO),
+ GPIO_FN(WAIT), GPIO_FN(RDWR),
+
+ GPIO_FN(A0), GPIO_FN(A1), GPIO_FN(A2),
+ GPIO_FN(A3), GPIO_FN(A6), GPIO_FN(A7),
+ GPIO_FN(A8), GPIO_FN(A9), GPIO_FN(A10),
+ GPIO_FN(A11), GPIO_FN(A12), GPIO_FN(A13),
+ GPIO_FN(A14), GPIO_FN(A15), GPIO_FN(A16),
+ GPIO_FN(A17), GPIO_FN(A18), GPIO_FN(A19),
+ GPIO_FN(A20), GPIO_FN(A21), GPIO_FN(A22),
+ GPIO_FN(A23), GPIO_FN(A24), GPIO_FN(A25),
+ GPIO_FN(A26),
+
+ GPIO_FN(CS0), GPIO_FN(CS2), GPIO_FN(CS4),
+ GPIO_FN(CS5A), GPIO_FN(CS5B), GPIO_FN(CS6A),
+
+ /* BSC/FLCTL */
+ GPIO_FN(RD_FSC), GPIO_FN(WE0_FWE), GPIO_FN(A4_FOE),
+ GPIO_FN(A5_FCDE), GPIO_FN(D0_NAF0), GPIO_FN(D1_NAF1),
+ GPIO_FN(D2_NAF2), GPIO_FN(D3_NAF3), GPIO_FN(D4_NAF4),
+ GPIO_FN(D5_NAF5), GPIO_FN(D6_NAF6), GPIO_FN(D7_NAF7),
+ GPIO_FN(D8_NAF8), GPIO_FN(D9_NAF9), GPIO_FN(D10_NAF10),
+ GPIO_FN(D11_NAF11), GPIO_FN(D12_NAF12), GPIO_FN(D13_NAF13),
+ GPIO_FN(D14_NAF14), GPIO_FN(D15_NAF15),
+
+ /* MMCIF(1) */
+ GPIO_FN(MMCD0_0), GPIO_FN(MMCD0_1), GPIO_FN(MMCD0_2),
+ GPIO_FN(MMCD0_3), GPIO_FN(MMCD0_4), GPIO_FN(MMCD0_5),
+ GPIO_FN(MMCD0_6), GPIO_FN(MMCD0_7), GPIO_FN(MMCCMD0),
+ GPIO_FN(MMCCLK0),
+
+ /* MMCIF(2) */
+ GPIO_FN(MMCD1_0), GPIO_FN(MMCD1_1), GPIO_FN(MMCD1_2),
+ GPIO_FN(MMCD1_3), GPIO_FN(MMCD1_4), GPIO_FN(MMCD1_5),
+ GPIO_FN(MMCD1_6), GPIO_FN(MMCD1_7), GPIO_FN(MMCCLK1),
+ GPIO_FN(MMCCMD1),
+
+ /* SPU2 */
+ GPIO_FN(VINT_I),
+
+ /* FLCTL */
+ GPIO_FN(FCE1), GPIO_FN(FCE0), GPIO_FN(FRB),
+
+ /* HSI */
+ GPIO_FN(GP_RX_FLAG), GPIO_FN(GP_RX_DATA), GPIO_FN(GP_TX_READY),
+ GPIO_FN(GP_RX_WAKE), GPIO_FN(MP_TX_FLAG), GPIO_FN(MP_TX_DATA),
+ GPIO_FN(MP_RX_READY), GPIO_FN(MP_TX_WAKE),
+
+ /* MFI */
+ GPIO_FN(MFIv6),
+ GPIO_FN(MFIv4),
+
+ GPIO_FN(MEMC_BUSCLK_MEMC_A0), GPIO_FN(MEMC_ADV_MEMC_DREQ0),
+ GPIO_FN(MEMC_WAIT_MEMC_DREQ1), GPIO_FN(MEMC_CS1_MEMC_A1),
+ GPIO_FN(MEMC_CS0), GPIO_FN(MEMC_NOE),
+ GPIO_FN(MEMC_NWE), GPIO_FN(MEMC_INT),
+
+ GPIO_FN(MEMC_AD0), GPIO_FN(MEMC_AD1), GPIO_FN(MEMC_AD2),
+ GPIO_FN(MEMC_AD3), GPIO_FN(MEMC_AD4), GPIO_FN(MEMC_AD5),
+ GPIO_FN(MEMC_AD6), GPIO_FN(MEMC_AD7), GPIO_FN(MEMC_AD8),
+ GPIO_FN(MEMC_AD9), GPIO_FN(MEMC_AD10), GPIO_FN(MEMC_AD11),
+ GPIO_FN(MEMC_AD12), GPIO_FN(MEMC_AD13), GPIO_FN(MEMC_AD14),
+ GPIO_FN(MEMC_AD15),
+
+ /* SIM */
+ GPIO_FN(SIM_RST), GPIO_FN(SIM_CLK), GPIO_FN(SIM_D),
+
+ /* TPU */
+ GPIO_FN(TPU0TO0), GPIO_FN(TPU0TO1), GPIO_FN(TPU0TO2_93),
+ GPIO_FN(TPU0TO2_99), GPIO_FN(TPU0TO3),
+
+ /* I2C2 */
+ GPIO_FN(I2C_SCL2), GPIO_FN(I2C_SDA2),
+
+ /* I2C3(1) */
+ GPIO_FN(I2C_SCL3), GPIO_FN(I2C_SDA3),
+
+ /* I2C3(2) */
+ GPIO_FN(I2C_SCL3S), GPIO_FN(I2C_SDA3S),
+
+	/* I2C4(1) */
+ GPIO_FN(I2C_SCL4), GPIO_FN(I2C_SDA4),
+
+ /* I2C4(2) */
+ GPIO_FN(I2C_SCL4S), GPIO_FN(I2C_SDA4S),
+
+ /* KEYSC */
+ GPIO_FN(KEYOUT0), GPIO_FN(KEYIN0_121), GPIO_FN(KEYIN0_136),
+ GPIO_FN(KEYOUT1), GPIO_FN(KEYIN1_122), GPIO_FN(KEYIN1_135),
+ GPIO_FN(KEYOUT2), GPIO_FN(KEYIN2_123), GPIO_FN(KEYIN2_134),
+ GPIO_FN(KEYOUT3), GPIO_FN(KEYIN3_124), GPIO_FN(KEYIN3_133),
+ GPIO_FN(KEYOUT4), GPIO_FN(KEYIN4), GPIO_FN(KEYOUT5),
+ GPIO_FN(KEYIN5), GPIO_FN(KEYOUT6), GPIO_FN(KEYIN6),
+ GPIO_FN(KEYOUT7), GPIO_FN(KEYIN7),
+
+ /* LCDC */
+ GPIO_FN(LCDHSYN), GPIO_FN(LCDCS), GPIO_FN(LCDVSYN),
+ GPIO_FN(LCDDCK), GPIO_FN(LCDWR), GPIO_FN(LCDRD),
+ GPIO_FN(LCDDISP), GPIO_FN(LCDRS), GPIO_FN(LCDLCLK),
+ GPIO_FN(LCDDON),
+
+ GPIO_FN(LCDD0), GPIO_FN(LCDD1), GPIO_FN(LCDD2),
+ GPIO_FN(LCDD3), GPIO_FN(LCDD4), GPIO_FN(LCDD5),
+ GPIO_FN(LCDD6), GPIO_FN(LCDD7), GPIO_FN(LCDD8),
+ GPIO_FN(LCDD9), GPIO_FN(LCDD10), GPIO_FN(LCDD11),
+ GPIO_FN(LCDD12), GPIO_FN(LCDD13), GPIO_FN(LCDD14),
+ GPIO_FN(LCDD15), GPIO_FN(LCDD16), GPIO_FN(LCDD17),
+ GPIO_FN(LCDD18), GPIO_FN(LCDD19), GPIO_FN(LCDD20),
+ GPIO_FN(LCDD21), GPIO_FN(LCDD22), GPIO_FN(LCDD23),
+
+ GPIO_FN(LCDC0_SELECT),
+ GPIO_FN(LCDC1_SELECT),
+
+ /* IRDA */
+ GPIO_FN(IRDA_OUT), GPIO_FN(IRDA_IN), GPIO_FN(IRDA_FIRSEL),
+ GPIO_FN(IROUT_139), GPIO_FN(IROUT_140),
+
+ /* TSIF1 */
+ GPIO_FN(TS0_1SELECT),
+ GPIO_FN(TS0_2SELECT),
+ GPIO_FN(TS1_1SELECT),
+ GPIO_FN(TS1_2SELECT),
+
+ GPIO_FN(TS_SPSYNC1), GPIO_FN(TS_SDAT1),
+ GPIO_FN(TS_SDEN1), GPIO_FN(TS_SCK1),
+
+ /* TSIF2 */
+ GPIO_FN(TS_SPSYNC2), GPIO_FN(TS_SDAT2),
+ GPIO_FN(TS_SDEN2), GPIO_FN(TS_SCK2),
+
+ /* HDMI */
+ GPIO_FN(HDMI_HPD), GPIO_FN(HDMI_CEC),
+
+ /* SDHI0 */
+ GPIO_FN(SDHICLK0), GPIO_FN(SDHICD0), GPIO_FN(SDHICMD0),
+ GPIO_FN(SDHIWP0), GPIO_FN(SDHID0_0), GPIO_FN(SDHID0_1),
+ GPIO_FN(SDHID0_2), GPIO_FN(SDHID0_3),
+
+ /* SDHI1 */
+ GPIO_FN(SDHICLK1), GPIO_FN(SDHICMD1), GPIO_FN(SDHID1_0),
+ GPIO_FN(SDHID1_1), GPIO_FN(SDHID1_2), GPIO_FN(SDHID1_3),
+
+ /* SDHI2 */
+ GPIO_FN(SDHICLK2), GPIO_FN(SDHICMD2), GPIO_FN(SDHID2_0),
+ GPIO_FN(SDHID2_1), GPIO_FN(SDHID2_2), GPIO_FN(SDHID2_3),
+
+ /* SDENC */
+ GPIO_FN(SDENC_CPG),
+ GPIO_FN(SDENC_DV_CLKI),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ PORTCR(0, 0xE6051000), /* PORT0CR */
+ PORTCR(1, 0xE6051001), /* PORT1CR */
+ PORTCR(2, 0xE6051002), /* PORT2CR */
+ PORTCR(3, 0xE6051003), /* PORT3CR */
+ PORTCR(4, 0xE6051004), /* PORT4CR */
+ PORTCR(5, 0xE6051005), /* PORT5CR */
+ PORTCR(6, 0xE6051006), /* PORT6CR */
+ PORTCR(7, 0xE6051007), /* PORT7CR */
+ PORTCR(8, 0xE6051008), /* PORT8CR */
+ PORTCR(9, 0xE6051009), /* PORT9CR */
+ PORTCR(10, 0xE605100A), /* PORT10CR */
+ PORTCR(11, 0xE605100B), /* PORT11CR */
+ PORTCR(12, 0xE605100C), /* PORT12CR */
+ PORTCR(13, 0xE605100D), /* PORT13CR */
+ PORTCR(14, 0xE605100E), /* PORT14CR */
+ PORTCR(15, 0xE605100F), /* PORT15CR */
+ PORTCR(16, 0xE6051010), /* PORT16CR */
+ PORTCR(17, 0xE6051011), /* PORT17CR */
+ PORTCR(18, 0xE6051012), /* PORT18CR */
+ PORTCR(19, 0xE6051013), /* PORT19CR */
+ PORTCR(20, 0xE6051014), /* PORT20CR */
+ PORTCR(21, 0xE6051015), /* PORT21CR */
+ PORTCR(22, 0xE6051016), /* PORT22CR */
+ PORTCR(23, 0xE6051017), /* PORT23CR */
+ PORTCR(24, 0xE6051018), /* PORT24CR */
+ PORTCR(25, 0xE6051019), /* PORT25CR */
+ PORTCR(26, 0xE605101A), /* PORT26CR */
+ PORTCR(27, 0xE605101B), /* PORT27CR */
+ PORTCR(28, 0xE605101C), /* PORT28CR */
+ PORTCR(29, 0xE605101D), /* PORT29CR */
+ PORTCR(30, 0xE605101E), /* PORT30CR */
+ PORTCR(31, 0xE605101F), /* PORT31CR */
+ PORTCR(32, 0xE6051020), /* PORT32CR */
+ PORTCR(33, 0xE6051021), /* PORT33CR */
+ PORTCR(34, 0xE6051022), /* PORT34CR */
+ PORTCR(35, 0xE6051023), /* PORT35CR */
+ PORTCR(36, 0xE6051024), /* PORT36CR */
+ PORTCR(37, 0xE6051025), /* PORT37CR */
+ PORTCR(38, 0xE6051026), /* PORT38CR */
+ PORTCR(39, 0xE6051027), /* PORT39CR */
+ PORTCR(40, 0xE6051028), /* PORT40CR */
+ PORTCR(41, 0xE6051029), /* PORT41CR */
+ PORTCR(42, 0xE605102A), /* PORT42CR */
+ PORTCR(43, 0xE605102B), /* PORT43CR */
+ PORTCR(44, 0xE605102C), /* PORT44CR */
+ PORTCR(45, 0xE605102D), /* PORT45CR */
+ PORTCR(46, 0xE605202E), /* PORT46CR */
+ PORTCR(47, 0xE605202F), /* PORT47CR */
+ PORTCR(48, 0xE6052030), /* PORT48CR */
+ PORTCR(49, 0xE6052031), /* PORT49CR */
+ PORTCR(50, 0xE6052032), /* PORT50CR */
+ PORTCR(51, 0xE6052033), /* PORT51CR */
+ PORTCR(52, 0xE6052034), /* PORT52CR */
+ PORTCR(53, 0xE6052035), /* PORT53CR */
+ PORTCR(54, 0xE6052036), /* PORT54CR */
+ PORTCR(55, 0xE6052037), /* PORT55CR */
+ PORTCR(56, 0xE6052038), /* PORT56CR */
+ PORTCR(57, 0xE6052039), /* PORT57CR */
+ PORTCR(58, 0xE605203A), /* PORT58CR */
+ PORTCR(59, 0xE605203B), /* PORT59CR */
+ PORTCR(60, 0xE605203C), /* PORT60CR */
+ PORTCR(61, 0xE605203D), /* PORT61CR */
+ PORTCR(62, 0xE605203E), /* PORT62CR */
+ PORTCR(63, 0xE605203F), /* PORT63CR */
+ PORTCR(64, 0xE6052040), /* PORT64CR */
+ PORTCR(65, 0xE6052041), /* PORT65CR */
+ PORTCR(66, 0xE6052042), /* PORT66CR */
+ PORTCR(67, 0xE6052043), /* PORT67CR */
+ PORTCR(68, 0xE6052044), /* PORT68CR */
+ PORTCR(69, 0xE6052045), /* PORT69CR */
+ PORTCR(70, 0xE6052046), /* PORT70CR */
+ PORTCR(71, 0xE6052047), /* PORT71CR */
+ PORTCR(72, 0xE6052048), /* PORT72CR */
+ PORTCR(73, 0xE6052049), /* PORT73CR */
+ PORTCR(74, 0xE605204A), /* PORT74CR */
+ PORTCR(75, 0xE605204B), /* PORT75CR */
+ PORTCR(76, 0xE605004C), /* PORT76CR */
+ PORTCR(77, 0xE605004D), /* PORT77CR */
+ PORTCR(78, 0xE605004E), /* PORT78CR */
+ PORTCR(79, 0xE605004F), /* PORT79CR */
+ PORTCR(80, 0xE6050050), /* PORT80CR */
+ PORTCR(81, 0xE6050051), /* PORT81CR */
+ PORTCR(82, 0xE6050052), /* PORT82CR */
+ PORTCR(83, 0xE6050053), /* PORT83CR */
+ PORTCR(84, 0xE6050054), /* PORT84CR */
+ PORTCR(85, 0xE6050055), /* PORT85CR */
+ PORTCR(86, 0xE6050056), /* PORT86CR */
+ PORTCR(87, 0xE6050057), /* PORT87CR */
+ PORTCR(88, 0xE6050058), /* PORT88CR */
+ PORTCR(89, 0xE6050059), /* PORT89CR */
+ PORTCR(90, 0xE605005A), /* PORT90CR */
+ PORTCR(91, 0xE605005B), /* PORT91CR */
+ PORTCR(92, 0xE605005C), /* PORT92CR */
+ PORTCR(93, 0xE605005D), /* PORT93CR */
+ PORTCR(94, 0xE605005E), /* PORT94CR */
+ PORTCR(95, 0xE605005F), /* PORT95CR */
+ PORTCR(96, 0xE6050060), /* PORT96CR */
+ PORTCR(97, 0xE6050061), /* PORT97CR */
+ PORTCR(98, 0xE6050062), /* PORT98CR */
+ PORTCR(99, 0xE6050063), /* PORT99CR */
+ PORTCR(100, 0xE6053064), /* PORT100CR */
+ PORTCR(101, 0xE6053065), /* PORT101CR */
+ PORTCR(102, 0xE6053066), /* PORT102CR */
+ PORTCR(103, 0xE6053067), /* PORT103CR */
+ PORTCR(104, 0xE6053068), /* PORT104CR */
+ PORTCR(105, 0xE6053069), /* PORT105CR */
+ PORTCR(106, 0xE605306A), /* PORT106CR */
+ PORTCR(107, 0xE605306B), /* PORT107CR */
+ PORTCR(108, 0xE605306C), /* PORT108CR */
+ PORTCR(109, 0xE605306D), /* PORT109CR */
+ PORTCR(110, 0xE605306E), /* PORT110CR */
+ PORTCR(111, 0xE605306F), /* PORT111CR */
+ PORTCR(112, 0xE6053070), /* PORT112CR */
+ PORTCR(113, 0xE6053071), /* PORT113CR */
+ PORTCR(114, 0xE6053072), /* PORT114CR */
+ PORTCR(115, 0xE6053073), /* PORT115CR */
+ PORTCR(116, 0xE6053074), /* PORT116CR */
+ PORTCR(117, 0xE6053075), /* PORT117CR */
+ PORTCR(118, 0xE6053076), /* PORT118CR */
+ PORTCR(119, 0xE6053077), /* PORT119CR */
+ PORTCR(120, 0xE6053078), /* PORT120CR */
+ PORTCR(121, 0xE6050079), /* PORT121CR */
+ PORTCR(122, 0xE605007A), /* PORT122CR */
+ PORTCR(123, 0xE605007B), /* PORT123CR */
+ PORTCR(124, 0xE605007C), /* PORT124CR */
+ PORTCR(125, 0xE605007D), /* PORT125CR */
+ PORTCR(126, 0xE605007E), /* PORT126CR */
+ PORTCR(127, 0xE605007F), /* PORT127CR */
+ PORTCR(128, 0xE6050080), /* PORT128CR */
+ PORTCR(129, 0xE6050081), /* PORT129CR */
+ PORTCR(130, 0xE6050082), /* PORT130CR */
+ PORTCR(131, 0xE6050083), /* PORT131CR */
+ PORTCR(132, 0xE6050084), /* PORT132CR */
+ PORTCR(133, 0xE6050085), /* PORT133CR */
+ PORTCR(134, 0xE6050086), /* PORT134CR */
+ PORTCR(135, 0xE6050087), /* PORT135CR */
+ PORTCR(136, 0xE6050088), /* PORT136CR */
+ PORTCR(137, 0xE6050089), /* PORT137CR */
+ PORTCR(138, 0xE605008A), /* PORT138CR */
+ PORTCR(139, 0xE605008B), /* PORT139CR */
+ PORTCR(140, 0xE605008C), /* PORT140CR */
+ PORTCR(141, 0xE605008D), /* PORT141CR */
+ PORTCR(142, 0xE605008E), /* PORT142CR */
+ PORTCR(143, 0xE605008F), /* PORT143CR */
+ PORTCR(144, 0xE6050090), /* PORT144CR */
+ PORTCR(145, 0xE6050091), /* PORT145CR */
+ PORTCR(146, 0xE6050092), /* PORT146CR */
+ PORTCR(147, 0xE6050093), /* PORT147CR */
+ PORTCR(148, 0xE6050094), /* PORT148CR */
+ PORTCR(149, 0xE6050095), /* PORT149CR */
+ PORTCR(150, 0xE6050096), /* PORT150CR */
+ PORTCR(151, 0xE6050097), /* PORT151CR */
+ PORTCR(152, 0xE6053098), /* PORT152CR */
+ PORTCR(153, 0xE6053099), /* PORT153CR */
+ PORTCR(154, 0xE605309A), /* PORT154CR */
+ PORTCR(155, 0xE605309B), /* PORT155CR */
+ PORTCR(156, 0xE605009C), /* PORT156CR */
+ PORTCR(157, 0xE605009D), /* PORT157CR */
+ PORTCR(158, 0xE605009E), /* PORT158CR */
+ PORTCR(159, 0xE605009F), /* PORT159CR */
+ PORTCR(160, 0xE60500A0), /* PORT160CR */
+ PORTCR(161, 0xE60500A1), /* PORT161CR */
+ PORTCR(162, 0xE60500A2), /* PORT162CR */
+ PORTCR(163, 0xE60500A3), /* PORT163CR */
+ PORTCR(164, 0xE60500A4), /* PORT164CR */
+ PORTCR(165, 0xE60500A5), /* PORT165CR */
+ PORTCR(166, 0xE60500A6), /* PORT166CR */
+ PORTCR(167, 0xE60520A7), /* PORT167CR */
+ PORTCR(168, 0xE60520A8), /* PORT168CR */
+ PORTCR(169, 0xE60520A9), /* PORT169CR */
+ PORTCR(170, 0xE60520AA), /* PORT170CR */
+ PORTCR(171, 0xE60520AB), /* PORT171CR */
+ PORTCR(172, 0xE60520AC), /* PORT172CR */
+ PORTCR(173, 0xE60520AD), /* PORT173CR */
+ PORTCR(174, 0xE60520AE), /* PORT174CR */
+ PORTCR(175, 0xE60520AF), /* PORT175CR */
+ PORTCR(176, 0xE60520B0), /* PORT176CR */
+ PORTCR(177, 0xE60520B1), /* PORT177CR */
+ PORTCR(178, 0xE60520B2), /* PORT178CR */
+ PORTCR(179, 0xE60520B3), /* PORT179CR */
+ PORTCR(180, 0xE60520B4), /* PORT180CR */
+ PORTCR(181, 0xE60520B5), /* PORT181CR */
+ PORTCR(182, 0xE60520B6), /* PORT182CR */
+ PORTCR(183, 0xE60520B7), /* PORT183CR */
+ PORTCR(184, 0xE60520B8), /* PORT184CR */
+ PORTCR(185, 0xE60520B9), /* PORT185CR */
+ PORTCR(186, 0xE60520BA), /* PORT186CR */
+ PORTCR(187, 0xE60520BB), /* PORT187CR */
+ PORTCR(188, 0xE60520BC), /* PORT188CR */
+ PORTCR(189, 0xE60520BD), /* PORT189CR */
+ PORTCR(190, 0xE60520BE), /* PORT190CR */
+
+ { PINMUX_CFG_REG("MSEL1CR", 0xE605800C, 32, 1) {
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_30_0, MSEL1CR_30_1,
+ MSEL1CR_29_0, MSEL1CR_29_1,
+ MSEL1CR_28_0, MSEL1CR_28_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_26_0, MSEL1CR_26_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ 0, 0, 0, 0,
+ MSEL1CR_9_0, MSEL1CR_9_1,
+ MSEL1CR_8_0, MSEL1CR_8_1,
+ MSEL1CR_7_0, MSEL1CR_7_1,
+ MSEL1CR_6_0, MSEL1CR_6_1,
+ 0, 0,
+ MSEL1CR_4_0, MSEL1CR_4_1,
+ MSEL1CR_3_0, MSEL1CR_3_1,
+ MSEL1CR_2_0, MSEL1CR_2_1,
+ 0, 0,
+ MSEL1CR_0_0, MSEL1CR_0_1,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ MSEL3CR_27_0, MSEL3CR_27_1,
+ MSEL3CR_26_0, MSEL3CR_26_1,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ MSEL3CR_21_0, MSEL3CR_21_1,
+ MSEL3CR_20_0, MSEL3CR_20_1,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ MSEL3CR_15_0, MSEL3CR_15_1,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0,
+ MSEL3CR_9_0, MSEL3CR_9_1,
+ 0, 0, 0, 0,
+ MSEL3CR_6_0, MSEL3CR_6_1,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ MSEL4CR_17_0, MSEL4CR_17_1,
+ MSEL4CR_16_0, MSEL4CR_16_1,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ MSEL4CR_14_0, MSEL4CR_14_1,
+ 0, 0, 0, 0,
+ 0, 0,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ 0, 0, 0, 0,
+ 0, 0,
+ MSEL4CR_6_0, MSEL4CR_6_1,
+ 0, 0,
+ MSEL4CR_4_0, MSEL4CR_4_1,
+ 0, 0, 0, 0,
+ MSEL4CR_1_0, MSEL4CR_1_1,
+ 0, 0,
+ }
+ },
+ { },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PORTL095_064DR", 0xE6054008, 32) {
+ PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
+ PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
+ PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
+ PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+ PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_DATA_REG("PORTL127_096DR", 0xE605400C, 32) {
+ PORT127_DATA, PORT126_DATA, PORT125_DATA, PORT124_DATA,
+ PORT123_DATA, PORT122_DATA, PORT121_DATA, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTL159_128DR", 0xE6054010, 32) {
+ PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
+ 0, 0, 0, 0,
+ PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
+ PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
+ PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
+ PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
+ PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTL191_160DR", 0xE6054014, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT166_DATA, PORT165_DATA, PORT164_DATA,
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTD031_000DR", 0xE6055000, 32) {
+ PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+ PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+ PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+ PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+ PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+ PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+ PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTD063_032DR", 0xE6055004, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, PORT45_DATA, PORT44_DATA,
+ PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
+ PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR063_032DR", 0xE6056004, 32) {
+ PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
+ PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
+ PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
+ PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
+ PORT47_DATA, PORT46_DATA, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR095_064DR", 0xE6056008, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+ PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xE6056014, 32) {
+ 0, PORT190_DATA, PORT189_DATA, PORT188_DATA,
+ PORT187_DATA, PORT186_DATA, PORT185_DATA, PORT184_DATA,
+ PORT183_DATA, PORT182_DATA, PORT181_DATA, PORT180_DATA,
+ PORT179_DATA, PORT178_DATA, PORT177_DATA, PORT176_DATA,
+ PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA,
+ PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
+ PORT167_DATA, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_DATA_REG("PORTU127_096DR", 0xE605700C, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, PORT120_DATA,
+ PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+ PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+ PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+ PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+ PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_DATA_REG("PORTU159_128DR", 0xE6057010, 32) {
+ 0, 0, 0, 0,
+ PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { },
+};
+
+#define EXT_IRQ16L(n) evt2irq(0x200 + ((n) << 5))
+#define EXT_IRQ16H(n) evt2irq(0x3200 + (((n) - 16) << 5))
+static struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(EXT_IRQ16L(0), PORT6_FN0, PORT162_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(1), PORT12_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(2), PORT4_FN0, PORT5_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(3), PORT8_FN0, PORT16_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(4), PORT17_FN0, PORT163_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(5), PORT18_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(6), PORT39_FN0, PORT164_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(7), PORT40_FN0, PORT167_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(8), PORT41_FN0, PORT168_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(9), PORT42_FN0, PORT169_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(10), PORT65_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(11), PORT67_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(12), PORT80_FN0, PORT137_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(13), PORT81_FN0, PORT145_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(14), PORT82_FN0, PORT146_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(15), PORT83_FN0, PORT147_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(16), PORT84_FN0, PORT170_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(17), PORT85_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(18), PORT86_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(19), PORT87_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(20), PORT92_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(21), PORT93_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(22), PORT94_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(23), PORT95_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(24), PORT112_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(25), PORT119_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(26), PORT121_FN0, PORT172_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(27), PORT122_FN0, PORT180_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(28), PORT123_FN0, PORT181_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(29), PORT129_FN0, PORT182_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(30), PORT130_FN0, PORT183_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(31), PORT138_FN0, PORT184_FN0),
+};
+
+struct sh_pfc_soc_info sh7372_pinmux_info = {
+ .name = "sh7372_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PORT0,
+ .last_gpio = GPIO_FN_SDENC_DV_CLKI,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+
+ .gpio_irq = pinmux_irqs,
+ .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
new file mode 100644
index 000000000000..709008e94124
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -0,0 +1,2798 @@
+/*
+ * sh73a0 processor support - PFC hardware block
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Copyright (C) 2010 NISHIMOTO Hiroki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <mach/sh73a0.h>
+#include <mach/irqs.h>
+
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##2, sfx), PORT_10(fn, pfx##3, sfx), \
+ PORT_10(fn, pfx##4, sfx), PORT_10(fn, pfx##5, sfx), \
+ PORT_10(fn, pfx##6, sfx), PORT_10(fn, pfx##7, sfx), \
+ PORT_10(fn, pfx##8, sfx), PORT_10(fn, pfx##9, sfx), \
+ PORT_10(fn, pfx##10, sfx), \
+ PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \
+ PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \
+ PORT_1(fn, pfx##114, sfx), PORT_1(fn, pfx##115, sfx), \
+ PORT_1(fn, pfx##116, sfx), PORT_1(fn, pfx##117, sfx), \
+ PORT_1(fn, pfx##118, sfx), \
+ PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
+ PORT_10(fn, pfx##13, sfx), PORT_10(fn, pfx##14, sfx), \
+ PORT_10(fn, pfx##15, sfx), \
+ PORT_1(fn, pfx##160, sfx), PORT_1(fn, pfx##161, sfx), \
+ PORT_1(fn, pfx##162, sfx), PORT_1(fn, pfx##163, sfx), \
+ PORT_1(fn, pfx##164, sfx), \
+ PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
+ PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
+ PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
+ PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
+ PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
+ PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
+ PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
+ PORT_10(fn, pfx##26, sfx), PORT_10(fn, pfx##27, sfx), \
+ PORT_1(fn, pfx##280, sfx), PORT_1(fn, pfx##281, sfx), \
+ PORT_1(fn, pfx##282, sfx), \
+ PORT_1(fn, pfx##288, sfx), PORT_1(fn, pfx##289, sfx), \
+ PORT_10(fn, pfx##29, sfx), PORT_10(fn, pfx##30, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PORT_ALL(DATA), /* PORT0_DATA -> PORT309_DATA */
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PORT_ALL(IN), /* PORT0_IN -> PORT309_IN */
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */
+ PINMUX_INPUT_PULLDOWN_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PORT_ALL(OUT), /* PORT0_OUT -> PORT309_OUT */
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT309_FN_IN */
+ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT309_FN_OUT */
+ PORT_ALL(FN0), /* PORT0_FN0 -> PORT309_FN0 */
+ PORT_ALL(FN1), /* PORT0_FN1 -> PORT309_FN1 */
+ PORT_ALL(FN2), /* PORT0_FN2 -> PORT309_FN2 */
+ PORT_ALL(FN3), /* PORT0_FN3 -> PORT309_FN3 */
+ PORT_ALL(FN4), /* PORT0_FN4 -> PORT309_FN4 */
+ PORT_ALL(FN5), /* PORT0_FN5 -> PORT309_FN5 */
+ PORT_ALL(FN6), /* PORT0_FN6 -> PORT309_FN6 */
+ PORT_ALL(FN7), /* PORT0_FN7 -> PORT309_FN7 */
+
+ MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
+ MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
+ MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
+ MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
+ MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
+ MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
+ MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1,
+ MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1,
+ MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1,
+ MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1,
+ MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1,
+ MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1,
+ MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
+ MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
+ MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
+ MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
+ MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
+ MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
+ MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
+ MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
+ MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
+ MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
+ MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
+ MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
+ MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
+ MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
+ MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
+ MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
+ MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
+ MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
+ MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1,
+ MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
+ MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
+ MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
+ MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
+ MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* Hardware manual Table 25-1 (Function 0-7) */
+ VBUS_0_MARK,
+ GPI0_MARK,
+ GPI1_MARK,
+ GPI2_MARK,
+ GPI3_MARK,
+ GPI4_MARK,
+ GPI5_MARK,
+ GPI6_MARK,
+ GPI7_MARK,
+ SCIFA7_RXD_MARK,
+ SCIFA7_CTS__MARK,
+ GPO7_MARK, MFG0_OUT2_MARK,
+ GPO6_MARK, MFG1_OUT2_MARK,
+ GPO5_MARK, SCIFA0_SCK_MARK, FSICOSLDT3_MARK, PORT16_VIO_CKOR_MARK,
+ SCIFA0_TXD_MARK,
+ SCIFA7_TXD_MARK,
+ SCIFA7_RTS__MARK, PORT19_VIO_CKO2_MARK,
+ GPO0_MARK,
+ GPO1_MARK,
+ GPO2_MARK, STATUS0_MARK,
+ GPO3_MARK, STATUS1_MARK,
+ GPO4_MARK, STATUS2_MARK,
+ VINT_MARK,
+ TCKON_MARK,
+ XDVFS1_MARK, PORT27_I2C_SCL2_MARK, PORT27_I2C_SCL3_MARK, \
+ MFG0_OUT1_MARK, PORT27_IROUT_MARK,
+ XDVFS2_MARK, PORT28_I2C_SDA2_MARK, PORT28_I2C_SDA3_MARK, \
+ PORT28_TPU1TO1_MARK,
+ SIM_RST_MARK, PORT29_TPU1TO1_MARK,
+ SIM_CLK_MARK, PORT30_VIO_CKOR_MARK,
+ SIM_D_MARK, PORT31_IROUT_MARK,
+ SCIFA4_TXD_MARK,
+ SCIFA4_RXD_MARK, XWUP_MARK,
+ SCIFA4_RTS__MARK,
+ SCIFA4_CTS__MARK,
+ FSIBOBT_MARK, FSIBIBT_MARK,
+ FSIBOLR_MARK, FSIBILR_MARK,
+ FSIBOSLD_MARK,
+ FSIBISLD_MARK,
+ VACK_MARK,
+ XTAL1L_MARK,
+ SCIFA0_RTS__MARK, FSICOSLDT2_MARK,
+ SCIFA0_RXD_MARK,
+ SCIFA0_CTS__MARK, FSICOSLDT1_MARK,
+ FSICOBT_MARK, FSICIBT_MARK, FSIDOBT_MARK, FSIDIBT_MARK,
+ FSICOLR_MARK, FSICILR_MARK, FSIDOLR_MARK, FSIDILR_MARK,
+ FSICOSLD_MARK, PORT47_FSICSPDIF_MARK,
+ FSICISLD_MARK, FSIDISLD_MARK,
+ FSIACK_MARK, PORT49_IRDA_OUT_MARK, PORT49_IROUT_MARK, FSIAOMC_MARK,
+ FSIAOLR_MARK, BBIF2_TSYNC2_MARK, TPU2TO2_MARK, FSIAILR_MARK,
+
+ FSIAOBT_MARK, BBIF2_TSCK2_MARK, TPU2TO3_MARK, FSIAIBT_MARK,
+ FSIAOSLD_MARK, BBIF2_TXD2_MARK,
+ FSIASPDIF_MARK, PORT53_IRDA_IN_MARK, TPU3TO3_MARK, FSIBSPDIF_MARK, \
+ PORT53_FSICSPDIF_MARK,
+ FSIBCK_MARK, PORT54_IRDA_FIRSEL_MARK, TPU3TO2_MARK, FSIBOMC_MARK, \
+ FSICCK_MARK, FSICOMC_MARK,
+ FSIAISLD_MARK, TPU0TO0_MARK,
+ A0_MARK, BS__MARK,
+ A12_MARK, PORT58_KEYOUT7_MARK, TPU4TO2_MARK,
+ A13_MARK, PORT59_KEYOUT6_MARK, TPU0TO1_MARK,
+ A14_MARK, KEYOUT5_MARK,
+ A15_MARK, KEYOUT4_MARK,
+ A16_MARK, KEYOUT3_MARK, MSIOF0_SS1_MARK,
+ A17_MARK, KEYOUT2_MARK, MSIOF0_TSYNC_MARK,
+ A18_MARK, KEYOUT1_MARK, MSIOF0_TSCK_MARK,
+ A19_MARK, KEYOUT0_MARK, MSIOF0_TXD_MARK,
+ A20_MARK, KEYIN0_MARK, MSIOF0_RSCK_MARK,
+ A21_MARK, KEYIN1_MARK, MSIOF0_RSYNC_MARK,
+ A22_MARK, KEYIN2_MARK, MSIOF0_MCK0_MARK,
+ A23_MARK, KEYIN3_MARK, MSIOF0_MCK1_MARK,
+ A24_MARK, KEYIN4_MARK, MSIOF0_RXD_MARK,
+ A25_MARK, KEYIN5_MARK, MSIOF0_SS2_MARK,
+ A26_MARK, KEYIN6_MARK,
+ KEYIN7_MARK,
+ D0_NAF0_MARK,
+ D1_NAF1_MARK,
+ D2_NAF2_MARK,
+ D3_NAF3_MARK,
+ D4_NAF4_MARK,
+ D5_NAF5_MARK,
+ D6_NAF6_MARK,
+ D7_NAF7_MARK,
+ D8_NAF8_MARK,
+ D9_NAF9_MARK,
+ D10_NAF10_MARK,
+ D11_NAF11_MARK,
+ D12_NAF12_MARK,
+ D13_NAF13_MARK,
+ D14_NAF14_MARK,
+ D15_NAF15_MARK,
+ CS4__MARK,
+ CS5A__MARK, PORT91_RDWR_MARK,
+ CS5B__MARK, FCE1__MARK,
+ CS6B__MARK, DACK0_MARK,
+ FCE0__MARK, CS6A__MARK,
+ WAIT__MARK, DREQ0_MARK,
+ RD__FSC_MARK,
+ WE0__FWE_MARK, RDWR_FWE_MARK,
+ WE1__MARK,
+ FRB_MARK,
+ CKO_MARK,
+ NBRSTOUT__MARK,
+ NBRST__MARK,
+ BBIF2_TXD_MARK,
+ BBIF2_RXD_MARK,
+ BBIF2_SYNC_MARK,
+ BBIF2_SCK_MARK,
+ SCIFA3_CTS__MARK, MFG3_IN2_MARK,
+ SCIFA3_RXD_MARK, MFG3_IN1_MARK,
+ BBIF1_SS2_MARK, SCIFA3_RTS__MARK, MFG3_OUT1_MARK,
+ SCIFA3_TXD_MARK,
+ HSI_RX_DATA_MARK, BBIF1_RXD_MARK,
+ HSI_TX_WAKE_MARK, BBIF1_TSCK_MARK,
+ HSI_TX_DATA_MARK, BBIF1_TSYNC_MARK,
+ HSI_TX_READY_MARK, BBIF1_TXD_MARK,
+ HSI_RX_READY_MARK, BBIF1_RSCK_MARK, PORT115_I2C_SCL2_MARK, \
+ PORT115_I2C_SCL3_MARK,
+ HSI_RX_WAKE_MARK, BBIF1_RSYNC_MARK, PORT116_I2C_SDA2_MARK, \
+ PORT116_I2C_SDA3_MARK,
+ HSI_RX_FLAG_MARK, BBIF1_SS1_MARK, BBIF1_FLOW_MARK,
+ HSI_TX_FLAG_MARK,
+ VIO_VD_MARK, PORT128_LCD2VSYN_MARK, VIO2_VD_MARK, LCD2D0_MARK,
+
+ VIO_HD_MARK, PORT129_LCD2HSYN_MARK, PORT129_LCD2CS__MARK, \
+ VIO2_HD_MARK, LCD2D1_MARK,
+ VIO_D0_MARK, PORT130_MSIOF2_RXD_MARK, LCD2D10_MARK,
+ VIO_D1_MARK, PORT131_KEYOUT6_MARK, PORT131_MSIOF2_SS1_MARK, \
+ PORT131_KEYOUT11_MARK, LCD2D11_MARK,
+ VIO_D2_MARK, PORT132_KEYOUT7_MARK, PORT132_MSIOF2_SS2_MARK, \
+ PORT132_KEYOUT10_MARK, LCD2D12_MARK,
+ VIO_D3_MARK, MSIOF2_TSYNC_MARK, LCD2D13_MARK,
+ VIO_D4_MARK, MSIOF2_TXD_MARK, LCD2D14_MARK,
+ VIO_D5_MARK, MSIOF2_TSCK_MARK, LCD2D15_MARK,
+ VIO_D6_MARK, PORT136_KEYOUT8_MARK, LCD2D16_MARK,
+ VIO_D7_MARK, PORT137_KEYOUT9_MARK, LCD2D17_MARK,
+ VIO_D8_MARK, PORT138_KEYOUT8_MARK, VIO2_D0_MARK, LCD2D6_MARK,
+ VIO_D9_MARK, PORT139_KEYOUT9_MARK, VIO2_D1_MARK, LCD2D7_MARK,
+ VIO_D10_MARK, TPU0TO2_MARK, VIO2_D2_MARK, LCD2D8_MARK,
+ VIO_D11_MARK, TPU0TO3_MARK, VIO2_D3_MARK, LCD2D9_MARK,
+ VIO_D12_MARK, PORT142_KEYOUT10_MARK, VIO2_D4_MARK, LCD2D2_MARK,
+ VIO_D13_MARK, PORT143_KEYOUT11_MARK, PORT143_KEYOUT6_MARK, \
+ VIO2_D5_MARK, LCD2D3_MARK,
+ VIO_D14_MARK, PORT144_KEYOUT7_MARK, VIO2_D6_MARK, LCD2D4_MARK,
+ VIO_D15_MARK, TPU1TO3_MARK, PORT145_LCD2DISP_MARK, \
+ PORT145_LCD2RS_MARK, VIO2_D7_MARK, LCD2D5_MARK,
+ VIO_CLK_MARK, LCD2DCK_MARK, PORT146_LCD2WR__MARK, VIO2_CLK_MARK, \
+ LCD2D18_MARK,
+ VIO_FIELD_MARK, LCD2RD__MARK, VIO2_FIELD_MARK, LCD2D19_MARK,
+ VIO_CKO_MARK,
+ A27_MARK, PORT149_RDWR_MARK, MFG0_IN1_MARK, PORT149_KEYOUT9_MARK,
+ MFG0_IN2_MARK,
+ TS_SPSYNC3_MARK, MSIOF2_RSCK_MARK,
+ TS_SDAT3_MARK, MSIOF2_RSYNC_MARK,
+ TPU1TO2_MARK, TS_SDEN3_MARK, PORT153_MSIOF2_SS1_MARK,
+ SCIFA2_TXD1_MARK, MSIOF2_MCK0_MARK,
+ SCIFA2_RXD1_MARK, MSIOF2_MCK1_MARK,
+ SCIFA2_RTS1__MARK, PORT156_MSIOF2_SS2_MARK,
+ SCIFA2_CTS1__MARK, PORT157_MSIOF2_RXD_MARK,
+ DINT__MARK, SCIFA2_SCK1_MARK, TS_SCK3_MARK,
+ PORT159_SCIFB_SCK_MARK, PORT159_SCIFA5_SCK_MARK, NMI_MARK,
+ PORT160_SCIFB_TXD_MARK, PORT160_SCIFA5_TXD_MARK,
+ PORT161_SCIFB_CTS__MARK, PORT161_SCIFA5_CTS__MARK,
+ PORT162_SCIFB_RXD_MARK, PORT162_SCIFA5_RXD_MARK,
+ PORT163_SCIFB_RTS__MARK, PORT163_SCIFA5_RTS__MARK, TPU3TO0_MARK,
+ LCDD0_MARK,
+ LCDD1_MARK, PORT193_SCIFA5_CTS__MARK, BBIF2_TSYNC1_MARK,
+ LCDD2_MARK, PORT194_SCIFA5_RTS__MARK, BBIF2_TSCK1_MARK,
+ LCDD3_MARK, PORT195_SCIFA5_RXD_MARK, BBIF2_TXD1_MARK,
+ LCDD4_MARK, PORT196_SCIFA5_TXD_MARK,
+ LCDD5_MARK, PORT197_SCIFA5_SCK_MARK, MFG2_OUT2_MARK, TPU2TO1_MARK,
+ LCDD6_MARK,
+ LCDD7_MARK, TPU4TO1_MARK, MFG4_OUT2_MARK,
+ LCDD8_MARK, D16_MARK,
+ LCDD9_MARK, D17_MARK,
+ LCDD10_MARK, D18_MARK,
+ LCDD11_MARK, D19_MARK,
+ LCDD12_MARK, D20_MARK,
+ LCDD13_MARK, D21_MARK,
+ LCDD14_MARK, D22_MARK,
+ LCDD15_MARK, PORT207_MSIOF0L_SS1_MARK, D23_MARK,
+ LCDD16_MARK, PORT208_MSIOF0L_SS2_MARK, D24_MARK,
+ LCDD17_MARK, D25_MARK,
+ LCDD18_MARK, DREQ2_MARK, PORT210_MSIOF0L_SS1_MARK, D26_MARK,
+ LCDD19_MARK, PORT211_MSIOF0L_SS2_MARK, D27_MARK,
+ LCDD20_MARK, TS_SPSYNC1_MARK, MSIOF0L_MCK0_MARK, D28_MARK,
+ LCDD21_MARK, TS_SDAT1_MARK, MSIOF0L_MCK1_MARK, D29_MARK,
+ LCDD22_MARK, TS_SDEN1_MARK, MSIOF0L_RSCK_MARK, D30_MARK,
+ LCDD23_MARK, TS_SCK1_MARK, MSIOF0L_RSYNC_MARK, D31_MARK,
+ LCDDCK_MARK, LCDWR__MARK,
+ LCDRD__MARK, DACK2_MARK, PORT217_LCD2RS_MARK, MSIOF0L_TSYNC_MARK, \
+ VIO2_FIELD3_MARK, PORT217_LCD2DISP_MARK,
+ LCDHSYN_MARK, LCDCS__MARK, LCDCS2__MARK, DACK3_MARK, \
+ PORT218_VIO_CKOR_MARK,
+ LCDDISP_MARK, LCDRS_MARK, PORT219_LCD2WR__MARK, DREQ3_MARK, \
+ MSIOF0L_TSCK_MARK, VIO2_CLK3_MARK, LCD2DCK_2_MARK,
+ LCDVSYN_MARK, LCDVSYN2_MARK,
+ LCDLCLK_MARK, DREQ1_MARK, PORT221_LCD2CS__MARK, PWEN_MARK, \
+ MSIOF0L_RXD_MARK, VIO2_HD3_MARK, PORT221_LCD2HSYN_MARK,
+ LCDDON_MARK, LCDDON2_MARK, DACK1_MARK, OVCN_MARK, MSIOF0L_TXD_MARK, \
+ VIO2_VD3_MARK, PORT222_LCD2VSYN_MARK,
+
+ SCIFA1_TXD_MARK, OVCN2_MARK,
+ EXTLP_MARK, SCIFA1_SCK_MARK, PORT226_VIO_CKO2_MARK,
+ SCIFA1_RTS__MARK, IDIN_MARK,
+ SCIFA1_RXD_MARK,
+ SCIFA1_CTS__MARK, MFG1_IN1_MARK,
+ MSIOF1_TXD_MARK, SCIFA2_TXD2_MARK,
+ MSIOF1_TSYNC_MARK, SCIFA2_CTS2__MARK,
+ MSIOF1_TSCK_MARK, SCIFA2_SCK2_MARK,
+ MSIOF1_RXD_MARK, SCIFA2_RXD2_MARK,
+ MSIOF1_RSCK_MARK, SCIFA2_RTS2__MARK, VIO2_CLK2_MARK, LCD2D20_MARK,
+ MSIOF1_RSYNC_MARK, MFG1_IN2_MARK, VIO2_VD2_MARK, LCD2D21_MARK,
+ MSIOF1_MCK0_MARK, PORT236_I2C_SDA2_MARK,
+ MSIOF1_MCK1_MARK, PORT237_I2C_SCL2_MARK,
+ MSIOF1_SS1_MARK, VIO2_FIELD2_MARK, LCD2D22_MARK,
+ MSIOF1_SS2_MARK, VIO2_HD2_MARK, LCD2D23_MARK,
+ SCIFA6_TXD_MARK,
+ PORT241_IRDA_OUT_MARK, PORT241_IROUT_MARK, MFG4_OUT1_MARK, TPU4TO0_MARK,
+ PORT242_IRDA_IN_MARK, MFG4_IN2_MARK,
+ PORT243_IRDA_FIRSEL_MARK, PORT243_VIO_CKO2_MARK,
+ PORT244_SCIFA5_CTS__MARK, MFG2_IN1_MARK, PORT244_SCIFB_CTS__MARK, \
+ MSIOF2R_RXD_MARK,
+ PORT245_SCIFA5_RTS__MARK, MFG2_IN2_MARK, PORT245_SCIFB_RTS__MARK, \
+ MSIOF2R_TXD_MARK,
+ PORT246_SCIFA5_RXD_MARK, MFG1_OUT1_MARK, PORT246_SCIFB_RXD_MARK, \
+ TPU1TO0_MARK,
+ PORT247_SCIFA5_TXD_MARK, MFG3_OUT2_MARK, PORT247_SCIFB_TXD_MARK, \
+ TPU3TO1_MARK,
+ PORT248_SCIFA5_SCK_MARK, MFG2_OUT1_MARK, PORT248_SCIFB_SCK_MARK, \
+ TPU2TO0_MARK, PORT248_I2C_SCL3_MARK, MSIOF2R_TSCK_MARK,
+ PORT249_IROUT_MARK, MFG4_IN1_MARK, PORT249_I2C_SDA3_MARK, \
+ MSIOF2R_TSYNC_MARK,
+ SDHICLK0_MARK,
+ SDHICD0_MARK,
+ SDHID0_0_MARK,
+ SDHID0_1_MARK,
+ SDHID0_2_MARK,
+ SDHID0_3_MARK,
+ SDHICMD0_MARK,
+ SDHIWP0_MARK,
+ SDHICLK1_MARK,
+ SDHID1_0_MARK, TS_SPSYNC2_MARK,
+ SDHID1_1_MARK, TS_SDAT2_MARK,
+ SDHID1_2_MARK, TS_SDEN2_MARK,
+ SDHID1_3_MARK, TS_SCK2_MARK,
+ SDHICMD1_MARK,
+ SDHICLK2_MARK,
+ SDHID2_0_MARK, TS_SPSYNC4_MARK,
+ SDHID2_1_MARK, TS_SDAT4_MARK,
+ SDHID2_2_MARK, TS_SDEN4_MARK,
+ SDHID2_3_MARK, TS_SCK4_MARK,
+ SDHICMD2_MARK,
+ MMCCLK0_MARK,
+ MMCD0_0_MARK,
+ MMCD0_1_MARK,
+ MMCD0_2_MARK,
+ MMCD0_3_MARK,
+ MMCD0_4_MARK, TS_SPSYNC5_MARK,
+ MMCD0_5_MARK, TS_SDAT5_MARK,
+ MMCD0_6_MARK, TS_SDEN5_MARK,
+ MMCD0_7_MARK, TS_SCK5_MARK,
+ MMCCMD0_MARK,
+ RESETOUTS__MARK, EXTAL2OUT_MARK,
+ MCP_WAIT__MCP_FRB_MARK,
+ MCP_CKO_MARK, MMCCLK1_MARK,
+ MCP_D15_MCP_NAF15_MARK,
+ MCP_D14_MCP_NAF14_MARK,
+ MCP_D13_MCP_NAF13_MARK,
+ MCP_D12_MCP_NAF12_MARK,
+ MCP_D11_MCP_NAF11_MARK,
+ MCP_D10_MCP_NAF10_MARK,
+ MCP_D9_MCP_NAF9_MARK,
+ MCP_D8_MCP_NAF8_MARK, MMCCMD1_MARK,
+ MCP_D7_MCP_NAF7_MARK, MMCD1_7_MARK,
+
+ MCP_D6_MCP_NAF6_MARK, MMCD1_6_MARK,
+ MCP_D5_MCP_NAF5_MARK, MMCD1_5_MARK,
+ MCP_D4_MCP_NAF4_MARK, MMCD1_4_MARK,
+ MCP_D3_MCP_NAF3_MARK, MMCD1_3_MARK,
+ MCP_D2_MCP_NAF2_MARK, MMCD1_2_MARK,
+ MCP_D1_MCP_NAF1_MARK, MMCD1_1_MARK,
+ MCP_D0_MCP_NAF0_MARK, MMCD1_0_MARK,
+ MCP_NBRSTOUT__MARK,
+ MCP_WE0__MCP_FWE_MARK, MCP_RDWR_MCP_FWE_MARK,
+
+ /* MSEL2 special cases */
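+ /*
+  * The MSEL2/MSEL3/MSEL4 groups below are not tied to a single port;
+  * they are selected through the MSELxCR mode-select registers
+  * (see pinmux_data[] below).
+  */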
+ TSIF2_TS_XX1_MARK,
+ TSIF2_TS_XX2_MARK,
+ TSIF2_TS_XX3_MARK,
+ TSIF2_TS_XX4_MARK,
+ TSIF2_TS_XX5_MARK,
+ TSIF1_TS_XX1_MARK,
+ TSIF1_TS_XX2_MARK,
+ TSIF1_TS_XX3_MARK,
+ TSIF1_TS_XX4_MARK,
+ TSIF1_TS_XX5_MARK,
+ TSIF0_TS_XX1_MARK,
+ TSIF0_TS_XX2_MARK,
+ TSIF0_TS_XX3_MARK,
+ TSIF0_TS_XX4_MARK,
+ TSIF0_TS_XX5_MARK,
+ MST1_TS_XX1_MARK,
+ MST1_TS_XX2_MARK,
+ MST1_TS_XX3_MARK,
+ MST1_TS_XX4_MARK,
+ MST1_TS_XX5_MARK,
+ MST0_TS_XX1_MARK,
+ MST0_TS_XX2_MARK,
+ MST0_TS_XX3_MARK,
+ MST0_TS_XX4_MARK,
+ MST0_TS_XX5_MARK,
+
+ /* MSEL3 special cases */
+ SDHI0_VCCQ_MC0_ON_MARK,
+ SDHI0_VCCQ_MC0_OFF_MARK,
+ DEBUG_MON_VIO_MARK,
+ DEBUG_MON_LCDD_MARK,
+ LCDC_LCDC0_MARK,
+ LCDC_LCDC1_MARK,
+
+ /* MSEL4 special cases */
+ IRQ9_MEM_INT_MARK,
+ IRQ9_MCP_INT_MARK,
+ A11_MARK,
+ KEYOUT8_MARK,
+ TPU4TO3_MARK,
+ RESETA_N_PU_ON_MARK,
+ RESETA_N_PU_OFF_MARK,
+ EDBGREQ_PD_MARK,
+ EDBGREQ_PU_MARK,
+
+ /* Functions with pull-ups */
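+ /*
+  * Pull-up variants of the key, SDHI, MMC and FSI functions above;
+  * their pinmux_data[] entries add the port's PORTn_IN_PU bias setting.
+  */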
+ KEYIN0_PU_MARK,
+ KEYIN1_PU_MARK,
+ KEYIN2_PU_MARK,
+ KEYIN3_PU_MARK,
+ KEYIN4_PU_MARK,
+ KEYIN5_PU_MARK,
+ KEYIN6_PU_MARK,
+ KEYIN7_PU_MARK,
+ SDHICD0_PU_MARK,
+ SDHID0_0_PU_MARK,
+ SDHID0_1_PU_MARK,
+ SDHID0_2_PU_MARK,
+ SDHID0_3_PU_MARK,
+ SDHICMD0_PU_MARK,
+ SDHIWP0_PU_MARK,
+ SDHID1_0_PU_MARK,
+ SDHID1_1_PU_MARK,
+ SDHID1_2_PU_MARK,
+ SDHID1_3_PU_MARK,
+ SDHICMD1_PU_MARK,
+ SDHID2_0_PU_MARK,
+ SDHID2_1_PU_MARK,
+ SDHID2_2_PU_MARK,
+ SDHID2_3_PU_MARK,
+ SDHICMD2_PU_MARK,
+ MMCCMD0_PU_MARK,
+ MMCCMD1_PU_MARK,
+ MMCD0_0_PU_MARK,
+ MMCD0_1_PU_MARK,
+ MMCD0_2_PU_MARK,
+ MMCD0_3_PU_MARK,
+ MMCD0_4_PU_MARK,
+ MMCD0_5_PU_MARK,
+ MMCD0_6_PU_MARK,
+ MMCD0_7_PU_MARK,
+ FSIBISLD_PU_MARK,
+ FSIACK_PU_MARK,
+ FSIAILR_PU_MARK,
+ FSIAIBT_PU_MARK,
+ FSIAISLD_PU_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* specify valid pin states for each pin in GPIO mode */
+
+ /* Table 25-1 (I/O and Pull U/D) */
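+ /*
+  * Macro suffixes: _I = input-only, _O = output-only, _IO = bidirectional;
+  * _PU/_PD/_PU_PD name the pull-up/pull-down options available on the pin.
+  */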
+ PORT_DATA_I_PD(0),
+ PORT_DATA_I_PU(1),
+ PORT_DATA_I_PU(2),
+ PORT_DATA_I_PU(3),
+ PORT_DATA_I_PU(4),
+ PORT_DATA_I_PU(5),
+ PORT_DATA_I_PU(6),
+ PORT_DATA_I_PU(7),
+ PORT_DATA_I_PU(8),
+ PORT_DATA_I_PD(9),
+ PORT_DATA_I_PD(10),
+ PORT_DATA_I_PU_PD(11),
+ PORT_DATA_IO_PU_PD(12),
+ PORT_DATA_IO_PU_PD(13),
+ PORT_DATA_IO_PU_PD(14),
+ PORT_DATA_IO_PU_PD(15),
+ PORT_DATA_IO_PD(16),
+ PORT_DATA_IO_PD(17),
+ PORT_DATA_IO_PU(18),
+ PORT_DATA_IO_PU(19),
+ PORT_DATA_O(20),
+ PORT_DATA_O(21),
+ PORT_DATA_O(22),
+ PORT_DATA_O(23),
+ PORT_DATA_O(24),
+ PORT_DATA_I_PD(25),
+ PORT_DATA_I_PD(26),
+ PORT_DATA_IO_PU(27),
+ PORT_DATA_IO_PU(28),
+ PORT_DATA_IO_PD(29),
+ PORT_DATA_IO_PD(30),
+ PORT_DATA_IO_PU(31),
+ PORT_DATA_IO_PD(32),
+ PORT_DATA_I_PU_PD(33),
+ PORT_DATA_IO_PD(34),
+ PORT_DATA_I_PU_PD(35),
+ PORT_DATA_IO_PD(36),
+ PORT_DATA_IO(37),
+ PORT_DATA_O(38),
+ PORT_DATA_I_PU(39),
+ PORT_DATA_I_PU_PD(40),
+ PORT_DATA_O(41),
+ PORT_DATA_IO_PD(42),
+ PORT_DATA_IO_PU_PD(43),
+ PORT_DATA_IO_PU_PD(44),
+ PORT_DATA_IO_PD(45),
+ PORT_DATA_IO_PD(46),
+ PORT_DATA_IO_PD(47),
+ PORT_DATA_I_PD(48),
+ PORT_DATA_IO_PU_PD(49),
+ PORT_DATA_IO_PD(50),
+
+ PORT_DATA_IO_PD(51),
+ PORT_DATA_O(52),
+ PORT_DATA_IO_PU_PD(53),
+ PORT_DATA_IO_PU_PD(54),
+ PORT_DATA_IO_PD(55),
+ PORT_DATA_I_PU_PD(56),
+ PORT_DATA_IO(57),
+ PORT_DATA_IO(58),
+ PORT_DATA_IO(59),
+ PORT_DATA_IO(60),
+ PORT_DATA_IO(61),
+ PORT_DATA_IO_PD(62),
+ PORT_DATA_IO_PD(63),
+ PORT_DATA_IO_PU_PD(64),
+ PORT_DATA_IO_PD(65),
+ PORT_DATA_IO_PU_PD(66),
+ PORT_DATA_IO_PU_PD(67),
+ PORT_DATA_IO_PU_PD(68),
+ PORT_DATA_IO_PU_PD(69),
+ PORT_DATA_IO_PU_PD(70),
+ PORT_DATA_IO_PU_PD(71),
+ PORT_DATA_IO_PU_PD(72),
+ PORT_DATA_I_PU_PD(73),
+ PORT_DATA_IO_PU(74),
+ PORT_DATA_IO_PU(75),
+ PORT_DATA_IO_PU(76),
+ PORT_DATA_IO_PU(77),
+ PORT_DATA_IO_PU(78),
+ PORT_DATA_IO_PU(79),
+ PORT_DATA_IO_PU(80),
+ PORT_DATA_IO_PU(81),
+ PORT_DATA_IO_PU(82),
+ PORT_DATA_IO_PU(83),
+ PORT_DATA_IO_PU(84),
+ PORT_DATA_IO_PU(85),
+ PORT_DATA_IO_PU(86),
+ PORT_DATA_IO_PU(87),
+ PORT_DATA_IO_PU(88),
+ PORT_DATA_IO_PU(89),
+ PORT_DATA_O(90),
+ PORT_DATA_IO_PU(91),
+ PORT_DATA_O(92),
+ PORT_DATA_IO_PU(93),
+ PORT_DATA_O(94),
+ PORT_DATA_I_PU_PD(95),
+ PORT_DATA_IO(96),
+ PORT_DATA_IO(97),
+ PORT_DATA_IO(98),
+ PORT_DATA_I_PU(99),
+ PORT_DATA_O(100),
+ PORT_DATA_O(101),
+ PORT_DATA_I_PU(102),
+ PORT_DATA_IO_PD(103),
+ PORT_DATA_I_PU_PD(104),
+ PORT_DATA_I_PD(105),
+ PORT_DATA_I_PD(106),
+ PORT_DATA_I_PU_PD(107),
+ PORT_DATA_I_PU_PD(108),
+ PORT_DATA_IO_PD(109),
+ PORT_DATA_IO_PD(110),
+ PORT_DATA_IO_PU_PD(111),
+ PORT_DATA_IO_PU_PD(112),
+ PORT_DATA_IO_PU_PD(113),
+ PORT_DATA_IO_PD(114),
+ PORT_DATA_IO_PU(115),
+ PORT_DATA_IO_PU(116),
+ PORT_DATA_IO_PU_PD(117),
+ PORT_DATA_IO_PU_PD(118),
+ PORT_DATA_IO_PD(128),
+
+ PORT_DATA_IO_PD(129),
+ PORT_DATA_IO_PU_PD(130),
+ PORT_DATA_IO_PD(131),
+ PORT_DATA_IO_PD(132),
+ PORT_DATA_IO_PD(133),
+ PORT_DATA_IO_PU_PD(134),
+ PORT_DATA_IO_PU_PD(135),
+ PORT_DATA_IO_PU_PD(136),
+ PORT_DATA_IO_PU_PD(137),
+ PORT_DATA_IO_PD(138),
+ PORT_DATA_IO_PD(139),
+ PORT_DATA_IO_PD(140),
+ PORT_DATA_IO_PD(141),
+ PORT_DATA_IO_PD(142),
+ PORT_DATA_IO_PD(143),
+ PORT_DATA_IO_PU_PD(144),
+ PORT_DATA_IO_PD(145),
+ PORT_DATA_IO_PU_PD(146),
+ PORT_DATA_IO_PU_PD(147),
+ PORT_DATA_IO_PU_PD(148),
+ PORT_DATA_IO_PU_PD(149),
+ PORT_DATA_I_PU_PD(150),
+ PORT_DATA_IO_PU_PD(151),
+ PORT_DATA_IO_PU_PD(152),
+ PORT_DATA_IO_PD(153),
+ PORT_DATA_IO_PD(154),
+ PORT_DATA_I_PU_PD(155),
+ PORT_DATA_IO_PU_PD(156),
+ PORT_DATA_I_PD(157),
+ PORT_DATA_IO_PD(158),
+ PORT_DATA_IO_PU_PD(159),
+ PORT_DATA_IO_PU_PD(160),
+ PORT_DATA_I_PU_PD(161),
+ PORT_DATA_I_PU_PD(162),
+ PORT_DATA_IO_PU_PD(163),
+ PORT_DATA_I_PU_PD(164),
+ PORT_DATA_IO_PD(192),
+ PORT_DATA_IO_PU_PD(193),
+ PORT_DATA_IO_PD(194),
+ PORT_DATA_IO_PU_PD(195),
+ PORT_DATA_IO_PD(196),
+ PORT_DATA_IO_PD(197),
+ PORT_DATA_IO_PD(198),
+ PORT_DATA_IO_PD(199),
+ PORT_DATA_IO_PU_PD(200),
+ PORT_DATA_IO_PU_PD(201),
+ PORT_DATA_IO_PU_PD(202),
+ PORT_DATA_IO_PU_PD(203),
+ PORT_DATA_IO_PU_PD(204),
+ PORT_DATA_IO_PU_PD(205),
+ PORT_DATA_IO_PU_PD(206),
+ PORT_DATA_IO_PD(207),
+ PORT_DATA_IO_PD(208),
+ PORT_DATA_IO_PD(209),
+ PORT_DATA_IO_PD(210),
+ PORT_DATA_IO_PD(211),
+ PORT_DATA_IO_PD(212),
+ PORT_DATA_IO_PD(213),
+ PORT_DATA_IO_PU_PD(214),
+ PORT_DATA_IO_PU_PD(215),
+ PORT_DATA_IO_PD(216),
+ PORT_DATA_IO_PD(217),
+ PORT_DATA_O(218),
+ PORT_DATA_IO_PD(219),
+ PORT_DATA_IO_PD(220),
+ PORT_DATA_IO_PU_PD(221),
+ PORT_DATA_IO_PU_PD(222),
+ PORT_DATA_I_PU_PD(223),
+ PORT_DATA_I_PU_PD(224),
+
+ PORT_DATA_IO_PU_PD(225),
+ PORT_DATA_O(226),
+ PORT_DATA_IO_PU_PD(227),
+ PORT_DATA_I_PU_PD(228),
+ PORT_DATA_I_PD(229),
+ PORT_DATA_IO(230),
+ PORT_DATA_IO_PU_PD(231),
+ PORT_DATA_IO_PU_PD(232),
+ PORT_DATA_I_PU_PD(233),
+ PORT_DATA_IO_PU_PD(234),
+ PORT_DATA_IO_PU_PD(235),
+ PORT_DATA_IO_PU_PD(236),
+ PORT_DATA_IO_PD(237),
+ PORT_DATA_IO_PU_PD(238),
+ PORT_DATA_IO_PU_PD(239),
+ PORT_DATA_IO_PU_PD(240),
+ PORT_DATA_O(241),
+ PORT_DATA_I_PD(242),
+ PORT_DATA_IO_PU_PD(243),
+ PORT_DATA_IO_PU_PD(244),
+ PORT_DATA_IO_PU_PD(245),
+ PORT_DATA_IO_PU_PD(246),
+ PORT_DATA_IO_PU_PD(247),
+ PORT_DATA_IO_PU_PD(248),
+ PORT_DATA_IO_PU_PD(249),
+ PORT_DATA_IO_PU_PD(250),
+ PORT_DATA_IO_PU_PD(251),
+ PORT_DATA_IO_PU_PD(252),
+ PORT_DATA_IO_PU_PD(253),
+ PORT_DATA_IO_PU_PD(254),
+ PORT_DATA_IO_PU_PD(255),
+ PORT_DATA_IO_PU_PD(256),
+ PORT_DATA_IO_PU_PD(257),
+ PORT_DATA_IO_PU_PD(258),
+ PORT_DATA_IO_PU_PD(259),
+ PORT_DATA_IO_PU_PD(260),
+ PORT_DATA_IO_PU_PD(261),
+ PORT_DATA_IO_PU_PD(262),
+ PORT_DATA_IO_PU_PD(263),
+ PORT_DATA_IO_PU_PD(264),
+ PORT_DATA_IO_PU_PD(265),
+ PORT_DATA_IO_PU_PD(266),
+ PORT_DATA_IO_PU_PD(267),
+ PORT_DATA_IO_PU_PD(268),
+ PORT_DATA_IO_PU_PD(269),
+ PORT_DATA_IO_PU_PD(270),
+ PORT_DATA_IO_PU_PD(271),
+ PORT_DATA_IO_PU_PD(272),
+ PORT_DATA_IO_PU_PD(273),
+ PORT_DATA_IO_PU_PD(274),
+ PORT_DATA_IO_PU_PD(275),
+ PORT_DATA_IO_PU_PD(276),
+ PORT_DATA_IO_PU_PD(277),
+ PORT_DATA_IO_PU_PD(278),
+ PORT_DATA_IO_PU_PD(279),
+ PORT_DATA_IO_PU_PD(280),
+ PORT_DATA_O(281),
+ PORT_DATA_O(282),
+ PORT_DATA_I_PU(288),
+ PORT_DATA_IO_PU_PD(289),
+ PORT_DATA_IO_PU_PD(290),
+ PORT_DATA_IO_PU_PD(291),
+ PORT_DATA_IO_PU_PD(292),
+ PORT_DATA_IO_PU_PD(293),
+ PORT_DATA_IO_PU_PD(294),
+ PORT_DATA_IO_PU_PD(295),
+ PORT_DATA_IO_PU_PD(296),
+ PORT_DATA_IO_PU_PD(297),
+ PORT_DATA_IO_PU_PD(298),
+
+ PORT_DATA_IO_PU_PD(299),
+ PORT_DATA_IO_PU_PD(300),
+ PORT_DATA_IO_PU_PD(301),
+ PORT_DATA_IO_PU_PD(302),
+ PORT_DATA_IO_PU_PD(303),
+ PORT_DATA_IO_PU_PD(304),
+ PORT_DATA_IO_PU_PD(305),
+ PORT_DATA_O(306),
+ PORT_DATA_O(307),
+ PORT_DATA_I_PU(308),
+ PORT_DATA_O(309),
+
+ /* Table 25-1 (Function 0-7) */
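+ /*
+  * Each PINMUX_DATA(mark, cfg...) entry lists the per-port function
+  * select value (PORTn_FNx) and, where required, the MSELxCR
+  * mode-select bits that route the function to the pin.
+  */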
+ PINMUX_DATA(VBUS_0_MARK, PORT0_FN1),
+ PINMUX_DATA(GPI0_MARK, PORT1_FN1),
+ PINMUX_DATA(GPI1_MARK, PORT2_FN1),
+ PINMUX_DATA(GPI2_MARK, PORT3_FN1),
+ PINMUX_DATA(GPI3_MARK, PORT4_FN1),
+ PINMUX_DATA(GPI4_MARK, PORT5_FN1),
+ PINMUX_DATA(GPI5_MARK, PORT6_FN1),
+ PINMUX_DATA(GPI6_MARK, PORT7_FN1),
+ PINMUX_DATA(GPI7_MARK, PORT8_FN1),
+ PINMUX_DATA(SCIFA7_RXD_MARK, PORT12_FN2),
+ PINMUX_DATA(SCIFA7_CTS__MARK, PORT13_FN2),
+ PINMUX_DATA(GPO7_MARK, PORT14_FN1), \
+ PINMUX_DATA(MFG0_OUT2_MARK, PORT14_FN4),
+ PINMUX_DATA(GPO6_MARK, PORT15_FN1), \
+ PINMUX_DATA(MFG1_OUT2_MARK, PORT15_FN4),
+ PINMUX_DATA(GPO5_MARK, PORT16_FN1), \
+ PINMUX_DATA(SCIFA0_SCK_MARK, PORT16_FN2), \
+ PINMUX_DATA(FSICOSLDT3_MARK, PORT16_FN3), \
+ PINMUX_DATA(PORT16_VIO_CKOR_MARK, PORT16_FN4),
+ PINMUX_DATA(SCIFA0_TXD_MARK, PORT17_FN2),
+ PINMUX_DATA(SCIFA7_TXD_MARK, PORT18_FN2),
+ PINMUX_DATA(SCIFA7_RTS__MARK, PORT19_FN2), \
+ PINMUX_DATA(PORT19_VIO_CKO2_MARK, PORT19_FN3),
+ PINMUX_DATA(GPO0_MARK, PORT20_FN1),
+ PINMUX_DATA(GPO1_MARK, PORT21_FN1),
+ PINMUX_DATA(GPO2_MARK, PORT22_FN1), \
+ PINMUX_DATA(STATUS0_MARK, PORT22_FN2),
+ PINMUX_DATA(GPO3_MARK, PORT23_FN1), \
+ PINMUX_DATA(STATUS1_MARK, PORT23_FN2),
+ PINMUX_DATA(GPO4_MARK, PORT24_FN1), \
+ PINMUX_DATA(STATUS2_MARK, PORT24_FN2),
+ PINMUX_DATA(VINT_MARK, PORT25_FN1),
+ PINMUX_DATA(TCKON_MARK, PORT26_FN1),
+ PINMUX_DATA(XDVFS1_MARK, PORT27_FN1), \
+ PINMUX_DATA(PORT27_I2C_SCL2_MARK, PORT27_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_1), \
+ PINMUX_DATA(PORT27_I2C_SCL3_MARK, PORT27_FN3, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_1), \
+ PINMUX_DATA(MFG0_OUT1_MARK, PORT27_FN4), \
+ PINMUX_DATA(PORT27_IROUT_MARK, PORT27_FN7),
+ PINMUX_DATA(XDVFS2_MARK, PORT28_FN1), \
+ PINMUX_DATA(PORT28_I2C_SDA2_MARK, PORT28_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_1), \
+ PINMUX_DATA(PORT28_I2C_SDA3_MARK, PORT28_FN3, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_1), \
+ PINMUX_DATA(PORT28_TPU1TO1_MARK, PORT28_FN7),
+ PINMUX_DATA(SIM_RST_MARK, PORT29_FN1), \
+ PINMUX_DATA(PORT29_TPU1TO1_MARK, PORT29_FN4),
+ PINMUX_DATA(SIM_CLK_MARK, PORT30_FN1), \
+ PINMUX_DATA(PORT30_VIO_CKOR_MARK, PORT30_FN4),
+ PINMUX_DATA(SIM_D_MARK, PORT31_FN1), \
+ PINMUX_DATA(PORT31_IROUT_MARK, PORT31_FN4),
+ PINMUX_DATA(SCIFA4_TXD_MARK, PORT32_FN2),
+ PINMUX_DATA(SCIFA4_RXD_MARK, PORT33_FN2), \
+ PINMUX_DATA(XWUP_MARK, PORT33_FN3),
+ PINMUX_DATA(SCIFA4_RTS__MARK, PORT34_FN2),
+ PINMUX_DATA(SCIFA4_CTS__MARK, PORT35_FN2),
+ PINMUX_DATA(FSIBOBT_MARK, PORT36_FN1), \
+ PINMUX_DATA(FSIBIBT_MARK, PORT36_FN2),
+ PINMUX_DATA(FSIBOLR_MARK, PORT37_FN1), \
+ PINMUX_DATA(FSIBILR_MARK, PORT37_FN2),
+ PINMUX_DATA(FSIBOSLD_MARK, PORT38_FN1),
+ PINMUX_DATA(FSIBISLD_MARK, PORT39_FN1),
+ PINMUX_DATA(VACK_MARK, PORT40_FN1),
+ PINMUX_DATA(XTAL1L_MARK, PORT41_FN1),
+ PINMUX_DATA(SCIFA0_RTS__MARK, PORT42_FN2), \
+ PINMUX_DATA(FSICOSLDT2_MARK, PORT42_FN3),
+ PINMUX_DATA(SCIFA0_RXD_MARK, PORT43_FN2),
+ PINMUX_DATA(SCIFA0_CTS__MARK, PORT44_FN2), \
+ PINMUX_DATA(FSICOSLDT1_MARK, PORT44_FN3),
+ PINMUX_DATA(FSICOBT_MARK, PORT45_FN1), \
+ PINMUX_DATA(FSICIBT_MARK, PORT45_FN2), \
+ PINMUX_DATA(FSIDOBT_MARK, PORT45_FN3), \
+ PINMUX_DATA(FSIDIBT_MARK, PORT45_FN4),
+ PINMUX_DATA(FSICOLR_MARK, PORT46_FN1), \
+ PINMUX_DATA(FSICILR_MARK, PORT46_FN2), \
+ PINMUX_DATA(FSIDOLR_MARK, PORT46_FN3), \
+ PINMUX_DATA(FSIDILR_MARK, PORT46_FN4),
+ PINMUX_DATA(FSICOSLD_MARK, PORT47_FN1), \
+ PINMUX_DATA(PORT47_FSICSPDIF_MARK, PORT47_FN2),
+ PINMUX_DATA(FSICISLD_MARK, PORT48_FN1), \
+ PINMUX_DATA(FSIDISLD_MARK, PORT48_FN3),
+ PINMUX_DATA(FSIACK_MARK, PORT49_FN1), \
+ PINMUX_DATA(PORT49_IRDA_OUT_MARK, PORT49_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(PORT49_IROUT_MARK, PORT49_FN4), \
+ PINMUX_DATA(FSIAOMC_MARK, PORT49_FN5),
+ PINMUX_DATA(FSIAOLR_MARK, PORT50_FN1), \
+ PINMUX_DATA(BBIF2_TSYNC2_MARK, PORT50_FN2), \
+ PINMUX_DATA(TPU2TO2_MARK, PORT50_FN3), \
+ PINMUX_DATA(FSIAILR_MARK, PORT50_FN5),
+
+ PINMUX_DATA(FSIAOBT_MARK, PORT51_FN1), \
+ PINMUX_DATA(BBIF2_TSCK2_MARK, PORT51_FN2), \
+ PINMUX_DATA(TPU2TO3_MARK, PORT51_FN3), \
+ PINMUX_DATA(FSIAIBT_MARK, PORT51_FN5),
+ PINMUX_DATA(FSIAOSLD_MARK, PORT52_FN1), \
+ PINMUX_DATA(BBIF2_TXD2_MARK, PORT52_FN2),
+ PINMUX_DATA(FSIASPDIF_MARK, PORT53_FN1), \
+ PINMUX_DATA(PORT53_IRDA_IN_MARK, PORT53_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(TPU3TO3_MARK, PORT53_FN3), \
+ PINMUX_DATA(FSIBSPDIF_MARK, PORT53_FN5), \
+ PINMUX_DATA(PORT53_FSICSPDIF_MARK, PORT53_FN6),
+ PINMUX_DATA(FSIBCK_MARK, PORT54_FN1), \
+ PINMUX_DATA(PORT54_IRDA_FIRSEL_MARK, PORT54_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(TPU3TO2_MARK, PORT54_FN3), \
+ PINMUX_DATA(FSIBOMC_MARK, PORT54_FN5), \
+ PINMUX_DATA(FSICCK_MARK, PORT54_FN6), \
+ PINMUX_DATA(FSICOMC_MARK, PORT54_FN7),
+ PINMUX_DATA(FSIAISLD_MARK, PORT55_FN1), \
+ PINMUX_DATA(TPU0TO0_MARK, PORT55_FN3),
+ PINMUX_DATA(A0_MARK, PORT57_FN1), \
+ PINMUX_DATA(BS__MARK, PORT57_FN2),
+ PINMUX_DATA(A12_MARK, PORT58_FN1), \
+ PINMUX_DATA(PORT58_KEYOUT7_MARK, PORT58_FN2), \
+ PINMUX_DATA(TPU4TO2_MARK, PORT58_FN4),
+ PINMUX_DATA(A13_MARK, PORT59_FN1), \
+ PINMUX_DATA(PORT59_KEYOUT6_MARK, PORT59_FN2), \
+ PINMUX_DATA(TPU0TO1_MARK, PORT59_FN4),
+ PINMUX_DATA(A14_MARK, PORT60_FN1), \
+ PINMUX_DATA(KEYOUT5_MARK, PORT60_FN2),
+ PINMUX_DATA(A15_MARK, PORT61_FN1), \
+ PINMUX_DATA(KEYOUT4_MARK, PORT61_FN2),
+ PINMUX_DATA(A16_MARK, PORT62_FN1), \
+ PINMUX_DATA(KEYOUT3_MARK, PORT62_FN2), \
+ PINMUX_DATA(MSIOF0_SS1_MARK, PORT62_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A17_MARK, PORT63_FN1), \
+ PINMUX_DATA(KEYOUT2_MARK, PORT63_FN2), \
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT63_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A18_MARK, PORT64_FN1), \
+ PINMUX_DATA(KEYOUT1_MARK, PORT64_FN2), \
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PORT64_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A19_MARK, PORT65_FN1), \
+ PINMUX_DATA(KEYOUT0_MARK, PORT65_FN2), \
+ PINMUX_DATA(MSIOF0_TXD_MARK, PORT65_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A20_MARK, PORT66_FN1), \
+ PINMUX_DATA(KEYIN0_MARK, PORT66_FN2), \
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PORT66_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A21_MARK, PORT67_FN1), \
+ PINMUX_DATA(KEYIN1_MARK, PORT67_FN2), \
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT67_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A22_MARK, PORT68_FN1), \
+ PINMUX_DATA(KEYIN2_MARK, PORT68_FN2), \
+ PINMUX_DATA(MSIOF0_MCK0_MARK, PORT68_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A23_MARK, PORT69_FN1), \
+ PINMUX_DATA(KEYIN3_MARK, PORT69_FN2), \
+ PINMUX_DATA(MSIOF0_MCK1_MARK, PORT69_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A24_MARK, PORT70_FN1), \
+ PINMUX_DATA(KEYIN4_MARK, PORT70_FN2), \
+ PINMUX_DATA(MSIOF0_RXD_MARK, PORT70_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A25_MARK, PORT71_FN1), \
+ PINMUX_DATA(KEYIN5_MARK, PORT71_FN2), \
+ PINMUX_DATA(MSIOF0_SS2_MARK, PORT71_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A26_MARK, PORT72_FN1), \
+ PINMUX_DATA(KEYIN6_MARK, PORT72_FN2),
+ PINMUX_DATA(KEYIN7_MARK, PORT73_FN2),
+ PINMUX_DATA(D0_NAF0_MARK, PORT74_FN1),
+ PINMUX_DATA(D1_NAF1_MARK, PORT75_FN1),
+ PINMUX_DATA(D2_NAF2_MARK, PORT76_FN1),
+ PINMUX_DATA(D3_NAF3_MARK, PORT77_FN1),
+ PINMUX_DATA(D4_NAF4_MARK, PORT78_FN1),
+ PINMUX_DATA(D5_NAF5_MARK, PORT79_FN1),
+ PINMUX_DATA(D6_NAF6_MARK, PORT80_FN1),
+ PINMUX_DATA(D7_NAF7_MARK, PORT81_FN1),
+ PINMUX_DATA(D8_NAF8_MARK, PORT82_FN1),
+ PINMUX_DATA(D9_NAF9_MARK, PORT83_FN1),
+ PINMUX_DATA(D10_NAF10_MARK, PORT84_FN1),
+ PINMUX_DATA(D11_NAF11_MARK, PORT85_FN1),
+ PINMUX_DATA(D12_NAF12_MARK, PORT86_FN1),
+ PINMUX_DATA(D13_NAF13_MARK, PORT87_FN1),
+ PINMUX_DATA(D14_NAF14_MARK, PORT88_FN1),
+ PINMUX_DATA(D15_NAF15_MARK, PORT89_FN1),
+ PINMUX_DATA(CS4__MARK, PORT90_FN1),
+ PINMUX_DATA(CS5A__MARK, PORT91_FN1), \
+ PINMUX_DATA(PORT91_RDWR_MARK, PORT91_FN2),
+ PINMUX_DATA(CS5B__MARK, PORT92_FN1), \
+ PINMUX_DATA(FCE1__MARK, PORT92_FN2),
+ PINMUX_DATA(CS6B__MARK, PORT93_FN1), \
+ PINMUX_DATA(DACK0_MARK, PORT93_FN4),
+ PINMUX_DATA(FCE0__MARK, PORT94_FN1), \
+ PINMUX_DATA(CS6A__MARK, PORT94_FN2),
+ PINMUX_DATA(WAIT__MARK, PORT95_FN1), \
+ PINMUX_DATA(DREQ0_MARK, PORT95_FN2),
+ PINMUX_DATA(RD__FSC_MARK, PORT96_FN1),
+ PINMUX_DATA(WE0__FWE_MARK, PORT97_FN1), \
+ PINMUX_DATA(RDWR_FWE_MARK, PORT97_FN2),
+ PINMUX_DATA(WE1__MARK, PORT98_FN1),
+ PINMUX_DATA(FRB_MARK, PORT99_FN1),
+ PINMUX_DATA(CKO_MARK, PORT100_FN1),
+ PINMUX_DATA(NBRSTOUT__MARK, PORT101_FN1),
+ PINMUX_DATA(NBRST__MARK, PORT102_FN1),
+ PINMUX_DATA(BBIF2_TXD_MARK, PORT103_FN3),
+ PINMUX_DATA(BBIF2_RXD_MARK, PORT104_FN3),
+ PINMUX_DATA(BBIF2_SYNC_MARK, PORT105_FN3),
+ PINMUX_DATA(BBIF2_SCK_MARK, PORT106_FN3),
+ PINMUX_DATA(SCIFA3_CTS__MARK, PORT107_FN3), \
+ PINMUX_DATA(MFG3_IN2_MARK, PORT107_FN4),
+ PINMUX_DATA(SCIFA3_RXD_MARK, PORT108_FN3), \
+ PINMUX_DATA(MFG3_IN1_MARK, PORT108_FN4),
+ PINMUX_DATA(BBIF1_SS2_MARK, PORT109_FN2), \
+ PINMUX_DATA(SCIFA3_RTS__MARK, PORT109_FN3), \
+ PINMUX_DATA(MFG3_OUT1_MARK, PORT109_FN4),
+ PINMUX_DATA(SCIFA3_TXD_MARK, PORT110_FN3),
+ PINMUX_DATA(HSI_RX_DATA_MARK, PORT111_FN1), \
+ PINMUX_DATA(BBIF1_RXD_MARK, PORT111_FN3),
+ PINMUX_DATA(HSI_TX_WAKE_MARK, PORT112_FN1), \
+ PINMUX_DATA(BBIF1_TSCK_MARK, PORT112_FN3),
+ PINMUX_DATA(HSI_TX_DATA_MARK, PORT113_FN1), \
+ PINMUX_DATA(BBIF1_TSYNC_MARK, PORT113_FN3),
+ PINMUX_DATA(HSI_TX_READY_MARK, PORT114_FN1), \
+ PINMUX_DATA(BBIF1_TXD_MARK, PORT114_FN3),
+ PINMUX_DATA(HSI_RX_READY_MARK, PORT115_FN1), \
+ PINMUX_DATA(BBIF1_RSCK_MARK, PORT115_FN3), \
+ PINMUX_DATA(PORT115_I2C_SCL2_MARK, PORT115_FN5, MSEL2CR_MSEL17_1), \
+ PINMUX_DATA(PORT115_I2C_SCL3_MARK, PORT115_FN6, MSEL2CR_MSEL19_1),
+ PINMUX_DATA(HSI_RX_WAKE_MARK, PORT116_FN1), \
+ PINMUX_DATA(BBIF1_RSYNC_MARK, PORT116_FN3), \
+ PINMUX_DATA(PORT116_I2C_SDA2_MARK, PORT116_FN5, MSEL2CR_MSEL17_1), \
+ PINMUX_DATA(PORT116_I2C_SDA3_MARK, PORT116_FN6, MSEL2CR_MSEL19_1),
+ PINMUX_DATA(HSI_RX_FLAG_MARK, PORT117_FN1), \
+ PINMUX_DATA(BBIF1_SS1_MARK, PORT117_FN2), \
+ PINMUX_DATA(BBIF1_FLOW_MARK, PORT117_FN3),
+ PINMUX_DATA(HSI_TX_FLAG_MARK, PORT118_FN1),
+ PINMUX_DATA(VIO_VD_MARK, PORT128_FN1), \
+ PINMUX_DATA(PORT128_LCD2VSYN_MARK, PORT128_FN4, MSEL3CR_MSEL2_0), \
+ PINMUX_DATA(VIO2_VD_MARK, PORT128_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D0_MARK, PORT128_FN7),
+
+ PINMUX_DATA(VIO_HD_MARK, PORT129_FN1), \
+ PINMUX_DATA(PORT129_LCD2HSYN_MARK, PORT129_FN4), \
+ PINMUX_DATA(PORT129_LCD2CS__MARK, PORT129_FN5), \
+ PINMUX_DATA(VIO2_HD_MARK, PORT129_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D1_MARK, PORT129_FN7),
+ PINMUX_DATA(VIO_D0_MARK, PORT130_FN1), \
+ PINMUX_DATA(PORT130_MSIOF2_RXD_MARK, PORT130_FN3, MSEL4CR_MSEL11_0,
+ MSEL4CR_MSEL10_1), \
+ PINMUX_DATA(LCD2D10_MARK, PORT130_FN7),
+ PINMUX_DATA(VIO_D1_MARK, PORT131_FN1), \
+ PINMUX_DATA(PORT131_KEYOUT6_MARK, PORT131_FN2), \
+ PINMUX_DATA(PORT131_MSIOF2_SS1_MARK, PORT131_FN3), \
+ PINMUX_DATA(PORT131_KEYOUT11_MARK, PORT131_FN4), \
+ PINMUX_DATA(LCD2D11_MARK, PORT131_FN7),
+ PINMUX_DATA(VIO_D2_MARK, PORT132_FN1), \
+ PINMUX_DATA(PORT132_KEYOUT7_MARK, PORT132_FN2), \
+ PINMUX_DATA(PORT132_MSIOF2_SS2_MARK, PORT132_FN3), \
+ PINMUX_DATA(PORT132_KEYOUT10_MARK, PORT132_FN4), \
+ PINMUX_DATA(LCD2D12_MARK, PORT132_FN7),
+ PINMUX_DATA(VIO_D3_MARK, PORT133_FN1), \
+ PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT133_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D13_MARK, PORT133_FN7),
+ PINMUX_DATA(VIO_D4_MARK, PORT134_FN1), \
+ PINMUX_DATA(MSIOF2_TXD_MARK, PORT134_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D14_MARK, PORT134_FN7),
+ PINMUX_DATA(VIO_D5_MARK, PORT135_FN1), \
+ PINMUX_DATA(MSIOF2_TSCK_MARK, PORT135_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D15_MARK, PORT135_FN7),
+ PINMUX_DATA(VIO_D6_MARK, PORT136_FN1), \
+ PINMUX_DATA(PORT136_KEYOUT8_MARK, PORT136_FN2), \
+ PINMUX_DATA(LCD2D16_MARK, PORT136_FN7),
+ PINMUX_DATA(VIO_D7_MARK, PORT137_FN1), \
+ PINMUX_DATA(PORT137_KEYOUT9_MARK, PORT137_FN2), \
+ PINMUX_DATA(LCD2D17_MARK, PORT137_FN7),
+ PINMUX_DATA(VIO_D8_MARK, PORT138_FN1), \
+ PINMUX_DATA(PORT138_KEYOUT8_MARK, PORT138_FN2), \
+ PINMUX_DATA(VIO2_D0_MARK, PORT138_FN6), \
+ PINMUX_DATA(LCD2D6_MARK, PORT138_FN7),
+ PINMUX_DATA(VIO_D9_MARK, PORT139_FN1), \
+ PINMUX_DATA(PORT139_KEYOUT9_MARK, PORT139_FN2), \
+ PINMUX_DATA(VIO2_D1_MARK, PORT139_FN6), \
+ PINMUX_DATA(LCD2D7_MARK, PORT139_FN7),
+ PINMUX_DATA(VIO_D10_MARK, PORT140_FN1), \
+ PINMUX_DATA(TPU0TO2_MARK, PORT140_FN4), \
+ PINMUX_DATA(VIO2_D2_MARK, PORT140_FN6), \
+ PINMUX_DATA(LCD2D8_MARK, PORT140_FN7),
+ PINMUX_DATA(VIO_D11_MARK, PORT141_FN1), \
+ PINMUX_DATA(TPU0TO3_MARK, PORT141_FN4), \
+ PINMUX_DATA(VIO2_D3_MARK, PORT141_FN6), \
+ PINMUX_DATA(LCD2D9_MARK, PORT141_FN7),
+ PINMUX_DATA(VIO_D12_MARK, PORT142_FN1), \
+ PINMUX_DATA(PORT142_KEYOUT10_MARK, PORT142_FN2), \
+ PINMUX_DATA(VIO2_D4_MARK, PORT142_FN6), \
+ PINMUX_DATA(LCD2D2_MARK, PORT142_FN7),
+ PINMUX_DATA(VIO_D13_MARK, PORT143_FN1), \
+ PINMUX_DATA(PORT143_KEYOUT11_MARK, PORT143_FN2), \
+ PINMUX_DATA(PORT143_KEYOUT6_MARK, PORT143_FN3), \
+ PINMUX_DATA(VIO2_D5_MARK, PORT143_FN6), \
+ PINMUX_DATA(LCD2D3_MARK, PORT143_FN7),
+ PINMUX_DATA(VIO_D14_MARK, PORT144_FN1), \
+ PINMUX_DATA(PORT144_KEYOUT7_MARK, PORT144_FN2), \
+ PINMUX_DATA(VIO2_D6_MARK, PORT144_FN6), \
+ PINMUX_DATA(LCD2D4_MARK, PORT144_FN7),
+ PINMUX_DATA(VIO_D15_MARK, PORT145_FN1), \
+ PINMUX_DATA(TPU1TO3_MARK, PORT145_FN3), \
+ PINMUX_DATA(PORT145_LCD2DISP_MARK, PORT145_FN4), \
+ PINMUX_DATA(PORT145_LCD2RS_MARK, PORT145_FN5), \
+ PINMUX_DATA(VIO2_D7_MARK, PORT145_FN6), \
+ PINMUX_DATA(LCD2D5_MARK, PORT145_FN7),
+ PINMUX_DATA(VIO_CLK_MARK, PORT146_FN1), \
+ PINMUX_DATA(LCD2DCK_MARK, PORT146_FN4), \
+ PINMUX_DATA(PORT146_LCD2WR__MARK, PORT146_FN5), \
+ PINMUX_DATA(VIO2_CLK_MARK, PORT146_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D18_MARK, PORT146_FN7),
+ PINMUX_DATA(VIO_FIELD_MARK, PORT147_FN1), \
+ PINMUX_DATA(LCD2RD__MARK, PORT147_FN4), \
+ PINMUX_DATA(VIO2_FIELD_MARK, PORT147_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D19_MARK, PORT147_FN7),
+ PINMUX_DATA(VIO_CKO_MARK, PORT148_FN1),
+ PINMUX_DATA(A27_MARK, PORT149_FN1), \
+ PINMUX_DATA(PORT149_RDWR_MARK, PORT149_FN2), \
+ PINMUX_DATA(MFG0_IN1_MARK, PORT149_FN3), \
+ PINMUX_DATA(PORT149_KEYOUT9_MARK, PORT149_FN4),
+ PINMUX_DATA(MFG0_IN2_MARK, PORT150_FN3),
+ PINMUX_DATA(TS_SPSYNC3_MARK, PORT151_FN4), \
+ PINMUX_DATA(MSIOF2_RSCK_MARK, PORT151_FN5),
+ PINMUX_DATA(TS_SDAT3_MARK, PORT152_FN4), \
+ PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT152_FN5),
+ PINMUX_DATA(TPU1TO2_MARK, PORT153_FN3), \
+ PINMUX_DATA(TS_SDEN3_MARK, PORT153_FN4), \
+ PINMUX_DATA(PORT153_MSIOF2_SS1_MARK, PORT153_FN5),
+ PINMUX_DATA(SCIFA2_TXD1_MARK, PORT154_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(MSIOF2_MCK0_MARK, PORT154_FN5),
+ PINMUX_DATA(SCIFA2_RXD1_MARK, PORT155_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(MSIOF2_MCK1_MARK, PORT155_FN5),
+ PINMUX_DATA(SCIFA2_RTS1__MARK, PORT156_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(PORT156_MSIOF2_SS2_MARK, PORT156_FN5),
+ PINMUX_DATA(SCIFA2_CTS1__MARK, PORT157_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(PORT157_MSIOF2_RXD_MARK, PORT157_FN5, MSEL4CR_MSEL11_0,
+ MSEL4CR_MSEL10_0),
+ PINMUX_DATA(DINT__MARK, PORT158_FN1), \
+ PINMUX_DATA(SCIFA2_SCK1_MARK, PORT158_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(TS_SCK3_MARK, PORT158_FN4),
+ PINMUX_DATA(PORT159_SCIFB_SCK_MARK, PORT159_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT159_SCIFA5_SCK_MARK, PORT159_FN2, MSEL4CR_MSEL21_1), \
+ PINMUX_DATA(NMI_MARK, PORT159_FN3),
+ PINMUX_DATA(PORT160_SCIFB_TXD_MARK, PORT160_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT160_SCIFA5_TXD_MARK, PORT160_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT161_SCIFB_CTS__MARK, PORT161_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT161_SCIFA5_CTS__MARK, PORT161_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT162_SCIFB_RXD_MARK, PORT162_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT162_SCIFA5_RXD_MARK, PORT162_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT163_SCIFB_RTS__MARK, PORT163_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT163_SCIFA5_RTS__MARK, PORT163_FN2, MSEL4CR_MSEL21_1), \
+ PINMUX_DATA(TPU3TO0_MARK, PORT163_FN5),
+ PINMUX_DATA(LCDD0_MARK, PORT192_FN1),
+ PINMUX_DATA(LCDD1_MARK, PORT193_FN1), \
+ PINMUX_DATA(PORT193_SCIFA5_CTS__MARK, PORT193_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT193_FN5),
+ PINMUX_DATA(LCDD2_MARK, PORT194_FN1), \
+ PINMUX_DATA(PORT194_SCIFA5_RTS__MARK, PORT194_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TSCK1_MARK, PORT194_FN5),
+ PINMUX_DATA(LCDD3_MARK, PORT195_FN1), \
+ PINMUX_DATA(PORT195_SCIFA5_RXD_MARK, PORT195_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TXD1_MARK, PORT195_FN5),
+ PINMUX_DATA(LCDD4_MARK, PORT196_FN1), \
+ PINMUX_DATA(PORT196_SCIFA5_TXD_MARK, PORT196_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1),
+ PINMUX_DATA(LCDD5_MARK, PORT197_FN1), \
+ PINMUX_DATA(PORT197_SCIFA5_SCK_MARK, PORT197_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(MFG2_OUT2_MARK, PORT197_FN5), \
+ PINMUX_DATA(TPU2TO1_MARK, PORT197_FN7),
+ PINMUX_DATA(LCDD6_MARK, PORT198_FN1),
+ PINMUX_DATA(LCDD7_MARK, PORT199_FN1), \
+ PINMUX_DATA(TPU4TO1_MARK, PORT199_FN2), \
+ PINMUX_DATA(MFG4_OUT2_MARK, PORT199_FN5),
+ PINMUX_DATA(LCDD8_MARK, PORT200_FN1), \
+ PINMUX_DATA(D16_MARK, PORT200_FN6),
+ PINMUX_DATA(LCDD9_MARK, PORT201_FN1), \
+ PINMUX_DATA(D17_MARK, PORT201_FN6),
+ PINMUX_DATA(LCDD10_MARK, PORT202_FN1), \
+ PINMUX_DATA(D18_MARK, PORT202_FN6),
+ PINMUX_DATA(LCDD11_MARK, PORT203_FN1), \
+ PINMUX_DATA(D19_MARK, PORT203_FN6),
+ PINMUX_DATA(LCDD12_MARK, PORT204_FN1), \
+ PINMUX_DATA(D20_MARK, PORT204_FN6),
+ PINMUX_DATA(LCDD13_MARK, PORT205_FN1), \
+ PINMUX_DATA(D21_MARK, PORT205_FN6),
+ PINMUX_DATA(LCDD14_MARK, PORT206_FN1), \
+ PINMUX_DATA(D22_MARK, PORT206_FN6),
+ PINMUX_DATA(LCDD15_MARK, PORT207_FN1), \
+ PINMUX_DATA(PORT207_MSIOF0L_SS1_MARK, PORT207_FN2, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D23_MARK, PORT207_FN6),
+ PINMUX_DATA(LCDD16_MARK, PORT208_FN1), \
+ PINMUX_DATA(PORT208_MSIOF0L_SS2_MARK, PORT208_FN2, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D24_MARK, PORT208_FN6),
+ PINMUX_DATA(LCDD17_MARK, PORT209_FN1), \
+ PINMUX_DATA(D25_MARK, PORT209_FN6),
+ PINMUX_DATA(LCDD18_MARK, PORT210_FN1), \
+ PINMUX_DATA(DREQ2_MARK, PORT210_FN2), \
+ PINMUX_DATA(PORT210_MSIOF0L_SS1_MARK, PORT210_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D26_MARK, PORT210_FN6),
+ PINMUX_DATA(LCDD19_MARK, PORT211_FN1), \
+ PINMUX_DATA(PORT211_MSIOF0L_SS2_MARK, PORT211_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D27_MARK, PORT211_FN6),
+ PINMUX_DATA(LCDD20_MARK, PORT212_FN1), \
+ PINMUX_DATA(TS_SPSYNC1_MARK, PORT212_FN2), \
+ PINMUX_DATA(MSIOF0L_MCK0_MARK, PORT212_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D28_MARK, PORT212_FN6),
+ PINMUX_DATA(LCDD21_MARK, PORT213_FN1), \
+ PINMUX_DATA(TS_SDAT1_MARK, PORT213_FN2), \
+ PINMUX_DATA(MSIOF0L_MCK1_MARK, PORT213_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D29_MARK, PORT213_FN6),
+ PINMUX_DATA(LCDD22_MARK, PORT214_FN1), \
+ PINMUX_DATA(TS_SDEN1_MARK, PORT214_FN2), \
+ PINMUX_DATA(MSIOF0L_RSCK_MARK, PORT214_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D30_MARK, PORT214_FN6),
+ PINMUX_DATA(LCDD23_MARK, PORT215_FN1), \
+ PINMUX_DATA(TS_SCK1_MARK, PORT215_FN2), \
+ PINMUX_DATA(MSIOF0L_RSYNC_MARK, PORT215_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D31_MARK, PORT215_FN6),
+ PINMUX_DATA(LCDDCK_MARK, PORT216_FN1), \
+ PINMUX_DATA(LCDWR__MARK, PORT216_FN2),
+ PINMUX_DATA(LCDRD__MARK, PORT217_FN1), \
+ PINMUX_DATA(DACK2_MARK, PORT217_FN2), \
+ PINMUX_DATA(PORT217_LCD2RS_MARK, PORT217_FN3), \
+ PINMUX_DATA(MSIOF0L_TSYNC_MARK, PORT217_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_FIELD3_MARK, PORT217_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT217_LCD2DISP_MARK, PORT217_FN7),
+ PINMUX_DATA(LCDHSYN_MARK, PORT218_FN1), \
+ PINMUX_DATA(LCDCS__MARK, PORT218_FN2), \
+ PINMUX_DATA(LCDCS2__MARK, PORT218_FN3), \
+ PINMUX_DATA(DACK3_MARK, PORT218_FN4), \
+ PINMUX_DATA(PORT218_VIO_CKOR_MARK, PORT218_FN5),
+ PINMUX_DATA(LCDDISP_MARK, PORT219_FN1), \
+ PINMUX_DATA(LCDRS_MARK, PORT219_FN2), \
+ PINMUX_DATA(PORT219_LCD2WR__MARK, PORT219_FN3), \
+ PINMUX_DATA(DREQ3_MARK, PORT219_FN4), \
+ PINMUX_DATA(MSIOF0L_TSCK_MARK, PORT219_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_CLK3_MARK, PORT219_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(LCD2DCK_2_MARK, PORT219_FN7),
+ PINMUX_DATA(LCDVSYN_MARK, PORT220_FN1), \
+ PINMUX_DATA(LCDVSYN2_MARK, PORT220_FN2),
+ PINMUX_DATA(LCDLCLK_MARK, PORT221_FN1), \
+ PINMUX_DATA(DREQ1_MARK, PORT221_FN2), \
+ PINMUX_DATA(PORT221_LCD2CS__MARK, PORT221_FN3), \
+ PINMUX_DATA(PWEN_MARK, PORT221_FN4), \
+ PINMUX_DATA(MSIOF0L_RXD_MARK, PORT221_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_HD3_MARK, PORT221_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT221_LCD2HSYN_MARK, PORT221_FN7),
+ PINMUX_DATA(LCDDON_MARK, PORT222_FN1), \
+ PINMUX_DATA(LCDDON2_MARK, PORT222_FN2), \
+ PINMUX_DATA(DACK1_MARK, PORT222_FN3), \
+ PINMUX_DATA(OVCN_MARK, PORT222_FN4), \
+ PINMUX_DATA(MSIOF0L_TXD_MARK, PORT222_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_VD3_MARK, PORT222_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT222_LCD2VSYN_MARK, PORT222_FN7, MSEL3CR_MSEL2_1),
+
+ PINMUX_DATA(SCIFA1_TXD_MARK, PORT225_FN2), \
+ PINMUX_DATA(OVCN2_MARK, PORT225_FN4),
+ PINMUX_DATA(EXTLP_MARK, PORT226_FN1), \
+ PINMUX_DATA(SCIFA1_SCK_MARK, PORT226_FN2), \
+ PINMUX_DATA(PORT226_VIO_CKO2_MARK, PORT226_FN5),
+ PINMUX_DATA(SCIFA1_RTS__MARK, PORT227_FN2), \
+ PINMUX_DATA(IDIN_MARK, PORT227_FN4),
+ PINMUX_DATA(SCIFA1_RXD_MARK, PORT228_FN2),
+ PINMUX_DATA(SCIFA1_CTS__MARK, PORT229_FN2), \
+ PINMUX_DATA(MFG1_IN1_MARK, PORT229_FN3),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PORT230_FN1), \
+ PINMUX_DATA(SCIFA2_TXD2_MARK, PORT230_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PORT231_FN1), \
+ PINMUX_DATA(SCIFA2_CTS2__MARK, PORT231_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PORT232_FN1), \
+ PINMUX_DATA(SCIFA2_SCK2_MARK, PORT232_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PORT233_FN1), \
+ PINMUX_DATA(SCIFA2_RXD2_MARK, PORT233_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PORT234_FN1), \
+ PINMUX_DATA(SCIFA2_RTS2__MARK, PORT234_FN2, MSEL3CR_MSEL9_1), \
+ PINMUX_DATA(VIO2_CLK2_MARK, PORT234_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D20_MARK, PORT234_FN7),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT235_FN1), \
+ PINMUX_DATA(MFG1_IN2_MARK, PORT235_FN3), \
+ PINMUX_DATA(VIO2_VD2_MARK, PORT235_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D21_MARK, PORT235_FN7),
+ PINMUX_DATA(MSIOF1_MCK0_MARK, PORT236_FN1), \
+ PINMUX_DATA(PORT236_I2C_SDA2_MARK, PORT236_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_0),
+ PINMUX_DATA(MSIOF1_MCK1_MARK, PORT237_FN1), \
+ PINMUX_DATA(PORT237_I2C_SCL2_MARK, PORT237_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_0),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PORT238_FN1), \
+ PINMUX_DATA(VIO2_FIELD2_MARK, PORT238_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D22_MARK, PORT238_FN7),
+ PINMUX_DATA(MSIOF1_SS2_MARK, PORT239_FN1), \
+ PINMUX_DATA(VIO2_HD2_MARK, PORT239_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D23_MARK, PORT239_FN7),
+ PINMUX_DATA(SCIFA6_TXD_MARK, PORT240_FN1),
+ PINMUX_DATA(PORT241_IRDA_OUT_MARK, PORT241_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(PORT241_IROUT_MARK, PORT241_FN2), \
+ PINMUX_DATA(MFG4_OUT1_MARK, PORT241_FN3), \
+ PINMUX_DATA(TPU4TO0_MARK, PORT241_FN4),
+ PINMUX_DATA(PORT242_IRDA_IN_MARK, PORT242_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(MFG4_IN2_MARK, PORT242_FN3),
+ PINMUX_DATA(PORT243_IRDA_FIRSEL_MARK, PORT243_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(PORT243_VIO_CKO2_MARK, PORT243_FN2),
+ PINMUX_DATA(PORT244_SCIFA5_CTS__MARK, PORT244_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_IN1_MARK, PORT244_FN2), \
+ PINMUX_DATA(PORT244_SCIFB_CTS__MARK, PORT244_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(MSIOF2R_RXD_MARK, PORT244_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT245_SCIFA5_RTS__MARK, PORT245_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_IN2_MARK, PORT245_FN2), \
+ PINMUX_DATA(PORT245_SCIFB_RTS__MARK, PORT245_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(MSIOF2R_TXD_MARK, PORT245_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT246_SCIFA5_RXD_MARK, PORT246_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG1_OUT1_MARK, PORT246_FN2), \
+ PINMUX_DATA(PORT246_SCIFB_RXD_MARK, PORT246_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU1TO0_MARK, PORT246_FN4),
+ PINMUX_DATA(PORT247_SCIFA5_TXD_MARK, PORT247_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG3_OUT2_MARK, PORT247_FN2), \
+ PINMUX_DATA(PORT247_SCIFB_TXD_MARK, PORT247_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU3TO1_MARK, PORT247_FN4),
+ PINMUX_DATA(PORT248_SCIFA5_SCK_MARK, PORT248_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_OUT1_MARK, PORT248_FN2), \
+ PINMUX_DATA(PORT248_SCIFB_SCK_MARK, PORT248_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU2TO0_MARK, PORT248_FN4), \
+ PINMUX_DATA(PORT248_I2C_SCL3_MARK, PORT248_FN5, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_0), \
+ PINMUX_DATA(MSIOF2R_TSCK_MARK, PORT248_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT249_IROUT_MARK, PORT249_FN1), \
+ PINMUX_DATA(MFG4_IN1_MARK, PORT249_FN2), \
+ PINMUX_DATA(PORT249_I2C_SDA3_MARK, PORT249_FN5, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_0), \
+ PINMUX_DATA(MSIOF2R_TSYNC_MARK, PORT249_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(SDHICLK0_MARK, PORT250_FN1),
+ PINMUX_DATA(SDHICD0_MARK, PORT251_FN1),
+ PINMUX_DATA(SDHID0_0_MARK, PORT252_FN1),
+ PINMUX_DATA(SDHID0_1_MARK, PORT253_FN1),
+ PINMUX_DATA(SDHID0_2_MARK, PORT254_FN1),
+ PINMUX_DATA(SDHID0_3_MARK, PORT255_FN1),
+ PINMUX_DATA(SDHICMD0_MARK, PORT256_FN1),
+ PINMUX_DATA(SDHIWP0_MARK, PORT257_FN1),
+ PINMUX_DATA(SDHICLK1_MARK, PORT258_FN1),
+ PINMUX_DATA(SDHID1_0_MARK, PORT259_FN1), \
+ PINMUX_DATA(TS_SPSYNC2_MARK, PORT259_FN3),
+ PINMUX_DATA(SDHID1_1_MARK, PORT260_FN1), \
+ PINMUX_DATA(TS_SDAT2_MARK, PORT260_FN3),
+ PINMUX_DATA(SDHID1_2_MARK, PORT261_FN1), \
+ PINMUX_DATA(TS_SDEN2_MARK, PORT261_FN3),
+ PINMUX_DATA(SDHID1_3_MARK, PORT262_FN1), \
+ PINMUX_DATA(TS_SCK2_MARK, PORT262_FN3),
+ PINMUX_DATA(SDHICMD1_MARK, PORT263_FN1),
+ PINMUX_DATA(SDHICLK2_MARK, PORT264_FN1),
+ PINMUX_DATA(SDHID2_0_MARK, PORT265_FN1), \
+ PINMUX_DATA(TS_SPSYNC4_MARK, PORT265_FN3),
+ PINMUX_DATA(SDHID2_1_MARK, PORT266_FN1), \
+ PINMUX_DATA(TS_SDAT4_MARK, PORT266_FN3),
+ PINMUX_DATA(SDHID2_2_MARK, PORT267_FN1), \
+ PINMUX_DATA(TS_SDEN4_MARK, PORT267_FN3),
+ PINMUX_DATA(SDHID2_3_MARK, PORT268_FN1), \
+ PINMUX_DATA(TS_SCK4_MARK, PORT268_FN3),
+ PINMUX_DATA(SDHICMD2_MARK, PORT269_FN1),
+ PINMUX_DATA(MMCCLK0_MARK, PORT270_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_0_MARK, PORT271_FN1, PORT271_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_1_MARK, PORT272_FN1, PORT272_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_2_MARK, PORT273_FN1, PORT273_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_3_MARK, PORT274_FN1, PORT274_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_4_MARK, PORT275_FN1, PORT275_IN_PU,
+ MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SPSYNC5_MARK, PORT275_FN3),
+ PINMUX_DATA(MMCD0_5_MARK, PORT276_FN1, PORT276_IN_PU,
+ MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SDAT5_MARK, PORT276_FN3),
+ PINMUX_DATA(MMCD0_6_MARK, PORT277_FN1, PORT277_IN_PU,
+ MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SDEN5_MARK, PORT277_FN3),
+ PINMUX_DATA(MMCD0_7_MARK, PORT278_FN1, PORT278_IN_PU,
+ MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SCK5_MARK, PORT278_FN3),
+ PINMUX_DATA(MMCCMD0_MARK, PORT279_FN1, PORT279_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(RESETOUTS__MARK, PORT281_FN1), \
+ PINMUX_DATA(EXTAL2OUT_MARK, PORT281_FN2),
+ PINMUX_DATA(MCP_WAIT__MCP_FRB_MARK, PORT288_FN1),
+ PINMUX_DATA(MCP_CKO_MARK, PORT289_FN1), \
+ PINMUX_DATA(MMCCLK1_MARK, PORT289_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D15_MCP_NAF15_MARK, PORT290_FN1),
+ PINMUX_DATA(MCP_D14_MCP_NAF14_MARK, PORT291_FN1),
+ PINMUX_DATA(MCP_D13_MCP_NAF13_MARK, PORT292_FN1),
+ PINMUX_DATA(MCP_D12_MCP_NAF12_MARK, PORT293_FN1),
+ PINMUX_DATA(MCP_D11_MCP_NAF11_MARK, PORT294_FN1),
+ PINMUX_DATA(MCP_D10_MCP_NAF10_MARK, PORT295_FN1),
+ PINMUX_DATA(MCP_D9_MCP_NAF9_MARK, PORT296_FN1),
+ PINMUX_DATA(MCP_D8_MCP_NAF8_MARK, PORT297_FN1), \
+ PINMUX_DATA(MMCCMD1_MARK, PORT297_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D7_MCP_NAF7_MARK, PORT298_FN1), \
+ PINMUX_DATA(MMCD1_7_MARK, PORT298_FN2, MSEL4CR_MSEL15_1),
+
+ PINMUX_DATA(MCP_D6_MCP_NAF6_MARK, PORT299_FN1), \
+ PINMUX_DATA(MMCD1_6_MARK, PORT299_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D5_MCP_NAF5_MARK, PORT300_FN1), \
+ PINMUX_DATA(MMCD1_5_MARK, PORT300_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D4_MCP_NAF4_MARK, PORT301_FN1), \
+ PINMUX_DATA(MMCD1_4_MARK, PORT301_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D3_MCP_NAF3_MARK, PORT302_FN1), \
+ PINMUX_DATA(MMCD1_3_MARK, PORT302_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D2_MCP_NAF2_MARK, PORT303_FN1), \
+ PINMUX_DATA(MMCD1_2_MARK, PORT303_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D1_MCP_NAF1_MARK, PORT304_FN1), \
+ PINMUX_DATA(MMCD1_1_MARK, PORT304_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D0_MCP_NAF0_MARK, PORT305_FN1), \
+ PINMUX_DATA(MMCD1_0_MARK, PORT305_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_NBRSTOUT__MARK, PORT306_FN1),
+ PINMUX_DATA(MCP_WE0__MCP_FWE_MARK, PORT309_FN1), \
+ PINMUX_DATA(MCP_RDWR_MCP_FWE_MARK, PORT309_FN2),
+
+ /* MSEL2 special cases */
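+ /*
+  * These functions have no per-port function field; they are selected
+  * solely through MSEL2CR mode-select bit combinations.
+  */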
+ PINMUX_DATA(TSIF2_TS_XX1_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF2_TS_XX2_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_1),
+ PINMUX_DATA(TSIF2_TS_XX3_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF2_TS_XX4_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_1),
+ PINMUX_DATA(TSIF2_TS_XX5_MARK, MSEL2CR_MSEL14_1, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF1_TS_XX1_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF1_TS_XX2_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_1),
+ PINMUX_DATA(TSIF1_TS_XX3_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF1_TS_XX4_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_1),
+ PINMUX_DATA(TSIF1_TS_XX5_MARK, MSEL2CR_MSEL11_1, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF0_TS_XX1_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(TSIF0_TS_XX2_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_1),
+ PINMUX_DATA(TSIF0_TS_XX3_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(TSIF0_TS_XX4_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_1),
+ PINMUX_DATA(TSIF0_TS_XX5_MARK, MSEL2CR_MSEL8_1, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(MST1_TS_XX1_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST1_TS_XX2_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_1),
+ PINMUX_DATA(MST1_TS_XX3_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST1_TS_XX4_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_1),
+ PINMUX_DATA(MST1_TS_XX5_MARK, MSEL2CR_MSEL5_1, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST0_TS_XX1_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_0),
+ PINMUX_DATA(MST0_TS_XX2_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_1),
+ PINMUX_DATA(MST0_TS_XX3_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0),
+ PINMUX_DATA(MST0_TS_XX4_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_1),
+ PINMUX_DATA(MST0_TS_XX5_MARK, MSEL2CR_MSEL2_1, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_0),
+
+ /* MSEL3 special cases */
+ PINMUX_DATA(SDHI0_VCCQ_MC0_ON_MARK, MSEL3CR_MSEL28_1),
+ PINMUX_DATA(SDHI0_VCCQ_MC0_OFF_MARK, MSEL3CR_MSEL28_0),
+ PINMUX_DATA(DEBUG_MON_VIO_MARK, MSEL3CR_MSEL15_0),
+ PINMUX_DATA(DEBUG_MON_LCDD_MARK, MSEL3CR_MSEL15_1),
+ PINMUX_DATA(LCDC_LCDC0_MARK, MSEL3CR_MSEL6_0),
+ PINMUX_DATA(LCDC_LCDC1_MARK, MSEL3CR_MSEL6_1),
+
+ /* MSEL4 special cases */
+ PINMUX_DATA(IRQ9_MEM_INT_MARK, MSEL4CR_MSEL29_0),
+ PINMUX_DATA(IRQ9_MCP_INT_MARK, MSEL4CR_MSEL29_1),
+ PINMUX_DATA(A11_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_0),
+ PINMUX_DATA(KEYOUT8_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_1),
+ PINMUX_DATA(TPU4TO3_MARK, MSEL4CR_MSEL13_1, MSEL4CR_MSEL12_0),
+ PINMUX_DATA(RESETA_N_PU_ON_MARK, MSEL4CR_MSEL4_0),
+ PINMUX_DATA(RESETA_N_PU_OFF_MARK, MSEL4CR_MSEL4_1),
+ PINMUX_DATA(EDBGREQ_PD_MARK, MSEL4CR_MSEL1_0),
+ PINMUX_DATA(EDBGREQ_PU_MARK, MSEL4CR_MSEL1_1),
+
+ /* Functions with pull-ups */
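+ /*
+  * Same function selections as above, with the port's input pull-up
+  * (PORTn_IN_PU) enabled in addition.
+  */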
+ PINMUX_DATA(KEYIN0_PU_MARK, PORT66_FN2, PORT66_IN_PU),
+ PINMUX_DATA(KEYIN1_PU_MARK, PORT67_FN2, PORT67_IN_PU),
+ PINMUX_DATA(KEYIN2_PU_MARK, PORT68_FN2, PORT68_IN_PU),
+ PINMUX_DATA(KEYIN3_PU_MARK, PORT69_FN2, PORT69_IN_PU),
+ PINMUX_DATA(KEYIN4_PU_MARK, PORT70_FN2, PORT70_IN_PU),
+ PINMUX_DATA(KEYIN5_PU_MARK, PORT71_FN2, PORT71_IN_PU),
+ PINMUX_DATA(KEYIN6_PU_MARK, PORT72_FN2, PORT72_IN_PU),
+ PINMUX_DATA(KEYIN7_PU_MARK, PORT73_FN2, PORT73_IN_PU),
+
+ PINMUX_DATA(SDHICD0_PU_MARK, PORT251_FN1, PORT251_IN_PU),
+ PINMUX_DATA(SDHID0_0_PU_MARK, PORT252_FN1, PORT252_IN_PU),
+ PINMUX_DATA(SDHID0_1_PU_MARK, PORT253_FN1, PORT253_IN_PU),
+ PINMUX_DATA(SDHID0_2_PU_MARK, PORT254_FN1, PORT254_IN_PU),
+ PINMUX_DATA(SDHID0_3_PU_MARK, PORT255_FN1, PORT255_IN_PU),
+ PINMUX_DATA(SDHICMD0_PU_MARK, PORT256_FN1, PORT256_IN_PU),
+ PINMUX_DATA(SDHIWP0_PU_MARK, PORT257_FN1, PORT257_IN_PU),
+ PINMUX_DATA(SDHID1_0_PU_MARK, PORT259_FN1, PORT259_IN_PU),
+ PINMUX_DATA(SDHID1_1_PU_MARK, PORT260_FN1, PORT260_IN_PU),
+ PINMUX_DATA(SDHID1_2_PU_MARK, PORT261_FN1, PORT261_IN_PU),
+ PINMUX_DATA(SDHID1_3_PU_MARK, PORT262_FN1, PORT262_IN_PU),
+ PINMUX_DATA(SDHICMD1_PU_MARK, PORT263_FN1, PORT263_IN_PU),
+ PINMUX_DATA(SDHID2_0_PU_MARK, PORT265_FN1, PORT265_IN_PU),
+ PINMUX_DATA(SDHID2_1_PU_MARK, PORT266_FN1, PORT266_IN_PU),
+ PINMUX_DATA(SDHID2_2_PU_MARK, PORT267_FN1, PORT267_IN_PU),
+ PINMUX_DATA(SDHID2_3_PU_MARK, PORT268_FN1, PORT268_IN_PU),
+ PINMUX_DATA(SDHICMD2_PU_MARK, PORT269_FN1, PORT269_IN_PU),
+
+ PINMUX_DATA(MMCCMD0_PU_MARK, PORT279_FN1, PORT279_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT297_IN_PU,
+ MSEL4CR_MSEL15_1),
+
+ PINMUX_DATA(MMCD0_0_PU_MARK,
+ PORT271_FN1, PORT271_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_1_PU_MARK,
+ PORT272_FN1, PORT272_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_2_PU_MARK,
+ PORT273_FN1, PORT273_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_3_PU_MARK,
+ PORT274_FN1, PORT274_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_4_PU_MARK,
+ PORT275_FN1, PORT275_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_5_PU_MARK,
+ PORT276_FN1, PORT276_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_6_PU_MARK,
+ PORT277_FN1, PORT277_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_7_PU_MARK,
+ PORT278_FN1, PORT278_IN_PU, MSEL4CR_MSEL15_0),
+
+ PINMUX_DATA(FSIBISLD_PU_MARK, PORT39_FN1, PORT39_IN_PU),
+ PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU),
+ PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU),
+ PINMUX_DATA(FSIAIBT_PU_MARK, PORT51_FN5, PORT51_IN_PU),
+ PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ GPIO_PORT_ALL(),
+
+ /* Table 25-1 (Functions 0-7) */
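+ /*
+  * GPIO_PORT_ALL() provides the plain per-port GPIOs; each GPIO_FN()
+  * entry below exposes the corresponding *_MARK function.
+  */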
+ GPIO_FN(VBUS_0),
+ GPIO_FN(GPI0),
+ GPIO_FN(GPI1),
+ GPIO_FN(GPI2),
+ GPIO_FN(GPI3),
+ GPIO_FN(GPI4),
+ GPIO_FN(GPI5),
+ GPIO_FN(GPI6),
+ GPIO_FN(GPI7),
+ GPIO_FN(SCIFA7_RXD),
+ GPIO_FN(SCIFA7_CTS_),
+ GPIO_FN(GPO7), \
+ GPIO_FN(MFG0_OUT2),
+ GPIO_FN(GPO6), \
+ GPIO_FN(MFG1_OUT2),
+ GPIO_FN(GPO5), \
+ GPIO_FN(SCIFA0_SCK), \
+ GPIO_FN(FSICOSLDT3), \
+ GPIO_FN(PORT16_VIO_CKOR),
+ GPIO_FN(SCIFA0_TXD),
+ GPIO_FN(SCIFA7_TXD),
+ GPIO_FN(SCIFA7_RTS_), \
+ GPIO_FN(PORT19_VIO_CKO2),
+ GPIO_FN(GPO0),
+ GPIO_FN(GPO1),
+ GPIO_FN(GPO2), \
+ GPIO_FN(STATUS0),
+ GPIO_FN(GPO3), \
+ GPIO_FN(STATUS1),
+ GPIO_FN(GPO4), \
+ GPIO_FN(STATUS2),
+ GPIO_FN(VINT),
+ GPIO_FN(TCKON),
+ GPIO_FN(XDVFS1), \
+ GPIO_FN(PORT27_I2C_SCL2), \
+ GPIO_FN(PORT27_I2C_SCL3), \
+ GPIO_FN(MFG0_OUT1), \
+ GPIO_FN(PORT27_IROUT),
+ GPIO_FN(XDVFS2), \
+ GPIO_FN(PORT28_I2C_SDA2), \
+ GPIO_FN(PORT28_I2C_SDA3), \
+ GPIO_FN(PORT28_TPU1TO1),
+ GPIO_FN(SIM_RST), \
+ GPIO_FN(PORT29_TPU1TO1),
+ GPIO_FN(SIM_CLK), \
+ GPIO_FN(PORT30_VIO_CKOR),
+ GPIO_FN(SIM_D), \
+ GPIO_FN(PORT31_IROUT),
+ GPIO_FN(SCIFA4_TXD),
+ GPIO_FN(SCIFA4_RXD), \
+ GPIO_FN(XWUP),
+ GPIO_FN(SCIFA4_RTS_),
+ GPIO_FN(SCIFA4_CTS_),
+ GPIO_FN(FSIBOBT), \
+ GPIO_FN(FSIBIBT),
+ GPIO_FN(FSIBOLR), \
+ GPIO_FN(FSIBILR),
+ GPIO_FN(FSIBOSLD),
+ GPIO_FN(FSIBISLD),
+ GPIO_FN(VACK),
+ GPIO_FN(XTAL1L),
+ GPIO_FN(SCIFA0_RTS_), \
+ GPIO_FN(FSICOSLDT2),
+ GPIO_FN(SCIFA0_RXD),
+ GPIO_FN(SCIFA0_CTS_), \
+ GPIO_FN(FSICOSLDT1),
+ GPIO_FN(FSICOBT), \
+ GPIO_FN(FSICIBT), \
+ GPIO_FN(FSIDOBT), \
+ GPIO_FN(FSIDIBT),
+ GPIO_FN(FSICOLR), \
+ GPIO_FN(FSICILR), \
+ GPIO_FN(FSIDOLR), \
+ GPIO_FN(FSIDILR),
+ GPIO_FN(FSICOSLD), \
+ GPIO_FN(PORT47_FSICSPDIF),
+ GPIO_FN(FSICISLD), \
+ GPIO_FN(FSIDISLD),
+ GPIO_FN(FSIACK), \
+ GPIO_FN(PORT49_IRDA_OUT), \
+ GPIO_FN(PORT49_IROUT), \
+ GPIO_FN(FSIAOMC),
+ GPIO_FN(FSIAOLR), \
+ GPIO_FN(BBIF2_TSYNC2), \
+ GPIO_FN(TPU2TO2), \
+ GPIO_FN(FSIAILR),
+
+ GPIO_FN(FSIAOBT), \
+ GPIO_FN(BBIF2_TSCK2), \
+ GPIO_FN(TPU2TO3), \
+ GPIO_FN(FSIAIBT),
+ GPIO_FN(FSIAOSLD), \
+ GPIO_FN(BBIF2_TXD2),
+ GPIO_FN(FSIASPDIF), \
+ GPIO_FN(PORT53_IRDA_IN), \
+ GPIO_FN(TPU3TO3), \
+ GPIO_FN(FSIBSPDIF), \
+ GPIO_FN(PORT53_FSICSPDIF),
+ GPIO_FN(FSIBCK), \
+ GPIO_FN(PORT54_IRDA_FIRSEL), \
+ GPIO_FN(TPU3TO2), \
+ GPIO_FN(FSIBOMC), \
+ GPIO_FN(FSICCK), \
+ GPIO_FN(FSICOMC),
+ GPIO_FN(FSIAISLD), \
+ GPIO_FN(TPU0TO0),
+ GPIO_FN(A0), \
+ GPIO_FN(BS_),
+ GPIO_FN(A12), \
+ GPIO_FN(PORT58_KEYOUT7), \
+ GPIO_FN(TPU4TO2),
+ GPIO_FN(A13), \
+ GPIO_FN(PORT59_KEYOUT6), \
+ GPIO_FN(TPU0TO1),
+ GPIO_FN(A14), \
+ GPIO_FN(KEYOUT5),
+ GPIO_FN(A15), \
+ GPIO_FN(KEYOUT4),
+ GPIO_FN(A16), \
+ GPIO_FN(KEYOUT3), \
+ GPIO_FN(MSIOF0_SS1),
+ GPIO_FN(A17), \
+ GPIO_FN(KEYOUT2), \
+ GPIO_FN(MSIOF0_TSYNC),
+ GPIO_FN(A18), \
+ GPIO_FN(KEYOUT1), \
+ GPIO_FN(MSIOF0_TSCK),
+ GPIO_FN(A19), \
+ GPIO_FN(KEYOUT0), \
+ GPIO_FN(MSIOF0_TXD),
+ GPIO_FN(A20), \
+ GPIO_FN(KEYIN0), \
+ GPIO_FN(MSIOF0_RSCK),
+ GPIO_FN(A21), \
+ GPIO_FN(KEYIN1), \
+ GPIO_FN(MSIOF0_RSYNC),
+ GPIO_FN(A22), \
+ GPIO_FN(KEYIN2), \
+ GPIO_FN(MSIOF0_MCK0),
+ GPIO_FN(A23), \
+ GPIO_FN(KEYIN3), \
+ GPIO_FN(MSIOF0_MCK1),
+ GPIO_FN(A24), \
+ GPIO_FN(KEYIN4), \
+ GPIO_FN(MSIOF0_RXD),
+ GPIO_FN(A25), \
+ GPIO_FN(KEYIN5), \
+ GPIO_FN(MSIOF0_SS2),
+ GPIO_FN(A26), \
+ GPIO_FN(KEYIN6),
+ GPIO_FN(KEYIN7),
+ GPIO_FN(D0_NAF0),
+ GPIO_FN(D1_NAF1),
+ GPIO_FN(D2_NAF2),
+ GPIO_FN(D3_NAF3),
+ GPIO_FN(D4_NAF4),
+ GPIO_FN(D5_NAF5),
+ GPIO_FN(D6_NAF6),
+ GPIO_FN(D7_NAF7),
+ GPIO_FN(D8_NAF8),
+ GPIO_FN(D9_NAF9),
+ GPIO_FN(D10_NAF10),
+ GPIO_FN(D11_NAF11),
+ GPIO_FN(D12_NAF12),
+ GPIO_FN(D13_NAF13),
+ GPIO_FN(D14_NAF14),
+ GPIO_FN(D15_NAF15),
+ GPIO_FN(CS4_),
+ GPIO_FN(CS5A_), \
+ GPIO_FN(PORT91_RDWR),
+ GPIO_FN(CS5B_), \
+ GPIO_FN(FCE1_),
+ GPIO_FN(CS6B_), \
+ GPIO_FN(DACK0),
+ GPIO_FN(FCE0_), \
+ GPIO_FN(CS6A_),
+ GPIO_FN(WAIT_), \
+ GPIO_FN(DREQ0),
+ GPIO_FN(RD__FSC),
+ GPIO_FN(WE0__FWE), \
+ GPIO_FN(RDWR_FWE),
+ GPIO_FN(WE1_),
+ GPIO_FN(FRB),
+ GPIO_FN(CKO),
+ GPIO_FN(NBRSTOUT_),
+ GPIO_FN(NBRST_),
+ GPIO_FN(BBIF2_TXD),
+ GPIO_FN(BBIF2_RXD),
+ GPIO_FN(BBIF2_SYNC),
+ GPIO_FN(BBIF2_SCK),
+ GPIO_FN(SCIFA3_CTS_), \
+ GPIO_FN(MFG3_IN2),
+ GPIO_FN(SCIFA3_RXD), \
+ GPIO_FN(MFG3_IN1),
+ GPIO_FN(BBIF1_SS2), \
+ GPIO_FN(SCIFA3_RTS_), \
+ GPIO_FN(MFG3_OUT1),
+ GPIO_FN(SCIFA3_TXD),
+ GPIO_FN(HSI_RX_DATA), \
+ GPIO_FN(BBIF1_RXD),
+ GPIO_FN(HSI_TX_WAKE), \
+ GPIO_FN(BBIF1_TSCK),
+ GPIO_FN(HSI_TX_DATA), \
+ GPIO_FN(BBIF1_TSYNC),
+ GPIO_FN(HSI_TX_READY), \
+ GPIO_FN(BBIF1_TXD),
+ GPIO_FN(HSI_RX_READY), \
+ GPIO_FN(BBIF1_RSCK), \
+ GPIO_FN(PORT115_I2C_SCL2), \
+ GPIO_FN(PORT115_I2C_SCL3),
+ GPIO_FN(HSI_RX_WAKE), \
+ GPIO_FN(BBIF1_RSYNC), \
+ GPIO_FN(PORT116_I2C_SDA2), \
+ GPIO_FN(PORT116_I2C_SDA3),
+ GPIO_FN(HSI_RX_FLAG), \
+ GPIO_FN(BBIF1_SS1), \
+ GPIO_FN(BBIF1_FLOW),
+ GPIO_FN(HSI_TX_FLAG),
+ GPIO_FN(VIO_VD), \
+ GPIO_FN(PORT128_LCD2VSYN), \
+ GPIO_FN(VIO2_VD), \
+ GPIO_FN(LCD2D0),
+
+ GPIO_FN(VIO_HD), \
+ GPIO_FN(PORT129_LCD2HSYN), \
+ GPIO_FN(PORT129_LCD2CS_), \
+ GPIO_FN(VIO2_HD), \
+ GPIO_FN(LCD2D1),
+ GPIO_FN(VIO_D0), \
+ GPIO_FN(PORT130_MSIOF2_RXD), \
+ GPIO_FN(LCD2D10),
+ GPIO_FN(VIO_D1), \
+ GPIO_FN(PORT131_KEYOUT6), \
+ GPIO_FN(PORT131_MSIOF2_SS1), \
+ GPIO_FN(PORT131_KEYOUT11), \
+ GPIO_FN(LCD2D11),
+ GPIO_FN(VIO_D2), \
+ GPIO_FN(PORT132_KEYOUT7), \
+ GPIO_FN(PORT132_MSIOF2_SS2), \
+ GPIO_FN(PORT132_KEYOUT10), \
+ GPIO_FN(LCD2D12),
+ GPIO_FN(VIO_D3), \
+ GPIO_FN(MSIOF2_TSYNC), \
+ GPIO_FN(LCD2D13),
+ GPIO_FN(VIO_D4), \
+ GPIO_FN(MSIOF2_TXD), \
+ GPIO_FN(LCD2D14),
+ GPIO_FN(VIO_D5), \
+ GPIO_FN(MSIOF2_TSCK), \
+ GPIO_FN(LCD2D15),
+ GPIO_FN(VIO_D6), \
+ GPIO_FN(PORT136_KEYOUT8), \
+ GPIO_FN(LCD2D16),
+ GPIO_FN(VIO_D7), \
+ GPIO_FN(PORT137_KEYOUT9), \
+ GPIO_FN(LCD2D17),
+ GPIO_FN(VIO_D8), \
+ GPIO_FN(PORT138_KEYOUT8), \
+ GPIO_FN(VIO2_D0), \
+ GPIO_FN(LCD2D6),
+ GPIO_FN(VIO_D9), \
+ GPIO_FN(PORT139_KEYOUT9), \
+ GPIO_FN(VIO2_D1), \
+ GPIO_FN(LCD2D7),
+ GPIO_FN(VIO_D10), \
+ GPIO_FN(TPU0TO2), \
+ GPIO_FN(VIO2_D2), \
+ GPIO_FN(LCD2D8),
+ GPIO_FN(VIO_D11), \
+ GPIO_FN(TPU0TO3), \
+ GPIO_FN(VIO2_D3), \
+ GPIO_FN(LCD2D9),
+ GPIO_FN(VIO_D12), \
+ GPIO_FN(PORT142_KEYOUT10), \
+ GPIO_FN(VIO2_D4), \
+ GPIO_FN(LCD2D2),
+ GPIO_FN(VIO_D13), \
+ GPIO_FN(PORT143_KEYOUT11), \
+ GPIO_FN(PORT143_KEYOUT6), \
+ GPIO_FN(VIO2_D5), \
+ GPIO_FN(LCD2D3),
+ GPIO_FN(VIO_D14), \
+ GPIO_FN(PORT144_KEYOUT7), \
+ GPIO_FN(VIO2_D6), \
+ GPIO_FN(LCD2D4),
+ GPIO_FN(VIO_D15), \
+ GPIO_FN(TPU1TO3), \
+ GPIO_FN(PORT145_LCD2DISP), \
+ GPIO_FN(PORT145_LCD2RS), \
+ GPIO_FN(VIO2_D7), \
+ GPIO_FN(LCD2D5),
+ GPIO_FN(VIO_CLK), \
+ GPIO_FN(LCD2DCK), \
+ GPIO_FN(PORT146_LCD2WR_), \
+ GPIO_FN(VIO2_CLK), \
+ GPIO_FN(LCD2D18),
+ GPIO_FN(VIO_FIELD), \
+ GPIO_FN(LCD2RD_), \
+ GPIO_FN(VIO2_FIELD), \
+ GPIO_FN(LCD2D19),
+ GPIO_FN(VIO_CKO),
+ GPIO_FN(A27), \
+ GPIO_FN(PORT149_RDWR), \
+ GPIO_FN(MFG0_IN1), \
+ GPIO_FN(PORT149_KEYOUT9),
+ GPIO_FN(MFG0_IN2),
+ GPIO_FN(TS_SPSYNC3), \
+ GPIO_FN(MSIOF2_RSCK),
+ GPIO_FN(TS_SDAT3), \
+ GPIO_FN(MSIOF2_RSYNC),
+ GPIO_FN(TPU1TO2), \
+ GPIO_FN(TS_SDEN3), \
+ GPIO_FN(PORT153_MSIOF2_SS1),
+ GPIO_FN(SCIFA2_TXD1), \
+ GPIO_FN(MSIOF2_MCK0),
+ GPIO_FN(SCIFA2_RXD1), \
+ GPIO_FN(MSIOF2_MCK1),
+ GPIO_FN(SCIFA2_RTS1_), \
+ GPIO_FN(PORT156_MSIOF2_SS2),
+ GPIO_FN(SCIFA2_CTS1_), \
+ GPIO_FN(PORT157_MSIOF2_RXD),
+ GPIO_FN(DINT_), \
+ GPIO_FN(SCIFA2_SCK1), \
+ GPIO_FN(TS_SCK3),
+ GPIO_FN(PORT159_SCIFB_SCK), \
+ GPIO_FN(PORT159_SCIFA5_SCK), \
+ GPIO_FN(NMI),
+ GPIO_FN(PORT160_SCIFB_TXD), \
+ GPIO_FN(PORT160_SCIFA5_TXD),
+ GPIO_FN(PORT161_SCIFB_CTS_), \
+ GPIO_FN(PORT161_SCIFA5_CTS_),
+ GPIO_FN(PORT162_SCIFB_RXD), \
+ GPIO_FN(PORT162_SCIFA5_RXD),
+ GPIO_FN(PORT163_SCIFB_RTS_), \
+ GPIO_FN(PORT163_SCIFA5_RTS_), \
+ GPIO_FN(TPU3TO0),
+ GPIO_FN(LCDD0),
+ GPIO_FN(LCDD1), \
+ GPIO_FN(PORT193_SCIFA5_CTS_), \
+ GPIO_FN(BBIF2_TSYNC1),
+ GPIO_FN(LCDD2), \
+ GPIO_FN(PORT194_SCIFA5_RTS_), \
+ GPIO_FN(BBIF2_TSCK1),
+ GPIO_FN(LCDD3), \
+ GPIO_FN(PORT195_SCIFA5_RXD), \
+ GPIO_FN(BBIF2_TXD1),
+ GPIO_FN(LCDD4), \
+ GPIO_FN(PORT196_SCIFA5_TXD),
+ GPIO_FN(LCDD5), \
+ GPIO_FN(PORT197_SCIFA5_SCK), \
+ GPIO_FN(MFG2_OUT2), \
+ GPIO_FN(TPU2TO1),
+ GPIO_FN(LCDD6),
+ GPIO_FN(LCDD7), \
+ GPIO_FN(TPU4TO1), \
+ GPIO_FN(MFG4_OUT2),
+ GPIO_FN(LCDD8), \
+ GPIO_FN(D16),
+ GPIO_FN(LCDD9), \
+ GPIO_FN(D17),
+ GPIO_FN(LCDD10), \
+ GPIO_FN(D18),
+ GPIO_FN(LCDD11), \
+ GPIO_FN(D19),
+ GPIO_FN(LCDD12), \
+ GPIO_FN(D20),
+ GPIO_FN(LCDD13), \
+ GPIO_FN(D21),
+ GPIO_FN(LCDD14), \
+ GPIO_FN(D22),
+ GPIO_FN(LCDD15), \
+ GPIO_FN(PORT207_MSIOF0L_SS1), \
+ GPIO_FN(D23),
+ GPIO_FN(LCDD16), \
+ GPIO_FN(PORT208_MSIOF0L_SS2), \
+ GPIO_FN(D24),
+ GPIO_FN(LCDD17), \
+ GPIO_FN(D25),
+ GPIO_FN(LCDD18), \
+ GPIO_FN(DREQ2), \
+ GPIO_FN(PORT210_MSIOF0L_SS1), \
+ GPIO_FN(D26),
+ GPIO_FN(LCDD19), \
+ GPIO_FN(PORT211_MSIOF0L_SS2), \
+ GPIO_FN(D27),
+ GPIO_FN(LCDD20), \
+ GPIO_FN(TS_SPSYNC1), \
+ GPIO_FN(MSIOF0L_MCK0), \
+ GPIO_FN(D28),
+ GPIO_FN(LCDD21), \
+ GPIO_FN(TS_SDAT1), \
+ GPIO_FN(MSIOF0L_MCK1), \
+ GPIO_FN(D29),
+ GPIO_FN(LCDD22), \
+ GPIO_FN(TS_SDEN1), \
+ GPIO_FN(MSIOF0L_RSCK), \
+ GPIO_FN(D30),
+ GPIO_FN(LCDD23), \
+ GPIO_FN(TS_SCK1), \
+ GPIO_FN(MSIOF0L_RSYNC), \
+ GPIO_FN(D31),
+ GPIO_FN(LCDDCK), \
+ GPIO_FN(LCDWR_),
+ GPIO_FN(LCDRD_), \
+ GPIO_FN(DACK2), \
+ GPIO_FN(PORT217_LCD2RS), \
+ GPIO_FN(MSIOF0L_TSYNC), \
+ GPIO_FN(VIO2_FIELD3), \
+ GPIO_FN(PORT217_LCD2DISP),
+ GPIO_FN(LCDHSYN), \
+ GPIO_FN(LCDCS_), \
+ GPIO_FN(LCDCS2_), \
+ GPIO_FN(DACK3), \
+ GPIO_FN(PORT218_VIO_CKOR),
+ GPIO_FN(LCDDISP), \
+ GPIO_FN(LCDRS), \
+ GPIO_FN(PORT219_LCD2WR_), \
+ GPIO_FN(DREQ3), \
+ GPIO_FN(MSIOF0L_TSCK), \
+ GPIO_FN(VIO2_CLK3), \
+ GPIO_FN(LCD2DCK_2),
+ GPIO_FN(LCDVSYN), \
+ GPIO_FN(LCDVSYN2),
+ GPIO_FN(LCDLCLK), \
+ GPIO_FN(DREQ1), \
+ GPIO_FN(PORT221_LCD2CS_), \
+ GPIO_FN(PWEN), \
+ GPIO_FN(MSIOF0L_RXD), \
+ GPIO_FN(VIO2_HD3), \
+ GPIO_FN(PORT221_LCD2HSYN),
+ GPIO_FN(LCDDON), \
+ GPIO_FN(LCDDON2), \
+ GPIO_FN(DACK1), \
+ GPIO_FN(OVCN), \
+ GPIO_FN(MSIOF0L_TXD), \
+ GPIO_FN(VIO2_VD3), \
+ GPIO_FN(PORT222_LCD2VSYN),
+
+ GPIO_FN(SCIFA1_TXD), \
+ GPIO_FN(OVCN2),
+ GPIO_FN(EXTLP), \
+ GPIO_FN(SCIFA1_SCK), \
+ GPIO_FN(PORT226_VIO_CKO2),
+ GPIO_FN(SCIFA1_RTS_), \
+ GPIO_FN(IDIN),
+ GPIO_FN(SCIFA1_RXD),
+ GPIO_FN(SCIFA1_CTS_), \
+ GPIO_FN(MFG1_IN1),
+ GPIO_FN(MSIOF1_TXD), \
+ GPIO_FN(SCIFA2_TXD2),
+ GPIO_FN(MSIOF1_TSYNC), \
+ GPIO_FN(SCIFA2_CTS2_),
+ GPIO_FN(MSIOF1_TSCK), \
+ GPIO_FN(SCIFA2_SCK2),
+ GPIO_FN(MSIOF1_RXD), \
+ GPIO_FN(SCIFA2_RXD2),
+ GPIO_FN(MSIOF1_RSCK), \
+ GPIO_FN(SCIFA2_RTS2_), \
+ GPIO_FN(VIO2_CLK2), \
+ GPIO_FN(LCD2D20),
+ GPIO_FN(MSIOF1_RSYNC), \
+ GPIO_FN(MFG1_IN2), \
+ GPIO_FN(VIO2_VD2), \
+ GPIO_FN(LCD2D21),
+ GPIO_FN(MSIOF1_MCK0), \
+ GPIO_FN(PORT236_I2C_SDA2),
+ GPIO_FN(MSIOF1_MCK1), \
+ GPIO_FN(PORT237_I2C_SCL2),
+ GPIO_FN(MSIOF1_SS1), \
+ GPIO_FN(VIO2_FIELD2), \
+ GPIO_FN(LCD2D22),
+ GPIO_FN(MSIOF1_SS2), \
+ GPIO_FN(VIO2_HD2), \
+ GPIO_FN(LCD2D23),
+ GPIO_FN(SCIFA6_TXD),
+ GPIO_FN(PORT241_IRDA_OUT), \
+ GPIO_FN(PORT241_IROUT), \
+ GPIO_FN(MFG4_OUT1), \
+ GPIO_FN(TPU4TO0),
+ GPIO_FN(PORT242_IRDA_IN), \
+ GPIO_FN(MFG4_IN2),
+ GPIO_FN(PORT243_IRDA_FIRSEL), \
+ GPIO_FN(PORT243_VIO_CKO2),
+ GPIO_FN(PORT244_SCIFA5_CTS_), \
+ GPIO_FN(MFG2_IN1), \
+ GPIO_FN(PORT244_SCIFB_CTS_), \
+ GPIO_FN(MSIOF2R_RXD),
+ GPIO_FN(PORT245_SCIFA5_RTS_), \
+ GPIO_FN(MFG2_IN2), \
+ GPIO_FN(PORT245_SCIFB_RTS_), \
+ GPIO_FN(MSIOF2R_TXD),
+ GPIO_FN(PORT246_SCIFA5_RXD), \
+ GPIO_FN(MFG1_OUT1), \
+ GPIO_FN(PORT246_SCIFB_RXD), \
+ GPIO_FN(TPU1TO0),
+ GPIO_FN(PORT247_SCIFA5_TXD), \
+ GPIO_FN(MFG3_OUT2), \
+ GPIO_FN(PORT247_SCIFB_TXD), \
+ GPIO_FN(TPU3TO1),
+ GPIO_FN(PORT248_SCIFA5_SCK), \
+ GPIO_FN(MFG2_OUT1), \
+ GPIO_FN(PORT248_SCIFB_SCK), \
+ GPIO_FN(TPU2TO0), \
+ GPIO_FN(PORT248_I2C_SCL3), \
+ GPIO_FN(MSIOF2R_TSCK),
+ GPIO_FN(PORT249_IROUT), \
+ GPIO_FN(MFG4_IN1), \
+ GPIO_FN(PORT249_I2C_SDA3), \
+ GPIO_FN(MSIOF2R_TSYNC),
+ GPIO_FN(SDHICLK0),
+ GPIO_FN(SDHICD0),
+ GPIO_FN(SDHID0_0),
+ GPIO_FN(SDHID0_1),
+ GPIO_FN(SDHID0_2),
+ GPIO_FN(SDHID0_3),
+ GPIO_FN(SDHICMD0),
+ GPIO_FN(SDHIWP0),
+ GPIO_FN(SDHICLK1),
+ GPIO_FN(SDHID1_0), \
+ GPIO_FN(TS_SPSYNC2),
+ GPIO_FN(SDHID1_1), \
+ GPIO_FN(TS_SDAT2),
+ GPIO_FN(SDHID1_2), \
+ GPIO_FN(TS_SDEN2),
+ GPIO_FN(SDHID1_3), \
+ GPIO_FN(TS_SCK2),
+ GPIO_FN(SDHICMD1),
+ GPIO_FN(SDHICLK2),
+ GPIO_FN(SDHID2_0), \
+ GPIO_FN(TS_SPSYNC4),
+ GPIO_FN(SDHID2_1), \
+ GPIO_FN(TS_SDAT4),
+ GPIO_FN(SDHID2_2), \
+ GPIO_FN(TS_SDEN4),
+ GPIO_FN(SDHID2_3), \
+ GPIO_FN(TS_SCK4),
+ GPIO_FN(SDHICMD2),
+ GPIO_FN(MMCCLK0),
+ GPIO_FN(MMCD0_0),
+ GPIO_FN(MMCD0_1),
+ GPIO_FN(MMCD0_2),
+ GPIO_FN(MMCD0_3),
+ GPIO_FN(MMCD0_4), \
+ GPIO_FN(TS_SPSYNC5),
+ GPIO_FN(MMCD0_5), \
+ GPIO_FN(TS_SDAT5),
+ GPIO_FN(MMCD0_6), \
+ GPIO_FN(TS_SDEN5),
+ GPIO_FN(MMCD0_7), \
+ GPIO_FN(TS_SCK5),
+ GPIO_FN(MMCCMD0),
+ GPIO_FN(RESETOUTS_), \
+ GPIO_FN(EXTAL2OUT),
+ GPIO_FN(MCP_WAIT__MCP_FRB),
+ GPIO_FN(MCP_CKO), \
+ GPIO_FN(MMCCLK1),
+ GPIO_FN(MCP_D15_MCP_NAF15),
+ GPIO_FN(MCP_D14_MCP_NAF14),
+ GPIO_FN(MCP_D13_MCP_NAF13),
+ GPIO_FN(MCP_D12_MCP_NAF12),
+ GPIO_FN(MCP_D11_MCP_NAF11),
+ GPIO_FN(MCP_D10_MCP_NAF10),
+ GPIO_FN(MCP_D9_MCP_NAF9),
+ GPIO_FN(MCP_D8_MCP_NAF8), \
+ GPIO_FN(MMCCMD1),
+ GPIO_FN(MCP_D7_MCP_NAF7), \
+ GPIO_FN(MMCD1_7),
+
+ GPIO_FN(MCP_D6_MCP_NAF6), \
+ GPIO_FN(MMCD1_6),
+ GPIO_FN(MCP_D5_MCP_NAF5), \
+ GPIO_FN(MMCD1_5),
+ GPIO_FN(MCP_D4_MCP_NAF4), \
+ GPIO_FN(MMCD1_4),
+ GPIO_FN(MCP_D3_MCP_NAF3), \
+ GPIO_FN(MMCD1_3),
+ GPIO_FN(MCP_D2_MCP_NAF2), \
+ GPIO_FN(MMCD1_2),
+ GPIO_FN(MCP_D1_MCP_NAF1), \
+ GPIO_FN(MMCD1_1),
+ GPIO_FN(MCP_D0_MCP_NAF0), \
+ GPIO_FN(MMCD1_0),
+ GPIO_FN(MCP_NBRSTOUT_),
+ GPIO_FN(MCP_WE0__MCP_FWE), \
+ GPIO_FN(MCP_RDWR_MCP_FWE),
+
+ /* MSEL2 special cases */
+ GPIO_FN(TSIF2_TS_XX1),
+ GPIO_FN(TSIF2_TS_XX2),
+ GPIO_FN(TSIF2_TS_XX3),
+ GPIO_FN(TSIF2_TS_XX4),
+ GPIO_FN(TSIF2_TS_XX5),
+ GPIO_FN(TSIF1_TS_XX1),
+ GPIO_FN(TSIF1_TS_XX2),
+ GPIO_FN(TSIF1_TS_XX3),
+ GPIO_FN(TSIF1_TS_XX4),
+ GPIO_FN(TSIF1_TS_XX5),
+ GPIO_FN(TSIF0_TS_XX1),
+ GPIO_FN(TSIF0_TS_XX2),
+ GPIO_FN(TSIF0_TS_XX3),
+ GPIO_FN(TSIF0_TS_XX4),
+ GPIO_FN(TSIF0_TS_XX5),
+ GPIO_FN(MST1_TS_XX1),
+ GPIO_FN(MST1_TS_XX2),
+ GPIO_FN(MST1_TS_XX3),
+ GPIO_FN(MST1_TS_XX4),
+ GPIO_FN(MST1_TS_XX5),
+ GPIO_FN(MST0_TS_XX1),
+ GPIO_FN(MST0_TS_XX2),
+ GPIO_FN(MST0_TS_XX3),
+ GPIO_FN(MST0_TS_XX4),
+ GPIO_FN(MST0_TS_XX5),
+
+ /* MSEL3 special cases */
+ GPIO_FN(SDHI0_VCCQ_MC0_ON),
+ GPIO_FN(SDHI0_VCCQ_MC0_OFF),
+ GPIO_FN(DEBUG_MON_VIO),
+ GPIO_FN(DEBUG_MON_LCDD),
+ GPIO_FN(LCDC_LCDC0),
+ GPIO_FN(LCDC_LCDC1),
+
+ /* MSEL4 special cases */
+ GPIO_FN(IRQ9_MEM_INT),
+ GPIO_FN(IRQ9_MCP_INT),
+ GPIO_FN(A11),
+ GPIO_FN(KEYOUT8),
+ GPIO_FN(TPU4TO3),
+ GPIO_FN(RESETA_N_PU_ON),
+ GPIO_FN(RESETA_N_PU_OFF),
+ GPIO_FN(EDBGREQ_PD),
+ GPIO_FN(EDBGREQ_PU),
+
+ /* Functions with pull-ups */
+ GPIO_FN(KEYIN0_PU),
+ GPIO_FN(KEYIN1_PU),
+ GPIO_FN(KEYIN2_PU),
+ GPIO_FN(KEYIN3_PU),
+ GPIO_FN(KEYIN4_PU),
+ GPIO_FN(KEYIN5_PU),
+ GPIO_FN(KEYIN6_PU),
+ GPIO_FN(KEYIN7_PU),
+ GPIO_FN(SDHICD0_PU),
+ GPIO_FN(SDHID0_0_PU),
+ GPIO_FN(SDHID0_1_PU),
+ GPIO_FN(SDHID0_2_PU),
+ GPIO_FN(SDHID0_3_PU),
+ GPIO_FN(SDHICMD0_PU),
+ GPIO_FN(SDHIWP0_PU),
+ GPIO_FN(SDHID1_0_PU),
+ GPIO_FN(SDHID1_1_PU),
+ GPIO_FN(SDHID1_2_PU),
+ GPIO_FN(SDHID1_3_PU),
+ GPIO_FN(SDHICMD1_PU),
+ GPIO_FN(SDHID2_0_PU),
+ GPIO_FN(SDHID2_1_PU),
+ GPIO_FN(SDHID2_2_PU),
+ GPIO_FN(SDHID2_3_PU),
+ GPIO_FN(SDHICMD2_PU),
+ GPIO_FN(MMCCMD0_PU),
+ GPIO_FN(MMCCMD1_PU),
+ GPIO_FN(MMCD0_0_PU),
+ GPIO_FN(MMCD0_1_PU),
+ GPIO_FN(MMCD0_2_PU),
+ GPIO_FN(MMCD0_3_PU),
+ GPIO_FN(MMCD0_4_PU),
+ GPIO_FN(MMCD0_5_PU),
+ GPIO_FN(MMCD0_6_PU),
+ GPIO_FN(MMCD0_7_PU),
+ GPIO_FN(FSIACK_PU),
+ GPIO_FN(FSIAILR_PU),
+ GPIO_FN(FSIAIBT_PU),
+ GPIO_FN(FSIAISLD_PU),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ PORTCR(0, 0xe6050000), /* PORT0CR */
+ PORTCR(1, 0xe6050001), /* PORT1CR */
+ PORTCR(2, 0xe6050002), /* PORT2CR */
+ PORTCR(3, 0xe6050003), /* PORT3CR */
+ PORTCR(4, 0xe6050004), /* PORT4CR */
+ PORTCR(5, 0xe6050005), /* PORT5CR */
+ PORTCR(6, 0xe6050006), /* PORT6CR */
+ PORTCR(7, 0xe6050007), /* PORT7CR */
+ PORTCR(8, 0xe6050008), /* PORT8CR */
+ PORTCR(9, 0xe6050009), /* PORT9CR */
+
+ PORTCR(10, 0xe605000a), /* PORT10CR */
+ PORTCR(11, 0xe605000b), /* PORT11CR */
+ PORTCR(12, 0xe605000c), /* PORT12CR */
+ PORTCR(13, 0xe605000d), /* PORT13CR */
+ PORTCR(14, 0xe605000e), /* PORT14CR */
+ PORTCR(15, 0xe605000f), /* PORT15CR */
+ PORTCR(16, 0xe6050010), /* PORT16CR */
+ PORTCR(17, 0xe6050011), /* PORT17CR */
+ PORTCR(18, 0xe6050012), /* PORT18CR */
+ PORTCR(19, 0xe6050013), /* PORT19CR */
+
+ PORTCR(20, 0xe6050014), /* PORT20CR */
+ PORTCR(21, 0xe6050015), /* PORT21CR */
+ PORTCR(22, 0xe6050016), /* PORT22CR */
+ PORTCR(23, 0xe6050017), /* PORT23CR */
+ PORTCR(24, 0xe6050018), /* PORT24CR */
+ PORTCR(25, 0xe6050019), /* PORT25CR */
+ PORTCR(26, 0xe605001a), /* PORT26CR */
+ PORTCR(27, 0xe605001b), /* PORT27CR */
+ PORTCR(28, 0xe605001c), /* PORT28CR */
+ PORTCR(29, 0xe605001d), /* PORT29CR */
+
+ PORTCR(30, 0xe605001e), /* PORT30CR */
+ PORTCR(31, 0xe605001f), /* PORT31CR */
+ PORTCR(32, 0xe6051020), /* PORT32CR */
+ PORTCR(33, 0xe6051021), /* PORT33CR */
+ PORTCR(34, 0xe6051022), /* PORT34CR */
+ PORTCR(35, 0xe6051023), /* PORT35CR */
+ PORTCR(36, 0xe6051024), /* PORT36CR */
+ PORTCR(37, 0xe6051025), /* PORT37CR */
+ PORTCR(38, 0xe6051026), /* PORT38CR */
+ PORTCR(39, 0xe6051027), /* PORT39CR */
+
+ PORTCR(40, 0xe6051028), /* PORT40CR */
+ PORTCR(41, 0xe6051029), /* PORT41CR */
+ PORTCR(42, 0xe605102a), /* PORT42CR */
+ PORTCR(43, 0xe605102b), /* PORT43CR */
+ PORTCR(44, 0xe605102c), /* PORT44CR */
+ PORTCR(45, 0xe605102d), /* PORT45CR */
+ PORTCR(46, 0xe605102e), /* PORT46CR */
+ PORTCR(47, 0xe605102f), /* PORT47CR */
+ PORTCR(48, 0xe6051030), /* PORT48CR */
+ PORTCR(49, 0xe6051031), /* PORT49CR */
+
+ PORTCR(50, 0xe6051032), /* PORT50CR */
+ PORTCR(51, 0xe6051033), /* PORT51CR */
+ PORTCR(52, 0xe6051034), /* PORT52CR */
+ PORTCR(53, 0xe6051035), /* PORT53CR */
+ PORTCR(54, 0xe6051036), /* PORT54CR */
+ PORTCR(55, 0xe6051037), /* PORT55CR */
+ PORTCR(56, 0xe6051038), /* PORT56CR */
+ PORTCR(57, 0xe6051039), /* PORT57CR */
+ PORTCR(58, 0xe605103a), /* PORT58CR */
+ PORTCR(59, 0xe605103b), /* PORT59CR */
+
+ PORTCR(60, 0xe605103c), /* PORT60CR */
+ PORTCR(61, 0xe605103d), /* PORT61CR */
+ PORTCR(62, 0xe605103e), /* PORT62CR */
+ PORTCR(63, 0xe605103f), /* PORT63CR */
+ PORTCR(64, 0xe6051040), /* PORT64CR */
+ PORTCR(65, 0xe6051041), /* PORT65CR */
+ PORTCR(66, 0xe6051042), /* PORT66CR */
+ PORTCR(67, 0xe6051043), /* PORT67CR */
+ PORTCR(68, 0xe6051044), /* PORT68CR */
+ PORTCR(69, 0xe6051045), /* PORT69CR */
+
+ PORTCR(70, 0xe6051046), /* PORT70CR */
+ PORTCR(71, 0xe6051047), /* PORT71CR */
+ PORTCR(72, 0xe6051048), /* PORT72CR */
+ PORTCR(73, 0xe6051049), /* PORT73CR */
+ PORTCR(74, 0xe605104a), /* PORT74CR */
+ PORTCR(75, 0xe605104b), /* PORT75CR */
+ PORTCR(76, 0xe605104c), /* PORT76CR */
+ PORTCR(77, 0xe605104d), /* PORT77CR */
+ PORTCR(78, 0xe605104e), /* PORT78CR */
+ PORTCR(79, 0xe605104f), /* PORT79CR */
+
+ PORTCR(80, 0xe6051050), /* PORT80CR */
+ PORTCR(81, 0xe6051051), /* PORT81CR */
+ PORTCR(82, 0xe6051052), /* PORT82CR */
+ PORTCR(83, 0xe6051053), /* PORT83CR */
+ PORTCR(84, 0xe6051054), /* PORT84CR */
+ PORTCR(85, 0xe6051055), /* PORT85CR */
+ PORTCR(86, 0xe6051056), /* PORT86CR */
+ PORTCR(87, 0xe6051057), /* PORT87CR */
+ PORTCR(88, 0xe6051058), /* PORT88CR */
+ PORTCR(89, 0xe6051059), /* PORT89CR */
+
+ PORTCR(90, 0xe605105a), /* PORT90CR */
+ PORTCR(91, 0xe605105b), /* PORT91CR */
+ PORTCR(92, 0xe605105c), /* PORT92CR */
+ PORTCR(93, 0xe605105d), /* PORT93CR */
+ PORTCR(94, 0xe605105e), /* PORT94CR */
+ PORTCR(95, 0xe605105f), /* PORT95CR */
+ PORTCR(96, 0xe6052060), /* PORT96CR */
+ PORTCR(97, 0xe6052061), /* PORT97CR */
+ PORTCR(98, 0xe6052062), /* PORT98CR */
+ PORTCR(99, 0xe6052063), /* PORT99CR */
+
+ PORTCR(100, 0xe6052064), /* PORT100CR */
+ PORTCR(101, 0xe6052065), /* PORT101CR */
+ PORTCR(102, 0xe6052066), /* PORT102CR */
+ PORTCR(103, 0xe6052067), /* PORT103CR */
+ PORTCR(104, 0xe6052068), /* PORT104CR */
+ PORTCR(105, 0xe6052069), /* PORT105CR */
+ PORTCR(106, 0xe605206a), /* PORT106CR */
+ PORTCR(107, 0xe605206b), /* PORT107CR */
+ PORTCR(108, 0xe605206c), /* PORT108CR */
+ PORTCR(109, 0xe605206d), /* PORT109CR */
+
+ PORTCR(110, 0xe605206e), /* PORT110CR */
+ PORTCR(111, 0xe605206f), /* PORT111CR */
+ PORTCR(112, 0xe6052070), /* PORT112CR */
+ PORTCR(113, 0xe6052071), /* PORT113CR */
+ PORTCR(114, 0xe6052072), /* PORT114CR */
+ PORTCR(115, 0xe6052073), /* PORT115CR */
+ PORTCR(116, 0xe6052074), /* PORT116CR */
+ PORTCR(117, 0xe6052075), /* PORT117CR */
+ PORTCR(118, 0xe6052076), /* PORT118CR */
+
+ PORTCR(128, 0xe6052080), /* PORT128CR */
+ PORTCR(129, 0xe6052081), /* PORT129CR */
+
+ PORTCR(130, 0xe6052082), /* PORT130CR */
+ PORTCR(131, 0xe6052083), /* PORT131CR */
+ PORTCR(132, 0xe6052084), /* PORT132CR */
+ PORTCR(133, 0xe6052085), /* PORT133CR */
+ PORTCR(134, 0xe6052086), /* PORT134CR */
+ PORTCR(135, 0xe6052087), /* PORT135CR */
+ PORTCR(136, 0xe6052088), /* PORT136CR */
+ PORTCR(137, 0xe6052089), /* PORT137CR */
+ PORTCR(138, 0xe605208a), /* PORT138CR */
+ PORTCR(139, 0xe605208b), /* PORT139CR */
+
+ PORTCR(140, 0xe605208c), /* PORT140CR */
+ PORTCR(141, 0xe605208d), /* PORT141CR */
+ PORTCR(142, 0xe605208e), /* PORT142CR */
+ PORTCR(143, 0xe605208f), /* PORT143CR */
+ PORTCR(144, 0xe6052090), /* PORT144CR */
+ PORTCR(145, 0xe6052091), /* PORT145CR */
+ PORTCR(146, 0xe6052092), /* PORT146CR */
+ PORTCR(147, 0xe6052093), /* PORT147CR */
+ PORTCR(148, 0xe6052094), /* PORT148CR */
+ PORTCR(149, 0xe6052095), /* PORT149CR */
+
+ PORTCR(150, 0xe6052096), /* PORT150CR */
+ PORTCR(151, 0xe6052097), /* PORT151CR */
+ PORTCR(152, 0xe6052098), /* PORT152CR */
+ PORTCR(153, 0xe6052099), /* PORT153CR */
+ PORTCR(154, 0xe605209a), /* PORT154CR */
+ PORTCR(155, 0xe605209b), /* PORT155CR */
+ PORTCR(156, 0xe605209c), /* PORT156CR */
+ PORTCR(157, 0xe605209d), /* PORT157CR */
+ PORTCR(158, 0xe605209e), /* PORT158CR */
+ PORTCR(159, 0xe605209f), /* PORT159CR */
+
+ PORTCR(160, 0xe60520a0), /* PORT160CR */
+ PORTCR(161, 0xe60520a1), /* PORT161CR */
+ PORTCR(162, 0xe60520a2), /* PORT162CR */
+ PORTCR(163, 0xe60520a3), /* PORT163CR */
+ PORTCR(164, 0xe60520a4), /* PORT164CR */
+
+ PORTCR(192, 0xe60520c0), /* PORT192CR */
+ PORTCR(193, 0xe60520c1), /* PORT193CR */
+ PORTCR(194, 0xe60520c2), /* PORT194CR */
+ PORTCR(195, 0xe60520c3), /* PORT195CR */
+ PORTCR(196, 0xe60520c4), /* PORT196CR */
+ PORTCR(197, 0xe60520c5), /* PORT197CR */
+ PORTCR(198, 0xe60520c6), /* PORT198CR */
+ PORTCR(199, 0xe60520c7), /* PORT199CR */
+
+ PORTCR(200, 0xe60520c8), /* PORT200CR */
+ PORTCR(201, 0xe60520c9), /* PORT201CR */
+ PORTCR(202, 0xe60520ca), /* PORT202CR */
+ PORTCR(203, 0xe60520cb), /* PORT203CR */
+ PORTCR(204, 0xe60520cc), /* PORT204CR */
+ PORTCR(205, 0xe60520cd), /* PORT205CR */
+ PORTCR(206, 0xe60520ce), /* PORT206CR */
+ PORTCR(207, 0xe60520cf), /* PORT207CR */
+ PORTCR(208, 0xe60520d0), /* PORT208CR */
+ PORTCR(209, 0xe60520d1), /* PORT209CR */
+
+ PORTCR(210, 0xe60520d2), /* PORT210CR */
+ PORTCR(211, 0xe60520d3), /* PORT211CR */
+ PORTCR(212, 0xe60520d4), /* PORT212CR */
+ PORTCR(213, 0xe60520d5), /* PORT213CR */
+ PORTCR(214, 0xe60520d6), /* PORT214CR */
+ PORTCR(215, 0xe60520d7), /* PORT215CR */
+ PORTCR(216, 0xe60520d8), /* PORT216CR */
+ PORTCR(217, 0xe60520d9), /* PORT217CR */
+ PORTCR(218, 0xe60520da), /* PORT218CR */
+ PORTCR(219, 0xe60520db), /* PORT219CR */
+
+ PORTCR(220, 0xe60520dc), /* PORT220CR */
+ PORTCR(221, 0xe60520dd), /* PORT221CR */
+ PORTCR(222, 0xe60520de), /* PORT222CR */
+ PORTCR(223, 0xe60520df), /* PORT223CR */
+ PORTCR(224, 0xe60530e0), /* PORT224CR */
+ PORTCR(225, 0xe60530e1), /* PORT225CR */
+ PORTCR(226, 0xe60530e2), /* PORT226CR */
+ PORTCR(227, 0xe60530e3), /* PORT227CR */
+ PORTCR(228, 0xe60530e4), /* PORT228CR */
+ PORTCR(229, 0xe60530e5), /* PORT229CR */
+
+ PORTCR(230, 0xe60530e6), /* PORT230CR */
+ PORTCR(231, 0xe60530e7), /* PORT231CR */
+ PORTCR(232, 0xe60530e8), /* PORT232CR */
+ PORTCR(233, 0xe60530e9), /* PORT233CR */
+ PORTCR(234, 0xe60530ea), /* PORT234CR */
+ PORTCR(235, 0xe60530eb), /* PORT235CR */
+ PORTCR(236, 0xe60530ec), /* PORT236CR */
+ PORTCR(237, 0xe60530ed), /* PORT237CR */
+ PORTCR(238, 0xe60530ee), /* PORT238CR */
+ PORTCR(239, 0xe60530ef), /* PORT239CR */
+
+ PORTCR(240, 0xe60530f0), /* PORT240CR */
+ PORTCR(241, 0xe60530f1), /* PORT241CR */
+ PORTCR(242, 0xe60530f2), /* PORT242CR */
+ PORTCR(243, 0xe60530f3), /* PORT243CR */
+ PORTCR(244, 0xe60530f4), /* PORT244CR */
+ PORTCR(245, 0xe60530f5), /* PORT245CR */
+ PORTCR(246, 0xe60530f6), /* PORT246CR */
+ PORTCR(247, 0xe60530f7), /* PORT247CR */
+ PORTCR(248, 0xe60530f8), /* PORT248CR */
+ PORTCR(249, 0xe60530f9), /* PORT249CR */
+
+ PORTCR(250, 0xe60530fa), /* PORT250CR */
+ PORTCR(251, 0xe60530fb), /* PORT251CR */
+ PORTCR(252, 0xe60530fc), /* PORT252CR */
+ PORTCR(253, 0xe60530fd), /* PORT253CR */
+ PORTCR(254, 0xe60530fe), /* PORT254CR */
+ PORTCR(255, 0xe60530ff), /* PORT255CR */
+ PORTCR(256, 0xe6053100), /* PORT256CR */
+ PORTCR(257, 0xe6053101), /* PORT257CR */
+ PORTCR(258, 0xe6053102), /* PORT258CR */
+ PORTCR(259, 0xe6053103), /* PORT259CR */
+
+ PORTCR(260, 0xe6053104), /* PORT260CR */
+ PORTCR(261, 0xe6053105), /* PORT261CR */
+ PORTCR(262, 0xe6053106), /* PORT262CR */
+ PORTCR(263, 0xe6053107), /* PORT263CR */
+ PORTCR(264, 0xe6053108), /* PORT264CR */
+ PORTCR(265, 0xe6053109), /* PORT265CR */
+ PORTCR(266, 0xe605310a), /* PORT266CR */
+ PORTCR(267, 0xe605310b), /* PORT267CR */
+ PORTCR(268, 0xe605310c), /* PORT268CR */
+ PORTCR(269, 0xe605310d), /* PORT269CR */
+
+ PORTCR(270, 0xe605310e), /* PORT270CR */
+ PORTCR(271, 0xe605310f), /* PORT271CR */
+ PORTCR(272, 0xe6053110), /* PORT272CR */
+ PORTCR(273, 0xe6053111), /* PORT273CR */
+ PORTCR(274, 0xe6053112), /* PORT274CR */
+ PORTCR(275, 0xe6053113), /* PORT275CR */
+ PORTCR(276, 0xe6053114), /* PORT276CR */
+ PORTCR(277, 0xe6053115), /* PORT277CR */
+ PORTCR(278, 0xe6053116), /* PORT278CR */
+ PORTCR(279, 0xe6053117), /* PORT279CR */
+
+ PORTCR(280, 0xe6053118), /* PORT280CR */
+ PORTCR(281, 0xe6053119), /* PORT281CR */
+ PORTCR(282, 0xe605311a), /* PORT282CR */
+
+ PORTCR(288, 0xe6052120), /* PORT288CR */
+ PORTCR(289, 0xe6052121), /* PORT289CR */
+
+ PORTCR(290, 0xe6052122), /* PORT290CR */
+ PORTCR(291, 0xe6052123), /* PORT291CR */
+ PORTCR(292, 0xe6052124), /* PORT292CR */
+ PORTCR(293, 0xe6052125), /* PORT293CR */
+ PORTCR(294, 0xe6052126), /* PORT294CR */
+ PORTCR(295, 0xe6052127), /* PORT295CR */
+ PORTCR(296, 0xe6052128), /* PORT296CR */
+ PORTCR(297, 0xe6052129), /* PORT297CR */
+ PORTCR(298, 0xe605212a), /* PORT298CR */
+ PORTCR(299, 0xe605212b), /* PORT299CR */
+
+ PORTCR(300, 0xe605212c), /* PORT300CR */
+ PORTCR(301, 0xe605212d), /* PORT301CR */
+ PORTCR(302, 0xe605212e), /* PORT302CR */
+ PORTCR(303, 0xe605212f), /* PORT303CR */
+ PORTCR(304, 0xe6052130), /* PORT304CR */
+ PORTCR(305, 0xe6052131), /* PORT305CR */
+ PORTCR(306, 0xe6052132), /* PORT306CR */
+ PORTCR(307, 0xe6052133), /* PORT307CR */
+ PORTCR(308, 0xe6052134), /* PORT308CR */
+ PORTCR(309, 0xe6052135), /* PORT309CR */
+
+ { PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
+ MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
+ MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
+ MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
+ 0, 0,
+ MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
+ MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
+ MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1,
+ MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1,
+ MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1,
+ MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1,
+ MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1,
+ MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1,
+ MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
+ MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
+ 0, 0,
+ MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
+ 0, 0,
+ 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) {
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
+ 0, 0,
+ MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
+ MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
+ MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
+ MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
+ 0, 0,
+ MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
+ MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
+ MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
+ MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1,
+ MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
+ MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
+ MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
+ 0, 0,
+ }
+ },
+ { },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) {
+ PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+ PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+ PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+ PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+ PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+ PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+ PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
+ },
+ { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) {
+ PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
+ PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
+ PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
+ PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
+ PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
+ PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
+ PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
+ },
+ { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055004, 32) {
+ PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
+ PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
+ PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
+ PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+ PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+ PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+ PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR127_096DR", 0xe6056000, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+ PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+ PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+ PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+ PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056004, 32) {
+ PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
+ PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
+ PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
+ PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
+ PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
+ PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
+ PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056008, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PORT164_DATA,
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe605600C, 32) {
+ PORT223_DATA, PORT222_DATA, PORT221_DATA, PORT220_DATA,
+ PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA,
+ PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA,
+ PORT211_DATA, PORT210_DATA, PORT209_DATA, PORT208_DATA,
+ PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
+ PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
+ PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
+ PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
+ },
+ { PINMUX_DATA_REG("PORTU255_224DR", 0xe6057000, 32) {
+ PORT255_DATA, PORT254_DATA, PORT253_DATA, PORT252_DATA,
+ PORT251_DATA, PORT250_DATA, PORT249_DATA, PORT248_DATA,
+ PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA,
+ PORT243_DATA, PORT242_DATA, PORT241_DATA, PORT240_DATA,
+ PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA,
+ PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA,
+ PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA,
+ PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA }
+ },
+ { PINMUX_DATA_REG("PORTU287_256DR", 0xe6057004, 32) {
+ 0, 0, 0, 0,
+ 0, PORT282_DATA, PORT281_DATA, PORT280_DATA,
+ PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA,
+ PORT275_DATA, PORT274_DATA, PORT273_DATA, PORT272_DATA,
+ PORT271_DATA, PORT270_DATA, PORT269_DATA, PORT268_DATA,
+ PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA,
+ PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA,
+ PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR319_288DR", 0xe6056010, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, PORT309_DATA, PORT308_DATA,
+ PORT307_DATA, PORT306_DATA, PORT305_DATA, PORT304_DATA,
+ PORT303_DATA, PORT302_DATA, PORT301_DATA, PORT300_DATA,
+ PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA,
+ PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA,
+ PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA }
+ },
+ { },
+};
+
+ /* IRQ pins through INTCS with IRQ0-15 from 0x200 and IRQ16-31 from 0x3200 */
+#define EXT_IRQ16L(n) intcs_evt2irq(0x200 + ((n) << 5))
+#define EXT_IRQ16H(n) intcs_evt2irq(0x3200 + ((n - 16) << 5))
+
+static struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(EXT_IRQ16H(19), PORT9_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(1), PORT10_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(0), PORT11_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(18), PORT13_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(20), PORT14_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(21), PORT15_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(31), PORT26_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(30), PORT27_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(29), PORT28_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(22), PORT40_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(23), PORT53_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(10), PORT54_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(9), PORT56_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(26), PORT115_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(27), PORT116_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(28), PORT117_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(24), PORT118_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(6), PORT147_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(2), PORT149_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(7), PORT150_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(12), PORT156_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(4), PORT159_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(25), PORT164_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(8), PORT223_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(3), PORT224_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(5), PORT227_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(17), PORT234_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(11), PORT238_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(13), PORT239_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(16), PORT249_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(14), PORT251_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(9), PORT308_FN0),
+};
+
+struct sh_pfc_soc_info sh73a0_pinmux_info = {
+ .name = "sh73a0_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PORT0,
+ .last_gpio = GPIO_FN_FSIAISLD_PU,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+
+ .gpio_irq = pinmux_irqs,
+ .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
new file mode 100644
index 000000000000..10872ed688a6
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
@@ -0,0 +1,1236 @@
+/*
+ * SH7720 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7720.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTG6_IN, PTG5_IN, PTG4_IN,
+ PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
+ PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ6_IN, PTJ5_IN, PTJ4_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTP4_IN, PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS4_IN, PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
+ PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
+ PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
+ PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF0_IN_PU,
+ PTG6_IN_PU, PTG5_IN_PU, PTG4_IN_PU,
+ PTG3_IN_PU, PTG2_IN_PU, PTG1_IN_PU, PTG0_IN_PU,
+ PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
+ PTP4_IN_PU, PTP3_IN_PU, PTP2_IN_PU, PTP1_IN_PU, PTP0_IN_PU,
+ PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
+ PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
+ PTS4_IN_PU, PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
+ PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF0_OUT,
+ PTG6_OUT, PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTP4_OUT, PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
+ PTS4_OUT, PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG6_FN, PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ6_FN, PTJ5_FN, PTJ4_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN, PTL3_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTP4_FN, PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS4_FN, PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV4_FN, PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+
+ PSELA_1_0_00, PSELA_1_0_01, PSELA_1_0_10,
+ PSELA_3_2_00, PSELA_3_2_01, PSELA_3_2_10, PSELA_3_2_11,
+ PSELA_5_4_00, PSELA_5_4_01, PSELA_5_4_10, PSELA_5_4_11,
+ PSELA_7_6_00, PSELA_7_6_01, PSELA_7_6_10,
+ PSELA_9_8_00, PSELA_9_8_01, PSELA_9_8_10,
+ PSELA_11_10_00, PSELA_11_10_01, PSELA_11_10_10,
+ PSELA_13_12_00, PSELA_13_12_10,
+ PSELA_15_14_00, PSELA_15_14_10,
+ PSELB_9_8_00, PSELB_9_8_11,
+ PSELB_11_10_00, PSELB_11_10_01, PSELB_11_10_10, PSELB_11_10_11,
+ PSELB_13_12_00, PSELB_13_12_01, PSELB_13_12_10, PSELB_13_12_11,
+ PSELB_15_14_00, PSELB_15_14_11,
+ PSELC_9_8_00, PSELC_9_8_10,
+ PSELC_11_10_00, PSELC_11_10_10,
+ PSELC_13_12_00, PSELC_13_12_01, PSELC_13_12_10,
+ PSELC_15_14_00, PSELC_15_14_01, PSELC_15_14_10,
+ PSELD_1_0_00, PSELD_1_0_10,
+ PSELD_11_10_00, PSELD_11_10_01,
+ PSELD_15_14_00, PSELD_15_14_01, PSELD_15_14_10,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ IOIS16_MARK, RAS_MARK, CAS_MARK, CKE_MARK,
+ CS5B_CE1A_MARK, CS6B_CE1B_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ A21_MARK, A20_MARK, A19_MARK, A0_MARK,
+ REFOUT_MARK, IRQOUT_MARK,
+ LCD_DATA15_MARK, LCD_DATA14_MARK,
+ LCD_DATA13_MARK, LCD_DATA12_MARK,
+ LCD_DATA11_MARK, LCD_DATA10_MARK,
+ LCD_DATA9_MARK, LCD_DATA8_MARK,
+ LCD_DATA7_MARK, LCD_DATA6_MARK,
+ LCD_DATA5_MARK, LCD_DATA4_MARK,
+ LCD_DATA3_MARK, LCD_DATA2_MARK,
+ LCD_DATA1_MARK, LCD_DATA0_MARK,
+ LCD_M_DISP_MARK,
+ LCD_CL1_MARK, LCD_CL2_MARK,
+ LCD_DON_MARK, LCD_FLM_MARK,
+ LCD_VEPWC_MARK, LCD_VCPWC_MARK,
+ AFE_RXIN_MARK, AFE_RDET_MARK,
+ AFE_FS_MARK, AFE_TXOUT_MARK,
+ AFE_SCLK_MARK, AFE_RLYCNT_MARK,
+ AFE_HC1_MARK,
+ IIC_SCL_MARK, IIC_SDA_MARK,
+ DA1_MARK, DA0_MARK,
+ AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
+ USB1D_RCV_MARK, USB1D_TXSE0_MARK,
+ USB1D_TXDPLS_MARK, USB1D_DMNS_MARK,
+ USB1D_DPLS_MARK, USB1D_SPEED_MARK,
+ USB1D_TXENL_MARK,
+ USB2_PWR_EN_MARK, USB1_PWR_EN_USBF_UPLUP_MARK, USB1D_SUSPEND_MARK,
+ IRQ5_MARK, IRQ4_MARK,
+ IRQ3_IRL3_MARK, IRQ2_IRL2_MARK,
+ IRQ1_IRL1_MARK, IRQ0_IRL0_MARK,
+ PCC_REG_MARK, PCC_DRV_MARK,
+ PCC_BVD2_MARK, PCC_BVD1_MARK,
+ PCC_CD2_MARK, PCC_CD1_MARK,
+ PCC_RESET_MARK, PCC_RDY_MARK,
+ PCC_VS2_MARK, PCC_VS1_MARK,
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ AUDCK_MARK, AUDSYNC_MARK, ASEBRKAK_MARK, TRST_MARK,
+ TMS_MARK, TDO_MARK, TDI_MARK, TCK_MARK,
+ DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
+ TEND1_MARK, TEND0_MARK,
+ SIOF0_SYNC_MARK, SIOF0_MCLK_MARK,
+ SIOF0_TXD_MARK, SIOF0_RXD_MARK,
+ SIOF0_SCK_MARK,
+ SIOF1_SYNC_MARK, SIOF1_MCLK_MARK,
+ SIOF1_TXD_MARK, SIOF1_RXD_MARK,
+ SIOF1_SCK_MARK,
+ SCIF0_TXD_MARK, SCIF0_RXD_MARK,
+ SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK,
+ SCIF1_TXD_MARK, SCIF1_RXD_MARK,
+ SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK,
+ TPU_TO1_MARK, TPU_TO0_MARK,
+ TPU_TI3B_MARK, TPU_TI3A_MARK,
+ TPU_TI2B_MARK, TPU_TI2A_MARK,
+ TPU_TO3_MARK, TPU_TO2_MARK,
+ SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+ MMC_DAT_MARK, MMC_CMD_MARK,
+ MMC_CLK_MARK, MMC_VDDON_MARK,
+ MMC_ODMOD_MARK,
+ STATUS0_MARK, STATUS1_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE6_DATA, PTE6_IN),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF6_DATA, PTF6_IN),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT, PTG6_IN_PU),
+ PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT, PTG5_IN_PU),
+ PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT, PTG4_IN_PU),
+ PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT, PTG3_IN_PU),
+ PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT, PTG2_IN_PU),
+ PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT, PTG1_IN_PU),
+ PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT, PTG0_IN_PU),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT, PTJ6_IN_PU),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT, PTJ5_IN_PU),
+ PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT, PTJ4_IN_PU),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+
+ /* PTP GPIO */
+ PINMUX_DATA(PTP4_DATA, PTP4_IN, PTP4_OUT, PTP4_IN_PU),
+ PINMUX_DATA(PTP3_DATA, PTP3_IN, PTP3_OUT, PTP3_IN_PU),
+ PINMUX_DATA(PTP2_DATA, PTP2_IN, PTP2_OUT, PTP2_IN_PU),
+ PINMUX_DATA(PTP1_DATA, PTP1_IN, PTP1_OUT, PTP1_IN_PU),
+ PINMUX_DATA(PTP0_DATA, PTP0_IN, PTP0_OUT, PTP0_IN_PU),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT, PTR3_IN_PU),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(LCD_DATA7_MARK, PTC7_FN),
+ PINMUX_DATA(LCD_DATA6_MARK, PTC6_FN),
+ PINMUX_DATA(LCD_DATA5_MARK, PTC5_FN),
+ PINMUX_DATA(LCD_DATA4_MARK, PTC4_FN),
+ PINMUX_DATA(LCD_DATA3_MARK, PTC3_FN),
+ PINMUX_DATA(LCD_DATA2_MARK, PTC2_FN),
+ PINMUX_DATA(LCD_DATA1_MARK, PTC1_FN),
+ PINMUX_DATA(LCD_DATA0_MARK, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(LCD_DATA15_MARK, PTD7_FN),
+ PINMUX_DATA(LCD_DATA14_MARK, PTD6_FN),
+ PINMUX_DATA(LCD_DATA13_MARK, PTD5_FN),
+ PINMUX_DATA(LCD_DATA12_MARK, PTD4_FN),
+ PINMUX_DATA(LCD_DATA11_MARK, PTD3_FN),
+ PINMUX_DATA(LCD_DATA10_MARK, PTD2_FN),
+ PINMUX_DATA(LCD_DATA9_MARK, PTD1_FN),
+ PINMUX_DATA(LCD_DATA8_MARK, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(IIC_SCL_MARK, PSELB_9_8_00, PTE6_FN),
+ PINMUX_DATA(AFE_RXIN_MARK, PSELB_9_8_11, PTE6_FN),
+ PINMUX_DATA(IIC_SDA_MARK, PSELB_9_8_00, PTE5_FN),
+ PINMUX_DATA(AFE_RDET_MARK, PSELB_9_8_11, PTE5_FN),
+ PINMUX_DATA(LCD_M_DISP_MARK, PTE4_FN),
+ PINMUX_DATA(LCD_CL1_MARK, PTE3_FN),
+ PINMUX_DATA(LCD_CL2_MARK, PTE2_FN),
+ PINMUX_DATA(LCD_DON_MARK, PTE1_FN),
+ PINMUX_DATA(LCD_FLM_MARK, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(DA1_MARK, PTF6_FN),
+ PINMUX_DATA(DA0_MARK, PTF5_FN),
+ PINMUX_DATA(AN3_MARK, PTF4_FN),
+ PINMUX_DATA(AN2_MARK, PTF3_FN),
+ PINMUX_DATA(AN1_MARK, PTF2_FN),
+ PINMUX_DATA(AN0_MARK, PTF1_FN),
+ PINMUX_DATA(ADTRG_MARK, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(USB1D_RCV_MARK, PSELA_3_2_00, PTG6_FN),
+ PINMUX_DATA(AFE_FS_MARK, PSELA_3_2_01, PTG6_FN),
+ PINMUX_DATA(PCC_REG_MARK, PSELA_3_2_10, PTG6_FN),
+ PINMUX_DATA(IRQ5_MARK, PSELA_3_2_11, PTG6_FN),
+ PINMUX_DATA(USB1D_TXSE0_MARK, PSELA_5_4_00, PTG5_FN),
+ PINMUX_DATA(AFE_TXOUT_MARK, PSELA_5_4_01, PTG5_FN),
+ PINMUX_DATA(PCC_DRV_MARK, PSELA_5_4_10, PTG5_FN),
+ PINMUX_DATA(IRQ4_MARK, PSELA_5_4_11, PTG5_FN),
+ PINMUX_DATA(USB1D_TXDPLS_MARK, PSELA_7_6_00, PTG4_FN),
+ PINMUX_DATA(AFE_SCLK_MARK, PSELA_7_6_01, PTG4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSELA_7_6_10, PTG4_FN),
+ PINMUX_DATA(USB1D_DMNS_MARK, PSELA_9_8_00, PTG3_FN),
+ PINMUX_DATA(AFE_RLYCNT_MARK, PSELA_9_8_01, PTG3_FN),
+ PINMUX_DATA(PCC_BVD2_MARK, PSELA_9_8_10, PTG3_FN),
+ PINMUX_DATA(USB1D_DPLS_MARK, PSELA_11_10_00, PTG2_FN),
+ PINMUX_DATA(AFE_HC1_MARK, PSELA_11_10_01, PTG2_FN),
+ PINMUX_DATA(PCC_BVD1_MARK, PSELA_11_10_10, PTG2_FN),
+ PINMUX_DATA(USB1D_SPEED_MARK, PSELA_13_12_00, PTG1_FN),
+ PINMUX_DATA(PCC_CD2_MARK, PSELA_13_12_10, PTG1_FN),
+ PINMUX_DATA(USB1D_TXENL_MARK, PSELA_15_14_00, PTG0_FN),
+ PINMUX_DATA(PCC_CD1_MARK, PSELA_15_14_10, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(RAS_MARK, PTH6_FN),
+ PINMUX_DATA(CAS_MARK, PTH5_FN),
+ PINMUX_DATA(CKE_MARK, PTH4_FN),
+ PINMUX_DATA(STATUS1_MARK, PTH3_FN),
+ PINMUX_DATA(STATUS0_MARK, PTH2_FN),
+ PINMUX_DATA(USB2_PWR_EN_MARK, PTH1_FN),
+ PINMUX_DATA(USB1_PWR_EN_USBF_UPLUP_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(AUDCK_MARK, PTJ6_FN),
+ PINMUX_DATA(ASEBRKAK_MARK, PTJ5_FN),
+ PINMUX_DATA(AUDATA3_MARK, PTJ4_FN),
+ PINMUX_DATA(AUDATA2_MARK, PTJ3_FN),
+ PINMUX_DATA(AUDATA1_MARK, PTJ2_FN),
+ PINMUX_DATA(AUDATA0_MARK, PTJ1_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(PCC_RESET_MARK, PTK3_FN),
+ PINMUX_DATA(PCC_RDY_MARK, PTK2_FN),
+ PINMUX_DATA(PCC_VS2_MARK, PTK1_FN),
+ PINMUX_DATA(PCC_VS1_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(TRST_MARK, PTL7_FN),
+ PINMUX_DATA(TMS_MARK, PTL6_FN),
+ PINMUX_DATA(TDO_MARK, PTL5_FN),
+ PINMUX_DATA(TDI_MARK, PTL4_FN),
+ PINMUX_DATA(TCK_MARK, PTL3_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(DREQ1_MARK, PTM7_FN),
+ PINMUX_DATA(DREQ0_MARK, PTM6_FN),
+ PINMUX_DATA(DACK1_MARK, PTM5_FN),
+ PINMUX_DATA(DACK0_MARK, PTM4_FN),
+ PINMUX_DATA(TEND1_MARK, PTM3_FN),
+ PINMUX_DATA(TEND0_MARK, PTM2_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTM1_FN),
+ PINMUX_DATA(CS6B_CE1B_MARK, PTM0_FN),
+
+ /* PTP FN */
+ PINMUX_DATA(USB1D_SUSPEND_MARK, PSELA_1_0_00, PTP4_FN),
+ PINMUX_DATA(REFOUT_MARK, PSELA_1_0_01, PTP4_FN),
+ PINMUX_DATA(IRQOUT_MARK, PSELA_1_0_10, PTP4_FN),
+ PINMUX_DATA(IRQ3_IRL3_MARK, PTP3_FN),
+ PINMUX_DATA(IRQ2_IRL2_MARK, PTP2_FN),
+ PINMUX_DATA(IRQ1_IRL1_MARK, PTP1_FN),
+ PINMUX_DATA(IRQ0_IRL0_MARK, PTP0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(A25_MARK, PTR7_FN),
+ PINMUX_DATA(A24_MARK, PTR6_FN),
+ PINMUX_DATA(A23_MARK, PTR5_FN),
+ PINMUX_DATA(A22_MARK, PTR4_FN),
+ PINMUX_DATA(A21_MARK, PTR3_FN),
+ PINMUX_DATA(A20_MARK, PTR2_FN),
+ PINMUX_DATA(A19_MARK, PTR1_FN),
+ PINMUX_DATA(A0_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SIOF0_SYNC_MARK, PTS4_FN),
+ PINMUX_DATA(SIOF0_MCLK_MARK, PTS3_FN),
+ PINMUX_DATA(SIOF0_TXD_MARK, PTS2_FN),
+ PINMUX_DATA(SIOF0_RXD_MARK, PTS1_FN),
+ PINMUX_DATA(SIOF0_SCK_MARK, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(SCIF0_CTS_MARK, PSELB_15_14_00, PTT4_FN),
+ PINMUX_DATA(TPU_TO1_MARK, PSELB_15_14_11, PTT4_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, PSELB_15_14_00, PTT3_FN),
+ PINMUX_DATA(TPU_TO0_MARK, PSELB_15_14_11, PTT3_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, PTT2_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, PTT1_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(SIOF1_SYNC_MARK, PTU4_FN),
+ PINMUX_DATA(SIOF1_MCLK_MARK, PSELD_11_10_00, PTU3_FN),
+ PINMUX_DATA(TPU_TI3B_MARK, PSELD_11_10_01, PTU3_FN),
+ PINMUX_DATA(SIOF1_TXD_MARK, PSELD_15_14_00, PTU2_FN),
+ PINMUX_DATA(TPU_TI3A_MARK, PSELD_15_14_01, PTU2_FN),
+ PINMUX_DATA(MMC_DAT_MARK, PSELD_15_14_10, PTU2_FN),
+ PINMUX_DATA(SIOF1_RXD_MARK, PSELC_13_12_00, PTU1_FN),
+ PINMUX_DATA(TPU_TI2B_MARK, PSELC_13_12_01, PTU1_FN),
+ PINMUX_DATA(MMC_CMD_MARK, PSELC_13_12_10, PTU1_FN),
+ PINMUX_DATA(SIOF1_SCK_MARK, PSELC_15_14_00, PTU0_FN),
+ PINMUX_DATA(TPU_TI2A_MARK, PSELC_15_14_01, PTU0_FN),
+ PINMUX_DATA(MMC_CLK_MARK, PSELC_15_14_10, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(SCIF1_CTS_MARK, PSELB_11_10_00, PTV4_FN),
+ PINMUX_DATA(TPU_TO3_MARK, PSELB_11_10_01, PTV4_FN),
+ PINMUX_DATA(MMC_VDDON_MARK, PSELB_11_10_10, PTV4_FN),
+ PINMUX_DATA(LCD_VEPWC_MARK, PSELB_11_10_11, PTV4_FN),
+ PINMUX_DATA(SCIF1_RTS_MARK, PSELB_13_12_00, PTV3_FN),
+ PINMUX_DATA(TPU_TO2_MARK, PSELB_13_12_01, PTV3_FN),
+ PINMUX_DATA(MMC_ODMOD_MARK, PSELB_13_12_10, PTV3_FN),
+ PINMUX_DATA(LCD_VCPWC_MARK, PSELB_13_12_11, PTV3_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PSELC_9_8_00, PTV2_FN),
+ PINMUX_DATA(SIM_D_MARK, PSELC_9_8_10, PTV2_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSELC_11_10_00, PTV1_FN),
+ PINMUX_DATA(SIM_RST_MARK, PSELC_11_10_10, PTV1_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSELD_1_0_00, PTV0_FN),
+ PINMUX_DATA(SIM_CLK_MARK, PSELD_1_0_10, PTV0_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTP */
+ PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
+ PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
+ PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
+ PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
+ PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+
+ /* AFEIF */
+ PINMUX_GPIO(GPIO_FN_AFE_RXIN, AFE_RXIN_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_RDET, AFE_RDET_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_FS, AFE_FS_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_TXOUT, AFE_TXOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_SCLK, AFE_SCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_RLYCNT, AFE_RLYCNT_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_HC1, AFE_HC1_MARK),
+
+ /* IIC */
+ PINMUX_GPIO(GPIO_FN_IIC_SCL, IIC_SCL_MARK),
+ PINMUX_GPIO(GPIO_FN_IIC_SDA, IIC_SDA_MARK),
+
+ /* DAC */
+ PINMUX_GPIO(GPIO_FN_DA1, DA1_MARK),
+ PINMUX_GPIO(GPIO_FN_DA0, DA0_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
+ PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
+ PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
+ PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* USB */
+ PINMUX_GPIO(GPIO_FN_USB1D_RCV, USB1D_RCV_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXSE0, USB1D_TXSE0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXDPLS, USB1D_TXDPLS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_DMNS, USB1D_DMNS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_DPLS, USB1D_DPLS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_SPEED, USB1D_SPEED_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXENL, USB1D_TXENL_MARK),
+
+ PINMUX_GPIO(GPIO_FN_USB2_PWR_EN, USB2_PWR_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1_PWR_EN_USBF_UPLUP,
+ USB1_PWR_EN_USBF_UPLUP_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_SUSPEND, USB1D_SUSPEND_MARK),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_IRL3, IRQ3_IRL3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_IRL2, IRQ2_IRL2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_IRL1, IRQ1_IRL1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_IRL0, IRQ0_IRL0_MARK),
+
+ /* PCC */
+ PINMUX_GPIO(GPIO_FN_PCC_REG, PCC_REG_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_DRV, PCC_DRV_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_BVD2, PCC_BVD2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_BVD1, PCC_BVD1_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_CD2, PCC_CD2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_CD1, PCC_CD1_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_RESET, PCC_RESET_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_RDY, PCC_RDY_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_VS2, PCC_VS2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_VS1, PCC_VS1_MARK),
+
+ /* HUDI */
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_ASEBRKAK, ASEBRKAK_MARK),
+ PINMUX_GPIO(GPIO_FN_TRST, TRST_MARK),
+ PINMUX_GPIO(GPIO_FN_TMS, TMS_MARK),
+ PINMUX_GPIO(GPIO_FN_TDO, TDO_MARK),
+ PINMUX_GPIO(GPIO_FN_TDI, TDI_MARK),
+ PINMUX_GPIO(GPIO_FN_TCK, TCK_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+
+ /* SIOF0 */
+ PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_MCLK, SIOF0_MCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+
+ /* SIOF1 */
+ PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_MCLK, SIOF1_MCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPU_TO1, TPU_TO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO0, TPU_TO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI3B, TPU_TI3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI3A, TPU_TI3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI2B, TPU_TI2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI2A, TPU_TI2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO3, TPU_TO3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO2, TPU_TO2_MARK),
+
+ /* SIM */
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+ /* MMC */
+ PINMUX_GPIO(GPIO_FN_MMC_DAT, MMC_DAT_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_VDDON, MMC_VDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_ODMOD, MMC_ODMOD_MARK),
+
+ /* SYSC */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
+ PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
+ PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
+ PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
+ PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
+ PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
+ PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
+ PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
+ PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
+ PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
+ PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
+ PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
+ PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
+ PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
+ PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ 0, 0, 0, 0,
+ PTE6_FN, 0, 0, PTE6_IN,
+ PTE5_FN, 0, 0, PTE5_IN,
+ PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
+ PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
+ PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
+ PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
+ PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ 0, 0, 0, 0,
+ PTF6_FN, 0, 0, PTF6_IN,
+ PTF5_FN, 0, 0, PTF5_IN,
+ PTF4_FN, 0, 0, PTF4_IN,
+ PTF3_FN, 0, 0, PTF3_IN,
+ PTF2_FN, 0, 0, PTF2_IN,
+ PTF1_FN, 0, 0, PTF1_IN,
+ PTF0_FN, 0, 0, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ PTG6_FN, PTG6_OUT, PTG6_IN_PU, PTG6_IN,
+ PTG5_FN, PTG5_OUT, PTG5_IN_PU, PTG5_IN,
+ PTG4_FN, PTG4_OUT, PTG4_IN_PU, PTG4_IN,
+ PTG3_FN, PTG3_OUT, PTG3_IN_PU, PTG3_IN,
+ PTG2_FN, PTG2_OUT, PTG2_IN_PU, PTG2_IN,
+ PTG1_FN, PTG1_OUT, PTG1_IN_PU, PTG1_IN,
+ PTG0_FN, PTG0_OUT, PTG0_IN_PU, PTG0_IN }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ 0, 0, 0, 0,
+ PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
+ PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
+ PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
+ PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
+ PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
+ PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
+ PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ 0, 0, 0, 0,
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN_PU, PTJ6_IN,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN_PU, PTJ5_IN,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN_PU, PTJ4_IN,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
+ PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
+ PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
+ PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
+ PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
+ PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
+ PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
+ PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
+ PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
+ PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
+ PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
+ PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
+ PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
+ PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
+ PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTP4_FN, PTP4_OUT, PTP4_IN_PU, PTP4_IN,
+ PTP3_FN, PTP3_OUT, PTP3_IN_PU, PTP3_IN,
+ PTP2_FN, PTP2_OUT, PTP2_IN_PU, PTP2_IN,
+ PTP1_FN, PTP1_OUT, PTP1_IN_PU, PTP1_IN,
+ PTP0_FN, PTP0_OUT, PTP0_IN_PU, PTP0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011a, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
+ PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
+ PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
+ PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
+ PTR3_FN, PTR3_OUT, PTR3_IN_PU, PTR3_IN,
+ PTR2_FN, PTR2_OUT, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
+ PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
+ PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
+ PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
+ PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
+ PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
+ PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
+ PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
+ PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
+ PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
+ PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
+ PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
+ PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
+ PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
+ PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
+ PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
+ PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
+ PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050140, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050142, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050144, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050148, 8) {
+ 0, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405014a, 8) {
+ 0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405014c, 8) {
+ 0, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405014e, 8) {
+ 0, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050150, 8) {
+ 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050152, 8) {
+ 0, 0, 0, 0,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050154, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050156, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xa4050158, 8) {
+ 0, 0, 0, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405015a, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405015c, 8) {
+ 0, 0, 0, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa405015e, 8) {
+ 0, 0, 0, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050160, 8) {
+ 0, 0, 0, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050162, 8) {
+ 0, 0, 0, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7720_pinmux_info = {
+ .name = "sh7720_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_STATUS1,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
new file mode 100644
index 000000000000..2de0929315e6
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
@@ -0,0 +1,1779 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7722.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE1_DATA, PTE0_DATA,
+ PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE1_IN, PTE0_IN,
+ PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN,
+ PTH6_IN, PTH5_IN, PTH1_IN, PTH0_IN,
+ PTJ1_IN, PTJ0_IN,
+ PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ0_IN,
+ PTR2_IN,
+ PTS4_IN, PTS2_IN, PTS1_IN,
+ PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN,
+ PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW6_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY0_IN,
+ PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PTA7_IN_PD, PTA6_IN_PD, PTA5_IN_PD, PTA4_IN_PD,
+ PTA3_IN_PD, PTA2_IN_PD, PTA1_IN_PD, PTA0_IN_PD,
+ PTE7_IN_PD, PTE6_IN_PD, PTE5_IN_PD, PTE4_IN_PD, PTE1_IN_PD, PTE0_IN_PD,
+ PTF6_IN_PD, PTF5_IN_PD, PTF4_IN_PD, PTF3_IN_PD, PTF2_IN_PD, PTF1_IN_PD,
+ PTH6_IN_PD, PTH5_IN_PD, PTH1_IN_PD, PTH0_IN_PD,
+ PTK6_IN_PD, PTK5_IN_PD, PTK4_IN_PD, PTK3_IN_PD, PTK2_IN_PD, PTK0_IN_PD,
+ PTL7_IN_PD, PTL6_IN_PD, PTL5_IN_PD, PTL4_IN_PD,
+ PTL3_IN_PD, PTL2_IN_PD, PTL1_IN_PD, PTL0_IN_PD,
+ PTM7_IN_PD, PTM6_IN_PD, PTM5_IN_PD, PTM4_IN_PD,
+ PTM3_IN_PD, PTM2_IN_PD, PTM1_IN_PD, PTM0_IN_PD,
+ PTQ5_IN_PD, PTQ4_IN_PD, PTQ3_IN_PD, PTQ2_IN_PD,
+ PTS4_IN_PD, PTS2_IN_PD, PTS1_IN_PD,
+ PTT4_IN_PD, PTT3_IN_PD, PTT2_IN_PD, PTT1_IN_PD,
+ PTU4_IN_PD, PTU3_IN_PD, PTU2_IN_PD, PTU1_IN_PD, PTU0_IN_PD,
+ PTV4_IN_PD, PTV3_IN_PD, PTV2_IN_PD, PTV1_IN_PD, PTV0_IN_PD,
+ PTW6_IN_PD, PTW4_IN_PD, PTW3_IN_PD, PTW2_IN_PD, PTW1_IN_PD, PTW0_IN_PD,
+ PTX6_IN_PD, PTX5_IN_PD, PTX4_IN_PD,
+ PTX3_IN_PD, PTX2_IN_PD, PTX1_IN_PD, PTX0_IN_PD,
+ PINMUX_INPUT_PULLDOWN_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTC7_IN_PU, PTC5_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU,
+ PTJ1_IN_PU, PTJ0_IN_PU,
+ PTQ0_IN_PU,
+ PTR2_IN_PU,
+ PTX6_IN_PU,
+ PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY0_IN_PU,
+ PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA5_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC0_OUT,
+ PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE1_OUT, PTE0_OUT,
+ PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF0_OUT,
+ PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR4_OUT, PTR3_OUT, PTR1_OUT, PTR0_OUT,
+ PTS3_OUT, PTS2_OUT, PTS0_OUT,
+ PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT0_OUT,
+ PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU0_OUT,
+ PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_MARK_BEGIN,
+ SCIF0_TXD_MARK, SCIF0_RXD_MARK,
+ SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK,
+ SCIF1_TXD_MARK, SCIF1_RXD_MARK,
+ SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK,
+ SCIF2_TXD_MARK, SCIF2_RXD_MARK,
+ SCIF2_RTS_MARK, SCIF2_CTS_MARK, SCIF2_SCK_MARK,
+ SIOTXD_MARK, SIORXD_MARK,
+ SIOD_MARK, SIOSTRB0_MARK, SIOSTRB1_MARK,
+ SIOSCK_MARK, SIOMCK_MARK,
+ VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
+ VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
+ VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
+ VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
+ VIO_CLK_MARK, VIO_VD_MARK, VIO_HD_MARK, VIO_FLD_MARK,
+ VIO_CKO_MARK, VIO_STEX_MARK, VIO_STEM_MARK, VIO_VD2_MARK,
+ VIO_HD2_MARK, VIO_CLK2_MARK,
+ LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
+ LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
+ LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
+ LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
+ LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
+ LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
+ LCDLCLK_MARK, LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
+ LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
+ LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
+ LCDDON2_MARK, LCDVCPWC2_MARK, LCDVEPWC2_MARK, LCDVSYN2_MARK,
+ LCDCS2_MARK,
+ IOIS16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ BS_MARK, CS6B_CE1B_MARK, WAIT_MARK, CS6A_CE2B_MARK,
+ HPD63_MARK, HPD62_MARK, HPD61_MARK, HPD60_MARK,
+ HPD59_MARK, HPD58_MARK, HPD57_MARK, HPD56_MARK,
+ HPD55_MARK, HPD54_MARK, HPD53_MARK, HPD52_MARK,
+ HPD51_MARK, HPD50_MARK, HPD49_MARK, HPD48_MARK,
+ HPDQM7_MARK, HPDQM6_MARK, HPDQM5_MARK, HPDQM4_MARK,
+ IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
+ IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
+ SDHICD_MARK, SDHIWP_MARK, SDHID3_MARK, SDHID2_MARK,
+ SDHID1_MARK, SDHID0_MARK, SDHICMD_MARK, SDHICLK_MARK,
+ SIUAOLR_MARK, SIUAOBT_MARK, SIUAISLD_MARK, SIUAILR_MARK,
+ SIUAIBT_MARK, SIUAOSLD_MARK, SIUMCKA_MARK, SIUFCKA_MARK,
+ SIUBOLR_MARK, SIUBOBT_MARK, SIUBISLD_MARK, SIUBILR_MARK,
+ SIUBIBT_MARK, SIUBOSLD_MARK, SIUMCKB_MARK, SIUFCKB_MARK,
+ AUDSYNC_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ DACK_MARK, DREQ0_MARK,
+ DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
+ DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
+ DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
+ DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
+ DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
+ STATUS0_MARK, PDSTATUS_MARK,
+ SIOF0_MCK_MARK, SIOF0_SCK_MARK,
+ SIOF0_SYNC_MARK, SIOF0_SS1_MARK, SIOF0_SS2_MARK,
+ SIOF0_TXD_MARK, SIOF0_RXD_MARK,
+ SIOF1_MCK_MARK, SIOF1_SCK_MARK,
+ SIOF1_SYNC_MARK, SIOF1_SS1_MARK, SIOF1_SS2_MARK,
+ SIOF1_TXD_MARK, SIOF1_RXD_MARK,
+ SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+ TS_SDAT_MARK, TS_SCK_MARK, TS_SDEN_MARK, TS_SPSYNC_MARK,
+ IRDA_IN_MARK, IRDA_OUT_MARK,
+ TPUTO_MARK,
+ FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
+ FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+ KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
+ PINMUX_MARK_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ VIO_D7_SCIF1_SCK, VIO_D6_SCIF1_RXD, VIO_D5_SCIF1_TXD, VIO_D4,
+ VIO_D3, VIO_D2, VIO_D1, VIO_D0_LCDLCLK,
+ HPD55, HPD54, HPD53, HPD52, HPD51, HPD50, HPD49, HPD48,
+ IOIS16, HPDQM7, HPDQM6, HPDQM5, HPDQM4,
+ SDHICD, SDHIWP, SDHID3, IRQ2_SDHID2, SDHID1, SDHID0, SDHICMD, SDHICLK,
+ A25, A24, A23, A22, IRQ5, IRQ4_BS,
+ PTF6, SIOSCK_SIUBOBT, SIOSTRB1_SIUBOLR,
+ SIOSTRB0_SIUBIBT, SIOD_SIUBILR, SIORXD_SIUBISLD, SIOTXD_SIUBOSLD,
+ AUDSYNC, AUDATA3, AUDATA2, AUDATA1, AUDATA0,
+ LCDVCPWC_LCDVCPWC2, LCDVSYN2_DACK, LCDVSYN, LCDDISP_LCDRS,
+ LCDHSYN_LCDCS, LCDDON_LCDDON2, LCDD17_DV_HSYNC, LCDD16_DV_VSYNC,
+ STATUS0, PDSTATUS, IRQ1, IRQ0,
+ SIUAILR_SIOF1_SS2, SIUAIBT_SIOF1_SS1, SIUAOLR_SIOF1_SYNC,
+ SIUAOBT_SIOF1_SCK, SIUAISLD_SIOF1_RXD, SIUAOSLD_SIOF1_TXD, PTK0,
+ LCDD15_DV_D15, LCDD14_DV_D14, LCDD13_DV_D13, LCDD12_DV_D12,
+ LCDD11_DV_D11, LCDD10_DV_D10, LCDD9_DV_D9, LCDD8_DV_D8,
+ LCDD7_DV_D7, LCDD6_DV_D6, LCDD5_DV_D5, LCDD4_DV_D4,
+ LCDD3_DV_D3, LCDD2_DV_D2, LCDD1_DV_D1, LCDD0_DV_D0,
+ HPD63, HPD62, HPD61, HPD60, HPD59, HPD58, HPD57, HPD56,
+ SIOF0_SS2_SIM_RST, SIOF0_SS1_TS_SPSYNC, SIOF0_SYNC_TS_SDEN,
+ SIOF0_SCK_TS_SCK, PTQ2, PTQ1, PTQ0,
+ LCDRD, CS6B_CE1B_LCDCS2, WAIT, LCDDCK_LCDWR, LCDVEPWC_LCDVEPWC2,
+ SCIF0_CTS_SIUAISPD, SCIF0_RTS_SIUAOSPD,
+ SCIF0_SCK_TPUTO, SCIF0_RXD, SCIF0_TXD,
+ FOE_VIO_VD2, FWE, FSC, DREQ0, FCDE,
+ NAF2_VIO_D10, NAF1_VIO_D9, NAF0_VIO_D8,
+ FRB_VIO_CLK2, FCE_VIO_HD2,
+ NAF7_VIO_D15, NAF6_VIO_D14, NAF5_VIO_D13, NAF4_VIO_D12, NAF3_VIO_D11,
+ VIO_FLD_SCIF2_CTS, VIO_CKO_SCIF2_RTS, VIO_STEX_SCIF2_SCK,
+ VIO_STEM_SCIF2_TXD, VIO_HD_SCIF2_RXD,
+ VIO_VD_SCIF1_CTS, VIO_CLK_SCIF1_RTS,
+ CS6A_CE2B, LCDD23, LCDD22, LCDD21, LCDD20,
+ LCDD19_DV_CLKI, LCDD18_DV_CLK,
+ KEYOUT5_IN5, KEYOUT4_IN6, KEYOUT3, KEYOUT2, KEYOUT1, KEYOUT0,
+ KEYIN4_IRQ7, KEYIN3, KEYIN2, KEYIN1, KEYIN0_IRQ6,
+
+ PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7,
+ PSA9_IRQ4, PSA9_BS, PSA4_IRQ2, PSA4_SDHID2,
+ PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD,
+ PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT,
+ PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT,
+ PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3,
+ PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN,
+ PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN,
+ PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST,
+ PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD,
+ PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK,
+ PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1,
+ PSC11_SIUAILR, PSC11_SIOF1_SS2, PSC0_NAF, PSC0_VIO,
+ PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1,
+ PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, PSD10_LCDLCLK,
+ PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO,
+ PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD,
+ PSD5_CS6B_CE1B, PSD5_LCDCS2,
+ PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
+ PSD2_LCDDON, PSD2_LCDDON2, PSD0_LCDD19_LCDD0, PSD0_DV,
+ PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
+ PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
+ PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK,
+ PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
+ PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10,
+ PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8,
+
+ HIZA14_KEYSC, HIZA14_HIZ,
+ HIZA10_NAF, HIZA10_HIZ,
+ HIZA9_VIO, HIZA9_HIZ,
+ HIZA8_LCDC, HIZA8_HIZ,
+ HIZA7_LCDC, HIZA7_HIZ,
+ HIZA6_LCDC, HIZA6_HIZ,
+ HIZB4_SIUA, HIZB4_HIZ,
+ HIZB1_VIO, HIZB1_HIZ,
+ HIZB0_VIO, HIZB0_HIZ,
+ HIZC15_IRQ7, HIZC15_HIZ,
+ HIZC14_IRQ6, HIZC14_HIZ,
+ HIZC13_IRQ5, HIZC13_HIZ,
+ HIZC12_IRQ4, HIZC12_HIZ,
+ HIZC11_IRQ3, HIZC11_HIZ,
+ HIZC10_IRQ2, HIZC10_HIZ,
+ HIZC9_IRQ1, HIZC9_HIZ,
+ HIZC8_IRQ0, HIZC8_HIZ,
+ MSELB9_VIO, MSELB9_VIO2,
+ MSELB8_RGB, MSELB8_SYS,
+ PINMUX_FUNCTION_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_IN_PD, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_IN_PD),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_IN_PD, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_IN_PD),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_IN_PD),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_IN_PD),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_IN_PD),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_IN_PD),
+
+ /* PTB */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_OUT),
+
+ /* PTE */
+ PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN, PTE7_IN_PD),
+ PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN, PTE6_IN_PD),
+ PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN, PTE5_IN_PD),
+ PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN, PTE4_IN_PD),
+ PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN, PTE1_IN_PD),
+ PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN, PTE0_IN_PD),
+
+ /* PTF */
+ PINMUX_DATA(PTF6_DATA, PTF6_OUT, PTF6_IN, PTF6_IN_PD),
+ PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN, PTF5_IN_PD),
+ PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN, PTF4_IN_PD),
+ PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN, PTF3_IN_PD),
+ PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN, PTF2_IN_PD),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_IN_PD),
+ PINMUX_DATA(PTF0_DATA, PTF0_OUT),
+
+ /* PTG */
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH */
+ PINMUX_DATA(PTH7_DATA, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN, PTH6_IN_PD),
+ PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN, PTH5_IN_PD),
+ PINMUX_DATA(PTH4_DATA, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN, PTH1_IN_PD),
+ PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN, PTH0_IN_PD),
+
+ /* PTJ */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU),
+
+ /* PTK */
+ PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN, PTK6_IN_PD),
+ PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN, PTK5_IN_PD),
+ PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN, PTK4_IN_PD),
+ PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN, PTK3_IN_PD),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_IN_PD),
+ PINMUX_DATA(PTK1_DATA, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN, PTK0_IN_PD),
+
+ /* PTL */
+ PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN, PTL7_IN_PD),
+ PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN, PTL6_IN_PD),
+ PINMUX_DATA(PTL5_DATA, PTL5_OUT, PTL5_IN, PTL5_IN_PD),
+ PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN, PTL4_IN_PD),
+ PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN, PTL3_IN_PD),
+ PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN, PTL2_IN_PD),
+ PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN, PTL1_IN_PD),
+ PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN, PTL0_IN_PD),
+
+ /* PTM */
+ PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN, PTM7_IN_PD),
+ PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN, PTM6_IN_PD),
+ PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN, PTM5_IN_PD),
+ PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN, PTM4_IN_PD),
+ PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN, PTM3_IN_PD),
+ PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN, PTM2_IN_PD),
+ PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN, PTM1_IN_PD),
+ PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN, PTM0_IN_PD),
+
+ /* PTN */
+ PINMUX_DATA(PTN7_DATA, PTN7_OUT, PTN7_IN),
+ PINMUX_DATA(PTN6_DATA, PTN6_OUT, PTN6_IN),
+ PINMUX_DATA(PTN5_DATA, PTN5_OUT, PTN5_IN),
+ PINMUX_DATA(PTN4_DATA, PTN4_OUT, PTN4_IN),
+ PINMUX_DATA(PTN3_DATA, PTN3_OUT, PTN3_IN),
+ PINMUX_DATA(PTN2_DATA, PTN2_OUT, PTN2_IN),
+ PINMUX_DATA(PTN1_DATA, PTN1_OUT, PTN1_IN),
+ PINMUX_DATA(PTN0_DATA, PTN0_OUT, PTN0_IN),
+
+ /* PTQ */
+ PINMUX_DATA(PTQ6_DATA, PTQ6_OUT),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN, PTQ5_IN_PD),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN, PTQ4_IN_PD),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN, PTQ3_IN_PD),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_IN_PD),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_OUT),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN, PTQ0_IN_PU),
+
+ /* PTR */
+ PINMUX_DATA(PTR4_DATA, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_OUT),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_OUT),
+
+ /* PTS */
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_IN_PD),
+ PINMUX_DATA(PTS3_DATA, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN, PTS2_IN_PD),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_IN_PD),
+ PINMUX_DATA(PTS0_DATA, PTS0_OUT),
+
+ /* PTT */
+ PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN, PTT4_IN_PD),
+ PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN, PTT3_IN_PD),
+ PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN, PTT2_IN_PD),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_IN_PD),
+ PINMUX_DATA(PTT0_DATA, PTT0_OUT),
+
+ /* PTU */
+ PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN, PTU4_IN_PD),
+ PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN, PTU3_IN_PD),
+ PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN, PTU2_IN_PD),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_IN_PD),
+ PINMUX_DATA(PTU0_DATA, PTU0_OUT, PTU0_IN, PTU0_IN_PD),
+
+ /* PTV */
+ PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN, PTV4_IN_PD),
+ PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN, PTV3_IN_PD),
+ PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN, PTV2_IN_PD),
+ PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN, PTV1_IN_PD),
+ PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN, PTV0_IN_PD),
+
+ /* PTW */
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_IN_PD),
+ PINMUX_DATA(PTW5_DATA, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN, PTW4_IN_PD),
+ PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN, PTW3_IN_PD),
+ PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN, PTW2_IN_PD),
+ PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN, PTW1_IN_PD),
+ PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN, PTW0_IN_PD),
+
+ /* PTX */
+ PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN, PTX6_IN_PD),
+ PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN, PTX5_IN_PD),
+ PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN, PTX4_IN_PD),
+ PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN, PTX3_IN_PD),
+ PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN, PTX2_IN_PD),
+ PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN, PTX1_IN_PD),
+ PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN, PTX0_IN_PD),
+
+ /* PTY */
+ PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN, PTY5_IN_PU),
+ PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN, PTY4_IN_PU),
+ PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN, PTY3_IN_PU),
+ PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN, PTY2_IN_PU),
+ PINMUX_DATA(PTY1_DATA, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN, PTY0_IN_PU),
+
+ /* PTZ */
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_IN_PU),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_IN_PU),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_IN_PU),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_IN_PU),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_IN_PU),
+
+ /* SCIF0 */
+ PINMUX_DATA(SCIF0_TXD_MARK, SCIF0_TXD),
+ PINMUX_DATA(SCIF0_RXD_MARK, SCIF0_RXD),
+ PINMUX_DATA(SCIF0_RTS_MARK, PSD7_SCIF0_RTS, SCIF0_RTS_SIUAOSPD),
+ PINMUX_DATA(SCIF0_CTS_MARK, PSD6_SCIF0_CTS, SCIF0_CTS_SIUAISPD),
+ PINMUX_DATA(SCIF0_SCK_MARK, PSD8_SCIF0_SCK, SCIF0_SCK_TPUTO),
+
+ /* SCIF1 */
+ PINMUX_DATA(SCIF1_TXD_MARK, PSD11_SCIF1, VIO_D5_SCIF1_TXD),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSD11_SCIF1, VIO_D6_SCIF1_RXD),
+ PINMUX_DATA(SCIF1_RTS_MARK, PSD12_SCIF1, VIO_CLK_SCIF1_RTS),
+ PINMUX_DATA(SCIF1_CTS_MARK, PSD12_SCIF1, VIO_VD_SCIF1_CTS),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSD11_SCIF1, VIO_D7_SCIF1_SCK),
+
+ /* SCIF2 */
+ PINMUX_DATA(SCIF2_TXD_MARK, PSD13_SCIF2, VIO_STEM_SCIF2_TXD),
+ PINMUX_DATA(SCIF2_RXD_MARK, PSD13_SCIF2, VIO_HD_SCIF2_RXD),
+ PINMUX_DATA(SCIF2_RTS_MARK, PSD13_SCIF2, VIO_CKO_SCIF2_RTS),
+ PINMUX_DATA(SCIF2_CTS_MARK, PSD13_SCIF2, VIO_FLD_SCIF2_CTS),
+ PINMUX_DATA(SCIF2_SCK_MARK, PSD13_SCIF2, VIO_STEX_SCIF2_SCK),
+
+ /* SIO */
+ PINMUX_DATA(SIOTXD_MARK, PSB15_SIOTXD, SIOTXD_SIUBOSLD),
+ PINMUX_DATA(SIORXD_MARK, PSB14_SIORXD, SIORXD_SIUBISLD),
+ PINMUX_DATA(SIOD_MARK, PSB13_SIOD, SIOD_SIUBILR),
+ PINMUX_DATA(SIOSTRB0_MARK, PSB12_SIOSTRB0, SIOSTRB0_SIUBIBT),
+ PINMUX_DATA(SIOSTRB1_MARK, PSB11_SIOSTRB1, SIOSTRB1_SIUBOLR),
+ PINMUX_DATA(SIOSCK_MARK, PSB10_SIOSCK, SIOSCK_SIUBOBT),
+ PINMUX_DATA(SIOMCK_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIOMCK, PTF6),
+
+ /* CEU */
+ PINMUX_DATA(VIO_D15_MARK, PSC0_VIO, HIZA10_NAF, NAF7_VIO_D15),
+ PINMUX_DATA(VIO_D14_MARK, PSC0_VIO, HIZA10_NAF, NAF6_VIO_D14),
+ PINMUX_DATA(VIO_D13_MARK, PSC0_VIO, HIZA10_NAF, NAF5_VIO_D13),
+ PINMUX_DATA(VIO_D12_MARK, PSC0_VIO, HIZA10_NAF, NAF4_VIO_D12),
+ PINMUX_DATA(VIO_D11_MARK, PSC0_VIO, HIZA10_NAF, NAF3_VIO_D11),
+ PINMUX_DATA(VIO_D10_MARK, PSE2_VIO_D10, HIZB0_VIO, NAF2_VIO_D10),
+ PINMUX_DATA(VIO_D9_MARK, PSE1_VIO_D9, HIZB0_VIO, NAF1_VIO_D9),
+ PINMUX_DATA(VIO_D8_MARK, PSE0_VIO_D8, HIZB0_VIO, NAF0_VIO_D8),
+ PINMUX_DATA(VIO_D7_MARK, PSD11_VIO, VIO_D7_SCIF1_SCK),
+ PINMUX_DATA(VIO_D6_MARK, PSD11_VIO, VIO_D6_SCIF1_RXD),
+ PINMUX_DATA(VIO_D5_MARK, PSD11_VIO, VIO_D5_SCIF1_TXD),
+ PINMUX_DATA(VIO_D4_MARK, VIO_D4),
+ PINMUX_DATA(VIO_D3_MARK, VIO_D3),
+ PINMUX_DATA(VIO_D2_MARK, VIO_D2),
+ PINMUX_DATA(VIO_D1_MARK, VIO_D1),
+ PINMUX_DATA(VIO_D0_MARK, PSD10_VIO_D0, VIO_D0_LCDLCLK),
+ PINMUX_DATA(VIO_CLK_MARK, PSD12_VIO, MSELB9_VIO, VIO_CLK_SCIF1_RTS),
+ PINMUX_DATA(VIO_VD_MARK, PSD12_VIO, MSELB9_VIO, VIO_VD_SCIF1_CTS),
+ PINMUX_DATA(VIO_HD_MARK, PSD13_VIO, MSELB9_VIO, VIO_HD_SCIF2_RXD),
+ PINMUX_DATA(VIO_FLD_MARK, PSD13_VIO, HIZA9_VIO, VIO_FLD_SCIF2_CTS),
+ PINMUX_DATA(VIO_CKO_MARK, PSD13_VIO, HIZA9_VIO, VIO_CKO_SCIF2_RTS),
+ PINMUX_DATA(VIO_STEX_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEX_SCIF2_SCK),
+ PINMUX_DATA(VIO_STEM_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEM_SCIF2_TXD),
+ PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB0_VIO, FOE_VIO_VD2),
+ PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB1_VIO, FCE_VIO_HD2),
+ PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB1_VIO, FRB_VIO_CLK2),
+
+ /* LCDC */
+ PINMUX_DATA(LCDD23_MARK, HIZA8_LCDC, LCDD23),
+ PINMUX_DATA(LCDD22_MARK, HIZA8_LCDC, LCDD22),
+ PINMUX_DATA(LCDD21_MARK, HIZA8_LCDC, LCDD21),
+ PINMUX_DATA(LCDD20_MARK, HIZA8_LCDC, LCDD20),
+ PINMUX_DATA(LCDD19_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD19_DV_CLKI),
+ PINMUX_DATA(LCDD18_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD18_DV_CLK),
+ PINMUX_DATA(LCDD17_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC,
+ LCDD17_DV_HSYNC),
+ PINMUX_DATA(LCDD16_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC,
+ LCDD16_DV_VSYNC),
+ PINMUX_DATA(LCDD15_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD15_DV_D15),
+ PINMUX_DATA(LCDD14_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD14_DV_D14),
+ PINMUX_DATA(LCDD13_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD13_DV_D13),
+ PINMUX_DATA(LCDD12_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD12_DV_D12),
+ PINMUX_DATA(LCDD11_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD11_DV_D11),
+ PINMUX_DATA(LCDD10_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD10_DV_D10),
+ PINMUX_DATA(LCDD9_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD9_DV_D9),
+ PINMUX_DATA(LCDD8_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD8_DV_D8),
+ PINMUX_DATA(LCDD7_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD7_DV_D7),
+ PINMUX_DATA(LCDD6_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD6_DV_D6),
+ PINMUX_DATA(LCDD5_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD5_DV_D5),
+ PINMUX_DATA(LCDD4_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD4_DV_D4),
+ PINMUX_DATA(LCDD3_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD3_DV_D3),
+ PINMUX_DATA(LCDD2_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD2_DV_D2),
+ PINMUX_DATA(LCDD1_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD1_DV_D1),
+ PINMUX_DATA(LCDD0_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD0_DV_D0),
+ PINMUX_DATA(LCDLCLK_MARK, PSD10_LCDLCLK, VIO_D0_LCDLCLK),
+ /* Main LCD */
+ PINMUX_DATA(LCDDON_MARK, PSD2_LCDDON, HIZA7_LCDC, LCDDON_LCDDON2),
+ PINMUX_DATA(LCDVCPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC,
+ HIZA6_LCDC, LCDVCPWC_LCDVCPWC2),
+ PINMUX_DATA(LCDVEPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC,
+ HIZA6_LCDC, LCDVEPWC_LCDVEPWC2),
+ PINMUX_DATA(LCDVSYN_MARK, HIZA7_LCDC, LCDVSYN),
+ /* Main LCD - RGB Mode */
+ PINMUX_DATA(LCDDCK_MARK, MSELB8_RGB, HIZA8_LCDC, LCDDCK_LCDWR),
+ PINMUX_DATA(LCDHSYN_MARK, MSELB8_RGB, HIZA7_LCDC, LCDHSYN_LCDCS),
+ PINMUX_DATA(LCDDISP_MARK, MSELB8_RGB, HIZA7_LCDC, LCDDISP_LCDRS),
+ /* Main LCD - SYS Mode */
+ PINMUX_DATA(LCDRS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDDISP_LCDRS),
+ PINMUX_DATA(LCDCS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDHSYN_LCDCS),
+ PINMUX_DATA(LCDWR_MARK, MSELB8_SYS, HIZA8_LCDC, LCDDCK_LCDWR),
+ PINMUX_DATA(LCDRD_MARK, HIZA7_LCDC, LCDRD),
+ /* Sub LCD - SYS Mode */
+ PINMUX_DATA(LCDDON2_MARK, PSD2_LCDDON2, HIZA7_LCDC, LCDDON_LCDDON2),
+ PINMUX_DATA(LCDVCPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2,
+ HIZA6_LCDC, LCDVCPWC_LCDVCPWC2),
+ PINMUX_DATA(LCDVEPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2,
+ HIZA6_LCDC, LCDVEPWC_LCDVEPWC2),
+ PINMUX_DATA(LCDVSYN2_MARK, PSE12_LCDVSYN2, HIZA8_LCDC, LCDVSYN2_DACK),
+ PINMUX_DATA(LCDCS2_MARK, PSD5_LCDCS2, CS6B_CE1B_LCDCS2),
+
+ /* BSC */
+ PINMUX_DATA(IOIS16_MARK, IOIS16),
+ PINMUX_DATA(A25_MARK, A25),
+ PINMUX_DATA(A24_MARK, A24),
+ PINMUX_DATA(A23_MARK, A23),
+ PINMUX_DATA(A22_MARK, A22),
+ PINMUX_DATA(BS_MARK, PSA9_BS, IRQ4_BS),
+ PINMUX_DATA(CS6B_CE1B_MARK, PSD5_CS6B_CE1B, CS6B_CE1B_LCDCS2),
+ PINMUX_DATA(WAIT_MARK, WAIT),
+ PINMUX_DATA(CS6A_CE2B_MARK, CS6A_CE2B),
+
+ /* SBSC */
+ PINMUX_DATA(HPD63_MARK, HPD63),
+ PINMUX_DATA(HPD62_MARK, HPD62),
+ PINMUX_DATA(HPD61_MARK, HPD61),
+ PINMUX_DATA(HPD60_MARK, HPD60),
+ PINMUX_DATA(HPD59_MARK, HPD59),
+ PINMUX_DATA(HPD58_MARK, HPD58),
+ PINMUX_DATA(HPD57_MARK, HPD57),
+ PINMUX_DATA(HPD56_MARK, HPD56),
+ PINMUX_DATA(HPD55_MARK, HPD55),
+ PINMUX_DATA(HPD54_MARK, HPD54),
+ PINMUX_DATA(HPD53_MARK, HPD53),
+ PINMUX_DATA(HPD52_MARK, HPD52),
+ PINMUX_DATA(HPD51_MARK, HPD51),
+ PINMUX_DATA(HPD50_MARK, HPD50),
+ PINMUX_DATA(HPD49_MARK, HPD49),
+ PINMUX_DATA(HPD48_MARK, HPD48),
+ PINMUX_DATA(HPDQM7_MARK, HPDQM7),
+ PINMUX_DATA(HPDQM6_MARK, HPDQM6),
+ PINMUX_DATA(HPDQM5_MARK, HPDQM5),
+ PINMUX_DATA(HPDQM4_MARK, HPDQM4),
+
+ /* IRQ */
+ PINMUX_DATA(IRQ0_MARK, HIZC8_IRQ0, IRQ0),
+ PINMUX_DATA(IRQ1_MARK, HIZC9_IRQ1, IRQ1),
+ PINMUX_DATA(IRQ2_MARK, PSA4_IRQ2, HIZC10_IRQ2, IRQ2_SDHID2),
+ PINMUX_DATA(IRQ3_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_IRQ3,
+ HIZC11_IRQ3, PTQ0),
+ PINMUX_DATA(IRQ4_MARK, PSA9_IRQ4, HIZC12_IRQ4, IRQ4_BS),
+ PINMUX_DATA(IRQ5_MARK, HIZC13_IRQ5, IRQ5),
+ PINMUX_DATA(IRQ6_MARK, PSA15_IRQ6, HIZC14_IRQ6, KEYIN0_IRQ6),
+ PINMUX_DATA(IRQ7_MARK, PSA14_IRQ7, HIZC15_IRQ7, KEYIN4_IRQ7),
+
+ /* SDHI */
+ PINMUX_DATA(SDHICD_MARK, SDHICD),
+ PINMUX_DATA(SDHIWP_MARK, SDHIWP),
+ PINMUX_DATA(SDHID3_MARK, SDHID3),
+ PINMUX_DATA(SDHID2_MARK, PSA4_SDHID2, IRQ2_SDHID2),
+ PINMUX_DATA(SDHID1_MARK, SDHID1),
+ PINMUX_DATA(SDHID0_MARK, SDHID0),
+ PINMUX_DATA(SDHICMD_MARK, SDHICMD),
+ PINMUX_DATA(SDHICLK_MARK, SDHICLK),
+
+ /* SIU - Port A */
+ PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC),
+ PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK),
+ PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD),
+ PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2),
+ PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1),
+ PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD),
+ PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0),
+ PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0),
+
+ /* SIU - Port B */
+ PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR),
+ PINMUX_DATA(SIUBOBT_MARK, PSB10_SIUBOBT, SIOSCK_SIUBOBT),
+ PINMUX_DATA(SIUBISLD_MARK, PSB14_SIUBISLD, SIORXD_SIUBISLD),
+ PINMUX_DATA(SIUBILR_MARK, PSB13_SIUBILR, SIOD_SIUBILR),
+ PINMUX_DATA(SIUBIBT_MARK, PSB12_SIUBIBT, SIOSTRB0_SIUBIBT),
+ PINMUX_DATA(SIUBOSLD_MARK, PSB15_SIUBOSLD, SIOTXD_SIUBOSLD),
+ PINMUX_DATA(SIUMCKB_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIUMCKB, PTF6),
+ PINMUX_DATA(SIUFCKB_MARK, PSD9_SIUFCKB, PTF6),
+
+ /* AUD */
+ PINMUX_DATA(AUDSYNC_MARK, AUDSYNC),
+ PINMUX_DATA(AUDATA3_MARK, AUDATA3),
+ PINMUX_DATA(AUDATA2_MARK, AUDATA2),
+ PINMUX_DATA(AUDATA1_MARK, AUDATA1),
+ PINMUX_DATA(AUDATA0_MARK, AUDATA0),
+
+ /* DMAC */
+ PINMUX_DATA(DACK_MARK, PSE12_DACK, LCDVSYN2_DACK),
+ PINMUX_DATA(DREQ0_MARK, DREQ0),
+
+ /* VOU */
+ PINMUX_DATA(DV_CLKI_MARK, PSD0_DV, LCDD19_DV_CLKI),
+ PINMUX_DATA(DV_CLK_MARK, PSD0_DV, LCDD18_DV_CLK),
+ PINMUX_DATA(DV_HSYNC_MARK, PSD0_DV, LCDD17_DV_HSYNC),
+ PINMUX_DATA(DV_VSYNC_MARK, PSD0_DV, LCDD16_DV_VSYNC),
+ PINMUX_DATA(DV_D15_MARK, PSD0_DV, LCDD15_DV_D15),
+ PINMUX_DATA(DV_D14_MARK, PSD0_DV, LCDD14_DV_D14),
+ PINMUX_DATA(DV_D13_MARK, PSD0_DV, LCDD13_DV_D13),
+ PINMUX_DATA(DV_D12_MARK, PSD0_DV, LCDD12_DV_D12),
+ PINMUX_DATA(DV_D11_MARK, PSD0_DV, LCDD11_DV_D11),
+ PINMUX_DATA(DV_D10_MARK, PSD0_DV, LCDD10_DV_D10),
+ PINMUX_DATA(DV_D9_MARK, PSD0_DV, LCDD9_DV_D9),
+ PINMUX_DATA(DV_D8_MARK, PSD0_DV, LCDD8_DV_D8),
+ PINMUX_DATA(DV_D7_MARK, PSD0_DV, LCDD7_DV_D7),
+ PINMUX_DATA(DV_D6_MARK, PSD0_DV, LCDD6_DV_D6),
+ PINMUX_DATA(DV_D5_MARK, PSD0_DV, LCDD5_DV_D5),
+ PINMUX_DATA(DV_D4_MARK, PSD0_DV, LCDD4_DV_D4),
+ PINMUX_DATA(DV_D3_MARK, PSD0_DV, LCDD3_DV_D3),
+ PINMUX_DATA(DV_D2_MARK, PSD0_DV, LCDD2_DV_D2),
+ PINMUX_DATA(DV_D1_MARK, PSD0_DV, LCDD1_DV_D1),
+ PINMUX_DATA(DV_D0_MARK, PSD0_DV, LCDD0_DV_D0),
+
+ /* CPG */
+ PINMUX_DATA(STATUS0_MARK, STATUS0),
+ PINMUX_DATA(PDSTATUS_MARK, PDSTATUS),
+
+ /* SIOF0 */
+ PINMUX_DATA(SIOF0_MCK_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_SIOF0_MCK, PTQ0),
+ PINMUX_DATA(SIOF0_SCK_MARK, PSB5_SIOF0_SCK, SIOF0_SCK_TS_SCK),
+ PINMUX_DATA(SIOF0_SYNC_MARK, PSB4_SIOF0_SYNC, SIOF0_SYNC_TS_SDEN),
+ PINMUX_DATA(SIOF0_SS1_MARK, PSB3_SIOF0_SS1, SIOF0_SS1_TS_SPSYNC),
+ PINMUX_DATA(SIOF0_SS2_MARK, PSB2_SIOF0_SS2, SIOF0_SS2_SIM_RST),
+ PINMUX_DATA(SIOF0_TXD_MARK, PSE14_SIOF0_TXD_IRDA_OUT,
+ PSB7_SIOF0_TXD, PTQ1),
+ PINMUX_DATA(SIOF0_RXD_MARK, PSE13_SIOF0_RXD_IRDA_IN,
+ PSB6_SIOF0_RXD, PTQ2),
+
+ /* SIOF1 */
+ PINMUX_DATA(SIOF1_MCK_MARK, PSE11_SIUMCKA_SIOF1_MCK,
+ PSB1_SIOF1_MCK, PTK0),
+ PINMUX_DATA(SIOF1_SCK_MARK, PSC14_SIOF1_SCK, SIUAOBT_SIOF1_SCK),
+ PINMUX_DATA(SIOF1_SYNC_MARK, PSC13_SIOF1_SYNC, SIUAOLR_SIOF1_SYNC),
+ PINMUX_DATA(SIOF1_SS1_MARK, PSC12_SIOF1_SS1, SIUAIBT_SIOF1_SS1),
+ PINMUX_DATA(SIOF1_SS2_MARK, PSC11_SIOF1_SS2, SIUAILR_SIOF1_SS2),
+ PINMUX_DATA(SIOF1_TXD_MARK, PSB0_SIOF1_TXD, SIUAOSLD_SIOF1_TXD),
+ PINMUX_DATA(SIOF1_RXD_MARK, PSC15_SIOF1_RXD, SIUAISLD_SIOF1_RXD),
+
+ /* SIM */
+ PINMUX_DATA(SIM_D_MARK, PSE15_SIM_D, PTQ0),
+ PINMUX_DATA(SIM_CLK_MARK, PSE14_SIM_CLK, PTQ1),
+ PINMUX_DATA(SIM_RST_MARK, PSB2_SIM_RST, SIOF0_SS2_SIM_RST),
+
+ /* TSIF */
+ PINMUX_DATA(TS_SDAT_MARK, PSE13_TS_SDAT, PTQ2),
+ PINMUX_DATA(TS_SCK_MARK, PSB5_TS_SCK, SIOF0_SCK_TS_SCK),
+ PINMUX_DATA(TS_SDEN_MARK, PSB4_TS_SDEN, SIOF0_SYNC_TS_SDEN),
+ PINMUX_DATA(TS_SPSYNC_MARK, PSB3_TS_SPSYNC, SIOF0_SS1_TS_SPSYNC),
+
+ /* IRDA */
+ PINMUX_DATA(IRDA_IN_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_IRDA_IN, PTQ2),
+ PINMUX_DATA(IRDA_OUT_MARK, PSE14_SIOF0_TXD_IRDA_OUT,
+ PSB7_IRDA_OUT, PTQ1),
+
+ /* TPU */
+ PINMUX_DATA(TPUTO_MARK, PSD8_TPUTO, SCIF0_SCK_TPUTO),
+
+ /* FLCTL */
+ PINMUX_DATA(FCE_MARK, PSE3_FLCTL, FCE_VIO_HD2),
+ PINMUX_DATA(NAF7_MARK, PSC0_NAF, HIZA10_NAF, NAF7_VIO_D15),
+ PINMUX_DATA(NAF6_MARK, PSC0_NAF, HIZA10_NAF, NAF6_VIO_D14),
+ PINMUX_DATA(NAF5_MARK, PSC0_NAF, HIZA10_NAF, NAF5_VIO_D13),
+ PINMUX_DATA(NAF4_MARK, PSC0_NAF, HIZA10_NAF, NAF4_VIO_D12),
+ PINMUX_DATA(NAF3_MARK, PSC0_NAF, HIZA10_NAF, NAF3_VIO_D11),
+ PINMUX_DATA(NAF2_MARK, PSE2_NAF2, HIZB0_VIO, NAF2_VIO_D10),
+ PINMUX_DATA(NAF1_MARK, PSE1_NAF1, HIZB0_VIO, NAF1_VIO_D9),
+ PINMUX_DATA(NAF0_MARK, PSE0_NAF0, HIZB0_VIO, NAF0_VIO_D8),
+ PINMUX_DATA(FCDE_MARK, FCDE),
+ PINMUX_DATA(FOE_MARK, PSE3_FLCTL, HIZB0_VIO, FOE_VIO_VD2),
+ PINMUX_DATA(FSC_MARK, FSC),
+ PINMUX_DATA(FWE_MARK, FWE),
+ PINMUX_DATA(FRB_MARK, PSE3_FLCTL, FRB_VIO_CLK2),
+
+ /* KEYSC */
+ PINMUX_DATA(KEYIN0_MARK, PSA15_KEYIN0, HIZC14_IRQ6, KEYIN0_IRQ6),
+ PINMUX_DATA(KEYIN1_MARK, HIZA14_KEYSC, KEYIN1),
+ PINMUX_DATA(KEYIN2_MARK, HIZA14_KEYSC, KEYIN2),
+ PINMUX_DATA(KEYIN3_MARK, HIZA14_KEYSC, KEYIN3),
+ PINMUX_DATA(KEYIN4_MARK, PSA14_KEYIN4, HIZC15_IRQ7, KEYIN4_IRQ7),
+ PINMUX_DATA(KEYOUT0_MARK, HIZA14_KEYSC, KEYOUT0),
+ PINMUX_DATA(KEYOUT1_MARK, HIZA14_KEYSC, KEYOUT1),
+ PINMUX_DATA(KEYOUT2_MARK, HIZA14_KEYSC, KEYOUT2),
+ PINMUX_DATA(KEYOUT3_MARK, HIZA14_KEYSC, KEYOUT3),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, HIZA14_KEYSC, KEYOUT4_IN6),
+ PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RTS, SCIF2_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_CTS, SCIF2_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
+
+ /* SIO */
+ PINMUX_GPIO(GPIO_FN_SIOTXD, SIOTXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIORXD, SIORXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOD, SIOD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSTRB0, SIOSTRB0_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSTRB1, SIOSTRB1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSCK, SIOSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOMCK, SIOMCK_MARK),
+
+ /* CEU */
+ PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK, VIO_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD, VIO_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD, VIO_HD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_STEX, VIO_STEX_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_STEM, VIO_STEM_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+ /* Main LCD */
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ /* Main LCD - RGB Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ /* Main LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+ /* Sub LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDON2, LCDDON2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC2, LCDVCPWC2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC2, LCDVEPWC2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN2, LCDVSYN2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS2, LCDCS2_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+
+ /* SBSC */
+ PINMUX_GPIO(GPIO_FN_HPD63, HPD63_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD62, HPD62_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD61, HPD61_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD60, HPD60_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD59, HPD59_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD58, HPD58_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD57, HPD57_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD56, HPD56_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD55, HPD55_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD54, HPD54_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD53, HPD53_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD52, HPD52_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD51, HPD51_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD50, HPD50_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD49, HPD49_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD48, HPD48_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM7, HPDQM7_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM6, HPDQM6_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM5, HPDQM5_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM4, HPDQM4_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+
+ /* SDHI */
+ PINMUX_GPIO(GPIO_FN_SDHICD, SDHICD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHIWP, SDHIWP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID3, SDHID3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID2, SDHID2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID1, SDHID1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID0, SDHID0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHICMD, SDHICMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHICLK, SDHICLK_MARK),
+
+ /* SIU - Port A */
+ PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUMCKA, SIUMCKA_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUFCKA, SIUFCKA_MARK),
+
+ /* SIU - Port B */
+ PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUMCKB, SIUMCKB_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUFCKB, SIUFCKB_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK, DACK_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+
+ /* CPG */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+
+ /* SIOF0 */
+ PINMUX_GPIO(GPIO_FN_SIOF0_MCK, SIOF0_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SS1, SIOF0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SS2, SIOF0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+
+ /* SIOF1 */
+ PINMUX_GPIO(GPIO_FN_SIOF1_MCK, SIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SS1, SIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SS2, SIOF1_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+
+ /* SIM */
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TS_SDAT, TS_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SCK, TS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SDEN, TS_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SPSYNC, TS_SPSYNC_MARK),
+
+ /* IRDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO, TPUTO_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ VIO_D7_SCIF1_SCK, PTA7_OUT, PTA7_IN_PD, PTA7_IN,
+ VIO_D6_SCIF1_RXD, 0, PTA6_IN_PD, PTA6_IN,
+ VIO_D5_SCIF1_TXD, PTA5_OUT, PTA5_IN_PD, PTA5_IN,
+ VIO_D4, 0, PTA4_IN_PD, PTA4_IN,
+ VIO_D3, 0, PTA3_IN_PD, PTA3_IN,
+ VIO_D2, 0, PTA2_IN_PD, PTA2_IN,
+ VIO_D1, 0, PTA1_IN_PD, PTA1_IN,
+ VIO_D0_LCDLCLK, 0, PTA0_IN_PD, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ HPD55, PTB7_OUT, 0, PTB7_IN,
+ HPD54, PTB6_OUT, 0, PTB6_IN,
+ HPD53, PTB5_OUT, 0, PTB5_IN,
+ HPD52, PTB4_OUT, 0, PTB4_IN,
+ HPD51, PTB3_OUT, 0, PTB3_IN,
+ HPD50, PTB2_OUT, 0, PTB2_IN,
+ HPD49, PTB1_OUT, 0, PTB1_IN,
+ HPD48, PTB0_OUT, 0, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ 0, 0, PTC7_IN_PU, PTC7_IN,
+ 0, 0, 0, 0,
+ IOIS16, 0, PTC5_IN_PU, PTC5_IN,
+ HPDQM7, PTC4_OUT, 0, PTC4_IN,
+ HPDQM6, PTC3_OUT, 0, PTC3_IN,
+ HPDQM5, PTC2_OUT, 0, PTC2_IN,
+ 0, 0, 0, 0,
+ HPDQM4, PTC0_OUT, 0, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ SDHICD, 0, PTD7_IN_PU, PTD7_IN,
+ SDHIWP, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ SDHID3, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ IRQ2_SDHID2, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ SDHID1, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ SDHID0, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ SDHICMD, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ SDHICLK, PTD0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ A25, PTE7_OUT, PTE7_IN_PD, PTE7_IN,
+ A24, PTE6_OUT, PTE6_IN_PD, PTE6_IN,
+ A23, PTE5_OUT, PTE5_IN_PD, PTE5_IN,
+ A22, PTE4_OUT, PTE4_IN_PD, PTE4_IN,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ IRQ5, PTE1_OUT, PTE1_IN_PD, PTE1_IN,
+ IRQ4_BS, PTE0_OUT, PTE0_IN_PD, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ 0, 0, 0, 0,
+ PTF6, PTF6_OUT, PTF6_IN_PD, PTF6_IN,
+ SIOSCK_SIUBOBT, PTF5_OUT, PTF5_IN_PD, PTF5_IN,
+ SIOSTRB1_SIUBOLR, PTF4_OUT, PTF4_IN_PD, PTF4_IN,
+ SIOSTRB0_SIUBIBT, PTF3_OUT, PTF3_IN_PD, PTF3_IN,
+ SIOD_SIUBILR, PTF2_OUT, PTF2_IN_PD, PTF2_IN,
+ SIORXD_SIUBISLD, 0, PTF1_IN_PD, PTF1_IN,
+ SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ AUDSYNC, PTG4_OUT, 0, 0,
+ AUDATA3, PTG3_OUT, 0, 0,
+ AUDATA2, PTG2_OUT, 0, 0,
+ AUDATA1, PTG1_OUT, 0, 0,
+ AUDATA0, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0,
+ LCDVSYN2_DACK, PTH6_OUT, PTH6_IN_PD, PTH6_IN,
+ LCDVSYN, PTH5_OUT, PTH5_IN_PD, PTH5_IN,
+ LCDDISP_LCDRS, PTH4_OUT, 0, 0,
+ LCDHSYN_LCDCS, PTH3_OUT, 0, 0,
+ LCDDON_LCDDON2, PTH2_OUT, 0, 0,
+ LCDD17_DV_HSYNC, PTH1_OUT, PTH1_IN_PD, PTH1_IN,
+ LCDD16_DV_VSYNC, PTH0_OUT, PTH0_IN_PD, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ STATUS0, PTJ7_OUT, 0, 0,
+ 0, PTJ6_OUT, 0, 0,
+ PDSTATUS, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ IRQ1, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ IRQ0, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ 0, 0, 0, 0,
+ SIUAILR_SIOF1_SS2, PTK6_OUT, PTK6_IN_PD, PTK6_IN,
+ SIUAIBT_SIOF1_SS1, PTK5_OUT, PTK5_IN_PD, PTK5_IN,
+ SIUAOLR_SIOF1_SYNC, PTK4_OUT, PTK4_IN_PD, PTK4_IN,
+ SIUAOBT_SIOF1_SCK, PTK3_OUT, PTK3_IN_PD, PTK3_IN,
+ SIUAISLD_SIOF1_RXD, 0, PTK2_IN_PD, PTK2_IN,
+ SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0,
+ PTK0, PTK0_OUT, PTK0_IN_PD, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ LCDD15_DV_D15, PTL7_OUT, PTL7_IN_PD, PTL7_IN,
+ LCDD14_DV_D14, PTL6_OUT, PTL6_IN_PD, PTL6_IN,
+ LCDD13_DV_D13, PTL5_OUT, PTL5_IN_PD, PTL5_IN,
+ LCDD12_DV_D12, PTL4_OUT, PTL4_IN_PD, PTL4_IN,
+ LCDD11_DV_D11, PTL3_OUT, PTL3_IN_PD, PTL3_IN,
+ LCDD10_DV_D10, PTL2_OUT, PTL2_IN_PD, PTL2_IN,
+ LCDD9_DV_D9, PTL1_OUT, PTL1_IN_PD, PTL1_IN,
+ LCDD8_DV_D8, PTL0_OUT, PTL0_IN_PD, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ LCDD7_DV_D7, PTM7_OUT, PTM7_IN_PD, PTM7_IN,
+ LCDD6_DV_D6, PTM6_OUT, PTM6_IN_PD, PTM6_IN,
+ LCDD5_DV_D5, PTM5_OUT, PTM5_IN_PD, PTM5_IN,
+ LCDD4_DV_D4, PTM4_OUT, PTM4_IN_PD, PTM4_IN,
+ LCDD3_DV_D3, PTM3_OUT, PTM3_IN_PD, PTM3_IN,
+ LCDD2_DV_D2, PTM2_OUT, PTM2_IN_PD, PTM2_IN,
+ LCDD1_DV_D1, PTM1_OUT, PTM1_IN_PD, PTM1_IN,
+ LCDD0_DV_D0, PTM0_OUT, PTM0_IN_PD, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ HPD63, PTN7_OUT, 0, PTN7_IN,
+ HPD62, PTN6_OUT, 0, PTN6_IN,
+ HPD61, PTN5_OUT, 0, PTN5_IN,
+ HPD60, PTN4_OUT, 0, PTN4_IN,
+ HPD59, PTN3_OUT, 0, PTN3_IN,
+ HPD58, PTN2_OUT, 0, PTN2_IN,
+ HPD57, PTN1_OUT, 0, PTN1_IN,
+ HPD56, PTN0_OUT, 0, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ 0, 0, 0, 0,
+ SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0,
+ SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, PTQ5_IN_PD, PTQ5_IN,
+ SIOF0_SYNC_TS_SDEN, PTQ4_OUT, PTQ4_IN_PD, PTQ4_IN,
+ SIOF0_SCK_TS_SCK, PTQ3_OUT, PTQ3_IN_PD, PTQ3_IN,
+ PTQ2, 0, PTQ2_IN_PD, PTQ2_IN,
+ PTQ1, PTQ1_OUT, 0, 0,
+ PTQ0, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ LCDRD, PTR4_OUT, 0, 0,
+ CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0,
+ WAIT, 0, PTR2_IN_PU, PTR2_IN,
+ LCDDCK_LCDWR, PTR1_OUT, 0, 0,
+ LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ SCIF0_CTS_SIUAISPD, 0, PTS4_IN_PD, PTS4_IN,
+ SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0,
+ SCIF0_SCK_TPUTO, PTS2_OUT, PTS2_IN_PD, PTS2_IN,
+ SCIF0_RXD, 0, PTS1_IN_PD, PTS1_IN,
+ SCIF0_TXD, PTS0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ FOE_VIO_VD2, PTT4_OUT, PTT4_IN_PD, PTT4_IN,
+ FWE, PTT3_OUT, PTT3_IN_PD, PTT3_IN,
+ FSC, PTT2_OUT, PTT2_IN_PD, PTT2_IN,
+ DREQ0, 0, PTT1_IN_PD, PTT1_IN,
+ FCDE, PTT0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ NAF2_VIO_D10, PTU4_OUT, PTU4_IN_PD, PTU4_IN,
+ NAF1_VIO_D9, PTU3_OUT, PTU3_IN_PD, PTU3_IN,
+ NAF0_VIO_D8, PTU2_OUT, PTU2_IN_PD, PTU2_IN,
+ FRB_VIO_CLK2, 0, PTU1_IN_PD, PTU1_IN,
+ FCE_VIO_HD2, PTU0_OUT, PTU0_IN_PD, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ NAF7_VIO_D15, PTV4_OUT, PTV4_IN_PD, PTV4_IN,
+ NAF6_VIO_D14, PTV3_OUT, PTV3_IN_PD, PTV3_IN,
+ NAF5_VIO_D13, PTV2_OUT, PTV2_IN_PD, PTV2_IN,
+ NAF4_VIO_D12, PTV1_OUT, PTV1_IN_PD, PTV1_IN,
+ NAF3_VIO_D11, PTV0_OUT, PTV0_IN_PD, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ 0, 0, 0, 0,
+ VIO_FLD_SCIF2_CTS, 0, PTW6_IN_PD, PTW6_IN,
+ VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0,
+ VIO_STEX_SCIF2_SCK, PTW4_OUT, PTW4_IN_PD, PTW4_IN,
+ VIO_STEM_SCIF2_TXD, PTW3_OUT, PTW3_IN_PD, PTW3_IN,
+ VIO_HD_SCIF2_RXD, PTW2_OUT, PTW2_IN_PD, PTW2_IN,
+ VIO_VD_SCIF1_CTS, PTW1_OUT, PTW1_IN_PD, PTW1_IN,
+ VIO_CLK_SCIF1_RTS, PTW0_OUT, PTW0_IN_PD, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ 0, 0, 0, 0,
+ CS6A_CE2B, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
+ LCDD23, PTX5_OUT, PTX5_IN_PD, PTX5_IN,
+ LCDD22, PTX4_OUT, PTX4_IN_PD, PTX4_IN,
+ LCDD21, PTX3_OUT, PTX3_IN_PD, PTX3_IN,
+ LCDD20, PTX2_OUT, PTX2_IN_PD, PTX2_IN,
+ LCDD19_DV_CLKI, PTX1_OUT, PTX1_IN_PD, PTX1_IN,
+ LCDD18_DV_CLK, PTX0_OUT, PTX0_IN_PD, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ KEYOUT5_IN5, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
+ KEYOUT4_IN6, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
+ KEYOUT3, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
+ KEYOUT2, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
+ KEYOUT1, PTY1_OUT, 0, 0,
+ KEYOUT0, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ KEYIN4_IRQ7, 0, PTZ5_IN_PU, PTZ5_IN,
+ KEYIN3, 0, PTZ4_IN_PU, PTZ4_IN,
+ KEYIN2, 0, PTZ3_IN_PU, PTZ3_IN,
+ KEYIN1, 0, PTZ2_IN_PU, PTZ2_IN,
+ KEYIN0_IRQ6, 0, PTZ1_IN_PU, PTZ1_IN,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+ PSA15_KEYIN0, PSA15_IRQ6,
+ PSA14_KEYIN4, PSA14_IRQ7,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSA9_IRQ4, PSA9_BS,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSA4_IRQ2, PSA4_SDHID2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+ PSB15_SIOTXD, PSB15_SIUBOSLD,
+ PSB14_SIORXD, PSB14_SIUBISLD,
+ PSB13_SIOD, PSB13_SIUBILR,
+ PSB12_SIOSTRB0, PSB12_SIUBIBT,
+ PSB11_SIOSTRB1, PSB11_SIUBOLR,
+ PSB10_SIOSCK, PSB10_SIUBOBT,
+ PSB9_SIOMCK, PSB9_SIUMCKB,
+ PSB8_SIOF0_MCK, PSB8_IRQ3,
+ PSB7_SIOF0_TXD, PSB7_IRDA_OUT,
+ PSB6_SIOF0_RXD, PSB6_IRDA_IN,
+ PSB5_SIOF0_SCK, PSB5_TS_SCK,
+ PSB4_SIOF0_SYNC, PSB4_TS_SDEN,
+ PSB3_SIOF0_SS1, PSB3_TS_SPSYNC,
+ PSB2_SIOF0_SS2, PSB2_SIM_RST,
+ PSB1_SIUMCKA, PSB1_SIOF1_MCK,
+ PSB0_SIUAOSLD, PSB0_SIOF1_TXD }
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+ PSC15_SIUAISLD, PSC15_SIOF1_RXD,
+ PSC14_SIUAOBT, PSC14_SIOF1_SCK,
+ PSC13_SIUAOLR, PSC13_SIOF1_SYNC,
+ PSC12_SIUAIBT, PSC12_SIOF1_SS1,
+ PSC11_SIUAILR, PSC11_SIOF1_SS2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSC0_NAF, PSC0_VIO }
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+ 0, 0,
+ 0, 0,
+ PSD13_VIO, PSD13_SCIF2,
+ PSD12_VIO, PSD12_SCIF1,
+ PSD11_VIO, PSD11_SCIF1,
+ PSD10_VIO_D0, PSD10_LCDLCLK,
+ PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB,
+ PSD8_SCIF0_SCK, PSD8_TPUTO,
+ PSD7_SCIF0_RTS, PSD7_SIUAOSPD,
+ PSD6_SCIF0_CTS, PSD6_SIUAISPD,
+ PSD5_CS6B_CE1B, PSD5_LCDCS2,
+ 0, 0,
+ PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
+ PSD2_LCDDON, PSD2_LCDDON2,
+ 0, 0,
+ PSD0_LCDD19_LCDD0, PSD0_DV }
+ },
+ { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+ PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
+ PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
+ PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT,
+ PSE12_LCDVSYN2, PSE12_DACK,
+ PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSE3_FLCTL, PSE3_VIO,
+ PSE2_NAF2, PSE2_VIO_D10,
+ PSE1_NAF1, PSE1_VIO_D9,
+ PSE0_NAF0, PSE0_VIO_D8 }
+ },
+ { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1) {
+ 0, 0,
+ HIZA14_KEYSC, HIZA14_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ HIZA10_NAF, HIZA10_HIZ,
+ HIZA9_VIO, HIZA9_HIZ,
+ HIZA8_LCDC, HIZA8_HIZ,
+ HIZA7_LCDC, HIZA7_HIZ,
+ HIZA6_LCDC, HIZA6_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ HIZB4_SIUA, HIZB4_HIZ,
+ 0, 0,
+ 0, 0,
+ HIZB1_VIO, HIZB1_HIZ,
+ HIZB0_VIO, HIZB0_HIZ }
+ },
+ { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1) {
+ HIZC15_IRQ7, HIZC15_HIZ,
+ HIZC14_IRQ6, HIZC14_HIZ,
+ HIZC13_IRQ5, HIZC13_HIZ,
+ HIZC12_IRQ4, HIZC12_HIZ,
+ HIZC11_IRQ3, HIZC11_HIZ,
+ HIZC10_IRQ2, HIZC10_HIZ,
+ HIZC9_IRQ1, HIZC9_HIZ,
+ HIZC8_IRQ0, HIZC8_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSELB9_VIO, MSELB9_VIO2,
+ MSELB8_RGB, MSELB8_SYS,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, 0, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, 0, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ 0, 0, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ 0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, 0, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
+ 0, 0, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ 0, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ 0, 0, 0, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ 0, 0, 0, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ 0, 0, 0, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ 0, 0, 0, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ 0, 0, 0, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ 0, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ 0, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ 0, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ 0, 0, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7722_pinmux_info = {
+ .name = "sh7722_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_KEYOUT5_IN5,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
new file mode 100644
index 000000000000..609673d3d70e
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
@@ -0,0 +1,1903 @@
+/*
+ * SH7723 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7723.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ5_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT5_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU5_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB2_IN_PU, PTB1_IN_PU,
+ PTR2_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE5_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ5_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR1_OUT, PTR0_OUT,
+ PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT5_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU5_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ7_FN, PTJ5_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT5_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU5_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+
+ PSA15_PSA14_FN1, PSA15_PSA14_FN2,
+ PSA13_PSA12_FN1, PSA13_PSA12_FN2,
+ PSA11_PSA10_FN1, PSA11_PSA10_FN2,
+ PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3,
+ PSA3_PSA2_FN1, PSA3_PSA2_FN2,
+ PSB15_PSB14_FN1, PSB15_PSB14_FN2,
+ PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS,
+ PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3,
+ PSB7_PSB6_FN1, PSB7_PSB6_FN2,
+ PSB5_PSB4_FN1, PSB5_PSB4_FN2,
+ PSB3_PSB2_FN1, PSB3_PSB2_FN2,
+ PSC15_PSC14_FN1, PSC15_PSC14_FN2,
+ PSC13_PSC12_FN1, PSC13_PSC12_FN2,
+ PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3,
+ PSC9_PSC8_FN1, PSC9_PSC8_FN2,
+ PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3,
+ PSD15_PSD14_FN1, PSD15_PSD14_FN2,
+ PSD13_PSD12_FN1, PSD13_PSD12_FN2,
+ PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3,
+ PSD9_PSD8_FN1, PSD9_PSD8_FN2,
+ PSD7_PSD6_FN1, PSD7_PSD6_FN2,
+ PSD5_PSD4_FN1, PSD5_PSD4_FN2,
+ PSD3_PSD2_FN1, PSD3_PSD2_FN2,
+ PSD1_PSD0_FN1, PSD1_PSD0_FN2,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ SCIF0_PTT_TXD_MARK, SCIF0_PTT_RXD_MARK,
+ SCIF0_PTT_SCK_MARK, SCIF0_PTU_TXD_MARK,
+ SCIF0_PTU_RXD_MARK, SCIF0_PTU_SCK_MARK,
+
+ SCIF1_PTS_TXD_MARK, SCIF1_PTS_RXD_MARK,
+ SCIF1_PTS_SCK_MARK, SCIF1_PTV_TXD_MARK,
+ SCIF1_PTV_RXD_MARK, SCIF1_PTV_SCK_MARK,
+
+ SCIF2_PTT_TXD_MARK, SCIF2_PTT_RXD_MARK,
+ SCIF2_PTT_SCK_MARK, SCIF2_PTU_TXD_MARK,
+ SCIF2_PTU_RXD_MARK, SCIF2_PTU_SCK_MARK,
+
+ SCIF3_PTS_TXD_MARK, SCIF3_PTS_RXD_MARK,
+ SCIF3_PTS_SCK_MARK, SCIF3_PTS_RTS_MARK,
+ SCIF3_PTS_CTS_MARK, SCIF3_PTV_TXD_MARK,
+ SCIF3_PTV_RXD_MARK, SCIF3_PTV_SCK_MARK,
+ SCIF3_PTV_RTS_MARK, SCIF3_PTV_CTS_MARK,
+
+ SCIF4_PTE_TXD_MARK, SCIF4_PTE_RXD_MARK,
+ SCIF4_PTE_SCK_MARK, SCIF4_PTN_TXD_MARK,
+ SCIF4_PTN_RXD_MARK, SCIF4_PTN_SCK_MARK,
+
+ SCIF5_PTE_TXD_MARK, SCIF5_PTE_RXD_MARK,
+ SCIF5_PTE_SCK_MARK, SCIF5_PTN_TXD_MARK,
+ SCIF5_PTN_RXD_MARK, SCIF5_PTN_SCK_MARK,
+
+ VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
+ VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
+ VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
+ VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
+ VIO_FLD_MARK, VIO_CKO_MARK,
+ VIO_VD1_MARK, VIO_HD1_MARK, VIO_CLK1_MARK,
+ VIO_HD2_MARK, VIO_VD2_MARK, VIO_CLK2_MARK,
+
+ LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
+ LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
+ LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
+ LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
+ LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
+ LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
+ LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
+ LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
+ LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
+ LCDLCLK_PTR_MARK, LCDLCLK_PTW_MARK,
+
+ IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
+ IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
+
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ AUDCK_MARK, AUDSYNC_MARK,
+
+ SDHI0CD_PTD_MARK, SDHI0WP_PTD_MARK,
+ SDHI0D3_PTD_MARK, SDHI0D2_PTD_MARK,
+ SDHI0D1_PTD_MARK, SDHI0D0_PTD_MARK,
+ SDHI0CMD_PTD_MARK, SDHI0CLK_PTD_MARK,
+
+ SDHI0CD_PTS_MARK, SDHI0WP_PTS_MARK,
+ SDHI0D3_PTS_MARK, SDHI0D2_PTS_MARK,
+ SDHI0D1_PTS_MARK, SDHI0D0_PTS_MARK,
+ SDHI0CMD_PTS_MARK, SDHI0CLK_PTS_MARK,
+
+ SDHI1CD_MARK, SDHI1WP_MARK, SDHI1D3_MARK, SDHI1D2_MARK,
+ SDHI1D1_MARK, SDHI1D0_MARK, SDHI1CMD_MARK, SDHI1CLK_MARK,
+
+ SIUAFCK_MARK, SIUAILR_MARK, SIUAIBT_MARK, SIUAISLD_MARK,
+ SIUAOLR_MARK, SIUAOBT_MARK, SIUAOSLD_MARK, SIUAMCK_MARK,
+ SIUAISPD_MARK, SIUAOSPD_MARK,
+
+ SIUBFCK_MARK, SIUBILR_MARK, SIUBIBT_MARK, SIUBISLD_MARK,
+ SIUBOLR_MARK, SIUBOBT_MARK, SIUBOSLD_MARK, SIUBMCK_MARK,
+
+ IRDA_IN_MARK, IRDA_OUT_MARK,
+
+ DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
+ DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
+ DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
+ DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
+ DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
+
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+ KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
+
+ MSIOF0_PTF_TXD_MARK, MSIOF0_PTF_RXD_MARK, MSIOF0_PTF_MCK_MARK,
+ MSIOF0_PTF_TSYNC_MARK, MSIOF0_PTF_TSCK_MARK, MSIOF0_PTF_RSYNC_MARK,
+ MSIOF0_PTF_RSCK_MARK, MSIOF0_PTF_SS1_MARK, MSIOF0_PTF_SS2_MARK,
+
+ MSIOF0_PTT_TXD_MARK, MSIOF0_PTT_RXD_MARK, MSIOF0_PTX_MCK_MARK,
+ MSIOF0_PTT_TSYNC_MARK, MSIOF0_PTT_TSCK_MARK, MSIOF0_PTT_RSYNC_MARK,
+ MSIOF0_PTT_RSCK_MARK, MSIOF0_PTT_SS1_MARK, MSIOF0_PTT_SS2_MARK,
+
+ MSIOF1_TXD_MARK, MSIOF1_RXD_MARK, MSIOF1_MCK_MARK,
+ MSIOF1_TSYNC_MARK, MSIOF1_TSCK_MARK, MSIOF1_RSYNC_MARK,
+ MSIOF1_RSCK_MARK, MSIOF1_SS1_MARK, MSIOF1_SS2_MARK,
+
+ TS0_SDAT_MARK, TS0_SCK_MARK, TS0_SDEN_MARK, TS0_SPSYNC_MARK,
+
+ FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
+ FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
+
+ DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
+
+ AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
+
+ STATUS0_MARK, PDSTATUS_MARK,
+
+ TPUTO3_MARK, TPUTO2_MARK, TPUTO1_MARK, TPUTO0_MARK,
+
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ IOIS16_MARK, WAIT_MARK, BS_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ CS6B_CE1B_MARK, CS6A_CE2B_MARK,
+ CS5B_CE1A_MARK, CS5A_CE2A_MARK,
+ WE3_ICIOWR_MARK, WE2_ICIORD_MARK,
+
+ IDED15_MARK, IDED14_MARK, IDED13_MARK, IDED12_MARK,
+ IDED11_MARK, IDED10_MARK, IDED9_MARK, IDED8_MARK,
+ IDED7_MARK, IDED6_MARK, IDED5_MARK, IDED4_MARK,
+ IDED3_MARK, IDED2_MARK, IDED1_MARK, IDED0_MARK,
+ DIRECTION_MARK, EXBUF_ENB_MARK, IDERST_MARK, IODACK_MARK,
+ IODREQ_MARK, IDEIORDY_MARK, IDEINT_MARK, IDEIOWR_MARK,
+ IDEIORD_MARK, IDECS1_MARK, IDECS0_MARK, IDEA2_MARK,
+ IDEA1_MARK, IDEA0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG5_DATA, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT),
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
+
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PSA15_PSA14_FN1, PTA7_FN),
+ PINMUX_DATA(KEYOUT2_MARK, PSA15_PSA14_FN2, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PSA15_PSA14_FN1, PTA6_FN),
+ PINMUX_DATA(KEYOUT1_MARK, PSA15_PSA14_FN2, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PSA15_PSA14_FN1, PTA5_FN),
+ PINMUX_DATA(KEYOUT0_MARK, PSA15_PSA14_FN2, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PSA15_PSA14_FN1, PTA4_FN),
+ PINMUX_DATA(KEYIN4_MARK, PSA15_PSA14_FN2, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PSA15_PSA14_FN1, PTA3_FN),
+ PINMUX_DATA(KEYIN3_MARK, PSA15_PSA14_FN2, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PSA15_PSA14_FN1, PTA2_FN),
+ PINMUX_DATA(KEYIN2_MARK, PSA15_PSA14_FN2, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PSA15_PSA14_FN1, PTA1_FN),
+ PINMUX_DATA(KEYIN1_MARK, PSA15_PSA14_FN2, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PSA15_PSA14_FN1, PTA0_FN),
+ PINMUX_DATA(KEYIN0_MARK, PSA15_PSA14_FN2, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PSA15_PSA14_FN1, PTB2_FN),
+ PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_PSA14_FN2, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PSA15_PSA14_FN1, PTB1_FN),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_PSA14_FN2, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PSA15_PSA14_FN1, PTB0_FN),
+ PINMUX_DATA(KEYOUT3_MARK, PSA15_PSA14_FN2, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(IDED15_MARK, PSA11_PSA10_FN1, PTC7_FN),
+ PINMUX_DATA(SDHI1CD_MARK, PSA11_PSA10_FN2, PTC7_FN),
+ PINMUX_DATA(IDED14_MARK, PSA11_PSA10_FN1, PTC6_FN),
+ PINMUX_DATA(SDHI1WP_MARK, PSA11_PSA10_FN2, PTC6_FN),
+ PINMUX_DATA(IDED13_MARK, PSA11_PSA10_FN1, PTC5_FN),
+ PINMUX_DATA(SDHI1D3_MARK, PSA11_PSA10_FN2, PTC5_FN),
+ PINMUX_DATA(IDED12_MARK, PSA11_PSA10_FN1, PTC4_FN),
+ PINMUX_DATA(SDHI1D2_MARK, PSA11_PSA10_FN2, PTC4_FN),
+ PINMUX_DATA(IDED11_MARK, PSA11_PSA10_FN1, PTC3_FN),
+ PINMUX_DATA(SDHI1D1_MARK, PSA11_PSA10_FN2, PTC3_FN),
+ PINMUX_DATA(IDED10_MARK, PSA11_PSA10_FN1, PTC2_FN),
+ PINMUX_DATA(SDHI1D0_MARK, PSA11_PSA10_FN2, PTC2_FN),
+ PINMUX_DATA(IDED9_MARK, PSA11_PSA10_FN1, PTC1_FN),
+ PINMUX_DATA(SDHI1CMD_MARK, PSA11_PSA10_FN2, PTC1_FN),
+ PINMUX_DATA(IDED8_MARK, PSA11_PSA10_FN1, PTC0_FN),
+ PINMUX_DATA(SDHI1CLK_MARK, PSA11_PSA10_FN2, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(IDED7_MARK, PSA11_PSA10_FN1, PTD7_FN),
+ PINMUX_DATA(SDHI0CD_PTD_MARK, PSA11_PSA10_FN2, PTD7_FN),
+ PINMUX_DATA(IDED6_MARK, PSA11_PSA10_FN1, PTD6_FN),
+ PINMUX_DATA(SDHI0WP_PTD_MARK, PSA11_PSA10_FN2, PTD6_FN),
+ PINMUX_DATA(IDED5_MARK, PSA11_PSA10_FN1, PTD5_FN),
+ PINMUX_DATA(SDHI0D3_PTD_MARK, PSA11_PSA10_FN2, PTD5_FN),
+ PINMUX_DATA(IDED4_MARK, PSA11_PSA10_FN1, PTD4_FN),
+ PINMUX_DATA(SDHI0D2_PTD_MARK, PSA11_PSA10_FN2, PTD4_FN),
+ PINMUX_DATA(IDED3_MARK, PSA11_PSA10_FN1, PTD3_FN),
+ PINMUX_DATA(SDHI0D1_PTD_MARK, PSA11_PSA10_FN2, PTD3_FN),
+ PINMUX_DATA(IDED2_MARK, PSA11_PSA10_FN1, PTD2_FN),
+ PINMUX_DATA(SDHI0D0_PTD_MARK, PSA11_PSA10_FN2, PTD2_FN),
+ PINMUX_DATA(IDED1_MARK, PSA11_PSA10_FN1, PTD1_FN),
+ PINMUX_DATA(SDHI0CMD_PTD_MARK, PSA11_PSA10_FN2, PTD1_FN),
+ PINMUX_DATA(IDED0_MARK, PSA11_PSA10_FN1, PTD0_FN),
+ PINMUX_DATA(SDHI0CLK_PTD_MARK, PSA11_PSA10_FN2, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(DIRECTION_MARK, PSA11_PSA10_FN1, PTE5_FN),
+ PINMUX_DATA(SCIF5_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE5_FN),
+ PINMUX_DATA(EXBUF_ENB_MARK, PSA11_PSA10_FN1, PTE4_FN),
+ PINMUX_DATA(SCIF5_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE4_FN),
+ PINMUX_DATA(IDERST_MARK, PSA11_PSA10_FN1, PTE3_FN),
+ PINMUX_DATA(SCIF5_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE3_FN),
+ PINMUX_DATA(IODACK_MARK, PSA11_PSA10_FN1, PTE2_FN),
+ PINMUX_DATA(SCIF4_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE2_FN),
+ PINMUX_DATA(IODREQ_MARK, PSA11_PSA10_FN1, PTE1_FN),
+ PINMUX_DATA(SCIF4_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE1_FN),
+ PINMUX_DATA(IDEIORDY_MARK, PSA11_PSA10_FN1, PTE0_FN),
+ PINMUX_DATA(SCIF4_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(IDEINT_MARK, PTF7_FN),
+ PINMUX_DATA(IDEIOWR_MARK, PSA5_PSA4_FN1, PTF6_FN),
+ PINMUX_DATA(MSIOF0_PTF_SS2_MARK, PSA5_PSA4_FN2, PTF6_FN),
+ PINMUX_DATA(MSIOF0_PTF_RSYNC_MARK, PSA5_PSA4_FN3, PTF6_FN),
+ PINMUX_DATA(IDEIORD_MARK, PSA5_PSA4_FN1, PTF5_FN),
+ PINMUX_DATA(MSIOF0_PTF_SS1_MARK, PSA5_PSA4_FN2, PTF5_FN),
+ PINMUX_DATA(MSIOF0_PTF_RSCK_MARK, PSA5_PSA4_FN3, PTF5_FN),
+ PINMUX_DATA(IDECS1_MARK, PSA11_PSA10_FN1, PTF4_FN),
+ PINMUX_DATA(MSIOF0_PTF_TSYNC_MARK, PSA11_PSA10_FN2, PTF4_FN),
+ PINMUX_DATA(IDECS0_MARK, PSA11_PSA10_FN1, PTF3_FN),
+ PINMUX_DATA(MSIOF0_PTF_TSCK_MARK, PSA11_PSA10_FN2, PTF3_FN),
+ PINMUX_DATA(IDEA2_MARK, PSA11_PSA10_FN1, PTF2_FN),
+ PINMUX_DATA(MSIOF0_PTF_RXD_MARK, PSA11_PSA10_FN2, PTF2_FN),
+ PINMUX_DATA(IDEA1_MARK, PSA11_PSA10_FN1, PTF1_FN),
+ PINMUX_DATA(MSIOF0_PTF_TXD_MARK, PSA11_PSA10_FN2, PTF1_FN),
+ PINMUX_DATA(IDEA0_MARK, PSA11_PSA10_FN1, PTF0_FN),
+ PINMUX_DATA(MSIOF0_PTF_MCK_MARK, PSA11_PSA10_FN2, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(AUDCK_MARK, PTG5_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
+ PINMUX_DATA(AUDATA3_MARK, PSA3_PSA2_FN1, PTG3_FN),
+ PINMUX_DATA(TPUTO3_MARK, PSA3_PSA2_FN2, PTG3_FN),
+ PINMUX_DATA(AUDATA2_MARK, PSA3_PSA2_FN1, PTG2_FN),
+ PINMUX_DATA(TPUTO2_MARK, PSA3_PSA2_FN2, PTG2_FN),
+ PINMUX_DATA(AUDATA1_MARK, PSA3_PSA2_FN1, PTG1_FN),
+ PINMUX_DATA(TPUTO1_MARK, PSA3_PSA2_FN2, PTG1_FN),
+ PINMUX_DATA(AUDATA0_MARK, PSA3_PSA2_FN1, PTG0_FN),
+ PINMUX_DATA(TPUTO0_MARK, PSA3_PSA2_FN2, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(LCDVCPWC_MARK, PTH7_FN),
+ PINMUX_DATA(LCDRD_MARK, PSB15_PSB14_FN1, PTH6_FN),
+ PINMUX_DATA(DV_CLKI_MARK, PSB15_PSB14_FN2, PTH6_FN),
+ PINMUX_DATA(LCDVSYN_MARK, PSB15_PSB14_FN1, PTH5_FN),
+ PINMUX_DATA(DV_CLK_MARK, PSB15_PSB14_FN2, PTH5_FN),
+ PINMUX_DATA(LCDDISP_MARK, PSB13_PSB12_LCDC_RGB, PTH4_FN),
+ PINMUX_DATA(LCDRS_MARK, PSB13_PSB12_LCDC_SYS, PTH4_FN),
+ PINMUX_DATA(LCDHSYN_MARK, PSB13_PSB12_LCDC_RGB, PTH3_FN),
+ PINMUX_DATA(LCDCS_MARK, PSB13_PSB12_LCDC_SYS, PTH3_FN),
+ PINMUX_DATA(LCDDON_MARK, PTH2_FN),
+ PINMUX_DATA(LCDDCK_MARK, PSB13_PSB12_LCDC_RGB, PTH1_FN),
+ PINMUX_DATA(LCDWR_MARK, PSB13_PSB12_LCDC_SYS, PTH1_FN),
+ PINMUX_DATA(LCDVEPWC_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(STATUS0_MARK, PTJ7_FN),
+ PINMUX_DATA(PDSTATUS_MARK, PTJ5_FN),
+ PINMUX_DATA(A25_MARK, PTJ3_FN),
+ PINMUX_DATA(A24_MARK, PTJ2_FN),
+ PINMUX_DATA(A23_MARK, PTJ1_FN),
+ PINMUX_DATA(A22_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(SIUAFCK_MARK, PTK7_FN),
+ PINMUX_DATA(SIUAILR_MARK, PSB9_PSB8_FN1, PTK6_FN),
+ PINMUX_DATA(MSIOF1_SS2_MARK, PSB9_PSB8_FN2, PTK6_FN),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PSB9_PSB8_FN3, PTK6_FN),
+ PINMUX_DATA(SIUAIBT_MARK, PSB9_PSB8_FN1, PTK5_FN),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PSB9_PSB8_FN2, PTK5_FN),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PSB9_PSB8_FN3, PTK5_FN),
+ PINMUX_DATA(SIUAISLD_MARK, PSB7_PSB6_FN1, PTK4_FN),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PSB7_PSB6_FN2, PTK4_FN),
+ PINMUX_DATA(SIUAOLR_MARK, PSB7_PSB6_FN1, PTK3_FN),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PSB7_PSB6_FN2, PTK3_FN),
+ PINMUX_DATA(SIUAOBT_MARK, PSB7_PSB6_FN1, PTK2_FN),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PSB7_PSB6_FN2, PTK2_FN),
+ PINMUX_DATA(SIUAOSLD_MARK, PSB7_PSB6_FN1, PTK1_FN),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PSB7_PSB6_FN2, PTK1_FN),
+ PINMUX_DATA(SIUAMCK_MARK, PSB7_PSB6_FN1, PTK0_FN),
+ PINMUX_DATA(MSIOF1_MCK_MARK, PSB7_PSB6_FN2, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(LCDD15_MARK, PSB5_PSB4_FN1, PTL7_FN),
+ PINMUX_DATA(DV_D15_MARK, PSB5_PSB4_FN2, PTL7_FN),
+ PINMUX_DATA(LCDD14_MARK, PSB5_PSB4_FN1, PTL6_FN),
+ PINMUX_DATA(DV_D14_MARK, PSB5_PSB4_FN2, PTL6_FN),
+ PINMUX_DATA(LCDD13_MARK, PSB5_PSB4_FN1, PTL5_FN),
+ PINMUX_DATA(DV_D13_MARK, PSB5_PSB4_FN2, PTL5_FN),
+ PINMUX_DATA(LCDD12_MARK, PSB5_PSB4_FN1, PTL4_FN),
+ PINMUX_DATA(DV_D12_MARK, PSB5_PSB4_FN2, PTL4_FN),
+ PINMUX_DATA(LCDD11_MARK, PSB5_PSB4_FN1, PTL3_FN),
+ PINMUX_DATA(DV_D11_MARK, PSB5_PSB4_FN2, PTL3_FN),
+ PINMUX_DATA(LCDD10_MARK, PSB5_PSB4_FN1, PTL2_FN),
+ PINMUX_DATA(DV_D10_MARK, PSB5_PSB4_FN2, PTL2_FN),
+ PINMUX_DATA(LCDD9_MARK, PSB5_PSB4_FN1, PTL1_FN),
+ PINMUX_DATA(DV_D9_MARK, PSB5_PSB4_FN2, PTL1_FN),
+ PINMUX_DATA(LCDD8_MARK, PSB5_PSB4_FN1, PTL0_FN),
+ PINMUX_DATA(DV_D8_MARK, PSB5_PSB4_FN2, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(LCDD7_MARK, PSB5_PSB4_FN1, PTM7_FN),
+ PINMUX_DATA(DV_D7_MARK, PSB5_PSB4_FN2, PTM7_FN),
+ PINMUX_DATA(LCDD6_MARK, PSB5_PSB4_FN1, PTM6_FN),
+ PINMUX_DATA(DV_D6_MARK, PSB5_PSB4_FN2, PTM6_FN),
+ PINMUX_DATA(LCDD5_MARK, PSB5_PSB4_FN1, PTM5_FN),
+ PINMUX_DATA(DV_D5_MARK, PSB5_PSB4_FN2, PTM5_FN),
+ PINMUX_DATA(LCDD4_MARK, PSB5_PSB4_FN1, PTM4_FN),
+ PINMUX_DATA(DV_D4_MARK, PSB5_PSB4_FN2, PTM4_FN),
+ PINMUX_DATA(LCDD3_MARK, PSB5_PSB4_FN1, PTM3_FN),
+ PINMUX_DATA(DV_D3_MARK, PSB5_PSB4_FN2, PTM3_FN),
+ PINMUX_DATA(LCDD2_MARK, PSB5_PSB4_FN1, PTM2_FN),
+ PINMUX_DATA(DV_D2_MARK, PSB5_PSB4_FN2, PTM2_FN),
+ PINMUX_DATA(LCDD1_MARK, PSB5_PSB4_FN1, PTM1_FN),
+ PINMUX_DATA(DV_D1_MARK, PSB5_PSB4_FN2, PTM1_FN),
+ PINMUX_DATA(LCDD0_MARK, PSB5_PSB4_FN1, PTM0_FN),
+ PINMUX_DATA(DV_D0_MARK, PSB5_PSB4_FN2, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(LCDD23_MARK, PSB3_PSB2_FN1, PTN7_FN),
+ PINMUX_DATA(SCIF5_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN7_FN),
+ PINMUX_DATA(LCDD22_MARK, PSB3_PSB2_FN1, PTN6_FN),
+ PINMUX_DATA(SCIF5_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN6_FN),
+ PINMUX_DATA(LCDD21_MARK, PSB3_PSB2_FN1, PTN5_FN),
+ PINMUX_DATA(SCIF5_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN5_FN),
+ PINMUX_DATA(LCDD20_MARK, PSB3_PSB2_FN1, PTN4_FN),
+ PINMUX_DATA(SCIF4_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN4_FN),
+ PINMUX_DATA(LCDD19_MARK, PSB3_PSB2_FN1, PTN3_FN),
+ PINMUX_DATA(SCIF4_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN3_FN),
+ PINMUX_DATA(LCDD18_MARK, PSB3_PSB2_FN1, PTN2_FN),
+ PINMUX_DATA(SCIF4_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN2_FN),
+ PINMUX_DATA(LCDD17_MARK, PSB5_PSB4_FN1, PTN1_FN),
+ PINMUX_DATA(DV_VSYNC_MARK, PSB5_PSB4_FN2, PTN1_FN),
+ PINMUX_DATA(LCDD16_MARK, PSB5_PSB4_FN1, PTN0_FN),
+ PINMUX_DATA(DV_HSYNC_MARK, PSB5_PSB4_FN2, PTN0_FN),
+
+ /* PTQ FN */
+ PINMUX_DATA(AN3_MARK, PTQ3_FN),
+ PINMUX_DATA(AN2_MARK, PTQ2_FN),
+ PINMUX_DATA(AN1_MARK, PTQ1_FN),
+ PINMUX_DATA(AN0_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
+ PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
+ PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSA13_PSA12_FN1, PTR3_FN),
+ PINMUX_DATA(LCDLCLK_PTR_MARK, PSA13_PSA12_FN2, PTR3_FN),
+ PINMUX_DATA(WAIT_MARK, PTR2_FN),
+ PINMUX_DATA(WE3_ICIOWR_MARK, PTR1_FN),
+ PINMUX_DATA(WE2_ICIORD_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SCIF1_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS7_FN),
+ PINMUX_DATA(SDHI0CD_PTS_MARK, PSC15_PSC14_FN2, PTS7_FN),
+ PINMUX_DATA(SCIF1_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS6_FN),
+ PINMUX_DATA(SDHI0WP_PTS_MARK, PSC15_PSC14_FN2, PTS6_FN),
+ PINMUX_DATA(SCIF1_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS5_FN),
+ PINMUX_DATA(SDHI0D3_PTS_MARK, PSC15_PSC14_FN2, PTS5_FN),
+ PINMUX_DATA(SCIF3_PTS_CTS_MARK, PSC15_PSC14_FN1, PTS4_FN),
+ PINMUX_DATA(SDHI0D2_PTS_MARK, PSC15_PSC14_FN2, PTS4_FN),
+ PINMUX_DATA(SCIF3_PTS_RTS_MARK, PSC15_PSC14_FN1, PTS3_FN),
+ PINMUX_DATA(SDHI0D1_PTS_MARK, PSC15_PSC14_FN2, PTS3_FN),
+ PINMUX_DATA(SCIF3_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS2_FN),
+ PINMUX_DATA(SDHI0D0_PTS_MARK, PSC15_PSC14_FN2, PTS2_FN),
+ PINMUX_DATA(SCIF3_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS1_FN),
+ PINMUX_DATA(SDHI0CMD_PTS_MARK, PSC15_PSC14_FN2, PTS1_FN),
+ PINMUX_DATA(SCIF3_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS0_FN),
+ PINMUX_DATA(SDHI0CLK_PTS_MARK, PSC15_PSC14_FN2, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(SCIF0_PTT_SCK_MARK, PSC13_PSC12_FN1, PTT5_FN),
+ PINMUX_DATA(MSIOF0_PTT_TSCK_MARK, PSC13_PSC12_FN2, PTT5_FN),
+ PINMUX_DATA(SCIF0_PTT_RXD_MARK, PSC13_PSC12_FN1, PTT4_FN),
+ PINMUX_DATA(MSIOF0_PTT_RXD_MARK, PSC13_PSC12_FN2, PTT4_FN),
+ PINMUX_DATA(SCIF0_PTT_TXD_MARK, PSC13_PSC12_FN1, PTT3_FN),
+ PINMUX_DATA(MSIOF0_PTT_TXD_MARK, PSC13_PSC12_FN2, PTT3_FN),
+ PINMUX_DATA(SCIF2_PTT_SCK_MARK, PSC11_PSC10_FN1, PTT2_FN),
+ PINMUX_DATA(MSIOF0_PTT_TSYNC_MARK, PSC11_PSC10_FN2, PTT2_FN),
+ PINMUX_DATA(SCIF2_PTT_RXD_MARK, PSC11_PSC10_FN1, PTT1_FN),
+ PINMUX_DATA(MSIOF0_PTT_SS1_MARK, PSC11_PSC10_FN2, PTT1_FN),
+ PINMUX_DATA(MSIOF0_PTT_RSCK_MARK, PSC11_PSC10_FN3, PTT1_FN),
+ PINMUX_DATA(SCIF2_PTT_TXD_MARK, PSC11_PSC10_FN1, PTT0_FN),
+ PINMUX_DATA(MSIOF0_PTT_SS2_MARK, PSC11_PSC10_FN2, PTT0_FN),
+ PINMUX_DATA(MSIOF0_PTT_RSYNC_MARK, PSC11_PSC10_FN3, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(FCDE_MARK, PSC9_PSC8_FN1, PTU5_FN),
+ PINMUX_DATA(SCIF0_PTU_SCK_MARK, PSC9_PSC8_FN2, PTU5_FN),
+ PINMUX_DATA(FSC_MARK, PSC9_PSC8_FN1, PTU4_FN),
+ PINMUX_DATA(SCIF0_PTU_RXD_MARK, PSC9_PSC8_FN2, PTU4_FN),
+ PINMUX_DATA(FWE_MARK, PSC9_PSC8_FN1, PTU3_FN),
+ PINMUX_DATA(SCIF0_PTU_TXD_MARK, PSC9_PSC8_FN2, PTU3_FN),
+ PINMUX_DATA(FOE_MARK, PSC7_PSC6_FN1, PTU2_FN),
+ PINMUX_DATA(SCIF2_PTU_SCK_MARK, PSC7_PSC6_FN2, PTU2_FN),
+ PINMUX_DATA(VIO_VD2_MARK, PSC7_PSC6_FN3, PTU2_FN),
+ PINMUX_DATA(FRB_MARK, PSC7_PSC6_FN1, PTU1_FN),
+ PINMUX_DATA(SCIF2_PTU_RXD_MARK, PSC7_PSC6_FN2, PTU1_FN),
+ PINMUX_DATA(VIO_CLK2_MARK, PSC7_PSC6_FN3, PTU1_FN),
+ PINMUX_DATA(FCE_MARK, PSC7_PSC6_FN1, PTU0_FN),
+ PINMUX_DATA(SCIF2_PTU_TXD_MARK, PSC7_PSC6_FN2, PTU0_FN),
+ PINMUX_DATA(VIO_HD2_MARK, PSC7_PSC6_FN3, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(NAF7_MARK, PSC7_PSC6_FN1, PTV7_FN),
+ PINMUX_DATA(SCIF1_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV7_FN),
+ PINMUX_DATA(VIO_D15_MARK, PSC7_PSC6_FN3, PTV7_FN),
+ PINMUX_DATA(NAF6_MARK, PSC7_PSC6_FN1, PTV6_FN),
+ PINMUX_DATA(SCIF1_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV6_FN),
+ PINMUX_DATA(VIO_D14_MARK, PSC7_PSC6_FN3, PTV6_FN),
+ PINMUX_DATA(NAF5_MARK, PSC7_PSC6_FN1, PTV5_FN),
+ PINMUX_DATA(SCIF1_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV5_FN),
+ PINMUX_DATA(VIO_D13_MARK, PSC7_PSC6_FN3, PTV5_FN),
+ PINMUX_DATA(NAF4_MARK, PSC7_PSC6_FN1, PTV4_FN),
+ PINMUX_DATA(SCIF3_PTV_CTS_MARK, PSC7_PSC6_FN2, PTV4_FN),
+ PINMUX_DATA(VIO_D12_MARK, PSC7_PSC6_FN3, PTV4_FN),
+ PINMUX_DATA(NAF3_MARK, PSC7_PSC6_FN1, PTV3_FN),
+ PINMUX_DATA(SCIF3_PTV_RTS_MARK, PSC7_PSC6_FN2, PTV3_FN),
+ PINMUX_DATA(VIO_D11_MARK, PSC7_PSC6_FN3, PTV3_FN),
+ PINMUX_DATA(NAF2_MARK, PSC7_PSC6_FN1, PTV2_FN),
+ PINMUX_DATA(SCIF3_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV2_FN),
+ PINMUX_DATA(VIO_D10_MARK, PSC7_PSC6_FN3, PTV2_FN),
+ PINMUX_DATA(NAF1_MARK, PSC7_PSC6_FN1, PTV1_FN),
+ PINMUX_DATA(SCIF3_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV1_FN),
+ PINMUX_DATA(VIO_D9_MARK, PSC7_PSC6_FN3, PTV1_FN),
+ PINMUX_DATA(NAF0_MARK, PSC7_PSC6_FN1, PTV0_FN),
+ PINMUX_DATA(SCIF3_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV0_FN),
+ PINMUX_DATA(VIO_D8_MARK, PSC7_PSC6_FN3, PTV0_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(IRQ7_MARK, PTW7_FN),
+ PINMUX_DATA(IRQ6_MARK, PTW6_FN),
+ PINMUX_DATA(IRQ5_MARK, PTW5_FN),
+ PINMUX_DATA(IRQ4_MARK, PSD15_PSD14_FN1, PTW4_FN),
+ PINMUX_DATA(LCDLCLK_PTW_MARK, PSD15_PSD14_FN2, PTW4_FN),
+ PINMUX_DATA(IRQ3_MARK, PSD13_PSD12_FN1, PTW3_FN),
+ PINMUX_DATA(ADTRG_MARK, PSD13_PSD12_FN2, PTW3_FN),
+ PINMUX_DATA(IRQ2_MARK, PSD11_PSD10_FN1, PTW2_FN),
+ PINMUX_DATA(BS_MARK, PSD11_PSD10_FN2, PTW2_FN),
+ PINMUX_DATA(VIO_CKO_MARK, PSD11_PSD10_FN3, PTW2_FN),
+ PINMUX_DATA(IRQ1_MARK, PSD9_PSD8_FN1, PTW1_FN),
+ PINMUX_DATA(SIUAISPD_MARK, PSD9_PSD8_FN2, PTW1_FN),
+ PINMUX_DATA(IRQ0_MARK, PSD7_PSD6_FN1, PTW0_FN),
+ PINMUX_DATA(SIUAOSPD_MARK, PSD7_PSD6_FN2, PTW0_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(DACK1_MARK, PTX7_FN),
+ PINMUX_DATA(DREQ1_MARK, PSD3_PSD2_FN1, PTX6_FN),
+ PINMUX_DATA(MSIOF0_PTX_MCK_MARK, PSD3_PSD2_FN2, PTX6_FN),
+ PINMUX_DATA(DACK1_MARK, PTX5_FN),
+ PINMUX_DATA(IRDA_OUT_MARK, PSD5_PSD4_FN2, PTX5_FN),
+ PINMUX_DATA(DREQ1_MARK, PTX4_FN),
+ PINMUX_DATA(IRDA_IN_MARK, PSD5_PSD4_FN2, PTX4_FN),
+ PINMUX_DATA(TS0_SDAT_MARK, PTX3_FN),
+ PINMUX_DATA(TS0_SCK_MARK, PTX2_FN),
+ PINMUX_DATA(TS0_SDEN_MARK, PTX1_FN),
+ PINMUX_DATA(TS0_SPSYNC_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(VIO_D7_MARK, PTY7_FN),
+ PINMUX_DATA(VIO_D6_MARK, PTY6_FN),
+ PINMUX_DATA(VIO_D5_MARK, PTY5_FN),
+ PINMUX_DATA(VIO_D4_MARK, PTY4_FN),
+ PINMUX_DATA(VIO_D3_MARK, PTY3_FN),
+ PINMUX_DATA(VIO_D2_MARK, PTY2_FN),
+ PINMUX_DATA(VIO_D1_MARK, PTY1_FN),
+ PINMUX_DATA(VIO_D0_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(SIUBOBT_MARK, PTZ7_FN),
+ PINMUX_DATA(SIUBOLR_MARK, PTZ6_FN),
+ PINMUX_DATA(SIUBOSLD_MARK, PTZ5_FN),
+ PINMUX_DATA(SIUBMCK_MARK, PTZ4_FN),
+ PINMUX_DATA(VIO_FLD_MARK, PSD1_PSD0_FN1, PTZ3_FN),
+ PINMUX_DATA(SIUBFCK_MARK, PSD1_PSD0_FN2, PTZ3_FN),
+ PINMUX_DATA(VIO_HD1_MARK, PSD1_PSD0_FN1, PTZ2_FN),
+ PINMUX_DATA(SIUBILR_MARK, PSD1_PSD0_FN2, PTZ2_FN),
+ PINMUX_DATA(VIO_VD1_MARK, PSD1_PSD0_FN1, PTZ1_FN),
+ PINMUX_DATA(SIUBIBT_MARK, PSD1_PSD0_FN2, PTZ1_FN),
+ PINMUX_DATA(VIO_CLK1_MARK, PSD1_PSD0_FN1, PTZ0_FN),
+ PINMUX_DATA(SIUBISLD_MARK, PSD1_PSD0_FN2, PTZ0_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_TXD, SCIF0_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_RXD, SCIF0_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_SCK, SCIF0_PTT_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_TXD, SCIF0_PTU_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_RXD, SCIF0_PTU_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_SCK, SCIF0_PTU_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_TXD, SCIF1_PTS_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_RXD, SCIF1_PTS_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_SCK, SCIF1_PTS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_TXD, SCIF1_PTV_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_RXD, SCIF1_PTV_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_SCK, SCIF1_PTV_SCK_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_TXD, SCIF2_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_RXD, SCIF2_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_SCK, SCIF2_PTT_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_TXD, SCIF2_PTU_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_RXD, SCIF2_PTU_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_SCK, SCIF2_PTU_SCK_MARK),
+
+ /* SCIF3 */
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_TXD, SCIF3_PTS_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RXD, SCIF3_PTS_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_SCK, SCIF3_PTS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RTS, SCIF3_PTS_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_CTS, SCIF3_PTS_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_TXD, SCIF3_PTV_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RXD, SCIF3_PTV_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_SCK, SCIF3_PTV_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RTS, SCIF3_PTV_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_CTS, SCIF3_PTV_CTS_MARK),
+
+ /* SCIF4 */
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_TXD, SCIF4_PTE_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_RXD, SCIF4_PTE_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_SCK, SCIF4_PTE_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_TXD, SCIF4_PTN_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_RXD, SCIF4_PTN_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_SCK, SCIF4_PTN_SCK_MARK),
+
+ /* SCIF5 */
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_TXD, SCIF5_PTE_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_RXD, SCIF5_PTE_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_SCK, SCIF5_PTE_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_TXD, SCIF5_PTN_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_RXD, SCIF5_PTN_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_SCK, SCIF5_PTN_SCK_MARK),
+
+ /* CEU */
+ PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK1, VIO_CLK1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD1, VIO_VD1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD1, VIO_HD1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK_PTR, LCDLCLK_PTR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK_PTW, LCDLCLK_PTW_MARK),
+ /* Main LCD */
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ /* Main LCD - RGB Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ /* Main LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* SDHI0 (PTD) */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD_PTD, SDHI0CD_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP_PTD, SDHI0WP_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3_PTD, SDHI0D3_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2_PTD, SDHI0D2_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1_PTD, SDHI0D1_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0_PTD, SDHI0D0_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTD, SDHI0CMD_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTD, SDHI0CLK_PTD_MARK),
+
+ /* SDHI0 (PTS) */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD_PTS, SDHI0CD_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP_PTS, SDHI0WP_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3_PTS, SDHI0D3_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2_PTS, SDHI0D2_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1_PTS, SDHI0D1_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0_PTS, SDHI0D0_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTS, SDHI0CMD_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTS, SDHI0CLK_PTS_MARK),
+
+ /* SDHI1 */
+ PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
+
+ /* SIUA */
+ PINMUX_GPIO(GPIO_FN_SIUAFCK, SIUAFCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAMCK, SIUAMCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISPD, SIUAISPD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSPD, SIUAOSPD_MARK),
+
+ /* SIUB */
+ PINMUX_GPIO(GPIO_FN_SIUBFCK, SIUBFCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBMCK, SIUBMCK_MARK),
+
+ /* IRDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+
+ /* MSIOF0 (PTF) */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TXD, MSIOF0_PTF_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RXD, MSIOF0_PTF_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_MCK, MSIOF0_PTF_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSYNC, MSIOF0_PTF_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSCK, MSIOF0_PTF_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSYNC, MSIOF0_PTF_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSCK, MSIOF0_PTF_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS1, MSIOF0_PTF_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS2, MSIOF0_PTF_SS2_MARK),
+
+ /* MSIOF0 (PTT+PTX) */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TXD, MSIOF0_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RXD, MSIOF0_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTX_MCK, MSIOF0_PTX_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSYNC, MSIOF0_PTT_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSCK, MSIOF0_PTT_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSYNC, MSIOF0_PTT_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSCK, MSIOF0_PTT_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS1, MSIOF0_PTT_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS2, MSIOF0_PTT_SS2_MARK),
+
+ /* MSIOF1 */
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TS0_SDAT, TS0_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SCK, TS0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SDEN, TS0_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SPSYNC, TS0_SPSYNC_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
+ PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
+ PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
+ PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* CPG */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
+
+ /* ATAPI */
+ PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
+ PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
+ PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
+ PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
+ PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, 0, PTA7_IN,
+ PTA6_FN, PTA6_OUT, 0, PTA6_IN,
+ PTA5_FN, PTA5_OUT, 0, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, 0, PTB7_IN,
+ PTB6_FN, PTB6_OUT, 0, PTB6_IN,
+ PTB5_FN, PTB5_OUT, 0, PTB5_IN,
+ PTB4_FN, PTB4_OUT, 0, PTB4_IN,
+ PTB3_FN, PTB3_OUT, 0, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, 0, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, 0, PTC7_IN,
+ PTC6_FN, PTC6_OUT, 0, PTC6_IN,
+ PTC5_FN, PTC5_OUT, 0, PTC5_IN,
+ PTC4_FN, PTC4_OUT, 0, PTC4_IN,
+ PTC3_FN, PTC3_OUT, 0, PTC3_IN,
+ PTC2_FN, PTC2_OUT, 0, PTC2_IN,
+ PTC1_FN, PTC1_OUT, 0, PTC1_IN,
+ PTC0_FN, PTC0_OUT, 0, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, 0, PTD7_IN,
+ PTD6_FN, PTD6_OUT, 0, PTD6_IN,
+ PTD5_FN, PTD5_OUT, 0, PTD5_IN,
+ PTD4_FN, PTD4_OUT, 0, PTD4_IN,
+ PTD3_FN, PTD3_OUT, 0, PTD3_IN,
+ PTD2_FN, PTD2_OUT, 0, PTD2_IN,
+ PTD1_FN, PTD1_OUT, 0, PTD1_IN,
+ PTD0_FN, PTD0_OUT, 0, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTE5_FN, PTE5_OUT, 0, PTE5_IN,
+ PTE4_FN, PTE4_OUT, 0, PTE4_IN,
+ PTE3_FN, PTE3_OUT, 0, PTE3_IN,
+ PTE2_FN, PTE2_OUT, 0, PTE2_IN,
+ PTE1_FN, PTE1_OUT, 0, PTE1_IN,
+ PTE0_FN, PTE0_OUT, 0, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ PTF7_FN, PTF7_OUT, 0, PTF7_IN,
+ PTF6_FN, PTF6_OUT, 0, PTF6_IN,
+ PTF5_FN, PTF5_OUT, 0, PTF5_IN,
+ PTF4_FN, PTF4_OUT, 0, PTF4_IN,
+ PTF3_FN, PTF3_OUT, 0, PTF3_IN,
+ PTF2_FN, PTF2_OUT, 0, PTF2_IN,
+ PTF1_FN, PTF1_OUT, 0, PTF1_IN,
+ PTF0_FN, PTF0_OUT, 0, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTG5_FN, PTG5_OUT, 0, 0,
+ PTG4_FN, PTG4_OUT, 0, 0,
+ PTG3_FN, PTG3_OUT, 0, 0,
+ PTG2_FN, PTG2_OUT, 0, 0,
+ PTG1_FN, PTG1_OUT, 0, 0,
+ PTG0_FN, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ PTH7_FN, PTH7_OUT, 0, PTH7_IN,
+ PTH6_FN, PTH6_OUT, 0, PTH6_IN,
+ PTH5_FN, PTH5_OUT, 0, PTH5_IN,
+ PTH4_FN, PTH4_OUT, 0, PTH4_IN,
+ PTH3_FN, PTH3_OUT, 0, PTH3_IN,
+ PTH2_FN, PTH2_OUT, 0, PTH2_IN,
+ PTH1_FN, PTH1_OUT, 0, PTH1_IN,
+ PTH0_FN, PTH0_OUT, 0, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ PTJ7_FN, PTJ7_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ5_FN, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ PTK7_FN, PTK7_OUT, 0, PTK7_IN,
+ PTK6_FN, PTK6_OUT, 0, PTK6_IN,
+ PTK5_FN, PTK5_OUT, 0, PTK5_IN,
+ PTK4_FN, PTK4_OUT, 0, PTK4_IN,
+ PTK3_FN, PTK3_OUT, 0, PTK3_IN,
+ PTK2_FN, PTK2_OUT, 0, PTK2_IN,
+ PTK1_FN, PTK1_OUT, 0, PTK1_IN,
+ PTK0_FN, PTK0_OUT, 0, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, 0, PTL7_IN,
+ PTL6_FN, PTL6_OUT, 0, PTL6_IN,
+ PTL5_FN, PTL5_OUT, 0, PTL5_IN,
+ PTL4_FN, PTL4_OUT, 0, PTL4_IN,
+ PTL3_FN, PTL3_OUT, 0, PTL3_IN,
+ PTL2_FN, PTL2_OUT, 0, PTL2_IN,
+ PTL1_FN, PTL1_OUT, 0, PTL1_IN,
+ PTL0_FN, PTL0_OUT, 0, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, 0, PTM7_IN,
+ PTM6_FN, PTM6_OUT, 0, PTM6_IN,
+ PTM5_FN, PTM5_OUT, 0, PTM5_IN,
+ PTM4_FN, PTM4_OUT, 0, PTM4_IN,
+ PTM3_FN, PTM3_OUT, 0, PTM3_IN,
+ PTM2_FN, PTM2_OUT, 0, PTM2_IN,
+ PTM1_FN, PTM1_OUT, 0, PTM1_IN,
+ PTM0_FN, PTM0_OUT, 0, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ PTN7_FN, PTN7_OUT, 0, PTN7_IN,
+ PTN6_FN, PTN6_OUT, 0, PTN6_IN,
+ PTN5_FN, PTN5_OUT, 0, PTN5_IN,
+ PTN4_FN, PTN4_OUT, 0, PTN4_IN,
+ PTN3_FN, PTN3_OUT, 0, PTN3_IN,
+ PTN2_FN, PTN2_OUT, 0, PTN2_IN,
+ PTN1_FN, PTN1_OUT, 0, PTN1_IN,
+ PTN0_FN, PTN0_OUT, 0, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTQ3_FN, 0, 0, PTQ3_IN,
+ PTQ2_FN, 0, 0, PTQ2_IN,
+ PTQ1_FN, 0, 0, PTQ1_IN,
+ PTQ0_FN, 0, 0, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ PTR7_FN, PTR7_OUT, 0, PTR7_IN,
+ PTR6_FN, PTR6_OUT, 0, PTR6_IN,
+ PTR5_FN, PTR5_OUT, 0, PTR5_IN,
+ PTR4_FN, PTR4_OUT, 0, PTR4_IN,
+ PTR3_FN, 0, 0, PTR3_IN,
+ PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, 0, PTR1_IN,
+ PTR0_FN, PTR0_OUT, 0, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ PTS7_FN, PTS7_OUT, 0, PTS7_IN,
+ PTS6_FN, PTS6_OUT, 0, PTS6_IN,
+ PTS5_FN, PTS5_OUT, 0, PTS5_IN,
+ PTS4_FN, PTS4_OUT, 0, PTS4_IN,
+ PTS3_FN, PTS3_OUT, 0, PTS3_IN,
+ PTS2_FN, PTS2_OUT, 0, PTS2_IN,
+ PTS1_FN, PTS1_OUT, 0, PTS1_IN,
+ PTS0_FN, PTS0_OUT, 0, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTT5_FN, PTT5_OUT, 0, PTT5_IN,
+ PTT4_FN, PTT4_OUT, 0, PTT4_IN,
+ PTT3_FN, PTT3_OUT, 0, PTT3_IN,
+ PTT2_FN, PTT2_OUT, 0, PTT2_IN,
+ PTT1_FN, PTT1_OUT, 0, PTT1_IN,
+ PTT0_FN, PTT0_OUT, 0, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTU5_FN, PTU5_OUT, 0, PTU5_IN,
+ PTU4_FN, PTU4_OUT, 0, PTU4_IN,
+ PTU3_FN, PTU3_OUT, 0, PTU3_IN,
+ PTU2_FN, PTU2_OUT, 0, PTU2_IN,
+ PTU1_FN, PTU1_OUT, 0, PTU1_IN,
+ PTU0_FN, PTU0_OUT, 0, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ PTV7_FN, PTV7_OUT, 0, PTV7_IN,
+ PTV6_FN, PTV6_OUT, 0, PTV6_IN,
+ PTV5_FN, PTV5_OUT, 0, PTV5_IN,
+ PTV4_FN, PTV4_OUT, 0, PTV4_IN,
+ PTV3_FN, PTV3_OUT, 0, PTV3_IN,
+ PTV2_FN, PTV2_OUT, 0, PTV2_IN,
+ PTV1_FN, PTV1_OUT, 0, PTV1_IN,
+ PTV0_FN, PTV0_OUT, 0, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ PTW7_FN, PTW7_OUT, 0, PTW7_IN,
+ PTW6_FN, PTW6_OUT, 0, PTW6_IN,
+ PTW5_FN, PTW5_OUT, 0, PTW5_IN,
+ PTW4_FN, PTW4_OUT, 0, PTW4_IN,
+ PTW3_FN, PTW3_OUT, 0, PTW3_IN,
+ PTW2_FN, PTW2_OUT, 0, PTW2_IN,
+ PTW1_FN, PTW1_OUT, 0, PTW1_IN,
+ PTW0_FN, PTW0_OUT, 0, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ PTX7_FN, PTX7_OUT, 0, PTX7_IN,
+ PTX6_FN, PTX6_OUT, 0, PTX6_IN,
+ PTX5_FN, PTX5_OUT, 0, PTX5_IN,
+ PTX4_FN, PTX4_OUT, 0, PTX4_IN,
+ PTX3_FN, PTX3_OUT, 0, PTX3_IN,
+ PTX2_FN, PTX2_OUT, 0, PTX2_IN,
+ PTX1_FN, PTX1_OUT, 0, PTX1_IN,
+ PTX0_FN, PTX0_OUT, 0, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ PTY7_FN, PTY7_OUT, 0, PTY7_IN,
+ PTY6_FN, PTY6_OUT, 0, PTY6_IN,
+ PTY5_FN, PTY5_OUT, 0, PTY5_IN,
+ PTY4_FN, PTY4_OUT, 0, PTY4_IN,
+ PTY3_FN, PTY3_OUT, 0, PTY3_IN,
+ PTY2_FN, PTY2_OUT, 0, PTY2_IN,
+ PTY1_FN, PTY1_OUT, 0, PTY1_IN,
+ PTY0_FN, PTY0_OUT, 0, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, 0, PTZ7_IN,
+ PTZ6_FN, PTZ6_OUT, 0, PTZ6_IN,
+ PTZ5_FN, PTZ5_OUT, 0, PTZ5_IN,
+ PTZ4_FN, PTZ4_OUT, 0, PTZ4_IN,
+ PTZ3_FN, PTZ3_OUT, 0, PTZ3_IN,
+ PTZ2_FN, PTZ2_OUT, 0, PTZ2_IN,
+ PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
+ PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2) {
+ PSA15_PSA14_FN1, PSA15_PSA14_FN2, 0, 0,
+ PSA13_PSA12_FN1, PSA13_PSA12_FN2, 0, 0,
+ PSA11_PSA10_FN1, PSA11_PSA10_FN2, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3, 0,
+ PSA3_PSA2_FN1, PSA3_PSA2_FN2, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2) {
+ PSB15_PSB14_FN1, PSB15_PSB14_FN2, 0, 0,
+ PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS, 0, 0,
+ 0, 0, 0, 0,
+ PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3, 0,
+ PSB7_PSB6_FN1, PSB7_PSB6_FN2, 0, 0,
+ PSB5_PSB4_FN1, PSB5_PSB4_FN2, 0, 0,
+ PSB3_PSB2_FN1, PSB3_PSB2_FN2, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2) {
+ PSC15_PSC14_FN1, PSC15_PSC14_FN2, 0, 0,
+ PSC13_PSC12_FN1, PSC13_PSC12_FN2, 0, 0,
+ PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3, 0,
+ PSC9_PSC8_FN1, PSC9_PSC8_FN2, 0, 0,
+ PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2) {
+ PSD15_PSD14_FN1, PSD15_PSD14_FN2, 0, 0,
+ PSD13_PSD12_FN1, PSD13_PSD12_FN2, 0, 0,
+ PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3, 0,
+ PSD9_PSD8_FN1, PSD9_PSD8_FN2, 0, 0,
+ PSD7_PSD6_FN1, PSD7_PSD6_FN2, 0, 0,
+ PSD5_PSD4_FN1, PSD5_PSD4_FN2, 0, 0,
+ PSD3_PSD2_FN1, PSD3_PSD2_FN2, 0, 0,
+ PSD1_PSD0_FN1, PSD1_PSD0_FN2, 0, 0 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ 0, 0, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, 0, PTJ5_DATA, 0,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ 0, 0, 0, 0,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ 0, 0, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ 0, 0, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7723_pinmux_info = {
+ .name = "sh7723_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_IDEA0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
new file mode 100644
index 000000000000..233fbf750b39
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
@@ -0,0 +1,2225 @@
+/*
+ * SH7724 Pinmux
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7723 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7724.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ7_IN, PTQ6_IN, PTQ5_IN, PTQ4_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
+ PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
+ PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
+ PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
+ PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
+ PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
+ PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
+ PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
+ PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
+ PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
+ PTN7_IN_PU, PTN6_IN_PU, PTN5_IN_PU, PTN4_IN_PU,
+ PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
+ PTQ7_IN_PU, PTQ6_IN_PU, PTQ5_IN_PU, PTQ4_IN_PU,
+ PTQ3_IN_PU, PTQ2_IN_PU, PTQ1_IN_PU, PTQ0_IN_PU,
+ PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
+ PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
+ PTS6_IN_PU, PTS5_IN_PU, PTS4_IN_PU,
+ PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
+ PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
+ PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
+ PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
+ PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
+ PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU,
+ PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU,
+ PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
+ PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
+ PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
+ PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
+ PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
+ PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
+ PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ6_OUT, PTJ5_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTQ7_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR1_OUT, PTR0_OUT,
+ PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
+ PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
+ PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ7_FN, PTJ6_FN, PTJ5_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTQ7_FN, PTQ6_FN, PTQ5_FN, PTQ4_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
+ PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
+ PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+
+ PSA15_0, PSA15_1,
+ PSA14_0, PSA14_1,
+ PSA13_0, PSA13_1,
+ PSA12_0, PSA12_1,
+ PSA10_0, PSA10_1,
+ PSA9_0, PSA9_1,
+ PSA8_0, PSA8_1,
+ PSA7_0, PSA7_1,
+ PSA6_0, PSA6_1,
+ PSA5_0, PSA5_1,
+ PSA3_0, PSA3_1,
+ PSA2_0, PSA2_1,
+ PSA1_0, PSA1_1,
+ PSA0_0, PSA0_1,
+
+ PSB14_0, PSB14_1,
+ PSB13_0, PSB13_1,
+ PSB12_0, PSB12_1,
+ PSB11_0, PSB11_1,
+ PSB10_0, PSB10_1,
+ PSB9_0, PSB9_1,
+ PSB8_0, PSB8_1,
+ PSB7_0, PSB7_1,
+ PSB6_0, PSB6_1,
+ PSB5_0, PSB5_1,
+ PSB4_0, PSB4_1,
+ PSB3_0, PSB3_1,
+ PSB2_0, PSB2_1,
+ PSB1_0, PSB1_1,
+ PSB0_0, PSB0_1,
+
+ PSC15_0, PSC15_1,
+ PSC14_0, PSC14_1,
+ PSC13_0, PSC13_1,
+ PSC12_0, PSC12_1,
+ PSC11_0, PSC11_1,
+ PSC10_0, PSC10_1,
+ PSC9_0, PSC9_1,
+ PSC8_0, PSC8_1,
+ PSC7_0, PSC7_1,
+ PSC6_0, PSC6_1,
+ PSC5_0, PSC5_1,
+ PSC4_0, PSC4_1,
+ PSC2_0, PSC2_1,
+ PSC1_0, PSC1_1,
+ PSC0_0, PSC0_1,
+
+ PSD15_0, PSD15_1,
+ PSD14_0, PSD14_1,
+ PSD13_0, PSD13_1,
+ PSD12_0, PSD12_1,
+ PSD11_0, PSD11_1,
+ PSD10_0, PSD10_1,
+ PSD9_0, PSD9_1,
+ PSD8_0, PSD8_1,
+ PSD7_0, PSD7_1,
+ PSD6_0, PSD6_1,
+ PSD5_0, PSD5_1,
+ PSD4_0, PSD4_1,
+ PSD3_0, PSD3_1,
+ PSD2_0, PSD2_1,
+ PSD1_0, PSD1_1,
+ PSD0_0, PSD0_1,
+
+ PSE15_0, PSE15_1,
+ PSE14_0, PSE14_1,
+ PSE13_0, PSE13_1,
+ PSE12_0, PSE12_1,
+ PSE11_0, PSE11_1,
+ PSE10_0, PSE10_1,
+ PSE9_0, PSE9_1,
+ PSE8_0, PSE8_1,
+ PSE7_0, PSE7_1,
+ PSE6_0, PSE6_1,
+ PSE5_0, PSE5_1,
+ PSE4_0, PSE4_1,
+ PSE3_0, PSE3_1,
+ PSE2_0, PSE2_1,
+ PSE1_0, PSE1_1,
+ PSE0_0, PSE0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /*PTA*/
+ D23_MARK, KEYOUT2_MARK, IDED15_MARK,
+ D22_MARK, KEYOUT1_MARK, IDED14_MARK,
+ D21_MARK, KEYOUT0_MARK, IDED13_MARK,
+ D20_MARK, KEYIN4_MARK, IDED12_MARK,
+ D19_MARK, KEYIN3_MARK, IDED11_MARK,
+ D18_MARK, KEYIN2_MARK, IDED10_MARK,
+ D17_MARK, KEYIN1_MARK, IDED9_MARK,
+ D16_MARK, KEYIN0_MARK, IDED8_MARK,
+
+ /*PTB*/
+ D31_MARK, TPUTO1_MARK, IDEA1_MARK,
+ D30_MARK, TPUTO0_MARK, IDEA0_MARK,
+ D29_MARK, IODREQ_MARK,
+ D28_MARK, IDECS0_MARK,
+ D27_MARK, IDECS1_MARK,
+ D26_MARK, KEYOUT5_IN5_MARK, IDEIORD_MARK,
+ D25_MARK, KEYOUT4_IN6_MARK, IDEIOWR_MARK,
+ D24_MARK, KEYOUT3_MARK, IDEINT_MARK,
+
+ /*PTC*/
+ LCDD7_MARK,
+ LCDD6_MARK,
+ LCDD5_MARK,
+ LCDD4_MARK,
+ LCDD3_MARK,
+ LCDD2_MARK,
+ LCDD1_MARK,
+ LCDD0_MARK,
+
+ /*PTD*/
+ LCDD15_MARK,
+ LCDD14_MARK,
+ LCDD13_MARK,
+ LCDD12_MARK,
+ LCDD11_MARK,
+ LCDD10_MARK,
+ LCDD9_MARK,
+ LCDD8_MARK,
+
+ /*PTE*/
+ FSIMCKB_MARK,
+ FSIMCKA_MARK,
+ LCDD21_MARK, SCIF2_L_TXD_MARK,
+ LCDD20_MARK, SCIF4_SCK_MARK,
+ LCDD19_MARK, SCIF4_RXD_MARK,
+ LCDD18_MARK, SCIF4_TXD_MARK,
+ LCDD17_MARK,
+ LCDD16_MARK,
+
+ /*PTF*/
+ LCDVSYN_MARK,
+ LCDDISP_MARK, LCDRS_MARK,
+ LCDHSYN_MARK, LCDCS_MARK,
+ LCDDON_MARK,
+ LCDDCK_MARK, LCDWR_MARK,
+ LCDVEPWC_MARK, SCIF0_TXD_MARK,
+ LCDD23_MARK, SCIF2_L_SCK_MARK,
+ LCDD22_MARK, SCIF2_L_RXD_MARK,
+
+ /*PTG*/
+ AUDCK_MARK,
+ AUDSYNC_MARK,
+ AUDATA3_MARK,
+ AUDATA2_MARK,
+ AUDATA1_MARK,
+ AUDATA0_MARK,
+
+ /*PTH*/
+ VIO0_VD_MARK,
+ VIO0_CLK_MARK,
+ VIO0_D7_MARK,
+ VIO0_D6_MARK,
+ VIO0_D5_MARK,
+ VIO0_D4_MARK,
+ VIO0_D3_MARK,
+ VIO0_D2_MARK,
+
+ /*PTJ*/
+ PDSTATUS_MARK,
+ STATUS2_MARK,
+ STATUS0_MARK,
+ A25_MARK, BS_MARK,
+ A24_MARK,
+ A23_MARK,
+ A22_MARK,
+
+ /*PTK*/
+ VIO1_D5_MARK, VIO0_D13_MARK, IDED5_MARK,
+ VIO1_D4_MARK, VIO0_D12_MARK, IDED4_MARK,
+ VIO1_D3_MARK, VIO0_D11_MARK, IDED3_MARK,
+ VIO1_D2_MARK, VIO0_D10_MARK, IDED2_MARK,
+ VIO1_D1_MARK, VIO0_D9_MARK, IDED1_MARK,
+ VIO1_D0_MARK, VIO0_D8_MARK, IDED0_MARK,
+ VIO0_FLD_MARK,
+ VIO0_HD_MARK,
+
+ /*PTL*/
+ DV_D5_MARK, SCIF3_V_SCK_MARK, RMII_RXD0_MARK,
+ DV_D4_MARK, SCIF3_V_RXD_MARK, RMII_RXD1_MARK,
+ DV_D3_MARK, SCIF3_V_TXD_MARK, RMII_REF_CLK_MARK,
+ DV_D2_MARK, SCIF1_SCK_MARK, RMII_TX_EN_MARK,
+ DV_D1_MARK, SCIF1_RXD_MARK, RMII_TXD0_MARK,
+ DV_D0_MARK, SCIF1_TXD_MARK, RMII_TXD1_MARK,
+ DV_D15_MARK,
+ DV_D14_MARK, MSIOF0_MCK_MARK,
+
+ /*PTM*/
+ DV_D13_MARK, MSIOF0_TSCK_MARK,
+ DV_D12_MARK, MSIOF0_RXD_MARK,
+ DV_D11_MARK, MSIOF0_TXD_MARK,
+ DV_D10_MARK, MSIOF0_TSYNC_MARK,
+ DV_D9_MARK, MSIOF0_SS1_MARK, MSIOF0_RSCK_MARK,
+ DV_D8_MARK, MSIOF0_SS2_MARK, MSIOF0_RSYNC_MARK,
+ LCDVCPWC_MARK, SCIF0_RXD_MARK,
+ LCDRD_MARK, SCIF0_SCK_MARK,
+
+ /*PTN*/
+ VIO0_D1_MARK,
+ VIO0_D0_MARK,
+ DV_CLKI_MARK,
+ DV_CLK_MARK, SCIF2_V_SCK_MARK,
+ DV_VSYNC_MARK, SCIF2_V_RXD_MARK,
+ DV_HSYNC_MARK, SCIF2_V_TXD_MARK,
+ DV_D7_MARK, SCIF3_V_CTS_MARK, RMII_RX_ER_MARK,
+ DV_D6_MARK, SCIF3_V_RTS_MARK, RMII_CRS_DV_MARK,
+
+ /*PTQ*/
+ D7_MARK,
+ D6_MARK,
+ D5_MARK,
+ D4_MARK,
+ D3_MARK,
+ D2_MARK,
+ D1_MARK,
+ D0_MARK,
+
+ /*PTR*/
+ CS6B_CE1B_MARK,
+ CS6A_CE2B_MARK,
+ CS5B_CE1A_MARK,
+ CS5A_CE2A_MARK,
+ IOIS16_MARK, LCDLCLK_MARK,
+ WAIT_MARK,
+ WE3_ICIOWR_MARK, TPUTO3_MARK, TPUTI3_MARK,
+ WE2_ICIORD_MARK, TPUTO2_MARK, IDEA2_MARK,
+
+ /*PTS*/
+ VIO_CKO_MARK,
+ VIO1_FLD_MARK, TPUTI2_MARK, IDEIORDY_MARK,
+ VIO1_HD_MARK, SCIF5_SCK_MARK,
+ VIO1_VD_MARK, SCIF5_RXD_MARK,
+ VIO1_CLK_MARK, SCIF5_TXD_MARK,
+ VIO1_D7_MARK, VIO0_D15_MARK, IDED7_MARK,
+ VIO1_D6_MARK, VIO0_D14_MARK, IDED6_MARK,
+
+ /*PTT*/
+ D15_MARK,
+ D14_MARK,
+ D13_MARK,
+ D12_MARK,
+ D11_MARK,
+ D10_MARK,
+ D9_MARK,
+ D8_MARK,
+
+ /*PTU*/
+ DMAC_DACK0_MARK,
+ DMAC_DREQ0_MARK,
+ FSIOASD_MARK,
+ FSIIABCK_MARK,
+ FSIIALRCK_MARK,
+ FSIOABCK_MARK,
+ FSIOALRCK_MARK,
+ CLKAUDIOAO_MARK,
+
+ /*PTV*/
+ FSIIBSD_MARK, MSIOF1_SS2_MARK, MSIOF1_RSYNC_MARK,
+ FSIOBSD_MARK, MSIOF1_SS1_MARK, MSIOF1_RSCK_MARK,
+ FSIIBBCK_MARK, MSIOF1_RXD_MARK,
+ FSIIBLRCK_MARK, MSIOF1_TSYNC_MARK,
+ FSIOBBCK_MARK, MSIOF1_TSCK_MARK,
+ FSIOBLRCK_MARK, MSIOF1_TXD_MARK,
+ CLKAUDIOBO_MARK, MSIOF1_MCK_MARK,
+ FSIIASD_MARK,
+
+ /*PTW*/
+ MMC_D7_MARK, SDHI1CD_MARK, IODACK_MARK,
+ MMC_D6_MARK, SDHI1WP_MARK, IDERST_MARK,
+ MMC_D5_MARK, SDHI1D3_MARK, EXBUF_ENB_MARK,
+ MMC_D4_MARK, SDHI1D2_MARK, DIRECTION_MARK,
+ MMC_D3_MARK, SDHI1D1_MARK,
+ MMC_D2_MARK, SDHI1D0_MARK,
+ MMC_D1_MARK, SDHI1CMD_MARK,
+ MMC_D0_MARK, SDHI1CLK_MARK,
+
+ /*PTX*/
+ DMAC_DACK1_MARK, IRDA_OUT_MARK,
+ DMAC_DREQ1_MARK, IRDA_IN_MARK,
+ TSIF_TS0_SDAT_MARK, LNKSTA_MARK,
+ TSIF_TS0_SCK_MARK, MDIO_MARK,
+ TSIF_TS0_SDEN_MARK, MDC_MARK,
+ TSIF_TS0_SPSYNC_MARK,
+ MMC_CLK_MARK,
+ MMC_CMD_MARK,
+
+ /*PTY*/
+ SDHI0CD_MARK,
+ SDHI0WP_MARK,
+ SDHI0D3_MARK,
+ SDHI0D2_MARK,
+ SDHI0D1_MARK,
+ SDHI0D0_MARK,
+ SDHI0CMD_MARK,
+ SDHI0CLK_MARK,
+
+ /*PTZ*/
+ INTC_IRQ7_MARK, SCIF3_I_CTS_MARK,
+ INTC_IRQ6_MARK, SCIF3_I_RTS_MARK,
+ INTC_IRQ5_MARK, SCIF3_I_SCK_MARK,
+ INTC_IRQ4_MARK, SCIF3_I_RXD_MARK,
+ INTC_IRQ3_MARK, SCIF3_I_TXD_MARK,
+ INTC_IRQ2_MARK,
+ INTC_IRQ1_MARK,
+ INTC_IRQ0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT, PTE7_IN_PU),
+ PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT, PTE6_IN_PU),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT, PTE5_IN_PU),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT, PTF7_IN_PU),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT, PTF6_IN_PU),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT, PTF5_IN_PU),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT, PTF4_IN_PU),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT, PTF3_IN_PU),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT, PTF2_IN_PU),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT, PTF1_IN_PU),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG5_DATA, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT, PTH7_IN_PU),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT, PTK7_IN_PU),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT, PTK6_IN_PU),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT, PTK5_IN_PU),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT, PTK4_IN_PU),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT, PTL2_IN_PU),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT, PTL1_IN_PU),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT, PTL0_IN_PU),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT, PTN7_IN_PU),
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT, PTN6_IN_PU),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT, PTN5_IN_PU),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT, PTN4_IN_PU),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT, PTN3_IN_PU),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT, PTN2_IN_PU),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT, PTN1_IN_PU),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT, PTN0_IN_PU),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ7_DATA, PTQ7_IN, PTQ7_OUT, PTQ7_IN_PU),
+ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT, PTQ6_IN_PU),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT, PTQ5_IN_PU),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT, PTQ4_IN_PU),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT, PTQ3_IN_PU),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT, PTQ2_IN_PU),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT, PTQ1_IN_PU),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT, PTQ0_IN_PU),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_IN_PU),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT, PTS6_IN_PU),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT, PTS5_IN_PU),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT, PTT7_IN_PU),
+ PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT, PTT6_IN_PU),
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT, PTT5_IN_PU),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT, PTU7_IN_PU),
+ PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT, PTU6_IN_PU),
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT, PTU5_IN_PU),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT, PTV7_IN_PU),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT, PTV6_IN_PU),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT, PTV5_IN_PU),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT, PTW7_IN_PU),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT, PTW6_IN_PU),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT, PTW5_IN_PU),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT, PTW4_IN_PU),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT, PTW3_IN_PU),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT, PTW2_IN_PU),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT, PTW1_IN_PU),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT, PTW0_IN_PU),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT, PTX7_IN_PU),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT, PTX6_IN_PU),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT, PTX5_IN_PU),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT, PTX4_IN_PU),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT, PTX3_IN_PU),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT, PTX2_IN_PU),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT, PTX1_IN_PU),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT, PTX0_IN_PU),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT, PTY7_IN_PU),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT, PTY6_IN_PU),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT, PTY5_IN_PU),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT, PTY4_IN_PU),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT, PTY3_IN_PU),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT, PTY2_IN_PU),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT, PTY1_IN_PU),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT, PTY0_IN_PU),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT, PTZ7_IN_PU),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT, PTZ6_IN_PU),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT, PTZ5_IN_PU),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT, PTZ4_IN_PU),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT, PTZ3_IN_PU),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT, PTZ2_IN_PU),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT, PTZ1_IN_PU),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT, PTZ0_IN_PU),
+
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PSA15_0, PSA14_0, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PSA15_0, PSA14_0, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PSA15_0, PSA14_0, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PSA15_0, PSA14_0, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PSA15_0, PSA14_0, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PSA15_0, PSA14_0, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PSA15_0, PSA14_0, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PSA15_0, PSA14_0, PTA0_FN),
+
+ PINMUX_DATA(KEYOUT2_MARK, PSA15_0, PSA14_1, PTA7_FN),
+ PINMUX_DATA(KEYOUT1_MARK, PSA15_0, PSA14_1, PTA6_FN),
+ PINMUX_DATA(KEYOUT0_MARK, PSA15_0, PSA14_1, PTA5_FN),
+ PINMUX_DATA(KEYIN4_MARK, PSA15_0, PSA14_1, PTA4_FN),
+ PINMUX_DATA(KEYIN3_MARK, PSA15_0, PSA14_1, PTA3_FN),
+ PINMUX_DATA(KEYIN2_MARK, PSA15_0, PSA14_1, PTA2_FN),
+ PINMUX_DATA(KEYIN1_MARK, PSA15_0, PSA14_1, PTA1_FN),
+ PINMUX_DATA(KEYIN0_MARK, PSA15_0, PSA14_1, PTA0_FN),
+
+ PINMUX_DATA(IDED15_MARK, PSA15_1, PSA14_0, PTA7_FN),
+ PINMUX_DATA(IDED14_MARK, PSA15_1, PSA14_0, PTA6_FN),
+ PINMUX_DATA(IDED13_MARK, PSA15_1, PSA14_0, PTA5_FN),
+ PINMUX_DATA(IDED12_MARK, PSA15_1, PSA14_0, PTA4_FN),
+ PINMUX_DATA(IDED11_MARK, PSA15_1, PSA14_0, PTA3_FN),
+ PINMUX_DATA(IDED10_MARK, PSA15_1, PSA14_0, PTA2_FN),
+ PINMUX_DATA(IDED9_MARK, PSA15_1, PSA14_0, PTA1_FN),
+ PINMUX_DATA(IDED8_MARK, PSA15_1, PSA14_0, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PSE15_0, PSE14_0, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PSE15_0, PSE14_0, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PSE11_0, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PSE11_0, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PSE11_0, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PSA15_0, PSA14_0, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PSA15_0, PSA14_0, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PSA15_0, PSA14_0, PTB0_FN),
+
+ PINMUX_DATA(IDEA1_MARK, PSE15_1, PSE14_0, PTB7_FN),
+ PINMUX_DATA(IDEA0_MARK, PSE15_1, PSE14_0, PTB6_FN),
+ PINMUX_DATA(IODREQ_MARK, PSE11_1, PTB5_FN),
+ PINMUX_DATA(IDECS0_MARK, PSE11_1, PTB4_FN),
+ PINMUX_DATA(IDECS1_MARK, PSE11_1, PTB3_FN),
+ PINMUX_DATA(IDEIORD_MARK, PSA15_1, PSA14_0, PTB2_FN),
+ PINMUX_DATA(IDEIOWR_MARK, PSA15_1, PSA14_0, PTB1_FN),
+ PINMUX_DATA(IDEINT_MARK, PSA15_1, PSA14_0, PTB0_FN),
+
+ PINMUX_DATA(TPUTO1_MARK, PSE15_0, PSE14_1, PTB7_FN),
+ PINMUX_DATA(TPUTO0_MARK, PSE15_0, PSE14_1, PTB6_FN),
+
+ PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_0, PSA14_1, PTB2_FN),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_0, PSA14_1, PTB1_FN),
+ PINMUX_DATA(KEYOUT3_MARK, PSA15_0, PSA14_1, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(LCDD7_MARK, PSD5_0, PTC7_FN),
+ PINMUX_DATA(LCDD6_MARK, PSD5_0, PTC6_FN),
+ PINMUX_DATA(LCDD5_MARK, PSD5_0, PTC5_FN),
+ PINMUX_DATA(LCDD4_MARK, PSD5_0, PTC4_FN),
+ PINMUX_DATA(LCDD3_MARK, PSD5_0, PTC3_FN),
+ PINMUX_DATA(LCDD2_MARK, PSD5_0, PTC2_FN),
+ PINMUX_DATA(LCDD1_MARK, PSD5_0, PTC1_FN),
+ PINMUX_DATA(LCDD0_MARK, PSD5_0, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(LCDD15_MARK, PSD5_0, PTD7_FN),
+ PINMUX_DATA(LCDD14_MARK, PSD5_0, PTD6_FN),
+ PINMUX_DATA(LCDD13_MARK, PSD5_0, PTD5_FN),
+ PINMUX_DATA(LCDD12_MARK, PSD5_0, PTD4_FN),
+ PINMUX_DATA(LCDD11_MARK, PSD5_0, PTD3_FN),
+ PINMUX_DATA(LCDD10_MARK, PSD5_0, PTD2_FN),
+ PINMUX_DATA(LCDD9_MARK, PSD5_0, PTD1_FN),
+ PINMUX_DATA(LCDD8_MARK, PSD5_0, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(FSIMCKB_MARK, PTE7_FN),
+ PINMUX_DATA(FSIMCKA_MARK, PTE6_FN),
+
+ PINMUX_DATA(LCDD21_MARK, PSC5_0, PSC4_0, PTE5_FN),
+ PINMUX_DATA(LCDD20_MARK, PSD3_0, PSD2_0, PTE4_FN),
+ PINMUX_DATA(LCDD19_MARK, PSA3_0, PSA2_0, PTE3_FN),
+ PINMUX_DATA(LCDD18_MARK, PSA3_0, PSA2_0, PTE2_FN),
+ PINMUX_DATA(LCDD17_MARK, PSD5_0, PTE1_FN),
+ PINMUX_DATA(LCDD16_MARK, PSD5_0, PTE0_FN),
+
+ PINMUX_DATA(SCIF2_L_TXD_MARK, PSC5_0, PSC4_1, PTE5_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, PSD3_0, PSD2_1, PTE4_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, PSA3_0, PSA2_1, PTE3_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, PSA3_0, PSA2_1, PTE2_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(LCDVSYN_MARK, PSD8_0, PTF7_FN),
+ PINMUX_DATA(LCDDISP_MARK, PSD10_0, PSD9_0, PTF6_FN),
+ PINMUX_DATA(LCDHSYN_MARK, PSD10_0, PSD9_0, PTF5_FN),
+ PINMUX_DATA(LCDDON_MARK, PSD8_0, PTF4_FN),
+ PINMUX_DATA(LCDDCK_MARK, PSD10_0, PSD9_0, PTF3_FN),
+ PINMUX_DATA(LCDVEPWC_MARK, PSA6_0, PTF2_FN),
+ PINMUX_DATA(LCDD23_MARK, PSC7_0, PSC6_0, PTF1_FN),
+ PINMUX_DATA(LCDD22_MARK, PSC5_0, PSC4_0, PTF0_FN),
+
+ PINMUX_DATA(LCDRS_MARK, PSD10_0, PSD9_1, PTF6_FN),
+ PINMUX_DATA(LCDCS_MARK, PSD10_0, PSD9_1, PTF5_FN),
+ PINMUX_DATA(LCDWR_MARK, PSD10_0, PSD9_1, PTF3_FN),
+
+ PINMUX_DATA(SCIF0_TXD_MARK, PSA6_1, PTF2_FN),
+ PINMUX_DATA(SCIF2_L_SCK_MARK, PSC7_0, PSC6_1, PTF1_FN),
+ PINMUX_DATA(SCIF2_L_RXD_MARK, PSC5_0, PSC4_1, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(AUDCK_MARK, PTG5_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
+ PINMUX_DATA(AUDATA3_MARK, PTG3_FN),
+ PINMUX_DATA(AUDATA2_MARK, PTG2_FN),
+ PINMUX_DATA(AUDATA1_MARK, PTG1_FN),
+ PINMUX_DATA(AUDATA0_MARK, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(VIO0_VD_MARK, PTH7_FN),
+ PINMUX_DATA(VIO0_CLK_MARK, PTH6_FN),
+ PINMUX_DATA(VIO0_D7_MARK, PTH5_FN),
+ PINMUX_DATA(VIO0_D6_MARK, PTH4_FN),
+ PINMUX_DATA(VIO0_D5_MARK, PTH3_FN),
+ PINMUX_DATA(VIO0_D4_MARK, PTH2_FN),
+ PINMUX_DATA(VIO0_D3_MARK, PTH1_FN),
+ PINMUX_DATA(VIO0_D2_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(PDSTATUS_MARK, PTJ7_FN),
+ PINMUX_DATA(STATUS2_MARK, PTJ6_FN),
+ PINMUX_DATA(STATUS0_MARK, PTJ5_FN),
+ PINMUX_DATA(A25_MARK, PSA8_0, PTJ3_FN),
+ PINMUX_DATA(BS_MARK, PSA8_1, PTJ3_FN),
+ PINMUX_DATA(A24_MARK, PTJ2_FN),
+ PINMUX_DATA(A23_MARK, PTJ1_FN),
+ PINMUX_DATA(A22_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(VIO1_D5_MARK, PSB7_0, PSB6_0, PTK7_FN),
+ PINMUX_DATA(VIO1_D4_MARK, PSB7_0, PSB6_0, PTK6_FN),
+ PINMUX_DATA(VIO1_D3_MARK, PSB7_0, PSB6_0, PTK5_FN),
+ PINMUX_DATA(VIO1_D2_MARK, PSB7_0, PSB6_0, PTK4_FN),
+ PINMUX_DATA(VIO1_D1_MARK, PSB7_0, PSB6_0, PTK3_FN),
+ PINMUX_DATA(VIO1_D0_MARK, PSB7_0, PSB6_0, PTK2_FN),
+
+ PINMUX_DATA(VIO0_D13_MARK, PSB7_0, PSB6_1, PTK7_FN),
+ PINMUX_DATA(VIO0_D12_MARK, PSB7_0, PSB6_1, PTK6_FN),
+ PINMUX_DATA(VIO0_D11_MARK, PSB7_0, PSB6_1, PTK5_FN),
+ PINMUX_DATA(VIO0_D10_MARK, PSB7_0, PSB6_1, PTK4_FN),
+ PINMUX_DATA(VIO0_D9_MARK, PSB7_0, PSB6_1, PTK3_FN),
+ PINMUX_DATA(VIO0_D8_MARK, PSB7_0, PSB6_1, PTK2_FN),
+
+ PINMUX_DATA(IDED5_MARK, PSB7_1, PSB6_0, PTK7_FN),
+ PINMUX_DATA(IDED4_MARK, PSB7_1, PSB6_0, PTK6_FN),
+ PINMUX_DATA(IDED3_MARK, PSB7_1, PSB6_0, PTK5_FN),
+ PINMUX_DATA(IDED2_MARK, PSB7_1, PSB6_0, PTK4_FN),
+ PINMUX_DATA(IDED1_MARK, PSB7_1, PSB6_0, PTK3_FN),
+ PINMUX_DATA(IDED0_MARK, PSB7_1, PSB6_0, PTK2_FN),
+
+ PINMUX_DATA(VIO0_FLD_MARK, PTK1_FN),
+ PINMUX_DATA(VIO0_HD_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(DV_D5_MARK, PSB9_0, PSB8_0, PTL7_FN),
+ PINMUX_DATA(DV_D4_MARK, PSB9_0, PSB8_0, PTL6_FN),
+ PINMUX_DATA(DV_D3_MARK, PSE7_0, PSE6_0, PTL5_FN),
+ PINMUX_DATA(DV_D2_MARK, PSC9_0, PSC8_0, PTL4_FN),
+ PINMUX_DATA(DV_D1_MARK, PSC9_0, PSC8_0, PTL3_FN),
+ PINMUX_DATA(DV_D0_MARK, PSC9_0, PSC8_0, PTL2_FN),
+ PINMUX_DATA(DV_D15_MARK, PSD4_0, PTL1_FN),
+ PINMUX_DATA(DV_D14_MARK, PSE5_0, PSE4_0, PTL0_FN),
+
+ PINMUX_DATA(SCIF3_V_SCK_MARK, PSB9_0, PSB8_1, PTL7_FN),
+ PINMUX_DATA(SCIF3_V_RXD_MARK, PSB9_0, PSB8_1, PTL6_FN),
+ PINMUX_DATA(SCIF3_V_TXD_MARK, PSE7_0, PSE6_1, PTL5_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSC9_0, PSC8_1, PTL4_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSC9_0, PSC8_1, PTL3_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PSC9_0, PSC8_1, PTL2_FN),
+
+ PINMUX_DATA(RMII_RXD0_MARK, PSB9_1, PSB8_0, PTL7_FN),
+ PINMUX_DATA(RMII_RXD1_MARK, PSB9_1, PSB8_0, PTL6_FN),
+ PINMUX_DATA(RMII_REF_CLK_MARK, PSE7_1, PSE6_0, PTL5_FN),
+ PINMUX_DATA(RMII_TX_EN_MARK, PSC9_1, PSC8_0, PTL4_FN),
+ PINMUX_DATA(RMII_TXD0_MARK, PSC9_1, PSC8_0, PTL3_FN),
+ PINMUX_DATA(RMII_TXD1_MARK, PSC9_1, PSC8_0, PTL2_FN),
+
+ PINMUX_DATA(MSIOF0_MCK_MARK, PSE5_0, PSE4_1, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(DV_D13_MARK, PSC13_0, PSC12_0, PTM7_FN),
+ PINMUX_DATA(DV_D12_MARK, PSC13_0, PSC12_0, PTM6_FN),
+ PINMUX_DATA(DV_D11_MARK, PSC13_0, PSC12_0, PTM5_FN),
+ PINMUX_DATA(DV_D10_MARK, PSC13_0, PSC12_0, PTM4_FN),
+ PINMUX_DATA(DV_D9_MARK, PSC11_0, PSC10_0, PTM3_FN),
+ PINMUX_DATA(DV_D8_MARK, PSC11_0, PSC10_0, PTM2_FN),
+
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PSC13_0, PSC12_1, PTM7_FN),
+ PINMUX_DATA(MSIOF0_RXD_MARK, PSC13_0, PSC12_1, PTM6_FN),
+ PINMUX_DATA(MSIOF0_TXD_MARK, PSC13_0, PSC12_1, PTM5_FN),
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PSC13_0, PSC12_1, PTM4_FN),
+ PINMUX_DATA(MSIOF0_SS1_MARK, PSC11_0, PSC10_1, PTM3_FN),
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PSC11_1, PSC10_0, PTM3_FN),
+ PINMUX_DATA(MSIOF0_SS2_MARK, PSC11_0, PSC10_1, PTM2_FN),
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PSC11_1, PSC10_0, PTM2_FN),
+
+ PINMUX_DATA(LCDVCPWC_MARK, PSA6_0, PTM1_FN),
+ PINMUX_DATA(LCDRD_MARK, PSA7_0, PTM0_FN),
+
+ PINMUX_DATA(SCIF0_RXD_MARK, PSA6_1, PTM1_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, PSA7_1, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(VIO0_D1_MARK, PTN7_FN),
+ PINMUX_DATA(VIO0_D0_MARK, PTN6_FN),
+
+ PINMUX_DATA(DV_CLKI_MARK, PSD11_0, PTN5_FN),
+ PINMUX_DATA(DV_CLK_MARK, PSD13_0, PSD12_0, PTN4_FN),
+ PINMUX_DATA(DV_VSYNC_MARK, PSD15_0, PSD14_0, PTN3_FN),
+ PINMUX_DATA(DV_HSYNC_MARK, PSB5_0, PSB4_0, PTN2_FN),
+ PINMUX_DATA(DV_D7_MARK, PSB3_0, PSB2_0, PTN1_FN),
+ PINMUX_DATA(DV_D6_MARK, PSB1_0, PSB0_0, PTN0_FN),
+
+ PINMUX_DATA(SCIF2_V_SCK_MARK, PSD13_0, PSD12_1, PTN4_FN),
+ PINMUX_DATA(SCIF2_V_RXD_MARK, PSD15_0, PSD14_1, PTN3_FN),
+ PINMUX_DATA(SCIF2_V_TXD_MARK, PSB5_0, PSB4_1, PTN2_FN),
+ PINMUX_DATA(SCIF3_V_CTS_MARK, PSB3_0, PSB2_1, PTN1_FN),
+ PINMUX_DATA(SCIF3_V_RTS_MARK, PSB1_0, PSB0_1, PTN0_FN),
+
+ PINMUX_DATA(RMII_RX_ER_MARK, PSB3_1, PSB2_0, PTN1_FN),
+ PINMUX_DATA(RMII_CRS_DV_MARK, PSB1_1, PSB0_0, PTN0_FN),
+
+ /* PTQ FN */
+ PINMUX_DATA(D7_MARK, PTQ7_FN),
+ PINMUX_DATA(D6_MARK, PTQ6_FN),
+ PINMUX_DATA(D5_MARK, PTQ5_FN),
+ PINMUX_DATA(D4_MARK, PTQ4_FN),
+ PINMUX_DATA(D3_MARK, PTQ3_FN),
+ PINMUX_DATA(D2_MARK, PTQ2_FN),
+ PINMUX_DATA(D1_MARK, PTQ1_FN),
+ PINMUX_DATA(D0_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
+ PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
+ PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSA5_0, PTR3_FN),
+ PINMUX_DATA(WAIT_MARK, PTR2_FN),
+ PINMUX_DATA(WE3_ICIOWR_MARK, PSA1_0, PSA0_0, PTR1_FN),
+ PINMUX_DATA(WE2_ICIORD_MARK, PSD1_0, PSD0_0, PTR0_FN),
+
+ PINMUX_DATA(LCDLCLK_MARK, PSA5_1, PTR3_FN),
+
+ PINMUX_DATA(IDEA2_MARK, PSD1_1, PSD0_0, PTR0_FN),
+
+ PINMUX_DATA(TPUTO3_MARK, PSA1_0, PSA0_1, PTR1_FN),
+ PINMUX_DATA(TPUTI3_MARK, PSA1_1, PSA0_0, PTR1_FN),
+ PINMUX_DATA(TPUTO2_MARK, PSD1_0, PSD0_1, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(VIO_CKO_MARK, PTS6_FN),
+
+ PINMUX_DATA(TPUTI2_MARK, PSE9_0, PSE8_1, PTS5_FN),
+
+ PINMUX_DATA(IDEIORDY_MARK, PSE9_1, PSE8_0, PTS5_FN),
+
+ PINMUX_DATA(VIO1_FLD_MARK, PSE9_0, PSE8_0, PTS5_FN),
+ PINMUX_DATA(VIO1_HD_MARK, PSA10_0, PTS4_FN),
+ PINMUX_DATA(VIO1_VD_MARK, PSA9_0, PTS3_FN),
+ PINMUX_DATA(VIO1_CLK_MARK, PSA9_0, PTS2_FN),
+ PINMUX_DATA(VIO1_D7_MARK, PSB7_0, PSB6_0, PTS1_FN),
+ PINMUX_DATA(VIO1_D6_MARK, PSB7_0, PSB6_0, PTS0_FN),
+
+ PINMUX_DATA(SCIF5_SCK_MARK, PSA10_1, PTS4_FN),
+ PINMUX_DATA(SCIF5_RXD_MARK, PSA9_1, PTS3_FN),
+ PINMUX_DATA(SCIF5_TXD_MARK, PSA9_1, PTS2_FN),
+
+ PINMUX_DATA(VIO0_D15_MARK, PSB7_0, PSB6_1, PTS1_FN),
+ PINMUX_DATA(VIO0_D14_MARK, PSB7_0, PSB6_1, PTS0_FN),
+
+ PINMUX_DATA(IDED7_MARK, PSB7_1, PSB6_0, PTS1_FN),
+ PINMUX_DATA(IDED6_MARK, PSB7_1, PSB6_0, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(D15_MARK, PTT7_FN),
+ PINMUX_DATA(D14_MARK, PTT6_FN),
+ PINMUX_DATA(D13_MARK, PTT5_FN),
+ PINMUX_DATA(D12_MARK, PTT4_FN),
+ PINMUX_DATA(D11_MARK, PTT3_FN),
+ PINMUX_DATA(D10_MARK, PTT2_FN),
+ PINMUX_DATA(D9_MARK, PTT1_FN),
+ PINMUX_DATA(D8_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(DMAC_DACK0_MARK, PTU7_FN),
+ PINMUX_DATA(DMAC_DREQ0_MARK, PTU6_FN),
+
+ PINMUX_DATA(FSIOASD_MARK, PSE1_0, PTU5_FN),
+ PINMUX_DATA(FSIIABCK_MARK, PSE1_0, PTU4_FN),
+ PINMUX_DATA(FSIIALRCK_MARK, PSE1_0, PTU3_FN),
+ PINMUX_DATA(FSIOABCK_MARK, PSE1_0, PTU2_FN),
+ PINMUX_DATA(FSIOALRCK_MARK, PSE1_0, PTU1_FN),
+ PINMUX_DATA(CLKAUDIOAO_MARK, PSE0_0, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(FSIIBSD_MARK, PSD7_0, PSD6_0, PTV7_FN),
+ PINMUX_DATA(FSIOBSD_MARK, PSD7_0, PSD6_0, PTV6_FN),
+ PINMUX_DATA(FSIIBBCK_MARK, PSC15_0, PSC14_0, PTV5_FN),
+ PINMUX_DATA(FSIIBLRCK_MARK, PSC15_0, PSC14_0, PTV4_FN),
+ PINMUX_DATA(FSIOBBCK_MARK, PSC15_0, PSC14_0, PTV3_FN),
+ PINMUX_DATA(FSIOBLRCK_MARK, PSC15_0, PSC14_0, PTV2_FN),
+ PINMUX_DATA(CLKAUDIOBO_MARK, PSE3_0, PSE2_0, PTV1_FN),
+ PINMUX_DATA(FSIIASD_MARK, PSE10_0, PTV0_FN),
+
+ PINMUX_DATA(MSIOF1_SS2_MARK, PSD7_0, PSD6_1, PTV7_FN),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PSD7_1, PSD6_0, PTV7_FN),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PSD7_0, PSD6_1, PTV6_FN),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PSD7_1, PSD6_0, PTV6_FN),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PSC15_0, PSC14_1, PTV5_FN),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PSC15_0, PSC14_1, PTV4_FN),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PSC15_0, PSC14_1, PTV3_FN),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PSC15_0, PSC14_1, PTV2_FN),
+ PINMUX_DATA(MSIOF1_MCK_MARK, PSE3_0, PSE2_1, PTV1_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(MMC_D7_MARK, PSE13_0, PSE12_0, PTW7_FN),
+ PINMUX_DATA(MMC_D6_MARK, PSE13_0, PSE12_0, PTW6_FN),
+ PINMUX_DATA(MMC_D5_MARK, PSE13_0, PSE12_0, PTW5_FN),
+ PINMUX_DATA(MMC_D4_MARK, PSE13_0, PSE12_0, PTW4_FN),
+ PINMUX_DATA(MMC_D3_MARK, PSA13_0, PTW3_FN),
+ PINMUX_DATA(MMC_D2_MARK, PSA13_0, PTW2_FN),
+ PINMUX_DATA(MMC_D1_MARK, PSA13_0, PTW1_FN),
+ PINMUX_DATA(MMC_D0_MARK, PSA13_0, PTW0_FN),
+
+ PINMUX_DATA(SDHI1CD_MARK, PSE13_0, PSE12_1, PTW7_FN),
+ PINMUX_DATA(SDHI1WP_MARK, PSE13_0, PSE12_1, PTW6_FN),
+ PINMUX_DATA(SDHI1D3_MARK, PSE13_0, PSE12_1, PTW5_FN),
+ PINMUX_DATA(SDHI1D2_MARK, PSE13_0, PSE12_1, PTW4_FN),
+ PINMUX_DATA(SDHI1D1_MARK, PSA13_1, PTW3_FN),
+ PINMUX_DATA(SDHI1D0_MARK, PSA13_1, PTW2_FN),
+ PINMUX_DATA(SDHI1CMD_MARK, PSA13_1, PTW1_FN),
+ PINMUX_DATA(SDHI1CLK_MARK, PSA13_1, PTW0_FN),
+
+ PINMUX_DATA(IODACK_MARK, PSE13_1, PSE12_0, PTW7_FN),
+ PINMUX_DATA(IDERST_MARK, PSE13_1, PSE12_0, PTW6_FN),
+ PINMUX_DATA(EXBUF_ENB_MARK, PSE13_1, PSE12_0, PTW5_FN),
+ PINMUX_DATA(DIRECTION_MARK, PSE13_1, PSE12_0, PTW4_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(DMAC_DACK1_MARK, PSA12_0, PTX7_FN),
+ PINMUX_DATA(DMAC_DREQ1_MARK, PSA12_0, PTX6_FN),
+
+ PINMUX_DATA(IRDA_OUT_MARK, PSA12_1, PTX7_FN),
+ PINMUX_DATA(IRDA_IN_MARK, PSA12_1, PTX6_FN),
+
+ PINMUX_DATA(TSIF_TS0_SDAT_MARK, PSC0_0, PTX5_FN),
+ PINMUX_DATA(TSIF_TS0_SCK_MARK, PSC1_0, PTX4_FN),
+ PINMUX_DATA(TSIF_TS0_SDEN_MARK, PSC2_0, PTX3_FN),
+ PINMUX_DATA(TSIF_TS0_SPSYNC_MARK, PTX2_FN),
+
+ PINMUX_DATA(LNKSTA_MARK, PSC0_1, PTX5_FN),
+ PINMUX_DATA(MDIO_MARK, PSC1_1, PTX4_FN),
+ PINMUX_DATA(MDC_MARK, PSC2_1, PTX3_FN),
+
+ PINMUX_DATA(MMC_CLK_MARK, PTX1_FN),
+ PINMUX_DATA(MMC_CMD_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(SDHI0CD_MARK, PTY7_FN),
+ PINMUX_DATA(SDHI0WP_MARK, PTY6_FN),
+ PINMUX_DATA(SDHI0D3_MARK, PTY5_FN),
+ PINMUX_DATA(SDHI0D2_MARK, PTY4_FN),
+ PINMUX_DATA(SDHI0D1_MARK, PTY3_FN),
+ PINMUX_DATA(SDHI0D0_MARK, PTY2_FN),
+ PINMUX_DATA(SDHI0CMD_MARK, PTY1_FN),
+ PINMUX_DATA(SDHI0CLK_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(INTC_IRQ7_MARK, PSB10_0, PTZ7_FN),
+ PINMUX_DATA(INTC_IRQ6_MARK, PSB11_0, PTZ6_FN),
+ PINMUX_DATA(INTC_IRQ5_MARK, PSB12_0, PTZ5_FN),
+ PINMUX_DATA(INTC_IRQ4_MARK, PSB13_0, PTZ4_FN),
+ PINMUX_DATA(INTC_IRQ3_MARK, PSB14_0, PTZ3_FN),
+ PINMUX_DATA(INTC_IRQ2_MARK, PTZ2_FN),
+ PINMUX_DATA(INTC_IRQ1_MARK, PTZ1_FN),
+ PINMUX_DATA(INTC_IRQ0_MARK, PTZ0_FN),
+
+ PINMUX_DATA(SCIF3_I_CTS_MARK, PSB10_1, PTZ7_FN),
+ PINMUX_DATA(SCIF3_I_RTS_MARK, PSB11_1, PTZ6_FN),
+ PINMUX_DATA(SCIF3_I_SCK_MARK, PSB12_1, PTZ5_FN),
+ PINMUX_DATA(SCIF3_I_RXD_MARK, PSB13_1, PTZ4_FN),
+ PINMUX_DATA(SCIF3_I_TXD_MARK, PSB14_1, PTZ3_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ7, PTQ7_DATA),
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
+ PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
+ PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+
+ /* ATAPI */
+ PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
+ PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
+ PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
+ PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTI3, TPUTI3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTI2, TPUTI2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_TXD, SCIF2_L_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_SCK, SCIF2_L_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_RXD, SCIF2_L_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_TXD, SCIF2_V_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_SCK, SCIF2_V_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_RXD, SCIF2_V_RXD_MARK),
+
+ /* SCIF3 */
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_SCK, SCIF3_V_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_RXD, SCIF3_V_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_TXD, SCIF3_V_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_CTS, SCIF3_V_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_RTS, SCIF3_V_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_SCK, SCIF3_I_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_RXD, SCIF3_I_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_TXD, SCIF3_I_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_CTS, SCIF3_I_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_RTS, SCIF3_I_RTS_MARK),
+
+ /* SCIF4 */
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+
+ /* SCIF5 */
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+
+ /* FSI */
+ PINMUX_GPIO(GPIO_FN_FSIMCKB, FSIMCKB_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIMCKA, FSIMCKA_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOASD, FSIOASD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIABCK, FSIIABCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIALRCK, FSIIALRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOABCK, FSIOABCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOALRCK, FSIOALRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKAUDIOAO, CLKAUDIOAO_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBSD, FSIIBSD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBSD, FSIOBSD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBBCK, FSIIBBCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBLRCK, FSIIBLRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBBCK, FSIOBBCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBLRCK, FSIOBLRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKAUDIOBO, CLKAUDIOBO_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIASD, FSIIASD_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* VIO */
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+
+ /* VIO0 */
+ PINMUX_GPIO(GPIO_FN_VIO0_D15, VIO0_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D14, VIO0_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D13, VIO0_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D12, VIO0_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D11, VIO0_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D10, VIO0_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D9, VIO0_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D8, VIO0_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D7, VIO0_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D6, VIO0_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D5, VIO0_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D4, VIO0_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D3, VIO0_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D2, VIO0_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D1, VIO0_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D0, VIO0_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_VD, VIO0_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_CLK, VIO0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_FLD, VIO0_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_HD, VIO0_HD_MARK),
+
+ /* VIO1 */
+ PINMUX_GPIO(GPIO_FN_VIO1_D7, VIO1_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D6, VIO1_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D5, VIO1_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D4, VIO1_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D3, VIO1_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D2, VIO1_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D1, VIO1_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D0, VIO1_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_FLD, VIO1_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_HD, VIO1_HD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_VD, VIO1_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_CLK, VIO1_CLK_MARK),
+
+ /* Eth */
+ PINMUX_GPIO(GPIO_FN_RMII_RXD0, RMII_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_RXD1, RMII_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TXD0, RMII_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TXD1, RMII_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_REF_CLK, RMII_REF_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TX_EN, RMII_TX_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_RX_ER, RMII_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_CRS_DV, RMII_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_LNKSTA, LNKSTA_MARK),
+ PINMUX_GPIO(GPIO_FN_MDIO, MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_MDC, MDC_MARK),
+
+ /* System */
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS2, STATUS2_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+
+ /* MSIOF0 */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RXD, MSIOF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TXD, MSIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_MCK, MSIOF0_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TSCK, MSIOF0_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_SS1, MSIOF0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_SS2, MSIOF0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TSYNC, MSIOF0_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RSCK, MSIOF0_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RSYNC, MSIOF0_RSYNC_MARK),
+
+ /* MSIOF1 */
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DMAC_DACK0, DMAC_DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DREQ0, DMAC_DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DACK1, DMAC_DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DREQ1, DMAC_DREQ1_MARK),
+
+ /* SDHI0 */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD, SDHI0CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP, SDHI0WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD, SDHI0CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK, SDHI0CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3, SDHI0D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2, SDHI0D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1, SDHI0D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0, SDHI0D0_MARK),
+
+ /* SDHI1 */
+ PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
+
+ /* MMC */
+ PINMUX_GPIO(GPIO_FN_MMC_D7, MMC_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D6, MMC_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D5, MMC_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D4, MMC_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D3, MMC_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D2, MMC_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D1, MMC_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D0, MMC_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
+
+ /* IrDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDAT, TSIF_TS0_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SCK, TSIF_TS0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDEN, TSIF_TS0_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SPSYNC, TSIF_TS0_SPSYNC_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ7, INTC_IRQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ6, INTC_IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ5, INTC_IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ4, INTC_IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ3, INTC_IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ2, INTC_IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ1, INTC_IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ0, INTC_IRQ0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
+ PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
+ PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
+ PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
+ PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
+ PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
+ PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
+ PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
+ PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
+ PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
+ PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
+ PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
+ PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
+ PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
+ PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ PTE7_FN, PTE7_OUT, PTE7_IN_PU, PTE7_IN,
+ PTE6_FN, PTE6_OUT, PTE6_IN_PU, PTE6_IN,
+ PTE5_FN, PTE5_OUT, PTE5_IN_PU, PTE5_IN,
+ PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
+ PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
+ PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
+ PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
+ PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ PTF7_FN, PTF7_OUT, PTF7_IN_PU, PTF7_IN,
+ PTF6_FN, PTF6_OUT, PTF6_IN_PU, PTF6_IN,
+ PTF5_FN, PTF5_OUT, PTF5_IN_PU, PTF5_IN,
+ PTF4_FN, PTF4_OUT, PTF4_IN_PU, PTF4_IN,
+ PTF3_FN, PTF3_OUT, PTF3_IN_PU, PTF3_IN,
+ PTF2_FN, PTF2_OUT, PTF2_IN_PU, PTF2_IN,
+ PTF1_FN, PTF1_OUT, PTF1_IN_PU, PTF1_IN,
+ PTF0_FN, PTF0_OUT, PTF0_IN_PU, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTG5_FN, PTG5_OUT, 0, 0,
+ PTG4_FN, PTG4_OUT, 0, 0,
+ PTG3_FN, PTG3_OUT, 0, 0,
+ PTG2_FN, PTG2_OUT, 0, 0,
+ PTG1_FN, PTG1_OUT, 0, 0,
+ PTG0_FN, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ PTH7_FN, PTH7_OUT, PTH7_IN_PU, PTH7_IN,
+ PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
+ PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
+ PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
+ PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
+ PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
+ PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
+ PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ PTJ7_FN, PTJ7_OUT, 0, 0,
+ PTJ6_FN, PTJ6_OUT, 0, 0,
+ PTJ5_FN, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ PTK7_FN, PTK7_OUT, PTK7_IN_PU, PTK7_IN,
+ PTK6_FN, PTK6_OUT, PTK6_IN_PU, PTK6_IN,
+ PTK5_FN, PTK5_OUT, PTK5_IN_PU, PTK5_IN,
+ PTK4_FN, PTK4_OUT, PTK4_IN_PU, PTK4_IN,
+ PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
+ PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
+ PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
+ PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
+ PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
+ PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
+ PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
+ PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
+ PTL2_FN, PTL2_OUT, PTL2_IN_PU, PTL2_IN,
+ PTL1_FN, PTL1_OUT, PTL1_IN_PU, PTL1_IN,
+ PTL0_FN, PTL0_OUT, PTL0_IN_PU, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
+ PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
+ PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
+ PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
+ PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
+ PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
+ PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
+ PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ PTN7_FN, PTN7_OUT, PTN7_IN_PU, PTN7_IN,
+ PTN6_FN, PTN6_OUT, PTN6_IN_PU, PTN6_IN,
+ PTN5_FN, PTN5_OUT, PTN5_IN_PU, PTN5_IN,
+ PTN4_FN, PTN4_OUT, PTN4_IN_PU, PTN4_IN,
+ PTN3_FN, PTN3_OUT, PTN3_IN_PU, PTN3_IN,
+ PTN2_FN, PTN2_OUT, PTN2_IN_PU, PTN2_IN,
+ PTN1_FN, PTN1_OUT, PTN1_IN_PU, PTN1_IN,
+ PTN0_FN, PTN0_OUT, PTN0_IN_PU, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ PTQ7_FN, PTQ7_OUT, PTQ7_IN_PU, PTQ7_IN,
+ PTQ6_FN, PTQ6_OUT, PTQ6_IN_PU, PTQ6_IN,
+ PTQ5_FN, PTQ5_OUT, PTQ5_IN_PU, PTQ5_IN,
+ PTQ4_FN, PTQ4_OUT, PTQ4_IN_PU, PTQ4_IN,
+ PTQ3_FN, PTQ3_OUT, PTQ3_IN_PU, PTQ3_IN,
+ PTQ2_FN, PTQ2_OUT, PTQ2_IN_PU, PTQ2_IN,
+ PTQ1_FN, PTQ1_OUT, PTQ1_IN_PU, PTQ1_IN,
+ PTQ0_FN, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
+ PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
+ PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
+ PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
+ PTR3_FN, 0, PTR3_IN_PU, PTR3_IN,
+ PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
+ PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ PTS6_FN, PTS6_OUT, PTS6_IN_PU, PTS6_IN,
+ PTS5_FN, PTS5_OUT, PTS5_IN_PU, PTS5_IN,
+ PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
+ PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
+ PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
+ PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
+ PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ PTT7_FN, PTT7_OUT, PTT7_IN_PU, PTT7_IN,
+ PTT6_FN, PTT6_OUT, PTT6_IN_PU, PTT6_IN,
+ PTT5_FN, PTT5_OUT, PTT5_IN_PU, PTT5_IN,
+ PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
+ PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
+ PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
+ PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
+ PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ PTU7_FN, PTU7_OUT, PTU7_IN_PU, PTU7_IN,
+ PTU6_FN, PTU6_OUT, PTU6_IN_PU, PTU6_IN,
+ PTU5_FN, PTU5_OUT, PTU5_IN_PU, PTU5_IN,
+ PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
+ PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
+ PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
+ PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
+ PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ PTV7_FN, PTV7_OUT, PTV7_IN_PU, PTV7_IN,
+ PTV6_FN, PTV6_OUT, PTV6_IN_PU, PTV6_IN,
+ PTV5_FN, PTV5_OUT, PTV5_IN_PU, PTV5_IN,
+ PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
+ PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
+ PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
+ PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
+ PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ PTW7_FN, PTW7_OUT, PTW7_IN_PU, PTW7_IN,
+ PTW6_FN, PTW6_OUT, PTW6_IN_PU, PTW6_IN,
+ PTW5_FN, PTW5_OUT, PTW5_IN_PU, PTW5_IN,
+ PTW4_FN, PTW4_OUT, PTW4_IN_PU, PTW4_IN,
+ PTW3_FN, PTW3_OUT, PTW3_IN_PU, PTW3_IN,
+ PTW2_FN, PTW2_OUT, PTW2_IN_PU, PTW2_IN,
+ PTW1_FN, PTW1_OUT, PTW1_IN_PU, PTW1_IN,
+ PTW0_FN, PTW0_OUT, PTW0_IN_PU, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ PTX7_FN, PTX7_OUT, PTX7_IN_PU, PTX7_IN,
+ PTX6_FN, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
+ PTX5_FN, PTX5_OUT, PTX5_IN_PU, PTX5_IN,
+ PTX4_FN, PTX4_OUT, PTX4_IN_PU, PTX4_IN,
+ PTX3_FN, PTX3_OUT, PTX3_IN_PU, PTX3_IN,
+ PTX2_FN, PTX2_OUT, PTX2_IN_PU, PTX2_IN,
+ PTX1_FN, PTX1_OUT, PTX1_IN_PU, PTX1_IN,
+ PTX0_FN, PTX0_OUT, PTX0_IN_PU, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ PTY7_FN, PTY7_OUT, PTY7_IN_PU, PTY7_IN,
+ PTY6_FN, PTY6_OUT, PTY6_IN_PU, PTY6_IN,
+ PTY5_FN, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
+ PTY4_FN, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
+ PTY3_FN, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
+ PTY2_FN, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
+ PTY1_FN, PTY1_OUT, PTY1_IN_PU, PTY1_IN,
+ PTY0_FN, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, PTZ7_IN_PU, PTZ7_IN,
+ PTZ6_FN, PTZ6_OUT, PTZ6_IN_PU, PTZ6_IN,
+ PTZ5_FN, PTZ5_OUT, PTZ5_IN_PU, PTZ5_IN,
+ PTZ4_FN, PTZ4_OUT, PTZ4_IN_PU, PTZ4_IN,
+ PTZ3_FN, PTZ3_OUT, PTZ3_IN_PU, PTZ3_IN,
+ PTZ2_FN, PTZ2_OUT, PTZ2_IN_PU, PTZ2_IN,
+ PTZ1_FN, PTZ1_OUT, PTZ1_IN_PU, PTZ1_IN,
+ PTZ0_FN, PTZ0_OUT, PTZ0_IN_PU, PTZ0_IN }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+ PSA15_0, PSA15_1,
+ PSA14_0, PSA14_1,
+ PSA13_0, PSA13_1,
+ PSA12_0, PSA12_1,
+ 0, 0,
+ PSA10_0, PSA10_1,
+ PSA9_0, PSA9_1,
+ PSA8_0, PSA8_1,
+ PSA7_0, PSA7_1,
+ PSA6_0, PSA6_1,
+ PSA5_0, PSA5_1,
+ 0, 0,
+ PSA3_0, PSA3_1,
+ PSA2_0, PSA2_1,
+ PSA1_0, PSA1_1,
+ PSA0_0, PSA0_1 }
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+ 0, 0,
+ PSB14_0, PSB14_1,
+ PSB13_0, PSB13_1,
+ PSB12_0, PSB12_1,
+ PSB11_0, PSB11_1,
+ PSB10_0, PSB10_1,
+ PSB9_0, PSB9_1,
+ PSB8_0, PSB8_1,
+ PSB7_0, PSB7_1,
+ PSB6_0, PSB6_1,
+ PSB5_0, PSB5_1,
+ PSB4_0, PSB4_1,
+ PSB3_0, PSB3_1,
+ PSB2_0, PSB2_1,
+ PSB1_0, PSB1_1,
+ PSB0_0, PSB0_1 }
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+ PSC15_0, PSC15_1,
+ PSC14_0, PSC14_1,
+ PSC13_0, PSC13_1,
+ PSC12_0, PSC12_1,
+ PSC11_0, PSC11_1,
+ PSC10_0, PSC10_1,
+ PSC9_0, PSC9_1,
+ PSC8_0, PSC8_1,
+ PSC7_0, PSC7_1,
+ PSC6_0, PSC6_1,
+ PSC5_0, PSC5_1,
+ PSC4_0, PSC4_1,
+ 0, 0,
+ PSC2_0, PSC2_1,
+ PSC1_0, PSC1_1,
+ PSC0_0, PSC0_1}
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+ PSD15_0, PSD15_1,
+ PSD14_0, PSD14_1,
+ PSD13_0, PSD13_1,
+ PSD12_0, PSD12_1,
+ PSD11_0, PSD11_1,
+ PSD10_0, PSD10_1,
+ PSD9_0, PSD9_1,
+ PSD8_0, PSD8_1,
+ PSD7_0, PSD7_1,
+ PSD6_0, PSD6_1,
+ PSD5_0, PSD5_1,
+ PSD4_0, PSD4_1,
+ PSD3_0, PSD3_1,
+ PSD2_0, PSD2_1,
+ PSD1_0, PSD1_1,
+ PSD0_0, PSD0_1}
+ },
+ { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+ PSE15_0, PSE15_1,
+ PSE14_0, PSE14_1,
+ PSE13_0, PSE13_1,
+ PSE12_0, PSE12_1,
+ PSE11_0, PSE11_1,
+ PSE10_0, PSE10_1,
+ PSE9_0, PSE9_1,
+ PSE8_0, PSE8_1,
+ PSE7_0, PSE7_1,
+ PSE6_0, PSE6_1,
+ PSE5_0, PSE5_1,
+ PSE4_0, PSE4_1,
+ PSE3_0, PSE3_1,
+ PSE2_0, PSE2_1,
+ PSE1_0, PSE1_1,
+ PSE0_0, PSE0_1}
+ },
+ {}
+};
+
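+/*
+ * Port data registers (PxDR): one 8-bit register per port, bits listed
+ * MSB first (bit 7 down to bit 0); unused bit positions are marked 0.
+ */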
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ 0, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7724_pinmux_info = {
+ .name = "sh7724_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_INTC_IRQ0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
new file mode 100644
index 000000000000..23d76d262c32
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -0,0 +1,2475 @@
+/*
+ * SH7734 processor support - PFC hardware block
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7734.h>
+
+#include "sh_pfc.h"
+
+#define CPU_32_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##2, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_1(fn, pfx##31, sfx)
+
+#define CPU_32_PORT5(fn, pfx, sfx) \
+ PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
+ PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
+ PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
+ PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
+ PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx), \
+ PORT_1(fn, pfx##10, sfx), PORT_1(fn, pfx##11, sfx)
+
+/* GPSR0 - GPSR5 */
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ CPU_32_PORT(fn, pfx##_0_, sfx), \
+ CPU_32_PORT(fn, pfx##_1_, sfx), \
+ CPU_32_PORT(fn, pfx##_2_, sfx), \
+ CPU_32_PORT(fn, pfx##_3_, sfx), \
+ CPU_32_PORT(fn, pfx##_4_, sfx), \
+ CPU_32_PORT5(fn, pfx##_5_, sfx)
+
+#define _GP_GPIO(pfx, sfx) PINMUX_GPIO(GPIO_GP##pfx, GP##pfx##_DATA)
+#define _GP_DATA(pfx, sfx) PINMUX_DATA(GP##pfx##_DATA, GP##pfx##_FN, \
+ GP##pfx##_IN, GP##pfx##_OUT)
+
+#define _GP_INOUTSEL(pfx, sfx) GP##pfx##_IN, GP##pfx##_OUT
+#define _GP_INDT(pfx, sfx) GP##pfx##_DATA
+
+#define GP_ALL(str) CPU_ALL_PORT(_PORT_ALL, GP, str)
+#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, , unused)
+#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, , unused)
+
+#define PORT_10_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##9, sfx), PORT_1(fn, pfx##8, sfx), \
+ PORT_1(fn, pfx##7, sfx), PORT_1(fn, pfx##6, sfx), \
+ PORT_1(fn, pfx##5, sfx), PORT_1(fn, pfx##4, sfx), \
+ PORT_1(fn, pfx##3, sfx), PORT_1(fn, pfx##2, sfx), \
+ PORT_1(fn, pfx##1, sfx), PORT_1(fn, pfx##0, sfx)
+
+#define CPU_32_PORT_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##31, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_10_REV(fn, pfx##2, sfx), PORT_10_REV(fn, pfx##1, sfx), \
+ PORT_10_REV(fn, pfx, sfx)
+
+#define GP_INOUTSEL(bank) CPU_32_PORT_REV(_GP_INOUTSEL, _##bank##_, unused)
+#define GP_INDT(bank) CPU_32_PORT_REV(_GP_INDT, _##bank##_, unused)
+
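+/*
+ * PINMUX_IPSR_DATA() ties a function mark to the IPSR field value that
+ * selects it; PINMUX_IPSR_MODSEL_DATA() additionally records the MOD_SEL
+ * setting that the function depends on.
+ */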
+#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
+#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
+ FN_##ipsr, FN_##fn)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA), /* GP_0_0_DATA -> GP_5_11_DATA */
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ GP_ALL(IN), /* GP_0_0_IN -> GP_5_11_IN */
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ GP_ALL(OUT), /* GP_0_0_OUT -> GP_5_11_OUT */
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN), /* GP_0_0_FN -> GP_5_11_FN */
+
+ /* GPSR0 */
+ FN_IP1_9_8, FN_IP1_11_10, FN_IP1_13_12, FN_IP1_15_14,
+ FN_IP0_7_6, FN_IP0_9_8, FN_IP0_11_10, FN_IP0_13_12,
+ FN_IP0_15_14, FN_IP0_17_16, FN_IP0_19_18, FN_IP0_21_20,
+ FN_IP0_23_22, FN_IP0_25_24, FN_IP0_27_26, FN_IP0_29_28,
+ FN_IP0_31_30, FN_IP1_1_0, FN_IP1_3_2, FN_IP1_5_4,
+ FN_IP1_7_6, FN_IP11_28, FN_IP0_1_0, FN_IP0_3_2,
+ FN_IP0_5_4, FN_IP1_17_16, FN_IP1_19_18, FN_IP1_22_20,
+ FN_IP1_25_23, FN_IP1_28_26, FN_IP1_31_29, FN_IP2_2_0,
+
+ /* GPSR1 */
+ FN_IP3_20, FN_IP3_29_27, FN_IP11_20_19, FN_IP11_22_21,
+ FN_IP2_16_14, FN_IP2_19_17, FN_IP2_22_20, FN_IP2_24_23,
+ FN_IP2_27_25, FN_IP2_30_28, FN_IP3_1_0, FN_CLKOUT,
+ FN_BS, FN_CS0, FN_IP3_2, FN_EX_CS0,
+ FN_IP3_5_3, FN_IP3_8_6, FN_IP3_11_9, FN_IP3_14_12,
+ FN_IP3_17_15, FN_RD, FN_IP3_19_18, FN_WE0,
+ FN_WE1, FN_IP2_4_3, FN_IP3_23_21, FN_IP3_26_24,
+ FN_IP2_7_5, FN_IP2_10_8, FN_IP2_13_11, FN_IP11_25_23,
+
+ /* GPSR2 */
+ FN_IP11_6_4, FN_IP11_9_7, FN_IP11_11_10, FN_IP4_2_0,
+ FN_IP8_29_28, FN_IP11_27_26, FN_IP8_22_20, FN_IP8_25_23,
+ FN_IP11_12, FN_IP8_27_26, FN_IP4_5_3, FN_IP4_8_6,
+ FN_IP4_11_9, FN_IP4_14_12, FN_IP4_17_15, FN_IP4_19_18,
+ FN_IP4_21_20, FN_IP4_23_22, FN_IP4_25_24, FN_IP4_27_26,
+ FN_IP4_29_28, FN_IP4_31_30, FN_IP5_2_0, FN_IP5_5_3,
+ FN_IP5_8_6, FN_IP5_11_9, FN_IP5_14_12, FN_IP5_17_15,
+ FN_IP5_20_18, FN_IP5_22_21, FN_IP5_24_23, FN_IP5_26_25,
+
+ /* GPSR3 */
+ FN_IP6_2_0, FN_IP6_5_3, FN_IP6_7_6, FN_IP6_9_8,
+ FN_IP6_11_10, FN_IP6_13_12, FN_IP6_15_14, FN_IP6_17_16,
+ FN_IP6_20_18, FN_IP6_23_21, FN_IP7_2_0, FN_IP7_5_3,
+ FN_IP7_8_6, FN_IP7_11_9, FN_IP7_14_12, FN_IP7_17_15,
+ FN_IP7_20_18, FN_IP7_23_21, FN_IP7_26_24, FN_IP7_28_27,
+ FN_IP7_30_29, FN_IP8_1_0, FN_IP8_3_2, FN_IP8_5_4,
+ FN_IP8_7_6, FN_IP8_9_8, FN_IP8_11_10, FN_IP8_13_12,
+ FN_IP8_15_14, FN_IP8_17_16, FN_IP8_19_18, FN_IP9_1_0,
+
+ /* GPSR4 */
+ FN_IP9_19_18, FN_IP9_21_20, FN_IP9_23_22, FN_IP9_25_24,
+ FN_IP9_11_10, FN_IP9_13_12, FN_IP9_15_14, FN_IP9_17_16,
+ FN_IP9_3_2, FN_IP9_5_4, FN_IP9_7_6, FN_IP9_9_8,
+ FN_IP9_27_26, FN_IP9_29_28, FN_IP10_2_0, FN_IP10_5_3,
+ FN_IP10_8_6, FN_IP10_11_9, FN_IP10_14_12, FN_IP10_15,
+ FN_IP10_18_16, FN_IP10_21_19, FN_IP11_0, FN_IP11_1,
+ FN_SCL0, FN_IP11_2, FN_PENC0, FN_IP11_15_13, /* Need check */
+ FN_USB_OVC0, FN_IP11_18_16,
+ FN_IP10_22, FN_IP10_24_23,
+
+ /* GPSR5 */
+ FN_IP10_25, FN_IP11_3, FN_IRQ2_B, FN_IRQ3_B,
+ FN_IP10_27_26, /* 10 */
+ FN_IP10_29_28, /* 11 */
+
+ /* IPSR0 */
+ FN_A15, FN_ST0_VCO_CLKIN, FN_LCD_DATA15_A, FN_TIOC3D_C,
+ FN_A14, FN_LCD_DATA14_A, FN_TIOC3C_C,
+ FN_A13, FN_LCD_DATA13_A, FN_TIOC3B_C,
+ FN_A12, FN_LCD_DATA12_A, FN_TIOC3A_C,
+ FN_A11, FN_ST0_D7, FN_LCD_DATA11_A, FN_TIOC2B_C,
+ FN_A10, FN_ST0_D6, FN_LCD_DATA10_A, FN_TIOC2A_C,
+ FN_A9, FN_ST0_D5, FN_LCD_DATA9_A, FN_TIOC1B_C,
+ FN_A8, FN_ST0_D4, FN_LCD_DATA8_A, FN_TIOC1A_C,
+ FN_A7, FN_ST0_D3, FN_LCD_DATA7_A, FN_TIOC0D_C,
+ FN_A6, FN_ST0_D2, FN_LCD_DATA6_A, FN_TIOC0C_C,
+ FN_A5, FN_ST0_D1, FN_LCD_DATA5_A, FN_TIOC0B_C,
+ FN_A4, FN_ST0_D0, FN_LCD_DATA4_A, FN_TIOC0A_C,
+ FN_A3, FN_ST0_VLD, FN_LCD_DATA3_A, FN_TCLKD_C,
+ FN_A2, FN_ST0_SYC, FN_LCD_DATA2_A, FN_TCLKC_C,
+ FN_A1, FN_ST0_REQ, FN_LCD_DATA1_A, FN_TCLKB_C,
+ FN_A0, FN_ST0_CLKIN, FN_LCD_DATA0_A, FN_TCLKA_C,
+
+ /* IPSR1 */
+ FN_D3, FN_SD0_DAT3_A, FN_MMC_D3_A, FN_ST1_D6, FN_FD3_A,
+ FN_D2, FN_SD0_DAT2_A, FN_MMC_D2_A, FN_ST1_D5, FN_FD2_A,
+ FN_D1, FN_SD0_DAT1_A, FN_MMC_D1_A, FN_ST1_D4, FN_FD1_A,
+ FN_D0, FN_SD0_DAT0_A, FN_MMC_D0_A, FN_ST1_D3, FN_FD0_A,
+ FN_A25, FN_TX2_D, FN_ST1_D2,
+ FN_A24, FN_RX2_D, FN_ST1_D1,
+ FN_A23, FN_ST1_D0, FN_LCD_M_DISP_A,
+ FN_A22, FN_ST1_VLD, FN_LCD_VEPWC_A,
+ FN_A21, FN_ST1_SYC, FN_LCD_VCPWC_A,
+ FN_A20, FN_ST1_REQ, FN_LCD_FLM_A,
+ FN_A19, FN_ST1_CLKIN, FN_LCD_CLK_A, FN_TIOC4D_C,
+ FN_A18, FN_ST1_PWM, FN_LCD_CL2_A, FN_TIOC4C_C,
+ FN_A17, FN_ST1_VCO_CLKIN, FN_LCD_CL1_A, FN_TIOC4B_C,
+ FN_A16, FN_ST0_PWM, FN_LCD_DON_A, FN_TIOC4A_C,
+
+ /* IPSR2 */
+ FN_D14, FN_TX2_B, FN_FSE_A, FN_ET0_TX_CLK_B,
+ FN_D13, FN_RX2_B, FN_FRB_A, FN_ET0_ETXD6_B,
+ FN_D12, FN_FWE_A, FN_ET0_ETXD5_B,
+ FN_D11, FN_RSPI_MISO_A, FN_QMI_QIO1_A, FN_FRE_A,
+ FN_ET0_ETXD3_B,
+ FN_D10, FN_RSPI_MOSI_A, FN_QMO_QIO0_A, FN_FALE_A,
+ FN_ET0_ETXD2_B,
+ FN_D9, FN_SD0_CMD_A, FN_MMC_CMD_A, FN_QIO3_A, FN_FCLE_A,
+ FN_ET0_ETXD1_B,
+ FN_D8, FN_SD0_CLK_A, FN_MMC_CLK_A, FN_QIO2_A, FN_FCE_A,
+ FN_ET0_GTX_CLK_B,
+ FN_D7, FN_RSPI_SSL_A, FN_MMC_D7_A, FN_QSSL_A, FN_FD7_A,
+ FN_D6, FN_RSPI_RSPCK_A, FN_MMC_D6_A, FN_QSPCLK_A, FN_FD6_A,
+ FN_D5, FN_SD0_WP_A, FN_MMC_D5_A, FN_FD5_A,
+ FN_D4, FN_SD0_CD_A, FN_MMC_D4_A, FN_ST1_D7, FN_FD4_A,
+
+ /* IPSR3 */
+ FN_DRACK0, FN_SD1_DAT2_A, FN_ATAG, FN_TCLK1_A, FN_ET0_ETXD7,
+ FN_EX_WAIT2, FN_SD1_DAT1_A, FN_DACK2, FN_CAN1_RX_C,
+ FN_ET0_MAGIC_C, FN_ET0_ETXD6_A,
+ FN_EX_WAIT1, FN_SD1_DAT0_A, FN_DREQ2, FN_CAN1_TX_C,
+ FN_ET0_LINK_C, FN_ET0_ETXD5_A,
+ FN_EX_WAIT0, FN_TCLK1_B,
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
+ FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B, FN_ET0_ETXD3_A,
+ FN_EX_CS4, FN_SD1_WP_A, FN_ATAWR, FN_QMI_QIO1_B, FN_ET0_ETXD2_A,
+ FN_EX_CS3, FN_SD1_CD_A, FN_ATARD, FN_QMO_QIO0_B, FN_ET0_ETXD1_A,
+ FN_EX_CS2, FN_TX3_B, FN_ATACS1, FN_QSPCLK_B, FN_ET0_GTX_CLK_A,
+ FN_EX_CS1, FN_RX3_B, FN_ATACS0, FN_QIO2_B, FN_ET0_ETXD0,
+ FN_CS1_A26, FN_QIO3_B,
+ FN_D15, FN_SCK2_B,
+
+ /* IPSR4 */
+ FN_SCK2_A, FN_VI0_G3,
+ FN_RTS1_B, FN_VI0_G2,
+ FN_CTS1_B, FN_VI0_DATA7_VI0_G1,
+ FN_TX1_B, FN_VI0_DATA6_VI0_G0, FN_ET0_PHY_INT_A,
+ FN_RX1_B, FN_VI0_DATA5_VI0_B5, FN_ET0_MAGIC_A,
+ FN_SCK1_B, FN_VI0_DATA4_VI0_B4, FN_ET0_LINK_A,
+ FN_RTS0_B, FN_VI0_DATA3_VI0_B3, FN_ET0_MDIO_A,
+ FN_CTS0_B, FN_VI0_DATA2_VI0_B2, FN_RMII0_MDIO_A, FN_ET0_MDC,
+ FN_HTX0_A, FN_TX1_A, FN_VI0_DATA1_VI0_B1, FN_RMII0_MDC_A, FN_ET0_COL,
+ FN_HRX0_A, FN_RX1_A, FN_VI0_DATA0_VI0_B0, FN_RMII0_CRS_DV_A, FN_ET0_CRS,
+ FN_HSCK0_A, FN_SCK1_A, FN_VI0_VSYNC, FN_RMII0_RX_ER_A, FN_ET0_RX_ER,
+ FN_HRTS0_A, FN_RTS1_A, FN_VI0_HSYNC, FN_RMII0_TXD_EN_A, FN_ET0_RX_DV,
+ FN_HCTS0_A, FN_CTS1_A, FN_VI0_FIELD, FN_RMII0_RXD1_A, FN_ET0_ERXD7,
+
+ /* IPSR5 */
+ FN_SD2_CLK_A, FN_RX2_A, FN_VI0_G4, FN_ET0_RX_CLK_B,
+ FN_SD2_CMD_A, FN_TX2_A, FN_VI0_G5, FN_ET0_ERXD2_B,
+ FN_SD2_DAT0_A, FN_RX3_A, FN_VI0_R0, FN_ET0_ERXD3_B,
+ FN_SD2_DAT1_A, FN_TX3_A, FN_VI0_R1, FN_ET0_MDIO_B,
+ FN_SD2_DAT2_A, FN_RX4_A, FN_VI0_R2, FN_ET0_LINK_B,
+ FN_SD2_DAT3_A, FN_TX4_A, FN_VI0_R3, FN_ET0_MAGIC_B,
+ FN_SD2_CD_A, FN_RX5_A, FN_VI0_R4, FN_ET0_PHY_INT_B,
+ FN_SD2_WP_A, FN_TX5_A, FN_VI0_R5,
+ FN_REF125CK, FN_ADTRG, FN_RX5_C,
+ FN_REF50CK, FN_CTS1_E, FN_HCTS0_D,
+
+ /* IPSR6 */
+ FN_DU0_DR0, FN_SCIF_CLK_B, FN_HRX0_D, FN_IETX_A, FN_TCLKA_A, FN_HIFD00,
+ FN_DU0_DR1, FN_SCK0_B, FN_HTX0_D, FN_IERX_A, FN_TCLKB_A, FN_HIFD01,
+ FN_DU0_DR2, FN_RX0_B, FN_TCLKC_A, FN_HIFD02,
+ FN_DU0_DR3, FN_TX0_B, FN_TCLKD_A, FN_HIFD03,
+ FN_DU0_DR4, FN_CTS0_C, FN_TIOC0A_A, FN_HIFD04,
+ FN_DU0_DR5, FN_RTS0_C, FN_TIOC0B_A, FN_HIFD05,
+ FN_DU0_DR6, FN_SCK1_C, FN_TIOC0C_A, FN_HIFD06,
+ FN_DU0_DR7, FN_RX1_C, FN_TIOC0D_A, FN_HIFD07,
+ FN_DU0_DG0, FN_TX1_C, FN_HSCK0_D, FN_IECLK_A, FN_TIOC1A_A, FN_HIFD08,
+ FN_DU0_DG1, FN_CTS1_C, FN_HRTS0_D, FN_TIOC1B_A, FN_HIFD09,
+
+ /* IPSR7 */
+ FN_DU0_DG2, FN_RTS1_C, FN_RMII0_MDC_B, FN_TIOC2A_A, FN_HIFD10,
+ FN_DU0_DG3, FN_SCK2_C, FN_RMII0_MDIO_B, FN_TIOC2B_A, FN_HIFD11,
+ FN_DU0_DG4, FN_RX2_C, FN_RMII0_CRS_DV_B, FN_TIOC3A_A, FN_HIFD12,
+ FN_DU0_DG5, FN_TX2_C, FN_RMII0_RX_ER_B, FN_TIOC3B_A, FN_HIFD13,
+ FN_DU0_DG6, FN_RX3_C, FN_RMII0_RXD0_B, FN_TIOC3C_A, FN_HIFD14,
+ FN_DU0_DG7, FN_TX3_C, FN_RMII0_RXD1_B, FN_TIOC3D_A, FN_HIFD15,
+ FN_DU0_DB0, FN_RX4_C, FN_RMII0_TXD_EN_B, FN_TIOC4A_A, FN_HIFCS,
+ FN_DU0_DB1, FN_TX4_C, FN_RMII0_TXD0_B, FN_TIOC4B_A, FN_HIFRS,
+ FN_DU0_DB2, FN_RX5_B, FN_RMII0_TXD1_B, FN_TIOC4C_A, FN_HIFWR,
+ FN_DU0_DB3, FN_TX5_B, FN_TIOC4D_A, FN_HIFRD,
+ FN_DU0_DB4, FN_HIFINT,
+
+ /* IPSR8 */
+ FN_DU0_DB5, FN_HIFDREQ,
+ FN_DU0_DB6, FN_HIFRDY,
+ FN_DU0_DB7, FN_SSI_SCK0_B, FN_HIFEBL_B,
+ FN_DU0_DOTCLKIN, FN_HSPI_CS0_C, FN_SSI_WS0_B,
+ FN_DU0_DOTCLKOUT, FN_HSPI_CLK0_C, FN_SSI_SDATA0_B,
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_HSPI_TX0_C, FN_SSI_SCK1_B,
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_HSPI_RX0_C, FN_SSI_WS1_B,
+ FN_DU0_EXODDF_DU0_ODDF, FN_CAN0_RX_B, FN_HSCK0_B, FN_SSI_SDATA1_B,
+ FN_DU0_DISP, FN_CAN0_TX_B, FN_HRX0_B, FN_AUDIO_CLKA_B,
+ FN_DU0_CDE, FN_HTX0_B, FN_AUDIO_CLKB_B, FN_LCD_VCPWC_B,
+ FN_IRQ0_A, FN_HSPI_TX_B, FN_RX3_E, FN_ET0_ERXD0,
+ FN_IRQ1_A, FN_HSPI_RX_B, FN_TX3_E, FN_ET0_ERXD1,
+ FN_IRQ2_A, FN_CTS0_A, FN_HCTS0_B, FN_ET0_ERXD2_A,
+ FN_IRQ3_A, FN_RTS0_A, FN_HRTS0_B, FN_ET0_ERXD3_A,
+
+ /* IPSR9 */
+ FN_VI1_CLK_A, FN_FD0_B, FN_LCD_DATA0_B,
+ FN_VI1_0_A, FN_FD1_B, FN_LCD_DATA1_B,
+ FN_VI1_1_A, FN_FD2_B, FN_LCD_DATA2_B,
+ FN_VI1_2_A, FN_FD3_B, FN_LCD_DATA3_B,
+ FN_VI1_3_A, FN_FD4_B, FN_LCD_DATA4_B,
+ FN_VI1_4_A, FN_FD5_B, FN_LCD_DATA5_B,
+ FN_VI1_5_A, FN_FD6_B, FN_LCD_DATA6_B,
+ FN_VI1_6_A, FN_FD7_B, FN_LCD_DATA7_B,
+ FN_VI1_7_A, FN_FCE_B, FN_LCD_DATA8_B,
+ FN_SSI_SCK0_A, FN_TIOC1A_B, FN_LCD_DATA9_B,
+ FN_SSI_WS0_A, FN_TIOC1B_B, FN_LCD_DATA10_B,
+ FN_SSI_SDATA0_A, FN_VI1_0_B, FN_TIOC2A_B, FN_LCD_DATA11_B,
+ FN_SSI_SCK1_A, FN_VI1_1_B, FN_TIOC2B_B, FN_LCD_DATA12_B,
+ FN_SSI_WS1_A, FN_VI1_2_B, FN_LCD_DATA13_B,
+ FN_SSI_SDATA1_A, FN_VI1_3_B, FN_LCD_DATA14_B,
+
+ /* IPSR10 */
+ FN_SSI_SCK23, FN_VI1_4_B, FN_RX1_D, FN_FCLE_B, FN_LCD_DATA15_B,
+ FN_SSI_WS23, FN_VI1_5_B, FN_TX1_D, FN_HSCK0_C, FN_FALE_B, FN_LCD_DON_B,
+ FN_SSI_SDATA2, FN_VI1_6_B, FN_HRX0_C, FN_FRE_B, FN_LCD_CL1_B,
+ FN_SSI_SDATA3, FN_VI1_7_B, FN_HTX0_C, FN_FWE_B, FN_LCD_CL2_B,
+ FN_AUDIO_CLKA_A, FN_VI1_CLK_B, FN_SCK1_D, FN_IECLK_B, FN_LCD_FLM_B,
+ FN_AUDIO_CLKB_A, FN_LCD_CLK_B,
+ FN_AUDIO_CLKC, FN_SCK1_E, FN_HCTS0_C, FN_FRB_B, FN_LCD_VEPWC_B,
+ FN_AUDIO_CLKOUT, FN_TX1_E, FN_HRTS0_C, FN_FSE_B, FN_LCD_M_DISP_B,
+ FN_CAN_CLK_A, FN_RX4_D,
+ FN_CAN0_TX_A, FN_TX4_D, FN_MLB_CLK,
+ FN_CAN1_RX_A, FN_IRQ1_B,
+ FN_CAN0_RX_A, FN_IRQ0_B, FN_MLB_SIG,
+ FN_CAN1_TX_A, FN_TX5_C, FN_MLB_DAT,
+
+ /* IPSR11 */
+ FN_SCL1, FN_SCIF_CLK_C,
+ FN_SDA1, FN_RX1_E,
+ FN_SDA0, FN_HIFEBL_A,
+ FN_SDSELF, FN_RTS1_E,
+ FN_SCIF_CLK_A, FN_HSPI_CLK_A, FN_VI0_CLK, FN_RMII0_TXD0_A, FN_ET0_ERXD4,
+ FN_SCK0_A, FN_HSPI_CS_A, FN_VI0_CLKENB, FN_RMII0_TXD1_A, FN_ET0_ERXD5,
+ FN_RX0_A, FN_HSPI_RX_A, FN_RMII0_RXD0_A, FN_ET0_ERXD6,
+ FN_TX0_A, FN_HSPI_TX_A,
+ FN_PENC1, FN_TX3_D, FN_CAN1_TX_B, FN_TX5_D, FN_IETX_B,
+ FN_USB_OVC1, FN_RX3_D, FN_CAN1_RX_B, FN_RX5_D, FN_IERX_B,
+ FN_DREQ0, FN_SD1_CLK_A, FN_ET0_TX_EN,
+ FN_DACK0, FN_SD1_DAT3_A, FN_ET0_TX_ER,
+ FN_DREQ1, FN_HSPI_CLK_B, FN_RX4_B, FN_ET0_PHY_INT_C, FN_ET0_TX_CLK_A,
+ FN_DACK1, FN_HSPI_CS_B, FN_TX4_B, FN_ET0_RX_CLK_A,
+ FN_PRESETOUT, FN_ST_CLKOUT,
+
+ /* MOD_SEL1 */
+ FN_SEL_IEBUS_0, FN_SEL_IEBUS_1,
+ FN_SEL_RQSPI_0, FN_SEL_RQSPI_1,
+ FN_SEL_VIN1_0, FN_SEL_VIN1_1,
+ FN_SEL_HIF_0, FN_SEL_HIF_1,
+ FN_SEL_RSPI_0, FN_SEL_RSPI_1,
+ FN_SEL_LCDC_0, FN_SEL_LCDC_1,
+ FN_SEL_ET0_CTL_0, FN_SEL_ET0_CTL_1, FN_SEL_ET0_CTL_2,
+ FN_SEL_ET0_0, FN_SEL_ET0_1,
+ FN_SEL_RMII_0, FN_SEL_RMII_1,
+ FN_SEL_TMU_0, FN_SEL_TMU_1,
+ FN_SEL_HSPI_0, FN_SEL_HSPI_1, FN_SEL_HSPI_2,
+ FN_SEL_HSCIF_0, FN_SEL_HSCIF_1, FN_SEL_HSCIF_2, FN_SEL_HSCIF_3,
+ FN_SEL_RCAN_CLK_0, FN_SEL_RCAN_CLK_1,
+ FN_SEL_RCAN1_0, FN_SEL_RCAN1_1, FN_SEL_RCAN1_2,
+ FN_SEL_RCAN0_0, FN_SEL_RCAN0_1,
+ FN_SEL_SDHI2_0, FN_SEL_SDHI2_1,
+ FN_SEL_SDHI1_0, FN_SEL_SDHI1_1,
+ FN_SEL_SDHI0_0, FN_SEL_SDHI0_1,
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1,
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1,
+ FN_SEL_AUDIO_CLKB_0, FN_SEL_AUDIO_CLKB_1,
+ FN_SEL_AUDIO_CLKA_0, FN_SEL_AUDIO_CLKA_1,
+ FN_SEL_FLCTL_0, FN_SEL_FLCTL_1,
+ FN_SEL_MMC_0, FN_SEL_MMC_1,
+ FN_SEL_INTC_0, FN_SEL_INTC_1,
+
+ /* MOD_SEL2 */
+ FN_SEL_MTU2_CLK_0, FN_SEL_MTU2_CLK_1,
+ FN_SEL_MTU2_CH4_0, FN_SEL_MTU2_CH4_1,
+ FN_SEL_MTU2_CH3_0, FN_SEL_MTU2_CH3_1,
+ FN_SEL_MTU2_CH2_0, FN_SEL_MTU2_CH2_1, FN_SEL_MTU2_CH2_2,
+ FN_SEL_MTU2_CH1_0, FN_SEL_MTU2_CH1_1, FN_SEL_MTU2_CH1_2,
+ FN_SEL_MTU2_CH0_0, FN_SEL_MTU2_CH0_1,
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1,
+ FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1,
+ FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2,
+ FN_SEL_SCIF3_3, FN_SEL_SCIF3_4,
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2,
+ FN_SEL_SCIF2_3,
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2,
+ FN_SEL_SCIF1_3, FN_SEL_SCIF1_4,
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2,
+ FN_SEL_SCIF_CLK_0, FN_SEL_SCIF_CLK_1, FN_SEL_SCIF_CLK_2,
+
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ CLKOUT_MARK, BS_MARK, CS0_MARK, EX_CS0_MARK, RD_MARK,
+ WE0_MARK, WE1_MARK,
+
+ SCL0_MARK, PENC0_MARK, USB_OVC0_MARK,
+
+ IRQ2_B_MARK, IRQ3_B_MARK,
+
+ /* IPSR0 */
+ A15_MARK, ST0_VCO_CLKIN_MARK, LCD_DATA15_A_MARK, TIOC3D_C_MARK,
+ A14_MARK, LCD_DATA14_A_MARK, TIOC3C_C_MARK,
+ A13_MARK, LCD_DATA13_A_MARK, TIOC3B_C_MARK,
+ A12_MARK, LCD_DATA12_A_MARK, TIOC3A_C_MARK,
+ A11_MARK, ST0_D7_MARK, LCD_DATA11_A_MARK, TIOC2B_C_MARK,
+ A10_MARK, ST0_D6_MARK, LCD_DATA10_A_MARK, TIOC2A_C_MARK,
+ A9_MARK, ST0_D5_MARK, LCD_DATA9_A_MARK, TIOC1B_C_MARK,
+ A8_MARK, ST0_D4_MARK, LCD_DATA8_A_MARK, TIOC1A_C_MARK,
+ A7_MARK, ST0_D3_MARK, LCD_DATA7_A_MARK, TIOC0D_C_MARK,
+ A6_MARK, ST0_D2_MARK, LCD_DATA6_A_MARK, TIOC0C_C_MARK,
+ A5_MARK, ST0_D1_MARK, LCD_DATA5_A_MARK, TIOC0B_C_MARK,
+ A4_MARK, ST0_D0_MARK, LCD_DATA4_A_MARK, TIOC0A_C_MARK,
+ A3_MARK, ST0_VLD_MARK, LCD_DATA3_A_MARK, TCLKD_C_MARK,
+ A2_MARK, ST0_SYC_MARK, LCD_DATA2_A_MARK, TCLKC_C_MARK,
+ A1_MARK, ST0_REQ_MARK, LCD_DATA1_A_MARK, TCLKB_C_MARK,
+ A0_MARK, ST0_CLKIN_MARK, LCD_DATA0_A_MARK, TCLKA_C_MARK,
+
+ /* IPSR1 */
+ D3_MARK, SD0_DAT3_A_MARK, MMC_D3_A_MARK, ST1_D6_MARK, FD3_A_MARK,
+ D2_MARK, SD0_DAT2_A_MARK, MMC_D2_A_MARK, ST1_D5_MARK, FD2_A_MARK,
+ D1_MARK, SD0_DAT1_A_MARK, MMC_D1_A_MARK, ST1_D4_MARK, FD1_A_MARK,
+ D0_MARK, SD0_DAT0_A_MARK, MMC_D0_A_MARK, ST1_D3_MARK, FD0_A_MARK,
+ A25_MARK, TX2_D_MARK, ST1_D2_MARK,
+ A24_MARK, RX2_D_MARK, ST1_D1_MARK,
+ A23_MARK, ST1_D0_MARK, LCD_M_DISP_A_MARK,
+ A22_MARK, ST1_VLD_MARK, LCD_VEPWC_A_MARK,
+ A21_MARK, ST1_SYC_MARK, LCD_VCPWC_A_MARK,
+ A20_MARK, ST1_REQ_MARK, LCD_FLM_A_MARK,
+ A19_MARK, ST1_CLKIN_MARK, LCD_CLK_A_MARK, TIOC4D_C_MARK,
+ A18_MARK, ST1_PWM_MARK, LCD_CL2_A_MARK, TIOC4C_C_MARK,
+ A17_MARK, ST1_VCO_CLKIN_MARK, LCD_CL1_A_MARK, TIOC4B_C_MARK,
+ A16_MARK, ST0_PWM_MARK, LCD_DON_A_MARK, TIOC4A_C_MARK,
+
+ /* IPSR2 */
+ D14_MARK, TX2_B_MARK, FSE_A_MARK, ET0_TX_CLK_B_MARK,
+ D13_MARK, RX2_B_MARK, FRB_A_MARK, ET0_ETXD6_B_MARK,
+ D12_MARK, FWE_A_MARK, ET0_ETXD5_B_MARK,
+ D11_MARK, RSPI_MISO_A_MARK, QMI_QIO1_A_MARK, FRE_A_MARK,
+ ET0_ETXD3_B_MARK,
+ D10_MARK, RSPI_MOSI_A_MARK, QMO_QIO0_A_MARK, FALE_A_MARK,
+ ET0_ETXD2_B_MARK,
+ D9_MARK, SD0_CMD_A_MARK, MMC_CMD_A_MARK, QIO3_A_MARK,
+ FCLE_A_MARK, ET0_ETXD1_B_MARK,
+ D8_MARK, SD0_CLK_A_MARK, MMC_CLK_A_MARK, QIO2_A_MARK,
+ FCE_A_MARK, ET0_GTX_CLK_B_MARK,
+ D7_MARK, RSPI_SSL_A_MARK, MMC_D7_A_MARK, QSSL_A_MARK,
+ FD7_A_MARK,
+ D6_MARK, RSPI_RSPCK_A_MARK, MMC_D6_A_MARK, QSPCLK_A_MARK,
+ FD6_A_MARK,
+ D5_MARK, SD0_WP_A_MARK, MMC_D5_A_MARK, FD5_A_MARK,
+ D4_MARK, SD0_CD_A_MARK, MMC_D4_A_MARK, ST1_D7_MARK,
+ FD4_A_MARK,
+
+ /* IPSR3 */
+ DRACK0_MARK, SD1_DAT2_A_MARK, ATAG_MARK, TCLK1_A_MARK, ET0_ETXD7_MARK,
+ EX_WAIT2_MARK, SD1_DAT1_A_MARK, DACK2_MARK, CAN1_RX_C_MARK,
+ ET0_MAGIC_C_MARK, ET0_ETXD6_A_MARK,
+ EX_WAIT1_MARK, SD1_DAT0_A_MARK, DREQ2_MARK, CAN1_TX_C_MARK,
+ ET0_LINK_C_MARK, ET0_ETXD5_A_MARK,
+ EX_WAIT0_MARK, TCLK1_B_MARK,
+ RD_WR_MARK, TCLK0_MARK, CAN_CLK_B_MARK, ET0_ETXD4_MARK,
+ EX_CS5_MARK, SD1_CMD_A_MARK, ATADIR_MARK, QSSL_B_MARK,
+ ET0_ETXD3_A_MARK,
+ EX_CS4_MARK, SD1_WP_A_MARK, ATAWR_MARK, QMI_QIO1_B_MARK,
+ ET0_ETXD2_A_MARK,
+ EX_CS3_MARK, SD1_CD_A_MARK, ATARD_MARK, QMO_QIO0_B_MARK,
+ ET0_ETXD1_A_MARK,
+ EX_CS2_MARK, TX3_B_MARK, ATACS1_MARK, QSPCLK_B_MARK,
+ ET0_GTX_CLK_A_MARK,
+ EX_CS1_MARK, RX3_B_MARK, ATACS0_MARK, QIO2_B_MARK,
+ ET0_ETXD0_MARK,
+ CS1_A26_MARK, QIO3_B_MARK,
+ D15_MARK, SCK2_B_MARK,
+
+ /* IPSR4 */
+ SCK2_A_MARK, VI0_G3_MARK,
+ RTS1_B_MARK, VI0_G2_MARK,
+ CTS1_B_MARK, VI0_DATA7_VI0_G1_MARK,
+ TX1_B_MARK, VI0_DATA6_VI0_G0_MARK, ET0_PHY_INT_A_MARK,
+ RX1_B_MARK, VI0_DATA5_VI0_B5_MARK, ET0_MAGIC_A_MARK,
+ SCK1_B_MARK, VI0_DATA4_VI0_B4_MARK, ET0_LINK_A_MARK,
+ RTS0_B_MARK, VI0_DATA3_VI0_B3_MARK, ET0_MDIO_A_MARK,
+ CTS0_B_MARK, VI0_DATA2_VI0_B2_MARK, RMII0_MDIO_A_MARK,
+ ET0_MDC_MARK,
+ HTX0_A_MARK, TX1_A_MARK, VI0_DATA1_VI0_B1_MARK,
+ RMII0_MDC_A_MARK, ET0_COL_MARK,
+ HRX0_A_MARK, RX1_A_MARK, VI0_DATA0_VI0_B0_MARK,
+ RMII0_CRS_DV_A_MARK, ET0_CRS_MARK,
+ HSCK0_A_MARK, SCK1_A_MARK, VI0_VSYNC_MARK,
+ RMII0_RX_ER_A_MARK, ET0_RX_ER_MARK,
+ HRTS0_A_MARK, RTS1_A_MARK, VI0_HSYNC_MARK,
+ RMII0_TXD_EN_A_MARK, ET0_RX_DV_MARK,
+ HCTS0_A_MARK, CTS1_A_MARK, VI0_FIELD_MARK,
+ RMII0_RXD1_A_MARK, ET0_ERXD7_MARK,
+
+ /* IPSR5 */
+ SD2_CLK_A_MARK, RX2_A_MARK, VI0_G4_MARK, ET0_RX_CLK_B_MARK,
+ SD2_CMD_A_MARK, TX2_A_MARK, VI0_G5_MARK, ET0_ERXD2_B_MARK,
+ SD2_DAT0_A_MARK, RX3_A_MARK, VI0_R0_MARK, ET0_ERXD3_B_MARK,
+ SD2_DAT1_A_MARK, TX3_A_MARK, VI0_R1_MARK, ET0_MDIO_B_MARK,
+ SD2_DAT2_A_MARK, RX4_A_MARK, VI0_R2_MARK, ET0_LINK_B_MARK,
+ SD2_DAT3_A_MARK, TX4_A_MARK, VI0_R3_MARK, ET0_MAGIC_B_MARK,
+ SD2_CD_A_MARK, RX5_A_MARK, VI0_R4_MARK, ET0_PHY_INT_B_MARK,
+ SD2_WP_A_MARK, TX5_A_MARK, VI0_R5_MARK,
+ REF125CK_MARK, ADTRG_MARK, RX5_C_MARK,
+ REF50CK_MARK, CTS1_E_MARK, HCTS0_D_MARK,
+
+ /* IPSR6 */
+ DU0_DR0_MARK, SCIF_CLK_B_MARK, HRX0_D_MARK, IETX_A_MARK,
+ TCLKA_A_MARK, HIFD00_MARK,
+ DU0_DR1_MARK, SCK0_B_MARK, HTX0_D_MARK, IERX_A_MARK,
+ TCLKB_A_MARK, HIFD01_MARK,
+ DU0_DR2_MARK, RX0_B_MARK, TCLKC_A_MARK, HIFD02_MARK,
+ DU0_DR3_MARK, TX0_B_MARK, TCLKD_A_MARK, HIFD03_MARK,
+ DU0_DR4_MARK, CTS0_C_MARK, TIOC0A_A_MARK, HIFD04_MARK,
+ DU0_DR5_MARK, RTS0_C_MARK, TIOC0B_A_MARK, HIFD05_MARK,
+ DU0_DR6_MARK, SCK1_C_MARK, TIOC0C_A_MARK, HIFD06_MARK,
+ DU0_DR7_MARK, RX1_C_MARK, TIOC0D_A_MARK, HIFD07_MARK,
+ DU0_DG0_MARK, TX1_C_MARK, HSCK0_D_MARK, IECLK_A_MARK,
+ TIOC1A_A_MARK, HIFD08_MARK,
+ DU0_DG1_MARK, CTS1_C_MARK, HRTS0_D_MARK, TIOC1B_A_MARK,
+ HIFD09_MARK,
+
+ /* IPSR7 */
+ DU0_DG2_MARK, RTS1_C_MARK, RMII0_MDC_B_MARK, TIOC2A_A_MARK,
+ HIFD10_MARK,
+ DU0_DG3_MARK, SCK2_C_MARK, RMII0_MDIO_B_MARK, TIOC2B_A_MARK,
+ HIFD11_MARK,
+ DU0_DG4_MARK, RX2_C_MARK, RMII0_CRS_DV_B_MARK, TIOC3A_A_MARK,
+ HIFD12_MARK,
+ DU0_DG5_MARK, TX2_C_MARK, RMII0_RX_ER_B_MARK, TIOC3B_A_MARK,
+ HIFD13_MARK,
+ DU0_DG6_MARK, RX3_C_MARK, RMII0_RXD0_B_MARK, TIOC3C_A_MARK,
+ HIFD14_MARK,
+ DU0_DG7_MARK, TX3_C_MARK, RMII0_RXD1_B_MARK, TIOC3D_A_MARK,
+ HIFD15_MARK,
+ DU0_DB0_MARK, RX4_C_MARK, RMII0_TXD_EN_B_MARK, TIOC4A_A_MARK,
+ HIFCS_MARK,
+ DU0_DB1_MARK, TX4_C_MARK, RMII0_TXD0_B_MARK, TIOC4B_A_MARK,
+ HIFRS_MARK,
+ DU0_DB2_MARK, RX5_B_MARK, RMII0_TXD1_B_MARK, TIOC4C_A_MARK,
+ HIFWR_MARK,
+ DU0_DB3_MARK, TX5_B_MARK, TIOC4D_A_MARK, HIFRD_MARK,
+ DU0_DB4_MARK, HIFINT_MARK,
+
+ /* IPSR8 */
+ DU0_DB5_MARK, HIFDREQ_MARK,
+ DU0_DB6_MARK, HIFRDY_MARK,
+ DU0_DB7_MARK, SSI_SCK0_B_MARK, HIFEBL_B_MARK,
+ DU0_DOTCLKIN_MARK, HSPI_CS0_C_MARK, SSI_WS0_B_MARK,
+ DU0_DOTCLKOUT_MARK, HSPI_CLK0_C_MARK, SSI_SDATA0_B_MARK,
+ DU0_EXHSYNC_DU0_HSYNC_MARK, HSPI_TX0_C_MARK, SSI_SCK1_B_MARK,
+ DU0_EXVSYNC_DU0_VSYNC_MARK, HSPI_RX0_C_MARK, SSI_WS1_B_MARK,
+ DU0_EXODDF_DU0_ODDF_MARK, CAN0_RX_B_MARK, HSCK0_B_MARK,
+ SSI_SDATA1_B_MARK,
+ DU0_DISP_MARK, CAN0_TX_B_MARK, HRX0_B_MARK, AUDIO_CLKA_B_MARK,
+ DU0_CDE_MARK, HTX0_B_MARK, AUDIO_CLKB_B_MARK, LCD_VCPWC_B_MARK,
+ IRQ0_A_MARK, HSPI_TX_B_MARK, RX3_E_MARK, ET0_ERXD0_MARK,
+ IRQ1_A_MARK, HSPI_RX_B_MARK, TX3_E_MARK, ET0_ERXD1_MARK,
+ IRQ2_A_MARK, CTS0_A_MARK, HCTS0_B_MARK, ET0_ERXD2_A_MARK,
+ IRQ3_A_MARK, RTS0_A_MARK, HRTS0_B_MARK, ET0_ERXD3_A_MARK,
+
+ /* IPSR9 */
+ VI1_CLK_A_MARK, FD0_B_MARK, LCD_DATA0_B_MARK,
+ VI1_0_A_MARK, FD1_B_MARK, LCD_DATA1_B_MARK,
+ VI1_1_A_MARK, FD2_B_MARK, LCD_DATA2_B_MARK,
+ VI1_2_A_MARK, FD3_B_MARK, LCD_DATA3_B_MARK,
+ VI1_3_A_MARK, FD4_B_MARK, LCD_DATA4_B_MARK,
+ VI1_4_A_MARK, FD5_B_MARK, LCD_DATA5_B_MARK,
+ VI1_5_A_MARK, FD6_B_MARK, LCD_DATA6_B_MARK,
+ VI1_6_A_MARK, FD7_B_MARK, LCD_DATA7_B_MARK,
+ VI1_7_A_MARK, FCE_B_MARK, LCD_DATA8_B_MARK,
+ SSI_SCK0_A_MARK, TIOC1A_B_MARK, LCD_DATA9_B_MARK,
+ SSI_WS0_A_MARK, TIOC1B_B_MARK, LCD_DATA10_B_MARK,
+ SSI_SDATA0_A_MARK, VI1_0_B_MARK, TIOC2A_B_MARK, LCD_DATA11_B_MARK,
+ SSI_SCK1_A_MARK, VI1_1_B_MARK, TIOC2B_B_MARK, LCD_DATA12_B_MARK,
+ SSI_WS1_A_MARK, VI1_2_B_MARK, LCD_DATA13_B_MARK,
+ SSI_SDATA1_A_MARK, VI1_3_B_MARK, LCD_DATA14_B_MARK,
+
+ /* IPSR10 */
+ SSI_SCK23_MARK, VI1_4_B_MARK, RX1_D_MARK, FCLE_B_MARK,
+ LCD_DATA15_B_MARK,
+ SSI_WS23_MARK, VI1_5_B_MARK, TX1_D_MARK, HSCK0_C_MARK,
+ FALE_B_MARK, LCD_DON_B_MARK,
+ SSI_SDATA2_MARK, VI1_6_B_MARK, HRX0_C_MARK, FRE_B_MARK,
+ LCD_CL1_B_MARK,
+ SSI_SDATA3_MARK, VI1_7_B_MARK, HTX0_C_MARK, FWE_B_MARK,
+ LCD_CL2_B_MARK,
+ AUDIO_CLKA_A_MARK, VI1_CLK_B_MARK, SCK1_D_MARK, IECLK_B_MARK,
+ LCD_FLM_B_MARK,
+ AUDIO_CLKB_A_MARK, LCD_CLK_B_MARK,
+ AUDIO_CLKC_MARK, SCK1_E_MARK, HCTS0_C_MARK, FRB_B_MARK,
+ LCD_VEPWC_B_MARK,
+ AUDIO_CLKOUT_MARK, TX1_E_MARK, HRTS0_C_MARK, FSE_B_MARK,
+ LCD_M_DISP_B_MARK,
+ CAN_CLK_A_MARK, RX4_D_MARK,
+ CAN0_TX_A_MARK, TX4_D_MARK, MLB_CLK_MARK,
+ CAN1_RX_A_MARK, IRQ1_B_MARK,
+ CAN0_RX_A_MARK, IRQ0_B_MARK, MLB_SIG_MARK,
+ CAN1_TX_A_MARK, TX5_C_MARK, MLB_DAT_MARK,
+
+ /* IPSR11 */
+ SCL1_MARK, SCIF_CLK_C_MARK,
+ SDA1_MARK, RX1_E_MARK,
+ SDA0_MARK, HIFEBL_A_MARK,
+ SDSELF_MARK, RTS1_E_MARK,
+ SCIF_CLK_A_MARK, HSPI_CLK_A_MARK, VI0_CLK_MARK, RMII0_TXD0_A_MARK,
+ ET0_ERXD4_MARK,
+ SCK0_A_MARK, HSPI_CS_A_MARK, VI0_CLKENB_MARK, RMII0_TXD1_A_MARK,
+ ET0_ERXD5_MARK,
+ RX0_A_MARK, HSPI_RX_A_MARK, RMII0_RXD0_A_MARK, ET0_ERXD6_MARK,
+ TX0_A_MARK, HSPI_TX_A_MARK,
+ PENC1_MARK, TX3_D_MARK, CAN1_TX_B_MARK, TX5_D_MARK,
+ IETX_B_MARK,
+ USB_OVC1_MARK, RX3_D_MARK, CAN1_RX_B_MARK, RX5_D_MARK,
+ IERX_B_MARK,
+ DREQ0_MARK, SD1_CLK_A_MARK, ET0_TX_EN_MARK,
+ DACK0_MARK, SD1_DAT3_A_MARK, ET0_TX_ER_MARK,
+ DREQ1_MARK, HSPI_CLK_B_MARK, RX4_B_MARK, ET0_PHY_INT_C_MARK,
+ ET0_TX_CLK_A_MARK,
+ DACK1_MARK, HSPI_CS_B_MARK, TX4_B_MARK, ET0_RX_CLK_A_MARK,
+ PRESETOUT_MARK, ST_CLKOUT_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
+
+ PINMUX_DATA(CLKOUT_MARK, FN_CLKOUT),
+ PINMUX_DATA(BS_MARK, FN_BS), PINMUX_DATA(CS0_MARK, FN_CS0),
+ PINMUX_DATA(EX_CS0_MARK, FN_EX_CS0),
+ PINMUX_DATA(RD_MARK, FN_RD), PINMUX_DATA(WE0_MARK, FN_WE0),
+ PINMUX_DATA(WE1_MARK, FN_WE1),
+ PINMUX_DATA(SCL0_MARK, FN_SCL0), PINMUX_DATA(PENC0_MARK, FN_PENC0),
+ PINMUX_DATA(USB_OVC0_MARK, FN_USB_OVC0),
+ PINMUX_DATA(IRQ2_B_MARK, FN_IRQ2_B),
+ PINMUX_DATA(IRQ3_B_MARK, FN_IRQ3_B),
+
+ /* IPSR0 */
+ PINMUX_IPSR_DATA(IP0_1_0, A0),
+ PINMUX_IPSR_DATA(IP0_1_0, ST0_CLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_1_0, TCLKA_C, SEL_MTU2_CLK_1),
+
+ PINMUX_IPSR_DATA(IP0_3_2, A1),
+ PINMUX_IPSR_DATA(IP0_3_2, ST0_REQ),
+ PINMUX_IPSR_MODSEL_DATA(IP0_3_2, LCD_DATA1_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_3_2, TCLKB_C, SEL_MTU2_CLK_1),
+
+ PINMUX_IPSR_DATA(IP0_5_4, A2),
+ PINMUX_IPSR_DATA(IP0_5_4, ST0_SYC),
+ PINMUX_IPSR_MODSEL_DATA(IP0_5_4, LCD_DATA2_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_5_4, TCLKC_C, SEL_MTU2_CLK_1),
+
+ PINMUX_IPSR_DATA(IP0_7_6, A3),
+ PINMUX_IPSR_DATA(IP0_7_6, ST0_VLD),
+ PINMUX_IPSR_MODSEL_DATA(IP0_7_6, LCD_DATA3_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_7_6, TCLKD_C, SEL_MTU2_CLK_1),
+
+ PINMUX_IPSR_DATA(IP0_9_8, A4),
+ PINMUX_IPSR_DATA(IP0_9_8, ST0_D0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_9_8, LCD_DATA4_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_9_8, TIOC0A_C, SEL_MTU2_CH0_1),
+
+ PINMUX_IPSR_DATA(IP0_11_10, A5),
+ PINMUX_IPSR_DATA(IP0_11_10, ST0_D1),
+ PINMUX_IPSR_MODSEL_DATA(IP0_11_10, LCD_DATA5_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_11_10, TIOC0B_C, SEL_MTU2_CH0_1),
+
+ PINMUX_IPSR_DATA(IP0_13_12, A6),
+ PINMUX_IPSR_DATA(IP0_13_12, ST0_D2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_13_12, LCD_DATA6_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_13_12, TIOC0C_C, SEL_MTU2_CH0_1),
+
+ PINMUX_IPSR_DATA(IP0_15_14, A7),
+ PINMUX_IPSR_DATA(IP0_15_14, ST0_D3),
+ PINMUX_IPSR_MODSEL_DATA(IP0_15_14, LCD_DATA7_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_15_14, TIOC0D_C, SEL_MTU2_CH0_1),
+
+ PINMUX_IPSR_DATA(IP0_17_16, A8),
+ PINMUX_IPSR_DATA(IP0_17_16, ST0_D4),
+ PINMUX_IPSR_MODSEL_DATA(IP0_17_16, LCD_DATA8_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_17_16, TIOC1A_C, SEL_MTU2_CH1_2),
+
+ PINMUX_IPSR_DATA(IP0_19_18, A9),
+ PINMUX_IPSR_DATA(IP0_19_18, ST0_D5),
+ PINMUX_IPSR_MODSEL_DATA(IP0_19_18, LCD_DATA9_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_19_18, TIOC1B_C, SEL_MTU2_CH1_2),
+
+ PINMUX_IPSR_DATA(IP0_21_20, A10),
+ PINMUX_IPSR_DATA(IP0_21_20, ST0_D6),
+ PINMUX_IPSR_MODSEL_DATA(IP0_21_20, LCD_DATA10_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_21_20, TIOC2A_C, SEL_MTU2_CH2_2),
+
+ PINMUX_IPSR_DATA(IP0_23_22, A11),
+ PINMUX_IPSR_DATA(IP0_23_22, ST0_D7),
+ PINMUX_IPSR_MODSEL_DATA(IP0_23_22, LCD_DATA11_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_23_22, TIOC2B_C, SEL_MTU2_CH2_2),
+
+ PINMUX_IPSR_DATA(IP0_25_24, A12),
+ PINMUX_IPSR_MODSEL_DATA(IP0_25_24, LCD_DATA12_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_25_24, TIOC3A_C, SEL_MTU2_CH3_1),
+
+ PINMUX_IPSR_DATA(IP0_27_26, A13),
+ PINMUX_IPSR_MODSEL_DATA(IP0_27_26, LCD_DATA13_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_27_26, TIOC3B_C, SEL_MTU2_CH3_1),
+
+ PINMUX_IPSR_DATA(IP0_29_28, A14),
+ PINMUX_IPSR_MODSEL_DATA(IP0_29_28, LCD_DATA14_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_29_28, TIOC3C_C, SEL_MTU2_CH3_1),
+
+ PINMUX_IPSR_DATA(IP0_31_30, A15),
+ PINMUX_IPSR_DATA(IP0_31_30, ST0_VCO_CLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP0_31_30, LCD_DATA15_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_31_30, TIOC3D_C, SEL_MTU2_CH3_1),
+
+ /* IPSR1 */
+ PINMUX_IPSR_DATA(IP1_1_0, A16),
+ PINMUX_IPSR_DATA(IP1_1_0, ST0_PWM),
+ PINMUX_IPSR_MODSEL_DATA(IP1_1_0, LCD_DON_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_1_0, TIOC4A_C, SEL_MTU2_CH4_1),
+
+ PINMUX_IPSR_DATA(IP1_3_2, A17),
+ PINMUX_IPSR_DATA(IP1_3_2, ST1_VCO_CLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP1_3_2, LCD_CL1_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_3_2, TIOC4B_C, SEL_MTU2_CH4_1),
+
+ PINMUX_IPSR_DATA(IP1_5_4, A18),
+ PINMUX_IPSR_DATA(IP1_5_4, ST1_PWM),
+ PINMUX_IPSR_MODSEL_DATA(IP1_5_4, LCD_CL2_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_5_4, TIOC4C_C, SEL_MTU2_CH4_1),
+
+ PINMUX_IPSR_DATA(IP1_7_6, A19),
+ PINMUX_IPSR_DATA(IP1_7_6, ST1_CLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP1_7_6, LCD_CLK_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_7_6, TIOC4D_C, SEL_MTU2_CH4_1),
+
+ PINMUX_IPSR_DATA(IP1_9_8, A20),
+ PINMUX_IPSR_DATA(IP1_9_8, ST1_REQ),
+ PINMUX_IPSR_MODSEL_DATA(IP1_9_8, LCD_FLM_A, SEL_LCDC_0),
+
+ PINMUX_IPSR_DATA(IP1_11_10, A21),
+ PINMUX_IPSR_DATA(IP1_11_10, ST1_SYC),
+ PINMUX_IPSR_MODSEL_DATA(IP1_11_10, LCD_VCPWC_A, SEL_LCDC_0),
+
+ PINMUX_IPSR_DATA(IP1_13_12, A22),
+ PINMUX_IPSR_DATA(IP1_13_12, ST1_VLD),
+ PINMUX_IPSR_MODSEL_DATA(IP1_13_12, LCD_VEPWC_A, SEL_LCDC_0),
+
+ PINMUX_IPSR_DATA(IP1_15_14, A23),
+ PINMUX_IPSR_DATA(IP1_15_14, ST1_D0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_15_14, LCD_M_DISP_A, SEL_LCDC_0),
+
+ PINMUX_IPSR_DATA(IP1_17_16, A24),
+ PINMUX_IPSR_MODSEL_DATA(IP1_17_16, RX2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_DATA(IP1_17_16, ST1_D1),
+
+ PINMUX_IPSR_DATA(IP1_19_18, A25),
+ PINMUX_IPSR_MODSEL_DATA(IP1_19_18, TX2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_DATA(IP1_19_18, ST1_D2),
+
+ PINMUX_IPSR_DATA(IP1_22_20, D0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_22_20, SD0_DAT0_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_22_20, MMC_D0_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP1_22_20, ST1_D3),
+ PINMUX_IPSR_MODSEL_DATA(IP1_22_20, FD0_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP1_25_23, D1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, SD0_DAT1_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, MMC_D1_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP1_25_23, ST1_D4),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, FD1_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP1_28_26, D2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_26, SD0_DAT2_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_26, MMC_D2_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP1_28_26, ST1_D5),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_26, FD2_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP1_31_29, D3),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SD0_DAT3_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, MMC_D3_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP1_31_29, ST1_D6),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, FD3_A, SEL_FLCTL_0),
+
+ /* IPSR2 */
+ PINMUX_IPSR_DATA(IP2_2_0, D4),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SD0_CD_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, MMC_D4_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP2_2_0, ST1_D7),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, FD4_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP2_4_3, D5),
+ PINMUX_IPSR_MODSEL_DATA(IP2_4_3, SD0_WP_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_4_3, MMC_D5_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_4_3, FD5_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP2_7_5, D6),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_5, RSPI_RSPCK_A, SEL_RSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_5, MMC_D6_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_5, QSPCLK_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_5, FD6_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP2_10_8, D7),
+ PINMUX_IPSR_MODSEL_DATA(IP2_10_8, RSPI_SSL_A, SEL_RSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_10_8, MMC_D7_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_10_8, QSSL_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_10_8, FD7_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP2_13_11, D8),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, MMC_CLK_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, QIO2_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, FCE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, ET0_GTX_CLK_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_16_14, D9),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, SD0_CMD_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, MMC_CMD_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, QIO3_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, FCLE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, ET0_ETXD1_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_19_17, D10),
+ PINMUX_IPSR_MODSEL_DATA(IP2_19_17, RSPI_MOSI_A, SEL_RSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_19_17, QMO_QIO0_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_19_17, FALE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_19_17, ET0_ETXD2_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_22_20, D11),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_20, RSPI_MISO_A, SEL_RSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_20, QMI_QIO1_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_20, FRE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_20, ET0_ETXD3_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_24_23, D12),
+ PINMUX_IPSR_MODSEL_DATA(IP2_24_23, FWE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_24_23, ET0_ETXD5_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_27_25, D13),
+ PINMUX_IPSR_MODSEL_DATA(IP2_27_25, RX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_27_25, FRB_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_27_25, ET0_ETXD6_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_30_28, D14),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, TX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, FSE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, ET0_TX_CLK_B, SEL_ET0_1),
+
+ /* IPSR3 */
+ PINMUX_IPSR_DATA(IP3_1_0, D15),
+ PINMUX_IPSR_MODSEL_DATA(IP3_1_0, SCK2_B, SEL_SCIF2_1),
+
+ PINMUX_IPSR_DATA(IP3_2, CS1_A26),
+ PINMUX_IPSR_MODSEL_DATA(IP3_2, QIO3_B, SEL_RQSPI_1),
+
+ PINMUX_IPSR_DATA(IP3_5_3, EX_CS1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_5_3, RX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_DATA(IP3_5_3, ATACS0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_5_3, QIO2_B, SEL_RQSPI_1),
+ PINMUX_IPSR_DATA(IP3_5_3, ET0_ETXD0),
+
+ PINMUX_IPSR_DATA(IP3_8_6, EX_CS2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, TX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_DATA(IP3_8_6, ATACS1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, QSPCLK_B, SEL_RQSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, ET0_GTX_CLK_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_11_9, EX_CS3),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SD1_CD_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_11_9, ATARD),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, QMO_QIO0_B, SEL_RQSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, ET0_ETXD1_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_14_12, EX_CS4),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SD1_WP_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_14_12, ATAWR),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, QMI_QIO1_B, SEL_RQSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, ET0_ETXD2_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_17_15, EX_CS5),
+ PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SD1_CMD_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_17_15, ATADIR),
+ PINMUX_IPSR_MODSEL_DATA(IP3_17_15, QSSL_B, SEL_RQSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_17_15, ET0_ETXD3_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_19_18, RD_WR),
+ PINMUX_IPSR_DATA(IP3_19_18, TCLK0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_19_18, CAN_CLK_B, SEL_RCAN_CLK_1),
+ PINMUX_IPSR_DATA(IP3_19_18, ET0_ETXD4),
+
+ PINMUX_IPSR_DATA(IP3_20, EX_WAIT0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_20, TCLK1_B, SEL_TMU_1),
+
+ PINMUX_IPSR_DATA(IP3_23_21, EX_WAIT1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_23_21, SD1_DAT0_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_23_21, DREQ2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_23_21, CAN1_TX_C, SEL_RCAN1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_23_21, ET0_LINK_C, SEL_ET0_CTL_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_23_21, ET0_ETXD5_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_26_24, EX_WAIT2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SD1_DAT1_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_26_24, DACK2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, CAN1_RX_C, SEL_RCAN1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, ET0_MAGIC_C, SEL_ET0_CTL_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, ET0_ETXD6_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_29_27, SD1_DAT2_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_29_27, ATAG),
+ PINMUX_IPSR_MODSEL_DATA(IP3_29_27, TCLK1_A, SEL_TMU_0),
+ PINMUX_IPSR_DATA(IP3_29_27, ET0_ETXD7),
+
+ /* IPSR4 */
+ PINMUX_IPSR_MODSEL_DATA(IP4_2_0, HCTS0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_2_0, CTS1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_2_0, VI0_FIELD),
+ PINMUX_IPSR_MODSEL_DATA(IP4_2_0, RMII0_RXD1_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_2_0, ET0_ERXD7),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_5_3, HRTS0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_5_3, RTS1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_5_3, VI0_HSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP4_5_3, RMII0_TXD_EN_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_5_3, ET0_RX_DV),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_8_6, HSCK0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_8_6, SCK1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_8_6, VI0_VSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP4_8_6, RMII0_RX_ER_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_8_6, ET0_RX_ER),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_11_9, HRX0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_11_9, RX1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_11_9, VI0_DATA0_VI0_B0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_11_9, RMII0_CRS_DV_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_11_9, ET0_CRS),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_14_12, HTX0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_14_12, TX1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_14_12, VI0_DATA1_VI0_B1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_14_12, RMII0_MDC_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_14_12, ET0_COL),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_17_15, CTS0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_DATA(IP4_17_15, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_17_15, RMII0_MDIO_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_17_15, ET0_MDC),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_19_18, RTS0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_DATA(IP4_19_18, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_MODSEL_DATA(IP4_19_18, ET0_MDIO_A, SEL_ET0_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_21_20, SCK1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_21_20, VI0_DATA4_VI0_B4),
+ PINMUX_IPSR_MODSEL_DATA(IP4_21_20, ET0_LINK_A, SEL_ET0_CTL_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_23_22, RX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_23_22, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_MODSEL_DATA(IP4_23_22, ET0_MAGIC_A, SEL_ET0_CTL_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_25_24, TX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_25_24, VI0_DATA6_VI0_G0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_25_24, ET0_PHY_INT_A, SEL_ET0_CTL_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_27_26, CTS1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_27_26, VI0_DATA7_VI0_G1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_29_28, RTS1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_29_28, VI0_G2),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_31_30, SCK2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_DATA(IP4_31_30, VI0_G3),
+
+ /* IPSR5 */
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, SD2_CLK_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RX2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_DATA(IP5_2_0, VI0_G4),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, ET0_RX_CLK_B, SEL_ET0_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, SD2_CMD_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, TX2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_DATA(IP5_5_3, VI0_G5),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, ET0_ERXD2_B, SEL_ET0_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, SD2_DAT0_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, RX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_DATA(IP5_8_6, VI0_R0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, ET0_ERXD3_B, SEL_ET0_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_11_9, SD2_DAT1_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_11_9, TX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_DATA(IP5_11_9, VI0_R1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_11_9, ET0_MDIO_B, SEL_ET0_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_14_12, SD2_DAT2_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_14_12, RX4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_DATA(IP5_14_12, VI0_R2),
+ PINMUX_IPSR_MODSEL_DATA(IP5_14_12, ET0_LINK_B, SEL_ET0_CTL_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_17_15, SD2_DAT3_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_17_15, TX4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_DATA(IP5_17_15, VI0_R3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_17_15, ET0_MAGIC_B, SEL_ET0_CTL_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_20_18, SD2_CD_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_20_18, RX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_DATA(IP5_20_18, VI0_R4),
+ PINMUX_IPSR_MODSEL_DATA(IP5_20_18, ET0_PHY_INT_B, SEL_ET0_CTL_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_22_21, SD2_WP_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_22_21, TX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_DATA(IP5_22_21, VI0_R5),
+
+ PINMUX_IPSR_DATA(IP5_24_23, REF125CK),
+ PINMUX_IPSR_DATA(IP5_24_23, ADTRG),
+ PINMUX_IPSR_MODSEL_DATA(IP5_24_23, RX5_C, SEL_SCIF5_2),
+
+ PINMUX_IPSR_DATA(IP5_26_25, REF50CK),
+ PINMUX_IPSR_MODSEL_DATA(IP5_26_25, CTS1_E, SEL_SCIF1_4),
+ PINMUX_IPSR_MODSEL_DATA(IP5_26_25, HCTS0_D, SEL_HSCIF_3),
+
+ /* IPSR6 */
+ PINMUX_IPSR_DATA(IP6_2_0, DU0_DR0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SCIF_CLK_B, SEL_SCIF_CLK_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, HRX0_D, SEL_HSCIF_3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, IETX_A, SEL_IEBUS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, TCLKA_A, SEL_MTU2_CLK_0),
+ PINMUX_IPSR_DATA(IP6_2_0, HIFD00),
+
+ PINMUX_IPSR_DATA(IP6_5_3, DU0_DR1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCK0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, HTX0_D, SEL_HSCIF_3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, IERX_A, SEL_IEBUS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, TCLKB_A, SEL_MTU2_CLK_0),
+ PINMUX_IPSR_DATA(IP6_5_3, HIFD01),
+
+ PINMUX_IPSR_DATA(IP6_7_6, DU0_DR2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_7_6, RX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_7_6, TCLKC_A, SEL_MTU2_CLK_0),
+ PINMUX_IPSR_DATA(IP6_7_6, HIFD02),
+
+ PINMUX_IPSR_DATA(IP6_9_8, DU0_DR3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_9_8, TX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_9_8, TCLKD_A, SEL_MTU2_CLK_0),
+ PINMUX_IPSR_DATA(IP6_9_8, HIFD03),
+
+ PINMUX_IPSR_DATA(IP6_11_10, DU0_DR4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_10, CTS0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_10, TIOC0A_A, SEL_MTU2_CH0_0),
+ PINMUX_IPSR_DATA(IP6_11_10, HIFD04),
+
+ PINMUX_IPSR_DATA(IP6_13_12, DU0_DR5),
+ PINMUX_IPSR_MODSEL_DATA(IP6_13_12, RTS0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_13_12, TIOC0B_A, SEL_MTU2_CH0_0),
+ PINMUX_IPSR_DATA(IP6_13_12, HIFD05),
+
+ PINMUX_IPSR_DATA(IP6_15_14, DU0_DR6),
+ PINMUX_IPSR_MODSEL_DATA(IP6_15_14, SCK1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_15_14, TIOC0C_A, SEL_MTU2_CH0_0),
+ PINMUX_IPSR_DATA(IP6_15_14, HIFD06),
+
+ PINMUX_IPSR_DATA(IP6_17_16, DU0_DR7),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_16, RX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_16, TIOC0D_A, SEL_MTU2_CH0_0),
+ PINMUX_IPSR_DATA(IP6_17_16, HIFD07),
+
+ PINMUX_IPSR_DATA(IP6_20_18, DU0_DG0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_18, TX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_18, HSCK0_D, SEL_HSCIF_3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_18, IECLK_A, SEL_IEBUS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_18, TIOC1A_A, SEL_MTU2_CH1_0),
+ PINMUX_IPSR_DATA(IP6_20_18, HIFD08),
+
+ PINMUX_IPSR_DATA(IP6_23_21, DU0_DG1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, CTS1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, HRTS0_D, SEL_HSCIF_3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, TIOC1B_A, SEL_MTU2_CH1_0),
+ PINMUX_IPSR_DATA(IP6_23_21, HIFD09),
+
+ /* IPSR7 */
+ PINMUX_IPSR_DATA(IP7_2_0, DU0_DG2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, RTS1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, RMII0_MDC_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, TIOC2A_A, SEL_MTU2_CH2_0),
+ PINMUX_IPSR_DATA(IP7_2_0, HIFD10),
+
+ PINMUX_IPSR_DATA(IP7_5_3, DU0_DG3),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, SCK2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, RMII0_MDIO_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, TIOC2B_A, SEL_MTU2_CH2_0),
+ PINMUX_IPSR_DATA(IP7_5_3, HIFD11),
+
+ PINMUX_IPSR_DATA(IP7_8_6, DU0_DG4),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RMII0_CRS_DV_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, TIOC3A_A, SEL_MTU2_CH3_0),
+ PINMUX_IPSR_DATA(IP7_8_6, HIFD12),
+
+ PINMUX_IPSR_DATA(IP7_11_9, DU0_DG5),
+ PINMUX_IPSR_MODSEL_DATA(IP7_11_9, TX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_11_9, RMII0_RX_ER_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_11_9, TIOC3B_A, SEL_MTU2_CH3_0),
+ PINMUX_IPSR_DATA(IP7_11_9, HIFD13),
+
+ PINMUX_IPSR_DATA(IP7_14_12, DU0_DG6),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_12, RX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_12, RMII0_RXD0_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_12, TIOC3C_A, SEL_MTU2_CH3_0),
+ PINMUX_IPSR_DATA(IP7_14_12, HIFD14),
+
+ PINMUX_IPSR_DATA(IP7_17_15, DU0_DG7),
+ PINMUX_IPSR_MODSEL_DATA(IP7_17_15, TX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_17_15, RMII0_RXD1_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_17_15, TIOC3D_A, SEL_MTU2_CH3_0),
+ PINMUX_IPSR_DATA(IP7_17_15, HIFD15),
+
+ PINMUX_IPSR_DATA(IP7_20_18, DU0_DB0),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_18, RX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_18, RMII0_TXD_EN_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_18, TIOC4A_A, SEL_MTU2_CH4_0),
+ PINMUX_IPSR_DATA(IP7_20_18, HIFCS),
+
+ PINMUX_IPSR_DATA(IP7_23_21, DU0_DB1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, RMII0_TXD0_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TIOC4B_A, SEL_MTU2_CH4_0),
+ PINMUX_IPSR_DATA(IP7_23_21, HIFRS),
+
+ PINMUX_IPSR_DATA(IP7_26_24, DU0_DB2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RMII0_TXD1_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, TIOC4C_A, SEL_MTU2_CH4_0),
+ PINMUX_IPSR_DATA(IP7_26_24, HIFWR),
+
+ PINMUX_IPSR_DATA(IP7_28_27, DU0_DB3),
+ PINMUX_IPSR_MODSEL_DATA(IP7_28_27, TX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_28_27, TIOC4D_A, SEL_MTU2_CH4_0),
+ PINMUX_IPSR_DATA(IP7_28_27, HIFRD),
+
+ PINMUX_IPSR_DATA(IP7_30_29, DU0_DB4),
+ PINMUX_IPSR_DATA(IP7_30_29, HIFINT),
+
+ /* IPSR8 */
+ PINMUX_IPSR_DATA(IP8_1_0, DU0_DB5),
+ PINMUX_IPSR_DATA(IP8_1_0, HIFDREQ),
+
+ PINMUX_IPSR_DATA(IP8_3_2, DU0_DB6),
+ PINMUX_IPSR_DATA(IP8_3_2, HIFRDY),
+
+ PINMUX_IPSR_DATA(IP8_5_4, DU0_DB7),
+ PINMUX_IPSR_MODSEL_DATA(IP8_5_4, SSI_SCK0_B, SEL_SSI0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_5_4, HIFEBL_B, SEL_HIF_1),
+
+ PINMUX_IPSR_DATA(IP8_7_6, DU0_DOTCLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP8_7_6, HSPI_CS0_C, SEL_HSPI_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_7_6, SSI_WS0_B, SEL_SSI0_1),
+
+ PINMUX_IPSR_DATA(IP8_9_8, DU0_DOTCLKOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP8_9_8, HSPI_CLK0_C, SEL_HSPI_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_9_8, SSI_SDATA0_B, SEL_SSI0_1),
+
+ PINMUX_IPSR_DATA(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP8_11_10, HSPI_TX0_C, SEL_HSPI_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_11_10, SSI_SCK1_B, SEL_SSI1_1),
+
+ PINMUX_IPSR_DATA(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP8_13_12, HSPI_RX0_C, SEL_HSPI_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_13_12, SSI_WS1_B, SEL_SSI1_1),
+
+ PINMUX_IPSR_DATA(IP8_15_14, DU0_EXODDF_DU0_ODDF),
+ PINMUX_IPSR_MODSEL_DATA(IP8_15_14, CAN0_RX_B, SEL_RCAN0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_15_14, HSCK0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_15_14, SSI_SDATA1_B, SEL_SSI1_1),
+
+ PINMUX_IPSR_DATA(IP8_17_16, DU0_DISP),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_16, CAN0_TX_B, SEL_RCAN0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_16, HRX0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_16, AUDIO_CLKA_B, SEL_AUDIO_CLKA_1),
+
+ PINMUX_IPSR_DATA(IP8_19_18, DU0_CDE),
+ PINMUX_IPSR_MODSEL_DATA(IP8_19_18, HTX0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_19_18, AUDIO_CLKB_B, SEL_AUDIO_CLKB_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_19_18, LCD_VCPWC_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP8_22_20, IRQ0_A, SEL_INTC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_22_20, HSPI_TX_B, SEL_HSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_22_20, RX3_E, SEL_SCIF3_4),
+ PINMUX_IPSR_DATA(IP8_22_20, ET0_ERXD0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP8_25_23, IRQ1_A, SEL_INTC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_25_23, HSPI_RX_B, SEL_HSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_25_23, TX3_E, SEL_SCIF3_4),
+ PINMUX_IPSR_DATA(IP8_25_23, ET0_ERXD1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, IRQ2_A, SEL_INTC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, CTS0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, HCTS0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, ET0_ERXD2_A, SEL_ET0_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP8_29_28, IRQ3_A, SEL_INTC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_29_28, RTS0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_29_28, HRTS0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_29_28, ET0_ERXD3_A, SEL_ET0_0),
+
+ /* IPSR9 */
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI1_CLK_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, FD0_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, LCD_DATA0_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI1_0_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, FD1_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, LCD_DATA1_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_4, VI1_1_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_4, FD2_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_4, LCD_DATA2_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_7_6, VI1_2_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_7_6, FD3_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_7_6, LCD_DATA3_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_9_8, VI1_3_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_9_8, FD4_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_9_8, LCD_DATA4_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_11_10, VI1_4_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_11_10, FD5_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_11_10, LCD_DATA5_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, VI1_5_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, FD6_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, LCD_DATA6_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, VI1_6_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, FD7_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, LCD_DATA7_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_17_16, VI1_7_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_17_16, FCE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_17_16, LCD_DATA8_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_19_18, SSI_SCK0_A, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_19_18, TIOC1A_B, SEL_MTU2_CH1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_19_18, LCD_DATA9_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_21_20, SSI_WS0_A, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_21_20, TIOC1B_B, SEL_MTU2_CH1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_21_20, LCD_DATA10_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SSI_SDATA0_A, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, VI1_0_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, TIOC2A_B, SEL_MTU2_CH2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, LCD_DATA11_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SSI_SCK1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, VI1_1_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, TIOC2B_B, SEL_MTU2_CH2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, LCD_DATA12_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SSI_WS1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_27_26, VI1_2_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_27_26, LCD_DATA13_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_29_28, SSI_SDATA1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_29_28, VI1_3_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_29_28, LCD_DATA14_B, SEL_LCDC_1),
+
+ /* IPSR10 */
+ PINMUX_IPSR_DATA(IP10_2_0, SSI_SCK23),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, VI1_4_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, RX1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, FCLE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, LCD_DATA15_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_5_3, SSI_WS23),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, VI1_5_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, TX1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, HSCK0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, FALE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, LCD_DON_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_8_6, SSI_SDATA2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, VI1_6_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, HRX0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, FRE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, LCD_CL1_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_11_9, SSI_SDATA3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, VI1_7_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, HTX0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, FWE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, LCD_CL2_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, AUDIO_CLKA_A, SEL_AUDIO_CLKA_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, VI1_CLK_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCK1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, IECLK_B, SEL_IEBUS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, LCD_FLM_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_15, AUDIO_CLKB_A, SEL_AUDIO_CLKB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_15, LCD_CLK_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_18_16, AUDIO_CLKC),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_16, SCK1_E, SEL_SCIF1_4),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_16, HCTS0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_16, FRB_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_16, LCD_VEPWC_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_21_19, AUDIO_CLKOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, TX1_E, SEL_SCIF1_4),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, HRTS0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, FSE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, LCD_M_DISP_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_22, CAN_CLK_A, SEL_RCAN_CLK_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_22, RX4_D, SEL_SCIF4_3),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_24_23, CAN0_TX_A, SEL_RCAN0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_24_23, TX4_D, SEL_SCIF4_3),
+ PINMUX_IPSR_DATA(IP10_24_23, MLB_CLK),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_25, CAN1_RX_A, SEL_RCAN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_25, IRQ1_B, SEL_INTC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_27_26, CAN0_RX_A, SEL_RCAN0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_27_26, IRQ0_B, SEL_INTC_1),
+ PINMUX_IPSR_DATA(IP10_27_26, MLB_SIG),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_29_28, CAN1_TX_A, SEL_RCAN1_0),
+	PINMUX_IPSR_MODSEL_DATA(IP10_29_28, TX5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_DATA(IP10_29_28, MLB_DAT),
+
+ /* IPSR11 */
+ PINMUX_IPSR_DATA(IP11_0, SCL1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_0, SCIF_CLK_C, SEL_SCIF_CLK_2),
+
+ PINMUX_IPSR_DATA(IP11_1, SDA1),
+	PINMUX_IPSR_MODSEL_DATA(IP11_1, RX1_E, SEL_SCIF1_4),
+
+ PINMUX_IPSR_DATA(IP11_2, SDA0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_2, HIFEBL_A, SEL_HIF_0),
+
+ PINMUX_IPSR_DATA(IP11_3, SDSELF),
+ PINMUX_IPSR_MODSEL_DATA(IP11_3, RTS1_E, SEL_SCIF1_3),
+
+ PINMUX_IPSR_MODSEL_DATA(IP11_6_4, SCIF_CLK_A, SEL_SCIF_CLK_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_6_4, HSPI_CLK_A, SEL_HSPI_0),
+ PINMUX_IPSR_DATA(IP11_6_4, VI0_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP11_6_4, RMII0_TXD0_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP11_6_4, ET0_ERXD4),
+
+ PINMUX_IPSR_MODSEL_DATA(IP11_9_7, SCK0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_9_7, HSPI_CS_A, SEL_HSPI_0),
+ PINMUX_IPSR_DATA(IP11_9_7, VI0_CLKENB),
+ PINMUX_IPSR_MODSEL_DATA(IP11_9_7, RMII0_TXD1_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP11_9_7, ET0_ERXD5),
+
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_10, RX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_10, HSPI_RX_A, SEL_HSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_10, RMII0_RXD0_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP11_11_10, ET0_ERXD6),
+
+ PINMUX_IPSR_MODSEL_DATA(IP11_12, TX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_12, HSPI_TX_A, SEL_HSPI_0),
+
+ PINMUX_IPSR_DATA(IP11_15_13, PENC1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_15_13, TX3_D, SEL_SCIF3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_15_13, CAN1_TX_B, SEL_RCAN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_15_13, TX5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_15_13, IETX_B, SEL_IEBUS_1),
+
+ PINMUX_IPSR_DATA(IP11_18_16, USB_OVC1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_16, RX3_D, SEL_SCIF3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_16, CAN1_RX_B, SEL_RCAN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_16, RX5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_16, IERX_B, SEL_IEBUS_1),
+
+ PINMUX_IPSR_DATA(IP11_20_19, DREQ0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_20_19, SD1_CLK_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP11_20_19, ET0_TX_EN),
+
+ PINMUX_IPSR_DATA(IP11_22_21, DACK0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_22_21, SD1_DAT3_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP11_22_21, ET0_TX_ER),
+
+ PINMUX_IPSR_DATA(IP11_25_23, DREQ1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25_23, HSPI_CLK_B, SEL_HSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25_23, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25_23, ET0_PHY_INT_C, SEL_ET0_CTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25_23, ET0_TX_CLK_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP11_27_26, DACK1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_27_26, HSPI_CS_B, SEL_HSPI_1),
+	PINMUX_IPSR_MODSEL_DATA(IP11_27_26, TX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_27_26, ET0_RX_CLK_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP11_28, PRESETOUT),
+ PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ PINMUX_GPIO_GP_ALL(),
+
+ GPIO_FN(CLKOUT), GPIO_FN(BS), GPIO_FN(CS0), GPIO_FN(EX_CS0),
+ GPIO_FN(RD), GPIO_FN(WE0), GPIO_FN(WE1),
+ GPIO_FN(SCL0), GPIO_FN(PENC0), GPIO_FN(USB_OVC0),
+ GPIO_FN(IRQ2_B), GPIO_FN(IRQ3_B),
+
+ /* IPSR0 */
+ GPIO_FN(A0), GPIO_FN(ST0_CLKIN), GPIO_FN(LCD_DATA0_A),
+ GPIO_FN(TCLKA_C),
+ GPIO_FN(A1), GPIO_FN(ST0_REQ), GPIO_FN(LCD_DATA1_A),
+ GPIO_FN(TCLKB_C),
+ GPIO_FN(A2), GPIO_FN(ST0_SYC), GPIO_FN(LCD_DATA2_A),
+ GPIO_FN(TCLKC_C),
+ GPIO_FN(A3), GPIO_FN(ST0_VLD), GPIO_FN(LCD_DATA3_A),
+ GPIO_FN(TCLKD_C),
+ GPIO_FN(A4), GPIO_FN(ST0_D0), GPIO_FN(LCD_DATA4_A),
+ GPIO_FN(TIOC0A_C),
+ GPIO_FN(A5), GPIO_FN(ST0_D1), GPIO_FN(LCD_DATA5_A),
+ GPIO_FN(TIOC0B_C),
+ GPIO_FN(A6), GPIO_FN(ST0_D2), GPIO_FN(LCD_DATA6_A),
+ GPIO_FN(TIOC0C_C),
+ GPIO_FN(A7), GPIO_FN(ST0_D3), GPIO_FN(LCD_DATA7_A),
+ GPIO_FN(TIOC0D_C),
+ GPIO_FN(A8), GPIO_FN(ST0_D4), GPIO_FN(LCD_DATA8_A),
+ GPIO_FN(TIOC1A_C),
+ GPIO_FN(A9), GPIO_FN(ST0_D5), GPIO_FN(LCD_DATA9_A),
+ GPIO_FN(TIOC1B_C),
+ GPIO_FN(A10), GPIO_FN(ST0_D6), GPIO_FN(LCD_DATA10_A),
+ GPIO_FN(TIOC2A_C),
+ GPIO_FN(A11), GPIO_FN(ST0_D7), GPIO_FN(LCD_DATA11_A),
+ GPIO_FN(TIOC2B_C),
+ GPIO_FN(A12), GPIO_FN(LCD_DATA12_A), GPIO_FN(TIOC3A_C),
+ GPIO_FN(A13), GPIO_FN(LCD_DATA13_A), GPIO_FN(TIOC3B_C),
+ GPIO_FN(A14), GPIO_FN(LCD_DATA14_A), GPIO_FN(TIOC3C_C),
+ GPIO_FN(A15), GPIO_FN(ST0_VCO_CLKIN), GPIO_FN(LCD_DATA15_A),
+ GPIO_FN(TIOC3D_C),
+
+ /* IPSR1 */
+ GPIO_FN(A16), GPIO_FN(ST0_PWM), GPIO_FN(LCD_DON_A),
+ GPIO_FN(TIOC4A_C),
+ GPIO_FN(A17), GPIO_FN(ST1_VCO_CLKIN), GPIO_FN(LCD_CL1_A),
+ GPIO_FN(TIOC4B_C),
+ GPIO_FN(A18), GPIO_FN(ST1_PWM), GPIO_FN(LCD_CL2_A),
+ GPIO_FN(TIOC4C_C),
+ GPIO_FN(A19), GPIO_FN(ST1_CLKIN), GPIO_FN(LCD_CLK_A),
+ GPIO_FN(TIOC4D_C),
+ GPIO_FN(A20), GPIO_FN(ST1_REQ), GPIO_FN(LCD_FLM_A),
+ GPIO_FN(A21), GPIO_FN(ST1_SYC), GPIO_FN(LCD_VCPWC_A),
+ GPIO_FN(A22), GPIO_FN(ST1_VLD), GPIO_FN(LCD_VEPWC_A),
+ GPIO_FN(A23), GPIO_FN(ST1_D0), GPIO_FN(LCD_M_DISP_A),
+ GPIO_FN(A24), GPIO_FN(RX2_D), GPIO_FN(ST1_D1),
+ GPIO_FN(A25), GPIO_FN(TX2_D), GPIO_FN(ST1_D2),
+ GPIO_FN(D0), GPIO_FN(SD0_DAT0_A), GPIO_FN(MMC_D0_A),
+ GPIO_FN(ST1_D3), GPIO_FN(FD0_A),
+ GPIO_FN(D1), GPIO_FN(SD0_DAT1_A), GPIO_FN(MMC_D1_A),
+ GPIO_FN(ST1_D4), GPIO_FN(FD1_A),
+ GPIO_FN(D2), GPIO_FN(SD0_DAT2_A), GPIO_FN(MMC_D2_A),
+ GPIO_FN(ST1_D5), GPIO_FN(FD2_A),
+ GPIO_FN(D3), GPIO_FN(SD0_DAT3_A), GPIO_FN(MMC_D3_A),
+ GPIO_FN(ST1_D6), GPIO_FN(FD3_A),
+
+ /* IPSR2 */
+ GPIO_FN(D4), GPIO_FN(SD0_CD_A), GPIO_FN(MMC_D4_A), GPIO_FN(ST1_D7),
+ GPIO_FN(FD4_A),
+ GPIO_FN(D5), GPIO_FN(SD0_WP_A), GPIO_FN(MMC_D5_A), GPIO_FN(FD5_A),
+ GPIO_FN(D6), GPIO_FN(RSPI_RSPCK_A), GPIO_FN(MMC_D6_A),
+ GPIO_FN(QSPCLK_A),
+ GPIO_FN(FD6_A),
+ GPIO_FN(D7), GPIO_FN(RSPI_SSL_A), GPIO_FN(MMC_D7_A), GPIO_FN(QSSL_A),
+ GPIO_FN(FD7_A),
+ GPIO_FN(D8), GPIO_FN(SD0_CLK_A), GPIO_FN(MMC_CLK_A), GPIO_FN(QIO2_A),
+ GPIO_FN(FCE_A), GPIO_FN(ET0_GTX_CLK_B),
+ GPIO_FN(D9), GPIO_FN(SD0_CMD_A), GPIO_FN(MMC_CMD_A), GPIO_FN(QIO3_A),
+ GPIO_FN(FCLE_A), GPIO_FN(ET0_ETXD1_B),
+ GPIO_FN(D10), GPIO_FN(RSPI_MOSI_A), GPIO_FN(QMO_QIO0_A),
+ GPIO_FN(FALE_A), GPIO_FN(ET0_ETXD2_B),
+ GPIO_FN(D11), GPIO_FN(RSPI_MISO_A), GPIO_FN(QMI_QIO1_A), GPIO_FN(FRE_A),
+ GPIO_FN(ET0_ETXD3_B),
+ GPIO_FN(D12), GPIO_FN(FWE_A), GPIO_FN(ET0_ETXD5_B),
+ GPIO_FN(D13), GPIO_FN(RX2_B), GPIO_FN(FRB_A), GPIO_FN(ET0_ETXD6_B),
+ GPIO_FN(D14), GPIO_FN(TX2_B), GPIO_FN(FSE_A), GPIO_FN(ET0_TX_CLK_B),
+
+ /* IPSR3 */
+ GPIO_FN(D15), GPIO_FN(SCK2_B),
+ GPIO_FN(CS1_A26), GPIO_FN(QIO3_B),
+ GPIO_FN(EX_CS1), GPIO_FN(RX3_B), GPIO_FN(ATACS0), GPIO_FN(QIO2_B),
+ GPIO_FN(ET0_ETXD0),
+ GPIO_FN(EX_CS2), GPIO_FN(TX3_B), GPIO_FN(ATACS1), GPIO_FN(QSPCLK_B),
+ GPIO_FN(ET0_GTX_CLK_A),
+ GPIO_FN(EX_CS3), GPIO_FN(SD1_CD_A), GPIO_FN(ATARD), GPIO_FN(QMO_QIO0_B),
+ GPIO_FN(ET0_ETXD1_A),
+ GPIO_FN(EX_CS4), GPIO_FN(SD1_WP_A), GPIO_FN(ATAWR), GPIO_FN(QMI_QIO1_B),
+ GPIO_FN(ET0_ETXD2_A),
+ GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
+ GPIO_FN(ET0_ETXD3_A),
+ GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
+ GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
+ GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
+ GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
+ GPIO_FN(EX_WAIT2), GPIO_FN(SD1_DAT1_A), GPIO_FN(DACK2),
+ GPIO_FN(CAN1_RX_C), GPIO_FN(ET0_MAGIC_C), GPIO_FN(ET0_ETXD6_A),
+ GPIO_FN(DRACK0), GPIO_FN(SD1_DAT2_A), GPIO_FN(ATAG), GPIO_FN(TCLK1_A),
+ GPIO_FN(ET0_ETXD7),
+
+ /* IPSR4 */
+ GPIO_FN(HCTS0_A), GPIO_FN(CTS1_A), GPIO_FN(VI0_FIELD),
+ GPIO_FN(RMII0_RXD1_A), GPIO_FN(ET0_ERXD7),
+ GPIO_FN(HRTS0_A), GPIO_FN(RTS1_A), GPIO_FN(VI0_HSYNC),
+ GPIO_FN(RMII0_TXD_EN_A), GPIO_FN(ET0_RX_DV),
+ GPIO_FN(HSCK0_A), GPIO_FN(SCK1_A), GPIO_FN(VI0_VSYNC),
+ GPIO_FN(RMII0_RX_ER_A), GPIO_FN(ET0_RX_ER),
+ GPIO_FN(HRX0_A), GPIO_FN(RX1_A), GPIO_FN(VI0_DATA0_VI0_B0),
+ GPIO_FN(RMII0_CRS_DV_A), GPIO_FN(ET0_CRS),
+ GPIO_FN(HTX0_A), GPIO_FN(TX1_A), GPIO_FN(VI0_DATA1_VI0_B1),
+ GPIO_FN(RMII0_MDC_A), GPIO_FN(ET0_COL),
+ GPIO_FN(CTS0_B), GPIO_FN(VI0_DATA2_VI0_B2), GPIO_FN(RMII0_MDIO_A),
+ GPIO_FN(ET0_MDC),
+ GPIO_FN(RTS0_B), GPIO_FN(VI0_DATA3_VI0_B3), GPIO_FN(ET0_MDIO_A),
+ GPIO_FN(SCK1_B), GPIO_FN(VI0_DATA4_VI0_B4), GPIO_FN(ET0_LINK_A),
+ GPIO_FN(RX1_B), GPIO_FN(VI0_DATA5_VI0_B5), GPIO_FN(ET0_MAGIC_A),
+ GPIO_FN(TX1_B), GPIO_FN(VI0_DATA6_VI0_G0), GPIO_FN(ET0_PHY_INT_A),
+ GPIO_FN(CTS1_B), GPIO_FN(VI0_DATA7_VI0_G1),
+ GPIO_FN(RTS1_B), GPIO_FN(VI0_G2),
+ GPIO_FN(SCK2_A), GPIO_FN(VI0_G3),
+
+ /* IPSR5 */
+ GPIO_FN(REF50CK), GPIO_FN(CTS1_E), GPIO_FN(HCTS0_D),
+ GPIO_FN(REF125CK), GPIO_FN(ADTRG), GPIO_FN(RX5_C),
+ GPIO_FN(SD2_WP_A), GPIO_FN(TX5_A), GPIO_FN(VI0_R5),
+ GPIO_FN(SD2_CD_A), GPIO_FN(RX5_A), GPIO_FN(VI0_R4),
+ GPIO_FN(ET0_PHY_INT_B),
+ GPIO_FN(SD2_DAT3_A), GPIO_FN(TX4_A), GPIO_FN(VI0_R3),
+ GPIO_FN(ET0_MAGIC_B),
+ GPIO_FN(SD2_DAT2_A), GPIO_FN(RX4_A), GPIO_FN(VI0_R2),
+ GPIO_FN(ET0_LINK_B),
+ GPIO_FN(SD2_DAT1_A), GPIO_FN(TX3_A), GPIO_FN(VI0_R1),
+ GPIO_FN(ET0_MDIO_B),
+ GPIO_FN(SD2_DAT0_A), GPIO_FN(RX3_A), GPIO_FN(VI0_R0),
+ GPIO_FN(ET0_ERXD3_B),
+ GPIO_FN(SD2_CMD_A), GPIO_FN(TX2_A), GPIO_FN(VI0_G5),
+ GPIO_FN(ET0_ERXD2_B),
+ GPIO_FN(SD2_CLK_A), GPIO_FN(RX2_A), GPIO_FN(VI0_G4),
+ GPIO_FN(ET0_RX_CLK_B),
+
+ /* IPSR6 */
+ GPIO_FN(DU0_DG1), GPIO_FN(CTS1_C), GPIO_FN(HRTS0_D),
+ GPIO_FN(TIOC1B_A), GPIO_FN(HIFD09),
+ GPIO_FN(DU0_DG0), GPIO_FN(TX1_C), GPIO_FN(HSCK0_D),
+ GPIO_FN(IECLK_A), GPIO_FN(TIOC1A_A), GPIO_FN(HIFD08),
+ GPIO_FN(DU0_DR7), GPIO_FN(RX1_C), GPIO_FN(TIOC0D_A),
+ GPIO_FN(HIFD07),
+ GPIO_FN(DU0_DR6), GPIO_FN(SCK1_C), GPIO_FN(TIOC0C_A),
+ GPIO_FN(HIFD06),
+ GPIO_FN(DU0_DR5), GPIO_FN(RTS0_C), GPIO_FN(TIOC0B_A),
+ GPIO_FN(HIFD05),
+ GPIO_FN(DU0_DR4), GPIO_FN(CTS0_C), GPIO_FN(TIOC0A_A),
+ GPIO_FN(HIFD04),
+ GPIO_FN(DU0_DR3), GPIO_FN(TX0_B), GPIO_FN(TCLKD_A), GPIO_FN(HIFD03),
+ GPIO_FN(DU0_DR2), GPIO_FN(RX0_B), GPIO_FN(TCLKC_A), GPIO_FN(HIFD02),
+ GPIO_FN(DU0_DR1), GPIO_FN(SCK0_B), GPIO_FN(HTX0_D),
+ GPIO_FN(IERX_A), GPIO_FN(TCLKB_A), GPIO_FN(HIFD01),
+ GPIO_FN(DU0_DR0), GPIO_FN(SCIF_CLK_B), GPIO_FN(HRX0_D),
+ GPIO_FN(IETX_A), GPIO_FN(TCLKA_A), GPIO_FN(HIFD00),
+
+ /* IPSR7 */
+ GPIO_FN(DU0_DB4), GPIO_FN(HIFINT),
+ GPIO_FN(DU0_DB3), GPIO_FN(TX5_B), GPIO_FN(TIOC4D_A), GPIO_FN(HIFRD),
+ GPIO_FN(DU0_DB2), GPIO_FN(RX5_B), GPIO_FN(RMII0_TXD1_B),
+ GPIO_FN(TIOC4C_A), GPIO_FN(HIFWR),
+ GPIO_FN(DU0_DB1), GPIO_FN(TX4_C), GPIO_FN(RMII0_TXD0_B),
+ GPIO_FN(TIOC4B_A), GPIO_FN(HIFRS),
+ GPIO_FN(DU0_DB0), GPIO_FN(RX4_C), GPIO_FN(RMII0_TXD_EN_B),
+ GPIO_FN(TIOC4A_A), GPIO_FN(HIFCS),
+ GPIO_FN(DU0_DG7), GPIO_FN(TX3_C), GPIO_FN(RMII0_RXD1_B),
+ GPIO_FN(TIOC3D_A), GPIO_FN(HIFD15),
+ GPIO_FN(DU0_DG6), GPIO_FN(RX3_C), GPIO_FN(RMII0_RXD0_B),
+ GPIO_FN(TIOC3C_A), GPIO_FN(HIFD14),
+ GPIO_FN(DU0_DG5), GPIO_FN(TX2_C), GPIO_FN(RMII0_RX_ER_B),
+ GPIO_FN(TIOC3B_A), GPIO_FN(HIFD13),
+ GPIO_FN(DU0_DG4), GPIO_FN(RX2_C), GPIO_FN(RMII0_CRS_DV_B),
+ GPIO_FN(TIOC3A_A), GPIO_FN(HIFD12),
+ GPIO_FN(DU0_DG3), GPIO_FN(SCK2_C), GPIO_FN(RMII0_MDIO_B),
+ GPIO_FN(TIOC2B_A), GPIO_FN(HIFD11),
+ GPIO_FN(DU0_DG2), GPIO_FN(RTS1_C), GPIO_FN(RMII0_MDC_B),
+ GPIO_FN(TIOC2A_A), GPIO_FN(HIFD10),
+
+ /* IPSR8 */
+ GPIO_FN(IRQ3_A), GPIO_FN(RTS0_A), GPIO_FN(HRTS0_B),
+ GPIO_FN(ET0_ERXD3_A),
+ GPIO_FN(IRQ2_A), GPIO_FN(CTS0_A), GPIO_FN(HCTS0_B),
+ GPIO_FN(ET0_ERXD2_A),
+ GPIO_FN(IRQ1_A), GPIO_FN(HSPI_RX_B), GPIO_FN(TX3_E),
+ GPIO_FN(ET0_ERXD1),
+ GPIO_FN(IRQ0_A), GPIO_FN(HSPI_TX_B), GPIO_FN(RX3_E),
+ GPIO_FN(ET0_ERXD0),
+ GPIO_FN(DU0_CDE), GPIO_FN(HTX0_B), GPIO_FN(AUDIO_CLKB_B),
+ GPIO_FN(LCD_VCPWC_B),
+ GPIO_FN(DU0_DISP), GPIO_FN(CAN0_TX_B), GPIO_FN(HRX0_B),
+ GPIO_FN(AUDIO_CLKA_B),
+ GPIO_FN(DU0_EXODDF_DU0_ODDF), GPIO_FN(CAN0_RX_B), GPIO_FN(HSCK0_B),
+ GPIO_FN(SSI_SDATA1_B),
+ GPIO_FN(DU0_EXVSYNC_DU0_VSYNC), GPIO_FN(HSPI_RX0_C),
+ GPIO_FN(SSI_WS1_B),
+ GPIO_FN(DU0_EXHSYNC_DU0_HSYNC), GPIO_FN(HSPI_TX0_C),
+ GPIO_FN(SSI_SCK1_B),
+ GPIO_FN(DU0_DOTCLKOUT), GPIO_FN(HSPI_CLK0_C),
+ GPIO_FN(SSI_SDATA0_B),
+ GPIO_FN(DU0_DOTCLKIN), GPIO_FN(HSPI_CS0_C),
+ GPIO_FN(SSI_WS0_B),
+ GPIO_FN(DU0_DB7), GPIO_FN(SSI_SCK0_B), GPIO_FN(HIFEBL_B),
+ GPIO_FN(DU0_DB6), GPIO_FN(HIFRDY),
+ GPIO_FN(DU0_DB5), GPIO_FN(HIFDREQ),
+
+ /* IPSR9 */
+ GPIO_FN(SSI_SDATA1_A), GPIO_FN(VI1_3_B), GPIO_FN(LCD_DATA14_B),
+ GPIO_FN(SSI_WS1_A), GPIO_FN(VI1_2_B), GPIO_FN(LCD_DATA13_B),
+ GPIO_FN(SSI_SCK1_A), GPIO_FN(VI1_1_B), GPIO_FN(TIOC2B_B),
+ GPIO_FN(LCD_DATA12_B),
+ GPIO_FN(SSI_SDATA0_A), GPIO_FN(VI1_0_B), GPIO_FN(TIOC2A_B),
+ GPIO_FN(LCD_DATA11_B),
+ GPIO_FN(SSI_WS0_A), GPIO_FN(TIOC1B_B), GPIO_FN(LCD_DATA10_B),
+ GPIO_FN(SSI_SCK0_A), GPIO_FN(TIOC1A_B), GPIO_FN(LCD_DATA9_B),
+ GPIO_FN(VI1_7_A), GPIO_FN(FCE_B), GPIO_FN(LCD_DATA8_B),
+ GPIO_FN(VI1_6_A), GPIO_FN(FD7_B), GPIO_FN(LCD_DATA7_B),
+ GPIO_FN(VI1_5_A), GPIO_FN(FD6_B), GPIO_FN(LCD_DATA6_B),
+ GPIO_FN(VI1_4_A), GPIO_FN(FD5_B), GPIO_FN(LCD_DATA5_B),
+ GPIO_FN(VI1_3_A), GPIO_FN(FD4_B), GPIO_FN(LCD_DATA4_B),
+ GPIO_FN(VI1_2_A), GPIO_FN(FD3_B), GPIO_FN(LCD_DATA3_B),
+ GPIO_FN(VI1_1_A), GPIO_FN(FD2_B), GPIO_FN(LCD_DATA2_B),
+ GPIO_FN(VI1_0_A), GPIO_FN(FD1_B), GPIO_FN(LCD_DATA1_B),
+ GPIO_FN(VI1_CLK_A), GPIO_FN(FD0_B), GPIO_FN(LCD_DATA0_B),
+
+ /* IPSR10 */
+ GPIO_FN(CAN1_TX_A), GPIO_FN(TX5_C), GPIO_FN(MLB_DAT),
+ GPIO_FN(CAN0_RX_A), GPIO_FN(IRQ0_B), GPIO_FN(MLB_SIG),
+ GPIO_FN(CAN1_RX_A), GPIO_FN(IRQ1_B),
+ GPIO_FN(CAN0_TX_A), GPIO_FN(TX4_D), GPIO_FN(MLB_CLK),
+ GPIO_FN(CAN_CLK_A), GPIO_FN(RX4_D),
+ GPIO_FN(AUDIO_CLKOUT), GPIO_FN(TX1_E), GPIO_FN(HRTS0_C),
+ GPIO_FN(FSE_B), GPIO_FN(LCD_M_DISP_B),
+ GPIO_FN(AUDIO_CLKC), GPIO_FN(SCK1_E), GPIO_FN(HCTS0_C),
+ GPIO_FN(FRB_B), GPIO_FN(LCD_VEPWC_B),
+ GPIO_FN(AUDIO_CLKB_A), GPIO_FN(LCD_CLK_B),
+ GPIO_FN(AUDIO_CLKA_A), GPIO_FN(VI1_CLK_B), GPIO_FN(SCK1_D),
+ GPIO_FN(IECLK_B), GPIO_FN(LCD_FLM_B),
+ GPIO_FN(SSI_SDATA3), GPIO_FN(VI1_7_B), GPIO_FN(HTX0_C),
+ GPIO_FN(FWE_B), GPIO_FN(LCD_CL2_B),
+ GPIO_FN(SSI_SDATA2), GPIO_FN(VI1_6_B), GPIO_FN(HRX0_C),
+ GPIO_FN(FRE_B), GPIO_FN(LCD_CL1_B),
+ GPIO_FN(SSI_WS23), GPIO_FN(VI1_5_B), GPIO_FN(TX1_D),
+ GPIO_FN(HSCK0_C), GPIO_FN(FALE_B), GPIO_FN(LCD_DON_B),
+ GPIO_FN(SSI_SCK23), GPIO_FN(VI1_4_B), GPIO_FN(RX1_D),
+ GPIO_FN(FCLE_B), GPIO_FN(LCD_DATA15_B),
+
+ /* IPSR11 */
+ GPIO_FN(PRESETOUT), GPIO_FN(ST_CLKOUT),
+ GPIO_FN(DACK1), GPIO_FN(HSPI_CS_B), GPIO_FN(TX4_B),
+ GPIO_FN(ET0_RX_CLK_A),
+ GPIO_FN(DREQ1), GPIO_FN(HSPI_CLK_B), GPIO_FN(RX4_B),
+ GPIO_FN(ET0_PHY_INT_C), GPIO_FN(ET0_TX_CLK_A),
+ GPIO_FN(DACK0), GPIO_FN(SD1_DAT3_A), GPIO_FN(ET0_TX_ER),
+ GPIO_FN(DREQ0), GPIO_FN(SD1_CLK_A), GPIO_FN(ET0_TX_EN),
+ GPIO_FN(USB_OVC1), GPIO_FN(RX3_D), GPIO_FN(CAN1_RX_B),
+ GPIO_FN(RX5_D), GPIO_FN(IERX_B),
+ GPIO_FN(PENC1), GPIO_FN(TX3_D), GPIO_FN(CAN1_TX_B),
+ GPIO_FN(TX5_D), GPIO_FN(IETX_B),
+ GPIO_FN(TX0_A), GPIO_FN(HSPI_TX_A),
+ GPIO_FN(RX0_A), GPIO_FN(HSPI_RX_A), GPIO_FN(RMII0_RXD0_A),
+ GPIO_FN(ET0_ERXD6),
+ GPIO_FN(SCK0_A), GPIO_FN(HSPI_CS_A), GPIO_FN(VI0_CLKENB),
+ GPIO_FN(RMII0_TXD1_A), GPIO_FN(ET0_ERXD5),
+ GPIO_FN(SCIF_CLK_A), GPIO_FN(HSPI_CLK_A), GPIO_FN(VI0_CLK),
+ GPIO_FN(RMII0_TXD0_A), GPIO_FN(ET0_ERXD4),
+ GPIO_FN(SDSELF), GPIO_FN(RTS1_E),
+ GPIO_FN(SDA0), GPIO_FN(HIFEBL_A),
+ GPIO_FN(SDA1), GPIO_FN(RX1_E),
+ GPIO_FN(SCL1), GPIO_FN(SCIF_CLK_C),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xFFFC0004, 32, 1) {
+ GP_0_31_FN, FN_IP2_2_0,
+ GP_0_30_FN, FN_IP1_31_29,
+ GP_0_29_FN, FN_IP1_28_26,
+ GP_0_28_FN, FN_IP1_25_23,
+ GP_0_27_FN, FN_IP1_22_20,
+ GP_0_26_FN, FN_IP1_19_18,
+ GP_0_25_FN, FN_IP1_17_16,
+ GP_0_24_FN, FN_IP0_5_4,
+ GP_0_23_FN, FN_IP0_3_2,
+ GP_0_22_FN, FN_IP0_1_0,
+ GP_0_21_FN, FN_IP11_28,
+ GP_0_20_FN, FN_IP1_7_6,
+ GP_0_19_FN, FN_IP1_5_4,
+ GP_0_18_FN, FN_IP1_3_2,
+ GP_0_17_FN, FN_IP1_1_0,
+ GP_0_16_FN, FN_IP0_31_30,
+ GP_0_15_FN, FN_IP0_29_28,
+ GP_0_14_FN, FN_IP0_27_26,
+ GP_0_13_FN, FN_IP0_25_24,
+ GP_0_12_FN, FN_IP0_23_22,
+ GP_0_11_FN, FN_IP0_21_20,
+ GP_0_10_FN, FN_IP0_19_18,
+ GP_0_9_FN, FN_IP0_17_16,
+ GP_0_8_FN, FN_IP0_15_14,
+ GP_0_7_FN, FN_IP0_13_12,
+ GP_0_6_FN, FN_IP0_11_10,
+ GP_0_5_FN, FN_IP0_9_8,
+ GP_0_4_FN, FN_IP0_7_6,
+ GP_0_3_FN, FN_IP1_15_14,
+ GP_0_2_FN, FN_IP1_13_12,
+ GP_0_1_FN, FN_IP1_11_10,
+ GP_0_0_FN, FN_IP1_9_8 }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xFFFC0008, 32, 1) {
+ GP_1_31_FN, FN_IP11_25_23,
+ GP_1_30_FN, FN_IP2_13_11,
+ GP_1_29_FN, FN_IP2_10_8,
+ GP_1_28_FN, FN_IP2_7_5,
+ GP_1_27_FN, FN_IP3_26_24,
+ GP_1_26_FN, FN_IP3_23_21,
+ GP_1_25_FN, FN_IP2_4_3,
+ GP_1_24_FN, FN_WE1,
+ GP_1_23_FN, FN_WE0,
+ GP_1_22_FN, FN_IP3_19_18,
+ GP_1_21_FN, FN_RD,
+ GP_1_20_FN, FN_IP3_17_15,
+ GP_1_19_FN, FN_IP3_14_12,
+ GP_1_18_FN, FN_IP3_11_9,
+ GP_1_17_FN, FN_IP3_8_6,
+ GP_1_16_FN, FN_IP3_5_3,
+ GP_1_15_FN, FN_EX_CS0,
+ GP_1_14_FN, FN_IP3_2,
+ GP_1_13_FN, FN_CS0,
+ GP_1_12_FN, FN_BS,
+ GP_1_11_FN, FN_CLKOUT,
+ GP_1_10_FN, FN_IP3_1_0,
+ GP_1_9_FN, FN_IP2_30_28,
+ GP_1_8_FN, FN_IP2_27_25,
+ GP_1_7_FN, FN_IP2_24_23,
+ GP_1_6_FN, FN_IP2_22_20,
+ GP_1_5_FN, FN_IP2_19_17,
+ GP_1_4_FN, FN_IP2_16_14,
+ GP_1_3_FN, FN_IP11_22_21,
+ GP_1_2_FN, FN_IP11_20_19,
+ GP_1_1_FN, FN_IP3_29_27,
+ GP_1_0_FN, FN_IP3_20 }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xFFFC000C, 32, 1) {
+ GP_2_31_FN, FN_IP4_31_30,
+ GP_2_30_FN, FN_IP5_2_0,
+ GP_2_29_FN, FN_IP5_5_3,
+ GP_2_28_FN, FN_IP5_8_6,
+ GP_2_27_FN, FN_IP5_11_9,
+ GP_2_26_FN, FN_IP5_14_12,
+ GP_2_25_FN, FN_IP5_17_15,
+ GP_2_24_FN, FN_IP5_20_18,
+ GP_2_23_FN, FN_IP5_22_21,
+ GP_2_22_FN, FN_IP5_24_23,
+ GP_2_21_FN, FN_IP5_26_25,
+ GP_2_20_FN, FN_IP4_29_28,
+ GP_2_19_FN, FN_IP4_27_26,
+ GP_2_18_FN, FN_IP4_25_24,
+ GP_2_17_FN, FN_IP4_23_22,
+ GP_2_16_FN, FN_IP4_21_20,
+ GP_2_15_FN, FN_IP4_19_18,
+ GP_2_14_FN, FN_IP4_17_15,
+ GP_2_13_FN, FN_IP4_14_12,
+ GP_2_12_FN, FN_IP4_11_9,
+ GP_2_11_FN, FN_IP4_8_6,
+ GP_2_10_FN, FN_IP4_5_3,
+ GP_2_9_FN, FN_IP8_27_26,
+ GP_2_8_FN, FN_IP11_12,
+ GP_2_7_FN, FN_IP8_25_23,
+ GP_2_6_FN, FN_IP8_22_20,
+ GP_2_5_FN, FN_IP11_27_26,
+ GP_2_4_FN, FN_IP8_29_28,
+ GP_2_3_FN, FN_IP4_2_0,
+ GP_2_2_FN, FN_IP11_11_10,
+ GP_2_1_FN, FN_IP11_9_7,
+ GP_2_0_FN, FN_IP11_6_4 }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xFFFC0010, 32, 1) {
+ GP_3_31_FN, FN_IP9_1_0,
+ GP_3_30_FN, FN_IP8_19_18,
+ GP_3_29_FN, FN_IP8_17_16,
+ GP_3_28_FN, FN_IP8_15_14,
+ GP_3_27_FN, FN_IP8_13_12,
+ GP_3_26_FN, FN_IP8_11_10,
+ GP_3_25_FN, FN_IP8_9_8,
+ GP_3_24_FN, FN_IP8_7_6,
+ GP_3_23_FN, FN_IP8_5_4,
+ GP_3_22_FN, FN_IP8_3_2,
+ GP_3_21_FN, FN_IP8_1_0,
+ GP_3_20_FN, FN_IP7_30_29,
+ GP_3_19_FN, FN_IP7_28_27,
+ GP_3_18_FN, FN_IP7_26_24,
+ GP_3_17_FN, FN_IP7_23_21,
+ GP_3_16_FN, FN_IP7_20_18,
+ GP_3_15_FN, FN_IP7_17_15,
+ GP_3_14_FN, FN_IP7_14_12,
+ GP_3_13_FN, FN_IP7_11_9,
+ GP_3_12_FN, FN_IP7_8_6,
+ GP_3_11_FN, FN_IP7_5_3,
+ GP_3_10_FN, FN_IP7_2_0,
+ GP_3_9_FN, FN_IP6_23_21,
+ GP_3_8_FN, FN_IP6_20_18,
+ GP_3_7_FN, FN_IP6_17_16,
+ GP_3_6_FN, FN_IP6_15_14,
+ GP_3_5_FN, FN_IP6_13_12,
+ GP_3_4_FN, FN_IP6_11_10,
+ GP_3_3_FN, FN_IP6_9_8,
+ GP_3_2_FN, FN_IP6_7_6,
+ GP_3_1_FN, FN_IP6_5_3,
+ GP_3_0_FN, FN_IP6_2_0 }
+ },
+
+ { PINMUX_CFG_REG("GPSR4", 0xFFFC0014, 32, 1) {
+ GP_4_31_FN, FN_IP10_24_23,
+ GP_4_30_FN, FN_IP10_22,
+ GP_4_29_FN, FN_IP11_18_16,
+ GP_4_28_FN, FN_USB_OVC0,
+ GP_4_27_FN, FN_IP11_15_13,
+ GP_4_26_FN, FN_PENC0,
+ GP_4_25_FN, FN_IP11_2,
+ GP_4_24_FN, FN_SCL0,
+ GP_4_23_FN, FN_IP11_1,
+ GP_4_22_FN, FN_IP11_0,
+ GP_4_21_FN, FN_IP10_21_19,
+ GP_4_20_FN, FN_IP10_18_16,
+ GP_4_19_FN, FN_IP10_15,
+ GP_4_18_FN, FN_IP10_14_12,
+ GP_4_17_FN, FN_IP10_11_9,
+ GP_4_16_FN, FN_IP10_8_6,
+ GP_4_15_FN, FN_IP10_5_3,
+ GP_4_14_FN, FN_IP10_2_0,
+ GP_4_13_FN, FN_IP9_29_28,
+ GP_4_12_FN, FN_IP9_27_26,
+ GP_4_11_FN, FN_IP9_9_8,
+ GP_4_10_FN, FN_IP9_7_6,
+ GP_4_9_FN, FN_IP9_5_4,
+ GP_4_8_FN, FN_IP9_3_2,
+ GP_4_7_FN, FN_IP9_17_16,
+ GP_4_6_FN, FN_IP9_15_14,
+ GP_4_5_FN, FN_IP9_13_12,
+ GP_4_4_FN, FN_IP9_11_10,
+ GP_4_3_FN, FN_IP9_25_24,
+ GP_4_2_FN, FN_IP9_23_22,
+ GP_4_1_FN, FN_IP9_21_20,
+ GP_4_0_FN, FN_IP9_19_18 }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xFFFC0018, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 28 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 27 - 24 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 20 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 19 - 16 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */
+ GP_5_11_FN, FN_IP10_29_28,
+ GP_5_10_FN, FN_IP10_27_26,
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 9 - 6 */
+ 0, 0, 0, 0, /* 5, 4 */
+ GP_5_3_FN, FN_IRQ3_B,
+ GP_5_2_FN, FN_IRQ2_B,
+ GP_5_1_FN, FN_IP11_3,
+ GP_5_0_FN, FN_IP10_25 }
+ },
+
+ { PINMUX_CFG_REG_VAR("IPSR0", 0xFFFC001C, 32,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* IP0_31_30 [2] */
+ FN_A15, FN_ST0_VCO_CLKIN, FN_LCD_DATA15_A,
+ FN_TIOC3D_C,
+ /* IP0_29_28 [2] */
+ FN_A14, FN_LCD_DATA14_A, FN_TIOC3C_C, 0,
+ /* IP0_27_26 [2] */
+ FN_A13, FN_LCD_DATA13_A, FN_TIOC3B_C, 0,
+ /* IP0_25_24 [2] */
+ FN_A12, FN_LCD_DATA12_A, FN_TIOC3A_C, 0,
+ /* IP0_23_22 [2] */
+ FN_A11, FN_ST0_D7, FN_LCD_DATA11_A, FN_TIOC2B_C,
+ /* IP0_21_20 [2] */
+ FN_A10, FN_ST0_D6, FN_LCD_DATA10_A, FN_TIOC2A_C,
+ /* IP0_19_18 [2] */
+ FN_A9, FN_ST0_D5, FN_LCD_DATA9_A, FN_TIOC1B_C,
+ /* IP0_17_16 [2] */
+ FN_A8, FN_ST0_D4, FN_LCD_DATA8_A, FN_TIOC1A_C,
+ /* IP0_15_14 [2] */
+ FN_A7, FN_ST0_D3, FN_LCD_DATA7_A, FN_TIOC0D_C,
+ /* IP0_13_12 [2] */
+ FN_A6, FN_ST0_D2, FN_LCD_DATA6_A, FN_TIOC0C_C,
+ /* IP0_11_10 [2] */
+ FN_A5, FN_ST0_D1, FN_LCD_DATA5_A, FN_TIOC0B_C,
+ /* IP0_9_8 [2] */
+ FN_A4, FN_ST0_D0, FN_LCD_DATA4_A, FN_TIOC0A_C,
+ /* IP0_7_6 [2] */
+ FN_A3, FN_ST0_VLD, FN_LCD_DATA3_A, FN_TCLKD_C,
+ /* IP0_5_4 [2] */
+ FN_A2, FN_ST0_SYC, FN_LCD_DATA2_A, FN_TCLKC_C,
+ /* IP0_3_2 [2] */
+ FN_A1, FN_ST0_REQ, FN_LCD_DATA1_A, FN_TCLKB_C,
+ /* IP0_1_0 [2] */
+ FN_A0, FN_ST0_CLKIN, FN_LCD_DATA0_A, FN_TCLKA_C }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR1", 0xFFFC0020, 32,
+ 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* IP1_31_29 [3] */
+ FN_D3, FN_SD0_DAT3_A, FN_MMC_D3_A, FN_ST1_D6,
+ FN_FD3_A, 0, 0, 0,
+ /* IP1_28_26 [3] */
+ FN_D2, FN_SD0_DAT2_A, FN_MMC_D2_A, FN_ST1_D5,
+ FN_FD2_A, 0, 0, 0,
+ /* IP1_25_23 [3] */
+ FN_D1, FN_SD0_DAT1_A, FN_MMC_D1_A, FN_ST1_D4,
+ FN_FD1_A, 0, 0, 0,
+ /* IP1_22_20 [3] */
+ FN_D0, FN_SD0_DAT0_A, FN_MMC_D0_A, FN_ST1_D3,
+ FN_FD0_A, 0, 0, 0,
+ /* IP1_19_18 [2] */
+ FN_A25, FN_TX2_D, FN_ST1_D2, 0,
+ /* IP1_17_16 [2] */
+ FN_A24, FN_RX2_D, FN_ST1_D1, 0,
+ /* IP1_15_14 [2] */
+ FN_A23, FN_ST1_D0, FN_LCD_M_DISP_A, 0,
+ /* IP1_13_12 [2] */
+ FN_A22, FN_ST1_VLD, FN_LCD_VEPWC_A, 0,
+ /* IP1_11_10 [2] */
+ FN_A21, FN_ST1_SYC, FN_LCD_VCPWC_A, 0,
+ /* IP1_9_8 [2] */
+ FN_A20, FN_ST1_REQ, FN_LCD_FLM_A, 0,
+ /* IP1_7_6 [2] */
+ FN_A19, FN_ST1_CLKIN, FN_LCD_CLK_A, FN_TIOC4D_C,
+ /* IP1_5_4 [2] */
+ FN_A18, FN_ST1_PWM, FN_LCD_CL2_A, FN_TIOC4C_C,
+ /* IP1_3_2 [2] */
+ FN_A17, FN_ST1_VCO_CLKIN, FN_LCD_CL1_A, FN_TIOC4B_C,
+ /* IP1_1_0 [2] */
+ FN_A16, FN_ST0_PWM, FN_LCD_DON_A, FN_TIOC4A_C }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR2", 0xFFFC0024, 32,
+ 1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3) {
+ /* IP2_31 [1] */
+ 0, 0,
+ /* IP2_30_28 [3] */
+ FN_D14, FN_TX2_B, 0, FN_FSE_A,
+ FN_ET0_TX_CLK_B, 0, 0, 0,
+ /* IP2_27_25 [3] */
+ FN_D13, FN_RX2_B, 0, FN_FRB_A,
+ FN_ET0_ETXD6_B, 0, 0, 0,
+ /* IP2_24_23 [2] */
+ FN_D12, 0, FN_FWE_A, FN_ET0_ETXD5_B,
+ /* IP2_22_20 [3] */
+ FN_D11, FN_RSPI_MISO_A, 0, FN_QMI_QIO1_A,
+ FN_FRE_A, FN_ET0_ETXD3_B, 0, 0,
+ /* IP2_19_17 [3] */
+ FN_D10, FN_RSPI_MOSI_A, 0, FN_QMO_QIO0_A,
+ FN_FALE_A, FN_ET0_ETXD2_B, 0, 0,
+ /* IP2_16_14 [3] */
+ FN_D9, FN_SD0_CMD_A, FN_MMC_CMD_A, FN_QIO3_A,
+ FN_FCLE_A, FN_ET0_ETXD1_B, 0, 0,
+ /* IP2_13_11 [3] */
+ FN_D8, FN_SD0_CLK_A, FN_MMC_CLK_A, FN_QIO2_A,
+ FN_FCE_A, FN_ET0_GTX_CLK_B, 0, 0,
+ /* IP2_10_8 [3] */
+ FN_D7, FN_RSPI_SSL_A, FN_MMC_D7_A, FN_QSSL_A,
+ FN_FD7_A, 0, 0, 0,
+ /* IP2_7_5 [3] */
+ FN_D6, FN_RSPI_RSPCK_A, FN_MMC_D6_A, FN_QSPCLK_A,
+ FN_FD6_A, 0, 0, 0,
+ /* IP2_4_3 [2] */
+ FN_D5, FN_SD0_WP_A, FN_MMC_D5_A, FN_FD5_A,
+ /* IP2_2_0 [3] */
+ FN_D4, FN_SD0_CD_A, FN_MMC_D4_A, FN_ST1_D7,
+ FN_FD4_A, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR3", 0xFFFC0028, 32,
+ 2, 3, 3, 3, 1, 2, 3, 3, 3, 3, 3, 1, 2) {
+ /* IP3_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP3_29_27 [3] */
+ FN_DRACK0, FN_SD1_DAT2_A, FN_ATAG, FN_TCLK1_A,
+ FN_ET0_ETXD7, 0, 0, 0,
+ /* IP3_26_24 [3] */
+ FN_EX_WAIT2, FN_SD1_DAT1_A, FN_DACK2, FN_CAN1_RX_C,
+ FN_ET0_MAGIC_C, FN_ET0_ETXD6_A, 0, 0,
+ /* IP3_23_21 [3] */
+ FN_EX_WAIT1, FN_SD1_DAT0_A, FN_DREQ2, FN_CAN1_TX_C,
+ FN_ET0_LINK_C, FN_ET0_ETXD5_A, 0, 0,
+ /* IP3_20 [1] */
+ FN_EX_WAIT0, FN_TCLK1_B,
+ /* IP3_19_18 [2] */
+ FN_RD_WR, FN_TCLK1_B, 0, 0,
+ /* IP3_17_15 [3] */
+ FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
+ FN_ET0_ETXD3_A, 0, 0, 0,
+ /* IP3_14_12 [3] */
+ FN_EX_CS4, FN_SD1_WP_A, FN_ATAWR, FN_QMI_QIO1_B,
+ FN_ET0_ETXD2_A, 0, 0, 0,
+ /* IP3_11_9 [3] */
+ FN_EX_CS3, FN_SD1_CD_A, FN_ATARD, FN_QMO_QIO0_B,
+ FN_ET0_ETXD1_A, 0, 0, 0,
+ /* IP3_8_6 [3] */
+ FN_EX_CS2, FN_TX3_B, FN_ATACS1, FN_QSPCLK_B,
+ FN_ET0_GTX_CLK_A, 0, 0, 0,
+ /* IP3_5_3 [3] */
+ FN_EX_CS1, FN_RX3_B, FN_ATACS0, FN_QIO2_B,
+ FN_ET0_ETXD0, 0, 0, 0,
+ /* IP3_2 [1] */
+ FN_CS1_A26, FN_QIO3_B,
+ /* IP3_1_0 [2] */
+ FN_D15, FN_SCK2_B, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR4", 0xFFFC002C, 32,
+			2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3) {
+ /* IP4_31_30 [2] */
+ 0, FN_SCK2_A, FN_VI0_G3, 0,
+ /* IP4_29_28 [2] */
+ 0, FN_RTS1_B, FN_VI0_G2, 0,
+ /* IP4_27_26 [2] */
+ 0, FN_CTS1_B, FN_VI0_DATA7_VI0_G1, 0,
+ /* IP4_25_24 [2] */
+ 0, FN_TX1_B, FN_VI0_DATA6_VI0_G0, FN_ET0_PHY_INT_A,
+ /* IP4_23_22 [2] */
+ 0, FN_RX1_B, FN_VI0_DATA5_VI0_B5, FN_ET0_MAGIC_A,
+ /* IP4_21_20 [2] */
+ 0, FN_SCK1_B, FN_VI0_DATA4_VI0_B4, FN_ET0_LINK_A,
+ /* IP4_19_18 [2] */
+ 0, FN_RTS0_B, FN_VI0_DATA3_VI0_B3, FN_ET0_MDIO_A,
+ /* IP4_17_15 [3] */
+ 0, FN_CTS0_B, FN_VI0_DATA2_VI0_B2, FN_RMII0_MDIO_A,
+ FN_ET0_MDC, 0, 0, 0,
+ /* IP4_14_12 [3] */
+ FN_HTX0_A, FN_TX1_A, FN_VI0_DATA1_VI0_B1, FN_RMII0_MDC_A,
+ FN_ET0_COL, 0, 0, 0,
+ /* IP4_11_9 [3] */
+ FN_HRX0_A, FN_RX1_A, FN_VI0_DATA0_VI0_B0, FN_RMII0_CRS_DV_A,
+ FN_ET0_CRS, 0, 0, 0,
+ /* IP4_8_6 [3] */
+ FN_HSCK0_A, FN_SCK1_A, FN_VI0_VSYNC, FN_RMII0_RX_ER_A,
+ FN_ET0_RX_ER, 0, 0, 0,
+ /* IP4_5_3 [3] */
+ FN_HRTS0_A, FN_RTS1_A, FN_VI0_HSYNC, FN_RMII0_TXD_EN_A,
+ FN_ET0_RX_DV, 0, 0, 0,
+ /* IP4_2_0 [3] */
+ FN_HCTS0_A, FN_CTS1_A, FN_VI0_FIELD, FN_RMII0_RXD1_A,
+ FN_ET0_ERXD7, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR5", 0xFFFC0030, 32,
+ 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP5_31 [1] */
+ 0, 0,
+ /* IP5_30 [1] */
+ 0, 0,
+ /* IP5_29 [1] */
+ 0, 0,
+ /* IP5_28 [1] */
+ 0, 0,
+ /* IP5_27 [1] */
+ 0, 0,
+ /* IP5_26_25 [2] */
+ FN_REF50CK, FN_CTS1_E, FN_HCTS0_D, 0,
+ /* IP5_24_23 [2] */
+ FN_REF125CK, FN_ADTRG, FN_RX5_C, 0,
+ /* IP5_22_21 [2] */
+ FN_SD2_WP_A, FN_TX5_A, FN_VI0_R5, 0,
+ /* IP5_20_18 [3] */
+ FN_SD2_CD_A, FN_RX5_A, FN_VI0_R4, 0,
+ 0, 0, 0, FN_ET0_PHY_INT_B,
+ /* IP5_17_15 [3] */
+ FN_SD2_DAT3_A, FN_TX4_A, FN_VI0_R3, 0,
+ 0, 0, 0, FN_ET0_MAGIC_B,
+ /* IP5_14_12 [3] */
+ FN_SD2_DAT2_A, FN_RX4_A, FN_VI0_R2, 0,
+ 0, 0, 0, FN_ET0_LINK_B,
+ /* IP5_11_9 [3] */
+ FN_SD2_DAT1_A, FN_TX3_A, FN_VI0_R1, 0,
+ 0, 0, 0, FN_ET0_MDIO_B,
+ /* IP5_8_6 [3] */
+ FN_SD2_DAT0_A, FN_RX3_A, FN_VI0_R0, 0,
+ 0, 0, 0, FN_ET0_ERXD3_B,
+ /* IP5_5_3 [3] */
+ FN_SD2_CMD_A, FN_TX2_A, FN_VI0_G5, 0,
+ 0, 0, 0, FN_ET0_ERXD2_B,
+ /* IP5_2_0 [3] */
+ FN_SD2_CLK_A, FN_RX2_A, FN_VI0_G4, 0,
+ FN_ET0_RX_CLK_B, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR6", 0xFFFC0034, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 3, 3, 2, 2, 2, 2, 2, 2, 3, 3) {
+	/* IP6_31 [1] */
+ 0, 0,
+ /* IP6_30 [1] */
+ 0, 0,
+ /* IP6_29 [1] */
+ 0, 0,
+ /* IP6_28 [1] */
+ 0, 0,
+ /* IP6_27 [1] */
+ 0, 0,
+ /* IP6_26 [1] */
+ 0, 0,
+ /* IP6_25 [1] */
+ 0, 0,
+ /* IP6_24 [1] */
+ 0, 0,
+ /* IP6_23_21 [3] */
+ FN_DU0_DG1, FN_CTS1_C, FN_HRTS0_D, FN_TIOC1B_A,
+ FN_HIFD09, 0, 0, 0,
+ /* IP6_20_18 [3] */
+ FN_DU0_DG0, FN_TX1_C, FN_HSCK0_D, FN_IECLK_A,
+ FN_TIOC1A_A, FN_HIFD08, 0, 0,
+ /* IP6_17_16 [2] */
+ FN_DU0_DR7, FN_RX1_C, FN_TIOC0D_A, FN_HIFD07,
+ /* IP6_15_14 [2] */
+ FN_DU0_DR6, FN_SCK1_C, FN_TIOC0C_A, FN_HIFD06,
+ /* IP6_13_12 [2] */
+ FN_DU0_DR5, FN_RTS0_C, FN_TIOC0B_A, FN_HIFD05,
+ /* IP6_11_10 [2] */
+ FN_DU0_DR4, FN_CTS0_C, FN_TIOC0A_A, FN_HIFD04,
+ /* IP6_9_8 [2] */
+ FN_DU0_DR3, FN_TX0_B, FN_TCLKD_A, FN_HIFD03,
+ /* IP6_7_6 [2] */
+ FN_DU0_DR2, FN_RX0_B, FN_TCLKC_A, FN_HIFD02,
+ /* IP6_5_3 [3] */
+ FN_DU0_DR1, FN_SCK0_B, FN_HTX0_D, FN_IERX_A,
+ FN_TCLKB_A, FN_HIFD01, 0, 0,
+ /* IP6_2_0 [3] */
+ FN_DU0_DR0, FN_SCIF_CLK_B, FN_HRX0_D, FN_IETX_A,
+ FN_TCLKA_A, FN_HIFD00, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xFFFC0038, 32,
+ 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP7_31 [1] */
+ 0, 0,
+ /* IP7_30_29 [2] */
+ FN_DU0_DB4, 0, FN_HIFINT, 0,
+ /* IP7_28_27 [2] */
+ FN_DU0_DB3, FN_TX5_B, FN_TIOC4D_A, FN_HIFRD,
+ /* IP7_26_24 [3] */
+ FN_DU0_DB2, FN_RX5_B, FN_RMII0_TXD1_B, FN_TIOC4C_A,
+ FN_HIFWR, 0, 0, 0,
+ /* IP7_23_21 [3] */
+ FN_DU0_DB1, FN_TX4_C, FN_RMII0_TXD0_B, FN_TIOC4B_A,
+ FN_HIFRS, 0, 0, 0,
+ /* IP7_20_18 [3] */
+ FN_DU0_DB0, FN_RX4_C, FN_RMII0_TXD_EN_B, FN_TIOC4A_A,
+ FN_HIFCS, 0, 0, 0,
+ /* IP7_17_15 [3] */
+ FN_DU0_DG7, FN_TX3_C, FN_RMII0_RXD1_B, FN_TIOC3D_A,
+ FN_HIFD15, 0, 0, 0,
+ /* IP7_14_12 [3] */
+ FN_DU0_DG6, FN_RX3_C, FN_RMII0_RXD0_B, FN_TIOC3C_A,
+ FN_HIFD14, 0, 0, 0,
+ /* IP7_11_9 [3] */
+ FN_DU0_DG5, FN_TX2_C, FN_RMII0_RX_ER_B, FN_TIOC3B_A,
+ FN_HIFD13, 0, 0, 0,
+ /* IP7_8_6 [3] */
+ FN_DU0_DG4, FN_RX2_C, FN_RMII0_CRS_DV_B, FN_TIOC3A_A,
+ FN_HIFD12, 0, 0, 0,
+ /* IP7_5_3 [3] */
+ FN_DU0_DG3, FN_SCK2_C, FN_RMII0_MDIO_B, FN_TIOC2B_A,
+ FN_HIFD11, 0, 0, 0,
+ /* IP7_2_0 [3] */
+ FN_DU0_DG2, FN_RTS1_C, FN_RMII0_MDC_B, FN_TIOC2A_A,
+ FN_HIFD10, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR8", 0xFFFC003C, 32,
+ 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+	/* IP8_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP8_29_28 [2] */
+ FN_IRQ3_A, FN_RTS0_A, FN_HRTS0_B, FN_ET0_ERXD3_A,
+ /* IP8_27_26 [2] */
+ FN_IRQ2_A, FN_CTS0_A, FN_HCTS0_B, FN_ET0_ERXD2_A,
+ /* IP8_25_23 [3] */
+ FN_IRQ1_A, 0, FN_HSPI_RX_B, FN_TX3_E,
+ FN_ET0_ERXD1, 0, 0, 0,
+ /* IP8_22_20 [3] */
+ FN_IRQ0_A, 0, FN_HSPI_TX_B, FN_RX3_E,
+ FN_ET0_ERXD0, 0, 0, 0,
+ /* IP8_19_18 [2] */
+ FN_DU0_CDE, FN_HTX0_B, FN_AUDIO_CLKB_B, FN_LCD_VCPWC_B,
+ /* IP8_17_16 [2] */
+ FN_DU0_DISP, FN_CAN0_TX_B, FN_HRX0_B, FN_AUDIO_CLKA_B,
+ /* IP8_15_14 [2] */
+ FN_DU0_EXODDF_DU0_ODDF, FN_CAN0_RX_B, FN_HSCK0_B,
+ FN_SSI_SDATA1_B,
+ /* IP8_13_12 [2] */
+ FN_DU0_EXVSYNC_DU0_VSYNC, 0, FN_HSPI_RX0_C, FN_SSI_WS1_B,
+ /* IP8_11_10 [2] */
+ FN_DU0_EXHSYNC_DU0_HSYNC, 0, FN_HSPI_TX0_C, FN_SSI_SCK1_B,
+ /* IP8_9_8 [2] */
+ FN_DU0_DOTCLKOUT, 0, FN_HSPI_CLK0_C, FN_SSI_SDATA0_B,
+ /* IP8_7_6 [2] */
+ FN_DU0_DOTCLKIN, 0, FN_HSPI_CS0_C, FN_SSI_WS0_B,
+ /* IP8_5_4 [2] */
+ FN_DU0_DB7, 0, FN_SSI_SCK0_B, FN_HIFEBL_B,
+ /* IP8_3_2 [2] */
+ FN_DU0_DB6, 0, FN_HIFRDY, 0,
+ /* IP8_1_0 [2] */
+ FN_DU0_DB5, 0, FN_HIFDREQ, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR9", 0xFFFC0040, 32,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* IP9_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP9_29_28 [2] */
+ FN_SSI_SDATA1_A, FN_VI1_3_B, FN_LCD_DATA14_B, 0,
+ /* IP9_27_26 [2] */
+ FN_SSI_WS1_A, FN_VI1_2_B, FN_LCD_DATA13_B, 0,
+ /* IP9_25_24 [2] */
+ FN_SSI_SCK1_A, FN_VI1_1_B, FN_TIOC2B_B, FN_LCD_DATA12_B,
+ /* IP9_23_22 [2] */
+ FN_SSI_SDATA0_A, FN_VI1_0_B, FN_TIOC2A_B, FN_LCD_DATA11_B,
+ /* IP9_21_20 [2] */
+ FN_SSI_WS0_A, FN_TIOC1B_B, FN_LCD_DATA10_B, 0,
+ /* IP9_19_18 [2] */
+ FN_SSI_SCK0_A, FN_TIOC1A_B, FN_LCD_DATA9_B, 0,
+ /* IP9_17_16 [2] */
+ FN_VI1_7_A, FN_FCE_B, FN_LCD_DATA8_B, 0,
+ /* IP9_15_14 [2] */
+ FN_VI1_6_A, 0, FN_FD7_B, FN_LCD_DATA7_B,
+ /* IP9_13_12 [2] */
+ FN_VI1_5_A, 0, FN_FD6_B, FN_LCD_DATA6_B,
+ /* IP9_11_10 [2] */
+ FN_VI1_4_A, 0, FN_FD5_B, FN_LCD_DATA5_B,
+ /* IP9_9_8 [2] */
+ FN_VI1_3_A, 0, FN_FD4_B, FN_LCD_DATA4_B,
+ /* IP9_7_6 [2] */
+ FN_VI1_2_A, 0, FN_FD3_B, FN_LCD_DATA3_B,
+ /* IP9_5_4 [2] */
+ FN_VI1_1_A, 0, FN_FD2_B, FN_LCD_DATA2_B,
+ /* IP9_3_2 [2] */
+ FN_VI1_0_A, 0, FN_FD1_B, FN_LCD_DATA1_B,
+ /* IP9_1_0 [2] */
+ FN_VI1_CLK_A, 0, FN_FD0_B, FN_LCD_DATA0_B }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR10", 0xFFFC0044, 32,
+ 2, 2, 2, 1, 2, 1, 3,
+ 3, 1, 3, 3, 3, 3, 3) {
+	/* IP10_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP10_29_28 [2] */
+ FN_CAN1_TX_A, FN_TX5_C, FN_MLB_DAT, 0,
+ /* IP10_27_26 [2] */
+ FN_CAN0_RX_A, FN_IRQ0_B, FN_MLB_SIG, 0,
+ /* IP10_25 [1] */
+ FN_CAN1_RX_A, FN_IRQ1_B,
+ /* IP10_24_23 [2] */
+ FN_CAN0_TX_A, FN_TX4_D, FN_MLB_CLK, 0,
+ /* IP10_22 [1] */
+ FN_CAN_CLK_A, FN_RX4_D,
+ /* IP10_21_19 [3] */
+ FN_AUDIO_CLKOUT, FN_TX1_E, FN_HRTS0_C, FN_FSE_B,
+ FN_LCD_M_DISP_B, 0, 0, 0,
+ /* IP10_18_16 [3] */
+ FN_AUDIO_CLKC, FN_SCK1_E, FN_HCTS0_C, FN_FRB_B,
+ FN_LCD_VEPWC_B, 0, 0, 0,
+ /* IP10_15 [1] */
+ FN_AUDIO_CLKB_A, FN_LCD_CLK_B,
+ /* IP10_14_12 [3] */
+ FN_AUDIO_CLKA_A, FN_VI1_CLK_B, FN_SCK1_D, FN_IECLK_B,
+ FN_LCD_FLM_B, 0, 0, 0,
+ /* IP10_11_9 [3] */
+ FN_SSI_SDATA3, FN_VI1_7_B, FN_HTX0_C, FN_FWE_B,
+ FN_LCD_CL2_B, 0, 0, 0,
+ /* IP10_8_6 [3] */
+ FN_SSI_SDATA2, FN_VI1_6_B, FN_HRX0_C, FN_FRE_B,
+ FN_LCD_CL1_B, 0, 0, 0,
+ /* IP10_5_3 [3] */
+ FN_SSI_WS23, FN_VI1_5_B, FN_TX1_D, FN_HSCK0_C, FN_FALE_B,
+ FN_LCD_DON_B, 0, 0, 0,
+ /* IP10_2_0 [3] */
+ FN_SSI_SCK23, FN_VI1_4_B, FN_RX1_D, FN_FCLE_B,
+ FN_LCD_DATA15_B, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR11", 0xFFFC0048, 32,
+			3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3, 1, 1, 1, 1) {
+ /* IP11_31_29 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_28 [1] */
+ FN_PRESETOUT, FN_ST_CLKOUT,
+ /* IP11_27_26 [2] */
+ FN_DACK1, FN_HSPI_CS_B, FN_TX4_B, FN_ET0_RX_CLK_A,
+ /* IP11_25_23 [3] */
+ FN_DREQ1, FN_HSPI_CLK_B, FN_RX4_B, FN_ET0_PHY_INT_C,
+ FN_ET0_TX_CLK_A, 0, 0, 0,
+ /* IP11_22_21 [2] */
+ FN_DACK0, FN_SD1_DAT3_A, FN_ET0_TX_ER, 0,
+ /* IP11_20_19 [2] */
+ FN_DREQ0, FN_SD1_CLK_A, FN_ET0_TX_EN, 0,
+ /* IP11_18_16 [3] */
+ FN_USB_OVC1, FN_RX3_D, FN_CAN1_RX_B, FN_RX5_D,
+ FN_IERX_B, 0, 0, 0,
+ /* IP11_15_13 [3] */
+ FN_PENC1, FN_TX3_D, FN_CAN1_TX_B, FN_TX5_D,
+ FN_IETX_B, 0, 0, 0,
+ /* IP11_12 [1] */
+ FN_TX0_A, FN_HSPI_TX_A,
+ /* IP11_11_10 [2] */
+ FN_RX0_A, FN_HSPI_RX_A, FN_RMII0_RXD0_A, FN_ET0_ERXD6,
+ /* IP11_9_7 [3] */
+ FN_SCK0_A, FN_HSPI_CS_A, FN_VI0_CLKENB, FN_RMII0_TXD1_A,
+ FN_ET0_ERXD5, 0, 0, 0,
+ /* IP11_6_4 [3] */
+ FN_SCIF_CLK_A, FN_HSPI_CLK_A, FN_VI0_CLK, FN_RMII0_TXD0_A,
+ FN_ET0_ERXD4, 0, 0, 0,
+ /* IP11_3 [1] */
+ FN_SDSELF, FN_RTS1_E,
+ /* IP11_2 [1] */
+ FN_SDA0, FN_HIFEBL_A,
+ /* IP11_1 [1] */
+ FN_SDA1, FN_RX1_E,
+ /* IP11_0 [1] */
+ FN_SCL1, FN_SCIF_CLK_C }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xFFFC004C, 32,
+ 3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2,
+ 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* SEL1_31_29 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* SEL1_28 [1] */
+ FN_SEL_IEBUS_0, FN_SEL_IEBUS_1,
+ /* SEL1_27 [1] */
+ FN_SEL_RQSPI_0, FN_SEL_RQSPI_1,
+ /* SEL1_26 [1] */
+ FN_SEL_VIN1_0, FN_SEL_VIN1_1,
+ /* SEL1_25 [1] */
+ FN_SEL_HIF_0, FN_SEL_HIF_1,
+ /* SEL1_24 [1] */
+ FN_SEL_RSPI_0, FN_SEL_RSPI_1,
+ /* SEL1_23 [1] */
+ FN_SEL_LCDC_0, FN_SEL_LCDC_1,
+ /* SEL1_22_21 [2] */
+ FN_SEL_ET0_CTL_0, FN_SEL_ET0_CTL_1, FN_SEL_ET0_CTL_2, 0,
+ /* SEL1_20 [1] */
+ FN_SEL_ET0_0, FN_SEL_ET0_1,
+ /* SEL1_19 [1] */
+ FN_SEL_RMII_0, FN_SEL_RMII_1,
+ /* SEL1_18 [1] */
+ FN_SEL_TMU_0, FN_SEL_TMU_1,
+ /* SEL1_17_16 [2] */
+ FN_SEL_HSPI_0, FN_SEL_HSPI_1, FN_SEL_HSPI_2, 0,
+ /* SEL1_15_14 [2] */
+ FN_SEL_HSCIF_0, FN_SEL_HSCIF_1, FN_SEL_HSCIF_2, FN_SEL_HSCIF_3,
+ /* SEL1_13 [1] */
+ FN_SEL_RCAN_CLK_0, FN_SEL_RCAN_CLK_1,
+ /* SEL1_12_11 [2] */
+ FN_SEL_RCAN1_0, FN_SEL_RCAN1_1, FN_SEL_RCAN1_2, 0,
+ /* SEL1_10 [1] */
+ FN_SEL_RCAN0_0, FN_SEL_RCAN0_1,
+ /* SEL1_9 [1] */
+ FN_SEL_SDHI2_0, FN_SEL_SDHI2_1,
+ /* SEL1_8 [1] */
+ FN_SEL_SDHI1_0, FN_SEL_SDHI1_1,
+ /* SEL1_7 [1] */
+ FN_SEL_SDHI0_0, FN_SEL_SDHI0_1,
+ /* SEL1_6 [1] */
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1,
+ /* SEL1_5 [1] */
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1,
+ /* SEL1_4 [1] */
+ FN_SEL_AUDIO_CLKB_0, FN_SEL_AUDIO_CLKB_1,
+ /* SEL1_3 [1] */
+ FN_SEL_AUDIO_CLKA_0, FN_SEL_AUDIO_CLKA_1,
+ /* SEL1_2 [1] */
+ FN_SEL_FLCTL_0, FN_SEL_FLCTL_1,
+ /* SEL1_1 [1] */
+ FN_SEL_MMC_0, FN_SEL_MMC_1,
+ /* SEL1_0 [1] */
+ FN_SEL_INTC_0, FN_SEL_INTC_1 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xFFFC0050, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 1, 2, 2, 3, 2, 3, 2, 2) {
+ /* SEL2_31 [1] */
+ 0, 0,
+ /* SEL2_30 [1] */
+ 0, 0,
+ /* SEL2_29 [1] */
+ 0, 0,
+ /* SEL2_28 [1] */
+ 0, 0,
+ /* SEL2_27 [1] */
+ 0, 0,
+ /* SEL2_26 [1] */
+ 0, 0,
+ /* SEL2_25 [1] */
+ 0, 0,
+ /* SEL2_24 [1] */
+ 0, 0,
+ /* SEL2_23 [1] */
+ FN_SEL_MTU2_CLK_0, FN_SEL_MTU2_CLK_1,
+ /* SEL2_22 [1] */
+ FN_SEL_MTU2_CH4_0, FN_SEL_MTU2_CH4_1,
+ /* SEL2_21 [1] */
+ FN_SEL_MTU2_CH3_0, FN_SEL_MTU2_CH3_1,
+ /* SEL2_20_19 [2] */
+ FN_SEL_MTU2_CH2_0, FN_SEL_MTU2_CH2_1, FN_SEL_MTU2_CH2_2, 0,
+ /* SEL2_18_17 [2] */
+ FN_SEL_MTU2_CH1_0, FN_SEL_MTU2_CH1_1, FN_SEL_MTU2_CH1_2, 0,
+ /* SEL2_16 [1] */
+ FN_SEL_MTU2_CH0_0, FN_SEL_MTU2_CH0_1,
+ /* SEL2_15_14 [2] */
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ /* SEL2_13_12 [2] */
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ /* SEL2_11_9 [3] */
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
+ FN_SEL_SCIF3_4, 0, 0, 0,
+ /* SEL2_8_7 [2] */
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2, FN_SEL_SCIF2_3,
+ /* SEL2_6_4 [3] */
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
+ FN_SEL_SCIF1_4, 0, 0, 0,
+ /* SEL2_3_2 [2] */
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, 0,
+ /* SEL2_1_0 [2] */
+ FN_SEL_SCIF_CLK_0, FN_SEL_SCIF_CLK_1, FN_SEL_SCIF_CLK_2, 0 }
+ },
+	/* GPIO 0 - 5 */
+ { PINMUX_CFG_REG("INOUTSEL0", 0xFFC40004, 32, 1) { GP_INOUTSEL(0) } },
+ { PINMUX_CFG_REG("INOUTSEL1", 0xFFC41004, 32, 1) { GP_INOUTSEL(1) } },
+ { PINMUX_CFG_REG("INOUTSEL2", 0xFFC42004, 32, 1) { GP_INOUTSEL(2) } },
+ { PINMUX_CFG_REG("INOUTSEL3", 0xFFC43004, 32, 1) { GP_INOUTSEL(3) } },
+ { PINMUX_CFG_REG("INOUTSEL4", 0xFFC44004, 32, 1) { GP_INOUTSEL(4) } },
+ { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 24 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 16 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */
+ GP_5_11_IN, GP_5_11_OUT,
+ GP_5_10_IN, GP_5_10_OUT,
+ GP_5_9_IN, GP_5_9_OUT,
+ GP_5_8_IN, GP_5_8_OUT,
+ GP_5_7_IN, GP_5_7_OUT,
+ GP_5_6_IN, GP_5_6_OUT,
+ GP_5_5_IN, GP_5_5_OUT,
+ GP_5_4_IN, GP_5_4_OUT,
+ GP_5_3_IN, GP_5_3_OUT,
+ GP_5_2_IN, GP_5_2_OUT,
+ GP_5_1_IN, GP_5_1_OUT,
+ GP_5_0_IN, GP_5_0_OUT }
+ },
+ { },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+	/* GPIO 0 - 5 */
+ { PINMUX_DATA_REG("INDT0", 0xFFC4000C, 32) { GP_INDT(0) } },
+ { PINMUX_DATA_REG("INDT1", 0xFFC4100C, 32) { GP_INDT(1) } },
+ { PINMUX_DATA_REG("INDT2", 0xFFC4200C, 32) { GP_INDT(2) } },
+ { PINMUX_DATA_REG("INDT3", 0xFFC4300C, 32) { GP_INDT(3) } },
+ { PINMUX_DATA_REG("INDT4", 0xFFC4400C, 32) { GP_INDT(4) } },
+ { PINMUX_DATA_REG("INDT5", 0xFFC4500C, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ GP_5_11_DATA, GP_5_10_DATA, GP_5_9_DATA, GP_5_8_DATA,
+ GP_5_7_DATA, GP_5_6_DATA, GP_5_5_DATA, GP_5_4_DATA,
+ GP_5_3_DATA, GP_5_2_DATA, GP_5_1_DATA, GP_5_0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7734_pinmux_info = {
+ .name = "sh7734_pfc",
+
+ .unlock_reg = 0xFFFC0000,
+
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_GP_0_0,
+ .last_gpio = GPIO_FN_ST_CLKOUT,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
new file mode 100644
index 000000000000..5ed74cd0ba99
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -0,0 +1,2282 @@
+/*
+ * SH7757 (B0 step) Pinmux
+ *
+ * Copyright (C) 2009-2010 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * Based on SH7723 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7757.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA,
+ PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA,
+ PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
+ PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTG7_IN, PTG6_IN, PTG5_IN, PTG4_IN,
+ PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN,
+ PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN,
+ PTJ6_IN, PTJ5_IN, PTJ4_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN,
+ PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN,
+ PTP7_IN, PTP6_IN, PTP5_IN, PTP4_IN,
+ PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
+ PTQ6_IN, PTQ5_IN, PTQ4_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
+ PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
+ PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
+ PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
+ PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
+ PTG7_IN_PU, PTG6_IN_PU, PTG4_IN_PU,
+ PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTI7_IN_PU, PTI6_IN_PU, PTI4_IN_PU,
+ PTI3_IN_PU, PTI2_IN_PU, PTI1_IN_PU, PTI0_IN_PU,
+ PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
+ PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTN4_IN_PU,
+ PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
+ PTO7_IN_PU, PTO6_IN_PU, PTO5_IN_PU, PTO4_IN_PU,
+ PTO3_IN_PU, PTO2_IN_PU, PTO1_IN_PU, PTO0_IN_PU,
+ PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
+ PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
+ PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
+ PTV3_IN_PU, PTV2_IN_PU,
+ PTW1_IN_PU, PTW0_IN_PU,
+ PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
+ PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
+ PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
+ PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
+ PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
+ PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
+ PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG7_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT,
+ PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT,
+ PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTO7_OUT, PTO6_OUT, PTO5_OUT, PTO4_OUT,
+ PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT,
+ PTP7_OUT, PTP6_OUT, PTP5_OUT, PTP4_OUT,
+ PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
+ PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
+ PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
+ PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
+ PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG7_FN, PTG6_FN, PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN,
+ PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN,
+ PTJ6_FN, PTJ5_FN, PTJ4_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN,
+ PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN,
+ PTP7_FN, PTP6_FN, PTP5_FN, PTP4_FN,
+ PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
+ PTQ6_FN, PTQ5_FN, PTQ4_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
+ PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
+ PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+ PS0_15_FN1, PS0_15_FN2,
+ PS0_14_FN1, PS0_14_FN2,
+ PS0_13_FN1, PS0_13_FN2,
+ PS0_12_FN1, PS0_12_FN2,
+ PS0_11_FN1, PS0_11_FN2,
+ PS0_10_FN1, PS0_10_FN2,
+ PS0_9_FN1, PS0_9_FN2,
+ PS0_8_FN1, PS0_8_FN2,
+ PS0_7_FN1, PS0_7_FN2,
+ PS0_6_FN1, PS0_6_FN2,
+ PS0_5_FN1, PS0_5_FN2,
+ PS0_4_FN1, PS0_4_FN2,
+ PS0_3_FN1, PS0_3_FN2,
+ PS0_2_FN1, PS0_2_FN2,
+
+ PS1_10_FN1, PS1_10_FN2,
+ PS1_9_FN1, PS1_9_FN2,
+ PS1_8_FN1, PS1_8_FN2,
+ PS1_2_FN1, PS1_2_FN2,
+
+ PS2_13_FN1, PS2_13_FN2,
+ PS2_12_FN1, PS2_12_FN2,
+ PS2_7_FN1, PS2_7_FN2,
+ PS2_6_FN1, PS2_6_FN2,
+ PS2_5_FN1, PS2_5_FN2,
+ PS2_4_FN1, PS2_4_FN2,
+ PS2_2_FN1, PS2_2_FN2,
+
+ PS3_15_FN1, PS3_15_FN2,
+ PS3_14_FN1, PS3_14_FN2,
+ PS3_13_FN1, PS3_13_FN2,
+ PS3_12_FN1, PS3_12_FN2,
+ PS3_11_FN1, PS3_11_FN2,
+ PS3_10_FN1, PS3_10_FN2,
+ PS3_9_FN1, PS3_9_FN2,
+ PS3_8_FN1, PS3_8_FN2,
+ PS3_7_FN1, PS3_7_FN2,
+ PS3_2_FN1, PS3_2_FN2,
+ PS3_1_FN1, PS3_1_FN2,
+
+ PS4_14_FN1, PS4_14_FN2,
+ PS4_13_FN1, PS4_13_FN2,
+ PS4_12_FN1, PS4_12_FN2,
+ PS4_10_FN1, PS4_10_FN2,
+ PS4_9_FN1, PS4_9_FN2,
+ PS4_8_FN1, PS4_8_FN2,
+ PS4_4_FN1, PS4_4_FN2,
+ PS4_3_FN1, PS4_3_FN2,
+ PS4_2_FN1, PS4_2_FN2,
+ PS4_1_FN1, PS4_1_FN2,
+ PS4_0_FN1, PS4_0_FN2,
+
+ PS5_11_FN1, PS5_11_FN2,
+ PS5_10_FN1, PS5_10_FN2,
+ PS5_9_FN1, PS5_9_FN2,
+ PS5_8_FN1, PS5_8_FN2,
+ PS5_7_FN1, PS5_7_FN2,
+ PS5_6_FN1, PS5_6_FN2,
+ PS5_5_FN1, PS5_5_FN2,
+ PS5_4_FN1, PS5_4_FN2,
+ PS5_3_FN1, PS5_3_FN2,
+ PS5_2_FN1, PS5_2_FN2,
+
+ PS6_15_FN1, PS6_15_FN2,
+ PS6_14_FN1, PS6_14_FN2,
+ PS6_13_FN1, PS6_13_FN2,
+ PS6_12_FN1, PS6_12_FN2,
+ PS6_11_FN1, PS6_11_FN2,
+ PS6_10_FN1, PS6_10_FN2,
+ PS6_9_FN1, PS6_9_FN2,
+ PS6_8_FN1, PS6_8_FN2,
+ PS6_7_FN1, PS6_7_FN2,
+ PS6_6_FN1, PS6_6_FN2,
+ PS6_5_FN1, PS6_5_FN2,
+ PS6_4_FN1, PS6_4_FN2,
+ PS6_3_FN1, PS6_3_FN2,
+ PS6_2_FN1, PS6_2_FN2,
+ PS6_1_FN1, PS6_1_FN2,
+ PS6_0_FN1, PS6_0_FN2,
+
+ PS7_15_FN1, PS7_15_FN2,
+ PS7_14_FN1, PS7_14_FN2,
+ PS7_13_FN1, PS7_13_FN2,
+ PS7_12_FN1, PS7_12_FN2,
+ PS7_11_FN1, PS7_11_FN2,
+ PS7_10_FN1, PS7_10_FN2,
+ PS7_9_FN1, PS7_9_FN2,
+ PS7_8_FN1, PS7_8_FN2,
+ PS7_7_FN1, PS7_7_FN2,
+ PS7_6_FN1, PS7_6_FN2,
+ PS7_5_FN1, PS7_5_FN2,
+ PS7_4_FN1, PS7_4_FN2,
+
+ PS8_15_FN1, PS8_15_FN2,
+ PS8_14_FN1, PS8_14_FN2,
+ PS8_13_FN1, PS8_13_FN2,
+ PS8_12_FN1, PS8_12_FN2,
+ PS8_11_FN1, PS8_11_FN2,
+ PS8_10_FN1, PS8_10_FN2,
+ PS8_9_FN1, PS8_9_FN2,
+ PS8_8_FN1, PS8_8_FN2,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+	/* PTA (module: LBSC, RGMII) */
+ BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK,
+ ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK,
+
+	/* PTB (module: INTC, ONFI, TMU) */
+ IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK,
+ IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK,
+ ON_NRE_MARK, ON_NWE_MARK, ON_NWP_MARK, ON_NCE0_MARK,
+ ON_R_B0_MARK, ON_ALE_MARK, ON_CLE_MARK, TCLK_MARK,
+
+	/* PTC (module: IRQ, PWMU) */
+ IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK,
+ IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
+ PWMU0_MARK, PWMU1_MARK, PWMU2_MARK, PWMU3_MARK,
+ PWMU4_MARK, PWMU5_MARK,
+
+	/* PTD (module: SPI0, DMAC) */
+ SP0_MOSI_MARK, SP0_MISO_MARK, SP0_SCK_MARK, SP0_SCK_FB_MARK,
+ SP0_SS0_MARK, SP0_SS1_MARK, SP0_SS2_MARK, SP0_SS3_MARK,
+ DREQ0_MARK, DACK0_MARK, TEND0_MARK,
+
+	/* PTE (module: RMII) */
+ RMII0_CRS_DV_MARK, RMII0_TXD1_MARK,
+ RMII0_TXD0_MARK, RMII0_TXEN_MARK,
+ RMII0_REFCLK_MARK, RMII0_RXD1_MARK,
+ RMII0_RXD0_MARK, RMII0_RX_ER_MARK,
+
+	/* PTF (module: RMII, SerMux) */
+ RMII1_CRS_DV_MARK, RMII1_TXD1_MARK,
+ RMII1_TXD0_MARK, RMII1_TXEN_MARK,
+ RMII1_REFCLK_MARK, RMII1_RXD1_MARK,
+ RMII1_RXD0_MARK, RMII1_RX_ER_MARK,
+ RAC_RI_MARK,
+
+	/* PTG (module: system, LBSC, LPC, WDT, LPC, eMMC) */
+ BOOTFMS_MARK, BOOTWP_MARK, A25_MARK, A24_MARK,
+ SERIRQ_MARK, WDTOVF_MARK, LPCPD_MARK, LDRQ_MARK,
+ MMCCLK_MARK, MMCCMD_MARK,
+
+	/* PTH (module: SPI1, LPC, DMAC, ADC) */
+ SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK,
+ SP1_SS0_MARK, SP1_SS1_MARK, WP_MARK, FMS0_MARK,
+ TEND1_MARK, DREQ1_MARK, DACK1_MARK, ADTRG1_MARK,
+ ADTRG0_MARK,
+
+	/* PTI (module: LBSC, SDHI) */
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK,
+ SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK,
+
+	/* PTJ (module: SCIF234) */
+ RTS3_MARK, CTS3_MARK, TXD3_MARK, RXD3_MARK,
+ RTS4_MARK, RXD4_MARK, TXD4_MARK,
+
+	/* PTK (module: SERMUX, LBSC, SCIF) */
+ COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK,
+ COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, CLKOUT_MARK,
+ SCK2_MARK, SCK4_MARK, SCK3_MARK,
+
+	/* PTL (module: SERMUX, SCIF, LBSC, AUD) */
+ RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK, RAC_DTR_MARK,
+ RAC_DSR_MARK, RAC_DCD_MARK, RAC_TXD_MARK, RXD2_MARK,
+ CS5_MARK, CS6_MARK, AUDSYNC_MARK, AUDCK_MARK,
+ TXD2_MARK,
+
+	/* PTM (module: LBSC, IIC) */
+ CS4_MARK, RD_MARK, WE0_MARK, CS0_MARK,
+ SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK,
+
+	/* PTN (module: USB, JMC, SGPIO, WDT) */
+ VBUS_EN_MARK, VBUS_OC_MARK, JMCTCK_MARK, JMCTMS_MARK,
+ JMCTDO_MARK, JMCTDI_MARK, JMCTRST_MARK,
+ SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK, SGPIO1_DI_MARK,
+ SGPIO1_DO_MARK, SUB_CLKIN_MARK,
+
+	/* PTO (module: SGPIO, SerMux) */
+ SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK, SGPIO0_DI_MARK,
+ SGPIO0_DO_MARK, SGPIO2_CLK_MARK, SGPIO2_LOAD_MARK,
+ SGPIO2_DI_MARK, SGPIO2_DO_MARK,
+ COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK,
+
+	/* PTQ (module: LPC) */
+ LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK,
+ LFRAME_MARK, LRESET_MARK, LCLK_MARK,
+
+	/* PTR (module: GRA, IIC) */
+ DDC3_MARK, DDC2_MARK, SDA2_MARK, SCL2_MARK,
+ SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK,
+ SDA8_MARK, SCL8_MARK,
+
+	/* PTS (module: GRA, IIC) */
+ DDC1_MARK, DDC0_MARK, SDA5_MARK, SCL5_MARK,
+ SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK,
+ SDA9_MARK, SCL9_MARK,
+
+	/* PTT (module: PWMX, AUD) */
+ PWMX7_MARK, PWMX6_MARK, PWMX5_MARK, PWMX4_MARK,
+ PWMX3_MARK, PWMX2_MARK, PWMX1_MARK, PWMX0_MARK,
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ STATUS1_MARK, STATUS0_MARK,
+
+	/* PTU (module: LPC, APM) */
+ LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK,
+ LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK,
+ APMONCTL_O_MARK, APMPWBTOUT_O_MARK, APMSCI_O_MARK,
+ APMVDDON_MARK, APMSLPBTN_MARK, APMPWRBTN_MARK, APMS5N_MARK,
+ APMS3N_MARK,
+
+	/* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
+ A23_MARK, A22_MARK, A21_MARK, A20_MARK,
+ A19_MARK, A18_MARK, A17_MARK, A16_MARK,
+ COM2_RI_MARK, R_SPI_MOSI_MARK, R_SPI_MISO_MARK,
+ R_SPI_RSPCK_MARK, R_SPI_SSL0_MARK, R_SPI_SSL1_MARK,
+ EVENT7_MARK, EVENT6_MARK, VBIOS_DI_MARK, VBIOS_DO_MARK,
+ VBIOS_CLK_MARK, VBIOS_CS_MARK,
+
+	/* PTW (module: LBSC, EVC, SCIF) */
+ A15_MARK, A14_MARK, A13_MARK, A12_MARK,
+ A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+ EVENT5_MARK, EVENT4_MARK, EVENT3_MARK, EVENT2_MARK,
+ EVENT1_MARK, EVENT0_MARK, CTS4_MARK, CTS2_MARK,
+
+	/* PTX (module: LBSC, SCIF, SIM) */
+ A7_MARK, A6_MARK, A5_MARK, A4_MARK,
+ A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+ RTS2_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+
+	/* PTY (module: LBSC) */
+ D7_MARK, D6_MARK, D5_MARK, D4_MARK,
+ D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+
+	/* PTZ (module: eMMC, ONFI) */
+ MMCDAT7_MARK, MMCDAT6_MARK, MMCDAT5_MARK, MMCDAT4_MARK,
+ MMCDAT3_MARK, MMCDAT2_MARK, MMCDAT1_MARK, MMCDAT0_MARK,
+ ON_DQ7_MARK, ON_DQ6_MARK, ON_DQ5_MARK, ON_DQ4_MARK,
+ ON_DQ3_MARK, ON_DQ2_MARK, ON_DQ1_MARK, ON_DQ0_MARK,
+
+ PINMUX_MARK_END,
+};
+
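+/*
+ * Mux data table.  Each PINMUX_DATA() entry ties a *_DATA or *_MARK value
+ * to its related mux settings: the port data entries list the pin's input
+ * and output states, while the function entries list the PSELn FN1/FN2
+ * select (where a pin is shared by two functions) and the port FN value.
+ */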
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT),
+ PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG7_DATA, PTG7_IN, PTG7_OUT),
+ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT),
+ PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
+
+ /* PTI GPIO */
+ PINMUX_DATA(PTI7_DATA, PTI7_IN, PTI7_OUT),
+ PINMUX_DATA(PTI6_DATA, PTI6_IN, PTI6_OUT),
+ PINMUX_DATA(PTI5_DATA, PTI5_IN, PTI5_OUT),
+ PINMUX_DATA(PTI4_DATA, PTI4_IN, PTI4_OUT),
+ PINMUX_DATA(PTI3_DATA, PTI3_IN, PTI3_OUT),
+ PINMUX_DATA(PTI2_DATA, PTI2_IN, PTI2_OUT),
+ PINMUX_DATA(PTI1_DATA, PTI1_IN, PTI1_OUT),
+ PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT),
+ PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
+
+ /* PTO GPIO */
+ PINMUX_DATA(PTO7_DATA, PTO7_IN, PTO7_OUT),
+ PINMUX_DATA(PTO6_DATA, PTO6_IN, PTO6_OUT),
+ PINMUX_DATA(PTO5_DATA, PTO5_IN, PTO5_OUT),
+ PINMUX_DATA(PTO4_DATA, PTO4_IN, PTO4_OUT),
+ PINMUX_DATA(PTO3_DATA, PTO3_IN, PTO3_OUT),
+ PINMUX_DATA(PTO2_DATA, PTO2_IN, PTO2_OUT),
+ PINMUX_DATA(PTO1_DATA, PTO1_IN, PTO1_OUT),
+ PINMUX_DATA(PTO0_DATA, PTO0_IN, PTO0_OUT),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT),
+ PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT),
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT),
+ PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT),
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
+
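+	/*
+	 * Peripheral function entries.  A pin with a single function needs
+	 * only its PTxn_FN port setting; a pin shared between two functions
+	 * also selects FN1 or FN2 in the matching PSELn register.
+	 */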
+ /* PTA FN */
+ PINMUX_DATA(BS_MARK, PTA7_FN),
+ PINMUX_DATA(RDWR_MARK, PTA6_FN),
+ PINMUX_DATA(WE1_MARK, PTA5_FN),
+ PINMUX_DATA(RDY_MARK, PTA4_FN),
+ PINMUX_DATA(ET0_MDC_MARK, PTA3_FN),
+ PINMUX_DATA(ET0_MDIO_MARK, PTA2_FN),
+ PINMUX_DATA(ET1_MDC_MARK, PTA1_FN),
+ PINMUX_DATA(ET1_MDIO_MARK, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(IRQ15_MARK, PS0_15_FN1, PTB7_FN),
+ PINMUX_DATA(ON_NRE_MARK, PS0_15_FN2, PTB7_FN),
+ PINMUX_DATA(IRQ14_MARK, PS0_14_FN1, PTB6_FN),
+ PINMUX_DATA(ON_NWE_MARK, PS0_14_FN2, PTB6_FN),
+ PINMUX_DATA(IRQ13_MARK, PS0_13_FN1, PTB5_FN),
+ PINMUX_DATA(ON_NWP_MARK, PS0_13_FN2, PTB5_FN),
+ PINMUX_DATA(IRQ12_MARK, PS0_12_FN1, PTB4_FN),
+ PINMUX_DATA(ON_NCE0_MARK, PS0_12_FN2, PTB4_FN),
+ PINMUX_DATA(IRQ11_MARK, PS0_11_FN1, PTB3_FN),
+ PINMUX_DATA(ON_R_B0_MARK, PS0_11_FN2, PTB3_FN),
+ PINMUX_DATA(IRQ10_MARK, PS0_10_FN1, PTB2_FN),
+ PINMUX_DATA(ON_ALE_MARK, PS0_10_FN2, PTB2_FN),
+ PINMUX_DATA(IRQ9_MARK, PS0_9_FN1, PTB1_FN),
+ PINMUX_DATA(ON_CLE_MARK, PS0_9_FN2, PTB1_FN),
+ PINMUX_DATA(IRQ8_MARK, PS0_8_FN1, PTB0_FN),
+ PINMUX_DATA(TCLK_MARK, PS0_8_FN2, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(IRQ7_MARK, PS0_7_FN1, PTC7_FN),
+ PINMUX_DATA(PWMU0_MARK, PS0_7_FN2, PTC7_FN),
+ PINMUX_DATA(IRQ6_MARK, PS0_6_FN1, PTC6_FN),
+ PINMUX_DATA(PWMU1_MARK, PS0_6_FN2, PTC6_FN),
+ PINMUX_DATA(IRQ5_MARK, PS0_5_FN1, PTC5_FN),
+ PINMUX_DATA(PWMU2_MARK, PS0_5_FN2, PTC5_FN),
+	PINMUX_DATA(IRQ4_MARK, PS0_4_FN1, PTC4_FN),
+ PINMUX_DATA(PWMU3_MARK, PS0_4_FN2, PTC4_FN),
+ PINMUX_DATA(IRQ3_MARK, PS0_3_FN1, PTC3_FN),
+ PINMUX_DATA(PWMU4_MARK, PS0_3_FN2, PTC3_FN),
+ PINMUX_DATA(IRQ2_MARK, PS0_2_FN1, PTC2_FN),
+ PINMUX_DATA(PWMU5_MARK, PS0_2_FN2, PTC2_FN),
+ PINMUX_DATA(IRQ1_MARK, PTC1_FN),
+ PINMUX_DATA(IRQ0_MARK, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(SP0_MOSI_MARK, PTD7_FN),
+ PINMUX_DATA(SP0_MISO_MARK, PTD6_FN),
+ PINMUX_DATA(SP0_SCK_MARK, PTD5_FN),
+ PINMUX_DATA(SP0_SCK_FB_MARK, PTD4_FN),
+ PINMUX_DATA(SP0_SS0_MARK, PTD3_FN),
+ PINMUX_DATA(SP0_SS1_MARK, PS1_10_FN1, PTD2_FN),
+ PINMUX_DATA(DREQ0_MARK, PS1_10_FN2, PTD2_FN),
+ PINMUX_DATA(SP0_SS2_MARK, PS1_9_FN1, PTD1_FN),
+ PINMUX_DATA(DACK0_MARK, PS1_9_FN2, PTD1_FN),
+ PINMUX_DATA(SP0_SS3_MARK, PS1_8_FN1, PTD0_FN),
+ PINMUX_DATA(TEND0_MARK, PS1_8_FN2, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(RMII0_CRS_DV_MARK, PTE7_FN),
+ PINMUX_DATA(RMII0_TXD1_MARK, PTE6_FN),
+ PINMUX_DATA(RMII0_TXD0_MARK, PTE5_FN),
+ PINMUX_DATA(RMII0_TXEN_MARK, PTE4_FN),
+ PINMUX_DATA(RMII0_REFCLK_MARK, PTE3_FN),
+ PINMUX_DATA(RMII0_RXD1_MARK, PTE2_FN),
+ PINMUX_DATA(RMII0_RXD0_MARK, PTE1_FN),
+ PINMUX_DATA(RMII0_RX_ER_MARK, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(RMII1_CRS_DV_MARK, PTF7_FN),
+ PINMUX_DATA(RMII1_TXD1_MARK, PTF6_FN),
+ PINMUX_DATA(RMII1_TXD0_MARK, PTF5_FN),
+ PINMUX_DATA(RMII1_TXEN_MARK, PTF4_FN),
+ PINMUX_DATA(RMII1_REFCLK_MARK, PTF3_FN),
+ PINMUX_DATA(RMII1_RXD1_MARK, PS1_2_FN1, PTF2_FN),
+ PINMUX_DATA(RAC_RI_MARK, PS1_2_FN2, PTF2_FN),
+ PINMUX_DATA(RMII1_RXD0_MARK, PTF1_FN),
+ PINMUX_DATA(RMII1_RX_ER_MARK, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(BOOTFMS_MARK, PTG7_FN),
+ PINMUX_DATA(BOOTWP_MARK, PTG6_FN),
+ PINMUX_DATA(A25_MARK, PS2_13_FN1, PTG5_FN),
+ PINMUX_DATA(MMCCLK_MARK, PS2_13_FN2, PTG5_FN),
+ PINMUX_DATA(A24_MARK, PS2_12_FN1, PTG4_FN),
+ PINMUX_DATA(MMCCMD_MARK, PS2_12_FN2, PTG4_FN),
+ PINMUX_DATA(SERIRQ_MARK, PTG3_FN),
+ PINMUX_DATA(WDTOVF_MARK, PTG2_FN),
+ PINMUX_DATA(LPCPD_MARK, PTG1_FN),
+ PINMUX_DATA(LDRQ_MARK, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(SP1_MOSI_MARK, PS2_7_FN1, PTH7_FN),
+ PINMUX_DATA(TEND1_MARK, PS2_7_FN2, PTH7_FN),
+ PINMUX_DATA(SP1_MISO_MARK, PS2_6_FN1, PTH6_FN),
+ PINMUX_DATA(DREQ1_MARK, PS2_6_FN2, PTH6_FN),
+ PINMUX_DATA(SP1_SCK_MARK, PS2_5_FN1, PTH5_FN),
+ PINMUX_DATA(DACK1_MARK, PS2_5_FN2, PTH5_FN),
+ PINMUX_DATA(SP1_SCK_FB_MARK, PS2_4_FN1, PTH4_FN),
+ PINMUX_DATA(ADTRG1_MARK, PS2_4_FN2, PTH4_FN),
+ PINMUX_DATA(SP1_SS0_MARK, PTH3_FN),
+ PINMUX_DATA(SP1_SS1_MARK, PS2_2_FN1, PTH2_FN),
+ PINMUX_DATA(ADTRG0_MARK, PS2_2_FN2, PTH2_FN),
+ PINMUX_DATA(WP_MARK, PTH1_FN),
+ PINMUX_DATA(FMS0_MARK, PTH0_FN),
+
+ /* PTI FN */
+ PINMUX_DATA(D15_MARK, PS3_15_FN1, PTI7_FN),
+ PINMUX_DATA(SD_WP_MARK, PS3_15_FN2, PTI7_FN),
+ PINMUX_DATA(D14_MARK, PS3_14_FN1, PTI6_FN),
+ PINMUX_DATA(SD_CD_MARK, PS3_14_FN2, PTI6_FN),
+ PINMUX_DATA(D13_MARK, PS3_13_FN1, PTI5_FN),
+ PINMUX_DATA(SD_CLK_MARK, PS3_13_FN2, PTI5_FN),
+ PINMUX_DATA(D12_MARK, PS3_12_FN1, PTI4_FN),
+ PINMUX_DATA(SD_CMD_MARK, PS3_12_FN2, PTI4_FN),
+ PINMUX_DATA(D11_MARK, PS3_11_FN1, PTI3_FN),
+ PINMUX_DATA(SD_D3_MARK, PS3_11_FN2, PTI3_FN),
+ PINMUX_DATA(D10_MARK, PS3_10_FN1, PTI2_FN),
+ PINMUX_DATA(SD_D2_MARK, PS3_10_FN2, PTI2_FN),
+ PINMUX_DATA(D9_MARK, PS3_9_FN1, PTI1_FN),
+ PINMUX_DATA(SD_D1_MARK, PS3_9_FN2, PTI1_FN),
+ PINMUX_DATA(D8_MARK, PS3_8_FN1, PTI0_FN),
+ PINMUX_DATA(SD_D0_MARK, PS3_8_FN2, PTI0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(RTS3_MARK, PTJ6_FN),
+ PINMUX_DATA(CTS3_MARK, PTJ5_FN),
+ PINMUX_DATA(TXD3_MARK, PTJ4_FN),
+ PINMUX_DATA(RXD3_MARK, PTJ3_FN),
+ PINMUX_DATA(RTS4_MARK, PTJ2_FN),
+ PINMUX_DATA(RXD4_MARK, PTJ1_FN),
+ PINMUX_DATA(TXD4_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(COM2_TXD_MARK, PS3_7_FN1, PTK7_FN),
+ PINMUX_DATA(SCK2_MARK, PS3_7_FN2, PTK7_FN),
+ PINMUX_DATA(COM2_RXD_MARK, PTK6_FN),
+ PINMUX_DATA(COM2_RTS_MARK, PTK5_FN),
+ PINMUX_DATA(COM2_CTS_MARK, PTK4_FN),
+ PINMUX_DATA(COM2_DTR_MARK, PTK3_FN),
+ PINMUX_DATA(COM2_DSR_MARK, PS3_2_FN1, PTK2_FN),
+ PINMUX_DATA(SCK4_MARK, PS3_2_FN2, PTK2_FN),
+ PINMUX_DATA(COM2_DCD_MARK, PS3_1_FN1, PTK1_FN),
+ PINMUX_DATA(SCK3_MARK, PS3_1_FN2, PTK1_FN),
+ PINMUX_DATA(CLKOUT_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(RAC_RXD_MARK, PS4_14_FN1, PTL6_FN),
+ PINMUX_DATA(RXD2_MARK, PS4_14_FN2, PTL6_FN),
+ PINMUX_DATA(RAC_RTS_MARK, PS4_13_FN1, PTL5_FN),
+ PINMUX_DATA(CS5_MARK, PS4_13_FN2, PTL5_FN),
+ PINMUX_DATA(RAC_CTS_MARK, PS4_12_FN1, PTL4_FN),
+ PINMUX_DATA(CS6_MARK, PS4_12_FN2, PTL4_FN),
+ PINMUX_DATA(RAC_DTR_MARK, PTL3_FN),
+ PINMUX_DATA(RAC_DSR_MARK, PS4_10_FN1, PTL2_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PS4_10_FN2, PTL2_FN),
+ PINMUX_DATA(RAC_DCD_MARK, PS4_9_FN1, PTL1_FN),
+ PINMUX_DATA(AUDCK_MARK, PS4_9_FN2, PTL1_FN),
+ PINMUX_DATA(RAC_TXD_MARK, PS4_8_FN1, PTL0_FN),
+	PINMUX_DATA(TXD2_MARK, PS4_8_FN2, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(CS4_MARK, PTM7_FN),
+ PINMUX_DATA(RD_MARK, PTM6_FN),
+	PINMUX_DATA(WE0_MARK, PTM5_FN),
+ PINMUX_DATA(CS0_MARK, PTM4_FN),
+ PINMUX_DATA(SDA6_MARK, PTM3_FN),
+ PINMUX_DATA(SCL6_MARK, PTM2_FN),
+ PINMUX_DATA(SDA7_MARK, PTM1_FN),
+ PINMUX_DATA(SCL7_MARK, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(VBUS_EN_MARK, PTN6_FN),
+ PINMUX_DATA(VBUS_OC_MARK, PTN5_FN),
+ PINMUX_DATA(JMCTCK_MARK, PS4_4_FN1, PTN4_FN),
+ PINMUX_DATA(SGPIO1_CLK_MARK, PS4_4_FN2, PTN4_FN),
+	PINMUX_DATA(JMCTMS_MARK, PS4_3_FN1, PTN3_FN),
+	PINMUX_DATA(SGPIO1_LOAD_MARK, PS4_3_FN2, PTN3_FN),
+ PINMUX_DATA(JMCTDO_MARK, PS4_2_FN1, PTN2_FN),
+ PINMUX_DATA(SGPIO1_DO_MARK, PS4_2_FN2, PTN2_FN),
+ PINMUX_DATA(JMCTDI_MARK, PS4_1_FN1, PTN1_FN),
+ PINMUX_DATA(SGPIO1_DI_MARK, PS4_1_FN2, PTN1_FN),
+ PINMUX_DATA(JMCTRST_MARK, PS4_0_FN1, PTN0_FN),
+ PINMUX_DATA(SUB_CLKIN_MARK, PS4_0_FN2, PTN0_FN),
+
+ /* PTO FN */
+ PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN),
+ PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN),
+ PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN),
+ PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN),
+ PINMUX_DATA(SGPIO2_CLK_MARK, PS5_11_FN1, PTO3_FN),
+ PINMUX_DATA(COM1_TXD_MARK, PS5_11_FN2, PTO3_FN),
+ PINMUX_DATA(SGPIO2_LOAD_MARK, PS5_10_FN1, PTO2_FN),
+ PINMUX_DATA(COM1_RXD_MARK, PS5_10_FN2, PTO2_FN),
+ PINMUX_DATA(SGPIO2_DI_MARK, PS5_9_FN1, PTO1_FN),
+ PINMUX_DATA(COM1_RTS_MARK, PS5_9_FN2, PTO1_FN),
+ PINMUX_DATA(SGPIO2_DO_MARK, PS5_8_FN1, PTO0_FN),
+ PINMUX_DATA(COM1_CTS_MARK, PS5_8_FN2, PTO0_FN),
+
+ /* PTP FN */
+
+ /* PTQ FN */
+ PINMUX_DATA(LAD3_MARK, PTQ6_FN),
+ PINMUX_DATA(LAD2_MARK, PTQ5_FN),
+ PINMUX_DATA(LAD1_MARK, PTQ4_FN),
+ PINMUX_DATA(LAD0_MARK, PTQ3_FN),
+ PINMUX_DATA(LFRAME_MARK, PTQ2_FN),
+ PINMUX_DATA(LRESET_MARK, PTQ1_FN),
+ PINMUX_DATA(LCLK_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(SDA8_MARK, PTR7_FN), /* DDC3? */
+ PINMUX_DATA(SCL8_MARK, PTR6_FN), /* DDC2? */
+ PINMUX_DATA(SDA2_MARK, PTR5_FN),
+ PINMUX_DATA(SCL2_MARK, PTR4_FN),
+ PINMUX_DATA(SDA1_MARK, PTR3_FN),
+ PINMUX_DATA(SCL1_MARK, PTR2_FN),
+ PINMUX_DATA(SDA0_MARK, PTR1_FN),
+ PINMUX_DATA(SCL0_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SDA9_MARK, PTS7_FN), /* DDC1? */
+ PINMUX_DATA(SCL9_MARK, PTS6_FN), /* DDC0? */
+ PINMUX_DATA(SDA5_MARK, PTS5_FN),
+ PINMUX_DATA(SCL5_MARK, PTS4_FN),
+ PINMUX_DATA(SDA4_MARK, PTS3_FN),
+ PINMUX_DATA(SCL4_MARK, PTS2_FN),
+ PINMUX_DATA(SDA3_MARK, PTS1_FN),
+ PINMUX_DATA(SCL3_MARK, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(PWMX7_MARK, PS5_7_FN1, PTT7_FN),
+ PINMUX_DATA(AUDATA3_MARK, PS5_7_FN2, PTT7_FN),
+ PINMUX_DATA(PWMX6_MARK, PS5_6_FN1, PTT6_FN),
+ PINMUX_DATA(AUDATA2_MARK, PS5_6_FN2, PTT6_FN),
+ PINMUX_DATA(PWMX5_MARK, PS5_5_FN1, PTT5_FN),
+ PINMUX_DATA(AUDATA1_MARK, PS5_5_FN2, PTT5_FN),
+ PINMUX_DATA(PWMX4_MARK, PS5_4_FN1, PTT4_FN),
+ PINMUX_DATA(AUDATA0_MARK, PS5_4_FN2, PTT4_FN),
+ PINMUX_DATA(PWMX3_MARK, PS5_3_FN1, PTT3_FN),
+ PINMUX_DATA(STATUS1_MARK, PS5_3_FN2, PTT3_FN),
+ PINMUX_DATA(PWMX2_MARK, PS5_2_FN1, PTT2_FN),
+ PINMUX_DATA(STATUS0_MARK, PS5_2_FN2, PTT2_FN),
+ PINMUX_DATA(PWMX1_MARK, PTT1_FN),
+ PINMUX_DATA(PWMX0_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(LGPIO7_MARK, PS6_15_FN1, PTU7_FN),
+ PINMUX_DATA(APMONCTL_O_MARK, PS6_15_FN2, PTU7_FN),
+ PINMUX_DATA(LGPIO6_MARK, PS6_14_FN1, PTU6_FN),
+ PINMUX_DATA(APMPWBTOUT_O_MARK, PS6_14_FN2, PTU6_FN),
+ PINMUX_DATA(LGPIO5_MARK, PS6_13_FN1, PTU5_FN),
+ PINMUX_DATA(APMSCI_O_MARK, PS6_13_FN2, PTU5_FN),
+ PINMUX_DATA(LGPIO4_MARK, PS6_12_FN1, PTU4_FN),
+ PINMUX_DATA(APMVDDON_MARK, PS6_12_FN2, PTU4_FN),
+ PINMUX_DATA(LGPIO3_MARK, PS6_11_FN1, PTU3_FN),
+ PINMUX_DATA(APMSLPBTN_MARK, PS6_11_FN2, PTU3_FN),
+ PINMUX_DATA(LGPIO2_MARK, PS6_10_FN1, PTU2_FN),
+ PINMUX_DATA(APMPWRBTN_MARK, PS6_10_FN2, PTU2_FN),
+ PINMUX_DATA(LGPIO1_MARK, PS6_9_FN1, PTU1_FN),
+ PINMUX_DATA(APMS5N_MARK, PS6_9_FN2, PTU1_FN),
+ PINMUX_DATA(LGPIO0_MARK, PS6_8_FN1, PTU0_FN),
+ PINMUX_DATA(APMS3N_MARK, PS6_8_FN2, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(A23_MARK, PS6_7_FN1, PTV7_FN),
+ PINMUX_DATA(COM2_RI_MARK, PS6_7_FN2, PTV7_FN),
+ PINMUX_DATA(A22_MARK, PS6_6_FN1, PTV6_FN),
+ PINMUX_DATA(R_SPI_MOSI_MARK, PS6_6_FN2, PTV6_FN),
+ PINMUX_DATA(A21_MARK, PS6_5_FN1, PTV5_FN),
+ PINMUX_DATA(R_SPI_MISO_MARK, PS6_5_FN2, PTV5_FN),
+ PINMUX_DATA(A20_MARK, PS6_4_FN1, PTV4_FN),
+ PINMUX_DATA(R_SPI_RSPCK_MARK, PS6_4_FN2, PTV4_FN),
+ PINMUX_DATA(A19_MARK, PS6_3_FN1, PTV3_FN),
+ PINMUX_DATA(R_SPI_SSL0_MARK, PS6_3_FN2, PTV3_FN),
+ PINMUX_DATA(A18_MARK, PS6_2_FN1, PTV2_FN),
+ PINMUX_DATA(R_SPI_SSL1_MARK, PS6_2_FN2, PTV2_FN),
+ PINMUX_DATA(A17_MARK, PS6_1_FN1, PTV1_FN),
+ PINMUX_DATA(EVENT7_MARK, PS6_1_FN2, PTV1_FN),
+ PINMUX_DATA(A16_MARK, PS6_0_FN1, PTV0_FN),
+	PINMUX_DATA(EVENT6_MARK, PS6_0_FN2, PTV0_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(A15_MARK, PS7_15_FN1, PTW7_FN),
+ PINMUX_DATA(EVENT5_MARK, PS7_15_FN2, PTW7_FN),
+ PINMUX_DATA(A14_MARK, PS7_14_FN1, PTW6_FN),
+ PINMUX_DATA(EVENT4_MARK, PS7_14_FN2, PTW6_FN),
+ PINMUX_DATA(A13_MARK, PS7_13_FN1, PTW5_FN),
+ PINMUX_DATA(EVENT3_MARK, PS7_13_FN2, PTW5_FN),
+ PINMUX_DATA(A12_MARK, PS7_12_FN1, PTW4_FN),
+ PINMUX_DATA(EVENT2_MARK, PS7_12_FN2, PTW4_FN),
+ PINMUX_DATA(A11_MARK, PS7_11_FN1, PTW3_FN),
+ PINMUX_DATA(EVENT1_MARK, PS7_11_FN2, PTW3_FN),
+ PINMUX_DATA(A10_MARK, PS7_10_FN1, PTW2_FN),
+ PINMUX_DATA(EVENT0_MARK, PS7_10_FN2, PTW2_FN),
+ PINMUX_DATA(A9_MARK, PS7_9_FN1, PTW1_FN),
+ PINMUX_DATA(CTS4_MARK, PS7_9_FN2, PTW1_FN),
+ PINMUX_DATA(A8_MARK, PS7_8_FN1, PTW0_FN),
+ PINMUX_DATA(CTS2_MARK, PS7_8_FN2, PTW0_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(A7_MARK, PS7_7_FN1, PTX7_FN),
+ PINMUX_DATA(RTS2_MARK, PS7_7_FN2, PTX7_FN),
+ PINMUX_DATA(A6_MARK, PS7_6_FN1, PTX6_FN),
+ PINMUX_DATA(SIM_D_MARK, PS7_6_FN2, PTX6_FN),
+ PINMUX_DATA(A5_MARK, PS7_5_FN1, PTX5_FN),
+ PINMUX_DATA(SIM_CLK_MARK, PS7_5_FN2, PTX5_FN),
+ PINMUX_DATA(A4_MARK, PS7_4_FN1, PTX4_FN),
+ PINMUX_DATA(SIM_RST_MARK, PS7_4_FN2, PTX4_FN),
+ PINMUX_DATA(A3_MARK, PTX3_FN),
+ PINMUX_DATA(A2_MARK, PTX2_FN),
+ PINMUX_DATA(A1_MARK, PTX1_FN),
+ PINMUX_DATA(A0_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(D7_MARK, PTY7_FN),
+ PINMUX_DATA(D6_MARK, PTY6_FN),
+ PINMUX_DATA(D5_MARK, PTY5_FN),
+ PINMUX_DATA(D4_MARK, PTY4_FN),
+ PINMUX_DATA(D3_MARK, PTY3_FN),
+ PINMUX_DATA(D2_MARK, PTY2_FN),
+ PINMUX_DATA(D1_MARK, PTY1_FN),
+ PINMUX_DATA(D0_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(MMCDAT7_MARK, PS8_15_FN1, PTZ7_FN),
+ PINMUX_DATA(ON_DQ7_MARK, PS8_15_FN2, PTZ7_FN),
+ PINMUX_DATA(MMCDAT6_MARK, PS8_14_FN1, PTZ6_FN),
+ PINMUX_DATA(ON_DQ6_MARK, PS8_14_FN2, PTZ6_FN),
+ PINMUX_DATA(MMCDAT5_MARK, PS8_13_FN1, PTZ5_FN),
+ PINMUX_DATA(ON_DQ5_MARK, PS8_13_FN2, PTZ5_FN),
+ PINMUX_DATA(MMCDAT4_MARK, PS8_12_FN1, PTZ4_FN),
+ PINMUX_DATA(ON_DQ4_MARK, PS8_12_FN2, PTZ4_FN),
+ PINMUX_DATA(MMCDAT3_MARK, PS8_11_FN1, PTZ3_FN),
+ PINMUX_DATA(ON_DQ3_MARK, PS8_11_FN2, PTZ3_FN),
+ PINMUX_DATA(MMCDAT2_MARK, PS8_10_FN1, PTZ2_FN),
+ PINMUX_DATA(ON_DQ2_MARK, PS8_10_FN2, PTZ2_FN),
+ PINMUX_DATA(MMCDAT1_MARK, PS8_9_FN1, PTZ1_FN),
+ PINMUX_DATA(ON_DQ1_MARK, PS8_9_FN2, PTZ1_FN),
+ PINMUX_DATA(MMCDAT0_MARK, PS8_8_FN1, PTZ0_FN),
+ PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN),
+};
+
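+/*
+ * GPIO table: PINMUX_GPIO(gpio number, data/mark enum).  The port pins
+ * (GPIO_PTxn) come first, followed by the GPIO_FN_* function pins.
+ */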
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG7, PTG7_DATA),
+ PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTI */
+ PINMUX_GPIO(GPIO_PTI7, PTI7_DATA),
+ PINMUX_GPIO(GPIO_PTI6, PTI6_DATA),
+ PINMUX_GPIO(GPIO_PTI5, PTI5_DATA),
+ PINMUX_GPIO(GPIO_PTI4, PTI4_DATA),
+ PINMUX_GPIO(GPIO_PTI3, PTI3_DATA),
+ PINMUX_GPIO(GPIO_PTI2, PTI2_DATA),
+ PINMUX_GPIO(GPIO_PTI1, PTI1_DATA),
+ PINMUX_GPIO(GPIO_PTI0, PTI0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTO */
+ PINMUX_GPIO(GPIO_PTO7, PTO7_DATA),
+ PINMUX_GPIO(GPIO_PTO6, PTO6_DATA),
+ PINMUX_GPIO(GPIO_PTO5, PTO5_DATA),
+ PINMUX_GPIO(GPIO_PTO4, PTO4_DATA),
+ PINMUX_GPIO(GPIO_PTO3, PTO3_DATA),
+ PINMUX_GPIO(GPIO_PTO2, PTO2_DATA),
+ PINMUX_GPIO(GPIO_PTO1, PTO1_DATA),
+ PINMUX_GPIO(GPIO_PTO0, PTO0_DATA),
+
+ /* PTP */
+ PINMUX_GPIO(GPIO_PTP7, PTP7_DATA),
+ PINMUX_GPIO(GPIO_PTP6, PTP6_DATA),
+ PINMUX_GPIO(GPIO_PTP5, PTP5_DATA),
+ PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
+ PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
+ PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
+ PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
+ PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
+ PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
+ PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
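+	/*
+	 * Function GPIOs: requesting a GPIO_FN_* pin applies the
+	 * corresponding *_MARK mux data defined in pinmux_data[] above.
+	 */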
+	/* PTA (module: LBSC, RGMII) */
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK),
+ PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK),
+
+	/* PTB (module: INTC, ONFI, TMU) */
+ PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NRE, ON_NRE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NWE, ON_NWE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NWP, ON_NWP_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NCE0, ON_NCE0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_R_B0, ON_R_B0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_ALE, ON_ALE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_CLE, ON_CLE_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+
+	/* PTC (module: IRQ, PWMU) */
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU0, PWMU0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU1, PWMU1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU2, PWMU2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU3, PWMU3_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU4, PWMU4_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU5, PWMU5_MARK),
+
+	/* PTD (module: SPI0, DMAC) */
+ PINMUX_GPIO(GPIO_FN_SP0_MOSI, SP0_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_MISO, SP0_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SCK, SP0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SCK_FB, SP0_SCK_FB_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS0, SP0_SS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS2, SP0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS3, SP0_SS3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+
+	/* PTE (module: RMII) */
+ PINMUX_GPIO(GPIO_FN_RMII0_CRS_DV, RMII0_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXD1, RMII0_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXD0, RMII0_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXEN, RMII0_TXEN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_REFCLK, RMII0_REFCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RXD1, RMII0_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RXD0, RMII0_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RX_ER, RMII0_RX_ER_MARK),
+
+	/* PTF (module: RMII, SerMux) */
+ PINMUX_GPIO(GPIO_FN_RMII1_CRS_DV, RMII1_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXD1, RMII1_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXD0, RMII1_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXEN, RMII1_TXEN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_REFCLK, RMII1_REFCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RXD1, RMII1_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RXD0, RMII1_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RX_ER, RMII1_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK),
+
+	/* PTG (module: system, LBSC, LPC, WDT, eMMC) */
+ PINMUX_GPIO(GPIO_FN_BOOTFMS, BOOTFMS_MARK),
+ PINMUX_GPIO(GPIO_FN_BOOTWP, BOOTWP_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK),
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+ PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK),
+ PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
+
+	/* PTH (module: SPI1, LPC, DMAC, ADC) */
+ PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_WP, WP_MARK),
+ PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK),
+
+	/* PTI (module: LBSC, SDHI) */
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK),
+
+	/* PTJ (module: SCIF234, SERMUX) */
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+
+	/* PTK (module: SERMUX, LBSC, SCIF) */
+ PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_CTS, COM2_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKOUT, CLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+
+	/* PTL (module: SERMUX, SCIF, LBSC, AUD) */
+ PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+
+	/* PTM (module: LBSC, IIC) */
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK),
+
+	/* PTN (module: USB, JMC, SGPIO, WDT) */
+ PINMUX_GPIO(GPIO_FN_VBUS_EN, VBUS_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_VBUS_OC, VBUS_OC_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTRST, JMCTRST_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SUB_CLKIN, SUB_CLKIN_MARK),
+
+	/* PTO (module: SGPIO, SerMux) */
+ PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_CLK, SGPIO2_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_LOAD, SGPIO2_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_DI, SGPIO2_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_DO, SGPIO2_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK),
+
+	/* PTP (module: EVC, ADC) */
+
+	/* PTQ (module: LPC) */
+ PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD2, LAD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD1, LAD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD0, LAD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LFRAME, LFRAME_MARK),
+ PINMUX_GPIO(GPIO_FN_LRESET, LRESET_MARK),
+ PINMUX_GPIO(GPIO_FN_LCLK, LCLK_MARK),
+
+	/* PTR (module: GRA, IIC) */
+ PINMUX_GPIO(GPIO_FN_DDC3, DDC3_MARK),
+ PINMUX_GPIO(GPIO_FN_DDC2, DDC2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA8, SDA8_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL8, SCL8_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+
+	/* PTS (module: GRA, IIC) */
+ PINMUX_GPIO(GPIO_FN_DDC1, DDC1_MARK),
+ PINMUX_GPIO(GPIO_FN_DDC0, DDC0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA9, SDA9_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL9, SCL9_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA5, SDA5_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL5, SCL5_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA4, SDA4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL4, SCL4_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+
+	/* PTT (module: PWMX, AUD) */
+ PINMUX_GPIO(GPIO_FN_PWMX7, PWMX7_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX6, PWMX6_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX5, PWMX5_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX4, PWMX4_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX3, PWMX3_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX2, PWMX2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX1, PWMX1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX0, PWMX0_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+
+	/* PTU (module: LPC, APM) */
+ PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK),
+ PINMUX_GPIO(GPIO_FN_APMONCTL_O, APMONCTL_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMPWBTOUT_O, APMPWBTOUT_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMSCI_O, APMSCI_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMVDDON, APMVDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_APMSLPBTN, APMSLPBTN_MARK),
+ PINMUX_GPIO(GPIO_FN_APMPWRBTN, APMPWRBTN_MARK),
+ PINMUX_GPIO(GPIO_FN_APMS5N, APMS5N_MARK),
+ PINMUX_GPIO(GPIO_FN_APMS3N, APMS3N_MARK),
+
+	/* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
+ PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_MOSI, R_SPI_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_MISO, R_SPI_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_RSPCK, R_SPI_RSPCK_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_SSL0, R_SPI_SSL0_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_SSL1, R_SPI_SSL1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_DI, VBIOS_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_DO, VBIOS_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_CLK, VBIOS_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_CS, VBIOS_CS_MARK),
+
+	/* PTW (module: LBSC, EVC, SCIF) */
+ PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
+ PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
+ PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
+ PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
+ PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
+ PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
+ PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
+ PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK),
+
+	/* PTX (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
+ PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
+ PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
+ PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
+ PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
+ PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+	/* PTY (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+
+	/* PTZ (module: eMMC, ONFI) */
+ PINMUX_GPIO(GPIO_FN_MMCDAT7, MMCDAT7_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT6, MMCDAT6_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT5, MMCDAT5_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT4, MMCDAT4_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT3, MMCDAT3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT2, MMCDAT2_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT1, MMCDAT1_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT0, MMCDAT0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ7, ON_DQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ6, ON_DQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ5, ON_DQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ4, ON_DQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ3, ON_DQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ2, ON_DQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ1, ON_DQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ0, ON_DQ0_MARK),
+};
+
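+/*
+ * Port/function control registers.  PINMUX_CFG_REG(name, address, register
+ * width, field width) is followed by the possible values of each bitfield,
+ * most significant field first; a 0 slot is a reserved or unused setting.
+ */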
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU,
+ PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU,
+ PTA5_FN, PTA5_OUT, PTA5_IN, PTA5_IN_PU,
+ PTA4_FN, PTA4_OUT, PTA4_IN, PTA4_IN_PU,
+ PTA3_FN, PTA3_OUT, PTA3_IN, PTA3_IN_PU,
+ PTA2_FN, PTA2_OUT, PTA2_IN, PTA2_IN_PU,
+ PTA1_FN, PTA1_OUT, PTA1_IN, PTA1_IN_PU,
+ PTA0_FN, PTA0_OUT, PTA0_IN, PTA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN, 0,
+ PTB6_FN, PTB6_OUT, PTB6_IN, 0,
+ PTB5_FN, PTB5_OUT, PTB5_IN, 0,
+ PTB4_FN, PTB4_OUT, PTB4_IN, 0,
+ PTB3_FN, PTB3_OUT, PTB3_IN, 0,
+ PTB2_FN, PTB2_OUT, PTB2_IN, 0,
+ PTB1_FN, PTB1_OUT, PTB1_IN, 0,
+ PTB0_FN, PTB0_OUT, PTB0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN, 0,
+ PTC6_FN, PTC6_OUT, PTC6_IN, 0,
+ PTC5_FN, PTC5_OUT, PTC5_IN, 0,
+ PTC4_FN, PTC4_OUT, PTC4_IN, 0,
+ PTC3_FN, PTC3_OUT, PTC3_IN, 0,
+ PTC2_FN, PTC2_OUT, PTC2_IN, 0,
+ PTC1_FN, PTC1_OUT, PTC1_IN, 0,
+ PTC0_FN, PTC0_OUT, PTC0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN, PTD7_IN_PU,
+ PTD6_FN, PTD6_OUT, PTD6_IN, PTD6_IN_PU,
+ PTD5_FN, PTD5_OUT, PTD5_IN, PTD5_IN_PU,
+ PTD4_FN, PTD4_OUT, PTD4_IN, PTD4_IN_PU,
+ PTD3_FN, PTD3_OUT, PTD3_IN, PTD3_IN_PU,
+ PTD2_FN, PTD2_OUT, PTD2_IN, PTD2_IN_PU,
+ PTD1_FN, PTD1_OUT, PTD1_IN, PTD1_IN_PU,
+ PTD0_FN, PTD0_OUT, PTD0_IN, PTD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) {
+ PTE7_FN, PTE7_OUT, PTE7_IN, PTE7_IN_PU,
+ PTE6_FN, PTE6_OUT, PTE6_IN, PTE6_IN_PU,
+ PTE5_FN, PTE5_OUT, PTE5_IN, PTE5_IN_PU,
+ PTE4_FN, PTE4_OUT, PTE4_IN, PTE4_IN_PU,
+ PTE3_FN, PTE3_OUT, PTE3_IN, PTE3_IN_PU,
+ PTE2_FN, PTE2_OUT, PTE2_IN, PTE2_IN_PU,
+ PTE1_FN, PTE1_OUT, PTE1_IN, PTE1_IN_PU,
+ PTE0_FN, PTE0_OUT, PTE0_IN, PTE0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) {
+ PTF7_FN, PTF7_OUT, PTF7_IN, PTF7_IN_PU,
+ PTF6_FN, PTF6_OUT, PTF6_IN, PTF6_IN_PU,
+ PTF5_FN, PTF5_OUT, PTF5_IN, PTF5_IN_PU,
+ PTF4_FN, PTF4_OUT, PTF4_IN, PTF4_IN_PU,
+ PTF3_FN, PTF3_OUT, PTF3_IN, PTF3_IN_PU,
+ PTF2_FN, PTF2_OUT, PTF2_IN, PTF2_IN_PU,
+ PTF1_FN, PTF1_OUT, PTF1_IN, PTF1_IN_PU,
+ PTF0_FN, PTF0_OUT, PTF0_IN, PTF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) {
+	PTG7_FN, PTG7_OUT, PTG7_IN, PTG7_IN_PU,
+	PTG6_FN, PTG6_OUT, PTG6_IN, PTG6_IN_PU,
+	PTG5_FN, PTG5_OUT, PTG5_IN, 0,
+	PTG4_FN, PTG4_OUT, PTG4_IN, PTG4_IN_PU,
+ PTG3_FN, PTG3_OUT, PTG3_IN, 0,
+ PTG2_FN, PTG2_OUT, PTG2_IN, 0,
+ PTG1_FN, PTG1_OUT, PTG1_IN, 0,
+ PTG0_FN, PTG0_OUT, PTG0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) {
+ PTH7_FN, PTH7_OUT, PTH7_IN, PTH7_IN_PU,
+ PTH6_FN, PTH6_OUT, PTH6_IN, PTH6_IN_PU,
+ PTH5_FN, PTH5_OUT, PTH5_IN, PTH5_IN_PU,
+ PTH4_FN, PTH4_OUT, PTH4_IN, PTH4_IN_PU,
+ PTH3_FN, PTH3_OUT, PTH3_IN, PTH3_IN_PU,
+ PTH2_FN, PTH2_OUT, PTH2_IN, PTH2_IN_PU,
+ PTH1_FN, PTH1_OUT, PTH1_IN, PTH1_IN_PU,
+ PTH0_FN, PTH0_OUT, PTH0_IN, PTH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) {
+ PTI7_FN, PTI7_OUT, PTI7_IN, PTI7_IN_PU,
+ PTI6_FN, PTI6_OUT, PTI6_IN, PTI6_IN_PU,
+ PTI5_FN, PTI5_OUT, PTI5_IN, 0,
+ PTI4_FN, PTI4_OUT, PTI4_IN, PTI4_IN_PU,
+ PTI3_FN, PTI3_OUT, PTI3_IN, PTI3_IN_PU,
+ PTI2_FN, PTI2_OUT, PTI2_IN, PTI2_IN_PU,
+ PTI1_FN, PTI1_OUT, PTI1_IN, PTI1_IN_PU,
+ PTI0_FN, PTI0_OUT, PTI0_IN, PTI0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) {
+	0, 0, 0, 0, /* reserved: always set to 1 */
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN, PTJ6_IN_PU,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN, PTJ5_IN_PU,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN, PTJ4_IN_PU,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN, PTJ3_IN_PU,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN, PTJ2_IN_PU,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) {
+ PTK7_FN, PTK7_OUT, PTK7_IN, PTK7_IN_PU,
+ PTK6_FN, PTK6_OUT, PTK6_IN, PTK6_IN_PU,
+ PTK5_FN, PTK5_OUT, PTK5_IN, PTK5_IN_PU,
+ PTK4_FN, PTK4_OUT, PTK4_IN, PTK4_IN_PU,
+ PTK3_FN, PTK3_OUT, PTK3_IN, PTK3_IN_PU,
+ PTK2_FN, PTK2_OUT, PTK2_IN, PTK2_IN_PU,
+ PTK1_FN, PTK1_OUT, PTK1_IN, PTK1_IN_PU,
+ PTK0_FN, PTK0_OUT, PTK0_IN, PTK0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) {
+	0, 0, 0, 0, /* reserved: always set to 1 */
+ PTL6_FN, PTL6_OUT, PTL6_IN, PTL6_IN_PU,
+ PTL5_FN, PTL5_OUT, PTL5_IN, PTL5_IN_PU,
+ PTL4_FN, PTL4_OUT, PTL4_IN, PTL4_IN_PU,
+ PTL3_FN, PTL3_OUT, PTL3_IN, PTL3_IN_PU,
+ PTL2_FN, PTL2_OUT, PTL2_IN, PTL2_IN_PU,
+ PTL1_FN, PTL1_OUT, PTL1_IN, PTL1_IN_PU,
+ PTL0_FN, PTL0_OUT, PTL0_IN, PTL0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN, PTM7_IN_PU,
+ PTM6_FN, PTM6_OUT, PTM6_IN, PTM6_IN_PU,
+ PTM5_FN, PTM5_OUT, PTM5_IN, PTM5_IN_PU,
+ PTM4_FN, PTM4_OUT, PTM4_IN, PTM4_IN_PU,
+ PTM3_FN, PTM3_OUT, PTM3_IN, 0,
+ PTM2_FN, PTM2_OUT, PTM2_IN, 0,
+ PTM1_FN, PTM1_OUT, PTM1_IN, 0,
+ PTM0_FN, PTM0_OUT, PTM0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) {
+	0, 0, 0, 0, /* reserved: always set to 1 */
+ PTN6_FN, PTN6_OUT, PTN6_IN, 0,
+ PTN5_FN, PTN5_OUT, PTN5_IN, 0,
+ PTN4_FN, PTN4_OUT, PTN4_IN, PTN4_IN_PU,
+ PTN3_FN, PTN3_OUT, PTN3_IN, PTN3_IN_PU,
+ PTN2_FN, PTN2_OUT, PTN2_IN, PTN2_IN_PU,
+ PTN1_FN, PTN1_OUT, PTN1_IN, PTN1_IN_PU,
+ PTN0_FN, PTN0_OUT, PTN0_IN, PTN0_IN_PU }
+ },
+ { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) {
+ PTO7_FN, PTO7_OUT, PTO7_IN, PTO7_IN_PU,
+ PTO6_FN, PTO6_OUT, PTO6_IN, PTO6_IN_PU,
+ PTO5_FN, PTO5_OUT, PTO5_IN, PTO5_IN_PU,
+ PTO4_FN, PTO4_OUT, PTO4_IN, PTO4_IN_PU,
+ PTO3_FN, PTO3_OUT, PTO3_IN, PTO3_IN_PU,
+ PTO2_FN, PTO2_OUT, PTO2_IN, PTO2_IN_PU,
+ PTO1_FN, PTO1_OUT, PTO1_IN, PTO1_IN_PU,
+ PTO0_FN, PTO0_OUT, PTO0_IN, PTO0_IN_PU }
+ },
+#if 0 /* FIXME: Remove it? */
+ { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) {
+	0, 0, 0, 0, /* reserved: always set to 1 */
+ PTP6_FN, PTP6_OUT, PTP6_IN, 0,
+ PTP5_FN, PTP5_OUT, PTP5_IN, 0,
+ PTP4_FN, PTP4_OUT, PTP4_IN, 0,
+ PTP3_FN, PTP3_OUT, PTP3_IN, 0,
+ PTP2_FN, PTP2_OUT, PTP2_IN, 0,
+ PTP1_FN, PTP1_OUT, PTP1_IN, 0,
+ PTP0_FN, PTP0_OUT, PTP0_IN, 0 }
+ },
+#endif
+ { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) {
+	0, 0, 0, 0, /* reserved: always set to 1 */
+ PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0,
+ PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0,
+ PTQ4_FN, PTQ4_OUT, PTQ4_IN, 0,
+ PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0,
+ PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0,
+ PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0,
+ PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN, 0,
+ PTR6_FN, PTR6_OUT, PTR6_IN, 0,
+ PTR5_FN, PTR5_OUT, PTR5_IN, 0,
+ PTR4_FN, PTR4_OUT, PTR4_IN, 0,
+ PTR3_FN, PTR3_OUT, PTR3_IN, 0,
+ PTR2_FN, PTR2_OUT, PTR2_IN, 0,
+ PTR1_FN, PTR1_OUT, PTR1_IN, 0,
+ PTR0_FN, PTR0_OUT, PTR0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) {
+ PTS7_FN, PTS7_OUT, PTS7_IN, 0,
+ PTS6_FN, PTS6_OUT, PTS6_IN, 0,
+ PTS5_FN, PTS5_OUT, PTS5_IN, 0,
+ PTS4_FN, PTS4_OUT, PTS4_IN, 0,
+ PTS3_FN, PTS3_OUT, PTS3_IN, 0,
+ PTS2_FN, PTS2_OUT, PTS2_IN, 0,
+ PTS1_FN, PTS1_OUT, PTS1_IN, 0,
+ PTS0_FN, PTS0_OUT, PTS0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) {
+ PTT7_FN, PTT7_OUT, PTT7_IN, PTO7_IN_PU,
+ PTT6_FN, PTT6_OUT, PTT6_IN, PTO6_IN_PU,
+ PTT5_FN, PTT5_OUT, PTT5_IN, PTO5_IN_PU,
+ PTT4_FN, PTT4_OUT, PTT4_IN, PTO4_IN_PU,
+ PTT3_FN, PTT3_OUT, PTT3_IN, PTO3_IN_PU,
+ PTT2_FN, PTT2_OUT, PTT2_IN, PTO2_IN_PU,
+ PTT1_FN, PTT1_OUT, PTT1_IN, PTO1_IN_PU,
+ PTT0_FN, PTT0_OUT, PTT0_IN, PTO0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) {
+ PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU,
+ PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU,
+ PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU,
+ PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU,
+ PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU,
+ PTU2_FN, PTU2_OUT, PTU2_IN, PTU2_IN_PU,
+ PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU,
+ PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) {
+ PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU,
+ PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU,
+ PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU,
+ PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU,
+ PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU,
+ PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU,
+ PTV1_FN, PTV1_OUT, PTV1_IN, 0,
+ PTV0_FN, PTV0_OUT, PTV0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) {
+ PTW7_FN, PTW7_OUT, PTW7_IN, 0,
+ PTW6_FN, PTW6_OUT, PTW6_IN, 0,
+ PTW5_FN, PTW5_OUT, PTW5_IN, 0,
+ PTW4_FN, PTW4_OUT, PTW4_IN, 0,
+ PTW3_FN, PTW3_OUT, PTW3_IN, 0,
+ PTW2_FN, PTW2_OUT, PTW2_IN, 0,
+ PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU,
+ PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) {
+ PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU,
+ PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU,
+ PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU,
+ PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU,
+ PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU,
+ PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU,
+ PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU,
+ PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) {
+ PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU,
+ PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU,
+ PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU,
+ PTY4_FN, PTY4_OUT, PTY4_IN, PTY4_IN_PU,
+ PTY3_FN, PTY3_OUT, PTY3_IN, PTY3_IN_PU,
+ PTY2_FN, PTY2_OUT, PTY2_IN, PTY2_IN_PU,
+ PTY1_FN, PTY1_OUT, PTY1_IN, PTY1_IN_PU,
+ PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, PTZ7_IN, 0,
+ PTZ6_FN, PTZ6_OUT, PTZ6_IN, 0,
+ PTZ5_FN, PTZ5_OUT, PTZ5_IN, 0,
+ PTZ4_FN, PTZ4_OUT, PTZ4_IN, 0,
+ PTZ3_FN, PTZ3_OUT, PTZ3_IN, 0,
+ PTZ2_FN, PTZ2_OUT, PTZ2_IN, 0,
+ PTZ1_FN, PTZ1_OUT, PTZ1_IN, 0,
+ PTZ0_FN, PTZ0_OUT, PTZ0_IN, 0 }
+ },
+
+ { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) {
+ PS0_15_FN1, PS0_15_FN2,
+ PS0_14_FN1, PS0_14_FN2,
+ PS0_13_FN1, PS0_13_FN2,
+ PS0_12_FN1, PS0_12_FN2,
+ PS0_11_FN1, PS0_11_FN2,
+ PS0_10_FN1, PS0_10_FN2,
+ PS0_9_FN1, PS0_9_FN2,
+ PS0_8_FN1, PS0_8_FN2,
+ PS0_7_FN1, PS0_7_FN2,
+ PS0_6_FN1, PS0_6_FN2,
+ PS0_5_FN1, PS0_5_FN2,
+ PS0_4_FN1, PS0_4_FN2,
+ PS0_3_FN1, PS0_3_FN2,
+ PS0_2_FN1, PS0_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS1_10_FN1, PS1_10_FN2,
+ PS1_9_FN1, PS1_9_FN2,
+ PS1_8_FN1, PS1_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS1_2_FN1, PS1_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) {
+ 0, 0,
+ 0, 0,
+ PS2_13_FN1, PS2_13_FN2,
+ PS2_12_FN1, PS2_12_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS2_7_FN1, PS2_7_FN2,
+ PS2_6_FN1, PS2_6_FN2,
+ PS2_5_FN1, PS2_5_FN2,
+ PS2_4_FN1, PS2_4_FN2,
+ 0, 0,
+ PS2_2_FN1, PS2_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1) {
+ PS3_15_FN1, PS3_15_FN2,
+ PS3_14_FN1, PS3_14_FN2,
+ PS3_13_FN1, PS3_13_FN2,
+ PS3_12_FN1, PS3_12_FN2,
+ PS3_11_FN1, PS3_11_FN2,
+ PS3_10_FN1, PS3_10_FN2,
+ PS3_9_FN1, PS3_9_FN2,
+ PS3_8_FN1, PS3_8_FN2,
+ PS3_7_FN1, PS3_7_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS3_2_FN1, PS3_2_FN2,
+ PS3_1_FN1, PS3_1_FN2,
+ 0, 0, }
+ },
+
+ { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) {
+ 0, 0,
+ PS4_14_FN1, PS4_14_FN2,
+ PS4_13_FN1, PS4_13_FN2,
+ PS4_12_FN1, PS4_12_FN2,
+ 0, 0,
+ PS4_10_FN1, PS4_10_FN2,
+ PS4_9_FN1, PS4_9_FN2,
+ PS4_8_FN1, PS4_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS4_4_FN1, PS4_4_FN2,
+ PS4_3_FN1, PS4_3_FN2,
+ PS4_2_FN1, PS4_2_FN2,
+ PS4_1_FN1, PS4_1_FN2,
+ PS4_0_FN1, PS4_0_FN2, }
+ },
+ { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS5_11_FN1, PS5_11_FN2,
+ PS5_10_FN1, PS5_10_FN2,
+ PS5_9_FN1, PS5_9_FN2,
+ PS5_8_FN1, PS5_8_FN2,
+ PS5_7_FN1, PS5_7_FN2,
+ PS5_6_FN1, PS5_6_FN2,
+ PS5_5_FN1, PS5_5_FN2,
+ PS5_4_FN1, PS5_4_FN2,
+ PS5_3_FN1, PS5_3_FN2,
+ PS5_2_FN1, PS5_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) {
+ PS6_15_FN1, PS6_15_FN2,
+ PS6_14_FN1, PS6_14_FN2,
+ PS6_13_FN1, PS6_13_FN2,
+ PS6_12_FN1, PS6_12_FN2,
+ PS6_11_FN1, PS6_11_FN2,
+ PS6_10_FN1, PS6_10_FN2,
+ PS6_9_FN1, PS6_9_FN2,
+ PS6_8_FN1, PS6_8_FN2,
+ PS6_7_FN1, PS6_7_FN2,
+ PS6_6_FN1, PS6_6_FN2,
+ PS6_5_FN1, PS6_5_FN2,
+ PS6_4_FN1, PS6_4_FN2,
+ PS6_3_FN1, PS6_3_FN2,
+ PS6_2_FN1, PS6_2_FN2,
+ PS6_1_FN1, PS6_1_FN2,
+ PS6_0_FN1, PS6_0_FN2, }
+ },
+ { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1) {
+ PS7_15_FN1, PS7_15_FN2,
+ PS7_14_FN1, PS7_14_FN2,
+ PS7_13_FN1, PS7_13_FN2,
+ PS7_12_FN1, PS7_12_FN2,
+ PS7_11_FN1, PS7_11_FN2,
+ PS7_10_FN1, PS7_10_FN2,
+ PS7_9_FN1, PS7_9_FN2,
+ PS7_8_FN1, PS7_8_FN2,
+ PS7_7_FN1, PS7_7_FN2,
+ PS7_6_FN1, PS7_6_FN2,
+ PS7_5_FN1, PS7_5_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1) {
+ PS8_15_FN1, PS8_15_FN2,
+ PS8_14_FN1, PS8_14_FN2,
+ PS8_13_FN1, PS8_13_FN2,
+ PS8_12_FN1, PS8_12_FN2,
+ PS8_11_FN1, PS8_11_FN2,
+ PS8_10_FN1, PS8_10_FN2,
+ PS8_9_FN1, PS8_9_FN2,
+ PS8_8_FN1, PS8_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffec0034, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) {
+ PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) {
+ PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) {
+ 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) {
+ 0, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) {
+ 0, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PODR", 0xffec0050, 8) {
+ PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) {
+ PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) {
+ 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) {
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) {
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) {
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xffec0062, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7757_pinmux_info = {
+ .name = "sh7757_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA0,
+ .last_gpio = GPIO_FN_ON_DQ0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
new file mode 100644
index 000000000000..3b1825d925bb
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
@@ -0,0 +1,1304 @@
+/*
+ * SH7785 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7785.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA,
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA,
+ PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA,
+ PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA,
+ PM1_DATA, PM0_DATA,
+ PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA,
+ PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA,
+ PP5_DATA, PP4_DATA, PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA,
+ PQ4_DATA, PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA,
+ PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE5_IN, PE4_IN, PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+ PH7_IN, PH6_IN, PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN,
+ PK7_IN, PK6_IN, PK5_IN, PK4_IN,
+ PK3_IN, PK2_IN, PK1_IN, PK0_IN,
+ PL7_IN, PL6_IN, PL5_IN, PL4_IN,
+ PL3_IN, PL2_IN, PL1_IN, PL0_IN,
+ PM1_IN, PM0_IN,
+ PN7_IN, PN6_IN, PN5_IN, PN4_IN,
+ PN3_IN, PN2_IN, PN1_IN, PN0_IN,
+ PP5_IN, PP4_IN, PP3_IN, PP2_IN, PP1_IN, PP0_IN,
+ PQ4_IN, PQ3_IN, PQ2_IN, PQ1_IN, PQ0_IN,
+ PR3_IN, PR2_IN, PR1_IN, PR0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE5_IN_PU, PE4_IN_PU, PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
+ PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
+ PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU,
+ PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU, PJ0_IN_PU,
+ PK7_IN_PU, PK6_IN_PU, PK5_IN_PU, PK4_IN_PU,
+ PK3_IN_PU, PK2_IN_PU, PK1_IN_PU, PK0_IN_PU,
+ PL7_IN_PU, PL6_IN_PU, PL5_IN_PU, PL4_IN_PU,
+ PL3_IN_PU, PL2_IN_PU, PL1_IN_PU, PL0_IN_PU,
+ PM1_IN_PU, PM0_IN_PU,
+ PN7_IN_PU, PN6_IN_PU, PN5_IN_PU, PN4_IN_PU,
+ PN3_IN_PU, PN2_IN_PU, PN1_IN_PU, PN0_IN_PU,
+ PP5_IN_PU, PP4_IN_PU, PP3_IN_PU, PP2_IN_PU, PP1_IN_PU, PP0_IN_PU,
+ PQ4_IN_PU, PQ3_IN_PU, PQ2_IN_PU, PQ1_IN_PU, PQ0_IN_PU,
+ PR3_IN_PU, PR2_IN_PU, PR1_IN_PU, PR0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE5_OUT, PE4_OUT, PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+ PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT,
+ PK7_OUT, PK6_OUT, PK5_OUT, PK4_OUT,
+ PK3_OUT, PK2_OUT, PK1_OUT, PK0_OUT,
+ PL7_OUT, PL6_OUT, PL5_OUT, PL4_OUT,
+ PL3_OUT, PL2_OUT, PL1_OUT, PL0_OUT,
+ PM1_OUT, PM0_OUT,
+ PN7_OUT, PN6_OUT, PN5_OUT, PN4_OUT,
+ PN3_OUT, PN2_OUT, PN1_OUT, PN0_OUT,
+ PP5_OUT, PP4_OUT, PP3_OUT, PP2_OUT, PP1_OUT, PP0_OUT,
+ PQ4_OUT, PQ3_OUT, PQ2_OUT, PQ1_OUT, PQ0_OUT,
+ PR3_OUT, PR2_OUT, PR1_OUT, PR0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE5_FN, PE4_FN, PE3_FN, PE2_FN, PE1_FN, PE0_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN, PG4_FN,
+ PG3_FN, PG2_FN, PG1_FN, PG0_FN,
+ PH7_FN, PH6_FN, PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN,
+ PJ3_FN, PJ2_FN, PJ1_FN, PJ0_FN,
+ PK7_FN, PK6_FN, PK5_FN, PK4_FN,
+ PK3_FN, PK2_FN, PK1_FN, PK0_FN,
+ PL7_FN, PL6_FN, PL5_FN, PL4_FN,
+ PL3_FN, PL2_FN, PL1_FN, PL0_FN,
+ PM1_FN, PM0_FN,
+ PN7_FN, PN6_FN, PN5_FN, PN4_FN,
+ PN3_FN, PN2_FN, PN1_FN, PN0_FN,
+ PP5_FN, PP4_FN, PP3_FN, PP2_FN, PP1_FN, PP0_FN,
+ PQ4_FN, PQ3_FN, PQ2_FN, PQ1_FN, PQ0_FN,
+ PR3_FN, PR2_FN, PR1_FN, PR0_FN,
+ P1MSEL15_0, P1MSEL15_1,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ D63_AD31_MARK,
+ D62_AD30_MARK,
+ D61_AD29_MARK,
+ D60_AD28_MARK,
+ D59_AD27_MARK,
+ D58_AD26_MARK,
+ D57_AD25_MARK,
+ D56_AD24_MARK,
+ D55_AD23_MARK,
+ D54_AD22_MARK,
+ D53_AD21_MARK,
+ D52_AD20_MARK,
+ D51_AD19_MARK,
+ D50_AD18_MARK,
+ D49_AD17_DB5_MARK,
+ D48_AD16_DB4_MARK,
+ D47_AD15_DB3_MARK,
+ D46_AD14_DB2_MARK,
+ D45_AD13_DB1_MARK,
+ D44_AD12_DB0_MARK,
+ D43_AD11_DG5_MARK,
+ D42_AD10_DG4_MARK,
+ D41_AD9_DG3_MARK,
+ D40_AD8_DG2_MARK,
+ D39_AD7_DG1_MARK,
+ D38_AD6_DG0_MARK,
+ D37_AD5_DR5_MARK,
+ D36_AD4_DR4_MARK,
+ D35_AD3_DR3_MARK,
+ D34_AD2_DR2_MARK,
+ D33_AD1_DR1_MARK,
+ D32_AD0_DR0_MARK,
+ REQ1_MARK,
+ REQ2_MARK,
+ REQ3_MARK,
+ GNT1_MARK,
+ GNT2_MARK,
+ GNT3_MARK,
+ MMCCLK_MARK,
+ D31_MARK,
+ D30_MARK,
+ D29_MARK,
+ D28_MARK,
+ D27_MARK,
+ D26_MARK,
+ D25_MARK,
+ D24_MARK,
+ D23_MARK,
+ D22_MARK,
+ D21_MARK,
+ D20_MARK,
+ D19_MARK,
+ D18_MARK,
+ D17_MARK,
+ D16_MARK,
+ SCIF1_SCK_MARK,
+ SCIF1_RXD_MARK,
+ SCIF1_TXD_MARK,
+ SCIF0_CTS_MARK,
+ INTD_MARK,
+ FCE_MARK,
+ SCIF0_RTS_MARK,
+ HSPI_CS_MARK,
+ FSE_MARK,
+ SCIF0_SCK_MARK,
+ HSPI_CLK_MARK,
+ FRE_MARK,
+ SCIF0_RXD_MARK,
+ HSPI_RX_MARK,
+ FRB_MARK,
+ SCIF0_TXD_MARK,
+ HSPI_TX_MARK,
+ FWE_MARK,
+ SCIF5_TXD_MARK,
+ HAC1_SYNC_MARK,
+ SSI1_WS_MARK,
+ SIOF_TXD_PJ_MARK,
+ HAC0_SDOUT_MARK,
+ SSI0_SDATA_MARK,
+ SIOF_RXD_PJ_MARK,
+ HAC0_SDIN_MARK,
+ SSI0_SCK_MARK,
+ SIOF_SYNC_PJ_MARK,
+ HAC0_SYNC_MARK,
+ SSI0_WS_MARK,
+ SIOF_MCLK_PJ_MARK,
+ HAC_RES_MARK,
+ SIOF_SCK_PJ_MARK,
+ HAC0_BITCLK_MARK,
+ SSI0_CLK_MARK,
+ HAC1_BITCLK_MARK,
+ SSI1_CLK_MARK,
+ TCLK_MARK,
+ IOIS16_MARK,
+ STATUS0_MARK,
+ DRAK0_PK3_MARK,
+ STATUS1_MARK,
+ DRAK1_PK2_MARK,
+ DACK2_MARK,
+ SCIF2_TXD_MARK,
+ MMCCMD_MARK,
+ SIOF_TXD_PK_MARK,
+ DACK3_MARK,
+ SCIF2_SCK_MARK,
+ MMCDAT_MARK,
+ SIOF_SCK_PK_MARK,
+ DREQ0_MARK,
+ DREQ1_MARK,
+ DRAK0_PK1_MARK,
+ DRAK1_PK0_MARK,
+ DREQ2_MARK,
+ INTB_MARK,
+ DREQ3_MARK,
+ INTC_MARK,
+ DRAK2_MARK,
+ CE2A_MARK,
+ IRL4_MARK,
+ FD4_MARK,
+ IRL5_MARK,
+ FD5_MARK,
+ IRL6_MARK,
+ FD6_MARK,
+ IRL7_MARK,
+ FD7_MARK,
+ DRAK3_MARK,
+ CE2B_MARK,
+ BREQ_BSACK_MARK,
+ BACK_BSREQ_MARK,
+ SCIF5_RXD_MARK,
+ HAC1_SDIN_MARK,
+ SSI1_SCK_MARK,
+ SCIF5_SCK_MARK,
+ HAC1_SDOUT_MARK,
+ SSI1_SDATA_MARK,
+ SCIF3_TXD_MARK,
+ FCLE_MARK,
+ SCIF3_RXD_MARK,
+ FALE_MARK,
+ SCIF3_SCK_MARK,
+ FD0_MARK,
+ SCIF4_TXD_MARK,
+ FD1_MARK,
+ SCIF4_RXD_MARK,
+ FD2_MARK,
+ SCIF4_SCK_MARK,
+ FD3_MARK,
+ DEVSEL_DCLKOUT_MARK,
+ STOP_CDE_MARK,
+ LOCK_ODDF_MARK,
+ TRDY_DISPL_MARK,
+ IRDY_HSYNC_MARK,
+ PCIFRAME_VSYNC_MARK,
+ INTA_MARK,
+ GNT0_GNTIN_MARK,
+ REQ0_REQOUT_MARK,
+ PERR_MARK,
+ SERR_MARK,
+ WE7_CBE3_MARK,
+ WE6_CBE2_MARK,
+ WE5_CBE1_MARK,
+ WE4_CBE0_MARK,
+ SCIF2_RXD_MARK,
+ SIOF_RXD_MARK,
+ MRESETOUT_MARK,
+ IRQOUT_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU),
+ PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PJ GPIO */
+ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU),
+ PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU),
+ PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU),
+ PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU),
+ PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU),
+ PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU),
+ PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU),
+ PINMUX_DATA(PJ0_DATA, PJ0_IN, PJ0_OUT, PJ0_IN_PU),
+
+ /* PK GPIO */
+ PINMUX_DATA(PK7_DATA, PK7_IN, PK7_OUT, PK7_IN_PU),
+ PINMUX_DATA(PK6_DATA, PK6_IN, PK6_OUT, PK6_IN_PU),
+ PINMUX_DATA(PK5_DATA, PK5_IN, PK5_OUT, PK5_IN_PU),
+ PINMUX_DATA(PK4_DATA, PK4_IN, PK4_OUT, PK4_IN_PU),
+ PINMUX_DATA(PK3_DATA, PK3_IN, PK3_OUT, PK3_IN_PU),
+ PINMUX_DATA(PK2_DATA, PK2_IN, PK2_OUT, PK2_IN_PU),
+ PINMUX_DATA(PK1_DATA, PK1_IN, PK1_OUT, PK1_IN_PU),
+ PINMUX_DATA(PK0_DATA, PK0_IN, PK0_OUT, PK0_IN_PU),
+
+ /* PL GPIO */
+ PINMUX_DATA(PL7_DATA, PL7_IN, PL7_OUT, PL7_IN_PU),
+ PINMUX_DATA(PL6_DATA, PL6_IN, PL6_OUT, PL6_IN_PU),
+ PINMUX_DATA(PL5_DATA, PL5_IN, PL5_OUT, PL5_IN_PU),
+ PINMUX_DATA(PL4_DATA, PL4_IN, PL4_OUT, PL4_IN_PU),
+ PINMUX_DATA(PL3_DATA, PL3_IN, PL3_OUT, PL3_IN_PU),
+ PINMUX_DATA(PL2_DATA, PL2_IN, PL2_OUT, PL2_IN_PU),
+ PINMUX_DATA(PL1_DATA, PL1_IN, PL1_OUT, PL1_IN_PU),
+ PINMUX_DATA(PL0_DATA, PL0_IN, PL0_OUT, PL0_IN_PU),
+
+ /* PM GPIO */
+ PINMUX_DATA(PM1_DATA, PM1_IN, PM1_OUT, PM1_IN_PU),
+ PINMUX_DATA(PM0_DATA, PM0_IN, PM0_OUT, PM0_IN_PU),
+
+ /* PN GPIO */
+ PINMUX_DATA(PN7_DATA, PN7_IN, PN7_OUT, PN7_IN_PU),
+ PINMUX_DATA(PN6_DATA, PN6_IN, PN6_OUT, PN6_IN_PU),
+ PINMUX_DATA(PN5_DATA, PN5_IN, PN5_OUT, PN5_IN_PU),
+ PINMUX_DATA(PN4_DATA, PN4_IN, PN4_OUT, PN4_IN_PU),
+ PINMUX_DATA(PN3_DATA, PN3_IN, PN3_OUT, PN3_IN_PU),
+ PINMUX_DATA(PN2_DATA, PN2_IN, PN2_OUT, PN2_IN_PU),
+ PINMUX_DATA(PN1_DATA, PN1_IN, PN1_OUT, PN1_IN_PU),
+ PINMUX_DATA(PN0_DATA, PN0_IN, PN0_OUT, PN0_IN_PU),
+
+ /* PP GPIO */
+ PINMUX_DATA(PP5_DATA, PP5_IN, PP5_OUT, PP5_IN_PU),
+ PINMUX_DATA(PP4_DATA, PP4_IN, PP4_OUT, PP4_IN_PU),
+ PINMUX_DATA(PP3_DATA, PP3_IN, PP3_OUT, PP3_IN_PU),
+ PINMUX_DATA(PP2_DATA, PP2_IN, PP2_OUT, PP2_IN_PU),
+ PINMUX_DATA(PP1_DATA, PP1_IN, PP1_OUT, PP1_IN_PU),
+ PINMUX_DATA(PP0_DATA, PP0_IN, PP0_OUT, PP0_IN_PU),
+
+ /* PQ GPIO */
+ PINMUX_DATA(PQ4_DATA, PQ4_IN, PQ4_OUT, PQ4_IN_PU),
+ PINMUX_DATA(PQ3_DATA, PQ3_IN, PQ3_OUT, PQ3_IN_PU),
+ PINMUX_DATA(PQ2_DATA, PQ2_IN, PQ2_OUT, PQ2_IN_PU),
+ PINMUX_DATA(PQ1_DATA, PQ1_IN, PQ1_OUT, PQ1_IN_PU),
+ PINMUX_DATA(PQ0_DATA, PQ0_IN, PQ0_OUT, PQ0_IN_PU),
+
+ /* PR GPIO */
+ PINMUX_DATA(PR3_DATA, PR3_IN, PR3_OUT, PR3_IN_PU),
+ PINMUX_DATA(PR2_DATA, PR2_IN, PR2_OUT, PR2_IN_PU),
+ PINMUX_DATA(PR1_DATA, PR1_IN, PR1_OUT, PR1_IN_PU),
+ PINMUX_DATA(PR0_DATA, PR0_IN, PR0_OUT, PR0_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(D63_AD31_MARK, PA7_FN),
+ PINMUX_DATA(D62_AD30_MARK, PA6_FN),
+ PINMUX_DATA(D61_AD29_MARK, PA5_FN),
+ PINMUX_DATA(D60_AD28_MARK, PA4_FN),
+ PINMUX_DATA(D59_AD27_MARK, PA3_FN),
+ PINMUX_DATA(D58_AD26_MARK, PA2_FN),
+ PINMUX_DATA(D57_AD25_MARK, PA1_FN),
+ PINMUX_DATA(D56_AD24_MARK, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(D55_AD23_MARK, PB7_FN),
+ PINMUX_DATA(D54_AD22_MARK, PB6_FN),
+ PINMUX_DATA(D53_AD21_MARK, PB5_FN),
+ PINMUX_DATA(D52_AD20_MARK, PB4_FN),
+ PINMUX_DATA(D51_AD19_MARK, PB3_FN),
+ PINMUX_DATA(D50_AD18_MARK, PB2_FN),
+ PINMUX_DATA(D49_AD17_DB5_MARK, PB1_FN),
+ PINMUX_DATA(D48_AD16_DB4_MARK, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(D47_AD15_DB3_MARK, PC7_FN),
+ PINMUX_DATA(D46_AD14_DB2_MARK, PC6_FN),
+ PINMUX_DATA(D45_AD13_DB1_MARK, PC5_FN),
+ PINMUX_DATA(D44_AD12_DB0_MARK, PC4_FN),
+ PINMUX_DATA(D43_AD11_DG5_MARK, PC3_FN),
+ PINMUX_DATA(D42_AD10_DG4_MARK, PC2_FN),
+ PINMUX_DATA(D41_AD9_DG3_MARK, PC1_FN),
+ PINMUX_DATA(D40_AD8_DG2_MARK, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(D39_AD7_DG1_MARK, PD7_FN),
+ PINMUX_DATA(D38_AD6_DG0_MARK, PD6_FN),
+ PINMUX_DATA(D37_AD5_DR5_MARK, PD5_FN),
+ PINMUX_DATA(D36_AD4_DR4_MARK, PD4_FN),
+ PINMUX_DATA(D35_AD3_DR3_MARK, PD3_FN),
+ PINMUX_DATA(D34_AD2_DR2_MARK, PD2_FN),
+ PINMUX_DATA(D33_AD1_DR1_MARK, PD1_FN),
+ PINMUX_DATA(D32_AD0_DR0_MARK, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(REQ1_MARK, PE5_FN),
+ PINMUX_DATA(REQ2_MARK, PE4_FN),
+ PINMUX_DATA(REQ3_MARK, P2MSEL0_0, PE3_FN),
+ PINMUX_DATA(GNT1_MARK, PE2_FN),
+ PINMUX_DATA(GNT2_MARK, PE1_FN),
+ PINMUX_DATA(GNT3_MARK, P2MSEL0_0, PE0_FN),
+ PINMUX_DATA(MMCCLK_MARK, P2MSEL0_1, PE0_FN),
+
+ /* PF FN */
+ PINMUX_DATA(D31_MARK, PF7_FN),
+ PINMUX_DATA(D30_MARK, PF6_FN),
+ PINMUX_DATA(D29_MARK, PF5_FN),
+ PINMUX_DATA(D28_MARK, PF4_FN),
+ PINMUX_DATA(D27_MARK, PF3_FN),
+ PINMUX_DATA(D26_MARK, PF2_FN),
+ PINMUX_DATA(D25_MARK, PF1_FN),
+ PINMUX_DATA(D24_MARK, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(D23_MARK, PG7_FN),
+ PINMUX_DATA(D22_MARK, PG6_FN),
+ PINMUX_DATA(D21_MARK, PG5_FN),
+ PINMUX_DATA(D20_MARK, PG4_FN),
+ PINMUX_DATA(D19_MARK, PG3_FN),
+ PINMUX_DATA(D18_MARK, PG2_FN),
+ PINMUX_DATA(D17_MARK, PG1_FN),
+ PINMUX_DATA(D16_MARK, PG0_FN),
+
+ /* PH FN */
+ PINMUX_DATA(SCIF1_SCK_MARK, PH7_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PH6_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PH5_FN),
+ PINMUX_DATA(SCIF0_CTS_MARK, PH4_FN),
+ PINMUX_DATA(INTD_MARK, P1MSEL7_1, PH4_FN),
+ PINMUX_DATA(FCE_MARK, P1MSEL8_1, P1MSEL7_0, PH4_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, P1MSEL8_0, P1MSEL7_0, PH3_FN),
+ PINMUX_DATA(HSPI_CS_MARK, P1MSEL8_0, P1MSEL7_1, PH3_FN),
+ PINMUX_DATA(FSE_MARK, P1MSEL8_1, P1MSEL7_0, PH3_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, P1MSEL8_0, P1MSEL7_0, PH2_FN),
+ PINMUX_DATA(HSPI_CLK_MARK, P1MSEL8_0, P1MSEL7_1, PH2_FN),
+ PINMUX_DATA(FRE_MARK, P1MSEL8_1, P1MSEL7_0, PH2_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, P1MSEL8_0, P1MSEL7_0, PH1_FN),
+ PINMUX_DATA(HSPI_RX_MARK, P1MSEL8_0, P1MSEL7_1, PH1_FN),
+ PINMUX_DATA(FRB_MARK, P1MSEL8_1, P1MSEL7_0, PH1_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, P1MSEL8_0, P1MSEL7_0, PH0_FN),
+ PINMUX_DATA(HSPI_TX_MARK, P1MSEL8_0, P1MSEL7_1, PH0_FN),
+ PINMUX_DATA(FWE_MARK, P1MSEL8_1, P1MSEL7_0, PH0_FN),
+
+ /* PJ FN */
+ PINMUX_DATA(SCIF5_TXD_MARK, P1MSEL2_0, P1MSEL1_0, PJ7_FN),
+ PINMUX_DATA(HAC1_SYNC_MARK, P1MSEL2_0, P1MSEL1_1, PJ7_FN),
+ PINMUX_DATA(SSI1_WS_MARK, P1MSEL2_1, P1MSEL1_0, PJ7_FN),
+ PINMUX_DATA(SIOF_TXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ6_FN),
+ PINMUX_DATA(HAC0_SDOUT_MARK, P1MSEL4_0, P1MSEL3_1, PJ6_FN),
+ PINMUX_DATA(SSI0_SDATA_MARK, P1MSEL4_1, P1MSEL3_0, PJ6_FN),
+ PINMUX_DATA(SIOF_RXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ5_FN),
+ PINMUX_DATA(HAC0_SDIN_MARK, P1MSEL4_0, P1MSEL3_1, PJ5_FN),
+ PINMUX_DATA(SSI0_SCK_MARK, P1MSEL4_1, P1MSEL3_0, PJ5_FN),
+ PINMUX_DATA(SIOF_SYNC_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ4_FN),
+ PINMUX_DATA(HAC0_SYNC_MARK, P1MSEL4_0, P1MSEL3_1, PJ4_FN),
+ PINMUX_DATA(SSI0_WS_MARK, P1MSEL4_1, P1MSEL3_0, PJ4_FN),
+ PINMUX_DATA(SIOF_MCLK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ3_FN),
+ PINMUX_DATA(HAC_RES_MARK, P1MSEL4_0, P1MSEL3_1, PJ3_FN),
+ PINMUX_DATA(SIOF_SCK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ2_FN),
+ PINMUX_DATA(HAC0_BITCLK_MARK, P1MSEL4_0, P1MSEL3_1, PJ2_FN),
+ PINMUX_DATA(SSI0_CLK_MARK, P1MSEL4_1, P1MSEL3_0, PJ2_FN),
+ PINMUX_DATA(HAC1_BITCLK_MARK, P1MSEL2_0, PJ1_FN),
+ PINMUX_DATA(SSI1_CLK_MARK, P1MSEL2_1, P1MSEL1_0, PJ1_FN),
+ PINMUX_DATA(TCLK_MARK, P1MSEL9_0, PJ0_FN),
+ PINMUX_DATA(IOIS16_MARK, P1MSEL9_1, PJ0_FN),
+
+ /* PK FN */
+ PINMUX_DATA(STATUS0_MARK, P1MSEL15_0, PK7_FN),
+ PINMUX_DATA(DRAK0_PK3_MARK, P1MSEL15_1, PK7_FN),
+ PINMUX_DATA(STATUS1_MARK, P1MSEL15_0, PK6_FN),
+ PINMUX_DATA(DRAK1_PK2_MARK, P1MSEL15_1, PK6_FN),
+ PINMUX_DATA(DACK2_MARK, P1MSEL12_0, P1MSEL11_0, PK5_FN),
+ PINMUX_DATA(SCIF2_TXD_MARK, P1MSEL12_1, P1MSEL11_0, PK5_FN),
+ PINMUX_DATA(MMCCMD_MARK, P1MSEL12_1, P1MSEL11_1, PK5_FN),
+ PINMUX_DATA(SIOF_TXD_PK_MARK, P2MSEL1_1,
+ P1MSEL12_0, P1MSEL11_1, PK5_FN),
+ PINMUX_DATA(DACK3_MARK, P1MSEL12_0, P1MSEL11_0, PK4_FN),
+ PINMUX_DATA(SCIF2_SCK_MARK, P1MSEL12_1, P1MSEL11_0, PK4_FN),
+ PINMUX_DATA(MMCDAT_MARK, P1MSEL12_1, P1MSEL11_1, PK4_FN),
+ PINMUX_DATA(SIOF_SCK_PK_MARK, P2MSEL1_1,
+ P1MSEL12_0, P1MSEL11_1, PK4_FN),
+ PINMUX_DATA(DREQ0_MARK, PK3_FN),
+ PINMUX_DATA(DREQ1_MARK, PK2_FN),
+ PINMUX_DATA(DRAK0_PK1_MARK, PK1_FN),
+ PINMUX_DATA(DRAK1_PK0_MARK, PK0_FN),
+
+ /* PL FN */
+ PINMUX_DATA(DREQ2_MARK, P1MSEL13_0, PL7_FN),
+ PINMUX_DATA(INTB_MARK, P1MSEL13_1, PL7_FN),
+ PINMUX_DATA(DREQ3_MARK, P1MSEL13_0, PL6_FN),
+ PINMUX_DATA(INTC_MARK, P1MSEL13_1, PL6_FN),
+ PINMUX_DATA(DRAK2_MARK, P1MSEL10_0, PL5_FN),
+ PINMUX_DATA(CE2A_MARK, P1MSEL10_1, PL5_FN),
+ PINMUX_DATA(IRL4_MARK, P1MSEL14_0, PL4_FN),
+ PINMUX_DATA(FD4_MARK, P1MSEL14_1, PL4_FN),
+ PINMUX_DATA(IRL5_MARK, P1MSEL14_0, PL3_FN),
+ PINMUX_DATA(FD5_MARK, P1MSEL14_1, PL3_FN),
+ PINMUX_DATA(IRL6_MARK, P1MSEL14_0, PL2_FN),
+ PINMUX_DATA(FD6_MARK, P1MSEL14_1, PL2_FN),
+ PINMUX_DATA(IRL7_MARK, P1MSEL14_0, PL1_FN),
+ PINMUX_DATA(FD7_MARK, P1MSEL14_1, PL1_FN),
+ PINMUX_DATA(DRAK3_MARK, P1MSEL10_0, PL0_FN),
+ PINMUX_DATA(CE2B_MARK, P1MSEL10_1, PL0_FN),
+
+ /* PM FN */
+ PINMUX_DATA(BREQ_BSACK_MARK, PM1_FN),
+ PINMUX_DATA(BACK_BSREQ_MARK, PM0_FN),
+
+ /* PN FN */
+ PINMUX_DATA(SCIF5_RXD_MARK, P1MSEL2_0, P1MSEL1_0, PN7_FN),
+ PINMUX_DATA(HAC1_SDIN_MARK, P1MSEL2_0, P1MSEL1_1, PN7_FN),
+ PINMUX_DATA(SSI1_SCK_MARK, P1MSEL2_1, P1MSEL1_0, PN7_FN),
+ PINMUX_DATA(SCIF5_SCK_MARK, P1MSEL2_0, P1MSEL1_0, PN6_FN),
+ PINMUX_DATA(HAC1_SDOUT_MARK, P1MSEL2_0, P1MSEL1_1, PN6_FN),
+ PINMUX_DATA(SSI1_SDATA_MARK, P1MSEL2_1, P1MSEL1_0, PN6_FN),
+ PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL0_0, PN5_FN),
+ PINMUX_DATA(FCLE_MARK, P1MSEL0_1, PN5_FN),
+ PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL0_0, PN4_FN),
+ PINMUX_DATA(FALE_MARK, P1MSEL0_1, PN4_FN),
+ PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL0_0, PN3_FN),
+ PINMUX_DATA(FD0_MARK, P1MSEL0_1, PN3_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, P1MSEL0_0, PN2_FN),
+ PINMUX_DATA(FD1_MARK, P1MSEL0_1, PN2_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, P1MSEL0_0, PN1_FN),
+ PINMUX_DATA(FD2_MARK, P1MSEL0_1, PN1_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, P1MSEL0_0, PN0_FN),
+ PINMUX_DATA(FD3_MARK, P1MSEL0_1, PN0_FN),
+
+ /* PP FN */
+ PINMUX_DATA(DEVSEL_DCLKOUT_MARK, PP5_FN),
+ PINMUX_DATA(STOP_CDE_MARK, PP4_FN),
+ PINMUX_DATA(LOCK_ODDF_MARK, PP3_FN),
+ PINMUX_DATA(TRDY_DISPL_MARK, PP2_FN),
+ PINMUX_DATA(IRDY_HSYNC_MARK, PP1_FN),
+ PINMUX_DATA(PCIFRAME_VSYNC_MARK, PP0_FN),
+
+ /* PQ FN */
+ PINMUX_DATA(INTA_MARK, PQ4_FN),
+ PINMUX_DATA(GNT0_GNTIN_MARK, PQ3_FN),
+ PINMUX_DATA(REQ0_REQOUT_MARK, PQ2_FN),
+ PINMUX_DATA(PERR_MARK, PQ1_FN),
+ PINMUX_DATA(SERR_MARK, PQ0_FN),
+
+ /* PR FN */
+ PINMUX_DATA(WE7_CBE3_MARK, PR3_FN),
+ PINMUX_DATA(WE6_CBE2_MARK, PR2_FN),
+ PINMUX_DATA(WE5_CBE1_MARK, PR1_FN),
+ PINMUX_DATA(WE4_CBE0_MARK, PR0_FN),
+
+ /* MISC FN */
+ PINMUX_DATA(SCIF2_RXD_MARK, P1MSEL6_0, P1MSEL5_0),
+ PINMUX_DATA(SIOF_RXD_MARK, P2MSEL1_1, P1MSEL6_1, P1MSEL5_0),
+ PINMUX_DATA(MRESETOUT_MARK, P2MSEL2_0),
+ PINMUX_DATA(IRQOUT_MARK, P2MSEL2_1),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH7, PH7_DATA),
+ PINMUX_GPIO(GPIO_PH6, PH6_DATA),
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* PJ */
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+ PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+
+ /* PK */
+ PINMUX_GPIO(GPIO_PK7, PK7_DATA),
+ PINMUX_GPIO(GPIO_PK6, PK6_DATA),
+ PINMUX_GPIO(GPIO_PK5, PK5_DATA),
+ PINMUX_GPIO(GPIO_PK4, PK4_DATA),
+ PINMUX_GPIO(GPIO_PK3, PK3_DATA),
+ PINMUX_GPIO(GPIO_PK2, PK2_DATA),
+ PINMUX_GPIO(GPIO_PK1, PK1_DATA),
+ PINMUX_GPIO(GPIO_PK0, PK0_DATA),
+
+ /* PL */
+ PINMUX_GPIO(GPIO_PL7, PL7_DATA),
+ PINMUX_GPIO(GPIO_PL6, PL6_DATA),
+ PINMUX_GPIO(GPIO_PL5, PL5_DATA),
+ PINMUX_GPIO(GPIO_PL4, PL4_DATA),
+ PINMUX_GPIO(GPIO_PL3, PL3_DATA),
+ PINMUX_GPIO(GPIO_PL2, PL2_DATA),
+ PINMUX_GPIO(GPIO_PL1, PL1_DATA),
+ PINMUX_GPIO(GPIO_PL0, PL0_DATA),
+
+ /* PM */
+ PINMUX_GPIO(GPIO_PM1, PM1_DATA),
+ PINMUX_GPIO(GPIO_PM0, PM0_DATA),
+
+ /* PN */
+ PINMUX_GPIO(GPIO_PN7, PN7_DATA),
+ PINMUX_GPIO(GPIO_PN6, PN6_DATA),
+ PINMUX_GPIO(GPIO_PN5, PN5_DATA),
+ PINMUX_GPIO(GPIO_PN4, PN4_DATA),
+ PINMUX_GPIO(GPIO_PN3, PN3_DATA),
+ PINMUX_GPIO(GPIO_PN2, PN2_DATA),
+ PINMUX_GPIO(GPIO_PN1, PN1_DATA),
+ PINMUX_GPIO(GPIO_PN0, PN0_DATA),
+
+ /* PP */
+ PINMUX_GPIO(GPIO_PP5, PP5_DATA),
+ PINMUX_GPIO(GPIO_PP4, PP4_DATA),
+ PINMUX_GPIO(GPIO_PP3, PP3_DATA),
+ PINMUX_GPIO(GPIO_PP2, PP2_DATA),
+ PINMUX_GPIO(GPIO_PP1, PP1_DATA),
+ PINMUX_GPIO(GPIO_PP0, PP0_DATA),
+
+ /* PQ */
+ PINMUX_GPIO(GPIO_PQ4, PQ4_DATA),
+ PINMUX_GPIO(GPIO_PQ3, PQ3_DATA),
+ PINMUX_GPIO(GPIO_PQ2, PQ2_DATA),
+ PINMUX_GPIO(GPIO_PQ1, PQ1_DATA),
+ PINMUX_GPIO(GPIO_PQ0, PQ0_DATA),
+
+ /* PR */
+ PINMUX_GPIO(GPIO_PR3, PR3_DATA),
+ PINMUX_GPIO(GPIO_PR2, PR2_DATA),
+ PINMUX_GPIO(GPIO_PR1, PR1_DATA),
+ PINMUX_GPIO(GPIO_PR0, PR0_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_D63_AD31, D63_AD31_MARK),
+ PINMUX_GPIO(GPIO_FN_D62_AD30, D62_AD30_MARK),
+ PINMUX_GPIO(GPIO_FN_D61_AD29, D61_AD29_MARK),
+ PINMUX_GPIO(GPIO_FN_D60_AD28, D60_AD28_MARK),
+ PINMUX_GPIO(GPIO_FN_D59_AD27, D59_AD27_MARK),
+ PINMUX_GPIO(GPIO_FN_D58_AD26, D58_AD26_MARK),
+ PINMUX_GPIO(GPIO_FN_D57_AD25, D57_AD25_MARK),
+ PINMUX_GPIO(GPIO_FN_D56_AD24, D56_AD24_MARK),
+ PINMUX_GPIO(GPIO_FN_D55_AD23, D55_AD23_MARK),
+ PINMUX_GPIO(GPIO_FN_D54_AD22, D54_AD22_MARK),
+ PINMUX_GPIO(GPIO_FN_D53_AD21, D53_AD21_MARK),
+ PINMUX_GPIO(GPIO_FN_D52_AD20, D52_AD20_MARK),
+ PINMUX_GPIO(GPIO_FN_D51_AD19, D51_AD19_MARK),
+ PINMUX_GPIO(GPIO_FN_D50_AD18, D50_AD18_MARK),
+ PINMUX_GPIO(GPIO_FN_D49_AD17_DB5, D49_AD17_DB5_MARK),
+ PINMUX_GPIO(GPIO_FN_D48_AD16_DB4, D48_AD16_DB4_MARK),
+ PINMUX_GPIO(GPIO_FN_D47_AD15_DB3, D47_AD15_DB3_MARK),
+ PINMUX_GPIO(GPIO_FN_D46_AD14_DB2, D46_AD14_DB2_MARK),
+ PINMUX_GPIO(GPIO_FN_D45_AD13_DB1, D45_AD13_DB1_MARK),
+ PINMUX_GPIO(GPIO_FN_D44_AD12_DB0, D44_AD12_DB0_MARK),
+ PINMUX_GPIO(GPIO_FN_D43_AD11_DG5, D43_AD11_DG5_MARK),
+ PINMUX_GPIO(GPIO_FN_D42_AD10_DG4, D42_AD10_DG4_MARK),
+ PINMUX_GPIO(GPIO_FN_D41_AD9_DG3, D41_AD9_DG3_MARK),
+ PINMUX_GPIO(GPIO_FN_D40_AD8_DG2, D40_AD8_DG2_MARK),
+ PINMUX_GPIO(GPIO_FN_D39_AD7_DG1, D39_AD7_DG1_MARK),
+ PINMUX_GPIO(GPIO_FN_D38_AD6_DG0, D38_AD6_DG0_MARK),
+ PINMUX_GPIO(GPIO_FN_D37_AD5_DR5, D37_AD5_DR5_MARK),
+ PINMUX_GPIO(GPIO_FN_D36_AD4_DR4, D36_AD4_DR4_MARK),
+ PINMUX_GPIO(GPIO_FN_D35_AD3_DR3, D35_AD3_DR3_MARK),
+ PINMUX_GPIO(GPIO_FN_D34_AD2_DR2, D34_AD2_DR2_MARK),
+ PINMUX_GPIO(GPIO_FN_D33_AD1_DR1, D33_AD1_DR1_MARK),
+ PINMUX_GPIO(GPIO_FN_D32_AD0_DR0, D32_AD0_DR0_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ1, REQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ2, REQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ3, REQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT1, GNT1_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT2, GNT2_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT3, GNT3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_INTD, INTD_MARK),
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK),
+ PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_FRE, FRE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_TXD_PJ, SIOF_TXD_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_RXD_PJ, SIOF_RXD_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SYNC_PJ, SIOF_SYNC_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_MCLK_PJ, SIOF_MCLK_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SCK_PJ, SIOF_SCK_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0_PK3, DRAK0_PK3_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1_PK2, DRAK1_PK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_TXD_PK, SIOF_TXD_PK_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT, MMCDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SCK_PK, SIOF_SCK_PK_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0_PK1, DRAK0_PK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1_PK0, DRAK1_PK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_INTB, INTB_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC, INTC_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK),
+ PINMUX_GPIO(GPIO_FN_FD4, FD4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK),
+ PINMUX_GPIO(GPIO_FN_FD5, FD5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK),
+ PINMUX_GPIO(GPIO_FN_FD6, FD6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK),
+ PINMUX_GPIO(GPIO_FN_FD7, FD7_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ_BSACK, BREQ_BSACK_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK_BSREQ, BACK_BSREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FD0, FD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FD1, FD1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FD2, FD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FD3, FD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DEVSEL_DCLKOUT, DEVSEL_DCLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_STOP_CDE, STOP_CDE_MARK),
+ PINMUX_GPIO(GPIO_FN_LOCK_ODDF, LOCK_ODDF_MARK),
+ PINMUX_GPIO(GPIO_FN_TRDY_DISPL, TRDY_DISPL_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDY_HSYNC, IRDY_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_PCIFRAME_VSYNC, PCIFRAME_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_INTA, INTA_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT0_GNTIN, GNT0_GNTIN_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ0_REQOUT, REQ0_REQOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_PERR, PERR_MARK),
+ PINMUX_GPIO(GPIO_FN_SERR, SERR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE7_CBE3, WE7_CBE3_MARK),
+ PINMUX_GPIO(GPIO_FN_WE6_CBE2, WE6_CBE2_MARK),
+ PINMUX_GPIO(GPIO_FN_WE5_CBE1, WE5_CBE1_MARK),
+ PINMUX_GPIO(GPIO_FN_WE4_CBE0, WE4_CBE0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_RXD, SIOF_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MRESETOUT, MRESETOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffe70002, 16, 2) {
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffe70004, 16, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffe70006, 16, 2) {
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
+ PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
+ PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
+ PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
+ PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
+ PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffe7000a, 16, 2) {
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffe7000c, 16, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
+ PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
+ PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
+ PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
+ PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffe7000e, 16, 2) {
+ PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU,
+ PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffe70010, 16, 2) {
+ PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU,
+ PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU,
+ PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU,
+ PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU,
+ PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU,
+ PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU,
+ PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU,
+ PJ0_FN, PJ0_OUT, PJ0_IN, PJ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xffe70012, 16, 2) {
+ PK7_FN, PK7_OUT, PK7_IN, PK7_IN_PU,
+ PK6_FN, PK6_OUT, PK6_IN, PK6_IN_PU,
+ PK5_FN, PK5_OUT, PK5_IN, PK5_IN_PU,
+ PK4_FN, PK4_OUT, PK4_IN, PK4_IN_PU,
+ PK3_FN, PK3_OUT, PK3_IN, PK3_IN_PU,
+ PK2_FN, PK2_OUT, PK2_IN, PK2_IN_PU,
+ PK1_FN, PK1_OUT, PK1_IN, PK1_IN_PU,
+ PK0_FN, PK0_OUT, PK0_IN, PK0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xffe70014, 16, 2) {
+ PL7_FN, PL7_OUT, PL7_IN, PL7_IN_PU,
+ PL6_FN, PL6_OUT, PL6_IN, PL6_IN_PU,
+ PL5_FN, PL5_OUT, PL5_IN, PL5_IN_PU,
+ PL4_FN, PL4_OUT, PL4_IN, PL4_IN_PU,
+ PL3_FN, PL3_OUT, PL3_IN, PL3_IN_PU,
+ PL2_FN, PL2_OUT, PL2_IN, PL2_IN_PU,
+ PL1_FN, PL1_OUT, PL1_IN, PL1_IN_PU,
+ PL0_FN, PL0_OUT, PL0_IN, PL0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PM1_FN, PM1_OUT, PM1_IN, PM1_IN_PU,
+ PM0_FN, PM0_OUT, PM0_IN, PM0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xffe70018, 16, 2) {
+ PN7_FN, PN7_OUT, PN7_IN, PN7_IN_PU,
+ PN6_FN, PN6_OUT, PN6_IN, PN6_IN_PU,
+ PN5_FN, PN5_OUT, PN5_IN, PN5_IN_PU,
+ PN4_FN, PN4_OUT, PN4_IN, PN4_IN_PU,
+ PN3_FN, PN3_OUT, PN3_IN, PN3_IN_PU,
+ PN2_FN, PN2_OUT, PN2_IN, PN2_IN_PU,
+ PN1_FN, PN1_OUT, PN1_IN, PN1_IN_PU,
+ PN0_FN, PN0_OUT, PN0_IN, PN0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PP5_FN, PP5_OUT, PP5_IN, PP5_IN_PU,
+ PP4_FN, PP4_OUT, PP4_IN, PP4_IN_PU,
+ PP3_FN, PP3_OUT, PP3_IN, PP3_IN_PU,
+ PP2_FN, PP2_OUT, PP2_IN, PP2_IN_PU,
+ PP1_FN, PP1_OUT, PP1_IN, PP1_IN_PU,
+ PP0_FN, PP0_OUT, PP0_IN, PP0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PQ4_FN, PQ4_OUT, PQ4_IN, PQ4_IN_PU,
+ PQ3_FN, PQ3_OUT, PQ3_IN, PQ3_IN_PU,
+ PQ2_FN, PQ2_OUT, PQ2_IN, PQ2_IN_PU,
+ PQ1_FN, PQ1_OUT, PQ1_IN, PQ1_IN_PU,
+ PQ0_FN, PQ0_OUT, PQ0_IN, PQ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PR3_FN, PR3_OUT, PR3_IN, PR3_IN_PU,
+ PR2_FN, PR2_OUT, PR2_IN, PR2_IN_PU,
+ PR1_FN, PR1_OUT, PR1_IN, PR1_IN_PU,
+ PR0_FN, PR0_OUT, PR0_IN, PR0_IN_PU }
+ },
+ { PINMUX_CFG_REG("P1MSELR", 0xffe70080, 16, 1) {
+ P1MSEL15_0, P1MSEL15_1,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, 0,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1 }
+ },
+ { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffe70020, 8) {
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffe70022, 8) {
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffe70024, 8) {
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffe70026, 8) {
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffe70028, 8) {
+ 0, 0, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffe7002a, 8) {
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffe7002c, 8) {
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffe7002e, 8) {
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffe70030, 8) {
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xffe70032, 8) {
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xffe70034, 8) {
+ PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA,
+ PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xffe70036, 8) {
+ 0, 0, 0, 0,
+ 0, 0, PM1_DATA, PM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xffe70038, 8) {
+ PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA,
+ PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xffe7003a, 8) {
+ 0, 0, PP5_DATA, PP4_DATA,
+ PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xffe7003c, 8) {
+ 0, 0, 0, PQ4_DATA,
+ PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xffe7003e, 8) {
+ 0, 0, 0, 0,
+ PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7785_pinmux_info = {
+ .name = "sh7785_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_IRQOUT,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
new file mode 100644
index 000000000000..1e18b58f9e5f
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
@@ -0,0 +1,837 @@
+/*
+ * SH7786 Pinmux
+ *
+ * Copyright (C) 2008, 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7785 pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7786.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE7_DATA, PE6_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA,
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE7_IN, PE6_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN,
+ PH7_IN, PH6_IN, PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE7_IN_PU, PE6_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU,
+ PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU,
+ PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE7_OUT, PE6_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT,
+ PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE7_FN, PE6_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN,
+ PH7_FN, PH6_FN, PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN,
+ PJ3_FN, PJ2_FN, PJ1_FN,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, P1MSEL5_1,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1,
+
+ P2MSEL15_0, P2MSEL15_1,
+ P2MSEL14_0, P2MSEL14_1,
+ P2MSEL13_0, P2MSEL13_1,
+ P2MSEL12_0, P2MSEL12_1,
+ P2MSEL11_0, P2MSEL11_1,
+ P2MSEL10_0, P2MSEL10_1,
+ P2MSEL9_0, P2MSEL9_1,
+ P2MSEL8_0, P2MSEL8_1,
+ P2MSEL7_0, P2MSEL7_1,
+ P2MSEL6_0, P2MSEL6_1,
+ P2MSEL5_0, P2MSEL5_1,
+ P2MSEL4_0, P2MSEL4_1,
+ P2MSEL3_0, P2MSEL3_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ DCLKIN_MARK, DCLKOUT_MARK, ODDF_MARK,
+ VSYNC_MARK, HSYNC_MARK, CDE_MARK, DISP_MARK,
+ DR0_MARK, DR1_MARK, DR2_MARK, DR3_MARK, DR4_MARK, DR5_MARK,
+ DG0_MARK, DG1_MARK, DG2_MARK, DG3_MARK, DG4_MARK, DG5_MARK,
+ DB0_MARK, DB1_MARK, DB2_MARK, DB3_MARK, DB4_MARK, DB5_MARK,
+ ETH_MAGIC_MARK, ETH_LINK_MARK, ETH_TX_ER_MARK, ETH_TX_EN_MARK,
+ ETH_MDIO_MARK, ETH_RX_CLK_MARK, ETH_MDC_MARK, ETH_COL_MARK,
+ ETH_TX_CLK_MARK, ETH_CRS_MARK, ETH_RX_DV_MARK, ETH_RX_ER_MARK,
+ ETH_TXD3_MARK, ETH_TXD2_MARK, ETH_TXD1_MARK, ETH_TXD0_MARK,
+ ETH_RXD3_MARK, ETH_RXD2_MARK, ETH_RXD1_MARK, ETH_RXD0_MARK,
+ HSPI_CLK_MARK, HSPI_CS_MARK, HSPI_RX_MARK, HSPI_TX_MARK,
+ SCIF0_CTS_MARK, SCIF0_RTS_MARK,
+ SCIF0_SCK_MARK, SCIF0_RXD_MARK, SCIF0_TXD_MARK,
+ SCIF1_SCK_MARK, SCIF1_RXD_MARK, SCIF1_TXD_MARK,
+ SCIF3_SCK_MARK, SCIF3_RXD_MARK, SCIF3_TXD_MARK,
+ SCIF4_SCK_MARK, SCIF4_RXD_MARK, SCIF4_TXD_MARK,
+ SCIF5_SCK_MARK, SCIF5_RXD_MARK, SCIF5_TXD_MARK,
+ BREQ_MARK, IOIS16_MARK, CE2B_MARK, CE2A_MARK, BACK_MARK,
+ FALE_MARK, FRB_MARK, FSTATUS_MARK,
+ FSE_MARK, FCLE_MARK,
+ DACK0_MARK, DACK1_MARK, DACK2_MARK, DACK3_MARK,
+ DREQ0_MARK, DREQ1_MARK, DREQ2_MARK, DREQ3_MARK,
+ DRAK0_MARK, DRAK1_MARK, DRAK2_MARK, DRAK3_MARK,
+ USB_OVC1_MARK, USB_OVC0_MARK,
+ USB_PENC1_MARK, USB_PENC0_MARK,
+ HAC_RES_MARK,
+ HAC1_SDOUT_MARK, HAC1_SDIN_MARK, HAC1_SYNC_MARK, HAC1_BITCLK_MARK,
+ HAC0_SDOUT_MARK, HAC0_SDIN_MARK, HAC0_SYNC_MARK, HAC0_BITCLK_MARK,
+ SSI0_SDATA_MARK, SSI0_SCK_MARK, SSI0_WS_MARK, SSI0_CLK_MARK,
+ SSI1_SDATA_MARK, SSI1_SCK_MARK, SSI1_WS_MARK, SSI1_CLK_MARK,
+ SSI2_SDATA_MARK, SSI2_SCK_MARK, SSI2_WS_MARK,
+ SSI3_SDATA_MARK, SSI3_SCK_MARK, SSI3_WS_MARK,
+ SDIF1CMD_MARK, SDIF1CD_MARK, SDIF1WP_MARK, SDIF1CLK_MARK,
+ SDIF1D3_MARK, SDIF1D2_MARK, SDIF1D1_MARK, SDIF1D0_MARK,
+ SDIF0CMD_MARK, SDIF0CD_MARK, SDIF0WP_MARK, SDIF0CLK_MARK,
+ SDIF0D3_MARK, SDIF0D2_MARK, SDIF0D1_MARK, SDIF0D0_MARK,
+ TCLK_MARK,
+ IRL7_MARK, IRL6_MARK, IRL5_MARK, IRL4_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU),
+ PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PJ GPIO */
+ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU),
+ PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU),
+ PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU),
+ PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU),
+ PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU),
+ PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU),
+ PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(CDE_MARK, P1MSEL2_0, PA7_FN),
+ PINMUX_DATA(DISP_MARK, P1MSEL2_0, PA6_FN),
+ PINMUX_DATA(DR5_MARK, P1MSEL2_0, PA5_FN),
+ PINMUX_DATA(DR4_MARK, P1MSEL2_0, PA4_FN),
+ PINMUX_DATA(DR3_MARK, P1MSEL2_0, PA3_FN),
+ PINMUX_DATA(DR2_MARK, P1MSEL2_0, PA2_FN),
+ PINMUX_DATA(DR1_MARK, P1MSEL2_0, PA1_FN),
+ PINMUX_DATA(DR0_MARK, P1MSEL2_0, PA0_FN),
+ PINMUX_DATA(ETH_MAGIC_MARK, P1MSEL2_1, PA7_FN),
+ PINMUX_DATA(ETH_LINK_MARK, P1MSEL2_1, PA6_FN),
+ PINMUX_DATA(ETH_TX_ER_MARK, P1MSEL2_1, PA5_FN),
+ PINMUX_DATA(ETH_TX_EN_MARK, P1MSEL2_1, PA4_FN),
+ PINMUX_DATA(ETH_TXD3_MARK, P1MSEL2_1, PA3_FN),
+ PINMUX_DATA(ETH_TXD2_MARK, P1MSEL2_1, PA2_FN),
+ PINMUX_DATA(ETH_TXD1_MARK, P1MSEL2_1, PA1_FN),
+ PINMUX_DATA(ETH_TXD0_MARK, P1MSEL2_1, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(VSYNC_MARK, P1MSEL3_0, PB7_FN),
+ PINMUX_DATA(ODDF_MARK, P1MSEL3_0, PB6_FN),
+ PINMUX_DATA(DG5_MARK, P1MSEL2_0, PB5_FN),
+ PINMUX_DATA(DG4_MARK, P1MSEL2_0, PB4_FN),
+ PINMUX_DATA(DG3_MARK, P1MSEL2_0, PB3_FN),
+ PINMUX_DATA(DG2_MARK, P1MSEL2_0, PB2_FN),
+ PINMUX_DATA(DG1_MARK, P1MSEL2_0, PB1_FN),
+ PINMUX_DATA(DG0_MARK, P1MSEL2_0, PB0_FN),
+ PINMUX_DATA(HSPI_CLK_MARK, P1MSEL3_1, PB7_FN),
+ PINMUX_DATA(HSPI_CS_MARK, P1MSEL3_1, PB6_FN),
+ PINMUX_DATA(ETH_MDIO_MARK, P1MSEL2_1, PB5_FN),
+ PINMUX_DATA(ETH_RX_CLK_MARK, P1MSEL2_1, PB4_FN),
+ PINMUX_DATA(ETH_MDC_MARK, P1MSEL2_1, PB3_FN),
+ PINMUX_DATA(ETH_COL_MARK, P1MSEL2_1, PB2_FN),
+ PINMUX_DATA(ETH_TX_CLK_MARK, P1MSEL2_1, PB1_FN),
+ PINMUX_DATA(ETH_CRS_MARK, P1MSEL2_1, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(DCLKIN_MARK, P1MSEL3_0, PC7_FN),
+ PINMUX_DATA(HSYNC_MARK, P1MSEL3_0, PC6_FN),
+ PINMUX_DATA(DB5_MARK, P1MSEL2_0, PC5_FN),
+ PINMUX_DATA(DB4_MARK, P1MSEL2_0, PC4_FN),
+ PINMUX_DATA(DB3_MARK, P1MSEL2_0, PC3_FN),
+ PINMUX_DATA(DB2_MARK, P1MSEL2_0, PC2_FN),
+ PINMUX_DATA(DB1_MARK, P1MSEL2_0, PC1_FN),
+ PINMUX_DATA(DB0_MARK, P1MSEL2_0, PC0_FN),
+
+ PINMUX_DATA(HSPI_RX_MARK, P1MSEL3_1, PC7_FN),
+ PINMUX_DATA(HSPI_TX_MARK, P1MSEL3_1, PC6_FN),
+ PINMUX_DATA(ETH_RXD3_MARK, P1MSEL2_1, PC5_FN),
+ PINMUX_DATA(ETH_RXD2_MARK, P1MSEL2_1, PC4_FN),
+ PINMUX_DATA(ETH_RXD1_MARK, P1MSEL2_1, PC3_FN),
+ PINMUX_DATA(ETH_RXD0_MARK, P1MSEL2_1, PC2_FN),
+ PINMUX_DATA(ETH_RX_DV_MARK, P1MSEL2_1, PC1_FN),
+ PINMUX_DATA(ETH_RX_ER_MARK, P1MSEL2_1, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(DCLKOUT_MARK, PD7_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PD6_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PD5_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PD4_FN),
+ PINMUX_DATA(DACK1_MARK, P1MSEL13_1, P1MSEL12_0, PD3_FN),
+ PINMUX_DATA(BACK_MARK, P1MSEL13_0, P1MSEL12_1, PD3_FN),
+ PINMUX_DATA(FALE_MARK, P1MSEL13_0, P1MSEL12_0, PD3_FN),
+ PINMUX_DATA(DACK0_MARK, P1MSEL14_1, PD2_FN),
+ PINMUX_DATA(FCLE_MARK, P1MSEL14_0, PD2_FN),
+ PINMUX_DATA(DREQ1_MARK, P1MSEL10_0, P1MSEL9_1, PD1_FN),
+ PINMUX_DATA(BREQ_MARK, P1MSEL10_1, P1MSEL9_0, PD1_FN),
+ PINMUX_DATA(USB_OVC1_MARK, P1MSEL10_0, P1MSEL9_0, PD1_FN),
+ PINMUX_DATA(DREQ0_MARK, P1MSEL11_1, PD0_FN),
+ PINMUX_DATA(USB_OVC0_MARK, P1MSEL11_0, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(USB_PENC1_MARK, PE7_FN),
+ PINMUX_DATA(USB_PENC0_MARK, PE6_FN),
+
+ /* PF FN */
+ PINMUX_DATA(HAC1_SDOUT_MARK, P2MSEL15_0, P2MSEL14_0, PF7_FN),
+ PINMUX_DATA(HAC1_SDIN_MARK, P2MSEL15_0, P2MSEL14_0, PF6_FN),
+ PINMUX_DATA(HAC1_SYNC_MARK, P2MSEL15_0, P2MSEL14_0, PF5_FN),
+ PINMUX_DATA(HAC1_BITCLK_MARK, P2MSEL15_0, P2MSEL14_0, PF4_FN),
+ PINMUX_DATA(HAC0_SDOUT_MARK, P2MSEL13_0, P2MSEL12_0, PF3_FN),
+ PINMUX_DATA(HAC0_SDIN_MARK, P2MSEL13_0, P2MSEL12_0, PF2_FN),
+ PINMUX_DATA(HAC0_SYNC_MARK, P2MSEL13_0, P2MSEL12_0, PF1_FN),
+ PINMUX_DATA(HAC0_BITCLK_MARK, P2MSEL13_0, P2MSEL12_0, PF0_FN),
+ PINMUX_DATA(SSI1_SDATA_MARK, P2MSEL15_0, P2MSEL14_1, PF7_FN),
+ PINMUX_DATA(SSI1_SCK_MARK, P2MSEL15_0, P2MSEL14_1, PF6_FN),
+ PINMUX_DATA(SSI1_WS_MARK, P2MSEL15_0, P2MSEL14_1, PF5_FN),
+ PINMUX_DATA(SSI1_CLK_MARK, P2MSEL15_0, P2MSEL14_1, PF4_FN),
+ PINMUX_DATA(SSI0_SDATA_MARK, P2MSEL13_0, P2MSEL12_1, PF3_FN),
+ PINMUX_DATA(SSI0_SCK_MARK, P2MSEL13_0, P2MSEL12_1, PF2_FN),
+ PINMUX_DATA(SSI0_WS_MARK, P2MSEL13_0, P2MSEL12_1, PF1_FN),
+ PINMUX_DATA(SSI0_CLK_MARK, P2MSEL13_0, P2MSEL12_1, PF0_FN),
+ PINMUX_DATA(SDIF1CMD_MARK, P2MSEL15_1, P2MSEL14_0, PF7_FN),
+ PINMUX_DATA(SDIF1CD_MARK, P2MSEL15_1, P2MSEL14_0, PF6_FN),
+ PINMUX_DATA(SDIF1WP_MARK, P2MSEL15_1, P2MSEL14_0, PF5_FN),
+ PINMUX_DATA(SDIF1CLK_MARK, P2MSEL15_1, P2MSEL14_0, PF4_FN),
+ PINMUX_DATA(SDIF1D3_MARK, P2MSEL13_1, P2MSEL12_0, PF3_FN),
+ PINMUX_DATA(SDIF1D2_MARK, P2MSEL13_1, P2MSEL12_0, PF2_FN),
+ PINMUX_DATA(SDIF1D1_MARK, P2MSEL13_1, P2MSEL12_0, PF1_FN),
+ PINMUX_DATA(SDIF1D0_MARK, P2MSEL13_1, P2MSEL12_0, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL8_0, PG7_FN),
+ PINMUX_DATA(SSI2_SDATA_MARK, P1MSEL8_1, PG7_FN),
+ PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL7_0, P1MSEL6_0, PG6_FN),
+ PINMUX_DATA(SSI2_SCK_MARK, P1MSEL7_1, P1MSEL6_0, PG6_FN),
+ PINMUX_DATA(TCLK_MARK, P1MSEL7_0, P1MSEL6_1, PG6_FN),
+ PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL5_0, P1MSEL4_0, PG5_FN),
+ PINMUX_DATA(SSI2_WS_MARK, P1MSEL5_1, P1MSEL4_0, PG5_FN),
+ PINMUX_DATA(HAC_RES_MARK, P1MSEL5_0, P1MSEL4_1, PG5_FN),
+
+ /* PH FN */
+ PINMUX_DATA(DACK3_MARK, P2MSEL4_0, PH7_FN),
+ PINMUX_DATA(SDIF0CMD_MARK, P2MSEL4_1, PH7_FN),
+ PINMUX_DATA(DACK2_MARK, P2MSEL4_0, PH6_FN),
+ PINMUX_DATA(SDIF0CD_MARK, P2MSEL4_1, PH6_FN),
+ PINMUX_DATA(DREQ3_MARK, P2MSEL4_0, PH5_FN),
+ PINMUX_DATA(SDIF0WP_MARK, P2MSEL4_1, PH5_FN),
+ PINMUX_DATA(DREQ2_MARK, P2MSEL3_0, P2MSEL2_1, PH4_FN),
+ PINMUX_DATA(SDIF0CLK_MARK, P2MSEL3_1, P2MSEL2_0, PH4_FN),
+ PINMUX_DATA(SCIF0_CTS_MARK, P2MSEL3_0, P2MSEL2_0, PH4_FN),
+ PINMUX_DATA(SDIF0D3_MARK, P2MSEL1_1, P2MSEL0_0, PH3_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, P2MSEL1_0, P2MSEL0_0, PH3_FN),
+ PINMUX_DATA(IRL7_MARK, P2MSEL1_0, P2MSEL0_1, PH3_FN),
+ PINMUX_DATA(SDIF0D2_MARK, P2MSEL1_1, P2MSEL0_0, PH2_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, P2MSEL1_0, P2MSEL0_0, PH2_FN),
+ PINMUX_DATA(IRL6_MARK, P2MSEL1_0, P2MSEL0_1, PH2_FN),
+ PINMUX_DATA(SDIF0D1_MARK, P2MSEL1_1, P2MSEL0_0, PH1_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, P2MSEL1_0, P2MSEL0_0, PH1_FN),
+ PINMUX_DATA(IRL5_MARK, P2MSEL1_0, P2MSEL0_1, PH1_FN),
+ PINMUX_DATA(SDIF0D0_MARK, P2MSEL1_1, P2MSEL0_0, PH0_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, P2MSEL1_0, P2MSEL0_0, PH0_FN),
+ PINMUX_DATA(IRL4_MARK, P2MSEL1_0, P2MSEL0_1, PH0_FN),
+
+ /* PJ FN */
+ PINMUX_DATA(SCIF5_SCK_MARK, P2MSEL11_1, PJ7_FN),
+ PINMUX_DATA(FRB_MARK, P2MSEL11_0, PJ7_FN),
+ PINMUX_DATA(SCIF5_RXD_MARK, P2MSEL10_0, PJ6_FN),
+ PINMUX_DATA(IOIS16_MARK, P2MSEL10_1, PJ6_FN),
+ PINMUX_DATA(SCIF5_TXD_MARK, P2MSEL10_0, PJ5_FN),
+ PINMUX_DATA(CE2B_MARK, P2MSEL10_1, PJ5_FN),
+ PINMUX_DATA(DRAK3_MARK, P2MSEL7_0, PJ4_FN),
+ PINMUX_DATA(CE2A_MARK, P2MSEL7_1, PJ4_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, P2MSEL9_0, P2MSEL8_0, PJ3_FN),
+ PINMUX_DATA(DRAK2_MARK, P2MSEL9_0, P2MSEL8_1, PJ3_FN),
+ PINMUX_DATA(SSI3_WS_MARK, P2MSEL9_1, P2MSEL8_0, PJ3_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, P2MSEL6_1, P2MSEL5_0, PJ2_FN),
+ PINMUX_DATA(DRAK1_MARK, P2MSEL6_0, P2MSEL5_1, PJ2_FN),
+ PINMUX_DATA(FSTATUS_MARK, P2MSEL6_0, P2MSEL5_0, PJ2_FN),
+ PINMUX_DATA(SSI3_SDATA_MARK, P2MSEL6_1, P2MSEL5_1, PJ2_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, P2MSEL6_1, P2MSEL5_0, PJ1_FN),
+ PINMUX_DATA(DRAK0_MARK, P2MSEL6_0, P2MSEL5_1, PJ1_FN),
+ PINMUX_DATA(FSE_MARK, P2MSEL6_0, P2MSEL5_0, PJ1_FN),
+ PINMUX_DATA(SSI3_SCK_MARK, P2MSEL6_1, P2MSEL5_1, PJ1_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH7, PH7_DATA),
+ PINMUX_GPIO(GPIO_PH6, PH6_DATA),
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* PJ */
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_CDE, CDE_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MAGIC, ETH_MAGIC_MARK),
+ PINMUX_GPIO(GPIO_FN_DISP, DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_LINK, ETH_LINK_MARK),
+ PINMUX_GPIO(GPIO_FN_DR5, DR5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_ER, ETH_TX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_DR4, DR4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_EN, ETH_TX_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_DR3, DR3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD3, ETH_TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DR2, DR2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD2, ETH_TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_DR1, DR1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD1, ETH_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_DR0, DR0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD0, ETH_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_VSYNC, VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_ODDF, ODDF_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK),
+ PINMUX_GPIO(GPIO_FN_DG5, DG5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MDIO, ETH_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_DG4, DG4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_CLK, ETH_RX_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DG3, DG3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MDC, ETH_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_DG2, DG2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_COL, ETH_COL_MARK),
+ PINMUX_GPIO(GPIO_FN_DG1, DG1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_CLK, ETH_TX_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DG0, DG0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_CRS, ETH_CRS_MARK),
+ PINMUX_GPIO(GPIO_FN_DCLKIN, DCLKIN_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK),
+ PINMUX_GPIO(GPIO_FN_HSYNC, HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK),
+ PINMUX_GPIO(GPIO_FN_DB5, DB5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD3, ETH_RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DB4, DB4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD2, ETH_RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_DB3, DB3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD1, ETH_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_DB2, DB2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD0, ETH_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_DB1, DB1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_DV, ETH_RX_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_DB0, DB0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_ER, ETH_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_DCLKOUT, DCLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_OVC1, USB_OVC1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_OVC0, USB_OVC0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_PENC1, USB_PENC1_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_PENC0, USB_PENC0_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CMD, SDIF1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CD, SDIF1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1WP, SDIF1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CLK, SDIF1CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D3, SDIF1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D2, SDIF1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D1, SDIF1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D0, SDIF1D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_SDATA, SSI2_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_SCK, SSI2_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_WS, SSI2_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CMD, SDIF0CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CD, SDIF0CD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0WP, SDIF0WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CLK, SDIF0CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D3, SDIF0D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D2, SDIF0D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D1, SDIF0D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D0, SDIF0D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_WS, SSI3_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_SDATA, SSI3_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_FSTATUS, FSTATUS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_SCK, SSI3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffcc0000, 16, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffcc0002, 16, 2) {
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffcc0004, 16, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffcc0006, 16, 2) {
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2) {
+ PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
+ PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2) {
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2) {
+ PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU,
+ PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffcc0010, 16, 2) {
+ PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU,
+ PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU,
+ PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU,
+ PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU,
+ PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU,
+ PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU,
+ PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("P1MSELR", 0xffcc0080, 16, 1) {
+ 0, 0,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, P1MSEL5_1,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1 }
+ },
+ { PINMUX_CFG_REG("P2MSELR", 0xffcc0082, 16, 1) {
+ P2MSEL15_0, P2MSEL15_1,
+ P2MSEL14_0, P2MSEL14_1,
+ P2MSEL13_0, P2MSEL13_1,
+ P2MSEL12_0, P2MSEL12_1,
+ P2MSEL11_0, P2MSEL11_1,
+ P2MSEL10_0, P2MSEL10_1,
+ P2MSEL9_0, P2MSEL9_1,
+ P2MSEL8_0, P2MSEL8_1,
+ P2MSEL7_0, P2MSEL7_1,
+ P2MSEL6_0, P2MSEL6_1,
+ P2MSEL5_0, P2MSEL5_1,
+ P2MSEL4_0, P2MSEL4_1,
+ P2MSEL3_0, P2MSEL3_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffcc0020, 8) {
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffcc0022, 8) {
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffcc0024, 8) {
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffcc0026, 8) {
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffcc0028, 8) {
+ PE7_DATA, PE6_DATA,
+ 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffcc002a, 8) {
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffcc002c, 8) {
+ PG7_DATA, PG6_DATA, PG5_DATA, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffcc002e, 8) {
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffcc0030, 8) {
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, 0 }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7786_pinmux_info = {
+ .name = "sh7786_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_IRL4,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/sh-pfc/pfc-shx3.c
new file mode 100644
index 000000000000..ccf6918b03c6
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-shx3.c
@@ -0,0 +1,582 @@
+/*
+ * SH-X3 prototype CPU pinmux
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/shx3.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+
+ PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+
+ PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE7_IN_PU, PE6_IN_PU, PE5_IN_PU, PE4_IN_PU,
+ PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
+ PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
+
+ PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+
+ PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE7_FN, PE6_FN, PE5_FN, PE4_FN,
+ PE3_FN, PE2_FN, PE1_FN, PE0_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN, PG4_FN,
+ PG3_FN, PG2_FN, PG1_FN, PG0_FN,
+
+ PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK,
+ D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+
+ BACK_MARK, BREQ_MARK,
+ WE3_MARK, WE2_MARK,
+ CS6_MARK, CS5_MARK, CS4_MARK,
+ CLKOUTENB_MARK,
+
+ DACK3_MARK, DACK2_MARK, DACK1_MARK, DACK0_MARK,
+ DREQ3_MARK, DREQ2_MARK, DREQ1_MARK, DREQ0_MARK,
+
+ IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
+
+ DRAK3_MARK, DRAK2_MARK, DRAK1_MARK, DRAK0_MARK,
+
+ SCK3_MARK, SCK2_MARK, SCK1_MARK, SCK0_MARK,
+ IRL3_MARK, IRL2_MARK, IRL1_MARK, IRL0_MARK,
+ TXD3_MARK, TXD2_MARK, TXD1_MARK, TXD0_MARK,
+ RXD3_MARK, RXD2_MARK, RXD1_MARK, RXD0_MARK,
+
+ CE2B_MARK, CE2A_MARK, IOIS16_MARK,
+ STATUS1_MARK, STATUS0_MARK,
+
+ IRQOUT_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t shx3_pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(D31_MARK, PA7_FN),
+ PINMUX_DATA(D30_MARK, PA6_FN),
+ PINMUX_DATA(D29_MARK, PA5_FN),
+ PINMUX_DATA(D28_MARK, PA4_FN),
+ PINMUX_DATA(D27_MARK, PA3_FN),
+ PINMUX_DATA(D26_MARK, PA2_FN),
+ PINMUX_DATA(D25_MARK, PA1_FN),
+ PINMUX_DATA(D24_MARK, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(D23_MARK, PB7_FN),
+ PINMUX_DATA(D22_MARK, PB6_FN),
+ PINMUX_DATA(D21_MARK, PB5_FN),
+ PINMUX_DATA(D20_MARK, PB4_FN),
+ PINMUX_DATA(D19_MARK, PB3_FN),
+ PINMUX_DATA(D18_MARK, PB2_FN),
+ PINMUX_DATA(D17_MARK, PB1_FN),
+ PINMUX_DATA(D16_MARK, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(BACK_MARK, PC7_FN),
+ PINMUX_DATA(BREQ_MARK, PC6_FN),
+ PINMUX_DATA(WE3_MARK, PC5_FN),
+ PINMUX_DATA(WE2_MARK, PC4_FN),
+ PINMUX_DATA(CS6_MARK, PC3_FN),
+ PINMUX_DATA(CS5_MARK, PC2_FN),
+ PINMUX_DATA(CS4_MARK, PC1_FN),
+ PINMUX_DATA(CLKOUTENB_MARK, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(DACK3_MARK, PD7_FN),
+ PINMUX_DATA(DACK2_MARK, PD6_FN),
+ PINMUX_DATA(DACK1_MARK, PD5_FN),
+ PINMUX_DATA(DACK0_MARK, PD4_FN),
+ PINMUX_DATA(DREQ3_MARK, PD3_FN),
+ PINMUX_DATA(DREQ2_MARK, PD2_FN),
+ PINMUX_DATA(DREQ1_MARK, PD1_FN),
+ PINMUX_DATA(DREQ0_MARK, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(IRQ3_MARK, PE7_FN),
+ PINMUX_DATA(IRQ2_MARK, PE6_FN),
+ PINMUX_DATA(IRQ1_MARK, PE5_FN),
+ PINMUX_DATA(IRQ0_MARK, PE4_FN),
+ PINMUX_DATA(DRAK3_MARK, PE3_FN),
+ PINMUX_DATA(DRAK2_MARK, PE2_FN),
+ PINMUX_DATA(DRAK1_MARK, PE1_FN),
+ PINMUX_DATA(DRAK0_MARK, PE0_FN),
+
+ /* PF FN */
+ PINMUX_DATA(SCK3_MARK, PF7_FN),
+ PINMUX_DATA(SCK2_MARK, PF6_FN),
+ PINMUX_DATA(SCK1_MARK, PF5_FN),
+ PINMUX_DATA(SCK0_MARK, PF4_FN),
+ PINMUX_DATA(IRL3_MARK, PF3_FN),
+ PINMUX_DATA(IRL2_MARK, PF2_FN),
+ PINMUX_DATA(IRL1_MARK, PF1_FN),
+ PINMUX_DATA(IRL0_MARK, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(TXD3_MARK, PG7_FN),
+ PINMUX_DATA(TXD2_MARK, PG6_FN),
+ PINMUX_DATA(TXD1_MARK, PG5_FN),
+ PINMUX_DATA(TXD0_MARK, PG4_FN),
+ PINMUX_DATA(RXD3_MARK, PG3_FN),
+ PINMUX_DATA(RXD2_MARK, PG2_FN),
+ PINMUX_DATA(RXD1_MARK, PG1_FN),
+ PINMUX_DATA(RXD0_MARK, PG0_FN),
+
+ /* PH FN */
+ PINMUX_DATA(CE2B_MARK, PH5_FN),
+ PINMUX_DATA(CE2A_MARK, PH4_FN),
+ PINMUX_DATA(IOIS16_MARK, PH3_FN),
+ PINMUX_DATA(STATUS1_MARK, PH2_FN),
+ PINMUX_DATA(STATUS0_MARK, PH1_FN),
+ PINMUX_DATA(IRQOUT_MARK, PH0_FN),
+};
+
+static struct pinmux_gpio shx3_pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3, WE3_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2, WE2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKOUTENB, CLKOUTENB_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL3, IRL3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL2, IRL2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL1, IRL1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL0, IRL0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+};
+
+static struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU,
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU,
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2) {
+ PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
+ PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
+ PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
+ PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
+ PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
+ PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
+ PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
+ PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU,
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
+ PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
+ PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
+ PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
+ PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU, },
+ },
+ { },
+};
+
+static struct pinmux_data_reg shx3_pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PABDR", 0xffc70010, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, },
+ },
+ { PINMUX_DATA_REG("PCDDR", 0xffc70014, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, },
+ },
+ { PINMUX_DATA_REG("PEFDR", 0xffc70018, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, },
+ },
+ { PINMUX_DATA_REG("PGHDR", 0xffc7001c, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, },
+ },
+ { },
+};
+
+struct sh_pfc_soc_info shx3_pinmux_info = {
+ .name = "shx3_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
+ PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_STATUS0,
+ .gpios = shx3_pinmux_gpios,
+ .gpio_data = shx3_pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(shx3_pinmux_data),
+ .cfg_regs = shx3_pinmux_config_regs,
+ .data_regs = shx3_pinmux_data_regs,
+};
diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 0646bf6e7889..11e0e1374d65 100644
--- a/drivers/sh/pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -7,22 +7,23 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-#define DRV_NAME "pinctrl-sh_pfc"
-#define pr_fmt(fmt) DRV_NAME " " KBUILD_MODNAME ": " fmt
+#define DRV_NAME "sh-pfc"
+#define pr_fmt(fmt) KBUILD_MODNAME " pinctrl: " fmt
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/sh_pfc.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
struct sh_pfc_pinctrl {
struct pinctrl_dev *pctl;
@@ -37,8 +38,6 @@ struct sh_pfc_pinctrl {
spinlock_t lock;
};

-static struct sh_pfc_pinctrl *sh_pfc_pmx;
-
static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
@@ -116,7 +115,7 @@ static void sh_pfc_noop_disable(struct pinctrl_dev *pctldev, unsigned func,
{
}

-static inline int sh_pfc_config_function(struct sh_pfc *pfc, unsigned offset)
+static int sh_pfc_config_function(struct sh_pfc *pfc, unsigned offset)
{
if (sh_pfc_config_gpio(pfc, offset,
PINMUX_TYPE_FUNCTION,
@@ -140,7 +139,7 @@ static int sh_pfc_reconfig_pin(struct sh_pfc *pfc, unsigned offset,
spin_lock_irqsave(&pfc->lock, flags);
- pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+ pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
/*
* See if the present config needs to first be de-configured.
@@ -172,8 +171,8 @@ static int sh_pfc_reconfig_pin(struct sh_pfc *pfc, unsigned offset,
GPIO_CFG_REQ) != 0)
goto err;
- pfc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
- pfc->gpios[offset].flags |= new_type;
+ pfc->info->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
+ pfc->info->gpios[offset].flags |= new_type;
ret = 0;
@@ -195,7 +194,7 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pfc->lock, flags);
- pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+ pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
switch (pinmux_type) {
case PINMUX_TYPE_FUNCTION:
@@ -236,7 +235,7 @@ static void sh_pfc_gpio_disable_free(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pfc->lock, flags);
- pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+ pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
sh_pfc_config_gpio(pfc, offset, pinmux_type, GPIO_CFG_FREE);
@@ -270,7 +269,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
struct sh_pfc *pfc = pmx->pfc;
- *config = pfc->gpios[pin].flags & PINMUX_FLAG_TYPE;
+ *config = pfc->info->gpios[pin].flags & PINMUX_FLAG_TYPE;
return 0;
}
@@ -328,10 +327,8 @@ static struct pinctrl_desc sh_pfc_pinctrl_desc = {
.confops = &sh_pfc_pinconf_ops,
};
-static inline void __devinit sh_pfc_map_one_gpio(struct sh_pfc *pfc,
- struct sh_pfc_pinctrl *pmx,
- struct pinmux_gpio *gpio,
- unsigned offset)
+static void sh_pfc_map_one_gpio(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx,
+ struct pinmux_gpio *gpio, unsigned offset)
{
struct pinmux_data_reg *dummy;
unsigned long flags;
@@ -351,16 +348,15 @@ static inline void __devinit sh_pfc_map_one_gpio(struct sh_pfc *pfc,
}
/* pinmux ranges -> pinctrl pin descs */
-static int __devinit sh_pfc_map_gpios(struct sh_pfc *pfc,
- struct sh_pfc_pinctrl *pmx)
+static int sh_pfc_map_gpios(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
{
unsigned long flags;
int i;
- pmx->nr_pads = pfc->last_gpio - pfc->first_gpio + 1;
+ pmx->nr_pads = pfc->info->last_gpio - pfc->info->first_gpio + 1;
- pmx->pads = kmalloc(sizeof(struct pinctrl_pin_desc) * pmx->nr_pads,
- GFP_KERNEL);
+ pmx->pads = devm_kzalloc(pfc->dev, sizeof(*pmx->pads) * pmx->nr_pads,
+ GFP_KERNEL);
if (unlikely(!pmx->pads)) {
pmx->nr_pads = 0;
return -ENOMEM;
@@ -376,9 +372,9 @@ static int __devinit sh_pfc_map_gpios(struct sh_pfc *pfc,
*/
for (i = 0; i < pmx->nr_pads; i++) {
struct pinctrl_pin_desc *pin = pmx->pads + i;
- struct pinmux_gpio *gpio = pfc->gpios + i;
+ struct pinmux_gpio *gpio = pfc->info->gpios + i;
- pin->number = pfc->first_gpio + i;
+ pin->number = pfc->info->first_gpio + i;
pin->name = gpio->name;
/* XXX */
@@ -396,21 +392,20 @@ static int __devinit sh_pfc_map_gpios(struct sh_pfc *pfc,
return 0;
}
-static int __devinit sh_pfc_map_functions(struct sh_pfc *pfc,
- struct sh_pfc_pinctrl *pmx)
+static int sh_pfc_map_functions(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
{
unsigned long flags;
int i, fn;
- pmx->functions = kzalloc(pmx->nr_functions * sizeof(void *),
- GFP_KERNEL);
+ pmx->functions = devm_kzalloc(pfc->dev, pmx->nr_functions *
+ sizeof(*pmx->functions), GFP_KERNEL);
if (unlikely(!pmx->functions))
return -ENOMEM;
spin_lock_irqsave(&pmx->lock, flags);
for (i = fn = 0; i < pmx->nr_pads; i++) {
- struct pinmux_gpio *gpio = pfc->gpios + i;
+ struct pinmux_gpio *gpio = pfc->info->gpios + i;
if ((gpio->flags & PINMUX_FLAG_TYPE) == PINMUX_TYPE_FUNCTION)
pmx->functions[fn++] = gpio;
@@ -421,109 +416,48 @@ static int __devinit sh_pfc_map_functions(struct sh_pfc *pfc,
return 0;
}
-static int __devinit sh_pfc_pinctrl_probe(struct platform_device *pdev)
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
{
- struct sh_pfc *pfc;
+ struct sh_pfc_pinctrl *pmx;
int ret;
- if (unlikely(!sh_pfc_pmx))
- return -ENODEV;
+ pmx = devm_kzalloc(pfc->dev, sizeof(*pmx), GFP_KERNEL);
+ if (unlikely(!pmx))
+ return -ENOMEM;
+
+ spin_lock_init(&pmx->lock);
- pfc = sh_pfc_pmx->pfc;
+ pmx->pfc = pfc;
+ pfc->pinctrl = pmx;
- ret = sh_pfc_map_gpios(pfc, sh_pfc_pmx);
+ ret = sh_pfc_map_gpios(pfc, pmx);
if (unlikely(ret != 0))
return ret;
- ret = sh_pfc_map_functions(pfc, sh_pfc_pmx);
+ ret = sh_pfc_map_functions(pfc, pmx);
if (unlikely(ret != 0))
- goto free_pads;
-
- sh_pfc_pmx->pctl = pinctrl_register(&sh_pfc_pinctrl_desc, &pdev->dev,
- sh_pfc_pmx);
- if (IS_ERR(sh_pfc_pmx->pctl)) {
- ret = PTR_ERR(sh_pfc_pmx->pctl);
- goto free_functions;
- }
+ return ret;
- sh_pfc_gpio_range.npins = pfc->last_gpio - pfc->first_gpio + 1;
- sh_pfc_gpio_range.base = pfc->first_gpio;
- sh_pfc_gpio_range.pin_base = pfc->first_gpio;
+ pmx->pctl = pinctrl_register(&sh_pfc_pinctrl_desc, pfc->dev, pmx);
+ if (IS_ERR(pmx->pctl))
+ return PTR_ERR(pmx->pctl);
- pinctrl_add_gpio_range(sh_pfc_pmx->pctl, &sh_pfc_gpio_range);
+ sh_pfc_gpio_range.npins = pfc->info->last_gpio
+ - pfc->info->first_gpio + 1;
+ sh_pfc_gpio_range.base = pfc->info->first_gpio;
+ sh_pfc_gpio_range.pin_base = pfc->info->first_gpio;
- platform_set_drvdata(pdev, sh_pfc_pmx);
+ pinctrl_add_gpio_range(pmx->pctl, &sh_pfc_gpio_range);
return 0;
-
-free_functions:
- kfree(sh_pfc_pmx->functions);
-free_pads:
- kfree(sh_pfc_pmx->pads);
- kfree(sh_pfc_pmx);
-
- return ret;
}
-static int __devexit sh_pfc_pinctrl_remove(struct platform_device *pdev)
+int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc)
{
- struct sh_pfc_pinctrl *pmx = platform_get_drvdata(pdev);
+ struct sh_pfc_pinctrl *pmx = pfc->pinctrl;
pinctrl_unregister(pmx->pctl);
- platform_set_drvdata(pdev, NULL);
-
- kfree(sh_pfc_pmx->functions);
- kfree(sh_pfc_pmx->pads);
- kfree(sh_pfc_pmx);
-
+ pfc->pinctrl = NULL;
return 0;
}
-
-static struct platform_driver sh_pfc_pinctrl_driver = {
- .probe = sh_pfc_pinctrl_probe,
- .remove = __devexit_p(sh_pfc_pinctrl_remove),
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static struct platform_device sh_pfc_pinctrl_device = {
- .name = DRV_NAME,
- .id = -1,
-};
-
-static int sh_pfc_pinctrl_init(void)
-{
- int rc;
-
- rc = platform_driver_register(&sh_pfc_pinctrl_driver);
- if (likely(!rc)) {
- rc = platform_device_register(&sh_pfc_pinctrl_device);
- if (unlikely(rc))
- platform_driver_unregister(&sh_pfc_pinctrl_driver);
- }
-
- return rc;
-}
-
-int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
-{
- sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL);
- if (unlikely(!sh_pfc_pmx))
- return -ENOMEM;
-
- spin_lock_init(&sh_pfc_pmx->lock);
-
- sh_pfc_pmx->pfc = pfc;
-
- return sh_pfc_pinctrl_init();
-}
-EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl);
-
-static void __exit sh_pfc_pinctrl_exit(void)
-{
- platform_driver_unregister(&sh_pfc_pinctrl_driver);
-}
-module_exit(sh_pfc_pinctrl_exit);
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
new file mode 100644
index 000000000000..13049c4c8d30
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -0,0 +1,195 @@
+/*
+ * SuperH Pin Function Controller Support
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __SH_PFC_H
+#define __SH_PFC_H
+
+#include <linux/stringify.h>
+#include <asm-generic/gpio.h>
+
+typedef unsigned short pinmux_enum_t;
+typedef unsigned short pinmux_flag_t;
+
+enum {
+ PINMUX_TYPE_NONE,
+
+ PINMUX_TYPE_FUNCTION,
+ PINMUX_TYPE_GPIO,
+ PINMUX_TYPE_OUTPUT,
+ PINMUX_TYPE_INPUT,
+ PINMUX_TYPE_INPUT_PULLUP,
+ PINMUX_TYPE_INPUT_PULLDOWN,
+
+ PINMUX_FLAG_TYPE, /* must be last */
+};
+
+#define PINMUX_FLAG_DBIT_SHIFT 5
+#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT)
+#define PINMUX_FLAG_DREG_SHIFT 10
+#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT)
+
+struct pinmux_gpio {
+ pinmux_enum_t enum_id;
+ pinmux_flag_t flags;
+ const char *name;
+};
+
+#define PINMUX_GPIO(gpio, data_or_mark) \
+ [gpio] = { .name = __stringify(gpio), .enum_id = data_or_mark, .flags = PINMUX_TYPE_NONE }
+
+#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
+
+struct pinmux_cfg_reg {
+ unsigned long reg, reg_width, field_width;
+ unsigned long *cnt;
+ pinmux_enum_t *enum_ids;
+ unsigned long *var_field_width;
+};
+
+#define PINMUX_CFG_REG(name, r, r_width, f_width) \
+ .reg = r, .reg_width = r_width, .field_width = f_width, \
+ .cnt = (unsigned long [r_width / f_width]) {}, \
+ .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)])
+
+#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
+ .reg = r, .reg_width = r_width, \
+ .cnt = (unsigned long [r_width]) {}, \
+ .var_field_width = (unsigned long [r_width]) { var_fw0, var_fwn, 0 }, \
+ .enum_ids = (pinmux_enum_t [])
+
+struct pinmux_data_reg {
+ unsigned long reg, reg_width, reg_shadow;
+ pinmux_enum_t *enum_ids;
+ void __iomem *mapped_reg;
+};
+
+#define PINMUX_DATA_REG(name, r, r_width) \
+ .reg = r, .reg_width = r_width, \
+ .enum_ids = (pinmux_enum_t [r_width]) \
+
+struct pinmux_irq {
+ int irq;
+ pinmux_enum_t *enum_ids;
+};
+
+#define PINMUX_IRQ(irq_nr, ids...) \
+ { .irq = irq_nr, .enum_ids = (pinmux_enum_t []) { ids, 0 } } \
+
+struct pinmux_range {
+ pinmux_enum_t begin;
+ pinmux_enum_t end;
+ pinmux_enum_t force;
+};
+
+struct sh_pfc_soc_info {
+ char *name;
+ pinmux_enum_t reserved_id;
+ struct pinmux_range data;
+ struct pinmux_range input;
+ struct pinmux_range input_pd;
+ struct pinmux_range input_pu;
+ struct pinmux_range output;
+ struct pinmux_range mark;
+ struct pinmux_range function;
+
+ unsigned first_gpio, last_gpio;
+
+ struct pinmux_gpio *gpios;
+ struct pinmux_cfg_reg *cfg_regs;
+ struct pinmux_data_reg *data_regs;
+
+ pinmux_enum_t *gpio_data;
+ unsigned int gpio_data_size;
+
+ struct pinmux_irq *gpio_irq;
+ unsigned int gpio_irq_size;
+
+ unsigned long unlock_reg;
+};
+
+enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
+
+/* helper macro for port */
+#define PORT_1(fn, pfx, sfx) fn(pfx, sfx)
+
+#define PORT_10(fn, pfx, sfx) \
+ PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
+ PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
+ PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
+ PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
+ PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx)
+
+#define PORT_90(fn, pfx, sfx) \
+ PORT_10(fn, pfx##1, sfx), PORT_10(fn, pfx##2, sfx), \
+ PORT_10(fn, pfx##3, sfx), PORT_10(fn, pfx##4, sfx), \
+ PORT_10(fn, pfx##5, sfx), PORT_10(fn, pfx##6, sfx), \
+ PORT_10(fn, pfx##7, sfx), PORT_10(fn, pfx##8, sfx), \
+ PORT_10(fn, pfx##9, sfx)
+
+#define _PORT_ALL(pfx, sfx) pfx##_##sfx
+#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
+#define PORT_ALL(str) CPU_ALL_PORT(_PORT_ALL, PORT, str)
+#define GPIO_PORT_ALL() CPU_ALL_PORT(_GPIO_PORT, , unused)
+#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
+
+/* helper macro for pinmux_enum_t */
+#define PORT_DATA_I(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
+
+#define PORT_DATA_I_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PD)
+
+#define PORT_DATA_I_PU(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PU)
+
+#define PORT_DATA_I_PU_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
+
+#define PORT_DATA_O(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
+
+#define PORT_DATA_IO(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
+ PORT##nr##_IN)
+
+#define PORT_DATA_IO_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
+ PORT##nr##_IN, PORT##nr##_IN_PD)
+
+#define PORT_DATA_IO_PU(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
+ PORT##nr##_IN, PORT##nr##_IN_PU)
+
+#define PORT_DATA_IO_PU_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
+ PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
+
+/* helper macro for top 4 bits in PORTnCR */
+#define _PCRH(in, in_pd, in_pu, out) \
+ 0, (out), (in), 0, \
+ 0, 0, 0, 0, \
+ 0, 0, (in_pd), 0, \
+ 0, 0, (in_pu), 0
+
+#define PORTCR(nr, reg) \
+ { \
+ PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
+ _PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
+ PORT##nr##_IN_PU, PORT##nr##_OUT), \
+ PORT##nr##_FN0, PORT##nr##_FN1, \
+ PORT##nr##_FN2, PORT##nr##_FN3, \
+ PORT##nr##_FN4, PORT##nr##_FN5, \
+ PORT##nr##_FN6, PORT##nr##_FN7 } \
+ }
+
+#endif /* __SH_PFC_H */
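
For orientation, the helpers in sh_pfc.h above are consumed by SoC-specific pin function tables. A minimal sketch of such a table follows; the enum identifiers (GPIO_PORT0, PORT0_DATA, PORT0_FN0, ...) and the 0xe6050000 register address are hypothetical placeholders, not taken from any real SoC file.

#include <linux/kernel.h>
#include "sh_pfc.h"

/* Hypothetical enum values; real SoC files generate these identifier lists. */
enum {
	PINMUX_RESERVED = 0,
	GPIO_PORT0,
	PORT0_DATA, PORT0_IN, PORT0_IN_PD, PORT0_IN_PU, PORT0_OUT,
	PORT0_FN0, PORT0_FN1, PORT0_FN2, PORT0_FN3,
	PORT0_FN4, PORT0_FN5, PORT0_FN6, PORT0_FN7,
};

static struct pinmux_gpio example_gpios[] = {
	PINMUX_GPIO(GPIO_PORT0, PORT0_DATA),	/* pin named "GPIO_PORT0" */
};

static pinmux_enum_t example_gpio_data[] = {
	PORT_DATA_IO(0),		/* PORT0 is a bidirectional GPIO */
};

static struct pinmux_cfg_reg example_cfg_regs[] = {
	PORTCR(0, 0xe6050000),		/* PORT0CR: 8-bit register, 4-bit fields */
	{ },
};

static struct sh_pfc_soc_info example_pinmux_info = {
	.name		= "example_pfc",
	.gpios		= example_gpios,
	.cfg_regs	= example_cfg_regs,
	.gpio_data	= example_gpio_data,
	.gpio_data_size	= ARRAY_SIZE(example_gpio_data),
	.first_gpio	= GPIO_PORT0,
	.last_gpio	= GPIO_PORT0,
};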
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 4c045053bbdd..295b349a05cf 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -451,8 +451,7 @@ int spear310_o2p(int offset)
return offset + 2;
}
-static int __devinit plgpio_probe_dt(struct platform_device *pdev,
- struct plgpio *plgpio)
+static int plgpio_probe_dt(struct platform_device *pdev, struct plgpio *plgpio)
{
struct device_node *np = pdev->dev.of_node;
int ret = -EINVAL;
@@ -522,7 +521,7 @@ static int __devinit plgpio_probe_dt(struct platform_device *pdev,
end:
return ret;
}
-static int __devinit plgpio_probe(struct platform_device *pdev)
+static int plgpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct plgpio *plgpio;
@@ -541,11 +540,9 @@ static int __devinit plgpio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- plgpio->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!plgpio->base) {
- dev_err(&pdev->dev, "request and ioremap fail\n");
- return -ENOMEM;
- }
+ plgpio->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(plgpio->base))
+ return PTR_ERR(plgpio->base);
ret = plgpio_probe_dt(pdev, plgpio);
if (ret) {
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 922c057521a1..6a7dae70db08 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -82,9 +82,8 @@ static int set_mode(struct spear_pmx *pmx, int mode)
return 0;
}
-void __devinit
-pmx_init_gpio_pingroup_addr(struct spear_gpio_pingroup *gpio_pingroup,
- unsigned count, u16 reg)
+void pmx_init_gpio_pingroup_addr(struct spear_gpio_pingroup *gpio_pingroup,
+ unsigned count, u16 reg)
{
int i, j;
@@ -93,7 +92,7 @@ pmx_init_gpio_pingroup_addr(struct spear_gpio_pingroup *gpio_pingroup,
gpio_pingroup[i].muxregs[j].reg = reg;
}
-void __devinit pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg)
+void pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg)
{
struct spear_pingroup *pgroup;
struct spear_modemux *modemux;
@@ -358,8 +357,8 @@ static struct pinctrl_desc spear_pinctrl_desc = {
.owner = THIS_MODULE,
};
-int __devinit spear_pinctrl_probe(struct platform_device *pdev,
- struct spear_pinctrl_machdata *machdata)
+int spear_pinctrl_probe(struct platform_device *pdev,
+ struct spear_pinctrl_machdata *machdata)
{
struct device_node *np = pdev->dev.of_node;
struct resource *res;
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 1be46ecc6d91..dc8bf85ecb2a 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -192,12 +192,11 @@ static inline void pmx_writel(struct spear_pmx *pmx, u32 val, u32 reg)
writel_relaxed(val, pmx->vbase + reg);
}
-void __devinit pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
-void __devinit
-pmx_init_gpio_pingroup_addr(struct spear_gpio_pingroup *gpio_pingroup,
- unsigned count, u16 reg);
-int __devinit spear_pinctrl_probe(struct platform_device *pdev,
- struct spear_pinctrl_machdata *machdata);
+void pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
+void pmx_init_gpio_pingroup_addr(struct spear_gpio_pingroup *gpio_pingroup,
+ unsigned count, u16 reg);
+int spear_pinctrl_probe(struct platform_device *pdev,
+ struct spear_pinctrl_machdata *machdata);
int spear_pinctrl_remove(struct platform_device *pdev);
#define SPEAR_PIN_0_TO_101 \
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index e40d785a3fc2..1a8bbfec60ca 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2699,7 +2699,7 @@ static struct of_device_id spear1310_pinctrl_of_match[] = {
{},
};
-static int __devinit spear1310_pinctrl_probe(struct platform_device *pdev)
+static int spear1310_pinctrl_probe(struct platform_device *pdev)
{
return spear_pinctrl_probe(pdev, &spear1310_machdata);
}
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index 8deaaff3156c..873966e2b99f 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2015,7 +2015,7 @@ static struct of_device_id spear1340_pinctrl_of_match[] = {
{},
};
-static int __devinit spear1340_pinctrl_probe(struct platform_device *pdev)
+static int spear1340_pinctrl_probe(struct platform_device *pdev)
{
return spear_pinctrl_probe(pdev, &spear1340_machdata);
}
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index f48e466e605a..4777c0d0e730 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -653,7 +653,7 @@ static struct of_device_id spear300_pinctrl_of_match[] = {
{},
};
-static int __devinit spear300_pinctrl_probe(struct platform_device *pdev)
+static int spear300_pinctrl_probe(struct platform_device *pdev)
{
int ret;
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 5b954c19a6d2..06c7e6f1c7f2 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -378,7 +378,7 @@ static struct of_device_id spear310_pinctrl_of_match[] = {
{},
};
-static int __devinit spear310_pinctrl_probe(struct platform_device *pdev)
+static int spear310_pinctrl_probe(struct platform_device *pdev)
{
int ret;
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index e9a5e6d39242..b8e290a8c8c9 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -3417,7 +3417,7 @@ static struct of_device_id spear320_pinctrl_of_match[] = {
{},
};
-static int __devinit spear320_pinctrl_probe(struct platform_device *pdev)
+static int spear320_pinctrl_probe(struct platform_device *pdev)
{
int ret;
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 8390dca2b4e1..69616aeaa966 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -1,3 +1,7 @@
if X86
source "drivers/platform/x86/Kconfig"
endif
+if GOLDFISH
+source "drivers/platform/goldfish/Kconfig"
+endif
+
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index b17c16ce54ad..8a44a4cd6d1e 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_OLPC) += olpc/
+obj-$(CONFIG_GOLDFISH) += goldfish/
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
new file mode 100644
index 000000000000..635ef25cc722
--- /dev/null
+++ b/drivers/platform/goldfish/Kconfig
@@ -0,0 +1,5 @@
+config GOLDFISH_PIPE
+ tristate "Goldfish virtual device for QEMU pipes"
+ ---help---
+ This is a virtual device to drive the QEMU pipe interface used by
+ the Goldfish Android Virtual Device.
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
new file mode 100644
index 000000000000..a0022395eee9
--- /dev/null
+++ b/drivers/platform/goldfish/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Goldfish platform specific drivers
+#
+obj-$(CONFIG_GOLDFISH) += pdev_bus.o
+obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
new file mode 100644
index 000000000000..4f5aa831f549
--- /dev/null
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -0,0 +1,612 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This source file contains the implementation of a special device driver
+ * that intends to provide a *very* fast communication channel between the
+ * guest system and the QEMU emulator.
+ *
+ * Usage from the guest is simply the following (error handling simplified):
+ *
+ * int fd = open("/dev/qemu_pipe",O_RDWR);
+ * .... write() or read() through the pipe.
+ *
+ * This driver doesn't deal with the exact protocol used during the session.
+ * It is intended to be as simple as something like:
+ *
+ * // do this _just_ after opening the fd to connect to a specific
+ * // emulator service.
+ * const char* msg = "<pipename>";
+ * if (write(fd, msg, strlen(msg)+1) < 0) {
+ * ... could not connect to <pipename> service
+ * close(fd);
+ * }
+ *
+ * // after this, simply read() and write() to communicate with the
+ * // service. Exact protocol details left as an exercise to the reader.
+ *
+ * This driver is very fast because it doesn't copy any data through
+ * intermediate buffers, since the emulator is capable of translating
+ * guest user addresses into host ones.
+ *
+ * Note that we must however ensure that each user page involved in the
+ * exchange is properly mapped during a transfer.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/*
+ * IMPORTANT: The following constants must match the ones used and defined
+ * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
+ */
+
+/* pipe device registers */
+#define PIPE_REG_COMMAND 0x00 /* write: value = command */
+#define PIPE_REG_STATUS 0x04 /* read */
+#define PIPE_REG_CHANNEL 0x08 /* read/write: channel id */
+#define PIPE_REG_SIZE 0x0c /* read/write: buffer size */
+#define PIPE_REG_ADDRESS 0x10 /* write: physical address */
+#define PIPE_REG_WAKES 0x14 /* read: wake flags */
+#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */
+#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */
+#define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */
+
+/* list of commands for PIPE_REG_COMMAND */
+#define CMD_OPEN 1 /* open new channel */
+#define CMD_CLOSE 2 /* close channel (from guest) */
+#define CMD_POLL 3 /* poll read/write status */
+
+/* List of bitflags returned in status of CMD_POLL command */
+#define PIPE_POLL_IN (1 << 0)
+#define PIPE_POLL_OUT (1 << 1)
+#define PIPE_POLL_HUP (1 << 2)
+
+/* The following commands are related to write operations */
+#define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */
+#define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing
+ is possible */
+
+/* The following commands are related to read operations; they must be
+ * listed in the same order as the corresponding write ones, since we
+ * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
+ * in goldfish_pipe_read_write() below.
+ */
+#define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */
+#define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading
+ * is possible */
+
+/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
+#define PIPE_ERROR_INVAL -1
+#define PIPE_ERROR_AGAIN -2
+#define PIPE_ERROR_NOMEM -3
+#define PIPE_ERROR_IO -4
+
+/* Bit-flags used to signal events from the emulator */
+#define PIPE_WAKE_CLOSED (1 << 0) /* emulator closed pipe */
+#define PIPE_WAKE_READ (1 << 1) /* pipe can now be read from */
+#define PIPE_WAKE_WRITE (1 << 2) /* pipe can now be written to */
+
+struct access_params {
+ u32 channel;
+ u32 size;
+ u32 address;
+ u32 cmd;
+ u32 result;
+ /* reserved for future extension */
+ u32 flags;
+};
+
+/* The global driver data. Holds a reference to the i/o page used to
+ * communicate with the emulator, and a wake queue for blocked tasks
+ * waiting to be awoken.
+ */
+struct goldfish_pipe_dev {
+ spinlock_t lock;
+ unsigned char __iomem *base;
+ struct access_params *aps;
+ int irq;
+};
+
+static struct goldfish_pipe_dev pipe_dev[1];
+
+/* This data type models a given pipe instance */
+struct goldfish_pipe {
+ struct goldfish_pipe_dev *dev;
+ struct mutex lock;
+ unsigned long flags;
+ wait_queue_head_t wake_queue;
+};
+
+
+/* Bit flags for the 'flags' field */
+enum {
+ BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
+ BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
+ BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
+};
+
+
+static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
+{
+ unsigned long flags;
+ u32 status;
+ struct goldfish_pipe_dev *dev = pipe->dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
+ writel(cmd, dev->base + PIPE_REG_COMMAND);
+ status = readl(dev->base + PIPE_REG_STATUS);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return status;
+}
+
+static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
+{
+ unsigned long flags;
+ struct goldfish_pipe_dev *dev = pipe->dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
+ writel(cmd, dev->base + PIPE_REG_COMMAND);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* This function converts an error code returned by the emulator through
+ * the PIPE_REG_STATUS i/o register into a valid negative errno value.
+ */
+static int goldfish_pipe_error_convert(int status)
+{
+ switch (status) {
+ case PIPE_ERROR_AGAIN:
+ return -EAGAIN;
+ case PIPE_ERROR_NOMEM:
+ return -ENOMEM;
+ case PIPE_ERROR_IO:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Note: QEMU returns 0 for reads of unknown registers; this is how we
+ * detect whether batch access (access_params) is supported.
+ */
+static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
+ struct access_params *aps)
+{
+ u32 aph, apl;
+ u64 paddr;
+ aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
+ apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);
+
+ paddr = ((u64)aph << 32) | apl;
+ if (paddr != (__pa(aps)))
+ return 0;
+ return 1;
+}
+
+/* 0 on success */
+static int setup_access_params_addr(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev)
+{
+ u64 paddr;
+ struct access_params *aps;
+
+ aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
+ if (!aps)
+ return -1;
+
+ /* FIXME */
+ paddr = __pa(aps);
+ writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
+ writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);
+
+ if (valid_batchbuffer_addr(dev, aps)) {
+ dev->aps = aps;
+ return 0;
+ } else
+ return -1;
+}
+
+/* A value that will not be set by the QEMU emulator */
+#define INITIAL_BATCH_RESULT (0xdeadbeaf)
+static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
+ unsigned long address, unsigned long avail,
+ struct goldfish_pipe *pipe, int *status)
+{
+ struct access_params *aps = dev->aps;
+
+ if (aps == NULL)
+ return -1;
+
+ aps->result = INITIAL_BATCH_RESULT;
+ aps->channel = (unsigned long)pipe;
+ aps->size = avail;
+ aps->address = address;
+ aps->cmd = cmd;
+ writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
+ /*
+ * If the aps->result has not changed, that means
+ * that the batch command failed
+ */
+ if (aps->result == INITIAL_BATCH_RESULT)
+ return -1;
+ *status = aps->result;
+ return 0;
+}
+
+/* This function is used for both reading from and writing to a given
+ * pipe.
+ */
+static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
+ size_t bufflen, int is_write)
+{
+ unsigned long irq_flags;
+ struct goldfish_pipe *pipe = filp->private_data;
+ struct goldfish_pipe_dev *dev = pipe->dev;
+ const int cmd_offset = is_write ? 0
+ : (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
+ unsigned long address, address_end;
+ int ret = 0;
+
+ /* If the emulator already closed the pipe, no need to go further */
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ return -EIO;
+
+ /* Null reads or writes succeed */
+ if (unlikely(bufflen == 0))
+ return 0;
+
+ /* Check the buffer range for access */
+ if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
+ buffer, bufflen))
+ return -EFAULT;
+
+ /* Serialize access to the pipe */
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ address = (unsigned long)(void *)buffer;
+ address_end = address + bufflen;
+
+ while (address < address_end) {
+ unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
+ unsigned long next = page_end < address_end ? page_end
+ : address_end;
+ unsigned long avail = next - address;
+ int status, wakeBit;
+
+ /* Ensure that the corresponding page is properly mapped */
+ /* FIXME: this isn't safe or sufficient - use get_user_pages */
+ if (is_write) {
+ char c;
+ /* Ensure that the page is mapped and readable */
+ if (__get_user(c, (char __user *)address)) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+ } else {
+ /* Ensure that the page is mapped and writable */
+ if (__put_user(0, (char __user *)address)) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+ }
+
+ /* Now, try to transfer the bytes in the current page */
+ spin_lock_irqsave(&dev->lock, irq_flags);
+ if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
+ address, avail, pipe, &status)) {
+ writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
+ writel(avail, dev->base + PIPE_REG_SIZE);
+ writel(address, dev->base + PIPE_REG_ADDRESS);
+ writel(CMD_WRITE_BUFFER + cmd_offset,
+ dev->base + PIPE_REG_COMMAND);
+ status = readl(dev->base + PIPE_REG_STATUS);
+ }
+ spin_unlock_irqrestore(&dev->lock, irq_flags);
+
+ if (status > 0) { /* Correct transfer */
+ ret += status;
+ address += status;
+ continue;
+ }
+
+ if (status == 0) /* EOF */
+ break;
+
+ /* An error occurred. If we already transferred data, just
+ * return its count. We expect the next call to return
+ * an error code. */
+ if (ret > 0)
+ break;
+
+ /* If the error is not PIPE_ERROR_AGAIN, or if we are in
+ * non-blocking mode, just return the error code.
+ */
+ if (status != PIPE_ERROR_AGAIN ||
+ (filp->f_flags & O_NONBLOCK) != 0) {
+ ret = goldfish_pipe_error_convert(status);
+ break;
+ }
+
+ /* We will have to wait until more data/space is available.
+ * First, mark the pipe as waiting for a specific wake signal.
+ */
+ wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
+ set_bit(wakeBit, &pipe->flags);
+
+ /* Tell the emulator we're going to wait for a wake event */
+ goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
+
+ /* Unlock the pipe, then wait for the wake signal */
+ mutex_unlock(&pipe->lock);
+
+ while (test_bit(wakeBit, &pipe->flags)) {
+ if (wait_event_interruptible(
+ pipe->wake_queue,
+ !test_bit(wakeBit, &pipe->flags)))
+ return -ERESTARTSYS;
+
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ return -EIO;
+ }
+
+ /* Try to re-acquire the lock */
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ /* Try the transfer again */
+ continue;
+ }
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
+ size_t bufflen, loff_t *ppos)
+{
+ return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
+}
+
+static ssize_t goldfish_pipe_write(struct file *filp,
+ const char __user *buffer, size_t bufflen,
+ loff_t *ppos)
+{
+ return goldfish_pipe_read_write(filp, (char __user *)buffer,
+ bufflen, 1);
+}
+
+
+static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+ unsigned int mask = 0;
+ int status;
+
+ mutex_lock(&pipe->lock);
+
+ poll_wait(filp, &pipe->wake_queue, wait);
+
+ status = goldfish_cmd_status(pipe, CMD_POLL);
+
+ mutex_unlock(&pipe->lock);
+
+ if (status & PIPE_POLL_IN)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (status & PIPE_POLL_OUT)
+ mask |= POLLOUT | POLLWRNORM;
+
+ if (status & PIPE_POLL_HUP)
+ mask |= POLLHUP;
+
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ mask |= POLLERR;
+
+ return mask;
+}
+
+static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
+{
+ struct goldfish_pipe_dev *dev = dev_id;
+ unsigned long irq_flags;
+ int count = 0;
+
+ /* We're going to read from the emulator a list of (channel,flags)
+ * pairs corresponding to the wake events that occurred on each
+ * blocked pipe (i.e. channel).
+ */
+ spin_lock_irqsave(&dev->lock, irq_flags);
+ for (;;) {
+ /* First read the channel, 0 means the end of the list */
+ struct goldfish_pipe *pipe;
+ unsigned long wakes;
+ unsigned long channel = readl(dev->base + PIPE_REG_CHANNEL);
+
+ if (channel == 0)
+ break;
+
+ /* Convert channel to struct pipe pointer + read wake flags */
+ wakes = readl(dev->base + PIPE_REG_WAKES);
+ pipe = (struct goldfish_pipe *)(ptrdiff_t)channel;
+
+ /* Did the emulator just close a pipe? */
+ if (wakes & PIPE_WAKE_CLOSED) {
+ set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
+ wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
+ }
+ if (wakes & PIPE_WAKE_READ)
+ clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
+ if (wakes & PIPE_WAKE_WRITE)
+ clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
+
+ wake_up_interruptible(&pipe->wake_queue);
+ count++;
+ }
+ spin_unlock_irqrestore(&dev->lock, irq_flags);
+
+ return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/**
+ * goldfish_pipe_open - open a channel to the AVD
+ * @inode: inode of device
+ * @file: file struct of opener
+ *
+ * Create a new pipe link between the emulator and the user application.
+ * Each new request produces a new pipe.
+ *
+ * Note: we use the pipe object's address as the channel ID. All goldfish
+ * emulations are 32bit right now so this is fine; a move to 64bit will
+ * need this addressing scheme reworked.
+ */
+static int goldfish_pipe_open(struct inode *inode, struct file *file)
+{
+ struct goldfish_pipe *pipe;
+ struct goldfish_pipe_dev *dev = pipe_dev;
+ int32_t status;
+
+ /* Allocate new pipe kernel object */
+ pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+ if (pipe == NULL)
+ return -ENOMEM;
+
+ pipe->dev = dev;
+ mutex_init(&pipe->lock);
+ init_waitqueue_head(&pipe->wake_queue);
+
+ /*
+ * Now, tell the emulator we're opening a new pipe. We use the
+ * pipe object's address as the channel identifier for simplicity.
+ */
+
+ status = goldfish_cmd_status(pipe, CMD_OPEN);
+ if (status < 0) {
+ kfree(pipe);
+ return status;
+ }
+
+ /* All is done, save the pipe into the file's private data field */
+ file->private_data = pipe;
+ return 0;
+}
+
+static int goldfish_pipe_release(struct inode *inode, struct file *filp)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+
+ /* The guest is closing the channel, so tell the emulator right now */
+ goldfish_cmd(pipe, CMD_CLOSE);
+ kfree(pipe);
+ filp->private_data = NULL;
+ return 0;
+}
+
+static const struct file_operations goldfish_pipe_fops = {
+ .owner = THIS_MODULE,
+ .read = goldfish_pipe_read,
+ .write = goldfish_pipe_write,
+ .poll = goldfish_pipe_poll,
+ .open = goldfish_pipe_open,
+ .release = goldfish_pipe_release,
+};
+
+static struct miscdevice goldfish_pipe_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "goldfish_pipe",
+ .fops = &goldfish_pipe_fops,
+};
+
+static int goldfish_pipe_probe(struct platform_device *pdev)
+{
+ int err;
+ struct resource *r;
+ struct goldfish_pipe_dev *dev = pipe_dev;
+
+ /* not thread safe, but this should not happen */
+ WARN_ON(dev->base != NULL);
+
+ spin_lock_init(&dev->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL || resource_size(r) < PAGE_SIZE) {
+ dev_err(&pdev->dev, "can't allocate i/o page\n");
+ return -EINVAL;
+ }
+ dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
+ if (dev->base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -EINVAL;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (r == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+ dev->irq = r->start;
+
+ err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
+ IRQF_SHARED, "goldfish_pipe", dev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to allocate IRQ\n");
+ goto error;
+ }
+
+ err = misc_register(&goldfish_pipe_device);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ goto error;
+ }
+ setup_access_params_addr(pdev, dev);
+ return 0;
+
+error:
+ dev->base = NULL;
+ return err;
+}
+
+static int goldfish_pipe_remove(struct platform_device *pdev)
+{
+ struct goldfish_pipe_dev *dev = pipe_dev;
+ misc_deregister(&goldfish_pipe_device);
+ dev->base = NULL;
+ return 0;
+}
+
+static struct platform_driver goldfish_pipe = {
+ .probe = goldfish_pipe_probe,
+ .remove = goldfish_pipe_remove,
+ .driver = {
+ .name = "goldfish_pipe"
+ }
+};
+
+module_platform_driver(goldfish_pipe);
+MODULE_AUTHOR("David Turner <digit@google.com>");
+MODULE_LICENSE("GPL");
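
For reference, the comment block at the top of goldfish_pipe.c sketches the guest-side protocol; a minimal, self-contained user-space example of that sequence is shown below. The "pingpong" service name is only a placeholder for whatever service the emulator actually exposes.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *service = "pingpong";	/* hypothetical emulator service */
	char buf[64];
	ssize_t n;
	int fd;

	fd = open("/dev/qemu_pipe", O_RDWR);
	if (fd < 0)
		return 1;

	/* Connect: write the service name, including the terminating '\0'. */
	if (write(fd, service, strlen(service) + 1) < 0) {
		close(fd);
		return 1;
	}

	/* From here on, plain read()/write() talk to the emulator service. */
	write(fd, "hello", 5);
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("got %zd bytes back\n", n);

	close(fd);
	return 0;
}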
diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
new file mode 100644
index 000000000000..92cc4cfafde5
--- /dev/null
+++ b/drivers/platform/goldfish/pdev_bus.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2011 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#define PDEV_BUS_OP_DONE (0x00)
+#define PDEV_BUS_OP_REMOVE_DEV (0x04)
+#define PDEV_BUS_OP_ADD_DEV (0x08)
+
+#define PDEV_BUS_OP_INIT (0x00)
+
+#define PDEV_BUS_OP (0x00)
+#define PDEV_BUS_GET_NAME (0x04)
+#define PDEV_BUS_NAME_LEN (0x08)
+#define PDEV_BUS_ID (0x0c)
+#define PDEV_BUS_IO_BASE (0x10)
+#define PDEV_BUS_IO_SIZE (0x14)
+#define PDEV_BUS_IRQ (0x18)
+#define PDEV_BUS_IRQ_COUNT (0x1c)
+
+struct pdev_bus_dev {
+ struct list_head list;
+ struct platform_device pdev;
+ struct resource resources[0];
+};
+
+static void goldfish_pdev_worker(struct work_struct *work);
+
+static void __iomem *pdev_bus_base;
+static unsigned long pdev_bus_addr;
+static unsigned long pdev_bus_len;
+static u32 pdev_bus_irq;
+static LIST_HEAD(pdev_bus_new_devices);
+static LIST_HEAD(pdev_bus_registered_devices);
+static LIST_HEAD(pdev_bus_removed_devices);
+static DECLARE_WORK(pdev_bus_worker, goldfish_pdev_worker);
+
+
+static void goldfish_pdev_worker(struct work_struct *work)
+{
+ int ret;
+ struct pdev_bus_dev *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &pdev_bus_removed_devices, list) {
+ list_del(&pos->list);
+ platform_device_unregister(&pos->pdev);
+ kfree(pos);
+ }
+ list_for_each_entry_safe(pos, n, &pdev_bus_new_devices, list) {
+ list_del(&pos->list);
+ ret = platform_device_register(&pos->pdev);
+ if (ret)
+ pr_err("goldfish_pdev_worker failed to register device, %s\n",
+ pos->pdev.name);
+ list_add_tail(&pos->list, &pdev_bus_registered_devices);
+ }
+}
+
+static void goldfish_pdev_remove(void)
+{
+ struct pdev_bus_dev *pos, *n;
+ u32 base;
+
+ base = readl(pdev_bus_base + PDEV_BUS_IO_BASE);
+
+ list_for_each_entry_safe(pos, n, &pdev_bus_new_devices, list) {
+ if (pos->resources[0].start == base) {
+ list_del(&pos->list);
+ kfree(pos);
+ return;
+ }
+ }
+ list_for_each_entry_safe(pos, n, &pdev_bus_registered_devices, list) {
+ if (pos->resources[0].start == base) {
+ list_del(&pos->list);
+ list_add_tail(&pos->list, &pdev_bus_removed_devices);
+ schedule_work(&pdev_bus_worker);
+ return;
+ }
+ }
+ pr_err("goldfish_pdev_remove could not find device at %x\n", base);
+}
+
+static int goldfish_new_pdev(void)
+{
+ struct pdev_bus_dev *dev;
+ u32 name_len;
+ u32 irq = -1, irq_count;
+ int resource_count = 2;
+ u32 base;
+ char *name;
+
+ base = readl(pdev_bus_base + PDEV_BUS_IO_BASE);
+
+ irq_count = readl(pdev_bus_base + PDEV_BUS_IRQ_COUNT);
+ name_len = readl(pdev_bus_base + PDEV_BUS_NAME_LEN);
+ if (irq_count)
+ resource_count++;
+
+ dev = kzalloc(sizeof(*dev) +
+ sizeof(struct resource) * resource_count +
+ name_len + 1 + sizeof(*dev->pdev.dev.dma_mask), GFP_ATOMIC);
+ if (dev == NULL)
+ return -ENOMEM;
+
+ dev->pdev.num_resources = resource_count;
+ dev->pdev.resource = (struct resource *)(dev + 1);
+ dev->pdev.name = name = (char *)(dev->pdev.resource + resource_count);
+ dev->pdev.dev.coherent_dma_mask = ~0;
+ dev->pdev.dev.dma_mask = (void *)(dev->pdev.name + name_len + 1);
+ *dev->pdev.dev.dma_mask = ~0;
+
+ writel((unsigned long)name, pdev_bus_base + PDEV_BUS_GET_NAME);
+ name[name_len] = '\0';
+ dev->pdev.id = readl(pdev_bus_base + PDEV_BUS_ID);
+ dev->pdev.resource[0].start = base;
+ dev->pdev.resource[0].end = base +
+ readl(pdev_bus_base + PDEV_BUS_IO_SIZE) - 1;
+ dev->pdev.resource[0].flags = IORESOURCE_MEM;
+ if (irq_count) {
+ irq = readl(pdev_bus_base + PDEV_BUS_IRQ);
+ dev->pdev.resource[1].start = irq;
+ dev->pdev.resource[1].end = irq + irq_count - 1;
+ dev->pdev.resource[1].flags = IORESOURCE_IRQ;
+ }
+
+ pr_debug("goldfish_new_pdev %s at %x irq %d\n", name, base, irq);
+ list_add_tail(&dev->list, &pdev_bus_new_devices);
+ schedule_work(&pdev_bus_worker);
+
+ return 0;
+}
+
+static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
+{
+ irqreturn_t ret = IRQ_NONE;
+ while (1) {
+ u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
+ switch (op) {
+ case PDEV_BUS_OP_DONE:
+ return IRQ_NONE;
+
+ case PDEV_BUS_OP_REMOVE_DEV:
+ goldfish_pdev_remove();
+ break;
+
+ case PDEV_BUS_OP_ADD_DEV:
+ goldfish_new_pdev();
+ break;
+ }
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static int goldfish_pdev_bus_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL)
+ return -EINVAL;
+
+ pdev_bus_addr = r->start;
+ pdev_bus_len = resource_size(r);
+
+ if (!request_mem_region(pdev_bus_addr, pdev_bus_len, "goldfish")) {
+ dev_err(&pdev->dev, "unable to reserve Goldfish MMIO.\n");
+ return -EBUSY;
+ }
+
+ pdev_bus_base = ioremap(pdev_bus_addr, pdev_bus_len);
+ if (pdev_bus_base == NULL) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "unable to map Goldfish MMIO.\n");
+ goto free_resources;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto free_map;
+ }
+
+ pdev_bus_irq = r->start;
+
+ ret = request_irq(pdev_bus_irq, goldfish_pdev_bus_interrupt,
+ IRQF_SHARED, "goldfish_pdev_bus", pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request Goldfish IRQ\n");
+ goto free_map;
+ }
+
+ writel(PDEV_BUS_OP_INIT, pdev_bus_base + PDEV_BUS_OP);
+ return 0;
+
+free_map:
+ iounmap(pdev_bus_base);
+free_resources:
+ release_mem_region(pdev_bus_addr, pdev_bus_len);
+ return ret;
+}
+
+static int goldfish_pdev_bus_remove(struct platform_device *pdev)
+{
+ iounmap(pdev_bus_base);
+ free_irq(pdev_bus_irq, pdev);
+ release_mem_region(pdev_bus_addr, pdev_bus_len);
+ return 0;
+}
+
+static struct platform_driver goldfish_pdev_bus_driver = {
+ .probe = goldfish_pdev_bus_probe,
+ .remove = goldfish_pdev_bus_remove,
+ .driver = {
+ .name = "goldfish_pdev_bus"
+ }
+};
+
+module_platform_driver(goldfish_pdev_bus_driver);
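
The devices enumerated above end up as ordinary platform devices carrying the MEM (and optional IRQ) resources filled in by goldfish_new_pdev(), so a guest driver simply binds by name. A minimal consumer sketch follows; the "goldfish-example" device name and the goldfish_example_* identifiers are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>

struct goldfish_example {
	void __iomem *base;
	int irq;
};

static int goldfish_example_probe(struct platform_device *pdev)
{
	struct goldfish_example *ge;
	struct resource *r;

	ge = devm_kzalloc(&pdev->dev, sizeof(*ge), GFP_KERNEL);
	if (ge == NULL)
		return -ENOMEM;

	/* Resource 0 is the MMIO window reported through PDEV_BUS_IO_BASE/SIZE. */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL)
		return -ENODEV;
	ge->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (ge->base == NULL)
		return -ENOMEM;

	/* Resource 1, if present, is the IRQ reported through PDEV_BUS_IRQ. */
	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	ge->irq = r ? r->start : -1;

	platform_set_drvdata(pdev, ge);
	dev_info(&pdev->dev, "probed, irq %d\n", ge->irq);
	return 0;
}

static struct platform_driver goldfish_example_driver = {
	.probe	= goldfish_example_probe,
	.driver	= {
		.name = "goldfish-example",	/* must match the name read via PDEV_BUS_GET_NAME */
	},
};
module_platform_driver(goldfish_example_driver);

MODULE_LICENSE("GPL");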
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c86bae828c28..7ab0b2fba503 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -80,10 +80,9 @@ config ASUS_LAPTOP
If you have an ACPI-compatible ASUS laptop, say Y or M here.
config DELL_LAPTOP
- tristate "Dell Laptop Extras (EXPERIMENTAL)"
+ tristate "Dell Laptop Extras"
depends on X86
depends on DCDBAS
- depends on EXPERIMENTAL
depends on BACKLIGHT_CLASS_DEVICE
depends on RFKILL || RFKILL = n
depends on SERIO_I8042
@@ -171,9 +170,8 @@ config AMILO_RFKILL
laptops.
config TC1100_WMI
- tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
+ tristate "HP Compaq TC1100 Tablet WMI Extras"
depends on !X86_64
- depends on EXPERIMENTAL
depends on ACPI
depends on ACPI_WMI
---help---
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 934d861a3235..afed7018a2b5 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -125,8 +125,11 @@ static const struct key_entry acer_wmi_keymap[] = {
{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
{KE_IGNORE, 0x81, {KEY_SLEEP} },
- {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
+ {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */
+ {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
+ {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
{KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
+ {KE_KEY, 0x85, {KEY_TOUCHPAD_TOGGLE} },
{KE_END, 0}
};
@@ -147,6 +150,7 @@ struct event_return_value {
#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
#define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */
#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
+#define ACER_WMID3_GDS_TOUCHPAD (1<<1) /* Touchpad */
struct lm_input_params {
u8 function_num; /* Function Number */
@@ -335,7 +339,7 @@ static struct quirk_entry quirk_lenovo_ideapad_s205 = {
};
/* The Aspire One has a dummy ACPI-WMI interface - disable it */
-static struct dmi_system_id __devinitdata acer_blacklist[] = {
+static struct dmi_system_id acer_blacklist[] = {
{
.ident = "Acer Aspire One (SSD)",
.matches = {
@@ -875,7 +879,7 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) };
struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
- u32 tmp;
+ u32 tmp = 0;
acpi_status status;
status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result);
@@ -884,14 +888,14 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
return status;
obj = (union acpi_object *) result.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER &&
- (obj->buffer.length == sizeof(u32) ||
- obj->buffer.length == sizeof(u64))) {
- tmp = *((u32 *) obj->buffer.pointer);
- } else if (obj->type == ACPI_TYPE_INTEGER) {
- tmp = (u32) obj->integer.value;
- } else {
- tmp = 0;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ tmp = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ tmp = (u32) obj->integer.value;
+ }
}
if (out)
@@ -1193,12 +1197,14 @@ static acpi_status WMID_set_capabilities(void)
return status;
obj = (union acpi_object *) out.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER &&
- (obj->buffer.length == sizeof(u32) ||
- obj->buffer.length == sizeof(u64))) {
- devices = *((u32 *) obj->buffer.pointer);
- } else if (obj->type == ACPI_TYPE_INTEGER) {
- devices = (u32) obj->integer.value;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
+ }
} else {
kfree(out.pointer);
return AE_ERROR;
@@ -1330,7 +1336,7 @@ static struct led_classdev mail_led = {
.brightness_set = mail_led_set,
};
-static int __devinit acer_led_init(struct device *dev)
+static int acer_led_init(struct device *dev)
{
return led_classdev_register(dev, &mail_led);
}
@@ -1372,7 +1378,7 @@ static const struct backlight_ops acer_bl_ops = {
.update_status = update_bl_status,
};
-static int __devinit acer_backlight_init(struct device *dev)
+static int acer_backlight_init(struct device *dev)
{
struct backlight_properties props;
struct backlight_device *bd;
@@ -1676,6 +1682,7 @@ static void acer_wmi_notify(u32 value, void *context)
acpi_status status;
u16 device_state;
const struct key_entry *key;
+ u32 scancode;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -1712,6 +1719,7 @@ static void acer_wmi_notify(u32 value, void *context)
pr_warn("Unknown key number - 0x%x\n",
return_value.key_num);
} else {
+ scancode = return_value.key_num;
switch (key->keycode) {
case KEY_WLAN:
case KEY_BLUETOOTH:
@@ -1725,9 +1733,11 @@ static void acer_wmi_notify(u32 value, void *context)
rfkill_set_sw_state(bluetooth_rfkill,
!(device_state & ACER_WMID3_GDS_BLUETOOTH));
break;
+ case KEY_TOUCHPAD_TOGGLE:
+ scancode = (device_state & ACER_WMID3_GDS_TOUCHPAD) ?
+ KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF;
}
- sparse_keymap_report_entry(acer_wmi_input_dev, key,
- 1, true);
+ sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
}
break;
case WMID_ACCEL_EVENT:
@@ -1946,12 +1956,14 @@ static u32 get_wmid_devices(void)
return 0;
obj = (union acpi_object *) out.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER &&
- (obj->buffer.length == sizeof(u32) ||
- obj->buffer.length == sizeof(u64))) {
- devices = *((u32 *) obj->buffer.pointer);
- } else if (obj->type == ACPI_TYPE_INTEGER) {
- devices = (u32) obj->integer.value;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
+ }
}
kfree(out.pointer);
@@ -1961,7 +1973,7 @@ static u32 get_wmid_devices(void)
/*
* Platform device
*/
-static int __devinit acer_platform_probe(struct platform_device *device)
+static int acer_platform_probe(struct platform_device *device)
{
int err;
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index c2e3e63d2c15..f94467c05225 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -515,7 +515,7 @@ static int acerhdf_suspend(struct device *dev)
return 0;
}
-static int __devinit acerhdf_probe(struct platform_device *device)
+static int acerhdf_probe(struct platform_device *device)
{
return 0;
}
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index 1deca7f6c4ea..6296f078b7bc 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -74,7 +74,7 @@ static const struct rfkill_ops amilo_m7440_rfkill_ops = {
.set_block = amilo_m7440_rfkill_set_block
};
-static const struct dmi_system_id __devinitconst amilo_rfkill_id_table[] = {
+static const struct dmi_system_id amilo_rfkill_id_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
@@ -95,7 +95,7 @@ static const struct dmi_system_id __devinitconst amilo_rfkill_id_table[] = {
static struct platform_device *amilo_rfkill_pdev;
static struct rfkill *amilo_rfkill_dev;
-static int __devinit amilo_rfkill_probe(struct platform_device *device)
+static int amilo_rfkill_probe(struct platform_device *device)
{
int rc;
const struct dmi_system_id *system_id =
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index db8f63841b42..f74bfcbb7bad 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -411,8 +411,7 @@ static int gmux_resume(struct pnp_dev *pnp)
return 0;
}
-static int __devinit gmux_probe(struct pnp_dev *pnp,
- const struct pnp_device_id *id)
+static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
struct apple_gmux_data *gmux_data;
struct resource *res;
@@ -577,7 +576,7 @@ err_free:
return ret;
}
-static void __devexit gmux_remove(struct pnp_dev *pnp)
+static void gmux_remove(struct pnp_dev *pnp)
{
struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
@@ -609,7 +608,7 @@ static const struct pnp_device_id gmux_device_ids[] = {
static struct pnp_driver gmux_pnp_driver = {
.name = "apple-gmux",
.probe = gmux_probe,
- .remove = __devexit_p(gmux_remove),
+ .remove = gmux_remove,
.id_table = gmux_device_ids,
.suspend = gmux_suspend,
.resume = gmux_resume
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 4b568df56643..d9f9a0dbc6f3 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -860,8 +860,10 @@ static ssize_t show_infos(struct device *dev,
/*
 * The HWRS method returns information about the hardware.
* 0x80 bit is for WLAN, 0x100 for Bluetooth.
+ * 0x40 for WWAN, 0x10 for WIMAX.
* The significance of others is yet to be found.
- * If we don't find the method, we assume the device are present.
+ * We don't currently use this for device detection, and it
+ * takes several seconds to run on some systems.
*/
rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
if (!ACPI_FAILURE(rv))
@@ -1682,7 +1684,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *model = NULL;
- unsigned long long bsts_result, hwrs_result;
+ unsigned long long bsts_result;
char *string = NULL;
acpi_status status;
@@ -1741,20 +1743,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
return -ENOMEM;
}
- if (*string)
+ if (string)
pr_notice(" %s model detected\n", string);
- /*
- * The HWRS method return informations about the hardware.
- * 0x80 bit is for WLAN, 0x100 for Bluetooth,
- * 0x40 for WWAN, 0x10 for WIMAX.
- * The significance of others is yet to be found.
- */
- status =
- acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
- if (!ACPI_FAILURE(status))
- pr_notice(" HWRS returned %x", (int)hwrs_result);
-
if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
asus->have_rsts = true;
@@ -1763,7 +1754,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
return AE_OK;
}
-static int __devinit asus_acpi_init(struct asus_laptop *asus)
+static int asus_acpi_init(struct asus_laptop *asus)
{
int result = 0;
@@ -1823,7 +1814,7 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus)
return result;
}
-static void __devinit asus_dmi_check(void)
+static void asus_dmi_check(void)
{
const char *model;
@@ -1839,7 +1830,7 @@ static void __devinit asus_dmi_check(void)
static bool asus_device_present;
-static int __devinit asus_acpi_add(struct acpi_device *device)
+static int asus_acpi_add(struct acpi_device *device)
{
struct asus_laptop *asus;
int result;
@@ -1919,7 +1910,7 @@ fail_platform:
return result;
}
-static int asus_acpi_remove(struct acpi_device *device, int type)
+static int asus_acpi_remove(struct acpi_device *device)
{
struct asus_laptop *asus = acpi_driver_data(device);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 6b0ebdeae916..be790402e0f1 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -32,7 +32,7 @@
#define ASUS_NB_WMI_FILE "asus-nb-wmi"
-MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>");
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
MODULE_DESCRIPTION("Asus Notebooks WMI Hotkey Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index c0e9ff489b24..f80ae4d10f68 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -51,7 +51,7 @@
#include "asus-wmi.h"
-MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>, "
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>, "
"Yong Wang <yong.y.wang@intel.com>");
MODULE_DESCRIPTION("Asus Generic WMI Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index c87ff16873f9..36e5e6c13db4 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -432,7 +432,7 @@ failed_sensitivity:
return error;
}
-static int cmpc_accel_remove_v4(struct acpi_device *acpi, int type)
+static int cmpc_accel_remove_v4(struct acpi_device *acpi)
{
struct input_dev *inputdev;
struct cmpc_accel *accel;
@@ -668,7 +668,7 @@ failed_file:
return error;
}
-static int cmpc_accel_remove(struct acpi_device *acpi, int type)
+static int cmpc_accel_remove(struct acpi_device *acpi)
{
struct input_dev *inputdev;
struct cmpc_accel *accel;
@@ -753,7 +753,7 @@ static int cmpc_tablet_add(struct acpi_device *acpi)
cmpc_tablet_idev_init);
}
-static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
+static int cmpc_tablet_remove(struct acpi_device *acpi)
{
return cmpc_remove_acpi_notify_device(acpi);
}
@@ -1000,7 +1000,7 @@ out_bd:
return retval;
}
-static int cmpc_ipml_remove(struct acpi_device *acpi, int type)
+static int cmpc_ipml_remove(struct acpi_device *acpi)
{
struct ipml200_dev *ipml;
@@ -1079,7 +1079,7 @@ static int cmpc_keys_add(struct acpi_device *acpi)
cmpc_keys_idev_init);
}
-static int cmpc_keys_remove(struct acpi_device *acpi, int type)
+static int cmpc_keys_remove(struct acpi_device *acpi)
{
return cmpc_remove_acpi_notify_device(acpi);
}
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 1887e2f166a4..475cc5242511 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -713,15 +713,15 @@ static struct attribute_group compal_attribute_group = {
.attrs = compal_attributes
};
-static int __devinit compal_probe(struct platform_device *);
-static int __devexit compal_remove(struct platform_device *);
+static int compal_probe(struct platform_device *);
+static int compal_remove(struct platform_device *);
static struct platform_driver compal_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
.probe = compal_probe,
- .remove = __devexit_p(compal_remove)
+ .remove = compal_remove,
};
static enum power_supply_property compal_bat_properties[] = {
@@ -1015,7 +1015,7 @@ err_backlight:
return ret;
}
-static int __devinit compal_probe(struct platform_device *pdev)
+static int compal_probe(struct platform_device *pdev)
{
int err;
struct compal_data *data;
@@ -1067,7 +1067,7 @@ static void __exit compal_cleanup(void)
pr_info("Driver unloaded\n");
}
-static int __devexit compal_remove(struct platform_device *pdev)
+static int compal_remove(struct platform_device *pdev)
{
struct compal_data *data;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 927c33af67ec..fa3ee6209572 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -115,7 +115,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
};
MODULE_DEVICE_TABLE(dmi, dell_device_table);
-static struct dmi_system_id __devinitdata dell_quirks[] = {
+static struct dmi_system_id dell_quirks[] = {
{
.callback = dmi_matched,
.ident = "Dell Vostro V130",
@@ -503,7 +503,7 @@ static struct led_classdev touchpad_led = {
.flags = LED_CORE_SUSPENDRESUME,
};
-static int __devinit touchpad_led_init(struct device *dev)
+static int touchpad_led_init(struct device *dev)
{
return led_classdev_register(dev, &touchpad_led);
}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 5ca264179f4e..5d26e70bed6c 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1007,7 +1007,7 @@ static int eeepc_get_fan_pwm(void)
static void eeepc_set_fan_pwm(int value)
{
- value = SENSORS_LIMIT(value, 0, 255);
+ value = clamp_val(value, 0, 255);
value = value * 100 / 255;
ec_write(EEEPC_EC_FAN_PWM, value);
}
@@ -1375,7 +1375,7 @@ static void cmsg_quirks(struct eeepc_laptop *eeepc)
cmsg_quirk(eeepc, CM_ASL_TPD, "TPD");
}
-static int __devinit eeepc_acpi_init(struct eeepc_laptop *eeepc)
+static int eeepc_acpi_init(struct eeepc_laptop *eeepc)
{
unsigned int init_flags;
int result;
@@ -1407,7 +1407,7 @@ static int __devinit eeepc_acpi_init(struct eeepc_laptop *eeepc)
return 0;
}
-static void __devinit eeepc_enable_camera(struct eeepc_laptop *eeepc)
+static void eeepc_enable_camera(struct eeepc_laptop *eeepc)
{
/*
* If the following call to set_acpi() fails, it's because there's no
@@ -1419,7 +1419,7 @@ static void __devinit eeepc_enable_camera(struct eeepc_laptop *eeepc)
static bool eeepc_device_present;
-static int __devinit eeepc_acpi_add(struct acpi_device *device)
+static int eeepc_acpi_add(struct acpi_device *device)
{
struct eeepc_laptop *eeepc;
int result;
@@ -1501,7 +1501,7 @@ fail_platform:
return result;
}
-static int eeepc_acpi_remove(struct acpi_device *device, int type)
+static int eeepc_acpi_remove(struct acpi_device *device)
{
struct eeepc_laptop *eeepc = acpi_driver_data(device);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 5838332ea5bd..60cb76a5b513 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -39,7 +39,7 @@
#define EEEPC_WMI_FILE "eeepc-wmi"
-MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>");
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index c4c1a5444b38..1c9386e7c58c 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -733,7 +733,7 @@ err_stop:
return result;
}
-static int acpi_fujitsu_remove(struct acpi_device *device, int type)
+static int acpi_fujitsu_remove(struct acpi_device *device)
{
struct fujitsu_t *fujitsu = acpi_driver_data(device);
struct input_dev *input = fujitsu->input;
@@ -938,7 +938,7 @@ err_stop:
return result;
}
-static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
+static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
{
struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
struct input_dev *input = fujitsu_hotkey->input;
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index f77484528b1b..570926c10014 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -192,8 +192,8 @@ static void fujitsu_reset(void)
fujitsu_send_state();
}
-static int __devinit input_fujitsu_setup(struct device *parent,
- const char *name, const char *phys)
+static int input_fujitsu_setup(struct device *parent, const char *name,
+ const char *phys)
{
struct input_dev *idev;
int error;
@@ -277,21 +277,21 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi)
+static void fujitsu_dmi_common(const struct dmi_system_id *dmi)
{
pr_info("%s\n", dmi->ident);
memcpy(fujitsu.config.keymap, dmi->driver_data,
sizeof(fujitsu.config.keymap));
}
-static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
+static int fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
{
fujitsu_dmi_common(dmi);
fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
return 1;
}
-static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
+static int fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
{
fujitsu_dmi_common(dmi);
fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
@@ -366,8 +366,7 @@ static const struct dmi_system_id dmi_ids[] __initconst = {
{ NULL }
};
-static acpi_status __devinit
-fujitsu_walk_resources(struct acpi_resource *res, void *data)
+static acpi_status fujitsu_walk_resources(struct acpi_resource *res, void *data)
{
switch (res->type) {
case ACPI_RESOURCE_TYPE_IRQ:
@@ -390,7 +389,7 @@ fujitsu_walk_resources(struct acpi_resource *res, void *data)
}
}
-static int __devinit acpi_fujitsu_add(struct acpi_device *adev)
+static int acpi_fujitsu_add(struct acpi_device *adev)
{
acpi_status status;
int error;
@@ -432,7 +431,7 @@ static int __devinit acpi_fujitsu_add(struct acpi_device *adev)
return 0;
}
-static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
+static int acpi_fujitsu_remove(struct acpi_device *adev)
{
free_irq(fujitsu.irq, fujitsu_interrupt);
release_region(fujitsu.io_base, fujitsu.io_length);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 387183a2d6dd..1dde7accf27c 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -72,7 +72,7 @@ enum hp_wmi_event_ids {
HPWMI_LOCK_SWITCH = 7,
};
-static int __devinit hp_wmi_bios_setup(struct platform_device *device);
+static int hp_wmi_bios_setup(struct platform_device *device);
static int __exit hp_wmi_bios_remove(struct platform_device *device);
static int hp_wmi_resume_handler(struct device *device);
@@ -619,7 +619,7 @@ static void cleanup_sysfs(struct platform_device *device)
device_remove_file(&device->dev, &dev_attr_tablet);
}
-static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
+static int hp_wmi_rfkill_setup(struct platform_device *device)
{
int err;
int wireless = 0;
@@ -698,7 +698,7 @@ register_wifi_error:
return err;
}
-static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
+static int hp_wmi_rfkill2_setup(struct platform_device *device)
{
int err, i;
struct bios_rfkill2_state state;
@@ -778,7 +778,7 @@ fail:
return err;
}
-static int __devinit hp_wmi_bios_setup(struct platform_device *device)
+static int hp_wmi_bios_setup(struct platform_device *device)
{
int err;
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 18d74f29dcb2..e64a7a870d42 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -337,7 +337,7 @@ static int lis3lv02d_add(struct acpi_device *device)
return ret;
}
-static int lis3lv02d_remove(struct acpi_device *device, int type)
+static int lis3lv02d_remove(struct acpi_device *device)
{
if (!device)
return -EINVAL;
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 7481146a5b47..97c2be195efc 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -244,7 +244,7 @@ static int __init ibm_rtl_init(void) {
if (force)
pr_warn("module loaded by force\n");
/* first ensure that we are running on IBM HW */
- else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
+ else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
return -ENODEV;
/* Get the address for the Extended BIOS Data Area */
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 5ff4f2e314d2..17f00b8dc5cb 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -298,7 +298,7 @@ static const struct file_operations debugfs_cfg_fops = {
.release = single_release,
};
-static int __devinit ideapad_debugfs_init(struct ideapad_private *priv)
+static int ideapad_debugfs_init(struct ideapad_private *priv)
{
struct dentry *node;
@@ -468,8 +468,7 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
rfkill_set_hw_state(priv->rfk[i], hw_blocked);
}
-static int __devinit ideapad_register_rfkill(struct acpi_device *adevice,
- int dev)
+static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int ret;
@@ -519,7 +518,7 @@ static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
/*
* Platform device
*/
-static int __devinit ideapad_platform_init(struct ideapad_private *priv)
+static int ideapad_platform_init(struct ideapad_private *priv)
{
int result;
@@ -569,7 +568,7 @@ static const struct key_entry ideapad_keymap[] = {
{ KE_END, 0 },
};
-static int __devinit ideapad_input_init(struct ideapad_private *priv)
+static int ideapad_input_init(struct ideapad_private *priv)
{
struct input_dev *inputdev;
int error;
@@ -776,7 +775,7 @@ static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
}
}
-static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
+static int ideapad_acpi_add(struct acpi_device *adevice)
{
int ret, i;
int cfg;
@@ -835,7 +834,7 @@ platform_failed:
return ret;
}
-static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type)
+static int ideapad_acpi_remove(struct acpi_device *adevice)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int i;
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 3271ac85115e..d6cfc1558c2f 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -200,7 +200,7 @@ static int intel_menlow_memory_add(struct acpi_device *device)
}
-static int intel_menlow_memory_remove(struct acpi_device *device, int type)
+static int intel_menlow_memory_remove(struct acpi_device *device)
{
struct thermal_cooling_device *cdev = acpi_driver_data(device);
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index bcbad8452a6f..f59683aa13d5 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -56,7 +56,7 @@ static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mfld_pb_probe(struct platform_device *pdev)
+static int mfld_pb_probe(struct platform_device *pdev)
{
struct input_dev *input;
int irq = platform_get_irq(pdev, 0);
@@ -121,7 +121,7 @@ err_free_input:
return error;
}
-static int __devexit mfld_pb_remove(struct platform_device *pdev)
+static int mfld_pb_remove(struct platform_device *pdev)
{
struct input_dev *input = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -139,7 +139,7 @@ static struct platform_driver mfld_pb_driver = {
.owner = THIS_MODULE,
},
.probe = mfld_pb_probe,
- .remove = __devexit_p(mfld_pb_remove),
+ .remove = mfld_pb_remove,
};
module_platform_driver(mfld_pb_driver);
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 93de09019d1d..81c491e74b34 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -563,7 +563,7 @@ static struct platform_driver mid_thermal_driver = {
.pm = &mid_thermal_pm,
},
.probe = mid_thermal_probe,
- .remove = __devexit_p(mid_thermal_remove),
+ .remove = mid_thermal_remove,
.id_table = therm_id_table,
};
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index 79a0c2f6be53..f6f18cde0f11 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -278,12 +278,12 @@ static void oaktrail_backlight_exit(void)
backlight_device_unregister(oaktrail_bl_device);
}
-static int __devinit oaktrail_probe(struct platform_device *pdev)
+static int oaktrail_probe(struct platform_device *pdev)
{
return 0;
}
-static int __devexit oaktrail_remove(struct platform_device *pdev)
+static int oaktrail_remove(struct platform_device *pdev)
{
return 0;
}
@@ -294,7 +294,7 @@ static struct platform_driver oaktrail_driver = {
.owner = THIS_MODULE,
},
.probe = oaktrail_probe,
- .remove = __devexit_p(oaktrail_remove)
+ .remove = oaktrail_remove,
};
static int dmi_check_cb(const struct dmi_system_id *id)
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 1686c1e07d5d..6f4b7289a059 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -230,7 +230,7 @@ static irqreturn_t pmic_irq_handler(int irq, void *data)
return ret;
}
-static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
+static int platform_pmic_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq(pdev, 0);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 8e8caa767d6a..4add9a31bf60 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -176,7 +176,7 @@ enum SINF_BITS { SINF_NUM_BATTERIES = 0,
/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */
static int acpi_pcc_hotkey_add(struct acpi_device *device);
-static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type);
+static int acpi_pcc_hotkey_remove(struct acpi_device *device);
static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id pcc_device_ids[] = {
@@ -663,7 +663,7 @@ static int __init acpi_pcc_init(void)
return 0;
}
-static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
+static int acpi_pcc_hotkey_remove(struct acpi_device *device)
{
struct pcc_acpi *pcc = acpi_driver_data(device);
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index dd90d15f5210..d1f030053176 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -26,6 +26,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
+#include <linux/efi.h>
#include <acpi/video.h>
/*
@@ -1523,6 +1524,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
},
.driver_data = &samsung_broken_acpi_video,
},
+ {
+ .callback = samsung_dmi_matched,
+ .ident = "N250P",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N250P"),
+ },
+ .driver_data = &samsung_broken_acpi_video,
+ },
{ },
};
MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -1534,6 +1545,9 @@ static int __init samsung_init(void)
struct samsung_laptop *samsung;
int ret;
+ if (efi_enabled(EFI_BOOT))
+ return -ENODEV;
+
quirks = &samsung_unknown;
if (!force && !dmi_check_system(samsung_dmi_table))
return -ENODEV;
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index 1e54ae74274c..5f770059fd4d 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -77,7 +77,7 @@ static int samsungq10_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops,
samsungq10_suspend, samsungq10_resume);
-static int __devinit samsungq10_probe(struct platform_device *pdev)
+static int samsungq10_probe(struct platform_device *pdev)
{
struct backlight_properties props;
@@ -99,7 +99,7 @@ static int __devinit samsungq10_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit samsungq10_remove(struct platform_device *pdev)
+static int samsungq10_remove(struct platform_device *pdev)
{
struct backlight_device *bd = platform_get_drvdata(pdev);
@@ -119,7 +119,7 @@ static struct platform_driver samsungq10_driver = {
.pm = &samsungq10_pm_ops,
},
.probe = samsungq10_probe,
- .remove = __devexit_p(samsungq10_remove),
+ .remove = samsungq10_remove,
};
static struct platform_device *samsungq10_device;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index daaddec68def..ceb41eff4230 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -786,28 +786,29 @@ static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
void *buffer, size_t buflen)
{
+ int ret = 0;
size_t len = len;
union acpi_object *object = __call_snc_method(handle, name, value);
if (!object)
return -EINVAL;
- if (object->type == ACPI_TYPE_BUFFER)
+ if (object->type == ACPI_TYPE_BUFFER) {
len = MIN(buflen, object->buffer.length);
+ memcpy(buffer, object->buffer.pointer, len);
- else if (object->type == ACPI_TYPE_INTEGER)
+ } else if (object->type == ACPI_TYPE_INTEGER) {
len = MIN(buflen, sizeof(object->integer.value));
+ memcpy(buffer, &object->integer.value, len);
- else {
+ } else {
pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
ACPI_TYPE_BUFFER, object->type);
- kfree(object);
- return -EINVAL;
+ ret = -EINVAL;
}
- memcpy(buffer, object->buffer.pointer, len);
kfree(object);
- return 0;
+ return ret;
}
struct sony_nc_handles {
@@ -2739,7 +2740,7 @@ outwalk:
return result;
}
-static int sony_nc_remove(struct acpi_device *device, int type)
+static int sony_nc_remove(struct acpi_device *device)
{
struct sony_nc_value *item;
@@ -4110,7 +4111,7 @@ found:
* ACPI driver
*
*****************/
-static int sony_pic_remove(struct acpi_device *device, int type)
+static int sony_pic_remove(struct acpi_device *device)
{
struct sony_pic_ioport *io, *tmp_io;
struct sony_pic_irq *irq, *tmp_irq;
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index e24f5ae475af..9b93fdb61ed7 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -187,7 +187,7 @@ static int __init tc1100_probe(struct platform_device *device)
}
-static int __devexit tc1100_remove(struct platform_device *device)
+static int tc1100_remove(struct platform_device *device)
{
sysfs_remove_group(&device->dev.kobj, &tc1100_attribute_group);
@@ -241,7 +241,7 @@ static struct platform_driver tc1100_driver = {
.pm = &tc1100_pm_ops,
#endif
},
- .remove = __devexit_p(tc1100_remove),
+ .remove = tc1100_remove,
};
static int __init tc1100_init(void)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 75dd651664ae..ebcb461bb2b0 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4877,8 +4877,7 @@ static int __init light_init(struct ibm_init_struct *iibm)
static void light_exit(void)
{
led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
- if (work_pending(&tpacpi_led_thinklight.work))
- flush_workqueue(tpacpi_wq);
+ flush_workqueue(tpacpi_wq);
}
static int light_read(struct seq_file *m)
@@ -6732,7 +6731,7 @@ static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
}
-static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
+static struct snd_kcontrol_new volume_alsa_control_vol = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Console Playback Volume",
.index = 0,
@@ -6741,7 +6740,7 @@ static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
.get = volume_alsa_vol_get,
};
-static struct snd_kcontrol_new volume_alsa_control_mute __devinitdata = {
+static struct snd_kcontrol_new volume_alsa_control_mute = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Console Playback Switch",
.index = 0,
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index d727bfee89a6..4ab618c63b45 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -157,7 +157,7 @@ add_err:
return -ENODEV;
}
-static int acpi_topstar_remove(struct acpi_device *device, int type)
+static int acpi_topstar_remove(struct acpi_device *device)
{
struct topstar_hkey *tps_hkey = acpi_driver_data(device);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 5f1256d5e933..904476b2fa8f 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -150,7 +150,7 @@ static const struct acpi_device_id toshiba_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
-static const struct key_entry toshiba_acpi_keymap[] __devinitconst = {
+static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_KEY, 0x101, { KEY_MUTE } },
{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
{ KE_KEY, 0x103, { KEY_ZOOMIN } },
@@ -875,8 +875,7 @@ static const struct file_operations version_proc_fops = {
#define PROC_TOSHIBA "toshiba"
-static void __devinit
-create_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
+static void create_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
{
if (dev->backlight_dev)
proc_create_data("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir,
@@ -979,7 +978,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
pr_info("Unknown key %x\n", scancode);
}
-static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
+static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
acpi_status status;
acpi_handle ec_handle, handle;
@@ -1069,7 +1068,7 @@ static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
return error;
}
-static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
{
struct backlight_properties props;
int brightness;
@@ -1119,7 +1118,7 @@ static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
return 0;
}
-static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
+static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
@@ -1154,7 +1153,7 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
return 0;
}
-static const char * __devinit find_hci_method(acpi_handle handle)
+static const char *find_hci_method(acpi_handle handle)
{
acpi_status status;
acpi_handle hci_handle;
@@ -1170,7 +1169,7 @@ static const char * __devinit find_hci_method(acpi_handle handle)
return NULL;
}
-static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
+static int toshiba_acpi_add(struct acpi_device *acpi_dev)
{
struct toshiba_acpi_dev *dev;
const char *hci_method;
@@ -1251,7 +1250,7 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
return 0;
error:
- toshiba_acpi_remove(acpi_dev, 0);
+ toshiba_acpi_remove(acpi_dev);
return ret;
}
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index e95be0b74859..74dd01ae343b 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
static int toshiba_bt_rfkill_add(struct acpi_device *device);
-static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type);
+static int toshiba_bt_rfkill_remove(struct acpi_device *device);
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id bt_device_ids[] = {
@@ -122,7 +122,7 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
return result;
}
-static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type)
+static int toshiba_bt_rfkill_remove(struct acpi_device *device)
{
/* clean up */
return 0;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 42a4dcc25f92..e4ac38aca580 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -92,7 +92,7 @@ module_param(debug_dump_wdg, bool, 0444);
MODULE_PARM_DESC(debug_dump_wdg,
"Dump available WMI interfaces [0/1]");
-static int acpi_wmi_remove(struct acpi_device *device, int type);
+static int acpi_wmi_remove(struct acpi_device *device);
static int acpi_wmi_add(struct acpi_device *device);
static void acpi_wmi_notify(struct acpi_device *device, u32 event);
@@ -917,7 +917,7 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
}
}
-static int acpi_wmi_remove(struct acpi_device *device, int type)
+static int acpi_wmi_remove(struct acpi_device *device)
{
acpi_remove_address_space_handler(device->handle,
ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index 1da13ed34b04..4bd17248dfc6 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -40,7 +40,7 @@ static const struct rfkill_ops rfkill_ops = {
.set_block = rfkill_set_block,
};
-static int __devinit xo1_rfkill_probe(struct platform_device *pdev)
+static int xo1_rfkill_probe(struct platform_device *pdev)
{
struct rfkill *rfk;
int r;
@@ -60,7 +60,7 @@ static int __devinit xo1_rfkill_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit xo1_rfkill_remove(struct platform_device *pdev)
+static int xo1_rfkill_remove(struct platform_device *pdev)
{
struct rfkill *rfk = platform_get_drvdata(pdev);
rfkill_unregister(rfk);
@@ -74,7 +74,7 @@ static struct platform_driver xo1_rfkill_driver = {
.owner = THIS_MODULE,
},
.probe = xo1_rfkill_probe,
- .remove = __devexit_p(xo1_rfkill_remove),
+ .remove = xo1_rfkill_remove,
};
module_platform_driver(xo1_rfkill_driver);
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 16d340c3b852..4b1377bd5944 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -150,7 +150,7 @@ static int ebook_switch_add(struct acpi_device *device)
return error;
}
-static int ebook_switch_remove(struct acpi_device *device, int type)
+static int ebook_switch_remove(struct acpi_device *device)
{
struct ebook_switch *button = acpi_driver_data(device);
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index cfaf5b73540b..0c201317284b 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -298,6 +298,39 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
return ret;
}
+static char *pnp_get_resource_value(char *buf,
+ unsigned long type,
+ resource_size_t *start,
+ resource_size_t *end,
+ unsigned long *flags)
+{
+ if (start)
+ *start = 0;
+ if (end)
+ *end = 0;
+ if (flags)
+ *flags = 0;
+
+ /* TBD: allow for disabled resources */
+
+ buf = skip_spaces(buf);
+ if (start) {
+ *start = simple_strtoull(buf, &buf, 0);
+ if (end) {
+ buf = skip_spaces(buf);
+ if (*buf == '-') {
+ buf = skip_spaces(buf + 1);
+ *end = simple_strtoull(buf, &buf, 0);
+ } else
+ *end = *start;
+ }
+ }
+
+ /* TBD: allow for additional flags, e.g., IORESOURCE_WINDOW */
+
+ return buf;
+}
+
static ssize_t pnp_set_current_resources(struct device *dmdev,
struct device_attribute *attr,
const char *ubuf, size_t count)
@@ -305,7 +338,6 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
struct pnp_dev *dev = to_pnp_dev(dmdev);
char *buf = (void *)ubuf;
int retval = 0;
- resource_size_t start, end;
if (dev->status & PNP_ATTACHED) {
retval = -EBUSY;
@@ -349,6 +381,10 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
goto done;
}
if (!strnicmp(buf, "set", 3)) {
+ resource_size_t start;
+ resource_size_t end;
+ unsigned long flags;
+
if (dev->active)
goto done;
buf += 3;
@@ -357,42 +393,37 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
while (1) {
buf = skip_spaces(buf);
if (!strnicmp(buf, "io", 2)) {
- buf = skip_spaces(buf + 2);
- start = simple_strtoul(buf, &buf, 0);
- buf = skip_spaces(buf);
- if (*buf == '-') {
- buf = skip_spaces(buf + 1);
- end = simple_strtoul(buf, &buf, 0);
- } else
- end = start;
- pnp_add_io_resource(dev, start, end, 0);
- continue;
- }
- if (!strnicmp(buf, "mem", 3)) {
- buf = skip_spaces(buf + 3);
- start = simple_strtoul(buf, &buf, 0);
- buf = skip_spaces(buf);
- if (*buf == '-') {
- buf = skip_spaces(buf + 1);
- end = simple_strtoul(buf, &buf, 0);
- } else
- end = start;
- pnp_add_mem_resource(dev, start, end, 0);
- continue;
- }
- if (!strnicmp(buf, "irq", 3)) {
- buf = skip_spaces(buf + 3);
- start = simple_strtoul(buf, &buf, 0);
- pnp_add_irq_resource(dev, start, 0);
- continue;
- }
- if (!strnicmp(buf, "dma", 3)) {
- buf = skip_spaces(buf + 3);
- start = simple_strtoul(buf, &buf, 0);
- pnp_add_dma_resource(dev, start, 0);
- continue;
- }
- break;
+ buf = pnp_get_resource_value(buf + 2,
+ IORESOURCE_IO,
+ &start, &end,
+ &flags);
+ pnp_add_io_resource(dev, start, end, flags);
+ } else if (!strnicmp(buf, "mem", 3)) {
+ buf = pnp_get_resource_value(buf + 3,
+ IORESOURCE_MEM,
+ &start, &end,
+ &flags);
+ pnp_add_mem_resource(dev, start, end, flags);
+ } else if (!strnicmp(buf, "irq", 3)) {
+ buf = pnp_get_resource_value(buf + 3,
+ IORESOURCE_IRQ,
+ &start, NULL,
+ &flags);
+ pnp_add_irq_resource(dev, start, flags);
+ } else if (!strnicmp(buf, "dma", 3)) {
+ buf = pnp_get_resource_value(buf + 3,
+ IORESOURCE_DMA,
+ &start, NULL,
+ &flags);
+ pnp_add_dma_resource(dev, start, flags);
+ } else if (!strnicmp(buf, "bus", 3)) {
+ buf = pnp_get_resource_value(buf + 3,
+ IORESOURCE_BUS,
+ &start, &end,
+ NULL);
+ pnp_add_bus_resource(dev, start, end);
+ } else
+ break;
}
mutex_unlock(&pnp_res_mutex);
goto done;
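For reference, the reworked "set" handler above is consumed from user space through each PnP device's sysfs "resources" attribute. A minimal sketch of driving it from a small C program follows; the device id "00:01" and the resource values are illustrative assumptions, not taken from this patch, and each command has to arrive in its own write() call.

/*
 * Illustrative only: exercises the "set" keyword parsing reworked above
 * via /sys/bus/pnp/devices/<id>/resources.  The device id and resource
 * string below are assumptions made for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void pnp_cmd(const char *path, const char *cmd)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0)
		perror(path);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	const char *res = "/sys/bus/pnp/devices/00:01/resources";

	pnp_cmd(res, "disable");
	/* "io" and "irq" (and now "bus") all go through pnp_get_resource_value() */
	pnp_cmd(res, "set io 0x2f8-0x2ff irq 3");
	pnp_cmd(res, "activate");
	return 0;
}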
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index ed9ce507149a..95cebf0185de 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -18,11 +18,27 @@
DEFINE_MUTEX(pnp_res_mutex);
+static struct resource *pnp_find_resource(struct pnp_dev *dev,
+ unsigned char rule,
+ unsigned long type,
+ unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, type, bar);
+
+ /* when the resource already exists, set its resource bits from rule */
+ if (res) {
+ res->flags &= ~IORESOURCE_BITS;
+ res->flags |= rule & IORESOURCE_BITS;
+ }
+
+ return res;
+}
+
static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
{
struct resource *res, local_res;
- res = pnp_get_resource(dev, IORESOURCE_IO, idx);
+ res = pnp_find_resource(dev, rule->flags, IORESOURCE_IO, idx);
if (res) {
pnp_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
@@ -65,7 +81,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
{
struct resource *res, local_res;
- res = pnp_get_resource(dev, IORESOURCE_MEM, idx);
+ res = pnp_find_resource(dev, rule->flags, IORESOURCE_MEM, idx);
if (res) {
pnp_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
@@ -78,6 +94,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
res->start = 0;
res->end = 0;
+ /* ??? rule->flags restricted to 8 bits, all tests bogus ??? */
if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
res->flags |= IORESOURCE_READONLY;
if (rule->flags & IORESOURCE_MEM_CACHEABLE)
@@ -123,7 +140,7 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
};
- res = pnp_get_resource(dev, IORESOURCE_IRQ, idx);
+ res = pnp_find_resource(dev, rule->flags, IORESOURCE_IRQ, idx);
if (res) {
pnp_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
@@ -182,7 +199,7 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
1, 3, 5, 6, 7, 0, 2, 4
};
- res = pnp_get_resource(dev, IORESOURCE_DMA, idx);
+ res = pnp_find_resource(dev, rule->flags, IORESOURCE_DMA, idx);
if (res) {
pnp_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 72e822e17d47..8813fc03aa09 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -90,7 +90,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
pnp_dbg(&dev->dev, "set resources\n");
handle = DEVICE_ACPI_HANDLE(&dev->dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
}
@@ -123,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
dev_dbg(&dev->dev, "disable resources\n");
handle = DEVICE_ACPI_HANDLE(&dev->dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
}
@@ -145,7 +145,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
acpi_handle handle;
handle = DEVICE_ACPI_HANDLE(&dev->dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return false;
}
@@ -160,7 +160,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
int error = 0;
handle = DEVICE_ACPI_HANDLE(&dev->dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
}
@@ -197,7 +197,7 @@ static int pnpacpi_resume(struct pnp_dev *dev)
acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
int error = 0;
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
}
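A brief note on the conversion in these hunks: acpi_bus_get_device() returns a plain int error code (0 on success), not an acpi_status, so wrapping it in ACPI_FAILURE() mixed two different error conventions even though the nonzero test happened to work. A hedged sketch of the intended pattern; the wrapper function and parameter names are placeholders, only acpi_bus_get_device() itself is real:

#include <linux/acpi.h>

/* Placeholder wrapper for illustration; not part of this patch. */
static int example_get_companion(acpi_handle handle, struct acpi_device **adev)
{
	/*
	 * acpi_bus_get_device() returns an int (0 on success, negative on
	 * failure), so test the return value directly instead of using
	 * ACPI_FAILURE(), which is meant for acpi_status values.
	 */
	if (!handle || acpi_bus_get_device(handle, adev))
		return -ENODEV;

	return 0;
}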
diff --git a/drivers/pnp/pnpbios/Kconfig b/drivers/pnp/pnpbios/Kconfig
index b986d9fa3b9a..50c3dd065e03 100644
--- a/drivers/pnp/pnpbios/Kconfig
+++ b/drivers/pnp/pnpbios/Kconfig
@@ -2,8 +2,8 @@
# Plug and Play BIOS configuration
#
config PNPBIOS
- bool "Plug and Play BIOS support (EXPERIMENTAL)"
- depends on ISA && X86 && EXPERIMENTAL
+ bool "Plug and Play BIOS support"
+ depends on ISA && X86
default n
---help---
Linux uses the PNPBIOS as defined in "Plug and Play BIOS
diff --git a/drivers/power/88pm860x_battery.c b/drivers/power/88pm860x_battery.c
index 8bc80b05c63c..d338c1c4e8c8 100644
--- a/drivers/power/88pm860x_battery.c
+++ b/drivers/power/88pm860x_battery.c
@@ -915,15 +915,13 @@ static int pm860x_battery_probe(struct platform_device *pdev)
info->irq_cc = platform_get_irq(pdev, 0);
if (info->irq_cc <= 0) {
dev_err(&pdev->dev, "No IRQ resource!\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
info->irq_batt = platform_get_irq(pdev, 1);
if (info->irq_batt <= 0) {
dev_err(&pdev->dev, "No IRQ resource!\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
info->chip = chip;
@@ -957,7 +955,7 @@ static int pm860x_battery_probe(struct platform_device *pdev)
ret = power_supply_register(&pdev->dev, &info->battery);
if (ret)
- goto out;
+ return ret;
info->battery.dev->parent = &pdev->dev;
ret = request_threaded_irq(info->irq_cc, NULL,
@@ -984,8 +982,6 @@ out_coulomb:
free_irq(info->irq_cc, info);
out_reg:
power_supply_unregister(&info->battery);
-out:
- kfree(info);
return ret;
}
@@ -993,10 +989,9 @@ static int pm860x_battery_remove(struct platform_device *pdev)
{
struct pm860x_battery_info *info = platform_get_drvdata(pdev);
- power_supply_unregister(&info->battery);
free_irq(info->irq_batt, info);
free_irq(info->irq_cc, info);
- kfree(info);
+ power_supply_unregister(&info->battery);
platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index b1d956d81f0c..9e00c389e777 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -245,6 +245,13 @@ config BATTERY_INTEL_MID
Say Y here to enable the battery driver on Intel MID
platforms.
+config BATTERY_RX51
+ tristate "Nokia RX-51 (N900) battery driver"
+ depends on TWL4030_MADC
+ help
+ Say Y here to enable support for battery information on Nokia
+ RX-51, also known as N900 tablet.
+
config CHARGER_ISP1704
tristate "ISP1704 USB Charger Detection"
depends on USB_OTG_UTILS
@@ -315,6 +322,16 @@ config CHARGER_MAX8998
Say Y to enable support for the battery charger control sysfs and
platform data of MAX8998/LP3974 PMICs.
+config CHARGER_BQ2415X
+ tristate "TI BQ2415x battery charger driver"
+ depends on I2C
+ help
+ Say Y to enable support for the TI BQ2415x battery charger
+ PMICs.
+
+ You'll need this driver to charge batteries on e.g. Nokia
+ RX-51/N900.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
@@ -329,12 +346,19 @@ config AB8500_BM
help
Say Y to include support for AB8500 battery management.
-config AB8500_BATTERY_THERM_ON_BATCTRL
- bool "Thermistor connected on BATCTRL ADC"
+config BATTERY_GOLDFISH
+ tristate "Goldfish battery driver"
+ depends on GENERIC_HARDIRQS
+ help
+ Say Y to enable support for the battery and AC power in the
+ Goldfish emulator.
+
+config CHARGER_PM2301
+ bool "PM2301 Battery Charger Driver"
depends on AB8500_BM
help
- Say Y to enable battery temperature measurements using
- thermistor connected on BATCTRL ADC.
+ Say Y to include support for PM2301 charger driver.
+ Depends on AB8500 battery management core.
source "drivers/power/reset/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index f1d99f4a0bc3..3f66436af45c 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
+obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
@@ -37,7 +38,8 @@ obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
-obj-$(CONFIG_AB8500_BM) += ab8500_charger.o ab8500_btemp.o ab8500_fg.o abx500_chargalg.o
+obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o
+obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
@@ -45,8 +47,10 @@ obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
+obj-$(CONFIG_CHARGER_PM2301) += pm2301_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_POWER_RESET) += reset/
diff --git a/drivers/power/ab8500_bmdata.c b/drivers/power/ab8500_bmdata.c
new file mode 100644
index 000000000000..7a96c0650fbb
--- /dev/null
+++ b/drivers/power/ab8500_bmdata.c
@@ -0,0 +1,507 @@
+#include <linux/export.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+
+/*
+ * These are the defined batteries that use an NTC and an ID resistor placed
+ * inside the battery pack.
+ * Note that the res_to_temp table must be strictly sorted by falling resistance
+ * values to work.
+ */
+static struct abx500_res_to_temp temp_tbl_A_thermistor[] = {
+ {-5, 53407},
+ { 0, 48594},
+ { 5, 43804},
+ {10, 39188},
+ {15, 34870},
+ {20, 30933},
+ {25, 27422},
+ {30, 24347},
+ {35, 21694},
+ {40, 19431},
+ {45, 17517},
+ {50, 15908},
+ {55, 14561},
+ {60, 13437},
+ {65, 12500},
+};
+
+static struct abx500_res_to_temp temp_tbl_B_thermistor[] = {
+ {-5, 200000},
+ { 0, 159024},
+ { 5, 151921},
+ {10, 144300},
+ {15, 136424},
+ {20, 128565},
+ {25, 120978},
+ {30, 113875},
+ {35, 107397},
+ {40, 101629},
+ {45, 96592},
+ {50, 92253},
+ {55, 88569},
+ {60, 85461},
+ {65, 82869},
+};
+
+static struct abx500_v_to_cap cap_tbl_A_thermistor[] = {
+ {4171, 100},
+ {4114, 95},
+ {4009, 83},
+ {3947, 74},
+ {3907, 67},
+ {3863, 59},
+ {3830, 56},
+ {3813, 53},
+ {3791, 46},
+ {3771, 33},
+ {3754, 25},
+ {3735, 20},
+ {3717, 17},
+ {3681, 13},
+ {3664, 8},
+ {3651, 6},
+ {3635, 5},
+ {3560, 3},
+ {3408, 1},
+ {3247, 0},
+};
+
+static struct abx500_v_to_cap cap_tbl_B_thermistor[] = {
+ {4161, 100},
+ {4124, 98},
+ {4044, 90},
+ {4003, 85},
+ {3966, 80},
+ {3933, 75},
+ {3888, 67},
+ {3849, 60},
+ {3813, 55},
+ {3787, 47},
+ {3772, 30},
+ {3751, 25},
+ {3718, 20},
+ {3681, 16},
+ {3660, 14},
+ {3589, 10},
+ {3546, 7},
+ {3495, 4},
+ {3404, 2},
+ {3250, 0},
+};
+
+static struct abx500_v_to_cap cap_tbl[] = {
+ {4186, 100},
+ {4163, 99},
+ {4114, 95},
+ {4068, 90},
+ {3990, 80},
+ {3926, 70},
+ {3898, 65},
+ {3866, 60},
+ {3833, 55},
+ {3812, 50},
+ {3787, 40},
+ {3768, 30},
+ {3747, 25},
+ {3730, 20},
+ {3705, 15},
+ {3699, 14},
+ {3684, 12},
+ {3672, 9},
+ {3657, 7},
+ {3638, 6},
+ {3556, 4},
+ {3424, 2},
+ {3317, 1},
+ {3094, 0},
+};
+
+/*
+ * Note that the res_to_temp table must be strictly sorted by falling
+ * resistance values to work.
+ */
+static struct abx500_res_to_temp temp_tbl[] = {
+ {-5, 214834},
+ { 0, 162943},
+ { 5, 124820},
+ {10, 96520},
+ {15, 75306},
+ {20, 59254},
+ {25, 47000},
+ {30, 37566},
+ {35, 30245},
+ {40, 24520},
+ {45, 20010},
+ {50, 16432},
+ {55, 13576},
+ {60, 11280},
+ {65, 9425},
+};
+
+/*
+ * Note that the batres_vs_temp table must be strictly sorted by falling
+ * temperature values to work.
+ */
+static struct batres_vs_temp temp_to_batres_tbl_thermistor[] = {
+ { 40, 120},
+ { 30, 135},
+ { 20, 165},
+ { 10, 230},
+ { 00, 325},
+ {-10, 445},
+ {-20, 595},
+};
+
+/*
+ * Note that the batres_vs_temp table must be strictly sorted by falling
+ * temperature values to work.
+ */
+static struct batres_vs_temp temp_to_batres_tbl_ext_thermistor[] = {
+ { 60, 300},
+ { 30, 300},
+ { 20, 300},
+ { 10, 300},
+ { 00, 300},
+ {-10, 300},
+ {-20, 300},
+};
+
+/* battery resistance table for LI ION 9100 battery */
+static struct batres_vs_temp temp_to_batres_tbl_9100[] = {
+ { 60, 180},
+ { 30, 180},
+ { 20, 180},
+ { 10, 180},
+ { 00, 180},
+ {-10, 180},
+ {-20, 180},
+};
+
+static struct abx500_battery_type bat_type_thermistor[] = {
+ [BATTERY_UNKNOWN] = {
+ /* First element always represent the UNKNOWN battery */
+ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+ .resis_high = 0,
+ .resis_low = 0,
+ .battery_resistance = 300,
+ .charge_full_design = 612,
+ .nominal_voltage = 3700,
+ .termination_vol = 4050,
+ .termination_curr = 200,
+ .recharge_cap = 95,
+ .normal_cur_lvl = 400,
+ .normal_vol_lvl = 4100,
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4000,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 53407,
+ .resis_low = 12500,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_cap = 95,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A_thermistor),
+ .r_to_t_tbl = temp_tbl_A_thermistor,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A_thermistor),
+ .v_to_cap_tbl = cap_tbl_A_thermistor,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 200000,
+ .resis_low = 82869,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_cap = 95,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B_thermistor),
+ .r_to_t_tbl = temp_tbl_B_thermistor,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B_thermistor),
+ .v_to_cap_tbl = cap_tbl_B_thermistor,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+ },
+};
+
+static struct abx500_battery_type bat_type_ext_thermistor[] = {
+ [BATTERY_UNKNOWN] = {
+ /* First element always represent the UNKNOWN battery */
+ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+ .resis_high = 0,
+ .resis_low = 0,
+ .battery_resistance = 300,
+ .charge_full_design = 612,
+ .nominal_voltage = 3700,
+ .termination_vol = 4050,
+ .termination_curr = 200,
+ .recharge_cap = 95,
+ .normal_cur_lvl = 400,
+ .normal_vol_lvl = 4100,
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4000,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+ },
+/*
+ * These are the batteries that don't have an internal NTC resistor to measure
+ * their temperature. The temperature in this case is measured with an NTC placed
+ * near the battery but on the PCB.
+ */
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 76000,
+ .resis_low = 53000,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_cap = 95,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 30000,
+ .resis_low = 10000,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_cap = 95,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 95000,
+ .resis_low = 76001,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_cap = 95,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+ },
+};
+
+static const struct abx500_bm_capacity_levels cap_levels = {
+ .critical = 2,
+ .low = 10,
+ .normal = 70,
+ .high = 95,
+ .full = 100,
+};
+
+static const struct abx500_fg_parameters fg = {
+ .recovery_sleep_timer = 10,
+ .recovery_total_time = 100,
+ .init_timer = 1,
+ .init_discard_time = 5,
+ .init_total_time = 40,
+ .high_curr_time = 60,
+ .accu_charging = 30,
+ .accu_high_curr = 30,
+ .high_curr_threshold = 50,
+ .lowbat_threshold = 3100,
+ .battok_falling_th_sel0 = 2860,
+ .battok_raising_th_sel1 = 2860,
+ .maint_thres = 95,
+ .user_cap_limit = 15,
+};
+
+static const struct abx500_maxim_parameters maxi_params = {
+ .ena_maxi = true,
+ .chg_curr = 910,
+ .wait_cycles = 10,
+ .charger_curr_step = 100,
+};
+
+static const struct abx500_bm_charger_parameters chg = {
+ .usb_volt_max = 5500,
+ .usb_curr_max = 1500,
+ .ac_volt_max = 7500,
+ .ac_curr_max = 1500,
+};
+
+struct abx500_bm_data ab8500_bm_data = {
+ .temp_under = 3,
+ .temp_low = 8,
+ .temp_high = 43,
+ .temp_over = 48,
+ .main_safety_tmr_h = 4,
+ .temp_interval_chg = 20,
+ .temp_interval_nochg = 120,
+ .usb_safety_tmr_h = 4,
+ .bkup_bat_v = BUP_VCH_SEL_2P6V,
+ .bkup_bat_i = BUP_ICH_SEL_150UA,
+ .no_maintenance = false,
+ .capacity_scaling = false,
+ .adc_therm = ABx500_ADC_THERM_BATCTRL,
+ .chg_unknown_bat = false,
+ .enable_overshoot = false,
+ .fg_res = 100,
+ .cap_levels = &cap_levels,
+ .bat_type = bat_type_thermistor,
+ .n_btypes = 3,
+ .batt_id = 0,
+ .interval_charging = 5,
+ .interval_not_charging = 120,
+ .temp_hysteresis = 3,
+ .gnd_lift_resistance = 34,
+ .maxi = &maxi_params,
+ .chg_params = &chg,
+ .fg_params = &fg,
+};
+
+int ab8500_bm_of_probe(struct device *dev,
+ struct device_node *np,
+ struct abx500_bm_data *bm)
+{
+ struct batres_vs_temp *tmp_batres_tbl;
+ struct device_node *battery_node;
+ const char *btech;
+ int i;
+
+ /* get phandle to 'battery-info' node */
+ battery_node = of_parse_phandle(np, "battery", 0);
+ if (!battery_node) {
+ dev_err(dev, "battery node or reference missing\n");
+ return -EINVAL;
+ }
+
+ btech = of_get_property(battery_node, "stericsson,battery-type", NULL);
+ if (!btech) {
+ dev_warn(dev, "missing property battery-name/type\n");
+ return -EINVAL;
+ }
+
+ if (strncmp(btech, "LION", 4) == 0) {
+ bm->no_maintenance = true;
+ bm->chg_unknown_bat = true;
+ bm->bat_type[BATTERY_UNKNOWN].charge_full_design = 2600;
+ bm->bat_type[BATTERY_UNKNOWN].termination_vol = 4150;
+ bm->bat_type[BATTERY_UNKNOWN].recharge_cap = 95;
+ bm->bat_type[BATTERY_UNKNOWN].normal_cur_lvl = 520;
+ bm->bat_type[BATTERY_UNKNOWN].normal_vol_lvl = 4200;
+ }
+
+ if (of_property_read_bool(battery_node, "thermistor-on-batctrl")) {
+ if (strncmp(btech, "LION", 4) == 0)
+ tmp_batres_tbl = temp_to_batres_tbl_9100;
+ else
+ tmp_batres_tbl = temp_to_batres_tbl_thermistor;
+ } else {
+ bm->n_btypes = 4;
+ bm->bat_type = bat_type_ext_thermistor;
+ bm->adc_therm = ABx500_ADC_THERM_BATTEMP;
+ tmp_batres_tbl = temp_to_batres_tbl_ext_thermistor;
+ }
+
+ /* select the battery resolution table */
+ for (i = 0; i < bm->n_btypes; ++i)
+ bm->bat_type[i].batres_tbl = tmp_batres_tbl;
+
+ of_node_put(battery_node);
+
+ return 0;
+}
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index 989b09950aff..07689064996e 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -20,11 +20,13 @@
#include <linux/power_supply.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
-#include <linux/mfd/abx500/ab8500.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/mfd/abx500/ab8500-gpadc.h>
-#include <linux/jiffies.h>
#define VTVOUT_V 1800
@@ -37,6 +39,9 @@
#define BTEMP_BATCTRL_CURR_SRC_7UA 7
#define BTEMP_BATCTRL_CURR_SRC_20UA 20
+#define BTEMP_BATCTRL_CURR_SRC_16UA 16
+#define BTEMP_BATCTRL_CURR_SRC_18UA 18
+
#define to_ab8500_btemp_device_info(x) container_of((x), \
struct ab8500_btemp, btemp_psy);
@@ -76,13 +81,13 @@ struct ab8500_btemp_ranges {
* @parent: Pointer to the struct ab8500
* @gpadc: Pointer to the struct gpadc
* @fg: Pointer to the struct fg
- * @pdata: Pointer to the abx500_btemp platform data
- * @bat: Pointer to the abx500_bm platform data
+ * @bm: Platform specific battery management information
* @btemp_psy: Structure for BTEMP specific battery properties
* @events: Structure for information about events triggered
* @btemp_ranges: Battery temperature range structure
* @btemp_wq: Work queue for measuring the temperature periodically
* @btemp_periodic_work: Work for measuring the temperature periodically
+ * @initialized: True if battery id read.
*/
struct ab8500_btemp {
struct device *dev;
@@ -93,13 +98,13 @@ struct ab8500_btemp {
struct ab8500 *parent;
struct ab8500_gpadc *gpadc;
struct ab8500_fg *fg;
- struct abx500_btemp_platform_data *pdata;
- struct abx500_bm_data *bat;
+ struct abx500_bm_data *bm;
struct power_supply btemp_psy;
struct ab8500_btemp_events events;
struct ab8500_btemp_ranges btemp_ranges;
struct workqueue_struct *btemp_wq;
struct delayed_work btemp_periodic_work;
+ bool initialized;
};
/* BTEMP power supply properties */
@@ -147,13 +152,13 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
return (450000 * (v_batctrl)) / (1800 - v_batctrl);
}
- if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+ if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL) {
/*
* If the battery has internal NTC, we use the current
* source to calculate the resistance, 7uA or 20uA
*/
rbs = (v_batctrl * 1000
- - di->bat->gnd_lift_resistance * inst_curr)
+ - di->bm->gnd_lift_resistance * inst_curr)
/ di->curr_source;
} else {
/*
@@ -209,11 +214,19 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
return 0;
/* Only do this for batteries with internal NTC */
- if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
- if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
- curr = BAT_CTRL_7U_ENA;
- else
- curr = BAT_CTRL_20U_ENA;
+ if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+
+ if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+ if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_16UA)
+ curr = BAT_CTRL_16U_ENA;
+ else
+ curr = BAT_CTRL_18U_ENA;
+ } else {
+ if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
+ curr = BAT_CTRL_7U_ENA;
+ else
+ curr = BAT_CTRL_20U_ENA;
+ }
dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
@@ -241,14 +254,25 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
__func__);
goto disable_curr_source;
}
- } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+ } else if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
dev_dbg(di->dev, "Disable BATCTRL curr source\n");
- /* Write 0 to the curr bits */
- ret = abx500_mask_and_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
- BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
- ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(
+ di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA,
+ ~(BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA));
+ } else {
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(
+ di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ }
+
if (ret) {
dev_err(di->dev, "%s failed disabling current source\n",
__func__);
@@ -290,11 +314,20 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
* if we got an error above
*/
disable_curr_source:
- /* Write 0 to the curr bits */
- ret = abx500_mask_and_set_register_interruptible(di->dev,
+ if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA,
+ ~(BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA));
+ } else {
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ }
+
if (ret) {
dev_err(di->dev, "%s failed disabling current source\n",
__func__);
@@ -372,13 +405,10 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
return ret;
}
- /*
- * Since there is no interrupt when current measurement is done,
- * loop for over 250ms (250ms is one sample conversion time
- * with 32.768 Khz RTC clock). Note that a stop time must be set
- * since the ab8500_btemp_read_batctrl_voltage call can block and
- * take an unknown amount of time to complete.
- */
+ do {
+ msleep(20);
+ } while (!ab8500_fg_inst_curr_started(di->fg));
+
i = 0;
do {
@@ -457,9 +487,9 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
int rbat, rntc, vntc;
u8 id;
- id = di->bat->batt_id;
+ id = di->bm->batt_id;
- if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
id != BATTERY_UNKNOWN) {
rbat = ab8500_btemp_get_batctrl_res(di);
@@ -474,8 +504,8 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
}
temp = ab8500_btemp_res_to_temp(di,
- di->bat->bat_type[id].r_to_t_tbl,
- di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+ di->bm->bat_type[id].r_to_t_tbl,
+ di->bm->bat_type[id].n_temp_tbl_elements, rbat);
} else {
vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
if (vntc < 0) {
@@ -491,8 +521,8 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
rntc = 230000 * vntc / (VTVOUT_V - vntc);
temp = ab8500_btemp_res_to_temp(di,
- di->bat->bat_type[id].r_to_t_tbl,
- di->bat->bat_type[id].n_temp_tbl_elements, rntc);
+ di->bm->bat_type[id].r_to_t_tbl,
+ di->bm->bat_type[id].n_temp_tbl_elements, rntc);
prev = temp;
}
dev_dbg(di->dev, "Battery temperature is %d\n", temp);
@@ -511,9 +541,12 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
{
int res;
u8 i;
+ if (is_ab9540(di->parent) || is_ab8505(di->parent))
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
+ else
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
- di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
- di->bat->batt_id = BATTERY_UNKNOWN;
+ di->bm->batt_id = BATTERY_UNKNOWN;
res = ab8500_btemp_get_batctrl_res(di);
if (res < 0) {
@@ -522,23 +555,23 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
}
/* BATTERY_UNKNOWN is defined on position 0, skip it! */
- for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
- if ((res <= di->bat->bat_type[i].resis_high) &&
- (res >= di->bat->bat_type[i].resis_low)) {
+ for (i = BATTERY_UNKNOWN + 1; i < di->bm->n_btypes; i++) {
+ if ((res <= di->bm->bat_type[i].resis_high) &&
+ (res >= di->bm->bat_type[i].resis_low)) {
dev_dbg(di->dev, "Battery detected on %s"
" low %d < res %d < high: %d"
" index: %d\n",
- di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+ di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL ?
"BATCTRL" : "BATTEMP",
- di->bat->bat_type[i].resis_low, res,
- di->bat->bat_type[i].resis_high, i);
+ di->bm->bat_type[i].resis_low, res,
+ di->bm->bat_type[i].resis_high, i);
- di->bat->batt_id = i;
+ di->bm->batt_id = i;
break;
}
}
- if (di->bat->batt_id == BATTERY_UNKNOWN) {
+ if (di->bm->batt_id == BATTERY_UNKNOWN) {
dev_warn(di->dev, "Battery identified as unknown"
", resistance %d Ohm\n", res);
return -ENXIO;
@@ -548,13 +581,18 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
* We only have to change current source if the
* detected type is Type 1, else we use the 7uA source
*/
- if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
- di->bat->batt_id == 1) {
- dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
- di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
+ if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ di->bm->batt_id == 1) {
+ if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+ dev_dbg(di->dev, "Set BATCTRL current source to 16uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
+ } else {
+ dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
+ }
}
- return di->bat->batt_id;
+ return di->bm->batt_id;
}
/**
@@ -569,6 +607,13 @@ static void ab8500_btemp_periodic_work(struct work_struct *work)
struct ab8500_btemp *di = container_of(work,
struct ab8500_btemp, btemp_periodic_work.work);
+ if (!di->initialized) {
+ di->initialized = true;
+ /* Identify the battery */
+ if (ab8500_btemp_id(di) < 0)
+ dev_warn(di->dev, "failed to identify the battery\n");
+ }
+
di->bat_temp = ab8500_btemp_measure_temp(di);
if (di->bat_temp != di->prev_bat_temp) {
@@ -577,9 +622,9 @@ static void ab8500_btemp_periodic_work(struct work_struct *work)
}
if (di->events.ac_conn || di->events.usb_conn)
- interval = di->bat->temp_interval_chg;
+ interval = di->bm->temp_interval_chg;
else
- interval = di->bat->temp_interval_nochg;
+ interval = di->bm->temp_interval_nochg;
/* Schedule a new measurement */
queue_delayed_work(di->btemp_wq,
@@ -616,9 +661,9 @@ static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
{
struct ab8500_btemp *di = _di;
- if (is_ab8500_2p0_or_earlier(di->parent)) {
+ if (is_ab8500_3p3_or_earlier(di->parent)) {
dev_dbg(di->dev, "Ignore false btemp low irq"
- " for ABB cut 1.0, 1.1 and 2.0\n");
+ " for ABB cut 1.0, 1.1, 2.0 and 3.3\n");
} else {
dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
@@ -732,30 +777,30 @@ static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
int temp = 0;
/*
- * The BTEMP events are not reliabe on AB8500 cut2.0
+ * The BTEMP events are not reliable on AB8500 cut3.3
* and prior versions
*/
- if (is_ab8500_2p0_or_earlier(di->parent)) {
+ if (is_ab8500_3p3_or_earlier(di->parent)) {
temp = di->bat_temp * 10;
} else {
if (di->events.btemp_low) {
if (temp > di->btemp_ranges.btemp_low_limit)
- temp = di->btemp_ranges.btemp_low_limit;
+ temp = di->btemp_ranges.btemp_low_limit * 10;
else
temp = di->bat_temp * 10;
} else if (di->events.btemp_high) {
if (temp < di->btemp_ranges.btemp_high_limit)
- temp = di->btemp_ranges.btemp_high_limit;
+ temp = di->btemp_ranges.btemp_high_limit * 10;
else
temp = di->bat_temp * 10;
} else if (di->events.btemp_lowmed) {
if (temp > di->btemp_ranges.btemp_med_limit)
- temp = di->btemp_ranges.btemp_med_limit;
+ temp = di->btemp_ranges.btemp_med_limit * 10;
else
temp = di->bat_temp * 10;
} else if (di->events.btemp_medhigh) {
if (temp < di->btemp_ranges.btemp_med_limit)
- temp = di->btemp_ranges.btemp_med_limit;
+ temp = di->btemp_ranges.btemp_med_limit * 10;
else
temp = di->bat_temp * 10;
} else
@@ -806,7 +851,7 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
val->intval = 1;
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = di->bat->bat_type[di->bat->batt_id].name;
+ val->intval = di->bm->bat_type[di->bm->batt_id].name;
break;
case POWER_SUPPLY_PROP_TEMP:
val->intval = ab8500_btemp_get_temp(di);
@@ -955,47 +1000,49 @@ static int ab8500_btemp_remove(struct platform_device *pdev)
flush_scheduled_work();
power_supply_unregister(&di->btemp_psy);
platform_set_drvdata(pdev, NULL);
- kfree(di);
return 0;
}
+static char *supply_interface[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+};
+
static int ab8500_btemp_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ struct abx500_bm_data *plat = pdev->dev.platform_data;
+ struct ab8500_btemp *di;
int irq, i, ret = 0;
u8 val;
- struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
- struct ab8500_btemp *di;
- if (!plat_data) {
- dev_err(&pdev->dev, "No platform data\n");
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "%s no mem for ab8500_btemp\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no battery management data supplied\n");
return -EINVAL;
}
+ di->bm = plat;
- di = kzalloc(sizeof(*di), GFP_KERNEL);
- if (!di)
- return -ENOMEM;
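+ /* When booting with a device tree, refine the battery data via ab8500_bm_of_probe() */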
+ if (np) {
+ ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get battery information\n");
+ return ret;
+ }
+ }
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- /* get btemp specific platform data */
- di->pdata = plat_data->btemp;
- if (!di->pdata) {
- dev_err(di->dev, "no btemp platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
- /* get battery specific platform data */
- di->bat = plat_data->battery;
- if (!di->bat) {
- dev_err(di->dev, "no battery platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
+ di->initialized = false;
/* BTEMP supply */
di->btemp_psy.name = "ab8500_btemp";
@@ -1003,8 +1050,8 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
di->btemp_psy.properties = ab8500_btemp_props;
di->btemp_psy.num_properties = ARRAY_SIZE(ab8500_btemp_props);
di->btemp_psy.get_property = ab8500_btemp_get_property;
- di->btemp_psy.supplied_to = di->pdata->supplied_to;
- di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+ di->btemp_psy.supplied_to = supply_interface;
+ di->btemp_psy.num_supplicants = ARRAY_SIZE(supply_interface);
di->btemp_psy.external_power_changed =
ab8500_btemp_external_power_changed;
@@ -1014,18 +1061,13 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
create_singlethread_workqueue("ab8500_btemp_wq");
if (di->btemp_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
- ret = -ENOMEM;
- goto free_device_info;
+ return -ENOMEM;
}
/* Init work for measuring temperature periodically */
INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
ab8500_btemp_periodic_work);
- /* Identify the battery */
- if (ab8500_btemp_id(di) < 0)
- dev_warn(di->dev, "failed to identify the battery\n");
-
/* Set BTEMP thermal limits. Low and Med are fixed */
di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
@@ -1093,12 +1135,14 @@ free_irq:
}
free_btemp_wq:
destroy_workqueue(di->btemp_wq);
-free_device_info:
- kfree(di);
-
return ret;
}
+static const struct of_device_id ab8500_btemp_match[] = {
+ { .compatible = "stericsson,ab8500-btemp", },
+ { },
+};
+
static struct platform_driver ab8500_btemp_driver = {
.probe = ab8500_btemp_probe,
.remove = ab8500_btemp_remove,
@@ -1107,6 +1151,7 @@ static struct platform_driver ab8500_btemp_driver = {
.driver = {
.name = "ab8500-btemp",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_btemp_match,
},
};
@@ -1120,7 +1165,7 @@ static void __exit ab8500_btemp_exit(void)
platform_driver_unregister(&ab8500_btemp_driver);
}
-subsys_initcall_sync(ab8500_btemp_init);
+device_initcall(ab8500_btemp_init);
module_exit(ab8500_btemp_exit);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index 7ecb8abe20b5..24b30b7ea5ca 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -23,12 +23,15 @@
#include <linux/err.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/mfd/abx500/ux500_chargalg.h>
#include <linux/usb/otg.h>
+#include <linux/mutex.h>
/* Charger constants */
#define NO_PW_CONN 0
@@ -52,6 +55,7 @@
#define MAIN_CH_INPUT_CURR_SHIFT 4
#define VBUS_IN_CURR_LIM_SHIFT 4
+#define AUTO_VBUS_IN_CURR_LIM_SHIFT 4
#define LED_INDICATOR_PWM_ENA 0x01
#define LED_INDICATOR_PWM_DIS 0x00
@@ -66,6 +70,11 @@
#define MAIN_CH_NOK 0x01
#define VBUS_DET 0x80
+#define MAIN_CH_STATUS2_MAINCHGDROP 0x80
+#define MAIN_CH_STATUS2_MAINCHARGERDETDBNC 0x40
+#define USB_CH_VBUSDROP 0x40
+#define USB_CH_VBUSDETDBNC 0x01
+
/* UsbLineStatus register bit masks */
#define AB8500_USB_LINK_STATUS 0x78
#define AB8500_STD_HOST_SUSP 0x18
@@ -77,6 +86,17 @@
/* Lowest charger voltage is 3.39V -> 0x4E */
#define LOW_VOLT_REG 0x4E
+/* Step up/down delay in us */
+#define STEP_UDELAY 1000
+
+#define CHARGER_STATUS_POLL 10 /* in ms */
+
+#define CHG_WD_INTERVAL (60 * HZ)
+
+#define AB8500_SW_CONTROL_FALLBACK 0x03
+/* Wait for enumeration before charging, in ms */
+#define WAIT_ACA_RID_ENUMERATION (5 * 1000)
+
/* UsbLineStatus register - usb types */
enum ab8500_charger_link_status {
USB_STAT_NOT_CONFIGURED,
@@ -95,6 +115,13 @@ enum ab8500_charger_link_status {
USB_STAT_HM_IDGND,
USB_STAT_RESERVED,
USB_STAT_NOT_VALID_LINK,
+ USB_STAT_PHY_EN,
+ USB_STAT_SUP_NO_IDGND_VBUS,
+ USB_STAT_SUP_IDGND_VBUS,
+ USB_STAT_CHARGER_LINE_1,
+ USB_STAT_CARKIT_1,
+ USB_STAT_CARKIT_2,
+ USB_STAT_ACA_DOCK_CHARGER,
};
enum ab8500_usb_state {
@@ -147,6 +174,7 @@ struct ab8500_charger_info {
int charger_voltage;
int cv_active;
bool wd_expired;
+ int charger_current;
};
struct ab8500_charger_event_flags {
@@ -157,12 +185,14 @@ struct ab8500_charger_event_flags {
bool usbchargernotok;
bool chgwdexp;
bool vbus_collapse;
+ bool vbus_drop_end;
};
struct ab8500_charger_usb_state {
- bool usb_changed;
int usb_current;
+ int usb_current_tmp;
enum ab8500_usb_state state;
+ enum ab8500_usb_state state_tmp;
spinlock_t usb_lock;
};
@@ -180,11 +210,17 @@ struct ab8500_charger_usb_state {
* charger is enabled
* @vbat Battery voltage
* @old_vbat Previously measured battery voltage
+ * @usb_device_is_unrecognised USB device is unrecognised by the hardware
* @autopower Indicate if we should have automatic pwron after pwrloss
+ * @autopower_cfg platform specific power config support for "pwron after pwrloss"
+ * @invalid_charger_detect_state State when forcing AB to use invalid charger
+ * @is_usb_host: Indicate if last detected USB type is host
+ * @is_aca_rid: Indicate if accessory is ACA type
+ * @current_stepping_sessions:
+ * Counter for current stepping sessions
* @parent: Pointer to the struct ab8500
* @gpadc: Pointer to the struct gpadc
- * @pdata: Pointer to the abx500_charger platform data
- * @bat: Pointer to the abx500_bm platform data
+ * @bm: Platform specific battery management information
* @flags: Structure for information about events triggered
* @usb_state: Structure for usb stack information
* @ac_chg: AC charger power supply
@@ -193,19 +229,28 @@ struct ab8500_charger_usb_state {
* @usb: Structure that holds the USB charger properties
* @regu: Pointer to the struct regulator
* @charger_wq: Work queue for the IRQs and checking HW state
+ * @usb_ipt_crnt_lock: Lock to protect the VBUS input current setting from concurrent updates
+ * @pm_lock: Lock to prevent the system from suspending
* @check_vbat_work Work for checking vbat threshold to adjust vbus current
* @check_hw_failure_work: Work for checking HW state
* @check_usbchgnotok_work: Work for checking USB charger not ok status
* @kick_wd_work: Work for kicking the charger watchdog in case
* of ABB rev 1.* due to the watchdog logic bug
+ * @ac_charger_attached_work: Work for checking if AC charger is still
+ * connected
+ * @usb_charger_attached_work: Work for checking if USB charger is still
+ * connected
* @ac_work: Work for checking AC charger connection
* @detect_usb_type_work: Work for detecting the USB type connected
* @usb_link_status_work: Work for checking the new USB link status
* @usb_state_changed_work: Work for checking USB state
+ * @attach_work: Work for detecting USB type
+ * @vbus_drop_end_work: Work for detecting VBUS drop end
* @check_main_thermal_prot_work:
* Work for checking Main thermal status
* @check_usb_thermal_prot_work:
* Work for checking USB thermal status
+ * @charger_attached_mutex: For controlling the wakelock
*/
struct ab8500_charger {
struct device *dev;
@@ -217,11 +262,16 @@ struct ab8500_charger {
bool vddadc_en_usb;
int vbat;
int old_vbat;
+ bool usb_device_is_unrecognised;
bool autopower;
+ bool autopower_cfg;
+ int invalid_charger_detect_state;
+ bool is_usb_host;
+ int is_aca_rid;
+ atomic_t current_stepping_sessions;
struct ab8500 *parent;
struct ab8500_gpadc *gpadc;
- struct abx500_charger_platform_data *pdata;
- struct abx500_bm_data *bat;
+ struct abx500_bm_data *bm;
struct ab8500_charger_event_flags flags;
struct ab8500_charger_usb_state usb_state;
struct ux500_charger ac_chg;
@@ -230,18 +280,24 @@ struct ab8500_charger {
struct ab8500_charger_info usb;
struct regulator *regu;
struct workqueue_struct *charger_wq;
+ struct mutex usb_ipt_crnt_lock;
struct delayed_work check_vbat_work;
struct delayed_work check_hw_failure_work;
struct delayed_work check_usbchgnotok_work;
struct delayed_work kick_wd_work;
+ struct delayed_work usb_state_changed_work;
+ struct delayed_work attach_work;
+ struct delayed_work ac_charger_attached_work;
+ struct delayed_work usb_charger_attached_work;
+ struct delayed_work vbus_drop_end_work;
struct work_struct ac_work;
struct work_struct detect_usb_type_work;
struct work_struct usb_link_status_work;
- struct work_struct usb_state_changed_work;
struct work_struct check_main_thermal_prot_work;
struct work_struct check_usb_thermal_prot_work;
struct usb_phy *usb_phy;
struct notifier_block nb;
+ struct mutex charger_attached_mutex;
};
/* AC properties */
@@ -265,50 +321,65 @@ static enum power_supply_property ab8500_charger_usb_props[] = {
POWER_SUPPLY_PROP_CURRENT_NOW,
};
-/**
- * ab8500_power_loss_handling - set how we handle powerloss.
- * @di: pointer to the ab8500_charger structure
- *
- * Magic nummbers are from STE HW department.
+/*
+ * Function for enabling and disabling sw fallback mode;
+ * it should always be disabled when no charger is connected.
*/
-static void ab8500_power_loss_handling(struct ab8500_charger *di)
+static void ab8500_enable_disable_sw_fallback(struct ab8500_charger *di,
+ bool fallback)
{
+ u8 val;
u8 reg;
+ u8 bank;
+ u8 bit;
int ret;
- dev_dbg(di->dev, "Autopower : %d\n", di->autopower);
+ dev_dbg(di->dev, "SW Fallback: %d\n", fallback);
- /* read the autopower register */
- ret = abx500_get_register_interruptible(di->dev, 0x15, 0x00, &reg);
- if (ret) {
- dev_err(di->dev, "%d write failed\n", __LINE__);
- return;
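+ /* The fallback bit lives in a different bank/register/bit on AB8500 than on later variants */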
+ if (is_ab8500(di->parent)) {
+ bank = 0x15;
+ reg = 0x0;
+ bit = 3;
+ } else {
+ bank = AB8500_SYS_CTRL1_BLOCK;
+ reg = AB8500_SW_CONTROL_FALLBACK;
+ bit = 0;
}
- /* enable the OPT emulation registers */
- ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2);
- if (ret) {
- dev_err(di->dev, "%d write failed\n", __LINE__);
+ /* read the register containing fallback bit */
+ ret = abx500_get_register_interruptible(di->dev, bank, reg, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%d read failed\n", __LINE__);
return;
}
- if (di->autopower)
- reg |= 0x8;
+ if (is_ab8500(di->parent)) {
+ /* enable the OPT emulation registers */
+ ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2);
+ if (ret) {
+ dev_err(di->dev, "%d write failed\n", __LINE__);
+ goto disable_otp;
+ }
+ }
+
+ if (fallback)
+ val |= (1 << bit);
else
- reg &= ~0x8;
+ val &= ~(1 << bit);
- /* write back the changed value to autopower reg */
- ret = abx500_set_register_interruptible(di->dev, 0x15, 0x00, reg);
+ /* write back the changed fallback bit value to register */
+ ret = abx500_set_register_interruptible(di->dev, bank, reg, val);
if (ret) {
dev_err(di->dev, "%d write failed\n", __LINE__);
- return;
}
- /* disable the set OTP registers again */
- ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0);
- if (ret) {
- dev_err(di->dev, "%d write failed\n", __LINE__);
- return;
+disable_otp:
+ if (is_ab8500(di->parent)) {
+ /* disable the set OTP registers again */
+ ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0);
+ if (ret) {
+ dev_err(di->dev, "%d write failed\n", __LINE__);
+ }
}
}
@@ -322,17 +393,17 @@ static void ab8500_power_loss_handling(struct ab8500_charger *di)
static void ab8500_power_supply_changed(struct ab8500_charger *di,
struct power_supply *psy)
{
- if (di->pdata->autopower_cfg) {
+ if (di->autopower_cfg) {
if (!di->usb.charger_connected &&
!di->ac.charger_connected &&
di->autopower) {
di->autopower = false;
- ab8500_power_loss_handling(di);
+ ab8500_enable_disable_sw_fallback(di, false);
} else if (!di->autopower &&
(di->ac.charger_connected ||
di->usb.charger_connected)) {
di->autopower = true;
- ab8500_power_loss_handling(di);
+ ab8500_enable_disable_sw_fallback(di, true);
}
}
power_supply_changed(psy);
@@ -345,6 +416,19 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
dev_dbg(di->dev, "USB connected:%i\n", connected);
di->usb.charger_connected = connected;
sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present");
+
+ if (connected) {
+ mutex_lock(&di->charger_attached_mutex);
+ mutex_unlock(&di->charger_attached_mutex);
+
+ queue_delayed_work(di->charger_wq,
+ &di->usb_charger_attached_work,
+ HZ);
+ } else {
+ cancel_delayed_work_sync(&di->usb_charger_attached_work);
+ mutex_lock(&di->charger_attached_mutex);
+ mutex_unlock(&di->charger_attached_mutex);
+ }
}
}
@@ -498,6 +582,7 @@ static int ab8500_charger_usb_cv(struct ab8500_charger *di)
/**
* ab8500_charger_detect_chargers() - Detect the connected chargers
* @di: pointer to the ab8500_charger structure
+ * @probe: if set, don't delay and wait for HW debouncing
*
* Returns the type of charger connected.
* For USB it will not mean we can actually charge from it
@@ -511,7 +596,7 @@ static int ab8500_charger_usb_cv(struct ab8500_charger *di)
* USB_PW_CONN if the USB power supply is connected
* AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected
*/
-static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
+static int ab8500_charger_detect_chargers(struct ab8500_charger *di, bool probe)
{
int result = NO_PW_CONN;
int ret;
@@ -529,13 +614,25 @@ static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
result = AC_PW_CONN;
/* Check for USB charger */
+
+ if (!probe) {
+ /*
+ * AB8500 says VBUS_DET_DBNC1 & VBUS_DET_DBNC100
+ * when disconnecting ACA even though no
+ * charger was connected. Try waiting a little
+ * longer than the 100 ms of VBUS_DET_DBNC100...
+ */
+ msleep(110);
+ }
ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
AB8500_CH_USBCH_STAT1_REG, &val);
if (ret < 0) {
dev_err(di->dev, "%s ab8500 read failed\n", __func__);
return ret;
}
-
+ dev_dbg(di->dev,
+ "%s AB8500_CH_USBCH_STAT1_REG %x\n", __func__,
+ val);
if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100))
result |= USB_PW_CONN;
@@ -552,31 +649,53 @@ static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
* Returns error code in case of failure else 0 on success
*/
static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
- enum ab8500_charger_link_status link_status)
+ enum ab8500_charger_link_status link_status)
{
int ret = 0;
+ di->usb_device_is_unrecognised = false;
+
+ /*
+ * Platform only supports USB 2.0.
+ * This means that the charging current from a USB source
+ * is at most 500 mA. Every occurrence of USB_STAT_*_HOST_*
+ * should set USB_CH_IP_CUR_LVL_0P5.
+ */
+
switch (link_status) {
case USB_STAT_STD_HOST_NC:
case USB_STAT_STD_HOST_C_NS:
case USB_STAT_STD_HOST_C_S:
dev_dbg(di->dev, "USB Type - Standard host is "
"detected through USB driver\n");
- di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ di->is_usb_host = true;
+ di->is_aca_rid = 0;
break;
case USB_STAT_HOST_CHG_HS_CHIRP:
di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ di->is_usb_host = true;
+ di->is_aca_rid = 0;
break;
case USB_STAT_HOST_CHG_HS:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ di->is_usb_host = true;
+ di->is_aca_rid = 0;
+ break;
case USB_STAT_ACA_RID_C_HS:
di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+ di->is_usb_host = false;
+ di->is_aca_rid = 0;
break;
case USB_STAT_ACA_RID_A:
/*
* Dedicated charger level minus maximum current accessory
- * can consume (300mA). Closest level is 1100mA
+ * can consume (900mA). Closest level is 500mA
*/
- di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+ dev_dbg(di->dev, "USB_STAT_ACA_RID_A detected\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ di->is_usb_host = false;
+ di->is_aca_rid = 1;
break;
case USB_STAT_ACA_RID_B:
/*
@@ -584,34 +703,68 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
* 100mA for potential accessory). Closest level is 1300mA
*/
di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
+ di->max_usb_in_curr);
+ di->is_usb_host = false;
+ di->is_aca_rid = 1;
break;
- case USB_STAT_DEDICATED_CHG:
case USB_STAT_HOST_CHG_NM:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ di->is_usb_host = true;
+ di->is_aca_rid = 0;
+ break;
+ case USB_STAT_DEDICATED_CHG:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ di->is_usb_host = false;
+ di->is_aca_rid = 0;
+ break;
case USB_STAT_ACA_RID_C_HS_CHIRP:
case USB_STAT_ACA_RID_C_NM:
di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ di->is_usb_host = false;
+ di->is_aca_rid = 1;
break;
- case USB_STAT_RESERVED:
- /*
- * This state is used to indicate that VBUS has dropped below
- * the detection level 4 times in a row. This is due to the
- * charger output current is set to high making the charger
- * voltage collapse. This have to be propagated through to
- * chargalg. This is done using the property
- * POWER_SUPPLY_PROP_CURRENT_AVG = 1
- */
- di->flags.vbus_collapse = true;
- dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
- "VBUS has collapsed\n");
- ret = -1;
- break;
- case USB_STAT_HM_IDGND:
case USB_STAT_NOT_CONFIGURED:
- case USB_STAT_NOT_VALID_LINK:
+ if (di->vbus_detected) {
+ di->usb_device_is_unrecognised = true;
+ dev_dbg(di->dev, "USB Type - Legacy charger.\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ break;
+ }
+ case USB_STAT_HM_IDGND:
dev_err(di->dev, "USB Type - Charging not allowed\n");
di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
ret = -ENXIO;
break;
+ case USB_STAT_RESERVED:
+ if (is_ab8500(di->parent)) {
+ di->flags.vbus_collapse = true;
+ dev_err(di->dev, "USB Type - USB_STAT_RESERVED "
+ "VBUS has collapsed\n");
+ ret = -ENXIO;
+ break;
+ }
+ if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+ dev_dbg(di->dev, "USB Type - Charging not allowed\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
+ link_status, di->max_usb_in_curr);
+ ret = -ENXIO;
+ break;
+ }
+ break;
+ case USB_STAT_CARKIT_1:
+ case USB_STAT_CARKIT_2:
+ case USB_STAT_ACA_DOCK_CHARGER:
+ case USB_STAT_CHARGER_LINE_1:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
+ di->max_usb_in_curr);
+ case USB_STAT_NOT_VALID_LINK:
+ dev_err(di->dev, "USB Type invalid - try charging anyway\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+
default:
dev_err(di->dev, "USB Type - Unknown\n");
di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
@@ -643,8 +796,14 @@ static int ab8500_charger_read_usb_type(struct ab8500_charger *di)
dev_err(di->dev, "%s ab8500 read failed\n", __func__);
return ret;
}
- ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
- AB8500_USB_LINE_STAT_REG, &val);
+ if (is_ab8500(di->parent)) {
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ } else {
+ if (is_ab9540(di->parent) || is_ab8505(di->parent))
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_USB, AB8500_USB_LINK1_STAT_REG, &val);
+ }
if (ret < 0) {
dev_err(di->dev, "%s ab8500 read failed\n", __func__);
return ret;
@@ -680,16 +839,25 @@ static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
ret = abx500_get_register_interruptible(di->dev,
AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG,
&val);
+ dev_dbg(di->dev, "%s AB8500_IT_SOURCE21_REG %x\n",
+ __func__, val);
if (ret < 0) {
dev_err(di->dev, "%s ab8500 read failed\n", __func__);
return ret;
}
- ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
- AB8500_USB_LINE_STAT_REG, &val);
+
+ if (is_ab8500(di->parent))
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_USB, AB8500_USB_LINE_STAT_REG, &val);
+ else
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_USB, AB8500_USB_LINK1_STAT_REG, &val);
if (ret < 0) {
dev_err(di->dev, "%s ab8500 read failed\n", __func__);
return ret;
}
+ dev_dbg(di->dev, "%s AB8500_USB_LINE_STAT_REG %x\n", __func__,
+ val);
/*
* Until the IT source register is read the UsbLineStatus
* register is not updated, hence doing the same
@@ -934,6 +1102,144 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
}
/**
+ * ab8500_charger_set_current() - set charger current
+ * @di: pointer to the ab8500_charger structure
+ * @ich: charger current, in mA
+ * @reg: select what charger register to set
+ *
+ * Set charger current.
+ * There is no state machine in the AB to step up/down the charger
+ * current to avoid dips and spikes on MAIN, VBUS and VBAT when
+ * charging is started. Instead we need to implement
+ * this charger current step-up/down here.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_set_current(struct ab8500_charger *di,
+ int ich, int reg)
+{
+ int ret = 0;
+ int auto_curr_index, curr_index, prev_curr_index, shift_value, i;
+ u8 reg_value;
+ u32 step_udelay;
+ bool no_stepping = false;
+
+ atomic_inc(&di->current_stepping_sessions);
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ reg, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s read failed\n", __func__);
+ goto exit_set_current;
+ }
+
+ switch (reg) {
+ case AB8500_MCH_IPT_CURLVL_REG:
+ shift_value = MAIN_CH_INPUT_CURR_SHIFT;
+ prev_curr_index = (reg_value >> shift_value);
+ curr_index = ab8500_current_to_regval(ich);
+ step_udelay = STEP_UDELAY;
+ if (!di->ac.charger_connected)
+ no_stepping = true;
+ break;
+ case AB8500_USBCH_IPT_CRNTLVL_REG:
+ shift_value = VBUS_IN_CURR_LIM_SHIFT;
+ prev_curr_index = (reg_value >> shift_value);
+ curr_index = ab8500_vbus_in_curr_to_regval(ich);
+ step_udelay = STEP_UDELAY * 100;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s read failed\n", __func__);
+ goto exit_set_current;
+ }
+ auto_curr_index =
+ reg_value >> AUTO_VBUS_IN_CURR_LIM_SHIFT;
+
+ dev_dbg(di->dev, "%s Auto VBUS curr is %d mA\n",
+ __func__,
+ ab8500_charger_vbus_in_curr_map[auto_curr_index]);
+
+ prev_curr_index = min(prev_curr_index, auto_curr_index);
+
+ if (!di->usb.charger_connected)
+ no_stepping = true;
+ break;
+ case AB8500_CH_OPT_CRNTLVL_REG:
+ shift_value = 0;
+ prev_curr_index = (reg_value >> shift_value);
+ curr_index = ab8500_current_to_regval(ich);
+ step_udelay = STEP_UDELAY;
+ if (curr_index && (curr_index - prev_curr_index) > 1)
+ step_udelay *= 100;
+
+ if (!di->usb.charger_connected && !di->ac.charger_connected)
+ no_stepping = true;
+
+ break;
+ default:
+ dev_err(di->dev, "%s current register not valid\n", __func__);
+ ret = -ENXIO;
+ goto exit_set_current;
+ }
+
+ if (curr_index < 0) {
+ dev_err(di->dev, "requested current limit out-of-range\n");
+ ret = -ENXIO;
+ goto exit_set_current;
+ }
+
+ /* only update current if it's been changed */
+ if (prev_curr_index == curr_index) {
+ dev_dbg(di->dev, "%s current not changed for reg: 0x%02x\n",
+ __func__, reg);
+ ret = 0;
+ goto exit_set_current;
+ }
+
+ dev_dbg(di->dev, "%s set charger current: %d mA for reg: 0x%02x\n",
+ __func__, ich, reg);
+
+ if (no_stepping) {
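+ /* No charger connected: write the target level directly, no ramping needed */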
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ reg, (u8)curr_index << shift_value);
+ if (ret)
+ dev_err(di->dev, "%s write failed\n", __func__);
+ } else if (prev_curr_index > curr_index) {
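+ /* Ramp down one register step at a time, sleeping between steps */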
+ for (i = prev_curr_index - 1; i >= curr_index; i--) {
+ dev_dbg(di->dev, "curr change_1 to: %x for 0x%02x\n",
+ (u8) i << shift_value, reg);
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, reg, (u8)i << shift_value);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ goto exit_set_current;
+ }
+ if (i != curr_index)
+ usleep_range(step_udelay, step_udelay * 2);
+ }
+ } else {
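+ /* Ramp up one register step at a time, sleeping between steps */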
+ for (i = prev_curr_index + 1; i <= curr_index; i++) {
+ dev_dbg(di->dev, "curr change_2 to: %x for 0x%02x\n",
+ (u8)i << shift_value, reg);
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, reg, (u8)i << shift_value);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ goto exit_set_current;
+ }
+ if (i != curr_index)
+ usleep_range(step_udelay, step_udelay * 2);
+ }
+ }
+
+exit_set_current:
+ atomic_dec(&di->current_stepping_sessions);
+
+ return ret;
+}
+
+/**
* ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
* @di: pointer to the ab8500_charger structure
* @ich_in: charger input current limit
@@ -944,12 +1250,11 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
int ich_in)
{
- int ret;
- int input_curr_index;
int min_value;
+ int ret;
/* We should always use the lowest current limit */
- min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+ min_value = min(di->bm->chg_params->usb_curr_max, ich_in);
switch (min_value) {
case 100:
@@ -964,22 +1269,47 @@ static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
break;
}
- input_curr_index = ab8500_vbus_in_curr_to_regval(min_value);
- if (input_curr_index < 0) {
- dev_err(di->dev, "VBUS input current limit too high\n");
- return -ENXIO;
- }
+ dev_info(di->dev, "VBUS input current limit set to %d mA\n", min_value);
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_USBCH_IPT_CRNTLVL_REG,
- input_curr_index << VBUS_IN_CURR_LIM_SHIFT);
- if (ret)
- dev_err(di->dev, "%s write failed\n", __func__);
+ mutex_lock(&di->usb_ipt_crnt_lock);
+ ret = ab8500_charger_set_current(di, min_value,
+ AB8500_USBCH_IPT_CRNTLVL_REG);
+ mutex_unlock(&di->usb_ipt_crnt_lock);
return ret;
}
/**
+ * ab8500_charger_set_main_in_curr() - set main charger input current
+ * @di: pointer to the ab8500_charger structure
+ * @ich_in: input charger current, in mA
+ *
+ * Set main charger input current.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_set_main_in_curr(struct ab8500_charger *di,
+ int ich_in)
+{
+ return ab8500_charger_set_current(di, ich_in,
+ AB8500_MCH_IPT_CURLVL_REG);
+}
+
+/**
+ * ab8500_charger_set_output_curr() - set charger output current
+ * @di: pointer to the ab8500_charger structure
+ * @ich_out: output charger current, in mA
+ *
+ * Set charger output current.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_set_output_curr(struct ab8500_charger *di,
+ int ich_out)
+{
+ return ab8500_charger_set_current(di, ich_out,
+ AB8500_CH_OPT_CRNTLVL_REG);
+}
+
+/**
* ab8500_charger_led_en() - turn on/off charging led
* @di: pointer to the ab8500_charger structure
* @on: flag to turn on/off the charging led
@@ -1072,7 +1402,7 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
volt_index = ab8500_voltage_to_regval(vset);
curr_index = ab8500_current_to_regval(iset);
input_curr_index = ab8500_current_to_regval(
- di->bat->chg_params->ac_curr_max);
+ di->bm->chg_params->ac_curr_max);
if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
dev_err(di->dev,
"Charger voltage or current too high, "
@@ -1088,23 +1418,24 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
return ret;
}
/* MainChInputCurr: current that can be drawn from the charger*/
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_MCH_IPT_CURLVL_REG,
- input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
+ ret = ab8500_charger_set_main_in_curr(di,
+ di->bm->chg_params->ac_curr_max);
if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
+ dev_err(di->dev, "%s Failed to set MainChInputCurr\n",
+ __func__);
return ret;
}
/* ChOutputCurentLevel: protected output current */
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ ret = ab8500_charger_set_output_curr(di, iset);
if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
+ dev_err(di->dev, "%s "
+ "Failed to set ChOutputCurentLevel\n",
+ __func__);
return ret;
}
/* Check if VBAT overshoot control should be enabled */
- if (!di->bat->enable_overshoot)
+ if (!di->bm->enable_overshoot)
overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N;
/* Enable Main Charger */
@@ -1156,12 +1487,11 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
return ret;
}
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_CHARGER,
- AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
+ ret = ab8500_charger_set_output_curr(di, 0);
if (ret) {
- dev_err(di->dev,
- "%s write failed\n", __func__);
+ dev_err(di->dev, "%s "
+ "Failed to set ChOutputCurentLevel\n",
+ __func__);
return ret;
}
} else {
@@ -1257,24 +1587,13 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
dev_err(di->dev, "%s write failed\n", __func__);
return ret;
}
- /* USBChInputCurr: current that can be drawn from the usb */
- ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
- if (ret) {
- dev_err(di->dev, "setting USBChInputCurr failed\n");
- return ret;
- }
- /* ChOutputCurentLevel: protected output current */
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
- if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
- return ret;
- }
/* Check if VBAT overshoot control should be enabled */
- if (!di->bat->enable_overshoot)
+ if (!di->bm->enable_overshoot)
overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
/* Enable USB Charger */
+ dev_dbg(di->dev,
+ "Enabling USB with write to AB8500_USBCH_CTRL1_REG\n");
ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
if (ret) {
@@ -1287,11 +1606,29 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
if (ret < 0)
dev_err(di->dev, "failed to enable LED\n");
+ di->usb.charger_online = 1;
+
+ /* USBChInputCurr: current that can be drawn from the usb */
+ ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ if (ret) {
+ dev_err(di->dev, "setting USBChInputCurr failed\n");
+ return ret;
+ }
+
+ /* ChOutputCurentLevel: protected output current */
+ ret = ab8500_charger_set_output_curr(di, ich_out);
+ if (ret) {
+ dev_err(di->dev, "%s "
+ "Failed to set ChOutputCurentLevel\n",
+ __func__);
+ return ret;
+ }
+
queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
- di->usb.charger_online = 1;
} else {
/* Disable USB charging */
+ dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
ret = abx500_set_register_interruptible(di->dev,
AB8500_CHARGER,
AB8500_USBCH_CTRL1_REG, 0);
@@ -1304,7 +1641,21 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
ret = ab8500_charger_led_en(di, false);
if (ret < 0)
dev_err(di->dev, "failed to disable LED\n");
+ /* USBChInputCurr: current that can be drawn from the usb */
+ ret = ab8500_charger_set_vbus_in_curr(di, 0);
+ if (ret) {
+ dev_err(di->dev, "setting USBChInputCurr failed\n");
+ return ret;
+ }
+ /* ChOutputCurentLevel: protected output current */
+ ret = ab8500_charger_set_output_curr(di, 0);
+ if (ret) {
+ dev_err(di->dev, "%s "
+ "Failed to reset ChOutputCurentLevel\n",
+ __func__);
+ return ret;
+ }
di->usb.charger_online = 0;
di->usb.wd_expired = false;
@@ -1364,7 +1715,6 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
int ich_out)
{
int ret;
- int curr_index;
struct ab8500_charger *di;
if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
@@ -1374,18 +1724,11 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
else
return -ENXIO;
- curr_index = ab8500_current_to_regval(ich_out);
- if (curr_index < 0) {
- dev_err(di->dev,
- "Charger current too high, "
- "charging not started\n");
- return -ENXIO;
- }
-
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ ret = ab8500_charger_set_output_curr(di, ich_out);
if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
+ dev_err(di->dev, "%s "
+ "Failed to set ChOutputCurentLevel\n",
+ __func__);
return ret;
}
@@ -1595,7 +1938,7 @@ static void ab8500_charger_ac_work(struct work_struct *work)
* synchronously, we have the check if the main charger is
* connected by reading the status register
*/
- ret = ab8500_charger_detect_chargers(di);
+ ret = ab8500_charger_detect_chargers(di, false);
if (ret < 0)
return;
@@ -1610,6 +1953,84 @@ static void ab8500_charger_ac_work(struct work_struct *work)
sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
}
+static void ab8500_charger_usb_attached_work(struct work_struct *work)
+{
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger,
+ usb_charger_attached_work.work);
+ int usbch = (USB_CH_VBUSDROP | USB_CH_VBUSDETDBNC);
+ int ret, i;
+ u8 statval;
+
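+ /* Disable USB charging only if the drop/debounce status bits stay set for ten consecutive polls; otherwise keep monitoring */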
+ for (i = 0; i < 10; i++) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT1_REG,
+ &statval);
+ if (ret < 0) {
+ dev_err(di->dev, "ab8500 read failed %d\n", __LINE__);
+ goto reschedule;
+ }
+ if ((statval & usbch) != usbch)
+ goto reschedule;
+
+ msleep(CHARGER_STATUS_POLL);
+ }
+
+ ab8500_charger_usb_en(&di->usb_chg, 0, 0, 0);
+
+ mutex_lock(&di->charger_attached_mutex);
+ mutex_unlock(&di->charger_attached_mutex);
+
+ return;
+
+reschedule:
+ queue_delayed_work(di->charger_wq,
+ &di->usb_charger_attached_work,
+ HZ);
+}
+
+static void ab8500_charger_ac_attached_work(struct work_struct *work)
+{
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger,
+ ac_charger_attached_work.work);
+ int mainch = (MAIN_CH_STATUS2_MAINCHGDROP |
+ MAIN_CH_STATUS2_MAINCHARGERDETDBNC);
+ int ret, i;
+ u8 statval;
+
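+ /* Same polling scheme as the USB case: ten consecutive reads with the drop/debounce bits set before giving up on the AC charger */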
+ for (i = 0; i < 10; i++) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_STATUS2_REG,
+ &statval);
+ if (ret < 0) {
+ dev_err(di->dev, "ab8500 read failed %d\n", __LINE__);
+ goto reschedule;
+ }
+
+ if ((statval & mainch) != mainch)
+ goto reschedule;
+
+ msleep(CHARGER_STATUS_POLL);
+ }
+
+ ab8500_charger_ac_en(&di->ac_chg, 0, 0, 0);
+ queue_work(di->charger_wq, &di->ac_work);
+
+ mutex_lock(&di->charger_attached_mutex);
+ mutex_unlock(&di->charger_attached_mutex);
+
+ return;
+
+reschedule:
+ queue_delayed_work(di->charger_wq,
+ &di->ac_charger_attached_work,
+ HZ);
+}
+
/**
* ab8500_charger_detect_usb_type_work() - work to detect USB type
* @work: Pointer to the work_struct structure
@@ -1628,16 +2049,18 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
* synchronously, we have the check if is
* connected by reading the status register
*/
- ret = ab8500_charger_detect_chargers(di);
+ ret = ab8500_charger_detect_chargers(di, false);
if (ret < 0)
return;
if (!(ret & USB_PW_CONN)) {
- di->vbus_detected = 0;
+ dev_dbg(di->dev, "%s di->vbus_detected = false\n", __func__);
+ di->vbus_detected = false;
ab8500_charger_set_usb_connected(di, false);
ab8500_power_supply_changed(di, &di->usb_chg.psy);
} else {
- di->vbus_detected = 1;
+ dev_dbg(di->dev, "%s di->vbus_detected = true\n", __func__);
+ di->vbus_detected = true;
if (is_ab8500_1p1_or_earlier(di->parent)) {
ret = ab8500_charger_detect_usb_type(di);
@@ -1647,7 +2070,8 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
&di->usb_chg.psy);
}
} else {
- /* For ABB cut2.0 and onwards we have an IRQ,
+ /*
+ * For ABB cut2.0 and onwards we have an IRQ,
* USB_LINK_STATUS that will be triggered when the USB
* link status changes. The exception is USB connected
* during startup. Then we don't get a
@@ -1668,6 +2092,29 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
}
/**
+ * ab8500_charger_usb_link_attach_work() - work to detect USB type
+ * @work: pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_usb_link_attach_work(struct work_struct *work)
+{
+ struct ab8500_charger *di =
+ container_of(work, struct ab8500_charger, attach_work.work);
+ int ret;
+
+ /* Update maximum input current if USB enumeration is not detected */
+ if (!di->usb.charger_online) {
+ ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ if (ret)
+ return;
+ }
+
+ ab8500_charger_set_usb_connected(di, true);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+}
+
+/**
* ab8500_charger_usb_link_status_work() - work to detect USB type
* @work: pointer to the work_struct structure
*
@@ -1675,7 +2122,9 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
*/
static void ab8500_charger_usb_link_status_work(struct work_struct *work)
{
+ int detected_chargers;
int ret;
+ u8 val;
struct ab8500_charger *di = container_of(work,
struct ab8500_charger, usb_link_status_work);
@@ -1685,31 +2134,95 @@ static void ab8500_charger_usb_link_status_work(struct work_struct *work)
* synchronously, we have the check if is
* connected by reading the status register
*/
- ret = ab8500_charger_detect_chargers(di);
- if (ret < 0)
+ detected_chargers = ab8500_charger_detect_chargers(di, false);
+ if (detected_chargers < 0)
return;
- if (!(ret & USB_PW_CONN)) {
- di->vbus_detected = 0;
+ /*
+ * Some chargers that break the USB spec are
+ * identified as invalid by the AB8500, which then refuses
+ * to start the charging process, but by jumping
+ * through a few hoops it can be forced to start.
+ */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ if (ret >= 0)
+ dev_dbg(di->dev, "UsbLineStatus register = 0x%02x\n", val);
+ else
+ dev_dbg(di->dev, "Error reading USB link status\n");
+
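+ /* Invalid-charger workaround: state 0 forces the charger and detection on, state 1 stops detection and re-reads the link status, state 2 means done */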
+ if (detected_chargers & USB_PW_CONN) {
+ if (((val & AB8500_USB_LINK_STATUS) >> 3) == USB_STAT_NOT_VALID_LINK &&
+ di->invalid_charger_detect_state == 0) {
+ dev_dbg(di->dev, "Invalid charger detected, state= 0\n");
+ /* Enable charger */
+ abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_USBCH_CTRL1_REG, 0x01, 0x01);
+ /* Enable charger detection */
+ abx500_mask_and_set_register_interruptible(di->dev, AB8500_USB,
+ AB8500_MCH_IPT_CURLVL_REG, 0x01, 0x01);
+ di->invalid_charger_detect_state = 1;
+ /* exit and wait for new link status interrupt */
+ return;
+
+ }
+ if (di->invalid_charger_detect_state == 1) {
+ dev_dbg(di->dev, "Invalid charger detected, state= 1\n");
+ /* Stop charger detection */
+ abx500_mask_and_set_register_interruptible(di->dev, AB8500_USB,
+ AB8500_MCH_IPT_CURLVL_REG, 0x01, 0x00);
+ /* Check link status */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ dev_dbg(di->dev, "USB link status= 0x%02x\n",
+ (val & AB8500_USB_LINK_STATUS) >> 3);
+ di->invalid_charger_detect_state = 2;
+ }
+ } else {
+ di->invalid_charger_detect_state = 0;
+ }
+
+ if (!(detected_chargers & USB_PW_CONN)) {
+ di->vbus_detected = false;
ab8500_charger_set_usb_connected(di, false);
ab8500_power_supply_changed(di, &di->usb_chg.psy);
- } else {
- di->vbus_detected = 1;
- ret = ab8500_charger_read_usb_type(di);
- if (!ret) {
- /* Update maximum input current */
- ret = ab8500_charger_set_vbus_in_curr(di,
- di->max_usb_in_curr);
- if (ret)
- return;
+ return;
+ }
- ab8500_charger_set_usb_connected(di, true);
- ab8500_power_supply_changed(di, &di->usb_chg.psy);
- } else if (ret == -ENXIO) {
+ dev_dbg(di->dev,"%s di->vbus_detected = true\n",__func__);
+ di->vbus_detected = true;
+ ret = ab8500_charger_read_usb_type(di);
+ if (ret) {
+ if (ret == -ENXIO) {
/* No valid charger type detected */
ab8500_charger_set_usb_connected(di, false);
ab8500_power_supply_changed(di, &di->usb_chg.psy);
}
+ return;
+ }
+
+ if (di->usb_device_is_unrecognised) {
+ dev_dbg(di->dev,
+ "Potential Legacy Charger device. "
+ "Delay work for %d msec for USB enum "
+ "to finish",
+ WAIT_ACA_RID_ENUMERATION);
+ queue_delayed_work(di->charger_wq,
+ &di->attach_work,
+ msecs_to_jiffies(WAIT_ACA_RID_ENUMERATION));
+ } else if (di->is_aca_rid == 1) {
+ /* Only wait once */
+ di->is_aca_rid++;
+ dev_dbg(di->dev,
+ "%s Wait %d msec for USB enum to finish",
+ __func__, WAIT_ACA_RID_ENUMERATION);
+ queue_delayed_work(di->charger_wq,
+ &di->attach_work,
+ msecs_to_jiffies(WAIT_ACA_RID_ENUMERATION));
+ } else {
+ queue_delayed_work(di->charger_wq,
+ &di->attach_work,
+ 0);
}
}
@@ -1719,24 +2232,20 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
unsigned long flags;
struct ab8500_charger *di = container_of(work,
- struct ab8500_charger, usb_state_changed_work);
+ struct ab8500_charger, usb_state_changed_work.work);
- if (!di->vbus_detected)
+ if (!di->vbus_detected) {
+ dev_dbg(di->dev,
+ "%s !di->vbus_detected\n",
+ __func__);
return;
+ }
spin_lock_irqsave(&di->usb_state.usb_lock, flags);
- di->usb_state.usb_changed = false;
+ di->usb_state.state = di->usb_state.state_tmp;
+ di->usb_state.usb_current = di->usb_state.usb_current_tmp;
spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
- /*
- * wait for some time until you get updates from the usb stack
- * and negotiations are completed
- */
- msleep(250);
-
- if (di->usb_state.usb_changed)
- return;
-
dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
__func__, di->usb_state.state, di->usb_state.usb_current);
@@ -1890,6 +2399,10 @@ static irqreturn_t ab8500_charger_mainchunplugdet_handler(int irq, void *_di)
dev_dbg(di->dev, "Main charger unplugged\n");
queue_work(di->charger_wq, &di->ac_work);
+ cancel_delayed_work_sync(&di->ac_charger_attached_work);
+ mutex_lock(&di->charger_attached_mutex);
+ mutex_unlock(&di->charger_attached_mutex);
+
return IRQ_HANDLED;
}
@@ -1907,6 +2420,11 @@ static irqreturn_t ab8500_charger_mainchplugdet_handler(int irq, void *_di)
dev_dbg(di->dev, "Main charger plugged\n");
queue_work(di->charger_wq, &di->ac_work);
+ mutex_lock(&di->charger_attached_mutex);
+ mutex_unlock(&di->charger_attached_mutex);
+ queue_delayed_work(di->charger_wq,
+ &di->ac_charger_attached_work,
+ HZ);
return IRQ_HANDLED;
}
@@ -1969,6 +2487,21 @@ static irqreturn_t ab8500_charger_mainchthprotf_handler(int irq, void *_di)
return IRQ_HANDLED;
}
+static void ab8500_charger_vbus_drop_end_work(struct work_struct *work)
+{
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, vbus_drop_end_work.work);
+
+ di->flags.vbus_drop_end = false;
+
+ /* Reset the drop counter */
+ abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CHARGER_CTRL, 0x01);
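+ /* Restore the VBUS input current limit now that the drop has ended */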
+
+ if (di->usb.charger_connected)
+ ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+}
+
/**
* ab8500_charger_vbusdetf_handler() - VBUS falling detected
* @irq: interrupt number
@@ -1980,6 +2513,7 @@ static irqreturn_t ab8500_charger_vbusdetf_handler(int irq, void *_di)
{
struct ab8500_charger *di = _di;
+ di->vbus_detected = false;
dev_dbg(di->dev, "VBUS falling detected\n");
queue_work(di->charger_wq, &di->detect_usb_type_work);
@@ -1999,6 +2533,7 @@ static irqreturn_t ab8500_charger_vbusdetr_handler(int irq, void *_di)
di->vbus_detected = true;
dev_dbg(di->dev, "VBUS rising detected\n");
+
queue_work(di->charger_wq, &di->detect_usb_type_work);
return IRQ_HANDLED;
@@ -2107,6 +2642,25 @@ static irqreturn_t ab8500_charger_chwdexp_handler(int irq, void *_di)
}
/**
+ * ab8500_charger_vbuschdropend_handler() - VBUS drop removed
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbuschdropend_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS charger drop ended\n");
+ di->flags.vbus_drop_end = true;
+ queue_delayed_work(di->charger_wq, &di->vbus_drop_end_work,
+ round_jiffies(30 * HZ));
+
+ return IRQ_HANDLED;
+}
+
+/**
* ab8500_charger_vbusovv_handler() - VBUS overvoltage detected
* @irq: interrupt number
* @_di: pointer to the ab8500_charger structure
@@ -2146,6 +2700,7 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct ab8500_charger *di;
+ int ret;
di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy));
@@ -2167,7 +2722,10 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
val->intval = di->ac.charger_connected;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- di->ac.charger_voltage = ab8500_charger_get_ac_voltage(di);
+ ret = ab8500_charger_get_ac_voltage(di);
+ if (ret >= 0)
+ di->ac.charger_voltage = ret;
+ /* On error, use previous value */
val->intval = di->ac.charger_voltage * 1000;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
@@ -2179,7 +2737,10 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
val->intval = di->ac.cv_active;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = ab8500_charger_get_ac_current(di) * 1000;
+ ret = ab8500_charger_get_ac_current(di);
+ if (ret >= 0)
+ di->ac.charger_current = ret;
+ val->intval = di->ac.charger_current * 1000;
break;
default:
return -EINVAL;
@@ -2206,6 +2767,7 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct ab8500_charger *di;
+ int ret;
di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy));
@@ -2229,7 +2791,9 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
val->intval = di->usb.charger_connected;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- di->usb.charger_voltage = ab8500_charger_get_vbus_voltage(di);
+ ret = ab8500_charger_get_vbus_voltage(di);
+ if (ret >= 0)
+ di->usb.charger_voltage = ret;
val->intval = di->usb.charger_voltage * 1000;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
@@ -2241,7 +2805,10 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
val->intval = di->usb.cv_active;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = ab8500_charger_get_usb_current(di) * 1000;
+ ret = ab8500_charger_get_usb_current(di);
+ if (ret >= 0)
+ di->usb.charger_current = ret;
+ val->intval = di->usb.charger_current * 1000;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
/*
@@ -2291,13 +2858,23 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}
}
- /* VBUS OVV set to 6.3V and enable automatic current limitiation */
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_CHARGER,
- AB8500_USBCH_CTRL2_REG,
- VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
+ if (is_ab9540_2p0(di->parent) || is_ab8505_2p0(di->parent))
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_USBCH_CTRL2_REG,
+ VBUS_AUTO_IN_CURR_LIM_ENA,
+ VBUS_AUTO_IN_CURR_LIM_ENA);
+ else
+ /*
+ * VBUS OVV set to 6.3V and enable automatic current limitation
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_USBCH_CTRL2_REG,
+ VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
if (ret) {
- dev_err(di->dev, "failed to set VBUS OVV\n");
+ dev_err(di->dev,
+ "failed to set automatic current limitation\n");
goto out;
}
@@ -2353,12 +2930,26 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
goto out;
}
+ /* Set charger watchdog timeout */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_WD_TIMER_REG, WD_TIMER);
+ if (ret) {
+ dev_err(di->dev, "failed to set charger watchdog timeout\n");
+ goto out;
+ }
+
+ ret = ab8500_charger_led_en(di, false);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to disable LED\n");
+ goto out;
+ }
+
/* Backup battery voltage and current */
ret = abx500_set_register_interruptible(di->dev,
AB8500_RTC,
AB8500_RTC_BACKUP_CHG_REG,
- di->bat->bkup_bat_v |
- di->bat->bkup_bat_i);
+ di->bm->bkup_bat_v |
+ di->bm->bkup_bat_i);
if (ret) {
dev_err(di->dev, "failed to setup backup battery charging\n");
goto out;
@@ -2392,6 +2983,7 @@ static struct ab8500_charger_interrupts ab8500_charger_irq[] = {
{"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler},
{"VBUS_OVV", ab8500_charger_vbusovv_handler},
{"CH_WD_EXP", ab8500_charger_chwdexp_handler},
+ {"VBUS_CH_DROP_END", ab8500_charger_vbuschdropend_handler},
};
static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
@@ -2402,6 +2994,9 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
enum ab8500_usb_state bm_usb_state;
unsigned mA = *((unsigned *)power);
+ if (!di)
+ return NOTIFY_DONE;
+
if (event != USB_EVENT_VBUS) {
dev_dbg(di->dev, "not a standard host, returning\n");
return NOTIFY_DONE;
@@ -2425,13 +3020,15 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
__func__, bm_usb_state, mA);
spin_lock(&di->usb_state.usb_lock);
- di->usb_state.usb_changed = true;
+ di->usb_state.state_tmp = bm_usb_state;
+ di->usb_state.usb_current_tmp = mA;
spin_unlock(&di->usb_state.usb_lock);
- di->usb_state.state = bm_usb_state;
- di->usb_state.usb_current = mA;
-
- queue_work(di->charger_wq, &di->usb_state_changed_work);
+ /*
+ * wait for some time until you get updates from the usb stack
+ * and negotiations are completed
+ */
+ queue_delayed_work(di->charger_wq, &di->usb_state_changed_work, HZ/2);
return NOTIFY_OK;
}
@@ -2471,6 +3068,9 @@ static int ab8500_charger_resume(struct platform_device *pdev)
&di->check_hw_failure_work, 0);
}
+ if (di->flags.vbus_drop_end)
+ queue_delayed_work(di->charger_wq, &di->vbus_drop_end_work, 0);
+
return 0;
}
@@ -2483,6 +3083,23 @@ static int ab8500_charger_suspend(struct platform_device *pdev,
if (delayed_work_pending(&di->check_hw_failure_work))
cancel_delayed_work(&di->check_hw_failure_work);
+ if (delayed_work_pending(&di->vbus_drop_end_work))
+ cancel_delayed_work(&di->vbus_drop_end_work);
+
+ flush_delayed_work(&di->attach_work);
+ flush_delayed_work(&di->usb_charger_attached_work);
+ flush_delayed_work(&di->ac_charger_attached_work);
+ flush_delayed_work(&di->check_usbchgnotok_work);
+ flush_delayed_work(&di->check_vbat_work);
+ flush_delayed_work(&di->kick_wd_work);
+
+ flush_work(&di->usb_link_status_work);
+ flush_work(&di->ac_work);
+ flush_work(&di->detect_usb_type_work);
+
+ if (atomic_read(&di->current_stepping_sessions))
+ return -EAGAIN;
+
return 0;
}
#else
@@ -2507,9 +3124,6 @@ static int ab8500_charger_remove(struct platform_device *pdev)
free_irq(irq, di);
}
- /* disable the regulator */
- regulator_put(di->regu);
-
/* Backup battery voltage and current disable */
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
@@ -2523,28 +3137,51 @@ static int ab8500_charger_remove(struct platform_device *pdev)
destroy_workqueue(di->charger_wq);
flush_scheduled_work();
- power_supply_unregister(&di->usb_chg.psy);
- power_supply_unregister(&di->ac_chg.psy);
+ if (di->usb_chg.enabled)
+ power_supply_unregister(&di->usb_chg.psy);
+#if !defined(CONFIG_CHARGER_PM2301)
+ if (di->ac_chg.enabled)
+ power_supply_unregister(&di->ac_chg.psy);
+#endif
platform_set_drvdata(pdev, NULL);
- kfree(di);
return 0;
}
+static char *supply_interface[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+ "ab8500_btemp",
+};
+
static int ab8500_charger_probe(struct platform_device *pdev)
{
- int irq, i, charger_status, ret = 0;
- struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
+ struct abx500_bm_data *plat = pdev->dev.platform_data;
struct ab8500_charger *di;
+ int irq, i, charger_status, ret = 0, ch_stat;
+
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "%s no mem for ab8500_charger\n", __func__);
+ return -ENOMEM;
+ }
- if (!plat_data) {
- dev_err(&pdev->dev, "No platform data\n");
+ if (!plat) {
+ dev_err(&pdev->dev, "no battery management data supplied\n");
return -EINVAL;
}
+ di->bm = plat;
- di = kzalloc(sizeof(*di), GFP_KERNEL);
- if (!di)
- return -ENOMEM;
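+ /* Device tree data refines the battery data; autopower_cfg is only available from the device tree */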
+ if (np) {
+ ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get battery information\n");
+ return ret;
+ }
+ di->autopower_cfg = of_property_read_bool(np, "autopower_cfg");
+ } else {
+ di->autopower_cfg = false;
+ }
/* get parent data */
di->dev = &pdev->dev;
@@ -2553,24 +3190,10 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* initialize lock */
spin_lock_init(&di->usb_state.usb_lock);
-
- /* get charger specific platform data */
- di->pdata = plat_data->charger;
- if (!di->pdata) {
- dev_err(di->dev, "no charger platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
- /* get battery specific platform data */
- di->bat = plat_data->battery;
- if (!di->bat) {
- dev_err(di->dev, "no battery platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
+ mutex_init(&di->usb_ipt_crnt_lock);
di->autopower = false;
+ di->invalid_charger_detect_state = 0;
/* AC supply */
/* power_supply base class */
@@ -2579,8 +3202,8 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->ac_chg.psy.properties = ab8500_charger_ac_props;
di->ac_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_ac_props);
di->ac_chg.psy.get_property = ab8500_charger_ac_get_property;
- di->ac_chg.psy.supplied_to = di->pdata->supplied_to;
- di->ac_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ di->ac_chg.psy.supplied_to = supply_interface;
+ di->ac_chg.psy.num_supplicants = ARRAY_SIZE(supply_interface);
/* ux500_charger sub-class */
di->ac_chg.ops.enable = &ab8500_charger_ac_en;
di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
@@ -2589,6 +3212,9 @@ static int ab8500_charger_probe(struct platform_device *pdev)
ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
di->ac_chg.max_out_curr = ab8500_charger_current_map[
ARRAY_SIZE(ab8500_charger_current_map) - 1];
+ di->ac_chg.wdt_refresh = CHG_WD_INTERVAL;
+ di->ac_chg.enabled = di->bm->ac_enabled;
+ di->ac_chg.external = false;
/* USB supply */
/* power_supply base class */
@@ -2597,8 +3223,8 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.psy.properties = ab8500_charger_usb_props;
di->usb_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_usb_props);
di->usb_chg.psy.get_property = ab8500_charger_usb_get_property;
- di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
- di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ di->usb_chg.psy.supplied_to = supply_interface;
+ di->usb_chg.psy.num_supplicants = ARRAY_SIZE(supply_interface);
/* ux500_charger sub-class */
di->usb_chg.ops.enable = &ab8500_charger_usb_en;
di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
@@ -2607,23 +3233,31 @@ static int ab8500_charger_probe(struct platform_device *pdev)
ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
di->usb_chg.max_out_curr = ab8500_charger_current_map[
ARRAY_SIZE(ab8500_charger_current_map) - 1];
-
+ di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
+ di->usb_chg.enabled = di->bm->usb_enabled;
+ di->usb_chg.external = false;
/* Create a work queue for the charger */
di->charger_wq =
create_singlethread_workqueue("ab8500_charger_wq");
if (di->charger_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
- ret = -ENOMEM;
- goto free_device_info;
+ return -ENOMEM;
}
+ mutex_init(&di->charger_attached_mutex);
+
/* Init work for HW failure check */
INIT_DEFERRABLE_WORK(&di->check_hw_failure_work,
ab8500_charger_check_hw_failure_work);
INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work,
ab8500_charger_check_usbchargernotok_work);
+ INIT_DELAYED_WORK(&di->ac_charger_attached_work,
+ ab8500_charger_ac_attached_work);
+ INIT_DELAYED_WORK(&di->usb_charger_attached_work,
+ ab8500_charger_usb_attached_work);
+
/*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
 * logic. That means we have to continuously kick the charger
@@ -2639,6 +3273,15 @@ static int ab8500_charger_probe(struct platform_device *pdev)
INIT_DEFERRABLE_WORK(&di->check_vbat_work,
ab8500_charger_check_vbat_work);
+ INIT_DELAYED_WORK(&di->attach_work,
+ ab8500_charger_usb_link_attach_work);
+
+ INIT_DELAYED_WORK(&di->usb_state_changed_work,
+ ab8500_charger_usb_state_changed_work);
+
+ INIT_DELAYED_WORK(&di->vbus_drop_end_work,
+ ab8500_charger_vbus_drop_end_work);
+
/* Init work for charger detection */
INIT_WORK(&di->usb_link_status_work,
ab8500_charger_usb_link_status_work);
@@ -2646,9 +3289,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
INIT_WORK(&di->detect_usb_type_work,
ab8500_charger_detect_usb_type_work);
- INIT_WORK(&di->usb_state_changed_work,
- ab8500_charger_usb_state_changed_work);
-
/* Init work for checking HW status */
INIT_WORK(&di->check_main_thermal_prot_work,
ab8500_charger_check_main_thermal_prot_work);
@@ -2660,7 +3300,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
* is a charger connected to avoid erroneous BTEMP_HIGH/LOW
* interrupts during charging
*/
- di->regu = regulator_get(di->dev, "vddadc");
+ di->regu = devm_regulator_get(di->dev, "vddadc");
if (IS_ERR(di->regu)) {
ret = PTR_ERR(di->regu);
dev_err(di->dev, "failed to get vddadc regulator\n");
@@ -2672,21 +3312,25 @@ static int ab8500_charger_probe(struct platform_device *pdev)
ret = ab8500_charger_init_hw_registers(di);
if (ret) {
dev_err(di->dev, "failed to initialize ABB registers\n");
- goto free_regulator;
+ goto free_charger_wq;
}
/* Register AC charger class */
- ret = power_supply_register(di->dev, &di->ac_chg.psy);
- if (ret) {
- dev_err(di->dev, "failed to register AC charger\n");
- goto free_regulator;
+ if(di->ac_chg.enabled) {
+ ret = power_supply_register(di->dev, &di->ac_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register AC charger\n");
+ goto free_charger_wq;
+ }
}
/* Register USB charger class */
- ret = power_supply_register(di->dev, &di->usb_chg.psy);
- if (ret) {
- dev_err(di->dev, "failed to register USB charger\n");
- goto free_ac;
+ if(di->usb_chg.enabled) {
+ ret = power_supply_register(di->dev, &di->usb_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register USB charger\n");
+ goto free_ac;
+ }
}
di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
@@ -2703,7 +3347,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
}
/* Identify the connected charger types during startup */
- charger_status = ab8500_charger_detect_chargers(di);
+ charger_status = ab8500_charger_detect_chargers(di, true);
if (charger_status & AC_PW_CONN) {
di->ac.charger_connected = 1;
di->ac_conn = true;
@@ -2712,7 +3356,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
}
if (charger_status & USB_PW_CONN) {
- dev_dbg(di->dev, "VBUS Detect during startup\n");
di->vbus_detected = true;
di->vbus_detected_start = true;
queue_work(di->charger_wq,
@@ -2737,6 +3380,23 @@ static int ab8500_charger_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, di);
+ mutex_lock(&di->charger_attached_mutex);
+
+ ch_stat = ab8500_charger_detect_chargers(di, false);
+
+ if ((ch_stat & AC_PW_CONN) == AC_PW_CONN) {
+ queue_delayed_work(di->charger_wq,
+ &di->ac_charger_attached_work,
+ HZ);
+ }
+ if ((ch_stat & USB_PW_CONN) == USB_PW_CONN) {
+ queue_delayed_work(di->charger_wq,
+ &di->usb_charger_attached_work,
+ HZ);
+ }
+
+ mutex_unlock(&di->charger_attached_mutex);
+
return ret;
free_irq:
@@ -2750,19 +3410,21 @@ free_irq:
put_usb_phy:
usb_put_phy(di->usb_phy);
free_usb:
- power_supply_unregister(&di->usb_chg.psy);
+ if(di->usb_chg.enabled)
+ power_supply_unregister(&di->usb_chg.psy);
free_ac:
- power_supply_unregister(&di->ac_chg.psy);
-free_regulator:
- regulator_put(di->regu);
+ if(di->ac_chg.enabled)
+ power_supply_unregister(&di->ac_chg.psy);
free_charger_wq:
destroy_workqueue(di->charger_wq);
-free_device_info:
- kfree(di);
-
return ret;
}
+static const struct of_device_id ab8500_charger_match[] = {
+ { .compatible = "stericsson,ab8500-charger", },
+ { },
+};
+
static struct platform_driver ab8500_charger_driver = {
.probe = ab8500_charger_probe,
.remove = ab8500_charger_remove,
@@ -2771,6 +3433,7 @@ static struct platform_driver ab8500_charger_driver = {
.driver = {
.name = "ab8500-charger",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_charger_match,
},
};
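For context: the free_regulator and free_device_info error labels disappear from the probe above because the charger now uses device-managed resources (devm_kzalloc, as in the ab8500_fg probe further down; devm_regulator_get here), which the driver core releases by itself on probe failure or unbind. A minimal sketch of that pattern, with illustrative names that are not part of this patch:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/regulator/consumer.h>
	#include <linux/slab.h>

	struct example_priv {
		struct regulator *regu;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->regu = devm_regulator_get(&pdev->dev, "vddadc");
		if (IS_ERR(priv->regu))
			return PTR_ERR(priv->regu); /* no regulator_put() needed */

		/* memory and regulator are released automatically by the core */
		return 0;
	}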
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 331dc43ded4e..25dae4c4b0ef 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -22,15 +22,17 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/kobject.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500.h>
#include <linux/slab.h>
-#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/delay.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
-#include <linux/mfd/abx500.h>
#include <linux/time.h>
+#include <linux/of.h>
#include <linux/completion.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/kernel.h>
#define MILLI_TO_MICRO 1000
#define FG_LSB_IN_MA 1627
@@ -41,7 +43,7 @@
#define NBR_AVG_SAMPLES 20
-#define LOW_BAT_CHECK_INTERVAL (2 * HZ)
+#define LOW_BAT_CHECK_INTERVAL (HZ / 16) /* 62.5 ms */
#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */
#define BATT_OK_MIN 2360 /* mV */
@@ -112,6 +114,13 @@ struct ab8500_fg_avg_cap {
int sum;
};
+struct ab8500_fg_cap_scaling {
+ bool enable;
+ int cap_to_scale[2];
+ int disable_cap_level;
+ int scaled_cap;
+};
+
struct ab8500_fg_battery_capacity {
int max_mah_design;
int max_mah;
@@ -122,6 +131,7 @@ struct ab8500_fg_battery_capacity {
int prev_percent;
int prev_level;
int user_mah;
+ struct ab8500_fg_cap_scaling cap_scale;
};
struct ab8500_fg_flags {
@@ -159,6 +169,8 @@ struct inst_curr_result_list {
* @recovery_cnt: Counter for recovery mode
* @high_curr_cnt: Counter for high current mode
* @init_cnt: Counter for init mode
+ * @low_bat_cnt:	Counter for number of consecutive low battery measures
+ * @nbr_cceoc_irq_cnt:	Counter for number of CCEOC irqs received since enabled
* @recovery_needed: Indicate if recovery is needed
* @high_curr_mode: Indicate if we're in high current mode
* @init_capacity: Indicate if initial capacity measuring should be done
@@ -166,14 +178,14 @@ struct inst_curr_result_list {
* @calib_state State during offset calibration
* @discharge_state: Current discharge state
* @charge_state: Current charge state
+ * @ab8500_fg_started:	Completion struct used for the instant current start
* @ab8500_fg_complete Completion struct used for the instant current reading
* @flags: Structure for information about events triggered
* @bat_cap: Structure for battery capacity specific parameters
* @avg_cap: Average capacity filter
* @parent: Pointer to the struct ab8500
* @gpadc: Pointer to the struct gpadc
- * @pdata: Pointer to the abx500_fg platform data
- * @bat: Pointer to the abx500_bm platform data
+ * @bm: Platform specific battery management information
* @fg_psy: Structure that holds the FG specific battery properties
* @fg_wq: Work queue for running the FG algorithm
* @fg_periodic_work: Work to run the FG algorithm periodically
@@ -199,6 +211,8 @@ struct ab8500_fg {
int recovery_cnt;
int high_curr_cnt;
int init_cnt;
+ int low_bat_cnt;
+ int nbr_cceoc_irq_cnt;
bool recovery_needed;
bool high_curr_mode;
bool init_capacity;
@@ -206,14 +220,14 @@ struct ab8500_fg {
enum ab8500_fg_calibration_state calib_state;
enum ab8500_fg_discharge_state discharge_state;
enum ab8500_fg_charge_state charge_state;
+ struct completion ab8500_fg_started;
struct completion ab8500_fg_complete;
struct ab8500_fg_flags flags;
struct ab8500_fg_battery_capacity bat_cap;
struct ab8500_fg_avg_cap avg_cap;
struct ab8500 *parent;
struct ab8500_gpadc *gpadc;
- struct abx500_fg_platform_data *pdata;
- struct abx500_bm_data *bat;
+ struct abx500_bm_data *bm;
struct power_supply fg_psy;
struct workqueue_struct *fg_wq;
struct delayed_work fg_periodic_work;
@@ -356,7 +370,7 @@ static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
/*
* We want to know if we're in low current mode
*/
- if (curr > -di->bat->fg_params->high_curr_threshold)
+ if (curr > -di->bm->fg_params->high_curr_threshold)
return true;
else
return false;
@@ -485,8 +499,9 @@ static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
di->flags.fg_enabled = true;
} else {
/* Clear any pending read requests */
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ (RESET_ACCU | READ_REQ), 0);
if (ret)
goto cc_err;
@@ -524,13 +539,14 @@ cc_err:
* Note: This is part "one" and has to be called before
* ab8500_fg_inst_curr_finalize()
*/
- int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
{
u8 reg_val;
int ret;
mutex_lock(&di->cc_lock);
+ di->nbr_cceoc_irq_cnt = 0;
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
AB8500_RTC_CC_CONF_REG, &reg_val);
if (ret < 0)
@@ -558,6 +574,7 @@ cc_err:
}
/* Return and WFI */
+ INIT_COMPLETION(di->ab8500_fg_started);
INIT_COMPLETION(di->ab8500_fg_complete);
enable_irq(di->irq);
@@ -569,6 +586,17 @@ fail:
}
/**
+ * ab8500_fg_inst_curr_started() - check if fg conversion has started
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns 1 if conversion started, 0 if still waiting
+ */
+int ab8500_fg_inst_curr_started(struct ab8500_fg *di)
+{
+ return completion_done(&di->ab8500_fg_started);
+}
+
+/**
* ab8500_fg_inst_curr_done() - check if fg conversion is done
* @di: pointer to the ab8500_fg structure
*
@@ -596,13 +624,15 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
int timeout;
if (!completion_done(&di->ab8500_fg_complete)) {
- timeout = wait_for_completion_timeout(&di->ab8500_fg_complete,
+ timeout = wait_for_completion_timeout(
+ &di->ab8500_fg_complete,
INS_CURR_TIMEOUT);
dev_dbg(di->dev, "Finalize time: %d ms\n",
((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
if (!timeout) {
ret = -ETIME;
disable_irq(di->irq);
+ di->nbr_cceoc_irq_cnt = 0;
dev_err(di->dev, "completion timed out [%d]\n",
__LINE__);
goto fail;
@@ -610,6 +640,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
}
disable_irq(di->irq);
+ di->nbr_cceoc_irq_cnt = 0;
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
@@ -648,7 +679,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
* 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
*/
val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
- (1000 * di->bat->fg_res);
+ (1000 * di->bm->fg_res);
if (di->turn_off_fg) {
dev_dbg(di->dev, "%s Disable FG\n", __func__);
@@ -684,6 +715,7 @@ fail:
int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
{
int ret;
+ int timeout;
int res = 0;
ret = ab8500_fg_inst_curr_start(di);
@@ -692,13 +724,33 @@ int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
return 0;
}
+ /* Wait for CC to actually start */
+ if (!completion_done(&di->ab8500_fg_started)) {
+ timeout = wait_for_completion_timeout(
+ &di->ab8500_fg_started,
+ INS_CURR_TIMEOUT);
+ dev_dbg(di->dev, "Start time: %d ms\n",
+ ((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
+ if (!timeout) {
+ ret = -ETIME;
+ dev_err(di->dev, "completion timed out [%d]\n",
+ __LINE__);
+ goto fail;
+ }
+ }
+
ret = ab8500_fg_inst_curr_finalize(di, &res);
if (ret) {
dev_err(di->dev, "Failed to finalize fg_inst\n");
return 0;
}
+ dev_dbg(di->dev, "%s instant current: %d", __func__, res);
return res;
+fail:
+ disable_irq(di->irq);
+ mutex_unlock(&di->cc_lock);
+ return ret;
}
/**
@@ -751,19 +803,16 @@ static void ab8500_fg_acc_cur_work(struct work_struct *work)
* 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
*/
di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10) /
- (100 * di->bat->fg_res);
+ (100 * di->bm->fg_res);
/*
* Convert to unit value in mA
- * Full scale input voltage is
- * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
- * Given a 250ms conversion cycle time the LSB corresponds
- * to 112.9 nAh. Convert to current by dividing by the conversion
+ * by dividing by the conversion
* time in hours (= samples / (3600 * 4)h)
- * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+ * and multiplying by 1000
*/
di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
- (1000 * di->bat->fg_res * (di->fg_samples / 4));
+ (1000 * di->bm->fg_res * (di->fg_samples / 4));
di->flags.conv_done = true;
@@ -771,6 +820,8 @@ static void ab8500_fg_acc_cur_work(struct work_struct *work)
queue_work(di->fg_wq, &di->fg_work);
+ dev_dbg(di->dev, "fg_res: %d, fg_samples: %d, gasg: %d, accu_charge: %d \n",
+ di->bm->fg_res, di->fg_samples, val, di->accu_charge);
return;
exit:
dev_err(di->dev,
@@ -815,8 +866,8 @@ static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
struct abx500_v_to_cap *tbl;
int cap = 0;
- tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl,
- tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+ tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl,
+ tbl_size = di->bm->bat_type[di->bm->batt_id].n_v_cap_tbl_elements;
for (i = 0; i < tbl_size; ++i) {
if (voltage > tbl[i].voltage)
@@ -867,8 +918,8 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
struct batres_vs_temp *tbl;
int resist = 0;
- tbl = di->bat->bat_type[di->bat->batt_id].batres_tbl;
- tbl_size = di->bat->bat_type[di->bat->batt_id].n_batres_tbl_elements;
+ tbl = di->bm->bat_type[di->bm->batt_id].batres_tbl;
+ tbl_size = di->bm->bat_type[di->bm->batt_id].n_batres_tbl_elements;
for (i = 0; i < tbl_size; ++i) {
if (di->bat_temp / 10 > tbl[i].temp)
@@ -889,11 +940,11 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d"
" fg resistance %d, total: %d (mOhm)\n",
- __func__, di->bat_temp, resist, di->bat->fg_res / 10,
- (di->bat->fg_res / 10) + resist);
+ __func__, di->bat_temp, resist, di->bm->fg_res / 10,
+ (di->bm->fg_res / 10) + resist);
/* fg_res variable is in 0.1mOhm */
- resist += di->bat->fg_res / 10;
+ resist += di->bm->fg_res / 10;
return resist;
}
@@ -916,7 +967,7 @@ static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
do {
vbat += ab8500_fg_bat_voltage(di);
i++;
- msleep(5);
+ usleep_range(5000, 6000);
} while (!ab8500_fg_inst_curr_done(di));
ab8500_fg_inst_curr_finalize(di, &di->inst_curr);
@@ -1109,16 +1160,16 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
{
int ret, percent;
- percent = di->bat_cap.permille / 10;
+ percent = DIV_ROUND_CLOSEST(di->bat_cap.permille, 10);
- if (percent <= di->bat->cap_levels->critical ||
+ if (percent <= di->bm->cap_levels->critical ||
di->flags.low_bat)
ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
- else if (percent <= di->bat->cap_levels->low)
+ else if (percent <= di->bm->cap_levels->low)
ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
- else if (percent <= di->bat->cap_levels->normal)
+ else if (percent <= di->bm->cap_levels->normal)
ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
- else if (percent <= di->bat->cap_levels->high)
+ else if (percent <= di->bm->cap_levels->high)
ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
else
ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
@@ -1127,6 +1178,99 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
}
/**
+ * ab8500_fg_calculate_scaled_capacity() - Capacity scaling
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Calculates the capacity to be shown to upper layers. Scales the capacity
+ * to have 100% as a reference from the actual capacity upon removal of charger
+ * when charging is in maintenance mode.
+ */
+static int ab8500_fg_calculate_scaled_capacity(struct ab8500_fg *di)
+{
+ struct ab8500_fg_cap_scaling *cs = &di->bat_cap.cap_scale;
+ int capacity = di->bat_cap.prev_percent;
+
+ if (!cs->enable)
+ return capacity;
+
+ /*
+ * As long as we are in fully charge mode scale the capacity
+ * to show 100%.
+ */
+ if (di->flags.fully_charged) {
+ cs->cap_to_scale[0] = 100;
+ cs->cap_to_scale[1] =
+ max(capacity, di->bm->fg_params->maint_thres);
+ dev_dbg(di->dev, "Scale cap with %d/%d\n",
+ cs->cap_to_scale[0], cs->cap_to_scale[1]);
+ }
+
+ /* Calculates the scaled capacity. */
+ if ((cs->cap_to_scale[0] != cs->cap_to_scale[1])
+ && (cs->cap_to_scale[1] > 0))
+ capacity = min(100,
+ DIV_ROUND_CLOSEST(di->bat_cap.prev_percent *
+ cs->cap_to_scale[0],
+ cs->cap_to_scale[1]));
+
+ if (di->flags.charging) {
+ if (capacity < cs->disable_cap_level) {
+ cs->disable_cap_level = capacity;
+ dev_dbg(di->dev, "Cap to stop scale lowered %d%%\n",
+ cs->disable_cap_level);
+ } else if (!di->flags.fully_charged) {
+ if (di->bat_cap.prev_percent >=
+ cs->disable_cap_level) {
+ dev_dbg(di->dev, "Disabling scaled capacity\n");
+ cs->enable = false;
+ capacity = di->bat_cap.prev_percent;
+ } else {
+ dev_dbg(di->dev,
+ "Waiting in cap to level %d%%\n",
+ cs->disable_cap_level);
+ capacity = cs->disable_cap_level;
+ }
+ }
+ }
+
+ return capacity;
+}
+
+/**
+ * ab8500_fg_update_cap_scalers() - Capacity scaling
+ * @di: pointer to the ab8500_fg structure
+ *
+ * To be called when state change from charge<->discharge to update
+ * the capacity scalers.
+ */
+static void ab8500_fg_update_cap_scalers(struct ab8500_fg *di)
+{
+ struct ab8500_fg_cap_scaling *cs = &di->bat_cap.cap_scale;
+
+ if (!cs->enable)
+ return;
+ if (di->flags.charging) {
+ di->bat_cap.cap_scale.disable_cap_level =
+ di->bat_cap.cap_scale.scaled_cap;
+ dev_dbg(di->dev, "Cap to stop scale at charge %d%%\n",
+ di->bat_cap.cap_scale.disable_cap_level);
+ } else {
+ if (cs->scaled_cap != 100) {
+ cs->cap_to_scale[0] = cs->scaled_cap;
+ cs->cap_to_scale[1] = di->bat_cap.prev_percent;
+ } else {
+ cs->cap_to_scale[0] = 100;
+ cs->cap_to_scale[1] =
+ max(di->bat_cap.prev_percent,
+ di->bm->fg_params->maint_thres);
+ }
+
+ dev_dbg(di->dev, "Cap to scale at discharge %d/%d\n",
+ cs->cap_to_scale[0], cs->cap_to_scale[1]);
+ }
+}
+
+/**
* ab8500_fg_check_capacity_limits() - Check if capacity has changed
* @di: pointer to the ab8500_fg structure
* @init: capacity is allowed to go up in init mode
@@ -1137,6 +1281,7 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
{
bool changed = false;
+ int percent = DIV_ROUND_CLOSEST(di->bat_cap.permille, 10);
di->bat_cap.level = ab8500_fg_capacity_level(di);
@@ -1168,33 +1313,41 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
dev_dbg(di->dev, "Battery low, set capacity to 0\n");
di->bat_cap.prev_percent = 0;
di->bat_cap.permille = 0;
+ percent = 0;
di->bat_cap.prev_mah = 0;
di->bat_cap.mah = 0;
changed = true;
} else if (di->flags.fully_charged) {
/*
* We report 100% if algorithm reported fully charged
- * unless capacity drops too much
+ * and show 100% during maintenance charging (scaling).
*/
if (di->flags.force_full) {
- di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_percent = percent;
di->bat_cap.prev_mah = di->bat_cap.mah;
- } else if (!di->flags.force_full &&
- di->bat_cap.prev_percent !=
- (di->bat_cap.permille) / 10 &&
- (di->bat_cap.permille / 10) <
- di->bat->fg_params->maint_thres) {
+
+ changed = true;
+
+ if (!di->bat_cap.cap_scale.enable &&
+ di->bm->capacity_scaling) {
+ di->bat_cap.cap_scale.enable = true;
+ di->bat_cap.cap_scale.cap_to_scale[0] = 100;
+ di->bat_cap.cap_scale.cap_to_scale[1] =
+ di->bat_cap.prev_percent;
+ di->bat_cap.cap_scale.disable_cap_level = 100;
+ }
+ } else if (di->bat_cap.prev_percent != percent) {
dev_dbg(di->dev,
"battery reported full "
"but capacity dropping: %d\n",
- di->bat_cap.permille / 10);
- di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ percent);
+ di->bat_cap.prev_percent = percent;
di->bat_cap.prev_mah = di->bat_cap.mah;
changed = true;
}
- } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
- if (di->bat_cap.permille / 10 == 0) {
+ } else if (di->bat_cap.prev_percent != percent) {
+ if (percent == 0) {
/*
* We will not report 0% unless we've got
* the LOW_BAT IRQ, no matter what the FG
@@ -1204,11 +1357,11 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
di->bat_cap.permille = 1;
di->bat_cap.prev_mah = 1;
di->bat_cap.mah = 1;
+ percent = 1;
changed = true;
} else if (!(!di->flags.charging &&
- (di->bat_cap.permille / 10) >
- di->bat_cap.prev_percent) || init) {
+ percent > di->bat_cap.prev_percent) || init) {
/*
* We do not allow reported capacity to go up
* unless we're charging or if we're in init
@@ -1216,9 +1369,9 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
dev_dbg(di->dev,
"capacity changed from %d to %d (%d)\n",
di->bat_cap.prev_percent,
- di->bat_cap.permille / 10,
+ percent,
di->bat_cap.permille);
- di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_percent = percent;
di->bat_cap.prev_mah = di->bat_cap.mah;
changed = true;
@@ -1226,12 +1379,20 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
dev_dbg(di->dev, "capacity not allowed to go up since "
"no charger is connected: %d to %d (%d)\n",
di->bat_cap.prev_percent,
- di->bat_cap.permille / 10,
+ percent,
di->bat_cap.permille);
}
}
if (changed) {
+ if (di->bm->capacity_scaling) {
+ di->bat_cap.cap_scale.scaled_cap =
+ ab8500_fg_calculate_scaled_capacity(di);
+
+ dev_info(di->dev, "capacity=%d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.cap_scale.scaled_cap);
+ }
power_supply_changed(&di->fg_psy);
if (di->flags.fully_charged && di->flags.force_full) {
dev_dbg(di->dev, "Battery full, notifying.\n");
@@ -1285,7 +1446,7 @@ static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
switch (di->charge_state) {
case AB8500_FG_CHARGE_INIT:
di->fg_samples = SEC_TO_SAMPLE(
- di->bat->fg_params->accu_charging);
+ di->bm->fg_params->accu_charging);
ab8500_fg_coulomb_counter(di, true);
ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT);
@@ -1297,7 +1458,7 @@ static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
* Read the FG and calculate the new capacity
*/
mutex_lock(&di->cc_lock);
- if (!di->flags.conv_done) {
+ if (!di->flags.conv_done && !di->flags.force_full) {
/* Wasn't the CC IRQ that got us here */
mutex_unlock(&di->cc_lock);
dev_dbg(di->dev, "%s CC conv not done\n",
@@ -1347,8 +1508,8 @@ static bool check_sysfs_capacity(struct ab8500_fg *di)
cap_permille = ab8500_fg_convert_mah_to_permille(di,
di->bat_cap.user_mah);
- lower = di->bat_cap.permille - di->bat->fg_params->user_cap_limit * 10;
- upper = di->bat_cap.permille + di->bat->fg_params->user_cap_limit * 10;
+ lower = di->bat_cap.permille - di->bm->fg_params->user_cap_limit * 10;
+ upper = di->bat_cap.permille + di->bm->fg_params->user_cap_limit * 10;
if (lower < 0)
lower = 0;
@@ -1388,7 +1549,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
case AB8500_FG_DISCHARGE_INIT:
/* We use the FG IRQ to work on */
di->init_cnt = 0;
- di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ di->fg_samples = SEC_TO_SAMPLE(di->bm->fg_params->init_timer);
ab8500_fg_coulomb_counter(di, true);
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_INITMEASURING);
@@ -1401,18 +1562,17 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
* samples to get an initial capacity.
* Then go to READOUT
*/
- sleep_time = di->bat->fg_params->init_timer;
+ sleep_time = di->bm->fg_params->init_timer;
/* Discard the first [x] seconds */
- if (di->init_cnt >
- di->bat->fg_params->init_discard_time) {
+ if (di->init_cnt > di->bm->fg_params->init_discard_time) {
ab8500_fg_calc_cap_discharge_voltage(di, true);
ab8500_fg_check_capacity_limits(di, true);
}
di->init_cnt += sleep_time;
- if (di->init_cnt > di->bat->fg_params->init_total_time)
+ if (di->init_cnt > di->bm->fg_params->init_total_time)
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_READOUT_INIT);
@@ -1427,7 +1587,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
/* Intentional fallthrough */
case AB8500_FG_DISCHARGE_RECOVERY:
- sleep_time = di->bat->fg_params->recovery_sleep_timer;
+ sleep_time = di->bm->fg_params->recovery_sleep_timer;
/*
* We should check the power consumption
@@ -1439,9 +1599,9 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
if (di->recovery_cnt >
- di->bat->fg_params->recovery_total_time) {
+ di->bm->fg_params->recovery_total_time) {
di->fg_samples = SEC_TO_SAMPLE(
- di->bat->fg_params->accu_high_curr);
+ di->bm->fg_params->accu_high_curr);
ab8500_fg_coulomb_counter(di, true);
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_READOUT);
@@ -1454,7 +1614,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
di->recovery_cnt += sleep_time;
} else {
di->fg_samples = SEC_TO_SAMPLE(
- di->bat->fg_params->accu_high_curr);
+ di->bm->fg_params->accu_high_curr);
ab8500_fg_coulomb_counter(di, true);
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_READOUT);
@@ -1463,7 +1623,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
case AB8500_FG_DISCHARGE_READOUT_INIT:
di->fg_samples = SEC_TO_SAMPLE(
- di->bat->fg_params->accu_high_curr);
+ di->bm->fg_params->accu_high_curr);
ab8500_fg_coulomb_counter(di, true);
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_READOUT);
@@ -1481,7 +1641,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
if (di->recovery_needed) {
ab8500_fg_discharge_state_to(di,
- AB8500_FG_DISCHARGE_RECOVERY);
+ AB8500_FG_DISCHARGE_INIT_RECOVERY);
queue_delayed_work(di->fg_wq,
&di->fg_periodic_work, 0);
@@ -1510,9 +1670,9 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
}
di->high_curr_cnt +=
- di->bat->fg_params->accu_high_curr;
+ di->bm->fg_params->accu_high_curr;
if (di->high_curr_cnt >
- di->bat->fg_params->high_curr_time)
+ di->bm->fg_params->high_curr_time)
di->recovery_needed = true;
ab8500_fg_calc_cap_discharge_fg(di);
@@ -1524,12 +1684,10 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
case AB8500_FG_DISCHARGE_WAKEUP:
ab8500_fg_coulomb_counter(di, true);
- di->inst_curr = ab8500_fg_inst_curr_blocking(di);
-
ab8500_fg_calc_cap_discharge_voltage(di, true);
di->fg_samples = SEC_TO_SAMPLE(
- di->bat->fg_params->accu_high_curr);
+ di->bm->fg_params->accu_high_curr);
ab8500_fg_coulomb_counter(di, true);
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_READOUT);
@@ -1642,8 +1800,6 @@ static void ab8500_fg_periodic_work(struct work_struct *work)
fg_periodic_work.work);
if (di->init_capacity) {
- /* A dummy read that will return 0 */
- di->inst_curr = ab8500_fg_inst_curr_blocking(di);
/* Get an initial capacity calculation */
ab8500_fg_calc_cap_discharge_voltage(di, true);
ab8500_fg_check_capacity_limits(di, true);
@@ -1685,24 +1841,26 @@ static void ab8500_fg_check_hw_failure_work(struct work_struct *work)
* If we have had a battery over-voltage situation,
* check ovv-bit to see if it should be reset.
*/
- if (di->flags.bat_ovv) {
- ret = abx500_get_register_interruptible(di->dev,
- AB8500_CHARGER, AB8500_CH_STAT_REG,
- &reg_value);
- if (ret < 0) {
- dev_err(di->dev, "%s ab8500 read failed\n", __func__);
- return;
- }
- if ((reg_value & BATT_OVV) != BATT_OVV) {
- dev_dbg(di->dev, "Battery recovered from OVV\n");
- di->flags.bat_ovv = false;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_STAT_REG,
+ &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if ((reg_value & BATT_OVV) == BATT_OVV) {
+ if (!di->flags.bat_ovv) {
+ dev_dbg(di->dev, "Battery OVV\n");
+ di->flags.bat_ovv = true;
power_supply_changed(&di->fg_psy);
- return;
}
-
/* Not yet recovered from ovv, reschedule this test */
queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work,
- round_jiffies(HZ));
+ HZ);
+ } else {
+ dev_dbg(di->dev, "Battery recovered from OVV\n");
+ di->flags.bat_ovv = false;
+ power_supply_changed(&di->fg_psy);
}
}
@@ -1722,26 +1880,30 @@ static void ab8500_fg_low_bat_work(struct work_struct *work)
vbat = ab8500_fg_bat_voltage(di);
/* Check if LOW_BAT still fulfilled */
- if (vbat < di->bat->fg_params->lowbat_threshold) {
- di->flags.low_bat = true;
- dev_warn(di->dev, "Battery voltage still LOW\n");
-
- /*
- * We need to re-schedule this check to be able to detect
- * if the voltage increases again during charging
- */
- queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
- round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ if (vbat < di->bm->fg_params->lowbat_threshold) {
+ /* Is it time to shut down? */
+ if (di->low_bat_cnt < 1) {
+ di->flags.low_bat = true;
+ dev_warn(di->dev, "Shut down pending...\n");
+ } else {
+ /*
+ * Else we need to re-schedule this check to be able to detect
+ * if the voltage increases again during charging or
+ * due to decreasing load.
+ */
+ di->low_bat_cnt--;
+ dev_warn(di->dev, "Battery voltage still LOW\n");
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ }
} else {
- di->flags.low_bat = false;
+ di->flags.low_bat_delay = false;
+ di->low_bat_cnt = 10;
dev_warn(di->dev, "Battery voltage OK again\n");
}
/* This is needed to dispatch LOW_BAT */
ab8500_fg_check_capacity_limits(di, false);
-
- /* Set this flag to check if LOW_BAT IRQ still occurs */
- di->flags.low_bat_delay = false;
}
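Combined with the new LOW_BAT_CHECK_INTERVAL of HZ / 16 and low_bat_cnt being armed at 10, the rework above means the shutdown-pending flag is only set after roughly ten consecutive low readings taken every 62.5 ms (on the order of 0.6-0.7 s in total), instead of on a single re-check about 2 s after the IRQ, so a short load transient no longer triggers it. Illustrative arithmetic only:

	#define LOW_BAT_CHECK_INTERVAL	(HZ / 16)		/* 62.5 ms */
	unsigned long worst_case = 10 * LOW_BAT_CHECK_INTERVAL;	/* ~625 ms in jiffies */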
/**
@@ -1780,8 +1942,8 @@ static int ab8500_fg_battok_init_hw_register(struct ab8500_fg *di)
int ret;
int new_val;
- sel0 = di->bat->fg_params->battok_falling_th_sel0;
- sel1 = di->bat->fg_params->battok_raising_th_sel1;
+ sel0 = di->bm->fg_params->battok_falling_th_sel0;
+ sel1 = di->bm->fg_params->battok_raising_th_sel1;
cbp_sel0 = ab8500_fg_battok_calc(di, sel0);
cbp_sel1 = ab8500_fg_battok_calc(di, sel1);
@@ -1829,7 +1991,13 @@ static void ab8500_fg_instant_work(struct work_struct *work)
static irqreturn_t ab8500_fg_cc_data_end_handler(int irq, void *_di)
{
struct ab8500_fg *di = _di;
- complete(&di->ab8500_fg_complete);
+ if (!di->nbr_cceoc_irq_cnt) {
+ di->nbr_cceoc_irq_cnt++;
+ complete(&di->ab8500_fg_started);
+ } else {
+ di->nbr_cceoc_irq_cnt = 0;
+ complete(&di->ab8500_fg_complete);
+ }
return IRQ_HANDLED;
}
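The handler above now implements a two-stage handshake: the first CCEOC interrupt after enable_irq() only signals that the coulomb-counter conversion has actually started (ab8500_fg_started), and the second signals that the data is ready (ab8500_fg_complete). The blocking reader earlier in this patch waits for each in turn, roughly as in this sketch (illustrative; the real code is ab8500_fg_inst_curr_blocking() and ab8500_fg_inst_curr_finalize()):

	if (!wait_for_completion_timeout(&di->ab8500_fg_started,
					 INS_CURR_TIMEOUT))
		return -ETIME;	/* conversion never started */

	if (!wait_for_completion_timeout(&di->ab8500_fg_complete,
					 INS_CURR_TIMEOUT))
		return -ETIME;	/* started, but no data before the timeout */

	/* the CC sample registers can be read safely here */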
@@ -1876,8 +2044,6 @@ static irqreturn_t ab8500_fg_batt_ovv_handler(int irq, void *_di)
struct ab8500_fg *di = _di;
dev_dbg(di->dev, "Battery OVV\n");
- di->flags.bat_ovv = true;
- power_supply_changed(&di->fg_psy);
/* Schedule a new HW failure check */
queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work, 0);
@@ -1896,6 +2062,7 @@ static irqreturn_t ab8500_fg_lowbatf_handler(int irq, void *_di)
{
struct ab8500_fg *di = _di;
+ /* Initiate handling in ab8500_fg_low_bat_work() if not already initiated. */
if (!di->flags.low_bat_delay) {
dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
di->flags.low_bat_delay = true;
@@ -1964,7 +2131,7 @@ static int ab8500_fg_get_property(struct power_supply *psy,
di->bat_cap.max_mah);
break;
case POWER_SUPPLY_PROP_ENERGY_NOW:
- if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
di->flags.batt_id_received)
val->intval = ab8500_fg_convert_mah_to_uwh(di,
di->bat_cap.max_mah);
@@ -1979,21 +2146,23 @@ static int ab8500_fg_get_property(struct power_supply *psy,
val->intval = di->bat_cap.max_mah;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
- if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
di->flags.batt_id_received)
val->intval = di->bat_cap.max_mah;
else
val->intval = di->bat_cap.prev_mah;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ if (di->bm->capacity_scaling)
+ val->intval = di->bat_cap.cap_scale.scaled_cap;
+ else if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
di->flags.batt_id_received)
val->intval = 100;
else
val->intval = di->bat_cap.prev_percent;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
- if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
di->flags.batt_id_received)
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
else
@@ -2050,6 +2219,8 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
break;
di->flags.charging = false;
di->flags.fully_charged = false;
+ if (di->bm->capacity_scaling)
+ ab8500_fg_update_cap_scalers(di);
queue_work(di->fg_wq, &di->fg_work);
break;
case POWER_SUPPLY_STATUS_FULL:
@@ -2062,10 +2233,13 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
queue_work(di->fg_wq, &di->fg_work);
break;
case POWER_SUPPLY_STATUS_CHARGING:
- if (di->flags.charging)
+ if (di->flags.charging &&
+ !di->flags.fully_charged)
break;
di->flags.charging = true;
di->flags.fully_charged = false;
+ if (di->bm->capacity_scaling)
+ ab8500_fg_update_cap_scalers(di);
queue_work(di->fg_wq, &di->fg_work);
break;
};
@@ -2076,10 +2250,11 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
case POWER_SUPPLY_PROP_TECHNOLOGY:
switch (ext->type) {
case POWER_SUPPLY_TYPE_BATTERY:
- if (!di->flags.batt_id_received) {
+ if (!di->flags.batt_id_received &&
+ di->bm->batt_id != BATTERY_UNKNOWN) {
const struct abx500_battery_type *b;
- b = &(di->bat->bat_type[di->bat->batt_id]);
+ b = &(di->bm->bat_type[di->bm->batt_id]);
di->flags.batt_id_received = true;
@@ -2105,8 +2280,8 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
case POWER_SUPPLY_PROP_TEMP:
switch (ext->type) {
case POWER_SUPPLY_TYPE_BATTERY:
- if (di->flags.batt_id_received)
- di->bat_temp = ret.intval;
+ if (di->flags.batt_id_received)
+ di->bat_temp = ret.intval;
break;
default:
break;
@@ -2156,7 +2331,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
AB8500_SYS_CTRL2_BLOCK,
AB8500_LOW_BAT_REG,
ab8500_volt_to_regval(
- di->bat->fg_params->lowbat_threshold) << 1 |
+ di->bm->fg_params->lowbat_threshold) << 1 |
LOW_BAT_ENABLE);
if (ret) {
dev_err(di->dev, "%s write failed\n", __func__);
@@ -2396,6 +2571,11 @@ static int ab8500_fg_suspend(struct platform_device *pdev,
struct ab8500_fg *di = platform_get_drvdata(pdev);
flush_delayed_work(&di->fg_periodic_work);
+ flush_work(&di->fg_work);
+ flush_work(&di->fg_acc_cur_work);
+ flush_delayed_work(&di->fg_reinit_work);
+ flush_delayed_work(&di->fg_low_bat_work);
+ flush_delayed_work(&di->fg_check_hw_failure_work);
/*
* If the FG is enabled we will disable it before going to suspend
@@ -2429,7 +2609,6 @@ static int ab8500_fg_remove(struct platform_device *pdev)
flush_scheduled_work();
power_supply_unregister(&di->fg_psy);
platform_set_drvdata(pdev, NULL);
- kfree(di);
return ret;
}
@@ -2442,21 +2621,38 @@ static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
{"CCEOC", ab8500_fg_cc_data_end_handler},
};
+static char *supply_interface[] = {
+ "ab8500_chargalg",
+ "ab8500_usb",
+};
+
static int ab8500_fg_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ struct abx500_bm_data *plat = pdev->dev.platform_data;
+ struct ab8500_fg *di;
int i, irq;
int ret = 0;
- struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
- struct ab8500_fg *di;
- if (!plat_data) {
- dev_err(&pdev->dev, "No platform data\n");
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "%s no mem for ab8500_fg\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no battery management data supplied\n");
return -EINVAL;
}
+ di->bm = plat;
- di = kzalloc(sizeof(*di), GFP_KERNEL);
- if (!di)
- return -ENOMEM;
+ if (np) {
+ ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get battery information\n");
+ return ret;
+ }
+ }
mutex_init(&di->cc_lock);
@@ -2465,37 +2661,21 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->parent = dev_get_drvdata(pdev->dev.parent);
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- /* get fg specific platform data */
- di->pdata = plat_data->fg;
- if (!di->pdata) {
- dev_err(di->dev, "no fg platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
- /* get battery specific platform data */
- di->bat = plat_data->battery;
- if (!di->bat) {
- dev_err(di->dev, "no battery platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
di->fg_psy.name = "ab8500_fg";
di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
di->fg_psy.properties = ab8500_fg_props;
di->fg_psy.num_properties = ARRAY_SIZE(ab8500_fg_props);
di->fg_psy.get_property = ab8500_fg_get_property;
- di->fg_psy.supplied_to = di->pdata->supplied_to;
- di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->fg_psy.supplied_to = supply_interface;
+ di->fg_psy.num_supplicants = ARRAY_SIZE(supply_interface),
di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
di->bat_cap.max_mah_design = MILLI_TO_MICRO *
- di->bat->bat_type[di->bat->batt_id].charge_full_design;
+ di->bm->bat_type[di->bm->batt_id].charge_full_design;
di->bat_cap.max_mah = di->bat_cap.max_mah_design;
- di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+ di->vbat_nom = di->bm->bat_type[di->bm->batt_id].nominal_voltage;
di->init_capacity = true;
@@ -2506,8 +2686,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
if (di->fg_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
- ret = -ENOMEM;
- goto free_device_info;
+ return -ENOMEM;
}
/* Init work for running the fg algorithm instantly */
@@ -2532,6 +2711,12 @@ static int ab8500_fg_probe(struct platform_device *pdev)
INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work,
ab8500_fg_check_hw_failure_work);
+ /* Reset battery low voltage flag */
+ di->flags.low_bat = false;
+
+ /* Initialize low battery counter */
+ di->low_bat_cnt = 10;
+
/* Initialize OVV, and other registers */
ret = ab8500_fg_init_hw_registers(di);
if (ret) {
@@ -2550,10 +2735,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
goto free_inst_curr_wq;
}
- di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ di->fg_samples = SEC_TO_SAMPLE(di->bm->fg_params->init_timer);
ab8500_fg_coulomb_counter(di, true);
- /* Initialize completion used to notify completion of inst current */
+ /*
+ * Initialize completion used to notify completion and start
+ * of inst current
+ */
+ init_completion(&di->ab8500_fg_started);
init_completion(&di->ab8500_fg_complete);
/* Register interrupts */
@@ -2573,6 +2762,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
}
di->irq = platform_get_irq_byname(pdev, "CCEOC");
disable_irq(di->irq);
+ di->nbr_cceoc_irq_cnt = 0;
platform_set_drvdata(pdev, di);
@@ -2606,12 +2796,14 @@ free_irq:
}
free_inst_curr_wq:
destroy_workqueue(di->fg_wq);
-free_device_info:
- kfree(di);
-
return ret;
}
+static const struct of_device_id ab8500_fg_match[] = {
+ { .compatible = "stericsson,ab8500-fg", },
+ { },
+};
+
static struct platform_driver ab8500_fg_driver = {
.probe = ab8500_fg_probe,
.remove = ab8500_fg_remove,
@@ -2620,6 +2812,7 @@ static struct platform_driver ab8500_fg_driver = {
.driver = {
.name = "ab8500-fg",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_fg_match,
},
};
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index 19f254190790..f043c0851a76 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/abx500_chargalg.c
@@ -21,6 +21,8 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ux500_chargalg.h>
#include <linux/mfd/abx500/ab8500-bm.h>
@@ -31,9 +33,6 @@
/* End-of-charge criteria counter */
#define EOC_COND_CNT 10
-/* Recharge criteria counter */
-#define RCH_COND_CNT 3
-
#define to_abx500_chargalg_device_info(x) container_of((x), \
struct abx500_chargalg, chargalg_psy);
@@ -194,7 +193,6 @@ enum maxim_ret {
* @dev: pointer to the structure device
* @charge_status: battery operating status
* @eoc_cnt: counter used to determine end-of_charge
- * @rch_cnt: counter used to determine start of recharge
* @maintenance_chg: indicate if maintenance charge is active
* @t_hyst_norm temperature hysteresis when the temperature has been
* over or under normal limits
@@ -205,8 +203,7 @@ enum maxim_ret {
* @chg_info: information about connected charger types
* @batt_data: data of the battery
* @susp_status: current charger suspension status
- * @pdata: pointer to the abx500_chargalg platform data
- * @bat: pointer to the abx500_bm platform data
+ * @bm: Platform specific battery management information
* @chargalg_psy: structure that holds the battery properties exposed by
* the charging algorithm
* @events: structure for information about events triggered
@@ -222,7 +219,6 @@ struct abx500_chargalg {
struct device *dev;
int charge_status;
int eoc_cnt;
- int rch_cnt;
bool maintenance_chg;
int t_hyst_norm;
int t_hyst_lowhigh;
@@ -231,8 +227,7 @@ struct abx500_chargalg {
struct abx500_chargalg_charger_info chg_info;
struct abx500_chargalg_battery_data batt_data;
struct abx500_chargalg_suspension_status susp_status;
- struct abx500_chargalg_platform_data *pdata;
- struct abx500_bm_data *bat;
+ struct abx500_bm_data *bm;
struct power_supply chargalg_psy;
struct ux500_charger *ac_chg;
struct ux500_charger *usb_chg;
@@ -367,13 +362,13 @@ static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
case AC_CHG:
timer_expiration =
round_jiffies(jiffies +
- (di->bat->main_safety_tmr_h * 3600 * HZ));
+ (di->bm->main_safety_tmr_h * 3600 * HZ));
break;
case USB_CHG:
timer_expiration =
round_jiffies(jiffies +
- (di->bat->usb_safety_tmr_h * 3600 * HZ));
+ (di->bm->usb_safety_tmr_h * 3600 * HZ));
break;
default:
@@ -450,8 +445,18 @@ static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
{
/* Check if charger exists and kick watchdog if charging */
if (di->ac_chg && di->ac_chg->ops.kick_wd &&
- di->chg_info.online_chg & AC_CHG)
+ di->chg_info.online_chg & AC_CHG) {
+ /*
+ * If AB charger watchdog expired, pm2xxx charging
+ * gets disabled. To be safe, kick both AB charger watchdog
+ * and pm2xxx watchdog.
+ */
+ if (di->ac_chg->external &&
+ di->usb_chg && di->usb_chg->ops.kick_wd)
+ di->usb_chg->ops.kick_wd(di->usb_chg);
+
return di->ac_chg->ops.kick_wd(di->ac_chg);
+ }
else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
di->chg_info.online_chg & USB_CHG)
return di->usb_chg->ops.kick_wd(di->usb_chg);
@@ -608,6 +613,8 @@ static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
int vset, int iset)
{
+ bool start_chargalg_wd = true;
+
switch (di->chg_info.charger_type) {
case AC_CHG:
dev_dbg(di->dev,
@@ -625,8 +632,12 @@ static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
default:
dev_err(di->dev, "Unknown charger to charge from\n");
+ start_chargalg_wd = false;
break;
}
+
+ if (start_chargalg_wd && !delayed_work_pending(&di->chargalg_wd_work))
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
}
/**
@@ -638,32 +649,32 @@ static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
*/
static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
{
- if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
- di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
+ if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
+ di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
/* Temp OK! */
di->events.btemp_underover = false;
di->events.btemp_lowhigh = false;
di->t_hyst_norm = 0;
di->t_hyst_lowhigh = 0;
} else {
- if (((di->batt_data.temp >= di->bat->temp_high) &&
+ if (((di->batt_data.temp >= di->bm->temp_high) &&
(di->batt_data.temp <
- (di->bat->temp_over - di->t_hyst_lowhigh))) ||
+ (di->bm->temp_over - di->t_hyst_lowhigh))) ||
((di->batt_data.temp >
- (di->bat->temp_under + di->t_hyst_lowhigh)) &&
- (di->batt_data.temp <= di->bat->temp_low))) {
+ (di->bm->temp_under + di->t_hyst_lowhigh)) &&
+ (di->batt_data.temp <= di->bm->temp_low))) {
/* TEMP minor!!!!! */
di->events.btemp_underover = false;
di->events.btemp_lowhigh = true;
- di->t_hyst_norm = di->bat->temp_hysteresis;
+ di->t_hyst_norm = di->bm->temp_hysteresis;
di->t_hyst_lowhigh = 0;
- } else if (di->batt_data.temp <= di->bat->temp_under ||
- di->batt_data.temp >= di->bat->temp_over) {
+ } else if (di->batt_data.temp <= di->bm->temp_under ||
+ di->batt_data.temp >= di->bm->temp_over) {
/* TEMP major!!!!! */
di->events.btemp_underover = true;
di->events.btemp_lowhigh = false;
di->t_hyst_norm = 0;
- di->t_hyst_lowhigh = di->bat->temp_hysteresis;
+ di->t_hyst_lowhigh = di->bm->temp_hysteresis;
} else {
/* Within hysteresis */
dev_dbg(di->dev, "Within hysteresis limit temp: %d "
@@ -682,12 +693,12 @@ static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
*/
static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
{
- if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
+ if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
di->chg_info.usb_chg_ok = false;
else
di->chg_info.usb_chg_ok = true;
- if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
+ if (di->chg_info.ac_volt > di->bm->chg_params->ac_volt_max)
di->chg_info.ac_chg_ok = false;
else
di->chg_info.ac_chg_ok = true;
@@ -707,10 +718,10 @@ static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
di->charge_state == STATE_NORMAL &&
!di->maintenance_chg && (di->batt_data.volt >=
- di->bat->bat_type[di->bat->batt_id].termination_vol ||
+ di->bm->bat_type[di->bm->batt_id].termination_vol ||
di->events.usb_cv_active || di->events.ac_cv_active) &&
di->batt_data.avg_curr <
- di->bat->bat_type[di->bat->batt_id].termination_curr &&
+ di->bm->bat_type[di->bm->batt_id].termination_curr &&
di->batt_data.avg_curr > 0) {
if (++di->eoc_cnt >= EOC_COND_CNT) {
di->eoc_cnt = 0;
@@ -733,12 +744,12 @@ static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
static void init_maxim_chg_curr(struct abx500_chargalg *di)
{
di->ccm.original_iset =
- di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
di->ccm.current_iset =
- di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
- di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
- di->ccm.max_current = di->bat->maxi->chg_curr;
- di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
+ di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
+ di->ccm.max_current = di->bm->maxi->chg_curr;
+ di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
di->ccm.level = 0;
}
@@ -755,7 +766,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
{
int delta_i;
- if (!di->bat->maxi->ena_maxi)
+ if (!di->bm->maxi->ena_maxi)
return MAXIM_RET_NOACTION;
delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
@@ -766,7 +777,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
if (di->ccm.wait_cnt == 0) {
dev_dbg(di->dev, "lowering current\n");
di->ccm.wait_cnt++;
- di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
di->ccm.max_current =
di->ccm.current_iset - di->ccm.test_delta_i;
di->ccm.current_iset = di->ccm.max_current;
@@ -791,7 +802,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
if (di->ccm.current_iset == di->ccm.original_iset)
return MAXIM_RET_NOACTION;
- di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
di->ccm.current_iset = di->ccm.original_iset;
di->ccm.level = 0;
@@ -803,7 +814,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
di->ccm.max_current) {
if (di->ccm.condition_cnt-- == 0) {
 /* Increase the iset with ccm.test_delta_i */
- di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
di->ccm.current_iset += di->ccm.test_delta_i;
di->ccm.level++;
dev_dbg(di->dev, " Maximization needed, increase"
@@ -818,7 +829,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
return MAXIM_RET_NOACTION;
}
} else {
- di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
return MAXIM_RET_NOACTION;
}
}
@@ -838,7 +849,7 @@ static void handle_maxim_chg_curr(struct abx500_chargalg *di)
break;
case MAXIM_RET_IBAT_TOO_HIGH:
result = abx500_chargalg_update_chg_curr(di,
- di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
if (result)
dev_err(di->dev, "failed to set chg curr\n");
break;
@@ -858,6 +869,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
union power_supply_propval ret;
int i, j;
bool psy_found = false;
+ bool capacity_updated = false;
psy = (struct power_supply *)data;
ext = dev_get_drvdata(dev);
@@ -870,6 +882,16 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
if (!psy_found)
return 0;
+ /*
+ * If external is not registering 'POWER_SUPPLY_PROP_CAPACITY' to its
+ * property because of handling that sysfs entry on its own, this is
+ * the place to get the battery capacity.
+ */
+ if (!ext->get_property(ext, POWER_SUPPLY_PROP_CAPACITY, &ret)) {
+ di->batt_data.percent = ret.intval;
+ capacity_updated = true;
+ }
+
/* Go through all properties for the psy */
for (j = 0; j < ext->num_properties; j++) {
enum power_supply_property prop;
@@ -1154,7 +1176,8 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
}
break;
case POWER_SUPPLY_PROP_CAPACITY:
- di->batt_data.percent = ret.intval;
+ if (!capacity_updated)
+ di->batt_data.percent = ret.intval;
break;
default:
break;
@@ -1210,7 +1233,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
* this way
*/
if (!charger_status ||
- (di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
+ (di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
if (di->charge_state != STATE_HANDHELD) {
di->events.safety_timer_expired = false;
abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
@@ -1394,8 +1417,8 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_NORMAL_INIT:
abx500_chargalg_start_charging(di,
- di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
- di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
abx500_chargalg_state_to(di, STATE_NORMAL);
abx500_chargalg_start_safety_timer(di);
abx500_chargalg_stop_maintenance_timer(di);
@@ -1411,7 +1434,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
handle_maxim_chg_curr(di);
if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
di->maintenance_chg) {
- if (di->bat->no_maintenance)
+ if (di->bm->no_maintenance)
abx500_chargalg_state_to(di,
STATE_WAIT_FOR_RECHARGE_INIT);
else
@@ -1424,28 +1447,25 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_WAIT_FOR_RECHARGE_INIT:
abx500_chargalg_hold_charging(di);
abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
- di->rch_cnt = RCH_COND_CNT;
/* Intentional fallthrough */
case STATE_WAIT_FOR_RECHARGE:
- if (di->batt_data.volt <=
- di->bat->bat_type[di->bat->batt_id].recharge_vol) {
- if (di->rch_cnt-- == 0)
- abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
- } else
- di->rch_cnt = RCH_COND_CNT;
+ if (di->batt_data.percent <=
+ di->bm->bat_type[di->bm->batt_id].
+ recharge_cap)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
break;
case STATE_MAINTENANCE_A_INIT:
abx500_chargalg_stop_safety_timer(di);
abx500_chargalg_start_maintenance_timer(di,
- di->bat->bat_type[
- di->bat->batt_id].maint_a_chg_timer_h);
+ di->bm->bat_type[
+ di->bm->batt_id].maint_a_chg_timer_h);
abx500_chargalg_start_charging(di,
- di->bat->bat_type[
- di->bat->batt_id].maint_a_vol_lvl,
- di->bat->bat_type[
- di->bat->batt_id].maint_a_cur_lvl);
+ di->bm->bat_type[
+ di->bm->batt_id].maint_a_vol_lvl,
+ di->bm->bat_type[
+ di->bm->batt_id].maint_a_cur_lvl);
abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
power_supply_changed(&di->chargalg_psy);
/* Intentional fallthrough*/
@@ -1459,13 +1479,13 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_MAINTENANCE_B_INIT:
abx500_chargalg_start_maintenance_timer(di,
- di->bat->bat_type[
- di->bat->batt_id].maint_b_chg_timer_h);
+ di->bm->bat_type[
+ di->bm->batt_id].maint_b_chg_timer_h);
abx500_chargalg_start_charging(di,
- di->bat->bat_type[
- di->bat->batt_id].maint_b_vol_lvl,
- di->bat->bat_type[
- di->bat->batt_id].maint_b_cur_lvl);
+ di->bm->bat_type[
+ di->bm->batt_id].maint_b_vol_lvl,
+ di->bm->bat_type[
+ di->bm->batt_id].maint_b_cur_lvl);
abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
power_supply_changed(&di->chargalg_psy);
/* Intentional fallthrough*/
@@ -1479,10 +1499,10 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
case STATE_TEMP_LOWHIGH_INIT:
abx500_chargalg_start_charging(di,
- di->bat->bat_type[
- di->bat->batt_id].low_high_vol_lvl,
- di->bat->bat_type[
- di->bat->batt_id].low_high_cur_lvl);
+ di->bm->bat_type[
+ di->bm->batt_id].low_high_vol_lvl,
+ di->bm->bat_type[
+ di->bm->batt_id].low_high_cur_lvl);
abx500_chargalg_stop_maintenance_timer(di);
di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
@@ -1543,11 +1563,11 @@ static void abx500_chargalg_periodic_work(struct work_struct *work)
if (di->chg_info.conn_chg)
queue_delayed_work(di->chargalg_wq,
&di->chargalg_periodic_work,
- di->bat->interval_charging * HZ);
+ di->bm->interval_charging * HZ);
else
queue_delayed_work(di->chargalg_wq,
&di->chargalg_periodic_work,
- di->bat->interval_not_charging * HZ);
+ di->bm->interval_not_charging * HZ);
}
/**
@@ -1614,10 +1634,13 @@ static int abx500_chargalg_get_property(struct power_supply *psy,
if (di->events.batt_ovv) {
val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
} else if (di->events.btemp_underover) {
- if (di->batt_data.temp <= di->bat->temp_under)
+ if (di->batt_data.temp <= di->bm->temp_under)
val->intval = POWER_SUPPLY_HEALTH_COLD;
else
val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ } else if (di->charge_state == STATE_SAFETY_TIMER_EXPIRED ||
+ di->charge_state == STATE_SAFETY_TIMER_EXPIRED_INIT) {
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
} else {
val->intval = POWER_SUPPLY_HEALTH_GOOD;
}
@@ -1631,6 +1654,25 @@ static int abx500_chargalg_get_property(struct power_supply *psy,
/* Exposure to the sysfs interface */
/**
+ * abx500_chargalg_sysfs_show() - sysfs show operations
+ * @kobj: pointer to the struct kobject
+ * @attr: pointer to the struct attribute
+ * @buf: buffer that holds the parameter to send to userspace
+ *
+ * Returns a buffer to be displayed in user space
+ */
+static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct abx500_chargalg *di = container_of(kobj,
+ struct abx500_chargalg, chargalg_kobject);
+
+ return sprintf(buf, "%d\n",
+ di->susp_status.ac_suspended &&
+ di->susp_status.usb_suspended);
+}
+
+/**
* abx500_chargalg_sysfs_charger() - sysfs store operations
* @kobj: pointer to the struct kobject
* @attr: pointer to the struct attribute
@@ -1698,7 +1740,7 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
static struct attribute abx500_chargalg_en_charger = \
{
.name = "chargalg",
- .mode = S_IWUGO,
+ .mode = S_IRUGO | S_IWUSR,
};
static struct attribute *abx500_chargalg_chg[] = {
@@ -1707,6 +1749,7 @@ static struct attribute *abx500_chargalg_chg[] = {
};
static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
+ .show = abx500_chargalg_sysfs_show,
.store = abx500_chargalg_sysfs_charger,
};
@@ -1795,36 +1838,52 @@ static int abx500_chargalg_remove(struct platform_device *pdev)
flush_scheduled_work();
power_supply_unregister(&di->chargalg_psy);
platform_set_drvdata(pdev, NULL);
- kfree(di);
return 0;
}
+static char *supply_interface[] = {
+ "ab8500_fg",
+};
+
static int abx500_chargalg_probe(struct platform_device *pdev)
{
- struct abx500_bm_plat_data *plat_data;
+ struct device_node *np = pdev->dev.of_node;
+ struct abx500_bm_data *plat = pdev->dev.platform_data;
+ struct abx500_chargalg *di;
int ret = 0;
- struct abx500_chargalg *di =
- kzalloc(sizeof(struct abx500_chargalg), GFP_KERNEL);
- if (!di)
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "%s no mem for ab8500_chargalg\n", __func__);
return -ENOMEM;
+ }
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no battery management data supplied\n");
+ return -EINVAL;
+ }
+ di->bm = plat;
+
+ if (np) {
+ ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get battery information\n");
+ return ret;
+ }
+ }
/* get device struct */
di->dev = &pdev->dev;
- plat_data = pdev->dev.platform_data;
- di->pdata = plat_data->chargalg;
- di->bat = plat_data->battery;
-
/* chargalg supply */
di->chargalg_psy.name = "abx500_chargalg";
di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
di->chargalg_psy.properties = abx500_chargalg_props;
di->chargalg_psy.num_properties = ARRAY_SIZE(abx500_chargalg_props);
di->chargalg_psy.get_property = abx500_chargalg_get_property;
- di->chargalg_psy.supplied_to = di->pdata->supplied_to;
- di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->chargalg_psy.supplied_to = supply_interface;
+ di->chargalg_psy.num_supplicants = ARRAY_SIZE(supply_interface);
di->chargalg_psy.external_power_changed =
abx500_chargalg_external_power_changed;
@@ -1844,7 +1903,7 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
create_singlethread_workqueue("abx500_chargalg_wq");
if (di->chargalg_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
- goto free_device_info;
+ return -ENOMEM;
}
/* Init work for chargalg */
@@ -1885,20 +1944,23 @@ free_psy:
power_supply_unregister(&di->chargalg_psy);
free_chargalg_wq:
destroy_workqueue(di->chargalg_wq);
-free_device_info:
- kfree(di);
-
return ret;
}
+static const struct of_device_id ab8500_chargalg_match[] = {
+ { .compatible = "stericsson,ab8500-chargalg", },
+ { },
+};
+
static struct platform_driver abx500_chargalg_driver = {
.probe = abx500_chargalg_probe,
.remove = abx500_chargalg_remove,
.suspend = abx500_chargalg_suspend,
.resume = abx500_chargalg_resume,
.driver = {
- .name = "abx500-chargalg",
+ .name = "ab8500-chargalg",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_chargalg_match,
},
};
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index a17d08411723..6b2238bb6a81 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -27,8 +27,6 @@
#include <linux/pm_runtime.h>
#include <linux/power/smartreflex.h>
-#include <plat/cpu.h>
-
#define SMARTREFLEX_NAME_LEN 16
#define NVALUE_NAME_LEN 40
#define SR_DISABLE_TIMEOUT 200
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
new file mode 100644
index 000000000000..ca91396fc48e
--- /dev/null
+++ b/drivers/power/bq2415x_charger.c
@@ -0,0 +1,1666 @@
+/*
+ * bq2415x charger driver
+ *
+ * Copyright (C) 2011-2012 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Datasheets:
+ * http://www.ti.com/product/bq24150
+ * http://www.ti.com/product/bq24150a
+ * http://www.ti.com/product/bq24152
+ * http://www.ti.com/product/bq24153
+ * http://www.ti.com/product/bq24153a
+ * http://www.ti.com/product/bq24155
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/idr.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <linux/power/bq2415x_charger.h>
+
+/* timeout for resetting chip timer */
+#define BQ2415X_TIMER_TIMEOUT 10
+
+#define BQ2415X_REG_STATUS 0x00
+#define BQ2415X_REG_CONTROL 0x01
+#define BQ2415X_REG_VOLTAGE 0x02
+#define BQ2415X_REG_VENDER 0x03
+#define BQ2415X_REG_CURRENT 0x04
+
+/* reset state for all registers */
+#define BQ2415X_RESET_STATUS BIT(6)
+#define BQ2415X_RESET_CONTROL (BIT(4)|BIT(5))
+#define BQ2415X_RESET_VOLTAGE (BIT(1)|BIT(3))
+#define BQ2415X_RESET_CURRENT (BIT(0)|BIT(3)|BIT(7))
+
+/* status register */
+#define BQ2415X_BIT_TMR_RST 7
+#define BQ2415X_BIT_OTG 7
+#define BQ2415X_BIT_EN_STAT 6
+#define BQ2415X_MASK_STAT (BIT(4)|BIT(5))
+#define BQ2415X_SHIFT_STAT 4
+#define BQ2415X_BIT_BOOST 3
+#define BQ2415X_MASK_FAULT (BIT(0)|BIT(1)|BIT(2))
+#define BQ2415X_SHIFT_FAULT 0
+
+/* control register */
+#define BQ2415X_MASK_LIMIT (BIT(6)|BIT(7))
+#define BQ2415X_SHIFT_LIMIT 6
+#define BQ2415X_MASK_VLOWV (BIT(4)|BIT(5))
+#define BQ2415X_SHIFT_VLOWV 4
+#define BQ2415X_BIT_TE 3
+#define BQ2415X_BIT_CE 2
+#define BQ2415X_BIT_HZ_MODE 1
+#define BQ2415X_BIT_OPA_MODE 0
+
+/* voltage register */
+#define BQ2415X_MASK_VO (BIT(2)|BIT(3)|BIT(4)|BIT(5)|BIT(6)|BIT(7))
+#define BQ2415X_SHIFT_VO 2
+#define BQ2415X_BIT_OTG_PL 1
+#define BQ2415X_BIT_OTG_EN 0
+
+/* vender register */
+#define BQ2415X_MASK_VENDER (BIT(5)|BIT(6)|BIT(7))
+#define BQ2415X_SHIFT_VENDER 5
+#define BQ2415X_MASK_PN (BIT(3)|BIT(4))
+#define BQ2415X_SHIFT_PN 3
+#define BQ2415X_MASK_REVISION (BIT(0)|BIT(1)|BIT(2))
+#define BQ2415X_SHIFT_REVISION 0
+
+/* current register */
+#define BQ2415X_MASK_RESET BIT(7)
+#define BQ2415X_MASK_VI_CHRG (BIT(4)|BIT(5)|BIT(6))
+#define BQ2415X_SHIFT_VI_CHRG 4
+/* N/A BIT(3) */
+#define BQ2415X_MASK_VI_TERM (BIT(0)|BIT(1)|BIT(2))
+#define BQ2415X_SHIFT_VI_TERM 0
+
+
+enum bq2415x_command {
+ BQ2415X_TIMER_RESET,
+ BQ2415X_OTG_STATUS,
+ BQ2415X_STAT_PIN_STATUS,
+ BQ2415X_STAT_PIN_ENABLE,
+ BQ2415X_STAT_PIN_DISABLE,
+ BQ2415X_CHARGE_STATUS,
+ BQ2415X_BOOST_STATUS,
+ BQ2415X_FAULT_STATUS,
+
+ BQ2415X_CHARGE_TERMINATION_STATUS,
+ BQ2415X_CHARGE_TERMINATION_ENABLE,
+ BQ2415X_CHARGE_TERMINATION_DISABLE,
+ BQ2415X_CHARGER_STATUS,
+ BQ2415X_CHARGER_ENABLE,
+ BQ2415X_CHARGER_DISABLE,
+ BQ2415X_HIGH_IMPEDANCE_STATUS,
+ BQ2415X_HIGH_IMPEDANCE_ENABLE,
+ BQ2415X_HIGH_IMPEDANCE_DISABLE,
+ BQ2415X_BOOST_MODE_STATUS,
+ BQ2415X_BOOST_MODE_ENABLE,
+ BQ2415X_BOOST_MODE_DISABLE,
+
+ BQ2415X_OTG_LEVEL,
+ BQ2415X_OTG_ACTIVATE_HIGH,
+ BQ2415X_OTG_ACTIVATE_LOW,
+ BQ2415X_OTG_PIN_STATUS,
+ BQ2415X_OTG_PIN_ENABLE,
+ BQ2415X_OTG_PIN_DISABLE,
+
+ BQ2415X_VENDER_CODE,
+ BQ2415X_PART_NUMBER,
+ BQ2415X_REVISION,
+};
+
+enum bq2415x_chip {
+ BQUNKNOWN,
+ BQ24150,
+ BQ24150A,
+ BQ24151,
+ BQ24151A,
+ BQ24152,
+ BQ24153,
+ BQ24153A,
+ BQ24155,
+ BQ24156,
+ BQ24156A,
+ BQ24158,
+};
+
+static char *bq2415x_chip_name[] = {
+ "unknown",
+ "bq24150",
+ "bq24150a",
+ "bq24151",
+ "bq24151a",
+ "bq24152",
+ "bq24153",
+ "bq24153a",
+ "bq24155",
+ "bq24156",
+ "bq24156a",
+ "bq24158",
+};
+
+struct bq2415x_device {
+ struct device *dev;
+ struct bq2415x_platform_data init_data;
+ struct power_supply charger;
+ struct delayed_work work;
+ enum bq2415x_mode reported_mode;/* mode reported by hook function */
+ enum bq2415x_mode mode; /* current configured mode */
+ enum bq2415x_chip chip;
+ const char *timer_error;
+ char *model;
+ char *name;
+ int autotimer; /* 1 - driver automatically resets timer, 0 - not */
+ int automode; /* 1 - enabled, 0 - disabled; -1 - not supported */
+ int id;
+};
+
+/* each registered chip must have unique id */
+static DEFINE_IDR(bq2415x_id);
+
+static DEFINE_MUTEX(bq2415x_id_mutex);
+static DEFINE_MUTEX(bq2415x_timer_mutex);
+static DEFINE_MUTEX(bq2415x_i2c_mutex);
+
+/**** i2c read functions ****/
+
+/* read value from register */
+static int bq2415x_i2c_read(struct bq2415x_device *bq, u8 reg)
+{
+ struct i2c_client *client = to_i2c_client(bq->dev);
+ struct i2c_msg msg[2];
+ u8 val;
+ int ret;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].buf = &reg;
+ msg[0].len = sizeof(reg);
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = &val;
+ msg[1].len = sizeof(val);
+
+ mutex_lock(&bq2415x_i2c_mutex);
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ mutex_unlock(&bq2415x_i2c_mutex);
+
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+/* read value from register, apply mask and right shift it */
+static int bq2415x_i2c_read_mask(struct bq2415x_device *bq, u8 reg,
+ u8 mask, u8 shift)
+{
+ int ret;
+
+ if (shift > 8)
+ return -EINVAL;
+
+ ret = bq2415x_i2c_read(bq, reg);
+ if (ret < 0)
+ return ret;
+ return (ret & mask) >> shift;
+}
+
+/* read value from register and return one specified bit */
+static int bq2415x_i2c_read_bit(struct bq2415x_device *bq, u8 reg, u8 bit)
+{
+ if (bit > 8)
+ return -EINVAL;
+ return bq2415x_i2c_read_mask(bq, reg, BIT(bit), bit);
+}
+
+/**** i2c write functions ****/
+
+/* write value to register */
+static int bq2415x_i2c_write(struct bq2415x_device *bq, u8 reg, u8 val)
+{
+ struct i2c_client *client = to_i2c_client(bq->dev);
+ struct i2c_msg msg[1];
+ u8 data[2];
+ int ret;
+
+ data[0] = reg;
+ data[1] = val;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].buf = data;
+ msg[0].len = ARRAY_SIZE(data);
+
+ mutex_lock(&bq2415x_i2c_mutex);
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ mutex_unlock(&bq2415x_i2c_mutex);
+
+ /* i2c_transfer returns number of messages transferred */
+ if (ret < 0)
+ return ret;
+ else if (ret != 1)
+ return -EIO;
+
+ return 0;
+}
+
+/* read value from register, change it with mask left shifted and write back */
+static int bq2415x_i2c_write_mask(struct bq2415x_device *bq, u8 reg, u8 val,
+ u8 mask, u8 shift)
+{
+ int ret;
+
+ if (shift > 8)
+ return -EINVAL;
+
+ ret = bq2415x_i2c_read(bq, reg);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~mask;
+ ret |= val << shift;
+
+ return bq2415x_i2c_write(bq, reg, ret);
+}
+
+/* change only one bit in register */
+static int bq2415x_i2c_write_bit(struct bq2415x_device *bq, u8 reg,
+ bool val, u8 bit)
+{
+ if (bit > 8)
+ return -EINVAL;
+ return bq2415x_i2c_write_mask(bq, reg, val, BIT(bit), bit);
+}
+
+/**** global functions ****/
+
+/* exec command function */
+static int bq2415x_exec_command(struct bq2415x_device *bq,
+ enum bq2415x_command command)
+{
+ int ret;
+
+ switch (command) {
+ case BQ2415X_TIMER_RESET:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_STATUS,
+ 1, BQ2415X_BIT_TMR_RST);
+ case BQ2415X_OTG_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_STATUS,
+ BQ2415X_BIT_OTG);
+ case BQ2415X_STAT_PIN_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_STATUS,
+ BQ2415X_BIT_EN_STAT);
+ case BQ2415X_STAT_PIN_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_STATUS, 1,
+ BQ2415X_BIT_EN_STAT);
+ case BQ2415X_STAT_PIN_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_STATUS, 0,
+ BQ2415X_BIT_EN_STAT);
+ case BQ2415X_CHARGE_STATUS:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_STATUS,
+ BQ2415X_MASK_STAT, BQ2415X_SHIFT_STAT);
+ case BQ2415X_BOOST_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_STATUS,
+ BQ2415X_BIT_BOOST);
+ case BQ2415X_FAULT_STATUS:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_STATUS,
+ BQ2415X_MASK_FAULT, BQ2415X_SHIFT_FAULT);
+
+ case BQ2415X_CHARGE_TERMINATION_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_BIT_TE);
+ case BQ2415X_CHARGE_TERMINATION_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 1, BQ2415X_BIT_TE);
+ case BQ2415X_CHARGE_TERMINATION_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 0, BQ2415X_BIT_TE);
+ case BQ2415X_CHARGER_STATUS:
+ ret = bq2415x_i2c_read_bit(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_BIT_CE);
+ if (ret < 0)
+ return ret;
+ else
+ return ret > 0 ? 0 : 1;
+ case BQ2415X_CHARGER_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 0, BQ2415X_BIT_CE);
+ case BQ2415X_CHARGER_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 1, BQ2415X_BIT_CE);
+ case BQ2415X_HIGH_IMPEDANCE_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_BIT_HZ_MODE);
+ case BQ2415X_HIGH_IMPEDANCE_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 1, BQ2415X_BIT_HZ_MODE);
+ case BQ2415X_HIGH_IMPEDANCE_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 0, BQ2415X_BIT_HZ_MODE);
+ case BQ2415X_BOOST_MODE_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_BIT_OPA_MODE);
+ case BQ2415X_BOOST_MODE_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 1, BQ2415X_BIT_OPA_MODE);
+ case BQ2415X_BOOST_MODE_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 0, BQ2415X_BIT_OPA_MODE);
+
+ case BQ2415X_OTG_LEVEL:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_VOLTAGE,
+ BQ2415X_BIT_OTG_PL);
+ case BQ2415X_OTG_ACTIVATE_HIGH:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_VOLTAGE,
+ 1, BQ2415X_BIT_OTG_PL);
+ case BQ2415X_OTG_ACTIVATE_LOW:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_VOLTAGE,
+ 0, BQ2415X_BIT_OTG_PL);
+ case BQ2415X_OTG_PIN_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_VOLTAGE,
+ BQ2415X_BIT_OTG_EN);
+ case BQ2415X_OTG_PIN_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_VOLTAGE,
+ 1, BQ2415X_BIT_OTG_EN);
+ case BQ2415X_OTG_PIN_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_VOLTAGE,
+ 0, BQ2415X_BIT_OTG_EN);
+
+ case BQ2415X_VENDER_CODE:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_VENDER,
+ BQ2415X_MASK_VENDER, BQ2415X_SHIFT_VENDER);
+ case BQ2415X_PART_NUMBER:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_VENDER,
+ BQ2415X_MASK_PN, BQ2415X_SHIFT_PN);
+ case BQ2415X_REVISION:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_VENDER,
+ BQ2415X_MASK_REVISION, BQ2415X_SHIFT_REVISION);
+ }
+ return -EINVAL;
+}
+
+/* detect chip type */
+static enum bq2415x_chip bq2415x_detect_chip(struct bq2415x_device *bq)
+{
+ struct i2c_client *client = to_i2c_client(bq->dev);
+ int ret = bq2415x_exec_command(bq, BQ2415X_PART_NUMBER);
+
+ if (ret < 0)
+ return ret;
+
+ switch (client->addr) {
+ case 0x6b:
+ switch (ret) {
+ case 0:
+ if (bq->chip == BQ24151A)
+ return bq->chip;
+ else
+ return BQ24151;
+ case 1:
+ if (bq->chip == BQ24150A ||
+ bq->chip == BQ24152 ||
+ bq->chip == BQ24155)
+ return bq->chip;
+ else
+ return BQ24150;
+ case 2:
+ if (bq->chip == BQ24153A)
+ return bq->chip;
+ else
+ return BQ24153;
+ default:
+ return BQUNKNOWN;
+ }
+ break;
+
+ case 0x6a:
+ switch (ret) {
+ case 0:
+ if (bq->chip == BQ24156A)
+ return bq->chip;
+ else
+ return BQ24156;
+ case 2:
+ return BQ24158;
+ default:
+ return BQUNKNOWN;
+ }
+ break;
+ }
+
+ return BQUNKNOWN;
+}
+
+/* detect chip revision */
+static int bq2415x_detect_revision(struct bq2415x_device *bq)
+{
+ int ret = bq2415x_exec_command(bq, BQ2415X_REVISION);
+ int chip = bq2415x_detect_chip(bq);
+
+ if (ret < 0 || chip < 0)
+ return -1;
+
+ switch (chip) {
+ case BQ24150:
+ case BQ24150A:
+ case BQ24151:
+ case BQ24151A:
+ case BQ24152:
+ if (ret >= 0 && ret <= 3)
+ return ret;
+ else
+ return -1;
+ case BQ24153:
+ case BQ24153A:
+ case BQ24156:
+ case BQ24156A:
+ case BQ24158:
+ if (ret == 3)
+ return 0;
+ else if (ret == 1)
+ return 1;
+ else
+ return -1;
+ case BQ24155:
+ if (ret == 3)
+ return 3;
+ else
+ return -1;
+ case BQUNKNOWN:
+ return -1;
+ }
+
+ return -1;
+}
+
+/* return chip vender code */
+static int bq2415x_get_vender_code(struct bq2415x_device *bq)
+{
+ int ret;
+
+ ret = bq2415x_exec_command(bq, BQ2415X_VENDER_CODE);
+ if (ret < 0)
+ return 0;
+
+ /* convert to binary */
+ return (ret & 0x1) +
+ ((ret >> 1) & 0x1) * 10 +
+ ((ret >> 2) & 0x1) * 100;
+}
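+
+/*
+ * Illustrative example of the conversion above: the 3-bit vender code is
+ * rendered as three decimal digits, so a register value of 0b010 becomes
+ * 0 + 1*10 + 0*100 = 10, printed as "010" by the %.3d format used in
+ * bq2415x_power_supply_init().
+ */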
+
+/* reset all chip registers to default state */
+static void bq2415x_reset_chip(struct bq2415x_device *bq)
+{
+ bq2415x_i2c_write(bq, BQ2415X_REG_CURRENT, BQ2415X_RESET_CURRENT);
+ bq2415x_i2c_write(bq, BQ2415X_REG_VOLTAGE, BQ2415X_RESET_VOLTAGE);
+ bq2415x_i2c_write(bq, BQ2415X_REG_CONTROL, BQ2415X_RESET_CONTROL);
+ bq2415x_i2c_write(bq, BQ2415X_REG_STATUS, BQ2415X_RESET_STATUS);
+ bq->timer_error = NULL;
+}
+
+/**** properties functions ****/
+
+/* set current limit in mA */
+static int bq2415x_set_current_limit(struct bq2415x_device *bq, int mA)
+{
+ int val;
+
+ if (mA <= 100)
+ val = 0;
+ else if (mA <= 500)
+ val = 1;
+ else if (mA <= 800)
+ val = 2;
+ else
+ val = 3;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_CONTROL, val,
+ BQ2415X_MASK_LIMIT, BQ2415X_SHIFT_LIMIT);
+}
+
+/* get current limit in mA */
+static int bq2415x_get_current_limit(struct bq2415x_device *bq)
+{
+ int ret;
+
+ ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_MASK_LIMIT, BQ2415X_SHIFT_LIMIT);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return 100;
+ else if (ret == 1)
+ return 500;
+ else if (ret == 2)
+ return 800;
+ else if (ret == 3)
+ return 1800;
+ return -EINVAL;
+}
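+
+/*
+ * Sketch of the mapping used by the two helpers above (values in mA):
+ * step 0 <-> 100, step 1 <-> 500, step 2 <-> 800, step 3 <-> 1800. A
+ * requested limit is mapped to the nearest step at or above it, capped at
+ * step 3; e.g. a request for 600 mA is stored as step 2 and reads back
+ * as 800 mA.
+ */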
+
+/* set weak battery voltage in mV */
+static int bq2415x_set_weak_battery_voltage(struct bq2415x_device *bq, int mV)
+{
+ int val;
+
+ /* round to 100mV */
+ if (mV <= 3400 + 50)
+ val = 0;
+ else if (mV <= 3500 + 50)
+ val = 1;
+ else if (mV <= 3600 + 50)
+ val = 2;
+ else
+ val = 3;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_CONTROL, val,
+ BQ2415X_MASK_VLOWV, BQ2415X_SHIFT_VLOWV);
+}
+
+/* get weak battery voltage in mV */
+static int bq2415x_get_weak_battery_voltage(struct bq2415x_device *bq)
+{
+ int ret;
+
+ ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_MASK_VLOWV, BQ2415X_SHIFT_VLOWV);
+ if (ret < 0)
+ return ret;
+ return 100 * (34 + ret);
+}
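+
+/*
+ * Example of the VLOWV coding above (illustrative): a request of 3500 mV
+ * falls into the "<= 3550" bucket and is stored as step 1, which reads back
+ * as 100 * (34 + 1) = 3500 mV; steps 0..3 therefore correspond to
+ * 3400/3500/3600/3700 mV.
+ */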
+
+/* set battery regulation voltage in mV */
+static int bq2415x_set_battery_regulation_voltage(struct bq2415x_device *bq,
+ int mV)
+{
+ int val = (mV/10 - 350) / 2;
+
+ if (val < 0)
+ val = 0;
+ else if (val > 94) /* FIXME: Max is 94 or 122 ? Set max value ? */
+ return -EINVAL;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_VOLTAGE, val,
+ BQ2415X_MASK_VO, BQ2415X_SHIFT_VO);
+}
+
+/* get battery regulation voltage in mV */
+static int bq2415x_get_battery_regulation_voltage(struct bq2415x_device *bq)
+{
+ int ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_VOLTAGE,
+ BQ2415X_MASK_VO, BQ2415X_SHIFT_VO);
+
+ if (ret < 0)
+ return ret;
+ return 10 * (350 + 2*ret);
+}
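+
+/*
+ * Worked example for the VO coding above (illustrative values): a 4200 mV
+ * regulation voltage maps to val = (4200/10 - 350) / 2 = 35, and reading
+ * back step 35 gives 10 * (350 + 2*35) = 4200 mV, i.e. 20 mV per step with
+ * a 3500 mV offset.
+ */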
+
+/* set charge current in mA (platform data must provide resistor sense) */
+static int bq2415x_set_charge_current(struct bq2415x_device *bq, int mA)
+{
+ int val;
+
+ if (bq->init_data.resistor_sense <= 0)
+ return -ENOSYS;
+
+ val = (mA * bq->init_data.resistor_sense - 37400) / 6800;
+ if (val < 0)
+ val = 0;
+ else if (val > 7)
+ val = 7;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_CURRENT, val,
+ BQ2415X_MASK_VI_CHRG | BQ2415X_MASK_RESET,
+ BQ2415X_SHIFT_VI_CHRG);
+}
+
+/* get charge current in mA (platform data must provide resistor sense) */
+static int bq2415x_get_charge_current(struct bq2415x_device *bq)
+{
+ int ret;
+
+ if (bq->init_data.resistor_sense <= 0)
+ return -ENOSYS;
+
+ ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_CURRENT,
+ BQ2415X_MASK_VI_CHRG, BQ2415X_SHIFT_VI_CHRG);
+ if (ret < 0)
+ return ret;
+ return (37400 + 6800*ret) / bq->init_data.resistor_sense;
+}
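+
+/*
+ * Worked example for the charge current coding above, assuming
+ * init_data.resistor_sense is given in milliohms (illustrative values):
+ * with a 68 mOhm sense resistor, a request for 950 mA maps to
+ * val = (950*68 - 37400) / 6800 = 4, and reading back step 4 returns
+ * (37400 + 6800*4) / 68 = 950 mA.
+ */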
+
+/* set termination current in mA (platform data must provide resistor sense) */
+static int bq2415x_set_termination_current(struct bq2415x_device *bq, int mA)
+{
+ int val;
+
+ if (bq->init_data.resistor_sense <= 0)
+ return -ENOSYS;
+
+ val = (mA * bq->init_data.resistor_sense - 3400) / 3400;
+ if (val < 0)
+ val = 0;
+ else if (val > 7)
+ val = 7;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_CURRENT, val,
+ BQ2415X_MASK_VI_TERM | BQ2415X_MASK_RESET,
+ BQ2415X_SHIFT_VI_TERM);
+}
+
+/* get termination current in mA (platform data must provide resistor sense) */
+static int bq2415x_get_termination_current(struct bq2415x_device *bq)
+{
+ int ret;
+
+ if (bq->init_data.resistor_sense <= 0)
+ return -ENOSYS;
+
+ ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_CURRENT,
+ BQ2415X_MASK_VI_TERM, BQ2415X_SHIFT_VI_TERM);
+ if (ret < 0)
+ return ret;
+ return (3400 + 3400*ret) / bq->init_data.resistor_sense;
+}
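+
+/*
+ * Worked example for the termination current coding above, again assuming a
+ * sense resistor value in milliohms (illustrative): with resistor_sense = 68,
+ * a request for 100 mA maps to val = (100*68 - 3400) / 3400 = 1, and reading
+ * back step 1 returns (3400 + 3400*1) / 68 = 100 mA.
+ */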
+
+/* set default value of property */
+#define bq2415x_set_default_value(bq, prop) \
+ do { \
+ int ret = 0; \
+ if (bq->init_data.prop != -1) \
+ ret = bq2415x_set_##prop(bq, bq->init_data.prop); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
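+
+/*
+ * Note on the macro above: when the platform data field is not -1 it calls
+ * the matching bq2415x_set_*() helper and, on error, executes "return ret;"
+ * in the *calling* function, so it is only usable inside int-returning
+ * functions such as bq2415x_set_defaults() and bq2415x_set_mode() below.
+ */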
+
+/* set default values of all properties */
+static int bq2415x_set_defaults(struct bq2415x_device *bq)
+{
+ bq2415x_exec_command(bq, BQ2415X_BOOST_MODE_DISABLE);
+ bq2415x_exec_command(bq, BQ2415X_CHARGER_DISABLE);
+ bq2415x_exec_command(bq, BQ2415X_CHARGE_TERMINATION_DISABLE);
+
+ bq2415x_set_default_value(bq, current_limit);
+ bq2415x_set_default_value(bq, weak_battery_voltage);
+ bq2415x_set_default_value(bq, battery_regulation_voltage);
+
+ if (bq->init_data.resistor_sense > 0) {
+ bq2415x_set_default_value(bq, charge_current);
+ bq2415x_set_default_value(bq, termination_current);
+ bq2415x_exec_command(bq, BQ2415X_CHARGE_TERMINATION_ENABLE);
+ }
+
+ bq2415x_exec_command(bq, BQ2415X_CHARGER_ENABLE);
+ return 0;
+}
+
+/**** charger mode functions ****/
+
+/* set charger mode */
+static int bq2415x_set_mode(struct bq2415x_device *bq, enum bq2415x_mode mode)
+{
+ int ret = 0;
+ int charger = 0;
+ int boost = 0;
+
+ if (mode == BQ2415X_MODE_BOOST)
+ boost = 1;
+ else if (mode != BQ2415X_MODE_OFF)
+ charger = 1;
+
+ if (!charger)
+ ret = bq2415x_exec_command(bq, BQ2415X_CHARGER_DISABLE);
+
+ if (!boost)
+ ret = bq2415x_exec_command(bq, BQ2415X_BOOST_MODE_DISABLE);
+
+ if (ret < 0)
+ return ret;
+
+ switch (mode) {
+ case BQ2415X_MODE_OFF:
+ dev_dbg(bq->dev, "changing mode to: Offline\n");
+ ret = bq2415x_set_current_limit(bq, 100);
+ break;
+ case BQ2415X_MODE_NONE:
+ dev_dbg(bq->dev, "changing mode to: N/A\n");
+ ret = bq2415x_set_current_limit(bq, 100);
+ break;
+ case BQ2415X_MODE_HOST_CHARGER:
+ dev_dbg(bq->dev, "changing mode to: Host/HUB charger\n");
+ ret = bq2415x_set_current_limit(bq, 500);
+ break;
+ case BQ2415X_MODE_DEDICATED_CHARGER:
+ dev_dbg(bq->dev, "changing mode to: Dedicated charger\n");
+ ret = bq2415x_set_current_limit(bq, 1800);
+ break;
+ case BQ2415X_MODE_BOOST: /* Boost mode */
+ dev_dbg(bq->dev, "changing mode to: Boost\n");
+ ret = bq2415x_set_current_limit(bq, 100);
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ if (charger)
+ ret = bq2415x_exec_command(bq, BQ2415X_CHARGER_ENABLE);
+ else if (boost)
+ ret = bq2415x_exec_command(bq, BQ2415X_BOOST_MODE_ENABLE);
+
+ if (ret < 0)
+ return ret;
+
+ bq2415x_set_default_value(bq, weak_battery_voltage);
+ bq2415x_set_default_value(bq, battery_regulation_voltage);
+
+ bq->mode = mode;
+ sysfs_notify(&bq->charger.dev->kobj, NULL, "mode");
+
+ return 0;
+
+}
+
+/* hook function called by another driver, which sets the reported mode */
+static void bq2415x_hook_function(enum bq2415x_mode mode, void *data)
+{
+ struct bq2415x_device *bq = data;
+
+ if (!bq)
+ return;
+
+ dev_dbg(bq->dev, "hook function was called\n");
+ bq->reported_mode = mode;
+
+ /* if automode is not enabled do not tell about reported_mode */
+ if (bq->automode < 1)
+ return;
+
+ sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
+ bq2415x_set_mode(bq, bq->reported_mode);
+
+}
+
+/**** timer functions ****/
+
+/* enable/disable auto resetting chip timer */
+static void bq2415x_set_autotimer(struct bq2415x_device *bq, int state)
+{
+ mutex_lock(&bq2415x_timer_mutex);
+
+ if (bq->autotimer == state) {
+ mutex_unlock(&bq2415x_timer_mutex);
+ return;
+ }
+
+ bq->autotimer = state;
+
+ if (state) {
+ schedule_delayed_work(&bq->work, BQ2415X_TIMER_TIMEOUT * HZ);
+ bq2415x_exec_command(bq, BQ2415X_TIMER_RESET);
+ bq->timer_error = NULL;
+ } else {
+ cancel_delayed_work_sync(&bq->work);
+ }
+
+ mutex_unlock(&bq2415x_timer_mutex);
+}
+
+/* called by bq2415x_timer_work on timer error */
+static void bq2415x_timer_error(struct bq2415x_device *bq, const char *msg)
+{
+ bq->timer_error = msg;
+ sysfs_notify(&bq->charger.dev->kobj, NULL, "timer");
+ dev_err(bq->dev, "%s\n", msg);
+ if (bq->automode > 0)
+ bq->automode = 0;
+ bq2415x_set_mode(bq, BQ2415X_MODE_OFF);
+ bq2415x_set_autotimer(bq, 0);
+}
+
+/* delayed work function for auto resetting chip timer */
+static void bq2415x_timer_work(struct work_struct *work)
+{
+ struct bq2415x_device *bq = container_of(work, struct bq2415x_device,
+ work.work);
+ int ret;
+ int error;
+ int boost;
+
+ if (!bq->autotimer)
+ return;
+
+ ret = bq2415x_exec_command(bq, BQ2415X_TIMER_RESET);
+ if (ret < 0) {
+ bq2415x_timer_error(bq, "Resetting timer failed");
+ return;
+ }
+
+ boost = bq2415x_exec_command(bq, BQ2415X_BOOST_MODE_STATUS);
+ if (boost < 0) {
+ bq2415x_timer_error(bq, "Unknown error");
+ return;
+ }
+
+ error = bq2415x_exec_command(bq, BQ2415X_FAULT_STATUS);
+ if (error < 0) {
+ bq2415x_timer_error(bq, "Unknown error");
+ return;
+ }
+
+ if (boost) {
+ switch (error) {
+ /* Non fatal errors, chip is OK */
+ case 0: /* No error */
+ break;
+ case 6: /* Timer expired */
+ dev_err(bq->dev, "Timer expired\n");
+ break;
+ case 3: /* Battery voltage too low */
+ dev_err(bq->dev, "Battery voltage to low\n");
+ break;
+
+ /* Fatal errors, disable and reset chip */
+ case 1: /* Overvoltage protection (chip fried) */
+ bq2415x_timer_error(bq,
+ "Overvoltage protection (chip fried)");
+ return;
+ case 2: /* Overload */
+ bq2415x_timer_error(bq, "Overload");
+ return;
+ case 4: /* Battery overvoltage protection */
+ bq2415x_timer_error(bq,
+ "Battery overvoltage protection");
+ return;
+ case 5: /* Thermal shutdown (too hot) */
+ bq2415x_timer_error(bq,
+ "Thermal shutdown (too hot)");
+ return;
+ case 7: /* N/A */
+ bq2415x_timer_error(bq, "Unknown error");
+ return;
+ }
+ } else {
+ switch (error) {
+ /* Non fatal errors, chip is OK */
+ case 0: /* No error */
+ break;
+ case 2: /* Sleep mode */
+ dev_err(bq->dev, "Sleep mode\n");
+ break;
+ case 3: /* Poor input source */
+ dev_err(bq->dev, "Poor input source\n");
+ break;
+ case 6: /* Timer expired */
+ dev_err(bq->dev, "Timer expired\n");
+ break;
+ case 7: /* No battery */
+ dev_err(bq->dev, "No battery\n");
+ break;
+
+ /* Fatal errors, disable and reset chip */
+ case 1: /* Overvoltage protection (chip fried) */
+ bq2415x_timer_error(bq,
+ "Overvoltage protection (chip fried)");
+ return;
+ case 4: /* Battery overvoltage protection */
+ bq2415x_timer_error(bq,
+ "Battery overvoltage protection");
+ return;
+ case 5: /* Thermal shutdown (too hot) */
+ bq2415x_timer_error(bq,
+ "Thermal shutdown (too hot)");
+ return;
+ }
+ }
+
+ schedule_delayed_work(&bq->work, BQ2415X_TIMER_TIMEOUT * HZ);
+}
+
+/**** power supply interface code ****/
+
+static enum power_supply_property bq2415x_power_supply_props[] = {
+ /* TODO: maybe add more power supply properties */
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int bq2415x_power_supply_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq2415x_exec_command(bq, BQ2415X_CHARGE_STATUS);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0) /* Ready */
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret == 1) /* Charge in progress */
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (ret == 2) /* Charge done */
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bq->model;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int bq2415x_power_supply_init(struct bq2415x_device *bq)
+{
+ int ret;
+ int chip;
+ char revstr[8];
+
+ bq->charger.name = bq->name;
+ bq->charger.type = POWER_SUPPLY_TYPE_USB;
+ bq->charger.properties = bq2415x_power_supply_props;
+ bq->charger.num_properties = ARRAY_SIZE(bq2415x_power_supply_props);
+ bq->charger.get_property = bq2415x_power_supply_get_property;
+
+ ret = bq2415x_detect_chip(bq);
+ if (ret < 0)
+ chip = BQUNKNOWN;
+ else
+ chip = ret;
+
+ ret = bq2415x_detect_revision(bq);
+ if (ret < 0)
+ strcpy(revstr, "unknown");
+ else
+ sprintf(revstr, "1.%d", ret);
+
+ bq->model = kasprintf(GFP_KERNEL,
+ "chip %s, revision %s, vender code %.3d",
+ bq2415x_chip_name[chip], revstr,
+ bq2415x_get_vender_code(bq));
+ if (!bq->model) {
+ dev_err(bq->dev, "failed to allocate model name\n");
+ return -ENOMEM;
+ }
+
+ ret = power_supply_register(bq->dev, &bq->charger);
+ if (ret) {
+ kfree(bq->model);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bq2415x_power_supply_exit(struct bq2415x_device *bq)
+{
+ bq->autotimer = 0;
+ if (bq->automode > 0)
+ bq->automode = 0;
+ cancel_delayed_work_sync(&bq->work);
+ power_supply_unregister(&bq->charger);
+ kfree(bq->model);
+}
+
+/**** additional sysfs entries for power supply interface ****/
+
+/* show *_status entries */
+static ssize_t bq2415x_sysfs_show_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ enum bq2415x_command command;
+ int ret;
+
+ if (strcmp(attr->attr.name, "otg_status") == 0)
+ command = BQ2415X_OTG_STATUS;
+ else if (strcmp(attr->attr.name, "charge_status") == 0)
+ command = BQ2415X_CHARGE_STATUS;
+ else if (strcmp(attr->attr.name, "boost_status") == 0)
+ command = BQ2415X_BOOST_STATUS;
+ else if (strcmp(attr->attr.name, "fault_status") == 0)
+ command = BQ2415X_FAULT_STATUS;
+ else
+ return -EINVAL;
+
+ ret = bq2415x_exec_command(bq, command);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+/*
+ * set timer entry:
+ * auto - enable auto mode
+ * off - disable auto mode
+ * (other values) - reset chip timer
+ */
+static ssize_t bq2415x_sysfs_set_timer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ int ret = 0;
+
+ if (strncmp(buf, "auto", 4) == 0)
+ bq2415x_set_autotimer(bq, 1);
+ else if (strncmp(buf, "off", 3) == 0)
+ bq2415x_set_autotimer(bq, 0);
+ else
+ ret = bq2415x_exec_command(bq, BQ2415X_TIMER_RESET);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* show timer entry (auto or off) */
+static ssize_t bq2415x_sysfs_show_timer(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+
+ if (bq->timer_error)
+ return sprintf(buf, "%s\n", bq->timer_error);
+
+ if (bq->autotimer)
+ return sprintf(buf, "auto\n");
+ return sprintf(buf, "off\n");
+}
+
+/*
+ * set mode entry:
+ * auto - if automode is supported, enable it and set mode to reported
+ * none - disable charger and boost mode
+ * host - charging mode for host/hub chargers (current limit 500mA)
+ * dedicated - charging mode for dedicated chargers (unlimited current limit)
+ * boost - disable charger and enable boost mode
+ */
+static ssize_t bq2415x_sysfs_set_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ enum bq2415x_mode mode;
+ int ret = 0;
+
+ if (strncmp(buf, "auto", 4) == 0) {
+ if (bq->automode < 0)
+ return -ENOSYS;
+ bq->automode = 1;
+ mode = bq->reported_mode;
+ } else if (strncmp(buf, "off", 3) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_OFF;
+ } else if (strncmp(buf, "none", 4) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_NONE;
+ } else if (strncmp(buf, "host", 4) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_HOST_CHARGER;
+ } else if (strncmp(buf, "dedicated", 9) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_DEDICATED_CHARGER;
+ } else if (strncmp(buf, "boost", 5) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_BOOST;
+ } else if (strncmp(buf, "reset", 5) == 0) {
+ bq2415x_reset_chip(bq);
+ bq2415x_set_defaults(bq);
+ if (bq->automode <= 0)
+ return count;
+ bq->automode = 1;
+ mode = bq->reported_mode;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = bq2415x_set_mode(bq, mode);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* show mode entry (auto, none, host, dedicated or boost) */
+static ssize_t bq2415x_sysfs_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ ssize_t ret = 0;
+
+ if (bq->automode > 0)
+ ret += sprintf(buf+ret, "auto (");
+
+ switch (bq->mode) {
+ case BQ2415X_MODE_OFF:
+ ret += sprintf(buf+ret, "off");
+ break;
+ case BQ2415X_MODE_NONE:
+ ret += sprintf(buf+ret, "none");
+ break;
+ case BQ2415X_MODE_HOST_CHARGER:
+ ret += sprintf(buf+ret, "host");
+ break;
+ case BQ2415X_MODE_DEDICATED_CHARGER:
+ ret += sprintf(buf+ret, "dedicated");
+ break;
+ case BQ2415X_MODE_BOOST:
+ ret += sprintf(buf+ret, "boost");
+ break;
+ }
+
+ if (bq->automode > 0)
+ ret += sprintf(buf+ret, ")");
+
+ ret += sprintf(buf+ret, "\n");
+ return ret;
+}
+
+/* show reported_mode entry (none, host, dedicated or boost) */
+static ssize_t bq2415x_sysfs_show_reported_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+
+ if (bq->automode < 0)
+ return -EINVAL;
+
+ switch (bq->reported_mode) {
+ case BQ2415X_MODE_OFF:
+ return sprintf(buf, "off\n");
+ case BQ2415X_MODE_NONE:
+ return sprintf(buf, "none\n");
+ case BQ2415X_MODE_HOST_CHARGER:
+ return sprintf(buf, "host\n");
+ case BQ2415X_MODE_DEDICATED_CHARGER:
+ return sprintf(buf, "dedicated\n");
+ case BQ2415X_MODE_BOOST:
+ return sprintf(buf, "boost\n");
+ }
+
+ return -EINVAL;
+}
+
+/* directly set raw value to chip register, format: 'register value' */
+static ssize_t bq2415x_sysfs_set_registers(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ ssize_t ret = 0;
+ unsigned int reg;
+ unsigned int val;
+
+ if (sscanf(buf, "%x %x", &reg, &val) != 2)
+ return -EINVAL;
+
+ if (reg > 4 || val > 255)
+ return -EINVAL;
+
+ ret = bq2415x_i2c_write(bq, reg, val);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* print value of chip register, format: 'register=value' */
+static ssize_t bq2415x_sysfs_print_reg(struct bq2415x_device *bq,
+ u8 reg,
+ char *buf)
+{
+ int ret = bq2415x_i2c_read(bq, reg);
+
+ if (ret < 0)
+ return sprintf(buf, "%#.2x=error %d\n", reg, ret);
+ return sprintf(buf, "%#.2x=%#.2x\n", reg, ret);
+}
+
+/* show all raw values of chip register, format per line: 'register=value' */
+static ssize_t bq2415x_sysfs_show_registers(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ ssize_t ret = 0;
+
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_STATUS, buf+ret);
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_CONTROL, buf+ret);
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_VOLTAGE, buf+ret);
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_VENDER, buf+ret);
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_CURRENT, buf+ret);
+ return ret;
+}
+
+/* set current and voltage limit entries (in mA or mV) */
+static ssize_t bq2415x_sysfs_set_limit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ long val;
+ int ret;
+
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ if (strcmp(attr->attr.name, "current_limit") == 0)
+ ret = bq2415x_set_current_limit(bq, val);
+ else if (strcmp(attr->attr.name, "weak_battery_voltage") == 0)
+ ret = bq2415x_set_weak_battery_voltage(bq, val);
+ else if (strcmp(attr->attr.name, "battery_regulation_voltage") == 0)
+ ret = bq2415x_set_battery_regulation_voltage(bq, val);
+ else if (strcmp(attr->attr.name, "charge_current") == 0)
+ ret = bq2415x_set_charge_current(bq, val);
+ else if (strcmp(attr->attr.name, "termination_current") == 0)
+ ret = bq2415x_set_termination_current(bq, val);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* show current and voltage limit entries (in mA or mV) */
+static ssize_t bq2415x_sysfs_show_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ int ret;
+
+ if (strcmp(attr->attr.name, "current_limit") == 0)
+ ret = bq2415x_get_current_limit(bq);
+ else if (strcmp(attr->attr.name, "weak_battery_voltage") == 0)
+ ret = bq2415x_get_weak_battery_voltage(bq);
+ else if (strcmp(attr->attr.name, "battery_regulation_voltage") == 0)
+ ret = bq2415x_get_battery_regulation_voltage(bq);
+ else if (strcmp(attr->attr.name, "charge_current") == 0)
+ ret = bq2415x_get_charge_current(bq);
+ else if (strcmp(attr->attr.name, "termination_current") == 0)
+ ret = bq2415x_get_termination_current(bq);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+/* set *_enable entries */
+static ssize_t bq2415x_sysfs_set_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ enum bq2415x_command command;
+ long val;
+ int ret;
+
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ if (strcmp(attr->attr.name, "charge_termination_enable") == 0)
+ command = val ? BQ2415X_CHARGE_TERMINATION_ENABLE :
+ BQ2415X_CHARGE_TERMINATION_DISABLE;
+ else if (strcmp(attr->attr.name, "high_impedance_enable") == 0)
+ command = val ? BQ2415X_HIGH_IMPEDANCE_ENABLE :
+ BQ2415X_HIGH_IMPEDANCE_DISABLE;
+ else if (strcmp(attr->attr.name, "otg_pin_enable") == 0)
+ command = val ? BQ2415X_OTG_PIN_ENABLE :
+ BQ2415X_OTG_PIN_DISABLE;
+ else if (strcmp(attr->attr.name, "stat_pin_enable") == 0)
+ command = val ? BQ2415X_STAT_PIN_ENABLE :
+ BQ2415X_STAT_PIN_DISABLE;
+ else
+ return -EINVAL;
+
+ ret = bq2415x_exec_command(bq, command);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* show *_enable entries */
+static ssize_t bq2415x_sysfs_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ enum bq2415x_command command;
+ int ret;
+
+ if (strcmp(attr->attr.name, "charge_termination_enable") == 0)
+ command = BQ2415X_CHARGE_TERMINATION_STATUS;
+ else if (strcmp(attr->attr.name, "high_impedance_enable") == 0)
+ command = BQ2415X_HIGH_IMPEDANCE_STATUS;
+ else if (strcmp(attr->attr.name, "otg_pin_enable") == 0)
+ command = BQ2415X_OTG_PIN_STATUS;
+ else if (strcmp(attr->attr.name, "stat_pin_enable") == 0)
+ command = BQ2415X_STAT_PIN_STATUS;
+ else
+ return -EINVAL;
+
+ ret = bq2415x_exec_command(bq, command);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+static DEVICE_ATTR(current_limit, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+static DEVICE_ATTR(weak_battery_voltage, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+static DEVICE_ATTR(battery_regulation_voltage, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+static DEVICE_ATTR(charge_current, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+static DEVICE_ATTR(termination_current, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+
+static DEVICE_ATTR(charge_termination_enable, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_enable, bq2415x_sysfs_set_enable);
+static DEVICE_ATTR(high_impedance_enable, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_enable, bq2415x_sysfs_set_enable);
+static DEVICE_ATTR(otg_pin_enable, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_enable, bq2415x_sysfs_set_enable);
+static DEVICE_ATTR(stat_pin_enable, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_enable, bq2415x_sysfs_set_enable);
+
+static DEVICE_ATTR(reported_mode, S_IRUGO,
+ bq2415x_sysfs_show_reported_mode, NULL);
+static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_mode, bq2415x_sysfs_set_mode);
+static DEVICE_ATTR(timer, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_timer, bq2415x_sysfs_set_timer);
+
+static DEVICE_ATTR(registers, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_registers, bq2415x_sysfs_set_registers);
+
+static DEVICE_ATTR(otg_status, S_IRUGO, bq2415x_sysfs_show_status, NULL);
+static DEVICE_ATTR(charge_status, S_IRUGO, bq2415x_sysfs_show_status, NULL);
+static DEVICE_ATTR(boost_status, S_IRUGO, bq2415x_sysfs_show_status, NULL);
+static DEVICE_ATTR(fault_status, S_IRUGO, bq2415x_sysfs_show_status, NULL);
+
+static struct attribute *bq2415x_sysfs_attributes[] = {
+ /*
+ * TODO: some (appropriate) of these attrs should be switched to
+ * use power supply class props.
+ */
+ &dev_attr_current_limit.attr,
+ &dev_attr_weak_battery_voltage.attr,
+ &dev_attr_battery_regulation_voltage.attr,
+ &dev_attr_charge_current.attr,
+ &dev_attr_termination_current.attr,
+
+ &dev_attr_charge_termination_enable.attr,
+ &dev_attr_high_impedance_enable.attr,
+ &dev_attr_otg_pin_enable.attr,
+ &dev_attr_stat_pin_enable.attr,
+
+ &dev_attr_reported_mode.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_timer.attr,
+
+ &dev_attr_registers.attr,
+
+ &dev_attr_otg_status.attr,
+ &dev_attr_charge_status.attr,
+ &dev_attr_boost_status.attr,
+ &dev_attr_fault_status.attr,
+ NULL,
+};
+
+static const struct attribute_group bq2415x_sysfs_attr_group = {
+ .attrs = bq2415x_sysfs_attributes,
+};
+
+static int bq2415x_sysfs_init(struct bq2415x_device *bq)
+{
+ return sysfs_create_group(&bq->charger.dev->kobj,
+ &bq2415x_sysfs_attr_group);
+}
+
+static void bq2415x_sysfs_exit(struct bq2415x_device *bq)
+{
+ sysfs_remove_group(&bq->charger.dev->kobj, &bq2415x_sysfs_attr_group);
+}
+
+/* main bq2415x probe function */
+static int bq2415x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ int num;
+ char *name;
+ struct bq2415x_device *bq;
+
+ if (!client->dev.platform_data) {
+ dev_err(&client->dev, "platform data not set\n");
+ return -ENODEV;
+ }
+
+ /* Get new ID for the new device */
+ ret = idr_pre_get(&bq2415x_id, GFP_KERNEL);
+ if (ret == 0)
+ return -ENOMEM;
+
+ mutex_lock(&bq2415x_id_mutex);
+ ret = idr_get_new(&bq2415x_id, client, &num);
+ mutex_unlock(&bq2415x_id_mutex);
+
+ if (ret < 0)
+ return ret;
+
+ name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
+ if (!name) {
+ dev_err(&client->dev, "failed to allocate device name\n");
+ ret = -ENOMEM;
+ goto error_1;
+ }
+
+ bq = devm_kzalloc(&client->dev, sizeof(*bq), GFP_KERNEL);
+ if (!bq) {
+ dev_err(&client->dev, "failed to allocate device data\n");
+ ret = -ENOMEM;
+ goto error_2;
+ }
+
+ i2c_set_clientdata(client, bq);
+
+ bq->id = num;
+ bq->dev = &client->dev;
+ bq->chip = id->driver_data;
+ bq->name = name;
+ bq->mode = BQ2415X_MODE_OFF;
+ bq->reported_mode = BQ2415X_MODE_OFF;
+ bq->autotimer = 0;
+ bq->automode = 0;
+
+ memcpy(&bq->init_data, client->dev.platform_data,
+ sizeof(bq->init_data));
+
+ bq2415x_reset_chip(bq);
+
+ ret = bq2415x_power_supply_init(bq);
+ if (ret) {
+ dev_err(bq->dev, "failed to register power supply: %d\n", ret);
+ goto error_2;
+ }
+
+ ret = bq2415x_sysfs_init(bq);
+ if (ret) {
+ dev_err(bq->dev, "failed to create sysfs entries: %d\n", ret);
+ goto error_3;
+ }
+
+ ret = bq2415x_set_defaults(bq);
+ if (ret) {
+ dev_err(bq->dev, "failed to set default values: %d\n", ret);
+ goto error_4;
+ }
+
+ if (bq->init_data.set_mode_hook) {
+ if (bq->init_data.set_mode_hook(
+ bq2415x_hook_function, bq)) {
+ bq->automode = 1;
+ bq2415x_set_mode(bq, bq->reported_mode);
+ dev_info(bq->dev, "automode enabled\n");
+ } else {
+ bq->automode = -1;
+ dev_info(bq->dev, "automode failed\n");
+ }
+ } else {
+ bq->automode = -1;
+ dev_info(bq->dev, "automode not supported\n");
+ }
+
+ INIT_DELAYED_WORK(&bq->work, bq2415x_timer_work);
+ bq2415x_set_autotimer(bq, 1);
+
+ dev_info(bq->dev, "driver registered\n");
+ return 0;
+
+error_4:
+ bq2415x_sysfs_exit(bq);
+error_3:
+ bq2415x_power_supply_exit(bq);
+error_2:
+ kfree(name);
+error_1:
+ mutex_lock(&bq2415x_id_mutex);
+ idr_remove(&bq2415x_id, num);
+ mutex_unlock(&bq2415x_id_mutex);
+
+ return ret;
+}
+
+/* main bq2415x remove function */
+
+static int bq2415x_remove(struct i2c_client *client)
+{
+ struct bq2415x_device *bq = i2c_get_clientdata(client);
+
+ if (bq->init_data.set_mode_hook)
+ bq->init_data.set_mode_hook(NULL, NULL);
+
+ bq2415x_sysfs_exit(bq);
+ bq2415x_power_supply_exit(bq);
+
+ bq2415x_reset_chip(bq);
+
+ mutex_lock(&bq2415x_id_mutex);
+ idr_remove(&bq2415x_id, bq->id);
+ mutex_unlock(&bq2415x_id_mutex);
+
+ dev_info(bq->dev, "driver unregistered\n");
+
+ kfree(bq->name);
+
+ return 0;
+}
+
+static const struct i2c_device_id bq2415x_i2c_id_table[] = {
+ { "bq2415x", BQUNKNOWN },
+ { "bq24150", BQ24150 },
+ { "bq24150a", BQ24150A },
+ { "bq24151", BQ24151 },
+ { "bq24151a", BQ24151A },
+ { "bq24152", BQ24152 },
+ { "bq24153", BQ24153 },
+ { "bq24153a", BQ24153A },
+ { "bq24155", BQ24155 },
+ { "bq24156", BQ24156 },
+ { "bq24156a", BQ24156A },
+ { "bq24158", BQ24158 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, bq2415x_i2c_id_table);
+
+static struct i2c_driver bq2415x_driver = {
+ .driver = {
+ .name = "bq2415x-charger",
+ },
+ .probe = bq2415x_probe,
+ .remove = bq2415x_remove,
+ .id_table = bq2415x_i2c_id_table,
+};
+module_i2c_driver(bq2415x_driver);
+
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_DESCRIPTION("bq2415x charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index e0edaf7de54b..8ccf5d7d0add 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -230,6 +230,14 @@ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
*/
static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di)
{
+ int flags;
+ bool is_bq27500 = di->chip == BQ27500;
+ bool is_higher = bq27xxx_is_chip_version_higher(di);
+
+ flags = bq27x00_read(di, BQ27x00_REG_FLAGS, !is_bq27500);
+ if (flags >= 0 && !is_higher && (flags & BQ27000_FLAG_CI))
+ return -ENODATA;
+
return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC);
}
@@ -291,7 +299,7 @@ static int bq27x00_battery_read_energy(struct bq27x00_device_info *di)
}
/*
- * Return the battery temperature in tenths of degree Celsius
+ * Return the battery temperature in tenths of degree Kelvin
* Or < 0 if something fails.
*/
static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
@@ -304,10 +312,8 @@ static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
return temp;
}
- if (bq27xxx_is_chip_version_higher(di))
- temp -= 2731;
- else
- temp = ((temp * 5) - 5463) / 2;
+ if (!bq27xxx_is_chip_version_higher(di))
+ temp = 5 * temp / 2;
return temp;
}
@@ -440,7 +446,6 @@ static void bq27x00_update(struct bq27x00_device_info *di)
cache.temperature = bq27x00_battery_read_temperature(di);
if (!is_bq27425)
cache.cycle_count = bq27x00_battery_read_cyct(di);
- cache.cycle_count = bq27x00_battery_read_cyct(di);
cache.power_avg =
bq27x00_battery_read_pwr_avg(di, BQ27x00_POWER_AVG);
@@ -634,6 +639,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_TEMP:
ret = bq27x00_simple_value(di->cache.temperature, val);
+ if (ret == 0)
+ val->intval -= 2731;
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
ret = bq27x00_simple_value(di->cache.time_to_empty, val);
@@ -688,7 +695,6 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
int ret;
di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
- di->chip = BQ27425;
if (di->chip == BQ27425) {
di->bat.properties = bq27425_battery_props;
di->bat.num_properties = ARRAY_SIZE(bq27425_battery_props);
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index adb3a4b59cb3..8acc3f8d303c 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -239,44 +239,37 @@ static bool is_full_charged(struct charger_manager *cm)
int uV;
/* If there is no battery, it cannot be charged */
- if (!is_batt_present(cm)) {
- val.intval = 0;
- goto out;
- }
+ if (!is_batt_present(cm))
+ return false;
if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) {
+ val.intval = 0;
+
/* Not full if capacity of fuel gauge isn't full */
ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
POWER_SUPPLY_PROP_CHARGE_FULL, &val);
- if (!ret && val.intval > desc->fullbatt_full_capacity) {
- val.intval = 1;
- goto out;
- }
+ if (!ret && val.intval > desc->fullbatt_full_capacity)
+ return true;
}
/* Full, if it's over the fullbatt voltage */
if (desc->fullbatt_uV > 0) {
ret = get_batt_uV(cm, &uV);
- if (!ret && uV >= desc->fullbatt_uV) {
- val.intval = 1;
- goto out;
- }
+ if (!ret && uV >= desc->fullbatt_uV)
+ return true;
}
/* Full, if the capacity is more than fullbatt_soc */
if (cm->fuel_gauge && desc->fullbatt_soc > 0) {
+ val.intval = 0;
+
ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
POWER_SUPPLY_PROP_CAPACITY, &val);
- if (!ret && val.intval >= desc->fullbatt_soc) {
- val.intval = 1;
- goto out;
- }
+ if (!ret && val.intval >= desc->fullbatt_soc)
+ return true;
}
- val.intval = 0;
-
-out:
- return val.intval ? true : false;
+ return false;
}
/**
@@ -489,8 +482,9 @@ static void fullbatt_vchk(struct work_struct *work)
return;
}
- diff = desc->fullbatt_uV;
- diff -= batt_uV;
+ diff = desc->fullbatt_uV - batt_uV;
+ if (diff < 0)
+ return;
dev_info(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
@@ -675,15 +669,21 @@ static void _setup_polling(struct work_struct *work)
WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
". try it later. %s\n", __func__);
+ /*
+ * Use mod_delayed_work() iff the next polling interval should
+ * occur before the currently scheduled one. If @cm_monitor_work
+ * isn't active, the end result is the same, so no need to worry
+ * about stale @next_polling.
+ */
_next_polling = jiffies + polling_jiffy;
- if (!delayed_work_pending(&cm_monitor_work) ||
- (delayed_work_pending(&cm_monitor_work) &&
- time_after(next_polling, _next_polling))) {
- next_polling = jiffies + polling_jiffy;
+ if (time_before(_next_polling, next_polling)) {
mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+ next_polling = _next_polling;
+ } else {
+ if (queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy))
+ next_polling = _next_polling;
}
-
out:
mutex_unlock(&cm_list_mtx);
}
@@ -757,8 +757,7 @@ static void misc_event_handler(struct charger_manager *cm,
if (cm_suspended)
device_set_wakeup_capable(cm->dev, true);
- if (!delayed_work_pending(&cm_monitor_work) &&
- is_polling_required(cm) && cm->desc->polling_interval_ms)
+ if (is_polling_required(cm) && cm->desc->polling_interval_ms)
schedule_work(&setup_polling);
uevent_notify(cm, default_event_names[type]);
}
@@ -1176,8 +1175,7 @@ static int charger_extcon_notifier(struct notifier_block *self,
* when charger cable is attached.
*/
if (cable->attached && is_polling_required(cable->cm)) {
- if (work_pending(&setup_polling))
- cancel_work_sync(&setup_polling);
+ cancel_work_sync(&setup_polling);
schedule_work(&setup_polling);
}
@@ -1221,6 +1219,55 @@ static int charger_extcon_init(struct charger_manager *cm,
return ret;
}
+/**
+ * charger_manager_register_extcon - Register extcon device to receive the
+ * state of the charger cable.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * This function supports the EXTCON (External Connector) subsystem, which is
+ * used to detect the state of charger cables, enable or disable the charger
+ * (regulator), and select the charger cable for charging among a number of
+ * external cables according to the H/W board policy.
+ */
+static int charger_manager_register_extcon(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+ struct charger_regulator *charger;
+ int ret = 0;
+ int i;
+ int j;
+
+ for (i = 0; i < desc->num_charger_regulators; i++) {
+ charger = &desc->charger_regulators[i];
+
+ charger->consumer = regulator_get(cm->dev,
+ charger->regulator_name);
+ if (charger->consumer == NULL) {
+ dev_err(cm->dev, "Cannot find charger(%s)n",
+ charger->regulator_name);
+ ret = -EINVAL;
+ goto err;
+ }
+ charger->cm = cm;
+
+ for (j = 0; j < charger->num_cables; j++) {
+ struct charger_cable *cable = &charger->cables[j];
+
+ ret = charger_extcon_init(cm, cable);
+ if (ret < 0) {
+ dev_err(cm->dev, "Cannot initialize charger(%s)n",
+ charger->regulator_name);
+ goto err;
+ }
+ cable->charger = charger;
+ cable->cm = cm;
+ }
+ }
+
+err:
+ return ret;
+}
+
/* help function of sysfs node to control charger(regulator) */
static ssize_t charger_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1280,7 +1327,7 @@ static ssize_t charger_externally_control_store(struct device *dev,
for (i = 0; i < desc->num_charger_regulators; i++) {
if (&desc->charger_regulators[i] != charger &&
- !desc->charger_regulators[i].externally_control) {
+ !desc->charger_regulators[i].externally_control) {
/*
* At least, one charger is controlled by
* charger-manager
@@ -1309,13 +1356,107 @@ static ssize_t charger_externally_control_store(struct device *dev,
return count;
}
+/**
+ * charger_manager_register_sysfs - Register sysfs entries for each charger
+ * @cm: the Charger Manager representing the battery.
+ *
+ * This function adds sysfs entries for each charger (regulator) so that the
+ * charger can be controlled from user-space. If a board uses more than one
+ * charger but only needs a specific one in a given case, depending on the
+ * user scenario or hardware restrictions, the user writes 1 or 0 (zero) to
+ * '/sys/class/power_supply/battery/charger.[index]/externally_control'. For
+ * example, writing 1 to '/sys/class/power_supply/battery/charger.[index]/
+ * externally_control' means this charger is not controlled by charger-manager
+ * and its regulator always stays off.
+ */
+static int charger_manager_register_sysfs(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+ struct charger_regulator *charger;
+ int chargers_externally_control = 1;
+ char buf[11];
+ char *str;
+ int ret = 0;
+ int i;
+
+ /* Create sysfs entry to control charger(regulator) */
+ for (i = 0; i < desc->num_charger_regulators; i++) {
+ charger = &desc->charger_regulators[i];
+
+ snprintf(buf, 10, "charger.%d", i);
+ str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
+ if (!str) {
+ dev_err(cm->dev, "Cannot allocate memory: %s\n",
+ charger->regulator_name);
+ ret = -ENOMEM;
+ goto err;
+ }
+ strcpy(str, buf);
+
+ charger->attrs[0] = &charger->attr_name.attr;
+ charger->attrs[1] = &charger->attr_state.attr;
+ charger->attrs[2] = &charger->attr_externally_control.attr;
+ charger->attrs[3] = NULL;
+ charger->attr_g.name = str;
+ charger->attr_g.attrs = charger->attrs;
+
+ sysfs_attr_init(&charger->attr_name.attr);
+ charger->attr_name.attr.name = "name";
+ charger->attr_name.attr.mode = 0444;
+ charger->attr_name.show = charger_name_show;
+
+ sysfs_attr_init(&charger->attr_state.attr);
+ charger->attr_state.attr.name = "state";
+ charger->attr_state.attr.mode = 0444;
+ charger->attr_state.show = charger_state_show;
+
+ sysfs_attr_init(&charger->attr_externally_control.attr);
+ charger->attr_externally_control.attr.name
+ = "externally_control";
+ charger->attr_externally_control.attr.mode = 0644;
+ charger->attr_externally_control.show
+ = charger_externally_control_show;
+ charger->attr_externally_control.store
+ = charger_externally_control_store;
+
+ if (!desc->charger_regulators[i].externally_control ||
+ !chargers_externally_control)
+ chargers_externally_control = 0;
+
+ dev_info(cm->dev, "'%s' regulator's externally_control "
+ "is %d\n", charger->regulator_name,
+ charger->externally_control);
+
+ ret = sysfs_create_group(&cm->charger_psy.dev->kobj,
+ &charger->attr_g);
+ if (ret < 0) {
+ dev_err(cm->dev, "Cannot create sysfs entry"
+ "of %s regulator\n",
+ charger->regulator_name);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (chargers_externally_control) {
+ dev_err(cm->dev, "Cannot register regulator because "
+ "charger-manager must need at least "
+ "one charger for charging battery\n");
+
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ return ret;
+}
+
static int charger_manager_probe(struct platform_device *pdev)
{
struct charger_desc *desc = dev_get_platdata(&pdev->dev);
struct charger_manager *cm;
int ret = 0, i = 0;
int j = 0;
- int chargers_externally_control = 1;
union power_supply_propval val;
if (g_desc && !rtc_dev && g_desc->rtc_name) {
@@ -1446,11 +1587,10 @@ static int charger_manager_probe(struct platform_device *pdev)
memcpy(&cm->charger_psy, &psy_default, sizeof(psy_default));
- if (!desc->psy_name) {
+ if (!desc->psy_name)
strncpy(cm->psy_name_buf, psy_default.name, PSY_NAME_MAX);
- } else {
+ else
strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
- }
cm->charger_psy.name = cm->psy_name_buf;
/* Allocate for psy properties because they may vary */
@@ -1502,105 +1642,19 @@ static int charger_manager_probe(struct platform_device *pdev)
goto err_register;
}
- for (i = 0 ; i < desc->num_charger_regulators ; i++) {
- struct charger_regulator *charger
- = &desc->charger_regulators[i];
- char buf[11];
- char *str;
-
- charger->consumer = regulator_get(&pdev->dev,
- charger->regulator_name);
- if (charger->consumer == NULL) {
- dev_err(&pdev->dev, "Cannot find charger(%s)n",
- charger->regulator_name);
- ret = -EINVAL;
- goto err_chg_get;
- }
- charger->cm = cm;
-
- for (j = 0 ; j < charger->num_cables ; j++) {
- struct charger_cable *cable = &charger->cables[j];
-
- ret = charger_extcon_init(cm, cable);
- if (ret < 0) {
- dev_err(&pdev->dev, "Cannot find charger(%s)n",
- charger->regulator_name);
- goto err_extcon;
- }
- cable->charger = charger;
- cable->cm = cm;
- }
-
- /* Create sysfs entry to control charger(regulator) */
- snprintf(buf, 10, "charger.%d", i);
- str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
- if (!str) {
- for (i--; i >= 0; i--) {
- charger = &desc->charger_regulators[i];
- kfree(charger->attr_g.name);
- }
- ret = -ENOMEM;
-
- goto err_extcon;
- }
- strcpy(str, buf);
-
- charger->attrs[0] = &charger->attr_name.attr;
- charger->attrs[1] = &charger->attr_state.attr;
- charger->attrs[2] = &charger->attr_externally_control.attr;
- charger->attrs[3] = NULL;
- charger->attr_g.name = str;
- charger->attr_g.attrs = charger->attrs;
-
- sysfs_attr_init(&charger->attr_name.attr);
- charger->attr_name.attr.name = "name";
- charger->attr_name.attr.mode = 0444;
- charger->attr_name.show = charger_name_show;
-
- sysfs_attr_init(&charger->attr_state.attr);
- charger->attr_state.attr.name = "state";
- charger->attr_state.attr.mode = 0444;
- charger->attr_state.show = charger_state_show;
-
- sysfs_attr_init(&charger->attr_externally_control.attr);
- charger->attr_externally_control.attr.name
- = "externally_control";
- charger->attr_externally_control.attr.mode = 0644;
- charger->attr_externally_control.show
- = charger_externally_control_show;
- charger->attr_externally_control.store
- = charger_externally_control_store;
-
- if (!desc->charger_regulators[i].externally_control ||
- !chargers_externally_control) {
- chargers_externally_control = 0;
- }
- dev_info(&pdev->dev, "'%s' regulator's externally_control"
- "is %d\n", charger->regulator_name,
- charger->externally_control);
-
- ret = sysfs_create_group(&cm->charger_psy.dev->kobj,
- &charger->attr_g);
- if (ret < 0) {
- dev_info(&pdev->dev, "Cannot create sysfs entry"
- "of %s regulator\n",
- charger->regulator_name);
- }
- }
-
- if (chargers_externally_control) {
- dev_err(&pdev->dev, "Cannot register regulator because "
- "charger-manager must need at least "
- "one charger for charging battery\n");
-
- ret = -EINVAL;
- goto err_chg_enable;
+ /* Register extcon device for charger cable */
+ ret = charger_manager_register_extcon(cm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot initialize extcon device\n");
+ goto err_reg_extcon;
}
- ret = try_charger_enable(cm, true);
- if (ret) {
- dev_err(&pdev->dev, "Cannot enable charger regulators\n");
- goto err_chg_enable;
+ /* Register sysfs entry for charger(regulator) */
+ ret = charger_manager_register_sysfs(cm);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Cannot initialize sysfs entry of regulator\n");
+ goto err_reg_sysfs;
}
/* Add to the list */
@@ -1619,27 +1673,28 @@ static int charger_manager_probe(struct platform_device *pdev)
return 0;
-err_chg_enable:
+err_reg_sysfs:
for (i = 0; i < desc->num_charger_regulators; i++) {
struct charger_regulator *charger;
charger = &desc->charger_regulators[i];
sysfs_remove_group(&cm->charger_psy.dev->kobj,
&charger->attr_g);
+
kfree(charger->attr_g.name);
}
-err_extcon:
- for (i = 0 ; i < desc->num_charger_regulators ; i++) {
- struct charger_regulator *charger
- = &desc->charger_regulators[i];
- for (j = 0 ; j < charger->num_cables ; j++) {
+err_reg_extcon:
+ for (i = 0; i < desc->num_charger_regulators; i++) {
+ struct charger_regulator *charger;
+
+ charger = &desc->charger_regulators[i];
+ for (j = 0; j < charger->num_cables; j++) {
struct charger_cable *cable = &charger->cables[j];
extcon_unregister_interest(&cable->extcon_dev);
}
- }
-err_chg_get:
- for (i = 0 ; i < desc->num_charger_regulators ; i++)
+
regulator_put(desc->charger_regulators[i].consumer);
+ }
power_supply_unregister(&cm->charger_psy);
err_register:
@@ -1667,10 +1722,8 @@ static int charger_manager_remove(struct platform_device *pdev)
list_del(&cm->entry);
mutex_unlock(&cm_list_mtx);
- if (work_pending(&setup_polling))
- cancel_work_sync(&setup_polling);
- if (delayed_work_pending(&cm_monitor_work))
- cancel_delayed_work_sync(&cm_monitor_work);
+ cancel_work_sync(&setup_polling);
+ cancel_delayed_work_sync(&cm_monitor_work);
for (i = 0 ; i < desc->num_charger_regulators ; i++) {
struct charger_regulator *charger
@@ -1739,8 +1792,7 @@ static int cm_suspend_prepare(struct device *dev)
cm_suspended = true;
}
- if (delayed_work_pending(&cm->fullbatt_vchk_work))
- cancel_delayed_work(&cm->fullbatt_vchk_work);
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
cm->status_save_batt = is_batt_present(cm);
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index 94762e67e22b..e8c5a391a498 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -22,6 +22,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/notifier.h>
#define DA9030_FAULT_LOG 0x0a
#define DA9030_FAULT_LOG_OVER_TEMP (1 << 7)
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
index bb0df8917adc..08193feb3b08 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/da9052-battery.c
@@ -337,7 +337,7 @@ static unsigned char da9052_determine_vc_tbl_index(unsigned char adc_temp)
if (adc_temp > vc_tbl_ref[DA9052_VC_TBL_REF_SZ - 1])
return DA9052_VC_TBL_REF_SZ - 1;
- for (i = 0; i < DA9052_VC_TBL_REF_SZ; i++) {
+ for (i = 0; i < DA9052_VC_TBL_REF_SZ - 1; i++) {
if ((adc_temp > vc_tbl_ref[i]) &&
(adc_temp <= DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1])))
return i;
@@ -440,8 +440,10 @@ static int da9052_bat_check_health(struct da9052_battery *bat, int *health)
static irqreturn_t da9052_bat_irq(int irq, void *data)
{
struct da9052_battery *bat = data;
+ int virq;
- irq -= bat->da9052->irq_base;
+ virq = regmap_irq_get_virq(bat->da9052->irq_data, irq);
+ irq -= virq;
if (irq == DA9052_IRQ_CHGEND)
bat->status = POWER_SUPPLY_STATUS_FULL;
@@ -567,7 +569,7 @@ static struct power_supply template_battery = {
.get_property = da9052_bat_get_property,
};
-static const char *const da9052_bat_irqs[] = {
+static char *da9052_bat_irqs[] = {
"BATT TEMP",
"DCIN DET",
"DCIN REM",
@@ -576,12 +578,20 @@ static const char *const da9052_bat_irqs[] = {
"CHG END",
};
+static int da9052_bat_irq_bits[] = {
+ DA9052_IRQ_TBAT,
+ DA9052_IRQ_DCIN,
+ DA9052_IRQ_DCINREM,
+ DA9052_IRQ_VBUS,
+ DA9052_IRQ_VBUSREM,
+ DA9052_IRQ_CHGEND,
+};
+
static s32 da9052_bat_probe(struct platform_device *pdev)
{
struct da9052_pdata *pdata;
struct da9052_battery *bat;
int ret;
- int irq;
int i;
bat = kzalloc(sizeof(struct da9052_battery), GFP_KERNEL);
@@ -602,15 +612,14 @@ static s32 da9052_bat_probe(struct platform_device *pdev)
bat->psy.use_for_apm = 1;
for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
- ret = request_threaded_irq(bat->da9052->irq_base + irq,
- NULL, da9052_bat_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- da9052_bat_irqs[i], bat);
+ ret = da9052_request_irq(bat->da9052,
+ da9052_bat_irq_bits[i], da9052_bat_irqs[i],
+ da9052_bat_irq, bat);
+
if (ret != 0) {
dev_err(bat->da9052->dev,
- "DA9052 failed to request %s IRQ %d: %d\n",
- da9052_bat_irqs[i], irq, ret);
+ "DA9052 failed to request %s IRQ: %d\n",
+ da9052_bat_irqs[i], ret);
goto err;
}
}
@@ -623,23 +632,20 @@ static s32 da9052_bat_probe(struct platform_device *pdev)
return 0;
err:
- while (--i >= 0) {
- irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
- free_irq(bat->da9052->irq_base + irq, bat);
- }
+ while (--i >= 0)
+ da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
+
kfree(bat);
return ret;
}
static int da9052_bat_remove(struct platform_device *pdev)
{
int i;
- int irq;
struct da9052_battery *bat = platform_get_drvdata(pdev);
- for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
- free_irq(bat->da9052->irq_base + irq, bat);
- }
+ for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++)
+ da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
+
power_supply_unregister(&bat->psy);
kfree(bat);
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 6bb6e2f5ea81..e7301b3ed623 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -7,6 +7,8 @@
*
* DS2786 added by Yulia Vilensky <vilensky@compulab.co.il>
*
+ * UEvent sending added by Evgeny Romanov <romanov@neurosoft.ru>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -19,6 +21,7 @@
#include <linux/errno.h>
#include <linux/swab.h>
#include <linux/i2c.h>
+#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
@@ -40,6 +43,8 @@
#define DS2786_CURRENT_UNITS 25
+#define DS278x_DELAY 1000
+
struct ds278x_info;
struct ds278x_battery_ops {
@@ -54,8 +59,11 @@ struct ds278x_info {
struct i2c_client *client;
struct power_supply battery;
struct ds278x_battery_ops *ops;
+ struct delayed_work bat_work;
int id;
int rsns;
+ int capacity;
+ int status; /* State Of Charge */
};
static DEFINE_IDR(battery_id);
@@ -80,13 +88,13 @@ static inline int ds278x_read_reg16(struct ds278x_info *info, int reg_msb,
{
int ret;
- ret = swab16(i2c_smbus_read_word_data(info->client, reg_msb));
+ ret = i2c_smbus_read_word_data(info->client, reg_msb);
if (ret < 0) {
dev_err(&info->client->dev, "register read failed\n");
return ret;
}
- *val = ret;
+ *val = swab16(ret);
return 0;
}
@@ -220,6 +228,8 @@ static int ds278x_get_status(struct ds278x_info *info, int *status)
if (err)
return err;
+ info->capacity = capacity;
+
if (capacity == 100)
*status = POWER_SUPPLY_STATUS_FULL;
else if (current_uA == 0)
@@ -267,6 +277,27 @@ static int ds278x_battery_get_property(struct power_supply *psy,
return ret;
}
+static void ds278x_bat_update(struct ds278x_info *info)
+{
+ int old_status = info->status;
+ int old_capacity = info->capacity;
+
+ ds278x_get_status(info, &info->status);
+
+ if ((old_status != info->status) || (old_capacity != info->capacity))
+ power_supply_changed(&info->battery);
+}
+
+static void ds278x_bat_work(struct work_struct *work)
+{
+ struct ds278x_info *info;
+
+ info = container_of(work, struct ds278x_info, bat_work.work);
+ ds278x_bat_update(info);
+
+ schedule_delayed_work(&info->bat_work, DS278x_DELAY);
+}
+
static enum power_supply_property ds278x_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CAPACITY,
@@ -295,10 +326,39 @@ static int ds278x_battery_remove(struct i2c_client *client)
idr_remove(&battery_id, info->id);
mutex_unlock(&battery_lock);
+ cancel_delayed_work(&info->bat_work);
+
kfree(info);
return 0;
}
+#ifdef CONFIG_PM
+
+static int ds278x_suspend(struct i2c_client *client,
+ pm_message_t state)
+{
+ struct ds278x_info *info = i2c_get_clientdata(client);
+
+ cancel_delayed_work(&info->bat_work);
+ return 0;
+}
+
+static int ds278x_resume(struct i2c_client *client)
+{
+ struct ds278x_info *info = i2c_get_clientdata(client);
+
+ schedule_delayed_work(&info->bat_work, DS278x_DELAY);
+ return 0;
+}
+
+#else
+
+#define ds278x_suspend NULL
+#define ds278x_resume NULL
+
+#endif /* CONFIG_PM */
+
+
enum ds278x_num_id {
DS2782 = 0,
DS2786,
@@ -368,10 +428,17 @@ static int ds278x_battery_probe(struct i2c_client *client,
info->ops = &ds278x_ops[id->driver_data];
ds278x_power_supply_init(&info->battery);
+ info->capacity = 100;
+ info->status = POWER_SUPPLY_STATUS_FULL;
+
+ INIT_DELAYED_WORK(&info->bat_work, ds278x_bat_work);
+
ret = power_supply_register(&client->dev, &info->battery);
if (ret) {
dev_err(&client->dev, "failed to register battery\n");
goto fail_register;
+ } else {
+ schedule_delayed_work(&info->bat_work, DS278x_DELAY);
}
return 0;
@@ -401,6 +468,8 @@ static struct i2c_driver ds278x_battery_driver = {
},
.probe = ds278x_battery_probe,
.remove = ds278x_battery_remove,
+ .suspend = ds278x_suspend,
+ .resume = ds278x_resume,
.id_table = ds278x_id,
};
module_i2c_driver(ds278x_battery_driver);
diff --git a/drivers/power/generic-adc-battery.c b/drivers/power/generic-adc-battery.c
index e902b088d52c..8cb5d7f67ace 100644
--- a/drivers/power/generic-adc-battery.c
+++ b/drivers/power/generic-adc-battery.c
@@ -263,9 +263,6 @@ static int gab_probe(struct platform_device *pdev)
psy->external_power_changed = gab_ext_power_changed;
adc_bat->pdata = pdata;
- /* calculate the total number of channels */
- chan = ARRAY_SIZE(gab_chan_name);
-
/*
* copying the static properties and allocating extra memory for holding
* the extra configurable properties received from platform data.
@@ -279,17 +276,19 @@ static int gab_probe(struct platform_device *pdev)
}
memcpy(psy->properties, gab_props, sizeof(gab_props));
- properties = psy->properties + sizeof(gab_props);
+ properties = (enum power_supply_property *)
+ ((char *)psy->properties + sizeof(gab_props));
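+ /*
+ * The cast through char * makes sizeof(gab_props) a byte offset, so
+ * 'properties' now points just past the static entries copied above.
+ */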
/*
* getting channel from iio and copying the battery properties
* based on the channel supported by consumer device.
*/
for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
- adc_bat->channel[chan] = iio_channel_get(dev_name(&pdev->dev),
- gab_chan_name[chan]);
+ adc_bat->channel[chan] = iio_channel_get(&pdev->dev,
+ gab_chan_name[chan]);
if (IS_ERR(adc_bat->channel[chan])) {
ret = PTR_ERR(adc_bat->channel[chan]);
+ adc_bat->channel[chan] = NULL;
} else {
/* copying properties for supported channels only */
memcpy(properties + sizeof(*(psy->properties)) * index,
@@ -327,7 +326,7 @@ static int gab_probe(struct platform_device *pdev)
ret = request_any_context_irq(irq, gab_charged,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"battery charged", adc_bat);
- if (ret)
+ if (ret < 0)
goto err_gpio;
}
@@ -343,8 +342,10 @@ err_gpio:
gpio_req_fail:
power_supply_unregister(psy);
err_reg_fail:
- for (chan = 0; ARRAY_SIZE(gab_chan_name); chan++)
- iio_channel_release(adc_bat->channel[chan]);
+ for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
+ if (adc_bat->channel[chan])
+ iio_channel_release(adc_bat->channel[chan]);
+ }
second_mem_fail:
kfree(psy->properties);
first_mem_fail:
@@ -364,8 +365,10 @@ static int gab_remove(struct platform_device *pdev)
gpio_free(pdata->gpio_charge_finished);
}
- for (chan = 0; ARRAY_SIZE(gab_chan_name); chan++)
- iio_channel_release(adc_bat->channel[chan]);
+ for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
+ if (adc_bat->channel[chan])
+ iio_channel_release(adc_bat->channel[chan]);
+ }
kfree(adc_bat->psy.properties);
cancel_delayed_work(&adc_bat->bat_work);
diff --git a/drivers/power/goldfish_battery.c b/drivers/power/goldfish_battery.c
new file mode 100644
index 000000000000..c10f460f986f
--- /dev/null
+++ b/drivers/power/goldfish_battery.c
@@ -0,0 +1,236 @@
+/*
+ * Power supply driver for the goldfish emulator
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+struct goldfish_battery_data {
+ void __iomem *reg_base;
+ int irq;
+ spinlock_t lock;
+
+ struct power_supply battery;
+ struct power_supply ac;
+};
+
+#define GOLDFISH_BATTERY_READ(data, addr) \
+ (readl(data->reg_base + addr))
+#define GOLDFISH_BATTERY_WRITE(data, addr, x) \
+ (writel(x, data->reg_base + addr))
+
+/*
+ * Temporary variable used between goldfish_battery_probe() and
+ * goldfish_battery_open().
+ */
+static struct goldfish_battery_data *battery_data;
+
+enum {
+ /* status register */
+ BATTERY_INT_STATUS = 0x00,
+ /* set this to enable IRQ */
+ BATTERY_INT_ENABLE = 0x04,
+
+ BATTERY_AC_ONLINE = 0x08,
+ BATTERY_STATUS = 0x0C,
+ BATTERY_HEALTH = 0x10,
+ BATTERY_PRESENT = 0x14,
+ BATTERY_CAPACITY = 0x18,
+
+ BATTERY_STATUS_CHANGED = 1U << 0,
+ AC_STATUS_CHANGED = 1U << 1,
+ BATTERY_INT_MASK = BATTERY_STATUS_CHANGED | AC_STATUS_CHANGED,
+};
+
+
+static int goldfish_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct goldfish_battery_data *data = container_of(psy,
+ struct goldfish_battery_data, ac);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_AC_ONLINE);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int goldfish_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct goldfish_battery_data *data = container_of(psy,
+ struct goldfish_battery_data, battery);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_STATUS);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_HEALTH);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_PRESENT);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CAPACITY);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property goldfish_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static enum power_supply_property goldfish_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static irqreturn_t goldfish_battery_interrupt(int irq, void *dev_id)
+{
+ unsigned long irq_flags;
+ struct goldfish_battery_data *data = dev_id;
+ uint32_t status;
+
+ spin_lock_irqsave(&data->lock, irq_flags);
+
+ /* read status flags, which will clear the interrupt */
+ status = GOLDFISH_BATTERY_READ(data, BATTERY_INT_STATUS);
+ status &= BATTERY_INT_MASK;
+
+ if (status & BATTERY_STATUS_CHANGED)
+ power_supply_changed(&data->battery);
+ if (status & AC_STATUS_CHANGED)
+ power_supply_changed(&data->ac);
+
+ spin_unlock_irqrestore(&data->lock, irq_flags);
+ return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+
+static int goldfish_battery_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct goldfish_battery_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&data->lock);
+
+ data->battery.properties = goldfish_battery_props;
+ data->battery.num_properties = ARRAY_SIZE(goldfish_battery_props);
+ data->battery.get_property = goldfish_battery_get_property;
+ data->battery.name = "battery";
+ data->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+
+ data->ac.properties = goldfish_ac_props;
+ data->ac.num_properties = ARRAY_SIZE(goldfish_ac_props);
+ data->ac.get_property = goldfish_ac_get_property;
+ data->ac.name = "ac";
+ data->ac.type = POWER_SUPPLY_TYPE_MAINS;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "platform_get_resource failed\n");
+ return -ENODEV;
+ }
+
+ data->reg_base = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1);
+ if (data->reg_base == NULL) {
+ dev_err(&pdev->dev, "unable to remap MMIO\n");
+ return -ENOMEM;
+ }
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, data->irq, goldfish_battery_interrupt,
+ IRQF_SHARED, pdev->name, data);
+ if (ret)
+ return ret;
+
+ ret = power_supply_register(&pdev->dev, &data->ac);
+ if (ret)
+ return ret;
+
+ ret = power_supply_register(&pdev->dev, &data->battery);
+ if (ret) {
+ power_supply_unregister(&data->ac);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, data);
+ battery_data = data;
+
+ GOLDFISH_BATTERY_WRITE(data, BATTERY_INT_ENABLE, BATTERY_INT_MASK);
+ return 0;
+}
+
+static int goldfish_battery_remove(struct platform_device *pdev)
+{
+ struct goldfish_battery_data *data = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&data->battery);
+ power_supply_unregister(&data->ac);
+ battery_data = NULL;
+ return 0;
+}
+
+static struct platform_driver goldfish_battery_device = {
+ .probe = goldfish_battery_probe,
+ .remove = goldfish_battery_remove,
+ .driver = {
+ .name = "goldfish-battery"
+ }
+};
+module_platform_driver(goldfish_battery_device);
+
+MODULE_AUTHOR("Mike Lockwood lockwood@android.com");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Battery driver for the Goldfish emulator");
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 74ac69e0687f..c675553d4647 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/power_supply.h>
@@ -33,7 +34,6 @@ struct jz_battery {
struct jz_battery_platform_data *pdata;
struct platform_device *pdev;
- struct resource *mem;
void __iomem *base;
int irq;
@@ -244,13 +244,14 @@ static int jz_battery_probe(struct platform_device *pdev)
struct jz_battery_platform_data *pdata = pdev->dev.parent->platform_data;
struct jz_battery *jz_battery;
struct power_supply *battery;
+ struct resource *mem;
if (!pdata) {
dev_err(&pdev->dev, "No platform_data supplied\n");
return -ENXIO;
}
- jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL);
+ jz_battery = devm_kzalloc(&pdev->dev, sizeof(*jz_battery), GFP_KERNEL);
if (!jz_battery) {
dev_err(&pdev->dev, "Failed to allocate driver structure\n");
return -ENOMEM;
@@ -260,33 +261,15 @@ static int jz_battery_probe(struct platform_device *pdev)
jz_battery->irq = platform_get_irq(pdev, 0);
if (jz_battery->irq < 0) {
- ret = jz_battery->irq;
dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
- goto err_free;
+ return jz_battery->irq;
}
- jz_battery->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!jz_battery->mem) {
- ret = -ENOENT;
- dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
- goto err_free;
- }
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jz_battery->mem = request_mem_region(jz_battery->mem->start,
- resource_size(jz_battery->mem), pdev->name);
- if (!jz_battery->mem) {
- ret = -EBUSY;
- dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- goto err_free;
- }
-
- jz_battery->base = ioremap_nocache(jz_battery->mem->start,
- resource_size(jz_battery->mem));
- if (!jz_battery->base) {
- ret = -EBUSY;
- dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
- goto err_release_mem_region;
- }
+ jz_battery->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(jz_battery->base))
+ return PTR_ERR(jz_battery->base);
battery = &jz_battery->battery;
battery->name = pdata->info.name;
@@ -309,7 +292,7 @@ static int jz_battery_probe(struct platform_device *pdev)
jz_battery);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %d\n", ret);
- goto err_iounmap;
+ goto err;
}
disable_irq(jz_battery->irq);
@@ -366,13 +349,8 @@ err_free_gpio:
gpio_free(jz_battery->pdata->gpio_charge);
err_free_irq:
free_irq(jz_battery->irq, jz_battery);
-err_iounmap:
+err:
platform_set_drvdata(pdev, NULL);
- iounmap(jz_battery->base);
-err_release_mem_region:
- release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem));
-err_free:
- kfree(jz_battery);
return ret;
}
@@ -392,10 +370,6 @@ static int jz_battery_remove(struct platform_device *pdev)
free_irq(jz_battery->irq, jz_battery);
- iounmap(jz_battery->base);
- release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem));
- kfree(jz_battery);
-
return 0;
}
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
index 4ee71a90e248..5ef41b819172 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/lp8727_charger.c
@@ -367,28 +367,28 @@ static int lp8727_battery_get_property(struct power_supply *psy,
return -EINVAL;
if (pdata->get_batt_present)
- val->intval = pchg->pdata->get_batt_present();
+ val->intval = pdata->get_batt_present();
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (!pdata)
return -EINVAL;
if (pdata->get_batt_level)
- val->intval = pchg->pdata->get_batt_level();
+ val->intval = pdata->get_batt_level();
break;
case POWER_SUPPLY_PROP_CAPACITY:
if (!pdata)
return -EINVAL;
if (pdata->get_batt_capacity)
- val->intval = pchg->pdata->get_batt_capacity();
+ val->intval = pdata->get_batt_capacity();
break;
case POWER_SUPPLY_PROP_TEMP:
if (!pdata)
return -EINVAL;
if (pdata->get_batt_temp)
- val->intval = pchg->pdata->get_batt_temp();
+ val->intval = pdata->get_batt_temp();
break;
default:
break;
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
index a1c51ac117fd..6d1f452810b8 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/lp8788-charger.c
@@ -235,25 +235,14 @@ static int lp8788_get_battery_present(struct lp8788_charger *pchg,
return 0;
}
-static int lp8788_get_vbatt_adc(struct lp8788_charger *pchg,
- unsigned int *result)
+static int lp8788_get_vbatt_adc(struct lp8788_charger *pchg, int *result)
{
struct iio_channel *channel = pchg->chan[LP8788_VBATT];
- int scaleint;
- int scalepart;
- int ret;
if (!channel)
return -EINVAL;
- ret = iio_read_channel_scale(channel, &scaleint, &scalepart);
- if (ret != IIO_VAL_INT_PLUS_MICRO)
- return -EINVAL;
-
- /* unit: mV */
- *result = (scaleint + scalepart * 1000000) / 1000;
-
- return 0;
+ return iio_read_channel_processed(channel, result);
}
static int lp8788_get_battery_voltage(struct lp8788_charger *pchg,
@@ -268,7 +257,7 @@ static int lp8788_get_battery_capacity(struct lp8788_charger *pchg,
struct lp8788 *lp = pchg->lp;
struct lp8788_charger_platform_data *pdata = pchg->pdata;
unsigned int max_vbatt;
- unsigned int vbatt;
+ int vbatt;
enum lp8788_charging_state state;
u8 data;
int ret;
@@ -304,19 +293,18 @@ static int lp8788_get_battery_temperature(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
struct iio_channel *channel = pchg->chan[LP8788_BATT_TEMP];
- int scaleint;
- int scalepart;
+ int result;
int ret;
if (!channel)
return -EINVAL;
- ret = iio_read_channel_scale(channel, &scaleint, &scalepart);
- if (ret != IIO_VAL_INT_PLUS_MICRO)
+ ret = iio_read_channel_processed(channel, &result);
+ if (ret < 0)
return -EINVAL;
/* unit: 0.1 'C */
- val->intval = (scaleint + scalepart * 1000000) / 100;
+ val->intval = result * 10;
return 0;
}
@@ -379,7 +367,8 @@ static inline bool lp8788_is_valid_charger_register(u8 addr)
return addr >= LP8788_CHG_START && addr <= LP8788_CHG_END;
}
-static int lp8788_update_charger_params(struct lp8788_charger *pchg)
+static int lp8788_update_charger_params(struct platform_device *pdev,
+ struct lp8788_charger *pchg)
{
struct lp8788 *lp = pchg->lp;
struct lp8788_charger_platform_data *pdata = pchg->pdata;
@@ -388,7 +377,7 @@ static int lp8788_update_charger_params(struct lp8788_charger *pchg)
int ret;
if (!pdata || !pdata->chg_params) {
- dev_info(lp->dev, "skip updating charger parameters\n");
+ dev_info(&pdev->dev, "skip updating charger parameters\n");
return 0;
}
@@ -549,7 +538,6 @@ err_free_irq:
static int lp8788_irq_register(struct platform_device *pdev,
struct lp8788_charger *pchg)
{
- struct lp8788 *lp = pchg->lp;
const char *name[] = {
LP8788_CHG_IRQ, LP8788_PRSW_IRQ, LP8788_BATT_IRQ
};
@@ -562,13 +550,13 @@ static int lp8788_irq_register(struct platform_device *pdev,
for (i = 0; i < ARRAY_SIZE(name); i++) {
ret = lp8788_set_irqs(pdev, pchg, name[i]);
if (ret) {
- dev_warn(lp->dev, "irq setup failed: %s\n", name[i]);
+ dev_warn(&pdev->dev, "irq setup failed: %s\n", name[i]);
return ret;
}
}
if (pchg->num_irqs > LP8788_MAX_CHG_IRQS) {
- dev_err(lp->dev, "invalid total number of irqs: %d\n",
+ dev_err(&pdev->dev, "invalid total number of irqs: %d\n",
pchg->num_irqs);
return -EINVAL;
}
@@ -592,53 +580,22 @@ static void lp8788_irq_unregister(struct platform_device *pdev,
}
}
-static void lp8788_setup_adc_channel(struct lp8788_charger *pchg)
+static void lp8788_setup_adc_channel(struct device *dev,
+ struct lp8788_charger *pchg)
{
struct lp8788_charger_platform_data *pdata = pchg->pdata;
- struct device *dev = pchg->lp->dev;
struct iio_channel *chan;
- enum lp8788_adc_id id;
- const char *chan_name[LPADC_MAX] = {
- [LPADC_VBATT_5P5] = "vbatt-5p5",
- [LPADC_VBATT_6P0] = "vbatt-6p0",
- [LPADC_VBATT_5P0] = "vbatt-5p0",
- [LPADC_ADC1] = "adc1",
- [LPADC_ADC2] = "adc2",
- [LPADC_ADC3] = "adc3",
- [LPADC_ADC4] = "adc4",
- };
if (!pdata)
return;
- id = pdata->vbatt_adc;
- switch (id) {
- case LPADC_VBATT_5P5:
- case LPADC_VBATT_6P0:
- case LPADC_VBATT_5P0:
- chan = iio_channel_get(NULL, chan_name[id]);
- pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan;
- break;
- default:
- dev_err(dev, "invalid ADC id for VBATT: %d\n", id);
- pchg->chan[LP8788_VBATT] = NULL;
- break;
- }
+ /* ADC channel for battery voltage */
+ chan = iio_channel_get(dev, pdata->adc_vbatt);
+ pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan;
- id = pdata->batt_temp_adc;
- switch (id) {
- case LPADC_ADC1:
- case LPADC_ADC2:
- case LPADC_ADC3:
- case LPADC_ADC4:
- chan = iio_channel_get(NULL, chan_name[id]);
- pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan;
- break;
- default:
- dev_err(dev, "invalid ADC id for BATT_TEMP : %d\n", id);
- pchg->chan[LP8788_BATT_TEMP] = NULL;
- break;
- }
+ /* ADC channel for battery temperature */
+ chan = iio_channel_get(dev, pdata->adc_batt_temp);
+ pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan;
}
static void lp8788_release_adc_channel(struct lp8788_charger *pchg)
@@ -733,9 +690,10 @@ static int lp8788_charger_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
struct lp8788_charger *pchg;
+ struct device *dev = &pdev->dev;
int ret;
- pchg = devm_kzalloc(lp->dev, sizeof(struct lp8788_charger), GFP_KERNEL);
+ pchg = devm_kzalloc(dev, sizeof(struct lp8788_charger), GFP_KERNEL);
if (!pchg)
return -ENOMEM;
@@ -743,11 +701,11 @@ static int lp8788_charger_probe(struct platform_device *pdev)
pchg->pdata = lp->pdata ? lp->pdata->chg_pdata : NULL;
platform_set_drvdata(pdev, pchg);
- ret = lp8788_update_charger_params(pchg);
+ ret = lp8788_update_charger_params(pdev, pchg);
if (ret)
return ret;
- lp8788_setup_adc_channel(pchg);
+ lp8788_setup_adc_channel(&pdev->dev, pchg);
ret = lp8788_psy_register(pdev, pchg);
if (ret)
@@ -761,7 +719,7 @@ static int lp8788_charger_probe(struct platform_device *pdev)
ret = lp8788_irq_register(pdev, pchg);
if (ret)
- dev_warn(lp->dev, "failed to register charger irq: %d\n", ret);
+ dev_warn(dev, "failed to register charger irq: %d\n", ret);
return 0;
}
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index 22cfe9cc4727..74a0bd9bc162 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -207,7 +207,7 @@ static int max17040_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
return -EIO;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -225,7 +225,6 @@ static int max17040_probe(struct i2c_client *client,
ret = power_supply_register(&client->dev, &chip->battery);
if (ret) {
dev_err(&client->dev, "failed: power supply register\n");
- kfree(chip);
return ret;
}
@@ -244,7 +243,6 @@ static int max17040_remove(struct i2c_client *client)
power_supply_unregister(&chip->battery);
cancel_delayed_work(&chip->work);
- kfree(chip);
return 0;
}
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 5ffe46916f0b..d664ef58afa7 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -572,7 +572,8 @@ static int max17042_init_chip(struct max17042_chip *chip)
__func__);
return -EIO;
}
- max17042_verify_model_lock(chip);
+
+ ret = max17042_verify_model_lock(chip);
if (ret) {
dev_err(&chip->client->dev, "%s lock verify failed\n",
__func__);
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 1a075f1f1b67..665cdc76c265 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -426,6 +427,54 @@ static int max8925_deinit_charger(struct max8925_power_info *info)
return 0;
}
+#ifdef CONFIG_OF
+static struct max8925_power_pdata *
+max8925_power_dt_init(struct platform_device *pdev)
+{
+ struct device_node *nproot = pdev->dev.parent->of_node;
+ struct device_node *np;
+ int batt_detect;
+ int topoff_threshold;
+ int fast_charge;
+ int no_temp_support;
+ int no_insert_detect;
+ struct max8925_power_pdata *pdata;
+
+ if (!nproot)
+ return pdev->dev.platform_data;
+
+ np = of_find_node_by_name(nproot, "charger");
+ if (!np) {
+ dev_err(&pdev->dev, "failed to find charger node\n");
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct max8925_power_pdata),
+ GFP_KERNEL);
+
+ of_property_read_u32(np, "topoff-threshold", &topoff_threshold);
+ of_property_read_u32(np, "batt-detect", &batt_detect);
+ of_property_read_u32(np, "fast-charge", &fast_charge);
+ of_property_read_u32(np, "no-insert-detect", &no_insert_detect);
+ of_property_read_u32(np, "no-temp-support", &no_temp_support);
+
+ pdata->batt_detect = batt_detect;
+ pdata->fast_charge = fast_charge;
+ pdata->topoff_threshold = topoff_threshold;
+ pdata->no_insert_detect = no_insert_detect;
+ pdata->no_temp_support = no_temp_support;
+
+ return pdata;
+}
+#else
+static struct max8925_power_pdata *
+max8925_power_dt_init(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+#endif
+
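+/*
+ * A minimal, hypothetical device-tree fragment matching the properties read
+ * in max8925_power_dt_init() above (the values are illustrative only):
+ *
+ *	charger {
+ *		batt-detect = <1>;
+ *		topoff-threshold = <2>;
+ *		fast-charge = <7>;
+ *		no-insert-detect = <0>;
+ *		no-temp-support = <0>;
+ *	};
+ */
+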
static int max8925_power_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -433,7 +482,7 @@ static int max8925_power_probe(struct platform_device *pdev)
struct max8925_power_info *info;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = max8925_power_dt_init(pdev);
if (!pdata) {
dev_err(&pdev->dev, "platform data isn't assigned to "
"power supply\n");
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 298c47d111b4..1ec810ada5ed 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -668,7 +668,7 @@ static int olpc_battery_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id olpc_battery_ids[] __devinitconst = {
+static const struct of_device_id olpc_battery_ids[] = {
{ .compatible = "olpc,xo1-battery" },
{}
};
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
new file mode 100644
index 000000000000..ed48d75bb786
--- /dev/null
+++ b/drivers/power/pm2301_charger.c
@@ -0,0 +1,1088 @@
+/*
+ * Copyright 2012 ST Ericsson.
+ *
+ * Power supply driver for ST Ericsson pm2xxx_charger charger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/pm2301_charger.h>
+#include <linux/gpio.h>
+
+#include "pm2301_charger.h"
+
+#define to_pm2xxx_charger_ac_device_info(x) container_of((x), \
+ struct pm2xxx_charger, ac_chg)
+
+static int pm2xxx_interrupt_registers[] = {
+ PM2XXX_REG_INT1,
+ PM2XXX_REG_INT2,
+ PM2XXX_REG_INT3,
+ PM2XXX_REG_INT4,
+ PM2XXX_REG_INT5,
+ PM2XXX_REG_INT6,
+};
+
+static enum power_supply_property pm2xxx_charger_ac_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+};
+
+static int pm2xxx_charger_voltage_map[] = {
+ 3500,
+ 3525,
+ 3550,
+ 3575,
+ 3600,
+ 3625,
+ 3650,
+ 3675,
+ 3700,
+ 3725,
+ 3750,
+ 3775,
+ 3800,
+ 3825,
+ 3850,
+ 3875,
+ 3900,
+ 3925,
+ 3950,
+ 3975,
+ 4000,
+ 4025,
+ 4050,
+ 4075,
+ 4100,
+ 4125,
+ 4150,
+ 4175,
+ 4200,
+ 4225,
+ 4250,
+ 4275,
+ 4300,
+};
+
+static int pm2xxx_charger_current_map[] = {
+ 200,
+ 200,
+ 400,
+ 600,
+ 800,
+ 1000,
+ 1200,
+ 1400,
+ 1600,
+ 1800,
+ 2000,
+ 2200,
+ 2400,
+ 2600,
+ 2800,
+ 3000,
+};
+
+static const struct i2c_device_id pm2xxx_ident[] = {
+ { "pm2301", 0 },
+ { }
+};
+
+static void set_lpn_pin(struct pm2xxx_charger *pm2)
+{
+ if (pm2->ac.charger_connected)
+ return;
+ gpio_set_value(pm2->lpn_pin, 1);
+
+ return;
+}
+
+static void clear_lpn_pin(struct pm2xxx_charger *pm2)
+{
+ if (pm2->ac.charger_connected)
+ return;
+ gpio_set_value(pm2->lpn_pin, 0);
+
+ return;
+}
+
+static int pm2xxx_reg_read(struct pm2xxx_charger *pm2, int reg, u8 *val)
+{
+ int ret;
+ /*
+ * When AC adaptor is unplugged, the host
+ * must put LPN high to be able to
+ * communicate by I2C with PM2301
+ * and receive I2C "acknowledge" from PM2301.
+ */
+ mutex_lock(&pm2->lock);
+ set_lpn_pin(pm2);
+
+ ret = i2c_smbus_read_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
+ 1, val);
+ if (ret < 0)
+ dev_err(pm2->dev, "Error reading register at 0x%x\n", reg);
+ else
+ ret = 0;
+ clear_lpn_pin(pm2);
+ mutex_unlock(&pm2->lock);
+
+ return ret;
+}
+
+static int pm2xxx_reg_write(struct pm2xxx_charger *pm2, int reg, u8 val)
+{
+ int ret;
+ /*
+ * When AC adaptor is unplugged, the host
+ * must put LPN high to be able to
+ * communicate by I2C with PM2301
+ * and receive I2C "acknowledge" from PM2301.
+ */
+ mutex_lock(&pm2->lock);
+ set_lpn_pin(pm2);
+
+ ret = i2c_smbus_write_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
+ 1, &val);
+ if (ret < 0)
+ dev_err(pm2->dev, "Error writing register at 0x%x\n", reg);
+ else
+ ret = 0;
+ clear_lpn_pin(pm2);
+ mutex_unlock(&pm2->lock);
+
+ return ret;
+}
+
+static int pm2xxx_charging_enable_mngt(struct pm2xxx_charger *pm2)
+{
+ int ret;
+
+ /* Enable charging */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
+ (PM2XXX_CH_AUTO_RESUME_EN | PM2XXX_CHARGER_ENA));
+
+ return ret;
+}
+
+static int pm2xxx_charging_disable_mngt(struct pm2xxx_charger *pm2)
+{
+ int ret;
+
+ /* Disable charging */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
+ (PM2XXX_CH_AUTO_RESUME_DIS | PM2XXX_CHARGER_DIS));
+
+ return ret;
+}
+
+static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val)
+{
+ queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
+
+ return 0;
+}
+
+
+int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val)
+{
+ queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
+
+ return 0;
+}
+
+static int pm2xxx_charger_ovv_mngt(struct pm2xxx_charger *pm2, int val)
+{
+ int ret = 0;
+
+ pm2->failure_input_ovv++;
+ if (pm2->failure_input_ovv < 4) {
+ ret = pm2xxx_charging_enable_mngt(pm2);
+ goto out;
+ } else {
+ pm2->failure_input_ovv = 0;
+ dev_err(pm2->dev, "Overvoltage detected\n");
+ pm2->flags.ovv = true;
+ power_supply_changed(&pm2->ac_chg.psy);
+ }
+
+out:
+ return ret;
+}
+
+static int pm2xxx_charger_wd_exp_mngt(struct pm2xxx_charger *pm2, int val)
+{
+ dev_dbg(pm2->dev , "20 minutes watchdog occured\n");
+
+ pm2->ac.wd_expired = true;
+ power_supply_changed(&pm2->ac_chg.psy);
+
+ return 0;
+}
+
+static int pm2xxx_charger_vbat_lsig_mngt(struct pm2xxx_charger *pm2, int val)
+{
+ switch (val) {
+ case PM2XXX_INT1_ITVBATLOWR:
+ dev_dbg(pm2->dev, "VBAT grows above VBAT_LOW level\n");
+ break;
+
+ case PM2XXX_INT1_ITVBATLOWF:
+ dev_dbg(pm2->dev, "VBAT drops below VBAT_LOW level\n");
+ break;
+
+ default:
+ dev_err(pm2->dev, "Unknown VBAT level\n");
+ }
+
+ return 0;
+}
+
+static int pm2xxx_charger_bat_disc_mngt(struct pm2xxx_charger *pm2, int val)
+{
+ dev_dbg(pm2->dev, "battery disconnected\n");
+
+ return 0;
+}
+
+static int pm2xxx_charger_detection(struct pm2xxx_charger *pm2, u8 *val)
+{
+ int ret;
+
+ ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT2, val);
+
+ if (ret < 0) {
+ dev_err(pm2->dev, "Charger detection failed\n");
+ goto out;
+ }
+
+ *val &= (PM2XXX_INT2_S_ITVPWR1PLUG | PM2XXX_INT2_S_ITVPWR2PLUG);
+
+out:
+ return ret;
+}
+
+static int pm2xxx_charger_itv_pwr_plug_mngt(struct pm2xxx_charger *pm2, int val)
+{
+
+ int ret;
+ u8 read_val;
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check whether the main charger is
+ * connected by reading the interrupt source register.
+ */
+ ret = pm2xxx_charger_detection(pm2, &read_val);
+
+ if ((ret == 0) && read_val) {
+ pm2->ac.charger_connected = 1;
+ pm2->ac_conn = true;
+ queue_work(pm2->charger_wq, &pm2->ac_work);
+ }
+
+
+ return ret;
+}
+
+static int pm2xxx_charger_itv_pwr_unplug_mngt(struct pm2xxx_charger *pm2,
+ int val)
+{
+ pm2->ac.charger_connected = 0;
+ queue_work(pm2->charger_wq, &pm2->ac_work);
+
+ return 0;
+}
+
+static int pm2_int_reg0(void *pm2_data, int val)
+{
+ struct pm2xxx_charger *pm2 = pm2_data;
+ int ret = 0;
+
+ if (val & (PM2XXX_INT1_ITVBATLOWR | PM2XXX_INT1_ITVBATLOWF)) {
+ ret = pm2xxx_charger_vbat_lsig_mngt(pm2, val &
+ (PM2XXX_INT1_ITVBATLOWR | PM2XXX_INT1_ITVBATLOWF));
+ }
+
+ if (val & PM2XXX_INT1_ITVBATDISCONNECT) {
+ ret = pm2xxx_charger_bat_disc_mngt(pm2,
+ PM2XXX_INT1_ITVBATDISCONNECT);
+ }
+
+ return ret;
+}
+
+static int pm2_int_reg1(void *pm2_data, int val)
+{
+ struct pm2xxx_charger *pm2 = pm2_data;
+ int ret = 0;
+
+ if (val & (PM2XXX_INT2_ITVPWR1PLUG | PM2XXX_INT2_ITVPWR2PLUG)) {
+ dev_dbg(pm2->dev , "Main charger plugged\n");
+ ret = pm2xxx_charger_itv_pwr_plug_mngt(pm2, val &
+ (PM2XXX_INT2_ITVPWR1PLUG | PM2XXX_INT2_ITVPWR2PLUG));
+ }
+
+ if (val &
+ (PM2XXX_INT2_ITVPWR1UNPLUG | PM2XXX_INT2_ITVPWR2UNPLUG)) {
+ dev_dbg(pm2->dev , "Main charger unplugged\n");
+ ret = pm2xxx_charger_itv_pwr_unplug_mngt(pm2, val &
+ (PM2XXX_INT2_ITVPWR1UNPLUG |
+ PM2XXX_INT2_ITVPWR2UNPLUG));
+ }
+
+ return ret;
+}
+
+static int pm2_int_reg2(void *pm2_data, int val)
+{
+ struct pm2xxx_charger *pm2 = pm2_data;
+ int ret = 0;
+
+ if (val & PM2XXX_INT3_ITAUTOTIMEOUTWD)
+ ret = pm2xxx_charger_wd_exp_mngt(pm2, val);
+
+ if (val & (PM2XXX_INT3_ITCHPRECHARGEWD |
+ PM2XXX_INT3_ITCHCCWD | PM2XXX_INT3_ITCHCVWD)) {
+ dev_dbg(pm2->dev,
+ "Watchdog occured for precharge, CC and CV charge\n");
+ }
+
+ return ret;
+}
+
+static int pm2_int_reg3(void *pm2_data, int val)
+{
+ struct pm2xxx_charger *pm2 = pm2_data;
+ int ret = 0;
+
+ if (val & (PM2XXX_INT4_ITCHARGINGON)) {
+ dev_dbg(pm2->dev,
+ "charging operation has started\n");
+ }
+
+ if (val & (PM2XXX_INT4_ITVRESUME)) {
+ dev_dbg(pm2->dev,
+ "battery discharged down to VResume threshold\n");
+ }
+
+ if (val & (PM2XXX_INT4_ITBATTFULL)) {
+ dev_dbg(pm2->dev , "battery fully detected\n");
+ }
+
+ if (val & (PM2XXX_INT4_ITCVPHASE)) {
+ dev_dbg(pm2->dev, "CV phase enter with 0.5C charging\n");
+ }
+
+ if (val & (PM2XXX_INT4_ITVPWR2OVV | PM2XXX_INT4_ITVPWR1OVV)) {
+ pm2->failure_case = VPWR_OVV;
+ ret = pm2xxx_charger_ovv_mngt(pm2, val &
+ (PM2XXX_INT4_ITVPWR2OVV | PM2XXX_INT4_ITVPWR1OVV));
+ dev_dbg(pm2->dev, "VPWR/VSYSTEM overvoltage detected\n");
+ }
+
+ if (val & (PM2XXX_INT4_S_ITBATTEMPCOLD |
+ PM2XXX_INT4_S_ITBATTEMPHOT)) {
+ ret = pm2xxx_charger_batt_therm_mngt(pm2, val &
+ (PM2XXX_INT4_S_ITBATTEMPCOLD |
+ PM2XXX_INT4_S_ITBATTEMPHOT));
+ dev_dbg(pm2->dev, "BTEMP is too Low/High\n");
+ }
+
+ return ret;
+}
+
+static int pm2_int_reg4(void *pm2_data, int val)
+{
+ struct pm2xxx_charger *pm2 = pm2_data;
+ int ret = 0;
+
+ if (val & PM2XXX_INT5_ITVSYSTEMOVV) {
+ pm2->failure_case = VSYSTEM_OVV;
+ ret = pm2xxx_charger_ovv_mngt(pm2, val &
+ PM2XXX_INT5_ITVSYSTEMOVV);
+ dev_dbg(pm2->dev, "VSYSTEM overvoltage detected\n");
+ }
+
+ if (val & (PM2XXX_INT5_ITTHERMALWARNINGFALL |
+ PM2XXX_INT5_ITTHERMALWARNINGRISE |
+ PM2XXX_INT5_ITTHERMALSHUTDOWNFALL |
+ PM2XXX_INT5_ITTHERMALSHUTDOWNRISE)) {
+ dev_dbg(pm2->dev, "BTEMP die temperature is too Low/High\n");
+ ret = pm2xxx_charger_die_therm_mngt(pm2, val &
+ (PM2XXX_INT5_ITTHERMALWARNINGFALL |
+ PM2XXX_INT5_ITTHERMALWARNINGRISE |
+ PM2XXX_INT5_ITTHERMALSHUTDOWNFALL |
+ PM2XXX_INT5_ITTHERMALSHUTDOWNRISE));
+ }
+
+ return ret;
+}
+
+static int pm2_int_reg5(void *pm2_data, int val)
+{
+ struct pm2xxx_charger *pm2 = pm2_data;
+ int ret = 0;
+
+
+ if (val & (PM2XXX_INT6_ITVPWR2DROP | PM2XXX_INT6_ITVPWR1DROP)) {
+ dev_dbg(pm2->dev, "VMPWR drop to VBAT level\n");
+ }
+
+ if (val & (PM2XXX_INT6_ITVPWR2VALIDRISE |
+ PM2XXX_INT6_ITVPWR1VALIDRISE |
+ PM2XXX_INT6_ITVPWR2VALIDFALL |
+ PM2XXX_INT6_ITVPWR1VALIDFALL)) {
+ dev_dbg(pm2->dev, "Falling/Rising edge on WPWR1/2\n");
+ }
+
+ return ret;
+}
+
+static irqreturn_t pm2xxx_irq_int(int irq, void *data)
+{
+ struct pm2xxx_charger *pm2 = data;
+ struct pm2xxx_interrupts *interrupt = pm2->pm2_int;
+ int i;
+
+ for (i = 0; i < PM2XXX_NUM_INT_REG; i++) {
+ pm2xxx_reg_read(pm2,
+ pm2xxx_interrupt_registers[i],
+ &(interrupt->reg[i]));
+
+ if (interrupt->reg[i] > 0)
+ interrupt->handler[i](pm2, interrupt->reg[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pm2xxx_charger_get_ac_cv(struct pm2xxx_charger *pm2)
+{
+ int ret = 0;
+ u8 val;
+
+ if (pm2->ac.charger_connected && pm2->ac.charger_online) {
+
+ ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT4, &val);
+ if (ret < 0) {
+ dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
+ goto out;
+ }
+
+ if (val & PM2XXX_INT4_S_ITCVPHASE)
+ ret = PM2XXX_CONST_VOLT;
+ else
+ ret = PM2XXX_CONST_CURR;
+ }
+out:
+ return ret;
+}
+
+static int pm2xxx_current_to_regval(int curr)
+{
+ int i;
+
+ if (curr < pm2xxx_charger_current_map[0])
+ return 0;
+
+ for (i = 1; i < ARRAY_SIZE(pm2xxx_charger_current_map); i++) {
+ if (curr < pm2xxx_charger_current_map[i])
+ return (i - 1);
+ }
+
+ i = ARRAY_SIZE(pm2xxx_charger_current_map) - 1;
+ if (curr == pm2xxx_charger_current_map[i])
+ return i;
+ else
+ return -EINVAL;
+}
+
+static int pm2xxx_voltage_to_regval(int curr)
+{
+ int i;
+
+ if (curr < pm2xxx_charger_voltage_map[0])
+ return 0;
+
+ for (i = 1; i < ARRAY_SIZE(pm2xxx_charger_voltage_map); i++) {
+ if (curr < pm2xxx_charger_voltage_map[i])
+ return i - 1;
+ }
+
+ i = ARRAY_SIZE(pm2xxx_charger_voltage_map) - 1;
+ if (curr == pm2xxx_charger_voltage_map[i])
+ return i;
+ else
+ return -EINVAL;
+}
+
+static int pm2xxx_charger_update_charger_current(struct ux500_charger *charger,
+ int ich_out)
+{
+ int ret;
+ int curr_index;
+ struct pm2xxx_charger *pm2;
+ u8 val;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+ pm2 = to_pm2xxx_charger_ac_device_info(charger);
+ else
+ return -ENXIO;
+
+ curr_index = pm2xxx_current_to_regval(ich_out);
+ if (curr_index < 0) {
+ dev_err(pm2->dev,
+ "Charger current too high, charging not started\n");
+ return -ENXIO;
+ }
+
+ ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG6, &val);
+ if (ret >= 0) {
+ val &= ~PM2XXX_DIR_CH_CC_CURRENT_MASK;
+ val |= curr_index;
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6, val);
+ if (ret < 0) {
+ dev_err(pm2->dev,
+ "%s write failed\n", __func__);
+ }
+ } else
+ dev_err(pm2->dev, "%s read failed\n", __func__);
+
+ return ret;
+}
+
+static int pm2xxx_charger_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct pm2xxx_charger *pm2;
+
+ pm2 = to_pm2xxx_charger_ac_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (pm2->flags.mainextchnotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (pm2->ac.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (pm2->flags.main_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = pm2->ac.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = pm2->ac.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ pm2->ac.cv_active = pm2xxx_charger_get_ac_cv(pm2);
+ val->intval = pm2->ac.cv_active;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int pm2xxx_charging_init(struct pm2xxx_charger *pm2)
+{
+ int ret = 0;
+
+ /* enable CC and CV watchdog */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG3,
+ (PM2XXX_CH_WD_CV_PHASE_60MIN | PM2XXX_CH_WD_CC_PHASE_60MIN));
+ if (ret < 0)
+ return ret;
+
+ /* enable precharge watchdog */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG4,
+ PM2XXX_CH_WD_PRECH_PHASE_60MIN);
+
+ /* Disable auto timeout */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG5,
+ PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN);
+
+ /*
+ * EOC current level = 100mA
+ * Precharge current level = 100mA
+ * CC current level = 1000mA
+ */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6,
+ (PM2XXX_DIR_CH_CC_CURRENT_1000MA |
+ PM2XXX_CH_PRECH_CURRENT_100MA |
+ PM2XXX_CH_EOC_CURRENT_100MA));
+
+ /*
+ * recharge threshold = 3.8V
+ * Precharge to CC threshold = 2.9V
+ */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG7,
+ (PM2XXX_CH_PRECH_VOL_2_9 | PM2XXX_CH_VRESUME_VOL_3_8));
+
+ /* float voltage charger level = 4.2V */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG8,
+ PM2XXX_CH_VOLT_4_2);
+
+ /* Voltage drop between VBAT and VSYS in HW charging = 300mV */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG9,
+ (PM2XXX_CH_150MV_DROP_300MV | PM2XXX_CHARCHING_INFO_DIS |
+ PM2XXX_CH_CC_REDUCED_CURRENT_IDENT |
+ PM2XXX_CH_CC_MODEDROP_DIS));
+
+ /* Input charger level of over voltage = 10V */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_INP_VOLT_VPWR2,
+ PM2XXX_VPWR2_OVV_10);
+ ret = pm2xxx_reg_write(pm2, PM2XXX_INP_VOLT_VPWR1,
+ PM2XXX_VPWR1_OVV_10);
+
+ /* Input charger drop */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_INP_DROP_VPWR2,
+ (PM2XXX_VPWR2_HW_OPT_DIS | PM2XXX_VPWR2_VALID_DIS |
+ PM2XXX_VPWR2_DROP_DIS));
+ ret = pm2xxx_reg_write(pm2, PM2XXX_INP_DROP_VPWR1,
+ (PM2XXX_VPWR1_HW_OPT_DIS | PM2XXX_VPWR1_VALID_DIS |
+ PM2XXX_VPWR1_DROP_DIS));
+
+ /* Disable battery low monitoring */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_LOW_LEV_COMP_REG,
+ PM2XXX_VBAT_LOW_MONITORING_ENA);
+
+ /* Disable LED */
+ ret = pm2xxx_reg_write(pm2, PM2XXX_LED_CTRL_REG,
+ PM2XXX_LED_SELECT_DIS);
+
+ return ret;
+}
+
+static int pm2xxx_charger_ac_en(struct ux500_charger *charger,
+ int enable, int vset, int iset)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+ u8 val;
+
+ struct pm2xxx_charger *pm2 = to_pm2xxx_charger_ac_device_info(charger);
+
+ if (enable) {
+ if (!pm2->ac.charger_connected) {
+ dev_dbg(pm2->dev, "AC charger not connected\n");
+ return -ENXIO;
+ }
+
+ dev_dbg(pm2->dev, "Enable AC: %dmV %dmA\n", vset, iset);
+ if (!pm2->vddadc_en_ac) {
+ regulator_enable(pm2->regu);
+ pm2->vddadc_en_ac = true;
+ }
+
+ ret = pm2xxx_charging_init(pm2);
+ if (ret < 0) {
+ dev_err(pm2->dev, "%s charging init failed\n",
+ __func__);
+ goto error_occured;
+ }
+
+ volt_index = pm2xxx_voltage_to_regval(vset);
+ curr_index = pm2xxx_current_to_regval(iset);
+
+ if (volt_index < 0 || curr_index < 0) {
+ dev_err(pm2->dev,
+ "Charger voltage or current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG8, &val);
+ if (ret < 0) {
+ dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
+ goto error_occured;
+ }
+ val &= ~PM2XXX_CH_VOLT_MASK;
+ val |= volt_index;
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG8, val);
+ if (ret < 0) {
+ dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
+ goto error_occured;
+ }
+
+ ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG6, &val);
+ if (ret < 0) {
+ dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
+ goto error_occured;
+ }
+ val &= ~PM2XXX_DIR_CH_CC_CURRENT_MASK;
+ val |= curr_index;
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6, val);
+ if (ret < 0) {
+ dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
+ goto error_occured;
+ }
+
+ if (!pm2->bat->enable_overshoot) {
+ ret = pm2xxx_reg_read(pm2, PM2XXX_LED_CTRL_REG, &val);
+ if (ret < 0) {
+ dev_err(pm2->dev, "%s pm2xxx read failed\n",
+ __func__);
+ goto error_occured;
+ }
+ val |= PM2XXX_ANTI_OVERSHOOT_EN;
+ ret = pm2xxx_reg_write(pm2, PM2XXX_LED_CTRL_REG, val);
+ if (ret < 0) {
+ dev_err(pm2->dev, "%s pm2xxx write failed\n",
+ __func__);
+ goto error_occured;
+ }
+ }
+
+ ret = pm2xxx_charging_enable_mngt(pm2);
+ if (ret < 0) {
+ dev_err(pm2->dev, "Failed to enable"
+ "pm2xxx ac charger\n");
+ goto error_occured;
+ }
+
+ pm2->ac.charger_online = 1;
+ } else {
+ pm2->ac.charger_online = 0;
+ pm2->ac.wd_expired = false;
+
+ /* Disable regulator if enabled */
+ if (pm2->vddadc_en_ac) {
+ regulator_disable(pm2->regu);
+ pm2->vddadc_en_ac = false;
+ }
+
+ ret = pm2xxx_charging_disable_mngt(pm2);
+ if (ret < 0) {
+ dev_err(pm2->dev, "failed to disable"
+ "pm2xxx ac charger\n");
+ goto error_occured;
+ }
+
+ dev_dbg(pm2->dev, "PM2301: " "Disabled AC charging\n");
+ }
+ power_supply_changed(&pm2->ac_chg.psy);
+
+error_occured:
+ return ret;
+}
+
+static int pm2xxx_charger_watchdog_kick(struct ux500_charger *charger)
+{
+ int ret;
+ struct pm2xxx_charger *pm2;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+ pm2 = to_pm2xxx_charger_ac_device_info(charger);
+ else
+ return -ENXIO;
+
+ ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_WD_KICK, WD_TIMER);
+ if (ret)
+ dev_err(pm2->dev, "Failed to kick WD!\n");
+
+ return ret;
+}
+
+static void pm2xxx_charger_ac_work(struct work_struct *work)
+{
+ struct pm2xxx_charger *pm2 = container_of(work,
+ struct pm2xxx_charger, ac_work);
+
+
+ power_supply_changed(&pm2->ac_chg.psy);
+ sysfs_notify(&pm2->ac_chg.psy.dev->kobj, NULL, "present");
+};
+
+static void pm2xxx_charger_check_main_thermal_prot_work(
+ struct work_struct *work)
+{
+};
+
+static struct pm2xxx_interrupts pm2xxx_int = {
+ .handler[0] = pm2_int_reg0,
+ .handler[1] = pm2_int_reg1,
+ .handler[2] = pm2_int_reg2,
+ .handler[3] = pm2_int_reg3,
+ .handler[4] = pm2_int_reg4,
+ .handler[5] = pm2_int_reg5,
+};
+
+static struct pm2xxx_irq pm2xxx_charger_irq[] = {
+ {"PM2XXX_IRQ_INT", pm2xxx_irq_int},
+};
+
+static int pm2xxx_wall_charger_resume(struct i2c_client *i2c_client)
+{
+ return 0;
+}
+
+static int pm2xxx_wall_charger_suspend(struct i2c_client *i2c_client,
+ pm_message_t state)
+{
+ return 0;
+}
+
+static int __devinit pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct pm2xxx_platform_data *pl_data = i2c_client->dev.platform_data;
+ struct pm2xxx_charger *pm2;
+ int ret = 0;
+ u8 val;
+
+ pm2 = kzalloc(sizeof(struct pm2xxx_charger), GFP_KERNEL);
+ if (!pm2) {
+ dev_err(pm2->dev, "pm2xxx_charger allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* get parent data */
+ pm2->dev = &i2c_client->dev;
+ pm2->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ pm2->pm2_int = &pm2xxx_int;
+
+ /* get charger specific platform data */
+ if (!pl_data->wall_charger) {
+ dev_err(pm2->dev, "no charger platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ pm2->pdata = pl_data->wall_charger;
+
+ /* get battery specific platform data */
+ if (!pl_data->battery) {
+ dev_err(pm2->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ pm2->bat = pl_data->battery;
+
+ /* get lpn GPIO from platform data */
+ if (!pm2->pdata->lpn_gpio) {
+ dev_err(pm2->dev, "no lpn gpio data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ pm2->lpn_pin = pm2->pdata->lpn_gpio;
+
+ if (!i2c_check_functionality(i2c_client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ ret = -ENODEV;
+ dev_info(pm2->dev, "pm2301 i2c_check_functionality failed\n");
+ goto free_device_info;
+ }
+
+ pm2->config.pm2xxx_i2c = i2c_client;
+ pm2->config.pm2xxx_id = (struct i2c_device_id *) id;
+ i2c_set_clientdata(i2c_client, pm2);
+
+ /* AC supply */
+ /* power_supply base class */
+ pm2->ac_chg.psy.name = pm2->pdata->label;
+ pm2->ac_chg.psy.type = POWER_SUPPLY_TYPE_MAINS;
+ pm2->ac_chg.psy.properties = pm2xxx_charger_ac_props;
+ pm2->ac_chg.psy.num_properties = ARRAY_SIZE(pm2xxx_charger_ac_props);
+ pm2->ac_chg.psy.get_property = pm2xxx_charger_ac_get_property;
+ pm2->ac_chg.psy.supplied_to = pm2->pdata->supplied_to;
+ pm2->ac_chg.psy.num_supplicants = pm2->pdata->num_supplicants;
+ /* pm2xxx_charger sub-class */
+ pm2->ac_chg.ops.enable = &pm2xxx_charger_ac_en;
+ pm2->ac_chg.ops.kick_wd = &pm2xxx_charger_watchdog_kick;
+ pm2->ac_chg.ops.update_curr = &pm2xxx_charger_update_charger_current;
+ pm2->ac_chg.max_out_volt = pm2xxx_charger_voltage_map[
+ ARRAY_SIZE(pm2xxx_charger_voltage_map) - 1];
+ pm2->ac_chg.max_out_curr = pm2xxx_charger_current_map[
+ ARRAY_SIZE(pm2xxx_charger_current_map) - 1];
+ pm2->ac_chg.wdt_refresh = WD_KICK_INTERVAL;
+ pm2->ac_chg.enabled = true;
+ pm2->ac_chg.external = true;
+
+ /* Create a work queue for the charger */
+ pm2->charger_wq =
+ create_singlethread_workqueue("pm2xxx_charger_wq");
+ if (pm2->charger_wq == NULL) {
+ dev_err(pm2->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for charger detection */
+ INIT_WORK(&pm2->ac_work, pm2xxx_charger_ac_work);
+
+ /* Init work for checking HW status */
+ INIT_WORK(&pm2->check_main_thermal_prot_work,
+ pm2xxx_charger_check_main_thermal_prot_work);
+
+ /*
+ * VDD ADC supply needs to be enabled from this driver when there
+ * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
+ * interrupts during charging
+ */
+ pm2->regu = regulator_get(pm2->dev, "vddadc");
+ if (IS_ERR(pm2->regu)) {
+ ret = PTR_ERR(pm2->regu);
+ dev_err(pm2->dev, "failed to get vddadc regulator\n");
+ goto free_charger_wq;
+ }
+
+ /* Register AC charger class */
+ ret = power_supply_register(pm2->dev, &pm2->ac_chg.psy);
+ if (ret) {
+ dev_err(pm2->dev, "failed to register AC charger\n");
+ goto free_regulator;
+ }
+
+ /* Register interrupts */
+ ret = request_threaded_irq(pm2->pdata->irq_number, NULL,
+ pm2xxx_charger_irq[0].isr,
+ pm2->pdata->irq_type,
+ pm2xxx_charger_irq[0].name, pm2);
+
+ if (ret != 0) {
+ dev_err(pm2->dev, "failed to request %s IRQ %d: %d\n",
+ pm2xxx_charger_irq[0].name, pm2->pdata->irq_number, ret);
+ goto unregister_pm2xxx_charger;
+ }
+
+ /* Initialize lock */
+ mutex_init(&pm2->lock);
+
+ /*
+ * The charger detection mechanism requires pulling up the LPN pin
+ * during I2C communication when no charger is connected.
+ * The LPN pin of the PM2301 is GPIO60 of the AB9540.
+ */
+ ret = gpio_request(pm2->lpn_pin, "pm2301_lpm_gpio");
+ if (ret < 0) {
+ dev_err(pm2->dev, "pm2301_lpm_gpio request failed\n");
+ goto unregister_pm2xxx_charger;
+ }
+ ret = gpio_direction_output(pm2->lpn_pin, 0);
+ if (ret < 0) {
+ dev_err(pm2->dev, "pm2301_lpm_gpio direction failed\n");
+ goto free_gpio;
+ }
+
+ ret = pm2xxx_charger_detection(pm2, &val);
+
+ if ((ret == 0) && val) {
+ pm2->ac.charger_connected = 1;
+ pm2->ac_conn = true;
+ power_supply_changed(&pm2->ac_chg.psy);
+ sysfs_notify(&pm2->ac_chg.psy.dev->kobj, NULL, "present");
+ }
+
+ return 0;
+
+free_gpio:
+ gpio_free(pm2->lpn_pin);
+unregister_pm2xxx_charger:
+ /* unregister power supply */
+ power_supply_unregister(&pm2->ac_chg.psy);
+free_regulator:
+ /* disable the regulator */
+ regulator_put(pm2->regu);
+free_charger_wq:
+ destroy_workqueue(pm2->charger_wq);
+free_device_info:
+ kfree(pm2);
+ return ret;
+}
+
+static int __devexit pm2xxx_wall_charger_remove(struct i2c_client *i2c_client)
+{
+ struct pm2xxx_charger *pm2 = i2c_get_clientdata(i2c_client);
+
+ /* Disable AC charging */
+ pm2xxx_charger_ac_en(&pm2->ac_chg, false, 0, 0);
+
+ /* Disable interrupts */
+ free_irq(pm2->pdata->irq_number, pm2);
+
+ /* Delete the work queue */
+ destroy_workqueue(pm2->charger_wq);
+
+ flush_scheduled_work();
+
+ /* disable the regulator */
+ regulator_put(pm2->regu);
+
+ power_supply_unregister(&pm2->ac_chg.psy);
+
+ /* Free GPIO60 */
+ gpio_free(pm2->lpn_pin);
+
+ kfree(pm2);
+
+ return 0;
+}
+
+static const struct i2c_device_id pm2xxx_id[] = {
+ { "pm2301", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, pm2xxx_id);
+
+static struct i2c_driver pm2xxx_charger_driver = {
+ .probe = pm2xxx_wall_charger_probe,
+ .remove = __devexit_p(pm2xxx_wall_charger_remove),
+ .suspend = pm2xxx_wall_charger_suspend,
+ .resume = pm2xxx_wall_charger_resume,
+ .driver = {
+ .name = "pm2xxx-wall_charger",
+ .owner = THIS_MODULE,
+ },
+ .id_table = pm2xxx_id,
+};
+
+static int __init pm2xxx_charger_init(void)
+{
+ return i2c_add_driver(&pm2xxx_charger_driver);
+}
+
+static void __exit pm2xxx_charger_exit(void)
+{
+ i2c_del_driver(&pm2xxx_charger_driver);
+}
+
+subsys_initcall_sync(pm2xxx_charger_init);
+module_exit(pm2xxx_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
+MODULE_ALIAS("platform:pm2xxx-charger");
+MODULE_DESCRIPTION("PM2xxx charger management driver");
+
diff --git a/drivers/power/pm2301_charger.h b/drivers/power/pm2301_charger.h
new file mode 100644
index 000000000000..e6319cdbc94f
--- /dev/null
+++ b/drivers/power/pm2301_charger.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * PM2301 power supply interface
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef PM2301_CHARGER_H
+#define PM2301_CHARGER_H
+
+#define MAIN_WDOG_ENA 0x01
+#define MAIN_WDOG_KICK 0x02
+#define MAIN_WDOG_DIS 0x00
+#define CHARG_WD_KICK 0x01
+#define MAIN_CH_ENA 0x01
+#define MAIN_CH_NO_OVERSHOOT_ENA_N 0x02
+#define MAIN_CH_DET 0x01
+#define MAIN_CH_CV_ON 0x04
+#define OTP_ENABLE_WD 0x01
+
+#define MAIN_CH_INPUT_CURR_SHIFT 4
+
+#define LED_INDICATOR_PWM_ENA 0x01
+#define LED_INDICATOR_PWM_DIS 0x00
+#define LED_IND_CUR_5MA 0x04
+#define LED_INDICATOR_PWM_DUTY_252_256 0xBF
+
+/* HW failure constants */
+#define MAIN_CH_TH_PROT 0x02
+#define MAIN_CH_NOK 0x01
+
+/* Watchdog timeout constant */
+#define WD_TIMER 0x30 /* 4min */
+#define WD_KICK_INTERVAL (30 * HZ)
+
+#define PM2XXX_NUM_INT_REG 0x6
+
+/* Constant voltage/current */
+#define PM2XXX_CONST_CURR 0x0
+#define PM2XXX_CONST_VOLT 0x1
+
+/* Lowest charger voltage is 3.39V -> 0x4E */
+#define LOW_VOLT_REG 0x4E
+
+#define PM2XXX_BATT_CTRL_REG1 0x00
+#define PM2XXX_BATT_CTRL_REG2 0x01
+#define PM2XXX_BATT_CTRL_REG3 0x02
+#define PM2XXX_BATT_CTRL_REG4 0x03
+#define PM2XXX_BATT_CTRL_REG5 0x04
+#define PM2XXX_BATT_CTRL_REG6 0x05
+#define PM2XXX_BATT_CTRL_REG7 0x06
+#define PM2XXX_BATT_CTRL_REG8 0x07
+#define PM2XXX_NTC_CTRL_REG1 0x08
+#define PM2XXX_NTC_CTRL_REG2 0x09
+#define PM2XXX_BATT_CTRL_REG9 0x0A
+#define PM2XXX_BATT_STAT_REG1 0x0B
+#define PM2XXX_INP_VOLT_VPWR2 0x11
+#define PM2XXX_INP_DROP_VPWR2 0x13
+#define PM2XXX_INP_VOLT_VPWR1 0x15
+#define PM2XXX_INP_DROP_VPWR1 0x17
+#define PM2XXX_INP_MODE_VPWR 0x18
+#define PM2XXX_BATT_WD_KICK 0x70
+#define PM2XXX_DEV_VER_STAT 0x0C
+#define PM2XXX_THERM_WARN_CTRL_REG 0x20
+#define PM2XXX_BATT_DISC_REG 0x21
+#define PM2XXX_BATT_LOW_LEV_COMP_REG 0x22
+#define PM2XXX_BATT_LOW_LEV_VAL_REG 0x23
+#define PM2XXX_I2C_PAD_CTRL_REG 0x24
+#define PM2XXX_SW_CTRL_REG 0x26
+#define PM2XXX_LED_CTRL_REG 0x28
+
+#define PM2XXX_REG_INT1 0x40
+#define PM2XXX_MASK_REG_INT1 0x50
+#define PM2XXX_SRCE_REG_INT1 0x60
+#define PM2XXX_REG_INT2 0x41
+#define PM2XXX_MASK_REG_INT2 0x51
+#define PM2XXX_SRCE_REG_INT2 0x61
+#define PM2XXX_REG_INT3 0x42
+#define PM2XXX_MASK_REG_INT3 0x52
+#define PM2XXX_SRCE_REG_INT3 0x62
+#define PM2XXX_REG_INT4 0x43
+#define PM2XXX_MASK_REG_INT4 0x53
+#define PM2XXX_SRCE_REG_INT4 0x63
+#define PM2XXX_REG_INT5 0x44
+#define PM2XXX_MASK_REG_INT5 0x54
+#define PM2XXX_SRCE_REG_INT5 0x64
+#define PM2XXX_REG_INT6 0x45
+#define PM2XXX_MASK_REG_INT6 0x55
+#define PM2XXX_SRCE_REG_INT6 0x65
+
+#define VPWR_OVV 0x0
+#define VSYSTEM_OVV 0x1
+
+/* control Reg 1 */
+#define PM2XXX_CH_RESUME_EN 0x1
+#define PM2XXX_CH_RESUME_DIS 0x0
+
+/* control Reg 2 */
+#define PM2XXX_CH_AUTO_RESUME_EN 0X2
+#define PM2XXX_CH_AUTO_RESUME_DIS 0X0
+#define PM2XXX_CHARGER_ENA 0x4
+#define PM2XXX_CHARGER_DIS 0x0
+
+/* control Reg 3 */
+#define PM2XXX_CH_WD_CC_PHASE_OFF 0x0
+#define PM2XXX_CH_WD_CC_PHASE_5MIN 0x1
+#define PM2XXX_CH_WD_CC_PHASE_10MIN 0x2
+#define PM2XXX_CH_WD_CC_PHASE_30MIN 0x3
+#define PM2XXX_CH_WD_CC_PHASE_60MIN 0x4
+#define PM2XXX_CH_WD_CC_PHASE_120MIN 0x5
+#define PM2XXX_CH_WD_CC_PHASE_240MIN 0x6
+#define PM2XXX_CH_WD_CC_PHASE_360MIN 0x7
+
+#define PM2XXX_CH_WD_CV_PHASE_OFF (0x0<<3)
+#define PM2XXX_CH_WD_CV_PHASE_5MIN (0x1<<3)
+#define PM2XXX_CH_WD_CV_PHASE_10MIN (0x2<<3)
+#define PM2XXX_CH_WD_CV_PHASE_30MIN (0x3<<3)
+#define PM2XXX_CH_WD_CV_PHASE_60MIN (0x4<<3)
+#define PM2XXX_CH_WD_CV_PHASE_120MIN (0x5<<3)
+#define PM2XXX_CH_WD_CV_PHASE_240MIN (0x6<<3)
+#define PM2XXX_CH_WD_CV_PHASE_360MIN (0x7<<3)
+
+/* control Reg 4 */
+#define PM2XXX_CH_WD_PRECH_PHASE_OFF 0x0
+#define PM2XXX_CH_WD_PRECH_PHASE_1MIN 0x1
+#define PM2XXX_CH_WD_PRECH_PHASE_5MIN 0x2
+#define PM2XXX_CH_WD_PRECH_PHASE_10MIN 0x3
+#define PM2XXX_CH_WD_PRECH_PHASE_30MIN 0x4
+#define PM2XXX_CH_WD_PRECH_PHASE_60MIN 0x5
+#define PM2XXX_CH_WD_PRECH_PHASE_120MIN 0x6
+#define PM2XXX_CH_WD_PRECH_PHASE_240MIN 0x7
+
+/* control Reg 5 */
+#define PM2XXX_CH_WD_AUTO_TIMEOUT_NONE 0x0
+#define PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN 0x1
+
+/* control Reg 6 */
+#define PM2XXX_DIR_CH_CC_CURRENT_MASK 0x0F
+#define PM2XXX_DIR_CH_CC_CURRENT_200MA 0x0
+#define PM2XXX_DIR_CH_CC_CURRENT_400MA 0x2
+#define PM2XXX_DIR_CH_CC_CURRENT_600MA 0x3
+#define PM2XXX_DIR_CH_CC_CURRENT_800MA 0x4
+#define PM2XXX_DIR_CH_CC_CURRENT_1000MA 0x5
+#define PM2XXX_DIR_CH_CC_CURRENT_1200MA 0x6
+#define PM2XXX_DIR_CH_CC_CURRENT_1400MA 0x7
+#define PM2XXX_DIR_CH_CC_CURRENT_1600MA 0x8
+#define PM2XXX_DIR_CH_CC_CURRENT_1800MA 0x9
+#define PM2XXX_DIR_CH_CC_CURRENT_2000MA 0xA
+#define PM2XXX_DIR_CH_CC_CURRENT_2200MA 0xB
+#define PM2XXX_DIR_CH_CC_CURRENT_2400MA 0xC
+#define PM2XXX_DIR_CH_CC_CURRENT_2600MA 0xD
+#define PM2XXX_DIR_CH_CC_CURRENT_2800MA 0xE
+#define PM2XXX_DIR_CH_CC_CURRENT_3000MA 0xF
+
+#define PM2XXX_CH_PRECH_CURRENT_MASK 0x30
+#define PM2XXX_CH_PRECH_CURRENT_25MA (0x0<<4)
+#define PM2XXX_CH_PRECH_CURRENT_50MA (0x1<<4)
+#define PM2XXX_CH_PRECH_CURRENT_75MA (0x2<<4)
+#define PM2XXX_CH_PRECH_CURRENT_100MA (0x3<<4)
+
+#define PM2XXX_CH_EOC_CURRENT_MASK 0xC0
+#define PM2XXX_CH_EOC_CURRENT_100MA (0x0<<6)
+#define PM2XXX_CH_EOC_CURRENT_150MA (0x1<<6)
+#define PM2XXX_CH_EOC_CURRENT_300MA (0x2<<6)
+#define PM2XXX_CH_EOC_CURRENT_400MA (0x3<<6)
+
+/* control Reg 7 */
+#define PM2XXX_CH_PRECH_VOL_2_5 0x0
+#define PM2XXX_CH_PRECH_VOL_2_7 0x1
+#define PM2XXX_CH_PRECH_VOL_2_9 0x2
+#define PM2XXX_CH_PRECH_VOL_3_1 0x3
+
+#define PM2XXX_CH_VRESUME_VOL_3_2 (0x0<<2)
+#define PM2XXX_CH_VRESUME_VOL_3_4 (0x1<<2)
+#define PM2XXX_CH_VRESUME_VOL_3_6 (0x2<<2)
+#define PM2XXX_CH_VRESUME_VOL_3_8 (0x3<<2)
+
+/* control Reg 8 */
+#define PM2XXX_CH_VOLT_MASK 0x3F
+#define PM2XXX_CH_VOLT_3_5 0x0
+#define PM2XXX_CH_VOLT_3_5225 0x1
+#define PM2XXX_CH_VOLT_3_6 0x4
+#define PM2XXX_CH_VOLT_3_7 0x8
+#define PM2XXX_CH_VOLT_4_0 0x14
+#define PM2XXX_CH_VOLT_4_175 0x1B
+#define PM2XXX_CH_VOLT_4_2 0x1C
+#define PM2XXX_CH_VOLT_4_275 0x1F
+#define PM2XXX_CH_VOLT_4_3 0x20
+
+/*NTC control register 1*/
+#define PM2XXX_BTEMP_HIGH_TH_45 0x0
+#define PM2XXX_BTEMP_HIGH_TH_50 0x1
+#define PM2XXX_BTEMP_HIGH_TH_55 0x2
+#define PM2XXX_BTEMP_HIGH_TH_60 0x3
+#define PM2XXX_BTEMP_HIGH_TH_65 0x4
+
+#define PM2XXX_BTEMP_LOW_TH_N5 (0x0<<3)
+#define PM2XXX_BTEMP_LOW_TH_0 (0x1<<3)
+#define PM2XXX_BTEMP_LOW_TH_5 (0x2<<3)
+#define PM2XXX_BTEMP_LOW_TH_10 (0x3<<3)
+
+/*NTC control register 2*/
+#define PM2XXX_NTC_BETA_COEFF_3477 0x0
+#define PM2XXX_NTC_BETA_COEFF_3964 0x1
+
+#define PM2XXX_NTC_RES_10K (0x0<<2)
+#define PM2XXX_NTC_RES_47K (0x1<<2)
+#define PM2XXX_NTC_RES_100K (0x2<<2)
+#define PM2XXX_NTC_RES_NO_NTC (0x3<<2)
+
+/* control Reg 9 */
+#define PM2XXX_CH_CC_MODEDROP_EN 1
+#define PM2XXX_CH_CC_MODEDROP_DIS 0
+
+#define PM2XXX_CH_CC_REDUCED_CURRENT_100MA (0x0<<1)
+#define PM2XXX_CH_CC_REDUCED_CURRENT_200MA (0x1<<1)
+#define PM2XXX_CH_CC_REDUCED_CURRENT_400MA (0x2<<1)
+#define PM2XXX_CH_CC_REDUCED_CURRENT_IDENT (0x3<<1)
+
+#define PM2XXX_CHARCHING_INFO_DIS (0<<3)
+#define PM2XXX_CHARCHING_INFO_EN (1<<3)
+
+#define PM2XXX_CH_150MV_DROP_300MV (0<<4)
+#define PM2XXX_CH_150MV_DROP_150MV (1<<4)
+
+
+/* charger status register */
+#define PM2XXX_CHG_STATUS_OFF 0x0
+#define PM2XXX_CHG_STATUS_ON 0x1
+#define PM2XXX_CHG_STATUS_FULL 0x2
+#define PM2XXX_CHG_STATUS_ERR 0x3
+#define PM2XXX_CHG_STATUS_WAIT 0x4
+#define PM2XXX_CHG_STATUS_NOBAT 0x5
+
+/* Input charger voltage VPWR2 */
+#define PM2XXX_VPWR2_OVV_6_0 0x0
+#define PM2XXX_VPWR2_OVV_6_3 0x1
+#define PM2XXX_VPWR2_OVV_10 0x2
+#define PM2XXX_VPWR2_OVV_NONE 0x3
+
+/* Input charger drop VPWR2 */
+#define PM2XXX_VPWR2_HW_OPT_EN (0x1<<4)
+#define PM2XXX_VPWR2_HW_OPT_DIS (0x0<<4)
+
+#define PM2XXX_VPWR2_VALID_EN (0x1<<3)
+#define PM2XXX_VPWR2_VALID_DIS (0x0<<3)
+
+#define PM2XXX_VPWR2_DROP_EN (0x1<<2)
+#define PM2XXX_VPWR2_DROP_DIS (0x0<<2)
+
+/* Input charger voltage VPWR1 */
+#define PM2XXX_VPWR1_OVV_6_0 0x0
+#define PM2XXX_VPWR1_OVV_6_3 0x1
+#define PM2XXX_VPWR1_OVV_10 0x2
+#define PM2XXX_VPWR1_OVV_NONE 0x3
+
+/* Input charger drop VPWR1 */
+#define PM2XXX_VPWR1_HW_OPT_EN (0x1<<4)
+#define PM2XXX_VPWR1_HW_OPT_DIS (0x0<<4)
+
+#define PM2XXX_VPWR1_VALID_EN (0x1<<3)
+#define PM2XXX_VPWR1_VALID_DIS (0x0<<3)
+
+#define PM2XXX_VPWR1_DROP_EN (0x1<<2)
+#define PM2XXX_VPWR1_DROP_DIS (0x0<<2)
+
+/* Battery low level comparator control register */
+#define PM2XXX_VBAT_LOW_MONITORING_DIS 0x0
+#define PM2XXX_VBAT_LOW_MONITORING_ENA 0x1
+
+/* Battery low level value control register */
+#define PM2XXX_VBAT_LOW_LEVEL_2_3 0x0
+#define PM2XXX_VBAT_LOW_LEVEL_2_4 0x1
+#define PM2XXX_VBAT_LOW_LEVEL_2_5 0x2
+#define PM2XXX_VBAT_LOW_LEVEL_2_6 0x3
+#define PM2XXX_VBAT_LOW_LEVEL_2_7 0x4
+#define PM2XXX_VBAT_LOW_LEVEL_2_8 0x5
+#define PM2XXX_VBAT_LOW_LEVEL_2_9 0x6
+#define PM2XXX_VBAT_LOW_LEVEL_3_0 0x7
+#define PM2XXX_VBAT_LOW_LEVEL_3_1 0x8
+#define PM2XXX_VBAT_LOW_LEVEL_3_2 0x9
+#define PM2XXX_VBAT_LOW_LEVEL_3_3 0xA
+#define PM2XXX_VBAT_LOW_LEVEL_3_4 0xB
+#define PM2XXX_VBAT_LOW_LEVEL_3_5 0xC
+#define PM2XXX_VBAT_LOW_LEVEL_3_6 0xD
+#define PM2XXX_VBAT_LOW_LEVEL_3_7 0xE
+#define PM2XXX_VBAT_LOW_LEVEL_3_8 0xF
+#define PM2XXX_VBAT_LOW_LEVEL_3_9 0x10
+#define PM2XXX_VBAT_LOW_LEVEL_4_0 0x11
+#define PM2XXX_VBAT_LOW_LEVEL_4_1 0x12
+#define PM2XXX_VBAT_LOW_LEVEL_4_2 0x13
+
+/* SW CTRL */
+#define PM2XXX_SWCTRL_HW 0x0
+#define PM2XXX_SWCTRL_SW 0x1
+
+
+/* LED Driver Control */
+#define PM2XXX_LED_CURRENT_MASK 0x0C
+#define PM2XXX_LED_CURRENT_2_5MA (0X0<<2)
+#define PM2XXX_LED_CURRENT_1MA (0X1<<2)
+#define PM2XXX_LED_CURRENT_5MA (0X2<<2)
+#define PM2XXX_LED_CURRENT_10MA (0X3<<2)
+
+#define PM2XXX_LED_SELECT_MASK 0x02
+#define PM2XXX_LED_SELECT_EN (0X0<<1)
+#define PM2XXX_LED_SELECT_DIS (0X1<<1)
+
+#define PM2XXX_ANTI_OVERSHOOT_MASK 0x01
+#define PM2XXX_ANTI_OVERSHOOT_DIS 0X0
+#define PM2XXX_ANTI_OVERSHOOT_EN 0X1
+
+enum pm2xxx_reg_int1 {
+ PM2XXX_INT1_ITVBATDISCONNECT = 0x02,
+ PM2XXX_INT1_ITVBATLOWR = 0x04,
+ PM2XXX_INT1_ITVBATLOWF = 0x08,
+};
+
+enum pm2xxx_mask_reg_int1 {
+ PM2XXX_INT1_M_ITVBATDISCONNECT = 0x02,
+ PM2XXX_INT1_M_ITVBATLOWR = 0x04,
+ PM2XXX_INT1_M_ITVBATLOWF = 0x08,
+};
+
+enum pm2xxx_source_reg_int1 {
+ PM2XXX_INT1_S_ITVBATDISCONNECT = 0x02,
+ PM2XXX_INT1_S_ITVBATLOWR = 0x04,
+ PM2XXX_INT1_S_ITVBATLOWF = 0x08,
+};
+
+enum pm2xxx_reg_int2 {
+ PM2XXX_INT2_ITVPWR2PLUG = 0x01,
+ PM2XXX_INT2_ITVPWR2UNPLUG = 0x02,
+ PM2XXX_INT2_ITVPWR1PLUG = 0x04,
+ PM2XXX_INT2_ITVPWR1UNPLUG = 0x08,
+};
+
+enum pm2xxx_mask_reg_int2 {
+ PM2XXX_INT2_M_ITVPWR2PLUG = 0x01,
+ PM2XXX_INT2_M_ITVPWR2UNPLUG = 0x02,
+ PM2XXX_INT2_M_ITVPWR1PLUG = 0x04,
+ PM2XXX_INT2_M_ITVPWR1UNPLUG = 0x08,
+};
+
+enum pm2xxx_source_reg_int2 {
+ PM2XXX_INT2_S_ITVPWR2PLUG = 0x03,
+ PM2XXX_INT2_S_ITVPWR1PLUG = 0x0c,
+};
+
+enum pm2xxx_reg_int3 {
+ PM2XXX_INT3_ITCHPRECHARGEWD = 0x01,
+ PM2XXX_INT3_ITCHCCWD = 0x02,
+ PM2XXX_INT3_ITCHCVWD = 0x04,
+ PM2XXX_INT3_ITAUTOTIMEOUTWD = 0x08,
+};
+
+enum pm2xxx_mask_reg_int3 {
+ PM2XXX_INT3_M_ITCHPRECHARGEWD = 0x01,
+ PM2XXX_INT3_M_ITCHCCWD = 0x02,
+ PM2XXX_INT3_M_ITCHCVWD = 0x04,
+ PM2XXX_INT3_M_ITAUTOTIMEOUTWD = 0x08,
+};
+
+enum pm2xxx_source_reg_int3 {
+ PM2XXX_INT3_S_ITCHPRECHARGEWD = 0x01,
+ PM2XXX_INT3_S_ITCHCCWD = 0x02,
+ PM2XXX_INT3_S_ITCHCVWD = 0x04,
+ PM2XXX_INT3_S_ITAUTOTIMEOUTWD = 0x08,
+};
+
+enum pm2xxx_reg_int4 {
+ PM2XXX_INT4_ITBATTEMPCOLD = 0x01,
+ PM2XXX_INT4_ITBATTEMPHOT = 0x02,
+ PM2XXX_INT4_ITVPWR2OVV = 0x04,
+ PM2XXX_INT4_ITVPWR1OVV = 0x08,
+ PM2XXX_INT4_ITCHARGINGON = 0x10,
+ PM2XXX_INT4_ITVRESUME = 0x20,
+ PM2XXX_INT4_ITBATTFULL = 0x40,
+ PM2XXX_INT4_ITCVPHASE = 0x80,
+};
+
+enum pm2xxx_mask_reg_int4 {
+ PM2XXX_INT4_M_ITBATTEMPCOLD = 0x01,
+ PM2XXX_INT4_M_ITBATTEMPHOT = 0x02,
+ PM2XXX_INT4_M_ITVPWR2OVV = 0x04,
+ PM2XXX_INT4_M_ITVPWR1OVV = 0x08,
+ PM2XXX_INT4_M_ITCHARGINGON = 0x10,
+ PM2XXX_INT4_M_ITVRESUME = 0x20,
+ PM2XXX_INT4_M_ITBATTFULL = 0x40,
+ PM2XXX_INT4_M_ITCVPHASE = 0x80,
+};
+
+enum pm2xxx_source_reg_int4 {
+ PM2XXX_INT4_S_ITBATTEMPCOLD = 0x01,
+ PM2XXX_INT4_S_ITBATTEMPHOT = 0x02,
+ PM2XXX_INT4_S_ITVPWR2OVV = 0x04,
+ PM2XXX_INT4_S_ITVPWR1OVV = 0x08,
+ PM2XXX_INT4_S_ITCHARGINGON = 0x10,
+ PM2XXX_INT4_S_ITVRESUME = 0x20,
+ PM2XXX_INT4_S_ITBATTFULL = 0x40,
+ PM2XXX_INT4_S_ITCVPHASE = 0x80,
+};
+
+enum pm2xxx_reg_int5 {
+ PM2XXX_INT5_ITTHERMALSHUTDOWNRISE = 0x01,
+ PM2XXX_INT5_ITTHERMALSHUTDOWNFALL = 0x02,
+ PM2XXX_INT5_ITTHERMALWARNINGRISE = 0x04,
+ PM2XXX_INT5_ITTHERMALWARNINGFALL = 0x08,
+ PM2XXX_INT5_ITVSYSTEMOVV = 0x10,
+};
+
+enum pm2xxx_mask_reg_int5 {
+ PM2XXX_INT5_M_ITTHERMALSHUTDOWNRISE = 0x01,
+ PM2XXX_INT5_M_ITTHERMALSHUTDOWNFALL = 0x02,
+ PM2XXX_INT5_M_ITTHERMALWARNINGRISE = 0x04,
+ PM2XXX_INT5_M_ITTHERMALWARNINGFALL = 0x08,
+ PM2XXX_INT5_M_ITVSYSTEMOVV = 0x10,
+};
+
+enum pm2xxx_source_reg_int5 {
+ PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE = 0x01,
+ PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL = 0x02,
+ PM2XXX_INT5_S_ITTHERMALWARNINGRISE = 0x04,
+ PM2XXX_INT5_S_ITTHERMALWARNINGFALL = 0x08,
+ PM2XXX_INT5_S_ITVSYSTEMOVV = 0x10,
+};
+
+enum pm2xxx_reg_int6 {
+ PM2XXX_INT6_ITVPWR2DROP = 0x01,
+ PM2XXX_INT6_ITVPWR1DROP = 0x02,
+ PM2XXX_INT6_ITVPWR2VALIDRISE = 0x04,
+ PM2XXX_INT6_ITVPWR2VALIDFALL = 0x08,
+ PM2XXX_INT6_ITVPWR1VALIDRISE = 0x10,
+ PM2XXX_INT6_ITVPWR1VALIDFALL = 0x20,
+};
+
+enum pm2xxx_mask_reg_int6 {
+ PM2XXX_INT6_M_ITVPWR2DROP = 0x01,
+ PM2XXX_INT6_M_ITVPWR1DROP = 0x02,
+ PM2XXX_INT6_M_ITVPWR2VALIDRISE = 0x04,
+ PM2XXX_INT6_M_ITVPWR2VALIDFALL = 0x08,
+ PM2XXX_INT6_M_ITVPWR1VALIDRISE = 0x10,
+ PM2XXX_INT6_M_ITVPWR1VALIDFALL = 0x20,
+};
+
+enum pm2xxx_source_reg_int6 {
+ PM2XXX_INT6_S_ITVPWR2DROP = 0x01,
+ PM2XXX_INT6_S_ITVPWR1DROP = 0x02,
+ PM2XXX_INT6_S_ITVPWR2VALIDRISE = 0x04,
+ PM2XXX_INT6_S_ITVPWR2VALIDFALL = 0x08,
+ PM2XXX_INT6_S_ITVPWR1VALIDRISE = 0x10,
+ PM2XXX_INT6_S_ITVPWR1VALIDFALL = 0x20,
+};
+
+struct pm2xxx_charger_info {
+ int charger_connected;
+ int charger_online;
+ int cv_active;
+ bool wd_expired;
+};
+
+struct pm2xxx_charger_event_flags {
+ bool mainextchnotok;
+ bool main_thermal_prot;
+ bool ovv;
+ bool chgwdexp;
+};
+
+struct pm2xxx_interrupts {
+ u8 reg[PM2XXX_NUM_INT_REG];
+ int (*handler[PM2XXX_NUM_INT_REG])(void *, int);
+};
+
+struct pm2xxx_config {
+ struct i2c_client *pm2xxx_i2c;
+ struct i2c_device_id *pm2xxx_id;
+};
+
+struct pm2xxx_irq {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct pm2xxx_charger {
+ struct device *dev;
+ u8 chip_id;
+ bool vddadc_en_ac;
+ struct pm2xxx_config config;
+ bool ac_conn;
+ unsigned int gpio_irq;
+ int vbat;
+ int old_vbat;
+ int failure_case;
+ int failure_input_ovv;
+ unsigned int lpn_pin;
+ struct pm2xxx_interrupts *pm2_int;
+ struct ab8500_gpadc *gpadc;
+ struct regulator *regu;
+ struct pm2xxx_bm_data *bat;
+ struct mutex lock;
+ struct ab8500 *parent;
+ struct pm2xxx_charger_info ac;
+ struct pm2xxx_charger_platform_data *pdata;
+ struct workqueue_struct *charger_wq;
+ struct delayed_work check_vbat_work;
+ struct work_struct ac_work;
+ struct work_struct check_main_thermal_prot_work;
+ struct ux500_charger ac_chg;
+ struct pm2xxx_charger_event_flags flags;
+};
+
+#endif /* PM2301_CHARGER_H */
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index f77a41272e5d..5deac432e2ae 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -141,7 +141,7 @@ int power_supply_set_battery_charged(struct power_supply *psy)
}
EXPORT_SYMBOL_GPL(power_supply_set_battery_charged);
-static int power_supply_match_device_by_name(struct device *dev, void *data)
+static int power_supply_match_device_by_name(struct device *dev, const void *data)
{
const char *name = data;
struct power_supply *psy = dev_get_drvdata(dev);
@@ -149,7 +149,7 @@ static int power_supply_match_device_by_name(struct device *dev, void *data)
return strcmp(psy->name, name) == 0;
}
-struct power_supply *power_supply_get_by_name(char *name)
+struct power_supply *power_supply_get_by_name(const char *name)
{
struct device *dev = class_find_device(power_supply_class, NULL, name,
power_supply_match_device_by_name);
@@ -216,6 +216,86 @@ static void psy_unregister_thermal(struct power_supply *psy)
return;
thermal_zone_device_unregister(psy->tzd);
}
+
+/* thermal cooling device callbacks */
+static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
+ unsigned long *state)
+{
+ struct power_supply *psy;
+ union power_supply_propval val;
+ int ret;
+
+ psy = tcd->devdata;
+ ret = psy->get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+ if (!ret)
+ *state = val.intval;
+
+ return ret;
+}
+
+static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
+ unsigned long *state)
+{
+ struct power_supply *psy;
+ union power_supply_propval val;
+ int ret;
+
+ psy = tcd->devdata;
+ ret = psy->get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+ if (!ret)
+ *state = val.intval;
+
+ return ret;
+}
+
+static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
+ unsigned long state)
+{
+ struct power_supply *psy;
+ union power_supply_propval val;
+ int ret;
+
+ psy = tcd->devdata;
+ val.intval = state;
+ ret = psy->set_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+
+ return ret;
+}
+
+static struct thermal_cooling_device_ops psy_tcd_ops = {
+ .get_max_state = ps_get_max_charge_cntl_limit,
+ .get_cur_state = ps_get_cur_charge_cntl_limit,
+ .set_cur_state = ps_set_cur_charge_cntl_limit,
+};
+
+static int psy_register_cooler(struct power_supply *psy)
+{
+ int i;
+
+ /* Register for cooling device if psy can control charging */
+ for (i = 0; i < psy->num_properties; i++) {
+ if (psy->properties[i] ==
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT) {
+ psy->tcd = thermal_cooling_device_register(
+ (char *)psy->name,
+ psy, &psy_tcd_ops);
+ if (IS_ERR(psy->tcd))
+ return PTR_ERR(psy->tcd);
+ break;
+ }
+ }
+ return 0;
+}
+
+static void psy_unregister_cooler(struct power_supply *psy)
+{
+ if (IS_ERR_OR_NULL(psy->tcd))
+ return;
+ thermal_cooling_device_unregister(psy->tcd);
+}
#else
static int psy_register_thermal(struct power_supply *psy)
{
@@ -225,6 +305,15 @@ static int psy_register_thermal(struct power_supply *psy)
static void psy_unregister_thermal(struct power_supply *psy)
{
}
+
+static int psy_register_cooler(struct power_supply *psy)
+{
+ return 0;
+}
+
+static void psy_unregister_cooler(struct power_supply *psy)
+{
+}
#endif
int power_supply_register(struct device *parent, struct power_supply *psy)
@@ -259,6 +348,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto register_thermal_failed;
+ rc = psy_register_cooler(psy);
+ if (rc)
+ goto register_cooler_failed;
+
rc = power_supply_create_triggers(psy);
if (rc)
goto create_triggers_failed;
@@ -268,6 +361,8 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto success;
create_triggers_failed:
+ psy_unregister_cooler(psy);
+register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
device_del(dev);
@@ -284,6 +379,7 @@ void power_supply_unregister(struct power_supply *psy)
cancel_work_sync(&psy->changed_work);
sysfs_remove_link(&psy->dev->kobj, "powers");
power_supply_remove_triggers(psy);
+ psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
device_unregister(psy->dev);
}
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 395c2cfa16c0..29178f78d73c 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -55,7 +55,8 @@ static ssize_t power_supply_show_property(struct device *dev,
};
static char *health_text[] = {
"Unknown", "Good", "Overheat", "Dead", "Over voltage",
- "Unspecified failure", "Cold",
+ "Unspecified failure", "Cold", "Watchdog timer expire",
+ "Safety timer expire"
};
static char *technology_text[] = {
"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
@@ -164,6 +165,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(constant_charge_current_max),
POWER_SUPPLY_ATTR(constant_charge_voltage),
POWER_SUPPLY_ATTR(constant_charge_voltage_max),
+ POWER_SUPPLY_ATTR(charge_control_limit),
+ POWER_SUPPLY_ATTR(charge_control_limit_max),
POWER_SUPPLY_ATTR(energy_full_design),
POWER_SUPPLY_ATTR(energy_empty_design),
POWER_SUPPLY_ATTR(energy_full),
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 6461b489fb09..1ae65b822864 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -13,3 +13,20 @@ config POWER_RESET_GPIO
This driver supports turning off your board via a GPIO line.
If your board needs a GPIO high/low to power down, say Y and
create a binding in your devicetree.
+
+config POWER_RESET_QNAP
+ bool "QNAP power-off driver"
+ depends on OF_GPIO && POWER_RESET && PLAT_ORION
+ help
+ This driver supports turning off QNAP NAS devices by sending
+ commands to the microcontroller which controls the main power.
+
+ Say Y if you have a QNAP NAS.
+
+config POWER_RESET_RESTART
+ bool "Restart power-off driver"
+ depends on ARM
+ help
+ Some boards don't actually have the ability to power off.
+ Instead they restart, and u-boot holds the SoC until the
+ user presses a key. u-boot then boots into Linux.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 751488a4a0c5..0f317f50c56f 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1 +1,3 @@
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
+obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
+obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o \ No newline at end of file
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index 0491e5335d02..e290d48ddd99 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -29,15 +29,16 @@ static int gpio_active_low;
static void gpio_poweroff_do_poweroff(void)
{
- BUG_ON(gpio_num == -1);
+ BUG_ON(!gpio_is_valid(gpio_num));
- /* drive it active */
+ /* drive it active, also inactive->active edge */
gpio_direction_output(gpio_num, !gpio_active_low);
mdelay(100);
- /* rising edge or drive inactive */
+ /* drive inactive, also active->inactive edge */
gpio_set_value(gpio_num, gpio_active_low);
mdelay(100);
- /* falling edge */
+
+ /* drive it active, also inactive->active edge */
gpio_set_value(gpio_num, !gpio_active_low);
/* give it some time */
@@ -46,7 +47,7 @@ static void gpio_poweroff_do_poweroff(void)
WARN_ON(1);
}
-static int __devinit gpio_poweroff_probe(struct platform_device *pdev)
+static int gpio_poweroff_probe(struct platform_device *pdev)
{
enum of_gpio_flags flags;
bool input = false;
@@ -60,15 +61,12 @@ static int __devinit gpio_poweroff_probe(struct platform_device *pdev)
}
gpio_num = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
- if (gpio_num < 0) {
- pr_err("%s: Could not get GPIO configuration: %d",
- __func__, gpio_num);
- return -ENODEV;
- }
+ if (!gpio_is_valid(gpio_num))
+ return gpio_num;
+
gpio_active_low = flags & OF_GPIO_ACTIVE_LOW;
- if (of_get_property(pdev->dev.of_node, "input", NULL))
- input = true;
+ input = of_property_read_bool(pdev->dev.of_node, "input");
ret = gpio_request(gpio_num, "poweroff-gpio");
if (ret) {
@@ -96,10 +94,9 @@ err:
return -ENODEV;
}
-static int __devexit gpio_poweroff_remove(struct platform_device *pdev)
+static int gpio_poweroff_remove(struct platform_device *pdev)
{
- if (gpio_num != -1)
- gpio_free(gpio_num);
+ gpio_free(gpio_num);
if (pm_power_off == &gpio_poweroff_do_poweroff)
pm_power_off = NULL;
@@ -113,17 +110,17 @@ static const struct of_device_id of_gpio_poweroff_match[] = {
static struct platform_driver gpio_poweroff_driver = {
.probe = gpio_poweroff_probe,
- .remove = __devexit_p(gpio_poweroff_remove),
+ .remove = gpio_poweroff_remove,
.driver = {
- .name = "poweroff-gpio",
- .owner = THIS_MODULE,
- .of_match_table = of_gpio_poweroff_match,
- },
+ .name = "poweroff-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_gpio_poweroff_match,
+ },
};
module_platform_driver(gpio_poweroff_driver);
MODULE_AUTHOR("Jamie Lentin <jm@lentin.co.uk>");
MODULE_DESCRIPTION("GPIO poweroff driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:poweroff-gpio");
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
new file mode 100644
index 000000000000..37f56f7ee926
--- /dev/null
+++ b/drivers/power/reset/qnap-poweroff.c
@@ -0,0 +1,116 @@
+/*
+ * QNAP Turbo NAS Board power off
+ *
+ * Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch>
+ *
+ * Based on the code from:
+ *
+ * Copyright (C) 2009 Martin Michlmayr <tbm@cyrius.com>
+ * Copyright (C) 2008 Byron Bradley <byron.bbradley@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/serial_reg.h>
+#include <linux/kallsyms.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#define UART1_REG(x) (base + ((UART_##x) << 2))
+
+static void __iomem *base;
+static unsigned long tclk;
+
+static void qnap_power_off(void)
+{
+ /* 19200 baud divisor */
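+ /*
+ * Rounded-divisor sketch, assuming purely for illustration tclk = 200000000:
+ * divisor = (200000000 + 8 * 19200) / (16 * 19200) = 651
+ */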
+ const unsigned divisor = ((tclk + (8 * 19200)) / (16 * 19200));
+
+ pr_err("%s: triggering power-off...\n", __func__);
+
+ /* hijack UART1 and reset into sane state (19200,8n1) */
+ writel(0x83, UART1_REG(LCR));
+ writel(divisor & 0xff, UART1_REG(DLL));
+ writel((divisor >> 8) & 0xff, UART1_REG(DLM));
+ writel(0x03, UART1_REG(LCR));
+ writel(0x00, UART1_REG(IER));
+ writel(0x00, UART1_REG(FCR));
+ writel(0x00, UART1_REG(MCR));
+
+ /* send the power-off command 'A' to PIC */
+ writel('A', UART1_REG(TX));
+}
+
+static int qnap_power_off_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct clk *clk;
+ char symname[KSYM_NAME_LEN];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Missing resource");
+ return -EINVAL;
+ }
+
+ base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "Unable to map resource");
+ return -EINVAL;
+ }
+
+ /* We need to know tclk in order to calculate the UART divisor */
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Clk missing");
+ return PTR_ERR(clk);
+ }
+
+ tclk = clk_get_rate(clk);
+
+ /* Check that nothing else has already setup a handler */
+ if (pm_power_off) {
+ lookup_symbol_name((ulong)pm_power_off, symname);
+ dev_err(&pdev->dev,
+ "pm_power_off already claimed %p %s",
+ pm_power_off, symname);
+ return -EBUSY;
+ }
+ pm_power_off = qnap_power_off;
+
+ return 0;
+}
+
+static int qnap_power_off_remove(struct platform_device *pdev)
+{
+ pm_power_off = NULL;
+ return 0;
+}
+
+static const struct of_device_id qnap_power_off_of_match_table[] = {
+ { .compatible = "qnap,power-off", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qnap_power_off_of_match_table);
+
+static struct platform_driver qnap_power_off_driver = {
+ .probe = qnap_power_off_probe,
+ .remove = qnap_power_off_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "qnap_power_off",
+ .of_match_table = of_match_ptr(qnap_power_off_of_match_table),
+ },
+};
+module_platform_driver(qnap_power_off_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("QNAP Power off driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
new file mode 100644
index 000000000000..059cd1501e2a
--- /dev/null
+++ b/drivers/power/reset/restart-poweroff.c
@@ -0,0 +1,65 @@
+/*
+ * Power off by restarting and let u-boot keep hold of the machine
+ * until the user presses a button for example.
+ *
+ * Andrew Lunn <andrew@lunn.ch>
+ *
+ * Copyright (C) 2012 Andrew Lunn
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <asm/system_misc.h>
+
+static void restart_poweroff_do_poweroff(void)
+{
+ arm_pm_restart('h', NULL);
+}
+
+static int restart_poweroff_probe(struct platform_device *pdev)
+{
+ /* If a pm_power_off function has already been added, leave it alone */
+ if (pm_power_off != NULL) {
+ dev_err(&pdev->dev,
+ "pm_power_off function already registered");
+ return -EBUSY;
+ }
+
+ pm_power_off = &restart_poweroff_do_poweroff;
+ return 0;
+}
+
+static int restart_poweroff_remove(struct platform_device *pdev)
+{
+ if (pm_power_off == &restart_poweroff_do_poweroff)
+ pm_power_off = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id of_restart_poweroff_match[] = {
+ { .compatible = "restart-poweroff", },
+ {},
+};
+
+static struct platform_driver restart_poweroff_driver = {
+ .probe = restart_poweroff_probe,
+ .remove = restart_poweroff_remove,
+ .driver = {
+ .name = "poweroff-restart",
+ .owner = THIS_MODULE,
+ .of_match_table = of_restart_poweroff_match,
+ },
+};
+module_platform_driver(restart_poweroff_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
+MODULE_DESCRIPTION("restart poweroff driver");
+MODULE_LICENSE("GPLv2");
+MODULE_ALIAS("platform:poweroff-restart");
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
new file mode 100644
index 000000000000..8208888b844e
--- /dev/null
+++ b/drivers/power/rx51_battery.c
@@ -0,0 +1,251 @@
+/*
+ * Nokia RX-51 battery driver
+ *
+ * Copyright (C) 2012 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/i2c/twl4030-madc.h>
+
+struct rx51_device_info {
+ struct device *dev;
+ struct power_supply bat;
+};
+
+/*
+ * Read ADCIN channel value, code copied from maemo kernel
+ */
+static int rx51_battery_read_adc(int channel)
+{
+ struct twl4030_madc_request req;
+
+ req.channels = 1 << channel;
+ req.do_avg = 1;
+ req.method = TWL4030_MADC_SW1;
+ req.func_cb = NULL;
+ req.type = TWL4030_MADC_WAIT;
+
+ if (twl4030_madc_conversion(&req) <= 0)
+ return -ENODATA;
+
+ return req.rbuf[channel];
+}
+
+/*
+ * Read ADCIN channel 12 (voltage) and convert the raw value to microvolts
+ * This conversion formula was extracted from maemo program bsi-read
+ */
+static int rx51_battery_read_voltage(struct rx51_device_info *di)
+{
+ int voltage = rx51_battery_read_adc(12);
+
+ if (voltage < 0)
+ return voltage;
+
+ return 1000 * (10000 * voltage / 1705);
+}
+
+/*
+ * Temperature look-up tables
+ * TEMP = (1/(t1 + 1/298) - 273.15)
+ * Where t1 = (1/B) * ln((RAW_ADC_U * 2.5)/(R * I * 255))
+ * Formula is based on experimental data, RX-51 CAL data, maemo program bme
+ * and formula from da9052 driver with values R = 100, B = 3380, I = 0.00671
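+ *
+ * Worked example (rounded): for RAW = 170,
+ * t1 = ln((170 * 2.5) / (100 * 0.00671 * 255)) / 3380 ~= 0.00027, so
+ * TEMP = 1/(t1 + 1/298) - 273.15 ~= 2.7 C, consistent with the inverse
+ * table below where raw values 168..175 map to 2 C.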
+ */
+
+/*
+ * Table1 (temperature for first 25 RAW values)
+ * Usage: TEMP = rx51_temp_table1[RAW]
+ * RAW is between 1 and 24
+ * TEMP is between 201 C and 55 C
+ */
+static u8 rx51_temp_table1[] = {
+ 255, 201, 159, 138, 124, 114, 106, 99, 94, 89, 85, 82, 78, 75,
+ 73, 70, 68, 66, 64, 62, 61, 59, 57, 56, 55
+};
+
+/*
+ * Table2 (lowest RAW value for temperature)
+ * Usage: RAW = rx51_temp_table2[rx51_temp_table2_first - TEMP]
+ * TEMP is between 53 C and -32 C
+ * RAW is between 25 and 993
+ */
+#define rx51_temp_table2_first 53
+static u16 rx51_temp_table2[] = {
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 39,
+ 40, 41, 43, 44, 46, 48, 49, 51, 53, 55, 57, 59, 61, 64,
+ 66, 69, 71, 74, 77, 80, 83, 86, 90, 94, 97, 101, 106, 110,
+ 115, 119, 125, 130, 136, 141, 148, 154, 161, 168, 176, 184, 202, 211,
+ 221, 231, 242, 254, 266, 279, 293, 308, 323, 340, 357, 375, 395, 415,
+ 437, 460, 485, 511, 539, 568, 600, 633, 669, 706, 747, 790, 836, 885,
+ 937, 993, 1024
+};
+
+/*
+ * Read ADCIN channel 0 (battery temp) and convert value to tenths of Celsius
+ * Use the temperature look-up tables for the conversion
+ */
+static int rx51_battery_read_temperature(struct rx51_device_info *di)
+{
+ int min = 0;
+ int max = ARRAY_SIZE(rx51_temp_table2) - 1;
+ int raw = rx51_battery_read_adc(0);
+
+ /* Zero and negative values are undefined */
+ if (raw <= 0)
+ return INT_MAX;
+
+ /* ADC channels are 10 bit, higher values are undefined */
+ if (raw >= (1 << 10))
+ return INT_MIN;
+
+ /* First check for temperature in first direct table */
+ if (raw < ARRAY_SIZE(rx51_temp_table1))
+ return rx51_temp_table1[raw] * 100;
+
+ /* Binary search RAW value in second inverse table */
+ while (max - min > 1) {
+ int mid = (max + min) / 2;
+ if (rx51_temp_table2[mid] <= raw)
+ min = mid;
+ else if (rx51_temp_table2[mid] > raw)
+ max = mid;
+ if (rx51_temp_table2[mid] == raw)
+ break;
+ }
+
+ return (rx51_temp_table2_first - min) * 100;
+}
+
+/*
+ * Read ADCIN channel 4 (BSI) and convert RAW value to micro Ah
+ * This conversion formula was extracted from maemo program bsi-read
+ */
+static int rx51_battery_read_capacity(struct rx51_device_info *di)
+{
+ int capacity = rx51_battery_read_adc(4);
+
+ if (capacity < 0)
+ return capacity;
+
+ return 1280 * (1200 * capacity)/(1024 - capacity);
+}
+
+/*
+ * Return power_supply property
+ */
+static int rx51_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct rx51_device_info *di = container_of((psy),
+ struct rx51_device_info, bat);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = 4200000;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = rx51_battery_read_voltage(di) ? 1 : 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = rx51_battery_read_voltage(di);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = rx51_battery_read_temperature(di);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = rx51_battery_read_capacity(di);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val->intval == INT_MAX || val->intval == INT_MIN)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum power_supply_property rx51_battery_props[] = {
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+};
+
+static int rx51_battery_probe(struct platform_device *pdev)
+{
+ struct rx51_device_info *di;
+ int ret;
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, di);
+
+ di->dev = &pdev->dev;
+ di->bat.name = dev_name(&pdev->dev);
+ di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->bat.properties = rx51_battery_props;
+ di->bat.num_properties = ARRAY_SIZE(rx51_battery_props);
+ di->bat.get_property = rx51_battery_get_property;
+
+ ret = power_supply_register(di->dev, &di->bat);
+ if (ret) {
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rx51_battery_remove(struct platform_device *pdev)
+{
+ struct rx51_device_info *di = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&di->bat);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static struct platform_driver rx51_battery_driver = {
+ .probe = rx51_battery_probe,
+ .remove = rx51_battery_remove,
+ .driver = {
+ .name = "rx51-battery",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(rx51_battery_driver);
+
+MODULE_ALIAS("platform:rx51-battery");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_DESCRIPTION("Nokia RX-51 battery driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index f9e70cf08199..a69d0d11b540 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -114,12 +114,12 @@ static int twl4030_clear_set(u8 mod_no, u8 clear, u8 set, u8 reg)
static int twl4030_bci_read(u8 reg, u8 *val)
{
- return twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, val, reg);
+ return twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, val, reg);
}
static int twl4030_clear_set_boot_bci(u8 clear, u8 set)
{
- return twl4030_clear_set(TWL4030_MODULE_PM_MASTER, clear,
+ return twl4030_clear_set(TWL_MODULE_PM_MASTER, clear,
TWL4030_CONFIG_DONE | TWL4030_BCIAUTOWEN | set,
TWL4030_PM_MASTER_BOOT_BCI);
}
@@ -152,7 +152,7 @@ static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
int ret;
u8 hwsts;
- ret = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &hwsts,
+ ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &hwsts,
TWL4030_PM_MASTER_STS_HW_CONDITIONS);
if (ret < 0)
return 0;
@@ -199,7 +199,7 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
return ret;
/* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
- ret = twl4030_clear_set(TWL4030_MODULE_MAIN_CHARGE, 0,
+ ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0,
TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
} else {
ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOUSB, 0);
@@ -238,7 +238,7 @@ static int twl4030_charger_enable_backup(int uvolt, int uamp)
if (uvolt < 2500000 ||
uamp < 25) {
/* disable charging of backup battery */
- ret = twl4030_clear_set(TWL4030_MODULE_PM_RECEIVER,
+ ret = twl4030_clear_set(TWL_MODULE_PM_RECEIVER,
TWL4030_BBCHEN, 0, TWL4030_BB_CFG);
return ret;
}
@@ -262,7 +262,7 @@ static int twl4030_charger_enable_backup(int uvolt, int uamp)
else
flags |= TWL4030_BBISEL_25uA;
- ret = twl4030_clear_set(TWL4030_MODULE_PM_RECEIVER,
+ ret = twl4030_clear_set(TWL_MODULE_PM_RECEIVER,
TWL4030_BBSEL_MASK | TWL4030_BBISEL_MASK,
flags,
TWL4030_BB_CFG);
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 445197d4a8c4..6efd9b60d8ff 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -17,7 +17,7 @@ config PPS_CLIENT_KTIMER
config PPS_CLIENT_LDISC
tristate "PPS line discipline"
- depends on PPS
+ depends on PPS && TTY
help
If you say yes here you get support for a PPS source connected
with the CD (Carrier Detect) pin of your serial port.
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 655055545479..2bf0c1b608dd 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -196,7 +196,7 @@ static int pps_gpio_remove(struct platform_device *pdev)
static struct platform_driver pps_gpio_driver = {
.probe = pps_gpio_probe,
- .remove = __devexit_p(pps_gpio_remove),
+ .remove = pps_gpio_remove,
.driver = {
.name = PPS_GPIO_NAME,
.owner = THIS_MODULE
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 79451f2dea6a..73bd3bb4d93b 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -25,18 +25,27 @@
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/pps_kernel.h>
+#include <linux/bug.h>
#define PPS_TTY_MAGIC 0x0001
-static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
- struct pps_event_time *ts)
+static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status)
{
- struct pps_device *pps = (struct pps_device *)tty->disc_data;
+ struct pps_device *pps;
+ struct pps_event_time ts;
+
+ pps_get_ts(&ts);
- BUG_ON(pps == NULL);
+ pps = pps_lookup_dev(tty);
+ /*
+ * This should never fail, but the ldisc locking is very
+ * convoluted, so don't crash just in case.
+ */
+ if (WARN_ON_ONCE(pps == NULL))
+ return;
/* Now do the PPS event report */
- pps_event(pps, ts, status ? PPS_CAPTUREASSERT :
+ pps_event(pps, &ts, status ? PPS_CAPTUREASSERT :
PPS_CAPTURECLEAR, NULL);
dev_dbg(pps->dev, "PPS %s at %lu\n",
@@ -67,9 +76,9 @@ static int pps_tty_open(struct tty_struct *tty)
pr_err("cannot register PPS source \"%s\"\n", info.path);
return -ENOMEM;
}
- tty->disc_data = pps;
+ pps->lookup_cookie = tty;
- /* Should open N_TTY ldisc too */
+ /* Now open the base class N_TTY ldisc */
ret = alias_n_tty_open(tty);
if (ret < 0) {
pr_err("cannot open tty ldisc \"%s\"\n", info.path);
@@ -81,7 +90,6 @@ static int pps_tty_open(struct tty_struct *tty)
return 0;
err_unregister:
- tty->disc_data = NULL;
pps_unregister_source(pps);
return ret;
}
@@ -90,11 +98,13 @@ static void (*alias_n_tty_close)(struct tty_struct *tty);
static void pps_tty_close(struct tty_struct *tty)
{
- struct pps_device *pps = (struct pps_device *)tty->disc_data;
+ struct pps_device *pps = pps_lookup_dev(tty);
alias_n_tty_close(tty);
- tty->disc_data = NULL;
+ if (WARN_ON(!pps))
+ return;
+
dev_info(pps->dev, "removed\n");
pps_unregister_source(pps);
}
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 2420d5af0583..6437703eb10f 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -247,12 +247,15 @@ static int pps_cdev_open(struct inode *inode, struct file *file)
struct pps_device *pps = container_of(inode->i_cdev,
struct pps_device, cdev);
file->private_data = pps;
-
+ kobject_get(&pps->dev->kobj);
return 0;
}
static int pps_cdev_release(struct inode *inode, struct file *file)
{
+ struct pps_device *pps = container_of(inode->i_cdev,
+ struct pps_device, cdev);
+ kobject_put(&pps->dev->kobj);
return 0;
}
@@ -274,8 +277,10 @@ static void pps_device_destruct(struct device *dev)
{
struct pps_device *pps = dev_get_drvdata(dev);
- /* release id here to protect others from using it while it's
- * still in use */
+ cdev_del(&pps->cdev);
+
+ /* Now we can release the ID for re-use */
+ pr_debug("deallocating pps%d\n", pps->id);
mutex_lock(&pps_idr_lock);
idr_remove(&pps_idr, pps->id);
mutex_unlock(&pps_idr_lock);
@@ -332,6 +337,7 @@ int pps_register_cdev(struct pps_device *pps)
goto del_cdev;
}
+ /* Override the release function with our own */
pps->dev->release = pps_device_destruct;
pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
@@ -352,11 +358,44 @@ free_idr:
void pps_unregister_cdev(struct pps_device *pps)
{
+ pr_debug("unregistering pps%d\n", pps->id);
+ pps->lookup_cookie = NULL;
device_destroy(pps_class, pps->dev->devt);
- cdev_del(&pps->cdev);
}
/*
+ * Look up a pps device by magic cookie.
+ * The cookie is usually a pointer to some enclosing device, but this
+ * code doesn't care; you should never be dereferencing it.
+ *
+ * This is a bit of a kludge that is currently used only by the PPS
+ * serial line discipline. It may need to be tweaked when a second user
+ * is found.
+ *
+ * There is no function interface for setting the lookup_cookie field.
+ * It's initialized to NULL when the pps device is created, and if a
+ * client wants to use it, just fill it in afterward.
+ *
+ * The cookie is automatically set to NULL in pps_unregister_source()
+ * so that it will not be used again, even if the pps device cannot
+ * be removed from the idr due to pending references holding the minor
+ * number in use.
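+ *
+ * Usage sketch, as the PPS line discipline above does: pps_tty_open()
+ * sets pps->lookup_cookie = tty right after registering the source, and
+ * pps_tty_dcd_change() later retrieves the device via pps_lookup_dev(tty).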
+ */
+struct pps_device *pps_lookup_dev(void const *cookie)
+{
+ struct pps_device *pps;
+ unsigned id;
+
+ rcu_read_lock();
+ idr_for_each_entry(&pps_idr, pps, id)
+ if (cookie == pps->lookup_cookie)
+ break;
+ rcu_read_unlock();
+ return pps;
+}
+EXPORT_SYMBOL(pps_lookup_dev);
+
+/*
* Module stuff
*/
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 643697f71390..b139b7792e9f 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -1185,7 +1185,7 @@ int ps3_lpm_close(void)
}
EXPORT_SYMBOL_GPL(ps3_lpm_close);
-static int __devinit ps3_lpm_probe(struct ps3_system_bus_device *dev)
+static int ps3_lpm_probe(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, " -> %s:%u\n", __func__, __LINE__);
diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
index 1b98367110c4..f2ab435954f6 100644
--- a/drivers/ps3/ps3-sys-manager.c
+++ b/drivers/ps3/ps3-sys-manager.c
@@ -706,7 +706,7 @@ static void ps3_sys_manager_work(struct ps3_system_bus_device *dev)
ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
}
-static int __devinit ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
+static int ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_sys_manager_ops ops;
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 93d0a8b7718a..437fc35beb7b 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -932,7 +932,7 @@ int ps3av_audio_mute(int mute)
}
EXPORT_SYMBOL_GPL(ps3av_audio_mute);
-static int __devinit ps3av_probe(struct ps3_system_bus_device *dev)
+static int ps3av_probe(struct ps3_system_bus_device *dev)
{
int res;
int id;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index ed81720e7b2b..e513cd998170 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -112,6 +112,17 @@ config PWM_SAMSUNG
To compile this driver as a module, choose M here: the module
will be called pwm-samsung.
+config PWM_SPEAR
+ tristate "STMicroelectronics SPEAr PWM support"
+ depends on PLAT_SPEAR
+ depends on OF
+ help
+ Generic PWM framework driver for the PWM controller on ST
+ SPEAr SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-spear.
+
config PWM_TEGRA
tristate "NVIDIA Tegra PWM support"
depends on ARCH_TEGRA
@@ -125,6 +136,7 @@ config PWM_TEGRA
config PWM_TIECAP
tristate "ECAP PWM support"
depends on SOC_AM33XX
+ select PWM_TIPWMSS
help
PWM driver support for the ECAP APWM controller found on AM33XX
TI SOC
@@ -135,6 +147,7 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
depends on SOC_AM33XX
+ select PWM_TIPWMSS
help
PWM driver support for the EHRPWM controller found on AM33XX
TI SOC
@@ -142,14 +155,32 @@ config PWM_TIEHRPWM
To compile this driver as a module, choose M here: the module
will be called pwm-tiehrpwm.
-config PWM_TWL6030
- tristate "TWL6030 PWM support"
+config PWM_TIPWMSS
+ bool
+ depends on SOC_AM33XX && (PWM_TIEHRPWM || PWM_TIECAP)
+ help
+ PWM Subsystem driver support for AM33xx SOC.
+
+ PWM submodules require PWM config space access from submodule
+ drivers and require common parent driver support.
+
+config PWM_TWL
+ tristate "TWL4030/6030 PWM support"
+ depends on TWL4030_CORE
+ help
+ Generic PWM framework driver for TWL4030/6030.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-twl.
+
+config PWM_TWL_LED
+ tristate "TWL4030/6030 PWM support for LED drivers"
depends on TWL4030_CORE
help
- Generic PWM framework driver for TWL6030.
+ Generic PWM framework driver for TWL4030/6030 LED terminals.
To compile this driver as a module, choose M here: the module
- will be called pwm-twl6030.
+ will be called pwm-twl-led.
config PWM_VT8500
tristate "vt8500 pwm support"
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index acfe4821c58b..62a2963cfe58 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
+obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
-obj-$(CONFIG_PWM_TWL6030) += pwm-twl6030.o
+obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o
+obj-$(CONFIG_PWM_TWL) += pwm-twl.o
+obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index f5acdaa52707..903138b18842 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -32,6 +32,9 @@
#define MAX_PWMS 1024
+/* flags in the third cell of the DT PWM specifier */
+#define PWM_SPEC_POLARITY (1 << 0)
+
static DEFINE_MUTEX(pwm_lookup_lock);
static LIST_HEAD(pwm_lookup_list);
static DEFINE_MUTEX(pwm_lock);
@@ -129,6 +132,32 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
return 0;
}
+struct pwm_device *
+of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ if (pc->of_pwm_n_cells < 3)
+ return ERR_PTR(-EINVAL);
+
+ if (args->args[0] >= pc->npwm)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm_set_period(pwm, args->args[1]);
+
+ if (args->args[2] & PWM_SPEC_POLARITY)
+ pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
+ else
+ pwm_set_polarity(pwm, PWM_POLARITY_NORMAL);
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
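+
+/*
+ * Consumer sketch for the three-cell specifier handled above (channel
+ * index, period in nanoseconds, flags; bit 0 requests inverted polarity),
+ * assuming a controller node labelled "pwm" in the device tree:
+ *
+ * pwms = <&pwm 0 5000000 1>; (channel 0, 5 ms period, inverted)
+ */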
+
static struct pwm_device *
of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
{
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 8f26e9fcea97..3f5677b7690e 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -235,7 +235,7 @@ static int imx_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(imx_pwm_dt_ids, &pdev->dev);
- struct imx_pwm_data *data;
+ const struct imx_pwm_data *data;
struct imx_chip *imx;
struct resource *r;
int ret = 0;
@@ -274,9 +274,9 @@ static int imx_pwm_probe(struct platform_device *pdev)
return -ENODEV;
}
- imx->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
- if (imx->mmio_base == NULL)
- return -EADDRNOTAVAIL;
+ imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(imx->mmio_base))
+ return PTR_ERR(imx->mmio_base);
data = of_id->data;
imx->config = data->config;
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 015a82235620..b3f0d0dfd748 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -49,9 +49,24 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
c = 0; /* 0 set division by 256 */
period_cycles = c;
+ /* The duty-cycle value is as follows:
+ *
+ * DUTY-CYCLE HIGH LEVEL
+ * 1 99.9%
+ * 25 90.0%
+ * 128 50.0%
+ * 220 10.0%
+ * 255 0.1%
+ * 0 0.0%
+ *
+ * In other words, the register value is duty-cycle % 256 with
+ * duty-cycle in the range 1-256.
+ */
c = 256 * duty_ns;
do_div(c, period_ns);
- duty_cycles = c;
+ if (c > 255)
+ c = 255;
+ duty_cycles = 256 - c;
writel(PWM_ENABLE | PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles),
lpc32xx->base + (pwm->hwpwm << 2));
@@ -95,9 +110,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- lpc32xx->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!lpc32xx->base)
- return -EADDRNOTAVAIL;
+ lpc32xx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(lpc32xx->base))
+ return PTR_ERR(lpc32xx->base);
lpc32xx->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(lpc32xx->clk))
@@ -106,6 +121,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.dev = &pdev->dev;
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
lpc32xx->chip.npwm = 2;
+ lpc32xx->chip.base = -1;
ret = pwmchip_add(&lpc32xx->chip);
if (ret < 0) {
@@ -121,8 +137,11 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
static int lpc32xx_pwm_remove(struct platform_device *pdev)
{
struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < lpc32xx->chip.npwm; i++)
+ pwm_disable(&lpc32xx->chip.pwms[i]);
- clk_disable(lpc32xx->clk);
return pwmchip_remove(&lpc32xx->chip);
}
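A worked example of the inverted duty-cycle register described in the comment above, using assumed inputs that are not part of the patch:
    /* Assumed: period_ns = 1000000 (1 ms), duty_ns = 250000 (25%). */
    u64 c = 256ULL * 250000;
    unsigned int duty_cycles;
    do_div(c, 1000000);            /* c = 64 */
    if (c > 255)
            c = 255;               /* a ~100% request saturates here */
    duty_cycles = 256 - c;         /* 192: roughly 25% high per the table */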
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 7ec345f01831..a53d3094b75a 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -139,9 +139,9 @@ static int mxs_pwm_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxs->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!mxs->base)
- return -EADDRNOTAVAIL;
+ mxs->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mxs->base))
+ return PTR_ERR(mxs->base);
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl))
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c
index b882f6032fee..db964e6ecf5c 100644
--- a/drivers/pwm/pwm-puv3.c
+++ b/drivers/pwm/pwm-puv3.c
@@ -123,9 +123,9 @@ static int pwm_probe(struct platform_device *pdev)
return -ENODEV;
}
- puv3->base = devm_request_and_ioremap(&pdev->dev, r);
- if (puv3->base == NULL)
- return -EADDRNOTAVAIL;
+ puv3->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(puv3->base))
+ return PTR_ERR(puv3->base);
puv3->chip.dev = &pdev->dev;
puv3->chip.ops = &puv3_pwm_ops;
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index f32fc4e66e0c..20370e61de5a 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -165,9 +165,9 @@ static int pwm_probe(struct platform_device *pdev)
return -ENODEV;
}
- pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
- if (pwm->mmio_base == NULL)
- return -EADDRNOTAVAIL;
+ pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pwm->mmio_base))
+ return PTR_ERR(pwm->mmio_base);
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index e9b15d099c03..5207e6cd8648 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -222,6 +222,7 @@ static int s3c_pwm_probe(struct platform_device *pdev)
/* calculate base of control bits in TCON */
s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4;
+ s3c->pwm_id = id;
s3c->chip.dev = &pdev->dev;
s3c->chip.ops = &s3c_pwm_ops;
s3c->chip.base = -1;
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
new file mode 100644
index 000000000000..69a2d9eb34db
--- /dev/null
+++ b/drivers/pwm/pwm-spear.c
@@ -0,0 +1,276 @@
+/*
+ * ST Microelectronics SPEAr Pulse Width Modulator driver
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define NUM_PWM 4
+
+/* PWM registers and bits definitions */
+#define PWMCR 0x00 /* Control Register */
+#define PWMCR_PWM_ENABLE 0x1
+#define PWMCR_PRESCALE_SHIFT 2
+#define PWMCR_MIN_PRESCALE 0x00
+#define PWMCR_MAX_PRESCALE 0x3FFF
+
+#define PWMDCR 0x04 /* Duty Cycle Register */
+#define PWMDCR_MIN_DUTY 0x0001
+#define PWMDCR_MAX_DUTY 0xFFFF
+
+#define PWMPCR 0x08 /* Period Register */
+#define PWMPCR_MIN_PERIOD 0x0001
+#define PWMPCR_MAX_PERIOD 0xFFFF
+
+/* Following only available on 13xx SoCs */
+#define PWMMCR 0x3C /* Master Control Register */
+#define PWMMCR_PWM_ENABLE 0x1
+
+/**
+ * struct spear_pwm_chip - struct representing pwm chip
+ *
+ * @mmio_base: base address of pwm chip
+ * @clk: pointer to clk structure of pwm chip
+ * @chip: linux pwm chip representation
+ * @dev: pointer to device structure of pwm chip
+ */
+struct spear_pwm_chip {
+ void __iomem *mmio_base;
+ struct clk *clk;
+ struct pwm_chip chip;
+ struct device *dev;
+};
+
+static inline struct spear_pwm_chip *to_spear_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct spear_pwm_chip, chip);
+}
+
+static inline u32 spear_pwm_readl(struct spear_pwm_chip *chip, unsigned int num,
+ unsigned long offset)
+{
+ return readl_relaxed(chip->mmio_base + (num << 4) + offset);
+}
+
+static inline void spear_pwm_writel(struct spear_pwm_chip *chip,
+ unsigned int num, unsigned long offset,
+ unsigned long val)
+{
+ writel_relaxed(val, chip->mmio_base + (num << 4) + offset);
+}
+
+static int spear_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
+ u64 val, div, clk_rate;
+ unsigned long prescale = PWMCR_MIN_PRESCALE, pv, dc;
+ int ret;
+
+ /*
+ * Find pv, dc and prescale to suit duty_ns and period_ns. This is done
+ * according to formulas described below:
+ *
+ * period_ns = 10^9 * (PRESCALE + 1) * PV / PWM_CLK_RATE
+ * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
+ *
+ * PV = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
+ * DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
+ */
+ clk_rate = clk_get_rate(pc->clk);
+ while (1) {
+ div = 1000000000;
+ div *= 1 + prescale;
+ val = clk_rate * period_ns;
+ pv = div64_u64(val, div);
+ val = clk_rate * duty_ns;
+ dc = div64_u64(val, div);
+
+ /* if duty_ns and period_ns are not achievable then return */
+ if (pv < PWMPCR_MIN_PERIOD || dc < PWMDCR_MIN_DUTY)
+ return -EINVAL;
+
+ /*
+ * if pv and dc have crossed their upper limit, then increase
+ * prescale and recalculate pv and dc.
+ */
+ if (pv > PWMPCR_MAX_PERIOD || dc > PWMDCR_MAX_DUTY) {
+ if (++prescale > PWMCR_MAX_PRESCALE)
+ return -EINVAL;
+ continue;
+ }
+ break;
+ }
+
+ /*
+ * NOTE: the clock to PWM has to be enabled first before writing to the
+ * registers.
+ */
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return ret;
+
+ spear_pwm_writel(pc, pwm->hwpwm, PWMCR,
+ prescale << PWMCR_PRESCALE_SHIFT);
+ spear_pwm_writel(pc, pwm->hwpwm, PWMDCR, dc);
+ spear_pwm_writel(pc, pwm->hwpwm, PWMPCR, pv);
+ clk_disable(pc->clk);
+
+ return 0;
+}
+
+static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
+ int rc = 0;
+ u32 val;
+
+ rc = clk_enable(pc->clk);
+ if (rc)
+ return rc;
+
+ val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
+ val |= PWMCR_PWM_ENABLE;
+ spear_pwm_writel(pc, pwm->hwpwm, PWMCR, val);
+
+ return 0;
+}
+
+static void spear_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
+ u32 val;
+
+ val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
+ val &= ~PWMCR_PWM_ENABLE;
+ spear_pwm_writel(pc, pwm->hwpwm, PWMCR, val);
+
+ clk_disable(pc->clk);
+}
+
+static const struct pwm_ops spear_pwm_ops = {
+ .config = spear_pwm_config,
+ .enable = spear_pwm_enable,
+ .disable = spear_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int spear_pwm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spear_pwm_chip *pc;
+ struct resource *r;
+ int ret;
+ u32 val;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resources defined\n");
+ return -ENODEV;
+ }
+
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pc->mmio_base))
+ return PTR_ERR(pc->mmio_base);
+
+ pc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pc->clk))
+ return PTR_ERR(pc->clk);
+
+ pc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pc);
+
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &spear_pwm_ops;
+ pc->chip.base = -1;
+ pc->chip.npwm = NUM_PWM;
+
+ ret = clk_prepare(pc->clk);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(np, "st,spear1340-pwm")) {
+ ret = clk_enable(pc->clk);
+ if (ret) {
+ clk_unprepare(pc->clk);
+ return ret;
+ }
+ /*
+ * The following enables the PWM chip; channels are still
+ * enabled individually through their control registers
+ */
+ val = readl_relaxed(pc->mmio_base + PWMMCR);
+ val |= PWMMCR_PWM_ENABLE;
+ writel_relaxed(val, pc->mmio_base + PWMMCR);
+
+ clk_disable(pc->clk);
+ }
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+ clk_unprepare(pc->clk);
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int spear_pwm_remove(struct platform_device *pdev)
+{
+ struct spear_pwm_chip *pc = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < NUM_PWM; i++)
+ pwm_disable(&pc->chip.pwms[i]);
+
+ /* clk was prepared in probe, hence unprepare it here */
+ clk_unprepare(pc->clk);
+ return pwmchip_remove(&pc->chip);
+}
+
+static struct of_device_id spear_pwm_of_match[] = {
+ { .compatible = "st,spear320-pwm" },
+ { .compatible = "st,spear1340-pwm" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, spear_pwm_of_match);
+
+static struct platform_driver spear_pwm_driver = {
+ .driver = {
+ .name = "spear-pwm",
+ .of_match_table = spear_pwm_of_match,
+ },
+ .probe = spear_pwm_probe,
+ .remove = spear_pwm_remove,
+};
+
+module_platform_driver(spear_pwm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Shiraz Hashim <shiraz.hashim@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.com>");
+MODULE_ALIAS("platform:spear-pwm");
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 30c0e2b70ce8..71900e8cd3d1 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -186,9 +186,9 @@ static int tegra_pwm_probe(struct platform_device *pdev)
return -ENODEV;
}
- pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
- if (!pwm->mmio_base)
- return -EADDRNOTAVAIL;
+ pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pwm->mmio_base))
+ return PTR_ERR(pwm->mmio_base);
platform_set_drvdata(pdev, pwm);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 87c091b245cc..27a67d6b27c1 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -25,6 +25,10 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "pwm-tipwmss.h"
/* ECAP registers and bits definitions */
#define CAP1 0x08
@@ -184,12 +188,24 @@ static const struct pwm_ops ecap_pwm_ops = {
.owner = THIS_MODULE,
};
+static const struct of_device_id ecap_of_match[] = {
+ { .compatible = "ti,am33xx-ecap" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ecap_of_match);
+
static int ecap_pwm_probe(struct platform_device *pdev)
{
int ret;
struct resource *r;
struct clk *clk;
struct ecap_pwm_chip *pc;
+ u16 status;
+ struct pinctrl *pinctrl;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "unable to select pin group\n");
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc) {
@@ -211,6 +227,8 @@ static int ecap_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &ecap_pwm_ops;
+ pc->chip.of_xlate = of_pwm_xlate_with_flags;
+ pc->chip.of_pwm_n_cells = 3;
pc->chip.base = -1;
pc->chip.npwm = 1;
@@ -220,9 +238,9 @@ static int ecap_pwm_probe(struct platform_device *pdev)
return -ENODEV;
}
- pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
- if (!pc->mmio_base)
- return -EADDRNOTAVAIL;
+ pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pc->mmio_base))
+ return PTR_ERR(pc->mmio_base);
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@@ -231,14 +249,40 @@ static int ecap_pwm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ status = pwmss_submodule_state_change(pdev->dev.parent,
+ PWMSS_ECAPCLK_EN);
+ if (!(status & PWMSS_ECAPCLK_EN_ACK)) {
+ dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
+ ret = -EINVAL;
+ goto pwmss_clk_failure;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
platform_set_drvdata(pdev, pc);
return 0;
+
+pwmss_clk_failure:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pwmchip_remove(&pc->chip);
+ return ret;
}
static int ecap_pwm_remove(struct platform_device *pdev)
{
struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ /*
+ * Due to hardware misbehaviour, the acknowledgement of the stop_req
+ * is missing. Hence, checking of the status bit is skipped.
+ */
+ pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_STOP_REQ);
+ pm_runtime_put_sync(&pdev->dev);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
@@ -246,7 +290,9 @@ static int ecap_pwm_remove(struct platform_device *pdev)
static struct platform_driver ecap_pwm_driver = {
.driver = {
- .name = "ecap",
+ .name = "ecap",
+ .owner = THIS_MODULE,
+ .of_match_table = ecap_of_match,
},
.probe = ecap_pwm_probe,
.remove = ecap_pwm_remove,
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 9ffd389d0c8b..5a1399580533 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -25,6 +25,10 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "pwm-tipwmss.h"
/* EHRPWM registers and bits definitions */
@@ -115,6 +119,7 @@ struct ehrpwm_pwm_chip {
void __iomem *mmio_base;
unsigned long period_cycles[NUM_PWM_CHANNEL];
enum pwm_polarity polarity[NUM_PWM_CHANNEL];
+ struct clk *tbclk;
};
static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
@@ -335,6 +340,9 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Channels polarity can be configured from action qualifier module */
configure_polarity(pc, pwm->hwpwm);
+ /* Enable TBCLK before enabling PWM device */
+ clk_enable(pc->tbclk);
+
/* Enable time counter for free_run */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
return 0;
@@ -363,6 +371,9 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
+ /* Disabling TBCLK on PWM disable */
+ clk_disable(pc->tbclk);
+
/* Stop Time base counter */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
@@ -392,12 +403,24 @@ static const struct pwm_ops ehrpwm_pwm_ops = {
.owner = THIS_MODULE,
};
+static const struct of_device_id ehrpwm_of_match[] = {
+ { .compatible = "ti,am33xx-ehrpwm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehrpwm_of_match);
+
static int ehrpwm_pwm_probe(struct platform_device *pdev)
{
int ret;
struct resource *r;
struct clk *clk;
struct ehrpwm_pwm_chip *pc;
+ u16 status;
+ struct pinctrl *pinctrl;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "unable to select pin group\n");
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc) {
@@ -419,6 +442,8 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &ehrpwm_pwm_ops;
+ pc->chip.of_xlate = of_pwm_xlate_with_flags;
+ pc->chip.of_pwm_n_cells = 3;
pc->chip.base = -1;
pc->chip.npwm = NUM_PWM_CHANNEL;
@@ -428,9 +453,16 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
return -ENODEV;
}
- pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
- if (!pc->mmio_base)
- return -EADDRNOTAVAIL;
+ pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pc->mmio_base))
+ return PTR_ERR(pc->mmio_base);
+
+ /* Acquire tbclk for Time Base EHRPWM submodule */
+ pc->tbclk = devm_clk_get(&pdev->dev, "tbclk");
+ if (IS_ERR(pc->tbclk)) {
+ dev_err(&pdev->dev, "Failed to get tbclk\n");
+ return PTR_ERR(pc->tbclk);
+ }
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@@ -439,14 +471,40 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ status = pwmss_submodule_state_change(pdev->dev.parent,
+ PWMSS_EPWMCLK_EN);
+ if (!(status & PWMSS_EPWMCLK_EN_ACK)) {
+ dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
+ ret = -EINVAL;
+ goto pwmss_clk_failure;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
platform_set_drvdata(pdev, pc);
return 0;
+
+pwmss_clk_failure:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pwmchip_remove(&pc->chip);
+ return ret;
}
static int ehrpwm_pwm_remove(struct platform_device *pdev)
{
struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ /*
+ * Due to hardware misbehaviour, the acknowledgement of the stop_req
+ * is missing. Hence, checking of the status bit is skipped.
+ */
+ pwmss_submodule_state_change(pdev->dev.parent, PWMSS_EPWMCLK_STOP_REQ);
+ pm_runtime_put_sync(&pdev->dev);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
@@ -454,7 +512,9 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
static struct platform_driver ehrpwm_pwm_driver = {
.driver = {
- .name = "ehrpwm",
+ .name = "ehrpwm",
+ .owner = THIS_MODULE,
+ .of_match_table = ehrpwm_of_match,
},
.probe = ehrpwm_pwm_probe,
.remove = ehrpwm_pwm_remove,
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
new file mode 100644
index 000000000000..17cbc59660ec
--- /dev/null
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -0,0 +1,139 @@
+/*
+ * TI PWM Subsystem driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+
+#include "pwm-tipwmss.h"
+
+#define PWMSS_CLKCONFIG 0x8 /* Clock gating reg */
+#define PWMSS_CLKSTATUS 0xc /* Clock gating status reg */
+
+struct pwmss_info {
+ void __iomem *mmio_base;
+ struct mutex pwmss_lock;
+ u16 pwmss_clkconfig;
+};
+
+u16 pwmss_submodule_state_change(struct device *dev, int set)
+{
+ struct pwmss_info *info = dev_get_drvdata(dev);
+ u16 val;
+
+ mutex_lock(&info->pwmss_lock);
+ val = readw(info->mmio_base + PWMSS_CLKCONFIG);
+ val |= set;
+ writew(val, info->mmio_base + PWMSS_CLKCONFIG);
+ mutex_unlock(&info->pwmss_lock);
+
+ return readw(info->mmio_base + PWMSS_CLKSTATUS);
+}
+EXPORT_SYMBOL(pwmss_submodule_state_change);
+
+static const struct of_device_id pwmss_of_match[] = {
+ { .compatible = "ti,am33xx-pwmss" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pwmss_of_match);
+
+static int pwmss_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct pwmss_info *info;
+ struct device_node *node = pdev->dev.of_node;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&info->pwmss_lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(info->mmio_base))
+ return PTR_ERR(info->mmio_base);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ platform_set_drvdata(pdev, info);
+
+ /* Populate all the child nodes here... */
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "no child node found\n");
+
+ return ret;
+}
+
+static int pwmss_remove(struct platform_device *pdev)
+{
+ struct pwmss_info *info = platform_get_drvdata(pdev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ mutex_destroy(&info->pwmss_lock);
+ return 0;
+}
+
+static int pwmss_suspend(struct device *dev)
+{
+ struct pwmss_info *info = dev_get_drvdata(dev);
+
+ info->pwmss_clkconfig = readw(info->mmio_base + PWMSS_CLKCONFIG);
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int pwmss_resume(struct device *dev)
+{
+ struct pwmss_info *info = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ writew(info->pwmss_clkconfig, info->mmio_base + PWMSS_CLKCONFIG);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pwmss_pm_ops, pwmss_suspend, pwmss_resume);
+
+static struct platform_driver pwmss_driver = {
+ .driver = {
+ .name = "pwmss",
+ .owner = THIS_MODULE,
+ .pm = &pwmss_pm_ops,
+ .of_match_table = pwmss_of_match,
+ },
+ .probe = pwmss_probe,
+ .remove = pwmss_remove,
+};
+
+module_platform_driver(pwmss_driver);
+
+MODULE_DESCRIPTION("PWM Subsystem driver");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-tipwmss.h b/drivers/pwm/pwm-tipwmss.h
new file mode 100644
index 000000000000..11f76a1e266b
--- /dev/null
+++ b/drivers/pwm/pwm-tipwmss.h
@@ -0,0 +1,39 @@
+/*
+ * TI PWM Subsystem driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TIPWMSS_H
+#define __TIPWMSS_H
+
+#ifdef CONFIG_PWM_TIPWMSS
+/* PWM subsystem clock gating */
+#define PWMSS_ECAPCLK_EN BIT(0)
+#define PWMSS_ECAPCLK_STOP_REQ BIT(1)
+#define PWMSS_EPWMCLK_EN BIT(8)
+#define PWMSS_EPWMCLK_STOP_REQ BIT(9)
+
+#define PWMSS_ECAPCLK_EN_ACK BIT(0)
+#define PWMSS_EPWMCLK_EN_ACK BIT(8)
+
+extern u16 pwmss_submodule_state_change(struct device *dev, int set);
+#else
+static inline u16 pwmss_submodule_state_change(struct device *dev, int set)
+{
+ /* return success status value */
+ return 0xFFFF;
+}
+#endif
+#endif /* __TIPWMSS_H */
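With CONFIG_PWM_TIPWMSS disabled, the inline stub above reports every status bit as set (0xFFFF), so the acknowledge tests in the eCAP and eHRPWM probes still pass and those drivers keep working without a PWMSS parent. The check in question, quoted from the eCAP hunk earlier in this series:
    status = pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_EN);
    if (!(status & PWMSS_ECAPCLK_EN_ACK)) {   /* never taken with the 0xFFFF stub */
            dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
            ret = -EINVAL;
            goto pwmss_clk_failure;
    }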
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
new file mode 100644
index 000000000000..9dfa0f3eca30
--- /dev/null
+++ b/drivers/pwm/pwm-twl-led.c
@@ -0,0 +1,344 @@
+/*
+ * Driver for TWL4030/6030 Pulse Width Modulator used as LED driver
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This driver is a complete rewrite of the former pwm-twl6030.c authored by:
+ * Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/i2c/twl.h>
+#include <linux/slab.h>
+
+/*
+ * This driver handles the PWM driven LED terminals of TWL4030 and TWL6030.
+ * To generate the signal on TWL4030:
+ * - LEDA uses PWMA
+ * - LEDB uses PWMB
+ * TWL6030 has one LED pin with dedicated LEDPWM
+ */
+
+#define TWL4030_LED_MAX 0x7f
+#define TWL6030_LED_MAX 0xff
+
+/* Registers, bits and macro for TWL4030 */
+#define TWL4030_LEDEN_REG 0x00
+#define TWL4030_PWMA_REG 0x01
+
+#define TWL4030_LEDXON (1 << 0)
+#define TWL4030_LEDXPWM (1 << 4)
+#define TWL4030_LED_PINS (TWL4030_LEDXON | TWL4030_LEDXPWM)
+#define TWL4030_LED_TOGGLE(led, x) ((x) << (led))
+
+/* Register, bits and macro for TWL6030 */
+#define TWL6030_LED_PWM_CTRL1 0xf4
+#define TWL6030_LED_PWM_CTRL2 0xf5
+
+#define TWL6040_LED_MODE_HW 0x00
+#define TWL6040_LED_MODE_ON 0x01
+#define TWL6040_LED_MODE_OFF 0x02
+#define TWL6040_LED_MODE_MASK 0x03
+
+struct twl_pwmled_chip {
+ struct pwm_chip chip;
+ struct mutex mutex;
+};
+
+static inline struct twl_pwmled_chip *to_twl(struct pwm_chip *chip)
+{
+ return container_of(chip, struct twl_pwmled_chip, chip);
+}
+
+static int twl4030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ int duty_cycle = DIV_ROUND_UP(duty_ns * TWL4030_LED_MAX, period_ns) + 1;
+ u8 pwm_config[2] = { 1, 0 };
+ int base, ret;
+
+ /*
+ * To configure the duty period:
+ * On-cycle is set to 1 (the minimum allowed value)
+ * The off time of 0 is not configurable, so the mapping is:
+ * 0 -> off cycle = 2,
+ * 1 -> off cycle = 2,
+ * 2 -> off cycle = 3,
+ * 126 -> off cycle 127,
+ * 127 -> off cycle 1
+ * When on cycle == off cycle the PWM output is always on
+ */
+ if (duty_cycle == 1)
+ duty_cycle = 2;
+ else if (duty_cycle > TWL4030_LED_MAX)
+ duty_cycle = 1;
+
+ base = pwm->hwpwm * 2 + TWL4030_PWMA_REG;
+
+ pwm_config[1] = duty_cycle;
+
+ ret = twl_i2c_write(TWL4030_MODULE_LED, pwm_config, base, 2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+
+ return ret;
+}
+
+static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
+ goto out;
+ }
+
+ val |= TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl4030_pwmled_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ int duty_cycle = (duty_ns * TWL6030_LED_MAX) / period_ns;
+ u8 on_time;
+ int ret;
+
+ on_time = duty_cycle & 0xff;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, on_time,
+ TWL6030_LED_PWM_CTRL1);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+
+ return ret;
+}
+
+static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6040_LED_MODE_MASK;
+ val |= TWL6040_LED_MODE_ON;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl6030_pwmled_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6040_LED_MODE_MASK;
+ val |= TWL6040_LED_MODE_OFF;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6040_LED_MODE_MASK;
+ val |= TWL6040_LED_MODE_OFF;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6040_LED_MODE_MASK;
+ val |= TWL6040_LED_MODE_HW;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static const struct pwm_ops twl4030_pwmled_ops = {
+ .enable = twl4030_pwmled_enable,
+ .disable = twl4030_pwmled_disable,
+ .config = twl4030_pwmled_config,
+};
+
+static const struct pwm_ops twl6030_pwmled_ops = {
+ .enable = twl6030_pwmled_enable,
+ .disable = twl6030_pwmled_disable,
+ .config = twl6030_pwmled_config,
+ .request = twl6030_pwmled_request,
+ .free = twl6030_pwmled_free,
+};
+
+static int twl_pwmled_probe(struct platform_device *pdev)
+{
+ struct twl_pwmled_chip *twl;
+ int ret;
+
+ twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
+ if (!twl)
+ return -ENOMEM;
+
+ if (twl_class_is_4030()) {
+ twl->chip.ops = &twl4030_pwmled_ops;
+ twl->chip.npwm = 2;
+ } else {
+ twl->chip.ops = &twl6030_pwmled_ops;
+ twl->chip.npwm = 1;
+ }
+
+ twl->chip.dev = &pdev->dev;
+ twl->chip.base = -1;
+
+ mutex_init(&twl->mutex);
+
+ ret = pwmchip_add(&twl->chip);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, twl);
+
+ return 0;
+}
+
+static int twl_pwmled_remove(struct platform_device *pdev)
+{
+ struct twl_pwmled_chip *twl = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&twl->chip);
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id twl_pwmled_of_match[] = {
+ { .compatible = "ti,twl4030-pwmled" },
+ { .compatible = "ti,twl6030-pwmled" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_pwmled_of_match);
+#endif
+
+static struct platform_driver twl_pwmled_driver = {
+ .driver = {
+ .name = "twl-pwmled",
+ .of_match_table = of_match_ptr(twl_pwmled_of_match),
+ },
+ .probe = twl_pwmled_probe,
+ .remove = twl_pwmled_remove,
+};
+module_platform_driver(twl_pwmled_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("PWM driver for TWL4030 and TWL6030 LED outputs");
+MODULE_ALIAS("platform:twl-pwmled");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
new file mode 100644
index 000000000000..e65db95d5e59
--- /dev/null
+++ b/drivers/pwm/pwm-twl.c
@@ -0,0 +1,359 @@
+/*
+ * Driver for TWL4030/6030 Generic Pulse Width Modulator
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/i2c/twl.h>
+#include <linux/slab.h>
+
+/*
+ * This driver handles the PWMs of TWL4030 and TWL6030.
+ * The TRM names for the PWMs on TWL4030 are: PWM0, PWM1
+ * TWL6030 also has two PWMs, named PWM1 and PWM2 in the TRM
+ */
+
+#define TWL_PWM_MAX 0x7f
+
+/* Registers, bits and macro for TWL4030 */
+#define TWL4030_GPBR1_REG 0x0c
+#define TWL4030_PMBR1_REG 0x0d
+
+/* GPBR1 register bits */
+#define TWL4030_PWMXCLK_ENABLE (1 << 0)
+#define TWL4030_PWMX_ENABLE (1 << 2)
+#define TWL4030_PWMX_BITS (TWL4030_PWMX_ENABLE | TWL4030_PWMXCLK_ENABLE)
+#define TWL4030_PWM_TOGGLE(pwm, x) ((x) << (pwm))
+
+/* PMBR1 register bits */
+#define TWL4030_GPIO6_PWM0_MUTE_MASK (0x03 << 2)
+#define TWL4030_GPIO6_PWM0_MUTE_PWM0 (0x01 << 2)
+#define TWL4030_GPIO7_VIBRASYNC_PWM1_MASK (0x03 << 4)
+#define TWL4030_GPIO7_VIBRASYNC_PWM1_PWM1 (0x03 << 4)
+
+/* Register, bits and macro for TWL6030 */
+#define TWL6030_TOGGLE3_REG 0x92
+
+#define TWL6030_PWMXR (1 << 0)
+#define TWL6030_PWMXS (1 << 1)
+#define TWL6030_PWMXEN (1 << 2)
+#define TWL6030_PWM_TOGGLE(pwm, x) ((x) << (pwm * 3))
+
+struct twl_pwm_chip {
+ struct pwm_chip chip;
+ struct mutex mutex;
+ u8 twl6030_toggle3;
+ u8 twl4030_pwm_mux;
+};
+
+static inline struct twl_pwm_chip *to_twl(struct pwm_chip *chip)
+{
+ return container_of(chip, struct twl_pwm_chip, chip);
+}
+
+static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ int duty_cycle = DIV_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1;
+ u8 pwm_config[2] = { 1, 0 };
+ int base, ret;
+
+ /*
+ * To configure the duty period:
+ * On-cycle is set to 1 (the minimum allowed value)
+ * The off time of 0 is not configurable, so the mapping is:
+ * 0 -> off cycle = 2,
+ * 1 -> off cycle = 2,
+ * 2 -> off cycle = 3,
+ * 126 -> off cycle 127,
+ * 127 -> off cycle 1
+ * When on cycle == off cycle the PWM output is always on
+ */
+ if (duty_cycle == 1)
+ duty_cycle = 2;
+ else if (duty_cycle > TWL_PWM_MAX)
+ duty_cycle = 1;
+
+ base = pwm->hwpwm * 3;
+
+ pwm_config[1] = duty_cycle;
+
+ ret = twl_i2c_write(TWL_MODULE_PWM, pwm_config, base, 2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+
+ return ret;
+}
+
+static int twl4030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
+ goto out;
+ }
+
+ val |= TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMXCLK_ENABLE);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+
+ val |= TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMX_ENABLE);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl4030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMX_ENABLE);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+
+ val &= ~TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMXCLK_ENABLE);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static int twl4030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = to_twl(chip);
+ int ret;
+ u8 val, mask, bits;
+
+ if (pwm->hwpwm == 1) {
+ mask = TWL4030_GPIO7_VIBRASYNC_PWM1_MASK;
+ bits = TWL4030_GPIO7_VIBRASYNC_PWM1_PWM1;
+ } else {
+ mask = TWL4030_GPIO6_PWM0_MUTE_MASK;
+ bits = TWL4030_GPIO6_PWM0_MUTE_PWM0;
+ }
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
+ goto out;
+ }
+
+ /* Save the current MUX configuration for the PWM */
+ twl->twl4030_pwm_mux &= ~mask;
+ twl->twl4030_pwm_mux |= (val & mask);
+
+ /* Select PWM functionality */
+ val &= ~mask;
+ val |= bits;
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
+ chip);
+ int ret;
+ u8 val, mask;
+
+ if (pwm->hwpwm == 1)
+ mask = TWL4030_GPIO7_VIBRASYNC_PWM1_MASK;
+ else
+ mask = TWL4030_GPIO6_PWM0_MUTE_MASK;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
+ goto out;
+ }
+
+ /* Restore the MUX configuration for the PWM */
+ val &= ~mask;
+ val |= (twl->twl4030_pwm_mux & mask);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
+ chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ val = twl->twl6030_toggle3;
+ val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
+ val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXR);
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ goto out;
+ }
+
+ twl->twl6030_toggle3 = val;
+out:
+ mutex_unlock(&twl->mutex);
+ return 0;
+}
+
+static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
+ chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ val = twl->twl6030_toggle3;
+ val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXR);
+ val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to write TOGGLE3\n", pwm->label);
+ goto out;
+ }
+
+ val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ goto out;
+ }
+
+ twl->twl6030_toggle3 = val;
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static const struct pwm_ops twl4030_pwm_ops = {
+ .config = twl_pwm_config,
+ .enable = twl4030_pwm_enable,
+ .disable = twl4030_pwm_disable,
+ .request = twl4030_pwm_request,
+ .free = twl4030_pwm_free,
+};
+
+static const struct pwm_ops twl6030_pwm_ops = {
+ .config = twl_pwm_config,
+ .enable = twl6030_pwm_enable,
+ .disable = twl6030_pwm_disable,
+};
+
+static int twl_pwm_probe(struct platform_device *pdev)
+{
+ struct twl_pwm_chip *twl;
+ int ret;
+
+ twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
+ if (!twl)
+ return -ENOMEM;
+
+ if (twl_class_is_4030())
+ twl->chip.ops = &twl4030_pwm_ops;
+ else
+ twl->chip.ops = &twl6030_pwm_ops;
+
+ twl->chip.dev = &pdev->dev;
+ twl->chip.base = -1;
+ twl->chip.npwm = 2;
+
+ mutex_init(&twl->mutex);
+
+ ret = pwmchip_add(&twl->chip);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, twl);
+
+ return 0;
+}
+
+static int twl_pwm_remove(struct platform_device *pdev)
+{
+ struct twl_pwm_chip *twl = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&twl->chip);
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id twl_pwm_of_match[] = {
+ { .compatible = "ti,twl4030-pwm" },
+ { .compatible = "ti,twl6030-pwm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_pwm_of_match);
+#endif
+
+static struct platform_driver twl_pwm_driver = {
+ .driver = {
+ .name = "twl-pwm",
+ .of_match_table = of_match_ptr(twl_pwm_of_match),
+ },
+ .probe = twl_pwm_probe,
+ .remove = twl_pwm_remove,
+};
+module_platform_driver(twl_pwm_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("PWM driver for TWL4030 and TWL6030");
+MODULE_ALIAS("platform:twl-pwm");
+MODULE_LICENSE("GPL");
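A worked example of the on/off-cycle mapping used by twl_pwm_config() above (and by the identical code in pwm-twl-led.c), with assumed requests:
    duty_cycle = DIV_ROUND_UP(duty_ns * 0x7f, period_ns) + 1
    50% request:  DIV_ROUND_UP(63.5) + 1 = 65         ->  ON register = 1, OFF register = 65 (about 50% high)
    0% request:   0 + 1 = 1, remapped to 2            ->  the shortest programmable off time
    100% request: 127 + 1 = 128 > 0x7f, remapped to 1 ->  on == off, output held high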
diff --git a/drivers/pwm/pwm-twl6030.c b/drivers/pwm/pwm-twl6030.c
deleted file mode 100644
index 378a7e286366..000000000000
--- a/drivers/pwm/pwm-twl6030.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * twl6030_pwm.c
- * Driver for PHOENIX (TWL6030) Pulse Width Modulator
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Hemanth V <hemanthv@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pwm.h>
-#include <linux/i2c/twl.h>
-#include <linux/slab.h>
-
-#define LED_PWM_CTRL1 0xF4
-#define LED_PWM_CTRL2 0xF5
-
-/* Max value for CTRL1 register */
-#define PWM_CTRL1_MAX 255
-
-/* Pull down disable */
-#define PWM_CTRL2_DIS_PD (1 << 6)
-
-/* Current control 2.5 milli Amps */
-#define PWM_CTRL2_CURR_02 (2 << 4)
-
-/* LED supply source */
-#define PWM_CTRL2_SRC_VAC (1 << 2)
-
-/* LED modes */
-#define PWM_CTRL2_MODE_HW (0 << 0)
-#define PWM_CTRL2_MODE_SW (1 << 0)
-#define PWM_CTRL2_MODE_DIS (2 << 0)
-
-#define PWM_CTRL2_MODE_MASK 0x3
-
-struct twl6030_pwm_chip {
- struct pwm_chip chip;
-};
-
-static int twl6030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
- u8 val;
-
- /* Configure PWM */
- val = PWM_CTRL2_DIS_PD | PWM_CTRL2_CURR_02 | PWM_CTRL2_SRC_VAC |
- PWM_CTRL2_MODE_HW;
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to configure PWM, Error %d\n",
- pwm->label, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int twl6030_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
-{
- u8 duty_cycle = (duty_ns * PWM_CTRL1_MAX) / period_ns;
- int ret;
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, duty_cycle, LED_PWM_CTRL1);
- if (ret < 0) {
- pr_err("%s: Failed to configure PWM, Error %d\n",
- pwm->label, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
- u8 val;
-
- ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
- pwm->label, ret);
- return ret;
- }
-
- /* Change mode to software control */
- val &= ~PWM_CTRL2_MODE_MASK;
- val |= PWM_CTRL2_MODE_SW;
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
- pwm->label, ret);
- return ret;
- }
-
- twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
- return 0;
-}
-
-static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
- u8 val;
-
- ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
- pwm->label, ret);
- return;
- }
-
- val &= ~PWM_CTRL2_MODE_MASK;
- val |= PWM_CTRL2_MODE_HW;
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
- pwm->label, ret);
- }
-}
-
-static const struct pwm_ops twl6030_pwm_ops = {
- .request = twl6030_pwm_request,
- .config = twl6030_pwm_config,
- .enable = twl6030_pwm_enable,
- .disable = twl6030_pwm_disable,
-};
-
-static int twl6030_pwm_probe(struct platform_device *pdev)
-{
- struct twl6030_pwm_chip *twl6030;
- int ret;
-
- twl6030 = devm_kzalloc(&pdev->dev, sizeof(*twl6030), GFP_KERNEL);
- if (!twl6030)
- return -ENOMEM;
-
- twl6030->chip.dev = &pdev->dev;
- twl6030->chip.ops = &twl6030_pwm_ops;
- twl6030->chip.base = -1;
- twl6030->chip.npwm = 1;
-
- ret = pwmchip_add(&twl6030->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, twl6030);
-
- return 0;
-}
-
-static int twl6030_pwm_remove(struct platform_device *pdev)
-{
- struct twl6030_pwm_chip *twl6030 = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&twl6030->chip);
-}
-
-static struct platform_driver twl6030_pwm_driver = {
- .driver = {
- .name = "twl6030-pwm",
- },
- .probe = twl6030_pwm_probe,
- .remove = twl6030_pwm_remove,
-};
-module_platform_driver(twl6030_pwm_driver);
-
-MODULE_ALIAS("platform:twl6030-pwm");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index ad14389b7144..f9de9b28e46e 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -1,7 +1,8 @@
/*
* drivers/pwm/pwm-vt8500.c
*
- * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -21,14 +22,24 @@
#include <linux/io.h>
#include <linux/pwm.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <asm/div64.h>
-#define VT8500_NR_PWMS 4
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+/*
+ * SoC architecture allocates register space for 4 PWMs but only
+ * 2 are currently implemented.
+ */
+#define VT8500_NR_PWMS 2
struct vt8500_chip {
struct pwm_chip chip;
void __iomem *base;
+ struct clk *clk;
};
#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
@@ -51,8 +62,15 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
unsigned long long c;
unsigned long period_cycles, prescale, pv, dc;
+ int err;
- c = 25000000/2; /* wild guess --- need to implement clocks */
+ err = clk_enable(vt8500->clk);
+ if (err < 0) {
+ dev_err(chip->dev, "failed to enable clock\n");
+ return err;
+ }
+
+ c = clk_get_rate(vt8500->clk);
c = c * period_ns;
do_div(c, 1000000000);
period_cycles = c;
@@ -64,8 +82,10 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (pv > 4095)
pv = 4095;
- if (prescale > 1023)
+ if (prescale > 1023) {
+ clk_disable(vt8500->clk);
return -EINVAL;
+ }
c = (unsigned long long)pv * duty_ns;
do_div(c, period_ns);
@@ -80,13 +100,21 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 3));
writel(dc, vt8500->base + 0xc + (pwm->hwpwm << 4));
+ clk_disable(vt8500->clk);
return 0;
}
static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ int err;
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ err = clk_enable(vt8500->clk);
+ if (err < 0) {
+ dev_err(chip->dev, "failed to enable clock\n");
+ return err;
+ }
+
pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
writel(5, vt8500->base + (pwm->hwpwm << 4));
return 0;
@@ -98,6 +126,8 @@ static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
writel(0, vt8500->base + (pwm->hwpwm << 4));
+
+ clk_disable(vt8500->clk);
}
static struct pwm_ops vt8500_pwm_ops = {
@@ -107,12 +137,24 @@ static struct pwm_ops vt8500_pwm_ops = {
.owner = THIS_MODULE,
};
-static int __devinit pwm_probe(struct platform_device *pdev)
+static const struct of_device_id vt8500_pwm_dt_ids[] = {
+ { .compatible = "via,vt8500-pwm", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vt8500_pwm_dt_ids);
+
+static int vt8500_pwm_probe(struct platform_device *pdev)
{
struct vt8500_chip *chip;
struct resource *r;
+ struct device_node *np = pdev->dev.of_node;
int ret;
+ if (!np) {
+ dev_err(&pdev->dev, "invalid devicetree node\n");
+ return -EINVAL;
+ }
+
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL) {
dev_err(&pdev->dev, "failed to allocate memory\n");
@@ -124,25 +166,39 @@ static int __devinit pwm_probe(struct platform_device *pdev)
chip->chip.base = -1;
chip->chip.npwm = VT8500_NR_PWMS;
+ chip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(chip->clk)) {
+ dev_err(&pdev->dev, "clock source not specified\n");
+ return PTR_ERR(chip->clk);
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
dev_err(&pdev->dev, "no memory resource defined\n");
return -ENODEV;
}
- chip->base = devm_request_and_ioremap(&pdev->dev, r);
- if (chip->base == NULL)
- return -EADDRNOTAVAIL;
+ chip->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ ret = clk_prepare(chip->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to prepare clock\n");
+ return ret;
+ }
ret = pwmchip_add(&chip->chip);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip\n");
return ret;
+ }
platform_set_drvdata(pdev, chip);
return ret;
}
-static int __devexit pwm_remove(struct platform_device *pdev)
+static int vt8500_pwm_remove(struct platform_device *pdev)
{
struct vt8500_chip *chip;
@@ -150,28 +206,22 @@ static int __devexit pwm_remove(struct platform_device *pdev)
if (chip == NULL)
return -ENODEV;
+ clk_unprepare(chip->clk);
+
return pwmchip_remove(&chip->chip);
}
-static struct platform_driver pwm_driver = {
+static struct platform_driver vt8500_pwm_driver = {
+ .probe = vt8500_pwm_probe,
+ .remove = vt8500_pwm_remove,
.driver = {
.name = "vt8500-pwm",
.owner = THIS_MODULE,
+ .of_match_table = vt8500_pwm_dt_ids,
},
- .probe = pwm_probe,
- .remove = __devexit_p(pwm_remove),
};
+module_platform_driver(vt8500_pwm_driver);
-static int __init pwm_init(void)
-{
- return platform_driver_register(&pwm_driver);
-}
-arch_initcall(pwm_init);
-
-static void __exit pwm_exit(void)
-{
- platform_driver_unregister(&pwm_driver);
-}
-module_exit(pwm_exit);
-
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VT8500 PWM Driver");
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_LICENSE("GPL v2");
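The vt8500 conversion (like the SPEAr driver above) splits clock handling into a sleepable clk_prepare() done once at probe time and atomic clk_enable()/clk_disable() calls around hardware accesses, with clk_unprepare() in remove. A minimal sketch of the per-access pattern, with my_chip as a hypothetical placeholder:
    static int my_pwm_touch_hw(struct my_chip *chip)
    {
            int err = clk_enable(chip->clk);    /* safe in atomic context */
            if (err < 0)
                    return err;
            /* ... program the PWM registers ... */
            clk_disable(chip->clk);             /* balance the enable on this path */
            return 0;
    }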
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 2b557119adad..c79ab843333e 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -30,8 +30,6 @@ struct pm8607_regulator_info {
unsigned int *vol_table;
unsigned int *vol_suspend;
- int update_reg;
- int update_bit;
int slope_double;
};
@@ -222,29 +220,6 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
return ret;
}
-static int pm8607_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
-{
- struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- uint8_t val;
- int ret;
-
- val = (uint8_t)(selector << (ffs(rdev->desc->vsel_mask) - 1));
-
- ret = pm860x_set_bits(info->i2c, rdev->desc->vsel_reg,
- rdev->desc->vsel_mask, val);
- if (ret)
- return ret;
- switch (info->desc.id) {
- case PM8607_ID_BUCK1:
- case PM8607_ID_BUCK3:
- ret = pm860x_set_bits(info->i2c, info->update_reg,
- 1 << info->update_bit,
- 1 << info->update_bit);
- break;
- }
- return ret;
-}
-
static int pm8606_preg_enable(struct regulator_dev *rdev)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
@@ -276,7 +251,7 @@ static int pm8606_preg_is_enabled(struct regulator_dev *rdev)
static struct regulator_ops pm8607_regulator_ops = {
.list_voltage = pm8607_list_voltage,
- .set_voltage_sel = pm8607_set_voltage_sel,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -313,11 +288,11 @@ static struct regulator_ops pm8606_preg_ops = {
.n_voltages = ARRAY_SIZE(vreg##_table), \
.vsel_reg = PM8607_##vreg, \
.vsel_mask = ARRAY_SIZE(vreg##_table) - 1, \
+ .apply_reg = PM8607_##ureg, \
+ .apply_bit = (ubit), \
.enable_reg = PM8607_##ereg, \
.enable_mask = 1 << (ebit), \
}, \
- .update_reg = PM8607_##ureg, \
- .update_bit = (ubit), \
.slope_double = (0), \
.vol_table = (unsigned int *)&vreg##_table, \
.vol_suspend = (unsigned int *)&vreg##_suspend_table, \
@@ -343,9 +318,9 @@ static struct regulator_ops pm8606_preg_ops = {
}
static struct pm8607_regulator_info pm8607_regulator_info[] = {
- PM8607_DVC(BUCK1, GO, 0, SUPPLIES_EN11, 0),
- PM8607_DVC(BUCK2, GO, 1, SUPPLIES_EN11, 1),
- PM8607_DVC(BUCK3, GO, 2, SUPPLIES_EN11, 2),
+ PM8607_DVC(BUCK1, GO, BIT(0), SUPPLIES_EN11, 0),
+ PM8607_DVC(BUCK2, GO, BIT(1), SUPPLIES_EN11, 1),
+ PM8607_DVC(BUCK3, GO, BIT(2), SUPPLIES_EN11, 2),
PM8607_LDO(1, LDO1, 0, SUPPLIES_EN11, 3),
PM8607_LDO(2, LDO2, 0, SUPPLIES_EN11, 4),
@@ -372,7 +347,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
struct regulator_config *config)
{
struct device_node *nproot, *np;
- nproot = pdev->dev.parent->of_node;
+ nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
nproot = of_find_node_by_name(nproot, "regulators");
@@ -388,6 +363,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
break;
}
}
+ of_node_put(nproot);
return 0;
}
#else
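The removed pm8607_set_voltage_sel() duplicated logic the regmap helper already provides: once .apply_reg and .apply_bit are filled in, regulator_set_voltage_sel_regmap() writes the selector and then pulses the GO bit for the DVC bucks. Expanded by hand from the PM8607_DVC(BUCK1, GO, BIT(0), ...) instantiation above, the relevant descriptor fields look like:
    .vsel_reg   = PM8607_BUCK1,
    .vsel_mask  = ARRAY_SIZE(BUCK1_table) - 1,
    .apply_reg  = PM8607_GO,      /* DVC update ("GO") register */
    .apply_bit  = BIT(0),         /* pulsed by the helper after the selector write */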
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 551a22b07538..a5d97eaee99e 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -91,6 +91,7 @@ config REGULATOR_AAT2870
config REGULATOR_ARIZONA
tristate "Wolfson Arizona class devices"
depends on MFD_ARIZONA
+ depends on SND_SOC
help
Support for the regulators found on Wolfson Arizona class
devices.
@@ -277,6 +278,15 @@ config REGULATOR_LP872X
help
This driver supports LP8720/LP8725 PMIC
+config REGULATOR_LP8755
+ tristate "TI LP8755 High Performance PMU driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver supports the LP8755 High Performance PMU. The chip
+ contains six step-down DC/DC converters which can be configured
+ in a 9-mode multiphase configuration.
+
config REGULATOR_LP8788
bool "TI LP8788 Power Regulators"
depends on MFD_LP8788
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b802b0c7fb02..6e8250382def 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
+obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 0199eeea63b1..0d4a8ccbb536 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -31,12 +31,18 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
+#define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
+#define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */
+
struct anatop_regulator {
const char *name;
u32 control_reg;
struct regmap *anatop;
int vol_bit_shift;
int vol_bit_width;
+ u32 delay_reg;
+ int delay_bit_shift;
+ int delay_bit_width;
int min_bit_val;
int min_voltage;
int max_voltage;
@@ -55,6 +61,32 @@ static int anatop_regmap_set_voltage_sel(struct regulator_dev *reg,
return regulator_set_voltage_sel_regmap(reg, selector);
}
+static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
+ unsigned int old_sel,
+ unsigned int new_sel)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ u32 val;
+ int ret = 0;
+
+ /* check whether we need to care about the LDO ramp-up speed */
+ if (anatop_reg->delay_bit_width && new_sel > old_sel) {
+ /*
+ * the LDO ramp-up delay depends on the register
+ * setting; calculate how many selector steps the
+ * LDO has to ramp through and how long that
+ * takes, in us.
+ */
+ regmap_read(anatop_reg->anatop, anatop_reg->delay_reg, &val);
+ val = (val >> anatop_reg->delay_bit_shift) &
+ ((1 << anatop_reg->delay_bit_width) - 1);
+ ret = (new_sel - old_sel) * (LDO_RAMP_UP_UNIT_IN_CYCLES <<
+ val) / LDO_RAMP_UP_FREQ_IN_MHZ + 1;
+ }
+
+ return ret;
+}
+
static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
@@ -67,6 +99,7 @@ static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
static struct regulator_ops anatop_rops = {
.set_voltage_sel = anatop_regmap_set_voltage_sel,
+ .set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
.get_voltage_sel = anatop_regmap_get_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
@@ -143,6 +176,14 @@ static int anatop_regulator_probe(struct platform_device *pdev)
goto anatop_probe_end;
}
+ /* read LDO ramp up setting, only for core reg */
+ of_property_read_u32(np, "anatop-delay-reg-offset",
+ &sreg->delay_reg);
+ of_property_read_u32(np, "anatop-delay-bit-width",
+ &sreg->delay_bit_width);
+ of_property_read_u32(np, "anatop-delay-bit-shift",
+ &sreg->delay_bit_shift);
+
rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage) / 25000 + 1
+ sreg->min_bit_val;
rdesc->min_uV = sreg->min_voltage;
@@ -188,7 +229,7 @@ static int anatop_regulator_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id __devinitdata of_anatop_regulator_match_tbl[] = {
+static struct of_device_id of_anatop_regulator_match_tbl[] = {
{ .compatible = "fsl,anatop-regulator", },
{ /* end */ }
};
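The set_voltage_time_sel hunk above derives the LDO ramp delay from the number of selector steps and a per-LDO step-time field read out of delay_reg. A minimal sketch of the same arithmetic with assumed values (the two constants come from the hunk; old_sel, new_sel and the register field are made up for illustration):

	/* Sketch only -- not part of the patch. */
	#define LDO_RAMP_UP_UNIT_IN_CYCLES	64	/* 64 cycles per step */
	#define LDO_RAMP_UP_FREQ_IN_MHZ		24	/* stepping clocked from the 24 MHz OSC */

	static int example_ramp_delay_us(unsigned int old_sel, unsigned int new_sel,
					 unsigned int step_field)
	{
		/* each selector step takes (64 << step_field) cycles at 24 MHz;
		 * the trailing +1 keeps the integer division from under-estimating */
		return (new_sel - old_sel) *
		       (LDO_RAMP_UP_UNIT_IN_CYCLES << step_field) /
		       LDO_RAMP_UP_FREQ_IN_MHZ + 1;
	}

	/* e.g. old_sel = 10, new_sel = 16, step_field = 1:
	 * 6 * (64 << 1) / 24 + 1 = 33 us */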
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index a6d040cbf8ac..e87536bf0bed 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -21,6 +21,8 @@
#include <linux/regulator/machine.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <sound/soc.h>
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>
@@ -34,6 +36,8 @@ struct arizona_micsupp {
struct regulator_consumer_supply supply;
struct regulator_init_data init_data;
+
+ struct work_struct check_cp_work;
};
static int arizona_micsupp_list_voltage(struct regulator_dev *rdev,
@@ -72,9 +76,73 @@ static int arizona_micsupp_map_voltage(struct regulator_dev *rdev,
return selector;
}
+static void arizona_micsupp_check_cp(struct work_struct *work)
+{
+ struct arizona_micsupp *micsupp =
+ container_of(work, struct arizona_micsupp, check_cp_work);
+ struct snd_soc_dapm_context *dapm = micsupp->arizona->dapm;
+ struct arizona *arizona = micsupp->arizona;
+ struct regmap *regmap = arizona->regmap;
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(regmap, ARIZONA_MIC_CHARGE_PUMP_1, &reg);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read CP state: %d\n", ret);
+ return;
+ }
+
+ if (dapm) {
+ if ((reg & (ARIZONA_CPMIC_ENA | ARIZONA_CPMIC_BYPASS)) ==
+ ARIZONA_CPMIC_ENA)
+ snd_soc_dapm_force_enable_pin(dapm, "MICSUPP");
+ else
+ snd_soc_dapm_disable_pin(dapm, "MICSUPP");
+
+ snd_soc_dapm_sync(dapm);
+ }
+}
+
+static int arizona_micsupp_enable(struct regulator_dev *rdev)
+{
+ struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = regulator_enable_regmap(rdev);
+
+ if (ret == 0)
+ schedule_work(&micsupp->check_cp_work);
+
+ return ret;
+}
+
+static int arizona_micsupp_disable(struct regulator_dev *rdev)
+{
+ struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = regulator_disable_regmap(rdev);
+ if (ret == 0)
+ schedule_work(&micsupp->check_cp_work);
+
+ return ret;
+}
+
+static int arizona_micsupp_set_bypass(struct regulator_dev *rdev, bool ena)
+{
+ struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = regulator_set_bypass_regmap(rdev, ena);
+ if (ret == 0)
+ schedule_work(&micsupp->check_cp_work);
+
+ return ret;
+}
+
static struct regulator_ops arizona_micsupp_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
+ .enable = arizona_micsupp_enable,
+ .disable = arizona_micsupp_disable,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = arizona_micsupp_list_voltage,
@@ -84,7 +152,7 @@ static struct regulator_ops arizona_micsupp_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_bypass = regulator_get_bypass_regmap,
- .set_bypass = regulator_set_bypass_regmap,
+ .set_bypass = arizona_micsupp_set_bypass,
};
static const struct regulator_desc arizona_micsupp = {
@@ -109,7 +177,8 @@ static const struct regulator_desc arizona_micsupp = {
static const struct regulator_init_data arizona_micsupp_default = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_VOLTAGE,
+ REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_BYPASS,
.min_uV = 1700000,
.max_uV = 3300000,
},
@@ -131,6 +200,7 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
}
micsupp->arizona = arizona;
+ INIT_WORK(&micsupp->check_cp_work, arizona_micsupp_check_cp);
/*
* Since the chip usually supplies itself we provide some
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index 2f1341db38a0..f0ba8c4eefa9 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -303,7 +303,7 @@ static int as3711_regulator_probe(struct platform_device *pdev)
reg_data = pdata ? pdata->init_data[id] : NULL;
/* No need to register if there is no regulator data */
- if (!ri->desc.name)
+ if (!reg_data)
continue;
reg = &regs[id];
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 0f65b246cc0c..da9782bd27d0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -200,8 +200,8 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
}
if (*min_uV > *max_uV) {
- dev_err(regulator->dev, "Restricting voltage, %u-%uuV\n",
- regulator->min_uV, regulator->max_uV);
+ rdev_err(rdev, "Restricting voltage, %u-%uuV\n",
+ *min_uV, *max_uV);
return -EINVAL;
}
@@ -1885,9 +1885,15 @@ int regulator_can_change_voltage(struct regulator *regulator)
struct regulator_dev *rdev = regulator->rdev;
if (rdev->constraints &&
- rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE &&
- (rdev->desc->n_voltages - rdev->desc->linear_min_sel) > 1)
- return 1;
+ (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
+ return 1;
+
+ if (rdev->desc->continuous_voltage_range &&
+ rdev->constraints->min_uV && rdev->constraints->max_uV &&
+ rdev->constraints->min_uV != rdev->constraints->max_uV)
+ return 1;
+ }
return 0;
}
@@ -2074,10 +2080,20 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
*/
int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
{
+ int ret;
+
sel <<= ffs(rdev->desc->vsel_mask) - 1;
- return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
rdev->desc->vsel_mask, sel);
+ if (ret)
+ return ret;
+
+ if (rdev->desc->apply_bit)
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
+ rdev->desc->apply_bit,
+ rdev->desc->apply_bit);
+ return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);
@@ -2223,8 +2239,11 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
best_val = rdev->desc->ops->list_voltage(rdev, ret);
if (min_uV <= best_val && max_uV >= best_val) {
selector = ret;
- ret = rdev->desc->ops->set_voltage_sel(rdev,
- ret);
+ if (old_selector == selector)
+ ret = 0;
+ else
+ ret = rdev->desc->ops->set_voltage_sel(
+ rdev, ret);
} else {
ret = -EINVAL;
}
@@ -2235,7 +2254,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
/* Call set_voltage_time_sel if successfully obtained old_selector */
if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 &&
- rdev->desc->ops->set_voltage_time_sel) {
+ old_selector != selector && rdev->desc->ops->set_voltage_time_sel) {
delay = rdev->desc->ops->set_voltage_time_sel(rdev,
old_selector, selector);
@@ -2288,6 +2307,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
{
struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
+ int old_min_uV, old_max_uV;
mutex_lock(&rdev->mutex);
@@ -2309,18 +2329,29 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
if (ret < 0)
goto out;
+
+ /* restore original values in case of error */
+ old_min_uV = regulator->min_uV;
+ old_max_uV = regulator->max_uV;
regulator->min_uV = min_uV;
regulator->max_uV = max_uV;
ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
if (ret < 0)
- goto out;
+ goto out2;
ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
-
+ if (ret < 0)
+ goto out2;
+
out:
mutex_unlock(&rdev->mutex);
return ret;
+out2:
+ regulator->min_uV = old_min_uV;
+ regulator->max_uV = old_max_uV;
+ mutex_unlock(&rdev->mutex);
+ return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);
@@ -3202,7 +3233,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
if (status < 0)
return status;
}
- if (ops->is_enabled) {
+ if (rdev->ena_gpio || ops->is_enabled) {
status = device_create_file(dev, &dev_attr_state);
if (status < 0)
return status;
@@ -3315,7 +3346,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
* @config: runtime configuration for regulator
*
* Called by regulator drivers to register a regulator.
- * Returns 0 on success.
+ * Returns a valid pointer to struct regulator_dev on success
+ * or an ERR_PTR() on error.
*/
struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
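With the regulator_set_voltage_sel_regmap() change above, the core writes the selector and, when the descriptor declares one, toggles an "apply"/GO bit afterwards; that is what lets the 88pm8607 and da9052 hunks in this series drop their private set_voltage_sel callbacks. A minimal sketch of a descriptor using the new fields -- the register and bit names here are assumed, not taken from any real driver:

	/* Sketch only -- not part of the patch. */
	static const struct regulator_desc example_buck_desc = {
		.name		= "example-buck",
		.ops		= &example_buck_ops,	/* uses regulator_set_voltage_sel_regmap */
		.type		= REGULATOR_VOLTAGE,
		.owner		= THIS_MODULE,
		.vsel_reg	= EXAMPLE_BUCK_VOUT,	/* assumed selector register */
		.vsel_mask	= 0x3f,
		.apply_reg	= EXAMPLE_BUCK_GO,	/* assumed "apply"/GO register */
		.apply_bit	= BIT(0),		/* set after each selector write */
		.enable_reg	= EXAMPLE_BUCK_EN,
		.enable_mask	= BIT(7),
	};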
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index d0963090442d..96b569abb46c 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -70,7 +70,6 @@ struct da9052_regulator_info {
int step_uV;
int min_uV;
int max_uV;
- unsigned char activate_bit;
};
struct da9052_regulator {
@@ -210,36 +209,6 @@ static int da9052_map_voltage(struct regulator_dev *rdev,
return sel;
}
-static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9052_regulator_info *info = regulator->info;
- int id = rdev_get_id(rdev);
- int ret;
-
- ret = da9052_reg_update(regulator->da9052, rdev->desc->vsel_reg,
- rdev->desc->vsel_mask, selector);
- if (ret < 0)
- return ret;
-
- /* Some LDOs and DCDCs are DVC controlled which requires enabling of
- * the activate bit to implment the changes on the output.
- */
- switch (id) {
- case DA9052_ID_BUCK1:
- case DA9052_ID_BUCK2:
- case DA9052_ID_BUCK3:
- case DA9052_ID_LDO2:
- case DA9052_ID_LDO3:
- ret = da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
- info->activate_bit, info->activate_bit);
- break;
- }
-
- return ret;
-}
-
static struct regulator_ops da9052_dcdc_ops = {
.get_current_limit = da9052_dcdc_get_current_limit,
.set_current_limit = da9052_dcdc_set_current_limit,
@@ -247,7 +216,7 @@ static struct regulator_ops da9052_dcdc_ops = {
.list_voltage = da9052_list_voltage,
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = da9052_regulator_set_voltage_sel,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -257,7 +226,7 @@ static struct regulator_ops da9052_ldo_ops = {
.list_voltage = da9052_list_voltage,
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = da9052_regulator_set_voltage_sel,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -274,13 +243,14 @@ static struct regulator_ops da9052_ldo_ops = {
.owner = THIS_MODULE,\
.vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.vsel_mask = (1 << (sbits)) - 1,\
+ .apply_reg = DA9052_SUPPLY_REG, \
+ .apply_bit = (abits), \
.enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.enable_mask = 1 << (ebits),\
},\
.min_uV = (min) * 1000,\
.max_uV = (max) * 1000,\
.step_uV = (step) * 1000,\
- .activate_bit = (abits),\
}
#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
@@ -294,13 +264,14 @@ static struct regulator_ops da9052_ldo_ops = {
.owner = THIS_MODULE,\
.vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.vsel_mask = (1 << (sbits)) - 1,\
+ .apply_reg = DA9052_SUPPLY_REG, \
+ .apply_bit = (abits), \
.enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.enable_mask = 1 << (ebits),\
},\
.min_uV = (min) * 1000,\
.max_uV = (max) * 1000,\
.step_uV = (step) * 1000,\
- .activate_bit = (abits),\
}
static struct da9052_regulator_info da9052_regulator_info[] = {
@@ -395,9 +366,9 @@ static int da9052_regulator_probe(struct platform_device *pdev)
config.init_data = pdata->regulators[pdev->id];
} else {
#ifdef CONFIG_OF
- struct device_node *nproot = da9052->dev->of_node;
- struct device_node *np;
+ struct device_node *nproot, *np;
+ nproot = of_node_get(da9052->dev->of_node);
if (!nproot)
return -ENODEV;
@@ -414,6 +385,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
break;
}
}
+ of_node_put(nproot);
#endif
}
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index a4b9cb8c4317..30221099d09c 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -58,7 +58,6 @@ struct da9055_volt_reg {
int reg_b;
int sl_shift;
int v_mask;
- int v_shift;
};
struct da9055_mode_reg {
@@ -388,7 +387,6 @@ static struct regulator_ops da9055_ldo_ops = {
.reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \
.sl_shift = 7,\
.v_mask = (1 << (vbits)) - 1,\
- .v_shift = (vbits),\
},\
}
@@ -417,7 +415,6 @@ static struct regulator_ops da9055_ldo_ops = {
.reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \
.sl_shift = 7,\
.v_mask = (1 << (vbits)) - 1,\
- .v_shift = (vbits),\
},\
.mode = {\
.reg = DA9055_REG_BCORE_MODE,\
@@ -442,9 +439,9 @@ static struct da9055_regulator_info da9055_regulator_info[] = {
* GPIO can control regulator state and/or select the regulator register
* set A/B for voltage ramping.
*/
-static __devinit int da9055_gpio_init(struct da9055_regulator *regulator,
- struct regulator_config *config,
- struct da9055_pdata *pdata, int id)
+static int da9055_gpio_init(struct da9055_regulator *regulator,
+ struct regulator_config *config,
+ struct da9055_pdata *pdata, int id)
{
struct da9055_regulator_info *info = regulator->info;
int ret = 0;
@@ -533,7 +530,7 @@ static inline struct da9055_regulator_info *find_regulator_info(int id)
return NULL;
}
-static int __devinit da9055_regulator_probe(struct platform_device *pdev)
+static int da9055_regulator_probe(struct platform_device *pdev)
{
struct regulator_config config = { };
struct da9055_regulator *regulator;
@@ -605,7 +602,7 @@ err_regulator:
return ret;
}
-static int __devexit da9055_regulator_remove(struct platform_device *pdev)
+static int da9055_regulator_remove(struct platform_device *pdev)
{
struct da9055_regulator *regulator = platform_get_drvdata(pdev);
@@ -616,7 +613,7 @@ static int __devexit da9055_regulator_remove(struct platform_device *pdev)
static struct platform_driver da9055_regulator_driver = {
.probe = da9055_regulator_probe,
- .remove = __devexit_p(da9055_regulator_remove),
+ .remove = da9055_regulator_remove,
.driver = {
.name = "da9055-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index 261f3d2299bc..89bd2faaef8c 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "dbx500-prcmu.h"
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 48d5b7608b00..e5c03b534fae 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -246,7 +246,7 @@ static int reg_fixed_voltage_remove(struct platform_device *pdev)
}
#if defined(CONFIG_OF)
-static const struct of_device_id fixed_of_match[] __devinitconst = {
+static const struct of_device_id fixed_of_match[] = {
{ .compatible = "regulator-fixed", },
{},
};
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 8ae288fc150b..9d39eb4aafa3 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -132,7 +132,7 @@ static struct regulator_ops gpio_regulator_voltage_ops = {
.list_voltage = gpio_regulator_list_voltage,
};
-struct gpio_regulator_config *
+static struct gpio_regulator_config *
of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
{
struct gpio_regulator_config *config;
@@ -163,10 +163,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
/* Fetch GPIOs. */
- for (i = 0; ; i++)
- if (of_get_named_gpio(np, "gpios", i) < 0)
- break;
- config->nr_gpios = i;
+ config->nr_gpios = of_gpio_count(np);
config->gpios = devm_kzalloc(dev,
sizeof(struct gpio) * config->nr_gpios,
@@ -365,7 +362,7 @@ static int gpio_regulator_remove(struct platform_device *pdev)
}
#if defined(CONFIG_OF)
-static const struct of_device_id regulator_gpio_of_match[] __devinitconst = {
+static const struct of_device_id regulator_gpio_of_match[] = {
{ .compatible = "regulator-gpio", },
{},
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 5f68ff11a298..9cb2c0f34515 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -73,8 +73,6 @@ static const unsigned int buck_voltage_map[] = {
};
#define BUCK_TARGET_VOL_MASK 0x3f
-#define BUCK_TARGET_VOL_MIN_IDX 0x01
-#define BUCK_TARGET_VOL_MAX_IDX 0x19
#define LP3971_BUCK_RAMP_REG(x) (buck_base_addr[x]+2)
@@ -140,7 +138,7 @@ static int lp3971_ldo_disable(struct regulator_dev *dev)
return lp3971_set_bits(lp3971, LP3971_LDO_ENABLE_REG, mask, 0);
}
-static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
+static int lp3971_ldo_get_voltage_sel(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3971_LDO1;
@@ -149,7 +147,7 @@ static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo));
val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK;
- return dev->desc->volt_table[val];
+ return val;
}
static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -168,7 +166,7 @@ static struct regulator_ops lp3971_ldo_ops = {
.is_enabled = lp3971_ldo_is_enabled,
.enable = lp3971_ldo_enable,
.disable = lp3971_ldo_disable,
- .get_voltage = lp3971_ldo_get_voltage,
+ .get_voltage_sel = lp3971_ldo_get_voltage_sel,
.set_voltage_sel = lp3971_ldo_set_voltage_sel,
};
@@ -201,24 +199,16 @@ static int lp3971_dcdc_disable(struct regulator_dev *dev)
return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_ENABLE_REG, mask, 0);
}
-static int lp3971_dcdc_get_voltage(struct regulator_dev *dev)
+static int lp3971_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3971_DCDC1;
u16 reg;
- int val;
reg = lp3971_reg_read(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck));
reg &= BUCK_TARGET_VOL_MASK;
- if (reg <= BUCK_TARGET_VOL_MAX_IDX)
- val = buck_voltage_map[reg];
- else {
- val = 0;
- dev_warn(&dev->dev, "chip reported incorrect voltage value.\n");
- }
-
- return val;
+ return reg;
}
static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -249,7 +239,7 @@ static struct regulator_ops lp3971_dcdc_ops = {
.is_enabled = lp3971_dcdc_is_enabled,
.enable = lp3971_dcdc_enable,
.disable = lp3971_dcdc_disable,
- .get_voltage = lp3971_dcdc_get_voltage,
+ .get_voltage_sel = lp3971_dcdc_get_voltage_sel,
.set_voltage_sel = lp3971_dcdc_set_voltage_sel,
};
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 69c42c318b87..0baabcfb578a 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -165,8 +165,6 @@ static const int buck_base_addr[] = {
#define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x])
#define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x])
#define LP3972_BUCK_VOL_MASK 0x1f
-#define LP3972_BUCK_VOL_MIN_IDX(x) ((x) ? 0x01 : 0x00)
-#define LP3972_BUCK_VOL_MAX_IDX(x) ((x) ? 0x19 : 0x1f)
static int lp3972_i2c_read(struct i2c_client *i2c, char reg, int count,
u16 *dest)
@@ -257,7 +255,7 @@ static int lp3972_ldo_disable(struct regulator_dev *dev)
mask, 0);
}
-static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
+static int lp3972_ldo_get_voltage_sel(struct regulator_dev *dev)
{
struct lp3972 *lp3972 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3972_LDO1;
@@ -267,7 +265,7 @@ static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo));
val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask;
- return dev->desc->volt_table[val];
+ return val;
}
static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -314,7 +312,7 @@ static struct regulator_ops lp3972_ldo_ops = {
.is_enabled = lp3972_ldo_is_enabled,
.enable = lp3972_ldo_enable,
.disable = lp3972_ldo_disable,
- .get_voltage = lp3972_ldo_get_voltage,
+ .get_voltage_sel = lp3972_ldo_get_voltage_sel,
.set_voltage_sel = lp3972_ldo_set_voltage_sel,
};
@@ -353,24 +351,16 @@ static int lp3972_dcdc_disable(struct regulator_dev *dev)
return val;
}
-static int lp3972_dcdc_get_voltage(struct regulator_dev *dev)
+static int lp3972_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
struct lp3972 *lp3972 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3972_DCDC1;
u16 reg;
- int val;
reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck));
reg &= LP3972_BUCK_VOL_MASK;
- if (reg <= LP3972_BUCK_VOL_MAX_IDX(buck))
- val = dev->desc->volt_table[reg];
- else {
- val = 0;
- dev_warn(&dev->dev, "chip reported incorrect voltage value."
- " reg = %d\n", reg);
- }
- return val;
+ return reg;
}
static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -402,7 +392,7 @@ static struct regulator_ops lp3972_dcdc_ops = {
.is_enabled = lp3972_dcdc_is_enabled,
.enable = lp3972_dcdc_enable,
.disable = lp3972_dcdc_disable,
- .get_voltage = lp3972_dcdc_get_voltage,
+ .get_voltage_sel = lp3972_dcdc_get_voltage_sel,
.set_voltage_sel = lp3972_dcdc_set_voltage_sel,
};
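The lp3971 and lp3972 conversions above return the raw selector from .get_voltage_sel instead of a voltage, leaving the selector-to-microvolt translation to the core, which feeds the value back through the descriptor's .list_voltage/volt_table. A minimal sketch of that translation with an assumed table:

	/* Sketch only -- not part of the patch. Table values are made up. */
	static const unsigned int example_volt_table[] = {
		800000, 850000, 900000, 950000, 1000000,
	};

	static int example_list_voltage(unsigned int sel)
	{
		if (sel >= ARRAY_SIZE(example_volt_table))
			return -EINVAL;
		return example_volt_table[sel];
	}

	/* if .get_voltage_sel returns 3, the core reports
	 * example_list_voltage(3) == 950000 uV to consumers */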
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 9289ead715ca..8e3c7ae0047f 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -181,20 +181,6 @@ static inline int lp872x_update_bits(struct lp872x *lp, u8 addr,
return regmap_update_bits(lp->regmap, addr, mask, data);
}
-static int _rdev_to_offset(struct regulator_dev *rdev)
-{
- enum lp872x_regulator_id id = rdev_get_id(rdev);
-
- switch (id) {
- case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
- return id;
- case LP8725_ID_LDO1 ... LP8725_ID_BUCK2:
- return id - LP8725_ID_BASE;
- default:
- return -EINVAL;
- }
-}
-
static int lp872x_get_timestep_usec(struct lp872x *lp)
{
enum lp872x_id chip = lp->chipid;
@@ -234,28 +220,20 @@ static int lp872x_get_timestep_usec(struct lp872x *lp)
static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
- enum lp872x_regulator_id regulator = rdev_get_id(rdev);
+ enum lp872x_regulator_id rid = rdev_get_id(rdev);
int time_step_us = lp872x_get_timestep_usec(lp);
- int ret, offset;
+ int ret;
u8 addr, val;
if (time_step_us < 0)
return -EINVAL;
- switch (regulator) {
- case LP8720_ID_LDO1 ... LP8720_ID_LDO5:
- case LP8725_ID_LDO1 ... LP8725_ID_LILO2:
- offset = _rdev_to_offset(rdev);
- if (offset < 0)
- return -EINVAL;
-
- addr = LP872X_LDO1_VOUT + offset;
- break;
- case LP8720_ID_BUCK:
- addr = LP8720_BUCK_VOUT1;
+ switch (rid) {
+ case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
+ addr = LP872X_LDO1_VOUT + rid;
break;
- case LP8725_ID_BUCK1:
- addr = LP8725_BUCK1_VOUT1;
+ case LP8725_ID_LDO1 ... LP8725_ID_BUCK1:
+ addr = LP872X_LDO1_VOUT + rid - LP8725_ID_BASE;
break;
case LP8725_ID_BUCK2:
addr = LP8725_BUCK2_VOUT1;
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
new file mode 100644
index 000000000000..f0f6ea05065b
--- /dev/null
+++ b/drivers/regulator/lp8755.c
@@ -0,0 +1,566 @@
+/*
+ * LP8755 High Performance Power Management Unit : System Interface Driver
+ * (based on rev. 0.26)
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Daniel(Geon Si) Jeong <daniel.jeong@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/platform_data/lp8755.h>
+
+#define LP8755_REG_BUCK0 0x00
+#define LP8755_REG_BUCK1 0x03
+#define LP8755_REG_BUCK2 0x04
+#define LP8755_REG_BUCK3 0x01
+#define LP8755_REG_BUCK4 0x05
+#define LP8755_REG_BUCK5 0x02
+#define LP8755_REG_MAX 0xFF
+
+#define LP8755_BUCK_EN_M BIT(7)
+#define LP8755_BUCK_LINEAR_OUT_MAX 0x76
+#define LP8755_BUCK_VOUT_M 0x7F
+
+struct lp8755_mphase {
+ int nreg;
+ int buck_num[LP8755_BUCK_MAX];
+};
+
+struct lp8755_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ struct lp8755_platform_data *pdata;
+
+ int irq;
+ unsigned int irqmask;
+
+ int mphase;
+ struct regulator_dev *rdev[LP8755_BUCK_MAX];
+};
+
+/**
+ *lp8755_read : read a single register value from lp8755.
+ *@pchip : device to read from
+ *@reg : register to read from
+ *@val : pointer to store read value
+ */
+static int lp8755_read(struct lp8755_chip *pchip, unsigned int reg,
+ unsigned int *val)
+{
+ return regmap_read(pchip->regmap, reg, val);
+}
+
+/**
+ *lp8755_write : write a single register value to lp8755.
+ *@pchip : device to write to
+ *@reg : register to write to
+ *@val : value to be written
+ */
+static int lp8755_write(struct lp8755_chip *pchip, unsigned int reg,
+ unsigned int val)
+{
+ return regmap_write(pchip->regmap, reg, val);
+}
+
+/**
+ *lp8755_update_bits : set the values of bit fields in lp8755 register.
+ *@pchip : device to read from
+ *@reg : register to update
+ *@mask : bitmask to be changed
+ *@val : value for bitmask
+ */
+static int lp8755_update_bits(struct lp8755_chip *pchip, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits(pchip->regmap, reg, mask, val);
+}
+
+static int lp8755_buck_enable_time(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int regval;
+ enum lp8755_bucks id = rdev_get_id(rdev);
+ struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
+
+ ret = lp8755_read(pchip, 0x12 + id, &regval);
+ if (ret < 0) {
+ dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+ return ret;
+ }
+ return (regval & 0xff) * 100;
+}
+
+static int lp8755_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ int ret;
+ unsigned int regbval = 0x0;
+ enum lp8755_bucks id = rdev_get_id(rdev);
+ struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ /* forced pwm mode */
+ regbval = (0x01 << id);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ /* enable automatic pwm/pfm mode */
+ ret = lp8755_update_bits(pchip, 0x08 + id, 0x20, 0x00);
+ if (ret < 0)
+ goto err_i2c;
+ break;
+ case REGULATOR_MODE_IDLE:
+ /* enable automatic pwm/pfm/lppfm mode */
+ ret = lp8755_update_bits(pchip, 0x08 + id, 0x20, 0x20);
+ if (ret < 0)
+ goto err_i2c;
+
+ ret = lp8755_update_bits(pchip, 0x10, 0x01, 0x01);
+ if (ret < 0)
+ goto err_i2c;
+ break;
+ default:
+ dev_err(pchip->dev, "Not supported buck mode %s\n", __func__);
+ /* forced pwm mode */
+ regbval = (0x01 << id);
+ }
+
+ ret = lp8755_update_bits(pchip, 0x06, 0x01 << id, regbval);
+ if (ret < 0)
+ goto err_i2c;
+ return ret;
+err_i2c:
+ dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+ return ret;
+}
+
+static unsigned int lp8755_buck_get_mode(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int regval;
+ enum lp8755_bucks id = rdev_get_id(rdev);
+ struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
+
+ ret = lp8755_read(pchip, 0x06, &regval);
+ if (ret < 0)
+ goto err_i2c;
+
+ /* mode fast means forced pwm mode */
+ if (regval & (0x01 << id))
+ return REGULATOR_MODE_FAST;
+
+ ret = lp8755_read(pchip, 0x08 + id, &regval);
+ if (ret < 0)
+ goto err_i2c;
+
+ /* mode idle means automatic pwm/pfm/lppfm mode */
+ if (regval & 0x20)
+ return REGULATOR_MODE_IDLE;
+
+ /* mode normal means automatic pwm/pfm mode */
+ return REGULATOR_MODE_NORMAL;
+
+err_i2c:
+ dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+ return 0;
+}
+
+static int lp8755_buck_set_ramp(struct regulator_dev *rdev, int ramp)
+{
+ int ret;
+ unsigned int regval = 0x00;
+ enum lp8755_bucks id = rdev_get_id(rdev);
+ struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
+
+ /* uV/us */
+ switch (ramp) {
+ case 0 ... 230:
+ regval = 0x07;
+ break;
+ case 231 ... 470:
+ regval = 0x06;
+ break;
+ case 471 ... 940:
+ regval = 0x05;
+ break;
+ case 941 ... 1900:
+ regval = 0x04;
+ break;
+ case 1901 ... 3800:
+ regval = 0x03;
+ break;
+ case 3801 ... 7500:
+ regval = 0x02;
+ break;
+ case 7501 ... 15000:
+ regval = 0x01;
+ break;
+ case 15001 ... 30000:
+ regval = 0x00;
+ break;
+ default:
+ dev_err(pchip->dev,
+ "Not supported ramp value %d %s\n", ramp, __func__);
+ return -EINVAL;
+ }
+
+ ret = lp8755_update_bits(pchip, 0x07 + id, 0x07, regval);
+ if (ret < 0)
+ goto err_i2c;
+ return ret;
+err_i2c:
+ dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+ return ret;
+}
+
+static struct regulator_ops lp8755_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp8755_buck_enable_time,
+ .set_mode = lp8755_buck_set_mode,
+ .get_mode = lp8755_buck_get_mode,
+ .set_ramp_delay = lp8755_buck_set_ramp,
+};
+
+#define lp8755_rail(_id) "lp8755_buck"#_id
+#define lp8755_buck_init(_id)\
+{\
+ .constraints = {\
+ .name = lp8755_rail(_id),\
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,\
+ .min_uV = 500000,\
+ .max_uV = 1675000,\
+ },\
+}
+
+static struct regulator_init_data lp8755_reg_default[LP8755_BUCK_MAX] = {
+ [LP8755_BUCK0] = lp8755_buck_init(0),
+ [LP8755_BUCK1] = lp8755_buck_init(1),
+ [LP8755_BUCK2] = lp8755_buck_init(2),
+ [LP8755_BUCK3] = lp8755_buck_init(3),
+ [LP8755_BUCK4] = lp8755_buck_init(4),
+ [LP8755_BUCK5] = lp8755_buck_init(5),
+};
+
+static const struct lp8755_mphase mphase_buck[MPHASE_CONF_MAX] = {
+ { 3, { LP8755_BUCK0, LP8755_BUCK3, LP8755_BUCK5 } },
+ { 6, { LP8755_BUCK0, LP8755_BUCK1, LP8755_BUCK2, LP8755_BUCK3,
+ LP8755_BUCK4, LP8755_BUCK5 } },
+ { 5, { LP8755_BUCK0, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK4,
+ LP8755_BUCK5} },
+ { 4, { LP8755_BUCK0, LP8755_BUCK3, LP8755_BUCK4, LP8755_BUCK5} },
+ { 3, { LP8755_BUCK0, LP8755_BUCK4, LP8755_BUCK5} },
+ { 2, { LP8755_BUCK0, LP8755_BUCK5} },
+ { 1, { LP8755_BUCK0} },
+ { 2, { LP8755_BUCK0, LP8755_BUCK3} },
+ { 4, { LP8755_BUCK0, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK5} },
+};
+
+static int lp8755_init_data(struct lp8755_chip *pchip)
+{
+ unsigned int regval;
+ int ret, icnt, buck_num;
+ struct lp8755_platform_data *pdata = pchip->pdata;
+
+ /* read back multi-phase configuration */
+ ret = lp8755_read(pchip, 0x3D, &regval);
+ if (ret < 0)
+ goto out_i2c_error;
+ pchip->mphase = regval & 0x0F;
+
+ /* set default data based on multi-phase config */
+ for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++) {
+ buck_num = mphase_buck[pchip->mphase].buck_num[icnt];
+ pdata->buck_data[buck_num] = &lp8755_reg_default[buck_num];
+ }
+ return ret;
+
+out_i2c_error:
+ dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+ return ret;
+}
+
+#define lp8755_buck_desc(_id)\
+{\
+ .name = lp8755_rail(_id),\
+ .id = LP8755_BUCK##_id,\
+ .ops = &lp8755_buck_ops,\
+ .n_voltages = LP8755_BUCK_LINEAR_OUT_MAX+1,\
+ .uV_step = 10000,\
+ .min_uV = 500000,\
+ .type = REGULATOR_VOLTAGE,\
+ .owner = THIS_MODULE,\
+ .enable_reg = LP8755_REG_BUCK##_id,\
+ .enable_mask = LP8755_BUCK_EN_M,\
+ .vsel_reg = LP8755_REG_BUCK##_id,\
+ .vsel_mask = LP8755_BUCK_VOUT_M,\
+}
+
+static struct regulator_desc lp8755_regulators[] = {
+ lp8755_buck_desc(0),
+ lp8755_buck_desc(1),
+ lp8755_buck_desc(2),
+ lp8755_buck_desc(3),
+ lp8755_buck_desc(4),
+ lp8755_buck_desc(5),
+};
+
+static int lp8755_regulator_init(struct lp8755_chip *pchip)
+{
+ int ret, icnt, buck_num;
+ struct lp8755_platform_data *pdata = pchip->pdata;
+ struct regulator_config rconfig = { };
+
+ rconfig.regmap = pchip->regmap;
+ rconfig.dev = pchip->dev;
+ rconfig.driver_data = pchip;
+
+ for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++) {
+ buck_num = mphase_buck[pchip->mphase].buck_num[icnt];
+ rconfig.init_data = pdata->buck_data[buck_num];
+ rconfig.of_node = pchip->dev->of_node;
+ pchip->rdev[buck_num] =
+ regulator_register(&lp8755_regulators[buck_num], &rconfig);
+ if (IS_ERR(pchip->rdev[buck_num])) {
+ ret = PTR_ERR(pchip->rdev[buck_num]);
+ pchip->rdev[buck_num] = NULL;
+ dev_err(pchip->dev, "regulator init failed: buck %d\n",
+ buck_num);
+ goto err_buck;
+ }
+ }
+
+ return 0;
+
+err_buck:
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+ regulator_unregister(pchip->rdev[icnt]);
+ return ret;
+}
+
+static irqreturn_t lp8755_irq_handler(int irq, void *data)
+{
+ int ret, icnt;
+ unsigned int flag0, flag1;
+ struct lp8755_chip *pchip = data;
+
+ /* read flag0 register */
+ ret = lp8755_read(pchip, 0x0D, &flag0);
+ if (ret < 0)
+ goto err_i2c;
+ /* clear flag register to pull up int. pin */
+ ret = lp8755_write(pchip, 0x0D, 0x00);
+ if (ret < 0)
+ goto err_i2c;
+
+ /* send the power fault detection event to the affected regulator */
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+ if ((flag0 & (0x4 << icnt))
+ && (pchip->irqmask & (0x04 << icnt))
+ && (pchip->rdev[icnt] != NULL))
+ regulator_notifier_call_chain(pchip->rdev[icnt],
+ LP8755_EVENT_PWR_FAULT,
+ NULL);
+
+ /* read flag1 register */
+ ret = lp8755_read(pchip, 0x0E, &flag1);
+ if (ret < 0)
+ goto err_i2c;
+ /* clear flag register to pull up int. pin */
+ ret = lp8755_write(pchip, 0x0E, 0x00);
+ if (ret < 0)
+ goto err_i2c;
+
+ /* send OCP event to all regulator devices */
+ if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+ if (pchip->rdev[icnt] != NULL)
+ regulator_notifier_call_chain(pchip->rdev[icnt],
+ LP8755_EVENT_OCP,
+ NULL);
+
+ /* send OVP event to all regulator devices */
+ if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+ if (pchip->rdev[icnt] != NULL)
+ regulator_notifier_call_chain(pchip->rdev[icnt],
+ LP8755_EVENT_OVP,
+ NULL);
+ return IRQ_HANDLED;
+
+err_i2c:
+ dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+ return IRQ_NONE;
+}
+
+static int lp8755_int_config(struct lp8755_chip *pchip)
+{
+ int ret;
+ unsigned int regval;
+
+ if (pchip->irq == 0) {
+ dev_warn(pchip->dev, "not use interrupt : %s\n", __func__);
+ return 0;
+ }
+
+ ret = lp8755_read(pchip, 0x0F, &regval);
+ if (ret < 0)
+ goto err_i2c;
+ pchip->irqmask = regval;
+ ret = request_threaded_irq(pchip->irq, NULL, lp8755_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "lp8755-irq", pchip);
+ if (ret)
+ return ret;
+
+ return ret;
+
+err_i2c:
+ dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+ return ret;
+}
+
+static const struct regmap_config lp8755_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LP8755_REG_MAX,
+};
+
+static int lp8755_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret, icnt;
+ struct lp8755_chip *pchip;
+ struct lp8755_platform_data *pdata = client->dev.platform_data;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c functionality check fail.\n");
+ return -EOPNOTSUPP;
+ }
+
+ pchip = devm_kzalloc(&client->dev,
+ sizeof(struct lp8755_chip), GFP_KERNEL);
+ if (!pchip)
+ return -ENOMEM;
+
+ pchip->dev = &client->dev;
+ pchip->regmap = devm_regmap_init_i2c(client, &lp8755_regmap);
+ if (IS_ERR(pchip->regmap)) {
+ ret = PTR_ERR(pchip->regmap);
+ dev_err(&client->dev, "fail to allocate regmap %d\n", ret);
+ return ret;
+ }
+ i2c_set_clientdata(client, pchip);
+
+ if (pdata != NULL) {
+ pchip->pdata = pdata;
+ pchip->mphase = pdata->mphase;
+ } else {
+ pchip->pdata = devm_kzalloc(pchip->dev,
+ sizeof(struct lp8755_platform_data),
+ GFP_KERNEL);
+ if (!pchip->pdata)
+ return -ENOMEM;
+ ret = lp8755_init_data(pchip);
+ if (ret < 0) {
+ dev_err(&client->dev, "fail to initialize chip\n");
+ return ret;
+ }
+ }
+
+ ret = lp8755_regulator_init(pchip);
+ if (ret < 0) {
+ dev_err(&client->dev, "fail to initialize regulators\n");
+ goto err_regulator;
+ }
+
+ pchip->irq = client->irq;
+ ret = lp8755_int_config(pchip);
+ if (ret < 0) {
+ dev_err(&client->dev, "fail to irq config\n");
+ goto err_irq;
+ }
+
+ return ret;
+
+err_irq:
+ for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++)
+ regulator_unregister(pchip->rdev[icnt]);
+
+err_regulator:
+ /* output disable */
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+ lp8755_write(pchip, icnt, 0x00);
+
+ return ret;
+}
+
+static int lp8755_remove(struct i2c_client *client)
+{
+ int icnt;
+ struct lp8755_chip *pchip = i2c_get_clientdata(client);
+
+ for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++)
+ regulator_unregister(pchip->rdev[icnt]);
+
+ for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+ lp8755_write(pchip, icnt, 0x00);
+
+ if (pchip->irq != 0)
+ free_irq(pchip->irq, pchip);
+
+ return 0;
+}
+
+static const struct i2c_device_id lp8755_id[] = {
+ {LP8755_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lp8755_id);
+
+static struct i2c_driver lp8755_i2c_driver = {
+ .driver = {
+ .name = LP8755_NAME,
+ },
+ .probe = lp8755_probe,
+ .remove = lp8755_remove,
+ .id_table = lp8755_id,
+};
+
+static int __init lp8755_init(void)
+{
+ return i2c_add_driver(&lp8755_i2c_driver);
+}
+
+subsys_initcall(lp8755_init);
+
+static void __exit lp8755_exit(void)
+{
+ i2c_del_driver(&lp8755_i2c_driver);
+}
+
+module_exit(lp8755_exit);
+
+MODULE_DESCRIPTION("Texas Instruments lp8755 driver");
+MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
+MODULE_LICENSE("GPL v2");
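lp8755_buck_set_ramp() above buckets the requested slew rate (in uV/us) into a 3-bit encoding where smaller values mean faster ramps. The same mapping written as a table lookup, for illustration only (thresholds copied from the switch statement; the helper name is made up):

	/* Sketch only -- not part of the patch. */
	static const struct { int max_uv_per_us; unsigned int code; } lp8755_ramp_tbl[] = {
		{   230, 0x07 }, {   470, 0x06 }, {   940, 0x05 }, {  1900, 0x04 },
		{  3800, 0x03 }, {  7500, 0x02 }, { 15000, 0x01 }, { 30000, 0x00 },
	};

	static int lp8755_ramp_to_code(int ramp_uv_per_us)
	{
		unsigned int i;

		if (ramp_uv_per_us < 0)
			return -EINVAL;
		for (i = 0; i < ARRAY_SIZE(lp8755_ramp_tbl); i++)
			if (ramp_uv_per_us <= lp8755_ramp_tbl[i].max_uv_per_us)
				return lp8755_ramp_tbl[i].code;
		return -EINVAL;	/* faster than 30000 uV/us is not supported */
	}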
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index aef3f2b0c5ea..97891a7ea7b2 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -103,16 +103,6 @@ static const int lp8788_buck_vtbl[] = {
1950000, 2000000,
};
-static const u8 buck1_vout_addr[] = {
- LP8788_BUCK1_VOUT0, LP8788_BUCK1_VOUT1,
- LP8788_BUCK1_VOUT2, LP8788_BUCK1_VOUT3,
-};
-
-static const u8 buck2_vout_addr[] = {
- LP8788_BUCK2_VOUT0, LP8788_BUCK2_VOUT1,
- LP8788_BUCK2_VOUT2, LP8788_BUCK2_VOUT3,
-};
-
static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
{
struct lp8788_buck1_dvs *dvs = (struct lp8788_buck1_dvs *)buck->dvs;
@@ -235,7 +225,7 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S;
}
- addr = buck1_vout_addr[idx];
+ addr = LP8788_BUCK1_VOUT0 + idx;
break;
case BUCK2:
if (mode == EXTPIN) {
@@ -258,7 +248,7 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
idx = (val & LP8788_BUCK2_DVS_M) >> LP8788_BUCK2_DVS_S;
}
- addr = buck2_vout_addr[idx];
+ addr = LP8788_BUCK2_VOUT0 + idx;
break;
default:
goto err;
@@ -429,7 +419,8 @@ static struct regulator_desc lp8788_buck_desc[] = {
},
};
-static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
+static int lp8788_dvs_gpio_request(struct platform_device *pdev,
+ struct lp8788_buck *buck,
enum lp8788_buck_id id)
{
struct lp8788_platform_data *pdata = buck->lp->pdata;
@@ -440,7 +431,7 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
switch (id) {
case BUCK1:
gpio = pdata->buck1_dvs->gpio;
- ret = devm_gpio_request_one(buck->lp->dev, gpio, DVS_LOW,
+ ret = devm_gpio_request_one(&pdev->dev, gpio, DVS_LOW,
b1_name);
if (ret)
return ret;
@@ -448,9 +439,9 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
buck->dvs = pdata->buck1_dvs;
break;
case BUCK2:
- for (i = 0 ; i < LP8788_NUM_BUCK2_DVS ; i++) {
+ for (i = 0; i < LP8788_NUM_BUCK2_DVS; i++) {
gpio = pdata->buck2_dvs->gpio[i];
- ret = devm_gpio_request_one(buck->lp->dev, gpio,
+ ret = devm_gpio_request_one(&pdev->dev, gpio,
DVS_LOW, b2_name[i]);
if (ret)
return ret;
@@ -464,7 +455,8 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
return 0;
}
-static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
+static int lp8788_init_dvs(struct platform_device *pdev,
+ struct lp8788_buck *buck, enum lp8788_buck_id id)
{
struct lp8788_platform_data *pdata = buck->lp->pdata;
u8 mask[] = { LP8788_BUCK1_DVS_SEL_M, LP8788_BUCK2_DVS_SEL_M };
@@ -472,7 +464,7 @@ static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
u8 default_dvs_mode[] = { LP8788_BUCK1_DVS_I2C, LP8788_BUCK2_DVS_I2C };
/* no dvs for buck3, 4 */
- if (id == BUCK3 || id == BUCK4)
+ if (id > BUCK2)
return 0;
/* no dvs platform data, then dvs will be selected by I2C registers */
@@ -483,7 +475,7 @@ static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
(id == BUCK2 && !pdata->buck2_dvs))
goto set_default_dvs_mode;
- if (lp8788_dvs_gpio_request(buck, id))
+ if (lp8788_dvs_gpio_request(pdev, buck, id))
goto set_default_dvs_mode;
return lp8788_update_bits(buck->lp, LP8788_BUCK_DVS_SEL, mask[id],
@@ -503,17 +495,20 @@ static int lp8788_buck_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
int ret;
- buck = devm_kzalloc(lp->dev, sizeof(struct lp8788_buck), GFP_KERNEL);
+ if (id >= LP8788_NUM_BUCKS)
+ return -EINVAL;
+
+ buck = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_buck), GFP_KERNEL);
if (!buck)
return -ENOMEM;
buck->lp = lp;
- ret = lp8788_init_dvs(buck, id);
+ ret = lp8788_init_dvs(pdev, buck, id);
if (ret)
return ret;
- cfg.dev = lp->dev;
+ cfg.dev = pdev->dev.parent;
cfg.init_data = lp->pdata ? lp->pdata->buck_data[id] : NULL;
cfg.driver_data = buck;
cfg.regmap = lp->regmap;
@@ -521,7 +516,7 @@ static int lp8788_buck_probe(struct platform_device *pdev)
rdev = regulator_register(&lp8788_buck_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
- dev_err(lp->dev, "BUCK%d regulator register err = %d\n",
+ dev_err(&pdev->dev, "BUCK%d regulator register err = %d\n",
id + 1, ret);
return ret;
}
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 3792741708ce..cd5a14ad9263 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -88,11 +88,6 @@
#define ENABLE GPIOF_OUT_INIT_HIGH
#define DISABLE GPIOF_OUT_INIT_LOW
-enum lp8788_enable_mode {
- REGISTER,
- EXTPIN,
-};
-
enum lp8788_ldo_id {
DLDO1,
DLDO2,
@@ -189,114 +184,38 @@ static enum lp8788_ldo_id lp8788_aldo_id[] = {
ALDO10,
};
-/* DLDO 7, 9 and 11, ALDO 1 ~ 5 and 7
- : can be enabled either by external pin or by i2c register */
-static enum lp8788_enable_mode
-lp8788_get_ldo_enable_mode(struct lp8788_ldo *ldo, enum lp8788_ldo_id id)
-{
- int ret;
- u8 val, mask;
-
- ret = lp8788_read_byte(ldo->lp, LP8788_EN_SEL, &val);
- if (ret)
- return ret;
-
- switch (id) {
- case DLDO7:
- mask = LP8788_EN_SEL_DLDO7_M;
- break;
- case DLDO9:
- case DLDO11:
- mask = LP8788_EN_SEL_DLDO911_M;
- break;
- case ALDO1:
- mask = LP8788_EN_SEL_ALDO1_M;
- break;
- case ALDO2 ... ALDO4:
- mask = LP8788_EN_SEL_ALDO234_M;
- break;
- case ALDO5:
- mask = LP8788_EN_SEL_ALDO5_M;
- break;
- case ALDO7:
- mask = LP8788_EN_SEL_ALDO7_M;
- break;
- default:
- return REGISTER;
- }
-
- return val & mask ? EXTPIN : REGISTER;
-}
-
-static int lp8788_ldo_ctrl_by_extern_pin(struct lp8788_ldo *ldo, int pinstate)
-{
- struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
-
- if (!pin)
- return -EINVAL;
-
- if (gpio_is_valid(pin->gpio))
- gpio_set_value(pin->gpio, pinstate);
-
- return 0;
-}
-
-static int lp8788_ldo_is_enabled_by_extern_pin(struct lp8788_ldo *ldo)
-{
- struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
-
- if (!pin)
- return -EINVAL;
-
- return gpio_get_value(pin->gpio) ? 1 : 0;
-}
-
static int lp8788_ldo_enable(struct regulator_dev *rdev)
{
struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
- enum lp8788_ldo_id id = rdev_get_id(rdev);
- enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
- switch (mode) {
- case EXTPIN:
- return lp8788_ldo_ctrl_by_extern_pin(ldo, ENABLE);
- case REGISTER:
+ if (ldo->en_pin) {
+ gpio_set_value(ldo->en_pin->gpio, ENABLE);
+ return 0;
+ } else {
return regulator_enable_regmap(rdev);
- default:
- return -EINVAL;
}
}
static int lp8788_ldo_disable(struct regulator_dev *rdev)
{
struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
- enum lp8788_ldo_id id = rdev_get_id(rdev);
- enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
- switch (mode) {
- case EXTPIN:
- return lp8788_ldo_ctrl_by_extern_pin(ldo, DISABLE);
- case REGISTER:
+ if (ldo->en_pin) {
+ gpio_set_value(ldo->en_pin->gpio, DISABLE);
+ return 0;
+ } else {
return regulator_disable_regmap(rdev);
- default:
- return -EINVAL;
}
}
static int lp8788_ldo_is_enabled(struct regulator_dev *rdev)
{
struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
- enum lp8788_ldo_id id = rdev_get_id(rdev);
- enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
- switch (mode) {
- case EXTPIN:
- return lp8788_ldo_is_enabled_by_extern_pin(ldo);
- case REGISTER:
+ if (ldo->en_pin)
+ return gpio_get_value(ldo->en_pin->gpio) ? 1 : 0;
+ else
return regulator_is_enabled_regmap(rdev);
- default:
- return -EINVAL;
- }
}
static int lp8788_ldo_enable_time(struct regulator_dev *rdev)
@@ -616,10 +535,11 @@ static struct regulator_desc lp8788_aldo_desc[] = {
},
};
-static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo,
+static int lp8788_gpio_request_ldo_en(struct platform_device *pdev,
+ struct lp8788_ldo *ldo,
enum lp8788_ext_ldo_en_id id)
{
- struct device *dev = ldo->lp->dev;
+ struct device *dev = &pdev->dev;
struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
int ret, gpio, pinstate;
char *name[] = {
@@ -647,7 +567,8 @@ static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo,
return ret;
}
-static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
+static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
+ struct lp8788_ldo *ldo,
enum lp8788_ldo_id id)
{
int ret;
@@ -693,9 +614,11 @@ static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
ldo->en_pin = pdata->ldo_pin[enable_id];
- ret = lp8788_gpio_request_ldo_en(ldo, enable_id);
- if (ret)
+ ret = lp8788_gpio_request_ldo_en(pdev, ldo, enable_id);
+ if (ret) {
+ ldo->en_pin = NULL;
goto set_default_ldo_enable_mode;
+ }
return ret;
@@ -712,16 +635,16 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
int ret;
- ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
if (!ldo)
return -ENOMEM;
ldo->lp = lp;
- ret = lp8788_config_ldo_enable_mode(ldo, lp8788_dldo_id[id]);
+ ret = lp8788_config_ldo_enable_mode(pdev, ldo, lp8788_dldo_id[id]);
if (ret)
return ret;
- cfg.dev = lp->dev;
+ cfg.dev = pdev->dev.parent;
cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL;
cfg.driver_data = ldo;
cfg.regmap = lp->regmap;
@@ -729,7 +652,7 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
rdev = regulator_register(&lp8788_dldo_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
- dev_err(lp->dev, "DLDO%d regulator register err = %d\n",
+ dev_err(&pdev->dev, "DLDO%d regulator register err = %d\n",
id + 1, ret);
return ret;
}
@@ -768,16 +691,16 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
int ret;
- ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
if (!ldo)
return -ENOMEM;
ldo->lp = lp;
- ret = lp8788_config_ldo_enable_mode(ldo, lp8788_aldo_id[id]);
+ ret = lp8788_config_ldo_enable_mode(pdev, ldo, lp8788_aldo_id[id]);
if (ret)
return ret;
- cfg.dev = lp->dev;
+ cfg.dev = pdev->dev.parent;
cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL;
cfg.driver_data = ldo;
cfg.regmap = lp->regmap;
@@ -785,7 +708,7 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
rdev = regulator_register(&lp8788_aldo_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
- dev_err(lp->dev, "ALDO%d regulator register err = %d\n",
+ dev_err(&pdev->dev, "ALDO%d regulator register err = %d\n",
id + 1, ret);
return ret;
}
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index b85040caaea3..e4586ee8858d 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -75,13 +75,14 @@ static int max77686_buck_set_suspend_disable(struct regulator_dev *rdev)
{
unsigned int val;
struct max77686_data *max77686 = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
- if (rdev->desc->id == MAX77686_BUCK1)
+ if (id == MAX77686_BUCK1)
val = 0x1;
else
val = 0x1 << MAX77686_OPMODE_BUCK234_SHIFT;
- max77686->opmode[rdev->desc->id] = val;
+ max77686->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
val);
@@ -93,9 +94,10 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
{
struct max77686_data *max77686 = rdev_get_drvdata(rdev);
unsigned int val;
+ int id = rdev_get_id(rdev);
/* BUCK[5-9] doesn't support this feature */
- if (rdev->desc->id >= MAX77686_BUCK5)
+ if (id >= MAX77686_BUCK5)
return 0;
switch (mode) {
@@ -111,7 +113,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
return -EINVAL;
}
- max77686->opmode[rdev->desc->id] = val;
+ max77686->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
val);
@@ -140,7 +142,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
return -EINVAL;
}
- max77686->opmode[rdev->desc->id] = val;
+ max77686->opmode[rdev_get_id(rdev)] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
val);
@@ -152,7 +154,7 @@ static int max77686_enable(struct regulator_dev *rdev)
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
- max77686->opmode[rdev->desc->id]);
+ max77686->opmode[rdev_get_id(rdev)]);
}
static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
@@ -379,9 +381,10 @@ static struct regulator_desc regulators[] = {
};
#ifdef CONFIG_OF
-static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max77686_platform_data *pdata)
{
+ struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct device_node *pmic_np, *regulators_np;
struct max77686_regulator_data *rdata;
struct of_regulator_match rmatch;
@@ -390,15 +393,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
pmic_np = iodev->dev->of_node;
regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
if (!regulators_np) {
- dev_err(iodev->dev, "could not find regulators sub-node\n");
+ dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
}
pdata->num_regulators = ARRAY_SIZE(regulators);
- rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+ rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
if (!rdata) {
- dev_err(iodev->dev,
+ dev_err(&pdev->dev,
"could not allocate memory for regulator data\n");
return -ENOMEM;
}
@@ -407,7 +410,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
rmatch.name = regulators[i].name;
rmatch.init_data = NULL;
rmatch.of_node = NULL;
- of_regulator_match(iodev->dev, regulators_np, &rmatch, 1);
+ of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1);
rdata[i].initdata = rmatch.init_data;
rdata[i].of_node = rmatch.of_node;
}
@@ -417,7 +420,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
return 0;
}
#else
-static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max77686_platform_data *pdata)
{
return 0;
@@ -440,7 +443,7 @@ static int max77686_pmic_probe(struct platform_device *pdev)
}
if (iodev->dev->of_node) {
- ret = max77686_pmic_dt_parse_pdata(iodev, pdata);
+ ret = max77686_pmic_dt_parse_pdata(pdev, pdata);
if (ret)
return ret;
}
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index d1a77512d83e..4568c15fa78d 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -224,11 +224,11 @@ static struct of_regulator_match max8907_matches[] = {
static int max8907_regulator_parse_dt(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.parent->of_node;
- struct device_node *regulators;
+ struct device_node *np, *regulators;
int ret;
- if (!pdev->dev.parent->of_node)
+ np = of_node_get(pdev->dev.parent->of_node);
+ if (!np)
return 0;
regulators = of_find_node_by_name(np, "regulators");
@@ -237,9 +237,9 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
return -EINVAL;
}
- ret = of_regulator_match(pdev->dev.parent, regulators,
- max8907_matches,
+ ret = of_regulator_match(&pdev->dev, regulators, max8907_matches,
ARRAY_SIZE(max8907_matches));
+ of_node_put(regulators);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
ret);
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 446a85445553..0d5f64a805a0 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -252,7 +252,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
{
struct device_node *nproot, *np;
int rcount;
- nproot = pdev->dev.parent->of_node;
+ nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
np = of_find_node_by_name(nproot, "regulators");
@@ -263,6 +263,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
rcount = of_regulator_match(&pdev->dev, np,
&max8925_regulator_matches[ridx], 1);
+ of_node_put(np);
if (rcount < 0)
return -ENODEV;
config->init_data = max8925_regulator_matches[ridx].init_data;
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 3ee26387b121..9a8ea9163005 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -248,8 +248,8 @@ static struct regulator_ops max8973_dcdc_ops = {
.get_mode = max8973_dcdc_get_mode,
};
-static int __devinit max8973_init_dcdc(struct max8973_chip *max,
- struct max8973_regulator_platform_data *pdata)
+static int max8973_init_dcdc(struct max8973_chip *max,
+ struct max8973_regulator_platform_data *pdata)
{
int ret;
uint8_t control1 = 0;
@@ -359,8 +359,8 @@ static const struct regmap_config max8973_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit max8973_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max8973_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct max8973_regulator_platform_data *pdata;
struct regulator_config config = { };
@@ -463,7 +463,7 @@ static int __devinit max8973_probe(struct i2c_client *client,
return 0;
}
-static int __devexit max8973_remove(struct i2c_client *client)
+static int max8973_remove(struct i2c_client *client)
{
struct max8973_chip *max = i2c_get_clientdata(client);
@@ -484,7 +484,7 @@ static struct i2c_driver max8973_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = max8973_probe,
- .remove = __devexit_p(max8973_remove),
+ .remove = max8973_remove,
.id_table = max8973_id,
};
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index df0eafb0dc7e..0ac7a87519b4 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -54,6 +54,13 @@ struct max8997_data {
u8 saved_states[MAX8997_REG_MAX];
};
+static const unsigned int safeoutvolt[] = {
+ 4850000,
+ 4900000,
+ 4950000,
+ 3300000,
+};
+
static inline void max8997_set_gpio(struct max8997_data *max8997)
{
int set3 = (max8997->buck125_gpioindex) & 0x1;
@@ -71,26 +78,26 @@ struct voltage_map_desc {
int step;
};
-/* Voltage maps in mV */
+/* Voltage maps in uV */
static const struct voltage_map_desc ldo_voltage_map_desc = {
- .min = 800, .max = 3950, .step = 50,
+ .min = 800000, .max = 3950000, .step = 50000,
}; /* LDO1 ~ 18, 21 all */
static const struct voltage_map_desc buck1245_voltage_map_desc = {
- .min = 650, .max = 2225, .step = 25,
+ .min = 650000, .max = 2225000, .step = 25000,
}; /* Buck1, 2, 4, 5 */
static const struct voltage_map_desc buck37_voltage_map_desc = {
- .min = 750, .max = 3900, .step = 50,
+ .min = 750000, .max = 3900000, .step = 50000,
}; /* Buck3, 7 */
-/* current map in mA */
+/* current map in uA */
static const struct voltage_map_desc charger_current_map_desc = {
- .min = 200, .max = 950, .step = 50,
+ .min = 200000, .max = 950000, .step = 50000,
};
static const struct voltage_map_desc topoff_current_map_desc = {
- .min = 50, .max = 200, .step = 10,
+ .min = 50000, .max = 200000, .step = 10000,
};
static const struct voltage_map_desc *reg_voltage_map[] = {
@@ -130,29 +137,6 @@ static const struct voltage_map_desc *reg_voltage_map[] = {
[MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc,
};
-static int max8997_list_voltage_safeout(struct regulator_dev *rdev,
- unsigned int selector)
-{
- int rid = rdev_get_id(rdev);
-
- if (rid == MAX8997_ESAFEOUT1 || rid == MAX8997_ESAFEOUT2) {
- switch (selector) {
- case 0:
- return 4850000;
- case 1:
- return 4900000;
- case 2:
- return 4950000;
- case 3:
- return 3300000;
- default:
- return -EINVAL;
- }
- }
-
- return -EINVAL;
-}
-
static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev,
unsigned int selector)
{
@@ -194,7 +178,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
if (val > desc->max)
return -EINVAL;
- return val * 1000;
+ return val;
}
static int max8997_get_enable_register(struct regulator_dev *rdev,
@@ -485,7 +469,6 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
const struct voltage_map_desc *desc;
int rid = rdev_get_id(rdev);
int i, reg, shift, mask, ret;
@@ -509,7 +492,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
desc = reg_voltage_map[rid];
- i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+ i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
if (i < 0)
return i;
@@ -523,7 +506,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
return ret;
}
-static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
+static int max8997_set_voltage_buck_time_sel(struct regulator_dev *rdev,
unsigned int old_selector,
unsigned int new_selector)
{
@@ -557,7 +540,7 @@ static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
case MAX8997_BUCK4:
case MAX8997_BUCK5:
return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
- max8997->ramp_delay);
+ max8997->ramp_delay * 1000);
}
return 0;
@@ -656,7 +639,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
const struct voltage_map_desc *desc;
int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
bool gpio_dvs_mode = false;
- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
return -EINVAL;
@@ -681,7 +663,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
selector);
desc = reg_voltage_map[rid];
- new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+ new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
if (new_val < 0)
return new_val;
@@ -722,49 +704,23 @@ out:
return 0;
}
-static const int safeoutvolt[] = {
- 3300000,
- 4850000,
- 4900000,
- 4950000,
-};
-
/* For SAFEOUT1 and SAFEOUT2 */
-static int max8997_set_voltage_safeout(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int max8997_set_voltage_safeout_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int rid = rdev_get_id(rdev);
int reg, shift = 0, mask, ret;
- int i = 0;
- u8 val;
if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(safeoutvolt); i++) {
- if (min_uV <= safeoutvolt[i] &&
- max_uV >= safeoutvolt[i])
- break;
- }
-
- if (i >= ARRAY_SIZE(safeoutvolt))
- return -EINVAL;
-
- if (i == 0)
- val = 0x3;
- else
- val = i - 1;
-
ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
- ret = max8997_update_reg(i2c, reg, val << shift, mask << shift);
- *selector = val;
-
- return ret;
+ return max8997_update_reg(i2c, reg, selector << shift, mask << shift);
}
static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
@@ -801,7 +757,6 @@ static struct regulator_ops max8997_ldo_ops = {
.disable = max8997_reg_disable,
.get_voltage_sel = max8997_get_voltage_sel,
.set_voltage = max8997_set_voltage_ldobuck,
- .set_voltage_time_sel = max8997_set_voltage_ldobuck_time_sel,
.set_suspend_disable = max8997_reg_disable_suspend,
};
@@ -812,7 +767,7 @@ static struct regulator_ops max8997_buck_ops = {
.disable = max8997_reg_disable,
.get_voltage_sel = max8997_get_voltage_sel,
.set_voltage = max8997_set_voltage_buck,
- .set_voltage_time_sel = max8997_set_voltage_ldobuck_time_sel,
+ .set_voltage_time_sel = max8997_set_voltage_buck_time_sel,
.set_suspend_disable = max8997_reg_disable_suspend,
};
@@ -825,12 +780,12 @@ static struct regulator_ops max8997_fixedvolt_ops = {
};
static struct regulator_ops max8997_safeout_ops = {
- .list_voltage = max8997_list_voltage_safeout,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
.disable = max8997_reg_disable,
.get_voltage_sel = max8997_get_voltage_sel,
- .set_voltage = max8997_set_voltage_safeout,
+ .set_voltage_sel = max8997_set_voltage_safeout_sel,
.set_suspend_disable = max8997_reg_disable_suspend,
};
@@ -936,7 +891,7 @@ static struct regulator_desc regulators[] = {
};
#ifdef CONFIG_OF
-static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
struct max8997_platform_data *pdata,
struct device_node *pmic_np)
{
@@ -946,7 +901,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
gpio = of_get_named_gpio(pmic_np,
"max8997,pmic-buck125-dvs-gpios", i);
if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+ dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
return -EINVAL;
}
pdata->buck125_gpios[i] = gpio;
@@ -954,35 +909,34 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
return 0;
}
-static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max8997_platform_data *pdata)
{
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct device_node *pmic_np, *regulators_np, *reg_np;
struct max8997_regulator_data *rdata;
unsigned int i, dvs_voltage_nr = 1, ret;
- pmic_np = iodev->dev->of_node;
+ pmic_np = of_node_get(iodev->dev->of_node);
if (!pmic_np) {
- dev_err(iodev->dev, "could not find pmic sub-node\n");
+ dev_err(&pdev->dev, "could not find pmic sub-node\n");
return -ENODEV;
}
regulators_np = of_find_node_by_name(pmic_np, "regulators");
if (!regulators_np) {
- dev_err(iodev->dev, "could not find regulators sub-node\n");
+ dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
}
/* count the number of regulators to be supported in pmic */
- pdata->num_regulators = 0;
- for_each_child_of_node(regulators_np, reg_np)
- pdata->num_regulators++;
+ pdata->num_regulators = of_get_child_count(regulators_np);
- rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+ rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
if (!rdata) {
- dev_err(iodev->dev, "could not allocate memory for "
- "regulator data\n");
+ of_node_put(regulators_np);
+ dev_err(&pdev->dev, "could not allocate memory for regulator data\n");
return -ENOMEM;
}
@@ -993,17 +947,18 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
break;
if (i == ARRAY_SIZE(regulators)) {
- dev_warn(iodev->dev, "don't know how to configure "
- "regulator %s\n", reg_np->name);
+ dev_warn(&pdev->dev, "don't know how to configure regulator %s\n",
+ reg_np->name);
continue;
}
rdata->id = i;
- rdata->initdata = of_get_regulator_init_data(
- iodev->dev, reg_np);
+ rdata->initdata = of_get_regulator_init_data(&pdev->dev,
+ reg_np);
rdata->reg_node = reg_np;
rdata++;
}
+ of_node_put(regulators_np);
if (of_get_property(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs", NULL))
pdata->buck1_gpiodvs = true;
@@ -1016,7 +971,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
- ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
+ ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
if (ret)
return -EINVAL;
@@ -1027,8 +982,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
} else {
if (pdata->buck125_default_idx >= 8) {
pdata->buck125_default_idx = 0;
- dev_info(iodev->dev, "invalid value for "
- "default dvs index, using 0 instead\n");
+ dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");
}
}
@@ -1042,28 +996,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
if (of_property_read_u32_array(pmic_np,
"max8997,pmic-buck1-dvs-voltage",
pdata->buck1_voltage, dvs_voltage_nr)) {
- dev_err(iodev->dev, "buck1 voltages not specified\n");
+ dev_err(&pdev->dev, "buck1 voltages not specified\n");
return -EINVAL;
}
if (of_property_read_u32_array(pmic_np,
"max8997,pmic-buck2-dvs-voltage",
pdata->buck2_voltage, dvs_voltage_nr)) {
- dev_err(iodev->dev, "buck2 voltages not specified\n");
+ dev_err(&pdev->dev, "buck2 voltages not specified\n");
return -EINVAL;
}
if (of_property_read_u32_array(pmic_np,
"max8997,pmic-buck5-dvs-voltage",
pdata->buck5_voltage, dvs_voltage_nr)) {
- dev_err(iodev->dev, "buck5 voltages not specified\n");
+ dev_err(&pdev->dev, "buck5 voltages not specified\n");
return -EINVAL;
}
return 0;
}
#else
-static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max8997_platform_data *pdata)
{
return 0;
@@ -1087,7 +1041,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
}
if (iodev->dev->of_node) {
- ret = max8997_pmic_dt_parse_pdata(iodev, pdata);
+ ret = max8997_pmic_dt_parse_pdata(pdev, pdata);
if (ret)
return ret;
}
@@ -1123,8 +1077,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
max8997->buck1_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
- pdata->buck1_voltage[i] / 1000,
- pdata->buck1_voltage[i] / 1000 +
+ pdata->buck1_voltage[i],
+ pdata->buck1_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_out;
@@ -1132,8 +1086,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
max8997->buck2_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
- pdata->buck2_voltage[i] / 1000,
- pdata->buck2_voltage[i] / 1000 +
+ pdata->buck2_voltage[i],
+ pdata->buck2_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_out;
@@ -1141,8 +1095,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
max8997->buck5_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
- pdata->buck5_voltage[i] / 1000,
- pdata->buck5_voltage[i] / 1000 +
+ pdata->buck5_voltage[i],
+ pdata->buck5_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_out;
@@ -1236,13 +1190,15 @@ static int max8997_pmic_probe(struct platform_device *pdev)
int id = pdata->regulators[i].id;
desc = reg_voltage_map[id];
- if (desc)
+ if (desc) {
regulators[id].n_voltages =
(desc->max - desc->min) / desc->step + 1;
- else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2)
- regulators[id].n_voltages = 4;
- else if (id == MAX8997_CHARGER_CV)
+ } else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2) {
+ regulators[id].volt_table = safeoutvolt;
+ regulators[id].n_voltages = ARRAY_SIZE(safeoutvolt);
+ } else if (id == MAX8997_CHARGER_CV) {
regulators[id].n_voltages = 16;
+ }
config.dev = max8997->dev;
config.init_data = pdata->regulators[i].initdata;
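For context, the safeout conversion above leans on the regulator core's table helpers; a minimal, illustrative sketch of a fixed-table regulator description (names and register values are hypothetical, and the regmap sel helpers are used only to keep the sketch self-contained; max8997 keeps its own get/set_voltage_sel callbacks):

#include <linux/regulator/driver.h>

static const unsigned int example_volt_table[] = {
	4850000, 4900000, 4950000, 3300000,	/* index == register selector */
};

static struct regulator_ops example_table_ops = {
	.list_voltage	 = regulator_list_voltage_table,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
};

static const struct regulator_desc example_desc = {
	.name		= "example-safeout",
	.ops		= &example_table_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.volt_table	= example_volt_table,
	.n_voltages	= ARRAY_SIZE(example_volt_table),
	.vsel_reg	= 0x5a,			/* hypothetical selector register */
	.vsel_mask	= 0x3,
};

With .volt_table filled in, regulator_list_voltage_table() simply indexes the array, which is why the hand-rolled max8997_list_voltage_safeout() could be dropped above.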
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index b821d08eb64a..b588f07c7cad 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -51,39 +51,39 @@ struct voltage_map_desc {
int step;
};
-/* Voltage maps */
+/* Voltage maps in uV */
static const struct voltage_map_desc ldo23_voltage_map_desc = {
- .min = 800, .step = 50, .max = 1300,
+ .min = 800000, .step = 50000, .max = 1300000,
};
static const struct voltage_map_desc ldo456711_voltage_map_desc = {
- .min = 1600, .step = 100, .max = 3600,
+ .min = 1600000, .step = 100000, .max = 3600000,
};
static const struct voltage_map_desc ldo8_voltage_map_desc = {
- .min = 3000, .step = 100, .max = 3600,
+ .min = 3000000, .step = 100000, .max = 3600000,
};
static const struct voltage_map_desc ldo9_voltage_map_desc = {
- .min = 2800, .step = 100, .max = 3100,
+ .min = 2800000, .step = 100000, .max = 3100000,
};
static const struct voltage_map_desc ldo10_voltage_map_desc = {
- .min = 950, .step = 50, .max = 1300,
+ .min = 950000, .step = 50000, .max = 1300000,
};
static const struct voltage_map_desc ldo1213_voltage_map_desc = {
- .min = 800, .step = 100, .max = 3300,
+ .min = 800000, .step = 100000, .max = 3300000,
};
static const struct voltage_map_desc ldo1415_voltage_map_desc = {
- .min = 1200, .step = 100, .max = 3300,
+ .min = 1200000, .step = 100000, .max = 3300000,
};
static const struct voltage_map_desc ldo1617_voltage_map_desc = {
- .min = 1600, .step = 100, .max = 3600,
+ .min = 1600000, .step = 100000, .max = 3600000,
};
static const struct voltage_map_desc buck12_voltage_map_desc = {
- .min = 750, .step = 25, .max = 1525,
+ .min = 750000, .step = 25000, .max = 1525000,
};
static const struct voltage_map_desc buck3_voltage_map_desc = {
- .min = 1600, .step = 100, .max = 3600,
+ .min = 1600000, .step = 100000, .max = 3600000,
};
static const struct voltage_map_desc buck4_voltage_map_desc = {
- .min = 800, .step = 100, .max = 2300,
+ .min = 800000, .step = 100000, .max = 2300000,
};
static const struct voltage_map_desc *ldo_voltage_map[] = {
@@ -311,25 +311,13 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
dev_get_platdata(max8998->iodev->dev);
struct i2c_client *i2c = max8998->iodev->i2c;
int buck = rdev_get_id(rdev);
- int reg, shift = 0, mask, ret;
- int j, previous_sel;
+ int reg, shift = 0, mask, ret, j;
static u8 buck1_last_val;
ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
- previous_sel = max8998_get_voltage_sel(rdev);
-
- /* Check if voltage needs to be changed */
- /* if previous_voltage equal new voltage, return */
- if (previous_sel == selector) {
- dev_dbg(max8998->dev, "No voltage change, old:%d, new:%d\n",
- regulator_list_voltage_linear(rdev, previous_sel),
- regulator_list_voltage_linear(rdev, selector));
- return ret;
- }
-
switch (buck) {
case MAX8998_BUCK1:
dev_dbg(max8998->dev,
@@ -445,9 +433,9 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
return 0;
- difference = (new_selector - old_selector) * desc->step;
+ difference = (new_selector - old_selector) * desc->step / 1000;
if (difference > 0)
- return difference / ((val & 0x0f) + 1);
+ return DIV_ROUND_UP(difference, (val & 0x0f) + 1);
return 0;
}
@@ -702,7 +690,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck1_voltage1 / 1000))
+ < pdata->buck1_voltage1)
i++;
max8998->buck1_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
@@ -713,7 +701,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck1_voltage2 / 1000))
+ < pdata->buck1_voltage2)
i++;
max8998->buck1_vol[1] = i;
@@ -725,7 +713,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck1_voltage3 / 1000))
+ < pdata->buck1_voltage3)
i++;
max8998->buck1_vol[2] = i;
@@ -737,7 +725,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck1_voltage4 / 1000))
+ < pdata->buck1_voltage4)
i++;
max8998->buck1_vol[3] = i;
@@ -763,7 +751,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck2_voltage1 / 1000))
+ < pdata->buck2_voltage1)
i++;
max8998->buck2_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
@@ -774,7 +762,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck2_voltage2 / 1000))
+ < pdata->buck2_voltage2)
i++;
max8998->buck2_vol[1] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
@@ -792,8 +780,8 @@ static int max8998_pmic_probe(struct platform_device *pdev)
int count = (desc->max - desc->min) / desc->step + 1;
regulators[index].n_voltages = count;
- regulators[index].min_uV = desc->min * 1000;
- regulators[index].uV_step = desc->step * 1000;
+ regulators[index].min_uV = desc->min;
+ regulators[index].uV_step = desc->step;
}
config.dev = max8998->dev;
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 0d84b1f33199..9891aec47b57 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -164,6 +164,14 @@ static const unsigned int mc13892_sw1[] = {
1350000, 1375000
};
+/*
+ * Note: this table is used to derive SWxVSEL by index into
+ * the array. Offset the values by the index of 1100000uV
+ * to get the actual register value for that voltage selector
+ * if the HI bit is to be set as well.
+ */
+#define MC13892_SWxHI_SEL_OFFSET 20
+
static const unsigned int mc13892_sw[] = {
600000, 625000, 650000, 675000, 700000, 725000,
750000, 775000, 800000, 825000, 850000, 875000,
@@ -239,7 +247,6 @@ static const unsigned int mc13892_pwgtdrv[] = {
};
static struct regulator_ops mc13892_gpo_regulator_ops;
-/* sw regulators need special care due to the "hi bit" */
static struct regulator_ops mc13892_sw_regulator_ops;
@@ -396,7 +403,7 @@ static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
int ret, id = rdev_get_id(rdev);
- unsigned int val;
+ unsigned int val, selector;
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
@@ -407,12 +414,28 @@ static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev)
if (ret)
return ret;
- val = (val & mc13892_regulators[id].vsel_mask)
- >> mc13892_regulators[id].vsel_shift;
+ /*
+ * Figure out if the HI bit is set inside the switcher mode register
+ * since this means the selector value we return is at a different
+ * offset into the selector table.
+ *
+ * According to the MC13892 documentation note 59 (Table 47) the SW1
+ * buck switcher does not support output range programming therefore
+ * the HI bit must always remain 0. So do not do anything strange if
+ * our register is MC13892_SWITCHERS0.
+ */
+
+ selector = val & mc13892_regulators[id].vsel_mask;
+
+ if ((mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) &&
+ (val & MC13892_SWITCHERS0_SWxHI)) {
+ selector += MC13892_SWxHI_SEL_OFFSET;
+ }
- dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d val: 0x%08x selector: %d\n",
+ __func__, id, val, selector);
- return val;
+ return selector;
}
static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
@@ -425,18 +448,35 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
volt = rdev->desc->volt_table[selector];
mask = mc13892_regulators[id].vsel_mask;
- reg_value = selector << mc13892_regulators[id].vsel_shift;
-
- if (volt > 1375000) {
- mask |= MC13892_SWITCHERS0_SWxHI;
- reg_value |= MC13892_SWITCHERS0_SWxHI;
- } else if (volt < 1100000) {
- mask |= MC13892_SWITCHERS0_SWxHI;
- reg_value &= ~MC13892_SWITCHERS0_SWxHI;
+ reg_value = selector;
+
+ /*
+ * Don't mess with the HI bit or support HI voltage offsets for SW1.
+ *
+ * Since the get_voltage_sel callback has given a fudged value for
+ * the selector offset, we need to back out that offset if HI is
+ * to be set so we write the correct value to the register.
+ *
+ * The HI bit addition and selector offset handling COULD be more
+ * complicated by shifting and masking off the voltage selector part
+ * of the register then logical OR it back in, but since the selector
+ * is at bits 4:0 there is very little point. This makes the whole
+ * thing more readable and we do far less work.
+ */
+
+ if (mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) {
+ if (volt > 1375000) {
+ reg_value -= MC13892_SWxHI_SEL_OFFSET;
+ reg_value |= MC13892_SWITCHERS0_SWxHI;
+ mask |= MC13892_SWITCHERS0_SWxHI;
+ } else if (volt < 1100000) {
+ reg_value &= ~MC13892_SWITCHERS0_SWxHI;
+ mask |= MC13892_SWITCHERS0_SWxHI;
+ }
}
mc13xxx_lock(priv->mc13xxx);
- ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg, mask,
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, mask,
reg_value);
mc13xxx_unlock(priv->mc13xxx);
@@ -495,15 +535,18 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
struct mc13xxx_regulator_init_data *mc13xxx_data;
struct regulator_config config = { };
int i, ret;
- int num_regulators = 0;
+ int num_regulators = 0, num_parsed;
u32 val;
num_regulators = mc13xxx_get_num_regulators_dt(pdev);
+
if (num_regulators <= 0 && pdata)
num_regulators = pdata->num_regulators;
if (num_regulators <= 0)
return -EINVAL;
+ num_parsed = num_regulators;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
@@ -520,7 +563,7 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
if (ret)
goto err_unlock;
- /* enable switch auto mode */
+ /* enable switch auto mode (on 2.0A silicon only) */
if ((val & 0x0000FFFF) == 0x45d0) {
ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
MC13892_SWITCHERS4_SW1MODE_M |
@@ -546,7 +589,39 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
= mc13892_vcam_get_mode;
mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
- ARRAY_SIZE(mc13892_regulators));
+ ARRAY_SIZE(mc13892_regulators),
+ &num_parsed);
+
+ /*
+ * Perform a little sanity check on the regulator tree - if we found
+ * a number of regulators from mc13xxx_get_num_regulators_dt and
+ * then parsed a smaller number in mc13xxx_parse_regulators_dt then
+ * there is a regulator defined in the regulators node which has
+ * not matched any usable regulator in the driver. In this case,
+ * there is one missing and what will happen is the first regulator
+ * will get registered again.
+ *
+ * Fix this by basically making our number of registerable regulators
+ * equal to the number of regulators we parsed. We are allocating
+ * too much memory for priv, but this is unavoidable at this point.
+ *
+ * As an example of how this can happen, try making a typo in your
+ * regulators node (vviohi {} instead of viohi {}) so that the name
+ * does not match.
+ *
+ * The check will basically pass for platform data (non-DT) because
+ * mc13xxx_parse_regulators_dt for !CONFIG_OF will not touch num_parsed.
+ *
+ */
+ if (num_parsed != num_regulators) {
+ dev_warn(&pdev->dev,
+ "parsed %d != regulators %d - check your device tree!\n",
+ num_parsed, num_regulators);
+
+ num_regulators = num_parsed;
+ priv->num_regulators = num_regulators;
+ }
+
for (i = 0; i < num_regulators; i++) {
struct regulator_init_data *init_data;
struct regulator_desc *desc;
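A minimal sketch of the decode that the new get_voltage_sel path above performs (SW1 never carries the HI bit; the 0x1f mask is written out only for illustration, the driver uses vsel_mask):

static unsigned int example_sw_reg_to_sel(unsigned int val, bool is_sw1)
{
	unsigned int selector = val & 0x1f;		/* selector lives in bits 4:0 */

	if (!is_sw1 && (val & MC13892_SWITCHERS0_SWxHI))
		selector += MC13892_SWxHI_SEL_OFFSET;	/* 1100000 uV range and up */

	return selector;
}

The set path does the inverse for voltages above 1375000 uV: the offset is subtracted from the selector and SWxHI is OR'ed into the register value, as the hunk above shows.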
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 4ed89c654110..23cf9f9c383b 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -164,29 +164,30 @@ EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
#ifdef CONFIG_OF
int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
{
- struct device_node *parent, *child;
- int num = 0;
+ struct device_node *parent;
+ int num;
of_node_get(pdev->dev.parent->of_node);
parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return -ENODEV;
- for_each_child_of_node(parent, child)
- num++;
-
+ num = of_get_child_count(parent);
+ of_node_put(parent);
return num;
}
EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
- int num_regulators)
+ int num_regulators, int *num_parsed)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
struct mc13xxx_regulator_init_data *data, *p;
struct device_node *parent, *child;
- int i;
+ int i, parsed = 0;
+
+ *num_parsed = 0;
of_node_get(pdev->dev.parent->of_node);
parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
@@ -195,24 +196,32 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators,
GFP_KERNEL);
- if (!data)
+ if (!data) {
+ of_node_put(parent);
return NULL;
+ }
+
p = data;
for_each_child_of_node(parent, child) {
for (i = 0; i < num_regulators; i++) {
if (!of_node_cmp(child->name,
regulators[i].desc.name)) {
+
p->id = i;
p->init_data = of_get_regulator_init_data(
&pdev->dev, child);
p->node = child;
p++;
+
+ parsed++;
break;
}
}
}
+ of_node_put(parent);
+ *num_parsed = parsed;
return data;
}
EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt);
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 06c8903f182a..007f83387fd6 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -39,7 +39,7 @@ extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev);
extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
- int num_regulators);
+ int num_regulators, int *num_parsed);
#else
static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
{
@@ -48,7 +48,7 @@ static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
- int num_regulators)
+ int num_regulators, int *num_parsed)
{
return NULL;
}
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 6f684916fd79..66ca769287ab 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node,
if (!dev || !node)
return -EINVAL;
+ for (i = 0; i < num_matches; i++) {
+ struct of_regulator_match *match = &matches[i];
+ match->init_data = NULL;
+ match->of_node = NULL;
+ }
+
for_each_child_of_node(node, child) {
name = of_get_property(child,
"regulator-compatible", NULL);
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index e915629a25cf..cde13bb5a8fb 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -527,6 +527,7 @@ static void palmas_dt_to_pdata(struct device *dev,
u32 prop;
int idx, ret;
+ node = of_node_get(node);
regulators = of_find_node_by_name(node, "regulators");
if (!regulators) {
dev_info(dev, "regulator node not found\n");
@@ -535,6 +536,7 @@ static void palmas_dt_to_pdata(struct device *dev,
ret = of_regulator_match(dev, regulators, palmas_matches,
PALMAS_NUM_REGS);
+ of_node_put(regulators);
if (ret < 0) {
dev_err(dev, "Error parsing regulator init data: %d\n", ret);
return;
@@ -566,11 +568,6 @@ static void palmas_dt_to_pdata(struct device *dev,
pdata->reg_init[idx]->mode_sleep = prop;
ret = of_property_read_u32(palmas_matches[idx].of_node,
- "ti,warm_reset", &prop);
- if (!ret)
- pdata->reg_init[idx]->warm_reset = prop;
-
- ret = of_property_read_u32(palmas_matches[idx].of_node,
"ti,tstep", &prop);
if (!ret)
pdata->reg_init[idx]->tstep = prop;
@@ -806,7 +803,7 @@ static int palmas_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id __devinitdata of_palmas_match_tbl[] = {
+static struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas-pmic", },
{ /* end */ }
};
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index bd062a2ffbe2..cd9ea2ea1826 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = {
.min_uV = S2MPS11_BUCK_MIN2, \
.uV_step = S2MPS11_BUCK_STEP2, \
.n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
- .vsel_reg = S2MPS11_REG_B9CTRL2, \
+ .vsel_reg = S2MPS11_REG_B10CTRL2, \
.vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
- .enable_reg = S2MPS11_REG_B9CTRL1, \
+ .enable_reg = S2MPS11_REG_B10CTRL1, \
.enable_mask = S2MPS11_ENABLE_MASK \
}
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 9f991f2c525a..8a831947c351 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -14,6 +14,7 @@
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -21,6 +22,9 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s5m8767.h>
+#include <linux/regulator/of_regulator.h>
+
+#define S5M8767_OPMODE_NORMAL_MODE 0x1
struct s5m8767_info {
struct device *dev;
@@ -214,7 +218,7 @@ static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
int ret, reg;
int mask = 0xc0, enable_ctrl;
- u8 val;
+ unsigned int val;
ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
if (ret == -EINVAL)
@@ -255,10 +259,8 @@ static int s5m8767_reg_disable(struct regulator_dev *rdev)
return sec_reg_update(s5m8767->iodev, reg, ~mask, mask);
}
-static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
+static int s5m8767_get_vsel_reg(int reg_id, struct s5m8767_info *s5m8767)
{
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int reg_id = rdev_get_id(rdev);
int reg;
switch (reg_id) {
@@ -296,43 +298,18 @@ static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
return -EINVAL;
}
- *_reg = reg;
-
- return 0;
+ return reg;
}
-static int s5m8767_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int reg, mask, ret;
- int reg_id = rdev_get_id(rdev);
- u8 val;
-
- ret = s5m8767_get_voltage_register(rdev, &reg);
- if (ret)
- return ret;
-
- mask = (reg_id < S5M8767_BUCK1) ? 0x3f : 0xff;
-
- ret = sec_reg_read(s5m8767->iodev, reg, &val);
- if (ret)
- return ret;
-
- val &= mask;
-
- return val;
-}
-
-static int s5m8767_convert_voltage_to_sel(
- const struct sec_voltage_desc *desc,
- int min_vol, int max_vol)
+static int s5m8767_convert_voltage_to_sel(const struct sec_voltage_desc *desc,
+ int min_vol)
{
int selector = 0;
if (desc == NULL)
return -EINVAL;
- if (max_vol < desc->min || min_vol > desc->max)
+ if (min_vol > desc->max)
return -EINVAL;
if (min_vol < desc->min)
@@ -340,7 +317,7 @@ static int s5m8767_convert_voltage_to_sel(
selector = DIV_ROUND_UP(min_vol - desc->min, desc->step);
- if (desc->min + desc->step * selector > max_vol)
+ if (desc->min + desc->step * selector > desc->max)
return -EINVAL;
return selector;
@@ -373,15 +350,13 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
int reg_id = rdev_get_id(rdev);
- int reg, mask, ret = 0, old_index, index = 0;
+ int old_index, index = 0;
u8 *buck234_vol = NULL;
switch (reg_id) {
case S5M8767_LDO1 ... S5M8767_LDO28:
- mask = 0x3f;
break;
case S5M8767_BUCK1 ... S5M8767_BUCK6:
- mask = 0xff;
if (reg_id == S5M8767_BUCK2 && s5m8767->buck2_gpiodvs)
buck234_vol = &s5m8767->buck2_vol[0];
else if (reg_id == S5M8767_BUCK3 && s5m8767->buck3_gpiodvs)
@@ -392,7 +367,6 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
case S5M8767_BUCK7 ... S5M8767_BUCK8:
return -EINVAL;
case S5M8767_BUCK9:
- mask = 0xff;
break;
default:
return -EINVAL;
@@ -412,11 +386,7 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
else
return s5m8767_set_low(s5m8767);
} else {
- ret = s5m8767_get_voltage_register(rdev, &reg);
- if (ret)
- return ret;
-
- return sec_reg_update(s5m8767->iodev, reg, selector, mask);
+ return regulator_set_voltage_sel_regmap(rdev, selector);
}
}
@@ -441,7 +411,7 @@ static struct regulator_ops s5m8767_ops = {
.is_enabled = s5m8767_reg_is_enabled,
.enable = s5m8767_reg_enable,
.disable = s5m8767_reg_disable,
- .get_voltage_sel = s5m8767_get_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = s5m8767_set_voltage_sel,
.set_voltage_time_sel = s5m8767_set_voltage_time_sel,
};
@@ -508,10 +478,182 @@ static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(BUCK9),
};
+#ifdef CONFIG_OF
+static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev,
+ struct sec_platform_data *pdata,
+ struct device_node *pmic_np)
+{
+ int i, gpio;
+
+ for (i = 0; i < 3; i++) {
+ gpio = of_get_named_gpio(pmic_np,
+ "s5m8767,pmic-buck-dvs-gpios", i);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+ return -EINVAL;
+ }
+ pdata->buck_gpios[i] = gpio;
+ }
+ return 0;
+}
+
+static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev,
+ struct sec_platform_data *pdata,
+ struct device_node *pmic_np)
+{
+ int i, gpio;
+
+ for (i = 0; i < 3; i++) {
+ gpio = of_get_named_gpio(pmic_np,
+ "s5m8767,pmic-buck-ds-gpios", i);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+ return -EINVAL;
+ }
+ pdata->buck_ds[i] = gpio;
+ }
+ return 0;
+}
+
+static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ struct sec_platform_data *pdata)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct device_node *pmic_np, *regulators_np, *reg_np;
+ struct sec_regulator_data *rdata;
+ struct sec_opmode_data *rmode;
+ unsigned int i, dvs_voltage_nr = 1, ret;
+
+ pmic_np = iodev->dev->of_node;
+ if (!pmic_np) {
+ dev_err(iodev->dev, "could not find pmic sub-node\n");
+ return -ENODEV;
+ }
+
+ regulators_np = of_find_node_by_name(pmic_np, "regulators");
+ if (!regulators_np) {
+ dev_err(iodev->dev, "could not find regulators sub-node\n");
+ return -EINVAL;
+ }
+
+ /* count the number of regulators to be supported in pmic */
+ pdata->num_regulators = of_get_child_count(regulators_np);
+
+ rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
+ pdata->num_regulators, GFP_KERNEL);
+ if (!rdata) {
+ dev_err(iodev->dev,
+ "could not allocate memory for regulator data\n");
+ return -ENOMEM;
+ }
+
+ rmode = devm_kzalloc(&pdev->dev, sizeof(*rmode) *
+ pdata->num_regulators, GFP_KERNEL);
+ if (!rmode) {
+ dev_err(iodev->dev,
+ "could not allocate memory for regulator mode\n");
+ return -ENOMEM;
+ }
+
+ pdata->regulators = rdata;
+ pdata->opmode = rmode;
+ for_each_child_of_node(regulators_np, reg_np) {
+ for (i = 0; i < ARRAY_SIZE(regulators); i++)
+ if (!of_node_cmp(reg_np->name, regulators[i].name))
+ break;
+
+ if (i == ARRAY_SIZE(regulators)) {
+ dev_warn(iodev->dev,
+ "don't know how to configure regulator %s\n",
+ reg_np->name);
+ continue;
+ }
+
+ rdata->id = i;
+ rdata->initdata = of_get_regulator_init_data(
+ &pdev->dev, reg_np);
+ rdata->reg_node = reg_np;
+ rdata++;
+ rmode->id = i;
+ if (of_property_read_u32(reg_np, "op_mode",
+ &rmode->mode)) {
+ dev_warn(iodev->dev,
+ "no op_mode property property at %s\n",
+ reg_np->full_name);
+
+ rmode->mode = S5M8767_OPMODE_NORMAL_MODE;
+ }
+ rmode++;
+ }
+
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL))
+ pdata->buck2_gpiodvs = true;
+
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck3-uses-gpio-dvs", NULL))
+ pdata->buck3_gpiodvs = true;
+
+ if (of_get_property(pmic_np, "s5m8767,pmic-buck4-uses-gpio-dvs", NULL))
+ pdata->buck4_gpiodvs = true;
+
+ if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
+ pdata->buck4_gpiodvs) {
+ ret = s5m8767_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
+ if (ret)
+ return -EINVAL;
+
+ if (of_property_read_u32(pmic_np,
+ "s5m8767,pmic-buck-default-dvs-idx",
+ &pdata->buck_default_idx)) {
+ pdata->buck_default_idx = 0;
+ } else {
+ if (pdata->buck_default_idx >= 8) {
+ pdata->buck_default_idx = 0;
+ dev_info(iodev->dev,
+ "invalid value for default dvs index, use 0\n");
+ }
+ }
+ dvs_voltage_nr = 8;
+ }
+
+ ret = s5m8767_pmic_dt_parse_ds_gpio(iodev, pdata, pmic_np);
+ if (ret)
+ return -EINVAL;
+
+ if (of_property_read_u32_array(pmic_np,
+ "s5m8767,pmic-buck2-dvs-voltage",
+ pdata->buck2_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck2 voltages not specified\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_array(pmic_np,
+ "s5m8767,pmic-buck3-dvs-voltage",
+ pdata->buck3_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck3 voltages not specified\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_array(pmic_np,
+ "s5m8767,pmic-buck4-dvs-voltage",
+ pdata->buck4_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck4 voltages not specified\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#else
+static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ struct sec_platform_data *pdata)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
static int s5m8767_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct sec_platform_data *pdata = iodev->pdata;
struct regulator_config config = { };
struct regulator_dev **rdev;
struct s5m8767_info *s5m8767;
@@ -522,6 +664,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (iodev->dev->of_node) {
+ ret = s5m8767_pmic_dt_parse_pdata(pdev, pdata);
+ if (ret)
+ return ret;
+ }
+
if (pdata->buck2_gpiodvs) {
if (pdata->buck3_gpiodvs || pdata->buck4_gpiodvs) {
dev_err(&pdev->dev, "S5M8767 GPIO DVS NOT VALID\n");
@@ -577,23 +725,17 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
s5m8767->opmode = pdata->opmode;
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
- pdata->buck2_init,
- pdata->buck2_init +
- buck_voltage_val2.step);
+ pdata->buck2_init);
sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
- pdata->buck3_init,
- pdata->buck3_init +
- buck_voltage_val2.step);
+ pdata->buck3_init);
sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
- pdata->buck4_init,
- pdata->buck4_init +
- buck_voltage_val2.step);
+ pdata->buck4_init);
sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
@@ -602,27 +744,21 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
s5m8767->buck2_vol[i] =
s5m8767_convert_voltage_to_sel(
&buck_voltage_val2,
- pdata->buck2_voltage[i],
- pdata->buck2_voltage[i] +
- buck_voltage_val2.step);
+ pdata->buck2_voltage[i]);
}
if (s5m8767->buck3_gpiodvs) {
s5m8767->buck3_vol[i] =
s5m8767_convert_voltage_to_sel(
&buck_voltage_val2,
- pdata->buck3_voltage[i],
- pdata->buck3_voltage[i] +
- buck_voltage_val2.step);
+ pdata->buck3_voltage[i]);
}
if (s5m8767->buck4_gpiodvs) {
s5m8767->buck4_vol[i] =
s5m8767_convert_voltage_to_sel(
&buck_voltage_val2,
- pdata->buck4_voltage[i],
- pdata->buck4_voltage[i] +
- buck_voltage_val2.step);
+ pdata->buck4_voltage[i]);
}
}
@@ -760,11 +896,19 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
(desc->max - desc->min) / desc->step + 1;
regulators[id].min_uV = desc->min;
regulators[id].uV_step = desc->step;
+ regulators[id].vsel_reg =
+ s5m8767_get_vsel_reg(id, s5m8767);
+ if (id < S5M8767_BUCK1)
+ regulators[id].vsel_mask = 0x3f;
+ else
+ regulators[id].vsel_mask = 0xff;
}
config.dev = s5m8767->dev;
config.init_data = pdata->regulators[i].initdata;
config.driver_data = s5m8767;
+ config.regmap = iodev->regmap;
+ config.of_node = pdata->regulators[i].reg_node;
rdev[i] = regulator_register(&regulators[id], &config);
if (IS_ERR(rdev[i])) {
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index ab21133e6784..6e67be75ea1b 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -28,10 +28,13 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/regulator/tps51632-regulator.h>
#include <linux/slab.h>
@@ -85,49 +88,8 @@ struct tps51632_chip {
struct regulator_desc desc;
struct regulator_dev *rdev;
struct regmap *regmap;
- bool enable_pwm_dvfs;
};
-static int tps51632_dcdc_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct tps51632_chip *tps = rdev_get_drvdata(rdev);
- unsigned int data;
- int ret;
- unsigned int reg = TPS51632_VOLTAGE_SELECT_REG;
- int vsel;
-
- if (tps->enable_pwm_dvfs)
- reg = TPS51632_VOLTAGE_BASE_REG;
-
- ret = regmap_read(tps->regmap, reg, &data);
- if (ret < 0) {
- dev_err(tps->dev, "reg read failed, err %d\n", ret);
- return ret;
- }
-
- vsel = data & TPS51632_VOUT_MASK;
- return vsel;
-}
-
-static int tps51632_dcdc_set_voltage_sel(struct regulator_dev *rdev,
- unsigned selector)
-{
- struct tps51632_chip *tps = rdev_get_drvdata(rdev);
- int ret;
- unsigned int reg = TPS51632_VOLTAGE_SELECT_REG;
-
- if (tps->enable_pwm_dvfs)
- reg = TPS51632_VOLTAGE_BASE_REG;
-
- if (selector > TPS51632_MAX_VSEL)
- return -EINVAL;
-
- ret = regmap_write(tps->regmap, reg, selector);
- if (ret < 0)
- dev_err(tps->dev, "reg write failed, err %d\n", ret);
- return ret;
-}
-
static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
int ramp_delay)
{
@@ -144,8 +106,8 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
}
static struct regulator_ops tps51632_dcdc_ops = {
- .get_voltage_sel = tps51632_dcdc_get_voltage_sel,
- .set_voltage_sel = tps51632_dcdc_set_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = tps51632_dcdc_set_ramp_delay,
@@ -162,7 +124,6 @@ static int tps51632_init_dcdc(struct tps51632_chip *tps,
goto skip_pwm_config;
control |= TPS51632_DVFS_PWMEN;
- tps->enable_pwm_dvfs = pdata->enable_pwm_dvfs;
vsel = TPS51632_VOLT_VSEL(pdata->base_voltage_uV);
ret = regmap_write(tps->regmap, TPS51632_VOLTAGE_BASE_REG, vsel);
if (ret < 0) {
@@ -205,22 +166,96 @@ skip_pwm_config:
return ret;
}
-static bool rd_wr_reg(struct device *dev, unsigned int reg)
+static bool is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS51632_OFFSET_REG:
+ case TPS51632_FAULT_REG:
+ case TPS51632_IMON_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_read_reg(struct device *dev, unsigned int reg)
{
- if ((reg >= 0x8) && (reg <= 0x10))
+ switch (reg) {
+ case 0x08 ... 0x0F:
return false;
- return true;
+ default:
+ return true;
+ }
+}
+
+static bool is_write_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS51632_VOLTAGE_SELECT_REG:
+ case TPS51632_VOLTAGE_BASE_REG:
+ case TPS51632_VMAX_REG:
+ case TPS51632_DVFS_CONTROL_REG:
+ case TPS51632_POWER_STATE_REG:
+ case TPS51632_SLEW_REGS:
+ return true;
+ default:
+ return false;
+ }
}
static const struct regmap_config tps51632_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .writeable_reg = rd_wr_reg,
- .readable_reg = rd_wr_reg,
+ .writeable_reg = is_write_reg,
+ .readable_reg = is_read_reg,
+ .volatile_reg = is_volatile_reg,
.max_register = TPS51632_MAX_REG - 1,
.cache_type = REGCACHE_RBTREE,
};
+#if defined(CONFIG_OF)
+static const struct of_device_id tps51632_of_match[] = {
+ { .compatible = "ti,tps51632",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tps51632_of_match);
+
+static struct tps51632_regulator_platform_data *
+ of_get_tps51632_platform_data(struct device *dev)
+{
+ struct tps51632_regulator_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "Memory alloc failed for platform data\n");
+ return NULL;
+ }
+
+ pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
+ if (!pdata->reg_init_data) {
+ dev_err(dev, "Not able to get OF regulator init data\n");
+ return NULL;
+ }
+
+ pdata->enable_pwm_dvfs =
+ of_property_read_bool(np, "ti,enable-pwm-dvfs");
+ pdata->dvfs_step_20mV = of_property_read_bool(np, "ti,dvfs-step-20mV");
+
+ pdata->base_voltage_uV = pdata->reg_init_data->constraints.min_uV ? :
+ TPS51632_MIN_VOLATGE;
+ pdata->max_voltage_uV = pdata->reg_init_data->constraints.max_uV ? :
+ TPS51632_MAX_VOLATGE;
+ return pdata;
+}
+#else
+static struct tps51632_regulator_platform_data *
+ of_get_tps51632_platform_data(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int tps51632_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -230,7 +265,19 @@ static int tps51632_probe(struct i2c_client *client,
int ret;
struct regulator_config config = { };
+ if (client->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_device(of_match_ptr(tps51632_of_match),
+ &client->dev);
+ if (!match) {
+ dev_err(&client->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ }
+
pdata = client->dev.platform_data;
+ if (!pdata && client->dev.of_node)
+ pdata = of_get_tps51632_platform_data(&client->dev);
if (!pdata) {
dev_err(&client->dev, "No Platform data\n");
return -EINVAL;
@@ -269,6 +316,12 @@ static int tps51632_probe(struct i2c_client *client,
tps->desc.type = REGULATOR_VOLTAGE;
tps->desc.owner = THIS_MODULE;
+ if (pdata->enable_pwm_dvfs)
+ tps->desc.vsel_reg = TPS51632_VOLTAGE_BASE_REG;
+ else
+ tps->desc.vsel_reg = TPS51632_VOLTAGE_SELECT_REG;
+ tps->desc.vsel_mask = TPS51632_VOUT_MASK;
+
tps->regmap = devm_regmap_init_i2c(client, &tps51632_regmap_config);
if (IS_ERR(tps->regmap)) {
ret = PTR_ERR(tps->regmap);
@@ -319,6 +372,7 @@ static struct i2c_driver tps51632_i2c_driver = {
.driver = {
.name = "tps51632",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tps51632_of_match),
},
.probe = tps51632_probe,
.remove = tps51632_remove,
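The split of rd_wr_reg() above into separate readable/writeable/volatile callbacks is what lets REGCACHE_RBTREE stay enabled without serving stale status registers; a minimal sketch of the pattern, with hypothetical register numbers:

#include <linux/regmap.h>

static bool example_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == 0x14;		/* e.g. a live status/fault register */
}

static bool example_readable_reg(struct device *dev, unsigned int reg)
{
	return reg <= 0x20;
}

static bool example_writeable_reg(struct device *dev, unsigned int reg)
{
	return reg <= 0x10;		/* status registers are read-only */
}

static const struct regmap_config example_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.readable_reg	= example_readable_reg,
	.writeable_reg	= example_writeable_reg,
	.volatile_reg	= example_volatile_reg,
	.max_register	= 0x20,
	.cache_type	= REGCACHE_RBTREE,	/* volatile regs bypass the cache */
};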
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 0233cfb56560..54aa2da7283b 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -23,8 +23,10 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/tps6507x.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/mfd/tps6507x.h>
+#include <linux/regulator/of_regulator.h>
/* DCDC's */
#define TPS6507X_DCDC_1 0
@@ -356,6 +358,80 @@ static struct regulator_ops tps6507x_pmic_ops = {
.list_voltage = regulator_list_voltage_table,
};
+#ifdef CONFIG_OF
+static struct of_regulator_match tps6507x_matches[] = {
+ { .name = "VDCDC1"},
+ { .name = "VDCDC2"},
+ { .name = "VDCDC3"},
+ { .name = "LDO1"},
+ { .name = "LDO2"},
+};
+
+static struct tps6507x_board *tps6507x_parse_dt_reg_data(
+ struct platform_device *pdev,
+ struct of_regulator_match **tps6507x_reg_matches)
+{
+ struct tps6507x_board *tps_board;
+ struct device_node *np = pdev->dev.parent->of_node;
+ struct device_node *regulators;
+ struct of_regulator_match *matches;
+ struct regulator_init_data *reg_data;
+ int idx = 0, count, ret;
+
+ tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
+ GFP_KERNEL);
+ if (!tps_board) {
+ dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+ return NULL;
+ }
+
+ regulators = of_find_node_by_name(np, "regulators");
+ if (!regulators) {
+ dev_err(&pdev->dev, "regulator node not found\n");
+ return NULL;
+ }
+
+ count = ARRAY_SIZE(tps6507x_matches);
+ matches = tps6507x_matches;
+
+ ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
+ ret);
+ return NULL;
+ }
+
+ *tps6507x_reg_matches = matches;
+
+ reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
+ * TPS6507X_NUM_REGULATOR), GFP_KERNEL);
+ if (!reg_data) {
+ dev_err(&pdev->dev, "Failure to alloc init data for regulators.\n");
+ return NULL;
+ }
+
+ tps_board->tps6507x_pmic_init_data = reg_data;
+
+ for (idx = 0; idx < count; idx++) {
+ if (!matches[idx].init_data || !matches[idx].of_node)
+ continue;
+
+ memcpy(&reg_data[idx], matches[idx].init_data,
+ sizeof(struct regulator_init_data));
+
+ }
+
+ return tps_board;
+}
+#else
+static inline struct tps6507x_board *tps6507x_parse_dt_reg_data(
+ struct platform_device *pdev,
+ struct of_regulator_match **tps6507x_reg_matches)
+{
+ *tps6507x_reg_matches = NULL;
+ return NULL;
+}
+#endif
static int tps6507x_pmic_probe(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
@@ -365,8 +441,10 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
struct tps6507x_pmic *tps;
struct tps6507x_board *tps_board;
+ struct of_regulator_match *tps6507x_reg_matches = NULL;
int i;
int error;
+ unsigned int prop;
/**
* tps_board points to pmic related constants
@@ -374,6 +452,9 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
*/
tps_board = dev_get_platdata(tps6507x_dev->dev);
+ if (!tps_board && tps6507x_dev->dev->of_node)
+ tps_board = tps6507x_parse_dt_reg_data(pdev,
+ &tps6507x_reg_matches);
if (!tps_board)
return -EINVAL;
@@ -415,6 +496,17 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
config.init_data = init_data;
config.driver_data = tps;
+ if (tps6507x_reg_matches) {
+ error = of_property_read_u32(
+ tps6507x_reg_matches[i].of_node,
+ "ti,defdcdc_default", &prop);
+
+ if (!error)
+ tps->info[i]->defdcdc_default = prop;
+
+ config.of_node = tps6507x_reg_matches[i].of_node;
+ }
+
rdev = regulator_register(&tps->desc[i], &config);
if (IS_ERR(rdev)) {
dev_err(tps6507x_dev->dev,
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 41c391789c97..c8e70451df38 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -19,11 +19,13 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/mfd/tps65090.h>
struct tps65090_regulator {
@@ -67,8 +69,8 @@ static struct regulator_desc tps65090_regulator_desc[] = {
tps65090_REG_DESC(FET5, "infet5", 0x13, tps65090_reg_contol_ops),
tps65090_REG_DESC(FET6, "infet6", 0x14, tps65090_reg_contol_ops),
tps65090_REG_DESC(FET7, "infet7", 0x15, tps65090_reg_contol_ops),
- tps65090_REG_DESC(LDO1, "vsys_l1", 0, tps65090_ldo_ops),
- tps65090_REG_DESC(LDO2, "vsys_l2", 0, tps65090_ldo_ops),
+ tps65090_REG_DESC(LDO1, "vsys-l1", 0, tps65090_ldo_ops),
+ tps65090_REG_DESC(LDO2, "vsys-l2", 0, tps65090_ldo_ops),
};
static inline bool is_dcdc(int id)
@@ -138,6 +140,92 @@ static void tps65090_configure_regulator_config(
}
}
+#ifdef CONFIG_OF
+static struct of_regulator_match tps65090_matches[] = {
+ { .name = "dcdc1", },
+ { .name = "dcdc2", },
+ { .name = "dcdc3", },
+ { .name = "fet1", },
+ { .name = "fet2", },
+ { .name = "fet3", },
+ { .name = "fet4", },
+ { .name = "fet5", },
+ { .name = "fet6", },
+ { .name = "fet7", },
+ { .name = "ldo1", },
+ { .name = "ldo2", },
+};
+
+static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
+ struct platform_device *pdev,
+ struct of_regulator_match **tps65090_reg_matches)
+{
+ struct tps65090_platform_data *tps65090_pdata;
+ struct device_node *np = pdev->dev.parent->of_node;
+ struct device_node *regulators;
+ int idx = 0, ret;
+ struct tps65090_regulator_plat_data *reg_pdata;
+
+ tps65090_pdata = devm_kzalloc(&pdev->dev, sizeof(*tps65090_pdata),
+ GFP_KERNEL);
+ if (!tps65090_pdata) {
+ dev_err(&pdev->dev, "Memory alloc for tps65090_pdata failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ reg_pdata = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX *
+ sizeof(*reg_pdata), GFP_KERNEL);
+ if (!reg_pdata) {
+ dev_err(&pdev->dev, "Memory alloc for reg_pdata failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ regulators = of_find_node_by_name(np, "regulators");
+ if (!regulators) {
+ dev_err(&pdev->dev, "regulator node not found\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = of_regulator_match(&pdev->dev, regulators, tps65090_matches,
+ ARRAY_SIZE(tps65090_matches));
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Error parsing regulator init data: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ *tps65090_reg_matches = tps65090_matches;
+ for (idx = 0; idx < ARRAY_SIZE(tps65090_matches); idx++) {
+ struct regulator_init_data *ri_data;
+ struct tps65090_regulator_plat_data *rpdata;
+
+ rpdata = &reg_pdata[idx];
+ ri_data = tps65090_matches[idx].init_data;
+ if (!ri_data || !tps65090_matches[idx].of_node)
+ continue;
+
+ rpdata->reg_init_data = ri_data;
+ rpdata->enable_ext_control = of_property_read_bool(
+ tps65090_matches[idx].of_node,
+ "ti,enable-ext-control");
+ if (rpdata->enable_ext_control)
+ rpdata->gpio = of_get_named_gpio(np,
+ "dcdc-ext-control-gpios", 0);
+
+ tps65090_pdata->reg_pdata[idx] = rpdata;
+ }
+ return tps65090_pdata;
+}
+#else
+static inline struct tps65090_platform_data *tps65090_parse_dt_reg_data(
+ struct platform_device *pdev,
+ struct of_regulator_match **tps65090_reg_matches)
+{
+ *tps65090_reg_matches = NULL;
+ return NULL;
+}
+#endif
+
static int tps65090_regulator_probe(struct platform_device *pdev)
{
struct tps65090 *tps65090_mfd = dev_get_drvdata(pdev->dev.parent);
@@ -147,15 +235,19 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
struct tps65090_regulator_plat_data *tps_pdata;
struct tps65090_regulator *pmic;
struct tps65090_platform_data *tps65090_pdata;
+ struct of_regulator_match *tps65090_reg_matches = NULL;
int num;
int ret;
dev_dbg(&pdev->dev, "Probing regulator\n");
tps65090_pdata = dev_get_platdata(pdev->dev.parent);
- if (!tps65090_pdata) {
+ if (!tps65090_pdata && tps65090_mfd->dev->of_node)
+ tps65090_pdata = tps65090_parse_dt_reg_data(pdev,
+ &tps65090_reg_matches);
+ if (IS_ERR_OR_NULL(tps65090_pdata)) {
dev_err(&pdev->dev, "Platform data missing\n");
- return -EINVAL;
+ return tps65090_pdata ? PTR_ERR(tps65090_pdata) : -EINVAL;
}
pmic = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX * sizeof(*pmic),
@@ -192,13 +284,17 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
}
}
- config.dev = &pdev->dev;
+ config.dev = pdev->dev.parent;
config.driver_data = ri;
config.regmap = tps65090_mfd->rmap;
if (tps_pdata)
config.init_data = tps_pdata->reg_init_data;
else
config.init_data = NULL;
+ if (tps65090_reg_matches)
+ config.of_node = tps65090_reg_matches[num].of_node;
+ else
+ config.of_node = NULL;
rdev = regulator_register(ri->desc, &config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 73dce7664126..df395187c063 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
if (!regs)
return NULL;
- count = of_regulator_match(pdev->dev.parent, regs,
- reg_matches, TPS65217_NUM_REGULATOR);
+ count = of_regulator_match(&pdev->dev, regs, reg_matches,
+ TPS65217_NUM_REGULATOR);
of_node_put(regs);
if ((count < 0) || (count > TPS65217_NUM_REGULATOR))
return NULL;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index f86da672c758..e68382d0e1ea 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -61,10 +61,6 @@ struct tps6586x_regulator {
int enable_bit[2];
int enable_reg[2];
-
- /* for DVM regulators */
- int go_reg;
- int go_bit;
};
static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
@@ -72,37 +68,10 @@ static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
return rdev_get_dev(rdev)->parent;
}
-static int tps6586x_set_voltage_sel(struct regulator_dev *rdev,
- unsigned selector)
-{
- struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps6586x_dev(rdev);
- int ret, val, rid = rdev_get_id(rdev);
- uint8_t mask;
-
- val = selector << (ffs(rdev->desc->vsel_mask) - 1);
- mask = rdev->desc->vsel_mask;
-
- ret = tps6586x_update(parent, rdev->desc->vsel_reg, val, mask);
- if (ret)
- return ret;
-
- /* Update go bit for DVM regulators */
- switch (rid) {
- case TPS6586X_ID_LDO_2:
- case TPS6586X_ID_LDO_4:
- case TPS6586X_ID_SM_0:
- case TPS6586X_ID_SM_1:
- ret = tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
- break;
- }
- return ret;
-}
-
static struct regulator_ops tps6586x_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = tps6586x_set_voltage_sel,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -142,7 +111,7 @@ static const unsigned int tps6586x_dvm_voltages[] = {
};
#define TPS6586X_REGULATOR(_id, _pin_name, vdata, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1) \
+ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
.desc = { \
.supply_name = _pin_name, \
.name = "REG-" #_id, \
@@ -156,29 +125,26 @@ static const unsigned int tps6586x_dvm_voltages[] = {
.enable_mask = 1 << (ebit0), \
.vsel_reg = TPS6586X_##vreg, \
.vsel_mask = ((1 << (nbits)) - 1) << (shift), \
+ .apply_reg = (goreg), \
+ .apply_bit = (gobit), \
}, \
.enable_reg[0] = TPS6586X_SUPPLY##ereg0, \
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
.enable_bit[1] = (ebit1),
-#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
- .go_reg = TPS6586X_##goreg, \
- .go_bit = (gobit),
-
#define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
{ \
TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1) \
+ ereg0, ebit0, ereg1, ebit1, 0, 0) \
}
#define TPS6586X_DVM(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
{ \
TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1) \
- TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
+ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
}
#define TPS6586X_SYS_REGULATOR() \
@@ -207,13 +173,13 @@ static struct tps6586x_regulator tps6586x_regulator[] = {
TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
- ENB, 3, VCC2, 6),
+ ENB, 3, TPS6586X_VCC2, BIT(6)),
TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
- END, 3, VCC1, 6),
+ END, 3, TPS6586X_VCC1, BIT(6)),
TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1,
- ENB, 1, VCC1, 2),
+ ENB, 1, TPS6586X_VCC1, BIT(2)),
TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0,
- ENB, 0, VCC1, 0),
+ ENB, 0, TPS6586X_VCC1, BIT(0)),
};
/*
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 59c3770fa77d..6ba6931ac855 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -964,8 +964,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
{
struct tps65910_board *pmic_plat_data;
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
- struct device_node *np = pdev->dev.parent->of_node;
- struct device_node *regulators;
+ struct device_node *np, *regulators;
struct of_regulator_match *matches;
unsigned int prop;
int idx = 0, ret, count;
@@ -978,6 +977,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
return NULL;
}
+ np = of_node_get(pdev->dev.parent->of_node);
regulators = of_find_node_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
@@ -994,11 +994,13 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
matches = tps65911_matches;
break;
default:
+ of_node_put(regulators);
dev_err(&pdev->dev, "Invalid tps chip version\n");
return NULL;
}
- ret = of_regulator_match(pdev->dev.parent, regulators, matches, count);
+ ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+ of_node_put(regulators);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
ret);
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index b15d711bc8c6..9019d0e7ecb6 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -728,7 +728,7 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
}
}
rdev = regulator_register(&ri->rinfo->desc, &config);
- if (IS_ERR_OR_NULL(rdev)) {
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"register regulator failed %s\n",
ri->rinfo->desc.name);
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 493c8c6a241f..74508cc62d67 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1064,7 +1064,7 @@ static u8 twl_get_smps_mult(void)
#define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label)
#define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label)
-static const struct of_device_id twl_of_match[] __devinitconst = {
+static const struct of_device_id twl_of_match[] = {
TWL4030_OF_MATCH("ti,twl4030-vaux1", VAUX1),
TWL4030_OF_MATCH("ti,twl4030-vaux2", VAUX2_4030),
TWL4030_OF_MATCH("ti,twl5030-vaux2", VAUX2),
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 96ce101b9067..cc1f7bf53fd0 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -1,21 +1,19 @@
-menu "Remoteproc drivers (EXPERIMENTAL)"
+menu "Remoteproc drivers"
# REMOTEPROC gets selected by whoever wants it
config REMOTEPROC
tristate
- depends on EXPERIMENTAL
depends on HAS_DMA
select FW_CONFIG
select VIRTIO
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
- depends on EXPERIMENTAL
depends on HAS_DMA
depends on ARCH_OMAP4
depends on OMAP_IOMMU
+ depends on OMAP_MBOX_FWK
select REMOTEPROC
- select OMAP_MBOX_FWK
select RPMSG
help
Say y here to support OMAP's remote processors (dual M3
@@ -32,7 +30,6 @@ config OMAP_REMOTEPROC
config STE_MODEM_RPROC
tristate "STE-Modem remoteproc support"
- depends on EXPERIMENTAL
depends on HAS_DMA
select REMOTEPROC
default n
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 32c289c2ba13..0e396c155b3b 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -179,7 +179,7 @@ static struct rproc_ops omap_rproc_ops = {
.kick = omap_rproc_kick,
};
-static int __devinit omap_rproc_probe(struct platform_device *pdev)
+static int omap_rproc_probe(struct platform_device *pdev)
{
struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
struct omap_rproc *oproc;
@@ -213,7 +213,7 @@ free_rproc:
return ret;
}
-static int __devexit omap_rproc_remove(struct platform_device *pdev)
+static int omap_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
@@ -225,7 +225,7 @@ static int __devexit omap_rproc_remove(struct platform_device *pdev)
static struct platform_driver omap_rproc_driver = {
.probe = omap_rproc_probe,
- .remove = __devexit_p(omap_rproc_remove),
+ .remove = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
.owner = THIS_MODULE,
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 2bd911f12571..f6e0ea6ffda5 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -1,9 +1,8 @@
-menu "Rpmsg drivers (EXPERIMENTAL)"
+menu "Rpmsg drivers"
# RPMSG always gets selected by whoever wants it
config RPMSG
tristate
select VIRTIO
- depends on EXPERIMENTAL
endmenu
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 1859f71372e2..d85446021ddb 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -764,7 +764,7 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
/* add message to the remote processor's virtqueue */
err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
- if (err < 0) {
+ if (err) {
/*
* need to reclaim the buffer here, otherwise it's lost
* (memory won't leak, but rpmsg won't use it again for TX).
@@ -776,8 +776,6 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
/* tell the remote processor it has a pending message to read */
virtqueue_kick(vrp->svq);
-
- err = 0;
out:
mutex_unlock(&vrp->tx_lock);
return err;
@@ -841,7 +839,7 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
/* farewell, ept, we don't need you anymore */
kref_put(&ept->refcount, __ept_release);
} else
- dev_warn(dev, "msg received with no recepient\n");
+ dev_warn(dev, "msg received with no recipient\n");
/* publish the real size of the buffer */
sg_init_one(&sg, msg, RPMSG_BUF_SIZE);
@@ -980,7 +978,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
GFP_KERNEL);
- WARN_ON(err < 0); /* sanity check; this can't really happen */
+ WARN_ON(err); /* sanity check; this can't really happen */
}
/* suppress "tx-complete" interrupts */
@@ -1024,7 +1022,7 @@ static int rpmsg_remove_device(struct device *dev, void *data)
return 0;
}
-static void __devexit rpmsg_remove(struct virtio_device *vdev)
+static void rpmsg_remove(struct virtio_device *vdev)
{
struct virtproc_info *vrp = vdev->priv;
int ret;
@@ -1065,7 +1063,7 @@ static struct virtio_driver virtio_ipc_driver = {
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = rpmsg_probe,
- .remove = __devexit_p(rpmsg_remove),
+ .remove = rpmsg_remove,
};
static int __init rpmsg_init(void)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 19c03ab2bdcb..e6ab071fb6fd 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,14 +20,24 @@ if RTC_CLASS
config RTC_HCTOSYS
bool "Set system time from RTC on startup and resume"
default y
+ depends on !ALWAYS_USE_PERSISTENT_CLOCK
help
If you say yes here, the system time (wall clock) will be set using
the value read from a specified RTC device. This is useful to avoid
unnecessary fsck runs at boot time, and to network better.
+config RTC_SYSTOHC
+ bool "Set the RTC time based on NTP synchronization"
+ default y
+ depends on !ALWAYS_USE_PERSISTENT_CLOCK
+ help
+ If you say yes here, the system time (wall clock) will be stored
+ in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
+ minutes if userspace reports synchronized NTP status.
+
config RTC_HCTOSYS_DEVICE
string "RTC used to set the system time"
- depends on RTC_HCTOSYS = y
+ depends on RTC_HCTOSYS = y || RTC_SYSTOHC = y
default "rtc0"
help
The RTC device that will be used to (re)initialize the system
@@ -194,6 +204,12 @@ config RTC_DRV_DS3232
This driver can also be built as a module. If so, the module
will be called rtc-ds3232.
+config RTC_DRV_LP8788
+ tristate "TI LP8788 RTC driver"
+ depends on MFD_LP8788
+ help
+ Say Y to enable support for the LP8788 RTC/ALARM driver.
+
config RTC_DRV_MAX6900
tristate "Maxim MAX6900"
help
@@ -233,6 +249,26 @@ config RTC_DRV_MAX8998
This driver can also be built as a module. If so, the module
will be called rtc-max8998.
+config RTC_DRV_MAX8997
+ tristate "Maxim MAX8997"
+ depends on MFD_MAX8997
+ help
+ If you say yes here you will get support for the
+ RTC of Maxim MAX8997 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max8997.
+
+config RTC_DRV_MAX77686
+ tristate "Maxim MAX77686"
+ depends on MFD_MAX77686
+ help
+ If you say yes here you will get support for the
+ RTC of Maxim MAX77686 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max77686.
+
config RTC_DRV_RS5C372
tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
help
@@ -269,6 +305,15 @@ config RTC_DRV_X1205
This driver can also be built as a module. If so, the module
will be called rtc-x1205.
+config RTC_DRV_PCF8523
+ tristate "NXP PCF8523"
+ help
+ If you say yes here you get support for the NXP PCF8523 RTC
+ chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf8523.
+
config RTC_DRV_PCF8563
tristate "Philips PCF8563/Epson RTC8564"
help
@@ -343,6 +388,14 @@ config RTC_DRV_TWL4030
This driver can also be built as a module. If so, the module
will be called rtc-twl.
+config RTC_DRV_TPS6586X
+ tristate "TI TPS6586X RTC driver"
+ depends on MFD_TPS6586X
+ help
+ TI Power Management IC TPS6586X supports RTC functionality
+ along with an alarm. This driver provides support for
+ the TPS6586X RTC module.
+
config RTC_DRV_TPS65910
tristate "TI TPS65910 RTC driver"
depends on RTC_CLASS && MFD_TPS65910
@@ -353,6 +406,14 @@ config RTC_DRV_TPS65910
This driver can also be built as a module. If so, the module
will be called rtc-tps65910.
+config RTC_DRV_TPS80031
+ tristate "TI TPS80031/TPS80032 RTC driver"
+ depends on MFD_TPS80031
+ help
+ TI Power Management IC TPS80031 supports RTC functionality
+ along with an alarm. This driver provides support for
+ the TPS80031 RTC module.
+
config RTC_DRV_RC5T583
tristate "RICOH 5T583 RTC driver"
depends on MFD_RC5T583
@@ -510,6 +571,14 @@ config RTC_DRV_PCF2123
This driver can also be built as a module. If so, the module
will be called rtc-pcf2123.
+config RTC_DRV_RX4581
+ tristate "Epson RX-4581"
+ help
+ If you say yes here you will get support for the Epson RX-4581.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rx4581.
+
endif # SPI_MASTER
comment "Platform RTC drivers"
@@ -600,6 +669,16 @@ config RTC_DRV_DA9052
Say y here to support the RTC driver for Dialog Semiconductor
DA9052-BC and DA9053-AA/Bx PMICs.
+config RTC_DRV_DA9055
+ tristate "Dialog Semiconductor DA9055 RTC"
+ depends on MFD_DA9055
+ help
+ If you say yes here you will get support for the
+ RTC of the Dialog DA9055 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-da9055.
+
config RTC_DRV_EFI
tristate "EFI RTC"
depends on IA64
@@ -768,7 +847,7 @@ config RTC_DRV_DAVINCI
config RTC_DRV_IMXDI
tristate "Freescale IMX DryIce Real Time Clock"
- depends on SOC_IMX25
+ depends on ARCH_MXC
help
Support for Freescale IMX DryIce RTC
@@ -777,11 +856,13 @@ config RTC_DRV_IMXDI
config RTC_DRV_OMAP
tristate "TI OMAP1"
- depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
+ depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX || SOC_AM33XX
help
- Say "yes" here to support the real time clock on TI OMAP1 and
- DA8xx/OMAP-L13x chips. This driver can also be built as a
- module called rtc-omap.
+ Say "yes" here to support the on chip real time clock
+ present on TI OMAP1, AM33xx and DA8xx/OMAP-L13x.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-omap.
config HAVE_S3C_RTC
bool
@@ -994,7 +1075,7 @@ config RTC_DRV_TX4939
config RTC_DRV_MV
tristate "Marvell SoC RTC"
- depends on ARCH_KIRKWOOD || ARCH_DOVE
+ depends on ARCH_KIRKWOOD || ARCH_DOVE || ARCH_MVEBU
help
If you say yes here you will get support for the in-chip RTC
that can be found in some of Marvell's SoC devices, such as
@@ -1144,4 +1225,20 @@ config RTC_DRV_SNVS
This driver can also be built as a module, if so, the module
will be called "rtc-snvs".
+comment "HID Sensor RTC drivers"
+
+config RTC_DRV_HID_SENSOR_TIME
+ tristate "HID Sensor Time"
+ depends on USB_HID
+ select IIO
+ select HID_SENSOR_HUB
+ select HID_SENSOR_IIO_COMMON
+ help
+ Say yes here to build support for the HID Sensors of type Time.
+ This driver makes such sensors available as RTCs.
+
+ If this driver is compiled as a module, it will be named
+ rtc-hid-sensor-time.
+
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 56297f0fd388..e8f2e2fee06f 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -6,6 +6,7 @@ ccflags-$(CONFIG_RTC_DEBUG) := -DDEBUG
obj-$(CONFIG_RTC_LIB) += rtc-lib.o
obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
+obj-$(CONFIG_RTC_SYSTOHC) += systohc.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
rtc-core-y := class.o interface.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
obj-$(CONFIG_RTC_DRV_DA9052) += rtc-da9052.o
+obj-$(CONFIG_RTC_DRV_DA9055) += rtc-da9055.o
obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
@@ -51,10 +53,12 @@ obj-$(CONFIG_RTC_DRV_EM3027) += rtc-em3027.o
obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
+obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
+obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
obj-$(CONFIG_RTC_DRV_LOONGSON1) += rtc-ls1x.o
obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
@@ -68,7 +72,9 @@ obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX8907) += rtc-max8907.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
+obj-$(CONFIG_RTC_DRV_MAX8997) += rtc-max8997.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
+obj-$(CONFIG_RTC_DRV_MAX77686) += rtc-max77686.o
obj-$(CONFIG_RTC_DRV_MC13XXX) += rtc-mc13xxx.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
@@ -76,6 +82,7 @@ obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
+obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
@@ -93,6 +100,7 @@ obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
+obj-$(CONFIG_RTC_DRV_RX4581) += rtc-rx4581.o
obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
@@ -109,7 +117,9 @@ obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
obj-$(CONFIG_RTC_DRV_TILE) += rtc-tile.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
+obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
+obj-$(CONFIG_RTC_DRV_TPS80031) += rtc-tps80031.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index f8a0aab218cb..9b742d3ffb94 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/kdev_t.h>
@@ -50,6 +52,10 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
struct timespec delta, delta_delta;
+
+ if (has_persistent_clock())
+ return 0;
+
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
@@ -88,6 +94,9 @@ static int rtc_resume(struct device *dev)
struct timespec new_system, new_rtc;
struct timespec sleep_time;
+ if (has_persistent_clock())
+ return 0;
+
rtc_hctosys_ret = -ENODEV;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
@@ -244,7 +253,6 @@ void rtc_device_unregister(struct rtc_device *rtc)
rtc_proc_del_device(rtc);
device_unregister(&rtc->dev);
rtc->ops = NULL;
- ida_simple_remove(&rtc_ida, rtc->id);
mutex_unlock(&rtc->ops_lock);
put_device(&rtc->dev);
}
@@ -255,7 +263,7 @@ static int __init rtc_init(void)
{
rtc_class = class_create(THIS_MODULE, "rtc");
if (IS_ERR(rtc_class)) {
- printk(KERN_ERR "%s: couldn't create class\n", __FILE__);
+ pr_err("couldn't create class\n");
return PTR_ERR(rtc_class);
}
rtc_class->suspend = rtc_suspend;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 9592b936b71b..42bd57da239d 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -587,16 +587,16 @@ void rtc_update_irq(struct rtc_device *rtc,
}
EXPORT_SYMBOL_GPL(rtc_update_irq);
-static int __rtc_match(struct device *dev, void *data)
+static int __rtc_match(struct device *dev, const void *data)
{
- char *name = (char *)data;
+ const char *name = data;
if (strcmp(dev_name(dev), name) == 0)
return 1;
return 0;
}
-struct rtc_device *rtc_class_open(char *name)
+struct rtc_device *rtc_class_open(const char *name)
{
struct device *dev;
struct rtc_device *rtc = NULL;
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 6367984e0565..63b17ebe90e8 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -248,7 +248,7 @@ static int pm80x_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pm80x_rtc_pm_ops, pm80x_rtc_suspend, pm80x_rtc_resume);
-static int __devinit pm80x_rtc_probe(struct platform_device *pdev)
+static int pm80x_rtc_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm80x_platform_data *pm80x_pdata;
@@ -342,7 +342,7 @@ out:
return ret;
}
-static int __devexit pm80x_rtc_remove(struct platform_device *pdev)
+static int pm80x_rtc_remove(struct platform_device *pdev)
{
struct pm80x_rtc_info *info = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
@@ -358,7 +358,7 @@ static struct platform_driver pm80x_rtc_driver = {
.pm = &pm80x_rtc_pm_ops,
},
.probe = pm80x_rtc_probe,
- .remove = __devexit_p(pm80x_rtc_remove),
+ .remove = pm80x_rtc_remove,
};
module_platform_driver(pm80x_rtc_driver);
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index de9e854b326a..f663746f4603 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -286,8 +286,8 @@ out:
#endif
#ifdef CONFIG_OF
-static int __devinit pm860x_rtc_dt_init(struct platform_device *pdev,
- struct pm860x_rtc_info *info)
+static int pm860x_rtc_dt_init(struct platform_device *pdev,
+ struct pm860x_rtc_info *info)
{
struct device_node *np = pdev->dev.parent->of_node;
int ret;
@@ -307,7 +307,7 @@ static int __devinit pm860x_rtc_dt_init(struct platform_device *pdev,
#define pm860x_rtc_dt_init(x, y) (-1)
#endif
-static int __devinit pm860x_rtc_probe(struct platform_device *pdev)
+static int pm860x_rtc_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_rtc_pdata *pdata = NULL;
@@ -412,7 +412,7 @@ out:
return ret;
}
-static int __devexit pm860x_rtc_remove(struct platform_device *pdev)
+static int pm860x_rtc_remove(struct platform_device *pdev)
{
struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
@@ -459,7 +459,7 @@ static struct platform_driver pm860x_rtc_driver = {
.pm = &pm860x_rtc_pm_ops,
},
.probe = pm860x_rtc_probe,
- .remove = __devexit_p(pm860x_rtc_remove),
+ .remove = pm860x_rtc_remove,
};
module_platform_driver(pm860x_rtc_driver);
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 2e5970fe9eeb..57cde2b061e6 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -389,7 +389,7 @@ static const struct rtc_class_ops ab8500_rtc_ops = {
.alarm_irq_enable = ab8500_rtc_irq_enable,
};
-static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
+static int ab8500_rtc_probe(struct platform_device *pdev)
{
int err;
struct rtc_device *rtc;
@@ -448,7 +448,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
+static int ab8500_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata(pdev);
int irq = platform_get_irq_byname(pdev, "ALARM");
@@ -468,7 +468,7 @@ static struct platform_driver ab8500_rtc_driver = {
.owner = THIS_MODULE,
},
.probe = ab8500_rtc_probe,
- .remove = __devexit_p(ab8500_rtc_remove),
+ .remove = ab8500_rtc_remove,
};
module_platform_driver(ab8500_rtc_driver);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b6469e2cae89..434ebc3a99dc 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -86,7 +86,7 @@ static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = tm->tm_year - 1900;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -100,7 +100,7 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
{
unsigned long cr;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -145,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
? 1 : 0;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -183,7 +183,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
}
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
@@ -192,7 +192,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- pr_debug("%s(): cmd=%08x\n", __func__, enabled);
+ dev_dbg(dev, "%s(): cmd=%08x\n", __func__, enabled);
if (enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
@@ -240,7 +240,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
rtc_update_irq(rtc, 1, events);
- pr_debug("%s(): num=%ld, events=0x%02lx\n", __func__,
+ dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__,
events >> 8, events & 0x000000FF);
return IRQ_HANDLED;
@@ -296,8 +296,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
IRQF_SHARED,
"at91_rtc", pdev);
if (ret) {
- printk(KERN_ERR "at91_rtc: IRQ %d already in use.\n",
- irq);
+ dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
return ret;
}
@@ -315,7 +314,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rtc);
- printk(KERN_INFO "AT91 Real Time Clock driver.\n");
+ dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
return 0;
}
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index e981798e9a9b..39cfd2ee0042 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -289,7 +289,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
/*
* Initialize and install RTC driver
*/
-static int __devinit at91_rtc_probe(struct platform_device *pdev)
+static int at91_rtc_probe(struct platform_device *pdev)
{
struct resource *r, *r_gpbr;
struct sam9_rtc *rtc;
@@ -387,7 +387,7 @@ fail:
/*
* Disable and remove the RTC driver
*/
-static int __devexit at91_rtc_remove(struct platform_device *pdev)
+static int at91_rtc_remove(struct platform_device *pdev)
{
struct sam9_rtc *rtc = platform_get_drvdata(pdev);
u32 mr = rtt_readl(rtc, MR);
@@ -463,7 +463,7 @@ static int at91_rtc_resume(struct platform_device *pdev)
static struct platform_driver at91_rtc_driver = {
.probe = at91_rtc_probe,
- .remove = __devexit_p(at91_rtc_remove),
+ .remove = at91_rtc_remove,
.shutdown = at91_rtc_shutdown,
.suspend = at91_rtc_suspend,
.resume = at91_rtc_resume,
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index 979ed0406ce9..b309da4ec745 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -62,7 +62,7 @@ static struct rtc_class_ops au1xtoy_rtc_ops = {
.set_time = au1xtoy_rtc_set_time,
};
-static int __devinit au1xtoy_rtc_probe(struct platform_device *pdev)
+static int au1xtoy_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtcdev;
unsigned long t;
@@ -116,7 +116,7 @@ out_err:
return ret;
}
-static int __devexit au1xtoy_rtc_remove(struct platform_device *pdev)
+static int au1xtoy_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtcdev = platform_get_drvdata(pdev);
@@ -131,7 +131,7 @@ static struct platform_driver au1xrtc_driver = {
.name = "rtc-au1xxx",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(au1xtoy_rtc_remove),
+ .remove = au1xtoy_rtc_remove,
};
static int __init au1xtoy_rtc_init(void)
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index abfc1a0c07d9..4ec614b0954d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -342,7 +342,7 @@ static struct rtc_class_ops bfin_rtc_ops = {
.alarm_irq_enable = bfin_rtc_alarm_irq_enable,
};
-static int __devinit bfin_rtc_probe(struct platform_device *pdev)
+static int bfin_rtc_probe(struct platform_device *pdev)
{
struct bfin_rtc *rtc;
struct device *dev = &pdev->dev;
@@ -388,7 +388,7 @@ err:
return ret;
}
-static int __devexit bfin_rtc_remove(struct platform_device *pdev)
+static int bfin_rtc_remove(struct platform_device *pdev)
{
struct bfin_rtc *rtc = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -451,7 +451,7 @@ static struct platform_driver bfin_rtc_driver = {
.owner = THIS_MODULE,
},
.probe = bfin_rtc_probe,
- .remove = __devexit_p(bfin_rtc_remove),
+ .remove = bfin_rtc_remove,
.suspend = bfin_rtc_suspend,
.resume = bfin_rtc_resume,
};
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index f090159dce4a..036cb89f8188 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -163,7 +163,7 @@ static int bq32k_probe(struct i2c_client *client,
return 0;
}
-static int __devexit bq32k_remove(struct i2c_client *client)
+static int bq32k_remove(struct i2c_client *client)
{
struct rtc_device *rtc = i2c_get_clientdata(client);
@@ -183,7 +183,7 @@ static struct i2c_driver bq32k_driver = {
.owner = THIS_MODULE,
},
.probe = bq32k_probe,
- .remove = __devexit_p(bq32k_remove),
+ .remove = bq32k_remove,
.id_table = bq32k_id,
};
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index bf612ef22941..693be71b5b18 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -140,7 +140,7 @@ static const struct rtc_class_ops bq4802_ops = {
.set_time = bq4802_set_time,
};
-static int __devinit bq4802_probe(struct platform_device *pdev)
+static int bq4802_probe(struct platform_device *pdev)
{
struct bq4802 *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
@@ -191,7 +191,7 @@ out_free:
goto out;
}
-static int __devexit bq4802_remove(struct platform_device *pdev)
+static int bq4802_remove(struct platform_device *pdev)
{
struct bq4802 *p = platform_get_drvdata(pdev);
@@ -215,7 +215,7 @@ static struct platform_driver bq4802_driver = {
.owner = THIS_MODULE,
},
.probe = bq4802_probe,
- .remove = __devexit_p(bq4802_remove),
+ .remove = bq4802_remove,
};
module_platform_driver(bq4802_driver);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 4267789ca995..af97c94e8a3a 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -706,7 +706,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rtc_cmos_int_handler = hpet_rtc_interrupt;
err = hpet_register_irq_handler(cmos_interrupt);
if (err != 0) {
- printk(KERN_WARNING "hpet_register_irq_handler "
+ dev_warn(dev, "hpet_register_irq_handler "
" failed in rtc_init().");
goto cleanup1;
}
@@ -731,8 +731,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup2;
}
- pr_info("%s: %s%s, %zd bytes nvram%s\n",
- dev_name(&cmos_rtc.rtc->dev),
+ dev_info(dev, "%s%s, %zd bytes nvram%s\n",
!is_valid_irq(rtc_irq) ? "no alarms" :
cmos_rtc.mon_alrm ? "alarms up to one year" :
cmos_rtc.day_alrm ? "alarms up to one month" :
@@ -820,8 +819,7 @@ static int cmos_suspend(struct device *dev)
enable_irq_wake(cmos->irq);
}
- pr_debug("%s: suspend%s, ctrl %02x\n",
- dev_name(&cmos_rtc.rtc->dev),
+ dev_dbg(dev, "suspend%s, ctrl %02x\n",
(tmp & RTC_AIE) ? ", alarm may wake" : "",
tmp);
@@ -876,9 +874,7 @@ static int cmos_resume(struct device *dev)
spin_unlock_irq(&rtc_lock);
}
- pr_debug("%s: resume, ctrl %02x\n",
- dev_name(&cmos_rtc.rtc->dev),
- tmp);
+ dev_dbg(dev, "resume, ctrl %02x\n", tmp);
return 0;
}
@@ -947,8 +943,7 @@ static void rtc_wake_off(struct device *dev)
*/
static struct cmos_rtc_board_info acpi_rtc_info;
-static void __devinit
-cmos_wake_setup(struct device *dev)
+static void cmos_wake_setup(struct device *dev)
{
if (acpi_disabled)
return;
@@ -980,8 +975,7 @@ cmos_wake_setup(struct device *dev)
#else
-static void __devinit
-cmos_wake_setup(struct device *dev)
+static void cmos_wake_setup(struct device *dev)
{
}
@@ -991,8 +985,7 @@ cmos_wake_setup(struct device *dev)
#include <linux/pnp.h>
-static int __devinit
-cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
cmos_wake_setup(&pnp->dev);
@@ -1101,7 +1094,6 @@ static __init void cmos_of_init(struct platform_device *pdev)
}
#else
static inline void cmos_of_init(struct platform_device *pdev) {}
-#define of_cmos_match NULL
#endif
/*----------------------------------------------------------------*/
@@ -1143,7 +1135,7 @@ static struct platform_driver cmos_platform_driver = {
#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
#endif
- .of_match_table = of_cmos_match,
+ .of_match_table = of_match_ptr(of_cmos_match),
}
};
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index c8115b83e5ab..2d28ec1aa1cd 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -157,7 +157,6 @@ static int __exit coh901331_remove(struct platform_device *pdev)
if (rtap) {
rtc_device_unregister(rtap->rtc);
clk_unprepare(rtap->clk);
- clk_put(rtap->clk);
platform_set_drvdata(pdev, NULL);
}
@@ -196,7 +195,7 @@ static int __init coh901331_probe(struct platform_device *pdev)
"RTC COH 901 331 Alarm", rtap))
return -EIO;
- rtap->clk = clk_get(&pdev->dev, NULL);
+ rtap->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rtap->clk)) {
ret = PTR_ERR(rtap->clk);
dev_err(&pdev->dev, "could not get clock\n");
@@ -207,7 +206,7 @@ static int __init coh901331_probe(struct platform_device *pdev)
ret = clk_prepare_enable(rtap->clk);
if (ret) {
dev_err(&pdev->dev, "could not enable clock\n");
- goto out_no_clk_prepenable;
+ return ret;
}
clk_disable(rtap->clk);
@@ -224,8 +223,6 @@ static int __init coh901331_probe(struct platform_device *pdev)
out_no_rtc:
platform_set_drvdata(pdev, NULL);
clk_unprepare(rtap->clk);
- out_no_clk_prepenable:
- clk_put(rtap->clk);
return ret;
}
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 78070255bd3f..0dde688ca09b 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -228,7 +228,7 @@ static const struct rtc_class_ops da9052_rtc_ops = {
.alarm_irq_enable = da9052_rtc_alarm_irq_enable,
};
-static int __devinit da9052_rtc_probe(struct platform_device *pdev)
+static int da9052_rtc_probe(struct platform_device *pdev)
{
struct da9052_rtc *rtc;
int ret;
@@ -240,9 +240,10 @@ static int __devinit da9052_rtc_probe(struct platform_device *pdev)
rtc->da9052 = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, rtc);
rtc->irq = platform_get_irq_byname(pdev, "ALM");
- ret = request_threaded_irq(rtc->irq, NULL, da9052_rtc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "ALM", rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ da9052_rtc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "ALM", rtc);
if (ret != 0) {
rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
return ret;
@@ -250,24 +251,17 @@ static int __devinit da9052_rtc_probe(struct platform_device *pdev)
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&da9052_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc)) {
- ret = PTR_ERR(rtc->rtc);
- goto err_free_irq;
- }
+ if (IS_ERR(rtc->rtc))
+ return PTR_ERR(rtc->rtc);
return 0;
-
-err_free_irq:
- free_irq(rtc->irq, rtc);
- return ret;
}
-static int __devexit da9052_rtc_remove(struct platform_device *pdev)
+static int da9052_rtc_remove(struct platform_device *pdev)
{
struct da9052_rtc *rtc = pdev->dev.platform_data;
rtc_device_unregister(rtc->rtc);
- free_irq(rtc->irq, rtc);
platform_set_drvdata(pdev, NULL);
return 0;
@@ -275,7 +269,7 @@ static int __devexit da9052_rtc_remove(struct platform_device *pdev)
static struct platform_driver da9052_rtc_driver = {
.probe = da9052_rtc_probe,
- .remove = __devexit_p(da9052_rtc_remove),
+ .remove = da9052_rtc_remove,
.driver = {
.name = "da9052-rtc",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
new file mode 100644
index 000000000000..8f0dcfedb83c
--- /dev/null
+++ b/drivers/rtc/rtc-da9055.c
@@ -0,0 +1,413 @@
+/*
+ * Real time clock driver for DA9055
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Dajun Dajun Chen <dajun.chen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+#include <linux/mfd/da9055/pdata.h>
+
+struct da9055_rtc {
+ struct rtc_device *rtc;
+ struct da9055 *da9055;
+ int alarm_enable;
+};
+
+static int da9055_rtc_enable_alarm(struct da9055_rtc *rtc, bool enable)
+{
+ int ret;
+ if (enable) {
+ ret = da9055_reg_update(rtc->da9055, DA9055_REG_ALARM_Y,
+ DA9055_RTC_ALM_EN,
+ DA9055_RTC_ALM_EN);
+ if (ret != 0)
+ dev_err(rtc->da9055->dev, "Failed to enable ALM: %d\n",
+ ret);
+ rtc->alarm_enable = 1;
+ } else {
+ ret = da9055_reg_update(rtc->da9055, DA9055_REG_ALARM_Y,
+ DA9055_RTC_ALM_EN, 0);
+ if (ret != 0)
+ dev_err(rtc->da9055->dev,
+ "Failed to disable ALM: %d\n", ret);
+ rtc->alarm_enable = 0;
+ }
+ return ret;
+}
+
+static irqreturn_t da9055_rtc_alm_irq(int irq, void *data)
+{
+ struct da9055_rtc *rtc = data;
+
+ da9055_rtc_enable_alarm(rtc, 0);
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static int da9055_read_alarm(struct da9055 *da9055, struct rtc_time *rtc_tm)
+{
+ int ret;
+ uint8_t v[5];
+
+ ret = da9055_group_read(da9055, DA9055_REG_ALARM_MI, 5, v);
+ if (ret != 0) {
+ dev_err(da9055->dev, "Failed to group read ALM: %d\n", ret);
+ return ret;
+ }
+
+ rtc_tm->tm_year = (v[4] & DA9055_RTC_ALM_YEAR) + 100;
+ rtc_tm->tm_mon = (v[3] & DA9055_RTC_ALM_MONTH) - 1;
+ rtc_tm->tm_mday = v[2] & DA9055_RTC_ALM_DAY;
+ rtc_tm->tm_hour = v[1] & DA9055_RTC_ALM_HOUR;
+ rtc_tm->tm_min = v[0] & DA9055_RTC_ALM_MIN;
+
+ return rtc_valid_tm(rtc_tm);
+}
+
+static int da9055_set_alarm(struct da9055 *da9055, struct rtc_time *rtc_tm)
+{
+ int ret;
+ uint8_t v[2];
+
+ rtc_tm->tm_year -= 100;
+ rtc_tm->tm_mon += 1;
+
+ ret = da9055_reg_update(da9055, DA9055_REG_ALARM_MI,
+ DA9055_RTC_ALM_MIN, rtc_tm->tm_min);
+ if (ret != 0) {
+ dev_err(da9055->dev, "Failed to write ALRM MIN: %d\n", ret);
+ return ret;
+ }
+
+ v[0] = rtc_tm->tm_hour;
+ v[1] = rtc_tm->tm_mday;
+
+ ret = da9055_group_write(da9055, DA9055_REG_ALARM_H, 2, v);
+ if (ret < 0)
+ return ret;
+
+ ret = da9055_reg_update(da9055, DA9055_REG_ALARM_MO,
+ DA9055_RTC_ALM_MONTH, rtc_tm->tm_mon);
+ if (ret < 0)
+ dev_err(da9055->dev, "Failed to write ALM Month:%d\n", ret);
+
+ ret = da9055_reg_update(da9055, DA9055_REG_ALARM_Y,
+ DA9055_RTC_ALM_YEAR, rtc_tm->tm_year);
+ if (ret < 0)
+ dev_err(da9055->dev, "Failed to write ALM Year:%d\n", ret);
+
+ return ret;
+}
+
+static int da9055_rtc_get_alarm_status(struct da9055 *da9055)
+{
+ int ret;
+
+ ret = da9055_reg_read(da9055, DA9055_REG_ALARM_Y);
+ if (ret < 0) {
+ dev_err(da9055->dev, "Failed to read ALM: %d\n", ret);
+ return ret;
+ }
+ ret &= DA9055_RTC_ALM_EN;
+ return (ret > 0) ? 1 : 0;
+}
+
+static int da9055_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct da9055_rtc *rtc = dev_get_drvdata(dev);
+ uint8_t v[6];
+ int ret;
+
+ ret = da9055_reg_read(rtc->da9055, DA9055_REG_COUNT_S);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Registers are only valid when RTC_READ
+ * status bit is asserted
+ */
+ if (!(ret & DA9055_RTC_READ))
+ return -EBUSY;
+
+ ret = da9055_group_read(rtc->da9055, DA9055_REG_COUNT_S, 6, v);
+ if (ret < 0) {
+ dev_err(rtc->da9055->dev, "Failed to read RTC time : %d\n",
+ ret);
+ return ret;
+ }
+
+ rtc_tm->tm_year = (v[5] & DA9055_RTC_YEAR) + 100;
+ rtc_tm->tm_mon = (v[4] & DA9055_RTC_MONTH) - 1;
+ rtc_tm->tm_mday = v[3] & DA9055_RTC_DAY;
+ rtc_tm->tm_hour = v[2] & DA9055_RTC_HOUR;
+ rtc_tm->tm_min = v[1] & DA9055_RTC_MIN;
+ rtc_tm->tm_sec = v[0] & DA9055_RTC_SEC;
+
+ return rtc_valid_tm(rtc_tm);
+}
+
+static int da9055_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct da9055_rtc *rtc;
+ uint8_t v[6];
+
+ rtc = dev_get_drvdata(dev);
+
+ v[0] = tm->tm_sec;
+ v[1] = tm->tm_min;
+ v[2] = tm->tm_hour;
+ v[3] = tm->tm_mday;
+ v[4] = tm->tm_mon + 1;
+ v[5] = tm->tm_year - 100;
+
+ return da9055_group_write(rtc->da9055, DA9055_REG_COUNT_S, 6, v);
+}
+
+static int da9055_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ int ret;
+ struct rtc_time *tm = &alrm->time;
+ struct da9055_rtc *rtc = dev_get_drvdata(dev);
+
+ ret = da9055_read_alarm(rtc->da9055, tm);
+
+ if (ret)
+ return ret;
+
+ alrm->enabled = da9055_rtc_get_alarm_status(rtc->da9055);
+
+ return 0;
+}
+
+static int da9055_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ int ret;
+ struct rtc_time *tm = &alrm->time;
+ struct da9055_rtc *rtc = dev_get_drvdata(dev);
+
+ ret = da9055_rtc_enable_alarm(rtc, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = da9055_set_alarm(rtc->da9055, tm);
+ if (ret)
+ return ret;
+
+ ret = da9055_rtc_enable_alarm(rtc, 1);
+
+ return ret;
+}
+
+static int da9055_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct da9055_rtc *rtc = dev_get_drvdata(dev);
+
+ return da9055_rtc_enable_alarm(rtc, enabled);
+}
+
+static const struct rtc_class_ops da9055_rtc_ops = {
+ .read_time = da9055_rtc_read_time,
+ .set_time = da9055_rtc_set_time,
+ .read_alarm = da9055_rtc_read_alarm,
+ .set_alarm = da9055_rtc_set_alarm,
+ .alarm_irq_enable = da9055_rtc_alarm_irq_enable,
+};
+
+static int da9055_rtc_device_init(struct da9055 *da9055,
+ struct da9055_pdata *pdata)
+{
+ int ret;
+
+ /* Enable RTC and the internal Crystal */
+ ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ DA9055_RTC_EN, DA9055_RTC_EN);
+ if (ret < 0)
+ return ret;
+ ret = da9055_reg_update(da9055, DA9055_REG_EN_32K,
+ DA9055_CRYSTAL_EN, DA9055_CRYSTAL_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Enable RTC in Power Down mode */
+ ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ DA9055_RTC_MODE_PD, DA9055_RTC_MODE_PD);
+ if (ret < 0)
+ return ret;
+
+ /* Enable RTC in Reset mode */
+ if (pdata && pdata->reset_enable) {
+ ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ DA9055_RTC_MODE_SD,
+ DA9055_RTC_MODE_SD <<
+ DA9055_RTC_MODE_SD_SHIFT);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Disable the RTC TICK ALM */
+ ret = da9055_reg_update(da9055, DA9055_REG_ALARM_MO,
+ DA9055_RTC_TICK_WAKE_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int da9055_rtc_probe(struct platform_device *pdev)
+{
+ struct da9055_rtc *rtc;
+ struct da9055_pdata *pdata = NULL;
+ int ret, alm_irq;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(struct da9055_rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->da9055 = dev_get_drvdata(pdev->dev.parent);
+ pdata = rtc->da9055->dev->platform_data;
+ platform_set_drvdata(pdev, rtc);
+
+ ret = da9055_rtc_device_init(rtc->da9055, pdata);
+ if (ret < 0)
+ goto err_rtc;
+
+ ret = da9055_reg_read(rtc->da9055, DA9055_REG_ALARM_Y);
+ if (ret < 0)
+ goto err_rtc;
+
+ if (ret & DA9055_RTC_ALM_EN)
+ rtc->alarm_enable = 1;
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &da9055_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ ret = PTR_ERR(rtc->rtc);
+ goto err_rtc;
+ }
+
+ alm_irq = platform_get_irq_byname(pdev, "ALM");
+ alm_irq = regmap_irq_get_virq(rtc->da9055->irq_data, alm_irq);
+ ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL,
+ da9055_rtc_alm_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ALM", rtc);
+ if (ret != 0)
+ dev_err(rtc->da9055->dev, "irq registration failed: %d\n", ret);
+
+err_rtc:
+ return ret;
+
+}
+
+static int da9055_rtc_remove(struct platform_device *pdev)
+{
+ struct da9055_rtc *rtc = pdev->dev.platform_data;
+
+ rtc_device_unregister(rtc->rtc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/* Turn off the alarm if it should not be a wake source. */
+static int da9055_rtc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct da9055_rtc *rtc = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ if (!device_may_wakeup(&pdev->dev)) {
+ /* Disable the ALM IRQ */
+ ret = da9055_rtc_enable_alarm(rtc, 0);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to disable RTC ALM\n");
+ }
+
+ return 0;
+}
+
+/* Enable the alarm if it should be enabled (in case it was disabled to
+ * prevent use as a wake source).
+ */
+static int da9055_rtc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct da9055_rtc *rtc = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ if (!device_may_wakeup(&pdev->dev)) {
+ if (rtc->alarm_enable) {
+ ret = da9055_rtc_enable_alarm(rtc, 1);
+ if (ret < 0)
+ dev_err(&pdev->dev,
+ "Failed to restart RTC ALM\n");
+ }
+ }
+
+ return 0;
+}
+
+/* Unconditionally disable the alarm */
+static int da9055_rtc_freeze(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct da9055_rtc *rtc = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ ret = da9055_rtc_enable_alarm(rtc, 0);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to freeze RTC ALMs\n");
+
+ return 0;
+
+}
+#else
+#define da9055_rtc_suspend NULL
+#define da9055_rtc_resume NULL
+#define da9055_rtc_freeze NULL
+#endif
+
+static const struct dev_pm_ops da9055_rtc_pm_ops = {
+ .suspend = da9055_rtc_suspend,
+ .resume = da9055_rtc_resume,
+
+ .freeze = da9055_rtc_freeze,
+ .thaw = da9055_rtc_resume,
+ .restore = da9055_rtc_resume,
+
+ .poweroff = da9055_rtc_suspend,
+};
+
+static struct platform_driver da9055_rtc_driver = {
+ .probe = da9055_rtc_probe,
+ .remove = da9055_rtc_remove,
+ .driver = {
+ .name = "da9055-rtc",
+ .owner = THIS_MODULE,
+ .pm = &da9055_rtc_pm_ops,
+ },
+};
+
+module_platform_driver(da9055_rtc_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("RTC driver for Dialog DA9055 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-rtc");
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 14c2109dbaa3..56b73089bb29 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -485,7 +485,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
struct resource *res, *mem;
int ret = 0;
- davinci_rtc = kzalloc(sizeof(struct davinci_rtc), GFP_KERNEL);
+ davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL);
if (!davinci_rtc) {
dev_dbg(dev, "could not allocate memory for private data\n");
return -ENOMEM;
@@ -494,34 +494,31 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
davinci_rtc->irq = platform_get_irq(pdev, 0);
if (davinci_rtc->irq < 0) {
dev_err(dev, "no RTC irq\n");
- ret = davinci_rtc->irq;
- goto fail1;
+ return davinci_rtc->irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "no mem resource\n");
- ret = -EINVAL;
- goto fail1;
+ return -EINVAL;
}
davinci_rtc->pbase = res->start;
davinci_rtc->base_size = resource_size(res);
- mem = request_mem_region(davinci_rtc->pbase, davinci_rtc->base_size,
- pdev->name);
+ mem = devm_request_mem_region(dev, davinci_rtc->pbase,
+ davinci_rtc->base_size, pdev->name);
if (!mem) {
dev_err(dev, "RTC registers at %08x are not free\n",
davinci_rtc->pbase);
- ret = -EBUSY;
- goto fail1;
+ return -EBUSY;
}
- davinci_rtc->base = ioremap(davinci_rtc->pbase, davinci_rtc->base_size);
+ davinci_rtc->base = devm_ioremap(dev, davinci_rtc->pbase,
+ davinci_rtc->base_size);
if (!davinci_rtc->base) {
dev_err(dev, "unable to ioremap MEM resource\n");
- ret = -ENOMEM;
- goto fail2;
+ return -ENOMEM;
}
platform_set_drvdata(pdev, davinci_rtc);
@@ -529,9 +526,10 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&davinci_rtc_ops, THIS_MODULE);
if (IS_ERR(davinci_rtc->rtc)) {
- dev_err(dev, "unable to register RTC device, err %ld\n",
- PTR_ERR(davinci_rtc->rtc));
- goto fail3;
+ ret = PTR_ERR(davinci_rtc->rtc);
+ dev_err(dev, "unable to register RTC device, err %d\n",
+ ret);
+ goto fail1;
}
rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
@@ -541,11 +539,11 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CTRL);
rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CCTRL);
- ret = request_irq(davinci_rtc->irq, davinci_rtc_interrupt,
+ ret = devm_request_irq(dev, davinci_rtc->irq, davinci_rtc_interrupt,
0, "davinci_rtc", davinci_rtc);
if (ret < 0) {
dev_err(dev, "unable to register davinci RTC interrupt\n");
- goto fail4;
+ goto fail2;
}
/* Enable interrupts */
@@ -559,20 +557,14 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
return 0;
-fail4:
- rtc_device_unregister(davinci_rtc->rtc);
-fail3:
- platform_set_drvdata(pdev, NULL);
- iounmap(davinci_rtc->base);
fail2:
- release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
+ rtc_device_unregister(davinci_rtc->rtc);
fail1:
- kfree(davinci_rtc);
-
+ platform_set_drvdata(pdev, NULL);
return ret;
}
-static int __devexit davinci_rtc_remove(struct platform_device *pdev)
+static int davinci_rtc_remove(struct platform_device *pdev)
{
struct davinci_rtc *davinci_rtc = platform_get_drvdata(pdev);
@@ -580,23 +572,16 @@ static int __devexit davinci_rtc_remove(struct platform_device *pdev)
rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
- free_irq(davinci_rtc->irq, davinci_rtc);
-
rtc_device_unregister(davinci_rtc->rtc);
- iounmap(davinci_rtc->base);
- release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
-
platform_set_drvdata(pdev, NULL);
- kfree(davinci_rtc);
-
return 0;
}
static struct platform_driver davinci_rtc_driver = {
.probe = davinci_rtc_probe,
- .remove = __devexit_p(davinci_rtc_remove),
+ .remove = davinci_rtc_remove,
.driver = {
.name = "rtc_davinci",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index cace6d3aed9a..d04939369251 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
@@ -379,25 +381,6 @@ static long rtc_dev_ioctl(struct file *file,
err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
break;
-#if 0
- case RTC_EPOCH_SET:
-#ifndef rtc_epoch
- /*
- * There were no RTC clocks before 1900.
- */
- if (arg < 1900) {
- err = -EINVAL;
- break;
- }
- rtc_epoch = arg;
- err = 0;
-#endif
- break;
-
- case RTC_EPOCH_READ:
- err = put_user(rtc_epoch, (unsigned long __user *)uarg);
- break;
-#endif
case RTC_WKALM_SET:
mutex_unlock(&rtc->ops_lock);
if (copy_from_user(&alarm, uarg, sizeof(alarm)))
@@ -481,7 +464,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
return;
if (rtc->id >= RTC_DEV_MAX) {
- pr_debug("%s: too many RTC devices\n", rtc->name);
+ dev_dbg(&rtc->dev, "%s: too many RTC devices\n", rtc->name);
return;
}
@@ -499,10 +482,10 @@ void rtc_dev_prepare(struct rtc_device *rtc)
void rtc_dev_add_device(struct rtc_device *rtc)
{
if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1))
- printk(KERN_WARNING "%s: failed to add char device %d:%d\n",
+ dev_warn(&rtc->dev, "%s: failed to add char device %d:%d\n",
rtc->name, MAJOR(rtc_devt), rtc->id);
else
- pr_debug("%s: dev (%d:%d)\n", rtc->name,
+ dev_dbg(&rtc->dev, "%s: dev (%d:%d)\n", rtc->name,
MAJOR(rtc_devt), rtc->id);
}
@@ -518,8 +501,7 @@ void __init rtc_dev_init(void)
err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
if (err < 0)
- printk(KERN_ERR "%s: failed to allocate char dev region\n",
- __FILE__);
+ pr_err("failed to allocate char dev region\n");
}
void __exit rtc_dev_exit(void)
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index d4457afcba89..b2ed2c94b081 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -123,7 +123,7 @@ static struct rtc_class_ops dm355evm_rtc_ops = {
/*----------------------------------------------------------------------*/
-static int __devinit dm355evm_rtc_probe(struct platform_device *pdev)
+static int dm355evm_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
@@ -139,7 +139,7 @@ static int __devinit dm355evm_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit dm355evm_rtc_remove(struct platform_device *pdev)
+static int dm355evm_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata(pdev);
@@ -154,7 +154,7 @@ static int __devexit dm355evm_rtc_remove(struct platform_device *pdev)
*/
static struct platform_driver rtc_dm355evm_driver = {
.probe = dm355evm_rtc_probe,
- .remove = __devexit_p(dm355evm_rtc_remove),
+ .remove = dm355evm_rtc_remove,
.driver = {
.owner = THIS_MODULE,
.name = "rtc-dm355evm",
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 990c3ff489bf..d989412a348a 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -329,7 +329,7 @@ static const struct rtc_class_ops ds1286_ops = {
.alarm_irq_enable = ds1286_alarm_irq_enable,
};
-static int __devinit ds1286_probe(struct platform_device *pdev)
+static int ds1286_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
@@ -376,7 +376,7 @@ out:
return ret;
}
-static int __devexit ds1286_remove(struct platform_device *pdev)
+static int ds1286_remove(struct platform_device *pdev)
{
struct ds1286_priv *priv = platform_get_drvdata(pdev);
@@ -393,7 +393,7 @@ static struct platform_driver ds1286_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ds1286_probe,
- .remove = __devexit_p(ds1286_remove),
+ .remove = ds1286_remove,
};
module_platform_driver(ds1286_platform_driver);
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index f0d638922644..fdbcdb289d60 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -234,7 +234,7 @@ static int __init ds1302_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ds1302_rtc_remove(struct platform_device *pdev)
+static int ds1302_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata(pdev);
@@ -249,7 +249,7 @@ static struct platform_driver ds1302_platform_driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
- .remove = __devexit_p(ds1302_rtc_remove),
+ .remove = ds1302_rtc_remove,
};
static int __init ds1302_rtc_init(void)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 686a865913e1..b05a6dc96405 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -601,7 +601,7 @@ static struct bin_attribute nvram = {
* Interface to SPI stack
*/
-static int __devinit ds1305_probe(struct spi_device *spi)
+static int ds1305_probe(struct spi_device *spi)
{
struct ds1305 *ds1305;
int status;
@@ -635,9 +635,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
goto fail0;
}
- dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n",
- "read", ds1305->ctrl[0],
- ds1305->ctrl[1], ds1305->ctrl[2]);
+ dev_dbg(&spi->dev, "ctrl %s: %3ph\n", "read", ds1305->ctrl);
/* Sanity check register values ... partially compensating for the
* fact that SPI has no device handshake. A pullup on MISO would
@@ -723,9 +721,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
goto fail0;
}
- dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n",
- "write", ds1305->ctrl[0],
- ds1305->ctrl[1], ds1305->ctrl[2]);
+ dev_dbg(&spi->dev, "ctrl %s: %3ph\n", "write", ds1305->ctrl);
}
/* see if non-Linux software set up AM/PM mode */
@@ -787,7 +783,7 @@ fail0:
return status;
}
-static int __devexit ds1305_remove(struct spi_device *spi)
+static int ds1305_remove(struct spi_device *spi)
{
struct ds1305 *ds1305 = spi_get_drvdata(spi);
@@ -810,7 +806,7 @@ static struct spi_driver ds1305_driver = {
.driver.name = "rtc-ds1305",
.driver.owner = THIS_MODULE,
.probe = ds1305_probe,
- .remove = __devexit_p(ds1305_remove),
+ .remove = ds1305_remove,
/* REVISIT add suspend/resume */
};
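Several hunks in this series (ds1305, ds1307, fm3130) replace open-coded register dumps with the kernel's "%*ph" printk extension, which prints a small buffer as hex bytes. A minimal before/after sketch; the example_dump_regs() helper and its arguments are illustrative only, not part of the patch:

#include <linux/device.h>
#include <linux/printk.h>

static void example_dump_regs(struct device *dev, const u8 *regs)
{
	/* old style: one "%02x" conversion and one argument per byte */
	dev_dbg(dev, "ctrl read: %02x %02x %02x\n", regs[0], regs[1], regs[2]);

	/* new style: "%3ph" dumps 3 bytes straight from the buffer */
	dev_dbg(dev, "ctrl read: %3ph\n", regs);
}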
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 836710ce750e..970a236b147a 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -322,12 +322,7 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
return -EIO;
}
- dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
- "read",
- ds1307->regs[0], ds1307->regs[1],
- ds1307->regs[2], ds1307->regs[3],
- ds1307->regs[4], ds1307->regs[5],
- ds1307->regs[6]);
+ dev_dbg(dev, "%s: %7ph\n", "read", ds1307->regs);
t->tm_sec = bcd2bin(ds1307->regs[DS1307_REG_SECS] & 0x7f);
t->tm_min = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f);
@@ -398,9 +393,7 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
break;
}
- dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
- "write", buf[0], buf[1], buf[2], buf[3],
- buf[4], buf[5], buf[6]);
+ dev_dbg(dev, "%s: %7ph\n", "write", buf);
result = ds1307->write_block_data(ds1307->client,
ds1307->offset, 7, buf);
@@ -617,8 +610,8 @@ ds1307_nvram_write(struct file *filp, struct kobject *kobj,
/*----------------------------------------------------------------------*/
-static int __devinit ds1307_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1307_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct ds1307 *ds1307;
int err = -ENODEV;
@@ -938,7 +931,7 @@ exit_free:
return err;
}
-static int __devexit ds1307_remove(struct i2c_client *client)
+static int ds1307_remove(struct i2c_client *client)
{
struct ds1307 *ds1307 = i2c_get_clientdata(client);
@@ -963,7 +956,7 @@ static struct i2c_driver ds1307_driver = {
.owner = THIS_MODULE,
},
.probe = ds1307_probe,
- .remove = __devexit_p(ds1307_remove),
+ .remove = ds1307_remove,
.id_table = ds1307_id,
};
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 966316088b7f..fef76868aae0 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -391,7 +391,7 @@ out_free:
return ret;
}
-static int __devexit ds1374_remove(struct i2c_client *client)
+static int ds1374_remove(struct i2c_client *client)
{
struct ds1374 *ds1374 = i2c_get_clientdata(client);
@@ -442,7 +442,7 @@ static struct i2c_driver ds1374_driver = {
.pm = DS1374_PM,
},
.probe = ds1374_probe,
- .remove = __devexit_p(ds1374_remove),
+ .remove = ds1374_remove,
.id_table = ds1374_id,
};
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index b0a99e1b25be..f994257981a0 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -121,7 +121,7 @@ static const struct rtc_class_ops ds1390_rtc_ops = {
.set_time = ds1390_set_time,
};
-static int __devinit ds1390_probe(struct spi_device *spi)
+static int ds1390_probe(struct spi_device *spi)
{
unsigned char tmp;
struct ds1390 *chip;
@@ -156,7 +156,7 @@ static int __devinit ds1390_probe(struct spi_device *spi)
return res;
}
-static int __devexit ds1390_remove(struct spi_device *spi)
+static int ds1390_remove(struct spi_device *spi)
{
struct ds1390 *chip = spi_get_drvdata(spi);
@@ -172,7 +172,7 @@ static struct spi_driver ds1390_driver = {
.owner = THIS_MODULE,
},
.probe = ds1390_probe,
- .remove = __devexit_p(ds1390_remove),
+ .remove = ds1390_remove,
};
module_spi_driver(ds1390_driver);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 1f675f5294f5..6a3fcfe3b0e7 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -476,8 +476,7 @@ static struct bin_attribute ds1511_nvram_attr = {
.write = ds1511_nvram_write,
};
- static int __devinit
-ds1511_rtc_probe(struct platform_device *pdev)
+static int ds1511_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
@@ -551,8 +550,7 @@ ds1511_rtc_probe(struct platform_device *pdev)
return ret;
}
- static int __devexit
-ds1511_rtc_remove(struct platform_device *pdev)
+static int ds1511_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
@@ -573,7 +571,7 @@ MODULE_ALIAS("platform:ds1511");
static struct platform_driver ds1511_rtc_driver = {
.probe = ds1511_rtc_probe,
- .remove = __devexit_p(ds1511_rtc_remove),
+ .remove = ds1511_rtc_remove,
.driver = {
.name = "ds1511",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 6ccedbbf923c..25ce0621ade9 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -276,7 +276,7 @@ static struct bin_attribute ds1553_nvram_attr = {
.write = ds1553_nvram_write,
};
-static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
+static int ds1553_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
@@ -338,7 +338,7 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit ds1553_rtc_remove(struct platform_device *pdev)
+static int ds1553_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
@@ -354,7 +354,7 @@ MODULE_ALIAS("platform:rtc-ds1553");
static struct platform_driver ds1553_rtc_driver = {
.probe = ds1553_rtc_probe,
- .remove = __devexit_p(ds1553_rtc_remove),
+ .remove = ds1553_rtc_remove,
.driver = {
.name = "rtc-ds1553",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 76112667c507..609c870e2cc5 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -159,7 +159,7 @@ static ssize_t ds1742_nvram_write(struct file *filp, struct kobject *kobj,
return count;
}
-static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
+static int ds1742_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
@@ -222,7 +222,7 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
+static int ds1742_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
@@ -233,7 +233,7 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
static struct platform_driver ds1742_rtc_driver = {
.probe = ds1742_rtc_probe,
- .remove = __devexit_p(ds1742_rtc_remove),
+ .remove = ds1742_rtc_remove,
.driver = {
.name = "rtc-ds1742",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 5ea9df7c8c31..b04fc4272fb3 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -70,7 +70,7 @@ static int ds2404_gpio_map(struct ds2404 *chip, struct platform_device *pdev,
for (i = 0; i < ARRAY_SIZE(ds2404_gpio); i++) {
err = gpio_request(ds2404_gpio[i].gpio, ds2404_gpio[i].name);
if (err) {
- printk(KERN_ERR "error mapping gpio %s: %d\n",
+ dev_err(&pdev->dev, "error mapping gpio %s: %d\n",
ds2404_gpio[i].name, err);
goto err_request;
}
@@ -177,7 +177,7 @@ static void ds2404_write_memory(struct device *dev, u16 offset,
for (i = 0; i < length; i++) {
if (out[i] != ds2404_read_byte(dev)) {
- printk(KERN_ERR "read invalid data\n");
+ dev_err(dev, "read invalid data\n");
return;
}
}
@@ -283,19 +283,7 @@ static struct platform_driver rtc_device_driver = {
.owner = THIS_MODULE,
},
};
-
-static __init int ds2404_init(void)
-{
- return platform_driver_register(&rtc_device_driver);
-}
-
-static __exit void ds2404_exit(void)
-{
- platform_driver_unregister(&rtc_device_driver);
-}
-
-module_init(ds2404_init);
-module_exit(ds2404_exit);
+module_platform_driver(rtc_device_driver);
MODULE_DESCRIPTION("DS2404 RTC");
MODULE_AUTHOR("Sven Schnelle");
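The ds2404 conversion above drops the hand-rolled init/exit pair in favour of module_platform_driver(). Roughly, the macro expands to the same boilerplate that was deleted (sketch only; the real definition lives in <linux/platform_device.h>):

static int __init rtc_device_driver_init(void)
{
	return platform_driver_register(&rtc_device_driver);
}
module_init(rtc_device_driver_init);

static void __exit rtc_device_driver_exit(void)
{
	platform_driver_unregister(&rtc_device_driver);
}
module_exit(rtc_device_driver_exit);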
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index e1945095814e..db0ca08db315 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -391,8 +391,8 @@ static const struct rtc_class_ops ds3232_rtc_ops = {
.alarm_irq_enable = ds3232_alarm_irq_enable,
};
-static int __devinit ds3232_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds3232_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct ds3232 *ds3232;
int ret;
@@ -439,7 +439,7 @@ out_free:
return ret;
}
-static int __devexit ds3232_remove(struct i2c_client *client)
+static int ds3232_remove(struct i2c_client *client)
{
struct ds3232 *ds3232 = i2c_get_clientdata(client);
@@ -469,7 +469,7 @@ static struct i2c_driver ds3232_driver = {
.owner = THIS_MODULE,
},
.probe = ds3232_probe,
- .remove = __devexit_p(ds3232_remove),
+ .remove = ds3232_remove,
.id_table = ds3232_id,
};
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c
index fda707926f02..7a4495ef1c39 100644
--- a/drivers/rtc/rtc-ds3234.c
+++ b/drivers/rtc/rtc-ds3234.c
@@ -105,7 +105,7 @@ static const struct rtc_class_ops ds3234_rtc_ops = {
.set_time = ds3234_set_time,
};
-static int __devinit ds3234_probe(struct spi_device *spi)
+static int ds3234_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
unsigned char tmp;
@@ -156,7 +156,7 @@ static int __devinit ds3234_probe(struct spi_device *spi)
return 0;
}
-static int __devexit ds3234_remove(struct spi_device *spi)
+static int ds3234_remove(struct spi_device *spi)
{
struct rtc_device *rtc = spi_get_drvdata(spi);
@@ -170,7 +170,7 @@ static struct spi_driver ds3234_driver = {
.owner = THIS_MODULE,
},
.probe = ds3234_probe,
- .remove = __devexit_p(ds3234_remove),
+ .remove = ds3234_remove,
};
module_spi_driver(ds3234_driver);
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index c9f890b088da..1a0c37c9152b 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -13,6 +13,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
@@ -47,7 +49,7 @@ compute_wday(efi_time_t *eft)
int ndays = 0;
if (eft->year < 1998) {
- printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n");
+ pr_err("EFI year < 1998, invalid date\n");
return -1;
}
@@ -70,7 +72,7 @@ convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
eft->day = wtime->tm_mday;
eft->hour = wtime->tm_hour;
eft->minute = wtime->tm_min;
- eft->second = wtime->tm_sec;
+ eft->second = wtime->tm_sec;
eft->nanosecond = 0;
eft->daylight = wtime->tm_isdst ? EFI_ISDST : 0;
eft->timezone = EFI_UNSPECIFIED_TIMEZONE;
@@ -142,7 +144,7 @@ static int efi_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
*/
status = efi.set_wakeup_time((efi_bool_t)wkalrm->enabled, &eft);
- printk(KERN_WARNING "write status is %d\n", (int)status);
+ dev_warn(dev, "write status is %d\n", (int)status);
return status == EFI_SUCCESS ? 0 : -EINVAL;
}
@@ -157,7 +159,7 @@ static int efi_read_time(struct device *dev, struct rtc_time *tm)
if (status != EFI_SUCCESS) {
/* should never happen */
- printk(KERN_ERR "efitime: can't read time\n");
+ dev_err(dev, "can't read time\n");
return -EINVAL;
}
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 9602278ff988..1a4e5e4a70cd 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -127,7 +127,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
.attrs = ep93xx_rtc_attrs,
};
-static int __devinit ep93xx_rtc_probe(struct platform_device *pdev)
+static int ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
struct resource *res;
@@ -174,7 +174,7 @@ exit:
return err;
}
-static int __devexit ep93xx_rtc_remove(struct platform_device *pdev)
+static int ep93xx_rtc_remove(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
@@ -192,7 +192,7 @@ static struct platform_driver ep93xx_rtc_driver = {
.owner = THIS_MODULE,
},
.probe = ep93xx_rtc_probe,
- .remove = __devexit_p(ep93xx_rtc_remove),
+ .remove = ep93xx_rtc_remove,
};
module_platform_driver(ep93xx_rtc_driver);
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 86b6ecce99f0..bff3cdc5140e 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -116,17 +116,7 @@ static int fm3130_get_time(struct device *dev, struct rtc_time *t)
fm3130_rtc_mode(dev, FM3130_MODE_NORMAL);
- dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x %02x"
- "%02x %02x %02x %02x %02x %02x %02x\n",
- "read",
- fm3130->regs[0], fm3130->regs[1],
- fm3130->regs[2], fm3130->regs[3],
- fm3130->regs[4], fm3130->regs[5],
- fm3130->regs[6], fm3130->regs[7],
- fm3130->regs[8], fm3130->regs[9],
- fm3130->regs[0xa], fm3130->regs[0xb],
- fm3130->regs[0xc], fm3130->regs[0xd],
- fm3130->regs[0xe]);
+ dev_dbg(dev, "%s: %15ph\n", "read", fm3130->regs);
t->tm_sec = bcd2bin(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
t->tm_min = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
@@ -175,12 +165,7 @@ static int fm3130_set_time(struct device *dev, struct rtc_time *t)
tmp = t->tm_year - 100;
buf[FM3130_RTC_YEARS] = bin2bcd(tmp);
- dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x"
- "%02x %02x %02x %02x %02x %02x %02x %02x\n",
- "write", buf[0], buf[1], buf[2], buf[3],
- buf[4], buf[5], buf[6], buf[7],
- buf[8], buf[9], buf[0xa], buf[0xb],
- buf[0xc], buf[0xd], buf[0xe]);
+ dev_dbg(dev, "%s: %15ph\n", "write", buf);
fm3130_rtc_mode(dev, FM3130_MODE_WRITE);
@@ -361,8 +346,8 @@ static const struct rtc_class_ops fm3130_rtc_ops = {
static struct i2c_driver fm3130_driver;
-static int __devinit fm3130_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int fm3130_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct fm3130 *fm3130;
int err = -ENODEV;
@@ -517,18 +502,8 @@ bad_alarm:
bad_clock:
if (!fm3130->data_valid || !fm3130->alarm_valid)
- dev_dbg(&client->dev,
- "%s: %02x %02x %02x %02x %02x %02x %02x %02x"
- "%02x %02x %02x %02x %02x %02x %02x\n",
- "bogus registers",
- fm3130->regs[0], fm3130->regs[1],
- fm3130->regs[2], fm3130->regs[3],
- fm3130->regs[4], fm3130->regs[5],
- fm3130->regs[6], fm3130->regs[7],
- fm3130->regs[8], fm3130->regs[9],
- fm3130->regs[0xa], fm3130->regs[0xb],
- fm3130->regs[0xc], fm3130->regs[0xd],
- fm3130->regs[0xe]);
+ dev_dbg(&client->dev, "%s: %15ph\n", "bogus registers",
+ fm3130->regs);
/* We won't bail out here because we just got invalid data.
Time setting from u-boot doesn't work anyway */
@@ -546,7 +521,7 @@ exit_free:
return err;
}
-static int __devexit fm3130_remove(struct i2c_client *client)
+static int fm3130_remove(struct i2c_client *client)
{
struct fm3130 *fm3130 = i2c_get_clientdata(client);
@@ -561,7 +536,7 @@ static struct i2c_driver fm3130_driver = {
.owner = THIS_MODULE,
},
.probe = fm3130_probe,
- .remove = __devexit_p(fm3130_remove),
+ .remove = fm3130_remove,
.id_table = fm3130_id,
};
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
new file mode 100644
index 000000000000..31c5728ef629
--- /dev/null
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -0,0 +1,292 @@
+/*
+ * HID Sensor Time Driver
+ * Copyright (c) 2012, Alexander Holler.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/rtc.h>
+
+/* Format: HID-SENSOR-usage_id_in_hex */
+/* Usage ID from spec for Time: 0x2000A0 */
+#define DRIVER_NAME "HID-SENSOR-2000a0" /* must be lowercase */
+
+enum hid_time_channel {
+ CHANNEL_SCAN_INDEX_YEAR,
+ CHANNEL_SCAN_INDEX_MONTH,
+ CHANNEL_SCAN_INDEX_DAY,
+ CHANNEL_SCAN_INDEX_HOUR,
+ CHANNEL_SCAN_INDEX_MINUTE,
+ CHANNEL_SCAN_INDEX_SECOND,
+ TIME_RTC_CHANNEL_MAX,
+};
+
+struct hid_time_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info info[TIME_RTC_CHANNEL_MAX];
+ struct rtc_time last_time;
+ spinlock_t lock_last_time;
+ struct completion comp_last_time;
+ struct rtc_time time_buf;
+ struct rtc_device *rtc;
+};
+
+static const u32 hid_time_addresses[TIME_RTC_CHANNEL_MAX] = {
+ HID_USAGE_SENSOR_TIME_YEAR,
+ HID_USAGE_SENSOR_TIME_MONTH,
+ HID_USAGE_SENSOR_TIME_DAY,
+ HID_USAGE_SENSOR_TIME_HOUR,
+ HID_USAGE_SENSOR_TIME_MINUTE,
+ HID_USAGE_SENSOR_TIME_SECOND,
+};
+
+/* Channel names for verbose error messages */
+static const char * const hid_time_channel_names[TIME_RTC_CHANNEL_MAX] = {
+ "year", "month", "day", "hour", "minute", "second",
+};
+
+/* Callback handler to send event after all samples are received and captured */
+static int hid_time_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id, void *priv)
+{
+ unsigned long flags;
+ struct hid_time_state *time_state = platform_get_drvdata(priv);
+
+ spin_lock_irqsave(&time_state->lock_last_time, flags);
+ time_state->last_time = time_state->time_buf;
+ spin_unlock_irqrestore(&time_state->lock_last_time, flags);
+ complete(&time_state->comp_last_time);
+ return 0;
+}
+
+static int hid_time_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id, size_t raw_len,
+ char *raw_data, void *priv)
+{
+ struct hid_time_state *time_state = platform_get_drvdata(priv);
+ struct rtc_time *time_buf = &time_state->time_buf;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_TIME_YEAR:
+ time_buf->tm_year = *(u8 *)raw_data;
+ if (time_buf->tm_year < 70)
+ /* assume we are in 1970...2069 */
+ time_buf->tm_year += 100;
+ break;
+ case HID_USAGE_SENSOR_TIME_MONTH:
+		/* the sensor sends the month as 1-12; we need 0-11 */
+ time_buf->tm_mon = *(u8 *)raw_data-1;
+ break;
+ case HID_USAGE_SENSOR_TIME_DAY:
+ time_buf->tm_mday = *(u8 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_TIME_HOUR:
+ time_buf->tm_hour = *(u8 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_TIME_MINUTE:
+ time_buf->tm_min = *(u8 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_TIME_SECOND:
+ time_buf->tm_sec = *(u8 *)raw_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* small helper, haven't found any other way */
+static const char *hid_time_attrib_name(u32 attrib_id)
+{
+ static const char unknown[] = "unknown";
+ unsigned i;
+
+ for (i = 0; i < TIME_RTC_CHANNEL_MAX; ++i) {
+ if (hid_time_addresses[i] == attrib_id)
+ return hid_time_channel_names[i];
+ }
+ return unknown; /* should never happen */
+}
+
+static int hid_time_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ struct hid_time_state *time_state)
+{
+ int report_id, i;
+
+ for (i = 0; i < TIME_RTC_CHANNEL_MAX; ++i)
+ if (sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT, usage_id,
+ hid_time_addresses[i],
+ &time_state->info[i]) < 0)
+ return -EINVAL;
+ /* Check the (needed) attributes for sanity */
+ report_id = time_state->info[0].report_id;
+ if (report_id < 0) {
+ dev_err(&pdev->dev, "bad report ID!\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < TIME_RTC_CHANNEL_MAX; ++i) {
+ if (time_state->info[i].report_id != report_id) {
+ dev_err(&pdev->dev,
+ "not all needed attributes inside the same report!\n");
+ return -EINVAL;
+ }
+ if (time_state->info[i].size != 1) {
+ dev_err(&pdev->dev,
+ "attribute '%s' not 8 bits wide!\n",
+ hid_time_attrib_name(
+ time_state->info[i].attrib_id));
+ return -EINVAL;
+ }
+ if (time_state->info[i].units !=
+ HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED &&
+ /* allow attribute seconds with unit seconds */
+ !(time_state->info[i].attrib_id ==
+ HID_USAGE_SENSOR_TIME_SECOND &&
+ time_state->info[i].units ==
+ HID_USAGE_SENSOR_UNITS_SECOND)) {
+ dev_err(&pdev->dev,
+				"attribute '%s' doesn't have a unit of type 'none'!\n",
+ hid_time_attrib_name(
+ time_state->info[i].attrib_id));
+ return -EINVAL;
+ }
+ if (time_state->info[i].unit_expo) {
+ dev_err(&pdev->dev,
+				"attribute '%s' doesn't have a unit exponent of 1!\n",
+ hid_time_attrib_name(
+ time_state->info[i].attrib_id));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned long flags;
+ struct hid_time_state *time_state =
+ platform_get_drvdata(to_platform_device(dev));
+ int ret;
+
+ INIT_COMPLETION(time_state->comp_last_time);
+ /* get a report with all values through requesting one value */
+ sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
+ time_state->info[0].report_id);
+ /* wait for all values (event) */
+ ret = wait_for_completion_killable_timeout(
+ &time_state->comp_last_time, HZ*6);
+ if (ret > 0) {
+ /* no error */
+ spin_lock_irqsave(&time_state->lock_last_time, flags);
+ *tm = time_state->last_time;
+ spin_unlock_irqrestore(&time_state->lock_last_time, flags);
+ return 0;
+ }
+ if (!ret)
+		return -EIO; /* timed out */
+ return ret; /* killed (-ERESTARTSYS) */
+}
+
+static const struct rtc_class_ops hid_time_rtc_ops = {
+ .read_time = hid_rtc_read_time,
+};
+
+static int hid_time_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct hid_time_state *time_state = devm_kzalloc(&pdev->dev,
+ sizeof(struct hid_time_state), GFP_KERNEL);
+
+ if (time_state == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, time_state);
+
+ spin_lock_init(&time_state->lock_last_time);
+ init_completion(&time_state->comp_last_time);
+ time_state->common_attributes.hsdev = hsdev;
+ time_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_TIME,
+ &time_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes!\n");
+ return ret;
+ }
+
+ ret = hid_time_parse_report(pdev, hsdev, HID_USAGE_SENSOR_TIME,
+ time_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes!\n");
+ return ret;
+ }
+
+ time_state->callbacks.send_event = hid_time_proc_event;
+ time_state->callbacks.capture_sample = hid_time_capture_sample;
+ time_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_TIME,
+ &time_state->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "register callback failed!\n");
+ return ret;
+ }
+
+ time_state->rtc = rtc_device_register("hid-sensor-time",
+ &pdev->dev, &hid_time_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(time_state->rtc)) {
+ dev_err(&pdev->dev, "rtc device register failed!\n");
+ return PTR_ERR(time_state->rtc);
+ }
+
+ return ret;
+}
+
+static int hid_time_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct hid_time_state *time_state = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(time_state->rtc);
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
+
+ return 0;
+}
+
+static struct platform_driver hid_time_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = hid_time_probe,
+ .remove = hid_time_remove,
+};
+module_platform_driver(hid_time_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Time");
+MODULE_AUTHOR("Alexander Holler <holler@ahsoftware.de>");
+MODULE_LICENSE("GPL");
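The new rtc-hid-sensor-time driver, like the other drivers added in this series, hangs its hardware access off a struct rtc_class_ops and hands that to rtc_device_register() from probe. A minimal sketch of that shared pattern, with placeholder "foo" names that are not part of the patch:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static int foo_read_time(struct device *dev, struct rtc_time *tm)
{
	/* fill *tm from the hardware here */
	return 0;
}

static const struct rtc_class_ops foo_rtc_ops = {
	.read_time = foo_read_time,	/* set_time/alarm callbacks are optional */
};

static int foo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = rtc_device_register("foo-rtc", &pdev->dev,
				  &foo_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	platform_set_drvdata(pdev, rtc);
	return 0;
}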
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 4eed51044c5d..82aad695979e 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -36,7 +36,9 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
/* DryIce Register Definitions */
@@ -404,7 +406,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
mutex_init(&imxdi->write_mutex);
- imxdi->clk = clk_get(&pdev->dev, NULL);
+ imxdi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(imxdi->clk))
return PTR_ERR(imxdi->clk);
clk_prepare_enable(imxdi->clk);
@@ -473,12 +475,11 @@ static int dryice_rtc_probe(struct platform_device *pdev)
err:
clk_disable_unprepare(imxdi->clk);
- clk_put(imxdi->clk);
return rc;
}
-static int __devexit dryice_rtc_remove(struct platform_device *pdev)
+static int dryice_rtc_remove(struct platform_device *pdev)
{
struct imxdi_dev *imxdi = platform_get_drvdata(pdev);
@@ -490,17 +491,26 @@ static int __devexit dryice_rtc_remove(struct platform_device *pdev)
rtc_device_unregister(imxdi->rtc);
clk_disable_unprepare(imxdi->clk);
- clk_put(imxdi->clk);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id dryice_dt_ids[] = {
+ { .compatible = "fsl,imx25-rtc" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, dryice_dt_ids);
+#endif
+
static struct platform_driver dryice_rtc_driver = {
.driver = {
.name = "imxdi_rtc",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dryice_dt_ids),
},
- .remove = __devexit_p(dryice_rtc_remove),
+ .remove = dryice_rtc_remove,
};
static int __init dryice_rtc_init(void)
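The imxdi change above switches from clk_get() to devm_clk_get(), so the clock reference is released automatically when the device is unbound and the explicit clk_put() calls in the error and remove paths can go away (the clk_disable_unprepare() calls still have to stay). A sketch of the managed pattern, using a placeholder example_probe():

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);	/* no matching clk_put() needed */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* enabling is not managed: remove() must still clk_disable_unprepare() */
	return clk_prepare_enable(clk);
}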
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 1850104705c0..6b4298ea683d 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -227,7 +227,7 @@ static int isl12022_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[ISL12022_REG_SC + i]);
if (ret)
return -EIO;
- };
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index afb7cfa85ccc..c016ad81767a 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -506,6 +506,7 @@ isl1208_rtc_interrupt(int irq, void *data)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
struct i2c_client *client = data;
+ struct rtc_device *rtc = i2c_get_clientdata(client);
int handled = 0, sr, err;
/*
@@ -528,6 +529,8 @@ isl1208_rtc_interrupt(int irq, void *data)
if (sr & ISL1208_REG_SR_ALM) {
dev_dbg(&client->dev, "alarm!\n");
+ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+
/* Clear the alarm */
sr &= ~ISL1208_REG_SR_ALM;
sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 1224182d3eab..1e48686ca6d2 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -210,7 +210,7 @@ void jz4740_rtc_poweroff(struct device *dev)
}
EXPORT_SYMBOL_GPL(jz4740_rtc_poweroff);
-static int __devinit jz4740_rtc_probe(struct platform_device *pdev)
+static int jz4740_rtc_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_rtc *rtc;
@@ -297,7 +297,7 @@ err_free:
return ret;
}
-static int __devexit jz4740_rtc_remove(struct platform_device *pdev)
+static int jz4740_rtc_remove(struct platform_device *pdev)
{
struct jz4740_rtc *rtc = platform_get_drvdata(pdev);
@@ -347,7 +347,7 @@ static const struct dev_pm_ops jz4740_pm_ops = {
static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
- .remove = __devexit_p(jz4740_rtc_remove),
+ .remove = jz4740_rtc_remove,
.driver = {
.name = "jz4740-rtc",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-lp8788.c b/drivers/rtc/rtc-lp8788.c
new file mode 100644
index 000000000000..9a4631218f41
--- /dev/null
+++ b/drivers/rtc/rtc-lp8788.c
@@ -0,0 +1,338 @@
+/*
+ * TI LP8788 MFD - rtc driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+/* register address */
+#define LP8788_INTEN_3 0x05
+#define LP8788_RTC_UNLOCK 0x64
+#define LP8788_RTC_SEC 0x70
+#define LP8788_ALM1_SEC 0x77
+#define LP8788_ALM1_EN 0x7D
+#define LP8788_ALM2_SEC 0x7E
+#define LP8788_ALM2_EN 0x84
+
+/* mask/shift bits */
+#define LP8788_INT_RTC_ALM1_M BIT(1) /* Addr 05h */
+#define LP8788_INT_RTC_ALM1_S 1
+#define LP8788_INT_RTC_ALM2_M BIT(2) /* Addr 05h */
+#define LP8788_INT_RTC_ALM2_S 2
+#define LP8788_ALM_EN_M BIT(7) /* Addr 7Dh or 84h */
+#define LP8788_ALM_EN_S 7
+
+#define DEFAULT_ALARM_SEL LP8788_ALARM_1
+#define LP8788_MONTH_OFFSET 1
+#define LP8788_BASE_YEAR 2000
+#define MAX_WDAY_BITS 7
+#define LP8788_WDAY_SET 1
+#define RTC_UNLOCK 0x1
+#define RTC_LATCH 0x2
+#define ALARM_IRQ_FLAG (RTC_IRQF | RTC_AF)
+
+enum lp8788_time {
+ LPTIME_SEC,
+ LPTIME_MIN,
+ LPTIME_HOUR,
+ LPTIME_MDAY,
+ LPTIME_MON,
+ LPTIME_YEAR,
+ LPTIME_WDAY,
+ LPTIME_MAX,
+};
+
+struct lp8788_rtc {
+ struct lp8788 *lp;
+ struct rtc_device *rdev;
+ enum lp8788_alarm_sel alarm;
+ int irq;
+};
+
+static const u8 addr_alarm_sec[LP8788_ALARM_MAX] = {
+ LP8788_ALM1_SEC,
+ LP8788_ALM2_SEC,
+};
+
+static const u8 addr_alarm_en[LP8788_ALARM_MAX] = {
+ LP8788_ALM1_EN,
+ LP8788_ALM2_EN,
+};
+
+static const u8 mask_alarm_en[LP8788_ALARM_MAX] = {
+ LP8788_INT_RTC_ALM1_M,
+ LP8788_INT_RTC_ALM2_M,
+};
+
+static const u8 shift_alarm_en[LP8788_ALARM_MAX] = {
+ LP8788_INT_RTC_ALM1_S,
+ LP8788_INT_RTC_ALM2_S,
+};
+
+static int _to_tm_wday(u8 lp8788_wday)
+{
+ int i;
+
+ if (lp8788_wday == 0)
+ return 0;
+
+ /* lookup defined weekday from read register value */
+ for (i = 0; i < MAX_WDAY_BITS; i++) {
+ if ((lp8788_wday >> i) == LP8788_WDAY_SET)
+ break;
+ }
+
+ return i + 1;
+}
+
+static inline int _to_lp8788_wday(int tm_wday)
+{
+ return LP8788_WDAY_SET << (tm_wday - 1);
+}
+
+static void lp8788_rtc_unlock(struct lp8788 *lp)
+{
+ lp8788_write_byte(lp, LP8788_RTC_UNLOCK, RTC_UNLOCK);
+ lp8788_write_byte(lp, LP8788_RTC_UNLOCK, RTC_LATCH);
+}
+
+static int lp8788_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ u8 data[LPTIME_MAX];
+ int ret;
+
+ lp8788_rtc_unlock(lp);
+
+ ret = lp8788_read_multi_bytes(lp, LP8788_RTC_SEC, data, LPTIME_MAX);
+ if (ret)
+ return ret;
+
+ tm->tm_sec = data[LPTIME_SEC];
+ tm->tm_min = data[LPTIME_MIN];
+ tm->tm_hour = data[LPTIME_HOUR];
+ tm->tm_mday = data[LPTIME_MDAY];
+ tm->tm_mon = data[LPTIME_MON] - LP8788_MONTH_OFFSET;
+ tm->tm_year = data[LPTIME_YEAR] + LP8788_BASE_YEAR - 1900;
+ tm->tm_wday = _to_tm_wday(data[LPTIME_WDAY]);
+
+ return 0;
+}
+
+static int lp8788_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ u8 data[LPTIME_MAX - 1];
+ int ret, i, year;
+
+ year = tm->tm_year + 1900 - LP8788_BASE_YEAR;
+ if (year < 0) {
+ dev_err(lp->dev, "invalid year: %d\n", year);
+ return -EINVAL;
+ }
+
+ /* because rtc weekday is a readonly register, do not update */
+ data[LPTIME_SEC] = tm->tm_sec;
+ data[LPTIME_MIN] = tm->tm_min;
+ data[LPTIME_HOUR] = tm->tm_hour;
+ data[LPTIME_MDAY] = tm->tm_mday;
+ data[LPTIME_MON] = tm->tm_mon + LP8788_MONTH_OFFSET;
+ data[LPTIME_YEAR] = year;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ ret = lp8788_write_byte(lp, LP8788_RTC_SEC + i, data[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lp8788_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ struct rtc_time *tm = &alarm->time;
+ u8 addr, data[LPTIME_MAX];
+ int ret;
+
+ addr = addr_alarm_sec[rtc->alarm];
+ ret = lp8788_read_multi_bytes(lp, addr, data, LPTIME_MAX);
+ if (ret)
+ return ret;
+
+ tm->tm_sec = data[LPTIME_SEC];
+ tm->tm_min = data[LPTIME_MIN];
+ tm->tm_hour = data[LPTIME_HOUR];
+ tm->tm_mday = data[LPTIME_MDAY];
+ tm->tm_mon = data[LPTIME_MON] - LP8788_MONTH_OFFSET;
+ tm->tm_year = data[LPTIME_YEAR] + LP8788_BASE_YEAR - 1900;
+ tm->tm_wday = _to_tm_wday(data[LPTIME_WDAY]);
+ alarm->enabled = data[LPTIME_WDAY] & LP8788_ALM_EN_M;
+
+ return 0;
+}
+
+static int lp8788_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ struct rtc_time *tm = &alarm->time;
+ u8 addr, data[LPTIME_MAX];
+ int ret, i, year;
+
+ year = tm->tm_year + 1900 - LP8788_BASE_YEAR;
+ if (year < 0) {
+ dev_err(lp->dev, "invalid year: %d\n", year);
+ return -EINVAL;
+ }
+
+ data[LPTIME_SEC] = tm->tm_sec;
+ data[LPTIME_MIN] = tm->tm_min;
+ data[LPTIME_HOUR] = tm->tm_hour;
+ data[LPTIME_MDAY] = tm->tm_mday;
+ data[LPTIME_MON] = tm->tm_mon + LP8788_MONTH_OFFSET;
+ data[LPTIME_YEAR] = year;
+ data[LPTIME_WDAY] = _to_lp8788_wday(tm->tm_wday);
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ addr = addr_alarm_sec[rtc->alarm] + i;
+ ret = lp8788_write_byte(lp, addr, data[i]);
+ if (ret)
+ return ret;
+ }
+
+ alarm->enabled = 1;
+ addr = addr_alarm_en[rtc->alarm];
+
+ return lp8788_update_bits(lp, addr, LP8788_ALM_EN_M,
+ alarm->enabled << LP8788_ALM_EN_S);
+}
+
+static int lp8788_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ u8 mask, shift;
+
+ if (!rtc->irq)
+ return -EIO;
+
+ mask = mask_alarm_en[rtc->alarm];
+ shift = shift_alarm_en[rtc->alarm];
+
+ return lp8788_update_bits(lp, LP8788_INTEN_3, mask, enable << shift);
+}
+
+static const struct rtc_class_ops lp8788_rtc_ops = {
+ .read_time = lp8788_rtc_read_time,
+ .set_time = lp8788_rtc_set_time,
+ .read_alarm = lp8788_read_alarm,
+ .set_alarm = lp8788_set_alarm,
+ .alarm_irq_enable = lp8788_alarm_irq_enable,
+};
+
+static irqreturn_t lp8788_alarm_irq_handler(int irq, void *ptr)
+{
+ struct lp8788_rtc *rtc = ptr;
+
+ rtc_update_irq(rtc->rdev, 1, ALARM_IRQ_FLAG);
+ return IRQ_HANDLED;
+}
+
+static int lp8788_alarm_irq_register(struct platform_device *pdev,
+ struct lp8788_rtc *rtc)
+{
+ struct resource *r;
+ struct lp8788 *lp = rtc->lp;
+ struct irq_domain *irqdm = lp->irqdm;
+ int irq;
+
+ rtc->irq = 0;
+
+	/* even if the alarm IRQ number is not specified, RTC time should still work */
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, LP8788_ALM_IRQ);
+ if (!r)
+ return 0;
+
+ if (rtc->alarm == LP8788_ALARM_1)
+ irq = r->start;
+ else
+ irq = r->end;
+
+ rtc->irq = irq_create_mapping(irqdm, irq);
+
+ return devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ lp8788_alarm_irq_handler,
+ 0, LP8788_ALM_IRQ, rtc);
+}
+
+static int lp8788_rtc_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ struct lp8788_rtc *rtc;
+ struct device *dev = &pdev->dev;
+
+ rtc = devm_kzalloc(dev, sizeof(struct lp8788_rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->lp = lp;
+ rtc->alarm = lp->pdata ? lp->pdata->alarm_sel : DEFAULT_ALARM_SEL;
+ platform_set_drvdata(pdev, rtc);
+
+ device_init_wakeup(dev, 1);
+
+ rtc->rdev = rtc_device_register("lp8788_rtc", dev,
+ &lp8788_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rdev)) {
+ dev_err(dev, "can not register rtc device\n");
+ return PTR_ERR(rtc->rdev);
+ }
+
+ if (lp8788_alarm_irq_register(pdev, rtc))
+ dev_warn(lp->dev, "no rtc irq handler\n");
+
+ return 0;
+}
+
+static int lp8788_rtc_remove(struct platform_device *pdev)
+{
+ struct lp8788_rtc *rtc = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(rtc->rdev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_rtc_driver = {
+ .probe = lp8788_rtc_probe,
+ .remove = lp8788_rtc_remove,
+ .driver = {
+ .name = LP8788_DEV_RTC,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(lp8788_rtc_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8788 RTC Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-rtc");
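Both the new lp8788 driver and the isl1208 fix above forward alarm interrupts to the RTC core with rtc_update_irq(), which is what wakes processes waiting on the RTC character device. A sketch of such a handler, with a placeholder example_alarm_irq() that assumes the rtc_device was passed as the IRQ cookie:

#include <linux/interrupt.h>
#include <linux/rtc.h>

static irqreturn_t example_alarm_irq(int irq, void *data)
{
	struct rtc_device *rtc = data;

	/* report one alarm (RTC_AF) event to the RTC core */
	rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);

	return IRQ_HANDLED;
}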
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index d5218553741f..40a598332bac 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -197,7 +197,7 @@ static const struct rtc_class_ops lpc32xx_rtc_ops = {
.alarm_irq_enable = lpc32xx_rtc_alarm_irq_enable,
};
-static int __devinit lpc32xx_rtc_probe(struct platform_device *pdev)
+static int lpc32xx_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct lpc32xx_rtc *rtc;
@@ -299,7 +299,7 @@ static int __devinit lpc32xx_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit lpc32xx_rtc_remove(struct platform_device *pdev)
+static int lpc32xx_rtc_remove(struct platform_device *pdev)
{
struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
@@ -397,7 +397,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
- .remove = __devexit_p(lpc32xx_rtc_remove),
+ .remove = lpc32xx_rtc_remove,
.driver = {
.name = RTC_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-ls1x.c b/drivers/rtc/rtc-ls1x.c
index 07e81c5f8247..f59b6349551a 100644
--- a/drivers/rtc/rtc-ls1x.c
+++ b/drivers/rtc/rtc-ls1x.c
@@ -143,7 +143,7 @@ static struct rtc_class_ops ls1x_rtc_ops = {
.set_time = ls1x_rtc_set_time,
};
-static int __devinit ls1x_rtc_probe(struct platform_device *pdev)
+static int ls1x_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtcdev;
unsigned long v;
@@ -185,7 +185,7 @@ err:
return ret;
}
-static int __devexit ls1x_rtc_remove(struct platform_device *pdev)
+static int ls1x_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtcdev = platform_get_drvdata(pdev);
@@ -200,7 +200,7 @@ static struct platform_driver ls1x_rtc_driver = {
.name = "ls1x-rtc",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(ls1x_rtc_remove),
+ .remove = ls1x_rtc_remove,
.probe = ls1x_rtc_probe,
};
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index efab3d48cb15..49169680786e 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -170,7 +170,7 @@ static const struct rtc_class_ops m41t93_rtc_ops = {
static struct spi_driver m41t93_driver;
-static int __devinit m41t93_probe(struct spi_device *spi)
+static int m41t93_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
int res;
@@ -195,7 +195,7 @@ static int __devinit m41t93_probe(struct spi_device *spi)
}
-static int __devexit m41t93_remove(struct spi_device *spi)
+static int m41t93_remove(struct spi_device *spi)
{
struct rtc_device *rtc = spi_get_drvdata(spi);
@@ -211,7 +211,7 @@ static struct spi_driver m41t93_driver = {
.owner = THIS_MODULE,
},
.probe = m41t93_probe,
- .remove = __devexit_p(m41t93_remove),
+ .remove = m41t93_remove,
};
module_spi_driver(m41t93_driver);
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
index 6e78193e026b..89266c6764bc 100644
--- a/drivers/rtc/rtc-m41t94.c
+++ b/drivers/rtc/rtc-m41t94.c
@@ -110,7 +110,7 @@ static const struct rtc_class_ops m41t94_rtc_ops = {
static struct spi_driver m41t94_driver;
-static int __devinit m41t94_probe(struct spi_device *spi)
+static int m41t94_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
int res;
@@ -134,7 +134,7 @@ static int __devinit m41t94_probe(struct spi_device *spi)
return 0;
}
-static int __devexit m41t94_remove(struct spi_device *spi)
+static int m41t94_remove(struct spi_device *spi)
{
struct rtc_device *rtc = spi_get_drvdata(spi);
@@ -150,7 +150,7 @@ static struct spi_driver m41t94_driver = {
.owner = THIS_MODULE,
},
.probe = m41t94_probe,
- .remove = __devexit_p(m41t94_remove),
+ .remove = m41t94_remove,
};
module_spi_driver(m41t94_driver);
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index f9e3b3583733..31c9190a1fcb 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -141,7 +141,7 @@ static const struct rtc_class_ops m48t35_ops = {
.set_time = m48t35_set_time,
};
-static int __devinit m48t35_probe(struct platform_device *pdev)
+static int m48t35_probe(struct platform_device *pdev)
{
struct resource *res;
struct m48t35_priv *priv;
@@ -194,7 +194,7 @@ out:
return ret;
}
-static int __devexit m48t35_remove(struct platform_device *pdev)
+static int m48t35_remove(struct platform_device *pdev)
{
struct m48t35_priv *priv = platform_get_drvdata(pdev);
@@ -213,7 +213,7 @@ static struct platform_driver m48t35_platform_driver = {
.owner = THIS_MODULE,
},
.probe = m48t35_probe,
- .remove = __devexit_p(m48t35_remove),
+ .remove = m48t35_remove,
};
module_platform_driver(m48t35_platform_driver);
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 30ebfec9fd2b..130f29af3869 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -383,7 +383,7 @@ static struct bin_attribute m48t59_nvram_attr = {
.write = m48t59_nvram_write,
};
-static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
+static int m48t59_rtc_probe(struct platform_device *pdev)
{
struct m48t59_plat_data *pdata = pdev->dev.platform_data;
struct m48t59_private *m48t59 = NULL;
@@ -501,7 +501,7 @@ out:
return ret;
}
-static int __devexit m48t59_rtc_remove(struct platform_device *pdev)
+static int m48t59_rtc_remove(struct platform_device *pdev)
{
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
struct m48t59_plat_data *pdata = pdev->dev.platform_data;
@@ -527,7 +527,7 @@ static struct platform_driver m48t59_rtc_driver = {
.owner = THIS_MODULE,
},
.probe = m48t59_rtc_probe,
- .remove = __devexit_p(m48t59_rtc_remove),
+ .remove = m48t59_rtc_remove,
};
module_platform_driver(m48t59_rtc_driver);
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 863fb3363aa6..2ffbcacd2439 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -144,7 +144,7 @@ static const struct rtc_class_ops m48t86_rtc_ops = {
.proc = m48t86_rtc_proc,
};
-static int __devinit m48t86_rtc_probe(struct platform_device *dev)
+static int m48t86_rtc_probe(struct platform_device *dev)
{
unsigned char reg;
struct m48t86_ops *ops = dev->dev.platform_data;
@@ -164,7 +164,7 @@ static int __devinit m48t86_rtc_probe(struct platform_device *dev)
return 0;
}
-static int __devexit m48t86_rtc_remove(struct platform_device *dev)
+static int m48t86_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
@@ -182,7 +182,7 @@ static struct platform_driver m48t86_rtc_platform_driver = {
.owner = THIS_MODULE,
},
.probe = m48t86_rtc_probe,
- .remove = __devexit_p(m48t86_rtc_remove),
+ .remove = m48t86_rtc_remove,
};
module_platform_driver(m48t86_rtc_platform_driver);
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 36c74d22e8b5..7d0bf698b79e 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -120,7 +120,7 @@ static const struct rtc_class_ops max6902_rtc_ops = {
.set_time = max6902_set_time,
};
-static int __devinit max6902_probe(struct spi_device *spi)
+static int max6902_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
unsigned char tmp;
@@ -143,7 +143,7 @@ static int __devinit max6902_probe(struct spi_device *spi)
return 0;
}
-static int __devexit max6902_remove(struct spi_device *spi)
+static int max6902_remove(struct spi_device *spi)
{
struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
@@ -157,7 +157,7 @@ static struct spi_driver max6902_driver = {
.owner = THIS_MODULE,
},
.probe = max6902_probe,
- .remove = __devexit_p(max6902_remove),
+ .remove = max6902_remove,
};
module_spi_driver(max6902_driver);
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
new file mode 100644
index 000000000000..6b1337f9baf4
--- /dev/null
+++ b/drivers/rtc/rtc-max77686.c
@@ -0,0 +1,641 @@
+/*
+ * RTC driver for Maxim MAX77686
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ *
+ * based on rtc-max8997.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/max77686-private.h>
+#include <linux/irqdomain.h>
+#include <linux/regmap.h>
+
+/* RTC Control Register */
+#define BCD_EN_SHIFT 0
+#define BCD_EN_MASK (1 << BCD_EN_SHIFT)
+#define MODEL24_SHIFT 1
+#define MODEL24_MASK (1 << MODEL24_SHIFT)
+/* RTC Update Register1 */
+#define RTC_UDR_SHIFT 0
+#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT)
+#define RTC_RBUDR_SHIFT 4
+#define RTC_RBUDR_MASK (1 << RTC_RBUDR_SHIFT)
+/* WTSR and SMPL Register */
+#define WTSRT_SHIFT 0
+#define SMPLT_SHIFT 2
+#define WTSR_EN_SHIFT 6
+#define SMPL_EN_SHIFT 7
+#define WTSRT_MASK (3 << WTSRT_SHIFT)
+#define SMPLT_MASK (3 << SMPLT_SHIFT)
+#define WTSR_EN_MASK (1 << WTSR_EN_SHIFT)
+#define SMPL_EN_MASK (1 << SMPL_EN_SHIFT)
+/* RTC Hour register */
+#define HOUR_PM_SHIFT 6
+#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
+/* RTC Alarm Enable */
+#define ALARM_ENABLE_SHIFT 7
+#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+
+#define MAX77686_RTC_UPDATE_DELAY 16
+#undef MAX77686_RTC_WTSR_SMPL
+
+enum {
+ RTC_SEC = 0,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WEEKDAY,
+ RTC_MONTH,
+ RTC_YEAR,
+ RTC_DATE,
+ RTC_NR_TIME
+};
+
+struct max77686_rtc_info {
+ struct device *dev;
+ struct max77686_dev *max77686;
+ struct i2c_client *rtc;
+ struct rtc_device *rtc_dev;
+ struct mutex lock;
+
+ struct regmap *regmap;
+
+ int virq;
+ int rtc_24hr_mode;
+};
+
+enum MAX77686_RTC_OP {
+ MAX77686_RTC_WRITE,
+ MAX77686_RTC_READ,
+};
+
+static inline int max77686_rtc_calculate_wday(u8 shifted)
+{
+ int counter = -1;
+ while (shifted) {
+ shifted >>= 1;
+ counter++;
+ }
+ return counter;
+}
+
+static void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
+ int rtc_24hr_mode)
+{
+ tm->tm_sec = data[RTC_SEC] & 0x7f;
+ tm->tm_min = data[RTC_MIN] & 0x7f;
+ if (rtc_24hr_mode)
+ tm->tm_hour = data[RTC_HOUR] & 0x1f;
+ else {
+ tm->tm_hour = data[RTC_HOUR] & 0x0f;
+ if (data[RTC_HOUR] & HOUR_PM_MASK)
+ tm->tm_hour += 12;
+ }
+
+ tm->tm_wday = max77686_rtc_calculate_wday(data[RTC_WEEKDAY] & 0x7f);
+ tm->tm_mday = data[RTC_DATE] & 0x1f;
+ tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
+ tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;
+ tm->tm_yday = 0;
+ tm->tm_isdst = 0;
+}
+
+static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+ data[RTC_SEC] = tm->tm_sec;
+ data[RTC_MIN] = tm->tm_min;
+ data[RTC_HOUR] = tm->tm_hour;
+ data[RTC_WEEKDAY] = 1 << tm->tm_wday;
+ data[RTC_DATE] = tm->tm_mday;
+ data[RTC_MONTH] = tm->tm_mon + 1;
+ data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0 ;
+
+ if (tm->tm_year < 100) {
+		pr_warn("%s: MAX77686 RTC cannot handle the year %d. "
+			"Assume it's 2000.\n", __func__, 1900 + tm->tm_year);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int max77686_rtc_update(struct max77686_rtc_info *info,
+ enum MAX77686_RTC_OP op)
+{
+ int ret;
+ unsigned int data;
+
+ if (op == MAX77686_RTC_WRITE)
+ data = 1 << RTC_UDR_SHIFT;
+ else
+ data = 1 << RTC_RBUDR_SHIFT;
+
+ ret = regmap_update_bits(info->max77686->rtc_regmap,
+ MAX77686_RTC_UPDATE0, data, data);
+ if (ret < 0)
+ dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
+ __func__, ret, data);
+ else {
+ /* Minimum 16ms delay required before RTC update. */
+ msleep(MAX77686_RTC_UPDATE_DELAY);
+ }
+
+ return ret;
+}
+
+static int max77686_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_READ);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_read(info->max77686->rtc_regmap,
+ MAX77686_RTC_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__, ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
+
+ ret = rtc_valid_tm(tm);
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max77686_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = max77686_rtc_tm_to_data(tm, data);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&info->lock);
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap,
+ MAX77686_RTC_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
+ ret);
+ goto out;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ unsigned int val;
+ int i, ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_READ);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_read(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+
+ alrm->enabled = 0;
+ for (i = 0; i < RTC_NR_TIME; i++) {
+ if (data[i] & ALARM_ENABLE_MASK) {
+ alrm->enabled = 1;
+ break;
+ }
+ }
+
+ alrm->pending = 0;
+ ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS1, &val);
+ if (ret < 0) {
+ dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+
+ if (val & (1 << 4)) /* RTCA1 */
+ alrm->pending = 1;
+
+out:
+ mutex_unlock(&info->lock);
+ return 0;
+}
+
+static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
+{
+ u8 data[RTC_NR_TIME];
+ int ret, i;
+ struct rtc_time tm;
+
+ if (!mutex_is_locked(&info->lock))
+ dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_READ);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_read(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+
+ for (i = 0; i < RTC_NR_TIME; i++)
+ data[i] &= ~ALARM_ENABLE_MASK;
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+out:
+ return ret;
+}
+
+static int max77686_rtc_start_alarm(struct max77686_rtc_info *info)
+{
+ u8 data[RTC_NR_TIME];
+ int ret;
+ struct rtc_time tm;
+
+ if (!mutex_is_locked(&info->lock))
+ dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_READ);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_read(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+
+ data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+ if (data[RTC_MONTH] & 0xf)
+ data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_YEAR] & 0x7f)
+ data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_DATE] & 0x1f)
+ data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+out:
+ return ret;
+}
+
+static int max77686_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = max77686_rtc_tm_to_data(&alrm->time, data);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max77686_rtc_stop_alarm(info);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+ if (ret < 0)
+ goto out;
+
+ if (alrm->enabled)
+ ret = max77686_rtc_start_alarm(info);
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max77686_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&info->lock);
+ if (enabled)
+ ret = max77686_rtc_start_alarm(info);
+ else
+ ret = max77686_rtc_stop_alarm(info);
+ mutex_unlock(&info->lock);
+
+ return ret;
+}
+
+static irqreturn_t max77686_rtc_alarm_irq(int irq, void *data)
+{
+ struct max77686_rtc_info *info = data;
+
+ dev_info(info->dev, "%s:irq(%d)\n", __func__, irq);
+
+ rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops max77686_rtc_ops = {
+ .read_time = max77686_rtc_read_time,
+ .set_time = max77686_rtc_set_time,
+ .read_alarm = max77686_rtc_read_alarm,
+ .set_alarm = max77686_rtc_set_alarm,
+ .alarm_irq_enable = max77686_rtc_alarm_irq_enable,
+};
+
+#ifdef MAX77686_RTC_WTSR_SMPL
+static void max77686_rtc_enable_wtsr(struct max77686_rtc_info *info, bool enable)
+{
+ int ret;
+ unsigned int val, mask;
+
+ if (enable)
+ val = (1 << WTSR_EN_SHIFT) | (3 << WTSRT_SHIFT);
+ else
+ val = 0;
+
+ mask = WTSR_EN_MASK | WTSRT_MASK;
+
+ dev_info(info->dev, "%s: %s WTSR\n", __func__,
+ enable ? "enable" : "disable");
+
+ ret = regmap_update_bits(info->max77686->rtc_regmap,
+ MAX77686_WTSR_SMPL_CNTL, mask, val);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to update WTSR reg(%d)\n",
+ __func__, ret);
+ return;
+ }
+
+ max77686_rtc_update(info, MAX77686_RTC_WRITE);
+}
+
+static void max77686_rtc_enable_smpl(struct max77686_rtc_info *info, bool enable)
+{
+ int ret;
+ unsigned int val, mask;
+
+ if (enable)
+ val = (1 << SMPL_EN_SHIFT) | (0 << SMPLT_SHIFT);
+ else
+ val = 0;
+
+ mask = SMPL_EN_MASK | SMPLT_MASK;
+
+ dev_info(info->dev, "%s: %s SMPL\n", __func__,
+ enable ? "enable" : "disable");
+
+ ret = regmap_update_bits(info->max77686->rtc_regmap,
+ MAX77686_WTSR_SMPL_CNTL, mask, val);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to update SMPL reg(%d)\n",
+ __func__, ret);
+ return;
+ }
+
+ max77686_rtc_update(info, MAX77686_RTC_WRITE);
+
+ val = 0;
+ regmap_read(info->max77686->rtc_regmap, MAX77686_WTSR_SMPL_CNTL, &val);
+ pr_info("%s: WTSR_SMPL(0x%02x)\n", __func__, val);
+}
+#endif /* MAX77686_RTC_WTSR_SMPL */
+
+static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
+{
+ u8 data[2];
+ int ret;
+
+ /* Set RTC control register: Binary mode, 24hour mode */
+ data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+
+ info->rtc_24hr_mode = 1;
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap, MAX77686_RTC_CONTROLM, data, 2);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+ return ret;
+}
+
+static struct regmap_config max77686_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int max77686_rtc_probe(struct platform_device *pdev)
+{
+ struct max77686_dev *max77686 = dev_get_drvdata(pdev->dev.parent);
+ struct max77686_rtc_info *info;
+ int ret, virq;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ info = kzalloc(sizeof(struct max77686_rtc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ info->dev = &pdev->dev;
+ info->max77686 = max77686;
+ info->rtc = max77686->rtc;
+ info->max77686->rtc_regmap = regmap_init_i2c(info->max77686->rtc,
+ &max77686_rtc_regmap_config);
+ if (IS_ERR(info->max77686->rtc_regmap)) {
+ ret = PTR_ERR(info->max77686->rtc_regmap);
+ dev_err(info->max77686->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(info);
+ return ret;
+ }
+ platform_set_drvdata(pdev, info);
+
+ ret = max77686_rtc_init_reg(info);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
+ goto err_rtc;
+ }
+
+#ifdef MAX77686_RTC_WTSR_SMPL
+ max77686_rtc_enable_wtsr(info, true);
+ max77686_rtc_enable_smpl(info, true);
+#endif
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ info->rtc_dev = rtc_device_register("max77686-rtc", &pdev->dev,
+ &max77686_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(info->rtc_dev)) {
+ dev_info(&pdev->dev, "%s: fail\n", __func__);
+
+ ret = PTR_ERR(info->rtc_dev);
+ dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ if (ret == 0)
+ ret = -EINVAL;
+ goto err_rtc;
+ }
+ virq = irq_create_mapping(max77686->irq_domain, MAX77686_RTCIRQ_RTCA1);
+ if (!virq)
+ goto err_rtc;
+ info->virq = virq;
+
+ ret = request_threaded_irq(virq, NULL, max77686_rtc_alarm_irq, 0,
+ "rtc-alarm0", info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->virq, ret);
+ goto err_rtc;
+ }
+
+ goto out;
+err_rtc:
+ kfree(info);
+ return ret;
+out:
+ return ret;
+}
+
+static int max77686_rtc_remove(struct platform_device *pdev)
+{
+ struct max77686_rtc_info *info = platform_get_drvdata(pdev);
+
+ if (info) {
+ free_irq(info->virq, info);
+ rtc_device_unregister(info->rtc_dev);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static void max77686_rtc_shutdown(struct platform_device *pdev)
+{
+#ifdef MAX77686_RTC_WTSR_SMPL
+ struct max77686_rtc_info *info = platform_get_drvdata(pdev);
+ int i;
+ u8 val = 0;
+
+ for (i = 0; i < 3; i++) {
+ max77686_rtc_enable_wtsr(info, false);
+ regmap_read(info->max77686->rtc_regmap, MAX77686_WTSR_SMPL_CNTL, &val);
+ pr_info("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
+ if (val & WTSR_EN_MASK)
+ pr_emerg("%s: fail to disable WTSR\n", __func__);
+ else {
+ pr_info("%s: success to disable WTSR\n", __func__);
+ break;
+ }
+ }
+
+ /* Disable SMPL when power off */
+ max77686_rtc_enable_smpl(info, false);
+#endif /* MAX77686_RTC_WTSR_SMPL */
+}
+
+static const struct platform_device_id rtc_id[] = {
+ { "max77686-rtc", 0 },
+ {},
+};
+
+static struct platform_driver max77686_rtc_driver = {
+ .driver = {
+ .name = "max77686-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = max77686_rtc_probe,
+ .remove = max77686_rtc_remove,
+ .shutdown = max77686_rtc_shutdown,
+ .id_table = rtc_id,
+};
+
+static int __init max77686_rtc_init(void)
+{
+ return platform_driver_register(&max77686_rtc_driver);
+}
+module_init(max77686_rtc_init);
+
+static void __exit max77686_rtc_exit(void)
+{
+ platform_driver_unregister(&max77686_rtc_driver);
+}
+module_exit(max77686_rtc_exit);
+
+MODULE_DESCRIPTION("Maxim MAX77686 RTC driver");
+MODULE_AUTHOR("<woong.byun@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c
index e094ffa434f8..31ca8faf9f05 100644
--- a/drivers/rtc/rtc-max8907.c
+++ b/drivers/rtc/rtc-max8907.c
@@ -176,7 +176,7 @@ static const struct rtc_class_ops max8907_rtc_ops = {
.set_alarm = max8907_rtc_set_alarm,
};
-static int __devinit max8907_rtc_probe(struct platform_device *pdev)
+static int max8907_rtc_probe(struct platform_device *pdev)
{
struct max8907 *max8907 = dev_get_drvdata(pdev->dev.parent);
struct max8907_rtc *rtc;
@@ -205,8 +205,9 @@ static int __devinit max8907_rtc_probe(struct platform_device *pdev)
goto err_unregister;
}
- ret = request_threaded_irq(rtc->irq, NULL, max8907_irq_handler,
- IRQF_ONESHOT, "max8907-alarm0", rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ max8907_irq_handler,
+ IRQF_ONESHOT, "max8907-alarm0", rtc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request IRQ%d: %d\n",
rtc->irq, ret);
@@ -220,11 +221,10 @@ err_unregister:
return ret;
}
-static int __devexit max8907_rtc_remove(struct platform_device *pdev)
+static int max8907_rtc_remove(struct platform_device *pdev)
{
struct max8907_rtc *rtc = platform_get_drvdata(pdev);
- free_irq(rtc->irq, rtc);
rtc_device_unregister(rtc->rtc_dev);
return 0;
@@ -236,7 +236,7 @@ static struct platform_driver max8907_rtc_driver = {
.owner = THIS_MODULE,
},
.probe = max8907_rtc_probe,
- .remove = __devexit_p(max8907_rtc_remove),
+ .remove = max8907_rtc_remove,
};
module_platform_driver(max8907_rtc_driver);
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 34e4349611db..a0c8265646d2 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -247,7 +247,7 @@ static const struct rtc_class_ops max8925_rtc_ops = {
.set_alarm = max8925_rtc_set_alarm,
};
-static int __devinit max8925_rtc_probe(struct platform_device *pdev)
+static int max8925_rtc_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct max8925_rtc_info *info;
@@ -292,7 +292,7 @@ out_irq:
return ret;
}
-static int __devexit max8925_rtc_remove(struct platform_device *pdev)
+static int max8925_rtc_remove(struct platform_device *pdev)
{
struct max8925_rtc_info *info = platform_get_drvdata(pdev);
@@ -334,7 +334,7 @@ static struct platform_driver max8925_rtc_driver = {
.pm = &max8925_rtc_pm_ops,
},
.probe = max8925_rtc_probe,
- .remove = __devexit_p(max8925_rtc_remove),
+ .remove = max8925_rtc_remove,
};
module_platform_driver(max8925_rtc_driver);
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
new file mode 100644
index 000000000000..00e505b6bee3
--- /dev/null
+++ b/drivers/rtc/rtc-max8997.c
@@ -0,0 +1,552 @@
+/*
+ * RTC driver for Maxim MAX8997
+ *
+ * Copyright (C) 2013 Samsung Electronics Co.Ltd
+ *
+ * based on rtc-max8998.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/max8997-private.h>
+#include <linux/irqdomain.h>
+
+/* Module parameter for WTSR function control */
+static int wtsr_en = 1;
+module_param(wtsr_en, int, 0444);
+MODULE_PARM_DESC(wtsr_en, "Watchdog Timeout & Software Reset (default=on)");
+/* Module parameter for SMPL function control */
+static int smpl_en = 1;
+module_param(smpl_en, int, 0444);
+MODULE_PARM_DESC(smpl_en, "Sudden Momentary Power Loss (default=on)");
+
+/* RTC Control Register */
+#define BCD_EN_SHIFT 0
+#define BCD_EN_MASK (1 << BCD_EN_SHIFT)
+#define MODEL24_SHIFT 1
+#define MODEL24_MASK (1 << MODEL24_SHIFT)
+/* RTC Update Register1 */
+#define RTC_UDR_SHIFT 0
+#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT)
+/* WTSR and SMPL Register */
+#define WTSRT_SHIFT 0
+#define SMPLT_SHIFT 2
+#define WTSR_EN_SHIFT 6
+#define SMPL_EN_SHIFT 7
+#define WTSRT_MASK (3 << WTSRT_SHIFT)
+#define SMPLT_MASK (3 << SMPLT_SHIFT)
+#define WTSR_EN_MASK (1 << WTSR_EN_SHIFT)
+#define SMPL_EN_MASK (1 << SMPL_EN_SHIFT)
+/* RTC Hour register */
+#define HOUR_PM_SHIFT 6
+#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
+/* RTC Alarm Enable */
+#define ALARM_ENABLE_SHIFT 7
+#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+
+enum {
+ RTC_SEC = 0,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WEEKDAY,
+ RTC_MONTH,
+ RTC_YEAR,
+ RTC_DATE,
+ RTC_NR_TIME
+};
+
+struct max8997_rtc_info {
+ struct device *dev;
+ struct max8997_dev *max8997;
+ struct i2c_client *rtc;
+ struct rtc_device *rtc_dev;
+ struct mutex lock;
+ int virq;
+ int rtc_24hr_mode;
+};
+
+static void max8997_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
+ int rtc_24hr_mode)
+{
+ tm->tm_sec = data[RTC_SEC] & 0x7f;
+ tm->tm_min = data[RTC_MIN] & 0x7f;
+ if (rtc_24hr_mode)
+ tm->tm_hour = data[RTC_HOUR] & 0x1f;
+ else {
+ tm->tm_hour = data[RTC_HOUR] & 0x0f;
+ if (data[RTC_HOUR] & HOUR_PM_MASK)
+ tm->tm_hour += 12;
+ }
+
+ tm->tm_wday = fls(data[RTC_WEEKDAY] & 0x7f) - 1;
+ tm->tm_mday = data[RTC_DATE] & 0x1f;
+ tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
+ tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;
+ tm->tm_yday = 0;
+ tm->tm_isdst = 0;
+}
+
+static int max8997_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+ data[RTC_SEC] = tm->tm_sec;
+ data[RTC_MIN] = tm->tm_min;
+ data[RTC_HOUR] = tm->tm_hour;
+ data[RTC_WEEKDAY] = 1 << tm->tm_wday;
+ data[RTC_DATE] = tm->tm_mday;
+ data[RTC_MONTH] = tm->tm_mon + 1;
+ data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
+
+ if (tm->tm_year < 100) {
+ pr_warn("%s: MAX8997 RTC cannot handle the year %d."
+ "Assume it's 2000.\n", __func__, 1900 + tm->tm_year);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int max8997_rtc_set_update_reg(struct max8997_rtc_info *info)
+{
+ int ret;
+
+ ret = max8997_write_reg(info->rtc, MAX8997_RTC_UPDATE1,
+ RTC_UDR_MASK);
+ if (ret < 0)
+ dev_err(info->dev, "%s: fail to write update reg(%d)\n",
+ __func__, ret);
+ else {
+ /* Minimum 16ms delay required before RTC update.
+ * Otherwise, we may read and update based on out-of-date
+ * value */
+ msleep(20);
+ }
+
+ return ret;
+}
+
+static int max8997_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ mutex_lock(&info->lock);
+ ret = max8997_bulk_read(info->rtc, MAX8997_RTC_SEC, RTC_NR_TIME, data);
+ mutex_unlock(&info->lock);
+
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__,
+ ret);
+ return ret;
+ }
+
+ max8997_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
+
+ return rtc_valid_tm(tm);
+}
+
+static int max8997_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = max8997_rtc_tm_to_data(tm, data);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_SEC, RTC_NR_TIME, data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
+ ret);
+ goto out;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max8997_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ u8 val;
+ int i, ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max8997_bulk_read(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+
+ max8997_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+
+ alrm->enabled = 0;
+ for (i = 0; i < RTC_NR_TIME; i++) {
+ if (data[i] & ALARM_ENABLE_MASK) {
+ alrm->enabled = 1;
+ break;
+ }
+ }
+
+ alrm->pending = 0;
+ ret = max8997_read_reg(info->max8997->i2c, MAX8997_REG_STATUS1, &val);
+ if (ret < 0) {
+ dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+
+ if (val & (1 << 4)) /* RTCA1 */
+ alrm->pending = 1;
+
+out:
+ mutex_unlock(&info->lock);
+ return 0;
+}
+
+static int max8997_rtc_stop_alarm(struct max8997_rtc_info *info)
+{
+ u8 data[RTC_NR_TIME];
+ int ret, i;
+
+ if (!mutex_is_locked(&info->lock))
+ dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
+
+ ret = max8997_bulk_read(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ for (i = 0; i < RTC_NR_TIME; i++)
+ data[i] &= ~ALARM_ENABLE_MASK;
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+out:
+ return ret;
+}
+
+static int max8997_rtc_start_alarm(struct max8997_rtc_info *info)
+{
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ if (!mutex_is_locked(&info->lock))
+ dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
+
+ ret = max8997_bulk_read(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+ if (data[RTC_MONTH] & 0xf)
+ data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_YEAR] & 0x7f)
+ data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_DATE] & 0x1f)
+ data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+out:
+ return ret;
+}
+static int max8997_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = max8997_rtc_tm_to_data(&alrm->time, data);
+ if (ret < 0)
+ return ret;
+
+ dev_info(info->dev, "%s: %d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ data[RTC_YEAR] + 2000, data[RTC_MONTH], data[RTC_DATE],
+ data[RTC_HOUR], data[RTC_MIN], data[RTC_SEC]);
+
+ mutex_lock(&info->lock);
+
+ ret = max8997_rtc_stop_alarm(info);
+ if (ret < 0)
+ goto out;
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+ if (ret < 0)
+ goto out;
+
+ if (alrm->enabled)
+ ret = max8997_rtc_start_alarm(info);
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max8997_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&info->lock);
+ if (enabled)
+ ret = max8997_rtc_start_alarm(info);
+ else
+ ret = max8997_rtc_stop_alarm(info);
+ mutex_unlock(&info->lock);
+
+ return ret;
+}
+
+static irqreturn_t max8997_rtc_alarm_irq(int irq, void *data)
+{
+ struct max8997_rtc_info *info = data;
+
+ dev_info(info->dev, "%s:irq(%d)\n", __func__, irq);
+
+ rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops max8997_rtc_ops = {
+ .read_time = max8997_rtc_read_time,
+ .set_time = max8997_rtc_set_time,
+ .read_alarm = max8997_rtc_read_alarm,
+ .set_alarm = max8997_rtc_set_alarm,
+ .alarm_irq_enable = max8997_rtc_alarm_irq_enable,
+};
+
+static void max8997_rtc_enable_wtsr(struct max8997_rtc_info *info, bool enable)
+{
+ int ret;
+ u8 val, mask;
+
+ if (!wtsr_en)
+ return;
+
+ if (enable)
+ val = (1 << WTSR_EN_SHIFT) | (3 << WTSRT_SHIFT);
+ else
+ val = 0;
+
+ mask = WTSR_EN_MASK | WTSRT_MASK;
+
+ dev_info(info->dev, "%s: %s WTSR\n", __func__,
+ enable ? "enable" : "disable");
+
+ ret = max8997_update_reg(info->rtc, MAX8997_RTC_WTSR_SMPL, val, mask);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to update WTSR reg(%d)\n",
+ __func__, ret);
+ return;
+ }
+
+ max8997_rtc_set_update_reg(info);
+}
+
+static void max8997_rtc_enable_smpl(struct max8997_rtc_info *info, bool enable)
+{
+ int ret;
+ u8 val, mask;
+
+ if (!smpl_en)
+ return;
+
+ if (enable)
+ val = (1 << SMPL_EN_SHIFT) | (0 << SMPLT_SHIFT);
+ else
+ val = 0;
+
+ mask = SMPL_EN_MASK | SMPLT_MASK;
+
+ dev_info(info->dev, "%s: %s SMPL\n", __func__,
+ enable ? "enable" : "disable");
+
+ ret = max8997_update_reg(info->rtc, MAX8997_RTC_WTSR_SMPL, val, mask);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to update SMPL reg(%d)\n",
+ __func__, ret);
+ return;
+ }
+
+ max8997_rtc_set_update_reg(info);
+
+ val = 0;
+ max8997_read_reg(info->rtc, MAX8997_RTC_WTSR_SMPL, &val);
+ pr_info("%s: WTSR_SMPL(0x%02x)\n", __func__, val);
+}
+
+static int max8997_rtc_init_reg(struct max8997_rtc_info *info)
+{
+ u8 data[2];
+ int ret;
+
+ /* Set RTC control register: Binary mode, 24hour mode */
+ data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+
+ info->rtc_24hr_mode = 1;
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_CTRLMASK, 2, data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+ return ret;
+}
+
+static int max8997_rtc_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_rtc_info *info;
+ int ret, virq;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_rtc_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ info->dev = &pdev->dev;
+ info->max8997 = max8997;
+ info->rtc = max8997->rtc;
+
+ platform_set_drvdata(pdev, info);
+
+ ret = max8997_rtc_init_reg(info);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
+ return ret;
+ }
+
+ max8997_rtc_enable_wtsr(info, true);
+ max8997_rtc_enable_smpl(info, true);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ info->rtc_dev = rtc_device_register("max8997-rtc", &pdev->dev,
+ &max8997_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(info->rtc_dev)) {
+ ret = PTR_ERR(info->rtc_dev);
+ dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ return ret;
+ }
+
+ virq = irq_create_mapping(max8997->irq_domain, MAX8997_PMICIRQ_RTCA1);
+ if (!virq) {
+ dev_err(&pdev->dev, "Failed to create mapping alarm IRQ\n");
+ goto err_out;
+ }
+ info->virq = virq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
+ max8997_rtc_alarm_irq, 0,
+ "rtc-alarm0", info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->virq, ret);
+ goto err_out;
+ }
+
+ return ret;
+
+err_out:
+ rtc_device_unregister(info->rtc_dev);
+ return ret;
+}
+
+static int max8997_rtc_remove(struct platform_device *pdev)
+{
+ struct max8997_rtc_info *info = platform_get_drvdata(pdev);
+
+ if (info)
+ rtc_device_unregister(info->rtc_dev);
+
+ return 0;
+}
+
+static void max8997_rtc_shutdown(struct platform_device *pdev)
+{
+ struct max8997_rtc_info *info = platform_get_drvdata(pdev);
+
+ max8997_rtc_enable_wtsr(info, false);
+ max8997_rtc_enable_smpl(info, false);
+}
+
+static const struct platform_device_id rtc_id[] = {
+ { "max8997-rtc", 0 },
+ {},
+};
+
+static struct platform_driver max8997_rtc_driver = {
+ .driver = {
+ .name = "max8997-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_rtc_probe,
+ .remove = max8997_rtc_remove,
+ .shutdown = max8997_rtc_shutdown,
+ .id_table = rtc_id,
+};
+
+module_platform_driver(max8997_rtc_driver);
+
+MODULE_DESCRIPTION("Maxim MAX8997 RTC driver");
+MODULE_AUTHOR("<ms925.kim@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index 7196f438c089..8f234a075e8f 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -249,7 +249,7 @@ static const struct rtc_class_ops max8998_rtc_ops = {
.alarm_irq_enable = max8998_rtc_alarm_irq_enable,
};
-static int __devinit max8998_rtc_probe(struct platform_device *pdev)
+static int max8998_rtc_probe(struct platform_device *pdev)
{
struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent);
struct max8998_platform_data *pdata = dev_get_platdata(max8998->dev);
@@ -298,7 +298,7 @@ out_rtc:
return ret;
}
-static int __devexit max8998_rtc_remove(struct platform_device *pdev)
+static int max8998_rtc_remove(struct platform_device *pdev)
{
struct max8998_rtc_info *info = platform_get_drvdata(pdev);
@@ -323,7 +323,7 @@ static struct platform_driver max8998_rtc_driver = {
.owner = THIS_MODULE,
},
.probe = max8998_rtc_probe,
- .remove = __devexit_p(max8998_rtc_remove),
+ .remove = max8998_rtc_remove,
.id_table = max8998_rtc_id,
};
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 029e421baaed..bdcc60830aec 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rtc.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/io.h>
@@ -306,7 +307,7 @@ static const struct rtc_class_ops mpc5200_rtc_ops = {
.alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
};
-static int __devinit mpc5121_rtc_probe(struct platform_device *op)
+static int mpc5121_rtc_probe(struct platform_device *op)
{
struct mpc5121_rtc_data *rtc;
int err = 0;
@@ -382,7 +383,7 @@ out_free:
return err;
}
-static int __devexit mpc5121_rtc_remove(struct platform_device *op)
+static int mpc5121_rtc_remove(struct platform_device *op)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(&op->dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
@@ -403,20 +404,22 @@ static int __devexit mpc5121_rtc_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc5121_rtc_match[] __devinitdata = {
+#ifdef CONFIG_OF
+static struct of_device_id mpc5121_rtc_match[] = {
{ .compatible = "fsl,mpc5121-rtc", },
{ .compatible = "fsl,mpc5200-rtc", },
{},
};
+#endif
static struct platform_driver mpc5121_rtc_driver = {
.driver = {
.name = "mpc5121-rtc",
.owner = THIS_MODULE,
- .of_match_table = mpc5121_rtc_match,
+ .of_match_table = of_match_ptr(mpc5121_rtc_match),
},
.probe = mpc5121_rtc_probe,
- .remove = __devexit_p(mpc5121_rtc_remove),
+ .remove = mpc5121_rtc_remove,
};
module_platform_driver(mpc5121_rtc_driver);
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index f51719bf4a75..578baf9d9725 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -322,8 +322,8 @@ static irqreturn_t mrst_rtc_irq(int irq, void *p)
return IRQ_NONE;
}
-static int __devinit
-vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
+static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
+ int rtc_irq)
{
int retval = 0;
unsigned char rtc_control;
@@ -394,7 +394,7 @@ static void rtc_mrst_do_shutdown(void)
spin_unlock_irq(&rtc_lock);
}
-static void __devexit rtc_mrst_do_remove(struct device *dev)
+static void rtc_mrst_do_remove(struct device *dev)
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
struct resource *iomem;
@@ -503,14 +503,14 @@ static inline int mrst_poweroff(struct device *dev)
#endif
-static int __devinit vrtc_mrst_platform_probe(struct platform_device *pdev)
+static int vrtc_mrst_platform_probe(struct platform_device *pdev)
{
return vrtc_mrst_do_probe(&pdev->dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0),
platform_get_irq(pdev, 0));
}
-static int __devexit vrtc_mrst_platform_remove(struct platform_device *pdev)
+static int vrtc_mrst_platform_remove(struct platform_device *pdev)
{
rtc_mrst_do_remove(&pdev->dev);
return 0;
@@ -528,7 +528,7 @@ MODULE_ALIAS("platform:vrtc_mrst");
static struct platform_driver vrtc_mrst_platform_driver = {
.probe = vrtc_mrst_platform_probe,
- .remove = __devexit_p(vrtc_mrst_platform_remove),
+ .remove = vrtc_mrst_platform_remove,
.shutdown = vrtc_mrst_platform_shutdown,
.driver = {
.name = (char *) driver_name,
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index ebc1649d45d6..57233c885998 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -215,7 +215,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
.alarm_irq_enable = mv_rtc_alarm_irq_enable,
};
-static int __devinit mv_rtc_probe(struct platform_device *pdev)
+static int mv_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct rtc_plat_data *pdata;
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 7304139934aa..1c3ef7289565 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -368,7 +368,7 @@ static struct rtc_class_ops mxc_rtc_ops = {
.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
};
-static int __devinit mxc_rtc_probe(struct platform_device *pdev)
+static int mxc_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct rtc_device *rtc;
@@ -460,7 +460,7 @@ exit_free_pdata:
return ret;
}
-static int __devexit mxc_rtc_remove(struct platform_device *pdev)
+static int mxc_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
@@ -509,7 +509,7 @@ static struct platform_driver mxc_rtc_driver = {
},
.id_table = imx_rtc_devtype,
.probe = mxc_rtc_probe,
- .remove = __devexit_p(mxc_rtc_remove),
+ .remove = mxc_rtc_remove,
};
module_platform_driver(mxc_rtc_driver)
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index b79010987d1e..a63680850fef 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -222,7 +222,7 @@ static struct rtc_class_ops nuc900_rtc_ops = {
.alarm_irq_enable = nuc900_alarm_irq_enable,
};
-static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
+static int nuc900_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct nuc900_rtc *nuc900_rtc;
@@ -284,7 +284,7 @@ fail1: kfree(nuc900_rtc);
return err;
}
-static int __devexit nuc900_rtc_remove(struct platform_device *pdev)
+static int nuc900_rtc_remove(struct platform_device *pdev)
{
struct nuc900_rtc *nuc900_rtc = platform_get_drvdata(pdev);
struct resource *res;
@@ -304,7 +304,7 @@ static int __devexit nuc900_rtc_remove(struct platform_device *pdev)
}
static struct platform_driver nuc900_rtc_driver = {
- .remove = __devexit_p(nuc900_rtc_remove),
+ .remove = nuc900_rtc_remove,
.driver = {
.name = "nuc900-rtc",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 0b614e32653d..600971407aac 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -20,6 +20,9 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <asm/io.h>
@@ -38,6 +41,8 @@
* the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment.
*/
+#define DRIVER_NAME "omap_rtc"
+
#define OMAP_RTC_BASE 0xfffb4800
/* RTC registers */
@@ -64,6 +69,9 @@
#define OMAP_RTC_COMP_MSB_REG 0x50
#define OMAP_RTC_OSC_REG 0x54
+#define OMAP_RTC_KICK0_REG 0x6c
+#define OMAP_RTC_KICK1_REG 0x70
+
/* OMAP_RTC_CTRL_REG bit fields: */
#define OMAP_RTC_CTRL_SPLIT (1<<7)
#define OMAP_RTC_CTRL_DISABLE (1<<6)
@@ -88,10 +96,18 @@
#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3)
#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2)
+/* OMAP_RTC_KICKER values */
+#define KICK0_VALUE 0x83e70b13
+#define KICK1_VALUE 0x95a4f1e0
+
+#define OMAP_RTC_HAS_KICKER 0x1
+
static void __iomem *rtc_base;
-#define rtc_read(addr) __raw_readb(rtc_base + (addr))
-#define rtc_write(val, addr) __raw_writeb(val, rtc_base + (addr))
+#define rtc_read(addr) readb(rtc_base + (addr))
+#define rtc_write(val, addr) writeb(val, rtc_base + (addr))
+
+#define rtc_writel(val, addr) writel(val, rtc_base + (addr))
/* we rely on the rtc framework to handle locking (rtc->ops_lock),
@@ -285,11 +301,38 @@ static struct rtc_class_ops omap_rtc_ops = {
static int omap_rtc_alarm;
static int omap_rtc_timer;
+#define OMAP_RTC_DATA_DA830_IDX 1
+
+static struct platform_device_id omap_rtc_devtype[] = {
+ {
+ .name = DRIVER_NAME,
+ }, {
+ .name = "da830-rtc",
+ .driver_data = OMAP_RTC_HAS_KICKER,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, omap_rtc_devtype);
+
+static const struct of_device_id omap_rtc_of_match[] = {
+ { .compatible = "ti,da830-rtc",
+ .data = &omap_rtc_devtype[OMAP_RTC_DATA_DA830_IDX],
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
+
static int __init omap_rtc_probe(struct platform_device *pdev)
{
struct resource *res, *mem;
struct rtc_device *rtc;
u8 reg, new_ctrl;
+ const struct platform_device_id *id_entry;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(omap_rtc_of_match, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
omap_rtc_timer = platform_get_irq(pdev, 0);
if (omap_rtc_timer <= 0) {
@@ -322,6 +365,16 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
goto fail;
}
+ /* Enable the clock/module so that we can access the registers */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ id_entry = platform_get_device_id(pdev);
+ if (id_entry && (id_entry->driver_data & OMAP_RTC_HAS_KICKER)) {
+ rtc_writel(KICK0_VALUE, OMAP_RTC_KICK0_REG);
+ rtc_writel(KICK1_VALUE, OMAP_RTC_KICK1_REG);
+ }
+
rtc = rtc_device_register(pdev->name, &pdev->dev,
&omap_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -398,6 +451,10 @@ fail2:
fail1:
rtc_device_unregister(rtc);
fail0:
+ if (id_entry && (id_entry->driver_data & OMAP_RTC_HAS_KICKER))
+ rtc_writel(0, OMAP_RTC_KICK0_REG);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
iounmap(rtc_base);
fail:
release_mem_region(mem->start, resource_size(mem));
@@ -408,6 +465,8 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata(pdev);
struct resource *mem = dev_get_drvdata(&rtc->dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(pdev);
device_init_wakeup(&pdev->dev, 0);
@@ -420,6 +479,13 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
free_irq(omap_rtc_alarm, rtc);
rtc_device_unregister(rtc);
+ if (id_entry && (id_entry->driver_data & OMAP_RTC_HAS_KICKER))
+ rtc_writel(0, OMAP_RTC_KICK0_REG);
+
+ /* Disable the clock/module */
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
iounmap(rtc_base);
release_mem_region(mem->start, resource_size(mem));
return 0;
@@ -442,11 +508,17 @@ static int omap_rtc_suspend(struct platform_device *pdev, pm_message_t state)
else
rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+ /* Disable the clock/module */
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
static int omap_rtc_resume(struct platform_device *pdev)
{
+ /* Enable the clock/module so that we can access the registers */
+ pm_runtime_get_sync(&pdev->dev);
+
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(omap_rtc_alarm);
else
@@ -471,9 +543,11 @@ static struct platform_driver omap_rtc_driver = {
.resume = omap_rtc_resume,
.shutdown = omap_rtc_shutdown,
.driver = {
- .name = "omap_rtc",
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(omap_rtc_of_match),
},
+ .id_table = omap_rtc_devtype,
};
static int __init rtc_init(void)
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index cd4f198cc2ef..e0019cd0bf71 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -139,7 +139,7 @@ static const struct rtc_class_ops pcap_rtc_ops = {
.alarm_irq_enable = pcap_rtc_alarm_irq_enable,
};
-static int __devinit pcap_rtc_probe(struct platform_device *pdev)
+static int pcap_rtc_probe(struct platform_device *pdev)
{
struct pcap_rtc *pcap_rtc;
int timer_irq, alarm_irq;
@@ -183,7 +183,7 @@ fail_rtc:
return err;
}
-static int __devexit pcap_rtc_remove(struct platform_device *pdev)
+static int pcap_rtc_remove(struct platform_device *pdev)
{
struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
@@ -196,7 +196,7 @@ static int __devexit pcap_rtc_remove(struct platform_device *pdev)
}
static struct platform_driver pcap_rtc_driver = {
- .remove = __devexit_p(pcap_rtc_remove),
+ .remove = pcap_rtc_remove,
.driver = {
.name = "pcap-rtc",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 13e4df63974f..02b742afa761 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -219,7 +219,7 @@ static const struct rtc_class_ops pcf2123_rtc_ops = {
.set_time = pcf2123_rtc_set_time,
};
-static int __devinit pcf2123_probe(struct spi_device *spi)
+static int pcf2123_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
struct pcf2123_plat_data *pdata;
@@ -319,7 +319,7 @@ kfree_exit:
return ret;
}
-static int __devexit pcf2123_remove(struct spi_device *spi)
+static int pcf2123_remove(struct spi_device *spi)
{
struct pcf2123_plat_data *pdata = spi->dev.platform_data;
int i;
@@ -345,7 +345,7 @@ static struct spi_driver pcf2123_driver = {
.owner = THIS_MODULE,
},
.probe = pcf2123_probe,
- .remove = __devexit_p(pcf2123_remove),
+ .remove = pcf2123_remove,
};
module_spi_driver(pcf2123_driver);
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index a20202f9ee57..e9f3135d305f 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -248,7 +248,7 @@ static void pcf50633_rtc_irq(int irq, void *data)
rtc->alarm_pending = 1;
}
-static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
+static int pcf50633_rtc_probe(struct platform_device *pdev)
{
struct pcf50633_rtc *rtc;
@@ -272,7 +272,7 @@ static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pcf50633_rtc_remove(struct platform_device *pdev)
+static int pcf50633_rtc_remove(struct platform_device *pdev)
{
struct pcf50633_rtc *rtc;
@@ -291,7 +291,7 @@ static struct platform_driver pcf50633_rtc_driver = {
.name = "pcf50633-rtc",
},
.probe = pcf50633_rtc_probe,
- .remove = __devexit_p(pcf50633_rtc_remove),
+ .remove = pcf50633_rtc_remove,
};
module_platform_driver(pcf50633_rtc_driver);
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
new file mode 100644
index 000000000000..889e3160e701
--- /dev/null
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/of.h>
+
+#define DRIVER_NAME "rtc-pcf8523"
+
+#define REG_CONTROL1 0x00
+#define REG_CONTROL1_CAP_SEL (1 << 7)
+#define REG_CONTROL1_STOP (1 << 5)
+
+#define REG_CONTROL3 0x02
+#define REG_CONTROL3_PM_BLD (1 << 7) /* battery low detection disabled */
+#define REG_CONTROL3_PM_VDD (1 << 6) /* switch-over disabled */
+#define REG_CONTROL3_PM_DSM (1 << 5) /* direct switching mode */
+#define REG_CONTROL3_PM_MASK 0xe0
+#define REG_CONTROL3_BLF (1 << 2) /* battery low bit, read-only */
+
+#define REG_SECONDS 0x03
+#define REG_SECONDS_OS (1 << 7)
+
+#define REG_MINUTES 0x04
+#define REG_HOURS 0x05
+#define REG_DAYS 0x06
+#define REG_WEEKDAYS 0x07
+#define REG_MONTHS 0x08
+#define REG_YEARS 0x09
+
+struct pcf8523 {
+ struct rtc_device *rtc;
+};
+
+static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep)
+{
+ struct i2c_msg msgs[2];
+ u8 value = 0;
+ int err;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(reg);
+ msgs[0].buf = &reg;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(value);
+ msgs[1].buf = &value;
+
+ err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (err < 0)
+ return err;
+
+ *valuep = value;
+
+ return 0;
+}
+
+static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ u8 buffer[2] = { reg, value };
+ struct i2c_msg msg;
+ int err;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = sizeof(buffer);
+ msg.buf = buffer;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
+{
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+
+ if (!high)
+ value &= ~REG_CONTROL1_CAP_SEL;
+ else
+ value |= REG_CONTROL1_CAP_SEL;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+ if (err < 0)
+ return err;
+
+ return err;
+}
+
+static int pcf8523_set_pm(struct i2c_client *client, u8 pm)
+{
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL3, &value);
+ if (err < 0)
+ return err;
+
+ value = (value & ~REG_CONTROL3_PM_MASK) | pm;
+
+ err = pcf8523_write(client, REG_CONTROL3, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_stop_rtc(struct i2c_client *client)
+{
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+
+ value |= REG_CONTROL1_STOP;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_start_rtc(struct i2c_client *client)
+{
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~REG_CONTROL1_STOP;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 start = REG_SECONDS, regs[7];
+ struct i2c_msg msgs[2];
+ int err;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &start;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(regs);
+ msgs[1].buf = regs;
+
+ err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (err < 0)
+ return err;
+
+ if (regs[0] & REG_SECONDS_OS) {
+ /*
+ * If the oscillator was stopped, try to clear the flag. Upon
+ * power-up the flag is always set, but if we cannot clear it
+ * the oscillator isn't running properly for some reason. The
+ * sensible thing therefore is to return an error, signalling
+ * that the clock cannot be assumed to be correct.
+ */
+
+ regs[0] &= ~REG_SECONDS_OS;
+
+ err = pcf8523_write(client, REG_SECONDS, regs[0]);
+ if (err < 0)
+ return err;
+
+ err = pcf8523_read(client, REG_SECONDS, &regs[0]);
+ if (err < 0)
+ return err;
+
+ if (regs[0] & REG_SECONDS_OS)
+ return -EAGAIN;
+ }
+
+ tm->tm_sec = bcd2bin(regs[0] & 0x7f);
+ tm->tm_min = bcd2bin(regs[1] & 0x7f);
+ tm->tm_hour = bcd2bin(regs[2] & 0x3f);
+ tm->tm_mday = bcd2bin(regs[3] & 0x3f);
+ tm->tm_wday = regs[4] & 0x7;
+ tm->tm_mon = bcd2bin(regs[5] & 0x1f);
+ tm->tm_year = bcd2bin(regs[6]) + 100;
+
+ return rtc_valid_tm(tm);
+}
+
+static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_msg msg;
+ u8 regs[8];
+ int err;
+
+ err = pcf8523_stop_rtc(client);
+ if (err < 0)
+ return err;
+
+ regs[0] = REG_SECONDS;
+ regs[1] = bin2bcd(tm->tm_sec);
+ regs[2] = bin2bcd(tm->tm_min);
+ regs[3] = bin2bcd(tm->tm_hour);
+ regs[4] = bin2bcd(tm->tm_mday);
+ regs[5] = tm->tm_wday;
+ regs[6] = bin2bcd(tm->tm_mon);
+ regs[7] = bin2bcd(tm->tm_year - 100);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = sizeof(regs);
+ msg.buf = regs;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err < 0) {
+ /*
+ * If the time cannot be set, restart the RTC anyway. Note
+ * that errors are ignored if the RTC cannot be started so
+ * that we have a chance to propagate the original error.
+ */
+ pcf8523_start_rtc(client);
+ return err;
+ }
+
+ return pcf8523_start_rtc(client);
+}
+
+#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 value;
+ int ret = 0, err;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ err = pcf8523_read(client, REG_CONTROL3, &value);
+ if (err < 0)
+ return err;
+
+ if (value & REG_CONTROL3_BLF)
+ ret = 1;
+
+ if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
+ return -EFAULT;
+
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#else
+#define pcf8523_rtc_ioctl NULL
+#endif
+
+static const struct rtc_class_ops pcf8523_rtc_ops = {
+ .read_time = pcf8523_rtc_read_time,
+ .set_time = pcf8523_rtc_set_time,
+ .ioctl = pcf8523_rtc_ioctl,
+};
+
+static int pcf8523_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pcf8523 *pcf;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
+ if (!pcf)
+ return -ENOMEM;
+
+ err = pcf8523_select_capacitance(client, true);
+ if (err < 0)
+ return err;
+
+ err = pcf8523_set_pm(client, 0);
+ if (err < 0)
+ return err;
+
+ pcf->rtc = rtc_device_register(DRIVER_NAME, &client->dev,
+ &pcf8523_rtc_ops, THIS_MODULE);
+ if (IS_ERR(pcf->rtc))
+ return PTR_ERR(pcf->rtc);
+
+ i2c_set_clientdata(client, pcf);
+
+ return 0;
+}
+
+static int pcf8523_remove(struct i2c_client *client)
+{
+ struct pcf8523 *pcf = i2c_get_clientdata(client);
+
+ rtc_device_unregister(pcf->rtc);
+
+ return 0;
+}
+
+static const struct i2c_device_id pcf8523_id[] = {
+ { "pcf8523", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf8523_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcf8523_of_match[] = {
+ { .compatible = "nxp,pcf8523" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcf8523_of_match);
+#endif
+
+static struct i2c_driver pcf8523_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pcf8523_of_match),
+ },
+ .probe = pcf8523_probe,
+ .remove = pcf8523_remove,
+ .id_table = pcf8523_id,
+};
+module_i2c_driver(pcf8523_driver);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NXP PCF8523 RTC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 98e3a2b681e6..f7daf18a112e 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -181,7 +181,7 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
__func__, err, data[0], data[1]);
return -EIO;
}
- };
+ }
return 0;
}
@@ -296,7 +296,7 @@ static const struct i2c_device_id pcf8563_id[] = {
MODULE_DEVICE_TABLE(i2c, pcf8563_id);
#ifdef CONFIG_OF
-static const struct of_device_id pcf8563_of_match[] __devinitconst = {
+static const struct of_device_id pcf8563_of_match[] = {
{ .compatible = "nxp,pcf8563" },
{}
};
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 019ff3571168..5f97c61247d5 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -185,8 +185,8 @@ static int pcf8583_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (ctrl & (CTRL_STOP | CTRL_HOLD)) {
unsigned char new_ctrl = ctrl & ~(CTRL_STOP | CTRL_HOLD);
- printk(KERN_WARNING "RTC: resetting control %02x -> %02x\n",
- ctrl, new_ctrl);
+ dev_warn(dev, "resetting control %02x -> %02x\n",
+ ctrl, new_ctrl);
if ((err = pcf8583_set_ctrl(client, &new_ctrl)) < 0)
return err;
@@ -294,7 +294,7 @@ exit_kfree:
return err;
}
-static int __devexit pcf8583_remove(struct i2c_client *client)
+static int pcf8583_remove(struct i2c_client *client)
{
struct pcf8583 *pcf8583 = i2c_get_clientdata(client);
@@ -316,7 +316,7 @@ static struct i2c_driver pcf8583_driver = {
.owner = THIS_MODULE,
},
.probe = pcf8583_probe,
- .remove = __devexit_p(pcf8583_remove),
+ .remove = pcf8583_remove,
.id_table = pcf8583_id,
};
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 08378e3cc21c..8900ea784817 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -44,6 +44,7 @@
#define RTC_YMR 0x34 /* Year match register */
#define RTC_YLR 0x38 /* Year data load register */
+#define RTC_CR_EN (1 << 0) /* counter enable bit */
#define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */
#define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */
@@ -320,7 +321,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
struct pl031_local *ldata;
struct pl031_vendor_data *vendor = id->data;
struct rtc_class_ops *ops = &vendor->ops;
- unsigned long time;
+ unsigned long time, data;
ret = amba_request_regions(adev, NULL);
if (ret)
@@ -345,10 +346,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));
dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev));
+ data = readl(ldata->base + RTC_CR);
/* Enable the clockwatch on ST Variants */
if (vendor->clockwatch)
- writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
- ldata->base + RTC_CR);
+ data |= RTC_CR_CWEN;
+ else
+ data |= RTC_CR_EN;
+ writel(data, ldata->base + RTC_CR);
/*
* On ST PL031 variants, the RTC reset value does not provide correct
@@ -380,6 +384,8 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_irq;
}
+ device_init_wakeup(&adev->dev, 1);
+
return 0;
out_no_irq:
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index d00bd24342a3..f1a6557261f3 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -382,7 +382,7 @@ rtc_alarm_handled:
return IRQ_HANDLED;
}
-static int __devinit pm8xxx_rtc_probe(struct platform_device *pdev)
+static int pm8xxx_rtc_probe(struct platform_device *pdev)
{
int rc;
u8 ctrl_reg;
@@ -485,7 +485,7 @@ fail_rtc_enable:
return rc;
}
-static int __devexit pm8xxx_rtc_remove(struct platform_device *pdev)
+static int pm8xxx_rtc_remove(struct platform_device *pdev)
{
struct pm8xxx_rtc *rtc_dd = platform_get_drvdata(pdev);
@@ -524,7 +524,7 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops, pm8xxx_rtc_suspend, pm8xxx_rtc_resum
static struct platform_driver pm8xxx_rtc_driver = {
.probe = pm8xxx_rtc_probe,
- .remove = __devexit_p(pm8xxx_rtc_remove),
+ .remove = pm8xxx_rtc_remove,
.driver = {
.name = PM8XXX_RTC_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index ab0acaeb2371..0407e13d4de4 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -220,7 +220,7 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
}
}
-static int __devexit puv3_rtc_remove(struct platform_device *dev)
+static int puv3_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
@@ -236,7 +236,7 @@ static int __devexit puv3_rtc_remove(struct platform_device *dev)
return 0;
}
-static int __devinit puv3_rtc_probe(struct platform_device *pdev)
+static int puv3_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
@@ -328,7 +328,7 @@ static int puv3_rtc_resume(struct platform_device *pdev)
static struct platform_driver puv3_rtc_driver = {
.probe = puv3_rtc_probe,
- .remove = __devexit_p(puv3_rtc_remove),
+ .remove = puv3_rtc_remove,
.suspend = puv3_rtc_suspend,
.resume = puv3_rtc_resume,
.driver = {
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index f771b2ee4b18..03c85ee719a7 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -62,6 +62,10 @@
#define RYxR_MONTH_S 5
#define RYxR_MONTH_MASK (0xf << RYxR_MONTH_S)
#define RYxR_DAY_MASK 0x1f
+#define RDxR_WOM_S 20
+#define RDxR_WOM_MASK (0x7 << RDxR_WOM_S)
+#define RDxR_DOW_S 17
+#define RDxR_DOW_MASK (0x7 << RDxR_DOW_S)
#define RDxR_HOUR_S 12
#define RDxR_HOUR_MASK (0x1f << RDxR_HOUR_S)
#define RDxR_MIN_S 6
@@ -91,6 +95,7 @@ struct pxa_rtc {
spinlock_t lock; /* Protects this structure */
};
+
static u32 ryxr_calc(struct rtc_time *tm)
{
return ((tm->tm_year + 1900) << RYxR_YEAR_S)
@@ -100,7 +105,10 @@ static u32 ryxr_calc(struct rtc_time *tm)
static u32 rdxr_calc(struct rtc_time *tm)
{
- return (tm->tm_hour << RDxR_HOUR_S) | (tm->tm_min << RDxR_MIN_S)
+ return ((((tm->tm_mday + 6) / 7) << RDxR_WOM_S) & RDxR_WOM_MASK)
+ | (((tm->tm_wday + 1) << RDxR_DOW_S) & RDxR_DOW_MASK)
+ | (tm->tm_hour << RDxR_HOUR_S)
+ | (tm->tm_min << RDxR_MIN_S)
| tm->tm_sec;
}
@@ -109,6 +117,7 @@ static void tm_calc(u32 rycr, u32 rdcr, struct rtc_time *tm)
tm->tm_year = ((rycr & RYxR_YEAR_MASK) >> RYxR_YEAR_S) - 1900;
tm->tm_mon = (((rycr & RYxR_MONTH_MASK) >> RYxR_MONTH_S)) - 1;
tm->tm_mday = (rycr & RYxR_DAY_MASK);
+ tm->tm_wday = ((rycr & RDxR_DOW_MASK) >> RDxR_DOW_S) - 1;
tm->tm_hour = (rdcr & RDxR_HOUR_MASK) >> RDxR_HOUR_S;
tm->tm_min = (rdcr & RDxR_MIN_MASK) >> RDxR_MIN_S;
tm->tm_sec = rdcr & RDxR_SEC_MASK;
@@ -300,8 +309,6 @@ static int pxa_rtc_proc(struct device *dev, struct seq_file *seq)
}
static const struct rtc_class_ops pxa_rtc_ops = {
- .open = pxa_rtc_open,
- .release = pxa_rtc_release,
.read_time = pxa_rtc_read_time,
.set_time = pxa_rtc_set_time,
.read_alarm = pxa_rtc_read_alarm,
@@ -341,7 +348,7 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
dev_err(dev, "No alarm IRQ resource defined\n");
goto err_ress;
}
-
+ pxa_rtc_open(dev);
ret = -ENOMEM;
pxa_rtc->base = ioremap(pxa_rtc->ress->start,
resource_size(pxa_rtc->ress));
@@ -387,6 +394,9 @@ static int __exit pxa_rtc_remove(struct platform_device *pdev)
{
struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ pxa_rtc_release(dev);
+
rtc_device_unregister(pxa_rtc->rtc);
spin_lock_irq(&pxa_rtc->lock);
@@ -444,10 +454,7 @@ static struct platform_driver pxa_rtc_driver = {
static int __init pxa_rtc_init(void)
{
- if (cpu_is_pxa27x() || cpu_is_pxa3xx())
- return platform_driver_probe(&pxa_rtc_driver, pxa_rtc_probe);
-
- return -ENODEV;
+ return platform_driver_probe(&pxa_rtc_driver, pxa_rtc_probe);
}
static void __exit pxa_rtc_exit(void)
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 2c183ebff715..7726f4a4f2d0 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -119,7 +119,7 @@ static const struct rtc_class_ops r9701_rtc_ops = {
.set_time = r9701_set_datetime,
};
-static int __devinit r9701_probe(struct spi_device *spi)
+static int r9701_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
struct rtc_time dt;
@@ -164,7 +164,7 @@ static int __devinit r9701_probe(struct spi_device *spi)
return 0;
}
-static int __devexit r9701_remove(struct spi_device *spi)
+static int r9701_remove(struct spi_device *spi)
{
struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
@@ -178,7 +178,7 @@ static struct spi_driver r9701_driver = {
.owner = THIS_MODULE,
},
.probe = r9701_probe,
- .remove = __devexit_p(r9701_remove),
+ .remove = r9701_remove,
};
module_spi_driver(r9701_driver);
diff --git a/drivers/rtc/rtc-rc5t583.c b/drivers/rtc/rtc-rc5t583.c
index cdb140c29c56..eb3194d664a8 100644
--- a/drivers/rtc/rtc-rc5t583.c
+++ b/drivers/rtc/rtc-rc5t583.c
@@ -211,7 +211,7 @@ static const struct rtc_class_ops rc5t583_rtc_ops = {
.alarm_irq_enable = rc5t583_rtc_alarm_irq_enable,
};
-static int __devinit rc5t583_rtc_probe(struct platform_device *pdev)
+static int rc5t583_rtc_probe(struct platform_device *pdev)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
struct rc5t583_rtc *ricoh_rtc;
@@ -271,7 +271,7 @@ static int __devinit rc5t583_rtc_probe(struct platform_device *pdev)
* Disable rc5t583 RTC interrupts.
* Sets status flag to free.
*/
-static int __devexit rc5t583_rtc_remove(struct platform_device *pdev)
+static int rc5t583_rtc_remove(struct platform_device *pdev)
{
struct rc5t583_rtc *rc5t583_rtc = dev_get_drvdata(&pdev->dev);
@@ -317,7 +317,7 @@ static const struct dev_pm_ops rc5t583_rtc_pm_ops = {
static struct platform_driver rc5t583_rtc_driver = {
.probe = rc5t583_rtc_probe,
- .remove = __devexit_p(rc5t583_rtc_remove),
+ .remove = rc5t583_rtc_remove,
.driver = {
.owner = THIS_MODULE,
.name = "rtc-rc5t583",
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c
index e3ff179b99ca..d98ea5b759c8 100644
--- a/drivers/rtc/rtc-rs5c313.c
+++ b/drivers/rtc/rtc-rs5c313.c
@@ -39,6 +39,8 @@
* 1.13 Nobuhiro Iwamatsu: Updata driver.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/err.h>
#include <linux/rtc.h>
@@ -352,8 +354,7 @@ static void rs5c313_check_xstp_bit(void)
tm.tm_year = 2000 - 1900;
rs5c313_rtc_set_time(NULL, &tm);
- printk(KERN_ERR "RICHO RS5C313: invalid value, resetting to "
- "1 Jan 2000\n");
+ pr_err("invalid value, resetting to 1 Jan 2000\n");
}
RS5C313_CEDISABLE;
ndelay(700); /* CE:L */
@@ -377,7 +378,7 @@ static int rs5c313_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit rs5c313_rtc_remove(struct platform_device *pdev)
+static int rs5c313_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata( pdev );
@@ -392,7 +393,7 @@ static struct platform_driver rs5c313_rtc_platform_driver = {
.owner = THIS_MODULE,
},
.probe = rs5c313_rtc_probe,
- .remove = __devexit_p( rs5c313_rtc_remove ),
+ .remove = rs5c313_rtc_remove,
};
static int __init rs5c313_rtc_init(void)
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index fd5c7af04ae5..72ef10be8662 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -152,7 +152,7 @@ static const struct rtc_class_ops rs5c348_rtc_ops = {
static struct spi_driver rs5c348_driver;
-static int __devinit rs5c348_probe(struct spi_device *spi)
+static int rs5c348_probe(struct spi_device *spi)
{
int ret;
struct rtc_device *rtc;
@@ -218,7 +218,7 @@ static int __devinit rs5c348_probe(struct spi_device *spi)
return ret;
}
-static int __devexit rs5c348_remove(struct spi_device *spi)
+static int rs5c348_remove(struct spi_device *spi)
{
struct rs5c348_plat_data *pdata = spi->dev.platform_data;
struct rtc_device *rtc = pdata->rtc;
@@ -235,7 +235,7 @@ static struct spi_driver rs5c348_driver = {
.owner = THIS_MODULE,
},
.probe = rs5c348_probe,
- .remove = __devexit_p(rs5c348_remove),
+ .remove = rs5c348_remove,
};
module_spi_driver(rs5c348_driver);
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 76f565ae384d..581739f40097 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -311,8 +311,7 @@ static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
buf &= ~RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
- printk(KERN_WARNING "%s: can't update alarm\n",
- rs5c->rtc->name);
+ dev_warn(dev, "can't update alarm\n");
status = -EIO;
} else
rs5c->regs[RS5C_REG_CTRL1] = buf;
@@ -381,7 +380,7 @@ static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
addr = RS5C_ADDR(RS5C_REG_CTRL1);
buf[0] = rs5c->regs[RS5C_REG_CTRL1] & ~RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf[0]) < 0) {
- pr_debug("%s: can't disable alarm\n", rs5c->rtc->name);
+ dev_dbg(dev, "can't disable alarm\n");
return -EIO;
}
rs5c->regs[RS5C_REG_CTRL1] = buf[0];
@@ -395,7 +394,7 @@ static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
for (i = 0; i < sizeof(buf); i++) {
addr = RS5C_ADDR(RS5C_REG_ALARM_A_MIN + i);
if (i2c_smbus_write_byte_data(client, addr, buf[i]) < 0) {
- pr_debug("%s: can't set alarm time\n", rs5c->rtc->name);
+ dev_dbg(dev, "can't set alarm time\n");
return -EIO;
}
}
@@ -405,8 +404,7 @@ static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
addr = RS5C_ADDR(RS5C_REG_CTRL1);
buf[0] = rs5c->regs[RS5C_REG_CTRL1] | RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf[0]) < 0)
- printk(KERN_WARNING "%s: can't enable alarm\n",
- rs5c->rtc->name);
+ dev_warn(dev, "can't enable alarm\n");
rs5c->regs[RS5C_REG_CTRL1] = buf[0];
}
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 0fbe57b2f6d2..f8ee8ad7825e 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -385,8 +385,8 @@ static struct i2c_device_id rv3029c2_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rv3029c2_id);
-static int __devinit
-rv3029c2_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int rv3029c2_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct rtc_device *rtc;
int rc = 0;
@@ -418,7 +418,7 @@ exit_unregister:
return rc;
}
-static int __devexit rv3029c2_remove(struct i2c_client *client)
+static int rv3029c2_remove(struct i2c_client *client)
{
struct rtc_device *rtc = i2c_get_clientdata(client);
@@ -432,7 +432,7 @@ static struct i2c_driver rv3029c2_driver = {
.name = "rtc-rv3029c2",
},
.probe = rv3029c2_probe,
- .remove = __devexit_p(rv3029c2_remove),
+ .remove = rv3029c2_remove,
.id_table = rv3029c2_id,
};
diff --git a/drivers/rtc/rtc-rx4581.c b/drivers/rtc/rtc-rx4581.c
new file mode 100644
index 000000000000..599ec73ec886
--- /dev/null
+++ b/drivers/rtc/rtc-rx4581.c
@@ -0,0 +1,314 @@
+/* drivers/rtc/rtc-rx4581.c
+ *
+ * written by Torben Hohn <torbenh@linutronix.de>
+ *
+ * Based on:
+ * drivers/rtc/rtc-max6902.c
+ *
+ * Copyright (C) 2006 8D Technologies inc.
+ * Copyright (C) 2004 Compulab Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for MAX6902 spi RTC
+ *
+ * and based on:
+ * drivers/rtc/rtc-rx8581.c
+ *
+ * An I2C driver for the Epson RX8581 RTC
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on: rtc-pcf8563.c (An I2C driver for the Philips PCF8563 RTC)
+ * Copyright 2005-06 Tower Technologies
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+#include <linux/bcd.h>
+
+#define RX4581_REG_SC 0x00 /* Second in BCD */
+#define RX4581_REG_MN 0x01 /* Minute in BCD */
+#define RX4581_REG_HR 0x02 /* Hour in BCD */
+#define RX4581_REG_DW 0x03 /* Day of Week */
+#define RX4581_REG_DM 0x04 /* Day of Month in BCD */
+#define RX4581_REG_MO 0x05 /* Month in BCD */
+#define RX4581_REG_YR 0x06 /* Year in BCD */
+#define RX4581_REG_RAM 0x07 /* RAM */
+#define RX4581_REG_AMN 0x08 /* Alarm Min in BCD*/
+#define RX4581_REG_AHR 0x09 /* Alarm Hour in BCD */
+#define RX4581_REG_ADM 0x0A
+#define RX4581_REG_ADW 0x0A
+#define RX4581_REG_TMR0 0x0B
+#define RX4581_REG_TMR1 0x0C
+#define RX4581_REG_EXT 0x0D /* Extension Register */
+#define RX4581_REG_FLAG 0x0E /* Flag Register */
+#define RX4581_REG_CTRL 0x0F /* Control Register */
+
+
+/* Flag Register bit definitions */
+#define RX4581_FLAG_UF 0x20 /* Update */
+#define RX4581_FLAG_TF 0x10 /* Timer */
+#define RX4581_FLAG_AF 0x08 /* Alarm */
+#define RX4581_FLAG_VLF 0x02 /* Voltage Low */
+
+/* Control Register bit definitions */
+#define RX4581_CTRL_UIE 0x20 /* Update Interrupt Enable */
+#define RX4581_CTRL_TIE 0x10 /* Timer Interrupt Enable */
+#define RX4581_CTRL_AIE 0x08 /* Alarm Interrupt Enable */
+#define RX4581_CTRL_STOP 0x02 /* STOP bit */
+#define RX4581_CTRL_RESET 0x01 /* RESET bit */
+
+static int rx4581_set_reg(struct device *dev, unsigned char address,
+ unsigned char data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[2];
+
+ /* high nibble must be '0' to write */
+ buf[0] = address & 0x0f;
+ buf[1] = data;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int rx4581_get_reg(struct device *dev, unsigned char address,
+ unsigned char *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ /* Set MSB to indicate read */
+ *data = address | 0x80;
+
+ return spi_write_then_read(spi, data, 1, data, 1);
+}
+
+/*
+ * In the routines that deal directly with the rx4581 hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
+ */
+static int rx4581_get_datetime(struct device *dev, struct rtc_time *tm)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char date[7];
+ unsigned char data;
+ int err;
+
+ /* First we ensure that the "update flag" is not set, we read the
+ * time and date then re-read the "update flag". If the update flag
+ * has been set, we know that the time has changed during the read so
+ * we repeat the whole process again.
+ */
+ err = rx4581_get_reg(dev, RX4581_REG_FLAG, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read device flags\n");
+ return -EIO;
+ }
+
+ do {
+ /* If update flag set, clear it */
+ if (data & RX4581_FLAG_UF) {
+ err = rx4581_set_reg(dev,
+ RX4581_REG_FLAG, (data & ~RX4581_FLAG_UF));
+ if (err != 0) {
+ dev_err(dev, "Unable to write device "
+ "flags\n");
+ return -EIO;
+ }
+ }
+
+ /* Now read time and date */
+ date[0] = 0x80;
+ err = spi_write_then_read(spi, date, 1, date, 7);
+ if (err < 0) {
+ dev_err(dev, "Unable to read date\n");
+ return -EIO;
+ }
+
+ /* Check flag register */
+ err = rx4581_get_reg(dev, RX4581_REG_FLAG, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read device flags\n");
+ return -EIO;
+ }
+ } while (data & RX4581_FLAG_UF);
+
+ if (data & RX4581_FLAG_VLF)
+ dev_info(dev,
+ "low voltage detected, date/time is not reliable.\n");
+
+ dev_dbg(dev,
+ "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
+ "wday=%02x, mday=%02x, mon=%02x, year=%02x\n",
+ __func__,
+ date[0], date[1], date[2], date[3], date[4], date[5], date[6]);
+
+ tm->tm_sec = bcd2bin(date[RX4581_REG_SC] & 0x7F);
+ tm->tm_min = bcd2bin(date[RX4581_REG_MN] & 0x7F);
+ tm->tm_hour = bcd2bin(date[RX4581_REG_HR] & 0x3F); /* rtc hr 0-23 */
+ tm->tm_wday = ilog2(date[RX4581_REG_DW] & 0x7F);
+ tm->tm_mday = bcd2bin(date[RX4581_REG_DM] & 0x3F);
+ tm->tm_mon = bcd2bin(date[RX4581_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
+ tm->tm_year = bcd2bin(date[RX4581_REG_YR]);
+ if (tm->tm_year < 70)
+ tm->tm_year += 100; /* assume we are in 1970...2069 */
+
+
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+ err = rtc_valid_tm(tm);
+ if (err < 0)
+ dev_err(dev, "retrieved date/time is not valid.\n");
+
+ return err;
+}
+
+static int rx4581_set_datetime(struct device *dev, struct rtc_time *tm)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ int err;
+ unsigned char buf[8], data;
+
+ dev_dbg(dev, "%s: secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+ buf[0] = 0x00;
+ /* hours, minutes and seconds */
+ buf[RX4581_REG_SC+1] = bin2bcd(tm->tm_sec);
+ buf[RX4581_REG_MN+1] = bin2bcd(tm->tm_min);
+ buf[RX4581_REG_HR+1] = bin2bcd(tm->tm_hour);
+
+ buf[RX4581_REG_DM+1] = bin2bcd(tm->tm_mday);
+
+ /* month, 1 - 12 */
+ buf[RX4581_REG_MO+1] = bin2bcd(tm->tm_mon + 1);
+
+ /* year and century */
+ buf[RX4581_REG_YR+1] = bin2bcd(tm->tm_year % 100);
+ buf[RX4581_REG_DW+1] = (0x1 << tm->tm_wday);
+
+ /* Stop the clock */
+ err = rx4581_get_reg(dev, RX4581_REG_CTRL, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read control register\n");
+ return -EIO;
+ }
+
+ err = rx4581_set_reg(dev, RX4581_REG_CTRL,
+ (data | RX4581_CTRL_STOP));
+ if (err != 0) {
+ dev_err(dev, "Unable to write control register\n");
+ return -EIO;
+ }
+
+ /* write register's data */
+ err = spi_write_then_read(spi, buf, 8, NULL, 0);
+ if (err != 0) {
+ dev_err(dev, "Unable to write to date registers\n");
+ return -EIO;
+ }
+
+ /* get VLF and clear it */
+ err = rx4581_get_reg(dev, RX4581_REG_FLAG, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read flag register\n");
+ return -EIO;
+ }
+
+ err = rx4581_set_reg(dev, RX4581_REG_FLAG,
+ (data & ~(RX4581_FLAG_VLF)));
+ if (err != 0) {
+ dev_err(dev, "Unable to write flag register\n");
+ return -EIO;
+ }
+
+ /* Restart the clock */
+ err = rx4581_get_reg(dev, RX4581_REG_CTRL, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read control register\n");
+ return -EIO;
+ }
+
+ err = rx4581_set_reg(dev, RX4581_REG_CTRL,
+ (data & ~(RX4581_CTRL_STOP)));
+ if (err != 0) {
+ dev_err(dev, "Unable to write control register\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct rtc_class_ops rx4581_rtc_ops = {
+ .read_time = rx4581_get_datetime,
+ .set_time = rx4581_set_datetime,
+};
+
+static int rx4581_probe(struct spi_device *spi)
+{
+ struct rtc_device *rtc;
+ unsigned char tmp;
+ int res;
+
+ res = rx4581_get_reg(&spi->dev, RX4581_REG_SC, &tmp);
+ if (res != 0)
+ return res;
+
+ rtc = rtc_device_register("rx4581",
+ &spi->dev, &rx4581_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ dev_set_drvdata(&spi->dev, rtc);
+ return 0;
+}
+
+static int rx4581_remove(struct spi_device *spi)
+{
+ struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
+
+ rtc_device_unregister(rtc);
+ return 0;
+}
+
+static const struct spi_device_id rx4581_id[] = {
+ { "rx4581", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, rx4581_id);
+
+static struct spi_driver rx4581_driver = {
+ .driver = {
+ .name = "rtc-rx4581",
+ .owner = THIS_MODULE,
+ },
+ .probe = rx4581_probe,
+ .remove = rx4581_remove,
+ .id_table = rx4581_id,
+};
+
+module_spi_driver(rx4581_driver);
+
+MODULE_DESCRIPTION("rx4581 spi RTC driver");
+MODULE_AUTHOR("Torben Hohn");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:rtc-rx4581");
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 0de902dc1cd5..0722d36b9c9a 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -534,8 +534,8 @@ static void rx8025_sysfs_unregister(struct device *dev)
device_remove_file(dev, &dev_attr_clock_adjust_ppb);
}
-static int __devinit rx8025_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx8025_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct rx8025_data *rx8025;
@@ -614,7 +614,7 @@ errout:
return err;
}
-static int __devexit rx8025_remove(struct i2c_client *client)
+static int rx8025_remove(struct i2c_client *client)
{
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
struct mutex *lock = &rx8025->rtc->ops_lock;
@@ -640,7 +640,7 @@ static struct i2c_driver rx8025_driver = {
.owner = THIS_MODULE,
},
.probe = rx8025_probe,
- .remove = __devexit_p(rx8025_remove),
+ .remove = rx8025_remove,
.id_table = rx8025_id,
};
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index d84825124a7a..b0c272658fa2 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -228,8 +228,8 @@ static const struct rtc_class_ops rx8581_rtc_ops = {
.set_time = rx8581_rtc_set_time,
};
-static int __devinit rx8581_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx8581_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct rtc_device *rtc;
@@ -251,7 +251,7 @@ static int __devinit rx8581_probe(struct i2c_client *client,
return 0;
}
-static int __devexit rx8581_remove(struct i2c_client *client)
+static int rx8581_remove(struct i2c_client *client)
{
struct rtc_device *rtc = i2c_get_clientdata(client);
@@ -272,7 +272,7 @@ static struct i2c_driver rx8581_driver = {
.owner = THIS_MODULE,
},
.probe = rx8581_probe,
- .remove = __devexit_p(rx8581_remove),
+ .remove = rx8581_remove,
.id_table = rx8581_id,
};
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a7a2a998fa91..fb994e9ddc15 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -47,8 +47,6 @@ struct s3c_rtc_drv_data {
/* I have yet to find an S3C implementation with more than one
* of these rtc blocks in */
-static struct resource *s3c_rtc_mem;
-
static struct clk *rtc_clk;
static void __iomem *s3c_rtc_base;
static int s3c_rtc_alarmno = NO_IRQ;
@@ -117,7 +115,7 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
{
unsigned int tmp;
- pr_debug("%s: aie=%d\n", __func__, enabled);
+ dev_dbg(dev, "%s: aie=%d\n", __func__, enabled);
clk_enable(rtc_clk);
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
@@ -205,7 +203,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_year += 100;
- pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
+ dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
@@ -220,7 +218,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
+ dev_dbg(dev, "set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -261,7 +259,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
- pr_debug("read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
+ dev_dbg(dev, "read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alm_en,
1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
@@ -312,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned int alrm_en;
clk_enable(rtc_clk);
- pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
+ dev_dbg(dev, "s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -335,7 +333,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
writeb(bin2bcd(tm->tm_hour), base + S3C2410_ALMHOUR);
}
- pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
+ dev_dbg(dev, "setting S3C2410_RTCALM to %08x\n", alrm_en);
writeb(alrm_en, base + S3C2410_RTCALM);
@@ -423,25 +421,17 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
clk_disable(rtc_clk);
}
-static int __devexit s3c_rtc_remove(struct platform_device *dev)
+static int s3c_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
- free_irq(s3c_rtc_alarmno, rtc);
- free_irq(s3c_rtc_tickno, rtc);
-
platform_set_drvdata(dev, NULL);
rtc_device_unregister(rtc);
s3c_rtc_setaie(&dev->dev, 0);
- clk_put(rtc_clk);
rtc_clk = NULL;
- iounmap(s3c_rtc_base);
- release_resource(s3c_rtc_mem);
- kfree(s3c_rtc_mem);
-
return 0;
}
@@ -461,7 +451,7 @@ static inline int s3c_rtc_get_driver_data(struct platform_device *pdev)
return platform_get_device_id(pdev)->driver_data;
}
-static int __devinit s3c_rtc_probe(struct platform_device *pdev)
+static int s3c_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct rtc_time rtc_tm;
@@ -469,7 +459,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
int ret;
int tmp;
- pr_debug("%s: probe=%p\n", __func__, pdev);
+ dev_dbg(&pdev->dev, "%s: probe=%p\n", __func__, pdev);
/* find the IRQs */
@@ -485,7 +475,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
return s3c_rtc_alarmno;
}
- pr_debug("s3c2410_rtc: tick irq %d, alarm irq %d\n",
+ dev_dbg(&pdev->dev, "s3c2410_rtc: tick irq %d, alarm irq %d\n",
s3c_rtc_tickno, s3c_rtc_alarmno);
/* get the memory region */
@@ -496,28 +486,16 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
return -ENOENT;
}
- s3c_rtc_mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
-
- if (s3c_rtc_mem == NULL) {
- dev_err(&pdev->dev, "failed to reserve memory region\n");
- ret = -ENOENT;
- goto err_nores;
- }
-
- s3c_rtc_base = ioremap(res->start, resource_size(res));
- if (s3c_rtc_base == NULL) {
- dev_err(&pdev->dev, "failed ioremap()\n");
- ret = -EINVAL;
- goto err_nomap;
- }
+ s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(s3c_rtc_base))
+ return PTR_ERR(s3c_rtc_base);
- rtc_clk = clk_get(&pdev->dev, "rtc");
+ rtc_clk = devm_clk_get(&pdev->dev, "rtc");
if (IS_ERR(rtc_clk)) {
dev_err(&pdev->dev, "failed to find rtc clock source\n");
ret = PTR_ERR(rtc_clk);
rtc_clk = NULL;
- goto err_clk;
+ return ret;
}
clk_enable(rtc_clk);
@@ -526,7 +504,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_enable(pdev, 1);
- pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+ dev_dbg(&pdev->dev, "s3c2410_rtc: RTCCON=%02x\n",
readw(s3c_rtc_base + S3C2410_RTCCON));
device_init_wakeup(&pdev->dev, 1);
@@ -576,28 +554,24 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_setfreq(&pdev->dev, 1);
- ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
+ ret = devm_request_irq(&pdev->dev, s3c_rtc_alarmno, s3c_rtc_alarmirq,
0, "s3c2410-rtc alarm", rtc);
if (ret) {
dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
goto err_alarm_irq;
}
- ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
+ ret = devm_request_irq(&pdev->dev, s3c_rtc_tickno, s3c_rtc_tickirq,
0, "s3c2410-rtc tick", rtc);
if (ret) {
dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
- free_irq(s3c_rtc_alarmno, rtc);
- goto err_tick_irq;
+ goto err_alarm_irq;
}
clk_disable(rtc_clk);
return 0;
- err_tick_irq:
- free_irq(s3c_rtc_alarmno, rtc);
-
err_alarm_irq:
platform_set_drvdata(pdev, NULL);
rtc_device_unregister(rtc);
@@ -605,15 +579,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
err_nortc:
s3c_rtc_enable(pdev, 0);
clk_disable(rtc_clk);
- clk_put(rtc_clk);
- err_clk:
- iounmap(s3c_rtc_base);
-
- err_nomap:
- release_resource(s3c_rtc_mem);
-
- err_nores:
return ret;
}
@@ -695,8 +661,6 @@ static const struct of_device_id s3c_rtc_dt_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, s3c_rtc_dt_match);
-#else
-#define s3c_rtc_dt_match NULL
#endif
static struct platform_device_id s3c_rtc_driver_ids[] = {
@@ -720,14 +684,14 @@ MODULE_DEVICE_TABLE(platform, s3c_rtc_driver_ids);
static struct platform_driver s3c_rtc_driver = {
.probe = s3c_rtc_probe,
- .remove = __devexit_p(s3c_rtc_remove),
+ .remove = s3c_rtc_remove,
.suspend = s3c_rtc_suspend,
.resume = s3c_rtc_resume,
.id_table = s3c_rtc_driver_ids,
.driver = {
.name = "s3c-rtc",
.owner = THIS_MODULE,
- .of_match_table = s3c_rtc_dt_match,
+ .of_match_table = of_match_ptr(s3c_rtc_dt_match),
},
};
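
The rtc-s3c conversion above replaces request_mem_region()/ioremap(), clk_get() and request_irq() with their devm_* managed counterparts, which the driver core releases automatically on probe failure or unbind; that is why the matching iounmap(), release_resource(), clk_put() and free_irq() calls and several error labels disappear from probe() and remove(). A minimal probe sketch of that shape, with hypothetical foo_* names:

/* Illustrative sketch only; foo_* names are hypothetical. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t foo_rtc_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_rtc_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *clk;
	int irq, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);		/* nothing to unwind by hand */

	clk = devm_clk_get(&pdev->dev, "rtc");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, foo_rtc_irq, 0,
			       dev_name(&pdev->dev), pdev);
	if (ret)
		return ret;

	/* register the RTC class device here; all devm resources are
	 * released automatically on failure or driver unbind */
	return 0;
}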
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 50a5c4adee48..5ec5036df0bc 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -108,9 +108,6 @@ static int sa1100_rtc_open(struct device *dev)
struct rtc_device *rtc = info->rtc;
int ret;
- ret = clk_prepare_enable(info->clk);
- if (ret)
- goto fail_clk;
ret = request_irq(info->irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", dev);
if (ret) {
dev_err(dev, "IRQ %d already in use.\n", info->irq_1hz);
@@ -130,7 +127,6 @@ static int sa1100_rtc_open(struct device *dev)
free_irq(info->irq_1hz, dev);
fail_ui:
clk_disable_unprepare(info->clk);
- fail_clk:
return ret;
}
@@ -144,7 +140,6 @@ static void sa1100_rtc_release(struct device *dev)
free_irq(info->irq_alarm, dev);
free_irq(info->irq_1hz, dev);
- clk_disable_unprepare(info->clk);
}
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
@@ -253,6 +248,9 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
spin_lock_init(&info->lock);
platform_set_drvdata(pdev, info);
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ goto err_enable_clk;
/*
* According to the manual we should be able to let RTTR be zero
* and then a default diviser for a 32.768KHz clock is used.
@@ -305,6 +303,8 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
return 0;
err_dev:
+ clk_disable_unprepare(info->clk);
+err_enable_clk:
platform_set_drvdata(pdev, NULL);
clk_put(info->clk);
err_clk:
@@ -318,6 +318,7 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
if (info) {
rtc_device_unregister(info->rtc);
+ clk_disable_unprepare(info->clk);
clk_put(info->clk);
platform_set_drvdata(pdev, NULL);
kfree(info);
@@ -349,12 +350,14 @@ static const struct dev_pm_ops sa1100_rtc_pm_ops = {
};
#endif
+#ifdef CONFIG_OF
static struct of_device_id sa1100_rtc_dt_ids[] = {
{ .compatible = "mrvl,sa1100-rtc", },
{ .compatible = "mrvl,mmp-rtc", },
{}
};
MODULE_DEVICE_TABLE(of, sa1100_rtc_dt_ids);
+#endif
static struct platform_driver sa1100_rtc_driver = {
.probe = sa1100_rtc_probe,
@@ -364,7 +367,7 @@ static struct platform_driver sa1100_rtc_driver = {
#ifdef CONFIG_PM
.pm = &sa1100_rtc_pm_ops,
#endif
- .of_match_table = sa1100_rtc_dt_ids,
+ .of_match_table = of_match_ptr(sa1100_rtc_dt_ids),
},
};
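
Here, and in the s3c, snvs and stmp3xxx hunks, the of_device_id table is wrapped in #ifdef CONFIG_OF and referenced through of_match_ptr(), which evaluates to NULL when CONFIG_OF is not set; that removes the need for a separate "#define ..._dt_match NULL" fallback. A sketch of the idiom with a hypothetical driver (probe/remove omitted):

/* Illustrative sketch only; the compatible string and names are hypothetical. */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id foo_rtc_dt_ids[] = {
	{ .compatible = "vendor,foo-rtc", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_rtc_dt_ids);
#endif

static struct platform_driver foo_rtc_driver = {
	.driver = {
		.name = "foo-rtc",
		.owner = THIS_MODULE,
		/* NULL without CONFIG_OF, the table otherwise */
		.of_match_table = of_match_ptr(foo_rtc_dt_ids),
	},
	/* .probe / .remove omitted in this sketch */
};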
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 3c0da333f465..f7d90703db5e 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -241,7 +241,7 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id)
return events ? IRQ_HANDLED : IRQ_NONE;
}
-static int __devinit snvs_rtc_probe(struct platform_device *pdev)
+static int snvs_rtc_probe(struct platform_device *pdev)
{
struct snvs_rtc_data *data;
struct resource *res;
@@ -252,9 +252,9 @@ static int __devinit snvs_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
- if (!data->ioaddr)
- return -EADDRNOTAVAIL;
+ data->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->ioaddr))
+ return PTR_ERR(data->ioaddr);
data->irq = platform_get_irq(pdev, 0);
if (data->irq < 0)
@@ -294,7 +294,7 @@ static int __devinit snvs_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit snvs_rtc_remove(struct platform_device *pdev)
+static int snvs_rtc_remove(struct platform_device *pdev)
{
struct snvs_rtc_data *data = platform_get_drvdata(pdev);
@@ -327,7 +327,7 @@ static int snvs_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(snvs_rtc_pm_ops, snvs_rtc_suspend, snvs_rtc_resume);
-static const struct of_device_id __devinitconst snvs_dt_ids[] = {
+static const struct of_device_id snvs_dt_ids[] = {
{ .compatible = "fsl,sec-v4.0-mon-rtc-lp", },
{ /* sentinel */ }
};
@@ -338,10 +338,10 @@ static struct platform_driver snvs_rtc_driver = {
.name = "snvs_rtc",
.owner = THIS_MODULE,
.pm = &snvs_rtc_pm_ops,
- .of_match_table = snvs_dt_ids,
+ .of_match_table = of_match_ptr(snvs_dt_ids),
},
.probe = snvs_rtc_probe,
- .remove = __devexit_p(snvs_rtc_remove),
+ .remove = snvs_rtc_remove,
};
module_platform_driver(snvs_rtc_driver);
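
devm_request_and_ioremap() returned NULL on failure, forcing callers to invent an errno such as the -EADDRNOTAVAIL dropped above; devm_ioremap_resource() instead encodes the real errno in the returned pointer, unpacked with IS_ERR()/PTR_ERR(). The userspace sketch below only mimics the <linux/err.h> convention to make the encoding concrete; the constants and helpers are simplified, not the kernel's.

/* Userspace sketch that mimics the <linux/err.h> convention; simplified. */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *base = ERR_PTR(-12);	/* pretend the mapping failed with -ENOMEM */

	if (IS_ERR(base))
		printf("mapping failed, errno %ld\n", -PTR_ERR(base));
	return 0;
}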
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index bb507d23f6ce..a18c3192ed40 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -351,7 +351,7 @@ static struct rtc_class_ops spear_rtc_ops = {
.alarm_irq_enable = spear_alarm_irq_enable,
};
-static int __devinit spear_rtc_probe(struct platform_device *pdev)
+static int spear_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct spear_rtc_config *config;
@@ -363,35 +363,40 @@ static int __devinit spear_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "no resource defined\n");
return -EBUSY;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "rtc region already claimed\n");
- return -EBUSY;
- }
- config = kzalloc(sizeof(*config), GFP_KERNEL);
+ config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
if (!config) {
dev_err(&pdev->dev, "out of memory\n");
- status = -ENOMEM;
- goto err_release_region;
+ return -ENOMEM;
}
- config->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(config->clk)) {
- status = PTR_ERR(config->clk);
- goto err_kfree;
+ /* alarm irqs */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no update irq?\n");
+ return irq;
}
- status = clk_enable(config->clk);
- if (status < 0)
- goto err_clk_put;
-
- config->ioaddr = ioremap(res->start, resource_size(res));
- if (!config->ioaddr) {
- dev_err(&pdev->dev, "ioremap fail\n");
- status = -ENOMEM;
- goto err_disable_clock;
+ status = devm_request_irq(&pdev->dev, irq, spear_rtc_irq, 0, pdev->name,
+ config);
+ if (status) {
+ dev_err(&pdev->dev, "Alarm interrupt IRQ%d already claimed\n",
+ irq);
+ return status;
}
+ config->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(config->ioaddr))
+ return PTR_ERR(config->ioaddr);
+
+ config->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(config->clk))
+ return PTR_ERR(config->clk);
+
+ status = clk_prepare_enable(config->clk);
+ if (status < 0)
+ return status;
+
spin_lock_init(&config->lock);
platform_set_drvdata(pdev, config);
@@ -401,67 +406,31 @@ static int __devinit spear_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
PTR_ERR(config->rtc));
status = PTR_ERR(config->rtc);
- goto err_iounmap;
- }
-
- /* alarm irqs */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no update irq?\n");
- status = irq;
- goto err_clear_platdata;
+ goto err_disable_clock;
}
- status = request_irq(irq, spear_rtc_irq, 0, pdev->name, config);
- if (status) {
- dev_err(&pdev->dev, "Alarm interrupt IRQ%d already \
- claimed\n", irq);
- goto err_clear_platdata;
- }
+ config->rtc->uie_unsupported = 1;
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
return 0;
-err_clear_platdata:
- platform_set_drvdata(pdev, NULL);
- rtc_device_unregister(config->rtc);
-err_iounmap:
- iounmap(config->ioaddr);
err_disable_clock:
- clk_disable(config->clk);
-err_clk_put:
- clk_put(config->clk);
-err_kfree:
- kfree(config);
-err_release_region:
- release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ clk_disable_unprepare(config->clk);
return status;
}
-static int __devexit spear_rtc_remove(struct platform_device *pdev)
+static int spear_rtc_remove(struct platform_device *pdev)
{
struct spear_rtc_config *config = platform_get_drvdata(pdev);
- int irq;
- struct resource *res;
- /* leave rtc running, but disable irqs */
+ rtc_device_unregister(config->rtc);
spear_rtc_disable_interrupt(config);
+ clk_disable_unprepare(config->clk);
device_init_wakeup(&pdev->dev, 0);
- irq = platform_get_irq(pdev, 0);
- if (irq)
- free_irq(irq, pdev);
- clk_disable(config->clk);
- clk_put(config->clk);
- iounmap(config->ioaddr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
- rtc_device_unregister(config->rtc);
- kfree(config);
return 0;
}
@@ -528,7 +497,7 @@ MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
static struct platform_driver spear_rtc_driver = {
.probe = spear_rtc_probe,
- .remove = __devexit_p(spear_rtc_remove),
+ .remove = spear_rtc_remove,
.suspend = spear_rtc_suspend,
.resume = spear_rtc_resume,
.shutdown = spear_rtc_shutdown,
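
The spear probe/remove rework also moves from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(): under the common clock framework a clock must first be prepared (a step that may sleep) before the atomic enable, and the combined helpers keep the two steps paired. A short sketch, with hypothetical foo_* names:

/* Illustrative sketch only; foo_start() and the device are hypothetical. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_start(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;

	/* ... program the hardware ... */

	clk_disable_unprepare(clk);	/* always undo both steps together */
	return 0;
}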
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 279f5cfa691a..7e4a6f65cb91 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -285,7 +285,7 @@ static struct bin_attribute stk17ta8_nvram_attr = {
.write = stk17ta8_nvram_write,
};
-static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
+static int stk17ta8_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
unsigned int cal;
@@ -347,7 +347,7 @@ static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit stk17ta8_rtc_remove(struct platform_device *pdev)
+static int stk17ta8_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
@@ -363,7 +363,7 @@ MODULE_ALIAS("platform:stk17ta8");
static struct platform_driver stk17ta8_rtc_driver = {
.probe = stk17ta8_rtc_probe,
- .remove = __devexit_p(stk17ta8_rtc_remove),
+ .remove = stk17ta8_rtc_remove,
.driver = {
.name = "stk17ta8",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 739ef55694f4..b2a8ed99b2bf 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -26,6 +26,7 @@
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/of_device.h>
+#include <linux/of.h>
#include <mach/common.h>
@@ -280,7 +281,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
.driver = {
.name = "stmp3xxx-rtc",
.owner = THIS_MODULE,
- .of_match_table = rtc_dt_ids,
+ .of_match_table = of_match_ptr(rtc_dt_ids),
},
};
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index 5b2261052a65..59b5c2dcb58c 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -3,6 +3,8 @@
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -26,10 +28,10 @@ retry:
udelay(100);
goto retry;
}
- printk(KERN_WARNING "SUN4V: tod_get() timed out.\n");
+ pr_warn("tod_get() timed out.\n");
return 0;
}
- printk(KERN_WARNING "SUN4V: tod_get() not supported.\n");
+ pr_warn("tod_get() not supported.\n");
return 0;
}
@@ -53,10 +55,10 @@ retry:
udelay(100);
goto retry;
}
- printk(KERN_WARNING "SUN4V: tod_set() timed out.\n");
+ pr_warn("tod_set() timed out.\n");
return -EAGAIN;
}
- printk(KERN_WARNING "SUN4V: tod_set() not supported.\n");
+ pr_warn("tod_set() not supported.\n");
return -EOPNOTSUPP;
}
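
The rtc-sun4v hunk defines pr_fmt() before the first include, so the pr_warn() calls that replace the hand-written "SUN4V:" prefix are still tagged, now with the module name. Sketch of the idiom (the definition must come before the printk.h include chain; foo_init() is hypothetical):

/* Illustrative sketch only; foo_init() is hypothetical. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/kernel.h>
#include <linux/module.h>

static int __init foo_init(void)
{
	/* printed as "<module name>: tod_get() timed out." */
	pr_warn("tod_get() timed out.\n");
	return 0;
}
module_init(foo_init);

MODULE_LICENSE("GPL");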
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index c006025cecc8..7c033756d6b5 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -303,7 +303,13 @@ static struct rtc_class_ops tegra_rtc_ops = {
.alarm_irq_enable = tegra_rtc_alarm_irq_enable,
};
-static int __devinit tegra_rtc_probe(struct platform_device *pdev)
+static const struct of_device_id tegra_rtc_dt_match[] = {
+ { .compatible = "nvidia,tegra20-rtc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_rtc_dt_match);
+
+static int tegra_rtc_probe(struct platform_device *pdev)
{
struct tegra_rtc_info *info;
struct resource *res;
@@ -321,11 +327,9 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
return -EBUSY;
}
- info->rtc_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!info->rtc_base) {
- dev_err(&pdev->dev, "Unable to request mem region and grab IOs for device.\n");
- return -EBUSY;
- }
+ info->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->rtc_base))
+ return PTR_ERR(info->rtc_base);
info->tegra_rtc_irq = platform_get_irq(pdev, 0);
if (info->tegra_rtc_irq <= 0)
@@ -375,7 +379,7 @@ err_dev_unreg:
return ret;
}
-static int __devexit tegra_rtc_remove(struct platform_device *pdev)
+static int tegra_rtc_remove(struct platform_device *pdev)
{
struct tegra_rtc_info *info = platform_get_drvdata(pdev);
@@ -435,11 +439,12 @@ static void tegra_rtc_shutdown(struct platform_device *pdev)
MODULE_ALIAS("platform:tegra_rtc");
static struct platform_driver tegra_rtc_driver = {
- .remove = __devexit_p(tegra_rtc_remove),
+ .remove = tegra_rtc_remove,
.shutdown = tegra_rtc_shutdown,
.driver = {
.name = "tegra_rtc",
.owner = THIS_MODULE,
+ .of_match_table = tegra_rtc_dt_match,
},
#ifdef CONFIG_PM
.suspend = tegra_rtc_suspend,
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 7e96254bd365..b92e0f6383e6 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -119,7 +119,7 @@ err:
return err;
}
-static int __devexit test_remove(struct platform_device *plat_dev)
+static int test_remove(struct platform_device *plat_dev)
{
struct rtc_device *rtc = platform_get_drvdata(plat_dev);
@@ -131,7 +131,7 @@ static int __devexit test_remove(struct platform_device *plat_dev)
static struct platform_driver test_driver = {
.probe = test_probe,
- .remove = __devexit_p(test_remove),
+ .remove = test_remove,
.driver = {
.name = "rtc-test",
.owner = THIS_MODULE,
@@ -152,24 +152,24 @@ static int __init test_init(void)
if ((test1 = platform_device_alloc("rtc-test", 1)) == NULL) {
err = -ENOMEM;
- goto exit_free_test0;
+ goto exit_put_test0;
}
if ((err = platform_device_add(test0)))
- goto exit_free_test1;
+ goto exit_put_test1;
if ((err = platform_device_add(test1)))
- goto exit_device_unregister;
+ goto exit_del_test0;
return 0;
-exit_device_unregister:
- platform_device_unregister(test0);
+exit_del_test0:
+ platform_device_del(test0);
-exit_free_test1:
+exit_put_test1:
platform_device_put(test1);
-exit_free_test0:
+exit_put_test0:
platform_device_put(test0);
exit_driver_unregister:
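
The rtc-test fix above tightens the unwinding rule for platform devices: a device that has only been allocated is dropped with platform_device_put(), while one that was also added needs platform_device_del() (or platform_device_unregister(), which is del + put); unregistering a never-added device, as the old error path did, is wrong. A minimal sketch with a hypothetical device name:

/* Illustrative sketch only; the "foo" device is hypothetical. */
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init foo_init(void)
{
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc("foo", 0);
	if (!pdev)
		return -ENOMEM;

	err = platform_device_add(pdev);
	if (err) {
		platform_device_put(pdev);	/* allocated but never added */
		return err;
	}

	/* once added, tear down with platform_device_del() followed by
	 * platform_device_put(), or platform_device_unregister() */
	return 0;
}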
diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c
index eb65dafee66e..62db4841078b 100644
--- a/drivers/rtc/rtc-tile.c
+++ b/drivers/rtc/rtc-tile.c
@@ -76,7 +76,7 @@ static const struct rtc_class_ops tile_rtc_ops = {
/*
* Device probe routine.
*/
-static int __devinit tile_rtc_probe(struct platform_device *dev)
+static int tile_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
@@ -94,7 +94,7 @@ static int __devinit tile_rtc_probe(struct platform_device *dev)
/*
* Device cleanup routine.
*/
-static int __devexit tile_rtc_remove(struct platform_device *dev)
+static int tile_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
@@ -112,7 +112,7 @@ static struct platform_driver tile_rtc_platform_driver = {
.owner = THIS_MODULE,
},
.probe = tile_rtc_probe,
- .remove = __devexit_p(tile_rtc_remove),
+ .remove = tile_rtc_remove,
};
/*
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
new file mode 100644
index 000000000000..aab4e8c93622
--- /dev/null
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -0,0 +1,356 @@
+/*
+ * rtc-tps6586x.c: RTC driver for TI PMIC TPS6586X
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define RTC_CTRL 0xc0
+#define POR_RESET_N BIT(7)
+#define OSC_SRC_SEL BIT(6)
+#define RTC_ENABLE BIT(5) /* enables alarm */
+#define RTC_BUF_ENABLE BIT(4) /* 32 KHz buffer enable */
+#define PRE_BYPASS BIT(3) /* 0=1KHz or 1=32KHz updates */
+#define CL_SEL_MASK (BIT(2)|BIT(1))
+#define CL_SEL_POS 1
+#define RTC_ALARM1_HI 0xc1
+#define RTC_COUNT4 0xc6
+
+/* start a PMU RTC access by reading the register prior to the RTC_COUNT4 */
+#define RTC_COUNT4_DUMMYREAD 0xc5
+
+/* the alarm range is only 14 bits wide, in seconds */
+#define ALM1_VALID_RANGE_IN_SEC 0x3FFF
+
+#define TPS6586X_RTC_CL_SEL_1_5PF 0x0
+#define TPS6586X_RTC_CL_SEL_6_5PF 0x1
+#define TPS6586X_RTC_CL_SEL_7_5PF 0x2
+#define TPS6586X_RTC_CL_SEL_12_5PF 0x3
+
+struct tps6586x_rtc {
+ struct device *dev;
+ struct rtc_device *rtc;
+ int irq;
+ bool irq_en;
+ unsigned long long epoch_start;
+};
+
+static inline struct device *to_tps6586x_dev(struct device *dev)
+{
+ return dev->parent;
+}
+
+static int tps6586x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long long ticks = 0;
+ unsigned long seconds;
+ u8 buff[6];
+ int ret;
+ int i;
+
+ ret = tps6586x_reads(tps_dev, RTC_COUNT4_DUMMYREAD, sizeof(buff), buff);
+ if (ret < 0) {
+ dev_err(dev, "read counter failed with err %d\n", ret);
+ return ret;
+ }
+
+ for (i = 1; i < sizeof(buff); i++) {
+ ticks <<= 8;
+ ticks |= buff[i];
+ }
+
+ seconds = ticks >> 10;
+ seconds += rtc->epoch_start;
+ rtc_time_to_tm(seconds, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int tps6586x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long long ticks;
+ unsigned long seconds;
+ u8 buff[5];
+ int ret;
+
+ rtc_tm_to_time(tm, &seconds);
+ if (seconds < rtc->epoch_start) {
+ dev_err(dev, "requested time unsupported\n");
+ return -EINVAL;
+ }
+ seconds -= rtc->epoch_start;
+
+ ticks = (unsigned long long)seconds << 10;
+ buff[0] = (ticks >> 32) & 0xff;
+ buff[1] = (ticks >> 24) & 0xff;
+ buff[2] = (ticks >> 16) & 0xff;
+ buff[3] = (ticks >> 8) & 0xff;
+ buff[4] = ticks & 0xff;
+
+ /* Disable RTC before changing time */
+ ret = tps6586x_clr_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+ if (ret < 0) {
+ dev_err(dev, "failed to clear RTC_ENABLE\n");
+ return ret;
+ }
+
+ ret = tps6586x_writes(tps_dev, RTC_COUNT4, sizeof(buff), buff);
+ if (ret < 0) {
+ dev_err(dev, "failed to program new time\n");
+ return ret;
+ }
+
+ /* Enable RTC */
+ ret = tps6586x_set_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+ if (ret < 0) {
+ dev_err(dev, "failed to set RTC_ENABLE\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int tps6586x_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+
+ if (enabled && !rtc->irq_en) {
+ enable_irq(rtc->irq);
+ rtc->irq_en = true;
+ } else if (!enabled && rtc->irq_en) {
+ disable_irq(rtc->irq);
+ rtc->irq_en = false;
+ }
+ return 0;
+}
+
+static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long seconds;
+ unsigned long ticks;
+ unsigned long rtc_current_time;
+ unsigned long long rticks = 0;
+ u8 buff[3];
+ u8 rbuff[6];
+ int ret;
+ int i;
+
+ rtc_tm_to_time(&alrm->time, &seconds);
+
+ if (alrm->enabled && (seconds < rtc->epoch_start)) {
+ dev_err(dev, "can't set alarm to requested time\n");
+ return -EINVAL;
+ }
+
+ ret = tps6586x_rtc_alarm_irq_enable(dev, alrm->enabled);
+ if (ret < 0) {
+ dev_err(dev, "can't set alarm irq, err %d\n", ret);
+ return ret;
+ }
+
+ seconds -= rtc->epoch_start;
+ ret = tps6586x_reads(tps_dev, RTC_COUNT4_DUMMYREAD,
+ sizeof(rbuff), rbuff);
+ if (ret < 0) {
+ dev_err(dev, "read counter failed with err %d\n", ret);
+ return ret;
+ }
+
+ for (i = 1; i < sizeof(rbuff); i++) {
+ rticks <<= 8;
+ rticks |= rbuff[i];
+ }
+
+ rtc_current_time = rticks >> 10;
+ if ((seconds - rtc_current_time) > ALM1_VALID_RANGE_IN_SEC)
+ seconds = rtc_current_time - 1;
+
+ ticks = (unsigned long long)seconds << 10;
+ buff[0] = (ticks >> 16) & 0xff;
+ buff[1] = (ticks >> 8) & 0xff;
+ buff[2] = ticks & 0xff;
+
+ ret = tps6586x_writes(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
+ if (ret)
+ dev_err(dev, "programming alarm failed with err %d\n", ret);
+
+ return ret;
+}
+
+static int tps6586x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long ticks;
+ unsigned long seconds;
+ u8 buff[3];
+ int ret;
+
+ ret = tps6586x_reads(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
+ if (ret) {
+ dev_err(dev, "read RTC_ALARM1_HI failed with err %d\n", ret);
+ return ret;
+ }
+
+ ticks = (buff[0] << 16) | (buff[1] << 8) | buff[2];
+ seconds = ticks >> 10;
+ seconds += rtc->epoch_start;
+
+ rtc_time_to_tm(seconds, &alrm->time);
+ return 0;
+}
+
+static const struct rtc_class_ops tps6586x_rtc_ops = {
+ .read_time = tps6586x_rtc_read_time,
+ .set_time = tps6586x_rtc_set_time,
+ .set_alarm = tps6586x_rtc_set_alarm,
+ .read_alarm = tps6586x_rtc_read_alarm,
+ .alarm_irq_enable = tps6586x_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t tps6586x_rtc_irq(int irq, void *data)
+{
+ struct tps6586x_rtc *rtc = data;
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int tps6586x_rtc_probe(struct platform_device *pdev)
+{
+ struct device *tps_dev = to_tps6586x_dev(&pdev->dev);
+ struct tps6586x_rtc *rtc;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->dev = &pdev->dev;
+ rtc->irq = platform_get_irq(pdev, 0);
+
+ /* Set epoch start as 00:00:00:01:01:2009 */
+ rtc->epoch_start = mktime(2009, 1, 1, 0, 0, 0);
+
+ /* 1 kHz tick mode, enable tick counting */
+ ret = tps6586x_update(tps_dev, RTC_CTRL,
+ RTC_ENABLE | OSC_SRC_SEL |
+ ((TPS6586X_RTC_CL_SEL_1_5PF << CL_SEL_POS) & CL_SEL_MASK),
+ RTC_ENABLE | OSC_SRC_SEL | PRE_BYPASS | CL_SEL_MASK);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to start counter\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+ rtc->rtc = rtc_device_register(dev_name(&pdev->dev), &pdev->dev,
+ &tps6586x_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ ret = PTR_ERR(rtc->rtc);
+ dev_err(&pdev->dev, "RTC device register: ret %d\n", ret);
+ goto fail_rtc_register;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ tps6586x_rtc_irq,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ dev_name(&pdev->dev), rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request IRQ(%d) failed with ret %d\n",
+ rtc->irq, ret);
+ goto fail_req_irq;
+ }
+ disable_irq(rtc->irq);
+ device_set_wakeup_capable(&pdev->dev, 1);
+ return 0;
+
+fail_req_irq:
+ rtc_device_unregister(rtc->rtc);
+
+fail_rtc_register:
+ tps6586x_update(tps_dev, RTC_CTRL, 0,
+ RTC_ENABLE | OSC_SRC_SEL | PRE_BYPASS | CL_SEL_MASK);
+ return ret;
+};
+
+static int tps6586x_rtc_remove(struct platform_device *pdev)
+{
+ struct tps6586x_rtc *rtc = platform_get_drvdata(pdev);
+ struct device *tps_dev = to_tps6586x_dev(&pdev->dev);
+
+ tps6586x_update(tps_dev, RTC_CTRL, 0,
+ RTC_ENABLE | OSC_SRC_SEL | PRE_BYPASS | CL_SEL_MASK);
+ rtc_device_unregister(rtc->rtc);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tps6586x_rtc_suspend(struct device *dev)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(rtc->irq);
+ return 0;
+}
+
+static int tps6586x_rtc_resume(struct device *dev)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(rtc->irq);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tps6586x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tps6586x_rtc_suspend, tps6586x_rtc_resume)
+};
+
+static struct platform_driver tps6586x_rtc_driver = {
+ .driver = {
+ .name = "tps6586x-rtc",
+ .owner = THIS_MODULE,
+ .pm = &tps6586x_pm_ops,
+ },
+ .probe = tps6586x_rtc_probe,
+ .remove = tps6586x_rtc_remove,
+};
+module_platform_driver(tps6586x_rtc_driver);
+
+MODULE_ALIAS("platform:rtc-tps6586x");
+MODULE_DESCRIPTION("TI TPS6586x RTC driver");
+MODULE_AUTHOR("Laxman dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 073108dcf9e7..8bd8115329b5 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -22,13 +22,13 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>
struct tps65910_rtc {
struct rtc_device *rtc;
- /* To store the list of enabled interrupts */
- u32 irqstat;
+ int irq;
};
/* Total number of RTC registers needed to set time*/
@@ -222,7 +222,7 @@ static const struct rtc_class_ops tps65910_rtc_ops = {
.alarm_irq_enable = tps65910_rtc_alarm_irq_enable,
};
-static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
+static int tps65910_rtc_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = NULL;
struct tps65910_rtc *tps_rtc = NULL;
@@ -247,6 +247,13 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
return ret;
dev_dbg(&pdev->dev, "Enabling rtc-tps65910.\n");
+
+ /* Enable RTC digital power domain */
+ ret = regmap_update_bits(tps65910->regmap, TPS65910_DEVCTRL,
+ DEVCTRL_RTC_PWDN_MASK, 0 << DEVCTRL_RTC_PWDN_SHIFT);
+ if (ret < 0)
+ return ret;
+
rtc_reg = TPS65910_RTC_CTRL_STOP_RTC;
ret = regmap_write(tps65910->regmap, TPS65910_RTC_CTRL, rtc_reg);
if (ret < 0)
@@ -260,13 +267,14 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
- "rtc-tps65910", &pdev->dev);
+ tps65910_rtc_interrupt, IRQF_TRIGGER_LOW | IRQF_EARLY_RESUME,
+ dev_name(&pdev->dev), &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ tps_rtc->irq = irq;
+ device_set_wakeup_capable(&pdev->dev, 1);
tps_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&tps65910_rtc_ops, THIS_MODULE);
@@ -285,7 +293,7 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
* Disable tps65910 RTC interrupts.
* Sets status flag to free.
*/
-static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
+static int tps65910_rtc_remove(struct platform_device *pdev)
{
/* leave rtc running, but disable irqs */
struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);
@@ -297,49 +305,36 @@ static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-
static int tps65910_rtc_suspend(struct device *dev)
{
- struct tps65910 *tps = dev_get_drvdata(dev->parent);
- u8 alarm = TPS65910_RTC_INTERRUPTS_IT_ALARM;
- int ret;
-
- /* Store current list of enabled interrupts*/
- ret = regmap_read(tps->regmap, TPS65910_RTC_INTERRUPTS,
- &tps->rtc->irqstat);
- if (ret < 0)
- return ret;
+ struct tps65910_rtc *tps_rtc = dev_get_drvdata(dev);
- /* Enable RTC ALARM interrupt only */
- return regmap_write(tps->regmap, TPS65910_RTC_INTERRUPTS, alarm);
+ if (device_may_wakeup(dev))
+ enable_irq_wake(tps_rtc->irq);
+ return 0;
}
static int tps65910_rtc_resume(struct device *dev)
{
- struct tps65910 *tps = dev_get_drvdata(dev->parent);
+ struct tps65910_rtc *tps_rtc = dev_get_drvdata(dev);
- /* Restore list of enabled interrupts before suspend */
- return regmap_write(tps->regmap, TPS65910_RTC_INTERRUPTS,
- tps->rtc->irqstat);
+ if (device_may_wakeup(dev))
+ disable_irq_wake(tps_rtc->irq);
+ return 0;
}
+#endif
static const struct dev_pm_ops tps65910_rtc_pm_ops = {
- .suspend = tps65910_rtc_suspend,
- .resume = tps65910_rtc_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(tps65910_rtc_suspend, tps65910_rtc_resume)
};
-#define DEV_PM_OPS (&tps65910_rtc_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif
-
static struct platform_driver tps65910_rtc_driver = {
.probe = tps65910_rtc_probe,
- .remove = __devexit_p(tps65910_rtc_remove),
+ .remove = tps65910_rtc_remove,
.driver = {
.owner = THIS_MODULE,
.name = "tps65910-rtc",
- .pm = DEV_PM_OPS,
+ .pm = &tps65910_rtc_pm_ops,
},
};
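
Instead of saving and replaying the RTC interrupt mask, the reworked tps65910 suspend/resume above simply arms the RTC IRQ as a wakeup source when the device may wake the system, and the dev_pm_ops are built with SET_SYSTEM_SLEEP_PM_OPS(), which expands to empty initializers when CONFIG_PM_SLEEP is off. A sketch of that pattern with a hypothetical foo_rtc driver:

/* Illustrative sketch only; struct foo_rtc and its driver are hypothetical. */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

struct foo_rtc {
	int irq;
};

#ifdef CONFIG_PM_SLEEP
static int foo_rtc_suspend(struct device *dev)
{
	struct foo_rtc *rtc = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(rtc->irq);	/* keep the alarm IRQ armed */
	return 0;
}

static int foo_rtc_resume(struct device *dev)
{
	struct foo_rtc *rtc = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(rtc->irq);
	return 0;
}
#endif

/* Empty when CONFIG_PM_SLEEP is not set, so no DEV_PM_OPS/NULL dance. */
static const struct dev_pm_ops foo_rtc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_rtc_suspend, foo_rtc_resume)
};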
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c
new file mode 100644
index 000000000000..9aaf8aaebae9
--- /dev/null
+++ b/drivers/rtc/rtc-tps80031.c
@@ -0,0 +1,349 @@
+/*
+ * rtc-tps80031.c -- TI TPS80031/TPS80032 RTC driver
+ *
+ * RTC driver for TI TPS80031/TPS80032 Fully Integrated
+ * Power Management with Power Path and Battery Charger
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/bcd.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define ENABLE_ALARM_INT 0x08
+#define ALARM_INT_STATUS 0x40
+
+/**
+ * Setting the STOP_RTC bit to 1 runs the RTC and
+ * setting it to 0 freezes the RTC.
+ */
+#define STOP_RTC 0x1
+
+/* Power on reset Values of RTC registers */
+#define TPS80031_RTC_POR_YEAR 0
+#define TPS80031_RTC_POR_MONTH 1
+#define TPS80031_RTC_POR_DAY 1
+
+/* Numbers of registers for time and alarms */
+#define TPS80031_RTC_TIME_NUM_REGS 7
+#define TPS80031_RTC_ALARM_NUM_REGS 6
+
+/**
+ * The PMU RTC has only 2 nibbles to store year information, so an
+ * offset of 100 is used to set the base year to 2000 for this driver.
+ */
+#define RTC_YEAR_OFFSET 100
+
+struct tps80031_rtc {
+ struct rtc_device *rtc;
+ int irq;
+};
+
+static int tps80031_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[TPS80031_RTC_TIME_NUM_REGS];
+ int ret;
+
+ ret = tps80031_reads(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_SECONDS_REG, TPS80031_RTC_TIME_NUM_REGS, buff);
+ if (ret < 0) {
+ dev_err(dev, "reading RTC_SECONDS_REG failed, err = %d\n", ret);
+ return ret;
+ }
+
+ tm->tm_sec = bcd2bin(buff[0]);
+ tm->tm_min = bcd2bin(buff[1]);
+ tm->tm_hour = bcd2bin(buff[2]);
+ tm->tm_mday = bcd2bin(buff[3]);
+ tm->tm_mon = bcd2bin(buff[4]) - 1;
+ tm->tm_year = bcd2bin(buff[5]) + RTC_YEAR_OFFSET;
+ tm->tm_wday = bcd2bin(buff[6]);
+ return 0;
+}
+
+static int tps80031_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[7];
+ int ret;
+
+ buff[0] = bin2bcd(tm->tm_sec);
+ buff[1] = bin2bcd(tm->tm_min);
+ buff[2] = bin2bcd(tm->tm_hour);
+ buff[3] = bin2bcd(tm->tm_mday);
+ buff[4] = bin2bcd(tm->tm_mon + 1);
+ buff[5] = bin2bcd(tm->tm_year % RTC_YEAR_OFFSET);
+ buff[6] = bin2bcd(tm->tm_wday);
+
+ /* Stop RTC while updating the RTC time registers */
+ ret = tps80031_clr_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_CTRL_REG, STOP_RTC);
+ if (ret < 0) {
+ dev_err(dev->parent, "Stop RTC failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = tps80031_writes(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_SECONDS_REG,
+ TPS80031_RTC_TIME_NUM_REGS, buff);
+ if (ret < 0) {
+ dev_err(dev, "writing RTC_SECONDS_REG failed, err %d\n", ret);
+ return ret;
+ }
+
+ ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_CTRL_REG, STOP_RTC);
+ if (ret < 0)
+ dev_err(dev->parent, "Start RTC failed, err = %d\n", ret);
+ return ret;
+}
+
+static int tps80031_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enable)
+{
+ int ret;
+
+ if (enable)
+ ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_INTERRUPTS_REG, ENABLE_ALARM_INT);
+ else
+ ret = tps80031_clr_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_INTERRUPTS_REG, ENABLE_ALARM_INT);
+ if (ret < 0) {
+ dev_err(dev, "Update on RTC_INT failed, err = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int tps80031_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ u8 buff[TPS80031_RTC_ALARM_NUM_REGS];
+ int ret;
+
+ buff[0] = bin2bcd(alrm->time.tm_sec);
+ buff[1] = bin2bcd(alrm->time.tm_min);
+ buff[2] = bin2bcd(alrm->time.tm_hour);
+ buff[3] = bin2bcd(alrm->time.tm_mday);
+ buff[4] = bin2bcd(alrm->time.tm_mon + 1);
+ buff[5] = bin2bcd(alrm->time.tm_year % RTC_YEAR_OFFSET);
+ ret = tps80031_writes(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_ALARM_SECONDS_REG,
+ TPS80031_RTC_ALARM_NUM_REGS, buff);
+ if (ret < 0) {
+ dev_err(dev, "Writing RTC_ALARM failed, err %d\n", ret);
+ return ret;
+ }
+ return tps80031_rtc_alarm_irq_enable(dev, alrm->enabled);
+}
+
+static int tps80031_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ u8 buff[6];
+ int ret;
+
+ ret = tps80031_reads(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_ALARM_SECONDS_REG,
+ TPS80031_RTC_ALARM_NUM_REGS, buff);
+ if (ret < 0) {
+ dev_err(dev->parent,
+ "reading RTC_ALARM failed, err = %d\n", ret);
+ return ret;
+ }
+
+ alrm->time.tm_sec = bcd2bin(buff[0]);
+ alrm->time.tm_min = bcd2bin(buff[1]);
+ alrm->time.tm_hour = bcd2bin(buff[2]);
+ alrm->time.tm_mday = bcd2bin(buff[3]);
+ alrm->time.tm_mon = bcd2bin(buff[4]) - 1;
+ alrm->time.tm_year = bcd2bin(buff[5]) + RTC_YEAR_OFFSET;
+ return 0;
+}
+
+static int clear_alarm_int_status(struct device *dev, struct tps80031_rtc *rtc)
+{
+ int ret;
+ u8 buf;
+
+ /**
+ * As per the datasheet, a dummy read of the RTC_STATUS_REG register
+ * is necessary before each I2C read in order to update the status
+ * register value.
+ */
+ ret = tps80031_read(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_STATUS_REG, &buf);
+ if (ret < 0) {
+ dev_err(dev, "reading RTC_STATUS failed. err = %d\n", ret);
+ return ret;
+ }
+
+ /* clear Alarm status bits.*/
+ ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_STATUS_REG, ALARM_INT_STATUS);
+ if (ret < 0) {
+ dev_err(dev, "clear Alarm INT failed, err = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static irqreturn_t tps80031_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clear_alarm_int_status(dev, rtc);
+ if (ret < 0)
+ return ret;
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops tps80031_rtc_ops = {
+ .read_time = tps80031_rtc_read_time,
+ .set_time = tps80031_rtc_set_time,
+ .set_alarm = tps80031_rtc_set_alarm,
+ .read_alarm = tps80031_rtc_read_alarm,
+ .alarm_irq_enable = tps80031_rtc_alarm_irq_enable,
+};
+
+static int tps80031_rtc_probe(struct platform_device *pdev)
+{
+ struct tps80031_rtc *rtc;
+ struct rtc_time tm;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->irq = platform_get_irq(pdev, 0);
+ platform_set_drvdata(pdev, rtc);
+
+ /* Start RTC */
+ ret = tps80031_set_bits(pdev->dev.parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_CTRL_REG, STOP_RTC);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to start RTC. err = %d\n", ret);
+ return ret;
+ }
+
+ /* If the RTC has POR values, set the time to 01:01:2000 */
+ tps80031_rtc_read_time(&pdev->dev, &tm);
+ if ((tm.tm_year == RTC_YEAR_OFFSET + TPS80031_RTC_POR_YEAR) &&
+ (tm.tm_mon == (TPS80031_RTC_POR_MONTH - 1)) &&
+ (tm.tm_mday == TPS80031_RTC_POR_DAY)) {
+ tm.tm_year = 2000;
+ tm.tm_mday = 1;
+ tm.tm_mon = 1;
+ ret = tps80031_rtc_set_time(&pdev->dev, &tm);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "RTC set time failed, err = %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Clear alarm interrupt status if it is there */
+ ret = clear_alarm_int_status(&pdev->dev, rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clear alarm int failed, err = %d\n", ret);
+ return ret;
+ }
+
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &tps80031_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ ret = PTR_ERR(rtc->rtc);
+ dev_err(&pdev->dev, "RTC registration failed, err %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ tps80031_rtc_irq,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ dev_name(&pdev->dev), rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request IRQ:%d failed, err = %d\n",
+ rtc->irq, ret);
+ rtc_device_unregister(rtc->rtc);
+ return ret;
+ }
+ device_set_wakeup_capable(&pdev->dev, 1);
+ return 0;
+}
+
+static int tps80031_rtc_remove(struct platform_device *pdev)
+{
+ struct tps80031_rtc *rtc = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(rtc->rtc);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tps80031_rtc_suspend(struct device *dev)
+{
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(rtc->irq);
+ return 0;
+}
+
+static int tps80031_rtc_resume(struct device *dev)
+{
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(rtc->irq);
+ return 0;
+};
+#endif
+
+static const struct dev_pm_ops tps80031_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tps80031_rtc_suspend, tps80031_rtc_resume)
+};
+
+static struct platform_driver tps80031_rtc_driver = {
+ .driver = {
+ .name = "tps80031-rtc",
+ .owner = THIS_MODULE,
+ .pm = &tps80031_pm_ops,
+ },
+ .probe = tps80031_rtc_probe,
+ .remove = tps80031_rtc_remove,
+};
+
+module_platform_driver(tps80031_rtc_driver);
+
+MODULE_ALIAS("platform:tps80031-rtc");
+MODULE_DESCRIPTION("TI TPS80031/TPS80032 RTC driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 9277d945bf48..8bc6c80b184c 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -27,6 +27,7 @@
#include <linux/bcd.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/i2c/twl.h>
@@ -233,7 +234,7 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
*/
static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- unsigned char rtc_data[ALL_TIME_REGS + 1];
+ unsigned char rtc_data[ALL_TIME_REGS];
int ret;
u8 save_control;
u8 rtc_control;
@@ -300,15 +301,15 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char save_control;
- unsigned char rtc_data[ALL_TIME_REGS + 1];
+ unsigned char rtc_data[ALL_TIME_REGS];
int ret;
- rtc_data[1] = bin2bcd(tm->tm_sec);
- rtc_data[2] = bin2bcd(tm->tm_min);
- rtc_data[3] = bin2bcd(tm->tm_hour);
- rtc_data[4] = bin2bcd(tm->tm_mday);
- rtc_data[5] = bin2bcd(tm->tm_mon + 1);
- rtc_data[6] = bin2bcd(tm->tm_year - 100);
+ rtc_data[0] = bin2bcd(tm->tm_sec);
+ rtc_data[1] = bin2bcd(tm->tm_min);
+ rtc_data[2] = bin2bcd(tm->tm_hour);
+ rtc_data[3] = bin2bcd(tm->tm_mday);
+ rtc_data[4] = bin2bcd(tm->tm_mon + 1);
+ rtc_data[5] = bin2bcd(tm->tm_year - 100);
/* Stop RTC while updating the TC registers */
ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
@@ -341,7 +342,7 @@ out:
*/
static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
- unsigned char rtc_data[ALL_TIME_REGS + 1];
+ unsigned char rtc_data[ALL_TIME_REGS];
int ret;
ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
@@ -368,19 +369,19 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
- unsigned char alarm_data[ALL_TIME_REGS + 1];
+ unsigned char alarm_data[ALL_TIME_REGS];
int ret;
ret = twl_rtc_alarm_irq_enable(dev, 0);
if (ret)
goto out;
- alarm_data[1] = bin2bcd(alm->time.tm_sec);
- alarm_data[2] = bin2bcd(alm->time.tm_min);
- alarm_data[3] = bin2bcd(alm->time.tm_hour);
- alarm_data[4] = bin2bcd(alm->time.tm_mday);
- alarm_data[5] = bin2bcd(alm->time.tm_mon + 1);
- alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
+ alarm_data[0] = bin2bcd(alm->time.tm_sec);
+ alarm_data[1] = bin2bcd(alm->time.tm_min);
+ alarm_data[2] = bin2bcd(alm->time.tm_hour);
+ alarm_data[3] = bin2bcd(alm->time.tm_mday);
+ alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
+ alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
/* update all the alarm registers in one shot */
ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data,
@@ -458,7 +459,7 @@ static struct rtc_class_ops twl_rtc_ops = {
/*----------------------------------------------------------------------*/
-static int __devinit twl_rtc_probe(struct platform_device *pdev)
+static int twl_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
int ret = -EINVAL;
@@ -535,7 +536,7 @@ out1:
* Disable all TWL RTC module interrupts.
* Sets status flag to free.
*/
-static int __devexit twl_rtc_remove(struct platform_device *pdev)
+static int twl_rtc_remove(struct platform_device *pdev)
{
/* leave rtc running, but disable irqs */
struct rtc_device *rtc = platform_get_drvdata(pdev);
@@ -588,23 +589,26 @@ static int twl_rtc_resume(struct platform_device *pdev)
#define twl_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
static const struct of_device_id twl_rtc_of_match[] = {
{.compatible = "ti,twl4030-rtc", },
{ },
};
MODULE_DEVICE_TABLE(of, twl_rtc_of_match);
+#endif
+
MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
.probe = twl_rtc_probe,
- .remove = __devexit_p(twl_rtc_remove),
+ .remove = twl_rtc_remove,
.shutdown = twl_rtc_shutdown,
.suspend = twl_rtc_suspend,
.resume = twl_rtc_resume,
.driver = {
.owner = THIS_MODULE,
.name = "twl_rtc",
- .of_match_table = twl_rtc_of_match,
+ .of_match_table = of_match_ptr(twl_rtc_of_match),
},
};
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 5f60a7c6a155..f91be04b9050 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -280,7 +280,7 @@ static const struct rtc_class_ops vr41xx_rtc_ops = {
.set_alarm = vr41xx_rtc_set_alarm,
};
-static int __devinit rtc_probe(struct platform_device *pdev)
+static int rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct rtc_device *rtc;
@@ -352,7 +352,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
disable_irq(aie_irq);
disable_irq(pie_irq);
- printk(KERN_INFO "rtc: Real Time Clock of NEC VR4100 series\n");
+ dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
return 0;
@@ -373,7 +373,7 @@ err_rtc1_iounmap:
return retval;
}
-static int __devexit rtc_remove(struct platform_device *pdev)
+static int rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc;
@@ -398,7 +398,7 @@ MODULE_ALIAS("platform:RTC");
static struct platform_driver rtc_platform_driver = {
.probe = rtc_probe,
- .remove = __devexit_p(rtc_remove),
+ .remove = rtc_remove,
.driver = {
.name = rtc_name,
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index 07bf19364a74..a000bc0a8bff 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -70,7 +70,7 @@
| ALARM_SEC_BIT)
#define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */
-#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */
+#define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */
#define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */
#define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */
#define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */
@@ -119,7 +119,7 @@ static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S);
tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S);
tm->tm_mday = bcd2bin(date & DATE_DAY_MASK);
- tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S);
+ tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1;
tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
+ ((date >> DATE_CENTURY_S) & 1 ? 200 : 100);
tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S;
@@ -137,9 +137,10 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
- writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
- | (bin2bcd(tm->tm_mon) << DATE_MONTH_S)
- | (bin2bcd(tm->tm_mday)),
+ writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)
+ | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
+ | (bin2bcd(tm->tm_mday))
+ | ((tm->tm_year >= 200) << DATE_CENTURY_S),
vt8500_rtc->regbase + VT8500_RTC_DS);
writel((bin2bcd(tm->tm_wday) << TIME_DOW_S)
| (bin2bcd(tm->tm_hour) << TIME_HOUR_S)
@@ -205,12 +206,13 @@ static const struct rtc_class_ops vt8500_rtc_ops = {
.alarm_irq_enable = vt8500_alarm_irq_enable,
};
-static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
+static int vt8500_rtc_probe(struct platform_device *pdev)
{
struct vt8500_rtc *vt8500_rtc;
int ret;
- vt8500_rtc = kzalloc(sizeof(struct vt8500_rtc), GFP_KERNEL);
+ vt8500_rtc = devm_kzalloc(&pdev->dev,
+ sizeof(struct vt8500_rtc), GFP_KERNEL);
if (!vt8500_rtc)
return -ENOMEM;
@@ -220,36 +222,34 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!vt8500_rtc->res) {
dev_err(&pdev->dev, "No I/O memory resource defined\n");
- ret = -ENXIO;
- goto err_free;
+ return -ENXIO;
}
vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0);
if (vt8500_rtc->irq_alarm < 0) {
dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
- ret = -ENXIO;
- goto err_free;
+ return -ENXIO;
}
- vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
- resource_size(vt8500_rtc->res),
- "vt8500-rtc");
+ vt8500_rtc->res = devm_request_mem_region(&pdev->dev,
+ vt8500_rtc->res->start,
+ resource_size(vt8500_rtc->res),
+ "vt8500-rtc");
if (vt8500_rtc->res == NULL) {
dev_err(&pdev->dev, "failed to request I/O memory\n");
- ret = -EBUSY;
- goto err_free;
+ return -EBUSY;
}
- vt8500_rtc->regbase = ioremap(vt8500_rtc->res->start,
+ vt8500_rtc->regbase = devm_ioremap(&pdev->dev, vt8500_rtc->res->start,
resource_size(vt8500_rtc->res));
if (!vt8500_rtc->regbase) {
dev_err(&pdev->dev, "Unable to map RTC I/O memory\n");
ret = -EBUSY;
- goto err_release;
+ goto err_return;
}
/* Enable RTC and set it to 24-hour mode */
- writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
+ writel(VT8500_RTC_CR_ENABLE,
vt8500_rtc->regbase + VT8500_RTC_CR);
vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
@@ -258,11 +258,11 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
ret = PTR_ERR(vt8500_rtc->rtc);
dev_err(&pdev->dev,
"Failed to register RTC device -> %d\n", ret);
- goto err_unmap;
+ goto err_return;
}
- ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
- "rtc alarm", vt8500_rtc);
+ ret = devm_request_irq(&pdev->dev, vt8500_rtc->irq_alarm,
+ vt8500_rtc_irq, 0, "rtc alarm", vt8500_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
vt8500_rtc->irq_alarm, ret);
@@ -273,31 +273,19 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
err_unreg:
rtc_device_unregister(vt8500_rtc->rtc);
-err_unmap:
- iounmap(vt8500_rtc->regbase);
-err_release:
- release_mem_region(vt8500_rtc->res->start,
- resource_size(vt8500_rtc->res));
-err_free:
- kfree(vt8500_rtc);
+err_return:
return ret;
}
-static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
+static int vt8500_rtc_remove(struct platform_device *pdev)
{
struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
- free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
-
rtc_device_unregister(vt8500_rtc->rtc);
/* Disable alarm matching */
writel(0, vt8500_rtc->regbase + VT8500_RTC_IS);
- iounmap(vt8500_rtc->regbase);
- release_mem_region(vt8500_rtc->res->start,
- resource_size(vt8500_rtc->res));
- kfree(vt8500_rtc);
platform_set_drvdata(pdev, NULL);
return 0;
@@ -310,7 +298,7 @@ static const struct of_device_id wmt_dt_ids[] = {
static struct platform_driver vt8500_rtc_driver = {
.probe = vt8500_rtc_probe,
- .remove = __devexit_p(vt8500_rtc_remove),
+ .remove = vt8500_rtc_remove,
.driver = {
.name = "vt8500-rtc",
.owner = THIS_MODULE,
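
The vt8500 conversion above is a straight move to managed (devm_*) resources: memory regions, ioremaps and IRQs obtained through devm_* helpers are released automatically when the driver is unbound, which is why the err_unmap/err_release/err_free labels and the free_irq()/iounmap()/kfree() calls in the remove path disappear. A condensed sketch of the pattern, assuming a hypothetical example_rtc device (handler and names are illustrative, not from the patch):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t example_rtc_irq(int irq, void *data)
{
	/* A real driver would acknowledge the alarm here. */
	return IRQ_HANDLED;
}

static int example_rtc_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	int irq, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/* Unmapped automatically on driver detach - no iounmap() in remove(). */
	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -EBUSY;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Freed automatically on driver detach - no free_irq() in remove(). */
	ret = devm_request_irq(&pdev->dev, irq, example_rtc_irq, 0,
			       dev_name(&pdev->dev), NULL);
	if (ret < 0)
		return ret;

	return 0;
}
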
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index ea5c6f857ca5..2f0ac7b30a0c 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -443,9 +443,10 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
- IRQF_TRIGGER_RISING, "RTC alarm",
- wm831x_rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL,
+ wm831x_alm_irq,
+ IRQF_TRIGGER_RISING, "RTC alarm",
+ wm831x_rtc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
alm_irq, ret);
@@ -459,12 +460,10 @@ err:
return ret;
}
-static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
+static int wm831x_rtc_remove(struct platform_device *pdev)
{
struct wm831x_rtc *wm831x_rtc = platform_get_drvdata(pdev);
- int alm_irq = platform_get_irq_byname(pdev, "ALM");
- free_irq(alm_irq, wm831x_rtc);
rtc_device_unregister(wm831x_rtc->rtc);
return 0;
@@ -483,7 +482,7 @@ static const struct dev_pm_ops wm831x_rtc_pm_ops = {
static struct platform_driver wm831x_rtc_driver = {
.probe = wm831x_rtc_probe,
- .remove = __devexit_p(wm831x_rtc_remove),
+ .remove = wm831x_rtc_remove,
.driver = {
.name = "wm831x-rtc",
.pm = &wm831x_rtc_pm_ops,
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index c2e52d15abb2..8ad86ae0d30f 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -459,7 +459,7 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
+static int wm8350_rtc_remove(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
struct wm8350_rtc *wm_rtc = &wm8350->rtc;
@@ -479,7 +479,7 @@ static struct dev_pm_ops wm8350_rtc_pm_ops = {
static struct platform_driver wm8350_rtc_driver = {
.probe = wm8350_rtc_probe,
- .remove = __devexit_p(wm8350_rtc_remove),
+ .remove = wm8350_rtc_remove,
.driver = {
.name = "wm8350-rtc",
.pm = &wm8350_rtc_pm_ops,
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c
new file mode 100644
index 000000000000..bf3e242ccc5c
--- /dev/null
+++ b/drivers/rtc/systohc.c
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+#include <linux/rtc.h>
+#include <linux/time.h>
+
+/**
+ * rtc_set_ntp_time - Save NTP synchronized time to the RTC
+ * @now: Current time of day
+ *
+ * Replacement for the NTP platform function update_persistent_clock
+ * that stores time for later retrieval by rtc_hctosys.
+ *
+ * Returns 0 on successful RTC update, -ENODEV if an RTC update is not
+ * possible at all, and various other -errno values for specific temporary
+ * failure cases.
+ *
+ * If a temporary failure is indicated, the caller should try again 'soon'.
+ */
+int rtc_set_ntp_time(struct timespec now)
+{
+ struct rtc_device *rtc;
+ struct rtc_time tm;
+ int err = -ENODEV;
+
+ if (now.tv_nsec < (NSEC_PER_SEC >> 1))
+ rtc_time_to_tm(now.tv_sec, &tm);
+ else
+ rtc_time_to_tm(now.tv_sec + 1, &tm);
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc) {
+ /* rtc_hctosys exclusively uses UTC, so we call set_time here,
+ * not set_mmss. */
+ if (rtc->ops && (rtc->ops->set_time || rtc->ops->set_mmss))
+ err = rtc_set_time(rtc, &tm);
+ rtc_class_close(rtc);
+ }
+
+ return err;
+}
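
rtc_set_ntp_time() rounds the supplied timespec to the nearest second (tv_nsec below half a second keeps tv_sec, otherwise tv_sec + 1) before writing it to the CONFIG_RTC_HCTOSYS_DEVICE RTC. A hedged sketch of how an architecture's legacy persistent-clock hook might forward to it; the wiring below is illustrative and not part of this file:

#include <linux/rtc.h>
#include <linux/time.h>

/* Illustration only: feed NTP's periodic sync into the RTC class helper. */
int update_persistent_clock(struct timespec now)
{
	/* Returns -ENODEV when CONFIG_RTC_HCTOSYS_DEVICE cannot be opened. */
	return rtc_set_ntp_time(now);
}
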
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 29225e1c159c..f1b7fdc58a5f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1352,7 +1352,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
switch (rc) {
case 0: /* termination successful */
cqr->status = DASD_CQR_CLEAR_PENDING;
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
cqr->starttime = 0;
DBF_DEV_EVENT(DBF_DEBUG, device,
"terminate cqr %p successful",
@@ -1420,7 +1420,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
- cqr->startclk = get_clock();
+ cqr->startclk = get_tod_clock();
cqr->starttime = jiffies;
cqr->retries--;
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
@@ -1623,7 +1623,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
- now = get_clock();
+ now = get_tod_clock();
cqr = (struct dasd_ccw_req *) intparm;
/* check for conditions that should be handled immediately */
if (!cqr ||
@@ -1963,7 +1963,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
}
break;
case DASD_CQR_QUEUED:
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
cqr->status = DASD_CQR_CLEARED;
break;
default: /* no need to modify the others */
@@ -2210,7 +2210,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
wait_event(generic_waitq, _wait_for_wakeup(cqr));
}
- maincqr->endclk = get_clock();
+ maincqr->endclk = get_tod_clock();
if ((maincqr->status != DASD_CQR_DONE) &&
(maincqr->intrc != -ERESTARTSYS))
dasd_log_sense(maincqr, &maincqr->irb);
@@ -2340,7 +2340,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
"Cancelling request %p failed with rc=%d\n",
cqr, rc);
} else {
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
}
break;
default: /* already finished or clear pending - do nothing */
@@ -2568,7 +2568,7 @@ restart:
}
/* Rechain finished requests to final queue */
- cqr->endclk = get_clock();
+ cqr->endclk = get_tod_clock();
list_move_tail(&cqr->blocklist, final_queue);
}
}
@@ -2711,7 +2711,7 @@ restart_cb:
}
/* call the callback function */
spin_lock_irq(&block->request_queue_lock);
- cqr->endclk = get_clock();
+ cqr->endclk = get_tod_clock();
list_del_init(&cqr->blocklist);
__dasd_cleanup_cqr(cqr);
spin_unlock_irq(&block->request_queue_lock);
@@ -3042,12 +3042,15 @@ void dasd_generic_remove(struct ccw_device *cdev)
cdev->handler = NULL;
device = dasd_device_from_cdev(cdev);
- if (IS_ERR(device))
+ if (IS_ERR(device)) {
+ dasd_remove_sysfs_files(cdev);
return;
+ }
if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
+ dasd_remove_sysfs_files(cdev);
return;
}
/*
@@ -3504,7 +3507,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
cqr->memdev = device;
cqr->expires = 10*HZ;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index f8212d54013a..d26134713682 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -229,7 +229,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
dctl_cqr->expires = 5 * 60 * HZ;
dctl_cqr->retries = 2;
- dctl_cqr->buildclk = get_clock();
+ dctl_cqr->buildclk = get_tod_clock();
dctl_cqr->status = DASD_CQR_FILLED;
@@ -1719,7 +1719,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
erp->magic = default_erp->magic;
erp->expires = default_erp->expires;
erp->retries = 256;
- erp->buildclk = get_clock();
+ erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
/* remove the default erp */
@@ -2322,7 +2322,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_ERR, device, "%s",
"Unable to allocate ERP request");
cqr->status = DASD_CQR_FAILED;
- cqr->stopclk = get_clock ();
+ cqr->stopclk = get_tod_clock();
} else {
DBF_DEV_EVENT(DBF_ERR, device,
"Unable to allocate ERP request "
@@ -2364,7 +2364,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
erp->magic = cqr->magic;
erp->expires = cqr->expires;
erp->retries = 256;
- erp->buildclk = get_clock();
+ erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
return erp;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 6b556995bb33..a2597e683e79 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -448,7 +448,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw->count = sizeof(*(lcu->uac));
ccw->cda = (__u32)(addr_t) lcu->uac;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* need to unset flag here to detect race with summary unit check */
@@ -733,7 +733,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 5 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 9bd5da36f99e..cc0603358522 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -184,14 +184,14 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
private->iob.bio_list = dreq->bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
- cqr->startclk = get_clock();
+ cqr->startclk = get_tod_clock();
cqr->starttime = jiffies;
cqr->retries--;
rc = dia250(&private->iob, RW_BIO);
switch (rc) {
case 0: /* Synchronous I/O finished successfully */
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
cqr->status = DASD_CQR_SUCCESS;
/* Indicate to calling function that only a dasd_schedule_bh()
and no timer is needed */
@@ -222,7 +222,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
mdsk_term_io(device);
mdsk_init_io(device, device->block->bp_block, 0, NULL);
cqr->status = DASD_CQR_CLEAR_PENDING;
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
dasd_schedule_device_bh(device);
return 0;
}
@@ -248,7 +248,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
default:
return;
}
- kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
+ inc_irq_stat(IRQEXT_DSD);
if (!ip) { /* no intparm: unsolicited interrupt */
DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
"interrupt");
@@ -276,7 +276,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
return;
}
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
expires = 0;
if ((ext_code.subcode & 0xff) == 0) {
@@ -556,7 +556,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
}
}
cqr->retries = DIAG_MAX_RETRIES;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 806fe912d6e7..33f26bfa62f2 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -862,7 +862,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
cqr->expires = 10*HZ;
cqr->lpm = lpm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
@@ -1449,7 +1449,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw->count = sizeof(struct dasd_rssd_features);
ccw->cda = (__u32)(addr_t) features;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
@@ -1501,7 +1501,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10*HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
@@ -1841,7 +1841,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 255;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
@@ -2241,7 +2241,7 @@ dasd_eckd_format_device(struct dasd_device * device,
fcp->startdev = device;
fcp->memdev = device;
fcp->retries = 256;
- fcp->buildclk = get_clock();
+ fcp->buildclk = get_tod_clock();
fcp->status = DASD_CQR_FILLED;
return fcp;
}
@@ -2530,7 +2530,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = startdev->path_data.ppm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
@@ -2705,7 +2705,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = startdev->path_data.ppm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
@@ -2998,7 +2998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = startdev->path_data.ppm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
out_error:
@@ -3201,7 +3201,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
cqr->expires = startdev->default_expires * HZ;
cqr->lpm = startdev->path_data.ppm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
@@ -3402,7 +3402,7 @@ dasd_eckd_release(struct dasd_device *device)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
@@ -3457,7 +3457,7 @@ dasd_eckd_reserve(struct dasd_device *device)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
@@ -3511,7 +3511,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
@@ -3572,7 +3572,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
cqr->retries = 5;
cqr->expires = 10 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->lpm = usrparm.path_mask;
@@ -3642,7 +3642,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
ccw->cda = (__u32)(addr_t) stats;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
@@ -3768,7 +3768,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
cqr->memdev = device;
cqr->retries = 3;
cqr->expires = 10 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Build the ccws */
@@ -4274,7 +4274,7 @@ static struct ccw_driver dasd_eckd_driver = {
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
.uc_handler = dasd_generic_uc_handler,
- .int_class = IOINT_DAS,
+ .int_class = IRQIO_DAS,
};
/*
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index ff901b5509c1..21ef63cf0960 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -481,7 +481,7 @@ int dasd_eer_enable(struct dasd_device *device)
ccw->flags = 0;
ccw->cda = (__u32)(addr_t) cqr->data;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->callback = dasd_eer_snss_cb;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d01ef82f8757..3250cb471f78 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -102,7 +102,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
pr_err("%s: default ERP has run out of retries and failed\n",
dev_name(&device->cdev->dev));
cqr->status = DASD_CQR_FAILED;
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
}
return cqr;
} /* end dasd_default_erp_action */
@@ -146,7 +146,7 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
cqr->status = DASD_CQR_DONE;
else {
cqr->status = DASD_CQR_FAILED;
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
}
return cqr;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index eb748507c7fa..4dd0e2f6047e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -78,7 +78,7 @@ static struct ccw_driver dasd_fba_driver = {
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
- .int_class = IOINT_DAS,
+ .int_class = IRQIO_DAS,
};
static void
@@ -370,7 +370,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
cqr->block = block;
cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
cqr->retries = 32;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 7ac6bad919ef..3c1ccf494647 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -68,19 +68,34 @@ void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
-#else
-#define __scm_free_rq_cluster(scmrq) {}
-#define __scm_alloc_rq_cluster(scmrq) 0
-#define scm_request_cluster_init(scmrq) {}
-#define scm_reserve_cluster(scmrq) true
-#define scm_release_cluster(scmrq) {}
-#define scm_blk_dev_cluster_setup(bdev) {}
-#define scm_need_cluster_request(scmrq) false
-#define scm_initiate_cluster_request(scmrq) {}
-#define scm_cluster_request_irq(scmrq) {}
-#define scm_test_cluster_request(scmrq) false
-#define scm_cluster_size_valid() true
-#endif
+#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
+static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
+static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
+{
+ return 0;
+}
+static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
+static inline bool scm_reserve_cluster(struct scm_request *scmrq)
+{
+ return true;
+}
+static inline void scm_release_cluster(struct scm_request *scmrq) {}
+static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
+static inline bool scm_need_cluster_request(struct scm_request *scmrq)
+{
+ return false;
+}
+static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
+static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
+static inline bool scm_test_cluster_request(struct scm_request *scmrq)
+{
+ return false;
+}
+static inline bool scm_cluster_size_valid(void)
+{
+ return true;
+}
+#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
extern debug_info_t *scm_debug;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 2c9a776bd63c..71bf959732fe 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -11,7 +11,7 @@ config TN3270
config TN3270_TTY
def_tristate y
prompt "Support for tty input/output on 3270 terminals"
- depends on TN3270
+ depends on TN3270 && TTY
help
Include support for using an IBM 3270 terminal as a Linux tty.
@@ -33,7 +33,7 @@ config TN3270_CONSOLE
config TN3215
def_bool y
prompt "Support for 3215 line mode terminal"
- depends on CCW
+ depends on CCW && TTY
help
Include support for IBM 3215 line-mode terminals.
@@ -51,7 +51,7 @@ config CCW_CONSOLE
config SCLP_TTY
def_bool y
prompt "Support for SCLP line mode terminal"
- depends on S390
+ depends on S390 && TTY
help
Include support for IBM SCLP line-mode terminals.
@@ -66,7 +66,7 @@ config SCLP_CONSOLE
config SCLP_VT220_TTY
def_bool y
prompt "Support for SCLP VT220-compatible terminal"
- depends on S390
+ depends on S390 && TTY
help
Include support for an IBM SCLP VT220-compatible terminal.
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 40084501c31b..7b00fa634d40 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -44,6 +44,7 @@
#define RAW3215_NR_CCWS 3
#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
+#define RAW3215_FIXED 1 /* 3215 console device must not be freed */
#define RAW3215_WORKING 4 /* set if a request is being worked on */
#define RAW3215_THROTTLED 8 /* set if reading is disabled */
#define RAW3215_STOPPED 16 /* set if writing is disabled */
@@ -411,8 +412,9 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
break;
case CTRLCHAR_CTRL:
- tty_insert_flip_char(tty, cchar, TTY_NORMAL);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(&raw->port, cchar,
+ TTY_NORMAL);
+ tty_flip_buffer_push(&raw->port);
break;
case CTRLCHAR_NONE:
@@ -424,8 +426,9 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
count++;
} else
count -= 2;
- tty_insert_flip_string(tty, raw->inbuf, count);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&raw->port, raw->inbuf,
+ count);
+ tty_flip_buffer_push(&raw->port);
break;
}
} else if (req->type == RAW3215_WRITE) {
@@ -630,7 +633,8 @@ static void raw3215_shutdown(struct raw3215_info *raw)
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
- if (!(raw->port.flags & ASYNC_INITIALIZED))
+ if (!(raw->port.flags & ASYNC_INITIALIZED) ||
+ (raw->flags & RAW3215_FIXED))
return;
/* Wait for outstanding requests, then free irq */
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -805,7 +809,7 @@ static struct ccw_driver raw3215_ccw_driver = {
.freeze = &raw3215_pm_stop,
.thaw = &raw3215_pm_start,
.restore = &raw3215_pm_start,
- .int_class = IOINT_C15,
+ .int_class = IRQIO_C15,
};
#ifdef CONFIG_TN3215_CONSOLE
@@ -927,6 +931,8 @@ static int __init con3215_init(void)
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
+ raw->flags |= RAW3215_FIXED;
+
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
raw3215_free_info(raw);
@@ -966,7 +972,7 @@ static int tty3215_open(struct tty_struct *tty, struct file * filp)
tty_port_tty_set(&raw->port, tty);
- tty->low_latency = 0; /* don't use bottom half for pushing chars */
+ raw->port.low_latency = 0; /* don't use bottom half for pushing chars */
/*
* Start up 3215 device
*/
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 911704571b9c..230697aac94b 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -443,7 +443,7 @@ fs3270_open(struct inode *inode, struct file *filp)
tty_kref_put(tty);
return -ENODEV;
}
- minor = tty->index + RAW3270_FIRSTMINOR;
+ minor = tty->index;
tty_kref_put(tty);
}
mutex_lock(&fs3270_mutex);
@@ -524,6 +524,25 @@ static const struct file_operations fs3270_fops = {
.llseek = no_llseek,
};
+void fs3270_create_cb(int minor)
+{
+ __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
+ device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
+ NULL, "3270/tub%d", minor);
+}
+
+void fs3270_destroy_cb(int minor)
+{
+ device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
+ __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
+}
+
+struct raw3270_notifier fs3270_notifier =
+{
+ .create = fs3270_create_cb,
+ .destroy = fs3270_destroy_cb,
+};
+
/*
* 3270 fullscreen driver initialization.
*/
@@ -532,16 +551,20 @@ fs3270_init(void)
{
int rc;
- rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
+ rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
if (rc)
return rc;
+ device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
+ NULL, "3270/tub");
+ raw3270_register_notifier(&fs3270_notifier);
return 0;
}
static void __exit
fs3270_exit(void)
{
- unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
+ raw3270_unregister_notifier(&fs3270_notifier);
+ __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index d0ae2be58191..a31f339211d5 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -43,22 +43,14 @@ int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);
static inline void
kbd_put_queue(struct tty_port *port, int ch)
{
- struct tty_struct *tty = tty_port_tty_get(port);
- if (!tty)
- return;
- tty_insert_flip_char(tty, ch, 0);
- tty_schedule_flip(tty);
- tty_kref_put(tty);
+ tty_insert_flip_char(port, ch, 0);
+ tty_schedule_flip(port);
}
static inline void
kbd_puts_queue(struct tty_port *port, char *cp)
{
- struct tty_struct *tty = tty_port_tty_get(port);
- if (!tty)
- return;
while (*cp)
- tty_insert_flip_char(tty, *cp++, 0);
- tty_schedule_flip(tty);
- tty_kref_put(tty);
+ tty_insert_flip_char(port, *cp++, 0);
+ tty_schedule_flip(port);
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index f3b8bb84faf2..4c9030a5b9f2 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -28,7 +28,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
-static struct class *class3270;
+struct class *class3270;
/* The main 3270 data structure. */
struct raw3270 {
@@ -37,6 +37,7 @@ struct raw3270 {
int minor;
short model, rows, cols;
+ unsigned int state;
unsigned long flags;
struct list_head req_queue; /* Request queue. */
@@ -46,20 +47,26 @@ struct raw3270 {
struct timer_list timer; /* Device timer. */
unsigned char *ascebc; /* ascii -> ebcdic table */
- struct device *clttydev; /* 3270-class tty device ptr */
- struct device *cltubdev; /* 3270-class tub device ptr */
- struct raw3270_request init_request;
+ struct raw3270_view init_view;
+ struct raw3270_request init_reset;
+ struct raw3270_request init_readpart;
+ struct raw3270_request init_readmod;
unsigned char init_data[256];
};
+/* raw3270->state */
+#define RAW3270_STATE_INIT 0 /* Initial state */
+#define RAW3270_STATE_RESET 1 /* Reset command is pending */
+#define RAW3270_STATE_W4ATTN 2 /* Wait for attention interrupt */
+#define RAW3270_STATE_READMOD 3 /* Read partition is pending */
+#define RAW3270_STATE_READY 4 /* Device is usable by views */
+
/* raw3270->flags */
#define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */
#define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */
-#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */
-#define RAW3270_FLAGS_READY 4 /* Device is useable by views */
-#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
-#define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */
+#define RAW3270_FLAGS_CONSOLE 2 /* Device is the console. */
+#define RAW3270_FLAGS_FROZEN 3 /* set if 3270 is frozen for suspend */
/* Semaphore to protect global data of raw3270 (devices, views, etc). */
static DEFINE_MUTEX(raw3270_mutex);
@@ -97,6 +104,17 @@ static unsigned char raw3270_ebcgraf[64] = {
0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
};
+static inline int raw3270_state_ready(struct raw3270 *rp)
+{
+ return rp->state == RAW3270_STATE_READY;
+}
+
+static inline int raw3270_state_final(struct raw3270 *rp)
+{
+ return rp->state == RAW3270_STATE_INIT ||
+ rp->state == RAW3270_STATE_READY;
+}
+
void
raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
{
@@ -214,7 +232,7 @@ raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
* Stop running ccw.
*/
static int
-raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
+__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
{
int retries;
int rc;
@@ -233,18 +251,6 @@ raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
return rc;
}
-static int
-raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
-{
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- rc = raw3270_halt_io_nolock(rp, rq);
- spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
- return rc;
-}
-
/*
* Add the request to the request queue, try to start it if the
* 3270 device is idle. Return without waiting for end of i/o.
@@ -281,8 +287,8 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
- else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
- rc = -ENODEV;
+ else if (!raw3270_state_ready(rp))
+ rc = -EBUSY;
else
rc = __raw3270_start(rp, view, rq);
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
@@ -299,8 +305,8 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
- else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
- rc = -ENODEV;
+ else if (!raw3270_state_ready(rp))
+ rc = -EBUSY;
else
rc = __raw3270_start(rp, view, rq);
return rc;
@@ -378,7 +384,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case RAW3270_IO_STOP:
if (!rq)
break;
- raw3270_halt_io_nolock(rp, rq);
+ __raw3270_halt_io(rp, rq);
rq->rc = -EIO;
break;
default:
@@ -413,9 +419,14 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
/*
- * Size sensing.
+ * To determine the size of the 3270 device we need to do:
+ * 1) send a 'read partition' data stream to the device
+ * 2) wait for the attn interrupt that precedes the query reply
+ * 3) do a read modified to get the query reply
+ * To make things worse, we have to cope with intervention
+ * required (3270 device switched to 'stand-by') and command
+ * rejects (old devices that can't do 'read partition').
*/
-
struct raw3270_ua { /* Query Reply structure for Usable Area */
struct { /* Usable Area Query Reply Base */
short l; /* Length of this structured field */
@@ -451,117 +462,21 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */
} __attribute__ ((packed)) aua;
} __attribute__ ((packed));
-static struct diag210 raw3270_init_diag210;
-static DEFINE_MUTEX(raw3270_init_mutex);
-
-static int
-raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
- struct irb *irb)
-{
- /*
- * Unit-Check Processing:
- * Expect Command Reject or Intervention Required.
- */
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- /* Request finished abnormally. */
- if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
- set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
- return RAW3270_IO_BUSY;
- }
- }
- if (rq) {
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- if (irb->ecw[0] & SNS0_CMD_REJECT)
- rq->rc = -EOPNOTSUPP;
- else
- rq->rc = -EIO;
- } else
- /* Request finished normally. Copy residual count. */
- rq->rescnt = irb->scsw.cmd.count;
- }
- if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
- set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
- wake_up(&raw3270_wait_queue);
- }
- return RAW3270_IO_DONE;
-}
-
-static struct raw3270_fn raw3270_init_fn = {
- .intv = raw3270_init_irq
-};
-
-static struct raw3270_view raw3270_init_view = {
- .fn = &raw3270_init_fn
-};
-
-/*
- * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
- * Wait for end of request. The request must have been started
- * with raw3270_start, rc = 0. The device lock may NOT have been
- * released between calling raw3270_start and raw3270_wait.
- */
static void
-raw3270_wake_init(struct raw3270_request *rq, void *data)
-{
- wake_up((wait_queue_head_t *) data);
-}
-
-/*
- * Special wait function that can cope with console initialization.
- */
-static int
-raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
- struct raw3270_request *rq)
-{
- unsigned long flags;
- int rc;
-
-#ifdef CONFIG_TN3270_CONSOLE
- if (raw3270_registered == 0) {
- spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
- rq->callback = NULL;
- rc = __raw3270_start(rp, view, rq);
- if (rc == 0)
- while (!raw3270_request_final(rq)) {
- wait_cons_dev();
- barrier();
- }
- spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
- return rq->rc;
- }
-#endif
- rq->callback = raw3270_wake_init;
- rq->callback_data = &raw3270_wait_queue;
- spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
- rc = __raw3270_start(rp, view, rq);
- spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
- if (rc)
- return rc;
- /* Now wait for the completion. */
- rc = wait_event_interruptible(raw3270_wait_queue,
- raw3270_request_final(rq));
- if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
- raw3270_halt_io(view->dev, rq);
- /* No wait for the halt to complete. */
- wait_event(raw3270_wait_queue, raw3270_request_final(rq));
- return -ERESTARTSYS;
- }
- return rq->rc;
-}
-
-static int
-__raw3270_size_device_vm(struct raw3270 *rp)
+raw3270_size_device_vm(struct raw3270 *rp)
{
int rc, model;
struct ccw_dev_id dev_id;
+ struct diag210 diag_data;
ccw_device_get_id(rp->cdev, &dev_id);
- raw3270_init_diag210.vrdcdvno = dev_id.devno;
- raw3270_init_diag210.vrdclen = sizeof(struct diag210);
- rc = diag210(&raw3270_init_diag210);
- if (rc)
- return rc;
- model = raw3270_init_diag210.vrdccrmd;
+ diag_data.vrdcdvno = dev_id.devno;
+ diag_data.vrdclen = sizeof(struct diag210);
+ rc = diag210(&diag_data);
+ model = diag_data.vrdccrmd;
+ /* Use default model 2 if the size could not be detected */
+ if (rc || model < 2 || model > 5)
+ model = 2;
switch (model) {
case 2:
rp->model = model;
@@ -583,77 +498,25 @@ __raw3270_size_device_vm(struct raw3270 *rp)
rp->rows = 27;
rp->cols = 132;
break;
- default:
- rc = -EOPNOTSUPP;
- break;
}
- return rc;
}
-static int
-__raw3270_size_device(struct raw3270 *rp)
+static void
+raw3270_size_device(struct raw3270 *rp)
{
- static const unsigned char wbuf[] =
- { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
struct raw3270_ua *uap;
- int rc;
- /*
- * To determine the size of the 3270 device we need to do:
- * 1) send a 'read partition' data stream to the device
- * 2) wait for the attn interrupt that precedes the query reply
- * 3) do a read modified to get the query reply
- * To make things worse we have to cope with intervention
- * required (3270 device switched to 'stand-by') and command
- * rejects (old devices that can't do 'read partition').
- */
- memset(&rp->init_request, 0, sizeof(rp->init_request));
- memset(&rp->init_data, 0, 256);
- /* Store 'read partition' data stream to init_data */
- memcpy(&rp->init_data, wbuf, sizeof(wbuf));
- INIT_LIST_HEAD(&rp->init_request.list);
- rp->init_request.ccw.cmd_code = TC_WRITESF;
- rp->init_request.ccw.flags = CCW_FLAG_SLI;
- rp->init_request.ccw.count = sizeof(wbuf);
- rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);
-
- rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
- if (rc)
- /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
- return rc;
-
- /* Wait for attention interrupt. */
-#ifdef CONFIG_TN3270_CONSOLE
- if (raw3270_registered == 0) {
- unsigned long flags;
-
- spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
- wait_cons_dev();
- spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
- } else
-#endif
- rc = wait_event_interruptible(raw3270_wait_queue,
- test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
- if (rc)
- return rc;
-
- /*
- * The device accepted the 'read partition' command. Now
- * set up a read ccw and issue it.
- */
- rp->init_request.ccw.cmd_code = TC_READMOD;
- rp->init_request.ccw.flags = CCW_FLAG_SLI;
- rp->init_request.ccw.count = sizeof(rp->init_data);
- rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
- rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
- if (rc)
- return rc;
/* Got a Query Reply */
uap = (struct raw3270_ua *) (rp->init_data + 1);
/* Paranoia check. */
- if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
- return -EOPNOTSUPP;
+ if (rp->init_readmod.rc || rp->init_data[0] != 0x88 ||
+ uap->uab.qcode != 0x81) {
+ /* Couldn't detect size. Use default model 2. */
+ rp->model = 2;
+ rp->rows = 24;
+ rp->cols = 80;
+ return;
+ }
/* Copy rows/columns of default Usable Area */
rp->rows = uap->uab.h;
rp->cols = uap->uab.w;
@@ -666,66 +529,131 @@ __raw3270_size_device(struct raw3270 *rp)
rp->rows = uap->aua.hauai;
rp->cols = uap->aua.wauai;
}
- return 0;
+ /* Try to find a model. */
+ rp->model = 0;
+ if (rp->rows == 24 && rp->cols == 80)
+ rp->model = 2;
+ if (rp->rows == 32 && rp->cols == 80)
+ rp->model = 3;
+ if (rp->rows == 43 && rp->cols == 80)
+ rp->model = 4;
+ if (rp->rows == 27 && rp->cols == 132)
+ rp->model = 5;
}
-static int
-raw3270_size_device(struct raw3270 *rp)
+static void
+raw3270_size_device_done(struct raw3270 *rp)
{
- int rc;
+ struct raw3270_view *view;
- mutex_lock(&raw3270_init_mutex);
- rp->view = &raw3270_init_view;
- raw3270_init_view.dev = rp;
- if (MACHINE_IS_VM)
- rc = __raw3270_size_device_vm(rp);
- else
- rc = __raw3270_size_device(rp);
- raw3270_init_view.dev = NULL;
rp->view = NULL;
- mutex_unlock(&raw3270_init_mutex);
- if (rc == 0) { /* Found something. */
- /* Try to find a model. */
- rp->model = 0;
- if (rp->rows == 24 && rp->cols == 80)
- rp->model = 2;
- if (rp->rows == 32 && rp->cols == 80)
- rp->model = 3;
- if (rp->rows == 43 && rp->cols == 80)
- rp->model = 4;
- if (rp->rows == 27 && rp->cols == 132)
- rp->model = 5;
- } else {
- /* Couldn't detect size. Use default model 2. */
- rp->model = 2;
- rp->rows = 24;
- rp->cols = 80;
- return 0;
+ rp->state = RAW3270_STATE_READY;
+ /* Notify views about new size */
+ list_for_each_entry(view, &rp->view_list, list)
+ if (view->fn->resize)
+ view->fn->resize(view, rp->model, rp->rows, rp->cols);
+ /* Setup processing done, now activate a view */
+ list_for_each_entry(view, &rp->view_list, list) {
+ rp->view = view;
+ if (view->fn->activate(view) == 0)
+ break;
+ rp->view = NULL;
}
- return rc;
+}
+
+static void
+raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
+{
+ struct raw3270 *rp = rq->view->dev;
+
+ raw3270_size_device(rp);
+ raw3270_size_device_done(rp);
+}
+
+static void
+raw3270_read_modified(struct raw3270 *rp)
+{
+ if (rp->state != RAW3270_STATE_W4ATTN)
+ return;
+ /* Use 'read modified' to get the result of a read partition. */
+ memset(&rp->init_readmod, 0, sizeof(rp->init_readmod));
+ memset(&rp->init_data, 0, sizeof(rp->init_data));
+ rp->init_readmod.ccw.cmd_code = TC_READMOD;
+ rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
+ rp->init_readmod.ccw.count = sizeof(rp->init_data);
+ rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data);
+ rp->init_readmod.callback = raw3270_read_modified_cb;
+ rp->state = RAW3270_STATE_READMOD;
+ raw3270_start_irq(&rp->init_view, &rp->init_readmod);
+}
+
+static void
+raw3270_writesf_readpart(struct raw3270 *rp)
+{
+ static const unsigned char wbuf[] =
+ { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
+
+ /* Store 'read partition' data stream to init_data */
+ memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
+ memset(&rp->init_data, 0, sizeof(rp->init_data));
+ memcpy(&rp->init_data, wbuf, sizeof(wbuf));
+ rp->init_readpart.ccw.cmd_code = TC_WRITESF;
+ rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
+ rp->init_readpart.ccw.count = sizeof(wbuf);
+ rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data);
+ rp->state = RAW3270_STATE_W4ATTN;
+ raw3270_start_irq(&rp->init_view, &rp->init_readpart);
+}
+
+/*
+ * Device reset
+ */
+static void
+raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
+{
+ struct raw3270 *rp = rq->view->dev;
+
+ if (rp->state != RAW3270_STATE_RESET)
+ return;
+ if (rq && rq->rc) {
+ /* Reset command failed. */
+ rp->state = RAW3270_STATE_INIT;
+ } else if (0 && MACHINE_IS_VM) {
+ raw3270_size_device_vm(rp);
+ raw3270_size_device_done(rp);
+ } else
+ raw3270_writesf_readpart(rp);
}
static int
-raw3270_reset_device(struct raw3270 *rp)
+__raw3270_reset_device(struct raw3270 *rp)
{
int rc;
- mutex_lock(&raw3270_init_mutex);
- memset(&rp->init_request, 0, sizeof(rp->init_request));
+ /* Store reset data stream to init_data/init_reset */
+ memset(&rp->init_reset, 0, sizeof(rp->init_reset));
memset(&rp->init_data, 0, sizeof(rp->init_data));
- /* Store reset data stream to init_data/init_request */
rp->init_data[0] = TW_KR;
- INIT_LIST_HEAD(&rp->init_request.list);
- rp->init_request.ccw.cmd_code = TC_EWRITEA;
- rp->init_request.ccw.flags = CCW_FLAG_SLI;
- rp->init_request.ccw.count = 1;
- rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
- rp->view = &raw3270_init_view;
- raw3270_init_view.dev = rp;
- rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
- raw3270_init_view.dev = NULL;
- rp->view = NULL;
- mutex_unlock(&raw3270_init_mutex);
+ rp->init_reset.ccw.cmd_code = TC_EWRITEA;
+ rp->init_reset.ccw.flags = CCW_FLAG_SLI;
+ rp->init_reset.ccw.count = 1;
+ rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data);
+ rp->init_reset.callback = raw3270_reset_device_cb;
+ rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
+ if (rc == 0 && rp->state == RAW3270_STATE_INIT)
+ rp->state = RAW3270_STATE_RESET;
+ return rc;
+}
+
+static int
+raw3270_reset_device(struct raw3270 *rp)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ rc = __raw3270_reset_device(rp);
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rc;
}
@@ -739,13 +667,50 @@ raw3270_reset(struct raw3270_view *view)
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
- else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
- rc = -ENODEV;
+ else if (!raw3270_state_ready(rp))
+ rc = -EBUSY;
else
rc = raw3270_reset_device(view->dev);
return rc;
}
+static int
+raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
+ struct irb *irb)
+{
+ struct raw3270 *rp;
+
+ /*
+ * Unit-Check Processing:
+ * Expect Command Reject or Intervention Required.
+ */
+ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+ /* Request finished abnormally. */
+ if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
+ set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
+ return RAW3270_IO_BUSY;
+ }
+ }
+ if (rq) {
+ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+ if (irb->ecw[0] & SNS0_CMD_REJECT)
+ rq->rc = -EOPNOTSUPP;
+ else
+ rq->rc = -EIO;
+ }
+ }
+ if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+ /* Queue read modified after attention interrupt */
+ rp = view->dev;
+ raw3270_read_modified(rp);
+ }
+ return RAW3270_IO_DONE;
+}
+
+static struct raw3270_fn raw3270_init_fn = {
+ .intv = raw3270_init_irq
+};
+
/*
* Setup new 3270 device.
*/
@@ -774,6 +739,10 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
INIT_LIST_HEAD(&rp->req_queue);
INIT_LIST_HEAD(&rp->view_list);
+ rp->init_view.dev = rp;
+ rp->init_view.fn = &raw3270_init_fn;
+ rp->view = &rp->init_view;
+
/*
* Add device to list and find the smallest unused minor
* number for it. Note: there is no device with minor 0,
@@ -812,6 +781,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
*/
struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
{
+ unsigned long flags;
struct raw3270 *rp;
char *ascebc;
int rc;
@@ -822,16 +792,15 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
if (rc)
return ERR_PTR(rc);
set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
- rc = raw3270_reset_device(rp);
- if (rc)
- return ERR_PTR(rc);
- rc = raw3270_size_device(rp);
- if (rc)
- return ERR_PTR(rc);
- rc = raw3270_reset_device(rp);
- if (rc)
- return ERR_PTR(rc);
- set_bit(RAW3270_FLAGS_READY, &rp->flags);
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ do {
+ __raw3270_reset_device(rp);
+ while (!raw3270_state_final(rp)) {
+ wait_cons_dev();
+ barrier();
+ }
+ } while (rp->state != RAW3270_STATE_READY);
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rp;
}
@@ -893,13 +862,13 @@ raw3270_activate_view(struct raw3270_view *view)
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (rp->view == view)
rc = 0;
- else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
- rc = -ENODEV;
+ else if (!raw3270_state_ready(rp))
+ rc = -EBUSY;
else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else {
oldview = NULL;
- if (rp->view) {
+ if (rp->view && rp->view->fn->deactivate) {
oldview = rp->view;
oldview->fn->deactivate(oldview);
}
@@ -944,7 +913,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
list_del_init(&view->list);
list_add_tail(&view->list, &rp->view_list);
/* Try to activate another view. */
- if (test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+ if (raw3270_state_ready(rp) &&
!test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
list_for_each_entry(view, &rp->view_list, list) {
rp->view = view;
@@ -975,18 +944,16 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
if (rp->minor != minor)
continue;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
- atomic_set(&view->ref_count, 2);
- view->dev = rp;
- view->fn = fn;
- view->model = rp->model;
- view->rows = rp->rows;
- view->cols = rp->cols;
- view->ascebc = rp->ascebc;
- spin_lock_init(&view->lock);
- list_add(&view->list, &rp->view_list);
- rc = 0;
- }
+ atomic_set(&view->ref_count, 2);
+ view->dev = rp;
+ view->fn = fn;
+ view->model = rp->model;
+ view->rows = rp->rows;
+ view->cols = rp->cols;
+ view->ascebc = rp->ascebc;
+ spin_lock_init(&view->lock);
+ list_add(&view->list, &rp->view_list);
+ rc = 0;
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
break;
}
@@ -1010,14 +977,11 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
if (rp->minor != minor)
continue;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
- view = ERR_PTR(-ENOENT);
- list_for_each_entry(tmp, &rp->view_list, list) {
- if (tmp->fn == fn) {
- raw3270_get_view(tmp);
- view = tmp;
- break;
- }
+ list_for_each_entry(tmp, &rp->view_list, list) {
+ if (tmp->fn == fn) {
+ raw3270_get_view(tmp);
+ view = tmp;
+ break;
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
@@ -1044,7 +1008,7 @@ raw3270_del_view(struct raw3270_view *view)
rp->view = NULL;
}
list_del_init(&view->list);
- if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+ if (!rp->view && raw3270_state_ready(rp) &&
!test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
/* Try to activate another view. */
list_for_each_entry(nv, &rp->view_list, list) {
@@ -1072,10 +1036,6 @@ raw3270_delete_device(struct raw3270 *rp)
/* Remove from device chain. */
mutex_lock(&raw3270_mutex);
- if (rp->clttydev && !IS_ERR(rp->clttydev))
- device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
- if (rp->cltubdev && !IS_ERR(rp->cltubdev))
- device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor));
list_del_init(&rp->list);
mutex_unlock(&raw3270_mutex);
@@ -1139,75 +1099,34 @@ static struct attribute_group raw3270_attr_group = {
static int raw3270_create_attributes(struct raw3270 *rp)
{
- int rc;
-
- rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
- if (rc)
- goto out;
-
- rp->clttydev = device_create(class3270, &rp->cdev->dev,
- MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL,
- "tty%s", dev_name(&rp->cdev->dev));
- if (IS_ERR(rp->clttydev)) {
- rc = PTR_ERR(rp->clttydev);
- goto out_ttydev;
- }
-
- rp->cltubdev = device_create(class3270, &rp->cdev->dev,
- MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL,
- "tub%s", dev_name(&rp->cdev->dev));
- if (!IS_ERR(rp->cltubdev))
- goto out;
-
- rc = PTR_ERR(rp->cltubdev);
- device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
-
-out_ttydev:
- sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
-out:
- return rc;
+ return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
}
/*
* Notifier for device addition/removal
*/
-struct raw3270_notifier {
- struct list_head list;
- void (*notifier)(int, int);
-};
-
static LIST_HEAD(raw3270_notifier);
-int raw3270_register_notifier(void (*notifier)(int, int))
+int raw3270_register_notifier(struct raw3270_notifier *notifier)
{
- struct raw3270_notifier *np;
struct raw3270 *rp;
- np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
- if (!np)
- return -ENOMEM;
- np->notifier = notifier;
mutex_lock(&raw3270_mutex);
- list_add_tail(&np->list, &raw3270_notifier);
- list_for_each_entry(rp, &raw3270_devices, list) {
- get_device(&rp->cdev->dev);
- notifier(rp->minor, 1);
- }
+ list_add_tail(&notifier->list, &raw3270_notifier);
+ list_for_each_entry(rp, &raw3270_devices, list)
+ notifier->create(rp->minor);
mutex_unlock(&raw3270_mutex);
return 0;
}
-void raw3270_unregister_notifier(void (*notifier)(int, int))
+void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
{
- struct raw3270_notifier *np;
+ struct raw3270 *rp;
mutex_lock(&raw3270_mutex);
- list_for_each_entry(np, &raw3270_notifier, list)
- if (np->notifier == notifier) {
- list_del(&np->list);
- kfree(np);
- break;
- }
+ list_for_each_entry(rp, &raw3270_devices, list)
+ notifier->destroy(rp->minor);
+ list_del(&notifier->list);
mutex_unlock(&raw3270_mutex);
}
@@ -1217,29 +1136,20 @@ void raw3270_unregister_notifier(void (*notifier)(int, int))
static int
raw3270_set_online (struct ccw_device *cdev)
{
- struct raw3270 *rp;
struct raw3270_notifier *np;
+ struct raw3270 *rp;
int rc;
rp = raw3270_create_device(cdev);
if (IS_ERR(rp))
return PTR_ERR(rp);
- rc = raw3270_reset_device(rp);
- if (rc)
- goto failure;
- rc = raw3270_size_device(rp);
- if (rc)
- goto failure;
- rc = raw3270_reset_device(rp);
- if (rc)
- goto failure;
rc = raw3270_create_attributes(rp);
if (rc)
goto failure;
- set_bit(RAW3270_FLAGS_READY, &rp->flags);
+ raw3270_reset_device(rp);
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
- np->notifier(rp->minor, 1);
+ np->create(rp->minor);
mutex_unlock(&raw3270_mutex);
return 0;
@@ -1268,14 +1178,14 @@ raw3270_remove (struct ccw_device *cdev)
*/
if (rp == NULL)
return;
- clear_bit(RAW3270_FLAGS_READY, &rp->flags);
sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
/* Deactivate current view and remove all views. */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
if (rp->view) {
- rp->view->fn->deactivate(rp->view);
+ if (rp->view->fn->deactivate)
+ rp->view->fn->deactivate(rp->view);
rp->view = NULL;
}
while (!list_empty(&rp->view_list)) {
@@ -1290,7 +1200,7 @@ raw3270_remove (struct ccw_device *cdev)
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
- np->notifier(rp->minor, 0);
+ np->destroy(rp->minor);
mutex_unlock(&raw3270_mutex);
/* Reset 3270 device. */
@@ -1324,7 +1234,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev)
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- if (rp->view)
+ if (rp->view && rp->view->fn->deactivate)
rp->view->fn->deactivate(rp->view);
if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
/*
@@ -1351,7 +1261,7 @@ static int raw3270_pm_start(struct ccw_device *cdev)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
- if (rp->view)
+ if (rp->view && rp->view->fn->activate)
rp->view->fn->activate(rp->view);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return 0;
@@ -1396,7 +1306,7 @@ static struct ccw_driver raw3270_ccw_driver = {
.freeze = &raw3270_pm_stop,
.thaw = &raw3270_pm_start,
.restore = &raw3270_pm_start,
- .int_class = IOINT_C70,
+ .int_class = IRQIO_C70,
};
static int
@@ -1434,6 +1344,7 @@ MODULE_LICENSE("GPL");
module_init(raw3270_init);
module_exit(raw3270_exit);
+EXPORT_SYMBOL(class3270);
EXPORT_SYMBOL(raw3270_request_alloc);
EXPORT_SYMBOL(raw3270_request_free);
EXPORT_SYMBOL(raw3270_request_reset);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index ed34eb2199cc..7b73ff8c1bd7 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -91,6 +91,7 @@ struct raw3270_iocb {
struct raw3270;
struct raw3270_view;
+extern struct class *class3270;
/* 3270 CCW request */
struct raw3270_request {
@@ -140,6 +141,7 @@ struct raw3270_fn {
struct raw3270_request *, struct irb *);
void (*release)(struct raw3270_view *);
void (*free)(struct raw3270_view *);
+ void (*resize)(struct raw3270_view *, int, int, int);
};
/*
@@ -192,8 +194,14 @@ struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
void raw3270_wait_cons_dev(struct raw3270 *);
/* Notifier for device addition/removal */
-int raw3270_register_notifier(void (*notifier)(int, int));
-void raw3270_unregister_notifier(void (*notifier)(int, int));
+struct raw3270_notifier {
+ struct list_head list;
+ void (*create)(int minor);
+ void (*destroy)(int minor);
+};
+
+int raw3270_register_notifier(struct raw3270_notifier *);
+void raw3270_unregister_notifier(struct raw3270_notifier *);
void raw3270_pm_unfreeze(struct raw3270_view *);
/*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 4fa21f7e2308..bd6871bf545a 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -400,7 +400,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
u32 finished_sccb;
u32 evbuf_pending;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+ inc_irq_stat(IRQEXT_SCP);
spin_lock(&sclp_lock);
finished_sccb = param32 & 0xfffffff8;
evbuf_pending = param32 & 0x3;
@@ -450,7 +450,7 @@ sclp_sync_wait(void)
timeout = 0;
if (timer_pending(&sclp_request_timer)) {
/* Get timeout TOD value */
- timeout = get_clock() +
+ timeout = get_tod_clock() +
sclp_tod_from_jiffies(sclp_request_timer.expires -
jiffies);
}
@@ -472,7 +472,7 @@ sclp_sync_wait(void)
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
if (timer_pending(&sclp_request_timer) &&
- get_clock() > timeout &&
+ get_tod_clock() > timeout &&
del_timer(&sclp_request_timer))
sclp_request_timer.function(sclp_request_timer.data);
cpu_relax();
@@ -813,7 +813,7 @@ static void sclp_check_handler(struct ext_code ext_code,
{
u32 finished_sccb;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+ inc_irq_stat(IRQEXT_SCP);
finished_sccb = param32 & 0xfffffff8;
/* Is this the interrupt we are waiting for? */
if (finished_sccb == 0)
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index c44d13f607bc..30a2255389e5 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -56,7 +56,6 @@ static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
-static u8 sclp_fac85;
static unsigned long long rzm;
static unsigned long long rnmax;
@@ -131,7 +130,8 @@ void __init sclp_facilities_detect(void)
sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
- sclp_fac85 = sccb->fac85;
+ if (sccb->fac85 & 0x02)
+ S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
@@ -171,12 +171,6 @@ unsigned long long sclp_get_rzm(void)
return rzm;
}
-u8 sclp_get_fac85(void)
-{
- return sclp_fac85;
-}
-EXPORT_SYMBOL_GPL(sclp_get_fac85);
-
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. Therefore the sccb should have valid contents.
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 877fbc37c1e7..14b4cb8abcc8 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -65,7 +65,7 @@ sclp_tty_open(struct tty_struct *tty, struct file *filp)
{
tty_port_tty_set(&sclp_port, tty);
tty->driver_data = NULL;
- tty->low_latency = 0;
+ sclp_port.low_latency = 0;
return 0;
}
@@ -342,8 +342,8 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
case CTRLCHAR_SYSRQ:
break;
case CTRLCHAR_CTRL:
- tty_insert_flip_char(tty, cchar, TTY_NORMAL);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(&sclp_port, cchar, TTY_NORMAL);
+ tty_flip_buffer_push(&sclp_port);
break;
case CTRLCHAR_NONE:
/* send (normal) input to line discipline */
@@ -351,11 +351,11 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
(strncmp((const char *) buf + count - 2, "^n", 2) &&
strncmp((const char *) buf + count - 2, "\252n", 2))) {
/* add the auto \n */
- tty_insert_flip_string(tty, buf, count);
- tty_insert_flip_char(tty, '\n', TTY_NORMAL);
+ tty_insert_flip_string(&sclp_port, buf, count);
+ tty_insert_flip_char(&sclp_port, '\n', TTY_NORMAL);
} else
- tty_insert_flip_string(tty, buf, count - 2);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&sclp_port, buf, count - 2);
+ tty_flip_buffer_push(&sclp_port);
break;
}
tty_kref_put(tty);
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index effcc8756e0a..6c92f62623be 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -461,14 +461,9 @@ sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
- struct tty_struct *tty = tty_port_tty_get(&sclp_vt220_port);
char *buffer;
unsigned int count;
- /* Ignore input if device is not open */
- if (tty == NULL)
- return;
-
buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
count = evbuf->length - sizeof(struct evbuf_header);
@@ -480,11 +475,10 @@ sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
/* Send input to line discipline */
buffer++;
count--;
- tty_insert_flip_string(tty, buffer, count);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&sclp_vt220_port, buffer, count);
+ tty_flip_buffer_push(&sclp_vt220_port);
break;
}
- tty_kref_put(tty);
}
/*
@@ -495,7 +489,7 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1) {
tty_port_tty_set(&sclp_vt220_port, tty);
- tty->low_latency = 0;
+ sclp_vt220_port.low_latency = 0;
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = 24;
tty->winsize.ws_col = 80;
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 6ae929c024ae..9aa79702b370 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1193,7 +1193,7 @@ static struct ccw_driver tape_34xx_driver = {
.set_online = tape_34xx_online,
.set_offline = tape_generic_offline,
.freeze = tape_generic_pm_suspend,
- .int_class = IOINT_TAP,
+ .int_class = IRQIO_TAP,
};
static int
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 1b0eb49f739c..327cb19ad0b0 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1656,7 +1656,7 @@ static struct ccw_driver tape_3590_driver = {
.set_offline = tape_generic_offline,
.set_online = tape_3590_online,
.freeze = tape_generic_pm_suspend,
- .int_class = IOINT_TAP,
+ .int_class = IRQIO_TAP,
};
/*
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 43ea0593bdb0..b907dba24025 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
@@ -80,6 +81,8 @@ struct tty3270 {
unsigned int highlight; /* Blink/reverse/underscore */
unsigned int f_color; /* Foreground color */
struct tty3270_line *screen;
+ unsigned int n_model, n_cols, n_rows; /* New model & size */
+ struct work_struct resize_work;
/* Input stuff. */
struct string *prompt; /* Output string for input area. */
@@ -115,6 +118,7 @@ struct tty3270 {
#define TTY_UPDATE_ALL 16 /* Recreate screen. */
static void tty3270_update(struct tty3270 *);
+static void tty3270_resize_work(struct work_struct *work);
/*
* Setup timeout for a device. On timeout trigger an update.
@@ -683,12 +687,6 @@ tty3270_alloc_view(void)
INIT_LIST_HEAD(&tp->update);
INIT_LIST_HEAD(&tp->rcl_lines);
tp->rcl_max = 20;
- tty_port_init(&tp->port);
- setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
- (unsigned long) tp);
- tasklet_init(&tp->readlet,
- (void (*)(unsigned long)) tty3270_read_tasklet,
- (unsigned long) tp->read);
for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
tp->freemem_pages[pages] = (void *)
@@ -710,6 +708,15 @@ tty3270_alloc_view(void)
tp->kbd = kbd_alloc();
if (!tp->kbd)
goto out_reset;
+
+ tty_port_init(&tp->port);
+ setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
+ (unsigned long) tp);
+ tasklet_init(&tp->readlet,
+ (void (*)(unsigned long)) tty3270_read_tasklet,
+ (unsigned long) tp->read);
+ INIT_WORK(&tp->resize_work, tty3270_resize_work);
+
return tp;
out_reset:
@@ -752,42 +759,96 @@ tty3270_free_view(struct tty3270 *tp)
/*
* Allocate tty3270 screen.
*/
-static int
-tty3270_alloc_screen(struct tty3270 *tp)
+static struct tty3270_line *
+tty3270_alloc_screen(unsigned int rows, unsigned int cols)
{
+ struct tty3270_line *screen;
unsigned long size;
int lines;
- size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
- tp->screen = kzalloc(size, GFP_KERNEL);
- if (!tp->screen)
+ size = sizeof(struct tty3270_line) * (rows - 2);
+ screen = kzalloc(size, GFP_KERNEL);
+ if (!screen)
goto out_err;
- for (lines = 0; lines < tp->view.rows - 2; lines++) {
- size = sizeof(struct tty3270_cell) * tp->view.cols;
- tp->screen[lines].cells = kzalloc(size, GFP_KERNEL);
- if (!tp->screen[lines].cells)
+ for (lines = 0; lines < rows - 2; lines++) {
+ size = sizeof(struct tty3270_cell) * cols;
+ screen[lines].cells = kzalloc(size, GFP_KERNEL);
+ if (!screen[lines].cells)
goto out_screen;
}
- return 0;
+ return screen;
out_screen:
while (lines--)
- kfree(tp->screen[lines].cells);
- kfree(tp->screen);
+ kfree(screen[lines].cells);
+ kfree(screen);
out_err:
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
/*
* Free tty3270 screen.
*/
static void
-tty3270_free_screen(struct tty3270 *tp)
+tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
{
int lines;
- for (lines = 0; lines < tp->view.rows - 2; lines++)
- kfree(tp->screen[lines].cells);
- kfree(tp->screen);
+ for (lines = 0; lines < rows - 2; lines++)
+ kfree(screen[lines].cells);
+ kfree(screen);
+}
+
+/*
+ * Resize tty3270 screen
+ */
+static void tty3270_resize_work(struct work_struct *work)
+{
+ struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
+ struct tty3270_line *screen, *oscreen;
+ struct tty_struct *tty;
+ unsigned int orows;
+ struct winsize ws;
+
+ screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
+ if (IS_ERR(screen))
+ return;
+ /* Switch to new output size */
+ spin_lock_bh(&tp->view.lock);
+ oscreen = tp->screen;
+ orows = tp->view.rows;
+ tp->view.model = tp->n_model;
+ tp->view.rows = tp->n_rows;
+ tp->view.cols = tp->n_cols;
+ tp->screen = screen;
+ free_string(&tp->freemem, tp->prompt);
+ free_string(&tp->freemem, tp->status);
+ tty3270_create_prompt(tp);
+ tty3270_create_status(tp);
+ tp->nr_up = 0;
+ while (tp->nr_lines < tp->view.rows - 2)
+ tty3270_blank_line(tp);
+ tp->update_flags = TTY_UPDATE_ALL;
+ spin_unlock_bh(&tp->view.lock);
+ tty3270_free_screen(oscreen, orows);
+ tty3270_set_timer(tp, 1);
+ /* Inform tty layer about new size */
+ tty = tty_port_tty_get(&tp->port);
+ if (!tty)
+ return;
+ ws.ws_row = tp->view.rows - 2;
+ ws.ws_col = tp->view.cols;
+ tty_do_resize(tty, &ws);
+ tty_kref_put(tty);
+}
+
+static void
+tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
+{
+ struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+ tp->n_model = model;
+ tp->n_rows = rows;
+ tp->n_cols = cols;
+ schedule_work(&tp->resize_work);
}
/*
@@ -815,7 +876,8 @@ static void
tty3270_free(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
- tty3270_free_screen(tp);
+
+ tty3270_free_screen(tp->screen, tp->view.rows);
tty3270_free_view(tp);
}
@@ -827,9 +889,8 @@ tty3270_del_views(void)
{
int i;
- for (i = 0; i < tty3270_max_index; i++) {
- struct raw3270_view *view =
- raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR);
+ for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
+ struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
if (!IS_ERR(view))
raw3270_del_view(view);
}
@@ -840,7 +901,8 @@ static struct raw3270_fn tty3270_fn = {
.deactivate = tty3270_deactivate,
.intv = (void *) tty3270_irq,
.release = tty3270_release,
- .free = tty3270_free
+ .free = tty3270_free,
+ .resize = tty3270_resize
};
/*
@@ -853,47 +915,43 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
int i, rc;
/* Check if the tty3270 is already there. */
- view = raw3270_find_view(&tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ view = raw3270_find_view(&tty3270_fn, tty->index);
if (!IS_ERR(view)) {
tp = container_of(view, struct tty3270, view);
tty->driver_data = tp;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
- tty->low_latency = 0;
+ tp->port.low_latency = 0;
/* why to reassign? */
tty_port_tty_set(&tp->port, tty);
tp->inattr = TF_INPUT;
return tty_port_install(&tp->port, driver, tty);
}
- if (tty3270_max_index < tty->index + 1)
- tty3270_max_index = tty->index + 1;
-
- /* Quick exit if there is no device for tty->index. */
- if (PTR_ERR(view) == -ENODEV)
- return -ENODEV;
+ if (tty3270_max_index < tty->index)
+ tty3270_max_index = tty->index;
/* Allocate tty3270 structure on first open. */
tp = tty3270_alloc_view();
if (IS_ERR(tp))
return PTR_ERR(tp);
- rc = raw3270_add_view(&tp->view, &tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
if (rc) {
tty3270_free_view(tp);
return rc;
}
- rc = tty3270_alloc_screen(tp);
- if (rc) {
+ tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
+ if (IS_ERR(tp->screen)) {
+ rc = PTR_ERR(tp->screen);
raw3270_put_view(&tp->view);
raw3270_del_view(&tp->view);
+ tty3270_free_view(tp);
return rc;
}
tty_port_tty_set(&tp->port, tty);
- tty->low_latency = 0;
+ tp->port.low_latency = 0;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
@@ -926,6 +984,20 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
}
/*
+ * This routine is called whenever a 3270 tty is opened.
+ */
+static int
+tty3270_open(struct tty_struct *tty, struct file *filp)
+{
+ struct tty3270 *tp = tty->driver_data;
+ struct tty_port *port = &tp->port;
+
+ port->count++;
+ tty_port_tty_set(port, tty);
+ return 0;
+}
+
+/*
* This routine is called when the 3270 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
@@ -1753,6 +1825,7 @@ static long tty3270_compat_ioctl(struct tty_struct *tty,
static const struct tty_operations tty3270_ops = {
.install = tty3270_install,
.cleanup = tty3270_cleanup,
+ .open = tty3270_open,
.close = tty3270_close,
.write = tty3270_write,
.put_char = tty3270_put_char,
@@ -1771,6 +1844,22 @@ static const struct tty_operations tty3270_ops = {
.set_termios = tty3270_set_termios
};
+void tty3270_create_cb(int minor)
+{
+ tty_register_device(tty3270_driver, minor, NULL);
+}
+
+void tty3270_destroy_cb(int minor)
+{
+ tty_unregister_device(tty3270_driver, minor);
+}
+
+struct raw3270_notifier tty3270_notifier =
+{
+ .create = tty3270_create_cb,
+ .destroy = tty3270_destroy_cb,
+};
+
/*
* 3270 tty registration code called from tty_init().
* Most kernel services (incl. kmalloc) are available at this point.
@@ -1780,23 +1869,25 @@ static int __init tty3270_init(void)
struct tty_driver *driver;
int ret;
- driver = alloc_tty_driver(RAW3270_MAXDEVS);
- if (!driver)
- return -ENOMEM;
+ driver = tty_alloc_driver(RAW3270_MAXDEVS,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_RESET_TERMIOS);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
/*
* Initialize the tty_driver structure
* Entries in tty3270_driver that are NOT initialized:
* proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
*/
- driver->driver_name = "ttyTUB";
- driver->name = "ttyTUB";
+ driver->driver_name = "tty3270";
+ driver->name = "3270/tty";
driver->major = IBM_TTY3270_MAJOR;
- driver->minor_start = RAW3270_FIRSTMINOR;
+ driver->minor_start = 0;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
- driver->flags = TTY_DRIVER_RESET_TERMIOS;
tty_set_operations(driver, &tty3270_ops);
ret = tty_register_driver(driver);
if (ret) {
@@ -1804,6 +1895,7 @@ static int __init tty3270_init(void)
return ret;
}
tty3270_driver = driver;
+ raw3270_register_notifier(&tty3270_notifier);
return 0;
}
@@ -1812,6 +1904,7 @@ tty3270_exit(void)
{
struct tty_driver *driver;
+ raw3270_unregister_notifier(&tty3270_notifier);
driver = tty3270_driver;
tty3270_driver = NULL;
tty_unregister_driver(driver);
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 73bef0bd394c..483f72ba030d 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -74,7 +74,7 @@ static struct ccw_driver ur_driver = {
.set_online = ur_set_online,
.set_offline = ur_set_offline,
.freeze = ur_pm_suspend,
- .int_class = IOINT_VMR,
+ .int_class = IRQIO_VMR,
};
static DEFINE_MUTEX(vmur_mutex);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index e3b9308b0fe3..1d61a01576d2 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -62,6 +62,7 @@ static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
+static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *ipl_block;
/*
@@ -77,6 +78,8 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
int offs, blk_num;
static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+ if (!hsa_available)
+ return -ENODATA;
if (count == 0)
return 0;
@@ -278,6 +281,15 @@ next:
}
/*
+ * Release the HSA
+ */
+static void release_hsa(void)
+{
+ diag308(DIAG308_REL_HSA, NULL);
+ hsa_available = 0;
+}
+
+/*
* Read routine for zcore character device
* First 4K are dump header
* Next 32MB are HSA Memory
@@ -363,8 +375,8 @@ static int zcore_open(struct inode *inode, struct file *filp)
static int zcore_release(struct inode *inode, struct file *filep)
{
- diag308(DIAG308_REL_HSA, NULL);
- hsa_available = 0;
+ if (hsa_available)
+ release_hsa();
return 0;
}
@@ -474,6 +486,41 @@ static const struct file_operations zcore_reipl_fops = {
.llseek = no_llseek,
};
+static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ static char str[18];
+
+ if (hsa_available)
+ snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE);
+ else
+ snprintf(str, sizeof(str), "0\n");
+ return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
+}
+
+static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char value;
+
+ if (*ppos != 0)
+ return -EPIPE;
+ if (copy_from_user(&value, buf, 1))
+ return -EFAULT;
+ if (value != '0')
+ return -EINVAL;
+ release_hsa();
+ return count;
+}
+
+static const struct file_operations zcore_hsa_fops = {
+ .owner = THIS_MODULE,
+ .write = zcore_hsa_write,
+ .read = zcore_hsa_read,
+ .open = nonseekable_open,
+ .llseek = no_llseek,
+};
+
#ifdef CONFIG_32BIT
static void __init set_lc_mask(struct save_area *map)
@@ -590,7 +637,7 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
hdr->rmem_size = memory;
hdr->mem_end = sys_info.mem_size;
hdr->num_pages = memory / PAGE_SIZE;
- hdr->tod = get_clock();
+ hdr->tod = get_tod_clock();
get_cpu_id(&hdr->cpu_id);
for (i = 0; zfcpdump_save_areas[i]; i++) {
prefix = zfcpdump_save_areas[i]->pref_reg;
@@ -658,6 +705,7 @@ static int __init zcore_init(void)
rc = check_sdias();
if (rc)
goto fail;
+ hsa_available = 1;
rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
if (rc)
@@ -714,9 +762,16 @@ static int __init zcore_init(void)
rc = -ENOMEM;
goto fail_memmap_file;
}
- hsa_available = 1;
+ zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
+ NULL, &zcore_hsa_fops);
+ if (!zcore_hsa_file) {
+ rc = -ENOMEM;
+ goto fail_reipl_file;
+ }
return 0;
+fail_reipl_file:
+ debugfs_remove(zcore_reipl_file);
fail_memmap_file:
debugfs_remove(zcore_memmap_file);
fail_file:
@@ -733,6 +788,7 @@ static void __exit zcore_exit(void)
debug_unregister(zcore_dbf);
sclp_sdias_exit();
free_page((unsigned long) ipl_block);
+ debugfs_remove(zcore_hsa_file);
debugfs_remove(zcore_reipl_file);
debugfs_remove(zcore_memmap_file);
debugfs_remove(zcore_file);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 68e80e2734a4..31ceef1beb8b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -283,7 +283,7 @@ struct chsc_sei_nt2_area {
u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
} __packed;
-#define CHSC_SEI_NT0 0ULL
+#define CHSC_SEI_NT0 (1ULL << 63)
#define CHSC_SEI_NT2 (1ULL << 61)
struct chsc_sei {
@@ -291,7 +291,8 @@ struct chsc_sei {
u32 reserved1;
u64 ntsm; /* notification type mask */
struct chsc_header response;
- u32 reserved2;
+ u32 :24;
+ u8 nt;
union {
struct chsc_sei_nt0_area nt0_area;
struct chsc_sei_nt2_area nt2_area;
@@ -434,7 +435,6 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
-#ifdef CONFIG_PCI
switch (sei_area->cc) {
case 1:
zpci_event_error(sei_area->ccdf);
@@ -443,11 +443,10 @@ static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
zpci_event_availability(sei_area->ccdf);
break;
default:
- CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
sei_area->cc);
break;
}
-#endif
}
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
@@ -470,13 +469,19 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
chsc_process_sei_scm_change(sei_area);
break;
default: /* other stuff */
- CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
break;
}
+
+ /* Check if we might have lost some information. */
+ if (sei_area->flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
}
-static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
do {
memset(sei, 0, sizeof(*sei));
@@ -487,40 +492,37 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
if (chsc(sei))
break;
- if (sei->response.code == 0x0001) {
- CIO_CRW_EVENT(2, "chsc: sei successful\n");
-
- /* Check if we might have lost some information. */
- if (sei->u.nt0_area.flags & 0x40) {
- CIO_CRW_EVENT(2, "chsc: event overflow\n");
- css_schedule_eval_all();
- }
-
- switch (sei->ntsm) {
- case CHSC_SEI_NT0:
- chsc_process_sei_nt0(&sei->u.nt0_area);
- return 1;
- case CHSC_SEI_NT2:
- chsc_process_sei_nt2(&sei->u.nt2_area);
- return 1;
- default:
- CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
- sei->ntsm);
- return 0;
- }
- } else {
+ if (sei->response.code != 0x0001) {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
sei->response.code);
break;
}
- } while (sei->u.nt0_area.flags & 0x80);
- return 0;
+ CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+ switch (sei->nt) {
+ case 0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ break;
+ case 2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+ } while (sei->u.nt0_area.flags & 0x80);
}
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
- struct chsc_sei *sei;
+ struct chsc_sei *sei = sei_page;
if (overflow) {
css_schedule_eval_all();
@@ -530,22 +532,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
- if (!sei_page)
- return;
- /* Access to sei_page is serialized through machine check handler
- * thread, so no need for locking. */
- sei = sei_page;
CIO_TRACE_EVENT(2, "prcss");
-
- /*
- * The ntsm does not allow to select NT0 and NT2 together. We need to
- * first check for NT2, than additionally for NT0...
- */
-#ifdef CONFIG_PCI
- if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
-#endif
- __chsc_process_crw(sei, CHSC_SEI_NT0);
+ chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 662dab4b93e6..227e05f674b3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -157,7 +157,7 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
#else /* CONFIG_SCM_BUS */
-#define scm_update_information() 0
+static inline int scm_update_information(void) { return 0; }
#endif /* CONFIG_SCM_BUS */
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 8f9a1a384496..facdf809113f 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -58,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
- kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;
+ inc_irq_stat(IRQIO_CSC);
/* Copy irb to provided request and set done. */
if (!request) {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 8e927b9f285f..986ef6a92a41 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -611,7 +611,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
irb = (struct irb *)&S390_lowcore.irb;
do {
- kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
+ kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
if (tpi_info->adapter_IO) {
do_adapter_IO(tpi_info->isc);
continue;
@@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (!sch) {
/* Clear pending interrupt condition. */
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
tsch(tpi_info->schid, irb);
continue;
}
@@ -633,9 +633,9 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
} else
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
spin_unlock(sch->lock);
/*
* Are more interrupts pending?
@@ -678,7 +678,7 @@ static void cio_tsch(struct subchannel *sch)
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
if (!irq_context) {
irq_exit();
_local_bh_enable();
@@ -962,9 +962,9 @@ static void css_reset(void)
atomic_inc(&chpid_reset_count);
}
/* Wait for machine check for all channel paths. */
- timeout = get_clock() + (RCHP_TIMEOUT << 12);
+ timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
while (atomic_read(&chpid_reset_count) != 0) {
- if (get_clock() > timeout)
+ if (get_tod_clock() > timeout)
break;
cpu_relax();
}
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index c9fc61c0a866..4495e0627a40 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -33,7 +33,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/timex.h> /* get_clock() */
+#include <linux/timex.h> /* get_tod_clock() */
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
memcpy(cmb_data->last_block, hw_block, cmb_data->size);
memcpy(reference_buf, hw_block, cmb_data->size);
} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
- cmb_data->last_update = get_clock();
+ cmb_data->last_update = get_tod_clock();
kfree(reference_buf);
return 0;
}
@@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
cmb_data->last_update = 0;
}
- cdev->private->cmb_start_time = get_clock();
+ cdev->private->cmb_start_time = get_tod_clock();
spin_unlock_irq(cdev->ccwlock);
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fd00afd8b850..a239237d43f3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -780,7 +780,7 @@ static int __init setup_css(int nr)
css->cssid = nr;
dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
- tod_high = (u32) (get_clock() >> 32);
+ tod_high = (u32) (get_tod_clock() >> 32);
css_generate_pgid(css, tod_high);
return 0;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6995cff44636..c6767f5a58b2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -632,6 +632,14 @@ initiate_logging(struct device *dev, struct device_attribute *attr,
return count;
}
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%02x\n", sch->vpm);
+}
+
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -640,11 +648,13 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
&dev_attr_logging.attr,
+ &dev_attr_vpm.attr,
NULL,
};
@@ -758,7 +768,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
cdev->private->cdev = cdev;
- cdev->private->int_class = IOINT_CIO;
+ cdev->private->int_class = IRQIO_CIO;
atomic_set(&cdev->private->onoff, 0);
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
@@ -1023,7 +1033,7 @@ static void io_subchannel_irq(struct subchannel *sch)
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
else
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
}
void io_subchannel_init_config(struct subchannel *sch)
@@ -1634,7 +1644,7 @@ ccw_device_probe_console(void)
memset(&console_private, 0, sizeof(struct ccw_device_private));
console_cdev.private = &console_private;
console_private.cdev = &console_cdev;
- console_private.int_class = IOINT_CIO;
+ console_private.int_class = IRQIO_CIO;
ret = ccw_device_console_enable(&console_cdev, sch);
if (ret) {
cio_release_console();
@@ -1715,13 +1725,13 @@ ccw_device_probe (struct device *dev)
if (cdrv->int_class != 0)
cdev->private->int_class = cdrv->int_class;
else
- cdev->private->int_class = IOINT_CIO;
+ cdev->private->int_class = IRQIO_CIO;
ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
if (ret) {
cdev->drv = NULL;
- cdev->private->int_class = IOINT_CIO;
+ cdev->private->int_class = IRQIO_CIO;
return ret;
}
@@ -1755,7 +1765,7 @@ ccw_device_remove (struct device *dev)
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
- cdev->private->int_class = IOINT_CIO;
+ cdev->private->int_class = IRQIO_CIO;
return 0;
}
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 2e575cff9845..7d4ecb65db00 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -61,11 +61,10 @@ dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
if (dev_event == DEV_EVENT_INTERRUPT) {
if (state == DEV_STATE_ONLINE)
- kstat_cpu(smp_processor_id()).
- irqs[cdev->private->int_class]++;
+ inc_irq_stat(cdev->private->int_class);
else if (state != DEV_STATE_CMFCHANGE &&
state != DEV_STATE_CMFUPDATE)
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
}
dev_jumptable[state][dev_event](cdev, dev_event);
}
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1bb1d00095af..c7638c543250 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
cc = stsch_err(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
- "device information:\n", get_clock());
+ "device information:\n", get_tod_clock());
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
orb, sizeof(*orb), 0);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287f66c1..37ada05e82a5 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -23,6 +23,8 @@
#define PGID_RETRIES 256
#define PGID_TIMEOUT (10 * HZ)
+static void verify_start(struct ccw_device *cdev);
+
/*
* Process path verification data and report result.
*/
@@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- /* Adjust lpm. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+ ~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
nop_build_cp(cdev);
@@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- if (rc == 0)
+ switch (rc) {
+ case 0:
sch->vpm |= req->lpm;
- else if (rc != -EACCES)
+ break;
+ case -ETIME:
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
goto err;
+ }
+ /* Continue on the next path. */
req->lpm >>= 1;
nop_do(cdev);
return;
@@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
req->cp = cp;
}
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ if (rc) {
+ /* We don't know the path groups' state. Abort. */
+ verify_done(cdev, rc);
+ return;
+ }
+ /*
+ * Path groups have been reset. Restart path verification but
+ * leave paths in path_noirq_mask out.
+ */
+ cdev->private->flags.pgid_unknown = 0;
+ verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+ id->ssid, id->devno, cdev->private->pgid_valid_mask,
+ cdev->private->path_noirq_mask);
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam;
+ req->callback = pgid_wipeout_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
+
/*
* Perform establish/resign SET PGID on a single path.
*/
@@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev)
return;
out_nopath:
+ if (cdev->private->flags.pgid_unknown) {
+ /* At least one SPID could be partially done. */
+ pgid_wipeout_start(cdev);
+ return;
+ }
verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
-static void verify_start(struct ccw_device *cdev);
-
/*
* Process SET PGID request result for a single path.
*/
@@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc)
case 0:
sch->vpm |= req->lpm & sch->opm;
break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
break;
case -EOPNOTSUPP:
if (cdev->private->flags.mpath) {
@@ -330,8 +392,9 @@ static void snid_done(struct ccw_device *cdev, int rc)
else {
donepm = pgid_to_donepm(cdev);
sch->vpm = donepm & sch->opm;
- cdev->private->pgid_todo_mask &= ~donepm;
cdev->private->pgid_reset_mask |= reset;
+ cdev->private->pgid_todo_mask &=
+ ~(donepm | cdev->private->path_noirq_mask);
pgid_fill(cdev, pgid);
}
out:
@@ -341,6 +404,10 @@ out:
cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
case 0:
+ if (cdev->private->flags.pgid_unknown) {
+ pgid_wipeout_start(cdev);
+ return;
+ }
/* Anything left to do? */
if (cdev->private->pgid_todo_mask == 0) {
verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
@@ -384,9 +451,10 @@ static void snid_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
+ int ret;
- /* Adjust lpm if paths are not set in pam. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+ ~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
snid_build_cp(cdev);
@@ -394,7 +462,13 @@ static void snid_do(struct ccw_device *cdev)
return;
out_nopath:
- snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+ if (cdev->private->pgid_valid_mask)
+ ret = 0;
+ else if (cdev->private->path_noirq_mask)
+ ret = -ETIME;
+ else
+ ret = -EACCES;
+ snid_done(cdev, ret);
}
/*
@@ -404,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_request *req = &cdev->private->req;
- if (rc == 0)
+ switch (rc) {
+ case 0:
cdev->private->pgid_valid_mask |= req->lpm;
- else if (rc != -EACCES)
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
goto err;
+ }
+ /* Continue on the next path. */
req->lpm >>= 1;
snid_do(cdev);
return;
@@ -427,6 +512,13 @@ static void verify_start(struct ccw_device *cdev)
sch->vpm = 0;
sch->lpm = sch->schib.pmcw.pam;
+
+ /* Initialize PGID data. */
+ memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+ cdev->private->path_notoper_mask = 0;
+
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
@@ -459,14 +551,8 @@ static void verify_start(struct ccw_device *cdev)
*/
void ccw_device_verify_start(struct ccw_device *cdev)
{
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
-
CIO_TRACE_EVENT(4, "vrfy");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
- /* Initialize PGID data. */
- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
- cdev->private->pgid_valid_mask = 0;
- cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
/*
* Initialize pathgroup and multipath state with target values.
* They may change in the course of path verification.
@@ -474,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev)
cdev->private->flags.pgroup = cdev->private->options.pgroup;
cdev->private->flags.mpath = cdev->private->options.mpath;
cdev->private->flags.doverify = 0;
+ cdev->private->path_noirq_mask = 0;
verify_start(cdev);
}
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 6c9673400464..d9eddcba7e88 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -139,7 +139,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
EADM_LOG(6, "irq");
EADM_LOG_HEX(6, irb, sizeof(*irb));
- kstat_cpu(smp_processor_id()).irqs[IOINT_ADM]++;
+ inc_irq_stat(IRQIO_ADM);
if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
&& scsw->eswf == 1 && irb->esw.eadm.erw.r)
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 76253dfcc1be..b108f4a5c7dd 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -126,6 +126,10 @@ struct ccw_device_private {
u8 pgid_valid_mask; /* mask of valid PGIDs */
u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
u8 pgid_reset_mask; /* mask of PGIDs which were reset */
+ u8 path_noirq_mask; /* mask of paths for which no irq was
+ received */
+ u8 path_notoper_mask; /* mask of paths which were found
+ not operable */
u8 path_gone_mask; /* mask of paths, that became unavailable */
u8 path_new_mask; /* mask of paths, that became available */
struct {
@@ -145,6 +149,7 @@ struct ccw_device_private {
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
+ unsigned int pgid_unknown:1;/* unknown pgid state */
unsigned int initialized:1; /* set if initial reference held */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 1671d3461f29..abc550e5dd35 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
retries++;
if (!start_time) {
- start_time = get_clock();
+ start_time = get_tod_clock();
goto again;
}
- if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_clock();
+ q->timestamp = get_tod_clock();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -563,7 +563,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if (bufnr != q->last_move) {
q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_clock();
+ q->u.in.timestamp = get_tod_clock();
return 1;
} else
return 0;
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
@@ -772,7 +772,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_clock();
+ q->timestamp = get_tod_clock();
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index bdb394b066fc..bde5255200dc 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -182,7 +182,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
struct qdio_q *q;
last_ai_time = S390_lowcore.int_clock;
- kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
+ inc_irq_stat(IRQIO_QAI);
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 7b865a7300e6..b8b340ac5332 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1272,7 +1272,7 @@ out:
static void ap_interrupt_handler(void *unused1, void *unused2)
{
- kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
+ inc_irq_stat(IRQIO_APB);
tasklet_schedule(&ap_tasklet);
}
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index b846b6c4130a..03a15e016778 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -392,7 +392,7 @@ static void kvm_extint_handler(struct ext_code ext_code,
if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
+ inc_irq_stat(IRQEXT_VRT);
/* The LSB might be overloaded, we have to mask it */
vq = (struct virtqueue *)(param64 & ~1UL);
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 3217dfe5cb8b..2029b6caa595 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -818,7 +818,7 @@ static struct ccw_driver virtio_ccw_driver = {
.set_offline = virtio_ccw_offline,
.set_online = virtio_ccw_online,
.notify = virtio_ccw_cio_notify,
- .int_class = IOINT_VIR,
+ .int_class = IRQIO_VIR,
};
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index dfda748c4000..8b3f55991805 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -74,8 +74,8 @@ config QETH
depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
help
This driver supports the IBM System z OSA Express adapters
- in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
- interfaces in QDIO and HIPER mode.
+ in QDIO mode (all media types), HiperSockets interfaces and z/VM
+ virtual NICs for Guest LAN and VSWITCH.
For details please refer to the documentation provided by IBM at
<http://www.ibm.com/developerworks/linux/linux390>
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 5c70a6599578..83bc9c5fa0c1 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -282,7 +282,7 @@ static struct ccw_driver claw_ccw_driver = {
.ids = claw_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
- .int_class = IOINT_CLW,
+ .int_class = IRQIO_CLW,
};
static ssize_t claw_driver_group_store(struct device_driver *ddrv,
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 817b68925ddd..676f12049a36 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1755,7 +1755,7 @@ static struct ccw_driver ctcm_ccw_driver = {
.ids = ctcm_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
- .int_class = IOINT_CTC,
+ .int_class = IRQIO_CTC,
};
static struct ccwgroup_driver ctcm_group_driver = {
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 2ca0f1dd7a00..c645dc9e98af 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2384,7 +2384,7 @@ static struct ccw_driver lcs_ccw_driver = {
.ids = lcs_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
- .int_class = IOINT_LCS,
+ .int_class = IRQIO_LCS,
};
/**
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 480fbeab0256..d87961d4c0de 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -678,6 +678,7 @@ struct qeth_card_options {
int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
+ enum qeth_ipa_isolation_modes prev_isolation;
int sniffer;
enum qeth_cq cq;
char hsuid[9];
@@ -789,6 +790,7 @@ struct qeth_card {
struct qeth_rx rx;
struct delayed_work buffer_reclaim_work;
int reclaim_index;
+ struct work_struct close_dev_work;
};
struct qeth_card_list_struct {
@@ -816,7 +818,7 @@ static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
static inline int qeth_get_micros(void)
{
- return (int) (get_clock() >> 12);
+ return (int) (get_tod_clock() >> 12);
}
static inline int qeth_get_ip_version(struct sk_buff *skb)
@@ -909,9 +911,6 @@ struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_mdio_read(struct net_device *, int, int);
int qeth_snmp_command(struct qeth_card *, char __user *);
int qeth_query_oat_command(struct qeth_card *, char __user *);
-struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
-int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
- unsigned long);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
@@ -928,12 +927,13 @@ void qeth_core_get_strings(struct net_device *, u32, u8 *);
void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
-int qeth_set_access_ctrl_online(struct qeth_card *card);
+int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
void qeth_trace_features(struct qeth_card *);
+void qeth_close_dev(struct qeth_card *);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 638a57f4d8a1..0d8cdff81813 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -68,6 +68,27 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
enum qeth_qdio_buffer_states newbufstate);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
+static struct workqueue_struct *qeth_wq;
+
+static void qeth_close_dev_handler(struct work_struct *work)
+{
+ struct qeth_card *card;
+
+ card = container_of(work, struct qeth_card, close_dev_work);
+ QETH_CARD_TEXT(card, 2, "cldevhdl");
+ rtnl_lock();
+ dev_close(card->dev);
+ rtnl_unlock();
+ ccwgroup_set_offline(card->gdev);
+}
+
+void qeth_close_dev(struct qeth_card *card)
+{
+ QETH_CARD_TEXT(card, 2, "cldevsubm");
+ queue_work(qeth_wq, &card->close_dev_work);
+}
+EXPORT_SYMBOL_GPL(qeth_close_dev);
+
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
@@ -542,11 +563,23 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
} else {
switch (cmd->hdr.command) {
case IPA_CMD_STOPLAN:
- dev_warn(&card->gdev->dev,
+ if (cmd->hdr.return_code ==
+ IPA_RC_VEPA_TO_VEB_TRANSITION) {
+ dev_err(&card->gdev->dev,
+ "Interface %s is down because the "
+ "adjacent port is no longer in "
+ "reflective relay mode\n",
+ QETH_CARD_IFNAME(card));
+ qeth_close_dev(card);
+ } else {
+ dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID"
" 0x%X failed\n",
QETH_CARD_IFNAME(card),
card->info.chpid);
+ qeth_issue_ipa_msg(cmd,
+ cmd->hdr.return_code, card);
+ }
card->lan_online = 0;
if (card->dev && netif_carrier_ok(card->dev))
netif_carrier_off(card->dev);
@@ -1416,6 +1449,7 @@ static int qeth_setup_card(struct qeth_card *card)
/* init QDIO stuff */
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
+ INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
return 0;
}
@@ -2868,7 +2902,7 @@ int qeth_send_startlan(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_send_startlan);
-int qeth_default_setadapterparms_cb(struct qeth_card *card,
+static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
@@ -2881,7 +2915,6 @@ int qeth_default_setadapterparms_cb(struct qeth_card *card,
cmd->data.setadapterparms.hdr.return_code;
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb);
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -2901,7 +2934,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
}
-struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
+static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
__u32 command, __u32 cmdlen)
{
struct qeth_cmd_buffer *iob;
@@ -2917,7 +2950,6 @@ struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
return iob;
}
-EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd);
int qeth_query_setadapterparms(struct qeth_card *card)
{
@@ -4059,6 +4091,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
struct qeth_set_access_ctrl *access_ctrl_req;
+ int fallback = *(int *)reply->param;
QETH_CARD_TEXT(card, 4, "setaccb");
@@ -4068,12 +4101,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
cmd->data.setadapterparms.hdr.return_code);
+ if (cmd->data.setadapterparms.hdr.return_code !=
+ SET_ACCESS_CTRL_RC_SUCCESS)
+ QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
+ card->gdev->dev.kobj.name,
+ access_ctrl_req->subcmd_code,
+ cmd->data.setadapterparms.hdr.return_code);
switch (cmd->data.setadapterparms.hdr.return_code) {
case SET_ACCESS_CTRL_RC_SUCCESS:
- case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
- case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
- {
- card->options.isolation = access_ctrl_req->subcmd_code;
if (card->options.isolation == ISOLATION_MODE_NONE) {
dev_info(&card->gdev->dev,
"QDIO data connection isolation is deactivated\n");
@@ -4081,72 +4116,64 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
dev_info(&card->gdev->dev,
"QDIO data connection isolation is activated\n");
}
- QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
- card->gdev->dev.kobj.name,
- access_ctrl_req->subcmd_code,
- cmd->data.setadapterparms.hdr.return_code);
break;
- }
+ case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
+ QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
+ "deactivated\n", dev_name(&card->gdev->dev));
+ if (fallback)
+ card->options.isolation = card->options.prev_isolation;
+ break;
+ case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
+ QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
+ " activated\n", dev_name(&card->gdev->dev));
+ if (fallback)
+ card->options.isolation = card->options.prev_isolation;
+ break;
case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
- {
- QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
- card->gdev->dev.kobj.name,
- access_ctrl_req->subcmd_code,
- cmd->data.setadapterparms.hdr.return_code);
dev_err(&card->gdev->dev, "Adapter does not "
"support QDIO data connection isolation\n");
-
- /* ensure isolation mode is "none" */
- card->options.isolation = ISOLATION_MODE_NONE;
break;
- }
case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
- {
- QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
- card->gdev->dev.kobj.name,
- access_ctrl_req->subcmd_code,
- cmd->data.setadapterparms.hdr.return_code);
dev_err(&card->gdev->dev,
"Adapter is dedicated. "
"QDIO data connection isolation not supported\n");
-
- /* ensure isolation mode is "none" */
- card->options.isolation = ISOLATION_MODE_NONE;
+ if (fallback)
+ card->options.isolation = card->options.prev_isolation;
break;
- }
case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
- {
- QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
- card->gdev->dev.kobj.name,
- access_ctrl_req->subcmd_code,
- cmd->data.setadapterparms.hdr.return_code);
dev_err(&card->gdev->dev,
"TSO does not permit QDIO data connection isolation\n");
-
- /* ensure isolation mode is "none" */
- card->options.isolation = ISOLATION_MODE_NONE;
+ if (fallback)
+ card->options.isolation = card->options.prev_isolation;
+ break;
+ case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
+ dev_err(&card->gdev->dev, "The adjacent switch port does not "
+ "support reflective relay mode\n");
+ if (fallback)
+ card->options.isolation = card->options.prev_isolation;
+ break;
+ case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
+ dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
+ "enabled at the adjacent switch port");
+ if (fallback)
+ card->options.isolation = card->options.prev_isolation;
+ break;
+ case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
+ dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
+ "at the adjacent switch failed\n");
break;
- }
default:
- {
/* this should never happen */
- QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
- "==UNKNOWN\n",
- card->gdev->dev.kobj.name,
- access_ctrl_req->subcmd_code,
- cmd->data.setadapterparms.hdr.return_code);
-
- /* ensure isolation mode is "none" */
- card->options.isolation = ISOLATION_MODE_NONE;
+ if (fallback)
+ card->options.isolation = card->options.prev_isolation;
break;
}
- }
qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
return 0;
}
static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
- enum qeth_ipa_isolation_modes isolation)
+ enum qeth_ipa_isolation_modes isolation, int fallback)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -4166,12 +4193,12 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
access_ctrl_req->subcmd_code = isolation;
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
- NULL);
+ &fallback);
QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
return rc;
}
-int qeth_set_access_ctrl_online(struct qeth_card *card)
+int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
{
int rc = 0;
@@ -4181,12 +4208,13 @@ int qeth_set_access_ctrl_online(struct qeth_card *card)
card->info.type == QETH_CARD_TYPE_OSX) &&
qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
rc = qeth_setadpparms_set_access_ctrl(card,
- card->options.isolation);
+ card->options.isolation, fallback);
if (rc) {
QETH_DBF_MESSAGE(3,
"IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
card->gdev->dev.kobj.name,
rc);
+ rc = -EOPNOTSUPP;
}
} else if (card->options.isolation != ISOLATION_MODE_NONE) {
card->options.isolation = ISOLATION_MODE_NONE;
@@ -4672,7 +4700,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
init_data.output_sbal_state_array = card->qdio.out_bufstates;
init_data.scan_threshold =
- (card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
+ (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -4765,14 +4793,14 @@ static struct ccw_driver qeth_ccw_driver = {
int qeth_core_hardsetup_card(struct qeth_card *card)
{
- int retries = 0;
+ int retries = 3;
int rc;
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
atomic_set(&card->force_alloc_skb, 0);
qeth_update_from_chp_desc(card);
retry:
- if (retries)
+ if (retries < 3)
QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
dev_name(&card->gdev->dev));
ccw_device_set_offline(CARD_DDEV(card));
@@ -4794,7 +4822,7 @@ retriable:
return rc;
} else if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (++retries > 3)
+ if (--retries < 0)
goto out;
else
goto retry;
@@ -5094,13 +5122,81 @@ static const struct device_type qeth_osn_devtype = {
.groups = qeth_osn_attr_groups,
};
+#define DBF_NAME_LEN 20
+
+struct qeth_dbf_entry {
+ char dbf_name[DBF_NAME_LEN];
+ debug_info_t *dbf_info;
+ struct list_head dbf_list;
+};
+
+static LIST_HEAD(qeth_dbf_list);
+static DEFINE_MUTEX(qeth_dbf_list_mutex);
+
+static debug_info_t *qeth_get_dbf_entry(char *name)
+{
+ struct qeth_dbf_entry *entry;
+ debug_info_t *rc = NULL;
+
+ mutex_lock(&qeth_dbf_list_mutex);
+ list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
+ if (strcmp(entry->dbf_name, name) == 0) {
+ rc = entry->dbf_info;
+ break;
+ }
+ }
+ mutex_unlock(&qeth_dbf_list_mutex);
+ return rc;
+}
+
+static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
+{
+ struct qeth_dbf_entry *new_entry;
+
+ card->debug = debug_register(name, 2, 1, 8);
+ if (!card->debug) {
+ QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
+ goto err;
+ }
+ if (debug_register_view(card->debug, &debug_hex_ascii_view))
+ goto err_dbg;
+ new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
+ if (!new_entry)
+ goto err_dbg;
+ strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
+ new_entry->dbf_info = card->debug;
+ mutex_lock(&qeth_dbf_list_mutex);
+ list_add(&new_entry->dbf_list, &qeth_dbf_list);
+ mutex_unlock(&qeth_dbf_list_mutex);
+
+ return 0;
+
+err_dbg:
+ debug_unregister(card->debug);
+err:
+ return -ENOMEM;
+}
+
+static void qeth_clear_dbf_list(void)
+{
+ struct qeth_dbf_entry *entry, *tmp;
+
+ mutex_lock(&qeth_dbf_list_mutex);
+ list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
+ list_del(&entry->dbf_list);
+ debug_unregister(entry->dbf_info);
+ kfree(entry);
+ }
+ mutex_unlock(&qeth_dbf_list_mutex);
+}
+
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
struct device *dev;
int rc;
unsigned long flags;
- char dbf_name[20];
+ char dbf_name[DBF_NAME_LEN];
QETH_DBF_TEXT(SETUP, 2, "probedev");
@@ -5119,13 +5215,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
dev_name(&gdev->dev));
- card->debug = debug_register(dbf_name, 2, 1, 8);
+ card->debug = qeth_get_dbf_entry(dbf_name);
if (!card->debug) {
- QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
- rc = -ENOMEM;
- goto err_card;
+ rc = qeth_add_dbf_entry(card, dbf_name);
+ if (rc)
+ goto err_card;
}
- debug_register_view(card->debug, &debug_hex_ascii_view);
card->read.ccwdev = gdev->cdev[0];
card->write.ccwdev = gdev->cdev[1];
@@ -5139,12 +5234,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
rc = qeth_determine_card_type(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- goto err_dbf;
+ goto err_card;
}
rc = qeth_setup_card(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- goto err_dbf;
+ goto err_card;
}
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -5157,7 +5252,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
case QETH_CARD_TYPE_OSM:
rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
if (rc)
- goto err_dbf;
+ goto err_card;
rc = card->discipline->setup(card->gdev);
if (rc)
goto err_disc;
@@ -5176,8 +5271,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
err_disc:
qeth_core_free_discipline(card);
-err_dbf:
- debug_unregister(card->debug);
err_card:
qeth_core_free_card(card);
err_dev:
@@ -5197,7 +5290,6 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_core_free_discipline(card);
}
- debug_unregister(card->debug);
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_del(&card->list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
@@ -5444,17 +5536,14 @@ void qeth_core_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct qeth_card *card = dev->ml_priv;
- if (card->options.layer2)
- strcpy(info->driver, "qeth_l2");
- else
- strcpy(info->driver, "qeth_l3");
- strcpy(info->version, "1.0");
- strcpy(info->fw_version, card->info.mcl_level);
- sprintf(info->bus_info, "%s/%s/%s",
- CARD_RDEV_ID(card),
- CARD_WDEV_ID(card),
- CARD_DDEV_ID(card));
+ strlcpy(info->driver, card->options.layer2 ? "qeth_l2" : "qeth_l3",
+ sizeof(info->driver));
+ strlcpy(info->version, "1.0", sizeof(info->version));
+ strlcpy(info->fw_version, card->info.mcl_level,
+ sizeof(info->fw_version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
+ CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
}
EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
@@ -5554,9 +5643,12 @@ static int __init qeth_core_init(void)
pr_info("loading core functions\n");
INIT_LIST_HEAD(&qeth_core_card_list.list);
+ INIT_LIST_HEAD(&qeth_dbf_list);
rwlock_init(&qeth_core_card_list.rwlock);
mutex_init(&qeth_mod_mutex);
+ qeth_wq = create_singlethread_workqueue("qeth_wq");
+
rc = qeth_register_dbf_views();
if (rc)
goto out_err;
@@ -5603,6 +5695,8 @@ out_err:
static void __exit qeth_core_exit(void)
{
+ qeth_clear_dbf_list();
+ destroy_workqueue(qeth_wq);
ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccw_driver_unregister(&qeth_ccw_driver);
kmem_cache_destroy(qeth_qdio_outbuf_cache);
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 5cebfddb86bd..06c55780005e 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -204,6 +204,7 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
+ {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"},
{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
{IPA_RC_ENOMEM, "Memory problem"},
{IPA_RC_FFFF, "Unknown Error"}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 3690bbf2cb3c..07085d55f9a1 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -177,6 +177,7 @@ enum qeth_ipa_return_codes {
IPA_RC_INVALID_SETRTG_INDICATOR = 0xe012,
IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013,
IPA_RC_LAN_OFFLINE = 0xe080,
+ IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090,
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
IPA_RC_ENOMEM = 0xfffe,
IPA_RC_FFFF = 0xffff
@@ -269,6 +270,9 @@ enum qeth_ipa_set_access_mode_rc {
SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010,
SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014,
SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018,
+ SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED = 0x0022,
+ SET_ACCESS_CTRL_RC_REFLREL_FAILED = 0x0024,
+ SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED = 0x0028,
};
@@ -386,6 +390,7 @@ struct qeth_snmp_ureq {
/* SET_ACCESS_CONTROL: same format for request and reply */
struct qeth_set_access_ctrl {
__u32 subcmd_code;
+ __u8 reserved[8];
} __attribute__((packed));
struct qeth_query_oat {
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9655dc0ea0ec..425c0ecf1f3b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -513,10 +513,11 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
rc = count;
/* defer IP assist if device is offline (until discipline->set_online)*/
+ card->options.prev_isolation = card->options.isolation;
card->options.isolation = isolation;
if (card->state == CARD_STATE_SOFTSETUP ||
card->state == CARD_STATE_UP) {
- int ipa_rc = qeth_set_access_ctrl_online(card);
+ int ipa_rc = qeth_set_access_ctrl_online(card, 1);
if (ipa_rc != 0)
rc = ipa_rc;
}
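
As an illustrative aside (not part of the patch): the sysfs store above records the previous isolation mode and calls qeth_set_access_ctrl_online() with fallback=1, and the set-access-control callback restores prev_isolation whenever the adapter rejects the new mode. A minimal standalone C sketch of that save-and-restore pattern, with hypothetical names (apply_isolation, set_isolation) standing in for the adapter command:

    #include <stdio.h>

    enum isolation_mode { ISOLATION_NONE, ISOLATION_DROP, ISOLATION_FWD };

    struct options {
        enum isolation_mode isolation;
        enum isolation_mode prev_isolation;
    };

    /* Hypothetical stand-in for the adapter command; it rejects
     * ISOLATION_FWD just to exercise the fallback path. */
    static int apply_isolation(enum isolation_mode mode)
    {
        return (mode == ISOLATION_FWD) ? -1 : 0;
    }

    /* Save the old mode, try the new one, and roll back on failure when
     * the caller asked for fallback behaviour (as the sysfs store does). */
    static int set_isolation(struct options *opts, enum isolation_mode mode,
                             int fallback)
    {
        opts->prev_isolation = opts->isolation;
        opts->isolation = mode;
        if (apply_isolation(mode)) {
            if (fallback)
                opts->isolation = opts->prev_isolation;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct options opts = { ISOLATION_NONE, ISOLATION_NONE };

        if (set_isolation(&opts, ISOLATION_FWD, 1))
            printf("rejected, rolled back to mode %d\n", opts.isolation);
        return 0;
    }

The online-setup paths in the patch use fallback=0 instead, so a rejected mode is treated as a hard error rather than silently reverted.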
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 73195553f84b..d690166efeaf 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1025,9 +1025,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
- (card->info.type == QETH_CARD_TYPE_OSX))
+ (card->info.type == QETH_CARD_TYPE_OSX)) {
/* configure isolation level */
- qeth_set_access_ctrl_online(card);
+ rc = qeth_set_access_ctrl_online(card, 0);
+ if (rc) {
+ rc = -ENODEV;
+ goto out_remove;
+ }
+ }
if (card->info.type != QETH_CARD_TYPE_OSN &&
card->info.type != QETH_CARD_TYPE_OSM)
@@ -1144,12 +1149,9 @@ static int qeth_l2_recover(void *ptr)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
else {
- if (rtnl_trylock()) {
- dev_close(card->dev);
- rtnl_unlock();
- dev_warn(&card->gdev->dev, "The qeth device driver "
+ qeth_close_dev(card);
+ dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- }
}
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6e5eef01e667..091ca0efa1c5 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1449,7 +1449,8 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
- qeth_set_access_ctrl_online(card); /* go on*/
+ if (qeth_set_access_ctrl_online(card, 0))
+ return -EIO;
qeth_l3_start_ipa_arp_processing(card); /* go on*/
qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
qeth_l3_start_ipa_source_mac(card); /* go on*/
@@ -1640,6 +1641,7 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
}
}
+/* called with rcu_read_lock */
static void qeth_l3_add_vlan_mc(struct qeth_card *card)
{
struct in_device *in_dev;
@@ -1652,19 +1654,14 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
struct net_device *netdev;
- rcu_read_lock();
netdev = __vlan_find_dev_deep(card->dev, vid);
- rcu_read_unlock();
if (netdev == NULL ||
!(netdev->flags & IFF_UP))
continue;
- in_dev = in_dev_get(netdev);
+ in_dev = __in_dev_get_rcu(netdev);
if (!in_dev)
continue;
- rcu_read_lock();
qeth_l3_add_mc(card, in_dev);
- rcu_read_unlock();
- in_dev_put(in_dev);
}
}
@@ -1673,14 +1670,14 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
struct in_device *in4_dev;
QETH_CARD_TEXT(card, 4, "chkmcv4");
- in4_dev = in_dev_get(card->dev);
- if (in4_dev == NULL)
- return;
rcu_read_lock();
+ in4_dev = __in_dev_get_rcu(card->dev);
+ if (in4_dev == NULL)
+ goto unlock;
qeth_l3_add_mc(card, in4_dev);
qeth_l3_add_vlan_mc(card);
+unlock:
rcu_read_unlock();
- in_dev_put(in4_dev);
}
#ifdef CONFIG_QETH_IPV6
@@ -1705,6 +1702,7 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
}
}
+/* called with rcu_read_lock */
static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
{
struct inet6_dev *in_dev;
@@ -1741,10 +1739,12 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
in6_dev = in6_dev_get(card->dev);
if (in6_dev == NULL)
return;
+ rcu_read_lock();
read_lock_bh(&in6_dev->lock);
qeth_l3_add_mc6(card, in6_dev);
qeth_l3_add_vlan_mc6(card);
read_unlock_bh(&in6_dev->lock);
+ rcu_read_unlock();
in6_dev_put(in6_dev);
}
#endif /* CONFIG_QETH_IPV6 */
@@ -1813,8 +1813,10 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
unsigned short vid)
{
+ rcu_read_lock();
qeth_l3_free_vlan_addresses4(card, vid);
qeth_l3_free_vlan_addresses6(card, vid);
+ rcu_read_unlock();
}
static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
@@ -3387,8 +3389,10 @@ contin:
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
if (!card->options.sniffer) {
rc = qeth_l3_start_ipassists(card);
- if (rc)
+ if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ goto out_remove;
+ }
rc = qeth_l3_setrouting_v4(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
@@ -3510,12 +3514,9 @@ static int qeth_l3_recover(void *ptr)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
else {
- if (rtnl_trylock()) {
- dev_close(card->dev);
- rtnl_unlock();
- dev_warn(&card->gdev->dev, "The qeth device driver "
+ qeth_close_dev(card);
+ dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- }
}
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c96320d79fbc..c7e148f33b2a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -727,7 +727,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
zfcp_reqlist_add(adapter->req_list, req);
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
- req->issued = get_clock();
+ req->issued = get_tod_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
/* lookup request again, list might have changed */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 50b5615848f6..665e3cfaaf85 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -68,7 +68,7 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
unsigned long long now, span;
int used;
- now = get_clock_monotonic();
+ now = get_tod_clock_monotonic();
span = (now - qdio->req_q_time) >> 12;
used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
qdio->req_q_util += used * span;
diff --git a/drivers/sbus/char/Kconfig b/drivers/sbus/char/Kconfig
index 73cde85d04d8..5ba684f73ab8 100644
--- a/drivers/sbus/char/Kconfig
+++ b/drivers/sbus/char/Kconfig
@@ -21,8 +21,7 @@ config OBP_FLASH
able to upgrade the OBP firmware, say Y here.
config TADPOLE_TS102_UCTRL
- tristate "Tadpole TS102 Microcontroller support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Tadpole TS102 Microcontroller support"
help
Say Y here to directly support the TS102 Microcontroller interface
on the Tadpole Sparcbook 3. This device handles power-management
@@ -30,8 +29,8 @@ config TADPOLE_TS102_UCTRL
monitors and mice.
config SUN_JSFLASH
- tristate "JavaStation OS Flash SIMM (EXPERIMENTAL)"
- depends on EXPERIMENTAL && SPARC32
+ tristate "JavaStation OS Flash SIMM"
+ depends on SPARC32
help
If you say Y here, you will be able to boot from your JavaStation's
Flash memory.
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 542668292900..1a9d1e3ce64c 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -355,7 +355,7 @@ fail:
extern int bbc_envctrl_init(struct bbc_i2c_bus *bp);
extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp);
-static int __devinit bbc_i2c_probe(struct platform_device *op)
+static int bbc_i2c_probe(struct platform_device *op)
{
struct bbc_i2c_bus *bp;
int err, index = 0;
@@ -379,7 +379,7 @@ static int __devinit bbc_i2c_probe(struct platform_device *op)
return err;
}
-static int __devexit bbc_i2c_remove(struct platform_device *op)
+static int bbc_i2c_remove(struct platform_device *op)
{
struct bbc_i2c_bus *bp = dev_get_drvdata(&op->dev);
@@ -413,7 +413,7 @@ static struct platform_driver bbc_i2c_driver = {
.of_match_table = bbc_i2c_match,
},
.probe = bbc_i2c_probe,
- .remove = __devexit_p(bbc_i2c_remove),
+ .remove = bbc_i2c_remove,
};
module_platform_driver(bbc_i2c_driver);
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index b160073e54b6..e85c803b30cd 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -171,7 +171,7 @@ static struct miscdevice d7s_miscdev = {
.fops = &d7s_fops
};
-static int __devinit d7s_probe(struct platform_device *op)
+static int d7s_probe(struct platform_device *op)
{
struct device_node *opts;
int err = -EINVAL;
@@ -236,7 +236,7 @@ out_free:
goto out;
}
-static int __devexit d7s_remove(struct platform_device *op)
+static int d7s_remove(struct platform_device *op)
{
struct d7s *p = dev_get_drvdata(&op->dev);
u8 regs = readb(p->regs);
@@ -272,7 +272,7 @@ static struct platform_driver d7s_driver = {
.of_match_table = d7s_match,
},
.probe = d7s_probe,
- .remove = __devexit_p(d7s_remove),
+ .remove = d7s_remove,
};
module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 0bc18569f9c0..ddbe5a9e713d 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1028,7 +1028,7 @@ static int kenvctrld(void *__unused)
return 0;
}
-static int __devinit envctrl_probe(struct platform_device *op)
+static int envctrl_probe(struct platform_device *op)
{
struct device_node *dp;
int index, err;
@@ -1104,7 +1104,7 @@ out_iounmap:
return err;
}
-static int __devexit envctrl_remove(struct platform_device *op)
+static int envctrl_remove(struct platform_device *op)
{
int index;
@@ -1135,7 +1135,7 @@ static struct platform_driver envctrl_driver = {
.of_match_table = envctrl_match,
},
.probe = envctrl_probe,
- .remove = __devexit_p(envctrl_remove),
+ .remove = envctrl_remove,
};
module_platform_driver(envctrl_driver);
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 327657e2e264..d9f268f23774 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -159,7 +159,7 @@ static const struct file_operations flash_fops = {
static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
-static int __devinit flash_probe(struct platform_device *op)
+static int flash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct device_node *parent;
@@ -190,7 +190,7 @@ static int __devinit flash_probe(struct platform_device *op)
return misc_register(&flash_dev);
}
-static int __devexit flash_remove(struct platform_device *op)
+static int flash_remove(struct platform_device *op)
{
misc_deregister(&flash_dev);
@@ -212,7 +212,7 @@ static struct platform_driver flash_driver = {
.of_match_table = flash_match,
},
.probe = flash_probe,
- .remove = __devexit_p(flash_remove),
+ .remove = flash_remove,
};
module_platform_driver(flash_driver);
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index a9e468cc1cac..b0aae0536d58 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -347,7 +347,7 @@ static void uctrl_get_external_status(struct uctrl_driver *driver)
}
-static int __devinit uctrl_probe(struct platform_device *op)
+static int uctrl_probe(struct platform_device *op)
{
struct uctrl_driver *p;
int err = -ENOMEM;
@@ -402,7 +402,7 @@ out_free:
goto out;
}
-static int __devexit uctrl_remove(struct platform_device *op)
+static int uctrl_remove(struct platform_device *op)
{
struct uctrl_driver *p = dev_get_drvdata(&op->dev);
@@ -430,7 +430,7 @@ static struct platform_driver uctrl_driver = {
.of_match_table = uctrl_match,
},
.probe = uctrl_probe,
- .remove = __devexit_p(uctrl_remove),
+ .remove = uctrl_remove,
};
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3868ab2397c6..d1f0120cdb98 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2029,7 +2029,7 @@ static struct scsi_host_template driver_template = {
};
/* This function will probe and initialize a card */
-static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
@@ -2305,7 +2305,7 @@ out_disable_device:
#endif
/* PCI Devices supported by this driver */
-static struct pci_device_id twa_pci_tbl[] __devinitdata = {
+static struct pci_device_id twa_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 13e39e1fdfe2..52a2f0580d97 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1604,7 +1604,7 @@ static struct scsi_host_template driver_template = {
};
/* This function will probe and initialize a card */
-static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
@@ -1893,7 +1893,7 @@ out_disable_device:
#endif
/* PCI Devices supported by this driver */
-static struct pci_device_id twl_pci_tbl[] __devinitdata = {
+static struct pci_device_id twl_pci_tbl[] = {
{ PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
{ }
};
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 7fe96ff60c58..62071d2fc1ce 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2281,7 +2281,7 @@ static struct scsi_host_template driver_template = {
};
/* This function will probe and initialize a card */
-static int __devinit tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
@@ -2422,7 +2422,7 @@ static void tw_remove(struct pci_dev *pdev)
} /* End tw_remove() */
/* PCI Devices supported by this driver */
-static struct pci_device_id tw_pci_tbl[] __devinitdata = {
+static struct pci_device_id tw_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_1000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_7000,
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index d4da3708763b..d7ca247efa35 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3615,7 +3615,7 @@ static void __exit BusLogic_exit(void)
__setup("BusLogic=", BusLogic_Setup);
#ifdef MODULE
-static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
+static struct pci_device_id BusLogic_pci_tbl[] = {
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 74bf1aa7af46..db95c547c09d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -42,7 +42,7 @@ config SCSI_DMA
config SCSI_TGT
tristate "SCSI target support"
- depends on SCSI && EXPERIMENTAL
+ depends on SCSI
---help---
If you want to use SCSI target mode drivers enable this option.
If you choose M, the module will be called scsi_tgt.
@@ -603,6 +603,7 @@ config SCSI_ARCMSR
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig"
+source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
@@ -882,7 +883,7 @@ config SCSI_IBMVSCSI
This is the IBM POWER Virtual SCSI Client
To compile this driver as a module, choose M here: the
- module will be called ibmvscsic.
+ module will be called ibmvscsi.
config SCSI_IBMVSCSIS
tristate "IBM Virtual SCSI Server support"
@@ -1391,8 +1392,8 @@ config SCSI_SYM53C416
module will be called sym53c416.
config SCSI_DC395x
- tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support (EXPERIMENTAL)"
- depends on PCI && SCSI && EXPERIMENTAL
+ tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support"
+ depends on PCI && SCSI
---help---
This driver supports PCI SCSI host adapters based on the ASIC
TRM-S1040 chip, e.g Tekram DC395(U/UW/F) and DC315(U) variants.
@@ -1617,8 +1618,8 @@ config GVP11_SCSI
module will be called gvp11.
config SCSI_A4000T
- tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
- depends on AMIGA && SCSI && EXPERIMENTAL
+ tristate "A4000T NCR53c710 SCSI support"
+ depends on AMIGA && SCSI
select SCSI_SPI_ATTRS
help
If you have an Amiga 4000T and have SCSI devices connected to the
@@ -1628,8 +1629,8 @@ config SCSI_A4000T
module will be called a4000t.
config SCSI_ZORRO7XX
- tristate "Zorro NCR53c710 SCSI support (EXPERIMENTAL)"
- depends on ZORRO && SCSI && EXPERIMENTAL
+ tristate "Zorro NCR53c710 SCSI support"
+ depends on ZORRO && SCSI
select SCSI_SPI_ATTRS
help
Support for various NCR53c710-based SCSI controllers on Zorro
@@ -1806,12 +1807,13 @@ config SCSI_BFA_FC
be called bfa.
config SCSI_VIRTIO
- tristate "virtio-scsi support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && VIRTIO
+ tristate "virtio-scsi support"
+ depends on VIRTIO
help
This is the virtual HBA driver for virtio. If the kernel will
be used in a virtual machine, say Y or M.
+source "drivers/scsi/csiostor/Kconfig"
endif # SCSI_LOWLEVEL
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 888f73a4aae1..b607ba4f5630 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
+obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
@@ -106,6 +107,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 165e4dd865d9..450353e04dde 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -814,7 +814,7 @@ static char *lprint_opcode(int opcode, char *pos, char *buffer, int length)
* Locks: interrupts must be enabled when we are called
*/
-static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
+static int NCR5380_init(struct Scsi_Host *instance, int flags)
{
NCR5380_local_declare();
int i, pass;
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 8647256ad66d..b39a2409a507 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
MODULE_LICENSE("GPL");
module_param(NCR_D700, charp, 0);
-static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
+static __u8 id_array[2*(MCA_MAX_SLOT_NR + 1)] =
{ [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
#ifdef MODULE
@@ -173,7 +173,7 @@ struct NCR_D700_private {
char pad;
};
-static int __devinit
+static int
NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
int slot, u32 region, int differential)
{
@@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data)
 * essentially connected to the MCA bus independently, it is easier
* to set them up as two separate host adapters, rather than one
* adapter with two channels */
-static int __devinit
+static int
NCR_D700_probe(struct device *dev)
{
struct NCR_D700_private *p;
@@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev)
return 0;
}
-static void __devexit
+static void
NCR_D700_remove_one(struct Scsi_Host *host)
{
scsi_remove_host(host);
@@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host)
release_region(host->base, 64);
}
-static int __devexit
+static int
NCR_D700_remove(struct device *dev)
{
struct NCR_D700_private *p = dev_get_drvdata(dev);
@@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = {
.name = "NCR_D700",
.bus = &mca_bus_type,
.probe = NCR_D700_probe,
- .remove = __devexit_p(NCR_D700_remove),
+ .remove = NCR_D700_remove,
},
};
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
index afdbb9addf18..05835bf1bf9c 100644
--- a/drivers/scsi/NCR_Q720.c
+++ b/drivers/scsi/NCR_Q720.c
@@ -351,7 +351,7 @@ static struct mca_driver NCR_Q720_driver = {
.name = "NCR_Q720",
.bus = &mca_bus_type,
.probe = NCR_Q720_probe,
- .remove = __devexit_p(NCR_Q720_remove),
+ .remove = NCR_Q720_remove,
},
};
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index a391090a17c5..0163457c12bb 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1082,8 +1082,8 @@ static struct scsi_host_template inia100_template = {
.use_clustering = ENABLE_CLUSTERING,
};
-static int __devinit inia100_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int inia100_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct Scsi_Host *shost;
struct orc_host *host;
@@ -1197,7 +1197,7 @@ out:
return error;
}
-static void __devexit inia100_remove_one(struct pci_dev *pdev)
+static void inia100_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct orc_host *host = (struct orc_host *)shost->hostdata;
@@ -1224,7 +1224,7 @@ static struct pci_driver inia100_pci_driver = {
.name = "inia100",
.id_table = inia100_pci_tbl,
.probe = inia100_probe_one,
- .remove = __devexit_p(inia100_remove_one),
+ .remove = inia100_remove_one,
};
static int __init inia100_init(void)
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 79a30633d4aa..3e09aa21c1ca 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -179,8 +179,7 @@ static struct scsi_host_template a2091_scsi_template = {
.use_clustering = DISABLE_CLUSTERING
};
-static int __devinit a2091_probe(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
{
struct Scsi_Host *instance;
int error;
@@ -239,7 +238,7 @@ fail_alloc:
return error;
}
-static void __devexit a2091_remove(struct zorro_dev *z)
+static void a2091_remove(struct zorro_dev *z)
{
struct Scsi_Host *instance = zorro_get_drvdata(z);
struct a2091_hostdata *hdata = shost_priv(instance);
@@ -251,7 +250,7 @@ static void __devexit a2091_remove(struct zorro_dev *z)
release_mem_region(z->resource.start, 256);
}
-static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id a2091_zorro_tbl[] = {
{ ZORRO_PROD_CBM_A590_A2091_1 },
{ ZORRO_PROD_CBM_A590_A2091_2 },
{ 0 }
@@ -262,7 +261,7 @@ static struct zorro_driver a2091_driver = {
.name = "a2091",
.id_table = a2091_zorro_tbl,
.probe = a2091_probe,
- .remove = __devexit_p(a2091_remove),
+ .remove = a2091_remove,
};
static int __init a2091_init(void)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index d79457ac8bef..681434e2dfe9 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -132,11 +132,13 @@ struct inquiry_data {
* M O D U L E G L O B A L S
*/
-static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
-static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
-static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
-static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max);
-static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new);
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max);
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
+ int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
@@ -971,6 +973,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
struct aac_dev *dev = fib->dev;
u16 fibsize, command;
+ long ret;
aac_fib_init(fib);
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
@@ -982,7 +985,10 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd2->byteCount = cpu_to_le32(count<<9);
readcmd2->cid = cpu_to_le16(scmd_id(cmd));
readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
- aac_build_sgraw2(cmd, readcmd2, dev->scsi_host_ptr->sg_tablesize);
+ ret = aac_build_sgraw2(cmd, readcmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo2;
fibsize = sizeof(struct aac_raw_io2) +
((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
@@ -996,7 +1002,9 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
readcmd->bpTotal = 0;
readcmd->bpComplete = 0;
- aac_build_sgraw(cmd, &readcmd->sg);
+ ret = aac_build_sgraw(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
@@ -1019,6 +1027,8 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
{
u16 fibsize;
struct aac_read64 *readcmd;
+ long ret;
+
aac_fib_init(fib);
readcmd = (struct aac_read64 *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtHostRead64);
@@ -1028,7 +1038,9 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
readcmd->pad = 0;
readcmd->flags = 0;
- aac_build_sg64(cmd, &readcmd->sg);
+ ret = aac_build_sg64(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_read64) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry64));
@@ -1050,6 +1062,8 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
{
u16 fibsize;
struct aac_read *readcmd;
+ long ret;
+
aac_fib_init(fib);
readcmd = (struct aac_read *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
@@ -1057,7 +1071,9 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
- aac_build_sg(cmd, &readcmd->sg);
+ ret = aac_build_sg(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_read) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry));
@@ -1079,6 +1095,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
{
struct aac_dev *dev = fib->dev;
u16 fibsize, command;
+ long ret;
aac_fib_init(fib);
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
@@ -1093,7 +1110,10 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
cpu_to_le16(RIO2_IO_TYPE_WRITE);
- aac_build_sgraw2(cmd, writecmd2, dev->scsi_host_ptr->sg_tablesize);
+ ret = aac_build_sgraw2(cmd, writecmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo2;
fibsize = sizeof(struct aac_raw_io2) +
((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
@@ -1110,7 +1130,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
cpu_to_le16(RIO_TYPE_WRITE);
writecmd->bpTotal = 0;
writecmd->bpComplete = 0;
- aac_build_sgraw(cmd, &writecmd->sg);
+ ret = aac_build_sgraw(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
@@ -1133,6 +1155,8 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
{
u16 fibsize;
struct aac_write64 *writecmd;
+ long ret;
+
aac_fib_init(fib);
writecmd = (struct aac_write64 *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
@@ -1142,7 +1166,9 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
writecmd->pad = 0;
writecmd->flags = 0;
- aac_build_sg64(cmd, &writecmd->sg);
+ ret = aac_build_sg64(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_write64) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry64));
@@ -1164,6 +1190,8 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
u16 fibsize;
struct aac_write *writecmd;
+ long ret;
+
aac_fib_init(fib);
writecmd = (struct aac_write *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
@@ -1173,7 +1201,9 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
- aac_build_sg(cmd, &writecmd->sg);
+ ret = aac_build_sg(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_write) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry));
@@ -1235,8 +1265,11 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
{
u16 fibsize;
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
- aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
+ ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
+ if (ret < 0)
+ return ret;
srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
@@ -1263,8 +1296,11 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
{
u16 fibsize;
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
- aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
+ ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
+ if (ret < 0)
+ return ret;
srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
@@ -2870,7 +2906,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
return -1;
}
-static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
@@ -2883,7 +2919,8 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
psg->sg[0].count = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -2912,7 +2949,7 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
}
-static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
@@ -2927,7 +2964,8 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
psg->sg[0].count = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -2957,7 +2995,7 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
return byte_count;
}
-static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
{
unsigned long byte_count = 0;
int nseg;
@@ -2972,7 +3010,8 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
psg->sg[0].flags = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -3005,13 +3044,15 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
return byte_count;
}
-static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max)
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max)
{
unsigned long byte_count = 0;
int nseg;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i, conformable = 0;
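
As an illustrative aside (not part of the patch): the aacraid hunks above change the scatter-gather builders from unsigned long to long precisely so that a negative scsi_dma_map() result can be returned to the caller instead of tripping BUG_ON(). A small standalone C sketch (hypothetical names map_segments, build_unsigned, build_signed) of why the signed return type matters:

    #include <stdio.h>

    /* Stand-in for a mapping routine that reports failure as -1. */
    static int map_segments(int fail)
    {
        return fail ? -1 : 3;
    }

    static unsigned long build_unsigned(int fail)
    {
        int nseg = map_segments(fail);

        /* Returning a negative int through an unsigned long turns it into
         * a huge positive value, so "ret < 0" at the call site can never
         * fire (compilers typically warn that the test is always false). */
        if (nseg < 0)
            return nseg;
        return nseg * 512UL;
    }

    static long build_signed(int fail)
    {
        int nseg = map_segments(fail);

        /* With a signed return type the error survives the round trip. */
        if (nseg < 0)
            return nseg;
        return nseg * 512L;
    }

    int main(void)
    {
        printf("unsigned sees error: %d\n", build_unsigned(1) < 0); /* 0: lost */
        printf("signed sees error:   %d\n", build_signed(1) < 0);   /* 1: kept */
        return 0;
    }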
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9e933a88a8bc..742f5d7eb0f5 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 29800
+# define AAC_DRIVER_BUILD 29801
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index cb7f1582a6d1..408a42ef787a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -88,13 +88,7 @@ char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
*
* Note: The last field is used to index into aac_drivers below.
*/
-#ifdef DECLARE_PCI_DEVICE_TABLE
-static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
-#elif defined(__devinitconst)
-static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
-#else
-static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
-#endif
+static const struct pci_device_id aac_pci_tbl[] = {
{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
@@ -1107,8 +1101,7 @@ static void __aac_shutdown(struct aac_dev * aac)
pci_disable_msi(aac->pdev);
}
-static int __devinit aac_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned index = id->driver_data;
struct Scsi_Host *shost;
@@ -1310,7 +1303,7 @@ static void aac_shutdown(struct pci_dev *dev)
__aac_shutdown((struct aac_dev *)shost->hostdata);
}
-static void __devexit aac_remove_one(struct pci_dev *pdev)
+static void aac_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
@@ -1341,7 +1334,7 @@ static struct pci_driver aac_pci_driver = {
.name = AAC_DRIVERNAME,
.id_table = aac_pci_tbl,
.probe = aac_probe_one,
- .remove = __devexit_p(aac_remove_one),
+ .remove = aac_remove_one,
.shutdown = aac_shutdown,
};
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 374c4edf4fcb..dcfaee66a8b9 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -9526,7 +9526,7 @@ advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *
static DEF_SCSI_QCMD(advansys_queuecommand)
-static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base)
+static ushort AscGetEisaChipCfg(PortAddr iop_base)
{
PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
(PortAddr) (ASC_EISA_CFG_IOP_MASK);
@@ -9537,8 +9537,8 @@ static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base)
* Return the BIOS address of the adapter at the specified
* I/O port and with the specified bus type.
*/
-static unsigned short __devinit
-AscGetChipBiosAddress(PortAddr iop_base, unsigned short bus_type)
+static unsigned short AscGetChipBiosAddress(PortAddr iop_base,
+ unsigned short bus_type)
{
unsigned short cfg_lsw;
unsigned short bios_addr;
@@ -9569,7 +9569,7 @@ AscGetChipBiosAddress(PortAddr iop_base, unsigned short bus_type)
return bios_addr;
}
-static uchar __devinit AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
+static uchar AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
{
ushort cfg_lsw;
@@ -9583,7 +9583,7 @@ static uchar __devinit AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
return (AscGetChipScsiID(iop_base));
}
-static unsigned char __devinit AscGetChipScsiCtrl(PortAddr iop_base)
+static unsigned char AscGetChipScsiCtrl(PortAddr iop_base)
{
unsigned char sc;
@@ -9593,8 +9593,8 @@ static unsigned char __devinit AscGetChipScsiCtrl(PortAddr iop_base)
return sc;
}
-static unsigned char __devinit
-AscGetChipVersion(PortAddr iop_base, unsigned short bus_type)
+static unsigned char AscGetChipVersion(PortAddr iop_base,
+ unsigned short bus_type)
{
if (bus_type & ASC_IS_EISA) {
PortAddr eisa_iop;
@@ -9608,7 +9608,7 @@ AscGetChipVersion(PortAddr iop_base, unsigned short bus_type)
}
#ifdef CONFIG_ISA
-static void __devinit AscEnableIsaDma(uchar dma_channel)
+static void AscEnableIsaDma(uchar dma_channel)
{
if (dma_channel < 4) {
outp(0x000B, (ushort)(0xC0 | dma_channel));
@@ -9638,7 +9638,7 @@ static int AscStopQueueExe(PortAddr iop_base)
return (0);
}
-static ASC_DCNT __devinit AscGetMaxDmaCount(ushort bus_type)
+static ASC_DCNT AscGetMaxDmaCount(ushort bus_type)
{
if (bus_type & ASC_IS_ISA)
return ASC_MAX_ISA_DMA_COUNT;
@@ -9648,7 +9648,7 @@ static ASC_DCNT __devinit AscGetMaxDmaCount(ushort bus_type)
}
#ifdef CONFIG_ISA
-static ushort __devinit AscGetIsaDmaChannel(PortAddr iop_base)
+static ushort AscGetIsaDmaChannel(PortAddr iop_base)
{
ushort channel;
@@ -9660,7 +9660,7 @@ static ushort __devinit AscGetIsaDmaChannel(PortAddr iop_base)
return (channel + 4);
}
-static ushort __devinit AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
+static ushort AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
{
ushort cfg_lsw;
uchar value;
@@ -9678,7 +9678,7 @@ static ushort __devinit AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channe
return 0;
}
-static uchar __devinit AscGetIsaDmaSpeed(PortAddr iop_base)
+static uchar AscGetIsaDmaSpeed(PortAddr iop_base)
{
uchar speed_value;
@@ -9689,7 +9689,7 @@ static uchar __devinit AscGetIsaDmaSpeed(PortAddr iop_base)
return speed_value;
}
-static uchar __devinit AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
+static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
{
speed_value &= 0x07;
AscSetBank(iop_base, 1);
@@ -9699,7 +9699,7 @@ static uchar __devinit AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
}
#endif /* CONFIG_ISA */
-static ushort __devinit AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
+static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
{
int i;
PortAddr iop_base;
@@ -9786,7 +9786,7 @@ static ushort __devinit AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
return warn_code;
}
-static int __devinit AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
+static int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
{
int retry;
@@ -9801,12 +9801,12 @@ static int __devinit AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
return 0;
}
-static void __devinit AscWaitEEPRead(void)
+static void AscWaitEEPRead(void)
{
mdelay(1);
}
-static ushort __devinit AscReadEEPWord(PortAddr iop_base, uchar addr)
+static ushort AscReadEEPWord(PortAddr iop_base, uchar addr)
{
ushort read_wval;
uchar cmd_reg;
@@ -9821,8 +9821,8 @@ static ushort __devinit AscReadEEPWord(PortAddr iop_base, uchar addr)
return read_wval;
}
-static ushort __devinit
-AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
+static ushort AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
{
ushort wval;
ushort sum;
@@ -9868,7 +9868,7 @@ AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
return sum;
}
-static int __devinit AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
+static int AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
{
PortAddr iop_base;
ushort q_addr;
@@ -9890,12 +9890,12 @@ static int __devinit AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
return (sta);
}
-static void __devinit AscWaitEEPWrite(void)
+static void AscWaitEEPWrite(void)
{
mdelay(20);
}
-static int __devinit AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
+static int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
{
ushort read_back;
int retry;
@@ -9914,8 +9914,7 @@ static int __devinit AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
}
}
-static ushort __devinit
-AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
+static ushort AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
{
ushort read_wval;
@@ -9935,8 +9934,8 @@ AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
return (read_wval);
}
-static int __devinit
-AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
+static int AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
{
int n_error;
ushort *wbuf;
@@ -10031,8 +10030,8 @@ AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
return n_error;
}
-static int __devinit
-AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
+static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
{
int retry;
int n_error;
@@ -10050,7 +10049,7 @@ AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
return n_error;
}
-static ushort __devinit AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
+static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
{
ASCEEP_CONFIG eep_config_buf;
ASCEEP_CONFIG *eep_config;
@@ -10215,7 +10214,7 @@ static ushort __devinit AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
return (warn_code);
}
-static int __devinit AscInitGetConfig(struct Scsi_Host *shost)
+static int AscInitGetConfig(struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var;
@@ -10269,7 +10268,7 @@ static int __devinit AscInitGetConfig(struct Scsi_Host *shost)
return asc_dvc->err_code;
}
-static int __devinit AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
+static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var;
@@ -10383,7 +10382,7 @@ static int __devinit AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *sh
* on big-endian platforms so char fields read as words are actually being
* unswapped on big-endian platforms.
*/
-static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config __devinitdata = {
+static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config = {
ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */
0x0000, /* cfg_msw */
0xFFFF, /* disc_enable */
@@ -10421,7 +10420,7 @@ static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config __devinitdata = {
0 /* num_of_err */
};
-static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar __devinitdata = {
+static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar = {
0, /* cfg_lsw */
0, /* cfg_msw */
0, /* -disc_enable */
@@ -10459,7 +10458,7 @@ static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar __devinitdata = {
0 /* num_of_err */
};
-static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config __devinitdata = {
+static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config = {
ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */
0x0000, /* 01 cfg_msw */
0xFFFF, /* 02 disc_enable */
@@ -10524,7 +10523,7 @@ static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config __devinitdata = {
0 /* 63 reserved */
};
-static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar __devinitdata = {
+static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar = {
0, /* 00 cfg_lsw */
0, /* 01 cfg_msw */
0, /* 02 disc_enable */
@@ -10589,7 +10588,7 @@ static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar __devinitdata =
0 /* 63 reserved */
};
-static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config __devinitdata = {
+static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config = {
ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */
0x0000, /* 01 cfg_msw */
0xFFFF, /* 02 disc_enable */
@@ -10654,7 +10653,7 @@ static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config __devinitdata = {
0 /* 63 reserved */
};
-static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar __devinitdata = {
+static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar = {
0, /* 00 cfg_lsw */
0, /* 01 cfg_msw */
0, /* 02 disc_enable */
@@ -10723,7 +10722,7 @@ static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar __devinitdata =
/*
* Wait for EEPROM command to complete
*/
-static void __devinit AdvWaitEEPCmd(AdvPortAddr iop_base)
+static void AdvWaitEEPCmd(AdvPortAddr iop_base)
{
int eep_delay_ms;
@@ -10742,7 +10741,7 @@ static void __devinit AdvWaitEEPCmd(AdvPortAddr iop_base)
/*
* Read the EEPROM from specified location
*/
-static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
+static ushort AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
{
AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
ASC_EEP_CMD_READ | eep_word_addr);
@@ -10753,8 +10752,8 @@ static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-static void __devinit
-AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
+static void AdvSet3550EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_3550_CONFIG *cfg_buf)
{
ushort *wbuf;
ushort addr, chksum;
@@ -10820,8 +10819,8 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-static void __devinit
-AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
+static void AdvSet38C0800EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C0800_CONFIG *cfg_buf)
{
ushort *wbuf;
ushort *charfields;
@@ -10887,8 +10886,8 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-static void __devinit
-AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
+static void AdvSet38C1600EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C1600_CONFIG *cfg_buf)
{
ushort *wbuf;
ushort *charfields;
@@ -10956,8 +10955,8 @@ AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
*
* Return a checksum based on the EEPROM configuration read.
*/
-static ushort __devinit
-AdvGet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
+static ushort AdvGet3550EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_3550_CONFIG *cfg_buf)
{
ushort wval, chksum;
ushort *wbuf;
@@ -10999,8 +10998,8 @@ AdvGet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
*
* Return a checksum based on the EEPROM configuration read.
*/
-static ushort __devinit
-AdvGet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
+static ushort AdvGet38C0800EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C0800_CONFIG *cfg_buf)
{
ushort wval, chksum;
ushort *wbuf;
@@ -11042,8 +11041,8 @@ AdvGet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
*
* Return a checksum based on the EEPROM configuration read.
*/
-static ushort __devinit
-AdvGet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
+static ushort AdvGet38C1600EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C1600_CONFIG *cfg_buf)
{
ushort wval, chksum;
ushort *wbuf;
@@ -11092,7 +11091,7 @@ AdvGet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
*
* Note: Chip is stopped on entry.
*/
-static int __devinit AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
+static int AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
{
AdvPortAddr iop_base;
ushort warn_code;
@@ -11242,7 +11241,7 @@ static int __devinit AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
*
* Note: Chip is stopped on entry.
*/
-static int __devinit AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
+static int AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
{
AdvPortAddr iop_base;
ushort warn_code;
@@ -11441,7 +11440,7 @@ static int __devinit AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
*
* Note: Chip is stopped on entry.
*/
-static int __devinit AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
+static int AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
{
AdvPortAddr iop_base;
ushort warn_code;
@@ -11661,8 +11660,7 @@ static int __devinit AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
* For a non-fatal error return a warning code. If there are no warnings
* then 0 is returned.
*/
-static int __devinit
-AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
+static int AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
ADV_DVC_VAR *asc_dvc = &board->dvc_var.adv_dvc_var;
@@ -11769,7 +11767,7 @@ static struct scsi_host_template advansys_template = {
.use_clustering = ENABLE_CLUSTERING,
};
-static int __devinit advansys_wide_init_chip(struct Scsi_Host *shost)
+static int advansys_wide_init_chip(struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
@@ -11882,8 +11880,8 @@ static void advansys_wide_free_mem(struct asc_board *board)
}
}
-static int __devinit advansys_board_found(struct Scsi_Host *shost,
- unsigned int iop, int bus_type)
+static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
+ int bus_type)
{
struct pci_dev *pdev;
struct asc_board *boardp = shost_priv(shost);
@@ -12428,7 +12426,7 @@ static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = {
* 10: 12
* 11: 15
*/
-static unsigned int __devinit advansys_isa_irq_no(PortAddr iop_base)
+static unsigned int advansys_isa_irq_no(PortAddr iop_base)
{
unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10;
@@ -12437,7 +12435,7 @@ static unsigned int __devinit advansys_isa_irq_no(PortAddr iop_base)
return chip_irq;
}
-static int __devinit advansys_isa_probe(struct device *dev, unsigned int id)
+static int advansys_isa_probe(struct device *dev, unsigned int id)
{
int err = -ENODEV;
PortAddr iop_base = _asc_def_iop_base[id];
@@ -12477,7 +12475,7 @@ static int __devinit advansys_isa_probe(struct device *dev, unsigned int id)
return err;
}
-static int __devexit advansys_isa_remove(struct device *dev, unsigned int id)
+static int advansys_isa_remove(struct device *dev, unsigned int id)
{
int ioport = _asc_def_iop_base[id];
advansys_release(dev_get_drvdata(dev));
@@ -12487,7 +12485,7 @@ static int __devexit advansys_isa_remove(struct device *dev, unsigned int id)
static struct isa_driver advansys_isa_driver = {
.probe = advansys_isa_probe,
- .remove = __devexit_p(advansys_isa_remove),
+ .remove = advansys_isa_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
@@ -12505,7 +12503,7 @@ static struct isa_driver advansys_isa_driver = {
* 110: 15
* 111: invalid
*/
-static unsigned int __devinit advansys_vlb_irq_no(PortAddr iop_base)
+static unsigned int advansys_vlb_irq_no(PortAddr iop_base)
{
unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9;
@@ -12514,7 +12512,7 @@ static unsigned int __devinit advansys_vlb_irq_no(PortAddr iop_base)
return chip_irq;
}
-static int __devinit advansys_vlb_probe(struct device *dev, unsigned int id)
+static int advansys_vlb_probe(struct device *dev, unsigned int id)
{
int err = -ENODEV;
PortAddr iop_base = _asc_def_iop_base[id];
@@ -12561,14 +12559,14 @@ static int __devinit advansys_vlb_probe(struct device *dev, unsigned int id)
static struct isa_driver advansys_vlb_driver = {
.probe = advansys_vlb_probe,
- .remove = __devexit_p(advansys_isa_remove),
+ .remove = advansys_isa_remove,
.driver = {
.owner = THIS_MODULE,
.name = "advansys_vlb",
},
};
-static struct eisa_device_id advansys_eisa_table[] __devinitdata = {
+static struct eisa_device_id advansys_eisa_table[] = {
{ "ABP7401" },
{ "ABP7501" },
{ "" }
@@ -12595,7 +12593,7 @@ struct eisa_scsi_data {
* 110: invalid
* 111: invalid
*/
-static unsigned int __devinit advansys_eisa_irq_no(struct eisa_device *edev)
+static unsigned int advansys_eisa_irq_no(struct eisa_device *edev)
{
unsigned short cfg_lsw = inw(edev->base_addr + 0xc86);
unsigned int chip_irq = ((cfg_lsw >> 8) & 0x07) + 10;
@@ -12604,7 +12602,7 @@ static unsigned int __devinit advansys_eisa_irq_no(struct eisa_device *edev)
return chip_irq;
}
-static int __devinit advansys_eisa_probe(struct device *dev)
+static int advansys_eisa_probe(struct device *dev)
{
int i, ioport, irq = 0;
int err;
@@ -12677,7 +12675,7 @@ static int __devinit advansys_eisa_probe(struct device *dev)
return err;
}
-static __devexit int advansys_eisa_remove(struct device *dev)
+static int advansys_eisa_remove(struct device *dev)
{
int i;
struct eisa_scsi_data *data = dev_get_drvdata(dev);
@@ -12701,12 +12699,12 @@ static struct eisa_driver advansys_eisa_driver = {
.driver = {
.name = DRV_NAME,
.probe = advansys_eisa_probe,
- .remove = __devexit_p(advansys_eisa_remove),
+ .remove = advansys_eisa_remove,
}
};
/* PCI Devices supported by this driver */
-static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
+static struct pci_device_id advansys_pci_tbl[] = {
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
@@ -12724,7 +12722,7 @@ static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, advansys_pci_tbl);
-static void __devinit advansys_set_latency(struct pci_dev *pdev)
+static void advansys_set_latency(struct pci_dev *pdev)
{
if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) ||
(pdev->device == PCI_DEVICE_ID_ASP_ABP940)) {
@@ -12737,8 +12735,8 @@ static void __devinit advansys_set_latency(struct pci_dev *pdev)
}
}
-static int __devinit
-advansys_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int advansys_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int err, ioport;
struct Scsi_Host *shost;
@@ -12791,7 +12789,7 @@ advansys_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit advansys_pci_remove(struct pci_dev *pdev)
+static void advansys_pci_remove(struct pci_dev *pdev)
{
advansys_release(pci_get_drvdata(pdev));
pci_release_regions(pdev);
@@ -12802,7 +12800,7 @@ static struct pci_driver advansys_pci_driver = {
.name = DRV_NAME,
.id_table = advansys_pci_tbl,
.probe = advansys_pci_probe,
- .remove = __devexit_p(advansys_pci_remove),
+ .remove = advansys_pci_remove,
};
static int __init advansys_init(void)
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index dd4547bf6881..a284be17699f 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -420,7 +420,7 @@ MODULE_PARM_DESC(aha152x1, "parameters for second controller");
#endif /* MODULE */
#ifdef __ISAPNP__
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 },
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index a3e6ed353917..df775e6ba579 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -646,7 +646,7 @@ static int aha1740_probe (struct device *dev)
return -ENODEV;
}
-static __devexit int aha1740_remove (struct device *dev)
+static int aha1740_remove (struct device *dev)
{
struct Scsi_Host *shpnt = dev_get_drvdata(dev);
struct aha1740_hostdata *host = HOSTDATA (shpnt);
@@ -677,7 +677,7 @@ static struct eisa_driver aha1740_driver = {
.driver = {
.name = "aha1740",
.probe = aha1740_probe,
- .remove = __devexit_p (aha1740_remove),
+ .remove = aha1740_remove,
},
};
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1c4120c3db41..c56741fc4b99 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -85,7 +85,7 @@ static struct scsi_host_template aic94xx_sht = {
.ioctl = sas_ioctl,
};
-static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
+static int asd_map_memio(struct asd_ha_struct *asd_ha)
{
int err, i;
struct asd_ha_addrspace *io_handle;
@@ -146,7 +146,7 @@ static void asd_unmap_memio(struct asd_ha_struct *asd_ha)
pci_release_region(asd_ha->pcidev, 0);
}
-static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
+static int asd_map_ioport(struct asd_ha_struct *asd_ha)
{
int i = PCI_IOBAR_OFFSET, err;
struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];
@@ -175,7 +175,7 @@ static void asd_unmap_ioport(struct asd_ha_struct *asd_ha)
pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
}
-static int __devinit asd_map_ha(struct asd_ha_struct *asd_ha)
+static int asd_map_ha(struct asd_ha_struct *asd_ha)
{
int err;
u16 cmd_reg;
@@ -221,7 +221,7 @@ static const char *asd_dev_rev[30] = {
[8] = "B0",
};
-static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha)
+static int asd_common_setup(struct asd_ha_struct *asd_ha)
{
int err, i;
@@ -257,7 +257,7 @@ Err:
return err;
}
-static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha)
+static int asd_aic9410_setup(struct asd_ha_struct *asd_ha)
{
int err = asd_common_setup(asd_ha);
@@ -272,7 +272,7 @@ static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha)
return 0;
}
-static int __devinit asd_aic9405_setup(struct asd_ha_struct *asd_ha)
+static int asd_aic9405_setup(struct asd_ha_struct *asd_ha)
{
int err = asd_common_setup(asd_ha);
@@ -531,7 +531,7 @@ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
static const struct asd_pcidev_struct {
const char * name;
int (*setup)(struct asd_ha_struct *asd_ha);
-} asd_pcidev_data[] __devinitconst = {
+} asd_pcidev_data[] = {
/* Id 0 is used for dynamic ids. */
{ .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
.setup = asd_aic9410_setup
@@ -731,8 +731,7 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
return err;
}
-static int __devinit asd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct asd_pcidev_struct *asd_dev;
unsigned asd_id = (unsigned) id->driver_data;
@@ -924,7 +923,7 @@ static void asd_turn_off_leds(struct asd_ha_struct *asd_ha)
}
}
-static void __devexit asd_pci_remove(struct pci_dev *dev)
+static void asd_pci_remove(struct pci_dev *dev)
{
struct asd_ha_struct *asd_ha = pci_get_drvdata(dev);
@@ -1012,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_ata_set_dmamode = asd_set_dmamode,
};
-static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
+static const struct pci_device_id aic94xx_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
@@ -1031,7 +1030,7 @@ static struct pci_driver aic94xx_pci_driver = {
.name = ASD_DRIVER_NAME,
.id_table = aic94xx_pci_table,
.probe = asd_pci_probe,
- .remove = __devexit_p(asd_pci_remove),
+ .remove = asd_pci_remove,
};
static int __init aic94xx_init(void)
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index a8587f1f5e7e..cfd172a439c9 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -64,19 +64,19 @@ config SCSI_POWERTECSCSI
you have one of these, say Y. If unsure, say N.
comment "The following drivers are not fully supported"
- depends on ARCH_ACORN && EXPERIMENTAL
+ depends on ARCH_ACORN
config SCSI_CUMANA_1
- tristate "CumanaSCSI I support (EXPERIMENTAL)"
- depends on ARCH_ACORN && EXPERIMENTAL && SCSI
+ tristate "CumanaSCSI I support"
+ depends on ARCH_ACORN && SCSI
select SCSI_SPI_ATTRS
help
This enables support for the Cumana SCSI I card. If you have an
Acorn system with one of these, say Y. If unsure, say N.
config SCSI_OAK1
- tristate "Oak SCSI support (EXPERIMENTAL)"
- depends on ARCH_ACORN && EXPERIMENTAL && SCSI
+ tristate "Oak SCSI support"
+ depends on ARCH_ACORN && SCSI
select SCSI_SPI_ATTRS
help
This enables support for the Oak SCSI card. If you have an Acorn
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index b330438ac662..3e1172adb37b 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2965,8 +2965,7 @@ static struct scsi_host_template acornscsi_template = {
.proc_name = "acornscsi",
};
-static int __devinit
-acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
AS_Host *ashost;
@@ -3032,7 +3031,7 @@ acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit acornscsi_remove(struct expansion_card *ec)
+static void acornscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
AS_Host *ashost = (AS_Host *)host->hostdata;
@@ -3063,7 +3062,7 @@ static const struct ecard_id acornscsi_cids[] = {
static struct ecard_driver acornscsi_driver = {
.probe = acornscsi_probe,
- .remove = __devexit_p(acornscsi_remove),
+ .remove = acornscsi_remove,
.id_table = acornscsi_cids,
.drv = {
.name = "acornscsi",
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 2a28b4ad1975..9274510294ac 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -276,8 +276,7 @@ static struct scsi_host_template arxescsi_template = {
.proc_name = "arxescsi",
};
-static int __devinit
-arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
struct arxescsi_info *info;
@@ -340,7 +339,7 @@ arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit arxescsi_remove(struct expansion_card *ec)
+static void arxescsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
@@ -359,7 +358,7 @@ static const struct ecard_id arxescsi_cids[] = {
static struct ecard_driver arxescsi_driver = {
.probe = arxescsi_probe,
- .remove = __devexit_p(arxescsi_remove),
+ .remove = arxescsi_remove,
.id_table = arxescsi_cids,
.drv = {
.name = "arxescsi",
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index c3b99c93637a..c93938b246d5 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -225,8 +225,8 @@ static struct scsi_host_template cumanascsi_template = {
.proc_name = "CumanaSCSI-1",
};
-static int __devinit
-cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int cumanascsi1_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
{
struct Scsi_Host *host;
int ret;
@@ -298,7 +298,7 @@ cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit cumanascsi1_remove(struct expansion_card *ec)
+static void cumanascsi1_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
@@ -320,7 +320,7 @@ static const struct ecard_id cumanascsi1_cids[] = {
static struct ecard_driver cumanascsi1_driver = {
.probe = cumanascsi1_probe,
- .remove = __devexit_p(cumanascsi1_remove),
+ .remove = cumanascsi1_remove,
.id_table = cumanascsi1_cids,
.drv = {
.name = "cumanascsi1",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 547987b86384..e3bae93c3c22 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -397,8 +397,8 @@ static struct scsi_host_template cumanascsi2_template = {
.proc_name = "cumanascsi2",
};
-static int __devinit
-cumanascsi2_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int cumanascsi2_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
{
struct Scsi_Host *host;
struct cumanascsi2_info *info;
@@ -495,7 +495,7 @@ cumanascsi2_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit cumanascsi2_remove(struct expansion_card *ec)
+static void cumanascsi2_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
@@ -519,7 +519,7 @@ static const struct ecard_id cumanascsi2_cids[] = {
static struct ecard_driver cumanascsi2_driver = {
.probe = cumanascsi2_probe,
- .remove = __devexit_p(cumanascsi2_remove),
+ .remove = cumanascsi2_remove,
.id_table = cumanascsi2_cids,
.drv = {
.name = "cumanascsi2",
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 968d08358d20..8e36908415ec 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -515,8 +515,7 @@ static struct scsi_host_template eesox_template = {
.proc_name = "eesox",
};
-static int __devinit
-eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
struct eesoxscsi_info *info;
@@ -617,7 +616,7 @@ eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit eesoxscsi_remove(struct expansion_card *ec)
+static void eesoxscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
@@ -643,7 +642,7 @@ static const struct ecard_id eesoxscsi_cids[] = {
static struct ecard_driver eesoxscsi_driver = {
.probe = eesoxscsi_probe,
- .remove = __devexit_p(eesoxscsi_remove),
+ .remove = eesoxscsi_remove,
.id_table = eesoxscsi_cids,
.drv = {
.name = "eesoxscsi",
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index fc6a5aabf66e..48facdc18002 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -129,8 +129,7 @@ static struct scsi_host_template oakscsi_template = {
.proc_name = "oakscsi",
};
-static int __devinit
-oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
int ret = -ENOMEM;
@@ -182,7 +181,7 @@ oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit oakscsi_remove(struct expansion_card *ec)
+static void oakscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
@@ -202,7 +201,7 @@ static const struct ecard_id oakscsi_cids[] = {
static struct ecard_driver oakscsi_driver = {
.probe = oakscsi_probe,
- .remove = __devexit_p(oakscsi_remove),
+ .remove = oakscsi_remove,
.id_table = oakscsi_cids,
.drv = {
.name = "oakscsi",
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 9274c0677b9c..246600b93555 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -309,8 +309,8 @@ static struct scsi_host_template powertecscsi_template = {
.proc_name = "powertec",
};
-static int __devinit
-powertecscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int powertecscsi_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
{
struct Scsi_Host *host;
struct powertec_info *info;
@@ -409,7 +409,7 @@ powertecscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit powertecscsi_remove(struct expansion_card *ec)
+static void powertecscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
struct powertec_info *info = (struct powertec_info *)host->hostdata;
@@ -435,7 +435,7 @@ static const struct ecard_id powertecscsi_cids[] = {
static struct ecard_driver powertecscsi_driver = {
.probe = powertecscsi_probe,
- .remove = __devexit_p(powertecscsi_remove),
+ .remove = powertecscsi_remove,
.id_table = powertecscsi_cids,
.drv = {
.name = "powertecscsi",
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index a540162ac59c..cfc73041f102 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3210,7 +3210,7 @@ static struct pci_driver atp870u_driver = {
.id_table = atp870u_id_table,
.name = "atp870u",
.probe = atp870u_probe,
- .remove = __devexit_p(atp870u_remove),
+ .remove = atp870u_remove,
};
static int __init atp870u_init(void)
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a50b6a9030e8..f1733dfa3ae2 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -28,7 +28,7 @@
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3
-
+#define BE_GEN4 4
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -84,9 +84,12 @@ static inline void queue_tail_inc(struct be_queue_info *q)
/*ISCSI */
struct be_eq_obj {
+ bool todo_mcc_cq;
+ bool todo_cq;
struct be_queue_info q;
struct beiscsi_hba *phba;
struct be_queue_info *cq;
+ struct work_struct work_cqs; /* Work Item */
struct blk_iopoll iopoll;
};
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 07d2cb126d93..5c87768c109c 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -56,7 +56,7 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
writel(pconline0, (void *)pci_online0_offset);
writel(pconline1, (void *)pci_online1_offset);
- sreset = BE2_SET_RESET;
+ sreset |= BE2_SET_RESET;
writel(sreset, (void *)pci_reset_offset);
i = 0;
@@ -133,6 +133,87 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
return tag;
}
+/*
+ * beiscsi_mccq_compl()- Wait for completion of MBX
+ * @phba: Driver private structure
+ * @tag: Tag for the MBX Command
+ * @wrb: the WRB used for the MBX Command
+ * @cmd_hdr: IOCTL Hdr for the MBX Cmd
+ *
+ * Waits for MBX completion with the passed TAG.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
+int beiscsi_mccq_compl(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ void *cmd_hdr)
+{
+ int rc = 0;
+ uint32_t mcc_tag_response;
+ uint16_t status = 0, addl_status = 0, wrb_num = 0;
+ struct be_mcc_wrb *temp_wrb;
+ struct be_cmd_req_hdr *ioctl_hdr;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+ if (beiscsi_error(phba))
+ return -EIO;
+
+ /* wait for the mccq completion */
+ rc = wait_event_interruptible_timeout(
+ phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
+
+ if (rc <= 0) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Completion timed out\n");
+ rc = -EAGAIN;
+ goto release_mcc_tag;
+ } else
+ rc = 0;
+
+ mcc_tag_response = phba->ctrl.mcc_numtag[tag];
+ status = (mcc_tag_response & CQE_STATUS_MASK);
+ addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
+ CQE_STATUS_ADDL_SHIFT);
+
+ if (cmd_hdr) {
+ ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
+ } else {
+ wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
+ CQE_STATUS_WRB_SHIFT;
+ temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
+ ioctl_hdr = embedded_payload(temp_wrb);
+
+ if (wrb)
+ *wrb = temp_wrb;
+ }
+
+ if (status || addl_status) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Failed for "
+ "Subsys : %d Opcode : %d with "
+ "Status : %d and Extd_Status : %d\n",
+ ioctl_hdr->subsystem,
+ ioctl_hdr->opcode,
+ status, addl_status);
+ rc = -EAGAIN;
+ }
+
+release_mcc_tag:
+ /* Release the MCC entry */
+ free_mcc_tag(&phba->ctrl, tag);
+
+ return rc;
+}
+
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
spin_lock(&ctrl->mbox_lock);
@@ -168,11 +249,24 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
compl->flags = 0;
}
+/*
+ * be_mcc_compl_process()- Check the MBX completion status
+ * @ctrl: Function specific MBX data structure
+ * @compl: Completion status of MBX Command
+ *
+ * Check for the MBX completion status when the BMBX method is used
+ *
+ * return
+ * Success: Zero
+ * Failure: Non-Zero
+ **/
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
u16 compl_status, extd_status;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
be_dws_le_to_cpu(compl, 4);
@@ -184,7 +278,10 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : error in cmd completion: status(compl/extd)=%d/%d\n",
+ "BC_%d : error in cmd completion: "
+ "Subsystem : %d Opcode : %d "
+ "status(compl/extd)=%d/%d\n",
+ hdr->subsystem, hdr->opcode,
compl_status, extd_status);
return -EBUSY;
@@ -314,11 +411,24 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
return status;
}
-/* Wait till no more pending mcc requests are present */
+/*
+ * be_mcc_wait_compl()- Wait for MBX completion
+ * @phba: driver private structure
+ *
+ * Wait till no more pending mcc requests are present
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ *
+ **/
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
int i, status;
for (i = 0; i < mcc_timeout; i++) {
+ if (beiscsi_error(phba))
+ return -EIO;
+
status = beiscsi_process_mcc(phba);
if (status)
return status;
@@ -330,51 +440,83 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
if (i == mcc_timeout) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : mccq poll timed out\n");
-
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
return -EBUSY;
}
return 0;
}
-/* Notify MCC requests and wait for completion */
+/*
+ * be_mcc_notify_wait()- Notify and wait for Compl
+ * @phba: driver private structure
+ *
+ * Notify MCC requests and wait for completion
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
be_mcc_notify(phba);
return be_mcc_wait_compl(phba);
}
+/*
+ * be_mbox_db_ready_wait()- Check ready status
+ * @ctrl: Function specific MBX data structure
+ *
+ * Check for the ready status of FW to send BMBX
+ * commands to adapter.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
-#define long_delay 2000
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
- int cnt = 0, wait = 5; /* in usecs */
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ int wait = 0;
u32 ready;
do {
+
+ if (beiscsi_error(phba))
+ return -EIO;
+
ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
- if (cnt > 12000000) {
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : mbox_db poll timed out\n");
-
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
return -EBUSY;
}
- if (cnt > 50) {
- wait = long_delay;
- mdelay(long_delay / 1000);
- } else
- udelay(wait);
- cnt += wait;
+ mdelay(1);
+ wait++;
} while (true);
return 0;
}
+/*
+ * be_mbox_notify: Notify adapter of new BMBX command
+ * @ctrl: Function specific MBX data structure
+ *
+ * Ring doorbell to inform adapter of a BMBX command
+ * to process
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
int status;
@@ -391,13 +533,9 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
iowrite32(val, db);
status = be_mbox_db_ready_wait(ctrl);
- if (status != 0) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : be_mbox_db_ready_wait failed\n");
-
+ if (status)
return status;
- }
+
val = 0;
val &= ~MPU_MAILBOX_DB_RDY_MASK;
val &= ~MPU_MAILBOX_DB_HI_MASK;
@@ -405,13 +543,9 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
iowrite32(val, db);
status = be_mbox_db_ready_wait(ctrl);
- if (status != 0) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : be_mbox_db_ready_wait failed\n");
-
+ if (status)
return status;
- }
+
if (be_mcc_compl_is_new(compl)) {
status = be_mcc_compl_process(ctrl, &mbox->compl);
be_mcc_compl_use(compl);
@@ -499,7 +633,7 @@ void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
- req_hdr->timeout = 120;
+ req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
@@ -649,18 +783,34 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (chip_skh_r(ctrl->pdev)) {
+ req->hdr.version = MBX_CMD_VER2;
+ req->page_size = 1;
+ AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_cq_context, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
+ PCI_FUNC(ctrl->pdev->devfn));
+ }
- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
- __ilog2_u32(cq->len / 256));
- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
- PCI_FUNC(ctrl->pdev->devfn));
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 2c8f98df1287..23397d51ac54 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -57,6 +57,16 @@ struct be_mcc_wrb {
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
#define CQE_STATUS_EXTD_MASK 0xFFFF
#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */
+#define CQE_STATUS_ADDL_MASK 0xFF00
+#define CQE_STATUS_MASK 0xFF
+#define CQE_STATUS_ADDL_SHIFT 0x08
+#define CQE_STATUS_WRB_MASK 0xFF0000
+#define CQE_STATUS_WRB_SHIFT 16
+#define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000)
+#define BEISCSI_FW_MBX_TIMEOUT 100
+
+/* MBOX Command VER */
+#define MBX_CMD_VER2 0x02
struct be_mcc_compl {
u32 status; /* dword 0 */
@@ -183,7 +193,8 @@ struct be_cmd_req_hdr {
u8 domain; /* dword 0 */
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
- u32 rsvd0; /* dword 3 */
+ u8 version; /* dword 3 */
+ u8 rsvd0[3]; /* dword 3 */
};
struct be_cmd_resp_hdr {
@@ -483,10 +494,28 @@ struct amap_cq_context {
u8 rsvd5[32]; /* dword 3 */
} __packed;
+struct amap_cq_context_v2 {
+ u8 rsvd0[12]; /* dword 0 */
+ u8 coalescwm[2]; /* dword 0 */
+ u8 nodelay; /* dword 0 */
+ u8 rsvd1[12]; /* dword 0 */
+ u8 count[2]; /* dword 0 */
+ u8 valid; /* dword 0 */
+ u8 rsvd2; /* dword 0 */
+ u8 eventable; /* dword 0 */
+ u8 eqid[16]; /* dword 1 */
+ u8 rsvd3[15]; /* dword 1 */
+ u8 armed; /* dword 1 */
+ u8 cqecount[16];/* dword 2 */
+ u8 rsvd4[16]; /* dword 2 */
+ u8 rsvd5[32]; /* dword 3 */
+};
+
struct be_cmd_req_cq_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
- u16 rsvd0;
+ u8 page_size;
+ u8 rsvd0;
u8 context[sizeof(struct amap_cq_context) / 8];
struct phys_addr pages[4];
} __packed;
@@ -663,6 +692,9 @@ unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+
+int beiscsi_mccq_compl(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
/*ISCSI Functuions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
@@ -804,6 +836,59 @@ struct amap_sol_cqe_ring {
u8 valid; /* dword 3 */
} __packed;
+struct amap_sol_cqe_v2 {
+ u8 hw_sts[8]; /* dword 0 */
+ u8 i_sts[8]; /* dword 0 */
+ u8 wrb_index[16]; /* dword 0 */
+ u8 i_exp_cmd_sn[32]; /* dword 1 */
+ u8 code[6]; /* dword 2 */
+ u8 cmd_cmpl; /* dword 2 */
+ u8 rsvd0; /* dword 2 */
+ u8 i_cmd_wnd[8]; /* dword 2 */
+ u8 cid[13]; /* dword 2 */
+ u8 u; /* dword 2 */
+ u8 o; /* dword 2 */
+ u8 s; /* dword 2 */
+ u8 i_res_cnt[31]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+struct common_sol_cqe {
+ u32 exp_cmdsn;
+ u32 res_cnt;
+ u16 wrb_index;
+ u16 cid;
+ u8 hw_sts;
+ u8 cmd_wnd;
+ u8 res_flag; /* the s field of the structure */
+ u8 i_resp; /* for skh if cmd_complete is set then i_sts is response */
+ u8 i_flags; /* for skh or the u and o fields */
+ u8 i_sts; /* for skh if cmd_complete is not-set then i_sts is status */
+};
+
+/*** iSCSI ack/driver message completions ***/
+struct amap_it_dmsg_cqe {
+ u8 ack_num[32]; /* DWORD 0 */
+ u8 pdu_bytes_rcvd[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 cid[10]; /* DWORD 2 */
+ u8 wrb_idx[8]; /* DWORD 2 */
+ u8 rsvd0[8]; /* DWORD 2*/
+ u8 rsvd1[31]; /* DWORD 3*/
+ u8 valid; /* DWORD 3 */
+} __packed;
+
+struct amap_it_dmsg_cqe_v2 {
+ u8 ack_num[32]; /* DWORD 0 */
+ u8 pdu_bytes_rcvd[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 rsvd0[10]; /* DWORD 2 */
+ u8 wrb_idx[16]; /* DWORD 2 */
+ u8 rsvd1[16]; /* DWORD 3 */
+ u8 cid[13]; /* DWORD 3 */
+ u8 rsvd2[2]; /* DWORD 3 */
+ u8 valid; /* DWORD 3 */
+} __packed;
/**
@@ -992,8 +1077,6 @@ struct be_cmd_get_all_if_id_req {
#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4 /* Abortive upload with reset,
* sequence number by driver */
-/* Returns byte size of given field with a structure. */
-
/* Returns the number of items in the field array. */
#define BE_NUMBER_OF_FIELD(_type_, _field_) \
(FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index aedb0d9a9dae..214d691adb53 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -531,9 +531,9 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
if (!if_info.dhcp_state)
- len = sprintf(buf, "static");
+ len = sprintf(buf, "static\n");
else
- len = sprintf(buf, "dhcp");
+ len = sprintf(buf, "dhcp\n");
break;
case ISCSI_NET_PARAM_IPV4_SUBNET:
len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
@@ -541,7 +541,7 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
case ISCSI_NET_PARAM_VLAN_ENABLED:
len = sprintf(buf, "%s\n",
(if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
- ? "Disabled" : "Enabled");
+ ? "Disabled\n" : "Enabled\n");
break;
case ISCSI_NET_PARAM_VLAN_ID:
if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
@@ -586,7 +586,7 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
len = be2iscsi_get_if_param(phba, iface, param, buf);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
- len = sprintf(buf, "enabled");
+ len = sprintf(buf, "enabled\n");
break;
case ISCSI_NET_PARAM_IPV4_GW:
memset(&gateway, 0, sizeof(gateway));
@@ -690,11 +690,9 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
{
int rc;
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_hba_name *resp;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
tag = be_cmd_get_initname(phba);
if (!tag) {
@@ -702,26 +700,16 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
"BS_%d : Getting Initiator Name Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ }
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : MailBox Command Failed with "
- "status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+ "BS_%d : Initiator Name MBX Failed\n");
+ return rc;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
resp = embedded_payload(wrb);
rc = sprintf(buf, "%s\n", resp->initiator_name);
return rc;
@@ -731,7 +719,6 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
* beiscsi_get_port_state - Get the Port State
* @shost : pointer to scsi_host structure
*
- * returns number of bytes
*/
static void beiscsi_get_port_state(struct Scsi_Host *shost)
{
@@ -750,13 +737,12 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
*/
static int beiscsi_get_port_speed(struct Scsi_Host *shost)
{
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ int rc;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_ntwk_link_status_resp *resp;
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
tag = be_cmd_get_port_speed(phba);
if (!tag) {
@@ -764,26 +750,14 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
"BS_%d : Getting Port Speed Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
-
- if (status || extd_status) {
+ }
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : MailBox Command Failed with "
- "status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+ "BS_%d : Port Speed MBX Failed\n");
+ return rc;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
resp = embedded_payload(wrb);
switch (resp->mac_speed) {
@@ -937,6 +911,14 @@ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
session->initial_r2t_en);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params,
session->imm_data_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ data_seq_inorder, params,
+ session->dataseq_inorder_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ pdu_seq_inorder, params,
+ session->pdu_inorder_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_r2t, params,
+ session->max_r2t);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
(conn->exp_statsn - 1));
}
@@ -1027,12 +1009,10 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
struct be_mcc_wrb *wrb;
struct tcp_connect_and_offload_out *ptcpcnct_out;
- unsigned short status, extd_status;
struct be_dma_mem nonemb_cmd;
- unsigned int tag, wrb_num;
+ unsigned int tag;
int ret = -ENOMEM;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
@@ -1084,35 +1064,26 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
}
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+
+ ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : mgmt_open_connection Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
+ "BS_%d : mgmt_open_connection Failed");
- free_mcc_tag(&phba->ctrl, tag);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
goto free_ep;
- } else {
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
-
- ptcpcnct_out = embedded_payload(wrb);
- beiscsi_ep = ep->dd_data;
- beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
- beiscsi_ep->cid_vld = 1;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : mgmt_open_connection Success\n");
}
+
+ ptcpcnct_out = embedded_payload(wrb);
+ beiscsi_ep = ep->dd_data;
+ beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
+ beiscsi_ep->cid_vld = 1;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_open_connection Success\n");
+
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;
@@ -1150,8 +1121,8 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
if (phba->state != BE_ADAPTER_UP) {
ret = -EBUSY;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : The Adapter state is Not UP\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BS_%d : The Adapter Port state is Down!!!\n");
return ERR_PTR(ret);
}
@@ -1216,11 +1187,9 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
beiscsi_ep->ep_cid);
ret = -EAGAIN;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
return ret;
}
@@ -1281,12 +1250,9 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+ beiscsi_mccq_compl(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 8b826fc06bcc..38eab7232159 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index ff73f9500b01..4e2733d23003 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -47,8 +47,6 @@
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
-static unsigned int gcrashmode = 0;
-static unsigned int num_hba = 0;
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -153,11 +151,54 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
"\t\t\t\tIO Path Events : 0x10\n"
"\t\t\t\tConfiguration Path : 0x20\n");
+DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable,
+ &dev_attr_beiscsi_drvr_ver,
+ &dev_attr_beiscsi_adapter_family,
NULL,
};
+static char const *cqe_desc[] = {
+ "RESERVED_DESC",
+ "SOL_CMD_COMPLETE",
+ "SOL_CMD_KILLED_DATA_DIGEST_ERR",
+ "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
+ "CXN_KILLED_BURST_LEN_MISMATCH",
+ "CXN_KILLED_AHS_RCVD",
+ "CXN_KILLED_HDR_DIGEST_ERR",
+ "CXN_KILLED_UNKNOWN_HDR",
+ "CXN_KILLED_STALE_ITT_TTT_RCVD",
+ "CXN_KILLED_INVALID_ITT_TTT_RCVD",
+ "CXN_KILLED_RST_RCVD",
+ "CXN_KILLED_TIMED_OUT",
+ "CXN_KILLED_RST_SENT",
+ "CXN_KILLED_FIN_RCVD",
+ "CXN_KILLED_BAD_UNSOL_PDU_RCVD",
+ "CXN_KILLED_BAD_WRB_INDEX_ERROR",
+ "CXN_KILLED_OVER_RUN_RESIDUAL",
+ "CXN_KILLED_UNDER_RUN_RESIDUAL",
+ "CMD_KILLED_INVALID_STATSN_RCVD",
+ "CMD_KILLED_INVALID_R2T_RCVD",
+ "CMD_CXN_KILLED_LUN_INVALID",
+ "CMD_CXN_KILLED_ICD_INVALID",
+ "CMD_CXN_KILLED_ITT_INVALID",
+ "CMD_CXN_KILLED_SEQ_OUTOFORDER",
+ "CMD_CXN_KILLED_INVALID_DATASN_RCVD",
+ "CXN_INVALIDATE_NOTIFY",
+ "CXN_INVALIDATE_INDEX_NOTIFY",
+ "CMD_INVALIDATED_NOTIFY",
+ "UNSOL_HDR_NOTIFY",
+ "UNSOL_DATA_NOTIFY",
+ "UNSOL_DATA_DIGEST_ERROR_NOTIFY",
+ "DRIVERMSG_NOTIFY",
+ "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
+ "SOL_CMD_KILLED_DIF_ERR",
+ "CXN_KILLED_SYN_RCVD",
+ "CXN_KILLED_IMM_DATA_RCVD"
+};
+
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
blk_queue_max_segment_size(sdev->request_queue, 65536);
@@ -226,11 +267,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
nonemb_cmd.va, nonemb_cmd.dma);
return FAILED;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_abort(sc);
@@ -301,11 +340,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return FAILED;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_device_reset(sc);
@@ -482,6 +519,7 @@ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
+ { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
@@ -730,7 +768,7 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) == mcc->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
}
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
@@ -738,8 +776,8 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if (num_eq_processed)
hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
@@ -779,29 +817,26 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
-
- return IRQ_HANDLED;
} else {
while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
& EQE_VALID_MASK) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 1;
+ pbe_eq->todo_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
queue_tail_inc(eq);
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (phba->todo_cq)
- queue_work(phba->wq, &phba->work_cqs);
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
-
- return IRQ_HANDLED;
+ if (pbe_eq->todo_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
}
+
+ if (num_eq_processed)
+ hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+
+ return IRQ_HANDLED;
}
/**
@@ -849,7 +884,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) == mcc->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
num_mcceq_processed++;
} else {
@@ -862,8 +897,8 @@ static irqreturn_t be_isr(int irq, void *dev_id)
eqe = queue_tail_node(eq);
}
if (num_ioeq_processed || num_mcceq_processed) {
- if (phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if ((num_mcceq_processed) && (!num_ioeq_processed))
hwi_ring_eq_db(phba, eq->id, 0,
@@ -886,11 +921,11 @@ static irqreturn_t be_isr(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) != cq->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
} else {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 1;
+ pbe_eq->todo_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
}
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
@@ -898,8 +933,8 @@ static irqreturn_t be_isr(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_ioeq_processed++;
}
- if (phba->todo_cq || phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if (num_ioeq_processed) {
hwi_ring_eq_db(phba, eq->id, 0,
@@ -1211,7 +1246,8 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct beiscsi_io_task *io_task = task->dd_data;
struct be_status_bhs *sts_bhs =
@@ -1221,20 +1257,14 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
u32 resid = 0, exp_cmdsn, max_cmdsn;
u8 rsp, status, flags;
- exp_cmdsn = (psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK);
- max_cmdsn = ((psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
- rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
- & SOL_RESP_MASK) >> 16);
- status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
- & SOL_STS_MASK) >> 8);
- flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
+ exp_cmdsn = csol_cqe->exp_cmdsn;
+ max_cmdsn = (csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+ rsp = csol_cqe->i_resp;
+ status = csol_cqe->i_sts;
+ flags = csol_cqe->i_flags;
+ resid = csol_cqe->res_cnt;
+
if (!task->sc) {
if (io_task->scsi_cmnd)
scsi_dma_unmap(io_task->scsi_cmnd);
@@ -1249,9 +1279,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
/* bidi not initially supported */
if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
- resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
- 32] & SOL_RES_CNT_MASK);
-
if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
task->sc->result = DID_ERROR << 16;
@@ -1273,13 +1300,8 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
}
- if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
- if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
- & SOL_RES_CNT_MASK)
- conn->rxdata_octets += (psol->
- dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
- & SOL_RES_CNT_MASK);
- }
+ if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
+ conn->rxdata_octets += resid;
unmap:
scsi_dma_unmap(io_task->scsi_cmnd);
iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
@@ -1287,7 +1309,8 @@ unmap:
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_logout_rsp *hdr;
struct beiscsi_io_task *io_task = task->dd_data;
@@ -1297,18 +1320,11 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
hdr->opcode = ISCSI_OP_LOGOUT_RSP;
hdr->t2wait = 5;
hdr->t2retain = 0;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
- 32] & SOL_RESP_MASK);
- hdr->exp_cmdsn = cpu_to_be32(psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->response = csol_cqe->i_resp;
+ hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
+ hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
+
hdr->dlength[0] = 0;
hdr->dlength[1] = 0;
hdr->dlength[2] = 0;
@@ -1319,7 +1335,8 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_tm_rsp *hdr;
struct iscsi_conn *conn = beiscsi_conn->conn;
@@ -1327,16 +1344,12 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
hdr = (struct iscsi_tm_rsp *)task->hdr;
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
- 32] & SOL_RESP_MASK);
- hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->response = csol_cqe->i_resp;
+ hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
+ hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
hdr->itt = io_task->libiscsi_itt;
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
@@ -1352,15 +1365,24 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_io_task *io_task;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
+ uint16_t wrb_index, cid;
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[((psol->
- dw[offsetof(struct amap_sol_cqe, cid) / 32] &
- SOL_CID_MASK) >> 6) -
- phba->fw_config.iscsi_cid_start];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
- dw[offsetof(struct amap_sol_cqe, wrb_index) /
- 32] & SOL_WRB_INDEX_MASK) >> 16)];
+ if (chip_skh_r(phba->pcidev)) {
+ wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+ wrb_idx, psol);
+ cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+ cid, psol);
+ } else {
+ wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+ wrb_idx, psol);
+ cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+ cid, psol);
+ }
+
+ pwrb_context = &phwi_ctrlr->wrb_context[
+ cid - phba->fw_config.iscsi_cid_start];
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
task = pwrb_handle->pio_handle;
io_task = task->dd_data;
@@ -1374,26 +1396,78 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_nopin *hdr;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct beiscsi_io_task *io_task = task->dd_data;
hdr = (struct iscsi_nopin *)task->hdr;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+ hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
hdr->opcode = ISCSI_OP_NOOP_IN;
hdr->itt = io_task->libiscsi_itt;
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
+static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
+ struct sol_cqe *psol,
+ struct common_sol_cqe *csol_cqe)
+{
+ if (chip_skh_r(phba->pcidev)) {
+ csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_exp_cmd_sn, psol);
+ csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_res_cnt, psol);
+ csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ wrb_index, psol);
+ csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, psol);
+ csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ hw_sts, psol);
+ csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_cmd_wnd, psol);
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cmd_cmpl, psol))
+ csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_sts, psol);
+ else
+ csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_sts, psol);
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ u, psol))
+ csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
+
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ o, psol))
+ csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
+ } else {
+ csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_exp_cmd_sn, psol);
+ csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_res_cnt, psol);
+ csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_cmd_wnd, psol);
+ csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
+ wrb_index, psol);
+ csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
+ cid, psol);
+ csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+ hw_sts, psol);
+ csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_resp, psol);
+ csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_sts, psol);
+ csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_flags, psol);
+ }
+}
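/*
 * Annotation (not part of the patch): adapter_get_sol_cqe() is now the one
 * place that knows both CQE layouts; the BE2/BE3 path reads amap_sol_cqe and
 * the SKH-R path reads amap_sol_cqe_v2, so be_complete_io(),
 * be_complete_logout(), be_complete_tmf() and be_complete_nopin_resp() only
 * ever see the chip-independent struct common_sol_cqe.
 */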
+
+
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_hba *phba, struct sol_cqe *psol)
{
@@ -1405,19 +1479,22 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
unsigned int type;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
+ struct common_sol_cqe csol_cqe = {0};
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
- (struct amap_sol_cqe, cid) / 32]
- & SOL_CID_MASK) >> 6) -
- phba->fw_config.iscsi_cid_start];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
- dw[offsetof(struct amap_sol_cqe, wrb_index) /
- 32] & SOL_WRB_INDEX_MASK) >> 16)];
+
+ /* Copy the elements to a common structure */
+ adapter_get_sol_cqe(phba, psol, &csol_cqe);
+
+ pwrb_context = &phwi_ctrlr->wrb_context[
+ csol_cqe.cid - phba->fw_config.iscsi_cid_start];
+
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[
+ csol_cqe.wrb_index];
+
task = pwrb_handle->pio_handle;
pwrb = pwrb_handle->pwrb;
- type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
- WRB_TYPE_MASK) >> 28;
+ type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
spin_lock_bh(&session->lock);
switch (type) {
@@ -1425,17 +1502,16 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
case HWH_TYPE_IO_RD:
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
ISCSI_OP_NOOP_OUT)
- be_complete_nopin_resp(beiscsi_conn, task, psol);
+ be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
else
- be_complete_io(beiscsi_conn, task, psol);
+ be_complete_io(beiscsi_conn, task, &csol_cqe);
break;
case HWH_TYPE_LOGOUT:
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
- be_complete_logout(beiscsi_conn, task, psol);
+ be_complete_logout(beiscsi_conn, task, &csol_cqe);
else
- be_complete_tmf(beiscsi_conn, task, psol);
-
+ be_complete_tmf(beiscsi_conn, task, &csol_cqe);
break;
case HWH_TYPE_LOGIN:
@@ -1446,7 +1522,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
case HWH_TYPE_NOP:
- be_complete_nopin_resp(beiscsi_conn, task, psol);
+ be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
break;
default:
@@ -1454,10 +1530,8 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
"BM_%d : In hwi_complete_cmd, unknown type = %d"
"wrb_index 0x%x CID 0x%x\n", type,
- ((psol->dw[offsetof(struct amap_iscsi_wrb,
- type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
- ((psol->dw[offsetof(struct amap_sol_cqe,
- cid) / 32] & SOL_CID_MASK) >> 6));
+ csol_cqe.wrb_index,
+ csol_cqe.cid);
break;
}
@@ -1485,13 +1559,26 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
struct list_head *pbusy_list;
struct async_pdu_handle *pasync_handle = NULL;
unsigned char is_header = 0;
+ unsigned int index, dpl;
+
+ if (chip_skh_r(phba->pcidev)) {
+ dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ dpl, pdpdu_cqe);
+ index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ index, pdpdu_cqe);
+ } else {
+ dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ dpl, pdpdu_cqe);
+ index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ index, pdpdu_cqe);
+ }
phys_addr.u.a32.address_lo =
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
- ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
- & PDUCQE_DPL_MASK) >> 16);
+ (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ db_addr_lo) / 32] - dpl);
phys_addr.u.a32.address_hi =
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ db_addr_hi) / 32];
phys_addr.u.a64.address =
*((unsigned long long *)(&phys_addr.u.a64.address));
@@ -1501,14 +1588,12 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
case UNSOL_HDR_NOTIFY:
is_header = 1;
- pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
- (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK));
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx,
+ is_header, index);
break;
case UNSOL_DATA_NOTIFY:
- pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
- dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK));
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx,
+ is_header, index);
break;
default:
pbusy_list = NULL;
@@ -1531,12 +1616,9 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start;
pasync_handle->is_header = is_header;
- pasync_handle->buffer_len = ((pdpdu_cqe->
- dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
- & PDUCQE_DPL_MASK) >> 16);
+ pasync_handle->buffer_len = dpl;
+ *pcq_index = index;
- *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK);
return pasync_handle;
}
@@ -1914,6 +1996,13 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
}
+/**
+ * beiscsi_process_cq()- Process the Completion Queue
+ * @pbe_eq: Event Q on which the Completion has come
+ *
+ * return
+ * Number of Completion Entries processed.
+ **/
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
struct be_queue_info *cq;
@@ -1935,12 +2024,24 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
CQE_VALID_MASK) {
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
- cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
- CQE_CID_MASK) >> 6);
- code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
- CQE_CODE_MASK);
- ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
+ code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK);
+
+ /* Get the CID */
+ if (chip_skh_r(phba->pcidev)) {
+ if ((code == DRIVERMSG_NOTIFY) ||
+ (code == UNSOL_HDR_NOTIFY) ||
+ (code == UNSOL_DATA_NOTIFY))
+ cid = AMAP_GET_BITS(
+ struct amap_i_t_dpdu_cqe_v2,
+ cid, sol);
+ else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, sol);
+ } else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+ ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
beiscsi_ep = ep->dd_data;
beiscsi_conn = beiscsi_ep->conn;
@@ -1958,7 +2059,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case DRIVERMSG_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Received DRIVERMSG_NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
dmsg = (struct dmsg_cqe *)sol;
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
@@ -1966,7 +2068,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case UNSOL_HDR_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Received UNSOL_HDR_ NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
@@ -1974,7 +2077,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case UNSOL_DATA_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : Received UNSOL_DATA_NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
@@ -1984,8 +2088,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_INVALIDATE_NOTIFY:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Ignoring CQ Error notification for"
- " cmd/cxn invalidate\n");
+ "BM_%d : Ignoring %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
break;
case SOL_CMD_KILLED_DATA_DIGEST_ERR:
case CMD_KILLED_INVALID_STATSN_RCVD:
@@ -1997,14 +2101,14 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : CQ Error notification for cmd.. "
- "code %d cid 0x%x\n", code, cid);
+ "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Digest error on def pdu ring,"
- " dropping..\n");
+ "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
(struct i_t_dpdu_cqe *) sol);
break;
@@ -2017,6 +2121,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_INVALID_ITT_TTT_RCVD:
case CXN_KILLED_TIMED_OUT:
case CXN_KILLED_FIN_RCVD:
+ case CXN_KILLED_RST_SENT:
+ case CXN_KILLED_RST_RCVD:
case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
case CXN_KILLED_BAD_WRB_INDEX_ERROR:
case CXN_KILLED_OVER_RUN_RESIDUAL:
@@ -2024,19 +2130,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error %d, reset CID 0x%x...\n",
- code, cid);
- if (beiscsi_conn)
- iscsi_conn_failure(beiscsi_conn->conn,
- ISCSI_ERR_CONN_FAILED);
- break;
- case CXN_KILLED_RST_SENT:
- case CXN_KILLED_RST_RCVD:
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error %d, reset"
- "received/sent on CID 0x%x...\n",
- code, cid);
+ "BM_%d : Event %s[%d] received on CID : %d\n",
+ cqe_desc[code], code, cid);
if (beiscsi_conn)
iscsi_conn_failure(beiscsi_conn->conn,
ISCSI_ERR_CONN_FAILED);
@@ -2044,8 +2139,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
default:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error Invalid code= %d "
- "received on CID 0x%x...\n",
+				    "BM_%d : Invalid CQE Event Received Code : %d "
+ "CID 0x%x...\n",
code, cid);
break;
}
@@ -2068,30 +2163,30 @@ void beiscsi_process_all_cqs(struct work_struct *work)
unsigned long flags;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
- struct be_eq_obj *pbe_eq;
- struct beiscsi_hba *phba =
- container_of(work, struct beiscsi_hba, work_cqs);
+ struct beiscsi_hba *phba;
+ struct be_eq_obj *pbe_eq =
+ container_of(work, struct be_eq_obj, work_cqs);
+ phba = pbe_eq->phba;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (phba->msix_enabled)
- pbe_eq = &phwi_context->be_eq[phba->num_cpus];
- else
- pbe_eq = &phwi_context->be_eq[0];
- if (phba->todo_mcc_cq) {
+ if (pbe_eq->todo_mcc_cq) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 0;
+ pbe_eq->todo_mcc_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
beiscsi_process_mcc_isr(phba);
}
- if (phba->todo_cq) {
+ if (pbe_eq->todo_cq) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 0;
+ pbe_eq->todo_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
beiscsi_process_cq(pbe_eq);
}
+
+ /* rearm EQ for further interrupts */
+ hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}
static int be_iopoll(struct blk_iopoll *iop, int budget)
@@ -2115,6 +2210,101 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
}
static void
+hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
+ unsigned int num_sg, struct beiscsi_io_task *io_task)
+{
+ struct iscsi_sge *psgl;
+ unsigned int sg_len, index;
+ unsigned int sge_len = 0;
+ unsigned long long addr;
+ struct scatterlist *l_sg;
+ unsigned int offset;
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
+ io_task->bhs_pa.u.a32.address_lo);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
+ io_task->bhs_pa.u.a32.address_hi);
+
+ l_sg = sg;
+ for (index = 0; (index < num_sg) && (index < 2); index++,
+ sg = sg_next(sg)) {
+ if (index == 0) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_addr_lo, pwrb,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_addr_hi, pwrb,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_len, pwrb,
+ sg_len);
+ sge_len = sg_len;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
+ pwrb, sge_len);
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_addr_lo, pwrb,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_addr_hi, pwrb,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_len, pwrb,
+ sg_len);
+ }
+ }
+ psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
+ memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ io_task->bhs_pa.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ io_task->bhs_pa.u.a32.address_lo);
+
+ if (num_sg == 1) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 0);
+ } else if (num_sg == 2) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 0);
+ }
+
+ sg = l_sg;
+ psgl++;
+ psgl++;
+ offset = 0;
+ for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
+ AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
+ offset += sg_len;
+ }
+ psgl--;
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+}
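/*
 * Annotation (not part of the patch): hwi_write_sgl_v2() writes the first two
 * scatterlist elements inline into the WRB (sge0/sge1) and mirrors the whole
 * list into the SGL fragment; slot 0 of the fragment carries the BHS, the
 * data SGEs start at psgl[2], and last_sge marks the final entry.
 */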
+
+static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
unsigned int num_sg, struct beiscsi_io_task *io_task)
{
@@ -2202,13 +2392,18 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
+/**
+ * hwi_write_buffer()- Populate the WRB with task info
+ * @pwrb: ptr to the WRB entry
+ * @task: iscsi task which is to be executed
+ **/
static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
struct iscsi_sge *psgl;
- unsigned long long addr;
struct beiscsi_io_task *io_task = task->dd_data;
struct beiscsi_conn *beiscsi_conn = io_task->conn;
struct beiscsi_hba *phba = beiscsi_conn->phba;
+ uint8_t dsp_value = 0;
io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
@@ -2217,26 +2412,38 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
io_task->bhs_pa.u.a32.address_hi);
if (task->data) {
- if (task->data_count) {
- AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
- addr = (u64) pci_map_single(phba->pcidev,
- task->data,
- task->data_count, 1);
- } else {
- AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
- addr = 0;
- }
+
+ /* Check for the data_count */
+ dsp_value = (task->data_count) ? 1 : 0;
+
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+ pwrb, dsp_value);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+ pwrb, dsp_value);
+
+ /* Map addr only if there is data_count */
+ if (dsp_value) {
+ io_task->mtask_addr = pci_map_single(phba->pcidev,
+ task->data,
+ task->data_count,
+ PCI_DMA_TODEVICE);
+ io_task->mtask_data_count = task->data_count;
+ } else
+ io_task->mtask_addr = 0;
+
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
- ((u32)(addr & 0xFFFFFFFF)));
+ lower_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
- ((u32)(addr >> 32)));
+ upper_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
task->data_count);
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
} else {
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
- addr = 0;
+ io_task->mtask_addr = 0;
}
psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
@@ -2259,9 +2466,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
psgl++;
if (task->data) {
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
- ((u32)(addr & 0xFFFFFFFF)));
+ lower_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
- ((u32)(addr >> 32)));
+ upper_32_bits(io_task->mtask_addr));
}
AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
}
@@ -2843,7 +3050,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
}
return 0;
create_eq_error:
- for (i = 0; i < (phba->num_cpus + 1); i++) {
+ for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
if (mem->va)
@@ -3268,15 +3475,31 @@ err:
return -ENOMEM;
}
-static int find_num_cpus(void)
+/**
+ * find_num_cpus()- Get the CPU online count
+ * @phba: ptr to priv structure
+ *
+ * CPU count is used for creating EQ.
+ **/
+static void find_num_cpus(struct beiscsi_hba *phba)
{
int num_cpus = 0;
num_cpus = num_online_cpus();
- if (num_cpus >= MAX_CPUS)
- num_cpus = MAX_CPUS - 1;
- return num_cpus;
+ switch (phba->generation) {
+ case BE_GEN2:
+ case BE_GEN3:
+ phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
+ BEISCSI_MAX_NUM_CPUS : num_cpus;
+ break;
+ case BE_GEN4:
+ phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
+ OC_SKH_MAX_NUM_CPUS : num_cpus;
+ break;
+ default:
+ phba->num_cpus = 1;
+ }
}
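/*
 * Minimal sketch (not from the patch) of how the clamped CPU count feeds EQ
 * creation; eq_for_mcc and num_eqs are illustrative locals only:
 */
	unsigned int eq_for_mcc = phba->msix_enabled ? 1 : 0;	/* extra EQ for MCC */
	unsigned int num_eqs = phba->num_cpus + eq_for_mcc;	/* what create_eq_error unwinds */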
static int hwi_init_port(struct beiscsi_hba *phba)
@@ -3644,12 +3867,9 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
{
struct be_cmd_get_session_resp *session_resp;
- struct be_mcc_wrb *wrb;
struct be_dma_mem nonemb_cmd;
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ unsigned int tag;
unsigned int s_handle;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
int ret = -ENOMEM;
/* Get the session handle of the boot target */
@@ -3682,25 +3902,16 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
" Failed\n");
goto boot_freemem;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
+ }
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+ if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BM_%d : beiscsi_get_session_info Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
+			    "BM_%d : beiscsi_get_session_info Failed\n");
goto boot_freemem;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
session_resp = nonemb_cmd.va ;
memcpy(&phba->boot_sess, &session_resp->session_info,
@@ -3853,6 +4064,11 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
kfree(phba->ep_array);
}
+/**
+ * beiscsi_cleanup_task()- Free driver resources of the task
+ * @task: ptr to the iscsi task
+ *
+ **/
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
struct beiscsi_io_task *io_task = task->dd_data;
@@ -3900,6 +4116,13 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
spin_unlock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = NULL;
}
+ if (io_task->mtask_addr) {
+ pci_unmap_single(phba->pcidev,
+ io_task->mtask_addr,
+ io_task->mtask_data_count,
+ PCI_DMA_TODEVICE);
+ io_task->mtask_addr = 0;
+ }
}
}
}
@@ -3909,8 +4132,6 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params)
{
struct wrb_handle *pwrb_handle;
- struct iscsi_target_context_update_wrb *pwrb = NULL;
- struct be_mem_descriptor *mem_descr;
struct beiscsi_hba *phba = beiscsi_conn->phba;
struct iscsi_task *task = beiscsi_conn->task;
struct iscsi_session *session = task->conn->session;
@@ -3927,67 +4148,16 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start));
- pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
- memset(pwrb, 0, sizeof(*pwrb));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- max_burst_length, pwrb, params->dw[offsetof
- (struct amap_beiscsi_offload_params,
- max_burst_length) / 32]);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- max_send_data_segment_length, pwrb,
- params->dw[offsetof(struct amap_beiscsi_offload_params,
- max_send_data_segment_length) / 32]);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- first_burst_length,
- pwrb,
- params->dw[offsetof(struct amap_beiscsi_offload_params,
- first_burst_length) / 32]);
-
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- erl) / 32] & OFFLD_PARAMS_ERL));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
- pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- exp_statsn) / 32] + 1));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
- 0x7);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
- pwrb, pwrb_handle->wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
- pwrb, pwrb_handle->nxt_wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- session_state, pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
- pwrb, 1);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
- pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
- 0);
- mem_descr = phba->init_mem;
- mem_descr += ISCSI_MEM_GLOBAL_HEADER;
-
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- pad_buffer_addr_hi, pwrb,
- mem_descr->mem_array[0].bus_address.u.a32.address_hi);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- pad_buffer_addr_lo, pwrb,
- mem_descr->mem_array[0].bus_address.u.a32.address_lo);
+ /* Check for the adapter family */
+ if (chip_skh_r(phba->pcidev))
+ beiscsi_offload_cxn_v2(params, pwrb_handle);
+ else
+ beiscsi_offload_cxn_v0(params, pwrb_handle,
+ phba->init_mem);
- be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
+ be_dws_le_to_cpu(pwrb_handle->pwrb,
+ sizeof(struct iscsi_target_context_update_wrb));
doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
@@ -4044,13 +4214,25 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
spin_lock(&phba->io_sgl_lock);
io_task->psgl_handle = alloc_io_sgl_handle(phba);
spin_unlock(&phba->io_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+				    "BM_%d : Alloc of IO_SGL_ICD Failed "
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
+ }
io_task->pwrb_handle = alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+				    "BM_%d : Alloc of WRB_HANDLE Failed "
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_io_hndls;
+ }
} else {
io_task->scsi_cmnd = NULL;
if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
@@ -4059,8 +4241,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->psgl_handle = (struct sgl_handle *)
alloc_mgmt_sgl_handle(phba);
spin_unlock(&phba->mgmt_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+					    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
goto free_hndls;
+ }
beiscsi_conn->login_in_progress = 1;
beiscsi_conn->plogin_sgl_handle =
@@ -4069,8 +4259,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
- goto free_io_hndls;
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+					    "BM_%d : Alloc of WRB_HANDLE Failed "
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
+ goto free_mgmt_hndls;
+ }
beiscsi_conn->plogin_wrb_handle =
io_task->pwrb_handle;
@@ -4085,14 +4283,28 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
spin_unlock(&phba->mgmt_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+					    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
goto free_hndls;
+ }
io_task->pwrb_handle =
alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+					    "BM_%d : Alloc of WRB_HANDLE Failed "
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
+ }
}
}
@@ -4124,11 +4336,64 @@ free_hndls:
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
io_task->bhs_pa.u.a64.address);
io_task->cmd_bhs = NULL;
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of SGL_ICD Failed\n");
return -ENOMEM;
}
+int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
+ unsigned int num_sg, unsigned int xferlen,
+ unsigned int writedir)
+{
+
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int doorbell = 0;
+
+ pwrb = io_task->pwrb_handle->pwrb;
+ memset(pwrb, 0, sizeof(*pwrb));
+
+ io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
+ io_task->bhs_len = sizeof(struct be_cmd_bhs);
+
+ if (writedir) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
+ INI_WR_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
+ INI_RD_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
+ }
+
+ io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
+ type, pwrb);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
+ cpu_to_be16(*(unsigned short *)
+ &io_task->cmd_bhs->iscsi_hdr.lun));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+
+ hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+
+ be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (io_task->pwrb_handle->wrb_index &
+ DB_DEF_PDU_WRB_INDEX_MASK) <<
+ DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+ iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ return 0;
+}
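/*
 * Annotation (not part of the patch): the doorbell written to
 * DB_TXULP0_OFFSET packs the connection ID in the low bits, the WRB index at
 * DB_DEF_PDU_WRB_INDEX_SHIFT and a posted count of 1 at
 * DB_DEF_PDU_NUM_POSTED_SHIFT, matching the existing beiscsi_iotask().
 */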
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
unsigned int num_sg, unsigned int xferlen,
@@ -4156,6 +4421,9 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
}
+ io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
+ type, pwrb);
+
AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
cpu_to_be16(*(unsigned short *)
&io_task->cmd_bhs->iscsi_hdr.lun));
@@ -4191,55 +4459,75 @@ static int beiscsi_mtask(struct iscsi_task *task)
struct iscsi_wrb *pwrb = NULL;
unsigned int doorbell = 0;
unsigned int cid;
+ unsigned int pwrb_typeoffset = 0;
cid = beiscsi_conn->beiscsi_conn_cid;
pwrb = io_task->pwrb_handle->pwrb;
memset(pwrb, 0, sizeof(*pwrb));
- AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
- be32_to_cpu(task->cmdsn));
- AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
- io_task->pwrb_handle->wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
- io_task->psgl_handle->sgl_index);
+
+ if (chip_skh_r(phba->pcidev)) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+ task->data_count);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
+ task->data_count);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+ }
+
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_LOGIN:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_NOOP_OUT:
if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
- pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ dmsg, pwrb, 1);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb,
+ dmsg, pwrb, 1);
} else {
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_RD_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ dmsg, pwrb, 0);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb,
+ dmsg, pwrb, 0);
}
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_TEXT:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_TMF_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_LOGOUT:
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- HWH_TYPE_LOGOUT);
+ ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
@@ -4251,11 +4539,10 @@ static int beiscsi_mtask(struct iscsi_task *task)
return -EINVAL;
}
- AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
- task->data_count);
- AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
- io_task->pwrb_handle->nxt_wrb_index);
- be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+ /* Set the task type */
+ io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
+ AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
+ AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
doorbell |= cid & DB_WRB_POST_CID_MASK;
doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4269,10 +4556,13 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
{
struct beiscsi_io_task *io_task = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ struct beiscsi_hba *phba = NULL;
struct scatterlist *sg;
int num_sg;
unsigned int writedir = 0, xferlen = 0;
+ phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
+
if (!sc)
return beiscsi_mtask(task);
@@ -4295,7 +4585,7 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
else
writedir = 0;
- return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
+ return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
}
/**
@@ -4326,20 +4616,24 @@ static int beiscsi_bsg_request(struct bsg_job *job)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : Failed to allocate memory for "
"beiscsi_bsg_request\n");
- return -EIO;
+ return -ENOMEM;
}
tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
&nonemb_cmd);
if (!tag) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BM_%d : be_cmd_get_mac_addr Failed\n");
+ "BM_%d : MBX Tag Allocation Failed\n");
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
+ }
+
+ rc = wait_event_interruptible_timeout(
+ phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
free_mcc_tag(&phba->ctrl, tag);
@@ -4356,11 +4650,13 @@ static int beiscsi_bsg_request(struct bsg_job *job)
nonemb_cmd.va, nonemb_cmd.dma);
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BM_%d : be_cmd_get_mac_addr Failed"
+ "BM_%d : MBX Cmd Failed"
" status = %d extd_status = %d\n",
status, extd_status);
return -EIO;
+ } else {
+ rc = 0;
}
break;
@@ -4380,14 +4676,18 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
beiscsi_log_enable_init(phba, beiscsi_log_enable);
}
+/*
+ * beiscsi_quiesce()- Cleanup Driver resources
+ * @phba: Instance Priv structure
+ *
+ * Free the OS and HW resources held by the driver
+ **/
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
unsigned int i, msix_vec;
- u8 *real_offset = 0;
- u32 value = 0;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -4411,19 +4711,14 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
- value = readl((void *)real_offset);
-
- if (value & 0x00010000) {
- value &= 0xfffeffff;
- writel(value, (void *)real_offset);
- }
beiscsi_unmap_pci_function(phba);
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
+
+ cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
}
static void beiscsi_remove(struct pci_dev *pcidev)
@@ -4476,16 +4771,33 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
return;
}
-static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
- const struct pci_device_id *id)
+/*
+ * beiscsi_hw_health_check()- Check adapter health
+ * @work: work item to check HW health
+ *
+ * Check if adapter in an unrecoverable state or not.
+ **/
+static void
+beiscsi_hw_health_check(struct work_struct *work)
+{
+ struct beiscsi_hba *phba =
+ container_of(work, struct beiscsi_hba,
+ beiscsi_hw_check_task.work);
+
+ beiscsi_ue_detect(phba);
+
+ schedule_delayed_work(&phba->beiscsi_hw_check_task,
+ msecs_to_jiffies(1000));
+}
+
+static int beiscsi_dev_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
{
struct beiscsi_hba *phba = NULL;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
- int ret, num_cpus, i;
- u8 *real_offset = 0;
- u32 value = 0;
+ int ret, i;
ret = beiscsi_enable_pci(pcidev);
if (ret < 0) {
@@ -4504,25 +4816,33 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
/* Initialize Driver configuration Paramters */
beiscsi_hba_attrs_init(phba);
+ phba->fw_timeout = false;
+
+
switch (pcidev->device) {
case BE_DEVICE_ID1:
case OC_DEVICE_ID1:
case OC_DEVICE_ID2:
phba->generation = BE_GEN2;
+ phba->iotask_fn = beiscsi_iotask;
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID3:
phba->generation = BE_GEN3;
+ phba->iotask_fn = beiscsi_iotask;
break;
+ case OC_SKH_ID1:
+ phba->generation = BE_GEN4;
+		phba->iotask_fn = beiscsi_iotask_v2;
+		break;
default:
phba->generation = 0;
}
if (enable_msix)
- num_cpus = find_num_cpus();
+ find_num_cpus(phba);
else
- num_cpus = 1;
- phba->num_cpus = num_cpus;
+ phba->num_cpus = 1;
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : num_cpus = %d\n",
phba->num_cpus);
@@ -4540,31 +4860,18 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto hba_free;
}
- if (!num_hba) {
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
- value = readl((void *)real_offset);
- if (value & 0x00010000) {
- gcrashmode++;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Loading Driver in crashdump mode\n");
- ret = beiscsi_cmd_reset_function(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Reset Failed. Aborting Crashdump\n");
- goto hba_free;
- }
- ret = be_chk_reset_complete(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to get out of reset."
- "Aborting Crashdump\n");
- goto hba_free;
- }
- } else {
- value |= 0x00010000;
- writel(value, (void *)real_offset);
- num_hba++;
- }
+ ret = beiscsi_cmd_reset_function(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Reset Failed. Aborting Crashdump\n");
+ goto hba_free;
+ }
+ ret = be_chk_reset_complete(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Failed to get out of reset. "
+ "Aborting Crashdump\n");
+ goto hba_free;
}
spin_lock_init(&phba->io_sgl_lock);
@@ -4596,7 +4903,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
+ snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
phba->shost->host_no);
phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
if (!phba->wq) {
@@ -4606,10 +4913,12 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_twq;
}
- INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
+ INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
+ beiscsi_hw_health_check);
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
+
if (blk_iopoll_enabled) {
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
@@ -4617,7 +4926,25 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
be_iopoll);
blk_iopoll_enable(&pbe_eq->iopoll);
}
+
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+ } else {
+ if (phba->msix_enabled) {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
+ } else {
+ pbe_eq = &phwi_context->be_eq[0];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
}
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4637,6 +4964,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
"iSCSI boot info.\n");
beiscsi_create_def_ifaces(phba);
+ schedule_delayed_work(&phba->beiscsi_hw_check_task,
+ msecs_to_jiffies(1000));
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
return 0;
@@ -4652,15 +4982,6 @@ free_twq:
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
free_port:
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
-
- value = readl((void *)real_offset);
-
- if (value & 0x00010000) {
- value &= 0xfffeffff;
- writel(value, (void *)real_offset);
- }
-
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index b8912263ef4e..5946577d79d6 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -36,12 +36,13 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "4.4.58.0"
+#define BUILD_STR "10.0.272.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
#define BE_VENDOR_ID 0x19A2
+#define ELX_VENDOR_ID 0x10DF
/* DEVICE ID's for BE2 */
#define BE_DEVICE_ID1 0x212
#define OC_DEVICE_ID1 0x702
@@ -51,6 +52,9 @@
#define BE_DEVICE_ID2 0x222
#define OC_DEVICE_ID3 0x712
+/* DEVICE ID for SKH */
+#define OC_SKH_ID1 0x722
+
#define BE2_IO_DEPTH 1024
#define BE2_MAX_SESSIONS 256
#define BE2_CMDS_PER_CXN 128
@@ -60,7 +64,11 @@
#define BE2_DEFPDU_HDR_SZ 64
#define BE2_DEFPDU_DATA_SZ 8192
-#define MAX_CPUS 31
+#define MAX_CPUS 64
+#define BEISCSI_MAX_NUM_CPUS 7
+#define OC_SKH_MAX_NUM_CPUS 63
+
+
#define BEISCSI_SGLIST_ELEMENTS 30
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
@@ -257,6 +265,7 @@ struct invalidate_command_table {
unsigned short cid;
} __packed;
+#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1)
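/*
 * Annotation (not part of the patch): chip_skh_r() is the runtime family test
 * (PCI device ID OC_SKH_ID1, 0x722) used throughout be_main.c to pick the
 * *_v2 register maps and the SKH-R specific code paths.
 */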
struct beiscsi_hba {
struct hba_parameters params;
struct hwi_controller *phwi_ctrlr;
@@ -270,12 +279,11 @@ struct beiscsi_hba {
struct be_bus_address pci_pa; /* CSR */
/* PCI representation of our HBA */
struct pci_dev *pcidev;
- unsigned int state;
unsigned short asic_revision;
unsigned int num_cpus;
unsigned int nxt_cqid;
- struct msix_entry msix_entries[MAX_CPUS + 1];
- char *msi_name[MAX_CPUS + 1];
+ struct msix_entry msix_entries[MAX_CPUS];
+ char *msi_name[MAX_CPUS];
bool msix_enabled;
struct be_mem_descriptor *init_mem;
@@ -325,12 +333,14 @@ struct beiscsi_hba {
spinlock_t cid_lock;
} fw_config;
+ unsigned int state;
+ bool fw_timeout;
+ bool ue_detected;
+ struct delayed_work beiscsi_hw_check_task;
+
u8 mac_address[ETH_ALEN];
- unsigned short todo_cq;
- unsigned short todo_mcc_cq;
char wq_name[20];
struct workqueue_struct *wq; /* The actuak work queue */
- struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
unsigned int generation;
unsigned int interface_handle;
@@ -338,7 +348,10 @@ struct beiscsi_hba {
struct invalidate_command_table inv_tbl[128];
unsigned int attr_log_enable;
-
+ int (*iotask_fn)(struct iscsi_task *,
+ struct scatterlist *sg,
+ uint32_t num_sg, uint32_t xferlen,
+ uint32_t writedir);
};
struct beiscsi_session {
@@ -410,6 +423,9 @@ struct beiscsi_io_task {
struct be_cmd_bhs *cmd_bhs;
struct be_bus_address bhs_pa;
unsigned short bhs_len;
+ dma_addr_t mtask_addr;
+ uint32_t mtask_data_count;
+ uint8_t wrb_type;
};
struct be_nonio_bhs {
@@ -457,6 +473,9 @@ struct beiscsi_offload_params {
#define OFFLD_PARAMS_HDE 0x00000008
#define OFFLD_PARAMS_IR2T 0x00000010
#define OFFLD_PARAMS_IMD 0x00000020
+#define OFFLD_PARAMS_DATA_SEQ_INORDER 0x00000040
+#define OFFLD_PARAMS_PDU_SEQ_INORDER 0x00000080
+#define OFFLD_PARAMS_MAX_R2T 0x00FFFF00
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@@ -471,7 +490,10 @@ struct amap_beiscsi_offload_params {
u8 hde[1];
u8 ir2t[1];
u8 imd[1];
- u8 pad[26];
+ u8 data_seq_inorder[1];
+ u8 pdu_seq_inorder[1];
+ u8 max_r2t[16];
+ u8 pad[8];
u8 exp_statsn[32];
};
@@ -569,6 +591,20 @@ struct amap_i_t_dpdu_cqe {
u8 valid;
} __packed;
+struct amap_i_t_dpdu_cqe_v2 {
+ u8 db_addr_hi[32]; /* DWORD 0 */
+ u8 db_addr_lo[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 num_cons; /* DWORD 2*/
+ u8 rsvd0[8]; /* DWORD 2 */
+ u8 dpl[17]; /* DWORD 2 */
+ u8 index[16]; /* DWORD 3 */
+ u8 cid[13]; /* DWORD 3 */
+ u8 rsvd1; /* DWORD 3 */
+ u8 final; /* DWORD 3 */
+ u8 valid; /* DWORD 3 */
+} __packed;
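/*
 * Annotation (not part of the patch): compared with amap_i_t_dpdu_cqe, the v2
 * default-PDU CQE widens dpl to 17 bits and carries a 13-bit cid in DWORD 3,
 * which is why hwi_get_async_handle() and beiscsi_process_cq() read
 * dpl/index/cid through this map on SKH-R adapters.
 */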
+
#define CQE_VALID_MASK 0x80000000
#define CQE_CODE_MASK 0x0000003F
#define CQE_CID_MASK 0x0000FFC0
@@ -617,6 +653,11 @@ struct iscsi_wrb {
} __packed;
#define WRB_TYPE_MASK 0xF0000000
+#define SKH_WRB_TYPE_OFFSET 27
+#define BE_WRB_TYPE_OFFSET 28
+
+#define ADAPTER_SET_WRB_TYPE(pwrb, wrb_type, type_offset) \
+ (pwrb->dw[0] |= (wrb_type << type_offset))
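/*
 * Minimal usage sketch (not from the patch): the WRB type field sits in bits
 * 31:28 on BE2/BE3 (4 bits, WRB_TYPE_MASK) and bits 31:27 on SKH-R (5 bits),
 * so the caller picks the shift and the macro ORs the type into dw[0];
 * pwrb and phba are assumed to be in scope.
 */
	unsigned int type_offset = chip_skh_r(phba->pcidev) ?
				   SKH_WRB_TYPE_OFFSET : BE_WRB_TYPE_OFFSET;

	ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, type_offset);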
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@@ -663,12 +704,57 @@ struct amap_iscsi_wrb {
} __packed;
+struct amap_iscsi_wrb_v2 {
+ u8 r2t_exp_dtl[25]; /* DWORD 0 */
+ u8 rsvd0[2]; /* DWORD 0*/
+ u8 type[5]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 wrb_idx[8]; /* DWORD 1 */
+ u8 lun[16]; /* DWORD 1 */
+ u8 sgl_idx[16]; /* DWORD 2 */
+ u8 ref_sgl_icd_idx[16]; /* DWORD 2 */
+ u8 exp_data_sn[32]; /* DWORD 3 */
+ u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */
+ u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */
+ u8 cq_id[16]; /* DWORD 6 */
+ u8 rsvd1[16]; /* DWORD 6 */
+ u8 cmdsn_itt[32]; /* DWORD 7 */
+ u8 sge0_addr_hi[32]; /* DWORD 8 */
+ u8 sge0_addr_lo[32]; /* DWORD 9 */
+ u8 sge0_offset[24]; /* DWORD 10 */
+ u8 rsvd2[7]; /* DWORD 10 */
+ u8 sge0_last; /* DWORD 10 */
+ u8 sge0_len[17]; /* DWORD 11 */
+ u8 rsvd3[7]; /* DWORD 11 */
+ u8 diff_enbl; /* DWORD 11 */
+ u8 u_run; /* DWORD 11 */
+ u8 o_run; /* DWORD 11 */
+ u8 invalid; /* DWORD 11 */
+ u8 dsp; /* DWORD 11 */
+ u8 dmsg; /* DWORD 11 */
+ u8 rsvd4; /* DWORD 11 */
+ u8 lt; /* DWORD 11 */
+ u8 sge1_addr_hi[32]; /* DWORD 12 */
+ u8 sge1_addr_lo[32]; /* DWORD 13 */
+ u8 sge1_r2t_offset[24]; /* DWORD 14 */
+ u8 rsvd5[7]; /* DWORD 14 */
+ u8 sge1_last; /* DWORD 14 */
+ u8 sge1_len[17]; /* DWORD 15 */
+ u8 rsvd6[15]; /* DWORD 15 */
+} __packed;
+
+
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
void beiscsi_process_all_cqs(struct work_struct *work);
+static inline bool beiscsi_error(struct beiscsi_hba *phba)
+{
+ return phba->ue_detected || phba->fw_timeout;
+}
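/*
 * Illustrative usage sketch (not from the patch): mailbox paths can bail out
 * early once either error flag is set.
 */
	if (beiscsi_error(phba))
		return -EIO;	/* adapter is in an unrecoverable state */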
+
struct pdu_nop_out {
u32 dw[12];
};
@@ -728,6 +814,7 @@ struct iscsi_target_context_update_wrb {
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
+#define BE_TGT_CTX_UPDT_CMD 0x07
struct amap_iscsi_target_context_update_wrb {
u8 lun[14]; /* DWORD 0 */
u8 lt; /* DWORD 0 */
@@ -773,6 +860,47 @@ struct amap_iscsi_target_context_update_wrb {
} __packed;
+#define BEISCSI_MAX_RECV_DATASEG_LEN (64 * 1024)
+#define BEISCSI_MAX_CXNS 1
+struct amap_iscsi_target_context_update_wrb_v2 {
+ u8 max_burst_length[24]; /* DWORD 0 */
+ u8 rsvd0[3]; /* DWORD 0 */
+ u8 type[5]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 wrb_idx[8]; /* DWORD 1 */
+ u8 rsvd1[16]; /* DWORD 1 */
+ u8 max_send_data_segment_length[24]; /* DWORD 2 */
+ u8 rsvd2[8]; /* DWORD 2 */
+ u8 first_burst_length[24]; /* DWORD 3 */
+	u8 rsvd3[8];	/* DWORD 3 */
+ u8 max_r2t[16]; /* DWORD 4 */
+ u8 rsvd4[10]; /* DWORD 4 */
+ u8 hde; /* DWORD 4 */
+ u8 dde; /* DWORD 4 */
+ u8 erl[2]; /* DWORD 4 */
+ u8 imd; /* DWORD 4 */
+ u8 ir2t; /* DWORD 4 */
+ u8 stat_sn[32]; /* DWORD 5 */
+ u8 rsvd5[32]; /* DWORD 6 */
+ u8 rsvd6[32]; /* DWORD 7 */
+ u8 max_recv_dataseg_len[24]; /* DWORD 8 */
+ u8 rsvd7[8]; /* DWORD 8 */
+ u8 rsvd8[32]; /* DWORD 9 */
+ u8 rsvd9[32]; /* DWORD 10 */
+ u8 max_cxns[16]; /* DWORD 11 */
+ u8 rsvd10[11]; /* DWORD 11*/
+ u8 invld; /* DWORD 11 */
+ u8 rsvd11;/* DWORD 11*/
+ u8 dmsg; /* DWORD 11 */
+ u8 data_seq_inorder; /* DWORD 11 */
+ u8 pdu_seq_inorder; /* DWORD 11 */
+ u8 rsvd12[32]; /*DWORD 12 */
+ u8 rsvd13[32]; /* DWORD 13 */
+ u8 rsvd14[32]; /* DWORD 14 */
+ u8 rsvd15[32]; /* DWORD 15 */
+} __packed;
+
+
struct be_ring {
u32 pages; /* queue size in pages */
u32 id; /* queue id assigned by beklib */
@@ -837,7 +965,7 @@ struct hwi_context_memory {
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
struct be_eq_obj be_eq[MAX_CPUS];
- struct be_queue_info be_cq[MAX_CPUS];
+ struct be_queue_info be_cq[MAX_CPUS - 1];
struct be_queue_info be_def_hdrq;
struct be_queue_info be_def_dataq;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index aab5dd359e2c..a6c2fe4b4d65 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -22,6 +22,138 @@
#include <scsi/scsi_bsg_iscsi.h>
#include "be_mgmt.h"
#include "be_iscsi.h"
+#include "be_main.h"
+
+/* UE Status Low CSR */
+static const char * const desc_ue_status_low[] = {
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "AXGMAC0",
+ "AXGMAC1",
+ "JTAG",
+ "MPU_INTPEND"
+};
+
+/* UE Status High CSR */
+static const char * const desc_ue_status_hi[] = {
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "HOST8",
+ "HOST9",
+ "NETC",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown"
+};
+
+/*
+ * beiscsi_ue_detect()- Detect Unrecoverable Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read registers linked to UE and check for the UE status
+ **/
+void beiscsi_ue_detect(struct beiscsi_hba *phba)
+{
+ uint32_t ue_hi = 0, ue_lo = 0;
+ uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
+ uint8_t i = 0;
+
+ if (phba->ue_detected)
+ return;
+
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_LOW, &ue_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_LOW,
+ &ue_mask_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_HIGH,
+ &ue_hi);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_HI,
+ &ue_mask_hi);
+
+ ue_lo = (ue_lo & ~ue_mask_lo);
+ ue_hi = (ue_hi & ~ue_mask_hi);
+
+
+ if (ue_lo || ue_hi) {
+ phba->ue_detected = true;
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : Error detected on the adapter\n");
+ }
+
+ if (ue_lo) {
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : UE_LOW %s bit set\n",
+ desc_ue_status_low[i]);
+ }
+ }
+
+ if (ue_hi) {
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : UE_HIGH %s bit set\n",
+ desc_ue_status_hi[i]);
+ }
+ }
+}
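beiscsi_ue_detect() only latches phba->ue_detected and logs the offending bits; it does not recover the adapter. A sketch of how it could be driven from a periodic health check and consumed through beiscsi_error(); the wrapper below is illustrative only and not added by this patch:

static void beiscsi_check_adapter_health(struct beiscsi_hba *phba)
{
	/* Latch any newly reported unrecoverable error bits. */
	beiscsi_ue_detect(phba);

	/* Other paths then short-circuit on beiscsi_error(). */
	if (beiscsi_error(phba))
		pr_err("beiscsi: adapter %s in error state\n",
		       dev_name(&phba->pcidev->dev));
}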
/**
* mgmt_reopen_session()- Reopen a session based on reopen_type
@@ -575,13 +707,20 @@ unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
return status;
}
+/*
+ * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
+ * @phba: Driver priv structure
+ * @nonemb_cmd: Address of the MBX command issued
+ * @resp_buf: Buffer to copy the MBX cmd response
+ * @resp_buf_len: response length to be copied
+ *
+ **/
static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *nonemb_cmd, void *resp_buf,
int resp_buf_len)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- unsigned short status, extd_status;
struct be_sge *sge;
unsigned int tag;
int rc = 0;
@@ -599,31 +738,25 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
sge->len = cpu_to_le32(nonemb_cmd->size);
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : mgmt_exec_nonemb_cmd Failed status = %d"
- "extd_status = %d\n", status, extd_status);
+ "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
+
rc = -EIO;
- goto free_tag;
+ goto free_cmd;
}
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
-free_tag:
- free_mcc_tag(&phba->ctrl, tag);
free_cmd:
pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
@@ -1009,10 +1142,9 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
{
struct be_cmd_get_boot_target_resp *boot_resp;
struct be_mcc_wrb *wrb;
- unsigned int tag, wrb_num;
+ unsigned int tag;
uint8_t boot_retry = 3;
- unsigned short status, extd_status;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ int rc;
do {
/* Get the Boot Target Session Handle and Count*/
@@ -1022,24 +1154,16 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BG_%d : Getting Boot Target Info Failed\n");
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : mgmt_get_boot_target Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
+ "BG_%d : MBX CMD get_boot_target Failed\n");
return -EBUSY;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
boot_resp = embedded_payload(wrb);
/* Check if the there are any Boot targets configured */
@@ -1064,24 +1188,15 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : mgmt_reopen_session Failed\n");
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : mgmt_reopen_session Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
- return -EBUSY;
+ "BG_%d : mgmt_reopen_session Failed");
+ return rc;
}
- free_mcc_tag(&phba->ctrl, tag);
-
} while (--boot_retry);
/* Couldn't log into the boot target */
@@ -1106,8 +1221,9 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
int mgmt_set_vlan(struct beiscsi_hba *phba,
uint16_t vlan_tag)
{
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ int rc;
+ unsigned int tag;
+ struct be_mcc_wrb *wrb = NULL;
tag = be_cmd_set_vlan(phba, vlan_tag);
if (!tag) {
@@ -1115,24 +1231,208 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
"BG_%d : VLAN Setting Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ }
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
- "BS_%d : status : %d extd_status : %d\n",
- status, extd_status);
+ "BS_%d : VLAN MBX Cmd Failed\n");
+ return rc;
+ }
+ return rc;
+}
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+/**
+ * beiscsi_drvr_ver_disp()- Display the driver Name and Version
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text driver name and version
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
+}
+
+/**
+ * beiscsi_adap_family_disp()- Display adapter family.
+ * @dev: ptr to device to get priv structure
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text of the adapter family
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ uint16_t dev_id = 0;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ dev_id = phba->pcidev->device;
+ switch (dev_id) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ case OC_DEVICE_ID2:
+ return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n");
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID3:
+ return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n");
+ break;
+ case OC_SKH_ID1:
+ return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n");
+ break;
+ default:
+ return snprintf(buf, PAGE_SIZE,
+ "Unkown Adapter Family: 0x%x\n", dev_id);
+ break;
}
+}
- free_mcc_tag(&phba->ctrl, tag);
- return 0;
+
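The two show routines above are plain sysfs attribute callbacks. A sketch of how they would typically be exposed on the SCSI host; the attribute names and the registration site are assumptions, the actual wiring is not shown in this hunk:

static DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
static DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO,
		   beiscsi_adap_family_disp, NULL);

static struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	NULL,
};
/* beiscsi_attrs would then be plugged into scsi_host_template.shost_attrs. */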
+void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle,
+ struct be_mem_descriptor *mem_descr)
+{
+ struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
+
+ memset(pwrb, 0, sizeof(*pwrb));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
+ BE_TGT_CTX_UPDT_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ first_burst_length,
+ pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ session_state, pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
+ pwrb, 1);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
+ pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
+ 0);
+
+ mem_descr += ISCSI_MEM_GLOBAL_HEADER;
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_hi, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_lo, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_lo);
+}
+
+void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle)
+{
+ struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
+
+ memset(pwrb, 0, sizeof(*pwrb));
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ type, pwrb,
+ BE_TGT_CTX_UPDT_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ first_burst_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_cxns, pwrb, BEISCSI_MAX_CXNS);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ data_seq_inorder,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ data_seq_inorder) / 32] &
+ OFFLD_PARAMS_DATA_SEQ_INORDER) >> 6);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ pdu_seq_inorder,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ pdu_seq_inorder) / 32] &
+ OFFLD_PARAMS_PDU_SEQ_INORDER) >> 7);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_r2t,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_r2t) / 32] &
+ OFFLD_PARAMS_MAX_R2T) >> 8);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index c50cef6fec0d..2e4968add799 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -30,6 +30,12 @@
#define IP_V6_LEN 16
#define IP_V4_LEN 4
+/* UE Status and Mask register */
+#define PCICFG_UE_STATUS_LOW 0xA0
+#define PCICFG_UE_STATUS_HIGH 0xA4
+#define PCICFG_UE_STATUS_MASK_LOW 0xA8
+#define PCICFG_UE_STATUS_MASK_HI 0xAC
+
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
@@ -301,4 +307,19 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+
+ssize_t beiscsi_drvr_ver_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_adap_family_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle,
+ struct be_mem_descriptor *mem_descr);
+
+void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle);
+void beiscsi_ue_detect(struct beiscsi_hba *phba);
+
#endif
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 895b0e516e07..e6bf12675db8 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1739,7 +1739,7 @@ static struct pci_driver bfad_pci_driver = {
.name = BFAD_DRIVER_NAME,
.id_table = bfad_id_table,
.probe = bfad_pci_probe,
- .remove = __devexit_p(bfad_pci_remove),
+ .remove = bfad_pci_remove,
.err_handler = &bfad_err_handler,
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e0558656c646..70ecd953a579 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -25,7 +25,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_RELDATE "Jun 04, 2012"
-static char version[] __devinitdata =
+static char version[] =
"Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 3f9e7061258e..b44d04e41b0d 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -800,7 +800,7 @@ extern struct device_attribute *bnx2i_dev_attributes[];
/*
* Function Prototypes
*/
-extern void bnx2i_identify_device(struct bnx2i_hba *hba);
+extern void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev);
extern void bnx2i_ulp_init(struct cnic_dev *dev);
extern void bnx2i_ulp_exit(struct cnic_dev *dev);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index b17637aab9a7..50fef6963a81 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -21,7 +21,7 @@ static u32 adapter_count;
#define DRV_MODULE_VERSION "2.7.2.2"
#define DRV_MODULE_RELDATE "Apr 25, 2012"
-static char version[] __devinitdata =
+static char version[] =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -79,42 +79,33 @@ static struct notifier_block bnx2i_cpu_notifier = {
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
+ * @dev: Corresponding cnic device
*
* This function identifies the NX2 device type and sets appropriate
* queue mailbox register access method, 5709 requires driver to
* access MBOX regs using *bin* mode
*/
-void bnx2i_identify_device(struct bnx2i_hba *hba)
+void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev)
{
hba->cnic_dev_type = 0;
- if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
- set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
- else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
- set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
- else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
- set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
- hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
- } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57712E ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800_VF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810_VF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840_VF)
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ if (hba->pci_did == PCI_DEVICE_ID_NX2_5706 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5706S) {
+ set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5708 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5708S) {
+ set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5709 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5709S) {
+ set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+ hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+ }
+ } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
- else
+ } else {
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
hba->pci_did);
+ }
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 3b34c13e2f02..0056e47bd56e 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -808,7 +808,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
- bnx2i_identify_device(hba);
+ bnx2i_identify_device(hba, cnic);
bnx2i_setup_host_queue_size(hba, shost);
hba->reg_base = pci_resource_start(hba->pcidev, 0);
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
index d40ea2f5be10..1e3f96adf9da 100644
--- a/drivers/scsi/bvme6000_scsi.c
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -34,7 +34,7 @@ static struct scsi_host_template bvme6000_scsi_driver_template = {
static struct platform_device *bvme6000_scsi_device;
-static __devinit int
+static int
bvme6000_probe(struct platform_device *dev)
{
struct Scsi_Host *host;
@@ -88,7 +88,7 @@ bvme6000_probe(struct platform_device *dev)
return -ENODEV;
}
-static __devexit int
+static int
bvme6000_device_remove(struct platform_device *dev)
{
struct Scsi_Host *host = platform_get_drvdata(dev);
@@ -108,7 +108,7 @@ static struct platform_driver bvme6000_scsi_driver = {
.owner = THIS_MODULE,
},
.probe = bvme6000_probe,
- .remove = __devexit_p(bvme6000_device_remove),
+ .remove = bvme6000_device_remove,
};
static int __init bvme6000_scsi_init(void)
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig
new file mode 100644
index 000000000000..4d03b032aa10
--- /dev/null
+++ b/drivers/scsi/csiostor/Kconfig
@@ -0,0 +1,19 @@
+config SCSI_CHELSIO_FCOE
+ tristate "Chelsio Communications FCoE support"
+ depends on PCI && SCSI
+ select SCSI_FC_ATTRS
+ select FW_LOADER
+ help
+ This driver supports FCoE Offload functionality over
+ Chelsio T4-based 10Gb Converged Network Adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module choose M here; the module
+ will be called csiostor.
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
new file mode 100644
index 000000000000..b581966c88f9
--- /dev/null
+++ b/drivers/scsi/csiostor/Makefile
@@ -0,0 +1,11 @@
+#
+## Chelsio FCoE driver
+#
+##
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
+
+csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
+ csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
new file mode 100644
index 000000000000..065a87ace623
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -0,0 +1,796 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "csio_init.h"
+
+static void
+csio_vport_set_state(struct csio_lnode *ln);
+
+/*
+ * csio_reg_rnode - Register a remote port with FC transport.
+ * @rn: Rnode representing remote port.
+ *
+ * Call fc_remote_port_add() to register this remote port with FC transport.
+ * If remote port is Initiator OR Target OR both, change the role appropriately.
+ *
+ */
+void
+csio_reg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct fc_rport_identifiers ids;
+ struct fc_rport *rport;
+ struct csio_service_parms *sp;
+
+ ids.node_name = wwn_to_u64(csio_rn_wwnn(rn));
+ ids.port_name = wwn_to_u64(csio_rn_wwpn(rn));
+ ids.port_id = rn->nport_id;
+ ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
+ rport = rn->rport;
+ CSIO_ASSERT(rport != NULL);
+ goto update_role;
+ }
+
+ rn->rport = fc_remote_port_add(shost, 0, &ids);
+ if (!rn->rport) {
+ csio_ln_err(ln, "Failed to register rport = 0x%x.\n",
+ rn->nport_id);
+ return;
+ }
+
+ ln->num_reg_rnodes++;
+ rport = rn->rport;
+ spin_lock_irq(shost->host_lock);
+ *((struct csio_rnode **)rport->dd_data) = rn;
+ spin_unlock_irq(shost->host_lock);
+
+ sp = &rn->rn_sparm;
+ rport->maxframe_size = ntohs(sp->csp.sp_bb_data);
+ if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID)
+ rport->supported_classes = FC_COS_CLASS3;
+ else
+ rport->supported_classes = FC_COS_UNSPECIFIED;
+update_role:
+ if (rn->role & CSIO_RNFR_INITIATOR)
+ ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (rn->role & CSIO_RNFR_TARGET)
+ ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ if (ids.roles != FC_RPORT_ROLE_UNKNOWN)
+ fc_remote_port_rolechg(rport, ids.roles);
+
+ rn->scsi_id = rport->scsi_target_id;
+
+ csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
+ rn->nport_id, ids.roles);
+}
+
+/*
+ * csio_unreg_rnode - Unregister a remote port with FC transport.
+ * @rn: Rnode representing remote port.
+ *
+ * Call fc_remote_port_delete() to unregister this remote port with FC
+ * transport.
+ *
+ */
+void
+csio_unreg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct fc_rport *rport = rn->rport;
+
+ rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
+ fc_remote_port_delete(rport);
+ ln->num_reg_rnodes--;
+
+ csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id);
+}
+
+/*
+ * csio_lnode_async_event - Async events from local port.
+ * @ln: lnode representing local port.
+ *
+ * Async events from the local node that the FC transport/SCSI ML
+ * should be made aware of (e.g. RSCN).
+ */
+void
+csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt)
+{
+ switch (fc_evt) {
+ case CSIO_LN_FC_RSCN:
+ /* Get payload of rscn from ln */
+ /* For each RSCN entry */
+ /*
+ * fc_host_post_event(shost,
+ * fc_get_event_number(),
+ * FCH_EVT_RSCN,
+ * rscn_entry);
+ */
+ break;
+ case CSIO_LN_FC_LINKUP:
+ /* send fc_host_post_event */
+ /* set vport state */
+ if (csio_is_npiv_ln(ln))
+ csio_vport_set_state(ln);
+
+ break;
+ case CSIO_LN_FC_LINKDOWN:
+ /* send fc_host_post_event */
+ /* set vport state */
+ if (csio_is_npiv_ln(ln))
+ csio_vport_set_state(ln);
+
+ break;
+ case CSIO_LN_FC_ATTRIB_UPDATE:
+ csio_fchost_attr_init(ln);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * csio_fchost_attr_init - Initialize FC transport attributes
+ * @ln: Lnode.
+ *
+ */
+void
+csio_fchost_attr_init(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln));
+ fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln));
+
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+ fc_host_max_npiv_vports(shost) =
+ (csio_lnode_to_hw(ln))->fres_info.max_vnps;
+ fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT |
+ FC_PORTSPEED_1GBIT;
+
+ fc_host_maxframe_size(shost) = ntohs(ln->ln_sparm.csp.sp_bb_data);
+ memset(fc_host_supported_fc4s(shost), 0,
+ sizeof(fc_host_supported_fc4s(shost)));
+ fc_host_supported_fc4s(shost)[7] = 1;
+
+ memset(fc_host_active_fc4s(shost), 0,
+ sizeof(fc_host_active_fc4s(shost)));
+ fc_host_active_fc4s(shost)[7] = 1;
+}
+
+/*
+ * csio_get_host_port_id - sysfs entries for nport_id is
+ * populated/cached from this function
+ */
+static void
+csio_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ fc_host_port_id(shost) = ln->nport_id;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_port_type - Return FC local port type.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ if (csio_is_npiv_ln(ln))
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_port_state - Return FC local port state.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ char state[16];
+
+ spin_lock_irq(&hw->lock);
+
+ csio_lnode_state_to_str(ln, state);
+ if (!strcmp(state, "READY"))
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else if (!strcmp(state, "OFFLINE"))
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_speed - Return link speed to FC transport.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_speed(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ switch (hw->pport[ln->portid].link_speed) {
+ case FW_PORT_CAP_SPEED_1G:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case FW_PORT_CAP_SPEED_10G:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_fabric_name - Return fabric name
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_rnode *rn = NULL;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI);
+ if (rn)
+ fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn));
+ else
+ fc_host_fabric_name(shost) = 0;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_stats - Return FC transport statistics.
+ * @shost: scsi host.
+ *
+ */
+static struct fc_host_statistics *
+csio_get_stats(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct fc_host_statistics *fhs = &ln->fch_stats;
+ struct fw_fcoe_port_stats fcoe_port_stats;
+ uint64_t seconds;
+
+ memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats));
+ csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats);
+
+ fhs->tx_frames += (be64_to_cpu(fcoe_port_stats.tx_bcast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_mcast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_ucast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_offload_frames));
+ fhs->tx_words += (be64_to_cpu(fcoe_port_stats.tx_bcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_mcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_ucast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_offload_bytes)) /
+ CSIO_WORD_TO_BYTE;
+ fhs->rx_frames += (be64_to_cpu(fcoe_port_stats.rx_bcast_frames) +
+ be64_to_cpu(fcoe_port_stats.rx_mcast_frames) +
+ be64_to_cpu(fcoe_port_stats.rx_ucast_frames));
+ fhs->rx_words += (be64_to_cpu(fcoe_port_stats.rx_bcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.rx_mcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.rx_ucast_bytes)) /
+ CSIO_WORD_TO_BYTE;
+ fhs->error_frames += be64_to_cpu(fcoe_port_stats.rx_err_frames);
+ fhs->fcp_input_requests += ln->stats.n_input_requests;
+ fhs->fcp_output_requests += ln->stats.n_output_requests;
+ fhs->fcp_control_requests += ln->stats.n_control_requests;
+ fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20;
+ fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20;
+ fhs->link_failure_count = ln->stats.n_link_down;
+ /* Reset stats for the device */
+ seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start;
+ do_div(seconds, 1000);
+ fhs->seconds_since_last_reset = seconds;
+
+ return fhs;
+}
+
+/*
+ * csio_set_rport_loss_tmo - Set the rport dev loss timeout
+ * @rport: fc rport.
+ * @timeout: new value for dev loss tmo.
+ *
+ * If timeout is non zero set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ */
+static void
+csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+static void
+csio_vport_set_state(struct csio_lnode *ln)
+{
+ struct fc_vport *fc_vport = ln->fc_vport;
+ struct csio_lnode *pln = ln->pln;
+ char state[16];
+
+ /* Set fc vport state based on physical lnode */
+ csio_lnode_state_to_str(pln, state);
+ if (strcmp(state, "READY")) {
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ return;
+ }
+
+ if (!(pln->flags & CSIO_LNF_NPIVSUPP)) {
+ fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP);
+ return;
+ }
+
+ /* Set fc vport state based on virtual lnode */
+ csio_lnode_state_to_str(ln, state);
+ if (strcmp(state, "READY")) {
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ return;
+ }
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+}
+
+static int
+csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+ struct csio_mb *mbp;
+ struct fw_fcoe_vnp_cmd *rsp;
+ int ret = 0;
+ int retry = 0;
+
+ /* Issue VNP cmd to alloc vport */
+ /* Allocate Mbox request */
+ spin_lock_irq(&hw->lock);
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pln = ln->pln;
+ ln->fcf_flowid = pln->fcf_flowid;
+ ln->portid = pln->portid;
+
+ csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ pln->fcf_flowid, pln->vnp_flowid, 0,
+ csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);
+
+ for (retry = 0; retry < 3; retry++) {
+ /* FW is expected to complete vnp cmd in immediate mode
+ * without much delay.
+ * Otherwise, there will be an increase in IO latency since the
+ * HW lock is held till completion of the vnp mbox cmd.
+ */
+ ret = csio_mb_issue(hw, mbp);
+ if (ret != -EBUSY)
+ break;
+
+ /* Retry if mbox returns busy */
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ if (ret) {
+ csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
+ goto out_free;
+ }
+
+ /* Process Mbox response of VNP command */
+ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
+ FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
+ ntohl(rsp->gen_wwn_to_vnpi));
+ memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
+
+ csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
+ csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
+ ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
+ ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
+ ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
+ ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
+ csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
+ ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
+ ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
+ ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
+ ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);
+
+out_free:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ spin_unlock_irq(&hw->lock);
+ return ret;
+}
+
+static int
+csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+ struct csio_mb *mbp;
+ struct fw_fcoe_vnp_cmd *rsp;
+ int ret = 0;
+ int retry = 0;
+
+ /* Issue VNP cmd to free vport */
+ /* Allocate Mbox request */
+
+ spin_lock_irq(&hw->lock);
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pln = ln->pln;
+
+ csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ ln->fcf_flowid, ln->vnp_flowid,
+ NULL);
+
+ for (retry = 0; retry < 3; retry++) {
+ ret = csio_mb_issue(hw, mbp);
+ if (ret != -EBUSY)
+ break;
+
+ /* Retry if mbox returns busy */
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ if (ret) {
+ csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
+ goto out_free;
+ }
+
+ /* Process Mbox response of VNP command */
+ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
+ FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+ ret = -EINVAL;
+ }
+
+out_free:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ spin_unlock_irq(&hw->lock);
+ return ret;
+}
+
+static int
+csio_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct csio_lnode *pln = shost_priv(shost);
+ struct csio_lnode *ln = NULL;
+ struct csio_hw *hw = csio_lnode_to_hw(pln);
+ uint8_t wwn[8];
+ int ret = -1;
+
+ ln = csio_shost_init(hw, &fc_vport->dev, false, pln);
+ if (!ln)
+ goto error;
+
+ if (fc_vport->node_name != 0) {
+ u64_to_wwn(fc_vport->node_name, wwn);
+
+ if (!CSIO_VALID_WWN(wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. Invalid wwnn\n");
+ goto error;
+ }
+ memcpy(csio_ln_wwnn(ln), wwn, 8);
+ }
+
+ if (fc_vport->port_name != 0) {
+ u64_to_wwn(fc_vport->port_name, wwn);
+
+ if (!CSIO_VALID_WWN(wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. Invalid wwpn\n");
+ goto error;
+ }
+
+ if (csio_lnode_lookup_by_wwpn(hw, wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. wwpn already exists\n");
+ goto error;
+ }
+ memcpy(csio_ln_wwpn(ln), wwn, 8);
+ }
+
+ fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+
+ if (csio_fcoe_alloc_vnp(hw, ln))
+ goto error;
+
+ *(struct csio_lnode **)fc_vport->dd_data = ln;
+ ln->fc_vport = fc_vport;
+ if (!fc_vport->node_name)
+ fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
+ if (!fc_vport->port_name)
+ fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln));
+ csio_fchost_attr_init(ln);
+ return 0;
+error:
+ if (ln)
+ csio_shost_exit(ln);
+
+ return ret;
+}
+
+static int
+csio_vport_delete(struct fc_vport *fc_vport)
+{
+ struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ int rmv;
+
+ spin_lock_irq(&hw->lock);
+ rmv = csio_is_hw_removing(hw);
+ spin_unlock_irq(&hw->lock);
+
+ if (rmv) {
+ csio_shost_exit(ln);
+ return 0;
+ }
+
+ /* Quiesce ios and send remove event to lnode */
+ scsi_block_requests(shost);
+ spin_lock_irq(&hw->lock);
+ csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
+ csio_lnode_close(ln);
+ spin_unlock_irq(&hw->lock);
+ scsi_unblock_requests(shost);
+
+ /* Free vnp */
+ if (fc_vport->vport_state != FC_VPORT_DISABLED)
+ csio_fcoe_free_vnp(hw, ln);
+
+ csio_shost_exit(ln);
+ return 0;
+}
+
+static int
+csio_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ /* disable vport */
+ if (disable) {
+ /* Quiesce ios and send stop event to lnode */
+ scsi_block_requests(shost);
+ spin_lock_irq(&hw->lock);
+ csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
+ csio_lnode_stop(ln);
+ spin_unlock_irq(&hw->lock);
+ scsi_unblock_requests(shost);
+
+ /* Free vnp */
+ csio_fcoe_free_vnp(hw, ln);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ csio_ln_err(ln, "vport disabled\n");
+ return 0;
+ } else {
+ /* enable vport */
+ fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+ if (csio_fcoe_alloc_vnp(hw, ln)) {
+ csio_ln_err(ln, "vport enabled failed.\n");
+ return -1;
+ }
+ csio_ln_err(ln, "vport enabled\n");
+ return 0;
+ }
+}
+
+static void
+csio_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+ struct csio_rnode *rn;
+ struct csio_hw *hw;
+ struct csio_lnode *ln;
+
+ rn = *((struct csio_rnode **)rport->dd_data);
+ ln = csio_rnode_to_lnode(rn);
+ hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+
+ /* return if driver is being removed or same rnode comes back online */
+ if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn))
+ goto out;
+
+ csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n",
+ rn, rn->nport_id, csio_rn_flowid(rn));
+
+ CSIO_INC_STATS(ln, n_dev_loss_tmo);
+
+ /*
+ * enqueue devloss event to event worker thread to serialize all
+ * rnode events.
+ */
+ if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) {
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out;
+ }
+
+ if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irq(&hw->lock);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+
+out:
+ spin_unlock_irq(&hw->lock);
+}
+
+/* FC transport functions template - Physical port */
+struct fc_function_template csio_fc_transport_funcs = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_id = csio_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = csio_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = csio_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .show_host_active_fc4s = 1,
+ .get_host_speed = csio_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_fabric_name = csio_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ .get_fc_host_stats = csio_get_stats,
+
+ .dd_fcrport_size = sizeof(struct csio_rnode *),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .show_starget_port_id = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
+ .dd_fcvport_size = sizeof(struct csio_lnode *),
+
+ .vport_create = csio_vport_create,
+ .vport_disable = csio_vport_disable,
+ .vport_delete = csio_vport_delete,
+};
+
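The template above only describes the physical port's transport callbacks; it still has to be handed to the FC transport class. A minimal sketch of that registration; the wrapper function and variable name are assumptions, the real call site is in the driver's init code and is not part of this file:

static struct scsi_transport_template *csio_fcoe_transport;

static int csio_attach_transports(void)
{
	/* Register the physical-port callbacks with the FC transport class. */
	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
	if (!csio_fcoe_transport)
		return -ENODEV;
	return 0;
}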
+/* FC transport functions template - Virtual port */
+struct fc_function_template csio_fc_transport_vport_funcs = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_id = csio_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = csio_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = csio_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = csio_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = csio_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ .get_fc_host_stats = csio_get_stats,
+
+ .dd_fcrport_size = sizeof(struct csio_rnode *),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .show_starget_port_id = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
+
+};
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
new file mode 100644
index 000000000000..c38017b4af98
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_defs.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_DEFS_H__
+#define __CSIO_DEFS_H__
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+
+#define CSIO_INVALID_IDX 0xFFFFFFFF
+#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++)
+#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--)
+#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? true : false)
+#define CSIO_DID_MASK 0xFFFFFF
+#define CSIO_WORD_TO_BYTE 4
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+static inline int
+csio_list_deleted(struct list_head *list)
+{
+ return ((list->next == list) && (list->prev == list));
+}
+
+#define csio_list_next(elem) (((struct list_head *)(elem))->next)
+#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
+
+/* State machine */
+typedef void (*csio_sm_state_t)(void *, uint32_t);
+
+struct csio_sm {
+ struct list_head sm_list;
+ csio_sm_state_t sm_state;
+};
+
+static inline void
+csio_set_state(void *smp, void *state)
+{
+ ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
+}
+
+static inline void
+csio_init_state(struct csio_sm *smp, void *state)
+{
+ csio_set_state(smp, state);
+}
+
+static inline void
+csio_post_event(void *smp, uint32_t evt)
+{
+ ((struct csio_sm *)smp)->sm_state(smp, evt);
+}
+
+static inline csio_sm_state_t
+csio_get_state(void *smp)
+{
+ return ((struct csio_sm *)smp)->sm_state;
+}
+
+static inline bool
+csio_match_state(void *smp, void *state)
+{
+ return (csio_get_state(smp) == (csio_sm_state_t)state);
+}
+
+#define CSIO_ASSERT(cond) BUG_ON(!(cond))
+
+#ifdef __CSIO_DEBUG__
+#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c))
+#else
+#define CSIO_DB_ASSERT(__c)
+#endif
+
+#endif /* ifndef __CSIO_DEFS_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
new file mode 100644
index 000000000000..8ecdb94a59f4
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -0,0 +1,4395 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+int csio_force_master;
+int csio_dbg_level = 0xFEFF;
+unsigned int csio_port_mask = 0xf;
+
+/* Default FW event queue entries. */
+static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;
+
+/* Default MSI param level */
+int csio_msi = 2;
+
+/* FCoE function instances */
+static int dev_num;
+
+/* FCoE Adapter types & its description */
+static const struct csio_adap_desc csio_fcoe_adapters[] = {
+ {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
+ {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
+ {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
+ {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
+ {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
+ {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
+ {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
+ {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
+ {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
+ {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
+ {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
+ {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
+ {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
+ {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
+ {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
+ {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"}
+};
+
+static void csio_mgmtm_cleanup(struct csio_mgmtm *);
+static void csio_hw_mbm_cleanup(struct csio_hw *);
+
+/* State machine forward declarations */
+static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);
+
+static void csio_hw_initialize(struct csio_hw *hw);
+static void csio_evtq_stop(struct csio_hw *hw);
+static void csio_evtq_start(struct csio_hw *hw);
+
+int csio_is_hw_ready(struct csio_hw *hw)
+{
+ return csio_match_state(hw, csio_hws_ready);
+}
+
+int csio_is_hw_removing(struct csio_hw *hw)
+{
+ return csio_match_state(hw, csio_hws_removing);
+}
+
+
+/*
+ * csio_hw_wait_op_done_val - wait until an operation is completed
+ * @hw: the HW module
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+static int
+csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
+ int polarity, int attempts, int delay, uint32_t *valp)
+{
+ uint32_t val;
+ while (1) {
+ val = csio_rd_reg32(hw, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+void
+csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
+ uint32_t value)
+{
+ uint32_t val = csio_rd_reg32(hw, reg) & ~mask;
+
+ csio_wr_reg32(hw, val | value, reg);
+ /* Flush */
+ csio_rd_reg32(hw, reg);
+
+}
+
+/*
+ * csio_hw_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int
+csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
+ csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
+ csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ MC_BIST_CMD);
+ i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/*
+ * csio_hw_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int
+csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ idx *= EDC_STRIDE;
+ if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
+ csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
+ csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
+ EDC_BIST_CMD + idx);
+ i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/*
+ * csio_mem_win_rw - read/write memory through PCIE memory window
+ * @hw: the adapter
+ * @addr: address of first byte requested
+ * @data: MEMWIN0_APERTURE bytes of data containing the requested address
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
+ * MEMWIN0_APERTURE-byte-aligned address that covers the requested
+ * address @addr.
+ */
+static int
+csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir)
+{
+ int i;
+
+ /*
+ * Setup offset into PCIE memory window. Address must be a
+ * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
+ * ensure that changes propagate before we attempt to use the new
+ * values.)
+ */
+ csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1),
+ PCIE_MEM_ACCESS_OFFSET);
+ csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET);
+
+ /* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
+ for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) {
+ if (dir)
+ *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i));
+ else
+ csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i));
+ }
+
+ return 0;
+}
+
+/*
+ * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it's the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len,
+ uint32_t *buf, int dir)
+{
+ uint32_t pos, start, end, offset, memoffset;
+ int ret;
+ uint32_t *data;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2
+ */
+ memoffset = (mtype * (5 * 1024 * 1024));
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
+ * at a time so we need to round down the start and round up the end.
+ * We'll start copying out of the first line at (addr - start) a word
+ * at a time.
+ */
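+ /*
+ * (Illustrative numbers only: if the aperture were 0x1000 bytes
+ * and addr = 0x1234, len = 0x20, we would get start = 0x1000,
+ * end = 0x2000 and offset = 0x8d words into the first window.)
+ */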
+ start = addr & ~(MEMWIN0_APERTURE-1);
+ end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
+ offset = (addr - start)/sizeof(__be32);
+
+ for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
+ /*
+ * If we're writing, copy the data from the caller's memory
+ * buffer
+ */
+ if (!dir) {
+ /*
+ * If we're doing a partial write, then we need to do
+ * a read-modify-write ...
+ */
+ if (offset || len < MEMWIN0_APERTURE) {
+ ret = csio_mem_win_rw(hw, pos, data, 1);
+ if (ret) {
+ kfree(data);
+ return ret;
+ }
+ }
+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+ len > 0) {
+ data[offset++] = *buf++;
+ len -= sizeof(__be32);
+ }
+ }
+
+ /*
+ * Transfer a block of memory and bail if there's an error.
+ */
+ ret = csio_mem_win_rw(hw, pos, data, dir);
+ if (ret) {
+ kfree(data);
+ return ret;
+ }
+
+ /*
+ * If we're reading, copy the data into the caller's memory
+ * buffer.
+ */
+ if (dir)
+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+ len > 0) {
+ *buf++ = data[offset++];
+ len -= sizeof(__be32);
+ }
+ }
+
+ kfree(data);
+
+ return 0;
+}
+
+static int
+csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
+{
+ return csio_memory_rw(hw, mtype, addr, len, buf, 0);
+}
+
+/*
+ * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
+ */
+#define EEPROM_MAX_RD_POLL 40
+#define EEPROM_MAX_WR_POLL 6
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0x400
+#define VPD_BASE_OLD 0
+#define VPD_LEN 512
+#define VPD_INFO_FLD_HDR_SIZE 3
+
+/*
+ * csio_hw_seeprom_read - read a serial EEPROM location
+ * @hw: hw to read
+ * @addr: EEPROM virtual address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+static int
+csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
+{
+ uint16_t val = 0;
+ int attempts = EEPROM_MAX_RD_POLL;
+ uint32_t base = hw->params.pci.vpd_cap_addr;
+
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
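+ /*
+ * Standard PCI VPD access: write the address with the F flag
+ * clear to start a read, then poll until the device sets
+ * PCI_VPD_ADDR_F to indicate that the data register is valid.
+ */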
+ pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);
+
+ do {
+ udelay(10);
+ pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (!(val & PCI_VPD_ADDR_F)) {
+ csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
+ return -EINVAL;
+ }
+
+ pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*data);
+
+ return 0;
+}
+
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R sections.
+ */
+struct t4_vpd_hdr {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[ID_LEN];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+};
+
+/*
+ * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
+ * the VPD
+ * @v: Pointer to buffered vpd data structure
+ * @kw: The keyword to search for
+ *
+ * Returns the offset within the VPD of the value of the named
+ * keyword, or -EINVAL if the keyword is not found.
+ */
+static int
+csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
+{
+ int32_t i;
+ int32_t offset, len;
+ const uint8_t *buf = &v->id_tag;
+ const uint8_t *vpdr_len = &v->vpdr_tag;
+ offset = sizeof(struct t4_vpd_hdr);
+ len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);
+
+ if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
+ return -EINVAL;
+
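+ /*
+ * Each VPD-R information field is a 3-byte header (2-byte keyword
+ * plus a 1-byte length) followed by its data; walk the fields
+ * until we find @kw.
+ */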
+ for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
+ if (memcmp(buf + i, kw, 2) == 0) {
+ i += VPD_INFO_FLD_HDR_SIZE;
+ return i;
+ }
+
+ i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
+ }
+
+ return -EINVAL;
+}
+
+static int
+csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
+{
+ *pos = pci_find_capability(pdev, cap);
+ if (*pos)
+ return 0;
+
+ return -1;
+}
+
+/*
+ * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
+ * @hw: HW module
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int
+csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
+{
+ int i, ret, ec, sn, addr;
+ uint8_t *vpd, csum;
+ const struct t4_vpd_hdr *v;
+ /* To get around compilation warning from strstrip */
+ char *s;
+
+ if (csio_is_valid_vpd(hw))
+ return 0;
+
+ ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
+ &hw->params.pci.vpd_cap_addr);
+ if (ret)
+ return -EINVAL;
+
+ vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
+ if (vpd == NULL)
+ return -ENOMEM;
+
+ /*
+ * Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
+ ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
+ addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ for (i = 0; i < VPD_LEN; i += 4) {
+ ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
+ if (ret) {
+ kfree(vpd);
+ return ret;
+ }
+ }
+
+ /* Reset the VPD flag! */
+ hw->flags &= (~CSIO_HWF_VPD_VALID);
+
+ v = (const struct t4_vpd_hdr *)vpd;
+
+#define FIND_VPD_KW(var, name) do { \
+ var = csio_hw_get_vpd_keyword_val(v, name); \
+ if (var < 0) { \
+ csio_err(hw, "missing VPD keyword " name "\n"); \
+ kfree(vpd); \
+ return -EINVAL; \
+ } \
+} while (0)
+
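+ /*
+ * The "RV" keyword carries a checksum byte chosen so that all VPD
+ * bytes from the start up to and including it sum to zero; any
+ * other result means the EEPROM contents are corrupt.
+ */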
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
+
+ if (csum) {
+ csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
+ kfree(vpd);
+ return -EINVAL;
+ }
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+#undef FIND_VPD_KW
+
+ memcpy(p->id, v->id_data, ID_LEN);
+ s = strstrip(p->id);
+ memcpy(p->ec, vpd + ec, EC_LEN);
+ s = strstrip(p->ec);
+ i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ s = strstrip(p->sn);
+
+ csio_valid_vpd_copied(hw);
+
+ kfree(vpd);
+ return 0;
+}
+
+/*
+ * csio_hw_sf1_read - read data from the serial flash
+ * @hw: the HW module
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int
+csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
+ int32_t lock, uint32_t *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ return -EBUSY;
+
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+
+ csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
+ ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+ 10, NULL);
+ if (!ret)
+ *valp = csio_rd_reg32(hw, SF_DATA);
+ return ret;
+}
+
+/*
+ * csio_hw_sf1_write - write data to the serial flash
+ * @hw: the HW module
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int
+csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
+ int32_t lock, uint32_t val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ return -EBUSY;
+
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+
+ csio_wr_reg32(hw, val, SF_DATA);
+ csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
+
+ return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+ 10, NULL);
+}
+
+/*
+ * csio_hw_flash_wait_op - wait for a flash operation to complete
+ * @hw: the HW module
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int
+csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
+{
+ int ret;
+ uint32_t status;
+
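+ /*
+ * Repeatedly issue the READ STATUS command and test bit 0 (the
+ * write-in-progress flag on typical serial flash parts) until the
+ * operation completes or we run out of attempts.
+ */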
+ while (1) {
+ ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
+ if (ret != 0)
+ return ret;
+
+ ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
+ if (ret != 0)
+ return ret;
+
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/*
+ * csio_hw_read_flash - read words from serial flash
+ * @hw: the HW module
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+static int
+csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
+ uint32_t *data, int32_t byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
+ if (ret != 0)
+ return ret;
+
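+ /*
+ * The fast-read command clocks out one throw-away byte before the
+ * data proper, so consume it with a single one-byte read.
+ */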
+ ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
+ if (ret != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/*
+ * csio_hw_write_flash - write up to a page of data to the serial flash
+ * @hw: the hw
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
+ */
+static int
+csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
+ uint32_t n, const uint8_t *data)
+{
+ int ret = -EINVAL;
+ uint32_t buf[64];
+ uint32_t i, c, left, val, offset = addr & 0xff;
+
+ if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
+ if (ret != 0)
+ goto unlock;
+
+ ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
+ if (ret != 0)
+ goto unlock;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
+ if (ret)
+ goto unlock;
+ }
+ ret = csio_hw_flash_wait_op(hw, 8, 1);
+ if (ret)
+ goto unlock;
+
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+
+ /* Read the page to verify the write succeeded */
+ ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
+ csio_err(hw,
+ "failed to correctly write the flash page at %#x\n",
+ addr);
+ return -EINVAL;
+ }
+
+ return 0;
+
+unlock:
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ return ret;
+}
+
+/*
+ * csio_hw_flash_erase_sectors - erase a range of flash sectors
+ * @hw: the HW module
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given inclusive range.
+ */
+static int
+csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
+{
+ int ret = 0;
+
+ while (start <= end) {
+
+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
+ if (ret != 0)
+ goto out;
+
+ ret = csio_hw_sf1_write(hw, 4, 0, 1,
+ SF_ERASE_SECTOR | (start << 8));
+ if (ret != 0)
+ goto out;
+
+ ret = csio_hw_flash_wait_op(hw, 14, 500);
+ if (ret != 0)
+ goto out;
+
+ start++;
+ }
+out:
+ if (ret)
+ csio_err(hw, "erase of flash sector %d failed, error %d\n",
+ start, ret);
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ return 0;
+}
+
+/*
+ * csio_hw_flash_cfg_addr - return the address of the flash
+ * configuration file
+ * @hw: the HW module
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+static unsigned int
+csio_hw_flash_cfg_addr(struct csio_hw *hw)
+{
+ if (hw->params.sf_size == 0x100000)
+ return FPGA_FLASH_CFG_OFFSET;
+ else
+ return FLASH_CFG_OFFSET;
+}
+
+static void
+csio_hw_print_fw_version(struct csio_hw *hw, char *str)
+{
+ csio_info(hw, "%s: %u.%u.%u.%u\n", str,
+ FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+}
+
+/*
+ * csio_hw_get_fw_version - read the firmware version
+ * @hw: HW module
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int
+csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
+{
+ return csio_hw_read_flash(hw, FW_IMG_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/*
+ * csio_hw_get_tp_version - read the TP microcode version
+ * @hw: HW module
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int
+csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
+{
+ return csio_hw_read_flash(hw, FLASH_FW_START +
+ offsetof(struct fw_hdr, tp_microcode_ver), 1,
+ vers, 0);
+}
+
+/*
+ * csio_hw_check_fw_version - check if the FW is compatible with
+ * this driver
+ * @hw: HW module
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if there's an exact match, a negative error if the version could not
+ * be read or there's a major or minor version mismatch.
+ */
+static int
+csio_hw_check_fw_version(struct csio_hw *hw)
+{
+ int ret, major, minor, micro;
+
+ ret = csio_hw_get_fw_version(hw, &hw->fwrev);
+ if (!ret)
+ ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
+ if (ret)
+ return ret;
+
+ major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
+ minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
+ micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
+
+ if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ csio_err(hw, "card FW has major version %u, driver wants %u\n",
+ major, FW_VERSION_MAJOR);
+ return -EINVAL;
+ }
+
+ if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ return 0; /* perfect match */
+
+ /* Minor/micro version mismatch */
+ return -EINVAL;
+}
+
+/*
+ * csio_hw_fw_dload - download firmware.
+ * @hw: HW module
+ * @fw_data: firmware image to write.
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ */
+static int
+csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
+{
+ uint32_t csum;
+ int32_t addr;
+ int ret;
+ uint32_t i;
+ uint8_t first_page[SF_PAGE_SIZE];
+ const __be32 *p = (const __be32 *)fw_data;
+ struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
+ uint32_t sf_sec_size;
+
+ if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
+ csio_err(hw, "Serial Flash data invalid\n");
+ return -EINVAL;
+ }
+
+ if (!size) {
+ csio_err(hw, "FW image has no data\n");
+ return -EINVAL;
+ }
+
+ if (size & 511) {
+ csio_err(hw, "FW image size not multiple of 512 bytes\n");
+ return -EINVAL;
+ }
+
+ if (ntohs(hdr->len512) * 512 != size) {
+ csio_err(hw, "FW image size differs from size in FW header\n");
+ return -EINVAL;
+ }
+
+ if (size > FW_MAX_SIZE) {
+ csio_err(hw, "FW image too large, max is %u bytes\n",
+ FW_MAX_SIZE);
+ return -EINVAL;
+ }
+
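+ /*
+ * The firmware image is built so that its 32-bit words sum to
+ * 0xffffffff; any other result means the image is corrupt.
+ */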
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
+ return -EINVAL;
+ }
+
+ sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+
+ csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
+ FW_START_SEC, FW_START_SEC + i - 1);
+
+ ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
+ FW_START_SEC + i - 1);
+ if (ret) {
+ csio_err(hw, "Flash Erase failed\n");
+ goto out;
+ }
+
+ /*
+ * We write the correct version at the end so the driver can see a bad
+ * version if the FW write fails. Start by writing a copy of the
+ * first page with a bad version.
+ */
+ memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
+ if (ret)
+ goto out;
+
+ csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
+ FW_IMG_START, FW_IMG_START + size);
+
+ addr = FW_IMG_START;
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ fw_data += SF_PAGE_SIZE;
+ ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
+ if (ret)
+ goto out;
+ }
+
+ ret = csio_hw_write_flash(hw,
+ FW_IMG_START +
+ offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver),
+ (const uint8_t *)&hdr->fw_ver);
+
+out:
+ if (ret)
+ csio_err(hw, "firmware download failed, error %d\n", ret);
+ return ret;
+}
+
+static int
+csio_hw_get_flash_params(struct csio_hw *hw)
+{
+ int ret;
+ uint32_t info = 0;
+
+ ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ if (ret != 0)
+ return ret;
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ hw->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ hw->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ hw->params.sf_size = 1 << info;
+
+ return 0;
+}
+
+static void
+csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
+{
+ uint16_t val;
+ uint32_t pcie_cap;
+
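+ /*
+ * The low 4 bits of PCI_EXP_DEVCTL2 hold the Completion Timeout
+ * Value; preserve the rest of the register and plug in the
+ * requested range.
+ */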
+ if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
+ pci_read_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= 0xfff0;
+ val |= range;
+ pci_write_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
+
+/*
+ * Return the specified PCI-E Configuration Space register from our Physical
+ * Function. We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+static uint32_t
+csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
+{
+ u32 val = 0;
+ struct csio_mb *mbp;
+ int rv;
+ struct fw_ldst_cmd *ldst_cmd;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ pci_read_config_dword(hw->pdev, reg, &val);
+ return val;
+ }
+
+ csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
+
+ rv = csio_mb_issue(hw, mbp);
+
+ /*
+ * If the LDST Command succeeded, extract the returned register
+ * value. Otherwise read it directly ourselves.
+ */
+ if (rv == 0) {
+ ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ val = ntohl(ldst_cmd->u.pcie.data[0]);
+ } else
+ pci_read_config_dword(hw->pdev, reg, &val);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return val;
+} /* csio_read_pcie_cfg4 */
+
+static int
+csio_hw_set_mem_win(struct csio_hw *hw)
+{
+ u32 bar0;
+
+ /*
+ * Truncation intentional: we only read the bottom 32-bits of the
+ * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
+ * read BAR0 instead of using pci_resource_start() because we could be
+ * operating from within a Virtual Machine which is trapping our
+ * accesses to our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will be
+ * coming across the PCI-E link.
+ */
+ bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
+ bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
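+ /*
+ * The WINDOW field encodes the aperture as log2(size) - 10, i.e.
+ * window sizes are powers of two starting at 1KB.
+ */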
+ csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
+ csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
+ csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+ csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+ return 0;
+} /* csio_hw_set_mem_win */
+
+
+
+/*****************************************************************************/
+/* HW State machine assists */
+/*****************************************************************************/
+
+static int
+csio_hw_dev_ready(struct csio_hw *hw)
+{
+ uint32_t reg;
+ int cnt = 6;
+
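+ /*
+ * PL_WHOAMI reads back as all-ones while the device is still
+ * coming out of reset; once it responds, SOURCEPF tells us which
+ * physical function we are.
+ */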
+ while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
+ (--cnt != 0))
+ mdelay(100);
+
+ if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
+ (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
+ csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
+ return -EIO;
+ }
+
+ hw->pfn = SOURCEPF_GET(reg);
+
+ return 0;
+}
+
+/*
+ * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
+ * @hw: HW module
+ * @state: Device state
+ *
+ * FW_HELLO_CMD has to be polled for completion.
+ */
+static int
+csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
+{
+ struct csio_mb *mbp;
+ int rv = 0;
+ enum csio_dev_master master;
+ enum fw_retval retval;
+ uint8_t mpfn;
+ char state_str[16];
+ int retries = FW_CMD_HELLO_RETRIES;
+
+ memset(state_str, 0, sizeof(state_str));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ rv = -ENOMEM;
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto out;
+ }
+
+ master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;
+
+retry:
+ csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
+ hw->pfn, master, NULL);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv) {
+ csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
+ goto out_free_mb;
+ }
+
+ csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
+ rv = -EINVAL;
+ goto out_free_mb;
+ }
+
+ /* Firmware has designated us to be master */
+ if (hw->pfn == mpfn) {
+ hw->flags |= CSIO_HWF_MASTER;
+ } else if (*state == CSIO_DEV_STATE_UNINIT) {
+ /*
+ * If we're not the Master PF then we need to wait around for
+ * the Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable
+ * PF and there is no current Master PF; a Master PF may show up
+ * momentarily and we wouldn't want to fail pointlessly. (This
+ * can happen when an OS loads lots of different drivers rapidly
+ * at the same time). In this case, the Master PF returned by
+ * the firmware will be PCIE_FW_MASTER_MASK so the test below
+ * will work ...
+ */
+
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ uint32_t pcie_fw;
+
+ msleep(50);
+ waiting -= 50;
+
+ /*
+ * If neither Error nor Initialized is indicated
+ * by the firmware, keep waiting till we exhaust our
+ * timeout ... and then retry if we haven't exhausted
+ * our retries ...
+ */
+ pcie_fw = csio_rd_reg32(hw, PCIE_FW);
+ if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ rv = -ETIMEDOUT;
+ break;
+ }
+ continue;
+ }
+
+ /*
+ * We either have an Error or an Initialized condition;
+ * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & PCIE_FW_ERR) {
+ *state = CSIO_DEV_STATE_ERR;
+ rv = -ETIMEDOUT;
+ } else if (pcie_fw & PCIE_FW_INIT)
+ *state = CSIO_DEV_STATE_INIT;
+ }
+
+ /*
+ * If we arrived before a Master PF was selected and one
+ * has since become valid, grab its identity for our
+ * caller.
+ */
+ if (mpfn == PCIE_FW_MASTER_MASK &&
+ (pcie_fw & PCIE_FW_MASTER_VLD))
+ mpfn = PCIE_FW_MASTER_GET(pcie_fw);
+ break;
+ }
+ hw->flags &= ~CSIO_HWF_MASTER;
+ }
+
+ switch (*state) {
+ case CSIO_DEV_STATE_UNINIT:
+ strcpy(state_str, "Initializing");
+ break;
+ case CSIO_DEV_STATE_INIT:
+ strcpy(state_str, "Initialized");
+ break;
+ case CSIO_DEV_STATE_ERR:
+ strcpy(state_str, "Error");
+ break;
+ default:
+ strcpy(state_str, "Unknown");
+ break;
+ }
+
+ if (hw->pfn == mpfn)
+ csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
+ hw->pfn, state_str);
+ else
+ csio_info(hw,
+ "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
+ hw->pfn, mpfn, state_str);
+
+out_free_mb:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ return rv;
+}
+
+/*
+ * csio_do_bye - Perform the BYE FW Mailbox command and process response.
+ * @hw: HW module
+ *
+ */
+static int
+csio_do_bye(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of BYE command failed\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_do_reset- Perform the device reset.
+ * @hw: HW module
+ * @fw_rst: FW reset
+ *
+ * If fw_rst is set, issues FW reset mbox cmd otherwise
+ * does PIO reset.
+ * Performs reset of the function.
+ */
+static int
+csio_do_reset(struct csio_hw *hw, bool fw_rst)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ if (!fw_rst) {
+ /* PIO reset */
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ mdelay(2000);
+ return 0;
+ }
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ PIORSTMODE | PIORST, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of RESET command failed.n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+static int
+csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
+ uint16_t caps;
+
+ caps = ntohs(rsp->fcoecaps);
+
+ if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
+ csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
+ return -EINVAL;
+ }
+
+ if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
+ csio_err(hw, "No FCoE Control Offload capability\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @force: force uP into RESET even if FW RESET command fails
+ *
+ * Issues a RESET command to firmware (if desired) with a HALT indication
+ * and then puts the microprocessor into RESET state. The RESET command
+ * will only be issued if a legitimate mailbox is provided (mbox <=
+ * PCIE_FW_MASTER_MASK).
+ *
+ * This is generally used in order for the host to safely manipulate the
+ * adapter without fear of conflicting with whatever the firmware might
+ * be doing. The only way out of this state is to RESTART the firmware
+ * ...
+ */
+static int
+csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
+{
+ enum fw_retval retval = 0;
+
+ /*
+ * If a legitimate mailbox is provided, issue a RESET command
+ * with a HALT indication.
+ */
+ if (mbox <= PCIE_FW_MASTER_MASK) {
+ struct csio_mb *mbp;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
+ NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of RESET command failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ mempool_free(mbp, hw->mb_mempool);
+ }
+
+ /*
+ * Normally we won't complete the operation if the firmware RESET
+ * command fails but if our caller insists we'll go ahead and put the
+ * uP into RESET. This can be useful if the firmware is hung or even
+ * missing ... We'll have to take the risk of putting the uP into
+ * RESET without the cooperation of firmware in that case.
+ *
+ * We also force the firmware's HALT flag to be on in case we bypassed
+ * the firmware RESET command above or we're dealing with old firmware
+ * which doesn't have the HALT capability. This will serve as a flag
+ * for the incoming firmware to know that it's coming out of a HALT
+ * rather than a RESET ... if it's new enough to understand that ...
+ */
+ if (retval == 0 || force) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
+ csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
+ }
+
+ /*
+ * And we always return the result of the firmware RESET command
+ * even when we force the uP into RESET ...
+ */
+ return retval ? -EINVAL : 0;
+}
+
+/*
+ * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
+ * @hw: the HW module
+ * @reset: if we want to do a RESET to restart things
+ *
+ * Restart firmware previously halted by csio_hw_fw_halt(). On successful
+ * return the previous PF Master remains as the new PF Master and there
+ * is no need to issue a new HELLO command, etc.
+ *
+ * We do this in two ways:
+ *
+ * 1. If we're dealing with newer firmware we'll simply want to take
+ * the chip's microprocessor out of RESET. This will cause the
+ * firmware to start up from its start vector. And then we'll loop
+ * until the firmware indicates it's started again (PCIE_FW.HALT
+ * reset to 0) or we timeout.
+ *
+ * 2. If we're dealing with older firmware then we'll need to RESET
+ * the chip since older firmware won't recognize the PCIE_FW.HALT
+ * flag and automatically RESET itself on startup.
+ */
+static int
+csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
+{
+ if (reset) {
+ /*
+ * Since we're directing the RESET instead of the firmware
+ * doing it automatically, we need to clear the PCIE_FW.HALT
+ * bit.
+ */
+ csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);
+
+ /*
+ * If we've been given a valid mailbox, first try to get the
+ * firmware to do the RESET. If that works, great and we can
+ * return success. Otherwise, if we haven't been given a
+ * valid mailbox or the RESET command failed, fall back to
+ * hitting the chip with a hammer.
+ */
+ if (mbox <= PCIE_FW_MASTER_MASK) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ msleep(100);
+ if (csio_do_reset(hw, true) == 0)
+ return 0;
+ }
+
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ msleep(2000);
+ } else {
+ int ms;
+
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+ if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
+ return 0;
+ msleep(100);
+ ms += 100;
+ }
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @fw_data: the firmware image to write
+ * @size: image size
+ * @force: force upgrade even if firmware doesn't cooperate
+ *
+ * Perform all of the steps necessary for upgrading an adapter's
+ * firmware image. Normally this requires the cooperation of the
+ * existing firmware in order to halt all existing activities
+ * but if an invalid mailbox token is passed in we skip that step
+ * (though we'll still put the adapter microprocessor into RESET in
+ * that case).
+ *
+ * On successful return the new firmware will have been loaded and
+ * the adapter will have been fully RESET losing all previous setup
+ * state. On unsuccessful return the adapter may be completely hosed ...
+ * positive errno indicates that the adapter is ~probably~ intact, a
+ * negative errno indicates that things are looking bad ...
+ */
+static int
+csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
+ const u8 *fw_data, uint32_t size, int32_t force)
+{
+ const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+ int reset, ret;
+
+ ret = csio_hw_fw_halt(hw, mbox, force);
+ if (ret != 0 && !force)
+ return ret;
+
+ ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Older versions of the firmware don't understand the new
+ * PCIE_FW.HALT flag and so won't know to perform a RESET when they
+ * restart. So for newly loaded older firmware we'll have to do the
+ * RESET for it so it starts up on a clean slate. We can tell if
+ * the newly loaded firmware will handle this right by checking
+ * its header flags to see if it advertises the capability.
+ */
+ reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+ return csio_hw_fw_restart(hw, mbox, reset);
+}
+
+
+/*
+ * csio_hw_fw_config_file - setup an adapter via a Configuration File
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW command
+ * @mtype: the memory type where the Configuration File is located
+ * @maddr: the memory address where the Configuration File is located
+ * @finiver: return value for CF [fini] version
+ * @finicsum: return value for CF [fini] checksum
+ * @cfcsum: return value for CF computed checksum
+ *
+ * Issue a command to get the firmware to process the Configuration
+ * File located at the specified mtype/maddress. If the Configuration
+ * File is processed successfully and return value pointers are
+ * provided, the Configuration File "[fini]" section version and
+ * checksum values will be returned along with the computed checksum.
+ * It's up to the caller to decide how it wants to respond to the
+ * checksums not matching, but it is recommended that a prominent warning
+ * be emitted in order to help people rapidly identify changed or
+ * corrupted Configuration Files.
+ *
+ * Also note that it's possible to modify things like "niccaps",
+ * "toecaps",etc. between processing the Configuration File and telling
+ * the firmware to use the new configuration. Callers which want to
+ * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
+ * Configuration Files if they want to do this.
+ */
+static int
+csio_hw_fw_config_file(struct csio_hw *hw,
+ unsigned int mtype, unsigned int maddr,
+ uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
+{
+ struct csio_mb *mbp;
+ struct fw_caps_config_cmd *caps_cmd;
+ int rv = -EINVAL;
+ enum fw_retval ret;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+ /*
+ * Tell the firmware to process the indicated Configuration File.
+ * If there are no errors and the caller has provided return value
+ * pointers for the [fini] section version, checksum and computed
+ * checksum, pass those back to the caller.
+ */
+ caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ caps_cmd->cfvalid_to_len16 =
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
+ goto out;
+ }
+
+ ret = csio_mb_fw_retval(mbp);
+ if (ret != FW_SUCCESS) {
+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+ goto out;
+ }
+
+ if (finiver)
+ *finiver = ntohl(caps_cmd->finiver);
+ if (finicsum)
+ *finicsum = ntohl(caps_cmd->finicsum);
+ if (cfcsum)
+ *cfcsum = ntohl(caps_cmd->cfcsum);
+
+ /* Validate device capabilities */
+ if (csio_hw_validate_caps(hw, mbp)) {
+ rv = -ENOENT;
+ goto out;
+ }
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE);
+ caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
+ goto out;
+ }
+
+ ret = csio_mb_fw_retval(mbp);
+ if (ret != FW_SUCCESS) {
+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+ goto out;
+ }
+
+ rv = 0;
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+}
+
+/*
+ * csio_get_device_params - Get device parameters.
+ * @hw: HW module
+ *
+ */
+static int
+csio_get_device_params(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ u32 param[6];
+ int i, j = 0;
+
+ /* Initialize portids to -1 */
+ for (i = 0; i < CSIO_MAX_PPORTS; i++)
+ hw->pport[i].portid = -1;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get port vec information. */
+ param[0] = FW_PARAM_DEV(PORTVEC);
+
+ /* Get Core clock. */
+ param[1] = FW_PARAM_DEV(CCLK);
+
+ /* Get EQ id start and end. */
+ param[2] = FW_PARAM_PFVF(EQ_START);
+ param[3] = FW_PARAM_PFVF(EQ_END);
+
+ /* Get IQ id start and end. */
+ param[4] = FW_PARAM_PFVF(IQFLINT_START);
+ param[5] = FW_PARAM_PFVF(IQFLINT_END);
+
+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ ARRAY_SIZE(param), param, NULL, false, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ ARRAY_SIZE(param), param);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* cache the information. */
+ hw->port_vec = param[0];
+ hw->vpd.cclk = param[1];
+ wrm->fw_eq_start = param[2];
+ wrm->fw_iq_start = param[4];
+
+ /* Using FW configured max iqs & eqs */
+ if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
+ !csio_is_hw_master(hw)) {
+ hw->cfg_niq = param[5] - param[4] + 1;
+ hw->cfg_neq = param[3] - param[2] + 1;
+ csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
+ hw->cfg_niq, hw->cfg_neq);
+ }
+
+ hw->port_vec &= csio_port_mask;
+
+ hw->num_pports = hweight32(hw->port_vec);
+
+ csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
+ hw->port_vec, hw->num_pports);
+
+ for (i = 0; i < hw->num_pports; i++) {
+ while ((hw->port_vec & (1 << j)) == 0)
+ j++;
+ hw->pport[i].portid = j++;
+ csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
+ }
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+
+/*
+ * csio_config_device_caps - Get and set device capabilities.
+ * @hw: HW module
+ *
+ */
+static int
+csio_config_device_caps(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ int rv = -EINVAL;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get device capabilities */
+ csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
+ goto out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
+ goto out;
+ }
+
+ /* Validate device capabilities */
+ if (csio_hw_validate_caps(hw, mbp))
+ goto out;
+
+ /* Don't config device capabilities if already configured */
+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+ rv = 0;
+ goto out;
+ }
+
+ /* Write back desired device capabilities */
+ csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
+ false, true, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
+ goto out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
+ goto out;
+ }
+
+ rv = 0;
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+}
+
+static int
+csio_config_global_rss(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP,
+ NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_config_pfvf - Configure Physical/Virtual functions settings.
+ * @hw: HW module
+ *
+ */
+static int
+csio_config_pfvf(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /*
+ * For now, allow all PFs to access all ports using a pmask
+ * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will
+ * need to provide access based on some rule.
+ */
+ csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ,
+ CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK,
+ CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PFVF_CMD failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_enable_ports - Bring up all available ports.
+ * @hw: HW module.
+ *
+ */
+static int
+csio_enable_ports(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ uint8_t portid;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hw->num_pports; i++) {
+ portid = hw->pport[i].portid;
+
+ /* Read PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
+ false, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_port_rsp(hw, mbp, &retval,
+ &hw->pport[i].pcap);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
+ portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* Write back PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
+ (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
+ portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ } /* For all ports */
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_get_fcoe_resinfo - Read fcoe fw resource info.
+ * @hw: HW module
+ * Issued with lock held.
+ */
+static int
+csio_get_fcoe_resinfo(struct csio_hw *hw)
+{
+ struct csio_fcoe_res_info *res_info = &hw->fres_info;
+ struct fw_fcoe_res_info_cmd *rsp;
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get FCoE FW resource information */
+ csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ res_info->e_d_tov = ntohs(rsp->e_d_tov);
+ res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
+ res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
+ res_info->r_r_tov = ntohs(rsp->r_r_tov);
+ res_info->max_xchgs = ntohl(rsp->max_xchgs);
+ res_info->max_ssns = ntohl(rsp->max_ssns);
+ res_info->used_xchgs = ntohl(rsp->used_xchgs);
+ res_info->used_ssns = ntohl(rsp->used_ssns);
+ res_info->max_fcfs = ntohl(rsp->max_fcfs);
+ res_info->max_vnps = ntohl(rsp->max_vnps);
+ res_info->used_fcfs = ntohl(rsp->used_fcfs);
+ res_info->used_vnps = ntohl(rsp->used_vnps);
+
+ csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
+ res_info->max_xchgs);
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+static int
+csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ u32 _param[1];
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /*
+ * Find out whether we're dealing with a version of
+ * the firmware which has configuration file support.
+ */
+ _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+
+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ ARRAY_SIZE(_param), _param, NULL, false, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ ARRAY_SIZE(_param), _param);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ *param = _param[0];
+
+ return 0;
+}
+
+static int
+csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
+{
+ int ret = 0;
+ const struct firmware *cf;
+ struct pci_dev *pci_dev = hw->pdev;
+ struct device *dev = &pci_dev->dev;
+ unsigned int mtype = 0, maddr = 0;
+ uint32_t *cfg_data;
+ int value_to_add = 0;
+
+ ret = request_firmware(&cf, CSIO_CF_FNAME, dev);
+ if (ret < 0) {
+ csio_err(hw, "could not find config file " CSIO_CF_FNAME
+ ", err: %d\n", ret);
+ return -ENOENT;
+ }
+
+ if (cf->size % 4 != 0)
+ value_to_add = 4 - (cf->size % 4);
+
+ cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
+ if (cfg_data == NULL) {
+ release_firmware(cf);
+ return -ENOMEM;
+ }
+
+ memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
+
+ if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+ kfree(cfg_data);
+ release_firmware(cf);
+ return -EINVAL;
+ }
+
+ mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+
+ ret = csio_memory_write(hw, mtype, maddr,
+ cf->size + value_to_add, cfg_data);
+ if (ret == 0) {
+ csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n");
+ strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
+ }
+
+ kfree(cfg_data);
+ release_firmware(cf);
+
+ return ret;
+}
+
+/*
+ * HW initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration -- either using the configuration
+ * file stored in flash on the adapter or using a filesystem-local file
+ * if available.
+ *
+ * If we don't have configuration file support in the firmware, then we'll
+ * have to set things up the old fashioned way with hard-coded register
+ * writes and firmware commands ...
+ */
+
+/*
+ * Attempt to initialize the HW via a Firmware Configuration File.
+ */
+static int
+csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
+{
+ unsigned int mtype, maddr;
+ int rv;
+ uint32_t finiver, finicsum, cfcsum;
+ int using_flash;
+ char path[64];
+
+ /*
+ * Reset device if necessary
+ */
+ if (reset) {
+ rv = csio_do_reset(hw, true);
+ if (rv != 0)
+ goto bye;
+ }
+
+ /*
+ * If we have a configuration file on the host, then use that.
+ * Otherwise, use the configuration file stored
+ * in the HW flash ...
+ */
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_config(hw, fw_cfg_param, path);
+ spin_lock_irq(&hw->lock);
+ if (rv != 0) {
+ if (rv == -ENOENT) {
+ /*
+ * config file was not found. Use default
+ * config file from flash.
+ */
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = csio_hw_flash_cfg_addr(hw);
+ using_flash = 1;
+ } else {
+ /*
+ * We revert to the hardwired config if
+ * flashing failed.
+ */
+ goto bye;
+ }
+ } else {
+ mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+ using_flash = 0;
+ }
+
+ hw->cfg_store = (uint8_t)mtype;
+
+ /*
+ * Issue a Capability Configuration command to the firmware to get it
+ * to parse the Configuration File.
+ */
+ rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
+ &finicsum, &cfcsum);
+ if (rv != 0)
+ goto bye;
+
+ hw->cfg_finiver = finiver;
+ hw->cfg_finicsum = finicsum;
+ hw->cfg_cfcsum = cfcsum;
+ hw->cfg_csum_status = true;
+
+ if (finicsum != cfcsum) {
+ csio_warn(hw,
+ "Config File checksum mismatch: csum=%#x, computed=%#x\n",
+ finicsum, cfcsum);
+
+ hw->cfg_csum_status = false;
+ }
+
+ /*
+ * Note that we're now operating with parameters supplied by the
+ * Configuration File rather than hard-wired initialization
+ * constants buried in the driver.
+ */
+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto bye;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /*
+ * And finally tell the firmware to initialize itself using the
+ * parameters from the Configuration File.
+ */
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+
+ csio_info(hw,
+ "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
+ (using_flash ? "in device FLASH" : path), finiver, cfcsum);
+
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ...
+ */
+bye:
+ hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
+ csio_dbg(hw, "Configuration file error %d\n", rv);
+ return rv;
+}
+
+/*
+ * Attempt to initialize the adapter via hard-coded, driver supplied
+ * parameters ...
+ */
+static int
+csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
+{
+ int rv;
+ /*
+ * Reset device if necessary
+ */
+ if (reset) {
+ rv = csio_do_reset(hw, true);
+ if (rv != 0)
+ goto out;
+ }
+
+ /* Get and set device capabilities */
+ rv = csio_config_device_caps(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Config Global RSS command */
+ rv = csio_config_global_rss(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure PF/VF capabilities of device */
+ rv = csio_config_pfvf(hw);
+ if (rv != 0)
+ goto out;
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+
+out:
+ return rv;
+}
+
+/*
+ * Returns -EINVAL if the attempt to flash the firmware failed,
+ * 0 if it succeeded, and -ECANCELED if flashing was not attempted
+ * because the card already had the latest firmware.
+ */
+static int
+csio_hw_flash_fw(struct csio_hw *hw)
+{
+ int ret = -ECANCELED;
+ const struct firmware *fw;
+ const struct fw_hdr *hdr;
+ u32 fw_ver;
+ struct pci_dev *pci_dev = hw->pdev;
+ struct device *dev = &pci_dev->dev;
+
+ if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) {
+ csio_err(hw, "could not find firmware image " CSIO_FW_FNAME
+ ",err: %d\n", ret);
+ return -EINVAL;
+ }
+
+ hdr = (const struct fw_hdr *)fw->data;
+ fw_ver = ntohl(hdr->fw_ver);
+ if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) {
+ /* wrong major version, won't do */
+ release_firmware(fw);
+ return -EINVAL;
+ }
+
+ /*
+ * If the flash FW is unusable or we found something newer, load it.
+ */
+ if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR ||
+ fw_ver > hw->fwrev) {
+ ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
+ /*force=*/false);
+ if (!ret)
+ csio_info(hw, "firmware upgraded to version %pI4 from "
+ CSIO_FW_FNAME "\n", &hdr->fw_ver);
+ else
+ csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+
+/*
+ * csio_hw_configure - Configure HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_configure(struct csio_hw *hw)
+{
+ int reset = 1;
+ int rv;
+ u32 param[1];
+
+ rv = csio_hw_dev_ready(hw);
+ if (rv != 0) {
+ CSIO_INC_STATS(hw, n_err_fatal);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* HW version */
+ hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
+
+ /* Needed for FW download */
+ rv = csio_hw_get_flash_params(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* Set pci completion timeout value to 4 seconds. */
+ csio_set_pcie_completion_timeout(hw, 0xd);
+
+ csio_hw_set_mem_win(hw);
+
+ rv = csio_hw_get_fw_version(hw, &hw->fwrev);
+ if (rv != 0)
+ goto out;
+
+ csio_hw_print_fw_version(hw, "Firmware revision");
+
+ rv = csio_do_hello(hw, &hw->fw_state);
+ if (rv != 0) {
+ CSIO_INC_STATS(hw, n_err_fatal);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* Read vpd */
+ rv = csio_hw_get_vpd_params(hw, &hw->vpd);
+ if (rv != 0)
+ goto out;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ rv = csio_hw_check_fw_version(hw);
+ if (rv == -EINVAL) {
+
+ /* Do firmware update */
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_fw(hw);
+ spin_lock_irq(&hw->lock);
+
+ if (rv == 0) {
+ reset = 0;
+ /*
+ * Note that the chip was reset as part of the
+ * firmware upgrade so we don't reset it again
+ * below and grab the new firmware version.
+ */
+ rv = csio_hw_check_fw_version(hw);
+ }
+ }
+ /*
+ * If the firmware doesn't support Configuration
+ * Files, use the old Driver-based, hard-wired
+ * initialization. Otherwise, try using the
+ * Configuration File support and fall back to the
+ * Driver-based initialization if there's no
+ * Configuration File found.
+ */
+ if (csio_hw_check_fwconfig(hw, param) == 0) {
+ rv = csio_hw_use_fwconfig(hw, reset, param);
+ if (rv == -ENOENT)
+ goto out;
+ if (rv != 0) {
+ csio_info(hw,
+ "No Configuration File present "
+ "on adapter. Using hard-wired "
+ "configuration parameters.\n");
+ rv = csio_hw_no_fwconfig(hw, reset);
+ }
+ } else {
+ rv = csio_hw_no_fwconfig(hw, reset);
+ }
+
+ if (rv != 0)
+ goto out;
+
+ } else {
+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Get device capabilities */
+ rv = csio_config_device_caps(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+ goto out;
+ }
+ } /* if not master */
+
+out:
+ return;
+}
+
+/*
+ * csio_hw_initialize - Initialize HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_initialize(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ int rv;
+ int i;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ goto out;
+
+ csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
+ goto free_and_out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
+ retval);
+ goto free_and_out;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ }
+
+ rv = csio_get_fcoe_resinfo(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
+ goto out;
+ }
+
+ spin_unlock_irq(&hw->lock);
+ rv = csio_config_queues(hw);
+ spin_lock_irq(&hw->lock);
+
+ if (rv != 0) {
+ csio_err(hw, "Config of queues failed!: %d\n", rv);
+ goto out;
+ }
+
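+ /* Start with the module type unknown (NA) for all physical ports */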
+ for (i = 0; i < hw->num_pports; i++)
+ hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ rv = csio_enable_ports(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to enable ports: %d\n", rv);
+ goto out;
+ }
+ }
+
+ csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
+ return;
+
+free_and_out:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ return;
+}
+
+#define PF_INTR_MASK (PFSW | PFCIM)
+
+/*
+ * csio_hw_intr_enable - Enable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Enable interrupts in HW registers.
+ */
+static void
+csio_hw_intr_enable(struct csio_hw *hw)
+{
+ uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
+ uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+ uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
+
+ /*
+ * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
+ * by FW, so do nothing for INTX.
+ */
+ if (hw->intr_mode == CSIO_IM_MSIX)
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
+ AIVEC(AIVEC_MASK), vec);
+ else if (hw->intr_mode == CSIO_IM_MSI)
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
+ AIVEC(AIVEC_MASK), 0);
+
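+ /* Enable the PF-level interrupt sources defined by PF_INTR_MASK (PFSW | PFCIM) */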
+ csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
+
+ /* Turn on MB interrupts - this will internally flush PIO as well */
+ csio_mb_intr_enable(hw);
+
+ /* These are common registers - only a master can modify them */
+ if (csio_is_hw_master(hw)) {
+ /*
+ * Disable the Serial FLASH interrupt, if enabled!
+ */
+ pl &= (~SF);
+ csio_wr_reg32(hw, pl, PL_INT_ENABLE);
+
+ csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
+ EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
+ ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
+ ERR_DATA_CPL_ON_HIGH_QID1 |
+ ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
+ ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
+ ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
+ ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
+ SGE_INT_ENABLE3);
+ csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
+ }
+
+ hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
+
+}
+
+/*
+ * csio_hw_intr_disable - Disable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Turn off Mailbox and PCI_PF_CFG interrupts.
+ */
+void
+csio_hw_intr_disable(struct csio_hw *hw)
+{
+ uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+
+ if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
+ return;
+
+ hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
+
+ csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
+ if (csio_is_hw_master(hw))
+ csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
+
+ /* Turn off MB interrupts */
+ csio_mb_intr_disable(hw);
+
+}
+
+static void
+csio_hw_fatal_err(struct csio_hw *hw)
+{
+ csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
+ csio_hw_intr_disable(hw);
+
+ /* Do not reset HW, we may need FW state for debugging */
+ csio_fatal(hw, "HW Fatal error encountered!\n");
+}
+
+/*****************************************************************************/
+/* START: HW SM */
+/*****************************************************************************/
+/*
+ * csio_hws_uninit - Uninit state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
+{
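+ /* Record event history and bump the per-event statistics counter */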
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_CFG:
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_configuring - Configuring state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_INIT:
+ csio_set_state(&hw->sm, csio_hws_initializing);
+ csio_hw_initialize(hw);
+ break;
+
+ case CSIO_HWE_INIT_DONE:
+ csio_set_state(&hw->sm, csio_hws_ready);
+ /* Fan out event to all lnode SMs */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_do_bye(hw);
+ break;
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_initializing - Initializing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_INIT_DONE:
+ csio_set_state(&hw->sm, csio_hws_ready);
+
+ /* Fan out event to all lnode SMs */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
+
+ /* Enable interrupts */
+ csio_hw_intr_enable(hw);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_do_bye(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_ready - Ready state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ /* Remember the event */
+ hw->evtflag = evt;
+
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET:
+ case CSIO_HWE_FW_DLOAD:
+ case CSIO_HWE_SUSPEND:
+ case CSIO_HWE_PCI_REMOVE:
+ case CSIO_HWE_PCIERR_DETECTED:
+ csio_set_state(&hw->sm, csio_hws_quiescing);
+ /* cleanup all outstanding cmds */
+ if (evt == CSIO_HWE_HBA_RESET ||
+ evt == CSIO_HWE_PCIERR_DETECTED)
+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
+ else
+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
+
+ csio_hw_intr_disable(hw);
+ csio_hw_mbm_cleanup(hw);
+ csio_evtq_stop(hw);
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
+ csio_evtq_flush(hw);
+ csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
+ csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_quiescing - Quiescing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_QUIESCED:
+ switch (hw->evtflag) {
+ case CSIO_HWE_FW_DLOAD:
+ csio_set_state(&hw->sm, csio_hws_resetting);
+ /* Download firmware */
+ /* Fall through */
+
+ case CSIO_HWE_HBA_RESET:
+ csio_set_state(&hw->sm, csio_hws_resetting);
+ /* Start reset of the HBA */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
+ csio_wr_destroy_queues(hw, false);
+ csio_do_reset(hw, false);
+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_set_state(&hw->sm, csio_hws_removing);
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
+ csio_wr_destroy_queues(hw, true);
+ /* Now send the bye command */
+ csio_do_bye(hw);
+ break;
+
+ case CSIO_HWE_SUSPEND:
+ csio_set_state(&hw->sm, csio_hws_quiesced);
+ break;
+
+ case CSIO_HWE_PCIERR_DETECTED:
+ csio_set_state(&hw->sm, csio_hws_pcierr);
+ csio_wr_destroy_queues(hw, false);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+
+ }
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_quiesced - Quiesced state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_RESUME:
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_resetting - HW Resetting state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET_DONE:
+ csio_evtq_start(hw);
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_removing - PCI Hotplug removing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET:
+ if (!csio_is_hw_master(hw))
+ break;
+ /*
+ * The BYE should have already been issued, so we can't
+ * use the mailbox interface. Hence we use the PL_RST
+ * register directly.
+ */
+ csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ mdelay(2000);
+ break;
+
+ /* Should never receive any new events */
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+
+ }
+}
+
+/*
+ * csio_hws_pcierr - PCI Error state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_PCIERR_SLOT_RESET:
+ csio_evtq_start(hw);
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* END: HW SM */
+/*****************************************************************************/
+
+/* Slow path handlers */
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/*
+ * csio_handle_intr_status - table driven interrupt handler
+ * @hw: HW instance
+ * @reg: the interrupt status register to process
+ * @acts: table of interrupt actions
+ *
+ * A table driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally emitting a warning or alert message. The table is terminated
+ * by an entry specifying mask 0. Returns the number of fatal interrupt
+ * conditions.
+ */
+static int
+csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
+ const struct intr_info *acts)
+{
+ int fatal = 0;
+ unsigned int mask = 0;
+ unsigned int status = csio_rd_reg32(hw, reg);
+
+ for ( ; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ csio_fatal(hw, "Fatal %s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ } else if (acts->msg)
+ csio_info(hw, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ mask |= acts->mask;
+ }
+ status &= mask;
+ if (status) /* clear processed interrupts */
+ csio_wr_reg32(hw, status, reg);
+ return fatal;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+csio_pcie_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_intr_info[] = {
+ { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+ 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void csio_tp_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info tp_intr_info[] = {
+ { 0x3fffffff, "TP parity error", -1, 1 },
+ { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void csio_sge_intr_handler(struct csio_hw *hw)
+{
+ uint64_t v;
+
+ static struct intr_info sge_intr_info[] = {
+ { ERR_CPL_EXCEED_IQE_SIZE,
+ "SGE received CPL exceeding IQE size", -1, 1 },
+ { ERR_INVALID_CIDX_INC,
+ "SGE GTS CIDX increment too large", -1, 0 },
+ { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+ { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+ { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ "SGE IQID > 1023 received CPL for FL", -1, 0 },
+ { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ 0 },
+ { ERR_ING_CTXT_PRIO,
+ "SGE too many priority ingress contexts", -1, 0 },
+ { ERR_EGR_CTXT_PRIO,
+ "SGE too many priority egress contexts", -1, 0 },
+ { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { 0, NULL, 0, 0 }
+ };
+
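+ /* SGE parity status is a 64-bit value split across SGE_INT_CAUSE1 (low) and SGE_INT_CAUSE2 (high) */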
+ v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
+ ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
+ if (v) {
+ csio_fatal(hw, "SGE parity error (%#llx)\n",
+ (unsigned long long)v);
+ csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
+ SGE_INT_CAUSE1);
+ csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
+ }
+
+ v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
+
+ if (v != 0)
+ csio_hw_fatal_err(hw);
+}
+
+#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
+ OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
+#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
+ IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
+
+/*
+ * CIM interrupt handler.
+ */
+static void csio_cim_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info cim_intr_info[] = {
+ { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+ { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+ { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+ { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info cim_upintr_info[] = {
+ { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT, "CIM illegal write", -1, 1 },
+ { ILLRDINT, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
+ cim_intr_info) +
+ csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
+ cim_upintr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void csio_ulprx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ulprx_intr_info[] = {
+ { 0x1800000, "ULPRX context error", -1, 1 },
+ { 0x7fffff, "ULPRX parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void csio_ulptx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ulptx_intr_info[] = {
+ { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ 0 },
+ { 0xfffffff, "ULPTX parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void csio_pmtx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pmtx_intr_info[] = {
+ { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { 0xffffff0, "PMTX framing error", -1, 1 },
+ { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+ 1 },
+ { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
+ { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void csio_pmrx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pmrx_intr_info[] = {
+ { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { 0x3ffff0, "PMRX framing error", -1, 1 },
+ { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+ 1 },
+ { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
+ { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void csio_cplsw_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info cplsw_intr_info[] = {
+ { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void csio_le_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info le_intr_info[] = {
+ { LIPMISS, "LE LIP miss", -1, 0 },
+ { LIP0, "LE 0 LIP error", -1, 0 },
+ { PARITYERR, "LE parity error", -1, 1 },
+ { UNKNOWNCMD, "LE unknown command", -1, 1 },
+ { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void csio_mps_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info mps_rx_intr_info[] = {
+ { 0xffffff, "MPS Rx parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_tx_intr_info[] = {
+ { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
+ { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
+ { BUBBLE, "MPS Tx underflow", -1, 1 },
+ { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR, "MPS Tx framing error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_trc_intr_info[] = {
+ { FILTMEM, "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
+ { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_sram_intr_info[] = {
+ { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_tx_intr_info[] = {
+ { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_rx_intr_info[] = {
+ { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_cls_intr_info[] = {
+ { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
+ mps_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
+ mps_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
+ mps_trc_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
+ mps_stat_sram_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+ mps_stat_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+ mps_stat_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
+ mps_cls_intr_info);
+
+ csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
+ csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
+{
+ static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+ unsigned int addr, cnt_addr, v;
+
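+ /* EDC0/EDC1 have per-instance cause/status registers; the MC has a single set of its own */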
+ if (idx <= MEM_EDC1) {
+ addr = EDC_REG(EDC_INT_CAUSE, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ } else {
+ addr = MC_INT_CAUSE;
+ cnt_addr = MC_ECC_STATUS;
+ }
+
+ v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
+ if (v & PERR_INT_CAUSE)
+ csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
+ if (v & ECC_CE_INT_CAUSE) {
+ uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
+
+ csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
+ csio_warn(hw, "%u %s correctable ECC data error%s\n",
+ cnt, name[idx], cnt > 1 ? "s" : "");
+ }
+ if (v & ECC_UE_INT_CAUSE)
+ csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
+
+ csio_wr_reg32(hw, v, addr);
+ if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void csio_ma_intr_handler(struct csio_hw *hw)
+{
+ uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
+
+ if (status & MEM_PERR_INT_CAUSE)
+ csio_fatal(hw, "MA parity error, parity status %#x\n",
+ csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
+ if (status & MEM_WRAP_INT_CAUSE) {
+ v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
+ csio_fatal(hw,
+ "MA address wrap-around error by client %u to address %#x\n",
+ MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
+ }
+ csio_wr_reg32(hw, status, MA_INT_CAUSE);
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void csio_smb_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info smb_intr_info[] = {
+ { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void csio_ncsi_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ncsi_intr_info[] = {
+ { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
+{
+ uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+
+ v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ if (!v)
+ return;
+
+ if (v & TXFIFO_PRTY_ERR)
+ csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
+ if (v & RXFIFO_PRTY_ERR)
+ csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
+ csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void csio_pl_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pl_intr_info[] = {
+ { FATALPERR, "T4 fatal parity error", -1, 1 },
+ { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_hw_slow_intr_handler - control path interrupt handler
+ * @hw: HW module
+ *
+ * Interrupt handler for non-data global interrupt events, e.g., errors.
+ * The designation 'slow' is because it involves register reads, while
+ * data interrupts typically don't involve any MMIOs.
+ */
+int
+csio_hw_slow_intr_handler(struct csio_hw *hw)
+{
+ uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
+
+ if (!(cause & CSIO_GLBL_INTR_MASK)) {
+ CSIO_INC_STATS(hw, n_plint_unexp);
+ return 0;
+ }
+
+ csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
+
+ CSIO_INC_STATS(hw, n_plint_cnt);
+
+ if (cause & CIM)
+ csio_cim_intr_handler(hw);
+
+ if (cause & MPS)
+ csio_mps_intr_handler(hw);
+
+ if (cause & NCSI)
+ csio_ncsi_intr_handler(hw);
+
+ if (cause & PL)
+ csio_pl_intr_handler(hw);
+
+ if (cause & SMB)
+ csio_smb_intr_handler(hw);
+
+ if (cause & XGMAC0)
+ csio_xgmac_intr_handler(hw, 0);
+
+ if (cause & XGMAC1)
+ csio_xgmac_intr_handler(hw, 1);
+
+ if (cause & XGMAC_KR0)
+ csio_xgmac_intr_handler(hw, 2);
+
+ if (cause & XGMAC_KR1)
+ csio_xgmac_intr_handler(hw, 3);
+
+ if (cause & PCIE)
+ csio_pcie_intr_handler(hw);
+
+ if (cause & MC)
+ csio_mem_intr_handler(hw, MEM_MC);
+
+ if (cause & EDC0)
+ csio_mem_intr_handler(hw, MEM_EDC0);
+
+ if (cause & EDC1)
+ csio_mem_intr_handler(hw, MEM_EDC1);
+
+ if (cause & LE)
+ csio_le_intr_handler(hw);
+
+ if (cause & TP)
+ csio_tp_intr_handler(hw);
+
+ if (cause & MA)
+ csio_ma_intr_handler(hw);
+
+ if (cause & PM_TX)
+ csio_pmtx_intr_handler(hw);
+
+ if (cause & PM_RX)
+ csio_pmrx_intr_handler(hw);
+
+ if (cause & ULP_RX)
+ csio_ulprx_intr_handler(hw);
+
+ if (cause & CPL_SWITCH)
+ csio_cplsw_intr_handler(hw);
+
+ if (cause & SGE)
+ csio_sge_intr_handler(hw);
+
+ if (cause & ULP_TX)
+ csio_ulptx_intr_handler(hw);
+
+ /* Clear the interrupts just processed for which we are the master. */
+ csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
+ csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */
+
+ return 1;
+}
+
+/*****************************************************************************
+ * HW <--> mailbox interfacing routines.
+ ****************************************************************************/
+/*
+ * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
+ *
+ * @data: Private data pointer.
+ *
+ * Called from worker thread context.
+ */
+static void
+csio_mberr_worker(void *data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mbm *mbm = &hw->mbm;
+ LIST_HEAD(cbfn_q);
+ struct csio_mb *mbp_next;
+ int rv;
+
+ del_timer_sync(&mbm->timer);
+
+ spin_lock_irq(&hw->lock);
+ if (list_empty(&mbm->cbfn_q)) {
+ spin_unlock_irq(&hw->lock);
+ return;
+ }
+
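+ /* Move completed mailboxes to a local list so their completions can run without the HW lock held */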
+ list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
+ mbm->stats.n_cbfnq = 0;
+
+ /* Try to start waiting mailboxes */
+ if (!list_empty(&mbm->req_q)) {
+ mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
+ list_del_init(&mbp_next->list);
+
+ rv = csio_mb_issue(hw, mbp_next);
+ if (rv != 0)
+ list_add_tail(&mbp_next->list, &mbm->req_q);
+ else
+ CSIO_DEC_STATS(mbm, n_activeq);
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Now callback completions */
+ csio_mb_completions(hw, &cbfn_q);
+}
+
+/*
+ * csio_hw_mb_timer - Top-level Mailbox timeout handler.
+ *
+ * @data: private data pointer
+ *
+ **/
+static void
+csio_hw_mb_timer(uintptr_t data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mb *mbp = NULL;
+
+ spin_lock_irq(&hw->lock);
+ mbp = csio_mb_tmo_handler(hw);
+ spin_unlock_irq(&hw->lock);
+
+ /* Call back the function for the timed-out Mailbox */
+ if (mbp)
+ mbp->mb_cbfn(hw, mbp);
+
+}
+
+/*
+ * csio_hw_mbm_cleanup - Cleanup Mailbox module.
+ * @hw: HW module
+ *
+ * Called with lock held, should exit with lock held.
+ * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
+ * into a local queue. Drops the lock and calls the completions, then
+ * re-acquires the lock before returning.
+ */
+static void
+csio_hw_mbm_cleanup(struct csio_hw *hw)
+{
+ LIST_HEAD(cbfn_q);
+
+ csio_mb_cancel_all(hw, &cbfn_q);
+
+ spin_unlock_irq(&hw->lock);
+ csio_mb_completions(hw, &cbfn_q);
+ spin_lock_irq(&hw->lock);
+}
+
+/*****************************************************************************
+ * Event handling
+ ****************************************************************************/
+int
+csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
+ uint16_t len)
+{
+ struct csio_evt_msg *evt_entry = NULL;
+
+ if (type >= CSIO_EVT_MAX)
+ return -EINVAL;
+
+ if (len > CSIO_EVT_MSG_SIZE)
+ return -EINVAL;
+
+ if (hw->flags & CSIO_HWF_FWEVT_STOP)
+ return -EINVAL;
+
+ if (list_empty(&hw->evt_free_q)) {
+ csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
+ type, len);
+ return -ENOMEM;
+ }
+
+ evt_entry = list_first_entry(&hw->evt_free_q,
+ struct csio_evt_msg, list);
+ list_del_init(&evt_entry->list);
+
+ /* copy event msg and queue the event */
+ evt_entry->type = type;
+ memcpy((void *)evt_entry->data, evt_msg, len);
+ list_add_tail(&evt_entry->list, &hw->evt_active_q);
+
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ CSIO_INC_STATS(hw, n_evt_activeq);
+
+ return 0;
+}
+
+static int
+csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
+ uint16_t len, bool msg_sg)
+{
+ struct csio_evt_msg *evt_entry = NULL;
+ struct csio_fl_dma_buf *fl_sg;
+ uint32_t off = 0;
+ unsigned long flags;
+ int n, ret = 0;
+
+ if (type >= CSIO_EVT_MAX)
+ return -EINVAL;
+
+ if (len > CSIO_EVT_MSG_SIZE)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (hw->flags & CSIO_HWF_FWEVT_STOP) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (list_empty(&hw->evt_free_q)) {
+ csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
+ type, len);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ evt_entry = list_first_entry(&hw->evt_free_q,
+ struct csio_evt_msg, list);
+ list_del_init(&evt_entry->list);
+
+ /* copy event msg and queue the event */
+ evt_entry->type = type;
+
+ /* If Payload in SG list*/
+ if (msg_sg) {
+ fl_sg = (struct csio_fl_dma_buf *) evt_msg;
+ for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
+ memcpy((void *)((uintptr_t)evt_entry->data + off),
+ fl_sg->flbufs[n].vaddr,
+ fl_sg->flbufs[n].len);
+ off += fl_sg->flbufs[n].len;
+ }
+ } else
+ memcpy((void *)evt_entry->data, evt_msg, len);
+
+ list_add_tail(&evt_entry->list, &hw->evt_active_q);
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ CSIO_INC_STATS(hw, n_evt_activeq);
+out:
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return ret;
+}
+
+static void
+csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
+{
+ if (evt_entry) {
+ spin_lock_irq(&hw->lock);
+ list_del_init(&evt_entry->list);
+ list_add_tail(&evt_entry->list, &hw->evt_free_q);
+ CSIO_DEC_STATS(hw, n_evt_activeq);
+ CSIO_INC_STATS(hw, n_evt_freeq);
+ spin_unlock_irq(&hw->lock);
+ }
+}
+
+void
+csio_evtq_flush(struct csio_hw *hw)
+{
+ uint32_t count;
+ count = 30;
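+ /* Wait up to 30 iterations of 2 seconds each for the FW-event-pending flag to clear */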
+ while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
+}
+
+static void
+csio_evtq_stop(struct csio_hw *hw)
+{
+ hw->flags |= CSIO_HWF_FWEVT_STOP;
+}
+
+static void
+csio_evtq_start(struct csio_hw *hw)
+{
+ hw->flags &= ~CSIO_HWF_FWEVT_STOP;
+}
+
+static void
+csio_evtq_cleanup(struct csio_hw *hw)
+{
+ struct list_head *evt_entry, *next_entry;
+
+ /* Release outstanding events from activeq to freeq*/
+ if (!list_empty(&hw->evt_active_q))
+ list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
+
+ hw->stats.n_evt_activeq = 0;
+ hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
+
+ /* Freeup event entry */
+ list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
+ kfree(evt_entry);
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ }
+
+ hw->stats.n_evt_freeq = 0;
+}
+
+
+static void
+csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ __u8 op;
+ __be64 *data;
+ void *msg = NULL;
+ uint32_t msg_len = 0;
+ bool msg_sg = 0;
+
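+ /* Every entry on the FW event queue begins with an RSS header carrying the CPL opcode */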
+ op = ((struct rss_header *) wr)->opcode;
+ if (op == CPL_FW6_PLD) {
+ CSIO_INC_STATS(hw, n_cpl_fw6_pld);
+ if (!flb || !flb->totlen) {
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ return;
+ }
+
+ msg = (void *) flb;
+ msg_len = flb->totlen;
+ msg_sg = 1;
+
+ data = (__be64 *) msg;
+ } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
+
+ CSIO_INC_STATS(hw, n_cpl_fw6_msg);
+ /* skip RSS header */
+ msg = (void *)((uintptr_t)wr + sizeof(__be64));
+ msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
+ sizeof(struct cpl_fw4_msg);
+
+ data = (__be64 *) msg;
+ } else {
+ csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ return;
+ }
+
+ /*
+ * Enqueue event to EventQ. Events processing happens
+ * in Event worker thread context
+ */
+ if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
+ (uint16_t)msg_len, msg_sg))
+ CSIO_INC_STATS(hw, n_evt_drop);
+}
+
+void
+csio_evtq_worker(struct work_struct *work)
+{
+ struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
+ struct list_head *evt_entry, *next_entry;
+ LIST_HEAD(evt_q);
+ struct csio_evt_msg *evt_msg;
+ struct cpl_fw6_msg *msg;
+ struct csio_rnode *rn;
+ int rv = 0;
+ uint8_t evtq_stop = 0;
+
+ csio_dbg(hw, "event worker thread active evts#%d\n",
+ hw->stats.n_evt_activeq);
+
+ spin_lock_irq(&hw->lock);
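+ /* Drain the active event queue in batches: splice onto a local list, drop the lock, then process each entry */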
+ while (!list_empty(&hw->evt_active_q)) {
+ list_splice_tail_init(&hw->evt_active_q, &evt_q);
+ spin_unlock_irq(&hw->lock);
+
+ list_for_each_safe(evt_entry, next_entry, &evt_q) {
+ evt_msg = (struct csio_evt_msg *) evt_entry;
+
+ /* Drop events if queue is STOPPED */
+ spin_lock_irq(&hw->lock);
+ if (hw->flags & CSIO_HWF_FWEVT_STOP)
+ evtq_stop = 1;
+ spin_unlock_irq(&hw->lock);
+ if (evtq_stop) {
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto free_evt;
+ }
+
+ switch (evt_msg->type) {
+ case CSIO_EVT_FW:
+ msg = (struct cpl_fw6_msg *)(evt_msg->data);
+
+ if ((msg->opcode == CPL_FW6_MSG ||
+ msg->opcode == CPL_FW4_MSG) &&
+ !msg->type) {
+ rv = csio_mb_fwevt_handler(hw,
+ msg->data);
+ if (!rv)
+ break;
+ /* Handle any remaining fw events */
+ csio_fcoe_fwevt_handler(hw,
+ msg->opcode, msg->data);
+ } else if (msg->opcode == CPL_FW6_PLD) {
+
+ csio_fcoe_fwevt_handler(hw,
+ msg->opcode, msg->data);
+ } else {
+ csio_warn(hw,
+ "Unhandled FW msg op %x type %x\n",
+ msg->opcode, msg->type);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ }
+ break;
+
+ case CSIO_EVT_MBX:
+ csio_mberr_worker(hw);
+ break;
+
+ case CSIO_EVT_DEV_LOSS:
+ memcpy(&rn, evt_msg->data, sizeof(rn));
+ csio_rnode_devloss_handler(rn);
+ break;
+
+ default:
+ csio_warn(hw, "Unhandled event %x on evtq\n",
+ evt_msg->type);
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+free_evt:
+ csio_free_evt(hw, evt_msg);
+ }
+
+ spin_lock_irq(&hw->lock);
+ }
+ hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irq(&hw->lock);
+}
+
+int
+csio_fwevtq_handler(struct csio_hw *hw)
+{
+ int rv;
+
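+ /* If the FW event queue has not been set up yet, count this as a stray interrupt */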
+ if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
+ CSIO_INC_STATS(hw, n_int_stray);
+ return -EINVAL;
+ }
+
+ rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
+ csio_process_fwevtq_entry, NULL);
+ return rv;
+}
+
+/****************************************************************************
+ * Entry points
+ ****************************************************************************/
+
+/* Management module */
+/*
+ * csio_mgmt_req_lookup - Check whether the given IO req exists in the active Q.
+ * @mgmtm - mgmt module
+ * @io_req - IO request
+ *
+ * Return - 0 if the given IO req exists in the active Q,
+ * -EINVAL if the lookup fails.
+ */
+int
+csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
+{
+ struct list_head *tmp;
+
+ /* Lookup ioreq in the ACTIVEQ */
+ list_for_each(tmp, &mgmtm->active_q) {
+ if (io_req == (struct csio_ioreq *)tmp)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */
+
+/*
+ * csio_mgmt_tmo_handler - MGMT IO timeout handler.
+ * @data - Event data.
+ *
+ * Return - none.
+ */
+static void
+csio_mgmt_tmo_handler(uintptr_t data)
+{
+ struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
+ struct list_head *tmp;
+ struct csio_ioreq *io_req;
+
+ csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
+
+ spin_lock_irq(&mgmtm->hw->lock);
+
+ list_for_each(tmp, &mgmtm->active_q) {
+ io_req = (struct csio_ioreq *) tmp;
+ io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);
+
+ if (!io_req->tmo) {
+ /* Dequeue the request from the active Q. */
+ tmp = csio_list_prev(tmp);
+ list_del_init(&io_req->sm.sm_list);
+ if (io_req->io_cbfn) {
+ /* io_req will be freed by completion handler */
+ io_req->wr_status = -ETIMEDOUT;
+ io_req->io_cbfn(mgmtm->hw, io_req);
+ } else {
+ CSIO_DB_ASSERT(0);
+ }
+ }
+ }
+
+ /* If the active queue is not empty, re-arm the timer */
+ if (!list_empty(&mgmtm->active_q))
+ mod_timer(&mgmtm->mgmt_timer,
+ jiffies + msecs_to_jiffies(ECM_MIN_TMO));
+ spin_unlock_irq(&mgmtm->hw->lock);
+}
+
+static void
+csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
+{
+ struct csio_hw *hw = mgmtm->hw;
+ struct csio_ioreq *io_req;
+ struct list_head *tmp;
+ uint32_t count;
+
+ count = 30;
+ /* Wait for all outstanding req to complete gracefully */
+ while ((!list_empty(&mgmtm->active_q)) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* release outstanding req from ACTIVEQ */
+ list_for_each(tmp, &mgmtm->active_q) {
+ io_req = (struct csio_ioreq *) tmp;
+ tmp = csio_list_prev(tmp);
+ list_del_init(&io_req->sm.sm_list);
+ mgmtm->stats.n_active--;
+ if (io_req->io_cbfn) {
+ /* io_req will be freed by completion handler */
+ io_req->wr_status = -ETIMEDOUT;
+ io_req->io_cbfn(mgmtm->hw, io_req);
+ }
+ }
+}
+
+/*
+ * csio_mgmtm_init - Mgmt module init entry point
+ * @mgmtm - mgmt module
+ * @hw - HW module
+ *
+ * Initialize mgmt timer, resource wait queue, active queue,
+ * completion q. Allocate Egress and Ingress
+ * WR queues and save off the queue index returned by the WR
+ * module for future use. Allocate and save off mgmt reqs in the
+ * mgmt_req_freelist for future use. Make sure their SM is initialized
+ * to uninit state.
+ * Returns: 0 - on success
+ * -ENOMEM - on error.
+ */
+static int
+csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
+{
+ struct timer_list *timer = &mgmtm->mgmt_timer;
+
+ init_timer(timer);
+ timer->function = csio_mgmt_tmo_handler;
+ timer->data = (unsigned long)mgmtm;
+
+ INIT_LIST_HEAD(&mgmtm->active_q);
+ INIT_LIST_HEAD(&mgmtm->cbfn_q);
+
+ mgmtm->hw = hw;
+ /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
+
+ return 0;
+}
+
+/*
+ * csio_mgmtm_exit - MGMT module exit entry point
+ * @mgmtm - mgmt module
+ *
+ * This function is called during MGMT module uninit.
+ * Stop timers, free ioreqs allocated.
+ * Returns: None
+ *
+ */
+static void
+csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
+{
+ del_timer_sync(&mgmtm->mgmt_timer);
+}
+
+
+/**
+ * csio_hw_start - Kicks off the HW State machine
+ * @hw: Pointer to HW module.
+ *
+ * It is assumed that the initialization is a synchronous operation.
+ * So when we return after posting the event, the HW SM should be in
+ * the ready state, if there were no errors during init.
+ */
+int
+csio_hw_start(struct csio_hw *hw)
+{
+ spin_lock_irq(&hw->lock);
+ csio_post_event(&hw->sm, CSIO_HWE_CFG);
+ spin_unlock_irq(&hw->lock);
+
+ if (csio_is_hw_ready(hw))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+int
+csio_hw_stop(struct csio_hw *hw)
+{
+ csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
+
+ if (csio_is_hw_removing(hw))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+/* Max reset retries */
+#define CSIO_MAX_RESET_RETRIES 3
+
+/**
+ * csio_hw_reset - Reset the hardware
+ * @hw: HW module.
+ *
+ * Caller should hold lock across this function.
+ */
+int
+csio_hw_reset(struct csio_hw *hw)
+{
+ if (!csio_is_hw_master(hw))
+ return -EPERM;
+
+ if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
+ csio_dbg(hw, "Max hw reset attempts reached..");
+ return -EINVAL;
+ }
+
+ hw->rst_retries++;
+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
+
+ if (csio_is_hw_ready(hw)) {
+ hw->rst_retries = 0;
+ hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+/*
+ * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
+ * @hw: HW module.
+ */
+static void
+csio_hw_get_device_id(struct csio_hw *hw)
+{
+ /* Is the adapter device id cached already? */
+ if (csio_is_dev_id_cached(hw))
+ return;
+
+ /* Get the PCI vendor & device id */
+ pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
+ &hw->params.pci.vendor_id);
+ pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
+ &hw->params.pci.device_id);
+
+ csio_dev_id_cached(hw);
+
+} /* csio_hw_get_device_id */
+
+/*
+ * csio_hw_set_description - Set the model, description of the hw.
+ * @hw: HW module.
+ * @ven_id: PCI Vendor ID
+ * @dev_id: PCI Device ID
+ */
+static void
+csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
+{
+ uint32_t adap_type, prot_type;
+
+ if (ven_id == CSIO_VENDOR_ID) {
+ prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
+ adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
+
+ if (prot_type == CSIO_FPGA) {
+ memcpy(hw->model_desc,
+ csio_fcoe_adapters[13].description, 32);
+ } else if (prot_type == CSIO_T4_FCOE_ASIC) {
+ memcpy(hw->hw_ver,
+ csio_fcoe_adapters[adap_type].model_no, 16);
+ memcpy(hw->model_desc,
+ csio_fcoe_adapters[adap_type].description, 32);
+ } else {
+ char tempName[32] = "Chelsio FCoE Controller";
+ memcpy(hw->model_desc, tempName, 32);
+
+ CSIO_DB_ASSERT(0);
+ }
+ }
+} /* csio_hw_set_description */
+
+/**
+ * csio_hw_init - Initialize HW module.
+ * @hw: Pointer to HW module.
+ *
+ * Initialize the members of the HW module.
+ */
+int
+csio_hw_init(struct csio_hw *hw)
+{
+ int rv = -EINVAL;
+ uint32_t i;
+ uint16_t ven_id, dev_id;
+ struct csio_evt_msg *evt_entry;
+
+ INIT_LIST_HEAD(&hw->sm.sm_list);
+ csio_init_state(&hw->sm, csio_hws_uninit);
+ spin_lock_init(&hw->lock);
+ INIT_LIST_HEAD(&hw->sln_head);
+
+ /* Get the PCI vendor & device id */
+ csio_hw_get_device_id(hw);
+
+ strcpy(hw->name, CSIO_HW_NAME);
+
+ /* Set the model & its description */
+
+ ven_id = hw->params.pci.vendor_id;
+ dev_id = hw->params.pci.device_id;
+
+ csio_hw_set_description(hw, ven_id, dev_id);
+
+ /* Initialize default log level */
+ hw->params.log_level = (uint32_t) csio_dbg_level;
+
+ csio_set_fwevt_intr_idx(hw, -1);
+ csio_set_nondata_intr_idx(hw, -1);
+
+ /* Init all the modules: Mailbox, WorkRequest and Transport */
+ if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
+ goto err;
+
+ rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
+ if (rv)
+ goto err_mbm_exit;
+
+ rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
+ if (rv)
+ goto err_wrm_exit;
+
+ rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
+ if (rv)
+ goto err_scsim_exit;
+ /* Pre-allocate evtq and initialize them */
+ INIT_LIST_HEAD(&hw->evt_active_q);
+ INIT_LIST_HEAD(&hw->evt_free_q);
+ for (i = 0; i < csio_evtq_sz; i++) {
+
+ evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
+ if (!evt_entry) {
+ csio_err(hw, "Failed to initialize eventq");
+ goto err_evtq_cleanup;
+ }
+
+ list_add_tail(&evt_entry->list, &hw->evt_free_q);
+ CSIO_INC_STATS(hw, n_evt_freeq);
+ }
+
+ hw->dev_num = dev_num;
+ dev_num++;
+
+ return 0;
+
+err_evtq_cleanup:
+ csio_evtq_cleanup(hw);
+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
+err_scsim_exit:
+ csio_scsim_exit(csio_hw_to_scsim(hw));
+err_wrm_exit:
+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);
+err_mbm_exit:
+ csio_mbm_exit(csio_hw_to_mbm(hw));
+err:
+ return rv;
+}
+
+/**
+ * csio_hw_exit - Un-initialize HW module.
+ * @hw: Pointer to HW module.
+ *
+ */
+void
+csio_hw_exit(struct csio_hw *hw)
+{
+ csio_evtq_cleanup(hw);
+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
+ csio_scsim_exit(csio_hw_to_scsim(hw));
+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);
+ csio_mbm_exit(csio_hw_to_mbm(hw));
+}
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
new file mode 100644
index 000000000000..9edcca4c71af
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -0,0 +1,665 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_HW_H__
+#define __CSIO_HW_H__
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/compiler.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_scsi.h"
+#include "csio_defs.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+
+/*
+ * An error value used by host. Should not clash with FW defined return values.
+ */
+#define FW_HOSTERROR 255
+
+#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
+#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
+
+#define FW_VERSION_MAJOR 1
+#define FW_VERSION_MINOR 2
+#define FW_VERSION_MICRO 8
+
+#define CSIO_HW_NAME "Chelsio FCoE Adapter"
+#define CSIO_MAX_PFN 8
+#define CSIO_MAX_PPORTS 4
+
+#define CSIO_MAX_LUN 0xFFFF
+#define CSIO_MAX_QUEUE 2048
+#define CSIO_MAX_CMD_PER_LUN 32
+#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
+#define CSIO_MAX_SECTOR_SIZE 128
+
+/* Interrupts */
+#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode
+ * (Forward intr iq + fw iq) */
+#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */
+#define CSIO_MAX_SCSI_CPU 128
+#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS)
+#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS)
+
+/* Queues */
+enum {
+ CSIO_INTR_WRSIZE = 128,
+ CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE),
+ CSIO_FWEVT_WRSIZE = 128,
+ CSIO_FWEVT_IQLEN = 128,
+ CSIO_FWEVT_FLBUFS = 64,
+ CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN),
+ CSIO_HW_NIQ = 1,
+ CSIO_HW_NFLQ = 1,
+ CSIO_HW_NEQ = 1,
+ CSIO_HW_NINTXQ = 1,
+};
+
+struct csio_msix_entries {
+ unsigned short vector; /* Vector assigned by pci_enable_msix */
+ void *dev_id; /* Priv object associated w/ this msix*/
+ char desc[24]; /* Description of this vector */
+};
+
+struct csio_scsi_qset {
+ int iq_idx; /* Ingress index */
+ int eq_idx; /* Egress index */
+ uint32_t intr_idx; /* MSIX Vector index */
+};
+
+struct csio_scsi_cpu_info {
+ int16_t max_cpus;
+};
+
+extern int csio_dbg_level;
+extern int csio_force_master;
+extern unsigned int csio_port_mask;
+extern int csio_msi;
+
+#define CSIO_VENDOR_ID 0x1425
+#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
+#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
+#define CSIO_FPGA 0xA000
+#define CSIO_T4_FCOE_ASIC 0x4600
+
+#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
+ EDC1 | LE | TP | MA | PM_TX | PM_RX | \
+ ULP_RX | CPL_SWITCH | SGE | \
+ ULP_TX | SF)
+
+/*
+ * Hard parameters used to initialize the card in the absence of a
+ * configuration file.
+ */
+enum {
+ /* General */
+ CSIO_SGE_DBFIFO_INT_THRESH = 10,
+
+ CSIO_SGE_RX_DMA_OFFSET = 2,
+
+ CSIO_SGE_FLBUF_SIZE1 = 65536,
+ CSIO_SGE_FLBUF_SIZE2 = 1536,
+ CSIO_SGE_FLBUF_SIZE3 = 9024,
+ CSIO_SGE_FLBUF_SIZE4 = 9216,
+ CSIO_SGE_FLBUF_SIZE5 = 2048,
+ CSIO_SGE_FLBUF_SIZE6 = 128,
+ CSIO_SGE_FLBUF_SIZE7 = 8192,
+ CSIO_SGE_FLBUF_SIZE8 = 16384,
+
+ CSIO_SGE_TIMER_VAL_0 = 5,
+ CSIO_SGE_TIMER_VAL_1 = 10,
+ CSIO_SGE_TIMER_VAL_2 = 20,
+ CSIO_SGE_TIMER_VAL_3 = 50,
+ CSIO_SGE_TIMER_VAL_4 = 100,
+ CSIO_SGE_TIMER_VAL_5 = 200,
+
+ CSIO_SGE_INT_CNT_VAL_0 = 1,
+ CSIO_SGE_INT_CNT_VAL_1 = 4,
+ CSIO_SGE_INT_CNT_VAL_2 = 8,
+ CSIO_SGE_INT_CNT_VAL_3 = 16,
+
+ /* Storage specific - used by FW_PFVF_CMD */
+ CSIO_WX_CAPS = FW_CMD_CAP_PF, /* w/x all */
+ CSIO_R_CAPS = FW_CMD_CAP_PF, /* r all */
+ CSIO_NVI = 4,
+ CSIO_NIQ_FLINT = 34,
+ CSIO_NETH_CTRL = 32,
+ CSIO_NEQ = 66,
+ CSIO_NEXACTF = 32,
+ CSIO_CMASK = FW_PFVF_CMD_CMASK_MASK,
+ CSIO_PMASK = FW_PFVF_CMD_PMASK_MASK,
+};
+
+/* Slowpath events */
+enum csio_evt {
+ CSIO_EVT_FW = 0, /* FW event */
+ CSIO_EVT_MBX, /* MBX event */
+ CSIO_EVT_SCN, /* State change notification */
+ CSIO_EVT_DEV_LOSS, /* Device loss event */
+ CSIO_EVT_MAX, /* Max supported event */
+};
+
+#define CSIO_EVT_MSG_SIZE 512
+#define CSIO_EVTQ_SIZE 512
+
+/* Event msg */
+struct csio_evt_msg {
+ struct list_head list; /* evt queue*/
+ enum csio_evt type;
+ uint8_t data[CSIO_EVT_MSG_SIZE];
+};
+
+enum {
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ SERNUM_LEN = 16, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+ TRACE_LEN = 112, /* length of trace data and mask */
+};
+
+enum {
+ SF_PAGE_SIZE = 256, /* serial flash page size */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+ SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
+};
+
+enum { MEM_EDC0, MEM_EDC1, MEM_MC };
+
+enum {
+ MEMWIN0_APERTURE = 2048,
+ MEMWIN0_BASE = 0x1b800,
+ MEMWIN1_APERTURE = 32768,
+ MEMWIN1_BASE = 0x28000,
+ MEMWIN2_APERTURE = 65536,
+ MEMWIN2_BASE = 0x30000,
+};
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_START_SEC = 8, /* first flash sector for FW */
+ FW_END_SEC = 15, /* last flash sector for FW */
+ FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
+ FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+
+	FLASH_CFG_MAX_SIZE = 0x10000,	/* max size of the flash config file */
+ FLASH_CFG_OFFSET = 0x1f0000,
+ FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
+	FPGA_FLASH_CFG_OFFSET = 0xf0000, /* if FPGA mode, then cfg file is
+					  * at 1MB - 64KB */
+ FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
+};
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start) ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+
+enum {
+ /*
+ * Location of firmware image in FLASH.
+ */
+ FLASH_FW_START_SEC = 8,
+ FLASH_FW_NSECS = 8,
+ FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+ FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
+
+/* Management module */
+enum {
+ CSIO_MGMT_EQ_WRSIZE = 512,
+ CSIO_MGMT_IQ_WRSIZE = 128,
+ CSIO_MGMT_EQLEN = 64,
+ CSIO_MGMT_IQLEN = 64,
+};
+
+#define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE)
+#define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE)
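+
+/*
+ * Worked example (illustration only, derived from the constants above):
+ * the management egress queue holds CSIO_MGMT_EQLEN (64) work requests of
+ * CSIO_MGMT_EQ_WRSIZE (512) bytes each, i.e. 64 * 512 = 32768 bytes,
+ * while the ingress queue is 64 * 128 = 8192 bytes.
+ */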
+
+/* mgmt module stats */
+struct csio_mgmtm_stats {
+ uint32_t n_abort_req; /* Total abort request */
+ uint32_t n_abort_rsp; /* Total abort response */
+ uint32_t n_close_req; /* Total close request */
+ uint32_t n_close_rsp; /* Total close response */
+ uint32_t n_err; /* Total Errors */
+ uint32_t n_drop; /* Total request dropped */
+ uint32_t n_active; /* Count of active_q */
+ uint32_t n_cbfn; /* Count of cbfn_q */
+};
+
+/* MGMT module */
+struct csio_mgmtm {
+	struct csio_hw		*hw;		/* Pointer to HW module */
+ int eq_idx; /* Egress queue index */
+ int iq_idx; /* Ingress queue index */
+ int msi_vec; /* MSI vector */
+ struct list_head active_q; /* Outstanding ELS/CT */
+ struct list_head abort_q; /* Outstanding abort req */
+ struct list_head cbfn_q; /* Completion queue */
+	struct list_head	mgmt_req_freelist; /* Free pool of reqs */
+						/* ELSCT request freelist */
+ struct timer_list mgmt_timer; /* MGMT timer */
+ struct csio_mgmtm_stats stats; /* ELS/CT stats */
+};
+
+struct csio_adap_desc {
+ char model_no[16];
+ char description[32];
+};
+
+struct pci_params {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint32_t vpd_cap_addr;
+ uint16_t speed;
+ uint8_t width;
+};
+
+/* User configurable hw parameters */
+struct csio_hw_params {
+ uint32_t sf_size; /* serial flash
+ * size in bytes
+ */
+ uint32_t sf_nsec; /* # of flash sectors */
+ struct pci_params pci;
+ uint32_t log_level; /* Module-level for
+ * debug log.
+ */
+};
+
+struct csio_vpd {
+ uint32_t cclk;
+ uint8_t ec[EC_LEN + 1];
+ uint8_t sn[SERNUM_LEN + 1];
+ uint8_t id[ID_LEN + 1];
+};
+
+struct csio_pport {
+ uint16_t pcap;
+ uint8_t portid;
+ uint8_t link_status;
+ uint16_t link_speed;
+ uint8_t mac[6];
+ uint8_t mod_type;
+ uint8_t rsvd1;
+ uint8_t rsvd2;
+ uint8_t rsvd3;
+};
+
+/* fcoe resource information */
+struct csio_fcoe_res_info {
+ uint16_t e_d_tov;
+ uint16_t r_a_tov_seq;
+ uint16_t r_a_tov_els;
+ uint16_t r_r_tov;
+ uint32_t max_xchgs;
+ uint32_t max_ssns;
+ uint32_t used_xchgs;
+ uint32_t used_ssns;
+ uint32_t max_fcfs;
+ uint32_t max_vnps;
+ uint32_t used_fcfs;
+ uint32_t used_vnps;
+};
+
+/* HW State machine Events */
+enum csio_hw_ev {
+ CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */
+ CSIO_HWE_INIT, /* Config done, start Init */
+ CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */
+ CSIO_HWE_FATAL, /* Fatal error during initialization */
+	CSIO_HWE_PCIERR_DETECTED,/* PCI error detected */
+	CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recovery */
+ CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */
+ CSIO_HWE_QUIESCED, /* HBA quiesced */
+ CSIO_HWE_HBA_RESET, /* HBA reset requested */
+ CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */
+ CSIO_HWE_FW_DLOAD, /* FW download requested */
+ CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */
+ CSIO_HWE_SUSPEND, /* HW suspend for Online(hot) replacement */
+ CSIO_HWE_RESUME, /* HW resume for Online(hot) replacement */
+ CSIO_HWE_MAX, /* Max HW event */
+};
+
+/* hw stats */
+struct csio_hw_stats {
+	uint32_t	n_evt_activeq;	/* Number of events in active Q */
+	uint32_t	n_evt_freeq;	/* Number of events in free Q */
+	uint32_t	n_evt_drop;	/* Number of events dropped */
+ uint32_t n_evt_unexp; /* Number of unexpected events */
+ uint32_t n_pcich_offline;/* Number of pci channel offline */
+ uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */
+ uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/
+ uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/
+ uint32_t n_cpl_unexp; /* Number of unexpected cpl */
+ uint32_t n_mbint_unexp; /* Number of unexpected mbox */
+ /* interrupt */
+ uint32_t n_plint_unexp; /* Number of unexpected PL */
+ /* interrupt */
+ uint32_t n_plint_cnt; /* Number of PL interrupt */
+ uint32_t n_int_stray; /* Number of stray interrupt */
+ uint32_t n_err; /* Number of hw errors */
+ uint32_t n_err_fatal; /* Number of fatal errors */
+ uint32_t n_err_nomem; /* Number of memory alloc failure */
+ uint32_t n_err_io; /* Number of IO failure */
+ enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */
+ uint64_t n_reset_start; /* Start time after the reset */
+ uint32_t rsvd1;
+};
+
+/* Defines for hw->flags */
+#define CSIO_HWF_MASTER 0x00000001 /* This is the Master
+ * function for the
+ * card.
+ */
+#define CSIO_HWF_HW_INTR_ENABLED	0x00000002	/* Is the HW interrupt
+							 * enable bit set?
+							 */
+#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */
+#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been
+ * allocated memory.
+ */
+#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been
+ * allocated in FW.
+ */
+#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */
+#define	CSIO_HWF_DEVID_CACHED		0x00000040	/* PCI vendor & device
+ * id cached */
+#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing
+ * FW events
+ */
+#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config
+ * params
+ */
+#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
+ * enabled?
+ */
+
+#define csio_is_hw_intr_enabled(__hw) \
+ ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
+#define csio_is_host_intr_enabled(__hw) \
+ ((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED)
+#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER)
+#define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID)
+#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED)
+#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID)
+#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED)
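+
+/*
+ * Usage sketch (illustrative only, not taken from the driver): the helpers
+ * above are plain test/set operations on hw->flags, e.g.
+ *
+ *	if (csio_is_hw_master(hw) && csio_is_hw_intr_enabled(hw))
+ *		csio_hw_intr_disable(hw);
+ */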
+
+/* Defines for intr_mode */
+enum csio_intr_mode {
+ CSIO_IM_NONE = 0,
+ CSIO_IM_INTX = 1,
+ CSIO_IM_MSI = 2,
+ CSIO_IM_MSIX = 3,
+};
+
+/* Master HW structure: One per function */
+struct csio_hw {
+ struct csio_sm sm; /* State machine: should
+ * be the 1st member.
+ */
+ spinlock_t lock; /* Lock for hw */
+
+ struct csio_scsim scsim; /* SCSI module*/
+ struct csio_wrm wrm; /* Work request module*/
+ struct pci_dev *pdev; /* PCI device */
+
+ void __iomem *regstart; /* Virtual address of
+ * register map
+ */
+ /* SCSI queue sets */
+ uint32_t num_sqsets; /* Number of SCSI
+ * queue sets */
+ uint32_t num_scsi_msix_cpus; /* Number of CPUs that
+ * will be used
+ * for ingress
+ * processing.
+ */
+
+ struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU];
+ struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS];
+
+ uint32_t evtflag; /* Event flag */
+ uint32_t flags; /* HW flags */
+
+ struct csio_mgmtm mgmtm; /* management module */
+ struct csio_mbm mbm; /* Mailbox module */
+
+ /* Lnodes */
+ uint32_t num_lns; /* Number of lnodes */
+ struct csio_lnode *rln; /* Root lnode */
+	struct list_head	sln_head;	/* Sibling lnode list */
+ int intr_iq_idx; /* Forward interrupt
+ * queue.
+ */
+ int fwevt_iq_idx; /* FW evt queue */
+ struct work_struct evtq_work; /* Worker thread for
+ * HW events.
+ */
+ struct list_head evt_free_q; /* freelist of evt
+ * elements
+ */
+ struct list_head evt_active_q; /* active evt queue*/
+
+ /* board related info */
+ char name[32];
+ char hw_ver[16];
+ char model_desc[32];
+ char drv_version[32];
+ char fwrev_str[32];
+ uint32_t optrom_ver;
+ uint32_t fwrev;
+ uint32_t tp_vers;
+ char chip_ver;
+ uint32_t cfg_finiver;
+ uint32_t cfg_finicsum;
+ uint32_t cfg_cfcsum;
+ uint8_t cfg_csum_status;
+ uint8_t cfg_store;
+ enum csio_dev_state fw_state;
+ struct csio_vpd vpd;
+
+ uint8_t pfn; /* Physical Function
+ * number
+ */
+ uint32_t port_vec; /* Port vector */
+ uint8_t num_pports; /* Number of physical
+ * ports.
+ */
+ uint8_t rst_retries; /* Reset retries */
+ uint8_t cur_evt; /* current s/m evt */
+ uint8_t prev_evt; /* Previous s/m evt */
+ uint32_t dev_num; /* device number */
+ struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */
+ struct csio_hw_params params; /* Hw parameters */
+
+ struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */
+ mempool_t *mb_mempool; /* Mailbox memory pool*/
+ mempool_t *rnode_mempool; /* rnode memory pool */
+
+ /* Interrupt */
+ enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */
+ uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt
+ * index
+ */
+ uint32_t nondata_intr_idx; /* nondata MSIX/intr
+ * idx
+ */
+
+ uint8_t cfg_neq; /* FW configured no of
+ * egress queues
+ */
+ uint8_t cfg_niq; /* FW configured no of
+ * iq queues.
+ */
+
+ struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
+
+ /* MSIX vectors */
+ struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
+
+ struct dentry *debugfs_root; /* Debug FS */
+ struct csio_hw_stats stats; /* Hw statistics */
+};
+
+/* Register access macros */
+#define csio_reg(_b, _r) ((_b) + (_r))
+
+#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r)))
+
+#define csio_wr_reg8(_h, _v, _r) writeb((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg16(_h, _v, _r) writew((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg32(_h, _v, _r) writel((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg64(_h, _v, _r) writeq((_v), \
+ csio_reg((_h)->regstart, (_r)))
+
+void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t);
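+
+/*
+ * Example (the 0x100 register offset is hypothetical): a read-modify-write
+ * of a 32-bit register through the BAR 0 mapping held in hw->regstart:
+ *
+ *	uint32_t val = csio_rd_reg32(hw, 0x100);
+ *	csio_wr_reg32(hw, val | 0x1, 0x100);
+ */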
+
+/* Core clocks <==> uSecs */
+static inline uint32_t
+csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks)
+{
+ /* add Core Clock / 2 to round ticks to nearest uS */
+ return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk;
+}
+
+static inline uint32_t
+csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
+{
+ return (us * hw->vpd.cclk) / 1000;
+}
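+
+/*
+ * Worked example (cclk value assumed for illustration; treated here as a
+ * clock rate in kHz): with hw->vpd.cclk = 200,
+ * csio_us_to_core_ticks(hw, 5) = (5 * 200) / 1000 = 1 tick, and
+ * csio_core_ticks_to_us(hw, 1) = (1 * 1000 + 100) / 200 = 5 us; the
+ * cclk/2 term rounds to the nearest microsecond.
+ */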
+
+/* Easy access macros */
+#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm))
+#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm))
+#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim))
+#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm))
+
+#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number)
+#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn))
+#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn))
+
+#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i))
+#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx)
+#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i))
+#define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx)
+
+/* Printing/logging */
+#define CSIO_DEVID(__dev) ((__dev)->dev_num)
+#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF)
+#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF)
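+
+/*
+ * Example (dev_num value assumed for illustration): for dev_num =
+ * 0x00050002, CSIO_DEVID_HI() yields 0x0005 and CSIO_DEVID_LO() yields
+ * 0x0002.
+ */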
+
+#define csio_info(__hw, __fmt, ...) \
+ dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_fatal(__hw, __fmt, ...) \
+ dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_err(__hw, __fmt, ...) \
+ dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_warn(__hw, __fmt, ...) \
+ dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#ifdef __CSIO_DEBUG__
+#define csio_dbg(__hw, __fmt, ...) \
+ csio_info((__hw), __fmt, ##__VA_ARGS__);
+#else
+#define csio_dbg(__hw, __fmt, ...)
+#endif
+
+int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
+void csio_hw_intr_disable(struct csio_hw *);
+int csio_hw_slow_intr_handler(struct csio_hw *hw);
+int csio_hw_start(struct csio_hw *);
+int csio_hw_stop(struct csio_hw *);
+int csio_hw_reset(struct csio_hw *);
+int csio_is_hw_ready(struct csio_hw *);
+int csio_is_hw_removing(struct csio_hw *);
+
+int csio_fwevtq_handler(struct csio_hw *);
+void csio_evtq_worker(struct work_struct *);
+int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type,
+ void *evt_msg, uint16_t len);
+void csio_evtq_flush(struct csio_hw *hw);
+
+int csio_request_irqs(struct csio_hw *);
+void csio_intr_enable(struct csio_hw *);
+void csio_intr_disable(struct csio_hw *, bool);
+
+struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
+int csio_config_queues(struct csio_hw *);
+
+int csio_hw_mc_read(struct csio_hw *, uint32_t, __be32 *, uint64_t *);
+int csio_hw_edc_read(struct csio_hw *, int, uint32_t, __be32 *, uint64_t *);
+int csio_hw_init(struct csio_hw *);
+void csio_hw_exit(struct csio_hw *);
+#endif /* ifndef __CSIO_HW_H__ */
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
new file mode 100644
index 000000000000..b42cbbd3d92d
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -0,0 +1,1269 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "csio_init.h"
+#include "csio_defs.h"
+
+#define CSIO_MIN_MEMPOOL_SZ 64
+
+static struct dentry *csio_debugfs_root;
+
+static struct scsi_transport_template *csio_fcoe_transport;
+static struct scsi_transport_template *csio_fcoe_transport_vport;
+
+/*
+ * debugfs support
+ */
+static int
+csio_mem_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file->f_path.dentry->d_inode->i_size;
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct csio_hw *hw = file->private_data - mem;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len;
+ int ret, ofst;
+ __be32 data[16];
+
+ if (mem == MEM_MC)
+ ret = csio_hw_mc_read(hw, pos, data, NULL);
+ else
+ ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
+ if (ret)
+ return ret;
+
+ ofst = pos % sizeof(data);
+ len = min(count, sizeof(data) - ofst);
+ if (copy_to_user(buf, (u8 *)data + ofst, len))
+ return -EFAULT;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations csio_mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = csio_mem_open,
+ .read = csio_mem_read,
+ .llseek = default_llseek,
+};
+
+static void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
+ (void *)hw + idx, &csio_mem_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = size_mb << 20;
+}
+
+static int csio_setup_debugfs(struct csio_hw *hw)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(hw->debugfs_root))
+ return -1;
+
+ i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
+ if (i & EDRAM0_ENABLE)
+ csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
+ if (i & EDRAM1_ENABLE)
+ csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
+ if (i & EXT_MEM_ENABLE)
+ csio_add_debugfs_mem(hw, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
+ return 0;
+}
+
+/*
+ * csio_dfs_create - Creates and sets up per-hw debugfs.
+ *
+ */
+static int
+csio_dfs_create(struct csio_hw *hw)
+{
+ if (csio_debugfs_root) {
+ hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
+ csio_debugfs_root);
+ csio_setup_debugfs(hw);
+ }
+
+ return 0;
+}
+
+/*
+ * csio_dfs_destroy - Destroys per-hw debugfs.
+ */
+static int
+csio_dfs_destroy(struct csio_hw *hw)
+{
+ if (hw->debugfs_root)
+ debugfs_remove_recursive(hw->debugfs_root);
+
+ return 0;
+}
+
+/*
+ * csio_dfs_init - Debug filesystem initialization for the module.
+ *
+ */
+static int
+csio_dfs_init(void)
+{
+ csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!csio_debugfs_root)
+ pr_warn("Could not create debugfs entry, continuing\n");
+
+ return 0;
+}
+
+/*
+ * csio_dfs_exit - debugfs cleanup for the module.
+ */
+static void
+csio_dfs_exit(void)
+{
+ debugfs_remove(csio_debugfs_root);
+}
+
+/*
+ * csio_pci_init - PCI initialization.
+ * @pdev: PCI device.
+ * @bars: Bitmask of bars to be requested.
+ *
+ * Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ */
+static int
+csio_pci_init(struct pci_dev *pdev, int *bars)
+{
+ int rv = -ENODEV;
+
+ *bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_enable_device_mem(pdev))
+ goto err;
+
+ if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
+ goto err_disable_device;
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pdev->dev, "No suitable DMA available.\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_selected_regions(pdev, *bars);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return rv;
+
+}
+
+/*
+ * csio_pci_exit - PCI uninitialization.
+ * @pdev: PCI device.
+ * @bars: Bars to be released.
+ *
+ */
+static void
+csio_pci_exit(struct pci_dev *pdev, int *bars)
+{
+ pci_release_selected_regions(pdev, *bars);
+ pci_disable_device(pdev);
+}
+
+/*
+ * csio_hw_init_workers - Initialize the HW module's worker threads.
+ * @hw: HW module.
+ *
+ */
+static void
+csio_hw_init_workers(struct csio_hw *hw)
+{
+ INIT_WORK(&hw->evtq_work, csio_evtq_worker);
+}
+
+static void
+csio_hw_exit_workers(struct csio_hw *hw)
+{
+ cancel_work_sync(&hw->evtq_work);
+ flush_scheduled_work();
+}
+
+static int
+csio_create_queues(struct csio_hw *hw)
+{
+ int i, j;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
+ return 0;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
+ 0, hw->pport[0].portid, false, NULL);
+ if (rv != 0) {
+ csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
+ return rv;
+ }
+ }
+
+ /* FW event queue */
+ rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
+ csio_get_fwevt_intr_idx(hw),
+ hw->pport[0].portid, true, NULL);
+ if (rv != 0) {
+ csio_err(hw, "FW event IQ config failed!: %d\n", rv);
+ return rv;
+ }
+
+ /* Create mgmt queue */
+ rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
+ mgmtm->iq_idx, hw->pport[0].portid, NULL);
+
+ if (rv != 0) {
+ csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
+ goto err;
+ }
+
+ /* Create SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < info->max_cpus; j++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+
+ rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
+ sqset->intr_idx, i, false, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module IQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
+ sqset->iq_idx, i, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module EQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
+ return 0;
+err:
+ csio_wr_destroy_queues(hw, true);
+ return -EINVAL;
+}
+
+/*
+ * csio_config_queues - Configure the DMA queues.
+ * @hw: HW module.
+ *
+ * Allocates memory for queues and registers them with FW.
+ */
+int
+csio_config_queues(struct csio_hw *hw)
+{
+ int i, j, idx, k = 0;
+ int rv;
+ struct csio_scsi_qset *sqset;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ struct csio_scsi_qset *orig;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
+ return csio_create_queues(hw);
+
+ /* Calculate number of SCSI queues for MSIX we would like */
+ hw->num_scsi_msix_cpus = num_online_cpus();
+ hw->num_sqsets = num_online_cpus() * hw->num_pports;
+
+ if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
+ hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
+ hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
+ }
+
+ /* Initialize max_cpus, may get reduced during msix allocations */
+ for (i = 0; i < hw->num_pports; i++)
+ hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;
+
+ csio_dbg(hw, "nsqsets:%d scpus:%d\n",
+ hw->num_sqsets, hw->num_scsi_msix_cpus);
+
+ csio_intr_enable(hw);
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+
+ /* Allocate Forward interrupt iq. */
+ hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
+ CSIO_INTR_WRSIZE, CSIO_INGRESS,
+ (void *)hw, 0, 0, NULL);
+ if (hw->intr_iq_idx == -1) {
+ csio_err(hw,
+ "Forward interrupt queue creation failed\n");
+ goto intr_disable;
+ }
+ }
+
+ /* Allocate the FW evt queue */
+ hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
+ CSIO_FWEVT_WRSIZE,
+ CSIO_INGRESS, (void *)hw,
+ CSIO_FWEVT_FLBUFS, 0,
+ csio_fwevt_intx_handler);
+ if (hw->fwevt_iq_idx == -1) {
+ csio_err(hw, "FW evt queue creation failed\n");
+ goto intr_disable;
+ }
+
+ /* Allocate the mgmt queue */
+ mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
+ CSIO_MGMT_EQ_WRSIZE,
+ CSIO_EGRESS, (void *)hw, 0, 0, NULL);
+ if (mgmtm->eq_idx == -1) {
+ csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
+ goto intr_disable;
+ }
+
+ /* Use FW IQ for MGMT req completion */
+ mgmtm->iq_idx = hw->fwevt_iq_idx;
+
+ /* Allocate SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+ sqset = &hw->sqset[i][j];
+
+ if (j >= info->max_cpus) {
+ k = j % info->max_cpus;
+ orig = &hw->sqset[i][k];
+ sqset->eq_idx = orig->eq_idx;
+ sqset->iq_idx = orig->iq_idx;
+ continue;
+ }
+
+ idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
+ CSIO_EGRESS, (void *)hw, 0, 0,
+ NULL);
+ if (idx == -1) {
+ csio_err(hw, "EQ creation failed for idx:%d\n",
+ idx);
+ goto intr_disable;
+ }
+
+ sqset->eq_idx = idx;
+
+ idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
+ CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
+ (void *)hw, 0, 0,
+ csio_scsi_intx_handler);
+ if (idx == -1) {
+ csio_err(hw, "IQ creation failed for idx:%d\n",
+ idx);
+ goto intr_disable;
+ }
+ sqset->iq_idx = idx;
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;
+
+ rv = csio_create_queues(hw);
+ if (rv != 0)
+ goto intr_disable;
+
+ /*
+ * Now request IRQs for the vectors. In the event of a failure,
+ * cleanup is handled internally by this function.
+ */
+ rv = csio_request_irqs(hw);
+ if (rv != 0)
+ return -EINVAL;
+
+ return 0;
+
+intr_disable:
+ csio_intr_disable(hw, false);
+
+ return -EINVAL;
+}
+
+static int
+csio_resource_alloc(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ int rv = -ENOMEM;
+
+ wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
+ CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);
+
+ hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+ sizeof(struct csio_mb));
+ if (!hw->mb_mempool)
+ goto err;
+
+ hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+ sizeof(struct csio_rnode));
+ if (!hw->rnode_mempool)
+ goto err_free_mb_mempool;
+
+ hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
+ CSIO_SCSI_RSP_LEN, 8, 0);
+ if (!hw->scsi_pci_pool)
+ goto err_free_rn_pool;
+
+ return 0;
+
+err_free_rn_pool:
+ mempool_destroy(hw->rnode_mempool);
+ hw->rnode_mempool = NULL;
+err_free_mb_mempool:
+ mempool_destroy(hw->mb_mempool);
+ hw->mb_mempool = NULL;
+err:
+ return rv;
+}
+
+static void
+csio_resource_free(struct csio_hw *hw)
+{
+ pci_pool_destroy(hw->scsi_pci_pool);
+ hw->scsi_pci_pool = NULL;
+ mempool_destroy(hw->rnode_mempool);
+ hw->rnode_mempool = NULL;
+ mempool_destroy(hw->mb_mempool);
+ hw->mb_mempool = NULL;
+}
+
+/*
+ * csio_hw_alloc - Allocate and initialize the HW module.
+ * @pdev: PCI device.
+ *
+ * Allocates HW structure, DMA, memory resources, maps BARS to
+ * host memory and initializes HW module.
+ */
+static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
+{
+ struct csio_hw *hw;
+
+ hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
+ if (!hw)
+ goto err;
+
+ hw->pdev = pdev;
+ strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
+
+ /* memory pool/DMA pool allocation */
+ if (csio_resource_alloc(hw))
+ goto err_free_hw;
+
+ /* Get the start address of registers from BAR 0 */
+ hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->regstart) {
+ csio_err(hw, "Could not map BAR 0, regstart = %p\n",
+ hw->regstart);
+ goto err_resource_free;
+ }
+
+ csio_hw_init_workers(hw);
+
+ if (csio_hw_init(hw))
+ goto err_unmap_bar;
+
+ csio_dfs_create(hw);
+
+ csio_dbg(hw, "hw:%p\n", hw);
+
+ return hw;
+
+err_unmap_bar:
+ csio_hw_exit_workers(hw);
+ iounmap(hw->regstart);
+err_resource_free:
+ csio_resource_free(hw);
+err_free_hw:
+ kfree(hw);
+err:
+ return NULL;
+}
+
+/*
+ * csio_hw_free - Uninitialize and free the HW module.
+ * @hw: The HW module
+ *
+ * Disable interrupts, uninit the HW module, free resources, free hw.
+ */
+static void
+csio_hw_free(struct csio_hw *hw)
+{
+ csio_intr_disable(hw, true);
+ csio_hw_exit_workers(hw);
+ csio_hw_exit(hw);
+ iounmap(hw->regstart);
+ csio_dfs_destroy(hw);
+ csio_resource_free(hw);
+ kfree(hw);
+}
+
+/**
+ * csio_shost_init - Create and initialize the lnode module.
+ * @hw: The HW module.
+ * @dev: The device associated with this invocation.
+ * @probe: Called from probe context or not?
+ * @pln: Parent lnode, if any.
+ *
+ * Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initializes lnode module and registers with SCSI ML
+ * via scsi_host_add. This function is shared between physical and
+ * virtual node ports.
+ */
+struct csio_lnode *
+csio_shost_init(struct csio_hw *hw, struct device *dev,
+ bool probe, struct csio_lnode *pln)
+{
+ struct Scsi_Host *shost = NULL;
+ struct csio_lnode *ln;
+
+ csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
+ csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;
+
+ /*
+ * hw->pdev is the physical port's PCI dev structure,
+ * which will be different from the NPIV dev structure.
+ */
+ if (dev == &hw->pdev->dev)
+ shost = scsi_host_alloc(
+ &csio_fcoe_shost_template,
+ sizeof(struct csio_lnode));
+ else
+ shost = scsi_host_alloc(
+ &csio_fcoe_shost_vport_template,
+ sizeof(struct csio_lnode));
+
+ if (!shost)
+ goto err;
+
+ ln = shost_priv(shost);
+ memset(ln, 0, sizeof(struct csio_lnode));
+
+ /* Link common lnode to this lnode */
+ ln->dev_num = (shost->host_no << 16);
+
+ shost->can_queue = CSIO_MAX_QUEUE;
+ shost->this_id = -1;
+ shost->unique_id = shost->host_no;
+ shost->max_cmd_len = 16; /* Max CDB length supported */
+ shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
+ hw->fres_info.max_ssns);
+ shost->max_lun = CSIO_MAX_LUN;
+ if (dev == &hw->pdev->dev)
+ shost->transportt = csio_fcoe_transport;
+ else
+ shost->transportt = csio_fcoe_transport_vport;
+
+ /* root lnode */
+ if (!hw->rln)
+ hw->rln = ln;
+
+ /* Other initialization here: Common, Transport specific */
+ if (csio_lnode_init(ln, hw, pln))
+ goto err_shost_put;
+
+ if (scsi_add_host(shost, dev))
+ goto err_lnode_exit;
+
+ return ln;
+
+err_lnode_exit:
+ csio_lnode_exit(ln);
+err_shost_put:
+ scsi_host_put(shost);
+err:
+ return NULL;
+}
+
+/**
+ * csio_shost_exit - De-instantiate the shost.
+ * @ln: The lnode module corresponding to the shost.
+ *
+ */
+void
+csio_shost_exit(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ /* Inform transport */
+ fc_remove_host(shost);
+
+ /* Inform SCSI ML */
+ scsi_remove_host(shost);
+
+ /* Flush all the events, so that any rnode removal events
+ * already queued are all handled, before we remove the lnode.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_evtq_flush(hw);
+ spin_unlock_irq(&hw->lock);
+
+ csio_lnode_exit(ln);
+ scsi_host_put(shost);
+}
+
+struct csio_lnode *
+csio_lnode_alloc(struct csio_hw *hw)
+{
+ return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
+}
+
+void
+csio_lnodes_block_request(struct csio_hw *hw)
+{
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_block_requests(shost);
+
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_request(struct csio_hw *hw)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_block_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_exit(struct csio_hw *hw, bool npiv)
+{
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
+ return;
+ }
+
+ /* Get all child lnodes(NPIV ports) */
+ spin_lock_irq(&hw->lock);
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete NPIV lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ fc_vport_terminate(ln->fc_vport);
+ }
+
+ /* Delete only npiv lnodes */
+ if (npiv)
+ goto free_lnodes;
+
+ cur_cnt = 0;
+ /* Get all physical lnodes */
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete physical lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
+ csio_shost_exit(lnode_list[ii]);
+ }
+
+free_lnodes:
+ kfree(lnode_list);
+}
+
+/*
+ * csio_lnode_init_post: Set lnode attributes after starting HW.
+ * @ln: lnode.
+ *
+ */
+static void
+csio_lnode_init_post(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ csio_fchost_attr_init(ln);
+
+ scsi_scan_host(shost);
+}
+
+/*
+ * csio_probe_one - Instantiate this function.
+ * @pdev: PCI device
+ * @id: Device ID
+ *
+ * This is the .probe() callback of the driver. This function:
+ * - Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ * - Allocates HW structure, DMA, memory resources, maps BARS to
+ * host memory and initializes HW module.
+ * - Allocates lnode structure via scsi_host_alloc, initializes
+ *	shost, initializes lnode module and registers with SCSI ML
+ *	via scsi_add_host.
+ * - Enables interrupts, and starts the chip by kicking off the
+ * HW state machine.
+ *	- Once hardware is ready, initiates a scan of the host via
+ * scsi_scan_host.
+ */
+static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int rv;
+ int bars;
+ int i;
+ struct csio_hw *hw;
+ struct csio_lnode *ln;
+
+ rv = csio_pci_init(pdev, &bars);
+ if (rv)
+ goto err;
+
+ hw = csio_hw_alloc(pdev);
+ if (!hw) {
+ rv = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ pci_set_drvdata(pdev, hw);
+
+ if (csio_hw_start(hw) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to start FW, continuing in debug mode.\n");
+ return 0;
+ }
+
+ sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+
+ for (i = 0; i < hw->num_pports; i++) {
+ ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+ if (!ln) {
+ rv = -ENODEV;
+ break;
+ }
+ /* Initialize portid */
+ ln->portid = hw->pport[i].portid;
+
+ spin_lock_irq(&hw->lock);
+ if (csio_lnode_start(ln) != 0)
+ rv = -ENODEV;
+ spin_unlock_irq(&hw->lock);
+
+ if (rv)
+ break;
+
+ csio_lnode_init_post(ln);
+ }
+
+ if (rv)
+ goto err_lnode_exit;
+
+ return 0;
+
+err_lnode_exit:
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ pci_set_drvdata(hw->pdev, NULL);
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+err_pci_exit:
+ csio_pci_exit(pdev, &bars);
+err:
+ dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
+ return rv;
+}
+
+/*
+ * csio_remove_one - Remove one instance of the driver at this PCI function.
+ * @pdev: PCI device
+ *
+ * Used during hotplug operation.
+ */
+static void csio_remove_one(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+
+	/* Stops lnode and rnode state machines, quiesces IOs,
+	 * and unregisters all sessions with remote ports.
+	 */
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+ pci_set_drvdata(pdev, NULL);
+ csio_pci_exit(pdev, &bars);
+}
+
+/*
+ * csio_pci_error_detected - PCI error was detected
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+
+ /* Post PCI error detected evt to HW s/m
+	 * HW s/m handles this evt by quiescing IOs, unregistering rports
+	 * and finally taking the device offline.
+ */
+ csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_intr_disable(hw, true);
+ pci_disable_device(pdev);
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * csio_pci_slot_reset - PCI slot has been reset.
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ int ready;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	/* Bring HW s/m to ready state, but don't resume IOs. */
+ spin_lock_irq(&hw->lock);
+ csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
+ ready = csio_is_hw_ready(hw);
+ spin_unlock_irq(&hw->lock);
+
+ if (ready) {
+ return PCI_ERS_RESULT_RECOVERED;
+ } else {
+ dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+/*
+ * csio_pci_resume - Resume normal operations
+ * @pdev: PCI device
+ *
+ */
+static void
+csio_pci_resume(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ struct csio_lnode *ln;
+ int rv = 0;
+ int i;
+
+ /* Bring the LINK UP and Resume IO */
+
+ for (i = 0; i < hw->num_pports; i++) {
+ ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+ if (!ln) {
+ rv = -ENODEV;
+ break;
+ }
+ /* Initialize portid */
+ ln->portid = hw->pport[i].portid;
+
+ spin_lock_irq(&hw->lock);
+ if (csio_lnode_start(ln) != 0)
+ rv = -ENODEV;
+ spin_unlock_irq(&hw->lock);
+
+ if (rv)
+ break;
+
+ csio_lnode_init_post(ln);
+ }
+
+ if (rv)
+ goto err_resume_exit;
+
+ return;
+
+err_resume_exit:
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+ dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
+}
+
+static struct pci_error_handlers csio_err_handler = {
+ .error_detected = csio_pci_error_detected,
+ .slot_reset = csio_pci_slot_reset,
+ .resume = csio_pci_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
+ CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0), /* T420BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0), /* T440BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0), /* T440CH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0), /* T420SO FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0), /* T420CX FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0), /* T420BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0), /* T404BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0), /* B420 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */
+ CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */
+ { 0, 0, 0, 0, 0, 0, 0 }
+};
+
+
+static struct pci_driver csio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+ .id_table = csio_pci_tbl,
+ .probe = csio_probe_one,
+ .remove = csio_remove_one,
+ .err_handler = &csio_err_handler,
+};
+
+/*
+ * csio_init - Chelsio storage driver initialization function.
+ *
+ */
+static int __init
+csio_init(void)
+{
+ int rv = -ENOMEM;
+
+ pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);
+
+ csio_dfs_init();
+
+ csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
+ if (!csio_fcoe_transport)
+ goto err;
+
+ csio_fcoe_transport_vport =
+ fc_attach_transport(&csio_fc_transport_vport_funcs);
+ if (!csio_fcoe_transport_vport)
+ goto err_vport;
+
+ rv = pci_register_driver(&csio_pci_driver);
+ if (rv)
+ goto err_pci;
+
+ return 0;
+
+err_pci:
+ fc_release_transport(csio_fcoe_transport_vport);
+err_vport:
+ fc_release_transport(csio_fcoe_transport);
+err:
+ csio_dfs_exit();
+ return rv;
+}
+
+/*
+ * csio_exit - Chelsio storage driver uninitialization.
+ *
+ * Function that gets called in the unload path.
+ */
+static void __exit
+csio_exit(void)
+{
+ pci_unregister_driver(&csio_pci_driver);
+ csio_dfs_exit();
+ fc_release_transport(csio_fcoe_transport_vport);
+ fc_release_transport(csio_fcoe_transport);
+}
+
+module_init(csio_init);
+module_exit(csio_exit);
+MODULE_AUTHOR(CSIO_DRV_AUTHOR);
+MODULE_DESCRIPTION(CSIO_DRV_DESC);
+MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
+MODULE_VERSION(CSIO_DRV_VERSION);
+MODULE_FIRMWARE(CSIO_FW_FNAME);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
new file mode 100644
index 000000000000..0838fd7ec9c7
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -0,0 +1,158 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_INIT_H__
+#define __CSIO_INIT_H__
+
+#include <linux/pci.h>
+#include <linux/if_ether.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_scsi.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_hw.h"
+
+#define CSIO_DRV_AUTHOR "Chelsio Communications"
+#define CSIO_DRV_LICENSE "Dual BSD/GPL"
+#define CSIO_DRV_DESC "Chelsio FCoE driver"
+#define CSIO_DRV_VERSION "1.0.0"
+
+#define CSIO_DEVICE(devid, idx) \
+{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
+
+#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
+ ((_dev) == CSIO_DEVID_PE10K_PF1))
+
+/* FCoE device IDs */
+#define CSIO_DEVID_PE10K 0xA000
+#define CSIO_DEVID_PE10K_PF1 0xA001
+#define CSIO_DEVID_T440DBG_FCOE 0x4600
+#define CSIO_DEVID_T420CR_FCOE 0x4601
+#define CSIO_DEVID_T422CR_FCOE 0x4602
+#define CSIO_DEVID_T440CR_FCOE 0x4603
+#define CSIO_DEVID_T420BCH_FCOE 0x4604
+#define CSIO_DEVID_T440BCH_FCOE 0x4605
+#define CSIO_DEVID_T440CH_FCOE 0x4606
+#define CSIO_DEVID_T420SO_FCOE 0x4607
+#define CSIO_DEVID_T420CX_FCOE 0x4608
+#define CSIO_DEVID_T420BT_FCOE 0x4609
+#define CSIO_DEVID_T404BT_FCOE 0x460A
+#define CSIO_DEVID_B420_FCOE 0x460B
+#define CSIO_DEVID_B404_FCOE 0x460C
+#define CSIO_DEVID_T480CR_FCOE 0x460D
+#define CSIO_DEVID_T440LPCR_FCOE 0x460E
+
+extern struct fc_function_template csio_fc_transport_funcs;
+extern struct fc_function_template csio_fc_transport_vport_funcs;
+
+void csio_fchost_attr_init(struct csio_lnode *);
+
+/* INTx handlers */
+void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+/* Common os lnode APIs */
+void csio_lnodes_block_request(struct csio_hw *);
+void csio_lnodes_unblock_request(struct csio_hw *);
+void csio_lnodes_block_by_port(struct csio_hw *, uint8_t);
+void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t);
+
+struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
+ struct csio_lnode *);
+void csio_shost_exit(struct csio_lnode *);
+void csio_lnodes_exit(struct csio_hw *, bool);
+
+static inline struct Scsi_Host *
+csio_ln_to_shost(struct csio_lnode *ln)
+{
+ return container_of((void *)ln, struct Scsi_Host, hostdata[0]);
+}
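+
+/*
+ * Note (explanatory, based on csio_shost_init() in csio_init.c): the lnode
+ * is carved out of shost->hostdata by scsi_host_alloc(..., sizeof(struct
+ * csio_lnode)), so the container_of() above recovers the owning Scsi_Host:
+ *
+ *	struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ */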
+
+/* SCSI -- locking version of get/put ioreqs */
+static inline struct csio_ioreq *
+csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim)
+{
+ struct csio_ioreq *ioreq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ ioreq = csio_get_scsi_ioreq(scsim);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+
+ return ioreq;
+}
+
+static inline void
+csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct csio_ioreq *ioreq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ csio_put_scsi_ioreq(scsim, ioreq);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+}
+
+/* Called in interrupt context */
+static inline void
+csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct list_head *reqlist, int n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ csio_put_scsi_ioreq_list(scsim, reqlist, n);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+}
+
+/* Called in interrupt context */
+static inline void
+csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct list_head *reqlist, int n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_put_scsi_ddp_list(scsim, reqlist, n);
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+#endif /* ifndef __CSIO_INIT_H__ */
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
new file mode 100644
index 000000000000..7ee9777ae2c5
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -0,0 +1,624 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+
+#include "csio_init.h"
+#include "csio_hw.h"
+
+static irqreturn_t
+csio_nondata_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ int rv;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_hw_slow_intr_handler(hw);
+ rv = csio_mb_isr_handler(hw);
+
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_handler - Common FW event handler routine.
+ * @hw: HW module.
+ *
+ * This is the ISR for FW events. It is shared b/w MSIX
+ * and INTx handlers.
+ */
+static void
+csio_fwevt_handler(struct csio_hw *hw)
+{
+ int rv;
+ unsigned long flags;
+
+ rv = csio_fwevtq_handler(hw);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+} /* csio_fwevt_handler */
+
+/*
+ * csio_fwevt_isr() - FW events MSIX ISR
+ * @irq:
+ * @dev_id:
+ *
+ * Process WRs on the FW event queue.
+ *
+ */
+static irqreturn_t
+csio_fwevt_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_fwevt_handler(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
+ * @hw: HW module.
+ */
+void
+csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ csio_fwevt_handler(hw);
+} /* csio_fwevt_intx_handler */
+
+/*
+ * csio_process_scsi_cmpl - Process a SCSI WR completion.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ *
+ */
+static void
+csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *cbfn_q)
+{
+ struct csio_ioreq *ioreq;
+ uint8_t *scsiwr;
+ uint8_t subop;
+ void *cmnd;
+ unsigned long flags;
+
+ ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
+ if (likely(ioreq)) {
+ if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
+ subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
+ ((struct fw_scsi_abrt_cls_wr *)
+ scsiwr)->sub_opcode_to_chk_all_io);
+
+ csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
+ subop ? "Close" : "Abort",
+ ioreq, ioreq->wr_status);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (subop)
+ csio_scsi_closed(ioreq,
+ (struct list_head *)cbfn_q);
+ else
+ csio_scsi_aborted(ioreq,
+ (struct list_head *)cbfn_q);
+ /*
+ * We call scsi_done for I/Os that driver thinks aborts
+ * have timed out. If there is a race caused by FW
+ * completing abort at the exact same time that the
+			 * driver has detected the abort timeout, the following
+ * check prevents calling of scsi_done twice for the
+ * same command: once from the eh_abort_handler, another
+ * from csio_scsi_isr_handler(). This also avoids the
+ * need to check if csio_scsi_cmnd(req) is NULL in the
+ * fast path.
+ */
+ cmnd = csio_scsi_cmnd(ioreq);
+ if (unlikely(cmnd == NULL))
+ list_del_init(&ioreq->sm.sm_list);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (unlikely(cmnd == NULL))
+ csio_put_scsi_ioreq_lock(hw,
+ csio_hw_to_scsim(hw), ioreq);
+ } else {
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ }
+}
+
+/*
+ * csio_scsi_isr_handler() - Common SCSI ISR handler.
+ * @iq: Ingress queue pointer.
+ *
+ * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
+ * by calling csio_wr_process_iq_idx. If there are completions on the
+ * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
+ * Once done, add these completions onto the freelist.
+ * This routine is shared b/w MSIX and INTx.
+ */
+static inline irqreturn_t
+csio_scsi_isr_handler(struct csio_q *iq)
+{
+ struct csio_hw *hw = (struct csio_hw *)iq->owner;
+ LIST_HEAD(cbfn_q);
+ struct list_head *tmp;
+ struct csio_scsim *scm;
+ struct csio_ioreq *ioreq;
+ int isr_completions = 0;
+
+ scm = csio_hw_to_scsim(hw);
+
+ if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
+ &cbfn_q) != 0))
+ return IRQ_NONE;
+
+ /* Call back the completion routines */
+ list_for_each(tmp, &cbfn_q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ isr_completions++;
+ ioreq->io_cbfn(hw, ioreq);
+ /* Release ddp buffer if used for this req */
+ if (unlikely(ioreq->dcopy))
+ csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
+ ioreq->nsge);
+ }
+
+ if (isr_completions) {
+ /* Return the ioreqs back to ioreq->freelist */
+ csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
+ isr_completions);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_isr() - SCSI MSIX handler
+ * @irq:
+ * @dev_id:
+ *
+ * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
+ */
+static irqreturn_t
+csio_scsi_isr(int irq, void *dev_id)
+{
+ struct csio_q *iq = (struct csio_q *) dev_id;
+ struct csio_hw *hw;
+
+ if (unlikely(!iq))
+ return IRQ_NONE;
+
+ hw = (struct csio_hw *)iq->owner;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_scsi_isr_handler(iq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_intx_handler() - SCSI INTx handler
+ * @hw: HW module.
+ * @wr: WR (unused).
+ * @len: WR length (unused).
+ * @flb: Freelist buffer (unused).
+ * @priv: Ingress queue pointer.
+ *
+ * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
+ */
+void
+csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ struct csio_q *iq = priv;
+
+ csio_scsi_isr_handler(iq);
+
+} /* csio_scsi_intx_handler */
+
+/*
+ * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
+ * @irq: Interrupt number.
+ * @dev_id: HW module pointer.
+ */
+static irqreturn_t
+csio_fcoe_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ struct csio_q *intx_q = NULL;
+ int rv;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ /* Disable the interrupt for this PCI function. */
+ if (hw->intr_mode == CSIO_IM_INTX)
+ csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
+
+ /*
+ * The read in the following function will flush the
+ * above write.
+ */
+ if (csio_hw_slow_intr_handler(hw))
+ ret = IRQ_HANDLED;
+
+ /* Get the INTx Forward interrupt IQ. */
+ intx_q = csio_get_q(hw, hw->intr_iq_idx);
+
+ CSIO_DB_ASSERT(intx_q);
+
+ /* IQ handler is not possible for intx_q, hence pass in NULL */
+ if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
+ ret = IRQ_HANDLED;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ rv = csio_mb_isr_handler(hw);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ return ret;
+}
+
+static void
+csio_add_msix_desc(struct csio_hw *hw)
+{
+ int i;
+ struct csio_msix_entries *entryp = &hw->msix_entries[0];
+ int k = CSIO_EXTRA_VECS;
+ int len = sizeof(entryp->desc) - 1;
+ int cnt = hw->num_sqsets + k;
+
+ /* Non-data vector */
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
+
+ entryp++;
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
+ entryp++;
+
+ /* Name SCSI vecs */
+ for (i = k; i < cnt; i++, entryp++) {
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
+ CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
+ }
+}
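+
+/*
+ * For example, a function at PCI address 02:00.4 with two SCSI queue
+ * sets would end up with vector names "csio-02:00:4-nondata",
+ * "csio-02:00:4-fwevt", "csio-02:00:4-scsi0" and "csio-02:00:4-scsi1",
+ * which is how the vectors show up in /proc/interrupts once
+ * csio_request_irqs() registers them.
+ */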
+
+int
+csio_request_irqs(struct csio_hw *hw)
+{
+ int rv, i, j, k = 0;
+ struct csio_msix_entries *entryp = &hw->msix_entries[0];
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
+ (hw->intr_mode == CSIO_IM_MSI) ?
+ 0 : IRQF_SHARED,
+ KBUILD_MODNAME, hw);
+ if (rv) {
+ if (hw->intr_mode == CSIO_IM_MSI)
+ pci_disable_msi(hw->pdev);
+ csio_err(hw, "Failed to allocate interrupt line.\n");
+ return -EINVAL;
+ }
+
+ goto out;
+ }
+
+ /* Add the MSIX vector descriptions */
+ csio_add_msix_desc(hw);
+
+ rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
+ entryp[k].desc, hw);
+ if (rv) {
+ csio_err(hw, "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k++].dev_id = (void *)hw;
+
+ rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
+ entryp[k].desc, hw);
+ if (rv) {
+ csio_err(hw, "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k++].dev_id = (void *)hw;
+
+ /* Allocate IRQs for SCSI */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+ for (j = 0; j < info->max_cpus; j++, k++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+ struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
+
+ rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
+ entryp[k].desc, q);
+ if (rv) {
+ csio_err(hw,
+ "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k].dev_id = (void *)q;
+
+ } /* for all scsi cpus */
+ } /* for all ports */
+
+out:
+ hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
+
+ return 0;
+
+err:
+ for (i = 0; i < k; i++) {
+ entryp = &hw->msix_entries[i];
+ free_irq(entryp->vector, entryp->dev_id);
+ }
+ pci_disable_msix(hw->pdev);
+
+ return -EINVAL;
+}
+
+static void
+csio_disable_msix(struct csio_hw *hw, bool free)
+{
+ int i;
+ struct csio_msix_entries *entryp;
+ int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
+
+ if (free) {
+ for (i = 0; i < cnt; i++) {
+ entryp = &hw->msix_entries[i];
+ free_irq(entryp->vector, entryp->dev_id);
+ }
+ }
+ pci_disable_msix(hw->pdev);
+}
+
+/* Reduce per-port max possible CPUs */
+static void
+csio_reduce_sqsets(struct csio_hw *hw, int cnt)
+{
+ int i;
+ struct csio_scsi_cpu_info *info;
+
+ while (cnt < hw->num_sqsets) {
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+ if (info->max_cpus > 1) {
+ info->max_cpus--;
+ hw->num_sqsets--;
+ if (hw->num_sqsets <= cnt)
+ break;
+ }
+ }
+ }
+
+ csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
+}
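+
+/*
+ * Example: two ports each configured with max_cpus = 4 (num_sqsets = 8)
+ * reduced to a target cnt of 5 trims one queue set per port per pass,
+ * (4,4) -> (3,4) -> (3,3) -> (2,3), stopping as soon as num_sqsets
+ * drops to cnt so the remaining sets stay spread across the ports.
+ */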
+
+static int
+csio_enable_msix(struct csio_hw *hw)
+{
+ int rv, i, j, k, n, min, cnt;
+ struct csio_msix_entries *entryp;
+ struct msix_entry *entries;
+ int extra = CSIO_EXTRA_VECS;
+ struct csio_scsi_cpu_info *info;
+
+ min = hw->num_pports + extra;
+ cnt = hw->num_sqsets + extra;
+
+ /* Max vectors required based on #niqs configured in fw */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
+ cnt = min_t(uint8_t, hw->cfg_niq, cnt);
+
+ entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++)
+ entries[i].entry = (uint16_t)i;
+
+ csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
+
+ while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
+ cnt = rv;
+ if (!rv) {
+ if (cnt < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
+ csio_reduce_sqsets(hw, cnt - extra);
+ }
+ } else {
+ if (rv > 0) {
+ pci_disable_msix(hw->pdev);
+ csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
+ }
+
+ kfree(entries);
+ return -ENOMEM;
+ }
+
+ /* Save off vectors */
+ for (i = 0; i < cnt; i++) {
+ entryp = &hw->msix_entries[i];
+ entryp->vector = entries[i].vector;
+ }
+
+ /* Distribute vectors */
+ k = 0;
+ csio_set_nondata_intr_idx(hw, entries[k].entry);
+ csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
+ csio_set_fwevt_intr_idx(hw, entries[k++].entry);
+
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+ n = (j % info->max_cpus) + k;
+ hw->sqset[i][j].intr_idx = entries[n].entry;
+ }
+
+ k += info->max_cpus;
+ }
+
+ kfree(entries);
+ return 0;
+}
+
+void
+csio_intr_enable(struct csio_hw *hw)
+{
+ hw->intr_mode = CSIO_IM_NONE;
+ hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
+
+ /* Try MSIX, then MSI or fall back to INTx */
+ if ((csio_msi == 2) && !csio_enable_msix(hw))
+ hw->intr_mode = CSIO_IM_MSIX;
+ else {
+ /* Max iqs required based on #niqs configured in fw */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
+ !csio_is_hw_master(hw)) {
+ int extra = CSIO_EXTRA_MSI_IQS;
+
+ if (hw->cfg_niq < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n",
+ hw->cfg_niq - extra);
+ csio_reduce_sqsets(hw, hw->cfg_niq - extra);
+ }
+ }
+
+ if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
+ hw->intr_mode = CSIO_IM_MSI;
+ else
+ hw->intr_mode = CSIO_IM_INTX;
+ }
+
+ csio_dbg(hw, "Using %s interrupt mode.\n",
+ (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
+ ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
+}
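+
+/*
+ * The choice above is driven by the csio_msi module parameter:
+ * 2 attempts MSI-X, 1 attempts MSI, anything else uses legacy INTx;
+ * if the requested mode cannot be enabled, the driver falls back to
+ * INTx. csio_request_irqs() then registers either the single
+ * csio_fcoe_isr() handler (MSI/INTx) or one vector each for non-data,
+ * FW events and every SCSI queue set (MSI-X).
+ */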
+
+void
+csio_intr_disable(struct csio_hw *hw, bool free)
+{
+ csio_hw_intr_disable(hw);
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_MSIX:
+ csio_disable_msix(hw, free);
+ break;
+ case CSIO_IM_MSI:
+ if (free)
+ free_irq(hw->pdev->irq, hw);
+ pci_disable_msi(hw->pdev);
+ break;
+ case CSIO_IM_INTX:
+ if (free)
+ free_irq(hw->pdev->irq, hw);
+ break;
+ default:
+ break;
+ }
+ hw->intr_mode = CSIO_IM_NONE;
+ hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
+}
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
new file mode 100644
index 000000000000..ffe9be04dc39
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -0,0 +1,2135 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <asm/unaligned.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/fc/fc_ms.h>
+
+#include "csio_hw.h"
+#include "csio_mb.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+int csio_fcoe_rnodes = 1024;
+int csio_fdmi_enable = 1;
+
+#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1)
+
+/* Lnode SM declarations */
+static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);
+
+static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+ enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);
+
+/* LN event mapping */
+static enum csio_ln_ev fwevt_to_lnevt[] = {
+ CSIO_LNE_NONE, /* None */
+ CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */
+ CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */
+ CSIO_LNE_NONE, /* PLOGI_RCVD */
+ CSIO_LNE_NONE, /* PLOGO_RCVD */
+ CSIO_LNE_NONE, /* PRLI_ACC_RCVD */
+ CSIO_LNE_NONE, /* PRLI_RJT_RCVD */
+ CSIO_LNE_NONE, /* PRLI_RCVD */
+ CSIO_LNE_NONE, /* PRLO_RCVD */
+ CSIO_LNE_NONE, /* NPORT_ID_CHGD */
+ CSIO_LNE_LOGO, /* FLOGO_RCVD */
+ CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */
+ CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */
+ CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */
+ CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */
+ CSIO_LNE_NONE, /* FDISC_RJT_RCVD */
+ CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
+ CSIO_LNE_NONE, /* PRLI_TMO */
+ CSIO_LNE_NONE, /* ADISC_TMO */
+ CSIO_LNE_NONE, /* RSCN_DEV_LOST */
+ CSIO_LNE_NONE, /* SCR_ACC_RCVD */
+ CSIO_LNE_NONE, /* ADISC_RJT_RCVD */
+ CSIO_LNE_NONE, /* LOGO_SNT */
+ CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */
+};
+
+#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
+ CSIO_LNE_NONE : \
+ fwevt_to_lnevt[_evt])
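+
+/*
+ * For instance, CSIO_FWE_TO_LNE(FLOGI_ACC_RCVD) yields
+ * CSIO_LNE_FAB_INIT_DONE via the table above, while any event value
+ * beyond PROTO_ERR_IMPL_LOGO collapses to CSIO_LNE_NONE so unknown
+ * firmware events are ignored by the lnode state machine.
+ */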
+
+#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd)
+#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason)
+#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan)
+#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))
+
+/*
+ * csio_ln_lookup_by_portid - Lookup lnode using given portid.
+ * @hw: HW module
+ * @portid: port-id.
+ *
+ * If found, returns the lnode matching the given portid, otherwise
+ * returns NULL.
+ */
+static struct csio_lnode *
+csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln = hw->rln;
+ struct list_head *tmp;
+
+ /* Match sibling lnodes with portid */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+ if (ln->portid == portid)
+ return ln;
+ }
+
+ return NULL;
+}
+
+/*
+ * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
+ * @hw - HW module
+ * @vnp_id - vnp flow id.
+ * Returns - If found, returns the lnode matching the given vnp id,
+ * otherwise returns NULL.
+ */
+static struct csio_lnode *
+csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
+{
+ struct list_head *tmp1, *tmp2;
+ struct csio_lnode *sln = NULL, *cln = NULL;
+
+ if (list_empty(&hw->sln_head)) {
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+ }
+ /* Traverse sibling lnodes */
+ list_for_each(tmp1, &hw->sln_head) {
+ sln = (struct csio_lnode *) tmp1;
+
+ /* Match sibling lnode */
+ if (sln->vnp_flowid == vnp_id)
+ return sln;
+
+ if (list_empty(&sln->cln_head))
+ continue;
+
+ /* Traverse children lnodes */
+ list_for_each(tmp2, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp2;
+
+ if (cln->vnp_flowid == vnp_id)
+ return cln;
+ }
+ }
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+}
+
+/**
+ * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
+ * @hw: HW module.
+ * @wwpn: WWPN.
+ *
+ * If found, returns lnode matching given wwpn, returns NULL otherwise.
+ */
+struct csio_lnode *
+csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
+{
+ struct list_head *tmp1, *tmp2;
+ struct csio_lnode *sln = NULL, *cln = NULL;
+
+ if (list_empty(&hw->sln_head)) {
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+ }
+ /* Traverse sibling lnodes */
+ list_for_each(tmp1, &hw->sln_head) {
+ sln = (struct csio_lnode *) tmp1;
+
+ /* Match sibling lnode */
+ if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
+ return sln;
+
+ if (list_empty(&sln->cln_head))
+ continue;
+
+ /* Traverse children lnodes */
+ list_for_each(tmp2, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp2;
+
+ if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
+ return cln;
+ }
+ }
+ return NULL;
+}
+
+/* FDMI */
+static void
+csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
+{
+ struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
+ cmd->ct_rev = FC_CT_REV;
+ cmd->ct_fs_type = type;
+ cmd->ct_fs_subtype = sub_type;
+ cmd->ct_cmd = htons(op);
+}
+
+static int
+csio_hostname(uint8_t *buf, size_t buf_len)
+{
+ if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
+ return 0;
+ return -1;
+}
+
+static int
+csio_osname(uint8_t *buf, size_t buf_len)
+{
+ if (snprintf(buf, buf_len, "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version) > 0)
+ return 0;
+
+ return -1;
+}
+
+static inline void
+csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
+{
+ struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
+ ae->type = htons(type);
+ len += 4; /* includes attribute type and length */
+ len = (len + 3) & ~3; /* should be multiple of 4 bytes */
+ ae->len = htons(len);
+ memcpy(ae->value, val, len);
+ *ptr += len;
+}
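+
+/*
+ * Example: appending a 6-byte attribute value gives len = 6 + 4 = 10
+ * for the type/length header, rounded up to 12, so each FDMI attribute
+ * entry occupies a 4-byte aligned slot and *ptr advances by exactly
+ * that amount.
+ */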
+
+/*
+ * csio_ln_fdmi_done - FDMI registration completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ struct csio_lnode *ln = fdmi_req->lnode;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+}
+
+/*
+ * csio_ln_fdmi_rhba_cbfn - RHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ uint8_t *pld;
+ uint32_t len = 0;
+ __be32 val;
+ __be16 mfs;
+ uint32_t numattrs = 0;
+ struct csio_lnode *ln = fdmi_req->lnode;
+ struct fs_fdmi_attrs *attrib_blk;
+ struct fc_fdmi_port_name *port_name;
+ uint8_t buf[64];
+ uint8_t *fc4_type;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+
+ /* Prepare CT hdr for RPA cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);
+
+ /* Prepare RPA payload */
+ pld = (uint8_t *)csio_ct_get_pld(cmd);
+ port_name = (struct fc_fdmi_port_name *)pld;
+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
+ pld += sizeof(*port_name);
+
+ /* Start appending Port attributes */
+ attrib_blk = (struct fs_fdmi_attrs *)pld;
+ attrib_blk->numattrs = 0;
+ len += sizeof(attrib_blk->numattrs);
+ pld += sizeof(attrib_blk->numattrs);
+
+ fc4_type = &buf[0];
+ memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ fc4_type[2] = 1;
+ fc4_type[7] = 1;
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
+ fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ numattrs++;
+ val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
+ (uint8_t *)&val,
+ FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
+ numattrs++;
+
+ if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
+ val = htonl(FC_PORTSPEED_1GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
+ val = htonl(FC_PORTSPEED_10GBIT);
+ else
+ val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
+ (uint8_t *)&val,
+ FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ numattrs++;
+
+ mfs = ln->ln_sparm.csp.sp_bb_data;
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
+ (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ numattrs++;
+
+ strcpy(buf, "csiostor");
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
+ (uint16_t)strlen(buf));
+ numattrs++;
+
+ if (!csio_hostname(buf, sizeof(buf))) {
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
+ buf, (uint16_t)strlen(buf));
+ numattrs++;
+ }
+ attrib_blk->numattrs = htonl(numattrs);
+ len = (uint32_t)(pld - (uint8_t *)cmd);
+
+ /* Submit FDMI RPA request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_ln_fdmi_dprt_cbfn - DPRT completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ uint8_t *pld;
+ uint32_t len = 0;
+ uint32_t numattrs = 0;
+ __be32 maxpayload = htonl(65536);
+ struct fc_fdmi_hba_identifier *hbaid;
+ struct csio_lnode *ln = fdmi_req->lnode;
+ struct fc_fdmi_rpl *reg_pl;
+ struct fs_fdmi_attrs *attrib_blk;
+ uint8_t buf[64];
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ /* Prepare CT hdr for RHBA cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
+ len = FC_CT_HDR_LEN;
+
+ /* Prepare RHBA payload */
+ pld = (uint8_t *)csio_ct_get_pld(cmd);
+ hbaid = (struct fc_fdmi_hba_identifier *)pld;
+ memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
+ pld += sizeof(*hbaid);
+
+ /* Register one port per hba */
+ reg_pl = (struct fc_fdmi_rpl *)pld;
+ reg_pl->numport = htonl(1);
+ memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
+ pld += sizeof(*reg_pl);
+
+ /* Start appending HBA attributes */
+ attrib_blk = (struct fs_fdmi_attrs *)pld;
+ attrib_blk->numattrs = 0;
+ len += sizeof(attrib_blk->numattrs);
+ pld += sizeof(attrib_blk->numattrs);
+
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
+ FC_FDMI_HBA_ATTR_NODENAME_LEN);
+ numattrs++;
+
+ memset(buf, 0, sizeof(buf));
+
+ strcpy(buf, "Chelsio Communications");
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
+ (uint16_t)strlen(buf));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
+ hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
+ (uint16_t)sizeof(hw->vpd.id));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
+ hw->model_desc, (uint16_t)strlen(hw->model_desc));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
+ hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
+ hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
+ numattrs++;
+
+ if (!csio_osname(buf, sizeof(buf))) {
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
+ buf, (uint16_t)strlen(buf));
+ numattrs++;
+ }
+
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+ (uint8_t *)&maxpayload,
+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ len = (uint32_t)(pld - (uint8_t *)cmd);
+ numattrs++;
+ attrib_blk->numattrs = htonl(numattrs);
+
+ /* Submit FDMI RHBA request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_ln_fdmi_dhba_cbfn - DHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ struct csio_lnode *ln = fdmi_req->lnode;
+ void *cmd;
+ struct fc_fdmi_port_name *port_name;
+ uint32_t len;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ /* Send FDMI cmd to de-register any Port attributes if registered
+ * before
+ */
+
+ /* Prepare FDMI DPRT cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
+ len = FC_CT_HDR_LEN;
+ port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
+ len += sizeof(*port_name);
+
+ /* Submit FDMI request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/**
+ * csio_ln_fdmi_start - Start an FDMI request.
+ * @ln: lnode
+ * @context: session context
+ *
+ * Issued with lock held.
+ */
+int
+csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
+{
+ struct csio_ioreq *fdmi_req;
+ struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
+ void *cmd;
+ struct fc_fdmi_hba_identifier *hbaid;
+ uint32_t len;
+
+ if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
+ return -EPROTONOSUPPORT;
+
+ if (!csio_is_rnode_ready(fdmi_rn))
+ CSIO_INC_STATS(ln, n_fdmi_err);
+
+ /* Send FDMI cmd to de-register any HBA attributes if registered
+ * before
+ */
+
+ fdmi_req = ln->mgmt_req;
+ fdmi_req->lnode = ln;
+ fdmi_req->rnode = fdmi_rn;
+
+ /* Prepare FDMI DHBA cmd */
+ cmd = fdmi_req->dma_buf.vaddr;
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
+ len = FC_CT_HDR_LEN;
+
+ hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
+ memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
+ len += sizeof(*hbaid);
+
+ /* Submit FDMI request */
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
+ }
+
+ return 0;
+}
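+
+/*
+ * The FDMI registration is thus a chain of CT requests, each issued
+ * from the previous request's completion callback:
+ *
+ *   csio_ln_fdmi_start()     -> DHBA (de-register HBA)
+ *   csio_ln_fdmi_dhba_cbfn() -> DPRT (de-register port)
+ *   csio_ln_fdmi_dprt_cbfn() -> RHBA (register HBA attributes)
+ *   csio_ln_fdmi_rhba_cbfn() -> RPA  (register port attributes)
+ *   csio_ln_fdmi_done()      -> RPA completion
+ */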
+
+/*
+ * csio_ln_vnp_read_cbfn - vnp read completion handler.
+ * @hw: HW module
+ * @mbp: Mailbox command/response.
+ *
+ * Reads the vnp response and updates ln parameters.
+ */
+static void
+csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
+ struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ struct fc_els_csp *csp;
+ struct fc_els_cssp *clsp;
+ enum fw_retval retval;
+ __be32 nport_id;
+
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+
+ memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
+ memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
+ ln->nport_id = ntohl(nport_id);
+ ln->nport_id = ln->nport_id >> 8;
+
+ /* Update WWNs */
+ /*
+ * This may look like a duplication of what csio_fcoe_enable_link()
+ * does, but is absolutely necessary if the vnpi changes between
+ * a FCOE LINK UP and FCOE LINK DOWN.
+ */
+ memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
+
+ /* Copy common sparam */
+ csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
+ ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
+ ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
+ ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
+ ln->ln_sparm.csp.sp_features = csp->sp_features;
+ ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
+ ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
+ ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;
+
+ /* Copy word 0 & word 1 of class sparam */
+ clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
+ ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
+ ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
+ ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
+ ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;
+
+ spin_unlock_irq(&hw->lock);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ /* Send an event to update local attribs */
+ csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
+}
+
+/*
+ * csio_ln_vnp_read - Read vnp params.
+ * @ln: lnode
+ * @cbfn: Completion handler.
+ *
+ * Issued with lock held.
+ */
+static int
+csio_ln_vnp_read(struct csio_lnode *ln,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+
+ /* Allocate Mbox request */
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Prepare VNP Command */
+ csio_fcoe_vnp_read_init_mb(ln, mbp,
+ CSIO_MB_DEFAULT_TMO,
+ ln->fcf_flowid,
+ ln->vnp_flowid,
+ cbfn);
+
+ /* Issue MBOX cmd */
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_fcoe_enable_link - Enable fcoe link.
+ * @ln: lnode
+ * @enable: enable/disable
+ * Issued with lock held.
+ * Issues mbox cmd to bring up FCOE link on port associated with given ln.
+ */
+static int
+csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ uint8_t portid;
+ uint8_t sub_op;
+ struct fw_fcoe_link_cmd *lcmd;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ portid = ln->portid;
+ sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;
+
+ csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
+ sub_op ? "UP" : "DOWN", portid);
+
+ csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ portid, sub_op, 0, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw,
+ "FCOE LINK %s cmd on port[%d] failed with "
+ "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (!enable)
+ goto out;
+
+ lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;
+
+ memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);
+
+ for (i = 0; i < CSIO_MAX_PPORTS; i++)
+ if (hw->pport[i].portid == portid)
+ memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);
+
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return 0;
+}
+
+/*
+ * csio_ln_read_fcf_cbfn - Read FCF parameters completion handler.
+ * @hw: HW module
+ * @mbp: Mailbox command/response.
+ *
+ * Reads the FCF response and updates ln FCF information.
+ */
+static void
+csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
+ struct csio_fcf_info *fcf_info;
+ struct fw_fcoe_fcf_cmd *rsp =
+ (struct fw_fcoe_fcf_cmd *)(mbp->mb);
+ enum fw_retval retval;
+
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ if (retval != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ fcf_info = ln->fcfinfo;
+ fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
+ ntohs(rsp->priority_pkd));
+ fcf_info->vf_id = ntohs(rsp->vf_id);
+ fcf_info->vlan_id = rsp->vlan_id;
+ fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
+ fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
+ fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
+ fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
+ fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
+ fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
+ fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
+ memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
+ memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
+ memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
+ memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
+ memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
+
+ spin_unlock_irq(&hw->lock);
+
+ mempool_free(mbp, hw->mb_mempool);
+}
+
+/*
+ * csio_ln_read_fcf_entry - Read fcf entry.
+ * @ln: lnode
+ * @cbfn: Completion handler.
+ *
+ * Issued with lock held.
+ */
+static int
+csio_ln_read_fcf_entry(struct csio_lnode *ln,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get FCoE FCF information */
+ csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ ln->portid, ln->fcf_flowid, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FCOE FCF cmd\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_handle_link_up - Logical Linkup event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none.
+ *
+ * This event is received from FW when a virtual link is established between
+ * the Physical port [ENode] and the FCF. If it is a new vnpi, a local node
+ * object is created on this FCF and set to the [ONLINE] state.
+ * The lnode then waits for an FW_RDEV_CMD event indicating that fabric
+ * login has completed, at which point it moves to the [READY] state.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ uint32_t vnpi)
+{
+ struct csio_lnode *ln = NULL;
+
+ /* Lookup lnode based on vnpi */
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (!ln) {
+ /* Pick lnode based on portid */
+ ln = csio_ln_lookup_by_portid(hw, portid);
+ if (!ln) {
+ csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
+ portid);
+ CSIO_DB_ASSERT(0);
+ return;
+ }
+
+ /* Check if lnode has valid vnp flowid */
+ if (ln->vnp_flowid != CSIO_INVALID_IDX) {
+ /* New VN-Port */
+ spin_unlock_irq(&hw->lock);
+ ln = csio_lnode_alloc(hw);
+ spin_lock_irq(&hw->lock);
+ if (!ln) {
+ csio_err(hw,
+ "failed to allocate fcoe lnode"
+ "for port:%d vnpi:x%x\n",
+ portid, vnpi);
+ CSIO_DB_ASSERT(0);
+ return;
+ }
+ ln->portid = portid;
+ }
+ ln->vnp_flowid = vnpi;
+ ln->dev_num &= ~0xFFFF;
+ ln->dev_num |= vnpi;
+ }
+
+ /* Initialize fcfi */
+ ln->fcf_flowid = fcfi;
+
+ csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);
+
+ CSIO_INC_STATS(ln, n_link_up);
+
+ /* Send LINKUP event to SM */
+ csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
+}
+
+/*
+ * csio_post_event_rns
+ * @ln - FCOE lnode
+ * @evt - Given rnode event
+ * Returns - none
+ *
+ * Posts given rnode event to all FCOE rnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp, *next;
+ struct csio_rnode *rn;
+
+ list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ csio_post_event(&rn->sm, evt);
+ }
+}
+
+/*
+ * csio_cleanup_rns
+ * @ln - FCOE lnode
+ * Returns - none
+ *
+ * Frees all FCOE rnodes connected with given Lnode.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_cleanup_rns(struct csio_lnode *ln)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp, *next_rn;
+ struct csio_rnode *rn;
+
+ list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ csio_put_rnode(ln, rn);
+ }
+
+}
+
+/*
+ * csio_post_event_lns
+ * @ln - FCOE lnode
+ * @evt - Given lnode event
+ * Returns - none
+ *
+ * Posts given lnode event to all FCOE lnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct list_head *tmp;
+ struct csio_lnode *cln, *sln;
+
+ /* If NPIV lnode, send evt only to that and return */
+ if (csio_is_npiv_ln(ln)) {
+ csio_post_event(&ln->sm, evt);
+ return;
+ }
+
+ sln = ln;
+ /* Traverse children lnodes list and send evt */
+ list_for_each(tmp, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp;
+ csio_post_event(&cln->sm, evt);
+ }
+
+ /* Send evt to parent lnode */
+ csio_post_event(&ln->sm, evt);
+}
+
+/*
+ * csio_ln_down - Local nport is down
+ * @ln - FCOE Lnode
+ * Returns - none
+ *
+ * Sends LINK_DOWN events to Lnode and its associated NPIVs lnodes.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_ln_down(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
+}
+
+/*
+ * csio_handle_link_down - Logical Linkdown event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none
+ *
+ * This event is received from FW when the virtual link goes down between
+ * the Physical port [ENode] and the FCF. The lnode and its associated NPIV
+ * lnodes hosted on this vnpi [VN-Port] will be de-instantiated.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ uint32_t vnpi)
+{
+ struct csio_fcf_info *fp;
+ struct csio_lnode *ln;
+
+ /* Lookup lnode based on vnpi */
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (ln) {
+ fp = ln->fcfinfo;
+ CSIO_INC_STATS(ln, n_link_down);
+
+ /* Warn if linkdown is received while the lnode is not in ready state */
+ if (!csio_is_lnode_ready(ln)) {
+ csio_ln_warn(ln,
+ "warn: FCOE link is already offline. "
+ "Ignoring FCoE linkdown event on portid %d\n",
+ portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ /* Verify portid */
+ if (fp->portid != portid) {
+ csio_ln_warn(ln,
+ "warn: FCOE linkdown recv with "
+ "invalid port %d\n", portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ /* verify fcfi */
+ if (ln->fcf_flowid != fcfi) {
+ csio_ln_warn(ln,
+ "warn: FCOE linkdown recv with "
+ "invalid fcfi x%x\n", fcfi);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);
+
+ /* Send LINK_DOWN event to lnode s/m */
+ csio_ln_down(ln);
+
+ return;
+ } else {
+ csio_warn(hw,
+ "warn: FCOE linkdown recv with invalid vnpi x%x\n",
+ vnpi);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ }
+}
+
+/*
+ * csio_is_lnode_ready - Checks if the FCOE lnode is in ready state.
+ * @ln: Lnode module
+ *
+ * Returns True if FCOE lnode is in ready state.
+ */
+int
+csio_is_lnode_ready(struct csio_lnode *ln)
+{
+ return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
+}
+
+/*****************************************************************************/
+/* START: Lnode SM */
+/*****************************************************************************/
+/*
+ * csio_lns_uninit - Handle lnode events in the uninit state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "uninit" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_lnode *rln = hw->rln;
+ int rv;
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_set_state(&ln->sm, csio_lns_online);
+ /* Read FCF only for physical lnode */
+ if (csio_is_phys_ln(ln)) {
+ rv = csio_ln_read_fcf_entry(ln,
+ csio_ln_read_fcf_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ break;
+ }
+
+ /* Add FCF record */
+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+ }
+
+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ }
+ break;
+
+ case CSIO_LNE_DOWN_LINK:
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_online - Handle lnode events in the online state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "online" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_ln_warn(ln,
+ "warn: FCOE link is up already "
+ "Ignoring linkup on port:%d\n", ln->portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_FAB_INIT_DONE:
+ csio_set_state(&ln->sm, csio_lns_ready);
+
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
+ spin_lock_irq(&hw->lock);
+
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ /* Fall through */
+ case CSIO_LNE_DOWN_LINK:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_ready - Handle lnode events in the ready state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "ready" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_FAB_INIT_DONE:
+ csio_ln_dbg(ln,
+ "ignoring event %d recv from did x%x"
+ "in ln state[ready].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+ spin_lock_irq(&hw->lock);
+
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ case CSIO_LNE_DOWN_LINK:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+ /* The host needs to issue aborts in case FW has not returned
+ * WRs with status "ABORTED"
+ */
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+ spin_lock_irq(&hw->lock);
+
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ case CSIO_LNE_CLOSE:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+ break;
+
+ case CSIO_LNE_LOGO:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ CSIO_DB_ASSERT(0);
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_offline - Handle lnode events in the offline state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "offline" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_lnode *rln = hw->rln;
+ int rv;
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_set_state(&ln->sm, csio_lns_online);
+ /* Read FCF only for physical lnode */
+ if (csio_is_phys_ln(ln)) {
+ rv = csio_ln_read_fcf_entry(ln,
+ csio_ln_read_fcf_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ break;
+ }
+
+ /* Add FCF record */
+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+ }
+
+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ }
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ case CSIO_LNE_DOWN_LINK:
+ case CSIO_LNE_LOGO:
+ csio_ln_dbg(ln,
+ "ignoring event %d recv from did x%x"
+ "in ln state[offline].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_CLOSE:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[offline]\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ CSIO_DB_ASSERT(0);
+ break;
+ } /* switch event */
+}
+
+/*****************************************************************************/
+/* END: Lnode SM */
+/*****************************************************************************/
+
+static void
+csio_free_fcfinfo(struct kref *kref)
+{
+ struct csio_fcf_info *fcfinfo = container_of(kref,
+ struct csio_fcf_info, kref);
+ kfree(fcfinfo);
+}
+
+/* Helper routines for attributes */
+/*
+ * csio_lnode_state_to_str - Get current state of FCOE lnode.
+ * @ln - lnode
+ * @str - state of lnode.
+ *
+ */
+void
+csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
+{
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
+ strcpy(str, "UNINIT");
+ return;
+ }
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
+ strcpy(str, "READY");
+ return;
+ }
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
+ strcpy(str, "OFFLINE");
+ return;
+ }
+ strcpy(str, "UNKNOWN");
+} /* csio_lnode_state_to_str */
+
+
+int
+csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
+ struct fw_fcoe_port_stats *port_stats)
+{
+ struct csio_mb *mbp;
+ struct fw_fcoe_port_cmd_params portparams;
+ enum fw_retval retval;
+ int idx;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
+ return -EINVAL;
+ }
+ portparams.portid = portid;
+
+ for (idx = 1; idx <= 3; idx++) {
+ portparams.idx = (idx-1)*6 + 1;
+ portparams.nstats = 6;
+ if (idx == 3)
+ portparams.nstats = 4;
+ csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ &portparams, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FCoE port params failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+ csio_mb_process_portparams_rsp(hw, mbp, &retval,
+ &portparams, port_stats);
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ return 0;
+}
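+
+/*
+ * The counters are fetched in three mailbox passes of 6, 6 and 4
+ * statistics (idx 1, 7 and 13), presumably because a single mailbox
+ * response can only carry a handful of the fw_fcoe_port_stats words
+ * at a time.
+ */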
+
+/*
+ * csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
+ * @hw - HW module.
+ * @wr - WR.
+ * @len - WR len.
+ * This handler is invoked when an outstanding mgmt WR is completed.
+ * It is invoked in the context of the FW event worker thread for every
+ * mgmt event received.
+ * Return - none.
+ */
+
+static void
+csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
+{
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ struct csio_ioreq *io_req = NULL;
+ struct fw_fcoe_els_ct_wr *wr_cmd;
+
+
+ wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;
+
+ if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
+ csio_err(mgmtm->hw,
+ "Invalid ELS CT WR length recvd, len:%x\n", len);
+ mgmtm->stats.n_err++;
+ return;
+ }
+
+ io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
+ io_req->wr_status = csio_wr_status(wr_cmd);
+
+ /* lookup ioreq exists in our active Q */
+ spin_lock_irq(&hw->lock);
+ if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
+ csio_err(mgmtm->hw,
+ "Error- Invalid IO handle recv in WR. handle: %p\n",
+ io_req);
+ mgmtm->stats.n_err++;
+ spin_unlock_irq(&hw->lock);
+ return;
+ }
+
+ mgmtm = csio_hw_to_mgmtm(hw);
+
+ /* Dequeue from active queue */
+ list_del_init(&io_req->sm.sm_list);
+ mgmtm->stats.n_active--;
+ spin_unlock_irq(&hw->lock);
+
+ /* io_req will be freed by completion handler */
+ if (io_req->io_cbfn)
+ io_req->io_cbfn(hw, io_req);
+}
+
+/**
+ * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
+ * @hw: HW module
+ * @cpl_op: CPL opcode
+ * @cmd: FW cmd/WR.
+ *
+ * Process received FCoE cmd/WR event from FW.
+ */
+void
+csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
+{
+ struct csio_lnode *ln;
+ struct csio_rnode *rn;
+ uint8_t portid, opcode = *(uint8_t *)cmd;
+ struct fw_fcoe_link_cmd *lcmd;
+ struct fw_wr_hdr *wr;
+ struct fw_rdev_wr *rdev_wr;
+ enum fw_fcoe_link_status lstatus;
+ uint32_t fcfi, rdev_flowid, vnpi;
+ enum csio_ln_ev evt;
+
+ if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
+
+ lcmd = (struct fw_fcoe_link_cmd *)cmd;
+ lstatus = lcmd->lstatus;
+ portid = FW_FCOE_LINK_CMD_PORTID_GET(
+ ntohl(lcmd->op_to_portid));
+ fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
+ vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));
+
+ if (lstatus == FCOE_LINKUP) {
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ csio_handle_link_up(hw, portid, fcfi, vnpi);
+ spin_unlock_irq(&hw->lock);
+ /* HW un lock here */
+
+ } else if (lstatus == FCOE_LINKDOWN) {
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ csio_handle_link_down(hw, portid, fcfi, vnpi);
+ spin_unlock_irq(&hw->lock);
+ /* HW un lock here */
+ } else {
+ csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
+ lcmd->lstatus);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else if (cpl_op == CPL_FW6_PLD) {
+ wr = (struct fw_wr_hdr *) (cmd + 4);
+ if (FW_WR_OP_GET(be32_to_cpu(wr->hi))
+ == FW_RDEV_WR) {
+
+ rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
+
+ rdev_flowid = FW_RDEV_WR_FLOWID_GET(
+ ntohl(rdev_wr->alloc_to_len16));
+ vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
+ ntohl(rdev_wr->flags_to_assoc_flowid));
+
+ csio_dbg(hw,
+ "FW_RDEV_WR: flowid:x%x ev_cause:x%x "
+ "vnpi:0x%x\n", rdev_flowid,
+ rdev_wr->event_cause, vnpi);
+
+ if (rdev_wr->protocol != PROT_FCOE) {
+ csio_err(hw,
+ "FW_RDEV_WR: invalid proto:x%x "
+ "received with flowid:x%x\n",
+ rdev_wr->protocol,
+ rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ return;
+ }
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (!ln) {
+ csio_err(hw,
+ "FW_DEV_WR: invalid vnpi:x%x received "
+ "with flowid:x%x\n", vnpi, rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out_pld;
+ }
+
+ rn = csio_confirm_rnode(ln, rdev_flowid,
+ &rdev_wr->u.fcoe_rdev);
+ if (!rn) {
+ csio_ln_dbg(ln,
+ "Failed to confirm rnode "
+ "for flowid:x%x\n", rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out_pld;
+ }
+
+ /* save previous event for debugging */
+ ln->prev_evt = ln->cur_evt;
+ ln->cur_evt = rdev_wr->event_cause;
+ CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);
+
+ /* Translate all the fabric events to lnode SM events */
+ evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
+ if (evt) {
+ csio_ln_dbg(ln,
+ "Posting event to lnode event:%d "
+ "cause:%d flowid:x%x\n", evt,
+ rdev_wr->event_cause, rdev_flowid);
+ csio_post_event(&ln->sm, evt);
+ }
+
+ /* Handover event to rn SM here. */
+ csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
+out_pld:
+ spin_unlock_irq(&hw->lock);
+ return;
+ } else {
+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",
+ FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else if (cpl_op == CPL_FW6_MSG) {
+ wr = (struct fw_wr_hdr *) (cmd);
+ if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
+ csio_ln_mgmt_wr_handler(hw, wr,
+ sizeof(struct fw_fcoe_els_ct_wr));
+ } else {
+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",
+ FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else {
+ csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+}
+
+/**
+ * csio_lnode_start - Kickstart lnode discovery.
+ * @ln: lnode
+ *
+ * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
+ */
+int
+csio_lnode_start(struct csio_lnode *ln)
+{
+ int rv = 0;
+ if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
+ rv = csio_fcoe_enable_link(ln, 1);
+ ln->flags |= CSIO_LNF_LINK_ENABLE;
+ }
+
+ return rv;
+}
+
+/**
+ * csio_lnode_stop - Stop the lnode.
+ * @ln: lnode
+ *
+ * This routine is invoked by HW module to stop lnode and its associated NPIV
+ * lnodes.
+ */
+void
+csio_lnode_stop(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
+ if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
+ csio_fcoe_enable_link(ln, 0);
+ ln->flags &= ~CSIO_LNF_LINK_ENABLE;
+ }
+ csio_ln_dbg(ln, "stopping ln :%p\n", ln);
+}
+
+/**
+ * csio_lnode_close - Close an lnode.
+ * @ln: lnode
+ *
+ * This routine is invoked by HW module to close an lnode and its
+ * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
+ * set to uninitialized state.
+ */
+void
+csio_lnode_close(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_CLOSE);
+ if (csio_is_phys_ln(ln))
+ ln->vnp_flowid = CSIO_INVALID_IDX;
+
+ csio_ln_dbg(ln, "closed ln :%p\n", ln);
+}
+
+/*
+ * csio_ln_prep_ecwr - Prepare ELS/CT WR.
+ * @io_req - IO request.
+ * @wr_len - WR len
+ * @immd_len - WR immediate data length
+ * @sub_op - Sub opcode
+ * @sid - source portid.
+ * @did - destination portid
+ * @flow_id - flowid
+ * @fw_wr - ELS/CT WR to be prepared.
+ * Returns: 0 - on success
+ */
+static int
+csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
+ uint32_t immd_len, uint8_t sub_op, uint32_t sid,
+ uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
+{
+ struct fw_fcoe_els_ct_wr *wr;
+ __be32 port_id;
+
+ wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
+ FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
+
+ wr_len = DIV_ROUND_UP(wr_len, 16);
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
+ FW_WR_LEN16(wr_len));
+ wr->els_ct_type = sub_op;
+ wr->ctl_pri = 0;
+ wr->cp_en_class = 0;
+ wr->cookie = io_req->fw_handle;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(
+ io_req->lnode->hwp, io_req->iq_idx));
+ wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
+ wr->tmo_val = (uint8_t) io_req->tmo;
+ port_id = htonl(sid);
+ memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
+ port_id = htonl(did);
+ memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
+ wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
+ return 0;
+}
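+
+/*
+ * FC port IDs are 24-bit values: htonl() leaves the ID in the low
+ * three bytes of the big-endian word, and PORT_ID_PTR() skips the
+ * leading zero byte, so the 3-byte memcpy above places exactly the
+ * sid/did octets into the WR's l_id/r_id fields.
+ */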
+
+/*
+ * csio_ln_mgmt_submit_wr - Post elsct work request.
+ * @mgmtm - mgmtm
+ * @io_req - io request.
+ * @sub_op - ELS or CT request type
+ * @pld - Dma Payload buffer
+ * @pld_len - Payload len
+ * Prepares an ELS/CT work request and sends it to FW.
+ * Returns: 0 - on success
+ */
+static int
+csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
+ uint8_t sub_op, struct csio_dma_buf *pld,
+ uint32_t pld_len)
+{
+ struct csio_wr_pair wrp;
+ struct csio_lnode *ln = io_req->lnode;
+ struct csio_rnode *rn = io_req->rnode;
+ struct csio_hw *hw = mgmtm->hw;
+ uint8_t fw_wr[64];
+ struct ulptx_sgl dsgl;
+ uint32_t wr_size = 0;
+ uint8_t im_len = 0;
+ uint32_t wr_off = 0;
+
+ int ret = 0;
+
+ /* Calculate WR Size for this ELS REQ */
+ wr_size = sizeof(struct fw_fcoe_els_ct_wr);
+
+ /* Send as immediate data if pld < 256 */
+ if (pld_len < 256) {
+ wr_size += ALIGN(pld_len, 8);
+ im_len = (uint8_t)pld_len;
+ } else
+ wr_size += sizeof(struct ulptx_sgl);
+
+ /* Roundup WR size in units of 16 bytes */
+ wr_size = ALIGN(wr_size, 16);
+
+ /* Get WR to send ELS REQ */
+ ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
+ if (ret != 0) {
+ csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
+ io_req, ret);
+ return ret;
+ }
+
+ /* Prepare Generic WR used by all ELS/CT cmd */
+ csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
+ ln->nport_id, rn->nport_id,
+ csio_rn_flowid(rn),
+ &fw_wr[0]);
+
+ /* Copy ELS/CT WR CMD */
+ csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
+ sizeof(struct fw_fcoe_els_ct_wr));
+ wr_off += sizeof(struct fw_fcoe_els_ct_wr);
+
+ /* Copy payload to Immediate section of WR */
+ if (im_len)
+ csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
+ else {
+ /* Program DSGL to dma payload */
+ dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_MORE | ULPTX_NSGE(1));
+ dsgl.len0 = cpu_to_be32(pld_len);
+ dsgl.addr0 = cpu_to_be64(pld->paddr);
+ csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
+ sizeof(struct ulptx_sgl));
+ }
+
+ /* Issue work request to xmit ELS/CT req to FW */
+ csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
+ return ret;
+}
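+
+/*
+ * Payloads shorter than 256 bytes are copied inline into the WR as
+ * immediate data; larger payloads are passed by reference through a
+ * single ulptx_sgl entry pointing at the DMA buffer, avoiding a copy
+ * of the full payload into the work request.
+ */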
+
+/*
+ * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
+ * @io_req - IO Request
+ * @io_cbfn - Completion handler.
+ * @req_type - ELS or CT request type
+ * @pld - Dma Payload buffer
+ * @pld_len - Payload len
+ *
+ *
+ * This API is used to submit a management ELS/CT request.
+ * This is called with the hw lock held.
+ * Returns: 0 - on success
+ * -ENOMEM - on error.
+ */
+static int
+csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+ enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
+ uint32_t pld_len)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+
+ io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
+ io_req->fw_handle = (uintptr_t) (io_req);
+ io_req->eq_idx = mgmtm->eq_idx;
+ io_req->iq_idx = mgmtm->iq_idx;
+
+ rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
+ if (rv == 0) {
+ list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
+ mgmtm->stats.n_active++;
+ }
+ return rv;
+}
+
+/*
+ * csio_ln_fdmi_init - FDMI Init entry point.
+ * @ln: lnode
+ */
+static int
+csio_ln_fdmi_init(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_dma_buf *dma_buf;
+
+ /* Allocate MGMT request required for FDMI */
+ ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
+ if (!ln->mgmt_req) {
+ csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Allocate Dma buffers for FDMI response Payload */
+ dma_buf = &ln->mgmt_req->dma_buf;
+ dma_buf->len = 2048;
+ dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
+ &dma_buf->paddr);
+ if (!dma_buf->vaddr) {
+ csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
+ kfree(ln->mgmt_req);
+ ln->mgmt_req = NULL;
+ return -ENOMEM;
+ }
+
+ ln->flags |= CSIO_LNF_FDMI_ENABLE;
+ return 0;
+}
+
+/*
+ * csio_ln_fdmi_exit - FDMI exit entry point.
+ * @ln: lnode
+ */
+static int
+csio_ln_fdmi_exit(struct csio_lnode *ln)
+{
+ struct csio_dma_buf *dma_buf;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (!ln->mgmt_req)
+ return 0;
+
+ dma_buf = &ln->mgmt_req->dma_buf;
+ if (dma_buf->vaddr)
+ pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ln->mgmt_req);
+ return 0;
+}
+
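+/*
+ * csio_scan_done - Check whether the SCSI target scan can be ended.
+ * @ln: lnode
+ * @ticks: Current tick count.
+ * @time: Time spent scanning so far, compared against @max_scan_ticks.
+ * @max_scan_ticks: Absolute upper bound on the scan duration.
+ * @delta_scan_ticks: Interval after which the target count is re-sampled.
+ *
+ * Returns 1 if the scan should be considered complete: either the maximum
+ * scan time has elapsed, or the number of discovered SCSI targets has not
+ * changed across the last delta interval. Returns 0 otherwise.
+ */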
+int
+csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
+ unsigned long time, unsigned long max_scan_ticks,
+ unsigned long delta_scan_ticks)
+{
+ int rv = 0;
+
+ if (time >= max_scan_ticks)
+ return 1;
+
+ if (!ln->tgt_scan_tick)
+ ln->tgt_scan_tick = ticks;
+
+ if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) {
+ if (!ln->last_scan_ntgts)
+ ln->last_scan_ntgts = ln->n_scsi_tgts;
+ else {
+ if (ln->last_scan_ntgts == ln->n_scsi_tgts)
+ return 1;
+
+ ln->last_scan_ntgts = ln->n_scsi_tgts;
+ }
+ ln->tgt_scan_tick = ticks;
+ }
+ return rv;
+}
+
+/*
+ * csio_notify_lnodes:
+ * @hw: HW module
+ * @note: Notification
+ *
+ * Called from the HW SM to fan out notifications to the
+ * Lnode SM. Since the HW SM is entered with lock held,
+ * there is no need to hold locks here.
+ *
+ */
+void
+csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
+{
+ struct list_head *tmp;
+ struct csio_lnode *ln;
+
+ csio_dbg(hw, "Notifying all nodes of event %d\n", note);
+
+ /* Traverse children lnodes list and send evt */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+
+ switch (note) {
+ case CSIO_LN_NOTIFY_HWREADY:
+ csio_lnode_start(ln);
+ break;
+
+ case CSIO_LN_NOTIFY_HWRESET:
+ case CSIO_LN_NOTIFY_HWREMOVE:
+ csio_lnode_close(ln);
+ break;
+
+ case CSIO_LN_NOTIFY_HWSTOP:
+ csio_lnode_stop(ln);
+ break;
+
+ default:
+ break;
+
+ }
+ }
+}
+
+/*
+ * csio_disable_lnodes:
+ * @hw: HW module
+ * @portid: Port id
+ * @disable: disable/enable flag.
+ * If disable=1, disables all lnodes hosted on the given physical port;
+ * otherwise enables all the lnodes on the given physical port.
+ * This routine needs to be called with the hw lock held.
+ */
+void
+csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
+{
+ struct list_head *tmp;
+ struct csio_lnode *ln;
+
+ csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
+
+ /* Traverse sibling lnodes list and send evt */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+ if (ln->portid != portid)
+ continue;
+
+ if (disable)
+ csio_lnode_stop(ln);
+ else
+ csio_lnode_start(ln);
+ }
+}
+
+/*
+ * csio_ln_init - Initialize an lnode.
+ * @ln: lnode
+ *
+ */
+static int
+csio_ln_init(struct csio_lnode *ln)
+{
+ int rv = -EINVAL;
+ struct csio_lnode *rln, *pln;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ csio_init_state(&ln->sm, csio_lns_uninit);
+ ln->vnp_flowid = CSIO_INVALID_IDX;
+ ln->fcf_flowid = CSIO_INVALID_IDX;
+
+ if (csio_is_root_ln(ln)) {
+
+ /* This is the lnode used during initialization */
+
+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
+ if (!ln->fcfinfo) {
+ csio_ln_err(ln, "Failed to alloc FCF record\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&ln->fcf_lsthead);
+ kref_init(&ln->fcfinfo->kref);
+
+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+ goto err;
+
+ } else { /* Either a non-root physical or a virtual lnode */
+
+ /*
+	 * The rest is common for non-root physical and NPIV lnodes.
+ * Just get references to all other modules
+ */
+ rln = csio_root_lnode(ln);
+
+ if (csio_is_npiv_ln(ln)) {
+ /* NPIV */
+ pln = csio_parent_lnode(ln);
+ kref_get(&pln->fcfinfo->kref);
+ ln->fcfinfo = pln->fcfinfo;
+ } else {
+ /* Another non-root physical lnode (FCF) */
+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
+ GFP_KERNEL);
+ if (!ln->fcfinfo) {
+ csio_ln_err(ln, "Failed to alloc FCF info\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto err;
+ }
+
+ kref_init(&ln->fcfinfo->kref);
+
+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+ goto err;
+ }
+
+ } /* if (!csio_is_root_ln(ln)) */
+
+ return 0;
+err:
+ return rv;
+}
+
+static void
+csio_ln_exit(struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+
+ csio_cleanup_rns(ln);
+ if (csio_is_npiv_ln(ln)) {
+ pln = csio_parent_lnode(ln);
+ kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
+ } else {
+ kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
+ if (csio_fdmi_enable)
+ csio_ln_fdmi_exit(ln);
+ }
+ ln->fcfinfo = NULL;
+}
+
+/**
+ * csio_lnode_init - Initialize the members of an lnode.
+ * @ln: lnode
+ * @hw: HW module
+ * @pln: Parent lnode, if any.
+ *
+ */
+int
+csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
+ struct csio_lnode *pln)
+{
+ int rv = -EINVAL;
+
+ /* Link this lnode to hw */
+ csio_lnode_to_hw(ln) = hw;
+
+ /* Link child to parent if child lnode */
+ if (pln)
+ ln->pln = pln;
+ else
+ ln->pln = NULL;
+
+ /* Initialize scsi_tgt and timers to zero */
+ ln->n_scsi_tgts = 0;
+ ln->last_scan_ntgts = 0;
+ ln->tgt_scan_tick = 0;
+
+ /* Initialize rnode list */
+ INIT_LIST_HEAD(&ln->rnhead);
+ INIT_LIST_HEAD(&ln->cln_head);
+
+ /* Initialize log level for debug */
+ ln->params.log_level = hw->params.log_level;
+
+ if (csio_ln_init(ln))
+ goto err;
+
+ /* Add lnode to list of sibling or children lnodes */
+ spin_lock_irq(&hw->lock);
+ list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
+ if (pln)
+ pln->num_vports++;
+ spin_unlock_irq(&hw->lock);
+
+ hw->num_lns++;
+
+ return 0;
+err:
+ csio_lnode_to_hw(ln) = NULL;
+ return rv;
+}
+
+/**
+ * csio_lnode_exit - De-instantiate an lnode.
+ * @ln: lnode
+ *
+ */
+void
+csio_lnode_exit(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ csio_ln_exit(ln);
+
+ /* Remove this lnode from hw->sln_head */
+ spin_lock_irq(&hw->lock);
+
+ list_del_init(&ln->sm.sm_list);
+
+ /* If it is children lnode, decrement the
+ * counter in its parent lnode
+ */
+ if (ln->pln)
+ ln->pln->num_vports--;
+
+ /* Update root lnode pointer */
+ if (list_empty(&hw->sln_head))
+ hw->rln = NULL;
+ else
+ hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);
+
+ spin_unlock_irq(&hw->lock);
+
+ csio_lnode_to_hw(ln) = NULL;
+ hw->num_lns--;
+}
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
new file mode 100644
index 000000000000..8d84988ab06d
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -0,0 +1,255 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_LNODE_H__
+#define __CSIO_LNODE_H__
+
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <scsi/fc/fc_els.h>
+
+
+#include "csio_defs.h"
+#include "csio_hw.h"
+
+#define CSIO_FCOE_MAX_NPIV 128
+#define CSIO_FCOE_MAX_RNODES 2048
+
+/* FDMI port attribute unknown speed */
+#define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000
+
+extern int csio_fcoe_rnodes;
+extern int csio_fdmi_enable;
+
+/* State machine events */
+enum csio_ln_ev {
+ CSIO_LNE_NONE = (uint32_t)0,
+ CSIO_LNE_LINKUP,
+ CSIO_LNE_FAB_INIT_DONE,
+ CSIO_LNE_LINK_DOWN,
+ CSIO_LNE_DOWN_LINK,
+ CSIO_LNE_LOGO,
+ CSIO_LNE_CLOSE,
+ CSIO_LNE_MAX_EVENT,
+};
+
+
+struct csio_fcf_info {
+ struct list_head list;
+ uint8_t priority;
+ uint8_t mac[6];
+ uint8_t name_id[8];
+ uint8_t fabric[8];
+ uint16_t vf_id;
+ uint8_t vlan_id;
+ uint16_t max_fcoe_size;
+ uint8_t fc_map[3];
+ uint32_t fka_adv;
+ uint32_t fcfi;
+ uint8_t get_next:1;
+ uint8_t link_aff:1;
+ uint8_t fpma:1;
+ uint8_t spma:1;
+ uint8_t login:1;
+ uint8_t portid;
+ uint8_t spma_mac[6];
+ struct kref kref;
+};
+
+/* Defines for flags */
+#define CSIO_LNF_FIPSUPP 0x00000001 /* Fip Supported */
+#define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */
+#define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */
+#define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */
+
+/* Transport events */
+enum csio_ln_fc_evt {
+ CSIO_LN_FC_LINKUP = 1,
+ CSIO_LN_FC_LINKDOWN,
+ CSIO_LN_FC_RSCN,
+ CSIO_LN_FC_ATTRIB_UPDATE,
+};
+
+/* Lnode stats */
+struct csio_lnode_stats {
+	uint32_t	n_link_up;	/* Link up */
+	uint32_t	n_link_down;	/* Link down */
+ uint32_t n_err; /* error */
+ uint32_t n_err_nomem; /* memory not available */
+ uint32_t n_inval_parm; /* Invalid parameters */
+ uint32_t n_evt_unexp; /* unexpected event */
+ uint32_t n_evt_drop; /* dropped event */
+ uint32_t n_rnode_match; /* matched rnode */
+ uint32_t n_dev_loss_tmo; /* Device loss timeout */
+ uint32_t n_fdmi_err; /* fdmi err */
+ uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
+ uint32_t n_rnode_alloc; /* rnode allocated */
+ uint32_t n_rnode_free; /* rnode freed */
+ uint32_t n_rnode_nomem; /* rnode alloc failure */
+ uint32_t n_input_requests; /* Input Requests */
+ uint32_t n_output_requests; /* Output Requests */
+ uint32_t n_control_requests; /* Control Requests */
+ uint32_t n_input_bytes; /* Input Bytes */
+ uint32_t n_output_bytes; /* Output Bytes */
+ uint32_t rsvd1;
+};
+
+/* Common Lnode params */
+struct csio_lnode_params {
+ uint32_t ra_tov;
+ uint32_t fcfi;
+ uint32_t log_level; /* Module level for debugging */
+};
+
+struct csio_service_parms {
+ struct fc_els_csp csp; /* Common service parms */
+ uint8_t wwpn[8]; /* WWPN */
+ uint8_t wwnn[8]; /* WWNN */
+ struct fc_els_cssp clsp[4]; /* Class service params */
+ uint8_t vvl[16]; /* Vendor version level */
+};
+
+/* Lnode */
+struct csio_lnode {
+ struct csio_sm sm; /* State machine + sibling
+ * lnode list.
+ */
+ struct csio_hw *hwp; /* Pointer to the HW module */
+ uint8_t portid; /* Port ID */
+ uint8_t rsvd1;
+ uint16_t rsvd2;
+ uint32_t dev_num; /* Device number */
+ uint32_t flags; /* Flags */
+ struct list_head fcf_lsthead; /* FCF entries */
+ struct csio_fcf_info *fcfinfo; /* FCF in use */
+ struct csio_ioreq *mgmt_req; /* MGMT request */
+
+ /* FCoE identifiers */
+ uint8_t mac[6];
+ uint32_t nport_id;
+ struct csio_service_parms ln_sparm; /* Service parms */
+
+ /* Firmware identifiers */
+ uint32_t fcf_flowid; /*fcf flowid */
+ uint32_t vnp_flowid;
+ uint16_t ssn_cnt; /* Registered Session */
+ uint8_t cur_evt; /* Current event */
+ uint8_t prev_evt; /* Previous event */
+
+ /* Children */
+ struct list_head cln_head; /* Head of the children lnode
+ * list.
+ */
+ uint32_t num_vports; /* Total NPIV/children LNodes*/
+ struct csio_lnode *pln; /* Parent lnode of child
+ * lnodes.
+ */
+ struct list_head cmpl_q; /* Pending I/Os on this lnode */
+
+ /* Remote node information */
+ struct list_head rnhead; /* Head of rnode list */
+ uint32_t num_reg_rnodes; /* Number of rnodes registered
+ * with the host.
+ */
+ uint32_t n_scsi_tgts; /* Number of scsi targets
+ * found
+ */
+ uint32_t last_scan_ntgts;/* Number of scsi targets
+ * found per last scan.
+ */
+ uint32_t tgt_scan_tick; /* timer started after
+ * new tgt found
+ */
+ /* FC transport data */
+ struct fc_vport *fc_vport;
+ struct fc_host_statistics fch_stats;
+
+ struct csio_lnode_stats stats; /* Common lnode stats */
+ struct csio_lnode_params params; /* Common lnode params */
+};
+
+#define csio_lnode_to_hw(ln) ((ln)->hwp)
+#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln)
+#define csio_parent_lnode(ln) ((ln)->pln)
+#define csio_ln_flowid(ln) ((ln)->vnp_flowid)
+#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn)
+#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn)
+
+#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0)
+#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0)
+#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0)
+
+
+#define csio_ln_dbg(_ln, _fmt, ...) \
+ csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+#define csio_ln_err(_ln, _fmt, ...) \
+ csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+#define csio_ln_warn(_ln, _fmt, ...) \
+ csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+/* HW->Lnode notifications */
+enum csio_ln_notify {
+ CSIO_LN_NOTIFY_HWREADY = 1,
+ CSIO_LN_NOTIFY_HWSTOP,
+ CSIO_LN_NOTIFY_HWREMOVE,
+ CSIO_LN_NOTIFY_HWRESET,
+};
+
+void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *);
+int csio_is_lnode_ready(struct csio_lnode *);
+void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str);
+struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *);
+int csio_get_phy_port_stats(struct csio_hw *, uint8_t ,
+ struct fw_fcoe_port_stats *);
+int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify);
+void csio_disable_lnodes(struct csio_hw *, uint8_t, bool);
+void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt);
+int csio_ln_fdmi_start(struct csio_lnode *, void *);
+int csio_lnode_start(struct csio_lnode *);
+void csio_lnode_stop(struct csio_lnode *);
+void csio_lnode_close(struct csio_lnode *);
+int csio_lnode_init(struct csio_lnode *, struct csio_hw *,
+ struct csio_lnode *);
+void csio_lnode_exit(struct csio_lnode *);
+
+#endif /* ifndef __CSIO_LNODE_H__ */
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
new file mode 100644
index 000000000000..5b27c48f6836
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -0,0 +1,1750 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_mb.h"
+#include "csio_wr.h"
+
+#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)
+
+/* MB Command/Response Helpers */
+/*
+ * csio_mb_fw_retval - FW return value from a mailbox response.
+ * @mbp: Mailbox structure
+ *
+ */
+enum fw_retval
+csio_mb_fw_retval(struct csio_mb *mbp)
+{
+ struct fw_cmd_hdr *hdr;
+
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ return FW_CMD_RETVAL_GET(ntohl(hdr->lo));
+}
+
+/*
+ * csio_mb_hello - FW HELLO command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout.
+ * @m_mbox: Master mailbox number, if any.
+ * @a_mbox: Mailbox number for async notifications.
+ * @master: Device mastership.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->err_to_clearinit = htonl(
+ FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
+ m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
+ FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
+ FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT);
+
+}
+
+/*
+ * csio_mb_process_hello_rsp - FW HELLO response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @state: State that the function is in.
+ * @mpfn: Master pfn
+ *
+ */
+void
+csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, enum csio_dev_state *state,
+ uint8_t *mpfn)
+{
+ struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
+ uint32_t value;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS) {
+ hw->fwrev = ntohl(rsp->fwrev);
+
+ value = ntohl(rsp->err_to_clearinit);
+ *mpfn = FW_HELLO_CMD_MBMASTER_GET(value);
+
+ if (value & FW_HELLO_CMD_INIT)
+ *state = CSIO_DEV_STATE_INIT;
+ else if (value & FW_HELLO_CMD_ERR)
+ *state = CSIO_DEV_STATE_ERR;
+ else
+ *state = CSIO_DEV_STATE_UNINIT;
+ }
+}
+
+/*
+ * csio_mb_bye - FW BYE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_reset - FW RESET command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @reset: Type of reset.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reset, int halt,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->val = htonl(reset);
+ cmdp->halt_pkd = htonl(halt);
+
+}
+
+/*
+ * csio_mb_params - FW PARAMS command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout.
+ * @pf: PF number.
+ * @vf: VF number.
+ * @nparams: Number of parameters
+ * @params: Parameter mnemonic array.
+ * @val: Parameter value array.
+ * @wr: Write/Read PARAMS.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ unsigned int pf, unsigned int vf, unsigned int nparams,
+ const u32 *params, u32 *val, bool wr,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ uint32_t i;
+ uint32_t temp_params = 0, temp_val = 0;
+ struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
+ __be32 *p = &cmdp->param[0].mnem;
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_WRITE : FW_CMD_READ) |
+ FW_PARAMS_CMD_PFN(pf) |
+ FW_PARAMS_CMD_VFN(vf));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ /* Write Params */
+ if (wr) {
+ while (nparams--) {
+ temp_params = *params++;
+ temp_val = *val++;
+
+ *p++ = htonl(temp_params);
+ *p++ = htonl(temp_val);
+ }
+ } else {
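+		/* Read params: only the mnemonics are sent; the firmware
+		 * fills in the corresponding values in its response.
+		 */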
+ for (i = 0; i < nparams; i++, p += 2) {
+ temp_params = *params++;
+ *p = htonl(temp_params);
+ }
+ }
+
+}
+
+/*
+ * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @nparams: Number of parameters
+ * @val: Parameter value array.
+ *
+ */
+void
+csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, unsigned int nparams,
+ u32 *val)
+{
+ struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
+ uint32_t i;
+ __be32 *p = &rsp->param[0].val;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS)
+ for (i = 0; i < nparams; i++, p += 2)
+ *val++ = ntohl(*p);
+}
+
+/*
+ * csio_mb_ldst - FW LDST command
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: timeout
+ * @reg: register
+ *
+ */
+void
+csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
+{
+ struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);
+
+ /*
+ * Construct and send the Firmware LDST Command to retrieve the
+ * specified PCI-E Configuration Space register.
+ */
+ ldst_cmd->op_to_addrspace =
+ htonl(FW_CMD_OP(FW_LDST_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+ ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
+ ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+ ldst_cmd->u.pcie.ctrl_to_fn =
+ (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
+ ldst_cmd->u.pcie.r = (uint8_t)reg;
+}
+
+/*
+ *
+ * csio_mb_caps_config - FW Read/Write Capabilities command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @wr: Write if 1, Read if 0
+ * @init: Turn on initiator mode.
+ * @tgt: Turn on target mode.
+ * @cofld: If 1, Control Offload for FCoE
+ * @cbfn: Callback, if any.
+ *
+ * This helper assumes that cmdp has MB payload from a previous CAPS
+ * read command.
+ */
+void
+csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ bool wr, bool init, bool tgt, bool cofld,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_caps_config_cmd *cmdp =
+ (struct fw_caps_config_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_WRITE : FW_CMD_READ));
+ cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ /* Read config */
+ if (!wr)
+ return;
+
+ /* Write config */
+ cmdp->fcoecaps = 0;
+
+ if (cofld)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
+ if (init)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
+ if (tgt)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
+}
+
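+/*
+ * csio_rss_glb_config - FW RSS_GLB_CONFIG command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout.
+ * @mode: RSS global mode (manual or basic-virtual).
+ * @flags: Mode-specific flags, used only in basic-virtual mode.
+ * @cbfn: Callback, if any.
+ *
+ */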
+void
+csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t tmo, uint8_t mode, unsigned int flags,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ struct fw_rss_glb_config_cmd *cmdp =
+ (struct fw_rss_glb_config_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+ cmdp->u.manual.mode_pkd =
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ cmdp->u.basicvirtual.mode_pkd =
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ }
+}
+
+
+/*
+ * csio_mb_pfvf - FW Write PF/VF capabilities command helper.
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout.
+ * @pf: PF number.
+ * @vf: VF number.
+ * @txq: Max number of egress queues.
+ * @txq_eth_ctrl: Max number of Ethernet control queues.
+ * @rxqi: Max number of interrupt-capable ingress queues.
+ * @rxq: Max number of interruptless ingress queues.
+ * @tc: Max number of traffic classes.
+ * @vi: Max number of virtual interfaces.
+ * @cmask: Channel access rights mask.
+ * @pmask: Port access rights mask.
+ * @nexactf: Max number of exact MAC address filters.
+ * @rcaps: Read capabilities.
+ * @wxcaps: Write/execute capabilities.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ unsigned int pf, unsigned int vf, unsigned int txq,
+ unsigned int txq_eth_ctrl, unsigned int rxqi,
+ unsigned int rxq, unsigned int tc, unsigned int vi,
+ unsigned int cmask, unsigned int pmask, unsigned int nexactf,
+ unsigned int rcaps, unsigned int wxcaps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_PFVF_CMD_PFN(pf) |
+ FW_PFVF_CMD_VFN(vf));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+ FW_PFVF_CMD_NIQ(rxq));
+
+ cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE |
+ FW_PFVF_CMD_CMASK(cmask) |
+ FW_PFVF_CMD_PMASK(pmask) |
+ FW_PFVF_CMD_NEQ(txq));
+ cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) |
+ FW_PFVF_CMD_NVI(vi) |
+ FW_PFVF_CMD_NEXACTF(nexactf));
+ cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+ FW_PFVF_CMD_WX_CAPS(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+}
+
+#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/*
+ * csio_mb_port- FW PORT command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout
+ * @portid: Port ID to get/set info
+ * @wr: Write/Read PORT information.
+ * @fc: Flow control
+ * @caps: Port capabilities to set.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
+ unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_EXEC : FW_CMD_READ) |
+ FW_PORT_CMD_PORTID(portid));
+ if (!wr) {
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ return;
+ }
+
+ /* Set port */
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ if (fc & PAUSE_RX)
+ lfc |= FW_PORT_CAP_FC_RX;
+ if (fc & PAUSE_TX)
+ lfc |= FW_PORT_CAP_FC_TX;
+
+ if (!(caps & FW_PORT_CAP_ANEG))
+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
+ else
+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
+ lfc | mdi);
+}
+
+/*
+ * csio_mb_process_read_port_rsp - FW PORT command response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @caps: port capabilities
+ *
+ */
+void
+csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, uint16_t *caps)
+{
+ struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));
+
+ if (*retval == FW_SUCCESS)
+ *caps = ntohs(rsp->u.info.pcap);
+}
+
+/*
+ * csio_mb_initialize - FW INITIALIZE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_iq_alloc - Initializes the mailbox to allocate an
+ * Ingress DMA queue in the firmware.
+ *
+ * @hw: The hw structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->type_to_iqandstindex = htonl(
+ FW_IQ_CMD_VIID(iq_params->viid) |
+ FW_IQ_CMD_TYPE(iq_params->type) |
+ FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));
+
+	cmdp->fl0size = htons(iq_params->fl0size);
+	cmdp->fl1size = htons(iq_params->fl1size);
+
+} /* csio_mb_iq_alloc */
+
+/*
+ * csio_mb_iq_write - Initializes the mailbox for writing into an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascaded with an iq-alloc request.
+ * @iq_params: Ingress queue params needed for writing.
+ * @cbfn: The call-back function
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this IQ write request can be cascaded with a previous
+ * IQ alloc request, and we don't want to overwrite the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ uint32_t iq_start_stop = (iq_params->iq_start) ?
+ FW_IQ_CMD_IQSTART(1) :
+ FW_IQ_CMD_IQSTOP(1);
+
+ /*
+ * If this IQ write is cascaded with IQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(iq_start_stop |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->iqid |= htons(iq_params->iqid);
+ cmdp->fl0id |= htons(iq_params->fl0id);
+ cmdp->fl1id |= htons(iq_params->fl1id);
+ cmdp->type_to_iqandstindex |= htonl(
+ FW_IQ_CMD_IQANDST(iq_params->iqandst) |
+ FW_IQ_CMD_IQANUS(iq_params->iqanus) |
+ FW_IQ_CMD_IQANUD(iq_params->iqanud) |
+ FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
+ cmdp->iqdroprss_to_iqesize |= htons(
+ FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
+ FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
+ FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
+ FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
+ FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
+ FW_IQ_CMD_IQESIZE(iq_params->iqesize));
+
+ cmdp->iqsize |= htons(iq_params->iqsize);
+ cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
+
+ if (iq_params->type == 0) {
+ cmdp->iqns_to_fl0congen |= htonl(
+ FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
+ FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
+ }
+
+ if (iq_params->fl0size && iq_params->fl0addr &&
+ (iq_params->fl0id != 0xFFFF)) {
+
+ cmdp->iqns_to_fl0congen |= htonl(
+ FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
+ FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
+ FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
+ FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
+ cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
+ FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
+ FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
+ FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
+ FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
+ FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
+ cmdp->fl0size |= htons(iq_params->fl0size);
+ cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
+ }
+} /* csio_mb_iq_write */
+
+/*
+ * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation & writing.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
+ csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
+} /* csio_mb_iq_alloc_write */
+
+/*
+ * csio_mb_iq_alloc_write_rsp - Process the allocation & writing
+ * of ingress DMA queue mailbox's response.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @iq_params: Ingress queue parameters, after allocation and write.
+ *
+ */
+void
+csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *ret_val,
+ struct csio_iq_params *iq_params)
+{
+ struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ if (*ret_val == FW_SUCCESS) {
+ iq_params->physiqid = ntohs(rsp->physiqid);
+ iq_params->iqid = ntohs(rsp->iqid);
+ iq_params->fl0id = ntohs(rsp->fl0id);
+ iq_params->fl1id = ntohs(rsp->fl1id);
+ } else {
+ iq_params->physiqid = iq_params->iqid =
+ iq_params->fl0id = iq_params->fl1id = 0;
+ }
+} /* csio_mb_iq_alloc_write_rsp */
+
+/*
+ * csio_mb_iq_free - Initializes the mailbox for freeing a
+ * specified Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Parameters of ingress queue, that is to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));
+
+ cmdp->iqid = htons(iq_params->iqid);
+ cmdp->fl0id = htons(iq_params->fl0id);
+ cmdp->fl1id = htons(iq_params->fl1id);
+
+} /* csio_mb_iq_free */
+
+/*
+ * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
+ * an offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_mb_eq_ofld_alloc */
+
+/*
+ * csio_mb_eq_ofld_write - Initializes the mailbox for writing
+ * an allocated offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascaded with an Eq-alloc request.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this EQ write request can be cascaded with a previous
+ * EQ alloc request, and we don't want to overwrite the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
+ FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;
+
+ /*
+ * If this EQ write is cascaded with EQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(eq_start_stop |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+
+ cmdp->fetchszm_to_iqid |= htonl(
+ FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
+ FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
+ FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
+ FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));
+
+ cmdp->dcaen_to_eqsize |= htonl(
+ FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
+ FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
+ FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
+ FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
+ FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));
+
+ cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
+
+} /* csio_mb_eq_ofld_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
+ * writing into an Egress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
+ void *priv, uint32_t mb_tmo,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
+ csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
+ eq_ofld_params, cbfn);
+} /* csio_mb_eq_ofld_alloc_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
+ * & write egress DMA queue mailbox's response.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp, enum fw_retval *ret_val,
+ struct csio_eq_params *eq_ofld_params)
+{
+ struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+
+ if (*ret_val == FW_SUCCESS) {
+ eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
+ ntohl(rsp->eqid_pkd));
+ eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
+ ntohl(rsp->physeqid_pkd));
+ } else
+ eq_ofld_params->eqid = 0;
+
+} /* csio_mb_eq_ofld_alloc_write_rsp */
+
+/*
+ * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
+ * specified Egress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data area.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters of the queue to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+
+} /* csio_mb_eq_ofld_free */
+
+/*
+ * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
+ * condition.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @port_id: Port id.
+ * @sub_opcode: Link command sub-opcode.
+ * @cos: Class of service.
+ * @link_status: Link status.
+ * @fcfi: FCF flow id.
+ * @cbfn: The call back function.
+ *
+ *
+ */
+void
+csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
+ uint8_t cos, bool link_status, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_link_cmd *cmdp =
+ (struct fw_fcoe_link_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_portid = htonl((
+ FW_CMD_OP(FW_FCOE_LINK_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_FCOE_LINK_CMD_PORTID(port_id)));
+ cmdp->sub_opcode_fcfi = htonl(
+ FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
+ FW_FCOE_LINK_CMD_FCFI(fcfi));
+ cmdp->lstatus = link_status;
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_write_fcoe_link_cond_init_mb */
+
+/*
+ * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
+ * resource information (FW_FCOE_RES_INFO_CMD).
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_res_info_cmd *cmdp =
+ (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+
+ cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ));
+
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_res_info_init_mb */
+
+/*
+ * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
+ * in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: VNP flow id.
+ * @iqid: Ingress queue id.
+ * @vnport_wwnn: vnport WWNN
+ * @vnport_wwpn: vnport WWPN
+ * @cbfn: The call-back function.
+ *
+ *
+ */
+void
+csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
+ uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_FCOE_VNP_CMD_FCFI(fcfi)));
+
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+
+ cmdp->iqid = htons(iqid);
+
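+	/* If the caller supplied neither a WWNN nor a WWPN, ask the
+	 * firmware to generate them for this VN_Port.
+	 */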
+ if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
+ cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
+
+ if (vnport_wwnn)
+ memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
+ if (vnport_wwpn)
+ memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
+
+} /* csio_fcoe_vnp_alloc_init_mb */
+
+/*
+ * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: VNP flow id.
+ * @cbfn: The call-back handler.
+ */
+void
+csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
+ * allocated VNP in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF flow id
+ * @vnpi: VNP flow id
+ * @cbfn: The call-back function.
+ * Return: None
+ */
+void
+csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
+ * FCF records.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @portid: Port id.
+ * @fcfi: FCF flow id.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_fcf_cmd *cmdp =
+ (struct fw_fcoe_fcf_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_FCOE_FCF_CMD_FCFI(fcfi));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_fcf_init_mb */
+
+void
+csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ struct fw_fcoe_port_cmd_params *portparams,
+ void (*cbfn)(struct csio_hw *,
+ struct csio_mb *))
+{
+ struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
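+	/* The statistics reply fills the mailbox beyond the command
+	 * structure; use the full 64-byte mailbox size so the entire
+	 * response is copied back on completion.
+	 */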
+ mbp->mb_size = 64;
+
+ cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16));
+
+ cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
+ FW_FCOE_STATS_CMD_PORT(portparams->portid);
+
+ cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
+ FW_FCOE_STATS_CMD_PORT_VALID;
+
+} /* csio_fcoe_read_portparams_init_mb */
+
+void
+csio_mb_process_portparams_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp,
+ enum fw_retval *retval,
+ struct fw_fcoe_port_cmd_params *portparams,
+ struct fw_fcoe_port_stats *portstats)
+{
+ struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
+ struct fw_fcoe_port_stats stats;
+ uint8_t *src;
+ uint8_t *dst;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16));
+
+ memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
+
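+	/* The firmware returns the port statistics in chunks of up to six
+	 * 8-byte flits; portparams->idx selects the starting flit (1, 7 or
+	 * 13), so the chunk is copied to the matching offset within the
+	 * local stats structure before the relevant fields are picked out.
+	 */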
+ if (*retval == FW_SUCCESS) {
+ dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
+ src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
+ memcpy(dst, src, (portparams->nstats * 8));
+ if (portparams->idx == 1) {
+ /* Get the first 6 flits from the Mailbox */
+ portstats->tx_bcast_bytes = stats.tx_bcast_bytes;
+ portstats->tx_bcast_frames = stats.tx_bcast_frames;
+ portstats->tx_mcast_bytes = stats.tx_mcast_bytes;
+ portstats->tx_mcast_frames = stats.tx_mcast_frames;
+ portstats->tx_ucast_bytes = stats.tx_ucast_bytes;
+ portstats->tx_ucast_frames = stats.tx_ucast_frames;
+ }
+ if (portparams->idx == 7) {
+ /* Get the second 6 flits from the Mailbox */
+ portstats->tx_drop_frames = stats.tx_drop_frames;
+ portstats->tx_offload_bytes = stats.tx_offload_bytes;
+ portstats->tx_offload_frames = stats.tx_offload_frames;
+#if 0
+ portstats->rx_pf_bytes = stats.rx_pf_bytes;
+ portstats->rx_pf_frames = stats.rx_pf_frames;
+#endif
+ portstats->rx_bcast_bytes = stats.rx_bcast_bytes;
+ portstats->rx_bcast_frames = stats.rx_bcast_frames;
+ portstats->rx_mcast_bytes = stats.rx_mcast_bytes;
+ }
+ if (portparams->idx == 13) {
+ /* Get the last 4 flits from the Mailbox */
+ portstats->rx_mcast_frames = stats.rx_mcast_frames;
+ portstats->rx_ucast_bytes = stats.rx_ucast_bytes;
+ portstats->rx_ucast_frames = stats.rx_ucast_frames;
+ portstats->rx_err_frames = stats.rx_err_frames;
+ }
+ }
+}
+
+/* Entry points/APIs for MB module */
+/*
+ * csio_mb_intr_enable - Enable Interrupts from mailboxes.
+ * @hw: The HW structure
+ *
+ * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
+ */
+void
+csio_mb_intr_enable(struct csio_hw *hw)
+{
+ csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
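+	/* Read back to flush the posted write. */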
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+}
+
+/*
+ * csio_mb_intr_disable - Disable Interrupts from mailboxes.
+ * @hw: The HW structure
+ *
+ * Disable bit in HostInterruptEnable CIM register.
+ */
+void
+csio_mb_intr_disable(struct csio_hw *hw)
+{
+ csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+}
+
+static void
+csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
+{
+ struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
+
+ if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
+ csio_info(hw, "FW print message:\n");
+ csio_info(hw, "\tdebug->dprtstridx = %d\n",
+ ntohs(dbg->u.prt.dprtstridx));
+ csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam0));
+ csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam1));
+ csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam2));
+ csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam3));
+ } else {
+ /* This is a FW assertion */
+ csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ dbg->u.assert.filename_0_7,
+ ntohl(dbg->u.assert.line),
+ ntohl(dbg->u.assert.x),
+ ntohl(dbg->u.assert.y));
+ }
+}
+
+static void
+csio_mb_debug_cmd_handler(struct csio_hw *hw)
+{
+ int i;
+ __be64 cmd[CSIO_MB_MAX_REGS];
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size = sizeof(struct fw_debug_cmd);
+
+ /* Copy mailbox data */
+ for (i = 0; i < size; i += 8)
+ cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));
+
+ csio_mb_dump_fw_dbg(hw, cmd);
+
+ /* Notify FW of mailbox by setting owner as UP */
+ csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
+ ctl_reg);
+
+ csio_rd_reg32(hw, ctl_reg);
+ wmb();
+}
+
+/*
+ * csio_mb_issue - generic routine for issuing Mailbox commands.
+ * @hw: The HW structure
+ * @mbp: Mailbox command to issue
+ *
+ * Caller should hold hw lock across this call.
+ */
+int
+csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ uint32_t owner, ctl;
+ int i;
+ uint32_t ii;
+ __be64 *cmd = mbp->mb;
+ __be64 hdr;
+ struct csio_mbm *mbm = &hw->mbm;
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size = mbp->mb_size;
+ int rv = -EINVAL;
+ struct fw_cmd_hdr *fw_hdr;
+
+ /* Determine mode */
+ if (mbp->mb_cbfn == NULL) {
+ /* Need to issue/get results in the same context */
+ if (mbp->tmo < CSIO_MB_POLL_FREQ) {
+ csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
+ goto error_out;
+ }
+ } else if (!csio_is_host_intr_enabled(hw) ||
+ !csio_is_hw_intr_enabled(hw)) {
+ csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
+ *((uint8_t *)mbp->mb));
+ goto error_out;
+ }
+
+ if (mbm->mcurrent != NULL) {
+ /* Queue mbox cmd, if another mbox cmd is active */
+ if (mbp->mb_cbfn == NULL) {
+ rv = -EBUSY;
+ csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb));
+
+ goto error_out;
+ } else {
+ list_add_tail(&mbp->list, &mbm->req_q);
+ CSIO_INC_STATS(mbm, n_activeq);
+
+ return 0;
+ }
+ }
+
+ /* Now get ownership of mailbox */
+ owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+
+ if (!csio_mb_is_host_owner(owner)) {
+
+ for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
+ owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+ /*
+ * Mailbox unavailable. In immediate mode, fail the command.
+ * In other modes, enqueue the request.
+ */
+ if (!csio_mb_is_host_owner(owner)) {
+ if (mbp->mb_cbfn == NULL) {
+ rv = owner ? -EBUSY : -ETIMEDOUT;
+
+ csio_dbg(hw,
+ "Couldnt own Mailbox %x op:0x%x "
+ "owner:%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb), owner);
+ goto error_out;
+ } else {
+ if (mbm->mcurrent == NULL) {
+ csio_err(hw,
+ "Couldnt own Mailbox %x "
+ "op:0x%x owner:%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb),
+ owner);
+ csio_err(hw,
+ "No outstanding driver"
+ " mailbox as well\n");
+ goto error_out;
+ }
+ }
+ }
+ }
+
+ /* Mailbox is available, copy mailbox data into it */
+ for (i = 0; i < size; i += 8) {
+ csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
+ cmd++;
+ }
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ /* Start completion timers in non-immediate modes and notify FW */
+ if (mbp->mb_cbfn != NULL) {
+ mbm->mcurrent = mbp;
+ mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
+ csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
+ MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
+ } else
+ csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
+ ctl_reg);
+
+ /* Flush posted writes */
+ csio_rd_reg32(hw, ctl_reg);
+ wmb();
+
+ CSIO_INC_STATS(mbm, n_req);
+
+ if (mbp->mb_cbfn)
+ return 0;
+
+ /* Poll for completion in immediate mode */
+ cmd = mbp->mb;
+
+ for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
+ mdelay(CSIO_MB_POLL_FREQ);
+
+ /* Check for response */
+ ctl = csio_rd_reg32(hw, ctl_reg);
+ if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+
+ if (!(ctl & MBMSGVALID)) {
+ csio_wr_reg32(hw, 0, ctl_reg);
+ continue;
+ }
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
+ fw_hdr = (struct fw_cmd_hdr *)&hdr;
+
+ switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
+ case FW_DEBUG_CMD:
+ csio_mb_debug_cmd_handler(hw);
+ continue;
+ }
+
+ /* Copy response */
+ for (i = 0; i < size; i += 8)
+ *cmd++ = cpu_to_be64(csio_rd_reg64
+ (hw, data_reg + i));
+ csio_wr_reg32(hw, 0, ctl_reg);
+
+ if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
+ CSIO_INC_STATS(mbm, n_err);
+
+ CSIO_INC_STATS(mbm, n_rsp);
+ return 0;
+ }
+ }
+
+ CSIO_INC_STATS(mbm, n_tmo);
+
+ csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
+ hw->pfn, *((uint8_t *)cmd));
+
+ return -ETIMEDOUT;
+
+error_out:
+ CSIO_INC_STATS(mbm, n_err);
+ return rv;
+}
+
+/*
+ * csio_mb_completions - Completion handler for Mailbox commands
+ * @hw: The HW structure
+ * @cbfn_q: Completion queue.
+ *
+ */
+void
+csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
+{
+ struct csio_mb *mbp;
+ struct csio_mbm *mbm = &hw->mbm;
+ enum fw_retval rv;
+
+ while (!list_empty(cbfn_q)) {
+ mbp = list_first_entry(cbfn_q, struct csio_mb, list);
+ list_del_init(&mbp->list);
+
+ rv = csio_mb_fw_retval(mbp);
+ if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
+ CSIO_INC_STATS(mbm, n_err);
+ else if (rv != FW_HOSTERROR)
+ CSIO_INC_STATS(mbm, n_rsp);
+
+ if (mbp->mb_cbfn)
+ mbp->mb_cbfn(hw, mbp);
+ }
+}
+
+static void
+csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
+{
+ static char *mod_str[] = {
+ NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
+ };
+
+ struct csio_pport *port = &hw->pport[port_id];
+
+ if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
+ csio_info(hw, "Port:%d - port module unplugged\n", port_id);
+ else if (port->mod_type < ARRAY_SIZE(mod_str))
+ csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
+ mod_str[port->mod_type]);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ csio_info(hw,
+ "Port:%d - unsupported optical port module "
+ "inserted\n", port_id);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ csio_info(hw,
+ "Port:%d - unknown port module inserted, forcing "
+ "TWINAX\n", port_id);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ csio_info(hw, "Port:%d - transceiver module error\n", port_id);
+ else
+ csio_info(hw, "Port:%d - unknown module type %d inserted\n",
+ port_id, port->mod_type);
+}
+
+int
+csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
+{
+ uint8_t opcode = *(uint8_t *)cmd;
+ struct fw_port_cmd *pcmd;
+ uint8_t port_id;
+ uint32_t link_status;
+ uint16_t action;
+ uint8_t mod_type;
+
+ if (opcode == FW_PORT_CMD) {
+ pcmd = (struct fw_port_cmd *)cmd;
+ port_id = FW_PORT_CMD_PORTID_GET(
+ ntohl(pcmd->op_to_portid));
+ action = FW_PORT_CMD_ACTION_GET(
+ ntohl(pcmd->action_to_len16));
+ if (action != FW_PORT_ACTION_GET_PORT_INFO) {
+ csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
+ action);
+ return -EINVAL;
+ }
+
+ link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
+ mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);
+
+ hw->pport[port_id].link_status =
+ FW_PORT_CMD_LSTATUS_GET(link_status);
+ hw->pport[port_id].link_speed =
+ FW_PORT_CMD_LSPEED_GET(link_status);
+
+ csio_info(hw, "Port:%x - LINK %s\n", port_id,
+ FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");
+
+ if (mod_type != hw->pport[port_id].mod_type) {
+ hw->pport[port_id].mod_type = mod_type;
+ csio_mb_portmod_changed(hw, port_id);
+ }
+ } else if (opcode == FW_DEBUG_CMD) {
+ csio_mb_dump_fw_dbg(hw, cmd);
+ } else {
+ csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_mb_isr_handler - Handle mailboxes related interrupts.
+ * @hw: The HW structure
+ *
+ * Called from the ISR to handle Mailbox related interrupts.
+ * HW Lock should be held across this call.
+ */
+int
+csio_mb_isr_handler(struct csio_hw *hw)
+{
+ struct csio_mbm *mbm = &hw->mbm;
+ struct csio_mb *mbp = mbm->mcurrent;
+ __be64 *cmd;
+ uint32_t ctl, cim_cause, pl_cause;
+ int i;
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size;
+ __be64 hdr;
+ struct fw_cmd_hdr *fw_hdr;
+
+ pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
+ cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+
+ if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
+ CSIO_INC_STATS(hw, n_mbint_unexp);
+ return -EINVAL;
+ }
+
+ /*
+ * The cause registers below HAVE to be cleared in the SAME
+ * order as below: The low level cause register followed by
+ * the upper level cause register. In other words, CIM-cause
+ * first followed by PL-Cause next.
+ */
+ csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+ csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
+
+ ctl = csio_rd_reg32(hw, ctl_reg);
+
+ if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ if (!(ctl & MBMSGVALID)) {
+ csio_warn(hw,
+ "Stray mailbox interrupt recvd,"
+ " mailbox data not valid\n");
+ csio_wr_reg32(hw, 0, ctl_reg);
+ /* Flush */
+ csio_rd_reg32(hw, ctl_reg);
+ return -EINVAL;
+ }
+
+ hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
+ fw_hdr = (struct fw_cmd_hdr *)&hdr;
+
+ switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
+ case FW_DEBUG_CMD:
+ csio_mb_debug_cmd_handler(hw);
+ return -EINVAL;
+#if 0
+ case FW_ERROR_CMD:
+ case FW_INITIALIZE_CMD: /* When we are not master */
+#endif
+ }
+
+ CSIO_ASSERT(mbp != NULL);
+
+ cmd = mbp->mb;
+ size = mbp->mb_size;
+ /* Get response */
+ for (i = 0; i < size; i += 8)
+ *cmd++ = cpu_to_be64(csio_rd_reg64
+ (hw, data_reg + i));
+
+ csio_wr_reg32(hw, 0, ctl_reg);
+ /* Flush */
+ csio_rd_reg32(hw, ctl_reg);
+
+ mbm->mcurrent = NULL;
+
+ /* Add completion to tail of cbfn queue */
+ list_add_tail(&mbp->list, &mbm->cbfn_q);
+ CSIO_INC_STATS(mbm, n_cbfnq);
+
+ /*
+ * Enqueue event to EventQ. Events processing happens
+ * in Event worker thread context
+ */
+ if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
+ CSIO_INC_STATS(hw, n_evt_drop);
+
+ return 0;
+
+ } else {
+ /*
+ * We can get here if mailbox MSIX vector is shared,
+ * or in INTx case. Or a stray interrupt.
+ */
+ csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
+ CSIO_INC_STATS(hw, n_int_stray);
+ return -EINVAL;
+ }
+}
+
+/*
+ * csio_mb_tmo_handler - Timeout handler
+ * @hw: The HW structure
+ *
+ */
+struct csio_mb *
+csio_mb_tmo_handler(struct csio_hw *hw)
+{
+ struct csio_mbm *mbm = &hw->mbm;
+ struct csio_mb *mbp = mbm->mcurrent;
+ struct fw_cmd_hdr *fw_hdr;
+
+ /*
+ * There could be a race between the completion handler and the
+ * timer, and the completion handler won that race.
+ */
+ if (mbp == NULL) {
+ CSIO_DB_ASSERT(0);
+ return NULL;
+ }
+
+ fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
+ FW_CMD_OP_GET(ntohl(fw_hdr->hi)));
+
+ mbm->mcurrent = NULL;
+ CSIO_INC_STATS(mbm, n_tmo);
+ fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT));
+
+ return mbp;
+}
+
+/*
+ * csio_mb_cancel_all - Cancel all waiting commands.
+ * @hw: The HW structure
+ * @cbfn_q: The callback queue.
+ *
+ * Caller should hold hw lock across this call.
+ */
+void
+csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
+{
+ struct csio_mb *mbp;
+ struct csio_mbm *mbm = &hw->mbm;
+ struct fw_cmd_hdr *hdr;
+ struct list_head *tmp;
+
+ if (mbm->mcurrent) {
+ mbp = mbm->mcurrent;
+
+ /* Stop mailbox completion timer */
+ del_timer_sync(&mbm->timer);
+
+ /* Add completion to tail of cbfn queue */
+ list_add_tail(&mbp->list, cbfn_q);
+ mbm->mcurrent = NULL;
+ }
+
+ if (!list_empty(&mbm->req_q)) {
+ list_splice_tail_init(&mbm->req_q, cbfn_q);
+ mbm->stats.n_activeq = 0;
+ }
+
+ if (!list_empty(&mbm->cbfn_q)) {
+ list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
+ mbm->stats.n_cbfnq = 0;
+ }
+
+ if (list_empty(cbfn_q))
+ return;
+
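+ /*
+ * The list_head is the first member of struct csio_mb, so each
+ * queue entry can be cast directly back to its mailbox.
+ */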
+ list_for_each(tmp, cbfn_q) {
+ mbp = (struct csio_mb *)tmp;
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
+ hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi)));
+
+ CSIO_INC_STATS(mbm, n_cancel);
+ hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR));
+ }
+}
+
+/*
+ * csio_mbm_init - Initialize Mailbox module
+ * @mbm: Mailbox module
+ * @hw: The HW structure
+ * @timer_fn: Timer callback invoked when a mailbox command times out
+ *
+ * Initialize timer and the request/response queues.
+ */
+int
+csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
+ void (*timer_fn)(uintptr_t))
+{
+ struct timer_list *timer = &mbm->timer;
+
+ init_timer(timer);
+ timer->function = timer_fn;
+ timer->data = (unsigned long)hw;
+
+ INIT_LIST_HEAD(&mbm->req_q);
+ INIT_LIST_HEAD(&mbm->cbfn_q);
+ csio_set_mb_intr_idx(mbm, -1);
+
+ return 0;
+}
+
+/*
+ * csio_mbm_exit - Uninitialize mailbox module
+ * @mbm: Mailbox module
+ *
+ * Stop timer.
+ */
+void
+csio_mbm_exit(struct csio_mbm *mbm)
+{
+ del_timer_sync(&mbm->timer);
+
+ CSIO_DB_ASSERT(mbm->mcurrent == NULL);
+ CSIO_DB_ASSERT(list_empty(&mbm->req_q));
+ CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
+}
diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h
new file mode 100644
index 000000000000..1788ea506f39
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_mb.h
@@ -0,0 +1,278 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_MB_H__
+#define __CSIO_MB_H__
+
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+#include "t4fw_api.h"
+#include "t4fw_api_stor.h"
+#include "csio_defs.h"
+
+#define CSIO_STATS_OFFSET (2)
+#define CSIO_NUM_STATS_PER_MB (6)
+
+struct fw_fcoe_port_cmd_params {
+ uint8_t portid;
+ uint8_t idx;
+ uint8_t nstats;
+};
+
+#define CSIO_DUMP_MB(__hw, __num, __mb) \
+ csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \
+ (unsigned long long)csio_rd_reg64(__hw, __mb), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 8), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 16), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 24), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 32), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 40), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 48), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 56))
+
+#define CSIO_MB_MAX_REGS 8
+#define CSIO_MAX_MB_SIZE 64
+#define CSIO_MB_POLL_FREQ 5 /* 5 ms */
+#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT
+
+/* Device master in HELLO command */
+enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST };
+
+enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL };
+
+enum csio_dev_state {
+ CSIO_DEV_STATE_UNINIT,
+ CSIO_DEV_STATE_INIT,
+ CSIO_DEV_STATE_ERR
+};
+
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
+ FW_PARAMS_PARAM_Y(0) | \
+ FW_PARAMS_PARAM_Z(0))
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \
+do { \
+ if (__clear) \
+ memset((__cp), 0, \
+ CSIO_MB_MAX_REGS * sizeof(__be64)); \
+ INIT_LIST_HEAD(&(__mbp)->list); \
+ (__mbp)->tmo = (__tmo); \
+ (__mbp)->priv = (void *)(__priv); \
+ (__mbp)->mb_cbfn = (__fn); \
+ (__mbp)->mb_size = sizeof(*(__cp)); \
+} while (0)
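+
+/*
+ * Illustrative usage (not part of the driver): a synchronous caller fills
+ * in a mailbox with CSIO_INIT_MBP() and issues it with csio_mb_issue()
+ * while holding the HW lock; a NULL callback selects polled mode:
+ *
+ *	struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
+ *
+ *	CSIO_INIT_MBP(mbp, cmdp, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+ *	... fill in *cmdp ...
+ *	if (csio_mb_issue(hw, mbp))
+ *		... handle error ...
+ *	else
+ *		retval = csio_mb_fw_retval(mbp);
+ */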
+
+struct csio_mbm_stats {
+ uint32_t n_req; /* number of mbox req */
+ uint32_t n_rsp; /* number of mbox rsp */
+ uint32_t n_activeq; /* number of mbox req active Q */
+ uint32_t n_cbfnq; /* number of mbox req cbfn Q */
+ uint32_t n_tmo; /* number of mbox timeout */
+ uint32_t n_cancel; /* number of mbox cancel */
+ uint32_t n_err; /* number of mbox error */
+};
+
+/* Driver version of Mailbox */
+struct csio_mb {
+ struct list_head list; /* for req/resp */
+ /* queue in driver */
+ __be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */
+ int mb_size; /* Size of this
+ * mailbox.
+ */
+ uint32_t tmo; /* Timeout */
+ struct completion cmplobj; /* MB Completion
+ * object
+ */
+ void (*mb_cbfn) (struct csio_hw *, struct csio_mb *);
+ /* Callback fn */
+ void *priv; /* Owner private ptr */
+};
+
+struct csio_mbm {
+ uint32_t a_mbox; /* Async mbox num */
+ uint32_t intr_idx; /* Interrupt index */
+ struct timer_list timer; /* Mbox timer */
+ struct list_head req_q; /* Mbox request queue */
+ struct list_head cbfn_q; /* Mbox completion q */
+ struct csio_mb *mcurrent; /* Current mailbox */
+ uint32_t req_q_cnt; /* Outstanding mbox
+ * cmds
+ */
+ struct csio_mbm_stats stats; /* Statistics */
+};
+
+#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i))
+#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx)
+
+struct csio_iq_params;
+struct csio_eq_params;
+
+enum fw_retval csio_mb_fw_retval(struct csio_mb *);
+
+/* MB helpers */
+void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t,
+ uint32_t, uint32_t, enum csio_dev_master,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, enum csio_dev_state *,
+ uint8_t *);
+
+void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int,
+ unsigned int, unsigned int, const u32 *, u32 *, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, unsigned int , u32 *);
+
+void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reg);
+
+void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,
+ bool, bool, bool, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_rss_glb_config(struct csio_hw *, struct csio_mb *,
+ uint32_t, uint8_t, unsigned int,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,
+ uint8_t, bool, uint32_t, uint16_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, uint16_t *);
+
+void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_iq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, struct csio_iq_params *);
+
+void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_iq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_eq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, struct csio_eq_params *);
+
+void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t , struct csio_eq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *,
+ uint32_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t , uint32_t , uint16_t,
+ uint8_t [8], uint8_t [8],
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t , uint32_t ,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t , uint32_t, uint32_t ,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t, uint32_t,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw,
+ struct csio_mb *mbp, uint32_t mb_tmo,
+ struct fw_fcoe_port_cmd_params *portparams,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval,
+ struct fw_fcoe_port_cmd_params *portparams,
+ struct fw_fcoe_port_stats *portstats);
+
+/* MB module functions */
+int csio_mbm_init(struct csio_mbm *, struct csio_hw *,
+ void (*)(uintptr_t));
+void csio_mbm_exit(struct csio_mbm *);
+void csio_mb_intr_enable(struct csio_hw *);
+void csio_mb_intr_disable(struct csio_hw *);
+
+int csio_mb_issue(struct csio_hw *, struct csio_mb *);
+void csio_mb_completions(struct csio_hw *, struct list_head *);
+int csio_mb_fwevt_handler(struct csio_hw *, __be64 *);
+int csio_mb_isr_handler(struct csio_hw *);
+struct csio_mb *csio_mb_tmo_handler(struct csio_hw *);
+void csio_mb_cancel_all(struct csio_hw *, struct list_head *);
+
+#endif /* ifndef __CSIO_MB_H__ */
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
new file mode 100644
index 000000000000..51c6a388de2b
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -0,0 +1,913 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
+static void csio_rnode_exit(struct csio_rnode *);
+
+/* State machine forward declarations */
+static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
+
+/* RNF event mapping */
+static enum csio_rn_ev fwevt_to_rnevt[] = {
+ CSIO_RNFE_NONE, /* None */
+ CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */
+ CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */
+ CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */
+ CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */
+ CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */
+ CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */
+ CSIO_RNFE_NONE, /* NPORT_ID_CHGD */
+ CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */
+ CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */
+ CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */
+ CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */
+ CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */
+ CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
+ CSIO_RNFE_NONE, /* PRLI_TMO */
+ CSIO_RNFE_NONE, /* ADISC_TMO */
+ CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
+ CSIO_RNFE_NONE, /* SCR_ACC_RCVD */
+ CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */
+ CSIO_RNFE_NONE, /* LOGO_SNT */
+ CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */
+};
+
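+/* Map a firmware rdev event to the corresponding rnode SM event; events
+ * beyond the table map to CSIO_RNFE_NONE.
+ */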
+#define CSIO_FWE_TO_RNFE(_evt)	(((_evt) > PROTO_ERR_IMPL_LOGO) ?	\
+						CSIO_RNFE_NONE :	\
+						fwevt_to_rnevt[(_evt)])
+int
+csio_is_rnode_ready(struct csio_rnode *rn)
+{
+ return csio_match_state(rn, csio_rns_ready);
+}
+
+static int
+csio_is_rnode_uninit(struct csio_rnode *rn)
+{
+ return csio_match_state(rn, csio_rns_uninit);
+}
+
+static int
+csio_is_rnode_wka(uint8_t rport_type)
+{
+ if ((rport_type == FLOGI_VFPORT) ||
+ (rport_type == FDISC_VFPORT) ||
+ (rport_type == NS_VNPORT) ||
+ (rport_type == FDMI_VNPORT))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * csio_rn_lookup - Finds the rnode with the given flowid
+ * @ln - lnode
+ * @flowid - flowid.
+ *
+ * Does the rnode lookup on the given lnode and flowid. If no matching entry
+ * found, NULL is returned.
+ */
+static struct csio_rnode *
+csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (rn->flowid == flowid)
+ return rn;
+ }
+
+ return NULL;
+}
+
+/*
+ * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
+ * @ln: lnode
+ * @wwpn: wwpn
+ *
+ * Does the rnode lookup on the given lnode and wwpn. If no matching entry
+ * found, NULL is returned.
+ */
+static struct csio_rnode *
+csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
+ return rn;
+ }
+
+ return NULL;
+}
+
+/**
+ * csio_rnode_lookup_portid - Finds the rnode with the given portid
+ * @ln: lnode
+ * @portid: port id
+ *
+ * Lookup the rnode list for a given portid. If no matching entry
+ * found, NULL is returned.
+ */
+struct csio_rnode *
+csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (rn->nport_id == portid)
+ return rn;
+ }
+
+ return NULL;
+}
+
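+/*
+ * csio_rn_dup_flowid - Check whether a flowid is already in use.
+ * @ln: lnode originating the lookup
+ * @rdev_flowid: remote device flowid
+ * @vnp_flowid: set to the vnp flowid of the lnode already using it
+ *
+ * Walks the rnodes of all other lnodes on this HW. Returns 1 if a ready
+ * rnode already uses @rdev_flowid (filling in @vnp_flowid), 0 otherwise.
+ */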
+static int
+csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
+ uint32_t *vnp_flowid)
+{
+ struct csio_rnode *rnhead;
+ struct list_head *tmp, *tmp1;
+ struct csio_rnode *rn;
+ struct csio_lnode *ln_tmp;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ list_for_each(tmp1, &hw->sln_head) {
+ ln_tmp = (struct csio_lnode *) tmp1;
+ if (ln_tmp == ln)
+ continue;
+
+ rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+
+ rn = (struct csio_rnode *) tmp;
+ if (csio_is_rnode_ready(rn)) {
+ if (rn->flowid == rdev_flowid) {
+ *vnp_flowid = csio_ln_flowid(ln_tmp);
+ return 1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
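+/*
+ * csio_alloc_rnode - Allocate and initialize an rnode from the mempool.
+ * @ln: owning lnode
+ *
+ * Returns the new rnode on success, or NULL on allocation/init failure.
+ */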
+static struct csio_rnode *
+csio_alloc_rnode(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
+ if (!rn)
+ goto err;
+
+ memset(rn, 0, sizeof(struct csio_rnode));
+ if (csio_rnode_init(rn, ln))
+ goto err_free;
+
+ CSIO_INC_STATS(ln, n_rnode_alloc);
+
+ return rn;
+
+err_free:
+ mempool_free(rn, hw->rnode_mempool);
+err:
+ CSIO_INC_STATS(ln, n_rnode_nomem);
+ return NULL;
+}
+
+static void
+csio_free_rnode(struct csio_rnode *rn)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
+
+ csio_rnode_exit(rn);
+ CSIO_INC_STATS(rn->lnp, n_rnode_free);
+ mempool_free(rn, hw->rnode_mempool);
+}
+
+/*
+ * csio_get_rnode - Gets rnode with the given flowid
+ * @ln - lnode
+ * @flowid - flow id.
+ *
+ * Does the rnode lookup on the given lnode and flowid. If no matching
+ * rnode is found, a new rnode with the given flowid is allocated and returned.
+ */
+static struct csio_rnode *
+csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
+{
+ struct csio_rnode *rn;
+
+ rn = csio_rn_lookup(ln, flowid);
+ if (!rn) {
+ rn = csio_alloc_rnode(ln);
+ if (!rn)
+ return NULL;
+
+ rn->flowid = flowid;
+ }
+
+ return rn;
+}
+
+/*
+ * csio_put_rnode - Frees the given rnode
+ * @ln - lnode
+ * @rn - rnode to be freed.
+ *
+ * Returns the given rnode to the rnode mempool. The rnode is expected to
+ * be in the uninit state.
+ */
+void
+csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
+{
+ CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
+ csio_free_rnode(rn);
+}
+
+/*
+ * csio_confirm_rnode - confirms rnode based on wwpn.
+ * @ln: lnode
+ * @rdev_flowid: remote device flowid
+ * @rdevp: remote device params
+ *
+ * This routine searches the rnode list for an rnode with the same wwpn as
+ * the new rnode. If a match is found, the matched rnode is returned;
+ * otherwise a new rnode is allocated and returned.
+ */
+struct csio_rnode *
+csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
+ struct fcoe_rdev_entry *rdevp)
+{
+ uint8_t rport_type;
+ struct csio_rnode *rn, *match_rn;
+ uint32_t vnp_flowid;
+ __be32 *port_id;
+
+ port_id = (__be32 *)&rdevp->r_id[0];
+ rport_type =
+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
+
+ /* Drop rdev event for the fabric controller port */
+ if (rport_type == FAB_CTLR_VNPORT) {
+ csio_ln_dbg(ln,
+ "Unhandled rport_type:%d recv in rdev evt "
+ "ssni:x%x\n", rport_type, rdev_flowid);
+ return NULL;
+ }
+
+ /* Lookup on flowid */
+ rn = csio_rn_lookup(ln, rdev_flowid);
+ if (!rn) {
+
+ /* Drop events with duplicate flowid */
+ if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
+ csio_ln_warn(ln,
+ "ssni:%x already active on vnpi:%x",
+ rdev_flowid, vnp_flowid);
+ return NULL;
+ }
+
+ /* Lookup on wwpn for NPORTs */
+ rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ if (!rn)
+ goto alloc_rnode;
+
+ } else {
+ /* Lookup well-known ports with nport id */
+ if (csio_is_rnode_wka(rport_type)) {
+ match_rn = csio_rnode_lookup_portid(ln,
+ ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
+ if (match_rn == NULL) {
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+
+ /*
+ * Now compare the wwpn to confirm that the same
+ * port logged in again. If so, update the matched rn.
+ * Otherwise, go ahead and allocate a new rnode.
+ */
+ if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
+ if (csio_is_rnode_ready(rn)) {
+ csio_ln_warn(ln,
+ "rnode is already "
+ "active ssni:x%x\n",
+ rdev_flowid);
+ CSIO_ASSERT(0);
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ rn = match_rn;
+
+ /* Update rn */
+ goto found_rnode;
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+
+ /* wwpn match */
+ if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
+ goto found_rnode;
+
+ /* Search for rnode that have same wwpn */
+ match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ if (match_rn != NULL) {
+ csio_ln_dbg(ln,
+ "ssni:x%x changed for rport name(wwpn):%llx "
+ "did:x%x\n", rdev_flowid,
+ wwn_to_u64(rdevp->wwpn),
+ match_rn->nport_id);
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ rn = match_rn;
+ } else {
+ csio_ln_dbg(ln,
+ "rnode wwpn mismatch found ssni:x%x "
+ "name(wwpn):%llx\n",
+ rdev_flowid,
+ wwn_to_u64(csio_rn_wwpn(rn)));
+ if (csio_is_rnode_ready(rn)) {
+ csio_ln_warn(ln,
+ "rnode is already active "
+ "wwpn:%llx ssni:x%x\n",
+ wwn_to_u64(csio_rn_wwpn(rn)),
+ rdev_flowid);
+ CSIO_ASSERT(0);
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+ }
+
+found_rnode:
+ csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
+
+ /* Update flowid */
+ csio_rn_flowid(rn) = rdev_flowid;
+
+ /* update rdev entry */
+ rn->rdev_entry = rdevp;
+ CSIO_INC_STATS(ln, n_rnode_match);
+ return rn;
+
+alloc_rnode:
+ rn = csio_get_rnode(ln, rdev_flowid);
+ if (!rn)
+ return NULL;
+
+ csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
+
+ /* update rdev entry */
+ rn->rdev_entry = rdevp;
+ return rn;
+}
+
+/*
+ * csio_rn_verify_rparams - verify rparams.
+ * @ln: lnode
+ * @rn: rnode
+ * @rdevp: remote device params
+ * returns success if rparams are verified.
+ */
+static int
+csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
+ struct fcoe_rdev_entry *rdevp)
+{
+ uint8_t null[8];
+ uint8_t rport_type;
+ uint8_t fc_class;
+ __be32 *did;
+
+ did = (__be32 *) &rdevp->r_id[0];
+ rport_type =
+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
+ switch (rport_type) {
+ case FLOGI_VFPORT:
+ rn->role = CSIO_RNFR_FABRIC;
+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
+ csio_rn_flowid(rn));
+ return -EINVAL;
+ }
+ /* NPIV support */
+ if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
+ ln->flags |= CSIO_LNF_NPIVSUPP;
+
+ break;
+
+ case NS_VNPORT:
+ rn->role = CSIO_RNFR_NS;
+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
+ csio_rn_flowid(rn));
+ return -EINVAL;
+ }
+ break;
+
+ case REG_FC4_VNPORT:
+ case REG_VNPORT:
+ rn->role = CSIO_RNFR_NPORT;
+ if (rdevp->event_cause == PRLI_ACC_RCVD ||
+ rdevp->event_cause == PRLI_RCVD) {
+ if (FW_RDEV_WR_TASK_RETRY_ID_GET(
+ rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;
+
+ if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_RETRY;
+
+ if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_CONF_COMPL;
+
+ if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
+ rn->role |= CSIO_RNFR_TARGET;
+
+ if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
+ rn->role |= CSIO_RNFR_INITIATOR;
+ }
+
+ break;
+
+ case FDMI_VNPORT:
+ case FAB_CTLR_VNPORT:
+ rn->role = 0;
+ break;
+
+ default:
+ csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
+ csio_rn_flowid(rn), rport_type);
+ return -EINVAL;
+ }
+
+ /* validate wwpn/wwnn for Name server/remote port */
+ if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
+ memset(null, 0, 8);
+ if (!memcmp(rdevp->wwnn, null, 8)) {
+ csio_ln_err(ln,
+ "ssni:x%x invalid wwnn received from"
+ " rport did:x%x\n",
+ csio_rn_flowid(rn),
+ (ntohl(*did) & CSIO_DID_MASK));
+ return -EINVAL;
+ }
+
+ if (!memcmp(rdevp->wwpn, null, 8)) {
+ csio_ln_err(ln,
+ "ssni:x%x invalid wwpn received from"
+ " rport did:x%x\n",
+ csio_rn_flowid(rn),
+ (ntohl(*did) & CSIO_DID_MASK));
+ return -EINVAL;
+ }
+
+ }
+
+ /* Copy wwnn, wwpn and nport id */
+ rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
+ memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
+ memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
+ rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
+ fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
+ rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);
+
+ return 0;
+}
+
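+/* Register the rnode with the FC transport. The HW lock is dropped across
+ * csio_reg_rnode(), since registration with the FC transport can block.
+ */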
+static void
+__csio_reg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_unlock_irq(&hw->lock);
+ csio_reg_rnode(rn);
+ spin_lock_irq(&hw->lock);
+
+ if (rn->role & CSIO_RNFR_TARGET)
+ ln->n_scsi_tgts++;
+
+ if (rn->nport_id == FC_FID_MGMT_SERV)
+ csio_ln_fdmi_start(ln, (void *) rn);
+}
+
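+/* Unregister the rnode from the FC transport and clean up any I/Os parked
+ * on its host completion queue. As with __csio_reg_rnode(), the HW lock is
+ * released around the (potentially blocking) transport call.
+ */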
+static void
+__csio_unreg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ LIST_HEAD(tmp_q);
+ int cmpl = 0;
+
+ if (!list_empty(&rn->host_cmpl_q)) {
+ csio_dbg(hw, "Returning completion queue I/Os\n");
+ list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
+ cmpl = 1;
+ }
+
+ if (rn->role & CSIO_RNFR_TARGET) {
+ ln->n_scsi_tgts--;
+ ln->last_scan_ntgts--;
+ }
+
+ spin_unlock_irq(&hw->lock);
+ csio_unreg_rnode(rn);
+ spin_lock_irq(&hw->lock);
+
+ /* Cleanup I/Os that were waiting for rnode to unregister */
+ if (cmpl)
+ csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
+
+}
+
+/*****************************************************************************/
+/* START: Rnode SM */
+/*****************************************************************************/
+
+/*
+ * csio_rns_uninit - SM handler for the rnode UNINIT state.
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ }
+ break;
+ case CSIO_RNFE_LOGO_RECV:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_ready - SM handler for the rnode READY state.
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did:x%x "
+ "in rn state[ready]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+
+ case CSIO_RNFE_PRLI_DONE:
+ case CSIO_RNFE_PRLI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret)
+ __csio_reg_rnode(rn);
+ else
+ CSIO_INC_STATS(rn, n_err_inval);
+
+ break;
+ case CSIO_RNFE_DOWN:
+ csio_set_state(&rn->sm, csio_rns_offline);
+ __csio_unreg_rnode(rn);
+
+ /* FW is expected to internally abort outstanding SCSI WRs
+ * and return all SCSI WRs to the host with status "ABORTED".
+ */
+ break;
+
+ case CSIO_RNFE_LOGO_RECV:
+ csio_set_state(&rn->sm, csio_rns_offline);
+
+ __csio_unreg_rnode(rn);
+
+ /* FW is expected to internally abort outstanding SCSI WRs
+ * and return all SCSI WRs to the host with status "ABORTED".
+ */
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /*
+ * Each rnode receives a CLOSE event when the driver is removed
+ * or the device is reset.
+ * Note: All outstanding I/Os on the remote port need to be
+ * returned to the upper layer with an appropriate error before
+ * sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ __csio_unreg_rnode(rn);
+ break;
+
+ case CSIO_RNFE_NAME_MISSING:
+ csio_set_state(&rn->sm, csio_rns_disappeared);
+ __csio_unreg_rnode(rn);
+
+ /*
+ * FW is expected to internally abort outstanding SCSI WRs
+ * and return all SCSI WRs to the host with status "ABORTED".
+ */
+
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did:x%x "
+ "in rn state[ready]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_offline - SM handler for the rnode OFFLINE state.
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+ }
+ break;
+
+ case CSIO_RNFE_DOWN:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did:x%x "
+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /* Each rnode receives a CLOSE event when the driver is removed
+ * or the device is reset.
+ * Note: All outstanding I/Os on the remote port need to be
+ * returned to the upper layer with an appropriate error before
+ * sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ break;
+
+ case CSIO_RNFE_NAME_MISSING:
+ csio_set_state(&rn->sm, csio_rns_disappeared);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did:x%x "
+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_disappeared - SM handler for the rnode DISAPPEARED state.
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+ }
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /* Each rnode receives a CLOSE event when the driver is removed
+ * or the device is reset.
+ * Note: All outstanding I/Os on the remote port need to be
+ * returned to the upper layer with an appropriate error before
+ * sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ break;
+
+ case CSIO_RNFE_DOWN:
+ case CSIO_RNFE_NAME_MISSING:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did x%x "
+ "in rn state[disappeared]\n", csio_rn_flowid(rn),
+ evt, rn->nport_id);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did x%x "
+ "in rn state[disappeared]\n", csio_rn_flowid(rn),
+ evt, rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* END: Rnode SM */
+/*****************************************************************************/
+
+/*
+ * csio_rnode_devloss_handler - Device loss event handler
+ * @rn: rnode
+ *
+ * Post event to close rnode SM and free rnode.
+ */
+void
+csio_rnode_devloss_handler(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+
+ /* ignore if same rnode came back as online */
+ if (csio_is_rnode_ready(rn))
+ return;
+
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+
+ /* Free rn if in uninit state */
+ if (csio_is_rnode_uninit(rn))
+ csio_put_rnode(ln, rn);
+}
+
+/**
+ * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
+ * @rn: rnode
+ * @fwevt: firmware rdev event
+ *
+ */
+void
+csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ enum csio_rn_ev evt;
+
+ evt = CSIO_FWE_TO_RNFE(fwevt);
+ if (!evt) {
+ csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
+ csio_rn_flowid(rn), fwevt);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ return;
+ }
+ CSIO_INC_STATS(rn, n_evt_fw[fwevt]);
+
+ /* Track previous & current events for debugging */
+ rn->prev_evt = rn->cur_evt;
+ rn->cur_evt = fwevt;
+
+ /* Post event to rnode SM */
+ csio_post_event(&rn->sm, evt);
+
+ /* Free rn if in uninit state */
+ if (csio_is_rnode_uninit(rn))
+ csio_put_rnode(ln, rn);
+}
+
+/*
+ * csio_rnode_init - Initialize rnode.
+ * @rn: RNode
+ * @ln: Associated lnode
+ *
+ * Caller is responsible for holding the lock. The lock is required
+ * to be held for inserting the rnode in ln->rnhead list.
+ */
+static int
+csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
+{
+ csio_rnode_to_lnode(rn) = ln;
+ csio_init_state(&rn->sm, csio_rns_uninit);
+ INIT_LIST_HEAD(&rn->host_cmpl_q);
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+
+ /* Add rnode to list of lnodes->rnhead */
+ list_add_tail(&rn->sm.sm_list, &ln->rnhead);
+
+ return 0;
+}
+
+static void
+csio_rnode_exit(struct csio_rnode *rn)
+{
+ list_del_init(&rn->sm.sm_list);
+ CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
+}
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
new file mode 100644
index 000000000000..a3b434c801da
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -0,0 +1,141 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_RNODE_H__
+#define __CSIO_RNODE_H__
+
+#include "csio_defs.h"
+
+/* State machine events */
+enum csio_rn_ev {
+ CSIO_RNFE_NONE = (uint32_t)0, /* None */
+ CSIO_RNFE_LOGGED_IN, /* [N/F]Port login
+ * complete.
+ */
+ CSIO_RNFE_PRLI_DONE, /* PRLI completed */
+ CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */
+ CSIO_RNFE_PRLI_RECV, /* Received PRLI */
+ CSIO_RNFE_LOGO_RECV, /* Received LOGO */
+ CSIO_RNFE_PRLO_RECV, /* Received PRLO */
+ CSIO_RNFE_DOWN, /* Rnode is down */
+ CSIO_RNFE_CLOSE, /* Close rnode */
+ CSIO_RNFE_NAME_MISSING, /* Rnode name missing
+ * in name server.
+ */
+ CSIO_RNFE_MAX_EVENT,
+};
+
+/* rnode stats */
+struct csio_rnode_stats {
+ uint32_t n_err; /* error */
+ uint32_t n_err_inval; /* invalid parameter */
+ uint32_t n_err_nomem; /* error nomem */
+ uint32_t n_evt_unexp; /* unexpected event */
+ uint32_t n_evt_drop; /* dropped event */
+ uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
+ uint32_t n_lun_rst; /* Number of resets of
+ * LUNs under this
+ * target
+ */
+ uint32_t n_lun_rst_fail; /* Number of LUN reset
+ * failures.
+ */
+ uint32_t n_tgt_rst; /* Number of target resets */
+ uint32_t n_tgt_rst_fail; /* Number of target reset
+ * failures.
+ */
+};
+
+/* Defines for rnode role */
+#define CSIO_RNFR_INITIATOR 0x1
+#define CSIO_RNFR_TARGET 0x2
+#define CSIO_RNFR_FABRIC 0x4
+#define CSIO_RNFR_NS 0x8
+#define CSIO_RNFR_NPORT 0x10
+
+struct csio_rnode {
+ struct csio_sm sm; /* State machine -
+ * should be the
+ * 1st member
+ */
+ struct csio_lnode *lnp; /* Pointer to owning
+ * Lnode */
+ uint32_t flowid; /* Firmware ID */
+ struct list_head host_cmpl_q; /* SCSI IOs
+ * pending completion
+ * to the mid-layer.
+ */
+ /* FC identifiers for remote node */
+ uint32_t nport_id;
+ uint16_t fcp_flags; /* FCP Flags */
+ uint8_t cur_evt; /* Current event */
+ uint8_t prev_evt; /* Previous event */
+ uint32_t role; /* Fabric/Target/
+ * Initiator/NS
+ */
+ struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */
+ struct csio_service_parms rn_sparm;
+
+ /* FC transport attributes */
+ struct fc_rport *rport; /* FC transport rport */
+ uint32_t supp_classes; /* Supported FC classes */
+ uint32_t maxframe_size; /* Max Frame size */
+ uint32_t scsi_id; /* Transport given SCSI id */
+
+ struct csio_rnode_stats stats; /* Common rnode stats */
+};
+
+#define csio_rn_flowid(rn) ((rn)->flowid)
+#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)
+#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)
+#define csio_rnode_to_lnode(rn) ((rn)->lnp)
+
+int csio_is_rnode_ready(struct csio_rnode *rn);
+void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);
+
+struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t);
+struct csio_rnode *csio_confirm_rnode(struct csio_lnode *,
+ uint32_t, struct fcoe_rdev_entry *);
+
+void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);
+
+void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);
+
+void csio_reg_rnode(struct csio_rnode *);
+void csio_unreg_rnode(struct csio_rnode *);
+
+void csio_rnode_devloss_handler(struct csio_rnode *);
+
+#endif /* ifndef __CSIO_RNODE_H__ */
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
new file mode 100644
index 000000000000..ddd38e5eb0e7
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -0,0 +1,2555 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <asm/page.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_scsi.h"
+#include "csio_init.h"
+
+int csio_scsi_eqsize = 65536;
+int csio_scsi_iqlen = 128;
+int csio_scsi_ioreqs = 2048;
+uint32_t csio_max_scan_tmo;
+uint32_t csio_delta_scan_tmo = 5;
+int csio_lun_qdepth = 32;
+
+static int csio_ddp_descs = 128;
+
+static int csio_do_abrt_cls(struct csio_hw *,
+ struct csio_ioreq *, bool);
+
+static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
+
+/*
+ * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
+ * @ioreq: The I/O request
+ * @sld: Level information
+ *
+ * Should be called with lock held.
+ *
+ */
+static bool
+csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
+{
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);
+
+ switch (sld->level) {
+ case CSIO_LEV_LUN:
+ if (scmnd == NULL)
+ return false;
+
+ return ((ioreq->lnode == sld->lnode) &&
+ (ioreq->rnode == sld->rnode) &&
+ ((uint64_t)scmnd->device->lun == sld->oslun));
+
+ case CSIO_LEV_RNODE:
+ return ((ioreq->lnode == sld->lnode) &&
+ (ioreq->rnode == sld->rnode));
+ case CSIO_LEV_LNODE:
+ return (ioreq->lnode == sld->lnode);
+ case CSIO_LEV_ALL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * csio_scsi_gather_active_ios - Gather active I/Os based on level
+ * @scm: SCSI module
+ * @sld: Level information
+ * @dest: The queue where these I/Os have to be gathered.
+ *
+ * Should be called with lock held.
+ */
+static void
+csio_scsi_gather_active_ios(struct csio_scsim *scm,
+ struct csio_scsi_level_data *sld,
+ struct list_head *dest)
+{
+ struct list_head *tmp, *next;
+
+ if (list_empty(&scm->active_q))
+ return;
+
+ /* Just splice the entire active_q into dest */
+ if (sld->level == CSIO_LEV_ALL) {
+ list_splice_tail_init(&scm->active_q, dest);
+ return;
+ }
+
+ list_for_each_safe(tmp, next, &scm->active_q) {
+ if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
+ list_del_init(tmp);
+ list_add_tail(tmp, dest);
+ }
+ }
+}
+
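+/*
+ * csio_scsi_itnexus_loss_error - Identify errors implying I-T nexus loss.
+ * @error: FW error code from the WR status.
+ *
+ * Returns 1 for firmware errors that indicate the initiator-target nexus
+ * is gone (link down, remote device lost or logged out), 0 otherwise.
+ */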
+static inline bool
+csio_scsi_itnexus_loss_error(uint16_t error)
+{
+ switch (error) {
+ case FW_ERR_LINK_DOWN:
+ case FW_RDEV_NOT_READY:
+ case FW_ERR_RDEV_LOST:
+ case FW_ERR_RDEV_LOGO:
+ case FW_ERR_RDEV_IMPL_LOGO:
+ return 1;
+ }
+ return 0;
+}
+
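+/*
+ * csio_scsi_tag - Map the midlayer queue tag to an FCP task attribute.
+ * @scmnd: SCSI command
+ * @tag: returned task attribute
+ * @hq: value to use for HEAD_OF_QUEUE_TAG
+ * @oq: value to use for ORDERED_QUEUE_TAG
+ * @sq: value to use for any other tag type
+ *
+ * Untagged commands get a task attribute of 0.
+ */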
+static inline void
+csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq,
+ uint8_t oq, uint8_t sq)
+{
+ char stag[2];
+
+ if (scsi_populate_tag_msg(scmnd, stag)) {
+ switch (stag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ *tag = hq;
+ break;
+ case ORDERED_QUEUE_TAG:
+ *tag = oq;
+ break;
+ default:
+ *tag = sq;
+ break;
+ }
+ } else
+ *tag = 0;
+}
+
+/*
+ * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ *
+ * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
+ */
+static inline void
+csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
+{
+ struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ /* Check for Task Management */
+ if (likely(scmnd->SCp.Message == 0)) {
+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
+ fcp_cmnd->fc_tm_flags = 0;
+ fcp_cmnd->fc_cmdref = 0;
+ fcp_cmnd->fc_pri_ta = 0;
+
+ memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
+ csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta,
+ FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE);
+ fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
+
+ if (req->nsge) {
+ if (req->datadir == DMA_TO_DEVICE)
+ fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
+ else
+ fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
+ } else {
+ fcp_cmnd->fc_flags = 0;
+ }
+ } else {
+ memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
+ fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
+ }
+}
+
+/*
+ * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry)
+ *
+ * Wrapper for populating fw_scsi_cmd_wr.
+ */
+static inline void
+csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) |
+ FW_SCSI_CMD_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(
+ DIV_ROUND_UP(size, 16)));
+
+ wr->cookie = (uintptr_t) req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t) req->tmo;
+ wr->r3 = 0;
+ memset(&wr->r5, 0, 8);
+
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r6 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r4_lo[0] = 0;
+ wr->u.fcoe.r4_lo[1] = 0;
+
+ /* Frame a FCP command */
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
+ sizeof(struct fw_scsi_cmd_wr)));
+}
+
+#define CSIO_SCSI_CMD_WR_SZ(_imm) \
+ (sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \
+ ALIGN((_imm), 16)) /* Immed data */
+
+#define CSIO_SCSI_CMD_WR_SZ_16(_imm) \
+ (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
+
+/*
+ * csio_scsi_cmd - Create a SCSI CMD WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the egress queue and initializes it with the SCSI CMD WR.
+ *
+ */
+static inline void
+csio_scsi_cmd(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (unlikely(req->drv_status != 0))
+ return;
+
+ if (wrp.size1 >= size) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_cmd_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+}
+
+/*
+ * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
+ * @hw: HW module
+ * @req: IO request
+ * @sgl: ULP TX SGL pointer.
+ *
+ */
+static inline void
+csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
+ struct ulptx_sgl *sgl)
+{
+ struct ulptx_sge_pair *sge_pair = NULL;
+ struct scatterlist *sgel;
+ uint32_t i = 0;
+ uint32_t xfer_len;
+ struct list_head *tmp;
+ struct csio_dma_buf *dma_buf;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE |
+ ULPTX_NSGE(req->nsge));
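+ /* DSGL layout: the first SGE occupies addr0/len0 in the header; the
+ * remaining SGEs are packed two at a time into the ulptx_sge_pair
+ * entries that follow it.
+ */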
+ /* Now add the data SGLs */
+ if (likely(!req->dcopy)) {
+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+ if (i == 0) {
+ sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
+ sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
+ continue;
+ }
+ if ((i - 1) & 0x1) {
+ sge_pair->addr[1] = cpu_to_be64(
+ sg_dma_address(sgel));
+ sge_pair->len[1] = cpu_to_be32(
+ sg_dma_len(sgel));
+ sge_pair++;
+ } else {
+ sge_pair->addr[0] = cpu_to_be64(
+ sg_dma_address(sgel));
+ sge_pair->len[0] = cpu_to_be32(
+ sg_dma_len(sgel));
+ }
+ }
+ } else {
+ /* Program sg elements with driver's DDP buffer */
+ xfer_len = scsi_bufflen(scmnd);
+ list_for_each(tmp, &req->gen_list) {
+ dma_buf = (struct csio_dma_buf *)tmp;
+ if (i == 0) {
+ sgl->addr0 = cpu_to_be64(dma_buf->paddr);
+ sgl->len0 = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
+ } else if ((i - 1) & 0x1) {
+ sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
+ sge_pair->len[1] = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ sge_pair++;
+ } else {
+ sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
+ sge_pair->len[0] = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ }
+ xfer_len -= min(xfer_len, dma_buf->len);
+ i++;
+ }
+ }
+}
+
+/*
+ * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
+ * @req: IO req structure.
+ * @wrp: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
+ *
+ * Wrapper for populating fw_scsi_read_wr.
+ */
+static inline void
+csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
+ struct ulptx_sgl *sgl;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) |
+ FW_SCSI_READ_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ wr->cookie = (uintptr_t)req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t)(req->tmo);
+ wr->use_xfer_cnt = 1;
+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r4 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r3_lo[0] = 0;
+ wr->u.fcoe.r3_lo[1] = 0;
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_read_wr)));
+
+ /* Move WR pointer past command and immediate data */
+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));
+
+ /* Fill in the DSGL */
+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);
+}
+
+/*
+ * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
+ * @req: IO req structure.
+ * @wrp: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
+ *
+ * Wrapper for populating fw_scsi_write_wr.
+ */
+static inline void
+csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
+ struct ulptx_sgl *sgl;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) |
+ FW_SCSI_WRITE_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ wr->cookie = (uintptr_t)req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t)(req->tmo);
+ wr->use_xfer_cnt = 1;
+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r4 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r3_lo[0] = 0;
+ wr->u.fcoe.r3_lo[1] = 0;
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_write_wr)));
+
+ /* Move WR pointer past command and immediate data */
+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));
+
+ /* Fill in the DSGL */
+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);
+}
+
+/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
+#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \
+do { \
+ (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \
+ ALIGN((imm), 16) + /* Immed data */ \
+ sizeof(struct ulptx_sgl); /* ulptx_sgl */ \
+ \
+ if (unlikely((req)->nsge > 1)) \
+ (sz) += (sizeof(struct ulptx_sge_pair) * \
+ (ALIGN(((req)->nsge - 1), 2) / 2)); \
+ /* Data SGE */ \
+} while (0)
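+
+/*
+ * Note on CSIO_SCSI_DATA_WRSZ: the first data SGE is carried within
+ * struct ulptx_sgl itself; every additional pair of SGEs (rounded up)
+ * needs one ulptx_sge_pair, hence the ALIGN((req)->nsge - 1, 2) / 2 term.
+ */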
+
+/*
+ * csio_scsi_read - Create a SCSI READ WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the egress queue and initializes it with the
+ * SCSI READ WR.
+ *
+ */
+static inline void
+csio_scsi_read(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ uint32_t size;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
+ size = ALIGN(size, 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (likely(req->drv_status == 0)) {
+ if (likely(wrp.size1 >= size)) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_read_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_read_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+ }
+}
+
+/*
+ * csio_scsi_write - Create a SCSI WRITE WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the egress queue and initializes it with the
+ * SCSI WRITE WR.
+ *
+ */
+static inline void
+csio_scsi_write(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ uint32_t size;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
+ size = ALIGN(size, 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (likely(req->drv_status == 0)) {
+ if (likely(wrp.size1 >= size)) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_write_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_write_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+ }
+}
+
+/*
+ * csio_setup_ddp - Set up DDP buffers for a read request.
+ * @scsim: SCSI module.
+ * @req: IO req structure.
+ *
+ * Checks whether the SGL data buffers are virtually contiguous, as required
+ * for DDP. If they are, the driver posts the SGLs in the WR directly;
+ * otherwise it posts internal DDP buffers for the request.
+ */
+static inline void
+csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
+{
+#ifdef __CSIO_DEBUG__
+ struct csio_hw *hw = req->lnode->hwp;
+#endif
+ struct scatterlist *sgel = NULL;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+ uint64_t sg_addr = 0;
+ uint32_t ddp_pagesz = 4096;
+ uint32_t buf_off;
+ struct csio_dma_buf *dma_buf = NULL;
+ uint32_t alloc_len = 0;
+ uint32_t xfer_len = 0;
+ uint32_t sg_len = 0;
+ uint32_t i;
+
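+ /*
+ * DDP requires the SGL to describe one virtually contiguous,
+ * page-aligned region: every SGE except the first must start on a
+ * page boundary, and every SGE except the last must end on one.
+ */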
+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+ sg_addr = sg_dma_address(sgel);
+ sg_len = sg_dma_len(sgel);
+
+ buf_off = sg_addr & (ddp_pagesz - 1);
+
+ /* Except for the 1st buffer, all buffer addresses must be page-aligned */
+ if (i != 0 && buf_off) {
+ csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
+ sg_addr, sg_len);
+ goto unaligned;
+ }
+
+ /* Except for the last buffer, all buffers must end on a page boundary */
+ if ((i != (req->nsge - 1)) &&
+ ((buf_off + sg_len) & (ddp_pagesz - 1))) {
+ csio_dbg(hw,
+ "SGL addr not ending on page boundary"
+ "(%llx:%d)\n", sg_addr, sg_len);
+ goto unaligned;
+ }
+ }
+
+ /* SGL's are virtually contiguous. HW will DDP to SGLs */
+ req->dcopy = 0;
+ csio_scsi_read(req);
+
+ return;
+
+unaligned:
+ CSIO_INC_STATS(scsim, n_unaligned);
+ /*
+ * For unaligned SGLs, the driver allocates internal DDP buffers.
+ * Once the command completes, data is copied from the DDP buffers to the SGLs.
+ */
+ req->dcopy = 1;
+
+ /* Use gen_list to store the DDP buffers */
+ INIT_LIST_HEAD(&req->gen_list);
+ xfer_len = scsi_bufflen(scmnd);
+
+ i = 0;
+ /* Allocate ddp buffers for this request */
+ while (alloc_len < xfer_len) {
+ dma_buf = csio_get_scsi_ddp(scsim);
+ if (dma_buf == NULL || i > scsim->max_sge) {
+ req->drv_status = -EBUSY;
+ break;
+ }
+ alloc_len += dma_buf->len;
+ /* Added to IO req */
+ list_add_tail(&dma_buf->list, &req->gen_list);
+ i++;
+ }
+
+ if (!req->drv_status) {
+ /* set number of ddp bufs used */
+ req->nsge = i;
+ csio_scsi_read(req);
+ return;
+ }
+
+ /* release dma descs */
+ if (i > 0)
+ csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
+}
+
+/*
+ * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ * @size: Size of WR
+ * @abort: abort OR close
+ *
+ * Wrapper for populating fw_scsi_abrt_cls_wr.
+ */
+static inline void
+csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
+ bool abort)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(
+ DIV_ROUND_UP(size, 16)));
+
+ wr->cookie = (uintptr_t) req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t) req->tmo;
+ /* 0 for CHK_ALL_IO tells FW to look up t_cookie */
+ wr->sub_opcode_to_chk_all_io =
+ (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
+ FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
+ wr->r3[0] = 0;
+ wr->r3[1] = 0;
+ wr->r3[2] = 0;
+ wr->r3[3] = 0;
+ /* Since we re-use the same ioreq for abort as well */
+ wr->t_cookie = (uintptr_t) req;
+}
+
+static inline void
+csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
+{
+ struct csio_wr_pair wrp;
+ struct csio_hw *hw = req->lnode->hwp;
+ uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (req->drv_status != 0)
+ return;
+
+ if (wrp.size1 >= size) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+}
+
+/*****************************************************************************/
+/* START: SCSI SM */
+/*****************************************************************************/
+static void
+csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_START_IO:
+
+ if (req->nsge) {
+ if (req->datadir == DMA_TO_DEVICE) {
+ req->dcopy = 0;
+ csio_scsi_write(req);
+ } else
+ csio_setup_ddp(scsim, req);
+ } else {
+ csio_scsi_cmd(req);
+ }
+
+ if (likely(req->drv_status == 0)) {
+ /* change state and enqueue on active_q */
+ csio_set_state(&req->sm, csio_scsis_io_active);
+ list_add_tail(&req->sm.sm_list, &scsim->active_q);
+ csio_wr_issue(hw, req->eq_idx, false);
+ CSIO_INC_STATS(scsim, n_active);
+
+ return;
+ }
+ break;
+
+ case CSIO_SCSIE_START_TM:
+ csio_scsi_cmd(req);
+ if (req->drv_status == 0) {
+ /*
+ * NOTE: We collect the affected I/Os prior to issuing
+ * LUN reset, and not after it. This is to prevent
+ * aborting I/Os that get issued after the LUN reset,
+ * but prior to LUN reset completion (in the event that
+ * the host stack has not blocked I/Os to a LUN that is
+ * being reset).
+ */
+ csio_set_state(&req->sm, csio_scsis_tm_active);
+ list_add_tail(&req->sm.sm_list, &scsim->active_q);
+ csio_wr_issue(hw, req->eq_idx, false);
+ CSIO_INC_STATS(scsim, n_tm_active);
+ }
+ return;
+
+ case CSIO_SCSIE_ABORT:
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * NOTE:
+ * We could get here due to:
+ * - a window in the cleanup path of the SCSI module
+ * (csio_scsi_abort_io()). Please see NOTE in this function.
+ * - a window in the time we tried to issue an abort/close
+ * of a request to FW, and the FW completed the request
+ * itself.
+ * Print a message for now, and return INVAL either way.
+ */
+ req->drv_status = -EINVAL;
+ csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn;
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ /*
+ * In MSI-X mode, with multiple queues, the SCSI completions
+ * could reach us sooner than the FW events sent to indicate
+ * I-T nexus loss (link down, remote device logo, etc.). We
+ * don't want to return such I/Os to the upper layer
+ * immediately, since we wouldn't have reported the I-T nexus
+ * loss itself. This forces us to serialize such completions
+ * with the reporting of the I-T nexus loss. Therefore, we
+ * internally queue up such completions in the rnode.
+ * The reporting of I-T nexus loss to the upper layer is then
+ * followed by the returning of the I/Os in this internal queue.
+ * Having another state along with another queue helps us take
+ * actions for events such as an ABORT received while we are
+ * in this rnode queue.
+ */
+ if (unlikely(req->wr_status != FW_SUCCESS)) {
+ rn = req->rnode;
+ /*
+ * FW says the remote device is lost, but the rnode
+ * doesn't reflect it yet.
+ */
+ if (csio_scsi_itnexus_loss_error(req->wr_status) &&
+ csio_is_rnode_ready(rn)) {
+ csio_set_state(&req->sm,
+ csio_scsis_shost_cmpl_await);
+ list_add_tail(&req->sm.sm_list,
+ &rn->host_cmpl_q);
+ }
+ }
+
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ csio_scsi_abrt_cls(req, SCSI_ABORT);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_aborting);
+ }
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ csio_scsi_abrt_cls(req, SCSI_CLOSE);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_closing);
+ }
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ CSIO_DEC_STATS(scm, n_tm_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ csio_scsi_abrt_cls(req, SCSI_ABORT);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_aborting);
+ }
+ break;
+
+
+ case CSIO_SCSIE_CLOSE:
+ csio_scsi_abrt_cls(req, SCSI_CLOSE);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_closing);
+ }
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_tm_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ csio_dbg(hw,
+ "ioreq %p recvd cmpltd (wr_status:%d) "
+ "in aborting st\n", req, req->wr_status);
+ /*
+ * Use -ECANCELED to explicitly tell the ABORTED event that
+ * the original I/O was returned to the driver by FW.
+ * We don't really care if the I/O was returned with success by
+ * FW (because the ABORT and completion of the I/O crossed each
+ * other), or any other return value. Once we are in aborting
+ * state, the success or failure of the I/O is unimportant to
+ * us.
+ */
+ req->drv_status = -ECANCELED;
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ CSIO_INC_STATS(scm, n_abrt_dups);
+ break;
+
+ case CSIO_SCSIE_ABORTED:
+
+ csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
+ req, req->wr_status, req->drv_status);
+ /*
+ * Check if original I/O WR completed before the Abort
+ * completion.
+ */
+ if (req->drv_status != -ECANCELED) {
+ csio_warn(hw,
+ "Abort completed before original I/O,"
+ " req:%p\n", req);
+ CSIO_DB_ASSERT(0);
+ }
+
+ /*
+ * There are the following possible scenarios:
+ * 1. The abort completed successfully, FW returned FW_SUCCESS.
+ * 2. The completion of an I/O and the receipt of
+ * abort for that I/O by the FW crossed each other.
+ * The FW returned FW_EINVAL. The original I/O would have
+ * returned with FW_SUCCESS or any other SCSI error.
+ * 3. The FW couldn't send the abort out on the wire, as there
+ *    was an I-T nexus loss (link down, remote device logged
+ *    out, etc.). FW sent back an appropriate I-T nexus loss
+ *    status for the abort.
+ * 4. FW sent an abort, but the abort timed out (the remote
+ *    device didn't respond). FW replied back with
+ *    FW_SCSI_ABORT_TIMEDOUT.
+ * 5. FW couldn't genuinely abort the request for some reason,
+ *    and sent us an error.
+ *
+ * The first 3 scenarios are treated as successful abort
+ * operations by the host, while the last 2 are failed attempts
+ * to abort. Manipulate the return value of the request
+ * appropriately, so that the host can convey these results
+ * back to the upper layer.
+ */
+ if ((req->wr_status == FW_SUCCESS) ||
+ (req->wr_status == FW_EINVAL) ||
+ csio_scsi_itnexus_loss_error(req->wr_status))
+ req->wr_status = FW_SCSI_ABORT_REQUESTED;
+
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * We can receive this event from the module
+ * cleanup paths, if the FW forgot to reply to the ABORT WR
+ * and left this ioreq in this state. For now, just ignore
+ * the event. The CLOSE event is sent to this state, as
+ * the LINK may have already gone down.
+ */
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ csio_dbg(hw,
+ "ioreq %p recvd cmpltd (wr_status:%d) "
+ "in closing st\n", req, req->wr_status);
+ /*
+ * Use -ECANCELED to explicitly tell the CLOSED event that
+ * the original I/O was returned to the driver by FW.
+ * We don't really care if the I/O was returned with success by
+ * FW (because the CLOSE and completion of the I/O crossed each
+ * other), or any other return value. Once we are in the closing
+ * state, the success or failure of the I/O is unimportant to
+ * us.
+ */
+ req->drv_status = -ECANCELED;
+ break;
+
+ case CSIO_SCSIE_CLOSED:
+ /*
+ * Check if original I/O WR completed before the Close
+ * completion.
+ */
+ if (req->drv_status != -ECANCELED) {
+ csio_fatal(hw,
+ "Close completed before original I/O,"
+ " req:%p\n", req);
+ CSIO_DB_ASSERT(0);
+ }
+
+ /*
+ * Either the close succeeded, or we issued the close to FW at
+ * the same time FW completed it to us. Either way, the I/O
+ * is closed.
+ */
+ CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
+ (req->wr_status == FW_EINVAL));
+ req->wr_status = FW_SCSI_CLOSE_REQUESTED;
+
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ switch (evt) {
+ case CSIO_SCSIE_ABORT:
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * Just succeed the abort request, and hope that
+ * the remote device unregister path will clean up
+ * this I/O to the upper layer within a sane
+ * amount of time.
+ */
+ /*
+ * A close can come in during a LINK DOWN. The FW would have
+ * returned the I/O to us, but not the remote-device-lost
+ * FW event. In this interval, if the I/O times out at the upper
+ * layer, a close can come in. Take the same action as for abort:
+ * return success, and hope that the remote device unregister
+ * path will clean up this I/O. If the FW still doesn't send
+ * the message, the close times out, and the upper layer resorts
+ * to the next level of error recovery.
+ */
+ req->drv_status = 0;
+ break;
+ case CSIO_SCSIE_DRVCLEANUP:
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+ default:
+ csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
+ evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+/*
+ * csio_scsi_cmpl_handler - WR completion handler for SCSI.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ * @priv: Private object
+ * @scsiwr: Pointer to SCSI WR.
+ *
+ * This is the WR completion handler called per completion from the
+ * ISR. It is called with lock held. It walks past the RSS and CPL message
+ * header where the actual WR is present.
+ * It then gets the status, WR handle (ioreq pointer) and the len of
+ * the WR, based on WR opcode. Only on a non-good status is the entire
+ * WR copied into the WR cache (ioreq->fw_wr).
+ * The ioreq corresponding to the WR is returned to the caller.
+ * NOTE: The SCSI queue doesn't allocate a freelist today, hence
+ * no freelist buffer is expected.
+ */
+struct csio_ioreq *
+csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
+{
+ struct csio_ioreq *ioreq = NULL;
+ struct cpl_fw6_msg *cpl;
+ uint8_t *tempwr;
+ uint8_t status;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ /* skip RSS header */
+ cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));
+
+ if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
+ csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
+ cpl->opcode);
+ CSIO_INC_STATS(scm, n_inval_cplop);
+ return NULL;
+ }
+
+ tempwr = (uint8_t *)(cpl->data);
+ status = csio_wr_status(tempwr);
+ *scsiwr = tempwr;
+
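+ /*
+ * The first byte of the embedded WR is its opcode; the cookie field
+ * common to all SCSI WR formats carries the ioreq pointer stored at
+ * submission time.
+ */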
+ if (likely((*tempwr == FW_SCSI_READ_WR) ||
+ (*tempwr == FW_SCSI_WRITE_WR) ||
+ (*tempwr == FW_SCSI_CMD_WR))) {
+ ioreq = (struct csio_ioreq *)((uintptr_t)
+ (((struct fw_scsi_read_wr *)tempwr)->cookie));
+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));
+
+ ioreq->wr_status = status;
+
+ return ioreq;
+ }
+
+ if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
+ ioreq = (struct csio_ioreq *)((uintptr_t)
+ (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));
+
+ ioreq->wr_status = status;
+ return ioreq;
+ }
+
+ csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
+ CSIO_INC_STATS(scm, n_inval_scsiop);
+ return NULL;
+}
+
+/*
+ * csio_scsi_cleanup_io_q - Cleanup the given queue.
+ * @scm: SCSI module.
+ * @q: Queue to be cleaned up.
+ *
+ * Called with lock held. Has to exit with lock held.
+ */
+void
+csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
+{
+ struct csio_hw *hw = scm->hw;
+ struct csio_ioreq *ioreq;
+ struct list_head *tmp, *next;
+ struct scsi_cmnd *scmnd;
+
+ /* Call back the completion routines of the I/Os on the given queue */
+ list_for_each_safe(tmp, next, q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ csio_scsi_drvcleanup(ioreq);
+ list_del_init(&ioreq->sm.sm_list);
+ scmnd = csio_scsi_cmnd(ioreq);
+ spin_unlock_irq(&hw->lock);
+
+ /*
+ * Upper layers may have cleared this command, hence this
+ * check to avoid accessing stale references.
+ */
+ if (scmnd != NULL)
+ ioreq->io_cbfn(hw, ioreq);
+
+ spin_lock_irq(&scm->freelist_lock);
+ csio_put_scsi_ioreq(scm, ioreq);
+ spin_unlock_irq(&scm->freelist_lock);
+
+ spin_lock_irq(&hw->lock);
+ }
+}
+
+#define CSIO_SCSI_ABORT_Q_POLL_MS 2000
+
+static void
+csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
+{
+ struct csio_lnode *ln = ioreq->lnode;
+ struct csio_hw *hw = ln->hwp;
+ int ready = 0;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ int rv;
+
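+ /*
+ * If the ioreq no longer maps to this command, the I/O completed
+ * (racing with this abort) before we got here; nothing to abort.
+ */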
+ if (csio_scsi_cmnd(ioreq) != scmnd) {
+ CSIO_INC_STATS(scsim, n_abrt_race_comp);
+ return;
+ }
+
+ ready = csio_is_lnode_ready(ln);
+
+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
+ if (rv != 0) {
+ if (ready)
+ CSIO_INC_STATS(scsim, n_abrt_busy_error);
+ else
+ CSIO_INC_STATS(scsim, n_cls_busy_error);
+ }
+}
+
+/*
+ * csio_scsi_abort_io_q - Abort all I/Os on given queue
+ * @scm: SCSI module.
+ * @q: Queue to abort.
+ * @tmo: Timeout in ms
+ *
+ * Attempt to abort all I/Os on given queue, and wait for a max
+ * of tmo milliseconds for them to complete. Returns success
+ * if all I/Os are aborted. Else returns -ETIMEDOUT.
+ * Should be entered with lock held. Exits with lock held.
+ * NOTE:
+ * Lock has to be held across the loop that aborts I/Os, since dropping the lock
+ * in between can cause the list to be corrupted. As a result, the caller
+ * of this function has to ensure that the number of I/Os to be aborted
+ * is finite enough to not cause lock-held-for-too-long issues.
+ */
+static int
+csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
+{
+ struct csio_hw *hw = scm->hw;
+ struct list_head *tmp, *next;
+ int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
+ struct scsi_cmnd *scmnd;
+
+ if (list_empty(q))
+ return 0;
+
+ csio_dbg(hw, "Aborting SCSI I/Os\n");
+
+ /* Now abort/close I/Os in the queue passed */
+ list_for_each_safe(tmp, next, q) {
+ scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
+ csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
+ }
+
+ /* Wait till all active I/Os are completed/aborted/closed */
+ while (!list_empty(q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all aborts completed */
+ if (list_empty(q))
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
+ * @scm: SCSI module.
+ * @abort: abort required.
+ * Called with lock held, should exit with lock held.
+ * Can sleep when waiting for I/Os to complete.
+ */
+int
+csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
+{
+ struct csio_hw *hw = scm->hw;
+ int rv = 0;
+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
+
+ /* No I/Os pending */
+ if (list_empty(&scm->active_q))
+ return 0;
+
+ /* Wait until all active I/Os are completed */
+ while (!list_empty(&scm->active_q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all I/Os completed */
+ if (list_empty(&scm->active_q))
+ return 0;
+
+ /* Else abort */
+ if (abort) {
+ rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
+ if (rv == 0)
+ return rv;
+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
+ }
+
+ csio_scsi_cleanup_io_q(scm, &scm->active_q);
+
+ CSIO_DB_ASSERT(list_empty(&scm->active_q));
+
+ return rv;
+}
+
+/*
+ * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
+ * @scm: SCSI module.
+ * @lnode: lnode
+ *
+ * Called with lock held, should exit with lock held.
+ * Can sleep (with dropped lock) when waiting for I/Os to complete.
+ */
+int
+csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
+{
+ struct csio_hw *hw = scm->hw;
+ struct csio_scsi_level_data sld;
+ int rv;
+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
+
+ csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);
+
+ sld.level = CSIO_LEV_LNODE;
+ sld.lnode = ln;
+ INIT_LIST_HEAD(&ln->cmpl_q);
+ csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);
+
+ /* No I/Os pending on this lnode */
+ if (list_empty(&ln->cmpl_q))
+ return 0;
+
+ /* Wait until all active I/Os on this lnode are completed */
+ while (!list_empty(&ln->cmpl_q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all I/Os completed */
+ if (list_empty(&ln->cmpl_q))
+ return 0;
+
+ csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);
+
+ /* I/Os are pending, abort them */
+ rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
+ if (rv != 0) {
+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
+ csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
+ }
+
+ CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));
+
+ return rv;
+}
+
+static ssize_t
+csio_show_hw_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (csio_is_hw_ready(hw))
+ return snprintf(buf, PAGE_SIZE, "ready\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "not ready\n");
+}
+
+/* Device reset */
+static ssize_t
+csio_device_reset(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (*buf != '1')
+ return -EINVAL;
+
+ /* Delete NPIV lnodes */
+ csio_lnodes_exit(hw, 1);
+
+ /* Block upper IOs */
+ csio_lnodes_block_request(hw);
+
+ spin_lock_irq(&hw->lock);
+ csio_hw_reset(hw);
+ spin_unlock_irq(&hw->lock);
+
+ /* Unblock upper IOs */
+ csio_lnodes_unblock_request(hw);
+ return count;
+}
+
+/* disable port */
+static ssize_t
+csio_disable_port(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ bool disable;
+
+ if (*buf == '1' || *buf == '0')
+ disable = (*buf == '1') ? true : false;
+ else
+ return -EINVAL;
+
+ /* Block upper IOs */
+ csio_lnodes_block_by_port(hw, ln->portid);
+
+ spin_lock_irq(&hw->lock);
+ csio_disable_lnodes(hw, ln->portid, disable);
+ spin_unlock_irq(&hw->lock);
+
+ /* Unblock upper IOs */
+ csio_lnodes_unblock_by_port(hw, ln->portid);
+ return count;
+}
+
+/* Show debug level */
+static ssize_t
+csio_show_dbg_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
+}
+
+/* Store debug level */
+static ssize_t
+csio_store_dbg_level(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ uint32_t dbg_level = 0;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &dbg_level) != 1)
+ return -EINVAL;
+
+ ln->params.log_level = dbg_level;
+ hw->params.log_level = dbg_level;
+
+ return count;
+}
+
+static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
+static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset);
+static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port);
+static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
+ csio_store_dbg_level);
+
+static struct device_attribute *csio_fcoe_lport_attrs[] = {
+ &dev_attr_hw_state,
+ &dev_attr_device_reset,
+ &dev_attr_disable_port,
+ &dev_attr_dbg_level,
+ NULL,
+};
+
+static ssize_t
+csio_show_num_reg_rnodes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
+}
+
+static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
+
+static struct device_attribute *csio_fcoe_vport_attrs[] = {
+ &dev_attr_num_reg_rnodes,
+ &dev_attr_dbg_level,
+ NULL,
+};
+
+static inline uint32_t
+csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct scatterlist *sg;
+ uint32_t bytes_left;
+ uint32_t bytes_copy;
+ uint32_t buf_off = 0;
+ uint32_t start_off = 0;
+ uint32_t sg_off = 0;
+ void *sg_addr;
+ void *buf_addr;
+ struct csio_dma_buf *dma_buf;
+
+ bytes_left = scsi_bufflen(scmnd);
+ sg = scsi_sglist(scmnd);
+ dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
+
+ /* Copy data from driver buffer to SGs of SCSI CMD */
+ while (bytes_left > 0 && sg && dma_buf) {
+ if (buf_off >= dma_buf->len) {
+ buf_off = 0;
+ dma_buf = (struct csio_dma_buf *)
+ csio_list_next(dma_buf);
+ continue;
+ }
+
+ if (start_off >= sg->length) {
+ start_off -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+
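+ /*
+ * Bound the copy by what remains in the current DDP buffer, the
+ * current SGE, and the current page of that SGE, since
+ * kmap_atomic() maps a single page at a time.
+ */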
+ buf_addr = dma_buf->vaddr + buf_off;
+ sg_off = sg->offset + start_off;
+ bytes_copy = min((dma_buf->len - buf_off),
+ sg->length - start_off);
+ bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
+ bytes_copy);
+
+ sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
+ if (!sg_addr) {
+ csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
+ sg, req);
+ break;
+ }
+
+ csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
+ sg_addr, sg_off, buf_addr, bytes_copy);
+ memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
+ kunmap_atomic(sg_addr);
+
+ start_off += bytes_copy;
+ buf_off += bytes_copy;
+ bytes_left -= bytes_copy;
+ }
+
+ if (bytes_left > 0)
+ return DID_ERROR;
+ else
+ return DID_OK;
+}
+
+/*
+ * csio_scsi_err_handler - SCSI error handler.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ */
+static inline void
+csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+ struct fcp_resp_with_ext *fcp_resp;
+ struct fcp_resp_rsp_info *rsp_info;
+ struct csio_dma_buf *dma_buf;
+ uint8_t flags, scsi_status = 0;
+ uint32_t host_status = DID_OK;
+ uint32_t rsp_len = 0, sns_len = 0;
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+
+
+ switch (req->wr_status) {
+ case FW_HOSTERROR:
+ if (unlikely(!csio_is_hw_ready(hw)))
+ return;
+
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_hosterror);
+
+ break;
+ case FW_SCSI_RSP_ERR:
+ dma_buf = &req->dma_buf;
+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
+ flags = fcp_resp->resp.fr_flags;
+ scsi_status = fcp_resp->resp.fr_status;
+
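+ /*
+ * If an FCP_RSP_INFO field is present, its length must be one of
+ * the values FCP allows (0, 4 or 8 bytes) and the rsp_code must be
+ * FCP_TMF_CMPL; anything else is treated as an error.
+ */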
+ if (flags & FCP_RSP_LEN_VAL) {
+ rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
+ if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
+ (rsp_info->rsp_code != FCP_TMF_CMPL)) {
+ host_status = DID_ERROR;
+ goto out;
+ }
+ }
+
+ if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
+ sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
+ if (sns_len > SCSI_SENSE_BUFFERSIZE)
+ sns_len = SCSI_SENSE_BUFFERSIZE;
+
+ memcpy(cmnd->sense_buffer,
+ &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
+ CSIO_INC_STATS(scm, n_autosense);
+ }
+
+ scsi_set_resid(cmnd, 0);
+
+ /* Under run */
+ if (flags & FCP_RESID_UNDER) {
+ scsi_set_resid(cmnd,
+ be32_to_cpu(fcp_resp->ext.fr_resid));
+
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (scsi_status == SAM_STAT_GOOD) &&
+ ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
+ < cmnd->underflow))
+ host_status = DID_ERROR;
+ } else if (flags & FCP_RESID_OVER)
+ host_status = DID_ERROR;
+
+ CSIO_INC_STATS(scm, n_rsperror);
+ break;
+
+ case FW_SCSI_OVER_FLOW_ERR:
+ csio_warn(hw,
+ "Over-flow error,cmnd:0x%x expected len:0x%x"
+ " resid:0x%x\n", cmnd->cmnd[0],
+ scsi_bufflen(cmnd), scsi_get_resid(cmnd));
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_ovflerror);
+ break;
+
+ case FW_SCSI_UNDER_FLOW_ERR:
+ csio_warn(hw,
+ "Under-flow error,cmnd:0x%x expected"
+ " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n",
+ cmnd->cmnd[0], scsi_bufflen(cmnd),
+ scsi_get_resid(cmnd), cmnd->device->lun,
+ rn->flowid);
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_unflerror);
+ break;
+
+ case FW_SCSI_ABORT_REQUESTED:
+ case FW_SCSI_ABORTED:
+ case FW_SCSI_CLOSE_REQUESTED:
+ csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
+ cmnd->cmnd[0],
+ (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
+ "closed" : "aborted");
+ /*
+ * csio_eh_abort_handler checks this value to
+ * succeed or fail the abort request.
+ */
+ host_status = DID_REQUEUE;
+ if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
+ CSIO_INC_STATS(scm, n_closed);
+ else
+ CSIO_INC_STATS(scm, n_aborted);
+ break;
+
+ case FW_SCSI_ABORT_TIMEDOUT:
+ /* FW timed out the abort itself */
+ csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
+ req, cmnd, req->wr_status);
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_abrt_timedout);
+ break;
+
+ case FW_RDEV_NOT_READY:
+ /*
+ * In firmware, an RDEV can get into this state
+ * temporarily, before moving into the disappeared/lost
+ * state. So the driver should complete the request as if
+ * the device had disappeared.
+ */
+ CSIO_INC_STATS(scm, n_rdev_nr_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_LOST:
+ CSIO_INC_STATS(scm, n_rdev_lost_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_LOGO:
+ CSIO_INC_STATS(scm, n_rdev_logo_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_IMPL_LOGO:
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_LINK_DOWN:
+ CSIO_INC_STATS(scm, n_link_down_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_FCOE_NO_XCHG:
+ CSIO_INC_STATS(scm, n_no_xchg_error);
+ host_status = DID_ERROR;
+ break;
+
+ default:
+ csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
+ req->wr_status, req, cmnd);
+ CSIO_DB_ASSERT(0);
+
+ CSIO_INC_STATS(scm, n_unknown_error);
+ host_status = DID_ERROR;
+ break;
+ }
+
+out:
+ if (req->nsge > 0)
+ scsi_dma_unmap(cmnd);
+
+ cmnd->result = (((host_status) << 16) | scsi_status);
+ cmnd->scsi_done(cmnd);
+
+ /* Wake up waiting threads */
+ csio_scsi_cmnd(req) = NULL;
+ complete_all(&req->cmplobj);
+}
+
+/*
+ * csio_scsi_cbfn - SCSI callback function.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ */
+static void
+csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ uint8_t scsi_status = SAM_STAT_GOOD;
+ uint32_t host_status = DID_OK;
+
+ if (likely(req->wr_status == FW_SUCCESS)) {
+ if (req->nsge > 0) {
+ scsi_dma_unmap(cmnd);
+ if (req->dcopy)
+ host_status = csio_scsi_copy_to_sgl(hw, req);
+ }
+
+ cmnd->result = (((host_status) << 16) | scsi_status);
+ cmnd->scsi_done(cmnd);
+ csio_scsi_cmnd(req) = NULL;
+ CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
+ } else {
+ /* Error handling */
+ csio_scsi_err_handler(hw, req);
+ }
+}
+
+/**
+ * csio_queuecommand - Entry point to kickstart an I/O request.
+ * @host: The scsi_host pointer.
+ * @cmnd: The I/O request from ML.
+ *
+ * This routine does the following:
+ * - Checks for HW and Rnode module readiness.
+ * - Gets a free ioreq structure (which is already initialized
+ * to uninit during its allocation).
+ * - Maps SG elements.
+ * - Initializes ioreq members.
+ * - Kicks off the SCSI state machine for this IO.
+ * - Returns busy status on error.
+ */
+static int
+csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
+{
+ struct csio_lnode *ln = shost_priv(host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+ struct csio_ioreq *ioreq = NULL;
+ unsigned long flags;
+ int nsge = 0;
+ int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
+ int retval;
+ int cpu;
+ struct csio_scsi_qset *sqset;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
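+ /*
+ * Pick the per-port, per-CPU SCSI queue set based on the CPU the
+ * request was issued on, falling back to the current CPU.
+ */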
+ if (!blk_rq_cpu_valid(cmnd->request))
+ cpu = smp_processor_id();
+ else
+ cpu = cmnd->request->cpu;
+
+ sqset = &hw->sqset[ln->portid][cpu];
+
+ nr = fc_remote_port_chkready(rport);
+ if (nr) {
+ cmnd->result = nr;
+ CSIO_INC_STATS(scsim, n_rn_nr_error);
+ goto err_done;
+ }
+
+ if (unlikely(!csio_is_hw_ready(hw))) {
+ cmnd->result = (DID_REQUEUE << 16);
+ CSIO_INC_STATS(scsim, n_hw_nr_error);
+ goto err_done;
+ }
+
+ /* Get req->nsge, if there are SG elements to be mapped */
+ nsge = scsi_dma_map(cmnd);
+ if (unlikely(nsge < 0)) {
+ CSIO_INC_STATS(scsim, n_dmamap_error);
+ goto err;
+ }
+
+ /* Do we support so many mappings? */
+ if (unlikely(nsge > scsim->max_sge)) {
+ csio_warn(hw,
+ "More SGEs than can be supported."
+ " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
+ CSIO_INC_STATS(scsim, n_unsupp_sge_error);
+ goto err_dma_unmap;
+ }
+
+ /* Get a free ioreq structure - SM is already set to uninit */
+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
+ if (!ioreq) {
+ csio_err(hw, "Out of I/O request elements. Active #:%d\n",
+ scsim->stats.n_active);
+ CSIO_INC_STATS(scsim, n_no_req_error);
+ goto err_dma_unmap;
+ }
+
+ ioreq->nsge = nsge;
+ ioreq->lnode = ln;
+ ioreq->rnode = rn;
+ ioreq->iq_idx = sqset->iq_idx;
+ ioreq->eq_idx = sqset->eq_idx;
+ ioreq->wr_status = 0;
+ ioreq->drv_status = 0;
+ csio_scsi_cmnd(ioreq) = (void *)cmnd;
+ ioreq->tmo = 0;
+ ioreq->datadir = cmnd->sc_data_direction;
+
+ if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ CSIO_INC_STATS(ln, n_output_requests);
+ ln->stats.n_output_bytes += scsi_bufflen(cmnd);
+ } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
+ CSIO_INC_STATS(ln, n_input_requests);
+ ln->stats.n_input_bytes += scsi_bufflen(cmnd);
+ } else
+ CSIO_INC_STATS(ln, n_control_requests);
+
+ /* Set cbfn */
+ ioreq->io_cbfn = csio_scsi_cbfn;
+
+ /* Needed during abort */
+ cmnd->host_scribble = (unsigned char *)ioreq;
+ cmnd->SCp.Message = 0;
+
+ /* Kick off SCSI IO SM on the ioreq */
+ spin_lock_irqsave(&hw->lock, flags);
+ retval = csio_scsi_start_io(ioreq);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (retval != 0) {
+ csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
+ ioreq, retval);
+ CSIO_INC_STATS(scsim, n_busy_error);
+ goto err_put_req;
+ }
+
+ return 0;
+
+err_put_req:
+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
+err_dma_unmap:
+ if (nsge > 0)
+ scsi_dma_unmap(cmnd);
+err:
+ return rv;
+
+err_done:
+ cmnd->scsi_done(cmnd);
+ return 0;
+}
+
+static int
+csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
+{
+ int rv;
+ int cpu = smp_processor_id();
+ struct csio_lnode *ln = ioreq->lnode;
+ struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
+
+ ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
+ /*
+ * Use current processor queue for posting the abort/close, but retain
+ * the ingress queue ID of the original I/O being aborted/closed - we
+ * need the abort/close completion to be received on the same queue
+ * as the original I/O.
+ */
+ ioreq->eq_idx = sqset->eq_idx;
+
+ if (abort == SCSI_ABORT)
+ rv = csio_scsi_abort(ioreq);
+ else
+ rv = csio_scsi_close(ioreq);
+
+ return rv;
+}
+
+static int
+csio_eh_abort_handler(struct scsi_cmnd *cmnd)
+{
+ struct csio_ioreq *ioreq;
+ struct csio_lnode *ln = shost_priv(cmnd->device->host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ int ready = 0, ret;
+ unsigned long tmo = 0;
+ int rv;
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+
+ ret = fc_block_scsi_eh(cmnd);
+ if (ret)
+ return ret;
+
+ ioreq = (struct csio_ioreq *)cmnd->host_scribble;
+ if (!ioreq)
+ return SUCCESS;
+
+ if (!rn)
+ return FAILED;
+
+ csio_dbg(hw,
+ "Request to abort ioreq:%p cmd:%p cdb:%08llx"
+ " ssni:0x%x lun:%d iq:0x%x\n",
+ ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
+ cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
+
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
+ CSIO_INC_STATS(scsim, n_abrt_race_comp);
+ return SUCCESS;
+ }
+
+ ready = csio_is_lnode_ready(ln);
+ tmo = CSIO_SCSI_ABRT_TMO_MS;
+
+ spin_lock_irq(&hw->lock);
+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
+ spin_unlock_irq(&hw->lock);
+
+ if (rv != 0) {
+ if (rv == -EINVAL) {
+ /* Return success if the abort/close request was issued
+ * on an already completed I/O
+ */
+ return SUCCESS;
+ }
+ if (ready)
+ CSIO_INC_STATS(scsim, n_abrt_busy_error);
+ else
+ CSIO_INC_STATS(scsim, n_cls_busy_error);
+
+ goto inval_scmnd;
+ }
+
+ /* Wait for completion */
+ init_completion(&ioreq->cmplobj);
+ wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
+
+ /* FW didn't respond to the abort within our timeout */
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
+
+ csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
+ CSIO_INC_STATS(scsim, n_abrt_timedout);
+
+inval_scmnd:
+ if (ioreq->nsge > 0)
+ scsi_dma_unmap(cmnd);
+
+ spin_lock_irq(&hw->lock);
+ csio_scsi_cmnd(ioreq) = NULL;
+ spin_unlock_irq(&hw->lock);
+
+ cmnd->result = (DID_ERROR << 16);
+ cmnd->scsi_done(cmnd);
+
+ return FAILED;
+ }
+
+ /* FW successfully aborted the request */
+ if (host_byte(cmnd->result) == DID_REQUEUE) {
+ csio_info(hw,
+ "Aborted SCSI command to (%d:%d) serial#:0x%lx\n",
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
+ return SUCCESS;
+ } else {
+ csio_info(hw,
+ "Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n",
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
+ return FAILED;
+ }
+}
+
+/*
+ * csio_tm_cbfn - TM callback function.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ * Cache the result in 'cmnd', since ioreq will be freed soon
+ * after we return from here, and the waiting thread shouldn't trust
+ * the ioreq contents.
+ */
+static void
+csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct csio_dma_buf *dma_buf;
+ uint8_t flags = 0;
+ struct fcp_resp_with_ext *fcp_resp;
+ struct fcp_resp_rsp_info *rsp_info;
+
+ csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
+ req, req->wr_status);
+
+ /* Cache FW return status */
+ cmnd->SCp.Status = req->wr_status;
+
+ /* Special handling based on FCP response */
+
+ /*
+ * FW returns this error to us if the FCP flags were set. FCP-4 says
+ * FCP_RSP_LEN_VAL shall be set in the flags for TM completions.
+ * So if a target sets this bit, we expect the rsp_code to be
+ * FCP_TMF_CMPL for a successful TM completion; any other rsp_code
+ * means the TM operation failed. If a target simply doesn't set
+ * the flags, we treat the TM operation as a success, and FW
+ * returns FW_SUCCESS.
+ */
+ if (req->wr_status == FW_SCSI_RSP_ERR) {
+ dma_buf = &req->dma_buf;
+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
+
+ flags = fcp_resp->resp.fr_flags;
+
+ /* Modify return status if flags indicate success */
+ if (flags & FCP_RSP_LEN_VAL)
+ if (rsp_info->rsp_code == FCP_TMF_CMPL)
+ cmnd->SCp.Status = FW_SUCCESS;
+
+ csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
+ }
+
+ /* Wake up the TM handler thread */
+ csio_scsi_cmnd(req) = NULL;
+}
+
+static int
+csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct csio_lnode *ln = shost_priv(cmnd->device->host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+ struct csio_ioreq *ioreq = NULL;
+ struct csio_scsi_qset *sqset;
+ unsigned long flags;
+ int retval;
+ int count, ret;
+ LIST_HEAD(local_q);
+ struct csio_scsi_level_data sld;
+
+ if (!rn)
+ goto fail;
+
+ csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n",
+ cmnd->device->lun, rn->flowid, rn->scsi_id);
+
+ if (!csio_is_lnode_ready(ln)) {
+ csio_err(hw,
+ "LUN reset cannot be issued on non-ready"
+ " local node vnpi:0x%x (LUN:%d)\n",
+ ln->vnp_flowid, cmnd->device->lun);
+ goto fail;
+ }
+
+ /* Lnode is ready, now wait on rport node readiness */
+ ret = fc_block_scsi_eh(cmnd);
+ if (ret)
+ return ret;
+
+ /*
+ * If we have blocked in the previous call, at this point, either the
+ * remote node has come back online, or device loss timer has fired
+ * and the remote node is destroyed. Allow the LUN reset only for
+ * the former case, since LUN reset is a TMF I/O on the wire, and we
+ * need a valid session to issue it.
+ */
+ if (fc_remote_port_chkready(rn->rport)) {
+ csio_err(hw,
+ "LUN reset cannot be issued on non-ready"
+ " remote node ssni:0x%x (LUN:%d)\n",
+ rn->flowid, cmnd->device->lun);
+ goto fail;
+ }
+
+ /* Get a free ioreq structure - SM is already set to uninit */
+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
+
+ if (!ioreq) {
+ csio_err(hw, "Out of IO request elements. Active # :%d\n",
+ scsim->stats.n_active);
+ goto fail;
+ }
+
+ sqset = &hw->sqset[ln->portid][smp_processor_id()];
+ ioreq->nsge = 0;
+ ioreq->lnode = ln;
+ ioreq->rnode = rn;
+ ioreq->iq_idx = sqset->iq_idx;
+ ioreq->eq_idx = sqset->eq_idx;
+
+ csio_scsi_cmnd(ioreq) = cmnd;
+ cmnd->host_scribble = (unsigned char *)ioreq;
+ cmnd->SCp.Status = 0;
+
+ cmnd->SCp.Message = FCP_TMF_LUN_RESET;
+ ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;
+
+ /*
+ * FW times the LUN reset for ioreq->tmo, so we have to wait a little
+ * longer (10s for now) than that to allow FW to return the timed-out
+ * command.
+ */
+ count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
+
+ /* Set cbfn */
+ ioreq->io_cbfn = csio_tm_cbfn;
+
+ /* Save off the ioreq info for later use */
+ sld.level = CSIO_LEV_LUN;
+ sld.lnode = ioreq->lnode;
+ sld.rnode = ioreq->rnode;
+ sld.oslun = (uint64_t)cmnd->device->lun;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ /* Kick off TM SM on the ioreq */
+ retval = csio_scsi_start_tm(ioreq);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (retval != 0) {
+ csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
+ ioreq, retval);
+ goto fail_ret_ioreq;
+ }
+
+ csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
+ count * (CSIO_SCSI_TM_POLL_MS / 1000));
+ /* Wait for completion */
+ while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
+ && count--)
+ msleep(CSIO_SCSI_TM_POLL_MS);
+
+ /* LUN reset timed-out */
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
+ csio_err(hw, "LUN reset (%d:%d) timed out\n",
+ cmnd->device->id, cmnd->device->lun);
+
+ spin_lock_irq(&hw->lock);
+ csio_scsi_drvcleanup(ioreq);
+ list_del_init(&ioreq->sm.sm_list);
+ spin_unlock_irq(&hw->lock);
+
+ goto fail_ret_ioreq;
+ }
+
+ /* LUN reset returned, check cached status */
+ if (cmnd->SCp.Status != FW_SUCCESS) {
+ csio_err(hw, "LUN reset failed (%d:%d), status: %d\n",
+ cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
+ goto fail;
+ }
+
+ /* LUN reset succeeded, Start aborting affected I/Os */
+ /*
+ * Since the host guarantees that there will not be any more
+ * I/Os to that LUN until the LUN reset completes, we gather the
+ * pending I/Os after the LUN reset.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_scsi_gather_active_ios(scsim, &sld, &local_q);
+
+ retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
+ spin_unlock_irq(&hw->lock);
+
+ /* Aborts may have timed out */
+ if (retval != 0) {
+ csio_err(hw,
+ "Attempt to abort I/Os during LUN reset of %d"
+ " returned %d\n", cmnd->device->lun, retval);
+ /* Return I/Os back to active_q */
+ spin_lock_irq(&hw->lock);
+ list_splice_tail_init(&local_q, &scsim->active_q);
+ spin_unlock_irq(&hw->lock);
+ goto fail;
+ }
+
+ CSIO_INC_STATS(rn, n_lun_rst);
+
+ csio_info(hw, "LUN reset occurred (%d:%d)\n",
+ cmnd->device->id, cmnd->device->lun);
+
+ return SUCCESS;
+
+fail_ret_ioreq:
+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
+fail:
+ CSIO_INC_STATS(rn, n_lun_rst_fail);
+ return FAILED;
+}
+
+static int
+csio_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
+
+ return 0;
+}
+
+static int
+csio_slave_configure(struct scsi_device *sdev)
+{
+ if (sdev->tagged_supported)
+ scsi_activate_tcq(sdev, csio_lun_qdepth);
+ else
+ scsi_deactivate_tcq(sdev, csio_lun_qdepth);
+
+ return 0;
+}
+
+static void
+csio_slave_destroy(struct scsi_device *sdev)
+{
+ sdev->hostdata = NULL;
+}
+
+static int
+csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ int rv = 1;
+
+ spin_lock_irq(shost->host_lock);
+ if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
+ goto out;
+
+ rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
+ csio_delta_scan_tmo * HZ);
+out:
+ spin_unlock_irq(shost->host_lock);
+
+ return rv;
+}
+
+struct scsi_host_template csio_fcoe_shost_template = {
+ .module = THIS_MODULE,
+ .name = CSIO_DRV_DESC,
+ .proc_name = KBUILD_MODNAME,
+ .queuecommand = csio_queuecommand,
+ .eh_abort_handler = csio_eh_abort_handler,
+ .eh_device_reset_handler = csio_eh_lun_reset_handler,
+ .slave_alloc = csio_slave_alloc,
+ .slave_configure = csio_slave_configure,
+ .slave_destroy = csio_slave_destroy,
+ .scan_finished = csio_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = CSIO_SCSI_MAX_SGE,
+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = csio_fcoe_lport_attrs,
+ .max_sectors = CSIO_MAX_SECTOR_SIZE,
+};
+
+struct scsi_host_template csio_fcoe_shost_vport_template = {
+ .module = THIS_MODULE,
+ .name = CSIO_DRV_DESC,
+ .proc_name = KBUILD_MODNAME,
+ .queuecommand = csio_queuecommand,
+ .eh_abort_handler = csio_eh_abort_handler,
+ .eh_device_reset_handler = csio_eh_lun_reset_handler,
+ .slave_alloc = csio_slave_alloc,
+ .slave_configure = csio_slave_configure,
+ .slave_destroy = csio_slave_destroy,
+ .scan_finished = csio_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = CSIO_SCSI_MAX_SGE,
+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = csio_fcoe_vport_attrs,
+ .max_sectors = CSIO_MAX_SECTOR_SIZE,
+};
+
+/*
+ * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
+ * @scm: SCSI Module
+ * @hw: HW device.
+ * @buf_size: buffer size
+ * @num_buf : Number of buffers.
+ *
+ * This routine allocates the DMA buffers required for SCSI data transfer
+ * when the SGL buffers of a SCSI read request posted by the SCSI midlayer
+ * are not virtually contiguous.
+ */
+static int
+csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
+ int buf_size, int num_buf)
+{
+ int n = 0;
+ struct list_head *tmp;
+ struct csio_dma_buf *ddp_desc = NULL;
+ uint32_t unit_size = 0;
+
+ if (!num_buf)
+ return 0;
+
+ if (!buf_size)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&scm->ddp_freelist);
+
+ /* Align buf size to page size */
+ buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
+ /* Initialize dma descriptors */
+ for (n = 0; n < num_buf; n++) {
+ /* Set unit size to request size */
+ unit_size = buf_size;
+ ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
+ if (!ddp_desc) {
+ csio_err(hw,
+ "Failed to allocate ddp descriptors,"
+ " Num allocated = %d.\n",
+ scm->stats.n_free_ddp);
+ goto no_mem;
+ }
+
+ /* Allocate Dma buffers for DDP */
+ ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
+ &ddp_desc->paddr);
+ if (!ddp_desc->vaddr) {
+ csio_err(hw,
+ "SCSI response DMA buffer (ddp) allocation"
+ " failed!\n");
+ kfree(ddp_desc);
+ goto no_mem;
+ }
+
+ ddp_desc->len = unit_size;
+
+ /* Add it to the SCSI DDP freelist */
+ list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
+ CSIO_INC_STATS(scm, n_free_ddp);
+ }
+
+ return 0;
+no_mem:
+ /* release dma descs back to freelist and free dma memory */
+ list_for_each(tmp, &scm->ddp_freelist) {
+ ddp_desc = (struct csio_dma_buf *) tmp;
+ tmp = csio_list_prev(tmp);
+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
+ ddp_desc->paddr);
+ list_del_init(&ddp_desc->list);
+ kfree(ddp_desc);
+ }
+ scm->stats.n_free_ddp = 0;
+
+ return -ENOMEM;
+}
+
+/*
+ * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
+ * @scm: SCSI Module
+ * @hw: HW device.
+ *
+ * This routine frees ddp buffers.
+ */
+static void
+csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
+{
+ struct list_head *tmp;
+ struct csio_dma_buf *ddp_desc;
+
+ /* release dma descs back to freelist and free dma memory */
+ list_for_each(tmp, &scm->ddp_freelist) {
+ ddp_desc = (struct csio_dma_buf *) tmp;
+ tmp = csio_list_prev(tmp);
+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
+ ddp_desc->paddr);
+ list_del_init(&ddp_desc->list);
+ kfree(ddp_desc);
+ }
+ scm->stats.n_free_ddp = 0;
+}
+
+/**
+ * csio_scsim_init - Initialize SCSI Module
+ * @scm: SCSI Module
+ * @hw: HW module
+ *
+ */
+int
+csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
+{
+ int i;
+ struct csio_ioreq *ioreq;
+ struct csio_dma_buf *dma_buf;
+
+ INIT_LIST_HEAD(&scm->active_q);
+ scm->hw = hw;
+
+ scm->proto_cmd_len = sizeof(struct fcp_cmnd);
+ scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
+ scm->max_sge = CSIO_SCSI_MAX_SGE;
+
+ spin_lock_init(&scm->freelist_lock);
+
+ /* Pre-allocate ioreqs and initialize them */
+ INIT_LIST_HEAD(&scm->ioreq_freelist);
+ for (i = 0; i < csio_scsi_ioreqs; i++) {
+
+ ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
+ if (!ioreq) {
+ csio_err(hw,
+ "I/O request element allocation failed, "
+ " Num allocated = %d.\n",
+ scm->stats.n_free_ioreq);
+
+ goto free_ioreq;
+ }
+
+ /* Allocate Dma buffers for Response Payload */
+ dma_buf = &ioreq->dma_buf;
+ dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
+ &dma_buf->paddr);
+ if (!dma_buf->vaddr) {
+ csio_err(hw,
+ "SCSI response DMA buffer allocation"
+ " failed!\n");
+ kfree(ioreq);
+ goto free_ioreq;
+ }
+
+ dma_buf->len = scm->proto_rsp_len;
+
+ /* Set state to uninit */
+ csio_init_state(&ioreq->sm, csio_scsis_uninit);
+ INIT_LIST_HEAD(&ioreq->gen_list);
+ init_completion(&ioreq->cmplobj);
+
+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
+ CSIO_INC_STATS(scm, n_free_ioreq);
+ }
+
+ if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
+ goto free_ioreq;
+
+ return 0;
+
+free_ioreq:
+ /*
+ * Free up existing allocations, since an error
+ * from here means we are returning for good
+ */
+ while (!list_empty(&scm->ioreq_freelist)) {
+ struct csio_sm *tmp;
+
+ tmp = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&tmp->sm_list);
+ ioreq = (struct csio_ioreq *)tmp;
+
+ dma_buf = &ioreq->dma_buf;
+ pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ioreq);
+ }
+
+ scm->stats.n_free_ioreq = 0;
+
+ return -ENOMEM;
+}
+
+/**
+ * csio_scsim_exit - Uninitialize SCSI Module
+ * @scm: SCSI Module
+ *
+ */
+void
+csio_scsim_exit(struct csio_scsim *scm)
+{
+ struct csio_ioreq *ioreq;
+ struct csio_dma_buf *dma_buf;
+
+ while (!list_empty(&scm->ioreq_freelist)) {
+ struct csio_sm *tmp;
+
+ tmp = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&tmp->sm_list);
+ ioreq = (struct csio_ioreq *)tmp;
+
+ dma_buf = &ioreq->dma_buf;
+ pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ioreq);
+ }
+
+ scm->stats.n_free_ioreq = 0;
+
+ csio_scsi_free_ddp_bufs(scm, scm->hw);
+}
diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h
new file mode 100644
index 000000000000..2257c3dcf724
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_scsi.h
@@ -0,0 +1,342 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_SCSI_H__
+#define __CSIO_SCSI_H__
+
+#include <linux/spinlock_types.h>
+#include <linux/completion.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "csio_defs.h"
+#include "csio_wr.h"
+
+extern struct scsi_host_template csio_fcoe_shost_template;
+extern struct scsi_host_template csio_fcoe_shost_vport_template;
+
+extern int csio_scsi_eqsize;
+extern int csio_scsi_iqlen;
+extern int csio_scsi_ioreqs;
+extern uint32_t csio_max_scan_tmo;
+extern uint32_t csio_delta_scan_tmo;
+extern int csio_lun_qdepth;
+
+/*
+ **************************** NOTE *******************************
+ * How do we calculate MAX FCoE SCSI SGEs? Here is the math:
+ * Max Egress WR size = 512 bytes
+ * One SCSI egress WR has the following fixed no of bytes:
+ * 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
+ * + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD
+ * ------
+ * 80
+ * ------
+ * That leaves us with 512 - 80 = 432 bytes for the data SGEs. Using
+ * struct ulptx_sgl header for the SGE consumes:
+ * - 4 bytes for cmnd_sge.
+ * - 12 bytes for the first SGL.
+ * That leaves us with 416 bytes for the remaining SGE pairs, which
+ * is 416 / 24 (sizeof(struct ulptx_sge_pair)) = 17 SGE pairs,
+ * or 34 SGEs. Adding the first SGE gives us 35 SGEs.
+ */
+#define CSIO_SCSI_MAX_SGE 35
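+/*
+ * A worked check of the arithmetic above (all figures are taken from the
+ * note, not re-derived from the firmware headers):
+ *   512 - 80 (fixed WR bytes) - 16 (ulptx_sgl header) = 416
+ *   416 / 24 = 17 SGE pairs = 34 SGEs; + 1 first SGE = 35
+ */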
+#define CSIO_SCSI_ABRT_TMO_MS 60000
+#define CSIO_SCSI_LUNRST_TMO_MS 60000
+#define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than
+ * all TM timeouts.
+ */
+#define CSIO_SCSI_IQ_WRSZ 128
+#define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)
+
+#define CSIO_MAX_SNS_LEN 128
+#define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)
+
+/* Reference to scsi_cmnd */
+#define csio_scsi_cmnd(req) ((req)->scratch1)
+
+struct csio_scsi_stats {
+ uint64_t n_tot_success; /* Total number of good I/Os */
+ uint32_t n_rn_nr_error; /* No. of remote-node-not-
+ * ready errors
+ */
+ uint32_t n_hw_nr_error; /* No. of hw-module-not-
+ * ready errors
+ */
+ uint32_t n_dmamap_error; /* No. of DMA map errors */
+ uint32_t n_unsupp_sge_error; /* No. of too-many-SGes
+ * errors.
+ */
+ uint32_t n_no_req_error; /* No. of Out-of-ioreqs error */
+ uint32_t n_busy_error; /* No. of -EBUSY errors */
+ uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */
+ uint32_t n_rsperror; /* No. of response errors */
+ uint32_t n_autosense; /* No. of auto sense replies */
+ uint32_t n_ovflerror; /* No. of overflow errors */
+ uint32_t n_unflerror; /* No. of underflow errors */
+ uint32_t n_rdev_nr_error;/* No. of rdev not
+ * ready errors
+ */
+ uint32_t n_rdev_lost_error;/* No. of rdev lost errors */
+ uint32_t n_rdev_logo_error;/* No. of rdev logo errors */
+ uint32_t n_link_down_error;/* No. of link down errors */
+ uint32_t n_no_xchg_error; /* No. of no-exchange errors */
+ uint32_t n_unknown_error;/* No. of unhandled errors */
+ uint32_t n_aborted; /* No. of aborted I/Os */
+ uint32_t n_abrt_timedout; /* No. of abort timedouts */
+ uint32_t n_abrt_fail; /* No. of abort failures */
+ uint32_t n_abrt_dups; /* No. of duplicate aborts */
+ uint32_t n_abrt_race_comp; /* No. of aborts that raced
+ * with completions.
+ */
+ uint32_t n_abrt_busy_error;/* No. of abort failures
+ * due to -EBUSY.
+ */
+ uint32_t n_closed; /* No. of closed I/Os */
+ uint32_t n_cls_busy_error; /* No. of close failures
+ * due to -EBUSY.
+ */
+ uint32_t n_active; /* No. of IOs in active_q */
+ uint32_t n_tm_active; /* No. of TMs in active_q */
+ uint32_t n_wcbfn; /* No. of I/Os in worker
+ * cbfn q
+ */
+ uint32_t n_free_ioreq; /* No. of freelist entries */
+ uint32_t n_free_ddp; /* No. of DDP freelist entries */
+ uint32_t n_unaligned; /* No. of unaligned SGLs */
+ uint32_t n_inval_cplop; /* No. of invalid CPL ops in IQ */
+ uint32_t n_inval_scsiop; /* No. of invalid SCSI ops in IQ */
+};
+
+struct csio_scsim {
+ struct csio_hw *hw; /* Pointer to HW module */
+ uint8_t max_sge; /* Max SGE */
+ uint8_t proto_cmd_len; /* Proto specific SCSI
+ * cmd length
+ */
+ uint16_t proto_rsp_len; /* Proto specific SCSI
+ * response length
+ */
+ spinlock_t freelist_lock; /* Lock for ioreq freelist */
+ struct list_head active_q; /* Outstanding SCSI I/Os */
+ struct list_head ioreq_freelist; /* Free list of ioreq's */
+ struct list_head ddp_freelist; /* DDP descriptor freelist */
+ struct csio_scsi_stats stats; /* This module's statistics */
+};
+
+/* State machine defines */
+enum csio_scsi_ev {
+ CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */
+ CSIO_SCSIE_START_TM, /* Start a TM IO */
+ CSIO_SCSIE_COMPLETED, /* IO Completed */
+ CSIO_SCSIE_ABORT, /* Abort IO */
+ CSIO_SCSIE_ABORTED, /* IO Aborted */
+ CSIO_SCSIE_CLOSE, /* Close exchange */
+ CSIO_SCSIE_CLOSED, /* Exchange closed */
+ CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually
+ * cleanup this I/O.
+ */
+};
+
+enum csio_scsi_lev {
+ CSIO_LEV_ALL = 1,
+ CSIO_LEV_LNODE,
+ CSIO_LEV_RNODE,
+ CSIO_LEV_LUN,
+};
+
+struct csio_scsi_level_data {
+ enum csio_scsi_lev level;
+ struct csio_rnode *rnode;
+ struct csio_lnode *lnode;
+ uint64_t oslun;
+};
+
+static inline struct csio_ioreq *
+csio_get_scsi_ioreq(struct csio_scsim *scm)
+{
+ struct csio_sm *req;
+
+ if (likely(!list_empty(&scm->ioreq_freelist))) {
+ req = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&req->sm_list);
+ CSIO_DEC_STATS(scm, n_free_ioreq);
+ return (struct csio_ioreq *)req;
+ } else
+ return NULL;
+}
+
+static inline void
+csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq)
+{
+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
+ CSIO_INC_STATS(scm, n_free_ioreq);
+}
+
+static inline void
+csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist,
+ int n)
+{
+ list_splice_init(reqlist, &scm->ioreq_freelist);
+ scm->stats.n_free_ioreq += n;
+}
+
+static inline struct csio_dma_buf *
+csio_get_scsi_ddp(struct csio_scsim *scm)
+{
+ struct csio_dma_buf *ddp;
+
+ if (likely(!list_empty(&scm->ddp_freelist))) {
+ ddp = list_first_entry(&scm->ddp_freelist,
+ struct csio_dma_buf, list);
+ list_del_init(&ddp->list);
+ CSIO_DEC_STATS(scm, n_free_ddp);
+ return ddp;
+ } else
+ return NULL;
+}
+
+static inline void
+csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp)
+{
+ list_add_tail(&ddp->list, &scm->ddp_freelist);
+ CSIO_INC_STATS(scm, n_free_ddp);
+}
+
+static inline void
+csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist,
+ int n)
+{
+ list_splice_tail_init(reqlist, &scm->ddp_freelist);
+ scm->stats.n_free_ddp += n;
+}
+
+static inline void
+csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);
+ if (csio_list_deleted(&ioreq->sm.sm_list))
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_drvcleanup(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP);
+}
+
+/*
+ * csio_scsi_start_io - Kick starts the IO SM.
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_start_io(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);
+ return ioreq->drv_status;
+}
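+
+/*
+ * Illustrative flow (a sketch only; the lock shown protects just the
+ * ioreq freelist and the request setup is elided):
+ *
+ *	spin_lock_irqsave(&scm->freelist_lock, flags);
+ *	ioreq = csio_get_scsi_ioreq(scm);
+ *	spin_unlock_irqrestore(&scm->freelist_lock, flags);
+ *	if (!ioreq)
+ *		return SCSI_MLQUEUE_HOST_BUSY;
+ *	... fill in the ioreq ...
+ *	ret = csio_scsi_start_io(ioreq);	(owning lock held, see above)
+ */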
+
+/*
+ * csio_scsi_start_tm - Kicks off the Task management IO SM.
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_start_tm(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_abort - Abort an IO request
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_abort(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_close - Close an IO request
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_close(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE);
+ return ioreq->drv_status;
+}
+
+void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *);
+int csio_scsim_cleanup_io(struct csio_scsim *, bool abort);
+int csio_scsim_cleanup_io_lnode(struct csio_scsim *,
+ struct csio_lnode *);
+struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *,
+ void *, uint8_t **);
+int csio_scsi_qconfig(struct csio_hw *);
+int csio_scsim_init(struct csio_scsim *, struct csio_hw *);
+void csio_scsim_exit(struct csio_scsim *);
+
+#endif /* __CSIO_SCSI_H__ */
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
new file mode 100644
index 000000000000..c32df1bdaa97
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -0,0 +1,1632 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include <linux/cache.h>
+
+#include "csio_hw.h"
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_defs.h"
+
+int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */
+static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */
+
+int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
+static int csio_sge_timer_reg = 1;
+
+#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
+ csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
+
+static void
+csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
+{
+ sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
+ reg * sizeof(uint32_t));
+}
+
+/*
+ * Free list buffer size: look up the SGE_FL_BUFFER_SIZE register indexed
+ * by the low 4 bits of the buffer address (the same bits used to tag
+ * addresses posted in csio_wr_fill_fl()).
+ */
+static inline uint32_t
+csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
+{
+ return sge->sge_fl_buf_size[buf->paddr & 0xF];
+}
+
+/* Size of the egress queue status page */
+static inline uint32_t
+csio_wr_qstat_pgsz(struct csio_hw *hw)
+{
+ return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;
+}
+
+/* Ring freelist doorbell */
+static inline void
+csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
+{
+ /*
+ * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
+ * bytes worth of entries in the freelist queue. This translates to at
+ * least 8 freelist buffer pointers (since each pointer is 8 bytes).
+ */
+ if (flq->inc_idx >= 8) {
+ csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
+ PIDX(flq->inc_idx / 8),
+ MYPF_REG(SGE_PF_KDOORBELL));
+ flq->inc_idx &= 7;
+ }
+}
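+
+/*
+ * For example, with inc_idx = 19 the doorbell is rung with PIDX(2),
+ * i.e. 2 credits (16 freelist pointers), and the remaining 3 pointers
+ * stay accounted in inc_idx until the next refill.
+ */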
+
+/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
+static void
+csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
+{
+ csio_wr_reg32(hw, CIDXINC(0) |
+ INGRESSQID(iqid) |
+ TIMERREG(X_TIMERREG_RESTART_COUNTER),
+ MYPF_REG(SGE_PF_GTS));
+}
+
+/*
+ * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ *
+ * Fill up freelist buffer entries with buffers of size specified
+ * in the size register.
+ *
+ */
+static int
+csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ __be64 *d = (__be64 *)(flq->vstart);
+ struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
+ uint64_t paddr;
+ int sreg = flq->un.fl.sreg;
+ int n = flq->credits;
+
+ while (n--) {
+ buf->len = sge->sge_fl_buf_size[sreg];
+ buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
+ &buf->paddr);
+ if (!buf->vaddr) {
+ csio_err(hw, "Could only fill %d buffers!\n", n + 1);
+ return -ENOMEM;
+ }
+
+ paddr = buf->paddr | (sreg & 0xF);
+
+ *d++ = cpu_to_be64(paddr);
+ buf++;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_wr_update_fl - Update the freelist producer index.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ * @n: Number of freelist buffers newly refilled.
+ *
+ */
+static inline void
+csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
+{
+
+ flq->inc_idx += n;
+ flq->pidx += n;
+ if (unlikely(flq->pidx >= flq->credits))
+ flq->pidx -= (uint16_t)flq->credits;
+
+ CSIO_INC_STATS(flq, n_flq_refill);
+}
+
+/*
+ * csio_wr_alloc_q - Allocate a WR queue and initialize it.
+ * @hw: HW module
+ * @qsize: Size of the queue in bytes
+ * @wrsize: Size of each WR in this queue, if fixed.
+ * @type: Type of queue (Ingress/Egress/Freelist)
+ * @owner: Module that owns this queue.
+ * @nflb: Number of freelist buffers for FL.
+ * @sreg: FL buffer size register index to use for the freelist.
+ * @iq_intx_handler: Ingress queue handler in INTx mode.
+ *
+ * This function allocates and sets up a queue for the caller
+ * of size qsize, aligned at the required boundary. This is subject to
+ * free entries being available in the queue array. If one is found,
+ * it is initialized with the allocated queue, marked as being used (owner),
+ * and a handle is returned to the caller in the form of the queue's index
+ * into the q_arr array.
+ * If the caller has indicated a freelist (by specifying nflb > 0), create
+ * another queue (with its own index into q_arr) for the freelist. Allocate
+ * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
+ * idx in the ingress queue's flq.idx. This is how a Freelist is associated
+ * with its owning ingress queue.
+ */
+int
+csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
+ uint16_t type, void *owner, uint32_t nflb, int sreg,
+ iq_handler_t iq_intx_handler)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q, *flq;
+ int free_idx = wrm->free_qidx;
+ int ret_idx = free_idx;
+ uint32_t qsz;
+ int flq_idx;
+
+ if (free_idx >= wrm->num_q) {
+ csio_err(hw, "No more free queues.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case CSIO_EGRESS:
+ qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
+ break;
+ case CSIO_INGRESS:
+ switch (wrsize) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ csio_err(hw, "Invalid Ingress queue WR size:%d\n",
+ wrsize);
+ return -1;
+ }
+
+ /*
+ * The number of elements must be a multiple of 16,
+ * so this includes the status page size.
+ */
+ qsz = ALIGN(qsize/wrsize, 16) * wrsize;
+
+ break;
+ case CSIO_FREELIST:
+ qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
+ break;
+ default:
+ csio_err(hw, "Invalid queue type: 0x%x\n", type);
+ return -1;
+ }
+
+ q = wrm->q_arr[free_idx];
+
+ q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);
+ if (!q->vstart) {
+ csio_err(hw,
+ "Failed to allocate DMA memory for "
+ "queue at id: %d size: %d\n", free_idx, qsize);
+ return -1;
+ }
+
+ /*
+ * We need to zero out the contents, importantly for ingress,
+ * since we start with a generation bit of 1 for ingress.
+ */
+ memset(q->vstart, 0, qsz);
+
+ q->type = type;
+ q->owner = owner;
+ q->pidx = q->cidx = q->inc_idx = 0;
+ q->size = qsz;
+ q->wr_sz = wrsize; /* If using fixed size WRs */
+
+ wrm->free_qidx++;
+
+ if (type == CSIO_INGRESS) {
+ /* Since queue area is set to zero */
+ q->un.iq.genbit = 1;
+
+ /*
+ * Ingress queue status page size is always the size of
+ * the ingress queue entry.
+ */
+ q->credits = (qsz - q->wr_sz) / q->wr_sz;
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - q->wr_sz);
+
+ /* Allocate memory for FL if requested */
+ if (nflb > 0) {
+ flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
+ sizeof(__be64), CSIO_FREELIST,
+ owner, 0, sreg, NULL);
+ if (flq_idx == -1) {
+ csio_err(hw,
+ "Failed to allocate FL queue"
+ " for IQ idx:%d\n", free_idx);
+ return -1;
+ }
+
+ /* Associate the new FL with the Ingress queue */
+ q->un.iq.flq_idx = flq_idx;
+
+ flq = wrm->q_arr[q->un.iq.flq_idx];
+ flq->un.fl.bufs = kzalloc(flq->credits *
+ sizeof(struct csio_dma_buf),
+ GFP_KERNEL);
+ if (!flq->un.fl.bufs) {
+ csio_err(hw,
+ "Failed to allocate FL queue bufs"
+ " for IQ idx:%d\n", free_idx);
+ return -1;
+ }
+
+ flq->un.fl.packen = 0;
+ flq->un.fl.offset = 0;
+ flq->un.fl.sreg = sreg;
+
+ /* Fill up the free list buffers */
+ if (csio_wr_fill_fl(hw, flq))
+ return -1;
+
+ /*
+ * Make sure that in a FLQ, at least 1 credit (8 FL buffers)
+ * remains unpopulated, otherwise the HW thinks the
+ * FLQ is empty.
+ */
+ flq->pidx = flq->inc_idx = flq->credits - 8;
+ } else {
+ q->un.iq.flq_idx = -1;
+ }
+
+ /* Associate the IQ INTx handler. */
+ q->un.iq.iq_intx_handler = iq_intx_handler;
+
+ csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
+
+ } else if (type == CSIO_EGRESS) {
+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - csio_wr_qstat_pgsz(hw));
+ csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
+ } else { /* Freelist */
+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - csio_wr_qstat_pgsz(hw));
+ csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
+ }
+
+ return ret_idx;
+}
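+
+/*
+ * Illustrative use (the sizes, owner pointer and handler name are made
+ * up for the example): allocate a 64-byte-WR ingress queue backed by a
+ * freelist of 1024 buffers drawn from FL size register 0.
+ *
+ *	iq_idx = csio_wr_alloc_q(hw, 32768, 64, CSIO_INGRESS,
+ *				 owner, 1024, 0, my_intx_handler);
+ *	if (iq_idx == -1)
+ *		... bail out ...
+ *
+ * The returned index is later handed to csio_wr_iq_create() to register
+ * the queue with the firmware.
+ */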
+
+/*
+ * csio_wr_iq_create_rsp - Response handler for IQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that got created.
+ *
+ * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
+ */
+static int
+csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+ struct csio_iq_params iqp;
+ enum fw_retval retval;
+ uint32_t iq_id;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+ csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
+
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_q_iqid(hw, iq_idx) = iqp.iqid;
+ csio_q_physiqid(hw, iq_idx) = iqp.physiqid;
+ csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0;
+ csio_q_inc_idx(hw, iq_idx) = 0;
+
+ /* Actual iq-id. */
+ iq_id = iqp.iqid - hw->wrm.fw_iq_start;
+
+ /* Set the iq-id to iq map table. */
+ if (iq_id >= CSIO_MAX_IQ) {
+ csio_err(hw,
+ "Exceeding MAX_IQ(%d) supported!"
+ " iqid:%d rel_iqid:%d FW iq_start:%d\n",
+ CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+ csio_q_set_intr_map(hw, iq_idx, iq_id);
+
+ /*
+ * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
+ * ingress context of this queue. This will block interrupts to
+ * this queue until the next GTS write. Therefore, we do a
+ * 0-cidx increment GTS write for this queue just to clear the
+ * interrupt_sent bit. This will re-enable interrupts to this
+ * queue.
+ */
+ csio_wr_sge_intr_enable(hw, iqp.physiqid);
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1) {
+ struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+ csio_q_flid(hw, flq_idx) = iqp.fl0id;
+ csio_q_cidx(hw, flq_idx) = 0;
+ csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+ csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+
+ /* Now update SGE about the buffers allocated during init */
+ csio_wr_ring_fldb(hw, flq);
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_wr_iq_create - Configure an Ingress queue with FW.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index in the WR module.
+ * @vec: MSIX vector.
+ * @portid: PCIE Channel to be associated with this queue.
+ * @async: Is this a FW asynchronous message handling queue?
+ * @cbfn: Completion callback.
+ *
+ * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
+ * with alloc/write bits set.
+ */
+int
+csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
+ uint32_t vec, uint8_t portid, bool async,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_mb *mbp;
+ struct csio_iq_params iqp;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+ csio_q_portid(hw, iq_idx) = portid;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "IQ command out of memory!\n");
+ return -ENOMEM;
+ }
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_INTX:
+ case CSIO_IM_MSI:
+ /* For interrupt forwarding queue only */
+ if (hw->intr_iq_idx == iq_idx)
+ iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
+ else
+ iqp.iqandst = X_INTERRUPTDESTINATION_IQ;
+ iqp.iqandstindex =
+ csio_q_physiqid(hw, hw->intr_iq_idx);
+ break;
+ case CSIO_IM_MSIX:
+ iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
+ iqp.iqandstindex = (uint16_t)vec;
+ break;
+ case CSIO_IM_NONE:
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* Pass in the ingress queue cmd parameters */
+ iqp.pfn = hw->pfn;
+ iqp.vfn = 0;
+ iqp.iq_start = 1;
+ iqp.viid = 0;
+ iqp.type = FW_IQ_TYPE_FL_INT_CAP;
+ iqp.iqasynch = async;
+ if (csio_intr_coalesce_cnt)
+ iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER;
+ else
+ iqp.iqanus = X_UPDATESCHEDULING_TIMER;
+ iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT;
+ iqp.iqpciech = portid;
+ iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg;
+
+ switch (csio_q_wr_sz(hw, iq_idx)) {
+ case 16:
+ iqp.iqesize = 0; break;
+ case 32:
+ iqp.iqesize = 1; break;
+ case 64:
+ iqp.iqesize = 2; break;
+ case 128:
+ iqp.iqesize = 3; break;
+ }
+
+ iqp.iqsize = csio_q_size(hw, iq_idx) /
+ csio_q_wr_sz(hw, iq_idx);
+ iqp.iqaddr = csio_q_pstart(hw, iq_idx);
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1) {
+ struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+ iqp.fl0paden = 1;
+ iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
+ iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
+ iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
+ iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
+ iqp.fl0addr = csio_q_pstart(hw, flq_idx);
+ }
+
+ csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of IQ cmd failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that got created.
+ *
+ * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
+ */
+static int
+csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+ struct csio_eq_params eqp;
+ enum fw_retval retval;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);
+
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid;
+ csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
+ csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0;
+ csio_q_inc_idx(hw, eq_idx) = 0;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_wr_eq_create - Configure an Egress queue with FW.
+ * @hw: HW module.
+ * @priv: Private data.
+ * @eq_idx: Egress queue index in the WR module.
+ * @iq_idx: Associated ingress queue index.
+ * @cbfn: Completion callback.
+ *
+ * This API configures an offload egress queue with FW by issuing a
+ * FW_EQ_OFLD_CMD (with alloc + write) mailbox.
+ */
+int
+csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
+ int iq_idx, uint8_t portid,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_mb *mbp;
+ struct csio_eq_params eqp;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "EQ command out of memory!\n");
+ return -ENOMEM;
+ }
+
+ eqp.pfn = hw->pfn;
+ eqp.vfn = 0;
+ eqp.eqstart = 1;
+ eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE;
+ eqp.iqid = csio_q_iqid(hw, iq_idx);
+ eqp.fbmin = X_FETCHBURSTMIN_64B;
+ eqp.fbmax = X_FETCHBURSTMAX_512B;
+ eqp.cidxfthresh = 0;
+ eqp.pciechn = portid;
+ eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
+ eqp.eqaddr = csio_q_pstart(hw, eq_idx);
+
+ csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
+ &eqp, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that was freed.
+ *
+ * Handle FW_IQ_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+ enum fw_retval retval = csio_mb_fw_retval(mbp);
+ int rv = 0;
+
+ if (retval != FW_SUCCESS)
+ rv = -EINVAL;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return rv;
+}
+
+/*
+ * csio_wr_iq_destroy - Free an ingress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an ingress queue by issuing the FW_IQ_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ int rv = 0;
+ struct csio_mb *mbp;
+ struct csio_iq_params iqp;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ return -ENOMEM;
+
+ iqp.pfn = hw->pfn;
+ iqp.vfn = 0;
+ iqp.iqid = csio_q_iqid(hw, iq_idx);
+ iqp.type = FW_IQ_TYPE_FL_INT_CAP;
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1)
+ iqp.fl0id = csio_q_flid(hw, flq_idx);
+ else
+ iqp.fl0id = 0xFFFF;
+
+ iqp.fl1id = 0xFFFF;
+
+ csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv != 0) {
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that was freed.
+ *
+ * Handle FW_EQ_OFLD_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+ enum fw_retval retval = csio_mb_fw_retval(mbp);
+ int rv = 0;
+
+ if (retval != FW_SUCCESS)
+ rv = -EINVAL;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return rv;
+}
+
+/*
+ * csio_wr_eq_destroy - Free an Egress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @eq_idx: Egress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ int rv = 0;
+ struct csio_mb *mbp;
+ struct csio_eq_params eqp;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ return -ENOMEM;
+
+ eqp.pfn = hw->pfn;
+ eqp.vfn = 0;
+ eqp.eqid = csio_q_eqid(hw, eq_idx);
+
+ csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv != 0) {
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
+ * @hw: HW module
+ * @qidx: Egress queue index
+ *
+ * Cleanup the Egress queue status page.
+ */
+static void
+csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
+{
+ struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
+ struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+
+ memset(stp, 0, sizeof(*stp));
+}
+
+/*
+ * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ
+ * @hw: HW module
+ * @qidx: Ingress queue index
+ *
+ * Clean up the footer entries in the given ingress queue and
+ * set the internal copy of genbit back to 1.
+ */
+static void
+csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+ void *wr;
+ struct csio_iqwr_footer *ftr;
+ uint32_t i = 0;
+
+ /* Set to 1, since we are just about to zero out the genbit in each footer */
+ q->un.iq.genbit = 1;
+
+ for (i = 0; i < q->credits; i++) {
+ /* Get the WR */
+ wr = (void *)((uintptr_t)q->vstart +
+ (i * q->wr_sz));
+ /* Get the footer */
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+ /* Zero out footer */
+ memset(ftr, 0, sizeof(*ftr));
+ }
+}
+
+int
+csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
+{
+ int i, flq_idx;
+ struct csio_q *q;
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ int rv;
+
+ for (i = 0; i < wrm->free_qidx; i++) {
+ q = wrm->q_arr[i];
+
+ switch (q->type) {
+ case CSIO_EGRESS:
+ if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
+ csio_wr_cleanup_eq_stpg(hw, i);
+ if (!cmd) {
+ csio_q_eqid(hw, i) = CSIO_MAX_QID;
+ continue;
+ }
+
+ rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
+ if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+ cmd = false;
+
+ csio_q_eqid(hw, i) = CSIO_MAX_QID;
+ }
+ case CSIO_INGRESS:
+ if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
+ csio_wr_cleanup_iq_ftr(hw, i);
+ if (!cmd) {
+ csio_q_iqid(hw, i) = CSIO_MAX_QID;
+ flq_idx = csio_q_iq_flq_idx(hw, i);
+ if (flq_idx != -1)
+ csio_q_flid(hw, flq_idx) =
+ CSIO_MAX_QID;
+ continue;
+ }
+
+ rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
+ if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+ cmd = false;
+
+ csio_q_iqid(hw, i) = CSIO_MAX_QID;
+ flq_idx = csio_q_iq_flq_idx(hw, i);
+ if (flq_idx != -1)
+ csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
+ }
+ default:
+ break;
+ }
+ }
+
+ hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;
+
+ return 0;
+}
+
+/*
+ * csio_wr_get - Get requested size of WR entry/entries from queue.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @size: Cumulative size of Work request(s).
+ * @wrp: Work request pair.
+ *
+ * If requested credits are available, return the start address of the
+ * work request in the work request pair. Set pidx accordingly and
+ * return.
+ *
+ * NOTE about WR pair:
+ * ==================
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require a pair of address/size to be passed back to the caller -
+ * hence Work request pair format.
+ */
+int
+csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
+ struct csio_wr_pair *wrp)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+ void *cwr = (void *)((uintptr_t)(q->vstart) +
+ (q->pidx * CSIO_QCREDIT_SZ));
+ struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+ uint16_t cidx = q->cidx = ntohs(stp->cidx);
+ uint16_t pidx = q->pidx;
+ uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
+ int req_credits = req_sz / CSIO_QCREDIT_SZ;
+ int credits;
+
+ CSIO_DB_ASSERT(q->owner != NULL);
+ CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+ CSIO_DB_ASSERT(cidx <= q->credits);
+
+ /* Calculate credits */
+ if (pidx > cidx) {
+ credits = q->credits - (pidx - cidx) - 1;
+ } else if (cidx > pidx) {
+ credits = cidx - pidx - 1;
+ } else {
+ /* cidx == pidx, empty queue */
+ credits = q->credits;
+ CSIO_INC_STATS(q, n_qempty);
+ }
+
+ /*
+ * Check if we have enough credits.
+ * credits = 1 implies queue is full.
+ */
+ if (!credits || (req_credits > credits)) {
+ CSIO_INC_STATS(q, n_qfull);
+ return -EBUSY;
+ }
+
+ /*
+ * If we are here, we have enough credits to satisfy the
+ * request. Check if we are near the end of q, and if WR spills over.
+ * If it does, use the first addr/size to cover the queue until
+ * the end. Fit the remainder portion of the request at the top
+ * of queue and return it in the second addr/len. Set pidx
+ * accordingly.
+ */
+ if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
+ wrp->addr1 = cwr;
+ wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
+ wrp->addr2 = q->vstart;
+ wrp->size2 = req_sz - wrp->size1;
+ q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
+ CSIO_QCREDIT_SZ);
+ CSIO_INC_STATS(q, n_qwrap);
+ CSIO_INC_STATS(q, n_eq_wr_split);
+ } else {
+ wrp->addr1 = cwr;
+ wrp->size1 = req_sz;
+ wrp->addr2 = NULL;
+ wrp->size2 = 0;
+ q->pidx += (uint16_t)req_credits;
+
+ /* We are at the end of the queue; roll pidx back to the top of the queue */
+ if (unlikely(q->pidx == q->credits)) {
+ q->pidx = 0;
+ CSIO_INC_STATS(q, n_qwrap);
+ }
+ }
+
+ q->inc_idx = (uint16_t)req_credits;
+
+ CSIO_INC_STATS(q, n_tot_reqs);
+
+ return 0;
+}
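+
+/*
+ * Illustrative egress-side flow (a sketch; wr_buf/wr_len are not real
+ * driver names): reserve credits, copy the WR across the possibly
+ * wrapped address pair, then ring the doorbell.
+ *
+ *	struct csio_wr_pair wrp;
+ *
+ *	if (csio_wr_get(hw, eq_idx, wr_len, &wrp) == 0) {
+ *		csio_wr_copy_to_wrp(wr_buf, &wrp, 0, wr_len);
+ *		csio_wr_issue(hw, eq_idx, false);
+ *	}
+ *
+ * csio_wr_copy_to_wrp() (below) hides the addr1/addr2 split from the
+ * caller.
+ */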
+
+/*
+ * csio_wr_copy_to_wrp - Copy the given data into a WR.
+ * @data_buf: Data buffer.
+ * @wrp: Work request pair.
+ * @wr_off: Work request offset.
+ * @data_len: Data length.
+ *
+ * Copies the given data into the Work Request. The work request pair (wrp)
+ * holds the address information of the Work Request.
+ * Returns: none
+ */
+void
+csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
+ uint32_t wr_off, uint32_t data_len)
+{
+ uint32_t nbytes;
+
+ /* Amount of space available in buffer addr1 of the WRP */
+ nbytes = ((wrp->size1 - wr_off) >= data_len) ?
+ data_len : (wrp->size1 - wr_off);
+
+ memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
+ data_len -= nbytes;
+
+ /* Write the remaining data from the beginning of the circular buffer */
+ if (data_len) {
+ CSIO_DB_ASSERT(data_len <= wrp->size2);
+ CSIO_DB_ASSERT(wrp->addr2 != NULL);
+ memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
+ }
+}
+
+/*
+ * csio_wr_issue - Notify chip of Work request.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @prio: 0: Low priority, 1: High priority
+ *
+ * Rings the SGE Doorbell by writing the current producer index of the passed
+ * in queue into the register.
+ *
+ */
+int
+csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+
+ CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+
+ wmb();
+ /* Ring SGE Doorbell writing q->pidx into it */
+ csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
+ PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
+ q->inc_idx = 0;
+
+ return 0;
+}
+
+static inline uint32_t
+csio_wr_avail_qcredits(struct csio_q *q)
+{
+ if (q->pidx > q->cidx)
+ return q->pidx - q->cidx;
+ else if (q->cidx > q->pidx)
+ return q->credits - (q->cidx - q->pidx);
+ else
+ return 0; /* cidx == pidx, empty queue */
+}
+
+/*
+ * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
+ * @hw: HW module.
+ * @flq: The freelist queue.
+ *
+ * Invalidate the driver's version of a freelist buffer entry,
+ * without freeing the associated the DMA memory. The entry
+ * to be invalidated is picked up from the current Free list
+ * queue cidx.
+ *
+ */
+static inline void
+csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
+{
+ flq->cidx++;
+ if (flq->cidx == flq->credits) {
+ flq->cidx = 0;
+ CSIO_INC_STATS(flq, n_qwrap);
+ }
+}
+
+/*
+ * csio_wr_process_fl - Process a freelist completion.
+ * @hw: HW module.
+ * @q: The ingress queue attached to the Freelist.
+ * @wr: The freelist completion WR in the ingress queue.
+ * @len_to_qid: The lower 32-bits of the first flit of the RSP footer
+ * @iq_handler: Caller's handler for this completion.
+ * @priv: Private pointer of caller
+ *
+ */
+static inline void
+csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
+ void *wr, uint32_t len_to_qid,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ struct csio_fl_dma_buf flb;
+ struct csio_dma_buf *buf, *fbuf;
+ uint32_t bufsz, len, lastlen = 0;
+ int n;
+ struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];
+
+ CSIO_DB_ASSERT(flq != NULL);
+
+ len = len_to_qid;
+
+ if (len & IQWRF_NEWBUF) {
+ if (flq->un.fl.offset > 0) {
+ csio_wr_inval_flq_buf(hw, flq);
+ flq->un.fl.offset = 0;
+ }
+ len = IQWRF_LEN_GET(len);
+ }
+
+ CSIO_DB_ASSERT(len != 0);
+
+ flb.totlen = len;
+
+ /* Consume all freelist buffers used for len bytes */
+ for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
+ buf = &flq->un.fl.bufs[flq->cidx];
+ bufsz = csio_wr_fl_bufsz(sge, buf);
+
+ fbuf->paddr = buf->paddr;
+ fbuf->vaddr = buf->vaddr;
+
+ flb.offset = flq->un.fl.offset;
+ lastlen = min(bufsz, len);
+ fbuf->len = lastlen;
+
+ len -= lastlen;
+ if (!len)
+ break;
+ csio_wr_inval_flq_buf(hw, flq);
+ }
+
+ flb.defer_free = flq->un.fl.packen ? 0 : 1;
+
+ iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
+ &flb, priv);
+
+ if (flq->un.fl.packen)
+ flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
+ else
+ csio_wr_inval_flq_buf(hw, flq);
+
+}
+
+/*
+ * csio_is_new_iqwr - Is this a new ingress queue entry?
+ * @q: Ingress queue.
+ * @ftr: Ingress queue WR SGE footer.
+ *
+ * The entry is new if our generation bit matches the corresponding
+ * bit in the footer of the current WR.
+ */
+static inline bool
+csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
+{
+ return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
+}
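+
+/*
+ * The driver's copy of the generation bit is toggled each time it wraps
+ * the queue (see csio_wr_process_iq()). Entries left over from the
+ * previous pass still carry the old generation value, so they fail this
+ * comparison; that is how the driver knows it has caught up with the
+ * hardware.
+ */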
+
+/*
+ * csio_wr_process_iq - Process elements in Ingress queue.
+ * @hw: HW pointer
+ * @qidx: Index of queue
+ * @iq_handler: Handler for this queue
+ * @priv: Caller's private pointer
+ *
+ * This routine walks through every entry of the ingress queue, calling
+ * the provided iq_handler with the entry, until the generation bit
+ * flips.
+ */
+int
+csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
+ struct csio_iqwr_footer *ftr;
+ uint32_t wr_type, fw_qid, qid;
+ struct csio_q *q_completed;
+ struct csio_q *flq = csio_iq_has_fl(q) ?
+ wrm->q_arr[q->un.iq.flq_idx] : NULL;
+ int rv = 0;
+
+ /* Get the footer */
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+
+ /*
+ * When q wrapped around last time, driver should have inverted
+ * ic.genbit as well.
+ */
+ while (csio_is_new_iqwr(q, ftr)) {
+
+ CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
+ (uintptr_t)q->vwrap);
+ rmb();
+ wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
+
+ switch (wr_type) {
+ case X_RSPD_TYPE_CPL:
+ /* Subtract footer from WR len */
+ iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
+ break;
+ case X_RSPD_TYPE_FLBUF:
+ csio_wr_process_fl(hw, q, wr,
+ ntohl(ftr->pldbuflen_qid),
+ iq_handler, priv);
+ break;
+ case X_RSPD_TYPE_INTR:
+ fw_qid = ntohl(ftr->pldbuflen_qid);
+ qid = fw_qid - wrm->fw_iq_start;
+ q_completed = hw->wrm.intr_map[qid];
+
+ if (unlikely(qid ==
+ csio_q_physiqid(hw, hw->intr_iq_idx))) {
+ /*
+ * We are already servicing the Forward
+ * Interrupt Queue! Do not service it
+ * again!
+ *
+ */
+ } else {
+ CSIO_DB_ASSERT(q_completed);
+ CSIO_DB_ASSERT(
+ q_completed->un.iq.iq_intx_handler);
+
+ /* Call the queue handler. */
+ q_completed->un.iq.iq_intx_handler(hw, NULL,
+ 0, NULL, (void *)q_completed);
+ }
+ break;
+ default:
+ csio_warn(hw, "Unknown resp type 0x%x received\n",
+ wr_type);
+ CSIO_INC_STATS(q, n_rsp_unknown);
+ break;
+ }
+
+ /*
+ * Ingress *always* has fixed size WR entries. Therefore,
+ * there should always be complete WRs towards the end of
+ * queue.
+ */
+ if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
+
+ /* Roll over to start of queue */
+ q->cidx = 0;
+ wr = q->vstart;
+
+ /* Toggle genbit */
+ q->un.iq.genbit ^= 0x1;
+
+ CSIO_INC_STATS(q, n_qwrap);
+ } else {
+ q->cidx++;
+ wr = (void *)((uintptr_t)(q->vstart) +
+ (q->cidx * q->wr_sz));
+ }
+
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+ q->inc_idx++;
+
+ } /* while (csio_is_new_iqwr(q, ftr)) */
+
+ /*
+ * We need to re-arm SGE interrupts in case we got a stray interrupt,
+ * especially in MSI-X mode. With INTx, this may be a common occurrence.
+ */
+ if (unlikely(!q->inc_idx)) {
+ CSIO_INC_STATS(q, n_stray_comp);
+ rv = -EINVAL;
+ goto restart;
+ }
+
+ /* Replenish free list buffers if pending falls below low water mark */
+ if (flq) {
+ uint32_t avail = csio_wr_avail_qcredits(flq);
+ if (avail <= 16) {
+ /* Make sure that in the FLQ, at least 1 credit (8 FL buffers)
+ * remains unpopulated, otherwise the HW thinks the
+ * FLQ is empty.
+ */
+ csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
+ csio_wr_ring_fldb(hw, flq);
+ }
+ }
+
+restart:
+ /* Now inform SGE about our incremental index value */
+ csio_wr_reg32(hw, CIDXINC(q->inc_idx) |
+ INGRESSQID(q->un.iq.physiqid) |
+ TIMERREG(csio_sge_timer_reg),
+ MYPF_REG(SGE_PF_GTS));
+ q->stats.n_tot_rsps += q->inc_idx;
+
+ q->inc_idx = 0;
+
+ return rv;
+}
+
+int
+csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *iq = wrm->q_arr[qidx];
+
+ return csio_wr_process_iq(hw, iq, iq_handler, priv);
+}
+
+static int
+csio_closest_timer(struct csio_sge *s, int time)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int
+csio_closest_thresh(struct csio_sge *s, int cnt)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = cnt - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static void
+csio_wr_fixup_host_params(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ uint32_t clsz = L1_CACHE_BYTES;
+ uint32_t s_hps = PAGE_SHIFT - 10;
+ uint32_t ingpad = 0;
+ uint32_t stat_len = clsz > 64 ? 128 : 64;
+
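+ /*
+  * s_hps encodes the host page size as log2(PAGE_SIZE) - 10 (e.g. 2 for
+  * 4KB pages), which is the encoding the HOSTPAGESIZEPF fields expect.
+  */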
+ csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
+ HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
+ HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
+ HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
+ SGE_HOST_PAGE_SIZE);
+
+ sge->csio_fl_align = clsz < 32 ? 32 : clsz;
+ ingpad = ilog2(sge->csio_fl_align) - 5;
+
+ csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE(1),
+ INGPADBOUNDARY(ingpad) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+
+ /* FL BUFFER SIZE#0 is the page size, i.e. already aligned to the cache line */
+ csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE2);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE3);
+
+ csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
+
+ /* default value of rx_dma_offset of the NIC driver */
+ csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
+ PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+}
+
+static void
+csio_init_intr_coalesce_parms(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+
+ csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
+ if (csio_intr_coalesce_cnt) {
+ csio_sge_thresh_reg = 0;
+ csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
+ return;
+ }
+
+ csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
+}
+
+/*
+ * csio_wr_get_sge - Get SGE register values.
+ * @hw: HW module.
+ *
+ * Used by non-master functions and by master-functions relying on config file.
+ */
+static void
+csio_wr_get_sge(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ uint32_t ingpad;
+ int i;
+ u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+ u32 ingress_rx_threshold;
+
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+
+ ingpad = INGPADBOUNDARY_GET(sge->sge_control);
+
+ switch (ingpad) {
+ case X_INGPCIEBOUNDARY_32B:
+ sge->csio_fl_align = 32; break;
+ case X_INGPCIEBOUNDARY_64B:
+ sge->csio_fl_align = 64; break;
+ case X_INGPCIEBOUNDARY_128B:
+ sge->csio_fl_align = 128; break;
+ case X_INGPCIEBOUNDARY_256B:
+ sge->csio_fl_align = 256; break;
+ case X_INGPCIEBOUNDARY_512B:
+ sge->csio_fl_align = 512; break;
+ case X_INGPCIEBOUNDARY_1024B:
+ sge->csio_fl_align = 1024; break;
+ case X_INGPCIEBOUNDARY_2048B:
+ sge->csio_fl_align = 2048; break;
+ case X_INGPCIEBOUNDARY_4096B:
+ sge->csio_fl_align = 4096; break;
+ }
+
+ for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+ csio_get_flbuf_size(hw, sge, i);
+
+ timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
+ timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
+ timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
+
+ sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE0_GET(timer_value_0_and_1));
+ sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE1_GET(timer_value_0_and_1));
+ sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE2_GET(timer_value_2_and_3));
+ sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE3_GET(timer_value_2_and_3));
+ sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE4_GET(timer_value_4_and_5));
+ sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE5_GET(timer_value_4_and_5));
+
+ ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
+ sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
+ sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
+ sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
+ sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+
+ csio_init_intr_coalesce_parms(hw);
+}
+
+/*
+ * csio_wr_set_sge - Initialize SGE registers
+ * @hw: HW module.
+ *
+ * Used by Master function to initialize SGE registers in the absence
+ * of a config file.
+ */
+static void
+csio_wr_set_sge(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ int i;
+
+ /*
+ * Set up our basic SGE mode to deliver CPL messages to our Ingress
+ * Queue and Packet Data to the Free List.
+ */
+ csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
+
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+
+ /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
+
+ /*
+ * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
+ * and generate an interrupt when this occurs so we can recover.
+ */
+ csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
+ HP_INT_THRESH(HP_INT_THRESH_MASK) |
+ LP_INT_THRESH(LP_INT_THRESH_MASK),
+ HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
+ LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
+ csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
+ ENABLE_DROP);
+
+ /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
+
+ CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
+ CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
+ CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
+ CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
+ CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
+ CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
+ CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
+ CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
+
+ for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+ csio_get_flbuf_size(hw, sge, i);
+
+ /* Initialize interrupt coalescing attributes */
+ sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
+ sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
+ sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
+ sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
+ sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
+ sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;
+
+ sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
+ sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
+ sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
+ sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
+
+ csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
+ THRESHOLD_1(sge->counter_val[1]) |
+ THRESHOLD_2(sge->counter_val[2]) |
+ THRESHOLD_3(sge->counter_val[3]),
+ SGE_INGRESS_RX_THRESHOLD);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+ TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+ SGE_TIMER_VALUE_0_AND_1);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+ TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+ SGE_TIMER_VALUE_2_AND_3);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+ TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+ SGE_TIMER_VALUE_4_AND_5);
+
+ csio_init_intr_coalesce_parms(hw);
+}
+
+void
+csio_wr_sge_init(struct csio_hw *hw)
+{
+ /*
+ * If we are master:
+ * - If we plan to use the config file, we need to fixup some
+ * host specific registers, and read the rest of the SGE
+ * configuration.
+ * - If we don't plan to use the config file, we need to initialize
+ * SGE entirely, including fixing the host specific registers.
+ * If we aren't the master, we are only allowed to read and work off of
+ * the already initialized SGE values.
+ *
+ * Therefore, before calling this function, we assume that the master-
+ * ship of the card, and whether to use config file or not, have
+ * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
+ * CSIO_HWF_MASTER should be set/unset.
+ */
+ if (csio_is_hw_master(hw)) {
+ csio_wr_fixup_host_params(hw);
+
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
+ csio_wr_get_sge(hw);
+ else
+ csio_wr_set_sge(hw);
+ } else
+ csio_wr_get_sge(hw);
+}
+
+/*
+ * csio_wrm_init - Initialize Work request module.
+ * @wrm: WR module
+ * @hw: HW pointer
+ *
+ * Allocates memory for an array of queue pointers starting at q_arr.
+ */
+int
+csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+ int i;
+
+ if (!wrm->num_q) {
+ csio_err(hw, "Num queues is not set\n");
+ return -EINVAL;
+ }
+
+ wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
+ if (!wrm->q_arr)
+ goto err;
+
+ for (i = 0; i < wrm->num_q; i++) {
+ wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
+ if (!wrm->q_arr[i]) {
+ while (--i >= 0)
+ kfree(wrm->q_arr[i]);
+ goto err_free_arr;
+ }
+ }
+ wrm->free_qidx = 0;
+
+ return 0;
+
+err_free_arr:
+ kfree(wrm->q_arr);
+err:
+ return -ENOMEM;
+}
+
+/*
+ * csio_wrm_exit - Uninitialize Work request module.
+ * @wrm: WR module
+ * @hw: HW module
+ *
+ * Uninitialize WR module. Free q_arr and pointers in it.
+ * We have the additional job of freeing the DMA memory associated
+ * with the queues.
+ */
+void
+csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+ int i;
+ uint32_t j;
+ struct csio_q *q;
+ struct csio_dma_buf *buf;
+
+ for (i = 0; i < wrm->num_q; i++) {
+ q = wrm->q_arr[i];
+
+ if (wrm->free_qidx && (i < wrm->free_qidx)) {
+ if (q->type == CSIO_FREELIST) {
+ if (!q->un.fl.bufs)
+ continue;
+ for (j = 0; j < q->credits; j++) {
+ buf = &q->un.fl.bufs[j];
+ if (!buf->vaddr)
+ continue;
+ pci_free_consistent(hw->pdev, buf->len,
+ buf->vaddr,
+ buf->paddr);
+ }
+ kfree(q->un.fl.bufs);
+ }
+ pci_free_consistent(hw->pdev, q->size,
+ q->vstart, q->pstart);
+ }
+ kfree(q);
+ }
+
+ hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;
+
+ kfree(wrm->q_arr);
+}
diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h
new file mode 100644
index 000000000000..8d30e7ac1f5e
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.h
@@ -0,0 +1,512 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_WR_H__
+#define __CSIO_WR_H__
+
+#include <linux/cache.h>
+
+#include "csio_defs.h"
+#include "t4fw_api.h"
+#include "t4fw_api_stor.h"
+
+/*
+ * SGE register field values.
+ */
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPCIEBOUNDARY_64B 1
+#define X_INGPCIEBOUNDARY_128B 2
+#define X_INGPCIEBOUNDARY_256B 3
+#define X_INGPCIEBOUNDARY_512B 4
+#define X_INGPCIEBOUNDARY_1024B 5
+#define X_INGPCIEBOUNDARY_2048B 6
+#define X_INGPCIEBOUNDARY_4096B 7
+
+/* GTS register */
+#define X_TIMERREG_COUNTER0 0
+#define X_TIMERREG_COUNTER1 1
+#define X_TIMERREG_COUNTER2 2
+#define X_TIMERREG_COUNTER3 3
+#define X_TIMERREG_COUNTER4 4
+#define X_TIMERREG_COUNTER5 5
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_16B 0
+#define X_FETCHBURSTMIN_32B 1
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+
+#define X_FETCHBURSTMAX_64B 0
+#define X_FETCHBURSTMAX_128B 1
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+#define X_HOSTFCMODE_INGRESS_QUEUE 1
+#define X_HOSTFCMODE_STATUS_PAGE 2
+#define X_HOSTFCMODE_BOTH 3
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATESCHEDULING_TIMER 0
+#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1
+
+#define X_UPDATEDELIVERY_NONE 0
+#define X_UPDATEDELIVERY_INTERRUPT 1
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
+#define X_UPDATEDELIVERY_BOTH 3
+
+#define X_INTERRUPTDESTINATION_PCIE 0
+#define X_INTERRUPTDESTINATION_IQ 1
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+#define X_RSPD_TYPE_INTR 2
+
+/* WR status is at the same position as retval in a CMD header */
+#define csio_wr_status(_wr) \
+ (FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
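
A brief usage sketch of the macro above may help: the retval layout comes from struct fw_cmd_hdr in t4fw_api.h, where FW_SUCCESS (0) is the firmware's success code; the helper name below is hypothetical and purely illustrative, not part of this header.

/* Illustrative only: check whether a completed WR carries a successful retval. */
static inline int example_wr_succeeded(const void *wr)
{
	return csio_wr_status(wr) == FW_SUCCESS;
}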
+
+struct csio_hw;
+
+extern int csio_intr_coalesce_cnt;
+extern int csio_intr_coalesce_time;
+
+/* Ingress queue params */
+struct csio_iq_params {
+
+ uint8_t iq_start:1;
+ uint8_t iq_stop:1;
+ uint8_t pfn:3;
+
+ uint8_t vfn;
+
+ uint16_t physiqid;
+ uint16_t iqid;
+
+ uint16_t fl0id;
+ uint16_t fl1id;
+
+ uint8_t viid;
+
+ uint8_t type;
+ uint8_t iqasynch;
+ uint8_t reserved4;
+
+ uint8_t iqandst;
+ uint8_t iqanus;
+ uint8_t iqanud;
+
+ uint16_t iqandstindex;
+
+ uint8_t iqdroprss;
+ uint8_t iqpciech;
+ uint8_t iqdcaen;
+
+ uint8_t iqdcacpu;
+ uint8_t iqintcntthresh;
+ uint8_t iqo;
+
+ uint8_t iqcprio;
+ uint8_t iqesize;
+
+ uint16_t iqsize;
+
+ uint64_t iqaddr;
+
+ uint8_t iqflintiqhsen;
+ uint8_t reserved5;
+ uint8_t iqflintcongen;
+ uint8_t iqflintcngchmap;
+
+ uint32_t reserved6;
+
+ uint8_t fl0hostfcmode;
+ uint8_t fl0cprio;
+ uint8_t fl0paden;
+ uint8_t fl0packen;
+ uint8_t fl0congen;
+ uint8_t fl0dcaen;
+
+ uint8_t fl0dcacpu;
+ uint8_t fl0fbmin;
+
+ uint8_t fl0fbmax;
+ uint8_t fl0cidxfthresho;
+ uint8_t fl0cidxfthresh;
+
+ uint16_t fl0size;
+
+ uint64_t fl0addr;
+
+ uint64_t reserved7;
+
+ uint8_t fl1hostfcmode;
+ uint8_t fl1cprio;
+ uint8_t fl1paden;
+ uint8_t fl1packen;
+ uint8_t fl1congen;
+ uint8_t fl1dcaen;
+
+ uint8_t fl1dcacpu;
+ uint8_t fl1fbmin;
+
+ uint8_t fl1fbmax;
+ uint8_t fl1cidxfthresho;
+ uint8_t fl1cidxfthresh;
+
+ uint16_t fl1size;
+
+ uint64_t fl1addr;
+};
+
+/* Egress queue params */
+struct csio_eq_params {
+
+ uint8_t pfn;
+ uint8_t vfn;
+
+ uint8_t eqstart:1;
+ uint8_t eqstop:1;
+
+ uint16_t physeqid;
+ uint32_t eqid;
+
+ uint8_t hostfcmode:2;
+ uint8_t cprio:1;
+ uint8_t pciechn:3;
+
+ uint16_t iqid;
+
+ uint8_t dcaen:1;
+ uint8_t dcacpu:5;
+
+ uint8_t fbmin:3;
+ uint8_t fbmax:3;
+
+ uint8_t cidxfthresho:1;
+ uint8_t cidxfthresh:3;
+
+ uint16_t eqsize;
+
+ uint64_t eqaddr;
+};
+
+struct csio_dma_buf {
+ struct list_head list;
+ void *vaddr; /* Virtual address */
+ dma_addr_t paddr; /* Physical address */
+ uint32_t len; /* Buffer size */
+};
+
+/* Generic I/O request structure */
+struct csio_ioreq {
+ struct csio_sm sm; /* SM, List
+ * should be the first member
+ */
+ int iq_idx; /* Ingress queue index */
+ int eq_idx; /* Egress queue index */
+ uint32_t nsge; /* Number of SG elements */
+ uint32_t tmo; /* Driver timeout */
+ uint32_t datadir; /* Data direction */
+ struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */
+ uint16_t wr_status; /* WR completion status */
+ int16_t drv_status; /* Driver internal status */
+ struct csio_lnode *lnode; /* Owner lnode */
+ struct csio_rnode *rnode; /* Src/destination rnode */
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
+ /* completion callback */
+	void			*scratch1;	/* Scratch area 1. */
+ void *scratch2; /* Scratch area 2. */
+ struct list_head gen_list; /* Any list associated with
+ * this ioreq.
+ */
+ uint64_t fw_handle; /* Unique handle passed
+ * to FW
+ */
+ uint8_t dcopy; /* Data copy required */
+ uint8_t reserved1;
+ uint16_t reserved2;
+ struct completion cmplobj; /* ioreq completion object */
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Egress status page for egress cidx updates
+ */
+struct csio_qstatus_page {
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+
+enum {
+ CSIO_MAX_FLBUF_PER_IQWR = 4,
+ CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments
+ * in bytes
+ */
+ CSIO_MAX_QID = 0xFFFF,
+ CSIO_MAX_IQ = 128,
+
+ CSIO_SGE_NTIMERS = 6,
+ CSIO_SGE_NCOUNTERS = 4,
+ CSIO_SGE_FL_SIZE_REGS = 16,
+};
+
+/* Defines for type */
+enum {
+ CSIO_EGRESS = 1,
+ CSIO_INGRESS = 2,
+ CSIO_FREELIST = 3,
+};
+
+/*
+ * Structure for footer (last 2 flits) of Ingress Queue Entry.
+ */
+struct csio_iqwr_footer {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ } u;
+};
+
+#define IQWRF_NEWBUF (1 << 31)
+#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU)
+#define IQWRF_GEN_SHIFT 7
+#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U)
+
+
+/*
+ * WR pair:
+ * ========
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require a pair of address/len to be passed back to the caller -
+ * hence the Work request pair structure.
+ */
+struct csio_wr_pair {
+ void *addr1;
+ uint32_t size1;
+ void *addr2;
+ uint32_t size2;
+};
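
To make the wrap-around concrete, here is a minimal sketch of copying a payload into the two segments of a csio_wr_pair. It assumes the pair was already filled in by csio_wr_get(); the helper name is hypothetical (the driver's real routine for this is csio_wr_copy_to_wrp(), declared later in this header), and memcpy comes from <linux/string.h>.

/* Illustrative sketch: copy 'len' bytes of a WR payload across a queue wrap. */
static inline void example_copy_to_wrp(const void *data,
				       struct csio_wr_pair *wrp, uint32_t len)
{
	/* First segment: from the WR start up to the end of the queue. */
	uint32_t part1 = len < wrp->size1 ? len : wrp->size1;

	memcpy(wrp->addr1, data, part1);

	/* Remainder, if any, wrapped around to the start of the queue. */
	if (len > part1)
		memcpy(wrp->addr2, (const char *)data + part1, len - part1);
}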
+
+/*
+ * The following structure is used by ingress processing to return the
+ * free list buffers to consumers.
+ */
+struct csio_fl_dma_buf {
+ struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR];
+ /* Freelist DMA buffers */
+ int offset; /* Offset within the
+ * first FL buf.
+ */
+ uint32_t totlen; /* Total length */
+	uint8_t			defer_free;	/* Freeing of buffer can be
+						 * deferred
+						 */
+};
+
+/* Data-types */
+typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+struct csio_iq {
+ uint16_t iqid; /* Queue ID */
+ uint16_t physiqid; /* Physical Queue ID */
+ uint16_t genbit; /* Generation bit,
+ * initially set to 1
+ */
+ int flq_idx; /* Freelist queue index */
+ iq_handler_t iq_intx_handler; /* IQ INTx handler routine */
+};
+
+struct csio_eq {
+ uint16_t eqid; /* Qid */
+ uint16_t physeqid; /* Physical Queue ID */
+ uint8_t wrap[512]; /* Temp area for q-wrap around*/
+};
+
+struct csio_fl {
+ uint16_t flid; /* Qid */
+ uint16_t packen; /* Packing enabled? */
+ int offset; /* Offset within FL buf */
+ int sreg; /* Size register */
+ struct csio_dma_buf *bufs; /* Free list buffer ptr array
+ * indexed using flq->cidx/pidx
+ */
+};
+
+struct csio_qstats {
+ uint32_t n_tot_reqs; /* Total no. of Requests */
+ uint32_t n_tot_rsps; /* Total no. of responses */
+ uint32_t n_qwrap; /* Queue wraps */
+ uint32_t n_eq_wr_split; /* Number of split EQ WRs */
+ uint32_t n_qentry; /* Queue entry */
+ uint32_t n_qempty; /* Queue empty */
+ uint32_t n_qfull; /* Queue fulls */
+ uint32_t n_rsp_unknown; /* Unknown response type */
+ uint32_t n_stray_comp; /* Stray completion intr */
+ uint32_t n_flq_refill; /* Number of FL refills */
+};
+
+/* Queue metadata */
+struct csio_q {
+ uint16_t type; /* Type: Ingress/Egress/FL */
+ uint16_t pidx; /* producer index */
+ uint16_t cidx; /* consumer index */
+ uint16_t inc_idx; /* Incremental index */
+ uint32_t wr_sz; /* Size of all WRs in this q
+ * if fixed
+ */
+ void *vstart; /* Base virtual address
+ * of queue
+ */
+ void *vwrap; /* Virtual end address to
+ * wrap around at
+ */
+ uint32_t credits; /* Size of queue in credits */
+ void *owner; /* Owner */
+ union { /* Queue contexts */
+ struct csio_iq iq;
+ struct csio_eq eq;
+ struct csio_fl fl;
+ } un;
+
+ dma_addr_t pstart; /* Base physical address of
+ * queue
+ */
+ uint32_t portid; /* PCIE Channel */
+ uint32_t size; /* Size of queue in bytes */
+ struct csio_qstats stats; /* Statistics */
+} ____cacheline_aligned_in_smp;
+
+struct csio_sge {
+ uint32_t csio_fl_align; /* Calculated and cached
+ * for fast path
+ */
+ uint32_t sge_control; /* padding, boundaries,
+ * lengths, etc.
+ */
+ uint32_t sge_host_page_size; /* Host page size */
+ uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
+ /* free list buffer sizes */
+ uint16_t timer_val[CSIO_SGE_NTIMERS];
+ uint8_t counter_val[CSIO_SGE_NCOUNTERS];
+};
+
+/* Work request module */
+struct csio_wrm {
+ int num_q; /* Number of queues */
+ struct csio_q **q_arr; /* Array of queue pointers
+ * allocated dynamically
+ * based on configured values
+ */
+ uint32_t fw_iq_start; /* Start ID of IQ for this fn*/
+ uint32_t fw_eq_start; /* Start ID of EQ for this fn*/
+ struct csio_q *intr_map[CSIO_MAX_IQ];
+ /* IQ-id to IQ map table. */
+ int free_qidx; /* queue idx of free queue */
+ struct csio_sge sge; /* SGE params */
+};
+
+#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx])
+#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type)
+#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx)
+#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx)
+#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx)
+#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart)
+#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart)
+#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size)
+#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits)
+#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid)
+#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz)
+#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
+#define csio_q_physiqid(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
+#define csio_q_iq_flq_idx(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
+#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
+#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
+
+#define csio_q_physeqid(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
+#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
+
+#define csio_q_iq_to_flid(__hw, __iq_idx) \
+	csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_idx)]->un.iq.flq_idx)
+#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \
+ (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
+#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
+
+struct csio_mb;
+
+int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
+ uint16_t, void *, uint32_t, int, iq_handler_t);
+int csio_wr_iq_create(struct csio_hw *, void *, int,
+ uint32_t, uint8_t, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+int csio_wr_destroy_queues(struct csio_hw *, bool cmd);
+
+
+int csio_wr_get(struct csio_hw *, int, uint32_t,
+ struct csio_wr_pair *);
+void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
+int csio_wr_issue(struct csio_hw *, int, bool);
+int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
+ void (*)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *);
+int csio_wr_process_iq_idx(struct csio_hw *, int,
+ void (*)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *);
+
+void csio_wr_sge_init(struct csio_hw *);
+int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
+void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);
+
+#endif /* ifndef __CSIO_WR_H__ */
diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h
new file mode 100644
index 000000000000..097e52c0f8e1
--- /dev/null
+++ b/drivers/scsi/csiostor/t4fw_api_stor.h
@@ -0,0 +1,539 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_API_STOR_H_
+#define _T4FW_API_STOR_H_
+
+
+/******************************************************************************
+ * R E T U R N V A L U E S
+ ********************************/
+
+enum fw_fcoe_link_sub_op {
+ FCOE_LINK_DOWN = 0x0,
+ FCOE_LINK_UP = 0x1,
+ FCOE_LINK_COND = 0x2,
+};
+
+enum fw_fcoe_link_status {
+ FCOE_LINKDOWN = 0x0,
+ FCOE_LINKUP = 0x1,
+};
+
+enum fw_ofld_prot {
+ PROT_FCOE = 0x1,
+ PROT_ISCSI = 0x2,
+};
+
+enum rport_type_fcoe {
+ FLOGI_VFPORT = 0x1, /* 0xfffffe */
+ FDISC_VFPORT = 0x2, /* 0xfffffe */
+ NS_VNPORT = 0x3, /* 0xfffffc */
+ REG_FC4_VNPORT = 0x4, /* any FC4 type VN_PORT */
+ REG_VNPORT = 0x5, /* 0xfffxxx - non FC4 port in switch */
+ FDMI_VNPORT = 0x6, /* 0xfffffa */
+ FAB_CTLR_VNPORT = 0x7, /* 0xfffffd */
+};
+
+enum event_cause_fcoe {
+ PLOGI_ACC_RCVD = 0x01,
+ PLOGI_RJT_RCVD = 0x02,
+ PLOGI_RCVD = 0x03,
+ PLOGO_RCVD = 0x04,
+ PRLI_ACC_RCVD = 0x05,
+ PRLI_RJT_RCVD = 0x06,
+ PRLI_RCVD = 0x07,
+ PRLO_RCVD = 0x08,
+ NPORT_ID_CHGD = 0x09,
+ FLOGO_RCVD = 0x0a,
+ CLR_VIRT_LNK_RCVD = 0x0b,
+ FLOGI_ACC_RCVD = 0x0c,
+ FLOGI_RJT_RCVD = 0x0d,
+ FDISC_ACC_RCVD = 0x0e,
+ FDISC_RJT_RCVD = 0x0f,
+ FLOGI_TMO_MAX_RETRY = 0x10,
+ IMPL_LOGO_ADISC_ACC = 0x11,
+ IMPL_LOGO_ADISC_RJT = 0x12,
+ IMPL_LOGO_ADISC_CNFLT = 0x13,
+ PRLI_TMO = 0x14,
+ ADISC_TMO = 0x15,
+ RSCN_DEV_LOST = 0x16,
+ SCR_ACC_RCVD = 0x17,
+ ADISC_RJT_RCVD = 0x18,
+ LOGO_SNT = 0x19,
+ PROTO_ERR_IMPL_LOGO = 0x1a,
+};
+
+enum fcoe_cmn_type {
+ FCOE_ELS,
+ FCOE_CT,
+ FCOE_SCSI_CMD,
+ FCOE_UNSOL_ELS,
+};
+
+enum fw_wr_stor_opcodes {
+ FW_RDEV_WR = 0x38,
+ FW_FCOE_ELS_CT_WR = 0x30,
+ FW_SCSI_WRITE_WR = 0x31,
+ FW_SCSI_READ_WR = 0x32,
+ FW_SCSI_CMD_WR = 0x33,
+ FW_SCSI_ABRT_CLS_WR = 0x34,
+};
+
+struct fw_rdev_wr {
+ __be32 op_to_immdlen;
+ __be32 alloc_to_len16;
+ __be64 cookie;
+ u8 protocol;
+ u8 event_cause;
+ u8 cur_state;
+ u8 prev_state;
+ __be32 flags_to_assoc_flowid;
+ union rdev_entry {
+ struct fcoe_rdev_entry {
+ __be32 flowid;
+ u8 protocol;
+ u8 event_cause;
+ u8 flags;
+ u8 rjt_reason;
+ u8 cur_login_st;
+ u8 prev_login_st;
+ __be16 rcv_fr_sz;
+ u8 rd_xfer_rdy_to_rport_type;
+ u8 vft_to_qos;
+ u8 org_proc_assoc_to_acc_rsp_code;
+ u8 enh_disc_to_tgt;
+ u8 wwnn[8];
+ u8 wwpn[8];
+ __be16 iqid;
+ u8 fc_oui[3];
+ u8 r_id[3];
+ } fcoe_rdev;
+ struct iscsi_rdev_entry {
+ __be32 flowid;
+ u8 protocol;
+ u8 event_cause;
+ u8 flags;
+ u8 r3;
+ __be16 iscsi_opts;
+ __be16 tcp_opts;
+ __be16 ip_opts;
+ __be16 max_rcv_len;
+ __be16 max_snd_len;
+ __be16 first_brst_len;
+ __be16 max_brst_len;
+ __be16 r4;
+ __be16 def_time2wait;
+ __be16 def_time2ret;
+ __be16 nop_out_intrvl;
+ __be16 non_scsi_to;
+ __be16 isid;
+ __be16 tsid;
+ __be16 port;
+ __be16 tpgt;
+ u8 r5[6];
+ __be16 iqid;
+ } iscsi_rdev;
+ } u;
+};
+
+#define FW_RDEV_WR_FLOWID_GET(x) (((x) >> 8) & 0xfffff)
+#define FW_RDEV_WR_ASSOC_FLOWID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_RDEV_WR_RPORT_TYPE_GET(x) (((x) >> 0) & 0x1f)
+#define FW_RDEV_WR_NPIV_GET(x) (((x) >> 6) & 0x1)
+#define FW_RDEV_WR_CLASS_GET(x) (((x) >> 4) & 0x3)
+#define FW_RDEV_WR_TASK_RETRY_ID_GET(x) (((x) >> 5) & 0x1)
+#define FW_RDEV_WR_RETRY_GET(x) (((x) >> 4) & 0x1)
+#define FW_RDEV_WR_CONF_CMPL_GET(x) (((x) >> 3) & 0x1)
+#define FW_RDEV_WR_INI_GET(x) (((x) >> 1) & 0x1)
+#define FW_RDEV_WR_TGT_GET(x) (((x) >> 0) & 0x1)
+
+struct fw_fcoe_els_ct_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 els_ct_type;
+ u8 ctl_pri;
+ u8 cp_en_class;
+ __be16 xfer_cnt;
+ u8 fl_to_sp;
+ u8 l_id[3];
+ u8 r5;
+ u8 r_id[3];
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r6;
+};
+
+#define FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << 24)
+#define FW_FCOE_ELS_CT_WR_OPCODE_GET(x) (((x) >> 24) & 0xff)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << 0)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x) (((x) >> 0) & 0xff)
+#define FW_FCOE_ELS_CT_WR_SP(x) ((x) << 0)
+
+struct fw_scsi_write_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 use_xfer_cnt;
+ union fw_scsi_write_priv {
+ struct fcoe_write_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r3_lo[2];
+ } fcoe;
+ struct iscsi_write_priv {
+ u8 r3[4];
+ } iscsi;
+ } u;
+ __be32 xfer_cnt;
+ __be32 ini_xfer_cnt;
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r4;
+};
+
+#define FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << 0)
+
+struct fw_scsi_read_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 use_xfer_cnt;
+ union fw_scsi_read_priv {
+ struct fcoe_read_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r3_lo[2];
+ } fcoe;
+ struct iscsi_read_priv {
+ u8 r3[4];
+ } iscsi;
+ } u;
+ __be32 xfer_cnt;
+ __be32 ini_xfer_cnt;
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r4;
+};
+
+#define FW_SCSI_READ_WR_IMMDLEN(x) ((x) << 0)
+
+struct fw_scsi_cmd_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 r3;
+ union fw_scsi_cmd_priv {
+ struct fcoe_cmd_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r4_lo[2];
+ } fcoe;
+ struct iscsi_cmd_priv {
+ u8 r4[4];
+ } iscsi;
+ } u;
+ u8 r5[8];
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r6;
+};
+
+#define FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << 0)
+
+#define SCSI_ABORT 0
+#define SCSI_CLOSE 1
+
+struct fw_scsi_abrt_cls_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 sub_opcode_to_chk_all_io;
+ u8 r3[4];
+ u64 t_cookie;
+};
+
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) ((x) << 2)
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x) (((x) >> 2) & 0x3f)
+#define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) ((x) << 0)
+
+enum fw_cmd_stor_opcodes {
+ FW_FCOE_RES_INFO_CMD = 0x31,
+ FW_FCOE_LINK_CMD = 0x32,
+ FW_FCOE_VNP_CMD = 0x33,
+ FW_FCOE_SPARAMS_CMD = 0x35,
+ FW_FCOE_STATS_CMD = 0x37,
+ FW_FCOE_FCF_CMD = 0x38,
+};
+
+struct fw_fcoe_res_info_cmd {
+ __be32 op_to_read;
+ __be32 retval_len16;
+ __be16 e_d_tov;
+ __be16 r_a_tov_seq;
+ __be16 r_a_tov_els;
+ __be16 r_r_tov;
+ __be32 max_xchgs;
+ __be32 max_ssns;
+ __be32 used_xchgs;
+ __be32 used_ssns;
+ __be32 max_fcfs;
+ __be32 max_vnps;
+ __be32 used_fcfs;
+ __be32 used_vnps;
+};
+
+struct fw_fcoe_link_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ __be32 sub_opcode_fcfi;
+ u8 r3;
+ u8 lstatus;
+ __be16 flags;
+ u8 r4;
+ u8 set_vlan;
+ __be16 vlan_id;
+ __be32 vnpi_pkd;
+ __be16 r6;
+ u8 phy_mac[6];
+ u8 vnport_wwnn[8];
+ u8 vnport_wwpn[8];
+};
+
+#define FW_FCOE_LINK_CMD_PORTID(x) ((x) << 0)
+#define FW_FCOE_LINK_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+#define FW_FCOE_LINK_CMD_SUB_OPCODE(x) ((x) << 24U)
+#define FW_FCOE_LINK_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_LINK_CMD_FCFI_GET(x) (((x) >> 0) & 0xffffff)
+#define FW_FCOE_LINK_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_vnp_cmd {
+ __be32 op_to_fcfi;
+ __be32 alloc_to_len16;
+ __be32 gen_wwn_to_vnpi;
+ __be32 vf_id;
+ __be16 iqid;
+ u8 vnport_mac[6];
+ u8 vnport_wwnn[8];
+ u8 vnport_wwpn[8];
+ u8 cmn_srv_parms[16];
+ u8 clsp_word_0_1[8];
+};
+
+#define FW_FCOE_VNP_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_VNP_CMD_ALLOC (1U << 31)
+#define FW_FCOE_VNP_CMD_FREE (1U << 30)
+#define FW_FCOE_VNP_CMD_MODIFY (1U << 29)
+#define FW_FCOE_VNP_CMD_GEN_WWN (1U << 22)
+#define FW_FCOE_VNP_CMD_VFID_EN (1U << 20)
+#define FW_FCOE_VNP_CMD_VNPI(x) ((x) << 0)
+#define FW_FCOE_VNP_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_sparams_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ u8 r3[7];
+ u8 cos;
+ u8 lport_wwnn[8];
+ u8 lport_wwpn[8];
+ u8 cmn_srv_parms[16];
+ u8 cls_srv_parms[16];
+};
+
+#define FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << 0)
+
+struct fw_fcoe_stats_cmd {
+ __be32 op_to_flowid;
+ __be32 free_to_len16;
+ union fw_fcoe_stats {
+ struct fw_fcoe_stats_ctl {
+ u8 nstats_port;
+ u8 port_valid_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_fcoe_port_stats {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } port_stats;
+ struct fw_fcoe_fcf_stats {
+ __be32 fip_tx_bytes;
+ __be32 fip_tx_fr;
+ __be64 fcf_ka;
+ __be64 mcast_adv_rcvd;
+ __be16 ucast_adv_rcvd;
+ __be16 sol_sent;
+ __be16 vlan_req;
+ __be16 vlan_rpl;
+ __be16 clr_vlink;
+ __be16 link_down;
+ __be16 link_up;
+ __be16 logo;
+ __be16 flogi_req;
+ __be16 flogi_rpl;
+ __be16 fdisc_req;
+ __be16 fdisc_rpl;
+ __be16 fka_prd_chg;
+ __be16 fc_map_chg;
+ __be16 vfid_chg;
+ u8 no_fka_req;
+ u8 no_vnp;
+ } fcf_stats;
+ struct fw_fcoe_pcb_stats {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be32 vnp_ka;
+ __be32 unsol_els_rcvd;
+ __be64 unsol_cmd_rcvd;
+ __be16 implicit_logo;
+ __be16 flogi_inv_sparm;
+ __be16 fdisc_inv_sparm;
+ __be16 flogi_rjt;
+ __be16 fdisc_rjt;
+ __be16 no_ssn;
+ __be16 mac_flt_fail;
+ __be16 inv_fr_rcvd;
+ } pcb_stats;
+ struct fw_fcoe_scb_stats {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be32 host_abrt_req;
+ __be32 adap_auto_abrt;
+ __be32 adap_abrt_rsp;
+ __be32 host_ios_req;
+ __be16 ssn_offl_ios;
+ __be16 ssn_not_rdy_ios;
+ u8 rx_data_ddp_err;
+ u8 ddp_flt_set_err;
+ __be16 rx_data_fr_err;
+ u8 bad_st_abrt_req;
+ u8 no_io_abrt_req;
+ u8 abort_tmo;
+ u8 abort_tmo_2;
+ __be32 abort_req;
+ u8 no_ppod_res_tmo;
+ u8 bp_tmo;
+ u8 adap_auto_cls;
+ u8 no_io_cls_req;
+ __be32 host_cls_req;
+ __be64 unsol_cmd_rcvd;
+ __be32 plogi_req_rcvd;
+ __be32 prli_req_rcvd;
+ __be16 logo_req_rcvd;
+ __be16 prlo_req_rcvd;
+ __be16 plogi_rjt_rcvd;
+ __be16 prli_rjt_rcvd;
+ __be32 adisc_req_rcvd;
+ __be32 rscn_rcvd;
+ __be32 rrq_req_rcvd;
+ __be32 unsol_els_rcvd;
+ u8 adisc_rjt_rcvd;
+ u8 scr_rjt;
+ u8 ct_rjt;
+ u8 inval_bls_rcvd;
+ __be32 ba_rjt_rcvd;
+ } scb_stats;
+ } u;
+};
+
+#define FW_FCOE_STATS_CMD_FLOWID(x) ((x) << 0)
+#define FW_FCOE_STATS_CMD_FREE (1U << 30)
+#define FW_FCOE_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_FCOE_STATS_CMD_PORT(x) ((x) << 0)
+#define FW_FCOE_STATS_CMD_PORT_VALID (1U << 7)
+#define FW_FCOE_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_fcoe_fcf_cmd {
+ __be32 op_to_fcfi;
+ __be32 retval_len16;
+ __be16 priority_pkd;
+ u8 mac[6];
+ u8 name_id[8];
+ u8 fabric[8];
+ __be16 vf_id;
+ __be16 max_fcoe_size;
+ u8 vlan_id;
+ u8 fc_map[3];
+ __be32 fka_adv;
+ __be32 r6;
+ u8 r7_hi;
+ u8 fpma_to_portid;
+ u8 spma_mac[6];
+ __be64 r8;
+};
+
+#define FW_FCOE_FCF_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_FCF_CMD_FCFI_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_FCOE_FCF_CMD_PRIORITY_GET(x) (((x) >> 0) & 0xff)
+#define FW_FCOE_FCF_CMD_FPMA_GET(x) (((x) >> 6) & 0x1)
+#define FW_FCOE_FCF_CMD_SPMA_GET(x) (((x) >> 5) & 0x1)
+#define FW_FCOE_FCF_CMD_LOGIN_GET(x) (((x) >> 4) & 0x1)
+#define FW_FCOE_FCF_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+
+#endif /* _T4FW_API_STOR_H_ */
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 13aeca3d51f2..865c64fa923c 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -489,7 +489,7 @@ struct ParameterData {
int def; /* default value */
int safe; /* safe value */
};
-static struct ParameterData __devinitdata cfg_data[] = {
+static struct ParameterData cfg_data[] = {
{ /* adapter id */
CFG_PARAM_UNSET,
0,
@@ -574,7 +574,7 @@ MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
* set_safe_settings - if the use_safe_settings option is set then
* set all values to the safe and slow values.
**/
-static void __devinit set_safe_settings(void)
+static void set_safe_settings(void)
{
if (use_safe_settings)
{
@@ -593,7 +593,7 @@ static void __devinit set_safe_settings(void)
* fix_settings - reset any boot parameters which are out of range
* back to the default values.
**/
-static void __devinit fix_settings(void)
+static void fix_settings(void)
{
int i;
@@ -620,7 +620,7 @@ static void __devinit fix_settings(void)
* Mapping from the eeprom delay index value (index into this array)
* to the number of actual seconds that the delay should be for.
*/
-static char __devinitdata eeprom_index_to_delay_map[] =
+static char eeprom_index_to_delay_map[] =
{ 1, 3, 5, 10, 16, 30, 60, 120 };
@@ -630,7 +630,7 @@ static char __devinitdata eeprom_index_to_delay_map[] =
*
* @eeprom: The eeprom structure in which we find the delay index to map.
**/
-static void __devinit eeprom_index_to_delay(struct NvRamType *eeprom)
+static void eeprom_index_to_delay(struct NvRamType *eeprom)
{
eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
}
@@ -643,7 +643,7 @@ static void __devinit eeprom_index_to_delay(struct NvRamType *eeprom)
*
* @delay: The delay, in seconds, to find the eeprom index for.
**/
-static int __devinit delay_to_eeprom_index(int delay)
+static int delay_to_eeprom_index(int delay)
{
u8 idx = 0;
while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
@@ -659,7 +659,7 @@ static int __devinit delay_to_eeprom_index(int delay)
*
* @eeprom: The eeprom data to override with command line options.
**/
-static void __devinit eeprom_override(struct NvRamType *eeprom)
+static void eeprom_override(struct NvRamType *eeprom)
{
u8 id;
@@ -3938,7 +3938,7 @@ static void dc395x_slave_destroy(struct scsi_device *scsi_device)
*
* @io_port: base I/O address
**/
-static void __devinit trms1040_wait_30us(unsigned long io_port)
+static void trms1040_wait_30us(unsigned long io_port)
{
/* ScsiPortStallExecution(30); wait 30 us */
outb(5, io_port + TRM_S1040_GEN_TIMER);
@@ -3955,7 +3955,7 @@ static void __devinit trms1040_wait_30us(unsigned long io_port)
* @cmd: SB + op code (command) to send
* @addr: address to send
**/
-static void __devinit trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
+static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
{
int i;
u8 send_data;
@@ -4000,7 +4000,7 @@ static void __devinit trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
* @addr: offset into EEPROM
* @byte: bytes to write
**/
-static void __devinit trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
+static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
{
int i;
u8 send_data;
@@ -4054,7 +4054,7 @@ static void __devinit trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
* @eeprom: the data to write
* @io_port: the base io port
**/
-static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
+static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
{
u8 *b_eeprom = (u8 *)eeprom;
u8 addr;
@@ -4094,7 +4094,7 @@ static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long
*
* Returns the byte read.
**/
-static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
+static u8 trms1040_get_data(unsigned long io_port, u8 addr)
{
int i;
u8 read_byte;
@@ -4132,7 +4132,7 @@ static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
* @eeprom: where to store the data
* @io_port: the base io port
**/
-static void __devinit trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
+static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
{
u8 *b_eeprom = (u8 *)eeprom;
u8 addr;
@@ -4162,7 +4162,7 @@ static void __devinit trms1040_read_all(struct NvRamType *eeprom, unsigned long
 * @eeprom: caller allocated structure to read the eeprom data into
* @io_port: io port to read from
**/
-static void __devinit check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
+static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
u16 *w_eeprom = (u16 *)eeprom;
u16 w_addr;
@@ -4232,7 +4232,7 @@ static void __devinit check_eeprom(struct NvRamType *eeprom, unsigned long io_po
*
 * @eeprom: The eeprom data structure to show details for.
**/
-static void __devinit print_eeprom_settings(struct NvRamType *eeprom)
+static void print_eeprom_settings(struct NvRamType *eeprom)
{
dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
eeprom->scsi_id,
@@ -4260,7 +4260,7 @@ static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
/*
* Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
* should never cross a page boundary */
-static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
+static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
{
const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
*SEGMENTX_LEN;
@@ -4306,7 +4306,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
*
* @acb: The adapter to print the information for.
**/
-static void __devinit adapter_print_config(struct AdapterCtlBlk *acb)
+static void adapter_print_config(struct AdapterCtlBlk *acb)
{
u8 bval;
@@ -4350,7 +4350,7 @@ static void __devinit adapter_print_config(struct AdapterCtlBlk *acb)
*
* @acb: The adapter to initialize.
**/
-static void __devinit adapter_init_params(struct AdapterCtlBlk *acb)
+static void adapter_init_params(struct AdapterCtlBlk *acb)
{
struct NvRamType *eeprom = &acb->eeprom;
int i;
@@ -4412,7 +4412,7 @@ static void __devinit adapter_init_params(struct AdapterCtlBlk *acb)
*
* @host: The scsi host instance to fill in the values for.
**/
-static void __devinit adapter_init_scsi_host(struct Scsi_Host *host)
+static void adapter_init_scsi_host(struct Scsi_Host *host)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
struct NvRamType *eeprom = &acb->eeprom;
@@ -4453,7 +4453,7 @@ static void __devinit adapter_init_scsi_host(struct Scsi_Host *host)
*
* @acb: The adapter which we are to init.
**/
-static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
+static void adapter_init_chip(struct AdapterCtlBlk *acb)
{
struct NvRamType *eeprom = &acb->eeprom;
@@ -4506,8 +4506,8 @@ static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
* Returns 0 if the initialization succeeds, any other value on
* failure.
**/
-static int __devinit adapter_init(struct AdapterCtlBlk *acb,
- unsigned long io_port, u32 io_port_len, unsigned int irq)
+static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
+ u32 io_port_len, unsigned int irq)
{
if (!request_region(io_port, io_port_len, DC395X_NAME)) {
dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
@@ -4794,8 +4794,7 @@ static void banner_display(void)
*
* Returns 0 on success, or an error code (-ve) on failure.
**/
-static int __devinit dc395x_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct Scsi_Host *scsi_host = NULL;
struct AdapterCtlBlk *acb = NULL;
@@ -4861,7 +4860,7 @@ fail:
*
* @dev: The PCI device to initialize.
**/
-static void __devexit dc395x_remove_one(struct pci_dev *dev)
+static void dc395x_remove_one(struct pci_dev *dev)
{
struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
@@ -4892,7 +4891,7 @@ static struct pci_driver dc395x_driver = {
.name = DC395X_NAME,
.id_table = dc395x_pci_table,
.probe = dc395x_init_one,
- .remove = __devexit_p(dc395x_remove_one),
+ .remove = dc395x_remove_one,
};
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 67070257919f..69abd0ad48e2 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -32,8 +32,8 @@ config SCSI_DH_EMC
If you have a EMC CLARiiON select y. Otherwise, say N.
config SCSI_DH_ALUA
- tristate "SPC-3 ALUA Device Handler (EXPERIMENTAL)"
- depends on SCSI_DH && EXPERIMENTAL
+ tristate "SPC-3 ALUA Device Handler"
+ depends on SCSI_DH
help
SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
Access (ALUA).
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 207352cc70cc..4b0dd8c56707 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -68,8 +68,8 @@ static struct scsi_host_template dmx3191d_driver_template = {
.use_clustering = DISABLE_CLUSTERING,
};
-static int __devinit dmx3191d_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int dmx3191d_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct Scsi_Host *shost;
unsigned long io;
@@ -123,7 +123,7 @@ static int __devinit dmx3191d_probe_one(struct pci_dev *pdev,
return error;
}
-static void __devexit dmx3191d_remove_one(struct pci_dev *pdev)
+static void dmx3191d_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -150,7 +150,7 @@ static struct pci_driver dmx3191d_pci_driver = {
.name = DMX3191D_DRIVER_NAME,
.id_table = dmx3191d_pci_tbl,
.probe = dmx3191d_probe_one,
- .remove = __devexit_p(dmx3191d_remove_one),
+ .remove = dmx3191d_remove_one,
};
static int __init dmx3191d_init(void)
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 2ebe03a4b51d..4a909d7cfde1 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2144,7 +2144,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
*/
port_id = fip->port_id;
if (fip->probe_tries)
- port_id = prandom32(&fip->rnd_state) & 0xffff;
+ port_id = prandom_u32_state(&fip->rnd_state) & 0xffff;
else if (!port_id)
port_id = fip->lp->wwpn & 0xffff;
if (!port_id || port_id == 0xffff)
@@ -2169,7 +2169,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
{
fip->probe_tries = 0;
- prandom32_seed(&fip->rnd_state, fip->lp->wwpn);
+ prandom_seed_state(&fip->rnd_state, fip->lp->wwpn);
fcoe_ctlr_vn_restart(fip);
}
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 1a2a1e5824e3..fff682976c56 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1771,7 +1771,7 @@ struct scsi_host_template fdomain_driver_template = {
#ifndef PCMCIA
#ifdef CONFIG_PCI
-static struct pci_device_id fdomain_pci_tbl[] __devinitdata = {
+static struct pci_device_id fdomain_pci_tbl[] = {
{ PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ }
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fc98eb61e760..fbf3ac6e0c55 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -399,8 +399,7 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
return fnic->data_src_addr;
}
-static int __devinit fnic_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
struct fc_lport *lp;
@@ -774,7 +773,7 @@ err_out:
return err;
}
-static void __devexit fnic_remove(struct pci_dev *pdev)
+static void fnic_remove(struct pci_dev *pdev)
{
struct fnic *fnic = pci_get_drvdata(pdev);
struct fc_lport *lp = fnic->lport;
@@ -849,7 +848,7 @@ static struct pci_driver fnic_driver = {
.name = DRV_NAME,
.id_table = fnic_id_table,
.probe = fnic_probe,
- .remove = __devexit_p(fnic_remove),
+ .remove = fnic_remove,
};
static int __init fnic_init_module(void)
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 1a5954f0915a..5041f925c191 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -939,7 +939,7 @@ module_param(dtc_3181e, int, 0);
MODULE_LICENSE("GPL");
#ifndef SCSI_G_NCR5380_MEM
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
{
ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e),
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 5d72274c507f..599790e41a98 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -590,7 +590,7 @@ static struct pci_driver gdth_pci_driver = {
.remove = gdth_pci_remove_one,
};
-static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
+static void gdth_pci_remove_one(struct pci_dev *pdev)
{
gdth_ha_str *ha = pci_get_drvdata(pdev);
@@ -602,8 +602,8 @@ static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int gdth_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
u16 vendor = pdev->vendor;
u16 device = pdev->device;
@@ -855,8 +855,8 @@ static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
#endif /* CONFIG_ISA */
#ifdef CONFIG_PCI
-static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
- gdth_ha_str *ha)
+static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
+ gdth_ha_str *ha)
{
register gdt6_dpram_str __iomem *dp6_ptr;
register gdt6c_dpram_str __iomem *dp6c_ptr;
@@ -1239,7 +1239,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
/* controller protocol functions */
-static void __devinit gdth_enable_int(gdth_ha_str *ha)
+static void gdth_enable_int(gdth_ha_str *ha)
{
unsigned long flags;
gdt2_dpram_str __iomem *dp2_ptr;
@@ -1555,7 +1555,7 @@ static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
/* search for devices */
-static int __devinit gdth_search_drives(gdth_ha_str *ha)
+static int gdth_search_drives(gdth_ha_str *ha)
{
u16 cdev_cnt, i;
int ok;
@@ -4959,8 +4959,7 @@ static int __init gdth_eisa_probe_one(u16 eisa_slot)
#endif /* CONFIG_EISA */
#ifdef CONFIG_PCI
-static int __devinit gdth_pci_probe_one(gdth_pci_str *pcistr,
- gdth_ha_str **ha_out)
+static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
{
struct Scsi_Host *shp;
gdth_ha_str *ha;
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 488fbc648656..dbe4cc6b9f8b 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -204,7 +204,7 @@ static struct scsi_host_template gvp11_scsi_template = {
.use_clustering = DISABLE_CLUSTERING
};
-static int __devinit check_wd33c93(struct gvp11_scsiregs *regs)
+static int check_wd33c93(struct gvp11_scsiregs *regs)
{
#ifdef CHECK_WD33C93
volatile unsigned char *sasr_3393, *scmd_3393;
@@ -284,8 +284,7 @@ static int __devinit check_wd33c93(struct gvp11_scsiregs *regs)
return 0;
}
-static int __devinit gvp11_probe(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
{
struct Scsi_Host *instance;
unsigned long address;
@@ -380,7 +379,7 @@ fail_check_or_alloc:
return error;
}
-static void __devexit gvp11_remove(struct zorro_dev *z)
+static void gvp11_remove(struct zorro_dev *z)
{
struct Scsi_Host *instance = zorro_get_drvdata(z);
struct gvp11_hostdata *hdata = shost_priv(instance);
@@ -398,7 +397,7 @@ static void __devexit gvp11_remove(struct zorro_dev *z)
* SERIES I though).
*/
-static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id gvp11_zorro_tbl[] = {
{ ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff },
{ ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff },
{ ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff },
@@ -414,7 +413,7 @@ static struct zorro_driver gvp11_driver = {
.name = "gvp11",
.id_table = gvp11_zorro_tbl,
.probe = gvp11_probe,
- .remove = __devexit_p(gvp11_remove),
+ .remove = gvp11_remove,
};
static int __init gvp11_init(void)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 593085a52275..df0c3c71ea43 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -468,10 +468,10 @@ void scsi_unregister(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_unregister);
-static int __scsi_host_match(struct device *dev, void *data)
+static int __scsi_host_match(struct device *dev, const void *data)
{
struct Scsi_Host *p;
- unsigned short *hostnum = (unsigned short *)data;
+ const unsigned short *hostnum = data;
p = class_to_shost(dev);
return p->host_no == *hostnum;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4217e49aea46..4f338061b5c3 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -189,16 +189,16 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
int nsgs, int *bucket_map);
-static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
-static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
- void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset);
-static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar);
-static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
-static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
- void __iomem *vaddr, int wait_for_ready);
+static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset);
+static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar);
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
+ int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
@@ -3182,8 +3182,8 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
}
}
-static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
- unsigned char *scsi3addr, u8 reset_type)
+static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+ u8 reset_type)
{
struct CommandList *c;
@@ -3606,8 +3606,8 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
* in simple mode, not performant mode due to the tag lookup.
* We only ever use this immediately after a controller reset.
*/
-static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
- unsigned char type)
+static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+ unsigned char type)
{
struct Command {
struct CommandListHeader CommandHeader;
@@ -3756,14 +3756,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
return 0;
}
-static __devinit void init_driver_version(char *driver_version, int len)
+static void init_driver_version(char *driver_version, int len)
{
memset(driver_version, 0, len);
strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}
-static __devinit int write_driver_ver_to_cfgtable(
- struct CfgTable __iomem *cfgtable)
+static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
char *driver_version;
int i, size = sizeof(cfgtable->driver_version);
@@ -3779,8 +3778,8 @@ static __devinit int write_driver_ver_to_cfgtable(
return 0;
}
-static __devinit void read_driver_ver_from_cfgtable(
- struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
+static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
+ unsigned char *driver_ver)
{
int i;
@@ -3788,8 +3787,7 @@ static __devinit void read_driver_ver_from_cfgtable(
driver_ver[i] = readb(&cfgtable->driver_version[i]);
}
-static __devinit int controller_reset_failed(
- struct CfgTable __iomem *cfgtable)
+static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
char *driver_ver, *old_driver_ver;
@@ -3812,7 +3810,7 @@ static __devinit int controller_reset_failed(
/* This does a hard reset of the controller using PCI power management
* states or the using the doorbell register.
*/
-static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -4029,7 +4027,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
* controllers that are capable. If not, we use IO-APIC mode.
*/
-static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
+static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
int err, i;
@@ -4077,7 +4075,7 @@ default_int_mode:
h->intr[h->intr_mode] = h->pdev->irq;
}
-static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
int i;
u32 subsystem_vendor_id, subsystem_device_id;
@@ -4101,8 +4099,8 @@ static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
-static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar)
+static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar)
{
int i;
@@ -4118,8 +4116,8 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
- void __iomem *vaddr, int wait_for_ready)
+static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
+ int wait_for_ready)
{
int i, iterations;
u32 scratchpad;
@@ -4143,9 +4141,9 @@ static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
- void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset)
+static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset)
{
*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
@@ -4158,7 +4156,7 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
return 0;
}
-static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
+static int hpsa_find_cfgtables(struct ctlr_info *h)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -4187,7 +4185,7 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
return 0;
}
-static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
+static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
@@ -4208,7 +4206,7 @@ static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
* max commands, max SG elements without chaining, and with chaining,
* SG chain block size, etc.
*/
-static void __devinit hpsa_find_board_params(struct ctlr_info *h)
+static void hpsa_find_board_params(struct ctlr_info *h)
{
hpsa_get_max_perf_mode_cmds(h);
h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
@@ -4266,7 +4264,7 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
-static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
+static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
u32 doorbell_value;
@@ -4287,7 +4285,7 @@ static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
}
}
-static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
+static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
u32 trans_support;
@@ -4310,7 +4308,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
return 0;
}
-static int __devinit hpsa_pci_init(struct ctlr_info *h)
+static int hpsa_pci_init(struct ctlr_info *h)
{
int prod_index, err;
@@ -4378,7 +4376,7 @@ err_out_free_res:
return err;
}
-static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
+static void hpsa_hba_inquiry(struct ctlr_info *h)
{
int rc;
@@ -4394,7 +4392,7 @@ static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
}
}
-static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
+static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
int rc, i;
@@ -4426,7 +4424,7 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
return 0;
}
-static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
+static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
h->cmd_pool_bits = kzalloc(
DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
@@ -4499,7 +4497,7 @@ static int hpsa_request_irq(struct ctlr_info *h,
return 0;
}
-static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
+static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
HPSA_RESET_TYPE_CONTROLLER)) {
@@ -4713,8 +4711,7 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
spin_unlock_irqrestore(&lockup_detector_lock, flags);
}
-static int __devinit hpsa_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int dac, rc;
struct ctlr_info *h;
@@ -4910,7 +4907,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
hpsa_free_irqs_and_disable_msix(h);
}
-static void __devexit hpsa_free_device_info(struct ctlr_info *h)
+static void hpsa_free_device_info(struct ctlr_info *h)
{
int i;
@@ -4918,7 +4915,7 @@ static void __devexit hpsa_free_device_info(struct ctlr_info *h)
kfree(h->dev[i]);
}
-static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+static void hpsa_remove_one(struct pci_dev *pdev)
{
struct ctlr_info *h;
@@ -4966,7 +4963,7 @@ static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
static struct pci_driver hpsa_pci_driver = {
.name = HPSA,
.probe = hpsa_init_one,
- .remove = __devexit_p(hpsa_remove_one),
+ .remove = hpsa_remove_one,
.id_table = hpsa_pci_device_id, /* id_table */
.shutdown = hpsa_shutdown,
.suspend = hpsa_suspend,
@@ -5010,8 +5007,7 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
- u32 use_short_tags)
+static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
{
int i;
unsigned long register_value;
@@ -5079,7 +5075,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
h->transMethod = CFGTBL_Trans_Performant;
}
-static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
int i;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 192724ed7a32..ee196b363d81 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
-static const char driver_ver[] = "v1.6 (091225)";
+static const char driver_ver[] = "v1.8";
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -77,6 +77,11 @@ static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}
+static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
+{
+ return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
@@ -230,6 +235,74 @@ static int iop_intr_mv(struct hptiop_hba *hba)
return ret;
}
+static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
+{
+ u32 req_type = _tag & 0xf;
+ struct hpt_iop_request_scsi_command *req;
+
+ switch (req_type) {
+ case IOP_REQUEST_TYPE_GET_CONFIG:
+ case IOP_REQUEST_TYPE_SET_CONFIG:
+ hba->msg_done = 1;
+ break;
+
+ case IOP_REQUEST_TYPE_SCSI_COMMAND:
+ req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
+ if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+ req->header.result = IOP_RESULT_SUCCESS;
+ hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int iop_intr_mvfrey(struct hptiop_hba *hba)
+{
+ u32 _tag, status, cptr, cur_rptr;
+ int ret = 0;
+
+ if (hba->initialized)
+ writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+
+ status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
+ if (status) {
+ writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
+ if (status & CPU_TO_F0_DRBL_MSG_BIT) {
+ u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
+ dprintk("received outbound msg %x\n", msg);
+ hptiop_message_callback(hba, msg);
+ }
+ ret = 1;
+ }
+
+ status = readl(&(hba->u.mvfrey.mu->isr_cause));
+ if (status) {
+ writel(status, &(hba->u.mvfrey.mu->isr_cause));
+ do {
+ cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
+ cur_rptr = hba->u.mvfrey.outlist_rptr;
+ while (cur_rptr != cptr) {
+ cur_rptr++;
+ if (cur_rptr == hba->u.mvfrey.list_count)
+ cur_rptr = 0;
+
+ _tag = hba->u.mvfrey.outlist[cur_rptr].val;
+ BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
+ hptiop_request_callback_mvfrey(hba, _tag);
+ ret = 1;
+ }
+ hba->u.mvfrey.outlist_rptr = cur_rptr;
+ } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
+ }
+
+ if (hba->initialized)
+ writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+
+ return ret;
+}
+
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
void __iomem *_req, u32 millisec)
{
@@ -272,6 +345,26 @@ static int iop_send_sync_request_mv(struct hptiop_hba *hba,
return -1;
}
+static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
+ u32 size_bits, u32 millisec)
+{
+ struct hpt_iop_request_header *reqhdr =
+ hba->u.mvfrey.internal_req.req_virt;
+ u32 i;
+
+ hba->msg_done = 0;
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+ hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));
+
+ for (i = 0; i < millisec; i++) {
+ iop_intr_mvfrey(hba);
+ if (hba->msg_done)
+ break;
+ msleep(1);
+ }
+ return hba->msg_done ? 0 : -1;
+}
+
static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
@@ -285,11 +378,18 @@ static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
readl(&hba->u.mv.regs->inbound_doorbell);
}
+static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
+{
+ writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
+ readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
+}
+
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
u32 i;
hba->msg_done = 0;
+ hba->ops->disable_intr(hba);
hba->ops->post_msg(hba, msg);
for (i = 0; i < millisec; i++) {
@@ -301,6 +401,7 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
msleep(1);
}
+ hba->ops->enable_intr(hba);
return hba->msg_done? 0 : -1;
}
@@ -354,6 +455,28 @@ static int iop_get_config_mv(struct hptiop_hba *hba,
return 0;
}
+static int iop_get_config_mvfrey(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config)
+{
+ struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
+
+ if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
+ info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
+ return -1;
+
+ config->interface_version = info->interface_version;
+ config->firmware_version = info->firmware_version;
+ config->max_requests = info->max_requests;
+ config->request_size = info->request_size;
+ config->max_sg_count = info->max_sg_count;
+ config->data_transfer_length = info->data_transfer_length;
+ config->alignment_mask = info->alignment_mask;
+ config->max_devices = info->max_devices;
+ config->sdram_size = info->sdram_size;
+
+ return 0;
+}
+
static int iop_set_config_itl(struct hptiop_hba *hba,
struct hpt_iop_request_set_config *config)
{
@@ -408,6 +531,29 @@ static int iop_set_config_mv(struct hptiop_hba *hba,
return 0;
}
+static int iop_set_config_mvfrey(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config)
+{
+ struct hpt_iop_request_set_config *req =
+ hba->u.mvfrey.internal_req.req_virt;
+
+ memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+ req->header.size =
+ cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+ req->header.context_hi32 = 0;
+
+ if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
+ dprintk("Set config send cmd failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
@@ -420,6 +566,13 @@ static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
&hba->u.mv.regs->outbound_intmask);
}
+static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
+{
+ writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
+ writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
+ writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+}
+
static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
/* enable interrupts */
@@ -502,17 +655,39 @@ static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
return 0;
}
+static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
+{
+ hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
+ if (hba->u.mvfrey.config == NULL)
+ return -1;
+
+ hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
+ if (hba->u.mvfrey.mu == NULL) {
+ iounmap(hba->u.mvfrey.config);
+ return -1;
+ }
+
+ return 0;
+}
+
static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
iounmap(hba->u.mv.regs);
iounmap(hba->u.mv.mu);
}
+static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
+{
+ iounmap(hba->u.mvfrey.config);
+ iounmap(hba->u.mvfrey.mu);
+}
+
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
dprintk("iop message 0x%x\n", msg);
- if (msg == IOPMU_INBOUND_MSG0_NOP)
+ if (msg == IOPMU_INBOUND_MSG0_NOP ||
+ msg == IOPMU_INBOUND_MSG0_RESET_COMM)
hba->msg_done = 1;
if (!hba->initialized)
@@ -592,6 +767,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
memcpy(scp->sense_buffer, &req->sg_list,
min_t(size_t, SCSI_SENSE_BUFFERSIZE,
le32_to_cpu(req->dataxfer_length)));
+ goto skip_resid;
break;
default:
@@ -599,6 +775,10 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
break;
}
+ scsi_set_resid(scp,
+ scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
+
+skip_resid:
dprintk("scsi_done(%p)\n", scp);
scp->scsi_done(scp);
free_req(hba, &hba->reqs[tag]);
@@ -692,7 +872,8 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
- psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
+ psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
+ hba->ops->host_phy_flag;
psg[idx].size = cpu_to_le32(sg_dma_len(sg));
psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
cpu_to_le32(1) : 0;
@@ -751,6 +932,78 @@ static void hptiop_post_req_mv(struct hptiop_hba *hba,
MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}
+static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
+ struct hptiop_request *_req)
+{
+ struct hpt_iop_request_header *reqhdr = _req->req_virt;
+ u32 index;
+
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
+ IOP_REQUEST_FLAG_ADDR_BITS |
+ ((_req->req_shifted_phy >> 11) & 0xffff0000));
+ reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+ (_req->index << 4) | reqhdr->type);
+ reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
+ 0xffffffff);
+
+ hba->u.mvfrey.inlist_wptr++;
+ index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+ if (index == hba->u.mvfrey.list_count) {
+ index = 0;
+ hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+ hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+ }
+
+ hba->u.mvfrey.inlist[index].addr =
+ (dma_addr_t)_req->req_shifted_phy << 5;
+ hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
+ writel(hba->u.mvfrey.inlist_wptr,
+ &(hba->u.mvfrey.mu->inbound_write_ptr));
+ readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
+}
+
+static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
+{
+ u32 list_count = hba->u.mvfrey.list_count;
+
+ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
+ return -1;
+
+ /* wait 100ms for MCU ready */
+ msleep(100);
+
+ writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->inbound_base));
+ writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->inbound_base_high));
+
+ writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->outbound_base));
+ writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->outbound_base_high));
+
+ writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->outbound_shadow_base));
+ writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->outbound_shadow_base_high));
+
+ hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
+ *hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
+ hba->u.mvfrey.outlist_rptr = list_count - 1;
+ return 0;
+}
+
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
void (*done)(struct scsi_cmnd *))
{
@@ -771,14 +1024,15 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
_req->scp = scp;
- dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
+ dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%08x-%08x-%08x-%08x) "
"req_index=%d, req=%p\n",
scp,
host->host_no, scp->device->channel,
scp->device->id, scp->device->lun,
- ((u32 *)scp->cmnd)[0],
- ((u32 *)scp->cmnd)[1],
- ((u32 *)scp->cmnd)[2],
+ cpu_to_be32(((u32 *)scp->cmnd)[0]),
+ cpu_to_be32(((u32 *)scp->cmnd)[1]),
+ cpu_to_be32(((u32 *)scp->cmnd)[2]),
+ cpu_to_be32(((u32 *)scp->cmnd)[3]),
_req->index, _req->req_virt);
scp->result = 0;
@@ -933,6 +1187,11 @@ static struct scsi_host_template driver_template = {
.change_queue_depth = hptiop_adjust_disk_queue_depth,
};
+static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
@@ -943,6 +1202,63 @@ static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
return -1;
}
+static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
+{
+ u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
+ char *p;
+ dma_addr_t phy;
+
+ BUG_ON(hba->max_request_size == 0);
+
+ if (list_count == 0) {
+ BUG_ON(1);
+ return -1;
+ }
+
+ list_count >>= 16;
+
+ hba->u.mvfrey.list_count = list_count;
+ hba->u.mvfrey.internal_mem_size = 0x800 +
+ list_count * sizeof(struct mvfrey_inlist_entry) +
+ list_count * sizeof(struct mvfrey_outlist_entry) +
+ sizeof(int);
+
+ p = dma_alloc_coherent(&hba->pcidev->dev,
+ hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ hba->u.mvfrey.internal_req.req_virt = p;
+ hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
+ hba->u.mvfrey.internal_req.scp = NULL;
+ hba->u.mvfrey.internal_req.next = NULL;
+
+ p += 0x800;
+ phy += 0x800;
+
+ hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
+ hba->u.mvfrey.inlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_inlist_entry);
+ phy += list_count * sizeof(struct mvfrey_inlist_entry);
+
+ hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
+ hba->u.mvfrey.outlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_outlist_entry);
+ phy += list_count * sizeof(struct mvfrey_outlist_entry);
+
+ hba->u.mvfrey.outlist_cptr = (__le32 *)p;
+ hba->u.mvfrey.outlist_cptr_phy = phy;
+
+ return 0;
+}
+
+static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
if (hba->u.mv.internal_req) {
@@ -953,8 +1269,20 @@ static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
return -1;
}
-static int __devinit hptiop_probe(struct pci_dev *pcidev,
- const struct pci_device_id *id)
+static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
+{
+ if (hba->u.mvfrey.internal_req.req_virt) {
+ dma_free_coherent(&hba->pcidev->dev,
+ hba->u.mvfrey.internal_mem_size,
+ hba->u.mvfrey.internal_req.req_virt,
+ (dma_addr_t)
+ hba->u.mvfrey.internal_req.req_shifted_phy << 5);
+ return 0;
+ } else
+ return -1;
+}
+
+static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
{
struct Scsi_Host *host = NULL;
struct hptiop_hba *hba;
@@ -1027,7 +1355,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
goto unmap_pci_bar;
}
- if (hba->ops->internal_memalloc) {
+ if (hba->ops->family == MV_BASED_IOP) {
if (hba->ops->internal_memalloc(hba)) {
printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
hba->host->host_no);
@@ -1050,6 +1378,19 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
hba->interface_version = le32_to_cpu(iop_config.interface_version);
hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
+ if (hba->ops->family == MVFREY_BASED_IOP) {
+ if (hba->ops->internal_memalloc(hba)) {
+ printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ if (hba->ops->reset_comm(hba)) {
+ printk(KERN_ERR "scsi%d: reset comm failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ }
+
if (hba->firmware_version > 0x01020000 ||
hba->interface_version > 0x01020000)
hba->iopintf_v2 = 1;
@@ -1104,14 +1445,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
hba->dma_coherent = start_virt;
hba->dma_coherent_handle = start_phy;
- if ((start_phy & 0x1f) != 0)
- {
+ if ((start_phy & 0x1f) != 0) {
offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
start_phy += offset;
start_virt += offset;
}
- hba->req_list = start_virt;
+ hba->req_list = NULL;
for (i = 0; i < hba->max_requests; i++) {
hba->reqs[i].next = NULL;
hba->reqs[i].req_virt = start_virt;
@@ -1132,7 +1472,6 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
goto free_request_mem;
}
-
scsi_scan_host(host);
dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
@@ -1147,8 +1486,7 @@ free_request_irq:
free_irq(hba->pcidev->irq, hba);
unmap_pci_bar:
- if (hba->ops->internal_memfree)
- hba->ops->internal_memfree(hba);
+ hba->ops->internal_memfree(hba);
hba->ops->unmap_pci_bar(hba);
@@ -1198,6 +1536,16 @@ static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
readl(&hba->u.mv.regs->outbound_intmask);
}
+static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
+{
+ writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
+ readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
+ writel(0, &(hba->u.mvfrey.mu->isr_enable));
+ readl(&(hba->u.mvfrey.mu->isr_enable));
+ writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+ readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
+}
+
static void hptiop_remove(struct pci_dev *pcidev)
{
struct Scsi_Host *host = pci_get_drvdata(pcidev);
@@ -1216,8 +1564,7 @@ static void hptiop_remove(struct pci_dev *pcidev)
hba->dma_coherent,
hba->dma_coherent_handle);
- if (hba->ops->internal_memfree)
- hba->ops->internal_memfree(hba);
+ hba->ops->internal_memfree(hba);
hba->ops->unmap_pci_bar(hba);
@@ -1229,9 +1576,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
}
static struct hptiop_adapter_ops hptiop_itl_ops = {
+ .family = INTEL_BASED_IOP,
.iop_wait_ready = iop_wait_ready_itl,
- .internal_memalloc = NULL,
- .internal_memfree = NULL,
+ .internal_memalloc = hptiop_internal_memalloc_itl,
+ .internal_memfree = hptiop_internal_memfree_itl,
.map_pci_bar = hptiop_map_pci_bar_itl,
.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
.enable_intr = hptiop_enable_intr_itl,
@@ -1242,9 +1590,12 @@ static struct hptiop_adapter_ops hptiop_itl_ops = {
.post_msg = hptiop_post_msg_itl,
.post_req = hptiop_post_req_itl,
.hw_dma_bit_mask = 64,
+ .reset_comm = hptiop_reset_comm_itl,
+ .host_phy_flag = cpu_to_le64(0),
};
static struct hptiop_adapter_ops hptiop_mv_ops = {
+ .family = MV_BASED_IOP,
.iop_wait_ready = iop_wait_ready_mv,
.internal_memalloc = hptiop_internal_memalloc_mv,
.internal_memfree = hptiop_internal_memfree_mv,
@@ -1258,6 +1609,27 @@ static struct hptiop_adapter_ops hptiop_mv_ops = {
.post_msg = hptiop_post_msg_mv,
.post_req = hptiop_post_req_mv,
.hw_dma_bit_mask = 33,
+ .reset_comm = hptiop_reset_comm_mv,
+ .host_phy_flag = cpu_to_le64(0),
+};
+
+static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
+ .family = MVFREY_BASED_IOP,
+ .iop_wait_ready = iop_wait_ready_mvfrey,
+ .internal_memalloc = hptiop_internal_memalloc_mvfrey,
+ .internal_memfree = hptiop_internal_memfree_mvfrey,
+ .map_pci_bar = hptiop_map_pci_bar_mvfrey,
+ .unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey,
+ .enable_intr = hptiop_enable_intr_mvfrey,
+ .disable_intr = hptiop_disable_intr_mvfrey,
+ .get_config = iop_get_config_mvfrey,
+ .set_config = iop_set_config_mvfrey,
+ .iop_intr = iop_intr_mvfrey,
+ .post_msg = hptiop_post_msg_mvfrey,
+ .post_req = hptiop_post_req_mvfrey,
+ .hw_dma_bit_mask = 64,
+ .reset_comm = hptiop_reset_comm_mvfrey,
+ .host_phy_flag = cpu_to_le64(1),
};
static struct pci_device_id hptiop_id_table[] = {
@@ -1283,6 +1655,8 @@ static struct pci_device_id hptiop_id_table[] = {
{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
+ { PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
{},
};
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index baa648d87fde..020619d60b08 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -75,6 +75,45 @@ struct hpt_iopmv_regs {
__le32 outbound_intmask;
};
+#pragma pack(1)
+struct hpt_iopmu_mvfrey {
+ __le32 reserved0[(0x4000 - 0) / 4];
+ __le32 inbound_base;
+ __le32 inbound_base_high;
+ __le32 reserved1[(0x4018 - 0x4008) / 4];
+ __le32 inbound_write_ptr;
+ __le32 reserved2[(0x402c - 0x401c) / 4];
+ __le32 inbound_conf_ctl;
+ __le32 reserved3[(0x4050 - 0x4030) / 4];
+ __le32 outbound_base;
+ __le32 outbound_base_high;
+ __le32 outbound_shadow_base;
+ __le32 outbound_shadow_base_high;
+ __le32 reserved4[(0x4088 - 0x4060) / 4];
+ __le32 isr_cause;
+ __le32 isr_enable;
+ __le32 reserved5[(0x1020c - 0x4090) / 4];
+ __le32 pcie_f0_int_enable;
+ __le32 reserved6[(0x10400 - 0x10210) / 4];
+ __le32 f0_to_cpu_msg_a;
+ __le32 reserved7[(0x10420 - 0x10404) / 4];
+ __le32 cpu_to_f0_msg_a;
+ __le32 reserved8[(0x10480 - 0x10424) / 4];
+ __le32 f0_doorbell;
+ __le32 f0_doorbell_enable;
+};
+
+struct mvfrey_inlist_entry {
+ dma_addr_t addr;
+ __le32 intrfc_len;
+ __le32 reserved;
+};
+
+struct mvfrey_outlist_entry {
+ __le32 val;
+};
+#pragma pack()
+
#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
@@ -87,6 +126,9 @@ struct hpt_iopmv_regs {
#define MVIOP_MU_OUTBOUND_INT_MSG 1
#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+#define CL_POINTER_TOGGLE 0x00004000
+#define CPU_TO_F0_DRBL_MSG_BIT 0x02000000
+
enum hpt_iopmu_message {
/* host-to-iop messages */
IOPMU_INBOUND_MSG0_NOP = 0,
@@ -95,6 +137,7 @@ enum hpt_iopmu_message {
IOPMU_INBOUND_MSG0_SHUTDOWN,
IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
+ IOPMU_INBOUND_MSG0_RESET_COMM,
IOPMU_INBOUND_MSG0_MAX = 0xff,
/* iop-to-host messages */
IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
@@ -118,6 +161,7 @@ struct hpt_iop_request_header {
#define IOP_REQUEST_FLAG_BIST_REQUEST 2
#define IOP_REQUEST_FLAG_REMAPPED 4
#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
+#define IOP_REQUEST_FLAG_ADDR_BITS 0x40 /* flags[31:16] is phy_addr[47:32] */
enum hpt_iop_request_type {
IOP_REQUEST_TYPE_GET_CONFIG = 0,
@@ -223,6 +267,13 @@ struct hpt_scsi_pointer {
#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
+enum hptiop_family {
+ UNKNOWN_BASED_IOP,
+ INTEL_BASED_IOP,
+ MV_BASED_IOP,
+ MVFREY_BASED_IOP
+} ;
+
struct hptiop_hba {
struct hptiop_adapter_ops *ops;
union {
@@ -236,6 +287,22 @@ struct hptiop_hba {
void *internal_req;
dma_addr_t internal_req_phy;
} mv;
+ struct {
+ struct hpt_iop_request_get_config __iomem *config;
+ struct hpt_iopmu_mvfrey __iomem *mu;
+
+ int internal_mem_size;
+ struct hptiop_request internal_req;
+ int list_count;
+ struct mvfrey_inlist_entry *inlist;
+ dma_addr_t inlist_phy;
+ __le32 inlist_wptr;
+ struct mvfrey_outlist_entry *outlist;
+ dma_addr_t outlist_phy;
+ __le32 *outlist_cptr; /* copy pointer shadow */
+ dma_addr_t outlist_cptr_phy;
+ __le32 outlist_rptr;
+ } mvfrey;
} u;
struct Scsi_Host *host;
@@ -283,6 +350,7 @@ struct hpt_ioctl_k {
};
struct hptiop_adapter_ops {
+ enum hptiop_family family;
int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
int (*internal_memalloc)(struct hptiop_hba *hba);
int (*internal_memfree)(struct hptiop_hba *hba);
@@ -298,6 +366,8 @@ struct hptiop_adapter_ops {
void (*post_msg)(struct hptiop_hba *hba, u32 msg);
void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
int hw_dma_bit_mask;
+ int (*reset_comm)(struct hptiop_hba *hba);
+ __le64 host_phy_flag;
};
#define HPT_IOCTL_RESULT_OK 0
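The mvfrey inbound list write pointer packs a 14-bit slot index together with a toggle bit (CL_POINTER_TOGGLE) that flips on every wrap of the list; this is how hptiop_post_req_mvfrey and hptiop_reset_comm_mvfrey above stay in step with the firmware. A minimal standalone sketch of that wrap arithmetic, using the constants from hptiop.h with an illustrative list_count of 4 and reset value taken from hptiop_reset_comm_mvfrey:

#include <stdint.h>
#include <stdio.h>

#define CL_POINTER_TOGGLE 0x00004000	/* flips on every wrap of the list */
#define CL_INDEX_MASK     0x00003fff	/* low 14 bits hold the slot index */

/* Advance the packed write pointer by one slot, as the driver does. */
static uint32_t advance_wptr(uint32_t wptr, uint32_t list_count)
{
	wptr++;
	if ((wptr & CL_INDEX_MASK) == list_count) {
		wptr &= ~CL_INDEX_MASK;		/* back to slot 0 ...        */
		wptr ^= CL_POINTER_TOGGLE;	/* ... and flip the toggle   */
	}
	return wptr;
}

int main(void)
{
	uint32_t list_count = 4;	/* illustrative queue depth */
	uint32_t wptr = (list_count - 1) | CL_POINTER_TOGGLE;	/* reset value */

	for (int i = 0; i < 6; i++) {
		wptr = advance_wptr(wptr, list_count);
		printf("slot=%u toggle=%u\n",
		       (unsigned)(wptr & CL_INDEX_MASK),
		       !!(wptr & CL_POINTER_TOGGLE));
	}
	return 0;
}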
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 5e8d51bd03de..cc82d0f322b6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4905,7 +4905,7 @@ static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
}
-static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
+static struct vio_device_id ibmvfc_device_table[] = {
{"fcp", "IBM,vfc-client"},
{ "", "" }
};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ef9a54c7da67..a044f593e8b9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2362,7 +2362,7 @@ static int ibmvscsi_resume(struct device *dev)
* ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
* support.
*/
-static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
+static struct vio_device_id ibmvscsi_device_table[] = {
{"vscsi", "IBM,v-scsi"},
{ "", "" }
};
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index aa7ed81e9237..bf9eca845166 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -907,7 +907,7 @@ static int ibmvstgt_remove(struct vio_dev *dev)
return 0;
}
-static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
+static struct vio_device_id ibmvstgt_device_table[] = {
{"v-scsi-host", "IBM,v-scsi-host"},
{"",""}
};
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index dd741bcd6ccd..280d5af113d1 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2992,7 +2992,7 @@ static struct pci_driver initio_pci_driver = {
.name = "initio",
.id_table = initio_pci_tbl,
.probe = initio_probe_one,
- .remove = __devexit_p(initio_remove_one),
+ .remove = initio_remove_one,
};
static int __init initio_init_driver(void)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fe6029f4df16..1d7da3f41ebb 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8296,7 +8296,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
* Return value:
* 0 on success / -EIO on failure
**/
-static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
int rc = 0;
unsigned long host_lock_flags = 0;
@@ -8425,7 +8425,7 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* 0 on success / -ENOMEM on allocation failure
**/
-static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
@@ -8497,7 +8497,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* 0 on success / non-zero for error
**/
-static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
int i, rc = -ENOMEM;
@@ -8601,7 +8601,7 @@ out_free_res_entries:
* Return value:
* none
**/
-static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
+static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
int i;
@@ -8625,8 +8625,8 @@ static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* none
**/
-static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
- struct Scsi_Host *host, struct pci_dev *pdev)
+static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
+ struct Scsi_Host *host, struct pci_dev *pdev)
{
const struct ipr_interrupt_offsets *p;
struct ipr_interrupts *t;
@@ -8712,7 +8712,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* ptr to chip information on success / NULL on failure
**/
-static const struct ipr_chip_t * __devinit
+static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
int i;
@@ -8734,7 +8734,7 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
* Return value:
* 0 on success / non-zero on failure
**/
-static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
+static irqreturn_t ipr_test_intr(int irq, void *devp)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
unsigned long lock_flags = 0;
@@ -8761,8 +8761,7 @@ static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
* Return value:
* 0 on success / non-zero on failure
**/
-static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
- struct pci_dev *pdev)
+static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
int rc;
volatile u32 int_reg;
@@ -8815,8 +8814,8 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* 0 on success / non-zero on failure
**/
-static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
- const struct pci_device_id *dev_id)
+static int ipr_probe_ioa(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
struct Scsi_Host *host;
@@ -9113,7 +9112,7 @@ static void __ipr_remove(struct pci_dev *pdev)
* Return value:
* none
**/
-static void __devexit ipr_remove(struct pci_dev *pdev)
+static void ipr_remove(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
@@ -9136,8 +9135,7 @@ static void __devexit ipr_remove(struct pci_dev *pdev)
* Return value:
* 0 on success / non-zero on failure
**/
-static int __devinit ipr_probe(struct pci_dev *pdev,
- const struct pci_device_id *dev_id)
+static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
int rc;
@@ -9218,7 +9216,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
-static struct pci_device_id ipr_pci_table[] __devinitdata = {
+static struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
@@ -9305,7 +9303,7 @@ static struct pci_driver ipr_driver = {
.name = IPR_NAME,
.id_table = ipr_pci_table,
.probe = ipr_probe,
- .remove = __devexit_p(ipr_remove),
+ .remove = ipr_remove,
.shutdown = ipr_shutdown,
.err_handler = &ipr_err_handler,
};
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index b6d7a5c2fc94..9aa86a315a08 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -389,14 +389,14 @@ MODULE_DEVICE_TABLE( pci, ips_pci_table );
static char ips_hot_plug_name[] = "ips";
-static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
-static void __devexit ips_remove_device(struct pci_dev *pci_dev);
+static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
+static void ips_remove_device(struct pci_dev *pci_dev);
static struct pci_driver ips_pci_driver = {
.name = ips_hot_plug_name,
.id_table = ips_pci_table,
.probe = ips_insert_device,
- .remove = __devexit_p(ips_remove_device),
+ .remove = ips_remove_device,
};
@@ -6837,7 +6837,7 @@ err_out_sh:
/* Routine Description: */
/* Remove one Adapter ( Hot Plugging ) */
/*---------------------------------------------------------------------------*/
-static void __devexit
+static void
ips_remove_device(struct pci_dev *pci_dev)
{
struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
@@ -6898,7 +6898,7 @@ module_exit(ips_module_exit);
/* Return Value: */
/* 0 if Successful, else non-zero */
/*---------------------------------------------------------------------------*/
-static int __devinit
+static int
ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
{
int index = -1;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index b74050b95d6a..2839baa82a5a 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -282,7 +282,7 @@ static void isci_unregister(struct isci_host *isci_host)
scsi_host_put(shost);
}
-static int __devinit isci_pci_init(struct pci_dev *pdev)
+static int isci_pci_init(struct pci_dev *pdev)
{
int err, bar_num, bar_mask = 0;
void __iomem * const *iomap;
@@ -616,7 +616,7 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
return NULL;
}
-static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct isci_pci_info *pci_info;
int err, i;
@@ -633,7 +633,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
return -ENOMEM;
pci_set_drvdata(pdev, pci_info);
- if (efi_enabled)
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
orom = isci_get_efi_var(pdev);
if (!orom)
@@ -709,7 +709,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
return err;
}
-static void __devexit isci_pci_remove(struct pci_dev *pdev)
+static void isci_pci_remove(struct pci_dev *pdev)
{
struct isci_host *ihost;
int i;
@@ -778,7 +778,7 @@ static struct pci_driver isci_pci_driver = {
.name = DRV_NAME,
.id_table = isci_id_table,
.probe = isci_pci_probe,
- .remove = __devexit_p(isci_pci_remove),
+ .remove = isci_pci_remove,
#ifdef CONFIG_PM
.driver.pm = &isci_pm_ops,
#endif
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 27cfb0cb186c..69efbf12b299 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -129,7 +129,7 @@ static const struct esp_driver_ops jazz_esp_ops = {
.dma_error = jazz_esp_dma_error,
};
-static int __devinit esp_jazz_probe(struct platform_device *dev)
+static int esp_jazz_probe(struct platform_device *dev)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
@@ -201,7 +201,7 @@ fail:
return err;
}
-static int __devexit esp_jazz_remove(struct platform_device *dev)
+static int esp_jazz_remove(struct platform_device *dev)
{
struct esp *esp = dev_get_drvdata(&dev->dev);
unsigned int irq = esp->host->irq;
@@ -223,7 +223,7 @@ MODULE_ALIAS("platform:jazz_esp");
static struct platform_driver esp_jazz_driver = {
.probe = esp_jazz_probe,
- .remove = __devexit_p(esp_jazz_remove),
+ .remove = esp_jazz_remove,
.driver = {
.name = "jazz_esp",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 23880f8fe7e4..5c4ded997265 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -168,7 +168,7 @@ static struct parisc_driver lasi700_driver = {
.name = "lasi_scsi",
.id_table = lasi700_ids,
.probe = lasi700_probe,
- .remove = __devexit_p(lasi700_driver_remove),
+ .remove = lasi700_driver_remove,
};
static int __init
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 69b59935b53f..df4c13a5534c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -689,6 +689,7 @@ struct lpfc_hba {
#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
uint32_t cfg_fcf_failover_policy;
uint32_t cfg_fcp_io_sched;
+ uint32_t cfg_fcp2_no_tgt_reset;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
@@ -714,6 +715,7 @@ struct lpfc_hba {
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
+ uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ad16e54ac383..a364cae9e984 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3618,6 +3618,77 @@ static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
/**
+ * lpfc_request_firmware_upgrade_store - Request a Linux generic firmware upgrade
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "1" to request the firmware upgrade.
+ * @count: unused variable.
+ *
+ * Description:
+ * Requests an immediate firmware upgrade through the Linux generic
+ * firmware interface when "1" is written to the attribute.
+ *
+ * Returns:
+ * length of the buf on success if the upgrade request is accepted.
+ * -EINVAL if val is out of range or the intended mode is not supported.
+ * -EPERM if the firmware update request fails.
+ **/
+static ssize_t
+lpfc_request_firmware_upgrade_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val != 1)
+ return -EINVAL;
+
+ rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
+ if (rc)
+ rc = -EPERM;
+ else
+ rc = strlen(buf);
+ return rc;
+}
+
+static int lpfc_req_fw_upgrade;
+module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
+lpfc_param_show(request_firmware_upgrade)
+
+/**
+ * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
+ * @phba: lpfc_hba pointer.
+ * @val: 0 or 1.
+ *
+ * Description:
+ * Set the initial Linux generic firmware upgrade enable or disable flag.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= 1) {
+ phba->cfg_request_firmware_upgrade = val;
+ return 0;
+ }
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
+ lpfc_request_firmware_upgrade_show,
+ lpfc_request_firmware_upgrade_store);
+
+/**
* lpfc_fcp_imax_store
*
* @dev: class device that is converted into a Scsi_host.
@@ -3788,6 +3859,16 @@ LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
+# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
+# range is [0,1]. Default value is 0.
+# For [0], bus reset issues target reset to ALL devices
+# For [1], bus reset issues target reset to non-FCP2 devices
+*/
+LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
+ "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
+
+
+/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
@@ -4029,6 +4110,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_scan_down,
&dev_attr_lpfc_link_speed,
&dev_attr_lpfc_fcp_io_sched,
+ &dev_attr_lpfc_fcp2_no_tgt_reset,
&dev_attr_lpfc_cr_delay,
&dev_attr_lpfc_cr_count,
&dev_attr_lpfc_multi_ring_support,
@@ -4069,6 +4151,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
&dev_attr_lpfc_sriov_nr_virtfn,
+ &dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
@@ -5019,6 +5102,7 @@ void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
+ lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
@@ -5051,6 +5135,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
+ lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
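With the new lpfc_req_fw_upgrade attribute above, an immediate upgrade can be requested from user space by writing "1" to the SCSI host attribute; the firmware image is then fetched as "<ModelName>.grp" through the kernel firmware loader (see lpfc_sli4_request_firmware_update in lpfc_init.c below). A minimal user-space sketch, assuming the lpfc port is scsi_host "host0" (the host number and error handling are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumes the lpfc port is "host0"; adjust for the real host. */
	const char *attr = "/sys/class/scsi_host/host0/lpfc_req_fw_upgrade";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The store handler only accepts "1"; anything else returns -EINVAL. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}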
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4380a44000bc..69d66e3662cb 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -468,3 +468,4 @@ void lpfc_sli4_node_prep(struct lpfc_hba *);
int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
+int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7ffabb7e3afa..65f9fb6862e6 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -634,7 +634,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] && IOERR_PARAM_MASK) !=
+ (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f19e9b6f9f13..b9440deaad45 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1182,8 +1182,6 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sp->cmn.w2.r_a_tov = 0;
sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
- sp->cls2.seqDelivery = 1;
- sp->cls3.seqDelivery = 1;
if (sp->cmn.fcphLow < FC_PH3)
sp->cmn.fcphLow = FC_PH3;
if (sp->cmn.fcphHigh < FC_PH3)
@@ -1198,7 +1196,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Set the fcfi to the fcfi we registered with */
elsiocb->iocb.ulpContext = phba->fcf.fcfi;
}
+ /* Can't do SLI4 class2 without support for sequence coalescing */
+ sp->cls2.classValid = 0;
+ sp->cls2.seqDelivery = 0;
} else {
+ /* Historical, setting sequential-delivery bit for SLI3 */
+ sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
+ sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
sp->cmn.request_multiple_Nport = 1;
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2cdeb5434fb7..a47cfbdd05f2 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3219,6 +3219,9 @@ struct wqe_common {
#define wqe_dif_SHIFT 0
#define wqe_dif_MASK 0x00000003
#define wqe_dif_WORD word7
+#define LPFC_WQE_DIF_PASSTHRU 1
+#define LPFC_WQE_DIF_STRIP 2
+#define LPFC_WQE_DIF_INSERT 3
#define wqe_ct_SHIFT 2
#define wqe_ct_MASK 0x00000003
#define wqe_ct_WORD word7
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dc4218d9c4c..89ad55807012 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3854,7 +3854,7 @@ static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
char port_name;
- char message[80];
+ char message[128];
uint8_t status;
struct lpfc_acqe_misconfigured_event *misconfigured;
@@ -8813,7 +8813,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* 0 - driver can claim the device
* negative value - driver can not claim the device
**/
-static int __devinit
+static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
struct lpfc_hba *phba;
@@ -8980,7 +8980,7 @@ out_free_phba:
* removed from PCI bus, it performs all the necessary cleanup for the HBA
* device to be removed from the PCI subsystem properly.
**/
-static void __devexit
+static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9450,7 +9450,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
struct lpfc_dmabuf *dmabuf, *next;
uint32_t offset = 0, temp_offset = 0;
- /* It can be null, sanity check */
+ /* It can be null in no-wait mode, sanity check */
if (!fw) {
rc = -ENXIO;
goto out;
@@ -9528,11 +9528,48 @@ release_out:
release_firmware(fw);
out:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.", rc);
+ "3024 Firmware update done: %d.\n", rc);
return;
}
/**
+ * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
+ * @phba: pointer to lpfc hba data structure.
+ * @fw_upgrade: RUN_FW_UPGRADE for an immediate upgrade, INT_FW_UPGRADE for the
+ *              initial (probe time) upgrade request.
+ *
+ * This routine is called to perform a Linux generic firmware upgrade on
+ * devices that support this feature.
+ **/
+int
+lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+{
+ uint8_t file_name[ELX_MODEL_NAME_SIZE];
+ int ret;
+ const struct firmware *fw;
+
+ /* Only supported on SLI4 interface type 2 for now */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -EPERM;
+
+ snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
+
+ if (fw_upgrade == INT_FW_UPGRADE) {
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ file_name, &phba->pcidev->dev,
+ GFP_KERNEL, (void *)phba,
+ lpfc_write_firmware);
+ } else if (fw_upgrade == RUN_FW_UPGRADE) {
+ ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!ret)
+ lpfc_write_firmware(fw, (void *)phba);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
@@ -9550,7 +9587,7 @@ out:
* 0 - driver can claim the device
* negative value - driver can not claim the device
**/
-static int __devinit
+static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
struct lpfc_hba *phba;
@@ -9560,7 +9597,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
uint32_t cfg_mode, intr_mode;
int mcnt;
int adjusted_fcp_io_channel;
- uint8_t file_name[ELX_MODEL_NAME_SIZE];
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -9703,16 +9739,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
- /* check for firmware upgrade or downgrade (if_type 2 only) */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
- LPFC_SLI_INTF_IF_TYPE_2) {
- snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp",
- phba->ModelName);
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- file_name, &phba->pcidev->dev,
- GFP_KERNEL, (void *)phba,
- lpfc_write_firmware);
- }
+ /* check for firmware upgrade or downgrade */
+ if (phba->cfg_request_firmware_upgrade)
+ ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
@@ -9750,7 +9779,7 @@ out_free_phba:
* removed from PCI bus, it performs all the necessary cleanup for the HBA
* device to be removed from the PCI subsystem properly.
**/
-static void __devexit
+static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -10176,7 +10205,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
* 0 - driver can claim the device
* negative value - driver can not claim the device
**/
-static int __devinit
+static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
int rc;
@@ -10204,7 +10233,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
* remove routine, which will perform all the necessary cleanup for the
* device to be removed from the PCI subsystem properly.
**/
-static void __devexit
+static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -10546,7 +10575,7 @@ static struct pci_driver lpfc_driver = {
.name = LPFC_DRIVER_NAME,
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
- .remove = __devexit_p(lpfc_pci_remove_one),
+ .remove = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7f45ac9964a9..60e5a177644c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3227,6 +3227,21 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
}
}
+ switch (scsi_get_prot_op(scsi_cmnd)) {
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_READ_STRIP:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_INSERT:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ case SCSI_PROT_READ_PASS:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+ break;
+ }
+
fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
@@ -3236,7 +3251,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
return 0;
err:
@@ -4914,6 +4928,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
+ if (vport->phba->cfg_fcp2_no_tgt_reset &&
+ (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
+ continue;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
ndlp->nlp_sid == i &&
ndlp->rport) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d7f3313ef886..624eab370396 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8068,10 +8068,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- if (iocbq->iocb_flag & LPFC_IO_DIF) {
- iocbq->iocb_flag &= ~LPFC_IO_DIF;
- bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
- }
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
break;
case CMD_FCP_IREAD64_CR:
@@ -8091,10 +8087,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- if (iocbq->iocb_flag & LPFC_IO_DIF) {
- iocbq->iocb_flag &= ~LPFC_IO_DIF;
- bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
- }
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
break;
case CMD_FCP_ICMND64_CR:
@@ -8304,6 +8296,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
break;
}
+ if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
+ else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
+ else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
+ iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
+ LPFC_IO_DIF_INSERT);
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
wqe->generic.wqe_com.abort_tag = abort_tag;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 2f48d000a3b4..9d2e0c6fe334 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -69,7 +69,9 @@ struct lpfc_iocbq {
#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP 0x100 /* security IO */
#define LPFC_IO_ON_TXCMPLQ 0x200 /* The IO is still on the TXCMPLQ */
-#define LPFC_IO_DIF 0x400 /* T10 DIF IO */
+#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */
+#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */
+#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index f44a06a4c6e7..44c427a45d66 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -82,6 +82,9 @@
#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
+#define INT_FW_UPGRADE 0
+#define RUN_FW_UPGRADE 1
+
enum lpfc_sli4_queue_type {
LPFC_EQ,
LPFC_GCQ,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0c2149189dda..ba596e854bbc 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.35"
+#define LPFC_DRIVER_VERSION "8.3.36"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 70eb1f79b1ba..994fc5caf036 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -481,7 +481,7 @@ static struct esp_driver_ops mac_esp_ops = {
.dma_error = mac_esp_dma_error,
};
-static int __devinit esp_mac_probe(struct platform_device *dev)
+static int esp_mac_probe(struct platform_device *dev)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
@@ -591,7 +591,7 @@ fail:
return err;
}
-static int __devexit esp_mac_remove(struct platform_device *dev)
+static int esp_mac_remove(struct platform_device *dev)
{
struct mac_esp_priv *mep = platform_get_drvdata(dev);
struct esp *esp = mep->esp;
@@ -614,7 +614,7 @@ static int __devexit esp_mac_remove(struct platform_device *dev)
static struct platform_driver esp_mac_driver = {
.probe = esp_mac_probe,
- .remove = __devexit_p(esp_mac_remove),
+ .remove = esp_mac_remove,
.driver = {
.name = DRV_MODULE_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 76ad72d32c3f..9504ec0ec682 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4522,7 +4522,7 @@ static struct scsi_host_template megaraid_template = {
.eh_host_reset_handler = megaraid_reset,
};
-static int __devinit
+static int
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
@@ -4914,7 +4914,7 @@ __megaraid_shutdown(adapter_t *adapter)
mdelay(1000);
}
-static void __devexit
+static void
megaraid_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -5008,7 +5008,7 @@ static struct pci_driver megaraid_pci_driver = {
.name = "megaraid_legacy",
.id_table = megaraid_pci_tbl,
.probe = megaraid_probe_one,
- .remove = __devexit_p(megaraid_remove_one),
+ .remove = megaraid_remove_one,
.shutdown = megaraid_shutdown,
};
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 54b1c5bb310f..e6a1e0b38a19 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -305,7 +305,7 @@ static struct pci_driver megaraid_pci_driver = {
.name = "megaraid",
.id_table = pci_id_table_g,
.probe = megaraid_probe_one,
- .remove = __devexit_p(megaraid_detach_one),
+ .remove = megaraid_detach_one,
.shutdown = megaraid_mbox_shutdown,
};
@@ -434,7 +434,7 @@ megaraid_exit(void)
* This routine should be called whenever a new adapter is detected by the
* PCI hotplug susbsystem.
*/
-static int __devinit
+static int
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
adapter_t *adapter;
@@ -735,7 +735,7 @@ megaraid_io_detach(adapter_t *adapter)
* - Allocate memory required for all the commands
* - Use internal library of FW routines, build up complete soft state
*/
-static int __devinit
+static int
megaraid_init_mbox(adapter_t *adapter)
{
struct pci_dev *pdev;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e4f2baacf1e1..66a0fec0437b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3972,8 +3972,8 @@ fail_set_dma_mask:
* @pdev: PCI device structure
* @id: PCI ids of supported hotplugged adapter
*/
-static int __devinit
-megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
int rval, pos, i, j;
struct Scsi_Host *host;
@@ -4525,7 +4525,7 @@ fail_ready_state:
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
*/
-static void __devexit megasas_detach_one(struct pci_dev *pdev)
+static void megasas_detach_one(struct pci_dev *pdev)
{
int i;
struct Scsi_Host *host;
@@ -5119,7 +5119,7 @@ static struct pci_driver megasas_pci_driver = {
.name = "megaraid_sas",
.id_table = megasas_pci_table,
.probe = megasas_probe_one,
- .remove = __devexit_p(megasas_detach_one),
+ .remove = megasas_detach_one,
.suspend = megasas_suspend,
.resume = megasas_resume,
.shutdown = megasas_shutdown,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index af4e6c451b1b..c6bdc9267229 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -7686,7 +7686,7 @@ _scsih_shutdown(struct pci_dev *pdev)
* Routine called when unloading the driver.
* Return nothing.
*/
-static void __devexit
+static void
_scsih_remove(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8338,7 +8338,7 @@ static struct pci_driver scsih_driver = {
.name = MPT2SAS_DRIVER_NAME,
.id_table = scsih_pci_table,
.probe = _scsih_probe,
- .remove = __devexit_p(_scsih_remove),
+ .remove = _scsih_remove,
.shutdown = _scsih_shutdown,
.err_handler = &_scsih_err_handler,
#ifdef CONFIG_PM
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
new file mode 100644
index 000000000000..81471bf415d8
--- /dev/null
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -0,0 +1,67 @@
+#
+# Kernel configuration file for the MPT3SAS
+#
+# This code is based on drivers/scsi/mpt3sas/Kconfig
+# Copyright (C) 2012 LSI Corporation
+# (mailto:DL-MPTFusionLinux@lsi.com)
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
+
+config SCSI_MPT3SAS
+ tristate "LSI MPT Fusion SAS 3.0 Device Driver"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ select RAID_ATTRS
+ ---help---
+ This driver supports PCI-Express SAS 12Gb/s Host Adapters.
+
+config SCSI_MPT3SAS_MAX_SGE
+ int "LSI MPT Fusion Max number of SG Entries (16 - 256)"
+ depends on PCI && SCSI && SCSI_MPT3SAS
+ default "128"
+ range 16 256
+ ---help---
+	  This option allows you to specify the maximum number of scatter-
+	  gather entries per I/O. The driver default is 128, which matches
+	  MAX_PHYS_SEGMENTS in most kernels. However, in SuSE kernels this
+	  can be 256. It may also be decreased down to 16; decreasing this
+	  parameter reduces the memory requirements on a per-controller basis.
+
+config SCSI_MPT3SAS_LOGGING
+ bool "LSI MPT Fusion logging facility"
+ depends on PCI && SCSI && SCSI_MPT3SAS
+ ---help---
+ This turns on a logging facility.
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
new file mode 100644
index 000000000000..4c1d2e7a1176
--- /dev/null
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -0,0 +1,8 @@
+# mpt3sas makefile
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
+mpt3sas-y += mpt3sas_base.o \
+ mpt3sas_config.o \
+ mpt3sas_scsih.o \
+ mpt3sas_transport.o \
+ mpt3sas_ctl.o \
+ mpt3sas_trigger_diag.o
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
new file mode 100644
index 000000000000..03317ffea62c
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -0,0 +1,1164 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2.h
+ * Title: MPI Message independent structures and definitions
+ * including System Interface Register Set and
+ * scatter/gather formats.
+ * Creation Date: June 21, 2006
+ *
+ * mpi2.h Version: 02.00.26
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved ReplyPostHostIndex register to offset 0x6C of the
+ * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+ * Added union of request descriptors.
+ * Added union of reply descriptors.
+ * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_VERSION_02_00.
+ * Fixed the size of the FunctionDependent5 field in the
+ * MPI2_DEFAULT_REPLY structure.
+ * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnostic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
+ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_H
+#define MPI2_H
+
+/*****************************************************************************
+*
+* MPI Version Definitions
+*
+*****************************************************************************/
+
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+
+/*major version for all MPI v2.x */
+#define MPI2_VERSION_MAJOR (0x02)
+
+/*minor version for MPI v2.0 compatible products */
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_VERSION_02_00 (0x0200)
+
+/*minor version for MPI v2.5 compatible products */
+#define MPI25_VERSION_MINOR (0x05)
+#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI25_VERSION_MINOR)
+#define MPI2_VERSION_02_05 (0x0205)
+
+/*Unit and Dev versioning for this MPI header set */
+#define MPI2_HEADER_VERSION_UNIT (0x1A)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+ MPI2_HEADER_VERSION_DEV)
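
For illustration only (not part of the patch), a minimal sketch of how a driver might split a packed 16-bit MPI version word, such as the MsgVersion reported in the IOC Facts reply, using the masks above; the helper name is hypothetical:

static void example_decode_mpi_version(u16 msg_version)
{
	u8 major = (msg_version & MPI2_VERSION_MAJOR_MASK) >>
		   MPI2_VERSION_MAJOR_SHIFT;
	u8 minor = (msg_version & MPI2_VERSION_MINOR_MASK) >>
		   MPI2_VERSION_MINOR_SHIFT;

	/* MPI v2.0 parts report MPI2_VERSION (0x0200),
	 * MPI v2.5 parts report MPI25_VERSION (0x0205).
	 */
	pr_info("MPI version %u.%u\n", major, minor);
}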
+
+/*****************************************************************************
+*
+* IOC State Definitions
+*
+*****************************************************************************/
+
+#define MPI2_IOC_STATE_RESET (0x00000000)
+#define MPI2_IOC_STATE_READY (0x10000000)
+#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
+#define MPI2_IOC_STATE_FAULT (0x40000000)
+
+#define MPI2_IOC_STATE_MASK (0xF0000000)
+#define MPI2_IOC_STATE_SHIFT (28)
+
+/*Fault state range for product specific codes */
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000)
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF)
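
As a hedged sketch (chip_base stands in for the driver's mapped system interface registers, which this header does not define), the current IOC state is read from the Doorbell register and masked with MPI2_IOC_STATE_MASK:

static u32 example_get_ioc_state(void __iomem *chip_base)
{
	/* The Doorbell register sits at offset 0x00 of the system
	 * interface registers (MPI2_DOORBELL_OFFSET, defined below).
	 */
	u32 doorbell = readl(chip_base);

	return doorbell & MPI2_IOC_STATE_MASK;	/* e.g. MPI2_IOC_STATE_FAULT */
}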
+
+/*****************************************************************************
+*
+* System Interface Register Definitions
+*
+*****************************************************************************/
+
+typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
+ U32 Doorbell; /*0x00 */
+ U32 WriteSequence; /*0x04 */
+ U32 HostDiagnostic; /*0x08 */
+ U32 Reserved1; /*0x0C */
+ U32 DiagRWData; /*0x10 */
+ U32 DiagRWAddressLow; /*0x14 */
+ U32 DiagRWAddressHigh; /*0x18 */
+ U32 Reserved2[5]; /*0x1C */
+ U32 HostInterruptStatus; /*0x30 */
+ U32 HostInterruptMask; /*0x34 */
+ U32 DCRData; /*0x38 */
+ U32 DCRAddress; /*0x3C */
+ U32 Reserved3[2]; /*0x40 */
+ U32 ReplyFreeHostIndex; /*0x48 */
+ U32 Reserved4[8]; /*0x4C */
+ U32 ReplyPostHostIndex; /*0x6C */
+ U32 Reserved5; /*0x70 */
+ U32 HCBSize; /*0x74 */
+ U32 HCBAddressLow; /*0x78 */
+ U32 HCBAddressHigh; /*0x7C */
+ U32 Reserved6[16]; /*0x80 */
+ U32 RequestDescriptorPostLow; /*0xC0 */
+ U32 RequestDescriptorPostHigh; /*0xC4 */
+ U32 Reserved7[14]; /*0xC8 */
+} MPI2_SYSTEM_INTERFACE_REGS,
+ *PTR_MPI2_SYSTEM_INTERFACE_REGS,
+ Mpi2SystemInterfaceRegs_t,
+ *pMpi2SystemInterfaceRegs_t;
+
+/*
+ *Defines for working with the Doorbell register.
+ */
+#define MPI2_DOORBELL_OFFSET (0x00000000)
+
+/*IOC --> System values */
+#define MPI2_DOORBELL_USED (0x08000000)
+#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000)
+#define MPI2_DOORBELL_WHO_INIT_SHIFT (24)
+#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF)
+#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF)
+
+/*System --> IOC values */
+#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000)
+#define MPI2_DOORBELL_FUNCTION_SHIFT (24)
+#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
+#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16)
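
A sketch of composing a System-to-IOC doorbell write from these fields; the function code and dword count are assumptions supplied by the caller, and a real driver also waits for the doorbell-used and interrupt status bits to clear before and after the write:

static void example_post_doorbell(void __iomem *chip_base, u8 function,
				  u8 request_dwords)
{
	u32 val = ((u32)function << MPI2_DOORBELL_FUNCTION_SHIFT) |
		  ((u32)request_dwords << MPI2_DOORBELL_ADD_DWORDS_SHIFT);

	writel(val, chip_base + MPI2_DOORBELL_OFFSET);
}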
+
+/*
+ *Defines for the WriteSequence register
+ */
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
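
The six key values are written to the WriteSequence register in order to unlock the HostDiagnostic register, with a flush write first to return the sequencer to a known state. A minimal sketch, assuming chip_base is the mapped register space:

static void example_unlock_host_diagnostic(void __iomem *chip_base)
{
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, chip_base + MPI2_WRITE_SEQUENCE_OFFSET);
	writel(MPI2_WRSEQ_1ST_KEY_VALUE, chip_base + MPI2_WRITE_SEQUENCE_OFFSET);
	writel(MPI2_WRSEQ_2ND_KEY_VALUE, chip_base + MPI2_WRITE_SEQUENCE_OFFSET);
	writel(MPI2_WRSEQ_3RD_KEY_VALUE, chip_base + MPI2_WRITE_SEQUENCE_OFFSET);
	writel(MPI2_WRSEQ_4TH_KEY_VALUE, chip_base + MPI2_WRITE_SEQUENCE_OFFSET);
	writel(MPI2_WRSEQ_5TH_KEY_VALUE, chip_base + MPI2_WRITE_SEQUENCE_OFFSET);
	writel(MPI2_WRSEQ_6TH_KEY_VALUE, chip_base + MPI2_WRITE_SEQUENCE_OFFSET);
}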
+
+/*
+ *Defines for the HostDiagnostic register
+ */
+#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+
+#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
+#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
+#define MPI2_DIAG_HCB_MODE (0x00000100)
+#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080)
+#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040)
+#define MPI2_DIAG_RESET_HISTORY (0x00000020)
+#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010)
+#define MPI2_DIAG_RESET_ADAPTER (0x00000004)
+#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002)
+
+/*
+ *Offsets for DiagRWData and address
+ */
+#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010)
+#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014)
+#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018)
+
+/*
+ *Defines for the HostInterruptStatus register
+ */
+#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
+#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000)
+#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS
+#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000)
+#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008)
+#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001)
+#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS
+
+/*
+ *Defines for the HostInterruptMask register
+ */
+#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
+#define MPI2_HIM_RESET_IRQ_MASK (0x40000000)
+#define MPI2_HIM_REPLY_INT_MASK (0x00000008)
+#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK
+#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001)
+#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK
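
For example, an interrupt handler would typically test the reply-descriptor bit of HostInterruptStatus before touching the reply post queue; a minimal sketch under the same chip_base assumption as above:

static bool example_reply_interrupt_pending(void __iomem *chip_base)
{
	u32 his = readl(chip_base + MPI2_HOST_INTERRUPT_STATUS_OFFSET);

	return (his & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) != 0;
}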
+
+/*
+ *Offsets for DCRData and address
+ */
+#define MPI2_DCR_DATA_OFFSET (0x00000038)
+#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C)
+
+/*
+ *Offset for the Reply Free Queue
+ */
+#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
+
+/*
+ *Defines for the Reply Descriptor Post Queue
+ */
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
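
The host returns consumed reply descriptors by writing ReplyPostHostIndex: the new free index goes in the low 24 bits and the originating MSI-X vector in the top byte. A sketch under those assumptions:

static void example_update_reply_post_index(void __iomem *chip_base,
					    u8 msix_index, u32 reply_index)
{
	u32 val = ((u32)msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT) |
		  (reply_index & MPI2_REPLY_POST_HOST_INDEX_MASK);

	writel(val, chip_base + MPI2_REPLY_POST_HOST_INDEX_OFFSET);
}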
+
+/*
+ *Defines for the HCBSize and address
+ */
+#define MPI2_HCB_SIZE_OFFSET (0x00000074)
+#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000)
+#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001)
+
+#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078)
+#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
+
+/*
+ *Offsets for the Request Queue
+ */
+#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
+#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+
+/*Hard Reset delay timings */
+#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
+#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000)
+#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000)
+
+/*****************************************************************************
+*
+* Message Descriptors
+*
+*****************************************************************************/
+
+/*Request Descriptors */
+
+/*Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 DescriptorTypeDependent; /*0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ Mpi2DefaultRequestDescriptor_t,
+ *pMpi2DefaultRequestDescriptor_t;
+
+/*defines for the RequestFlags field */
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
+#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
+#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
+
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+
+/*High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 Reserved1; /*0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ Mpi2HighPriorityRequestDescriptor_t,
+ *pMpi2HighPriorityRequestDescriptor_t;
+
+/*SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 DevHandle; /*0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi2SCSIIORequestDescriptor_t,
+ *pMpi2SCSIIORequestDescriptor_t;
+
+/*SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 IoIndex; /*0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ Mpi2SCSITargetRequestDescriptor_t,
+ *pMpi2SCSITargetRequestDescriptor_t;
+
+/*RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 Reserved; /*0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ Mpi2RAIDAcceleratorRequestDescriptor_t,
+ *pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+/*Fast Path SCSI IO Request Descriptor */
+typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi25FastPathSCSIIORequestDescriptor_t,
+ *pMpi25FastPathSCSIIORequestDescriptor_t;
+
+/*union of Request Descriptors */
+typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
+ U64 Words;
+} MPI2_REQUEST_DESCRIPTOR_UNION,
+ *PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
+ Mpi2RequestDescriptorUnion_t,
+ *pMpi2RequestDescriptorUnion_t;
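
To post a request, the host fills one of these descriptors and writes its 64-bit Words image to the request descriptor post registers defined above. A hedged sketch for the SCSI IO form (smid, dev_handle and msix_index are assumed to come from the driver core; a real driver also serializes the pair of 32-bit writes):

static void example_post_scsi_io(void __iomem *chip_base, u16 smid,
				 u16 dev_handle, u8 msix_index)
{
	Mpi2RequestDescriptorUnion_t desc;

	desc.Words = 0;
	desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	desc.SCSIIO.MSIxIndex = msix_index;
	desc.SCSIIO.SMID = cpu_to_le16(smid);
	desc.SCSIIO.DevHandle = cpu_to_le16(dev_handle);

	writel(lower_32_bits(desc.Words),
	       chip_base + MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET);
	writel(upper_32_bits(desc.Words),
	       chip_base + MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET);
}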
+
+/*Reply Descriptors */
+
+/*Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 DescriptorTypeDependent1; /*0x02 */
+ U32 DescriptorTypeDependent2; /*0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ Mpi2DefaultReplyDescriptor_t,
+ *pMpi2DefaultReplyDescriptor_t;
+
+/*defines for the ReplyFlags field */
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
+#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
+#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+
+/*values for marking a reply descriptor as unused */
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF)
+
+/*Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U32 ReplyFrameAddress; /*0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ Mpi2AddressReplyDescriptor_t,
+ *pMpi2AddressReplyDescriptor_t;
+
+#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00)
+
+/*SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 TaskTag; /*0x04 */
+ U16 Reserved1; /*0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2SCSIIOSuccessReplyDescriptor_t,
+ *pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+/*TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U8 SequenceNumber; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 IoIndex; /*0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2TargetAssistSuccessReplyDescriptor_t,
+ *pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+/*Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U8 VP_ID; /*0x02 */
+ U8 Flags; /*0x03 */
+ U16 InitiatorDevHandle; /*0x04 */
+ U16 IoIndex; /*0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ Mpi2TargetCommandBufferReplyDescriptor_t,
+ *pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/*defines for Flags field */
+#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F)
+
+/*RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U32 Reserved; /*0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+ *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+/*Fast Path SCSI IO Success Reply Descriptor */
+typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
+ *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
+
+/*union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
+ U64 Words;
+} MPI2_REPLY_DESCRIPTORS_UNION,
+ *PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+ Mpi2ReplyDescriptorsUnion_t,
+ *pMpi2ReplyDescriptorsUnion_t;
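
When polling the reply post queue, a descriptor whose type field still reads as "unused" has not yet been written by the IOC; a minimal sketch:

static bool example_reply_descriptor_valid(Mpi2ReplyDescriptorsUnion_t *rpf)
{
	u8 reply_type = rpf->Default.ReplyFlags &
			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	return reply_type != MPI2_RPY_DESCRIPT_FLAGS_UNUSED;
}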
+
+/*****************************************************************************
+*
+* Message Functions
+*
+*****************************************************************************/
+
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00)
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
+#define MPI2_FUNCTION_IOC_INIT (0x02)
+#define MPI2_FUNCTION_IOC_FACTS (0x03)
+#define MPI2_FUNCTION_CONFIG (0x04)
+#define MPI2_FUNCTION_PORT_FACTS (0x05)
+#define MPI2_FUNCTION_PORT_ENABLE (0x06)
+#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07)
+#define MPI2_FUNCTION_EVENT_ACK (0x08)
+#define MPI2_FUNCTION_FW_DOWNLOAD (0x09)
+#define MPI2_FUNCTION_TARGET_ASSIST (0x0B)
+#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C)
+#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D)
+#define MPI2_FUNCTION_FW_UPLOAD (0x12)
+#define MPI2_FUNCTION_RAID_ACTION (0x15)
+#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16)
+#define MPI2_FUNCTION_TOOLBOX (0x17)
+#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18)
+#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A)
+#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B)
+#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C)
+#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D)
+#define MPI2_FUNCTION_DIAG_RELEASE (0x1E)
+#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24)
+#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25)
+#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C)
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
+#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
+#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
+#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*Doorbell functions */
+#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
+#define MPI2_FUNCTION_HANDSHAKE (0x42)
+
+/*****************************************************************************
+*
+* IOC Status Values
+*
+*****************************************************************************/
+
+/*mask for IOCStatus status value */
+#define MPI2_IOCSTATUS_MASK (0x7FFF)
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SUCCESS (0x0000)
+#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI2_IOCSTATUS_BUSY (0x0002)
+#define MPI2_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI2_IOCSTATUS_INVALID_VPID (0x0005)
+#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
+#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
+#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
+#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
+#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
+#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
+#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
+#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
+#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
+
+/****************************************************************************
+* RAID Accelerator values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0)
+
+/****************************************************************************
+* IOCStatus flag to indicate that log info is available
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
+
+/****************************************************************************
+* IOCLogInfo Types
+****************************************************************************/
+
+#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000)
+#define MPI2_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI2_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1)
+#define MPI2_IOCLOGINFO_TYPE_FC (0x2)
+#define MPI2_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4)
+#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
+
+/*****************************************************************************
+*
+* Standard Message Structures
+*
+*****************************************************************************/
+
+/****************************************************************************
+*Request Message Header for all request messages
+****************************************************************************/
+
+typedef struct _MPI2_REQUEST_HEADER {
+ U16 FunctionDependent1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 FunctionDependent2; /*0x04 */
+ U8 FunctionDependent3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER,
+ MPI2RequestHeader_t, *pMPI2RequestHeader_t;
+
+/****************************************************************************
+* Default Reply
+****************************************************************************/
+
+typedef struct _MPI2_DEFAULT_REPLY {
+ U16 FunctionDependent1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 FunctionDependent2; /*0x04 */
+ U8 FunctionDependent3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 FunctionDependent5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY,
+ MPI2DefaultReply_t, *pMPI2DefaultReply_t;
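
A sketch of decoding the status fields of a default reply with the IOCStatus and IOCLogInfo defines above (MPI reply frames are little-endian, hence the le16/le32 conversions):

static void example_decode_reply_status(MPI2DefaultReply_t *reply)
{
	u16 ioc_status = le16_to_cpu(reply->IOCStatus);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		u32 loginfo = le32_to_cpu(reply->IOCLogInfo);
		u32 type = (loginfo & MPI2_IOCLOGINFO_TYPE_MASK) >>
			   MPI2_IOCLOGINFO_TYPE_SHIFT;

		pr_info("loginfo type 0x%x data 0x%08x\n", type,
			loginfo & MPI2_IOCLOGINFO_LOG_DATA_MASK);
	}

	if ((ioc_status & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		pr_warn("request failed, ioc_status 0x%04x\n",
			ioc_status & MPI2_IOCSTATUS_MASK);
}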
+
+/*common version structure/union used in messages and configuration pages */
+
+typedef struct _MPI2_VERSION_STRUCT {
+ U8 Dev; /*0x00 */
+ U8 Unit; /*0x01 */
+ U8 Minor; /*0x02 */
+ U8 Major; /*0x03 */
+} MPI2_VERSION_STRUCT;
+
+typedef union _MPI2_VERSION_UNION {
+ MPI2_VERSION_STRUCT Struct;
+ U32 Word;
+} MPI2_VERSION_UNION;
+
+/*LUN field defines, common to many structures */
+#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* MPI Simple Element structures
+****************************************************************************/
+
+typedef struct _MPI2_SGE_SIMPLE32 {
+ U32 FlagsLength;
+ U32 Address;
+} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32,
+ Mpi2SGESimple32_t, *pMpi2SGESimple32_t;
+
+typedef struct _MPI2_SGE_SIMPLE64 {
+ U32 FlagsLength;
+ U64 Address;
+} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64,
+ Mpi2SGESimple64_t, *pMpi2SGESimple64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION {
+ U32 FlagsLength;
+ union {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION,
+ *PTR_MPI2_SGE_SIMPLE_UNION,
+ Mpi2SGESimpleUnion_t,
+ *pMpi2SGESimpleUnion_t;
+
+/****************************************************************************
+* MPI Chain Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_CHAIN32 {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U32 Address;
+} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32,
+ Mpi2SGEChain32_t, *pMpi2SGEChain32_t;
+
+typedef struct _MPI2_SGE_CHAIN64 {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U64 Address;
+} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64,
+ Mpi2SGEChain64_t, *pMpi2SGEChain64_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ union {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION,
+ *PTR_MPI2_SGE_CHAIN_UNION,
+ Mpi2SGEChainUnion_t,
+ *pMpi2SGEChainUnion_t;
+
+/****************************************************************************
+* MPI Transaction Context Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANSACTION32 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[1];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION32,
+ *PTR_MPI2_SGE_TRANSACTION32,
+ Mpi2SGETransaction32_t,
+ *pMpi2SGETransaction32_t;
+
+typedef struct _MPI2_SGE_TRANSACTION64 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[2];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION64,
+ *PTR_MPI2_SGE_TRANSACTION64,
+ Mpi2SGETransaction64_t,
+ *pMpi2SGETransaction64_t;
+
+typedef struct _MPI2_SGE_TRANSACTION96 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[3];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96,
+ Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t;
+
+typedef struct _MPI2_SGE_TRANSACTION128 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[4];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128,
+ Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128;
+
+typedef struct _MPI2_SGE_TRANSACTION_UNION {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ union {
+ U32 TransactionContext32[1];
+ U32 TransactionContext64[2];
+ U32 TransactionContext96[3];
+ U32 TransactionContext128[4];
+ } u;
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION_UNION,
+ *PTR_MPI2_SGE_TRANSACTION_UNION,
+ Mpi2SGETransactionUnion_t,
+ *pMpi2SGETransactionUnion_t;
+
+/****************************************************************************
+* MPI SGE union for IO SGL's - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_IO_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION,
+ Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t;
+
+/****************************************************************************
+* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_SGE_TRANS_SIMPLE_UNION,
+ *PTR_MPI2_SGE_TRANS_SIMPLE_UNION,
+ Mpi2SGETransSimpleUnion_t,
+ *pMpi2SGETransSimpleUnion_t;
+
+/****************************************************************************
+* All MPI SGE types union
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION,
+ Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t;
+
+/****************************************************************************
+* MPI SGE field definition and masks
+****************************************************************************/
+
+/*Flags field bit definitions */
+
+#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80)
+#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40)
+#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
+#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08)
+#define MPI2_SGE_FLAGS_DIRECTION (0x04)
+#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02)
+#define MPI2_SGE_FLAGS_END_OF_LIST (0x01)
+
+#define MPI2_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF)
+#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
+
+/*Element Type */
+
+#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00)
+#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
+#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30)
+#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30)
+
+/*Address location */
+
+#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
+
+/*Direction */
+
+#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
+#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
+
+#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST)
+#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC)
+
+/*Address Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+
+/*Context Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02)
+#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04)
+#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06)
+
+#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
+#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16)
+
+/****************************************************************************
+* MPI SGE operation Macros
+****************************************************************************/
+
+/*SIMPLE FlagsLength manipulations... */
+#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \
+ MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK)
+#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK)
+
+#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \
+ MPI2_SGE_LENGTH(l))
+
+#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \
+ MPI2_SGE_SET_FLAGS_LENGTH(f, l))
+
+/*CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \
+ MPI2_SGE_SET_FLAGS(f))
+#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \
+ MPI2_SGE_LENGTH(l))
+
+#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \
+ MPI2_SGE_CHAIN_OFFSET_SHIFT)
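
A hedged sketch of filling a single 64-bit MPI simple SGE with these macros; the flag set (last element, end of buffer, end of list) assumes a one-element list, and dma_addr/length are assumptions supplied by the caller:

static void example_build_mpi_sge(Mpi2SGESimple64_t *sge,
				  dma_addr_t dma_addr, u32 length,
				  bool host_to_ioc)
{
	u32 flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST;

	if (host_to_ioc)
		flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;

	sge->FlagsLength = cpu_to_le32(MPI2_SGE_SET_FLAGS_LENGTH(flags, length));
	sge->Address = cpu_to_le64(dma_addr);
}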
+
+/*****************************************************************************
+*
+* Fusion-MPT IEEE Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IEEE Simple Element structures
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_SIMPLE32 {
+ U32 Address;
+ U32 FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32,
+ Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE64 {
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 Reserved2;
+ U8 Flags;
+} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64,
+ Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION {
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION,
+ *PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+ Mpi2IeeeSgeSimpleUnion_t,
+ *pMpi2IeeeSgeSimpleUnion_t;
+
+/****************************************************************************
+* IEEE Chain Element structures
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+
+/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION {
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION,
+ *PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+ Mpi2IeeeSgeChainUnion_t,
+ *pMpi2IeeeSgeChainUnion_t;
+
+/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */
+typedef struct _MPI25_IEEE_SGE_CHAIN64 {
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 NextChainOffset;
+ U8 Flags;
+} MPI25_IEEE_SGE_CHAIN64,
+ *PTR_MPI25_IEEE_SGE_CHAIN64,
+ Mpi25IeeeSgeChain64_t,
+ *pMpi25IeeeSgeChain64_t;
+
+/****************************************************************************
+* All IEEE SGE types union
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_UNION {
+ union {
+ MPI2_IEEE_SGE_SIMPLE_UNION Simple;
+ MPI2_IEEE_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION,
+ Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t;
+
+/****************************************************************************
+* IEEE SGE union for IO SGL's
+****************************************************************************/
+
+typedef union _MPI25_SGE_IO_UNION {
+ MPI2_IEEE_SGE_SIMPLE64 IeeeSimple;
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain;
+} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION,
+ Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t;
+
+/****************************************************************************
+* IEEE SGE field definitions and masks
+****************************************************************************/
+
+/*Flags field bit definitions */
+
+#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80)
+#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF)
+
+/*Element Type */
+
+#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
+#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+
+/*Data Location Address Space */
+
+#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
+ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR)
+
+/****************************************************************************
+* IEEE SGE operation Macros
+****************************************************************************/
+
+/*SIMPLE FlagsLength manipulations... */
+#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \
+ >> MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK)
+
+#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\
+ MPI2_IEEE32_SGE_LENGTH(l))
+
+#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \
+ MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \
+ MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \
+ MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l))
+
+/*CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \
+ MPI2_IEEE32_SGE_SET_FLAGS(f))
+#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \
+ MPI2_IEEE32_SGE_LENGTH(l))
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI/IEEE Scatter Gather Unions
+*
+*****************************************************************************/
+
+typedef union _MPI2_SIMPLE_SGE_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION,
+ Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t;
+
+typedef union _MPI2_SGE_IO_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION,
+ Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t;
+
+/****************************************************************************
+*
+* Values for SGLFlags field, used in many request messages with an SGL
+*
+****************************************************************************/
+
+/*values for MPI SGL Data Location Address Space subfield */
+#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C)
+#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
+#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
+#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
+/*values for SGL Type subfield */
+#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
+#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02)
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
new file mode 100644
index 000000000000..d8b2c3eedb57
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -0,0 +1,3323 @@
+/*
+ * Copyright (c) 2000-2011 LSI Corporation.
+ *
+ *
+ * Name: mpi2_cnfg.h
+ * Title: MPI Configuration messages and pages
+ * Creation Date: November 10, 2006
+ *
+ * mpi2_cnfg.h Version: 02.00.22
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
+ * Added Manufacturing Page 11.
+ * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
+ * define.
+ * 06-26-07 02.00.02 Adding generic structure for product-specific
+ * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
+ * Rework of BIOS Page 2 configuration page.
+ * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
+ * forms.
+ * Added configuration pages IOC Page 8 and Driver
+ * Persistent Mapping Page 0.
+ * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
+ * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
+ * RAID Physical Disk Pages 0 and 1, RAID Configuration
+ * Page 0).
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
+ * NVDATA.
+ * Modified IOC Page 7 to use masks and added field for
+ * SASBroadcastPrimitiveMasks.
+ * Added MPI2_CONFIG_PAGE_BIOS_4.
+ * Added MPI2_CONFIG_PAGE_LOG_0.
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
+ * Added SAS Device IDs.
+ * Updated Integrated RAID configuration pages including
+ * Manufacturing Page 4, IOC Page 6, and RAID Configuration
+ * Page 0.
+ * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
+ * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
+ * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
+ * Added missing MaxNumRoutedSasAddresses field to
+ * MPI2_CONFIG_PAGE_EXPANDER_0.
+ * Added SAS Port Page 0.
+ * Modified structure layout for
+ * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
+ * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
+ * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
+ * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
+ * to 0x000000FF.
+ * Added two new values for the Physical Disk Coercion Size
+ * bits in the Flags field of Manufacturing Page 4.
+ * Added product-specific Manufacturing pages 16 to 31.
+ * Modified Flags bits for controlling write cache on SATA
+ * drives in IO Unit Page 1.
+ * Added new bit to AdditionalControlFlags of SAS IO Unit
+ * Page 1 to control Invalid Topology Correction.
+ * Added additional defines for RAID Volume Page 0
+ * VolumeStatusFlags field.
+ * Modified meaning of RAID Volume Page 0 VolumeSettings
+ * define for auto-configure of hot-swap drives.
+ * Added SupportedPhysDisks field to RAID Volume Page 1 and
+ * added related defines.
+ * Added PhysDiskAttributes field (and related defines) to
+ * RAID Physical Disk Page 0.
+ * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
+ * Added three new DiscoveryStatus bits for SAS IO Unit
+ * Page 0 and SAS Expander Page 0.
+ * Removed multiplexing information from SAS IO Unit pages.
+ * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
+ * Removed Zone Address Resolved bit from PhyInfo and from
+ * Expander Page 0 Flags field.
+ * Added two new AccessStatus values to SAS Device Page 0
+ * for indicating routing problems. Added 3 reserved words
+ * to this page.
+ * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
+ * Inserted missing reserved field into structure for IOC
+ * Page 6.
+ * Added more pending task bits to RAID Volume Page 0
+ * VolumeStatusFlags defines.
+ * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
+ * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
+ * and SAS Expander Page 0 to flag a downstream initiator
+ * when in simplified routing mode.
+ * Removed SATA Init Failure defines for DiscoveryStatus
+ * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
+ * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
+ * Added PortGroups, DmaGroup, and ControlGroup fields to
+ * SAS Device Page 0.
+ * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
+ * Unit Page 6.
+ * Added expander reduced functionality data to SAS
+ * Expander Page 0.
+ * Added SAS PHY Page 2 and SAS PHY Page 3.
+ * 07-30-09 02.00.12 Added IO Unit Page 7.
+ * Added new device ids.
+ * Added SAS IO Unit Page 5.
+ * Added partial and slumber power management capable flags
+ * to SAS Device Page 0 Flags field.
+ * Added PhyInfo defines for power condition.
+ * Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ * Added IO Unit Page 8, IO Unit Page 9,
+ * and IO Unit Page 10.
+ * Added SASNotifyPrimitiveMasks field to
+ * MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up bit
+ * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO
+ * Unit Page 4.
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
+ * Incorporating additions for MPI v2.5.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_CNFG_H
+#define MPI2_CNFG_H
+
+/*****************************************************************************
+* Configuration Page Header and defines
+*****************************************************************************/
+
+/*Config Page Header */
+typedef struct _MPI2_CONFIG_PAGE_HEADER {
+ U8 PageVersion; /*0x00 */
+ U8 PageLength; /*0x01 */
+ U8 PageNumber; /*0x02 */
+ U8 PageType; /*0x03 */
+} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER,
+ Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t;
+
+typedef union _MPI2_CONFIG_PAGE_HEADER_UNION {
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ U8 Bytes[4];
+ U16 Word16[2];
+ U32 Word32;
+} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
+ Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion;
+
+/*Extended Config Page Header */
+typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER {
+ U8 PageVersion; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 PageNumber; /*0x02 */
+ U8 PageType; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 Reserved2; /*0x07 */
+} MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ *PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ Mpi2ConfigExtendedPageHeader_t,
+ *pMpi2ConfigExtendedPageHeader_t;
+
+typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
+ U8 Bytes[8];
+ U16 Word16[4];
+ U32 Word32[2];
+} MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ *PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ Mpi2ConfigPageExtendedHeaderUnion,
+ *pMpi2ConfigPageExtendedHeaderUnion;
+
+
+/*PageType field values */
+#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI2_CONFIG_PAGEATTR_MASK (0xF0)
+
+#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI2_CONFIG_PAGETYPE_IOC (0x01)
+#define MPI2_CONFIG_PAGETYPE_BIOS (0x02)
+#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
+#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09)
+#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
+#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F)
+#define MPI2_CONFIG_PAGETYPE_MASK (0x0F)
+
+#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF)
+
+
+/*ExtPageType field values */
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
+#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14)
+#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15)
+#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16)
+#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
+#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
+
+
+/*****************************************************************************
+* PageAddress defines
+*****************************************************************************/
+
+/*RAID Volume PageAddress format */
+#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*RAID Physical Disk PageAddress format */
+#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000)
+#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000)
+#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000)
+
+#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF)
+#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF)
+
+
+/*SAS Expander PageAddress format */
+#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000)
+
+#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+
+
+/*SAS Device PageAddress format */
+#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+
+#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*SAS PHY PageAddress format */
+#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000)
+
+#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF)
+#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF)
+
+
+/*SAS Port PageAddress format */
+#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+
+#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF)
+
+
+/*SAS Enclosure PageAddress format */
+#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*RAID Configuration PageAddress format */
+#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000)
+#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000)
+#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000)
+
+#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF)
+
+
+/*Driver Persistent Mapping PageAddress format */
+#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000)
+
+#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000)
+#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16)
+#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF)
+
+
+/*Ethernet PageAddress format */
+#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000)
+
+#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
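
As an example of composing a PageAddress, iterating SAS Device Page 0 uses the "get next handle" form with the previously returned handle in the low 16 bits (drivers conventionally start the walk at 0xFFFF); a hypothetical helper:

static u32 example_sas_device_page_address(u16 prev_handle)
{
	return MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE |
	       ((u32)prev_handle & MPI2_SAS_DEVICE_PGAD_HANDLE_MASK);
}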
+
+
+
+/****************************************************************************
+* Configuration messages
+****************************************************************************/
+
+/*Configuration Request Message */
+typedef struct _MPI2_CONFIG_REQUEST {
+ U8 Action; /*0x00 */
+ U8 SGLFlags; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 ProxyVF_ID; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ U32 Reserved3; /*0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */
+ U32 PageAddress; /*0x18 */
+ MPI2_SGE_IO_UNION PageBufferSGE; /*0x1C */
+} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST,
+ Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t;
+
+/*values for the Action field */
+#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
+#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
+#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05)
+#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
+#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07)
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
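+/*
+ *Illustrative sketch, not part of the MPI specification: a configuration
+ *access is normally a two-step sequence, a PAGE_HEADER action to fetch the
+ *page header followed by a read or write action that reuses that header and
+ *a PageAddress. MPI2_FUNCTION_CONFIG is assumed to come from mpi2.h; the
+ *helper name is hypothetical, and real host code also handles endian
+ *conversion of the multi-byte fields.
+ */
+static inline void
+mpi2_example_build_header_request(MPI2_CONFIG_REQUEST *request,
+	U8 page_type, U8 page_number, U32 page_address)
+{
+	*request = (MPI2_CONFIG_REQUEST) { 0 };
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	request->Header.PageType = page_type;
+	request->Header.PageNumber = page_number;
+	request->PageAddress = page_address;
+}
+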
+
+/*Config Reply Message */
+typedef struct _MPI2_CONFIG_REPLY {
+ U8 Action; /*0x00 */
+ U8 SGLFlags; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 Reserved2; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */
+} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY,
+ Mpi2ConfigReply_t, *pMpi2ConfigReply_t;
+
+
+
+/*****************************************************************************
+*
+* C o n f i g u r a t i o n P a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Manufacturing Config pages
+****************************************************************************/
+
+#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+
+/*MPI v2.0 SAS products */
+#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
+#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072)
+#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074)
+#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076)
+#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077)
+#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
+#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
+
+#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E)
+
+#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
+#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
+#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
+#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
+#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
+#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
+#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086)
+#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087)
+#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
+
+/*MPI v2.5 SAS products */
+#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096)
+#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097)
+#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090)
+#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091)
+#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094)
+#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095)
+
+
+
+
+/*Manufacturing Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 ChipName[16]; /*0x04 */
+ U8 ChipRevision[8]; /*0x14 */
+ U8 BoardName[16]; /*0x1C */
+ U8 BoardAssembly[16]; /*0x2C */
+ U8 BoardTracerNumber[16]; /*0x3C */
+} MPI2_CONFIG_PAGE_MAN_0,
+ *PTR_MPI2_CONFIG_PAGE_MAN_0,
+ Mpi2ManufacturingPage0_t,
+ *pMpi2ManufacturingPage0_t;
+
+#define MPI2_MANUFACTURING0_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 VPD[256]; /*0x04 */
+} MPI2_CONFIG_PAGE_MAN_1,
+ *PTR_MPI2_CONFIG_PAGE_MAN_1,
+ Mpi2ManufacturingPage1_t,
+ *pMpi2ManufacturingPage1_t;
+
+#define MPI2_MANUFACTURING1_PAGEVERSION (0x00)
+
+
+typedef struct _MPI2_CHIP_REVISION_ID {
+ U16 DeviceID; /*0x00 */
+ U8 PCIRevisionID; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID,
+ Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t;
+
+
+/*Manufacturing Page 2 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
+#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_2 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
+ U32
+ HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_2,
+ *PTR_MPI2_CONFIG_PAGE_MAN_2,
+ Mpi2ManufacturingPage2_t,
+ *pMpi2ManufacturingPage2_t;
+
+#define MPI2_MANUFACTURING2_PAGEVERSION (0x00)
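+
+/*
+ *Illustrative sketch (hypothetical helper): because HwSettings is declared
+ *with a single element, host code sizes the real page from Header.PageLength,
+ *which is reported in 4-byte units, rather than from sizeof().
+ */
+static inline U32
+mpi2_example_man_page_2_size(const MPI2_CONFIG_PAGE_MAN_2 *page)
+{
+	return (U32)page->Header.PageLength * 4;
+}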
+
+
+/*Manufacturing Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_3_INFO_WORDS
+#define MPI2_MAN_PAGE_3_INFO_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
+ U32
+ Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_3,
+ *PTR_MPI2_CONFIG_PAGE_MAN_3,
+ Mpi2ManufacturingPage3_t,
+ *pMpi2ManufacturingPage3_t;
+
+#define MPI2_MANUFACTURING3_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 4 */
+
+typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS {
+ U8 PowerSaveFlags; /*0x00 */
+ U8 InternalOperationsSleepTime; /*0x01 */
+ U8 InternalOperationsRunTime; /*0x02 */
+ U8 HostIdleTime; /*0x03 */
+} MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ Mpi2ManPage4PwrSaveSettings_t,
+ *pMpi2ManPage4PwrSaveSettings_t;
+
+/*defines for the PowerSaveFlags field */
+#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03)
+#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00)
+#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01)
+#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02)
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_4 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Flags; /*0x08 */
+ U8 InquirySize; /*0x0C */
+ U8 Reserved2; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ U8 InquiryData[56]; /*0x10 */
+ U32 RAID0VolumeSettings; /*0x48 */
+ U32 RAID1EVolumeSettings; /*0x4C */
+ U32 RAID1VolumeSettings; /*0x50 */
+ U32 RAID10VolumeSettings; /*0x54 */
+ U32 Reserved4; /*0x58 */
+ U32 Reserved5; /*0x5C */
+ MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */
+ U8 MaxOCEDisks; /*0x64 */
+ U8 ResyncRate; /*0x65 */
+ U16 DataScrubDuration; /*0x66 */
+ U8 MaxHotSpares; /*0x68 */
+ U8 MaxPhysDisksPerVol; /*0x69 */
+ U8 MaxPhysDisks; /*0x6A */
+ U8 MaxVolumes; /*0x6B */
+} MPI2_CONFIG_PAGE_MAN_4,
+ *PTR_MPI2_CONFIG_PAGE_MAN_4,
+ Mpi2ManufacturingPage4_t,
+ *pMpi2ManufacturingPage4_t;
+
+#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A)
+
+/*Manufacturing Page 4 Flags field */
+#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000)
+#define MPI2_MANPAGE4_METADATA_512MB (0x00000000)
+
+#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000)
+#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000)
+#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000)
+
+#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00)
+#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000)
+#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400)
+#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800)
+#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00)
+
+#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300)
+#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000)
+#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100)
+#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200)
+
+#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080)
+#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040)
+#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020)
+#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010)
+#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008)
+#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004)
+#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002)
+#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001)
+
+
+/*Manufacturing Page 5 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
+#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_MANUFACTURING5_ENTRY {
+ U64 WWID; /*0x00 */
+ U64 DeviceName; /*0x08 */
+} MPI2_MANUFACTURING5_ENTRY,
+ *PTR_MPI2_MANUFACTURING5_ENTRY,
+ Mpi2Manufacturing5Entry_t,
+ *pMpi2Manufacturing5Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_MANUFACTURING5_ENTRY
+ Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_MAN_5,
+ *PTR_MPI2_CONFIG_PAGE_MAN_5,
+ Mpi2ManufacturingPage5_t,
+ *pMpi2ManufacturingPage5_t;
+
+#define MPI2_MANUFACTURING5_PAGEVERSION (0x03)
+
+
+/*Manufacturing Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_6,
+ *PTR_MPI2_CONFIG_PAGE_MAN_6,
+ Mpi2ManufacturingPage6_t,
+ *pMpi2ManufacturingPage6_t;
+
+#define MPI2_MANUFACTURING6_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 7 */
+
+typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
+ U32 Pinout; /*0x00 */
+ U8 Connector[16]; /*0x04 */
+ U8 Location; /*0x14 */
+ U8 ReceptacleID; /*0x15 */
+ U16 Slot; /*0x16 */
+ U32 Reserved2; /*0x18 */
+} MPI2_MANPAGE7_CONNECTOR_INFO,
+ *PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
+ Mpi2ManPage7ConnectorInfo_t,
+ *pMpi2ManPage7ConnectorInfo_t;
+
+/*defines for the Pinout field */
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+
+/*defines for the Location field */
+#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
+#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02)
+#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04)
+#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08)
+#define MPI2_MANPAGE7_LOCATION_AUTO (0x10)
+#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
+#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Flags; /*0x0C */
+ U8 EnclosureName[16]; /*0x10 */
+ U8 NumPhys; /*0x20 */
+ U8 Reserved3; /*0x21 */
+ U16 Reserved4; /*0x22 */
+ MPI2_MANPAGE7_CONNECTOR_INFO
+ ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */
+} MPI2_CONFIG_PAGE_MAN_7,
+ *PTR_MPI2_CONFIG_PAGE_MAN_7,
+ Mpi2ManufacturingPage7_t,
+ *pMpi2ManufacturingPage7_t;
+
+#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
+
+/*defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+
+
+/*
+ *Generic structure to use for product-specific manufacturing pages
+ *(currently Manufacturing Page 8 through Manufacturing Page 31).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_PS {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_MAN_PS,
+ Mpi2ManufacturingPagePS_t,
+ *pMpi2ManufacturingPagePS_t;
+
+#define MPI2_MANUFACTURING8_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING9_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING10_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING11_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING12_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING13_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING14_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING15_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING16_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING17_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING18_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING19_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING20_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING21_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING22_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING23_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING24_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING25_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING26_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING27_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING28_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING29_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING30_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING31_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* IO Unit Config Pages
+****************************************************************************/
+
+/*IO Unit Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64 UniqueValue; /*0x04 */
+ MPI2_VERSION_UNION NvdataVersionDefault; /*0x08 */
+ MPI2_VERSION_UNION NvdataVersionPersistent; /*0x0A */
+} MPI2_CONFIG_PAGE_IO_UNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0,
+ Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t;
+
+#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02)
+
+
+/*IO Unit Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+} MPI2_CONFIG_PAGE_IO_UNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1,
+ Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t;
+
+#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
+
+/*IO Unit Page 1 Flags defines */
+#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
+#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
+#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
+#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
+#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
+#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400)
+#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
+#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
+#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
+#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
+
+
+/*IO Unit Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for GPIOCount at runtime.
+ */
+#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 GPIOCount; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16
+ GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */
+} MPI2_CONFIG_PAGE_IO_UNIT_3,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3,
+ Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t;
+
+#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01)
+
+/*defines for IO Unit Page 3 GPIOVal field */
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC)
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001)
+
+
+/*IO Unit Page 5 */
+
+/*
+ *Upper layer code (drivers, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumDmaEngines at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
+#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64
+ RaidAcceleratorBufferBaseAddress; /*0x04 */
+ U64
+ RaidAcceleratorBufferSize; /*0x0C */
+ U64
+ RaidAcceleratorControlBaseAddress; /*0x14 */
+ U8 RAControlSize; /*0x1C */
+ U8 NumDmaEngines; /*0x1D */
+ U8 RAMinControlSize; /*0x1E */
+ U8 RAMaxControlSize; /*0x1F */
+ U32 Reserved1; /*0x20 */
+ U32 Reserved2; /*0x24 */
+ U32 Reserved3; /*0x28 */
+ U32
+ DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */
+} MPI2_CONFIG_PAGE_IO_UNIT_5,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5,
+ Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t;
+
+#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
+
+/*defines for IO Unit Page 5 DmaEngineCapabilities field */
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00)
+#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
+
+#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
+#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004)
+#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002)
+#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001)
+
+
+/*IO Unit Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 Flags; /*0x04 */
+ U8 RAHostControlSize; /*0x06 */
+ U8 Reserved0; /*0x07 */
+ U64
+ RaidAcceleratorHostControlBaseAddress; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+ U32 Reserved3; /*0x18 */
+} MPI2_CONFIG_PAGE_IO_UNIT_6,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6,
+ Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t;
+
+#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00)
+
+/*defines for IO Unit Page 6 Flags field */
+#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001)
+
+
+/*IO Unit Page 7 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 CurrentPowerMode; /*0x04 */
+ U8 PreviousPowerMode; /*0x05 */
+ U8 PCIeWidth; /*0x06 */
+ U8 PCIeSpeed; /*0x07 */
+ U32 ProcessorState; /*0x08 */
+ U32
+ PowerManagementCapabilities; /*0x0C */
+ U16 IOCTemperature; /*0x10 */
+ U8
+ IOCTemperatureUnits; /*0x12 */
+ U8 IOCSpeed; /*0x13 */
+ U16 BoardTemperature; /*0x14 */
+ U8
+ BoardTemperatureUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+} MPI2_CONFIG_PAGE_IO_UNIT_7,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
+ Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
+
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
+
+/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
+#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0)
+#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00)
+#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40)
+#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80)
+#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0)
+
+#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01)
+#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04)
+#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05)
+#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06)
+
+
+/*defines for IO Unit Page 7 PCIeWidth field */
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
+
+/*defines for IO Unit Page 7 PCIeSpeed field */
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
+
+/*defines for IO Unit Page 7 ProcessorState field */
+#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
+#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0)
+
+#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01)
+#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02)
+
+/*defines for IO Unit Page 7 PowerManagementCapabilities field */
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001)
+
+/*obsolete names for the PowerManagementCapabilities bits (above) */
+#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */
+
+
+/*defines for IO Unit Page 7 IOCTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02)
+
+/*defines for IO Unit Page 7 IOCSpeed field */
+#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
+
+/*defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
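+
+/*
+ *Illustrative sketch (hypothetical helper): IOCTemperature is only meaningful
+ *when IOCTemperatureUnits reports a unit. This returns the reading in degrees
+ *Celsius, converting from Fahrenheit when needed, or a negative value when no
+ *sensor is present.
+ */
+static inline int
+mpi2_example_ioc_temp_celsius(const MPI2_CONFIG_PAGE_IO_UNIT_7 *page)
+{
+	switch (page->IOCTemperatureUnits) {
+	case MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS:
+		return page->IOCTemperature;
+	case MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT:
+		return ((int)page->IOCTemperature - 32) * 5 / 9;
+	default:
+		return -1;
+	}
+}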
+
+
+/*IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS (4)
+
+typedef struct _MPI2_IOUNIT8_SENSOR {
+ U16 Flags; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U16
+ Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR,
+ Mpi2IOUnit8Sensor_t, *pMpi2IOUnit8Sensor_t;
+
+/*defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U8 NumSensors; /*0x0C */
+ U8 PollingInterval; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_IOUNIT8_SENSOR
+ Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+ Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00)
+
+
+/*IO Unit Page 9 */
+
+typedef struct _MPI2_IOUNIT9_SENSOR {
+ U16 CurrentTemperature; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U8 Flags; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U16 Reserved3; /*0x06 */
+ U32 Reserved4; /*0x08 */
+ U32 Reserved5; /*0x0C */
+} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR,
+ Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t;
+
+/*defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U8 NumSensors; /*0x0C */
+ U8 Reserved4; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_IOUNIT9_SENSOR
+ Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+ Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00)
+
+
+/*IO Unit Page 10 */
+
+typedef struct _MPI2_IOUNIT10_FUNCTION {
+ U8 CreditPercent; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_IOUNIT10_FUNCTION,
+ *PTR_MPI2_IOUNIT10_FUNCTION,
+ Mpi2IOUnit10Function_t,
+ *pMpi2IOUnit10Function_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumFunctions; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_IOUNIT10_FUNCTION
+ Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+ Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01)
+
+
+
+/****************************************************************************
+* IOC Config Pages
+****************************************************************************/
+
+/*IOC Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U16 VendorID; /*0x0C */
+ U16 DeviceID; /*0x0E */
+ U8 RevisionID; /*0x10 */
+ U8 Reserved3; /*0x11 */
+ U16 Reserved4; /*0x12 */
+ U32 ClassCode; /*0x14 */
+ U16 SubsystemVendorID; /*0x18 */
+ U16 SubsystemID; /*0x1A */
+} MPI2_CONFIG_PAGE_IOC_0,
+ *PTR_MPI2_CONFIG_PAGE_IOC_0,
+ Mpi2IOCPage0_t, *pMpi2IOCPage0_t;
+
+#define MPI2_IOCPAGE0_PAGEVERSION (0x02)
+
+
+/*IOC Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+ U32 CoalescingTimeout; /*0x08 */
+ U8 CoalescingDepth; /*0x0C */
+ U8 PCISlotNum; /*0x0D */
+ U8 PCIBusNum; /*0x0E */
+ U8 PCIDomainSegment; /*0x0F */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_CONFIG_PAGE_IOC_1,
+ *PTR_MPI2_CONFIG_PAGE_IOC_1,
+ Mpi2IOCPage1_t, *pMpi2IOCPage1_t;
+
+#define MPI2_IOCPAGE1_PAGEVERSION (0x05)
+
+/*defines for IOC Page 1 Flags field */
+#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001)
+
+#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF)
+
+/*IOC Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32
+ CapabilitiesFlags; /*0x04 */
+ U8 MaxDrivesRAID0; /*0x08 */
+ U8 MaxDrivesRAID1; /*0x09 */
+ U8
+ MaxDrivesRAID1E; /*0x0A */
+ U8
+ MaxDrivesRAID10; /*0x0B */
+ U8 MinDrivesRAID0; /*0x0C */
+ U8 MinDrivesRAID1; /*0x0D */
+ U8
+ MinDrivesRAID1E; /*0x0E */
+ U8
+ MinDrivesRAID10; /*0x0F */
+ U32 Reserved1; /*0x10 */
+ U8
+ MaxGlobalHotSpares; /*0x14 */
+ U8 MaxPhysDisks; /*0x15 */
+ U8 MaxVolumes; /*0x16 */
+ U8 MaxConfigs; /*0x17 */
+ U8 MaxOCEDisks; /*0x18 */
+ U8 Reserved2; /*0x19 */
+ U16 Reserved3; /*0x1A */
+ U32
+ SupportedStripeSizeMapRAID0; /*0x1C */
+ U32
+ SupportedStripeSizeMapRAID1E; /*0x20 */
+ U32
+ SupportedStripeSizeMapRAID10; /*0x24 */
+ U32 Reserved4; /*0x28 */
+ U32 Reserved5; /*0x2C */
+ U16
+ DefaultMetadataSize; /*0x30 */
+ U16 Reserved6; /*0x32 */
+ U16
+ MaxBadBlockTableEntries; /*0x34 */
+ U16 Reserved7; /*0x36 */
+ U32
+ IRNvsramVersion; /*0x38 */
+} MPI2_CONFIG_PAGE_IOC_6,
+ *PTR_MPI2_CONFIG_PAGE_IOC_6,
+ Mpi2IOCPage6_t, *pMpi2IOCPage6_t;
+
+#define MPI2_IOCPAGE6_PAGEVERSION (0x05)
+
+/*defines for IOC Page 6 CapabilitiesFlags */
+#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002)
+#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001)
+
+
+/*IOC Page 7 */
+
+#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32
+ EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */
+ U16 SASBroadcastPrimitiveMasks; /*0x18 */
+ U16 SASNotifyPrimitiveMasks; /*0x1A */
+ U32 Reserved3; /*0x1C */
+} MPI2_CONFIG_PAGE_IOC_7,
+ *PTR_MPI2_CONFIG_PAGE_IOC_7,
+ Mpi2IOCPage7_t, *pMpi2IOCPage7_t;
+
+#define MPI2_IOCPAGE7_PAGEVERSION (0x02)
+
+
+/*IOC Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumDevsPerEnclosure; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16 MaxPersistentEntries; /*0x08 */
+ U16 MaxNumPhysicalMappedIDs; /*0x0A */
+ U16 Flags; /*0x0C */
+ U16 Reserved3; /*0x0E */
+ U16 IRVolumeMappingFlags; /*0x10 */
+ U16 Reserved4; /*0x12 */
+ U32 Reserved5; /*0x14 */
+} MPI2_CONFIG_PAGE_IOC_8,
+ *PTR_MPI2_CONFIG_PAGE_IOC_8,
+ Mpi2IOCPage8_t, *pMpi2IOCPage8_t;
+
+#define MPI2_IOCPAGE8_PAGEVERSION (0x00)
+
+/*defines for IOC Page 8 Flags field */
+#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020)
+#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010)
+
+#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E)
+#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002)
+
+#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001)
+#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000)
+
+/*defines for IOC Page 8 IRVolumeMappingFlags */
+#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003)
+#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001)
+
+
+/****************************************************************************
+* BIOS Config Pages
+****************************************************************************/
+
+/*BIOS Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 BiosOptions; /*0x04 */
+ U32 IOCSettings; /*0x08 */
+ U32 Reserved1; /*0x0C */
+ U32 DeviceSettings; /*0x10 */
+ U16 NumberOfDevices; /*0x14 */
+ U16 UEFIVersion; /*0x16 */
+ U16 IOTimeoutBlockDevicesNonRM; /*0x18 */
+ U16 IOTimeoutSequential; /*0x1A */
+ U16 IOTimeoutOther; /*0x1C */
+ U16 IOTimeoutBlockDevicesRM; /*0x1E */
+} MPI2_CONFIG_PAGE_BIOS_1,
+ *PTR_MPI2_CONFIG_PAGE_BIOS_1,
+ Mpi2BiosPage1_t, *pMpi2BiosPage1_t;
+
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
+
+/*values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
+#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
+#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004)
+
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
+
+/*values for BIOS Page 1 IOCSettings field */
+#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
+#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0)
+#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040)
+#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030)
+#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010)
+#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020)
+#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030)
+
+#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
+
+/*values for BIOS Page 1 DeviceSettings field */
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
+
+/*defines for BIOS Page 1 UEFIVersion field */
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00)
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)
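+
+/*
+ *Illustrative sketch (hypothetical helpers): UEFIVersion packs the major
+ *version in the upper byte and the minor version in the lower byte.
+ */
+static inline U8
+mpi2_example_uefi_major(U16 uefi_version)
+{
+	return (U8)((uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK) >>
+	    MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT);
+}
+
+static inline U8
+mpi2_example_uefi_minor(U16 uefi_version)
+{
+	return (U8)((uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK) >>
+	    MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT);
+}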
+
+
+
+/*BIOS Page 2 */
+
+typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER {
+ U32 Reserved1; /*0x00 */
+ U32 Reserved2; /*0x04 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ Mpi2BootDeviceAdapterOrder_t,
+ *pMpi2BootDeviceAdapterOrder_t;
+
+typedef struct _MPI2_BOOT_DEVICE_SAS_WWID {
+ U64 SASAddress; /*0x00 */
+ U8 LUN[8]; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_BOOT_DEVICE_SAS_WWID,
+ *PTR_MPI2_BOOT_DEVICE_SAS_WWID,
+ Mpi2BootDeviceSasWwid_t,
+ *pMpi2BootDeviceSasWwid_t;
+
+typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT {
+ U64 EnclosureLogicalID; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U16 SlotNumber; /*0x10 */
+ U16 Reserved3; /*0x12 */
+ U32 Reserved4; /*0x14 */
+} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ Mpi2BootDeviceEnclosureSlot_t,
+ *pMpi2BootDeviceEnclosureSlot_t;
+
+typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME {
+ U64 DeviceName; /*0x00 */
+ U8 LUN[8]; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_BOOT_DEVICE_DEVICE_NAME,
+ *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
+ Mpi2BootDeviceDeviceName_t,
+ *pMpi2BootDeviceDeviceName_t;
+
+typedef union _MPI2_BIOSPAGE2_BOOT_DEVICE {
+ MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
+ MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+ MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
+} MPI2_BIOSPAGE2_BOOT_DEVICE,
+ *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
+ Mpi2BiosPage2BootDevice_t,
+ *pMpi2BiosPage2BootDevice_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_2 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ U32 Reserved4; /*0x10 */
+ U32 Reserved5; /*0x14 */
+ U32 Reserved6; /*0x18 */
+ U8 ReqBootDeviceForm; /*0x1C */
+ U8 Reserved7; /*0x1D */
+ U16 Reserved8; /*0x1E */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */
+ U8 ReqAltBootDeviceForm; /*0x38 */
+ U8 Reserved9; /*0x39 */
+ U16 Reserved10; /*0x3A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */
+ U8 CurrentBootDeviceForm; /*0x58 */
+ U8 Reserved11; /*0x59 */
+ U16 Reserved12; /*0x5A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */
+} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2,
+ Mpi2BiosPage2_t, *pMpi2BiosPage2_t;
+
+#define MPI2_BIOSPAGE2_PAGEVERSION (0x04)
+
+/*values for BIOS Page 2 BootDeviceForm fields */
+#define MPI2_BIOSPAGE2_FORM_MASK (0x0F)
+#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
+#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05)
+#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
+#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
+
+
+/*BIOS Page 3 */
+
+typedef struct _MPI2_ADAPTER_INFO {
+ U8 PciBusNumber; /*0x00 */
+ U8 PciDeviceAndFunctionNumber; /*0x01 */
+ U16 AdapterFlags; /*0x02 */
+} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO,
+ Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t;
+
+#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 GlobalFlags; /*0x04 */
+ U32 BiosVersion; /*0x08 */
+ MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */
+ U32 Reserved1; /*0x1C */
+} MPI2_CONFIG_PAGE_BIOS_3,
+ *PTR_MPI2_CONFIG_PAGE_BIOS_3,
+ Mpi2BiosPage3_t, *pMpi2BiosPage3_t;
+
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x00)
+
+/*values for BIOS Page 3 GlobalFlags */
+#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010)
+
+#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/*BIOS Page 4 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
+#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_BIOS4_ENTRY {
+ U64 ReassignmentWWID; /*0x00 */
+ U64 ReassignmentDeviceName; /*0x08 */
+} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY,
+ Mpi2Bios4Entry_t, *pMpi2Bios4Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_4 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ MPI2_BIOS4_ENTRY
+ Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */
+} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4,
+ Mpi2BiosPage4_t, *pMpi2BiosPage4_t;
+
+#define MPI2_BIOSPAGE4_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+/*RAID Volume Page 0 */
+
+typedef struct _MPI2_RAIDVOL0_PHYS_DISK {
+ U8 RAIDSetNum; /*0x00 */
+ U8 PhysDiskMap; /*0x01 */
+ U8 PhysDiskNum; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK,
+ Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAIDVOL0_SETTINGS {
+ U16 Settings; /*0x00 */
+ U8 HotSparePool; /*0x01 */
+ U8 Reserved; /*0x02 */
+} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS,
+ Mpi2RaidVol0Settings_t,
+ *pMpi2RaidVol0Settings_t;
+
+/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/*RAID Volume Page 0 VolumeSettings defines */
+#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008)
+#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)
+
+#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003)
+#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000)
+#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001)
+#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDisks at runtime.
+ */
+#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
+#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U8 VolumeState; /*0x06 */
+ U8 VolumeType; /*0x07 */
+ U32 VolumeStatusFlags; /*0x08 */
+ MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */
+ U64 MaxLBA; /*0x10 */
+ U32 StripeSize; /*0x18 */
+ U16 BlockSize; /*0x1C */
+ U16 Reserved1; /*0x1E */
+ U8 SupportedPhysDisks;/*0x20 */
+ U8 ResyncRate; /*0x21 */
+ U16 DataScrubDuration; /*0x22 */
+ U8 NumPhysDisks; /*0x24 */
+ U8 Reserved2; /*0x25 */
+ U8 Reserved3; /*0x26 */
+ U8 InactiveStatus; /*0x27 */
+ MPI2_RAIDVOL0_PHYS_DISK
+ PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */
+} MPI2_CONFIG_PAGE_RAID_VOL_0,
+ *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0,
+ Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t;
+
+#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A)
+
+/*values for RAID VolumeState */
+#define MPI2_RAID_VOL_STATE_MISSING (0x00)
+#define MPI2_RAID_VOL_STATE_FAILED (0x01)
+#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02)
+#define MPI2_RAID_VOL_STATE_ONLINE (0x03)
+#define MPI2_RAID_VOL_STATE_DEGRADED (0x04)
+#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05)
+
+/*values for RAID VolumeType */
+#define MPI2_RAID_VOL_TYPE_RAID0 (0x00)
+#define MPI2_RAID_VOL_TYPE_RAID1E (0x01)
+#define MPI2_RAID_VOL_TYPE_RAID1 (0x02)
+#define MPI2_RAID_VOL_TYPE_RAID10 (0x05)
+#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF)
+
+/*values for RAID Volume Page 0 VolumeStatusFlags field */
+#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080)
+#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004)
+#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001)
+
+/*values for RAID Volume Page 0 SupportedPhysDisks field */
+#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08)
+#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04)
+#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02)
+#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01)
+
+/*values for RAID Volume Page 0 InactiveStatus field */
+#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
+#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01)
+#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03)
+#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05)
+#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06)
+
+
+/*RAID Volume Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U16 Reserved0; /*0x06 */
+ U8 GUID[24]; /*0x08 */
+ U8 Name[16]; /*0x20 */
+ U64 WWID; /*0x30 */
+ U32 Reserved1; /*0x38 */
+ U32 Reserved2; /*0x3C */
+} MPI2_CONFIG_PAGE_RAID_VOL_1,
+ *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1,
+ Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t;
+
+#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03)
+
+
+/****************************************************************************
+* RAID Physical Disk Config Pages
+****************************************************************************/
+
+/*RAID Physical Disk Page 0 */
+
+typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS {
+ U16 Reserved1; /*0x00 */
+ U8 HotSparePool; /*0x02 */
+ U8 Reserved2; /*0x03 */
+} MPI2_RAIDPHYSDISK0_SETTINGS,
+ *PTR_MPI2_RAIDPHYSDISK0_SETTINGS,
+ Mpi2RaidPhysDisk0Settings_t,
+ *pMpi2RaidPhysDisk0Settings_t;
+
+/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */
+
+typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA {
+ U8 VendorID[8]; /*0x00 */
+ U8 ProductID[16]; /*0x08 */
+ U8 ProductRevLevel[4]; /*0x18 */
+ U8 SerialNum[32]; /*0x1C */
+} MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ Mpi2RaidPhysDisk0InquiryData_t,
+ *pMpi2RaidPhysDisk0InquiryData_t;
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 PhysDiskNum; /*0x07 */
+ MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */
+ U32 Reserved3; /*0x4C */
+ U8 PhysDiskState; /*0x50 */
+ U8 OfflineReason; /*0x51 */
+ U8 IncompatibleReason; /*0x52 */
+ U8 PhysDiskAttributes; /*0x53 */
+ U32 PhysDiskStatusFlags;/*0x54 */
+ U64 DeviceMaxLBA; /*0x58 */
+ U64 HostMaxLBA; /*0x60 */
+ U64 CoercedMaxLBA; /*0x68 */
+ U16 BlockSize; /*0x70 */
+ U16 Reserved5; /*0x72 */
+ U32 Reserved6; /*0x74 */
+} MPI2_CONFIG_PAGE_RD_PDISK_0,
+ *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0,
+ Mpi2RaidPhysDiskPage0_t,
+ *pMpi2RaidPhysDiskPage0_t;
+
+#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05)
+
+/*PhysDiskState defines */
+#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00)
+#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01)
+#define MPI2_RAID_PD_STATE_OFFLINE (0x02)
+#define MPI2_RAID_PD_STATE_ONLINE (0x03)
+#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04)
+#define MPI2_RAID_PD_STATE_DEGRADED (0x05)
+#define MPI2_RAID_PD_STATE_REBUILDING (0x06)
+#define MPI2_RAID_PD_STATE_OPTIMAL (0x07)
+
+/*OfflineReason defines */
+#define MPI2_PHYSDISK0_ONLINE (0x00)
+#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03)
+#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04)
+#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06)
+#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF)
+
+/*IncompatibleReason defines */
+#define MPI2_PHYSDISK0_COMPATIBLE (0x00)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF)
+
+/*PhysDiskAttributes defines */
+#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C)
+#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08)
+#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04)
+
+#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03)
+#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02)
+#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01)
+
+/*PhysDiskStatusFlags defines */
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020)
+#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000)
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008)
+#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004)
+#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001)
+
+
+/*RAID Physical Disk Page 1 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDiskPaths at runtime.
+ */
+#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
+#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
+#endif
+
+typedef struct _MPI2_RAIDPHYSDISK1_PATH {
+ U16 DevHandle; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U64 WWID; /*0x04 */
+ U64 OwnerWWID; /*0x0C */
+ U8 OwnerIdentifier; /*0x14 */
+ U8 Reserved2; /*0x15 */
+ U16 Flags; /*0x16 */
+} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH,
+ Mpi2RaidPhysDisk1Path_t,
+ *pMpi2RaidPhysDisk1Path_t;
+
+/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */
+#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004)
+#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002)
+#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhysDiskPaths; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 Reserved1; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ MPI2_RAIDPHYSDISK1_PATH
+ PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */
+} MPI2_CONFIG_PAGE_RD_PDISK_1,
+ *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1,
+ Mpi2RaidPhysDiskPage1_t,
+ *pMpi2RaidPhysDiskPage1_t;
+
+#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* values for fields used by several types of SAS Config Pages
+****************************************************************************/
+
+/*values for NegotiatedLinkRates fields */
+#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0)
+#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4)
+#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+/*link rates used for Negotiated Physical and Logical Link Rate */
+#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
+#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
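+
+/*
+ *Illustrative sketch (hypothetical helpers): a NegotiatedLinkRate byte holds
+ *the logical rate in the upper nibble and the physical rate in the lower
+ *nibble; both decode to the MPI2_SAS_NEG_LINK_RATE_ values above.
+ */
+static inline U8
+mpi2_example_logical_link_rate(U8 negotiated_link_rate)
+{
+	return (U8)((negotiated_link_rate &
+	    MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL) >>
+	    MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL);
+}
+
+static inline U8
+mpi2_example_physical_link_rate(U8 negotiated_link_rate)
+{
+	return negotiated_link_rate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL;
+}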
+
+
+/*values for AttachedPhyInfo fields */
+#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+
+#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F)
+#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+
+
+/*values for PhyInfo fields */
+#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
+
+#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27)
+#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
+
+#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000)
+#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000)
+#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000)
+#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
+
+#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000)
+#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+
+#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+
+#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00)
+#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8)
+
+#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0)
+#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000)
+#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010)
+#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020)
+
+
+/*values for SAS ProgrammedLinkRate fields */
+#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
+#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
+
+
+/*values for SAS HwLinkRate fields */
+#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
+#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
+
+
+
+/****************************************************************************
+* SAS IO Unit Config Pages
+****************************************************************************/
+
+/*SAS IO Unit Page 0 */
+
+typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA {
+ U8 Port; /*0x00 */
+ U8 PortFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 NegotiatedLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo;/*0x04 */
+ U16 AttachedDevHandle; /*0x08 */
+ U16 ControllerDevHandle; /*0x0A */
+ U32 DiscoveryStatus; /*0x0C */
+ U32 Reserved; /*0x10 */
+} MPI2_SAS_IO_UNIT0_PHY_DATA,
+ *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA,
+ Mpi2SasIOUnit0PhyData_t,
+ *pMpi2SasIOUnit0PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
+#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1;/*0x08 */
+ U8 NumPhys; /*0x0C */
+ U8 Reserved2;/*0x0D */
+ U16 Reserved3;/*0x0E */
+ MPI2_SAS_IO_UNIT0_PHY_DATA
+ PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t;
+
+#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05)
+
+/*values for SAS IO Unit Page 0 PortFlags */
+#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
+#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+
+/*values for SAS IO Unit Page 0 PhyFlags */
+#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
+#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*see mpi2_sas.h for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
+
+/*values for SAS IO Unit Page 0 DiscoveryStatus */
+#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001)
+
+
+/*SAS IO Unit Page 1 */
+
+typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA {
+ U8 Port; /*0x00 */
+ U8 PortFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 MaxMinLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo; /*0x04 */
+ U16 MaxTargetPortConnectTime; /*0x08 */
+ U16 Reserved1; /*0x0A */
+} MPI2_SAS_IO_UNIT1_PHY_DATA,
+ *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA,
+ Mpi2SasIOUnit1PhyData_t,
+ *pMpi2SasIOUnit1PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
+#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16
+ ControlFlags; /*0x08 */
+ U16
+ SASNarrowMaxQueueDepth; /*0x0A */
+ U16
+ AdditionalControlFlags; /*0x0C */
+ U16
+ SASWideMaxQueueDepth; /*0x0E */
+ U8
+ NumPhys; /*0x10 */
+ U8
+ SATAMaxQDepth; /*0x11 */
+ U8
+ ReportDeviceMissingDelay; /*0x12 */
+ U8
+ IODeviceMissingDelay; /*0x13 */
+ MPI2_SAS_IO_UNIT1_PHY_DATA
+ PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t;
+
+#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09)
+
+/*values for SAS IO Unit Page 1 ControlFlags */
+#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+
+#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600)
+#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2)
+
+#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
+
+/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+
+/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
+#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
+
+/*values for SAS IO Unit Page 1 PortFlags */
+#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/*values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
+#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+
+/*values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80)
+#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
+#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
+#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
+#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
+#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
+#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
+#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
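/*
 * Illustrative sketch, not part of this header: MaxMinLinkRate packs the
 * maximum programmed rate in the upper nibble and the minimum in the lower
 * nibble, so a phy allowed to negotiate 1.5-6.0 Gb/s would carry the value
 * built below.  The function name is hypothetical.
 */
static inline U8 example_max_min_link_rate_1_5_to_6_0(void)
{
	return (U8)((MPI2_SASIOUNIT1_MAX_RATE_6_0 & MPI2_SASIOUNIT1_MAX_RATE_MASK) |
		    (MPI2_SASIOUNIT1_MIN_RATE_1_5 & MPI2_SASIOUNIT1_MIN_RATE_MASK));
}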
+
+/*see mpi2_sas.h for values for
+ *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+/*SAS IO Unit Page 4 */
+
+typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP {
+ U8 MaxTargetSpinup; /*0x00 */
+ U8 SpinupDelay; /*0x01 */
+ U8 SpinupFlags; /*0x02 */
+ U8 Reserved1; /*0x03 */
+} MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ Mpi2SasIOUnit4SpinupGroup_t,
+ *pMpi2SasIOUnit4SpinupGroup_t;
+/*defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *four and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
+#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */
+ MPI2_SAS_IOUNIT4_SPINUP_GROUP
+ SpinupGroupParameters[4]; /*0x08 */
+ U32
+ Reserved1; /*0x18 */
+ U32
+ Reserved2; /*0x1C */
+ U32
+ Reserved3; /*0x20 */
+ U8
+ BootDeviceWaitTime; /*0x24 */
+ U8
+ Reserved4; /*0x25 */
+ U16
+ Reserved5; /*0x26 */
+ U8
+ NumPhys; /*0x28 */
+ U8
+ PEInitialSpinupDelay; /*0x29 */
+ U8
+ PEReplyDelay; /*0x2A */
+ U8
+ Flags; /*0x2B */
+ U8
+ PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */
+} MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t;
+
+#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02)
+
+/*defines for Flags field */
+#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01)
+
+/*defines for PHY field */
+#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
+
+
+/*SAS IO Unit Page 5 */
+
+typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
+ U8 ControlFlags; /*0x00 */
+ U8 PortWidthModGroup; /*0x01 */
+ U16 InactivityTimerExponent; /*0x02 */
+ U8 SATAPartialTimeout; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U8 SATASlumberTimeout; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U8 SASPartialTimeout; /*0x08 */
+ U8 Reserved4; /*0x09 */
+ U8 SASSlumberTimeout; /*0x0A */
+ U8 Reserved5; /*0x0B */
+} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ Mpi2SasIOUnit5PhyPmSettings_t,
+ *pMpi2SasIOUnit5PhyPmSettings_t;
+
+/*defines for ControlFlags field */
+#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+
+/*defines for PortWidthModGroup field */
+#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
+
+/*defines for InactivityTimerExponent field */
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0)
+
+#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7)
+#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5)
+#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4)
+#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2)
+#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1)
+#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
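/*
 * Illustrative sketch, not part of this header: InactivityTimerExponent packs
 * four 3-bit exponents (one per power-management mode) at the mask/shift
 * positions defined above.  The example selects a 10 ms idle timer for the
 * SAS modes and a 1 ms timer for the SATA modes; the name is hypothetical.
 */
static inline U16 example_inactivity_timer_exponent(void)
{
	return (U16)((MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS <<
		      MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER) |
		     (MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS <<
		      MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL) |
		     (MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND <<
		      MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER) |
		     (MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND <<
		      MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL));
}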
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
+#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x08 */
+ U8 Reserved1;/*0x09 */
+ U16 Reserved2;/*0x0A */
+ U32 Reserved3;/*0x0C */
+ MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
+ SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t;
+
+#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01)
+
+
+/*SAS IO Unit Page 6 */
+
+typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS {
+ U8 CurrentStatus; /*0x00 */
+ U8 CurrentModulation; /*0x01 */
+ U8 CurrentUtilization; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 Reserved2; /*0x04 */
+} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ Mpi2SasIOUnit6PortWidthModGroupStatus_t,
+ *pMpi2SasIOUnit6PortWidthModGroupStatus_t;
+
+/*defines for CurrentStatus field */
+#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00)
+#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01)
+#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02)
+#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03)
+#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04)
+#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07)
+
+/*defines for CurrentModulation field */
+#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00)
+#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01)
+#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02)
+#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
+#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U8 NumGroups; /*0x10 */
+ U8 Reserved3; /*0x11 */
+ U16 Reserved4; /*0x12 */
+ MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+ PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t;
+
+#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00)
+
+
+/*SAS IO Unit Page 7 */
+
+typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS {
+ U8 Flags; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 Threshold75Pct; /*0x04 */
+ U8 Threshold50Pct; /*0x05 */
+ U8 Threshold25Pct; /*0x06 */
+ U8 Reserved3; /*0x07 */
+} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ Mpi2SasIOUnit7PortWidthModGroupSettings_t,
+ *pMpi2SasIOUnit7PortWidthModGroupSettings_t;
+
+/*defines for Flags field */
+#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01)
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
+#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 SamplingInterval; /*0x08 */
+ U8 WindowLength; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U8 NumGroups; /*0x14 */
+ U8 Reserved4; /*0x15 */
+ U16 Reserved5; /*0x16 */
+ MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+ PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t;
+
+#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
+
+
+/*SAS IO Unit Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U32
+ PowerManagementCapabilities; /*0x0C */
+ U8
+ TxRxSleepStatus; /*0x10 */
+ U8
+ Reserved2; /*0x11 */
+ U16
+ Reserved3; /*0x12 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t;
+
+#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
+
+/*defines for PowerManagementCapabilities field */
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+
+/*defines for TxRxSleepStatus field */
+#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03)
+
+
+
+/*SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U64
+ TimeStamp; /*0x08 */
+ U32
+ Reserved1; /*0x10 */
+ U32
+ Reserved2; /*0x14 */
+ U32
+ FastPathPendedRequests; /*0x18 */
+ U32
+ FastPathUnPendedRequests; /*0x1C */
+ U32
+ FastPathHostRequestStarts; /*0x20 */
+ U32
+ FastPathFirmwareRequestStarts; /*0x24 */
+ U32
+ FastPathHostCompletions; /*0x28 */
+ U32
+ FastPathFirmwareCompletions; /*0x2C */
+ U32
+ NonFastPathRequestStarts; /*0x30 */
+ U32
+ NonFastPathHostCompletions; /*0x34 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+ Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+/*SAS Expander Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PhysicalPort; /*0x08 */
+ U8
+ ReportGenLength; /*0x09 */
+ U16
+ EnclosureHandle; /*0x0A */
+ U64
+ SASAddress; /*0x0C */
+ U32
+ DiscoveryStatus; /*0x14 */
+ U16
+ DevHandle; /*0x18 */
+ U16
+ ParentDevHandle; /*0x1A */
+ U16
+ ExpanderChangeCount; /*0x1C */
+ U16
+ ExpanderRouteIndexes; /*0x1E */
+ U8
+ NumPhys; /*0x20 */
+ U8
+ SASLevel; /*0x21 */
+ U16
+ Flags; /*0x22 */
+ U16
+ STPBusInactivityTimeLimit; /*0x24 */
+ U16
+ STPMaxConnectTimeLimit; /*0x26 */
+ U16
+ STP_SMP_NexusLossTime; /*0x28 */
+ U16
+ MaxNumRoutedSasAddresses; /*0x2A */
+ U64
+ ActiveZoneManagerSASAddress;/*0x2C */
+ U16
+ ZoneLockInactivityLimit; /*0x34 */
+ U16
+ Reserved1; /*0x36 */
+ U8
+ TimeToReducedFunc; /*0x38 */
+ U8
+ InitialTimeToReducedFunc; /*0x39 */
+ U8
+ MaxReducedFuncTime; /*0x3A */
+ U8
+ Reserved2; /*0x3B */
+} MPI2_CONFIG_PAGE_EXPANDER_0,
+ *PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
+ Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t;
+
+#define MPI2_SASEXPANDER0_PAGEVERSION (0x06)
+
+/*values for SAS Expander Page 0 DiscoveryStatus field */
+#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
+
+/*values for SAS Expander Page 0 Flags field */
+#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+
+
+/*SAS Expander Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PhysicalPort; /*0x08 */
+ U8
+ Reserved1; /*0x09 */
+ U16
+ Reserved2; /*0x0A */
+ U8
+ NumPhys; /*0x0C */
+ U8
+ Phy; /*0x0D */
+ U16
+ NumTableEntriesProgrammed; /*0x0E */
+ U8
+ ProgrammedLinkRate; /*0x10 */
+ U8
+ HwLinkRate; /*0x11 */
+ U16
+ AttachedDevHandle; /*0x12 */
+ U32
+ PhyInfo; /*0x14 */
+ U32
+ AttachedDeviceInfo; /*0x18 */
+ U16
+ ExpanderDevHandle; /*0x1C */
+ U8
+ ChangeCount; /*0x1E */
+ U8
+ NegotiatedLinkRate; /*0x1F */
+ U8
+ PhyIdentifier; /*0x20 */
+ U8
+ AttachedPhyIdentifier; /*0x21 */
+ U8
+ Reserved3; /*0x22 */
+ U8
+ DiscoveryInfo; /*0x23 */
+ U32
+ AttachedPhyInfo; /*0x24 */
+ U8
+ ZoneGroup; /*0x28 */
+ U8
+ SelfConfigStatus; /*0x29 */
+ U16
+ Reserved4; /*0x2A */
+} MPI2_CONFIG_PAGE_EXPANDER_1,
+ *PTR_MPI2_CONFIG_PAGE_EXPANDER_1,
+ Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t;
+
+#define MPI2_SASEXPANDER1_PAGEVERSION (0x02)
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*use MPI2_SAS_PHYINFO_ defines for the PhyInfo field */
+
+/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines
+ *used for the AttachedDeviceInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*values for SAS Expander Page 1 DiscoveryInfo field */
+#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+
+/****************************************************************************
+* SAS Device Config Pages
+****************************************************************************/
+
+/*SAS Device Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ Slot; /*0x08 */
+ U16
+ EnclosureHandle; /*0x0A */
+ U64
+ SASAddress; /*0x0C */
+ U16
+ ParentDevHandle; /*0x14 */
+ U8
+ PhyNum; /*0x16 */
+ U8
+ AccessStatus; /*0x17 */
+ U16
+ DevHandle; /*0x18 */
+ U8
+ AttachedPhyIdentifier; /*0x1A */
+ U8
+ ZoneGroup; /*0x1B */
+ U32
+ DeviceInfo; /*0x1C */
+ U16
+ Flags; /*0x20 */
+ U8
+ PhysicalPort; /*0x22 */
+ U8
+ MaxPortConnections; /*0x23 */
+ U64
+ DeviceName; /*0x24 */
+ U8
+ PortGroups; /*0x2C */
+ U8
+ DmaGroup; /*0x2D */
+ U8
+ ControlGroup; /*0x2E */
+ U8
+ Reserved1; /*0x2F */
+ U32
+ Reserved2; /*0x30 */
+ U32
+ Reserved3; /*0x34 */
+} MPI2_CONFIG_PAGE_SAS_DEV_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
+ Mpi2SasDevicePage0_t,
+ *pMpi2SasDevicePage0_t;
+
+#define MPI2_SASDEVICE0_PAGEVERSION (0x08)
+
+/*values for SAS Device Page 0 AccessStatus field */
+#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
+#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05)
+#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06)
+#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07)
+/*specific values for SATA Init failures */
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
+
+/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
+
+/*values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000)
+#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000)
+#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000)
+#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
+#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
+#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
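/*
 * Illustrative sketch, not part of this header: a driver can gate fast-path
 * submission on the Flags word of SAS Device Page 0.  le16_to_cpu() is the
 * Linux byte-order helper and assumes the page was fetched in its wire
 * (little-endian) format; the function name is hypothetical.
 */
static inline int example_device_is_fast_path_capable(const Mpi2SasDevicePage0_t *pg0)
{
	return (le16_to_cpu(pg0->Flags) &
		MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) != 0;
}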
+
+
+/*SAS Device Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U64
+ SASAddress; /*0x0C */
+ U32
+ Reserved2; /*0x14 */
+ U16
+ DevHandle; /*0x18 */
+ U16
+ Reserved3; /*0x1A */
+ U8
+ InitialRegDeviceFIS[20];/*0x1C */
+} MPI2_CONFIG_PAGE_SAS_DEV_1,
+ *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1,
+ Mpi2SasDevicePage1_t,
+ *pMpi2SasDevicePage1_t;
+
+#define MPI2_SASDEVICE1_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* SAS PHY Config Pages
+****************************************************************************/
+
+/*SAS PHY Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ OwnerDevHandle; /*0x08 */
+ U16
+ Reserved1; /*0x0A */
+ U16
+ AttachedDevHandle; /*0x0C */
+ U8
+ AttachedPhyIdentifier; /*0x0E */
+ U8
+ Reserved2; /*0x0F */
+ U32
+ AttachedPhyInfo; /*0x10 */
+ U8
+ ProgrammedLinkRate; /*0x14 */
+ U8
+ HwLinkRate; /*0x15 */
+ U8
+ ChangeCount; /*0x16 */
+ U8
+ Flags; /*0x17 */
+ U32
+ PhyInfo; /*0x18 */
+ U8
+ NegotiatedLinkRate; /*0x1C */
+ U8
+ Reserved3; /*0x1D */
+ U16
+ Reserved4; /*0x1E */
+} MPI2_CONFIG_PAGE_SAS_PHY_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0,
+ Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t;
+
+#define MPI2_SASPHY0_PAGEVERSION (0x03)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*values for SAS PHY Page 0 Flags field */
+#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+
+/*use MPI2_SAS_PHYINFO_ defines for the PhyInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/*SAS PHY Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U32
+ InvalidDwordCount; /*0x0C */
+ U32
+ RunningDisparityErrorCount; /*0x10 */
+ U32
+ LossDwordSynchCount; /*0x14 */
+ U32
+ PhyResetProblemCount; /*0x18 */
+} MPI2_CONFIG_PAGE_SAS_PHY_1,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1,
+ Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t;
+
+#define MPI2_SASPHY1_PAGEVERSION (0x01)
+
+
+/*SAS PHY Page 2 */
+
+typedef struct _MPI2_SASPHY2_PHY_EVENT {
+ U8 PhyEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 PhyEventInfo; /*0x04 */
+} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT,
+ Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t;
+
+/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
+#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U8
+ NumPhyEvents; /*0x0C */
+ U8
+ Reserved2; /*0x0D */
+ U16
+ Reserved3; /*0x0E */
+ MPI2_SASPHY2_PHY_EVENT
+ PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_2,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2,
+ Mpi2SasPhyPage2_t,
+ *pMpi2SasPhyPage2_t;
+
+#define MPI2_SASPHY2_PAGEVERSION (0x00)
+
+
+/*SAS PHY Page 3 */
+
+typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
+ U8 PhyEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 CounterType; /*0x04 */
+ U8 ThresholdWindow; /*0x05 */
+ U8 TimeUnits; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U32 EventThreshold; /*0x08 */
+ U16 ThresholdFlags; /*0x0C */
+ U16 Reserved4; /*0x0E */
+} MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ Mpi2SasPhy3PhyEventConfig_t,
+ *pMpi2SasPhy3PhyEventConfig_t;
+
+/*values for PhyEventCode field */
+#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0)
+#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
+
+/*values for the CounterType field */
+#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+/*values for the TimeUnits field */
+#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+
+/*values for the ThresholdFlags field */
+#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
+#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U8
+ NumPhyEvents; /*0x0C */
+ U8
+ Reserved2; /*0x0D */
+ U16
+ Reserved3; /*0x0E */
+ MPI2_SASPHY3_PHY_EVENT_CONFIG
+ PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_3,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3,
+ Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t;
+
+#define MPI2_SASPHY3_PAGEVERSION (0x00)
+
+
+/*SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ Reserved1; /*0x08 */
+ U8
+ Reserved2; /*0x0A */
+ U8
+ Flags; /*0x0B */
+ U8
+ InitialFrame[28]; /*0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+ Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION (0x00)
+
+/*values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
+
+
+
+
+/****************************************************************************
+* SAS Port Config Pages
+****************************************************************************/
+
+/*SAS Port Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PortNumber; /*0x08 */
+ U8
+ PhysicalPort; /*0x09 */
+ U8
+ PortWidth; /*0x0A */
+ U8
+ PhysicalPortWidth; /*0x0B */
+ U8
+ ZoneGroup; /*0x0C */
+ U8
+ Reserved1; /*0x0D */
+ U16
+ Reserved2; /*0x0E */
+ U64
+ SASAddress; /*0x10 */
+ U32
+ DeviceInfo; /*0x18 */
+ U32
+ Reserved3; /*0x1C */
+ U32
+ Reserved4; /*0x20 */
+} MPI2_CONFIG_PAGE_SAS_PORT_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0,
+ Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t;
+
+#define MPI2_SASPORT0_PAGEVERSION (0x00)
+
+/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */
+
+
+/****************************************************************************
+* SAS Enclosure Config Pages
+****************************************************************************/
+
+/*SAS Enclosure Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U64
+ EnclosureLogicalID; /*0x0C */
+ U16
+ Flags; /*0x14 */
+ U16
+ EnclosureHandle; /*0x16 */
+ U16
+ NumSlots; /*0x18 */
+ U16
+ StartSlot; /*0x1A */
+ U16
+ Reserved2; /*0x1C */
+ U16
+ SEPDevHandle; /*0x1E */
+ U32
+ Reserved3; /*0x20 */
+ U32
+ Reserved4; /*0x24 */
+} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
+
+#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03)
+
+/*values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+
+
+/****************************************************************************
+* Log Config Page
+****************************************************************************/
+
+/*Log Page 0 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLogEntries at runtime.
+ */
+#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
+#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
+#endif
+
+#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_LOG_0_ENTRY {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U16 LogSequence; /*0x0C */
+ U16 LogEntryQualifier; /*0x0E */
+ U8 VP_ID; /*0x10 */
+ U8 VF_ID; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ U8
+ LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */
+} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY,
+ Mpi2Log0Entry_t, *pMpi2Log0Entry_t;
+
+/*values for Log Page 0 LogEntry LogEntryQualifier field */
+#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000)
+#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001)
+#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002)
+#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000)
+#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF)
+
+typedef struct _MPI2_CONFIG_PAGE_LOG_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U16 NumLogEntries;/*0x10 */
+ U16 Reserved3; /*0x12 */
+ MPI2_LOG_0_ENTRY
+ LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */
+} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0,
+ Mpi2LogPage0_t, *pMpi2LogPage0_t;
+
+#define MPI2_LOG_0_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* RAID Config Page
+****************************************************************************/
+
+/*RAID Page 0 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumElements at runtime.
+ */
+#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
+#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
+#endif
+
+typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT {
+ U16 ElementFlags; /*0x00 */
+ U16 VolDevHandle; /*0x02 */
+ U8 HotSparePool; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 PhysDiskDevHandle; /*0x06 */
+} MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ Mpi2RaidConfig0ConfigElement_t,
+ *pMpi2RaidConfig0ConfigElement_t;
+
+/*values for the ElementFlags field */
+#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
+#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
+#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)
+
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumHotSpares; /*0x08 */
+ U8 NumPhysDisks; /*0x09 */
+ U8 NumVolumes; /*0x0A */
+ U8 ConfigNum; /*0x0B */
+ U32 Flags; /*0x0C */
+ U8 ConfigGUID[24]; /*0x10 */
+ U32 Reserved1; /*0x28 */
+ U8 NumElements; /*0x2C */
+ U8 Reserved2; /*0x2D */
+ U16 Reserved3; /*0x2E */
+ MPI2_RAIDCONFIG0_CONFIG_ELEMENT
+ ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */
+} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ Mpi2RaidConfigurationPage0_t,
+ *pMpi2RaidConfigurationPage0_t;
+
+#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00)
+
+/*values for RAID Configuration Page 0 Flags field */
+#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001)
+
+
+/****************************************************************************
+* Driver Persistent Mapping Config Pages
+****************************************************************************/
+
+/*Driver Persistent Mapping Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY {
+ U64 PhysicalIdentifier; /*0x00 */
+ U16 MappingInformation; /*0x08 */
+ U16 DeviceIndex; /*0x0A */
+ U32 PhysicalBitsMapping; /*0x0C */
+ U32 Reserved1; /*0x10 */
+} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */
+} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t;
+
+#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00)
+
+/*values for Driver Persistent Mapping Page 0 MappingInformation field */
+#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0)
+#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4)
+#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F)
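/*
 * Illustrative sketch, not part of this header: the enclosure slot number
 * occupies bits 4-10 of MappingInformation and is recovered with the mask and
 * shift above.  le16_to_cpu() assumes the entry was read in its wire
 * (little-endian) format; the function name is hypothetical.
 */
static inline unsigned int
example_drvmap0_slot(const Mpi2DriverMap0Entry_t *entry)
{
	return (le16_to_cpu(entry->MappingInformation) &
		MPI2_DRVMAP0_MAPINFO_SLOT_MASK) >> MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
}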
+
+
+/****************************************************************************
+* Ethernet Config Pages
+****************************************************************************/
+
+/*Ethernet Page 0 */
+
+/*IP address (union of IPv4 and IPv6) */
+typedef union _MPI2_ETHERNET_IP_ADDR {
+ U32 IPv4Addr;
+ U32 IPv6Addr[4];
+} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR,
+ Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t;
+
+#define MPI2_ETHERNET_HOST_NAME_LENGTH (32)
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumInterfaces; /*0x08 */
+ U8 Reserved0; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Status; /*0x0C */
+ U8 MediaState; /*0x10 */
+ U8 Reserved2; /*0x11 */
+ U16 Reserved3; /*0x12 */
+ U8 MacAddress[6]; /*0x14 */
+ U8 Reserved4; /*0x1A */
+ U8 Reserved5; /*0x1B */
+ MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */
+ MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */
+ MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */
+ MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */
+ MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */
+ MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */
+ U8
+ HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_0,
+ *PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
+ Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t;
+
+#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00)
+
+/*values for Ethernet Page 0 Status field */
+#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000)
+#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000)
+#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000)
+#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080)
+#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040)
+#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020)
+#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010)
+#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008)
+#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004)
+#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001)
+
+/*values for Ethernet Page 0 MediaState field */
+#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07)
+#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00)
+#define MPI2_ETHPG0_MS_10MBIT (0x01)
+#define MPI2_ETHPG0_MS_100MBIT (0x02)
+#define MPI2_ETHPG0_MS_1GBIT (0x03)
+
+
+/*Ethernet Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved0; /*0x08 */
+ U32
+ Flags; /*0x0C */
+ U8
+ MediaState; /*0x10 */
+ U8
+ Reserved1; /*0x11 */
+ U16
+ Reserved2; /*0x12 */
+ U8
+ MacAddress[6]; /*0x14 */
+ U8
+ Reserved3; /*0x1A */
+ U8
+ Reserved4; /*0x1B */
+ MPI2_ETHERNET_IP_ADDR
+ StaticIpAddress; /*0x1C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticSubnetMask; /*0x2C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticGatewayIpAddress; /*0x3C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticDNS1IpAddress; /*0x4C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticDNS2IpAddress; /*0x5C */
+ U32
+ Reserved5; /*0x6C */
+ U32
+ Reserved6; /*0x70 */
+ U32
+ Reserved7; /*0x74 */
+ U32
+ Reserved8; /*0x78 */
+ U8
+ HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_1,
+ *PTR_MPI2_CONFIG_PAGE_ETHERNET_1,
+ Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t;
+
+#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00)
+
+/*values for Ethernet Page 1 Flags field */
+#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080)
+#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040)
+#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020)
+#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004)
+#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001)
+
+/*values for Ethernet Page 1 MediaState field */
+#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07)
+#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00)
+#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01)
+#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02)
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
+
+
+/****************************************************************************
+* Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ *Generic structure to use for product-specific extended manufacturing pages
+ *(currently Extended Manufacturing Page 40 through Extended Manufacturing
+ *Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ ProductSpecificInfo; /*0x08 */
+} MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ Mpi2ExtManufacturingPagePS_t,
+ *pMpi2ExtManufacturingPagePS_t;
+
+/*PageVersion should be provided by product-specific code */
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
new file mode 100644
index 000000000000..a079e5242474
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_init.h
+ * Title: MPI SCSI initiator mode messages and structures
+ * Creation Date: June 23, 2006
+ *
+ * mpi2_init.h Version: 02.00.14
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11 02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_INIT_H
+#define MPI2_INIT_H
+
+/*****************************************************************************
+*
+* SCSI Initiator Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SCSI IO messages and associated structures
+****************************************************************************/
+
+typedef struct _MPI2_SCSI_IO_CDB_EEDP32 {
+ U8 CDB[20]; /*0x00 */
+ U32 PrimaryReferenceTag; /*0x14 */
+ U16 PrimaryApplicationTag; /*0x18 */
+ U16 PrimaryApplicationTagMask; /*0x1A */
+ U32 TransferLength; /*0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32,
+ Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t;
+
+/*MPI v2.0 CDB field */
+typedef union _MPI2_SCSI_IO_CDB_UNION {
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION,
+ Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t;
+
+/*MPI v2.0 SCSI IO Request Message */
+typedef struct _MPI2_SCSI_IO_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U32 SenseBufferLowAddress; /*0x0C */
+ U16 SGLFlags; /*0x10 */
+ U8 SenseBufferLength; /*0x12 */
+ U8 Reserved4; /*0x13 */
+ U8 SGLOffset0; /*0x14 */
+ U8 SGLOffset1; /*0x15 */
+ U8 SGLOffset2; /*0x16 */
+ U8 SGLOffset3; /*0x17 */
+ U32 SkipCount; /*0x18 */
+ U32 DataLength; /*0x1C */
+ U32 BidirectionalDataLength; /*0x20 */
+ U16 IoFlags; /*0x24 */
+ U16 EEDPFlags; /*0x26 */
+ U32 EEDPBlockSize; /*0x28 */
+ U32 SecondaryReferenceTag; /*0x2C */
+ U16 SecondaryApplicationTag; /*0x30 */
+ U16 ApplicationTagTranslationMask; /*0x32 */
+ U8 LUN[8]; /*0x34 */
+ U32 Control; /*0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */
+
+#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */
+ MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI2_SGE_IO_UNION SGL; /*0x60 */
+
+} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST,
+ Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t;
+
+/*SCSI IO MsgFlags bits */
+
+/*MsgFlags for SenseBufferAddressSpace */
+#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C)
+#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)
+#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
+
+/*SCSI IO SGLFlags bits */
+
+/*base values for Data Location Address Space */
+#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C)
+#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C)
+
+/*base values for Type */
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02)
+
+/*shift values for each sub-field */
+#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12)
+#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8)
+#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
+#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
+
+/*number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
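/*
 * Illustrative sketch, not part of this header: SGLFlags carries one 4-bit
 * descriptor per SGL offset, each descriptor ORing an address-space value
 * with a type value and sitting at the per-SGL shift defined above.  The
 * example describes SGL0 as an MPI-format SGE in system memory; the function
 * name is hypothetical.
 */
static inline U16 example_sglflags_sgl0_system_mpi(void)
{
	return (U16)((MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR |
		      MPI2_SCSIIO_SGLFLAGS_TYPE_MPI) <<
		     MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT);
}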
+
+/*SCSI IO IoFlags bits */
+
+/*Large CDB Address Space */
+#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000)
+#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000)
+
+#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400)
+#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200)
+#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/*SCSI IO EEDPFlags bits */
+
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+
+#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
+
+#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001)
+#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
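/*
 * Illustrative sketch, not part of this header: for a read from a Type 1
 * protected device a host would typically pick the check operation and turn
 * on guard and reference-tag checking, incrementing the primary reference
 * tag per block.  The exact combination is host policy, not mandated by this
 * header; the function name is hypothetical.
 */
static inline U16 example_eedpflags_type1_check_read(void)
{
	return (U16)(MPI2_SCSIIO_EEDPFLAGS_CHECK_OP |
		     MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD |
		     MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
		     MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG);
}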
+
+/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */
+
+/*SCSI IO Control bits */
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000)
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24)
+#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
+
+#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
+/*alternate name for the previous field; called Command Priority in SAM-4 */
+#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
+
+#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400)
+
+#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0)
+#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000)
+#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040)
+#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080)
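/*
 * Illustrative sketch, not part of this header: the Control dword of a SCSI
 * IO request combines data-direction and task-attribute bits from the defines
 * above.  cpu_to_le32() is the Linux byte-order helper; the request pointer
 * and function name are hypothetical.
 */
static inline void example_control_simpleq_read(Mpi2SCSIIORequest_t *req)
{
	req->Control = cpu_to_le32(MPI2_SCSIIO_CONTROL_READ |
				   MPI2_SCSIIO_CONTROL_SIMPLEQ);
}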
+
+/*MPI v2.5 CDB field */
+typedef union _MPI25_SCSI_IO_CDB_UNION {
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_IEEE_SGE_SIMPLE64 SGE;
+} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION,
+ Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t;
+
+/*MPI v2.5 SCSI IO Request Message */
+typedef struct _MPI25_SCSI_IO_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U32 SenseBufferLowAddress; /*0x0C */
+ U8 DMAFlags; /*0x10 */
+ U8 Reserved5; /*0x11 */
+ U8 SenseBufferLength; /*0x12 */
+ U8 Reserved4; /*0x13 */
+ U8 SGLOffset0; /*0x14 */
+ U8 SGLOffset1; /*0x15 */
+ U8 SGLOffset2; /*0x16 */
+ U8 SGLOffset3; /*0x17 */
+ U32 SkipCount; /*0x18 */
+ U32 DataLength; /*0x1C */
+ U32 BidirectionalDataLength; /*0x20 */
+ U16 IoFlags; /*0x24 */
+ U16 EEDPFlags; /*0x26 */
+ U16 EEDPBlockSize; /*0x28 */
+ U16 Reserved6; /*0x2A */
+ U32 SecondaryReferenceTag; /*0x2C */
+ U16 SecondaryApplicationTag; /*0x30 */
+ U16 ApplicationTagTranslationMask; /*0x32 */
+ U8 LUN[8]; /*0x34 */
+ U32 Control; /*0x3C */
+ MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */
+
+#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */
+ MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI25_SGE_IO_UNION SGL; /*0x60 */
+
+} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST,
+ Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t;
+
+/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */
+
+/*Defines for the DMAFlags field
+ * Each setting affects 4 SGLs, from SGL0 to SGL3.
+ * D = Data
+ * C = Cache DIF
+ * I = Interleaved
+ * H = Host DIF
+ */
+#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F)
+
+/*number of SGLOffset fields */
+#define MPI25_SCSIIO_NUM_SGLOFFSETS (4)
+
+/*defines for the IoFlags field */
+#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
+#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
+#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+
+#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/*MPI v2.5 defines for the EEDPFlags bits */
+/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */
+#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0)
+#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0)
+
+#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030)
+#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010)
+
+/*use MPI2_LUN_ defines from mpi2.h for the LUN field */
+
+/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */
+
+/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so
+ * MPI2_SCSI_IO_REPLY is used for both.
+ */
+
+/*SCSI IO Error Reply Message */
+typedef struct _MPI2_SCSI_IO_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 SCSIStatus; /*0x0C */
+ U8 SCSIState; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TransferCount; /*0x14 */
+ U32 SenseCount; /*0x18 */
+ U32 ResponseInfo; /*0x1C */
+ U16 TaskTag; /*0x20 */
+ U16 Reserved4; /*0x22 */
+ U32 BidirectionalTransferCount; /*0x24 */
+ U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
+ U32 Reserved6; /*0x2C */
+} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
+ Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
+
+/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
+
+#define MPI2_SCSI_STATUS_GOOD (0x00)
+#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI2_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI2_SCSI_STATUS_BUSY (0x08)
+#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */
+#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40)
+
+/*SCSI IO Reply SCSIState flags */
+
+#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
+#define MPI2_SCSI_STATE_TERMINATED (0x08)
+#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
+#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
+
+/*masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
+
+#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
+
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/*SCSI Task Management Request Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved1; /*0x04 */
+ U8 TaskType; /*0x05 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 LUN[8]; /*0x0C */
+ U32 Reserved4[7]; /*0x14 */
+ U16 TaskMID; /*0x30 */
+ U16 Reserved5; /*0x32 */
+} MPI2_SCSI_TASK_MANAGE_REQUEST,
+ *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST,
+ Mpi2SCSITaskManagementRequest_t,
+ *pMpi2SCSITaskManagementRequest_t;
+
+/*TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/*obsolete TaskType name */
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \
+ (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT)
+
+/*MsgFlags bits */
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+
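+/*
+ *Editorial usage sketch (not part of the MPI headers): filling the fields
+ *of a task management request that aborts one task identified by its
+ *TaskMID.  The helper name is illustrative; MPI2_FUNCTION_SCSI_TASK_MGMT
+ *comes from mpi2.h, the request is assumed to be zeroed by the caller,
+ *and byte-order conversion is left to the including driver.
+ */
+static inline void
+mpi2_tm_build_abort_task(MPI2_SCSI_TASK_MANAGE_REQUEST *req,
+ U16 dev_handle, U16 task_mid)
+{
+ req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; /*from mpi2.h */
+ req->DevHandle = dev_handle;
+ req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
+ req->TaskMID = task_mid;
+}
+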
+/*SCSI Task Management Reply Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 ResponseCode; /*0x04 */
+ U8 TaskType; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TerminationCount; /*0x14 */
+ U32 ResponseInfo; /*0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY,
+ *PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
+ Mpi2SCSITaskManagementReply_t, *pMpi2SCSITaskManagementReply_t;
+
+/*ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
+/*masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
+
+/****************************************************************************
+* SCSI Enclosure Processor messages
+****************************************************************************/
+
+/*SCSI Enclosure Processor Request Message */
+typedef struct _MPI2_SEP_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Action; /*0x04 */
+ U8 Flags; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 SlotStatus; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U16 Slot; /*0x1C */
+ U16 EnclosureHandle; /*0x1E */
+} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST,
+ Mpi2SepRequest_t, *pMpi2SepRequest_t;
+
+/*Action defines */
+#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00)
+#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01)
+
+/*Flags defines */
+#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
+#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
+
+/*SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
+#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
+
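+/*
+ *Editorial usage sketch (not part of the MPI headers): a SEP request that
+ *asks the enclosure to light the identify LED of the device addressed by
+ *its device handle.  The helper name is illustrative;
+ *MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR comes from mpi2.h, the request is
+ *assumed to be zeroed by the caller, and byte ordering is left to the
+ *including driver.
+ */
+static inline void mpi2_sep_build_identify(MPI2_SEP_REQUEST *req, U16 dev_handle)
+{
+ req->Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; /*from mpi2.h */
+ req->Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ req->Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ req->DevHandle = dev_handle;
+ req->SlotStatus = MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
+}
+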
+/*SCSI Enclosure Processor Reply Message */
+typedef struct _MPI2_SEP_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Action; /*0x04 */
+ U8 Flags; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 SlotStatus; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U16 Slot; /*0x1C */
+ U16 EnclosureHandle; /*0x1E */
+} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY,
+ Mpi2SepReply_t, *pMpi2SepReply_t;
+
+/*SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
new file mode 100644
index 000000000000..0de425d8fd70
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -0,0 +1,1665 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_ioc.h
+ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
+ * Creation Date: October 11, 2006
+ *
+ * mpi2_ioc.h Version: 02.00.21
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocolFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ * Added defines for the individual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_IOC_H
+#define MPI2_IOC_H
+
+/*****************************************************************************
+*
+* IOC Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IOCInit message
+****************************************************************************/
+
+/*IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST {
+ U8 WhoInit; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 MsgVersion; /*0x0C */
+ U16 HeaderVersion; /*0x0E */
+ U32 Reserved5; /*0x10 */
+ U16 Reserved6; /*0x14 */
+ U8 Reserved7; /*0x16 */
+ U8 HostMSIxVectors; /*0x17 */
+ U16 Reserved8; /*0x18 */
+ U16 SystemRequestFrameSize; /*0x1A */
+ U16 ReplyDescriptorPostQueueDepth; /*0x1C */
+ U16 ReplyFreeQueueDepth; /*0x1E */
+ U32 SenseBufferAddressHigh; /*0x20 */
+ U32 SystemReplyAddressHigh; /*0x24 */
+ U64 SystemRequestFrameBaseAddress; /*0x28 */
+ U64 ReplyDescriptorPostQueueAddress; /*0x30 */
+ U64 ReplyFreeQueueAddress; /*0x38 */
+ U64 TimeStamp; /*0x40 */
+} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST,
+ Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t;
+
+/*WhoInit values */
+#define MPI2_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI2_WHOINIT_SYSTEM_BIOS (0x01)
+#define MPI2_WHOINIT_ROM_BIOS (0x02)
+#define MPI2_WHOINIT_PCI_PEER (0x03)
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_WHOINIT_MANUFACTURER (0x05)
+
+/*MsgVersion */
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0)
+
+/*HeaderVersion */
+#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
+
+/*minimum depth for the Reply Descriptor Post Queue */
+#define MPI2_RDPQ_DEPTH_MIN (16)
+
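+/*
+ *Editorial usage sketch (not part of the MPI headers): composing the
+ *MsgVersion field of an IOCInit request from a major/minor pair with the
+ *mask/shift pairs above.  The helper name is illustrative; drivers
+ *normally pass MPI2_VERSION_MAJOR and MPI2_VERSION_MINOR from mpi2.h.
+ */
+static inline U16 mpi2_iocinit_msg_version(U8 major, U8 minor)
+{
+ return (U16)(((major << MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT) &
+  MPI2_IOCINIT_MSGVERSION_MAJOR_MASK) |
+  ((minor << MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT) &
+  MPI2_IOCINIT_MSGVERSION_MINOR_MASK));
+}
+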
+/*IOCInit Reply message */
+typedef struct _MPI2_IOC_INIT_REPLY {
+ U8 WhoInit; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY,
+ Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t;
+
+/****************************************************************************
+* IOCFacts message
+****************************************************************************/
+
+/*IOCFacts Request message */
+typedef struct _MPI2_IOC_FACTS_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST,
+ Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t;
+
+/*IOCFacts Reply message */
+typedef struct _MPI2_IOC_FACTS_REPLY {
+ U16 MsgVersion; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 HeaderVersion; /*0x04 */
+ U8 IOCNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 IOCExceptions; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 MaxChainDepth; /*0x14 */
+ U8 WhoInit; /*0x15 */
+ U8 NumberOfPorts; /*0x16 */
+ U8 MaxMSIxVectors; /*0x17 */
+ U16 RequestCredit; /*0x18 */
+ U16 ProductID; /*0x1A */
+ U32 IOCCapabilities; /*0x1C */
+ MPI2_VERSION_UNION FWVersion; /*0x20 */
+ U16 IOCRequestFrameSize; /*0x24 */
+ U16 IOCMaxChainSegmentSize; /*0x26 */
+ U16 MaxInitiators; /*0x28 */
+ U16 MaxTargets; /*0x2A */
+ U16 MaxSasExpanders; /*0x2C */
+ U16 MaxEnclosures; /*0x2E */
+ U16 ProtocolFlags; /*0x30 */
+ U16 HighPriorityCredit; /*0x32 */
+ U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */
+ U8 ReplyFrameSize; /*0x36 */
+ U8 MaxVolumes; /*0x37 */
+ U16 MaxDevHandle; /*0x38 */
+ U16 MaxPersistentEntries; /*0x3A */
+ U16 MinDevHandle; /*0x3C */
+ U16 Reserved4; /*0x3E */
+} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
+ Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
+
+/*MsgVersion */
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0)
+
+/*HeaderVersion */
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
+
+/*IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
+
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060)
+
+#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010)
+#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008)
+#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004)
+#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002)
+#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001)
+
+/*defines for WhoInit field are after the IOCInit Request */
+
+/*ProductID field uses MPI2_FW_HEADER_PID_ */
+
+/*IOCCapabilities */
+#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
+#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
+#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
+#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
+#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
+#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800)
+#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
+#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
+#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040)
+#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
+#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
+#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
+#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
+
+/*ProtocolFlags */
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+
+/****************************************************************************
+* PortFacts message
+****************************************************************************/
+
+/*PortFacts Request message */
+typedef struct _MPI2_PORT_FACTS_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 PortNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST,
+ Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t;
+
+/*PortFacts Reply message */
+typedef struct _MPI2_PORT_FACTS_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 PortNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 Reserved5; /*0x14 */
+ U8 PortType; /*0x15 */
+ U16 Reserved6; /*0x16 */
+ U16 MaxPostedCmdBuffers; /*0x18 */
+ U16 Reserved7; /*0x1A */
+} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY,
+ Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t;
+
+/*PortType values */
+#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00)
+#define MPI2_PORTFACTS_PORTTYPE_FC (0x10)
+#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
+
+/****************************************************************************
+* PortEnable message
+****************************************************************************/
+
+/*PortEnable Request message */
+typedef struct _MPI2_PORT_ENABLE_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved2; /*0x04 */
+ U8 PortFlags; /*0x05 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST,
+ Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t;
+
+/*PortEnable Reply message */
+typedef struct _MPI2_PORT_ENABLE_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved2; /*0x04 */
+ U8 PortFlags; /*0x05 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY,
+ Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t;
+
+/****************************************************************************
+* EventNotification message
+****************************************************************************/
+
+/*EventNotification Request message */
+#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */
+ U16 SASBroadcastPrimitiveMasks; /*0x24 */
+ U16 SASNotifyPrimitiveMasks; /*0x26 */
+ U32 Reserved8; /*0x28 */
+} MPI2_EVENT_NOTIFICATION_REQUEST,
+ *PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
+ Mpi2EventNotificationRequest_t,
+ *pMpi2EventNotificationRequest_t;
+
+/*EventNotification Reply message */
+typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
+ U16 EventDataLength; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 AckRequired; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U16 Event; /*0x14 */
+ U16 Reserved4; /*0x16 */
+ U32 EventContext; /*0x18 */
+ U32 EventData[1]; /*0x1C */
+} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY,
+ Mpi2EventNotificationReply_t,
+ *pMpi2EventNotificationReply_t;
+
+/*AckRequired */
+#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
+#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
+
+/*Event */
+#define MPI2_EVENT_LOG_DATA (0x0001)
+#define MPI2_EVENT_STATE_CHANGE (0x0002)
+#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005)
+#define MPI2_EVENT_EVENT_CHANGE (0x000A)
+#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */
+#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
+#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014)
+#define MPI2_EVENT_SAS_DISCOVERY (0x0016)
+#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017)
+#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018)
+#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
+#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
+#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
+#define MPI2_EVENT_IR_VOLUME (0x001E)
+#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
+#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
+#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
+#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
+#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
+#define MPI2_EVENT_SAS_QUIESCE (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE (0x0028)
+#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
+#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
+#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
+
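+/*
+ *Editorial usage sketch (not part of the MPI headers): EventMasks in the
+ *EventNotification request holds one bit per event code and a set bit
+ *suppresses that event.  The helper name is illustrative; it clears the
+ *bit for one event so the IOC will report it, assuming the event code
+ *falls within the MPI2_EVENT_NOTIFY_EVENTMASK_WORDS words.
+ */
+static inline void mpi2_event_unmask(U32 *event_masks, U16 event)
+{
+ event_masks[event / 32] &= ~(1U << (event % 32));
+}
+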
+/*Log Entry Added Event data */
+
+/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */
+#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U16 LogSequence; /*0x0C */
+ U16 LogEntryQualifier; /*0x0E */
+ U8 VP_ID; /*0x10 */
+ U8 VF_ID; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */
+} MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ Mpi2EventDataLogEntryAdded_t,
+ *pMpi2EventDataLogEntryAdded_t;
+
+/*GPIO Interrupt Event data */
+
+typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
+ U8 GPIONum; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ Mpi2EventDataGpioInterrupt_t,
+ *pMpi2EventDataGpioInterrupt_t;
+
+/*Temperature Threshold Event data */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE {
+ U16 Status; /*0x00 */
+ U8 SensorNum; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U16 CurrentTemperature; /*0x04 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+ *PTR_MPI2_EVENT_DATA_TEMPERATURE,
+ Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t;
+
+/*Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001)
+
+/*Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
+ U8 SourceVF_ID; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+ U32 HostData[1]; /*0x08 */
+} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+ Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
+
+/*Power Performance Change Event */
+
+typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE {
+ U8 CurrentPowerMode; /*0x00 */
+ U8 PreviousPowerMode; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+ *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+ Mpi2EventDataPowerPerfChange_t,
+ *pMpi2EventDataPowerPerfChange_t;
+
+/*defines for CurrentPowerMode and PreviousPowerMode fields */
+#define MPI2_EVENT_PM_INIT_MASK (0xC0)
+#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00)
+#define MPI2_EVENT_PM_INIT_HOST (0x40)
+#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80)
+#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0)
+
+#define MPI2_EVENT_PM_MODE_MASK (0x07)
+#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00)
+#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01)
+#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04)
+#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05)
+#define MPI2_EVENT_PM_MODE_STANDBY (0x06)
+
+/*Hard Reset Received Event data */
+
+typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED {
+ U8 Reserved1; /*0x00 */
+ U8 Port; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ Mpi2EventDataHardResetReceived_t,
+ *pMpi2EventDataHardResetReceived_t;
+
+/*Task Set Full Event data */
+/* this event is obsolete */
+
+typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL {
+ U16 DevHandle; /*0x00 */
+ U16 CurrentDepth; /*0x02 */
+} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL,
+ Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t;
+
+/*SAS Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE {
+ U16 TaskTag; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U8 ASC; /*0x04 */
+ U8 ASCQ; /*0x05 */
+ U16 DevHandle; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ U64 SASAddress; /*0x0C */
+ U8 LUN[8]; /*0x14 */
+} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ Mpi2EventDataSasDeviceStatusChange_t,
+ *pMpi2EventDataSasDeviceStatusChange_t;
+
+/*SAS Device Status Change Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)
+
+/*Integrated RAID Operation Status Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS {
+ U16 VolDevHandle; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U8 RAIDOperation; /*0x04 */
+ U8 PercentComplete; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+} MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ Mpi2EventDataIrOperationStatus_t,
+ *pMpi2EventDataIrOperationStatus_t;
+
+/*Integrated RAID Operation Status Event data RAIDOperation values */
+#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00)
+#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
+#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
+#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)
+
+/*Integrated RAID Volume Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_VOLUME {
+ U16 VolDevHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 NewValue; /*0x04 */
+ U32 PreviousValue; /*0x08 */
+} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME,
+ Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t;
+
+/*Integrated RAID Volume Event data ReasonCode values */
+#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03)
+
+/*Integrated RAID Physical Disk Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK {
+ U16 Reserved1; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysDiskNum; /*0x03 */
+ U16 PhysDiskDevHandle; /*0x04 */
+ U16 Reserved2; /*0x06 */
+ U16 Slot; /*0x08 */
+ U16 EnclosureHandle; /*0x0A */
+ U32 NewValue; /*0x0C */
+ U32 PreviousValue; /*0x10 */
+} MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ Mpi2EventDataIrPhysicalDisk_t,
+ *pMpi2EventDataIrPhysicalDisk_t;
+
+/*Integrated RAID Physical Disk Event data ReasonCode values */
+#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03)
+
+/*Integrated RAID Configuration Change List Event data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumElements at runtime.
+ */
+#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT
+#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT {
+ U16 ElementFlags; /*0x00 */
+ U16 VolDevHandle; /*0x02 */
+ U8 ReasonCode; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 PhysDiskDevHandle; /*0x06 */
+} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT,
+ Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t;
+
+/*IR Configuration Change List Event data ElementFlags values */
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002)
+
+/*IR Configuration Change List Event data ReasonCode values */
+#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01)
+#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02)
+#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03)
+#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04)
+#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09)
+
+typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST {
+ U8 NumElements; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 Reserved2; /*0x02 */
+ U8 ConfigNum; /*0x03 */
+ U32 Flags; /*0x04 */
+ MPI2_EVENT_IR_CONFIG_ELEMENT
+ ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */
+} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ Mpi2EventDataIrConfigChangeList_t,
+ *pMpi2EventDataIrConfigChangeList_t;
+
+/*IR Configuration Change List Event data Flags values */
+#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001)
+
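+/*
+ *Editorial usage sketch (not part of the MPI headers): walking the
+ *variable-length ConfigElement array.  As noted above, the array is
+ *declared with one entry and NumElements gives the real count at runtime,
+ *so the event data must be treated as a flexible buffer.  The helper and
+ *callback names are illustrative.
+ */
+static inline void
+mpi2_ir_config_change_walk(const MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST *data,
+ void (*handle_element)(const MPI2_EVENT_IR_CONFIG_ELEMENT *element))
+{
+ U8 i;
+
+ for (i = 0; i < data->NumElements; i++)
+  handle_element(&data->ConfigElement[i]);
+}
+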
+/*SAS Discovery Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY {
+ U8 Flags; /*0x00 */
+ U8 ReasonCode; /*0x01 */
+ U8 PhysicalPort; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 DiscoveryStatus; /*0x04 */
+} MPI2_EVENT_DATA_SAS_DISCOVERY,
+ *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY,
+ Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t;
+
+/*SAS Discovery Event data Flags values */
+#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02)
+#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01)
+
+/*SAS Discovery Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+
+/*SAS Discovery Event data DiscoveryStatus values */
+#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400)
+#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001)
+
+/*SAS Broadcast Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE {
+ U8 PhyNum; /*0x00 */
+ U8 Port; /*0x01 */
+ U8 PortWidth; /*0x02 */
+ U8 Primitive; /*0x03 */
+} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ Mpi2EventDataSasBroadcastPrimitive_t,
+ *pMpi2EventDataSasBroadcastPrimitive_t;
+
+/*defines for the Primitive field */
+#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01)
+#define MPI2_EVENT_PRIMITIVE_SES (0x02)
+#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03)
+#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05)
+#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06)
+#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
+
+/*SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE {
+ U8 PhyNum; /*0x00 */
+ U8 Port; /*0x01 */
+ U8 Reserved1; /*0x02 */
+ U8 Primitive; /*0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ Mpi2EventDataSasNotifyPrimitive_t,
+ *pMpi2EventDataSasNotifyPrimitive_t;
+
+/*defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04)
+
+/*SAS Initiator Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE {
+ U8 ReasonCode; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U16 DevHandle; /*0x02 */
+ U64 SASAddress; /*0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasInitDevStatusChange_t,
+ *pMpi2EventDataSasInitDevStatusChange_t;
+
+/*SAS Initiator Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+
+/*SAS Initiator Device Table Overflow Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW {
+ U16 MaxInit; /*0x00 */
+ U16 CurrentInit; /*0x02 */
+ U64 SASAddress; /*0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ Mpi2EventDataSasInitTableOverflow_t,
+ *pMpi2EventDataSasInitTableOverflow_t;
+
+/*SAS Topology Change List Event data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumEntries at runtime.
+ */
+#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY {
+ U16 AttachedDevHandle; /*0x00 */
+ U8 LinkRate; /*0x02 */
+ U8 PhyStatus; /*0x03 */
+} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY,
+ Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t;
+
+typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST {
+ U16 EnclosureHandle; /*0x00 */
+ U16 ExpanderDevHandle; /*0x02 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U8 NumEntries; /*0x08 */
+ U8 StartPhyNum; /*0x09 */
+ U8 ExpStatus; /*0x0A */
+ U8 PhysicalPort; /*0x0B */
+ MPI2_EVENT_SAS_TOPO_PHY_ENTRY
+ PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */
+} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ Mpi2EventDataSasTopologyChangeList_t,
+ *pMpi2EventDataSasTopologyChangeList_t;
+
+/*values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+
+/*defines for the LinkRate field */
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0)
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+
+#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
+#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
+
+/*values for the PhyStatus field */
+#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
+#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10)
+/*values for the PhyStatus ReasonCode sub-field */
+#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03)
+#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04)
+#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05)
+
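+/*
+ *Editorial usage sketch (not part of the MPI headers): one PHY entry of a
+ *SAS Topology Change List packs the current and previous link rates into
+ *LinkRate and a per-PHY reason code into the low bits of PhyStatus; the
+ *masks and shifts above pull them apart.  The helper names are
+ *illustrative.
+ */
+static inline U8 mpi2_topo_phy_current_rate(const MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy)
+{
+ return (phy->LinkRate & MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
+  MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
+}
+
+static inline U8 mpi2_topo_phy_reason_code(const MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy)
+{
+ return phy->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
+}
+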
+/*SAS Enclosure Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE {
+ U16 EnclosureHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U64 EnclosureLogicalID; /*0x04 */
+ U16 NumSlots; /*0x0C */
+ U16 StartSlot; /*0x0E */
+ U32 PhyBits; /*0x10 */
+} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasEnclDevStatusChange_t,
+ *pMpi2EventDataSasEnclDevStatusChange_t;
+
+/*SAS Enclosure Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+
+/*SAS PHY Counter Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 PhyEventCode; /*0x0C */
+ U8 PhyNum; /*0x0D */
+ U16 Reserved2; /*0x0E */
+ U32 PhyEventInfo; /*0x10 */
+ U8 CounterType; /*0x14 */
+ U8 ThresholdWindow; /*0x15 */
+ U8 TimeUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+ U32 EventThreshold; /*0x18 */
+ U16 ThresholdFlags; /*0x1C */
+ U16 Reserved4; /*0x1E */
+} MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ Mpi2EventDataSasPhyCounter_t,
+ *pMpi2EventDataSasPhyCounter_t;
+
+/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h
+ *for the PhyEventCode field */
+
+/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h
+ *for the CounterType field */
+
+/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h
+ *for the TimeUnits field */
+
+/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h
+ *for the ThresholdFlags field */
+
+/*SAS Quiesce Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE {
+ U8 ReasonCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+} MPI2_EVENT_DATA_SAS_QUIESCE,
+ *PTR_MPI2_EVENT_DATA_SAS_QUIESCE,
+ Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t;
+
+/*SAS Quiesce Event data ReasonCode values */
+#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02)
+
+/*Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS {
+ U8 Flags; /*0x00 */
+ U8 NegotiatedLinkRate; /*0x01 */
+ U8 PhyNum; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U32 Reserved1; /*0x04 */
+ U8 InitialFrame[28]; /*0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS,
+ Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t;
+
+/*values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h
+ *for the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
+ MPI2_EVENT_HBD_PHY_SAS Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+ Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY {
+ U8 DescriptorType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+ MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY,
+ Mpi2EventDataHbdPhy_t,
+ *pMpi2EventDataHbdPhy_t;
+
+/*values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+/****************************************************************************
+* EventAck message
+****************************************************************************/
+
+/*EventAck Request message */
+typedef struct _MPI2_EVENT_ACK_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Event; /*0x0C */
+ U16 Reserved5; /*0x0E */
+ U32 EventContext; /*0x10 */
+} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST,
+ Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t;
+
+/*EventAck Reply message */
+typedef struct _MPI2_EVENT_ACK_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY,
+ Mpi2EventAckReply_t, *pMpi2EventAckReply_t;
+
+/****************************************************************************
+* SendHostMessage message
+****************************************************************************/
+
+/*SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
+ U16 HostDataLength; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 Reserved4; /*0x0C */
+ U8 DestVF_ID; /*0x0D */
+ U16 Reserved5; /*0x0E */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 Reserved8; /*0x18 */
+ U32 Reserved9; /*0x1C */
+ U32 Reserved10; /*0x20 */
+ U32 HostData[1]; /*0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+ *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+ Mpi2SendHostMessageRequest_t,
+ *pMpi2SendHostMessageRequest_t;
+
+/*SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY {
+ U16 HostDataLength; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+ Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t;
+
+/****************************************************************************
+* FWDownload message
+****************************************************************************/
+
+/*MPI v2.0 FWDownload Request message */
+typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 TotalImageSize; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ MPI2_MPI_SGE_UNION SGL; /*0x14 */
+} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST,
+ Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest;
+
+#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01)
+
+#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01)
+#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02)
+#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
+
+/*MPI v2.0 FWDownload TransactionContext Element */
+typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE,
+ Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t;
+
+/*MPI v2.5 FWDownload Request message */
+typedef struct _MPI25_FW_DOWNLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 TotalImageSize; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST,
+ Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest;
+
+/*FWDownload Reply message */
+typedef struct _MPI2_FW_DOWNLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY,
+ Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t;
+
+/****************************************************************************
+* FWUpload message
+****************************************************************************/
+
+/*MPI v2.0 FWUpload Request message */
+typedef struct _MPI2_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ MPI2_MPI_SGE_UNION SGL; /*0x14 */
+} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST,
+ Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t;
+
+#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00)
+#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+
+/*MPI v2.0 FWUpload TransactionContext Element */
+typedef struct _MPI2_FW_UPLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE,
+ Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t;
+
+/*MPI v2.5 FWUpload Request message */
+typedef struct _MPI25_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST,
+ Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t;
+
+/*FWUpload Reply message */
+typedef struct _MPI2_FW_UPLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ActualImageSize; /*0x14 */
+} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY,
+ Mpi2FWUploadReply_t, *pMpi2FWUploadReply_t;
+
+/*FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER {
+ U32 Signature; /*0x00 */
+ U32 Signature0; /*0x04 */
+ U32 Signature1; /*0x08 */
+ U32 Signature2; /*0x0C */
+ MPI2_VERSION_UNION MPIVersion; /*0x10 */
+ MPI2_VERSION_UNION FWVersion; /*0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
+ MPI2_VERSION_UNION PackageVersion; /*0x1C */
+ U16 VendorID; /*0x20 */
+ U16 ProductID; /*0x22 */
+ U16 ProtocolFlags; /*0x24 */
+ U16 Reserved26; /*0x26 */
+ U32 IOCCapabilities; /*0x28 */
+ U32 ImageSize; /*0x2C */
+ U32 NextImageHeaderOffset; /*0x30 */
+ U32 Checksum; /*0x34 */
+ U32 Reserved38; /*0x38 */
+ U32 Reserved3C; /*0x3C */
+ U32 Reserved40; /*0x40 */
+ U32 Reserved44; /*0x44 */
+ U32 Reserved48; /*0x48 */
+ U32 Reserved4C; /*0x4C */
+ U32 Reserved50; /*0x50 */
+ U32 Reserved54; /*0x54 */
+ U32 Reserved58; /*0x58 */
+ U32 Reserved5C; /*0x5C */
+ U32 Reserved60; /*0x60 */
+ U32 FirmwareVersionNameWhat; /*0x64 */
+ U8 FirmwareVersionName[32]; /*0x68 */
+ U32 VendorNameWhat; /*0x88 */
+ U8 VendorName[32]; /*0x8C */
+ U32 PackageNameWhat; /*0xAC */
+ U8 PackageName[32]; /*0xB0 */
+ U32 ReservedD0; /*0xD0 */
+ U32 ReservedD4; /*0xD4 */
+ U32 ReservedD8; /*0xD8 */
+ U32 ReservedDC; /*0xDC */
+ U32 ReservedE0; /*0xE0 */
+ U32 ReservedE4; /*0xE4 */
+ U32 ReservedE8; /*0xE8 */
+ U32 ReservedEC; /*0xEC */
+ U32 ReservedF0; /*0xF0 */
+ U32 ReservedF4; /*0xF4 */
+ U32 ReservedF8; /*0xF8 */
+ U32 ReservedFC; /*0xFC */
+} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
+
+/*Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+
+/*Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+
+/*Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+
+/*Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+
+/*defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/*SAS ProductID Family bits */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
+#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+
+/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE (0x100)
+
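+/*
+ *Editorial usage sketch (not part of the MPI headers): validating a
+ *firmware image header by its four signature words.  The helper name is
+ *illustrative and assumes the header has already been copied out of the
+ *image and converted to host byte order.
+ */
+static inline int mpi2_fw_header_valid(const MPI2_FW_IMAGE_HEADER *hdr)
+{
+ return (hdr->Signature & MPI2_FW_HEADER_SIGNATURE_MASK) ==
+   MPI2_FW_HEADER_SIGNATURE &&
+  hdr->Signature0 == MPI2_FW_HEADER_SIGNATURE0 &&
+  hdr->Signature1 == MPI2_FW_HEADER_SIGNATURE1 &&
+  hdr->Signature2 == MPI2_FW_HEADER_SIGNATURE2;
+}
+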
+/*Extended Image Header */
+typedef struct _MPI2_EXT_IMAGE_HEADER {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Checksum; /*0x04 */
+ U32 ImageSize; /*0x08 */
+ U32 NextImageHeaderOffset; /*0x0C */
+ U32 PackageVersion; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U32 Reserved5; /*0x1C */
+ U8 IdentifyString[32]; /*0x20 */
+} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
+ Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
+
+/*useful offsets */
+#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
+
+#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
+
+/*defines for the ImageType field */
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
+
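+/*
+ *Editorial usage sketch (not part of the MPI headers): the firmware image
+ *is followed by a chain of extended images; each header gives the offset
+ *of the next one and a zero offset ends the chain.  The helper name is
+ *illustrative and assumes 'image' points at a complete, host-order copy
+ *of the image.
+ */
+static inline const MPI2_EXT_IMAGE_HEADER *
+mpi2_first_ext_image(const U8 *image)
+{
+ const MPI2_FW_IMAGE_HEADER *fw = (const MPI2_FW_IMAGE_HEADER *)image;
+
+ if (!fw->NextImageHeaderOffset)
+  return (const MPI2_EXT_IMAGE_HEADER *)0;
+ return (const MPI2_EXT_IMAGE_HEADER *)(image + fw->NextImageHeaderOffset);
+}
+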
+/*FLASH Layout Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check RegionsPerLayout at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
+#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
+#endif
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfLayouts at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
+#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
+#endif
+
+typedef struct _MPI2_FLASH_REGION {
+ U8 RegionType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 RegionOffset; /*0x04 */
+ U32 RegionSize; /*0x08 */
+ U32 Reserved3; /*0x0C */
+} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
+ Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
+
+typedef struct _MPI2_FLASH_LAYOUT {
+ U32 FlashSize; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
+} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
+ Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
+
+typedef struct _MPI2_FLASH_LAYOUT_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 SizeOfRegion; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U16 NumberOfLayouts; /*0x04 */
+ U16 RegionsPerLayout; /*0x06 */
+ U16 MinimumSectorAlignment; /*0x08 */
+ U16 Reserved3; /*0x0A */
+ U32 Reserved4; /*0x0C */
+ MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
+} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
+ Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
+
+/*defines for the RegionType field */
+#define MPI2_FLASH_REGION_UNUSED (0x00)
+#define MPI2_FLASH_REGION_FIRMWARE (0x01)
+#define MPI2_FLASH_REGION_BIOS (0x02)
+#define MPI2_FLASH_REGION_NVDATA (0x03)
+#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
+#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
+#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
+#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
+#define MPI2_FLASH_REGION_MEGARAID (0x09)
+#define MPI2_FLASH_REGION_INIT (0x0A)
+
+/*ImageRevision */
+#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
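[Editor's note] As the comments above require, RegionsPerLayout (and NumberOfLayouts) must be read from the data itself rather than from the build-time MPI2_FLASH_NUMBER_OF_* defines. A sketch of that pattern for the common single-layout case; the stride is assumed to be sizeof(MPI2_FLASH_REGION) and the function name is illustrative.

    static void mpi2_dump_flash_regions(const Mpi2FlashLayoutData_t *data)
    {
            u16 regions = le16_to_cpu(data->RegionsPerLayout);
            u16 i;

            for (i = 0; i < regions; i++) {
                    /* Region[] is declared with one element but is sized by
                     * RegionsPerLayout at runtime, per the comment above.
                     */
                    const Mpi2FlashRegion_t *r = &data->Layout[0].Region[i];

                    pr_info("flash region: type 0x%02x offset 0x%08x size 0x%08x\n",
                            r->RegionType, le32_to_cpu(r->RegionOffset),
                            le32_to_cpu(r->RegionSize));
            }
    }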
+
+/*Supported Devices Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfDevices at runtime.
+ */
+#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
+#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
+#endif
+
+typedef struct _MPI2_SUPPORTED_DEVICE {
+ U16 DeviceID; /*0x00 */
+ U16 VendorID; /*0x02 */
+ U16 DeviceIDMask; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U8 LowPCIRev; /*0x08 */
+ U8 HighPCIRev; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
+ Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
+
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 NumberOfDevices; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U32 Reserved3; /*0x04 */
+ MPI2_SUPPORTED_DEVICE
+ SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
+ Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
+
+/*ImageRevision */
+#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
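[Editor's note] The same runtime-sizing rule applies here: NumberOfDevices, not MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES, bounds the SupportedDevice array. A short illustrative sketch:

    static void mpi2_dump_supported_devices(const Mpi2SupportedDevicesData_t *data)
    {
            u8 i;

            for (i = 0; i < data->NumberOfDevices; i++) {
                    const Mpi2SupportedDevice_t *d = &data->SupportedDevice[i];

                    pr_info("supported device: vendor 0x%04x device 0x%04x\n",
                            le16_to_cpu(d->VendorID), le16_to_cpu(d->DeviceID));
            }
    }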
+
+/*Init Extended Image Data */
+
+typedef struct _MPI2_INIT_IMAGE_FOOTER {
+ U32 BootFlags; /*0x00 */
+ U32 ImageSize; /*0x04 */
+ U32 Signature0; /*0x08 */
+ U32 Signature1; /*0x0C */
+ U32 Signature2; /*0x10 */
+ U32 ResetVector; /*0x14 */
+} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
+ Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
+
+/*defines for the BootFlags field */
+#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
+
+/*defines for the ImageSize field */
+#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
+
+/*defines for the Signature0 field */
+#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
+#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
+
+/*defines for the Signature1 field */
+#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
+#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
+
+/*defines for the Signature2 field */
+#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
+#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
+
+/*Signature fields as individual bytes */
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
+
+/*defines for the ResetVector field */
+#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+
+/****************************************************************************
+* PowerManagementControl message
+****************************************************************************/
+
+/*PowerManagementControl Request message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Parameter1; /*0x0C */
+ U8 Parameter2; /*0x0D */
+ U8 Parameter3; /*0x0E */
+ U8 Parameter4; /*0x0F */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
+ Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t;
+
+/*defines for the Feature field */
+#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
+#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */
+#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05)
+#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
+/*Parameter1 contains a PHY number */
+/*Parameter2 indicates power condition action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
+#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02)
+#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03)
+/*Parameter3 and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION
+ * Feature */
+/*Parameter1 contains SAS port width modulation group number */
+/*Parameter2 indicates IOC action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01)
+#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02)
+#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03)
+/*Parameter3 indicates desired modulation level using these defines */
+#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00)
+#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01)
+#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02)
+#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03)
+/*Parameter4 is reserved */
+
+/*this next set (_PCIE_LINK) is obsolete */
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
+/*Parameter1 indicates desired PCIe link speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */
+/*Parameter2 indicates desired PCIe link width using these defines */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */
+/*Parameter3 and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
+/*Parameter1 indicates desired IOC hardware clock speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01)
+#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)
+#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08)
+/*Parameter2, Parameter3, and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/
+/*Parameter1 indicates host action regarding global power management mode */
+#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01)
+#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02)
+#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03)
+/*Parameter2 indicates the requested global power management mode */
+#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01)
+#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08)
+#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40)
+/*Parameter3 and Parameter4 are reserved */
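[Editor's note] As one worked example of the Feature/Parameter pairing described above, a hedged sketch that requests half IOC clock speed. It assumes the MPI2_FUNCTION_PWR_MGMT_CONTROL define from mpi2.h and a caller-provided request frame; posting the frame to the IOC is not shown.

    static void mpi2_build_pm_half_ioc_speed(Mpi2PwrMgmtControlRequest_t *mpi_request)
    {
            memset(mpi_request, 0, sizeof(*mpi_request));
            mpi_request->Function = MPI2_FUNCTION_PWR_MGMT_CONTROL;
            mpi_request->Feature = MPI2_PM_CONTROL_FEATURE_IOC_SPEED;
            mpi_request->Parameter1 = MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED;
            /* Parameter2-4 are reserved for this Feature and stay zero */
    }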
+
+/*PowerManagementControl Reply message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
+ Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
new file mode 100644
index 000000000000..d1d9866cf300
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_raid.h
+ * Title: MPI Integrated RAID messages and structures
+ * Creation Date: April 26, 2007
+ *
+ * mpi2_raid.h Version: 02.00.08
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
+ * including the Actions and ActionData.
+ * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
+ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
+ * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
+ * can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+ * 11-18-11 02.00.07 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_RAID_H
+#define MPI2_RAID_H
+
+/*****************************************************************************
+*
+* Integrated RAID Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* RAID Action messages
+****************************************************************************/
+
+/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
+#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
+#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
+
+/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for
+ *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/*ActionDataWord defines for use with
+ *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */
+#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001)
+
+/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */
+typedef struct _MPI2_RAID_ACTION_RATE_DATA {
+ U8 RateToChange; /*0x00 */
+ U8 RateOrMode; /*0x01 */
+ U16 DataScrubDuration; /*0x02 */
+} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA,
+ Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t;
+
+#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00)
+#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01)
+#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02)
+
+/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION {
+ U8 RAIDFunction; /*0x00 */
+ U8 Flags; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ Mpi2RaidActionStartRaidFunction_t,
+ *pMpi2RaidActionStartRaidFunction_t;
+
+/*defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02)
+
+/*defines for the Flags field */
+#define MPI2_RAID_ACTION_START_NEW (0x00)
+#define MPI2_RAID_ACTION_START_RESUME (0x01)
+
+/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION {
+ U8 RAIDFunction; /*0x00 */
+ U8 Flags; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ Mpi2RaidActionStopRaidFunction_t,
+ *pMpi2RaidActionStopRaidFunction_t;
+
+/*defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02)
+
+/*defines for the Flags field */
+#define MPI2_RAID_ACTION_STOP_ABORT (0x00)
+#define MPI2_RAID_ACTION_STOP_PAUSE (0x01)
+
+/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */
+typedef struct _MPI2_RAID_ACTION_HOT_SPARE {
+ U8 HotSparePool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 DevHandle; /*0x02 */
+} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE,
+ Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t;
+
+/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */
+typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE {
+ U8 Flags; /*0x00 */
+ U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ Mpi2RaidActionFwUpdateMode_t,
+ *pMpi2RaidActionFwUpdateMode_t;
+
+/*ActionDataWord defines for use with
+ *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00)
+#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01)
+
+typedef union _MPI2_RAID_ACTION_DATA {
+ U32 Word;
+ MPI2_RAID_ACTION_RATE_DATA Rates;
+ MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
+ MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
+ MPI2_RAID_ACTION_HOT_SPARE HotSpare;
+ MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode;
+} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA,
+ Mpi2RaidActionData_t, *pMpi2RaidActionData_t;
+
+/*RAID Action Request Message */
+typedef struct _MPI2_RAID_ACTION_REQUEST {
+ U8 Action; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 VolDevHandle; /*0x04 */
+ U8 PhysDiskNum; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+ MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */
+ MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */
+} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST,
+ Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t;
+
+/*RAID Action request Action values */
+
+#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01)
+#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02)
+#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03)
+#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04)
+#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05)
+#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
+#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
+#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F)
+#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11)
+#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
+#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17)
+#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18)
+#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19)
+#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C)
+#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D)
+#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E)
+#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
+#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
+#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
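[Editor's note] A minimal sketch of one of these actions, MPI2_RAID_ACTION_INDICATOR_STRUCT, which returns an MPI2_RAID_VOL_INDICATOR through the reply's ActionData. The function code comes from mpi2.h; "mpi_request" and the helper name are illustrative, and frame submission is handled by the driver's message path.

    static void mpi2_build_raid_indicator(Mpi2RaidActionRequest_t *mpi_request,
                                          u16 vol_dev_handle)
    {
            memset(mpi_request, 0, sizeof(*mpi_request));
            mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
            mpi_request->Action = MPI2_RAID_ACTION_INDICATOR_STRUCT;
            mpi_request->VolDevHandle = cpu_to_le16(vol_dev_handle);
            /* progress is reported in reply->ActionData.RaidVolumeIndicator */
    }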
+
+/*RAID Volume Creation Structure */
+
+/*
+ *The following define can be customized for the targeted product.
+ */
+#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS
+#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1)
+#endif
+
+typedef struct _MPI2_RAID_VOLUME_PHYSDISK {
+ U8 RAIDSetNum; /*0x00 */
+ U8 PhysDiskMap; /*0x01 */
+ U16 PhysDiskDevHandle; /*0x02 */
+} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK,
+ Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT {
+ U8 NumPhysDisks; /*0x00 */
+ U8 VolumeType; /*0x01 */
+ U16 Reserved1; /*0x02 */
+ U32 VolumeCreationFlags; /*0x04 */
+ U32 VolumeSettings; /*0x08 */
+ U8 Reserved2; /*0x0C */
+ U8 ResyncRate; /*0x0D */
+ U16 DataScrubDuration; /*0x0E */
+ U64 VolumeMaxLBA; /*0x10 */
+ U32 StripeSize; /*0x18 */
+ U8 Name[16]; /*0x1C */
+ MPI2_RAID_VOLUME_PHYSDISK
+ PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */
+} MPI2_RAID_VOLUME_CREATION_STRUCT,
+ *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT,
+ Mpi2RaidVolumeCreationStruct_t,
+ *pMpi2RaidVolumeCreationStruct_t;
+
+/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
+
+/*defines for the VolumeCreationFlags field */
+#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000)
+#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004)
+#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002)
+#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001)
+/*The following is an obsolete define.
+ *It must be shifted left 24 bits in order to set the proper bit.
+ */
+#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
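[Editor's note] A short sketch showing the relationship between the two defines: new code should set the 32-bit flag directly, while the obsolete 8-bit value only reaches bit 31 after the 24-bit shift mentioned above.

    static void mpi2_raid_vol_use_defaults(Mpi2RaidVolumeCreationStruct_t *vc)
    {
            /* preferred form */
            vc->VolumeCreationFlags =
                    cpu_to_le32(MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS);
            /* obsolete equivalent:
             * cpu_to_le32((u32)MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS << 24)
             */
    }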
+
+/*RAID Online Capacity Expansion Structure */
+
+typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION {
+ U32 Flags; /*0x00 */
+ U16 DevHandle0; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U16 DevHandle1; /*0x08 */
+ U16 Reserved2; /*0x0A */
+} MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ Mpi2RaidOnlineCapacityExpansion_t,
+ *pMpi2RaidOnlineCapacityExpansion_t;
+
+/*RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+ U16 SourceDevHandle; /*0x00 */
+ U16 CandidateDevHandle; /*0x02 */
+ U32 Flags; /*0x04 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ Mpi2RaidCompatibilityInputStruct_t,
+ *pMpi2RaidCompatibilityInputStruct_t;
+
+/*defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
+/*RAID Volume Indicator Structure */
+
+typedef struct _MPI2_RAID_VOL_INDICATOR {
+ U64 TotalBlocks; /*0x00 */
+ U64 BlocksRemaining; /*0x08 */
+ U32 Flags; /*0x10 */
+} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR,
+ Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t;
+
+/*defines for RAID Volume Indicator Flags field */
+#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F)
+#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000)
+#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
+#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
+#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
+#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
+
+/*RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+ U8 State; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 GenericAttributes; /*0x04 */
+ U32 OEMSpecificAttributes; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ U32 Reserved4; /*0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ Mpi2RaidCompatibilityResultStruct_t,
+ *pMpi2RaidCompatibilityResultStruct_t;
+
+/*defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/*defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
+
+/*RAID Action Reply ActionData union */
+typedef union _MPI2_RAID_ACTION_REPLY_DATA {
+ U32 Word[5];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
+} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA,
+ Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t;
+
+/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for
+ *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/*RAID Action Reply Message */
+typedef struct _MPI2_RAID_ACTION_REPLY {
+ U8 Action; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 VolDevHandle; /*0x04 */
+ U8 PhysDiskNum; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */
+} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY,
+ Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
new file mode 100644
index 000000000000..b4e7084aba31
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_sas.h
+ * Title: MPI Serial Attached SCSI structures and definitions
+ * Creation Date: February 9, 2007
+ *
+ * mpi2_sas.h Version: 02.00.07
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
+ * Control Request.
+ * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
+ * Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
+ * 11-18-11 02.00.06 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA
+ * Passthrough Request message.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_SAS_H
+#define MPI2_SAS_H
+
+/*
+ *Values for SASStatus.
+ */
+#define MPI2_SASSTATUS_SUCCESS (0x00)
+#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01)
+#define MPI2_SASSTATUS_INVALID_FRAME (0x02)
+#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03)
+#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
+#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
+#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
+#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
+#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
+#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
+#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
+#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
+#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
+#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
+#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
+#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
+#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
+#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11)
+#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
+#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
+#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
+
+/*
+ *Values for the SAS DeviceInfo field used in SAS Device Status Change Event
+ *data and SAS Configuration pages.
+ */
+#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000)
+#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
+#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
+#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
+#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
+#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
+#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
+#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
+#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
+#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
+#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
+#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
+
+#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
+#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
+#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
+#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
+
+/*****************************************************************************
+*
+* SAS Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SMP Passthrough messages
+****************************************************************************/
+
+/*SMP Passthrough Request Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST {
+ U8 PassthroughFlags; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 RequestDataLength; /*0x04 */
+ U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U64 SASAddress; /*0x10 */
+ U32 Reserved3; /*0x18 */
+ U32 Reserved4; /*0x1C */
+ MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */
+} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST,
+ Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
+
+/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
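[Editor's note] A hedged sketch of filling the request for an SMP frame addressed to an expander. MPI2_FUNCTION_SMP_PASSTHROUGH comes from mpi2.h, the SGL describing the request/response buffers is built separately with the IOC's SGE helpers, and 0xFF in PhysicalPort means "any port".

    static void mpi2_build_smp_passthrough(Mpi2SmpPassthroughRequest_t *mpi_request,
                                           u64 sas_address, u16 smp_request_len)
    {
            memset(mpi_request, 0, sizeof(*mpi_request));
            mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
            mpi_request->PhysicalPort = 0xFF;
            mpi_request->SASAddress = cpu_to_le64(sas_address);
            mpi_request->RequestDataLength = cpu_to_le16(smp_request_len);
    }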
+
+/*SMP Passthrough Reply Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REPLY {
+ U8 PassthroughFlags; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ResponseDataLength; /*0x04 */
+ U8 SGLFlags; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 SASStatus; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U8 ResponseData[4]; /*0x18 */
+} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY,
+ Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
+
+/*values for SASStatus field are at the top of this file */
+
+/****************************************************************************
+* SATA Passthrough messages
+****************************************************************************/
+
+typedef union _MPI2_SATA_PT_SGE_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */
+ MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */
+} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION,
+ Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t;
+
+/*SATA Passthrough Request Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 PassthroughFlags; /*0x04 */
+ U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+ U32 DataLength; /*0x18 */
+ U8 CommandFIS[20]; /*0x1C */
+ MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/
+} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
+ Mpi2SataPassthroughRequest_t,
+ *pMpi2SataPassthroughRequest_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
+
+/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*SATA Passthrough Reply Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 PassthroughFlags; /*0x04 */
+ U8 SGLFlags; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 SASStatus; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 StatusFIS[20]; /*0x14 */
+ U32 StatusControlRegisters; /*0x28 */
+ U32 TransferCount; /*0x2C */
+} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY,
+ Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t;
+
+/*values for SASStatus field are at the top of this file */
+
+/****************************************************************************
+* SAS IO Unit Control messages
+****************************************************************************/
+
+/*SAS IO Unit Control Request Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST {
+ U8 Operation; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 DevHandle; /*0x04 */
+ U8 IOCParameter; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U8 PhyNum; /*0x0E */
+ U8 PrimFlags; /*0x0F */
+ U32 Primitive; /*0x10 */
+ U8 LookupMethod; /*0x14 */
+ U8 Reserved5; /*0x15 */
+ U16 SlotNumber; /*0x16 */
+ U64 LookupAddress; /*0x18 */
+ U32 IOCParameterValue; /*0x20 */
+ U32 Reserved7; /*0x24 */
+ U32 Reserved8; /*0x28 */
+} MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ Mpi2SasIoUnitControlRequest_t,
+ *pMpi2SasIoUnitControlRequest_t;
+
+/*values for the Operation field */
+#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI2_SAS_OP_PHY_LINK_RESET (0x06)
+#define MPI2_SAS_OP_PHY_HARD_RESET (0x07)
+#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
+#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
+#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
+#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
+#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10)
+#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11)
+#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12)
+#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
+#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
+/*values for the PrimFlags field */
+#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08)
+#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02)
+#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01)
+
+/*values for the LookupMethod field */
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
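[Editor's note] As an example of the Operation values, a sketch of the request used to remove a device by handle; MPI2_FUNCTION_SAS_IO_UNIT_CONTROL is defined in mpi2.h and the request frame is assumed to be provided by the caller.

    static void mpi2_build_remove_device(Mpi2SasIoUnitControlRequest_t *mpi_request,
                                         u16 dev_handle)
    {
            memset(mpi_request, 0, sizeof(*mpi_request));
            mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
            mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
            mpi_request->DevHandle = cpu_to_le16(dev_handle);
    }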
+
+/*SAS IO Unit Control Reply Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY {
+ U8 Operation; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 DevHandle; /*0x04 */
+ U8 IOCParameter; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
new file mode 100644
index 000000000000..71453d11c1c1
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_tool.h
+ * Title: MPI diagnostic tool structures and definitions
+ * Creation Date: March 26, 2007
+ *
+ * mpi2_tool.h Version: 02.00.09
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
+ * structures and defines.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
+ * and reply messages.
+ * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
+ * 11-18-11 02.00.08 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request
+ * message.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TOOL_H
+#define MPI2_TOOL_H
+
+/*****************************************************************************
+*
+* Toolbox Messages
+*
+*****************************************************************************/
+
+/*defines for the Tools */
+#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
+#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
+#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
+#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
+#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
+#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+
+/****************************************************************************
+* Toolbox reply
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY,
+ Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t;
+
+/****************************************************************************
+* Toolbox Clean Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Flags; /*0x0C */
+} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST,
+ Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t;
+
+/*values for the Flags field */
+#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
+#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
+#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
+#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
+#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
+#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
+#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
+#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
+
+/****************************************************************************
+* Toolbox Memory Move request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */
+} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST,
+ Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t;
+
+/****************************************************************************
+* Toolbox Diagnostic Data Upload request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 SGLFlags; /*0x0C */
+ U8 Reserved5; /*0x0D */
+ U16 Reserved6; /*0x0E */
+ U32 Flags; /*0x10 */
+ U32 DataLength; /*0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */
+} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ Mpi2ToolboxDiagDataUploadRequest_t,
+ *pMpi2ToolboxDiagDataUploadRequest_t;
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER {
+ U32 DiagDataLength; /*00h */
+ U8 FormatCode; /*04h */
+ U8 Reserved1; /*05h */
+ U16 Reserved2; /*06h */
+} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
+ Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t;
+
+/****************************************************************************
+* Toolbox ISTWI Read Write Tool
+****************************************************************************/
+
+/*Toolbox ISTWI Read Write Tool request message */
+typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U8 DevIndex; /*0x14 */
+ U8 Action; /*0x15 */
+ U8 SGLFlags; /*0x16 */
+ U8 Flags; /*0x17 */
+ U16 TxDataLength; /*0x18 */
+ U16 RxDataLength; /*0x1A */
+ U32 Reserved8; /*0x1C */
+ U32 Reserved9; /*0x20 */
+ U32 Reserved10; /*0x24 */
+ U32 Reserved11; /*0x28 */
+ U32 Reserved12; /*0x2C */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */
+} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ *PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ Mpi2ToolboxIstwiReadWriteRequest_t,
+ *pMpi2ToolboxIstwiReadWriteRequest_t;
+
+/*values for the Action field */
+#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01)
+#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02)
+#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03)
+#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10)
+#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
+#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
+
+/*Toolbox ISTWI Read Write Tool reply message */
+typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 DevIndex; /*0x14 */
+ U8 Action; /*0x15 */
+ U8 IstwiStatus; /*0x16 */
+ U8 Reserved6; /*0x17 */
+ U16 TxDataCount; /*0x18 */
+ U16 RxDataCount; /*0x1A */
+} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY,
+ Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t;
+
+/****************************************************************************
+* Toolbox Beacon Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_BEACON_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Reserved5; /*0x0C */
+ U8 PhysicalPort; /*0x0D */
+ U8 Reserved6; /*0x0E */
+ U8 Flags; /*0x0F */
+} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST,
+ Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t;
+
+/*values for the Flags field */
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00)
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01)
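[Editor's note] A sketch of turning the locate beacon on for one physical port; MPI2_FUNCTION_TOOLBOX comes from mpi2.h and frame submission is not shown.

    static void mpi2_build_beacon_on(Mpi2ToolboxBeaconRequest_t *mpi_request, u8 port)
    {
            memset(mpi_request, 0, sizeof(*mpi_request));
            mpi_request->Function = MPI2_FUNCTION_TOOLBOX;
            mpi_request->Tool = MPI2_TOOLBOX_BEACON_TOOL;
            mpi_request->PhysicalPort = port;
            mpi_request->Flags = MPI2_TOOLBOX_FLAGS_BEACONMODE_ON;
    }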
+
+/****************************************************************************
+* Toolbox Diagnostic CLI Tool
+****************************************************************************/
+
+#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C)
+
+/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 SGLFlags; /*0x0C */
+ U8 Reserved5; /*0x0D */
+ U16 Reserved6; /*0x0E */
+ U32 DataLength; /*0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x70 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi2ToolboxDiagnosticCliRequest_t,
+ *pMpi2ToolboxDiagnosticCliRequest_t;
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 DataLength; /*0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */
+ MPI25_SGE_IO_UNION SGL; /*0x70 */
+} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi25ToolboxDiagnosticCliRequest_t,
+ *pMpi25ToolboxDiagnosticCliRequest_t;
+
+/*Toolbox Diagnostic CLI Tool reply message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ReturnedDataLength; /*0x14 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY,
+ *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY,
+ Mpi2ToolboxDiagnosticCliReply_t,
+ *pMpi2ToolboxDiagnosticCliReply_t;
+
+/*****************************************************************************
+*
+* Diagnostic Buffer Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Diagnostic Buffer Post request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST {
+ U8 ExtendedType; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U64 BufferAddress; /*0x0C */
+ U32 BufferLength; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U32 Reserved6; /*0x1C */
+ U32 Flags; /*0x20 */
+ U32 ProductSpecific[23]; /*0x24 */
+} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
+ Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t;
+
+/*values for the ExtendedType field */
+#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02)
+
+/*values for the BufferType field */
+#define MPI2_DIAG_BUF_TYPE_TRACE (0x00)
+#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01)
+#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02)
+/*count of the number of buffer types */
+#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+
+/*values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
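[Editor's note] A hedged sketch of posting a host trace buffer. The DMA address and length are assumed to come from a prior dma_alloc_coherent(), and MPI2_FUNCTION_DIAG_BUFFER_POST is defined in mpi2.h.

    static void mpi2_build_diag_buffer_post(Mpi2DiagBufferPostRequest_t *mpi_request,
                                            dma_addr_t buf_dma, u32 buf_len)
    {
            memset(mpi_request, 0, sizeof(*mpi_request));
            mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
            mpi_request->BufferType = MPI2_DIAG_BUF_TYPE_TRACE;
            mpi_request->BufferAddress = cpu_to_le64(buf_dma);
            mpi_request->BufferLength = cpu_to_le32(buf_len);
            /* the IOC hands the buffer back via a Diagnostic Release request */
    }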
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REPLY {
+ U8 ExtendedType; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TransferLength; /*0x14 */
+} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY,
+ Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t;
+
+/****************************************************************************
+* Diagnostic Release request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REQUEST {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST,
+ Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t;
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REPLY {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY,
+ Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
new file mode 100644
index 000000000000..516f959573f5
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2000-2007 LSI Corporation.
+ *
+ *
+ * Name: mpi2_type.h
+ * Title: MPI basic type definitions
+ * Creation Date: August 16, 2006
+ *
+ * mpi2_type.h Version: 02.00.00
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TYPE_H
+#define MPI2_TYPE_H
+
+/*******************************************************************************
+ * Define MPI2_POINTER if it hasn't already been defined. By default
+ * MPI2_POINTER is defined to be a near pointer. MPI2_POINTER can be
+ * defined as a far pointer by defining it as "far *" before this header
+ * file is included.
+ */
+
+/* the basic types may have already been included by mpi_type.h */
+#ifndef MPI_TYPE_H
+/*****************************************************************************
+*
+* Basic Types
+*
+*****************************************************************************/
+
+typedef u8 U8;
+typedef __le16 U16;
+typedef __le32 U32;
+typedef __le64 U64 __attribute__ ((aligned(4)));
+
+/*****************************************************************************
+*
+* Pointer Types
+*
+*****************************************************************************/
+
+typedef U8 *PU8;
+typedef U16 *PU16;
+typedef U32 *PU32;
+typedef U64 *PU64;
+
+#endif
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
new file mode 100644
index 000000000000..04f8010f0770
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -0,0 +1,4840 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/time.h>
+#include <linux/kthread.h>
+#include <linux/aer.h>
+
+
+#include "mpt3sas_base.h"
+
+static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
+
+
+#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+
+ /* maximum controller queue depth */
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
+static int max_queue_depth = -1;
+module_param(max_queue_depth, int, 0);
+MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
+
+static int max_sgl_entries = -1;
+module_param(max_sgl_entries, int, 0);
+MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
+
+static int msix_disable = -1;
+module_param(msix_disable, int, 0);
+MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+
+
+static int mpt3sas_fwfault_debug;
+MODULE_PARM_DESC(mpt3sas_fwfault_debug,
+ " enable detection of firmware fault and halt firmware - (default=0)");
+
+
+/**
+ * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ *
+ */
+static int
+_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+ return 0;
+}
+module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
+ param_get_int, &mpt3sas_fwfault_debug, 0644);
+
+/**
+ * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt3sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if (!ioc)
+ return -1;
+
+ pdev = ioc->pdev;
+ if (!pdev)
+ return -1;
+ pci_stop_and_remove_bus_device(pdev);
+ return 0;
+}
+
+/**
+ * _base_fault_reset_work - workq handling ioc fault conditions
+ * @work: input argument, used to derive ioc
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+static void
+_base_fault_reset_work(struct work_struct *work)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
+ unsigned long flags;
+ u32 doorbell;
+ int rc;
+ struct task_struct *p;
+
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->shost_recovery)
+ goto rearm_timer;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
+ ioc->name);
+
+ /*
+ * Call _scsih_flush_pending_cmds callback so that we flush all
+ * pending commands back to the OS. This call is required to avoid
+ * a deadlock at the block layer. A dead IOC will fail to do a diag reset,
+ * and this call is safe since dead ioc will never return any
+ * command back from HW.
+ */
+ ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+ /*
+ * Set remove_host flag early since kernel thread will
+ * take some time to execute.
+ */
+ ioc->remove_host = 1;
+ /*Remove the Dead Host */
+ p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
+ "mpt3sas_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p))
+ pr_err(MPT3SAS_FMT
+ "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+ ioc->name, __func__);
+ else
+ pr_err(MPT3SAS_FMT
+ "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+ ioc->name, __func__);
+ return; /* don't rearm timer */
+ }
+
+ if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
+ __func__, (rc == 0) ? "success" : "failed");
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt3sas_base_fault_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
+ MPI2_IOC_STATE_OPERATIONAL)
+ return; /* don't rearm timer */
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ rearm_timer:
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt3sas_base_start_watchdog - start the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ if (ioc->fault_reset_work_q)
+ return;
+
+ /* initialize fault polling */
+
+ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
+ snprintf(ioc->fault_reset_work_q_name,
+ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
+ ioc->fault_reset_work_q =
+ create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+ if (!ioc->fault_reset_work_q) {
+ pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ wq = ioc->fault_reset_work_q;
+ ioc->fault_reset_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work(&ioc->fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
+{
+ pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
+ ioc->name, fault_code);
+}
+
+/**
+ * mpt3sas_halt_firmware - halts mpt controller firmware
+ * @ioc: per adapter object
+ *
+ * For debugging timeout related issues. Writing 0xC0FFEE00
+ * to the doorbell register will halt controller firmware. The
+ * intent is to stop both the driver and firmware so the end user
+ * can obtain a ring buffer from the controller UART.
+ */
+void
+mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 doorbell;
+
+ if (!ioc->fwfault_debug)
+ return;
+
+ dump_stack();
+
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt3sas_base_fault_info(ioc , doorbell);
+ else {
+ writel(0xC0FFEE00, &ioc->chip->Doorbell);
+ pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
+ ioc->name);
+ }
+
+ if (ioc->fwfault_debug == 2)
+ for (;;)
+ ;
+ else
+ panic("panic in %s\n", __func__);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @request_hdr: request mf
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
+ MPI2RequestHeader_t *request_hdr)
+{
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ char *desc = NULL;
+ u16 frame_sz;
+ char *func_str = NULL;
+
+ /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
+ if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return;
+
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+
+ switch (ioc_status) {
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_BUSY:
+ desc = "busy";
+ break;
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ desc = "invalid sgl";
+ break;
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ desc = "internal error";
+ break;
+ case MPI2_IOCSTATUS_INVALID_VPID:
+ desc = "invalid vpid";
+ break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ desc = "insufficient resources";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ desc = "invalid field";
+ break;
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ desc = "invalid state";
+ break;
+ case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
+ desc = "op state not supported";
+ break;
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
+ desc = "config invalid action";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
+ desc = "config invalid type";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
+ desc = "config invalid page";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
+ desc = "config invalid data";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
+ desc = "config no defaults";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
+ desc = "config cant commit";
+ break;
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ break;
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc = "eedp app tag error";
+ break;
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
+ desc = "target invalid io index";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ABORTED:
+ desc = "target aborted";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
+ desc = "target no conn retryable";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
+ desc = "target no connection";
+ break;
+ case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
+ desc = "target xfer count mismatch";
+ break;
+ case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
+ desc = "target data offset error";
+ break;
+ case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
+ desc = "target too much write data";
+ break;
+ case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
+ desc = "target iu too short";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
+ desc = "target ack nak timeout";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
+ desc = "target nak received";
+ break;
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
+ desc = "smp request failed";
+ break;
+ case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
+ desc = "smp data overrun";
+ break;
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
+ desc = "diagnostic released";
+ break;
+ default:
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ switch (request_hdr->Function) {
+ case MPI2_FUNCTION_CONFIG:
+ frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
+ func_str = "config_page";
+ break;
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
+ func_str = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
+ func_str = "sas_iounit_ctl";
+ break;
+ case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
+ frame_sz = sizeof(Mpi2SepRequest_t);
+ func_str = "enclosure";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ frame_sz = sizeof(Mpi2IOCInitRequest_t);
+ func_str = "ioc_init";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ frame_sz = sizeof(Mpi2PortEnableRequest_t);
+ func_str = "port_enable";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
+ func_str = "smp_passthru";
+ break;
+ default:
+ frame_sz = 32;
+ func_str = "unknown";
+ break;
+ }
+
+ pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+ ioc->name, desc, ioc_status, request_hdr, func_str);
+
+ _debug_dump_mf(request_hdr, frame_sz/4);
+}
+
+/**
+ * _base_display_event_data - verbose translation of firmware async events
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * Return nothing.
+ */
+static void
+_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ switch (event) {
+ case MPI2_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI2_EVENT_STATE_CHANGE:
+ desc = "Status Change";
+ break;
+ case MPI2_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI2_EVENT_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ desc = "Device Status Change";
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ desc = "IR Operation Status";
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ {
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+ pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_info("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_info("\n");
+ return;
+ }
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "SAS Enclosure Device Status Change";
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ desc = "IR Volume";
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ desc = "IR Physical Disk";
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ desc = "IR Configuration Change List";
+ break;
+ case MPI2_EVENT_LOG_ENTRY_ADDED:
+ desc = "Log Entry Added";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+}
+#endif
+
+/**
+ * _base_sas_log_info - verbose translation of firmware log info
+ * @ioc: per adapter object
+ * @log_info: log info
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
+{
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
+ union loginfo_type sas_loginfo;
+ char *originator_str = NULL;
+
+ sas_loginfo.loginfo = log_info;
+ if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
+ return;
+
+ /* each nexus loss loginfo */
+ if (log_info == 0x31170000)
+ return;
+
+ /* eat the loginfos associated with task aborts */
+ if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
+ 0x31140000 || log_info == 0x31130000))
+ return;
+
+ switch (sas_loginfo.dw.originator) {
+ case 0:
+ originator_str = "IOP";
+ break;
+ case 1:
+ originator_str = "PL";
+ break;
+ case 2:
+ originator_str = "IR";
+ break;
+ }
+
+ pr_warn(MPT3SAS_FMT
+ "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+ ioc->name, log_info,
+ originator_str, sas_loginfo.dw.code,
+ sas_loginfo.dw.subcode);
+}
+
+/**
+ * _base_display_reply_info - verbose translation of reply information
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return nothing.
+ */
+static void
+_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+ u32 loginfo = 0;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
+ (ioc->logging_level & MPT_DEBUG_REPLY)) {
+ _base_sas_ioc_info(ioc , mpi_reply,
+ mpt3sas_base_get_msg_frame(ioc, smid));
+ }
+#endif
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
+ loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
+ _base_sas_log_info(ioc, loginfo);
+ }
+
+ if (ioc_status || loginfo) {
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
+ }
+}
+
+/**
+ * mpt3sas_base_done - base internal command completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+ return 1;
+
+ if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ }
+ ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
+
+ complete(&ioc->base_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_async_event - main callback handler for firmware async events
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ Mpi2EventAckRequest_t *ack_request;
+ u16 smid;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+ if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return 1;
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _base_display_event_data(ioc, mpi_reply);
+#endif
+ if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
+ goto out;
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = mpi_reply->Event;
+ ack_request->EventContext = mpi_reply->EventContext;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ mpt3sas_base_put_smid_default(ioc, smid);
+
+ out:
+
+ /* scsih callback handler */
+ mpt3sas_scsih_event_callback(ioc, msix_index, reply);
+
+ /* ctl callback handler */
+ mpt3sas_ctl_event_callback(ioc, msix_index, reply);
+
+ return 1;
+}
+
+/**
+ * _base_get_cb_idx - obtain the callback index
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return callback index.
+ */
+static u8
+_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ int i;
+ u8 cb_idx;
+
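+ /*
+ * The smid ranges map to the three tracker pools: [1, hi_priority_smid)
+ * is the scsiio pool, [hi_priority_smid, internal_smid) is the
+ * hi-priority pool, and [internal_smid, hba_queue_depth] is the
+ * internal pool.
+ */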
+ if (smid < ioc->hi_priority_smid) {
+ i = smid - 1;
+ cb_idx = ioc->scsi_lookup[i].cb_idx;
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
+ return cb_idx;
+}
+
+/**
+ * _base_mask_interrupts - disable interrupts
+ * @ioc: per adapter object
+ *
+ * Disabling ResetIRQ, Reply and Doorbell Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ ioc->mask_interrupts = 1;
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ readl(&ioc->chip->HostInterruptMask);
+}
+
+/**
+ * _base_unmask_interrupts - enable interrupts
+ * @ioc: per adapter object
+ *
+ * Enabling only Reply Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register &= ~MPI2_HIM_RIM;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ ioc->mask_interrupts = 0;
+}
+
+union reply_descriptor {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue
+ *
+ * Return IRQ_HANDLED if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+ struct adapter_reply_queue *reply_q = bus_id;
+ union reply_descriptor rd;
+ u32 completed_cmds;
+ u8 request_desript_type;
+ u16 smid;
+ u8 cb_idx;
+ u32 reply;
+ u8 msix_index = reply_q->msix_index;
+ struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
+ Mpi2ReplyDescriptorsUnion_t *rpf;
+ u8 rc;
+
+ if (ioc->mask_interrupts)
+ return IRQ_NONE;
+
+ if (!atomic_add_unless(&reply_q->busy, 1, 1))
+ return IRQ_NONE;
+
+ rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
+ request_desript_type = rpf->Default.ReplyFlags
+ & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ completed_cmds = 0;
+ cb_idx = 0xFF;
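+ /*
+ * Walk the reply post free queue until an UNUSED descriptor is reached,
+ * dispatching each completed reply to its registered callback and
+ * returning address-reply frames to the reply free queue.
+ */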
+ do {
+ rd.word = le64_to_cpu(rpf->Words);
+ if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
+ goto out;
+ reply = 0;
+ smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
+ if (request_desript_type ==
+ MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
+ request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
+ (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, 0);
+ if (rc)
+ mpt3sas_base_free_smid(ioc, smid);
+ }
+ } else if (request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
+ reply = le32_to_cpu(
+ rpf->AddressReply.ReplyFrameAddress);
+ if (reply > ioc->reply_dma_max_address ||
+ reply < ioc->reply_dma_min_address)
+ reply = 0;
+ if (smid) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
+ (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, reply);
+ if (reply)
+ _base_display_reply_info(ioc,
+ smid, msix_index, reply);
+ if (rc)
+ mpt3sas_base_free_smid(ioc,
+ smid);
+ }
+ } else {
+ _base_async_event(ioc, msix_index, reply);
+ }
+
+ /* reply free queue handling */
+ if (reply) {
+ ioc->reply_free_host_index =
+ (ioc->reply_free_host_index ==
+ (ioc->reply_free_queue_depth - 1)) ?
+ 0 : ioc->reply_free_host_index + 1;
+ ioc->reply_free[ioc->reply_free_host_index] =
+ cpu_to_le32(reply);
+ wmb();
+ writel(ioc->reply_free_host_index,
+ &ioc->chip->ReplyFreeHostIndex);
+ }
+ }
+
+ rpf->Words = cpu_to_le64(ULLONG_MAX);
+ reply_q->reply_post_host_index =
+ (reply_q->reply_post_host_index ==
+ (ioc->reply_post_queue_depth - 1)) ? 0 :
+ reply_q->reply_post_host_index + 1;
+ request_desript_type =
+ reply_q->reply_post_free[reply_q->reply_post_host_index].
+ Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ completed_cmds++;
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ goto out;
+ if (!reply_q->reply_post_host_index)
+ rpf = reply_q->reply_post_free;
+ else
+ rpf++;
+ } while (1);
+
+ out:
+
+ if (!completed_cmds) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ wmb();
+ writel(reply_q->reply_post_host_index | (msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+ atomic_dec(&reply_q->busy);
+ return IRQ_HANDLED;
+}
+
+/**
+ * _base_is_controller_msix_enabled - does the controller support multi-reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
+{
+ return (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR context
+ *
+ * Called when a Task Management request has completed. We want
+ * to flush the other reply queues so all the outstanding IO has been
+ * completed back to OS before we process the TM completion.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+
+ /* If MSIX capability is turned off
+ * then multi-queues are not enabled
+ */
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (ioc->shost_recovery)
+ return;
+ /* TMs are on msix_index == 0 */
+ if (reply_q->msix_index == 0)
+ continue;
+ _base_interrupt(reply_q->vector, (void *)reply_q);
+ }
+}
+
+/**
+ * mpt3sas_base_release_callback_handler - clear interrupt callback handler
+ * @cb_idx: callback index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_release_callback_handler(u8 cb_idx)
+{
+ mpt_callbacks[cb_idx] = NULL;
+}
+
+/**
+ * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
+ * @cb_func: callback function
+ *
+ * Returns cb_idx.
+ */
+u8
+mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
+{
+ u8 cb_idx;
+
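+ /*
+ * Scan from the highest index downward for an unused slot; the loop
+ * never tests index 0, so a returned cb_idx of 0 means no free slot
+ * was found.
+ */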
+ for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
+ if (mpt_callbacks[cb_idx] == NULL)
+ break;
+
+ mpt_callbacks[cb_idx] = cb_func;
+ return cb_idx;
+}
+
+/**
+ * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_initialize_callback_handler(void)
+{
+ u8 cb_idx;
+
+ for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
+ mpt3sas_base_release_callback_handler(cb_idx);
+}
+
+
+/**
+ * _base_build_zero_len_sge - build zero length sg entry
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to ensure the IOC's hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+ u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
+ MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ ioc->base_add_sg_single(paddr, flags_length, -1);
+}
+
+/**
+ * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple32_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le32(dma_addr);
+}
+
+
+/**
+ * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple64_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+/**
+ * _base_get_chain_buffer_tracker - obtain chain tracker
+ * @ioc: per adapter object
+ * @smid: smid associated to an IO request
+ *
+ * Returns chain tracker(from ioc->free_chain_list)
+ */
+static struct chain_tracker *
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct chain_tracker *chain_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_chain_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "chain buffers not available\n", ioc->name));
+ return NULL;
+ }
+ chain_req = list_entry(ioc->free_chain_list.next,
+ struct chain_tracker, tracker_list);
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->scsi_lookup[smid - 1].chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return chain_req;
+}
+
+
+/**
+ * _base_build_sg - build generic sg
+ * @ioc: per adapter object
+ * @psge: virtual address for SGE
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Return nothing.
+ */
+static void
+_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ u32 sgl_flags;
+
+ if (!data_out_sz && !data_in_sz) {
+ _base_build_zero_len_sge(ioc, psge);
+ return;
+ }
+
+ if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ }
+}
+
+/* IEEE format sgls */
+
+/**
+ * _base_add_sg_single_ieee - add sg element for IEEE format
+ * @paddr: virtual address for SGE
+ * @flags: SGE flags
+ * @chain_offset: number of 128 byte elements from start of segment
+ * @length: data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
+ dma_addr_t dma_addr)
+{
+ Mpi25IeeeSgeChain64_t *sgel = paddr;
+
+ sgel->Flags = flags;
+ sgel->NextChainOffset = chain_offset;
+ sgel->Length = cpu_to_le32(length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+/**
+ * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to ensure the IOC's hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+ u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
+ _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
+}
+
+/**
+ * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
+ * @ioc: per adapter object
+ * @scmd: scsi command
+ * @smid: system request message index
+ * Context: none.
+ *
+ * The main routine that builds scatter gather table from a given
+ * scsi request sent via the .queuecommand main handler.
+ *
+ * Returns 0 success, anything else error
+ */
+static int
+_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_offset;
+ u32 chain_length;
+ u32 chain_flags;
+ int sges_left;
+ u32 sges_in_segment;
+ u8 simple_sgl_flags;
+ u8 simple_sgl_flags_last;
+ u8 chain_sgl_flags;
+ struct chain_tracker *chain_req;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ /* init scatter gather flags */
+ simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ simple_sgl_flags_last = simple_sgl_flags |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
+ chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ if (!sges_left) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "pci_map_sg failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+
+ sg_local = &mpi_request->SGL;
+ sges_in_segment = (ioc->request_sz -
+ offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
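+ /*
+ * ChainOffset is expressed in units of the IEEE SGE size: the number of
+ * SGE-sized slots from the start of the request frame to where the
+ * chain element will be placed.
+ */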
+ mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
+ (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment > 1) {
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ /* initializing the chain flags and pointers */
+ chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
+ chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ do {
+ sges_in_segment = (sges_left <=
+ ioc->max_sges_in_chain_message) ? sges_left :
+ ioc->max_sges_in_chain_message;
+ chain_offset = (sges_left == sges_in_segment) ?
+ 0 : sges_in_segment;
+ chain_length = sges_in_segment * ioc->sge_size_ieee;
+ if (chain_offset)
+ chain_length += ioc->sge_size_ieee;
+ _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
+ chain_offset, chain_length, chain_dma);
+
+ sg_local = chain;
+ if (!chain_offset)
+ goto fill_in_last_segment;
+
+ /* fill in chain segments */
+ while (sges_in_segment) {
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ } while (1);
+
+
+ fill_in_last_segment:
+
+ /* fill the last segment */
+ while (sges_left) {
+ if (sges_left == 1)
+ _base_add_sg_single_ieee(sg_local,
+ simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ }
+
+ return 0;
+}
+
+/**
+ * _base_build_sg_ieee - build generic sg for IEEE format
+ * @ioc: per adapter object
+ * @psge: virtual address for SGE
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Return nothing.
+ */
+static void
+_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ u8 sgl_flags;
+
+ if (!data_out_sz && !data_in_sz) {
+ _base_build_zero_len_sge_ieee(ioc, psge);
+ return;
+ }
+
+ if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
+ data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size_ieee;
+
+ /* READ sgel last */
+ sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
+ data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
+ data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
+ data_in_dma);
+ }
+}
+
+#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
+
+/**
+ * _base_config_dma_addressing - set dma addressing
+ * @ioc: per adapter object
+ * @pdev: PCI device struct
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+{
+ struct sysinfo s;
+ char *desc = NULL;
+
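+ /*
+ * Try a 64 bit DMA mask first when dma_addr_t is wide enough and the
+ * platform actually requires more than 32 bit addressing; otherwise
+ * fall back to a 32 bit mask.
+ */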
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask =
+ dma_get_required_mask(&pdev->dev);
+ if ((required_mask > DMA_BIT_MASK(32)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ desc = "64";
+ goto out;
+ }
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ desc = "32";
+ } else
+ return -ENODEV;
+
+ out:
+ si_meminfo(&s);
+ pr_info(MPT3SAS_FMT
+ "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->name, desc, convert_to_kb(s.totalram));
+
+ return 0;
+}
+
+/**
+ * _base_check_enable_msix - checks MSIX capability.
+ * @ioc: per adapter object
+ *
+ * Check to see if card is capable of MSIX, and set number
+ * of available msix vectors
+ */
+static int
+_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ int base;
+ u16 message_control;
+
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
+ ioc->name));
+ return -EINVAL;
+ }
+
+ /* get msix vector count */
+
+ pci_read_config_word(ioc->pdev, base + 2, &message_control);
+ ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+ if (ioc->msix_vector_count > 8)
+ ioc->msix_vector_count = 8;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "msix is supported, vector_count(%d)\n",
+ ioc->name, ioc->msix_vector_count));
+ return 0;
+}
+
+/**
+ * _base_free_irq - free irq
+ * @ioc: per adapter object
+ *
+ * Freeing respective reply_queue from the list.
+ */
+static void
+_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q, *next;
+
+ if (list_empty(&ioc->reply_queue_list))
+ return;
+
+ list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ list_del(&reply_q->list);
+ synchronize_irq(reply_q->vector);
+ free_irq(reply_q->vector, reply_q);
+ kfree(reply_q);
+ }
+}
+
+/**
+ * _base_request_irq - request irq
+ * @ioc: per adapter object
+ * @index: msix index into vector table
+ * @vector: irq vector
+ *
+ * Inserting respective reply_queue into the list.
+ */
+static int
+_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
+{
+ struct adapter_reply_queue *reply_q;
+ int r;
+
+ reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+ if (!reply_q) {
+ pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
+ ioc->name, (int)sizeof(struct adapter_reply_queue));
+ return -ENOMEM;
+ }
+ reply_q->ioc = ioc;
+ reply_q->msix_index = index;
+ reply_q->vector = vector;
+ atomic_set(&reply_q->busy, 0);
+ if (ioc->msix_enable)
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+ MPT3SAS_DRIVER_NAME, ioc->id, index);
+ else
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+ MPT3SAS_DRIVER_NAME, ioc->id);
+ r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+ reply_q);
+ if (r) {
+ pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+ reply_q->name, vector);
+ kfree(reply_q);
+ return -EBUSY;
+ }
+
+ INIT_LIST_HEAD(&reply_q->list);
+ list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+ return 0;
+}
+
+/**
+ * _base_assign_reply_queues - assigning msix index for each cpu
+ * @ioc: per adapter object
+ *
+ * The end user would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would be nice if we could call irq_set_affinity(); however, it is not
+ * an exported symbol
+ */
+static void
+_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+ int cpu_id;
+ int cpu_grouping, loop, grouping, grouping_mod;
+ int reply_queue;
+
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+
+ /* NUMA Hardware bug workaround - drop to fewer reply queues */
+ if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
+ ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
+ reply_queue = 0;
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->msix_index = reply_queue;
+ if (++reply_queue == ioc->reply_queue_count)
+ reply_queue = 0;
+ }
+ }
+
+ /* when there are more cpus than available msix vectors,
+ * then group cpus together on the same irq
+ */
+ if (ioc->cpu_count > ioc->msix_vector_count) {
+ grouping = ioc->cpu_count / ioc->msix_vector_count;
+ grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
+ if (grouping < 2 || (grouping == 2 && !grouping_mod))
+ cpu_grouping = 2;
+ else if (grouping < 4 || (grouping == 4 && !grouping_mod))
+ cpu_grouping = 4;
+ else if (grouping < 8 || (grouping == 8 && !grouping_mod))
+ cpu_grouping = 8;
+ else
+ cpu_grouping = 16;
+ } else
+ cpu_grouping = 0;
+
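+ /*
+ * Walk the online CPUs: with no grouping, assign reply queues
+ * round-robin; otherwise give each reply queue a block of
+ * cpu_grouping consecutive CPUs.
+ */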
+ loop = 0;
+ reply_q = list_entry(ioc->reply_queue_list.next,
+ struct adapter_reply_queue, list);
+ for_each_online_cpu(cpu_id) {
+ if (!cpu_grouping) {
+ ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
+ reply_q = list_entry(reply_q->list.next,
+ struct adapter_reply_queue, list);
+ } else {
+ if (loop < cpu_grouping) {
+ ioc->cpu_msix_table[cpu_id] =
+ reply_q->msix_index;
+ loop++;
+ } else {
+ reply_q = list_entry(reply_q->list.next,
+ struct adapter_reply_queue, list);
+ ioc->cpu_msix_table[cpu_id] =
+ reply_q->msix_index;
+ loop = 1;
+ }
+ }
+ }
+}
+
+/**
+ * _base_disable_msix - disables msix
+ * @ioc: per adapter object
+ *
+ */
+static void
+_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ if (!ioc->msix_enable)
+ return;
+ pci_disable_msix(ioc->pdev);
+ ioc->msix_enable = 0;
+}
+
+/**
+ * _base_enable_msix - enables msix, fall back to io_apic
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct msix_entry *entries, *a;
+ int r;
+ int i;
+ u8 try_msix = 0;
+
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
+
+ if (msix_disable == -1 || msix_disable == 0)
+ try_msix = 1;
+
+ if (!try_msix)
+ goto try_ioapic;
+
+ if (_base_check_enable_msix(ioc) != 0)
+ goto try_ioapic;
+
+ ioc->reply_queue_count = min_t(int, ioc->cpu_count,
+ ioc->msix_vector_count);
+
+ entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!entries) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "kcalloc failed @ at %s:%d/%s() !!!\n",
+ ioc->name, __FILE__, __LINE__, __func__));
+ goto try_ioapic;
+ }
+
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
+ a->entry = i;
+
+ r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
+ if (r) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "pci_enable_msix failed (r=%d) !!!\n",
+ ioc->name, r));
+ kfree(entries);
+ goto try_ioapic;
+ }
+
+ ioc->msix_enable = 1;
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
+ r = _base_request_irq(ioc, i, a->vector);
+ if (r) {
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ kfree(entries);
+ goto try_ioapic;
+ }
+ }
+
+ kfree(entries);
+ return 0;
+
+/* fall back to io_apic interrupt routing */
+ try_ioapic:
+
+ r = _base_request_irq(ioc, 0, ioc->pdev->irq);
+
+ return r;
+}
+
+/**
+ * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+ u32 memap_sz;
+ u32 pio_sz;
+ int i, r = 0;
+ u64 pio_chip = 0;
+ u64 chip_phys = 0;
+ struct adapter_reply_queue *reply_q;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
+ ioc->name, __func__));
+
+ ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_enable_device_mem(pdev)) {
+ pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
+ ioc->name);
+ return -ENODEV;
+ }
+
+
+ if (pci_request_selected_regions(pdev, ioc->bars,
+ MPT3SAS_DRIVER_NAME)) {
+ pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
+ ioc->name);
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+/* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+
+ if (_base_config_dma_addressing(ioc, pdev) != 0) {
+ pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+ for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ if (pio_sz)
+ continue;
+ pio_chip = (u64)pci_resource_start(pdev, i);
+ pio_sz = pci_resource_len(pdev, i);
+ } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ if (memap_sz)
+ continue;
+ ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
+ memap_sz = pci_resource_len(pdev, i);
+ ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+ if (ioc->chip == NULL) {
+ pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
+ ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
+ }
+ }
+
+ _base_mask_interrupts(ioc);
+ r = _base_enable_msix(ioc);
+ if (r)
+ goto out_fail;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
+ reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
+ "IO-APIC enabled"), reply_q->vector);
+
+ pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+ pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
+ ioc->name, (unsigned long long)pio_chip, pio_sz);
+
+ /* Save PCI configuration state for recovery from PCI AER/EEH errors */
+ pci_save_state(pdev);
+ return 0;
+
+ out_fail:
+ if (ioc->chip_phys)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ return r;
+}
+
+/**
+ * mpt3sas_base_get_msg_frame - obtain request mf pointer
+ * @ioc: per adapter object
+ * @smid: system request message index(smid zero is invalid)
+ *
+ * Returns virt pointer to message frame.
+ */
+void *
+mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->request + (smid * ioc->request_sz));
+}
+
+/**
+ * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns virt pointer to sense buffer.
+ */
+void *
+mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns phys pointer to the low 32bit address of the sense buffer.
+ */
+__le32
+mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
+ SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
+ * @ioc: per adapter object
+ * @phys_addr: lower 32 physical addr of the reply
+ *
+ * Converts 32bit lower physical addr into a virt address.
+ */
+void *
+mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+ return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
+}
+
+/**
+ * mpt3sas_base_get_smid - obtain a free smid from internal queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->internal_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ pr_err(MPT3SAS_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->internal_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ * @scmd: pointer to scsi command object
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct scsiio_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ pr_err(MPT3SAS_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->free_list.next,
+ struct scsiio_tracker, tracker_list);
+ request->scmd = scmd;
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->hpr_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return 0;
+ }
+
+ request = list_entry(ioc->hpr_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_free_smid - put smid back on free_list
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ int i;
+ struct chain_tracker *chain_req, *next;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
+ }
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /*
+ * See _wait_for_commands_to_complete() call with regards
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
+ }
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+}
+
+/**
+ * _base_writeq - 64 bit write to MMIO
+ * @b: data payload
+ * @addr: address in MMIO space
+ * @writeq_lock: spin lock
+ *
+ * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
+ * care of the 32 bit environment where it's not guaranteed to send the entire
+ * word in one transfer.
+ */
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+ writeq(cpu_to_le64(b), addr);
+}
+#else
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+ unsigned long flags;
+ __u64 data_out = cpu_to_le64(b);
+
+ spin_lock_irqsave(writeq_lock, flags);
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+ spin_unlock_irqrestore(writeq_lock, flags);
+}
+#endif
+
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
+/**
+ * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+
+ descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.SCSIIO.RequestFlags =
+ MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.HighPriority.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.SMID = cpu_to_le16(smid);
+ descriptor.HighPriority.LMID = 0;
+ descriptor.HighPriority.Reserved1 = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.SMID = cpu_to_le16(smid);
+ descriptor.Default.LMID = 0;
+ descriptor.Default.DescriptorTypeDependent = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+
+
+/**
+ * _base_display_ioc_capabilities - Display IOC's capabilities.
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i = 0;
+ char desc[16];
+ u32 iounit_pg1_flags;
+ u32 bios_version;
+
+ bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ strncpy(desc, ioc->manu_pg0.ChipName, 16);
+ pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
+ "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ ioc->name, desc,
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF,
+ ioc->pdev->revision,
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
+
+ pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
+ pr_info("Initiator");
+ i++;
+ }
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
+ pr_info("%sTarget", i ? "," : "");
+ i++;
+ }
+
+ i = 0;
+ pr_info("), ");
+ pr_info("Capabilities=(");
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
+ pr_info("Raid");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
+ pr_info("%sTLR", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
+ pr_info("%sMulticast", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
+ pr_info("%sBIDI Target", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
+ pr_info("%sEEDP", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
+ pr_info("%sSnapshot Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
+ pr_info("%sDiag Trace Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
+ pr_info("%sDiag Extended Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
+ pr_info("%sTask Set Full", i ? "," : "");
+ i++;
+ }
+
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
+ pr_info("%sNCQ", i ? "," : "");
+ i++;
+ }
+
+ pr_info(")\n");
+}
+
+/**
+ * mpt3sas_base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time until a device is reported missing
+ * @io_missing_delay: interval after which IO is returned when a device is missing
+ *
+ * Return nothing.
+ *
+ * Using the values passed on the command line, this function modifies the
+ * device missing delay as well as the IO missing delay. It should be called
+ * at driver load time.
+ */
+void
+mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay)
+{
+ u16 dmd, dmd_new, dmd_original;
+ u8 io_missing_delay_original;
+ u16 sz;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 num_phys = 0;
+ u16 ioc_status;
+
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys)
+ return;
+
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* device missing delay */
+ dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ dmd_original = dmd;
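+ /*
+ * ReportDeviceMissingDelay holds a 7-bit count; any value above 0x7F
+ * is stored in units of 16 seconds (capped at 0x7F0) with the
+ * UNIT_16 flag set, which is what the conversion below implements.
+ */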
+ if (device_missing_delay > 0x7F) {
+ dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
+ device_missing_delay;
+ dmd = dmd / 16;
+ dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
+ } else
+ dmd = device_missing_delay;
+ sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
+
+ /* io missing delay */
+ io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
+ sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
+
+ if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd_new = (dmd &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd_new =
+ dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
+ ioc->name, dmd_original, dmd_new);
+ pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
+ ioc->name, io_missing_delay_original,
+ io_missing_delay);
+ ioc->device_missing_delay = dmd_new;
+ ioc->io_missing_delay = io_missing_delay;
+ }
+
+out:
+ kfree(sas_iounit_pg1);
+}
+/**
+ * _base_static_config_pages - static start of day config pages
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ u32 iounit_pg1_flags;
+
+ mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
+ if (ioc->ir_firmware)
+ mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+ &ioc->manu_pg10);
+
+ /*
+ * Ensure correct T10 PI operation if vendor left EEDPTagMode
+ * flag unset in NVDATA.
+ */
+ mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
+ if (ioc->manu_pg11.EEDPTagMode == 0) {
+ pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ ioc->name);
+ ioc->manu_pg11.EEDPTagMode &= ~0x3;
+ ioc->manu_pg11.EEDPTagMode |= 0x1;
+ mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
+ &ioc->manu_pg11);
+ }
+
+ mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+ mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
+ mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ _base_display_ioc_capabilities(ioc);
+
+ /*
+ * Enable task_set_full handling in iounit_pg1 when the
+ * facts capabilities indicate that it is supported.
+ */
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
+ iounit_pg1_flags &=
+ ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ else
+ iounit_pg1_flags |=
+ MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
+ mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+}
+
+/**
+ * _base_release_memory_pools - release memory
+ * @ioc: per adapter object
+ *
+ * Free memory allocated from _base_allocate_memory_pools.
+ *
+ * Return nothing.
+ */
+static void
+_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->request) {
+ pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+ ioc->request, ioc->request_dma);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request_pool(0x%p): free\n",
+ ioc->name, ioc->request));
+ ioc->request = NULL;
+ }
+
+ if (ioc->sense) {
+ pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+ if (ioc->sense_dma_pool)
+ pci_pool_destroy(ioc->sense_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "sense_pool(0x%p): free\n",
+ ioc->name, ioc->sense));
+ ioc->sense = NULL;
+ }
+
+ if (ioc->reply) {
+ pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
+ if (ioc->reply_dma_pool)
+ pci_pool_destroy(ioc->reply_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_pool(0x%p): free\n",
+ ioc->name, ioc->reply));
+ ioc->reply = NULL;
+ }
+
+ if (ioc->reply_free) {
+ pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
+ ioc->reply_free_dma);
+ if (ioc->reply_free_dma_pool)
+ pci_pool_destroy(ioc->reply_free_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_free_pool(0x%p): free\n",
+ ioc->name, ioc->reply_free));
+ ioc->reply_free = NULL;
+ }
+
+ if (ioc->reply_post_free) {
+ pci_pool_free(ioc->reply_post_free_dma_pool,
+ ioc->reply_post_free, ioc->reply_post_free_dma);
+ if (ioc->reply_post_free_dma_pool)
+ pci_pool_destroy(ioc->reply_post_free_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_pool(0x%p): free\n", ioc->name,
+ ioc->reply_post_free));
+ ioc->reply_post_free = NULL;
+ }
+
+ if (ioc->config_page) {
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "config_page(0x%p): free\n", ioc->name,
+ ioc->config_page));
+ pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+ ioc->config_page, ioc->config_page_dma);
+ }
+
+ if (ioc->scsi_lookup) {
+ free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+ ioc->scsi_lookup = NULL;
+ }
+ kfree(ioc->hpr_lookup);
+ kfree(ioc->internal_lookup);
+ if (ioc->chain_lookup) {
+ for (i = 0; i < ioc->chain_depth; i++) {
+ if (ioc->chain_lookup[i].chain_buffer)
+ pci_pool_free(ioc->chain_dma_pool,
+ ioc->chain_lookup[i].chain_buffer,
+ ioc->chain_lookup[i].chain_buffer_dma);
+ }
+ if (ioc->chain_dma_pool)
+ pci_pool_destroy(ioc->chain_dma_pool);
+ free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ ioc->chain_lookup = NULL;
+ }
+}
+
+/**
+ * _base_allocate_memory_pools - allocate start of day memory pools
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 on success, anything else on error.
+ */
+static int
+_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ struct mpt3sas_facts *facts;
+ u16 max_sge_elements;
+ u16 chains_needed_per_io;
+ u32 sz, total_sz, reply_post_free_sz;
+ u32 retry_sz;
+ u16 max_request_credit;
+ unsigned short sg_tablesize;
+ u16 sge_size;
+ int i;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+
+ retry_sz = 0;
+ facts = &ioc->facts;
+
+ /* command line tunables for max sgl entries */
+ if (max_sgl_entries != -1)
+ sg_tablesize = max_sgl_entries;
+ else
+ sg_tablesize = MPT3SAS_SG_DEPTH;
+
+ if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
+ sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
+ else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
+ sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
+ ioc->shost->sg_tablesize = sg_tablesize;
+
+ ioc->hi_priority_depth = facts->HighPriorityCredit;
+ ioc->internal_depth = ioc->hi_priority_depth + (5);
+ /* command line tunables for max controller queue depth */
+ if (max_queue_depth != -1 && max_queue_depth != 0) {
+ max_request_credit = min_t(u16, max_queue_depth +
+ ioc->hi_priority_depth + ioc->internal_depth,
+ facts->RequestCredit);
+ if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+ max_request_credit = MAX_HBA_QUEUE_DEPTH;
+ } else
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
+
+ ioc->hba_queue_depth = max_request_credit;
+
+ /* request frame size */
+ ioc->request_sz = facts->IOCRequestFrameSize * 4;
+
+ /* reply frame size */
+ ioc->reply_sz = facts->ReplyFrameSize * 4;
+
+ /* calculate the max scatter element size */
+ sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
+
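+ /*
+ * If the contiguous request pool allocation further below fails, the
+ * hba queue depth is trimmed by 64 entries and the sizing is redone
+ * starting from this label.
+ */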
+ retry_allocation:
+ total_sz = 0;
+ /* calculate number of sg elements left over in the 1st frame */
+ max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
+ sizeof(Mpi2SGEIOUnion_t)) + sge_size);
+ ioc->max_sges_in_main_message = max_sge_elements/sge_size;
+
+ /* now do the same for a chain buffer */
+ max_sge_elements = ioc->request_sz - sge_size;
+ ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
+
+ /*
+ * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
+ */
+ chains_needed_per_io = ((ioc->shost->sg_tablesize -
+ ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
+ + 1;
+ if (chains_needed_per_io > facts->MaxChainDepth) {
+ chains_needed_per_io = facts->MaxChainDepth;
+ ioc->shost->sg_tablesize = min_t(u16,
+ ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
+ * chains_needed_per_io), ioc->shost->sg_tablesize);
+ }
+ ioc->chains_needed_per_io = chains_needed_per_io;
+
+ /* reply free queue sizing - taking into account for 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+
+ /* calculate reply descriptor post queue depth */
+ ioc->reply_post_queue_depth = ioc->hba_queue_depth +
+ ioc->reply_free_queue_depth + 1 ;
+ /* align the reply post queue on the next 16 count boundary */
+ if (ioc->reply_post_queue_depth % 16)
+ ioc->reply_post_queue_depth += 16 -
+ (ioc->reply_post_queue_depth % 16);
+
+
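+ /*
+ * If the computed post queue depth exceeds the firmware limit
+ * reported in MaxReplyDescriptorPostQueueDepth, clamp it to a
+ * 16-aligned value and scale hba_queue_depth back so that
+ * hba_queue_depth + reply_free_queue_depth + 1 still fits.
+ */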
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth =
+ facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16);
+ ioc->hba_queue_depth =
+ ((ioc->reply_post_queue_depth - 64) / 2) - 1;
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
+ "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+ "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io));
+
+ ioc->scsiio_depth = ioc->hba_queue_depth -
+ ioc->hi_priority_depth - ioc->internal_depth;
+
+ /* set the scsi host can_queue depth, leaving headroom for the
+ * internal commands that could be outstanding
+ */
+ ioc->shost->can_queue = ioc->scsiio_depth;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "scsi host: can_queue depth (%d)\n",
+ ioc->name, ioc->shost->can_queue));
+
+
+ /* contiguous pool for request and chains, 16 byte align, one extra
+ * frame for smid=0
+ */
+ ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
+ sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
+
+ /* hi-priority queue */
+ sz += (ioc->hi_priority_depth * ioc->request_sz);
+
+ /* internal queue */
+ sz += (ioc->internal_depth * ioc->request_sz);
+
+ ioc->request_dma_sz = sz;
+ ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+ if (!ioc->request) {
+ pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
+ "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
+ goto out;
+ retry_sz += 64;
+ ioc->hba_queue_depth = max_request_credit - retry_sz;
+ goto retry_allocation;
+ }
+
+ if (retry_sz)
+ pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
+ "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+
+ /* hi-priority queue */
+ ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+ ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+
+ /* internal queue */
+ ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
+ ioc->request_sz);
+ ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
+ ioc->request_sz);
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz)/1024));
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
+ ioc->name, (unsigned long long) ioc->request_dma));
+ total_sz += sz;
+
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
+ ioc->scsi_lookup_pages = get_order(sz);
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->scsi_lookup_pages);
+ if (!ioc->scsi_lookup) {
+ pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
+ ioc->name, (int)sz);
+ goto out;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
+ ioc->name, ioc->request, ioc->scsiio_depth));
+
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
+ if (!ioc->chain_lookup) {
+ pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+ ioc->request_sz, 16, 0);
+ if (!ioc->chain_dma_pool) {
+ pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ for (i = 0; i < ioc->chain_depth; i++) {
+ ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
+ ioc->chain_dma_pool , GFP_KERNEL,
+ &ioc->chain_lookup[i].chain_buffer_dma);
+ if (!ioc->chain_lookup[i].chain_buffer) {
+ ioc->chain_depth = i;
+ goto chain_done;
+ }
+ total_sz += ioc->request_sz;
+ }
+ chain_done:
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->chain_depth, ioc->request_sz,
+ ((ioc->chain_depth * ioc->request_sz))/1024));
+
+ /* initialize hi-priority queue smid's */
+ ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->hpr_lookup) {
+ pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->hi_priority_smid = ioc->scsiio_depth + 1;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+ ioc->name, ioc->hi_priority,
+ ioc->hi_priority_depth, ioc->hi_priority_smid));
+
+ /* initialize internal queue smid's */
+ ioc->internal_lookup = kcalloc(ioc->internal_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->internal_lookup) {
+ pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "internal(0x%p): depth(%d), start smid(%d)\n",
+ ioc->name, ioc->internal,
+ ioc->internal_depth, ioc->internal_smid));
+
+ /* sense buffers, 4 byte align */
+ sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+ ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->sense_dma_pool) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
+ &ioc->sense_dma);
+ if (!ioc->sense) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
+ "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->sense_dma));
+ total_sz += sz;
+
+ /* reply pool, 4 byte align */
+ sz = ioc->reply_free_queue_depth * ioc->reply_sz;
+ ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->reply_dma_pool) {
+ pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
+ &ioc->reply_dma);
+ if (!ioc->reply) {
+ pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
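+ /* remember the 32-bit address window of the reply pool; reply frame
+ * addresses handed back by the firmware are presumably validated
+ * against this range elsewhere in the driver
+ */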
+ ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+ ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->reply,
+ ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_dma));
+ total_sz += sz;
+
+ /* reply free queue, 16 byte align */
+ sz = ioc->reply_free_queue_depth * 4;
+ ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_free_dma_pool) {
+ pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
+ &ioc->reply_free_dma);
+ if (!ioc->reply_free) {
+ pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
+ "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_free_dma (0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_free_dma));
+ total_sz += sz;
+
+ /* reply post queue, 16 byte align */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ if (_base_is_controller_msix_enabled(ioc))
+ sz = reply_post_free_sz * ioc->reply_queue_count;
+ else
+ sz = reply_post_free_sz;
+ ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
+ GFP_KERNEL, &ioc->reply_post_free_dma);
+ if (!ioc->reply_post_free) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_post_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
+ "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
+ sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_dma = (0x%llx)\n",
+ ioc->name, (unsigned long long)
+ ioc->reply_post_free_dma));
+ total_sz += sz;
+
+ ioc->config_page_sz = 512;
+ ioc->config_page = pci_alloc_consistent(ioc->pdev,
+ ioc->config_page_sz, &ioc->config_page_dma);
+ if (!ioc->config_page) {
+ pr_err(MPT3SAS_FMT
+ "config page: pci_alloc_consistent failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "config page(0x%p): size(%d)\n",
+ ioc->name, ioc->config_page, ioc->config_page_sz));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->config_page_dma));
+ total_sz += ioc->config_page_sz;
+
+ pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
+ ioc->name, total_sz/1024);
+ pr_info(MPT3SAS_FMT
+ "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+ ioc->name, ioc->shost->can_queue, facts->RequestCredit);
+ pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
+ ioc->name, ioc->shost->sg_tablesize);
+ return 0;
+
+ out:
+ return -ENOMEM;
+}
+
+/**
+ * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
+ * @ioc: Pointer to MPT3SAS_ADAPTER structure
+ * @cooked: Request raw or cooked IOC state
+ *
+ * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Doorbell bits in MPI_IOC_STATE_MASK.
+ */
+u32
+mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
+{
+ u32 s, sc;
+
+ s = readl(&ioc->chip->Doorbell);
+ sc = s & MPI2_IOC_STATE_MASK;
+ return cooked ? sc : s;
+}
+
+/**
+ * _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc: per adapter object
+ * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
+ int sleep_flag)
+{
+ u32 count, cntdn;
+ u32 current_state;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
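+ /*
+ * Poll roughly once per millisecond when sleeping is allowed and
+ * every 500 usecs otherwise, so cntdn amounts to about @timeout
+ * seconds in either case.
+ */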
+ do {
+ current_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (current_state == ioc_state)
+ return 0;
+ if (count && current_state == MPI2_IOC_STATE_FAULT)
+ break;
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ return current_state;
+}
+
+/**
+ * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
+ * a write to the doorbell)
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
+ */
+static int
+_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
+ * doorbell.
+ */
+static int
+_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+ u32 doorbell;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc , doorbell);
+ return -EFAULT;
+ }
+ } else if (int_status == 0xFFFFFFFF)
+ goto out;
+
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ out:
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 doorbell_reg;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ doorbell_reg = readl(&ioc->chip->Doorbell);
+ if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+ ioc->name, __func__, count, doorbell_reg);
+ return -EFAULT;
+}
+
+/**
+ * _base_send_ioc_reset - send doorbell reset
+ * @ioc: per adapter object
+ * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
+ int sleep_flag)
+{
+ u32 ioc_state;
+ int r = 0;
+
+ if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
+ pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ if (!(ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
+ return -EFAULT;
+
+ pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+
+ writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
+ &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
+ r = -EFAULT;
+ goto out;
+ }
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+ timeout, sleep_flag);
+ if (ioc_state) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ r = -EFAULT;
+ goto out;
+ }
+ out:
+ pr_info(MPT3SAS_FMT "message unit reset: %s\n",
+ ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * _base_handshake_req_reply_wait - send a request through the doorbell interface
+ * @ioc: per adapter object
+ * @request_bytes: request length
+ * @request: pointer to the request payload
+ * @reply_bytes: reply length
+ * @reply: pointer to the reply payload
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
+{
+ MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
+ int i;
+ u8 failed;
+ u16 dummy;
+ __le32 *mfp;
+
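+ /*
+ * Doorbell handshake as implemented below: claim the doorbell, post
+ * the handshake function and dword count, wait for the IOC to
+ * acknowledge, stream the request one dword at a time, then read the
+ * reply back 16 bits at a time from the same register.
+ */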
+ /* make sure doorbell is not in use */
+ if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ pr_err(MPT3SAS_FMT
+ "doorbell is in use (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* clear pending doorbell interrupts from previous state changes */
+ if (readl(&ioc->chip->HostInterruptStatus) &
+ MPI2_HIS_IOC2SYS_DB_STATUS)
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ /* send message to ioc */
+ writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
+ ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
+ &ioc->chip->Doorbell);
+
+ if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake ack failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* send message 32-bits at a time */
+ for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
+ failed = 1;
+ }
+
+ if (failed) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake sending request failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* now wait for the reply */
+ if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* read the first two 16-bit words; they give the total length of the reply */
+ reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ for (i = 2; i < default_reply->MsgLength * 2; i++) {
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ if (i >= reply_bytes/2) /* overflow case */
+ dummy = readl(&ioc->chip->Doorbell);
+ else
+ reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ }
+
+ _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
+ if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ mfp = (__le32 *)reply;
+ pr_info("\toffset:data\n");
+ for (i = 0; i < reply_bytes/4; i++)
+ pr_info("\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+ return 0;
+}
+
+/**
+ * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SAS IO Unit Control Request message allows the host to perform low-level
+ * operations, such as resets on the PHYs of the IO Unit. It also allows the
+ * host to obtain the IOC-assigned device handle for a device when it has other
+ * identifying information about the device, and to remove IOC resources
+ * associated with the device.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ u8 issue_reset = 0;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
+ if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
+ ioc->ioc_link_reset_in_progress = 1;
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
+ ioc->ioc_link_reset_in_progress)
+ ioc->ioc_link_reset_in_progress = 0;
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SasIoUnitControlReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
+
+/**
+ * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SCSI Enclosure Processor request message causes the IOC to
+ * communicate with SES devices to control LED status signals.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ u8 issue_reset = 0;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name,
+ __func__, wait_state_count);
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SepRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SepReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _base_get_port_facts - obtain port facts reply and save in ioc
+ * @ioc: per adapter object
+ * @port: port number
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
+{
+ Mpi2PortFactsRequest_t mpi_request;
+ Mpi2PortFactsReply_t mpi_reply;
+ struct mpt3sas_port_facts *pfacts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
+ mpi_request.PortNumber = port;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ pfacts = &ioc->pfacts[port];
+ memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
+ pfacts->PortNumber = mpi_reply.PortNumber;
+ pfacts->VP_ID = mpi_reply.VP_ID;
+ pfacts->VF_ID = mpi_reply.VF_ID;
+ pfacts->MaxPostedCmdBuffers =
+ le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
+
+ return 0;
+}
+
+/**
+ * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCFactsRequest_t mpi_request;
+ Mpi2IOCFactsReply_t mpi_reply;
+ struct mpt3sas_facts *facts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ facts = &ioc->facts;
+ memset(facts, 0, sizeof(struct mpt3sas_facts));
+ facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
+ facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
+ facts->VP_ID = mpi_reply.VP_ID;
+ facts->VF_ID = mpi_reply.VF_ID;
+ facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
+ facts->MaxChainDepth = mpi_reply.MaxChainDepth;
+ facts->WhoInit = mpi_reply.WhoInit;
+ facts->NumberOfPorts = mpi_reply.NumberOfPorts;
+ facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
+ facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
+ facts->MaxReplyDescriptorPostQueueDepth =
+ le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
+ facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
+ facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
+ if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
+ ioc->ir_firmware = 1;
+ facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
+ facts->IOCRequestFrameSize =
+ le16_to_cpu(mpi_reply.IOCRequestFrameSize);
+ facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
+ facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
+ ioc->shost->max_id = -1;
+ facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
+ facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
+ facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
+ facts->HighPriorityCredit =
+ le16_to_cpu(mpi_reply.HighPriorityCredit);
+ facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
+ facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "hba queue depth(%d), max chains per io(%d)\n",
+ ioc->name, facts->RequestCredit,
+ facts->MaxChainDepth));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request frame size(%d), reply frame size(%d)\n", ioc->name,
+ facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+ return 0;
+}
+
+/**
+ * _base_send_ioc_init - send ioc_init to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCInitRequest_t mpi_request;
+ Mpi2IOCInitReply_t mpi_reply;
+ int r;
+ struct timeval current_time;
+ u16 ioc_status;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
+ mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ mpi_request.VF_ID = 0; /* TODO */
+ mpi_request.VP_ID = 0;
+ mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
+ mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+
+ if (_base_is_controller_msix_enabled(ioc))
+ mpi_request.HostMSIxVectors = ioc->reply_queue_count;
+ mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
+ mpi_request.ReplyDescriptorPostQueueDepth =
+ cpu_to_le16(ioc->reply_post_queue_depth);
+ mpi_request.ReplyFreeQueueDepth =
+ cpu_to_le16(ioc->reply_free_queue_depth);
+
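+ /*
+ * Only the upper 32 bits of the sense and reply buffer addresses are
+ * programmed here; the lower 32 bits are presumably supplied with
+ * each request, so both pools have to live within a single 4GB
+ * window.
+ */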
+ mpi_request.SenseBufferAddressHigh =
+ cpu_to_le32((u64)ioc->sense_dma >> 32);
+ mpi_request.SystemReplyAddressHigh =
+ cpu_to_le32((u64)ioc->reply_dma >> 32);
+ mpi_request.SystemRequestFrameBaseAddress =
+ cpu_to_le64((u64)ioc->request_dma);
+ mpi_request.ReplyFreeQueueAddress =
+ cpu_to_le64((u64)ioc->reply_free_dma);
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)ioc->reply_post_free_dma);
+
+
+ /* This time stamp specifies the number of milliseconds
+ * since the epoch (midnight, January 1, 1970).
+ */
+ do_gettimeofday(&current_time);
+ mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+ (current_time.tv_usec / 1000));
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ __le32 *mfp;
+ int i;
+
+ mfp = (__le32 *)&mpi_request;
+ pr_info("\toffset:data\n");
+ for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
+ pr_info("\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+
+ r = _base_handshake_req_reply_wait(ioc,
+ sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
+ sleep_flag);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
+ mpi_reply.IOCLogInfo) {
+ pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+ r = -EIO;
+ }
+
+ return r;
+}
+
+/**
+ * mpt3sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt;
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+
+ if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+
+ if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
+ return 1;
+
+ ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
+ ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
+ ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ ioc->port_enable_failed = 1;
+
+ if (ioc->is_driver_loading) {
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ mpt3sas_port_enable_complete(ioc);
+ return 1;
+ } else {
+ ioc->start_scan_failed = ioc_status;
+ ioc->start_scan = 0;
+ return 1;
+ }
+ }
+ complete(&ioc->port_enable_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_send_port_enable - send port_enable(discovery stuff) to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ Mpi2PortEnableReply_t *mpi_reply;
+ unsigned long timeleft;
+ int r = 0;
+ u16 smid;
+ u16 ioc_status;
+
+ pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ init_completion(&ioc->port_enable_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
+ 300*HZ);
+ if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2PortEnableRequest_t)/4);
+ if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ goto out;
+ }
+
+ mpi_reply = ioc->port_enable_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
+ ioc->name, __func__, ioc_status);
+ r = -EFAULT;
+ goto out;
+ }
+
+ out:
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
+ "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ u16 smid;
+
+ pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+}
+
+/**
+ * _base_determine_wait_on_discovery - disposition
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait on discovery to complete. Used to either
+ * locate boot device, or report volumes ahead of physical devices.
+ *
+ * Returns 1 for wait, 0 for don't wait
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
+{
+ /* We wait for discovery to complete if IR firmware is loaded.
+ * The sas topology events arrive before PD events, so we need time to
+ * turn on the bit in ioc->pd_handles to indicate a PD.
+ * Also, it may be required to report Volumes ahead of physical
+ * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+ */
+ if (ioc->ir_firmware)
+ return 1;
+
+ /* if no Bios, then we don't need to wait */
+ if (!ioc->bios_pg3.BiosVersion)
+ return 0;
+
+ /* The Bios is present, so we drop down here.
+ *
+ * If there are any entries in Bios Page 2, then we wait
+ * for discovery to complete.
+ */
+
+ /* Current Boot Device */
+ if ((ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Request Boot Device */
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Alternate Request Boot Device */
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * _base_unmask_events - turn on notification for this event
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The mask is stored in ioc->event_masks.
+ */
+static void
+_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u32 desired_event;
+
+ if (event >= 128)
+ return;
+
+ desired_event = (1 << (event % 32));
+
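+ /* the masks are kept as four 32-bit words; clearing a bit unmasks
+ * (enables) delivery of that event
+ */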
+ if (event < 32)
+ ioc->event_masks[0] &= ~desired_event;
+ else if (event < 64)
+ ioc->event_masks[1] &= ~desired_event;
+ else if (event < 96)
+ ioc->event_masks[2] &= ~desired_event;
+ else if (event < 128)
+ ioc->event_masks[3] &= ~desired_event;
+}
+
+/**
+ * _base_event_notification - send event notification
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2EventNotificationRequest_t *mpi_request;
+ unsigned long timeleft;
+ u16 smid;
+ int r = 0;
+ int i;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mpi_request->EventMasks[i] =
+ cpu_to_le32(ioc->event_masks[i]);
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2EventNotificationRequest_t)/4);
+ if (ioc->base_cmds.status & MPT3_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ } else
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
+ ioc->name, __func__));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ return r;
+}
+
+/**
+ * mpt3sas_base_validate_event_type - validating event types
+ * @ioc: per adapter object
+ * @event_type: firmware event types requested by the application
+ *
+ * This will turn on firmware event notification when an application
+ * asks for that event. We don't mask events that are already enabled.
+ */
+void
+mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
+{
+ int i, j;
+ u32 event_mask, desired_event;
+ u8 send_update_to_fw;
+
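+ /*
+ * Scan all four 32-bit event mask words: any event the caller asks
+ * for that is still masked in ioc->event_masks gets unmasked, and a
+ * single notification update is sent to firmware afterwards.
+ */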
+ for (i = 0, send_update_to_fw = 0; i <
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
+ event_mask = ~event_type[i];
+ desired_event = 1;
+ for (j = 0; j < 32; j++) {
+ if (!(event_mask & desired_event) &&
+ (ioc->event_masks[i] & desired_event)) {
+ ioc->event_masks[i] &= ~desired_event;
+ send_update_to_fw = 1;
+ }
+ desired_event = (desired_event << 1);
+ }
+ }
+
+ if (!send_update_to_fw)
+ return;
+
+ mutex_lock(&ioc->base_cmds.mutex);
+ _base_event_notification(ioc, CAN_SLEEP);
+ mutex_unlock(&ioc->base_cmds.mutex);
+}
+
+/**
+ * _base_diag_reset - the "big hammer" start of day reset
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 host_diagnostic;
+ u32 ioc_state;
+ u32 count;
+ u32 hcb_size;
+
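+ /*
+ * Diag reset sequence: unlock the diagnostic register with the magic
+ * write sequence, assert RESET_ADAPTER, poll for it to clear,
+ * re-enable the HCDW if the IOC came up in HCB mode, then wait for
+ * the firmware to reach the READY state.
+ */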
+ pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
+ ioc->name));
+
+ count = 0;
+ do {
+ /* Write magic sequence to WriteSequence register
+ * Loop until in diagnostic mode
+ */
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "write magic sequence\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ /* wait 100 msec */
+ if (sleep_flag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+
+ if (count++ > 20)
+ goto out;
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ ioc->name, count, host_diagnostic));
+
+ } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+
+ hcb_size = readl(&ioc->chip->HCBSize);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
+ ioc->name));
+ writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
+ &ioc->chip->HostDiagnostic);
+
+ /* don't access any registers for 50 milliseconds */
+ msleep(50);
+
+ /* 300 second max wait */
+ for (count = 0; count < 3000000 ; count++) {
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+
+ if (host_diagnostic == 0xFFFFFFFF)
+ goto out;
+ if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
+ break;
+
+ /* wait 1 msec */
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ mdelay(1);
+ }
+
+ if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "restart the adapter assuming the HCB Address points to good F/W\n",
+ ioc->name));
+ host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
+ host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
+ writel(host_diagnostic, &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "re-enable the HCDW\n", ioc->name));
+ writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
+ &ioc->chip->HCBSize);
+ }
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
+ ioc->name));
+ writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
+ &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "disable writes to the diagnostic register\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "Wait for FW to go to the READY state\n", ioc->name));
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
+ sleep_flag);
+ if (ioc_state) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ goto out;
+ }
+
+ pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+ return 0;
+
+ out:
+ pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+ return -EFAULT;
+}
+
+/**
+ * _base_make_ioc_ready - put controller in READY state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ u32 ioc_state;
+ int rc;
+ int count;
+
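+ /*
+ * Bring the IOC to READY: wait briefly if it is still in RESET,
+ * return early if it is already READY, otherwise try a message unit
+ * reset when OPERATIONAL and fall back to a diag reset on FAULT, an
+ * active doorbell, or when FORCE_BIG_HAMMER is requested.
+ */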
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery)
+ return 0;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
+ ioc->name, __func__, ioc_state));
+
+ /* if in RESET state, it should move to READY state shortly */
+ count = 0;
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
+ while ((ioc_state & MPI2_IOC_STATE_MASK) !=
+ MPI2_IOC_STATE_READY) {
+ if (count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ return -EFAULT;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ ssleep(1);
+ else
+ mdelay(1000);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ }
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
+ return 0;
+
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "unexpected doorbell active!\n",
+ ioc->name));
+ goto issue_diag_reset;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ goto issue_diag_reset;
+ }
+
+ if (type == FORCE_BIG_HAMMER)
+ goto issue_diag_reset;
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
+ if (!(_base_send_ioc_reset(ioc,
+ MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
+ return 0;
+ }
+
+ issue_diag_reset:
+ rc = _base_diag_reset(ioc, CAN_SLEEP);
+ return rc;
+}
+
+/**
+ * _base_make_ioc_operational - put controller in OPERATIONAL state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ int r, i;
+ unsigned long flags;
+ u32 reply_address;
+ u16 smid;
+ struct _tr_list *delayed_tr, *delayed_tr_next;
+ struct adapter_reply_queue *reply_q;
+ long reply_post_free;
+ u32 reply_post_free_sz;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* clean the delayed target reset list */
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_volume_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+ /* initialize the scsi lookup free list */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ INIT_LIST_HEAD(&ioc->free_list);
+ smid = 1;
+ for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
+ INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].smid = smid;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
+ }
+
+ /* hi-priority queue */
+ INIT_LIST_HEAD(&ioc->hpr_free_list);
+ smid = ioc->hi_priority_smid;
+ for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ ioc->hpr_lookup[i].smid = smid;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ }
+
+ /* internal queue */
+ INIT_LIST_HEAD(&ioc->internal_free_list);
+ smid = ioc->internal_smid;
+ for (i = 0; i < ioc->internal_depth; i++, smid++) {
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ ioc->internal_lookup[i].smid = smid;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+
+ /* chain pool */
+ INIT_LIST_HEAD(&ioc->free_chain_list);
+ for (i = 0; i < ioc->chain_depth; i++)
+ list_add_tail(&ioc->chain_lookup[i].tracker_list,
+ &ioc->free_chain_list);
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /* initialize Reply Free Queue */
+ for (i = 0, reply_address = (u32)ioc->reply_dma ;
+ i < ioc->reply_free_queue_depth ; i++, reply_address +=
+ ioc->reply_sz)
+ ioc->reply_free[i] = cpu_to_le32(reply_address);
+
+ /* initialize reply queues */
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
+
+ /* initialize Reply Post Free Queue */
+ reply_post_free = (long)ioc->reply_post_free;
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->reply_post_host_index = 0;
+ reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+ reply_post_free;
+ for (i = 0; i < ioc->reply_post_queue_depth; i++)
+ reply_q->reply_post_free[i].Words =
+ cpu_to_le64(ULLONG_MAX);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_free_queue;
+ reply_post_free += reply_post_free_sz;
+ }
+ skip_init_reply_post_free_queue:
+
+ r = _base_send_ioc_init(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ /* initialize reply free host index */
+ ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
+ writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
+
+ /* initialize reply post host index */
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
+ &ioc->chip->ReplyPostHostIndex);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_host_index;
+ }
+
+ skip_init_reply_post_host_index:
+
+ _base_unmask_interrupts(ioc);
+ r = _base_event_notification(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ if (sleep_flag == CAN_SLEEP)
+ _base_static_config_pages(ioc);
+
+
+ if (ioc->is_driver_loading) {
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+
+ return r; /* scan_start and scan_finished support */
+ }
+
+ r = _base_send_port_enable(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ return r;
+}
+
+/**
+ * mpt3sas_base_free_resources - free controller resources
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ if (ioc->chip_phys)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ return;
+}
+
+/**
+ * mpt3sas_base_attach - attach controller instance
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+{
+ int r, i;
+ int cpu_id, last_cpu_id = 0;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* setup cpu_msix_table */
+ ioc->cpu_count = num_online_cpus();
+ for_each_online_cpu(cpu_id)
+ last_cpu_id = cpu_id;
+ ioc->cpu_msix_table_sz = last_cpu_id + 1;
+ ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+ ioc->reply_queue_count = 1;
+ if (!ioc->cpu_msix_table) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "allocation for cpu_msix_table failed!!!\n",
+ ioc->name));
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ r = mpt3sas_base_map_resources(ioc);
+ if (r)
+ goto out_free_resources;
+
+
+ pci_set_drvdata(ioc->pdev, ioc->shost);
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ /*
+ * In SAS3.0,
+ * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
+	 * Target Status - all require the IEEE formatted scatter gather
+ * elements.
+ */
+
+ ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
+ ioc->build_sg = &_base_build_sg_ieee;
+ ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
+ ioc->mpi25 = 1;
+ ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
+ /*
+	 * These function pointers are for other requests that don't
+	 * require the IEEE scatter gather elements.
+ *
+ * For example Configuration Pages and SAS IOUNIT Control don't.
+ */
+ ioc->build_sg_mpi = &_base_build_sg;
+ ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
+
+ r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ if (r)
+ goto out_free_resources;
+
+ ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
+ sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
+ r = _base_get_port_facts(ioc, i, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+ }
+
+ r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ init_waitqueue_head(&ioc->reset_wq);
+
+ /* allocate memory pd handle bitmask list */
+ ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pd_handles_sz++;
+ ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->pd_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+ ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->blocking_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+
+ /* base internal command bits */
+ mutex_init(&ioc->base_cmds.mutex);
+ ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ /* port_enable command bits */
+ ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+
+ /* transport internal command bits */
+ ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->transport_cmds.mutex);
+
+ /* scsih internal command bits */
+ ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->scsih_cmds.mutex);
+
+ /* task management internal command bits */
+ ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->tm_cmds.mutex);
+
+ /* config page internal command bits */
+ ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->config_cmds.mutex);
+
+ /* ctl module internal command bits */
+ ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->ctl_cmds.mutex);
+
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
+ !ioc->ctl_cmds.sense) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ ioc->event_masks[i] = -1;
+
+ /* here we enable the events we care about */
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
+ _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+
+ r = _base_make_ioc_operational(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ return 0;
+
+ out_free_resources:
+
+ ioc->remove_host = 1;
+
+ mpt3sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->pfacts);
+ ioc->ctl_cmds.reply = NULL;
+ ioc->base_cmds.reply = NULL;
+ ioc->tm_cmds.reply = NULL;
+ ioc->scsih_cmds.reply = NULL;
+ ioc->transport_cmds.reply = NULL;
+ ioc->config_cmds.reply = NULL;
+ ioc->pfacts = NULL;
+ return r;
+}
+
+
+/**
+ * mpt3sas_base_detach - remove controller instance
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
+{
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->pfacts);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+}
+
+/**
+ * _base_reset_handler - reset callback handler (for base)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, or
+ * MPT3_IOC_DONE_RESET.
+ *
+ * Return nothing.
+ */
+static void
+_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ mpt3sas_scsih_reset_handler(ioc, reset_phase);
+ mpt3sas_ctl_reset_handler(ioc, reset_phase);
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+ ioc->transport_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+ complete(&ioc->transport_cmds.done);
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ ioc->base_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+ complete(&ioc->base_cmds.done);
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT3_CMD_NOT_USED;
+ } else
+ complete(&ioc->port_enable_cmds.done);
+ }
+ if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+ ioc->config_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ }
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ break;
+ }
+}
+
+/**
+ * _wait_for_commands_to_complete - wait for pending commands to complete
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * This function waits (up to 10 seconds) for all pending commands to complete
+ * prior to putting the controller into reset.
+ */
+static void
+_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 ioc_state;
+ unsigned long flags;
+ u16 i;
+
+ ioc->pending_io_count = 0;
+ if (sleep_flag != CAN_SLEEP)
+ return;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
+ return;
+
+ /* pending command count */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = 0; i < ioc->scsiio_depth; i++)
+ if (ioc->scsi_lookup[i].cb_idx != 0xFF)
+ ioc->pending_io_count++;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!ioc->pending_io_count)
+ return;
+
+ /* wait for pending commands to complete */
+ wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
+}
+
+/**
+ * mpt3sas_base_hard_reset_handler - reset controller
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ int r;
+ unsigned long flags;
+ u32 ioc_state;
+ u8 is_fault = 0, is_trigger = 0;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery) {
+ pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
+ ioc->name, __func__);
+ r = 0;
+ goto out_unlocked;
+ }
+
+ if (mpt3sas_fwfault_debug)
+ mpt3sas_halt_firmware(ioc);
+
+ /* TODO - What we really should be doing is pulling
+	 * out all the code associated with NO_SLEEP; it's never used.
+	 * That is legacy code from the mpt fusion driver, ported over.
+	 * I will leave this BUG_ON here for now until it's been resolved.
+ */
+ BUG_ON(sleep_flag == NO_SLEEP);
+
+ /* wait for an active reset in progress to complete */
+ if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
+ do {
+ ssleep(1);
+ } while (ioc->shost_recovery == 1);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return ioc->ioc_reset_in_progress_status;
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->shost_recovery = 1;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))) {
+ is_trigger = 1;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ is_fault = 1;
+ }
+ _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
+ _wait_for_commands_to_complete(ioc, sleep_flag);
+ _base_mask_interrupts(ioc);
+ r = _base_make_ioc_ready(ioc, sleep_flag, type);
+ if (r)
+ goto out;
+ _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
+
+ /* If this hard reset is called while port enable is active, then
+ * there is no reason to call make_ioc_operational
+ */
+ if (ioc->is_driver_loading && ioc->port_enable_failed) {
+ ioc->remove_host = 1;
+ r = -EFAULT;
+ goto out;
+ }
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out;
+ r = _base_make_ioc_operational(ioc, sleep_flag);
+ if (!r)
+ _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
+
+ out:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
+ ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_in_progress_status = r;
+ ioc->shost_recovery = 0;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_count++;
+ mutex_unlock(&ioc->reset_in_progress_mutex);
+
+ out_unlocked:
+ if ((r == 0) && is_trigger) {
+ if (is_fault)
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
+ else
+ mpt3sas_trigger_master(ioc,
+ MASTER_TRIGGER_ADAPTER_RESET);
+ }
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return r;
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
new file mode 100644
index 000000000000..994656cbfac9
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -0,0 +1,1139 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_BASE_H_INCLUDED
+#define MPT3SAS_BASE_H_INCLUDED
+
+#include "mpi/mpi2_type.h"
+#include "mpi/mpi2.h"
+#include "mpi/mpi2_ioc.h"
+#include "mpi/mpi2_cnfg.h"
+#include "mpi/mpi2_init.h"
+#include "mpi/mpi2_raid.h"
+#include "mpi/mpi2_tool.h"
+#include "mpi/mpi2_sas.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "mpt3sas_debug.h"
+#include "mpt3sas_trigger_diag.h"
+
+/* driver versioning info */
+#define MPT3SAS_DRIVER_NAME "mpt3sas"
+#define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
+#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
+#define MPT3SAS_DRIVER_VERSION "01.100.01.00"
+#define MPT3SAS_MAJOR_VERSION 1
+#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_BUILD_VERSION 1
+#define MPT3SAS_RELEASE_VERSION 00
+
+/*
+ * Set MPT3SAS_SG_DEPTH value based on user input.
+ */
+#define MPT3SAS_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS
+#define MPT3SAS_MIN_PHYS_SEGMENTS 16
+#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
+#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
+#else
+#define MPT3SAS_SG_DEPTH MPT3SAS_MAX_PHYS_SEGMENTS
+#endif
+
+
+/*
+ * Generic Defines
+ */
+#define MPT3SAS_SATA_QUEUE_DEPTH 32
+#define MPT3SAS_SAS_QUEUE_DEPTH 254
+#define MPT3SAS_RAID_QUEUE_DEPTH 128
+
+#define MPT_NAME_LENGTH 32 /* generic length of strings */
+#define MPT_STRING_LENGTH 64
+
+#define MPT_MAX_CALLBACKS 32
+
+
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
+#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
+
+#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/
+
+#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF
+
+/*
+ * reset phases
+ */
+#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
+#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */
+#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */
+
+/*
+ * logging format
+ */
+#define MPT3SAS_FMT "%s: "
+
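+/*
+ * Illustrative usage sketch (not part of the driver source): MPT3SAS_FMT
+ * prefixes every log line with the adapter name, so ioc->name is always
+ * passed as the first format argument, e.g.:
+ *
+ *     pr_info(MPT3SAS_FMT "port enable: complete\n", ioc->name);
+ */
+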
+/*
+ * per target private data
+ */
+#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
+#define MPT_TARGET_FLAGS_VOLUME 0x02
+#define MPT_TARGET_FLAGS_DELETED 0x04
+#define MPT_TARGET_FASTPATH_IO 0x08
+
+
+
+/*
+ * status bits for ioc->diag_buffer_status
+ */
+#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
+#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
+#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+
+
+/* OEM Identifiers */
+#define MFG10_OEM_ID_INVALID (0x00000000)
+#define MFG10_OEM_ID_DELL (0x00000001)
+#define MFG10_OEM_ID_FSC (0x00000002)
+#define MFG10_OEM_ID_SUN (0x00000003)
+#define MFG10_OEM_ID_IBM (0x00000004)
+
+/* GENERIC Flags 0*/
+#define MFG10_GF0_OCE_DISABLED (0x00000001)
+#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
+#define MFG10_GF0_R10_DISPLAY (0x00000004)
+#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
+#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
+
+/* OEM Specific Flags will come from OEM specific header files */
+struct Mpi2ManufacturingPage10_t {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 OEMIdentifier; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 08h */
+ U32 Reserved3; /* 0Ch */
+ U32 GenericFlags0; /* 10h */
+ U32 GenericFlags1; /* 14h */
+ U32 Reserved4; /* 18h */
+ U32 OEMSpecificFlags0; /* 1Ch */
+ U32 OEMSpecificFlags1; /* 20h */
+ U32 Reserved5[18]; /* 24h - 60h*/
+};
+
+
+/* Miscellaneous options */
+struct Mpi2ManufacturingPage11_t {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ __le32 Reserved1; /* 04h */
+ u8 Reserved2; /* 08h */
+ u8 EEDPTagMode; /* 09h */
+ u8 Reserved3; /* 0Ah */
+ u8 Reserved4; /* 0Bh */
+ __le32 Reserved5[23]; /* 0Ch-60h*/
+};
+
+/**
+ * struct MPT3SAS_TARGET - starget private hostdata
+ * @starget: starget object
+ * @sas_address: target sas address
+ * @handle: device handle
+ * @num_luns: number of luns
+ * @flags: MPT_TARGET_FLAGS_XXX flags
+ * @deleted: target flagged for deletion
+ * @tm_busy: target is busy with TM request.
+ */
+struct MPT3SAS_TARGET {
+ struct scsi_target *starget;
+ u64 sas_address;
+ u16 handle;
+ int num_luns;
+ u32 flags;
+ u8 deleted;
+ u8 tm_busy;
+};
+
+
+/*
+ * per device private data
+ */
+#define MPT_DEVICE_FLAGS_INIT 0x01
+#define MPT_DEVICE_TLR_ON 0x02
+
+/**
+ * struct MPT3SAS_DEVICE - sdev private hostdata
+ * @sas_target: starget private hostdata
+ * @lun: lun number
+ * @flags: MPT_DEVICE_XXX flags
+ * @configured_lun: lun is configured
+ * @block: device is in SDEV_BLOCK state
+ * @tlr_snoop_check: flag used in determining whether to disable TLR
+ * @eedp_enable: eedp support enable bit
+ * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
+ * @eedp_block_length: block size
+ */
+struct MPT3SAS_DEVICE {
+ struct MPT3SAS_TARGET *sas_target;
+ unsigned int lun;
+ u32 flags;
+ u8 configured_lun;
+ u8 block;
+ u8 tlr_snoop_check;
+};
+
+#define MPT3_CMD_NOT_USED 0x8000 /* free */
+#define MPT3_CMD_COMPLETE 0x0001 /* completed */
+#define MPT3_CMD_PENDING 0x0002 /* pending */
+#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
+#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */
+
+/**
+ * struct _internal_cmd - internal commands struct
+ * @mutex: mutex
+ * @done: completion
+ * @reply: reply message pointer
+ * @sense: sense data
+ * @status: MPT3_CMD_XXX status
+ * @smid: system message id
+ */
+struct _internal_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ void *sense;
+ u16 status;
+ u16 smid;
+};
+
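+/*
+ * Illustrative sketch (not part of the driver source): a typical user of an
+ * internal command serializes on @mutex, marks the command pending, issues
+ * the request and then waits on @done; the timeout value here is hypothetical:
+ *
+ *     mutex_lock(&ioc->base_cmds.mutex);
+ *     smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ *     ioc->base_cmds.status = MPT3_CMD_PENDING;
+ *     ioc->base_cmds.smid = smid;
+ *     init_completion(&ioc->base_cmds.done);
+ *     mpt3sas_base_put_smid_default(ioc, smid);
+ *     wait_for_completion_timeout(&ioc->base_cmds.done, 10 * HZ);
+ *     mutex_unlock(&ioc->base_cmds.mutex);
+ */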
+
+
+/**
+ * struct _sas_device - attached device information
+ * @list: sas device list
+ * @starget: starget object
+ * @sas_address: device sas address
+ * @device_name: retrieved from the SAS IDENTIFY frame.
+ * @handle: device handle
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @volume_handle: volume handle (valid when hidden raid member)
+ * @volume_wwid: volume unique identifier
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: slot number
+ * @phy: phy identifier provided in sas device page 0
+ * @fast_path: fast path feature enable bit
+ * @responding: used in _scsih_sas_device_mark_responding
+ */
+struct _sas_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ u64 sas_address;
+ u64 device_name;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u16 volume_handle;
+ u64 volume_wwid;
+ u32 device_info;
+ int id;
+ int channel;
+ u16 slot;
+ u8 phy;
+ u8 responding;
+ u8 fast_path;
+};
+
+/**
+ * struct _raid_device - raid volume link list
+ * @list: sas device list
+ * @starget: starget object
+ * @sdev: scsi device struct (volumes are single lun)
+ * @wwid: unique identifier for the volume
+ * @handle: device handle
+ * @id: target id
+ * @channel: target channel
+ * @volume_type: the raid level
+ * @device_info: bitfield provides detailed info about the hidden components
+ * @num_pds: number of hidden raid components
+ * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
+ */
+#define MPT_MAX_WARPDRIVE_PDS 8
+struct _raid_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct scsi_device *sdev;
+ u64 wwid;
+ u16 handle;
+ int id;
+ int channel;
+ u8 volume_type;
+ u8 num_pds;
+ u8 responding;
+ u8 percent_complete;
+ u32 device_info;
+};
+
+/**
+ * struct _boot_device - boot device info
+ * @is_raid: flag to indicate whether this is a RAID volume
+ * @device: holds pointer for either struct _sas_device or
+ * struct _raid_device
+ */
+struct _boot_device {
+ u8 is_raid;
+ void *device;
+};
+
+/**
+ * struct _sas_port - wide/narrow sas port information
+ * @port_list: list of ports belonging to expander
+ * @num_phys: number of phys belonging to this port
+ * @remote_identify: attached device identification
+ * @rphy: sas transport rphy object
+ * @port: sas transport wide/narrow port object
+ * @phy_list: _sas_phy list objects belonging to this port
+ */
+struct _sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct _sas_phy - phy information
+ * @port_siblings: list of phys belonging to a port
+ * @identify: phy identification
+ * @remote_identify: attached device identification
+ * @phy: sas transport phy object
+ * @phy_id: unique phy id
+ * @handle: device handle for this phy
+ * @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
+ */
+struct _sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+};
+
+/**
+ * struct _sas_node - sas_host/expander information
+ * @list: list of expanders
+ * @parent_dev: parent device class
+ * @num_phys: number of phys belonging to this sas_host/expander
+ * @sas_address: sas address of this sas_host/expander
+ * @handle: handle for this sas_host/expander
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle when this is a member of an enclosure
+ * @device_info: bitwise defining capabilities of this sas_host/expander
+ * @responding: used in _scsih_expander_device_mark_responding
+ * @phy: a list of phys that make up this sas_host/expander
+ * @sas_port_list: list of ports attached to this sas_host/expander
+ */
+struct _sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 responding;
+ struct _sas_phy *phy;
+ struct list_head sas_port_list;
+};
+
+/**
+ * enum reset_type - reset state
+ * @FORCE_BIG_HAMMER: issue diagnostic reset
+ * @SOFT_RESET: issue message_unit_reset; if it fails, escalate to big hammer
+ */
+enum reset_type {
+ FORCE_BIG_HAMMER,
+ SOFT_RESET,
+};
+
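+/*
+ * Illustrative sketch (not part of the driver source): callers pass the
+ * reset type to the hard reset handler; SOFT_RESET first tries a message
+ * unit reset and only escalates to the diagnostic reset when that fails:
+ *
+ *     mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER);
+ */
+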
+/**
+ * struct chain_tracker - firmware chain tracker
+ * @chain_buffer: chain buffer
+ * @chain_buffer_dma: physical address
+ * @tracker_list: list of free request (ioc->free_chain_list)
+ */
+struct chain_tracker {
+ void *chain_buffer;
+ dma_addr_t chain_buffer_dma;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct scsiio_tracker - scsi mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct scsiio_tracker {
+ u16 smid;
+ struct scsi_cmnd *scmd;
+ u8 cb_idx;
+ struct list_head chain_list;
+ struct list_head tracker_list;
+};
+
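+/*
+ * Illustrative sketch (not part of the driver source): SCSI IO smids start
+ * at 1, so the tracker for a given smid is found by:
+ *
+ *     struct scsiio_tracker *st = &ioc->scsi_lookup[smid - 1];
+ */
+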
+/**
+ * struct request_tracker - firmware request tracker
+ * @smid: system message id
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct _tr_list - target reset list
+ * @handle: device handle
+ * @state: state machine
+ */
+struct _tr_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
+
+
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: link into ioc->reply_queue_list
+ */
+struct adapter_reply_queue {
+ struct MPT3SAS_ADAPTER *ioc;
+ u8 msix_index;
+ unsigned int vector;
+ u32 reply_post_host_index;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ char name[MPT_NAME_LENGTH];
+ atomic_t busy;
+ struct list_head list;
+};
+
+typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+
+/* SAS3.0 support */
+typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid);
+typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz,
+ dma_addr_t data_in_dma, size_t data_in_sz);
+typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
+ void *paddr);
+
+
+
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi3_version_union {
+ MPI2_VERSION_STRUCT Struct;
+ u32 Word;
+};
+
+struct mpt3sas_facts {
+ u16 MsgVersion;
+ u16 HeaderVersion;
+ u8 IOCNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u16 IOCExceptions;
+ u16 IOCStatus;
+ u32 IOCLogInfo;
+ u8 MaxChainDepth;
+ u8 WhoInit;
+ u8 NumberOfPorts;
+ u8 MaxMSIxVectors;
+ u16 RequestCredit;
+ u16 ProductID;
+ u32 IOCCapabilities;
+ union mpi3_version_union FWVersion;
+ u16 IOCRequestFrameSize;
+ u16 Reserved3;
+ u16 MaxInitiators;
+ u16 MaxTargets;
+ u16 MaxSasExpanders;
+ u16 MaxEnclosures;
+ u16 ProtocolFlags;
+ u16 HighPriorityCredit;
+ u16 MaxReplyDescriptorPostQueueDepth;
+ u8 ReplyFrameSize;
+ u8 MaxVolumes;
+ u16 MaxDevHandle;
+ u16 MaxPersistentEntries;
+ u16 MinDevHandle;
+};
+
+struct mpt3sas_port_facts {
+ u8 PortNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u8 PortType;
+ u16 MaxPostedCmdBuffers;
+};
+
+/**
+ * enum mutex_type - task management mutex type
+ * @TM_MUTEX_OFF: mutex is not required because the calling function is already acquiring it
+ * @TM_MUTEX_ON: mutex is required
+ */
+enum mutex_type {
+ TM_MUTEX_OFF = 0,
+ TM_MUTEX_ON = 1,
+};
+
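+/*
+ * Illustrative sketch (not part of the driver source): TM_MUTEX_ON is passed
+ * when the caller does not already hold ioc->tm_cmds.mutex; the handle, lun
+ * and smid values below are hypothetical:
+ *
+ *     mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ *         MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, 0, TM_MUTEX_ON);
+ */
+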
+typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
+/**
+ * struct MPT3SAS_ADAPTER - per adapter struct
+ * @list: ioc_list
+ * @shost: shost object
+ * @id: unique adapter id
+ * @cpu_count: number of online cpus
+ * @name: generic ioc string
+ * @tmp_string: tmp string used for logging
+ * @pdev: pci pdev object
+ * @pio_chip: physical io register space
+ * @chip: memory mapped register space
+ * @chip_phys: physical address prior to mapping
+ * @logging_level: see mpt3sas_debug.h
+ * @fwfault_debug: debugging FW timeouts
+ * @ir_firmware: IR firmware present
+ * @bars: bitmask of BAR's that must be configured
+ * @mask_interrupts: ignore interrupt
+ * @fault_reset_work_q_name: fw fault work queue
+ * @fault_reset_work_q: ""
+ * @fault_reset_work: ""
+ * @firmware_event_name: fw event work queue
+ * @firmware_event_thread: ""
+ * @fw_event_lock:
+ * @fw_event_list: list of fw events
+ * @aen_event_read_flag: event log was read
+ * @broadcast_aen_busy: broadcast aen waiting to be serviced
+ * @shost_recovery: host reset in progress
+ * @ioc_reset_in_progress_lock:
+ * @ioc_link_reset_in_progress: phy/hard reset in progress
+ * @ignore_loginfos: ignore loginfos during task management
+ * @remove_host: flag for when driver unloads, to avoid sending dev resets
+ * @pci_error_recovery: flag to prevent ioc access until slot reset completes
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ * waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work
+ * @start_scan_failed: means port enable failed; returns the ioc_status
+ * @msix_enable: flag indicating msix is enabled
+ * @msix_vector_count: number of msix vectors
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @scsi_io_cb_idx: shost generated commands
+ * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
+ * @transport_cb_idx: transport internal commands
+ * @ctl_cb_idx: ctl internal commands
+ * @base_cb_idx: base internal commands
+ * @config_cb_idx: config page internal commands
+ * @tm_tr_cb_idx : device removal target reset handshake
+ * @tm_tr_volume_cb_idx : volume removal target reset
+ * @base_cmds:
+ * @transport_cmds:
+ * @scsih_cmds:
+ * @tm_cmds:
+ * @ctl_cmds:
+ * @config_cmds:
+ * @base_add_sg_single: handler for either 32/64 bit sgl's
+ * @event_type: bits indicating which events to log
+ * @event_context: unique id for each logged event
+ * @event_log: event log pointer
+ * @event_masks: events that are masked
+ * @facts: static facts data
+ * @pfacts: static port facts data
+ * @manu_pg0: static manufacturing page 0
+ * @manu_pg10: static manufacturing page 10
+ * @manu_pg11: static manufacturing page 11
+ * @bios_pg2: static bios page 2
+ * @bios_pg3: static bios page 3
+ * @ioc_pg8: static ioc page 8
+ * @iounit_pg0: static iounit page 0
+ * @iounit_pg1: static iounit page 1
+ * @sas_hba: sas host object
+ * @sas_expander_list: expander object list
+ * @sas_node_lock:
+ * @sas_device_list: sas device object list
+ * @sas_device_init_list: sas device object list (used only at init time)
+ * @sas_device_lock:
+ * @io_missing_delay: time for IO completed by fw when PDR enabled
+ * @device_missing_delay: time for device missing by fw when PDR enabled
+ * @sas_id : used for setting volume target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
+ * @pd_handles : bitmask for PD handles
+ * @pd_handles_sz : size of pd_handle bitmask
+ * @config_page_sz: config page size
+ * @config_page: reserve memory for config page payload
+ * @config_page_dma:
+ * @hba_queue_depth: hba request queue depth
+ * @sge_size: sg element size for either 32/64 bit
+ * @scsiio_depth: SCSI_IO queue depth
+ * @request_sz: per request frame size
+ * @request: pool of request frames
+ * @request_dma:
+ * @request_dma_sz:
+ * @scsi_lookup: firmware request tracker list
+ * @scsi_lookup_lock:
+ * @free_list: free list of request
+ * @pending_io_count:
+ * @reset_wq:
+ * @chain: pool of chains
+ * @chain_dma:
+ * @max_sges_in_main_message: number of sg elements in main message
+ * @max_sges_in_chain_message: number of sg elements per chain
+ * @chains_needed_per_io: max chains per io
+ * @chain_depth: total chains allocated
+ * @hi_priority_smid:
+ * @hi_priority:
+ * @hi_priority_dma:
+ * @hi_priority_depth:
+ * @hpr_lookup:
+ * @hpr_free_list:
+ * @internal_smid:
+ * @internal:
+ * @internal_dma:
+ * @internal_depth:
+ * @internal_lookup:
+ * @internal_free_list:
+ * @sense: pool of sense
+ * @sense_dma:
+ * @sense_dma_pool:
+ * @reply_depth: hba reply queue depth
+ * @reply_sz: per reply frame size
+ * @reply: pool of replies
+ * @reply_dma:
+ * @reply_dma_pool:
+ * @reply_free_queue_depth: reply free depth
+ * @reply_free: pool for reply free queue (32 bit addr)
+ * @reply_free_dma:
+ * @reply_free_dma_pool:
+ * @reply_free_host_index: tail index in pool to insert free replies
+ * @reply_post_queue_depth: reply post queue depth
+ * @reply_post_free: pool for reply post (64bit descriptor)
+ * @reply_post_free_dma:
+ * @reply_queue_count: number of reply queues
+ * @reply_queue_list: linked list containing the reply queue info
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @delayed_tr_list: target reset linked list
+ * @delayed_tr_volume_list: volume target reset linked list
+ */
+struct MPT3SAS_ADAPTER {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ char name[MPT_NAME_LENGTH];
+ char tmp_string[MPT_STRING_LENGTH];
+ struct pci_dev *pdev;
+ Mpi2SystemInterfaceRegs_t __iomem *chip;
+ resource_size_t chip_phys;
+ int logging_level;
+ int fwfault_debug;
+ u8 ir_firmware;
+ int bars;
+ u8 mask_interrupts;
+
+ /* fw fault handler */
+ char fault_reset_work_q_name[20];
+ struct workqueue_struct *fault_reset_work_q;
+ struct delayed_work fault_reset_work;
+
+ /* fw event handler */
+ char firmware_event_name[20];
+ struct workqueue_struct *firmware_event_thread;
+ spinlock_t fw_event_lock;
+ struct list_head fw_event_list;
+
+ /* misc flags */
+ int aen_event_read_flag;
+ u8 broadcast_aen_busy;
+ u16 broadcast_aen_pending;
+ u8 shost_recovery;
+
+ struct mutex reset_in_progress_mutex;
+ spinlock_t ioc_reset_in_progress_lock;
+ u8 ioc_link_reset_in_progress;
+ u8 ioc_reset_in_progress_status;
+
+ u8 ignore_loginfos;
+ u8 remove_host;
+ u8 pci_error_recovery;
+ u8 wait_for_discovery_to_complete;
+ u8 is_driver_loading;
+ u8 port_enable_failed;
+ u8 start_scan;
+ u16 start_scan_failed;
+
+ u8 msix_enable;
+ u16 msix_vector_count;
+ u8 *cpu_msix_table;
+ u16 cpu_msix_table_sz;
+ u32 ioc_reset_count;
+ MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+
+ /* internal commands, callback index */
+ u8 scsi_io_cb_idx;
+ u8 tm_cb_idx;
+ u8 transport_cb_idx;
+ u8 scsih_cb_idx;
+ u8 ctl_cb_idx;
+ u8 base_cb_idx;
+ u8 port_enable_cb_idx;
+ u8 config_cb_idx;
+ u8 tm_tr_cb_idx;
+ u8 tm_tr_volume_cb_idx;
+ u8 tm_sas_control_cb_idx;
+ struct _internal_cmd base_cmds;
+ struct _internal_cmd port_enable_cmds;
+ struct _internal_cmd transport_cmds;
+ struct _internal_cmd scsih_cmds;
+ struct _internal_cmd tm_cmds;
+ struct _internal_cmd ctl_cmds;
+ struct _internal_cmd config_cmds;
+
+ MPT_ADD_SGE base_add_sg_single;
+
+ /* function ptr for either IEEE or MPI sg elements */
+ MPT_BUILD_SG_SCMD build_sg_scmd;
+ MPT_BUILD_SG build_sg;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge;
+ u8 mpi25;
+ u16 sge_size_ieee;
+
+ /* function ptr for MPI sg elements only */
+ MPT_BUILD_SG build_sg_mpi;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
+
+ /* event log */
+ u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+ u32 event_context;
+ void *event_log;
+ u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ /* static config pages */
+ struct mpt3sas_facts facts;
+ struct mpt3sas_port_facts *pfacts;
+ Mpi2ManufacturingPage0_t manu_pg0;
+ struct Mpi2ManufacturingPage10_t manu_pg10;
+ struct Mpi2ManufacturingPage11_t manu_pg11;
+ Mpi2BiosPage2_t bios_pg2;
+ Mpi2BiosPage3_t bios_pg3;
+ Mpi2IOCPage8_t ioc_pg8;
+ Mpi2IOUnitPage0_t iounit_pg0;
+ Mpi2IOUnitPage1_t iounit_pg1;
+
+ struct _boot_device req_boot_device;
+ struct _boot_device req_alt_boot_device;
+ struct _boot_device current_boot_device;
+
+ /* sas hba, expander, and device list */
+ struct _sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head sas_device_list;
+ struct list_head sas_device_init_list;
+ spinlock_t sas_device_lock;
+ struct list_head raid_device_list;
+ spinlock_t raid_device_lock;
+ u8 io_missing_delay;
+ u16 device_missing_delay;
+ int sas_id;
+
+ void *blocking_handles;
+ void *pd_handles;
+ u16 pd_handles_sz;
+
+ /* config page */
+ u16 config_page_sz;
+ void *config_page;
+ dma_addr_t config_page_dma;
+
+ /* scsiio request */
+ u16 hba_queue_depth;
+ u16 sge_size;
+ u16 scsiio_depth;
+ u16 request_sz;
+ u8 *request;
+ dma_addr_t request_dma;
+ u32 request_dma_sz;
+ struct scsiio_tracker *scsi_lookup;
+ ulong scsi_lookup_pages;
+ spinlock_t scsi_lookup_lock;
+ struct list_head free_list;
+ int pending_io_count;
+ wait_queue_head_t reset_wq;
+
+ /* chain */
+ struct chain_tracker *chain_lookup;
+ struct list_head free_chain_list;
+ struct dma_pool *chain_dma_pool;
+ ulong chain_pages;
+ u16 max_sges_in_main_message;
+ u16 max_sges_in_chain_message;
+ u16 chains_needed_per_io;
+ u32 chain_depth;
+
+ /* hi-priority queue */
+ u16 hi_priority_smid;
+ u8 *hi_priority;
+ dma_addr_t hi_priority_dma;
+ u16 hi_priority_depth;
+ struct request_tracker *hpr_lookup;
+ struct list_head hpr_free_list;
+
+ /* internal queue */
+ u16 internal_smid;
+ u8 *internal;
+ dma_addr_t internal_dma;
+ u16 internal_depth;
+ struct request_tracker *internal_lookup;
+ struct list_head internal_free_list;
+
+ /* sense */
+ u8 *sense;
+ dma_addr_t sense_dma;
+ struct dma_pool *sense_dma_pool;
+
+ /* reply */
+ u16 reply_sz;
+ u8 *reply;
+ dma_addr_t reply_dma;
+ u32 reply_dma_max_address;
+ u32 reply_dma_min_address;
+ struct dma_pool *reply_dma_pool;
+
+ /* reply free queue */
+ u16 reply_free_queue_depth;
+ __le32 *reply_free;
+ dma_addr_t reply_free_dma;
+ struct dma_pool *reply_free_dma_pool;
+ u32 reply_free_host_index;
+
+ /* reply post queue */
+ u16 reply_post_queue_depth;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ dma_addr_t reply_post_free_dma;
+ struct dma_pool *reply_post_free_dma_pool;
+ u8 reply_queue_count;
+ struct list_head reply_queue_list;
+
+ struct list_head delayed_tr_list;
+ struct list_head delayed_tr_volume_list;
+
+ /* diag buffer support */
+ u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT];
+ dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
+ u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
+ u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 ring_buffer_offset;
+ u32 ring_buffer_sz;
+ spinlock_t diag_trigger_lock;
+ u8 diag_trigger_active;
+ struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
+ struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
+ struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
+ struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+};
+
+typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+
+
+/* base shared API */
+extern struct list_head mpt3sas_ioc_list;
+void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc);
+
+int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type);
+
+void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
+void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc);
+
+/* hi-priority queue */
+u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd);
+
+u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_initialize_callback_handler(void);
+u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
+void mpt3sas_base_release_callback_handler(u8 cb_idx);
+
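+/*
+ * Illustrative sketch (not part of the driver source): a sub-module registers
+ * its completion callback once at load time and uses the returned index when
+ * requesting smids, e.g.:
+ *
+ *     ioc->base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
+ *     smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ */
+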
+u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply);
+void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
+ u32 phys_addr);
+
+u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
+
+void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code);
+int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request);
+int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
+
+void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc,
+ u32 *event_type);
+
+void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc);
+
+void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay);
+
+int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
+
+
+/* scsih shared API */
+u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply);
+void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, unsigned long serial_number, enum mutex_type m_type);
+void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address);
+
+struct _sas_node *mpt3sas_scsih_expander_find_by_handle(
+ struct MPT3SAS_ADAPTER *ioc, u16 handle);
+struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address(
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+struct _sas_device *mpt3sas_scsih_sas_device_find_by_sas_address(
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+
+void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
+
+/* config shared API */
+u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc,
+ u8 *num_phys);
+int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage10_t *config_page);
+
+int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page);
+int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page);
+
+int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage2_t *config_page);
+int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage0_t *config_page);
+int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz);
+int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOCPage8_t *config_page);
+int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page,
+ u32 phy_number, u16 handle);
+int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number);
+int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number);
+int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle);
+int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds);
+int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz);
+int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page,
+ u32 form, u32 form_specific);
+int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle);
+int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc,
+ u16 volume_handle, u64 *wwid);
+
+/* ctl shared API */
+extern struct device_attribute *mpt3sas_host_attrs[];
+extern struct device_attribute *mpt3sas_dev_attrs[];
+void mpt3sas_ctl_init(void);
+void mpt3sas_ctl_exit(void);
+u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
+ u8 msix_index, u32 reply);
+void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply);
+
+void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc,
+ u8 bits_to_regsiter);
+int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset);
+
+/* transport shared API */
+u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, u64 sas_address);
+void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent);
+int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
+int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev);
+void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
+extern struct sas_function_template mpt3sas_transport_functions;
+extern struct scsi_transport_template *mpt3sas_transport_template;
+extern int scsi_internal_device_block(struct scsi_device *sdev);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
+/* trigger data externs */
+void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc,
+ u32 tigger_bitmask);
+void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier);
+void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key,
+ u8 asc, u8 ascq);
+void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
+ u32 loginfo);
+#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
new file mode 100644
index 000000000000..ce7e59b2fc08
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -0,0 +1,1650 @@
+/*
+ * This module provides common API for accessing firmware configuration pages
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include "mpt3sas_base.h"
+
+/* local definitions */
+
+/* Timeout for config page request (in seconds) */
+#define MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT 15
+
+/* Common sgl flags for READING a config page. */
+#define MPT3_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT)
+
+/* Common sgl flags for WRITING a config page. */
+#define MPT3_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \
+ << MPI2_SGE_FLAGS_SHIFT)
+
+/**
+ * struct config_request - DMA-able memory used for a config page request
+ * @sz: size
+ * @page: virt pointer
+ * @page_dma: phys pointer
+ *
+ */
+struct config_request {
+ u16 sz;
+ void *page;
+ dma_addr_t page_dma;
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _config_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ return;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
+ case MPI2_CONFIG_PAGETYPE_IO_UNIT:
+ desc = "io_unit";
+ break;
+ case MPI2_CONFIG_PAGETYPE_IOC:
+ desc = "ioc";
+ break;
+ case MPI2_CONFIG_PAGETYPE_BIOS:
+ desc = "bios";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_VOLUME:
+ desc = "raid_volume";
+ break;
+ case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
+ desc = "manufaucturing";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
+ desc = "physdisk";
+ break;
+ case MPI2_CONFIG_PAGETYPE_EXTENDED:
+ switch (mpi_request->ExtPageType) {
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT:
+ desc = "sas_io_unit";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER:
+ desc = "sas_expander";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE:
+ desc = "sas_device";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY:
+ desc = "sas_phy";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_LOG:
+ desc = "log";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE:
+ desc = "enclosure";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG:
+ desc = "raid_config";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
+ desc = "driver_mappping";
+ break;
+ }
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT
+ "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+ ioc->name, calling_function_name, desc,
+ mpi_request->Header.PageNumber, mpi_request->Action,
+ le32_to_cpu(mpi_request->PageAddress), smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+}
+#endif
+
+/**
+ * _config_alloc_config_dma_memory - obtain DMA-able memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper for obtaining dma-able memory for config page request.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ int r = 0;
+
+ if (mem->sz > ioc->config_page_sz) {
+ mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
+ &mem->page_dma, GFP_KERNEL);
+ if (!mem->page) {
+ pr_err(MPT3SAS_FMT
+ "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+ ioc->name, __func__, mem->sz);
+ r = -ENOMEM;
+ }
+ } else { /* use the pre-allocated buffer when the request fits */
+ mem->page = ioc->config_page;
+ mem->page_dma = ioc->config_page_dma;
+ }
+ return r;
+}
+
+/**
+ * _config_free_config_dma_memory - wrapper to free the memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper to free DMA-able memory allocated by
+ * _config_alloc_config_dma_memory.
+ */
+static void
+_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ if (mem->sz > ioc->config_page_sz)
+ dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
+ mem->page_dma);
+}
+
+/**
+ * mpt3sas_config_done - config page completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using _config_request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->config_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->config_cmds.smid != smid)
+ return 1;
+ ioc->config_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ ioc->config_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->config_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ }
+ ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+#endif
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ return 1;
+}
+
+/**
+ * _config_request - main routine for sending config page requests
+ * @ioc: per adapter object
+ * @mpi_request: request message frame
+ * @mpi_reply: reply mf payload returned from firmware
+ * @timeout: timeout in seconds
+ * @config_page: contents of the config page
+ * @config_page_sz: size of config page
+ * Context: sleep
+ *
+ * A generic API for config page requests to firmware.
+ *
+ * The ioc->config_cmds.status flag should be MPT3_CMD_NOT_USED before calling
+ * this API.
+ *
+ * The callback index is set inside ioc->config_cb_idx.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
+ void *config_page, u16 config_page_sz)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ Mpi2ConfigRequest_t *config_request;
+ int r;
+ u8 retry_count, issue_host_reset = 0;
+ u16 wait_state_count;
+ struct config_request mem;
+ u32 ioc_status = UINT_MAX;
+
+ mutex_lock(&ioc->config_cmds.mutex);
+ if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
+ ioc->name, __func__);
+ mutex_unlock(&ioc->config_cmds.mutex);
+ return -EAGAIN;
+ }
+
+ retry_count = 0;
+ memset(&mem, 0, sizeof(struct config_request));
+
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ if (config_page) {
+ mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
+ mpi_request->Header.PageType = mpi_reply->Header.PageType;
+ mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
+ mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
+ mpi_request->ExtPageType = mpi_reply->ExtPageType;
+ if (mpi_request->Header.PageLength)
+ mem.sz = mpi_request->Header.PageLength * 4;
+ else
+ mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
+ r = _config_alloc_config_dma_memory(ioc, &mem);
+ if (r != 0)
+ goto out;
+ if (mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+ mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
+ mem.page_dma);
+ memcpy(mem.page, config_page, min_t(u16, mem.sz,
+ config_page_sz));
+ } else {
+ memset(config_page, 0, config_page_sz);
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
+ memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz));
+ }
+ }
+
+ retry_config:
+ if (retry_count) {
+ if (retry_count > 2) { /* attempt only 2 retries */
+ r = -EFAULT;
+ goto free_mem;
+ }
+ pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
+ ioc->name, __func__, retry_count);
+ }
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EFAULT;
+ goto free_mem;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EAGAIN;
+ goto free_mem;
+ }
+
+ r = 0;
+ memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+ ioc->config_cmds.status = MPT3_CMD_PENDING;
+ config_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->config_cmds.smid = smid;
+ memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_request", NULL);
+#endif
+ init_completion(&ioc->config_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
+ timeout*HZ);
+ if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
+ retry_count++;
+ if (ioc->config_cmds.smid == smid)
+ mpt3sas_base_free_smid(ioc, smid);
+ if ((ioc->shost_recovery) || (ioc->config_cmds.status &
+ MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ goto retry_config;
+ issue_host_reset = 1;
+ r = -EFAULT;
+ goto free_mem;
+ }
+
+ if (ioc->config_cmds.status & MPT3_CMD_REPLY_VALID) {
+ memcpy(mpi_reply, ioc->config_cmds.reply,
+ sizeof(Mpi2ConfigReply_t));
+
+ /* Reply Frame Sanity Checks to workaround FW issues */
+ if ((mpi_request->Header.PageType & 0xF) !=
+ (mpi_reply->Header.PageType & 0xF)) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
+ " mpi_reply mismatch: Requested PageType(0x%02x)" \
+ " Reply PageType(0x%02x)\n", \
+ ioc->name, __func__,
+ (mpi_request->Header.PageType & 0xF),
+ (mpi_reply->Header.PageType & 0xF));
+ }
+
+ if (((mpi_request->Header.PageType & 0xF) ==
+ MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+ mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
+ " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
+ " Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__, mpi_request->ExtPageType,
+ mpi_reply->ExtPageType);
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ }
+
+ if (retry_count)
+ pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
+ ioc->name, __func__, retry_count);
+
+ if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
+ config_page && mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) {
+ u8 *p = (u8 *)mem.page;
+
+ /* Config Page Sanity Checks to workaround FW issues */
+ if (p) {
+ if ((mpi_request->Header.PageType & 0xF) !=
+ (p[3] & 0xF)) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_config(p, min_t(u16, mem.sz,
+ config_page_sz)/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG:" \
+ " config page mismatch:"
+ " Requested PageType(0x%02x)"
+ " Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ (mpi_request->Header.PageType & 0xF),
+ (p[3] & 0xF));
+ }
+
+ if (((mpi_request->Header.PageType & 0xF) ==
+ MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+ (mpi_request->ExtPageType != p[6])) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_config(p, min_t(u16, mem.sz,
+ config_page_sz)/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG:" \
+ " config page mismatch:"
+ " Requested ExtPageType(0x%02x)"
+ " Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType, p[6]);
+ }
+ }
+ memcpy(config_page, mem.page, min_t(u16, mem.sz,
+ config_page_sz));
+ }
+
+ free_mem:
+ if (config_page)
+ _config_free_config_dma_memory(ioc, &mem);
+ out:
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->config_cmds.mutex);
+
+ if (issue_host_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
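+
+/*
+ * Usage sketch (illustrative only, not called anywhere in this file): every
+ * getter in this file follows the same two-step pattern -- a PAGE_HEADER
+ * action to retrieve the page header, then a READ_CURRENT action into a
+ * caller-supplied buffer.  A hypothetical caller, with error handling and
+ * the IOCStatus check trimmed, and assuming the MPI2-defined BoardName
+ * field of manufacturing page 0, might look like:
+ *
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	Mpi2ManufacturingPage0_t manu_pg0;
+ *
+ *	if (mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &manu_pg0))
+ *		return -ENXIO;
+ *	pr_info(MPT3SAS_FMT "board name: %s\n", ioc->name,
+ *	    manu_pg0.BoardName);
+ */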
+
+/**
+ * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 7;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage10_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 10;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg11 - obtain manufacturing page 11
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 11;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_manufacturing_pg11 - set manufacturing page 11
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 11;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg2 - obtain bios page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg3 - obtain bios page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg0 - obtain iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg1 - obtain iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_iounit_pg1 - set iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
+ mpi_request.Header.PageNumber = 1;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host
+ * @ioc: per adapter object
+ * @num_phys: pointer returned with the number of phys
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+ u16 ioc_status;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t config_page;
+
+ *num_phys = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2SasIOUnitPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_phys = config_page.NumPhys;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
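+
+/*
+ * Sizing sketch (illustrative only): SAS IO unit page 0 is variable length,
+ * so callers are expected to query the phy count first and size the buffer
+ * from it.  This assumes the MPI2-defined per-phy type
+ * Mpi2SasIOUnit0PhyData_t; error handling is trimmed:
+ *
+ *	u8 num_phys;
+ *	u16 sz;
+ *	Mpi2SasIOUnitPage0_t *sas_iounit_pg0;
+ *	Mpi2ConfigReply_t mpi_reply;
+ *
+ *	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ *	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
+ *	    (num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ *	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ *	if (sas_iounit_pg0)
+ *		mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ *		    sas_iounit_pg0, sz);
+ *	kfree(sas_iounit_pg0);
+ */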
+
+/**
+ * mpt3sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_expander_pg0 - obtain expander page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_expander_pg1 - obtain expander page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number,
+ u16 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
+ (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_enclosure_pg0 - obtain enclosure page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg0 - obtain phy page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg1 - obtain phy page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_raid_volume_pg1 - obtain raid volume page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_number_pds - obtain number of phys disk assigned to volume
+ * @ioc: per adapter object
+ * @handle: volume handle
+ * @num_pds: returns pds count
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2RaidVolPage0_t config_page;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ u16 ioc_status;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *num_pds = 0;
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2RaidVolPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_pds = config_page.NumPhysDisks;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_raid_volume_pg0 - obtain raid volume page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
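+
+/*
+ * Sizing sketch (illustrative only): RAID volume page 0 is also variable
+ * length, so callers typically obtain the physical disk count first.  This
+ * assumes the MPI2-defined per-disk type Mpi2RaidVol0PhysDisk_t; error
+ * handling is trimmed:
+ *
+ *	u8 num_pds;
+ *	u16 sz;
+ *	Mpi2RaidVolPage0_t *vol_pg0;
+ *	Mpi2ConfigReply_t mpi_reply;
+ *
+ *	mpt3sas_config_get_number_pds(ioc, handle, &num_pds);
+ *	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) +
+ *	    (num_pds * sizeof(Mpi2RaidVol0PhysDisk_t));
+ *	vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ *	if (vol_pg0)
+ *		mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ *		    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sz);
+ *	kfree(vol_pg0);
+ */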
+
+/**
+ * mpt3sas_config_get_phys_disk_pg0 - obtain phys disk page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE
+ * @form_specific: specific to the form
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
+ u32 form_specific)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | form_specific);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_handle - returns volume handle for a given hidden
+ * raid component
+ * @ioc: per adapter object
+ * @pd_handle: phys disk handle
+ * @volume_handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle)
+{
+ Mpi2RaidConfigurationPage0_t *config_page = NULL;
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2ConfigReply_t mpi_reply;
+ int r, i, config_page_sz;
+ u16 ioc_status;
+ int config_num;
+ u16 element_type;
+ u16 phys_disk_dev_handle;
+
+ *volume_handle = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG;
+ mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
+ config_page = kmalloc(config_page_sz, GFP_KERNEL);
+ if (!config_page) {
+ r = -1;
+ goto out;
+ }
+
+ config_num = 0xff;
+ while (1) {
+ mpi_request.PageAddress = cpu_to_le32(config_num +
+ MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ config_page_sz);
+ if (r)
+ goto out;
+ r = -1;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < config_page->NumElements; i++) {
+ element_type = le16_to_cpu(config_page->
+ ConfigElement[i].ElementFlags) &
+ MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+ if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+ element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+ phys_disk_dev_handle =
+ le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle);
+ if (phys_disk_dev_handle == pd_handle) {
+ *volume_handle =
+ le16_to_cpu(config_page->
+ ConfigElement[i].VolDevHandle);
+ r = 0;
+ goto out;
+ }
+ } else if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
+ *volume_handle = 0;
+ r = 0;
+ goto out;
+ }
+ }
+ config_num = config_page->ConfigNum;
+ }
+ out:
+ kfree(config_page);
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_wwid - returns wwid given the volume handle
+ * @ioc: per adapter object
+ * @volume_handle: volume handle
+ * @wwid: volume wwid
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle,
+ u64 *wwid)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2RaidVolPage1_t raid_vol_pg1;
+
+ *wwid = 0;
+ if (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
+ volume_handle))) {
+ *wwid = le64_to_cpu(raid_vol_pg1.WWID);
+ return 0;
+ } else
+ return -1;
+}
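+
+/*
+ * Lookup sketch (illustrative only): mapping a hidden raid component back to
+ * its volume typically chains the two helpers above; pd_handle is a
+ * hypothetical physical disk device handle obtained elsewhere:
+ *
+ *	u16 volume_handle;
+ *	u64 volume_wwid;
+ *
+ *	if (!mpt3sas_config_get_volume_handle(ioc, pd_handle, &volume_handle)
+ *	    && volume_handle)
+ *		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+ *		    &volume_wwid);
+ */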
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
new file mode 100644
index 000000000000..8af944d7d13d
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -0,0 +1,3297 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+#include "mpt3sas_ctl.h"
+
+
+static struct fasync_struct *async_queue;
+static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
+
+
+/**
+ * enum block_state - blocking state
+ * @NON_BLOCKING: non blocking
+ * @BLOCKING: blocking
+ *
+ * These states are for ioctls that need to wait for a response
+ * from firmware, so they probably require sleep.
+ */
+enum block_state {
+ NON_BLOCKING,
+ BLOCKING,
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _ctl_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on handle, then returns the sas_device
+ * object.
+ */
+static struct _sas_device *
+_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->handle != handle)
+ continue;
+ r = sas_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _ctl_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string passed from the calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
+ return;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "scsi_io, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ desc = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ desc = "ioc_init";
+ break;
+ case MPI2_FUNCTION_IOC_FACTS:
+ desc = "ioc_facts";
+ break;
+ case MPI2_FUNCTION_CONFIG:
+ {
+ Mpi2ConfigRequest_t *config_request =
+ (Mpi2ConfigRequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "config, type(0x%02x), ext_type(0x%02x), number(%d)",
+ (config_request->Header.PageType &
+ MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
+ config_request->Header.PageNumber);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_PORT_FACTS:
+ desc = "port_facts";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ desc = "port_enable";
+ break;
+ case MPI2_FUNCTION_EVENT_NOTIFICATION:
+ desc = "event_notification";
+ break;
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ desc = "fw_download";
+ break;
+ case MPI2_FUNCTION_FW_UPLOAD:
+ desc = "fw_upload";
+ break;
+ case MPI2_FUNCTION_RAID_ACTION:
+ desc = "raid_action";
+ break;
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "raid_pass, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ desc = "sas_iounit_cntl";
+ break;
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ desc = "sata_pass";
+ break;
+ case MPI2_FUNCTION_DIAG_BUFFER_POST:
+ desc = "diag_buffer_post";
+ break;
+ case MPI2_FUNCTION_DIAG_RELEASE:
+ desc = "diag_release";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ desc = "smp_passthrough";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
+ ioc->name, calling_function_name, desc, smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ Mpi2SCSIIOReply_t *scsi_reply =
+ (Mpi2SCSIIOReply_t *)mpi_reply;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _ctl_sas_device_find_by_handle(ioc,
+ le16_to_cpu(scsi_reply->DevHandle));
+ if (sas_device) {
+ pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
+ pr_info(MPT3SAS_FMT
+ "\tscsi_state(0x%02x), scsi_status"
+ "(0x%02x)\n", ioc->name,
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
+ }
+}
+
+#endif
+
+/**
+ * mpt3sas_ctl_done - ctl module completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using ioc->ctl_cb_idx.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi2SCSIIOReply_t *scsiio_reply;
+ const void *sense_data;
+ u32 sz;
+
+ if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->ctl_cmds.smid != smid)
+ return 1;
+ ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
+ /* get sense data */
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_reply->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
+ if (scsiio_reply->SCSIState &
+ MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->SenseCount));
+ sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
+ }
+ }
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
+#endif
+ ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->ctl_cmds.done);
+ return 1;
+}
+
+/**
+ * _ctl_check_event_type - determines when an event needs logging
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The bitmask in ioc->event_type[] indicates which events should be
+ * saved in the driver event_log. This bitmask is set by the application.
+ *
+ * Returns nonzero when the event should be captured, or zero when there
+ * is no match.
+ */
+static int
+_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u16 i;
+ u32 desired_event;
+
+ if (event >= 128 || !event || !ioc->event_log)
+ return 0;
+
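+ /* ioc->event_type[] is an array of 32-bit masks: word (event / 32),
+  * bit (event % 32) selects whether this firmware event is logged.
+  */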
+ desired_event = (1 << (event % 32));
+ if (!desired_event)
+ desired_event = 1;
+ i = event / 32;
+ return desired_event & ioc->event_type[i];
+}
+
+/**
+ * mpt3sas_ctl_add_to_event_log - add event
+ * @ioc: per adapter object
+ * @mpi_reply: reply message frame
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ struct MPT3_IOCTL_EVENTS *event_log;
+ u16 event;
+ int i;
+ u32 sz, event_data_sz;
+ u8 send_aen = 0;
+
+ if (!ioc->event_log)
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (_ctl_check_event_type(ioc, event)) {
+
+ /* insert entry into circular event_log */
+ i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
+ event_log = ioc->event_log;
+ event_log[i].event = event;
+ event_log[i].context = ioc->event_context++;
+
+ event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
+ sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
+ memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
+ memcpy(event_log[i].data, mpi_reply->EventData, sz);
+ send_aen = 1;
+ }
+
+ /* The aen_event_read_flag is set until the
+  * application has read the event log.
+  * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
+ */
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
+ (send_aen && !ioc->aen_event_read_flag)) {
+ ioc->aen_event_read_flag = 1;
+ wake_up_interruptible(&ctl_poll_wait);
+ if (async_queue)
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ }
+}
+
+/**
+ * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely copies the event into the driver event log
+ * (see mpt3sas_ctl_add_to_event_log).
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ return 1;
+}
+
+/**
+ * _ctl_verify_adapter - validates ioc_number passed from application
+ * @ioc_number: ioc number (adapter index) passed from the application
+ * @iocpp: The matching ioc pointer is returned in this.
+ *
+ * Return (-1) means error, else ioc_number.
+ */
+static int
+_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->id != ioc_number)
+ continue;
+ *iocpp = ioc;
+ return ioc_number;
+ }
+ *iocpp = NULL;
+ return -1;
+}
+
+/**
+ * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ */
+void
+mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ int i;
+ u8 issue_reset;
+
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ mpt3sas_send_diag_release(ioc, i, &issue_reset);
+ }
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ }
+ break;
+ }
+}
+
+/**
+ * _ctl_fasync -
+ * @fd -
+ * @filep -
+ * @mode -
+ *
+ * Called when the application requests the fasync callback handler.
+ */
+static int
+_ctl_fasync(int fd, struct file *filep, int mode)
+{
+ return fasync_helper(fd, filep, mode, &async_queue);
+}
+
+/**
+ * _ctl_release -
+ * @inode -
+ * @filep -
+ *
+ * Called when the application releases the fasync callback handler.
+ */
+static int
+_ctl_release(struct inode *inode, struct file *filep)
+{
+ return fasync_helper(-1, filep, 0, &async_queue);
+}
+
+/**
+ * _ctl_poll -
+ * @filep -
+ * @wait -
+ *
+ */
+static unsigned int
+_ctl_poll(struct file *filep, poll_table *wait)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ poll_wait(filep, &ctl_poll_wait, wait);
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->aen_event_read_flag)
+ return POLLIN | POLLRDNORM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_set_task_mid - assign an active smid to tm request
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @tm_request - pointer to mf from user space
+ *
+ * Returns 0 when an smid is found; otherwise returns 1 and the
+ * reply frame is filled in.
+ */
+static int
+_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
+ Mpi2SCSITaskManagementRequest_t *tm_request)
+{
+ u8 found = 0;
+ u16 i;
+ u16 handle;
+ struct scsi_cmnd *scmd;
+ struct MPT3SAS_DEVICE *priv_data;
+ unsigned long flags;
+ Mpi2SCSITaskManagementReply_t *tm_reply;
+ u32 sz;
+ u32 lun;
+ char *desc = NULL;
+
+ if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ desc = "abort_task";
+ else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ desc = "query_task";
+ else
+ return 0;
+
+ lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
+
+ handle = le16_to_cpu(tm_request->DevHandle);
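+ /* Walk the outstanding scsi_lookup entries for a command matching this
+  * device handle and lun; its smid becomes the TaskMID for the
+  * abort/query task management request.
+  */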
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = ioc->scsiio_depth; i && !found; i--) {
+ scmd = ioc->scsi_lookup[i - 1].scmd;
+ if (scmd == NULL || scmd->device == NULL ||
+ scmd->device->hostdata == NULL)
+ continue;
+ if (lun != scmd->device->lun)
+ continue;
+ priv_data = scmd->device->hostdata;
+ if (priv_data->sas_target == NULL)
+ continue;
+ if (priv_data->sas_target->handle != handle)
+ continue;
+ tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ found = 1;
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!found) {
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+ ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun));
+ tm_reply = ioc->ctl_cmds.reply;
+ tm_reply->DevHandle = tm_request->DevHandle;
+ tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ tm_reply->TaskType = tm_request->TaskType;
+ tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
+ tm_reply->VP_ID = tm_request->VP_ID;
+ tm_reply->VF_ID = tm_request->VF_ID;
+ sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz))
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 1;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
+ return 0;
+}
+
+/**
+ * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @mf - pointer to mf in user space
+ */
+static long
+_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
+ void __user *mf)
+{
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ u32 ioc_state;
+ u16 ioc_status;
+ u16 smid;
+ unsigned long timeout, timeleft;
+ u8 issue_reset;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+ u16 wait_state_count;
+
+ issue_reset = 0;
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
+ if (!mpi_request) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed obtaining a memory for mpi_request\n",
+ ioc->name, __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Check for overflow and wraparound */
+ if (karg.data_sge_offset * 4 > ioc->request_sz ||
+ karg.data_sge_offset > (UINT_MAX / 4)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* copy in request message frame from user */
+ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
+ __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+
+ smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+ ret = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memcpy(request, mpi_request, karg.data_sge_offset*4);
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = karg.data_out_size;
+ data_in_sz = karg.data_in_size;
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+ le16_to_cpu(mpi_request->FunctionDependent1) >
+ ioc->facts.MaxDevHandle) {
+ ret = -EINVAL;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
+ &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ if (copy_from_user(data_out, karg.data_out_buf_ptr,
+ data_out_sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -EFAULT;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
+ &data_in_dma);
+ if (!data_in) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
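+ /* data_sge_offset is expressed in 32-bit dwords, so the scatter-gather
+  * list starts data_sge_offset * 4 bytes into the request frame.
+  */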
+ psge = (void *)request + (karg.data_sge_offset*4);
+
+ /* send command to firmware */
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
+#endif
+
+ init_completion(&ioc->ctl_cmds.done);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsiio_request =
+ (Mpi2SCSIIORequest_t *)request;
+ scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ else
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)request;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+ ioc->name,
+ le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
+ if (tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+ if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ {
+ Mpi2SmpPassthroughRequest_t *smp_request =
+ (Mpi2SmpPassthroughRequest_t *)mpi_request;
+ u8 *data;
+
+ /* ioc determines which port to use */
+ smp_request->PhysicalPort = 0xFF;
+ if (smp_request->PassthroughFlags &
+ MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
+ data = (u8 *)&smp_request->SGL;
+ else {
+ if (unlikely(data_out == NULL)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ data = data_out;
+ }
+
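+ /* Assumption: data[1] == 0x91 is the SMP PHY CONTROL function and
+  * data[10] of 1 or 2 is a LINK RESET or HARD RESET phy operation, so
+  * loginfos are suppressed while this self-inflicted link reset runs.
+  */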
+ if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ case MPI2_FUNCTION_FW_UPLOAD:
+ {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_TOOLBOX:
+ {
+ Mpi2ToolboxCleanRequest_t *toolbox_request =
+ (Mpi2ToolboxCleanRequest_t *)mpi_request;
+
+ if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ } else {
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ }
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ {
+ Mpi2SasIoUnitControlRequest_t *sasiounit_request =
+ (Mpi2SasIoUnitControlRequest_t *)mpi_request;
+
+ if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
+ || sasiounit_request->Operation ==
+ MPI2_SAS_OP_PHY_LINK_RESET) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ /* drop to default case for posting the request */
+ }
+ default:
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+
+ if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+ else
+ timeout = karg.timeout;
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ timeout*HZ);
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
+ ioc->ioc_link_reset_in_progress) {
+ ioc->ioc_link_reset_in_progress = 0;
+ ioc->ignore_loginfos = 0;
+ }
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request, karg.data_sge_offset);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
+ (ioc->logging_level & MPT_DEBUG_TM)) {
+ Mpi2SCSITaskManagementReply_t *tm_reply =
+ (Mpi2SCSITaskManagementReply_t *)mpi_reply;
+
+ pr_info(MPT3SAS_FMT "TASK_MGMT: " \
+ "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
+ "TerminationCount(0x%08x)\n", ioc->name,
+ le16_to_cpu(tm_reply->IOCStatus),
+ le32_to_cpu(tm_reply->IOCLogInfo),
+ le32_to_cpu(tm_reply->TerminationCount));
+ }
+#endif
+ /* copy out data to user */
+ if (data_in_sz) {
+ if (copy_to_user(karg.data_in_buf_ptr, data_in,
+ data_in_sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out reply message frame to user */
+ if (karg.max_reply_bytes) {
+ sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out sense to user */
+ if (karg.max_sense_bytes && (mpi_request->Function ==
+ MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+ sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
+ sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
+ pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
+ ioc->name,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_halt_firmware(ioc);
+ mpt3sas_scsih_issue_tm(ioc,
+ le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
+ 0, TM_MUTEX_ON);
+ } else
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ }
+
+ out:
+
+ /* free memory associated with sg buffers */
+ if (data_in)
+ pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+ data_out_dma);
+
+ kfree(mpi_request);
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return ret;
+}
+
+/**
+ * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_iocinfo karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memset(&karg, 0, sizeof(karg));
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+ if (ioc->pfacts)
+ karg.port_number = ioc->pfacts[0].PortNumber;
+ karg.hw_rev = ioc->pdev->revision;
+ karg.pci_id = ioc->pdev->device;
+ karg.subsystem_device = ioc->pdev->subsystem_device;
+ karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
+ karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
+ karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
+ karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
+ karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
+ karg.firmware_version = ioc->facts.FWVersion.Word;
+ strcpy(karg.driver_version, MPT3SAS_DRIVER_NAME);
+ strcat(karg.driver_version, "-");
+ strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
+ karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventquery karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
+ memcpy(karg.event_types, ioc->event_type,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventenable karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memcpy(ioc->event_type, karg.event_types,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+ mpt3sas_base_validate_event_type(ioc, ioc->event_type);
+
+ if (ioc->event_log)
+ return 0;
+ /* initialize event_log */
+ ioc->event_context = 0;
+ ioc->aen_event_read_flag = 0;
+ ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
+ sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
+ if (!ioc->event_log) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventreport karg;
+ u32 number_bytes, max_events, max;
+ struct mpt3_ioctl_eventreport __user *uarg = arg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ number_bytes = karg.hdr.max_data_size -
+ sizeof(struct mpt3_ioctl_header);
+ max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
+ max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
+
+ /* If fewer than 1 event is requested, there must have
+ * been some type of error.
+ */
+ if (!max || !ioc->event_log)
+ return -ENODATA;
+
+ number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
+ if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ /* reset flag so SIGIO can restart */
+ ioc->aen_event_read_flag = 0;
+ return 0;
+}
+
+/**
+ * _ctl_do_reset - main handler for MPT3HARDRESET opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_diag_reset karg;
+ int retval;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ pr_info(MPT3SAS_FMT "host reset: %s\n",
+ ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+ return 0;
+}
+
+/**
+ * _ctl_btdh_search_sas_device - searching for sas device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->sas_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == sas_device->handle) {
+ btdh->bus = sas_device->channel;
+ btdh->id = sas_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == sas_device->channel && btdh->id ==
+ sas_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = sas_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_search_raid_device - searching for raid device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->raid_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == raid_device->handle) {
+ btdh->bus = raid_device->channel;
+ btdh->id = raid_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == raid_device->channel && btdh->id ==
+ raid_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = raid_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_btdh_mapping karg;
+ int rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = _ctl_btdh_search_sas_device(ioc, &karg);
+ if (!rc)
+ _ctl_btdh_search_raid_device(ioc, &karg);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_diag_capability - return diag buffer capability
+ * @ioc: per adapter object
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
+ *
+ * returns 1 when diag buffer support is enabled in firmware
+ */
+static u8
+_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
+{
+ u8 rc = 0;
+
+ switch (buffer_type) {
+ case MPI2_DIAG_BUF_TYPE_TRACE:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_EXTENDED:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+ rc = 1;
+ }
+
+ return rc;
+}
+
+
+/**
+ * _ctl_diag_register_2 - wrapper for registering diag buffer support
+ * @ioc: per adapter object
+ * @diag_register: the diag_register struct passed in from user space
+ *
+ */
+static long
+_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_diag_register *diag_register)
+{
+ int rc, i;
+ void *request_data = NULL;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz = 0;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ u8 buffer_type;
+ unsigned long timeleft;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ u8 issue_reset = 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ buffer_type = diag_register->buffer_type;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ pr_err(MPT3SAS_FMT
+ "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__,
+ buffer_type);
+ return -EINVAL;
+ }
+
+ if (diag_register->requested_buffer_size % 4) {
+ pr_err(MPT3SAS_FMT
+ "%s: the requested_buffer_size is not 4 byte aligned\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ request_data = ioc->diag_buffer[buffer_type];
+ request_data_sz = diag_register->requested_buffer_size;
+ ioc->unique_id[buffer_type] = diag_register->unique_id;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ memcpy(ioc->product_specific[buffer_type],
+ diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
+ ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
+
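+ /* Reuse the existing DMA buffer for this type if its size still matches;
+  * otherwise free it and allocate a fresh coherent buffer below.
+  */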
+ if (request_data) {
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[buffer_type],
+ request_data, request_data_dma);
+ request_data = NULL;
+ }
+ }
+
+ if (request_data == NULL) {
+ ioc->diag_buffer_sz[buffer_type] = 0;
+ ioc->diag_buffer_dma[buffer_type] = 0;
+ request_data = pci_alloc_consistent(
+ ioc->pdev, request_data_sz, &request_data_dma);
+ if (request_data == NULL) {
+ pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
+ " for diag buffers, requested size(%d)\n",
+ ioc->name, __func__, request_data_sz);
+ mpt3sas_base_free_smid(ioc, smid);
+ return -ENOMEM;
+ }
+ ioc->diag_buffer[buffer_type] = request_data;
+ ioc->diag_buffer_sz[buffer_type] = request_data_sz;
+ ioc->diag_buffer_dma[buffer_type] = request_data_dma;
+ }
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = diag_register->buffer_type;
+ mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
+ mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
+ mpi_request->BufferLength = cpu_to_le32(request_data_sz);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+ ioc->name, __func__, request_data,
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
+
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ if (rc && request_data)
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * mpt3sas_enable_diag_buffer - enabling diag_buffers support at driver load time
+ * @ioc: per adapter object
+ * @bits_to_register: bitwise field where trace is bit 0, snapshot is bit 1,
+ * and extended is bit 2
+ *
+ * This is called when command line option diag_buffer_enable is enabled
+ * at driver load time.
+ */
+void
+mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
+{
+ struct mpt3_diag_register diag_register;
+
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+
+ if (bits_to_register & 1) {
+ pr_info(MPT3SAS_FMT "registering trace buffer support\n",
+ ioc->name);
+ ioc->diag_trigger_master.MasterData =
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 2) {
+ pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 4) {
+ pr_info(MPT3SAS_FMT "registering extended buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+}
+
+/**
+ * _ctl_diag_register - application register with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to set up any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+static long
+_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_register karg;
+ long rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ rc = _ctl_diag_register_2(ioc, &karg);
+ return rc;
+}
+
+/**
+ * _ctl_diag_unregister - application unregister with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to clean up any memory allocated for diag
+ * messages and to free up any resources.
+ */
+static long
+_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_unregister karg;
+ void *request_data;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
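+ /* By convention the low byte of the application-supplied unique_id
+  * encodes the diag buffer type (see the ids used in
+  * mpt3sas_enable_diag_buffer).
+  */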
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) has not been released\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ return 0;
+}
+
+/**
+ * _ctl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * The application sends only buffer_type and unique_id. The driver
+ * inspects unique_id first; if valid, it fills in all the info. If
+ * unique_id is 0x00, the driver returns the info specified by buffer_type.
+ */
+static long
+_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_query karg;
+ void *request_data;
+ int i;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ karg.application_flags = 0;
+ buffer_type = karg.buffer_type;
+
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
+ karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
+ MPT3_APP_FLAGS_BUFFER_VALID);
+ else
+ karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
+ MPT3_APP_FLAGS_BUFFER_VALID |
+ MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ karg.product_specific[i] =
+ ioc->product_specific[buffer_type][i];
+
+ karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
+ karg.driver_added_buffer_size = 0;
+ karg.unique_id = ioc->unique_id[buffer_type];
+ karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
+
+ if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
+ pr_err(MPT3SAS_FMT
+ "%s: unable to write mpt3_diag_query data @ %p\n",
+ ioc->name, __func__, arg);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * mpt3sas_send_diag_release - Diag Release Message
+ * @ioc: per adapter object
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @issue_reset - specifies whether host reset is required.
+ *
+ */
+int
+mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset)
+{
+ Mpi2DiagReleaseRequest_t *mpi_request;
+ Mpi2DiagReleaseReply_t *mpi_reply;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ int rc;
+ unsigned long timeleft;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = 0;
+ *issue_reset = 0;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED)
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: skipping due to FAULT state\n", ioc->name,
+ __func__));
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ *issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ out:
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * _ctl_diag_release - request to send Diag Release Message to firmware
+ * @arg - user space buffer containing ioctl content
+ *
+ * This allows ownership of the specified buffer to be returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwriting information in the buffer.
+ */
+static long
+_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_release karg;
+ void *request_data;
+ int rc;
+ u8 buffer_type;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is already released\n",
+ ioc->name, __func__,
+ buffer_type);
+ return 0;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ /* buffers were released due to host reset */
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) was released due to host reset\n",
+ ioc->name, __func__, buffer_type);
+ return 0;
+ }
+
+ rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ return rc;
+}
+
+/**
+ * _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_read_buffer karg;
+ struct mpt3_diag_read_buffer __user *uarg = arg;
+ void *request_data, *diag_data;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ int rc, i;
+ u8 buffer_type;
+ unsigned long timeleft, request_size, copy_size;
+ u16 smid;
+ u16 ioc_status;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_size = ioc->diag_buffer_sz[buffer_type];
+
+ if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
+ pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
+ "or bytes_to_read are not 4 byte aligned\n", ioc->name,
+ __func__);
+ return -EINVAL;
+ }
+
+ if (karg.starting_offset > request_size)
+ return -EINVAL;
+
+ diag_data = (void *)(request_data + karg.starting_offset);
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+ ioc->name, __func__,
+ diag_data, karg.starting_offset, karg.bytes_to_read));
+
+ /* Truncate data on requests that are too large */
+ if ((diag_data + karg.bytes_to_read < diag_data) ||
+ (diag_data + karg.bytes_to_read > request_data + request_size))
+ copy_size = request_size - karg.starting_offset;
+ else
+ copy_size = karg.bytes_to_read;
+
+ if (copy_to_user((void __user *)uarg->diagnostic_data,
+ diag_data, copy_size)) {
+ pr_err(MPT3SAS_FMT
+ "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+ ioc->name, __func__, diag_data);
+ return -EFAULT;
+ }
+
+ if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
+ return 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: Reregister buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type));
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is still registered\n",
+ ioc->name, __func__, buffer_type));
+ return 0;
+ }
+ /* Get a free request frame and save the message context.
+ */
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->BufferLength =
+ cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
+ mpi_request->BufferAddress =
+ cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
+ * @ioc: per adapter object
+ * @cmd - ioctl opcode
+ * @arg - (struct mpt3_ioctl_command32)
+ *
+ * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
+ */
+static long
+_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
+ void __user *arg)
+{
+ struct mpt3_ioctl_command32 karg32;
+ struct mpt3_ioctl_command32 __user *uarg;
+ struct mpt3_ioctl_command karg;
+
+ if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
+ return -EINVAL;
+
+ uarg = (struct mpt3_ioctl_command32 __user *) arg;
+
+ if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
+ karg.hdr.ioc_number = karg32.hdr.ioc_number;
+ karg.hdr.port_number = karg32.hdr.port_number;
+ karg.hdr.max_data_size = karg32.hdr.max_data_size;
+ karg.timeout = karg32.timeout;
+ karg.max_reply_bytes = karg32.max_reply_bytes;
+ karg.data_in_size = karg32.data_in_size;
+ karg.data_out_size = karg32.data_out_size;
+ karg.max_sense_bytes = karg32.max_sense_bytes;
+ karg.data_sge_offset = karg32.data_sge_offset;
+ karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+ karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+ karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+ karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+ return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+}
+#endif
+
+/**
+ * _ctl_ioctl_main - main ioctl entry point
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg - user space argument pointer
+ * @compat - handles 32 bit applications in 64bit os
+ */
+static long
+_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
+ u8 compat)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ struct mpt3_ioctl_header ioctl_header;
+ enum block_state state;
+ long ret = -EINVAL;
+
+ /* get IOCTL header */
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct mpt3_ioctl_header))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
+ return -ENODEV;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
+ if (state == NON_BLOCKING) {
+ if (!mutex_trylock(&ioc->ctl_cmds.mutex))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+ return -ERESTARTSYS;
+
+
+ switch (cmd) {
+ case MPT3IOCINFO:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
+ ret = _ctl_getiocinfo(ioc, arg);
+ break;
+#ifdef CONFIG_COMPAT
+ case MPT3COMMAND32:
+#endif
+ case MPT3COMMAND:
+ {
+ struct mpt3_ioctl_command __user *uarg;
+ struct mpt3_ioctl_command karg;
+
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ ret = _ctl_compat_mpt_command(ioc, cmd, arg);
+ break;
+ }
+#endif
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
+ uarg = arg;
+ ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+ }
+ break;
+ }
+ case MPT3EVENTQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
+ ret = _ctl_eventquery(ioc, arg);
+ break;
+ case MPT3EVENTENABLE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
+ ret = _ctl_eventenable(ioc, arg);
+ break;
+ case MPT3EVENTREPORT:
+ ret = _ctl_eventreport(ioc, arg);
+ break;
+ case MPT3HARDRESET:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
+ ret = _ctl_do_reset(ioc, arg);
+ break;
+ case MPT3BTDHMAPPING:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
+ ret = _ctl_btdh_mapping(ioc, arg);
+ break;
+ case MPT3DIAGREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
+ ret = _ctl_diag_register(ioc, arg);
+ break;
+ case MPT3DIAGUNREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
+ ret = _ctl_diag_unregister(ioc, arg);
+ break;
+ case MPT3DIAGQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
+ ret = _ctl_diag_query(ioc, arg);
+ break;
+ case MPT3DIAGRELEASE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
+ ret = _ctl_diag_release(ioc, arg);
+ break;
+ case MPT3DIAGREADBUFFER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
+ ret = _ctl_diag_read_buffer(ioc, arg);
+ break;
+ default:
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+ break;
+ }
+
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+ return ret;
+}
+
+/**
+ * _ctl_ioctl - main ioctl entry point (unlocked)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg -
+ */
+static long
+_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_ioctl_compat - main ioctl entry point (compat)
+ * @file -
+ * @cmd -
+ * @arg -
+ *
+ * This routine handles 32 bit applications in 64bit os.
+ */
+static long
+_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
+ return ret;
+}
+#endif
+
+/* scsi host attributes */
+/**
+ * _ctl_version_fw_show - firmware version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
+
+/**
+ * _ctl_version_bios_show - bios version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (version & 0xFF000000) >> 24,
+ (version & 0x00FF0000) >> 16,
+ (version & 0x0000FF00) >> 8,
+ version & 0x000000FF);
+}
+static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
+
+/**
+ * _ctl_version_mpi_show - MPI (message passing interface) version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
+ ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
+}
+static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
+
+/**
+ * _ctl_version_product_show - product name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
+}
+static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
+
+/**
+ * _ctl_version_nvdata_persistent_show - nvdata persistent version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_persistent_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+}
+static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+ _ctl_version_nvdata_persistent_show, NULL);
+
+/**
+ * _ctl_version_nvdata_default_show - nvdata default version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
+ *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+}
+static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+ _ctl_version_nvdata_default_show, NULL);
+
+/**
+ * _ctl_board_name_show - board name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
+}
+static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
+
+/**
+ * _ctl_board_assembly_show - board assembly name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
+}
+static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
+
+/**
+ * _ctl_board_tracer_show - board tracer number
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
+}
+static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
+
+/**
+ * _ctl_io_delay_show - io missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware implementation for debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
+
+/**
+ * _ctl_device_delay_show - device missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware implementation for debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
+
+/**
+ * _ctl_fw_queue_depth_show - global credits
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware queue depth limit
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
+}
+static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
+
+/**
+ * _ctl_host_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)ioc->sas_hba.sas_address);
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+ _ctl_host_sas_address_show, NULL);
+
+/**
+ * _ctl_logging_level_show - logging level
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
+}
+static ssize_t
+_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%x", &val) != 1)
+ return -EINVAL;
+
+ ioc->logging_level = val;
+ pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
+ ioc->logging_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
+ _ctl_logging_level_store);
+
+/**
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt3sas_fwfault_debug is a command line option
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->fwfault_debug = val;
+ pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
+ ioc->fwfault_debug);
+ return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+ _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+
+/**
+ * _ctl_ioc_reset_count_show - ioc reset count
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of times the IOC has been reset
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
+}
+static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
+
+/**
+ * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reply_queue_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 reply_queue_count;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+ reply_queue_count = ioc->reply_queue_count;
+ else
+ reply_queue_count = 1;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
+ NULL);
+
+struct DIAG_BUFFER_START {
+ __le32 Size;
+ __le32 DiagVersion;
+ u8 BufferType;
+ u8 Reserved[3];
+ __le32 Reserved1;
+ __le32 Reserved2;
+ __le32 Reserved3;
+};
+
+/**
+ * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_trace_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 size = 0;
+ struct DIAG_BUFFER_START *request_data;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request_data = (struct DIAG_BUFFER_START *)
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
+ if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
+ le32_to_cpu(request_data->Reserved3) == 0x4742444c)
+ size = le32_to_cpu(request_data->Size);
+
+ ioc->ring_buffer_sz = size;
+ return snprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
+ _ctl_host_trace_buffer_size_show, NULL);
+
+/**
+ * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * You will only be able to read 4k bytes of the ring buffer at a time.
+ * In order to read beyond 4k bytes, you will have to write the desired
+ * offset to this same attribute; that moves the read pointer.
+ */
+static ssize_t
+_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ void *request_data;
+ u32 size;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
+ return 0;
+
+ size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+ request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
+ memcpy(buf, request_data, size);
+ return size;
+}
+
+static ssize_t
+_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->ring_buffer_offset = val;
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
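A short userspace sketch may help illustrate the offset/read protocol of the two trace-buffer attributes above; it is illustrative only and not part of this patch. The sysfs path for host0 and the prior posting of a trace buffer (for example through the host_trace_buffer_enable attribute defined below, or the diag_buffer_enable module parameter) are assumptions.

/* Dump the posted firmware trace buffer by writing the offset and then
 * reading host_trace_buffer, one page at a time. */
#include <stdio.h>

int main(void)
{
	const char *size_attr =
	    "/sys/class/scsi_host/host0/host_trace_buffer_size";
	const char *buf_attr =
	    "/sys/class/scsi_host/host0/host_trace_buffer";
	char chunk[4096];
	long size, offset = 0;
	size_t n;
	FILE *f;

	/* reading the size attribute also latches the ring buffer size
	 * that subsequent host_trace_buffer reads are checked against */
	f = fopen(size_attr, "r");
	if (!f || fscanf(f, "%ld", &size) != 1)
		return 1;
	fclose(f);

	while (offset < size) {
		/* move the driver's read pointer ... */
		f = fopen(buf_attr, "w");
		if (!f)
			return 1;
		fprintf(f, "%ld", offset);
		fclose(f);

		/* ... then read up to (nearly) one page of trace data */
		f = fopen(buf_attr, "r");
		if (!f)
			return 1;
		n = fread(chunk, 1, sizeof(chunk), f);
		fclose(f);
		if (n == 0)
			break;
		fwrite(chunk, 1, n, stdout);
		offset += n;
	}
	return 0;
}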
+
+
+/*****************************************/
+
+/**
+ * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * This is a mechanism to post/release the host trace buffers
+ */
+static ssize_t
+_ctl_host_trace_buffer_enable_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+ else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ return snprintf(buf, PAGE_SIZE, "release\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "post\n");
+}
+
+static ssize_t
+_ctl_host_trace_buffer_enable_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ char str[10] = "";
+ struct mpt3_diag_register diag_register;
+ u8 issue_reset = 0;
+
+	/* don't allow post/release to occur while recovery is active */
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery || ioc->is_driver_loading)
+ return -EBUSY;
+
+ if (sscanf(buf, "%9s", str) != 1)
+ return -EINVAL;
+
+ if (!strcmp(str, "post")) {
+ /* exit out if host buffers are already posted */
+ if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
+ goto out;
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+ pr_info(MPT3SAS_FMT "posting host trace buffers\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ diag_register.requested_buffer_size = (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
+ _ctl_diag_register_2(ioc, &diag_register);
+ } else if (!strcmp(str, "release")) {
+ /* exit out if host buffers are already released */
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ goto out;
+ pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
+ ioc->name);
+ mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ &issue_reset);
+ }
+
+ out:
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_enable_show,
+ _ctl_host_trace_buffer_enable_store);
+
+/*********** diagnostic trigger support *********************************/
+
+/**
+ * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_master_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
+ memcpy(buf, &ioc->diag_trigger_master, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_master_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+ memset(&ioc->diag_trigger_master, 0,
+ sizeof(struct SL_WH_MASTER_TRIGGER_T));
+ memcpy(&ioc->diag_trigger_master, buf, rc);
+ ioc->diag_trigger_master.MasterData |=
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
+
+
+/**
+ * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_event_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_event, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_event_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_event, 0,
+ sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_event, buf, sz);
+ if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
+
+
+/**
+ * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_scsi, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_scsi, 0,
+	    sizeof(struct SL_WH_SCSI_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_scsi, buf, sz);
+ if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
+
+
+/**
+ * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_mpi, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_mpi, 0,
+	    sizeof(struct SL_WH_MPI_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_mpi, buf, sz);
+ if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+
+static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
+
+/*********** diagnostic trigger support *** END ****************************/
+
+
+
+/*****************************************/
+
+struct device_attribute *mpt3sas_host_attrs[] = {
+ &dev_attr_version_fw,
+ &dev_attr_version_bios,
+ &dev_attr_version_mpi,
+ &dev_attr_version_product,
+ &dev_attr_version_nvdata_persistent,
+ &dev_attr_version_nvdata_default,
+ &dev_attr_board_name,
+ &dev_attr_board_assembly,
+ &dev_attr_board_tracer,
+ &dev_attr_io_delay,
+ &dev_attr_device_delay,
+ &dev_attr_logging_level,
+ &dev_attr_fwfault_debug,
+ &dev_attr_fw_queue_depth,
+ &dev_attr_host_sas_address,
+ &dev_attr_ioc_reset_count,
+ &dev_attr_host_trace_buffer_size,
+ &dev_attr_host_trace_buffer,
+ &dev_attr_host_trace_buffer_enable,
+ &dev_attr_reply_queue_count,
+ &dev_attr_diag_trigger_master,
+ &dev_attr_diag_trigger_event,
+ &dev_attr_diag_trigger_scsi,
+ &dev_attr_diag_trigger_mpi,
+ NULL,
+};
+
+/* device attributes */
+
+/**
+ * _ctl_device_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the sas address for the target
+ *
+ * A sysfs 'read-only' sdev attribute.
+ */
+static ssize_t
+_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)sas_device_priv_data->sas_target->sas_address);
+}
+static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
+
+/**
+ * _ctl_device_handle_show - device handle
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware assigned device handle
+ *
+ * A sysfs 'read-only' sdev attribute.
+ */
+static ssize_t
+_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n",
+ sas_device_priv_data->sas_target->handle);
+}
+static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+
+struct device_attribute *mpt3sas_dev_attrs[] = {
+ &dev_attr_sas_address,
+ &dev_attr_sas_device_handle,
+ NULL,
+};
+
+static const struct file_operations ctl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = _ctl_ioctl,
+ .release = _ctl_release,
+ .poll = _ctl_poll,
+ .fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = _ctl_ioctl_compat,
+#endif
+};
+
+static struct miscdevice ctl_dev = {
+ .minor = MPT3SAS_MINOR,
+ .name = MPT3SAS_DEV_NAME,
+ .fops = &ctl_fops,
+};
+
+/**
+ * mpt3sas_ctl_init - main entry point for ctl.
+ *
+ */
+void
+mpt3sas_ctl_init(void)
+{
+ async_queue = NULL;
+ if (misc_register(&ctl_dev) < 0)
+ pr_err("%s can't register misc device [minor=%d]\n",
+ MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
+
+ init_waitqueue_head(&ctl_poll_wait);
+}
+
+/**
+ * mpt3sas_ctl_exit - exit point for ctl
+ *
+ */
+void
+mpt3sas_ctl_exit(void)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ int i;
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+
+ /* free memory associated to diag buffers */
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!ioc->diag_buffer[i])
+ continue;
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+ ioc->diag_buffer[i] = NULL;
+ ioc->diag_buffer_status[i] = 0;
+ }
+
+ kfree(ioc->event_log);
+ }
+ misc_deregister(&ctl_dev);
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
new file mode 100644
index 000000000000..bd89f4f00550
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -0,0 +1,418 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_CTL_H_INCLUDED
+#define MPT3SAS_CTL_H_INCLUDED
+
+#ifdef __KERNEL__
+#include <linux/miscdevice.h>
+#endif
+
+
+#ifndef MPT3SAS_MINOR
+#define MPT3SAS_MINOR (MPT_MINOR + 2)
+#endif
+#define MPT3SAS_DEV_NAME "mpt3ctl"
+#define MPT3_MAGIC_NUMBER 'L'
+#define MPT3_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */
+
+/**
+ * IOCTL opcodes
+ */
+#define MPT3IOCINFO _IOWR(MPT3_MAGIC_NUMBER, 17, \
+ struct mpt3_ioctl_iocinfo)
+#define MPT3COMMAND _IOWR(MPT3_MAGIC_NUMBER, 20, \
+ struct mpt3_ioctl_command)
+#ifdef CONFIG_COMPAT
+#define MPT3COMMAND32 _IOWR(MPT3_MAGIC_NUMBER, 20, \
+ struct mpt3_ioctl_command32)
+#endif
+#define MPT3EVENTQUERY _IOWR(MPT3_MAGIC_NUMBER, 21, \
+ struct mpt3_ioctl_eventquery)
+#define MPT3EVENTENABLE _IOWR(MPT3_MAGIC_NUMBER, 22, \
+ struct mpt3_ioctl_eventenable)
+#define MPT3EVENTREPORT _IOWR(MPT3_MAGIC_NUMBER, 23, \
+ struct mpt3_ioctl_eventreport)
+#define MPT3HARDRESET _IOWR(MPT3_MAGIC_NUMBER, 24, \
+ struct mpt3_ioctl_diag_reset)
+#define MPT3BTDHMAPPING _IOWR(MPT3_MAGIC_NUMBER, 31, \
+ struct mpt3_ioctl_btdh_mapping)
+
+/* diag buffer support */
+#define MPT3DIAGREGISTER _IOWR(MPT3_MAGIC_NUMBER, 26, \
+ struct mpt3_diag_register)
+#define MPT3DIAGRELEASE _IOWR(MPT3_MAGIC_NUMBER, 27, \
+ struct mpt3_diag_release)
+#define MPT3DIAGUNREGISTER _IOWR(MPT3_MAGIC_NUMBER, 28, \
+ struct mpt3_diag_unregister)
+#define MPT3DIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 29, \
+ struct mpt3_diag_query)
+#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
+ struct mpt3_diag_read_buffer)
+
+/**
+ * struct mpt3_ioctl_header - main header structure
+ * @ioc_number - IOC unit number
+ * @port_number - IOC port number
+ * @max_data_size - maximum number of bytes to transfer on read
+ */
+struct mpt3_ioctl_header {
+ uint32_t ioc_number;
+ uint32_t port_number;
+ uint32_t max_data_size;
+};
+
+/**
+ * struct mpt3_ioctl_diag_reset - diagnostic reset
+ * @hdr - generic header
+ */
+struct mpt3_ioctl_diag_reset {
+ struct mpt3_ioctl_header hdr;
+};
+
+
+/**
+ * struct mpt3_ioctl_pci_info - pci device info
+ * @device - pci device id
+ * @function - pci function id
+ * @bus - pci bus id
+ * @segment_id - pci segment id
+ */
+struct mpt3_ioctl_pci_info {
+ union {
+ struct {
+ uint32_t device:5;
+ uint32_t function:3;
+ uint32_t bus:24;
+ } bits;
+ uint32_t word;
+ } u;
+ uint32_t segment_id;
+};
+
+
+#define MPT2_IOCTL_INTERFACE_SCSI (0x00)
+#define MPT2_IOCTL_INTERFACE_FC (0x01)
+#define MPT2_IOCTL_INTERFACE_FC_IP (0x02)
+#define MPT2_IOCTL_INTERFACE_SAS (0x03)
+#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
+#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
+#define MPT2_IOCTL_VERSION_LENGTH (32)
+
+/**
+ * struct mpt3_ioctl_iocinfo - generic controller info
+ * @hdr - generic header
+ * @adapter_type - type of adapter (spi, fc, sas)
+ * @port_number - port number
+ * @pci_id - PCI Id
+ * @hw_rev - hardware revision
+ * @subsystem_device - PCI subsystem Device ID
+ * @subsystem_vendor - PCI subsystem Vendor ID
+ * @rsvd0 - reserved
+ * @firmware_version - firmware version
+ * @bios_version - BIOS version
+ * @driver_version - driver version - 32 ASCII characters
+ * @rsvd1 - reserved
+ * @scsi_id - scsi id of adapter 0
+ * @rsvd2 - reserved
+ * @pci_information - pci info (2nd revision)
+ */
+struct mpt3_ioctl_iocinfo {
+ struct mpt3_ioctl_header hdr;
+ uint32_t adapter_type;
+ uint32_t port_number;
+ uint32_t pci_id;
+ uint32_t hw_rev;
+ uint32_t subsystem_device;
+ uint32_t subsystem_vendor;
+ uint32_t rsvd0;
+ uint32_t firmware_version;
+ uint32_t bios_version;
+ uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
+ uint8_t rsvd1;
+ uint8_t scsi_id;
+ uint16_t rsvd2;
+ struct mpt3_ioctl_pci_info pci_information;
+};
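As a rough illustration of how an application drives these ioctls (not part of this patch), the sketch below queries controller info with MPT3IOCINFO. It assumes this header and the MPI2 headers it references are on the userspace include path, and that the __user annotation is defined away for userspace builds (e.g. -D__user=).

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "mpt3sas_ctl.h"

int main(void)
{
	struct mpt3_ioctl_iocinfo info;
	int fd = open("/dev/" MPT3SAS_DEV_NAME, O_RDWR);

	if (fd < 0)
		return 1;
	memset(&info, 0, sizeof(info));
	info.hdr.ioc_number = 0;		/* first mpt3sas controller */
	info.hdr.max_data_size = sizeof(info);
	if (ioctl(fd, MPT3IOCINFO, &info) == 0)
		printf("fw 0x%08x bios 0x%08x adapter_type %u\n",
		    info.firmware_version, info.bios_version,
		    info.adapter_type);
	close(fd);
	return 0;
}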
+
+
+/* number of event log entries */
+#define MPT3SAS_CTL_EVENT_LOG_SIZE (50)
+
+/**
+ * struct mpt3_ioctl_eventquery - query event count and type
+ * @hdr - generic header
+ * @event_entries - number of events returned by get_event_report
+ * @rsvd - reserved
+ * @event_types - type of events currently being captured
+ */
+struct mpt3_ioctl_eventquery {
+ struct mpt3_ioctl_header hdr;
+ uint16_t event_entries;
+ uint16_t rsvd;
+ uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+/**
+ * struct mpt3_ioctl_eventenable - enable/disable event capturing
+ * @hdr - generic header
+ * @event_types - toggle off/on type of events to be captured
+ */
+struct mpt3_ioctl_eventenable {
+ struct mpt3_ioctl_header hdr;
+ uint32_t event_types[4];
+};
+
+#define MPT3_EVENT_DATA_SIZE (192)
+/**
+ * struct MPT3_IOCTL_EVENTS -
+ * @event - the event that was reported
+ * @context - unique value for each event assigned by driver
+ * @data - event data returned in fw reply message
+ */
+struct MPT3_IOCTL_EVENTS {
+ uint32_t event;
+ uint32_t context;
+ uint8_t data[MPT3_EVENT_DATA_SIZE];
+};
+
+/**
+ * struct mpt3_ioctl_eventreport - returning event log
+ * @hdr - generic header
+ * @event_data - (see struct MPT3_IOCTL_EVENTS)
+ */
+struct mpt3_ioctl_eventreport {
+ struct mpt3_ioctl_header hdr;
+ struct MPT3_IOCTL_EVENTS event_data[1];
+};
+
+/**
+ * struct mpt3_ioctl_command - generic mpt firmware passthru ioctl
+ * @hdr - generic header
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ * value).
+ * @reply_frame_buf_ptr - reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @sense_data_ptr - sense data location
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number of bytes for data transfer in (read)
+ * @data_out_size - number of bytes for data transfer out (write)
+ * @max_sense_bytes - maximum number of bytes for auto sense buffers
+ * @data_sge_offset - offset in words from the start of the request message to
+ * the first SGL
+ * @mf[1];
+ */
+struct mpt3_ioctl_command {
+ struct mpt3_ioctl_header hdr;
+ uint32_t timeout;
+ void __user *reply_frame_buf_ptr;
+ void __user *data_in_buf_ptr;
+ void __user *data_out_buf_ptr;
+ void __user *sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+
+#ifdef CONFIG_COMPAT
+struct mpt3_ioctl_command32 {
+ struct mpt3_ioctl_header hdr;
+ uint32_t timeout;
+ uint32_t reply_frame_buf_ptr;
+ uint32_t data_in_buf_ptr;
+ uint32_t data_out_buf_ptr;
+ uint32_t sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+#endif
+
+/**
+ * struct mpt3_ioctl_btdh_mapping - mapping info
+ * @hdr - generic header
+ * @id - target device identification number
+ * @bus - SCSI bus number that the target device exists on
+ * @handle - device handle for the target device
+ * @rsvd - reserved
+ *
+ * To obtain a bus/id, the application sets
+ * handle to a valid handle, and bus/id to 0xFFFF.
+ *
+ * To obtain the device handle, the application sets
+ * bus/id to a valid value, and the handle to 0xFFFF.
+ */
+struct mpt3_ioctl_btdh_mapping {
+ struct mpt3_ioctl_header hdr;
+ uint32_t id;
+ uint32_t bus;
+ uint16_t handle;
+ uint16_t rsvd;
+};
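A minimal sketch of the lookup described in the comment above, not part of this patch. It assumes the same userspace setup as the MPT3IOCINFO sketch earlier and an already-open descriptor on /dev/mpt3ctl.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "mpt3sas_ctl.h"

/* resolve a firmware device handle to its bus/target id */
static int handle_to_bus_id(int fd, uint16_t handle,
	uint32_t *bus, uint32_t *id)
{
	struct mpt3_ioctl_btdh_mapping map;

	memset(&map, 0, sizeof(map));
	map.hdr.ioc_number = 0;
	map.handle = handle;
	map.bus = 0xFFFF;	/* 0xFFFF asks the driver to fill these in */
	map.id = 0xFFFF;
	if (ioctl(fd, MPT3BTDHMAPPING, &map))
		return -1;
	*bus = map.bus;
	*id = map.id;
	return 0;
}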
+
+
+
+/* application flags for mpt3_diag_register, mpt3_diag_query */
+#define MPT3_APP_FLAGS_APP_OWNED (0x0001)
+#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002)
+#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+
+/* flags for mpt3_diag_read_buffer */
+#define MPT3_FLAGS_REREGISTER (0x0001)
+
+#define MPT3_PRODUCT_SPECIFIC_DWORDS 23
+
+/**
+ * struct mpt3_diag_register - application register with driver
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @requested_buffer_size - buffer size in bytes
+ * @unique_id - tag specified by application that is used to signal ownership
+ * of the buffer.
+ *
+ * This will allow the driver to set up any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+struct mpt3_diag_register {
+ struct mpt3_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t requested_buffer_size;
+ uint32_t unique_id;
+};
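Again purely illustrative, not part of this patch: an application-side registration of a 1 MB trace buffer. MPI2_DIAG_BUF_TYPE_TRACE comes from the MPI2 headers, the unique_id is an arbitrary application-chosen tag, and fd is assumed to be an open descriptor on /dev/mpt3ctl.

#include <string.h>
#include <sys/ioctl.h>
#include "mpt3sas_ctl.h"

static int register_trace_buffer(int fd)
{
	struct mpt3_diag_register reg;

	memset(&reg, 0, sizeof(reg));
	reg.hdr.ioc_number = 0;
	reg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
	reg.requested_buffer_size = 1024 * 1024;
	reg.unique_id = 0x12345678;	/* hypothetical application tag */
	return ioctl(fd, MPT3DIAGREGISTER, &reg);
}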
+
+/**
+ * struct mpt3_diag_unregister - application unregister with driver
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be unregistered
+ *
+ * This will allow the driver to cleanup any memory allocated for diag
+ * messages and to free up any resources.
+ */
+struct mpt3_diag_unregister {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_query - query relevant info associated with diag buffers
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @total_buffer_size - diag buffer size in bytes
+ * @driver_added_buffer_size - size of extra space appended to end of buffer
+ * @unique_id - unique id associated with this buffer.
+ *
+ * The application will send only buffer_type and unique_id. The driver will
+ * inspect unique_id first; if it is valid, it fills in all the info. If
+ * unique_id is 0x00, the driver will return info specified by buffer_type.
+ */
+struct mpt3_diag_query {
+ struct mpt3_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t total_buffer_size;
+ uint32_t driver_added_buffer_size;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_release - request to send Diag Release Message to firmware
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be released
+ *
+ * This allows ownership of the specified buffer to be returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwriting information in the buffer.
+ */
+struct mpt3_diag_release {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_read_buffer - request for copy of the diag buffer
+ * @hdr - generic header
+ * @status -
+ * @reserved -
+ * @flags - misc flags
+ * @starting_offset - starting offset within the driver's buffer from which
+ *	to start copying data into the specified application buffer
+ * @bytes_to_read - number of bytes to copy from the driver's buffer into
+ *	the application buffer, starting at starting_offset.
+ * @unique_id - unique id associated with this buffer.
+ * @diagnostic_data - data payload
+ */
+struct mpt3_diag_read_buffer {
+ struct mpt3_ioctl_header hdr;
+ uint8_t status;
+ uint8_t reserved;
+ uint16_t flags;
+ uint32_t starting_offset;
+ uint32_t bytes_to_read;
+ uint32_t unique_id;
+ uint32_t diagnostic_data[1];
+};
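One more illustrative sketch, not part of this patch: pulling a 4 KB chunk of a registered buffer with MPT3DIAGREADBUFFER. Per the driver code, starting_offset and bytes_to_read must both be 4-byte aligned; the unique_id must match the one used at registration, and fd is assumed to be an open descriptor on /dev/mpt3ctl.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "mpt3sas_ctl.h"

static int read_diag_chunk(int fd, uint32_t unique_id, uint32_t offset)
{
	size_t sz = sizeof(struct mpt3_diag_read_buffer) + 4096;
	struct mpt3_diag_read_buffer *rb = calloc(1, sz);
	int rc;

	if (!rb)
		return -1;
	rb->hdr.ioc_number = 0;
	rb->unique_id = unique_id;
	rb->starting_offset = offset;	/* must be a multiple of 4 */
	rb->bytes_to_read = 4096;	/* must be a multiple of 4 */
	rc = ioctl(fd, MPT3DIAGREADBUFFER, rb);
	/* on success, rb->diagnostic_data[] holds the copied bytes */
	free(rb);
	return rc;
}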
+
+#endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
new file mode 100644
index 000000000000..35405e7044f8
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -0,0 +1,219 @@
+/*
+ * Logging Support for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_DEBUG_H_INCLUDED
+#define MPT3SAS_DEBUG_H_INCLUDED
+
+#define MPT_DEBUG 0x00000001
+#define MPT_DEBUG_MSG_FRAME 0x00000002
+#define MPT_DEBUG_SG 0x00000004
+#define MPT_DEBUG_EVENTS 0x00000008
+#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010
+#define MPT_DEBUG_INIT 0x00000020
+#define MPT_DEBUG_EXIT 0x00000040
+#define MPT_DEBUG_FAIL 0x00000080
+#define MPT_DEBUG_TM 0x00000100
+#define MPT_DEBUG_REPLY 0x00000200
+#define MPT_DEBUG_HANDSHAKE 0x00000400
+#define MPT_DEBUG_CONFIG 0x00000800
+#define MPT_DEBUG_DL 0x00001000
+#define MPT_DEBUG_RESET 0x00002000
+#define MPT_DEBUG_SCSI 0x00004000
+#define MPT_DEBUG_IOCTL 0x00008000
+#define MPT_DEBUG_SAS 0x00020000
+#define MPT_DEBUG_TRANSPORT 0x00040000
+#define MPT_DEBUG_TASK_SET_FULL 0x00080000
+
+#define MPT_DEBUG_TRIGGER_DIAG 0x00200000
+
+
+/*
+ * CONFIG_SCSI_MPT3SAS_LOGGING - enabled in Kconfig
+ */
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
+{ \
+ if (IOC->logging_level & BITS) \
+ CMD; \
+}
+#else
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */
+
+
+/*
+ * debug macros
+ */
+
+#define dprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG)
+
+#define dsgprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG)
+
+#define devtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS)
+
+#define dewtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK)
+
+#define dinitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT)
+
+#define dexitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT)
+
+#define dfailprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL)
+
+#define dtmprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM)
+
+#define dreplyprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY)
+
+#define dhsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE)
+
+#define dcprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG)
+
+#define ddlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL)
+
+#define drsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET)
+
+#define dsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI)
+
+#define dctlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL)
+
+#define dsasprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS)
+
+#define dsastransport(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
+
+#define dmfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME)
+
+#define dtsfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL)
+
+#define dtransportprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT)
+
+#define dTriggerDiagPrintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG)
+
+
+
+/* inline functions for dumping debug data*/
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _debug_dump_mf - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_mf(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("mf:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+/**
+ * _debug_dump_reply - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_reply(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("reply:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+/**
+ * _debug_dump_config - print config page contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_config(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("config:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+#else
+#define _debug_dump_mf(mpi_request, sz)
+#define _debug_dump_reply(mpi_request, sz)
+#define _debug_dump_config(mpi_request, sz)
+#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */
+
+#endif /* MPT3SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
new file mode 100644
index 000000000000..6421a06c4ce2
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -0,0 +1,8166 @@
+/*
+ * Scsi Host Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+#include <linux/raid_class.h>
+
+#include "mpt3sas_base.h"
+
+MODULE_AUTHOR(MPT3SAS_AUTHOR);
+MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
+
+#define RAID_CHANNEL 1
+/* forward prototypes */
+static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander);
+static void _firmware_event_work(struct work_struct *work);
+
+static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device);
+static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 retry_count, u8 is_pd);
+
+static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+
+static void _scsih_scan_start(struct Scsi_Host *shost);
+static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+
+/* global parameters */
+LIST_HEAD(mpt3sas_ioc_list);
+
+/* local parameters */
+static u8 scsi_io_cb_idx = -1;
+static u8 tm_cb_idx = -1;
+static u8 ctl_cb_idx = -1;
+static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
+static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
+static u8 config_cb_idx = -1;
+static int mpt_ids;
+
+static u8 tm_tr_cb_idx = -1;
+static u8 tm_tr_volume_cb_idx = -1;
+static u8 tm_sas_control_cb_idx = -1;
+
+/* command line options */
+static u32 logging_level;
+MODULE_PARM_DESC(logging_level,
+ " bits for enabling additional logging info (default=0)");
+
+
+static ushort max_sectors = 0xFFFF;
+module_param(max_sectors, ushort, 0);
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
+
+
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0);
+MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
+
+/* scsi-mid layer global parameter is max_report_luns, which is 511 */
+#define MPT3SAS_MAX_LUN (16895)
+static int max_lun = MPT3SAS_MAX_LUN;
+module_param(max_lun, int, 0);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+
+
+
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Either bit can be set, or both
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable,
+ " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask = -1;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+
+
+/* raid transport support */
+
+static struct raid_template *mpt3sas_raid_template;
+
+
+/**
+ * struct sense_info - common structure for obtaining sense keys
+ * @skey: sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ */
+struct sense_info {
+ u8 skey;
+ u8 asc;
+ u8 ascq;
+};
+
+#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
+#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC)
+#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
+#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
+/**
+ * struct fw_event_work - firmware event struct
+ * @list: link list framework
+ * @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
+ * @ioc: per adapter object
+ * @device_handle: device handle
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @ignore: flag meaning this event has been marked to ignore
+ * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
+ * @event_data: reply event data payload follows
+ *
+ * This object is stored on ioc->fw_event_list.
+ */
+struct fw_event_work {
+ struct list_head list;
+ struct work_struct work;
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
+
+ struct MPT3SAS_ADAPTER *ioc;
+ u16 device_handle;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 ignore;
+ u16 event;
+ void *event_data;
+};
+
+/**
+ * struct _scsi_io_transfer - scsi io transfer
+ * @handle: sas device handle (assigned by firmware)
+ * @is_raid: flag set for hidden raid components
+ * @dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @data_length: data transfer length
+ * @data_dma: dma pointer to data
+ * @sense: sense data
+ * @lun: lun number
+ * @cdb_length: cdb length
+ * @cdb: cdb contents
+ * @timeout: timeout for this command
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @valid_reply: flag set for reply message
+ * @sense_length: sense length
+ * @ioc_status: ioc status
+ * @scsi_state: scsi state
+ * @scsi_status: scsi status
+ * @log_info: log information
+ * @transfer_length: data length transferred when there is a reply message
+ *
+ * Used for sending internal scsi commands to devices within this module.
+ * Refer to _scsi_send_scsi_io().
+ */
+struct _scsi_io_transfer {
+ u16 handle;
+ u8 is_raid;
+ enum dma_data_direction dir;
+ u32 data_length;
+ dma_addr_t data_dma;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+ u32 lun;
+ u8 cdb_length;
+ u8 cdb[32];
+ u8 timeout;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 valid_reply;
+ /* the following bits are only valid when 'valid_reply = 1' */
+ u32 sense_length;
+ u16 ioc_status;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ u32 transfer_length;
+};
+
+/*
+ * The pci device ids are defined in mpi/mpi2_cnfg.h.
+ */
+static DEFINE_PCI_DEVICE_TABLE(scsih_pci_table) = {
+ /* Fury ~ 3004 and 3008 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Invader ~ 3108 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, scsih_pci_table);
+
+/**
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
+ * @val: value string written to the logging_level module parameter
+ * @kp: kernel_param structure for the logging_level parameter
+ *
+ * Note: The logging levels are defined in mpt3sas_debug.h.
+ */
+static int
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ pr_info("setting logging_level(0x%08x)\n", logging_level);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->logging_level = logging_level;
+ return 0;
+}
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
+ &logging_level, 0644);
+
+/**
+ * _scsih_srch_boot_sas_address - search based on sas_address
+ * @sas_address: sas address
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_sas_address(u64 sas_address,
+ Mpi2BootDeviceSasWwid_t *boot_device)
+{
+ return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_device_name - search based on device name
+ * @device_name: device name specified in the IDENTIFY frame
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_device_name(u64 device_name,
+ Mpi2BootDeviceDeviceName_t *boot_device)
+{
+ return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
+ Mpi2BootDeviceEnclosureSlot_t *boot_device)
+{
+ return (enclosure_logical_id == le64_to_cpu(boot_device->
+ EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
+ SlotNumber)) ? 1 : 0;
+}
+
+/**
+ * _scsih_is_boot_device - search for matching boot device.
+ * @sas_address: sas address
+ * @device_name: device name specified in the IDENTIFY frame
+ * @enclosure_logical_id: enclosure logical id
+ * @slot: slot number
+ * @form: specifies boot device form
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static int
+_scsih_is_boot_device(u64 sas_address, u64 device_name,
+ u64 enclosure_logical_id, u16 slot, u8 form,
+ Mpi2BiosPage2BootDevice_t *boot_device)
+{
+ int rc = 0;
+
+ switch (form) {
+ case MPI2_BIOSPAGE2_FORM_SAS_WWID:
+ if (!sas_address)
+ break;
+ rc = _scsih_srch_boot_sas_address(
+ sas_address, &boot_device->SasWwid);
+ break;
+ case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
+ if (!enclosure_logical_id)
+ break;
+ rc = _scsih_srch_boot_encl_slot(
+ enclosure_logical_id,
+ slot, &boot_device->EnclosureSlot);
+ break;
+ case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
+ if (!device_name)
+ break;
+ rc = _scsih_srch_boot_device_name(
+ device_name, &boot_device->DeviceName);
+ break;
+ case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * _scsih_get_sas_address - obtain the sas_address for the given device handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sas_address: sas address
+ *
+ * Returns 0 on success, non-zero on failure
+ */
+static int
+_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 *sas_address)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 ioc_status;
+
+ *sas_address = 0;
+
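+ /* a handle at or below the HBA phy count refers to the host adapter itself */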
+ if (handle <= ioc->sas_hba.num_phys) {
+ *sas_address = ioc->sas_hba.sas_address;
+ return 0;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ return 0;
+ }
+
+ /* we hit this because the given parent handle doesn't exist */
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return -ENXIO;
+
+ /* else error case */
+ pr_err(MPT3SAS_FMT
+ "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+ ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+}
+
+/**
+ * _scsih_determine_boot_device - determine boot device.
+ * @ioc: per adapter object
+ * @device: either sas_device or raid_device object
+ * @is_raid: [flag] 1 = raid object, 0 = sas object
+ *
+ * Determines whether this device should be the first device reported to
+ * scsi-ml or the sas transport; this is used for persistent boot device
+ * support. Bios page 2 contains primary, alternate, and current entries,
+ * in that order of priority. This routine saves the corresponding device
+ * object and is_raid flag in the ioc object; the saved data is used later
+ * in _scsih_probe_boot_devices().
+ */
+static void
+_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
+ void *device, u8 is_raid)
+{
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u64 sas_address;
+ u64 device_name;
+ u64 enclosure_logical_id;
+ u16 slot;
+
+ /* only process this function when driver loads */
+ if (!ioc->is_driver_loading)
+ return;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ if (!is_raid) {
+ sas_device = device;
+ sas_address = sas_device->sas_address;
+ device_name = sas_device->device_name;
+ enclosure_logical_id = sas_device->enclosure_logical_id;
+ slot = sas_device->slot;
+ } else {
+ raid_device = device;
+ sas_address = raid_device->wwid;
+ device_name = 0;
+ enclosure_logical_id = 0;
+ slot = 0;
+ }
+
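+ /*
+ * check the requested, alternate, and current bios page 2 boot device
+ * entries, in that priority order
+ */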
+ if (!ioc->req_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: req_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_boot_device.device = device;
+ ioc->req_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->req_alt_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedAltBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: req_alt_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_alt_boot_device.device = device;
+ ioc->req_alt_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->current_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.CurrentBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: current_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->current_boot_device.device = device;
+ ioc->current_boot_device.is_raid = is_raid;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_sas_device_find_by_sas_address - sas device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then return sas_device
+ * object.
+ */
+struct _sas_device *
+mpt3sas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on handle, then return sas_device
+ * object.
+ */
+static struct _sas_device *
+_scsih_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_remove - remove sas_device from list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Removing object and freeing associated memory from the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ if (!sas_device)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_device_remove_by_handle - removing device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * mpt3sas_device_remove_by_sas_address - removing device object by sas address
+ * @ioc: per adapter object
+ * @sas_address: device sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * _scsih_sas_device_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object to the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__, sas_device->handle,
+ (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
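+ /*
+ * register the device with the sas transport layer; if registration
+ * fails, or no scsi target ends up attached, the device is dropped
+ * from the internal list
+ */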
+ if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ /*
+ * When async scanning is enabled, it is not possible to remove
+ * devices while scanning is turned on due to an oops in
+ * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+}
+
+/**
+ * _scsih_sas_device_init_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object at driver load time to the ioc->sas_device_init_list.
+ */
+static void
+_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ __func__, sas_device->handle,
+ (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
+ _scsih_determine_boot_device(ioc, sas_device, 0);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_find_by_id - raid device search
+ * @ioc: per adapter object
+ * @id: sas device target id
+ * @channel: sas device channel
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on target id, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->id == id && raid_device->channel == channel) {
+ r = raid_device;
+ goto out;
+ }
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_handle - raid device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on handle, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->handle != handle)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_wwid - raid device search
+ * @ioc: per adapter object
+ * @wwid: world wide identifier of the raid volume
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on wwid, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid != wwid)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_add - add raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ * This is added to the raid_device_list link list.
+ */
+static void
+_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ raid_device->handle, (unsigned long long)raid_device->wwid));
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_add_tail(&raid_device->list, &ioc->raid_device_list);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_remove - delete raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ */
+static void
+_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_handle - expander device search
+ * @ioc: per adapter object
+ * @handle: expander handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_node_lock
+ *
+ * This searches for expander device based on handle, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_sas_address - expander device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * This searches for expander device based on sas_address, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * _scsih_expander_node_add - insert expander device to the list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_node object
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_is_end_device - determines if device is an end device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Returns 1 if end device.
+ */
+static int
+_scsih_is_end_device(u32 device_info)
+{
+ if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
+ ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_get - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the scmd pointer stored for the given smid.
+ */
+static struct scsi_cmnd *
+_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return ioc->scsi_lookup[smid - 1].scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the scmd pointer stored for the given smid, then clears the
+ * stored pointer from the lookup entry.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ struct scsi_cmnd *scmd;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ scmd = ioc->scsi_lookup[smid - 1].scmd;
+ ioc->scsi_lookup[smid - 1].scmd = NULL;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ return scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_scmd - scmd lookup
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @scmd: pointer to scsi command object
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a scmd pointer in the scsi_lookup array,
+ * returning the relevant smid. A returned value of zero means invalid.
+ */
+static u16
+_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
+ *scmd)
+{
+ u16 smid;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ smid = 0;
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd == scmd) {
+ smid = ioc->scsi_lookup[i].smid;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel &&
+ ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+
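+/**
+ * _scsih_adjust_queue_depth - set the device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * Clamps the requested depth to shost->can_queue (and to
+ * MPT3SAS_SATA_QUEUE_DEPTH for SATA end devices, 1 for non-tagged
+ * devices) before applying it.
+ */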
+static void
+_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ max_depth = shost->can_queue;
+
+ /* limit max device queue for SATA to 32 */
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ goto not_sata;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ if (!sas_target_priv_data)
+ goto not_sata;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
+ goto not_sata;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (sas_device && sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ not_sata:
+
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+}
+
+/**
+ * _scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
+ * (see include/scsi/scsi_host.h for definition)
+ *
+ * Returns queue depth.
+ */
+static int
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+ if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
+ _scsih_adjust_queue_depth(sdev, qdepth);
+ else if (reason == SCSI_QDEPTH_QFULL)
+ scsi_track_queue_full(sdev, qdepth);
+ else
+ return -EOPNOTSUPP;
+
+ if (sdev->inquiry_len > 7)
+ sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \
+ "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
+ sdev->ordered_tags, sdev->scsi_level,
+ (sdev->inquiry[7] & 2) >> 1);
+
+ return sdev->queue_depth;
+}
+
+/**
+ * _scsih_change_queue_type - changing device queue tag type
+ * @sdev: scsi device struct
+ * @tag_type: requested tag type
+ *
+ * Returns queue tag type.
+ */
+static int
+_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+
+
+/**
+ * _scsih_target_alloc - target add routine
+ * @starget: scsi target struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = kzalloc(sizeof(struct MPT3SAS_TARGET), GFP_KERNEL);
+ if (!sas_target_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = sas_target_priv_data;
+ sas_target_priv_data->starget = starget;
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+
+ /* RAID volumes */
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ sas_target_priv_data->handle = raid_device->handle;
+ sas_target_priv_data->sas_address = raid_device->wwid;
+ sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
+ raid_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return 0;
+ }
+
+ /* sas/sata devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+
+ if (sas_device) {
+ sas_target_priv_data->handle = sas_device->handle;
+ sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_device->starget = starget;
+ sas_device->id = starget->id;
+ sas_device->channel = starget->channel;
+ if (test_bit(sas_device->handle, ioc->pd_handles))
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ if (sas_device->fast_path)
+ sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return 0;
+}
+
+/**
+ * _scsih_target_destroy - target destroy routine
+ * @starget: scsi target struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = starget->hostdata;
+ if (!sas_target_priv_data)
+ return;
+
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ raid_device->starget = NULL;
+ raid_device->sdev = NULL;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device && (sas_device->starget == starget) &&
+ (sas_device->id == starget->id) &&
+ (sas_device->channel == starget->channel))
+ sas_device->starget = NULL;
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ out:
+ kfree(sas_target_priv_data);
+ starget->hostdata = NULL;
+}
+
+/**
+ * _scsih_slave_alloc - device add routine
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+
+ sas_device_priv_data = kzalloc(sizeof(struct MPT3SAS_DEVICE), GFP_KERNEL);
+ if (!sas_device_priv_data)
+ return -ENOMEM;
+
+ sas_device_priv_data->lun = sdev->lun;
+ sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns++;
+ sas_device_priv_data->sas_target = sas_target_priv_data;
+ sdev->hostdata = sas_device_priv_data;
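+ /* hidden raid components are not exposed to upper layer drivers */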
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
+ sdev->no_uld_attach = 1;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc,
+ starget->id, starget->channel);
+ if (raid_device)
+ raid_device->sdev = sdev; /* raid is single lun */
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_slave_destroy - device destroy routine
+ * @sdev: scsi device struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_slave_destroy(struct scsi_device *sdev)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns--;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && !sas_target_priv_data->num_luns)
+ sas_device->starget = NULL;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * _scsih_display_sata_capabilities - sata capabilities
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sdev: scsi device struct
+ */
+static void
+_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, struct scsi_device *sdev)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u16 flags;
+ u32 device_info;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ flags = le16_to_cpu(sas_device_pg0.Flags);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ sdev_printk(KERN_INFO, sdev,
+ "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
+ "sw_preserve(%s)\n",
+ (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
+ "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
+}
+
+/*
+ * raid transport support -
+ * Enabled for SLES11 and newer; in older kernels the driver will panic when
+ * unloading the driver followed by a load - I believe that the subroutine
+ * raid_class_release() is not cleaning up properly.
+ */
+
+/**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volume_status_flags;
+ u8 percent_complete;
+ u16 handle;
+
+ percent_complete = 0;
+ handle = 0;
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device) {
+ handle = raid_device->handle;
+ percent_complete = raid_device->percent_complete;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!handle)
+ goto out;
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ percent_complete = 0;
+ goto out;
+ }
+
+ volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (!(volume_status_flags &
+ MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
+ percent_complete = 0;
+
+ out:
+ raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume state
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volstate;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+ u16 handle = 0;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device)
+ handle = raid_device->handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!raid_device)
+ goto out;
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+ state = RAID_STATE_RESYNCING;
+ goto out;
+ }
+
+ switch (vol_pg0.VolumeState) {
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MPI2_RAID_VOL_STATE_FAILED:
+ case MPI2_RAID_VOL_STATE_MISSING:
+ state = RAID_STATE_OFFLINE;
+ break;
+ }
+ out:
+ raid_set_state(mpt3sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @volume_type: volume type
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
+{
+ enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+ switch (volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ level = RAID_LEVEL_0;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ level = RAID_LEVEL_10;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ level = RAID_LEVEL_1E;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ level = RAID_LEVEL_1;
+ break;
+ }
+
+ raid_set_level(mpt3sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+
+/**
+ * _scsih_get_volume_capabilities - volume capabilities
+ * @ioc: per adapter object
+ * @raid_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
+ */
+static int
+_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ Mpi2RaidVolPage0_t *vol_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 sz;
+ u8 num_pds;
+
+ if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
+ &num_pds)) || !num_pds) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ raid_device->num_pds = num_pds;
+ sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
+ sizeof(Mpi2RaidVol0PhysDisk_t));
+ vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!vol_pg0) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ kfree(vol_pg0);
+ return 1;
+ }
+
+ raid_device->volume_type = vol_pg0->VolumeType;
+
+ /* figure out what the underlying devices are by
+ * obtaining the device_info bits for the 1st device
+ */
+ if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
+ vol_pg0->PhysDisk[0].PhysDiskNum))) {
+ if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ le16_to_cpu(pd_pg0.DevHandle)))) {
+ raid_device->device_info =
+ le32_to_cpu(sas_device_pg0.DeviceInfo);
+ }
+ }
+
+ kfree(vol_pg0);
+ return 0;
+}
+
+
+
+/**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enabling Transaction Layer Retries for tape devices when
+ * vpd page 0x90 is present
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+
+ /* only for TAPE */
+ if (sdev->type != TYPE_TAPE)
+ return;
+
+ if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+ return;
+
+ sas_enable_tlr(sdev);
+ sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+ sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+ return;
+
+}
+
+/**
+ * _scsih_slave_configure - device configure routine.
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int qdepth;
+ u8 ssp_target = 0;
+ char *ds = "";
+ char *r_level = "";
+ u16 handle, volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ qdepth = 1;
+ sas_device_priv_data = sdev->hostdata;
+ sas_device_priv_data->configured_lun = 1;
+ sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ handle = sas_target_priv_data->handle;
+
+ /* raid volume handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (!raid_device) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+ if (_scsih_get_volume_capabilities(ioc, raid_device)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+
+ /* RAID Queue Depth Support
+ * IS volume = underlying qdepth of drive type, either
+ * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
+ * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
+ */
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ ds = "SSP";
+ } else {
+ qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ else
+ ds = "STP";
+ }
+
+ switch (raid_device->volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ r_level = "RAID0";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ if (ioc->manu_pg10.OEMIdentifier &&
+ (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
+ MFG10_GF0_R10_DISPLAY) &&
+ !(raid_device->num_pds % 2))
+ r_level = "RAID10";
+ else
+ r_level = "RAID1E";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID1";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID10";
+ break;
+ case MPI2_RAID_VOL_TYPE_UNKNOWN:
+ default:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAIDX";
+ break;
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n",
+ r_level, raid_device->handle,
+ (unsigned long long)raid_device->wwid,
+ raid_device->num_pds, ds);
+
+
+ _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+
+/* raid transport support */
+ _scsih_set_level(sdev, raid_device->volume_type);
+ return 0;
+ }
+
+ /* non-raid handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ if (mpt3sas_config_get_volume_handle(ioc, handle,
+ &volume_handle)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
+ volume_handle, &volume_wwid)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ ssp_target = 1;
+ ds = "SSP";
+ } else {
+ qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "STP";
+ else if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ }
+
+ sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
+ "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
+ ds, handle, (unsigned long long)sas_device->sas_address,
+ sas_device->phy, (unsigned long long)sas_device->device_name);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
+ ds, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!ssp_target)
+ _scsih_display_sata_capabilities(ioc, handle, sdev);
+
+
+ _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+
+ if (ssp_target) {
+ sas_read_port_mode_page(sdev);
+ _scsih_enable_tlr(ioc, sdev);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * @sdev: scsi device struct
+ * @bdev: pointer to block device context
+ * @capacity: device size (in 512 byte sectors)
+ * @params: three element array to place output:
+ * params[0] number of heads (max 255)
+ * params[1] number of sectors (max 63)
+ * params[2] number of cylinders
+ *
+ * Returns 0 always.
+ */
+static int
+_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
+ /*
+ * Handle extended translation size for logical drives
+ * > 1Gb
+ */
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
+
+ /* return result */
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+
+ return 0;
+}
+
+/**
+ * _scsih_response_code - translation of device response code
+ * @ioc: per adapter object
+ * @response_code: response code returned by the device
+ *
+ * Return nothing.
+ */
+static void
+_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
+{
+ char *desc;
+
+ switch (response_code) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case 0xA:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
+ ioc->name, response_code, desc);
+}
+
+/**
+ * _scsih_tm_done - tm completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using scsih_issue_tm.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->tm_cmds.smid != smid)
+ return 1;
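+ /* drain outstanding replies on all reply queues before completing the TM */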
+ mpt3sas_base_flush_reply_queues(ioc);
+ ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->tm_cmds.done);
+ return 1;
+}
+
+/**
+ * mpt3sas_scsih_set_tm_flag - set per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
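+ /*
+ * once a matching handle is found, skip the remaining devices; the
+ * shost_for_each_device() loop must still run to completion so that
+ * its per-device references are released
+ */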
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 1;
+ skip = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 0;
+ skip = 1;
+ ioc->ignore_loginfos = 0;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_issue_tm - main routine for sending tm requests
+ * @ioc: per adapter struct
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ * @timeout: timeout in seconds
+ * @serial_number: the serial_number from scmd
+ * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
+ * Context: user
+ *
+ * A generic API for sending task management requests to firmware.
+ *
+ * The callback index is set inside `ioc->tm_cb_idx`.
+ *
+ * Return SUCCESS or FAILED.
+ */
+int
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+ unsigned long serial_number, enum mutex_type m_type)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ u16 smid = 0;
+ u32 ioc_state;
+ unsigned long timeleft;
+ struct scsiio_tracker *scsi_lookup = NULL;
+ int rc;
+
+ if (m_type == TM_MUTEX_ON)
+ mutex_lock(&ioc->tm_cmds.mutex);
+ if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
+ ioc->name, __func__);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ rc = FAILED;
+ goto err_out;
+ }
+
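+ /*
+ * if the doorbell is in use or the IOC is faulted, attempt a hard
+ * reset before giving up on the TM
+ */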
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "unexpected doorbell active!\n", ioc->name));
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = FAILED;
+ goto err_out;
+ }
+
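+ /*
+ * for ABORT_TASK, remember the scsiio tracker so we can later verify
+ * that the aborted command has actually completed
+ */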
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
+ ioc->name, handle, type, smid_task));
+ ioc->tm_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->tm_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = type;
+ mpi_request->TaskMID = cpu_to_le16(smid_task);
+ int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
+ mpt3sas_scsih_set_tm_flag(ioc, handle);
+ init_completion(&ioc->tm_cmds.done);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ goto err_out;
+ }
+ }
+
+ if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ mpi_reply = ioc->tm_cmds.reply;
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
+ "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+ if (ioc->logging_level & MPT_DEBUG_TM) {
+ _scsih_response_code(ioc, mpi_reply->ResponseCode);
+ if (mpi_reply->IOCStatus)
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ }
+ }
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ if (scsi_lookup->scmd == NULL)
+ break;
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return rc;
+
+ err_out:
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _scsih_tm_display_info - displays info about the device
+ * @ioc: per adapter struct
+ * @scmd: pointer to scsi command object
+ *
+ * Called by task management callback handlers.
+ */
+static void
+_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
+{
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *priv_target = starget->hostdata;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ device_str = "volume";
+
+ scsi_print_command(scmd);
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ starget_printk(KERN_INFO, starget,
+ "%s handle(0x%04x), %s wwid(0x%016llx)\n",
+ device_str, priv_target->handle,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ if (priv_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ starget_printk(KERN_INFO, starget,
+ "volume handle(0x%04x), "
+ "volume wwid(0x%016llx)\n",
+ sas_device->volume_handle,
+ (unsigned long long)sas_device->volume_wwid);
+ }
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ sas_device->handle,
+ (unsigned long long)sas_device->sas_address,
+ sas_device->phy);
+ starget_printk(KERN_INFO, starget,
+ "enclosure_logical_id(0x%016llx), slot(%d)\n",
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_abort - eh threads main abort routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the command was aborted, else FAILED
+ */
+static int
+_scsih_abort(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u16 smid;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting task abort! scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* search for the command */
+ smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
+ if (!smid) {
+ scmd->result = DID_RESET << 16;
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components and volumes this is not supported */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT ||
+ sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ mpt3sas_halt_firmware(ioc);
+
+ handle = sas_device_priv_data->sas_target->handle;
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ scmd->serial_number, TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_dev_reset - eh threads main device reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the device was reset, else FAILED
+ */
+static int
+_scsih_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting device reset! scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
+ TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_target_reset - eh threads main target reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the target was reset, else FAILED
+ */
+static int
+_scsih_target_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+ struct scsi_target *starget = scmd->device->sdev_target;
+
+ starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
+ scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
+ scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 30, 0, TM_MUTEX_ON);
+
+ out:
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+
+/**
+ * _scsih_host_reset - eh threads main host reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the host was reset, else FAILED
+ */
+static int
+_scsih_host_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ int r, retval;
+
+ pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
+ ioc->name, scmd);
+ scsi_print_command(scmd);
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ r = (retval < 0) ? FAILED : SUCCESS;
+ pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
+ ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return r;
+}
+
+/**
+ * _scsih_fw_event_add - insert and queue up fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This adds the firmware event object to the linked list, then queues it up to
+ * be processed from user context.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ unsigned long flags;
+
+ if (ioc->firmware_event_thread == NULL)
+ return;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ INIT_LIST_HEAD(&fw_event->list);
+ list_add_tail(&fw_event->list, &ioc->fw_event_list);
+ INIT_WORK(&fw_event->work, _firmware_event_work);
+ queue_work(ioc->firmware_event_thread, &fw_event->work);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_fw_event_free - delete fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This removes the firmware event object from the linked list and frees the
+ * associated memory.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
+ *fw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_del(&fw_event->list);
+ kfree(fw_event->event_data);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
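
Each queued fw_event_work embeds its work_struct, so the work handler (e.g.
_firmware_event_work(), defined elsewhere in this file) can recover the
enclosing event with container_of(). A self-contained illustration of that
pattern using stand-in types (not the driver's real definitions):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };

	struct fw_event_work_demo {		/* hypothetical stand-in */
		int event;
		struct work_struct work;
	};

	static void work_handler(struct work_struct *work)
	{
		/* recover the enclosing event object from its embedded work item */
		struct fw_event_work_demo *fw_event =
			container_of(work, struct fw_event_work_demo, work);

		printf("processing event %d\n", fw_event->event);
	}

	int main(void)
	{
		struct fw_event_work_demo ev = { .event = 42 };

		work_handler(&ev.work);	/* the workqueue would invoke this callback */
		return 0;
	}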
+
+
+/**
+ * mpt3sas_send_trigger_data_event - send event for processing trigger data
+ * @ioc: per adapter object
+ * @event_data: trigger event data
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC);
+ if (!fw_event->event_data) {
+ kfree(fw_event);
+ return;
+ }
+ fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
+ fw_event->ioc = ioc;
+ memcpy(fw_event->event_data, event_data, sizeof(*event_data));
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_error_recovery_delete_devices - remove devices not responding
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * mpt3sas_port_enable_complete - port enable completed (fake event)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
+ * @ioc: per adapter object
+ *
+ * Walk the firmware event queue; cancel work that has not started yet,
+ * and flag in-flight events so the worker stops processing them.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event, *next;
+
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->firmware_event_thread || in_interrupt())
+ return;
+
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work(&fw_event->delayed_work)) {
+ _scsih_fw_event_free(ioc, fw_event);
+ continue;
+ }
+ fw_event->cancel_pending_work = 1;
+ }
+}
+
+/**
+ * _scsih_ublock_io_all_device - unblock every device
+ * @ioc: per adapter object
+ *
+ * Change the device state from SDEV_BLOCK to SDEV_RUNNING.
+ */
+static void
+_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (!sas_device_priv_data->block)
+ continue;
+
+ sas_device_priv_data->block = 0;
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
+ "device_running, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle));
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+}
+
+
+/**
+ * _scsih_ublock_io_device - unblock a device
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ *
+ * Change the state of any blocked sdev matching @sas_address back to
+ * SDEV_RUNNING.
+ */
+static void
+_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->sas_address
+ != sas_address)
+ continue;
+ if (sas_device_priv_data->block) {
+ sas_device_priv_data->block = 0;
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_all_device - set every device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ *
+ * During device pull we need to appropriately set the sdev state.
+ */
+static void
+_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ }
+}
+
+/**
+ * _scsih_block_io_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropriately set the sdev state.
+ */
+static void
+_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle != handle)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ sdev_printk(KERN_INFO, sdev,
+ "device_blocked, handle(0x%04x)\n", handle);
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_to_ex - block devices behind an expander
+ * @ioc: per adapter object
+ * @sas_expander: the sas expander object
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all devices
+ * attached to this expander. It is called when the expander is
+ * pulled.
+ */
+static void
+_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_sibling;
+ unsigned long flags;
+
+ if (!sas_expander)
+ return;
+
+ list_for_each_entry(mpt3sas_port,
+ &sas_expander->sas_port_list, port_list) {
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device =
+ mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ if (sas_device)
+ set_bit(sas_device->handle,
+ ioc->blocking_handles);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+ }
+
+ list_for_each_entry(mpt3sas_port,
+ &sas_expander->sas_port_list, port_list) {
+
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE) {
+ expander_sibling =
+ mpt3sas_scsih_expander_find_by_sas_address(
+ ioc, mpt3sas_port->remote_identify.sas_address);
+ _scsih_block_io_to_children_attached_to_ex(ioc,
+ expander_sibling);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_directly - block directly attached devices
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all directly
+ * attached devices during device pull.
+ */
+static void
+_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ phy_number = event_data->StartPhyNum + i;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
+ }
+}
+
+/**
+ * _scsih_tm_tr_send - send task management request
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This code initiates the device removal handshake protocol with the
+ * controller firmware. The function issues a target reset using the
+ * high priority request queue; a sas iounit control request
+ * (MPI2_SAS_OP_REMOVE_DEVICE) is then sent from its completion routine.
+ *
+ * This is designed to send multiple task management requests to the
+ * fifo at the same time. If the fifo is full, the request is appended
+ * to the delayed list and processed from a future completion.
+ */
+static void
+_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _sas_device *sas_device;
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ struct _tr_list *delayed_tr;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed: handle(0x%04x)\n",
+ __func__, ioc->name, handle));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery: handle(0x%04x)\n",
+ __func__, ioc->name,
+ handle));
+ return;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational: handle(0x%04x)\n",
+ __func__, ioc->name,
+ handle));
+ return;
+ }
+
+ /* if PD, then return */
+ if (test_bit(handle, ioc->pd_handles))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device && sas_device->starget &&
+ sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ sas_address = sas_device->sas_address;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_target_priv_data) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle,
+ (unsigned long long)sas_address));
+ _scsih_ublock_io_device(ioc, sas_address);
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid,
+ ioc->tm_tr_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
+}
+
+/**
+ * _scsih_tm_tr_complete - target reset completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the target reset completion routine, part of the device
+ * removal handshake protocol with the controller firmware. It sends
+ * a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE).
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u16 smid_sas_ctrl;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed\n", __func__, ioc->name));
+ return 1;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery\n", __func__,
+ ioc->name));
+ return 1;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational\n", __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+ mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, pr_err(MPT3SAS_FMT
+ "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
+ if (!smid_sas_ctrl) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return 1;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid_sas_ctrl,
+ ioc->tm_sas_control_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
+ memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+ mpi_request->DevHandle = mpi_request_tm->DevHandle;
+ mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
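
Taken together, _scsih_tm_tr_send() and _scsih_tm_tr_complete() implement the
removal handshake in three steps: a high-priority target reset, its
completion, and then the SAS IO unit control request that asks firmware to
drop the device. A toy state machine spelling out that ordering (illustrative
only, not driver code):

	#include <stdio.h>

	enum removal_state {
		SEND_TARGET_RESET,	/* _scsih_tm_tr_send() */
		AWAIT_TR_REPLY,		/* firmware terminates outstanding IO */
		SEND_REMOVE_DEVICE,	/* _scsih_tm_tr_complete() */
		DONE
	};

	int main(void)
	{
		enum removal_state s = SEND_TARGET_RESET;

		while (s != DONE) {
			switch (s) {
			case SEND_TARGET_RESET:
				printf("high-priority target reset queued\n");
				s = AWAIT_TR_REPLY;
				break;
			case AWAIT_TR_REPLY:
				printf("target reset reply received\n");
				s = SEND_REMOVE_DEVICE;
				break;
			case SEND_REMOVE_DEVICE:
				printf("MPI2_SAS_OP_REMOVE_DEVICE issued\n");
				s = DONE;
				break;
			case DONE:
				break;
			}
		}
		return 0;
	}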
+
+
+/**
+ * _scsih_sas_control_complete - completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the sas iounit control completion routine.
+ * It is part of the device removal handshake protocol with the
+ * controller firmware.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ Mpi2SasIoUnitControlReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (likely(mpi_reply)) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ } else {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ return 1;
+}
+
+/**
+ * _scsih_tm_tr_volume_send - send target reset request for volumes
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This is designed to send multiple task management requests to the
+ * fifo at the same time. If the fifo is full, the request is appended
+ * to the delayed list and processed from a future completion.
+ */
+static void
+_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _tr_list *delayed_tr;
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host reset in progress!\n",
+ __func__, ioc->name));
+ return;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid,
+ ioc->tm_tr_volume_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+}
+
+/**
+ * _scsih_tm_volume_tr_complete - target reset completion
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host reset in progress!\n",
+ __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, pr_err(MPT3SAS_FMT
+ "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+
+/**
+ * _scsih_check_for_pending_tm - check for pending task management
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * This checks the delayed target reset lists and feeds the
+ * next request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct _tr_list *delayed_tr;
+
+ if (!list_empty(&ioc->delayed_tr_volume_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
+ struct _tr_list, list);
+ mpt3sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ if (!list_empty(&ioc->delayed_tr_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ struct _tr_list, list);
+ mpt3sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ return 1;
+}
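
The delayed target reset handling is a simple deferral queue: when no
high-priority smid is free the handle is parked on a list, and each completion
frees its smid and replays one parked entry. A standalone sketch of that
pattern (hypothetical names, fixed-size FIFO instead of the driver's linked
lists):

	#include <stdio.h>

	#define MAX_DELAYED 8

	static unsigned short delayed[MAX_DELAYED];	/* parked device handles */
	static unsigned head, tail;
	static int free_smids = 1;			/* pretend one hi-pri slot exists */

	static void tr_send(unsigned short handle)
	{
		if (!free_smids) {
			delayed[tail++ % MAX_DELAYED] = handle;	/* park, like delayed_tr_list */
			printf("DELAYED: handle 0x%04x\n", handle);
			return;
		}
		free_smids--;
		printf("target reset sent: handle 0x%04x\n", handle);
	}

	/* mirrors _scsih_check_for_pending_tm(): free the smid, feed the next request */
	static void tr_complete(void)
	{
		free_smids++;
		if (head != tail)
			tr_send(delayed[head++ % MAX_DELAYED]);
	}

	int main(void)
	{
		tr_send(0x0009);	/* goes out immediately */
		tr_send(0x000a);	/* no smid free, parked */
		tr_complete();		/* completion replays 0x000a */
		tr_complete();
		return 0;
	}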
+
+/**
+ * _scsih_check_topo_delete_events - sanity check on topo events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ *
+ * This routine was added to better handle cable pulls (cable breaker).
+ *
+ * This handles the case where the driver receives multiple expander
+ * add and delete events in a single shot. When there is a delete event,
+ * the routine will void any pending add events waiting in the event queue.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventDataSasTopologyChangeList_t *local_event_data;
+ u16 expander_handle;
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i, reason_code;
+ u16 handle;
+
+ for (i = 0 ; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
+ _scsih_tm_tr_send(ioc, handle);
+ }
+
+ expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+ if (expander_handle < ioc->sas_hba.num_phys) {
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+ return;
+ }
+ if (event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
+ /* put expander attached devices into blocking state */
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ expander_handle);
+ _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ do {
+ handle = find_first_bit(ioc->blocking_handles,
+ ioc->facts.MaxDevHandle);
+ if (handle < ioc->facts.MaxDevHandle)
+ _scsih_block_io_device(ioc, handle);
+ } while (test_and_clear_bit(handle, ioc->blocking_handles));
+ } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+
+ if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
+ return;
+
+ /* mark ignore flag for pending events */
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
+ if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+ fw_event->ignore)
+ continue;
+ local_event_data = fw_event->event_data;
+ if (local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_ADDED ||
+ local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
+ if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
+ expander_handle) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting ignoring flag\n", ioc->name));
+ fw_event->ignore = 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
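
The expander-delete path above collects the affected device handles in the
ioc->blocking_handles bitmap and then drains it with find_first_bit() and
test_and_clear_bit(). A standalone sketch of that drain loop using toy bitmap
helpers (illustrative only; a single-word bitmap instead of the driver's
MaxDevHandle-sized one):

	#include <stdio.h>

	static unsigned long blocking;			/* toy blocking_handles bitmap */
	#define MAX_HANDLE (sizeof(blocking) * 8)

	static unsigned find_first(void)		/* stands in for find_first_bit() */
	{
		for (unsigned i = 0; i < MAX_HANDLE; i++)
			if (blocking & (1UL << i))
				return i;
		return MAX_HANDLE;
	}

	static int test_and_clear(unsigned bit)	/* stands in for test_and_clear_bit() */
	{
		if (bit >= MAX_HANDLE || !(blocking & (1UL << bit)))
			return 0;
		blocking &= ~(1UL << bit);
		return 1;
	}

	int main(void)
	{
		unsigned handle;

		blocking = (1UL << 9) | (1UL << 12);	/* two devices behind the expander */
		do {
			handle = find_first();
			if (handle < MAX_HANDLE)
				printf("blocking handle 0x%04x\n", handle);
		} while (test_and_clear(handle));
		return 0;
	}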
+
+/**
+ * _scsih_set_volume_delete_flag - setting volume delete flag
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * This returns nothing.
+ */
+static void
+_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device && raid_device->starget &&
+ raid_device->starget->hostdata) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: handle(0x%04x), "
+ "wwid(0x%016llx)\n", ioc->name, handle,
+ (unsigned long long) raid_device->wwid));
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
+ * @handle: input handle
+ * @a: handle for volume a
+ * @b: handle for volume b
+ *
+ * IR firmware only supports two raid volumes. This routine stores the
+ * given volume handle in either @a or @b, provided the handle is
+ * non-zero and has not already been recorded.
+ */
+static void
+_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
+{
+ if (!handle || handle == *a || handle == *b)
+ return;
+ if (!*a)
+ *a = handle;
+ else if (!*b)
+ *b = handle;
+}
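
Since the routine only has the two slots to fill, duplicate and zero handles
fall through and a third distinct volume handle is dropped. A small standalone
test of that behaviour (illustrative only, not driver code):

	#include <assert.h>

	static void set_volume_handle_for_tr(unsigned short handle,
			unsigned short *a, unsigned short *b)
	{
		if (!handle || handle == *a || handle == *b)
			return;
		if (!*a)
			*a = handle;
		else if (!*b)
			*b = handle;
	}

	int main(void)
	{
		unsigned short a = 0, b = 0;

		set_volume_handle_for_tr(0x11, &a, &b);	/* fills a */
		set_volume_handle_for_tr(0x11, &a, &b);	/* duplicate, ignored */
		set_volume_handle_for_tr(0, &a, &b);	/* zero handle, ignored */
		set_volume_handle_for_tr(0x22, &a, &b);	/* fills b */
		set_volume_handle_for_tr(0x33, &a, &b);	/* dropped, both slots used */
		assert(a == 0x11 && b == 0x22);
		return 0;
	}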
+
+/**
+ * _scsih_check_ir_config_unhide_events - check for UNHIDE events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This routine will send target reset to volume, followed by target
+ * resets to the PDs. This is called when a PD has been removed, or
+ * volume has been deleted or removed. When the target reset is sent
+ * to volume, the PD target resets need to be queued to start upon
+ * completion of the volume target reset.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u16 handle, volume_handle, a, b;
+ struct _tr_list *delayed_tr;
+
+ a = 0;
+ b = 0;
+
+ /* Volume Resets for Deleted or Removed */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_delete_flag(ioc, volume_handle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ /* Volume Resets for UNHIDE events */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ if (a)
+ _scsih_tm_tr_volume_send(ioc, a);
+ if (b)
+ _scsih_tm_tr_volume_send(ioc, b);
+
+ /* PD target resets */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
+ continue;
+ handle = le16_to_cpu(element->PhysDiskDevHandle);
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ clear_bit(handle, ioc->pd_handles);
+ if (!volume_handle)
+ _scsih_tm_tr_send(ioc, handle);
+ else if (volume_handle == a || volume_handle == b) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ BUG_ON(!delayed_tr);
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
+ handle));
+ } else
+ _scsih_tm_tr_send(ioc, handle);
+ }
+}
+
+
+/**
+ * _scsih_check_volume_delete_events - set delete flag for volumes
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This handles the case when the cable connected to an entire volume is
+ * pulled. The deleted flag is set so that normal IO will not be sent to
+ * the volume.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrVolume_t *event_data)
+{
+ u32 state;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+ state = le32_to_cpu(event_data->NewValue);
+ if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
+ MPI2_RAID_VOL_STATE_FAILED)
+ _scsih_set_volume_delete_flag(ioc,
+ le16_to_cpu(event_data->VolDevHandle));
+}
+
+/**
+ * _scsih_flush_running_cmds - completing outstanding commands.
+ * @ioc: per adapter object
+ *
+ * Flushes out all pending scmd commands following a host reset; the
+ * outstanding IO is completed back to the midlayer with DID_RESET (or
+ * DID_NO_CONNECT during pci error recovery).
+ *
+ * Return nothing.
+ */
+static void
+_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct scsi_cmnd *scmd;
+ u16 smid;
+ u16 count = 0;
+
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (!scmd)
+ continue;
+ count++;
+ mpt3sas_base_free_smid(ioc, smid);
+ scsi_dma_unmap(scmd);
+ if (ioc->pci_error_recovery)
+ scmd->result = DID_NO_CONNECT << 16;
+ else
+ scmd->result = DID_RESET << 16;
+ scmd->scsi_done(scmd);
+ }
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
+ ioc->name, count));
+}
+
+/**
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO request message frame
+ *
+ * Supports protection types 1, 2 and 3.
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIORequest_t *mpi_request)
+{
+ u16 eedp_flags;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+ unsigned char prot_type = scsi_get_prot_type(scmd);
+ Mpi25SCSIIORequest_t *mpi_request_3v =
+ (Mpi25SCSIIORequest_t *)mpi_request;
+
+ if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
+ return;
+
+ if (prot_op == SCSI_PROT_READ_STRIP)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
+ else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ else
+ return;
+
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+
+ /*
+ * enable ref/guard checking
+ * auto increment ref tag
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(scsi_get_lba(scmd));
+ break;
+
+ case SCSI_PROT_DIF_TYPE3:
+
+ /*
+ * enable guard checking
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+
+ break;
+ }
+
+ mpi_request_3v->EEDPBlockSize =
+ cpu_to_le16(scmd->device->sector_size);
+ mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
+}
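
For DIF type 1/2 the request seeds the primary reference tag with the starting
LBA and lets the controller auto-increment it per protected block, which is
what the INC_PRI_REFTAG/CHECK_REFTAG flags above request. A trivial sketch of
the per-block reference tags the target will then expect (illustrative only):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t start_lba = 0x1000;	/* what scsi_get_lba() would return */
		unsigned blocks = 4;		/* blocks covered by the transfer */

		/* type 1/2 protection: reference tag starts at the LBA and
		 * increments by one for every logical block */
		for (unsigned i = 0; i < blocks; i++)
			printf("block %u: expected reference tag 0x%08x\n",
			       i, start_lba + i);
		return 0;
	}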
+
+/**
+ * _scsih_eedp_error_handling - return sense code for EEDP errors
+ * @scmd: pointer to scsi command object
+ * @ioc_status: ioc status
+ *
+ * Returns nothing
+ */
+static void
+_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+{
+ u8 ascq;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
+ ascq);
+ scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
+ SAM_STAT_CHECK_CONDITION;
+}
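
The ASC/ASCQ pairs built here are the T10 DIF check-failure codes: 0x10/0x01
logical block guard check failed, 0x10/0x02 application tag check failed,
0x10/0x03 reference tag check failed. The scmd->result packing can be checked
with a small standalone sketch (byte values as defined in include/scsi/scsi.h:
DRIVER_SENSE 0x08, DID_ABORT 0x05, SAM_STAT_CHECK_CONDITION 0x02):

	#include <stdio.h>

	int main(void)
	{
		unsigned int driver_sense = 0x08;	/* DRIVER_SENSE */
		unsigned int did_abort    = 0x05;	/* DID_ABORT */
		unsigned int check_cond   = 0x02;	/* SAM_STAT_CHECK_CONDITION */

		/* same packing as scmd->result in the function above */
		unsigned int result = (driver_sense << 24) | (did_abort << 16) | check_cond;

		printf("scmd->result = 0x%08x (driver 0x%02x, host 0x%02x, status 0x%02x)\n",
		       result, result >> 24, (result >> 16) & 0xff, result & 0xff);
		return 0;
	}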
+
+
+/**
+ * _scsih_qcmd_lck - main scsi request entry point
+ * @scmd: pointer to scsi command object
+ * @done: function pointer to be invoked on completion
+ *
+ * The callback index is set inside `ioc->scsi_io_cb_idx`.
+ *
+ * Returns 0 on success. On failure, returns either
+ * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
+ * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full.
+ */
+static int
+_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ Mpi2SCSIIORequest_t *mpi_request;
+ u32 mpi_control;
+ u16 smid;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_SCSI)
+ scsi_print_command(scmd);
+#endif
+
+ scmd->scsi_done = done;
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (ioc->pci_error_recovery || ioc->remove_host) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+
+ /* invalid device handle */
+ handle = sas_target_priv_data->handle;
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+
+ /* host recovery or link resets sent via IOCTLs */
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /* device has been deleted */
+ else if (sas_target_priv_data->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ /* device busy with task management */
+ } else if (sas_target_priv_data->tm_busy ||
+ sas_device_priv_data->block)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
+ else
+ mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
+
+ /* set tags */
+ if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) {
+ if (scmd->device->tagged_supported) {
+ if (scmd->device->ordered_tags)
+ mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
+ else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+
+ if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+ scmd->cmd_len != 32)
+ mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
+
+ smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ _scsih_setup_eedp(ioc, scmd, mpi_request);
+
+ if (scmd->cmd_len == 32)
+ mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+ else
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+ mpi_request->Control = cpu_to_le32(mpi_control);
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
+ mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
+ mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ mpi_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+ int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
+ mpi_request->LUN);
+ memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
+
+ if (mpi_request->DataLength) {
+ if (ioc->build_sg_scmd(ioc, scmd, smid)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ } else
+ ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
+
+ if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
+ if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
+ MPI25_SCSIIO_IOFLAGS_FAST_PATH);
+ mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ } else
+ mpt3sas_base_put_smid_scsi_io(ioc, smid, handle);
+ } else
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+
+ out:
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+static DEF_SCSI_QCMD(_scsih_qcmd)
+
+
+/**
+ * _scsih_normalize_sense - normalize descriptor and fixed format sense data
+ * @sense_buffer: sense data returned by target
+ * @data: normalized skey/asc/ascq
+ *
+ * Return nothing.
+ */
+static void
+_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
+{
+ if ((sense_buffer[0] & 0x7F) >= 0x72) {
+ /* descriptor format */
+ data->skey = sense_buffer[1] & 0x0F;
+ data->asc = sense_buffer[2];
+ data->ascq = sense_buffer[3];
+ } else {
+ /* fixed format */
+ data->skey = sense_buffer[2] & 0x0F;
+ data->asc = sense_buffer[12];
+ data->ascq = sense_buffer[13];
+ }
+}
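
_scsih_normalize_sense() handles both SCSI sense formats: descriptor format
(response codes 0x72/0x73) carries the sense key, ASC and ASCQ in bytes 1-3,
while fixed format (0x70/0x71) carries them in bytes 2, 12 and 13. A minimal
standalone sketch of the same logic, with a hypothetical local sense_info type
so it builds outside the driver:

	#include <stdio.h>

	struct sense_info { unsigned char skey, asc, ascq; };

	static void normalize_sense(const unsigned char *sb, struct sense_info *d)
	{
		if ((sb[0] & 0x7F) >= 0x72) {
			/* descriptor format: key/asc/ascq in bytes 1..3 */
			d->skey = sb[1] & 0x0F;
			d->asc = sb[2];
			d->ascq = sb[3];
		} else {
			/* fixed format: key in byte 2, asc/ascq in bytes 12/13 */
			d->skey = sb[2] & 0x0F;
			d->asc = sb[12];
			d->ascq = sb[13];
		}
	}

	int main(void)
	{
		/* fixed-format MEDIUM ERROR example: skey 0x3, asc 0x11, ascq 0x00 */
		unsigned char fixed[18] = { 0x70, 0, 0x03, 0, 0, 0, 0, 10,
					    0, 0, 0, 0, 0x11, 0x00 };
		struct sense_info si;

		normalize_sense(fixed, &si);
		printf("skey=0x%x asc=0x%x ascq=0x%x\n", si.skey, si.asc, si.ascq);
		return 0;
	}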
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * scsi_status - SCSI Status code returned from target device
+ * scsi_state - state info associated with SCSI_IO determined by ioc
+ * ioc_status - ioc supplied status info
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
+{
+ u32 response_info;
+ u8 *response_bytes;
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ u8 scsi_state = mpi_reply->SCSIState;
+ u8 scsi_status = mpi_reply->SCSIStatus;
+ char *desc_ioc_state = NULL;
+ char *desc_scsi_status = NULL;
+ char *desc_scsi_state = ioc->tmp_string;
+ u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *priv_target = starget->hostdata;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ device_str = "volume";
+
+ if (log_info == 0x31170000)
+ return;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_SUCCESS:
+ desc_ioc_state = "success";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc_ioc_state = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ desc_ioc_state = "scsi recovered error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ desc_ioc_state = "scsi invalid dev handle";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ desc_ioc_state = "scsi device not there";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ desc_ioc_state = "scsi data overrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ desc_ioc_state = "scsi data underrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ desc_ioc_state = "scsi io data error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ desc_ioc_state = "scsi protocol error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ desc_ioc_state = "scsi task terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ desc_ioc_state = "scsi residual mismatch";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ desc_ioc_state = "scsi task mgmt failed";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ desc_ioc_state = "scsi ioc terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ desc_ioc_state = "scsi ext terminated";
+ break;
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc_ioc_state = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc_ioc_state = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc_ioc_state = "eedp app tag error";
+ break;
+ default:
+ desc_ioc_state = "unknown";
+ break;
+ }
+
+ switch (scsi_status) {
+ case MPI2_SCSI_STATUS_GOOD:
+ desc_scsi_status = "good";
+ break;
+ case MPI2_SCSI_STATUS_CHECK_CONDITION:
+ desc_scsi_status = "check condition";
+ break;
+ case MPI2_SCSI_STATUS_CONDITION_MET:
+ desc_scsi_status = "condition met";
+ break;
+ case MPI2_SCSI_STATUS_BUSY:
+ desc_scsi_status = "busy";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE:
+ desc_scsi_status = "intermediate";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
+ desc_scsi_status = "intermediate condmet";
+ break;
+ case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
+ desc_scsi_status = "reservation conflict";
+ break;
+ case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
+ desc_scsi_status = "command terminated";
+ break;
+ case MPI2_SCSI_STATUS_TASK_SET_FULL:
+ desc_scsi_status = "task set full";
+ break;
+ case MPI2_SCSI_STATUS_ACA_ACTIVE:
+ desc_scsi_status = "aca active";
+ break;
+ case MPI2_SCSI_STATUS_TASK_ABORTED:
+ desc_scsi_status = "task aborted";
+ break;
+ default:
+ desc_scsi_status = "unknown";
+ break;
+ }
+
+ desc_scsi_state[0] = '\0';
+ if (!scsi_state)
+ desc_scsi_state = " ";
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ strcat(desc_scsi_state, "response info ");
+ if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ strcat(desc_scsi_state, "state terminated ");
+ if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
+ strcat(desc_scsi_state, "no status ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
+ strcat(desc_scsi_state, "autosense failed ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
+ strcat(desc_scsi_state, "autosense valid ");
+
+ scsi_print_command(scmd);
+
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ pr_warn(MPT3SAS_FMT
+ "\tsas_address(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ pr_warn(MPT3SAS_FMT
+ "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ pr_warn(MPT3SAS_FMT
+ "\trequest_len(%d), underflow(%d), resid(%d)\n",
+ ioc->name, scsi_bufflen(scmd), scmd->underflow,
+ scsi_get_resid(scmd));
+ pr_warn(MPT3SAS_FMT
+ "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ pr_warn(MPT3SAS_FMT
+ "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+ ioc->name, desc_scsi_status,
+ scsi_status, desc_scsi_state, scsi_state);
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ pr_warn(MPT3SAS_FMT
+ "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
+ response_info = le32_to_cpu(mpi_reply->ResponseInfo);
+ response_bytes = (u8 *)&response_info;
+ _scsih_response_code(ioc, response_bytes[0]);
+ }
+}
+#endif
+
+/**
+ * _scsih_turn_on_fault_led - illuminate Fault LED
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_TURN_ON_FAULT_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ Mpi2EventNotificationReply_t *event_reply;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data;
+ struct _sas_device *sas_device;
+ ssize_t sz;
+ unsigned long flags;
+
+ /* only handle non-raid devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
+ ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget_printk(KERN_WARNING, starget, "predicted fault\n");
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_fault_led(ioc, handle);
+
+ /* insert into event log */
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(Mpi2EventDataSasDeviceStatusChange_t);
+ event_reply = kzalloc(sz, GFP_KERNEL);
+ if (!event_reply) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ event_reply->Event =
+ cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ event_reply->MsgLength = sz/4;
+ event_reply->EventDataLength =
+ cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
+ event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
+ event_reply->EventData;
+ event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
+ event_data->ASC = 0x5D;
+ event_data->DevHandle = cpu_to_le16(handle);
+ event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
+ mpt3sas_ctl_add_to_event_log(ioc, event_reply);
+ kfree(event_reply);
+}
+
+/**
+ * _scsih_io_done - scsi request callback
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when using _scsih_qcmd.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ Mpi2SCSIIOReply_t *mpi_reply;
+ struct scsi_cmnd *scmd;
+ u16 ioc_status;
+ u32 xfer_cnt;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 response_code = 0;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (scmd == NULL)
+ return 1;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
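+ /* no reply frame: the firmware completed the request successfully */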
+ if (mpi_reply == NULL) {
+ scmd->result = DID_OK << 16;
+ goto out;
+ }
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ sas_device_priv_data->sas_target->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+
+ /* turning off TLR: if the first completed command for this device
+ * reports an invalid frame response, disable TLR for subsequent I/O
+ */
+ scsi_state = mpi_reply->SCSIState;
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ response_code =
+ le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
+ if (!sas_device_priv_data->tlr_snoop_check) {
+ sas_device_priv_data->tlr_snoop_check++;
+ if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+ response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
+ sas_device_priv_data->flags &=
+ ~MPT_DEVICE_TLR_ON;
+ }
+
+ xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ scsi_status = mpi_reply->SCSIStatus;
+
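+ /* a busy-type SCSI status with no data transferred on an underrun is
+ * reported as a successful IOC completion so the raw SCSI status
+ * reaches the midlayer
+ */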
+ if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+ (scsi_status == MPI2_SCSI_STATUS_BUSY ||
+ scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
+ ioc_status = MPI2_IOCSTATUS_SUCCESS;
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(mpi_reply->SenseCount));
+ memcpy(scmd->sense_buffer, sense_data, sz);
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ /* failure prediction threshold exceeded */
+ if (data.asc == 0x5D)
+ _scsih_smart_predicted_fault(ioc,
+ le16_to_cpu(mpi_reply->DevHandle));
+ mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
+ }
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_BUSY:
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ if (sas_device_priv_data->block) {
+ scmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ goto out;
+ }
+ if (log_info == 0x31110630) {
+ if (scmd->retries > 2) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_device_set_state(scmd->device,
+ SDEV_OFFLINE);
+ } else {
+ scmd->result = DID_SOFT_ERROR << 16;
+ scmd->device->expecting_cc_ua = 1;
+ }
+ break;
+ }
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+
+ if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
+ break;
+
+ if (xfer_cnt < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
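+ /* REPORT LUNS returned no data: manufacture a CHECK CONDITION with
+ * ILLEGAL REQUEST, ASC 0x20 (invalid command operation code)
+ */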
+ mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
+ mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->sense_buffer[0] = 0x70;
+ scmd->sense_buffer[2] = ILLEGAL_REQUEST;
+ scmd->sense_buffer[12] = 0x20;
+ scmd->sense_buffer[13] = 0;
+ }
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
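+ /* no break: an overrun is otherwise treated as a successful completion */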
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (response_code ==
+ MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
+ (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS)))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ _scsih_eedp_error_handling(scmd, ioc_status);
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+
+ }
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
+ _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
+#endif
+
+ out:
+
+ scsi_dma_unmap(scmd);
+
+ scmd->scsi_done(scmd);
+ return 1;
+}
+
+/**
+ * _scsih_sas_host_refresh - refreshing sas host object contents
+ * @ioc: per adapter object
+ * Context: user
+ *
+ * During port enable, the firmware sends topology events for every device.
+ * It's possible that the handles have changed from the previous setting, so
+ * this code keeps the handles updated whenever they change.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 sz;
+ u16 ioc_status;
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u8 link_rate;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "updating handles for sas_host(0x%016llx)\n",
+ ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
+ * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
+ AttachedDevHandle);
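+ /* report at least a 1.5 Gbps link rate when a device is attached */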
+ if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
+ mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
+ attached_handle, i, link_rate);
+ }
+ out:
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_sas_host_add - create sas host object
+ * @ioc: per adapter object
+ *
+ * Creating host side data object, stored in ioc->sas_hba
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u16 ioc_status;
+ u16 sz;
+ u8 device_missing_delay;
+
+ mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys);
+ if (!ioc->sas_hba.num_phys) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ /* sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ ioc->io_missing_delay =
+ sas_iounit_pg1->IODeviceMissingDelay;
+ device_missing_delay =
+ sas_iounit_pg1->ReportDeviceMissingDelay;
+ if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ ioc->device_missing_delay = (device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ ioc->device_missing_delay = device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+ ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
+ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!ioc->sas_hba.phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ i))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ ioc->sas_hba.phy[i].phy_id = i;
+ mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
+ phy_pg0, ioc->sas_hba.parent_dev);
+ }
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->sas_hba.enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ pr_info(MPT3SAS_FMT
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ ioc->name, ioc->sas_hba.handle,
+ (unsigned long long) ioc->sas_hba.sas_address,
+ ioc->sas_hba.num_phys);
+
+ if (ioc->sas_hba.enclosure_handle) {
+ if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ ioc->sas_hba.enclosure_handle)))
+ ioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_expander_add - creating expander object
+ * @ioc: per adapter object
+ * @handle: expander handle
+ *
+ * Creating expander object, stored in ioc->sas_expander_list.
+ *
+ * Return 0 for success, else error.
+ */
+static int
+_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u32 ioc_status;
+ u16 parent_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt3sas_port = NULL;
+
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return -1;
+
+ if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* handle out of order topology events */
+ parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
+ if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
+ != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if (sas_address_parent != ioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address_parent);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = _scsih_expander_add(ioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct _sas_node),
+ GFP_KERNEL);
+ if (!sas_expander) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.NumPhys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+
+ pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
+ " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys)
+ goto out_fail;
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
+ sas_address_parent);
+ if (!mpt3sas_port) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+
+ if ((mpt3sas_transport_add_expander_phy(ioc,
+ &sas_expander->phy[i], expander_pg1,
+ sas_expander->parent_dev))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_expander->enclosure_handle)))
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+
+ _scsih_expander_node_add(ioc, sas_expander);
+ return 0;
+
+ out_fail:
+
+ if (mpt3sas_port)
+ mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_address_parent);
+ kfree(sas_expander);
+ return rc;
+}
+
+/**
+ * mpt3sas_expander_remove - removing expander object
+ * @ioc: per adapter object
+ * @sas_address: expander sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_expander)
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (sas_expander)
+ _scsih_expander_node_remove(ioc, sas_expander);
+}
+
+/**
+ * _scsih_done - internal SCSI_IO callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internal generated SCSI_IO.
+ * The callback index passed is `ioc->scsih_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->scsih_cmds.smid != smid)
+ return 1;
+ ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->scsih_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->scsih_cmds.done);
+ return 1;
+}
+
+
+
+
+#define MPT3_MAX_LUNS (255)
+
+
+/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ pr_err(MPT3SAS_FMT
+ "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ ioc->name, desc, (unsigned long long)sas_address, handle);
+ return rc;
+}
+
+/**
+ * _scsih_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @parent_sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
+ u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* wide port handling: the device only needs to be handled once, for the
+ * phy that matches the one reported in sas device page zero
+ */
+ if (phy_number != sas_device_pg0.PhyNum)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(sas_device->handle != handle)) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "handle changed from(0x%04x) to (0x%04x)!!!\n",
+ sas_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ sas_device->handle = handle;
+ }
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT
+ "device is not present handle(0x%04x), flags!!!\n",
+ ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ _scsih_ublock_io_device(ioc, sas_address);
+
+}
+
+/**
+ * _scsih_add_device - creating sas device object
+ * @ioc: per adapter object
+ * @handle: sas device handle
+ * @phy_num: phy number the end device is attached to
+ * @is_pd: whether this is a hidden raid component
+ *
+ * Creating end device object, stored in ioc->sas_device_list.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
+ u8 is_pd)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ u64 sas_address;
+ u32 device_info;
+ unsigned long flags;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return -1;
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
+ ioc->name, handle);
+ return -1;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return -1;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return -1;
+
+ sas_device = kzalloc(sizeof(struct _sas_device),
+ GFP_KERNEL);
+ if (!sas_device) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
+ sas_device->handle = handle;
+ if (_scsih_get_sas_address(ioc,
+ le16_to_cpu(sas_device_pg0.ParentDevHandle),
+ &sas_device->sas_address_parent) != 0)
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ sas_device->slot =
+ le16_to_cpu(sas_device_pg0.Slot);
+ sas_device->device_info = device_info;
+ sas_device->sas_address = sas_address;
+ sas_device->phy = sas_device_pg0.PhyNum;
+ sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
+ MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+
+ /* get enclosure_logical_id */
+ if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
+ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_device->enclosure_handle)))
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ /* get device name */
+ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_sas_device_init_add(ioc, sas_device);
+ else
+ _scsih_sas_device_add(ioc, sas_device);
+
+ return 0;
+}
+
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, sas_device->sas_address);
+ sas_target_priv_data->handle =
+ MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, sas_device->handle,
+ (unsigned long long) sas_device->sas_address);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ kfree(sas_device);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_topology_change_event_debug - debug for topology event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->ExpStatus) {
+ case MPI2_EVENT_SAS_TOPO_ES_ADDED:
+ status_str = "add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
+ case 0:
+ status_str = "responding";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
+ ioc->name, status_str);
+ pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
+ "start_phy(%02d), count(%d)\n",
+ le16_to_cpu(event_data->ExpanderDevHandle),
+ le16_to_cpu(event_data->EnclosureHandle),
+ event_data->StartPhyNum, event_data->NumEntries);
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ phy_number = event_data->StartPhyNum + i;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+ status_str = "target add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+ status_str = "link rate change";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
+ " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
+ handle, status_str, link_rate, prev_link_rate);
+
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_topology_change_event - handle topology changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ */
+static int
+_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 parent_handle, handle;
+ u16 reason_code;
+ u8 phy_number, max_phys;
+ struct _sas_node *sas_expander;
+ u64 sas_address;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate;
+ Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_topology_change_event_debug(ioc, event_data);
+#endif
+
+ if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+
+ if (!ioc->sas_hba.num_phys)
+ _scsih_sas_host_add(ioc);
+ else
+ _scsih_sas_host_refresh(ioc);
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+
+ parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+
+ /* handle expander add */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
+ if (_scsih_expander_add(ioc, parent_handle) != 0)
+ return 0;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ parent_handle);
+ if (sas_expander) {
+ sas_address = sas_expander->sas_address;
+ max_phys = sas_expander->num_phys;
+ } else if (parent_handle < ioc->sas_hba.num_phys) {
+ sas_address = ioc->sas_hba.sas_address;
+ max_phys = ioc->sas_hba.num_phys;
+ } else {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+ phy_number = event_data->StartPhyNum + i;
+ if (phy_number >= max_phys)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if ((event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
+ MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+ continue;
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, sas_address, handle,
+ phy_number, link_rate);
+
+
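+ /* no break: fall through to the target-add path; _scsih_add_device
+ * returns early if the device is already known
+ */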
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ _scsih_add_device(ioc, handle, phy_number, 0);
+
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+
+ _scsih_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+
+ /* handle expander removal */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
+ sas_expander)
+ mpt3sas_expander_remove(ioc, sas_address);
+
+ return 0;
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasDeviceStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ reason_str = "smart data";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+ reason_str = "unsupported device discovered";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ reason_str = "internal device reset";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ reason_str = "internal task abort set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ reason_str = "internal clear task set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+ reason_str = "internal query task";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
+ reason_str = "sata init failure";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ reason_str = "internal device reset complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
+ reason_str = "internal async notification";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality complete";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "device status change: (%s)\n"
+ "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
+ pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ event_data->ASC, event_data->ASCQ);
+ pr_info("\n");
+}
+#endif
+
+/**
+ * _scsih_sas_device_status_change_event - handle device status change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct MPT3SAS_TARGET *target_priv_data;
+ struct _sas_device *sas_device;
+ u64 sas_address;
+ unsigned long flags;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data =
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_device_status_change_event_debug(ioc,
+ event_data);
+#endif
+
+ /* In MPI Revision K (0xC), the internal device reset complete was
+ * implemented, so avoid setting tm_busy flag for older firmware.
+ */
+ if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+ return;
+
+ if (event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(event_data->SASAddress);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device || !sas_device->starget) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ target_priv_data = sas_device->starget->hostdata;
+ if (!target_priv_data) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (event_data->ReasonCode ==
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ target_priv_data->tm_busy = 1;
+ else
+ target_priv_data->tm_busy = 0;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
+ * event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_ENCL_RC_ADDED:
+ reason_str = "enclosure add";
+ break;
+ case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
+ reason_str = "enclosure remove";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+
+ pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx)"
+ " number slots(%d)\n", ioc->name, reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
+}
+#endif
+
+/**
+ * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
+ fw_event->event_data);
+#endif
+}
+
+/**
+ * _scsih_sas_broadcast_primitive_event - handle broadcast events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
+ u16 smid, handle;
+ u32 lun;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 termination_count;
+ u32 query_count;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
+ u16 ioc_status;
+ unsigned long flags;
+ int r;
+ u8 max_retries = 0;
+ u8 task_abort_retries;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ pr_info(MPT3SAS_FMT
+ "%s: enter: phy number(%d), width(%d)\n",
+ ioc->name, __func__, event_data->PhyNum,
+ event_data->PortWidth);
+
+ _scsih_block_io_all_device(ioc);
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ mpi_reply = ioc->tm_cmds.reply;
+ broadcast_aen_retry:
+
+ /* sanity checks for retrying this loop */
+ if (max_retries++ == 5) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
+ ioc->name, __func__));
+ goto out;
+ } else if (max_retries > 1)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
+ ioc->name, __func__, max_retries - 1));
+
+ termination_count = 0;
+ query_count = 0;
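+ /* for each outstanding SCSI_IO to a non-raid device, query the task;
+ * commands the firmware no longer reports as queued are aborted
+ */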
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ if (ioc->shost_recovery)
+ goto out;
+ scmd = _scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ sdev = scmd->device;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
+ continue;
+ /* skip hidden raid components */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue;
+ /* skip volumes */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_VOLUME)
+ continue;
+
+ handle = sas_device_priv_data->sas_target->handle;
+ lun = sas_device_priv_data->lun;
+ query_count++;
+
+ if (ioc->shost_recovery)
+ goto out;
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+ TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: FAILED when sending "
+ "QUERY_TASK: scmd(%p)\n", scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev,
+ "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
+ ioc_status, scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ /* see if IO is still owned by IOC and target */
+ if (mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+ mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ continue;
+ }
+ task_abort_retries = 0;
+ tm_retry:
+ if (task_abort_retries++ == 60) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: ABORT_TASK: giving up\n", ioc->name,
+ __func__));
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ if (ioc->shost_recovery)
+ goto out_no_lock;
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ scmd->serial_number, TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
+ "scmd(%p)\n", scmd);
+ goto tm_retry;
+ }
+
+ if (task_abort_retries > 1)
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
+ " scmd(%p)\n",
+ task_abort_retries - 1, scmd);
+
+ termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ }
+
+ if (ioc->broadcast_aen_pending) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: loop back due to pending AEN\n",
+ ioc->name, __func__));
+ ioc->broadcast_aen_pending = 0;
+ goto broadcast_aen_retry;
+ }
+
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ out_no_lock:
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - exit, query_count = %d termination_count = %d\n",
+ ioc->name, __func__, query_count, termination_count));
+
+ ioc->broadcast_aen_busy = 0;
+ if (!ioc->shost_recovery)
+ _scsih_ublock_io_all_device(ioc);
+ mutex_unlock(&ioc->tm_cmds.mutex);
+}
+
+/**
+ * _scsih_sas_discovery_event - handle discovery events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
+ pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_info("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_info("\n");
+ }
+#endif
+
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
+ !ioc->sas_hba.num_phys) {
+ if (disable_discovery > 0 && ioc->shost_recovery) {
+ /* Wait for the reset to complete */
+ while (ioc->shost_recovery)
+ ssleep(1);
+ }
+ _scsih_sas_host_add(ioc);
+ }
+}
+
+/**
+ * _scsih_ir_fastpath - turn on fastpath for IR physdisk
+ * @ioc: per adapter object
+ * @handle: device handle for physical disk
+ * @phys_disk_num: physical disk number
+ *
+ * Return 0 for success, else failure.
+ */
+static int
+_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+ u8 issue_reset = 0;
+ int rc = 0;
+ u16 ioc_status;
+ u32 log_info;
+
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
+ mpi_request->PhysDiskNum = phys_disk_num;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
+ "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
+ handle, phys_disk_num));
+
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: failed: ioc_status(0x%04x), "
+ "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
+ log_info));
+ rc = -EFAULT;
+ } else
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: completed successfully\n",
+ ioc->name));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return rc;
+}
+
+/**
+ * _scsih_reprobe_lun - reprobing lun
+ * @sdev: scsi device struct
+ * @no_uld_attach: sdev->no_uld_attach flag setting
+ *
+ **/
+static void
+_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ int rc;
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+ sdev_printk(KERN_INFO, sdev, "%s raid component\n",
+ sdev->no_uld_attach ? "hidding" : "exposing");
+ rc = scsi_device_reprobe(sdev);
+}
+
+/**
+ * _scsih_sas_volume_add - add new volume
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ u64 wwid;
+ u16 handle = le16_to_cpu(element->VolDevHandle);
+ int rc;
+
+ mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ return;
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ if (!ioc->wait_for_discovery_to_complete) {
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
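+ /* discovery in progress: only record the volume as a boot device candidate */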
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ _scsih_determine_boot_device(ioc, raid_device, 1);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_sas_volume_delete - delete volume
+ * @ioc: per adapter object
+ * @handle: volume device handle
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget = NULL;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device) {
+ if (raid_device->starget) {
+ starget = raid_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+ pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (starget)
+ scsi_remove_target(&starget->dev);
+}
+
+/**
+ * _scsih_sas_pd_expose - expose pd component to /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device->volume_handle = 0;
+ sas_device->volume_wwid = 0;
+ clear_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags &=
+ ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* exposing raid component */
+ if (starget)
+ starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_hide - hide pd component from /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ u16 volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
+ if (volume_handle)
+ mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+ &volume_wwid);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ set_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* hiding raid component */
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ if (starget)
+ starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_delete - delete pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ _scsih_device_remove_by_handle(ioc, handle);
+}
+
+/**
+ * _scsih_sas_pd_add - add pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u64 sas_address;
+ u16 parent_handle;
+
+ set_bit(handle, ioc->pd_handles);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device) {
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ return;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ _scsih_add_device(ioc, handle, 0, 1);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ u8 element_type;
+ int i;
+ char *reason_str = NULL, *element_str = NULL;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+
+ pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
+ ioc->name, (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
+ "foreign" : "native", event_data->NumElements);
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ reason_str = "add";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ reason_str = "remove";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
+ reason_str = "no change";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ reason_str = "hide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ reason_str = "unhide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ reason_str = "volume_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ reason_str = "volume_deleted";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ reason_str = "pd_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ reason_str = "pd_deleted";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ element_type = le16_to_cpu(element->ElementFlags) &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
+ switch (element_type) {
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
+ element_str = "volume";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
+ element_str = "phys disk";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
+ element_str = "hot spare";
+ break;
+ default:
+ element_str = "unknown element";
+ break;
+ }
+ pr_info("\t(%s:%s), vol handle(0x%04x), " \
+ "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
+ reason_str, le16_to_cpu(element->VolDevHandle),
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_ir_config_change_event - handle ir configuration change events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u8 foreign_config;
+ Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_config_change_event_debug(ioc, event_data);
+
+#endif
+
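+	/* volume add/delete below is skipped for foreign (imported) configs */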
+ foreign_config = (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ if (ioc->shost_recovery) {
+
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
+ _scsih_ir_fastpath(ioc,
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+ return;
+ }
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ if (!foreign_config)
+ _scsih_sas_volume_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ if (!foreign_config)
+ _scsih_sas_volume_delete(ioc,
+ le16_to_cpu(element->VolDevHandle));
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ _scsih_sas_pd_hide(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ _scsih_sas_pd_expose(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ _scsih_sas_pd_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ _scsih_sas_pd_delete(ioc, element);
+ break;
+ }
+ }
+}
+
+/**
+ * _scsih_sas_ir_volume_event - IR volume event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u64 wwid;
+ unsigned long flags;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u32 state;
+ int rc;
+ Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+ switch (state) {
+ case MPI2_RAID_VOL_STATE_MISSING:
+ case MPI2_RAID_VOL_STATE_FAILED:
+ _scsih_sas_volume_delete(ioc, handle);
+ break;
+
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+
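+		/* volume is now active; add it to scsi-ml if not already tracked */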
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ break;
+
+ mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ break;
+
+ case MPI2_RAID_VOL_STATE_INITIALIZING:
+ default:
+ break;
+ }
+}
+
+/**
+ * _scsih_sas_ir_physical_disk_event - PD event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u16 handle, parent_handle;
+ u32 state;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
+ u64 sas_address;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->PhysDiskDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+ switch (state) {
+ case MPI2_RAID_PD_STATE_ONLINE:
+ case MPI2_RAID_PD_STATE_DEGRADED:
+ case MPI2_RAID_PD_STATE_REBUILDING:
+ case MPI2_RAID_PD_STATE_OPTIMAL:
+ case MPI2_RAID_PD_STATE_HOT_SPARE:
+
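+		/* mark as a RAID member and add the device if not already present */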
+ set_bit(handle, ioc->pd_handles);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_add_device(ioc, handle, 0, 1);
+
+ break;
+
+ case MPI2_RAID_PD_STATE_OFFLINE:
+ case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+ case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrOperationStatus_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->RAIDOperation) {
+ case MPI2_EVENT_IR_RAIDOP_RESYNC:
+ reason_str = "resync";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
+ reason_str = "online capacity expansion";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
+ reason_str = "consistency check";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
+ reason_str = "background init";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
+ reason_str = "make data consistent";
+ break;
+ }
+
+ if (!reason_str)
+ return;
+
+ pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
+ "\thandle(0x%04x), percent complete(%d)\n",
+ ioc->name, reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
+}
+#endif
+
+/**
+ * _scsih_sas_ir_operation_status_event - handle RAID operation events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_operation_status_event_debug(ioc,
+ event_data);
+#endif
+
+ /* code added for raid transport support */
+ if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device)
+ raid_device->percent_complete =
+ event_data->PercentComplete;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
+
+/**
+ * _scsih_mark_responding_sas_device - mark a sas_device as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @slot: enclosure slot id
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 slot, u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address == sas_address &&
+ sas_device->slot == slot) {
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget)
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx), "
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n", handle,
+ (unsigned long long)sas_device->sas_address,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->handle == handle)
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_sas_devices - search for responding sas end devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u32 device_info;
+
+ pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_device_list))
+ goto out;
+
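+	/* GET_NEXT_HANDLE form walks the device pages; 0xFFFF requests the first handle */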
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ continue;
+ _scsih_mark_responding_sas_device(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress),
+ le16_to_cpu(sas_device_pg0.Slot), handle);
+ }
+
+ out:
+ pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_raid_device - mark a raid_device as responding
+ * @ioc: per adapter object
+ * @wwid: world wide identifier for raid volume
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+ u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid == wwid && raid_device->starget) {
+ starget = raid_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ starget_printk(KERN_INFO, raid_device->starget,
+ "handle(0x%04x), wwid(0x%016llx)\n", handle,
+ (unsigned long long)raid_device->wwid);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ if (raid_device->handle == handle) {
+ spin_unlock_irqrestore(&ioc->raid_device_lock,
+ flags);
+ return;
+ }
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ raid_device->handle);
+ raid_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_raid_devices - search for responding raid volumes
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u8 phys_disk_num;
+
+ if (!ioc->ir_firmware)
+ return;
+
+ pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
+ ioc->name);
+
+ if (list_empty(&ioc->raid_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
+ _scsih_mark_responding_raid_device(ioc,
+ le64_to_cpu(volume_pg1.WWID), handle);
+ }
+
+ /* refresh the pd_handles */
+ phys_disk_num = 0xFF;
+ memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ set_bit(handle, ioc->pd_handles);
+ }
+ out:
+ pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_expander - mark an expander as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: expander device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ sas_expander->responding = 1;
+ if (sas_expander->handle == handle)
+ goto out;
+ pr_info("\texpander(0x%016llx): handle changed" \
+ " from(0x%04x) to (0x%04x)!!!\n",
+ (unsigned long long)sas_expander->sas_address,
+ sas_expander->handle, handle);
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ goto out;
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_expanders - search for responding expanders
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u64 sas_address;
+ u16 handle;
+
+ pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_expander_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (unsigned long long)sas_address);
+ _scsih_mark_responding_expander(ioc, sas_address, handle);
+ }
+
+ out:
+ pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+}
+
+/**
+ * _scsih_remove_unresponding_sas_devices - removing unresponding devices
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *sas_device_next;
+ struct _sas_node *sas_expander, *sas_expander_next;
+ struct _raid_device *raid_device, *raid_device_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+
+ pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
+ ioc->name);
+
+ /* removing unresponding end devices */
+ pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
+ ioc->name);
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_list, list) {
+ if (!sas_device->responding)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ sas_device->sas_address);
+ else
+ sas_device->responding = 0;
+ }
+
+ /* removing unresponding volumes */
+ if (ioc->ir_firmware) {
+ pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
+ ioc->name);
+ list_for_each_entry_safe(raid_device, raid_device_next,
+ &ioc->raid_device_list, list) {
+ if (!raid_device->responding)
+ _scsih_sas_volume_delete(ioc,
+ raid_device->handle);
+ else
+ raid_device->responding = 0;
+ }
+ }
+
+ /* removing unresponding expanders */
+ pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
+ ioc->name);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ INIT_LIST_HEAD(&tmp_list);
+ list_for_each_entry_safe(sas_expander, sas_expander_next,
+ &ioc->sas_expander_list, list) {
+ if (!sas_expander->responding)
+ list_move_tail(&sas_expander->list, &tmp_list);
+ else
+ sas_expander->responding = 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
+ list) {
+ list_del(&sas_expander->list);
+ _scsih_expander_node_remove(ioc, sas_expander);
+ }
+
+ pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
+ ioc->name);
+
+ /* unblock devices */
+ _scsih_ublock_io_all_device(ioc);
+}
+
+static void
+_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander, u16 handle)
+{
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int i;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
+ le16_to_cpu(expander_pg1.AttachedDevHandle), i,
+ expander_pg1.NegotiatedLinkRate >> 4);
+ }
+}
+
+/**
+ * _scsih_scan_for_devices_after_reset - scan for devices after host reset
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2EventIrConfigElement_t element;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 phys_disk_num;
+ u16 ioc_status;
+ u16 handle, parent_handle;
+ u64 sas_address;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
+ u8 retry_count;
+ unsigned long flags;
+
+ pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+
+ _scsih_sas_host_refresh(ioc);
+
+ pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ expander_device = mpt3sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+ else {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ _scsih_expander_add(ioc, handle);
+ pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
+ ioc->name);
+
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
+ pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 1)) {
+ ssleep(1);
+ }
+ pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
+ ioc->name);
+
+ pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (raid_device)
+ continue;
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
+ pr_info(MPT3SAS_FMT
+ "\tBEFORE adding volume: handle (0x%04x)\n",
+ ioc->name, volume_pg1.DevHandle);
+ _scsih_sas_volume_add(ioc, &element);
+ pr_info(MPT3SAS_FMT
+ "\tAFTER adding volume: handle (0x%04x)\n",
+ ioc->name, volume_pg1.DevHandle);
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
+ ioc->name);
+
+ skip_to_sas:
+
+ pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
+ ioc->name);
+
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
+ " ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+ continue;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 0)) {
+ ssleep(1);
+ }
+ pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+ pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
+ ioc->name);
+
+ pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+}
+/**
+ * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
+
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
+ }
+ break;
+ }
+}
+
+/**
+ * _mpt3sas_fw_work - delayed task for processing firmware events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ /* the queue is being flushed so ignore this event */
+ if (ioc->remove_host || fw_event->cancel_pending_work ||
+ ioc->pci_error_recovery) {
+ _scsih_fw_event_free(ioc, fw_event);
+ return;
+ }
+
+ switch (fw_event->event) {
+ case MPT3SAS_PROCESS_TRIGGER_DIAG:
+ mpt3sas_process_trigger_data(ioc, fw_event->event_data);
+ break;
+ case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
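+		/* wait for any in-progress host recovery to finish first */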
+ while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
+ ssleep(1);
+ _scsih_remove_unresponding_sas_devices(ioc);
+ _scsih_scan_for_devices_after_reset(ioc);
+ break;
+ case MPT3SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
+ missing_delay[1]);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "port enable: complete from worker thread\n",
+ ioc->name));
+ break;
+ case MPT3SAS_TURN_ON_FAULT_LED:
+ _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_sas_topology_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ _scsih_sas_device_status_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ _scsih_sas_discovery_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ _scsih_sas_broadcast_primitive_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ _scsih_sas_enclosure_dev_status_change_event(ioc,
+ fw_event);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_sas_ir_config_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_sas_ir_volume_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ _scsih_sas_ir_physical_disk_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ _scsih_sas_ir_operation_status_event(ioc, fw_event);
+ break;
+ }
+ _scsih_fw_event_free(ioc, fw_event);
+}
+
+/**
+ * _firmware_event_work - work queue handler for firmware events
+ * @work: The fw_event_work object
+ * Context: user.
+ *
+ * wrapper for the work thread handling firmware events
+ *
+ * Return nothing.
+ */
+
+static void
+_firmware_event_work(struct work_struct *work)
+{
+ struct fw_event_work *fw_event = container_of(work,
+ struct fw_event_work, work);
+
+ _mpt3sas_fw_work(fw_event->ioc, fw_event);
+}
+
+/**
+ * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely adds a new work task into ioc->firmware_event_thread.
+ * The tasks are worked from _firmware_event_work in user context.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 event;
+ u16 sz;
+
+ /* events turned off due to host reset or driver unloading */
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 1;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ event = le16_to_cpu(mpi_reply->Event);
+
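+	/* feed all events except log entries to the diag trigger facility */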
+ if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
+ mpt3sas_trigger_event(ioc, event, 0);
+
+ switch (event) {
+ /* handle these */
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ {
+ Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ mpi_reply->EventData;
+
+ if (baen_data->Primitive !=
+ MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+ return 1;
+
+ if (ioc->broadcast_aen_busy) {
+ ioc->broadcast_aen_pending++;
+ return 1;
+ } else
+ ioc->broadcast_aen_busy = 1;
+ break;
+ }
+
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_check_topo_delete_events(ioc,
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_check_ir_config_unhide_events(ioc,
+ (Mpi2EventDataIrConfigChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_check_volume_delete_events(ioc,
+ (Mpi2EventDataIrVolume_t *)
+ mpi_reply->EventData);
+ break;
+
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ case MPI2_EVENT_SAS_DISCOVERY:
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ break;
+
+ default: /* ignore the rest */
+ return 1;
+ }
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+ sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+ fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event->event_data) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ kfree(fw_event);
+ return 1;
+ }
+
+ memcpy(fw_event->event_data, mpi_reply->EventData, sz);
+ fw_event->ioc = ioc;
+ fw_event->VF_ID = mpi_reply->VF_ID;
+ fw_event->VP_ID = mpi_reply->VP_ID;
+ fw_event->event = event;
+ _scsih_fw_event_add(ioc, fw_event);
+ return 1;
+}
+
+/* shost template */
+static struct scsi_host_template scsih_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Fusion MPT SAS Host",
+ .proc_name = MPT3SAS_DRIVER_NAME,
+ .queuecommand = _scsih_qcmd,
+ .target_alloc = _scsih_target_alloc,
+ .slave_alloc = _scsih_slave_alloc,
+ .slave_configure = _scsih_slave_configure,
+ .target_destroy = _scsih_target_destroy,
+ .slave_destroy = _scsih_slave_destroy,
+ .scan_finished = _scsih_scan_finished,
+ .scan_start = _scsih_scan_start,
+ .change_queue_depth = _scsih_change_queue_depth,
+ .change_queue_type = _scsih_change_queue_type,
+ .eh_abort_handler = _scsih_abort,
+ .eh_device_reset_handler = _scsih_dev_reset,
+ .eh_target_reset_handler = _scsih_target_reset,
+ .eh_host_reset_handler = _scsih_host_reset,
+ .bios_param = _scsih_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPT3SAS_SG_DEPTH,
+ .max_sectors = 32767,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mpt3sas_host_attrs,
+ .sdev_attrs = mpt3sas_dev_attrs,
+};
+
+/**
+ * _scsih_expander_node_remove - removing expander device from list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_node (expander) object
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Removing object and freeing associated memory from the
+ * ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port, *next;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mpt3sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (ioc->shost_recovery)
+ return;
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ else if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ }
+
+ mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent);
+
+ pr_info(MPT3SAS_FMT
+ "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name,
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * _scsih_ir_shutdown - IR shutdown notification
+ * @ioc: per adapter object
+ *
+ * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
+ * the host system is shutting down.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+
+ /* is IR firmware build loaded ? */
+ if (!ioc->ir_firmware)
+ return;
+
+ /* are there any volumes ? */
+ if (list_empty(&ioc->raid_device_list))
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
+
+ pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
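+	/* wait up to 10 seconds for the RAID action to complete */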
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ pr_info(MPT3SAS_FMT
+ "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/**
+ * _scsih_remove - detach and remove the scsi host
+ * @pdev: PCI device struct
+ *
+ * Routine called when unloading the driver.
+ * Return nothing.
+ */
+static void _scsih_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct _sas_port *mpt3sas_port, *next_port;
+ struct _raid_device *raid_device, *next;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ /* release all the volumes */
+ _scsih_ir_shutdown(ioc);
+ list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+ list) {
+ if (raid_device->starget) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ scsi_remove_target(&raid_device->starget->dev);
+ }
+ pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+
+ /* free ports attached to the sas_host */
+ list_for_each_entry_safe(mpt3sas_port, next_port,
+ &ioc->sas_hba.sas_port_list, port_list) {
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ else if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ }
+
+ /* free phys attached to the sas_host */
+ if (ioc->sas_hba.num_phys) {
+ kfree(ioc->sas_hba.phy);
+ ioc->sas_hba.phy = NULL;
+ ioc->sas_hba.num_phys = 0;
+ }
+
+ sas_remove_host(shost);
+ mpt3sas_base_detach(ioc);
+ list_del(&ioc->list);
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+}
+
+/**
+ * _scsih_shutdown - routine called during system shutdown
+ * @pdev: PCI device struct
+ *
+ * Return nothing.
+ */
+static void
+_scsih_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ _scsih_ir_shutdown(ioc);
+ mpt3sas_base_detach(ioc);
+}
+
+
+/**
+ * _scsih_probe_boot_devices - reports 1st device
+ * @ioc: per adapter object
+ *
+ * If specified in bios page 2, this routine reports the 1st
+ * device to scsi-ml or the sas transport for persistent boot
+ * device purposes. Please refer to _scsih_determine_boot_device().
+ */
+static void
+_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ u8 is_raid;
+ void *device;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u64 sas_address_parent;
+ u64 sas_address;
+ unsigned long flags;
+ int rc;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ device = NULL;
+ is_raid = 0;
+ if (ioc->req_boot_device.device) {
+ device = ioc->req_boot_device.device;
+ is_raid = ioc->req_boot_device.is_raid;
+ } else if (ioc->req_alt_boot_device.device) {
+ device = ioc->req_alt_boot_device.device;
+ is_raid = ioc->req_alt_boot_device.is_raid;
+ } else if (ioc->current_boot_device.device) {
+ device = ioc->current_boot_device.device;
+ is_raid = ioc->current_boot_device.is_raid;
+ }
+
+ if (!device)
+ return;
+
+ if (is_raid) {
+ raid_device = device;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = device;
+ handle = sas_device->handle;
+ sas_address_parent = sas_device->sas_address_parent;
+ sas_address = sas_device->sas_address;
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!mpt3sas_transport_port_add(ioc, handle,
+ sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc, sas_address,
+ sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+ }
+}
+
+/**
+ * _scsih_probe_raid - reporting raid volumes to scsi-ml
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _raid_device *raid_device, *raid_next;
+ int rc;
+
+ list_for_each_entry_safe(raid_device, raid_next,
+ &ioc->raid_device_list, list) {
+ if (raid_device->starget)
+ continue;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+}
+
+/**
+ * _scsih_probe_sas - reporting sas devices to sas transport
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *next;
+ unsigned long flags;
+
+ /* SAS Device List */
+ list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
+ list) {
+
+ if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ } else if (!sas_device->starget) {
+ /*
+			 * When async scanning is enabled, it's not possible to
+ * remove devices while scanning is turned on due to an
+ * oops in scsi_sysfs_add_sdev()->add_device()->
+ * sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_probe_devices - probing for devices
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 volume_mapping_flags;
+
+ if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
+ return; /* return when IOC doesn't support initiator mode */
+
+ _scsih_probe_boot_devices(ioc);
+
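+	/* probe order depends on the volume mapping mode from ioc page 8 */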
+ if (ioc->ir_firmware) {
+ volume_mapping_flags =
+ le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+ _scsih_probe_raid(ioc);
+ _scsih_probe_sas(ioc);
+ } else {
+ _scsih_probe_sas(ioc);
+ _scsih_probe_raid(ioc);
+ }
+ } else
+ _scsih_probe_sas(ioc);
+}
+
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus. In our implementation, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+ if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+ mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
+ if (disable_discovery > 0)
+ return;
+
+ ioc->start_scan = 1;
+ rc = mpt3sas_port_enable(ioc);
+
+ if (rc != 0)
+ pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function will be called periodically until it returns 1 with the
+ * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
+ * we wait for firmware discovery to complete, then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if (disable_discovery > 0) {
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ return 1;
+ }
+
+ if (time >= (300 * HZ)) {
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ pr_info(MPT3SAS_FMT
+ "port enable: FAILED with timeout (timeout=300s)\n",
+ ioc->name);
+ ioc->is_driver_loading = 0;
+ return 1;
+ }
+
+ if (ioc->start_scan)
+ return 0;
+
+ if (ioc->start_scan_failed) {
+ pr_info(MPT3SAS_FMT
+ "port enable: FAILED with (ioc_status=0x%08x)\n",
+ ioc->name, ioc->start_scan_failed);
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ ioc->remove_host = 1;
+ return 1;
+ }
+
+ pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+ mpt3sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+ return 1;
+}
+
+/**
+ * _scsih_probe - attach and add scsi host
+ * @pdev: PCI device struct
+ * @id: pci device id
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ struct Scsi_Host *shost;
+
+ shost = scsi_host_alloc(&scsih_driver_template,
+ sizeof(struct MPT3SAS_ADAPTER));
+ if (!shost)
+ return -ENODEV;
+
+ /* init local params */
+ ioc = shost_priv(shost);
+ memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
+ INIT_LIST_HEAD(&ioc->list);
+ list_add_tail(&ioc->list, &mpt3sas_ioc_list);
+ ioc->shost = shost;
+ ioc->id = mpt_ids++;
+ sprintf(ioc->name, "%s%d", MPT3SAS_DRIVER_NAME, ioc->id);
+ ioc->pdev = pdev;
+ ioc->scsi_io_cb_idx = scsi_io_cb_idx;
+ ioc->tm_cb_idx = tm_cb_idx;
+ ioc->ctl_cb_idx = ctl_cb_idx;
+ ioc->base_cb_idx = base_cb_idx;
+ ioc->port_enable_cb_idx = port_enable_cb_idx;
+ ioc->transport_cb_idx = transport_cb_idx;
+ ioc->scsih_cb_idx = scsih_cb_idx;
+ ioc->config_cb_idx = config_cb_idx;
+ ioc->tm_tr_cb_idx = tm_tr_cb_idx;
+ ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
+ ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
+ ioc->logging_level = logging_level;
+ ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* misc semaphores and spin locks */
+ mutex_init(&ioc->reset_in_progress_mutex);
+ spin_lock_init(&ioc->ioc_reset_in_progress_lock);
+ spin_lock_init(&ioc->scsi_lookup_lock);
+ spin_lock_init(&ioc->sas_device_lock);
+ spin_lock_init(&ioc->sas_node_lock);
+ spin_lock_init(&ioc->fw_event_lock);
+ spin_lock_init(&ioc->raid_device_lock);
+ spin_lock_init(&ioc->diag_trigger_lock);
+
+ INIT_LIST_HEAD(&ioc->sas_device_list);
+ INIT_LIST_HEAD(&ioc->sas_device_init_list);
+ INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->fw_event_list);
+ INIT_LIST_HEAD(&ioc->raid_device_list);
+ INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+
+ /* init shost parameters */
+ shost->max_cmd_len = 32;
+ shost->max_lun = max_lun;
+ shost->transportt = mpt3sas_transport_template;
+ shost->unique_id = ioc->id;
+
+ if (max_sectors != 0xFFFF) {
+ if (max_sectors < 64) {
+ shost->max_sectors = 64;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "value of 64.\n", ioc->name, max_sectors);
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "default value of 32767.\n", ioc->name,
+ max_sectors);
+ } else {
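+			/* clear bit 0 so the reported max_sectors is always even */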
+ shost->max_sectors = max_sectors & 0xFFFE;
+ pr_info(MPT3SAS_FMT
+ "The max_sectors value is set to %d\n",
+ ioc->name, shost->max_sectors);
+ }
+ }
+
+ if ((scsi_add_host(shost, &pdev->dev))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ list_del(&ioc->list);
+ goto out_add_shost_fail;
+ }
+
+ /* register EEDP capabilities with SCSI layer */
+ if (prot_mask > 0)
+ scsi_host_set_prot(shost, prot_mask);
+ else
+ scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION);
+
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ /* event thread */
+ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
+ "fw_event%d", ioc->id);
+ ioc->firmware_event_thread = create_singlethread_workqueue(
+ ioc->firmware_event_name);
+ if (!ioc->firmware_event_thread) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_thread_fail;
+ }
+
+ ioc->is_driver_loading = 1;
+ if ((mpt3sas_base_attach(ioc))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_attach_fail;
+ }
+ scsi_scan_host(shost);
+ return 0;
+
+ out_attach_fail:
+ destroy_workqueue(ioc->firmware_event_thread);
+ out_thread_fail:
+ list_del(&ioc->list);
+ scsi_remove_host(shost);
+ out_add_shost_fail:
+ scsi_host_put(shost);
+ return -ENODEV;
+}
+
+#ifdef CONFIG_PM
+/**
+ * _scsih_suspend - power management suspend main entry point
+ * @pdev: PCI device struct
+ * @state: PM state change to (usually PCI_D3)
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state;
+
+ mpt3sas_base_stop_watchdog(ioc);
+ flush_scheduled_work();
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+ pr_info(MPT3SAS_FMT
+ "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ ioc->name, pdev, pci_name(pdev), device_state);
+
+ pci_save_state(pdev);
+ mpt3sas_base_free_resources(ioc);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+/**
+ * _scsih_resume - power management resume main entry point
+ * @pdev: PCI device struct
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ pr_info(MPT3SAS_FMT
+ "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ ioc->name, pdev, pci_name(pdev), device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ ioc->pdev = pdev;
+ r = mpt3sas_base_map_resources(ioc);
+ if (r)
+ return r;
+
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ scsi_unblock_requests(shost);
+ mpt3sas_base_start_watchdog(ioc);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t
+_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
+ ioc->name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
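+		/* transient error, the device may recover without a reset */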
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ /* Fatal error, prepare for slot reset */
+ ioc->pci_error_recovery = 1;
+ scsi_block_requests(ioc->shost);
+ mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_free_resources(ioc);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent error, prepare for device removal */
+ ioc->pci_error_recovery = 1;
+ mpt3sas_base_stop_watchdog(ioc);
+ _scsih_flush_running_cmds(ioc);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t
+_scsih_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
+ ioc->name);
+
+ ioc->pci_error_recovery = 0;
+ ioc->pdev = pdev;
+ pci_restore_state(pdev);
+ rc = mpt3sas_base_map_resources(ioc);
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
+ (rc == 0) ? "success" : "failed");
+
+ if (!rc)
+ return PCI_ERS_RESULT_RECOVERED;
+ else
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * _scsih_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that it's
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void
+_scsih_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ mpt3sas_base_start_watchdog(ioc);
+ scsi_unblock_requests(ioc->shost);
+}
+
+/**
+ * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t
+_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
+ ioc->name);
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* raid transport support */
+static struct raid_function_template mpt3sas_raid_functions = {
+ .cookie = &scsih_driver_template,
+ .is_raid = _scsih_is_raid,
+ .get_resync = _scsih_get_resync,
+ .get_state = _scsih_get_state,
+};
+
+static struct pci_error_handlers _scsih_err_handler = {
+ .error_detected = _scsih_pci_error_detected,
+ .mmio_enabled = _scsih_pci_mmio_enabled,
+ .slot_reset = _scsih_pci_slot_reset,
+ .resume = _scsih_pci_resume,
+};
+
+static struct pci_driver scsih_driver = {
+ .name = MPT3SAS_DRIVER_NAME,
+ .id_table = scsih_pci_table,
+ .probe = _scsih_probe,
+ .remove = _scsih_remove,
+ .shutdown = _scsih_shutdown,
+ .err_handler = &_scsih_err_handler,
+#ifdef CONFIG_PM
+ .suspend = _scsih_suspend,
+ .resume = _scsih_resume,
+#endif
+};
+
+
+/**
+ * _scsih_init - main entry point for this driver.
+ *
+ * Returns 0 success, anything else error.
+ */
+static int __init
+_scsih_init(void)
+{
+ int error;
+
+ mpt_ids = 0;
+
+ pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_transport_template =
+ sas_attach_transport(&mpt3sas_transport_functions);
+ if (!mpt3sas_transport_template)
+ return -ENODEV;
+
+/* raid transport support */
+ mpt3sas_raid_template = raid_class_attach(&mpt3sas_raid_functions);
+ if (!mpt3sas_raid_template) {
+ sas_release_transport(mpt3sas_transport_template);
+ return -ENODEV;
+ }
+
+ mpt3sas_base_initialize_callback_handler();
+
+ /* queuecommand callback handler */
+ scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
+
+ /* task management callback handler */
+ tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
+
+ /* base internal commands callback handler */
+ base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
+ port_enable_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_port_enable_done);
+
+ /* transport internal commands callback handler */
+ transport_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_transport_done);
+
+ /* scsih internal commands callback handler */
+ scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
+
+ /* configuration page API internal commands callback handler */
+ config_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_config_done);
+
+ /* ctl module callback handler */
+ ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
+
+ tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_tr_complete);
+
+ tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_volume_tr_complete);
+
+ tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_sas_control_complete);
+
+ mpt3sas_ctl_init();
+
+ error = pci_register_driver(&scsih_driver);
+ if (error) {
+ /* raid transport support */
+ raid_class_release(mpt3sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+ }
+
+ return error;
+}
+
+/**
+ * _scsih_exit - exit point for this driver (when it is a module).
+ *
+ */
+static void __exit
+_scsih_exit(void)
+{
+ pr_info("mpt3sas version %s unloading\n",
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_ctl_exit();
+
+ pci_unregister_driver(&scsih_driver);
+
+
+ mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_cb_idx);
+ mpt3sas_base_release_callback_handler(base_cb_idx);
+ mpt3sas_base_release_callback_handler(port_enable_cb_idx);
+ mpt3sas_base_release_callback_handler(transport_cb_idx);
+ mpt3sas_base_release_callback_handler(scsih_cb_idx);
+ mpt3sas_base_release_callback_handler(config_cb_idx);
+ mpt3sas_base_release_callback_handler(ctl_cb_idx);
+
+ mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
+
+/* raid transport support */
+ raid_class_release(mpt3sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+}
+
+module_init(_scsih_init);
+module_exit(_scsih_exit);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
new file mode 100644
index 000000000000..87ca2b7287c3
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -0,0 +1,2128 @@
+/*
+ * SAS Transport Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _transport_sas_node_find_by_sas_address - sas node search
+ * @ioc: per adapter object
+ * @sas_address: sas address of expander or sas host
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Search for either the sas host or an expander device based on sas_address,
+ * then return the sas_node object.
+ */
+static struct _sas_node *
+_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ if (ioc->sas_hba.sas_address == sas_address)
+ return &ioc->sas_hba;
+ else
+ return mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+}
+
+/**
+ * _transport_convert_phy_link_rate - convert firmware link rate to sas_transport form
+ * @link_rate: link rate returned from mpt firmware
+ *
+ * Convert link_rate from mpi fusion into sas_transport form.
+ */
+static enum sas_linkrate
+_transport_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI2_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI25_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+
+ default:
+ case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * _transport_set_identify - set identify for phys and end devices
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @identify: sas identify info
+ *
+ * Populates sas identify info.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ struct sas_identify *identify)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 device_info;
+ u32 ioc_status;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT
+ "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
+ ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sas_device_pg0.PhyNum;
+
+ /* device_type */
+ switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
+ case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI2_SAS_DEVICE_INFO_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /* target_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ identify->target_port_protocols |= SAS_PROTOCOL_SATA;
+
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_done - internal transport layer callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internal generated transport cmds.
+ * The callback index passed is `ioc->transport_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->transport_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->transport_cmds.smid != smid)
+ return 1;
+ ioc->transport_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->transport_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->transport_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->transport_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->transport_cmds.done);
+ return 1;
+}
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct sas_expander_device *edev)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ &data_out_dma);
+
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
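+ /* the SMP reply buffer immediately follows the request in the single DMA allocation */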
+ data_in_dma = data_out_dma + sizeof(struct rep_manu_request);
+
+ manufacture_request = data_out;
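+ /* SMP REPORT MANUFACTURER INFORMATION request: frame type 0x40, function 0x01 */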
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = cpu_to_le64(sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - send to sas_addr(0x%016llx)\n",
+ ioc->name, (unsigned long long)sas_address));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+ u8 *tmp;
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
+ goto out;
+
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
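+ /* component_id is returned big-endian; convert to host byte order */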
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+
+/**
+ * _transport_delete_port - helper function to remove a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+ enum sas_device_type device_type =
+ mpt3sas_port->remote_identify.device_type;
+
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ if (device_type == SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc, sas_address);
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc, sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_delete_phy - helper function to remove a single phy from a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port, struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mpt3sas_phy->phy_id);
+
+ list_del(&mpt3sas_phy->port_siblings);
+ mpt3sas_port->num_phys--;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * _transport_add_phy - helper function to add a single phy to a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
+ struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mpt3sas_phy->phy_id);
+
+ list_add_tail(&mpt3sas_phy->port_siblings, &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ sas_port_add_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * _transport_add_phy_to_an_existing_port - adding new phy to existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @sas_address: sas address of the device/expander where the phy needs to be added
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
+ u64 sas_address)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_phy *phy_srch;
+
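+ /* nothing to do if the phy is already assigned to a port */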
+ if (mpt3sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch == mpt3sas_phy)
+ return;
+ }
+ _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy);
+ return;
+ }
+
+}
+
+/**
+ * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy)
+{
+ struct _sas_port *mpt3sas_port, *next;
+ struct _sas_phy *phy_srch;
+
+ if (mpt3sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch != mpt3sas_phy)
+ continue;
+
+ if (mpt3sas_port->num_phys == 1)
+ _transport_delete_port(ioc, mpt3sas_port);
+ else
+ _transport_delete_phy(ioc, mpt3sas_port,
+ mpt3sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * _transport_sanity_check - sanity check when adding a new port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ *
+ * Before a new port is added, detach any phy whose remote sas_address matches
+ * the device being added from the port it currently belongs to (see
+ * _transport_del_phy_from_an_existing_port), so the phy can be claimed by the
+ * new port.
+ */
+static void
+_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address != sas_address)
+ continue;
+ if (sas_node->phy[i].phy_belongs_to_port == 1)
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ &sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpt3sas_transport_port_add - insert port to the list
+ * @ioc: per adapter object
+ * @handle: handle of attached device
+ * @sas_address: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new port object to the sas_node->sas_port_list.
+ *
+ * Returns the allocated mpt3sas_port object, or NULL on failure.
+ */
+struct _sas_port *
+mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 sas_address)
+{
+ struct _sas_phy *mpt3sas_phy, *next;
+ struct _sas_port *mpt3sas_port;
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct sas_rphy *rphy;
+ int i;
+ struct sas_port *port;
+
+ mpt3sas_port = kzalloc(sizeof(struct _sas_port),
+ GFP_KERNEL);
+ if (!mpt3sas_port) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mpt3sas_port->port_list);
+ INIT_LIST_HEAD(&mpt3sas_port->phy_list);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!sas_node) {
+ pr_err(MPT3SAS_FMT
+ "%s: Could not find parent sas_address(0x%016llx)!\n",
+ ioc->name, __func__, (unsigned long long)sas_address);
+ goto out_fail;
+ }
+
+ if ((_transport_set_identify(ioc, handle,
+ &mpt3sas_port->remote_identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ _transport_sanity_check(ioc, sas_node,
+ mpt3sas_port->remote_identify.sas_address);
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address !=
+ mpt3sas_port->remote_identify.sas_address)
+ continue;
+ list_add_tail(&sas_node->phy[i].port_siblings,
+ &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ }
+
+ if (!mpt3sas_port->num_phys) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &port->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ sas_port_add_phy(port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+ }
+
+ mpt3sas_port->port = port;
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
+ rphy = sas_end_device_alloc(port);
+ else
+ rphy = sas_expander_alloc(port,
+ mpt3sas_port->remote_identify.device_type);
+
+ rphy->identify = mpt3sas_port->remote_identify;
+ if ((sas_rphy_add(rphy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &rphy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle, (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->rphy = rphy;
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* fill in report manufacture */
+ if (mpt3sas_port->remote_identify.device_type ==
+ MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+ mpt3sas_port->remote_identify.device_type ==
+ MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
+ _transport_expander_report_manufacture(ioc,
+ mpt3sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy));
+ return mpt3sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
+ port_siblings)
+ list_del(&mpt3sas_phy->port_siblings);
+ kfree(mpt3sas_port);
+ return NULL;
+}
+
+/**
+ * mpt3sas_transport_port_remove - remove port from the list
+ * @ioc: per adapter object
+ * @sas_address: sas address of attached device
+ * @sas_address_parent: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Removes the port object and frees the associated memory from the
+ * parent sas_node->sas_port_list.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent)
+{
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt3sas_port, *next;
+ struct _sas_node *sas_node;
+ u8 found = 0;
+ struct _sas_phy *mpt3sas_phy, *next_phy;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address_parent);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ found = 1;
+ list_del(&mpt3sas_port->port_list);
+ goto out;
+ }
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
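+ /* clear the remote identify info on every phy that pointed at the removed device */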
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address == sas_address)
+ memset(&sas_node->phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ list_for_each_entry_safe(mpt3sas_phy, next_phy,
+ &mpt3sas_port->phy_list, port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ list_del(&mpt3sas_phy->port_siblings);
+ }
+ sas_port_delete(mpt3sas_port->port);
+ kfree(mpt3sas_port);
+}
+
+/**
+ * mpt3sas_transport_add_host_phy - report sas_host phy to transport
+ * @ioc: per adapter object
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @phy_pg0: sas phy page 0
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+
+/**
+ * mpt3sas_transport_add_expander_phy - report expander phy to transport
+ * @ioc: per adapter object
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @expander_pg1: expander page 1
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_update_links - refreshing phy link changes
+ * @ioc: per adapter object
+ * @sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct _sas_phy *mpt3sas_phy;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ mpt3sas_phy = &sas_node->phy[phy_number];
+ mpt3sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
+ _transport_set_identify(ioc, handle,
+ &mpt3sas_phy->remote_identify);
+ _transport_add_phy_to_an_existing_port(ioc, sas_node,
+ mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
+ } else
+ memset(&mpt3sas_phy->remote_identify, 0, sizeof(struct
+ sas_identify));
+
+ if (mpt3sas_phy->phy)
+ mpt3sas_phy->phy->negotiated_linkrate =
+ _transport_convert_phy_link_rate(link_rate);
+
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "refresh: parent sas_addr(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ (unsigned long long)sas_address,
+ link_rate, phy_number, handle, (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+}
+
+static inline void *
+phy_to_ioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ return shost_priv(shost);
+}
+
+static inline void *
+rphy_to_ioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+ return shost_priv(shost);
+}
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
+
+/**
+ * _transport_get_expander_phy_error_log - return expander counters
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_error_log_request) +
+ sizeof(struct phy_error_log_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_error_log_request));
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma,
+ sizeof(struct phy_error_log_request),
+ data_out_dma + sizeof(struct phy_error_log_request),
+ sizeof(struct phy_error_log_reply));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ phy_error_log_reply = data_out +
+ sizeof(struct phy_error_log_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - function_result(%d)\n",
+ ioc->name, phy_error_log_reply->function_result));
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_get_linkerrors - return phy counters for both hba and expanders
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasPhyPage1_t phy_pg1;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_get_expander_phy_error_log(ioc, phy);
+
+ /* get hba phy error logs */
+ if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
+ phy->number))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, phy->number,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.RunningDisparityErrorCount);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.LossDwordSynchCount);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.PhyResetProblemCount);
+ return 0;
+}
+
+/**
+ * _transport_get_enclosure_identifier -
+ * @rphy: The sas transport remote phy object
+ * @identifier: pointer for returning the enclosure logical id
+ *
+ * Obtain the enclosure logical id for an expander.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+{
+ struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device) {
+ *identifier = sas_device->enclosure_logical_id;
+ rc = 0;
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _transport_get_bay_identifier -
+ * @rphy: The sas transport remote phy object
+ *
+ * Returns the slot id for a device that resides inside an enclosure.
+ */
+static int
+_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device)
+ rc = sas_device->slot;
+ else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x91 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
+
+/**
+ * _transport_expander_phy_control - expander phy control
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ * @phy_operation: SMP phy operation (SMP_PHY_CONTROL_LINK_RESET,
+ * SMP_PHY_CONTROL_HARD_RESET or SMP_PHY_CONTROL_DISABLE)
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_control_request) +
+ sizeof(struct phy_control_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_control_request));
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_request), data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_reply), data_out_dma +
+ sizeof(struct phy_control_request));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number, phy_operation));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+
+ phy_control_reply = data_out +
+ sizeof(struct phy_control_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - function_result(%d)\n",
+ ioc->name, phy_control_reply->function_result));
+
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_phy_reset - reset a hba or expander attached phy
+ * @phy: The sas phy object
+ * @hard_reset: non-zero requests a hard reset, zero requests a link reset
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIoUnitControlReply_t mpi_reply;
+ Mpi2SasIoUnitControlRequest_t mpi_request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request.Operation = hard_reset ?
+ MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
+ mpi_request.PhyNum = phy->number;
+
+ if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ return 0;
+}
+
+/**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Supports both hba and expander attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int rc = 0;
+ unsigned long flags;
+ int i, discovery_active;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+
+ /* read sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+ pr_err(MPT3SAS_FMT "discovery is active on " \
+ "port = %d, phy = %d: unable to enable/disable "
+ "phys, try again later!\n", ioc->name,
+ sas_iounit_pg0->PhyData[i].Port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy Port/PortFlags/PhyFlags from page 0 */
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ sas_iounit_pg1->PhyData[i].Port =
+ sas_iounit_pg0->PhyData[i].Port;
+ sas_iounit_pg1->PhyData[i].PortFlags =
+ (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
+ sas_iounit_pg1->PhyData[i].PhyFlags =
+ (sas_iounit_pg0->PhyData[i].PhyFlags &
+ (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
+ MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
+ }
+
+ if (enable)
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ _transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+ return rc;
+}
+
+/**
+ * _transport_phy_speed - set phy min/max link rates
+ * @phy: The sas phy object
+ * @rates: rates defined in sas_phy_linkrates
+ *
+ * Only supports sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int i;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return _transport_expander_phy_control(ioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ if (phy->number != i) {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (ioc->sas_hba.phy[i].phy->minimum_linkrate +
+ (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
+ } else {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (rates->minimum_linkrate +
+ (rates->maximum_linkrate << 4));
+ }
+ }
+
+ if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ _transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ phy->number)) {
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
+/**
+ * _transport_smp_handler - transport portal for smp passthru
+ * @shost: shost object
+ * @rphy: sas transport rphy object
+ * @req: bsg request carrying the smp frame
+ *
+ * This is used primarily for smp_utils.
+ * Example:
+ * smp_rep_general /sys/class/bsg/expander-5:0
+ */
+static int
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ int rc, i;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ dma_addr_t dma_addr_in = 0;
+ dma_addr_t dma_addr_out = 0;
+ dma_addr_t pci_dma_in = 0;
+ dma_addr_t pci_dma_out = 0;
+ void *pci_addr_in = NULL;
+ void *pci_addr_out = NULL;
+ u16 wait_state_count;
+ struct request *rsp = req->next_rq;
+ struct bio_vec *bvec = NULL;
+
+ if (!rsp) {
+ pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
+ if (rc)
+ return rc;
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
+ __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ /* Check if the request is split across multiple segments */
+ if (req->bio->bi_vcnt > 1) {
+ u32 offset = 0;
+
+ /* Allocate memory and copy the request */
+ pci_addr_out = pci_alloc_consistent(ioc->pdev,
+ blk_rq_bytes(req), &pci_dma_out);
+ if (!pci_addr_out) {
+ pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ bio_for_each_segment(bvec, req->bio, i) {
+ memcpy(pci_addr_out + offset,
+ page_address(bvec->bv_page) + bvec->bv_offset,
+ bvec->bv_len);
+ offset += bvec->bv_len;
+ }
+ } else {
+ dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_out) {
+ pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto free_pci;
+ }
+ }
+
+ /* Check if the response needs to be populated across
+ * multiple segments */
+ if (rsp->bio->bi_vcnt > 1) {
+ pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
+ &pci_dma_in);
+ if (!pci_addr_in) {
+ pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ } else {
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_in) {
+ pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto unmap;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto unmap;
+ }
+
+ rc = 0;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = (rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(ioc->sas_hba.sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+ psge = &mpi_request->SGL;
+
+ if (req->bio->bi_vcnt > 1)
+ ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
+ pci_dma_in, (blk_rq_bytes(rsp) + 4));
+ else
+ ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4),
+ dma_addr_in, (blk_rq_bytes(rsp) + 4));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - sending smp request\n", ioc->name, __func__));
+
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s : timeout\n",
+ __func__, ioc->name);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - complete\n", ioc->name, __func__));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - reply data transfer size(%d)\n",
+ ioc->name, __func__,
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
+ req->sense_len = sizeof(*mpi_reply);
+ req->resid_len = 0;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+
+ /* check if the resp needs to be copied from the allocated
+ * pci mem */
+ if (rsp->bio->bi_vcnt > 1) {
+ u32 offset = 0;
+ u32 bytes_to_copy =
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+ bio_for_each_segment(bvec, rsp->bio, i) {
+ if (bytes_to_copy <= bvec->bv_len) {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bytes_to_copy);
+ break;
+ } else {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bvec->bv_len);
+ bytes_to_copy -= bvec->bv_len;
+ }
+ offset += bvec->bv_len;
+ }
+ }
+ } else {
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - no reply\n", ioc->name, __func__));
+ rc = -ENXIO;
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = -ETIMEDOUT;
+ }
+
+ unmap:
+ if (dma_addr_out)
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_addr_in)
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
+ PCI_DMA_BIDIRECTIONAL);
+
+ free_pci:
+ if (pci_addr_out)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+ pci_dma_out);
+
+ if (pci_addr_in)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+ pci_dma_in);
+
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+struct sas_function_template mpt3sas_transport_functions = {
+ .get_linkerrors = _transport_get_linkerrors,
+ .get_enclosure_identifier = _transport_get_enclosure_identifier,
+ .get_bay_identifier = _transport_get_bay_identifier,
+ .phy_reset = _transport_phy_reset,
+ .phy_enable = _transport_phy_enable,
+ .set_phy_speed = _transport_phy_speed,
+ .smp_handler = _transport_smp_handler,
+};
+
+struct scsi_transport_template *mpt3sas_transport_template;
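
The sas_function_template above only defines the transport callbacks; the template itself is
registered with the SAS transport class elsewhere in the driver and is not part of this hunk.
A minimal sketch of that registration, assuming the usual scsi_transport_sas pattern:

	/* sketch only: module init is expected to attach the template,
	 * and module exit to release it again */
	mpt3sas_transport_template =
		sas_attach_transport(&mpt3sas_transport_functions);
	if (!mpt3sas_transport_template)
		return -ENODEV;

	/* ... on module unload ... */
	sas_release_transport(mpt3sas_transport_template);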
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
new file mode 100644
index 000000000000..da6c5f25749c
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -0,0 +1,434 @@
+/*
+ * This module provides common API to set Diagnostic trigger for MPT
+ * (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _mpt3sas_raise_sigio - notify app
+ * @ioc: per adapter object
+ * @event_data: event data describing the trigger condition
+ */
+static void
+_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 sz, event_data_sz;
+ unsigned long flags;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ ioc->name, __func__));
+
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
+ mpi_reply = kzalloc(sz, GFP_KERNEL);
+ if (!mpi_reply)
+ goto out;
+ mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED);
+ event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4;
+ mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
+ memcpy(&mpi_reply->EventData, event_data,
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: add to driver event log\n",
+ ioc->name, __func__));
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ kfree(mpi_reply);
+ out:
+
+ /* clearing the diag_trigger_active flag */
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: clearing diag_trigger_active flag\n",
+ ioc->name, __func__));
+ ioc->diag_trigger_active = 0;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_process_trigger_data - process the event data for the trigger
+ * @ioc: per adapter object
+ * @event_data: event data describing the trigger condition
+ */
+void
+mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ u8 issue_reset = 0;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ ioc->name, __func__));
+
+ /* release the diag buffer trace */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: release trace diag buffer\n", ioc->name, __func__));
+ mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ &issue_reset);
+ }
+
+ _mpt3sas_raise_sigio(ioc, event_data);
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_master - Master trigger handler
+ * @ioc: per adapter object
+ * @trigger_bitmask: mask of MASTER_TRIGGER_XXX bits that fired
+ *
+ */
+void
+mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ unsigned long flags;
+ u8 found_match = 0;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ goto by_pass_checks;
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ by_pass_checks:
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - trigger_bitmask = 0x%08x\n",
+ ioc->name, __func__, trigger_bitmask));
+
+ /* don't send trigger if a trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_MASTER;
+ event_data.u.master.MasterData = trigger_bitmask;
+
+ if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ _mpt3sas_raise_sigio(ioc, &event_data);
+ else
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_event - Event trigger handler
+ * @ioc: per adapter object
+ * @event: firmware event code
+ * @log_entry_qualifier: log entry qualifier (Log Entry Added event only)
+ *
+ */
+void
+mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_EVENT_TRIGGER_T *event_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+ ioc->name, __func__, event, log_entry_qualifier));
+
+ /* don't send trigger if a trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ event_trigger = ioc->diag_trigger_event.EventTriggerEntry;
+ for (i = 0 , found_match = 0; i < ioc->diag_trigger_event.ValidEntries
+ && !found_match; i++, event_trigger++) {
+ if (event_trigger->EventValue != event)
+ continue;
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
+ if (event_trigger->LogEntryQualifier ==
+ log_entry_qualifier)
+ found_match = 1;
+ continue;
+ }
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
+ event_data.u.event.EventValue = event;
+ event_data.u.event.LogEntryQualifier = log_entry_qualifier;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_scsi - SCSI trigger handler
+ * @ioc: per adapter object
+ * @sense_key: SCSI sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ *
+ */
+void
+mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
+ u8 ascq)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_SCSI_TRIGGER_T *scsi_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+ ioc->name, __func__, sense_key, asc, ascq));
+
+ /* don't send trigger if a trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry;
+ for (i = 0 , found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries
+ && !found_match; i++, scsi_trigger++) {
+ if (scsi_trigger->SenseKey != sense_key)
+ continue;
+ if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc))
+ continue;
+ if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq))
+ continue;
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
+ event_data.u.scsi.SenseKey = sense_key;
+ event_data.u.scsi.ASC = asc;
+ event_data.u.scsi.ASCQ = ascq;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_mpi - MPI trigger handler
+ * @ioc: per adapter object
+ * @ioc_status: MPI IOCStatus
+ * @loginfo: MPI IocLogInfo
+ *
+ */
+void
+mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_MPI_TRIGGER_T *mpi_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+ ioc->name, __func__, ioc_status, loginfo));
+
+ /* don't send trigger if a trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry;
+ for (i = 0 , found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries
+ && !found_match; i++, mpi_trigger++) {
+ if (mpi_trigger->IOCStatus != ioc_status)
+ continue;
+ if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF ||
+ mpi_trigger->IocLogInfo == loginfo))
+ continue;
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
+ event_data.u.mpi.IOCStatus = ioc_status;
+ event_data.u.mpi.IocLogInfo = loginfo;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
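
The trigger handlers above are exported for use by the rest of the driver; how the SCSI variant
gets invoked is not visible in this file. A hedged sketch of a typical call site in the I/O
completion path (the surrounding locals and the autosense check are illustrative assumptions,
not code from this patch):

	/* illustrative call site: when a command completes with valid
	 * autosense data, decode the sense buffer and let the trigger
	 * code decide whether a matching SCSI trigger should fire */
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct scsi_sense_hdr sshdr;

		if (scsi_normalize_sense(scmd->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sshdr))
			mpt3sas_trigger_scsi(ioc, sshdr.sense_key,
					     sshdr.asc, sshdr.ascq);
	}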
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
new file mode 100644
index 000000000000..a10c30907394
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -0,0 +1,193 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * to set Diagnostic triggers for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+/* Diagnostic Trigger Configuration Data Structures */
+
+#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+
+/* limitation on number of entries */
+#define NUM_VALID_ENTRIES (20)
+
+/* trigger types */
+#define MPT3SAS_TRIGGER_MASTER (1)
+#define MPT3SAS_TRIGGER_EVENT (2)
+#define MPT3SAS_TRIGGER_SCSI (3)
+#define MPT3SAS_TRIGGER_MPI (4)
+
+/* trigger names */
+#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master"
+#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event"
+#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi"
+#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi"
+
+/* master trigger bitmask */
+#define MASTER_TRIGGER_FW_FAULT (0x00000001)
+#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002)
+#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004)
+#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008)
+
+/* fake firmware event for trigger */
+#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E)
+
+/**
+ * MasterTrigger is a single U32 passed to/from sysfs.
+ *
+ * Bit Flags (enables) include:
+ * 1. FW Faults
+ * 2. Adapter Reset issued by driver
+ * 3. TMs
+ * 4. Device Remove Event sent by FW
+ */
+
+struct SL_WH_MASTER_TRIGGER_T {
+ uint32_t MasterData;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element
+ * @EventValue: Event Code to trigger on
+ * @LogEntryQualifier: Type of FW event that was logged (Log Entry Added Event only)
+ *
+ * Defines an event that should induce a DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_EVENT_TRIGGER_T {
+ uint16_t EventValue;
+ uint16_t LogEntryQualifier;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of Event Triggers to be monitored for.
+ * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this
+ * structure.
+ * @EventTriggerEntry: List of Event trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set Event Triggers
+ * in the Linux Driver.
+ */
+
+struct SL_WH_EVENT_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element
+ * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for
+ * wildcard.
+ * @ASC: Additional Sense Code. Can be specific or 0xFF for wildcard
+ * @SenseKey: SCSI Sense Key
+ *
+ * Defines a sense key (single or many variants) that should induce a
+ * DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_SCSI_TRIGGER_T {
+ U8 ASCQ;
+ U8 ASC;
+ U8 SenseKey;
+ U8 Reserved;
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of SCSI sense codes that should trigger a DIAG_SERVICE event when
+ * observed.
+ * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this
+ * structure.
+ * @SCSITriggerEntry: List of SCSI Sense Code trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set SCSI Sense Code
+ * Triggers in the Linux Driver.
+ */
+struct SL_WH_SCSI_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element
+ * @IOCStatus: MPI IOCStatus
+ * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard
+ *
+ * Defines a MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER
+ * driver event if observed.
+ */
+struct SL_WH_MPI_TRIGGER_T {
+ uint16_t IOCStatus;
+ uint16_t Reserved;
+ uint32_t IocLogInfo;
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE
+ * event when observed.
+ * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this
+ * structure.
+ * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set MPI Error Triggers
+ * in the Linux Driver.
+ */
+struct SL_WH_MPI_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger
+ * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX)
+ * @u: trigger condition that caused trigger to be sent
+ */
+struct SL_WH_TRIGGERS_EVENT_DATA_T {
+ uint32_t trigger_type;
+ union {
+ struct SL_WH_MASTER_TRIGGER_T master;
+ struct SL_WH_EVENT_TRIGGER_T event;
+ struct SL_WH_SCSI_TRIGGER_T scsi;
+ struct SL_WH_MPI_TRIGGER_T mpi;
+ } u;
+};
+#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */
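
The *_TRIGGERS_T structures above are exchanged with userspace as raw binary blobs through
sysfs attributes named by the *_FILE_NAME macros. A minimal userspace sketch for arming a
SCSI trigger; the struct layout is mirrored locally with fixed-width types, and the exact
sysfs location of diag_trigger_scsi is an assumption, not something this header specifies:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* local mirror of SL_WH_SCSI_TRIGGER_T / SL_WH_SCSI_TRIGGERS_T */
struct scsi_trigger_entry {
	uint8_t ascq;
	uint8_t asc;
	uint8_t sense_key;
	uint8_t reserved;
};

struct scsi_triggers {
	uint32_t valid_entries;
	struct scsi_trigger_entry entry[20];	/* NUM_VALID_ENTRIES */
};

/* arm a trigger on any MEDIUM ERROR sense (ASC/ASCQ wildcarded) */
static int arm_medium_error_trigger(const char *sysfs_path)
{
	struct scsi_triggers t;
	ssize_t n;
	int fd;

	memset(&t, 0, sizeof(t));
	t.valid_entries = 1;
	t.entry[0].sense_key = 0x03;	/* MEDIUM ERROR */
	t.entry[0].asc = 0xff;		/* wildcard */
	t.entry[0].ascq = 0xff;		/* wildcard */

	fd = open(sysfs_path, O_WRONLY);	/* e.g. the host's diag_trigger_scsi attribute */
	if (fd < 0)
		return -1;
	n = write(fd, &t, sizeof(t));
	close(fd);
	return n == (ssize_t)sizeof(t) ? 0 : -1;
}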
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
index 39f554f5f261..8fbb97a8bfd3 100644
--- a/drivers/scsi/mvme16x_scsi.c
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -34,8 +34,7 @@ static struct scsi_host_template mvme16x_scsi_driver_template = {
static struct platform_device *mvme16x_scsi_device;
-static __devinit int
-mvme16x_probe(struct platform_device *dev)
+static int mvme16x_probe(struct platform_device *dev)
{
struct Scsi_Host * host = NULL;
struct NCR_700_Host_Parameters *hostdata;
@@ -103,8 +102,7 @@ mvme16x_probe(struct platform_device *dev)
return -ENODEV;
}
-static __devexit int
-mvme16x_device_remove(struct platform_device *dev)
+static int mvme16x_device_remove(struct platform_device *dev)
{
struct Scsi_Host *host = platform_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
@@ -131,7 +129,7 @@ static struct platform_driver mvme16x_scsi_driver = {
.owner = THIS_MODULE,
},
.probe = mvme16x_probe,
- .remove = __devexit_p(mvme16x_device_remove),
+ .remove = mvme16x_device_remove,
};
static int __init mvme16x_scsi_init(void)
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 8ba47229049f..8bb06995adfb 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -41,7 +41,7 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
phy->phy_type |= PORT_TYPE_SATA;
}
-static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+static void mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
void __iomem *regs = mvi->regs;
u32 tmp;
@@ -54,7 +54,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
mw32(MVS_PCS, tmp);
}
-static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
+static void mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
int i;
@@ -156,7 +156,7 @@ void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
}
}
-static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
+static int mvs_64xx_chip_reset(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
u32 tmp;
@@ -250,7 +250,7 @@ static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
}
}
-static int __devinit mvs_64xx_init(struct mvs_info *mvi)
+static int mvs_64xx_init(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
int i;
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 7e423e5ad5e1..1e4479f3331a 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -216,8 +216,7 @@ void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
}
-static void __devinit
-mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
+static void mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
{
u32 temp;
temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
@@ -258,7 +257,7 @@ mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
mvi->hba_info_param.phy_rate[phy_id]);
}
-static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+static void mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
void __iomem *regs = mvi->regs;
u32 tmp;
@@ -331,7 +330,7 @@ static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
}
-static int __devinit mvs_94xx_init(struct mvs_info *mvi)
+static int mvs_94xx_init(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
int i;
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 8f7eb4f21140..487aa6f97412 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -258,21 +258,11 @@ enum sas_sata_phy_regs {
#define SPI_ADDR_VLD_94XX (1U << 1)
#define SPI_CTRL_SpiStart_94XX (1U << 0)
-#define mv_ffc(x) ffz(x)
-
static inline int
mv_ffc64(u64 v)
{
- int i;
- i = mv_ffc((u32)v);
- if (i >= 0)
- return i;
- i = mv_ffc((u32)(v>>32));
-
- if (i != 0)
- return 32 + i;
-
- return -1;
+ u64 x = ~v;
+ return x ? __ffs64(x) : -1;
}
#define r_reg_set_enable(i) \
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index bcc408042cee..8c4479ab49e8 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -160,7 +160,7 @@ static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
MVS_P4_INT_MASK, port, val);
}
-static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
+static inline void mvs_phy_hacks(struct mvs_info *mvi)
{
u32 tmp;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index cc59dff3810b..ce90d0546cdd 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -96,7 +96,7 @@ static struct sas_domain_function_template mvs_transport_ops = {
};
-static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
+static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
struct mvs_phy *phy = &mvi->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -235,7 +235,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
return IRQ_HANDLED;
}
-static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
+static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
int i = 0, slot_nr;
char pool_name[32];
@@ -373,7 +373,7 @@ void mvs_iounmap(void __iomem *regs)
iounmap(regs);
}
-static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
+static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct Scsi_Host *shost, unsigned int id)
{
@@ -444,7 +444,7 @@ static int pci_go_64(struct pci_dev *pdev)
return rc;
}
-static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
+static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
const struct mvs_chip_info *chip_info)
{
int phy_nr, port_nr; unsigned short core_nr;
@@ -486,7 +486,7 @@ exit_free:
}
-static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
+static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
const struct mvs_chip_info *chip_info)
{
int can_queue, i = 0, j = 0;
@@ -537,8 +537,7 @@ static void mvs_init_sas_add(struct mvs_info *mvi)
memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
}
-static int __devinit mvs_pci_init(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int rc, nhost = 0;
struct mvs_info *mvi;
@@ -645,7 +644,7 @@ err_out_enable:
return rc;
}
-static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+static void mvs_pci_remove(struct pci_dev *pdev)
{
unsigned short core_nr, i = 0;
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
@@ -677,7 +676,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
return;
}
-static struct pci_device_id __devinitdata mvs_pci_table[] = {
+static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
{
@@ -748,7 +747,7 @@ static struct pci_driver mvs_pci_driver = {
.name = DRV_NAME,
.id_table = mvs_pci_table,
.probe = mvs_pci_init,
- .remove = __devexit_p(mvs_pci_remove),
+ .remove = mvs_pci_remove,
};
static ssize_t
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index a3776d6ced60..078c63913b55 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -220,8 +220,8 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
return rc;
}
-void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
- u32 off_lo, u32 off_hi, u64 sas_addr)
+void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
+ u32 off_hi, u64 sas_addr)
{
u32 lo = (u32)sas_addr;
u32 hi = (u32)(sas_addr>>32);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index c04a4f5b5972..2ae77a0394b2 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -69,7 +69,7 @@ extern struct kmem_cache *mvs_task_list_cache;
#define DEV_IS_EXPANDER(type) \
((type == EDGE_DEV) || (type == FANOUT_DEV))
-#define bit(n) ((u32)1 << n)
+#define bit(n) ((u64)1 << n)
#define for_each_phy(__lseq_mask, __mc, __lseq) \
for ((__mc) = (__lseq_mask), (__lseq) = 0; \
@@ -456,8 +456,8 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
-void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
- u32 off_lo, u32 off_hi, u64 sas_addr);
+void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
+ u32 off_hi, u64 sas_addr);
void mvs_scan_start(struct Scsi_Host *shost);
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
int mvs_queue_command(struct sas_task *task, const int num,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index c585a925b3cd..4594ccaaf49b 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2506,8 +2506,7 @@ fail_add_device:
* @pdev: PCI device structure
* @id: PCI ids of supported hotplugged adapter
*/
-static int __devinit mvumi_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
struct mvumi_hba *mhba;
@@ -2728,7 +2727,7 @@ static struct pci_driver mvumi_pci_driver = {
.name = MV_DRIVER_NAME,
.id_table = mvumi_pci_table,
.probe = mvumi_probe_one,
- .remove = __devexit_p(mvumi_detach_one),
+ .remove = mvumi_detach_one,
.shutdown = mvumi_shutdown,
#ifdef CONFIG_PM
.suspend = mvumi_suspend,
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 62b616891a33..1cc0c1c69c88 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -76,7 +76,7 @@ static const char *nsp32_release_version = "1.2";
/****************************************************************************
* Supported hardware
*/
-static struct pci_device_id nsp32_pci_table[] __devinitdata = {
+static struct pci_device_id nsp32_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_IODATA,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
@@ -186,10 +186,10 @@ static nsp32_sync_table nsp32_sync_table_pci[] = {
* function declaration
*/
/* module entry point */
-static int __devinit nsp32_probe (struct pci_dev *, const struct pci_device_id *);
-static void __devexit nsp32_remove(struct pci_dev *);
-static int __init init_nsp32 (void);
-static void __exit exit_nsp32 (void);
+static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
+static void nsp32_remove(struct pci_dev *);
+static int __init init_nsp32 (void);
+static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
@@ -3382,7 +3382,7 @@ static int nsp32_resume(struct pci_dev *pdev)
/************************************************************************
* PCI/Cardbus probe/remove routine
*/
-static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret;
nsp32_hw_data *data = &nsp32_data_base;
@@ -3418,7 +3418,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
return ret;
}
-static void __devexit nsp32_remove(struct pci_dev *pdev)
+static void nsp32_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -3435,7 +3435,7 @@ static struct pci_driver nsp32_driver = {
.name = "nsp32",
.id_table = nsp32_pci_table,
.probe = nsp32_probe,
- .remove = __devexit_p(nsp32_remove),
+ .remove = nsp32_remove,
#ifdef CONFIG_PM
.suspend = nsp32_suspend,
.resume = nsp32_resume,
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index d4ed9eb52657..0fab6b5c7b82 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -97,9 +97,37 @@ struct osd_dev_handle {
static DEFINE_IDA(osd_minor_ida);
+/*
+ * scsi sysfs attribute operations
+ */
+static ssize_t osdname_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
+ class_dev);
+ return sprintf(buf, "%s\n", ould->odi.osdname);
+}
+
+static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
+ class_dev);
+
+ memcpy(buf, ould->odi.systemid, ould->odi.systemid_len);
+ return ould->odi.systemid_len;
+}
+
+static struct device_attribute osd_uld_attrs[] = {
+ __ATTR(osdname, S_IRUGO, osdname_show, NULL),
+ __ATTR(systemid, S_IRUGO, systemid_show, NULL),
+ __ATTR_NULL,
+};
+
static struct class osd_uld_class = {
.owner = THIS_MODULE,
.name = "scsi_osd",
+ .dev_attrs = osd_uld_attrs,
};
/*
@@ -240,18 +268,11 @@ static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len,
return 0 == memcmp(a1, a2, a1_len);
}
-struct find_oud_t {
- const struct osd_dev_info *odi;
- struct device *dev;
- struct osd_uld_device *oud;
-} ;
-
-int _mach_odi(struct device *dev, void *find_data)
+static int _match_odi(struct device *dev, const void *find_data)
{
struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
class_dev);
- struct find_oud_t *fot = find_data;
- const struct osd_dev_info *odi = fot->odi;
+ const struct osd_dev_info *odi = find_data;
if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
odi->systemid, odi->systemid_len) &&
@@ -259,7 +280,6 @@ int _mach_odi(struct device *dev, void *find_data)
odi->osdname, odi->osdname_len)) {
OSD_DEBUG("found device sysid_len=%d osdname=%d\n",
odi->systemid_len, odi->osdname_len);
- fot->oud = oud;
return 1;
} else {
return 0;
@@ -273,19 +293,19 @@ int _mach_odi(struct device *dev, void *find_data)
*/
struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi)
{
- struct find_oud_t find = {.odi = odi};
-
- find.dev = class_find_device(&osd_uld_class, NULL, &find, _mach_odi);
- if (likely(find.dev)) {
+ struct device *dev = class_find_device(&osd_uld_class, NULL, odi, _match_odi);
+ if (likely(dev)) {
struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL);
+ struct osd_uld_device *oud = container_of(dev,
+ struct osd_uld_device, class_dev);
if (unlikely(!odh)) {
- put_device(find.dev);
+ put_device(dev);
return ERR_PTR(-ENOMEM);
}
- odh->od = find.oud->od;
- odh->oud = find.oud;
+ odh->od = oud->od;
+ odh->oud = oud;
return &odh->od;
}
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index bf54aafc2d71..b8dd05074abb 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -47,7 +47,7 @@
* read_main_config_table - read the configure table and save it.
* @pm8001_ha: our hba card information
*/
-static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00);
@@ -83,8 +83,7 @@ static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
* read_general_status_table - read the general status table and save it.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->general_stat_tbl_addr;
pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00);
@@ -118,8 +117,7 @@ read_general_status_table(struct pm8001_hba_info *pm8001_ha)
* read_inbnd_queue_table - read the inbound queue table and save it.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int inbQ_num = 1;
int i;
@@ -137,8 +135,7 @@ read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
* read_outbnd_queue_table - read the outbound queue table and save it.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int outbQ_num = 1;
int i;
@@ -156,8 +153,7 @@ read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
* init_default_table_values - init the default table.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
{
int qn = 1;
int i;
@@ -250,8 +246,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
* update_main_config_table - update the main default table to the HBA.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
pm8001_mw32(address, 0x24,
@@ -297,8 +292,8 @@ update_main_config_table(struct pm8001_hba_info *pm8001_ha)
* update_inbnd_queue_table - update the inbound queue table to the HBA.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+ int number)
{
void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
u16 offset = number * 0x20;
@@ -318,8 +313,8 @@ update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
* update_outbnd_queue_table - update the outbound queue table to the HBA.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+ int number)
{
void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
u16 offset = number * 0x24;
@@ -370,8 +365,8 @@ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
* @pm8001_ha: our hba card information
* @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc.
*/
-static void __devinit
-mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
+static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
+ u32 SSCbit)
{
u32 value, offset, i;
unsigned long flags;
@@ -438,9 +433,8 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
* @pm8001_ha: our hba card information
* @interval - interval time for each OPEN_REJECT (RETRY). The units are in 1us.
*/
-static void __devinit
-mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
- u32 interval)
+static void mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
+ u32 interval)
{
u32 offset;
u32 value;
@@ -601,7 +595,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
* pm8001_chip_init - the main init function that initialize whole PM8001 chip.
* @pm8001_ha: our hba card information
*/
-static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
{
/* check the firmware status */
if (-1 == check_fw_ready(pm8001_ha)) {
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 0267c22f8741..4c9fe733fe88 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -104,8 +104,7 @@ static struct sas_domain_function_template pm8001_transport_ops = {
*@pm8001_ha: our hba structure.
*@phy_id: phy id.
*/
-static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
- int phy_id)
+static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
{
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -195,7 +194,7 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
* @pm8001_ha:our hba structure.
*
*/
-static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
{
int i;
spin_lock_init(&pm8001_ha->lock);
@@ -360,8 +359,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
* @ent: ent
* @shost: scsi host struct which has been initialized before.
*/
-static struct pm8001_hba_info *__devinit
-pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
+static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
+ u32 chip_id,
+ struct Scsi_Host *shost)
{
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -433,8 +433,8 @@ static int pci_go_44(struct pci_dev *pdev)
* @shost: scsi host which has been allocated outside.
* @chip_info: our ha struct.
*/
-static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host * shost,
- const struct pm8001_chip_info *chip_info)
+static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
+ const struct pm8001_chip_info *chip_info)
{
int phy_nr, port_nr;
struct asd_sas_phy **arr_phy;
@@ -479,8 +479,8 @@ exit:
* @shost: scsi host which has been allocated outside
* @chip_info: our ha struct.
*/
-static void __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost,
- const struct pm8001_chip_info *chip_info)
+static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
+ const struct pm8001_chip_info *chip_info)
{
int i = 0;
struct pm8001_hba_info *pm8001_ha;
@@ -615,8 +615,8 @@ intx:
* pci driver it is invoked, all struct an hardware initilization should be done
* here, also, register interrupt
*/
-static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pm8001_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
unsigned int rc;
u32 pci_reg;
@@ -707,7 +707,7 @@ err_out_enable:
return rc;
}
-static void __devexit pm8001_pci_remove(struct pci_dev *pdev)
+static void pm8001_pci_remove(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
@@ -842,7 +842,7 @@ err_out_enable:
return rc;
}
-static struct pci_device_id __devinitdata pm8001_pci_table[] = {
+static struct pci_device_id pm8001_pci_table[] = {
{
PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
},
@@ -857,7 +857,7 @@ static struct pci_driver pm8001_pci_driver = {
.name = DRV_NAME,
.id_table = pm8001_pci_table,
.probe = pm8001_pci_probe,
- .remove = __devexit_p(pm8001_pci_remove),
+ .remove = pm8001_pci_remove,
.suspend = pm8001_pci_suspend,
.resume = pm8001_pci_resume,
};
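
[Editor's note] The pm8001 and pmcraid hunks above all drop the __devinit/__devexit section annotations and the __devexit_p() pointer wrapper. The sketch below is a standalone user-space illustration (invented names, not the real kernel headers) of why the wrapper existed at all: when hotplug support was compiled out, __devexit functions were discarded from the image, so any pointer to them had to collapse to NULL; once the section annotations are gone, a plain function pointer suffices.

/*
 * Standalone sketch of the historical __devexit_p() mechanism.
 * SKETCH_HOTPLUG stands in for the old CONFIG_HOTPLUG switch.
 */
#include <stdio.h>

#define SKETCH_HOTPLUG 1                /* flip to 0 to mimic !CONFIG_HOTPLUG */

#if SKETCH_HOTPLUG
#define sketch_devexit_p(fn)    (fn)
#else
#define sketch_devexit_p(fn)    NULL
#endif

struct sketch_driver {
	int  (*probe)(int id);
	void (*remove)(int id);
};

static int  sketch_probe(int id)  { printf("probe %d\n", id);  return 0; }
static void sketch_remove(int id) { printf("remove %d\n", id); }

static struct sketch_driver drv = {
	.probe  = sketch_probe,
	.remove = sketch_devexit_p(sketch_remove),   /* now simply sketch_remove */
};

int main(void)
{
	drv.probe(1);
	if (drv.remove)
		drv.remove(1);
	return 0;
}
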
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index af763eab2039..b46f5e906837 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -125,7 +125,7 @@ static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
/*
* PCI device ids supported by pmcraid driver
*/
-static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
+static struct pci_device_id pmcraid_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
},
@@ -4818,8 +4818,7 @@ pmcraid_release_control_blocks(
* Return Value
* 0 in case of success; -ENOMEM in case of failure
*/
-static int __devinit
-pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
+static int pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
{
int i;
@@ -4855,8 +4854,7 @@ pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
* Return Value
* 0 in case it can allocate all control blocks, otherwise -ENOMEM
*/
-static int __devinit
-pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
+static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
{
int i;
@@ -4922,8 +4920,7 @@ pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
* Return value
* 0 hrrq buffers are allocated, -ENOMEM otherwise.
*/
-static int __devinit
-pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
+static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
{
int i, buffer_size;
@@ -5062,8 +5059,7 @@ static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
* Return Value
* 0 for successful allocation, -ENOMEM for any failure
*/
-static int __devinit
-pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
+static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
{
int i;
@@ -5181,7 +5177,7 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
* Return Value
* 0 in case all of the blocks are allocated, -ENOMEM otherwise.
*/
-static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
+static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
{
int i;
@@ -5281,11 +5277,8 @@ static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
* Return Value
* 0 on success, non-zero in case of any failure
*/
-static int __devinit pmcraid_init_instance(
- struct pci_dev *pdev,
- struct Scsi_Host *host,
- void __iomem *mapped_pci_addr
-)
+static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
+ void __iomem *mapped_pci_addr)
{
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)host->hostdata;
@@ -5442,7 +5435,7 @@ static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
* Return value
* none
*/
-static void __devexit pmcraid_remove(struct pci_dev *pdev)
+static void pmcraid_remove(struct pci_dev *pdev)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
@@ -5883,10 +5876,8 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
* returns 0 if the device is claimed and successfully configured.
* returns non-zero error code in case of any failure
*/
-static int __devinit pmcraid_probe(
- struct pci_dev *pdev,
- const struct pci_device_id *dev_id
-)
+static int pmcraid_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
{
struct pmcraid_instance *pinstance;
struct Scsi_Host *host;
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 959f10055be7..e6e2a30493e6 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -359,7 +359,7 @@ static struct scsi_host_template ps3rom_host_template = {
};
-static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev)
+static int ps3rom_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
int error;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 538230be5cca..5a522c5bbd43 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1438,7 +1438,7 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
* Returns:
* 0 = success
*/
-static int __devinit
+static int
qla1280_initialize_adapter(struct scsi_qla_host *ha)
{
struct device_reg __iomem *reg;
@@ -4230,7 +4230,7 @@ static struct scsi_host_template qla1280_driver_template = {
};
-static int __devinit
+static int
qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int devnum = id->driver_data;
@@ -4399,7 +4399,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
-static void __devexit
+static void
qla1280_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -4433,7 +4433,7 @@ static struct pci_driver qla1280_pci_driver = {
.name = "qla1280",
.id_table = qla1280_pci_tbl,
.probe = qla1280_probe_one,
- .remove = __devexit_p(qla1280_remove_one),
+ .remove = qla1280_remove_one,
};
static int __init
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c28215f8bed..83d798428c10 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1615,8 +1615,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
*/
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
if (IS_FWI2_CAPABLE(fcport->vha->hw))
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2f9bddd3c616..9f34dedcdad7 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -219,7 +219,8 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
break;
}
exit_fcp_prio_cfg:
- bsg_job->job_done(bsg_job);
+ if (!ret)
+ bsg_job->job_done(bsg_job);
return ret;
}
@@ -741,9 +742,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
}
ql_dbg(ql_dbg_user, vha, 0x70c0,
@@ -761,9 +761,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
new_config, elreq.options);
if (rval) {
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
}
type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -795,9 +794,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"MPI reset failed.\n");
}
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EIO;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
}
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -812,34 +810,27 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x702c,
"Vendor request %s failed.\n", type);
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
- sizeof(struct fc_bsg_reply);
-
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->reply->reply_payload_rcv_len = 0;
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
"Vendor request %s completed.\n", type);
-
- bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
- sizeof(response) + sizeof(uint8_t);
- bsg_job->reply->reply_payload_rcv_len =
- bsg_job->reply_payload.payload_len;
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- bsg_job->reply->result = DID_OK;
+ bsg_job->reply->result = (DID_OK << 16);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, rsp_data,
rsp_data_len);
}
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(response) + sizeof(uint8_t);
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+
+done_free_dma_rsp:
dma_free_coherent(&ha->pdev->dev, rsp_data_len,
rsp_data, rsp_data_dma);
done_free_dma_req:
@@ -853,6 +844,8 @@ done_unmap_req_sg:
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -877,16 +870,15 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x7030,
"Vendor request 84xx reset failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7031,
"Vendor request 84xx reset completed.\n");
bsg_job->reply->result = DID_OK;
+ bsg_job->job_done(bsg_job);
}
- bsg_job->job_done(bsg_job);
return rval;
}
@@ -976,8 +968,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7037,
"Vendor request 84xx updatefw failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7038,
"Vendor request 84xx updatefw completed.\n");
@@ -986,7 +977,6 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
bsg_job->reply->result = DID_OK;
}
- bsg_job->job_done(bsg_job);
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
done_free_fw_buf:
@@ -996,6 +986,8 @@ done_unmap_sg:
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -1163,8 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7043,
"Vendor request 84xx mgmt failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7044,
@@ -1184,8 +1175,6 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
}
}
- bsg_job->job_done(bsg_job);
-
done_unmap_sg:
if (mgmt_b)
dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
@@ -1200,6 +1189,8 @@ done_unmap_sg:
exit_mgmt:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -1276,9 +1267,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
fcport->port_name[3], fcport->port_name[4],
fcport->port_name[5], fcport->port_name[6],
fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
-
+ rval = (DID_ERROR << 16);
} else {
if (!port_param->mode) {
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
@@ -1292,9 +1281,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
bsg_job->reply->result = DID_OK;
+ bsg_job->job_done(bsg_job);
}
- bsg_job->job_done(bsg_job);
return rval;
}
@@ -1887,8 +1876,6 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
return qla24xx_process_bidir_cmd(bsg_job);
default:
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->job_done(bsg_job);
return -ENOSYS;
}
}
@@ -1919,8 +1906,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_job->request->msgcode);
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->job_done(bsg_job);
return -EBUSY;
}
@@ -1943,7 +1928,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_RPT_CT:
default:
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
- bsg_job->reply->result = ret;
break;
}
return ret;
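
[Editor's note] The qla_bsg.c hunks above converge the many scattered bsg_job->job_done() calls onto a single unwind path, guarded by the return value. The following minimal user-space sketch (invented names, not driver code) shows the shape being adopted: resources are released on one labelled cleanup path and the completion callback runs exactly once, including the "soft failure" case where the error is reported through the job rather than the return code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct job {
	void (*done)(struct job *job);
	int result;
};

static void job_done(struct job *job)
{
	printf("job completed, result=%d\n", job->result);
}

static int process_job(struct job *job, int fail_step)
{
	char *req_buf = NULL, *rsp_buf = NULL;
	int rval = 0;

	req_buf = malloc(64);
	if (!req_buf)
		return -ENOMEM;

	rsp_buf = malloc(64);
	if (!rsp_buf) {
		rval = -ENOMEM;
		goto free_req;
	}

	if (fail_step == 1) {
		/* Soft failure: report it through the job, still complete it. */
		job->result = -1;
		rval = 0;
		goto free_rsp;
	}

	job->result = 0;

free_rsp:
	free(rsp_buf);
free_req:
	free(req_buf);
	if (!rval)              /* complete exactly once, on the common path */
		job->done(job);
	return rval;
}

int main(void)
{
	struct job j = { .done = job_done };

	process_job(&j, 0);
	process_job(&j, 1);
	return 0;
}
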
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 44efe3cc79e6..53f9e492f9dc 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0124 | 0x4b,0xba,0xfa |
+ * | Module Init and Probe | 0x0125 | 0x4b,0xba,0xfa |
* | Mailbox commands | 0x114f | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
@@ -526,8 +526,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ha->max_req_queues : ha->max_rsp_queues;
mq->count = htonl(que_cnt);
for (cnt = 0; cnt < que_cnt; cnt++) {
- reg = (struct device_reg_25xxmq *) ((void *)
- ha->mqiobase + cnt * QLA_QUE_PAGE);
+ reg = (struct device_reg_25xxmq __iomem *)
+ (ha->mqiobase + cnt * QLA_QUE_PAGE);
que_idx = cnt * 4;
mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
@@ -2268,7 +2268,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (!cnt) {
nxt = fw->code_ram;
- nxt += sizeof(fw->code_ram),
+ nxt += sizeof(fw->code_ram);
nxt += (ha->fw_memory_size - 0x100000 + 1);
goto copy_queue;
} else
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9725bf5527b..6e7727f46d43 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2486,9 +2486,9 @@ struct bidi_statistics {
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
((ha->mqenable || IS_QLA83XX(ha)) ? \
- ((void *)(ha->mqiobase) +\
+ ((device_reg_t __iomem *)(ha->mqiobase) +\
(QLA_QUE_PAGE * id)) :\
- ((void *)(ha->iobase)))
+ ((device_reg_t __iomem *)(ha->iobase)))
#define QLA_REQ_QUE_ID(tag) \
((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
#define QLA_DEFAULT_QUE_QOS 5
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 59524aa0ab32..be6d61a89edc 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1092,6 +1092,27 @@ struct device_reg_24xx {
uint32_t unused_6[2]; /* Gap. */
uint32_t iobase_sdata;
};
+/* RISC-RISC semaphore register PCI offet */
+#define RISC_REGISTER_BASE_OFFSET 0x7010
+#define RISC_REGISTER_WINDOW_OFFET 0x6
+
+/* RISC-RISC semaphore/flag register (risc address 0x7016) */
+
+#define RISC_SEMAPHORE 0x1UL
+#define RISC_SEMAPHORE_WE (RISC_SEMAPHORE << 16)
+#define RISC_SEMAPHORE_CLR (RISC_SEMAPHORE_WE | 0x0UL)
+#define RISC_SEMAPHORE_SET (RISC_SEMAPHORE_WE | RISC_SEMAPHORE)
+
+#define RISC_SEMAPHORE_FORCE 0x8000UL
+#define RISC_SEMAPHORE_FORCE_WE (RISC_SEMAPHORE_FORCE << 16)
+#define RISC_SEMAPHORE_FORCE_CLR (RISC_SEMAPHORE_FORCE_WE | 0x0UL)
+#define RISC_SEMAPHORE_FORCE_SET \
+ (RISC_SEMAPHORE_FORCE_WE | RISC_SEMAPHORE_FORCE)
+
+/* RISC semaphore timeouts (ms) */
+#define TIMEOUT_SEMAPHORE 2500
+#define TIMEOUT_SEMAPHORE_FORCE 2000
+#define TIMEOUT_TOTAL_ELAPSED 4500
/* Trace Control *************************************************************/
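
[Editor's note] The new RISC semaphore defines above appear to follow a "write-enable mask in the upper 16 bits, data bit in the lower 16 bits" convention; that reading is an assumption inferred from the macro shapes, not from a data sheet. The small sketch below simply composes and prints the resulting register values so the encoding is easy to eyeball.

#include <stdio.h>

#define SEM            0x1UL
#define SEM_WE         (SEM << 16)
#define SEM_SET        (SEM_WE | SEM)
#define SEM_CLR        (SEM_WE | 0x0UL)

#define SEM_FORCE      0x8000UL
#define SEM_FORCE_WE   (SEM_FORCE << 16)
#define SEM_FORCE_SET  (SEM_FORCE_WE | SEM_FORCE)
#define SEM_FORCE_CLR  (SEM_FORCE_WE | 0x0UL)

int main(void)
{
	printf("SET       = 0x%08lx\n", SEM_SET);        /* 0x00010001 */
	printf("CLR       = 0x%08lx\n", SEM_CLR);        /* 0x00010000 */
	printf("FORCE_SET = 0x%08lx\n", SEM_FORCE_SET);  /* 0x80008000 */
	printf("FORCE_CLR = 0x%08lx\n", SEM_FORCE_CLR);  /* 0x80000000 */
	return 0;
}
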
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6acb39785a46..2411d1a12b26 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -416,7 +416,7 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
-extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -598,7 +598,6 @@ extern void qla82xx_init_flags(struct qla_hw_data *);
/* ISP 8021 hardware related */
extern void qla82xx_set_drv_active(scsi_qla_host_t *);
-extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index f4e4bd7c3f4d..01efc0e9cc36 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -218,6 +218,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
WWN_SIZE);
+ fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
+ FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+
if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
@@ -1930,6 +1933,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
case BIT_11:
list[i].fp_speed = PORT_SPEED_8GB;
break;
+ case BIT_10:
+ list[i].fp_speed = PORT_SPEED_16GB;
+ break;
}
ql_dbg(ql_dbg_disc, vha, 0x205b,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 48fca47384b7..563eee3fa924 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -429,7 +429,7 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
-int
+static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -997,7 +997,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-int
+static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
uint16_t mb[4] = {0x1010, 0, 1, 0};
@@ -1095,6 +1095,83 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ha->isp_ops->enable_intrs(ha);
}
+static void
+qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
+
+}
+
+static void
+qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+}
+
+static void
+qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t wd32 = 0;
+ uint delta_msec = 100;
+ uint elapsed_msec = 0;
+ uint timeout_msec;
+ ulong n;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
+ return;
+
+attempt:
+ timeout_msec = TIMEOUT_SEMAPHORE;
+ n = timeout_msec / delta_msec;
+ while (n--) {
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
+ qla25xx_read_risc_sema_reg(vha, &wd32);
+ if (wd32 & RISC_SEMAPHORE)
+ break;
+ msleep(delta_msec);
+ elapsed_msec += delta_msec;
+ if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+ goto force;
+ }
+
+ if (!(wd32 & RISC_SEMAPHORE))
+ goto force;
+
+ if (!(wd32 & RISC_SEMAPHORE_FORCE))
+ goto acquired;
+
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
+ timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
+ n = timeout_msec / delta_msec;
+ while (n--) {
+ qla25xx_read_risc_sema_reg(vha, &wd32);
+ if (!(wd32 & RISC_SEMAPHORE_FORCE))
+ break;
+ msleep(delta_msec);
+ elapsed_msec += delta_msec;
+ if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+ goto force;
+ }
+
+ if (wd32 & RISC_SEMAPHORE_FORCE)
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
+
+ goto attempt;
+
+force:
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
+
+acquired:
+ return;
+}
+
/**
* qla24xx_reset_chip() - Reset ISP24xx chip.
* @ha: HA context
@@ -1113,6 +1190,8 @@ qla24xx_reset_chip(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
+ qla25xx_manipulate_risc_semaphore(vha);
+
/* Perform RISC reset. */
qla24xx_reset_risc(vha);
}
@@ -1888,10 +1967,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
qla2x00_init_response_q_entries(rsp);
}
- spin_lock(&ha->vport_slock);
-
- spin_unlock(&ha->vport_slock);
-
ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
ha->tgt.atio_ring_index = 0;
/* Initialize ATIO queue entries */
@@ -1971,6 +2046,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
"Waiting for LIP to complete.\n");
do {
+ memset(state, -1, sizeof(state));
rval = qla2x00_get_firmware_state(vha, state);
if (rval == QLA_SUCCESS) {
if (state[0] < FSTATE_LOSS_OF_SYNC) {
@@ -2907,7 +2983,6 @@ cleanup_allocation:
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- char *link_speed;
int rval;
uint16_t mb[4];
struct qla_hw_data *ha = vha->hw;
@@ -2934,10 +3009,10 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]);
} else {
- link_speed = qla2x00_get_link_speed_str(ha);
ql_dbg(ql_dbg_disc, vha, 0x2005,
"iIDMA adjusted to %s GB/s "
- "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
+ "on %02x%02x%02x%02x%02x%02x%02x%02x.\n",
+ qla2x00_get_link_speed_str(ha, fcport->fp_speed),
fcport->port_name[0], fcport->port_name[1],
fcport->port_name[2], fcport->port_name[3],
fcport->port_name[4], fcport->port_name[5],
@@ -3007,10 +3082,10 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
qla2x00_reg_remote_port(vha, fcport);
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
}
/*
@@ -3868,7 +3943,7 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha)
}
}
-int
+static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -3884,19 +3959,7 @@ __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
return rval;
}
-int
-qla83xx_set_drv_ack(scsi_qla_host_t *vha)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_set_drv_ack(vha);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
+static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -3912,19 +3975,7 @@ __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
return rval;
}
-int
-qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_clear_drv_ack(vha);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-const char *
+static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)
{
switch (dev_state) {
@@ -3978,7 +4029,7 @@ qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
}
/* Assumes idc_lock always held on entry */
-int
+static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -4026,36 +4077,12 @@ __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
}
int
-qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_set_idc_control(vha, idc_control);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
-int
-qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_get_idc_control(vha, idc_control);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
+static int
qla83xx_check_driver_presence(scsi_qla_host_t *vha)
{
uint32_t drv_presence = 0;
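
[Editor's note] The qla25xx_manipulate_risc_semaphore() routine added earlier in this file polls for the semaphore under two time budgets and falls back to a forced grab when the budget is spent. The user-space sketch below (fake register accessors, simplified retry handling) illustrates that acquire pattern; it is an illustration of the control flow only, not a reimplementation of the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEM_HELD   0x1u
#define SEM_FORCE  0x8000u

static uint32_t fake_reg;                 /* stands in for the RISC register */

static void write_sema(uint32_t v) { fake_reg = v; }
static uint32_t read_sema(void)    { return fake_reg; }

static bool acquire_sema(unsigned step_ms, unsigned try_budget_ms,
			 unsigned total_budget_ms)
{
	unsigned elapsed = 0;

	for (;;) {
		unsigned waited;

		for (waited = 0; waited < try_budget_ms; waited += step_ms) {
			write_sema(SEM_HELD);
			if (read_sema() & SEM_HELD)
				break;
			elapsed += step_ms;       /* msleep(step_ms) in the driver */
			if (elapsed > total_budget_ms)
				goto force;
		}

		if (!(read_sema() & SEM_HELD))
			goto force;
		if (!(read_sema() & SEM_FORCE))
			return true;              /* acquired cleanly */

		/* Someone forced it earlier: clear and retry within budget. */
		write_sema(0);
		elapsed += step_ms;
		if (elapsed > total_budget_ms)
			goto force;
	}

force:
	write_sema(SEM_HELD | SEM_FORCE);         /* last resort: forced grab */
	return false;
}

int main(void)
{
	printf("acquired%s\n", acquire_sema(100, 2500, 4500) ? "" : " (forced)");
	return 0;
}
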
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 03b752632839..a481684479c1 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -520,7 +520,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk24 = NULL;
req = ha->req_q_map[0];
- mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
+ mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
if (mrk == NULL) {
ql_log(ql_log_warn, base_vha, 0x3026,
"Failed to allocate Marker IOCB.\n");
@@ -2551,7 +2551,7 @@ sufficient_dsds:
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD(
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
@@ -2748,7 +2748,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
struct rsp_que *rsp;
struct req_que *req;
int rval = EXT_STATUS_OK;
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
rval = QLA_SUCCESS;
@@ -2786,15 +2785,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- if (ha->mqenable)
- cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
- else if (IS_QLA82XX(ha))
- cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
- else if (IS_FWI2_CAPABLE(ha))
- cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
- else
- cnt = qla2x00_debounce_register(
- ISP_REQ_Q_OUT(ha, &reg->isp));
+ cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
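
[Editor's note] The qla2x00_start_bidir() hunk above replaces a chip-family switch with a single read through the queue's own cached req_q_out pointer. The sketch below (invented structures, not the driver's types) shows the idea: keep a pointer to the per-queue OUT register so the free-slot computation is one read, with the usual ring-buffer wraparound arithmetic.

#include <stdint.h>
#include <stdio.h>

struct req_que {
	volatile uint32_t *req_q_out;   /* points at this queue's OUT register */
	uint32_t ring_index;
	uint32_t length;
	uint32_t cnt;                   /* cached free-slot count */
};

static uint32_t free_slots(struct req_que *req)
{
	uint32_t out = *req->req_q_out;         /* one read, no chip-type switch */

	if (req->ring_index < out)
		req->cnt = out - req->ring_index;
	else
		req->cnt = req->length - (req->ring_index - out);
	return req->cnt;
}

int main(void)
{
	uint32_t hw_out = 10;                   /* pretend hardware register */
	struct req_que req = { .req_q_out = &hw_out, .ring_index = 4, .length = 128 };

	printf("free request slots: %u\n", free_slots(&req));
	return 0;
}
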
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9d1c7b56090a..873c82014b16 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -316,28 +316,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
}
#define LS_UNKNOWN 2
-char *
-qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+const char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
- static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
- char *link_speed;
- int fw_speed = ha->link_data_rate;
+ static const char * const link_speeds[] = {
+ "1", "2", "?", "4", "8", "16", "10"
+ };
if (IS_QLA2100(ha) || IS_QLA2200(ha))
- link_speed = link_speeds[0];
- else if (fw_speed == 0x13)
- link_speed = link_speeds[6];
- else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (fw_speed < 6)
- link_speed =
- link_speeds[fw_speed];
- }
-
- return link_speed;
+ return link_speeds[0];
+ else if (speed == 0x13)
+ return link_speeds[6];
+ else if (speed < 6)
+ return link_speeds[speed];
+ else
+ return link_speeds[LS_UNKNOWN];
}
-void
+static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
struct qla_hw_data *ha = vha->hw;
@@ -671,7 +667,7 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n",
- qla2x00_get_link_speed_str(ha));
+ qla2x00_get_link_speed_str(ha, ha->link_data_rate));
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -860,7 +856,7 @@ skip_rio:
mb[1], mb[2], mb[3]);
ql_log(ql_log_warn, vha, 0x505f,
"Link is operational (%s Gbps).\n",
- qla2x00_get_link_speed_str(ha));
+ qla2x00_get_link_speed_str(ha, ha->link_data_rate));
/*
* Mark all devices as missing so we will login again.
@@ -2944,7 +2940,9 @@ skip_msi:
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
- }
+ } else if (!ha->flags.msi_enabled)
+ ql_dbg(ql_dbg_init, vha, 0x0125,
+ "INTa mode: Enabled.\n");
clear_risc_ints:
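
[Editor's note] The qla_isr.c hunk above rewrites qla2x00_get_link_speed_str() as a const string table indexed by an explicit speed parameter instead of shared hw state. The following small sketch mirrors that lookup style (the 1 Gb-only legacy-hardware branch is omitted); the value 0x13 is the firmware code the driver maps to the "10" Gbps string.

#include <stdint.h>
#include <stdio.h>

static const char *link_speed_str(uint16_t speed)
{
	static const char * const speeds[] = {
		"1", "2", "?", "4", "8", "16", "10"
	};

	if (speed == 0x13)                   /* firmware code for 10 Gbps */
		return speeds[6];
	if (speed < 6)
		return speeds[speed];
	return speeds[2];                    /* unknown */
}

int main(void)
{
	printf("%s Gbps\n", link_speed_str(3));      /* "4" */
	printf("%s Gbps\n", link_speed_str(0x13));   /* "10" */
	printf("%s Gbps\n", link_speed_str(9));      /* "?" */
	return 0;
}
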
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 18c509fae555..68c55eaa318c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3122,7 +3122,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vp_idx == 0 && (MSB(stat) != 1))
goto reg_needed;
- if (MSB(stat) != 0) {
+ if (MSB(stat) != 0 && MSB(stat) != 2) {
ql_dbg(ql_dbg_mbx, vha, 0x10ba,
"Could not acquire ID for VP[%d].\n", vp_idx);
return;
@@ -3536,7 +3536,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
+ reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
QLA_QUE_PAGE * req->id);
mcp->mb[4] = req->id;
@@ -3605,7 +3605,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
+ reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
QLA_QUE_PAGE * rsp->id);
mcp->mb[4] = rsp->id;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index f5e297c6b684..3e3f593bada3 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -36,7 +36,7 @@
#define MAX_CRB_XFORM 60
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
-int qla82xx_crb_table_initialized;
+static int qla82xx_crb_table_initialized;
#define qla82xx_crb_addr_transform(name) \
(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
@@ -102,7 +102,7 @@ static void qla82xx_crb_addr_transform_setup(void)
qla82xx_crb_table_initialized = 1;
}
-struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
{{{0, 0, 0, 0} } },
{{{1, 0x0100000, 0x0102000, 0x120000},
{1, 0x0110000, 0x0120000, 0x130000},
@@ -262,7 +262,7 @@ struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
/*
* top 12 bits of crb internal address (hub, agent)
*/
-unsigned qla82xx_crb_hub_agt[64] = {
+static unsigned qla82xx_crb_hub_agt[64] = {
0,
QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
@@ -330,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = {
};
/* Device states */
-char *q_dev_state[] = {
+static char *q_dev_state[] = {
"Unknown",
"Cold",
"Initializing",
@@ -359,12 +359,13 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
ha->crb_win = CRB_HI(*off);
writel(ha->crb_win,
- (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
/* Read back value to make sure write has gone through before trying
* to use it.
*/
- win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ win_read = RD_REG_DWORD((void __iomem *)
+ (CRB_WINDOW_2M + ha->nx_pcibase));
if (win_read != ha->crb_win) {
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
@@ -567,7 +568,7 @@ qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
return 1;
}
-int qla82xx_pci_set_window_warning_count;
+static int qla82xx_pci_set_window_warning_count;
static unsigned long
qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
@@ -677,10 +678,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
u64 off, void *data, int size)
{
unsigned long flags;
- void *addr = NULL;
+ void __iomem *addr = NULL;
int ret = 0;
u64 start;
- uint8_t *mem_ptr = NULL;
+ uint8_t __iomem *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -712,7 +713,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
else
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
- if (mem_ptr == 0UL) {
+ if (mem_ptr == NULL) {
*(u8 *)data = 0;
return -1;
}
@@ -749,10 +750,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
u64 off, void *data, int size)
{
unsigned long flags;
- void *addr = NULL;
+ void __iomem *addr = NULL;
int ret = 0;
u64 start;
- uint8_t *mem_ptr = NULL;
+ uint8_t __iomem *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -784,7 +785,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
else
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
- if (mem_ptr == 0UL)
+ if (mem_ptr == NULL)
return -1;
addr = mem_ptr;
@@ -908,24 +909,24 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
return 0;
}
-int
+static int
qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
uint32_t off_value, rval = 0;
- WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
+ WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
(off & 0xFFFF0000));
/* Read back value to make sure write has gone through */
- RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
off_value = (off & 0x0000FFFF);
if (flag)
- WRT_REG_DWORD((void *)
+ WRT_REG_DWORD((void __iomem *)
(off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
data);
else
- rval = RD_REG_DWORD((void *)
+ rval = RD_REG_DWORD((void __iomem *)
(off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
return rval;
@@ -1654,7 +1655,6 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
if (!ha->nx_pcibase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
"Cannot remap pcibase MMIO, aborting.\n");
- pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1669,7 +1669,6 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
if (!ha->nxdb_wr_ptr) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
"Cannot remap MMIO, aborting.\n");
- pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1764,14 +1763,6 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
}
-void qla82xx_reset_adapter(struct scsi_qla_host *vha)
-{
- struct qla_hw_data *ha = vha->hw;
- vha->flags.online = 0;
- qla2x00_try_to_stop_firmware(vha);
- ha->isp_ops->disable_intrs(ha);
-}
-
static int
qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
{
@@ -1856,7 +1847,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
return -1;
}
-int
+static int
qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
{
__le32 val;
@@ -1961,20 +1952,6 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
}
/* ISR related functions */
-uint32_t qla82xx_isr_int_target_mask_enable[8] = {
- ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
- ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
- ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
- ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
-};
-
-uint32_t qla82xx_isr_int_target_status[8] = {
- ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
- ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
- ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
- ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
-};
-
static struct qla82xx_legacy_intr_set legacy_intr[] = \
QLA82XX_LEGACY_INTR_CONFIG;
@@ -2813,7 +2790,7 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
else {
WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
wmb();
@@ -2821,7 +2798,8 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
}
}
-void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
+static void
+qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -3177,7 +3155,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
}
-int
+static int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
uint32_t fw_heartbeat_counter;
@@ -3817,7 +3795,8 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
loop_cnt = ocm_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
+ r_value = RD_REG_DWORD((void __iomem *)
+ (r_addr + ha->nx_pcibase));
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
@@ -4376,7 +4355,7 @@ qla82xx_md_free(scsi_qla_host_t *vha)
ha->md_tmplt_hdr, ha->md_template_size / 1024);
dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
- ha->md_tmplt_hdr = 0;
+ ha->md_tmplt_hdr = NULL;
}
/* Release the template data buffer allocated */
@@ -4386,7 +4365,7 @@ qla82xx_md_free(scsi_qla_host_t *vha)
ha->md_dump, ha->md_dump_size / 1024);
vfree(ha->md_dump);
ha->md_dump_size = 0;
- ha->md_dump = 0;
+ ha->md_dump = NULL;
}
}
@@ -4423,7 +4402,7 @@ qla82xx_md_prep(scsi_qla_host_t *vha)
dma_free_coherent(&ha->pdev->dev,
ha->md_template_size,
ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
- ha->md_tmplt_hdr = 0;
+ ha->md_tmplt_hdr = NULL;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d501bf5f806b..10d23f8b7036 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -41,7 +41,7 @@ static struct kmem_cache *ctx_cachep;
*/
int ql_errlev = ql_log_all;
-int ql2xenableclass2;
+static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
"Specify if Class 2 operations are supported from the very "
@@ -89,6 +89,8 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
+ "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
+ "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
"\t\t0x1e400000 - Preferred value for capturing essential "
"debug information (equivalent to old "
@@ -494,12 +496,20 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
(BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
strcpy(str, "PCIe (");
- if (lspeed == 1)
+ switch (lspeed) {
+ case 1:
strcat(str, "2.5GT/s ");
- else if (lspeed == 2)
+ break;
+ case 2:
strcat(str, "5.0GT/s ");
- else
+ break;
+ case 3:
+ strcat(str, "8.0GT/s ");
+ break;
+ default:
strcat(str, "<unknown> ");
+ break;
+ }
snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
strcat(str, lwstr);
@@ -719,7 +729,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_io, vha, 0x3013,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
goto qc24_host_busy_free_sp;
}
@@ -2144,7 +2154,7 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
/*
* PCI driver interface
*/
-static int __devinit
+static int
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret = -ENODEV;
@@ -2357,7 +2367,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Configure PCI I/O space */
ret = ha->isp_ops->iospace_config(ha);
if (ret)
- goto probe_hw_failed;
+ goto iospace_config_failed;
ql_log_pci(ql_log_info, pdev, 0x001d,
"Found an ISP%04X irq %d iobase 0x%p.\n",
@@ -2668,7 +2678,11 @@ probe_hw_failed:
qla82xx_idc_lock(ha);
qla82xx_clear_drv_active(ha);
qla82xx_idc_unlock(ha);
- iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ }
+iospace_config_failed:
+ if (IS_QLA82XX(ha)) {
+ if (!ha->nx_pcibase)
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
if (!ql2xdbwr)
iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
} else {
@@ -2755,6 +2769,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->flags.host_shutting_down = 1;
+ set_bit(UNLOADING, &base_vha->dpc_flags);
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
struct Scsi_Host *scsi_host;
@@ -2784,8 +2799,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
"Error while clearing DRV-Presence.\n");
}
- set_bit(UNLOADING, &base_vha->dpc_flags);
-
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
qla2x00_dfs_remove(base_vha);
@@ -3721,10 +3734,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (fcport->flags &
FCF_FCP2_DEVICE)
opts |= BIT_1;
- status2 =
- qla2x00_get_port_database(
- vha, fcport,
- opts);
+ status2 =
+ qla2x00_get_port_database(
+ vha, fcport, opts);
if (status2 != QLA_SUCCESS)
status = 1;
}
@@ -3836,7 +3848,7 @@ qla83xx_idc_state_handler_work(struct work_struct *work)
qla83xx_idc_unlock(base_vha, 0);
}
-int
+static int
qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
{
int rval = QLA_SUCCESS;
@@ -3954,7 +3966,7 @@ qla83xx_wait_logic(void)
}
}
-int
+static int
qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
{
int rval;
@@ -4013,7 +4025,7 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
return rval;
}
-int
+static int
qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
{
int rval = QLA_SUCCESS;
@@ -4212,7 +4224,7 @@ qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
return rval;
}
-void
+static void
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -4224,7 +4236,7 @@ qla83xx_need_reset_handler(scsi_qla_host_t *vha)
while (1) {
qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
- if (drv_ack == drv_presence)
+ if ((drv_ack & drv_presence) == drv_presence)
break;
if (time_after_eq(jiffies, ack_timeout)) {
@@ -4251,7 +4263,7 @@ qla83xx_need_reset_handler(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
}
-int
+static int
qla83xx_device_bootstrap(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -4505,9 +4517,9 @@ qla2x00_do_dpc(void *data)
"ISP abort end.\n");
}
- if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
+ if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
+ &base_vha->dpc_flags)) {
qla2x00_update_fcports(base_vha);
- clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
}
if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
@@ -4987,7 +4999,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
+static uint32_t
+qla82xx_error_recovery(scsi_qla_host_t *base_vha)
{
uint32_t rval = QLA_FUNCTION_FAILED;
uint32_t drv_active = 0;
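
[Editor's note] Among the qla_os.c changes above, qla24xx_pci_info_str() gains a switch so Gen3 links report "8.0GT/s" instead of "<unknown>". The sketch below decodes a Link Status value the same way; the bit layout is the standard PCIe encoding (speed in bits 3:0, negotiated width in bits 9:4) and the sample value is hard-coded purely for illustration.

#include <stdint.h>
#include <stdio.h>

static const char *pcie_speed_str(uint16_t lnksta)
{
	switch (lnksta & 0xf) {            /* Current Link Speed field */
	case 1: return "2.5GT/s";
	case 2: return "5.0GT/s";
	case 3: return "8.0GT/s";
	default: return "<unknown>";
	}
}

int main(void)
{
	uint16_t lnksta = 0x0043;          /* illustrative: Gen3 speed, x4 width */
	unsigned width = (lnksta >> 4) & 0x3f;

	printf("PCIe (%s x%u)\n", pcie_speed_str(lnksta), width);
	return 0;
}
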
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b49d21779a24..80f4b849e2b0 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1029,7 +1029,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
-void qlt_release(struct qla_tgt *tgt)
+static void qlt_release(struct qla_tgt *tgt)
{
struct qla_hw_data *ha = tgt->ha;
@@ -1264,8 +1264,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
struct qla_hw_data *ha = vha->hw;
+ struct se_session *se_sess = sess->se_sess;
struct qla_tgt_mgmt_cmd *mcmd;
+ struct se_cmd *se_cmd;
+ u32 lun = 0;
int rc;
+ bool found_lun = false;
+
+ spin_lock(&se_sess->sess_cmd_lock);
+ list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+ struct qla_tgt_cmd *cmd =
+ container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ if (cmd->tag == abts->exchange_addr_to_abort) {
+ lun = cmd->unpacked_lun;
+ found_lun = true;
+ break;
+ }
+ }
+ spin_unlock(&se_sess->sess_cmd_lock);
+
+ if (!found_lun)
+ return -ENOENT;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
@@ -1283,7 +1302,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cfe934e1af42..49697ca41e78 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.04.00.07-k"
+#define QLA2XXX_VERSION "8.04.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 4372e32bc95f..d182c96e17ea 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -620,8 +620,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
- cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
- transport_generic_request_failure(&cmd->se_cmd);
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
return;
}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index fbc546e893ac..4cec123a6a6a 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -5124,8 +5124,8 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
* It returns zero if successful. It also initializes all data necessary for
* the driver.
**/
-static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int qla4xxx_probe_adapter(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int ret = -ENODEV, status;
struct Scsi_Host *host;
@@ -5464,7 +5464,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
* qla4xxx_remove_adapter - callback function to remove adapter.
* @pci_dev: PCI device pointer
**/
-static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
+static void qla4xxx_remove_adapter(struct pci_dev *pdev)
{
struct scsi_qla_host *ha;
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 1e874f1fb5c6..13d628b56ff7 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -142,7 +142,7 @@ module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(iobase, "I/O address");
MODULE_PARM_DESC(irq, "IRQ");
-static int __devinit qlogicfas_detect(struct scsi_host_template *sht)
+static int qlogicfas_detect(struct scsi_host_template *sht)
{
struct Scsi_Host *shost;
struct qlogicfas408_priv *priv;
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 71fddbc60f18..6d48d30bed05 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -461,7 +461,7 @@ static int qlogicpti_reset_hardware(struct Scsi_Host *host)
#define PTI_RESET_LIMIT 400
-static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
+static int qlogicpti_load_firmware(struct qlogicpti *qpti)
{
const struct firmware *fw;
const char fwname[] = "qlogic/isp1000.bin";
@@ -670,7 +670,7 @@ static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
static irqreturn_t qpti_intr(int irq, void *dev_id);
-static void __devinit qpti_chain_add(struct qlogicpti *qpti)
+static void qpti_chain_add(struct qlogicpti *qpti)
{
spin_lock_irq(&qptichain_lock);
if (qptichain != NULL) {
@@ -686,7 +686,7 @@ static void __devinit qpti_chain_add(struct qlogicpti *qpti)
spin_unlock_irq(&qptichain_lock);
}
-static void __devexit qpti_chain_del(struct qlogicpti *qpti)
+static void qpti_chain_del(struct qlogicpti *qpti)
{
spin_lock_irq(&qptichain_lock);
if (qptichain == qpti) {
@@ -701,7 +701,7 @@ static void __devexit qpti_chain_del(struct qlogicpti *qpti)
spin_unlock_irq(&qptichain_lock);
}
-static int __devinit qpti_map_regs(struct qlogicpti *qpti)
+static int qpti_map_regs(struct qlogicpti *qpti)
{
struct platform_device *op = qpti->op;
@@ -724,7 +724,7 @@ static int __devinit qpti_map_regs(struct qlogicpti *qpti)
return 0;
}
-static int __devinit qpti_register_irq(struct qlogicpti *qpti)
+static int qpti_register_irq(struct qlogicpti *qpti)
{
struct platform_device *op = qpti->op;
@@ -749,7 +749,7 @@ fail:
return -1;
}
-static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
+static void qpti_get_scsi_id(struct qlogicpti *qpti)
{
struct platform_device *op = qpti->op;
struct device_node *dp;
@@ -803,7 +803,7 @@ static void qpti_get_clock(struct qlogicpti *qpti)
/* The request and response queues must each be aligned
* on a page boundary.
*/
-static int __devinit qpti_map_queues(struct qlogicpti *qpti)
+static int qpti_map_queues(struct qlogicpti *qpti)
{
struct platform_device *op = qpti->op;
@@ -1292,7 +1292,7 @@ static struct scsi_host_template qpti_template = {
};
static const struct of_device_id qpti_match[];
-static int __devinit qpti_sbus_probe(struct platform_device *op)
+static int qpti_sbus_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct Scsi_Host *host;
@@ -1402,7 +1402,7 @@ fail_unlink:
return -ENODEV;
}
-static int __devexit qpti_sbus_remove(struct platform_device *op)
+static int qpti_sbus_remove(struct platform_device *op)
{
struct qlogicpti *qpti = dev_get_drvdata(&op->dev);
@@ -1459,7 +1459,7 @@ static struct platform_driver qpti_sbus_driver = {
.of_match_table = qpti_match,
},
.probe = qpti_sbus_probe,
- .remove = __devexit_p(qpti_sbus_remove),
+ .remove = qpti_sbus_remove,
};
static int __init qpti_init(void)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9032e910bca3..f1bf5aff68ed 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1418,7 +1418,7 @@ static int scsi_lld_busy(struct request_queue *q)
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- if (blk_queue_dead(q))
+ if (blk_queue_dying(q))
return 0;
shost = sdev->host;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index dc0ad85853e2..8f6b12cbd224 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -16,16 +16,14 @@
#include "scsi_priv.h"
-static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
+static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *))
{
- struct device_driver *drv;
int err;
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
- drv = dev->driver;
- if (drv && drv->suspend) {
- err = drv->suspend(dev, msg);
+ if (cb) {
+ err = cb(dev);
if (err)
scsi_device_resume(to_scsi_device(dev));
}
@@ -34,14 +32,12 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
return err;
}
-static int scsi_dev_type_resume(struct device *dev)
+static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *))
{
- struct device_driver *drv;
int err = 0;
- drv = dev->driver;
- if (drv && drv->resume)
- err = drv->resume(dev);
+ if (cb)
+ err = cb(dev);
scsi_device_resume(to_scsi_device(dev));
dev_dbg(dev, "scsi resume: %d\n", err);
return err;
@@ -49,51 +45,39 @@ static int scsi_dev_type_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
-static int scsi_bus_suspend_common(struct device *dev, pm_message_t msg)
+static int
+scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
{
int err = 0;
if (scsi_is_sdev_device(dev)) {
/*
- * sd is the only high-level SCSI driver to implement runtime
- * PM, and sd treats runtime suspend, system suspend, and
- * system hibernate identically (but not system freeze).
+ * All the high-level SCSI drivers that implement runtime
+ * PM treat runtime suspend, system suspend, and system
+ * hibernate identically.
*/
- if (pm_runtime_suspended(dev)) {
- if (msg.event == PM_EVENT_SUSPEND ||
- msg.event == PM_EVENT_HIBERNATE)
- return 0; /* already suspended */
+ if (pm_runtime_suspended(dev))
+ return 0;
- /* wake up device so that FREEZE will succeed */
- pm_runtime_resume(dev);
- }
- err = scsi_dev_type_suspend(dev, msg);
+ err = scsi_dev_type_suspend(dev, cb);
}
+
return err;
}
-static int scsi_bus_resume_common(struct device *dev)
+static int
+scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *))
{
int err = 0;
- /*
- * Parent device may have runtime suspended as soon as
- * it is woken up during the system resume.
- *
- * Resume it on behalf of child.
- */
- pm_runtime_get_sync(dev->parent);
-
if (scsi_is_sdev_device(dev))
- err = scsi_dev_type_resume(dev);
+ err = scsi_dev_type_resume(dev, cb);
+
if (err == 0) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
-
- pm_runtime_put_sync(dev->parent);
-
return err;
}
@@ -112,26 +96,49 @@ static int scsi_bus_prepare(struct device *dev)
static int scsi_bus_suspend(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL);
+}
+
+static int scsi_bus_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->resume : NULL);
}
static int scsi_bus_freeze(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_FREEZE);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL);
+}
+
+static int scsi_bus_thaw(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL);
}
static int scsi_bus_poweroff(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_HIBERNATE);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL);
+}
+
+static int scsi_bus_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->restore : NULL);
}
#else /* CONFIG_PM_SLEEP */
-#define scsi_bus_resume_common NULL
#define scsi_bus_prepare NULL
#define scsi_bus_suspend NULL
+#define scsi_bus_resume NULL
#define scsi_bus_freeze NULL
+#define scsi_bus_thaw NULL
#define scsi_bus_poweroff NULL
+#define scsi_bus_restore NULL
#endif /* CONFIG_PM_SLEEP */
@@ -140,10 +147,12 @@ static int scsi_bus_poweroff(struct device *dev)
static int scsi_runtime_suspend(struct device *dev)
{
int err = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
dev_dbg(dev, "scsi_runtime_suspend\n");
if (scsi_is_sdev_device(dev)) {
- err = scsi_dev_type_suspend(dev, PMSG_AUTO_SUSPEND);
+ err = scsi_dev_type_suspend(dev,
+ pm ? pm->runtime_suspend : NULL);
if (err == -EAGAIN)
pm_schedule_suspend(dev, jiffies_to_msecs(
round_jiffies_up_relative(HZ/10)));
@@ -157,10 +166,11 @@ static int scsi_runtime_suspend(struct device *dev)
static int scsi_runtime_resume(struct device *dev)
{
int err = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
dev_dbg(dev, "scsi_runtime_resume\n");
if (scsi_is_sdev_device(dev))
- err = scsi_dev_type_resume(dev);
+ err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL);
/* Insert hooks here for targets, hosts, and transport classes */
@@ -239,11 +249,11 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
const struct dev_pm_ops scsi_bus_pm_ops = {
.prepare = scsi_bus_prepare,
.suspend = scsi_bus_suspend,
- .resume = scsi_bus_resume_common,
+ .resume = scsi_bus_resume,
.freeze = scsi_bus_freeze,
- .thaw = scsi_bus_resume_common,
+ .thaw = scsi_bus_thaw,
.poweroff = scsi_bus_poweroff,
- .restore = scsi_bus_resume_common,
+ .restore = scsi_bus_restore,
.runtime_suspend = scsi_runtime_suspend,
.runtime_resume = scsi_runtime_resume,
.runtime_idle = scsi_runtime_idle,
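
[Editor's note] The scsi_pm.c rework above stops calling the legacy driver->suspend/resume entry points: each bus-level PM phase now picks the matching dev_pm_ops callback and hands it to one common helper. The standalone sketch below (invented types, trimmed to the suspend side) shows that dispatch shape: quiesce first, then invoke the driver hook only if the driver provided one.

#include <stdio.h>

struct pm_ops {
	int (*suspend)(void *dev);
	int (*freeze)(void *dev);
	int (*poweroff)(void *dev);
};

struct device {
	const char *name;
	const struct pm_ops *pm;       /* may be NULL */
};

static int quiesce(struct device *dev)
{
	printf("%s: quiesced\n", dev->name);
	return 0;
}

static int bus_suspend_common(struct device *dev, int (*cb)(void *dev))
{
	int err = quiesce(dev);

	if (!err && cb)                /* only call into the driver if it has a hook */
		err = cb(dev);
	return err;
}

static int drv_freeze(void *dev)
{
	printf("%s: driver freeze\n", ((struct device *)dev)->name);
	return 0;
}

static const struct pm_ops drv_pm = { .freeze = drv_freeze };

int main(void)
{
	struct device d = { .name = "sda", .pm = &drv_pm };

	bus_suspend_common(&d, d.pm ? d.pm->suspend : NULL);   /* no suspend hook */
	bus_suspend_common(&d, d.pm ? d.pm->freeze  : NULL);   /* driver freeze runs */
	return 0;
}
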
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ce5224c92eda..931a7d954203 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -247,11 +247,11 @@ show_shost_active_mode(struct device *dev,
static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
-static int check_reset_type(char *str)
+static int check_reset_type(const char *str)
{
- if (strncmp(str, "adapter", 10) == 0)
+ if (sysfs_streq(str, "adapter"))
return SCSI_ADAPTER_RESET;
- else if (strncmp(str, "firmware", 10) == 0)
+ else if (sysfs_streq(str, "firmware"))
return SCSI_FIRMWARE_RESET;
else
return 0;
@@ -264,12 +264,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct scsi_host_template *sht = shost->hostt;
int ret = -EINVAL;
- char str[10];
int type;
- sscanf(buf, "%s", str);
- type = check_reset_type(str);
-
+ type = check_reset_type(buf);
if (!type)
goto exit_store_host_reset;
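
[Editor's note] The scsi_sysfs.c hunk above drops the sscanf() into a small fixed buffer and compares the sysfs input directly with sysfs_streq(), which treats a trailing newline as a terminator. The sketch below is a simplified re-implementation of that comparison for illustration only, not the kernel's helper.

#include <stdbool.h>
#include <stdio.h>

static bool streq_sketch(const char *s1, const char *s2)
{
	while (*s1 && *s1 == *s2) {
		s1++;
		s2++;
	}
	if (*s1 == *s2)
		return true;
	/* allow exactly one trailing newline on either side */
	if (*s1 == '\n' && s1[1] == '\0' && *s2 == '\0')
		return true;
	if (*s2 == '\n' && s2[1] == '\0' && *s1 == '\0')
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", streq_sketch("adapter\n", "adapter"));   /* 1 */
	printf("%d\n", streq_sketch("adapters", "adapter"));    /* 0 */
	printf("%d\n", streq_sketch("firmware", "firmware"));   /* 1 */
	return 0;
}
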
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 31969f2e13ce..59d427bf08e2 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -183,10 +183,10 @@ static struct attribute_group iscsi_endpoint_group = {
#define ISCSI_MAX_EPID -1
-static int iscsi_match_epid(struct device *dev, void *data)
+static int iscsi_match_epid(struct device *dev, const void *data)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- uint64_t *epid = (uint64_t *) data;
+ const uint64_t *epid = data;
return *epid == ep->id;
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f7565fc4f0e3..1b681427dde0 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -151,6 +151,7 @@ static struct {
{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
+ { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
};
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 21a045e0559f..f379c7f3034c 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -38,7 +38,7 @@ struct srp_host_attrs {
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
#define SRP_HOST_ATTRS 0
-#define SRP_RPORT_ATTRS 2
+#define SRP_RPORT_ATTRS 3
struct srp_internal {
struct scsi_transport_template t;
@@ -47,7 +47,6 @@ struct srp_internal {
struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];
struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
- struct device_attribute private_rport_attrs[SRP_RPORT_ATTRS];
struct transport_container rport_attr_cont;
};
@@ -72,24 +71,6 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
NULL, NULL, NULL);
-#define SETUP_TEMPLATE(attrb, field, perm, test, ro_test, ro_perm) \
- i->private_##attrb[count] = dev_attr_##field; \
- i->private_##attrb[count].attr.mode = perm; \
- if (ro_test) { \
- i->private_##attrb[count].attr.mode = ro_perm; \
- i->private_##attrb[count].store = NULL; \
- } \
- i->attrb[count] = &i->private_##attrb[count]; \
- if (test) \
- count++
-
-#define SETUP_RPORT_ATTRIBUTE_RD(field) \
- SETUP_TEMPLATE(rport_attrs, field, S_IRUGO, 1, 0, 0)
-
-#define SETUP_RPORT_ATTRIBUTE_RW(field) \
- SETUP_TEMPLATE(rport_attrs, field, S_IRUGO | S_IWUSR, \
- 1, 1, S_IRUGO)
-
#define SRP_PID(p) \
(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
@@ -135,6 +116,24 @@ show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);
+static ssize_t store_srp_rport_delete(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ if (i->f->rport_delete) {
+ i->f->rport_delete(rport);
+ return count;
+ } else {
+ return -ENOSYS;
+ }
+}
+
+static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);
+
static void srp_rport_release(struct device *dev)
{
struct srp_rport *rport = dev_to_rport(dev);
@@ -324,12 +323,16 @@ srp_attach_transport(struct srp_function_template *ft)
i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
i->rport_attr_cont.ac.class = &srp_rport_class.class;
i->rport_attr_cont.ac.match = srp_rport_match;
- transport_container_register(&i->rport_attr_cont);
count = 0;
- SETUP_RPORT_ATTRIBUTE_RD(port_id);
- SETUP_RPORT_ATTRIBUTE_RD(roles);
- i->rport_attrs[count] = NULL;
+ i->rport_attrs[count++] = &dev_attr_port_id;
+ i->rport_attrs[count++] = &dev_attr_roles;
+ if (ft->rport_delete)
+ i->rport_attrs[count++] = &dev_attr_delete;
+ i->rport_attrs[count++] = NULL;
+ BUG_ON(count > ARRAY_SIZE(i->rport_attrs));
+
+ transport_container_register(&i->rport_attr_cont);
i->f = ft;
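
The SETUP_TEMPLATE/SETUP_RPORT_ATTRIBUTE_* macros and the private per-instance attribute copies are gone: the rport attribute array is filled with plain &dev_attr_* pointers, the new write-only "delete" attribute is added only when the transport implements ->rport_delete, and the container is registered after the array is complete. A hedged example of how a transport would opt in follows; the callback name is hypothetical and its void signature is inferred from the call site in store_srp_rport_delete().

static void example_rport_delete(struct srp_rport *rport)
{
	/* tear down this remote port's connection state */
}

static struct srp_function_template example_srp_ft = {
	.rport_delete = example_rport_delete,	/* enables the "delete" attribute */
};

/* ... later: srp_attach_transport(&example_srp_ft); */

With the hook set, userspace can remove a remote port by writing to the rport's new delete attribute; transports that leave it NULL simply never expose the attribute.
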
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 352bc77b7c88..7992635d405f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -105,7 +105,7 @@ static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
-static int sd_suspend(struct device *, pm_message_t state);
+static int sd_suspend(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *);
@@ -465,15 +465,23 @@ static struct class sd_disk_class = {
.dev_attrs = sd_disk_attrs,
};
+static const struct dev_pm_ops sd_pm_ops = {
+ .suspend = sd_suspend,
+ .resume = sd_resume,
+ .poweroff = sd_suspend,
+ .restore = sd_resume,
+ .runtime_suspend = sd_suspend,
+ .runtime_resume = sd_resume,
+};
+
static struct scsi_driver sd_template = {
.owner = THIS_MODULE,
.gendrv = {
.name = "sd",
.probe = sd_probe,
.remove = sd_remove,
- .suspend = sd_suspend,
- .resume = sd_resume,
.shutdown = sd_shutdown,
+ .pm = &sd_pm_ops,
},
.rescan = sd_rescan,
.done = sd_done,
@@ -1011,7 +1019,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
- } else if (block > 0xffffffff) {
+ } else if (sdp->use_16_for_rw) {
SCpnt->cmnd[0] += READ_16 - READ_6;
SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
@@ -2203,6 +2211,8 @@ got_data:
}
}
+ sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff);
+
/* Rescale capacity to 512-byte units */
if (sector_size == 4096)
sdkp->capacity <<= 3;
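
These two sd hunks replace the per-request `block > 0xffffffff` test with a per-device flag: use_16_for_rw is computed once when the capacity is read, so a large-capacity device now issues READ(16)/WRITE(16) for every request instead of only for requests starting above the 32-bit LBA boundary. A small sketch of the before/after decision, with the field names taken from the diff and the helper name hypothetical:

static bool example_need_rw16(struct scsi_device *sdp, sector_t block)
{
	/* old: decided per request, from the starting LBA */
	/* return block > 0xffffffff; */

	/* new: decided once per device, when the capacity was read */
	return sdp->use_16_for_rw;
}
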
@@ -3052,7 +3062,7 @@ exit:
scsi_disk_put(sdkp);
}
-static int sd_suspend(struct device *dev, pm_message_t mesg)
+static int sd_suspend(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
int ret = 0;
@@ -3067,7 +3077,7 @@ static int sd_suspend(struct device *dev, pm_message_t mesg)
goto done;
}
- if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) {
+ if (sdkp->device->manage_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
ret = sd_start_stop_device(sdkp, 0);
}
@@ -3116,10 +3126,6 @@ static int __init init_sd(void)
if (err)
goto err_out;
- err = scsi_register_driver(&sd_template.gendrv);
- if (err)
- goto err_out_class;
-
sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
0, 0, NULL);
if (!sd_cdb_cache) {
@@ -3133,8 +3139,15 @@ static int __init init_sd(void)
goto err_out_cache;
}
+ err = scsi_register_driver(&sd_template.gendrv);
+ if (err)
+ goto err_out_driver;
+
return 0;
+err_out_driver:
+ mempool_destroy(sd_cdb_pool);
+
err_out_cache:
kmem_cache_destroy(sd_cdb_cache);
@@ -3157,10 +3170,10 @@ static void __exit exit_sd(void)
SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
+ scsi_unregister_driver(&sd_template.gendrv);
mempool_destroy(sd_cdb_pool);
kmem_cache_destroy(sd_cdb_cache);
- scsi_unregister_driver(&sd_template.gendrv);
class_unregister(&sd_disk_class);
for (i = 0; i < SD_MAJORS; i++)
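
Besides moving sd's suspend/resume into a dev_pm_ops table, the init/exit hunks reorder module setup so scsi_register_driver() runs only after the CDB cache and mempool exist, with a matching unwind label, and exit_sd() unregisters the driver first. A generic sketch of that register-last / tear-down-in-reverse shape, with the resource helpers left hypothetical:

static int __init example_init(void)
{
	int err;

	err = example_setup_resources();	/* caches, mempools, classes ... */
	if (err)
		return err;

	err = scsi_register_driver(&sd_template.gendrv);	/* last: probes may start now */
	if (err)
		example_teardown_resources();	/* nothing else references them yet */
	return err;
}

static void __exit example_exit(void)
{
	scsi_unregister_driver(&sd_template.gendrv);	/* first: stop new probes */
	example_teardown_resources();
}
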
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 3a9d85ca6047..a464d959f66e 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -226,7 +226,7 @@ static struct scsi_host_template sgiwd93_template = {
.use_clustering = DISABLE_CLUSTERING,
};
-static int __devinit sgiwd93_probe(struct platform_device *pdev)
+static int sgiwd93_probe(struct platform_device *pdev)
{
struct sgiwd93_platform_data *pd = pdev->dev.platform_data;
unsigned char *wdregs = pd->wdregs;
@@ -312,7 +312,7 @@ static int __exit sgiwd93_remove(struct platform_device *pdev)
static struct platform_driver sgiwd93_driver = {
.probe = sgiwd93_probe,
- .remove = __devexit_p(sgiwd93_remove),
+ .remove = sgiwd93_remove,
.driver = {
.name = "sgiwd93",
.owner = THIS_MODULE,
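
From here on, most of the SCSI (and later SPI) hunks are the same mechanical cleanup: with CONFIG_HOTPLUG effectively always enabled, the __devinit/__devexit/__devinitdata section annotations and the __devexit_p() wrapper no longer do anything, so they are stripped from probe/remove functions and ID tables. A before/after sketch with a hypothetical driver name:

/* before */
static int __devinit foo_probe(struct platform_device *pdev) { return 0; }
static int __devexit foo_remove(struct platform_device *pdev) { return 0; }

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= __devexit_p(foo_remove),	/* NULL when !CONFIG_HOTPLUG */
};

/* after */
static int foo_probe(struct platform_device *pdev) { return 0; }
static int foo_remove(struct platform_device *pdev) { return 0; }

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
};
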
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index a318264a4ba1..3b3b56f4a830 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -94,9 +94,9 @@ static struct scsi_host_template sim710_driver_template = {
.module = THIS_MODULE,
};
-static __devinit int
-sim710_probe_common(struct device *dev, unsigned long base_addr,
- int irq, int clock, int differential, int scsi_id)
+static int sim710_probe_common(struct device *dev, unsigned long base_addr,
+ int irq, int clock, int differential,
+ int scsi_id)
{
struct Scsi_Host * host = NULL;
struct NCR_700_Host_Parameters *hostdata =
@@ -153,8 +153,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
return -ENODEV;
}
-static __devexit int
-sim710_device_remove(struct device *dev)
+static int sim710_device_remove(struct device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata =
@@ -221,7 +220,7 @@ static struct eisa_driver sim710_eisa_driver = {
.driver = {
.name = "sim710",
.probe = sim710_eisa_probe,
- .remove = __devexit_p(sim710_device_remove),
+ .remove = sim710_device_remove,
},
};
#endif /* CONFIG_EISA */
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index cf51432f8e72..52d54e7425db 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -65,7 +65,7 @@ static struct scsi_host_template snirm710_template = {
.module = THIS_MODULE,
};
-static int __devinit snirm710_probe(struct platform_device *dev)
+static int snirm710_probe(struct platform_device *dev)
{
unsigned long base;
struct NCR_700_Host_Parameters *hostdata;
@@ -134,7 +134,7 @@ static int __exit snirm710_driver_remove(struct platform_device *dev)
static struct platform_driver snirm710_driver = {
.probe = snirm710_probe,
- .remove = __devexit_p(snirm710_driver_remove),
+ .remove = snirm710_driver_remove,
.driver = {
.name = "snirm_53c710",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 606215e54b88..325c31caa6e0 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1540,8 +1540,7 @@ static void stex_free_irq(struct st_hba *hba)
pci_disable_msi(pdev);
}
-static int __devinit
-stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct st_hba *hba;
struct Scsi_Host *host;
@@ -1815,7 +1814,7 @@ static struct pci_driver stex_pci_driver = {
.name = DRV_NAME,
.id_table = stex_pci_tbl,
.probe = stex_probe,
- .remove = __devexit_p(stex_remove),
+ .remove = stex_remove,
.shutdown = stex_shutdown,
};
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 01440782feb2..270b3cf6f372 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1410,13 +1410,13 @@ enum {
static const struct hv_vmbus_device_id id_table[] = {
/* SCSI guid */
- { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
- 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
- .driver_data = SCSI_GUID },
+ { HV_SCSI_GUID,
+ .driver_data = SCSI_GUID
+ },
/* IDE guid */
- { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
- 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
- .driver_data = IDE_GUID },
+ { HV_IDE_GUID,
+ .driver_data = IDE_GUID
+ },
{ },
};
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 0621037f0271..534eb96fc3a7 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -194,7 +194,7 @@ static const struct esp_driver_ops sun3x_esp_ops = {
.dma_error = sun3x_esp_dma_error,
};
-static int __devinit esp_sun3x_probe(struct platform_device *dev)
+static int esp_sun3x_probe(struct platform_device *dev)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
@@ -268,7 +268,7 @@ fail:
return err;
}
-static int __devexit esp_sun3x_remove(struct platform_device *dev)
+static int esp_sun3x_remove(struct platform_device *dev)
{
struct esp *esp = dev_get_drvdata(&dev->dev);
unsigned int irq = esp->host->irq;
@@ -292,7 +292,7 @@ static int __devexit esp_sun3x_remove(struct platform_device *dev)
static struct platform_driver esp_sun3x_driver = {
.probe = esp_sun3x_probe,
- .remove = __devexit_p(esp_sun3x_remove),
+ .remove = esp_sun3x_remove,
.driver = {
.name = "sun3x_esp",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 676fe9ac7f61..f2e68459f7ea 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -43,8 +43,7 @@ enum dvma_rev {
dvmahme
};
-static int __devinit esp_sbus_setup_dma(struct esp *esp,
- struct platform_device *dma_of)
+static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
{
esp->dma = dma_of;
@@ -79,7 +78,7 @@ static int __devinit esp_sbus_setup_dma(struct esp *esp,
}
-static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
+static int esp_sbus_map_regs(struct esp *esp, int hme)
{
struct platform_device *op = esp->dev;
struct resource *res;
@@ -99,7 +98,7 @@ static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
return 0;
}
-static int __devinit esp_sbus_map_command_block(struct esp *esp)
+static int esp_sbus_map_command_block(struct esp *esp)
{
struct platform_device *op = esp->dev;
@@ -111,7 +110,7 @@ static int __devinit esp_sbus_map_command_block(struct esp *esp)
return 0;
}
-static int __devinit esp_sbus_register_irq(struct esp *esp)
+static int esp_sbus_register_irq(struct esp *esp)
{
struct Scsi_Host *host = esp->host;
struct platform_device *op = esp->dev;
@@ -120,7 +119,7 @@ static int __devinit esp_sbus_register_irq(struct esp *esp)
return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}
-static void __devinit esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
+static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
struct platform_device *op = esp->dev;
struct device_node *dp;
@@ -142,7 +141,7 @@ done:
esp->scsi_id_mask = (1 << esp->scsi_id);
}
-static void __devinit esp_get_differential(struct esp *esp)
+static void esp_get_differential(struct esp *esp)
{
struct platform_device *op = esp->dev;
struct device_node *dp;
@@ -154,7 +153,7 @@ static void __devinit esp_get_differential(struct esp *esp)
esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}
-static void __devinit esp_get_clock_params(struct esp *esp)
+static void esp_get_clock_params(struct esp *esp)
{
struct platform_device *op = esp->dev;
struct device_node *bus_dp, *dp;
@@ -170,7 +169,7 @@ static void __devinit esp_get_clock_params(struct esp *esp)
esp->cfreq = fmhz;
}
-static void __devinit esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
+static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
struct device_node *dma_dp = dma_of->dev.of_node;
struct platform_device *op = esp->dev;
@@ -195,7 +194,7 @@ static void __devinit esp_get_bursts(struct esp *esp, struct platform_device *dm
esp->bursts = bursts;
}
-static void __devinit esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
+static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
esp_get_scsi_id(esp, espdma);
esp_get_differential(esp);
@@ -487,9 +486,8 @@ static const struct esp_driver_ops sbus_esp_ops = {
.dma_error = sbus_esp_dma_error,
};
-static int __devinit esp_sbus_probe_one(struct platform_device *op,
- struct platform_device *espdma,
- int hme)
+static int esp_sbus_probe_one(struct platform_device *op,
+ struct platform_device *espdma, int hme)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
@@ -562,7 +560,7 @@ fail:
return err;
}
-static int __devinit esp_sbus_probe(struct platform_device *op)
+static int esp_sbus_probe(struct platform_device *op)
{
struct device_node *dma_node = NULL;
struct device_node *dp = op->dev.of_node;
@@ -585,7 +583,7 @@ static int __devinit esp_sbus_probe(struct platform_device *op)
return esp_sbus_probe_one(op, dma_of, hme);
}
-static int __devexit esp_sbus_remove(struct platform_device *op)
+static int esp_sbus_remove(struct platform_device *op)
{
struct esp *esp = dev_get_drvdata(&op->dev);
struct platform_device *dma_of = esp->dma;
@@ -639,7 +637,7 @@ static struct platform_driver esp_sbus_driver = {
.of_match_table = esp_match,
},
.probe = esp_sbus_probe,
- .remove = __devexit_p(esp_sbus_remove),
+ .remove = esp_sbus_remove,
};
static int __init sunesp_init(void)
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index ac4eca6a5328..0b7819f3e09b 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -581,7 +581,7 @@ static int sym53c416_test(int base)
}
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('S','L','I'), ISAPNP_FUNCTION(0x4161), 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index e2b8e68b57e7..599568299fbe 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1284,8 +1284,7 @@ static int sym53c8xx_proc_info(struct Scsi_Host *shost, char *buffer,
* sym_free_resources() should be used instead of this function after calling
* sym_attach().
*/
-static void __devinit
-sym_iounmap_device(struct sym_device *device)
+static void sym_iounmap_device(struct sym_device *device)
{
if (device->s.ioaddr)
pci_iounmap(device->pdev, device->s.ioaddr);
@@ -1325,8 +1324,8 @@ static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
* If all is OK, install interrupt handling and
* start the timer daemon.
*/
-static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
- int unit, struct sym_device *dev)
+static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
+ struct sym_device *dev)
{
struct sym_data *sym_data;
struct sym_hcb *np = NULL;
@@ -1481,7 +1480,7 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
* Detect and try to read SYMBIOS and TEKRAM NVRAM.
*/
#if SYM_CONF_NVRAM_SUPPORT
-static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
+static void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
{
devp->nvram = nvp;
nvp->type = 0;
@@ -1494,7 +1493,7 @@ static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
}
#endif /* SYM_CONF_NVRAM_SUPPORT */
-static int __devinit sym_check_supported(struct sym_device *device)
+static int sym_check_supported(struct sym_device *device)
{
struct sym_chip *chip;
struct pci_dev *pdev = device->pdev;
@@ -1531,7 +1530,7 @@ static int __devinit sym_check_supported(struct sym_device *device)
* Ignore Symbios chips controlled by various RAID controllers.
* These controllers set value 0x52414944 at RAM end - 16.
*/
-static int __devinit sym_check_raid(struct sym_device *device)
+static int sym_check_raid(struct sym_device *device)
{
unsigned int ram_size, ram_val;
@@ -1552,7 +1551,7 @@ static int __devinit sym_check_raid(struct sym_device *device)
return -ENODEV;
}
-static int __devinit sym_set_workarounds(struct sym_device *device)
+static int sym_set_workarounds(struct sym_device *device)
{
struct sym_chip *chip = &device->chip;
struct pci_dev *pdev = device->pdev;
@@ -1602,8 +1601,7 @@ static int __devinit sym_set_workarounds(struct sym_device *device)
/*
* Map HBA registers and on-chip SRAM (if present).
*/
-static int __devinit
-sym_iomap_device(struct sym_device *device)
+static int sym_iomap_device(struct sym_device *device)
{
struct pci_dev *pdev = device->pdev;
struct pci_bus_region bus_addr;
@@ -1751,8 +1749,7 @@ static struct scsi_host_template sym2_template = {
static int attach_count;
-static int __devinit sym2_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct sym_device sym_dev;
struct sym_nvram nvram;
@@ -2077,7 +2074,7 @@ static struct spi_function_template sym2_transport_functions = {
.get_signalling = sym2_get_signalling,
};
-static struct pci_device_id sym2_id_table[] __devinitdata = {
+static struct pci_device_id sym2_id_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index a1baccce05f0..9327f5fcec4e 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2219,7 +2219,7 @@ static struct scsi_host_template driver_template = {
*
**********************************************************************/
-static void __devinit dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
+static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
{
u8 carryFlag = 1, j = 0x80, bval;
int i;
@@ -2242,7 +2242,7 @@ static void __devinit dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
}
}
-static u16 __devinit dc390_eeprom_get_data(struct pci_dev *pdev)
+static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
{
int i;
u16 wval = 0;
@@ -2264,7 +2264,7 @@ static u16 __devinit dc390_eeprom_get_data(struct pci_dev *pdev)
return wval;
}
-static void __devinit dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
+static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
{
u8 cmd = EEPROM_READ, i;
@@ -2282,7 +2282,7 @@ static void __devinit dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
}
/* Override EEprom values with explicitly set values */
-static void __devinit dc390_eeprom_override(u8 index)
+static void dc390_eeprom_override(u8 index)
{
u8 *ptr = (u8 *) dc390_eepromBuf[index], id;
@@ -2305,7 +2305,7 @@ static void __devinit dc390_eeprom_override(u8 index)
}
}
-static int __devinitdata tmscsim_def[] = {
+static int tmscsim_def[] = {
7,
0 /* 10MHz */,
PARITY_CHK_ | SEND_START_ | EN_DISCONNECT_ | SYNC_NEGO_ | TAG_QUEUEING_,
@@ -2315,7 +2315,7 @@ static int __devinitdata tmscsim_def[] = {
};
/* Copy defaults over set values where missing */
-static void __devinit dc390_fill_with_defaults (void)
+static void dc390_fill_with_defaults (void)
{
int i;
@@ -2335,7 +2335,7 @@ static void __devinit dc390_fill_with_defaults (void)
tmscsim[5] = 180;
}
-static void __devinit dc390_check_eeprom(struct pci_dev *pdev, u8 index)
+static void dc390_check_eeprom(struct pci_dev *pdev, u8 index)
{
u8 interpd[] = {1, 3, 5, 10, 16, 30, 60, 120};
u8 EEbuf[128];
@@ -2372,7 +2372,7 @@ static void __devinit dc390_check_eeprom(struct pci_dev *pdev, u8 index)
}
}
-static void __devinit dc390_init_hw(struct dc390_acb *pACB, u8 index)
+static void dc390_init_hw(struct dc390_acb *pACB, u8 index)
{
struct Scsi_Host *shost = pACB->pScsiHost;
u8 dstate;
@@ -2422,8 +2422,7 @@ static void __devinit dc390_init_hw(struct dc390_acb *pACB, u8 index)
DC390_write8(DMA_Status, dstate);
}
-static int __devinit dc390_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct dc390_acb *pACB;
struct Scsi_Host *shost;
@@ -2532,7 +2531,7 @@ static int __devinit dc390_probe_one(struct pci_dev *pdev,
*
* @dev: The PCI device to remove.
*/
-static void __devexit dc390_remove_one(struct pci_dev *dev)
+static void dc390_remove_one(struct pci_dev *dev)
{
struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
unsigned long iflags;
@@ -2568,7 +2567,7 @@ static struct pci_driver dc390_driver = {
.name = "tmscsim",
.id_table = tmscsim_pci_tbl,
.probe = dc390_probe_one,
- .remove = __devexit_p(dc390_remove_one),
+ .remove = dc390_remove_one,
};
static int __init dc390_module_init(void)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 58f4ba6fe412..91a4046ca9ba 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1811,8 +1811,7 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
*
* Returns 0 on success, non-zero value on failure
*/
-static int __devinit
-ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
@@ -1947,7 +1946,7 @@ static struct pci_driver ufshcd_pci_driver = {
.name = UFSHCD,
.id_table = ufshcd_pci_tbl,
.probe = ufshcd_probe,
- .remove = __devexit_p(ufshcd_remove),
+ .remove = ufshcd_remove,
.shutdown = ufshcd_shutdown,
#ifdef CONFIG_PM
.suspend = ufshcd_suspend,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 595af1ae4421..3449a1f8c656 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -215,7 +215,7 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
struct virtio_scsi_event_node *event_node)
{
- int ret;
+ int err;
struct scatterlist sg;
unsigned long flags;
@@ -223,13 +223,14 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
- ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
- if (ret >= 0)
+ err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node,
+ GFP_ATOMIC);
+ if (!err)
virtqueue_kick(vscsi->event_vq.vq);
spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
- return ret;
+ return err;
}
static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
@@ -410,22 +411,23 @@ static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
{
unsigned int out_num, in_num;
unsigned long flags;
- int ret;
+ int err;
+ bool needs_kick = false;
spin_lock_irqsave(&tgt->tgt_lock, flags);
virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
spin_lock(&vq->vq_lock);
- ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
+ err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
spin_unlock(&tgt->tgt_lock);
- if (ret >= 0)
- ret = virtqueue_kick_prepare(vq->vq);
+ if (!err)
+ needs_kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->vq_lock, flags);
- if (ret > 0)
+ if (needs_kick)
virtqueue_notify(vq->vq);
- return ret;
+ return err;
}
static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
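
virtscsi_kick_event() and virtscsi_kick_cmd() are adjusted for virtqueue_add_buf() now returning 0 or a negative errno rather than remaining ring capacity: success is `!err`, and the kick decision is split into virtqueue_kick_prepare() under the lock and virtqueue_notify() after it, since notifying may trap out to the host; the queuecommand hunk below additionally frees the command back to its mempool when the add fails. A condensed sketch of the pattern, using only names visible in these hunks:

static int example_add_and_kick(struct virtqueue *vq, spinlock_t *lock,
				struct scatterlist *sg, unsigned int out_num,
				unsigned int in_num, void *data)
{
	unsigned long flags;
	bool needs_kick = false;
	int err;

	spin_lock_irqsave(lock, flags);
	err = virtqueue_add_buf(vq, sg, out_num, in_num, data, GFP_ATOMIC);
	if (!err)
		needs_kick = virtqueue_kick_prepare(vq);	/* cheap, under the lock */
	spin_unlock_irqrestore(lock, flags);

	if (needs_kick)
		virtqueue_notify(vq);	/* may exit to the host: keep it outside the lock */
	return err;
}
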
@@ -467,8 +469,10 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
- GFP_ATOMIC) >= 0)
+ GFP_ATOMIC) == 0)
ret = 0;
+ else
+ mempool_free(cmd, virtscsi_cmd_pool);
out:
return ret;
@@ -675,7 +679,7 @@ out:
return err;
}
-static int __devinit virtscsi_probe(struct virtio_device *vdev)
+static int virtscsi_probe(struct virtio_device *vdev)
{
struct Scsi_Host *shost;
struct virtio_scsi *vscsi;
@@ -729,7 +733,7 @@ virtscsi_init_failed:
return err;
}
-static void __devexit virtscsi_remove(struct virtio_device *vdev)
+static void virtscsi_remove(struct virtio_device *vdev)
{
struct Scsi_Host *shost = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(shost);
@@ -781,7 +785,7 @@ static struct virtio_driver virtio_scsi_driver = {
.freeze = virtscsi_freeze,
.restore = virtscsi_restore,
#endif
- .remove = __devexit_p(virtscsi_remove),
+ .remove = virtscsi_remove,
};
static int __init init(void)
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 20b3a483c2cc..3bfaa66fa0d1 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -397,7 +397,7 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}
-static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
+static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
&adapter->ringStatePA);
@@ -1152,7 +1152,7 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
* just use a statically allocated scatter list.
*
*/
-static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
+static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
struct pvscsi_ctx *ctx;
int i;
@@ -1233,8 +1233,7 @@ exit:
return numPhys;
}
-static int __devinit pvscsi_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pvscsi_adapter *adapter;
struct Scsi_Host *host;
@@ -1454,7 +1453,7 @@ static struct pci_driver pvscsi_pci_driver = {
.name = "vmw_pvscsi",
.id_table = pvscsi_pci_tbl,
.probe = pvscsi_probe,
- .remove = __devexit_p(pvscsi_remove),
+ .remove = pvscsi_remove,
.shutdown = pvscsi_shutdown,
};
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 27e84e4b1fa9..97ccb0383539 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -182,7 +182,7 @@ static struct parisc_driver zalon_driver = {
.name = "zalon",
.id_table = zalon_tbl,
.probe = zalon_probe,
- .remove = __devexit_p(zalon_remove),
+ .remove = zalon_remove,
};
static int __init zalon7xx_init(void)
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index e17764d71476..cbf3476c68cd 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -38,7 +38,7 @@ static struct zorro_driver_data {
const char *name;
unsigned long offset;
int absolute; /* offset is absolute address */
-} zorro7xx_driver_data[] __devinitdata = {
+} zorro7xx_driver_data[] = {
{ .name = "PowerUP 603e+", .offset = 0xf40000, .absolute = 1 },
{ .name = "WarpEngine 40xx", .offset = 0x40000 },
{ .name = "A4091", .offset = 0x800000 },
@@ -46,7 +46,7 @@ static struct zorro_driver_data {
{ 0 }
};
-static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id zorro7xx_zorro_tbl[] = {
{
.id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS,
.driver_data = (unsigned long)&zorro7xx_driver_data[0],
@@ -71,8 +71,8 @@ static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
};
MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl);
-static int __devinit zorro7xx_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int zorro7xx_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
struct Scsi_Host *host;
struct NCR_700_Host_Parameters *hostdata;
@@ -150,7 +150,7 @@ static int __devinit zorro7xx_init_one(struct zorro_dev *z,
return -ENODEV;
}
-static __devexit void zorro7xx_remove_one(struct zorro_dev *z)
+static void zorro7xx_remove_one(struct zorro_dev *z)
{
struct Scsi_Host *host = zorro_get_drvdata(z);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
@@ -167,7 +167,7 @@ static struct zorro_driver zorro7xx_driver = {
.name = "zorro7xx-scsi",
.id_table = zorro7xx_zorro_tbl,
.probe = zorro7xx_init_one,
- .remove = __devexit_p(zorro7xx_remove_one),
+ .remove = zorro7xx_remove_one,
};
static int __init zorro7xx_scsi_init(void)
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
index d860ef743568..f168a6159961 100644
--- a/drivers/sh/Kconfig
+++ b/drivers/sh/Kconfig
@@ -1,6 +1,5 @@
menu "SuperH / SH-Mobile Driver Options"
source "drivers/sh/intc/Kconfig"
-source "drivers/sh/pfc/Kconfig"
endmenu
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index e57895b1a425..fc67f564f02c 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -5,7 +5,6 @@ obj-y := intc/
obj-$(CONFIG_HAVE_CLK) += clk/
obj-$(CONFIG_MAPLE) += maple/
-obj-$(CONFIG_SH_PFC) += pfc/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-y += pm_runtime.o
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index b3dc44146ca0..1ebe67cd1833 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -126,6 +126,12 @@ static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
static int sh_clk_div_enable(struct clk *clk)
{
+ if (clk->div_mask == SH_CLK_DIV6_MSK) {
+ int ret = sh_clk_div_set_rate(clk, clk->rate);
+ if (ret < 0)
+ return ret;
+ }
+
sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
return 0;
}
@@ -401,7 +407,6 @@ static int fsidiv_enable(struct clk *clk)
static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
- u32 val;
int idx;
idx = (clk->parent->rate / rate) & 0xffff;
diff --git a/drivers/sh/pfc/Kconfig b/drivers/sh/pfc/Kconfig
deleted file mode 100644
index 804f9ad1bf4a..000000000000
--- a/drivers/sh/pfc/Kconfig
+++ /dev/null
@@ -1,26 +0,0 @@
-comment "Pin function controller options"
-
-config SH_PFC
- # XXX move off the gpio dependency
- depends on GENERIC_GPIO
- select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
- select PINCTRL_SH_PFC
- def_bool y
-
-#
-# Placeholder for now, rehome to drivers/pinctrl once the PFC APIs
-# have settled.
-#
-config PINCTRL_SH_PFC
- tristate "SuperH PFC pin controller driver"
- depends on SH_PFC
- select PINCTRL
- select PINMUX
- select PINCONF
-
-config GPIO_SH_PFC
- tristate "SuperH PFC GPIO support"
- depends on SH_PFC && GPIOLIB
- help
- This enables support for GPIOs within the SoC's pin function
- controller.
diff --git a/drivers/sh/pfc/Makefile b/drivers/sh/pfc/Makefile
deleted file mode 100644
index 7916027cce37..000000000000
--- a/drivers/sh/pfc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y += core.o
-obj-$(CONFIG_PINCTRL_SH_PFC) += pinctrl.o
-obj-$(CONFIG_GPIO_SH_PFC) += gpio.o
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index b3b33fa26acd..fb7ea0d9a734 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -575,11 +575,10 @@ void ioc3_unregister_submodule(struct ioc3_submodule *is)
* Device management *
*********************/
-static char * __devinitdata
-ioc3_class_names[]={"unknown", "IP27 BaseIO", "IP30 system", "MENET 1/2/3",
- "MENET 4", "CADduo", "Altix Serial"};
+static char *ioc3_class_names[] = { "unknown", "IP27 BaseIO", "IP30 system",
+ "MENET 1/2/3", "MENET 4", "CADduo", "Altix Serial" };
-static int __devinit ioc3_class(struct ioc3_driver_data *idd)
+static int ioc3_class(struct ioc3_driver_data *idd)
{
int res = IOC3_CLASS_NONE;
/* NIC-based logic */
@@ -602,8 +601,7 @@ static int __devinit ioc3_class(struct ioc3_driver_data *idd)
return res;
}
/* Adds a new instance of an IOC3 card */
-static int __devinit
-ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
struct ioc3_driver_data *idd;
uint32_t pcmd;
@@ -755,7 +753,7 @@ out:
}
/* Removes a particular instance of an IOC3 card. */
-static void __devexit ioc3_remove(struct pci_dev *pdev)
+static void ioc3_remove(struct pci_dev *pdev)
{
int id;
struct ioc3_driver_data *idd;
@@ -807,7 +805,7 @@ static struct pci_driver ioc3_driver = {
.name = "IOC3",
.id_table = ioc3_id_table,
.probe = ioc3_probe,
- .remove = __devexit_p(ioc3_remove),
+ .remove = ioc3_remove,
};
MODULE_DEVICE_TABLE(pci, ioc3_id_table);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1acae359cabe..f80eee74a311 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -88,7 +88,7 @@ config SPI_BFIN_SPORT
config SPI_AU1550
tristate "Au1550/Au1200/Au1300 SPI Controller"
- depends on MIPS_ALCHEMY && EXPERIMENTAL
+ depends on MIPS_ALCHEMY
select SPI_BITBANG
help
If you say yes to this option, support will be included for the
@@ -123,6 +123,13 @@ config SPI_BUTTERFLY
inexpensive battery powered microcontroller evaluation board.
This same cable can be used to flash new firmware.
+config SPI_CLPS711X
+ tristate "CLPS711X host SPI controller"
+ depends on ARCH_CLPS711X
+ help
+ This enables dedicated general purpose SPI/Microwire1-compatible
+ master mode interface (SSI1) for CLPS711X-based CPUs.
+
config SPI_COLDFIRE_QSPI
tristate "Freescale Coldfire QSPI controller"
depends on (M520x || M523x || M5249 || M525x || M527x || M528x || M532x)
@@ -181,7 +188,7 @@ config SPI_IMX
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
- depends on PARPORT && EXPERIMENTAL
+ depends on PARPORT
select SPI_BITBANG
help
This driver supports the NS LM70 LLP Evaluation Board,
@@ -197,7 +204,7 @@ config SPI_MPC52xx
config SPI_MPC52xx_PSC
tristate "Freescale MPC52xx PSC SPI controller"
- depends on PPC_MPC52xx && EXPERIMENTAL
+ depends on PPC_MPC52xx
help
This enables using the Freescale MPC52xx Programmable Serial
Controller in master SPI mode.
@@ -266,8 +273,8 @@ config SPI_OMAP_100K
OMAP SPI 100K master controller for omap7xx boards.
config SPI_ORION
- tristate "Orion SPI master (EXPERIMENTAL)"
- depends on PLAT_ORION && EXPERIMENTAL
+ tristate "Orion SPI master"
+ depends on PLAT_ORION
help
This enables using the SPI master controller on the Orion chips.
@@ -290,9 +297,20 @@ config SPI_PPC4xx
help
This selects a driver for the PPC4xx SPI Controller.
+config SPI_PXA2XX_PXADMA
+ bool "PXA2xx SSP legacy PXA DMA API support"
+ depends on SPI_PXA2XX && ARCH_PXA
+ help
+ Enable PXA private legacy DMA API support. Note that this is
+ deprecated in favor of generic DMA engine API.
+
+config SPI_PXA2XX_DMA
+ def_bool y
+ depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA
+
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
- depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL
+ depends on ARCH_PXA || PCI || ACPI
select PXA_SSP if ARCH_PXA
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master
@@ -300,7 +318,7 @@ config SPI_PXA2XX
additional documentation can be found a Documentation/spi/pxa2xx.
config SPI_PXA2XX_PCI
- def_bool SPI_PXA2XX && X86_32 && PCI
+ def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
tristate "Renesas RSPI controller"
@@ -310,7 +328,7 @@ config SPI_RSPI
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
- depends on ARCH_S3C24XX && EXPERIMENTAL
+ depends on ARCH_S3C24XX
select SPI_BITBANG
help
SPI driver for Samsung S3C24XX series ARM SoCs
@@ -341,10 +359,10 @@ config SPI_SC18IS602
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
- depends on SUPERH && HAVE_CLK
+ depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
select SPI_BITBANG
help
- SPI driver for SuperH MSIOF blocks.
+ SPI driver for SuperH and SH Mobile MSIOF blocks.
config SPI_SH
tristate "SuperH SPI controller"
@@ -372,12 +390,6 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
-config SPI_STMP3XXX
- tristate "Freescale STMP37xx/378x SPI/SSP controller"
- depends on ARCH_STMP3XXX
- help
- SPI driver for Freescale STMP37xx/378x SoC SSP interface
-
config SPI_MXS
tristate "Freescale MXS SPI controller"
depends on ARCH_MXS
@@ -385,6 +397,20 @@ config SPI_MXS
help
SPI driver for Freescale MXS devices.
+config SPI_TEGRA20_SFLASH
+ tristate "Nvidia Tegra20 Serial flash Controller"
+ depends on ARCH_TEGRA
+ help
+ SPI driver for Nvidia Tegra20 Serial flash Controller interface.
+ The main usecase of this controller is to use spi flash as boot
+ device.
+
+config SPI_TEGRA20_SLINK
+ tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
+ depends on ARCH_TEGRA && TEGRA20_APB_DMA
+ help
+ SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
+
config SPI_TI_SSP
tristate "TI Sequencer Serial Port - SPI Support"
depends on MFD_TI_SSP
@@ -417,7 +443,7 @@ config SPI_XCOMM
config SPI_XILINX
tristate "Xilinx SPI controller common module"
- depends on HAS_IOMEM && EXPERIMENTAL
+ depends on HAS_IOMEM
select SPI_BITBANG
help
This exposes the SPI controller IP from the Xilinx EDK.
@@ -429,7 +455,7 @@ config SPI_XILINX
config SPI_NUC900
tristate "Nuvoton NUC900 series SPI"
- depends on ARCH_W90X900 && EXPERIMENTAL
+ depends on ARCH_W90X900
select SPI_BITBANG
help
SPI driver for Nuvoton NUC900 series ARM SoCs
@@ -463,7 +489,6 @@ comment "SPI Protocol Masters"
config SPI_SPIDEV
tristate "User mode SPI device driver support"
- depends on EXPERIMENTAL
help
This supports user mode SPI protocol drivers.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c48df47e4b0f..e53c30941340 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
+obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
@@ -46,7 +47,10 @@ obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
-obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o
+spi-pxa2xx-platform-objs := spi-pxa2xx.o
+spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
+spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
+obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
@@ -59,11 +63,11 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
-obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o
+obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
+obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
-
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index f1fec2a19d10..a537f8dffc09 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -134,7 +134,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->count = 0;
- hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8;
+ hw->bytes_per_word = t->bits_per_word / 8;
hw->len = t->len / hw->bytes_per_word;
if (hw->irq >= 0) {
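
Dropping the `t->bits_per_word ? : spi->bits_per_word` fallback here (and in the bfin-sport hunk further down) relies on the transfer's bits_per_word already being populated before the driver sees it, presumably because the SPI core now normalizes per-transfer values from the device default, leaving the driver-side fallback dead code:

/* old: fall back to the device default when the transfer left it unset */
hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8;

/* new: trust the core to have filled in t->bits_per_word already */
hw->bytes_per_word = t->bits_per_word / 8;
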
@@ -215,7 +215,7 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int __devinit altera_spi_probe(struct platform_device *pdev)
+static int altera_spi_probe(struct platform_device *pdev)
{
struct altera_spi_platform_data *platp = pdev->dev.platform_data;
struct altera_spi *hw;
@@ -290,7 +290,7 @@ exit:
return err;
}
-static int __devexit altera_spi_remove(struct platform_device *dev)
+static int altera_spi_remove(struct platform_device *dev)
{
struct altera_spi *hw = platform_get_drvdata(dev);
struct spi_master *master = hw->bitbang.master;
@@ -311,7 +311,7 @@ MODULE_DEVICE_TABLE(of, altera_spi_match);
static struct platform_driver altera_spi_driver = {
.probe = altera_spi_probe,
- .remove = __devexit_p(altera_spi_remove),
+ .remove = altera_spi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 249077e5cc48..e504b7636058 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -24,17 +24,24 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79_spi_platform.h>
#define DRV_NAME "ath79-spi"
+#define ATH79_SPI_RRW_DELAY_FACTOR 12000
+#define MHZ (1000 * 1000)
+
struct ath79_spi {
struct spi_bitbang bitbang;
u32 ioc_base;
u32 reg_ctrl;
void __iomem *base;
+ struct clk *clk;
+ unsigned rrw_delay;
};
static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg)
@@ -52,6 +59,12 @@ static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
return spi_master_get_devdata(spi->master);
}
+static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned nsecs)
+{
+ if (nsecs > sp->rrw_delay)
+ ndelay(nsecs - sp->rrw_delay);
+}
+
static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
@@ -83,15 +96,8 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
}
-static int ath79_spi_setup_cs(struct spi_device *spi)
+static void ath79_spi_enable(struct ath79_spi *sp)
{
- struct ath79_spi *sp = ath79_spidev_to_sp(spi);
- struct ath79_spi_controller_data *cdata;
-
- cdata = spi->controller_data;
- if (spi->chip_select && !cdata)
- return -EINVAL;
-
/* enable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
@@ -101,44 +107,48 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
/* TODO: setup speed? */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
+}
- if (spi->chip_select) {
- int status = 0;
+static void ath79_spi_disable(struct ath79_spi *sp)
+{
+ /* restore CTRL register */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
+ /* disable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+}
- status = gpio_request(cdata->gpio, dev_name(&spi->dev));
- if (status)
- return status;
+static int ath79_spi_setup_cs(struct spi_device *spi)
+{
+ struct ath79_spi_controller_data *cdata;
+ int status;
- status = gpio_direction_output(cdata->gpio,
- spi->mode & SPI_CS_HIGH);
- if (status) {
- gpio_free(cdata->gpio);
- return status;
- }
- } else {
+ cdata = spi->controller_data;
+ if (spi->chip_select && !cdata)
+ return -EINVAL;
+
+ status = 0;
+ if (spi->chip_select) {
+ unsigned long flags;
+
+ flags = GPIOF_DIR_OUT;
if (spi->mode & SPI_CS_HIGH)
- sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+ flags |= GPIOF_INIT_HIGH;
else
- sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
- ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+ flags |= GPIOF_INIT_LOW;
+
+ status = gpio_request_one(cdata->gpio, flags,
+ dev_name(&spi->dev));
}
- return 0;
+ return status;
}
static void ath79_spi_cleanup_cs(struct spi_device *spi)
{
- struct ath79_spi *sp = ath79_spidev_to_sp(spi);
-
if (spi->chip_select) {
struct ath79_spi_controller_data *cdata = spi->controller_data;
gpio_free(cdata->gpio);
}
-
- /* restore CTRL register */
- ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
- /* disable GPIO mode */
- ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
}
static int ath79_spi_setup(struct spi_device *spi)
@@ -184,7 +194,11 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
/* setup MSB (to slave) on trailing edge */
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
+ ath79_spi_delay(sp, nsecs);
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
+ ath79_spi_delay(sp, nsecs);
+ if (bits == 1)
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
word <<= 1;
}
@@ -192,12 +206,13 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
}
-static __devinit int ath79_spi_probe(struct platform_device *pdev)
+static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ath79_spi *sp;
struct ath79_spi_platform_data *pdata;
struct resource *r;
+ unsigned long rate;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(*sp));
@@ -236,12 +251,39 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
+ sp->clk = clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sp->clk)) {
+ ret = PTR_ERR(sp->clk);
+ goto err_unmap;
+ }
+
+ ret = clk_enable(sp->clk);
+ if (ret)
+ goto err_clk_put;
+
+ rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;
+ dev_dbg(&pdev->dev, "register read/write delay is %u nsecs\n",
+ sp->rrw_delay);
+
+ ath79_spi_enable(sp);
ret = spi_bitbang_start(&sp->bitbang);
if (ret)
- goto err_unmap;
+ goto err_disable;
return 0;
+err_disable:
+ ath79_spi_disable(sp);
+err_clk_disable:
+ clk_disable(sp->clk);
+err_clk_put:
+ clk_put(sp->clk);
err_unmap:
iounmap(sp->base);
err_put_master:
@@ -251,11 +293,14 @@ err_put_master:
return ret;
}
-static __devexit int ath79_spi_remove(struct platform_device *pdev)
+static int ath79_spi_remove(struct platform_device *pdev)
{
struct ath79_spi *sp = platform_get_drvdata(pdev);
spi_bitbang_stop(&sp->bitbang);
+ ath79_spi_disable(sp);
+ clk_disable(sp->clk);
+ clk_put(sp->clk);
iounmap(sp->base);
platform_set_drvdata(pdev, NULL);
spi_master_put(sp->bitbang.master);
@@ -263,9 +308,15 @@ static __devexit int ath79_spi_remove(struct platform_device *pdev)
return 0;
}
+static void ath79_spi_shutdown(struct platform_device *pdev)
+{
+ ath79_spi_remove(pdev);
+}
+
static struct platform_driver ath79_spi_driver = {
.probe = ath79_spi_probe,
- .remove = __devexit_p(ath79_spi_remove),
+ .remove = ath79_spi_remove,
+ .shutdown = ath79_spi_shutdown,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
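
The ath79 probe now claims the "ahb" clock and derives rrw_delay, apparently the cost of the MMIO register access itself, so ath79_spi_delay() only busy-waits for whatever part of the requested half-period is left. Worked numbers, assuming an illustrative 200 MHz AHB clock:

/* rate is the bus clock in MHz, rounded up */
rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);	/* 200000000 -> 200    */
sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;	/* 12000 / 200 = 60 ns */

/* per clock edge in ath79_spi_txrx_mode0(): */
if (nsecs > sp->rrw_delay)
	ndelay(nsecs - sp->rrw_delay);			/* e.g. 100 - 60 = 40 ns */
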
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 61fb0ec26f06..656d137db253 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/platform_data/atmel.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/gpio.h>
@@ -768,6 +769,10 @@ static int atmel_spi_setup(struct spi_device *spi)
/* chipselect must have been muxed as GPIO (e.g. in board setup) */
npcs_pin = (unsigned int)spi->controller_data;
+
+ if (gpio_is_valid(spi->cs_gpio))
+ npcs_pin = spi->cs_gpio;
+
asd = spi->controller_state;
if (!asd) {
asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
@@ -907,7 +912,7 @@ static void atmel_spi_cleanup(struct spi_device *spi)
/*-------------------------------------------------------------------------*/
-static int __devinit atmel_spi_probe(struct platform_device *pdev)
+static int atmel_spi_probe(struct platform_device *pdev)
{
struct resource *regs;
int irq;
@@ -937,8 +942,9 @@ static int __devinit atmel_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
- master->num_chipselect = 4;
+ master->num_chipselect = master->dev.of_node ? 0 : 4;
master->setup = atmel_spi_setup;
master->transfer = atmel_spi_transfer;
master->cleanup = atmel_spi_cleanup;
@@ -1003,7 +1009,7 @@ out_free:
return ret;
}
-static int __devexit atmel_spi_remove(struct platform_device *pdev)
+static int atmel_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
@@ -1064,16 +1070,25 @@ static int atmel_spi_resume(struct platform_device *pdev)
#define atmel_spi_resume NULL
#endif
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_spi_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-spi" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
+#endif
static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_spi_dt_ids),
},
.suspend = atmel_spi_suspend,
.resume = atmel_spi_resume,
.probe = atmel_spi_probe,
- .remove = __exit_p(atmel_spi_remove),
+ .remove = atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 4de66d1cfe51..44dd34b6ad09 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -717,7 +717,7 @@ static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
}
}
-static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
+static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
{
u32 stat, cfg;
@@ -766,7 +766,7 @@ static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
}
-static int __init au1550_spi_probe(struct platform_device *pdev)
+static int au1550_spi_probe(struct platform_device *pdev)
{
struct au1550_spi *hw;
struct spi_master *master;
@@ -968,7 +968,7 @@ err_nomem:
return err;
}
-static int __exit au1550_spi_remove(struct platform_device *pdev)
+static int au1550_spi_remove(struct platform_device *pdev)
{
struct au1550_spi *hw = platform_get_drvdata(pdev);
@@ -997,7 +997,7 @@ static int __exit au1550_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:au1550-spi");
static struct platform_driver au1550_spi_drv = {
- .remove = __exit_p(au1550_spi_remove),
+ .remove = au1550_spi_remove,
.driver = {
.name = "au1550-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index a9f4049c6769..9578af782a77 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -36,7 +36,8 @@
#include <bcm63xx_dev_spi.h>
#define PFX KBUILD_MODNAME
-#define DRV_VER "0.1.2"
+
+#define BCM63XX_SPI_MAX_PREPEND 15
struct bcm63xx_spi {
struct completion done;
@@ -50,16 +51,10 @@ struct bcm63xx_spi {
unsigned int msg_type_shift;
unsigned int msg_ctl_width;
- /* Data buffers */
- const unsigned char *tx_ptr;
- unsigned char *rx_ptr;
-
/* data iomem */
u8 __iomem *tx_io;
const u8 __iomem *rx_io;
- int remaining_bytes;
-
struct clk *clk;
struct platform_device *pdev;
};
@@ -170,37 +165,23 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
return -EINVAL;
}
- ret = bcm63xx_spi_check_transfer(spi, NULL);
- if (ret < 0) {
- dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return ret;
- }
-
dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
__func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
return 0;
}
-/* Fill the TX FIFO with as many bytes as possible */
-static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs)
-{
- u8 size;
-
- /* Fill the Tx FIFO with as many bytes as possible */
- size = bs->remaining_bytes < bs->fifo_size ? bs->remaining_bytes :
- bs->fifo_size;
- memcpy_toio(bs->tx_io, bs->tx_ptr, size);
- bs->remaining_bytes -= size;
-}
-
-static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
- struct spi_transfer *t)
+static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
+ unsigned int num_transfers)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
u16 msg_ctl;
u16 cmd;
+ u8 rx_tail;
+ unsigned int i, timeout = 0, prepend_len = 0, len = 0;
+ struct spi_transfer *t = first;
+ bool do_rx = false;
+ bool do_tx = false;
/* Disable the CMD_DONE interrupt */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
@@ -208,25 +189,45 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
t->tx_buf, t->rx_buf, t->len);
- /* Transmitter is inhibited */
- bs->tx_ptr = t->tx_buf;
- bs->rx_ptr = t->rx_buf;
+ if (num_transfers > 1 && t->tx_buf && t->len <= BCM63XX_SPI_MAX_PREPEND)
+ prepend_len = t->len;
+
+ /* prepare the buffer */
+ for (i = 0; i < num_transfers; i++) {
+ if (t->tx_buf) {
+ do_tx = true;
+ memcpy_toio(bs->tx_io + len, t->tx_buf, t->len);
- if (t->tx_buf) {
- bs->remaining_bytes = t->len;
- bcm63xx_spi_fill_tx_fifo(bs);
+ /* don't prepend more than one tx */
+ if (t != first)
+ prepend_len = 0;
+ }
+
+ if (t->rx_buf) {
+ do_rx = true;
+ /* prepend is half-duplex write only */
+ if (t == first)
+ prepend_len = 0;
+ }
+
+ len += t->len;
+
+ t = list_entry(t->transfer_list.next, struct spi_transfer,
+ transfer_list);
}
+ len -= prepend_len;
+
init_completion(&bs->done);
/* Fill in the Message control register */
- msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
+ msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
- if (t->rx_buf && t->tx_buf)
+ if (do_rx && do_tx && prepend_len == 0)
msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
- else if (t->rx_buf)
+ else if (do_rx)
msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
- else if (t->tx_buf)
+ else if (do_tx)
msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
switch (bs->msg_ctl_width) {
@@ -240,14 +241,41 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
/* Issue the transfer */
cmd = SPI_CMD_START_IMMEDIATE;
- cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
+ cmd |= (prepend_len << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
bcm_spi_writew(bs, cmd, SPI_CMD);
/* Enable the CMD_DONE interrupt */
bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
- return t->len - bs->remaining_bytes;
+ timeout = wait_for_completion_timeout(&bs->done, HZ);
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* read out all data */
+ rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
+
+ if (do_rx && rx_tail != len)
+ return -EIO;
+
+ if (!rx_tail)
+ return 0;
+
+ len = 0;
+ t = first;
+ /* Read out all the data */
+ for (i = 0; i < num_transfers; i++) {
+ if (t->rx_buf)
+ memcpy_fromio(t->rx_buf, bs->rx_io + len, t->len);
+
+ if (t != first || prepend_len == 0)
+ len += t->len;
+
+ t = list_entry(t->transfer_list.next, struct spi_transfer,
+ transfer_list);
+ }
+
+ return 0;
}
static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
@@ -272,41 +300,76 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master,
struct spi_message *m)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
- struct spi_transfer *t;
+ struct spi_transfer *t, *first = NULL;
struct spi_device *spi = m->spi;
int status = 0;
- unsigned int timeout = 0;
-
+ unsigned int n_transfers = 0, total_len = 0;
+ bool can_use_prepend = false;
+
+ /*
+ * This SPI controller does not support keeping CS active after a
+ * transfer.
+ * Work around this by merging as many transfers we can into one big
+ * full-duplex transfers.
+ */
list_for_each_entry(t, &m->transfers, transfer_list) {
- unsigned int len = t->len;
- u8 rx_tail;
-
status = bcm63xx_spi_check_transfer(spi, t);
if (status < 0)
goto exit;
- /* configure adapter for a new transfer */
- bcm63xx_spi_setup_transfer(spi, t);
+ if (!first)
+ first = t;
+
+ n_transfers++;
+ total_len += t->len;
+
+ if (n_transfers == 2 && !first->rx_buf && !t->tx_buf &&
+ first->len <= BCM63XX_SPI_MAX_PREPEND)
+ can_use_prepend = true;
+ else if (can_use_prepend && t->tx_buf)
+ can_use_prepend = false;
+
+ /* we can only transfer one fifo worth of data */
+ if ((can_use_prepend &&
+ total_len > (bs->fifo_size + BCM63XX_SPI_MAX_PREPEND)) ||
+ (!can_use_prepend && total_len > bs->fifo_size)) {
+ dev_err(&spi->dev, "unable to do transfers larger than FIFO size (%i > %i)\n",
+ total_len, bs->fifo_size);
+ status = -EINVAL;
+ goto exit;
+ }
- while (len) {
- /* send the data */
- len -= bcm63xx_txrx_bufs(spi, t);
+ /* all combined transfers have to have the same speed */
+ if (t->speed_hz != first->speed_hz) {
+ dev_err(&spi->dev, "unable to change speed between transfers\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* CS will be deasserted directly after transfer */
+ if (t->delay_usecs) {
+ dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
+ status = -EINVAL;
+ goto exit;
+ }
- timeout = wait_for_completion_timeout(&bs->done, HZ);
- if (!timeout) {
- status = -ETIMEDOUT;
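+ /* flush the merged transfers when CS needs to toggle or at the end of the message */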
+ if (t->cs_change ||
+ list_is_last(&t->transfer_list, &m->transfers)) {
+ /* configure adapter for a new transfer */
+ bcm63xx_spi_setup_transfer(spi, first);
+
+ /* send the data */
+ status = bcm63xx_txrx_bufs(spi, first, n_transfers);
+ if (status)
goto exit;
- }
- /* read out all data */
- rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
+ m->actual_length += total_len;
- /* Read out all the data */
- if (rx_tail)
- memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
+ first = NULL;
+ n_transfers = 0;
+ total_len = 0;
+ can_use_prepend = false;
}
-
- m->actual_length += t->len;
}
exit:
m->status = status;
@@ -337,7 +400,7 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
}
-static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
+static int bcm63xx_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct device *dev = &pdev->dev;
@@ -441,8 +504,8 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
- dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d) v%s\n",
- r->start, irq, bs->fifo_size, DRV_VER);
+ dev_info(dev, "at 0x%08x (irq %d, FIFO size %d)\n",
+ r->start, irq, bs->fifo_size);
return 0;
@@ -457,7 +520,7 @@ out:
return ret;
}
-static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
+static int bcm63xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
@@ -485,6 +548,8 @@ static int bcm63xx_spi_suspend(struct device *dev)
platform_get_drvdata(to_platform_device(dev));
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
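+ /* stop the message queue before gating the clock */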
+ spi_master_suspend(master);
+
clk_disable(bs->clk);
return 0;
@@ -498,6 +563,8 @@ static int bcm63xx_spi_resume(struct device *dev)
clk_enable(bs->clk);
+ spi_master_resume(master);
+
return 0;
}
@@ -518,7 +585,7 @@ static struct platform_driver bcm63xx_spi_driver = {
.pm = BCM63XX_SPI_PM_OPS,
},
.probe = bcm63xx_spi_probe,
- .remove = __devexit_p(bcm63xx_spi_remove),
+ .remove = bcm63xx_spi_remove,
};
module_platform_driver(bcm63xx_spi_driver);
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 6555ecd07302..39b0d1711b4e 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -416,8 +416,7 @@ bfin_sport_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
- bits_per_word = transfer->bits_per_word ? :
- message->spi->bits_per_word ? : 8;
+ bits_per_word = transfer->bits_per_word;
if (bits_per_word % 16 == 0)
drv_data->ops = &bfin_sport_transfer_ops_u16;
else
@@ -755,8 +754,7 @@ bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
return 0;
}
-static int __devinit
-bfin_sport_spi_probe(struct platform_device *pdev)
+static int bfin_sport_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bfin5xx_spi_master *platform_info;
@@ -863,8 +861,7 @@ bfin_sport_spi_probe(struct platform_device *pdev)
}
/* stop hardware and remove the driver */
-static int __devexit
-bfin_sport_spi_remove(struct platform_device *pdev)
+static int bfin_sport_spi_remove(struct platform_device *pdev)
{
struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
@@ -935,7 +932,7 @@ static struct platform_driver bfin_sport_spi_driver = {
.owner = THIS_MODULE,
},
.probe = bfin_sport_spi_probe,
- .remove = __devexit_p(bfin_sport_spi_remove),
+ .remove = bfin_sport_spi_remove,
.suspend = bfin_sport_spi_suspend,
.resume = bfin_sport_spi_resume,
};
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 9bb4d4af8547..317f564c899c 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -642,8 +642,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
- bits_per_word = transfer->bits_per_word ? :
- message->spi->bits_per_word ? : 8;
+ bits_per_word = transfer->bits_per_word;
if (bits_per_word % 16 == 0) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = (transfer->len) >> 1;
@@ -1274,7 +1273,7 @@ static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
return 0;
}
-static int __init bfin_spi_probe(struct platform_device *pdev)
+static int bfin_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bfin5xx_spi_master *platform_info;
@@ -1387,7 +1386,7 @@ out_error_get_res:
}
/* stop hardware and remove the driver */
-static int __devexit bfin_spi_remove(struct platform_device *pdev)
+static int bfin_spi_remove(struct platform_device *pdev)
{
struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
@@ -1477,7 +1476,7 @@ static struct platform_driver bfin_spi_driver = {
},
.suspend = bfin_spi_suspend,
.resume = bfin_spi_resume,
- .remove = __devexit_p(bfin_spi_remove),
+ .remove = bfin_spi_remove,
};
static int __init bfin_spi_init(void)
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index aef59b1a15f7..a63d7da3bfe2 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -69,7 +69,7 @@ static unsigned bitbang_txrx_8(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = t->bits_per_word ? : spi->bits_per_word;
+ unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
@@ -95,7 +95,7 @@ static unsigned bitbang_txrx_16(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = t->bits_per_word ? : spi->bits_per_word;
+ unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u16 *tx = t->tx_buf;
u16 *rx = t->rx_buf;
@@ -121,7 +121,7 @@ static unsigned bitbang_txrx_32(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = t->bits_per_word ? : spi->bits_per_word;
+ unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u32 *tx = t->tx_buf;
u32 *rx = t->rx_buf;
@@ -260,11 +260,11 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
+ struct spi_message *m, *_m;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
- while (!list_empty(&bitbang->queue)) {
- struct spi_message *m;
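+ /* messages are removed from the queue while iterating, hence the _safe variant */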
+ list_for_each_entry_safe(m, _m, &bitbang->queue, queue) {
struct spi_device *spi;
unsigned nsecs;
struct spi_transfer *t = NULL;
@@ -273,9 +273,7 @@ static void bitbang_work(struct work_struct *work)
int status;
int do_setup = -1;
- m = container_of(bitbang->queue.next, struct spi_message,
- queue);
- list_del_init(&m->queue);
+ list_del(&m->queue);
spin_unlock_irqrestore(&bitbang->lock, flags);
/* FIXME this is made-up ... the correct value is known to
@@ -346,17 +344,14 @@ static void bitbang_work(struct work_struct *work)
if (t->delay_usecs)
udelay(t->delay_usecs);
- if (!cs_change)
- continue;
- if (t->transfer_list.next == &m->transfers)
- break;
-
- /* sometimes a short mid-message deselect of the chip
- * may be needed to terminate a mode or command
- */
- ndelay(nsecs);
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
+ if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) {
+ /* sometimes a short mid-message deselect of the chip
+ * may be needed to terminate a mode or command
+ */
+ ndelay(nsecs);
+ bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(nsecs);
+ }
}
m->status = status;
@@ -432,40 +427,41 @@ EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
*/
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
- int status;
+ struct spi_master *master = bitbang->master;
+ int status;
- if (!bitbang->master || !bitbang->chipselect)
+ if (!master || !bitbang->chipselect)
return -EINVAL;
INIT_WORK(&bitbang->work, bitbang_work);
spin_lock_init(&bitbang->lock);
INIT_LIST_HEAD(&bitbang->queue);
- if (!bitbang->master->mode_bits)
- bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+ if (!master->mode_bits)
+ master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
- if (!bitbang->master->transfer)
- bitbang->master->transfer = spi_bitbang_transfer;
+ if (!master->transfer)
+ master->transfer = spi_bitbang_transfer;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
- if (!bitbang->master->setup) {
+ if (!master->setup) {
if (!bitbang->setup_transfer)
bitbang->setup_transfer =
spi_bitbang_setup_transfer;
- bitbang->master->setup = spi_bitbang_setup;
- bitbang->master->cleanup = spi_bitbang_cleanup;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
}
- } else if (!bitbang->master->setup)
+ } else if (!master->setup)
return -EINVAL;
- if (bitbang->master->transfer == spi_bitbang_transfer &&
+ if (master->transfer == spi_bitbang_transfer &&
!bitbang->setup_transfer)
return -EINVAL;
/* this task is the only thing to touch the SPI bits */
bitbang->busy = 0;
bitbang->workqueue = create_singlethread_workqueue(
- dev_name(bitbang->master->dev.parent));
+ dev_name(master->dev.parent));
if (bitbang->workqueue == NULL) {
status = -EBUSY;
goto err1;
@@ -474,7 +470,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
*/
- status = spi_register_master(bitbang->master);
+ status = spi_register_master(master);
if (status < 0)
goto err2;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
new file mode 100644
index 000000000000..a11cbf02691a
--- /dev/null
+++ b/drivers/spi/spi-clps711x.c
@@ -0,0 +1,296 @@
+/*
+ * CLPS711X SPI bus driver
+ *
+ * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/spi-clps711x.h>
+
+#include <mach/hardware.h>
+
+#define DRIVER_NAME "spi-clps711x"
+
+struct spi_clps711x_data {
+ struct completion done;
+
+ struct clk *spi_clk;
+ u32 max_speed_hz;
+
+ u8 *tx_buf;
+ u8 *rx_buf;
+ int count;
+ int len;
+
+ int chipselect[0];
+};
+
+static int spi_clps711x_setup(struct spi_device *spi)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
+
+ if (spi->bits_per_word != 8) {
+ dev_err(&spi->dev, "Unsupported master bus width %i\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+
+ /* We expect that the SPI device is not selected */
+ gpio_direction_output(hw->chipselect[spi->chip_select],
+ !(spi->mode & SPI_CS_HIGH));
+
+ return 0;
+}
+
+static void spi_clps711x_setup_mode(struct spi_device *spi)
+{
+ /* Setup edge for transfer */
+ if (spi->mode & SPI_CPHA)
+ clps_writew(clps_readw(SYSCON3) | SYSCON3_ADCCKNSEN, SYSCON3);
+ else
+ clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCKNSEN, SYSCON3);
+}
+
+static int spi_clps711x_setup_xfer(struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ u32 speed = xfer->speed_hz ? : spi->max_speed_hz;
+ u8 bpw = xfer->bits_per_word;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
+
+ if (bpw != 8) {
+ dev_err(&spi->dev, "Unsupported master bus width %i\n", bpw);
+ return -EINVAL;
+ }
+
+ /* Setup SPI frequency divider */
+ if (!speed || (speed >= hw->max_speed_hz))
+ clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
+ SYSCON1_ADCKSEL(3), SYSCON1);
+ else if (speed >= (hw->max_speed_hz / 2))
+ clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
+ SYSCON1_ADCKSEL(2), SYSCON1);
+ else if (speed >= (hw->max_speed_hz / 8))
+ clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
+ SYSCON1_ADCKSEL(1), SYSCON1);
+ else
+ clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
+ SYSCON1_ADCKSEL(0), SYSCON1);
+
+ return 0;
+}
+
+static int spi_clps711x_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ int status = 0, cs = hw->chipselect[msg->spi->chip_select];
+ u32 data;
+
+ spi_clps711x_setup_mode(msg->spi);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (spi_clps711x_setup_xfer(msg->spi, xfer)) {
+ status = -EINVAL;
+ goto out_xfr;
+ }
+
+ gpio_set_value(cs, !!(msg->spi->mode & SPI_CS_HIGH));
+
+ INIT_COMPLETION(hw->done);
+
+ hw->count = 0;
+ hw->len = xfer->len;
+ hw->tx_buf = (u8 *)xfer->tx_buf;
+ hw->rx_buf = (u8 *)xfer->rx_buf;
+
+ /* Initiate transfer */
+ data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
+ clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
+
+ wait_for_completion(&hw->done);
+
+ if (xfer->delay_usecs)
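+ /* on imx23 the transfer count lives in CTRL0; clear the old value before setting the new length */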
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change ||
+ list_is_last(&xfer->transfer_list, &msg->transfers))
+ gpio_set_value(cs, !(msg->spi->mode & SPI_CS_HIGH));
+
+ msg->actual_length += xfer->len;
+ }
+
+out_xfr:
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
+{
+ struct spi_clps711x_data *hw = (struct spi_clps711x_data *)dev_id;
+ u32 data;
+
+ /* Handle RX */
+ data = clps_readb(SYNCIO);
+ if (hw->rx_buf)
+ hw->rx_buf[hw->count] = (u8)data;
+
+ hw->count++;
+
+ /* Handle TX */
+ if (hw->count < hw->len) {
+ data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
+ clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
+ } else
+ complete(&hw->done);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_clps711x_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct spi_master *master;
+ struct spi_clps711x_data *hw;
+ struct spi_clps711x_pdata *pdata = dev_get_platdata(&pdev->dev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ if (pdata->num_chipselect < 1) {
+ dev_err(&pdev->dev, "At least one CS must be defined\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_clps711x_data) +
+ sizeof(int) * pdata->num_chipselect);
+ if (!master) {
+ dev_err(&pdev->dev, "Failed to allocate SPI master\n");
+ return -ENOMEM;
+ }
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
+ master->num_chipselect = pdata->num_chipselect;
+ master->setup = spi_clps711x_setup;
+ master->transfer_one_message = spi_clps711x_transfer_one_message;
+
+ hw = spi_master_get_devdata(master);
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ hw->chipselect[i] = pdata->chipselect[i];
+ if (!gpio_is_valid(hw->chipselect[i])) {
+ dev_err(&pdev->dev, "Invalid CS GPIO %i\n", i);
+ ret = -EINVAL;
+ goto err_out;
+ }
+ if (gpio_request(hw->chipselect[i], DRIVER_NAME)) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
+ ret = -EINVAL;
+ goto err_out;
+ }
+ }
+
+ hw->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->spi_clk)) {
+ dev_err(&pdev->dev, "Can't get clocks\n");
+ ret = PTR_ERR(hw->spi_clk);
+ goto err_out;
+ }
+ hw->max_speed_hz = clk_get_rate(hw->spi_clk);
+
+ init_completion(&hw->done);
+ platform_set_drvdata(pdev, master);
+
+ /* Disable extended mode due to hardware problems */
+ clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCON, SYSCON3);
+
+ /* Clear possible pending interrupt */
+ clps_readl(SYNCIO);
+
+ ret = devm_request_irq(&pdev->dev, IRQ_SSEOTI, spi_clps711x_isr, 0,
+ dev_name(&pdev->dev), hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't request IRQ\n");
+ clk_put(hw->spi_clk);
+ goto clk_out;
+ }
+
+ ret = spi_register_master(master);
+ if (!ret) {
+ dev_info(&pdev->dev,
+ "SPI bus driver initialized. Master clock %u Hz\n",
+ hw->max_speed_hz);
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "Failed to register master\n");
+ devm_free_irq(&pdev->dev, IRQ_SSEOTI, hw);
+
+clk_out:
+ devm_clk_put(&pdev->dev, hw->spi_clk);
+
+err_out:
+ while (--i >= 0)
+ if (gpio_is_valid(hw->chipselect[i]))
+ gpio_free(hw->chipselect[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ kfree(master);
+
+ return ret;
+}
+
+static int spi_clps711x_remove(struct platform_device *pdev)
+{
+ int i;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+
+ devm_free_irq(&pdev->dev, IRQ_SSEOTI, hw);
+
+ for (i = 0; i < master->num_chipselect; i++)
+ if (gpio_is_valid(hw->chipselect[i]))
+ gpio_free(hw->chipselect[i]);
+
+ devm_clk_put(&pdev->dev, hw->spi_clk);
+ platform_set_drvdata(pdev, NULL);
+ spi_unregister_master(master);
+ kfree(master);
+
+ return 0;
+}
+
+static struct platform_driver clps711x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = spi_clps711x_probe,
+ .remove = spi_clps711x_remove,
+};
+module_platform_driver(clps711x_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("CLPS711X SPI bus driver");
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 764bfee75920..7b5cc9e4e94d 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -329,8 +329,7 @@ static int mcfqspi_transfer_one_message(struct spi_master *master,
mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
- if ((t->bits_per_word ? t->bits_per_word :
- spi->bits_per_word) == 8)
+ if (t->bits_per_word == 8)
mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
t->rx_buf);
else
@@ -401,7 +400,7 @@ static int mcfqspi_setup(struct spi_device *spi)
return 0;
}
-static int __devinit mcfqspi_probe(struct platform_device *pdev)
+static int mcfqspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct mcfqspi *mcfqspi;
@@ -515,7 +514,7 @@ fail0:
return status;
}
-static int __devexit mcfqspi_remove(struct platform_device *pdev)
+static int mcfqspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
@@ -594,7 +593,7 @@ static struct platform_driver mcfqspi_driver = {
.driver.owner = THIS_MODULE,
.driver.pm = &mcfqspi_pm,
.probe = mcfqspi_probe,
- .remove = __devexit_p(mcfqspi_remove),
+ .remove = mcfqspi_remove,
};
module_platform_driver(mcfqspi_driver);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 147dfa87a64b..8234d2259722 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -28,6 +28,8 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
@@ -135,7 +137,7 @@ struct davinci_spi {
int dma_rx_chnum;
int dma_tx_chnum;
- struct davinci_spi_platform_data *pdata;
+ struct davinci_spi_platform_data pdata;
void (*get_rx)(u32 rx_data, struct davinci_spi *);
u32 (*get_tx)(struct davinci_spi *);
@@ -213,7 +215,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
bool gpio_chipsel = false;
dspi = spi_master_get_devdata(spi->master);
- pdata = dspi->pdata;
+ pdata = &dspi->pdata;
if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
@@ -392,7 +394,7 @@ static int davinci_spi_setup(struct spi_device *spi)
struct davinci_spi_platform_data *pdata;
dspi = spi_master_get_devdata(spi->master);
- pdata = dspi->pdata;
+ pdata = &dspi->pdata;
/* if bits per word length is zero then set it default 8 */
if (!spi->bits_per_word)
@@ -534,7 +536,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
struct scatterlist sg_rx, sg_tx;
dspi = spi_master_get_devdata(spi->master);
- pdata = dspi->pdata;
+ pdata = &dspi->pdata;
spicfg = (struct davinci_spi_config *)spi->controller_data;
if (!spicfg)
spicfg = &davinci_spi_default_cfg;
@@ -700,6 +702,19 @@ err_alloc_dummy_buf:
}
/**
+ * dummy_thread_fn - dummy thread function
+ * @irq: IRQ number for this SPI Master
+ * @context_data: structure for SPI Master controller davinci_spi
+ *
+ * This is to satisfy the request_threaded_irq() API so that the irq
+ * handler is called in interrupt context.
+ */
+static irqreturn_t dummy_thread_fn(s32 irq, void *data)
+{
+ return IRQ_HANDLED;
+}
+
+/**
* davinci_spi_irq - Interrupt handler for SPI Master Controller
* @irq: IRQ number for this SPI Master
* @context_data: structure for SPI Master controller davinci_spi
@@ -758,6 +773,70 @@ rx_dma_failed:
return r;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_spi_of_match[] = {
+ {
+ .compatible = "ti,dm644x-spi",
+ },
+ {
+ .compatible = "ti,da8xx-spi",
+ .data = (void *)SPI_VERSION_2,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
+
+/**
+ * spi_davinci_get_pdata - Get platform data from DTS binding
+ * @pdev: ptr to platform device
+ * @dspi: ptr to driver data
+ *
+ * Parses and populates pdata in dspi from device tree bindings.
+ *
+ * NOTE: Not all platform data params are supported currently.
+ */
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct davinci_spi_platform_data *pdata;
+ unsigned int num_cs, intr_line = 0;
+ const struct of_device_id *match;
+
+ pdata = &dspi->pdata;
+
+ pdata->version = SPI_VERSION_1;
+ match = of_match_device(of_match_ptr(davinci_spi_of_match),
+ &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ /* match data has the SPI version number for SPI_VERSION_2 */
+ if (match->data == (void *)SPI_VERSION_2)
+ pdata->version = SPI_VERSION_2;
+
+ /*
+ * By default num_cs is 1 and all chip selects are internal to the
+ * chip, indicated by chip_sel being NULL. GPIO-based CS is not yet
+ * supported in the DT bindings.
+ */
+ num_cs = 1;
+ of_property_read_u32(node, "num-cs", &num_cs);
+ pdata->num_chipselect = num_cs;
+ of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
+ pdata->intr_line = intr_line;
+ return 0;
+}
+#else
+#define davinci_spi_of_match NULL
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
+{
+ return -ENODEV;
+}
+#endif
+
/**
* davinci_spi_probe - probe function for SPI Master Controller
* @pdev: platform_device structure which contains platform specific data
@@ -769,7 +848,7 @@ rx_dma_failed:
* It will invoke spi_bitbang_start to create work queue so that client driver
* can register transfer method to work queue.
*/
-static int __devinit davinci_spi_probe(struct platform_device *pdev)
+static int davinci_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct davinci_spi *dspi;
@@ -780,12 +859,6 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
int i = 0, ret = 0;
u32 spipc0;
- pdata = pdev->dev.platform_data;
- if (pdata == NULL) {
- ret = -ENODEV;
- goto err;
- }
-
master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
if (master == NULL) {
ret = -ENOMEM;
@@ -800,6 +873,19 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
+ if (pdev->dev.platform_data) {
+ pdata = pdev->dev.platform_data;
+ dspi->pdata = *pdata;
+ } else {
+ /* update dspi pdata with that from the DT */
+ ret = spi_davinci_get_pdata(pdev, dspi);
+ if (ret < 0)
+ goto free_master;
+ }
+
+ /* dspi->pdata is now populated; point pdata at it */
+ pdata = &dspi->pdata;
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
ret = -ENOENT;
@@ -807,7 +893,6 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
}
dspi->pbase = r->start;
- dspi->pdata = pdata;
mem = request_mem_region(r->start, resource_size(r), pdev->name);
if (mem == NULL) {
@@ -827,8 +912,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
goto unmap_io;
}
- ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev),
- dspi);
+ ret = request_threaded_irq(dspi->irq, davinci_spi_irq, dummy_thread_fn,
+ 0, dev_name(&pdev->dev), dspi);
if (ret)
goto unmap_io;
@@ -843,8 +928,9 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
ret = -ENODEV;
goto put_master;
}
- clk_enable(dspi->clk);
+ clk_prepare_enable(dspi->clk);
+ master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
master->num_chipselect = pdata->num_chipselect;
master->setup = davinci_spi_setup;
@@ -927,7 +1013,7 @@ free_dma:
dma_release_channel(dspi->dma_rx);
dma_release_channel(dspi->dma_tx);
free_clk:
- clk_disable(dspi->clk);
+ clk_disable_unprepare(dspi->clk);
clk_put(dspi->clk);
put_master:
spi_master_put(master);
@@ -952,7 +1038,7 @@ err:
* It will also call spi_bitbang_stop to destroy the work queue which was
* created by spi_bitbang_start.
*/
-static int __devexit davinci_spi_remove(struct platform_device *pdev)
+static int davinci_spi_remove(struct platform_device *pdev)
{
struct davinci_spi *dspi;
struct spi_master *master;
@@ -963,7 +1049,7 @@ static int __devexit davinci_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&dspi->bitbang);
- clk_disable(dspi->clk);
+ clk_disable_unprepare(dspi->clk);
clk_put(dspi->clk);
spi_master_put(master);
free_irq(dspi->irq, dspi);
@@ -978,9 +1064,10 @@ static struct platform_driver davinci_spi_driver = {
.driver = {
.name = "spi_davinci",
.owner = THIS_MODULE,
+ .of_match_table = davinci_spi_of_match,
},
.probe = davinci_spi_probe,
- .remove = __devexit_p(davinci_spi_remove),
+ .remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index db2f1ba06eab..4a6d5c9057a4 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -26,7 +26,7 @@ struct dw_spi_mmio {
struct clk *clk;
};
-static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
+static int dw_spi_mmio_probe(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio;
struct dw_spi *dws;
@@ -106,7 +106,7 @@ err_end:
return ret;
}
-static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
+static int dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
struct resource *mem;
@@ -129,7 +129,7 @@ static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
static struct platform_driver dw_spi_mmio_driver = {
.probe = dw_spi_mmio_probe,
- .remove = __devexit_p(dw_spi_mmio_remove),
+ .remove = dw_spi_mmio_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index ff81abbb3066..6055c8d9fdd7 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -32,7 +32,7 @@ struct dw_spi_pci {
struct dw_spi dws;
};
-static int __devinit spi_pci_probe(struct pci_dev *pdev,
+static int spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct dw_spi_pci *dwpci;
@@ -105,7 +105,7 @@ err_disable:
return ret;
}
-static void __devexit spi_pci_remove(struct pci_dev *pdev)
+static void spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
@@ -159,7 +159,7 @@ static struct pci_driver dw_spi_driver = {
.name = DRIVER_NAME,
.id_table = pci_ids,
.probe = spi_pci_probe,
- .remove = __devexit_p(spi_pci_remove),
+ .remove = spi_pci_remove,
.suspend = spi_suspend,
.resume = spi_resume,
};
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index d1a495f64e2d..c1abc06899e7 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -696,7 +696,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
kfree(chip);
}
-static int __devinit init_queue(struct dw_spi *dws)
+static int init_queue(struct dw_spi *dws)
{
INIT_LIST_HEAD(&dws->queue);
spin_lock_init(&dws->lock);
@@ -795,7 +795,7 @@ static void spi_hw_init(struct dw_spi *dws)
}
}
-int __devinit dw_spi_add_host(struct dw_spi *dws)
+int dw_spi_add_host(struct dw_spi *dws)
{
struct spi_master *master;
int ret;
@@ -877,7 +877,7 @@ exit:
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);
-void __devexit dw_spi_remove_host(struct dw_spi *dws)
+void dw_spi_remove_host(struct dw_spi *dws)
{
int status = 0;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 3a219599612a..d7bac60253c9 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -446,7 +446,7 @@ static inline int bits_per_word(const struct ep93xx_spi *espi)
struct spi_message *msg = espi->current_msg;
struct spi_transfer *t = msg->state;
- return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
+ return t->bits_per_word;
}
static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
@@ -1023,7 +1023,7 @@ static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
free_page((unsigned long)espi->zeropage);
}
-static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
+static int ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ep93xx_spi_info *info;
@@ -1085,10 +1085,9 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
espi->sspdr_phys = res->start + SSPDR;
- espi->regs_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!espi->regs_base) {
- dev_err(&pdev->dev, "failed to map resources\n");
- error = -ENODEV;
+ espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(espi->regs_base)) {
+ error = PTR_ERR(espi->regs_base);
goto fail_put_clock;
}
@@ -1138,7 +1137,7 @@ fail_release_master:
return error;
}
-static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
+static int ep93xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct ep93xx_spi *espi = spi_master_get_devdata(master);
@@ -1180,7 +1179,7 @@ static struct platform_driver ep93xx_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ep93xx_spi_probe,
- .remove = __devexit_p(ep93xx_spi_remove),
+ .remove = ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 8f6aa735a24c..c7a74f0ef892 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -398,12 +398,12 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
}
m->status = ret;
- m->complete(m->context);
+ spi_finalize_current_message(master);
return 0;
}
-static int __devinit falcon_sflash_probe(struct platform_device *pdev)
+static int falcon_sflash_probe(struct platform_device *pdev)
{
struct falcon_sflash *priv;
struct spi_master *master;
@@ -423,6 +423,7 @@ static int __devinit falcon_sflash_probe(struct platform_device *pdev)
master->mode_bits = SPI_MODE_3;
master->num_chipselect = 1;
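+ /* this controller cannot transmit and receive at the same time */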
+ master->flags = SPI_MASTER_HALF_DUPLEX;
master->bus_num = -1;
master->setup = falcon_sflash_setup;
master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
@@ -438,7 +439,7 @@ static int __devinit falcon_sflash_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit falcon_sflash_remove(struct platform_device *pdev)
+static int falcon_sflash_remove(struct platform_device *pdev)
{
struct falcon_sflash *priv = platform_get_drvdata(pdev);
@@ -455,7 +456,7 @@ MODULE_DEVICE_TABLE(of, falcon_sflash_match);
static struct platform_driver falcon_sflash_driver = {
.probe = falcon_sflash_probe,
- .remove = __devexit_p(falcon_sflash_remove),
+ .remove = falcon_sflash_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 27bdc47b5250..24610ca8955d 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -587,7 +587,7 @@ static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
iounmap(mspi->reg_base);
}
-static struct spi_master * __devinit fsl_espi_probe(struct device *dev,
+static struct spi_master * fsl_espi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -686,7 +686,7 @@ static int of_fsl_espi_get_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_fsl_espi_probe(struct platform_device *ofdev)
+static int of_fsl_espi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -725,7 +725,7 @@ err:
return ret;
}
-static int __devexit of_fsl_espi_remove(struct platform_device *dev)
+static int of_fsl_espi_remove(struct platform_device *dev)
{
return mpc8xxx_spi_remove(&dev->dev);
}
@@ -743,7 +743,7 @@ static struct platform_driver fsl_espi_driver = {
.of_match_table = of_fsl_espi_match,
},
.probe = of_fsl_espi_probe,
- .remove = __devexit_p(of_fsl_espi_remove),
+ .remove = of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 1503574b215a..8ade675a04f1 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -169,7 +169,7 @@ err:
return ret;
}
-int __devexit mpc8xxx_spi_remove(struct device *dev)
+int mpc8xxx_spi_remove(struct device *dev)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct spi_master *master;
@@ -189,7 +189,7 @@ int __devexit mpc8xxx_spi_remove(struct device *dev)
return 0;
}
-int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev)
+int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 6a62934ca74c..086a9eef2e05 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -843,7 +843,7 @@ static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
fsl_spi_cpm_free(mspi);
}
-static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
+static struct spi_master * fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -947,12 +947,12 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
struct device_node *np = dev->of_node;
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
- unsigned int ngpios;
+ int ngpios;
int i = 0;
int ret;
ngpios = of_gpio_count(np);
- if (!ngpios) {
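+ /* treat a missing or invalid gpios property (count <= 0) as "no chip-select lines" */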
+ if (ngpios <= 0) {
/*
* SPI w/o chip-select line. One SPI device is still permitted
* though.
@@ -1041,7 +1041,7 @@ static int of_fsl_spi_free_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_fsl_spi_probe(struct platform_device *ofdev)
+static int of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -1081,7 +1081,7 @@ err:
return ret;
}
-static int __devexit of_fsl_spi_remove(struct platform_device *ofdev)
+static int of_fsl_spi_remove(struct platform_device *ofdev)
{
int ret;
@@ -1105,7 +1105,7 @@ static struct platform_driver of_fsl_spi_driver = {
.of_match_table = of_fsl_spi_match,
},
.probe = of_fsl_spi_probe,
- .remove = __devexit_p(of_fsl_spi_remove),
+ .remove = of_fsl_spi_remove,
};
#ifdef CONFIG_MPC832x_RDB
@@ -1116,7 +1116,7 @@ static struct platform_driver of_fsl_spi_driver = {
* tree can work with OpenFirmware driver. But for now we support old trees
* as well.
*/
-static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
+static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
struct resource *mem;
int irq;
@@ -1139,7 +1139,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
+static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
return mpc8xxx_spi_remove(&pdev->dev);
}
@@ -1147,7 +1147,7 @@ static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
.probe = plat_mpc8xxx_spi_probe,
- .remove = __devexit_p(plat_mpc8xxx_spi_remove),
+ .remove = plat_mpc8xxx_spi_remove,
.driver = {
.name = "mpc8xxx_spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index a2b50c516b31..9ddef55a7165 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -287,7 +287,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -301,9 +301,8 @@ static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __devinit
-spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
- u16 *res_flags)
+static int spi_gpio_request(struct spi_gpio_platform_data *pdata,
+ const char *label, u16 *res_flags)
{
int value;
@@ -366,9 +365,26 @@ static int spi_gpio_probe_dt(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- pdata->sck = of_get_named_gpio(np, "gpio-sck", 0);
- pdata->miso = of_get_named_gpio(np, "gpio-miso", 0);
- pdata->mosi = of_get_named_gpio(np, "gpio-mosi", 0);
+ ret = of_get_named_gpio(np, "gpio-sck", 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio-sck property not found\n");
+ goto error_free;
+ }
+ pdata->sck = ret;
+
+ ret = of_get_named_gpio(np, "gpio-miso", 0);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "gpio-miso property not found, switching to no-rx mode\n");
+ pdata->miso = SPI_GPIO_NO_MISO;
+ } else
+ pdata->miso = ret;
+
+ ret = of_get_named_gpio(np, "gpio-mosi", 0);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "gpio-mosi property not found, switching to no-tx mode\n");
+ pdata->mosi = SPI_GPIO_NO_MOSI;
+ } else
+ pdata->mosi = ret;
ret = of_property_read_u32(np, "num-chipselects", &tmp);
if (ret < 0) {
@@ -392,7 +408,7 @@ static inline int spi_gpio_probe_dt(struct platform_device *pdev)
}
#endif
-static int __devinit spi_gpio_probe(struct platform_device *pdev)
+static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
struct spi_master *master;
@@ -485,7 +501,7 @@ gpio_free:
return status;
}
-static int __devexit spi_gpio_remove(struct platform_device *pdev)
+static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
@@ -518,7 +534,7 @@ static struct platform_driver spi_gpio_driver = {
.of_match_table = of_match_ptr(spi_gpio_dt_ids),
},
.probe = spi_gpio_probe,
- .remove = __devexit_p(spi_gpio_remove),
+ .remove = spi_gpio_remove,
};
module_platform_driver(spi_gpio_driver);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c9a0d8467de6..0befeeb522f4 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -750,7 +750,7 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
-static int __devinit spi_imx_probe(struct platform_device *pdev)
+static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
@@ -906,7 +906,7 @@ out_gpio_free:
return ret;
}
-static int __devexit spi_imx_remove(struct platform_device *pdev)
+static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -942,10 +942,11 @@ static struct platform_driver spi_imx_driver = {
},
.id_table = spi_imx_devtype,
.probe = spi_imx_probe,
- .remove = __devexit_p(spi_imx_remove),
+ .remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);
MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 0a1e39e94d06..89480b281d74 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -406,7 +406,7 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
}
/* bus_num is used only for the case dev->platform_data == NULL */
-static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
+static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq,
s16 bus_num)
{
@@ -438,6 +438,7 @@ static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->num_chipselect = pdata->max_chipselect;
}
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
master->setup = mpc512x_psc_spi_setup;
master->transfer = mpc512x_psc_spi_transfer;
master->cleanup = mpc512x_psc_spi_cleanup;
@@ -492,7 +493,7 @@ free_master:
return ret;
}
-static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
+static int mpc512x_psc_spi_do_remove(struct device *dev)
{
struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
@@ -508,7 +509,7 @@ static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
return 0;
}
-static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op)
+static int mpc512x_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
@@ -522,24 +523,18 @@ static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op)
regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
/* get PSC id (0..11, used by port_config) */
- if (op->dev.platform_data == NULL) {
- const u32 *psc_nump;
-
- psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL);
- if (!psc_nump || *psc_nump > 11) {
- dev_err(&op->dev, "mpc512x_psc_spi: Device node %s "
- "has invalid cell-index property\n",
- op->dev.of_node->full_name);
- return -EINVAL;
- }
- id = *psc_nump;
+ id = of_alias_get_id(op->dev.of_node, "spi");
+ if (id < 0) {
+ dev_err(&op->dev, "no alias id for %s\n",
+ op->dev.of_node->full_name);
+ return id;
}
return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
irq_of_parse_and_map(op->dev.of_node, 0), id);
}
-static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op)
+static int mpc512x_psc_spi_of_remove(struct platform_device *op)
{
return mpc512x_psc_spi_do_remove(&op->dev);
}
@@ -553,7 +548,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
static struct platform_driver mpc512x_psc_spi_of_driver = {
.probe = mpc512x_psc_spi_of_probe,
- .remove = __devexit_p(mpc512x_psc_spi_of_remove),
+ .remove = mpc512x_psc_spi_of_remove,
.driver = {
.name = "mpc512x-psc-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index bd47d262d53f..291120b37dbb 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -363,7 +363,7 @@ static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
}
/* bus_num is used only for the case dev->platform_data == NULL */
-static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
+static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq, s16 bus_num)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -450,7 +450,7 @@ free_master:
return ret;
}
-static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op)
+static int mpc52xx_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
@@ -479,7 +479,7 @@ static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op)
irq_of_parse_and_map(op->dev.of_node, 0), id);
}
-static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op)
+static int mpc52xx_psc_spi_of_remove(struct platform_device *op)
{
struct spi_master *master = spi_master_get(dev_get_drvdata(&op->dev));
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
@@ -505,7 +505,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
static struct platform_driver mpc52xx_psc_spi_of_driver = {
.probe = mpc52xx_psc_spi_of_probe,
- .remove = __devexit_p(mpc52xx_psc_spi_of_remove),
+ .remove = mpc52xx_psc_spi_of_remove,
.driver = {
.name = "mpc52xx-psc-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 045410650212..29f77056eedc 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -390,7 +390,7 @@ static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
/*
* OF Platform Bus Binding
*/
-static int __devinit mpc52xx_spi_probe(struct platform_device *op)
+static int mpc52xx_spi_probe(struct platform_device *op)
{
struct spi_master *master;
struct mpc52xx_spi *ms;
@@ -527,7 +527,7 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
return rc;
}
-static int __devexit mpc52xx_spi_remove(struct platform_device *op)
+static int mpc52xx_spi_remove(struct platform_device *op)
{
struct spi_master *master = spi_master_get(dev_get_drvdata(&op->dev));
struct mpc52xx_spi *ms = spi_master_get_devdata(master);
@@ -547,7 +547,7 @@ static int __devexit mpc52xx_spi_remove(struct platform_device *op)
return 0;
}
-static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
+static const struct of_device_id mpc52xx_spi_match[] = {
{ .compatible = "fsl,mpc5200-spi", },
{}
};
@@ -560,6 +560,6 @@ static struct platform_driver mpc52xx_spi_of_driver = {
.of_match_table = mpc52xx_spi_match,
},
.probe = mpc52xx_spi_probe,
- .remove = __devexit_p(mpc52xx_spi_remove),
+ .remove = mpc52xx_spi_remove,
};
module_platform_driver(mpc52xx_spi_of_driver);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 86dd04d6bc87..22a0af0147fb 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -241,6 +241,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
INIT_COMPLETION(spi->c);
ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
+ ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
if (*first)
@@ -256,8 +257,10 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
if ((sg_count + 1 == sgs) && *last)
ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
- if (ssp->devid == IMX23_SSP)
+ if (ssp->devid == IMX23_SSP) {
+ ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
ctrl0 |= min;
+ }
dma_xfer[sg_count].pio[0] = ctrl0;
dma_xfer[sg_count].pio[3] = min;
@@ -509,7 +512,7 @@ static const struct of_device_id mxs_spi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
-static int __devinit mxs_spi_probe(struct platform_device *pdev)
+static int mxs_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_spi_dt_ids, &pdev->dev);
@@ -538,9 +541,9 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
if (!iores || irq_err < 0 || irq_dma < 0)
return -EINVAL;
- base = devm_request_and_ioremap(&pdev->dev, iores);
- if (!base)
- return -EADDRNOTAVAIL;
+ base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl))
@@ -636,7 +639,7 @@ out_master_free:
return ret;
}
-static int __devexit mxs_spi_remove(struct platform_device *pdev)
+static int mxs_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct mxs_spi *spi;
@@ -659,7 +662,7 @@ static int __devexit mxs_spi_remove(struct platform_device *pdev)
static struct platform_driver mxs_spi_driver = {
.probe = mxs_spi_probe,
- .remove = __devexit_p(mxs_spi_remove),
+ .remove = mxs_spi_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index a6eca6ffdabe..b3f9ec83ef73 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -346,7 +346,7 @@ static void nuc900_init_spi(struct nuc900_spi *hw)
nuc900_enable_int(hw);
}
-static int __devinit nuc900_spi_probe(struct platform_device *pdev)
+static int nuc900_spi_probe(struct platform_device *pdev)
{
struct nuc900_spi *hw;
struct spi_master *master;
@@ -453,7 +453,7 @@ err_nomem:
return err;
}
-static int __devexit nuc900_spi_remove(struct platform_device *dev)
+static int nuc900_spi_remove(struct platform_device *dev)
{
struct nuc900_spi *hw = platform_get_drvdata(dev);
@@ -477,7 +477,7 @@ static int __devexit nuc900_spi_remove(struct platform_device *dev)
static struct platform_driver nuc900_spi_driver = {
.probe = nuc900_spi_probe,
- .remove = __devexit_p(nuc900_spi_remove),
+ .remove = nuc900_spi_remove,
.driver = {
.name = "nuc900-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 9d9071b730be..cb2e284bd814 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -54,7 +54,7 @@ struct tiny_spi {
unsigned int txc, rxc;
const u8 *txp;
u8 *rxp;
- unsigned int gpio_cs_count;
+ int gpio_cs_count;
int *gpio_cs;
};
@@ -74,7 +74,7 @@ static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
{
struct tiny_spi *hw = tiny_spi_to_hw(spi);
- if (hw->gpio_cs_count) {
+ if (hw->gpio_cs_count > 0) {
gpio_set_value(hw->gpio_cs[spi->chip_select],
(spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
}
@@ -243,7 +243,7 @@ static irqreturn_t tiny_spi_irq(int irq, void *dev)
#ifdef CONFIG_OF
#include <linux/of_gpio.h>
-static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+static int tiny_spi_of_probe(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
@@ -254,7 +254,7 @@ static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
if (!np)
return 0;
hw->gpio_cs_count = of_gpio_count(np);
- if (hw->gpio_cs_count) {
+ if (hw->gpio_cs_count > 0) {
hw->gpio_cs = devm_kzalloc(&pdev->dev,
hw->gpio_cs_count * sizeof(unsigned int),
GFP_KERNEL);
@@ -277,13 +277,13 @@ static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
return 0;
}
#else /* !CONFIG_OF */
-static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+static int tiny_spi_of_probe(struct platform_device *pdev)
{
return 0;
}
#endif /* CONFIG_OF */
-static int __devinit tiny_spi_probe(struct platform_device *pdev)
+static int tiny_spi_probe(struct platform_device *pdev)
{
struct tiny_spi_platform_data *platp = pdev->dev.platform_data;
struct tiny_spi *hw;
@@ -352,7 +352,7 @@ static int __devinit tiny_spi_probe(struct platform_device *pdev)
goto exit_gpio;
gpio_direction_output(hw->gpio_cs[i], 1);
}
- hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count);
+ hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
@@ -373,7 +373,7 @@ exit:
return err;
}
-static int __devexit tiny_spi_remove(struct platform_device *pdev)
+static int tiny_spi_remove(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct spi_master *master = hw->bitbang.master;
@@ -399,7 +399,7 @@ MODULE_DEVICE_TABLE(of, tiny_spi_match);
static struct platform_driver tiny_spi_driver = {
.probe = tiny_spi_probe,
- .remove = __devexit_p(tiny_spi_remove),
+ .remove = tiny_spi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index ea8fb2efb0f8..24daf964a409 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -266,7 +266,7 @@ static int octeon_spi_nop_transfer_hardware(struct spi_master *master)
return 0;
}
-static int __devinit octeon_spi_probe(struct platform_device *pdev)
+static int octeon_spi_probe(struct platform_device *pdev)
{
struct resource *res_mem;
@@ -326,7 +326,7 @@ fail:
return err;
}
-static int __devexit octeon_spi_remove(struct platform_device *pdev)
+static int octeon_spi_remove(struct platform_device *pdev)
{
struct octeon_spi *p = platform_get_drvdata(pdev);
u64 register_base = p->register_base;
@@ -352,7 +352,7 @@ static struct platform_driver octeon_spi_driver = {
.of_match_table = octeon_spi_match,
},
.probe = octeon_spi_probe,
- .remove = __devexit_p(octeon_spi_remove),
+ .remove = octeon_spi_remove,
};
module_platform_driver(octeon_spi_driver);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index dfb4b7f448c5..78d29a18dcc4 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -481,12 +481,12 @@ static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
-static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k)
+static int omap1_spi100k_reset(struct omap1_spi100k *spi100k)
{
return 0;
}
-static int __devinit omap1_spi100k_probe(struct platform_device *pdev)
+static int omap1_spi100k_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct omap1_spi100k *spi100k;
@@ -560,7 +560,7 @@ err1:
return status;
}
-static int __exit omap1_spi100k_remove(struct platform_device *pdev)
+static int omap1_spi100k_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct omap1_spi100k *spi100k;
@@ -604,7 +604,7 @@ static struct platform_driver omap1_spi100k_driver = {
.name = "omap1_spi100k",
.owner = THIS_MODULE,
},
- .remove = __exit_p(omap1_spi100k_remove),
+ .remove = omap1_spi100k_remove,
};
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 0a94d9dc9c31..102b233b50c4 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -476,7 +476,7 @@ static void uwire_off(struct uwire_spi *uwire)
spi_master_put(uwire->bitbang.master);
}
-static int __init uwire_probe(struct platform_device *pdev)
+static int uwire_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct uwire_spi *uwire;
@@ -536,7 +536,7 @@ static int __init uwire_probe(struct platform_device *pdev)
return status;
}
-static int __exit uwire_remove(struct platform_device *pdev)
+static int uwire_remove(struct platform_device *pdev)
{
struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev);
int status;
@@ -557,7 +557,7 @@ static struct platform_driver uwire_driver = {
.name = "omap_uwire",
.owner = THIS_MODULE,
},
- .remove = __exit_p(uwire_remove),
+ .remove = uwire_remove,
// suspend ... unuse ck
// resume ... use ck
};
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3542fdc664b1..893c3d78e426 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -39,7 +39,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/err.h>
#include <linux/spi/spi.h>
@@ -130,6 +129,7 @@ struct omap2_mcspi {
struct omap2_mcspi_dma *dma_channels;
struct device *dev;
struct omap2_mcspi_regs ctx;
+ unsigned int pin_dir:1;
};
struct omap2_mcspi_cs {
@@ -298,10 +298,10 @@ static void omap2_mcspi_rx_callback(void *data)
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- complete(&mcspi_dma->dma_rx_completion);
-
/* We must disable the DMA RX request */
omap2_mcspi_set_dma_req(spi, 1, 0);
+
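+ /* signal completion only after the DMA request has been disabled */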
+ complete(&mcspi_dma->dma_rx_completion);
}
static void omap2_mcspi_tx_callback(void *data)
@@ -310,10 +310,10 @@ static void omap2_mcspi_tx_callback(void *data)
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- complete(&mcspi_dma->dma_tx_completion);
-
/* We must disable the DMA TX request */
omap2_mcspi_set_dma_req(spi, 0, 0);
+
+ complete(&mcspi_dma->dma_tx_completion);
}
static void omap2_mcspi_tx_dma(struct spi_device *spi,
@@ -323,19 +323,11 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
unsigned int count;
- u8 * rx;
- const u8 * tx;
- void __iomem *chstat_reg;
- struct omap2_mcspi_cs *cs = spi->controller_state;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
count = xfer->len;
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
- chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
-
if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx;
struct scatterlist sg;
@@ -359,19 +351,6 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
- wait_for_completion(&mcspi_dma->dma_tx_completion);
- dma_unmap_single(mcspi->dev, xfer->tx_dma, count,
- DMA_TO_DEVICE);
-
- /* for TX_ONLY mode, be sure all words have shifted out */
- if (rx == NULL) {
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_TXS) < 0)
- dev_err(&spi->dev, "TXS timed out\n");
- else if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_EOT) < 0)
- dev_err(&spi->dev, "EOT timed out\n");
- }
}
static unsigned
@@ -492,6 +471,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
struct dma_slave_config cfg;
enum dma_slave_buswidth width;
unsigned es;
+ void __iomem *chstat_reg;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -526,8 +506,24 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_tx_dma(spi, xfer, cfg);
if (rx != NULL)
- return omap2_mcspi_rx_dma(spi, xfer, cfg, es);
-
+ count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
+
+ if (tx != NULL) {
+ chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ wait_for_completion(&mcspi_dma->dma_tx_completion);
+ dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
+ DMA_TO_DEVICE);
+
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (rx == NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0)
+ dev_err(&spi->dev, "TXS timed out\n");
+ else if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0)
+ dev_err(&spi->dev, "EOT timed out\n");
+ }
+ }
return count;
}
@@ -765,8 +761,15 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
/* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
* REVISIT: this controller could support SPI_3WIRE mode.
*/
- l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
- l |= OMAP2_MCSPI_CHCONF_DPE0;
+ if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+ l &= ~OMAP2_MCSPI_CHCONF_IS;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE1;
+ l |= OMAP2_MCSPI_CHCONF_DPE0;
+ } else {
+ l |= OMAP2_MCSPI_CHCONF_IS;
+ l |= OMAP2_MCSPI_CHCONF_DPE1;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+ }
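A stand-alone model of the pin-direction selection added above; the mask values are placeholders rather than the real CHCONF bit positions, and only the set/clear logic mirrors the patch:

#include <stdint.h>
#include <stdio.h>

#define CHCONF_IS   (1u << 18)	/* input select bit (placeholder value) */
#define CHCONF_DPE1 (1u << 17)	/* data line 1 direction bit (placeholder) */
#define CHCONF_DPE0 (1u << 16)	/* data line 0 direction bit (placeholder) */

enum pindir { D0_IN_D1_OUT, D0_OUT_D1_IN };

static uint32_t apply_pindir(uint32_t chconf, enum pindir dir)
{
	if (dir == D0_IN_D1_OUT) {
		chconf &= ~(CHCONF_IS | CHCONF_DPE1);
		chconf |= CHCONF_DPE0;
	} else {
		chconf |= CHCONF_IS | CHCONF_DPE1;
		chconf &= ~CHCONF_DPE0;
	}
	return chconf;
}

int main(void)
{
	printf("%#x\n", (unsigned int)apply_pindir(0, D0_OUT_D1_IN));
	return 0;
}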
/* wordlength */
l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
@@ -924,6 +927,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
struct spi_device *spi;
struct spi_transfer *t = NULL;
+ struct spi_master *master;
int cs_active = 0;
struct omap2_mcspi_cs *cs;
struct omap2_mcspi_device_config *cd;
@@ -932,6 +936,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
u32 chconf;
spi = m->spi;
+ master = spi->master;
cs = spi->controller_state;
cd = spi->controller_data;
@@ -949,6 +954,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
if (!t->speed_hz && !t->bits_per_word)
par_override = 0;
}
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+
if (!cs_active) {
omap2_mcspi_force_cs(spi, 1);
@@ -1010,6 +1023,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
if (cs_active)
omap2_mcspi_force_cs(spi, 0);
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+
omap2_mcspi_set_enable(spi, 0);
m->status = status;
@@ -1017,7 +1038,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
}
static int omap2_mcspi_transfer_one_message(struct spi_master *master,
- struct spi_message *m)
+ struct spi_message *m)
{
struct omap2_mcspi *mcspi;
struct spi_transfer *t;
@@ -1038,7 +1059,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
|| (len && !(rx_buf || tx_buf))
|| (t->bits_per_word &&
( t->bits_per_word < 4
- || t->bits_per_word > 32))) {
+ || t->bits_per_word > 32))) {
dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
@@ -1049,8 +1070,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
}
if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
- t->speed_hz,
- OMAP2_MCSPI_MAX_FREQ >> 15);
+ t->speed_hz,
+ OMAP2_MCSPI_MAX_FREQ >> 15);
return -EINVAL;
}
@@ -1085,7 +1106,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
return 0;
}
-static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
+static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@@ -1096,7 +1117,7 @@ static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
return ret;
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
- OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ OMAP2_MCSPI_WAKEUPENABLE_WKEN);
ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
omap2_mcspi_set_master_mode(master);
@@ -1138,7 +1159,7 @@ static const struct of_device_id omap_mcspi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
-static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
+static int omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
const struct omap2_mcspi_platform_config *pdata;
@@ -1167,6 +1188,11 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
+ dev_set_drvdata(&pdev->dev, master);
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi->master = master;
+
match = of_match_device(omap_mcspi_of_match, &pdev->dev);
if (match) {
u32 num_cs = 1; /* default number of chipselect */
@@ -1175,19 +1201,17 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
master->num_chipselect = num_cs;
master->bus_num = bus_num++;
+ if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
+ mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
} else {
pdata = pdev->dev.platform_data;
master->num_chipselect = pdata->num_cs;
if (pdev->id != -1)
master->bus_num = pdev->id;
+ mcspi->pin_dir = pdata->pin_dir;
}
regs_offset = pdata->regs_offset;
- dev_set_drvdata(&pdev->dev, master);
-
- mcspi = spi_master_get_devdata(master);
- mcspi->master = master;
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
@@ -1198,10 +1222,9 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
r->end += regs_offset;
mcspi->phys = r->start;
- mcspi->base = devm_request_and_ioremap(&pdev->dev, r);
- if (!mcspi->base) {
- dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
- status = -ENOMEM;
+ mcspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(mcspi->base)) {
+ status = PTR_ERR(mcspi->base);
goto free_master;
}
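The hunk above switches to devm_ioremap_resource(), which prints its own error message and returns an ERR_PTR on failure. A minimal probe() sketch of the idiom, with hypothetical "foo" names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* NULL res is handled */
	if (IS_ERR(base))
		return PTR_ERR(base);	/* mapping is device-managed: no cleanup */

	/* ... program the registers through base ... */
	return 0;
}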
@@ -1222,7 +1245,7 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
sprintf(dma_ch_name, "rx%d", i);
dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- dma_ch_name);
+ dma_ch_name);
if (!dma_res) {
dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
status = -ENODEV;
@@ -1232,7 +1255,7 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
sprintf(dma_ch_name, "tx%d", i);
dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- dma_ch_name);
+ dma_ch_name);
if (!dma_res) {
dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
status = -ENODEV;
@@ -1248,7 +1271,7 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl))
dev_warn(&pdev->dev,
- "pins are not configured from the driver\n");
+ "pins are not configured from the driver\n");
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
@@ -1272,7 +1295,7 @@ free_master:
return status;
}
-static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
+static int omap2_mcspi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct omap2_mcspi *mcspi;
@@ -1341,7 +1364,7 @@ static struct platform_driver omap2_mcspi_driver = {
.of_match_table = omap_mcspi_of_match,
},
.probe = omap2_mcspi_probe,
- .remove = __devexit_p(omap2_mcspi_remove),
+ .remove = omap2_mcspi_remove,
};
module_platform_driver(omap2_mcspi_driver);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index b17c09cf0a05..66a5f82cf138 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -32,8 +32,12 @@
#define ORION_SPI_DATA_IN_REG 0x0c
#define ORION_SPI_INT_CAUSE_REG 0x10
+#define ORION_SPI_MODE_CPOL (1 << 11)
+#define ORION_SPI_MODE_CPHA (1 << 12)
#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5)
#define ORION_SPI_CLK_PRESCALE_MASK 0x1F
+#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
+ ORION_SPI_MODE_CPHA)
struct orion_spi {
struct spi_master *master;
@@ -123,6 +127,23 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
return 0;
}
+static void
+orion_spi_mode_set(struct spi_device *spi)
+{
+ u32 reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+ reg &= ~ORION_SPI_MODE_MASK;
+ if (spi->mode & SPI_CPOL)
+ reg |= ORION_SPI_MODE_CPOL;
+ if (spi->mode & SPI_CPHA)
+ reg |= ORION_SPI_MODE_CPHA;
+ writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+}
+
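A stand-alone model of the read-modify-write that orion_spi_mode_set() performs; the register is faked with a plain variable, and the CPOL/CPHA positions follow the defines added in this patch:

#include <stdint.h>

#define MODE_CPOL (1u << 11)
#define MODE_CPHA (1u << 12)
#define MODE_MASK (MODE_CPOL | MODE_CPHA)

/* spi->mode flag bits as defined by <linux/spi/spi.h> */
#define SPI_CPHA 0x01
#define SPI_CPOL 0x02

static uint32_t set_mode_bits(uint32_t if_config, unsigned int spi_mode)
{
	if_config &= ~MODE_MASK;		/* start from mode 0 */
	if (spi_mode & SPI_CPOL)
		if_config |= MODE_CPOL;		/* clock idles high */
	if (spi_mode & SPI_CPHA)
		if_config |= MODE_CPHA;		/* sample on the second edge */
	return if_config;
}

int main(void)
{
	/* SPI mode 3 sets both bits */
	return set_mode_bits(0, SPI_CPOL | SPI_CPHA) == MODE_MASK ? 0 : 1;
}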
/*
* called only when no transfer is active on the bus
*/
@@ -142,6 +163,8 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if ((t != NULL) && t->bits_per_word)
bits_per_word = t->bits_per_word;
+ orion_spi_mode_set(spi);
+
rc = orion_spi_baudrate_set(spi, speed);
if (rc)
return rc;
@@ -343,7 +366,7 @@ msg_done:
return 0;
}
-static int __init orion_spi_reset(struct orion_spi *orion_spi)
+static int orion_spi_reset(struct orion_spi *orion_spi)
{
/* Verify that the CS is deasserted */
orion_spi_set_cs(orion_spi, 0);
@@ -373,7 +396,7 @@ static int orion_spi_setup(struct spi_device *spi)
return 0;
}
-static int __init orion_spi_probe(struct platform_device *pdev)
+static int orion_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct orion_spi *spi;
@@ -399,7 +422,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
}
/* we support only mode 0, and no options */
- master->mode_bits = 0;
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
master->setup = orion_spi_setup;
master->transfer_one_message = orion_spi_transfer_one_message;
@@ -456,7 +479,7 @@ out:
}
-static int __exit orion_spi_remove(struct platform_device *pdev)
+static int orion_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct resource *r;
@@ -478,7 +501,7 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:" DRIVER_NAME);
-static const struct of_device_id orion_spi_of_match_table[] __devinitdata = {
+static const struct of_device_id orion_spi_of_match_table[] = {
{ .compatible = "marvell,orion-spi", },
{}
};
@@ -490,20 +513,11 @@ static struct platform_driver orion_spi_driver = {
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(orion_spi_of_match_table),
},
- .remove = __exit_p(orion_spi_remove),
+ .probe = orion_spi_probe,
+ .remove = orion_spi_remove,
};
-static int __init orion_spi_init(void)
-{
- return platform_driver_probe(&orion_spi_driver, orion_spi_probe);
-}
-module_init(orion_spi_init);
-
-static void __exit orion_spi_exit(void)
-{
- platform_driver_unregister(&orion_spi_driver);
-}
-module_exit(orion_spi_exit);
+module_platform_driver(orion_spi_driver);
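For reference, module_platform_driver() generates roughly the register/unregister boilerplate the deleted init/exit pair used to spell out (the removed code additionally went through platform_driver_probe()). A sketch with a hypothetical "foo" driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver foo_driver = {
	.driver = { .name = "foo" },
};

/* roughly what module_platform_driver(foo_driver) expands to */
static int __init foo_init(void)
{
	return platform_driver_register(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");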
MODULE_DESCRIPTION("Orion SPI driver");
MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index a1db91a99b89..b0fe393c882c 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -371,6 +371,7 @@ struct pl022 {
/* Two optional pin states - default & sleep */
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_idle;
struct pinctrl_state *pins_sleep;
struct spi_master *master;
struct pl022_ssp_controller *master_info;
@@ -1088,7 +1089,7 @@ err_alloc_rx_sg:
return -ENOMEM;
}
-static int __devinit pl022_dma_probe(struct pl022 *pl022)
+static int pl022_dma_probe(struct pl022 *pl022)
{
dma_cap_mask_t mask;
@@ -2057,8 +2058,7 @@ pl022_platform_data_dt_get(struct device *dev)
return pd;
}
-static int __devinit
-pl022_probe(struct amba_device *adev, const struct amba_id *id)
+static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
@@ -2116,6 +2116,11 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
} else
dev_err(dev, "could not get default pinstate\n");
+ pl022->pins_idle = pinctrl_lookup_state(pl022->pinctrl,
+ PINCTRL_STATE_IDLE);
+ if (IS_ERR(pl022->pins_idle))
+ dev_dbg(dev, "could not get idle pinstate\n");
+
pl022->pins_sleep = pinctrl_lookup_state(pl022->pinctrl,
PINCTRL_STATE_SLEEP);
if (IS_ERR(pl022->pins_sleep))
@@ -2246,10 +2251,9 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_set_autosuspend_delay(dev,
platform_info->autosuspend_delay);
pm_runtime_use_autosuspend(dev);
- pm_runtime_put_autosuspend(dev);
- } else {
- pm_runtime_put(dev);
}
+ pm_runtime_put(dev);
+
return 0;
err_spi_register:
@@ -2270,7 +2274,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
return status;
}
-static int __devexit
+static int
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
@@ -2303,35 +2307,47 @@ pl022_remove(struct amba_device *adev)
* the runtime counterparts to handle external resources like
* clocks, pins and regulators when going to sleep.
*/
-static void pl022_suspend_resources(struct pl022 *pl022)
+static void pl022_suspend_resources(struct pl022 *pl022, bool runtime)
{
int ret;
+ struct pinctrl_state *pins_state;
clk_disable(pl022->clk);
+ pins_state = runtime ? pl022->pins_idle : pl022->pins_sleep;
/* Optionally let pins go into sleep states */
- if (!IS_ERR(pl022->pins_sleep)) {
- ret = pinctrl_select_state(pl022->pinctrl,
- pl022->pins_sleep);
+ if (!IS_ERR(pins_state)) {
+ ret = pinctrl_select_state(pl022->pinctrl, pins_state);
if (ret)
- dev_err(&pl022->adev->dev,
- "could not set pins to sleep state\n");
+ dev_err(&pl022->adev->dev, "could not set %s pins\n",
+ runtime ? "idle" : "sleep");
}
}
-static void pl022_resume_resources(struct pl022 *pl022)
+static void pl022_resume_resources(struct pl022 *pl022, bool runtime)
{
int ret;
/* Optionally enable pins to be muxed in and configured */
+ /* First go to the default state */
if (!IS_ERR(pl022->pins_default)) {
- ret = pinctrl_select_state(pl022->pinctrl,
- pl022->pins_default);
+ ret = pinctrl_select_state(pl022->pinctrl, pl022->pins_default);
if (ret)
dev_err(&pl022->adev->dev,
"could not set default pins\n");
}
+ if (!runtime) {
+ /* Then let's idle the pins until the next transfer happens */
+ if (!IS_ERR(pl022->pins_idle)) {
+ ret = pinctrl_select_state(pl022->pinctrl,
+ pl022->pins_idle);
+ if (ret)
+ dev_err(&pl022->adev->dev,
+ "could not set idle pins\n");
+ }
+ }
+
clk_enable(pl022->clk);
}
#endif
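A minimal sketch of the state selection introduced above: runtime suspend picks the optional "idle" pinctrl state, system sleep picks "sleep"; error handling is trimmed and the helper name is hypothetical.

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/types.h>

static void pins_enter_low_power(struct pinctrl *p,
				 struct pinctrl_state *idle,
				 struct pinctrl_state *sleep,
				 bool runtime)
{
	struct pinctrl_state *s = runtime ? idle : sleep;

	/* both states are optional; skip silently if not provided */
	if (!IS_ERR(s))
		pinctrl_select_state(p, s);
}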
@@ -2347,7 +2363,9 @@ static int pl022_suspend(struct device *dev)
dev_warn(dev, "cannot suspend master\n");
return ret;
}
- pl022_suspend_resources(pl022);
+
+ pm_runtime_get_sync(dev);
+ pl022_suspend_resources(pl022, false);
dev_dbg(dev, "suspended\n");
return 0;
@@ -2358,7 +2376,8 @@ static int pl022_resume(struct device *dev)
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
- pl022_resume_resources(pl022);
+ pl022_resume_resources(pl022, false);
+ pm_runtime_put(dev);
/* Start the queue running */
ret = spi_master_resume(pl022->master);
@@ -2376,7 +2395,7 @@ static int pl022_runtime_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_suspend_resources(pl022);
+ pl022_suspend_resources(pl022, true);
return 0;
}
@@ -2384,7 +2403,7 @@ static int pl022_runtime_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_resume_resources(pl022);
+ pl022_resume_resources(pl022, true);
return 0;
}
#endif
@@ -2464,7 +2483,7 @@ static struct amba_driver pl022_driver = {
},
.id_table = pl022_ids,
.probe = pl022_probe,
- .remove = __devexit_p(pl022_remove),
+ .remove = pl022_remove,
};
static int __init pl022_init(void)
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 7a85f22b6474..357f183a4fb7 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -389,7 +389,7 @@ static void free_gpios(struct ppc4xx_spi *hw)
/*
* platform_device layer stuff...
*/
-static int __init spi_ppc4xx_of_probe(struct platform_device *op)
+static int spi_ppc4xx_of_probe(struct platform_device *op)
{
struct ppc4xx_spi *hw;
struct spi_master *master;
@@ -419,7 +419,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
* This includes both "null" gpio's and real ones.
*/
num_gpios = of_gpio_count(np);
- if (num_gpios) {
+ if (num_gpios > 0) {
int i;
hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL);
@@ -471,7 +471,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
/* this many pins in all GPIO controllers */
- bbp->master->num_chipselect = num_gpios;
+ bbp->master->num_chipselect = num_gpios > 0 ? num_gpios : 0;
/* Get the clock for the OPB */
opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb");
@@ -560,7 +560,7 @@ free_master:
return ret;
}
-static int __exit spi_ppc4xx_of_remove(struct platform_device *op)
+static int spi_ppc4xx_of_remove(struct platform_device *op)
{
struct spi_master *master = dev_get_drvdata(&op->dev);
struct ppc4xx_spi *hw = spi_master_get_devdata(master);
@@ -583,7 +583,7 @@ MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
static struct platform_driver spi_ppc4xx_of_driver = {
.probe = spi_ppc4xx_of_probe,
- .remove = __exit_p(spi_ppc4xx_of_remove),
+ .remove = spi_ppc4xx_of_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
new file mode 100644
index 000000000000..c735c5a008a2
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -0,0 +1,392 @@
+/*
+ * PXA2xx SPI DMA engine support.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+#include "spi-pxa2xx.h"
+
+static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
+ enum dma_data_direction dir)
+{
+ int i, nents, len = drv_data->len;
+ struct scatterlist *sg;
+ struct device *dmadev;
+ struct sg_table *sgt;
+ void *buf, *pbuf;
+
+ /*
+ * Some DMA controllers have problems transferring buffers that are
+ * not a multiple of 4 bytes. So we truncate the transfer so that it
+ * is suitable for such controllers, and handle the trailing bytes
+ * manually after the DMA completes.
+ *
+ * REVISIT: It would be better if this information could be
+ * retrieved directly from the DMA device in a similar way to how
+ * ->copy_align etc. is handled.
+ */
+ len = ALIGN(drv_data->len, 4);
+
+ if (dir == DMA_TO_DEVICE) {
+ dmadev = drv_data->tx_chan->device->dev;
+ sgt = &drv_data->tx_sgt;
+ buf = drv_data->tx;
+ drv_data->tx_map_len = len;
+ } else {
+ dmadev = drv_data->rx_chan->device->dev;
+ sgt = &drv_data->rx_sgt;
+ buf = drv_data->rx;
+ drv_data->rx_map_len = len;
+ }
+
+ nents = DIV_ROUND_UP(len, SZ_2K);
+ if (nents != sgt->nents) {
+ int ret;
+
+ sg_free_table(sgt);
+ ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ pbuf = buf;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = min_t(size_t, len, SZ_2K);
+
+ if (buf)
+ sg_set_buf(sg, pbuf, bytes);
+ else
+ sg_set_buf(sg, drv_data->dummy, bytes);
+
+ pbuf += bytes;
+ len -= bytes;
+ }
+
+ nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
+ if (!nents)
+ return -ENOMEM;
+
+ return nents;
+}
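The mapping helper above rounds the length up to a 4-byte multiple and slices it into 2 KiB scatterlist entries. A stand-alone model of just that arithmetic (no kernel calls):

#include <stddef.h>
#include <stdio.h>

#define SZ_2K     2048u
#define ALIGN4(x) (((x) + 3) & ~(size_t)3)

static size_t sg_entries_for(size_t len)
{
	len = ALIGN4(len);
	return (len + SZ_2K - 1) / SZ_2K;	/* DIV_ROUND_UP(len, SZ_2K) */
}

int main(void)
{
	printf("%zu\n", sg_entries_for(5000));	/* 5000 bytes -> 3 entries */
	return 0;
}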
+
+static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
+ enum dma_data_direction dir)
+{
+ struct device *dmadev;
+ struct sg_table *sgt;
+
+ if (dir == DMA_TO_DEVICE) {
+ dmadev = drv_data->tx_chan->device->dev;
+ sgt = &drv_data->tx_sgt;
+ } else {
+ dmadev = drv_data->rx_chan->device->dev;
+ sgt = &drv_data->rx_sgt;
+ }
+
+ dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
+}
+
+static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
+{
+ if (!drv_data->dma_mapped)
+ return;
+
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
+
+ drv_data->dma_mapped = 0;
+}
+
+static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
+ bool error)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+
+ /*
+ * It is possible that one CPU is handling the ROR interrupt while
+ * another just gets the DMA completion. Calling pump_transfers() twice
+ * for the same transfer leads to problems, so we prevent concurrent calls
+ * by using ->dma_running.
+ */
+ if (atomic_dec_and_test(&drv_data->dma_running)) {
+ void __iomem *reg = drv_data->ioaddr;
+
+ /*
+ * If the other CPU is still handling the ROR interrupt we
+ * might not know about the error yet. So we re-check the
+ * ROR bit here before we clear the status register.
+ */
+ if (!error) {
+ u32 status = read_SSSR(reg) & drv_data->mask_sr;
+ error = status & SSSR_ROR;
+ }
+
+ /* Clear status & disable interrupts */
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ if (!pxa25x_ssp_comp(drv_data))
+ write_SSTO(0, reg);
+
+ if (!error) {
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ /* Handle the last bytes of unaligned transfer */
+ drv_data->tx += drv_data->tx_map_len;
+ drv_data->write(drv_data);
+
+ drv_data->rx += drv_data->rx_map_len;
+ drv_data->read(drv_data);
+
+ msg->actual_length += drv_data->len;
+ msg->state = pxa2xx_spi_next_transfer(drv_data);
+ } else {
+ /* In case we got an error we disable the SSP now */
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+
+ msg->state = ERROR_STATE;
+ }
+
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+}
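A sketch of the ->dma_running scheme described in the comment above: the counter is set to 1 when the DMA is started, so of the two possible callers (ROR interrupt and DMA completion callback) only the first one to decrement reaches zero and finalizes the transfer. finish_transfer() is an assumed placeholder.

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t dma_running;			/* set to 1 at DMA start */

static void finish_transfer(bool error);	/* assumed finalizer, elsewhere */

static void dma_done_path(bool error)
{
	if (atomic_dec_and_test(&dma_running))
		finish_transfer(error);		/* runs exactly once */
}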
+
+static void pxa2xx_spi_dma_callback(void *data)
+{
+ pxa2xx_spi_dma_transfer_complete(data, false);
+}
+
+static struct dma_async_tx_descriptor *
+pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
+ enum dma_transfer_direction dir)
+{
+ struct pxa2xx_spi_master *pdata = drv_data->master_info;
+ struct chip_data *chip = drv_data->cur_chip;
+ enum dma_slave_buswidth width;
+ struct dma_slave_config cfg;
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+ int nents, ret;
+
+ switch (drv_data->n_bytes) {
+ case 1:
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 2:
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ default:
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = drv_data->ssdr_physical;
+ cfg.dst_addr_width = width;
+ cfg.dst_maxburst = chip->dma_burst_size;
+ cfg.slave_id = pdata->tx_slave_id;
+
+ sgt = &drv_data->tx_sgt;
+ nents = drv_data->tx_nents;
+ chan = drv_data->tx_chan;
+ } else {
+ cfg.src_addr = drv_data->ssdr_physical;
+ cfg.src_addr_width = width;
+ cfg.src_maxburst = chip->dma_burst_size;
+ cfg.slave_id = pdata->rx_slave_id;
+
+ sgt = &drv_data->rx_sgt;
+ nents = drv_data->rx_nents;
+ chan = drv_data->rx_chan;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
+ return NULL;
+ }
+
+ return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+}
+
+static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param)
+{
+ const struct pxa2xx_spi_master *pdata = param;
+
+ return chan->chan_id == pdata->tx_chan_id ||
+ chan->chan_id == pdata->rx_chan_id;
+}
+
+bool pxa2xx_spi_dma_is_possible(size_t len)
+{
+ return len <= MAX_DMA_LEN;
+}
+
+int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ const struct chip_data *chip = drv_data->cur_chip;
+ int ret;
+
+ if (!chip->enable_dma)
+ return 0;
+
+ /* Don't bother with DMA if we can't do even a single burst */
+ if (drv_data->len < chip->dma_burst_size)
+ return 0;
+
+ ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
+ if (ret <= 0) {
+ dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
+ return 0;
+ }
+
+ drv_data->tx_nents = ret;
+
+ ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
+ if (ret <= 0) {
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
+ dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
+ return 0;
+ }
+
+ drv_data->rx_nents = ret;
+ return 1;
+}
+
+irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
+{
+ u32 status;
+
+ status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
+ if (status & SSSR_ROR) {
+ dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
+
+ dmaengine_terminate_all(drv_data->rx_chan);
+ dmaengine_terminate_all(drv_data->tx_chan);
+
+ pxa2xx_spi_dma_transfer_complete(drv_data, true);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
+{
+ struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+
+ tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
+ if (!tx_desc) {
+ dev_err(&drv_data->pdev->dev,
+ "failed to get DMA TX descriptor\n");
+ return -EBUSY;
+ }
+
+ rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
+ if (!rx_desc) {
+ dev_err(&drv_data->pdev->dev,
+ "failed to get DMA RX descriptor\n");
+ return -EBUSY;
+ }
+
+ /* We are ready when RX completes */
+ rx_desc->callback = pxa2xx_spi_dma_callback;
+ rx_desc->callback_param = drv_data;
+
+ dmaengine_submit(rx_desc);
+ dmaengine_submit(tx_desc);
+ return 0;
+}
+
+void pxa2xx_spi_dma_start(struct driver_data *drv_data)
+{
+ dma_async_issue_pending(drv_data->rx_chan);
+ dma_async_issue_pending(drv_data->tx_chan);
+
+ atomic_set(&drv_data->dma_running, 1);
+}
+
+int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ struct pxa2xx_spi_master *pdata = drv_data->master_info;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ drv_data->dummy = devm_kzalloc(&drv_data->pdev->dev, SZ_2K, GFP_KERNEL);
+ if (!drv_data->dummy)
+ return -ENOMEM;
+
+ drv_data->tx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
+ pdata);
+ if (!drv_data->tx_chan)
+ return -ENODEV;
+
+ drv_data->rx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
+ pdata);
+ if (!drv_data->rx_chan) {
+ dma_release_channel(drv_data->tx_chan);
+ drv_data->tx_chan = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_release(struct driver_data *drv_data)
+{
+ if (drv_data->rx_chan) {
+ dmaengine_terminate_all(drv_data->rx_chan);
+ dma_release_channel(drv_data->rx_chan);
+ sg_free_table(&drv_data->rx_sgt);
+ drv_data->rx_chan = NULL;
+ }
+ if (drv_data->tx_chan) {
+ dmaengine_terminate_all(drv_data->tx_chan);
+ dma_release_channel(drv_data->tx_chan);
+ sg_free_table(&drv_data->tx_sgt);
+ drv_data->tx_chan = NULL;
+ }
+}
+
+void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
+{
+}
+
+int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word, u32 *burst_code,
+ u32 *threshold)
+{
+ struct pxa2xx_spi_chip *chip_info = spi->controller_data;
+
+ /*
+ * If the DMA burst size is given in chip_info we use that,
+ * otherwise we use the default. Also we use the default FIFO
+ * thresholds for now.
+ */
+ *burst_code = chip_info ? chip_info->dma_burst_size : 16;
+ *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
+ | SSCR1_TxTresh(TX_THRESH_DFLT);
+
+ return 0;
+}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 9f6ba34b172c..364964d2ed04 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -8,147 +8,58 @@
#include <linux/module.h>
#include <linux/spi/pxa2xx_spi.h>
-struct ce4100_info {
- struct ssp_device ssp;
- struct platform_device *spi_pdev;
-};
-
-static DEFINE_MUTEX(ssp_lock);
-static LIST_HEAD(ssp_list);
-
-struct ssp_device *pxa_ssp_request(int port, const char *label)
-{
- struct ssp_device *ssp = NULL;
-
- mutex_lock(&ssp_lock);
-
- list_for_each_entry(ssp, &ssp_list, node) {
- if (ssp->port_id == port && ssp->use_count == 0) {
- ssp->use_count++;
- ssp->label = label;
- break;
- }
- }
-
- mutex_unlock(&ssp_lock);
-
- if (&ssp->node == &ssp_list)
- return NULL;
-
- return ssp;
-}
-EXPORT_SYMBOL_GPL(pxa_ssp_request);
-
-void pxa_ssp_free(struct ssp_device *ssp)
-{
- mutex_lock(&ssp_lock);
- if (ssp->use_count) {
- ssp->use_count--;
- ssp->label = NULL;
- } else
- dev_err(&ssp->pdev->dev, "device already free\n");
- mutex_unlock(&ssp_lock);
-}
-EXPORT_SYMBOL_GPL(pxa_ssp_free);
-
-static int __devinit ce4100_spi_probe(struct pci_dev *dev,
+static int ce4100_spi_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
+ struct platform_device_info pi;
int ret;
- resource_size_t phys_beg;
- resource_size_t phys_len;
- struct ce4100_info *spi_info;
struct platform_device *pdev;
struct pxa2xx_spi_master spi_pdata;
struct ssp_device *ssp;
- ret = pci_enable_device(dev);
+ ret = pcim_enable_device(dev);
if (ret)
return ret;
- phys_beg = pci_resource_start(dev, 0);
- phys_len = pci_resource_len(dev, 0);
-
- if (!request_mem_region(phys_beg, phys_len,
- "CE4100 SPI")) {
- dev_err(&dev->dev, "Can't request register space.\n");
- ret = -EBUSY;
+ ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
+ if (ret)
return ret;
- }
- pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
- spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
- if (!pdev || !spi_info ) {
- ret = -ENOMEM;
- goto err_nomem;
- }
memset(&spi_pdata, 0, sizeof(spi_pdata));
spi_pdata.num_chipselect = dev->devfn;
- ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
- if (ret)
- goto err_nomem;
-
- pdev->dev.parent = &dev->dev;
- pdev->dev.of_node = dev->dev.of_node;
- ssp = &spi_info->ssp;
+ ssp = &spi_pdata.ssp;
ssp->phys_base = pci_resource_start(dev, 0);
- ssp->mmio_base = ioremap(phys_beg, phys_len);
+ ssp->mmio_base = pcim_iomap_table(dev)[0];
if (!ssp->mmio_base) {
- dev_err(&pdev->dev, "failed to ioremap() registers\n");
- ret = -EIO;
- goto err_nomem;
+ dev_err(&dev->dev, "failed to ioremap() registers\n");
+ return -EIO;
}
ssp->irq = dev->irq;
- ssp->port_id = pdev->id;
+ ssp->port_id = dev->devfn;
ssp->type = PXA25x_SSP;
- mutex_lock(&ssp_lock);
- list_add(&ssp->node, &ssp_list);
- mutex_unlock(&ssp_lock);
+ memset(&pi, 0, sizeof(pi));
+ pi.parent = &dev->dev;
+ pi.name = "pxa2xx-spi";
+ pi.id = ssp->port_id;
+ pi.data = &spi_pdata;
+ pi.size_data = sizeof(spi_pdata);
- pci_set_drvdata(dev, spi_info);
+ pdev = platform_device_register_full(&pi);
+ if (!pdev)
+ return -ENOMEM;
- ret = platform_device_add(pdev);
- if (ret)
- goto err_dev_add;
+ pci_set_drvdata(dev, pdev);
- return ret;
-
-err_dev_add:
- pci_set_drvdata(dev, NULL);
- mutex_lock(&ssp_lock);
- list_del(&ssp->node);
- mutex_unlock(&ssp_lock);
- iounmap(ssp->mmio_base);
-
-err_nomem:
- release_mem_region(phys_beg, phys_len);
- platform_device_put(pdev);
- kfree(spi_info);
- return ret;
+ return 0;
}
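The rewritten probe above hands everything to platform_device_register_full(), which copies the supplied platform data, so a stack variable is enough. A minimal sketch with hypothetical "foo" names (the helper returns an ERR_PTR on failure):

#include <linux/err.h>
#include <linux/platform_device.h>

struct foo_pdata { int num_cs; };

static struct platform_device *register_foo_child(struct device *parent)
{
	struct foo_pdata pdata = { .num_cs = 4 };
	struct platform_device_info pi = {
		.parent    = parent,
		.name      = "foo",
		.id        = 0,
		.data      = &pdata,		/* copied by the core */
		.size_data = sizeof(pdata),
	};

	return platform_device_register_full(&pi);	/* ERR_PTR on failure */
}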
-static void __devexit ce4100_spi_remove(struct pci_dev *dev)
+static void ce4100_spi_remove(struct pci_dev *dev)
{
- struct ce4100_info *spi_info;
- struct ssp_device *ssp;
-
- spi_info = pci_get_drvdata(dev);
- ssp = &spi_info->ssp;
- platform_device_unregister(spi_info->spi_pdev);
-
- iounmap(ssp->mmio_base);
- release_mem_region(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
-
- mutex_lock(&ssp_lock);
- list_del(&ssp->node);
- mutex_unlock(&ssp_lock);
+ struct platform_device *pdev = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
- pci_disable_device(dev);
- kfree(spi_info);
+ platform_device_unregister(pdev);
}
static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = {
@@ -161,7 +72,7 @@ static struct pci_driver ce4100_spi_driver = {
.name = "ce4100_spi",
.id_table = ce4100_spi_devices,
.probe = ce4100_spi_probe,
- .remove = __devexit_p(ce4100_spi_remove),
+ .remove = ce4100_spi_remove,
};
module_pci_driver(ce4100_spi_driver);
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
new file mode 100644
index 000000000000..2916efc7cfe5
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -0,0 +1,490 @@
+/*
+ * PXA2xx SPI private DMA support.
+ *
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+#include "spi-pxa2xx.h"
+
+#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
+#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
+
+bool pxa2xx_spi_dma_is_possible(size_t len)
+{
+ /* Try to map dma buffer and do a dma transfer if successful, but
+ * only if the length is non-zero and less than MAX_DMA_LEN.
+ *
+ * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
+ * of PIO instead. Care is needed above because the transfer may
+ * have been passed with buffers that are already dma mapped.
+ * A zero-length transfer in PIO mode will not try to write/read
+ * to/from the buffers.
+ *
+ * REVISIT large transfers are exactly where we most want to be
+ * using DMA. If this happens much, split those transfers into
+ * multiple DMA segments rather than forcing PIO.
+ */
+ return len > 0 && len <= MAX_DMA_LEN;
+}
+
+int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct device *dev = &msg->spi->dev;
+
+ if (!drv_data->cur_chip->enable_dma)
+ return 0;
+
+ if (msg->is_dma_mapped)
+ return drv_data->rx_dma && drv_data->tx_dma;
+
+ if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
+ return 0;
+
+ /* Modify setup if rx buffer is null */
+ if (drv_data->rx == NULL) {
+ *drv_data->null_dma_buf = 0;
+ drv_data->rx = drv_data->null_dma_buf;
+ drv_data->rx_map_len = 4;
+ } else
+ drv_data->rx_map_len = drv_data->len;
+
+
+ /* Modify setup if tx buffer is null */
+ if (drv_data->tx == NULL) {
+ *drv_data->null_dma_buf = 0;
+ drv_data->tx = drv_data->null_dma_buf;
+ drv_data->tx_map_len = 4;
+ } else
+ drv_data->tx_map_len = drv_data->len;
+
+ /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
+ * so we flush the cache *before* invalidating it, in case
+ * the tx and rx buffers overlap.
+ */
+ drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, drv_data->tx_dma))
+ return 0;
+
+ /* Stream map the rx buffer */
+ drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
+ drv_data->rx_map_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, drv_data->rx_dma)) {
+ dma_unmap_single(dev, drv_data->tx_dma,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
+{
+ struct device *dev;
+
+ if (!drv_data->dma_mapped)
+ return;
+
+ if (!drv_data->cur_msg->is_dma_mapped) {
+ dev = &drv_data->cur_msg->spi->dev;
+ dma_unmap_single(dev, drv_data->rx_dma,
+ drv_data->rx_map_len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, drv_data->tx_dma,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ }
+
+ drv_data->dma_mapped = 0;
+}
+
+static int wait_ssp_rx_stall(void const __iomem *ioaddr)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
+ cpu_relax();
+
+ return limit;
+}
+
+static int wait_dma_channel_stop(int channel)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
+ cpu_relax();
+
+ return limit;
+}
+
+static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
+ const char *msg)
+{
+ void __iomem *reg = drv_data->ioaddr;
+
+ /* Stop and reset */
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ if (!pxa25x_ssp_comp(drv_data))
+ write_SSTO(0, reg);
+ pxa2xx_spi_flush(drv_data);
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ dev_err(&drv_data->pdev->dev, "%s\n", msg);
+
+ drv_data->cur_msg->state = ERROR_STATE;
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
+{
+ void __iomem *reg = drv_data->ioaddr;
+ struct spi_message *msg = drv_data->cur_msg;
+
+ /* Clear and disable interrupts on SSP and DMA channels */
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+
+ if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: dma rx channel stop failed\n");
+
+ if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_transfer: ssp rx stall failed\n");
+
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ /* update the buffer pointer for the amount completed in dma */
+ drv_data->rx += drv_data->len -
+ (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
+
+ /* read trailing data from the fifo; it does not matter how many
+ * bytes are in the fifo, just read until the buffer is full
+ * or the fifo is empty, whichever occurs first */
+ drv_data->read(drv_data);
+
+ /* return count of what was actually read */
+ msg->actual_length += drv_data->len -
+ (drv_data->rx_end - drv_data->rx);
+
+ /* Transfer delays and chip select release are
+ * handled in pump_transfers or giveback
+ */
+
+ /* Move to next transfer */
+ msg->state = pxa2xx_spi_next_transfer(drv_data);
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+void pxa2xx_spi_dma_handler(int channel, void *data)
+{
+ struct driver_data *drv_data = data;
+ u32 irq_status = DCSR(channel) & DMA_INT_MASK;
+
+ if (irq_status & DCSR_BUSERR) {
+
+ if (channel == drv_data->tx_channel)
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_handler: bad bus address on tx channel");
+ else
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_handler: bad bus address on rx channel");
+ return;
+ }
+
+ /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
+ if ((channel == drv_data->tx_channel)
+ && (irq_status & DCSR_ENDINTR)
+ && (drv_data->ssp_type == PXA25x_SSP)) {
+
+ /* Wait for rx to stall */
+ if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: ssp rx stall failed\n");
+
+ /* finish this transfer, start the next */
+ pxa2xx_spi_dma_transfer_complete(drv_data);
+ }
+}
+
+irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
+{
+ u32 irq_status;
+ void __iomem *reg = drv_data->ioaddr;
+
+ irq_status = read_SSSR(reg) & drv_data->mask_sr;
+ if (irq_status & SSSR_ROR) {
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_transfer: fifo overrun");
+ return IRQ_HANDLED;
+ }
+
+ /* Check for false positive timeout */
+ if ((irq_status & SSSR_TINT)
+ && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
+ write_SSSR(SSSR_TINT, reg);
+ return IRQ_HANDLED;
+ }
+
+ if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
+
+ /* Clear and disable timeout interrupt, do the rest in
+ * dma_transfer_complete */
+ if (!pxa25x_ssp_comp(drv_data))
+ write_SSTO(0, reg);
+
+ /* finish this transfer, start the next */
+ pxa2xx_spi_dma_transfer_complete(drv_data);
+
+ return IRQ_HANDLED;
+ }
+
+ /* Oops, problem detected */
+ return IRQ_NONE;
+}
+
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
+{
+ u32 dma_width;
+
+ switch (drv_data->n_bytes) {
+ case 1:
+ dma_width = DCMD_WIDTH1;
+ break;
+ case 2:
+ dma_width = DCMD_WIDTH2;
+ break;
+ default:
+ dma_width = DCMD_WIDTH4;
+ break;
+ }
+
+ /* Setup rx DMA Channel */
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
+ DTADR(drv_data->rx_channel) = drv_data->rx_dma;
+ if (drv_data->rx == drv_data->null_dma_buf)
+ /* No target address increment */
+ DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+ else
+ DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
+ | DCMD_FLOWSRC
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+
+ /* Setup tx DMA Channel */
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DSADR(drv_data->tx_channel) = drv_data->tx_dma;
+ DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
+ if (drv_data->tx == drv_data->null_dma_buf)
+ /* No source address increment */
+ DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+ else
+ DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
+ | DCMD_FLOWTRG
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+
+ /* Enable dma end irqs on SSP to detect end of transfer */
+ if (drv_data->ssp_type == PXA25x_SSP)
+ DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_start(struct driver_data *drv_data)
+{
+ DCSR(drv_data->rx_channel) |= DCSR_RUN;
+ DCSR(drv_data->tx_channel) |= DCSR_RUN;
+}
+
+int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ struct device *dev = &drv_data->pdev->dev;
+ struct ssp_device *ssp = drv_data->ssp;
+
+ /* Get two DMA channels (rx and tx) */
+ drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
+ DMA_PRIO_HIGH,
+ pxa2xx_spi_dma_handler,
+ drv_data);
+ if (drv_data->rx_channel < 0) {
+ dev_err(dev, "problem (%d) requesting rx channel\n",
+ drv_data->rx_channel);
+ return -ENODEV;
+ }
+ drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
+ DMA_PRIO_MEDIUM,
+ pxa2xx_spi_dma_handler,
+ drv_data);
+ if (drv_data->tx_channel < 0) {
+ dev_err(dev, "problem (%d) requesting tx channel\n",
+ drv_data->tx_channel);
+ pxa_free_dma(drv_data->rx_channel);
+ return -ENODEV;
+ }
+
+ DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
+ DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_release(struct driver_data *drv_data)
+{
+ struct ssp_device *ssp = drv_data->ssp;
+
+ DRCMR(ssp->drcmr_rx) = 0;
+ DRCMR(ssp->drcmr_tx) = 0;
+
+ if (drv_data->tx_channel != 0)
+ pxa_free_dma(drv_data->tx_channel);
+ if (drv_data->rx_channel != 0)
+ pxa_free_dma(drv_data->rx_channel);
+}
+
+void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
+{
+ if (drv_data->rx_channel != -1)
+ DRCMR(drv_data->ssp->drcmr_rx) =
+ DRCMR_MAPVLD | drv_data->rx_channel;
+ if (drv_data->tx_channel != -1)
+ DRCMR(drv_data->ssp->drcmr_tx) =
+ DRCMR_MAPVLD | drv_data->tx_channel;
+}
+
+int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word, u32 *burst_code,
+ u32 *threshold)
+{
+ struct pxa2xx_spi_chip *chip_info =
+ (struct pxa2xx_spi_chip *)spi->controller_data;
+ int bytes_per_word;
+ int burst_bytes;
+ int thresh_words;
+ int req_burst_size;
+ int retval = 0;
+
+ /* Set the threshold (in registers) to equal the same amount of data
+ * as represented by burst size (in bytes). The computation below
+ * is (burst_size rounded up to nearest 8 byte, word or long word)
+ * divided by (bytes/register); the tx threshold is the inverse of
+ * the rx, so that there will always be enough data in the rx fifo
+ * to satisfy a burst, and there will always be enough space in the
+ * tx fifo to accept a burst (a tx burst will overwrite the fifo if
+ * there is not enough space). There must always remain enough empty
+ * space in the rx fifo for any data loaded into the tx fifo.
+ * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
+ * will be 8, or half the fifo;
+ * The threshold can only be set to 2, 4 or 8, but not 16, because
+ * to burst 16 to the tx fifo, the fifo would have to be empty;
+ * however, the minimum fifo trigger level is 1, and the tx will
+ * request service when the fifo is at this level, with only 15 spaces.
+ */
+
+ /* find bytes/word */
+ if (bits_per_word <= 8)
+ bytes_per_word = 1;
+ else if (bits_per_word <= 16)
+ bytes_per_word = 2;
+ else
+ bytes_per_word = 4;
+
+ /* use struct pxa2xx_spi_chip->dma_burst_size if available */
+ if (chip_info)
+ req_burst_size = chip_info->dma_burst_size;
+ else {
+ switch (chip->dma_burst_size) {
+ default:
+ /* if the default burst size is not set,
+ * do it now */
+ chip->dma_burst_size = DCMD_BURST8;
+ case DCMD_BURST8:
+ req_burst_size = 8;
+ break;
+ case DCMD_BURST16:
+ req_burst_size = 16;
+ break;
+ case DCMD_BURST32:
+ req_burst_size = 32;
+ break;
+ }
+ }
+ if (req_burst_size <= 8) {
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ } else if (req_burst_size <= 16) {
+ if (bytes_per_word == 1) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ retval = 1;
+ } else {
+ *burst_code = DCMD_BURST16;
+ burst_bytes = 16;
+ }
+ } else {
+ if (bytes_per_word == 1) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ retval = 1;
+ } else if (bytes_per_word == 2) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST16;
+ burst_bytes = 16;
+ retval = 1;
+ } else {
+ *burst_code = DCMD_BURST32;
+ burst_bytes = 32;
+ }
+ }
+
+ thresh_words = burst_bytes / bytes_per_word;
+
+ /* thresh_words will be between 2 and 8 */
+ *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
+ | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
+
+ return retval;
+}
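A stand-alone model of the threshold arithmetic described in the comment above: the RX trigger level in FIFO entries is the burst size in bytes divided by the bytes per word, and the TX trigger is the 16-entry FIFO depth minus that. Pure arithmetic, placeholder encoding only.

#include <stdio.h>

static int thresh_words(int burst_bytes, int bits_per_word)
{
	int bytes_per_word = bits_per_word <= 8 ? 1 :
			     bits_per_word <= 16 ? 2 : 4;

	return burst_bytes / bytes_per_word;	/* ends up between 2 and 8 */
}

int main(void)
{
	/* 16-byte burst, 16-bit words -> RX trigger at 8, TX at 16 - 8 = 8 */
	int rx = thresh_words(16, 16);

	printf("rx=%d tx=%d\n", rx, 16 - rx);
	return 0;
}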
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dc25bee8d33f..90b27a3508a6 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ * Copyright (C) 2013, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,17 +25,20 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
-#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
+#include "spi-pxa2xx.h"
MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
@@ -45,12 +49,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define TIMOUT_DFLT 1000
-#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
-#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
-#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
-#define MAX_DMA_LEN 8191
-#define DMA_ALIGNMENT 8
-
/*
* for testing SSCR1 changes that require SSP restart, basically
* everything except the service and interrupt enables, the pxa270 developer
@@ -65,115 +63,97 @@ MODULE_ALIAS("platform:pxa2xx-spi");
| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
-#define DEFINE_SSP_REG(reg, off) \
-static inline u32 read_##reg(void const __iomem *p) \
-{ return __raw_readl(p + (off)); } \
-\
-static inline void write_##reg(u32 v, void __iomem *p) \
-{ __raw_writel(v, p + (off)); }
-
-DEFINE_SSP_REG(SSCR0, 0x00)
-DEFINE_SSP_REG(SSCR1, 0x04)
-DEFINE_SSP_REG(SSSR, 0x08)
-DEFINE_SSP_REG(SSITR, 0x0c)
-DEFINE_SSP_REG(SSDR, 0x10)
-DEFINE_SSP_REG(SSTO, 0x28)
-DEFINE_SSP_REG(SSPSP, 0x2c)
-
-#define START_STATE ((void*)0)
-#define RUNNING_STATE ((void*)1)
-#define DONE_STATE ((void*)2)
-#define ERROR_STATE ((void*)-1)
-
-#define QUEUE_RUNNING 0
-#define QUEUE_STOPPED 1
-
-struct driver_data {
- /* Driver model hookup */
- struct platform_device *pdev;
-
- /* SSP Info */
- struct ssp_device *ssp;
+#define LPSS_RX_THRESH_DFLT 64
+#define LPSS_TX_LOTHRESH_DFLT 160
+#define LPSS_TX_HITHRESH_DFLT 224
- /* SPI framework hookup */
- enum pxa_ssp_type ssp_type;
- struct spi_master *master;
+/* Offset from drv_data->lpss_base */
+#define SPI_CS_CONTROL 0x18
+#define SPI_CS_CONTROL_SW_MODE BIT(0)
+#define SPI_CS_CONTROL_CS_HIGH BIT(1)
- /* PXA hookup */
- struct pxa2xx_spi_master *master_info;
-
- /* DMA setup stuff */
- int rx_channel;
- int tx_channel;
- u32 *null_dma_buf;
-
- /* SSP register addresses */
- void __iomem *ioaddr;
- u32 ssdr_physical;
-
- /* SSP masks*/
- u32 dma_cr1;
- u32 int_cr1;
- u32 clear_sr;
- u32 mask_sr;
-
- /* Driver message queue */
- struct workqueue_struct *workqueue;
- struct work_struct pump_messages;
- spinlock_t lock;
- struct list_head queue;
- int busy;
- int run;
-
- /* Message Transfer pump */
- struct tasklet_struct pump_transfers;
-
- /* Current message transfer state info */
- struct spi_message* cur_msg;
- struct spi_transfer* cur_transfer;
- struct chip_data *cur_chip;
- size_t len;
- void *tx;
- void *tx_end;
- void *rx;
- void *rx_end;
- int dma_mapped;
- dma_addr_t rx_dma;
- dma_addr_t tx_dma;
- size_t rx_map_len;
- size_t tx_map_len;
- u8 n_bytes;
- u32 dma_width;
- int (*write)(struct driver_data *drv_data);
- int (*read)(struct driver_data *drv_data);
- irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
- void (*cs_control)(u32 command);
-};
+static bool is_lpss_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == LPSS_SSP;
+}
-struct chip_data {
- u32 cr0;
- u32 cr1;
- u32 psp;
- u32 timeout;
- u8 n_bytes;
- u32 dma_width;
- u32 dma_burst_size;
- u32 threshold;
- u32 dma_threshold;
- u8 enable_dma;
- u8 bits_per_word;
- u32 speed_hz;
- union {
- int gpio_cs;
- unsigned int frm;
- };
- int gpio_cs_inverted;
- int (*write)(struct driver_data *drv_data);
- int (*read)(struct driver_data *drv_data);
- void (*cs_control)(u32 command);
-};
+/*
+ * Read and write LPSS SSP private registers. Caller must first check that
+ * is_lpss_ssp() returns true before these can be called.
+ */
+static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
+{
+ WARN_ON(!drv_data->lpss_base);
+ return readl(drv_data->lpss_base + offset);
+}
-static void pump_messages(struct work_struct *work);
+static void __lpss_ssp_write_priv(struct driver_data *drv_data,
+ unsigned offset, u32 value)
+{
+ WARN_ON(!drv_data->lpss_base);
+ writel(value, drv_data->lpss_base + offset);
+}
+
+/*
+ * lpss_ssp_setup - perform LPSS SSP specific setup
+ * @drv_data: pointer to the driver private data
+ *
+ * Perform LPSS SSP specific setup. This function must be called first if
+ * one is going to use LPSS SSP private registers.
+ */
+static void lpss_ssp_setup(struct driver_data *drv_data)
+{
+ unsigned offset = 0x400;
+ u32 value, orig;
+
+ if (!is_lpss_ssp(drv_data))
+ return;
+
+ /*
+ * Perform auto-detection of the LPSS SSP private registers. They
+ * can be either at 1k or 2k offset from the base address.
+ */
+ orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+
+ value = orig | SPI_CS_CONTROL_SW_MODE;
+ writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
+ offset = 0x800;
+ goto detection_done;
+ }
+
+ value &= ~SPI_CS_CONTROL_SW_MODE;
+ writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ if (value != orig) {
+ offset = 0x800;
+ goto detection_done;
+ }
+
+detection_done:
+ /* Now set the LPSS base */
+ drv_data->lpss_base = drv_data->ioaddr + offset;
+
+ /* Enable software chip select control */
+ value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+}
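A stand-alone model of the offset auto-detection performed by lpss_ssp_setup(): toggle a known read/write bit at the first candidate offset and fall back to the second if the write does not stick both ways. Hardware access is faked with a callback pair; all names are hypothetical.

#include <stdint.h>

#define CS_CONTROL	0x18
#define CS_SW_MODE	(1u << 0)

struct regs {
	uint32_t (*read)(void *hw, unsigned int off);
	void (*write)(void *hw, unsigned int off, uint32_t val);
	void *hw;
};

static unsigned int detect_priv_offset(const struct regs *r)
{
	unsigned int off = 0x400;
	uint32_t orig = r->read(r->hw, off + CS_CONTROL);

	r->write(r->hw, off + CS_CONTROL, orig | CS_SW_MODE);
	if (r->read(r->hw, off + CS_CONTROL) != (orig | CS_SW_MODE))
		return 0x800;		/* bit did not stick: use the other bank */

	r->write(r->hw, off + CS_CONTROL, orig & ~CS_SW_MODE);
	if (r->read(r->hw, off + CS_CONTROL) != (orig & ~CS_SW_MODE))
		return 0x800;

	return off;			/* private registers live at +0x400 */
}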
+
+static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
+{
+ u32 value;
+
+ if (!is_lpss_ssp(drv_data))
+ return;
+
+ value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
+ if (enable)
+ value &= ~SPI_CS_CONTROL_CS_HIGH;
+ else
+ value |= SPI_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+}
static void cs_assert(struct driver_data *drv_data)
{
@@ -189,8 +169,12 @@ static void cs_assert(struct driver_data *drv_data)
return;
}
- if (gpio_is_valid(chip->gpio_cs))
+ if (gpio_is_valid(chip->gpio_cs)) {
gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
+ return;
+ }
+
+ lpss_ssp_cs_control(drv_data, true);
}
static void cs_deassert(struct driver_data *drv_data)
@@ -205,30 +189,15 @@ static void cs_deassert(struct driver_data *drv_data)
return;
}
- if (gpio_is_valid(chip->gpio_cs))
+ if (gpio_is_valid(chip->gpio_cs)) {
gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
-}
-
-static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
-{
- void __iomem *reg = drv_data->ioaddr;
-
- if (drv_data->ssp_type == CE4100_SSP)
- val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
-
- write_SSSR(val, reg);
-}
+ return;
+ }
-static int pxa25x_ssp_comp(struct driver_data *drv_data)
-{
- if (drv_data->ssp_type == PXA25x_SSP)
- return 1;
- if (drv_data->ssp_type == CE4100_SSP)
- return 1;
- return 0;
+ lpss_ssp_cs_control(drv_data, false);
}
-static int flush(struct driver_data *drv_data)
+int pxa2xx_spi_flush(struct driver_data *drv_data)
{
unsigned long limit = loops_per_jiffy << 1;
@@ -354,7 +323,7 @@ static int u32_reader(struct driver_data *drv_data)
return drv_data->rx == drv_data->rx_end;
}
-static void *next_transfer(struct driver_data *drv_data)
+void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
struct spi_message *msg = drv_data->cur_msg;
struct spi_transfer *trans = drv_data->cur_transfer;
@@ -370,89 +339,15 @@ static void *next_transfer(struct driver_data *drv_data)
return DONE_STATE;
}
-static int map_dma_buffers(struct driver_data *drv_data)
-{
- struct spi_message *msg = drv_data->cur_msg;
- struct device *dev = &msg->spi->dev;
-
- if (!drv_data->cur_chip->enable_dma)
- return 0;
-
- if (msg->is_dma_mapped)
- return drv_data->rx_dma && drv_data->tx_dma;
-
- if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
- return 0;
-
- /* Modify setup if rx buffer is null */
- if (drv_data->rx == NULL) {
- *drv_data->null_dma_buf = 0;
- drv_data->rx = drv_data->null_dma_buf;
- drv_data->rx_map_len = 4;
- } else
- drv_data->rx_map_len = drv_data->len;
-
-
- /* Modify setup if tx buffer is null */
- if (drv_data->tx == NULL) {
- *drv_data->null_dma_buf = 0;
- drv_data->tx = drv_data->null_dma_buf;
- drv_data->tx_map_len = 4;
- } else
- drv_data->tx_map_len = drv_data->len;
-
- /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
- * so we flush the cache *before* invalidating it, in case
- * the tx and rx buffers overlap.
- */
- drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, drv_data->tx_dma))
- return 0;
-
- /* Stream map the rx buffer */
- drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
- drv_data->rx_map_len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, drv_data->rx_dma)) {
- dma_unmap_single(dev, drv_data->tx_dma,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- return 0;
- }
-
- return 1;
-}
-
-static void unmap_dma_buffers(struct driver_data *drv_data)
-{
- struct device *dev;
-
- if (!drv_data->dma_mapped)
- return;
-
- if (!drv_data->cur_msg->is_dma_mapped) {
- dev = &drv_data->cur_msg->spi->dev;
- dma_unmap_single(dev, drv_data->rx_dma,
- drv_data->rx_map_len, DMA_FROM_DEVICE);
- dma_unmap_single(dev, drv_data->tx_dma,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- }
-
- drv_data->dma_mapped = 0;
-}
-
/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
struct spi_transfer* last_transfer;
- unsigned long flags;
struct spi_message *msg;
- spin_lock_irqsave(&drv_data->lock, flags);
msg = drv_data->cur_msg;
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
- spin_unlock_irqrestore(&drv_data->lock, flags);
last_transfer = list_entry(msg->transfers.prev,
struct spi_transfer,
@@ -481,13 +376,7 @@ static void giveback(struct driver_data *drv_data)
*/
/* get a pointer to the next message, if any */
- spin_lock_irqsave(&drv_data->lock, flags);
- if (list_empty(&drv_data->queue))
- next_msg = NULL;
- else
- next_msg = list_entry(drv_data->queue.next,
- struct spi_message, queue);
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ next_msg = spi_get_next_queued_message(drv_data->master);
/* see if the next and current messages point
* to the same chip
@@ -498,168 +387,10 @@ static void giveback(struct driver_data *drv_data)
cs_deassert(drv_data);
}
- msg->state = NULL;
- if (msg->complete)
- msg->complete(msg->context);
-
+ spi_finalize_current_message(drv_data->master);
drv_data->cur_chip = NULL;
}
-static int wait_ssp_rx_stall(void const __iomem *ioaddr)
-{
- unsigned long limit = loops_per_jiffy << 1;
-
- while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
- cpu_relax();
-
- return limit;
-}
-
-static int wait_dma_channel_stop(int channel)
-{
- unsigned long limit = loops_per_jiffy << 1;
-
- while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
- cpu_relax();
-
- return limit;
-}
-
-static void dma_error_stop(struct driver_data *drv_data, const char *msg)
-{
- void __iomem *reg = drv_data->ioaddr;
-
- /* Stop and reset */
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
- if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
- flush(drv_data);
- write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
-
- unmap_dma_buffers(drv_data);
-
- dev_err(&drv_data->pdev->dev, "%s\n", msg);
-
- drv_data->cur_msg->state = ERROR_STATE;
- tasklet_schedule(&drv_data->pump_transfers);
-}
-
-static void dma_transfer_complete(struct driver_data *drv_data)
-{
- void __iomem *reg = drv_data->ioaddr;
- struct spi_message *msg = drv_data->cur_msg;
-
- /* Clear and disable interrupts on SSP and DMA channels*/
- write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
-
- if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_handler: dma rx channel stop failed\n");
-
- if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_transfer: ssp rx stall failed\n");
-
- unmap_dma_buffers(drv_data);
-
- /* update the buffer pointer for the amount completed in dma */
- drv_data->rx += drv_data->len -
- (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
-
- /* read trailing data from fifo, it does not matter how many
- * bytes are in the fifo just read until buffer is full
- * or fifo is empty, which ever occurs first */
- drv_data->read(drv_data);
-
- /* return count of what was actually read */
- msg->actual_length += drv_data->len -
- (drv_data->rx_end - drv_data->rx);
-
- /* Transfer delays and chip select release are
- * handled in pump_transfers or giveback
- */
-
- /* Move to next transfer */
- msg->state = next_transfer(drv_data);
-
- /* Schedule transfer tasklet */
- tasklet_schedule(&drv_data->pump_transfers);
-}
-
-static void dma_handler(int channel, void *data)
-{
- struct driver_data *drv_data = data;
- u32 irq_status = DCSR(channel) & DMA_INT_MASK;
-
- if (irq_status & DCSR_BUSERR) {
-
- if (channel == drv_data->tx_channel)
- dma_error_stop(drv_data,
- "dma_handler: "
- "bad bus address on tx channel");
- else
- dma_error_stop(drv_data,
- "dma_handler: "
- "bad bus address on rx channel");
- return;
- }
-
- /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
- if ((channel == drv_data->tx_channel)
- && (irq_status & DCSR_ENDINTR)
- && (drv_data->ssp_type == PXA25x_SSP)) {
-
- /* Wait for rx to stall */
- if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_handler: ssp rx stall failed\n");
-
- /* finish this transfer, start the next */
- dma_transfer_complete(drv_data);
- }
-}
-
-static irqreturn_t dma_transfer(struct driver_data *drv_data)
-{
- u32 irq_status;
- void __iomem *reg = drv_data->ioaddr;
-
- irq_status = read_SSSR(reg) & drv_data->mask_sr;
- if (irq_status & SSSR_ROR) {
- dma_error_stop(drv_data, "dma_transfer: fifo overrun");
- return IRQ_HANDLED;
- }
-
- /* Check for false positive timeout */
- if ((irq_status & SSSR_TINT)
- && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
- write_SSSR(SSSR_TINT, reg);
- return IRQ_HANDLED;
- }
-
- if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
-
- /* Clear and disable timeout interrupt, do the rest in
- * dma_transfer_complete */
- if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
-
- /* finish this transfer, start the next */
- dma_transfer_complete(drv_data);
-
- return IRQ_HANDLED;
- }
-
- /* Opps problem detected */
- return IRQ_NONE;
-}
-
static void reset_sccr1(struct driver_data *drv_data)
{
void __iomem *reg = drv_data->ioaddr;
@@ -681,7 +412,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, reg);
- flush(drv_data);
+ pxa2xx_spi_flush(drv_data);
write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
@@ -709,7 +440,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
*/
/* Move to next transfer */
- drv_data->cur_msg->state = next_transfer(drv_data);
+ drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
/* Schedule transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
@@ -789,10 +520,20 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
{
struct driver_data *drv_data = dev_id;
void __iomem *reg = drv_data->ioaddr;
- u32 sccr1_reg = read_SSCR1(reg);
+ u32 sccr1_reg;
u32 mask = drv_data->mask_sr;
u32 status;
+ /*
+ * The IRQ might be shared with other peripherals so we must first
+ * check whether we are RPM suspended or not. If we are, we assume that
+ * the IRQ was not for us (we shouldn't be RPM suspended when the
+ * interrupt is enabled).
+ */
+ if (pm_runtime_suspended(&drv_data->pdev->dev))
+ return IRQ_NONE;
+
+ sccr1_reg = read_SSCR1(reg);
status = read_SSSR(reg);
/* Ignore possible writes if we don't need to write */
@@ -820,106 +561,12 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
return drv_data->transfer_handler(drv_data);
}
-static int set_dma_burst_and_threshold(struct chip_data *chip,
- struct spi_device *spi,
- u8 bits_per_word, u32 *burst_code,
- u32 *threshold)
+static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
- struct pxa2xx_spi_chip *chip_info =
- (struct pxa2xx_spi_chip *)spi->controller_data;
- int bytes_per_word;
- int burst_bytes;
- int thresh_words;
- int req_burst_size;
- int retval = 0;
-
- /* Set the threshold (in registers) to equal the same amount of data
- * as represented by burst size (in bytes). The computation below
- * is (burst_size rounded up to nearest 8 byte, word or long word)
- * divided by (bytes/register); the tx threshold is the inverse of
- * the rx, so that there will always be enough data in the rx fifo
- * to satisfy a burst, and there will always be enough space in the
- * tx fifo to accept a burst (a tx burst will overwrite the fifo if
- * there is not enough space), there must always remain enough empty
- * space in the rx fifo for any data loaded to the tx fifo.
- * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
- * will be 8, or half the fifo;
- * The threshold can only be set to 2, 4 or 8, but not 16, because
- * to burst 16 to the tx fifo, the fifo would have to be empty;
- * however, the minimum fifo trigger level is 1, and the tx will
- * request service when the fifo is at this level, with only 15 spaces.
- */
+ unsigned long ssp_clk = drv_data->max_clk_rate;
+ const struct ssp_device *ssp = drv_data->ssp;
- /* find bytes/word */
- if (bits_per_word <= 8)
- bytes_per_word = 1;
- else if (bits_per_word <= 16)
- bytes_per_word = 2;
- else
- bytes_per_word = 4;
-
- /* use struct pxa2xx_spi_chip->dma_burst_size if available */
- if (chip_info)
- req_burst_size = chip_info->dma_burst_size;
- else {
- switch (chip->dma_burst_size) {
- default:
- /* if the default burst size is not set,
- * do it now */
- chip->dma_burst_size = DCMD_BURST8;
- case DCMD_BURST8:
- req_burst_size = 8;
- break;
- case DCMD_BURST16:
- req_burst_size = 16;
- break;
- case DCMD_BURST32:
- req_burst_size = 32;
- break;
- }
- }
- if (req_burst_size <= 8) {
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- } else if (req_burst_size <= 16) {
- if (bytes_per_word == 1) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- retval = 1;
- } else {
- *burst_code = DCMD_BURST16;
- burst_bytes = 16;
- }
- } else {
- if (bytes_per_word == 1) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- retval = 1;
- } else if (bytes_per_word == 2) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST16;
- burst_bytes = 16;
- retval = 1;
- } else {
- *burst_code = DCMD_BURST32;
- burst_bytes = 32;
- }
- }
-
- thresh_words = burst_bytes / bytes_per_word;
-
- /* thresh_words will be between 2 and 8 */
- *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
- | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
-
- return retval;
-}
-
-static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate)
-{
- unsigned long ssp_clk = clk_get_rate(ssp->clk);
+ rate = min_t(int, ssp_clk, rate);
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
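ssp_get_clk_div() now clamps the requested rate to the cached max_clk_rate before computing the divider; on PXA25x/CE4100-class ports the result lands in SSCR0[15:8] as shown above. A worked example with hypothetical figures:

/*
 * Hypothetical numbers: with max_clk_rate = 100 MHz and a requested
 * 1 MHz transfer, the divider is (100000000 / (2 * 1000000)) - 1 = 49,
 * i.e. 0x31 placed in bits 15:8 of SSCR0.
 */
static unsigned int example_pxa25x_clk_div(unsigned long ssp_clk, int rate)
{
	return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
}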
@@ -934,7 +581,6 @@ static void pump_transfers(unsigned long data)
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
struct chip_data *chip = NULL;
- struct ssp_device *ssp = drv_data->ssp;
void __iomem *reg = drv_data->ioaddr;
u32 clk_div = 0;
u8 bits = 0;
@@ -976,8 +622,8 @@ static void pump_transfers(unsigned long data)
cs_deassert(drv_data);
}
- /* Check for transfers that need multiple DMA segments */
- if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
+ /* Check if we can DMA this transfer */
+ if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {
/* reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
@@ -1000,21 +646,20 @@ static void pump_transfers(unsigned long data)
}
/* Setup the transfer state based on the type of transfer */
- if (flush(drv_data) == 0) {
+ if (pxa2xx_spi_flush(drv_data) == 0) {
dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
message->status = -EIO;
giveback(drv_data);
return;
}
drv_data->n_bytes = chip->n_bytes;
- drv_data->dma_width = chip->dma_width;
drv_data->tx = (void *)transfer->tx_buf;
drv_data->tx_end = drv_data->tx + transfer->len;
drv_data->rx = transfer->rx_buf;
drv_data->rx_end = drv_data->rx + transfer->len;
drv_data->rx_dma = transfer->rx_dma;
drv_data->tx_dma = transfer->tx_dma;
- drv_data->len = transfer->len & DCMD_LENGTH;
+ drv_data->len = transfer->len;
drv_data->write = drv_data->tx ? chip->write : null_writer;
drv_data->read = drv_data->rx ? chip->read : null_reader;
@@ -1031,25 +676,22 @@ static void pump_transfers(unsigned long data)
if (transfer->bits_per_word)
bits = transfer->bits_per_word;
- clk_div = ssp_get_clk_div(ssp, speed);
+ clk_div = ssp_get_clk_div(drv_data, speed);
if (bits <= 8) {
drv_data->n_bytes = 1;
- drv_data->dma_width = DCMD_WIDTH1;
drv_data->read = drv_data->read != null_reader ?
u8_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
u8_writer : null_writer;
} else if (bits <= 16) {
drv_data->n_bytes = 2;
- drv_data->dma_width = DCMD_WIDTH2;
drv_data->read = drv_data->read != null_reader ?
u16_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
u16_writer : null_writer;
} else if (bits <= 32) {
drv_data->n_bytes = 4;
- drv_data->dma_width = DCMD_WIDTH4;
drv_data->read = drv_data->read != null_reader ?
u32_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
@@ -1058,7 +700,8 @@ static void pump_transfers(unsigned long data)
/* if bits/word is changed in dma mode, then must check the
* thresholds and burst also */
if (chip->enable_dma) {
- if (set_dma_burst_and_threshold(chip, message->spi,
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
+ message->spi,
bits, &dma_burst,
&dma_thresh))
if (printk_ratelimit())
@@ -1077,70 +720,21 @@ static void pump_transfers(unsigned long data)
message->state = RUNNING_STATE;
- /* Try to map dma buffer and do a dma transfer if successful, but
- * only if the length is non-zero and less than MAX_DMA_LEN.
- *
- * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
- * of PIO instead. Care is needed above because the transfer may
- * have have been passed with buffers that are already dma mapped.
- * A zero-length transfer in PIO mode will not try to write/read
- * to/from the buffers
- *
- * REVISIT large transfers are exactly where we most want to be
- * using DMA. If this happens much, split those transfers into
- * multiple DMA segments rather than forcing PIO.
- */
drv_data->dma_mapped = 0;
- if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
- drv_data->dma_mapped = map_dma_buffers(drv_data);
+ if (pxa2xx_spi_dma_is_possible(drv_data->len))
+ drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
if (drv_data->dma_mapped) {
/* Ensure we have the correct interrupt handler */
- drv_data->transfer_handler = dma_transfer;
-
- /* Setup rx DMA Channel */
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
- DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
- DTADR(drv_data->rx_channel) = drv_data->rx_dma;
- if (drv_data->rx == drv_data->null_dma_buf)
- /* No target address increment */
- DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
- | drv_data->dma_width
- | dma_burst
- | drv_data->len;
- else
- DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
- | DCMD_FLOWSRC
- | drv_data->dma_width
- | dma_burst
- | drv_data->len;
-
- /* Setup tx DMA Channel */
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- DSADR(drv_data->tx_channel) = drv_data->tx_dma;
- DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
- if (drv_data->tx == drv_data->null_dma_buf)
- /* No source address increment */
- DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
- | drv_data->dma_width
- | dma_burst
- | drv_data->len;
- else
- DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
- | DCMD_FLOWTRG
- | drv_data->dma_width
- | dma_burst
- | drv_data->len;
-
- /* Enable dma end irqs on SSP to detect end of transfer */
- if (drv_data->ssp_type == PXA25x_SSP)
- DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
+ drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
+
+ pxa2xx_spi_dma_prepare(drv_data, dma_burst);
/* Clear status and start DMA engine */
cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
write_SSSR(drv_data->clear_sr, reg);
- DCSR(drv_data->rx_channel) |= DCSR_RUN;
- DCSR(drv_data->tx_channel) |= DCSR_RUN;
+
+ pxa2xx_spi_dma_start(drv_data);
} else {
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = interrupt_transfer;
@@ -1150,6 +744,13 @@ static void pump_transfers(unsigned long data)
write_SSSR_CS(drv_data, drv_data->clear_sr);
}
+ if (is_lpss_ssp(drv_data)) {
+ if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
+ write_SSIRF(chip->lpss_rx_threshold, reg);
+ if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
+ write_SSITF(chip->lpss_tx_threshold, reg);
+ }
+
/* see if we need to reload the config registers */
if ((read_SSCR0(reg) != cr0)
|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
@@ -1176,31 +777,12 @@ static void pump_transfers(unsigned long data)
write_SSCR1(cr1, reg);
}
-static void pump_messages(struct work_struct *work)
+static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct driver_data *drv_data =
- container_of(work, struct driver_data, pump_messages);
- unsigned long flags;
-
- /* Lock queue and check for queue work */
- spin_lock_irqsave(&drv_data->lock, flags);
- if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
- drv_data->busy = 0;
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return;
- }
-
- /* Make sure we are not already running a message */
- if (drv_data->cur_msg) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return;
- }
-
- /* Extract head of queue */
- drv_data->cur_msg = list_entry(drv_data->queue.next,
- struct spi_message, queue);
- list_del_init(&drv_data->cur_msg->queue);
+ struct driver_data *drv_data = spi_master_get_devdata(master);
+ drv_data->cur_msg = msg;
/* Initial message state */
drv_data->cur_msg->state = START_STATE;
drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
@@ -1213,34 +795,27 @@ static void pump_messages(struct work_struct *work)
/* Mark as busy and launch transfers */
tasklet_schedule(&drv_data->pump_transfers);
-
- drv_data->busy = 1;
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ return 0;
}
-static int transfer(struct spi_device *spi, struct spi_message *msg)
+static int pxa2xx_spi_prepare_transfer(struct spi_master *master)
{
- struct driver_data *drv_data = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- spin_lock_irqsave(&drv_data->lock, flags);
-
- if (drv_data->run == QUEUE_STOPPED) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return -ESHUTDOWN;
- }
-
- msg->actual_length = 0;
- msg->status = -EINPROGRESS;
- msg->state = START_STATE;
+ struct driver_data *drv_data = spi_master_get_devdata(master);
- list_add_tail(&msg->queue, &drv_data->queue);
+ pm_runtime_get_sync(&drv_data->pdev->dev);
+ return 0;
+}
- if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
+static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
+{
+ struct driver_data *drv_data = spi_master_get_devdata(master);
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ /* Disable the SSP now */
+ write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
+ drv_data->ioaddr);
+ pm_runtime_mark_last_busy(&drv_data->pdev->dev);
+ pm_runtime_put_autosuspend(&drv_data->pdev->dev);
return 0;
}
@@ -1287,10 +862,18 @@ static int setup(struct spi_device *spi)
struct pxa2xx_spi_chip *chip_info = NULL;
struct chip_data *chip;
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
- struct ssp_device *ssp = drv_data->ssp;
unsigned int clk_div;
- uint tx_thres = TX_THRESH_DFLT;
- uint rx_thres = RX_THRESH_DFLT;
+ uint tx_thres, tx_hi_thres, rx_thres;
+
+ if (is_lpss_ssp(drv_data)) {
+ tx_thres = LPSS_TX_LOTHRESH_DFLT;
+ tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
+ rx_thres = LPSS_RX_THRESH_DFLT;
+ } else {
+ tx_thres = TX_THRESH_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_DFLT;
+ }
if (!pxa25x_ssp_comp(drv_data)
&& (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
@@ -1330,8 +913,6 @@ static int setup(struct spi_device *spi)
chip->gpio_cs = -1;
chip->enable_dma = 0;
chip->timeout = TIMOUT_DFLT;
- chip->dma_burst_size = drv_data->master_info->enable_dma ?
- DCMD_BURST8 : 0;
}
/* protocol drivers may change the chip settings, so...
@@ -1345,23 +926,37 @@ static int setup(struct spi_device *spi)
chip->timeout = chip_info->timeout;
if (chip_info->tx_threshold)
tx_thres = chip_info->tx_threshold;
+ if (chip_info->tx_hi_threshold)
+ tx_hi_thres = chip_info->tx_hi_threshold;
if (chip_info->rx_threshold)
rx_thres = chip_info->rx_threshold;
chip->enable_dma = drv_data->master_info->enable_dma;
chip->dma_threshold = 0;
if (chip_info->enable_loopback)
chip->cr1 = SSCR1_LBM;
+ } else if (ACPI_HANDLE(&spi->dev)) {
+ /*
+ * Slave devices enumerated from ACPI namespace don't
+ * usually have chip_info but we still might want to use
+ * DMA with them.
+ */
+ chip->enable_dma = drv_data->master_info->enable_dma;
}
chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
+ chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
+ chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
+ | SSITF_TxHiThresh(tx_hi_thres);
+
/* set dma burst and threshold outside of chip_info path so that if
* chip_info goes away after setting chip->enable_dma, the
* burst and threshold can still respond to changes in bits_per_word */
if (chip->enable_dma) {
/* set up legal burst and threshold for dma */
- if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
+ spi->bits_per_word,
&chip->dma_burst_size,
&chip->dma_threshold)) {
dev_warn(&spi->dev, "in setup: DMA burst size reduced "
@@ -1369,7 +964,7 @@ static int setup(struct spi_device *spi)
}
}
- clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz);
+ clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
chip->speed_hz = spi->max_speed_hz;
chip->cr0 = clk_div
@@ -1382,32 +977,32 @@ static int setup(struct spi_device *spi)
chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
+ if (spi->mode & SPI_LOOP)
+ chip->cr1 |= SSCR1_LBM;
+
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
- clk_get_rate(ssp->clk)
+ drv_data->max_clk_rate
/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
chip->enable_dma ? "DMA" : "PIO");
else
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
- clk_get_rate(ssp->clk) / 2
+ drv_data->max_clk_rate / 2
/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
chip->enable_dma ? "DMA" : "PIO");
if (spi->bits_per_word <= 8) {
chip->n_bytes = 1;
- chip->dma_width = DCMD_WIDTH1;
chip->read = u8_reader;
chip->write = u8_writer;
} else if (spi->bits_per_word <= 16) {
chip->n_bytes = 2;
- chip->dma_width = DCMD_WIDTH2;
chip->read = u16_reader;
chip->write = u16_writer;
} else if (spi->bits_per_word <= 32) {
chip->cr0 |= SSCR0_EDSS;
chip->n_bytes = 4;
- chip->dma_width = DCMD_WIDTH4;
chip->read = u32_reader;
chip->write = u32_writer;
} else {
@@ -1438,95 +1033,100 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
-static int __devinit init_queue(struct driver_data *drv_data)
+#ifdef CONFIG_ACPI
+static int pxa2xx_spi_acpi_add_dma(struct acpi_resource *res, void *data)
{
- INIT_LIST_HEAD(&drv_data->queue);
- spin_lock_init(&drv_data->lock);
-
- drv_data->run = QUEUE_STOPPED;
- drv_data->busy = 0;
-
- tasklet_init(&drv_data->pump_transfers,
- pump_transfers, (unsigned long)drv_data);
-
- INIT_WORK(&drv_data->pump_messages, pump_messages);
- drv_data->workqueue = create_singlethread_workqueue(
- dev_name(drv_data->master->dev.parent));
- if (drv_data->workqueue == NULL)
- return -EBUSY;
+ struct pxa2xx_spi_master *pdata = data;
+
+ if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
+ const struct acpi_resource_fixed_dma *dma;
+
+ dma = &res->data.fixed_dma;
+ if (pdata->tx_slave_id < 0) {
+ pdata->tx_slave_id = dma->request_lines;
+ pdata->tx_chan_id = dma->channels;
+ } else if (pdata->rx_slave_id < 0) {
+ pdata->rx_slave_id = dma->request_lines;
+ pdata->rx_chan_id = dma->channels;
+ }
+ }
- return 0;
+ /* Tell the ACPI core to skip this resource */
+ return 1;
}
-static int start_queue(struct driver_data *drv_data)
+static struct pxa2xx_spi_master *
+pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
- unsigned long flags;
-
- spin_lock_irqsave(&drv_data->lock, flags);
-
- if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return -EBUSY;
+ struct pxa2xx_spi_master *pdata;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ struct ssp_device *ssp;
+ struct resource *res;
+ int devid;
+
+ if (!ACPI_HANDLE(&pdev->dev) ||
+ acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
+ return NULL;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev,
+ "failed to allocate memory for platform data\n");
+ return NULL;
}
- drv_data->run = QUEUE_RUNNING;
- drv_data->cur_msg = NULL;
- drv_data->cur_transfer = NULL;
- drv_data->cur_chip = NULL;
- spin_unlock_irqrestore(&drv_data->lock, flags);
-
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return NULL;
- return 0;
-}
+ ssp = &pdata->ssp;
-static int stop_queue(struct driver_data *drv_data)
-{
- unsigned long flags;
- unsigned limit = 500;
- int status = 0;
-
- spin_lock_irqsave(&drv_data->lock, flags);
-
- /* This is a bit lame, but is optimized for the common execution path.
- * A wait_queue on the drv_data->busy could be used, but then the common
- * execution path (pump_messages) would be required to call wake_up or
- * friends on every SPI message. Do this instead */
- drv_data->run = QUEUE_STOPPED;
- while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- msleep(10);
- spin_lock_irqsave(&drv_data->lock, flags);
+ ssp->phys_base = res->start;
+ ssp->mmio_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!ssp->mmio_base) {
+ dev_err(&pdev->dev, "failed to ioremap mmio_base\n");
+ return NULL;
}
- if (!list_empty(&drv_data->queue) || drv_data->busy)
- status = -EBUSY;
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ ssp->irq = platform_get_irq(pdev, 0);
+ ssp->type = LPSS_SSP;
+ ssp->pdev = pdev;
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ ssp->port_id = -1;
+ if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
+ ssp->port_id = devid;
- return status;
-}
+ pdata->num_chipselect = 1;
+ pdata->rx_slave_id = -1;
+ pdata->tx_slave_id = -1;
-static int destroy_queue(struct driver_data *drv_data)
-{
- int status;
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(adev, &resource_list, pxa2xx_spi_acpi_add_dma,
+ pdata);
+ acpi_dev_free_resource_list(&resource_list);
- status = stop_queue(drv_data);
- /* we are unloading the module or failing to load (only two calls
- * to this routine), and neither call can handle a return value.
- * However, destroy_workqueue calls flush_workqueue, and that will
- * block until all work is done. If the reason that stop_queue
- * timed out is that the work will never finish, then it does no
- * good to call destroy_workqueue, so return anyway. */
- if (status != 0)
- return status;
+ pdata->enable_dma = pdata->rx_slave_id >= 0 && pdata->tx_slave_id >= 0;
- destroy_workqueue(drv_data->workqueue);
+ return pdata;
+}
- return 0;
+static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "INT33C0", 0 },
+ { "INT33C1", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+#else
+static inline struct pxa2xx_spi_master *
+pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+{
+ return NULL;
}
+#endif
-static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
+static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pxa2xx_spi_master *platform_info;
@@ -1535,11 +1135,21 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
struct ssp_device *ssp;
int status;
- platform_info = dev->platform_data;
+ platform_info = dev_get_platdata(dev);
+ if (!platform_info) {
+ platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
+ if (!platform_info) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+ }
ssp = pxa_ssp_request(pdev->id, pdev->name);
- if (ssp == NULL) {
- dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id);
+ if (!ssp)
+ ssp = &platform_info->ssp;
+
+ if (!ssp->mmio_base) {
+ dev_err(&pdev->dev, "failed to get ssp\n");
return -ENODEV;
}
@@ -1558,19 +1168,21 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
master->dev.parent = &pdev->dev;
master->dev.of_node = pdev->dev.of_node;
+ ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev));
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
- master->bus_num = pdev->id;
+ master->bus_num = ssp->port_id;
master->num_chipselect = platform_info->num_chipselect;
master->dma_alignment = DMA_ALIGNMENT;
master->cleanup = cleanup;
master->setup = setup;
- master->transfer = transfer;
+ master->transfer_one_message = pxa2xx_spi_transfer_one_message;
+ master->prepare_transfer_hardware = pxa2xx_spi_prepare_transfer;
+ master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
drv_data->ssp_type = ssp->type;
- drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data +
- sizeof(struct driver_data)), 8);
+ drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
drv_data->ioaddr = ssp->mmio_base;
drv_data->ssdr_physical = ssp->phys_base + SSDR;
@@ -1581,7 +1193,7 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
} else {
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
- drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
+ drv_data->dma_cr1 = DEFAULT_DMA_CR1;
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
}
@@ -1597,35 +1209,17 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->tx_channel = -1;
drv_data->rx_channel = -1;
if (platform_info->enable_dma) {
-
- /* Get two DMA channels (rx and tx) */
- drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
- DMA_PRIO_HIGH,
- dma_handler,
- drv_data);
- if (drv_data->rx_channel < 0) {
- dev_err(dev, "problem (%d) requesting rx channel\n",
- drv_data->rx_channel);
- status = -ENODEV;
- goto out_error_irq_alloc;
+ status = pxa2xx_spi_dma_setup(drv_data);
+ if (status) {
+ dev_warn(dev, "failed to setup DMA, using PIO\n");
+ platform_info->enable_dma = false;
}
- drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
- DMA_PRIO_MEDIUM,
- dma_handler,
- drv_data);
- if (drv_data->tx_channel < 0) {
- dev_err(dev, "problem (%d) requesting tx channel\n",
- drv_data->tx_channel);
- status = -ENODEV;
- goto out_error_dma_alloc;
- }
-
- DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
- DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
}
/* Enable SOC clock */
- clk_enable(ssp->clk);
+ clk_prepare_enable(ssp->clk);
+
+ drv_data->max_clk_rate = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
write_SSCR0(0, drv_data->ioaddr);
@@ -1640,41 +1234,29 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
write_SSTO(0, drv_data->ioaddr);
write_SSPSP(0, drv_data->ioaddr);
- /* Initial and start queue */
- status = init_queue(drv_data);
- if (status != 0) {
- dev_err(&pdev->dev, "problem initializing queue\n");
- goto out_error_clock_enabled;
- }
- status = start_queue(drv_data);
- if (status != 0) {
- dev_err(&pdev->dev, "problem starting queue\n");
- goto out_error_clock_enabled;
- }
+ lpss_ssp_setup(drv_data);
+
+ tasklet_init(&drv_data->pump_transfers, pump_transfers,
+ (unsigned long)drv_data);
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = spi_register_master(master);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi master\n");
- goto out_error_queue_alloc;
+ goto out_error_clock_enabled;
}
- return status;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
-out_error_queue_alloc:
- destroy_queue(drv_data);
+ return status;
out_error_clock_enabled:
- clk_disable(ssp->clk);
-
-out_error_dma_alloc:
- if (drv_data->tx_channel != -1)
- pxa_free_dma(drv_data->tx_channel);
- if (drv_data->rx_channel != -1)
- pxa_free_dma(drv_data->rx_channel);
-
-out_error_irq_alloc:
+ clk_disable_unprepare(ssp->clk);
+ pxa2xx_spi_dma_release(drv_data);
free_irq(ssp->irq, drv_data);
out_error_master_alloc:
@@ -1687,37 +1269,23 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
struct ssp_device *ssp;
- int status = 0;
if (!drv_data)
return 0;
ssp = drv_data->ssp;
- /* Remove the queue */
- status = destroy_queue(drv_data);
- if (status != 0)
- /* the kernel does not check the return status of this
- * this routine (mod->exit, within the kernel). Therefore
- * nothing is gained by returning from here, the module is
- * going away regardless, and we should not leave any more
- * resources allocated than necessary. We cannot free the
- * message memory in drv_data->queue, but we can release the
- * resources below. I think the kernel should honor -EBUSY
- * returns but... */
- dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
- "complete, message memory not freed\n");
+ pm_runtime_get_sync(&pdev->dev);
/* Disable the SSP at the peripheral and SOC level */
write_SSCR0(0, drv_data->ioaddr);
- clk_disable(ssp->clk);
+ clk_disable_unprepare(ssp->clk);
/* Release DMA */
- if (drv_data->master_info->enable_dma) {
- DRCMR(ssp->drcmr_rx) = 0;
- DRCMR(ssp->drcmr_tx) = 0;
- pxa_free_dma(drv_data->tx_channel);
- pxa_free_dma(drv_data->rx_channel);
- }
+ if (drv_data->master_info->enable_dma)
+ pxa2xx_spi_dma_release(drv_data);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
/* Release IRQ */
free_irq(ssp->irq, drv_data);
@@ -1749,11 +1317,11 @@ static int pxa2xx_spi_suspend(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
- status = stop_queue(drv_data);
+ status = spi_master_suspend(drv_data->master);
if (status != 0)
return status;
write_SSCR0(0, drv_data->ioaddr);
- clk_disable(ssp->clk);
+ clk_disable_unprepare(ssp->clk);
return 0;
}
@@ -1764,18 +1332,13 @@ static int pxa2xx_spi_resume(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
- if (drv_data->rx_channel != -1)
- DRCMR(drv_data->ssp->drcmr_rx) =
- DRCMR_MAPVLD | drv_data->rx_channel;
- if (drv_data->tx_channel != -1)
- DRCMR(drv_data->ssp->drcmr_tx) =
- DRCMR_MAPVLD | drv_data->tx_channel;
+ pxa2xx_spi_dma_resume(drv_data);
/* Enable the SSP clock */
- clk_enable(ssp->clk);
+ clk_prepare_enable(ssp->clk);
/* Start the queue running */
- status = start_queue(drv_data);
+ status = spi_master_resume(drv_data->master);
if (status != 0) {
dev_err(dev, "problem starting queue (%d)\n", status);
return status;
@@ -1783,20 +1346,38 @@ static int pxa2xx_spi_resume(struct device *dev)
return 0;
}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int pxa2xx_spi_runtime_suspend(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(drv_data->ssp->clk);
+ return 0;
+}
+
+static int pxa2xx_spi_runtime_resume(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(drv_data->ssp->clk);
+ return 0;
+}
+#endif
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
- .suspend = pxa2xx_spi_suspend,
- .resume = pxa2xx_spi_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
+ SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
+ pxa2xx_spi_runtime_resume, NULL)
};
-#endif
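With SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() the struct needs no per-callback #ifdefs. When both CONFIG_PM_SLEEP and CONFIG_PM_RUNTIME are enabled, the initializer roughly expands to the sketch below (the sleep macro also aliases freeze/thaw/poweroff/restore to the same handlers, omitted here):

static const struct dev_pm_ops example_expanded_pm_ops = {
	/* from SET_SYSTEM_SLEEP_PM_OPS() */
	.suspend	 = pxa2xx_spi_suspend,
	.resume		 = pxa2xx_spi_resume,
	/* from SET_RUNTIME_PM_OPS() */
	.runtime_suspend = pxa2xx_spi_runtime_suspend,
	.runtime_resume	 = pxa2xx_spi_runtime_resume,
	.runtime_idle	 = NULL,
};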
static struct platform_driver driver = {
.driver = {
.name = "pxa2xx-spi",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &pxa2xx_spi_pm_ops,
-#endif
+ .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
},
.probe = pxa2xx_spi_probe,
.remove = pxa2xx_spi_remove,
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
new file mode 100644
index 000000000000..5adc2a11c7bc
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ * Copyright (C) 2013, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SPI_PXA2XX_H
+#define SPI_PXA2XX_H
+
+#include <linux/atomic.h>
+#include <linux/dmaengine.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+struct driver_data {
+ /* Driver model hookup */
+ struct platform_device *pdev;
+
+ /* SSP Info */
+ struct ssp_device *ssp;
+
+ /* SPI framework hookup */
+ enum pxa_ssp_type ssp_type;
+ struct spi_master *master;
+
+ /* PXA hookup */
+ struct pxa2xx_spi_master *master_info;
+
+ /* PXA private DMA setup stuff */
+ int rx_channel;
+ int tx_channel;
+ u32 *null_dma_buf;
+
+ /* SSP register addresses */
+ void __iomem *ioaddr;
+ u32 ssdr_physical;
+
+ /* SSP masks */
+ u32 dma_cr1;
+ u32 int_cr1;
+ u32 clear_sr;
+ u32 mask_sr;
+
+ /* Maximum clock rate */
+ unsigned long max_clk_rate;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* DMA engine support */
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+ struct sg_table rx_sgt;
+ struct sg_table tx_sgt;
+ int rx_nents;
+ int tx_nents;
+ void *dummy;
+ atomic_t dma_running;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ int dma_mapped;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ size_t rx_map_len;
+ size_t tx_map_len;
+ u8 n_bytes;
+ int (*write)(struct driver_data *drv_data);
+ int (*read)(struct driver_data *drv_data);
+ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
+ void (*cs_control)(u32 command);
+
+ void __iomem *lpss_base;
+};
+
+struct chip_data {
+ u32 cr0;
+ u32 cr1;
+ u32 psp;
+ u32 timeout;
+ u8 n_bytes;
+ u32 dma_burst_size;
+ u32 threshold;
+ u32 dma_threshold;
+ u16 lpss_rx_threshold;
+ u16 lpss_tx_threshold;
+ u8 enable_dma;
+ u8 bits_per_word;
+ u32 speed_hz;
+ union {
+ int gpio_cs;
+ unsigned int frm;
+ };
+ int gpio_cs_inverted;
+ int (*write)(struct driver_data *drv_data);
+ int (*read)(struct driver_data *drv_data);
+ void (*cs_control)(u32 command);
+};
+
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void const __iomem *p) \
+{ return __raw_readl(p + (off)); } \
+\
+static inline void write_##reg(u32 v, void __iomem *p) \
+{ __raw_writel(v, p + (off)); }
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+DEFINE_SSP_REG(SSITF, SSITF)
+DEFINE_SSP_REG(SSIRF, SSIRF)
+
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+
+#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
+#define DMA_ALIGNMENT 8
+
+static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
+{
+ if (drv_data->ssp_type == PXA25x_SSP)
+ return 1;
+ if (drv_data->ssp_type == CE4100_SSP)
+ return 1;
+ return 0;
+}
+
+static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
+{
+ void __iomem *reg = drv_data->ioaddr;
+
+ if (drv_data->ssp_type == CE4100_SSP)
+ val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
+
+ write_SSSR(val, reg);
+}
+
+extern int pxa2xx_spi_flush(struct driver_data *drv_data);
+extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
+
+/*
+ * Select the right DMA implementation.
+ */
+#if defined(CONFIG_SPI_PXA2XX_PXADMA)
+#define SPI_PXA2XX_USE_DMA 1
+#define MAX_DMA_LEN 8191
+#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE)
+#elif defined(CONFIG_SPI_PXA2XX_DMA)
+#define SPI_PXA2XX_USE_DMA 1
+#define MAX_DMA_LEN SZ_64K
+#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
+#else
+#undef SPI_PXA2XX_USE_DMA
+#define MAX_DMA_LEN 0
+#define DEFAULT_DMA_CR1 0
+#endif
+
+#ifdef SPI_PXA2XX_USE_DMA
+extern bool pxa2xx_spi_dma_is_possible(size_t len);
+extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
+extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
+extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
+extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
+extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
+extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word,
+ u32 *burst_code,
+ u32 *threshold);
+#else
+static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
+static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ return 0;
+}
+#define pxa2xx_spi_dma_transfer NULL
+static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
+ u32 dma_burst) {}
+static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
+static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ return 0;
+}
+static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
+static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {}
+static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word,
+ u32 *burst_code,
+ u32 *threshold)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* SPI_PXA2XX_H */
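DEFINE_SSP_REG() in the header above generates the typed accessors used throughout the driver; for reference, the DEFINE_SSP_REG(SSCR0, 0x00) instantiation expands to exactly this pair:

static inline u32 read_SSCR0(void const __iomem *p)
{ return __raw_readl(p + (0x00)); }

static inline void write_SSCR0(u32 v, void __iomem *p)
{ __raw_writel(v, p + (0x00)); }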
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 30faf6d4ab91..902f2fb902db 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -661,7 +661,7 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
return ret;
}
-static int __devinit rspi_request_dma(struct rspi_data *rspi,
+static int rspi_request_dma(struct rspi_data *rspi,
struct platform_device *pdev)
{
struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
@@ -709,7 +709,7 @@ static int __devinit rspi_request_dma(struct rspi_data *rspi,
return 0;
}
-static void __devexit rspi_release_dma(struct rspi_data *rspi)
+static void rspi_release_dma(struct rspi_data *rspi)
{
if (rspi->chan_tx)
dma_release_channel(rspi->chan_tx);
@@ -717,7 +717,7 @@ static void __devexit rspi_release_dma(struct rspi_data *rspi)
dma_release_channel(rspi->chan_rx);
}
-static int __devexit rspi_remove(struct platform_device *pdev)
+static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
@@ -731,7 +731,7 @@ static int __devexit rspi_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit rspi_probe(struct platform_device *pdev)
+static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
@@ -827,7 +827,7 @@ error1:
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
- .remove = __devexit_p(rspi_remove),
+ .remove = rspi_remove,
.driver = {
.name = "rspi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index a2a080b7f42b..02d64603fcc5 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -506,7 +506,7 @@ static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
}
}
-static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
+static int s3c24xx_spi_probe(struct platform_device *pdev)
{
struct s3c2410_spi_info *pdata;
struct s3c24xx_spi *hw;
@@ -663,7 +663,7 @@ static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
return err;
}
-static int __devexit s3c24xx_spi_remove(struct platform_device *dev)
+static int s3c24xx_spi_remove(struct platform_device *dev)
{
struct s3c24xx_spi *hw = platform_get_drvdata(dev);
@@ -722,7 +722,7 @@ static const struct dev_pm_ops s3c24xx_spi_pmops = {
MODULE_ALIAS("platform:s3c2410-spi");
static struct platform_driver s3c24xx_spi_driver = {
.probe = s3c24xx_spi_probe,
- .remove = __devexit_p(s3c24xx_spi_remove),
+ .remove = s3c24xx_spi_remove,
.driver = {
.name = "s3c2410-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 6e7a805d324d..e862ab8853aa 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -62,7 +62,7 @@
#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
-#define S3C64XX_SPI_PSR_MASK 0xff
+#define S3C64XX_SPI_PSR_MASK 0xff
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
@@ -134,7 +134,6 @@ struct s3c64xx_spi_dma_data {
unsigned ch;
enum dma_transfer_direction direction;
enum dma_ch dmach;
- struct property *dma_prop;
};
/**
@@ -215,6 +214,10 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
val |= S3C64XX_SPI_CH_SW_RST;
val &= ~S3C64XX_SPI_CH_HS_EN;
writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -248,10 +251,6 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
-
- val = readl(regs + S3C64XX_SPI_CH_CFG);
- val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
- writel(val, regs + S3C64XX_SPI_CH_CFG);
}
static void s3c64xx_spi_dmacb(void *data)
@@ -319,16 +318,15 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
{
struct samsung_dma_req req;
+ struct device *dev = &sdd->pdev->dev;
sdd->ops = samsung_dma_get_ops();
req.cap = DMA_SLAVE;
req.client = &s3c64xx_spi_dma_client;
- req.dt_dmach_prop = sdd->rx_dma.dma_prop;
- sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req);
- req.dt_dmach_prop = sdd->tx_dma.dma_prop;
- sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req);
+ sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx");
+ sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx");
return 1;
}
@@ -516,7 +514,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
/* Disable Clock */
if (sdd->port_conf->clk_from_cmu) {
- clk_disable(sdd->src_clk);
+ clk_disable_unprepare(sdd->src_clk);
} else {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_ENCLK_ENABLE;
@@ -564,7 +562,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
/* There is half-multiplier before the SPI */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
/* Enable Clock */
- clk_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->src_clk);
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -697,7 +695,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
INIT_COMPLETION(sdd->xfer_completion);
/* Only BPW and Speed may change across transfers */
- bpw = xfer->bits_per_word ? : spi->bits_per_word;
+ bpw = xfer->bits_per_word;
speed = xfer->speed_hz ? : spi->max_speed_hz;
if (xfer->len % (bpw / 8)) {
@@ -743,8 +741,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
sdd->regs + S3C64XX_SPI_SLAVE_SEL);
if (status) {
- dev_err(&spi->dev, "I/O Error: "
- "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
(sdd->state & RXBUSY) ? 'f' : 'p',
(sdd->state & TXBUSY) ? 'f' : 'p',
@@ -771,8 +768,6 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
if (list_is_last(&xfer->transfer_list,
&msg->transfers))
cs_toggle = 1;
- else
- disable_cs(sdd, spi);
}
msg->actual_length += xfer->len;
@@ -801,7 +796,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
/* Acquire DMA channels */
while (!acquire_dma(sdd))
- msleep(10);
+ usleep_range(10000, 11000);
pm_runtime_get_sync(&sdd->pdev->dev);
@@ -843,16 +838,14 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs) {
- dev_err(&spi->dev, "could not allocate memory for controller"
- " data\n");
+ dev_err(&spi->dev, "could not allocate memory for controller data\n");
of_node_put(data_np);
return ERR_PTR(-ENOMEM);
}
cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
if (!gpio_is_valid(cs->line)) {
- dev_err(&spi->dev, "chip select gpio is not specified or "
- "invalid\n");
+ dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
kfree(cs);
of_node_put(data_np);
return ERR_PTR(-EINVAL);
@@ -959,6 +952,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
if (spi->max_speed_hz >= speed) {
spi->max_speed_hz = speed;
} else {
+ dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
+ spi->max_speed_hz);
err = -EINVAL;
goto setup_exit;
}
@@ -1056,49 +1051,6 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
flush_fifo(sdd);
}
-static int __devinit s3c64xx_spi_get_dmares(
- struct s3c64xx_spi_driver_data *sdd, bool tx)
-{
- struct platform_device *pdev = sdd->pdev;
- struct s3c64xx_spi_dma_data *dma_data;
- struct property *prop;
- struct resource *res;
- char prop_name[15], *chan_str;
-
- if (tx) {
- dma_data = &sdd->tx_dma;
- dma_data->direction = DMA_MEM_TO_DEV;
- chan_str = "tx";
- } else {
- dma_data = &sdd->rx_dma;
- dma_data->direction = DMA_DEV_TO_MEM;
- chan_str = "rx";
- }
-
- if (!sdd->pdev->dev.of_node) {
- res = platform_get_resource(pdev, IORESOURCE_DMA, tx ? 0 : 1);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get SPI-%s dma "
- "resource\n", chan_str);
- return -ENXIO;
- }
- dma_data->dmach = res->start;
- return 0;
- }
-
- sprintf(prop_name, "%s-dma-channel", chan_str);
- prop = of_find_property(pdev->dev.of_node, prop_name, NULL);
- if (!prop) {
- dev_err(&pdev->dev, "%s dma channel property not specified\n",
- chan_str);
- return -ENXIO;
- }
-
- dma_data->dmach = DMACH_DT_PROP;
- dma_data->dma_prop = prop;
- return 0;
-}
-
#ifdef CONFIG_OF
static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
{
@@ -1112,7 +1064,7 @@ static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
dev_err(dev, "invalid gpio[%d]: %d\n", idx, gpio);
goto free_gpio;
}
-
+ sdd->gpios[idx] = gpio;
ret = gpio_request(gpio, "spi-bus");
if (ret) {
dev_err(dev, "gpio [%d] request failed: %d\n",
@@ -1135,8 +1087,7 @@ static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
gpio_free(sdd->gpios[idx]);
}
-static struct __devinit s3c64xx_spi_info * s3c64xx_spi_parse_dt(
- struct device *dev)
+static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
struct s3c64xx_spi_info *sci;
u32 temp;
@@ -1148,16 +1099,14 @@ static struct __devinit s3c64xx_spi_info * s3c64xx_spi_parse_dt(
}
if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
- dev_warn(dev, "spi bus clock parent not specified, using "
- "clock at index 0 as parent\n");
+ dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
sci->src_clk_nr = 0;
} else {
sci->src_clk_nr = temp;
}
if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
- dev_warn(dev, "number of chip select lines not specified, "
- "assuming 1 chip select line\n");
+ dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
sci->num_cs = 1;
} else {
sci->num_cs = temp;
@@ -1197,9 +1146,10 @@ static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
platform_get_device_id(pdev)->driver_data;
}
-static int __init s3c64xx_spi_probe(struct platform_device *pdev)
+static int s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res;
+ struct resource *res;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci = pdev->dev.platform_data;
struct spi_master *master;
@@ -1247,8 +1197,8 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "spi");
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias id, "
- "errno %d\n", ret);
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
+ ret);
goto err0;
}
sdd->port_id = ret;
@@ -1258,13 +1208,26 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
sdd->cur_bpw = 8;
- ret = s3c64xx_spi_get_dmares(sdd, true);
- if (ret)
- goto err0;
+ if (!sdd->pdev->dev.of_node) {
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get SPI tx dma "
+ "resource\n");
+ return -ENXIO;
+ }
+ sdd->tx_dma.dmach = res->start;
- ret = s3c64xx_spi_get_dmares(sdd, false);
- if (ret)
- goto err0;
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get SPI rx dma "
+ "resource\n");
+ return -ENXIO;
+ }
+ sdd->rx_dma.dmach = res->start;
+ }
+
+ sdd->tx_dma.direction = DMA_MEM_TO_DEV;
+ sdd->rx_dma.direction = DMA_DEV_TO_MEM;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = sdd->port_id;
@@ -1278,11 +1241,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- sdd->regs = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (sdd->regs == NULL) {
- dev_err(&pdev->dev, "Unable to remap IO\n");
- ret = -ENXIO;
- goto err1;
+ sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(sdd->regs)) {
+ ret = PTR_ERR(sdd->regs);
+ goto err0;
}
if (!sci->cfg_gpio && pdev->dev.of_node) {
@@ -1291,36 +1253,36 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
} else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
- goto err2;
+ goto err0;
}
/* Setup clocks */
- sdd->clk = clk_get(&pdev->dev, "spi");
+ sdd->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(sdd->clk)) {
dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
ret = PTR_ERR(sdd->clk);
- goto err3;
+ goto err1;
}
- if (clk_enable(sdd->clk)) {
+ if (clk_prepare_enable(sdd->clk)) {
dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
ret = -EBUSY;
- goto err4;
+ goto err1;
}
sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
- sdd->src_clk = clk_get(&pdev->dev, clk_name);
+ sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(sdd->src_clk)) {
dev_err(&pdev->dev,
"Unable to acquire clock '%s'\n", clk_name);
ret = PTR_ERR(sdd->src_clk);
- goto err5;
+ goto err2;
}
- if (clk_enable(sdd->src_clk)) {
+ if (clk_prepare_enable(sdd->src_clk)) {
dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
ret = -EBUSY;
- goto err6;
+ goto err2;
}
/* Setup Default Mode */
@@ -1330,11 +1292,12 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
init_completion(&sdd->xfer_completion);
INIT_LIST_HEAD(&sdd->queue);
- ret = request_irq(irq, s3c64xx_spi_irq, 0, "spi-s3c64xx", sdd);
+ ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
+ "spi-s3c64xx", sdd);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
irq, ret);
- goto err7;
+ goto err3;
}
writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
@@ -1344,11 +1307,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
if (spi_register_master(master)) {
dev_err(&pdev->dev, "cannot register SPI master\n");
ret = -EBUSY;
- goto err8;
+ goto err3;
}
- dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
- "with %d Slaves attached\n",
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
sdd->port_id, master->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
mem_res->end, mem_res->start,
@@ -1358,21 +1320,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
return 0;
-err8:
- free_irq(irq, sdd);
-err7:
- clk_disable(sdd->src_clk);
-err6:
- clk_put(sdd->src_clk);
-err5:
- clk_disable(sdd->clk);
-err4:
- clk_put(sdd->clk);
err3:
- if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
- s3c64xx_spi_dt_gpio_free(sdd);
+ clk_disable_unprepare(sdd->src_clk);
err2:
+ clk_disable_unprepare(sdd->clk);
err1:
+ if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
+ s3c64xx_spi_dt_gpio_free(sdd);
err0:
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
@@ -1391,13 +1345,9 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
- free_irq(platform_get_irq(pdev, 0), sdd);
-
- clk_disable(sdd->src_clk);
- clk_put(sdd->src_clk);
+ clk_disable_unprepare(sdd->src_clk);
- clk_disable(sdd->clk);
- clk_put(sdd->clk);
+ clk_disable_unprepare(sdd->clk);
if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
s3c64xx_spi_dt_gpio_free(sdd);
@@ -1417,8 +1367,8 @@ static int s3c64xx_spi_suspend(struct device *dev)
spi_master_suspend(master);
/* Disable the clock */
- clk_disable(sdd->src_clk);
- clk_disable(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
+ clk_disable_unprepare(sdd->clk);
if (!sdd->cntrlr_info->cfg_gpio && dev->of_node)
s3c64xx_spi_dt_gpio_free(sdd);
@@ -1440,8 +1390,8 @@ static int s3c64xx_spi_resume(struct device *dev)
sci->cfg_gpio();
/* Enable the clock */
- clk_enable(sdd->src_clk);
- clk_enable(sdd->clk);
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
s3c64xx_spi_hwinit(sdd, sdd->port_id);
@@ -1457,8 +1407,8 @@ static int s3c64xx_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- clk_disable(sdd->clk);
- clk_disable(sdd->src_clk);
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
return 0;
}
@@ -1468,8 +1418,8 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- clk_enable(sdd->src_clk);
- clk_enable(sdd->clk);
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
return 0;
}
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 796c077ef439..60cfae51c713 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -68,6 +68,16 @@ static u32 hspi_read(struct hspi_priv *hspi, int reg)
return ioread32(hspi->addr + reg);
}
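+/*
+ * hspi_bit_set() - read-modify-write helper: clear the bits selected by
+ * @mask in @reg, then set those bits of @set that fall inside @mask.
+ */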
+static void hspi_bit_set(struct hspi_priv *hspi, int reg, u32 mask, u32 set)
+{
+ u32 val = hspi_read(hspi, reg);
+
+ val &= ~mask;
+ val |= set & mask;
+
+ hspi_write(hspi, reg, val);
+}
+
/*
* transfer function
*/
@@ -105,6 +115,13 @@ static int hspi_unprepare_transfer(struct spi_master *master)
return 0;
}
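+/*
+ * Manual chip-select handling: the 0x20 bit added to the SPSCR write in
+ * hspi_hw_setup() appears to hand CS control to software, and bit 6 then
+ * sets the CS line level (active low, so "enable" writes 0).
+ */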
+#define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0)
+#define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1)
+static void hspi_hw_cs_ctrl(struct hspi_priv *hspi, int hi)
+{
+ hspi_bit_set(hspi, SPSCR, (1 << 6), (hi) << 6);
+}
+
static void hspi_hw_setup(struct hspi_priv *hspi,
struct spi_message *msg,
struct spi_transfer *t)
@@ -155,7 +172,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
hspi_write(hspi, SPCR, spcr);
hspi_write(hspi, SPSR, 0x0);
- hspi_write(hspi, SPSCR, 0x1); /* master mode */
+ hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
}
static int hspi_transfer_one_message(struct spi_master *master,
@@ -166,12 +183,21 @@ static int hspi_transfer_one_message(struct spi_master *master,
u32 tx;
u32 rx;
int ret, i;
+ unsigned int cs_change;
+ const int nsecs = 50;
dev_dbg(hspi->dev, "%s\n", __func__);
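+ /*
+  * cs_change starts out set so that CS is configured and asserted before
+  * the first transfer; afterwards it mirrors t->cs_change, i.e. whether CS
+  * must be released after this transfer and re-asserted before the next.
+  */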
+ cs_change = 1;
ret = 0;
list_for_each_entry(t, &msg->transfers, transfer_list) {
- hspi_hw_setup(hspi, msg, t);
+
+ if (cs_change) {
+ hspi_hw_setup(hspi, msg, t);
+ hspi_hw_cs_enable(hspi);
+ ndelay(nsecs);
+ }
+ cs_change = t->cs_change;
for (i = 0; i < t->len; i++) {
@@ -198,9 +224,22 @@ static int hspi_transfer_one_message(struct spi_master *master,
}
msg->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ ndelay(nsecs);
+ }
}
msg->status = ret;
+ if (!cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ }
spi_finalize_current_message(master);
return ret;
@@ -229,7 +268,7 @@ static void hspi_cleanup(struct spi_device *spi)
dev_dbg(dev, "%s cleanup\n", spi->modalias);
}
-static int __devinit hspi_probe(struct platform_device *pdev)
+static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
@@ -251,7 +290,7 @@ static int __devinit hspi_probe(struct platform_device *pdev)
}
clk = clk_get(NULL, "shyway_clk");
- if (!clk) {
+ if (IS_ERR(clk)) {
dev_err(&pdev->dev, "shyway_clk is required\n");
ret = -EINVAL;
goto error0;
@@ -300,7 +339,7 @@ static int __devinit hspi_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit hspi_remove(struct platform_device *pdev)
+static int hspi_remove(struct platform_device *pdev)
{
struct hspi_priv *hspi = dev_get_drvdata(&pdev->dev);
@@ -314,7 +353,7 @@ static int __devexit hspi_remove(struct platform_device *pdev)
static struct platform_driver hspi_driver = {
.probe = hspi_probe,
- .remove = __devexit_p(hspi_remove),
+ .remove = hspi_remove,
.driver = {
.name = "sh-hspi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1f466bc66d9d..8b40d0884f8b 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -592,12 +593,42 @@ static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
return 0;
}
+#ifdef CONFIG_OF
+static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
+{
+ struct sh_msiof_spi_info *info;
+ struct device_node *np = dev->of_node;
+ u32 num_cs = 0;
+
+ info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "failed to allocate setup data\n");
+ return NULL;
+ }
+
+ /* Parse the MSIOF properties */
+ of_property_read_u32(np, "num-cs", &num_cs);
+ of_property_read_u32(np, "renesas,tx-fifo-size",
+ &info->tx_fifo_override);
+ of_property_read_u32(np, "renesas,rx-fifo-size",
+ &info->rx_fifo_override);
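+ /* Optional properties that are absent simply keep the zeroed defaults */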
+
+ info->num_chipselect = num_cs;
+
+ return info;
+}
+#else
+static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct spi_master *master;
struct sh_msiof_spi_priv *p;
- char clk_name[16];
int i;
int ret;
@@ -611,13 +642,22 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p = spi_master_get_devdata(master);
platform_set_drvdata(pdev, p);
- p->info = pdev->dev.platform_data;
+ if (pdev->dev.of_node)
+ p->info = sh_msiof_spi_parse_dt(&pdev->dev);
+ else
+ p->info = pdev->dev.platform_data;
+
+ if (!p->info) {
+ dev_err(&pdev->dev, "failed to obtain device info\n");
+ ret = -ENXIO;
+ goto err1;
+ }
+
init_completion(&p->done);
- snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id);
- p->clk = clk_get(&pdev->dev, clk_name);
+ p->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
- dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
@@ -717,6 +757,17 @@ static int sh_msiof_spi_runtime_nop(struct device *dev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,sh-msiof", },
+ { .compatible = "renesas,sh-mobile-msiof", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_msiof_match);
+#else
+#define sh_msiof_match NULL
+#endif
+
static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
.runtime_suspend = sh_msiof_spi_runtime_nop,
.runtime_resume = sh_msiof_spi_runtime_nop,
@@ -729,6 +780,7 @@ static struct platform_driver sh_msiof_spi_drv = {
.name = "spi_sh_msiof",
.owner = THIS_MODULE,
.pm = &sh_msiof_spi_dev_pm_ops,
+ .of_match_table = sh_msiof_match,
},
};
module_platform_driver(sh_msiof_spi_drv);
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 79442c31bcd9..3c3600a994bd 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -432,7 +432,7 @@ static irqreturn_t spi_sh_irq(int irq, void *_ss)
return IRQ_HANDLED;
}
-static int __devexit spi_sh_remove(struct platform_device *pdev)
+static int spi_sh_remove(struct platform_device *pdev)
{
struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);
@@ -444,7 +444,7 @@ static int __devexit spi_sh_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit spi_sh_probe(struct platform_device *pdev)
+static int spi_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
@@ -539,7 +539,7 @@ static int __devinit spi_sh_probe(struct platform_device *pdev)
static struct platform_driver spi_sh_driver = {
.probe = spi_sh_probe,
- .remove = __devexit_p(spi_sh_remove),
+ .remove = spi_sh_remove,
.driver = {
.name = "sh_spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index ecc3d9763d10..f59d4177b419 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -382,8 +382,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
sspi = spi_master_get_devdata(spi->master);
- bits_per_word = t && t->bits_per_word ? t->bits_per_word :
- spi->bits_per_word;
+ bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
/* Enable IO mode for RX, TX */
@@ -479,7 +478,7 @@ static int spi_sirfsoc_setup(struct spi_device *spi)
return spi_sirfsoc_setup_transfer(spi, NULL);
}
-static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
+static int spi_sirfsoc_probe(struct platform_device *pdev)
{
struct sirfsoc_spi *sspi;
struct spi_master *master;
@@ -535,10 +534,9 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
}
}
- sspi->base = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (!sspi->base) {
- dev_err(&pdev->dev, "IO remap failed!\n");
- ret = -ENOMEM;
+ sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(sspi->base)) {
+ ret = PTR_ERR(sspi->base);
goto free_master;
}
@@ -570,7 +568,7 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
ret = -EINVAL;
goto free_pin;
}
- clk_enable(sspi->clk);
+ clk_prepare_enable(sspi->clk);
sspi->ctrl_freq = clk_get_rate(sspi->clk);
init_completion(&sspi->done);
@@ -594,7 +592,7 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
return 0;
free_clk:
- clk_disable(sspi->clk);
+ clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
free_pin:
pinctrl_put(sspi->p);
@@ -604,7 +602,7 @@ err_cs:
return ret;
}
-static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
+static int spi_sirfsoc_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct sirfsoc_spi *sspi;
@@ -618,7 +616,7 @@ static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
if (sspi->chipselect[i] > 0)
gpio_free(sspi->chipselect[i]);
}
- clk_disable(sspi->clk);
+ clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
pinctrl_put(sspi->p);
spi_master_put(master);
@@ -659,6 +657,7 @@ static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
static const struct of_device_id spi_sirfsoc_of_match[] = {
{ .compatible = "sirf,prima2-spi", },
+ { .compatible = "sirf,marco-spi", },
{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
@@ -673,7 +672,7 @@ static struct platform_driver spi_sirfsoc_driver = {
.of_match_table = spi_sirfsoc_of_match,
},
.probe = spi_sirfsoc_probe,
- .remove = __devexit_p(spi_sirfsoc_remove),
+ .remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
diff --git a/drivers/spi/spi-stmp.c b/drivers/spi/spi-stmp.c
deleted file mode 100644
index 911e904b3c84..000000000000
--- a/drivers/spi/spi-stmp.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * Freescale STMP378X SPI master driver
- *
- * Author: dmitry pervushin <dimka@embeddedalley.com>
- *
- * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- */
-
-/*
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-
-#include <mach/platform.h>
-#include <mach/stmp3xxx.h>
-#include <mach/dma.h>
-#include <mach/regs-ssp.h>
-#include <mach/regs-apbh.h>
-
-
-/* 0 means DMA mode(recommended, default), !0 - PIO mode */
-static int pio;
-static int clock;
-
-/* default timeout for busy waits is 2 seconds */
-#define STMP_SPI_TIMEOUT (2 * HZ)
-
-struct stmp_spi {
- int id;
-
- void * __iomem regs; /* vaddr of the control registers */
-
- int irq, err_irq;
- u32 dma;
- struct stmp3xxx_dma_descriptor d;
-
- u32 speed_khz;
- u32 saved_timings;
- u32 divider;
-
- struct clk *clk;
- struct device *master_dev;
-
- struct work_struct work;
- struct workqueue_struct *workqueue;
-
- /* lock protects queue access */
- spinlock_t lock;
- struct list_head queue;
-
- struct completion done;
-};
-
-#define busy_wait(cond) \
- ({ \
- unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT; \
- bool succeeded = false; \
- do { \
- if (cond) { \
- succeeded = true; \
- break; \
- } \
- cpu_relax(); \
- } while (time_before(jiffies, end_jiffies)); \
- succeeded; \
- })
-
-/**
- * stmp_spi_init_hw
- * Initialize the SSP port
- */
-static int stmp_spi_init_hw(struct stmp_spi *ss)
-{
- int err = 0;
- void *pins = ss->master_dev->platform_data;
-
- err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev));
- if (err)
- goto out;
-
- ss->clk = clk_get(NULL, "ssp");
- if (IS_ERR(ss->clk)) {
- err = PTR_ERR(ss->clk);
- goto out_free_pins;
- }
- clk_enable(ss->clk);
-
- stmp3xxx_reset_block(ss->regs, false);
- stmp3xxx_dma_reset_channel(ss->dma);
-
- return 0;
-
-out_free_pins:
- stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
-out:
- return err;
-}
-
-static void stmp_spi_release_hw(struct stmp_spi *ss)
-{
- void *pins = ss->master_dev->platform_data;
-
- if (ss->clk && !IS_ERR(ss->clk)) {
- clk_disable(ss->clk);
- clk_put(ss->clk);
- }
- stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
-}
-
-static int stmp_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- u8 bits_per_word;
- u32 hz;
- struct stmp_spi *ss = spi_master_get_devdata(spi->master);
- u16 rate;
-
- bits_per_word = spi->bits_per_word;
- if (t && t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
- /*
- * Calculate speed:
- * - by default, use maximum speed from ssp clk
- * - if device overrides it, use it
- * - if transfer specifies other speed, use transfer's one
- */
- hz = 1000 * ss->speed_khz / ss->divider;
- if (spi->max_speed_hz)
- hz = min(hz, spi->max_speed_hz);
- if (t && t->speed_hz)
- hz = min(hz, t->speed_hz);
-
- if (hz == 0) {
- dev_err(&spi->dev, "Cannot continue with zero clock\n");
- return -EINVAL;
- }
-
- if (bits_per_word != 8) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
- return -EINVAL;
- }
-
- dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %uHz/%d = %uHz\n",
- hz, ss->speed_khz, ss->divider,
- ss->speed_khz * 1000 / ss->divider);
-
- if (ss->speed_khz * 1000 / ss->divider < hz) {
- dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
- __func__, hz);
- return -EINVAL;
- }
-
- rate = 1000 * ss->speed_khz/ss->divider/hz;
-
- writel(BF(ss->divider, SSP_TIMING_CLOCK_DIVIDE) |
- BF(rate - 1, SSP_TIMING_CLOCK_RATE),
- HW_SSP_TIMING + ss->regs);
-
- writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) |
- BF(4 /* 8 bits */, SSP_CTRL1_WORD_LENGTH) |
- ((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
- ((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) |
- (pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE),
- ss->regs + HW_SSP_CTRL1);
-
- return 0;
-}
-
-static int stmp_spi_setup(struct spi_device *spi)
-{
- /* spi_setup() does basic checks,
- * stmp_spi_setup_transfer() does more later
- */
- if (spi->bits_per_word != 8) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, spi->bits_per_word);
- return -EINVAL;
- }
- return 0;
-}
-
-static inline u32 stmp_spi_cs(unsigned cs)
-{
- return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) |
- ((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0);
-}
-
-static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
- unsigned char *buf, dma_addr_t dma_buf, int len,
- int first, int last, bool write)
-{
- u32 c0 = 0;
- dma_addr_t spi_buf_dma = dma_buf;
- int status = 0;
- enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0);
- c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0);
- c0 |= (write ? 0 : BM_SSP_CTRL0_READ);
- c0 |= BM_SSP_CTRL0_DATA_XFER;
-
- c0 |= stmp_spi_cs(cs);
-
- c0 |= BF(len, SSP_CTRL0_XFER_COUNT);
-
- if (!dma_buf)
- spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir);
-
- ss->d.command->cmd =
- BF(len, APBH_CHn_CMD_XFER_COUNT) |
- BF(1, APBH_CHn_CMD_CMDWORDS) |
- BM_APBH_CHn_CMD_WAIT4ENDCMD |
- BM_APBH_CHn_CMD_IRQONCMPLT |
- BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ :
- BV_APBH_CHn_CMD_COMMAND__DMA_WRITE,
- APBH_CHn_CMD_COMMAND);
- ss->d.command->pio_words[0] = c0;
- ss->d.command->buf_ptr = spi_buf_dma;
-
- stmp3xxx_dma_reset_channel(ss->dma);
- stmp3xxx_dma_clear_interrupt(ss->dma);
- stmp3xxx_dma_enable_interrupt(ss->dma);
- init_completion(&ss->done);
- stmp3xxx_dma_go(ss->dma, &ss->d, 1);
- wait_for_completion(&ss->done);
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
- status = -ETIMEDOUT;
-
- if (!dma_buf)
- dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);
-
- return status;
-}
-
-static inline void stmp_spi_enable(struct stmp_spi *ss)
-{
- stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
- stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
-}
-
-static inline void stmp_spi_disable(struct stmp_spi *ss)
-{
- stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
- stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
-}
-
-static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs,
- unsigned char *buf, int len,
- bool first, bool last, bool write)
-{
- if (first)
- stmp_spi_enable(ss);
-
- stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0);
-
- while (len--) {
- if (last && len <= 0)
- stmp_spi_disable(ss);
-
- stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT,
- ss->regs + HW_SSP_CTRL0);
- stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0);
-
- if (write)
- stmp3xxx_clearl(BM_SSP_CTRL0_READ,
- ss->regs + HW_SSP_CTRL0);
- else
- stmp3xxx_setl(BM_SSP_CTRL0_READ,
- ss->regs + HW_SSP_CTRL0);
-
- /* Run! */
- stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0);
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
- BM_SSP_CTRL0_RUN))
- break;
-
- if (write)
- writel(*buf, ss->regs + HW_SSP_DATA);
-
- /* Set TRANSFER */
- stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0);
-
- if (!write) {
- if (busy_wait((readl(ss->regs + HW_SSP_STATUS) &
- BM_SSP_STATUS_FIFO_EMPTY)))
- break;
- *buf = readl(ss->regs + HW_SSP_DATA) & 0xFF;
- }
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
- BM_SSP_CTRL0_RUN))
- break;
-
- /* advance to the next byte */
- buf++;
- }
-
- return len < 0 ? 0 : -ETIMEDOUT;
-}
-
-static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m)
-{
- bool first, last;
- struct spi_transfer *t, *tmp_t;
- int status = 0;
- int cs;
-
- cs = m->spi->chip_select;
-
- list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
-
- first = (&t->transfer_list == m->transfers.next);
- last = (&t->transfer_list == m->transfers.prev);
-
- if (first || t->speed_hz || t->bits_per_word)
- stmp_spi_setup_transfer(m->spi, t);
-
- /* reject "not last" transfers which request to change cs */
- if (t->cs_change && !last) {
- dev_err(&m->spi->dev,
- "Message with t->cs_change has been skipped\n");
- continue;
- }
-
- if (t->tx_buf) {
- status = pio ?
- stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf,
- t->len, first, last, true) :
- stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf,
- t->tx_dma, t->len, first, last, true);
-#ifdef DEBUG
- if (t->len < 0x10)
- print_hex_dump_bytes("Tx ",
- DUMP_PREFIX_OFFSET,
- t->tx_buf, t->len);
- else
- pr_debug("Tx: %d bytes\n", t->len);
-#endif
- }
- if (t->rx_buf) {
- status = pio ?
- stmp_spi_txrx_pio(ss, cs, t->rx_buf,
- t->len, first, last, false) :
- stmp_spi_txrx_dma(ss, cs, t->rx_buf,
- t->rx_dma, t->len, first, last, false);
-#ifdef DEBUG
- if (t->len < 0x10)
- print_hex_dump_bytes("Rx ",
- DUMP_PREFIX_OFFSET,
- t->rx_buf, t->len);
- else
- pr_debug("Rx: %d bytes\n", t->len);
-#endif
- }
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (status)
- break;
-
- }
- return status;
-}
-
-/**
- * stmp_spi_handle - handle messages from the queue
- */
-static void stmp_spi_handle(struct work_struct *w)
-{
- struct stmp_spi *ss = container_of(w, struct stmp_spi, work);
- unsigned long flags;
- struct spi_message *m;
-
- spin_lock_irqsave(&ss->lock, flags);
- while (!list_empty(&ss->queue)) {
- m = list_entry(ss->queue.next, struct spi_message, queue);
- list_del_init(&m->queue);
- spin_unlock_irqrestore(&ss->lock, flags);
-
- m->status = stmp_spi_handle_message(ss, m);
- m->complete(m->context);
-
- spin_lock_irqsave(&ss->lock, flags);
- }
- spin_unlock_irqrestore(&ss->lock, flags);
-
- return;
-}
-
-/**
- * stmp_spi_transfer - perform message transfer.
- * Called indirectly from spi_async, queues all the messages to
- * spi_handle_message.
- * @spi: spi device
- * @m: message to be queued
- */
-static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m)
-{
- struct stmp_spi *ss = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- m->status = -EINPROGRESS;
- spin_lock_irqsave(&ss->lock, flags);
- list_add_tail(&m->queue, &ss->queue);
- queue_work(ss->workqueue, &ss->work);
- spin_unlock_irqrestore(&ss->lock, flags);
- return 0;
-}
-
-static irqreturn_t stmp_spi_irq(int irq, void *dev_id)
-{
- struct stmp_spi *ss = dev_id;
-
- stmp3xxx_dma_clear_interrupt(ss->dma);
- complete(&ss->done);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id)
-{
- struct stmp_spi *ss = dev_id;
- u32 c1, st;
-
- c1 = readl(ss->regs + HW_SSP_CTRL1);
- st = readl(ss->regs + HW_SSP_STATUS);
- dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n",
- __func__, st, c1);
- stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit stmp_spi_probe(struct platform_device *dev)
-{
- int err = 0;
- struct spi_master *master;
- struct stmp_spi *ss;
- struct resource *r;
-
- master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi));
- if (master == NULL) {
- err = -ENOMEM;
- goto out0;
- }
- master->flags = SPI_MASTER_HALF_DUPLEX;
-
- ss = spi_master_get_devdata(master);
- platform_set_drvdata(dev, master);
-
- /* Get resources(memory, IRQ) associated with the device */
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- err = -ENODEV;
- goto out_put_master;
- }
- ss->regs = ioremap(r->start, resource_size(r));
- if (!ss->regs) {
- err = -EINVAL;
- goto out_put_master;
- }
-
- ss->master_dev = &dev->dev;
- ss->id = dev->id;
-
- INIT_WORK(&ss->work, stmp_spi_handle);
- INIT_LIST_HEAD(&ss->queue);
- spin_lock_init(&ss->lock);
-
- ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev));
- if (!ss->workqueue) {
- err = -ENXIO;
- goto out_put_master;
- }
- master->transfer = stmp_spi_transfer;
- master->setup = stmp_spi_setup;
-
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA;
-
- ss->irq = platform_get_irq(dev, 0);
- if (ss->irq < 0) {
- err = ss->irq;
- goto out_put_master;
- }
- ss->err_irq = platform_get_irq(dev, 1);
- if (ss->err_irq < 0) {
- err = ss->err_irq;
- goto out_put_master;
- }
-
- r = platform_get_resource(dev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- err = -ENODEV;
- goto out_put_master;
- }
-
- ss->dma = r->start;
- err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev));
- if (err)
- goto out_put_master;
-
- err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d);
- if (err)
- goto out_free_dma;
-
- master->bus_num = dev->id;
- master->num_chipselect = 1;
-
- /* SPI controller initializations */
- err = stmp_spi_init_hw(ss);
- if (err) {
- dev_dbg(&dev->dev, "cannot initialize hardware\n");
- goto out_free_dma_desc;
- }
-
- if (clock) {
- dev_info(&dev->dev, "clock rate forced to %d\n", clock);
- clk_set_rate(ss->clk, clock);
- }
- ss->speed_khz = clk_get_rate(ss->clk);
- ss->divider = 2;
- dev_info(&dev->dev, "max possible speed %d = %ld/%d kHz\n",
- ss->speed_khz, clk_get_rate(ss->clk), ss->divider);
-
- /* Register for SPI interrupt */
- err = request_irq(ss->irq, stmp_spi_irq, 0,
- dev_name(&dev->dev), ss);
- if (err) {
- dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
- goto out_release_hw;
- }
-
- /* ..and shared interrupt for all SSP controllers */
- err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED,
- dev_name(&dev->dev), ss);
- if (err) {
- dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err);
- goto out_free_irq;
- }
-
- err = spi_register_master(master);
- if (err) {
- dev_dbg(&dev->dev, "cannot register spi master, %d\n", err);
- goto out_free_irq_2;
- }
- dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n",
- (u32)ss->regs, ss->irq, master->bus_num,
- pio ? "PIO" : "DMA");
- return 0;
-
-out_free_irq_2:
- free_irq(ss->err_irq, ss);
-out_free_irq:
- free_irq(ss->irq, ss);
-out_free_dma_desc:
- stmp3xxx_dma_free_command(ss->dma, &ss->d);
-out_free_dma:
- stmp3xxx_dma_release(ss->dma);
-out_release_hw:
- stmp_spi_release_hw(ss);
-out_put_master:
- if (ss->workqueue)
- destroy_workqueue(ss->workqueue);
- if (ss->regs)
- iounmap(ss->regs);
- platform_set_drvdata(dev, NULL);
- spi_master_put(master);
-out0:
- return err;
-}
-
-static int __devexit stmp_spi_remove(struct platform_device *dev)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = spi_master_get(platform_get_drvdata(dev));
- ss = spi_master_get_devdata(master);
-
- spi_unregister_master(master);
-
- free_irq(ss->err_irq, ss);
- free_irq(ss->irq, ss);
- stmp3xxx_dma_free_command(ss->dma, &ss->d);
- stmp3xxx_dma_release(ss->dma);
- stmp_spi_release_hw(ss);
- destroy_workqueue(ss->workqueue);
- iounmap(ss->regs);
- spi_master_put(master);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = platform_get_drvdata(pdev);
- ss = spi_master_get_devdata(master);
-
- ss->saved_timings = readl(HW_SSP_TIMING + ss->regs);
- clk_disable(ss->clk);
-
- return 0;
-}
-
-static int stmp_spi_resume(struct platform_device *pdev)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = platform_get_drvdata(pdev);
- ss = spi_master_get_devdata(master);
-
- clk_enable(ss->clk);
- stmp3xxx_reset_block(ss->regs, false);
- writel(ss->saved_timings, ss->regs + HW_SSP_TIMING);
-
- return 0;
-}
-
-#else
-#define stmp_spi_suspend NULL
-#define stmp_spi_resume NULL
-#endif
-
-static struct platform_driver stmp_spi_driver = {
- .probe = stmp_spi_probe,
- .remove = __devexit_p(stmp_spi_remove),
- .driver = {
- .name = "stmp3xxx_ssp",
- .owner = THIS_MODULE,
- },
- .suspend = stmp_spi_suspend,
- .resume = stmp_spi_resume,
-};
-module_platform_driver(stmp_spi_driver);
-
-module_param(pio, int, S_IRUGO);
-module_param(clock, int, S_IRUGO);
-MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
-MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
new file mode 100644
index 000000000000..3d6a12b2af04
--- /dev/null
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -0,0 +1,671 @@
+/*
+ * SPI driver for Nvidia's Tegra20 Serial Flash Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-tegra.h>
+#include <linux/clk/tegra.h>
+
+#define SPI_COMMAND 0x000
+#define SPI_GO BIT(30)
+#define SPI_M_S BIT(28)
+#define SPI_ACTIVE_SCLK_MASK (0x3 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_LOW (0 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_HIGH (1 << 26)
+#define SPI_ACTIVE_SCLK_PULL_LOW (2 << 26)
+#define SPI_ACTIVE_SCLK_PULL_HIGH (3 << 26)
+
+#define SPI_CK_SDA_FALLING (1 << 21)
+#define SPI_CK_SDA_RISING (0 << 21)
+#define SPI_CK_SDA_MASK (1 << 21)
+#define SPI_ACTIVE_SDA (0x3 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_ACTIVE_SDA_PULL_LOW (2 << 18)
+#define SPI_ACTIVE_SDA_PULL_HIGH (3 << 18)
+
+#define SPI_CS_POL_INVERT BIT(16)
+#define SPI_TX_EN BIT(15)
+#define SPI_RX_EN BIT(14)
+#define SPI_CS_VAL_HIGH BIT(13)
+#define SPI_CS_VAL_LOW 0x0
+#define SPI_CS_SW BIT(12)
+#define SPI_CS_HW 0x0
+#define SPI_CS_DELAY_MASK (7 << 9)
+#define SPI_CS3_EN BIT(8)
+#define SPI_CS2_EN BIT(7)
+#define SPI_CS1_EN BIT(6)
+#define SPI_CS0_EN BIT(5)
+
+#define SPI_CS_MASK (SPI_CS3_EN | SPI_CS2_EN | \
+ SPI_CS1_EN | SPI_CS0_EN)
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+
+#define SPI_MODES (SPI_ACTIVE_SCLK_MASK | SPI_CK_SDA_MASK)
+
+#define SPI_STATUS 0x004
+#define SPI_BSY BIT(31)
+#define SPI_RDY BIT(30)
+#define SPI_TXF_FLUSH BIT(29)
+#define SPI_RXF_FLUSH BIT(28)
+#define SPI_RX_UNF BIT(27)
+#define SPI_TX_OVF BIT(26)
+#define SPI_RXF_EMPTY BIT(25)
+#define SPI_RXF_FULL BIT(24)
+#define SPI_TXF_EMPTY BIT(23)
+#define SPI_TXF_FULL BIT(22)
+#define SPI_BLK_CNT(count) (((count) & 0xffff) + 1)
+
+#define SPI_FIFO_ERROR (SPI_RX_UNF | SPI_TX_OVF)
+#define SPI_FIFO_EMPTY (SPI_TXF_EMPTY | SPI_RXF_EMPTY)
+
+#define SPI_RX_CMP 0x8
+#define SPI_DMA_CTL 0x0C
+#define SPI_DMA_EN BIT(31)
+#define SPI_IE_RXC BIT(27)
+#define SPI_IE_TXC BIT(26)
+#define SPI_PACKED BIT(20)
+#define SPI_RX_TRIG_MASK (0x3 << 18)
+#define SPI_RX_TRIG_1W (0x0 << 18)
+#define SPI_RX_TRIG_4W (0x1 << 18)
+#define SPI_TX_TRIG_MASK (0x3 << 16)
+#define SPI_TX_TRIG_1W (0x0 << 16)
+#define SPI_TX_TRIG_4W (0x1 << 16)
+#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF)
+
+#define SPI_TX_FIFO 0x10
+#define SPI_RX_FIFO 0x20
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 4
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+struct tegra_sflash_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned irq;
+ u32 spi_max_frequency;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned bytes_per_word;
+ unsigned cur_direction;
+ unsigned curr_xfer_words;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+
+ u32 def_command_reg;
+ u32 command_reg;
+ u32 dma_control_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+};
+
+static int tegra_sflash_runtime_suspend(struct device *dev);
+static int tegra_sflash_runtime_resume(struct device *dev);
+
+static inline unsigned long tegra_sflash_readl(struct tegra_sflash_data *tsd,
+ unsigned long reg)
+{
+ return readl(tsd->base + reg);
+}
+
+static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, tsd->base + reg);
+}
+
+static void tegra_sflash_clear_status(struct tegra_sflash_data *tsd)
+{
+ /* Write 1 to clear status register */
+ tegra_sflash_writel(tsd, SPI_RDY | SPI_FIFO_ERROR, SPI_STATUS);
+}
+
+static unsigned tegra_sflash_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_sflash_data *tsd,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tsd->cur_pos;
+ unsigned max_word;
+
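+ /*
+  * Transfers are PIO only on this controller: each chunk is capped at the
+  * four-word FIFO and the remainder is continued from the completion IRQ.
+  */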
+ tsd->bytes_per_word = (t->bits_per_word - 1) / 8 + 1;
+ max_word = remain_len / tsd->bytes_per_word;
+ if (max_word > SPI_FIFO_DEPTH)
+ max_word = SPI_FIFO_DEPTH;
+ tsd->curr_xfer_words = max_word;
+ return max_word;
+}
+
+static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned long status;
+ unsigned max_n_32bit = tsd->curr_xfer_words;
+ u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
+
+ if (max_n_32bit > SPI_FIFO_DEPTH)
+ max_n_32bit = SPI_FIFO_DEPTH;
+ nbytes = max_n_32bit * tsd->bytes_per_word;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_TXF_FULL)) {
+ int i;
+ unsigned int x = 0;
+
+ for (i = 0; nbytes && (i < tsd->bytes_per_word);
+ i++, nbytes--)
+ x |= ((*tx_buf++) << i*8);
+ tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
+ if (!nbytes)
+ break;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_tx_pos += max_n_32bit * tsd->bytes_per_word;
+ return max_n_32bit;
+}
+
+static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned long status;
+ unsigned int read_words = 0;
+ u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_RXF_EMPTY)) {
+ int i;
+ unsigned long x;
+
+ x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
+ for (i = 0; (i < tsd->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ read_words++;
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_rx_pos += read_words * tsd->bytes_per_word;
+ return 0;
+}
+
+static int tegra_sflash_start_cpu_based_transfer(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned long val = 0;
+ unsigned cur_words;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TXC;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RXC;
+
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t);
+ else
+ cur_words = tsd->curr_xfer_words;
+ val |= SPI_DMA_BLK_COUNT(cur_words);
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
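+ /* Setting DMA_EN last kicks off the FIFO transfer; no DMA engine is involved */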
+ val |= SPI_DMA_EN;
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ return 0;
+}
+
+static int tegra_sflash_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
+ u32 speed;
+ unsigned long command;
+
+ speed = t->speed_hz;
+ if (speed != tsd->cur_speed) {
+ clk_set_rate(tsd->clk, speed);
+ tsd->cur_speed = speed;
+ }
+
+ tsd->cur_spi = spi;
+ tsd->cur_pos = 0;
+ tsd->cur_rx_pos = 0;
+ tsd->cur_tx_pos = 0;
+ tsd->curr_xfer = t;
+ tegra_sflash_calculate_curr_xfer_param(spi, tsd, t);
+ if (is_first_of_msg) {
+ command = tsd->def_command_reg;
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command |= SPI_CS_VAL_HIGH;
+
+ command &= ~SPI_MODES;
+ if (spi->mode & SPI_CPHA)
+ command |= SPI_CK_SDA_FALLING;
+
+ if (spi->mode & SPI_CPOL)
+ command |= SPI_ACTIVE_SCLK_DRIVE_HIGH;
+ else
+ command |= SPI_ACTIVE_SCLK_DRIVE_LOW;
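+ /* Route the software-controlled chip select to the addressed device */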
+ command |= SPI_CS0_EN << spi->chip_select;
+ } else {
+ command = tsd->command_reg;
+ command &= ~SPI_BIT_LENGTH(~0);
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command &= ~(SPI_RX_EN | SPI_TX_EN);
+ }
+
+ tsd->cur_direction = 0;
+ if (t->rx_buf) {
+ command |= SPI_RX_EN;
+ tsd->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command |= SPI_TX_EN;
+ tsd->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_sflash_writel(tsd, command, SPI_COMMAND);
+ tsd->command_reg = command;
+
+ return tegra_sflash_start_cpu_based_transfer(tsd, t);
+}
+
+static int tegra_sflash_setup(struct spi_device *spi)
+{
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
+
+ /* Set the speed to the SPI max frequency if the SPI device has not set one */
+ spi->max_speed_hz = spi->max_speed_hz ? : tsd->spi_max_frequency;
+ return 0;
+}
+
+static int tegra_sflash_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ int single_xfer;
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+
+ ret = pm_runtime_get_sync(tsd->dev);
+ if (ret < 0) {
+ dev_err(tsd->dev, "pm_runtime_get() failed, err = %d\n", ret);
+ return ret;
+ }
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ INIT_COMPLETION(tsd->xfer_completion);
+ ret = tegra_sflash_start_transfer_one(spi, xfer,
+ is_first_msg, single_xfer);
+ if (ret < 0) {
+ dev_err(tsd->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto exit;
+ }
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tsd->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tsd->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tsd->tx_status || tsd->rx_status) {
+ dev_err(tsd->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ if (xfer->cs_change && xfer->delay_usecs) {
+ tegra_sflash_writel(tsd, tsd->def_command_reg,
+ SPI_COMMAND);
+ udelay(xfer->delay_usecs);
+ }
+ }
+ ret = 0;
+exit:
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ pm_runtime_put(tsd->dev);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
+{
+ struct spi_transfer *t = tsd->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsd->lock, flags);
+ if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) {
+ dev_err(tsd->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tsd->status_reg);
+ dev_err(tsd->dev,
+ "CpuXfer 0x%08x:0x%08x\n", tsd->command_reg,
+ tsd->dma_control_reg);
+ tegra_periph_reset_assert(tsd->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tsd->clk);
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t);
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->cur_pos = tsd->cur_tx_pos;
+ else
+ tsd->cur_pos = tsd->cur_rx_pos;
+
+ if (tsd->cur_pos == t->len) {
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t);
+ tegra_sflash_start_cpu_based_transfer(tsd, t);
+exit:
+ spin_unlock_irqrestore(&tsd->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
+{
+ struct tegra_sflash_data *tsd = context_data;
+
+ tsd->status_reg = tegra_sflash_readl(tsd, SPI_STATUS);
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->tx_status = tsd->status_reg & SPI_TX_OVF;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tsd->rx_status = tsd->status_reg & SPI_RX_UNF;
+ tegra_sflash_clear_status(tsd);
+
+ return handle_cpu_based_xfer(tsd);
+}
+
+static struct tegra_spi_platform_data *tegra_sflash_parse_dt(
+ struct platform_device *pdev)
+{
+ struct tegra_spi_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ u32 max_freq;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
+ return NULL;
+ }
+
+ if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
+ pdata->spi_max_frequency = max_freq;
+
+ return pdata;
+}
+
+static struct of_device_id tegra_sflash_of_match[] = {
+ { .compatible = "nvidia,tegra20-sflash", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);
+
+static int tegra_sflash_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_sflash_data *tsd;
+ struct resource *r;
+ struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(tegra_sflash_of_match),
+ &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
+ if (!pdata && pdev->dev.of_node)
+ pdata = tegra_sflash_parse_dt(pdev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data, exiting\n");
+ return -ENODEV;
+ }
+
+ if (!pdata->spi_max_frequency)
+ pdata->spi_max_frequency = 25000000; /* 25MHz */
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->setup = tegra_sflash_setup;
+ master->transfer_one_message = tegra_sflash_transfer_one_message;
+ master->num_chipselect = MAX_CHIP_SELECT;
+ master->bus_num = -1;
+
+ dev_set_drvdata(&pdev->dev, master);
+ tsd = spi_master_get_devdata(master);
+ tsd->master = master;
+ tsd->dev = &pdev->dev;
+ spin_lock_init(&tsd->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+ tsd->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tsd->base)) {
+ ret = PTR_ERR(tsd->base);
+ goto exit_free_master;
+ }
+
+ tsd->irq = platform_get_irq(pdev, 0);
+ ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
+ dev_name(&pdev->dev), tsd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tsd->irq);
+ goto exit_free_master;
+ }
+
+ tsd->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tsd->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tsd->clk);
+ goto exit_free_irq;
+ }
+
+ tsd->spi_max_frequency = pdata->spi_max_frequency;
+ init_completion(&tsd->xfer_completion);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_sflash_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ /* Reset controller */
+ tegra_periph_reset_assert(tsd->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tsd->clk);
+
+ tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+exit_free_irq:
+ free_irq(tsd->irq, tsd);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_sflash_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ free_irq(tsd->irq, tsd);
+ spi_unregister_master(master);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_sflash_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_sflash_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_sflash_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ /* Flush any writes still queued in the PPSB by reading back */
+ tegra_sflash_readl(tsd, SPI_COMMAND);
+
+ clk_disable_unprepare(tsd->clk);
+ return 0;
+}
+
+static int tegra_sflash_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tsd->clk);
+ if (ret < 0) {
+ dev_err(tsd->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_sflash_runtime_suspend,
+ tegra_sflash_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_sflash_suspend, tegra_sflash_resume)
+};
+static struct platform_driver tegra_sflash_driver = {
+ .driver = {
+ .name = "spi-tegra-sflash",
+ .owner = THIS_MODULE,
+ .pm = &slink_pm_ops,
+ .of_match_table = of_match_ptr(tegra_sflash_of_match),
+ },
+ .probe = tegra_sflash_probe,
+ .remove = tegra_sflash_remove,
+};
+module_platform_driver(tegra_sflash_driver);
+
+MODULE_ALIAS("platform:spi-tegra-sflash");
+MODULE_DESCRIPTION("NVIDIA Tegra20 Serial Flash Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
new file mode 100644
index 000000000000..b8698b389ef3
--- /dev/null
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -0,0 +1,1353 @@
+/*
+ * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-tegra.h>
+#include <linux/clk/tegra.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_MODES (SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+#define SLINK_FIFO_ERROR (SLINK_TX_OVF | SLINK_RX_UNF | \
+ SLINK_TX_UNF | SLINK_RX_OVF)
+
+#define SLINK_FIFO_EMPTY (SLINK_TX_EMPTY | SLINK_RX_EMPTY)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
+#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
+#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
+
+#define SLINK_STATUS2_RESET \
+ (TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)
+
+#define MAX_CHIP_SELECT 4
+#define SLINK_FIFO_DEPTH 32
+
+struct tegra_slink_chip_data {
+ bool cs_hold_time;
+};
+
+struct tegra_slink_data {
+ struct device *dev;
+ struct spi_master *master;
+ const struct tegra_slink_chip_data *chip_data;
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ int dma_req_sel;
+ u32 spi_max_frequency;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+ bool is_hw_based_cs;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ unsigned long packed_size;
+
+ u32 command_reg;
+ u32 command2_reg;
+ u32 dma_control_reg;
+ u32 def_command_reg;
+ u32 def_command2_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static int tegra_slink_runtime_suspend(struct device *dev);
+static int tegra_slink_runtime_resume(struct device *dev);
+
+static inline unsigned long tegra_slink_readl(struct tegra_slink_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SLINK_TX_FIFO)
+ readl(tspi->base + SLINK_MAS_DATA);
+}
+
+static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
+{
+ unsigned long val;
+ unsigned long val_write = 0;
+
+ val = tegra_slink_readl(tspi, SLINK_STATUS);
+
+ /* Write 1 to clear status register */
+ val_write = SLINK_RDY | SLINK_FIFO_ERROR;
+ tegra_slink_writel(tspi, val_write, SLINK_STATUS);
+}
+
+static unsigned long tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned long val;
+
+ switch (tspi->bytes_per_word) {
+ case 0:
+ val = SLINK_PACK_SIZE_4;
+ break;
+ case 1:
+ val = SLINK_PACK_SIZE_8;
+ break;
+ case 2:
+ val = SLINK_PACK_SIZE_16;
+ break;
+ case 4:
+ val = SLINK_PACK_SIZE_32;
+ break;
+ default:
+ val = 0;
+ }
+ return val;
+}
+
+static unsigned tegra_slink_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ bits_per_word = t->bits_per_word;
+ tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
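+ /*
+  * 8- and 16-bit words can be packed several per 32-bit FIFO entry; any
+  * other word size goes unpacked, one FIFO entry per word.
+  */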
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+ tspi->packed_size = tegra_slink_get_packed_size(tspi, t);
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = max_len/4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+ return total_fifo_words;
+}
+
+static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ unsigned long fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (*tx_buf++) << (i*8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; count++) {
+ x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= ((*tx_buf++) << i*8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
+
+static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ unsigned long fifo_status;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
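+ /* Drain the RX FIFO: each 32-bit entry holds either four packed bytes
+ * or one SPI word, unpacked LSB-first into the client buffer. */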
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ unsigned int bits_per_word;
+
+ bits_per_word = t->bits_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
+static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Make the DMA buffer available for reading by the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int x;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= ((*tx_buf++) << i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Make the DMA buffer available again to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Make the DMA buffer available for reading by the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ unsigned int x;
+ unsigned int rx_mask, bits_per_word;
+
+ bits_per_word = t->bits_per_word;
+ rx_mask = (1 << bits_per_word) - 1;
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ x = tspi->rx_dma_buf[count];
+ x &= rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Make the DMA buffer available again to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_slink_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
+{
+ INIT_COMPLETION(tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
+{
+ INIT_COMPLETION(tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
+static int tegra_slink_start_dma_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned long test_val;
+ unsigned int len;
+ int ret = 0;
+ unsigned long status;
+
+ /* Make sure that the Rx and Tx FIFOs are empty */
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
+ dev_err(tspi->dev,
+ "Rx/Tx FIFOs are not empty, status 0x%08lx\n", status);
+ return -EIO;
+ }
+
+ val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
+ val |= tspi->packed_size;
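+ /* In packed mode the DMA length must cover whole 32-bit FIFO words, so
+ * round the byte count up to a multiple of 4. */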
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
+ if (len & 0xF)
+ val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
+ else
+ val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ wmb();
+ ret = tegra_slink_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for the tx FIFO to fill before starting the SLINK */
+ test_val = tegra_slink_readl(tspi, SLINK_STATUS);
+ while (!(test_val & SLINK_TX_FULL))
+ test_val = tegra_slink_readl(tspi, SLINK_STATUS);
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ /* Make the DMA buffer available to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_slink_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ /* HW needs a small delay after setting packed mode */
+ udelay(1);
+ }
+ tspi->dma_control_reg = val;
+
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return ret;
+}
+
+static int tegra_slink_start_cpu_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned cur_words;
+
+ val = tspi->packed_size;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+ val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+ tspi->dma_control_reg = val;
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return 0;
+}
+
+static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!dma_chan) {
+ dev_err(tspi->dev,
+ "Dma channel is not available, will try later\n");
+ return -EPROBE_DEFER;
+ }
+
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tspi->dev, "Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ dma_sconfig.slave_id = tspi->dma_req_sel;
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 0;
+ } else {
+ dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = 0;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret)
+ goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
+static int tegra_slink_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned total_fifo_words;
+ int ret;
+ struct tegra_spi_device_controller_data *cdata = spi->controller_data;
+ unsigned long command;
+ unsigned long command2;
+
+ bits_per_word = t->bits_per_word;
+ speed = t->speed_hz;
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed * 4);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
+
+ if (is_first_of_msg) {
+ tegra_slink_clear_status(tspi);
+
+ command = tspi->def_command_reg;
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ command2 = tspi->def_command2_reg;
+ command2 |= SLINK_SS_EN_CS(spi->chip_select);
+
+ /* possibly use the hw based chip select */
+ tspi->is_hw_based_cs = false;
+ if (cdata && cdata->is_hw_based_cs && is_single_xfer &&
+ ((tspi->curr_dma_words * tspi->bytes_per_word) ==
+ (t->len - tspi->cur_pos))) {
+ int setup_count;
+ int sts2;
+
+ setup_count = cdata->cs_setup_clk_count >> 1;
+ setup_count = max(setup_count, 3);
+ command2 |= SLINK_SS_SETUP(setup_count);
+ if (tspi->chip_data->cs_hold_time) {
+ int hold_count;
+
+ hold_count = cdata->cs_hold_clk_count;
+ hold_count = max(hold_count, 0xF);
+ sts2 = tegra_slink_readl(tspi, SLINK_STATUS2);
+ sts2 &= ~SLINK_SS_HOLD_TIME(0xF);
+ sts2 |= SLINK_SS_HOLD_TIME(hold_count);
+ tegra_slink_writel(tspi, sts2, SLINK_STATUS2);
+ }
+ tspi->is_hw_based_cs = true;
+ }
+
+ if (tspi->is_hw_based_cs)
+ command &= ~SLINK_CS_SW;
+ else
+ command |= SLINK_CS_SW | SLINK_CS_VALUE;
+
+ command &= ~SLINK_MODES;
+ if (spi->mode & SPI_CPHA)
+ command |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ command |= SLINK_IDLE_SCLK_DRIVE_LOW;
+ } else {
+ command = tspi->command_reg;
+ command &= ~SLINK_BIT_LENGTH(~0);
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ command2 = tspi->command2_reg;
+ command2 &= ~(SLINK_RXEN | SLINK_TXEN);
+ }
+
+ tegra_slink_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
+
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command2 |= SLINK_RXEN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command2 |= SLINK_TXEN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
+ tspi->command2_reg = command2;
+
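+ /* Transfers that do not fit in the FIFO are done with DMA; smaller
+ * ones are handled by the CPU (PIO). */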
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ ret = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_slink_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
+
+static int tegra_slink_setup(struct spi_device *spi)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned long val;
+ unsigned long flags;
+ int ret;
+ unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
+ SLINK_CS_POLARITY,
+ SLINK_CS_POLARITY1,
+ SLINK_CS_POLARITY2,
+ SLINK_CS_POLARITY3,
+ };
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
+
+ /* Set speed to the SPI max frequency if the SPI device has not set one */
+ spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
+ ret = pm_runtime_get_sync(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_pol_bit[spi->chip_select];
+ else
+ val &= ~cs_pol_bit[spi->chip_select];
+ tspi->def_command_reg = val;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static int tegra_slink_prepare_transfer(struct spi_master *master)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ return pm_runtime_get_sync(tspi->dev);
+}
+
+static int tegra_slink_unprepare_transfer(struct spi_master *master)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static int tegra_slink_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ int single_xfer;
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ INIT_COMPLETION(tspi->xfer_completion);
+ ret = tegra_slink_start_transfer_one(spi, xfer,
+ is_first_msg, single_xfer);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi cannot start transfer, err %d\n", ret);
+ goto exit;
+ }
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SLINK_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi transfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ if (xfer->cs_change && xfer->delay_usecs) {
+ tegra_slink_writel(tspi, tspi->def_command_reg,
+ SLINK_COMMAND);
+ udelay(xfer->delay_usecs);
+ }
+ }
+ ret = 0;
+exit:
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status ||
+ (tspi->status_reg & SLINK_BSY)) {
+ dev_err(tspi->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_slink_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev,
+ "DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ err = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_slink_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
+static irqreturn_t tegra_slink_isr(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SLINK_TX_OVF | SLINK_TX_UNF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SLINK_RX_OVF | SLINK_RX_UNF);
+ tegra_slink_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static struct tegra_spi_platform_data *tegra_slink_parse_dt(
+ struct platform_device *pdev)
+{
+ struct tegra_spi_platform_data *pdata;
+ const unsigned int *prop;
+ struct device_node *np = pdev->dev.of_node;
+ u32 of_dma[2];
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
+ of_dma, 2) >= 0)
+ pdata->dma_req_sel = of_dma[1];
+
+ prop = of_get_property(np, "spi-max-frequency", NULL);
+ if (prop)
+ pdata->spi_max_frequency = be32_to_cpup(prop);
+
+ return pdata;
+}
+
+const struct tegra_slink_chip_data tegra30_spi_cdata = {
+ .cs_hold_time = true,
+};
+
+const struct tegra_slink_chip_data tegra20_spi_cdata = {
+ .cs_hold_time = false,
+};
+
+static struct of_device_id tegra_slink_of_match[] = {
+ { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
+ { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
+
+static int tegra_slink_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_slink_data *tspi;
+ struct resource *r;
+ struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
+ int ret, spi_irq;
+ const struct tegra_slink_chip_data *cdata = NULL;
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(tegra_slink_of_match), &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ cdata = match->data;
+ if (!pdata && pdev->dev.of_node)
+ pdata = tegra_slink_parse_dt(pdev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data, exiting\n");
+ return -ENODEV;
+ }
+
+ if (!pdata->spi_max_frequency)
+ pdata->spi_max_frequency = 25000000; /* 25MHz */
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tegra_slink_setup;
+ master->prepare_transfer_hardware = tegra_slink_prepare_transfer;
+ master->transfer_one_message = tegra_slink_transfer_one_message;
+ master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer;
+ master->num_chipselect = MAX_CHIP_SELECT;
+ master->bus_num = -1;
+
+ dev_set_drvdata(&pdev->dev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->dma_req_sel = pdata->dma_req_sel;
+ tspi->dev = &pdev->dev;
+ tspi->chip_data = cdata;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+ tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tspi->base)) {
+ ret = PTR_ERR(tspi->base);
+ goto exit_free_master;
+ }
+
+ spi_irq = platform_get_irq(pdev, 0);
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ tegra_slink_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_free_master;
+ }
+
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto exit_free_irq;
+ }
+
+ tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+ tspi->spi_max_frequency = pdata->spi_max_frequency;
+
+ if (pdata->dma_req_sel) {
+ ret = tegra_slink_init_dma_param(tspi, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
+ goto exit_free_irq;
+ }
+
+ ret = tegra_slink_init_dma_param(tspi, false);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
+ goto exit_rx_dma_free;
+ }
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+ }
+
+ init_completion(&tspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_slink_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+ tspi->def_command_reg = SLINK_M_S;
+ tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register master, err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_slink_runtime_suspend(&pdev->dev);
+ tegra_slink_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_slink_deinit_dma_param(tspi, true);
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_slink_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ free_irq(tspi->irq, tspi);
+ spi_unregister_master(master);
+
+ if (tspi->tx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, true);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_slink_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_slink_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_slink_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_slink_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ /* Flush all writes still in the PPSB queue by reading back */
+ tegra_slink_readl(tspi, SLINK_MAS_DATA);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int tegra_slink_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
+ tegra_slink_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
+};
+static struct platform_driver tegra_slink_driver = {
+ .driver = {
+ .name = "spi-tegra-slink",
+ .owner = THIS_MODULE,
+ .pm = &slink_pm_ops,
+ .of_match_table = of_match_ptr(tegra_slink_of_match),
+ },
+ .probe = tegra_slink_probe,
+ .remove = tegra_slink_remove,
+};
+module_platform_driver(tegra_slink_driver);
+
+MODULE_ALIAS("platform:spi-tegra-slink");
+MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c
index 3f6f6e81c655..46992cab65f1 100644
--- a/drivers/spi/spi-ti-ssp.c
+++ b/drivers/spi/spi-ti-ssp.c
@@ -289,7 +289,7 @@ error_unlock:
return error;
}
-static int __devinit ti_ssp_spi_probe(struct platform_device *pdev)
+static int ti_ssp_spi_probe(struct platform_device *pdev)
{
const struct ti_ssp_spi_data *pdata;
struct ti_ssp_spi *hw;
@@ -357,7 +357,7 @@ error_wq:
return error;
}
-static int __devexit ti_ssp_spi_remove(struct platform_device *pdev)
+static int ti_ssp_spi_remove(struct platform_device *pdev)
{
struct ti_ssp_spi *hw = platform_get_drvdata(pdev);
int error;
@@ -378,7 +378,7 @@ static int __devexit ti_ssp_spi_remove(struct platform_device *pdev)
static struct platform_driver ti_ssp_spi_driver = {
.probe = ti_ssp_spi_probe,
- .remove = __devexit_p(ti_ssp_spi_remove),
+ .remove = ti_ssp_spi_remove,
.driver = {
.name = "ti-ssp-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index 24421024deaf..6b0874d782ed 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -240,7 +240,7 @@ static int to_gpio_num(struct device_attribute *attr)
return -1;
}
-static int __devinit tle62x0_probe(struct spi_device *spi)
+static int tle62x0_probe(struct spi_device *spi)
{
struct tle62x0_state *st;
struct tle62x0_pdata *pdata;
@@ -294,7 +294,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi)
return ret;
}
-static int __devexit tle62x0_remove(struct spi_device *spi)
+static int tle62x0_remove(struct spi_device *spi)
{
struct tle62x0_state *st = spi_get_drvdata(spi);
int ptr;
@@ -313,7 +313,7 @@ static struct spi_driver tle62x0_driver = {
.owner = THIS_MODULE,
},
.probe = tle62x0_probe,
- .remove = __devexit_p(tle62x0_remove),
+ .remove = tle62x0_remove,
};
module_spi_driver(tle62x0_driver);
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 135f7406f4bf..f756481b0fea 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1401,7 +1401,7 @@ static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
}
-static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
+static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
int ret;
struct spi_master *master;
@@ -1498,7 +1498,7 @@ err_pci_iomap:
return ret;
}
-static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
+static int pch_spi_pd_remove(struct platform_device *plat_dev)
{
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(plat_dev);
@@ -1619,12 +1619,12 @@ static struct platform_driver pch_spi_pd_driver = {
.owner = THIS_MODULE,
},
.probe = pch_spi_pd_probe,
- .remove = __devexit_p(pch_spi_pd_remove),
+ .remove = pch_spi_pd_remove,
.suspend = pch_spi_pd_suspend,
.resume = pch_spi_pd_resume
};
-static int __devinit pch_spi_probe(struct pci_dev *pdev,
+static int pch_spi_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pch_spi_board_data *board_dat;
@@ -1705,7 +1705,7 @@ err_no_mem:
return retval;
}
-static void __devexit pch_spi_remove(struct pci_dev *pdev)
+static void pch_spi_remove(struct pci_dev *pdev)
{
int i;
struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
@@ -1776,7 +1776,7 @@ static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
- .remove = __devexit_p(pch_spi_remove),
+ .remove = pch_spi_remove,
.suspend = pch_spi_suspend,
.resume = pch_spi_resume,
};
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index d5a3cbb646cb..adb853047926 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -189,9 +189,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
unsigned int len = t->len;
unsigned int wsize;
u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
- u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+ u8 bits_per_word = t->bits_per_word;
- bits_per_word = bits_per_word ? : 8;
wsize = bits_per_word >> 3; /* in bytes */
if (prev_speed_hz != speed_hz
@@ -316,9 +315,8 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
/* check each transfer's parameters */
list_for_each_entry (t, &m->transfers, transfer_list) {
u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
- u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+ u8 bits_per_word = t->bits_per_word;
- bits_per_word = bits_per_word ? : 8;
if (!t->tx_buf && !t->rx_buf && t->len)
return -EINVAL;
if (bits_per_word != 8 && bits_per_word != 16)
@@ -337,7 +335,7 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
-static int __init txx9spi_probe(struct platform_device *dev)
+static int txx9spi_probe(struct platform_device *dev)
{
struct spi_master *master;
struct txx9spi *c;
@@ -432,7 +430,7 @@ exit:
return ret;
}
-static int __exit txx9spi_remove(struct platform_device *dev)
+static int txx9spi_remove(struct platform_device *dev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
struct txx9spi *c = spi_master_get_devdata(master);
@@ -450,7 +448,7 @@ static int __exit txx9spi_remove(struct platform_device *dev)
MODULE_ALIAS("platform:spi_txx9");
static struct platform_driver txx9spi_driver = {
- .remove = __exit_p(txx9spi_remove),
+ .remove = txx9spi_remove,
.driver = {
.name = "spi_txx9",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 266a847e2992..4d3ec8b9f479 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -217,7 +217,7 @@ static int spi_xcomm_setup(struct spi_device *spi)
return 0;
}
-static int __devinit spi_xcomm_probe(struct i2c_client *i2c,
+static int spi_xcomm_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct spi_xcomm *spi_xcomm;
@@ -246,7 +246,7 @@ static int __devinit spi_xcomm_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit spi_xcomm_remove(struct i2c_client *i2c)
+static int spi_xcomm_remove(struct i2c_client *i2c)
{
struct spi_master *master = i2c_get_clientdata(i2c);
@@ -267,7 +267,7 @@ static struct i2c_driver spi_xcomm_driver = {
},
.id_table = spi_xcomm_ids,
.probe = spi_xcomm_probe,
- .remove = __devexit_p(spi_xcomm_remove),
+ .remove = spi_xcomm_remove,
};
module_i2c_driver(spi_xcomm_driver);
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 4c5a663b9fa8..e1d769607425 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -462,7 +462,7 @@ void xilinx_spi_deinit(struct spi_master *master)
}
EXPORT_SYMBOL(xilinx_spi_deinit);
-static int __devinit xilinx_spi_probe(struct platform_device *dev)
+static int xilinx_spi_probe(struct platform_device *dev)
{
struct xspi_platform_data *pdata;
struct resource *r;
@@ -518,7 +518,7 @@ static int __devinit xilinx_spi_probe(struct platform_device *dev)
return 0;
}
-static int __devexit xilinx_spi_remove(struct platform_device *dev)
+static int xilinx_spi_remove(struct platform_device *dev)
{
xilinx_spi_deinit(platform_get_drvdata(dev));
platform_set_drvdata(dev, 0);
@@ -531,7 +531,7 @@ MODULE_ALIAS("platform:" XILINX_SPI_NAME);
static struct platform_driver xilinx_spi_driver = {
.probe = xilinx_spi_probe,
- .remove = __devexit_p(xilinx_spi_remove),
+ .remove = xilinx_spi_remove,
.driver = {
.name = XILINX_SPI_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 718cc1f49230..f996c600eb8c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -30,9 +30,10 @@
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
+#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
@@ -333,6 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
spi->dev.parent = &master->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
+ spi->cs_gpio = -EINVAL;
device_initialize(&spi->dev);
return spi;
}
@@ -350,15 +352,16 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
int spi_add_device(struct spi_device *spi)
{
static DEFINE_MUTEX(spi_add_lock);
- struct device *dev = spi->master->dev.parent;
+ struct spi_master *master = spi->master;
+ struct device *dev = master->dev.parent;
struct device *d;
int status;
/* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= spi->master->num_chipselect) {
+ if (spi->chip_select >= master->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n",
spi->chip_select,
- spi->master->num_chipselect);
+ master->num_chipselect);
return -EINVAL;
}
@@ -382,6 +385,9 @@ int spi_add_device(struct spi_device *spi)
goto done;
}
+ if (master->cs_gpios)
+ spi->cs_gpio = master->cs_gpios[spi->chip_select];
+
/* Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
@@ -492,8 +498,7 @@ static void spi_match_master_to_boardinfo(struct spi_master *master,
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
*/
-int __devinit
-spi_register_board_info(struct spi_board_info const *info, unsigned n)
+int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
struct boardinfo *bi;
int i;
@@ -806,7 +811,7 @@ err_init_queue:
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_OF) && !defined(CONFIG_SPARC)
+#if defined(CONFIG_OF)
/**
* of_register_spi_devices() - Register child devices onto the SPI bus
* @master: Pointer to spi_master device
@@ -819,6 +824,7 @@ static void of_register_spi_devices(struct spi_master *master)
struct spi_device *spi;
struct device_node *nc;
const __be32 *prop;
+ char modalias[SPI_NAME_SIZE + 4];
int rc;
int len;
@@ -861,6 +867,8 @@ static void of_register_spi_devices(struct spi_master *master)
spi->mode |= SPI_CPOL;
if (of_find_property(nc, "spi-cs-high", NULL))
spi->mode |= SPI_CS_HIGH;
+ if (of_find_property(nc, "spi-3wire", NULL))
+ spi->mode |= SPI_3WIRE;
/* Device speed */
prop = of_get_property(nc, "spi-max-frequency", &len);
@@ -880,7 +888,9 @@ static void of_register_spi_devices(struct spi_master *master)
spi->dev.of_node = nc;
/* Register the new device */
- request_module(spi->modalias);
+ snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
+ spi->modalias);
+ request_module(modalias);
rc = spi_add_device(spi);
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
@@ -1046,6 +1056,44 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
+#ifdef CONFIG_OF
+static int of_spi_register_master(struct spi_master *master)
+{
+ int nb, i, *cs;
+ struct device_node *np = master->dev.of_node;
+
+ if (!np)
+ return 0;
+
+ nb = of_gpio_named_count(np, "cs-gpios");
+ master->num_chipselect = max(nb, (int)master->num_chipselect);
+
+ if (nb < 1)
+ return 0;
+
+ cs = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect,
+ GFP_KERNEL);
+ master->cs_gpios = cs;
+
+ if (!master->cs_gpios)
+ return -ENOMEM;
+
+ for (i = 0; i < master->num_chipselect; i++)
+ cs[i] = -EINVAL;
+
+ for (i = 0; i < nb; i++)
+ cs[i] = of_get_named_gpio(np, "cs-gpios", i);
+
+ return 0;
+}
+#else
+static int of_spi_register_master(struct spi_master *master)
+{
+ return 0;
+}
+#endif
+
/**
* spi_register_master - register SPI master controller
* @master: initialized master, originally from spi_alloc_master()
@@ -1077,12 +1125,19 @@ int spi_register_master(struct spi_master *master)
if (!dev)
return -ENODEV;
+ status = of_spi_register_master(master);
+ if (status)
+ return status;
+
/* even if it's just one always-selected device, there must
* be at least one chipselect
*/
if (master->num_chipselect == 0)
return -EINVAL;
+ if ((master->bus_num < 0) && master->dev.of_node)
+ master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
+
/* convention: dynamically assigned bus IDs count down from the max */
if (master->bus_num < 0) {
/* FIXME switch to an IDR based scheme, something like
@@ -1196,10 +1251,10 @@ int spi_master_resume(struct spi_master *master)
}
EXPORT_SYMBOL_GPL(spi_master_resume);
-static int __spi_master_match(struct device *dev, void *data)
+static int __spi_master_match(struct device *dev, const void *data)
{
struct spi_master *m;
- u16 *bus_num = data;
+ const u16 *bus_num = data;
m = container_of(dev, struct spi_master, dev);
return m->bus_num == *bus_num;
@@ -1257,7 +1312,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
int spi_setup(struct spi_device *spi)
{
unsigned bad_bits;
- int status;
+ int status = 0;
/* help drivers fail *cleanly* when they need options
* that aren't supported with their current master
@@ -1272,7 +1327,8 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- status = spi->master->setup(spi);
+ if (spi->master->setup)
+ status = spi->master->setup(spi);
dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
"%u bits/w, %u Hz max --> %d\n",
@@ -1291,6 +1347,7 @@ EXPORT_SYMBOL_GPL(spi_setup);
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
+ struct spi_transfer *xfer;
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
@@ -1299,7 +1356,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
*/
if ((master->flags & SPI_MASTER_HALF_DUPLEX)
|| (spi->mode & SPI_3WIRE)) {
- struct spi_transfer *xfer;
unsigned flags = master->flags;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
@@ -1312,6 +1368,17 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
}
}
+ /*
+ * Set transfer bits_per_word and max speed to the spi device defaults
+ * if they are not set for this transfer.
+ */
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ if (!xfer->bits_per_word)
+ xfer->bits_per_word = spi->bits_per_word;
+ if (!xfer->speed_hz)
+ xfer->speed_hz = spi->max_speed_hz;
+ }
+
message->spi = spi;
message->status = -EINPROGRESS;
return master->transfer(spi, message);
@@ -1588,12 +1655,19 @@ int spi_write_then_read(struct spi_device *spi,
struct spi_transfer x[2];
u8 *local_buf;
- /* Use preallocated DMA-safe buffer. We can't avoid copying here,
- * (as a pure convenience thing), but we can keep heap costs
- * out of the hot path ...
+ /* Use preallocated DMA-safe buffer if we can. We can't avoid
+ * copying here, (as a pure convenience thing), but we can
+ * keep heap costs out of the hot path unless someone else is
+ * using the pre-allocated buffer or the transfer is too large.
*/
- if ((n_tx + n_rx) > SPI_BUFSIZ)
- return -EINVAL;
+ if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
+ local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
+ GFP_KERNEL | GFP_DMA);
+ if (!local_buf)
+ return -ENOMEM;
+ } else {
+ local_buf = buf;
+ }
spi_message_init(&message);
memset(x, 0, sizeof x);
@@ -1606,14 +1680,6 @@ int spi_write_then_read(struct spi_device *spi,
spi_message_add_tail(&x[1], &message);
}
- /* ... unless someone else is using the pre-allocated buffer */
- if (!mutex_trylock(&lock)) {
- local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
- if (!local_buf)
- return -ENOMEM;
- } else
- local_buf = buf;
-
memcpy(local_buf, txbuf, n_tx);
x[0].tx_buf = local_buf;
x[1].rx_buf = local_buf + n_tx;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 830adbed1d7a..2e0655dbe070 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -31,6 +31,8 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
@@ -571,7 +573,7 @@ static struct class *spidev_class;
/*-------------------------------------------------------------------------*/
-static int __devinit spidev_probe(struct spi_device *spi)
+static int spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
@@ -620,7 +622,7 @@ static int __devinit spidev_probe(struct spi_device *spi)
return status;
}
-static int __devexit spidev_remove(struct spi_device *spi)
+static int spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
@@ -642,13 +644,21 @@ static int __devexit spidev_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id spidev_dt_ids[] = {
+ { .compatible = "rohm,dh2228fv" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spidev_dt_ids),
},
.probe = spidev_probe,
- .remove = __devexit_p(spidev_remove),
+ .remove = spidev_remove,
/* NOTE: suspend/resume methods are not necessary here.
* We don't do anything except pass the requests to/from
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 42cdaa9a4d8a..5ff3a4f19443 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -136,6 +136,11 @@ config SSB_DRIVER_MIPS
If unsure, say N
+config SSB_SFLASH
+ bool "SSB serial flash support"
+ depends on SSB_DRIVER_MIPS && BROKEN
+ default y
+
# Assumption: We are on embedded, if we compile the MIPS core.
config SSB_EMBEDDED
bool
@@ -160,4 +165,12 @@ config SSB_DRIVER_GIGE
If unsure, say N
+config SSB_DRIVER_GPIO
+ bool "SSB GPIO driver"
+ depends on SSB && GPIOLIB
+ help
+ Driver to provide access to the GPIO pins on the bus.
+
+ If unsure, say N
+
endmenu
diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile
index 656e58b92618..b1ddc116d387 100644
--- a/drivers/ssb/Makefile
+++ b/drivers/ssb/Makefile
@@ -11,10 +11,12 @@ ssb-$(CONFIG_SSB_SDIOHOST) += sdio.o
# built-in drivers
ssb-y += driver_chipcommon.o
ssb-y += driver_chipcommon_pmu.o
+ssb-$(CONFIG_SSB_SFLASH) += driver_chipcommon_sflash.o
ssb-$(CONFIG_SSB_DRIVER_MIPS) += driver_mipscore.o
ssb-$(CONFIG_SSB_DRIVER_EXTIF) += driver_extif.o
ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o
ssb-$(CONFIG_SSB_DRIVER_GIGE) += driver_gige.o
+ssb-$(CONFIG_SSB_DRIVER_GPIO) += driver_gpio.o
# b43 pci-ssb-bridge driver
# Not strictly a part of SSB, but kept here for convenience
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 95c33a05f434..71098a7b5fed 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -349,6 +349,9 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
+
+ spin_lock_init(&cc->gpio_lock);
+
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
@@ -505,28 +508,93 @@ u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask)
u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
EXPORT_SYMBOL(ssb_chipco_gpio_control);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res = 0;
+
+ if (cc->dev->id.revision < 20)
+ return 0xffffffff;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPULLUP, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res = 0;
+
+ if (cc->dev->id.revision < 20)
+ return 0xffffffff;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPULLDOWN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
#ifdef CONFIG_SSB_SERIAL
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c
new file mode 100644
index 000000000000..720665ca2bb1
--- /dev/null
+++ b/drivers/ssb/driver_chipcommon_sflash.c
@@ -0,0 +1,140 @@
+/*
+ * Sonics Silicon Backplane
+ * ChipCommon serial flash interface
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/ssb/ssb.h>
+
+#include "ssb_private.h"
+
+struct ssb_sflash_tbl_e {
+ char *name;
+ u32 id;
+ u32 blocksize;
+ u16 numblocks;
+};
+
+static struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = {
+ { "M25P20", 0x11, 0x10000, 4, },
+ { "M25P40", 0x12, 0x10000, 8, },
+
+ { "M25P16", 0x14, 0x10000, 32, },
+ { "M25P32", 0x15, 0x10000, 64, },
+ { "M25P64", 0x16, 0x10000, 128, },
+ { "M25FL128", 0x17, 0x10000, 256, },
+ { 0 },
+};
+
+static struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
+ { "SST25WF512", 1, 0x1000, 16, },
+ { "SST25VF512", 0x48, 0x1000, 16, },
+ { "SST25WF010", 2, 0x1000, 32, },
+ { "SST25VF010", 0x49, 0x1000, 32, },
+ { "SST25WF020", 3, 0x1000, 64, },
+ { "SST25VF020", 0x43, 0x1000, 64, },
+ { "SST25WF040", 4, 0x1000, 128, },
+ { "SST25VF040", 0x44, 0x1000, 128, },
+ { "SST25VF040B", 0x8d, 0x1000, 128, },
+ { "SST25WF080", 5, 0x1000, 256, },
+ { "SST25VF080B", 0x8e, 0x1000, 256, },
+ { "SST25VF016", 0x41, 0x1000, 512, },
+ { "SST25VF032", 0x4a, 0x1000, 1024, },
+ { "SST25VF064", 0x4b, 0x1000, 2048, },
+ { 0 },
+};
+
+static struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = {
+ { "AT45DB011", 0xc, 256, 512, },
+ { "AT45DB021", 0x14, 256, 1024, },
+ { "AT45DB041", 0x1c, 256, 2048, },
+ { "AT45DB081", 0x24, 256, 4096, },
+ { "AT45DB161", 0x2c, 512, 4096, },
+ { "AT45DB321", 0x34, 512, 8192, },
+ { "AT45DB642", 0x3c, 1024, 8192, },
+ { 0 },
+};
+
+static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode)
+{
+ int i;
+ chipco_write32(cc, SSB_CHIPCO_FLASHCTL,
+ SSB_CHIPCO_FLASHCTL_START | opcode);
+ for (i = 0; i < 1000; i++) {
+ if (!(chipco_read32(cc, SSB_CHIPCO_FLASHCTL) &
+ SSB_CHIPCO_FLASHCTL_BUSY))
+ return;
+ cpu_relax();
+ }
+ pr_err("SFLASH control command failed (timeout)!\n");
+}
+
+/* Initialize serial flash access */
+int ssb_sflash_init(struct ssb_chipcommon *cc)
+{
+ struct ssb_sflash_tbl_e *e;
+ u32 id, id2;
+
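+ /* Identify the serial flash: issue ID commands through the flash
+ * control registers and look the returned ID up in the tables above. */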
+ switch (cc->capabilities & SSB_CHIPCO_CAP_FLASHT) {
+ case SSB_CHIPCO_FLASHT_STSER:
+ ssb_sflash_cmd(cc, SSB_CHIPCO_FLASHCTL_ST_DP);
+
+ chipco_write32(cc, SSB_CHIPCO_FLASHADDR, 0);
+ ssb_sflash_cmd(cc, SSB_CHIPCO_FLASHCTL_ST_RES);
+ id = chipco_read32(cc, SSB_CHIPCO_FLASHDATA);
+
+ chipco_write32(cc, SSB_CHIPCO_FLASHADDR, 1);
+ ssb_sflash_cmd(cc, SSB_CHIPCO_FLASHCTL_ST_RES);
+ id2 = chipco_read32(cc, SSB_CHIPCO_FLASHDATA);
+
+ switch (id) {
+ case 0xbf:
+ for (e = ssb_sflash_sst_tbl; e->name; e++) {
+ if (e->id == id2)
+ break;
+ }
+ break;
+ case 0x13:
+ return -ENOTSUPP;
+ default:
+ for (e = ssb_sflash_st_tbl; e->name; e++) {
+ if (e->id == id)
+ break;
+ }
+ break;
+ }
+ if (!e->name) {
+ pr_err("Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n",
+ id, id2);
+ return -ENOTSUPP;
+ }
+
+ break;
+ case SSB_CHIPCO_FLASHT_ATSER:
+ ssb_sflash_cmd(cc, SSB_CHIPCO_FLASHCTL_AT_STATUS);
+ id = chipco_read32(cc, SSB_CHIPCO_FLASHDATA) & 0x3c;
+
+ for (e = ssb_sflash_at_tbl; e->name; e++) {
+ if (e->id == id)
+ break;
+ }
+ if (!e->name) {
+ pr_err("Unsupported Atmel serial flash (id: 0x%X)\n",
+ id);
+ return -ENOTSUPP;
+ }
+
+ break;
+ default:
+ pr_err("Unsupported flash type\n");
+ return -ENOTSUPP;
+ }
+
+ pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n",
+ e->name, e->blocksize, e->numblocks);
+
+ pr_err("Serial flash support is not implemented yet!\n");
+
+ return -ENOTSUPP;
+}
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index 553227a3062d..59385fdab5b0 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -138,6 +138,13 @@ u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks)
return ticks;
}
+void ssb_extif_init(struct ssb_extif *extif)
+{
+ if (!extif->dev)
+ return; /* We don't have an Extif core */
+ spin_lock_init(&extif->gpio_lock);
+}
+
u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
{
return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask;
@@ -145,22 +152,50 @@ u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value)
{
- return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0),
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&extif->gpio_lock, flags);
+ res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0),
mask, value);
+ spin_unlock_irqrestore(&extif->gpio_lock, flags);
+
+ return res;
}
u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value)
{
- return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0),
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&extif->gpio_lock, flags);
+ res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0),
mask, value);
+ spin_unlock_irqrestore(&extif->gpio_lock, flags);
+
+ return res;
}
u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value)
{
- return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&extif->gpio_lock, flags);
+ res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value);
+ spin_unlock_irqrestore(&extif->gpio_lock, flags);
+
+ return res;
}
u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value)
{
- return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&extif->gpio_lock, flags);
+ res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value);
+ spin_unlock_irqrestore(&extif->gpio_lock, flags);
+
+ return res;
}
diff --git a/drivers/ssb/driver_gige.c b/drivers/ssb/driver_gige.c
index f30ea689933a..21f71a1581fa 100644
--- a/drivers/ssb/driver_gige.c
+++ b/drivers/ssb/driver_gige.c
@@ -107,9 +107,8 @@ void gige_pcicfg_write32(struct ssb_gige *dev,
gige_write32(dev, SSB_GIGE_PCICFG + offset, value);
}
-static int __devinit ssb_gige_pci_read_config(struct pci_bus *bus,
- unsigned int devfn, int reg,
- int size, u32 *val)
+static int ssb_gige_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
{
struct ssb_gige *dev = container_of(bus->ops, struct ssb_gige, pci_ops);
unsigned long flags;
@@ -138,9 +137,8 @@ static int __devinit ssb_gige_pci_read_config(struct pci_bus *bus,
return PCIBIOS_SUCCESSFUL;
}
-static int __devinit ssb_gige_pci_write_config(struct pci_bus *bus,
- unsigned int devfn, int reg,
- int size, u32 val)
+static int ssb_gige_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
{
struct ssb_gige *dev = container_of(bus->ops, struct ssb_gige, pci_ops);
unsigned long flags;
@@ -169,8 +167,8 @@ static int __devinit ssb_gige_pci_write_config(struct pci_bus *bus,
return PCIBIOS_SUCCESSFUL;
}
-static int __devinit ssb_gige_probe(struct ssb_device *sdev,
- const struct ssb_device_id *id)
+static int ssb_gige_probe(struct ssb_device *sdev,
+ const struct ssb_device_id *id)
{
struct ssb_gige *dev;
u32 base, tmslow, tmshigh;
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
new file mode 100644
index 000000000000..dc109de228c6
--- /dev/null
+++ b/drivers/ssb/driver_gpio.c
@@ -0,0 +1,210 @@
+/*
+ * Sonics Silicon Backplane
+ * GPIO driver
+ *
+ * Copyright 2011, Broadcom Corporation
+ * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/ssb/ssb.h>
+
+#include "ssb_private.h"
+
+static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip)
+{
+ return container_of(chip, struct ssb_bus, gpio);
+}
+
+static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio);
+}
+
+static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
+}
+
+static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
+ unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 0);
+ return 0;
+}
+
+static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 1 << gpio);
+ ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+}
+
+static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_chipco_gpio_control(&bus->chipco, 1 << gpio, 0);
+ /* clear pulldown */
+ ssb_chipco_gpio_pulldown(&bus->chipco, 1 << gpio, 0);
+ /* Set pullup */
+ ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 1 << gpio);
+
+ return 0;
+}
+
+static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ /* clear pullup */
+ ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
+}
+
+static int ssb_gpio_chipco_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ return ssb_mips_irq(bus->chipco.dev) + 2;
+ else
+ return -EINVAL;
+}
+
+static int ssb_gpio_chipco_init(struct ssb_bus *bus)
+{
+ struct gpio_chip *chip = &bus->gpio;
+
+ chip->label = "ssb_chipco_gpio";
+ chip->owner = THIS_MODULE;
+ chip->request = ssb_gpio_chipco_request;
+ chip->free = ssb_gpio_chipco_free;
+ chip->get = ssb_gpio_chipco_get_value;
+ chip->set = ssb_gpio_chipco_set_value;
+ chip->direction_input = ssb_gpio_chipco_direction_input;
+ chip->direction_output = ssb_gpio_chipco_direction_output;
+ chip->to_irq = ssb_gpio_chipco_to_irq;
+ chip->ngpio = 16;
+ /* A device contains just one SoC, so its GPIO numbers should be
+ * deterministic to make them easier to address. The other bus types
+ * could get a random base number. */
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ chip->base = 0;
+ else
+ chip->base = -1;
+
+ return gpiochip_add(chip);
+}
+
+#ifdef CONFIG_SSB_DRIVER_EXTIF
+
+static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio);
+}
+
+static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
+}
+
+static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
+ unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 0);
+ return 0;
+}
+
+static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 1 << gpio);
+ ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+}
+
+static int ssb_gpio_extif_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ return ssb_mips_irq(bus->extif.dev) + 2;
+ else
+ return -EINVAL;
+}
+
+static int ssb_gpio_extif_init(struct ssb_bus *bus)
+{
+ struct gpio_chip *chip = &bus->gpio;
+
+ chip->label = "ssb_extif_gpio";
+ chip->owner = THIS_MODULE;
+ chip->get = ssb_gpio_extif_get_value;
+ chip->set = ssb_gpio_extif_set_value;
+ chip->direction_input = ssb_gpio_extif_direction_input;
+ chip->direction_output = ssb_gpio_extif_direction_output;
+ chip->to_irq = ssb_gpio_extif_to_irq;
+ chip->ngpio = 5;
+ /* A device contains just one SoC, so its GPIO numbers should be
+ * deterministic to make them easier to address. The other bus types
+ * could get a random base number. */
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ chip->base = 0;
+ else
+ chip->base = -1;
+
+ return gpiochip_add(chip);
+}
+
+#else
+static int ssb_gpio_extif_init(struct ssb_bus *bus)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+int ssb_gpio_init(struct ssb_bus *bus)
+{
+ if (ssb_chipco_available(&bus->chipco))
+ return ssb_gpio_chipco_init(bus);
+ else if (ssb_extif_available(&bus->extif))
+ return ssb_gpio_extif_init(bus);
+ else
+ SSB_WARN_ON(1);
+
+ return -1;
+}
+
+int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+ if (ssb_chipco_available(&bus->chipco) ||
+ ssb_extif_available(&bus->extif)) {
+ return gpiochip_remove(&bus->gpio);
+ } else {
+ SSB_WARN_ON(1);
+ }
+
+ return -1;
+}
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 5bd05b136d22..33b37dac40bd 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -10,6 +10,7 @@
#include <linux/ssb/ssb.h>
+#include <linux/mtd/physmap.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
@@ -17,6 +18,25 @@
#include "ssb_private.h"
+static const char *part_probes[] = { "bcm47xxpart", NULL };
+
+static struct physmap_flash_data ssb_pflash_data = {
+ .part_probe_types = part_probes,
+};
+
+static struct resource ssb_pflash_resource = {
+ .name = "ssb_pflash",
+ .flags = IORESOURCE_MEM,
+};
+
+struct platform_device ssb_pflash_dev = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &ssb_pflash_data,
+ },
+ .resource = &ssb_pflash_resource,
+ .num_resources = 1,
+};
static inline u32 mips_read32(struct ssb_mipscore *mcore,
u16 offset)
@@ -189,34 +209,43 @@ static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
{
struct ssb_bus *bus = mcore->dev->bus;
+ struct ssb_pflash *pflash = &mcore->pflash;
/* When there is no chipcommon on the bus there is 4MB flash */
if (!ssb_chipco_available(&bus->chipco)) {
- mcore->pflash.present = true;
- mcore->pflash.buswidth = 2;
- mcore->pflash.window = SSB_FLASH1;
- mcore->pflash.window_size = SSB_FLASH1_SZ;
- return;
+ pflash->present = true;
+ pflash->buswidth = 2;
+ pflash->window = SSB_FLASH1;
+ pflash->window_size = SSB_FLASH1_SZ;
+ goto ssb_pflash;
}
/* There is ChipCommon, so use it to read info about flash */
switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
case SSB_CHIPCO_FLASHT_STSER:
case SSB_CHIPCO_FLASHT_ATSER:
- pr_err("Serial flash not supported\n");
+ pr_debug("Found serial flash\n");
+ ssb_sflash_init(&bus->chipco);
break;
case SSB_CHIPCO_FLASHT_PARA:
pr_debug("Found parallel flash\n");
- mcore->pflash.present = true;
- mcore->pflash.window = SSB_FLASH2;
- mcore->pflash.window_size = SSB_FLASH2_SZ;
+ pflash->present = true;
+ pflash->window = SSB_FLASH2;
+ pflash->window_size = SSB_FLASH2_SZ;
if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
& SSB_CHIPCO_CFG_DS16) == 0)
- mcore->pflash.buswidth = 1;
+ pflash->buswidth = 1;
else
- mcore->pflash.buswidth = 2;
+ pflash->buswidth = 2;
break;
}
+
+ssb_pflash:
+ if (pflash->present) {
+ ssb_pflash_data.width = pflash->buswidth;
+ ssb_pflash_resource.start = pflash->window;
+ ssb_pflash_resource.end = pflash->window + pflash->window_size;
+ }
}
u32 ssb_cpu_clock(struct ssb_mipscore *mcore)
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 49d209173f55..59801d23d7ec 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -315,7 +315,7 @@ int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return ssb_mips_irq(extpci_core->dev) + 2;
}
-static void __devinit ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
+static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
{
u32 val;
@@ -380,7 +380,7 @@ static void __devinit ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
register_pci_controller(&ssb_pcicore_controller);
}
-static int __devinit pcicore_is_in_hostmode(struct ssb_pcicore *pc)
+static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
{
struct ssb_bus *bus = pc->dev->bus;
u16 chipid_top;
@@ -413,7 +413,7 @@ static int __devinit pcicore_is_in_hostmode(struct ssb_pcicore *pc)
* Workarounds.
**************************************************/
-static void __devinit ssb_pcicore_fix_sprom_core_index(struct ssb_pcicore *pc)
+static void ssb_pcicore_fix_sprom_core_index(struct ssb_pcicore *pc)
{
u16 tmp = pcicore_read16(pc, SSB_PCICORE_SPROM(0));
if (((tmp & 0xF000) >> 12) != pc->dev->core_index) {
@@ -515,7 +515,7 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
* Generic and Clientmode operation code.
**************************************************/
-static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
+static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
struct ssb_device *pdev = pc->dev;
struct ssb_bus *bus = pdev->bus;
@@ -534,7 +534,7 @@ static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
}
}
-void __devinit ssb_pcicore_init(struct ssb_pcicore *pc)
+void ssb_pcicore_init(struct ssb_pcicore *pc)
{
struct ssb_device *dev = pc->dev;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 6e0daaa0e04b..3b645b8a261f 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus)
void ssb_bus_unregister(struct ssb_bus *bus)
{
+ int err;
+
+ err = ssb_gpio_unregister(bus);
+ if (err == -EBUSY)
+ ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n");
+ else if (err)
+ ssb_dprintk(KERN_ERR PFX
+ "Can not unregister GPIO driver: %i\n", err);
+
ssb_buses_lock();
ssb_devices_unregister(bus);
list_del(&bus->list);
@@ -540,6 +549,14 @@ static int ssb_devices_register(struct ssb_bus *bus)
dev_idx++;
}
+#ifdef CONFIG_SSB_DRIVER_MIPS
+ if (bus->mipscore.pflash.present) {
+ err = platform_device_register(&ssb_pflash_dev);
+ if (err)
+ pr_err("Error registering parallel flash\n");
+ }
+#endif
+
return 0;
error:
/* Unwind the already registered devices. */
@@ -548,7 +565,7 @@ error:
}
/* Needs ssb_buses_lock() */
-static int __devinit ssb_attach_queued_buses(void)
+static int ssb_attach_queued_buses(void)
{
struct ssb_bus *bus, *n;
int err = 0;
@@ -761,9 +778,9 @@ out:
return err;
}
-static int __devinit ssb_bus_register(struct ssb_bus *bus,
- ssb_invariants_func_t get_invariants,
- unsigned long baseaddr)
+static int ssb_bus_register(struct ssb_bus *bus,
+ ssb_invariants_func_t get_invariants,
+ unsigned long baseaddr)
{
int err;
@@ -804,7 +821,14 @@ static int __devinit ssb_bus_register(struct ssb_bus *bus,
if (err)
goto err_pcmcia_exit;
ssb_chipcommon_init(&bus->chipco);
+ ssb_extif_init(&bus->extif);
ssb_mipscore_init(&bus->mipscore);
+ err = ssb_gpio_init(bus);
+ if (err == -ENOTSUPP)
+ ssb_dprintk(KERN_DEBUG PFX "GPIO driver not activated\n");
+ else if (err)
+ ssb_dprintk(KERN_ERR PFX
+ "Error registering GPIO driver: %i\n", err);
err = ssb_fetch_invariants(bus, get_invariants);
if (err) {
ssb_bus_may_powerdown(bus);
@@ -844,8 +868,7 @@ err_disable_xtal:
}
#ifdef CONFIG_SSB_PCIHOST
-int __devinit ssb_bus_pcibus_register(struct ssb_bus *bus,
- struct pci_dev *host_pci)
+int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci)
{
int err;
@@ -868,9 +891,9 @@ EXPORT_SYMBOL(ssb_bus_pcibus_register);
#endif /* CONFIG_SSB_PCIHOST */
#ifdef CONFIG_SSB_PCMCIAHOST
-int __devinit ssb_bus_pcmciabus_register(struct ssb_bus *bus,
- struct pcmcia_device *pcmcia_dev,
- unsigned long baseaddr)
+int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
+ struct pcmcia_device *pcmcia_dev,
+ unsigned long baseaddr)
{
int err;
@@ -890,9 +913,8 @@ EXPORT_SYMBOL(ssb_bus_pcmciabus_register);
#endif /* CONFIG_SSB_PCMCIAHOST */
#ifdef CONFIG_SSB_SDIOHOST
-int __devinit ssb_bus_sdiobus_register(struct ssb_bus *bus,
- struct sdio_func *func,
- unsigned int quirks)
+int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
+ unsigned int quirks)
{
int err;
@@ -912,9 +934,8 @@ int __devinit ssb_bus_sdiobus_register(struct ssb_bus *bus,
EXPORT_SYMBOL(ssb_bus_sdiobus_register);
#endif /* CONFIG_SSB_PCMCIAHOST */
-int __devinit ssb_bus_ssbbus_register(struct ssb_bus *bus,
- unsigned long baseaddr,
- ssb_invariants_func_t get_invariants)
+int ssb_bus_ssbbus_register(struct ssb_bus *bus, unsigned long baseaddr,
+ ssb_invariants_func_t get_invariants)
{
int err;
diff --git a/drivers/ssb/pcihost_wrapper.c b/drivers/ssb/pcihost_wrapper.c
index af5448f5e2d2..32ed1fa4a82e 100644
--- a/drivers/ssb/pcihost_wrapper.c
+++ b/drivers/ssb/pcihost_wrapper.c
@@ -54,8 +54,8 @@ static int ssb_pcihost_resume(struct pci_dev *dev)
# define ssb_pcihost_resume NULL
#endif /* CONFIG_PM */
-static int __devinit ssb_pcihost_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int ssb_pcihost_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct ssb_bus *ssb;
int err = -ENOMEM;
@@ -111,7 +111,7 @@ static void ssb_pcihost_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
-int __devinit ssb_pcihost_register(struct pci_driver *driver)
+int ssb_pcihost_register(struct pci_driver *driver)
{
driver->probe = ssb_pcihost_probe;
driver->remove = ssb_pcihost_remove;
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 8942db1d855a..466171b77f68 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -217,6 +217,21 @@ extern u32 ssb_chipco_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt,
u32 ticks);
extern u32 ssb_chipco_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms);
+/* driver_chipcommon_sflash.c */
+#ifdef CONFIG_SSB_SFLASH
+int ssb_sflash_init(struct ssb_chipcommon *cc);
+#else
+static inline int ssb_sflash_init(struct ssb_chipcommon *cc)
+{
+ pr_err("Serial flash not supported\n");
+ return 0;
+}
+#endif /* CONFIG_SSB_SFLASH */
+
+#ifdef CONFIG_SSB_DRIVER_MIPS
+extern struct platform_device ssb_pflash_dev;
+#endif
+
#ifdef CONFIG_SSB_DRIVER_EXTIF
extern u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks);
extern u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms);
@@ -242,4 +257,26 @@ static inline int ssb_watchdog_register(struct ssb_bus *bus)
}
#endif /* CONFIG_SSB_EMBEDDED */
+#ifdef CONFIG_SSB_DRIVER_EXTIF
+extern void ssb_extif_init(struct ssb_extif *extif);
+#else
+static inline void ssb_extif_init(struct ssb_extif *extif)
+{
+}
+#endif
+
+#ifdef CONFIG_SSB_DRIVER_GPIO
+extern int ssb_gpio_init(struct ssb_bus *bus);
+extern int ssb_gpio_unregister(struct ssb_bus *bus);
+#else /* CONFIG_SSB_DRIVER_GPIO */
+static inline int ssb_gpio_init(struct ssb_bus *bus)
+{
+ return -ENOTSUPP;
+}
+static inline int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+ return 0;
+}
+#endif /* CONFIG_SSB_DRIVER_GPIO */
+
#endif /* LINUX_SSB_PRIVATE_H_ */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 329bdb42109f..3a7965d6ac28 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -74,8 +74,6 @@ source "drivers/staging/iio/Kconfig"
source "drivers/staging/zram/Kconfig"
-source "drivers/staging/zcache/Kconfig"
-
source "drivers/staging/zsmalloc/Kconfig"
source "drivers/staging/wlags49_h2/Kconfig"
@@ -128,8 +126,6 @@ source "drivers/staging/csr/Kconfig"
source "drivers/staging/omap-thermal/Kconfig"
-source "drivers/staging/ramster/Kconfig"
-
source "drivers/staging/silicom/Kconfig"
source "drivers/staging/ced1401/Kconfig"
@@ -142,4 +138,8 @@ source "drivers/staging/sb105x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
+source "drivers/staging/zcache/Kconfig"
+
+source "drivers/staging/goldfish/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index c7ec486680f7..5971865d0c61 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_ZSMALLOC) += zsmalloc/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
@@ -56,10 +55,11 @@ obj-$(CONFIG_USB_G_CCG) += ccg/
obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
obj-$(CONFIG_CSR_WIFI) += csr/
obj-$(CONFIG_OMAP_BANDGAP) += omap-thermal/
-obj-$(CONFIG_ZCACHE2) += ramster/
obj-$(CONFIG_NET_VENDOR_SILICOM) += silicom/
obj-$(CONFIG_CED1401) += ced1401/
obj-$(CONFIG_DRM_IMX) += imx-drm/
obj-$(CONFIG_DGRP) += dgrp/
obj-$(CONFIG_SB105X) += sb105x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
+obj-$(CONFIG_ZCACHE) += zcache/
+obj-$(CONFIG_GOLDFISH) += goldfish/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 0ce50d12c30f..465a28c08f20 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -11,19 +11,42 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
default n
+ ---help---
+ Binder is used in Android both for communication between processes
+ and for remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
depends on SHMEM || TINY_SHMEM
- help
+ ---help---
The ashmem subsystem is a new shared memory allocator, similar to
POSIX SHM but with different behavior and sporting a simpler
file-based API.
+ It is, in theory, a good memory allocator for low-memory devices,
+ because it can discard shared memory units when under memory pressure.
+
config ANDROID_LOGGER
tristate "Android log driver"
default n
+ ---help---
+ This adds support for system-wide logging using four log buffers.
+
+ These are:
+
+ 1: main
+ 2: events
+ 3: radio
+ 4: system
+
+ Log reading and writing are performed via normal Linux reads and
+ optimized writes. This optimization keeps logging overhead in the
+ system low.
config ANDROID_TIMED_OUTPUT
bool "Timed output class driver"
@@ -38,13 +61,13 @@ config ANDROID_LOW_MEMORY_KILLER
bool "Android Low Memory Killer"
default N
---help---
- Register processes to be killed when memory is low
+ Registers processes to be killed when memory is low
config ANDROID_INTF_ALARM_DEV
bool "Android alarm driver"
depends on RTC_CLASS
default n
- help
+ ---help---
Provides non-wakeup and rtc backed wakeup alarms based on rtc or
elapsed realtime, and a non-wakeup alarm on the monotonic clock.
Also exports the alarm interface to user-space.
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
index a9b293ff3cc8..ceb1c643753d 100644
--- a/drivers/staging/android/alarm-dev.c
+++ b/drivers/staging/android/alarm-dev.c
@@ -42,10 +42,6 @@ do { \
ANDROID_ALARM_RTC_WAKEUP_MASK | \
ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
-/* support old userspace code */
-#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
-#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
-
static int alarm_opened;
static DEFINE_SPINLOCK(alarm_slock);
static struct wakeup_source alarm_wake_lock;
@@ -96,18 +92,116 @@ static void devalarm_cancel(struct devalarm *alrm)
hrtimer_cancel(&alrm->u.hrt);
}
+static void alarm_clear(enum android_alarm_type alarm_type)
+{
+ uint32_t alarm_type_mask = 1U << alarm_type;
+ unsigned long flags;
-static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_dbg(IO, "alarm %d clear\n", alarm_type);
+ devalarm_try_to_cancel(&alarms[alarm_type]);
+ if (alarm_pending) {
+ alarm_pending &= ~alarm_type_mask;
+ if (!alarm_pending && !wait_pending)
+ __pm_relax(&alarm_wake_lock);
+ }
+ alarm_enabled &= ~alarm_type_mask;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+}
+
+static void alarm_set(enum android_alarm_type alarm_type,
+ struct timespec *ts)
{
- int rv = 0;
+ uint32_t alarm_type_mask = 1U << alarm_type;
unsigned long flags;
- struct timespec new_alarm_time;
- struct timespec new_rtc_time;
- struct timespec tmp_time;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_dbg(IO, "alarm %d set %ld.%09ld\n",
+ alarm_type, ts->tv_sec, ts->tv_nsec);
+ alarm_enabled |= alarm_type_mask;
+ devalarm_start(&alarms[alarm_type], timespec_to_ktime(*ts));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static int alarm_wait(void)
+{
+ unsigned long flags;
+ int rv = 0;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_dbg(IO, "alarm wait\n");
+ if (!alarm_pending && wait_pending) {
+ __pm_relax(&alarm_wake_lock);
+ wait_pending = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+ if (rv)
+ return rv;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ rv = alarm_pending;
+ wait_pending = 1;
+ alarm_pending = 0;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ return rv;
+}
+
+static int alarm_set_rtc(struct timespec *ts)
+{
struct rtc_time new_rtc_tm;
struct rtc_device *rtc_dev;
+ unsigned long flags;
+ int rv = 0;
+
+ rtc_time_to_tm(ts->tv_sec, &new_rtc_tm);
+ rtc_dev = alarmtimer_get_rtcdev();
+ rv = do_settimeofday(ts);
+ if (rv < 0)
+ return rv;
+ if (rtc_dev)
+ rv = rtc_set_time(rtc_dev, &new_rtc_tm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+ wake_up(&alarm_wait_queue);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ return rv;
+}
+
+static int alarm_get_time(enum android_alarm_type alarm_type,
+ struct timespec *ts)
+{
+ int rv = 0;
+
+ switch (alarm_type) {
+ case ANDROID_ALARM_RTC_WAKEUP:
+ case ANDROID_ALARM_RTC:
+ getnstimeofday(ts);
+ break;
+ case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+ case ANDROID_ALARM_ELAPSED_REALTIME:
+ get_monotonic_boottime(ts);
+ break;
+ case ANDROID_ALARM_SYSTEMTIME:
+ ktime_get_ts(ts);
+ break;
+ default:
+ rv = -EINVAL;
+ }
+ return rv;
+}
+
+static long alarm_do_ioctl(struct file *file, unsigned int cmd,
+ struct timespec *ts)
+{
+ int rv = 0;
+ unsigned long flags;
enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
- uint32_t alarm_type_mask = 1U << alarm_type;
if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
return -EINVAL;
@@ -130,115 +224,89 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (ANDROID_ALARM_BASE_CMD(cmd)) {
case ANDROID_ALARM_CLEAR(0):
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_dbg(IO, "alarm %d clear\n", alarm_type);
- devalarm_try_to_cancel(&alarms[alarm_type]);
- if (alarm_pending) {
- alarm_pending &= ~alarm_type_mask;
- if (!alarm_pending && !wait_pending)
- __pm_relax(&alarm_wake_lock);
- }
- alarm_enabled &= ~alarm_type_mask;
- spin_unlock_irqrestore(&alarm_slock, flags);
+ alarm_clear(alarm_type);
break;
-
- case ANDROID_ALARM_SET_OLD:
- case ANDROID_ALARM_SET_AND_WAIT_OLD:
- if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
- rv = -EFAULT;
- goto err1;
- }
- new_alarm_time.tv_nsec = 0;
- goto from_old_alarm_set;
-
- case ANDROID_ALARM_SET_AND_WAIT(0):
case ANDROID_ALARM_SET(0):
- if (copy_from_user(&new_alarm_time, (void __user *)arg,
- sizeof(new_alarm_time))) {
- rv = -EFAULT;
- goto err1;
- }
-from_old_alarm_set:
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_dbg(IO, "alarm %d set %ld.%09ld\n",
- alarm_type,
- new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
- alarm_enabled |= alarm_type_mask;
- devalarm_start(&alarms[alarm_type],
- timespec_to_ktime(new_alarm_time));
- spin_unlock_irqrestore(&alarm_slock, flags);
- if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
- && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
- break;
+ alarm_set(alarm_type, ts);
+ break;
+ case ANDROID_ALARM_SET_AND_WAIT(0):
+ alarm_set(alarm_type, ts);
/* fall though */
case ANDROID_ALARM_WAIT:
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_dbg(IO, "alarm wait\n");
- if (!alarm_pending && wait_pending) {
- __pm_relax(&alarm_wake_lock);
- wait_pending = 0;
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
- rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
- if (rv)
- goto err1;
- spin_lock_irqsave(&alarm_slock, flags);
- rv = alarm_pending;
- wait_pending = 1;
- alarm_pending = 0;
- spin_unlock_irqrestore(&alarm_slock, flags);
+ rv = alarm_wait();
break;
case ANDROID_ALARM_SET_RTC:
- if (copy_from_user(&new_rtc_time, (void __user *)arg,
- sizeof(new_rtc_time))) {
- rv = -EFAULT;
- goto err1;
- }
- rtc_time_to_tm(new_rtc_time.tv_sec, &new_rtc_tm);
- rtc_dev = alarmtimer_get_rtcdev();
- rv = do_settimeofday(&new_rtc_time);
- if (rv < 0)
- goto err1;
- if (rtc_dev)
- rv = rtc_set_time(rtc_dev, &new_rtc_tm);
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
- wake_up(&alarm_wait_queue);
- spin_unlock_irqrestore(&alarm_slock, flags);
- if (rv < 0)
- goto err1;
+ rv = alarm_set_rtc(ts);
break;
case ANDROID_ALARM_GET_TIME(0):
- switch (alarm_type) {
- case ANDROID_ALARM_RTC_WAKEUP:
- case ANDROID_ALARM_RTC:
- getnstimeofday(&tmp_time);
- break;
- case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
- case ANDROID_ALARM_ELAPSED_REALTIME:
- get_monotonic_boottime(&tmp_time);
- break;
- case ANDROID_ALARM_SYSTEMTIME:
- ktime_get_ts(&tmp_time);
- break;
- default:
- rv = -EINVAL;
- goto err1;
- }
- if (copy_to_user((void __user *)arg, &tmp_time,
- sizeof(tmp_time))) {
- rv = -EFAULT;
- goto err1;
- }
+ rv = alarm_get_time(alarm_type, ts);
break;
default:
rv = -EINVAL;
}
-err1:
return rv;
}
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+
+ struct timespec ts;
+ int rv;
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_SET_AND_WAIT(0):
+ case ANDROID_ALARM_SET(0):
+ case ANDROID_ALARM_SET_RTC:
+ if (copy_from_user(&ts, (void __user *)arg, sizeof(ts)))
+ return -EFAULT;
+ break;
+ }
+
+ rv = alarm_do_ioctl(file, cmd, &ts);
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_GET_TIME(0):
+ if (copy_to_user((void __user *)arg, &ts, sizeof(ts)))
+ return -EFAULT;
+ break;
+ }
+
+ return rv;
+}
+#ifdef CONFIG_COMPAT
+static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+
+ struct timespec ts;
+ int rv;
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_SET_AND_WAIT_COMPAT(0):
+ case ANDROID_ALARM_SET_COMPAT(0):
+ case ANDROID_ALARM_SET_RTC_COMPAT:
+ if (compat_get_timespec(&ts, (void __user *)arg))
+ return -EFAULT;
+ /* fall through */
+ case ANDROID_ALARM_GET_TIME_COMPAT(0):
+ cmd = ANDROID_ALARM_COMPAT_TO_NORM(cmd);
+ break;
+ }
+
+ rv = alarm_do_ioctl(file, cmd, &ts);
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */
+ if (compat_put_timespec(&ts, (void __user *)arg))
+ return -EFAULT;
+ break;
+ }
+
+ return rv;
+}
+#endif
+
static int alarm_open(struct inode *inode, struct file *file)
{
file->private_data = NULL;
@@ -319,6 +387,9 @@ static const struct file_operations alarm_fops = {
.unlocked_ioctl = alarm_ioctl,
.open = alarm_open,
.release = alarm_release,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = alarm_compat_ioctl,
+#endif
};
static struct miscdevice alarm_device = {
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
index d0cafd637199..4fd32f337f9c 100644
--- a/drivers/staging/android/android_alarm.h
+++ b/drivers/staging/android/android_alarm.h
@@ -18,6 +18,7 @@
#include <linux/ioctl.h>
#include <linux/time.h>
+#include <linux/compat.h>
enum android_alarm_type {
/* return code bit numbers or set alarm arg */
@@ -59,4 +60,22 @@ enum android_alarm_return_flags {
#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#ifdef CONFIG_COMPAT
+#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \
+ struct compat_timespec)
+#define ANDROID_ALARM_SET_AND_WAIT_COMPAT(type) ALARM_IOW(3, type, \
+ struct compat_timespec)
+#define ANDROID_ALARM_GET_TIME_COMPAT(type) ALARM_IOW(4, type, \
+ struct compat_timespec)
+#define ANDROID_ALARM_SET_RTC_COMPAT _IOW('a', 5, \
+ struct compat_timespec)
+#define ANDROID_ALARM_IOCTL_NR(cmd) (_IOC_NR(cmd) & ((1<<4)-1))
+#define ANDROID_ALARM_COMPAT_TO_NORM(cmd) \
+ ALARM_IOW(ANDROID_ALARM_IOCTL_NR(cmd), \
+ ANDROID_ALARM_IOCTL_TO_TYPE(cmd), \
+ struct timespec)
+
+#endif
+
#endif
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 4a36e9ab8cf7..538ebe213129 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/pid_namespace.h>
#include "binder.h"
#include "binder_trace.h"
@@ -2320,7 +2321,7 @@ retry:
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
- current->nsproxy->pid_ns);
+ task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}
@@ -3226,7 +3227,7 @@ static void print_binder_proc(struct seq_file *m,
m->count = start_pos;
}
-static const char *binder_return_strings[] = {
+static const char * const binder_return_strings[] = {
"BR_ERROR",
"BR_OK",
"BR_TRANSACTION",
@@ -3247,7 +3248,7 @@ static const char *binder_return_strings[] = {
"BR_FAILED_REPLY"
};
-static const char *binder_command_strings[] = {
+static const char * const binder_command_strings[] = {
"BC_TRANSACTION",
"BC_REPLY",
"BC_ACQUIRE_RESULT",
@@ -3267,7 +3268,7 @@ static const char *binder_command_strings[] = {
"BC_DEAD_BINDER_DONE"
};
-static const char *binder_objstat_strings[] = {
+static const char * const binder_objstat_strings[] = {
"proc",
"thread",
"node",
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index 2f7d195d8b15..f240464effde 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -163,7 +163,7 @@ struct binder_pri_ptr_cookie {
void *cookie;
};
-enum BinderDriverReturnProtocol {
+enum binder_driver_return_protocol {
BR_ERROR = _IOR('r', 0, int),
/*
* int: error code
@@ -224,7 +224,7 @@ enum BinderDriverReturnProtocol {
BR_SPAWN_LOOPER = _IO('r', 13),
/*
* No parameters. The driver has determined that a process has no
- * threads waiting to service incomming transactions. When a process
+ * threads waiting to service incoming transactions. When a process
* receives this command, it must spawn a new service thread and
* register it via bcENTER_LOOPER.
*/
@@ -251,7 +251,7 @@ enum BinderDriverReturnProtocol {
*/
};
-enum BinderDriverCommandProtocol {
+enum binder_driver_command_protocol {
BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
/*
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 00185478647a..d0a5a28a8fe2 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -164,11 +164,8 @@ static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
struct asus_oled_packet *packet;
packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
-
- if (!packet) {
- dev_err(&odev->udev->dev, "out of memory\n");
+ if (!packet)
return;
- }
setup_packet_header(packet, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00);
@@ -323,11 +320,8 @@ static void send_data(struct asus_oled_dev *odev)
struct asus_oled_packet *packet;
packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
-
- if (!packet) {
- dev_err(&odev->udev->dev, "out of memory\n");
+ if (!packet)
return;
- }
if (odev->pack_mode == PACK_MODE_G1) {
/* When sending roll-mode data the display updated only
@@ -665,11 +659,8 @@ static int asus_oled_probe(struct usb_interface *interface,
}
odev = kzalloc(sizeof(struct asus_oled_dev), GFP_KERNEL);
-
- if (odev == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (odev == NULL)
return -ENOMEM;
- }
odev->udev = usb_get_dev(udev);
odev->pic_mode = ASUS_OLED_STATIC;
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index f57794827f73..1d8bf08b5bfd 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -95,7 +95,7 @@ struct bcm_classifier_rule {
UCHAR ucDirection;
BOOLEAN bIpv6Protocol;
UINT32 u32PHSRuleID;
- S_PHS_RULE sPhsRule;
+ struct bcm_phs_rule sPhsRule;
UCHAR u8AssociatedPHSI;
/* Classification fields for ETH CS */
@@ -288,7 +288,7 @@ struct bcm_mini_adapter {
wait_queue_head_t ioctl_fw_dnld_wait_queue;
BOOLEAN waiting_to_fw_download_done;
pid_t fw_download_process_pid;
- PSTARGETPARAMS pstargetparams;
+ struct bcm_target_params *pstargetparams;
BOOLEAN device_removed;
BOOLEAN DeviceAccess;
BOOLEAN bIsAutoCorrectEnabled;
@@ -303,10 +303,10 @@ struct bcm_mini_adapter {
struct task_struct *transmit_packet_thread;
/* LED Related Structures */
- LED_INFO_STRUCT LEDInfo;
+ struct bcm_led_info LEDInfo;
/* Driver State for LED Blinking */
- LedEventInfo_t DriverState;
+ enum bcm_led_events DriverState;
/* Interface Specific */
PVOID pvInterfaceAdapter;
int (*bcm_file_download)(PVOID,
@@ -333,7 +333,7 @@ struct bcm_mini_adapter {
/* BOOLEAN bTriedToWakeUpFromShutdown; */
BOOLEAN bLinkDownRequested;
int downloadDDR;
- PHS_DEVICE_EXTENSION stBCMPhsContext;
+ struct bcm_phs_extension stBCMPhsContext;
struct bcm_hdr_suppression_contextinfo stPhsTxContextInfo;
uint8_t ucaPHSPktRestoreBuf[2048];
uint8_t bPHSEnabled;
@@ -345,7 +345,7 @@ struct bcm_mini_adapter {
struct bcm_fragmented_packet_info astFragmentedPktClassifierTable[MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES];
atomic_t uiMBupdate;
UINT32 PmuMode;
- NVM_TYPE eNVMType;
+ enum bcm_nvm_type eNVMType;
UINT uiSectorSize;
UINT uiSectorSizeInCFG;
BOOLEAN bSectorSizeOverride;
@@ -366,9 +366,9 @@ struct bcm_mini_adapter {
struct device *pstCreatedClassDevice;
/* BOOLEAN InterfaceUpStatus; */
- PFLASH2X_CS_INFO psFlash2xCSInfo;
- PFLASH_CS_INFO psFlashCSInfo;
- PFLASH2X_VENDORSPECIFIC_INFO psFlash2xVendorInfo;
+ struct bcm_flash2x_cs_info *psFlash2xCSInfo;
+ struct bcm_flash_cs_info *psFlashCSInfo;
+ struct bcm_flash2x_vendor_info *psFlash2xVendorInfo;
UINT uiFlashBaseAdd; /* Flash start address */
UINT uiActiveISOOffset; /* Active ISO offset chosen before f/w download */
enum bcm_flash2x_section_val eActiveISO; /* Active ISO section val */
@@ -392,7 +392,7 @@ struct bcm_mini_adapter {
struct semaphore LowPowerModeSync;
ULONG liDrainCalculated;
UINT gpioBitMap;
- S_BCM_DEBUG_STATE stDebugState;
+ struct bcm_debug_state stDebugState;
};
#define GET_BCM_ADAPTER(net_dev) netdev_priv(net_dev)
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index efad33e3ba73..491e2bfbc464 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1013,7 +1013,7 @@ cntrlEnd:
}
case IOCTL_BCM_GET_CURRENT_STATUS: {
- LINK_STATE link_state;
+ struct bcm_link_state link_state;
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
@@ -1229,13 +1229,13 @@ cntrlEnd:
case IOCTL_BCM_SET_DEBUG:
#ifdef DEBUG
{
- USER_BCM_DBG_STATE sUserDebugState;
+ struct bcm_user_debug_state sUserDebugState;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n");
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
- if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(USER_BCM_DBG_STATE)))
+ if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(struct bcm_user_debug_state)))
return -EFAULT;
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
@@ -1783,16 +1783,16 @@ cntrlEnd:
}
if (IsFlash2x(Adapter) == TRUE) {
- if (IoBuffer.OutputLength < sizeof(FLASH2X_CS_INFO))
+ if (IoBuffer.OutputLength < sizeof(struct bcm_flash2x_cs_info))
return -EINVAL;
- if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(FLASH2X_CS_INFO)))
+ if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(struct bcm_flash2x_cs_info)))
return -EFAULT;
} else {
- if (IoBuffer.OutputLength < sizeof(FLASH_CS_INFO))
+ if (IoBuffer.OutputLength < sizeof(struct bcm_flash_cs_info))
return -EINVAL;
- if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(FLASH_CS_INFO)))
+ if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(struct bcm_flash_cs_info)))
return -EFAULT;
}
}
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index a3b91c7ee8ff..4e470d4bb4e8 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -145,8 +145,8 @@ static void bcm_get_drvinfo(struct net_device *dev,
struct bcm_interface_adapter *psIntfAdapter = Adapter->pvInterfaceAdapter;
struct usb_device *udev = interface_to_usbdev(psIntfAdapter->interface);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u",
Adapter->uiFlashLayoutMajorVersion,
Adapter->uiFlashLayoutMinorVersion);
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index 23ddc3d7c9ea..976514502927 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -113,7 +113,7 @@ static VOID deleteSFBySfid(struct bcm_mini_adapter *Adapter, UINT uiSearchRuleIn
static inline VOID
CopyIpAddrToClassifier(struct bcm_classifier_rule *pstClassifierEntry,
B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
- BOOLEAN bIpVersion6, E_IPADDR_CONTEXT eIpAddrContext)
+ BOOLEAN bIpVersion6, enum bcm_ipaddr_context eIpAddrContext)
{
int i = 0;
UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
@@ -440,7 +440,7 @@ static VOID CopyToAdapter(register struct bcm_mini_adapter *Adapter, /* <Pointer
B_UINT16 u16PacketClassificationRuleIndex = 0;
int i;
struct bcm_convergence_types *psfCSType = NULL;
- S_PHS_RULE sPhsRule;
+ struct bcm_phs_rule sPhsRule;
USHORT uVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
UINT UGIValue = 0;
@@ -703,7 +703,7 @@ static VOID CopyToAdapter(register struct bcm_mini_adapter *Adapter, /* <Pointer
/* Update PHS Rule For the Classifier */
if (sPhsRule.u8PHSI) {
Adapter->astClassifierTable[uiClassifierIndex].u32PHSRuleID = sPhsRule.u8PHSI;
- memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule, &sPhsRule, sizeof(S_PHS_RULE));
+ memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule, &sPhsRule, sizeof(struct bcm_phs_rule));
}
}
}
diff --git a/drivers/staging/bcm/CmHost.h b/drivers/staging/bcm/CmHost.h
index eecad8d90aea..4ddfc3d45bc0 100644
--- a/drivers/staging/bcm/CmHost.h
+++ b/drivers/staging/bcm/CmHost.h
@@ -27,32 +27,28 @@ struct bcm_add_indication_alt {
u8 u8Type;
u8 u8Direction;
u16 u16TID;
- /* brief 16bitCID */
u16 u16CID;
- /* brief 16bitVCID */
u16 u16VCID;
struct bcm_connect_mgr_params sfAuthorizedSet;
struct bcm_connect_mgr_params sfAdmittedSet;
struct bcm_connect_mgr_params sfActiveSet;
u8 u8CC; /* < Confirmation Code */
- u8 u8Padd; /* < 8-bit Padding */
- u16 u16Padd; /* < 16 bit Padding */
+ u8 u8Padd;
+ u16 u16Padd;
};
struct bcm_change_indication {
u8 u8Type;
u8 u8Direction;
u16 u16TID;
- /* brief 16bitCID */
u16 u16CID;
- /* brief 16bitVCID */
u16 u16VCID;
struct bcm_connect_mgr_params sfAuthorizedSet;
struct bcm_connect_mgr_params sfAdmittedSet;
struct bcm_connect_mgr_params sfActiveSet;
u8 u8CC; /* < Confirmation Code */
- u8 u8Padd; /* < 8-bit Padding */
- u16 u16Padd; /* < 16 bit */
+ u8 u8Padd;
+ u16 u16Padd;
};
unsigned long StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer, unsigned int *puBufferLength);
diff --git a/drivers/staging/bcm/Debug.h b/drivers/staging/bcm/Debug.h
index 8018a189f817..7b331215c1ac 100644
--- a/drivers/staging/bcm/Debug.h
+++ b/drivers/staging/bcm/Debug.h
@@ -9,142 +9,129 @@
#include <linux/string.h>
#define NONE 0xFFFF
-
-//--------------------------------------------------------------------------------
-
/* TYPE and SUBTYPE
* Define valid TYPE (or category or code-path, however you like to think of it)
* and SUBTYPE s.
* Type and SubType are treated as bitmasks.
*/
-/*-----------------BEGIN TYPEs------------------------------------------*/
-#define DBG_TYPE_INITEXIT (1 << 0) // 1
-#define DBG_TYPE_TX (1 << 1) // 2
-#define DBG_TYPE_RX (1 << 2) // 4
-#define DBG_TYPE_OTHERS (1 << 3) // 8
-/*-----------------END TYPEs------------------------------------------*/
-#define NUMTYPES 4 // careful!
-
-/*-----------------BEGIN SUBTYPEs---------------------------------------*/
-
-/*-SUBTYPEs for TX : TYPE is DBG_TYPE_TX -----//
- Transmit.c ,Arp.c, LeakyBucket.c, And Qos.c
- total 17 macros */
-// Transmit.c
-#define TX 1
-#define MP_SEND (TX<<0)
-#define NEXT_SEND (TX<<1)
-#define TX_FIFO (TX<<2)
-#define TX_CONTROL (TX<<3)
-
-// Arp.c
-#define IP_ADDR (TX<<4)
-#define ARP_REQ (TX<<5)
-#define ARP_RESP (TX<<6)
-
-// Leakybucket.c
-#define TOKEN_COUNTS (TX<<8)
-#define CHECK_TOKENS (TX<<9)
-#define TX_PACKETS (TX<<10)
-#define TIMER (TX<<11)
-
-// Qos.c
-#define QOS TX
-#define QUEUE_INDEX (QOS<<12)
-#define IPV4_DBG (QOS<<13)
-#define IPV6_DBG (QOS<<14)
-#define PRUNE_QUEUE (QOS<<15)
-#define SEND_QUEUE (QOS<<16)
-
-//TX_Misc
-#define TX_OSAL_DBG (TX<<17)
-
-
-//--SUBTYPEs for ------INIT & EXIT---------------------
-/*------------ TYPE is DBG_TYPE_INITEXIT -----//
-DriverEntry.c, bcmfwup.c, ChipDetectTask.c, HaltnReset.c, InterfaceDDR.c */
-#define MP 1
-#define DRV_ENTRY (MP<<0)
-#define MP_INIT (MP<<1)
-#define READ_REG (MP<<3)
-#define DISPATCH (MP<<2)
-#define CLAIM_ADAP (MP<<4)
-#define REG_IO_PORT (MP<<5)
-#define INIT_DISP (MP<<6)
-#define RX_INIT (MP<<7)
-
-
-//-SUBTYPEs for --RX----------------------------------
-//------------RX : TYPE is DBG_TYPE_RX -----//
-// Receive.c
-#define RX 1
-#define RX_DPC (RX<<0)
-#define RX_CTRL (RX<<3)
-#define RX_DATA (RX<<4)
-#define MP_RETURN (RX<<1)
-#define LINK_MSG (RX<<2)
-
-
-//-SUBTYPEs for ----OTHER ROUTINES------------------
-//------------OTHERS : TYPE is DBG_TYPE_OTHER -----//
-// HaltnReset,CheckForHang,PnP,Misc,CmHost
-// total 12 macros
-#define OTHERS 1
-// ??ISR.C
-
-#define ISR OTHERS
-#define MP_DPC (ISR<<0)
-
-// HaltnReset.c
-#define HALT OTHERS
-#define MP_HALT (HALT<<1)
-#define CHECK_HANG (HALT<<2)
-#define MP_RESET (HALT<<3)
-#define MP_SHUTDOWN (HALT<<4)
-
-// pnp.c
-#define PNP OTHERS
-#define MP_PNP (PNP<<5)
-
-// Misc.c
-#define MISC OTHERS
-#define DUMP_INFO (MISC<<6)
-#define CLASSIFY (MISC<<7)
-#define LINK_UP_MSG (MISC<<8)
-#define CP_CTRL_PKT (MISC<<9)
-#define DUMP_CONTROL (MISC<<10)
-#define LED_DUMP_INFO (MISC<<11)
-
-// CmHost.c
-#define CMHOST OTHERS
-
-
-#define SERIAL (OTHERS<<12)
-#define IDLE_MODE (OTHERS<<13)
-
-#define WRM (OTHERS<<14)
-#define RDM (OTHERS<<15)
-
-// TODO - put PHS_SEND in Tx PHS_RECEIVE in Rx path ?
-#define PHS_SEND (OTHERS<<16)
-#define PHS_RECEIVE (OTHERS<<17)
-#define PHS_MODULE (OTHERS<<18)
-
-#define INTF_INIT (OTHERS<<19)
-#define INTF_ERR (OTHERS<<20)
-#define INTF_WARN (OTHERS<<21)
-#define INTF_NORM (OTHERS<<22)
-
-#define IRP_COMPLETION (OTHERS<<23)
-#define SF_DESCRIPTOR_CNTS (OTHERS<<24)
-#define PHS_DISPATCH (OTHERS << 25)
-#define OSAL_DBG (OTHERS << 26)
-#define NVM_RW (OTHERS << 27)
-
-#define HOST_MIBS (OTHERS << 28)
-#define CONN_MSG (CMHOST << 29)
-/*-----------------END SUBTYPEs------------------------------------------*/
-
+#define DBG_TYPE_INITEXIT (1 << 0) /* 1 */
+#define DBG_TYPE_TX (1 << 1) /* 2 */
+#define DBG_TYPE_RX (1 << 2) /* 4 */
+#define DBG_TYPE_OTHERS (1 << 3) /* 8 */
+#define NUMTYPES 4
+
+/* -SUBTYPEs for TX : TYPE is DBG_TYPE_TX -----//
+ * Transmit.c, Arp.c, LeakyBucket.c, and Qos.c
+ * total 17 macros
+ */
+/* Transmit.c */
+#define TX 1
+#define MP_SEND (TX << 0)
+#define NEXT_SEND (TX << 1)
+#define TX_FIFO (TX << 2)
+#define TX_CONTROL (TX << 3)
+
+/* Arp.c */
+#define IP_ADDR (TX << 4)
+#define ARP_REQ (TX << 5)
+#define ARP_RESP (TX << 6)
+
+/* Leakybucket.c */
+#define TOKEN_COUNTS (TX << 8)
+#define CHECK_TOKENS (TX << 9)
+#define TX_PACKETS (TX << 10)
+#define TIMER (TX << 11)
+
+/* Qos.c */
+#define QOS TX
+#define QUEUE_INDEX (QOS << 12)
+#define IPV4_DBG (QOS << 13)
+#define IPV6_DBG (QOS << 14)
+#define PRUNE_QUEUE (QOS << 15)
+#define SEND_QUEUE (QOS << 16)
+
+/* TX_Misc */
+#define TX_OSAL_DBG (TX << 17)
+
+/* --SUBTYPEs for ------INIT & EXIT---------------------
+ * ------------ TYPE is DBG_TYPE_INITEXIT -----//
+ * DriverEntry.c, bcmfwup.c, ChipDetectTask.c, HaltnReset.c, InterfaceDDR.c
+ */
+#define MP 1
+#define DRV_ENTRY (MP << 0)
+#define MP_INIT (MP << 1)
+#define READ_REG (MP << 3)
+#define DISPATCH (MP << 2)
+#define CLAIM_ADAP (MP << 4)
+#define REG_IO_PORT (MP << 5)
+#define INIT_DISP (MP << 6)
+#define RX_INIT (MP << 7)
+
+/* -SUBTYPEs for --RX----------------------------------
+ * ------------RX : TYPE is DBG_TYPE_RX -----//
+ * Receive.c
+ */
+#define RX 1
+#define RX_DPC (RX << 0)
+#define RX_CTRL (RX << 3)
+#define RX_DATA (RX << 4)
+#define MP_RETURN (RX << 1)
+#define LINK_MSG (RX << 2)
+
+/* -SUBTYPEs for ----OTHER ROUTINES------------------
+ * ------------OTHERS : TYPE is DBG_TYPE_OTHER -----//
+ * HaltnReset,CheckForHang,PnP,Misc,CmHost
+ * total 12 macros
+ */
+#define OTHERS 1
+#define ISR OTHERS
+#define MP_DPC (ISR << 0)
+
+/* HaltnReset.c */
+#define HALT OTHERS
+#define MP_HALT (HALT << 1)
+#define CHECK_HANG (HALT << 2)
+#define MP_RESET (HALT << 3)
+#define MP_SHUTDOWN (HALT << 4)
+
+/* pnp.c */
+#define PNP OTHERS
+#define MP_PNP (PNP << 5)
+
+/* Misc.c */
+#define MISC OTHERS
+#define DUMP_INFO (MISC << 6)
+#define CLASSIFY (MISC << 7)
+#define LINK_UP_MSG (MISC << 8)
+#define CP_CTRL_PKT (MISC << 9)
+#define DUMP_CONTROL (MISC << 10)
+#define LED_DUMP_INFO (MISC << 11)
+
+/* CmHost.c */
+#define CMHOST OTHERS
+#define SERIAL (OTHERS << 12)
+#define IDLE_MODE (OTHERS << 13)
+#define WRM (OTHERS << 14)
+#define RDM (OTHERS << 15)
+
+/* TODO: put PHS_SEND in the Tx path and PHS_RECEIVE in the Rx path? */
+#define PHS_SEND (OTHERS << 16)
+#define PHS_RECEIVE (OTHERS << 17)
+#define PHS_MODULE (OTHERS << 18)
+
+#define INTF_INIT (OTHERS << 19)
+#define INTF_ERR (OTHERS << 20)
+#define INTF_WARN (OTHERS << 21)
+#define INTF_NORM (OTHERS << 22)
+
+#define IRP_COMPLETION (OTHERS << 23)
+#define SF_DESCRIPTOR_CNTS (OTHERS << 24)
+#define PHS_DISPATCH (OTHERS << 25)
+#define OSAL_DBG (OTHERS << 26)
+#define NVM_RW (OTHERS << 27)
+
+#define HOST_MIBS (OTHERS << 28)
+#define CONN_MSG (CMHOST << 29)
/* Debug level
* We have 8 debug levels, in (numerical) increasing order of verbosity.
@@ -157,57 +144,58 @@ DriverEntry.c, bcmfwup.c, ChipDetectTask.c, HaltnReset.c, InterfaceDDR.c */
* You can compile-time change that to any of the below, if you wish to. However, as of now, there's
* no dynamic facility to have the userspace 'TestApp' set debug_level. Slated for future expansion.
*/
-#define BCM_ALL 7
-#define BCM_LOW 6
-#define BCM_PRINT 5
-#define BCM_NORMAL 4
-#define BCM_MEDIUM 3
-#define BCM_SCREAM 2
-#define BCM_ERR 1
+#define BCM_ALL 7
+#define BCM_LOW 6
+#define BCM_PRINT 5
+#define BCM_NORMAL 4
+#define BCM_MEDIUM 3
+#define BCM_SCREAM 2
+#define BCM_ERR 1
/* Not meant for developer in debug prints.
- * To be used to disable all prints by setting the DBG_LVL_CURR to this value */
-#define BCM_NONE 0
+ * To be used to disable all prints by setting the DBG_LVL_CURR to this value
+ */
+#define BCM_NONE 0
/* The current driver logging level.
* Everything at this level and (numerically) lower (meaning higher prio)
* is logged.
-* Replace 'BCM_ALL' in the DBG_LVL_CURR macro with the logging level desired.
+ * Replace 'BCM_ALL' in the DBG_LVL_CURR macro with the logging level desired.
* For eg. to set the logging level to 'errors only' use:
* #define DBG_LVL_CURR (BCM_ERR)
*/
#define DBG_LVL_CURR (BCM_ALL)
-#define DBG_LVL_ALL BCM_ALL
+#define DBG_LVL_ALL BCM_ALL
-/*---Userspace mapping of Debug State.
+/* ---Userspace mapping of Debug State.
* Delibrately matches that of the Windows driver..
* The TestApp's ioctl passes this struct to us.
*/
-typedef struct
-{
+struct bcm_user_debug_state {
unsigned int Subtype, Type;
unsigned int OnOff;
-// unsigned int debug_level; /* future expansion */
-} __attribute__((packed)) USER_BCM_DBG_STATE;
+/* unsigned int debug_level; future expansion */
+} __packed;
-//---Kernel-space mapping of Debug State
-typedef struct _S_BCM_DEBUG_STATE {
- UINT type;
+/* ---Kernel-space mapping of Debug State */
+struct bcm_debug_state {
+ unsigned int type;
/* A bitmap of 32 bits for Subtype per Type.
* Valid indexes in 'subtype' array are *only* 1,2,4 and 8,
* corresponding to valid Type values. Hence we use the 'Type' field
* as the index value, ignoring the array entries 0,3,5,6,7 !
*/
- UINT subtype[(NUMTYPES*2)+1];
- UINT debug_level;
-} S_BCM_DEBUG_STATE;
-/* Instantiated in the Adapter structure */
-/* We'll reuse the debug level parameter to include a bit (the MSB) to indicate whether or not
- * we want the function's name printed. */
-#define DBG_NO_FUNC_PRINT 1 << 31
+ unsigned int subtype[(NUMTYPES*2)+1];
+ unsigned int debug_level;
+};
+/* Instantiated in the Adapter structure
+ * We'll reuse the debug level parameter to include a bit (the MSB) to indicate whether or not
+ * we want the function's name printed.
+ */
+#define DBG_NO_FUNC_PRINT (1 << 31)
#define DBG_LVL_BITMASK 0xFF
-//--- Only for direct printk's; "hidden" to API.
+/* --- Only for direct printk's; "hidden" to API. */
#define DBG_TYPE_PRINTK 3
#define BCM_DEBUG_PRINT(Adapter, Type, SubType, dbg_level, string, args...) \
@@ -215,40 +203,40 @@ typedef struct _S_BCM_DEBUG_STATE {
if (DBG_TYPE_PRINTK == Type) \
pr_info("%s:" string, __func__, ##args); \
else if (Adapter && \
- (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
- (Type & Adapter->stDebugState.type) && \
- (SubType & Adapter->stDebugState.subtype[Type])) { \
+ (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
+ (Type & Adapter->stDebugState.type) && \
+ (SubType & Adapter->stDebugState.subtype[Type])) { \
if (dbg_level & DBG_NO_FUNC_PRINT) \
- printk(KERN_DEBUG string, ##args); \
+ pr_debug("%s:\n", string); \
else \
- printk(KERN_DEBUG "%s:" string, __func__, ##args); \
+ pr_debug("%s:\n" string, __func__, ##args); \
} \
} while (0)
-#define BCM_DEBUG_PRINT_BUFFER(Adapter, Type, SubType, dbg_level, buffer, bufferlen) do { \
- if (DBG_TYPE_PRINTK == Type || \
- (Adapter && \
- (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
- (Type & Adapter->stDebugState.type) && \
- (SubType & Adapter->stDebugState.subtype[Type]))) { \
- printk(KERN_DEBUG "%s:\n", __func__); \
- print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, \
- 16, 1, buffer, bufferlen, false); \
- } \
-} while(0)
-
-
-#define BCM_SHOW_DEBUG_BITMAP(Adapter) do { \
- int i; \
- for (i=0; i<(NUMTYPES*2)+1; i++) { \
+#define BCM_DEBUG_PRINT_BUFFER(Adapter, Type, SubType, dbg_level, buffer, bufferlen) \
+ do { \
+ if (DBG_TYPE_PRINTK == Type || \
+ (Adapter && \
+ (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
+ (Type & Adapter->stDebugState.type) && \
+ (SubType & Adapter->stDebugState.subtype[Type]))) { \
+ pr_debug("%s:\n", __func__); \
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, \
+ 16, 1, buffer, bufferlen, false); \
+ } \
+ } while (0)
+
+#define BCM_SHOW_DEBUG_BITMAP(Adapter) do { \
+ int i; \
+ for (i = 0; i < (NUMTYPES * 2) + 1; i++) { \
if ((i == 1) || (i == 2) || (i == 4) || (i == 8)) { \
- /* CAUTION! Forcefully turn on ALL debug paths and subpaths! \
- Adapter->stDebugState.subtype[i] = 0xffffffff; */ \
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "subtype[%d] = 0x%08x\n", \
- i, Adapter->stDebugState.subtype[i]); \
+ /* CAUTION! Forcefully turn on ALL debug paths and subpaths! \
+ * Adapter->stDebugState.subtype[i] = 0xffffffff; \
+ */ \
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "subtype[%d] = 0x%08x\n", \
+ i, Adapter->stDebugState.subtype[i]); \
} \
} \
} while (0)
#endif
-
diff --git a/drivers/staging/bcm/IPv6Protocol.c b/drivers/staging/bcm/IPv6Protocol.c
index 4745ddd62f5b..6d803e7b094a 100644
--- a/drivers/staging/bcm/IPv6Protocol.c
+++ b/drivers/staging/bcm/IPv6Protocol.c
@@ -1,10 +1,10 @@
#include "headers.h"
static BOOLEAN MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- IPV6Header *pstIpv6Header);
+ struct bcm_ipv6_hdr *pstIpv6Header);
static BOOLEAN MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- IPV6Header *pstIpv6Header);
-static VOID DumpIpv6Header(IPV6Header *pstIpv6Header);
+ struct bcm_ipv6_hdr *pstIpv6Header);
+static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header);
static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload,
UCHAR *pucNextHeader, BOOLEAN *bParseDone, USHORT *pusPayloadLength)
@@ -38,17 +38,17 @@ static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload,
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
DBG_LVL_ALL, "\nIPv6 HopByHop Header");
- usNextHeaderOffset += sizeof(IPV6HopByHopOptionsHeader);
+ usNextHeaderOffset += sizeof(struct bcm_ipv6_options_hdr);
}
break;
case IPV6HDR_TYPE_ROUTING:
{
- IPV6RoutingHeader *pstIpv6RoutingHeader;
+ struct bcm_ipv6_routing_hdr *pstIpv6RoutingHeader;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
DBG_LVL_ALL, "\nIPv6 Routing Header");
- pstIpv6RoutingHeader = (IPV6RoutingHeader *)pucPayloadPtr;
- usNextHeaderOffset += sizeof(IPV6RoutingHeader);
+ pstIpv6RoutingHeader = (struct bcm_ipv6_routing_hdr *)pucPayloadPtr;
+ usNextHeaderOffset += sizeof(struct bcm_ipv6_routing_hdr);
usNextHeaderOffset += pstIpv6RoutingHeader->ucNumAddresses * IPV6_ADDRESS_SIZEINBYTES;
}
@@ -58,25 +58,25 @@ static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload,
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
DBG_LVL_ALL,
"\nIPv6 Fragmentation Header");
- usNextHeaderOffset += sizeof(IPV6FragmentHeader);
+ usNextHeaderOffset += sizeof(struct bcm_ipv6_fragment_hdr);
}
break;
case IPV6HDR_TYPE_DESTOPTS:
{
- IPV6DestOptionsHeader *pstIpv6DestOptsHdr = (IPV6DestOptionsHeader *)pucPayloadPtr;
+ struct bcm_ipv6_dest_options_hdr *pstIpv6DestOptsHdr = (struct bcm_ipv6_dest_options_hdr *)pucPayloadPtr;
int nTotalOptions = pstIpv6DestOptsHdr->ucHdrExtLen;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
DBG_LVL_ALL,
"\nIPv6 DestOpts Header Header");
- usNextHeaderOffset += sizeof(IPV6DestOptionsHeader);
+ usNextHeaderOffset += sizeof(struct bcm_ipv6_dest_options_hdr);
usNextHeaderOffset += nTotalOptions * IPV6_DESTOPTS_HDR_OPTIONSIZE ;
}
break;
case IPV6HDR_TYPE_AUTHENTICATION:
{
- IPV6AuthenticationHeader *pstIpv6AuthHdr = (IPV6AuthenticationHeader *)pucPayloadPtr;
+ struct bcm_ipv6_authentication_hdr *pstIpv6AuthHdr = (struct bcm_ipv6_authentication_hdr *)pucPayloadPtr;
int nHdrLen = pstIpv6AuthHdr->ucLength;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
DBG_LVL_ALL,
@@ -186,13 +186,13 @@ USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader,
USHORT ushDestPort = 0;
USHORT ushSrcPort = 0;
UCHAR ucNextProtocolAboveIP = 0;
- IPV6Header *pstIpv6Header = NULL;
+ struct bcm_ipv6_hdr *pstIpv6Header = NULL;
BOOLEAN bClassificationSucceed = FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
DBG_LVL_ALL, "IpVersion6 ==========>\n");
- pstIpv6Header = (IPV6Header *)pcIpHeader;
+ pstIpv6Header = (struct bcm_ipv6_hdr *)pcIpHeader;
DumpIpv6Header(pstIpv6Header);
@@ -200,7 +200,7 @@ USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader,
* Try to get the next higher layer protocol
* and the Ports Nos if TCP or UDP
*/
- ucNextProtocolAboveIP = GetIpv6ProtocolPorts((UCHAR *)(pcIpHeader + sizeof(IPV6Header)),
+ ucNextProtocolAboveIP = GetIpv6ProtocolPorts((UCHAR *)(pcIpHeader + sizeof(struct bcm_ipv6_hdr)),
&ushSrcPort,
&ushDestPort,
pstIpv6Header->usPayloadLength,
@@ -289,7 +289,7 @@ USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader,
static BOOLEAN MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- IPV6Header *pstIpv6Header)
+ struct bcm_ipv6_hdr *pstIpv6Header)
{
UINT uiLoopIndex = 0;
UINT uiIpv6AddIndex = 0;
@@ -345,7 +345,7 @@ static BOOLEAN MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule
}
static BOOLEAN MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- IPV6Header *pstIpv6Header)
+ struct bcm_ipv6_hdr *pstIpv6Header)
{
UINT uiLoopIndex = 0;
UINT uiIpv6AddIndex = 0;
@@ -414,7 +414,7 @@ VOID DumpIpv6Address(ULONG *puIpv6Address)
}
-static VOID DumpIpv6Header(IPV6Header *pstIpv6Header)
+static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header)
{
UCHAR ucVersion;
UCHAR ucPrio;
diff --git a/drivers/staging/bcm/IPv6ProtocolHdr.h b/drivers/staging/bcm/IPv6ProtocolHdr.h
index 8ba88a5b081c..96b36a579af2 100644
--- a/drivers/staging/bcm/IPv6ProtocolHdr.h
+++ b/drivers/staging/bcm/IPv6ProtocolHdr.h
@@ -1,7 +1,6 @@
#ifndef _IPV6_PROTOCOL_DEFINES_
#define _IPV6_PROTOCOL_DEFINES_
-
#define IPV6HDR_TYPE_HOPBYHOP 0x0
#define IPV6HDR_TYPE_ROUTING 0x2B
#define IPV6HDR_TYPE_FRAGMENTATION 0x2C
@@ -10,107 +9,77 @@
#define IPV6HDR_TYPE_ENCRYPTEDSECURITYPAYLOAD 0x34
#define MASK_IPV6_CS_SPEC 0x2
-
-#define TCP_HEADER_TYPE 0x6
-#define UDP_HEADER_TYPE 0x11
+#define TCP_HEADER_TYPE 0x6
+#define UDP_HEADER_TYPE 0x11
#define IPV6_ICMP_HDR_TYPE 0x2
#define IPV6_FLOWLABEL_BITOFFSET 9
#define IPV6_MAX_CHAINEDHDR_BUFFBYTES 0x64
/*
-// Size of Dest Options field of Destinations Options Header
-// in bytes.
-*/
+ * Size of Dest Options field of Destinations Options Header
+ * in bytes.
+ */
#define IPV6_DESTOPTS_HDR_OPTIONSIZE 0x8
-//typedef unsigned char UCHAR;
-//typedef unsigned short USHORT;
-//typedef unsigned long int ULONG;
-
-typedef struct IPV6HeaderFormatTag
-{
- UCHAR ucVersionPrio;
- UCHAR aucFlowLabel[3];
- USHORT usPayloadLength;
- UCHAR ucNextHeader;
- UCHAR ucHopLimit;
- ULONG ulSrcIpAddress[4];
- ULONG ulDestIpAddress[4];
-}IPV6Header;
-
-typedef struct IPV6RoutingHeaderFormatTag
-{
- UCHAR ucNextHeader;
- UCHAR ucRoutingType;
- UCHAR ucNumAddresses;
- UCHAR ucNextAddress;
- ULONG ulReserved;
- //UCHAR aucAddressList[0];
-
-}IPV6RoutingHeader;
-
-typedef struct IPV6FragmentHeaderFormatTag
-{
- UCHAR ucNextHeader;
- UCHAR ucReserved;
- USHORT usFragmentOffset;
- ULONG ulIdentification;
-}IPV6FragmentHeader;
-
-typedef struct IPV6DestOptionsHeaderFormatTag
-{
- UCHAR ucNextHeader;
- UCHAR ucHdrExtLen;
- UCHAR ucDestOptions[6];
- //UCHAR udExtDestOptions[0];
-}IPV6DestOptionsHeader;
-
-typedef struct IPV6HopByHopOptionsHeaderFormatTag
-{
- UCHAR ucNextHeader;
- UCHAR ucMisc[3];
- ULONG ulJumboPayloadLen;
-}IPV6HopByHopOptionsHeader;
-
-typedef struct IPV6AuthenticationHeaderFormatTag
-{
- UCHAR ucNextHeader;
- UCHAR ucLength;
- USHORT usReserved;
- ULONG ulSecurityParametersIndex;
- //UCHAR ucAuthenticationData[0];
-
-}IPV6AuthenticationHeader;
-
-typedef struct IPV6IcmpHeaderFormatTag
-{
- UCHAR ucType;
- UCHAR ucCode;
- USHORT usChecksum;
- //UCHAR ucIcmpMsg[0];
-
-}IPV6IcmpHeader;
-
-typedef enum _E_IPADDR_CONTEXT
-{
+struct bcm_ipv6_hdr {
+ unsigned char ucVersionPrio;
+ unsigned char aucFlowLabel[3];
+ unsigned short usPayloadLength;
+ unsigned char ucNextHeader;
+ unsigned char ucHopLimit;
+ unsigned long ulSrcIpAddress[4];
+ unsigned long ulDestIpAddress[4];
+};
+
+struct bcm_ipv6_routing_hdr {
+ unsigned char ucNextHeader;
+ unsigned char ucRoutingType;
+ unsigned char ucNumAddresses;
+ unsigned char ucNextAddress;
+ unsigned long ulReserved;
+};
+
+struct bcm_ipv6_fragment_hdr {
+ unsigned char ucNextHeader;
+ unsigned char ucReserved;
+ unsigned short usFragmentOffset;
+ unsigned long ulIdentification;
+};
+
+struct bcm_ipv6_dest_options_hdr {
+ unsigned char ucNextHeader;
+ unsigned char ucHdrExtLen;
+ unsigned char ucDestOptions[6];
+};
+
+struct bcm_ipv6_options_hdr {
+ unsigned char ucNextHeader;
+ unsigned char ucMisc[3];
+ unsigned long ulJumboPayloadLen;
+};
+
+struct bcm_ipv6_authentication_hdr {
+ unsigned char ucNextHeader;
+ unsigned char ucLength;
+ unsigned short usReserved;
+ unsigned long ulSecurityParametersIndex;
+};
+
+enum bcm_ipaddr_context {
eSrcIpAddress,
eDestIpAddress
+};
-}E_IPADDR_CONTEXT;
-
-
-
-//Function Prototypes
-
-USHORT IpVersion6(struct bcm_mini_adapter *Adapter, /**< Pointer to the driver control structure */
- PVOID pcIpHeader, /**<Pointer to the IP Hdr of the packet*/
- struct bcm_classifier_rule *pstClassifierRule );
+/* Function Prototypes */
-VOID DumpIpv6Address(ULONG *puIpv6Address);
+unsigned short IpVersion6(struct bcm_mini_adapter *Adapter, /* Pointer to the driver control structure */
+			  void *pcIpHeader, /* Pointer to the IP Hdr of the packet */
+ struct bcm_classifier_rule *pstClassifierRule);
-extern BOOLEAN MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort);
-extern BOOLEAN MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort);
-extern BOOLEAN MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtocol);
+void DumpIpv6Address(unsigned long *puIpv6Address);
+extern bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, unsigned short ushSrcPort);
+extern bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, unsigned short ushSrcPort);
+extern bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, unsigned char ucProtocol);
#endif
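
For readers skimming the converted header, here is a minimal, stand-alone sketch (not part of the patch; the _example suffix marks the name as hypothetical) of the typedef-removal pattern applied above: each on-wire header keeps its layout, but gains a bcm_-prefixed struct tag, and call sites spell out "struct ..." in casts and sizeof() instead of relying on a typedef alias.

    #include <stdio.h>

    struct bcm_ipv6_fragment_hdr_example {	/* same fields as the real header above */
    	unsigned char ucNextHeader;
    	unsigned char ucReserved;
    	unsigned short usFragmentOffset;
    	unsigned long ulIdentification;
    };

    int main(void)
    {
    	/* sizeof() now names the struct tag directly, as the hunks above do */
    	printf("fragment header size: %zu\n",
    	       sizeof(struct bcm_ipv6_fragment_hdr_example));
    	return 0;
    }
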
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index 87117a797d5b..64ea6edb9dc2 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -138,12 +138,12 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm
B_UINT32 value = 0;
if (Adapter->pstargetparams == NULL) {
- Adapter->pstargetparams = kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL);
+ Adapter->pstargetparams = kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
if (Adapter->pstargetparams == NULL)
return -ENOMEM;
}
- if (psFwInfo->u32FirmwareLength != sizeof(STARGETPARAMS))
+ if (psFwInfo->u32FirmwareLength != sizeof(struct bcm_target_params))
return -EIO;
retval = copy_from_user(Adapter->pstargetparams, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
@@ -195,7 +195,7 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm
}
}
- retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, sizeof(STARGETPARAMS), CONFIG_BEGIN_ADDR);
+ retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR);
if (retval)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "configuration file not downloaded properly");
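
The hunks above keep the same safety shape while switching to the struct tag: allocate the target-params buffer only if it is missing, reject any firmware blob whose length is not exactly sizeof(struct bcm_target_params), then copy. A userspace analogue of that shape, as a sketch only (cfg_params_example and load_cfg are illustrative names, not driver API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    struct cfg_params_example { unsigned int words[64]; };	/* hypothetical stand-in */

    static int load_cfg(struct cfg_params_example **dst, const void *blob, size_t len)
    {
    	if (len != sizeof(**dst))	/* reject anything but an exact-size image */
    		return -EIO;
    	if (!*dst) {
    		*dst = malloc(sizeof(**dst));
    		if (!*dst)
    			return -ENOMEM;
    	}
    	memcpy(*dst, blob, len);
    	return 0;
    }

    int main(void)
    {
    	struct cfg_params_example *params = NULL;
    	unsigned char blob[sizeof(struct cfg_params_example)] = { 0 };

    	printf("load: %d\n", load_cfg(&params, blob, sizeof(blob)));
    	free(params);
    	return 0;
    }
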
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index eb246430b320..79058ce5b332 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -190,9 +190,9 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
}
/* Allocate interface adapter structure */
- psIntfAdapter = kzalloc(sizeof(struct bcm_interface_adapter), GFP_KERNEL);
+ psIntfAdapter = kzalloc(sizeof(struct bcm_interface_adapter),
+ GFP_KERNEL);
if (psIntfAdapter == NULL) {
- dev_err(&udev->dev, DRV_NAME ": no memory for Interface adapter\n");
AdapterFree(psAdapter);
return -ENOMEM;
}
@@ -564,11 +564,8 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->sIntrIn.int_in_interval = endpoint->bInterval;
psIntfAdapter->sIntrIn.int_in_buffer =
kmalloc(buffer_size, GFP_KERNEL);
- if (!psIntfAdapter->sIntrIn.int_in_buffer) {
- dev_err(&psIntfAdapter->udev->dev,
- "could not allocate interrupt_in_buffer\n");
+ if (!psIntfAdapter->sIntrIn.int_in_buffer)
return -EINVAL;
- }
}
if (!psIntfAdapter->sIntrOut.int_out_endpointAddr && bcm_usb_endpoint_is_int_out(endpoint)) {
@@ -587,11 +584,8 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->sIntrOut.int_out_endpointAddr = endpoint->bEndpointAddress;
psIntfAdapter->sIntrOut.int_out_interval = endpoint->bInterval;
psIntfAdapter->sIntrOut.int_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!psIntfAdapter->sIntrOut.int_out_buffer) {
- dev_err(&psIntfAdapter->udev->dev,
- "could not allocate interrupt_out_buffer\n");
+ if (!psIntfAdapter->sIntrOut.int_out_buffer)
return -EINVAL;
- }
}
}
}
diff --git a/drivers/staging/bcm/Ioctl.h b/drivers/staging/bcm/Ioctl.h
index 8c70af90969b..e253c080a787 100644
--- a/drivers/staging/bcm/Ioctl.h
+++ b/drivers/staging/bcm/Ioctl.h
@@ -108,7 +108,7 @@ enum bcm_interface_type {
};
struct bcm_driver_info {
- NVM_TYPE u32NVMType;
+ enum bcm_nvm_type u32NVMType;
unsigned int MaxRDMBufferSize;
enum bcm_interface_type u32InterfaceType;
unsigned int u32DSDStartOffset;
@@ -202,8 +202,8 @@ struct bcm_flash2x_bitmap {
};
struct bcm_time_elapsed {
- unsigned long long ul64TimeElapsedSinceNetEntry;
- u32 uiReserved[4];
+ u64 ul64TimeElapsedSinceNetEntry;
+ u32 uiReserved[4];
};
enum {
diff --git a/drivers/staging/bcm/Macros.h b/drivers/staging/bcm/Macros.h
index 46f5f0feea88..dc01e3016d4f 100644
--- a/drivers/staging/bcm/Macros.h
+++ b/drivers/staging/bcm/Macros.h
@@ -162,13 +162,11 @@
#define GPIO_MODE_REGISTER 0x0F000034
#define GPIO_PIN_STATE_REGISTER 0x0F000038
-
-typedef struct _LINK_STATE {
- UCHAR ucLinkStatus;
- UCHAR bIdleMode;
- UCHAR bShutdownMode;
-} LINK_STATE, *PLINK_STATE;
-
+struct bcm_link_state {
+ unsigned char ucLinkStatus;
+ unsigned char bIdleMode;
+ unsigned char bShutdownMode;
+};
enum enLinkStatus {
WAIT_FOR_SYNC = 1,
@@ -182,13 +180,12 @@ enum enLinkStatus {
COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW = 9
};
-typedef enum _E_PHS_DSC_ACTION {
+enum bcm_phs_dsc_action {
eAddPHSRule = 0,
eSetPHSRule,
eDeletePHSRule,
eDeleteAllPHSRules
-} E_PHS_DSC_ACTION;
-
+};
#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ 0x89 /* Host to Mac */
#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP 0xA9 /* Mac to Host */
@@ -324,18 +321,18 @@ typedef enum _E_PHS_DSC_ACTION {
#define HPM_CONFIG_MSW 0x0F000D58
#define T3B 0xbece0310
-typedef enum eNVM_TYPE {
+enum bcm_nvm_type {
NVM_AUTODETECT = 0,
NVM_EEPROM,
NVM_FLASH,
NVM_UNKNOWN
-} NVM_TYPE;
+};
-typedef enum ePMU_MODES {
+enum bcm_pmu_modes {
HYBRID_MODE_7C = 0,
INTERNAL_MODE_6 = 1,
HYBRID_MODE_6 = 2
-} PMU_MODE;
+};
#define MAX_RDM_WRM_RETIRES 1
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index c92078e7fe86..b5c2c4c15f92 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -956,7 +956,7 @@ int InitCardAndDownloadFirmware(struct bcm_mini_adapter *ps_adapter)
/* Download cfg file */
status = buffDnldVerify(ps_adapter,
(PUCHAR)ps_adapter->pstargetparams,
- sizeof(STARGETPARAMS),
+ sizeof(struct bcm_target_params),
CONFIG_BEGIN_ADDR);
if (status) {
BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Error downloading CFG file");
@@ -1053,7 +1053,7 @@ static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter)
if (!buff)
return -ENOMEM;
- Adapter->pstargetparams = kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL);
+ Adapter->pstargetparams = kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
if (Adapter->pstargetparams == NULL) {
kfree(buff);
return -ENOMEM;
@@ -1070,7 +1070,7 @@ static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter)
len = kernel_read(flp, 0, buff, BUFFER_1K);
filp_close(flp, NULL);
- if (len != sizeof(STARGETPARAMS)) {
+ if (len != sizeof(struct bcm_target_params)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n");
kfree(buff);
kfree(Adapter->pstargetparams);
@@ -1082,7 +1082,7 @@ static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter)
/*
* Values in Adapter->pstargetparams are in network byte order
*/
- memcpy(Adapter->pstargetparams, buff, sizeof(STARGETPARAMS));
+ memcpy(Adapter->pstargetparams, buff, sizeof(struct bcm_target_params));
kfree(buff);
beceem_parse_target_struct(Adapter);
return STATUS_SUCCESS;
@@ -1134,7 +1134,7 @@ void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter)
uiEEPROMFlag = ntohl(Adapter->pstargetparams->m_u32EEPROMFlag);
pr_info(DRV_NAME ": uiEEPROMFlag : 0x%X\n", uiEEPROMFlag);
- Adapter->eNVMType = (NVM_TYPE)((uiEEPROMFlag>>4)&0x3);
+ Adapter->eNVMType = (enum bcm_nvm_type)((uiEEPROMFlag>>4)&0x3);
Adapter->bStatusWrite = (uiEEPROMFlag>>6)&0x1;
Adapter->uiSectorSizeInCFG = 1024*(0xFFFF & ntohl(Adapter->pstargetparams->HostDrvrConfig4));
Adapter->bSectorSizeOverride = (bool) ((ntohl(Adapter->pstargetparams->HostDrvrConfig4))>>16)&0x1;
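
The cast in the hunk above selects the NVM type from two bits of the EEPROM flag word. A small stand-alone sketch of that bit-field extraction (the enum values mirror enum bcm_nvm_type from Macros.h; the helper name is illustrative):

    #include <stdio.h>

    enum nvm_type_example { NVM_AUTODETECT = 0, NVM_EEPROM, NVM_FLASH, NVM_UNKNOWN };

    static enum nvm_type_example nvm_type_from_flag(unsigned int eeprom_flag)
    {
    	/* bits 5:4 carry the NVM type, exactly as (uiEEPROMFlag >> 4) & 0x3 above */
    	return (enum nvm_type_example)((eeprom_flag >> 4) & 0x3);
    }

    int main(void)
    {
    	printf("type: %d\n", nvm_type_from_flag(0x20));	/* bits 5:4 = 2 -> NVM_FLASH */
    	return 0;
    }
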
diff --git a/drivers/staging/bcm/PHSDefines.h b/drivers/staging/bcm/PHSDefines.h
index 6016fc502d2d..cd78ee4ffa22 100644
--- a/drivers/staging/bcm/PHSDefines.h
+++ b/drivers/staging/bcm/PHSDefines.h
@@ -1,124 +1,94 @@
#ifndef BCM_PHS_DEFINES_H
#define BCM_PHS_DEFINES_H
-#define PHS_INVALID_TABLE_INDEX 0xffffffff
-
-/************************* MACROS **********************************************/
+#define PHS_INVALID_TABLE_INDEX 0xffffffff
#define PHS_MEM_TAG "_SHP"
-
-
-//PHS Defines
-#define STATUS_PHS_COMPRESSED 0xa1
-#define STATUS_PHS_NOCOMPRESSION 0xa2
-#define APPLY_PHS 1
-#define MAX_NO_BIT 7
-#define ZERO_PHSI 0
-#define VERIFY 0
-#define SIZE_MULTIPLE_32 4
-#define UNCOMPRESSED_PACKET 0
-#define DYNAMIC 0
-#define SUPPRESS 0x80
-#define NO_CLASSIFIER_MATCH 0
-#define SEND_PACKET_UNCOMPRESSED 0
-#define PHSI_IS_ZERO 0
-#define PHSI_LEN 1
-#define ERROR_LEN 0
-#define PHS_BUFFER_SIZE 1532
-
-
-#define MAX_PHSRULE_PER_SF 20
-#define MAX_SERVICEFLOWS 17
-
-//PHS Error Defines
-#define PHS_SUCCESS 0
-#define ERR_PHS_INVALID_DEVICE_EXETENSION 0x800
-#define ERR_PHS_INVALID_PHS_RULE 0x801
-#define ERR_PHS_RULE_ALREADY_EXISTS 0x802
-#define ERR_SF_MATCH_FAIL 0x803
-#define ERR_INVALID_CLASSIFIERTABLE_FOR_SF 0x804
-#define ERR_SFTABLE_FULL 0x805
-#define ERR_CLSASSIFIER_TABLE_FULL 0x806
-#define ERR_PHSRULE_MEMALLOC_FAIL 0x807
-#define ERR_CLSID_MATCH_FAIL 0x808
-#define ERR_PHSRULE_MATCH_FAIL 0x809
-
-typedef struct _S_PHS_RULE
-{
- /// brief 8bit PHSI Of The Service Flow
- B_UINT8 u8PHSI;
- /// brief PHSF Of The Service Flow
- B_UINT8 u8PHSFLength;
- B_UINT8 u8PHSF[MAX_PHS_LENGTHS];
- /// brief PHSM Of The Service Flow
- B_UINT8 u8PHSMLength;
- B_UINT8 u8PHSM[MAX_PHS_LENGTHS];
- /// brief 8bit PHSS Of The Service Flow
- B_UINT8 u8PHSS;
- /// brief 8bit PHSV Of The Service Flow
- B_UINT8 u8PHSV;
- //Reference Count for this PHS Rule
- B_UINT8 u8RefCnt;
- //Flag to Store Unclassified PHS rules only in DL
- B_UINT8 bUnclassifiedPHSRule;
-
- B_UINT8 u8Reserved[3];
-
- LONG PHSModifiedBytes;
- ULONG PHSModifiedNumPackets;
- ULONG PHSErrorNumPackets;
-}S_PHS_RULE;
-
-
-typedef enum _E_CLASSIFIER_ENTRY_CONTEXT
-{
+/* PHS Defines */
+#define STATUS_PHS_COMPRESSED 0xa1
+#define STATUS_PHS_NOCOMPRESSION 0xa2
+#define APPLY_PHS 1
+#define MAX_NO_BIT 7
+#define ZERO_PHSI 0
+#define VERIFY 0
+#define SIZE_MULTIPLE_32 4
+#define UNCOMPRESSED_PACKET 0
+#define DYNAMIC 0
+#define SUPPRESS 0x80
+#define NO_CLASSIFIER_MATCH 0
+#define SEND_PACKET_UNCOMPRESSED 0
+#define PHSI_IS_ZERO 0
+#define PHSI_LEN 1
+#define ERROR_LEN 0
+#define PHS_BUFFER_SIZE 1532
+#define MAX_PHSRULE_PER_SF 20
+#define MAX_SERVICEFLOWS 17
+
+/* PHS Error Defines */
+#define PHS_SUCCESS 0
+#define ERR_PHS_INVALID_DEVICE_EXETENSION 0x800
+#define ERR_PHS_INVALID_PHS_RULE 0x801
+#define ERR_PHS_RULE_ALREADY_EXISTS 0x802
+#define ERR_SF_MATCH_FAIL 0x803
+#define ERR_INVALID_CLASSIFIERTABLE_FOR_SF 0x804
+#define ERR_SFTABLE_FULL 0x805
+#define ERR_CLSASSIFIER_TABLE_FULL 0x806
+#define ERR_PHSRULE_MEMALLOC_FAIL 0x807
+#define ERR_CLSID_MATCH_FAIL 0x808
+#define ERR_PHSRULE_MATCH_FAIL 0x809
+
+struct bcm_phs_rule {
+ u8 u8PHSI;
+ u8 u8PHSFLength;
+ u8 u8PHSF[MAX_PHS_LENGTHS];
+ u8 u8PHSMLength;
+ u8 u8PHSM[MAX_PHS_LENGTHS];
+ u8 u8PHSS;
+ u8 u8PHSV;
+ u8 u8RefCnt;
+ u8 bUnclassifiedPHSRule;
+ u8 u8Reserved[3];
+ long PHSModifiedBytes;
+ unsigned long PHSModifiedNumPackets;
+ unsigned long PHSErrorNumPackets;
+};
+
+enum bcm_phs_classifier_context {
eActiveClassifierRuleContext,
eOldClassifierRuleContext
-}E_CLASSIFIER_ENTRY_CONTEXT;
-
-typedef struct _S_CLASSIFIER_ENTRY
-{
- B_UINT8 bUsed;
- B_UINT16 uiClassifierRuleId;
- B_UINT8 u8PHSI;
- S_PHS_RULE *pstPhsRule;
- B_UINT8 bUnclassifiedPHSRule;
-
-}S_CLASSIFIER_ENTRY;
-
-
-typedef struct _S_CLASSIFIER_TABLE
-{
- B_UINT16 uiTotalClassifiers;
- S_CLASSIFIER_ENTRY stActivePhsRulesList[MAX_PHSRULE_PER_SF];
- S_CLASSIFIER_ENTRY stOldPhsRulesList[MAX_PHSRULE_PER_SF];
- B_UINT16 uiOldestPhsRuleIndex;
-
-}S_CLASSIFIER_TABLE;
-
-
-typedef struct _S_SERVICEFLOW_ENTRY
-{
- B_UINT8 bUsed;
- B_UINT16 uiVcid;
- S_CLASSIFIER_TABLE *pstClassifierTable;
-}S_SERVICEFLOW_ENTRY;
-
-typedef struct _S_SERVICEFLOW_TABLE
-{
- B_UINT16 uiTotalServiceFlows;
- S_SERVICEFLOW_ENTRY stSFList[MAX_SERVICEFLOWS];
-
-}S_SERVICEFLOW_TABLE;
-
-
-typedef struct _PHS_DEVICE_EXTENSION
-{
- /* PHS Specific data*/
- S_SERVICEFLOW_TABLE *pstServiceFlowPhsRulesTable;
- void *CompressedTxBuffer;
- void *UnCompressedRxBuffer;
-}PHS_DEVICE_EXTENSION,*PPHS_DEVICE_EXTENSION;
-
+};
+
+struct bcm_phs_classifier_entry {
+ u8 bUsed;
+ u16 uiClassifierRuleId;
+ u8 u8PHSI;
+ struct bcm_phs_rule *pstPhsRule;
+ u8 bUnclassifiedPHSRule;
+};
+
+struct bcm_phs_classifier_table {
+ u16 uiTotalClassifiers;
+ struct bcm_phs_classifier_entry stActivePhsRulesList[MAX_PHSRULE_PER_SF];
+ struct bcm_phs_classifier_entry stOldPhsRulesList[MAX_PHSRULE_PER_SF];
+ u16 uiOldestPhsRuleIndex;
+};
+
+struct bcm_phs_entry {
+ u8 bUsed;
+ u16 uiVcid;
+ struct bcm_phs_classifier_table *pstClassifierTable;
+};
+
+struct bcm_phs_table {
+ u16 uiTotalServiceFlows;
+ struct bcm_phs_entry stSFList[MAX_SERVICEFLOWS];
+};
+
+struct bcm_phs_extension {
+ /* PHS Specific data */
+ struct bcm_phs_table *pstServiceFlowPhsRulesTable;
+ void *CompressedTxBuffer;
+ void *UnCompressedRxBuffer;
+};
#endif
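
The converted PHSDefines.h keeps the same three-level layout: a fixed-size service-flow table whose entries own a classifier table, whose entries in turn point at a shared, reference-counted PHS rule. A compilable sketch of a lookup over that layout, in the spirit of GetServiceFlowEntry() (field lists trimmed; the _example names are placeholders):

    #include <stdio.h>

    #define MAX_SERVICEFLOWS_EXAMPLE 17

    struct phs_entry_example {
    	unsigned char bUsed;
    	unsigned short uiVcid;
    };

    struct phs_table_example {
    	unsigned short uiTotalServiceFlows;
    	struct phs_entry_example stSFList[MAX_SERVICEFLOWS_EXAMPLE];
    };

    /* linear scan keyed on VCID, returning the slot index or -1 when absent */
    static int find_service_flow(const struct phs_table_example *tbl, unsigned short vcid)
    {
    	int i;

    	for (i = 0; i < MAX_SERVICEFLOWS_EXAMPLE; i++)
    		if (tbl->stSFList[i].bUsed && tbl->stSFList[i].uiVcid == vcid)
    			return i;
    	return -1;
    }

    int main(void)
    {
    	struct phs_table_example tbl = { 0 };

    	tbl.stSFList[3].bUsed = 1;
    	tbl.stSFList[3].uiVcid = 42;
    	printf("vcid 42 at index %d\n", find_service_flow(&tbl, 42));
    	return 0;
    }
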
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
index 6dc0bbcfeab0..7028bc95b4f9 100644
--- a/drivers/staging/bcm/PHSModule.c
+++ b/drivers/staging/bcm/PHSModule.c
@@ -1,24 +1,24 @@
#include "headers.h"
-static UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid,B_UINT16 uiClsId,S_SERVICEFLOW_TABLE *psServiceFlowTable,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+static UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid,B_UINT16 uiClsId, struct bcm_phs_table *psServiceFlowTable, struct bcm_phs_rule *psPhsRule, B_UINT8 u8AssociatedPHSI);
-static UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid,B_UINT16 uiClsId,S_SERVICEFLOW_ENTRY *pstServiceFlowEntry,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+static UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid,B_UINT16 uiClsId, struct bcm_phs_entry *pstServiceFlowEntry, struct bcm_phs_rule *psPhsRule, B_UINT8 u8AssociatedPHSI);
-static UINT CreateClassifierPHSRule(B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,B_UINT8 u8AssociatedPHSI);
+static UINT CreateClassifierPHSRule(B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *psPhsRule, enum bcm_phs_classifier_context eClsContext,B_UINT8 u8AssociatedPHSI);
-static UINT UpdateClassifierPHSRule(B_UINT16 uiClsId,S_CLASSIFIER_ENTRY *pstClassifierEntry,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+static UINT UpdateClassifierPHSRule(B_UINT16 uiClsId, struct bcm_phs_classifier_entry *pstClassifierEntry, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *psPhsRule, B_UINT8 u8AssociatedPHSI);
-static BOOLEAN ValidatePHSRuleComplete(S_PHS_RULE *psPhsRule);
+static BOOLEAN ValidatePHSRuleComplete(struct bcm_phs_rule *psPhsRule);
-static BOOLEAN DerefPhsRule(B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule);
+static BOOLEAN DerefPhsRule(B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *pstPhsRule);
-static UINT GetClassifierEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiClsid,E_CLASSIFIER_ENTRY_CONTEXT eClsContext, S_CLASSIFIER_ENTRY **ppstClassifierEntry);
+static UINT GetClassifierEntry(struct bcm_phs_classifier_table *pstClassifierTable,B_UINT32 uiClsid, enum bcm_phs_classifier_context eClsContext, struct bcm_phs_classifier_entry **ppstClassifierEntry);
-static UINT GetPhsRuleEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,S_PHS_RULE **ppstPhsRule);
+static UINT GetPhsRuleEntry(struct bcm_phs_classifier_table *pstClassifierTable,B_UINT32 uiPHSI, enum bcm_phs_classifier_context eClsContext, struct bcm_phs_rule **ppstPhsRule);
-static void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable);
+static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable);
-static int phs_compress(S_PHS_RULE *phs_members,unsigned char *in_buf,
+static int phs_compress(struct bcm_phs_rule *phs_members, unsigned char *in_buf,
unsigned char *out_buf,unsigned int *header_size,UINT *new_header_size );
@@ -26,7 +26,7 @@ static int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buff
unsigned char *phsf,unsigned char *phsm,unsigned int phss,unsigned int phsv,UINT *new_header_size );
static int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,\
- S_PHS_RULE *phs_rules,UINT *header_size);
+ struct bcm_phs_rule *phs_rules, UINT *header_size);
static ULONG PhsCompress(void* pvContext,
@@ -291,17 +291,17 @@ void DumpFullPacket(UCHAR *pBuf,UINT nPktLen)
// TRUE(1) -If allocation of memory was success full.
// FALSE -If allocation of memory fails.
//-----------------------------------------------------------------------------
-int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension, struct bcm_mini_adapter *Adapter)
+int phs_init(struct bcm_phs_extension *pPhsdeviceExtension, struct bcm_mini_adapter *Adapter)
{
int i;
- S_SERVICEFLOW_TABLE *pstServiceFlowTable;
+ struct bcm_phs_table *pstServiceFlowTable;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nPHS:phs_init function ");
if(pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
return -EINVAL;
pPhsdeviceExtension->pstServiceFlowPhsRulesTable =
- kzalloc(sizeof(S_SERVICEFLOW_TABLE), GFP_KERNEL);
+ kzalloc(sizeof(struct bcm_phs_table), GFP_KERNEL);
if(!pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
{
@@ -312,8 +312,8 @@ int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension, struct bcm_mini_adapter
pstServiceFlowTable = pPhsdeviceExtension->pstServiceFlowPhsRulesTable;
for(i=0;i<MAX_SERVICEFLOWS;i++)
{
- S_SERVICEFLOW_ENTRY sServiceFlow = pstServiceFlowTable->stSFList[i];
- sServiceFlow.pstClassifierTable = kzalloc(sizeof(S_CLASSIFIER_TABLE), GFP_KERNEL);
+ struct bcm_phs_entry sServiceFlow = pstServiceFlowTable->stSFList[i];
+ sServiceFlow.pstClassifierTable = kzalloc(sizeof(struct bcm_phs_classifier_table), GFP_KERNEL);
if(!sServiceFlow.pstClassifierTable)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation failed");
@@ -351,7 +351,7 @@ int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension, struct bcm_mini_adapter
}
-int PhsCleanup(IN PPHS_DEVICE_EXTENSION pPHSDeviceExt)
+int PhsCleanup(IN struct bcm_phs_extension *pPHSDeviceExt)
{
if(pPHSDeviceExt->pstServiceFlowPhsRulesTable)
{
@@ -381,7 +381,7 @@ Arguments:
IN void* pvContext - PHS Driver Specific Context
IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies.
- IN S_PHS_RULE *psPhsRule - The PHS Rule strcuture to be added to the PHS Rule table.
+ IN struct bcm_phs_rule *psPhsRule - The PHS Rule structure to be added to the PHS Rule table.
Return Value:
@@ -392,17 +392,17 @@ Return Value:
ULONG PhsUpdateClassifierRule(IN void* pvContext,
IN B_UINT16 uiVcid ,
IN B_UINT16 uiClsId ,
- IN S_PHS_RULE *psPhsRule,
+ IN struct bcm_phs_rule *psPhsRule,
IN B_UINT8 u8AssociatedPHSI)
{
ULONG lStatus =0;
UINT nSFIndex =0 ;
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
+ struct bcm_phs_entry *pstServiceFlowEntry = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPHS_DEVICE_EXTENSION pDeviceExtension= (PPHS_DEVICE_EXTENSION)pvContext;
+ struct bcm_phs_extension *pDeviceExtension= (struct bcm_phs_extension *)pvContext;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,"PHS With Corr2 Changes \n");
@@ -460,12 +460,12 @@ ULONG PhsDeletePHSRule(IN void* pvContext,IN B_UINT16 uiVcid,IN B_UINT8 u8PHSI)
{
ULONG lStatus =0;
UINT nSFIndex =0, nClsidIndex =0 ;
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_CLASSIFIER_TABLE *pstClassifierRulesTable = NULL;
+ struct bcm_phs_entry *pstServiceFlowEntry = NULL;
+ struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPHS_DEVICE_EXTENSION pDeviceExtension= (PPHS_DEVICE_EXTENSION)pvContext;
+ struct bcm_phs_extension *pDeviceExtension= (struct bcm_phs_extension *)pvContext;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "======>\n");
@@ -495,7 +495,7 @@ ULONG PhsDeletePHSRule(IN void* pvContext,IN B_UINT16 uiVcid,IN B_UINT8 u8PHSI)
if(0 == pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
kfree(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule);
memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0,
- sizeof(S_CLASSIFIER_ENTRY));
+ sizeof(struct bcm_phs_classifier_entry));
}
}
}
@@ -526,10 +526,10 @@ ULONG PhsDeleteClassifierRule(IN void* pvContext,IN B_UINT16 uiVcid ,IN B_UINT16
{
ULONG lStatus =0;
UINT nSFIndex =0, nClsidIndex =0 ;
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_CLASSIFIER_ENTRY *pstClassifierEntry = NULL;
+ struct bcm_phs_entry *pstServiceFlowEntry = NULL;
+ struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPHS_DEVICE_EXTENSION pDeviceExtension= (PPHS_DEVICE_EXTENSION)pvContext;
+ struct bcm_phs_extension *pDeviceExtension= (struct bcm_phs_extension *)pvContext;
if(pDeviceExtension)
{
@@ -554,7 +554,7 @@ ULONG PhsDeleteClassifierRule(IN void* pvContext,IN B_UINT16 uiVcid ,IN B_UINT16
kfree(pstClassifierEntry->pstPhsRule);
}
- memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_ENTRY));
+ memset(pstClassifierEntry, 0, sizeof(struct bcm_phs_classifier_entry));
}
nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
@@ -563,7 +563,7 @@ ULONG PhsDeleteClassifierRule(IN void* pvContext,IN B_UINT16 uiVcid ,IN B_UINT16
if((nClsidIndex != PHS_INVALID_TABLE_INDEX) && (!pstClassifierEntry->bUnclassifiedPHSRule))
{
kfree(pstClassifierEntry->pstPhsRule);
- memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_ENTRY));
+ memset(pstClassifierEntry, 0, sizeof(struct bcm_phs_classifier_entry));
}
}
return lStatus;
@@ -590,10 +590,10 @@ ULONG PhsDeleteSFRules(IN void* pvContext,IN B_UINT16 uiVcid)
ULONG lStatus =0;
UINT nSFIndex =0, nClsidIndex =0 ;
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_CLASSIFIER_TABLE *pstClassifierRulesTable = NULL;
+ struct bcm_phs_entry *pstServiceFlowEntry = NULL;
+ struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPHS_DEVICE_EXTENSION pDeviceExtension= (PPHS_DEVICE_EXTENSION)pvContext;
+ struct bcm_phs_extension *pDeviceExtension= (struct bcm_phs_extension *)pvContext;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,"====> \n");
if(pDeviceExtension)
@@ -624,7 +624,7 @@ ULONG PhsDeleteSFRules(IN void* pvContext,IN B_UINT16 uiVcid)
pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
.pstPhsRule = NULL;
}
- memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0, sizeof(S_CLASSIFIER_ENTRY));
+ memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0, sizeof(struct bcm_phs_classifier_entry));
if(pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule)
{
if(pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]
@@ -638,7 +638,7 @@ ULONG PhsDeleteSFRules(IN void* pvContext,IN B_UINT16 uiVcid)
pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]
.pstPhsRule = NULL;
}
- memset(&pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex], 0, sizeof(S_CLASSIFIER_ENTRY));
+ memset(&pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex], 0, sizeof(struct bcm_phs_classifier_entry));
}
}
pstServiceFlowEntry->bUsed = FALSE;
@@ -680,15 +680,15 @@ ULONG PhsCompress(IN void* pvContext,
OUT UINT *pNewHeaderSize )
{
UINT nSFIndex =0, nClsidIndex =0 ;
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_CLASSIFIER_ENTRY *pstClassifierEntry = NULL;
- S_PHS_RULE *pstPhsRule = NULL;
+ struct bcm_phs_entry *pstServiceFlowEntry = NULL;
+ struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
+ struct bcm_phs_rule *pstPhsRule = NULL;
ULONG lStatus =0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPHS_DEVICE_EXTENSION pDeviceExtension= (PPHS_DEVICE_EXTENSION)pvContext;
+ struct bcm_phs_extension *pDeviceExtension= (struct bcm_phs_extension *)pvContext;
if(pDeviceExtension == NULL)
@@ -775,12 +775,12 @@ ULONG PhsDeCompress(IN void* pvContext,
OUT UINT *pOutHeaderSize )
{
UINT nSFIndex =0, nPhsRuleIndex =0 ;
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_PHS_RULE *pstPhsRule = NULL;
+ struct bcm_phs_entry *pstServiceFlowEntry = NULL;
+ struct bcm_phs_rule *pstPhsRule = NULL;
UINT phsi;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPHS_DEVICE_EXTENSION pDeviceExtension=
- (PPHS_DEVICE_EXTENSION)pvContext;
+ struct bcm_phs_extension *pDeviceExtension=
+ (struct bcm_phs_extension *)pvContext;
*pInHeaderSize = 0;
@@ -844,7 +844,7 @@ ULONG PhsDeCompress(IN void* pvContext,
// Does not return any value.
//-----------------------------------------------------------------------------
-static void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable)
+static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable)
{
int i,j;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -854,9 +854,9 @@ static void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTa
{
for(i=0;i<MAX_SERVICEFLOWS;i++)
{
- S_SERVICEFLOW_ENTRY stServiceFlowEntry =
+ struct bcm_phs_entry stServiceFlowEntry =
psServiceFlowRulesTable->stSFList[i];
- S_CLASSIFIER_TABLE *pstClassifierRulesTable =
+ struct bcm_phs_classifier_table *pstClassifierRulesTable =
stServiceFlowEntry.pstClassifierTable;
if(pstClassifierRulesTable)
@@ -898,7 +898,7 @@ static void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTa
-static BOOLEAN ValidatePHSRuleComplete(IN S_PHS_RULE *psPhsRule)
+static BOOLEAN ValidatePHSRuleComplete(IN struct bcm_phs_rule *psPhsRule)
{
if(psPhsRule)
{
@@ -927,8 +927,8 @@ static BOOLEAN ValidatePHSRuleComplete(IN S_PHS_RULE *psPhsRule)
}
}
-UINT GetServiceFlowEntry(IN S_SERVICEFLOW_TABLE *psServiceFlowTable,
- IN B_UINT16 uiVcid,S_SERVICEFLOW_ENTRY **ppstServiceFlowEntry)
+UINT GetServiceFlowEntry(IN struct bcm_phs_table *psServiceFlowTable,
+ IN B_UINT16 uiVcid, struct bcm_phs_entry **ppstServiceFlowEntry)
{
int i;
for(i=0;i<MAX_SERVICEFLOWS;i++)
@@ -948,12 +948,12 @@ UINT GetServiceFlowEntry(IN S_SERVICEFLOW_TABLE *psServiceFlowTable,
}
-UINT GetClassifierEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
- IN B_UINT32 uiClsid,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,
- OUT S_CLASSIFIER_ENTRY **ppstClassifierEntry)
+UINT GetClassifierEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
+ IN B_UINT32 uiClsid, enum bcm_phs_classifier_context eClsContext,
+ OUT struct bcm_phs_classifier_entry **ppstClassifierEntry)
{
int i;
- S_CLASSIFIER_ENTRY *psClassifierRules = NULL;
+ struct bcm_phs_classifier_entry *psClassifierRules = NULL;
for(i=0;i<MAX_PHSRULE_PER_SF;i++)
{
@@ -981,12 +981,12 @@ UINT GetClassifierEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
return PHS_INVALID_TABLE_INDEX;
}
-static UINT GetPhsRuleEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
- IN B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,
- OUT S_PHS_RULE **ppstPhsRule)
+static UINT GetPhsRuleEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
+ IN B_UINT32 uiPHSI, enum bcm_phs_classifier_context eClsContext,
+ OUT struct bcm_phs_rule **ppstPhsRule)
{
int i;
- S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
+ struct bcm_phs_classifier_entry *pstClassifierRule = NULL;
for(i=0;i<MAX_PHSRULE_PER_SF;i++)
{
if(eClsContext == eActiveClassifierRuleContext)
@@ -1013,11 +1013,11 @@ static UINT GetPhsRuleEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
}
UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid,IN B_UINT16 uiClsId,
- IN S_SERVICEFLOW_TABLE *psServiceFlowTable,S_PHS_RULE *psPhsRule,
+ IN struct bcm_phs_table *psServiceFlowTable, struct bcm_phs_rule *psPhsRule,
B_UINT8 u8AssociatedPHSI)
{
- S_CLASSIFIER_TABLE *psaClassifiertable = NULL;
+ struct bcm_phs_classifier_table *psaClassifiertable = NULL;
UINT uiStatus = 0;
int iSfIndex;
BOOLEAN bFreeEntryFound =FALSE;
@@ -1050,13 +1050,13 @@ UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid,IN B_UINT16 uiClsId,
}
UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId,IN S_SERVICEFLOW_ENTRY *pstServiceFlowEntry,
- S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI)
+ IN B_UINT16 uiClsId,IN struct bcm_phs_entry *pstServiceFlowEntry,
+ struct bcm_phs_rule *psPhsRule, B_UINT8 u8AssociatedPHSI)
{
- S_CLASSIFIER_ENTRY *pstClassifierEntry = NULL;
+ struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
UINT uiStatus =PHS_SUCCESS;
UINT nClassifierIndex = 0;
- S_CLASSIFIER_TABLE *psaClassifiertable = NULL;
+ struct bcm_phs_classifier_table *psaClassifiertable = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
psaClassifiertable = pstServiceFlowEntry->pstClassifierTable;
@@ -1141,12 +1141,12 @@ UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
}
static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
- S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,
- E_CLASSIFIER_ENTRY_CONTEXT eClsContext,B_UINT8 u8AssociatedPHSI)
+ struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *psPhsRule,
+ enum bcm_phs_classifier_context eClsContext,B_UINT8 u8AssociatedPHSI)
{
UINT iClassifierIndex = 0;
BOOLEAN bFreeEntryFound = FALSE;
- S_CLASSIFIER_ENTRY *psClassifierRules = NULL;
+ struct bcm_phs_classifier_entry *psClassifierRules = NULL;
UINT nStatus = PHS_SUCCESS;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,"Inside CreateClassifierPHSRule");
@@ -1227,7 +1227,7 @@ static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
{
if(psClassifierRules->pstPhsRule == NULL)
{
- psClassifierRules->pstPhsRule = kmalloc(sizeof(S_PHS_RULE),GFP_KERNEL);
+ psClassifierRules->pstPhsRule = kmalloc(sizeof(struct bcm_phs_rule),GFP_KERNEL);
if(NULL == psClassifierRules->pstPhsRule)
return ERR_PHSRULE_MEMALLOC_FAIL;
@@ -1240,7 +1240,7 @@ static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
/* Update The PHS rule */
memcpy(psClassifierRules->pstPhsRule,
- psPhsRule, sizeof(S_PHS_RULE));
+ psPhsRule, sizeof(struct bcm_phs_rule));
}
else
{
@@ -1252,11 +1252,11 @@ static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
- IN S_CLASSIFIER_ENTRY *pstClassifierEntry,
- S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,
+ IN struct bcm_phs_classifier_entry *pstClassifierEntry,
+ struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *psPhsRule,
B_UINT8 u8AssociatedPHSI)
{
- S_PHS_RULE *pstAddPhsRule = NULL;
+ struct bcm_phs_rule *pstAddPhsRule = NULL;
UINT nPhsRuleIndex = 0;
BOOLEAN bPHSRuleOrphaned = FALSE;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -1281,13 +1281,13 @@ static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
//Step 2.a PHS Rule Does Not Exist .Create New PHS Rule for uiClsId
if(FALSE == bPHSRuleOrphaned)
{
- pstClassifierEntry->pstPhsRule = kmalloc(sizeof(S_PHS_RULE), GFP_KERNEL);
+ pstClassifierEntry->pstPhsRule = kmalloc(sizeof(struct bcm_phs_rule), GFP_KERNEL);
if(NULL == pstClassifierEntry->pstPhsRule)
{
return ERR_PHSRULE_MEMALLOC_FAIL;
}
}
- memcpy(pstClassifierEntry->pstPhsRule, psPhsRule, sizeof(S_PHS_RULE));
+ memcpy(pstClassifierEntry->pstPhsRule, psPhsRule, sizeof(struct bcm_phs_rule));
}
else
@@ -1312,7 +1312,7 @@ static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
}
-static BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule)
+static BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *pstPhsRule)
{
if(pstPhsRule==NULL)
return FALSE;
@@ -1331,14 +1331,14 @@ static BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifi
}
}
-void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension)
+void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension)
{
int i,j,k,l;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n Dumping PHS Rules : \n");
for(i=0;i<MAX_SERVICEFLOWS;i++)
{
- S_SERVICEFLOW_ENTRY stServFlowEntry =
+ struct bcm_phs_entry stServFlowEntry =
pDeviceExtension->pstServiceFlowPhsRulesTable->stSFList[i];
if(stServFlowEntry.bUsed)
{
@@ -1346,7 +1346,7 @@ void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension)
{
for(l=0;l<2;l++)
{
- S_CLASSIFIER_ENTRY stClsEntry;
+ struct bcm_phs_classifier_entry stClsEntry;
if(l==0)
{
stClsEntry = stServFlowEntry.pstClassifierTable->stActivePhsRulesList[j];
@@ -1408,10 +1408,10 @@ void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension)
//-----------------------------------------------------------------------------
int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,
- S_PHS_RULE *decomp_phs_rules,UINT *header_size)
+ struct bcm_phs_rule *decomp_phs_rules, UINT *header_size)
{
int phss,size=0;
- S_PHS_RULE *tmp_memb;
+ struct bcm_phs_rule *tmp_memb;
int bit,i=0;
unsigned char *phsf,*phsm;
int in_buf_len = *header_size-1;
@@ -1490,7 +1490,7 @@ int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,
// size-The number of bytes copied into the output buffer i.e dynamic fields
// 0 -If PHS rule is NULL.If PHSV field is not set.If the verification fails.
//-----------------------------------------------------------------------------
-static int phs_compress(S_PHS_RULE *phs_rule,unsigned char *in_buf
+static int phs_compress(struct bcm_phs_rule *phs_rule, unsigned char *in_buf
,unsigned char *out_buf,UINT *header_size,UINT *new_header_size)
{
unsigned char *old_addr = out_buf;
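
Several hunks above (PhsDeletePHSRule, PhsDeleteSFRules) share one idiom: a PHS rule can be referenced from more than one classifier entry, so u8RefCnt is decremented and the rule is freed only when the count reaches zero. A stand-alone sketch of that release pattern (put_phs_rule_example is an illustrative name, not a driver function):

    #include <stdio.h>
    #include <stdlib.h>

    struct phs_rule_example {
    	unsigned char u8PHSI;
    	unsigned char u8RefCnt;	/* how many classifier entries still point here */
    };

    /* drop one reference; free the rule once nobody references it any more */
    static void put_phs_rule_example(struct phs_rule_example **rule)
    {
    	if (!*rule)
    		return;
    	if ((*rule)->u8RefCnt)
    		(*rule)->u8RefCnt--;
    	if ((*rule)->u8RefCnt == 0) {
    		free(*rule);
    		*rule = NULL;
    	}
    }

    int main(void)
    {
    	struct phs_rule_example *rule = calloc(1, sizeof(*rule));

    	if (!rule)
    		return 1;
    	rule->u8RefCnt = 2;		/* two classifiers share the rule */
    	put_phs_rule_example(&rule);	/* still alive, refcount now 1 */
    	put_phs_rule_example(&rule);	/* last user gone, rule freed */
    	printf("rule pointer is %s\n", rule ? "set" : "NULL");
    	return 0;
    }
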
diff --git a/drivers/staging/bcm/PHSModule.h b/drivers/staging/bcm/PHSModule.h
index b5f21157ac47..82d868284180 100644
--- a/drivers/staging/bcm/PHSModule.h
+++ b/drivers/staging/bcm/PHSModule.h
@@ -22,15 +22,15 @@ void DumpDataPacketHeader(PUCHAR pPkt);
void DumpFullPacket(UCHAR *pBuf,UINT nPktLen);
-void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension);
+void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension);
-int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension,struct bcm_mini_adapter *Adapter);
+int phs_init(struct bcm_phs_extension *pPhsdeviceExtension,struct bcm_mini_adapter *Adapter);
-int PhsCleanup(PPHS_DEVICE_EXTENSION pPHSDeviceExt);
+int PhsCleanup(struct bcm_phs_extension *pPHSDeviceExt);
//Utility Functions
-ULONG PhsUpdateClassifierRule(void* pvContext,B_UINT16 uiVcid,B_UINT16 uiClsId,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI );
+ULONG PhsUpdateClassifierRule(void* pvContext,B_UINT16 uiVcid,B_UINT16 uiClsId, struct bcm_phs_rule *psPhsRule,B_UINT8 u8AssociatedPHSI );
ULONG PhsDeletePHSRule(void* pvContext,B_UINT16 uiVcid,B_UINT8 u8PHSI);
@@ -39,12 +39,12 @@ ULONG PhsDeleteClassifierRule(void* pvContext, B_UINT16 uiVcid ,B_UINT16 uiClsI
ULONG PhsDeleteSFRules(void* pvContext,B_UINT16 uiVcid) ;
-BOOLEAN ValidatePHSRule(S_PHS_RULE *psPhsRule);
+BOOLEAN ValidatePHSRule(struct bcm_phs_rule *psPhsRule);
-UINT GetServiceFlowEntry(S_SERVICEFLOW_TABLE *psServiceFlowTable,B_UINT16 uiVcid,S_SERVICEFLOW_ENTRY **ppstServiceFlowEntry);
+UINT GetServiceFlowEntry(struct bcm_phs_table *psServiceFlowTable,B_UINT16 uiVcid, struct bcm_phs_entry **ppstServiceFlowEntry);
-void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension);
+void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension);
#endif
diff --git a/drivers/staging/bcm/Protocol.h b/drivers/staging/bcm/Protocol.h
index 562d4dd81a7c..9818128d9320 100644
--- a/drivers/staging/bcm/Protocol.h
+++ b/drivers/staging/bcm/Protocol.h
@@ -1,98 +1,83 @@
/************************************
-* Protocol.h
+* Protocol.h
*************************************/
#ifndef __PROTOCOL_H__
#define __PROTOCOL_H__
-
-#define IPV4 4
-#define IPV6 6
-
+#define IPV4 4
+#define IPV6 6
struct ArpHeader {
- struct arphdr arp;
- unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
- unsigned char ar_sip[4]; /* sender IP address */
- unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
- unsigned char ar_tip[4]; /* target IP address */
-}/*__attribute__((packed))*/;
-
-
-struct TransportHeaderT
-{
- union
- {
+ struct arphdr arp;
+ unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
+ unsigned char ar_sip[4]; /* sender IP address */
+ unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
+ unsigned char ar_tip[4]; /* target IP address */
+};
+
+struct bcm_transport_header {
+ union {
struct udphdr uhdr;
struct tcphdr thdr;
};
-} __attribute__((packed));
-typedef struct TransportHeaderT xporthdr;
-
+} __packed;
-typedef enum _E_NWPKT_IPFRAME_TYPE
-{
+enum bcm_ip_frame_type {
eNonIPPacket,
eIPv4Packet,
eIPv6Packet
-}E_NWPKT_IPFRAME_TYPE;
+};
-typedef enum _E_NWPKT_ETHFRAME_TYPE
-{
+enum bcm_eth_frame_type {
eEthUnsupportedFrame,
eEth802LLCFrame,
eEth802LLCSNAPFrame,
eEth802QVLANFrame,
eEthOtherFrame
-} E_NWPKT_ETHFRAME_TYPE;
-
-typedef struct _S_ETHCS_PKT_INFO
-{
- E_NWPKT_IPFRAME_TYPE eNwpktIPFrameType;
- E_NWPKT_ETHFRAME_TYPE eNwpktEthFrameType;
- USHORT usEtherType;
- UCHAR ucDSAP;
-}S_ETHCS_PKT_INFO,*PS_ETHCS_PKT_INFO;
-
-typedef struct _ETH_CS_802_Q_FRAME
-{
+};
+
+struct bcm_eth_packet_info {
+ enum bcm_ip_frame_type eNwpktIPFrameType;
+ enum bcm_eth_frame_type eNwpktEthFrameType;
+ unsigned short usEtherType;
+ unsigned char ucDSAP;
+};
+
+struct bcm_eth_q_frame {
struct bcm_eth_header EThHdr;
- USHORT UserPriority:3;
- USHORT CFI:1;
- USHORT VLANID:12;
- USHORT EthType;
-} __attribute__((packed)) ETH_CS_802_Q_FRAME;
-
-typedef struct _ETH_CS_802_LLC_FRAME
-{
+ unsigned short UserPriority:3;
+ unsigned short CFI:1;
+ unsigned short VLANID:12;
+ unsigned short EthType;
+} __packed;
+
+struct bcm_eth_llc_frame {
struct bcm_eth_header EThHdr;
unsigned char DSAP;
unsigned char SSAP;
unsigned char Control;
-}__attribute__((packed)) ETH_CS_802_LLC_FRAME;
+} __packed;
-typedef struct _ETH_CS_802_LLC_SNAP_FRAME
-{
+struct bcm_eth_llc_snap_frame {
struct bcm_eth_header EThHdr;
unsigned char DSAP;
unsigned char SSAP;
unsigned char Control;
unsigned char OUI[3];
unsigned short usEtherType;
-} __attribute__((packed)) ETH_CS_802_LLC_SNAP_FRAME;
+} __packed;
-typedef struct _ETH_CS_ETH2_FRAME
-{
+struct bcm_ethernet2_frame {
struct bcm_eth_header EThHdr;
-} __attribute__((packed)) ETH_CS_ETH2_FRAME;
+} __packed;
#define ETHERNET_FRAMETYPE_IPV4 ntohs(0x0800)
-#define ETHERNET_FRAMETYPE_IPV6 ntohs(0x86dd)
-#define ETHERNET_FRAMETYPE_802QVLAN ntohs(0x8100)
+#define ETHERNET_FRAMETYPE_IPV6 ntohs(0x86dd)
+#define ETHERNET_FRAMETYPE_802QVLAN ntohs(0x8100)
-//Per SF CS Specification Encodings
-typedef enum _E_SERVICEFLOW_CS_SPEC_
-{
- eCSSpecUnspecified =0,
+/* Per SF CS Specification Encodings */
+enum bcm_spec_encoding {
+ eCSSpecUnspecified = 0,
eCSPacketIPV4,
eCSPacketIPV6,
eCS802_3PacketEthernet,
@@ -102,50 +87,42 @@ typedef enum _E_SERVICEFLOW_CS_SPEC_
eCSPacketIPV4Over802_1QVLAN,
eCSPacketIPV6Over802_1QVLAN,
eCSPacketUnsupported
-}E_SERVICEFLOW_CS_SPEC;
-
-
-#define IP6_HEADER_LEN 40
-
-#define IP_VERSION(byte) (((byte&0xF0)>>4))
-
+};
+#define IP6_HEADER_LEN 40
+#define IP_VERSION(byte) (((byte&0xF0)>>4))
#define MAC_ADDRESS_SIZE 6
-#define ETH_AND_IP_HEADER_LEN 14 + 20
-#define L4_SRC_PORT_LEN 2
-#define L4_DEST_PORT_LEN 2
-
-
-
-#define CTRL_PKT_LEN 8 + ETH_AND_IP_HEADER_LEN
-
-#define ETH_ARP_FRAME 0x806
-#define ETH_IPV4_FRAME 0x800
-#define ETH_IPV6_FRAME 0x86DD
-#define UDP 0x11
-#define TCP 0x06
-
-#define ARP_OP_REQUEST 0x01
-#define ARP_OP_REPLY 0x02
-#define ARP_PKT_SIZE 60
-
-// This is the format for the TCP packet header
-typedef struct _TCP_HEADER
-{
- USHORT usSrcPort;
- USHORT usDestPort;
- ULONG ulSeqNumber;
- ULONG ulAckNumber;
- UCHAR HeaderLength;
- UCHAR ucFlags;
- USHORT usWindowsSize;
- USHORT usChkSum;
- USHORT usUrgetPtr;
-} TCP_HEADER,*PTCP_HEADER;
-#define TCP_HEADER_LEN sizeof(TCP_HEADER)
-#define TCP_ACK 0x10 //Bit 4 in tcpflags field.
+#define ETH_AND_IP_HEADER_LEN (14 + 20)
+#define L4_SRC_PORT_LEN 2
+#define L4_DEST_PORT_LEN 2
+#define CTRL_PKT_LEN (8 + ETH_AND_IP_HEADER_LEN)
+
+#define ETH_ARP_FRAME 0x806
+#define ETH_IPV4_FRAME 0x800
+#define ETH_IPV6_FRAME 0x86DD
+#define UDP 0x11
+#define TCP 0x06
+
+#define ARP_OP_REQUEST 0x01
+#define ARP_OP_REPLY 0x02
+#define ARP_PKT_SIZE 60
+
+/* This is the format for the TCP packet header */
+struct bcm_tcp_header {
+ unsigned short usSrcPort;
+ unsigned short usDestPort;
+ unsigned long ulSeqNumber;
+ unsigned long ulAckNumber;
+ unsigned char HeaderLength;
+ unsigned char ucFlags;
+ unsigned short usWindowsSize;
+ unsigned short usChkSum;
+ unsigned short usUrgetPtr;
+};
+
+#define TCP_HEADER_LEN sizeof(struct bcm_tcp_header)
+#define TCP_ACK 0x10 /* Bit 4 in tcpflags field. */
#define GET_TCP_HEADER_LEN(byte) ((byte&0xF0)>>4)
-
-#endif //__PROTOCOL_H__
+#endif /* __PROTOCOL_H__ */
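
The frame structures above only swap the spelling __attribute__((packed)) for the kernel's __packed shorthand; the no-padding layout guarantee is unchanged. A quick host-side check of that guarantee as a sketch (the local #define stands in for the kernel's compiler attribute header, which is an assumption for building outside the kernel):

    #include <stdio.h>

    #ifndef __packed
    #define __packed __attribute__((packed))	/* stand-in for the kernel macro */
    #endif

    struct llc_snap_example {
    	unsigned char dsap;
    	unsigned char ssap;
    	unsigned char control;
    	unsigned char oui[3];
    	unsigned short ethertype;
    } __packed;

    int main(void)
    {
    	/* 8 bytes: no padding is inserted before the 16-bit EtherType field */
    	printf("sizeof(struct llc_snap_example) = %zu\n",
    	       sizeof(struct llc_snap_example));
    	return 0;
    }
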
diff --git a/drivers/staging/bcm/Prototypes.h b/drivers/staging/bcm/Prototypes.h
index 90dbe0f4785e..2a673b125f00 100644
--- a/drivers/staging/bcm/Prototypes.h
+++ b/drivers/staging/bcm/Prototypes.h
@@ -33,9 +33,9 @@ INT SearchSfid(struct bcm_mini_adapter *Adapter,UINT uiSfid);
USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb);
-BOOLEAN MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort);
-BOOLEAN MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort);
-BOOLEAN MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtocol);
+bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort);
+bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort);
+bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtocol);
INT SetupNextSend(struct bcm_mini_adapter *Adapter, /**<Logical Adapter*/
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 1b857bd887f0..8d142a547e7f 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -4,8 +4,8 @@ This file contains the routines related to Quality of Service.
*/
#include "headers.h"
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload,PS_ETHCS_PKT_INFO pstEthCsPktInfo);
-static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo,struct bcm_classifier_rule *pstClassifierRule, B_UINT8 EthCSCupport);
+static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload, struct bcm_eth_packet_info *pstEthCsPktInfo);
+static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo,struct bcm_classifier_rule *pstClassifierRule, B_UINT8 EthCSCupport);
static USHORT IpVersion4(struct bcm_mini_adapter *Adapter, struct iphdr *iphd,
struct bcm_classifier_rule *pstClassifierRule );
@@ -117,7 +117,7 @@ BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucTypeOfSer
*
* Returns - TRUE(If address matches) else FAIL.
****************************************************************************/
-BOOLEAN MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtocol)
+bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtocol)
{
UCHAR ucLoopIndex=0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -146,7 +146,7 @@ BOOLEAN MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProt
*
* Returns - TRUE(If address matches) else FAIL.
***************************************************************************/
-BOOLEAN MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort)
+bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort)
{
UCHAR ucLoopIndex=0;
@@ -178,7 +178,7 @@ BOOLEAN MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrc
*
* Returns - TRUE(If address matches) else FAIL.
***************************************************************************/
-BOOLEAN MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushDestPort)
+bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushDestPort)
{
UCHAR ucLoopIndex=0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -208,12 +208,12 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
struct iphdr *iphd,
struct bcm_classifier_rule *pstClassifierRule)
{
- xporthdr *xprt_hdr=NULL;
+ struct bcm_transport_header *xprt_hdr = NULL;
BOOLEAN bClassificationSucceed=FALSE;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "========>");
- xprt_hdr=(xporthdr *)((PUCHAR)iphd + sizeof(struct iphdr));
+ xprt_hdr=(struct bcm_transport_header *)((PUCHAR)iphd + sizeof(struct iphdr));
do {
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to see Direction = %d %d",
@@ -446,7 +446,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
{
INT uiLoopIndex=0;
struct bcm_classifier_rule *pstClassifierRule = NULL;
- S_ETHCS_PKT_INFO stEthCsPktInfo;
+ struct bcm_eth_packet_info stEthCsPktInfo;
PVOID pvEThPayload = NULL;
struct iphdr *pIpHeader = NULL;
INT uiSfIndex=0;
@@ -454,7 +454,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
BOOLEAN bFragmentedPkt=FALSE,bClassificationSucceed=FALSE;
USHORT usCurrFragment =0;
- PTCP_HEADER pTcpHeader;
+ struct bcm_tcp_header *pTcpHeader;
UCHAR IpHeaderLength;
UCHAR TcpHeaderLength;
@@ -467,32 +467,32 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
case eEth802LLCFrame:
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ClassifyPacket : 802LLCFrame\n");
- pIpHeader = pvEThPayload + sizeof(ETH_CS_802_LLC_FRAME);
+ pIpHeader = pvEThPayload + sizeof(struct bcm_eth_llc_frame);
break;
}
case eEth802LLCSNAPFrame:
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ClassifyPacket : 802LLC SNAP Frame\n");
- pIpHeader = pvEThPayload + sizeof(ETH_CS_802_LLC_SNAP_FRAME);
+ pIpHeader = pvEThPayload + sizeof(struct bcm_eth_llc_snap_frame);
break;
}
case eEth802QVLANFrame:
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ClassifyPacket : 802.1Q VLANFrame\n");
- pIpHeader = pvEThPayload + sizeof(ETH_CS_802_Q_FRAME);
+ pIpHeader = pvEThPayload + sizeof(struct bcm_eth_q_frame);
break;
}
case eEthOtherFrame:
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ClassifyPacket : ETH Other Frame\n");
- pIpHeader = pvEThPayload + sizeof(ETH_CS_ETH2_FRAME);
+ pIpHeader = pvEThPayload + sizeof(struct bcm_ethernet2_frame);
break;
}
default:
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ClassifyPacket : Unrecognized ETH Frame\n");
- pIpHeader = pvEThPayload + sizeof(ETH_CS_ETH2_FRAME);
+ pIpHeader = pvEThPayload + sizeof(struct bcm_ethernet2_frame);
break;
}
}
@@ -614,7 +614,7 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
if((TCP == pIpHeader->protocol ) && !bFragmentedPkt && (ETH_AND_IP_HEADER_LEN + TCP_HEADER_LEN <= skb->len) )
{
IpHeaderLength = pIpHeader->ihl;
- pTcpHeader = (PTCP_HEADER)(((PUCHAR)pIpHeader)+(IpHeaderLength*4));
+ pTcpHeader = (struct bcm_tcp_header *)(((PUCHAR)pIpHeader)+(IpHeaderLength*4));
TcpHeaderLength = GET_TCP_HEADER_LEN(pTcpHeader->HeaderLength);
if((pTcpHeader->ucFlags & TCP_ACK) &&
@@ -683,7 +683,7 @@ static BOOLEAN EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifie
return TRUE;
}
-static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo)
+static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
if((pstClassifierRule->ucEtherTypeLen==0)||
@@ -718,7 +718,7 @@ static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRul
}
-static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo)
+static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
{
BOOLEAN bClassificationSucceed = FALSE;
USHORT usVLANID;
@@ -769,7 +769,7 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,struct sk_buff* skb,
- PS_ETHCS_PKT_INFO pstEthCsPktInfo,
+ struct bcm_eth_packet_info *pstEthCsPktInfo,
struct bcm_classifier_rule *pstClassifierRule,
B_UINT8 EthCSCupport)
{
@@ -802,7 +802,7 @@ static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,struct sk_buff*
}
static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload,
- PS_ETHCS_PKT_INFO pstEthCsPktInfo)
+ struct bcm_eth_packet_info *pstEthCsPktInfo)
{
USHORT u16Etype = ntohs(((struct bcm_eth_header *)pvEthPayload)->u16Etype);
@@ -815,7 +815,7 @@ static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload,
{
//802.1Q VLAN Header
pstEthCsPktInfo->eNwpktEthFrameType = eEth802QVLANFrame;
- u16Etype = ((ETH_CS_802_Q_FRAME*)pvEthPayload)->EthType;
+ u16Etype = ((struct bcm_eth_q_frame *)pvEthPayload)->EthType;
//((ETH_CS_802_Q_FRAME*)pvEthPayload)->UserPriority
}
else
@@ -830,12 +830,12 @@ static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload,
//802.2 LLC
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "802.2 LLC Frame \n");
pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCFrame;
- pstEthCsPktInfo->ucDSAP = ((ETH_CS_802_LLC_FRAME*)pvEthPayload)->DSAP;
- if(pstEthCsPktInfo->ucDSAP == 0xAA && ((ETH_CS_802_LLC_FRAME*)pvEthPayload)->SSAP == 0xAA)
+ pstEthCsPktInfo->ucDSAP = ((struct bcm_eth_llc_frame *)pvEthPayload)->DSAP;
+ if(pstEthCsPktInfo->ucDSAP == 0xAA && ((struct bcm_eth_llc_frame *)pvEthPayload)->SSAP == 0xAA)
{
//SNAP Frame
pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCSNAPFrame;
- u16Etype = ((ETH_CS_802_LLC_SNAP_FRAME*)pvEthPayload)->usEtherType;
+ u16Etype = ((struct bcm_eth_llc_snap_frame *)pvEthPayload)->usEtherType;
}
}
if(u16Etype == ETHERNET_FRAMETYPE_IPV4)
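
EThCSGetPktInfo() in the hunks above walks the Ethernet payload in the usual order: a 0x8100 EtherType means an 802.1Q tag and the real EtherType follows it, a large value is a plain Ethernet II frame, and a small 802.3 length field with DSAP/SSAP both 0xAA signals an LLC/SNAP header carrying the EtherType. A stand-alone sketch of that decision only (the 0x0600 threshold, names, and host-order handling are simplifications, not the driver's exact checks):

    #include <stdio.h>

    enum frame_type_example { FRAME_ETH2, FRAME_8021Q, FRAME_LLC, FRAME_LLC_SNAP };

    /* ethertype: the 16-bit type/length field, already converted to host order */
    static enum frame_type_example classify(unsigned short ethertype,
    					unsigned char dsap, unsigned char ssap)
    {
    	if (ethertype == 0x8100)		/* 802.1Q VLAN tag */
    		return FRAME_8021Q;
    	if (ethertype >= 0x0600)		/* DIX/Ethernet II EtherType */
    		return FRAME_ETH2;
    	if (dsap == 0xAA && ssap == 0xAA)	/* 802.3 length + LLC/SNAP */
    		return FRAME_LLC_SNAP;
    	return FRAME_LLC;			/* plain 802.2 LLC */
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       classify(0x0800, 0, 0),		/* IPv4 over Ethernet II */
    	       classify(0x8100, 0, 0),		/* VLAN tagged */
    	       classify(0x0040, 0xAA, 0xAA));	/* short length, SNAP */
    	return 0;
    }
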
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
index 3c5f4a5f0376..f55300db1d48 100644
--- a/drivers/staging/bcm/hostmibs.c
+++ b/drivers/staging/bcm/hostmibs.c
@@ -11,11 +11,11 @@
INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_mibs *pstHostMibs)
{
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_PHS_RULE *pstPhsRule = NULL;
- S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
- S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
- PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION) &Adapter->stBCMPhsContext;
+ struct bcm_phs_entry *pstServiceFlowEntry = NULL;
+ struct bcm_phs_rule *pstPhsRule = NULL;
+ struct bcm_phs_classifier_table *pstClassifierTable = NULL;
+ struct bcm_phs_classifier_entry *pstClassifierRule = NULL;
+ struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *) &Adapter->stBCMPhsContext;
UINT nClassifierIndex = 0, nPhsTableIndex = 0, nSfIndex = 0, uiIndex = 0;
@@ -70,7 +70,7 @@ INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_m
memcpy(&pstHostMibs->
astPhsRulesTable[nPhsTableIndex].u8PHSI,
- &pstPhsRule->u8PHSI, sizeof(S_PHS_RULE));
+ &pstPhsRule->u8PHSI, sizeof(struct bcm_phs_rule));
nPhsTableIndex++;
}
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
index 252a1b31d618..05a948a3698c 100644
--- a/drivers/staging/bcm/led_control.c
+++ b/drivers/staging/bcm/led_control.c
@@ -24,7 +24,7 @@ BOOLEAN IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios)
}
static INT LED_Blink(struct bcm_mini_adapter *Adapter, UINT GPIO_Num, UCHAR uiLedIndex,
- ULONG timeout, INT num_of_time, LedEventInfo_t currdriverstate)
+ ULONG timeout, INT num_of_time, enum bcm_led_events currdriverstate)
{
int Status = STATUS_SUCCESS;
BOOLEAN bInfinite = FALSE;
@@ -97,7 +97,7 @@ static INT ScaleRateofTransfer(ULONG rate)
static INT LED_Proportional_Blink(struct bcm_mini_adapter *Adapter, UCHAR GPIO_Num_tx,
UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex,
- LedEventInfo_t currdriverstate)
+ enum bcm_led_events currdriverstate)
{
/* Initial values of TX and RX packets */
ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0;
@@ -607,7 +607,7 @@ static VOID LedGpioInit(struct bcm_mini_adapter *Adapter)
static INT BcmGetGPIOPinInfo(struct bcm_mini_adapter *Adapter, UCHAR *GPIO_num_tx,
UCHAR *GPIO_num_rx, UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex,
- LedEventInfo_t currdriverstate)
+ enum bcm_led_events currdriverstate)
{
UINT uiIndex = 0;
@@ -651,7 +651,7 @@ static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
UCHAR GPIO_num = 0;
UCHAR uiLedIndex = 0;
UINT uiResetValue = 0;
- LedEventInfo_t currdriverstate = 0;
+ enum bcm_led_events currdriverstate = 0;
ulong timeout = 0;
INT Status = 0;
diff --git a/drivers/staging/bcm/led_control.h b/drivers/staging/bcm/led_control.h
index ed8fbc091115..bae40e22e11b 100644
--- a/drivers/staging/bcm/led_control.h
+++ b/drivers/staging/bcm/led_control.h
@@ -1,102 +1,84 @@
#ifndef _LED_CONTROL_H
#define _LED_CONTROL_H
-/*************************TYPE DEF**********************/
-#define NUM_OF_LEDS 4
-
+#define NUM_OF_LEDS 4
#define DSD_START_OFFSET 0x0200
#define EEPROM_VERSION_OFFSET 0x020E
#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
#define GPIO_SECTION_START_OFFSET 0x03
-
-#define COMPATIBILITY_SECTION_LENGTH 42
-#define COMPATIBILITY_SECTION_LENGTH_MAP5 84
-
-
-#define EEPROM_MAP5_MAJORVERSION 5
-#define EEPROM_MAP5_MINORVERSION 0
-
-
+#define COMPATIBILITY_SECTION_LENGTH 42
+#define COMPATIBILITY_SECTION_LENGTH_MAP5 84
+#define EEPROM_MAP5_MAJORVERSION 5
+#define EEPROM_MAP5_MINORVERSION 0
#define MAX_NUM_OF_BLINKS 10
#define NUM_OF_GPIO_PINS 16
-
#define DISABLE_GPIO_NUM 0xFF
#define EVENT_SIGNALED 1
-
#define MAX_FILE_NAME_BUFFER_SIZE 100
-#define TURN_ON_LED(GPIO, index) do { \
- UINT gpio_val = GPIO; \
- (Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \
- wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
- } while (0);
-
-#define TURN_OFF_LED(GPIO, index) do { \
- UINT gpio_val = GPIO; \
- (Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \
- wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \
- } while (0);
-
-#define B_ULONG32 unsigned long
-
-/*******************************************************/
-
-
-typedef enum _LEDColors{
- RED_LED = 1,
- BLUE_LED = 2,
- YELLOW_LED = 3,
- GREEN_LED = 4
-} LEDColors; /*Enumerated values of different LED types*/
-
-typedef enum LedEvents {
- SHUTDOWN_EXIT = 0x00,
- DRIVER_INIT = 0x1,
- FW_DOWNLOAD = 0x2,
- FW_DOWNLOAD_DONE = 0x4,
- NO_NETWORK_ENTRY = 0x8,
- NORMAL_OPERATION = 0x10,
- LOWPOWER_MODE_ENTER = 0x20,
- IDLEMODE_CONTINUE = 0x40,
- IDLEMODE_EXIT = 0x80,
- LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread Inactivce. It wil be equivallent to putting the thread on hold. */
- LED_THREAD_ACTIVE = 0x200, /* Makes the LED Thread Active back. */
- DRIVER_HALT = 0xff
-} LedEventInfo_t; /* Enumerated values of different driver states */
+#define TURN_ON_LED(GPIO, index) do { \
+ unsigned int gpio_val = GPIO; \
+ (Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
+ } while (0)
+
+#define TURN_OFF_LED(GPIO, index) do { \
+ unsigned int gpio_val = GPIO; \
+ (Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \
+ } while (0)
+
+enum bcm_led_colors {
+ RED_LED = 1,
+ BLUE_LED = 2,
+ YELLOW_LED = 3,
+ GREEN_LED = 4
+};
+
+enum bcm_led_events {
+ SHUTDOWN_EXIT = 0x00,
+ DRIVER_INIT = 0x1,
+ FW_DOWNLOAD = 0x2,
+ FW_DOWNLOAD_DONE = 0x4,
+ NO_NETWORK_ENTRY = 0x8,
+ NORMAL_OPERATION = 0x10,
+ LOWPOWER_MODE_ENTER = 0x20,
+ IDLEMODE_CONTINUE = 0x40,
+ IDLEMODE_EXIT = 0x80,
+	LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread inactive. It will be equivalent to putting the thread on hold. */
+	LED_THREAD_ACTIVE = 0x200, /* Makes the LED thread active again. */
+ DRIVER_HALT = 0xff
+}; /* Enumerated values of different driver states */
/*
* Structure which stores the information of different LED types
* and corresponding LED state information of driver states
*/
-typedef struct LedStateInfo_t {
- UCHAR LED_Type; /* specify GPIO number - use 0xFF if not used */
- UCHAR LED_On_State; /* Bits set or reset for different states */
- UCHAR LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */
- UCHAR GPIO_Num;
- UCHAR BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */
-} LEDStateInfo, *pLEDStateInfo;
-
-
-typedef struct _LED_INFO_STRUCT {
- LEDStateInfo LEDState[NUM_OF_LEDS];
- BOOLEAN bIdleMode_tx_from_host; /* Variable to notify whether driver came out from idlemode due to Host or target*/
- BOOLEAN bIdle_led_off;
- wait_queue_head_t notify_led_event;
+struct bcm_led_state_info {
+ unsigned char LED_Type; /* specify GPIO number - use 0xFF if not used */
+ unsigned char LED_On_State; /* Bits set or reset for different states */
+ unsigned char LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */
+ unsigned char GPIO_Num;
+ unsigned char BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */
+};
+
+struct bcm_led_info {
+ struct bcm_led_state_info LEDState[NUM_OF_LEDS];
+ bool bIdleMode_tx_from_host; /* Variable to notify whether driver came out from idlemode due to Host or target */
+ bool bIdle_led_off;
+ wait_queue_head_t notify_led_event;
wait_queue_head_t idleModeSyncEvent;
- struct task_struct *led_cntrl_threadid;
- int led_thread_running;
- BOOLEAN bLedInitDone;
+ struct task_struct *led_cntrl_threadid;
+ int led_thread_running;
+ bool bLedInitDone;
+};
-} LED_INFO_STRUCT, *PLED_INFO_STRUCT;
/* LED Thread state. */
-#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */
-#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */
-#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /*LED thread has been put on hold*/
-
-
+#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */
+#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */
+#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /* LED thread has been put on hold */
#endif
-
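
Besides the enum, bool and unsigned-type conversions, the led_control.h hunk drops the stray semicolon after "} while (0)" in TURN_ON_LED/TURN_OFF_LED, so each macro now expands to exactly one statement and the caller supplies the terminating semicolon. A generic sketch of why the baked-in semicolon is harmful; nothing below is driver code.

void do_work(void);
void other_work(void);

#define RUN_BAD()  do { do_work(); } while (0);	/* semicolon baked into the macro */
#define RUN_GOOD() do { do_work(); } while (0)	/* caller adds the semicolon */

void example(int cond)
{
	if (cond)
		RUN_GOOD();	/* one statement; the else below still pairs with the if */
	else
		other_work();

	/*
	 * With RUN_BAD() the if-branch would expand to "do { ... } while (0);;",
	 * i.e. two statements, and the following "else" would no longer have an
	 * open "if" to attach to: the build fails with "else without a previous if".
	 */
}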
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index eab676fe53a6..e6152f4df14b 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -12,7 +12,7 @@ static unsigned int BcmGetFlashSectorSize(struct bcm_mini_adapter *Adapter, unsi
static VOID BcmValidateNvmType(struct bcm_mini_adapter *Adapter);
static int BcmGetNvmSize(struct bcm_mini_adapter *Adapter);
static unsigned int BcmGetFlashSize(struct bcm_mini_adapter *Adapter);
-static NVM_TYPE BcmGetNvmType(struct bcm_mini_adapter *Adapter);
+static enum bcm_nvm_type BcmGetNvmType(struct bcm_mini_adapter *Adapter);
static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal);
@@ -472,7 +472,7 @@ static int BeceemFlashBulkRead(struct bcm_mini_adapter *Adapter,
static unsigned int BcmGetFlashSize(struct bcm_mini_adapter *Adapter)
{
if (IsFlash2x(Adapter))
- return Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(DSD_HEADER);
+ return Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header);
else
return 32 * 1024;
}
@@ -1978,7 +1978,7 @@ int BeceemNVMWrite(struct bcm_mini_adapter *Adapter,
int BcmUpdateSectorSize(struct bcm_mini_adapter *Adapter, unsigned int uiSectorSize)
{
int Status = -1;
- FLASH_CS_INFO sFlashCsInfo = {0};
+ struct bcm_flash_cs_info sFlashCsInfo = {0};
unsigned int uiTemp = 0;
unsigned int uiSectorSig = 0;
unsigned int uiCurrentSectorSize = 0;
@@ -2228,20 +2228,20 @@ int BcmAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter)
BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure point is NULL");
return -EINVAL;
}
- psAdapter->psFlashCSInfo = (PFLASH_CS_INFO)kzalloc(sizeof(FLASH_CS_INFO), GFP_KERNEL);
+ psAdapter->psFlashCSInfo = (struct bcm_flash_cs_info *)kzalloc(sizeof(struct bcm_flash_cs_info), GFP_KERNEL);
if (psAdapter->psFlashCSInfo == NULL) {
BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate memory for Flash 1.x");
return -ENOMEM;
}
- psAdapter->psFlash2xCSInfo = (PFLASH2X_CS_INFO)kzalloc(sizeof(FLASH2X_CS_INFO), GFP_KERNEL);
+ psAdapter->psFlash2xCSInfo = (struct bcm_flash2x_cs_info *)kzalloc(sizeof(struct bcm_flash2x_cs_info), GFP_KERNEL);
if (!psAdapter->psFlash2xCSInfo) {
BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate memory for Flash 2.x");
kfree(psAdapter->psFlashCSInfo);
return -ENOMEM;
}
- psAdapter->psFlash2xVendorInfo = (PFLASH2X_VENDORSPECIFIC_INFO)kzalloc(sizeof(FLASH2X_VENDORSPECIFIC_INFO), GFP_KERNEL);
+ psAdapter->psFlash2xVendorInfo = (struct bcm_flash2x_vendor_info *)kzalloc(sizeof(struct bcm_flash2x_vendor_info), GFP_KERNEL);
if (!psAdapter->psFlash2xVendorInfo) {
BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate Vendor Info Memory for Flash 2.x");
kfree(psAdapter->psFlashCSInfo);
@@ -2264,7 +2264,7 @@ int BcmDeAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter)
return STATUS_SUCCESS;
}
-static int BcmDumpFlash2XCSStructure(PFLASH2X_CS_INFO psFlash2xCSInfo, struct bcm_mini_adapter *Adapter)
+static int BcmDumpFlash2XCSStructure(struct bcm_flash2x_cs_info *psFlash2xCSInfo, struct bcm_mini_adapter *Adapter)
{
unsigned int Index = 0;
@@ -2324,7 +2324,7 @@ static int BcmDumpFlash2XCSStructure(PFLASH2X_CS_INFO psFlash2xCSInfo, struct bc
return STATUS_SUCCESS;
}
-static int ConvertEndianOf2XCSStructure(PFLASH2X_CS_INFO psFlash2xCSInfo)
+static int ConvertEndianOf2XCSStructure(struct bcm_flash2x_cs_info *psFlash2xCSInfo)
{
unsigned int Index = 0;
@@ -2381,7 +2381,7 @@ static int ConvertEndianOf2XCSStructure(PFLASH2X_CS_INFO psFlash2xCSInfo)
return STATUS_SUCCESS;
}
-static int ConvertEndianOfCSStructure(PFLASH_CS_INFO psFlashCSInfo)
+static int ConvertEndianOfCSStructure(struct bcm_flash_cs_info *psFlashCSInfo)
{
/* unsigned int Index = 0; */
psFlashCSInfo->MagicNumber = ntohl(psFlashCSInfo->MagicNumber);
@@ -2446,7 +2446,7 @@ static VOID UpdateVendorInfo(struct bcm_mini_adapter *Adapter)
switch (i) {
case DSD0:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(DSD_HEADER))) &&
+ if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
(UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd = VENDOR_PTR_IN_CS;
else
@@ -2454,7 +2454,7 @@ static VOID UpdateVendorInfo(struct bcm_mini_adapter *Adapter)
break;
case DSD1:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(DSD_HEADER))) &&
+ if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
(UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End = VENDOR_PTR_IN_CS;
else
@@ -2462,7 +2462,7 @@ static VOID UpdateVendorInfo(struct bcm_mini_adapter *Adapter)
break;
case DSD2:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(DSD_HEADER))) &&
+ if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
(UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End = VENDOR_PTR_IN_CS;
else
@@ -2509,7 +2509,7 @@ static VOID UpdateVendorInfo(struct bcm_mini_adapter *Adapter)
static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter)
{
- /* FLASH_CS_INFO sFlashCsInfo = {0}; */
+ /* struct bcm_flash_cs_info sFlashCsInfo = {0}; */
#if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
unsigned int value;
@@ -2522,8 +2522,8 @@ static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter)
Adapter->uiFlashBaseAdd = 0;
Adapter->ulFlashCalStart = 0;
- memset(Adapter->psFlashCSInfo, 0 , sizeof(FLASH_CS_INFO));
- memset(Adapter->psFlash2xCSInfo, 0 , sizeof(FLASH2X_CS_INFO));
+ memset(Adapter->psFlashCSInfo, 0 , sizeof(struct bcm_flash_cs_info));
+ memset(Adapter->psFlash2xCSInfo, 0 , sizeof(struct bcm_flash2x_cs_info));
if (!Adapter->bDDRInitDone) {
value = FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT;
@@ -2551,7 +2551,7 @@ static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FLASH LAYOUT MAJOR VERSION :%X", uiFlashLayoutMajorVersion);
if (uiFlashLayoutMajorVersion < FLASH_2X_MAJOR_NUMBER) {
- BeceemFlashBulkRead(Adapter, (PUINT)Adapter->psFlashCSInfo, Adapter->ulFlashControlSectionStart, sizeof(FLASH_CS_INFO));
+ BeceemFlashBulkRead(Adapter, (PUINT)Adapter->psFlashCSInfo, Adapter->ulFlashControlSectionStart, sizeof(struct bcm_flash_cs_info));
ConvertEndianOfCSStructure(Adapter->psFlashCSInfo);
Adapter->ulFlashCalStart = (Adapter->psFlashCSInfo->OffsetFromZeroForCalibrationStart);
@@ -2576,7 +2576,7 @@ static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter)
Adapter->uiFlashBaseAdd = Adapter->psFlashCSInfo->FlashBaseAddr & 0xFCFFFFFF;
} else {
if (BcmFlash2xBulkRead(Adapter, (PUINT)Adapter->psFlash2xCSInfo, NO_SECTION_VAL,
- Adapter->ulFlashControlSectionStart, sizeof(FLASH2X_CS_INFO))) {
+ Adapter->ulFlashControlSectionStart, sizeof(struct bcm_flash2x_cs_info))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to read CS structure\n");
return STATUS_FAILURE;
}
@@ -2629,7 +2629,7 @@ static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter)
*
*/
-static NVM_TYPE BcmGetNvmType(struct bcm_mini_adapter *Adapter)
+static enum bcm_nvm_type BcmGetNvmType(struct bcm_mini_adapter *Adapter)
{
unsigned int uiData = 0;
@@ -2810,6 +2810,7 @@ int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x
case CONTROL_SECTION:
/* Not Clear So Putting failure. confirm and fix it. */
SectEndOffset = STATUS_FAILURE;
+ break;
case ISO_IMAGE1_PART2:
if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End != UNINIT_PTR_IN_CS)
SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End);
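
The added break here is a behaviour fix, not a rename: without it, the CONTROL_SECTION case fell straight through into ISO_IMAGE1_PART2, and the STATUS_FAILURE result could be silently replaced by the ISO end offset. A generic illustration of that bug class (not the driver's actual switch):

#define NOT_SUPPORTED	(-1)

int section_end_offset(int section, int iso_end_initialised, int iso_end)
{
	int off = 0;

	switch (section) {
	case 1:				/* section we deliberately do not support */
		off = NOT_SUPPORTED;
		break;			/* without this break, control falls into case 2 ... */
	case 2:
		if (iso_end_initialised)
			off = iso_end;	/* ... and overwrites NOT_SUPPORTED with a real offset */
		break;
	}
	return off;
}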
@@ -3101,7 +3102,7 @@ static int BcmDumpFlash2xSectionBitMap(struct bcm_flash2x_bitmap *psFlash2xBitMa
int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_bitmap *psFlash2xBitMap)
{
- PFLASH2X_CS_INFO psFlash2xCSInfo = Adapter->psFlash2xCSInfo;
+ struct bcm_flash2x_cs_info *psFlash2xCSInfo = Adapter->psFlash2xCSInfo;
enum bcm_flash2x_section_val uiHighestPriDSD = 0;
enum bcm_flash2x_section_val uiHighestPriISO = 0;
BOOLEAN SetActiveDSDDone = FALSE;
@@ -3354,8 +3355,8 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
unsigned int SectImagePriority = 0;
int Status = STATUS_SUCCESS;
- /* DSD_HEADER sDSD = {0};
- * ISO_HEADER sISO = {0};
+ /* struct bcm_dsd_header sDSD = {0};
+ * struct bcm_iso_header sISO = {0};
*/
int HighestPriDSD = 0 ;
int HighestPriISO = 0;
@@ -3391,7 +3392,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
Status = BcmFlash2xBulkWrite(Adapter,
&SectImagePriority,
HighestPriISO,
- 0 + FIELD_OFFSET_IN_HEADER(PISO_HEADER, ISOImagePriority),
+ 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
SIGNATURE_SIZE,
TRUE);
if (Status) {
@@ -3416,7 +3417,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
Status = BcmFlash2xBulkWrite(Adapter,
&SectImagePriority,
eFlash2xSectVal,
- 0 + FIELD_OFFSET_IN_HEADER(PISO_HEADER, ISOImagePriority),
+ 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
SIGNATURE_SIZE,
TRUE);
if (Status) {
@@ -3452,7 +3453,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
Status = BcmFlash2xBulkWrite(Adapter,
&SectImagePriority,
HighestPriDSD,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(PDSD_HEADER, DSDImagePriority),
+ Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
SIGNATURE_SIZE,
TRUE);
if (Status) {
@@ -3472,7 +3473,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
Status = BcmFlash2xBulkWrite(Adapter,
&SectImagePriority,
HighestPriDSD,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(PDSD_HEADER, DSDImagePriority),
+ Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
SIGNATURE_SIZE,
TRUE);
if (Status) {
@@ -3492,7 +3493,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
Status = BcmFlash2xBulkWrite(Adapter,
&SectImagePriority,
eFlash2xSectVal,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(PDSD_HEADER, DSDImagePriority),
+ Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
SIGNATURE_SIZE,
TRUE);
if (Status) {
@@ -3550,7 +3551,7 @@ int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section
Status = BcmFlash2xBulkRead(Adapter,
&ISOLength,
sCopySectStrut.SrcSection,
- 0 + FIELD_OFFSET_IN_HEADER(PISO_HEADER, ISOImageSize),
+ 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageSize),
4);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO\n");
@@ -3561,7 +3562,7 @@ int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section
if (ISOLength % Adapter->uiSectorSize)
ISOLength = Adapter->uiSectorSize * (1 + ISOLength/Adapter->uiSectorSize);
- sigOffset = FIELD_OFFSET_IN_HEADER(PISO_HEADER, ISOImageMagicNumber);
+ sigOffset = FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber);
Buff = kzalloc(Adapter->uiSectorSize, GFP_KERNEL);
@@ -3846,7 +3847,7 @@ int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sectio
unsigned int uiSignature = 0;
unsigned int uiOffset = 0;
- /* DSD_HEADER dsdHeader = {0}; */
+ /* struct bcm_dsd_header dsdHeader = {0}; */
if (Adapter->bSigCorrupted == FALSE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is not corrupted by driver, hence not restoring\n");
return STATUS_SUCCESS;
@@ -3863,7 +3864,7 @@ int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sectio
uiSignature = htonl(DSD_IMAGE_MAGIC_NUMBER);
uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader;
- uiOffset += FIELD_OFFSET_IN_HEADER(PDSD_HEADER, DSDImageMagicNumber);
+ uiOffset += FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber);
if ((ReadDSDSignature(Adapter, eFlashSectionVal) & 0xFF000000) != CORRUPTED_PATTERN) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Corrupted Pattern is not there. Hence won't write sig");
@@ -3872,7 +3873,7 @@ int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sectio
} else if ((eFlashSectionVal == ISO_IMAGE1) || (eFlashSectionVal == ISO_IMAGE2)) {
uiSignature = htonl(ISO_IMAGE_MAGIC_NUMBER);
/* uiOffset = 0; */
- uiOffset = FIELD_OFFSET_IN_HEADER(PISO_HEADER, ISOImageMagicNumber);
+ uiOffset = FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber);
if ((ReadISOSignature(Adapter, eFlashSectionVal) & 0xFF000000) != CORRUPTED_PATTERN) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Currupted Pattern is not there. Hence won't write sig");
return STATUS_FAILURE;
@@ -4141,14 +4142,14 @@ int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned
(uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD0) - Adapter->uiSectorSize)) {
/* offset from the sector boundary having the header map */
offsetToProtect = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader % Adapter->uiSectorSize;
- HeaderSizeToProtect = sizeof(DSD_HEADER);
+ HeaderSizeToProtect = sizeof(struct bcm_dsd_header);
bHasHeader = TRUE;
}
if (uiSectAlignAddr == BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1) ||
uiSectAlignAddr == BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2)) {
offsetToProtect = 0;
- HeaderSizeToProtect = sizeof(ISO_HEADER);
+ HeaderSizeToProtect = sizeof(struct bcm_iso_header);
bHasHeader = TRUE;
}
/* If Header is present overwrite passed buffer with this */
@@ -4167,7 +4168,7 @@ int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned
kfree(pTempBuff);
}
if (bHasHeader && Adapter->bSigCorrupted) {
- sig = *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(PDSD_HEADER, DSDImageMagicNumber)));
+ sig = *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber)));
sig = ntohl(sig);
if ((sig & 0xFF000000) != CORRUPTED_PATTERN) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Desired pattern is not at sig offset. Hence won't restore");
@@ -4175,7 +4176,7 @@ int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned
return STATUS_SUCCESS;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " Corrupted sig is :%X", sig);
- *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(PDSD_HEADER, DSDImageMagicNumber))) = htonl(DSD_IMAGE_MAGIC_NUMBER);
+ *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber))) = htonl(DSD_IMAGE_MAGIC_NUMBER);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature in Header Write only");
Adapter->bSigCorrupted = FALSE;
}
@@ -4268,7 +4269,7 @@ int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_
{
unsigned int uiDSDsig = 0;
/* unsigned int sigoffsetInMap = 0;
- * DSD_HEADER dsdHeader = {0};
+ * struct bcm_dsd_header dsdHeader = {0};
*/
/* sigoffsetInMap =(PUCHAR)&(dsdHeader.DSDImageMagicNumber) -(PUCHAR)&dsdHeader; */
@@ -4280,7 +4281,7 @@ int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_
BcmFlash2xBulkRead(Adapter,
&uiDSDsig,
dsd,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(PDSD_HEADER, DSDImageMagicNumber),
+ Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber),
SIGNATURE_SIZE);
uiDSDsig = ntohl(uiDSDsig);
@@ -4293,7 +4294,7 @@ int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_v
{
/* unsigned int priOffsetInMap = 0 ; */
unsigned int uiDSDPri = STATUS_FAILURE;
- /* DSD_HEADER dsdHeader = {0};
+ /* struct bcm_dsd_header dsdHeader = {0};
* priOffsetInMap = (PUCHAR)&(dsdHeader.DSDImagePriority) -(PUCHAR)&dsdHeader;
*/
if (IsSectionWritable(Adapter, dsd)) {
@@ -4301,7 +4302,7 @@ int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_v
BcmFlash2xBulkRead(Adapter,
&uiDSDPri,
dsd,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(PDSD_HEADER, DSDImagePriority),
+ Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
4);
uiDSDPri = ntohl(uiDSDPri);
@@ -4348,7 +4349,7 @@ int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_
{
unsigned int uiISOsig = 0;
/* unsigned int sigoffsetInMap = 0;
- * ISO_HEADER ISOHeader = {0};
+ * struct bcm_iso_header ISOHeader = {0};
* sigoffsetInMap =(PUCHAR)&(ISOHeader.ISOImageMagicNumber) -(PUCHAR)&ISOHeader;
*/
if (iso != ISO_IMAGE1 && iso != ISO_IMAGE2) {
@@ -4358,7 +4359,7 @@ int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_
BcmFlash2xBulkRead(Adapter,
&uiISOsig,
iso,
- 0 + FIELD_OFFSET_IN_HEADER(PISO_HEADER, ISOImageMagicNumber),
+ 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber),
SIGNATURE_SIZE);
uiISOsig = ntohl(uiISOsig);
@@ -4375,7 +4376,7 @@ int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_v
BcmFlash2xBulkRead(Adapter,
&ISOPri,
iso,
- 0 + FIELD_OFFSET_IN_HEADER(PISO_HEADER, ISOImagePriority),
+ 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
4);
ISOPri = ntohl(ISOPri);
@@ -4568,7 +4569,7 @@ static int CorruptDSDSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_sect
return -ENOMEM;
}
- uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(DSD_HEADER);
+ uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header);
uiOffset -= MAX_RW_SIZE;
BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, eFlash2xSectionVal, uiOffset, MAX_RW_SIZE);
diff --git a/drivers/staging/bcm/nvm.h b/drivers/staging/bcm/nvm.h
index 651b5a455b32..e765cca5d966 100644
--- a/drivers/staging/bcm/nvm.h
+++ b/drivers/staging/bcm/nvm.h
@@ -1,409 +1,286 @@
/***************************************************************************************
-//
-// Copyright (c) Beceem Communications Inc.
-//
-// Module Name:
-// NVM.h
-//
-// Abstract:
-// This file has the prototypes,preprocessors and definitions various NVM libraries.
-//
-//
-// Revision History:
-// Who When What
-// -------- -------- ----------------------------------------------
-// Name Date Created/reviewed/modified
-//
-// Notes:
-//
-****************************************************************************************/
-
+ *
+ * Copyright (c) Beceem Communications Inc.
+ *
+ * Module Name:
+ * NVM.h
+ *
+ * Abstract:
+ *	This file has the prototypes, preprocessors and definitions of the various NVM libraries.
+ *
+ *
+ * Revision History:
+ * Who When What
+ * -------- -------- ----------------------------------------------
+ * Name Date Created/reviewed/modified
+ *
+ * Notes:
+ *
+ ****************************************************************************************/
#ifndef _NVM_H_
#define _NVM_H_
-typedef struct _FLASH_SECTOR_INFO
-{
- UINT uiSectorSig;
- UINT uiSectorSize;
-
-}FLASH_SECTOR_INFO,*PFLASH_SECTOR_INFO;
-
-typedef struct _FLASH_CS_INFO
-{
- B_UINT32 MagicNumber;
-// let the magic number be 0xBECE-F1A5 - F1A5 for "flas-h"
-
- B_UINT32 FlashLayoutVersion ;
-
- // ISO Image/Format/BuildTool versioning
- B_UINT32 ISOImageVersion;
-
- // SCSI/Flash BootLoader versioning
- B_UINT32 SCSIFirmwareVersion;
-
-
- B_UINT32 OffsetFromZeroForPart1ISOImage;
-// typically 0
-
- B_UINT32 OffsetFromZeroForScsiFirmware;
-//typically at 12MB
-
- B_UINT32 SizeOfScsiFirmware ;
-//size of the firmware - depends on binary size
-
- B_UINT32 OffsetFromZeroForPart2ISOImage;
-// typically at first Word Aligned offset 12MB + sizeOfScsiFirmware.
-
- B_UINT32 OffsetFromZeroForCalibrationStart;
-// typically at 15MB
-
- B_UINT32 OffsetFromZeroForCalibrationEnd;
-
-// VSA0 offsets
- B_UINT32 OffsetFromZeroForVSAStart;
- B_UINT32 OffsetFromZeroForVSAEnd;
-
-// Control Section offsets
- B_UINT32 OffsetFromZeroForControlSectionStart;
- B_UINT32 OffsetFromZeroForControlSectionData;
-
-// NO Data Activity timeout to switch from MSC to NW Mode
- B_UINT32 CDLessInactivityTimeout;
-
-// New ISO Image Signature
- B_UINT32 NewImageSignature;
-
-// Signature to validate the sector size.
- B_UINT32 FlashSectorSizeSig;
-
-// Sector Size
- B_UINT32 FlashSectorSize;
-
-// Write Size Support
- B_UINT32 FlashWriteSupportSize;
-
-// Total Flash Size
- B_UINT32 TotalFlashSize;
-
-// Flash Base Address for offset specified
- B_UINT32 FlashBaseAddr;
-
-// Flash Part Max Size
- B_UINT32 FlashPartMaxSize;
-
-// Is CDLess or Flash Bootloader
- B_UINT32 IsCDLessDeviceBootSig;
-
-// MSC Timeout after reset to switch from MSC to NW Mode
- B_UINT32 MassStorageTimeout;
-
-
-}FLASH_CS_INFO,*PFLASH_CS_INFO;
-
-#define FLASH2X_TOTAL_SIZE (64*1024*1024)
-#define DEFAULT_SECTOR_SIZE (64*1024)
-
-typedef struct _FLASH_2X_CS_INFO
-{
-
- // magic number as 0xBECE-F1A5 - F1A5 for "flas-h"
- B_UINT32 MagicNumber;
-
- B_UINT32 FlashLayoutVersion ;
-
- // ISO Image/Format/BuildTool versioning
- B_UINT32 ISOImageVersion;
-
- // SCSI/Flash BootLoader versioning
- B_UINT32 SCSIFirmwareVersion;
-
- // ISO Image1 Part1/SCSI Firmware/Flash Bootloader Start offset, size
- B_UINT32 OffsetFromZeroForPart1ISOImage;
- B_UINT32 OffsetFromZeroForScsiFirmware;
- B_UINT32 SizeOfScsiFirmware ;
-
- // ISO Image1 Part2 start offset
- B_UINT32 OffsetFromZeroForPart2ISOImage;
-
-
- // DSD0 offset
- B_UINT32 OffsetFromZeroForDSDStart;
- B_UINT32 OffsetFromZeroForDSDEnd;
-
- // VSA0 offset
- B_UINT32 OffsetFromZeroForVSAStart;
- B_UINT32 OffsetFromZeroForVSAEnd;
-
- // Control Section offset
- B_UINT32 OffsetFromZeroForControlSectionStart;
- B_UINT32 OffsetFromZeroForControlSectionData;
-
- // NO Data Activity timeout to switch from MSC to NW Mode
- B_UINT32 CDLessInactivityTimeout;
-
- // New ISO Image Signature
- B_UINT32 NewImageSignature;
-
- B_UINT32 FlashSectorSizeSig; // Sector Size Signature
- B_UINT32 FlashSectorSize; // Sector Size
- B_UINT32 FlashWriteSupportSize; // Write Size Support
-
- B_UINT32 TotalFlashSize; // Total Flash Size
-
- // Flash Base Address for offset specified
- B_UINT32 FlashBaseAddr;
- B_UINT32 FlashPartMaxSize; // Flash Part Max Size
-
- // Is CDLess or Flash Bootloader
- B_UINT32 IsCDLessDeviceBootSig;
-
- // MSC Timeout after reset to switch from MSC to NW Mode
- B_UINT32 MassStorageTimeout;
-
+struct bcm_flash_cs_info {
+ u32 MagicNumber;
+ /* let the magic number be 0xBECE-F1A5 - F1A5 for "flas-h" */
+ u32 FlashLayoutVersion;
+ u32 ISOImageVersion;
+ u32 SCSIFirmwareVersion;
+ u32 OffsetFromZeroForPart1ISOImage;
+ u32 OffsetFromZeroForScsiFirmware;
+ u32 SizeOfScsiFirmware;
+ u32 OffsetFromZeroForPart2ISOImage;
+ u32 OffsetFromZeroForCalibrationStart;
+ u32 OffsetFromZeroForCalibrationEnd;
+ u32 OffsetFromZeroForVSAStart;
+ u32 OffsetFromZeroForVSAEnd;
+ u32 OffsetFromZeroForControlSectionStart;
+ u32 OffsetFromZeroForControlSectionData;
+ u32 CDLessInactivityTimeout;
+ u32 NewImageSignature;
+ u32 FlashSectorSizeSig;
+ u32 FlashSectorSize;
+ u32 FlashWriteSupportSize;
+ u32 TotalFlashSize;
+ u32 FlashBaseAddr;
+ u32 FlashPartMaxSize;
+ u32 IsCDLessDeviceBootSig;
+ /* MSC Timeout after reset to switch from MSC to NW Mode */
+ u32 MassStorageTimeout;
+};
+
+#define FLASH2X_TOTAL_SIZE (64 * 1024 * 1024)
+#define DEFAULT_SECTOR_SIZE (64 * 1024)
+
+struct bcm_flash2x_cs_info {
+ /* magic number as 0xBECE-F1A5 - F1A5 for "flas-h" */
+ u32 MagicNumber;
+ u32 FlashLayoutVersion;
+ u32 ISOImageVersion;
+ u32 SCSIFirmwareVersion;
+ u32 OffsetFromZeroForPart1ISOImage;
+ u32 OffsetFromZeroForScsiFirmware;
+ u32 SizeOfScsiFirmware;
+ u32 OffsetFromZeroForPart2ISOImage;
+ u32 OffsetFromZeroForDSDStart;
+ u32 OffsetFromZeroForDSDEnd;
+ u32 OffsetFromZeroForVSAStart;
+ u32 OffsetFromZeroForVSAEnd;
+ u32 OffsetFromZeroForControlSectionStart;
+ u32 OffsetFromZeroForControlSectionData;
+ /* NO Data Activity timeout to switch from MSC to NW Mode */
+ u32 CDLessInactivityTimeout;
+ u32 NewImageSignature;
+ u32 FlashSectorSizeSig;
+ u32 FlashSectorSize;
+ u32 FlashWriteSupportSize;
+ u32 TotalFlashSize;
+ u32 FlashBaseAddr;
+ u32 FlashPartMaxSize;
+ u32 IsCDLessDeviceBootSig;
+ /* MSC Timeout after reset to switch from MSC to NW Mode */
+ u32 MassStorageTimeout;
/* Flash Map 2.0 Field */
- B_UINT32 OffsetISOImage1Part1Start; // ISO Image1 Part1 offset
- B_UINT32 OffsetISOImage1Part1End;
- B_UINT32 OffsetISOImage1Part2Start; // ISO Image1 Part2 offset
- B_UINT32 OffsetISOImage1Part2End;
- B_UINT32 OffsetISOImage1Part3Start; // ISO Image1 Part3 offset
- B_UINT32 OffsetISOImage1Part3End;
-
- B_UINT32 OffsetISOImage2Part1Start; // ISO Image2 Part1 offset
- B_UINT32 OffsetISOImage2Part1End;
- B_UINT32 OffsetISOImage2Part2Start; // ISO Image2 Part2 offset
- B_UINT32 OffsetISOImage2Part2End;
- B_UINT32 OffsetISOImage2Part3Start; // ISO Image2 Part3 offset
- B_UINT32 OffsetISOImage2Part3End;
-
-
- // DSD Header offset from start of DSD
- B_UINT32 OffsetFromDSDStartForDSDHeader;
- B_UINT32 OffsetFromZeroForDSD1Start; // DSD 1 offset
- B_UINT32 OffsetFromZeroForDSD1End;
- B_UINT32 OffsetFromZeroForDSD2Start; // DSD 2 offset
- B_UINT32 OffsetFromZeroForDSD2End;
-
- B_UINT32 OffsetFromZeroForVSA1Start; // VSA 1 offset
- B_UINT32 OffsetFromZeroForVSA1End;
- B_UINT32 OffsetFromZeroForVSA2Start; // VSA 2 offset
- B_UINT32 OffsetFromZeroForVSA2End;
-
+ u32 OffsetISOImage1Part1Start;
+ u32 OffsetISOImage1Part1End;
+ u32 OffsetISOImage1Part2Start;
+ u32 OffsetISOImage1Part2End;
+ u32 OffsetISOImage1Part3Start;
+ u32 OffsetISOImage1Part3End;
+ u32 OffsetISOImage2Part1Start;
+ u32 OffsetISOImage2Part1End;
+ u32 OffsetISOImage2Part2Start;
+ u32 OffsetISOImage2Part2End;
+ u32 OffsetISOImage2Part3Start;
+ u32 OffsetISOImage2Part3End;
+ /* DSD Header offset from start of DSD */
+ u32 OffsetFromDSDStartForDSDHeader;
+ u32 OffsetFromZeroForDSD1Start;
+ u32 OffsetFromZeroForDSD1End;
+ u32 OffsetFromZeroForDSD2Start;
+ u32 OffsetFromZeroForDSD2End;
+ u32 OffsetFromZeroForVSA1Start;
+ u32 OffsetFromZeroForVSA1End;
+ u32 OffsetFromZeroForVSA2Start;
+ u32 OffsetFromZeroForVSA2End;
/*
-* ACCESS_BITS_PER_SECTOR 2
-* ACCESS_RW 0
-* ACCESS_RO 1
-* ACCESS_RESVD 2
-* ACCESS_RESVD 3
-* */
- B_UINT32 SectorAccessBitMap[FLASH2X_TOTAL_SIZE/(DEFAULT_SECTOR_SIZE *16)];
-
-// All expansions to the control data structure should add here
-
-}FLASH2X_CS_INFO,*PFLASH2X_CS_INFO;
-
-typedef struct _VENDOR_SECTION_INFO
-{
- B_UINT32 OffsetFromZeroForSectionStart;
- B_UINT32 OffsetFromZeroForSectionEnd;
- B_UINT32 AccessFlags;
- B_UINT32 Reserved[16];
-
-} VENDOR_SECTION_INFO, *PVENDOR_SECTION_INFO;
-
-typedef struct _FLASH2X_VENDORSPECIFIC_INFO
-{
- VENDOR_SECTION_INFO VendorSection[TOTAL_SECTIONS];
- B_UINT32 Reserved[16];
-
-} FLASH2X_VENDORSPECIFIC_INFO, *PFLASH2X_VENDORSPECIFIC_INFO;
-
-typedef struct _DSD_HEADER
-{
- B_UINT32 DSDImageSize;
- B_UINT32 DSDImageCRC;
- B_UINT32 DSDImagePriority;
- //We should not consider right now. Reading reserve is worthless.
- B_UINT32 Reserved[252]; // Resvd for DSD Header
- B_UINT32 DSDImageMagicNumber;
-
-}DSD_HEADER, *PDSD_HEADER;
-
-typedef struct _ISO_HEADER
-{
- B_UINT32 ISOImageMagicNumber;
- B_UINT32 ISOImageSize;
- B_UINT32 ISOImageCRC;
- B_UINT32 ISOImagePriority;
- //We should not consider right now. Reading reserve is worthless.
- B_UINT32 Reserved[60]; //Resvd for ISO Header extension
-
-}ISO_HEADER, *PISO_HEADER;
-
-#define EEPROM_BEGIN_CIS (0)
-#define EEPROM_BEGIN_NON_CIS (0x200)
-#define EEPROM_END (0x2000)
-
-#define INIT_PARAMS_SIGNATURE (0x95a7a597)
-
-#define MAX_INIT_PARAMS_LENGTH (2048)
-
-
-#define MAC_ADDRESS_OFFSET 0x200
-
-
-#define INIT_PARAMS_1_SIGNATURE_ADDRESS EEPROM_BEGIN_NON_CIS
-#define INIT_PARAMS_1_DATA_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+16)
-#define INIT_PARAMS_1_MACADDRESS_ADDRESS (MAC_ADDRESS_OFFSET)
-#define INIT_PARAMS_1_LENGTH_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+4)
-
-#define INIT_PARAMS_2_SIGNATURE_ADDRESS (EEPROM_BEGIN_NON_CIS+2048+16)
-#define INIT_PARAMS_2_DATA_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS+16)
-#define INIT_PARAMS_2_MACADDRESS_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS+8)
-#define INIT_PARAMS_2_LENGTH_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS+4)
-
-#define EEPROM_SPI_DEV_CONFIG_REG 0x0F003000
-#define EEPROM_SPI_Q_STATUS1_REG 0x0F003004
-#define EEPROM_SPI_Q_STATUS1_MASK_REG 0x0F00300C
-
-#define EEPROM_SPI_Q_STATUS_REG 0x0F003008
-#define EEPROM_CMDQ_SPI_REG 0x0F003018
-#define EEPROM_WRITE_DATAQ_REG 0x0F00301C
-#define EEPROM_READ_DATAQ_REG 0x0F003020
-#define SPI_FLUSH_REG 0x0F00304C
-
-#define EEPROM_WRITE_ENABLE 0x06000000
-#define EEPROM_READ_STATUS_REGISTER 0x05000000
-#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000
-#define EEPROM_WRITE_QUEUE_EMPTY 0x00001000
-#define EEPROM_WRITE_QUEUE_AVAIL 0x00002000
-#define EEPROM_WRITE_QUEUE_FULL 0x00004000
-#define EEPROM_16_BYTE_PAGE_READ 0xFB000000
-#define EEPROM_4_BYTE_PAGE_READ 0x3B000000
-
-#define EEPROM_CMD_QUEUE_FLUSH 0x00000001
-#define EEPROM_WRITE_QUEUE_FLUSH 0x00000002
-#define EEPROM_READ_QUEUE_FLUSH 0x00000004
-#define EEPROM_ETH_QUEUE_FLUSH 0x00000008
-#define EEPROM_ALL_QUEUE_FLUSH 0x0000000f
-#define EEPROM_READ_ENABLE 0x06000000
-#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000
-#define EEPROM_READ_DATA_FULL 0x00000010
-#define EEPROM_READ_DATA_AVAIL 0x00000020
-#define EEPROM_READ_QUEUE_EMPTY 0x00000002
-#define EEPROM_CMD_QUEUE_EMPTY 0x00000100
-#define EEPROM_CMD_QUEUE_AVAIL 0x00000200
-#define EEPROM_CMD_QUEUE_FULL 0x00000400
+ * ACCESS_BITS_PER_SECTOR 2
+ * ACCESS_RW 0
+ * ACCESS_RO 1
+ * ACCESS_RESVD 2
+ * ACCESS_RESVD 3
+ */
+ u32 SectorAccessBitMap[FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)];
+ /* All expansions to the control data structure should add here */
+};
+
+struct bcm_vendor_section_info {
+ u32 OffsetFromZeroForSectionStart;
+ u32 OffsetFromZeroForSectionEnd;
+ u32 AccessFlags;
+ u32 Reserved[16];
+};
+
+struct bcm_flash2x_vendor_info {
+ struct bcm_vendor_section_info VendorSection[TOTAL_SECTIONS];
+ u32 Reserved[16];
+};
+
+struct bcm_dsd_header {
+ u32 DSDImageSize;
+ u32 DSDImageCRC;
+ u32 DSDImagePriority;
+	/* Not of interest right now; reading the reserved area is pointless. */
+ u32 Reserved[252]; /* Resvd for DSD Header */
+ u32 DSDImageMagicNumber;
+};
+
+struct bcm_iso_header {
+ u32 ISOImageMagicNumber;
+ u32 ISOImageSize;
+ u32 ISOImageCRC;
+ u32 ISOImagePriority;
+	/* Not of interest right now; reading the reserved area is pointless. */
+ u32 Reserved[60]; /* Resvd for ISO Header extension */
+};
+
+#define EEPROM_BEGIN_CIS (0)
+#define EEPROM_BEGIN_NON_CIS (0x200)
+#define EEPROM_END (0x2000)
+#define INIT_PARAMS_SIGNATURE (0x95a7a597)
+#define MAX_INIT_PARAMS_LENGTH (2048)
+#define MAC_ADDRESS_OFFSET 0x200
+
+#define INIT_PARAMS_1_SIGNATURE_ADDRESS EEPROM_BEGIN_NON_CIS
+#define INIT_PARAMS_1_DATA_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+16)
+#define INIT_PARAMS_1_MACADDRESS_ADDRESS (MAC_ADDRESS_OFFSET)
+#define INIT_PARAMS_1_LENGTH_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+4)
+
+#define INIT_PARAMS_2_SIGNATURE_ADDRESS (EEPROM_BEGIN_NON_CIS + 2048 + 16)
+#define INIT_PARAMS_2_DATA_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 16)
+#define INIT_PARAMS_2_MACADDRESS_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 8)
+#define INIT_PARAMS_2_LENGTH_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 4)
+
+#define EEPROM_SPI_DEV_CONFIG_REG 0x0F003000
+#define EEPROM_SPI_Q_STATUS1_REG 0x0F003004
+#define EEPROM_SPI_Q_STATUS1_MASK_REG 0x0F00300C
+
+#define EEPROM_SPI_Q_STATUS_REG 0x0F003008
+#define EEPROM_CMDQ_SPI_REG 0x0F003018
+#define EEPROM_WRITE_DATAQ_REG 0x0F00301C
+#define EEPROM_READ_DATAQ_REG 0x0F003020
+#define SPI_FLUSH_REG 0x0F00304C
+
+#define EEPROM_WRITE_ENABLE 0x06000000
+#define EEPROM_READ_STATUS_REGISTER 0x05000000
+#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000
+#define EEPROM_WRITE_QUEUE_EMPTY 0x00001000
+#define EEPROM_WRITE_QUEUE_AVAIL 0x00002000
+#define EEPROM_WRITE_QUEUE_FULL 0x00004000
+#define EEPROM_16_BYTE_PAGE_READ 0xFB000000
+#define EEPROM_4_BYTE_PAGE_READ 0x3B000000
+
+#define EEPROM_CMD_QUEUE_FLUSH 0x00000001
+#define EEPROM_WRITE_QUEUE_FLUSH 0x00000002
+#define EEPROM_READ_QUEUE_FLUSH 0x00000004
+#define EEPROM_ETH_QUEUE_FLUSH 0x00000008
+#define EEPROM_ALL_QUEUE_FLUSH 0x0000000f
+#define EEPROM_READ_ENABLE 0x06000000
+#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000
+#define EEPROM_READ_DATA_FULL 0x00000010
+#define EEPROM_READ_DATA_AVAIL 0x00000020
+#define EEPROM_READ_QUEUE_EMPTY 0x00000002
+#define EEPROM_CMD_QUEUE_EMPTY 0x00000100
+#define EEPROM_CMD_QUEUE_AVAIL 0x00000200
+#define EEPROM_CMD_QUEUE_FULL 0x00000400
/* Most EEPROM status register bit 0 indicates if the EEPROM is busy
* with a write if set 1. See the details of the EEPROM Status Register
- * in the EEPROM data sheet. */
-#define EEPROM_STATUS_REG_WRITE_BUSY 0x00000001
-
-// We will have 1 mSec for every RETRIES_PER_DELAY count and have a max attempts of MAX_EEPROM_RETRIES
-// This will give us 80 mSec minimum of delay = 80mSecs
-#define MAX_EEPROM_RETRIES 80
-#define RETRIES_PER_DELAY 64
-
-
-#define MAX_RW_SIZE 0x10
-#define MAX_READ_SIZE 0x10
-#define MAX_SECTOR_SIZE (512*1024)
-#define MIN_SECTOR_SIZE (1024)
-#define FLASH_SECTOR_SIZE_OFFSET 0xEFFFC
-#define FLASH_SECTOR_SIZE_SIG_OFFSET 0xEFFF8
-#define FLASH_SECTOR_SIZE_SIG 0xCAFEBABE
-#define FLASH_CS_INFO_START_ADDR 0xFF0000
-#define FLASH_CONTROL_STRUCT_SIGNATURE 0xBECEF1A5
-#define SCSI_FIRMWARE_MAJOR_VERSION 0x1
-#define SCSI_FIRMWARE_MINOR_VERSION 0x5
-#define BYTE_WRITE_SUPPORT 0x1
-
-#define FLASH_AUTO_INIT_BASE_ADDR 0xF00000
-
-
-
-
-#define FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT 0x1C000000
-#define FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT 0x1F000000
-
-#define FLASH_CONTIGIOUS_START_ADDR_BCS350 0x08000000
-#define FLASH_CONTIGIOUS_END_ADDR_BCS350 0x08FFFFFF
-
-
-
-#define FLASH_SIZE_ADDR 0xFFFFEC
-
-#define FLASH_SPI_CMDQ_REG 0xAF003040
-#define FLASH_SPI_WRITEQ_REG 0xAF003044
-#define FLASH_SPI_READQ_REG 0xAF003048
-#define FLASH_CONFIG_REG 0xAF003050
-#define FLASH_GPIO_CONFIG_REG 0xAF000030
-
-#define FLASH_CMD_WRITE_ENABLE 0x06
-#define FLASH_CMD_READ_ENABLE 0x03
-#define FLASH_CMD_RESET_WRITE_ENABLE 0x04
-#define FLASH_CMD_STATUS_REG_READ 0x05
-#define FLASH_CMD_STATUS_REG_WRITE 0x01
-#define FLASH_CMD_READ_ID 0x9F
-
-#define PAD_SELECT_REGISTER 0xAF000410
-
-#define FLASH_PART_SST25VF080B 0xBF258E
-
-#define EEPROM_CAL_DATA_INTERNAL_LOC 0xbFB00008
-
-#define EEPROM_CALPARAM_START 0x200
-#define EEPROM_SIZE_OFFSET 524
-
-//As Read/Write time vaires from 1.5 to 3.0 ms.
-//so After Ignoring the rdm/wrm time(that is dependent on many factor like interface etc.),
-//here time calculated meets the worst case delay, 3.0 ms
-#define MAX_FLASH_RETRIES 4
-#define FLASH_PER_RETRIES_DELAY 16
-
-
-#define EEPROM_MAX_CAL_AREA_SIZE 0xF0000
-
-
-
-#define BECM ntohl(0x4245434d)
-
-#define FLASH_2X_MAJOR_NUMBER 0x2
-#define DSD_IMAGE_MAGIC_NUMBER 0xBECE0D5D
-#define ISO_IMAGE_MAGIC_NUMBER 0xBECE0150
-#define NON_CDLESS_DEVICE_BOOT_SIG 0xBECEB007
-#define MINOR_VERSION(x) ((x >>16) & 0xFFFF)
+ * in the EEPROM data sheet.
+ */
+#define EEPROM_STATUS_REG_WRITE_BUSY 0x00000001
+
+/* We wait 1 ms for every RETRIES_PER_DELAY count, with a maximum of MAX_EEPROM_RETRIES attempts.
+ * This gives a minimum delay of 80 ms.
+ */
+#define MAX_EEPROM_RETRIES 80
+#define RETRIES_PER_DELAY 64
+#define MAX_RW_SIZE 0x10
+#define MAX_READ_SIZE 0x10
+#define MAX_SECTOR_SIZE (512 * 1024)
+#define MIN_SECTOR_SIZE (1024)
+#define FLASH_SECTOR_SIZE_OFFSET 0xEFFFC
+#define FLASH_SECTOR_SIZE_SIG_OFFSET 0xEFFF8
+#define FLASH_SECTOR_SIZE_SIG 0xCAFEBABE
+#define FLASH_CS_INFO_START_ADDR 0xFF0000
+#define FLASH_CONTROL_STRUCT_SIGNATURE 0xBECEF1A5
+#define SCSI_FIRMWARE_MAJOR_VERSION 0x1
+#define SCSI_FIRMWARE_MINOR_VERSION 0x5
+#define BYTE_WRITE_SUPPORT 0x1
+#define FLASH_AUTO_INIT_BASE_ADDR 0xF00000
+#define FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT 0x1C000000
+#define FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT 0x1F000000
+#define FLASH_CONTIGIOUS_START_ADDR_BCS350 0x08000000
+#define FLASH_CONTIGIOUS_END_ADDR_BCS350 0x08FFFFFF
+#define FLASH_SIZE_ADDR 0xFFFFEC
+#define FLASH_SPI_CMDQ_REG 0xAF003040
+#define FLASH_SPI_WRITEQ_REG 0xAF003044
+#define FLASH_SPI_READQ_REG 0xAF003048
+#define FLASH_CONFIG_REG 0xAF003050
+#define FLASH_GPIO_CONFIG_REG 0xAF000030
+#define FLASH_CMD_WRITE_ENABLE 0x06
+#define FLASH_CMD_READ_ENABLE 0x03
+#define FLASH_CMD_RESET_WRITE_ENABLE 0x04
+#define FLASH_CMD_STATUS_REG_READ 0x05
+#define FLASH_CMD_STATUS_REG_WRITE 0x01
+#define FLASH_CMD_READ_ID 0x9F
+#define PAD_SELECT_REGISTER 0xAF000410
+#define FLASH_PART_SST25VF080B 0xBF258E
+#define EEPROM_CAL_DATA_INTERNAL_LOC 0xbFB00008
+#define EEPROM_CALPARAM_START 0x200
+#define EEPROM_SIZE_OFFSET 524
+
+/* As the read/write time varies from 1.5 to 3.0 ms,
+ * after ignoring the rdm/wrm time (which depends on many factors such as the interface),
+ * the time calculated here meets the worst-case delay of 3.0 ms.
+ */
+#define MAX_FLASH_RETRIES 4
+#define FLASH_PER_RETRIES_DELAY 16
+#define EEPROM_MAX_CAL_AREA_SIZE 0xF0000
+#define BECM ntohl(0x4245434d)
+#define FLASH_2X_MAJOR_NUMBER 0x2
+#define DSD_IMAGE_MAGIC_NUMBER 0xBECE0D5D
+#define ISO_IMAGE_MAGIC_NUMBER 0xBECE0150
+#define NON_CDLESS_DEVICE_BOOT_SIG 0xBECEB007
+
+#define MINOR_VERSION(x) ((x >> 16) & 0xFFFF)
#define MAJOR_VERSION(x) (x & 0xFFFF)
-#define CORRUPTED_PATTERN 0x0
-#define UNINIT_PTR_IN_CS 0xBBBBDDDD
-
-#define VENDOR_PTR_IN_CS 0xAAAACCCC
-
-
-#define FLASH2X_SECTION_PRESENT 1<<0
-#define FLASH2X_SECTION_VALID 1<<1
-#define FLASH2X_SECTION_RO 1<<2
-#define FLASH2X_SECTION_ACT 1<<3
-#define SECTOR_IS_NOT_WRITABLE STATUS_FAILURE
-#define INVALID_OFFSET STATUS_FAILURE
-#define INVALID_SECTION STATUS_FAILURE
-#define SECTOR_1K 1024
-#define SECTOR_64K (64 *SECTOR_1K)
-#define SECTOR_128K (2 * SECTOR_64K)
-#define SECTOR_256k (2 * SECTOR_128K)
-#define SECTOR_512K (2 * SECTOR_256k)
-#define FLASH_PART_SIZE (16 * 1024 * 1024)
-#define RESET_CHIP_SELECT -1
-#define CHIP_SELECT_BIT12 12
-
-#define SECTOR_READWRITE_PERMISSION 0
-#define SECTOR_READONLY 1
-#define SIGNATURE_SIZE 4
-#define DEFAULT_BUFF_SIZE 0x10000
-
-#define FIELD_OFFSET_IN_HEADER(HeaderPointer,Field) ((PUCHAR)&((HeaderPointer)(NULL))->Field - (PUCHAR)(NULL))
+#define CORRUPTED_PATTERN 0x0
+#define UNINIT_PTR_IN_CS 0xBBBBDDDD
+#define VENDOR_PTR_IN_CS 0xAAAACCCC
+#define FLASH2X_SECTION_PRESENT (1 << 0)
+#define FLASH2X_SECTION_VALID (1 << 1)
+#define FLASH2X_SECTION_RO (1 << 2)
+#define FLASH2X_SECTION_ACT (1 << 3)
+#define SECTOR_IS_NOT_WRITABLE STATUS_FAILURE
+#define INVALID_OFFSET STATUS_FAILURE
+#define INVALID_SECTION STATUS_FAILURE
+#define SECTOR_1K 1024
+#define SECTOR_64K (64 * SECTOR_1K)
+#define SECTOR_128K (2 * SECTOR_64K)
+#define SECTOR_256k (2 * SECTOR_128K)
+#define SECTOR_512K (2 * SECTOR_256k)
+#define FLASH_PART_SIZE (16 * 1024 * 1024)
+#define RESET_CHIP_SELECT -1
+#define CHIP_SELECT_BIT12 12
+#define SECTOR_READWRITE_PERMISSION 0
+#define SECTOR_READONLY 1
+#define SIGNATURE_SIZE 4
+#define DEFAULT_BUFF_SIZE 0x10000
+
+#define FIELD_OFFSET_IN_HEADER(HeaderPointer, Field) ((u8 *)&((HeaderPointer)(NULL))->Field - (u8 *)(NULL))
#endif
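
FIELD_OFFSET_IN_HEADER keeps its null-pointer arithmetic and merely switches the byte cast from PUCHAR to u8 *, with callers now passing a struct pointer type such as struct bcm_dsd_header *. The standard offsetof() from <stddef.h> computes the same value from the struct type alone and is well defined, whereas the macro relies on formally undefined pointer arithmetic. A small stand-alone sketch of the equivalence; struct hdr is a hypothetical type, not one of the headers above.

#include <assert.h>
#include <stddef.h>	/* offsetof() */

struct hdr {
	unsigned int size;
	unsigned int crc;
	unsigned int priority;
	unsigned int magic;
};

/* Same shape as the driver macro: takes a pointer type plus a field name. */
#define FIELD_OFFSET(HeaderPointer, Field) \
	((unsigned char *)&((HeaderPointer)(NULL))->Field - (unsigned char *)(NULL))

int main(void)
{
	/* Both expressions name the same byte offset of 'magic'. */
	assert(FIELD_OFFSET(struct hdr *, magic) == offsetof(struct hdr, magic));
	return 0;
}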
diff --git a/drivers/staging/bcm/target_params.h b/drivers/staging/bcm/target_params.h
index ad7ec0054938..dc45f9ab854d 100644
--- a/drivers/staging/bcm/target_params.h
+++ b/drivers/staging/bcm/target_params.h
@@ -1,81 +1,57 @@
#ifndef TARGET_PARAMS_H
#define TARGET_PARAMS_H
-typedef struct _TARGET_PARAMS
-{
- B_UINT32 m_u32CfgVersion;
-
- // Scanning Related Params
- B_UINT32 m_u32CenterFrequency;
- B_UINT32 m_u32BandAScan;
- B_UINT32 m_u32BandBScan;
- B_UINT32 m_u32BandCScan;
-
-
- // QoS Params
- B_UINT32 m_u32ErtpsOptions;
-
- B_UINT32 m_u32PHSEnable;
-
-
- // HO Params
- B_UINT32 m_u32HoEnable;
-
- B_UINT32 m_u32HoReserved1;
- B_UINT32 m_u32HoReserved2;
- // Power Control Params
-
- B_UINT32 m_u32MimoEnable;
-
- B_UINT32 m_u32SecurityEnable;
-
- B_UINT32 m_u32PowerSavingModesEnable; //bit 1: 1 Idlemode enable; bit2: 1 Sleepmode Enable
- /* PowerSaving Mode Options:
- bit 0 = 1: CPE mode - to keep pcmcia if alive;
- bit 1 = 1: CINR reporting in Idlemode Msg
- bit 2 = 1: Default PSC Enable in sleepmode*/
- B_UINT32 m_u32PowerSavingModeOptions;
-
- B_UINT32 m_u32ArqEnable;
-
- // From Version #3, the HARQ section renamed as general
- B_UINT32 m_u32HarqEnable;
- // EEPROM Param Location
- B_UINT32 m_u32EEPROMFlag;
- // BINARY TYPE - 4th MSByte: Interface Type - 3rd MSByte: Vendor Type - 2nd MSByte
- // Unused - LSByte
- B_UINT32 m_u32Customize;
- B_UINT32 m_u32ConfigBW; /* In Hz */
- B_UINT32 m_u32ShutDownInitThresholdTimer;
-
- B_UINT32 m_u32RadioParameter;
- B_UINT32 m_u32PhyParameter1;
- B_UINT32 m_u32PhyParameter2;
- B_UINT32 m_u32PhyParameter3;
-
- B_UINT32 m_u32TestOptions; // in eval mode only; lower 16bits = basic cid for testing; then bit 16 is test cqich,bit 17 test init rang; bit 18 test periodic rang and bit 19 is test harq ack/nack
-
- B_UINT32 m_u32MaxMACDataperDLFrame;
- B_UINT32 m_u32MaxMACDataperULFrame;
-
- B_UINT32 m_u32Corr2MacFlags;
-
- //adding driver params.
- B_UINT32 HostDrvrConfig1;
- B_UINT32 HostDrvrConfig2;
- B_UINT32 HostDrvrConfig3;
- B_UINT32 HostDrvrConfig4;
- B_UINT32 HostDrvrConfig5;
- B_UINT32 HostDrvrConfig6;
- B_UINT32 m_u32SegmentedPUSCenable;
-
- // removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files..
-
- //BAMC Related Parameters
- //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
- //bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
- B_UINT32 m_u32BandAMCEnable;
-
-} stTargetParams,TARGET_PARAMS,*PTARGET_PARAMS, STARGETPARAMS, *PSTARGETPARAMS;
+struct bcm_target_params {
+ u32 m_u32CfgVersion;
+ u32 m_u32CenterFrequency;
+ u32 m_u32BandAScan;
+ u32 m_u32BandBScan;
+ u32 m_u32BandCScan;
+ u32 m_u32ErtpsOptions;
+ u32 m_u32PHSEnable;
+ u32 m_u32HoEnable;
+ u32 m_u32HoReserved1;
+ u32 m_u32HoReserved2;
+ u32 m_u32MimoEnable;
+ u32 m_u32SecurityEnable;
+	u32 m_u32PowerSavingModesEnable; /* bit 1: 1 Idlemode enable; bit 2: 1 Sleepmode enable */
+ /* PowerSaving Mode Options:
+ * bit 0 = 1: CPE mode - to keep pcmcia if alive;
+ * bit 1 = 1: CINR reporting in Idlemode Msg
+ * bit 2 = 1: Default PSC Enable in sleepmode
+ */
+ u32 m_u32PowerSavingModeOptions;
+ u32 m_u32ArqEnable;
+ /* From Version #3, the HARQ section renamed as general */
+ u32 m_u32HarqEnable;
+ u32 m_u32EEPROMFlag;
+ /* BINARY TYPE - 4th MSByte: Interface Type - 3rd MSByte: Vendor Type - 2nd MSByte
+ * Unused - LSByte
+ */
+ u32 m_u32Customize;
+ u32 m_u32ConfigBW; /* In Hz */
+ u32 m_u32ShutDownInitThresholdTimer;
+ u32 m_u32RadioParameter;
+ u32 m_u32PhyParameter1;
+ u32 m_u32PhyParameter2;
+ u32 m_u32PhyParameter3;
+	u32 m_u32TestOptions; /* in eval mode only; lower 16 bits = basic CID for testing; bit 16 = test CQICH, bit 17 = test initial ranging, bit 18 = test periodic ranging, bit 19 = test HARQ ack/nack */
+ u32 m_u32MaxMACDataperDLFrame;
+ u32 m_u32MaxMACDataperULFrame;
+ u32 m_u32Corr2MacFlags;
+ u32 HostDrvrConfig1;
+ u32 HostDrvrConfig2;
+ u32 HostDrvrConfig3;
+ u32 HostDrvrConfig4;
+ u32 HostDrvrConfig5;
+ u32 HostDrvrConfig6;
+ u32 m_u32SegmentedPUSCenable;
+ /* removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files..
+ * BAMC Related Parameters
+ * Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
+ * bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
+ */
+ u32 m_u32BandAMCEnable;
+};
#endif
diff --git a/drivers/staging/bcm/vendorspecificextn.c b/drivers/staging/bcm/vendorspecificextn.c
index 40be60aa909a..be1f91d955aa 100644
--- a/drivers/staging/bcm/vendorspecificextn.c
+++ b/drivers/staging/bcm/vendorspecificextn.c
@@ -11,7 +11,7 @@
// STATUS_SUCCESS/STATUS_FAILURE
//
//-----------------------------------------------------------------------------
-INT vendorextnGetSectionInfo(PVOID pContext,PFLASH2X_VENDORSPECIFIC_INFO pVendorInfo)
+INT vendorextnGetSectionInfo(PVOID pContext, struct bcm_flash2x_vendor_info *pVendorInfo)
{
return STATUS_FAILURE;
}
diff --git a/drivers/staging/bcm/vendorspecificextn.h b/drivers/staging/bcm/vendorspecificextn.h
index 834410e29e75..52890d216edf 100644
--- a/drivers/staging/bcm/vendorspecificextn.h
+++ b/drivers/staging/bcm/vendorspecificextn.h
@@ -4,7 +4,7 @@
#define CONTINUE_COMMON_PATH 0xFFFF
-INT vendorextnGetSectionInfo(PVOID pContext,PFLASH2X_VENDORSPECIFIC_INFO pVendorInfo);
+INT vendorextnGetSectionInfo(PVOID pContext, struct bcm_flash2x_vendor_info *pVendorInfo);
INT vendorextnExit(struct bcm_mini_adapter *Adapter);
INT vendorextnInit(struct bcm_mini_adapter *Adapter);
INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg);
diff --git a/drivers/staging/ccg/Kconfig b/drivers/staging/ccg/Kconfig
index 8997a8c757aa..7ed5bc6caadb 100644
--- a/drivers/staging/ccg/Kconfig
+++ b/drivers/staging/ccg/Kconfig
@@ -2,7 +2,7 @@ if USB_GADGET
config USB_G_CCG
tristate "Configurable Composite Gadget (STAGING)"
- depends on STAGING && BLOCK && NET && !USB_ZERO && !USB_ZERO_HNPTEST && !USB_AUDIO && !GADGET_UAC1 && !USB_ETH && !USB_ETH_RNDIS && !USB_ETH_EEM && !USB_G_NCM && !USB_GADGETFS && !USB_FUNCTIONFS && !USB_FUNCTIONFS_ETH && !USB_FUNCTIONFS_RNDIS && !USB_FUNCTIONFS_GENERIC && !USB_FILE_STORAGE && !USB_FILE_STORAGE_TEST && !USB_MASS_STORAGE && !USB_G_SERIAL && !USB_MIDI_GADGET && !USB_G_PRINTER && !USB_CDC_COMPOSITE && !USB_G_NOKIA && !USB_G_ACM_MS && !USB_G_MULTI && !USB_G_MULTI_RNDIS && !USB_G_MULTI_CDC && !USB_G_HID && !USB_G_DBGP && !USB_G_WEBCAM
+ depends on STAGING && BLOCK && NET && !USB_ZERO && !USB_ZERO_HNPTEST && !USB_AUDIO && !GADGET_UAC1 && !USB_ETH && !USB_ETH_RNDIS && !USB_ETH_EEM && !USB_G_NCM && !USB_GADGETFS && !USB_FUNCTIONFS && !USB_FUNCTIONFS_ETH && !USB_FUNCTIONFS_RNDIS && !USB_FUNCTIONFS_GENERIC && !USB_FILE_STORAGE && !USB_FILE_STORAGE_TEST && !USB_MASS_STORAGE && !USB_G_SERIAL && !USB_MIDI_GADGET && !USB_G_PRINTER && !USB_CDC_COMPOSITE && !USB_G_NOKIA && !USB_G_ACM_MS && !USB_G_MULTI && !USB_G_MULTI_RNDIS && !USB_G_MULTI_CDC && !USB_G_HID && !USB_G_DBGP && !USB_G_WEBCAM && TTY
help
The Configurable Composite Gadget supports multiple USB
functions: acm, mass storage, rndis and FunctionFS.
diff --git a/drivers/staging/ccg/u_ether.c b/drivers/staging/ccg/u_ether.c
index d0dabcf015a9..fed78865adc6 100644
--- a/drivers/staging/ccg/u_ether.c
+++ b/drivers/staging/ccg/u_ether.c
@@ -157,12 +157,12 @@ static int ueth_change_mtu(struct net_device *net, int new_mtu)
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
- struct eth_dev *dev = netdev_priv(net);
+ struct eth_dev *dev = netdev_priv(net);
- strlcpy(p->driver, "g_ether", sizeof p->driver);
- strlcpy(p->version, UETH__VERSION, sizeof p->version);
- strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
- strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+ strlcpy(p->driver, "g_ether", sizeof(p->driver));
+ strlcpy(p->version, UETH__VERSION, sizeof(p->version));
+ strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
+ strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}
/* REVISIT can also support:
diff --git a/drivers/staging/ccg/u_serial.c b/drivers/staging/ccg/u_serial.c
index 373c40656b52..b10947ae0ac5 100644
--- a/drivers/staging/ccg/u_serial.c
+++ b/drivers/staging/ccg/u_serial.c
@@ -491,12 +491,8 @@ static void gs_rx_push(unsigned long _port)
req = list_first_entry(queue, struct usb_request, list);
- /* discard data if tty was closed */
- if (!tty)
- goto recycle;
-
/* leave data queued if tty was rx throttled */
- if (test_bit(TTY_THROTTLED, &tty->flags))
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags))
break;
switch (req->status) {
@@ -529,7 +525,7 @@ static void gs_rx_push(unsigned long _port)
size -= n;
}
- count = tty_insert_flip_string(tty, packet, size);
+ count = tty_insert_flip_string(&port->port, packet, size);
if (count)
do_push = true;
if (count != size) {
@@ -542,7 +538,6 @@ static void gs_rx_push(unsigned long _port)
}
port->n_read = 0;
}
-recycle:
list_move(&req->list, &port->read_pool);
port->read_started--;
}
@@ -550,8 +545,8 @@ recycle:
/* Push from tty to ldisc; without low_latency set this is handled by
* a workqueue, so we won't get callbacks and can hold port_lock
*/
- if (tty && do_push)
- tty_flip_buffer_push(tty);
+ if (do_push)
+ tty_flip_buffer_push(&port->port);
/* We want our data queue to become empty ASAP, keeping data
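
The gs_rx_push() changes track the kernel's tty_port conversion: flip-buffer data is now inserted into, and pushed from, the struct tty_port embedded in the port structure, so the function no longer needs a live struct tty_struct and the old "discard data if tty was closed" shortcut (the recycle label) goes away. A minimal sketch of the new pattern; only tty_insert_flip_string() and tty_flip_buffer_push() are real kernel APIs, everything else is a placeholder name.

#include <linux/tty.h>
#include <linux/tty_flip.h>

struct my_serial_port {
	struct tty_port port;		/* owns the flip buffers */
	/* ... driver-private state ... */
};

static void my_push_rx(struct my_serial_port *p,
		       const unsigned char *data, size_t len)
{
	int copied = tty_insert_flip_string(&p->port, data, len);

	if (copied)			/* buffered something: hand it to the line discipline */
		tty_flip_buffer_push(&p->port);
}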
diff --git a/drivers/staging/ced1401/ced_ioc.c b/drivers/staging/ced1401/ced_ioc.c
index d0434714afd3..82a333f6433e 100644
--- a/drivers/staging/ced1401/ced_ioc.c
+++ b/drivers/staging/ced1401/ced_ioc.c
@@ -123,7 +123,7 @@ int SendString(DEVICE_EXTENSION * pdx, const char __user * pData,
iReturn = PutChars(pdx, buffer, n);
}
- Allowi(pdx, false); // make sure we have input int
+ Allowi(pdx); // make sure we have input int
mutex_unlock(&pdx->io_mutex);
return iReturn;
@@ -140,7 +140,7 @@ int SendChar(DEVICE_EXTENSION * pdx, char c)
mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
iReturn = PutChars(pdx, &c, 1);
dev_dbg(&pdx->interface->dev, "SendChar >%c< (0x%02x)", c, c);
- Allowi(pdx, false); // Make sure char reads are running
+ Allowi(pdx); // Make sure char reads are running
mutex_unlock(&pdx->io_mutex);
return iReturn;
}
@@ -433,8 +433,8 @@ int GetChar(DEVICE_EXTENSION * pdx)
dev_dbg(&pdx->interface->dev, "GetChar");
- Allowi(pdx, false); // Make sure char reads are running
- SendChars(pdx); // and send any buffered chars
+ Allowi(pdx); // Make sure char reads are running
+ SendChars(pdx); // and send any buffered chars
spin_lock_irq(&pdx->charInLock);
if (pdx->dwNumInput > 0) // worth looking
@@ -447,7 +447,7 @@ int GetChar(DEVICE_EXTENSION * pdx)
iReturn = U14ERR_NOIN; // no input data to read
spin_unlock_irq(&pdx->charInLock);
- Allowi(pdx, false); // Make sure char reads are running
+ Allowi(pdx); // Make sure char reads are running
mutex_unlock(&pdx->io_mutex); // Protect disconnect from new i/o
return iReturn;
@@ -472,7 +472,7 @@ int GetString(DEVICE_EXTENSION * pdx, char __user * pUser, int n)
return -ENOMEM;
mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
- Allowi(pdx, false); // Make sure char reads are running
+ Allowi(pdx); // Make sure char reads are running
SendChars(pdx); // and send any buffered chars
spin_lock_irq(&pdx->charInLock);
@@ -518,7 +518,7 @@ int GetString(DEVICE_EXTENSION * pdx, char __user * pUser, int n)
} else
spin_unlock_irq(&pdx->charInLock);
- Allowi(pdx, false); // Make sure char reads are running
+ Allowi(pdx); // Make sure char reads are running
mutex_unlock(&pdx->io_mutex); // Protect disconnect from new i/o
return iReturn;
@@ -531,7 +531,7 @@ int Stat1401(DEVICE_EXTENSION * pdx)
{
int iReturn;
mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
- Allowi(pdx, false); // make sure we allow pending chars
+ Allowi(pdx); // make sure we allow pending chars
SendChars(pdx); // in both directions
iReturn = pdx->dwNumInput; // no lock as single read
mutex_unlock(&pdx->io_mutex); // Protect disconnect from new i/o
@@ -550,7 +550,7 @@ int LineCount(DEVICE_EXTENSION * pdx)
int iReturn = 0; // will be count of line ends
mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
- Allowi(pdx, false); // Make sure char reads are running
+ Allowi(pdx); // Make sure char reads are running
SendChars(pdx); // and send any buffered chars
spin_lock_irq(&pdx->charInLock); // Get protection
diff --git a/drivers/staging/ced1401/usb1401.c b/drivers/staging/ced1401/usb1401.c
index a27043a2f8c5..254131d8be5f 100644
--- a/drivers/staging/ced1401/usb1401.c
+++ b/drivers/staging/ced1401/usb1401.c
@@ -697,7 +697,7 @@ static void staged_callback(struct urb *pUrb)
// in Allowi as if it were protected by the char lock. In any case, most systems will
// not be upset by char input during DMA... sigh. Needs sorting out.
if (bRestartCharInput) // may be out of date, but...
- Allowi(pdx, true); // ...Allowi tests a lock too.
+ Allowi(pdx); // ...Allowi tests a lock too.
dev_dbg(&pdx->interface->dev, "%s done", __func__);
}
@@ -1172,7 +1172,7 @@ static void ced_readchar_callback(struct urb *pUrb)
pdx->bReadCharsPending = false; // No longer have a pending read
spin_unlock(&pdx->charInLock); // already at irq level
- Allowi(pdx, true); // see if we can do the next one
+ Allowi(pdx); // see if we can do the next one
}
/****************************************************************************
@@ -1182,7 +1182,7 @@ static void ced_readchar_callback(struct urb *pUrb)
** we can pick up any inward transfers. This can be called in multiple contexts
** so we use the irqsave version of the spinlock.
****************************************************************************/
-int Allowi(DEVICE_EXTENSION * pdx, bool bInCallback)
+int Allowi(DEVICE_EXTENSION * pdx)
{
int iReturn = U14ERR_NOERROR;
unsigned long flags;
@@ -1211,9 +1211,7 @@ int Allowi(DEVICE_EXTENSION * pdx, bool bInCallback)
pdx, pdx->bInterval);
pdx->pUrbCharIn->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; // short xfers are OK by default
usb_anchor_urb(pdx->pUrbCharIn, &pdx->submitted); // in case we need to kill it
- iReturn =
- usb_submit_urb(pdx->pUrbCharIn,
- bInCallback ? GFP_ATOMIC : GFP_KERNEL);
+ iReturn = usb_submit_urb(pdx->pUrbCharIn, GFP_ATOMIC);
if (iReturn) {
usb_unanchor_urb(pdx->pUrbCharIn); // remove from list of active Urbs
pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
@@ -1393,10 +1391,8 @@ static int ced_probe(struct usb_interface *interface,
// allocate memory for our device extension and initialize it
pdx = kzalloc(sizeof(*pdx), GFP_KERNEL);
- if (!pdx) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!pdx)
goto error;
- }
for (i = 0; i < MAX_TRANSAREAS; ++i) // Initialise the wait queues
{
diff --git a/drivers/staging/ced1401/usb1401.h b/drivers/staging/ced1401/usb1401.h
index adb5fa402bd4..8fc6958b6f08 100644
--- a/drivers/staging/ced1401/usb1401.h
+++ b/drivers/staging/ced1401/usb1401.h
@@ -204,7 +204,7 @@ typedef struct _DEVICE_EXTENSION
/// Definitions of routines used between compilation object files
// in usb1401.c
-extern int Allowi(DEVICE_EXTENSION* pdx, bool bInCallback);
+extern int Allowi(DEVICE_EXTENSION* pdx);
extern int SendChars(DEVICE_EXTENSION* pdx);
extern void ced_draw_down(DEVICE_EXTENSION *pdx);
extern int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
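A brief sketch of the reasoning behind dropping the bInCallback flag (illustrative, not taken from this patch): Allowi() can be reached both from process context and from URB completion handlers, which run in atomic context and must not sleep, so the submission now always uses the allocation mode that is safe in both places:

	#include <linux/usb.h>

	static int example_submit_char_urb(struct urb *urb)
	{
		/* GFP_KERNEL may sleep; GFP_ATOMIC is also valid in interrupt context */
		return usb_submit_urb(urb, GFP_ATOMIC);
	}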
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 7de2a10213bd..1967852eeb17 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -1,7 +1,6 @@
config COMEDI
tristate "Data acquisition support (comedi)"
depends on m
- depends on BROKEN || FRV || M32R || MN10300 || SUPERH || TILE || X86
---help---
Enable support for a wide range of data acquisition devices
for Linux.
@@ -165,7 +164,7 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
- depends on VIRT_TO_BUS
+ depends on VIRT_TO_BUS && ISA_DMA_API
---help---
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
@@ -176,7 +175,7 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
- depends on VIRT_TO_BUS
+ depends on VIRT_TO_BUS && ISA_DMA_API
---help---
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -185,7 +184,7 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
- depends on VIRT_TO_BUS
+ depends on VIRT_TO_BUS && ISA_DMA_API
---help---
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@@ -275,10 +274,11 @@ config COMEDI_DAS08_ISA
DAS08/JR-16-AO, PC104-DAS08, DAS08/JR/16.
To compile this driver as a module, choose M here: the module will be
- called das08.
+ called das08_isa.
config COMEDI_DAS16
tristate "DAS-16 compatible ISA and PC/104 card support"
+ depends on ISA_DMA_API
select COMEDI_8255
select COMEDI_FC
---help---
@@ -308,7 +308,7 @@ config COMEDI_DAS800
config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
- depends on VIRT_TO_BUS
+ depends on VIRT_TO_BUS && ISA_DMA_API
select COMEDI_FC
---help---
Enable support for DAS1800 and compatible ISA cards
@@ -373,7 +373,7 @@ config COMEDI_DT2817
config COMEDI_DT282X
tristate "Data Translation DT2821 series and DT-EZ ISA card support"
select COMEDI_FC
- depends on VIRT_TO_BUS
+ depends on VIRT_TO_BUS && ISA_DMA_API
---help---
Enable support for Data Translation DT2821 series including DT-EZ
DT2821, DT2821-F-16SE, DT2821-F-8DI, DT2821-G-16SE, DT2821-G-8DI,
@@ -444,7 +444,8 @@ config COMEDI_ADQ12B
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
- depends on VIRT_TO_BUS
+ select COMEDI_FC
+ depends on VIRT_TO_BUS && ISA_DMA_API
---help---
Enable support for National Instruments AT-A2150 cards
@@ -541,11 +542,7 @@ menuconfig COMEDI_PCI_DRIVERS
bool "Comedi PCI drivers"
depends on PCI
---help---
- Enable comedi PCI drivers to be built
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about PCI comedi drivers.
+ Enable support for comedi PCI drivers.
if COMEDI_PCI_DRIVERS
@@ -566,6 +563,13 @@ config COMEDI_8255_PCI
To compile this driver as a module, choose M here: the module will
be called 8255_pci.
+config COMEDI_ADDI_WATCHDOG
+ tristate
+ ---help---
+ Provides support for the watchdog subdevice found on many ADDI-DATA
+ boards. This module will be automatically selected when needed. The
+ module will be called addi_watchdog.
+
config COMEDI_ADDI_APCI_035
tristate "ADDI-DATA APCI_035 support"
---help---
@@ -592,6 +596,7 @@ config COMEDI_ADDI_APCI_1500
config COMEDI_ADDI_APCI_1516
tristate "ADDI-DATA APCI-1016/1516/2016 support"
+ select COMEDI_ADDI_WATCHDOG
---help---
Enable support for ADDI-DATA APCI-1016, APCI-1516 and APCI-2016 boards.
These are 16 channel, optically isolated, digital I/O boards. The 1516
@@ -618,6 +623,7 @@ config COMEDI_ADDI_APCI_16XX
config COMEDI_ADDI_APCI_2032
tristate "ADDI-DATA APCI_2032 support"
+ select COMEDI_ADDI_WATCHDOG
---help---
Enable support for ADDI-DATA APCI_2032 cards
@@ -626,6 +632,7 @@ config COMEDI_ADDI_APCI_2032
config COMEDI_ADDI_APCI_2200
tristate "ADDI-DATA APCI_2200 support"
+ select COMEDI_ADDI_WATCHDOG
---help---
Enable support for ADDI-DATA APCI_2200 cards
@@ -795,7 +802,7 @@ config COMEDI_DAS08_PCI
Enable support for PCI DAS-08 cards.
To compile this driver as a module, choose M here: the module will be
- called das08.
+ called das08_pci.
config COMEDI_DT3000
tristate "Data Translation DT3000 series support"
@@ -1083,11 +1090,7 @@ menuconfig COMEDI_PCMCIA_DRIVERS
bool "Comedi PCMCIA drivers"
depends on PCMCIA
---help---
- Enable comedi PCMCIA and PCCARD drivers to be built
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about PCMCIA comedi drivers.
+ Enable support for comedi PCMCIA drivers.
if COMEDI_PCMCIA_DRIVERS
@@ -1164,11 +1167,7 @@ menuconfig COMEDI_USB_DRIVERS
bool "Comedi USB drivers"
depends on USB
---help---
- Enable comedi USB drivers to be built
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about USB comedi drivers.
+ Enable support for comedi USB drivers.
if COMEDI_USB_DRIVERS
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
index 8dbd306fef88..e6dfc98f8c8e 100644
--- a/drivers/staging/comedi/Makefile
+++ b/drivers/staging/comedi/Makefile
@@ -1,11 +1,12 @@
-obj-$(CONFIG_COMEDI) += comedi.o
+comedi-y := comedi_fops.o range.o drivers.o \
+ comedi_buf.o
+comedi-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
+comedi-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += comedi_pcmcia.o
+comedi-$(CONFIG_COMEDI_USB_DRIVERS) += comedi_usb.o
+comedi-$(CONFIG_PROC_FS) += proc.o
+comedi-$(CONFIG_COMPAT) += comedi_compat32.o
-obj-$(CONFIG_COMEDI) += kcomedilib/
-obj-$(CONFIG_COMEDI) += drivers/
+obj-$(CONFIG_COMEDI) += comedi.o
-comedi-y := \
- comedi_fops.o \
- proc.o \
- range.o \
- drivers.o \
- comedi_compat32.o \
+obj-$(CONFIG_COMEDI) += kcomedilib/
+obj-$(CONFIG_COMEDI) += drivers/
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index c8a8ca126127..4233605df30a 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -41,7 +41,17 @@
/* number of config options in the config structure */
#define COMEDI_NDEVCONFOPTS 32
-/*length of nth chunk of firmware data*/
+
+/*
+ * NOTE: 'comedi_config --init-data' is deprecated
+ *
+ * The following indexes in the config options were used by
+ * comedi_config to pass firmware blobs from user space to the
+ * comedi drivers. The request_firmware() hotplug interface is
+ * now used by all comedi drivers instead.
+ */
+
+/* length of nth chunk of firmware data */

#define COMEDI_DEVCONF_AUX_DATA3_LENGTH 25
#define COMEDI_DEVCONF_AUX_DATA2_LENGTH 26
#define COMEDI_DEVCONF_AUX_DATA1_LENGTH 27
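As a sketch of the replacement path mentioned in the note above (hypothetical driver code; the firmware file name is made up): comedi drivers now pull their firmware blobs through the request_firmware() hotplug interface instead of receiving them via the aux-data config options:

	#include <linux/firmware.h>

	static int example_upload_firmware(struct comedi_device *dev)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, "example-board.bin", dev->hw_dev);
		if (ret)
			return ret;

		/* ... write fw->data (fw->size bytes) to the device here ... */

		release_firmware(fw);
		return 0;
	}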
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
new file mode 100644
index 000000000000..9b997ae67796
--- /dev/null
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -0,0 +1,415 @@
+/*
+ * comedi_buf.c
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "comedidev.h"
+#include "comedi_internal.h"
+
+#ifdef PAGE_KERNEL_NOCACHE
+#define COMEDI_PAGE_PROTECTION PAGE_KERNEL_NOCACHE
+#else
+#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
+#endif
+
+static void __comedi_buf_free(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned n_pages)
+{
+ struct comedi_async *async = s->async;
+ struct comedi_buf_page *buf;
+ unsigned i;
+
+ if (async->prealloc_buf) {
+ vunmap(async->prealloc_buf);
+ async->prealloc_buf = NULL;
+ async->prealloc_bufsz = 0;
+ }
+
+ if (!async->buf_page_list)
+ return;
+
+ for (i = 0; i < n_pages; ++i) {
+ buf = &async->buf_page_list[i];
+ if (buf->virt_addr) {
+ clear_bit(PG_reserved,
+ &(virt_to_page(buf->virt_addr)->flags));
+ if (s->async_dma_dir != DMA_NONE) {
+ dma_free_coherent(dev->hw_dev,
+ PAGE_SIZE,
+ buf->virt_addr,
+ buf->dma_addr);
+ } else {
+ free_page((unsigned long)buf->virt_addr);
+ }
+ }
+ }
+ vfree(async->buf_page_list);
+ async->buf_page_list = NULL;
+ async->n_buf_pages = 0;
+}
+
+static void __comedi_buf_alloc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned n_pages)
+{
+ struct comedi_async *async = s->async;
+ struct page **pages = NULL;
+ struct comedi_buf_page *buf;
+ unsigned i;
+
+ async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
+ if (async->buf_page_list)
+ pages = vmalloc(sizeof(struct page *) * n_pages);
+
+ if (!pages)
+ return;
+
+ for (i = 0; i < n_pages; i++) {
+ buf = &async->buf_page_list[i];
+ if (s->async_dma_dir != DMA_NONE)
+ buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
+ PAGE_SIZE,
+ &buf->dma_addr,
+ GFP_KERNEL |
+ __GFP_COMP);
+ else
+ buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!buf->virt_addr)
+ break;
+
+ set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));
+
+ pages[i] = virt_to_page(buf->virt_addr);
+ }
+
+ /* vmap the prealloc_buf if all the pages were allocated */
+ if (i == n_pages)
+ async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
+ COMEDI_PAGE_PROTECTION);
+
+ vfree(pages);
+}
+
+int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long new_size)
+{
+ struct comedi_async *async = s->async;
+
+ /* Round up new_size to multiple of PAGE_SIZE */
+ new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
+
+ /* if no change is required, do nothing */
+ if (async->prealloc_buf && async->prealloc_bufsz == new_size)
+ return 0;
+
+ /* deallocate old buffer */
+ __comedi_buf_free(dev, s, async->n_buf_pages);
+
+ /* allocate new buffer */
+ if (new_size) {
+ unsigned n_pages = new_size >> PAGE_SHIFT;
+
+ __comedi_buf_alloc(dev, s, n_pages);
+
+ if (!async->prealloc_buf) {
+ /* allocation failed */
+ __comedi_buf_free(dev, s, n_pages);
+ return -ENOMEM;
+ }
+ async->n_buf_pages = n_pages;
+ }
+ async->prealloc_bufsz = new_size;
+
+ return 0;
+}
+
+void comedi_buf_reset(struct comedi_async *async)
+{
+ async->buf_write_alloc_count = 0;
+ async->buf_write_count = 0;
+ async->buf_read_alloc_count = 0;
+ async->buf_read_count = 0;
+
+ async->buf_write_ptr = 0;
+ async->buf_read_ptr = 0;
+
+ async->cur_chan = 0;
+ async->scan_progress = 0;
+ async->munge_chan = 0;
+ async->munge_count = 0;
+ async->munge_ptr = 0;
+
+ async->events = 0;
+}
+
+static unsigned int comedi_buf_write_n_available(struct comedi_async *async)
+{
+ unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
+
+ return free_end - async->buf_write_alloc_count;
+}
+
+static unsigned int __comedi_buf_write_alloc(struct comedi_async *async,
+ unsigned int nbytes,
+ int strict)
+{
+ unsigned int available = comedi_buf_write_n_available(async);
+
+ if (nbytes > available)
+ nbytes = strict ? 0 : available;
+
+ async->buf_write_alloc_count += nbytes;
+
+ /*
+ * ensure the async buffer 'counts' are read and updated
+ * before we write data to the write-alloc'ed buffer space
+ */
+ smp_mb();
+
+ return nbytes;
+}
+
+/* allocates chunk for the writer from free buffer space */
+unsigned int comedi_buf_write_alloc(struct comedi_async *async,
+ unsigned int nbytes)
+{
+ return __comedi_buf_write_alloc(async, nbytes, 0);
+}
+EXPORT_SYMBOL(comedi_buf_write_alloc);
+
+/*
+ * munging is applied to data by core as it passes between user
+ * and kernel space
+ */
+static unsigned int comedi_buf_munge(struct comedi_async *async,
+ unsigned int num_bytes)
+{
+ struct comedi_subdevice *s = async->subdevice;
+ unsigned int count = 0;
+ const unsigned num_sample_bytes = bytes_per_sample(s);
+
+ if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
+ async->munge_count += num_bytes;
+ count = num_bytes;
+ } else {
+ /* don't munge partial samples */
+ num_bytes -= num_bytes % num_sample_bytes;
+ while (count < num_bytes) {
+ int block_size = num_bytes - count;
+ unsigned int buf_end;
+
+ buf_end = async->prealloc_bufsz - async->munge_ptr;
+ if (block_size > buf_end)
+ block_size = buf_end;
+
+ s->munge(s->device, s,
+ async->prealloc_buf + async->munge_ptr,
+ block_size, async->munge_chan);
+
+ /*
+ * ensure data is munged in buffer before the
+ * async buffer munge_count is incremented
+ */
+ smp_wmb();
+
+ async->munge_chan += block_size / num_sample_bytes;
+ async->munge_chan %= async->cmd.chanlist_len;
+ async->munge_count += block_size;
+ async->munge_ptr += block_size;
+ async->munge_ptr %= async->prealloc_bufsz;
+ count += block_size;
+ }
+ }
+
+ return count;
+}
+
+unsigned int comedi_buf_write_n_allocated(struct comedi_async *async)
+{
+ return async->buf_write_alloc_count - async->buf_write_count;
+}
+
+/* transfers a chunk from writer to filled buffer space */
+unsigned int comedi_buf_write_free(struct comedi_async *async,
+ unsigned int nbytes)
+{
+ unsigned int allocated = comedi_buf_write_n_allocated(async);
+
+ if (nbytes > allocated)
+ nbytes = allocated;
+
+ async->buf_write_count += nbytes;
+ async->buf_write_ptr += nbytes;
+ comedi_buf_munge(async, async->buf_write_count - async->munge_count);
+ if (async->buf_write_ptr >= async->prealloc_bufsz)
+ async->buf_write_ptr %= async->prealloc_bufsz;
+
+ return nbytes;
+}
+EXPORT_SYMBOL(comedi_buf_write_free);
+
+unsigned int comedi_buf_read_n_available(struct comedi_async *async)
+{
+ unsigned num_bytes;
+
+ if (!async)
+ return 0;
+
+ num_bytes = async->munge_count - async->buf_read_count;
+
+ /*
+ * ensure the async buffer 'counts' are read before we
+ * attempt to read data from the buffer
+ */
+ smp_rmb();
+
+ return num_bytes;
+}
+EXPORT_SYMBOL(comedi_buf_read_n_available);
+
+/* allocates a chunk for the reader from filled (and munged) buffer space */
+unsigned int comedi_buf_read_alloc(struct comedi_async *async,
+ unsigned int nbytes)
+{
+ unsigned int available;
+
+ available = async->munge_count - async->buf_read_alloc_count;
+ if (nbytes > available)
+ nbytes = available;
+
+ async->buf_read_alloc_count += nbytes;
+
+ /*
+ * ensure the async buffer 'counts' are read before we
+ * attempt to read data from the read-alloc'ed buffer space
+ */
+ smp_rmb();
+
+ return nbytes;
+}
+EXPORT_SYMBOL(comedi_buf_read_alloc);
+
+static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
+{
+ return async->buf_read_alloc_count - async->buf_read_count;
+}
+
+/* transfers control of a chunk from reader to free buffer space */
+unsigned int comedi_buf_read_free(struct comedi_async *async,
+ unsigned int nbytes)
+{
+ unsigned int allocated;
+
+ /*
+ * ensure data has been read out of buffer before
+ * the async read count is incremented
+ */
+ smp_mb();
+
+ allocated = comedi_buf_read_n_allocated(async);
+ if (nbytes > allocated)
+ nbytes = allocated;
+
+ async->buf_read_count += nbytes;
+ async->buf_read_ptr += nbytes;
+ async->buf_read_ptr %= async->prealloc_bufsz;
+ return nbytes;
+}
+EXPORT_SYMBOL(comedi_buf_read_free);
+
+int comedi_buf_put(struct comedi_async *async, short x)
+{
+ unsigned int n = __comedi_buf_write_alloc(async, sizeof(short), 1);
+
+ if (n < sizeof(short)) {
+ async->events |= COMEDI_CB_ERROR;
+ return 0;
+ }
+ *(short *)(async->prealloc_buf + async->buf_write_ptr) = x;
+ comedi_buf_write_free(async, sizeof(short));
+ return 1;
+}
+EXPORT_SYMBOL(comedi_buf_put);
+
+int comedi_buf_get(struct comedi_async *async, short *x)
+{
+ unsigned int n = comedi_buf_read_n_available(async);
+
+ if (n < sizeof(short))
+ return 0;
+ comedi_buf_read_alloc(async, sizeof(short));
+ *x = *(short *)(async->prealloc_buf + async->buf_read_ptr);
+ comedi_buf_read_free(async, sizeof(short));
+ return 1;
+}
+EXPORT_SYMBOL(comedi_buf_get);
+
+void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
+ const void *data, unsigned int num_bytes)
+{
+ unsigned int write_ptr = async->buf_write_ptr + offset;
+
+ if (write_ptr >= async->prealloc_bufsz)
+ write_ptr %= async->prealloc_bufsz;
+
+ while (num_bytes) {
+ unsigned int block_size;
+
+ if (write_ptr + num_bytes > async->prealloc_bufsz)
+ block_size = async->prealloc_bufsz - write_ptr;
+ else
+ block_size = num_bytes;
+
+ memcpy(async->prealloc_buf + write_ptr, data, block_size);
+
+ data += block_size;
+ num_bytes -= block_size;
+
+ write_ptr = 0;
+ }
+}
+EXPORT_SYMBOL(comedi_buf_memcpy_to);
+
+void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
+ void *dest, unsigned int nbytes)
+{
+ void *src;
+ unsigned int read_ptr = async->buf_read_ptr + offset;
+
+ if (read_ptr >= async->prealloc_bufsz)
+ read_ptr %= async->prealloc_bufsz;
+
+ while (nbytes) {
+ unsigned int block_size;
+
+ src = async->prealloc_buf + read_ptr;
+
+ if (nbytes >= async->prealloc_bufsz - read_ptr)
+ block_size = async->prealloc_bufsz - read_ptr;
+ else
+ block_size = nbytes;
+
+ memcpy(dest, src, block_size);
+ nbytes -= block_size;
+ dest += block_size;
+ read_ptr = 0;
+ }
+}
+EXPORT_SYMBOL(comedi_buf_memcpy_from);
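A minimal usage sketch (hypothetical driver code, not part of this patch) showing how an acquisition interrupt handler would feed samples into the ring buffer exported above and wake any readers:

	#include "comedidev.h"

	static void example_push_sample(struct comedi_device *dev,
					struct comedi_subdevice *s, short sample)
	{
		struct comedi_async *async = s->async;

		/* comedi_buf_put() flags COMEDI_CB_ERROR itself on overrun */
		if (!comedi_buf_put(async, sample))
			return;

		async->events |= COMEDI_CB_BLOCK;	/* data available to read */
		comedi_event(dev, s);			/* wake poll()/read() waiters */
	}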
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 4b7cbfad1d74..ad208cdd53d4 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -30,8 +30,6 @@
#include "comedi.h"
#include "comedi_compat32.h"
-#ifdef CONFIG_COMPAT
-
#define COMEDI32_CHANINFO _IOR(CIO, 3, struct comedi32_chaninfo_struct)
#define COMEDI32_RANGEINFO _IOR(CIO, 8, struct comedi32_rangeinfo_struct)
/* N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR.
@@ -460,5 +458,3 @@ long comedi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return raw_ioctl(file, cmd, arg);
}
-
-#endif /* CONFIG_COMPAT */
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index b7bba1790a20..195d56d8a1ee 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -49,10 +49,6 @@
#include "comedi_internal.h"
-MODULE_AUTHOR("http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi core module");
-MODULE_LICENSE("GPL");
-
#ifdef CONFIG_COMEDI_DEBUG
int comedi_debug;
EXPORT_SYMBOL(comedi_debug);
@@ -62,11 +58,6 @@ MODULE_PARM_DESC(comedi_debug,
);
#endif
-bool comedi_autoconfig = 1;
-module_param(comedi_autoconfig, bool, S_IRUGO);
-MODULE_PARM_DESC(comedi_autoconfig,
- "enable drivers to auto-configure comedi devices (default 1)");
-
static int comedi_num_legacy_minors;
module_param(comedi_num_legacy_minors, int, S_IRUGO);
MODULE_PARM_DESC(comedi_num_legacy_minors,
@@ -86,17 +77,58 @@ MODULE_PARM_DESC(comedi_default_buf_maxsize_kb,
"default maximum size of asynchronous buffer in KiB (default "
__MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB) ")");
+struct comedi_file_info {
+ struct comedi_device *device;
+ struct comedi_subdevice *read_subdevice;
+ struct comedi_subdevice *write_subdevice;
+ struct device *hardware_device;
+};
+
static DEFINE_SPINLOCK(comedi_file_info_table_lock);
-static struct comedi_device_file_info
-*comedi_file_info_table[COMEDI_NUM_MINORS];
+static struct comedi_file_info *comedi_file_info_table[COMEDI_NUM_MINORS];
-static void do_become_nonbusy(struct comedi_device *dev,
- struct comedi_subdevice *s);
-static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
+static struct comedi_file_info *comedi_file_info_from_minor(unsigned minor)
+{
+ struct comedi_file_info *info;
+
+ BUG_ON(minor >= COMEDI_NUM_MINORS);
+ spin_lock(&comedi_file_info_table_lock);
+ info = comedi_file_info_table[minor];
+ spin_unlock(&comedi_file_info_table_lock);
+ return info;
+}
+
+static struct comedi_device *
+comedi_dev_from_file_info(struct comedi_file_info *info)
+{
+ return info ? info->device : NULL;
+}
+
+struct comedi_device *comedi_dev_from_minor(unsigned minor)
+{
+ return comedi_dev_from_file_info(comedi_file_info_from_minor(minor));
+}
+EXPORT_SYMBOL_GPL(comedi_dev_from_minor);
-static int comedi_fasync(int fd, struct file *file, int on);
+static struct comedi_subdevice *
+comedi_read_subdevice(const struct comedi_file_info *info)
+{
+ if (info->read_subdevice)
+ return info->read_subdevice;
+ if (info->device)
+ return info->device->read_subdev;
+ return NULL;
+}
-static int is_device_busy(struct comedi_device *dev);
+static struct comedi_subdevice *
+comedi_write_subdevice(const struct comedi_file_info *info)
+{
+ if (info->write_subdevice)
+ return info->write_subdevice;
+ if (info->device)
+ return info->device->write_subdev;
+ return NULL;
+}
static int resize_async_buffer(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -134,7 +166,7 @@ static int resize_async_buffer(struct comedi_device *dev,
}
DPRINTK("comedi%i subd %d buffer resized to %i bytes\n",
- dev->minor, (int)(s - dev->subdevices), async->prealloc_bufsz);
+ dev->minor, s->index, async->prealloc_bufsz);
return 0;
}
@@ -143,8 +175,8 @@ static int resize_async_buffer(struct comedi_device *dev,
static ssize_t show_max_read_buffer_kb(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct comedi_device_file_info *info = dev_get_drvdata(dev);
- struct comedi_subdevice *s = comedi_get_read_subdevice(info);
+ struct comedi_file_info *info = dev_get_drvdata(dev);
+ struct comedi_subdevice *s = comedi_read_subdevice(info);
unsigned int size = 0;
mutex_lock(&info->device->mutex);
@@ -159,8 +191,8 @@ static ssize_t store_max_read_buffer_kb(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct comedi_device_file_info *info = dev_get_drvdata(dev);
- struct comedi_subdevice *s = comedi_get_read_subdevice(info);
+ struct comedi_file_info *info = dev_get_drvdata(dev);
+ struct comedi_subdevice *s = comedi_read_subdevice(info);
unsigned int size;
int err;
@@ -184,8 +216,8 @@ static ssize_t store_max_read_buffer_kb(struct device *dev,
static ssize_t show_read_buffer_kb(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct comedi_device_file_info *info = dev_get_drvdata(dev);
- struct comedi_subdevice *s = comedi_get_read_subdevice(info);
+ struct comedi_file_info *info = dev_get_drvdata(dev);
+ struct comedi_subdevice *s = comedi_read_subdevice(info);
unsigned int size = 0;
mutex_lock(&info->device->mutex);
@@ -200,8 +232,8 @@ static ssize_t store_read_buffer_kb(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct comedi_device_file_info *info = dev_get_drvdata(dev);
- struct comedi_subdevice *s = comedi_get_read_subdevice(info);
+ struct comedi_file_info *info = dev_get_drvdata(dev);
+ struct comedi_subdevice *s = comedi_read_subdevice(info);
unsigned int size;
int err;
@@ -226,8 +258,8 @@ static ssize_t show_max_write_buffer_kb(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct comedi_device_file_info *info = dev_get_drvdata(dev);
- struct comedi_subdevice *s = comedi_get_write_subdevice(info);
+ struct comedi_file_info *info = dev_get_drvdata(dev);
+ struct comedi_subdevice *s = comedi_write_subdevice(info);
unsigned int size = 0;
mutex_lock(&info->device->mutex);
@@ -242,8 +274,8 @@ static ssize_t store_max_write_buffer_kb(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct comedi_device_file_info *info = dev_get_drvdata(dev);
- struct comedi_subdevice *s = comedi_get_write_subdevice(info);
+ struct comedi_file_info *info = dev_get_drvdata(dev);
+ struct comedi_subdevice *s = comedi_write_subdevice(info);
unsigned int size;
int err;
@@ -267,8 +299,8 @@ static ssize_t store_max_write_buffer_kb(struct device *dev,
static ssize_t show_write_buffer_kb(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct comedi_device_file_info *info = dev_get_drvdata(dev);
- struct comedi_subdevice *s = comedi_get_write_subdevice(info);
+ struct comedi_file_info *info = dev_get_drvdata(dev);
+ struct comedi_subdevice *s = comedi_write_subdevice(info);
unsigned int size = 0;
mutex_lock(&info->device->mutex);
@@ -283,8 +315,8 @@ static ssize_t store_write_buffer_kb(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct comedi_device_file_info *info = dev_get_drvdata(dev);
- struct comedi_subdevice *s = comedi_get_write_subdevice(info);
+ struct comedi_file_info *info = dev_get_drvdata(dev);
+ struct comedi_subdevice *s = comedi_write_subdevice(info);
unsigned int size;
int err;
@@ -317,6 +349,103 @@ static struct device_attribute comedi_dev_attrs[] = {
__ATTR_NULL
};
+static void comedi_set_subdevice_runflags(struct comedi_subdevice *s,
+ unsigned mask, unsigned bits)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->spin_lock, flags);
+ s->runflags &= ~mask;
+ s->runflags |= (bits & mask);
+ spin_unlock_irqrestore(&s->spin_lock, flags);
+}
+
+static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
+{
+ unsigned long flags;
+ unsigned runflags;
+
+ spin_lock_irqsave(&s->spin_lock, flags);
+ runflags = s->runflags;
+ spin_unlock_irqrestore(&s->spin_lock, flags);
+ return runflags;
+}
+
+bool comedi_is_subdevice_running(struct comedi_subdevice *s)
+{
+ unsigned runflags = comedi_get_subdevice_runflags(s);
+
+ return (runflags & SRF_RUNNING) ? true : false;
+}
+EXPORT_SYMBOL_GPL(comedi_is_subdevice_running);
+
+static bool comedi_is_subdevice_in_error(struct comedi_subdevice *s)
+{
+ unsigned runflags = comedi_get_subdevice_runflags(s);
+
+ return (runflags & SRF_ERROR) ? true : false;
+}
+
+static bool comedi_is_subdevice_idle(struct comedi_subdevice *s)
+{
+ unsigned runflags = comedi_get_subdevice_runflags(s);
+
+ return (runflags & (SRF_ERROR | SRF_RUNNING)) ? false : true;
+}
+
+/*
+ This function restores a subdevice to an idle state.
+ */
+static void do_become_nonbusy(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct comedi_async *async = s->async;
+
+ comedi_set_subdevice_runflags(s, SRF_RUNNING, 0);
+ if (async) {
+ comedi_buf_reset(async);
+ async->inttrig = NULL;
+ kfree(async->cmd.chanlist);
+ async->cmd.chanlist = NULL;
+ } else {
+ dev_err(dev->class_dev,
+ "BUG: (?) do_become_nonbusy called with async=NULL\n");
+ }
+
+ s->busy = NULL;
+}
+
+static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ int ret = 0;
+
+ if (comedi_is_subdevice_running(s) && s->cancel)
+ ret = s->cancel(dev, s);
+
+ do_become_nonbusy(dev, s);
+
+ return ret;
+}
+
+static int is_device_busy(struct comedi_device *dev)
+{
+ struct comedi_subdevice *s;
+ int i;
+
+ if (!dev->attached)
+ return 0;
+
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ if (s->busy)
+ return 1;
+ if (s->async && s->async->mmap_count)
+ return 1;
+ }
+
+ return 0;
+}
+
/*
COMEDI_DEVCONFIG
device config ioctl
@@ -335,8 +464,6 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
{
struct comedi_devconfig it;
int ret;
- unsigned char *aux_data = NULL;
- int aux_len;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -352,36 +479,15 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
return 0;
}
- if (copy_from_user(&it, arg, sizeof(struct comedi_devconfig)))
+ if (copy_from_user(&it, arg, sizeof(it)))
return -EFAULT;
it.board_name[COMEDI_NAMELEN - 1] = 0;
- if (comedi_aux_data(it.options, 0) &&
- it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) {
- int bit_shift;
- aux_len = it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH];
- if (aux_len < 0)
- return -EFAULT;
-
- aux_data = vmalloc(aux_len);
- if (!aux_data)
- return -ENOMEM;
-
- if (copy_from_user(aux_data,
- (unsigned char __user *
- )comedi_aux_data(it.options, 0), aux_len)) {
- vfree(aux_data);
- return -EFAULT;
- }
- it.options[COMEDI_DEVCONF_AUX_DATA_LO] =
- (unsigned long)aux_data;
- if (sizeof(void *) > sizeof(int)) {
- bit_shift = sizeof(int) * 8;
- it.options[COMEDI_DEVCONF_AUX_DATA_HI] =
- ((unsigned long)aux_data) >> bit_shift;
- } else
- it.options[COMEDI_DEVCONF_AUX_DATA_HI] = 0;
+ if (it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) {
+ dev_warn(dev->class_dev,
+ "comedi_config --init_data is deprecated\n");
+ return -EINVAL;
}
ret = comedi_device_attach(dev, &it);
@@ -392,9 +498,6 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
}
}
- if (aux_data)
- vfree(aux_data);
-
return ret;
}
@@ -420,7 +523,7 @@ static int do_bufconfig_ioctl(struct comedi_device *dev,
struct comedi_subdevice *s;
int retval = 0;
- if (copy_from_user(&bc, arg, sizeof(struct comedi_bufconfig)))
+ if (copy_from_user(&bc, arg, sizeof(bc)))
return -EFAULT;
if (bc.subdevice >= dev->n_subdevices || bc.subdevice < 0)
@@ -453,7 +556,7 @@ static int do_bufconfig_ioctl(struct comedi_device *dev,
bc.maximum_size = async->max_bufsize;
copyback:
- if (copy_to_user(arg, &bc, sizeof(struct comedi_bufconfig)))
+ if (copy_to_user(arg, &bc, sizeof(bc)))
return -EFAULT;
return 0;
@@ -477,14 +580,10 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
struct comedi_devinfo __user *arg,
struct file *file)
{
- struct comedi_devinfo devinfo;
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_subdevice *read_subdev =
- comedi_get_read_subdevice(dev_file_info);
- struct comedi_subdevice *write_subdev =
- comedi_get_write_subdevice(dev_file_info);
+ struct comedi_file_info *info = comedi_file_info_from_minor(minor);
+ struct comedi_subdevice *s;
+ struct comedi_devinfo devinfo;
memset(&devinfo, 0, sizeof(devinfo));
@@ -494,17 +593,19 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
strlcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
strlcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
- if (read_subdev)
- devinfo.read_subdevice = read_subdev - dev->subdevices;
+ s = comedi_read_subdevice(info);
+ if (s)
+ devinfo.read_subdevice = s->index;
else
devinfo.read_subdevice = -1;
- if (write_subdev)
- devinfo.write_subdevice = write_subdev - dev->subdevices;
+ s = comedi_write_subdevice(info);
+ if (s)
+ devinfo.write_subdevice = s->index;
else
devinfo.write_subdevice = -1;
- if (copy_to_user(arg, &devinfo, sizeof(struct comedi_devinfo)))
+ if (copy_to_user(arg, &devinfo, sizeof(devinfo)))
return -EFAULT;
return 0;
@@ -531,9 +632,7 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
struct comedi_subdinfo *tmp, *us;
struct comedi_subdevice *s;
- tmp =
- kcalloc(dev->n_subdevices, sizeof(struct comedi_subdinfo),
- GFP_KERNEL);
+ tmp = kcalloc(dev->n_subdevices, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
@@ -545,7 +644,7 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
us->type = s->type;
us->n_chan = s->n_chan;
us->subd_flags = s->subdev_flags;
- if (comedi_get_subdevice_runflags(s) & SRF_RUNNING)
+ if (comedi_is_subdevice_running(s))
us->subd_flags |= SDF_RUNNING;
#define TIMER_nanosec 5 /* backwards compatibility */
us->timer_type = TIMER_nanosec;
@@ -584,8 +683,7 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
us->settling_time_0 = s->settling_time_0;
}
- ret = copy_to_user(arg, tmp,
- dev->n_subdevices * sizeof(struct comedi_subdinfo));
+ ret = copy_to_user(arg, tmp, dev->n_subdevices * sizeof(*tmp));
kfree(tmp);
@@ -612,7 +710,7 @@ static int do_chaninfo_ioctl(struct comedi_device *dev,
struct comedi_subdevice *s;
struct comedi_chaninfo it;
- if (copy_from_user(&it, arg, sizeof(struct comedi_chaninfo)))
+ if (copy_from_user(&it, arg, sizeof(it)))
return -EFAULT;
if (it.subdev >= dev->n_subdevices)
@@ -679,7 +777,7 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
struct comedi_subdevice *s;
struct comedi_async *async;
- if (copy_from_user(&bi, arg, sizeof(struct comedi_bufinfo)))
+ if (copy_from_user(&bi, arg, sizeof(bi)))
return -EFAULT;
if (bi.subdevice >= dev->n_subdevices || bi.subdevice < 0)
@@ -714,9 +812,8 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
bi.bytes_read = comedi_buf_read_alloc(async, bi.bytes_read);
comedi_buf_read_free(async, bi.bytes_read);
- if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR |
- SRF_RUNNING))
- && async->buf_write_count == async->buf_read_count) {
+ if (comedi_is_subdevice_idle(s) &&
+ async->buf_write_count == async->buf_read_count) {
do_become_nonbusy(dev, s);
}
}
@@ -734,103 +831,12 @@ copyback_position:
bi.buf_read_ptr = async->buf_read_ptr;
copyback:
- if (copy_to_user(arg, &bi, sizeof(struct comedi_bufinfo)))
+ if (copy_to_user(arg, &bi, sizeof(bi)))
return -EFAULT;
return 0;
}
-static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
- unsigned int *data, void *file);
-/*
- * COMEDI_INSNLIST
- * synchronous instructions
- *
- * arg:
- * pointer to sync cmd structure
- *
- * reads:
- * sync cmd struct at arg
- * instruction list
- * data (for writes)
- *
- * writes:
- * data (for reads)
- */
-/* arbitrary limits */
-#define MAX_SAMPLES 256
-static int do_insnlist_ioctl(struct comedi_device *dev,
- struct comedi_insnlist __user *arg, void *file)
-{
- struct comedi_insnlist insnlist;
- struct comedi_insn *insns = NULL;
- unsigned int *data = NULL;
- int i = 0;
- int ret = 0;
-
- if (copy_from_user(&insnlist, arg, sizeof(struct comedi_insnlist)))
- return -EFAULT;
-
- data = kmalloc(sizeof(unsigned int) * MAX_SAMPLES, GFP_KERNEL);
- if (!data) {
- DPRINTK("kmalloc failed\n");
- ret = -ENOMEM;
- goto error;
- }
-
- insns =
- kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
- if (!insns) {
- DPRINTK("kmalloc failed\n");
- ret = -ENOMEM;
- goto error;
- }
-
- if (copy_from_user(insns, insnlist.insns,
- sizeof(struct comedi_insn) * insnlist.n_insns)) {
- DPRINTK("copy_from_user failed\n");
- ret = -EFAULT;
- goto error;
- }
-
- for (i = 0; i < insnlist.n_insns; i++) {
- if (insns[i].n > MAX_SAMPLES) {
- DPRINTK("number of samples too large\n");
- ret = -EINVAL;
- goto error;
- }
- if (insns[i].insn & INSN_MASK_WRITE) {
- if (copy_from_user(data, insns[i].data,
- insns[i].n * sizeof(unsigned int))) {
- DPRINTK("copy_from_user failed\n");
- ret = -EFAULT;
- goto error;
- }
- }
- ret = parse_insn(dev, insns + i, data, file);
- if (ret < 0)
- goto error;
- if (insns[i].insn & INSN_MASK_READ) {
- if (copy_to_user(insns[i].data, data,
- insns[i].n * sizeof(unsigned int))) {
- DPRINTK("copy_to_user failed\n");
- ret = -EFAULT;
- goto error;
- }
- }
- if (need_resched())
- schedule();
- }
-
-error:
- kfree(insns);
- kfree(data);
-
- if (ret < 0)
- return ret;
- return i;
-}
-
static int check_insn_config_length(struct comedi_insn *insn,
unsigned int *data)
{
@@ -1062,6 +1068,94 @@ out:
}
/*
+ * COMEDI_INSNLIST
+ * synchronous instructions
+ *
+ * arg:
+ * pointer to sync cmd structure
+ *
+ * reads:
+ * sync cmd struct at arg
+ * instruction list
+ * data (for writes)
+ *
+ * writes:
+ * data (for reads)
+ */
+/* arbitrary limits */
+#define MAX_SAMPLES 256
+static int do_insnlist_ioctl(struct comedi_device *dev,
+ struct comedi_insnlist __user *arg, void *file)
+{
+ struct comedi_insnlist insnlist;
+ struct comedi_insn *insns = NULL;
+ unsigned int *data = NULL;
+ int i = 0;
+ int ret = 0;
+
+ if (copy_from_user(&insnlist, arg, sizeof(insnlist)))
+ return -EFAULT;
+
+ data = kmalloc(sizeof(unsigned int) * MAX_SAMPLES, GFP_KERNEL);
+ if (!data) {
+ DPRINTK("kmalloc failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
+ if (!insns) {
+ DPRINTK("kmalloc failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (copy_from_user(insns, insnlist.insns,
+ sizeof(*insns) * insnlist.n_insns)) {
+ DPRINTK("copy_from_user failed\n");
+ ret = -EFAULT;
+ goto error;
+ }
+
+ for (i = 0; i < insnlist.n_insns; i++) {
+ if (insns[i].n > MAX_SAMPLES) {
+ DPRINTK("number of samples too large\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ if (insns[i].insn & INSN_MASK_WRITE) {
+ if (copy_from_user(data, insns[i].data,
+ insns[i].n * sizeof(unsigned int))) {
+ DPRINTK("copy_from_user failed\n");
+ ret = -EFAULT;
+ goto error;
+ }
+ }
+ ret = parse_insn(dev, insns + i, data, file);
+ if (ret < 0)
+ goto error;
+ if (insns[i].insn & INSN_MASK_READ) {
+ if (copy_to_user(insns[i].data, data,
+ insns[i].n * sizeof(unsigned int))) {
+ DPRINTK("copy_to_user failed\n");
+ ret = -EFAULT;
+ goto error;
+ }
+ }
+ if (need_resched())
+ schedule();
+ }
+
+error:
+ kfree(insns);
+ kfree(data);
+
+ if (ret < 0)
+ return ret;
+ return i;
+}
+
+/*
* COMEDI_INSN
* synchronous instructions
*
@@ -1088,7 +1182,7 @@ static int do_insn_ioctl(struct comedi_device *dev,
goto error;
}
- if (copy_from_user(&insn, arg, sizeof(struct comedi_insn))) {
+ if (copy_from_user(&insn, arg, sizeof(insn))) {
ret = -EFAULT;
goto error;
}
@@ -1123,17 +1217,6 @@ error:
return ret;
}
-static void comedi_set_subdevice_runflags(struct comedi_subdevice *s,
- unsigned mask, unsigned bits)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&s->spin_lock, flags);
- s->runflags &= ~mask;
- s->runflags |= (bits & mask);
- spin_unlock_irqrestore(&s->spin_lock, flags);
-}
-
static int do_cmd_ioctl(struct comedi_device *dev,
struct comedi_cmd __user *arg, void *file)
{
@@ -1143,7 +1226,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
int ret = 0;
unsigned int __user *user_chanlist;
- if (copy_from_user(&cmd, arg, sizeof(struct comedi_cmd))) {
+ if (copy_from_user(&cmd, arg, sizeof(cmd))) {
DPRINTK("bad cmd address\n");
return -EFAULT;
}
@@ -1233,7 +1316,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
/* restore chanlist pointer before copying back */
cmd.chanlist = (unsigned int __force *)user_chanlist;
cmd.data = NULL;
- if (copy_to_user(arg, &cmd, sizeof(struct comedi_cmd))) {
+ if (copy_to_user(arg, &cmd, sizeof(cmd))) {
DPRINTK("fault writing cmd\n");
ret = -EFAULT;
goto cleanup;
@@ -1248,7 +1331,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
goto cleanup;
}
- comedi_reset_async_buf(async);
+ comedi_buf_reset(async);
async->cb_mask =
COMEDI_CB_EOA | COMEDI_CB_BLOCK | COMEDI_CB_ERROR |
@@ -1292,7 +1375,7 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
unsigned int *chanlist = NULL;
unsigned int __user *user_chanlist;
- if (copy_from_user(&cmd, arg, sizeof(struct comedi_cmd))) {
+ if (copy_from_user(&cmd, arg, sizeof(cmd))) {
DPRINTK("bad cmd address\n");
return -EFAULT;
}
@@ -1356,7 +1439,7 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
/* restore chanlist pointer before copying back */
cmd.chanlist = (unsigned int __force *)user_chanlist;
- if (copy_to_user(arg, &cmd, sizeof(struct comedi_cmd))) {
+ if (copy_to_user(arg, &cmd, sizeof(cmd))) {
DPRINTK("bad cmd address\n");
ret = -EFAULT;
goto cleanup;
@@ -1533,22 +1616,28 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev;
+ struct comedi_file_info *info = comedi_file_info_from_minor(minor);
+ struct comedi_device *dev = comedi_dev_from_file_info(info);
int rc;
- if (dev_file_info == NULL || dev_file_info->device == NULL)
+ if (!dev)
return -ENODEV;
- dev = dev_file_info->device;
mutex_lock(&dev->mutex);
/* Device config is special, because it must work on
* an unconfigured device. */
if (cmd == COMEDI_DEVCONFIG) {
+ if (minor >= COMEDI_NUM_BOARD_MINORS) {
+ /* Device config not appropriate on non-board minors. */
+ rc = -ENOTTY;
+ goto done;
+ }
rc = do_devconfig_ioctl(dev,
(struct comedi_devconfig __user *)arg);
+ if (rc == 0)
+ /* Evade comedi_auto_unconfig(). */
+ info->hardware_device = NULL;
goto done;
}
@@ -1621,19 +1710,6 @@ done:
return rc;
}
-static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- int ret = 0;
-
- if ((comedi_get_subdevice_runflags(s) & SRF_RUNNING) && s->cancel)
- ret = s->cancel(dev, s);
-
- do_become_nonbusy(dev, s);
-
- return ret;
-}
-
-
static void comedi_vm_open(struct vm_area_struct *area)
{
struct comedi_async *async;
@@ -1668,40 +1744,38 @@ static struct vm_operations_struct comedi_vm_ops = {
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_async *async = NULL;
+ struct comedi_file_info *info = comedi_file_info_from_minor(minor);
+ struct comedi_device *dev = comedi_dev_from_file_info(info);
+ struct comedi_subdevice *s;
+ struct comedi_async *async;
unsigned long start = vma->vm_start;
unsigned long size;
int n_pages;
int i;
int retval;
- struct comedi_subdevice *s;
- struct comedi_device_file_info *dev_file_info;
- struct comedi_device *dev;
- dev_file_info = comedi_get_device_file_info(minor);
- if (dev_file_info == NULL)
- return -ENODEV;
- dev = dev_file_info->device;
- if (dev == NULL)
+ if (!dev)
return -ENODEV;
mutex_lock(&dev->mutex);
+
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
retval = -ENODEV;
goto done;
}
+
if (vma->vm_flags & VM_WRITE)
- s = comedi_get_write_subdevice(dev_file_info);
+ s = comedi_write_subdevice(info);
else
- s = comedi_get_read_subdevice(dev_file_info);
-
- if (s == NULL) {
+ s = comedi_read_subdevice(info);
+ if (!s) {
retval = -EINVAL;
goto done;
}
+
async = s->async;
- if (async == NULL) {
+ if (!async) {
retval = -EINVAL;
goto done;
}
@@ -1724,11 +1798,11 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
n_pages = size >> PAGE_SHIFT;
for (i = 0; i < n_pages; ++i) {
+ struct comedi_buf_page *buf = &async->buf_page_list[i];
+
if (remap_pfn_range(vma, start,
- page_to_pfn(virt_to_page
- (async->buf_page_list
- [i].virt_addr)), PAGE_SIZE,
- PAGE_SHARED)) {
+ page_to_pfn(virt_to_page(buf->virt_addr)),
+ PAGE_SIZE, PAGE_SHARED)) {
retval = -EAGAIN;
goto done;
}
@@ -1750,50 +1824,40 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_subdevice *read_subdev;
- struct comedi_subdevice *write_subdev;
- struct comedi_device_file_info *dev_file_info;
- struct comedi_device *dev;
- dev_file_info = comedi_get_device_file_info(minor);
+ struct comedi_file_info *info = comedi_file_info_from_minor(minor);
+ struct comedi_device *dev = comedi_dev_from_file_info(info);
+ struct comedi_subdevice *s;
- if (dev_file_info == NULL)
- return -ENODEV;
- dev = dev_file_info->device;
- if (dev == NULL)
+ if (!dev)
return -ENODEV;
mutex_lock(&dev->mutex);
+
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
- mutex_unlock(&dev->mutex);
- return 0;
+ goto done;
}
- mask = 0;
- read_subdev = comedi_get_read_subdevice(dev_file_info);
- if (read_subdev) {
- poll_wait(file, &read_subdev->async->wait_head, wait);
- if (!read_subdev->busy
- || comedi_buf_read_n_available(read_subdev->async) > 0
- || !(comedi_get_subdevice_runflags(read_subdev) &
- SRF_RUNNING)) {
+ s = comedi_read_subdevice(info);
+ if (s && s->async) {
+ poll_wait(file, &s->async->wait_head, wait);
+ if (!s->busy || !comedi_is_subdevice_running(s) ||
+ comedi_buf_read_n_available(s->async) > 0)
mask |= POLLIN | POLLRDNORM;
- }
}
- write_subdev = comedi_get_write_subdevice(dev_file_info);
- if (write_subdev) {
- poll_wait(file, &write_subdev->async->wait_head, wait);
- comedi_buf_write_alloc(write_subdev->async,
- write_subdev->async->prealloc_bufsz);
- if (!write_subdev->busy
- || !(comedi_get_subdevice_runflags(write_subdev) &
- SRF_RUNNING)
- || comedi_buf_write_n_allocated(write_subdev->async) >=
- bytes_per_sample(write_subdev->async->subdevice)) {
+
+ s = comedi_write_subdevice(info);
+ if (s && s->async) {
+ unsigned int bps = bytes_per_sample(s->async->subdevice);
+
+ poll_wait(file, &s->async->wait_head, wait);
+ comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz);
+ if (!s->busy || !comedi_is_subdevice_running(s) ||
+ comedi_buf_write_n_allocated(s->async) >= bps)
mask |= POLLOUT | POLLWRNORM;
- }
}
+done:
mutex_unlock(&dev->mutex);
return mask;
}
@@ -1806,53 +1870,38 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info;
- struct comedi_device *dev;
- dev_file_info = comedi_get_device_file_info(minor);
+ struct comedi_file_info *info = comedi_file_info_from_minor(minor);
+ struct comedi_device *dev = comedi_dev_from_file_info(info);
- if (dev_file_info == NULL)
- return -ENODEV;
- dev = dev_file_info->device;
- if (dev == NULL)
+ if (!dev)
return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
- retval = -ENODEV;
- goto done;
+ return -ENODEV;
}
- s = comedi_get_write_subdevice(dev_file_info);
- if (s == NULL) {
- retval = -EIO;
- goto done;
- }
+ s = comedi_write_subdevice(info);
+ if (!s || !s->async)
+ return -EIO;
+
async = s->async;
- if (!nbytes) {
- retval = 0;
- goto done;
- }
- if (!s->busy) {
- retval = 0;
- goto done;
- }
- if (s->busy != file) {
- retval = -EACCES;
- goto done;
- }
+ if (!s->busy || !nbytes)
+ return 0;
+ if (s->busy != file)
+ return -EACCES;
+
add_wait_queue(&async->wait_head, &wait);
while (nbytes > 0 && !retval) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
+ if (!comedi_is_subdevice_running(s)) {
if (count == 0) {
- if (comedi_get_subdevice_runflags(s) &
- SRF_ERROR) {
+ if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
- } else {
+ else
retval = 0;
- }
do_become_nonbusy(dev, s);
}
break;
@@ -1905,7 +1954,6 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
set_current_state(TASK_RUNNING);
remove_wait_queue(&async->wait_head, &wait);
-done:
return count ? count : retval;
}
@@ -1917,40 +1965,26 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info;
- struct comedi_device *dev;
- dev_file_info = comedi_get_device_file_info(minor);
+ struct comedi_file_info *info = comedi_file_info_from_minor(minor);
+ struct comedi_device *dev = comedi_dev_from_file_info(info);
- if (dev_file_info == NULL)
- return -ENODEV;
- dev = dev_file_info->device;
- if (dev == NULL)
+ if (!dev)
return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
- retval = -ENODEV;
- goto done;
+ return -ENODEV;
}
- s = comedi_get_read_subdevice(dev_file_info);
- if (s == NULL) {
- retval = -EIO;
- goto done;
- }
+ s = comedi_read_subdevice(info);
+ if (!s || !s->async)
+ return -EIO;
+
async = s->async;
- if (!nbytes) {
- retval = 0;
- goto done;
- }
- if (!s->busy) {
- retval = 0;
- goto done;
- }
- if (s->busy != file) {
- retval = -EACCES;
- goto done;
- }
+ if (!s->busy || !nbytes)
+ return 0;
+ if (s->busy != file)
+ return -EACCES;
add_wait_queue(&async->wait_head, &wait);
while (nbytes > 0 && !retval) {
@@ -1967,14 +2001,12 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
n = m;
if (n == 0) {
- if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
+ if (!comedi_is_subdevice_running(s)) {
do_become_nonbusy(dev, s);
- if (comedi_get_subdevice_runflags(s) &
- SRF_ERROR) {
+ if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
- } else {
+ else
retval = 0;
- }
break;
}
if (file->f_flags & O_NONBLOCK) {
@@ -2012,48 +2044,22 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
buf += n;
break; /* makes device work like a pipe */
}
- if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING)) &&
+ if (comedi_is_subdevice_idle(s) &&
async->buf_read_count - async->buf_write_count == 0) {
do_become_nonbusy(dev, s);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&async->wait_head, &wait);
-done:
return count ? count : retval;
}
-/*
- This function restores a subdevice to an idle state.
- */
-static void do_become_nonbusy(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
-
- comedi_set_subdevice_runflags(s, SRF_RUNNING, 0);
- if (async) {
- comedi_reset_async_buf(async);
- async->inttrig = NULL;
- kfree(async->cmd.chanlist);
- async->cmd.chanlist = NULL;
- } else {
- dev_err(dev->class_dev,
- "BUG: (?) do_become_nonbusy called with async=NULL\n");
- }
-
- s->busy = NULL;
-}
-
static int comedi_open(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev =
- dev_file_info ? dev_file_info->device : NULL;
+ struct comedi_device *dev = comedi_dev_from_minor(minor);
- if (dev == NULL) {
+ if (!dev) {
DPRINTK("invalid minor number\n");
return -ENODEV;
}
@@ -2125,19 +2131,25 @@ ok:
return 0;
}
+static int comedi_fasync(int fd, struct file *file, int on)
+{
+ const unsigned minor = iminor(file->f_dentry->d_inode);
+ struct comedi_device *dev = comedi_dev_from_minor(minor);
+
+ if (!dev)
+ return -ENODEV;
+
+ return fasync_helper(fd, file, on, &dev->async_queue);
+}
+
static int comedi_close(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
+ struct comedi_device *dev = comedi_dev_from_minor(minor);
struct comedi_subdevice *s = NULL;
int i;
- struct comedi_device_file_info *dev_file_info;
- struct comedi_device *dev;
- dev_file_info = comedi_get_device_file_info(minor);
- if (dev_file_info == NULL)
- return -ENODEV;
- dev = dev_file_info->device;
- if (dev == NULL)
+ if (!dev)
return -ENODEV;
mutex_lock(&dev->mutex);
@@ -2169,22 +2181,6 @@ static int comedi_close(struct inode *inode, struct file *file)
return 0;
}
-static int comedi_fasync(int fd, struct file *file, int on)
-{
- const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info;
- struct comedi_device *dev;
- dev_file_info = comedi_get_device_file_info(minor);
-
- if (dev_file_info == NULL)
- return -ENODEV;
- dev = dev_file_info->device;
- if (dev == NULL)
- return -ENODEV;
-
- return fasync_helper(fd, file, on, &dev->async_queue);
-}
-
static const struct file_operations comedi_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = comedi_unlocked_ioctl,
@@ -2202,99 +2198,6 @@ static const struct file_operations comedi_fops = {
static struct class *comedi_class;
static struct cdev comedi_cdev;
-static void comedi_cleanup_legacy_minors(void)
-{
- unsigned i;
-
- for (i = 0; i < comedi_num_legacy_minors; i++)
- comedi_free_board_minor(i);
-}
-
-static int __init comedi_init(void)
-{
- int i;
- int retval;
-
- pr_info("comedi: version " COMEDI_RELEASE " - http://www.comedi.org\n");
-
- if (comedi_num_legacy_minors < 0 ||
- comedi_num_legacy_minors > COMEDI_NUM_BOARD_MINORS) {
- pr_err("comedi: error: invalid value for module parameter \"comedi_num_legacy_minors\". Valid values are 0 through %i.\n",
- COMEDI_NUM_BOARD_MINORS);
- return -EINVAL;
- }
-
- /*
- * comedi is unusable if both comedi_autoconfig and
- * comedi_num_legacy_minors are zero, so we might as well adjust the
- * defaults in that case
- */
- if (comedi_autoconfig == 0 && comedi_num_legacy_minors == 0)
- comedi_num_legacy_minors = 16;
-
- memset(comedi_file_info_table, 0,
- sizeof(struct comedi_device_file_info *) * COMEDI_NUM_MINORS);
-
- retval = register_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS, "comedi");
- if (retval)
- return -EIO;
- cdev_init(&comedi_cdev, &comedi_fops);
- comedi_cdev.owner = THIS_MODULE;
- kobject_set_name(&comedi_cdev.kobj, "comedi");
- if (cdev_add(&comedi_cdev, MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS)) {
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS);
- return -EIO;
- }
- comedi_class = class_create(THIS_MODULE, "comedi");
- if (IS_ERR(comedi_class)) {
- pr_err("comedi: failed to create class\n");
- cdev_del(&comedi_cdev);
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS);
- return PTR_ERR(comedi_class);
- }
-
- comedi_class->dev_attrs = comedi_dev_attrs;
-
- /* XXX requires /proc interface */
- comedi_proc_init();
-
- /* create devices files for legacy/manual use */
- for (i = 0; i < comedi_num_legacy_minors; i++) {
- int minor;
- minor = comedi_alloc_board_minor(NULL);
- if (minor < 0) {
- comedi_cleanup_legacy_minors();
- cdev_del(&comedi_cdev);
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS);
- return minor;
- }
- }
-
- return 0;
-}
-
-static void __exit comedi_cleanup(void)
-{
- int i;
-
- comedi_cleanup_legacy_minors();
- for (i = 0; i < COMEDI_NUM_MINORS; ++i)
- BUG_ON(comedi_file_info_table[i]);
-
- class_destroy(comedi_class);
- cdev_del(&comedi_cdev);
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS);
-
- comedi_proc_cleanup();
-}
-
-module_init(comedi_init);
-module_exit(comedi_cleanup);
-
void comedi_error(const struct comedi_device *dev, const char *s)
{
dev_err(dev->class_dev, "%s: %s\n", dev->driver->driver_name, s);
@@ -2309,7 +2212,7 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
/* DPRINTK("comedi_event 0x%x\n",mask); */
- if ((comedi_get_subdevice_runflags(s) & SRF_RUNNING) == 0)
+ if (!comedi_is_subdevice_running(s))
return;
if (s->
@@ -2344,40 +2247,9 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
}
EXPORT_SYMBOL(comedi_event);
-unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
-{
- unsigned long flags;
- unsigned runflags;
-
- spin_lock_irqsave(&s->spin_lock, flags);
- runflags = s->runflags;
- spin_unlock_irqrestore(&s->spin_lock, flags);
- return runflags;
-}
-EXPORT_SYMBOL(comedi_get_subdevice_runflags);
-
-static int is_device_busy(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int i;
-
- if (!dev->attached)
- return 0;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- if (s->busy)
- return 1;
- if (s->async && s->async->mmap_count)
- return 1;
- }
-
- return 0;
-}
-
static void comedi_device_init(struct comedi_device *dev)
{
- memset(dev, 0, sizeof(struct comedi_device));
+ memset(dev, 0, sizeof(*dev));
spin_lock_init(&dev->spinlock);
mutex_init(&dev->mutex);
dev->minor = -1;
@@ -2395,11 +2267,11 @@ static void comedi_device_cleanup(struct comedi_device *dev)
int comedi_alloc_board_minor(struct device *hardware_device)
{
- struct comedi_device_file_info *info;
+ struct comedi_file_info *info;
struct device *csdev;
unsigned i;
- info = kzalloc(sizeof(struct comedi_device_file_info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
return -ENOMEM;
info->device = kzalloc(sizeof(struct comedi_device), GFP_KERNEL);
@@ -2436,7 +2308,7 @@ int comedi_alloc_board_minor(struct device *hardware_device)
void comedi_free_board_minor(unsigned minor)
{
- struct comedi_device_file_info *info;
+ struct comedi_file_info *info;
BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
spin_lock(&comedi_file_info_table_lock);
@@ -2461,7 +2333,7 @@ void comedi_free_board_minor(unsigned minor)
int comedi_find_board_minor(struct device *hardware_device)
{
int minor;
- struct comedi_device_file_info *info;
+ struct comedi_file_info *info;
for (minor = 0; minor < COMEDI_NUM_BOARD_MINORS; minor++) {
spin_lock(&comedi_file_info_table_lock);
@@ -2475,19 +2347,21 @@ int comedi_find_board_minor(struct device *hardware_device)
return -ENODEV;
}
-int comedi_alloc_subdevice_minor(struct comedi_device *dev,
- struct comedi_subdevice *s)
+int comedi_alloc_subdevice_minor(struct comedi_subdevice *s)
{
- struct comedi_device_file_info *info;
+ struct comedi_device *dev = s->device;
+ struct comedi_file_info *info;
struct device *csdev;
unsigned i;
- info = kmalloc(sizeof(struct comedi_device_file_info), GFP_KERNEL);
- if (info == NULL)
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
return -ENOMEM;
info->device = dev;
- info->read_subdevice = s;
- info->write_subdevice = s;
+ if (s->subdev_flags & SDF_CMD_READ)
+ info->read_subdevice = s;
+ if (s->subdev_flags & SDF_CMD_WRITE)
+ info->write_subdevice = s;
spin_lock(&comedi_file_info_table_lock);
for (i = COMEDI_FIRST_SUBDEVICE_MINOR; i < COMEDI_NUM_MINORS; ++i) {
if (comedi_file_info_table[i] == NULL) {
@@ -2498,23 +2372,23 @@ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
spin_unlock(&comedi_file_info_table_lock);
if (i == COMEDI_NUM_MINORS) {
kfree(info);
- pr_err("comedi: error: ran out of minor numbers for board device files.\n");
+ pr_err("comedi: error: ran out of minor numbers for subdevice files.\n");
return -EBUSY;
}
s->minor = i;
csdev = device_create(comedi_class, dev->class_dev,
MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i_subd%i",
- dev->minor, (int)(s - dev->subdevices));
+ dev->minor, s->index);
if (!IS_ERR(csdev))
s->class_dev = csdev;
dev_set_drvdata(csdev, info);
- return i;
+ return 0;
}
void comedi_free_subdevice_minor(struct comedi_subdevice *s)
{
- struct comedi_device_file_info *info;
+ struct comedi_file_info *info;
if (s == NULL)
return;
@@ -2536,14 +2410,90 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s)
kfree(info);
}
-struct comedi_device_file_info *comedi_get_device_file_info(unsigned minor)
+static void comedi_cleanup_board_minors(void)
{
- struct comedi_device_file_info *info;
+ unsigned i;
- BUG_ON(minor >= COMEDI_NUM_MINORS);
- spin_lock(&comedi_file_info_table_lock);
- info = comedi_file_info_table[minor];
- spin_unlock(&comedi_file_info_table_lock);
- return info;
+ for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++)
+ comedi_free_board_minor(i);
+}
+
+static int __init comedi_init(void)
+{
+ int i;
+ int retval;
+
+ pr_info("comedi: version " COMEDI_RELEASE " - http://www.comedi.org\n");
+
+ if (comedi_num_legacy_minors < 0 ||
+ comedi_num_legacy_minors > COMEDI_NUM_BOARD_MINORS) {
+ pr_err("comedi: error: invalid value for module parameter \"comedi_num_legacy_minors\". Valid values are 0 through %i.\n",
+ COMEDI_NUM_BOARD_MINORS);
+ return -EINVAL;
+ }
+
+ memset(comedi_file_info_table, 0,
+ sizeof(struct comedi_file_info *) * COMEDI_NUM_MINORS);
+
+ retval = register_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+ COMEDI_NUM_MINORS, "comedi");
+ if (retval)
+ return -EIO;
+ cdev_init(&comedi_cdev, &comedi_fops);
+ comedi_cdev.owner = THIS_MODULE;
+ kobject_set_name(&comedi_cdev.kobj, "comedi");
+ if (cdev_add(&comedi_cdev, MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS)) {
+ unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+ COMEDI_NUM_MINORS);
+ return -EIO;
+ }
+ comedi_class = class_create(THIS_MODULE, "comedi");
+ if (IS_ERR(comedi_class)) {
+ pr_err("comedi: failed to create class\n");
+ cdev_del(&comedi_cdev);
+ unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+ COMEDI_NUM_MINORS);
+ return PTR_ERR(comedi_class);
+ }
+
+ comedi_class->dev_attrs = comedi_dev_attrs;
+
+ /* XXX requires /proc interface */
+ comedi_proc_init();
+
+ /* create device files for legacy/manual use */
+ for (i = 0; i < comedi_num_legacy_minors; i++) {
+ int minor;
+ minor = comedi_alloc_board_minor(NULL);
+ if (minor < 0) {
+ comedi_cleanup_board_minors();
+ cdev_del(&comedi_cdev);
+ unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+ COMEDI_NUM_MINORS);
+ return minor;
+ }
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(comedi_get_device_file_info);
+module_init(comedi_init);
+
+static void __exit comedi_cleanup(void)
+{
+ int i;
+
+ comedi_cleanup_board_minors();
+ for (i = 0; i < COMEDI_NUM_MINORS; ++i)
+ BUG_ON(comedi_file_info_table[i]);
+
+ class_destroy(comedi_class);
+ cdev_del(&comedi_cdev);
+ unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS);
+
+ comedi_proc_cleanup();
+}
+module_exit(comedi_cleanup);
+
+MODULE_AUTHOR("http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi core module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index e70ef0515d9a..b3743135f4aa 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -8,18 +8,43 @@
*/
int do_rangeinfo_ioctl(struct comedi_device *dev,
struct comedi_rangeinfo __user *arg);
-int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
int comedi_alloc_board_minor(struct device *hardware_device);
void comedi_free_board_minor(unsigned minor);
int comedi_find_board_minor(struct device *hardware_device);
-void comedi_reset_async_buf(struct comedi_async *async);
+int comedi_alloc_subdevice_minor(struct comedi_subdevice *s);
+void comedi_free_subdevice_minor(struct comedi_subdevice *s);
+
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size);
+void comedi_buf_reset(struct comedi_async *async);
+unsigned int comedi_buf_write_n_allocated(struct comedi_async *async);
extern unsigned int comedi_default_buf_size_kb;
extern unsigned int comedi_default_buf_maxsize_kb;
-extern bool comedi_autoconfig;
+
+/* drivers.c */
+
extern struct comedi_driver *comedi_drivers;
+int insn_inval(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *);
+
+void comedi_device_detach(struct comedi_device *);
+int comedi_device_attach(struct comedi_device *, struct comedi_devconfig *);
+
+#ifdef CONFIG_PROC_FS
+
+/* proc.c */
+
+void comedi_proc_init(void);
+void comedi_proc_cleanup(void);
+#else
+static inline void comedi_proc_init(void)
+{
+}
+static inline void comedi_proc_cleanup(void)
+{
+}
+#endif
+
#endif /* _COMEDI_INTERNAL_H */
diff --git a/drivers/staging/comedi/comedi_pci.c b/drivers/staging/comedi/comedi_pci.c
new file mode 100644
index 000000000000..37d2e4677360
--- /dev/null
+++ b/drivers/staging/comedi/comedi_pci.c
@@ -0,0 +1,140 @@
+/*
+ * comedi_pci.c
+ * Comedi PCI driver specific functions.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pci.h>
+
+#include "comedidev.h"
+
+/**
+ * comedi_to_pci_dev() - comedi_device pointer to pci_dev pointer.
+ * @dev: comedi_device struct
+ */
+struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev)
+{
+ return dev->hw_dev ? to_pci_dev(dev->hw_dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(comedi_to_pci_dev);
+
+/**
+ * comedi_pci_enable() - Enable the PCI device and request the regions.
+ * @pcidev: pci_dev struct
+ * @res_name: name for the requested resource
+ */
+int comedi_pci_enable(struct pci_dev *pcidev, const char *res_name)
+{
+ int rc;
+
+ rc = pci_enable_device(pcidev);
+ if (rc < 0)
+ return rc;
+
+ rc = pci_request_regions(pcidev, res_name);
+ if (rc < 0)
+ pci_disable_device(pcidev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(comedi_pci_enable);
+
+/**
+ * comedi_pci_disable() - Release the regions and disable the PCI device.
+ * @pcidev: pci_dev struct
+ *
+ * This must be matched with a previous successful call to comedi_pci_enable().
+ */
+void comedi_pci_disable(struct pci_dev *pcidev)
+{
+ pci_release_regions(pcidev);
+ pci_disable_device(pcidev);
+}
+EXPORT_SYMBOL_GPL(comedi_pci_disable);
+
+/**
+ * comedi_pci_auto_config() - Configure/probe a comedi PCI driver.
+ * @pcidev: pci_dev struct
+ * @driver: comedi_driver struct
+ *
+ * Typically called from the pci_driver (*probe) function.
+ */
+int comedi_pci_auto_config(struct pci_dev *pcidev,
+ struct comedi_driver *driver)
+{
+ return comedi_auto_config(&pcidev->dev, driver, 0);
+}
+EXPORT_SYMBOL_GPL(comedi_pci_auto_config);
+
+/**
+ * comedi_pci_auto_unconfig() - Unconfigure/remove a comedi PCI driver.
+ * @pcidev: pci_dev struct
+ *
+ * Typically called from the pci_driver (*remove) function.
+ */
+void comedi_pci_auto_unconfig(struct pci_dev *pcidev)
+{
+ comedi_auto_unconfig(&pcidev->dev);
+}
+EXPORT_SYMBOL_GPL(comedi_pci_auto_unconfig);
+
+/**
+ * comedi_pci_driver_register() - Register a comedi PCI driver.
+ * @comedi_driver: comedi_driver struct
+ * @pci_driver: pci_driver struct
+ *
+ * This function is used for the module_init() of comedi PCI drivers.
+ * Do not call it directly, use the module_comedi_pci_driver() helper
+ * macro instead.
+ */
+int comedi_pci_driver_register(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver)
+{
+ int ret;
+
+ ret = comedi_driver_register(comedi_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = pci_register_driver(pci_driver);
+ if (ret < 0) {
+ comedi_driver_unregister(comedi_driver);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(comedi_pci_driver_register);
+
+/**
+ * comedi_pci_driver_unregister() - Unregister a comedi PCI driver.
+ * @comedi_driver: comedi_driver struct
+ * @pci_driver: pci_driver struct
+ *
+ * This function is used for the module_exit() of comedi PCI drivers.
+ * Do not call it directly, use the module_comedi_pci_driver() helper
+ * macro instead.
+ */
+void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver)
+{
+ pci_unregister_driver(pci_driver);
+ comedi_driver_unregister(comedi_driver);
+}
+EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
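As an aside, a minimal sketch of how a comedi PCI driver would typically hook into the helpers added by this file; the driver, board, attach/detach functions and the AMCC device ID used here are hypothetical, not part of this patch.

/* --- illustrative sketch, not part of the patch --- */
#include <linux/module.h>
#include <linux/pci.h>

#include "comedidev.h"

static int my_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	int ret;

	if (!pcidev)
		return -ENODEV;

	dev->board_name = "my_pci_board";

	/* enable the PCI device and request its I/O and memory regions */
	ret = comedi_pci_enable(pcidev, dev->board_name);
	if (ret)
		return ret;

	/* ... allocate and set up the subdevices here ... */
	return 0;
}

static void my_detach(struct comedi_device *dev)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);

	if (pcidev)
		comedi_pci_disable(pcidev);
}

static struct comedi_driver my_comedi_driver = {
	.driver_name	= "my_pci_board",
	.module		= THIS_MODULE,
	.auto_attach	= my_auto_attach,
	.detach		= my_detach,
};

static int my_pci_probe(struct pci_dev *pcidev,
			const struct pci_device_id *id)
{
	return comedi_pci_auto_config(pcidev, &my_comedi_driver);
}

static const struct pci_device_id my_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x0001) },	/* hypothetical ID */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, my_pci_table);

static struct pci_driver my_pci_driver = {
	.name		= "my_pci_board",
	.id_table	= my_pci_table,
	.probe		= my_pci_probe,
	.remove		= comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(my_comedi_driver, my_pci_driver);

MODULE_LICENSE("GPL");

Note that module_comedi_pci_driver() registers the comedi_driver first and the pci_driver second, and unwinds in the opposite order, matching comedi_pci_driver_register()/comedi_pci_driver_unregister() above.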
diff --git a/drivers/staging/comedi/comedi_pcmcia.c b/drivers/staging/comedi/comedi_pcmcia.c
new file mode 100644
index 000000000000..453ff3b28617
--- /dev/null
+++ b/drivers/staging/comedi/comedi_pcmcia.c
@@ -0,0 +1,160 @@
+/*
+ * comedi_pcmcia.c
+ * Comedi PCMCIA driver specific functions.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "comedidev.h"
+
+/**
+ * comedi_to_pcmcia_dev() - comedi_device pointer to pcmcia_device pointer.
+ * @dev: comedi_device struct
+ */
+struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev)
+{
+ return dev->hw_dev ? to_pcmcia_dev(dev->hw_dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(comedi_to_pcmcia_dev);
+
+static int comedi_pcmcia_conf_check(struct pcmcia_device *link,
+ void *priv_data)
+{
+ if (link->config_index == 0)
+ return -EINVAL;
+
+ return pcmcia_request_io(link);
+}
+
+/**
+ * comedi_pcmcia_enable() - Request the regions and enable the PCMCIA device.
+ * @dev: comedi_device struct
+ * @conf_check: optional callback to check the pcmcia_device configuration
+ *
+ * The comedi PCMCIA driver needs to set the link->config_flags, as
+ * appropriate for that driver, before calling this function in order
+ * to allow pcmcia_loop_config() to do its internal autoconfiguration.
+ */
+int comedi_pcmcia_enable(struct comedi_device *dev,
+ int (*conf_check)(struct pcmcia_device *, void *))
+{
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
+ int ret;
+
+ if (!link)
+ return -ENODEV;
+
+ if (!conf_check)
+ conf_check = comedi_pcmcia_conf_check;
+
+ ret = pcmcia_loop_config(link, conf_check, NULL);
+ if (ret)
+ return ret;
+
+ return pcmcia_enable_device(link);
+}
+EXPORT_SYMBOL_GPL(comedi_pcmcia_enable);
+
+/**
+ * comedi_pcmcia_disable() - Disable the PCMCIA device and release the regions.
+ * @dev: comedi_device struct
+ */
+void comedi_pcmcia_disable(struct comedi_device *dev)
+{
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
+
+ if (link)
+ pcmcia_disable_device(link);
+}
+EXPORT_SYMBOL_GPL(comedi_pcmcia_disable);
+
+/**
+ * comedi_pcmcia_auto_config() - Configure/probe a comedi PCMCIA driver.
+ * @link: pcmcia_device struct
+ * @driver: comedi_driver struct
+ *
+ * Typically called from the pcmcia_driver (*probe) function.
+ */
+int comedi_pcmcia_auto_config(struct pcmcia_device *link,
+ struct comedi_driver *driver)
+{
+ return comedi_auto_config(&link->dev, driver, 0);
+}
+EXPORT_SYMBOL_GPL(comedi_pcmcia_auto_config);
+
+/**
+ * comedi_pcmcia_auto_unconfig() - Unconfigure/remove a comedi PCMCIA driver.
+ * @link: pcmcia_device struct
+ *
+ * Typically called from the pcmcia_driver (*remove) function.
+ */
+void comedi_pcmcia_auto_unconfig(struct pcmcia_device *link)
+{
+ comedi_auto_unconfig(&link->dev);
+}
+EXPORT_SYMBOL_GPL(comedi_pcmcia_auto_unconfig);
+
+/**
+ * comedi_pcmcia_driver_register() - Register a comedi PCMCIA driver.
+ * @comedi_driver: comedi_driver struct
+ * @pcmcia_driver: pcmcia_driver struct
+ *
+ * This function is used for the module_init() of comedi PCMCIA drivers.
+ * Do not call it directly, use the module_comedi_pcmcia_driver() helper
+ * macro instead.
+ */
+int comedi_pcmcia_driver_register(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver)
+{
+ int ret;
+
+ ret = comedi_driver_register(comedi_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = pcmcia_register_driver(pcmcia_driver);
+ if (ret < 0) {
+ comedi_driver_unregister(comedi_driver);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(comedi_pcmcia_driver_register);
+
+/**
+ * comedi_pcmcia_driver_unregister() - Unregister a comedi PCMCIA driver.
+ * @comedi_driver: comedi_driver struct
+ * @pcmcia_driver: pcmcia_driver struct
+ *
+ * This function is used for the module_exit() of comedi PCMCIA drivers.
+ * Do not call it directly, use the module_comedi_pcmcia_driver() helper
+ * macro instead.
+ */
+void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver)
+{
+ pcmcia_unregister_driver(pcmcia_driver);
+ comedi_driver_unregister(comedi_driver);
+}
+EXPORT_SYMBOL_GPL(comedi_pcmcia_driver_unregister);
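For the config_flags requirement mentioned in the comedi_pcmcia_enable() comment above, here is a minimal auto_attach() fragment; the function name and the flag choices are illustrative only, not taken from this patch.

/* --- illustrative sketch, not part of the patch --- */
static int my_pcmcia_auto_attach(struct comedi_device *dev,
				 unsigned long context)
{
	struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
	int ret;

	if (!link)
		return -ENODEV;

	/* tell pcmcia_loop_config() how to autoconfigure the card ... */
	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

	/* ... then request the I/O regions and enable the device */
	ret = comedi_pcmcia_enable(dev, NULL);	/* NULL: default conf_check */
	if (ret)
		return ret;
	dev->iobase = link->resource[0]->start;

	/* ... allocate and set up the subdevices here ... */
	return 0;
}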
diff --git a/drivers/staging/comedi/comedi_usb.c b/drivers/staging/comedi/comedi_usb.c
new file mode 100644
index 000000000000..9d9716a248f1
--- /dev/null
+++ b/drivers/staging/comedi/comedi_usb.c
@@ -0,0 +1,108 @@
+/*
+ * comedi_usb.c
+ * Comedi USB driver specific functions.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb.h>
+
+#include "comedidev.h"
+
+/**
+ * comedi_to_usb_interface() - comedi_device pointer to usb_interface pointer.
+ * @dev: comedi_device struct
+ */
+struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev)
+{
+ return dev->hw_dev ? to_usb_interface(dev->hw_dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(comedi_to_usb_interface);
+
+/**
+ * comedi_usb_auto_config() - Configure/probe a comedi USB driver.
+ * @intf: usb_interface struct
+ * @driver: comedi_driver struct
+ * @context: driver specific data, passed to comedi_auto_config()
+ *
+ * Typically called from the usb_driver (*probe) function.
+ */
+int comedi_usb_auto_config(struct usb_interface *intf,
+ struct comedi_driver *driver,
+ unsigned long context)
+{
+ return comedi_auto_config(&intf->dev, driver, context);
+}
+EXPORT_SYMBOL_GPL(comedi_usb_auto_config);
+
+/**
+ * comedi_usb_auto_unconfig() - Unconfigure/disconnect a comedi USB driver.
+ * @intf: usb_interface struct
+ *
+ * Typically called from the usb_driver (*disconnect) function.
+ */
+void comedi_usb_auto_unconfig(struct usb_interface *intf)
+{
+ comedi_auto_unconfig(&intf->dev);
+}
+EXPORT_SYMBOL_GPL(comedi_usb_auto_unconfig);
+
+/**
+ * comedi_usb_driver_register() - Register a comedi USB driver.
+ * @comedi_driver: comedi_driver struct
+ * @usb_driver: usb_driver struct
+ *
+ * This function is used for the module_init() of comedi USB drivers.
+ * Do not call it directly, use the module_comedi_usb_driver() helper
+ * macro instead.
+ */
+int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver)
+{
+ int ret;
+
+ ret = comedi_driver_register(comedi_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_register(usb_driver);
+ if (ret < 0) {
+ comedi_driver_unregister(comedi_driver);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(comedi_usb_driver_register);
+
+/**
+ * comedi_usb_driver_unregister() - Unregister a comedi USB driver.
+ * @comedi_driver: comedi_driver struct
+ * @usb_driver: usb_driver struct
+ *
+ * This function is used for the module_exit() of comedi USB drivers.
+ * Do not call it directly, use the module_comedi_usb_driver() helper
+ * macro instead.
+ */
+void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver)
+{
+ usb_deregister(usb_driver);
+ comedi_driver_unregister(comedi_driver);
+}
+EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
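And the matching hookup on the USB side; as with the other sketches, the my_usb_* names, the comedi_driver definition and the VID/PID are hypothetical, but the probe/disconnect wiring follows the pattern these helpers are written for.

/* --- illustrative sketch, not part of the patch --- */
#include <linux/module.h>
#include <linux/usb.h>

#include "comedidev.h"

static int my_usb_auto_attach(struct comedi_device *dev,
			      unsigned long context)
{
	struct usb_interface *intf = comedi_to_usb_interface(dev);

	if (!intf)
		return -ENODEV;

	/* ... find endpoints on intf and set up the subdevices here ... */
	return 0;
}

static void my_usb_detach(struct comedi_device *dev)
{
	/* ... free URBs and private data here ... */
}

static struct comedi_driver my_comedi_usb_driver = {
	.driver_name	= "my_usb_board",
	.module		= THIS_MODULE,
	.auto_attach	= my_usb_auto_attach,
	.detach		= my_usb_detach,
};

static const struct usb_device_id my_usb_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },		/* hypothetical VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(usb, my_usb_table);

static int my_usb_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	/* id->driver_info is passed through as the auto_attach() context */
	return comedi_usb_auto_config(intf, &my_comedi_usb_driver,
				      id->driver_info);
}

static struct usb_driver my_usb_driver = {
	.name		= "my_usb_board",
	.id_table	= my_usb_table,
	.probe		= my_usb_probe,
	.disconnect	= comedi_usb_auto_unconfig,
};
module_comedi_usb_driver(my_comedi_usb_driver, my_usb_driver);

MODULE_LICENSE("GPL");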
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 692e1e615d44..f3a990b45df5 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -40,8 +40,6 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/timer.h>
-#include <linux/pci.h>
-#include <linux/usb.h>
#include "comedi.h"
@@ -55,28 +53,13 @@
COMEDI_MINORVERSION, COMEDI_MICROVERSION)
#define COMEDI_RELEASE VERSION
-/*
- * PCI Vendor IDs not in <linux/pci_ids.h>
- */
-#define PCI_VENDOR_ID_KOLTER 0x1001
-#define PCI_VENDOR_ID_ICP 0x104c
-#define PCI_VENDOR_ID_AMCC 0x10e8
-#define PCI_VENDOR_ID_DT 0x1116
-#define PCI_VENDOR_ID_IOTECH 0x1616
-#define PCI_VENDOR_ID_CONTEC 0x1221
-#define PCI_VENDOR_ID_CB 0x1307 /* Measurement Computing */
-#define PCI_VENDOR_ID_ADVANTECH 0x13fe
-#define PCI_VENDOR_ID_MEILHAUS 0x1402
-#define PCI_VENDOR_ID_RTD 0x1435
-#define PCI_VENDOR_ID_ADLINK 0x144a
-#define PCI_VENDOR_ID_AMPLICON 0x14dc
-
#define COMEDI_NUM_MINORS 0x100
#define COMEDI_NUM_BOARD_MINORS 0x30
#define COMEDI_FIRST_SUBDEVICE_MINOR COMEDI_NUM_BOARD_MINORS
struct comedi_subdevice {
struct comedi_device *device;
+ int index;
int type;
int n_chan;
int subdev_flags;
@@ -250,13 +233,6 @@ static inline const void *comedi_board(const struct comedi_device *dev)
return dev->board_ptr;
}
-struct comedi_device_file_info {
- struct comedi_device *device;
- struct comedi_subdevice *read_subdevice;
- struct comedi_subdevice *write_subdevice;
- struct device *hardware_device;
-};
-
#ifdef CONFIG_COMEDI_DEBUG
extern int comedi_debug;
#else
@@ -280,105 +256,13 @@ enum comedi_minor_bits {
static const unsigned COMEDI_SUBDEVICE_MINOR_SHIFT = 4;
static const unsigned COMEDI_SUBDEVICE_MINOR_OFFSET = 1;
-struct comedi_device_file_info *comedi_get_device_file_info(unsigned minor);
-
-static inline struct comedi_subdevice *comedi_get_read_subdevice(
- const struct comedi_device_file_info *info)
-{
- if (info->read_subdevice)
- return info->read_subdevice;
- if (info->device == NULL)
- return NULL;
- return info->device->read_subdev;
-}
-
-static inline struct comedi_subdevice *comedi_get_write_subdevice(
- const struct comedi_device_file_info *info)
-{
- if (info->write_subdevice)
- return info->write_subdevice;
- if (info->device == NULL)
- return NULL;
- return info->device->write_subdev;
-}
-
-int comedi_alloc_subdevices(struct comedi_device *, int);
-
-void comedi_device_detach(struct comedi_device *dev);
-int comedi_device_attach(struct comedi_device *dev,
- struct comedi_devconfig *it);
-int comedi_driver_register(struct comedi_driver *);
-int comedi_driver_unregister(struct comedi_driver *);
-
-/**
- * module_comedi_driver() - Helper macro for registering a comedi driver
- * @__comedi_driver: comedi_driver struct
- *
- * Helper macro for comedi drivers which do not do anything special in module
- * init/exit. This eliminates a lot of boilerplate. Each module may only use
- * this macro once, and calling it replaces module_init() and module_exit().
- */
-#define module_comedi_driver(__comedi_driver) \
- module_driver(__comedi_driver, comedi_driver_register, \
- comedi_driver_unregister)
-
-int comedi_pci_enable(struct pci_dev *, const char *);
-void comedi_pci_disable(struct pci_dev *);
-
-int comedi_pci_driver_register(struct comedi_driver *, struct pci_driver *);
-void comedi_pci_driver_unregister(struct comedi_driver *, struct pci_driver *);
-
-/**
- * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver
- * @__comedi_driver: comedi_driver struct
- * @__pci_driver: pci_driver struct
- *
- * Helper macro for comedi PCI drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \
- module_driver(__comedi_driver, comedi_pci_driver_register, \
- comedi_pci_driver_unregister, &(__pci_driver))
-
-struct usb_driver;
-
-int comedi_usb_driver_register(struct comedi_driver *, struct usb_driver *);
-void comedi_usb_driver_unregister(struct comedi_driver *, struct usb_driver *);
-
-/**
- * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
- * @__comedi_driver: comedi_driver struct
- * @__usb_driver: usb_driver struct
- *
- * Helper macro for comedi USB drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
- module_driver(__comedi_driver, comedi_usb_driver_register, \
- comedi_usb_driver_unregister, &(__usb_driver))
+struct comedi_device *comedi_dev_from_minor(unsigned minor);
void init_polling(void);
void cleanup_polling(void);
void start_polling(struct comedi_device *);
void stop_polling(struct comedi_device *);
-#ifdef CONFIG_PROC_FS
-void comedi_proc_init(void);
-void comedi_proc_cleanup(void);
-#else
-static inline void comedi_proc_init(void)
-{
-}
-
-static inline void comedi_proc_cleanup(void)
-{
-}
-#endif
-
/* subdevice runflags */
enum subdevice_runflags {
SRF_USER = 0x00000001,
@@ -389,10 +273,11 @@ enum subdevice_runflags {
SRF_RUNNING = 0x08000000
};
+bool comedi_is_subdevice_running(struct comedi_subdevice *s);
+
int comedi_check_chanlist(struct comedi_subdevice *s,
int n,
unsigned int *chanlist);
-unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s);
/* range stuff */
@@ -433,111 +318,186 @@ static inline unsigned int bytes_per_sample(const struct comedi_subdevice *subd)
return sizeof(short);
}
-/* must be used in attach to set dev->hw_dev if you wish to dma directly
-into comedi's buffer */
-static inline void comedi_set_hw_dev(struct comedi_device *dev,
- struct device *hw_dev)
-{
- if (dev->hw_dev == hw_dev)
- return;
- if (dev->hw_dev)
- put_device(dev->hw_dev);
- dev->hw_dev = hw_dev;
- if (dev->hw_dev) {
- dev->hw_dev = get_device(dev->hw_dev);
- BUG_ON(dev->hw_dev == NULL);
- }
-}
+/*
+ * dev->hw_dev must be set if you wish to DMA directly into comedi's buffer.
+ * It is also useful for retrieving a previously configured hardware device
+ * of known bus type. It is set automatically for auto-configured devices
+ * and set back to NULL automatically when the hardware device is detached.
+ */
+int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev);
-static inline struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev)
-{
- return dev->hw_dev ? to_pci_dev(dev->hw_dev) : NULL;
-}
+unsigned int comedi_buf_write_alloc(struct comedi_async *, unsigned int);
+unsigned int comedi_buf_write_free(struct comedi_async *, unsigned int);
-static inline struct usb_interface *
-comedi_to_usb_interface(struct comedi_device *dev)
-{
- return dev->hw_dev ? to_usb_interface(dev->hw_dev) : NULL;
-}
+unsigned int comedi_buf_read_n_available(struct comedi_async *);
+unsigned int comedi_buf_read_alloc(struct comedi_async *, unsigned int);
+unsigned int comedi_buf_read_free(struct comedi_async *, unsigned int);
+
+int comedi_buf_put(struct comedi_async *, short);
+int comedi_buf_get(struct comedi_async *, short *);
-int comedi_buf_put(struct comedi_async *async, short x);
-int comedi_buf_get(struct comedi_async *async, short *x);
-
-unsigned int comedi_buf_write_n_available(struct comedi_async *async);
-unsigned int comedi_buf_write_alloc(struct comedi_async *async,
- unsigned int nbytes);
-unsigned int comedi_buf_write_alloc_strict(struct comedi_async *async,
- unsigned int nbytes);
-unsigned comedi_buf_write_free(struct comedi_async *async, unsigned int nbytes);
-unsigned comedi_buf_read_alloc(struct comedi_async *async, unsigned nbytes);
-unsigned comedi_buf_read_free(struct comedi_async *async, unsigned int nbytes);
-unsigned int comedi_buf_read_n_available(struct comedi_async *async);
void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
const void *source, unsigned int num_bytes);
void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
void *destination, unsigned int num_bytes);
-static inline unsigned comedi_buf_write_n_allocated(struct comedi_async *async)
-{
- return async->buf_write_alloc_count - async->buf_write_count;
-}
-static inline unsigned comedi_buf_read_n_allocated(struct comedi_async *async)
-{
- return async->buf_read_alloc_count - async->buf_read_count;
-}
+/* drivers.c - general comedi driver functions */
-static inline void *comedi_aux_data(int options[], int n)
-{
- unsigned long address;
- unsigned long addressLow;
- int bit_shift;
- if (sizeof(int) >= sizeof(void *))
- address = options[COMEDI_DEVCONF_AUX_DATA_LO];
- else {
- address = options[COMEDI_DEVCONF_AUX_DATA_HI];
- bit_shift = sizeof(int) * 8;
- address <<= bit_shift;
- addressLow = options[COMEDI_DEVCONF_AUX_DATA_LO];
- addressLow &= (1UL << bit_shift) - 1;
- address |= addressLow;
- }
- if (n >= 1)
- address += options[COMEDI_DEVCONF_AUX_DATA0_LENGTH];
- if (n >= 2)
- address += options[COMEDI_DEVCONF_AUX_DATA1_LENGTH];
- if (n >= 3)
- address += options[COMEDI_DEVCONF_AUX_DATA2_LENGTH];
- BUG_ON(n > 3);
- return (void *)address;
-}
+int comedi_alloc_subdevices(struct comedi_device *, int);
-int comedi_alloc_subdevice_minor(struct comedi_device *dev,
- struct comedi_subdevice *s);
-void comedi_free_subdevice_minor(struct comedi_subdevice *s);
-int comedi_auto_config(struct device *hardware_device,
- struct comedi_driver *driver, unsigned long context);
-void comedi_auto_unconfig(struct device *hardware_device);
+int comedi_auto_config(struct device *, struct comedi_driver *,
+ unsigned long context);
+void comedi_auto_unconfig(struct device *);
-static inline int comedi_pci_auto_config(struct pci_dev *pcidev,
- struct comedi_driver *driver)
-{
- return comedi_auto_config(&pcidev->dev, driver, 0);
-}
+int comedi_driver_register(struct comedi_driver *);
+int comedi_driver_unregister(struct comedi_driver *);
+
+/**
+ * module_comedi_driver() - Helper macro for registering a comedi driver
+ * @__comedi_driver: comedi_driver struct
+ *
+ * Helper macro for comedi drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only use
+ * this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_comedi_driver(__comedi_driver) \
+ module_driver(__comedi_driver, comedi_driver_register, \
+ comedi_driver_unregister)
+
+#ifdef CONFIG_COMEDI_PCI_DRIVERS
+
+/* comedi_pci.c - comedi PCI driver specific functions */
+
+/*
+ * PCI Vendor IDs not in <linux/pci_ids.h>
+ */
+#define PCI_VENDOR_ID_KOLTER 0x1001
+#define PCI_VENDOR_ID_ICP 0x104c
+#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_VENDOR_ID_DT 0x1116
+#define PCI_VENDOR_ID_IOTECH 0x1616
+#define PCI_VENDOR_ID_CONTEC 0x1221
+#define PCI_VENDOR_ID_RTD 0x1435
+
+struct pci_dev;
+struct pci_driver;
+
+struct pci_dev *comedi_to_pci_dev(struct comedi_device *);
+
+int comedi_pci_enable(struct pci_dev *, const char *);
+void comedi_pci_disable(struct pci_dev *);
+
+int comedi_pci_auto_config(struct pci_dev *, struct comedi_driver *);
+void comedi_pci_auto_unconfig(struct pci_dev *);
+
+int comedi_pci_driver_register(struct comedi_driver *, struct pci_driver *);
+void comedi_pci_driver_unregister(struct comedi_driver *, struct pci_driver *);
+
+/**
+ * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pci_driver: pci_driver struct
+ *
+ * Helper macro for comedi PCI drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ */
+#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \
+ module_driver(__comedi_driver, comedi_pci_driver_register, \
+ comedi_pci_driver_unregister, &(__pci_driver))
-static inline void comedi_pci_auto_unconfig(struct pci_dev *pcidev)
+#else
+
+/*
+ * Some of the comedi mixed ISA/PCI drivers call the PCI specific
+ * functions. Provide some dummy functions if CONFIG_COMEDI_PCI_DRIVERS
+ * is not enabled.
+ */
+
+static inline struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev)
{
- comedi_auto_unconfig(&pcidev->dev);
+ return NULL;
}
-static inline int comedi_usb_auto_config(struct usb_interface *intf,
- struct comedi_driver *driver)
+static inline int comedi_pci_enable(struct pci_dev *dev, const char *name)
{
- return comedi_auto_config(&intf->dev, driver, 0);
+ return -ENOSYS;
}
-static inline void comedi_usb_auto_unconfig(struct usb_interface *intf)
+static inline void comedi_pci_disable(struct pci_dev *dev)
{
- comedi_auto_unconfig(&intf->dev);
}
+#endif /* CONFIG_COMEDI_PCI_DRIVERS */
+
+#ifdef CONFIG_COMEDI_PCMCIA_DRIVERS
+
+/* comedi_pcmcia.c - comedi PCMCIA driver specific functions */
+
+struct pcmcia_driver;
+struct pcmcia_device;
+
+struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *);
+
+int comedi_pcmcia_enable(struct comedi_device *,
+ int (*conf_check)(struct pcmcia_device *, void *));
+void comedi_pcmcia_disable(struct comedi_device *);
+
+int comedi_pcmcia_auto_config(struct pcmcia_device *, struct comedi_driver *);
+void comedi_pcmcia_auto_unconfig(struct pcmcia_device *);
+
+int comedi_pcmcia_driver_register(struct comedi_driver *,
+ struct pcmcia_driver *);
+void comedi_pcmcia_driver_unregister(struct comedi_driver *,
+ struct pcmcia_driver *);
+
+/**
+ * module_comedi_pcmcia_driver() - Helper macro for registering a comedi PCMCIA driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pcmcia_driver: pcmcia_driver struct
+ *
+ * Helper macro for comedi PCMCIA drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ */
+#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
+ module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
+ comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
+
+#endif /* CONFIG_COMEDI_PCMCIA_DRIVERS */
+
+#ifdef CONFIG_COMEDI_USB_DRIVERS
+
+/* comedi_usb.c - comedi USB driver specific functions */
+
+struct usb_driver;
+struct usb_interface;
+
+struct usb_interface *comedi_to_usb_interface(struct comedi_device *);
+
+int comedi_usb_auto_config(struct usb_interface *, struct comedi_driver *,
+ unsigned long context);
+void comedi_usb_auto_unconfig(struct usb_interface *);
+
+int comedi_usb_driver_register(struct comedi_driver *, struct usb_driver *);
+void comedi_usb_driver_unregister(struct comedi_driver *, struct usb_driver *);
+
+/**
+ * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
+ * @__comedi_driver: comedi_driver struct
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for comedi USB drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ */
+#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
+ module_driver(__comedi_driver, comedi_usb_driver_register, \
+ comedi_usb_driver_unregister, &(__usb_driver))
+
+#endif /* CONFIG_COMEDI_USB_DRIVERS */
+
#endif /* _COMEDIDEV_H */
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 50cf498698e2..64be7c5e891e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -23,8 +23,6 @@
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
@@ -43,16 +41,25 @@
#include "comedidev.h"
#include "comedi_internal.h"
-static int postconfig(struct comedi_device *dev);
-static int insn_rw_emulate_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static void *comedi_recognize(struct comedi_driver *driv, const char *name);
-static void comedi_report_boards(struct comedi_driver *driv);
-static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s);
-
struct comedi_driver *comedi_drivers;
+int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev)
+{
+ if (hw_dev == dev->hw_dev)
+ return 0;
+ if (dev->hw_dev != NULL)
+ return -EEXIST;
+ dev->hw_dev = get_device(hw_dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(comedi_set_hw_dev);
+
+static void comedi_clear_hw_dev(struct comedi_device *dev)
+{
+ put_device(dev->hw_dev);
+ dev->hw_dev = NULL;
+}
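+/*
+ * Illustrative note, not part of the patch: with the stricter
+ * comedi_set_hw_dev() above (it returns -EEXIST if a different hw_dev
+ * was already set), a manually attached driver that wants to DMA into
+ * comedi's buffer sets the hardware device once in its attach(), e.g.
+ *
+ *	ret = comedi_set_hw_dev(dev, &pcidev->dev);
+ *	if (ret)
+ *		return ret;
+ *
+ * The core now drops the reference itself via comedi_clear_hw_dev()
+ * when the device is cleaned up, so drivers no longer pass NULL to
+ * "unset" it.
+ */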
+
int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
{
struct comedi_subdevice *s;
@@ -70,6 +77,7 @@ int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
for (i = 0; i < num_subdevices; ++i) {
s = &dev->subdevices[i];
s->device = dev;
+ s->index = i;
s->async_dma_dir = DMA_NONE;
spin_lock_init(&s->spin_lock);
s->minor = -1;
@@ -107,7 +115,7 @@ static void cleanup_device(struct comedi_device *dev)
dev->write_subdev = NULL;
dev->open = NULL;
dev->close = NULL;
- comedi_set_hw_dev(dev, NULL);
+ comedi_clear_hw_dev(dev);
}
static void __comedi_device_detach(struct comedi_device *dev)
@@ -128,131 +136,105 @@ void comedi_device_detach(struct comedi_device *dev)
__comedi_device_detach(dev);
}
-/* do a little post-config cleanup */
-/* called with module refcount incremented, decrements it */
-static int comedi_device_postconfig(struct comedi_device *dev)
+static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s)
{
- int ret = postconfig(dev);
- module_put(dev->driver->module);
- if (ret < 0) {
- __comedi_device_detach(dev);
- return ret;
- }
- if (!dev->board_name) {
- dev_warn(dev->class_dev, "BUG: dev->board_name=NULL\n");
- dev->board_name = "BUG";
- }
- smp_wmb();
- dev->attached = 1;
- return 0;
+ return -EINVAL;
}
-int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
{
- struct comedi_driver *driv;
+ return -EINVAL;
+}
+
+static int insn_rw_emulate_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct comedi_insn new_insn;
int ret;
+ static const unsigned channels_per_bitfield = 32;
- if (dev->attached)
- return -EBUSY;
+ unsigned chan = CR_CHAN(insn->chanspec);
+ const unsigned base_bitfield_channel =
+ (chan < channels_per_bitfield) ? 0 : chan;
+ unsigned int new_data[2];
+ memset(new_data, 0, sizeof(new_data));
+ memset(&new_insn, 0, sizeof(new_insn));
+ new_insn.insn = INSN_BITS;
+ new_insn.chanspec = base_bitfield_channel;
+ new_insn.n = 2;
+ new_insn.subdev = insn->subdev;
- for (driv = comedi_drivers; driv; driv = driv->next) {
- if (!try_module_get(driv->module))
- continue;
- if (driv->num_names) {
- dev->board_ptr = comedi_recognize(driv, it->board_name);
- if (dev->board_ptr)
- break;
- } else if (strcmp(driv->driver_name, it->board_name) == 0)
- break;
- module_put(driv->module);
- }
- if (driv == NULL) {
- /* recognize has failed if we get here */
- /* report valid board names before returning error */
- for (driv = comedi_drivers; driv; driv = driv->next) {
- if (!try_module_get(driv->module))
- continue;
- comedi_report_boards(driv);
- module_put(driv->module);
- }
- return -EIO;
- }
- if (driv->attach == NULL) {
- /* driver does not support manual configuration */
- dev_warn(dev->class_dev,
- "driver '%s' does not support attach using comedi_config\n",
- driv->driver_name);
- module_put(driv->module);
- return -ENOSYS;
+ if (insn->insn == INSN_WRITE) {
+ if (!(s->subdev_flags & SDF_WRITABLE))
+ return -EINVAL;
+ new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */
+ new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel))
+ : 0; /* bits */
}
- /* initialize dev->driver here so
- * comedi_error() can be called from attach */
- dev->driver = driv;
- ret = driv->attach(dev, it);
- if (ret < 0) {
- module_put(dev->driver->module);
- __comedi_device_detach(dev);
+
+ ret = s->insn_bits(dev, s, &new_insn, new_data);
+ if (ret < 0)
return ret;
- }
- return comedi_device_postconfig(dev);
-}
-int comedi_driver_register(struct comedi_driver *driver)
-{
- driver->next = comedi_drivers;
- comedi_drivers = driver;
+ if (insn->insn == INSN_READ)
+ data[0] = (new_data[1] >> (chan - base_bitfield_channel)) & 1;
- return 0;
+ return 1;
}
-EXPORT_SYMBOL(comedi_driver_register);
-int comedi_driver_unregister(struct comedi_driver *driver)
+static int __comedi_device_postconfig_async(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_driver *prev;
- int i;
+ struct comedi_async *async;
+ unsigned int buf_size;
+ int ret;
- /* check for devices using this driver */
- for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(i);
- struct comedi_device *dev;
+ if ((s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) == 0) {
+ dev_warn(dev->class_dev,
+ "async subdevices must support SDF_CMD_READ or SDF_CMD_WRITE\n");
+ return -EINVAL;
+ }
+ if (!s->do_cmdtest) {
+ dev_warn(dev->class_dev,
+ "async subdevices must have a do_cmdtest() function\n");
+ return -EINVAL;
+ }
- if (dev_file_info == NULL)
- continue;
- dev = dev_file_info->device;
+ async = kzalloc(sizeof(*async), GFP_KERNEL);
+ if (!async)
+ return -ENOMEM;
- mutex_lock(&dev->mutex);
- if (dev->attached && dev->driver == driver) {
- if (dev->use_count)
- dev_warn(dev->class_dev,
- "BUG! detaching device with use_count=%d\n",
- dev->use_count);
- comedi_device_detach(dev);
- }
- mutex_unlock(&dev->mutex);
- }
+ init_waitqueue_head(&async->wait_head);
+ async->subdevice = s;
+ s->async = async;
- if (comedi_drivers == driver) {
- comedi_drivers = driver->next;
- return 0;
- }
+ async->max_bufsize = comedi_default_buf_maxsize_kb * 1024;
+ buf_size = comedi_default_buf_size_kb * 1024;
+ if (buf_size > async->max_bufsize)
+ buf_size = async->max_bufsize;
- for (prev = comedi_drivers; prev->next; prev = prev->next) {
- if (prev->next == driver) {
- prev->next = driver->next;
- return 0;
- }
+ if (comedi_buf_alloc(dev, s, buf_size) < 0) {
+ dev_warn(dev->class_dev, "Buffer allocation failed\n");
+ return -ENOMEM;
}
- return -EINVAL;
+ if (s->buf_change) {
+ ret = s->buf_change(dev, s, buf_size);
+ if (ret < 0)
+ return ret;
+ }
+
+ comedi_alloc_subdevice_minor(s);
+
+ return 0;
}
-EXPORT_SYMBOL(comedi_driver_unregister);
-static int postconfig(struct comedi_device *dev)
+static int __comedi_device_postconfig(struct comedi_device *dev)
{
- int i;
struct comedi_subdevice *s;
- struct comedi_async *async = NULL;
int ret;
+ int i;
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
@@ -264,42 +246,9 @@ static int postconfig(struct comedi_device *dev)
s->len_chanlist = 1;
if (s->do_cmd) {
- unsigned int buf_size;
-
- BUG_ON((s->subdev_flags & (SDF_CMD_READ |
- SDF_CMD_WRITE)) == 0);
- BUG_ON(!s->do_cmdtest);
-
- async =
- kzalloc(sizeof(struct comedi_async), GFP_KERNEL);
- if (async == NULL) {
- dev_warn(dev->class_dev,
- "failed to allocate async struct\n");
- return -ENOMEM;
- }
- init_waitqueue_head(&async->wait_head);
- async->subdevice = s;
- s->async = async;
-
- async->max_bufsize =
- comedi_default_buf_maxsize_kb * 1024;
- buf_size = comedi_default_buf_size_kb * 1024;
- if (buf_size > async->max_bufsize)
- buf_size = async->max_bufsize;
-
- async->prealloc_buf = NULL;
- async->prealloc_bufsz = 0;
- if (comedi_buf_alloc(dev, s, buf_size) < 0) {
- dev_warn(dev->class_dev,
- "Buffer allocation failed\n");
- return -ENOMEM;
- }
- if (s->buf_change) {
- ret = s->buf_change(dev, s, buf_size);
- if (ret < 0)
- return ret;
- }
- comedi_alloc_subdevice_minor(dev, s);
+ ret = __comedi_device_postconfig_async(dev, s);
+ if (ret)
+ return ret;
}
if (!s->range_table && !s->range_table_list)
@@ -326,6 +275,25 @@ static int postconfig(struct comedi_device *dev)
return 0;
}
+/* do a little post-config cleanup */
+/* called with module refcount incremented, decrements it */
+static int comedi_device_postconfig(struct comedi_device *dev)
+{
+ int ret = __comedi_device_postconfig(dev);
+ module_put(dev->driver->module);
+ if (ret < 0) {
+ __comedi_device_detach(dev);
+ return ret;
+ }
+ if (!dev->board_name) {
+ dev_warn(dev->class_dev, "BUG: dev->board_name=NULL\n");
+ dev->board_name = "BUG";
+ }
+ smp_wmb();
+ dev->attached = 1;
+ return 0;
+}
+
/*
* Generic recognize function for drivers that register their supported
* board names.
@@ -384,463 +352,63 @@ static void comedi_report_boards(struct comedi_driver *driv)
pr_info(" %s\n", driv->driver_name);
}
-static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- return -EINVAL;
-}
-
-int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return -EINVAL;
-}
-
-static int insn_rw_emulate_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- struct comedi_insn new_insn;
+ struct comedi_driver *driv;
int ret;
- static const unsigned channels_per_bitfield = 32;
-
- unsigned chan = CR_CHAN(insn->chanspec);
- const unsigned base_bitfield_channel =
- (chan < channels_per_bitfield) ? 0 : chan;
- unsigned int new_data[2];
- memset(new_data, 0, sizeof(new_data));
- memset(&new_insn, 0, sizeof(new_insn));
- new_insn.insn = INSN_BITS;
- new_insn.chanspec = base_bitfield_channel;
- new_insn.n = 2;
- new_insn.subdev = insn->subdev;
- if (insn->insn == INSN_WRITE) {
- if (!(s->subdev_flags & SDF_WRITABLE))
- return -EINVAL;
- new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */
- new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel))
- : 0; /* bits */
- }
-
- ret = s->insn_bits(dev, s, &new_insn, new_data);
- if (ret < 0)
- return ret;
-
- if (insn->insn == INSN_READ)
- data[0] = (new_data[1] >> (chan - base_bitfield_channel)) & 1;
-
- return 1;
-}
-
-int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned long new_size)
-{
- struct comedi_async *async = s->async;
-
- /* Round up new_size to multiple of PAGE_SIZE */
- new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
-
- /* if no change is required, do nothing */
- if (async->prealloc_buf && async->prealloc_bufsz == new_size)
- return 0;
-
- /* deallocate old buffer */
- if (async->prealloc_buf) {
- vunmap(async->prealloc_buf);
- async->prealloc_buf = NULL;
- async->prealloc_bufsz = 0;
- }
- if (async->buf_page_list) {
- unsigned i;
- for (i = 0; i < async->n_buf_pages; ++i) {
- if (async->buf_page_list[i].virt_addr) {
- clear_bit(PG_reserved,
- &(virt_to_page(async->buf_page_list[i].
- virt_addr)->flags));
- if (s->async_dma_dir != DMA_NONE) {
- dma_free_coherent(dev->hw_dev,
- PAGE_SIZE,
- async->
- buf_page_list
- [i].virt_addr,
- async->
- buf_page_list
- [i].dma_addr);
- } else {
- free_page((unsigned long)
- async->buf_page_list[i].
- virt_addr);
- }
- }
- }
- vfree(async->buf_page_list);
- async->buf_page_list = NULL;
- async->n_buf_pages = 0;
- }
- /* allocate new buffer */
- if (new_size) {
- unsigned i = 0;
- unsigned n_pages = new_size >> PAGE_SHIFT;
- struct page **pages = NULL;
-
- async->buf_page_list =
- vzalloc(sizeof(struct comedi_buf_page) * n_pages);
- if (async->buf_page_list)
- pages = vmalloc(sizeof(struct page *) * n_pages);
-
- if (pages) {
- for (i = 0; i < n_pages; i++) {
- if (s->async_dma_dir != DMA_NONE) {
- async->buf_page_list[i].virt_addr =
- dma_alloc_coherent(dev->hw_dev,
- PAGE_SIZE,
- &async->
- buf_page_list
- [i].dma_addr,
- GFP_KERNEL |
- __GFP_COMP);
- } else {
- async->buf_page_list[i].virt_addr =
- (void *)
- get_zeroed_page(GFP_KERNEL);
- }
- if (async->buf_page_list[i].virt_addr == NULL)
- break;
-
- set_bit(PG_reserved,
- &(virt_to_page(async->buf_page_list[i].
- virt_addr)->flags));
- pages[i] = virt_to_page(async->buf_page_list[i].
- virt_addr);
- }
- }
- if (i == n_pages) {
- async->prealloc_buf =
-#ifdef PAGE_KERNEL_NOCACHE
- vmap(pages, n_pages, VM_MAP, PAGE_KERNEL_NOCACHE);
-#else
- vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
-#endif
- }
- vfree(pages);
-
- if (async->prealloc_buf == NULL) {
- /* Some allocation failed above. */
- if (async->buf_page_list) {
- for (i = 0; i < n_pages; i++) {
- if (async->buf_page_list[i].virt_addr ==
- NULL) {
- break;
- }
- clear_bit(PG_reserved,
- &(virt_to_page(async->
- buf_page_list[i].
- virt_addr)->flags));
- if (s->async_dma_dir != DMA_NONE) {
- dma_free_coherent(dev->hw_dev,
- PAGE_SIZE,
- async->
- buf_page_list
- [i].virt_addr,
- async->
- buf_page_list
- [i].dma_addr);
- } else {
- free_page((unsigned long)
- async->buf_page_list
- [i].virt_addr);
- }
- }
- vfree(async->buf_page_list);
- async->buf_page_list = NULL;
- }
- return -ENOMEM;
- }
- async->n_buf_pages = n_pages;
- }
- async->prealloc_bufsz = new_size;
-
- return 0;
-}
+ if (dev->attached)
+ return -EBUSY;
-/* munging is applied to data by core as it passes between user
- * and kernel space */
-static unsigned int comedi_buf_munge(struct comedi_async *async,
- unsigned int num_bytes)
-{
- struct comedi_subdevice *s = async->subdevice;
- unsigned int count = 0;
- const unsigned num_sample_bytes = bytes_per_sample(s);
-
- if (s->munge == NULL || (async->cmd.flags & CMDF_RAWDATA)) {
- async->munge_count += num_bytes;
- BUG_ON((int)(async->munge_count - async->buf_write_count) > 0);
- return num_bytes;
- }
- /* don't munge partial samples */
- num_bytes -= num_bytes % num_sample_bytes;
- while (count < num_bytes) {
- int block_size;
-
- block_size = num_bytes - count;
- if (block_size < 0) {
- dev_warn(s->device->class_dev,
- "%s: %s: bug! block_size is negative\n",
- __FILE__, __func__);
+ for (driv = comedi_drivers; driv; driv = driv->next) {
+ if (!try_module_get(driv->module))
+ continue;
+ if (driv->num_names) {
+ dev->board_ptr = comedi_recognize(driv, it->board_name);
+ if (dev->board_ptr)
+ break;
+ } else if (strcmp(driv->driver_name, it->board_name) == 0)
break;
- }
- if ((int)(async->munge_ptr + block_size -
- async->prealloc_bufsz) > 0)
- block_size = async->prealloc_bufsz - async->munge_ptr;
-
- s->munge(s->device, s, async->prealloc_buf + async->munge_ptr,
- block_size, async->munge_chan);
-
- smp_wmb(); /* barrier insures data is munged in buffer
- * before munge_count is incremented */
-
- async->munge_chan += block_size / num_sample_bytes;
- async->munge_chan %= async->cmd.chanlist_len;
- async->munge_count += block_size;
- async->munge_ptr += block_size;
- async->munge_ptr %= async->prealloc_bufsz;
- count += block_size;
- }
- BUG_ON((int)(async->munge_count - async->buf_write_count) > 0);
- return count;
-}
-
-unsigned int comedi_buf_write_n_available(struct comedi_async *async)
-{
- unsigned int free_end;
- unsigned int nbytes;
-
- if (async == NULL)
- return 0;
-
- free_end = async->buf_read_count + async->prealloc_bufsz;
- nbytes = free_end - async->buf_write_alloc_count;
- nbytes -= nbytes % bytes_per_sample(async->subdevice);
- /* barrier insures the read of buf_read_count in this
- query occurs before any following writes to the buffer which
- might be based on the return value from this query.
- */
- smp_mb();
- return nbytes;
-}
-
-/* allocates chunk for the writer from free buffer space */
-unsigned int comedi_buf_write_alloc(struct comedi_async *async,
- unsigned int nbytes)
-{
- unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
-
- if ((int)(async->buf_write_alloc_count + nbytes - free_end) > 0)
- nbytes = free_end - async->buf_write_alloc_count;
-
- async->buf_write_alloc_count += nbytes;
- /* barrier insures the read of buf_read_count above occurs before
- we write data to the write-alloc'ed buffer space */
- smp_mb();
- return nbytes;
-}
-EXPORT_SYMBOL(comedi_buf_write_alloc);
-
-/* allocates nothing unless it can completely fulfill the request */
-unsigned int comedi_buf_write_alloc_strict(struct comedi_async *async,
- unsigned int nbytes)
-{
- unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
-
- if ((int)(async->buf_write_alloc_count + nbytes - free_end) > 0)
- nbytes = 0;
-
- async->buf_write_alloc_count += nbytes;
- /* barrier insures the read of buf_read_count above occurs before
- we write data to the write-alloc'ed buffer space */
- smp_mb();
- return nbytes;
-}
-
-/* transfers a chunk from writer to filled buffer space */
-unsigned comedi_buf_write_free(struct comedi_async *async, unsigned int nbytes)
-{
- if ((int)(async->buf_write_count + nbytes -
- async->buf_write_alloc_count) > 0) {
- dev_info(async->subdevice->device->class_dev,
- "attempted to write-free more bytes than have been write-allocated.\n");
- nbytes = async->buf_write_alloc_count - async->buf_write_count;
- }
- async->buf_write_count += nbytes;
- async->buf_write_ptr += nbytes;
- comedi_buf_munge(async, async->buf_write_count - async->munge_count);
- if (async->buf_write_ptr >= async->prealloc_bufsz)
- async->buf_write_ptr %= async->prealloc_bufsz;
-
- return nbytes;
-}
-EXPORT_SYMBOL(comedi_buf_write_free);
-
-/* allocates a chunk for the reader from filled (and munged) buffer space */
-unsigned comedi_buf_read_alloc(struct comedi_async *async, unsigned nbytes)
-{
- if ((int)(async->buf_read_alloc_count + nbytes - async->munge_count) >
- 0) {
- nbytes = async->munge_count - async->buf_read_alloc_count;
- }
- async->buf_read_alloc_count += nbytes;
- /* barrier insures read of munge_count occurs before we actually read
- data out of buffer */
- smp_rmb();
- return nbytes;
-}
-EXPORT_SYMBOL(comedi_buf_read_alloc);
-
-/* transfers control of a chunk from reader to free buffer space */
-unsigned comedi_buf_read_free(struct comedi_async *async, unsigned int nbytes)
-{
- /* barrier insures data has been read out of
- * buffer before read count is incremented */
- smp_mb();
- if ((int)(async->buf_read_count + nbytes -
- async->buf_read_alloc_count) > 0) {
- dev_info(async->subdevice->device->class_dev,
- "attempted to read-free more bytes than have been read-allocated.\n");
- nbytes = async->buf_read_alloc_count - async->buf_read_count;
+ module_put(driv->module);
}
- async->buf_read_count += nbytes;
- async->buf_read_ptr += nbytes;
- async->buf_read_ptr %= async->prealloc_bufsz;
- return nbytes;
-}
-EXPORT_SYMBOL(comedi_buf_read_free);
-
-void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
- const void *data, unsigned int num_bytes)
-{
- unsigned int write_ptr = async->buf_write_ptr + offset;
-
- if (write_ptr >= async->prealloc_bufsz)
- write_ptr %= async->prealloc_bufsz;
-
- while (num_bytes) {
- unsigned int block_size;
-
- if (write_ptr + num_bytes > async->prealloc_bufsz)
- block_size = async->prealloc_bufsz - write_ptr;
- else
- block_size = num_bytes;
-
- memcpy(async->prealloc_buf + write_ptr, data, block_size);
-
- data += block_size;
- num_bytes -= block_size;
-
- write_ptr = 0;
+ if (driv == NULL) {
+		/* driver recognition failed if we get here */
+ /* report valid board names before returning error */
+ for (driv = comedi_drivers; driv; driv = driv->next) {
+ if (!try_module_get(driv->module))
+ continue;
+ comedi_report_boards(driv);
+ module_put(driv->module);
+ }
+ return -EIO;
}
-}
-EXPORT_SYMBOL(comedi_buf_memcpy_to);
-
-void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
- void *dest, unsigned int nbytes)
-{
- void *src;
- unsigned int read_ptr = async->buf_read_ptr + offset;
-
- if (read_ptr >= async->prealloc_bufsz)
- read_ptr %= async->prealloc_bufsz;
-
- while (nbytes) {
- unsigned int block_size;
-
- src = async->prealloc_buf + read_ptr;
-
- if (nbytes >= async->prealloc_bufsz - read_ptr)
- block_size = async->prealloc_bufsz - read_ptr;
- else
- block_size = nbytes;
-
- memcpy(dest, src, block_size);
- nbytes -= block_size;
- dest += block_size;
- read_ptr = 0;
+ if (driv->attach == NULL) {
+ /* driver does not support manual configuration */
+ dev_warn(dev->class_dev,
+ "driver '%s' does not support attach using comedi_config\n",
+ driv->driver_name);
+ module_put(driv->module);
+ return -ENOSYS;
}
-}
-EXPORT_SYMBOL(comedi_buf_memcpy_from);
-
-unsigned int comedi_buf_read_n_available(struct comedi_async *async)
-{
- unsigned num_bytes;
-
- if (async == NULL)
- return 0;
- num_bytes = async->munge_count - async->buf_read_count;
-	/* barrier ensures the read of munge_count in this
-	   query occurs before any following reads of the buffer which
-	   might be based on the return value from this query.
-	 */
- smp_rmb();
- return num_bytes;
-}
-EXPORT_SYMBOL(comedi_buf_read_n_available);
-
-int comedi_buf_get(struct comedi_async *async, short *x)
-{
- unsigned int n = comedi_buf_read_n_available(async);
-
- if (n < sizeof(short))
- return 0;
- comedi_buf_read_alloc(async, sizeof(short));
- *x = *(short *)(async->prealloc_buf + async->buf_read_ptr);
- comedi_buf_read_free(async, sizeof(short));
- return 1;
-}
-EXPORT_SYMBOL(comedi_buf_get);
-
-int comedi_buf_put(struct comedi_async *async, short x)
-{
- unsigned int n = comedi_buf_write_alloc_strict(async, sizeof(short));
-
- if (n < sizeof(short)) {
- async->events |= COMEDI_CB_ERROR;
- return 0;
+ /* initialize dev->driver here so
+ * comedi_error() can be called from attach */
+ dev->driver = driv;
+ ret = driv->attach(dev, it);
+ if (ret < 0) {
+ module_put(dev->driver->module);
+ __comedi_device_detach(dev);
+ return ret;
}
- *(short *)(async->prealloc_buf + async->buf_write_ptr) = x;
- comedi_buf_write_free(async, sizeof(short));
- return 1;
-}
-EXPORT_SYMBOL(comedi_buf_put);
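comedi_buf_put()/comedi_buf_get() are the single-sample conveniences built on the alloc/free primitives above. An illustrative interrupt handler (not part of this patch; the data-register offset is omitted and comedi_event() is again assumed from the wider API) might look like this:

/* Sketch: push one sample per interrupt; needs <linux/interrupt.h>, <linux/io.h>. */
static irqreturn_t board_isr(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	unsigned short sample = inw(dev->iobase);	/* data register; offset omitted */

	if (!comedi_buf_put(s->async, sample))
		dev_warn(dev->class_dev, "buffer overflow\n");

	comedi_event(dev, s);	/* assumed notifier */
	return IRQ_HANDLED;
}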
-
-void comedi_reset_async_buf(struct comedi_async *async)
-{
- async->buf_write_alloc_count = 0;
- async->buf_write_count = 0;
- async->buf_read_alloc_count = 0;
- async->buf_read_count = 0;
-
- async->buf_write_ptr = 0;
- async->buf_read_ptr = 0;
-
- async->cur_chan = 0;
- async->scan_progress = 0;
- async->munge_chan = 0;
- async->munge_count = 0;
- async->munge_ptr = 0;
-
- async->events = 0;
+ return comedi_device_postconfig(dev);
}
int comedi_auto_config(struct device *hardware_device,
struct comedi_driver *driver, unsigned long context)
{
int minor;
- struct comedi_device_file_info *dev_file_info;
struct comedi_device *comedi_dev;
int ret;
- if (!comedi_autoconfig)
- return 0;
-
if (!driver->auto_attach) {
dev_warn(hardware_device,
"BUG! comedi driver '%s' has no auto_attach handler\n",
@@ -852,8 +420,7 @@ int comedi_auto_config(struct device *hardware_device,
if (minor < 0)
return minor;
- dev_file_info = comedi_get_device_file_info(minor);
- comedi_dev = dev_file_info->device;
+ comedi_dev = comedi_dev_from_minor(minor);
mutex_lock(&comedi_dev->mutex);
if (comedi_dev->attached)
@@ -888,103 +455,53 @@ void comedi_auto_unconfig(struct device *hardware_device)
minor = comedi_find_board_minor(hardware_device);
if (minor < 0)
return;
- BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
comedi_free_board_minor(minor);
}
EXPORT_SYMBOL_GPL(comedi_auto_unconfig);
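The auto-config pair above is what bus glue calls from its probe/remove callbacks; the driver passed in must supply (*auto_attach), as checked earlier. A sketch using a platform device purely for illustration (the PCI/USB wrappers are the usual route):

/* Sketch: handing a device to comedi from a bus driver (names invented). */
static int example_probe(struct platform_device *pdev)
{
	return comedi_auto_config(&pdev->dev, &example_driver, 0);
}

static int example_remove(struct platform_device *pdev)
{
	comedi_auto_unconfig(&pdev->dev);
	return 0;
}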
-/**
- * comedi_pci_enable() - Enable the PCI device and request the regions.
- * @pdev: pci_dev struct
- * @res_name: name for the requested resource
- */
-int comedi_pci_enable(struct pci_dev *pdev, const char *res_name)
-{
- int rc;
-
- rc = pci_enable_device(pdev);
- if (rc < 0)
- return rc;
-
- rc = pci_request_regions(pdev, res_name);
- if (rc < 0)
- pci_disable_device(pdev);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(comedi_pci_enable);
-
-/**
- * comedi_pci_disable() - Release the regions and disable the PCI device.
- * @pdev: pci_dev struct
- *
- * This must be matched with a previous successful call to comedi_pci_enable().
- */
-void comedi_pci_disable(struct pci_dev *pdev)
-{
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-EXPORT_SYMBOL_GPL(comedi_pci_disable);
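The two helpers removed above were paired in a driver's attach/detach roughly as sketched below (illustrative only; comedi_to_pci_dev() is assumed from comedidev.h and the BAR number is made up):

/* Sketch: typical pairing of comedi_pci_enable()/comedi_pci_disable(). */
static int board_pci_attach(struct comedi_device *dev, unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);	/* assumed helper */
	int ret;

	ret = comedi_pci_enable(pcidev, dev->board_name);
	if (ret)
		return ret;
	dev->iobase = pci_resource_start(pcidev, 2);	/* BAR is illustrative */
	return 0;
}

static void board_pci_detach(struct comedi_device *dev)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);

	if (dev->iobase)
		comedi_pci_disable(pcidev);
}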
-
-int comedi_pci_driver_register(struct comedi_driver *comedi_driver,
- struct pci_driver *pci_driver)
+int comedi_driver_register(struct comedi_driver *driver)
{
- int ret;
-
- ret = comedi_driver_register(comedi_driver);
- if (ret < 0)
- return ret;
-
- /* FIXME: Remove this test after auditing all comedi pci drivers */
- if (!pci_driver->name)
- pci_driver->name = comedi_driver->driver_name;
-
- ret = pci_register_driver(pci_driver);
- if (ret < 0) {
- comedi_driver_unregister(comedi_driver);
- return ret;
- }
+ driver->next = comedi_drivers;
+ comedi_drivers = driver;
return 0;
}
-EXPORT_SYMBOL_GPL(comedi_pci_driver_register);
+EXPORT_SYMBOL(comedi_driver_register);
-void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
- struct pci_driver *pci_driver)
+int comedi_driver_unregister(struct comedi_driver *driver)
{
- pci_unregister_driver(pci_driver);
- comedi_driver_unregister(comedi_driver);
-}
-EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
-
-#if IS_ENABLED(CONFIG_USB)
+ struct comedi_driver *prev;
+ int i;
-int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
- struct usb_driver *usb_driver)
-{
- int ret;
+ /* check for devices using this driver */
+ for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
+ struct comedi_device *dev = comedi_dev_from_minor(i);
- ret = comedi_driver_register(comedi_driver);
- if (ret < 0)
- return ret;
+ if (!dev)
+ continue;
- ret = usb_register(usb_driver);
- if (ret < 0) {
- comedi_driver_unregister(comedi_driver);
- return ret;
+ mutex_lock(&dev->mutex);
+ if (dev->attached && dev->driver == driver) {
+ if (dev->use_count)
+ dev_warn(dev->class_dev,
+ "BUG! detaching device with use_count=%d\n",
+ dev->use_count);
+ comedi_device_detach(dev);
+ }
+ mutex_unlock(&dev->mutex);
}
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_usb_driver_register);
+ if (comedi_drivers == driver) {
+ comedi_drivers = driver->next;
+ return 0;
+ }
-void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
- struct usb_driver *usb_driver)
-{
- usb_deregister(usb_driver);
- comedi_driver_unregister(comedi_driver);
+ for (prev = comedi_drivers; prev->next; prev = prev->next) {
+ if (prev->next == driver) {
+ prev->next = driver->next;
+ return 0;
+ }
+ }
+ return -EINVAL;
}
-EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
-
-#endif
+EXPORT_SYMBOL(comedi_driver_unregister);
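A driver module wraps the two calls above in its init/exit hooks; the module_comedi_driver() helper macro normally generates exactly this boilerplate:

/* Sketch: module boilerplate around comedi_driver_register/unregister. */
static int __init example_init(void)
{
	return comedi_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	comedi_driver_unregister(&example_driver);
}
module_exit(example_exit);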
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index e0a79521f35a..0ae356ae56ea 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -54,6 +54,8 @@ Interrupt support for these boards is also not currently supported.
Configuration Options: not applicable, uses PCI auto config
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
#include "8255.h"
@@ -314,11 +316,6 @@ static int pci_8255_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &pci_8255_driver);
}
-static void pci_8255_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_ADLINK_PCI7224) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_ADLINK_PCI7248) },
@@ -342,7 +339,7 @@ static struct pci_driver pci_8255_pci_driver = {
.name = "8255_pci",
.id_table = pci_8255_pci_table,
.probe = pci_8255_pci_probe,
- .remove = pci_8255_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(pci_8255_driver, pci_8255_pci_driver);
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 0de4d2eb76fc..315e836ff99b 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -2,7 +2,6 @@
#
# Comedi "helper" modules
-obj-$(CONFIG_COMEDI) += pcm_common.o
# Comedi misc drivers
obj-$(CONFIG_COMEDI_BOND) += comedi_bond.o
@@ -26,6 +25,7 @@ obj-$(CONFIG_COMEDI_PCM3730) += pcm3730.o
obj-$(CONFIG_COMEDI_RTI800) += rti800.o
obj-$(CONFIG_COMEDI_RTI802) += rti802.o
obj-$(CONFIG_COMEDI_DAS16M1) += das16m1.o
+obj-$(CONFIG_COMEDI_DAS08_ISA) += das08_isa.o
obj-$(CONFIG_COMEDI_DAS16) += das16.o
obj-$(CONFIG_COMEDI_DAS800) += das800.o
obj-$(CONFIG_COMEDI_DAS1800) += das1800.o
@@ -56,6 +56,7 @@ obj-$(CONFIG_COMEDI_POC) += poc.o
# Comedi PCI drivers
obj-$(CONFIG_COMEDI_8255_PCI) += 8255_pci.o
+obj-$(CONFIG_COMEDI_ADDI_WATCHDOG) += addi_watchdog.o
obj-$(CONFIG_COMEDI_ADDI_APCI_035) += addi_apci_035.o
obj-$(CONFIG_COMEDI_ADDI_APCI_1032) += addi_apci_1032.o
obj-$(CONFIG_COMEDI_ADDI_APCI_1500) += addi_apci_1500.o
@@ -81,6 +82,7 @@ obj-$(CONFIG_COMEDI_AMPLC_PC263) += amplc_pc263.o
obj-$(CONFIG_COMEDI_AMPLC_PCI224) += amplc_pci224.o
obj-$(CONFIG_COMEDI_AMPLC_PCI230) += amplc_pci230.o
obj-$(CONFIG_COMEDI_CONTEC_PCI_DIO) += contec_pci_dio.o
+obj-$(CONFIG_COMEDI_DAS08_PCI) += das08_pci.o
obj-$(CONFIG_COMEDI_DT3000) += dt3000.o
obj-$(CONFIG_COMEDI_DYNA_PCI10XX) += dyna_pci10xx.o
obj-$(CONFIG_COMEDI_UNIOXX5) += unioxx5.o
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 90cc43263aee..1051fa5ce8f7 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -11,13 +11,21 @@ Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
http://www.addi-data.com
info@addi-data.com
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2 of the License, or (at your option) any later
+version.
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+This program is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-You should also find the complete GPL in the COPYING file accompanying this source code.
+You should also find the complete GPL in the COPYING file accompanying this
+source code.
@endverbatim
*/
@@ -29,10 +37,10 @@ You should also find the complete GPL in the COPYING file accompanying this sour
| Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
| Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
+-----------------------------------------------------------------------+
- | Project : ADDI DATA | Compiler : GCC |
+ | Project : ADDI DATA | Compiler : GCC |
| Modulname : addi_common.c | Version : 2.96 |
+-------------------------------+---------------------------------------+
- | Author : | Date : |
+ | Author : | Date : |
+-----------------------------------------------------------------------+
| Description : ADDI COMMON Main Module |
+-----------------------------------------------------------------------+
@@ -167,11 +175,11 @@ static int addi_auto_attach(struct comedi_device *dev,
if (this_board->i_PCIEeprom) {
if (!(strcmp(this_board->pc_EepromChip, "S5920"))) {
 			/* Set 3 wait states */
- if (!(strcmp(dev->board_name, "apci035"))) {
+ if (!(strcmp(dev->board_name, "apci035")))
outl(0x80808082, devpriv->i_IobaseAmcc + 0x60);
- } else {
+ else
outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
- }
+
/* Enable the interrupt for the controller */
dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci16xx.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci16xx.c
deleted file mode 100644
index 5958a9cb2a38..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci16xx.c
+++ /dev/null
@@ -1,807 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1648 | Compiler : gcc |
- | Module name : TTL.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: S. Weber | Date : 25/05/2005 |
- +-----------------------------------------------------------------------+
- | Description : APCI-16XX TTL I/O module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- |25.05.2005| S.Weber | Creation |
- | | | |
- +-----------------------------------------------------------------------+
-*/
-
-#ifndef COMEDI_SUBD_TTLIO
-#define COMEDI_SUBD_TTLIO 11 /* Digital Input Output But TTL */
-#endif
-
-#define APCI16XX_TTL_INIT 0
-#define APCI16XX_TTL_INITDIRECTION 1
-#define APCI16XX_TTL_OUTPUTMEMORY 2
-
-#define APCI16XX_TTL_READCHANNEL 0
-#define APCI16XX_TTL_READPORT 1
-
-#define APCI16XX_TTL_WRITECHANNEL_ON 0
-#define APCI16XX_TTL_WRITECHANNEL_OFF 1
-#define APCI16XX_TTL_WRITEPORT_ON 2
-#define APCI16XX_TTL_WRITEPORT_OFF 3
-
-#define APCI16XX_TTL_READ_ALL_INPUTS 0
-#define APCI16XX_TTL_READ_ALL_OUTPUTS 1
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI16XX_InsnConfigInitTTLIO |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task APCI16XX_TTL_INIT (using defaults) : |
-| Configure the TTL I/O operating mode from all ports |
-|                      You must call this function before                   |
-|                      calling any other function which accesses the TTL.   |
-| APCI16XX_TTL_INITDIRECTION(user inputs for direction) |
-+----------------------------------------------------------------------------+
-| Input Parameters : b_InitType = (unsigned char) data[0]; |
-| b_Port0Mode = (unsigned char) data[1]; |
-| b_Port1Mode = (unsigned char) data[2]; |
-| b_Port2Mode = (unsigned char) data[3]; |
-| b_Port3Mode = (unsigned char) data[4]; |
-| ........ |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :>0: No error |
-| -1: Port 0 mode selection is wrong |
-| -2: Port 1 mode selection is wrong |
-| -3: Port 2 mode selection is wrong |
-| -4: Port 3 mode selection is wrong |
-| -X: Port X-1 mode selection is wrong |
-| .... |
-| -100 : Config command error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI16XX_InsnConfigInitTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = comedi_board(dev);
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = insn->n;
- unsigned char b_Command = 0;
- unsigned char b_Cpt = 0;
- unsigned char b_NumberOfPort =
- (unsigned char) (this_board->i_NbrTTLChannel / 8);
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 1) {
- /*******************/
- /* Get the command */
- /* **************** */
-
- b_Command = (unsigned char) data[0];
-
- /********************/
- /* Test the command */
- /********************/
-
- if ((b_Command == APCI16XX_TTL_INIT) ||
- (b_Command == APCI16XX_TTL_INITDIRECTION) ||
- (b_Command == APCI16XX_TTL_OUTPUTMEMORY)) {
- /***************************************/
- /* Test the initialisation buffer size */
- /***************************************/
-
- if ((b_Command == APCI16XX_TTL_INITDIRECTION)
- && ((unsigned char) (insn->n - 1) != b_NumberOfPort)) {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("\nBuffer size error");
- i_ReturnValue = -101;
- }
-
- if ((b_Command == APCI16XX_TTL_OUTPUTMEMORY)
- && ((unsigned char) (insn->n) != 2)) {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("\nBuffer size error");
- i_ReturnValue = -101;
- }
- } else {
- /************************/
- /* Config command error */
- /************************/
-
- printk("\nCommand selection error");
- i_ReturnValue = -100;
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("\nBuffer size error");
- i_ReturnValue = -101;
- }
-
- /**************************************************************************/
- /* Test if no error occur and APCI16XX_TTL_INITDIRECTION command selected */
- /**************************************************************************/
-
- if ((i_ReturnValue >= 0) && (b_Command == APCI16XX_TTL_INITDIRECTION)) {
- memset(devpriv->ul_TTLPortConfiguration, 0,
- sizeof(devpriv->ul_TTLPortConfiguration));
-
- /*************************************/
- /* Test the port direction selection */
- /*************************************/
-
- for (b_Cpt = 1;
- (b_Cpt <= b_NumberOfPort) && (i_ReturnValue >= 0);
- b_Cpt++) {
- /**********************/
- /* Test the direction */
- /**********************/
-
- if ((data[b_Cpt] != 0) && (data[b_Cpt] != 0xFF)) {
- /************************/
- /* Port direction error */
- /************************/
-
- printk("\nPort %d direction selection error",
- (int) b_Cpt);
- i_ReturnValue = -(int) b_Cpt;
- }
-
- /**************************/
- /* Save the configuration */
- /**************************/
-
- devpriv->ul_TTLPortConfiguration[(b_Cpt - 1) / 4] =
- devpriv->ul_TTLPortConfiguration[(b_Cpt -
- 1) / 4] | (data[b_Cpt] << (8 * ((b_Cpt -
- 1) % 4)));
- }
- }
-
- /**************************/
- /* Test if no error occur */
- /**************************/
-
- if (i_ReturnValue >= 0) {
- /***********************************/
-		/* Test if TTL port initialisation */
- /***********************************/
-
- if ((b_Command == APCI16XX_TTL_INIT)
- || (b_Command == APCI16XX_TTL_INITDIRECTION)) {
- /******************************/
- /* Set all port configuration */
- /******************************/
-
- for (b_Cpt = 0; b_Cpt <= b_NumberOfPort; b_Cpt++) {
- if ((b_Cpt % 4) == 0) {
- /*************************/
- /* Set the configuration */
- /*************************/
-
- outl(devpriv->
- ul_TTLPortConfiguration[b_Cpt /
- 4],
- devpriv->iobase + 32 + b_Cpt);
- }
- }
- }
- }
-
- /************************************************/
- /* Test if output memory initialisation command */
- /************************************************/
-
- if (b_Command == APCI16XX_TTL_OUTPUTMEMORY) {
- if (data[1]) {
- devpriv->b_OutputMemoryStatus = ADDIDATA_ENABLE;
- } else {
- devpriv->b_OutputMemoryStatus = ADDIDATA_DISABLE;
- }
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| INPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI16XX_InsnBitsReadTTLIO |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read the status from selected TTL digital input |
-| (b_InputChannel) |
-+----------------------------------------------------------------------------+
-| Task : Read the status from digital input port |
-| (b_SelectedPort) |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-| APCI16XX_TTL_READCHANNEL |
-| b_SelectedPort= CR_RANGE(insn->chanspec); |
-| b_InputChannel= CR_CHAN(insn->chanspec); |
-| b_ReadType = (unsigned char) data[0]; |
-| |
-| APCI16XX_TTL_READPORT |
-| b_SelectedPort= CR_RANGE(insn->chanspec); |
-| b_ReadType = (unsigned char) data[0]; |
-+----------------------------------------------------------------------------+
-| Output Parameters :  data[0]   0 : Channel is not active                   |
-|                                1 : Channel is active                       |
-+----------------------------------------------------------------------------+
-| Return Value : >0 : No error |
-| -100 : Config command error |
-| -101 : Data size error |
-| -102 : The selected TTL input port is wrong |
-| -103 : The selected TTL digital input is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI16XX_InsnBitsReadTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = comedi_board(dev);
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = insn->n;
- unsigned char b_Command = 0;
- unsigned char b_NumberOfPort =
- (unsigned char) (this_board->i_NbrTTLChannel / 8);
- unsigned char b_SelectedPort = CR_RANGE(insn->chanspec);
- unsigned char b_InputChannel = CR_CHAN(insn->chanspec);
- unsigned char *pb_Status;
- unsigned int dw_Status;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 1) {
- /*******************/
- /* Get the command */
- /* **************** */
-
- b_Command = (unsigned char) data[0];
-
- /********************/
- /* Test the command */
- /********************/
-
- if ((b_Command == APCI16XX_TTL_READCHANNEL)
- || (b_Command == APCI16XX_TTL_READPORT)) {
- /**************************/
- /* Test the selected port */
- /**************************/
-
- if (b_SelectedPort < b_NumberOfPort) {
- /**********************/
- /* Test if input port */
- /**********************/
-
- if (((devpriv->ul_TTLPortConfiguration
- [b_SelectedPort /
- 4] >> (8 *
- (b_SelectedPort
- %
- 4))) &
- 0xFF) == 0) {
- /***************************/
- /* Test the channel number */
- /***************************/
-
- if ((b_Command ==
- APCI16XX_TTL_READCHANNEL)
- && (b_InputChannel > 7)) {
- /*******************************************/
- /* The selected TTL digital input is wrong */
- /*******************************************/
-
- printk("\nChannel selection error");
- i_ReturnValue = -103;
- }
- } else {
- /****************************************/
- /* The selected TTL input port is wrong */
- /****************************************/
-
- printk("\nPort selection error");
- i_ReturnValue = -102;
- }
- } else {
- /****************************************/
- /* The selected TTL input port is wrong */
- /****************************************/
-
- printk("\nPort selection error");
- i_ReturnValue = -102;
- }
- } else {
- /************************/
- /* Config command error */
- /************************/
-
- printk("\nCommand selection error");
- i_ReturnValue = -100;
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("\nBuffer size error");
- i_ReturnValue = -101;
- }
-
- /**************************/
- /* Test if no error occur */
- /**************************/
-
- if (i_ReturnValue >= 0) {
- pb_Status = (unsigned char *) &data[0];
-
- /*******************************/
-		/* Get the digital input status */
- /*******************************/
-
- dw_Status =
- inl(devpriv->iobase + 8 + ((b_SelectedPort / 4) * 4));
- dw_Status = (dw_Status >> (8 * (b_SelectedPort % 4))) & 0xFF;
-
- /***********************/
- /* Save the port value */
- /***********************/
-
- *pb_Status = (unsigned char) dw_Status;
-
- /***************************************/
- /* Test if read channel status command */
- /***************************************/
-
- if (b_Command == APCI16XX_TTL_READCHANNEL) {
- *pb_Status = (*pb_Status >> b_InputChannel) & 1;
- }
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI16XX_InsnReadTTLIOAllPortValue |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read the status from all digital input ports |
-+----------------------------------------------------------------------------+
-| Input Parameters : - |
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0] : Port 0 to 3 data |
-| data[1] : Port 4 to 7 data |
-| .... |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -100 : Read command error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI16XX_InsnReadTTLIOAllPortValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = comedi_board(dev);
- struct addi_private *devpriv = dev->private;
- unsigned char b_Command = (unsigned char) CR_AREF(insn->chanspec);
- int i_ReturnValue = insn->n;
- unsigned char b_Cpt = 0;
- unsigned char b_NumberOfPort = 0;
- unsigned int *pls_ReadData = data;
-
- /********************/
- /* Test the command */
- /********************/
-
- if ((b_Command == APCI16XX_TTL_READ_ALL_INPUTS)
- || (b_Command == APCI16XX_TTL_READ_ALL_OUTPUTS)) {
- /**********************************/
- /* Get the number of 32-Bit ports */
- /**********************************/
-
- b_NumberOfPort =
- (unsigned char) (this_board->i_NbrTTLChannel / 32);
- if ((b_NumberOfPort * 32) <
- this_board->i_NbrTTLChannel) {
- b_NumberOfPort = b_NumberOfPort + 1;
- }
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= b_NumberOfPort) {
- if (b_Command == APCI16XX_TTL_READ_ALL_INPUTS) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- for (b_Cpt = 0; b_Cpt < b_NumberOfPort; b_Cpt++) {
- /************************/
- /* Read the 32-Bit port */
- /************************/
-
- pls_ReadData[b_Cpt] =
- inl(devpriv->iobase + 8 +
- (b_Cpt * 4));
-
- /**************************************/
-					/* Mask all channels used as outputs  */
- /**************************************/
-
- pls_ReadData[b_Cpt] =
- pls_ReadData[b_Cpt] &
- (~devpriv->
- ul_TTLPortConfiguration[b_Cpt]);
- }
- } else {
- /****************************/
- /* Read all digital outputs */
- /****************************/
-
- for (b_Cpt = 0; b_Cpt < b_NumberOfPort; b_Cpt++) {
- /************************/
- /* Read the 32-Bit port */
- /************************/
-
- pls_ReadData[b_Cpt] =
- inl(devpriv->iobase + 20 +
- (b_Cpt * 4));
-
- /**************************************/
-					/* Mask all channels used as outputs  */
- /**************************************/
-
- pls_ReadData[b_Cpt] =
- pls_ReadData[b_Cpt] & devpriv->
- ul_TTLPortConfiguration[b_Cpt];
- }
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("\nBuffer size error");
- i_ReturnValue = -101;
- }
- } else {
- /*****************/
- /* Command error */
- /*****************/
-
- printk("\nCommand selection error");
- i_ReturnValue = -100;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| OUTPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI16XX_InsnBitsWriteTTLIO |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Set the state from selected TTL digital output |
-| (b_OutputChannel) |
-+----------------------------------------------------------------------------+
-| Task : Set the state from digital output port |
-| (b_SelectedPort) |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-| APCI16XX_TTL_WRITECHANNEL_ON | APCI16XX_TTL_WRITECHANNEL_OFF |
-| b_SelectedPort = CR_RANGE(insn->chanspec); |
-| b_OutputChannel= CR_CHAN(insn->chanspec); |
-| b_Command = (unsigned char) data[0]; |
-| |
-| APCI16XX_TTL_WRITEPORT_ON | APCI16XX_TTL_WRITEPORT_OFF |
-| b_SelectedPort = CR_RANGE(insn->chanspec); |
-| b_Command = (unsigned char) data[0]; |
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0] : TTL output port 0 to 3 data |
-| data[1] : TTL output port 4 to 7 data |
-| .... |
-+----------------------------------------------------------------------------+
-| Return Value : >0 : No error |
-| -100 : Command error |
-| -101 : Data size error |
-| -102 : The selected TTL output port is wrong |
-| -103 : The selected TTL digital output is wrong |
-| -104 : Output memory disabled |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI16XX_InsnBitsWriteTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = comedi_board(dev);
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = insn->n;
- unsigned char b_Command = 0;
- unsigned char b_NumberOfPort =
- (unsigned char) (this_board->i_NbrTTLChannel / 8);
- unsigned char b_SelectedPort = CR_RANGE(insn->chanspec);
- unsigned char b_OutputChannel = CR_CHAN(insn->chanspec);
- unsigned int dw_Status = 0;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 1) {
- /*******************/
- /* Get the command */
- /* **************** */
-
- b_Command = (unsigned char) data[0];
-
- /********************/
- /* Test the command */
- /********************/
-
- if ((b_Command == APCI16XX_TTL_WRITECHANNEL_ON) ||
- (b_Command == APCI16XX_TTL_WRITEPORT_ON) ||
- (b_Command == APCI16XX_TTL_WRITECHANNEL_OFF) ||
- (b_Command == APCI16XX_TTL_WRITEPORT_OFF)) {
- /**************************/
- /* Test the selected port */
- /**************************/
-
- if (b_SelectedPort < b_NumberOfPort) {
- /***********************/
- /* Test if output port */
- /***********************/
-
- if (((devpriv->ul_TTLPortConfiguration
- [b_SelectedPort /
- 4] >> (8 *
- (b_SelectedPort
- %
- 4))) &
- 0xFF) == 0xFF) {
- /***************************/
- /* Test the channel number */
- /***************************/
-
- if (((b_Command == APCI16XX_TTL_WRITECHANNEL_ON) || (b_Command == APCI16XX_TTL_WRITECHANNEL_OFF)) && (b_OutputChannel > 7)) {
- /********************************************/
- /* The selected TTL digital output is wrong */
- /********************************************/
-
- printk("\nChannel selection error");
- i_ReturnValue = -103;
- }
-
- if (((b_Command == APCI16XX_TTL_WRITECHANNEL_OFF) || (b_Command == APCI16XX_TTL_WRITEPORT_OFF)) && (devpriv->b_OutputMemoryStatus == ADDIDATA_DISABLE)) {
- /********************************************/
- /* The selected TTL digital output is wrong */
- /********************************************/
-
- printk("\nOutput memory disabled");
- i_ReturnValue = -104;
- }
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (((b_Command == APCI16XX_TTL_WRITEPORT_ON) || (b_Command == APCI16XX_TTL_WRITEPORT_OFF)) && (insn->n < 2)) {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("\nBuffer size error");
- i_ReturnValue = -101;
- }
- } else {
- /*****************************************/
- /* The selected TTL output port is wrong */
- /*****************************************/
-
- printk("\nPort selection error %lX",
- (unsigned long)devpriv->
- ul_TTLPortConfiguration[0]);
- i_ReturnValue = -102;
- }
- } else {
- /****************************************/
- /* The selected TTL output port is wrong */
- /****************************************/
-
- printk("\nPort selection error %d %d",
- b_SelectedPort, b_NumberOfPort);
- i_ReturnValue = -102;
- }
- } else {
- /************************/
- /* Config command error */
- /************************/
-
- printk("\nCommand selection error");
- i_ReturnValue = -100;
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("\nBuffer size error");
- i_ReturnValue = -101;
- }
-
- /**************************/
- /* Test if no error occur */
- /**************************/
-
- if (i_ReturnValue >= 0) {
- /********************************/
- /* Get the digital output state */
- /********************************/
-
- dw_Status =
- inl(devpriv->iobase + 20 + ((b_SelectedPort / 4) * 4));
-
- /**********************************/
- /* Test if output memory not used */
- /**********************************/
-
- if (devpriv->b_OutputMemoryStatus == ADDIDATA_DISABLE) {
- /*********************************/
- /* Clear the selected port value */
- /*********************************/
-
- dw_Status =
- dw_Status & (0xFFFFFFFFUL -
- (0xFFUL << (8 * (b_SelectedPort % 4))));
- }
-
- /******************************/
- /* Test if setting channel ON */
- /******************************/
-
- if (b_Command == APCI16XX_TTL_WRITECHANNEL_ON) {
- dw_Status =
- dw_Status | (1UL << ((8 * (b_SelectedPort %
- 4)) + b_OutputChannel));
- }
-
- /***************************/
- /* Test if setting port ON */
- /***************************/
-
- if (b_Command == APCI16XX_TTL_WRITEPORT_ON) {
- dw_Status =
- dw_Status | ((data[1] & 0xFF) << (8 *
- (b_SelectedPort % 4)));
- }
-
- /*******************************/
- /* Test if setting channel OFF */
- /*******************************/
-
- if (b_Command == APCI16XX_TTL_WRITECHANNEL_OFF) {
- dw_Status =
- dw_Status & (0xFFFFFFFFUL -
- (1UL << ((8 * (b_SelectedPort % 4)) +
- b_OutputChannel)));
- }
-
- /****************************/
- /* Test if setting port OFF */
- /****************************/
-
- if (b_Command == APCI16XX_TTL_WRITEPORT_OFF) {
- dw_Status =
- dw_Status & (0xFFFFFFFFUL -
- ((data[1] & 0xFF) << (8 * (b_SelectedPort %
- 4))));
- }
-
- outl(dw_Status,
- devpriv->iobase + 20 + ((b_SelectedPort / 4) * 4));
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name     : int i_APCI16XX_Reset(struct comedi_device *dev)       |
-+----------------------------------------------------------------------------+
-| Task :resets all the registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : - |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI16XX_Reset(struct comedi_device *dev)
-{
- return 0;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.c
deleted file mode 100644
index 9d4a117aad43..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-2200 | Compiler : GCC |
- | Module name : hwdrv_apci2200.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-2200 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-/********* Definitions for APCI-2200 card *****/
-
-/* Card Specific information */
-#define APCI2200_ADDRESS_RANGE 64
-
-/* DIGITAL INPUT-OUTPUT DEFINE */
-
-#define APCI2200_DIGITAL_OP 4
-#define APCI2200_DIGITAL_IP 0
-
-/* TIMER COUNTER WATCHDOG DEFINES */
-
-#define APCI2200_WATCHDOG 0x08
-#define APCI2200_WATCHDOG_ENABLEDISABLE 12
-#define APCI2200_WATCHDOG_RELOAD_VALUE 4
-#define APCI2200_WATCHDOG_STATUS 16
-
-static int apci2200_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[1] = inw(devpriv->iobase + APCI2200_DIGITAL_IP);
-
- return insn->n;
-}
-
-static int apci2200_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- s->state = inw(devpriv->iobase + APCI2200_DIGITAL_OP);
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
- outw(s->state, devpriv->iobase + APCI2200_DIGITAL_OP);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
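The (*insn_bits) handlers removed here all follow the same comedi convention: data[0] is a mask of output channels to change, data[1] carries the new values, and the handler reports the resulting output state back in data[1]. A generic sketch of that convention:

/* Sketch of the (*insn_bits) convention used by the handlers above. */
static int generic_do_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	unsigned int mask = data[0];	/* channels to change */
	unsigned int bits = data[1];	/* new values for those channels */

	if (mask) {
		s->state &= ~mask;
		s->state |= bits & mask;
		/* write s->state to the hardware output register here */
	}
	data[1] = s->state;		/* always report current state back */
	return insn->n;
}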
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI2200_ConfigWatchdog(struct comedi_device *dev,
-| struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) |
-| |
-+----------------------------------------------------------------------------+
-| Task : Configures The Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI2200_ConfigWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- if (data[0] == 0) {
- /* Disable the watchdog */
- outw(0x0,
- devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_ENABLEDISABLE);
- /* Loading the Reload value */
- outw(data[1],
- devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_RELOAD_VALUE);
- data[1] = data[1] >> 16;
- outw(data[1],
- devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_RELOAD_VALUE + 2);
- } /* if(data[0]==0) */
- else {
- printk("\nThe input parameters are wrong\n");
- return -EINVAL;
- } /* elseif(data[0]==0) */
-
- return insn->n;
-}
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : int i_APCI2200_StartStopWriteWatchdog |
- | (struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data); |
- +----------------------------------------------------------------------------+
- | Task : Start / Stop The Watchdog |
- +----------------------------------------------------------------------------+
- | Input Parameters : struct comedi_device *dev : Driver handle |
- | struct comedi_subdevice *s, :pointer to subdevice structure
- struct comedi_insn *insn :pointer to insn structure |
- | unsigned int *data : Data Pointer to read status |
- +----------------------------------------------------------------------------+
- | Output Parameters : -- |
- +----------------------------------------------------------------------------+
- | Return Value : TRUE : No error occur |
- | : FALSE : Error occur. Return the error |
- | |
- +----------------------------------------------------------------------------+
- */
-
-static int i_APCI2200_StartStopWriteWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- switch (data[0]) {
- case 0: /* stop the watchdog */
- outw(0x0, devpriv->iobase + APCI2200_WATCHDOG + APCI2200_WATCHDOG_ENABLEDISABLE); /* disable the watchdog */
- break;
- case 1: /* start the watchdog */
- outw(0x0001,
- devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_ENABLEDISABLE);
- break;
- case 2: /* Software trigger */
- outw(0x0201,
- devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_ENABLEDISABLE);
- break;
- default:
- printk("\nSpecified functionality does not exist\n");
- return -EINVAL;
- } /* switch(data[0]) */
- return insn->n;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI2200_ReadWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,
-| unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Read The Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI2200_ReadWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[0] =
- inw(devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_STATUS) & 0x1;
- return insn->n;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name     : int i_APCI2200_Reset(struct comedi_device *dev)        |
-+----------------------------------------------------------------------------+
-| Task :resets all the registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI2200_Reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
-
- outw(0x0, devpriv->iobase + APCI2200_DIGITAL_OP); /* RESETS THE DIGITAL OUTPUTS */
- outw(0x0,
- devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_ENABLEDISABLE);
- outw(0x0,
- devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_RELOAD_VALUE);
- outw(0x0,
- devpriv->iobase + APCI2200_WATCHDOG +
- APCI2200_WATCHDOG_RELOAD_VALUE + 2);
- return 0;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index 829af187b249..c7908730caa5 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -633,7 +633,7 @@ static int apci3200_do_insn_bits(struct comedi_device *dev,
s->state = inl(devpriv->i_IobaseAddon) & 0xf;
if (mask) {
s->state &= ~mask;
- s->state |= (bits & mask)
+ s->state |= (bits & mask);
outl(s->state, devpriv->i_IobaseAddon);
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
index 7a18ce704ba4..ebc1534a8df8 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
@@ -1,274 +1,27 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*.
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-3501 | Compiler : GCC |
- | Module name : hwdrv_apci3501.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-3501 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-/* Card Specific information */
-#define APCI3501_ADDRESS_RANGE 255
-
-#define APCI3501_DIGITAL_IP 0x50
-#define APCI3501_DIGITAL_OP 0x40
-#define APCI3501_ANALOG_OUTPUT 0x00
-
-/* Analog Output related Defines */
-#define APCI3501_AO_VOLT_MODE 0
-#define APCI3501_AO_PROG 4
-#define APCI3501_AO_TRIG_SCS 8
-#define UNIPOLAR 0
-#define BIPOLAR 1
-#define MODE0 0
-#define MODE1 1
-
/* Watchdog Related Defines */
-#define APCI3501_WATCHDOG 0x20
-#define APCI3501_TCW_SYNC_ENABLEDISABLE 0
-#define APCI3501_TCW_RELOAD_VALUE 4
-#define APCI3501_TCW_TIMEBASE 8
-#define APCI3501_TCW_PROG 12
-#define APCI3501_TCW_TRIG_STATUS 16
-#define APCI3501_TCW_IRQ 20
-#define APCI3501_TCW_WARN_TIMEVAL 24
-#define APCI3501_TCW_WARN_TIMEBASE 28
#define ADDIDATA_TIMER 0
#define ADDIDATA_WATCHDOG 2
-/* ANALOG OUTPUT RANGE */
-static struct comedi_lrange range_apci3501_ao = {
- 2, {
- BIP_RANGE(10),
- UNI_RANGE(10)
- }
-};
-
-static int apci3501_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[1] = inl(devpriv->iobase + APCI3501_DIGITAL_IP) & 0x3;
-
- return insn->n;
-}
-
-static int apci3501_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- s->state = inl(devpriv->iobase + APCI3501_DIGITAL_OP);
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
- outl(s->state, devpriv->iobase + APCI3501_DIGITAL_OP);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3501_ConfigAnalogOutput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Analog Output Subdevice |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s : Subdevice Pointer |
-| struct comedi_insn *insn : Insn Structure Pointer |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : Voltage Mode |
-| 0:Mode 0 |
-| 1:Mode 1 |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3501_ConfigAnalogOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- outl(data[0],
- devpriv->iobase + APCI3501_ANALOG_OUTPUT +
- APCI3501_AO_VOLT_MODE);
-
- if (data[0]) {
- devpriv->b_InterruptMode = MODE1;
- } else {
- devpriv->b_InterruptMode = MODE0;
- }
- return insn->n;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3501_WriteAnalogOutput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task              : Writes To the Selected Analog Output Channel          |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s : Subdevice Pointer |
-| struct comedi_insn *insn : Insn Structure Pointer |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3501_WriteAnalogOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0, ul_Channel_no, ul_Polarity, ul_DAC_Ready = 0;
-
- ul_Channel_no = CR_CHAN(insn->chanspec);
-
- if (devpriv->b_InterruptMode == MODE1) {
- ul_Polarity = 0x80000000;
- if ((*data < 0) || (*data > 16384)) {
- printk("\nIn WriteAnalogOutput :: Not Valid Data\n");
- }
-
- } /* end if(devpriv->b_InterruptMode==MODE1) */
- else {
- ul_Polarity = 0;
- if ((*data < 0) || (*data > 8192)) {
- printk("\nIn WriteAnalogOutput :: Not Valid Data\n");
- }
-
- } /* end else */
-
- if ((ul_Channel_no < 0) || (ul_Channel_no > 7)) {
- printk("\nIn WriteAnalogOutput :: Not Valid Channel\n");
- } /* end if((ul_Channel_no<0)||(ul_Channel_no>7)) */
-
- ul_DAC_Ready = inl(devpriv->iobase + APCI3501_ANALOG_OUTPUT);
-
- while (ul_DAC_Ready == 0) {
- ul_DAC_Ready = inl(devpriv->iobase + APCI3501_ANALOG_OUTPUT);
- ul_DAC_Ready = (ul_DAC_Ready >> 8) & 1;
- }
-
- if (ul_DAC_Ready) {
-/* Output the Value on the output channels. */
- ul_Command1 =
- (unsigned int) ((unsigned int) (ul_Channel_no & 0xFF) |
- (unsigned int) ((*data << 0x8) & 0x7FFFFF00L) |
- (unsigned int) (ul_Polarity));
- outl(ul_Command1,
- devpriv->iobase + APCI3501_ANALOG_OUTPUT +
- APCI3501_AO_PROG);
- }
-
- return insn->n;
-}
-
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3501_ConfigTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Configure As Timer |
-| 1 Configure As Counter |
-| 2 Configure As Watchdog |
-| data[1] : 1 Enable Interrupt |
-| 0 Disable Interrupt |
-| data[2] : Time Unit |
-| data[3] : Reload Value |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
+ * (*insn_config) for the timer subdevice
+ *
+ * Configures The Timer, Counter or Watchdog
+ * Data Pointer contains configuration parameters as below
+ * data[0] : 0 Configure As Timer
+ * 1 Configure As Counter
+ * 2 Configure As Watchdog
+ * data[1] : 1 Enable Interrupt
+ * 0 Disable Interrupt
+ * data[2] : Time Unit
+ * data[3] : Reload Value
+ */
static int i_APCI3501_ConfigTimerCounterWatchdog(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci3501_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
devpriv->tsk_Current = current;
@@ -276,224 +29,146 @@ static int i_APCI3501_ConfigTimerCounterWatchdog(struct comedi_device *dev,
devpriv->b_TimerSelectMode = ADDIDATA_WATCHDOG;
/* Disable the watchdog */
- outl(0x0, devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG); /* disable Wa */
+ outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
if (data[1] == 1) {
/* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
- outl(0x02,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ outl(0x02, dev->iobase + APCI3501_TIMER_CTRL_REG);
} else {
- outl(0x0, devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG); /* disable Timer interrupt */
+ /* disable Timer interrupt */
+ outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
}
- /* Loading the Timebase value */
- outl(data[2],
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_TIMEBASE);
+ outl(data[2], dev->iobase + APCI3501_TIMER_TIMEBASE_REG);
+ outl(data[3], dev->iobase + APCI3501_TIMER_RELOAD_REG);
- /* Loading the Reload value */
- outl(data[3],
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_RELOAD_VALUE);
- /* Set the mode */
- ul_Command1 = inl(devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG) | 0xFFF819E0UL; /* e2->e0 */
- outl(ul_Command1,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
- } /* end if(data[0]==ADDIDATA_WATCHDOG) */
+ /* Set the mode (e2->e0) */
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG) | 0xFFF819E0UL;
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+ }
else if (data[0] == ADDIDATA_TIMER) {
/* First Stop The Timer */
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG); /* Stop The Timer */
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
devpriv->b_TimerSelectMode = ADDIDATA_TIMER;
if (data[1] == 1) {
/* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
- outl(0x02,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ outl(0x02, dev->iobase + APCI3501_TIMER_CTRL_REG);
} else {
- outl(0x0, devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG); /* disable Timer interrupt */
+ /* disable Timer interrupt */
+ outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
}
- /* Loading Timebase */
- outl(data[2],
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_TIMEBASE);
-
- /* Loading the Reload value */
- outl(data[3],
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_RELOAD_VALUE);
+ outl(data[2], dev->iobase + APCI3501_TIMER_TIMEBASE_REG);
+ outl(data[3], dev->iobase + APCI3501_TIMER_RELOAD_REG);
- /* printk ("\nTimer Address :: %x\n", (devpriv->iobase+APCI3501_WATCHDOG)); */
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ /* mode 2 */
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 =
(ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
- outl(ul_Command1, devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG); /* mode 2 */
-
- } /* end if(data[0]==ADDIDATA_TIMER) */
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+ }
return insn->n;
}
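As a rough sketch of how the data[] layout documented above would be driven from user space (the comedilib call is an assumption for illustration; the subdevice index, time unit and reload value are made-up example values, not taken from this patch), a caller could arm the watchdog like this:

	#include <string.h>
	#include <comedilib.h>

	/* sketch: data[0]=2 selects the watchdog, data[1]=1 enables its
	 * interrupt, data[2]/data[3] carry the time unit and reload value */
	static int arm_apci3501_watchdog(comedi_t *it, unsigned int timer_subdev)
	{
		comedi_insn insn;
		lsampl_t data[4] = { 2, 1, 2, 500 };	/* example values only */

		memset(&insn, 0, sizeof(insn));
		insn.insn = INSN_CONFIG;
		insn.subdev = timer_subdev;
		insn.chanspec = CR_PACK(0, 0, 0);
		insn.n = 4;
		insn.data = data;

		return comedi_do_insn(it, &insn);
	}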
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3501_StartStopWriteTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Start / Stop The Selected Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Timer |
-| 1 Counter |
-| 2 Watchdog | | data[1] : 1 Start |
-| 0 Stop | 2 Trigger |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-
+ * (*insn_write) for the timer subdevice
+ *
+ * Start / Stop The Selected Timer, Counter or Watchdog
+ * Data Pointer contains configuration parameters as below
+ * data[0] : 0 Timer
+ * 1 Counter
+ * 2 Watchdog
+ * data[1] : 1 Start
+ * 0 Stop
+ * 2 Trigger
+ */
static int i_APCI3501_StartStopWriteTimerCounterWatchdog(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci3501_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
int i_Temp;
if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
if (data[1] == 1) {
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
/* Enable the Watchdog */
- outl(ul_Command1,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
}
else if (data[1] == 0) /* Stop The Watchdog */
{
/* Stop The Watchdog */
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(0x0,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
} else if (data[1] == 2) {
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x200UL;
- outl(ul_Command1,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
- } /* if(data[1]==2) */
- } /* end if (devpriv->b_TimerSelectMode==ADDIDATA_WATCHDOG) */
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+ }
+ }
if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
if (data[1] == 1) {
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
/* Enable the Timer */
- outl(ul_Command1,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
} else if (data[1] == 0) {
/* Stop The Timer */
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
}
else if (data[1] == 2) {
/* Trigger the Timer */
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x200UL;
- outl(ul_Command1,
- devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_PROG);
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
}
+ }
- } /* end if (devpriv->b_TimerSelectMode==ADDIDATA_TIMER) */
- i_Temp = inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_TRIG_STATUS) & 0x1;
+ i_Temp = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
return insn->n;
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3501_ReadTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read The Selected Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Timer |
-| 1 Counter |
-| 2 Watchdog | | data[1] : Timer Counter Watchdog Number |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-
+ * (*insn_read) for the timer subdevice
+ *
+ * Read The Selected Timer, Counter or Watchdog
+ * Data Pointer contains configuration parameters as below
+ * data[0] : 0 Timer
+ * 1 Counter
+ * 2 Watchdog
+ * data[1] : Timer Counter Watchdog Number
+ */
static int i_APCI3501_ReadTimerCounterWatchdog(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci3501_private *devpriv = dev->private;
if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
- data[0] =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_TRIG_STATUS) & 0x1;
- data[1] = inl(devpriv->iobase + APCI3501_WATCHDOG);
- } /* end if (devpriv->b_TimerSelectMode==ADDIDATA_WATCHDOG) */
+ data[0] = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
+ data[1] = inl(dev->iobase + APCI3501_TIMER_SYNC_REG);
+ }
else if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
- data[0] =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_TRIG_STATUS) & 0x1;
- data[1] = inl(devpriv->iobase + APCI3501_WATCHDOG);
- } /* end if (devpriv->b_TimerSelectMode==ADDIDATA_TIMER) */
+ data[0] = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
+ data[1] = inl(dev->iobase + APCI3501_TIMER_SYNC_REG);
+ }
else if ((devpriv->b_TimerSelectMode != ADDIDATA_TIMER)
&& (devpriv->b_TimerSelectMode != ADDIDATA_WATCHDOG)) {
@@ -501,111 +176,3 @@ static int i_APCI3501_ReadTimerCounterWatchdog(struct comedi_device *dev,
}
return insn->n;
}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3501_Reset(struct comedi_device *dev) |
-| |
-+----------------------------------------------------------------------------+
-| Task :Resets the registers of the card |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI3501_Reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- int i_Count = 0, i_temp = 0;
- unsigned int ul_Command1 = 0, ul_Polarity, ul_DAC_Ready = 0;
-
- outl(0x0, devpriv->iobase + APCI3501_DIGITAL_OP);
- outl(1, devpriv->iobase + APCI3501_ANALOG_OUTPUT +
- APCI3501_AO_VOLT_MODE);
-
- ul_Polarity = 0x80000000;
-
- for (i_Count = 0; i_Count <= 7; i_Count++) {
- ul_DAC_Ready = inl(devpriv->iobase + APCI3501_ANALOG_OUTPUT);
-
- while (ul_DAC_Ready == 0) {
- ul_DAC_Ready =
- inl(devpriv->iobase + APCI3501_ANALOG_OUTPUT);
- ul_DAC_Ready = (ul_DAC_Ready >> 8) & 1;
- }
-
- if (ul_DAC_Ready) {
- /* Output the Value on the output channels. */
- ul_Command1 =
- (unsigned int) ((unsigned int) (i_Count & 0xFF) |
- (unsigned int) ((i_temp << 0x8) & 0x7FFFFF00L) |
- (unsigned int) (ul_Polarity));
- outl(ul_Command1,
- devpriv->iobase + APCI3501_ANALOG_OUTPUT +
- APCI3501_AO_PROG);
- }
- }
-
- return 0;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : static void v_APCI3501_Interrupt |
-| (int irq , void *d) |
-+----------------------------------------------------------------------------+
-| Task : Interrupt processing Routine |
-+----------------------------------------------------------------------------+
-| Input Parameters : int irq : irq number |
-| void *d : void pointer |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static void v_APCI3501_Interrupt(int irq, void *d)
-{
- int i_temp;
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Timer_AOWatchdog;
- unsigned long ul_Command1;
-
- /* Disable Interrupt */
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG);
-
- ul_Command1 = (ul_Command1 & 0xFFFFF9FDul);
- outl(ul_Command1,
- devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG);
-
- ui_Timer_AOWatchdog =
- inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_IRQ) & 0x1;
-
- if ((!ui_Timer_AOWatchdog)) {
- comedi_error(dev, "IRQ from unknown source");
- return;
- }
-
-/*
-* Enable Interrupt Send a signal to from kernel to user space
-*/
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- ul_Command1 =
- inl(devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG);
- ul_Command1 = ((ul_Command1 & 0xFFFFF9FDul) | 1 << 1);
- outl(ul_Command1,
- devpriv->iobase + APCI3501_WATCHDOG + APCI3501_TCW_PROG);
- i_temp = inl(devpriv->iobase + APCI3501_WATCHDOG +
- APCI3501_TCW_TRIG_STATUS) & 0x1;
- return;
-}
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
index c981d4b1cc73..5a53e58258a0 100644
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ b/drivers/staging/comedi/drivers/addi_apci_035.c
@@ -1,3 +1,5 @@
+#include <linux/pci.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
@@ -53,11 +55,6 @@ static int apci035_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci035_driver);
}
-static void apci035_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci035_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x0300) },
{ 0 }
@@ -68,7 +65,7 @@ static struct pci_driver apci035_pci_driver = {
.name = "addi_apci_035",
.id_table = apci035_pci_table,
.probe = apci035_pci_probe,
- .remove = apci035_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci035_driver, apci035_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 7f9424205a64..c0d0429c35c8 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -29,6 +29,9 @@
* source code.
*/
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
@@ -375,11 +378,6 @@ static int apci1032_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1032_driver);
}
-static void apci1032_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci1032_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1003) },
{ 0 }
@@ -390,7 +388,7 @@ static struct pci_driver apci1032_pci_driver = {
.name = "addi_apci_1032",
.id_table = apci1032_pci_table,
.probe = apci1032_pci_probe,
- .remove = apci1032_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci1032_driver, apci1032_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index 8e686a9b811b..9c2f8eeb7977 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -1,3 +1,5 @@
+#include <linux/pci.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
@@ -53,11 +55,6 @@ static int apci1500_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1500_driver);
}
-static void apci1500_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci1500_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA_OLD, 0x80fc) },
{ 0 }
@@ -68,7 +65,7 @@ static struct pci_driver apci1500_pci_driver = {
.name = "addi_apci_1500",
.id_table = apci1500_pci_table,
.probe = apci1500_pci_probe,
- .remove = apci1500_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci1500_driver, apci1500_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index 8fef04b4d197..69e399638419 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -29,7 +29,10 @@
* this source code.
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
+#include "addi_watchdog.h"
#include "comedi_fc.h"
/*
@@ -49,13 +52,6 @@
* PCI bar 2 I/O Register map - Watchdog (APCI-1516 and APCI-2016)
*/
#define APCI1516_WDOG_REG 0x00
-#define APCI1516_WDOG_RELOAD_REG 0x04
-#define APCI1516_WDOG_CTRL_REG 0x0c
-#define APCI1516_WDOG_CTRL_ENABLE (1 << 0)
-#define APCI1516_WDOG_CTRL_SW_TRIG (1 << 9)
-#define APCI1516_WDOG_STATUS_REG 0x10
-#define APCI1516_WDOG_STATUS_ENABLED (1 << 0)
-#define APCI1516_WDOG_STATUS_SW_TRIG (1 << 1)
struct apci1516_boardinfo {
const char *name;
@@ -86,7 +82,6 @@ static const struct apci1516_boardinfo apci1516_boardtypes[] = {
struct apci1516_private {
unsigned long wdog_iobase;
- unsigned int ctrl;
};
static int apci1516_di_insn_bits(struct comedi_device *dev,
@@ -120,82 +115,6 @@ static int apci1516_do_insn_bits(struct comedi_device *dev,
return insn->n;
}
-/*
- * The watchdog subdevice is configured with two INSN_CONFIG instructions:
- *
- * Enable the watchdog and set the reload timeout:
- * data[0] = INSN_CONFIG_ARM
- * data[1] = timeout reload value
- *
- * Disable the watchdog:
- * data[0] = INSN_CONFIG_DISARM
- */
-static int apci1516_wdog_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1516_private *devpriv = dev->private;
- unsigned int reload;
-
- switch (data[0]) {
- case INSN_CONFIG_ARM:
- devpriv->ctrl = APCI1516_WDOG_CTRL_ENABLE;
- reload = data[1] & s->maxdata;
- outw(reload, devpriv->wdog_iobase + APCI1516_WDOG_RELOAD_REG);
-
- /* Time base is 20ms, let the user know the timeout */
- dev_info(dev->class_dev, "watchdog enabled, timeout:%dms\n",
- 20 * reload + 20);
- break;
- case INSN_CONFIG_DISARM:
- devpriv->ctrl = 0;
- break;
- default:
- return -EINVAL;
- }
-
- outw(devpriv->ctrl, devpriv->wdog_iobase + APCI1516_WDOG_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1516_wdog_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1516_private *devpriv = dev->private;
- int i;
-
- if (devpriv->ctrl == 0) {
- dev_warn(dev->class_dev, "watchdog is disabled\n");
- return -EINVAL;
- }
-
- /* "ping" the watchdog */
- for (i = 0; i < insn->n; i++) {
- outw(devpriv->ctrl | APCI1516_WDOG_CTRL_SW_TRIG,
- devpriv->wdog_iobase + APCI1516_WDOG_CTRL_REG);
- }
-
- return insn->n;
-}
-
-static int apci1516_wdog_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1516_private *devpriv = dev->private;
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = inw(devpriv->wdog_iobase + APCI1516_WDOG_STATUS_REG);
-
- return insn->n;
-}
-
static int apci1516_reset(struct comedi_device *dev)
{
const struct apci1516_boardinfo *this_board = comedi_board(dev);
@@ -205,8 +124,8 @@ static int apci1516_reset(struct comedi_device *dev)
return 0;
outw(0x0, dev->iobase + APCI1516_DO_REG);
- outw(0x0, devpriv->wdog_iobase + APCI1516_WDOG_CTRL_REG);
- outw(0x0, devpriv->wdog_iobase + APCI1516_WDOG_RELOAD_REG);
+
+ addi_watchdog_reset(devpriv->wdog_iobase);
return 0;
}
@@ -285,13 +204,9 @@ static int apci1516_auto_attach(struct comedi_device *dev,
/* Initialize the watchdog subdevice */
s = &dev->subdevices[2];
if (this_board->has_wdog) {
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE;
- s->n_chan = 1;
- s->maxdata = 0xff;
- s->insn_write = apci1516_wdog_insn_write;
- s->insn_read = apci1516_wdog_insn_read;
- s->insn_config = apci1516_wdog_insn_config;
+ ret = addi_watchdog_init(s, devpriv->wdog_iobase);
+ if (ret)
+ return ret;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -304,10 +219,12 @@ static void apci1516_detach(struct comedi_device *dev)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- if (dev->iobase) {
+ if (dev->iobase)
apci1516_reset(dev);
+ if (dev->subdevices)
+ addi_watchdog_cleanup(&dev->subdevices[2]);
+ if (dev->iobase)
comedi_pci_disable(pcidev);
- }
}
static struct comedi_driver apci1516_driver = {
@@ -323,11 +240,6 @@ static int apci1516_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1516_driver);
}
-static void apci1516_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci1516_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_APCI1016) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_APCI1516) },
@@ -340,7 +252,7 @@ static struct pci_driver apci1516_pci_driver = {
.name = "addi_apci_1516",
.id_table = apci1516_pci_table,
.probe = apci1516_pci_probe,
- .remove = apci1516_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci1516_driver, apci1516_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 513e536f292f..ddea64df9180 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -1,3 +1,5 @@
+#include <linux/pci.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
@@ -50,11 +52,6 @@ static int apci1564_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1564_driver);
}
-static void apci1564_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci1564_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1006) },
{ 0 }
@@ -65,7 +62,7 @@ static struct pci_driver apci1564_pci_driver = {
.name = "addi_apci_1564",
.id_table = apci1564_pci_table,
.probe = apci1564_pci_probe,
- .remove = apci1564_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci1564_driver, apci1564_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index ab9a96ac8180..e51f80001363 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -1,49 +1,227 @@
+/*
+ * addi_apci_16xx.c
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ * Project manager: S. Weber
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +19(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * You should also find the complete GPL in the COPYING file accompanying
+ * this source code.
+ */
+
+#include <linux/pci.h>
+
#include "../comedidev.h"
-#include "comedi_fc.h"
-#include "amcc_s5933.h"
-#include "addi-data/addi_common.h"
+/*
+ * PCI device ids supported by this driver
+ */
+#define PCI_DEVICE_ID_APCI1648 0x1009
+#define PCI_DEVICE_ID_APCI1696 0x100a
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_apci16xx.c"
-#include "addi-data/addi_common.c"
+/*
+ * Register I/O map
+ */
+#define APCI16XX_IN_REG(x) (((x) * 4) + 0x08)
+#define APCI16XX_OUT_REG(x) (((x) * 4) + 0x14)
+#define APCI16XX_DIR_REG(x) (((x) * 4) + 0x20)
-static const struct addi_board apci16xx_boardtypes[] = {
+struct apci16xx_boardinfo {
+ const char *name;
+ unsigned short vendor;
+ unsigned short device;
+ int n_chan;
+};
+
+static const struct apci16xx_boardinfo apci16xx_boardtypes[] = {
{
- .pc_DriverName = "apci1648",
- .i_VendorId = PCI_VENDOR_ID_ADDIDATA,
- .i_DeviceId = 0x1009,
- .i_IorangeBase0 = 128,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .i_NbrTTLChannel = 48,
- .reset = i_APCI16XX_Reset,
- .ttl_config = i_APCI16XX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI16XX_InsnBitsReadTTLIO,
- .ttl_read = i_APCI16XX_InsnReadTTLIOAllPortValue,
- .ttl_write = i_APCI16XX_InsnBitsWriteTTLIO,
+ .name = "apci1648",
+ .vendor = PCI_VENDOR_ID_ADDIDATA,
+ .device = PCI_DEVICE_ID_APCI1648,
+ .n_chan = 48, /* 2 subdevices */
}, {
- .pc_DriverName = "apci1696",
- .i_VendorId = PCI_VENDOR_ID_ADDIDATA,
- .i_DeviceId = 0x100A,
- .i_IorangeBase0 = 128,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .i_NbrTTLChannel = 96,
- .reset = i_APCI16XX_Reset,
- .ttl_config = i_APCI16XX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI16XX_InsnBitsReadTTLIO,
- .ttl_read = i_APCI16XX_InsnReadTTLIOAllPortValue,
- .ttl_write = i_APCI16XX_InsnBitsWriteTTLIO,
+ .name = "apci1696",
+ .vendor = PCI_VENDOR_ID_ADDIDATA,
+ .device = PCI_DEVICE_ID_APCI1696,
+ .n_chan = 96, /* 3 subdevices */
},
};
+static int apci16xx_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
+ unsigned int bits;
+
+ /*
+ * Each 8-bit "port" is configurable as either input or
+ * output. Changing the configuration of any channel in
+ * a port changes the entire port.
+ */
+ if (chan_mask & 0x000000ff)
+ bits = 0x000000ff;
+ else if (chan_mask & 0x0000ff00)
+ bits = 0x0000ff00;
+ else if (chan_mask & 0x00ff0000)
+ bits = 0x00ff0000;
+ else
+ bits = 0xff000000;
+
+ switch (data[0]) {
+ case INSN_CONFIG_DIO_INPUT:
+ s->io_bits &= ~bits;
+ break;
+ case INSN_CONFIG_DIO_OUTPUT:
+ s->io_bits |= bits;
+ break;
+ case INSN_CONFIG_DIO_QUERY:
+ data[1] = (s->io_bits & bits) ? COMEDI_INPUT : COMEDI_OUTPUT;
+ return insn->n;
+ default:
+ return -EINVAL;
+ }
+
+ outl(s->io_bits, dev->iobase + APCI16XX_DIR_REG(s->index));
+
+ return insn->n;
+}
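A quick worked example of the port grouping described in the comment above: CR_CHAN() == 13 gives chan_mask = 1 << 13 = 0x00002000, which lies in the 0x0000ff00 group, so an INSN_CONFIG_DIO_OUTPUT on channel 13 sets io_bits 8..15 and the whole second 8-bit port switches to output.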
+
+static int apci16xx_dio_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+
+ /* Only update the channels configured as outputs */
+ mask &= s->io_bits;
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ outl(s->state, dev->iobase + APCI16XX_OUT_REG(s->index));
+ }
+
+ data[1] = inl(dev->iobase + APCI16XX_IN_REG(s->index));
+
+ return insn->n;
+}
+
+static const void *apci16xx_find_boardinfo(struct comedi_device *dev,
+ struct pci_dev *pcidev)
+{
+ const struct apci16xx_boardinfo *board;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(apci16xx_boardtypes); i++) {
+ board = &apci16xx_boardtypes[i];
+ if (board->vendor == pcidev->vendor &&
+ board->device == pcidev->device)
+ return board;
+ }
+ return NULL;
+}
+
+static int apci16xx_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ const struct apci16xx_boardinfo *board;
+ struct comedi_subdevice *s;
+ unsigned int n_subdevs;
+ unsigned int last;
+ int i;
+ int ret;
+
+ board = apci16xx_find_boardinfo(dev, pcidev);
+ if (!board)
+ return -ENODEV;
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+
+ ret = comedi_pci_enable(pcidev, dev->board_name);
+ if (ret)
+ return ret;
+
+ dev->iobase = pci_resource_start(pcidev, 0);
+
+ /*
+ * Work out the number of subdevices needed to support all the
+ * digital i/o channels on the board. Each subdevice supports
+ * up to 32 channels.
+ */
+ n_subdevs = board->n_chan / 32;
+ if ((n_subdevs * 32) < board->n_chan) {
+ last = board->n_chan - (n_subdevs * 32);
+ n_subdevs++;
+ } else {
+ last = 0;
+ }
+
+ ret = comedi_alloc_subdevices(dev, n_subdevs);
+ if (ret)
+ return ret;
+
+ /* Initialize the TTL digital i/o subdevices */
+ for (i = 0; i < n_subdevs; i++) {
+ s = &dev->subdevices[i];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_WRITEABLE | SDF_READABLE;
+ s->n_chan = ((i * 32) < board->n_chan) ? 32 : last;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = apci16xx_insn_config;
+ s->insn_bits = apci16xx_dio_insn_bits;
+
+ /* Default all channels to inputs */
+ s->io_bits = 0;
+ outl(s->io_bits, dev->iobase + APCI16XX_DIR_REG(i));
+ }
+
+ return 0;
+}
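Worked through for the two boards above: the apci1696 has 96 channels, so n_subdevs = 96 / 32 = 3 and last = 0; the apci1648 has 48 channels, so the division gives 1, the remainder check bumps n_subdevs to 2, and last = 48 - 32 = 16.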
+
+static void apci16xx_detach(struct comedi_device *dev)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+
+ if (pcidev) {
+ if (dev->iobase)
+ comedi_pci_disable(pcidev);
+ }
+}
+
static struct comedi_driver apci16xx_driver = {
.driver_name = "addi_apci_16xx",
.module = THIS_MODULE,
- .auto_attach = addi_auto_attach,
- .detach = i_ADDI_Detach,
+ .auto_attach = apci16xx_auto_attach,
+ .detach = apci16xx_detach,
.num_names = ARRAY_SIZE(apci16xx_boardtypes),
- .board_name = &apci16xx_boardtypes[0].pc_DriverName,
- .offset = sizeof(struct addi_board),
+ .board_name = &apci16xx_boardtypes[0].name,
+ .offset = sizeof(struct apci16xx_boardinfo),
};
static int apci16xx_pci_probe(struct pci_dev *dev,
@@ -52,14 +230,9 @@ static int apci16xx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci16xx_driver);
}
-static void apci16xx_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci16xx_pci_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1009) },
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x100a) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_APCI1648) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, PCI_DEVICE_ID_APCI1696) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, apci16xx_pci_table);
@@ -68,10 +241,10 @@ static struct pci_driver apci16xx_pci_driver = {
.name = "addi_apci_16xx",
.id_table = apci16xx_pci_table,
.probe = apci16xx_pci_probe,
- .remove = apci16xx_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci16xx_driver, apci16xx_pci_driver);
+MODULE_DESCRIPTION("ADDI-DATA APCI-1648/1696, TTL I/O boards");
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1710.c b/drivers/staging/comedi/drivers/addi_apci_1710.c
index 152e7ef9b17b..e83e829831b0 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1710.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1710.c
@@ -1,3 +1,5 @@
+#include <linux/pci.h>
+
#include <asm/i387.h>
#include "../comedidev.h"
@@ -128,11 +130,6 @@ static int apci1710_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1710_driver);
}
-static void apci1710_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci1710_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA_OLD, APCI1710_BOARD_DEVICE_ID) },
{ 0 }
@@ -143,7 +140,7 @@ static struct pci_driver apci1710_pci_driver = {
.name = "addi_apci_1710",
.id_table = apci1710_pci_table,
.probe = apci1710_pci_probe,
- .remove = apci1710_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci1710_driver, apci1710_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index 8f8d3e95fc78..9ce1d26aff2f 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -29,7 +29,11 @@
* this source code.
*/
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
#include "../comedidev.h"
+#include "addi_watchdog.h"
#include "comedi_fc.h"
/*
@@ -45,17 +49,12 @@
#define APCI2032_STATUS_REG 0x0c
#define APCI2032_STATUS_IRQ (1 << 0)
#define APCI2032_WDOG_REG 0x10
-#define APCI2032_WDOG_RELOAD_REG 0x14
-#define APCI2032_WDOG_TIMEBASE 0x18
-#define APCI2032_WDOG_CTRL_REG 0x1c
-#define APCI2032_WDOG_CTRL_ENABLE (1 << 0)
-#define APCI2032_WDOG_CTRL_SW_TRIG (1 << 9)
-#define APCI2032_WDOG_STATUS_REG 0x20
-#define APCI2032_WDOG_STATUS_ENABLED (1 << 0)
-#define APCI2032_WDOG_STATUS_SW_TRIG (1 << 1)
-
-struct apci2032_private {
- unsigned int wdog_ctrl;
+
+struct apci2032_int_private {
+ spinlock_t spinlock;
+ unsigned int stop_count;
+ bool active;
+ unsigned char enabled_isns;
};
static int apci2032_do_insn_bits(struct comedi_device *dev,
@@ -79,88 +78,47 @@ static int apci2032_do_insn_bits(struct comedi_device *dev,
return insn->n;
}
-/*
- * The watchdog subdevice is configured with two INSN_CONFIG instructions:
- *
- * Enable the watchdog and set the reload timeout:
- * data[0] = INSN_CONFIG_ARM
- * data[1] = timeout reload value
- *
- * Disable the watchdog:
- * data[0] = INSN_CONFIG_DISARM
- */
-static int apci2032_wdog_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci2032_int_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct apci2032_private *devpriv = dev->private;
- unsigned int reload;
-
- switch (data[0]) {
- case INSN_CONFIG_ARM:
- devpriv->wdog_ctrl = APCI2032_WDOG_CTRL_ENABLE;
- reload = data[1] & s->maxdata;
- outw(reload, dev->iobase + APCI2032_WDOG_RELOAD_REG);
-
- /* Time base is 20ms, let the user know the timeout */
- dev_info(dev->class_dev, "watchdog enabled, timeout:%dms\n",
- 20 * reload + 20);
- break;
- case INSN_CONFIG_DISARM:
- devpriv->wdog_ctrl = 0;
- break;
- default:
- return -EINVAL;
- }
-
- outw(devpriv->wdog_ctrl, dev->iobase + APCI2032_WDOG_CTRL_REG);
-
+ data[1] = inl(dev->iobase + APCI2032_INT_STATUS_REG) & 3;
return insn->n;
}
-static int apci2032_wdog_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static void apci2032_int_stop(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct apci2032_private *devpriv = dev->private;
- int i;
-
- if (devpriv->wdog_ctrl == 0) {
- dev_warn(dev->class_dev, "watchdog is disabled\n");
- return -EINVAL;
- }
-
- /* "ping" the watchdog */
- for (i = 0; i < insn->n; i++) {
- outw(devpriv->wdog_ctrl | APCI2032_WDOG_CTRL_SW_TRIG,
- dev->iobase + APCI2032_WDOG_CTRL_REG);
- }
+ struct apci2032_int_private *subpriv = s->private;
- return insn->n;
+ subpriv->active = false;
+ subpriv->enabled_isns = 0;
+ outl(0x0, dev->iobase + APCI2032_INT_CTRL_REG);
}
-static int apci2032_wdog_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static bool apci2032_int_start(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned char enabled_isns)
{
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = inl(dev->iobase + APCI2032_WDOG_STATUS_REG);
-
- return insn->n;
-}
+ struct apci2032_int_private *subpriv = s->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ bool do_event;
+
+ subpriv->enabled_isns = enabled_isns;
+ subpriv->stop_count = cmd->stop_arg;
+ if (cmd->stop_src == TRIG_COUNT && subpriv->stop_count == 0) {
+ /* An empty acquisition! */
+ s->async->events |= COMEDI_CB_EOA;
+ subpriv->active = false;
+ do_event = true;
+ } else {
+ subpriv->active = true;
+ outl(enabled_isns, dev->iobase + APCI2032_INT_CTRL_REG);
+ do_event = false;
+ }
-static int apci2032_int_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = s->state;
- return insn->n;
+ return do_event;
}
static int apci2032_int_cmdtest(struct comedi_device *dev,
@@ -172,15 +130,17 @@ static int apci2032_int_cmdtest(struct comedi_device *dev,
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_OTHER);
- err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_NONE);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
/* Step 2b : and mutually compatible */
if (err)
@@ -189,18 +149,11 @@ static int apci2032_int_cmdtest(struct comedi_device *dev,
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
-
- /*
- * 0 == no trigger
- * 1 == trigger on VCC interrupt
- * 2 == trigger on CC interrupt
- * 3 == trigger on either VCC or CC interrupt
- */
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 3);
-
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, 1);
- err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+ if (cmd->stop_src == TRIG_NONE)
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
@@ -217,8 +170,22 @@ static int apci2032_int_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
+ struct apci2032_int_private *subpriv = s->private;
+ unsigned char enabled_isns;
+ unsigned int n;
+ unsigned long flags;
+ bool do_event;
+
+ enabled_isns = 0;
+ for (n = 0; n < cmd->chanlist_len; n++)
+ enabled_isns |= 1 << CR_CHAN(cmd->chanlist[n]);
+
+ spin_lock_irqsave(&subpriv->spinlock, flags);
+ do_event = apci2032_int_start(dev, s, enabled_isns);
+ spin_unlock_irqrestore(&subpriv->spinlock, flags);
- outl(cmd->scan_begin_arg, dev->iobase + APCI2032_INT_CTRL_REG);
+ if (do_event)
+ comedi_event(dev, s);
return 0;
}
@@ -226,7 +193,13 @@ static int apci2032_int_cmd(struct comedi_device *dev,
static int apci2032_int_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- outl(0x0, dev->iobase + APCI2032_INT_CTRL_REG);
+ struct apci2032_int_private *subpriv = s->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&subpriv->spinlock, flags);
+ if (subpriv->active)
+ apci2032_int_stop(dev, s);
+ spin_unlock_irqrestore(&subpriv->spinlock, flags);
return 0;
}
@@ -235,19 +208,64 @@ static irqreturn_t apci2032_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
+ struct apci2032_int_private *subpriv;
unsigned int val;
+ bool do_event = false;
+
+ if (!dev->attached)
+ return IRQ_NONE;
/* Check if VCC OR CC interrupt has occurred */
val = inl(dev->iobase + APCI2032_STATUS_REG) & APCI2032_STATUS_IRQ;
if (!val)
return IRQ_NONE;
- s->state = inl(dev->iobase + APCI2032_INT_STATUS_REG);
- outl(0x0, dev->iobase + APCI2032_INT_CTRL_REG);
+ subpriv = s->private;
+ spin_lock(&subpriv->spinlock);
+
+ val = inl(dev->iobase + APCI2032_INT_STATUS_REG) & 3;
+ /* Disable triggered interrupt sources. */
+ outl(~val & 3, dev->iobase + APCI2032_INT_CTRL_REG);
+ /*
+ * Note: We don't reenable the triggered interrupt sources because they
+ * are level-sensitive, hardware error status interrupt sources and
+ * they'd keep triggering interrupts repeatedly.
+ */
- comedi_buf_put(s->async, s->state);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(dev, s);
+ if (subpriv->active && (val & subpriv->enabled_isns) != 0) {
+ unsigned short bits;
+ unsigned int n, len;
+ unsigned int *chanlist;
+
+ /* Bits in scan data correspond to indices in channel list. */
+ bits = 0;
+ len = s->async->cmd.chanlist_len;
+ chanlist = &s->async->cmd.chanlist[0];
+ for (n = 0; n < len; n++)
+ if ((val & (1U << CR_CHAN(chanlist[n]))) != 0)
+ bits |= 1U << n;
+
+ if (comedi_buf_put(s->async, bits)) {
+ s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
+ if (s->async->cmd.stop_src == TRIG_COUNT &&
+ subpriv->stop_count > 0) {
+ subpriv->stop_count--;
+ if (subpriv->stop_count == 0) {
+ /* end of acquisition */
+ s->async->events |= COMEDI_CB_EOA;
+ apci2032_int_stop(dev, s);
+ }
+ }
+ } else {
+ apci2032_int_stop(dev, s);
+ s->async->events |= COMEDI_CB_OVERFLOW;
+ }
+ do_event = true;
+ }
+
+ spin_unlock(&subpriv->spinlock);
+ if (do_event)
+ comedi_event(dev, s);
return IRQ_HANDLED;
}
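As a worked example of the bit remapping above (channel numbers chosen only for illustration): with a chanlist of {channel 1, channel 0} and only channel 0 asserted in the interrupt status register (val = 0x1), the loop sets bit 1 of the sample, so bits = 0x2 is what comedi_buf_put() pushes to the buffer.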
@@ -256,8 +274,8 @@ static int apci2032_reset(struct comedi_device *dev)
{
outl(0x0, dev->iobase + APCI2032_DO_REG);
outl(0x0, dev->iobase + APCI2032_INT_CTRL_REG);
- outl(0x0, dev->iobase + APCI2032_WDOG_CTRL_REG);
- outl(0x0, dev->iobase + APCI2032_WDOG_RELOAD_REG);
+
+ addi_watchdog_reset(dev->iobase + APCI2032_WDOG_REG);
return 0;
}
@@ -266,21 +284,16 @@ static int apci2032_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct apci2032_private *devpriv;
struct comedi_subdevice *s;
int ret;
dev->board_name = dev->driver->driver_name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
ret = comedi_pci_enable(pcidev, dev->board_name);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 1);
+ apci2032_reset(dev);
if (pcidev->irq > 0) {
ret = request_irq(pcidev->irq, apci2032_interrupt,
@@ -304,32 +317,34 @@ static int apci2032_auto_attach(struct comedi_device *dev,
/* Initialize the watchdog subdevice */
s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE;
- s->n_chan = 1;
- s->maxdata = 0xff;
- s->insn_write = apci2032_wdog_insn_write;
- s->insn_read = apci2032_wdog_insn_read;
- s->insn_config = apci2032_wdog_insn_config;
+ ret = addi_watchdog_init(s, dev->iobase + APCI2032_WDOG_REG);
+ if (ret)
+ return ret;
/* Initialize the interrupt subdevice */
s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 2;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci2032_int_insn_bits;
if (dev->irq) {
+ struct apci2032_int_private *subpriv;
+
dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI | SDF_CMD_READ;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci2032_int_insn_bits;
+ subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL);
+ if (!subpriv)
+ return -ENOMEM;
+ spin_lock_init(&subpriv->spinlock);
+ s->private = subpriv;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->len_chanlist = 2;
s->do_cmdtest = apci2032_int_cmdtest;
s->do_cmd = apci2032_int_cmd;
s->cancel = apci2032_int_cancel;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
}
- apci2032_reset(dev);
return 0;
}
@@ -341,6 +356,10 @@ static void apci2032_detach(struct comedi_device *dev)
apci2032_reset(dev);
if (dev->irq)
free_irq(dev->irq, dev);
+ if (dev->read_subdev)
+ kfree(dev->read_subdev->private);
+ if (dev->subdevices)
+ addi_watchdog_cleanup(&dev->subdevices[1]);
if (pcidev) {
if (dev->iobase)
comedi_pci_disable(pcidev);
@@ -360,11 +379,6 @@ static int apci2032_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci2032_driver);
}
-static void apci2032_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci2032_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1004) },
{ 0 }
@@ -375,7 +389,7 @@ static struct pci_driver apci2032_pci_driver = {
.name = "addi_apci_2032",
.id_table = apci2032_pci_table,
.probe = apci2032_pci_probe,
- .remove = apci2032_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci2032_driver, apci2032_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index 7c2c5db01218..b1c4226902e1 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -1,42 +1,152 @@
+/*
+ * addi_apci_2200.c
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ * Project manager: Eric Stolz
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +19(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * You should also find the complete GPL in the COPYING file accompanying
+ * this source code.
+ */
+
+#include <linux/pci.h>
+
#include "../comedidev.h"
-#include "comedi_fc.h"
-#include "amcc_s5933.h"
-
-#include "addi-data/addi_common.h"
-
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_apci2200.c"
-#include "addi-data/addi_common.c"
-
-static const struct addi_board apci2200_boardtypes[] = {
- {
- .pc_DriverName = "apci2200",
- .i_VendorId = PCI_VENDOR_ID_ADDIDATA,
- .i_DeviceId = 0x1005,
- .i_IorangeBase0 = 4,
- .i_IorangeBase1 = APCI2200_ADDRESS_RANGE,
- .i_PCIEeprom = ADDIDATA_EEPROM,
- .pc_EepromChip = ADDIDATA_93C76,
- .i_NbrDiChannel = 8,
- .i_NbrDoChannel = 16,
- .i_Timer = 1,
- .reset = i_APCI2200_Reset,
- .di_bits = apci2200_di_insn_bits,
- .do_bits = apci2200_do_insn_bits,
- .timer_config = i_APCI2200_ConfigWatchdog,
- .timer_write = i_APCI2200_StartStopWriteWatchdog,
- .timer_read = i_APCI2200_ReadWatchdog,
- },
-};
+#include "addi_watchdog.h"
+
+/*
+ * I/O Register Map
+ */
+#define APCI2200_DI_REG 0x00
+#define APCI2200_DO_REG 0x04
+#define APCI2200_WDOG_REG 0x08
+
+static int apci2200_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inw(dev->iobase + APCI2200_DI_REG);
+
+ return insn->n;
+}
+
+static int apci2200_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+
+ s->state = inw(dev->iobase + APCI2200_DO_REG);
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ outw(s->state, dev->iobase + APCI2200_DO_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int apci2200_reset(struct comedi_device *dev)
+{
+ outw(0x0, dev->iobase + APCI2200_DO_REG);
+
+ addi_watchdog_reset(dev->iobase + APCI2200_WDOG_REG);
+
+ return 0;
+}
+
+static int apci2200_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct comedi_subdevice *s;
+ int ret;
+
+ dev->board_name = dev->driver->driver_name;
+
+ ret = comedi_pci_enable(pcidev, dev->board_name);
+ if (ret)
+ return ret;
+
+ dev->iobase = pci_resource_start(pcidev, 1);
+
+ ret = comedi_alloc_subdevices(dev, 3);
+ if (ret)
+ return ret;
+
+ /* Initialize the digital input subdevice */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci2200_di_insn_bits;
+
+ /* Initialize the digital output subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci2200_do_insn_bits;
+
+ /* Initialize the watchdog subdevice */
+ s = &dev->subdevices[2];
+ ret = addi_watchdog_init(s, dev->iobase + APCI2200_WDOG_REG);
+ if (ret)
+ return ret;
+
+ apci2200_reset(dev);
+ return 0;
+}
+
+static void apci2200_detach(struct comedi_device *dev)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+
+ if (dev->iobase)
+ apci2200_reset(dev);
+ if (dev->subdevices)
+ addi_watchdog_cleanup(&dev->subdevices[2]);
+ if (pcidev) {
+ if (dev->iobase)
+ comedi_pci_disable(pcidev);
+ }
+}
static struct comedi_driver apci2200_driver = {
.driver_name = "addi_apci_2200",
.module = THIS_MODULE,
- .auto_attach = addi_auto_attach,
- .detach = i_ADDI_Detach,
- .num_names = ARRAY_SIZE(apci2200_boardtypes),
- .board_name = &apci2200_boardtypes[0].pc_DriverName,
- .offset = sizeof(struct addi_board),
+ .auto_attach = apci2200_auto_attach,
+ .detach = apci2200_detach,
};
static int apci2200_pci_probe(struct pci_dev *dev,
@@ -45,11 +155,6 @@ static int apci2200_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci2200_driver);
}
-static void apci2200_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci2200_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1005) },
{ 0 }
@@ -60,10 +165,10 @@ static struct pci_driver apci2200_pci_driver = {
.name = "addi_apci_2200",
.id_table = apci2200_pci_table,
.probe = apci2200_pci_probe,
- .remove = apci2200_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci2200_driver, apci2200_pci_driver);
+MODULE_DESCRIPTION("ADDI-DATA APCI-2200 Relay board, optically isolated");
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index fec2962affc7..917234d24e99 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -1,3 +1,5 @@
+#include <linux/pci.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
@@ -251,11 +253,6 @@ static int apci3120_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3120_driver);
}
-static void apci3120_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci3120_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA_OLD, 0x818d) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA_OLD, 0x828d) },
@@ -267,7 +264,7 @@ static struct pci_driver apci3120_pci_driver = {
.name = "addi_apci_3120",
.id_table = apci3120_pci_table,
.probe = apci3120_pci_probe,
- .remove = apci3120_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3120_driver, apci3120_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
index 9085b774b48d..90ee4f844f91 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3200.c
@@ -1,3 +1,5 @@
+#include <linux/pci.h>
+
#include <asm/i387.h>
#include "../comedidev.h"
@@ -106,15 +108,10 @@ static int apci3200_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3200_driver);
}
-static void apci3200_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static struct pci_driver apci3200_pci_driver = {
.name = "addi_apci_3200",
.id_table = apci3200_pci_table,
.probe = apci3200_pci_probe,
- .remove = apci3200_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3200_driver, apci3200_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index ed297deb8634..786fcaf82c32 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -1,54 +1,445 @@
+/*
+ * addi_apci_3501.c
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ * Project manager: Eric Stolz
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +19(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * You should also find the complete GPL in the COPYING file accompanying
+ * this source code.
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
-#include "addi-data/addi_common.h"
+/*
+ * PCI bar 1 register I/O map
+ */
+#define APCI3501_AO_CTRL_STATUS_REG 0x00
+#define APCI3501_AO_CTRL_BIPOLAR (1 << 0)
+#define APCI3501_AO_STATUS_READY (1 << 8)
+#define APCI3501_AO_DATA_REG 0x04
+#define APCI3501_AO_DATA_CHAN(x) ((x) << 0)
+#define APCI3501_AO_DATA_VAL(x) ((x) << 8)
+#define APCI3501_AO_DATA_BIPOLAR (1 << 31)
+#define APCI3501_AO_TRIG_SCS_REG 0x08
+#define APCI3501_TIMER_SYNC_REG 0x20
+#define APCI3501_TIMER_RELOAD_REG 0x24
+#define APCI3501_TIMER_TIMEBASE_REG 0x28
+#define APCI3501_TIMER_CTRL_REG 0x2c
+#define APCI3501_TIMER_STATUS_REG 0x30
+#define APCI3501_TIMER_IRQ_REG 0x34
+#define APCI3501_TIMER_WARN_RELOAD_REG 0x38
+#define APCI3501_TIMER_WARN_TIMEBASE_REG 0x3c
+#define APCI3501_DO_REG 0x40
+#define APCI3501_DI_REG 0x50
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_apci3501.c"
-#include "addi-data/addi_common.c"
-
-static const struct addi_board apci3501_boardtypes[] = {
- {
- .pc_DriverName = "apci3501",
- .i_VendorId = PCI_VENDOR_ID_ADDIDATA,
- .i_DeviceId = 0x3001,
- .i_IorangeBase0 = 64,
- .i_IorangeBase1 = APCI3501_ADDRESS_RANGE,
- .i_PCIEeprom = ADDIDATA_EEPROM,
- .pc_EepromChip = ADDIDATA_S5933,
- .i_AoMaxdata = 16383,
- .pr_AoRangelist = &range_apci3501_ao,
- .i_NbrDiChannel = 2,
- .i_NbrDoChannel = 2,
- .i_DoMaxdata = 0x3,
- .i_Timer = 1,
- .interrupt = v_APCI3501_Interrupt,
- .reset = i_APCI3501_Reset,
- .ao_config = i_APCI3501_ConfigAnalogOutput,
- .ao_write = i_APCI3501_WriteAnalogOutput,
- .di_bits = apci3501_di_insn_bits,
- .do_bits = apci3501_do_insn_bits,
- .timer_config = i_APCI3501_ConfigTimerCounterWatchdog,
- .timer_write = i_APCI3501_StartStopWriteTimerCounterWatchdog,
- .timer_read = i_APCI3501_ReadTimerCounterWatchdog,
- },
+/*
+ * AMCC S5933 NVRAM
+ */
+#define NVRAM_USER_DATA_START 0x100
+
+#define NVCMD_BEGIN_READ (0x7 << 5)
+#define NVCMD_LOAD_LOW (0x4 << 5)
+#define NVCMD_LOAD_HIGH (0x5 << 5)
+
+/*
+ * Function types stored in the eeprom
+ */
+#define EEPROM_DIGITALINPUT 0
+#define EEPROM_DIGITALOUTPUT 1
+#define EEPROM_ANALOGINPUT 2
+#define EEPROM_ANALOGOUTPUT 3
+#define EEPROM_TIMER 4
+#define EEPROM_WATCHDOG 5
+#define EEPROM_TIMER_WATCHDOG_COUNTER 10
+
+struct apci3501_private {
+ int i_IobaseAmcc;
+ struct task_struct *tsk_Current;
+ unsigned char b_TimerSelectMode;
};
-static DEFINE_PCI_DEVICE_TABLE(apci3501_pci_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) },
- { 0 }
+static struct comedi_lrange apci3501_ao_range = {
+ 2, {
+ BIP_RANGE(10),
+ UNI_RANGE(10)
+ }
};
-MODULE_DEVICE_TABLE(pci, apci3501_pci_table);
+
+static int apci3501_wait_for_dac(struct comedi_device *dev)
+{
+ unsigned int status;
+
+ do {
+ status = inl(dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
+ } while (!(status & APCI3501_AO_STATUS_READY));
+
+ return 0;
+}
+
+static int apci3501_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val = 0;
+ int i;
+ int ret;
+
+ /*
+ * All analog output channels have the same output range.
+ * 14-bit bipolar: +/-10V
+ * 13-bit unipolar: 0-10V
+ * Changing the range of one channel changes all of them!
+ */
+ if (range) {
+ outl(0, dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
+ } else {
+ val |= APCI3501_AO_DATA_BIPOLAR;
+ outl(APCI3501_AO_CTRL_BIPOLAR,
+ dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
+ }
+
+ val |= APCI3501_AO_DATA_CHAN(chan);
+
+ for (i = 0; i < insn->n; i++) {
+ if (range == 1) {
+ if (data[i] > 0x1fff) {
+ dev_err(dev->class_dev,
+ "Unipolar resolution is only 13-bits\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = apci3501_wait_for_dac(dev);
+ if (ret)
+ return ret;
+
+ outl(val | APCI3501_AO_DATA_VAL(data[i]),
+ dev->iobase + APCI3501_AO_DATA_REG);
+ }
+
+ return insn->n;
+}
+
+#include "addi-data/hwdrv_apci3501.c"
+
+static int apci3501_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inl(dev->iobase + APCI3501_DI_REG) & 0x3;
+
+ return insn->n;
+}
+
+static int apci3501_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+
+ s->state = inl(dev->iobase + APCI3501_DO_REG);
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ outl(s->state, dev->iobase + APCI3501_DO_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static void apci3501_eeprom_wait(unsigned long iobase)
+{
+ unsigned char val;
+
+ do {
+ val = inb(iobase + AMCC_OP_REG_MCSR_NVCMD);
+ } while (val & 0x80);
+}
+
+static unsigned short apci3501_eeprom_readw(unsigned long iobase,
+ unsigned short addr)
+{
+ unsigned short val = 0;
+ unsigned char tmp;
+ unsigned char i;
+
+ /* Add the offset to the start of the user data */
+ addr += NVRAM_USER_DATA_START;
+
+ for (i = 0; i < 2; i++) {
+ /* Load the low 8 bit address */
+ outb(NVCMD_LOAD_LOW, iobase + AMCC_OP_REG_MCSR_NVCMD);
+ apci3501_eeprom_wait(iobase);
+ outb((addr + i) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
+ apci3501_eeprom_wait(iobase);
+
+ /* Load the high 8 bit address */
+ outb(NVCMD_LOAD_HIGH, iobase + AMCC_OP_REG_MCSR_NVCMD);
+ apci3501_eeprom_wait(iobase);
+ outb(((addr + i) >> 8) & 0xff,
+ iobase + AMCC_OP_REG_MCSR_NVDATA);
+ apci3501_eeprom_wait(iobase);
+
+ /* Read the eeprom data byte */
+ outb(NVCMD_BEGIN_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);
+ apci3501_eeprom_wait(iobase);
+ tmp = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);
+ apci3501_eeprom_wait(iobase);
+
+ if (i == 0)
+ val |= tmp;
+ else
+ val |= (tmp << 8);
+ }
+
+ return val;
+}
+
+static int apci3501_eeprom_get_ao_n_chan(struct comedi_device *dev)
+{
+ struct apci3501_private *devpriv = dev->private;
+ unsigned long iobase = devpriv->i_IobaseAmcc;
+ unsigned char nfuncs;
+ int i;
+
+ nfuncs = apci3501_eeprom_readw(iobase, 10) & 0xff;
+
+ /* Read functionality details */
+ for (i = 0; i < nfuncs; i++) {
+ unsigned short offset = i * 4;
+ unsigned short addr;
+ unsigned char func;
+ unsigned short val;
+
+ func = apci3501_eeprom_readw(iobase, 12 + offset) & 0x3f;
+ addr = apci3501_eeprom_readw(iobase, 14 + offset);
+
+ if (func == EEPROM_ANALOGOUTPUT) {
+ val = apci3501_eeprom_readw(iobase, addr + 10);
+ return (val >> 4) & 0x3ff;
+ }
+ }
+ return 0;
+}
+
+static int apci3501_eeprom_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3501_private *devpriv = dev->private;
+ unsigned short addr = CR_CHAN(insn->chanspec);
+
+ data[0] = apci3501_eeprom_readw(devpriv->i_IobaseAmcc, 2 * addr);
+
+ return insn->n;
+}
+
+static irqreturn_t apci3501_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct apci3501_private *devpriv = dev->private;
+ unsigned int ui_Timer_AOWatchdog;
+ unsigned long ul_Command1;
+ int i_temp;
+
+ /* Disable Interrupt */
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
+ ul_Command1 = (ul_Command1 & 0xFFFFF9FDul);
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+
+ ui_Timer_AOWatchdog = inl(dev->iobase + APCI3501_TIMER_IRQ_REG) & 0x1;
+ if ((!ui_Timer_AOWatchdog)) {
+ comedi_error(dev, "IRQ from unknown source");
+ return IRQ_NONE;
+ }
+
+ /* Enable interrupt and send a signal from kernel to user space */
+ send_sig(SIGIO, devpriv->tsk_Current, 0);
+ ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
+ ul_Command1 = ((ul_Command1 & 0xFFFFF9FDul) | 1 << 1);
+ outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+ i_temp = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
+
+ return IRQ_HANDLED;
+}
+
+static int apci3501_reset(struct comedi_device *dev)
+{
+ unsigned int val;
+ int chan;
+ int ret;
+
+ /* Reset all digital outputs to "0" */
+ outl(0x0, dev->iobase + APCI3501_DO_REG);
+
+ /* Default all analog outputs to 0V (bipolar) */
+ outl(APCI3501_AO_CTRL_BIPOLAR,
+ dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
+ val = APCI3501_AO_DATA_BIPOLAR | APCI3501_AO_DATA_VAL(0);
+
+ /* Set all analog output channels */
+ for (chan = 0; chan < 8; chan++) {
+ ret = apci3501_wait_for_dac(dev);
+ if (ret) {
+ dev_warn(dev->class_dev,
+ "%s: DAC not-ready for channel %i\n",
+ __func__, chan);
+ } else {
+ outl(val | APCI3501_AO_DATA_CHAN(chan),
+ dev->iobase + APCI3501_AO_DATA_REG);
+ }
+ }
+
+ return 0;
+}
+
+static int apci3501_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct apci3501_private *devpriv;
+ struct comedi_subdevice *s;
+ int ao_n_chan;
+ int ret;
+
+ dev->board_name = dev->driver->driver_name;
+
+ devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ if (!devpriv)
+ return -ENOMEM;
+ dev->private = devpriv;
+
+ ret = comedi_pci_enable(pcidev, dev->board_name);
+ if (ret)
+ return ret;
+
+ dev->iobase = pci_resource_start(pcidev, 1);
+ devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
+
+ ao_n_chan = apci3501_eeprom_get_ao_n_chan(dev);
+
+ if (pcidev->irq > 0) {
+ ret = request_irq(pcidev->irq, apci3501_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
+
+ ret = comedi_alloc_subdevices(dev, 5);
+ if (ret)
+ return ret;
+
+ /* Initialize the analog output subdevice */
+ s = &dev->subdevices[0];
+ if (ao_n_chan) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->n_chan = ao_n_chan;
+ s->maxdata = 0x3fff;
+ s->range_table = &apci3501_ao_range;
+ s->insn_write = apci3501_ao_insn_write;
+ } else {
+ s->type = COMEDI_SUBD_UNUSED;
+ }
+
+ /* Initialize the digital input subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 2;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci3501_di_insn_bits;
+
+ /* Initialize the digital output subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 2;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci3501_do_insn_bits;
+
+ /* Initialize the timer/watchdog subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_TIMER;
+ s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->n_chan = 1;
+ s->maxdata = 0;
+ s->len_chanlist = 1;
+ s->range_table = &range_digital;
+ s->insn_write = i_APCI3501_StartStopWriteTimerCounterWatchdog;
+ s->insn_read = i_APCI3501_ReadTimerCounterWatchdog;
+ s->insn_config = i_APCI3501_ConfigTimerCounterWatchdog;
+
+ /* Initialize the eeprom subdevice */
+ s = &dev->subdevices[4];
+ s->type = COMEDI_SUBD_MEMORY;
+ s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
+ s->n_chan = 256;
+ s->maxdata = 0xffff;
+ s->insn_read = apci3501_eeprom_insn_read;
+
+ apci3501_reset(dev);
+ return 0;
+}
+
+static void apci3501_detach(struct comedi_device *dev)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+
+ if (dev->iobase)
+ apci3501_reset(dev);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ if (pcidev) {
+ if (dev->iobase)
+ comedi_pci_disable(pcidev);
+ }
+}
static struct comedi_driver apci3501_driver = {
.driver_name = "addi_apci_3501",
.module = THIS_MODULE,
- .auto_attach = addi_auto_attach,
- .detach = i_ADDI_Detach,
- .num_names = ARRAY_SIZE(apci3501_boardtypes),
- .board_name = &apci3501_boardtypes[0].pc_DriverName,
- .offset = sizeof(struct addi_board),
+ .auto_attach = apci3501_auto_attach,
+ .detach = apci3501_detach,
};
static int apci3501_pci_probe(struct pci_dev *dev,
@@ -57,19 +448,20 @@ static int apci3501_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3501_driver);
}
-static void apci3501_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
+static DEFINE_PCI_DEVICE_TABLE(apci3501_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, apci3501_pci_table);
static struct pci_driver apci3501_pci_driver = {
.name = "addi_apci_3501",
.id_table = apci3501_pci_table,
.probe = apci3501_pci_probe,
- .remove = apci3501_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3501_driver, apci3501_pci_driver);
+MODULE_DESCRIPTION("ADDI-DATA APCI-3501 Analog output board");
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
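
An illustrative user-space sketch of driving the rewritten AO subdevice (assumptions: comedilib's comedi_open()/comedi_data_write() interface, a /dev/comedi0 node, and subdevice 0 being the AO subdevice created in apci3501_auto_attach() above):

#include <comedilib.h>

int main(void)
{
	comedi_t *it = comedi_open("/dev/comedi0");

	if (!it)
		return 1;

	/* channel 0, range 0 (bipolar +/-10V, 14-bit): mid-scale -> 0V */
	comedi_data_write(it, 0, 0, 0, AREF_GROUND, 0x2000);

	/*
	 * channel 1, range 1 (unipolar 0-10V, 13-bit): full-scale -> ~10V;
	 * note the hardware switches the range of all channels at once
	 */
	comedi_data_write(it, 0, 1, 1, AREF_GROUND, 0x1fff);

	comedi_close(it);
	return 0;
}
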
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index 1562347ed64b..09d4b21fce23 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -1,3 +1,5 @@
+#include <linux/pci.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
@@ -751,11 +753,6 @@ static int apci3xxx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3xxx_driver);
}
-static void apci3xxx_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(apci3xxx_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3010) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300f) },
@@ -790,7 +787,7 @@ static struct pci_driver apci3xxx_pci_driver = {
.name = "addi_apci_3xxx",
.id_table = apci3xxx_pci_table,
.probe = apci3xxx_pci_probe,
- .remove = apci3xxx_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3xxx_driver, apci3xxx_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.c b/drivers/staging/comedi/drivers/addi_watchdog.c
new file mode 100644
index 000000000000..375ab665e091
--- /dev/null
+++ b/drivers/staging/comedi/drivers/addi_watchdog.c
@@ -0,0 +1,172 @@
+/*
+ * COMEDI driver for the watchdog subdevice found on some addi-data boards
+ * Copyright (c) 2013 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Based on implementations in various addi-data COMEDI drivers.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "../comedidev.h"
+#include "addi_watchdog.h"
+
+/*
+ * Register offsets/defines for the addi-data watchdog
+ */
+#define ADDI_WDOG_REG 0x00
+#define ADDI_WDOG_RELOAD_REG 0x04
+#define ADDI_WDOG_TIMEBASE 0x08
+#define ADDI_WDOG_CTRL_REG 0x0c
+#define ADDI_WDOG_CTRL_ENABLE (1 << 0)
+#define ADDI_WDOG_CTRL_SW_TRIG (1 << 9)
+#define ADDI_WDOG_STATUS_REG 0x10
+#define ADDI_WDOG_STATUS_ENABLED (1 << 0)
+#define ADDI_WDOG_STATUS_SW_TRIG (1 << 1)
+
+struct addi_watchdog_private {
+ unsigned long iobase;
+ unsigned int wdog_ctrl;
+};
+
+/*
+ * The watchdog subdevice is configured with two INSN_CONFIG instructions:
+ *
+ * Enable the watchdog and set the reload timeout:
+ * data[0] = INSN_CONFIG_ARM
+ * data[1] = timeout reload value
+ *
+ * Disable the watchdog:
+ * data[0] = INSN_CONFIG_DISARM
+ */
+static int addi_watchdog_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct addi_watchdog_private *spriv = s->private;
+ unsigned int reload;
+
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ spriv->wdog_ctrl = ADDI_WDOG_CTRL_ENABLE;
+ reload = data[1] & s->maxdata;
+ outl(reload, spriv->iobase + ADDI_WDOG_RELOAD_REG);
+
+ /* Time base is 20ms, let the user know the timeout */
+ dev_info(dev->class_dev, "watchdog enabled, timeout:%dms\n",
+ 20 * reload + 20);
+ break;
+ case INSN_CONFIG_DISARM:
+ spriv->wdog_ctrl = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ outl(spriv->wdog_ctrl, spriv->iobase + ADDI_WDOG_CTRL_REG);
+
+ return insn->n;
+}
+
+static int addi_watchdog_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct addi_watchdog_private *spriv = s->private;
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = inl(spriv->iobase + ADDI_WDOG_STATUS_REG);
+
+ return insn->n;
+}
+
+static int addi_watchdog_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct addi_watchdog_private *spriv = s->private;
+ int i;
+
+ if (spriv->wdog_ctrl == 0) {
+ dev_warn(dev->class_dev, "watchdog is disabled\n");
+ return -EINVAL;
+ }
+
+ /* "ping" the watchdog */
+ for (i = 0; i < insn->n; i++) {
+ outl(spriv->wdog_ctrl | ADDI_WDOG_CTRL_SW_TRIG,
+ spriv->iobase + ADDI_WDOG_CTRL_REG);
+ }
+
+ return insn->n;
+}
+
+void addi_watchdog_reset(unsigned long iobase)
+{
+ outl(0x0, iobase + ADDI_WDOG_CTRL_REG);
+ outl(0x0, iobase + ADDI_WDOG_RELOAD_REG);
+}
+EXPORT_SYMBOL_GPL(addi_watchdog_reset);
+
+int addi_watchdog_init(struct comedi_subdevice *s, unsigned long iobase)
+{
+ struct addi_watchdog_private *spriv;
+
+ spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
+ if (!spriv)
+ return -ENOMEM;
+
+ spriv->iobase = iobase;
+
+ s->private = spriv;
+
+ s->type = COMEDI_SUBD_TIMER;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 1;
+ s->maxdata = 0xff;
+ s->insn_config = addi_watchdog_insn_config;
+ s->insn_read = addi_watchdog_insn_read;
+ s->insn_write = addi_watchdog_insn_write;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(addi_watchdog_init);
+
+void addi_watchdog_cleanup(struct comedi_subdevice *s)
+{
+ kfree(s->private);
+}
+EXPORT_SYMBOL_GPL(addi_watchdog_cleanup);
+
+static int __init addi_watchdog_module_init(void)
+{
+ return 0;
+}
+module_init(addi_watchdog_module_init);
+
+static void __exit addi_watchdog_module_exit(void)
+{
+}
+module_exit(addi_watchdog_module_exit);
+
+MODULE_DESCRIPTION("ADDI-DATA Watchdog subdevice");
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_LICENSE("GPL");
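
A sketch of the INSN_CONFIG_ARM/INSN_CONFIG_DISARM protocol described above as it would look from user space; the comedilib calls, the /dev/comedi0 node and the watchdog subdevice index are assumptions:

#include <string.h>
#include <comedilib.h>

static int wdog_arm(comedi_t *it, unsigned int subdev, lsampl_t reload)
{
	comedi_insn insn;
	lsampl_t data[2] = { INSN_CONFIG_ARM, reload };

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 2;
	insn.data = data;
	insn.subdev = subdev;
	insn.chanspec = CR_PACK(0, 0, 0);

	/* the driver reports the resulting timeout as 20ms * reload + 20ms */
	return comedi_do_insn(it, &insn);
}

static int wdog_ping(comedi_t *it, unsigned int subdev)
{
	/* any INSN_WRITE retriggers the watchdog while it is armed */
	return comedi_data_write(it, subdev, 0, 0, AREF_GROUND, 0);
}
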
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.h b/drivers/staging/comedi/drivers/addi_watchdog.h
new file mode 100644
index 000000000000..f374a7bff44d
--- /dev/null
+++ b/drivers/staging/comedi/drivers/addi_watchdog.h
@@ -0,0 +1,10 @@
+#ifndef _ADDI_WATCHDOG_H
+#define _ADDI_WATCHDOG_H
+
+#include "../comedidev.h"
+
+void addi_watchdog_reset(unsigned long iobase);
+int addi_watchdog_init(struct comedi_subdevice *, unsigned long iobase);
+void addi_watchdog_cleanup(struct comedi_subdevice *s);
+
+#endif
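
A sketch of how a board driver would hook the new helper module into its attach/detach path, based on the exported functions above (the 0x20 register offset and the single-subdevice layout are made-up examples):

#include "../comedidev.h"
#include "addi_watchdog.h"

static int example_auto_attach(struct comedi_device *dev,
			       unsigned long context)
{
	struct comedi_subdevice *s;
	int ret;

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	/* the watchdog registers live at a board-specific offset */
	s = &dev->subdevices[0];
	ret = addi_watchdog_init(s, dev->iobase + 0x20);
	if (ret)
		return ret;

	addi_watchdog_reset(dev->iobase + 0x20);
	return 0;
}

static void example_detach(struct comedi_device *dev)
{
	if (dev->subdevices)
		addi_watchdog_cleanup(&dev->subdevices[0]);
}
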
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 9a56eed3910f..7b3e331616ed 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -42,6 +42,8 @@ References:
- adl_pci9118.c
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
/*
@@ -270,11 +272,6 @@ static int adl_pci6208_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adl_pci6208_driver);
}
-static void adl_pci6208_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(adl_pci6208_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI6208) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI6216) },
@@ -286,7 +283,7 @@ static struct pci_driver adl_pci6208_pci_driver = {
.name = "adl_pci6208",
.id_table = adl_pci6208_pci_table,
.probe = adl_pci6208_pci_probe,
- .remove = adl_pci6208_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adl_pci6208_driver, adl_pci6208_pci_driver);
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 772edc02f5ce..f27f48e6e702 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -38,12 +38,6 @@ Author: H Hartley Sweeten <hsweeten@visionengravers.com>
Updated: Thu, 02 Aug 2012 14:27:46 -0700
Status: untested
-This driver only attaches using the PCI PnP auto config support
-in the comedi core. The module parameter 'comedi_autoconfig'
-must be 1 (default) to enable this feature. The COMEDI_DEVCONFIG
-ioctl, used by the comedi_config utility, is not supported by
-this driver.
-
The PCI-7230, PCI-7432 and PCI-7433 boards also support external
interrupt signals on digital input channels 0 and 1. The PCI-7233
has dual-interrupt sources for change-of-state (COS) on any 16
@@ -51,9 +45,11 @@ digital input channels of LSB and for COS on any 16 digital input
lines of MSB. Interrupts are not currently supported by this
driver.
-Configuration Options: not applicable
+Configuration Options: not applicable, uses comedi PCI auto config
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
/*
@@ -302,11 +298,6 @@ static int adl_pci7x3x_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adl_pci7x3x_driver);
}
-static void adl_pci7x3x_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(adl_pci7x3x_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7230) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7233) },
@@ -322,7 +313,7 @@ static struct pci_driver adl_pci7x3x_pci_driver = {
.name = "adl_pci7x3x",
.id_table = adl_pci7x3x_pci_table,
.probe = adl_pci7x3x_pci_probe,
- .remove = adl_pci7x3x_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adl_pci7x3x_driver, adl_pci7x3x_pci_driver);
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index 4dd9d707a79d..d06b83f38653 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -30,9 +30,11 @@ Updated: Mon, 14 Apr 2008 15:10:32 +0100
Configuration Options: not applicable, uses PCI auto config
*/
-#include "../comedidev.h"
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/delay.h>
+
+#include "../comedidev.h"
#include "comedi_fc.h"
#include "8253.h"
@@ -62,35 +64,35 @@ static void adl_pci8164_insn_read(struct comedi_device *dev,
char *action, unsigned short offset)
{
int axis, axis_reg;
- char *axisname;
+ char axisname;
axis = CR_CHAN(insn->chanspec);
switch (axis) {
case 0:
axis_reg = PCI8164_AXIS_X;
- axisname = "X";
+ axisname = 'X';
break;
case 1:
axis_reg = PCI8164_AXIS_Y;
- axisname = "Y";
+ axisname = 'Y';
break;
case 2:
axis_reg = PCI8164_AXIS_Z;
- axisname = "Z";
+ axisname = 'Z';
break;
case 3:
axis_reg = PCI8164_AXIS_U;
- axisname = "U";
+ axisname = 'U';
break;
default:
axis_reg = PCI8164_AXIS_X;
- axisname = "X";
+ axisname = 'X';
}
data[0] = inw(dev->iobase + axis_reg + offset);
dev_dbg(dev->class_dev,
- "pci8164 %s read -> %04X:%04X on axis %s\n",
+ "pci8164 %s read -> %04X:%04X on axis %c\n",
action, data[0], data[1], axisname);
}
@@ -142,36 +144,36 @@ static void adl_pci8164_insn_out(struct comedi_device *dev,
{
unsigned int axis, axis_reg;
- char *axisname;
+ char axisname;
axis = CR_CHAN(insn->chanspec);
switch (axis) {
case 0:
axis_reg = PCI8164_AXIS_X;
- axisname = "X";
+ axisname = 'X';
break;
case 1:
axis_reg = PCI8164_AXIS_Y;
- axisname = "Y";
+ axisname = 'Y';
break;
case 2:
axis_reg = PCI8164_AXIS_Z;
- axisname = "Z";
+ axisname = 'Z';
break;
case 3:
axis_reg = PCI8164_AXIS_U;
- axisname = "U";
+ axisname = 'U';
break;
default:
axis_reg = PCI8164_AXIS_X;
- axisname = "X";
+ axisname = 'X';
}
outw(data[0], dev->iobase + axis_reg + offset);
dev_dbg(dev->class_dev,
- "pci8164 %s write -> %04X:%04X on axis %s\n",
+ "pci8164 %s write -> %04X:%04X on axis %c\n",
action, data[0], data[1], axisname);
}
@@ -298,11 +300,6 @@ static int adl_pci8164_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adl_pci8164_driver);
}
-static void adl_pci8164_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164) },
{0}
@@ -313,7 +310,7 @@ static struct pci_driver adl_pci8164_pci_driver = {
.name = "adl_pci8164",
.id_table = adl_pci8164_pci_table,
.probe = adl_pci8164_pci_probe,
- .remove = adl_pci8164_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adl_pci8164_driver, adl_pci8164_pci_driver);
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index a339b9dd27cf..eeb10ec7f178 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -68,11 +68,12 @@ TODO:
*/
-#include "../comedidev.h"
-
+#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include "../comedidev.h"
+
#include "8253.h"
#include "comedi_fc.h"
@@ -963,11 +964,6 @@ static int pci9111_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adl_pci9111_driver);
}
-static void pci9111_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(pci9111_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID) },
/* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */
@@ -979,7 +975,7 @@ static struct pci_driver adl_pci9111_pci_driver = {
.name = "adl_pci9111",
.id_table = pci9111_pci_table,
.probe = pci9111_pci_probe,
- .remove = pci9111_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adl_pci9111_driver, adl_pci9111_pci_driver);
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index b6dda809bd13..4dbac7459a48 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -76,13 +76,15 @@ Configuration options:
* attachment if necessary, and possibly to set other options supported by
* manual attachment.
*/
-#include "../comedidev.h"
+#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include "../comedidev.h"
+
#include "amcc_s5933.h"
#include "8253.h"
#include "comedi_fc.h"
@@ -808,7 +810,7 @@ static void pci9118_calc_divisors(char mode, struct comedi_device *dev,
*tim2 = *div1 * devpriv->i8254_osc_base;
/* real convert timer */
- if (usessh & (chnsshfront == 0)) /* use BSSH signal */
+ if (usessh && (chnsshfront == 0)) /* use BSSH signal */
if (*div2 < (chans + 2))
*div2 = chans + 2;
@@ -2225,11 +2227,6 @@ static int adl_pci9118_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adl_pci9118_driver);
}
-static void adl_pci9118_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(adl_pci9118_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80d9) },
{ 0 }
@@ -2240,7 +2237,7 @@ static struct pci_driver adl_pci9118_pci_driver = {
.name = "adl_pci9118",
.id_table = adl_pci9118_pci_table,
.probe = adl_pci9118_pci_probe,
- .remove = adl_pci9118_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adl_pci9118_driver, adl_pci9118_pci_driver);
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index a6fd8c2c16ca..3d788c76d648 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -41,6 +41,7 @@ Configuration options:
device will be used.
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
@@ -1402,11 +1403,6 @@ static int adv_pci1710_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adv_pci1710_driver);
}
-static void adv_pci1710_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(adv_pci1710_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1710) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1711) },
@@ -1421,7 +1417,7 @@ static struct pci_driver adv_pci1710_pci_driver = {
.name = "adv_pci1710",
.id_table = adv_pci1710_pci_table,
.probe = adv_pci1710_pci_probe,
- .remove = adv_pci1710_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adv_pci1710_driver, adv_pci1710_pci_driver);
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 5af73146dd85..02ce55a01d2a 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -48,6 +48,8 @@ TODO:
3. Implement calibration.
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
/* all the registers for the pci1723 board */
@@ -327,11 +329,6 @@ static int adv_pci1723_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adv_pci1723_driver);
}
-static void adv_pci1723_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(adv_pci1723_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1723) },
{ 0 }
@@ -342,7 +339,7 @@ static struct pci_driver adv_pci1723_pci_driver = {
.name = "adv_pci1723",
.id_table = adv_pci1723_pci_table,
.probe = adv_pci1723_pci_probe,
- .remove = adv_pci1723_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adv_pci1723_driver, adv_pci1723_pci_driver);
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 05a663e970c6..338c43e716ba 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -29,10 +29,11 @@ Configuration options:
*/
-#include "../comedidev.h"
-
+#include <linux/pci.h>
#include <linux/delay.h>
+#include "../comedidev.h"
+
#include "8255.h"
#include "8253.h"
@@ -1206,11 +1207,6 @@ static int adv_pci_dio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adv_pci_dio_driver);
}
-static void adv_pci_dio_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(adv_pci_dio_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1730) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1733) },
@@ -1234,7 +1230,7 @@ static struct pci_driver adv_pci_dio_pci_driver = {
.name = "adv_pci_dio",
.id_table = adv_pci_dio_pci_table,
.probe = adv_pci_dio_pci_probe,
- .remove = adv_pci_dio_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adv_pci_dio_driver, adv_pci_dio_pci_driver);
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index 5f309ba88a1a..7c53dea12c76 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -258,6 +258,7 @@
* order they appear in the channel list.
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -1104,10 +1105,9 @@ dio200_subdev_intr_init(struct comedi_device *dev, struct comedi_subdevice *s,
struct dio200_subdev_intr *subpriv;
subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL);
- if (!subpriv) {
- dev_err(dev->class_dev, "error! out of memory!\n");
+ if (!subpriv)
return -ENOMEM;
- }
+
subpriv->ofs = offset;
subpriv->valid_isns = valid_isns;
spin_lock_init(&subpriv->spinlock);
@@ -1443,10 +1443,8 @@ dio200_subdev_8254_init(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int chan;
subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL);
- if (!subpriv) {
- dev_err(dev->class_dev, "error! out of memory!\n");
+ if (!subpriv)
return -ENOMEM;
- }
s->private = subpriv;
s->type = COMEDI_SUBD_COUNTER;
@@ -1977,8 +1975,7 @@ static int dio200_auto_attach(struct comedi_device *dev,
devpriv->io.u.iobase = (unsigned long)base;
devpriv->io.regtype = io_regtype;
}
- switch (thisboard->model)
- {
+ switch (thisboard->model) {
case pcie215_model:
case pcie236_model:
case pcie296_model:
@@ -2079,16 +2076,11 @@ static int amplc_dio200_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &amplc_dio200_driver);
}
-static void amplc_dio200_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static struct pci_driver amplc_dio200_pci_driver = {
.name = DIO200_DRIVER_NAME,
.id_table = dio200_pci_table,
.probe = &amplc_dio200_pci_probe,
- .remove = &amplc_dio200_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(amplc_dio200_driver, amplc_dio200_pci_driver);
#else
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 289835419577..479e10fddd22 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -52,6 +52,7 @@ the IRQ jumper. If no interrupt is connected, then subdevice 1 is
unused.
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
@@ -614,16 +615,11 @@ static int amplc_pc236_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &amplc_pc236_driver);
}
-static void amplc_pc236_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static struct pci_driver amplc_pc236_pci_driver = {
.name = PC236_DRIVER_NAME,
.id_table = pc236_pci_table,
.probe = &amplc_pc236_pci_probe,
- .remove = &amplc_pc236_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(amplc_pc236_driver, amplc_pc236_pci_driver);
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index dfbff77cd795..11c1f4764eac 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -44,6 +44,8 @@ connected to a reed-relay. Relay contacts are closed when output is 1.
The state of the outputs can be read.
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
#define PC263_DRIVER_NAME "amplc_pc263"
@@ -372,16 +374,11 @@ static int amplc_pc263_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &amplc_pc263_driver);
}
-static void amplc_pc263_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static struct pci_driver amplc_pc263_pci_driver = {
.name = PC263_DRIVER_NAME,
.id_table = pc263_pci_table,
.probe = &amplc_pc263_pci_probe,
- .remove = &amplc_pc263_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(amplc_pc263_driver, amplc_pc263_pci_driver);
#else
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 6e2566a2dd57..c9da4cd74baa 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -103,6 +103,7 @@ Caveats:
correctly.
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -1512,11 +1513,6 @@ static int amplc_pci224_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &amplc_pci224_driver);
}
-static void amplc_pci224_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(amplc_pci224_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234) },
@@ -1528,7 +1524,7 @@ static struct pci_driver amplc_pci224_pci_driver = {
.name = "amplc_pci224",
.id_table = amplc_pci224_pci_table,
.probe = amplc_pci224_pci_probe,
- .remove = amplc_pci224_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(amplc_pci224_driver, amplc_pci224_pci_driver);
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 366c68be56bd..e2244c6e536b 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -188,11 +188,12 @@ Support for PCI230+/260+, more triggered scan functionality, and workarounds
for (or detection of) various hardware problems added by Ian Abbott.
*/
-#include "../comedidev.h"
-
+#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include "../comedidev.h"
+
#include "comedi_fc.h"
#include "8253.h"
#include "8255.h"
@@ -2863,11 +2864,6 @@ static int amplc_pci230_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &amplc_pci230_driver);
}
-static void amplc_pci230_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(amplc_pci230_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) },
@@ -2879,7 +2875,7 @@ static struct pci_driver amplc_pci230_pci_driver = {
.name = "amplc_pci230",
.id_table = amplc_pci230_pci_table,
.probe = amplc_pci230_pci_probe,
- .remove = amplc_pci230_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(amplc_pci230_driver, amplc_pci230_pci_driver);
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 93731de1f2b1..f874fff44523 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -40,9 +40,10 @@ Status: experimental
#include <linux/interrupt.h>
#include <linux/slab.h>
-#include "../comedidev.h"
#include <linux/delay.h>
+#include "../comedidev.h"
+
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -89,8 +90,6 @@ struct das16cs_private {
unsigned short status2;
};
-static struct pcmcia_device *cur_dev;
-
static const struct comedi_lrange das16cs_ai_range = {
4, {
BIP_RANGE(10),
@@ -383,46 +382,45 @@ static int das16cs_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
-static const struct das16cs_board *das16cs_probe(struct comedi_device *dev,
- struct pcmcia_device *link)
+static const void *das16cs_find_boardinfo(struct comedi_device *dev,
+ struct pcmcia_device *link)
{
+ const struct das16cs_board *board;
int i;
for (i = 0; i < ARRAY_SIZE(das16cs_boards); i++) {
- if (das16cs_boards[i].device_id == link->card_id)
- return das16cs_boards + i;
+ board = &das16cs_boards[i];
+ if (board->device_id == link->card_id)
+ return board;
}
- dev_dbg(dev->class_dev, "unknown board!\n");
-
return NULL;
}
-static int das16cs_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
+static int das16cs_auto_attach(struct comedi_device *dev,
+ unsigned long context)
{
- const struct das16cs_board *thisboard;
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
+ const struct das16cs_board *board;
struct das16cs_private *devpriv;
- struct pcmcia_device *link;
struct comedi_subdevice *s;
int ret;
- link = cur_dev; /* XXX hack */
- if (!link)
- return -EIO;
-
- dev->board_ptr = das16cs_probe(dev, link);
- if (!dev->board_ptr)
- return -EIO;
- thisboard = comedi_board(dev);
-
- dev->board_name = thisboard->name;
+ board = das16cs_find_boardinfo(dev, link);
+ if (!board)
+ return -ENODEV;
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+ link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
+ ret = comedi_pcmcia_enable(dev, NULL);
+ if (ret)
+ return ret;
dev->iobase = link->resource[0]->start;
- ret = request_irq(link->irq, das16cs_interrupt,
- IRQF_SHARED, "cb_das16_cs", dev);
- if (ret < 0)
+ link->priv = dev;
+ ret = pcmcia_request_irq(link, das16cs_interrupt);
+ if (ret)
return ret;
dev->irq = link->irq;
@@ -450,10 +448,10 @@ static int das16cs_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
/* analog output subdevice */
- if (thisboard->n_ao_chans) {
+ if (board->n_ao_chans) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
- s->n_chan = thisboard->n_ao_chans;
+ s->n_chan = board->n_ao_chans;
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = &das16cs_ao_winsn;
@@ -479,58 +477,16 @@ static int das16cs_attach(struct comedi_device *dev,
return 0;
}
-static void das16cs_detach(struct comedi_device *dev)
-{
- if (dev->irq)
- free_irq(dev->irq, dev);
-}
-
static struct comedi_driver driver_das16cs = {
.driver_name = "cb_das16_cs",
.module = THIS_MODULE,
- .attach = das16cs_attach,
- .detach = das16cs_detach,
+ .auto_attach = das16cs_auto_attach,
+ .detach = comedi_pcmcia_disable,
};
-static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
- void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
static int das16cs_pcmcia_attach(struct pcmcia_device *link)
{
- int ret;
-
- /* Do we need to allocate an interrupt? */
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, das16cs_pcmcia_config_loop, NULL);
- if (ret)
- goto failed;
-
- if (!link->irq)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- cur_dev = link;
- return 0;
-
-failed:
- pcmcia_disable_device(link);
- return ret;
-}
-
-static void das16cs_pcmcia_detach(struct pcmcia_device *link)
-{
- pcmcia_disable_device(link);
- cur_dev = NULL;
+ return comedi_pcmcia_auto_config(link, &driver_das16cs);
}
static const struct pcmcia_device_id das16cs_id_table[] = {
@@ -543,35 +499,11 @@ MODULE_DEVICE_TABLE(pcmcia, das16cs_id_table);
static struct pcmcia_driver das16cs_driver = {
.name = "cb_das16_cs",
.owner = THIS_MODULE,
- .probe = das16cs_pcmcia_attach,
- .remove = das16cs_pcmcia_detach,
.id_table = das16cs_id_table,
+ .probe = das16cs_pcmcia_attach,
+ .remove = comedi_pcmcia_auto_unconfig,
};
-
-static int __init das16cs_init(void)
-{
- int ret;
-
- ret = comedi_driver_register(&driver_das16cs);
- if (ret < 0)
- return ret;
-
- ret = pcmcia_register_driver(&das16cs_driver);
- if (ret < 0) {
- comedi_driver_unregister(&driver_das16cs);
- return ret;
- }
-
- return 0;
-}
-module_init(das16cs_init);
-
-static void __exit das16cs_exit(void)
-{
- pcmcia_unregister_driver(&das16cs_driver);
- comedi_driver_unregister(&driver_das16cs);
-}
-module_exit(das16cs_exit);
+module_comedi_pcmcia_driver(driver_das16cs, das16cs_driver);
MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
MODULE_DESCRIPTION("Comedi driver for Computer Boards PC-CARD DAS16/16");
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index aed68639cc9a..79c72118a090 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -67,10 +67,12 @@ TODO:
analog triggering on 1602 series
*/
-#include "../comedidev.h"
+#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include "../comedidev.h"
+
#include "8253.h"
#include "8255.h"
#include "amcc_s5933.h"
@@ -1632,11 +1634,6 @@ static int cb_pcidas_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &cb_pcidas_driver);
}
-static void cb_pcidas_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(cb_pcidas_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0001) },
{ PCI_DEVICE(PCI_VENDOR_ID_CB, 0x000f) },
@@ -1654,7 +1651,7 @@ static struct pci_driver cb_pcidas_pci_driver = {
.name = "cb_pcidas",
.id_table = cb_pcidas_pci_table,
.probe = cb_pcidas_pci_probe,
- .remove = cb_pcidas_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(cb_pcidas_driver, cb_pcidas_pci_driver);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index d72b46cc06bc..9f3112cb7a21 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -87,10 +87,12 @@ TODO:
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "../comedidev.h"
+#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include "../comedidev.h"
+
#include "8253.h"
#include "8255.h"
#include "plx9080.h"
@@ -3299,7 +3301,6 @@ static int prep_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
num_bytes = load_ao_dma_buffer(dev, cmd);
if (num_bytes == 0)
return -1;
- if (num_bytes >= DMA_BUFFER_SIZE) ;
load_ao_dma(dev, cmd);
dma_start_sync(dev, 0);
@@ -4220,11 +4221,6 @@ static int cb_pcidas64_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &cb_pcidas64_driver);
}
-static void cb_pcidas64_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(cb_pcidas64_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001d) },
{ PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001e) },
@@ -4253,7 +4249,7 @@ static struct pci_driver cb_pcidas64_pci_driver = {
.name = "cb_pcidas64",
.id_table = cb_pcidas64_pci_table,
.probe = cb_pcidas64_pci_probe,
- .remove = cb_pcidas64_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(cb_pcidas64_driver, cb_pcidas64_pci_driver);
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 7c6029a8c3e1..e2cadc728455 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -41,6 +41,8 @@
* Only simple analog output writing is supported.
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
@@ -438,11 +440,6 @@ static int cb_pcidda_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &cb_pcidda_driver);
}
-static void cb_pcidda_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(cb_pcidda_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_DEVICE_ID_DDA02_12) },
{ PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_DEVICE_ID_DDA04_12) },
@@ -458,7 +455,7 @@ static struct pci_driver cb_pcidda_pci_driver = {
.name = "cb_pcidda",
.id_table = cb_pcidda_pci_table,
.probe = cb_pcidda_pci_probe,
- .remove = cb_pcidda_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(cb_pcidda_driver, cb_pcidda_pci_driver);
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index b43a5f80ac26..aae063ca85a0 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -40,11 +40,12 @@ No interrupts, multi channel or FIFO AI, although the card looks like it could s
See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
*/
-#include "../comedidev.h"
-
+#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include "../comedidev.h"
+
#include "plx9052.h"
#include "8255.h"
@@ -299,11 +300,6 @@ static int cb_pcimdas_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &cb_pcimdas_driver);
}
-static void cb_pcimdas_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(cb_pcimdas_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) },
{ 0 }
@@ -314,7 +310,7 @@ static struct pci_driver cb_pcimdas_pci_driver = {
.name = "cb_pcimdas",
.id_table = cb_pcimdas_pci_table,
.probe = cb_pcimdas_pci_probe,
- .remove = cb_pcimdas_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(cb_pcimdas_driver, cb_pcimdas_pci_driver);
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 699b84f54cc7..63cfbaf3a3fe 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -79,6 +79,8 @@ Configuration Options: not applicable, uses PCI auto config
-Calin Culianu <calin@ajvar.org>
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
#include "8255.h"
@@ -222,11 +224,6 @@ static int cb_pcimdda_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &cb_pcimdda_driver);
}
-static void cb_pcimdda_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(cb_pcimdda_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_ID_PCIM_DDA06_16) },
{ 0 }
@@ -237,7 +234,7 @@ static struct pci_driver cb_pcimdda_driver_pci_driver = {
.name = "cb_pcimdda",
.id_table = cb_pcimdda_pci_table,
.probe = cb_pcimdda_pci_probe,
- .remove = cb_pcimdda_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(cb_pcimdda_driver, cb_pcimdda_driver_pci_driver);
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 31515999bb97..1bb53816eca3 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -245,10 +245,9 @@ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
return 0;
}
bdev = kmalloc(sizeof(*bdev), GFP_KERNEL);
- if (!bdev) {
- dev_err(dev->class_dev, "Out of memory\n");
+ if (!bdev)
return 0;
- }
+
bdev->dev = d;
bdev->minor = minor;
bdev->subdev = sdev;
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index fb3d09323ba1..270fea5c6b51 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -62,15 +62,14 @@ zero volts).
/* Data unique to this driver */
struct waveform_private {
struct timer_list timer;
- struct timeval last; /* time at which last timer interrupt occurred */
+ struct timeval last; /* time last timer interrupt occurred */
unsigned int uvolt_amplitude; /* waveform amplitude in microvolts */
unsigned long usec_period; /* waveform period in microseconds */
- unsigned long usec_current; /* current time (modulo waveform period) */
- unsigned long usec_remainder; /* usec since last scan; */
- unsigned long ai_count; /* number of conversions remaining */
+ unsigned long usec_current; /* current time (mod waveform period) */
+ unsigned long usec_remainder; /* usec since last scan */
+ unsigned long ai_count; /* number of conversions remaining */
unsigned int scan_period; /* scan period in usec */
unsigned int convert_period; /* conversion period in usec */
- unsigned timer_running:1;
unsigned int ao_loopbacks[N_CHANS];
};
@@ -86,8 +85,9 @@ static const struct comedi_lrange waveform_ai_ranges = {
}
};
-static short fake_sawtooth(struct comedi_device *dev, unsigned int range_index,
- unsigned long current_time)
+static unsigned short fake_sawtooth(struct comedi_device *dev,
+ unsigned int range_index,
+ unsigned long current_time)
{
struct waveform_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
@@ -110,9 +110,9 @@ static short fake_sawtooth(struct comedi_device *dev, unsigned int range_index,
return offset + value;
}
-static short fake_squarewave(struct comedi_device *dev,
- unsigned int range_index,
- unsigned long current_time)
+static unsigned short fake_squarewave(struct comedi_device *dev,
+ unsigned int range_index,
+ unsigned long current_time)
{
struct waveform_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
@@ -132,15 +132,17 @@ static short fake_squarewave(struct comedi_device *dev,
return offset + value;
}
-static short fake_flatline(struct comedi_device *dev, unsigned int range_index,
- unsigned long current_time)
+static unsigned short fake_flatline(struct comedi_device *dev,
+ unsigned int range_index,
+ unsigned long current_time)
{
return dev->read_subdev->maxdata / 2;
}
/* generates a different waveform depending on what channel is read */
-static short fake_waveform(struct comedi_device *dev, unsigned int channel,
- unsigned int range, unsigned long current_time)
+static unsigned short fake_waveform(struct comedi_device *dev,
+ unsigned int channel, unsigned int range,
+ unsigned long current_time)
{
enum {
SAWTOOTH_CHAN,
@@ -176,6 +178,7 @@ static void waveform_ai_interrupt(unsigned long arg)
unsigned long elapsed_time;
unsigned int num_scans;
struct timeval now;
+ bool stopping = false;
do_gettimeofday(&now);
@@ -189,37 +192,35 @@ static void waveform_ai_interrupt(unsigned long arg)
(devpriv->usec_remainder + elapsed_time) % devpriv->scan_period;
async->events = 0;
+ if (cmd->stop_src == TRIG_COUNT) {
+ unsigned int remaining = cmd->stop_arg - devpriv->ai_count;
+ if (num_scans >= remaining) {
+ /* about to finish */
+ num_scans = remaining;
+ stopping = true;
+ }
+ }
+
for (i = 0; i < num_scans; i++) {
for (j = 0; j < cmd->chanlist_len; j++) {
- cfc_write_to_buffer(dev->read_subdev,
- fake_waveform(dev,
- CR_CHAN(cmd->
- chanlist[j]),
- CR_RANGE(cmd->
- chanlist[j]),
- devpriv->
- usec_current +
- i *
- devpriv->scan_period +
- j *
- devpriv->
- convert_period));
- }
- devpriv->ai_count++;
- if (cmd->stop_src == TRIG_COUNT
- && devpriv->ai_count >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- break;
+ unsigned short sample;
+ sample = fake_waveform(dev, CR_CHAN(cmd->chanlist[j]),
+ CR_RANGE(cmd->chanlist[j]),
+ devpriv->usec_current +
+ i * devpriv->scan_period +
+ j * devpriv->convert_period);
+ cfc_write_to_buffer(dev->read_subdev, sample);
}
}
+ devpriv->ai_count += i;
devpriv->usec_current += elapsed_time;
devpriv->usec_current %= devpriv->usec_period;
- if ((async->events & COMEDI_CB_EOA) == 0 && devpriv->timer_running)
- mod_timer(&devpriv->timer, jiffies + 1);
+ if (stopping)
+ async->events |= COMEDI_CB_EOA;
else
- del_timer(&devpriv->timer);
+ mod_timer(&devpriv->timer, jiffies + 1);
comedi_event(dev, dev->read_subdev);
}
@@ -317,7 +318,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
return -1;
}
- devpriv->timer_running = 1;
devpriv->ai_count = 0;
devpriv->scan_period = cmd->scan_begin_arg / nano_per_micro;
@@ -344,8 +344,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
- devpriv->timer_running = 0;
- del_timer(&devpriv->timer);
+ del_timer_sync(&devpriv->timer);
return 0;
}
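
An illustrative comedilib sketch that exercises the reworked TRIG_COUNT stop accounting (assumptions: the comedi_test device is attached as /dev/comedi0 and its AI waveform subdevice is number 0):

#include <string.h>
#include <unistd.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *it = comedi_open("/dev/comedi0");
	unsigned int chanlist[1] = { CR_PACK(0, 0, AREF_GROUND) };
	comedi_cmd cmd;
	short buf[1000];

	if (!it)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev = 0;
	cmd.start_src = TRIG_NOW;
	cmd.scan_begin_src = TRIG_TIMER;
	cmd.scan_begin_arg = 100000;	/* 100 us between scans (nanoseconds) */
	cmd.convert_src = TRIG_NOW;
	cmd.scan_end_src = TRIG_COUNT;
	cmd.scan_end_arg = 1;		/* one channel per scan */
	cmd.stop_src = TRIG_COUNT;
	cmd.stop_arg = 1000;		/* stop after exactly 1000 scans */
	cmd.chanlist = chanlist;
	cmd.chanlist_len = 1;

	comedi_command_test(it, &cmd);
	comedi_command_test(it, &cmd);	/* second pass lets the driver fix up arguments */
	if (comedi_command(it, &cmd) < 0)
		return 1;

	while (read(comedi_fileno(it), buf, sizeof(buf)) > 0)
		;	/* drain samples until end-of-acquisition */

	comedi_close(it);
	return 0;
}
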
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 1a18fa37bfd0..182dea669ef2 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -30,6 +30,8 @@ Status: works
Configuration Options: not applicable, uses comedi PCI auto config
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
#define PCI_DEVICE_ID_PIO1616L 0x8172
@@ -130,11 +132,6 @@ static int contec_pci_dio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &contec_pci_dio_driver);
}
-static void contec_pci_dio_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(contec_pci_dio_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L) },
{ 0 }
@@ -145,7 +142,7 @@ static struct pci_driver contec_pci_dio_pci_driver = {
.name = "contec_pci_dio",
.id_table = contec_pci_dio_pci_table,
.probe = contec_pci_dio_pci_probe,
- .remove = contec_pci_dio_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(contec_pci_dio_driver, contec_pci_dio_pci_driver);
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 992e557e6ae1..50b450f09c65 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -107,12 +107,13 @@ Configuration options: not applicable, uses PCI auto config
*/
-#include "../comedidev.h"
-
+#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
+#include "../comedidev.h"
+
#include "8255.h"
#define DAQBOARD2000_FIRMWARE "daqboard2000_firmware.bin"
@@ -485,7 +486,7 @@ static void daqboard2000_pulseProgPin(struct comedi_device *dev)
writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c);
- udelay(10000); /* Not in the original code, but I like symmetry... */
+ udelay(10000); /* Not in the original code, but I like symmetry... */
}
static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask)
@@ -799,11 +800,6 @@ static int daqboard2000_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &daqboard2000_driver);
}
-static void daqboard2000_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_IOTECH, 0x0409) },
{ 0 }
@@ -814,7 +810,7 @@ static struct pci_driver daqboard2000_pci_driver = {
.name = "daqboard2000",
.id_table = daqboard2000_pci_table,
.probe = daqboard2000_pci_probe,
- .remove = daqboard2000_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(daqboard2000_driver, daqboard2000_pci_driver);
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index b15e05808cb0..9823aa06787a 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -1,6 +1,6 @@
/*
* comedi/drivers/das08.c
- * DAS08 driver
+ * comedi driver for common DAS08 support (used by ISA/PCI/PCMCIA drivers)
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
@@ -27,57 +27,26 @@
/*
* Driver: das08
* Description: DAS-08 compatible boards
+ * Devices: various, see das08_isa, das08_cs, and das08_pci drivers
* Author: Warren Jasper, ds, Frank Hess
- * Devices: [Keithley Metrabyte] DAS08 (isa-das08),
- * [ComputerBoards] DAS08 (isa-das08), DAS08-PGM (das08-pgm),
- * DAS08-PGH (das08-pgh), DAS08-PGL (das08-pgl), DAS08-AOH (das08-aoh),
- * DAS08-AOL (das08-aol), DAS08-AOM (das08-aom), DAS08/JR-AO (das08/jr-ao),
- * DAS08/JR-16-AO (das08jr-16-ao), PCI-DAS08 (pci-das08),
- * PC104-DAS08 (pc104-das08), DAS08/JR/16 (das08jr/16)
* Updated: Fri, 31 Aug 2012 19:19:06 +0100
* Status: works
*
- * This is a rewrite of the das08 and das08jr drivers.
+ * This driver is used by the das08_isa, das08_cs, and das08_pci
+ * drivers to provide the common support for the DAS-08 hardware.
*
- * Options (for ISA cards):
- * [0] - base io address
- *
- * Manual configuration of PCI cards is not supported; they are
- * configured automatically.
- *
- * The das08 driver doesn't support asynchronous commands, since
- * the cheap das08 hardware doesn't really support them. The
- * comedi_rt_timer driver can be used to emulate commands for this
- * driver.
+ * The driver doesn't support asynchronous commands, since the
+ * cheap das08 hardware doesn't really support them.
*/
-#include "../comedidev.h"
-
#include <linux/delay.h>
+#include "../comedidev.h"
+
#include "8255.h"
#include "8253.h"
#include "das08.h"
-#define DRV_NAME "das08"
-
-#define DO_ISA IS_ENABLED(CONFIG_COMEDI_DAS08_ISA)
-#define DO_PCI IS_ENABLED(CONFIG_COMEDI_DAS08_PCI)
-#define DO_COMEDI_DRIVER_REGISTER (DO_ISA || DO_PCI)
-
-#define PCI_DEVICE_ID_PCIDAS08 0x29
-#define PCIDAS08_SIZE 0x54
-
-/* pci configuration registers */
-#define INTCSR 0x4c
-#define INTR1_ENABLE 0x1
-#define INTR1_HIGH_POLARITY 0x2
-#define PCI_INTR_ENABLE 0x40
-#define INTR1_EDGE_TRIG 0x100 /* requires high polarity */
-#define CNTRL 0x50
-#define CNTRL_DIR 0x2
-#define CNTRL_INTR 0x4
-
/*
cio-das08.pdf
@@ -235,16 +204,6 @@ static const int *const das08_gainlists[] = {
das08_pgm_gainlist,
};
-static inline bool is_isa_board(const struct das08_board_struct *board)
-{
- return DO_ISA && board->bustype == isa;
-}
-
-static inline bool is_pci_board(const struct das08_board_struct *board)
-{
- return DO_PCI && board->bustype == pci;
-}
-
#define TIMEOUT 100000
static int das08_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -501,159 +460,6 @@ static int das08_counter_config(struct comedi_device *dev,
return 2;
}
-#if DO_COMEDI_DRIVER_REGISTER
-static const struct das08_board_struct das08_boards[] = {
-#if DO_ISA
- {
- .name = "isa-das08", /* cio-das08.pdf */
- .bustype = isa,
- .ai_nbits = 12,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 8,
- .i8254_offset = 4,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08-pgm", /* cio-das08pgx.pdf */
- .bustype = isa,
- .ai_nbits = 12,
- .ai_pg = das08_pgm,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 0,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08-pgh", /* cio-das08pgx.pdf */
- .bustype = isa,
- .ai_nbits = 12,
- .ai_pg = das08_pgh,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08-pgl", /* cio-das08pgx.pdf */
- .bustype = isa,
- .ai_nbits = 12,
- .ai_pg = das08_pgl,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08-aoh", /* cio-das08_aox.pdf */
- .bustype = isa,
- .ai_nbits = 12,
- .ai_pg = das08_pgh,
- .ai_encoding = das08_encode12,
- .ao_nbits = 12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 0x0c,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08-aol", /* cio-das08_aox.pdf */
- .bustype = isa,
- .ai_nbits = 12,
- .ai_pg = das08_pgl,
- .ai_encoding = das08_encode12,
- .ao_nbits = 12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 0x0c,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08-aom", /* cio-das08_aox.pdf */
- .bustype = isa,
- .ai_nbits = 12,
- .ai_pg = das08_pgm,
- .ai_encoding = das08_encode12,
- .ao_nbits = 12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 0x0c,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08/jr-ao", /* cio-das08-jr-ao.pdf */
- .bustype = isa,
- .is_jr = true,
- .ai_nbits = 12,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode12,
- .ao_nbits = 12,
- .di_nchan = 8,
- .do_nchan = 8,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08jr-16-ao", /* cio-das08jr-16-ao.pdf */
- .bustype = isa,
- .is_jr = true,
- .ai_nbits = 16,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode16,
- .ao_nbits = 16,
- .di_nchan = 8,
- .do_nchan = 8,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "pc104-das08",
- .bustype = isa,
- .ai_nbits = 12,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8254_offset = 4,
- .iosize = 16, /* unchecked */
- },
- {
- .name = "das08jr/16",
- .bustype = isa,
- .is_jr = true,
- .ai_nbits = 16,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode16,
- .di_nchan = 8,
- .do_nchan = 8,
- .iosize = 16, /* unchecked */
- },
-#endif /* DO_ISA */
-#if DO_PCI
- {
- .name = "pci-das08", /* pci-das08 */
- .id = PCI_DEVICE_ID_PCIDAS08,
- .bustype = pci,
- .ai_nbits = 12,
- .ai_pg = das08_bipolar5,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8254_offset = 4,
- .iosize = 8,
- },
-#endif /* DO_PCI */
-};
-#endif /* DO_COMEDI_DRIVER_REGISTER */
-
int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
{
const struct das08_board_struct *thisboard = comedi_board(dev);
@@ -760,84 +566,6 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
}
EXPORT_SYMBOL_GPL(das08_common_attach);
-static const struct das08_board_struct *
-das08_find_pci_board(struct pci_dev *pdev)
-{
-#if DO_COMEDI_DRIVER_REGISTER
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(das08_boards); i++)
- if (is_pci_board(&das08_boards[i]) &&
- pdev->device == das08_boards[i].id)
- return &das08_boards[i];
-#endif
- return NULL;
-}
-
-/* only called in the PCI probe path, via comedi_pci_auto_config() */
-static int __maybe_unused
-das08_auto_attach(struct comedi_device *dev, unsigned long context_unused)
-{
- struct pci_dev *pdev;
- struct das08_private_struct *devpriv;
- unsigned long iobase;
-
- if (!DO_PCI)
- return -EINVAL;
-
- pdev = comedi_to_pci_dev(dev);
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
- dev_info(dev->class_dev, "attach pci %s\n", pci_name(pdev));
- dev->board_ptr = das08_find_pci_board(pdev);
- if (dev->board_ptr == NULL) {
- dev_err(dev->class_dev, "BUG! cannot determine board type!\n");
- return -EINVAL;
- }
-
- /* enable PCI device and reserve I/O spaces */
- if (comedi_pci_enable(pdev, dev->driver->driver_name)) {
- dev_err(dev->class_dev,
- "Error enabling PCI device and requesting regions\n");
- return -EIO;
- }
- /* read base addresses */
- iobase = pci_resource_start(pdev, 2);
- return das08_common_attach(dev, iobase);
-}
-
-static int __maybe_unused
-das08_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct das08_board_struct *thisboard = comedi_board(dev);
- struct das08_private_struct *devpriv;
- unsigned long iobase;
-
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
- dev_info(dev->class_dev, "attach\n");
- if (is_pci_board(thisboard)) {
- dev_err(dev->class_dev,
- "Manual configuration of PCI board '%s' is not supported\n",
- thisboard->name);
- return -EIO;
- } else if (is_isa_board(thisboard)) {
- iobase = it->options[0];
- dev_info(dev->class_dev, "iobase 0x%lx\n", iobase);
- if (!request_region(iobase, thisboard->iosize, DRV_NAME)) {
- dev_err(dev->class_dev, "I/O port conflict\n");
- return -EIO;
- }
- return das08_common_attach(dev, iobase);
- } else
- return -EIO;
-}
-
void das08_common_detach(struct comedi_device *dev)
{
if (dev->subdevices)
@@ -845,84 +573,16 @@ void das08_common_detach(struct comedi_device *dev)
}
EXPORT_SYMBOL_GPL(das08_common_detach);
-static void __maybe_unused das08_detach(struct comedi_device *dev)
-{
- const struct das08_board_struct *thisboard = comedi_board(dev);
-
- if (!thisboard)
- return;
- das08_common_detach(dev);
- if (is_isa_board(thisboard)) {
- if (dev->iobase)
- release_region(dev->iobase, thisboard->iosize);
- } else if (is_pci_board(thisboard)) {
- struct pci_dev *pdev = comedi_to_pci_dev(dev);
- if (pdev) {
- if (dev->iobase)
- comedi_pci_disable(pdev);
- }
- }
-}
-
-#if DO_COMEDI_DRIVER_REGISTER
-static struct comedi_driver das08_driver = {
- .driver_name = DRV_NAME,
- .module = THIS_MODULE,
- .attach = das08_attach,
- .auto_attach = das08_auto_attach,
- .detach = das08_detach,
- .board_name = &das08_boards[0].name,
- .num_names = sizeof(das08_boards) / sizeof(struct das08_board_struct),
- .offset = sizeof(struct das08_board_struct),
-};
-#endif
-
-#if DO_PCI
-static DEFINE_PCI_DEVICE_TABLE(das08_pci_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_DEVICE_ID_PCIDAS08) },
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, das08_pci_table);
-
-static int das08_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
-{
- return comedi_pci_auto_config(dev, &das08_driver);
-}
-
-static void das08_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
-static struct pci_driver das08_pci_driver = {
- .id_table = das08_pci_table,
- .name = DRV_NAME,
- .probe = &das08_pci_probe,
- .remove = &das08_pci_remove
-};
-#endif /* DO_PCI */
-
-#if DO_COMEDI_DRIVER_REGISTER
-#if DO_PCI
-module_comedi_pci_driver(das08_driver, das08_pci_driver);
-#else
-module_comedi_driver(das08_driver);
-#endif
-#else /* DO_COMEDI_DRIVER_REGISTER */
static int __init das08_init(void)
{
return 0;
}
+module_init(das08_init);
static void __exit das08_exit(void)
{
}
-
-module_init(das08_init);
module_exit(das08_exit);
-#endif /* DO_COMEDI_DRIVER_REGISTER */
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
diff --git a/drivers/staging/comedi/drivers/das08.h b/drivers/staging/comedi/drivers/das08.h
index 0314baebae39..b102ad4918c4 100644
--- a/drivers/staging/comedi/drivers/das08.h
+++ b/drivers/staging/comedi/drivers/das08.h
@@ -24,7 +24,6 @@
#ifndef _DAS08_H
#define _DAS08_H
-enum das08_bustype { isa, pci, pcmcia };
/* different ways ai data is encoded in first two registers */
enum das08_ai_encoding { das08_encode12, das08_encode16, das08_pcm_encode12 };
enum das08_lrange { das08_pg_none, das08_bipolar5, das08_pgh, das08_pgl,
@@ -34,7 +33,6 @@ enum das08_lrange { das08_pg_none, das08_bipolar5, das08_pgh, das08_pgl,
struct das08_board_struct {
const char *name;
unsigned int id; /* id for pci/pcmcia boards */
- enum das08_bustype bustype;
bool is_jr; /* true for 'JR' boards */
unsigned int ai_nbits;
enum das08_lrange ai_pg;
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 024262375e3c..cfeebe4d1ddd 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -46,125 +46,70 @@ Options (for pcm-das08):
Command support does not exist, but could be added for this board.
*/
-#include "../comedidev.h"
-
#include <linux/delay.h>
-#include <linux/pci.h>
#include <linux/slab.h>
-#include "das08.h"
+#include "../comedidev.h"
-/* pcmcia includes */
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
+#include "das08.h"
+
static const struct das08_board_struct das08_cs_boards[] = {
{
- .name = "pcm-das08",
- .id = 0x0, /* XXX */
- .bustype = pcmcia,
- .ai_nbits = 12,
- .ai_pg = das08_bipolar5,
- .ai_encoding = das08_pcm_encode12,
- .di_nchan = 3,
- .do_nchan = 3,
- .iosize = 16,
- },
- /* duplicate so driver name can be used also */
- {
- .name = "das08_cs",
- .id = 0x0, /* XXX */
- .bustype = pcmcia,
- .ai_nbits = 12,
- .ai_pg = das08_bipolar5,
- .ai_encoding = das08_pcm_encode12,
- .di_nchan = 3,
- .do_nchan = 3,
- .iosize = 16,
+ .name = "pcm-das08",
+ .id = 0x0, /* XXX */
+ .ai_nbits = 12,
+ .ai_pg = das08_bipolar5,
+ .ai_encoding = das08_pcm_encode12,
+ .di_nchan = 3,
+ .do_nchan = 3,
+ .iosize = 16,
},
};
-static struct pcmcia_device *cur_dev;
-
-static int das08_cs_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
+static int das08_cs_auto_attach(struct comedi_device *dev,
+ unsigned long context)
{
- const struct das08_board_struct *thisboard = comedi_board(dev);
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
struct das08_private_struct *devpriv;
unsigned long iobase;
- struct pcmcia_device *link = cur_dev; /* XXX hack */
+ int ret;
+
+ /* The das08 driver needs the board_ptr */
+ dev->board_ptr = &das08_cs_boards[0];
+
+ link->config_flags |= CONF_AUTO_SET_IO;
+ ret = comedi_pcmcia_enable(dev, NULL);
+ if (ret)
+ return ret;
+ iobase = link->resource[0]->start;
devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
if (!devpriv)
return -ENOMEM;
dev->private = devpriv;
- dev_info(dev->class_dev, "das08_cs: attach\n");
- /* deal with a pci board */
-
- if (thisboard->bustype == pcmcia) {
- if (link == NULL) {
- dev_err(dev->class_dev, "no pcmcia cards found\n");
- return -EIO;
- }
- iobase = link->resource[0]->start;
- } else {
- dev_err(dev->class_dev,
- "bug! board does not have PCMCIA bustype\n");
- return -EINVAL;
- }
-
return das08_common_attach(dev, iobase);
}
+static void das08_cs_detach(struct comedi_device *dev)
+{
+ das08_common_detach(dev);
+ comedi_pcmcia_disable(dev);
+}
+
static struct comedi_driver driver_das08_cs = {
.driver_name = "das08_cs",
.module = THIS_MODULE,
- .attach = das08_cs_attach,
- .detach = das08_common_detach,
- .board_name = &das08_cs_boards[0].name,
- .num_names = ARRAY_SIZE(das08_cs_boards),
- .offset = sizeof(struct das08_board_struct),
+ .auto_attach = das08_cs_auto_attach,
+ .detach = das08_cs_detach,
};
-static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
- void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
static int das08_pcmcia_attach(struct pcmcia_device *link)
{
- int ret;
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, das08_pcmcia_config_loop, NULL);
- if (ret)
- goto failed;
-
- if (!link->irq)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- cur_dev = link;
- return 0;
-
-failed:
- pcmcia_disable_device(link);
- return ret;
-}
-
-static void das08_pcmcia_detach(struct pcmcia_device *link)
-{
- pcmcia_disable_device(link);
- cur_dev = NULL;
+ return comedi_pcmcia_auto_config(link, &driver_das08_cs);
}
static const struct pcmcia_device_id das08_cs_id_table[] = {
@@ -176,36 +121,11 @@ MODULE_DEVICE_TABLE(pcmcia, das08_cs_id_table);
static struct pcmcia_driver das08_cs_driver = {
.name = "pcm-das08",
.owner = THIS_MODULE,
- .probe = das08_pcmcia_attach,
- .remove = das08_pcmcia_detach,
.id_table = das08_cs_id_table,
+ .probe = das08_pcmcia_attach,
+ .remove = comedi_pcmcia_auto_unconfig,
};
-
-static int __init das08_cs_init_module(void)
-{
- int ret;
-
- ret = comedi_driver_register(&driver_das08_cs);
- if (ret < 0)
- return ret;
-
- ret = pcmcia_register_driver(&das08_cs_driver);
- if (ret < 0) {
- comedi_driver_unregister(&driver_das08_cs);
- return ret;
- }
-
- return 0;
-
-}
-module_init(das08_cs_init_module);
-
-static void __exit das08_cs_exit_module(void)
-{
- pcmcia_unregister_driver(&das08_cs_driver);
- comedi_driver_unregister(&driver_das08_cs);
-}
-module_exit(das08_cs_exit_module);
+module_comedi_pcmcia_driver(driver_das08_cs, das08_cs_driver);
MODULE_AUTHOR("David A. Schleef <ds@schleef.org>, "
"Frank Mori Hess <fmhess@users.sourceforge.net>");
diff --git a/drivers/staging/comedi/drivers/das08_isa.c b/drivers/staging/comedi/drivers/das08_isa.c
new file mode 100644
index 000000000000..f12078247163
--- /dev/null
+++ b/drivers/staging/comedi/drivers/das08_isa.c
@@ -0,0 +1,217 @@
+/*
+ * das08_isa.c
+ * comedi driver for DAS08 ISA/PC-104 boards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Driver: das08_isa
+ * Description: DAS-08 ISA/PC-104 compatible boards
+ * Devices: (Keithley Metrabyte) DAS08 [isa-das08],
+ * (ComputerBoards) DAS08 [isa-das08]
+ * (ComputerBoards) DAS08-PGM [das08-pgm]
+ * (ComputerBoards) DAS08-PGH [das08-pgh]
+ * (ComputerBoards) DAS08-PGL [das08-pgl]
+ * (ComputerBoards) DAS08-AOH [das08-aoh]
+ * (ComputerBoards) DAS08-AOL [das08-aol]
+ * (ComputerBoards) DAS08-AOM [das08-aom]
+ * (ComputerBoards) DAS08/JR-AO [das08/jr-ao]
+ * (ComputerBoards) DAS08/JR-16-AO [das08jr-16-ao]
+ * (ComputerBoards) PC104-DAS08 [pc104-das08]
+ * (ComputerBoards) DAS08/JR/16 [das08jr/16]
+ * Author: Warren Jasper, ds, Frank Hess
+ * Updated: Fri, 31 Aug 2012 19:19:06 +0100
+ * Status: works
+ *
+ * This is the ISA/PC-104-specific support split off from the das08 driver.
+ *
+ * Configuration Options:
+ * [0] - base io address
+ */
+
+#include "../comedidev.h"
+
+#include "das08.h"
+
+static const struct das08_board_struct das08_isa_boards[] = {
+ {
+ /* cio-das08.pdf */
+ .name = "isa-das08",
+ .ai_nbits = 12,
+ .ai_pg = das08_pg_none,
+ .ai_encoding = das08_encode12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8255_offset = 8,
+ .i8254_offset = 4,
+ .iosize = 16, /* unchecked */
+ }, {
+ /* cio-das08pgx.pdf */
+ .name = "das08-pgm",
+ .ai_nbits = 12,
+ .ai_pg = das08_pgm,
+ .ai_encoding = das08_encode12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8255_offset = 0,
+ .i8254_offset = 0x04,
+ .iosize = 16, /* unchecked */
+ }, {
+ /* cio-das08pgx.pdf */
+ .name = "das08-pgh",
+ .ai_nbits = 12,
+ .ai_pg = das08_pgh,
+ .ai_encoding = das08_encode12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8254_offset = 0x04,
+ .iosize = 16, /* unchecked */
+ }, {
+ /* cio-das08pgx.pdf */
+ .name = "das08-pgl",
+ .ai_nbits = 12,
+ .ai_pg = das08_pgl,
+ .ai_encoding = das08_encode12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8254_offset = 0x04,
+ .iosize = 16, /* unchecked */
+ }, {
+ /* cio-das08_aox.pdf */
+ .name = "das08-aoh",
+ .ai_nbits = 12,
+ .ai_pg = das08_pgh,
+ .ai_encoding = das08_encode12,
+ .ao_nbits = 12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8255_offset = 0x0c,
+ .i8254_offset = 0x04,
+ .iosize = 16, /* unchecked */
+ }, {
+ /* cio-das08_aox.pdf */
+ .name = "das08-aol",
+ .ai_nbits = 12,
+ .ai_pg = das08_pgl,
+ .ai_encoding = das08_encode12,
+ .ao_nbits = 12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8255_offset = 0x0c,
+ .i8254_offset = 0x04,
+ .iosize = 16, /* unchecked */
+ }, {
+ /* cio-das08_aox.pdf */
+ .name = "das08-aom",
+ .ai_nbits = 12,
+ .ai_pg = das08_pgm,
+ .ai_encoding = das08_encode12,
+ .ao_nbits = 12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8255_offset = 0x0c,
+ .i8254_offset = 0x04,
+ .iosize = 16, /* unchecked */
+ }, {
+ /* cio-das08-jr-ao.pdf */
+ .name = "das08/jr-ao",
+ .is_jr = true,
+ .ai_nbits = 12,
+ .ai_pg = das08_pg_none,
+ .ai_encoding = das08_encode12,
+ .ao_nbits = 12,
+ .di_nchan = 8,
+ .do_nchan = 8,
+ .iosize = 16, /* unchecked */
+ }, {
+ /* cio-das08jr-16-ao.pdf */
+ .name = "das08jr-16-ao",
+ .is_jr = true,
+ .ai_nbits = 16,
+ .ai_pg = das08_pg_none,
+ .ai_encoding = das08_encode16,
+ .ao_nbits = 16,
+ .di_nchan = 8,
+ .do_nchan = 8,
+ .i8254_offset = 0x04,
+ .iosize = 16, /* unchecked */
+ }, {
+ .name = "pc104-das08",
+ .ai_nbits = 12,
+ .ai_pg = das08_pg_none,
+ .ai_encoding = das08_encode12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8254_offset = 4,
+ .iosize = 16, /* unchecked */
+ }, {
+ .name = "das08jr/16",
+ .is_jr = true,
+ .ai_nbits = 16,
+ .ai_pg = das08_pg_none,
+ .ai_encoding = das08_encode16,
+ .di_nchan = 8,
+ .do_nchan = 8,
+ .iosize = 16, /* unchecked */
+ },
+};
+
+static int das08_isa_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
+{
+ const struct das08_board_struct *thisboard = comedi_board(dev);
+ struct das08_private_struct *devpriv;
+
+ devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ if (!devpriv)
+ return -ENOMEM;
+ dev->private = devpriv;
+
+ if (!request_region(it->options[0], thisboard->iosize,
+ thisboard->name))
+ return -EIO;
+
+ return das08_common_attach(dev, it->options[0]);
+}
+
+static void das08_isa_detach(struct comedi_device *dev)
+{
+ const struct das08_board_struct *thisboard = comedi_board(dev);
+
+ das08_common_detach(dev);
+ if (dev->iobase)
+ release_region(dev->iobase, thisboard->iosize);
+}
+
+static struct comedi_driver das08_isa_driver = {
+ .driver_name = "isa-das08",
+ .module = THIS_MODULE,
+ .attach = das08_isa_attach,
+ .detach = das08_isa_detach,
+ .board_name = &das08_isa_boards[0].name,
+ .num_names = ARRAY_SIZE(das08_isa_boards),
+ .offset = sizeof(das08_isa_boards[0]),
+};
+module_comedi_driver(das08_isa_driver);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c
new file mode 100644
index 000000000000..c405876ddcf7
--- /dev/null
+++ b/drivers/staging/comedi/drivers/das08_pci.c
@@ -0,0 +1,121 @@
+/*
+ * das08_pci.c
+ * comedi driver for DAS08 PCI boards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Driver: das08_pci
+ * Description: DAS-08 PCI compatible boards
+ * Devices: (ComputerBoards) PCI-DAS08 [pci-das08]
+ * Author: Warren Jasper, ds, Frank Hess
+ * Updated: Fri, 31 Aug 2012 19:19:06 +0100
+ * Status: works
+ *
+ * This is the PCI-specific support split off from the das08 driver.
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
+
+#include <linux/pci.h>
+
+#include "../comedidev.h"
+
+#include "das08.h"
+
+#define PCI_DEVICE_ID_PCIDAS08 0x0029
+
+static const struct das08_board_struct das08_pci_boards[] = {
+ {
+ .name = "pci-das08",
+ .id = PCI_DEVICE_ID_PCIDAS08,
+ .ai_nbits = 12,
+ .ai_pg = das08_bipolar5,
+ .ai_encoding = das08_encode12,
+ .di_nchan = 3,
+ .do_nchan = 4,
+ .i8254_offset = 4,
+ .iosize = 8,
+ },
+};
+
+static int das08_pci_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pdev = comedi_to_pci_dev(dev);
+ struct das08_private_struct *devpriv;
+ int ret;
+
+ devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ if (!devpriv)
+ return -ENOMEM;
+ dev->private = devpriv;
+
+ /* The das08 driver needs the board_ptr */
+ dev->board_ptr = &das08_pci_boards[0];
+
+ ret = comedi_pci_enable(pdev, dev->driver->driver_name);
+ if (ret)
+ return ret;
+ dev->iobase = pci_resource_start(pdev, 2);
+
+ return das08_common_attach(dev, dev->iobase);
+}
+
+static void das08_pci_detach(struct comedi_device *dev)
+{
+ struct pci_dev *pdev = comedi_to_pci_dev(dev);
+
+ das08_common_detach(dev);
+ if (dev->iobase)
+ comedi_pci_disable(pdev);
+}
+
+static struct comedi_driver das08_pci_comedi_driver = {
+ .driver_name = "pci-das08",
+ .module = THIS_MODULE,
+ .auto_attach = das08_pci_auto_attach,
+ .detach = das08_pci_detach,
+};
+
+static int das08_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, &das08_pci_comedi_driver);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(das08_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_DEVICE_ID_PCIDAS08) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, das08_pci_table);
+
+static struct pci_driver das08_pci_driver = {
+ .name = "pci-das08",
+ .id_table = das08_pci_table,
+ .probe = das08_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
+};
+module_comedi_pci_driver(das08_pci_comedi_driver, das08_pci_driver);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index b159f44d694f..f238a1fbccbf 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -82,7 +82,9 @@ www.measurementcomputing.com
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+
#include <asm/dma.h>
+
#include "../comedidev.h"
#include "8253.h"
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 960da8debe17..3ce499fa5dbf 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -55,9 +55,11 @@ AO commands are not supported.
#define DEBUG 1
+#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
+
#include "../comedidev.h"
-#include <linux/delay.h>
#include "comedi_fc.h"
@@ -856,11 +858,6 @@ static int dt3000_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &dt3000_driver);
}
-static void dt3000_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(dt3000_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_DT, PCI_DEVICE_ID_DT3001) },
{ PCI_DEVICE(PCI_VENDOR_ID_DT, PCI_DEVICE_ID_DT3001_PGL) },
@@ -877,7 +874,7 @@ static struct pci_driver dt3000_pci_driver = {
.name = "dt3000",
.id_table = dt3000_pci_table,
.probe = dt3000_pci_probe,
- .remove = dt3000_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(dt3000_driver, dt3000_pci_driver);
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 176799849d20..192cf088f834 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -702,10 +702,9 @@ static int dt9812_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (dev == NULL)
goto error;
- }
+
kref_init(&dev->kref);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -1133,7 +1132,7 @@ static struct comedi_driver dt9812_comedi_driver = {
static int __init usb_dt9812_init(void)
{
- int result, i;
+ int i;
/* Initialize all driver slots */
for (i = 0; i < DT9812_NUM_SLOTS; i++) {
@@ -1144,30 +1143,13 @@ static int __init usb_dt9812_init(void)
}
dt9812[12].serial = 0x0;
- /* register with the USB subsystem */
- result = usb_register(&dt9812_usb_driver);
- if (result) {
- pr_err("usb_register failed. Error number %d\n", result);
- return result;
- }
- /* register with comedi */
- result = comedi_driver_register(&dt9812_comedi_driver);
- if (result) {
- usb_deregister(&dt9812_usb_driver);
- pr_err("comedi_driver_register failed. Error number %d\n",
- result);
- }
-
- return result;
+ return comedi_usb_driver_register(&dt9812_comedi_driver,
+ &dt9812_usb_driver);
}
static void __exit usb_dt9812_exit(void)
{
- /* unregister with comedi */
- comedi_driver_unregister(&dt9812_comedi_driver);
-
- /* deregister this driver with the USB subsystem */
- usb_deregister(&dt9812_usb_driver);
+ comedi_usb_driver_unregister(&dt9812_comedi_driver, &dt9812_usb_driver);
}
module_init(usb_dt9812_init);
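The dt9812 change swaps the hand-rolled usb_register()/comedi_driver_register() sequence for the comedi_usb_driver_register()/comedi_usb_driver_unregister() helpers, which take over the paired registration (and the failure unwinding) that the removed code did by hand. A minimal sketch of the resulting init/exit boilerplate; the driver objects are assumed to be defined elsewhere in a hypothetical driver:

/* Sketch: baz_comedi_driver and baz_usb_driver are assumed to exist
 * elsewhere in this (hypothetical) driver. */
static int __init baz_init(void)
{
	/* registers the comedi driver and the USB driver as a pair */
	return comedi_usb_driver_register(&baz_comedi_driver,
					  &baz_usb_driver);
}
module_init(baz_init);

static void __exit baz_exit(void)
{
	comedi_usb_driver_unregister(&baz_comedi_driver, &baz_usb_driver);
}
module_exit(baz_exit);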
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index 8497a36db7db..decc17f1867e 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -37,9 +37,11 @@
their cards in their manuals.
*/
-#include "../comedidev.h"
+#include <linux/pci.h>
#include <linux/mutex.h>
+#include "../comedidev.h"
+
#define READ_TIMEOUT 50
static const struct comedi_lrange range_pci1050_ai = { 3, {
@@ -276,11 +278,6 @@ static int dyna_pci10xx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &dyna_pci10xx_driver);
}
-static void dyna_pci10xx_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(dyna_pci10xx_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PLX, 0x1050) },
{ 0 }
@@ -291,7 +288,7 @@ static struct pci_driver dyna_pci10xx_pci_driver = {
.name = "dyna_pci10xx",
.id_table = dyna_pci10xx_pci_table,
.probe = dyna_pci10xx_pci_probe,
- .remove = dyna_pci10xx_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(dyna_pci10xx_driver, dyna_pci10xx_pci_driver);
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 154598f6d5e3..b60c97562676 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -47,9 +47,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
+
#include "../comedidev.h"
-#include <linux/delay.h>
#include "plx9080.h"
#include "comedi_fc.h"
@@ -946,11 +948,6 @@ static int gsc_hpdi_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &gsc_hpdi_driver);
}
-static void gsc_hpdi_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(gsc_hpdi_pci_table) = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9080, PCI_VENDOR_ID_PLX,
0x2400, 0, 0, 0},
@@ -962,7 +959,7 @@ static struct pci_driver gsc_hpdi_pci_driver = {
.name = "gsc_hpdi",
.id_table = gsc_hpdi_pci_table,
.probe = gsc_hpdi_pci_probe,
- .remove = gsc_hpdi_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(gsc_hpdi_driver, gsc_hpdi_pci_driver);
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index a91a448ba0f0..1e08f9141fad 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -47,11 +47,11 @@ There are 4 x 12-bit Analogue Outputs. Ranges : 5V, 10V, +/-5V, +/-10V
Configuration options: not applicable, uses PCI auto config
*/
+#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-#include <linux/delay.h>
-#include <linux/pci.h>
+#include "../comedidev.h"
#define PCI_DEVICE_ID_ICP_MULTI 0x8000
@@ -623,11 +623,6 @@ static int icp_multi_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &icp_multi_driver);
}
-static void icp_multi_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(icp_multi_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ICP, PCI_DEVICE_ID_ICP_MULTI) },
{ 0 }
@@ -638,7 +633,7 @@ static struct pci_driver icp_multi_pci_driver = {
.name = "icp_multi",
.id_table = icp_multi_pci_table,
.probe = icp_multi_pci_probe,
- .remove = icp_multi_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(icp_multi_driver, icp_multi_pci_driver);
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index c756a35ce31a..17ba75e0ab89 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -42,15 +42,17 @@
* comedi_nonfree_firmware tarball. The file is called "jr3pci.idm".
*/
-#include "../comedidev.h"
-
+#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/timer.h>
-#include <linux/kernel.h>
+
+#include "../comedidev.h"
+
#include "jr3_pci.h"
#define PCI_VENDOR_ID_JR3 0x1762
@@ -844,11 +846,6 @@ static int jr3_pci_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &jr3_pci_driver);
}
-static void jr3_pci_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) },
{ PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
@@ -863,7 +860,7 @@ static struct pci_driver jr3_pci_pci_driver = {
.name = "jr3_pci",
.id_table = jr3_pci_pci_table,
.probe = jr3_pci_pci_probe,
- .remove = jr3_pci_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(jr3_pci_driver, jr3_pci_pci_driver);
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 19c94282ac3f..8c09c026508a 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -34,6 +34,8 @@ This driver is a simple driver to read the counter values from
Kolter Electronic PCI Counter Card.
*/
+#include <linux/pci.h>
+
#include "../comedidev.h"
#define CNT_CARD_DEVICE_ID 0x0014
@@ -152,11 +154,6 @@ static int ke_counter_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ke_counter_driver);
}
-static void ke_counter_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(ke_counter_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
{ 0 }
@@ -167,7 +164,7 @@ static struct pci_driver ke_counter_pci_driver = {
.name = "ke_counter",
.id_table = ke_counter_pci_table,
.probe = ke_counter_pci_probe,
- .remove = ke_counter_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ke_counter_driver, ke_counter_pci_driver);
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 3c4b0228e8dc..b766bb93efd6 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -45,13 +45,14 @@ broken.
*/
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
+#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include "../comedidev.h"
+
#include "comedi_fc.h"
#include "8253.h"
@@ -1734,11 +1735,6 @@ static int me4000_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &me4000_driver);
}
-static void me4000_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(me4000_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, PCI_DEVICE_ID_MEILHAUS_ME4650)},
{PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, PCI_DEVICE_ID_MEILHAUS_ME4660)},
@@ -1761,7 +1757,7 @@ static struct pci_driver me4000_pci_driver = {
.name = "me4000",
.id_table = me4000_pci_table,
.probe = me4000_pci_probe,
- .remove = me4000_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(me4000_driver, me4000_pci_driver);
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index ce8e3d3f135c..06490ebc8cc8 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -34,9 +34,11 @@
* Analog Input, Analog Output, Digital I/O
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/firmware.h>
+
#include "../comedidev.h"
#define ME2600_FIRMWARE "me2600_firmware.bin"
@@ -619,11 +621,6 @@ static int me_daq_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &me_daq_driver);
}
-static void me_daq_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(me_daq_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, ME2600_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, ME2000_DEVICE_ID) },
@@ -635,7 +632,7 @@ static struct pci_driver me_daq_pci_driver = {
.name = "me_daq",
.id_table = me_daq_pci_table,
.probe = me_daq_pci_probe,
- .remove = me_daq_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(me_daq_driver, me_daq_pci_driver);
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index e27850f628ce..be2c15f84614 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -51,11 +51,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "mite.h"
+#include <linux/pci.h>
-#include "comedi_fc.h"
#include "../comedidev.h"
+#include "comedi_fc.h"
+#include "mite.h"
#define PCI_MITE_SIZE 4096
#define PCI_DAQ_SIZE 4096
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 5196b460ce11..bcd4df290ec4 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -41,7 +41,9 @@ Updated: Sat, 25 Jan 2003 13:24:40 -0800
#define DEBUG 1
#define DEBUG_FLAGS
+#include <linux/pci.h>
#include <linux/interrupt.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
@@ -452,16 +454,11 @@ static int ni6527_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni6527_driver);
}
-static void ni6527_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static struct pci_driver ni6527_pci_driver = {
.name = DRIVER_NAME,
.id_table = ni6527_pci_table,
.probe = ni6527_pci_probe,
- .remove = ni6527_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni6527_driver, ni6527_pci_driver);
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 2fb4b7790aeb..bfa790ecf41d 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -50,8 +50,11 @@ except maybe the 6514.
#define DEBUG 1
#define DEBUG_FLAGS
+
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
@@ -787,16 +790,11 @@ static int ni_65xx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_65xx_driver);
}
-static void ni_65xx_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static struct pci_driver ni_65xx_pci_driver = {
.name = "ni_65xx",
.id_table = ni_65xx_pci_table,
.probe = ni_65xx_pci_probe,
- .remove = ni_65xx_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_65xx_driver, ni_65xx_pci_driver);
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 26baf9c96fff..e46dd7a1a724 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -40,8 +40,11 @@ DAQ 6601/6602 User Manual (NI 322137B-01)
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
+
#include "../comedidev.h"
+
#include "mite.h"
#include "ni_tio.h"
@@ -1327,11 +1330,6 @@ static int ni_660x_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_660x_driver);
}
-static void ni_660x_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(ni_660x_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c60)},
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1310)},
@@ -1345,7 +1343,7 @@ static struct pci_driver ni_660x_pci_driver = {
.name = "ni_660x",
.id_table = ni_660x_pci_table,
.probe = ni_660x_pci_probe,
- .remove = ni_660x_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_660x_driver, ni_660x_pci_driver);
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 272caeb6ecee..2faf86c83dc5 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -41,8 +41,10 @@ Commands are not supported.
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+
#include "../comedidev.h"
#include "mite.h"
@@ -309,11 +311,6 @@ static int ni_670x_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_670x_driver);
}
-static void ni_670x_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(ni_670x_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c90) },
{ PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1920) },
@@ -325,7 +322,7 @@ static struct pci_driver ni_670x_pci_driver = {
.name = "ni_670x",
.id_table = ni_670x_pci_table,
.probe = ni_670x_pci_probe,
- .remove = ni_670x_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_670x_driver, ni_670x_pci_driver);
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 68d7c6a5db7d..9cc6092eacdd 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -50,22 +50,15 @@ Manuals: Register level: http://www.ni.com/pdf/manuals/340698.pdf
User Manual: http://www.ni.com/pdf/manuals/320676d.pdf
*/
+#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-#include "../comedidev.h"
-#include <linux/ioport.h>
+#include "../comedidev.h"
#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-static struct pcmcia_device *pcmcia_cur_dev;
-
-struct daq700_board {
- const char *name;
-};
-
/* daqcard700 registers */
#define DIO_W 0x04 /* WO 8bit */
#define DIO_R 0x05 /* RO 8bit */
@@ -202,24 +195,20 @@ static void daq700_ai_config(struct comedi_device *dev,
inw(iobase + ADFIFO_R); /* read 16bit junk from FIFO to clear */
}
-static int daq700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+static int daq700_auto_attach(struct comedi_device *dev,
+ unsigned long context)
{
- const struct daq700_board *thisboard = comedi_board(dev);
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
struct comedi_subdevice *s;
- struct pcmcia_device *link;
int ret;
- link = pcmcia_cur_dev; /* XXX hack */
- if (!link)
- return -EIO;
+ dev->board_name = dev->driver->driver_name;
+ link->config_flags |= CONF_AUTO_SET_IO;
+ ret = comedi_pcmcia_enable(dev, NULL);
+ if (ret)
+ return ret;
dev->iobase = link->resource[0]->start;
- if (!dev->iobase) {
- dev_err(dev->class_dev, "io base address is zero!\n");
- return -EINVAL;
- }
-
- dev->board_name = thisboard->name;
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
@@ -256,68 +245,16 @@ static int daq700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return 0;
}
-static void daq700_detach(struct comedi_device *dev)
-{
- /* nothing to cleanup */
-}
-
-static const struct daq700_board daq700_boards[] = {
- {
- .name = "daqcard-700",
- }, {
- .name = "ni_daq_700",
- },
-};
-
static struct comedi_driver daq700_driver = {
.driver_name = "ni_daq_700",
.module = THIS_MODULE,
- .attach = daq700_attach,
- .detach = daq700_detach,
- .board_name = &daq700_boards[0].name,
- .num_names = ARRAY_SIZE(daq700_boards),
- .offset = sizeof(struct daq700_board),
+ .auto_attach = daq700_auto_attach,
+ .detach = comedi_pcmcia_disable,
};
-static int daq700_pcmcia_config_loop(struct pcmcia_device *p_dev,
- void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
static int daq700_cs_attach(struct pcmcia_device *link)
{
- int ret;
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_AUDIO |
- CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, daq700_pcmcia_config_loop, NULL);
- if (ret)
- goto failed;
-
- if (!link->irq)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- pcmcia_cur_dev = link;
- return 0;
-
-failed:
- pcmcia_disable_device(link);
- return ret;
-}
-
-static void daq700_cs_detach(struct pcmcia_device *link)
-{
- pcmcia_disable_device(link);
- pcmcia_cur_dev = NULL;
+ return comedi_pcmcia_auto_config(link, &daq700_driver);
}
static const struct pcmcia_device_id daq700_cs_ids[] = {
@@ -329,35 +266,11 @@ MODULE_DEVICE_TABLE(pcmcia, daq700_cs_ids);
static struct pcmcia_driver daq700_cs_driver = {
.name = "ni_daq_700",
.owner = THIS_MODULE,
- .probe = daq700_cs_attach,
- .remove = daq700_cs_detach,
.id_table = daq700_cs_ids,
+ .probe = daq700_cs_attach,
+ .remove = comedi_pcmcia_auto_unconfig,
};
-
-static int __init daq700_cs_init(void)
-{
- int ret;
-
- ret = comedi_driver_register(&daq700_driver);
- if (ret < 0)
- return ret;
-
- ret = pcmcia_register_driver(&daq700_cs_driver);
- if (ret < 0) {
- comedi_driver_unregister(&daq700_driver);
- return ret;
- }
-
- return 0;
-}
-module_init(daq700_cs_init);
-
-static void __exit daq700_cs_exit(void)
-{
- pcmcia_unregister_driver(&daq700_cs_driver);
- comedi_driver_unregister(&daq700_driver);
-}
-module_exit(daq700_cs_exit);
+module_comedi_pcmcia_driver(daq700_driver, daq700_cs_driver);
MODULE_AUTHOR("Fred Brooks <nsaspook@nsaspook.com>");
MODULE_DESCRIPTION(
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 7b333353c5d9..e1cc9d01f200 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -37,127 +37,28 @@ This is just a wrapper around the 8255.o driver to properly handle
the PCMCIA interface.
*/
- /* #define LABPC_DEBUG *//* enable debugging messages */
-#undef LABPC_DEBUG
-
-#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
-#include "8255.h"
-
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-static struct pcmcia_device *pcmcia_cur_dev;
-
-#define DIO24_SIZE 4 /* size of io region used by board */
-
-static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it);
-static void dio24_detach(struct comedi_device *dev);
-
-enum dio24_bustype { pcmcia_bustype };
-
-struct dio24_board_struct {
- const char *name;
- int device_id; /* device id for pcmcia board */
- enum dio24_bustype bustype; /* PCMCIA */
- int have_dio; /* have 8255 chip */
- /* function pointers so we can use inb/outb or readb/writeb as appropriate */
- unsigned int (*read_byte) (unsigned int address);
- void (*write_byte) (unsigned int byte, unsigned int address);
-};
-
-static const struct dio24_board_struct dio24_boards[] = {
- {
- .name = "daqcard-dio24",
- .device_id = 0x475c, /* 0x10b is manufacturer id, 0x475c is device id */
- .bustype = pcmcia_bustype,
- .have_dio = 1,
- },
- {
- .name = "ni_daq_dio24",
- .device_id = 0x475c, /* 0x10b is manufacturer id, 0x475c is device id */
- .bustype = pcmcia_bustype,
- .have_dio = 1,
- },
-};
-
-/*
- * Useful for shorthand access to the particular board structure
- */
-#define thisboard ((const struct dio24_board_struct *)dev->board_ptr)
-
-struct dio24_private {
-
- int data; /* number of data points left to be taken */
-};
-
-static struct comedi_driver driver_dio24 = {
- .driver_name = "ni_daq_dio24",
- .module = THIS_MODULE,
- .attach = dio24_attach,
- .detach = dio24_detach,
- .num_names = ARRAY_SIZE(dio24_boards),
- .board_name = &dio24_boards[0].name,
- .offset = sizeof(struct dio24_board_struct),
-};
+#include "8255.h"
-static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+static int dio24_auto_attach(struct comedi_device *dev,
+ unsigned long context)
{
- struct dio24_private *devpriv;
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
struct comedi_subdevice *s;
- unsigned long iobase = 0;
-#ifdef incomplete
- unsigned int irq = 0;
-#endif
- struct pcmcia_device *link;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
- /* get base address, irq etc. based on bustype */
- switch (thisboard->bustype) {
- case pcmcia_bustype:
- link = pcmcia_cur_dev; /* XXX hack */
- if (!link)
- return -EIO;
- iobase = link->resource[0]->start;
-#ifdef incomplete
- irq = link->irq;
-#endif
- break;
- default:
- pr_err("bug! couldn't determine board type\n");
- return -EINVAL;
- break;
- }
- pr_debug("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor,
- thisboard->name, iobase);
-#ifdef incomplete
- if (irq)
- pr_debug("irq %u\n", irq);
-#endif
-
- if (iobase == 0) {
- pr_err("io base address is zero!\n");
- return -EINVAL;
- }
+ dev->board_name = dev->driver->driver_name;
- dev->iobase = iobase;
-
-#ifdef incomplete
- /* grab our IRQ */
- dev->irq = irq;
-#endif
-
- dev->board_name = thisboard->name;
+ link->config_flags |= CONF_AUTO_SET_IO;
+ ret = comedi_pcmcia_enable(dev, NULL);
+ if (ret)
+ return ret;
+ dev->iobase = link->resource[0]->start;
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
@@ -165,184 +66,48 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* 8255 dio */
s = &dev->subdevices[0];
- subdev_8255_init(dev, s, NULL, dev->iobase);
+ ret = subdev_8255_init(dev, s, NULL, dev->iobase);
+ if (ret)
+ return ret;
return 0;
-};
+}
static void dio24_detach(struct comedi_device *dev)
{
- struct comedi_subdevice *s;
-
- if (dev->subdevices) {
- s = &dev->subdevices[0];
- subdev_8255_cleanup(dev, s);
- }
- if (thisboard->bustype != pcmcia_bustype && dev->iobase)
- release_region(dev->iobase, DIO24_SIZE);
- if (dev->irq)
- free_irq(dev->irq, dev);
-};
-
-static void dio24_config(struct pcmcia_device *link);
-static void dio24_release(struct pcmcia_device *link);
-static int dio24_cs_suspend(struct pcmcia_device *p_dev);
-static int dio24_cs_resume(struct pcmcia_device *p_dev);
-
-static int dio24_cs_attach(struct pcmcia_device *);
-static void dio24_cs_detach(struct pcmcia_device *);
+ if (dev->subdevices)
+ subdev_8255_cleanup(dev, &dev->subdevices[0]);
+ comedi_pcmcia_disable(dev);
+}
-struct local_info_t {
- struct pcmcia_device *link;
- int stop;
- struct bus_operations *bus;
+static struct comedi_driver driver_dio24 = {
+ .driver_name = "ni_daq_dio24",
+ .module = THIS_MODULE,
+ .auto_attach = dio24_auto_attach,
+ .detach = dio24_detach,
};
static int dio24_cs_attach(struct pcmcia_device *link)
{
- struct local_info_t *local;
-
- dev_info(&link->dev, "ni_daq_dio24: HOLA SOY YO - CS-attach!\n");
-
- dev_dbg(&link->dev, "dio24_cs_attach()\n");
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
- if (!local)
- return -ENOMEM;
- local->link = link;
- link->priv = local;
-
- pcmcia_cur_dev = link;
-
- dio24_config(link);
-
- return 0;
-} /* dio24_cs_attach */
-
-static void dio24_cs_detach(struct pcmcia_device *link)
-{
- ((struct local_info_t *)link->priv)->stop = 1;
- dio24_release(link);
-
- /* This points to the parent local_info_t struct */
- kfree(link->priv);
+ return comedi_pcmcia_auto_config(link, &driver_dio24);
}
-static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
- void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
-static void dio24_config(struct pcmcia_device *link)
-{
- int ret;
-
- dev_info(&link->dev, "ni_daq_dio24: HOLA SOY YO! - config\n");
-
- dev_dbg(&link->dev, "dio24_config\n");
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_AUDIO |
- CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, NULL);
- if (ret) {
- dev_warn(&link->dev, "no configuration found\n");
- goto failed;
- }
-
- if (!link->irq)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- return;
-
-failed:
- dev_info(&link->dev, "Fallo");
- dio24_release(link);
-
-} /* dio24_config */
-
-static void dio24_release(struct pcmcia_device *link)
-{
- dev_dbg(&link->dev, "dio24_release\n");
-
- pcmcia_disable_device(link);
-} /* dio24_release */
-
-static int dio24_cs_suspend(struct pcmcia_device *link)
-{
- struct local_info_t *local = link->priv;
-
- /* Mark the device as stopped, to block IO until later */
- local->stop = 1;
- return 0;
-} /* dio24_cs_suspend */
-
-static int dio24_cs_resume(struct pcmcia_device *link)
-{
- struct local_info_t *local = link->priv;
-
- local->stop = 0;
- return 0;
-} /* dio24_cs_resume */
-
-/*====================================================================*/
-
static const struct pcmcia_device_id dio24_cs_ids[] = {
- /* N.B. These IDs should match those in dio24_boards */
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x475c), /* daqcard-dio24 */
PCMCIA_DEVICE_NULL
};
-
MODULE_DEVICE_TABLE(pcmcia, dio24_cs_ids);
-MODULE_AUTHOR("Daniel Vecino Castel <dvecino@able.es>");
-MODULE_DESCRIPTION("Comedi driver for National Instruments "
- "PCMCIA DAQ-Card DIO-24");
-MODULE_LICENSE("GPL");
static struct pcmcia_driver dio24_cs_driver = {
- .probe = dio24_cs_attach,
- .remove = dio24_cs_detach,
- .suspend = dio24_cs_suspend,
- .resume = dio24_cs_resume,
- .id_table = dio24_cs_ids,
- .owner = THIS_MODULE,
- .name = "ni_daq_dio24",
+ .name = "ni_daq_dio24",
+ .owner = THIS_MODULE,
+ .id_table = dio24_cs_ids,
+ .probe = dio24_cs_attach,
+ .remove = comedi_pcmcia_auto_unconfig,
};
+module_comedi_pcmcia_driver(driver_dio24, dio24_cs_driver);
-static int __init init_dio24_cs(void)
-{
- printk("ni_daq_dio24: HOLA SOY YO!\n");
- pcmcia_register_driver(&dio24_cs_driver);
- return 0;
-}
-
-static void __exit exit_dio24_cs(void)
-{
- pcmcia_unregister_driver(&dio24_cs_driver);
-}
-
-int __init init_module(void)
-{
- int ret;
-
- ret = init_dio24_cs();
- if (ret < 0)
- return ret;
-
- return comedi_driver_register(&driver_dio24);
-}
-
-void __exit cleanup_module(void)
-{
- exit_dio24_cs();
- comedi_driver_unregister(&driver_dio24);
-}
+MODULE_AUTHOR("Daniel Vecino Castel <dvecino@able.es>");
+MODULE_DESCRIPTION(
+ "Comedi driver for National Instruments PCMCIA DAQ-Card DIO-24");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index d29c4d761bac..f957b8859b3d 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -73,12 +73,14 @@ NI manuals:
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/delay.h>
+
#include "../comedidev.h"
-#include <linux/delay.h>
#include <asm/dma.h>
#include "8253.h"
@@ -568,13 +570,11 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
return -EINVAL;
} else if (dma_chan) {
/* allocate dma buffer */
- devpriv->dma_buffer =
- kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA);
- if (devpriv->dma_buffer == NULL) {
- dev_err(dev->class_dev,
- "failed to allocate dma buffer\n");
+ devpriv->dma_buffer = kmalloc(dma_buffer_size,
+ GFP_KERNEL | GFP_DMA);
+ if (devpriv->dma_buffer == NULL)
return -ENOMEM;
- }
+
if (request_dma(dma_chan, DRV_NAME)) {
dev_err(dev->class_dev,
"failed to allocate dma channel %u\n",
@@ -1202,7 +1202,8 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
else
channel = CR_CHAN(cmd->chanlist[0]);
/* munge channel bits for differential / scan disabled mode */
- if (mode != MODE_SINGLE_CHAN && aref == AREF_DIFF)
+ if ((mode == MODE_SINGLE_CHAN || mode == MODE_SINGLE_CHAN_INTERVAL) &&
+ aref == AREF_DIFF)
channel *= 2;
devpriv->command1_bits |= ADC_CHAN_BITS(channel);
devpriv->command1_bits |= thisboard->ai_range_code[range];
@@ -1217,21 +1218,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->write_byte(devpriv->command1_bits,
dev->iobase + COMMAND1_REG);
}
- /* setup any external triggering/pacing (command4 register) */
- devpriv->command4_bits = 0;
- if (cmd->convert_src != TRIG_EXT)
- devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT;
- /* XXX should discard first scan when using interval scanning
- * since manual says it is not synced with scan clock */
- if (labpc_use_continuous_mode(cmd, mode) == 0) {
- devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT;
- if (cmd->scan_begin_src == TRIG_EXT)
- devpriv->command4_bits |= EXT_SCAN_EN_BIT;
- }
- /* single-ended/differential */
- if (aref == AREF_DIFF)
- devpriv->command4_bits |= ADC_DIFF_BIT;
- devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
devpriv->write_byte(cmd->chanlist_len,
dev->iobase + INTERVAL_COUNT_REG);
@@ -1311,6 +1297,22 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->command3_bits &= ~ADC_FNE_INTR_EN_BIT;
devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG);
+ /* setup any external triggering/pacing (command4 register) */
+ devpriv->command4_bits = 0;
+ if (cmd->convert_src != TRIG_EXT)
+ devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT;
+ /* XXX should discard first scan when using interval scanning
+ * since manual says it is not synced with scan clock */
+ if (labpc_use_continuous_mode(cmd, mode) == 0) {
+ devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT;
+ if (cmd->scan_begin_src == TRIG_EXT)
+ devpriv->command4_bits |= EXT_SCAN_EN_BIT;
+ }
+ /* single-ended/differential */
+ if (aref == AREF_DIFF)
+ devpriv->command4_bits |= ADC_DIFF_BIT;
+ devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
+
/* startup acquisition */
/* command2 reg */
@@ -2116,16 +2118,11 @@ static int labpc_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &labpc_driver);
}
-static void labpc_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static struct pci_driver labpc_pci_driver = {
.name = DRV_NAME,
.id_table = labpc_pci_table,
.probe = labpc_pci_probe,
- .remove = labpc_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(labpc_driver, labpc_pci_driver);
#else
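The pass-through remove() wrapper dropped above (and from the other PCI drivers later in this series) is redundant because comedi_pci_auto_unconfig() already has the exact type that struct pci_driver expects for its .remove callback, so the helper can be assigned directly. A minimal compile-checkable sketch, with example_remove as a hypothetical name:

/*
 * Why the wrappers can go away: the comedi core helper's prototype matches
 * the pci_driver .remove callback type.
 */
#include <linux/pci.h>

void comedi_pci_auto_unconfig(struct pci_dev *pcidev);	/* declared by the comedi core */

static void (*const example_remove)(struct pci_dev *) = comedi_pci_auto_unconfig;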
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index bfe19fa7d66f..be7d1413b2e5 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -59,8 +59,6 @@ NI manuals:
*/
-#undef LABPC_DEBUG /* debugging messages */
-
#include "../comedidev.h"
#include <linux/delay.h>
@@ -75,240 +73,81 @@ NI manuals:
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-static struct pcmcia_device *pcmcia_cur_dev;
-
-static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it);
-
static const struct labpc_board_struct labpc_cs_boards[] = {
{
- .name = "daqcard-1200",
- .device_id = 0x103, /* 0x10b is manufacturer id,
- 0x103 is device id */
- .ai_speed = 10000,
- .bustype = pcmcia_bustype,
- .register_layout = labpc_1200_layout,
- .has_ao = 1,
- .ai_range_table = &range_labpc_1200_ai,
- .ai_range_code = labpc_1200_ai_gain_bits,
- .ai_range_is_unipolar = labpc_1200_is_unipolar,
- .ai_scan_up = 0,
- .memory_mapped_io = 0,
- },
- /* duplicate entry, to support using alternate name */
- {
- .name = "ni_labpc_cs",
- .device_id = 0x103,
- .ai_speed = 10000,
- .bustype = pcmcia_bustype,
- .register_layout = labpc_1200_layout,
- .has_ao = 1,
- .ai_range_table = &range_labpc_1200_ai,
- .ai_range_code = labpc_1200_ai_gain_bits,
- .ai_range_is_unipolar = labpc_1200_is_unipolar,
- .ai_scan_up = 0,
- .memory_mapped_io = 0,
- },
+ .name = "daqcard-1200",
+ .device_id = 0x103,
+ .ai_speed = 10000,
+ .bustype = pcmcia_bustype,
+ .register_layout = labpc_1200_layout,
+ .has_ao = 1,
+ .ai_range_table = &range_labpc_1200_ai,
+ .ai_range_code = labpc_1200_ai_gain_bits,
+ .ai_range_is_unipolar = labpc_1200_is_unipolar,
+ },
};
-/*
- * Useful for shorthand access to the particular board structure
- */
-#define thisboard ((const struct labpc_board_struct *)dev->board_ptr)
-
-static struct comedi_driver driver_labpc_cs = {
- .driver_name = "ni_labpc_cs",
- .module = THIS_MODULE,
- .attach = &labpc_attach,
- .detach = &labpc_common_detach,
- .num_names = ARRAY_SIZE(labpc_cs_boards),
- .board_name = &labpc_cs_boards[0].name,
- .offset = sizeof(struct labpc_board_struct),
-};
-
-static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+static int labpc_auto_attach(struct comedi_device *dev,
+ unsigned long context)
{
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
struct labpc_private *devpriv;
- unsigned long iobase = 0;
- unsigned int irq = 0;
- struct pcmcia_device *link;
+ int ret;
+
+ /* The ni_labpc driver needs the board_ptr */
+ dev->board_ptr = &labpc_cs_boards[0];
+
+ link->config_flags |= CONF_AUTO_SET_IO |
+ CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ;
+ ret = comedi_pcmcia_enable(dev, NULL);
+ if (ret)
+ return ret;
+ dev->iobase = link->resource[0]->start;
+
+ if (!link->irq)
+ return -EINVAL;
devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
if (!devpriv)
return -ENOMEM;
dev->private = devpriv;
- /* get base address, irq etc. based on bustype */
- switch (thisboard->bustype) {
- case pcmcia_bustype:
- link = pcmcia_cur_dev; /* XXX hack */
- if (!link)
- return -EIO;
- iobase = link->resource[0]->start;
- irq = link->irq;
- break;
- default:
- pr_err("bug! couldn't determine board type\n");
- return -EINVAL;
- break;
- }
- return labpc_common_attach(dev, iobase, irq, 0);
+ return labpc_common_attach(dev, dev->iobase, link->irq, 0);
}
-static void labpc_config(struct pcmcia_device *link);
-static void labpc_release(struct pcmcia_device *link);
-static int labpc_cs_suspend(struct pcmcia_device *p_dev);
-static int labpc_cs_resume(struct pcmcia_device *p_dev);
-
-static int labpc_cs_attach(struct pcmcia_device *);
-static void labpc_cs_detach(struct pcmcia_device *);
+static void labpc_detach(struct comedi_device *dev)
+{
+ labpc_common_detach(dev);
+ comedi_pcmcia_disable(dev);
+}
-struct local_info_t {
- struct pcmcia_device *link;
- int stop;
- struct bus_operations *bus;
+static struct comedi_driver driver_labpc_cs = {
+ .driver_name = "ni_labpc_cs",
+ .module = THIS_MODULE,
+ .auto_attach = labpc_auto_attach,
+ .detach = labpc_detach,
};
static int labpc_cs_attach(struct pcmcia_device *link)
{
- struct local_info_t *local;
-
- dev_dbg(&link->dev, "labpc_cs_attach()\n");
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
- if (!local)
- return -ENOMEM;
- local->link = link;
- link->priv = local;
-
- pcmcia_cur_dev = link;
-
- labpc_config(link);
-
- return 0;
-} /* labpc_cs_attach */
-
-static void labpc_cs_detach(struct pcmcia_device *link)
-{
- ((struct local_info_t *)link->priv)->stop = 1;
- labpc_release(link);
-
- /* This points to the parent local_info_t struct (may be null) */
- kfree(link->priv);
-
+ return comedi_pcmcia_auto_config(link, &driver_labpc_cs);
}
-static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
- void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
-
-static void labpc_config(struct pcmcia_device *link)
-{
- int ret;
-
- dev_dbg(&link->dev, "labpc_config\n");
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ |
- CONF_AUTO_AUDIO | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, labpc_pcmcia_config_loop, NULL);
- if (ret) {
- dev_warn(&link->dev, "no configuration found\n");
- goto failed;
- }
-
- if (!link->irq)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- return;
-
-failed:
- labpc_release(link);
-
-} /* labpc_config */
-
-static void labpc_release(struct pcmcia_device *link)
-{
- dev_dbg(&link->dev, "labpc_release\n");
-
- pcmcia_disable_device(link);
-} /* labpc_release */
-
-static int labpc_cs_suspend(struct pcmcia_device *link)
-{
- struct local_info_t *local = link->priv;
-
- /* Mark the device as stopped, to block IO until later */
- local->stop = 1;
- return 0;
-} /* labpc_cs_suspend */
-
-static int labpc_cs_resume(struct pcmcia_device *link)
-{
- struct local_info_t *local = link->priv;
-
- local->stop = 0;
- return 0;
-} /* labpc_cs_resume */
-
static const struct pcmcia_device_id labpc_cs_ids[] = {
- /* N.B. These IDs should match those in labpc_cs_boards (ni_labpc.c) */
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0103), /* daqcard-1200 */
PCMCIA_DEVICE_NULL
};
-
MODULE_DEVICE_TABLE(pcmcia, labpc_cs_ids);
-MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
-MODULE_DESCRIPTION("Comedi driver for National Instruments Lab-PC");
-MODULE_LICENSE("GPL");
static struct pcmcia_driver labpc_cs_driver = {
- .probe = labpc_cs_attach,
- .remove = labpc_cs_detach,
- .suspend = labpc_cs_suspend,
- .resume = labpc_cs_resume,
- .id_table = labpc_cs_ids,
- .owner = THIS_MODULE,
- .name = "daqcard-1200",
+ .name = "daqcard-1200",
+ .owner = THIS_MODULE,
+ .id_table = labpc_cs_ids,
+ .probe = labpc_cs_attach,
+ .remove = comedi_pcmcia_auto_unconfig,
};
+module_comedi_pcmcia_driver(driver_labpc_cs, labpc_cs_driver);
-static int __init init_labpc_cs(void)
-{
- pcmcia_register_driver(&labpc_cs_driver);
- return 0;
-}
-
-static void __exit exit_labpc_cs(void)
-{
- pcmcia_unregister_driver(&labpc_cs_driver);
-}
-
-static int __init labpc_init_module(void)
-{
- int ret;
-
- ret = init_labpc_cs();
- if (ret < 0)
- return ret;
-
- return comedi_driver_register(&driver_labpc_cs);
-}
-
-static void __exit labpc_exit_module(void)
-{
- exit_labpc_cs();
- comedi_driver_unregister(&driver_labpc_cs);
-}
-
-module_init(labpc_init_module);
-module_exit(labpc_exit_module);
+MODULE_DESCRIPTION("Comedi driver for National Instruments Lab-PC");
+MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
+MODULE_LICENSE("GPL");
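labpc_auto_attach() above recovers the PCMCIA function from the comedi device with comedi_to_pcmcia_dev(). Assuming the usual comedi hardware-device plumbing, the helper is roughly a container_of() cast of dev->hw_dev back to its PCMCIA device; the sketch below is not the verbatim in-tree definition, and the example_ name is hypothetical.

/* Needs <pcmcia/ds.h> for to_pcmcia_dev() and "../comedidev.h" for comedi_device. */
static inline struct pcmcia_device *example_to_pcmcia_dev(struct comedi_device *dev)
{
	/* hw_dev was set to the PCMCIA device when the card was auto-configured */
	return dev->hw_dev ? to_pcmcia_dev(dev->hw_dev) : NULL;
}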
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 56dc59908d36..b7403597e905 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -986,7 +986,7 @@ static void ni_event(struct comedi_device *dev, struct comedi_subdevice *s)
if (s->
async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW |
COMEDI_CB_EOA)) {
- switch (s - dev->subdevices) {
+ switch (s->index) {
case NI_AI_SUBDEV:
ni_ai_reset(dev, s);
break;
@@ -1086,7 +1086,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
("ni_mio_common: a_status=0xffff. Card removed?\n");
/* we probably aren't even running a command now,
* so it's a good idea to be careful. */
- if (comedi_get_subdevice_runflags(s) & SRF_RUNNING) {
+ if (comedi_is_subdevice_running(s)) {
s->async->events |=
COMEDI_CB_ERROR | COMEDI_CB_EOA;
ni_event(dev, s);
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 76c6a13ea9d6..888be7b89d2d 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -65,112 +65,90 @@ See the notes in the ni_atmio.o driver.
#define MAX_N_CALDACS 32
static const struct ni_board_struct ni_boards[] = {
- {.device_id = 0x010d,
- .name = "DAQCard-ai-16xe-50",
- .n_adchan = 16,
- .adbits = 16,
- .ai_fifo_depth = 1024,
- .alwaysdither = 0,
- .gainlkup = ai_gain_8,
- .ai_speed = 5000,
- .n_aochan = 0,
- .aobits = 0,
- .ao_fifo_depth = 0,
- .ao_unipolar = 0,
- .num_p0_dio_channels = 8,
- .has_8255 = 0,
- .caldac = {dac8800, dac8043},
- },
- {.device_id = 0x010c,
- .name = "DAQCard-ai-16e-4",
- .n_adchan = 16,
- .adbits = 12,
- .ai_fifo_depth = 1024,
- .alwaysdither = 0,
- .gainlkup = ai_gain_16,
- .ai_speed = 4000,
- .n_aochan = 0,
- .aobits = 0,
- .ao_fifo_depth = 0,
- .ao_unipolar = 0,
- .num_p0_dio_channels = 8,
- .has_8255 = 0,
- .caldac = {mb88341}, /* verified */
- },
- {.device_id = 0x02c4,
- .name = "DAQCard-6062E",
- .n_adchan = 16,
- .adbits = 12,
- .ai_fifo_depth = 8192,
- .alwaysdither = 0,
- .gainlkup = ai_gain_16,
- .ai_speed = 2000,
- .n_aochan = 2,
- .aobits = 12,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_bipolar10,
- .ao_unipolar = 0,
- .ao_speed = 1176,
- .num_p0_dio_channels = 8,
- .has_8255 = 0,
- .caldac = {ad8804_debug}, /* verified */
- },
- {.device_id = 0x075e,
- .name = "DAQCard-6024E", /* specs incorrect! */
- .n_adchan = 16,
- .adbits = 12,
- .ai_fifo_depth = 1024,
- .alwaysdither = 0,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .aobits = 12,
- .ao_fifo_depth = 0,
- .ao_range_table = &range_bipolar10,
- .ao_unipolar = 0,
- .ao_speed = 1000000,
- .num_p0_dio_channels = 8,
- .has_8255 = 0,
- .caldac = {ad8804_debug},
- },
- {.device_id = 0x0245,
- .name = "DAQCard-6036E", /* specs incorrect! */
- .n_adchan = 16,
- .adbits = 16,
- .ai_fifo_depth = 1024,
- .alwaysdither = 1,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .aobits = 16,
- .ao_fifo_depth = 0,
- .ao_range_table = &range_bipolar10,
- .ao_unipolar = 0,
- .ao_speed = 1000000,
- .num_p0_dio_channels = 8,
- .has_8255 = 0,
- .caldac = {ad8804_debug},
+ {
+ .device_id = 0x010d,
+ .name = "DAQCard-ai-16xe-50",
+ .n_adchan = 16,
+ .adbits = 16,
+ .ai_fifo_depth = 1024,
+ .gainlkup = ai_gain_8,
+ .ai_speed = 5000,
+ .num_p0_dio_channels = 8,
+ .caldac = { dac8800, dac8043 },
+ }, {
+ .device_id = 0x010c,
+ .name = "DAQCard-ai-16e-4",
+ .n_adchan = 16,
+ .adbits = 12,
+ .ai_fifo_depth = 1024,
+ .gainlkup = ai_gain_16,
+ .ai_speed = 4000,
+ .num_p0_dio_channels = 8,
+ .caldac = { mb88341 }, /* verified */
+ }, {
+ .device_id = 0x02c4,
+ .name = "DAQCard-6062E",
+ .n_adchan = 16,
+ .adbits = 12,
+ .ai_fifo_depth = 8192,
+ .gainlkup = ai_gain_16,
+ .ai_speed = 2000,
+ .n_aochan = 2,
+ .aobits = 12,
+ .ao_fifo_depth = 2048,
+ .ao_range_table = &range_bipolar10,
+ .ao_speed = 1176,
+ .num_p0_dio_channels = 8,
+ .caldac = { ad8804_debug }, /* verified */
+ }, {
+ /* specs incorrect! */
+ .device_id = 0x075e,
+ .name = "DAQCard-6024E",
+ .n_adchan = 16,
+ .adbits = 12,
+ .ai_fifo_depth = 1024,
+ .gainlkup = ai_gain_4,
+ .ai_speed = 5000,
+ .n_aochan = 2,
+ .aobits = 12,
+ .ao_range_table = &range_bipolar10,
+ .ao_speed = 1000000,
+ .num_p0_dio_channels = 8,
+ .caldac = { ad8804_debug },
+ }, {
+ /* specs incorrect! */
+ .device_id = 0x0245,
+ .name = "DAQCard-6036E",
+ .n_adchan = 16,
+ .adbits = 16,
+ .ai_fifo_depth = 1024,
+ .alwaysdither = 1,
+ .gainlkup = ai_gain_4,
+ .ai_speed = 5000,
+ .n_aochan = 2,
+ .aobits = 16,
+ .ao_range_table = &range_bipolar10,
+ .ao_speed = 1000000,
+ .num_p0_dio_channels = 8,
+ .caldac = { ad8804_debug },
},
#if 0
- {.device_id = 0x0000, /* unknown */
- .name = "DAQCard-6715",
- .n_adchan = 0,
- .n_aochan = 8,
- .aobits = 12,
- .ao_671x = 8192,
- .num_p0_dio_channels = 8,
- .caldac = {mb88341, mb88341},
- },
+ {
+ .device_id = 0x0000, /* unknown */
+ .name = "DAQCard-6715",
+ .n_aochan = 8,
+ .aobits = 12,
+ .ao_671x = 8192,
+ .num_p0_dio_channels = 8,
+ .caldac = { mb88341, mb88341 },
+ },
#endif
- /* N.B. Update ni_mio_cs_ids[] when entries added above. */
};
#define interrupt_pin(a) 0
#define IRQ_POLARITY 1
-#define NI_E_IRQ_FLAGS IRQF_SHARED
-
struct ni_private {
struct pcmcia_device *link;
@@ -225,67 +203,22 @@ static uint16_t mio_cs_win_in(struct comedi_device *dev, int addr)
return ret;
}
-static int mio_cs_attach(struct comedi_device *dev,
- struct comedi_devconfig *it);
-static void mio_cs_detach(struct comedi_device *dev);
-static struct comedi_driver driver_ni_mio_cs = {
- .driver_name = "ni_mio_cs",
- .module = THIS_MODULE,
- .attach = mio_cs_attach,
- .detach = mio_cs_detach,
-};
-
#include "ni_mio_common.c"
-static int ni_getboardtype(struct comedi_device *dev,
- struct pcmcia_device *link);
-
-static void mio_cs_detach(struct comedi_device *dev)
-{
- mio_common_detach(dev);
- if (dev->irq)
- free_irq(dev->irq, dev);
-}
-
-static void mio_cs_config(struct pcmcia_device *link);
-static void cs_release(struct pcmcia_device *link);
-static void cs_detach(struct pcmcia_device *);
-
-static struct pcmcia_device *cur_dev;
-
-static int cs_attach(struct pcmcia_device *link)
+static const void *ni_getboardtype(struct comedi_device *dev,
+ struct pcmcia_device *link)
{
- cur_dev = link;
-
- mio_cs_config(link);
-
- return 0;
-}
-
-static void cs_release(struct pcmcia_device *link)
-{
- pcmcia_disable_device(link);
-}
-
-static void cs_detach(struct pcmcia_device *link)
-{
- cs_release(link);
-}
-
-static int mio_cs_suspend(struct pcmcia_device *link)
-{
- DPRINTK("pm suspend\n");
-
- return 0;
-}
+ static const struct ni_board_struct *board;
+ int i;
-static int mio_cs_resume(struct pcmcia_device *link)
-{
- DPRINTK("pm resume\n");
- return 0;
+ for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
+ board = &ni_boards[i];
+ if (board->device_id == link->card_id)
+ return board;
+ }
+ return NULL;
}
-
static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev, void *priv_data)
{
int base, ret;
@@ -302,114 +235,63 @@ static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev, void *priv_data)
return -ENODEV;
}
-
-static void mio_cs_config(struct pcmcia_device *link)
-{
- int ret;
-
- DPRINTK("mio_cs_config(link=%p)\n", link);
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, mio_pcmcia_config_loop, NULL);
- if (ret) {
- dev_warn(&link->dev, "no configuration found\n");
- return;
- }
-
- if (!link->irq)
- dev_info(&link->dev, "no IRQ available\n");
-
- ret = pcmcia_enable_device(link);
-}
-
-static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+static int mio_cs_auto_attach(struct comedi_device *dev,
+ unsigned long context)
{
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
+ static const struct ni_board_struct *board;
struct ni_private *devpriv;
- struct pcmcia_device *link;
- unsigned int irq;
int ret;
- DPRINTK("mio_cs_attach(dev=%p,it=%p)\n", dev, it);
-
- link = cur_dev; /* XXX hack */
- if (!link)
- return -EIO;
+ board = ni_getboardtype(dev, link);
+ if (!board)
+ return -ENODEV;
+ dev->board_ptr = board;
+ dev->board_name = board->name;
- dev->driver = &driver_ni_mio_cs;
+ link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
+ ret = comedi_pcmcia_enable(dev, mio_pcmcia_config_loop);
+ if (ret)
+ return ret;
dev->iobase = link->resource[0]->start;
- irq = link->irq;
-
- dev->board_ptr = ni_boards + ni_getboardtype(dev, link);
-
-#if 0
- {
- int i;
-
- printk("comedi%d: %s: DAQCard: io 0x%04lx, irq %u, ",
- dev->minor, dev->driver->driver_name, dev->iobase, irq);
-
- printk(" board fingerprint:");
- for (i = 0; i < 32; i += 2) {
- printk(" %04x %02x", inw(dev->iobase + i),
- inb(dev->iobase + i + 1));
- }
- printk("\n");
- printk(" board fingerprint (windowed):");
- for (i = 0; i < 10; i++)
- printk(" 0x%04x", win_in(i));
- printk("\n");
-
- printk("boardtype.name: %s\n", boardtype.name);
- }
-#endif
-
- dev->board_name = boardtype.name;
-
- ret = request_irq(irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
- "ni_mio_cs", dev);
- if (ret < 0) {
- dev_err(dev->class_dev, "irq not available\n");
- return -EINVAL;
- }
- dev->irq = irq;
+ link->priv = dev;
+ ret = pcmcia_request_irq(link, ni_E_interrupt);
+ if (ret)
+ return ret;
+ dev->irq = link->irq;
ret = ni_alloc_private(dev);
if (ret)
return ret;
- devpriv = dev->private;
-
- devpriv->stc_writew = &mio_cs_win_out;
- devpriv->stc_readw = &mio_cs_win_in;
- devpriv->stc_writel = &win_out2;
- devpriv->stc_readl = &win_in2;
-
- ret = ni_E_init(dev);
- if (ret < 0)
- return ret;
+ devpriv = dev->private;
+ devpriv->stc_writew = mio_cs_win_out;
+ devpriv->stc_readw = mio_cs_win_in;
+ devpriv->stc_writel = win_out2;
+ devpriv->stc_readl = win_in2;
- return 0;
+ return ni_E_init(dev);
}
-static int ni_getboardtype(struct comedi_device *dev,
- struct pcmcia_device *link)
+static void mio_cs_detach(struct comedi_device *dev)
{
- int i;
-
- for (i = 0; i < n_ni_boards; i++) {
- if (ni_boards[i].device_id == link->card_id)
- return i;
- }
+ mio_common_detach(dev);
+ comedi_pcmcia_disable(dev);
+}
- dev_err(dev->class_dev,
- "unknown board 0x%04x -- pretend it is a ", link->card_id);
+static struct comedi_driver driver_ni_mio_cs = {
+ .driver_name = "ni_mio_cs",
+ .module = THIS_MODULE,
+ .auto_attach = mio_cs_auto_attach,
+ .detach = mio_cs_detach,
+};
- return 0;
+static int cs_attach(struct pcmcia_device *link)
+{
+ return comedi_pcmcia_auto_config(link, &driver_ni_mio_cs);
}
-#ifdef MODULE
-
static const struct pcmcia_device_id ni_mio_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010d), /* DAQCard-ai-16xe-50 */
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010c), /* DAQCard-ai-16e-4 */
@@ -418,36 +300,17 @@ static const struct pcmcia_device_id ni_mio_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0245), /* DAQCard-6036E */
PCMCIA_DEVICE_NULL
};
-
MODULE_DEVICE_TABLE(pcmcia, ni_mio_cs_ids);
-MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
-MODULE_DESCRIPTION("Comedi driver for National Instruments DAQCard E series");
-MODULE_LICENSE("GPL");
static struct pcmcia_driver ni_mio_cs_driver = {
- .probe = &cs_attach,
- .remove = &cs_detach,
- .suspend = &mio_cs_suspend,
- .resume = &mio_cs_resume,
- .id_table = ni_mio_cs_ids,
- .owner = THIS_MODULE,
- .name = "ni_mio_cs",
+ .name = "ni_mio_cs",
+ .owner = THIS_MODULE,
+ .id_table = ni_mio_cs_ids,
+ .probe = cs_attach,
+ .remove = comedi_pcmcia_auto_unconfig,
};
+module_comedi_pcmcia_driver(driver_ni_mio_cs, ni_mio_cs_driver);
-int init_module(void)
-{
- pcmcia_register_driver(&ni_mio_cs_driver);
- comedi_driver_register(&driver_ni_mio_cs);
- return 0;
-}
-
-void cleanup_module(void)
-{
- pcmcia_unregister_driver(&ni_mio_cs_driver);
-#if 0
- while (cur_dev != NULL)
- cs_detach(cur_dev->handle);
-#endif
- comedi_driver_unregister(&driver_ni_mio_cs);
-}
-#endif
+MODULE_DESCRIPTION("Comedi driver for National Instruments DAQCard E series");
+MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
+MODULE_LICENSE("GPL");
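One detail worth noting in the conversion above: link->priv is set to the comedi_device before pcmcia_request_irq() is called, because the PCMCIA core passes p_dev->priv as the dev_id cookie to the registered handler. That is what lets ni_E_interrupt() (and daqp_interrupt() later in this series) treat dev_id as the comedi_device. A hedged sketch of the handler side, with example_interrupt as a hypothetical name:

/* Needs <linux/interrupt.h> and "../comedidev.h". */
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct comedi_device *dev = dev_id;	/* == link->priv set at attach time */

	if (!dev->attached)
		return IRQ_NONE;

	/* ... service the board, push samples to dev->read_subdev ... */
	return IRQ_HANDLED;
}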
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 084ebea33ab9..0a00260d11f3 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -55,9 +55,11 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
/* #define DEBUG 1 */
/* #define DEBUG_FLAGS */
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/firmware.h>
+
#include "../comedidev.h"
#include "comedi_fc.h"
@@ -1224,11 +1226,6 @@ static int ni_pcidio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_pcidio_driver);
}
-static void ni_pcidio_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(ni_pcidio_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1150) },
{ PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1320) },
@@ -1241,7 +1238,7 @@ static struct pci_driver ni_pcidio_pci_driver = {
.name = "ni_pcidio",
.id_table = ni_pcidio_pci_table,
.probe = ni_pcidio_pci_probe,
- .remove = ni_pcidio_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_pcidio_driver, ni_pcidio_pci_driver);
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index aaac0b2cc9eb..98b43f2fc65d 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -110,10 +110,12 @@ Bugs:
*/
+#include <linux/pci.h>
+#include <linux/delay.h>
+
#include "../comedidev.h"
#include <asm/byteorder.h>
-#include <linux/delay.h>
#include "ni_stc.h"
#include "mite.h"
@@ -963,7 +965,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -982,7 +984,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1001,7 +1003,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1037,7 +1039,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1056,7 +1058,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1092,7 +1094,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1111,7 +1113,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1147,7 +1149,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1791,11 +1793,6 @@ static int ni_pcimio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_pcimio_driver);
}
-static void ni_pcimio_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(ni_pcimio_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NI, 0x0162) },
{ PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1170) },
@@ -1858,7 +1855,7 @@ static struct pci_driver ni_pcimio_pci_driver = {
.name = "ni_pcimio",
.id_table = ni_pcimio_pci_table,
.probe = ni_pcimio_pci_probe,
- .remove = ni_pcimio_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_pcimio_driver, ni_pcimio_pci_driver);
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 98f87897e2a8..225287769dc1 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -276,7 +276,7 @@ static inline unsigned NI_660x_RTSI_Second_Gate_Select(unsigned n)
}
static const unsigned int counter_status_mask =
- COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
+ COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
static int __init ni_tio_init_module(void)
{
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 0c991b99da13..13747f324936 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -159,6 +159,7 @@ static int ni_tio_input_cmd(struct ni_gpct *counter, struct comedi_async *async)
async->inttrig = NULL;
mite_dma_arm(counter->mite_chan);
retval = ni_tio_arm(counter, 1, cmd->start_arg);
+ break;
case TRIG_OTHER:
async->inttrig = NULL;
mite_dma_arm(counter->mite_chan);
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 06127a5f62a0..b5af22eb7c37 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -243,8 +243,8 @@ static const struct comedi_lrange range_pcl818l_h_ai = { 4, {
};
static const struct comedi_lrange range718_bipolar1 = { 1, {BIP_RANGE(1),} };
-static const struct comedi_lrange range718_bipolar0_5 =
- { 1, {BIP_RANGE(0.5),} };
+static const struct comedi_lrange range718_bipolar0_5 = {
+ 1, {BIP_RANGE(0.5),} };
static const struct comedi_lrange range718_unipolar2 = { 1, {UNI_RANGE(2),} };
static const struct comedi_lrange range718_unipolar1 = { 1, {BIP_RANGE(1),} };
@@ -1005,17 +1005,14 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
switch (devpriv->dma) {
case 1: /* DMA */
case 3:
- if (devpriv->dma_rtc == 0) {
+ if (devpriv->dma_rtc == 0)
pcl818_ai_mode13dma_int(mode, dev, s);
- }
#ifdef unused
- else {
+ else
pcl818_ai_mode13dma_rtc(mode, dev, s);
- }
#else
- else {
+ else
return -EINVAL;
- }
#endif
break;
case 0:
@@ -1069,7 +1066,7 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
*/
#ifdef PCL818_MODE13_AO
static int pcl818_ao_mode13(int mode, struct comedi_device *dev,
- struct comedi_subdevice *s, comedi_trig * it)
+ struct comedi_subdevice *s, comedi_trig *it)
{
struct pcl818_private *devpriv = dev->private;
int divisor1 = 0, divisor2 = 0;
@@ -1124,7 +1121,7 @@ static int pcl818_ao_mode13(int mode, struct comedi_device *dev,
ANALOG OUTPUT MODE 1, 818 cards
*/
static int pcl818_ao_mode1(struct comedi_device *dev,
- struct comedi_subdevice *s, comedi_trig * it)
+ struct comedi_subdevice *s, comedi_trig *it)
{
return pcl818_ao_mode13(1, dev, s, it);
}
@@ -1134,7 +1131,7 @@ static int pcl818_ao_mode1(struct comedi_device *dev,
ANALOG OUTPUT MODE 3, 818 cards
*/
static int pcl818_ao_mode3(struct comedi_device *dev,
- struct comedi_subdevice *s, comedi_trig * it)
+ struct comedi_subdevice *s, comedi_trig *it)
{
return pcl818_ao_mode13(3, dev, s, it);
}
diff --git a/drivers/staging/comedi/drivers/pcm_common.c b/drivers/staging/comedi/drivers/pcm_common.c
deleted file mode 100644
index 8a718aea6f3c..000000000000
--- a/drivers/staging/comedi/drivers/pcm_common.c
+++ /dev/null
@@ -1,63 +0,0 @@
-#include "../comedidev.h"
-
-#include "comedi_fc.h"
-#include "pcm_common.h"
-
-int comedi_pcm_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(cmd->start_src);
- err |= cfc_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- switch (cmd->stop_src) {
- case TRIG_COUNT:
- /* any count allowed */
- break;
- case TRIG_NONE:
- err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
- break;
- default:
- break;
- }
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- /* if (err) return 4; */
-
- return 0;
-}
-EXPORT_SYMBOL(comedi_pcm_cmdtest);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcm_common.h b/drivers/staging/comedi/drivers/pcm_common.h
deleted file mode 100644
index cd4840c11444..000000000000
--- a/drivers/staging/comedi/drivers/pcm_common.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _comedi_common_H
-#define _comedi_common_H
-
-extern int comedi_pcm_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd);
-
-#endif
diff --git a/drivers/staging/comedi/drivers/pcmda12.c b/drivers/staging/comedi/drivers/pcmda12.c
index 0882dafaf57b..13f79f49748a 100644
--- a/drivers/staging/comedi/drivers/pcmda12.c
+++ b/drivers/staging/comedi/drivers/pcmda12.c
@@ -53,9 +53,6 @@ Configuration Options:
#include "../comedidev.h"
-#include <linux/pci.h> /* for PCI devices */
-
-#define SDEV_NO ((int)(s - dev->subdevices))
#define CHANS 8
#define IOSIZE 16
#define LSB(x) ((unsigned char)((x) & 0xff))
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 7522bfb6db08..5fa1fe08eb97 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -78,9 +78,10 @@ Configuration Options:
#include <linux/interrupt.h>
#include <linux/slab.h>
+
#include "../comedidev.h"
-#include "pcm_common.h"
-#include <linux/pci.h> /* for PCI devices */
+
+#include "comedi_fc.h"
/* This stuff is all from pcmuio.c -- it refers to the DIO subdevices only */
#define CHANS_PER_PORT 8
@@ -93,7 +94,6 @@ Configuration Options:
#define INTR_PORTS_PER_SUBDEV (INTR_CHANS_PER_ASIC/CHANS_PER_PORT)
#define MAX_DIO_CHANS (PORTS_PER_ASIC*1*CHANS_PER_PORT)
#define MAX_ASICS (MAX_DIO_CHANS/CHANS_PER_ASIC)
-#define SDEV_NO ((int)(s - dev->subdevices))
#define CALC_N_DIO_SUBDEVS(nchans) ((nchans)/MAX_CHANS_PER_SUBDEV + (!!((nchans)%MAX_CHANS_PER_SUBDEV)) /*+ (nchans > INTR_CHANS_PER_ASIC ? 2 : 1)*/)
/* IO Memory sizes */
#define ASIC_IOSIZE (0x0B)
@@ -802,11 +802,59 @@ static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-static int
-pcmmio_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int pcmmio_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- return comedi_pcm_cmdtest(dev, s, cmd);
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->start_src);
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ switch (cmd->stop_src) {
+ case TRIG_COUNT:
+ /* any count allowed */
+ break;
+ case TRIG_NONE:
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (err)
+ return 3;
+
+ /* step 4: fix up any arguments */
+
+ /* if (err) return 4; */
+
+ return 0;
}
static int adc_wait_ready(unsigned long iobase)
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 31ea20c2d39e..433270ceda4f 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -77,10 +77,10 @@ Configuration Options:
#include <linux/interrupt.h>
#include <linux/slab.h>
+
#include "../comedidev.h"
-#include "pcm_common.h"
-#include <linux/pci.h> /* for PCI devices */
+#include "comedi_fc.h"
#define CHANS_PER_PORT 8
#define PORTS_PER_ASIC 6
@@ -92,7 +92,6 @@ Configuration Options:
#define INTR_PORTS_PER_SUBDEV (INTR_CHANS_PER_ASIC/CHANS_PER_PORT)
#define MAX_DIO_CHANS (PORTS_PER_ASIC*2*CHANS_PER_PORT)
#define MAX_ASICS (MAX_DIO_CHANS/CHANS_PER_ASIC)
-#define SDEV_NO ((int)(s - dev->subdevices))
#define CALC_N_SUBDEVS(nchans) ((nchans)/MAX_CHANS_PER_SUBDEV + (!!((nchans)%MAX_CHANS_PER_SUBDEV)) /*+ (nchans > INTR_CHANS_PER_ASIC ? 2 : 1)*/)
/* IO Memory sizes */
#define ASIC_IOSIZE (0x10)
@@ -740,11 +739,59 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-static int
-pcmuio_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int pcmuio_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- return comedi_pcm_cmdtest(dev, s, cmd);
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->start_src);
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ switch (cmd->stop_src) {
+ case TRIG_COUNT:
+ /* any count allowed */
+ break;
+ case TRIG_NONE:
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (err)
+ return 3;
+
+ /* step 4: fix up any arguments */
+
+ /* if (err) return 4; */
+
+ return 0;
}
static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -791,14 +838,11 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
chans_left = CHANS_PER_ASIC * board->num_asics;
n_subdevs = CALC_N_SUBDEVS(chans_left);
- devpriv->sprivs =
- kcalloc(n_subdevs, sizeof(struct pcmuio_subdev_private),
- GFP_KERNEL);
- if (!devpriv->sprivs) {
- dev_warn(dev->class_dev,
- "cannot allocate subdevice private data structures\n");
+ devpriv->sprivs = kcalloc(n_subdevs,
+ sizeof(struct pcmuio_subdev_private),
+ GFP_KERNEL);
+ if (!devpriv->sprivs)
return -ENOMEM;
- }
ret = comedi_alloc_subdevices(dev, n_subdevs);
if (ret)
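For reference, the trigger combination accepted by the cmdtest now inlined into pcmuio (and pcmmio above) is quite narrow. An illustrative userspace comedi_cmd that passes every cfc_check_trigger_* step is sketched below; the subdevice number, chanlist and chanlist_len are made-up values for a one-channel scan.

#include <comedilib.h>

/* Illustrative command that satisfies Steps 1-3 of the checks above. */
static unsigned int example_chanlist[1] = { CR_PACK(0, 0, AREF_GROUND) };

static struct comedi_cmd example_cmd = {
	.subdev		= 2,			/* assumed DIO interrupt subdevice */
	.start_src	= TRIG_INT,
	.start_arg	= 0,
	.scan_begin_src	= TRIG_EXT,
	.scan_begin_arg	= 0,
	.convert_src	= TRIG_NOW,
	.convert_arg	= 0,
	.scan_end_src	= TRIG_COUNT,
	.scan_end_arg	= 1,			/* must equal chanlist_len */
	.stop_src	= TRIG_NONE,
	.stop_arg	= 0,			/* TRIG_NONE requires arg == 0 */
	.chanlist	= example_chanlist,
	.chanlist_len	= 1,
};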
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index ef0cdaa7f02e..911eb6b32296 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -47,8 +47,6 @@ Status: works
Devices: [Quatech] DAQP-208 (daqp), DAQP-308
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "../comedidev.h"
#include <linux/semaphore.h>
@@ -60,28 +58,16 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
#include "comedi_fc.h"
-/* Maximum number of separate DAQP devices we'll allow */
-#define MAX_DEV 4
-
-struct local_info_t {
- struct pcmcia_device *link;
+struct daqp_private {
int stop;
- int table_index;
- char board_name[32];
enum { semaphore, buffer } interrupt_mode;
struct completion eos;
- struct comedi_device *dev;
- struct comedi_subdevice *s;
int count;
};
-/* A list of "instances" of the device. */
-
-static struct local_info_t *dev_table[MAX_DEV] = { NULL, /* ... */ };
-
/* The DAQP communicates with the system through a 16 byte I/O window. */
#define DAQP_FIFO_SIZE 4096
@@ -165,84 +151,38 @@ static struct local_info_t *dev_table[MAX_DEV] = { NULL, /* ... */ };
#define DAQP_AUX_FIFO_NEARFULL 0x02
#define DAQP_AUX_FIFO_EMPTY 0x01
-/* These range structures tell COMEDI how the sample values map to
- * voltages. The A/D converter has four .ranges = +/- 10V through
- * +/- 1.25V, and the D/A converter has only .one = +/- 5V.
- */
-
-static const struct comedi_lrange range_daqp_ai = { 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_daqp_ao = { 1, {BIP_RANGE(5)} };
-
-/*====================================================================*/
-
-/* comedi interface code */
-
-static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it);
-static void daqp_detach(struct comedi_device *dev);
-static struct comedi_driver driver_daqp = {
- .driver_name = "quatech_daqp_cs",
- .module = THIS_MODULE,
- .attach = daqp_attach,
- .detach = daqp_detach,
-};
-
-#ifdef DAQP_DEBUG
-
-static void daqp_dump(struct comedi_device *dev)
-{
- dev_info(dev->class_dev, "status %02x; aux status %02x\n",
- inb(dev->iobase + DAQP_STATUS), inb(dev->iobase + DAQP_AUX));
-}
-
-static void hex_dump(char *str, void *ptr, int len)
-{
- unsigned char *cptr = ptr;
- int i;
-
- printk(str);
-
- for (i = 0; i < len; i++) {
- if (i % 16 == 0)
- printk("\n%p:", cptr);
-
- printk(" %02x", *(cptr++));
+static const struct comedi_lrange range_daqp_ai = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
}
- printk("\n");
-}
-
-#endif
+};
/* Cancel a running acquisition */
static int daqp_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct local_info_t *local = (struct local_info_t *)s->private;
+ struct daqp_private *devpriv = dev->private;
- if (local->stop)
+ if (devpriv->stop)
return -EIO;
-
outb(DAQP_COMMAND_STOP, dev->iobase + DAQP_COMMAND);
/* flush any lingering data in FIFO - superfluous here */
/* outb(DAQP_COMMAND_RSTF, dev->iobase+DAQP_COMMAND); */
- local->interrupt_mode = semaphore;
+ devpriv->interrupt_mode = semaphore;
return 0;
}
/* Interrupt handler
*
- * Operates in one of two modes. If local->interrupt_mode is
- * 'semaphore', just signal the local->eos completion and return
+ * Operates in one of two modes. If devpriv->interrupt_mode is
+ * 'semaphore', just signal the devpriv->eos completion and return
* (one-shot mode). Otherwise (continuous mode), read data in from
* the card, transfer it to the buffer provided by the higher-level
* comedi kernel module, and signal various comedi callback routines,
@@ -250,48 +190,21 @@ static int daqp_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
*/
static enum irqreturn daqp_interrupt(int irq, void *dev_id)
{
- struct local_info_t *local = (struct local_info_t *)dev_id;
- struct comedi_device *dev;
- struct comedi_subdevice *s;
+ struct comedi_device *dev = dev_id;
+ struct daqp_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
int loop_limit = 10000;
int status;
- if (local == NULL) {
- pr_warn("irq %d for unknown device.\n", irq);
+ if (!dev->attached)
return IRQ_NONE;
- }
-
- dev = local->dev;
- if (dev == NULL) {
- pr_warn("NULL comedi_device.\n");
- return IRQ_NONE;
- }
-
- if (!dev->attached) {
- pr_warn("struct comedi_device not yet attached.\n");
- return IRQ_NONE;
- }
-
- s = local->s;
- if (s == NULL) {
- pr_warn("NULL comedi_subdevice.\n");
- return IRQ_NONE;
- }
-
- if ((struct local_info_t *)s->private != local) {
- pr_warn("invalid comedi_subdevice.\n");
- return IRQ_NONE;
- }
-
- switch (local->interrupt_mode) {
+ switch (devpriv->interrupt_mode) {
case semaphore:
-
- complete(&local->eos);
+ complete(&devpriv->eos);
break;
case buffer:
-
while (!((status = inb(dev->iobase + DAQP_STATUS))
& DAQP_STATUS_FIFO_EMPTY)) {
@@ -315,9 +228,9 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
* and stop conversion if zero
*/
- if (local->count > 0) {
- local->count--;
- if (local->count == 0) {
+ if (devpriv->count > 0) {
+ devpriv->count--;
+ if (devpriv->count == 0) {
daqp_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
break;
@@ -342,21 +255,41 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void daqp_ai_set_one_scanlist_entry(struct comedi_device *dev,
+ unsigned int chanspec,
+ int start)
+{
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+ unsigned int val;
+
+ val = DAQP_SCANLIST_CHANNEL(chan) | DAQP_SCANLIST_GAIN(range);
+
+ if (aref == AREF_DIFF)
+ val |= DAQP_SCANLIST_DIFFERENTIAL;
+
+ if (start)
+ val |= DAQP_SCANLIST_START;
+
+ outb(val & 0xff, dev->iobase + DAQP_SCANLIST);
+ outb((val >> 8) & 0xff, dev->iobase + DAQP_SCANLIST);
+}
+
/* One-shot analog data acquisition routine */
static int daqp_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- struct local_info_t *local = (struct local_info_t *)s->private;
+ struct daqp_private *devpriv = dev->private;
int i;
int v;
int counter = 10000;
- if (local->stop)
+ if (devpriv->stop)
return -EIO;
-
/* Stop any running conversion */
daqp_ai_cancel(dev, s);
@@ -366,18 +299,7 @@ static int daqp_ai_insn_read(struct comedi_device *dev,
outb(DAQP_COMMAND_RSTQ, dev->iobase + DAQP_COMMAND);
/* Program one scan list entry */
-
- v = DAQP_SCANLIST_CHANNEL(CR_CHAN(insn->chanspec))
- | DAQP_SCANLIST_GAIN(CR_RANGE(insn->chanspec));
-
- if (CR_AREF(insn->chanspec) == AREF_DIFF)
- v |= DAQP_SCANLIST_DIFFERENTIAL;
-
-
- v |= DAQP_SCANLIST_START;
-
- outb(v & 0xff, dev->iobase + DAQP_SCANLIST);
- outb(v >> 8, dev->iobase + DAQP_SCANLIST);
+ daqp_ai_set_one_scanlist_entry(dev, insn->chanspec, 1);
/* Reset data FIFO (see page 28 of DAQP User's Manual) */
@@ -403,10 +325,8 @@ static int daqp_ai_insn_read(struct comedi_device *dev,
return -1;
}
- init_completion(&local->eos);
- local->interrupt_mode = semaphore;
- local->dev = dev;
- local->s = s;
+ init_completion(&devpriv->eos);
+ devpriv->interrupt_mode = semaphore;
for (i = 0; i < insn->n; i++) {
@@ -416,7 +336,7 @@ static int daqp_ai_insn_read(struct comedi_device *dev,
/* Wait for interrupt service routine to unblock completion */
/* Maybe could use a timeout here, but it's interruptible */
- if (wait_for_completion_interruptible(&local->eos))
+ if (wait_for_completion_interruptible(&devpriv->eos))
return -EINTR;
data[i] = inb(dev->iobase + DAQP_FIFO);
@@ -541,7 +461,7 @@ static int daqp_ai_cmdtest(struct comedi_device *dev,
static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct local_info_t *local = (struct local_info_t *)s->private;
+ struct daqp_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
int counter;
int scanlist_start_on_every_entry;
@@ -550,10 +470,9 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
int i;
int v;
- if (local->stop)
+ if (devpriv->stop)
return -EIO;
-
/* Stop any running conversion */
daqp_ai_cancel(dev, s);
@@ -592,24 +511,10 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
/* Program scan list */
-
for (i = 0; i < cmd->chanlist_len; i++) {
+ int start = (i == 0 || scanlist_start_on_every_entry);
- int chanspec = cmd->chanlist[i];
-
- /* Program one scan list entry */
-
- v = DAQP_SCANLIST_CHANNEL(CR_CHAN(chanspec))
- | DAQP_SCANLIST_GAIN(CR_RANGE(chanspec));
-
- if (CR_AREF(chanspec) == AREF_DIFF)
- v |= DAQP_SCANLIST_DIFFERENTIAL;
-
- if (i == 0 || scanlist_start_on_every_entry)
- v |= DAQP_SCANLIST_START;
-
- outb(v & 0xff, dev->iobase + DAQP_SCANLIST);
- outb(v >> 8, dev->iobase + DAQP_SCANLIST);
+ daqp_ai_set_one_scanlist_entry(dev, cmd->chanlist[i], start);
}
/* Now it's time to program the FIFO threshold, basically the
@@ -675,16 +580,16 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* Save away the number of conversions we should perform, and
* compute the FIFO threshold (in bytes, not samples - that's
- * why we multiple local->count by 2 = sizeof(sample))
+ * why we multiply devpriv->count by 2 = sizeof(sample))
*/
if (cmd->stop_src == TRIG_COUNT) {
- local->count = cmd->stop_arg * cmd->scan_end_arg;
- threshold = 2 * local->count;
+ devpriv->count = cmd->stop_arg * cmd->scan_end_arg;
+ threshold = 2 * devpriv->count;
while (threshold > DAQP_FIFO_SIZE * 3 / 4)
threshold /= 2;
} else {
- local->count = -1;
+ devpriv->count = -1;
threshold = DAQP_FIFO_SIZE / 2;
}
@@ -726,9 +631,7 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return -1;
}
- local->interrupt_mode = buffer;
- local->dev = dev;
- local->s = s;
+ devpriv->interrupt_mode = buffer;
/* Start conversion */
outb(DAQP_COMMAND_ARM | DAQP_COMMAND_FIFO_DATA,
@@ -737,341 +640,193 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-/* Single-shot analog output routine */
-
static int daqp_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct local_info_t *local = (struct local_info_t *)s->private;
- int d;
- unsigned int chan;
+ struct daqp_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
- if (local->stop)
+ if (devpriv->stop)
return -EIO;
- chan = CR_CHAN(insn->chanspec);
- d = data[0];
- d &= 0x0fff;
- d ^= 0x0800; /* Flip the sign */
- d |= chan << 12;
-
/* Make sure D/A update mode is direct update */
outb(0, dev->iobase + DAQP_AUX);
- outw(d, dev->iobase + DAQP_DA);
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+ val &= 0x0fff;
+ val ^= 0x0800; /* Flip the sign */
+ val |= (chan << 12);
- return 1;
-}
+ outw(val, dev->iobase + DAQP_DA);
+ }
-/* Digital input routine */
+ return insn->n;
+}
-static int daqp_di_insn_read(struct comedi_device *dev,
+static int daqp_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct local_info_t *local = (struct local_info_t *)s->private;
+ struct daqp_private *devpriv = dev->private;
- if (local->stop)
+ if (devpriv->stop)
return -EIO;
data[0] = inb(dev->iobase + DAQP_DIGITAL_IO);
- return 1;
+ return insn->n;
}
-/* Digital output routine */
-
-static int daqp_do_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int daqp_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct local_info_t *local = (struct local_info_t *)s->private;
+ struct daqp_private *devpriv = dev->private;
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
- if (local->stop)
+ if (devpriv->stop)
return -EIO;
- outw(data[0] & 0xf, dev->iobase + DAQP_DIGITAL_IO);
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
- return 1;
-}
+ outb(s->state, dev->iobase + DAQP_DIGITAL_IO);
+ }
-/* daqp_attach is called via comedi_config to attach a comedi device
- * to a /dev/comedi*. Note that this is different from daqp_cs_attach()
- * which is called by the pcmcia subsystem to attach the PCMCIA card
- * when it is inserted.
- */
+ data[1] = s->state;
-static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+ return insn->n;
+}
+
+static int daqp_auto_attach(struct comedi_device *dev,
+ unsigned long context)
{
- int ret;
- struct local_info_t *local = dev_table[it->options[0]];
+ struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
+ struct daqp_private *devpriv;
struct comedi_subdevice *s;
+ int ret;
- if (it->options[0] < 0 || it->options[0] >= MAX_DEV || !local) {
- dev_err(dev->class_dev, "No such daqp device %d\n",
- it->options[0]);
- return -EIO;
- }
+ dev->board_name = dev->driver->driver_name;
- /* Typically brittle code that I don't completely understand,
- * but "it works on my card". The intent is to pull the model
- * number of the card out the PCMCIA CIS and stash it away as
- * the COMEDI board_name. Looks like the third field in
- * CISTPL_VERS_1 (offset 2) holds what we're looking for. If
- * it doesn't work, who cares, just leave it as "DAQP".
- */
+ devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ if (!devpriv)
+ return -ENOMEM;
+ dev->private = devpriv;
- strcpy(local->board_name, "DAQP");
- dev->board_name = local->board_name;
- if (local->link->prod_id[2]) {
- if (strncmp(local->link->prod_id[2], "DAQP", 4) == 0) {
- strncpy(local->board_name, local->link->prod_id[2],
- sizeof(local->board_name));
- }
- }
+ link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
+ ret = comedi_pcmcia_enable(dev, NULL);
+ if (ret)
+ return ret;
+ dev->iobase = link->resource[0]->start;
- dev->iobase = local->link->resource[0]->start;
+ link->priv = dev;
+ ret = pcmcia_request_irq(link, daqp_interrupt);
+ if (ret)
+ return ret;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
- dev_info(dev->class_dev, "attaching daqp%d (io 0x%04lx)\n",
- it->options[0], dev->iobase);
-
s = &dev->subdevices[0];
dev->read_subdev = s;
- s->private = local;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
- s->n_chan = 8;
- s->len_chanlist = 2048;
- s->maxdata = 0xffff;
- s->range_table = &range_daqp_ai;
- s->insn_read = daqp_ai_insn_read;
- s->do_cmdtest = daqp_ai_cmdtest;
- s->do_cmd = daqp_ai_cmd;
- s->cancel = daqp_ai_cancel;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
+ s->n_chan = 8;
+ s->len_chanlist = 2048;
+ s->maxdata = 0xffff;
+ s->range_table = &range_daqp_ai;
+ s->insn_read = daqp_ai_insn_read;
+ s->do_cmdtest = daqp_ai_cmdtest;
+ s->do_cmd = daqp_ai_cmd;
+ s->cancel = daqp_ai_cancel;
s = &dev->subdevices[1];
- dev->write_subdev = s;
- s->private = local;
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
- s->n_chan = 2;
- s->len_chanlist = 1;
- s->maxdata = 0x0fff;
- s->range_table = &range_daqp_ao;
- s->insn_write = daqp_ao_insn_write;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_bipolar5;
+ s->insn_write = daqp_ao_insn_write;
s = &dev->subdevices[2];
- s->private = local;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 1;
- s->len_chanlist = 1;
- s->insn_read = daqp_di_insn_read;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 1;
+ s->maxdata = 1;
+ s->insn_bits = daqp_di_insn_bits;
s = &dev->subdevices[3];
- s->private = local;
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
- s->n_chan = 1;
- s->len_chanlist = 1;
- s->insn_write = daqp_do_insn_write;
-
- return 1;
-}
-
-static void daqp_detach(struct comedi_device *dev)
-{
- /* Nothing to cleanup */
-}
-
-/*====================================================================
-
- PCMCIA interface code
-
- The rest of the code in this file is based on dummy_cs.c v1.24
- from the Linux pcmcia_cs distribution v3.1.8 and is subject
- to the following license agreement.
-
- The remaining contents of this file are subject to the Mozilla Public
- License Version 1.1 (the "License"); you may not use this file
- except in compliance with the License. You may obtain a copy of
- the License at http://www.mozilla.org/MPL/
-
- Software distributed under the License is distributed on an "AS
- IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- implied. See the License for the specific language governing
- rights and limitations under the License.
-
- The initial developer of the original code is David A. Hinds
- <dhinds@pcmcia.sourceforge.org>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
- Alternatively, the contents of this file may be used under the
- terms of the GNU Public License version 2 (the "GPL"), in which
- case the provisions of the GPL are applicable instead of the
- above. If you wish to allow the use of your version of this file
- only under the terms of the GPL and not to allow others to use
- your version of this file under the MPL, indicate your decision
- by deleting the provisions above and replace them with the notice
- and other provisions required by the GPL. If you do not delete
- the provisions above, a recipient may use your version of this
- file under either the MPL or the GPL.
-
-======================================================================*/
-
-static void daqp_cs_config(struct pcmcia_device *link);
-static void daqp_cs_release(struct pcmcia_device *link);
-static int daqp_cs_suspend(struct pcmcia_device *p_dev);
-static int daqp_cs_resume(struct pcmcia_device *p_dev);
-
-static int daqp_cs_attach(struct pcmcia_device *);
-static void daqp_cs_detach(struct pcmcia_device *);
-
-static int daqp_cs_attach(struct pcmcia_device *link)
-{
- struct local_info_t *local;
- int i;
-
- dev_dbg(&link->dev, "daqp_cs_attach()\n");
-
- for (i = 0; i < MAX_DEV; i++)
- if (dev_table[i] == NULL)
- break;
- if (i == MAX_DEV) {
- dev_notice(&link->dev, "no devices available\n");
- return -ENODEV;
- }
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
- if (!local)
- return -ENOMEM;
-
- local->table_index = i;
- dev_table[i] = local;
- local->link = link;
- link->priv = local;
-
- daqp_cs_config(link);
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 1;
+ s->maxdata = 1;
+ s->insn_bits = daqp_do_insn_bits;
return 0;
-} /* daqp_cs_attach */
-
-static void daqp_cs_detach(struct pcmcia_device *link)
-{
- struct local_info_t *dev = link->priv;
-
- dev->stop = 1;
- daqp_cs_release(link);
-
- /* Unlink device structure, and free it */
- dev_table[dev->table_index] = NULL;
- kfree(dev);
-
}
-static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev, void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
-static void daqp_cs_config(struct pcmcia_device *link)
-{
- int ret;
-
- dev_dbg(&link->dev, "daqp_cs_config\n");
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, daqp_pcmcia_config_loop, NULL);
- if (ret) {
- dev_warn(&link->dev, "no configuration found\n");
- goto failed;
- }
-
- ret = pcmcia_request_irq(link, daqp_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- return;
-
-failed:
- daqp_cs_release(link);
-
-} /* daqp_cs_config */
-
-static void daqp_cs_release(struct pcmcia_device *link)
-{
- dev_dbg(&link->dev, "daqp_cs_release\n");
-
- pcmcia_disable_device(link);
-} /* daqp_cs_release */
+static struct comedi_driver driver_daqp = {
+ .driver_name = "quatech_daqp_cs",
+ .module = THIS_MODULE,
+ .auto_attach = daqp_auto_attach,
+ .detach = comedi_pcmcia_disable,
+};
static int daqp_cs_suspend(struct pcmcia_device *link)
{
- struct local_info_t *local = link->priv;
+ struct comedi_device *dev = link->priv;
+ struct daqp_private *devpriv = dev ? dev->private : NULL;
/* Mark the device as stopped, to block IO until later */
- local->stop = 1;
+ if (devpriv)
+ devpriv->stop = 1;
+
return 0;
}
static int daqp_cs_resume(struct pcmcia_device *link)
{
- struct local_info_t *local = link->priv;
+ struct comedi_device *dev = link->priv;
+ struct daqp_private *devpriv = dev ? dev->private : NULL;
- local->stop = 0;
+ if (devpriv)
+ devpriv->stop = 0;
return 0;
}
-/*====================================================================*/
-
-#ifdef MODULE
+static int daqp_cs_attach(struct pcmcia_device *link)
+{
+ return comedi_pcmcia_auto_config(link, &driver_daqp);
+}
static const struct pcmcia_device_id daqp_cs_id_table[] = {
PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0027),
PCMCIA_DEVICE_NULL
};
-
MODULE_DEVICE_TABLE(pcmcia, daqp_cs_id_table);
-MODULE_AUTHOR("Brent Baccala <baccala@freesoft.org>");
-MODULE_DESCRIPTION("Comedi driver for Quatech DAQP PCMCIA data capture cards");
-MODULE_LICENSE("GPL");
static struct pcmcia_driver daqp_cs_driver = {
- .probe = daqp_cs_attach,
- .remove = daqp_cs_detach,
- .suspend = daqp_cs_suspend,
- .resume = daqp_cs_resume,
- .id_table = daqp_cs_id_table,
- .owner = THIS_MODULE,
- .name = "quatech_daqp_cs",
+ .name = "quatech_daqp_cs",
+ .owner = THIS_MODULE,
+ .id_table = daqp_cs_id_table,
+ .probe = daqp_cs_attach,
+ .remove = comedi_pcmcia_auto_unconfig,
+ .suspend = daqp_cs_suspend,
+ .resume = daqp_cs_resume,
};
+module_comedi_pcmcia_driver(driver_daqp, daqp_cs_driver);
-int __init init_module(void)
-{
- pcmcia_register_driver(&daqp_cs_driver);
- comedi_driver_register(&driver_daqp);
- return 0;
-}
-
-void __exit cleanup_module(void)
-{
- comedi_driver_unregister(&driver_daqp);
- pcmcia_unregister_driver(&daqp_cs_driver);
-}
-
-#endif
+MODULE_DESCRIPTION("Comedi driver for Quatech DAQP PCMCIA data capture cards");
+MODULE_AUTHOR("Brent Baccala <baccala@freesoft.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 8d7c948a919c..6a5c914fa501 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -101,8 +101,9 @@ Configuration options:
*/
-#include <linux/interrupt.h>
+#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include "../comedidev.h"
@@ -1420,11 +1421,6 @@ static int rtd520_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &rtd520_driver);
}
-static void rtd520_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static DEFINE_PCI_DEVICE_TABLE(rtd520_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RTD, 0x7520) },
{ PCI_DEVICE(PCI_VENDOR_ID_RTD, 0x4520) },
@@ -1436,7 +1432,7 @@ static struct pci_driver rtd520_pci_driver = {
.name = "rtd520",
.id_table = rtd520_pci_table,
.probe = rtd520_pci_probe,
- .remove = rtd520_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(rtd520_driver, rtd520_pci_driver);
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 6dc1d2812865..81a1fe661579 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -64,6 +64,7 @@ INSN_CONFIG instructions:
comedi_do_insn(cf,&insn); //executing configuration
*/
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -2836,11 +2837,6 @@ static int s626_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &s626_driver);
}
-static void s626_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
/*
* For devices with vendor:device id == 0x1131:0x7146 you must specify
* also subvendor:subdevice ids, because otherwise it will conflict with
@@ -2857,7 +2853,7 @@ static struct pci_driver s626_pci_driver = {
.name = "s626",
.id_table = s626_pci_table,
.probe = s626_pci_probe,
- .remove = s626_pci_remove,
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(s626_driver, s626_pci_driver);
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index e2d79700a615..cb83f6ae48b9 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -72,9 +72,9 @@ Configuration Options:
* options that are used with comedi_config.
*/
-#include "../comedidev.h"
+#include <linux/pci.h>
-#include <linux/pci.h> /* for PCI devices */
+#include "../comedidev.h"
#include "comedi_fc.h"
@@ -707,15 +707,11 @@ static int skel_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &skel_driver);
}
-static void skel_pci_remove(struct pci_dev *dev)
-{
- comedi_pci_auto_unconfig(dev);
-}
-
static struct pci_driver skel_pci_driver = {
+ .name = "dummy",
.id_table = skel_pci_table,
.probe = &skel_pci_probe,
- .remove = &skel_pci_remove
+ .remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(skel_driver, skel_pci_driver);
#else
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index c9ded938314f..74b974bf1032 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -380,12 +380,8 @@ static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
}
usp = kzalloc(sizeof(*usp), GFP_KERNEL);
-
- if (usp == NULL) {
- dev_err(subdev->class_dev,
- "comedi%d: error! --> out of memory!\n", minor);
+ if (usp == NULL)
return -1;
- }
usp->usp_iobase = subdev_iobase;
dev_info(subdev->class_dev, "comedi%d: |", minor);
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 17b45ebb0553..1a0062a04456 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -2388,7 +2388,7 @@ static void usbdux_firmware_request_complete_handler(const struct firmware *fw,
"Could not upload firmware (err=%d)\n", ret);
goto out;
}
- comedi_usb_auto_config(uinterf, &usbdux_driver);
+ comedi_usb_auto_config(uinterf, &usbdux_driver, 0);
out:
release_firmware(fw);
}
@@ -2445,8 +2445,6 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
/* create space for the commands of the DA converter */
usbduxsub[index].dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL);
if (!usbduxsub[index].dac_commands) {
- dev_err(dev, "comedi_: usbdux: "
- "error alloc space for dac commands\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2454,8 +2452,6 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
/* create space for the commands going to the usb device */
usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
if (!usbduxsub[index].dux_commands) {
- dev_err(dev, "comedi_: usbdux: "
- "error alloc space for dux commands\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2463,8 +2459,6 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
/* create space for the in buffer and set it to zero */
usbduxsub[index].inBuffer = kzalloc(SIZEINBUF, GFP_KERNEL);
if (!(usbduxsub[index].inBuffer)) {
- dev_err(dev, "comedi_: usbdux: "
- "could not alloc space for inBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2472,8 +2466,6 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
/* create space of the instruction buffer */
usbduxsub[index].insnBuffer = kzalloc(SIZEINSNBUF, GFP_KERNEL);
if (!(usbduxsub[index].insnBuffer)) {
- dev_err(dev, "comedi_: usbdux: "
- "could not alloc space for insnBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2481,8 +2473,6 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
/* create space for the outbuffer */
usbduxsub[index].outBuffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
if (!(usbduxsub[index].outBuffer)) {
- dev_err(dev, "comedi_: usbdux: "
- "could not alloc space for outBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2504,10 +2494,9 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSFULL;
usbduxsub[index].urbIn =
- kzalloc(sizeof(struct urb *) * usbduxsub[index].numOfInBuffers,
- GFP_KERNEL);
+ kcalloc(usbduxsub[index].numOfInBuffers, sizeof(struct urb *),
+ GFP_KERNEL);
if (!(usbduxsub[index].urbIn)) {
- dev_err(dev, "comedi_: usbdux: Could not alloc. urbIn array\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2532,8 +2521,6 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
usbduxsub[index].urbIn[i]->transfer_buffer =
kzalloc(SIZEINBUF, GFP_KERNEL);
if (!(usbduxsub[index].urbIn[i]->transfer_buffer)) {
- dev_err(dev, "comedi_: usbdux%d: "
- "could not alloc. transb.\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2552,11 +2539,9 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSFULL;
usbduxsub[index].urbOut =
- kzalloc(sizeof(struct urb *) * usbduxsub[index].numOfOutBuffers,
- GFP_KERNEL);
+ kcalloc(usbduxsub[index].numOfOutBuffers, sizeof(struct urb *),
+ GFP_KERNEL);
if (!(usbduxsub[index].urbOut)) {
- dev_err(dev, "comedi_: usbdux: "
- "Could not alloc. urbOut array\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2581,8 +2566,6 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
usbduxsub[index].urbOut[i]->transfer_buffer =
kzalloc(SIZEOUTBUF, GFP_KERNEL);
if (!(usbduxsub[index].urbOut[i]->transfer_buffer)) {
- dev_err(dev, "comedi_: usbdux%d: "
- "could not alloc. transb.\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2617,8 +2600,6 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
usbduxsub[index].urbPwm->transfer_buffer =
kzalloc(usbduxsub[index].sizePwmBuf, GFP_KERNEL);
if (!(usbduxsub[index].urbPwm->transfer_buffer)) {
- dev_err(dev, "comedi_: usbdux%d: "
- "could not alloc. transb. for pwm\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 4e19f6186f28..4bf5dd094dc9 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1490,7 +1490,7 @@ static void usbduxfast_firmware_request_complete_handler(const struct firmware
goto out;
}
- comedi_usb_auto_config(uinterf, &usbduxfast_driver);
+ comedi_usb_auto_config(uinterf, &usbduxfast_driver, 0);
out:
release_firmware(fw);
}
@@ -1556,8 +1556,6 @@ static int usbduxfast_usb_probe(struct usb_interface *uinterf,
usbduxfastsub[index].dux_commands = kmalloc(SIZEOFDUXBUFFER,
GFP_KERNEL);
if (!usbduxfastsub[index].dux_commands) {
- dev_err(&uinterf->dev,
- "error alloc space for dac commands\n");
tidy_up(&(usbduxfastsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -1565,8 +1563,6 @@ static int usbduxfast_usb_probe(struct usb_interface *uinterf,
/* create space of the instruction buffer */
usbduxfastsub[index].insnBuffer = kmalloc(SIZEINSNBUF, GFP_KERNEL);
if (!usbduxfastsub[index].insnBuffer) {
- dev_err(&uinterf->dev,
- "could not alloc space for insnBuffer\n");
tidy_up(&(usbduxfastsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -1592,8 +1588,6 @@ static int usbduxfast_usb_probe(struct usb_interface *uinterf,
}
usbduxfastsub[index].transfer_buffer = kmalloc(SIZEINBUF, GFP_KERNEL);
if (!usbduxfastsub[index].transfer_buffer) {
- dev_err(&uinterf->dev,
- "usbduxfast%d: could not alloc. transb.\n", index);
tidy_up(&(usbduxfastsub[index]));
up(&start_stop_sem);
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index cdd279b1f61e..d066351a71b2 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -2374,7 +2374,7 @@ static void usbdux_firmware_request_complete_handler(const struct firmware *fw,
"Could not upload firmware (err=%d)\n", ret);
goto out;
}
- comedi_usb_auto_config(uinterf, &usbduxsigma_driver);
+ comedi_usb_auto_config(uinterf, &usbduxsigma_driver, 0);
out:
release_firmware(fw);
}
@@ -2431,8 +2431,6 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
/* create space for the commands of the DA converter */
usbduxsub[index].dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL);
if (!usbduxsub[index].dac_commands) {
- dev_err(dev, "comedi_: usbduxsigma: "
- "error alloc space for dac commands\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2440,8 +2438,6 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
/* create space for the commands going to the usb device */
usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
if (!usbduxsub[index].dux_commands) {
- dev_err(dev, "comedi_: usbduxsigma: "
- "error alloc space for dux commands\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2449,8 +2445,6 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
/* create space for the in buffer and set it to zero */
usbduxsub[index].inBuffer = kzalloc(SIZEINBUF, GFP_KERNEL);
if (!(usbduxsub[index].inBuffer)) {
- dev_err(dev, "comedi_: usbduxsigma: "
- "could not alloc space for inBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2458,8 +2452,6 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
/* create space of the instruction buffer */
usbduxsub[index].insnBuffer = kzalloc(SIZEINSNBUF, GFP_KERNEL);
if (!(usbduxsub[index].insnBuffer)) {
- dev_err(dev, "comedi_: usbduxsigma: "
- "could not alloc space for insnBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2467,8 +2459,6 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
/* create space for the outbuffer */
usbduxsub[index].outBuffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
if (!(usbduxsub[index].outBuffer)) {
- dev_err(dev, "comedi_: usbduxsigma: "
- "could not alloc space for outBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2489,12 +2479,10 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
else
usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSFULL;
- usbduxsub[index].urbIn =
- kzalloc(sizeof(struct urb *) * usbduxsub[index].numOfInBuffers,
- GFP_KERNEL);
+ usbduxsub[index].urbIn = kcalloc(usbduxsub[index].numOfInBuffers,
+ sizeof(struct urb *),
+ GFP_KERNEL);
if (!(usbduxsub[index].urbIn)) {
- dev_err(dev, "comedi_: usbduxsigma: "
- "Could not alloc. urbIn array\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2519,8 +2507,6 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
usbduxsub[index].urbIn[i]->transfer_buffer =
kzalloc(SIZEINBUF, GFP_KERNEL);
if (!(usbduxsub[index].urbIn[i]->transfer_buffer)) {
- dev_err(dev, "comedi_: usbduxsigma%d: "
- "could not alloc. transb.\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2539,12 +2525,9 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
else
usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSFULL;
- usbduxsub[index].urbOut =
- kzalloc(sizeof(struct urb *) * usbduxsub[index].numOfOutBuffers,
- GFP_KERNEL);
+ usbduxsub[index].urbOut = kcalloc(usbduxsub[index].numOfOutBuffers,
+ sizeof(struct urb *), GFP_KERNEL);
if (!(usbduxsub[index].urbOut)) {
- dev_err(dev, "comedi_: usbduxsigma: "
- "Could not alloc. urbOut array\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2569,8 +2552,6 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
usbduxsub[index].urbOut[i]->transfer_buffer =
kzalloc(SIZEOUTBUF, GFP_KERNEL);
if (!(usbduxsub[index].urbOut[i]->transfer_buffer)) {
- dev_err(dev, "comedi_: usbduxsigma%d: "
- "could not alloc. transb.\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
@@ -2606,8 +2587,6 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
usbduxsub[index].urbPwm->transfer_buffer =
kzalloc(usbduxsub[index].sizePwmBuf, GFP_KERNEL);
if (!(usbduxsub[index].urbPwm->transfer_buffer)) {
- dev_err(dev, "comedi_: usbduxsigma%d: "
- "could not alloc. transb. for pwm\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 609dc6915997..2be5087414f6 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -38,19 +38,6 @@ Supports:
- counter
- pwm
*/
-/*
-Changelog:
-
-0.8.81 -3- code completely rewritten (adjust driver logic)
-0.8.81 -2- full support for K8061
-0.8.81 -1- fix some mistaken among others the number of
- supported boards and I/O handling
-
-0.7.76 -4- renamed to vmk80xx
-0.7.76 -3- detect K8061 (only theoretically supported)
-0.7.76 -2- code completely rewritten (adjust driver logic)
-0.7.76 -1- support for digital and counter subdevice
-*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -113,30 +100,9 @@ enum {
#define VMK8061_CMD_RD_AO 0x0f
#define VMK8061_CMD_RD_PWM 0x10
-#define VMK80XX_MAX_BOARDS COMEDI_NUM_BOARD_MINORS
-
-#define TRANS_OUT_BUSY 1
-#define TRANS_IN_BUSY 2
-#define TRANS_IN_RUNNING 3
-
#define IC3_VERSION (1 << 0)
#define IC6_VERSION (1 << 1)
-#define URB_RCV_FLAG (1 << 0)
-#define URB_SND_FLAG (1 << 1)
-
-#ifdef CONFIG_COMEDI_DEBUG
-static int dbgcm = 1;
-#else
-static int dbgcm;
-#endif
-
-#define dbgcm(fmt, arg...) \
-do { \
- if (dbgcm) \
- printk(KERN_DEBUG fmt, ##arg); \
-} while (0)
-
enum vmk80xx_model {
VMK8055_MODEL,
VMK8061_MODEL
@@ -147,130 +113,73 @@ struct firmware_version {
unsigned char ic6_vers[32]; /* CPU */
};
-static const struct comedi_lrange vmk8055_range = {
- 1, {UNI_RANGE(5)}
-};
-
static const struct comedi_lrange vmk8061_range = {
- 2, {UNI_RANGE(5), UNI_RANGE(10)}
+ 2, {
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
struct vmk80xx_board {
const char *name;
enum vmk80xx_model model;
const struct comedi_lrange *range;
- __u8 ai_chans;
- __le16 ai_bits;
- __u8 ao_chans;
- __le16 ao_bits;
- __u8 di_chans;
- __le16 di_bits;
- __u8 do_chans;
- __le16 do_bits;
- __u8 cnt_chans;
- __le16 cnt_bits;
- __u8 pwm_chans;
- __le16 pwm_bits;
+ int ai_nchans;
+ unsigned int ai_maxdata;
+ int ao_nchans;
+ int di_nchans;
+ unsigned int cnt_maxdata;
+ int pwm_nchans;
+ unsigned int pwm_maxdata;
};
-enum {
- VMK80XX_SUBD_AI,
- VMK80XX_SUBD_AO,
- VMK80XX_SUBD_DI,
- VMK80XX_SUBD_DO,
- VMK80XX_SUBD_CNT,
- VMK80XX_SUBD_PWM,
+static const struct vmk80xx_board vmk80xx_boardinfo[] = {
+ [DEVICE_VMK8055] = {
+ .name = "K8055 (VM110)",
+ .model = VMK8055_MODEL,
+ .range = &range_unipolar5,
+ .ai_nchans = 2,
+ .ai_maxdata = 0x00ff,
+ .ao_nchans = 2,
+ .di_nchans = 6,
+ .cnt_maxdata = 0xffff,
+ },
+ [DEVICE_VMK8061] = {
+ .name = "K8061 (VM140)",
+ .model = VMK8061_MODEL,
+ .range = &vmk8061_range,
+ .ai_nchans = 8,
+ .ai_maxdata = 0x03ff,
+ .ao_nchans = 8,
+ .di_nchans = 8,
+ .cnt_maxdata = 0, /* unknown, device is not writeable */
+ .pwm_nchans = 1,
+ .pwm_maxdata = 0x03ff,
+ },
};
-struct vmk80xx_usb {
- struct usb_device *udev;
+struct vmk80xx_private {
+ struct usb_device *usb;
struct usb_interface *intf;
struct usb_endpoint_descriptor *ep_rx;
struct usb_endpoint_descriptor *ep_tx;
- struct usb_anchor rx_anchor;
- struct usb_anchor tx_anchor;
- struct vmk80xx_board board;
struct firmware_version fw;
struct semaphore limit_sem;
- wait_queue_head_t read_wait;
- wait_queue_head_t write_wait;
unsigned char *usb_rx_buf;
unsigned char *usb_tx_buf;
- unsigned long flags;
- int probed;
- int attached;
- int count;
+ enum vmk80xx_model model;
};
-static struct vmk80xx_usb vmb[VMK80XX_MAX_BOARDS];
-
-static DEFINE_MUTEX(glb_mutex);
-
-static void vmk80xx_tx_callback(struct urb *urb)
-{
- struct vmk80xx_usb *dev = urb->context;
- int stat = urb->status;
-
- if (stat && !(stat == -ENOENT
- || stat == -ECONNRESET || stat == -ESHUTDOWN))
- dbgcm("comedi#: vmk80xx: %s - nonzero urb status (%d)\n",
- __func__, stat);
-
- if (!test_bit(TRANS_OUT_BUSY, &dev->flags))
- return;
-
- clear_bit(TRANS_OUT_BUSY, &dev->flags);
-
- wake_up_interruptible(&dev->write_wait);
-}
-
-static void vmk80xx_rx_callback(struct urb *urb)
-{
- struct vmk80xx_usb *dev = urb->context;
- int stat = urb->status;
-
- switch (stat) {
- case 0:
- break;
- case -ENOENT:
- case -ECONNRESET:
- case -ESHUTDOWN:
- break;
- default:
- dbgcm("comedi#: vmk80xx: %s - nonzero urb status (%d)\n",
- __func__, stat);
- goto resubmit;
- }
-
- goto exit;
-resubmit:
- if (test_bit(TRANS_IN_RUNNING, &dev->flags) && dev->intf) {
- usb_anchor_urb(urb, &dev->rx_anchor);
-
- if (!usb_submit_urb(urb, GFP_KERNEL))
- goto exit;
-
- dev_err(&urb->dev->dev,
- "comedi#: vmk80xx: %s - submit urb failed\n",
- __func__);
-
- usb_unanchor_urb(urb);
- }
-exit:
- clear_bit(TRANS_IN_BUSY, &dev->flags);
-
- wake_up_interruptible(&dev->read_wait);
-}
-
-static int vmk80xx_check_data_link(struct vmk80xx_usb *dev)
+static int vmk80xx_check_data_link(struct vmk80xx_private *devpriv)
{
+ struct usb_device *usb = devpriv->usb;
unsigned int tx_pipe;
unsigned int rx_pipe;
unsigned char tx[1];
unsigned char rx[2];
- tx_pipe = usb_sndbulkpipe(dev->udev, 0x01);
- rx_pipe = usb_rcvbulkpipe(dev->udev, 0x81);
+ tx_pipe = usb_sndbulkpipe(usb, 0x01);
+ rx_pipe = usb_rcvbulkpipe(usb, 0x81);
tx[0] = VMK8061_CMD_RD_PWR_STAT;
@@ -279,22 +188,23 @@ static int vmk80xx_check_data_link(struct vmk80xx_usb *dev)
* running and the data link between IC3 and
* IC6 is working properly
*/
- usb_bulk_msg(dev->udev, tx_pipe, tx, 1, NULL, dev->ep_tx->bInterval);
- usb_bulk_msg(dev->udev, rx_pipe, rx, 2, NULL, HZ * 10);
+ usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
+ usb_bulk_msg(usb, rx_pipe, rx, 2, NULL, HZ * 10);
return (int)rx[1];
}
-static void vmk80xx_read_eeprom(struct vmk80xx_usb *dev, int flag)
+static void vmk80xx_read_eeprom(struct vmk80xx_private *devpriv, int flag)
{
+ struct usb_device *usb = devpriv->usb;
unsigned int tx_pipe;
unsigned int rx_pipe;
unsigned char tx[1];
unsigned char rx[64];
int cnt;
- tx_pipe = usb_sndbulkpipe(dev->udev, 0x01);
- rx_pipe = usb_rcvbulkpipe(dev->udev, 0x81);
+ tx_pipe = usb_sndbulkpipe(usb, 0x01);
+ rx_pipe = usb_rcvbulkpipe(usb, 0x81);
tx[0] = VMK8061_CMD_RD_VERSION;
@@ -302,243 +212,116 @@ static void vmk80xx_read_eeprom(struct vmk80xx_usb *dev, int flag)
* Read the firmware version info of IC3 and
* IC6 from the internal EEPROM of the IC
*/
- usb_bulk_msg(dev->udev, tx_pipe, tx, 1, NULL, dev->ep_tx->bInterval);
- usb_bulk_msg(dev->udev, rx_pipe, rx, 64, &cnt, HZ * 10);
+ usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
+ usb_bulk_msg(usb, rx_pipe, rx, 64, &cnt, HZ * 10);
rx[cnt] = '\0';
if (flag & IC3_VERSION)
- strncpy(dev->fw.ic3_vers, rx + 1, 24);
+ strncpy(devpriv->fw.ic3_vers, rx + 1, 24);
else /* IC6_VERSION */
- strncpy(dev->fw.ic6_vers, rx + 25, 24);
+ strncpy(devpriv->fw.ic6_vers, rx + 25, 24);
}
-static int vmk80xx_reset_device(struct vmk80xx_usb *dev)
-{
- struct urb *urb;
- unsigned int tx_pipe;
- int ival;
- size_t size;
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
-
- tx_pipe = usb_sndintpipe(dev->udev, 0x01);
-
- ival = dev->ep_tx->bInterval;
- size = le16_to_cpu(dev->ep_tx->wMaxPacketSize);
-
- dev->usb_tx_buf[0] = VMK8055_CMD_RST;
- dev->usb_tx_buf[1] = 0x00;
- dev->usb_tx_buf[2] = 0x00;
- dev->usb_tx_buf[3] = 0x00;
- dev->usb_tx_buf[4] = 0x00;
- dev->usb_tx_buf[5] = 0x00;
- dev->usb_tx_buf[6] = 0x00;
- dev->usb_tx_buf[7] = 0x00;
-
- usb_fill_int_urb(urb, dev->udev, tx_pipe, dev->usb_tx_buf,
- size, vmk80xx_tx_callback, dev, ival);
-
- usb_anchor_urb(urb, &dev->tx_anchor);
-
- return usb_submit_urb(urb, GFP_KERNEL);
-}
-
-static void vmk80xx_build_int_urb(struct urb *urb, int flag)
-{
- struct vmk80xx_usb *dev = urb->context;
- __u8 rx_addr;
- __u8 tx_addr;
- unsigned int pipe;
- unsigned char *buf;
- size_t size;
- void (*callback) (struct urb *);
- int ival;
-
- if (flag & URB_RCV_FLAG) {
- rx_addr = dev->ep_rx->bEndpointAddress;
- pipe = usb_rcvintpipe(dev->udev, rx_addr);
- buf = dev->usb_rx_buf;
- size = le16_to_cpu(dev->ep_rx->wMaxPacketSize);
- callback = vmk80xx_rx_callback;
- ival = dev->ep_rx->bInterval;
- } else { /* URB_SND_FLAG */
- tx_addr = dev->ep_tx->bEndpointAddress;
- pipe = usb_sndintpipe(dev->udev, tx_addr);
- buf = dev->usb_tx_buf;
- size = le16_to_cpu(dev->ep_tx->wMaxPacketSize);
- callback = vmk80xx_tx_callback;
- ival = dev->ep_tx->bInterval;
- }
-
- usb_fill_int_urb(urb, dev->udev, pipe, buf, size, callback, dev, ival);
-}
-
-static void vmk80xx_do_bulk_msg(struct vmk80xx_usb *dev)
+static void vmk80xx_do_bulk_msg(struct vmk80xx_private *devpriv)
{
+ struct usb_device *usb = devpriv->usb;
__u8 tx_addr;
__u8 rx_addr;
unsigned int tx_pipe;
unsigned int rx_pipe;
size_t size;
- set_bit(TRANS_IN_BUSY, &dev->flags);
- set_bit(TRANS_OUT_BUSY, &dev->flags);
-
- tx_addr = dev->ep_tx->bEndpointAddress;
- rx_addr = dev->ep_rx->bEndpointAddress;
- tx_pipe = usb_sndbulkpipe(dev->udev, tx_addr);
- rx_pipe = usb_rcvbulkpipe(dev->udev, rx_addr);
+ tx_addr = devpriv->ep_tx->bEndpointAddress;
+ rx_addr = devpriv->ep_rx->bEndpointAddress;
+ tx_pipe = usb_sndbulkpipe(usb, tx_addr);
+ rx_pipe = usb_rcvbulkpipe(usb, rx_addr);
/*
* The max packet size attributes of the K8061
* input/output endpoints are identical
*/
- size = le16_to_cpu(dev->ep_tx->wMaxPacketSize);
-
- usb_bulk_msg(dev->udev, tx_pipe, dev->usb_tx_buf,
- size, NULL, dev->ep_tx->bInterval);
- usb_bulk_msg(dev->udev, rx_pipe, dev->usb_rx_buf, size, NULL, HZ * 10);
+ size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
- clear_bit(TRANS_OUT_BUSY, &dev->flags);
- clear_bit(TRANS_IN_BUSY, &dev->flags);
+ usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf,
+ size, NULL, devpriv->ep_tx->bInterval);
+ usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, size, NULL, HZ * 10);
}
-static int vmk80xx_read_packet(struct vmk80xx_usb *dev)
+static int vmk80xx_read_packet(struct vmk80xx_private *devpriv)
{
- struct urb *urb;
- int retval;
+ struct usb_device *usb;
+ struct usb_endpoint_descriptor *ep;
+ unsigned int pipe;
- if (!dev->intf)
+ if (!devpriv->intf)
return -ENODEV;
- /* Only useful for interrupt transfers */
- if (test_bit(TRANS_IN_BUSY, &dev->flags))
- if (wait_event_interruptible(dev->read_wait,
- !test_bit(TRANS_IN_BUSY,
- &dev->flags)))
- return -ERESTART;
-
- if (dev->board.model == VMK8061_MODEL) {
- vmk80xx_do_bulk_msg(dev);
-
+ if (devpriv->model == VMK8061_MODEL) {
+ vmk80xx_do_bulk_msg(devpriv);
return 0;
}
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
-
- urb->context = dev;
- vmk80xx_build_int_urb(urb, URB_RCV_FLAG);
-
- set_bit(TRANS_IN_RUNNING, &dev->flags);
- set_bit(TRANS_IN_BUSY, &dev->flags);
-
- usb_anchor_urb(urb, &dev->rx_anchor);
-
- retval = usb_submit_urb(urb, GFP_KERNEL);
- if (!retval)
- goto exit;
-
- clear_bit(TRANS_IN_RUNNING, &dev->flags);
- usb_unanchor_urb(urb);
-
-exit:
- usb_free_urb(urb);
-
- return retval;
+ usb = devpriv->usb;
+ ep = devpriv->ep_rx;
+ pipe = usb_rcvintpipe(usb, ep->bEndpointAddress);
+ return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf,
+ le16_to_cpu(ep->wMaxPacketSize), NULL,
+ HZ * 10);
}
-static int vmk80xx_write_packet(struct vmk80xx_usb *dev, int cmd)
+static int vmk80xx_write_packet(struct vmk80xx_private *devpriv, int cmd)
{
- struct urb *urb;
- int retval;
+ struct usb_device *usb;
+ struct usb_endpoint_descriptor *ep;
+ unsigned int pipe;
- if (!dev->intf)
+ if (!devpriv->intf)
return -ENODEV;
- if (test_bit(TRANS_OUT_BUSY, &dev->flags))
- if (wait_event_interruptible(dev->write_wait,
- !test_bit(TRANS_OUT_BUSY,
- &dev->flags)))
- return -ERESTART;
-
- if (dev->board.model == VMK8061_MODEL) {
- dev->usb_tx_buf[0] = cmd;
- vmk80xx_do_bulk_msg(dev);
+ devpriv->usb_tx_buf[0] = cmd;
+ if (devpriv->model == VMK8061_MODEL) {
+ vmk80xx_do_bulk_msg(devpriv);
return 0;
}
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
-
- urb->context = dev;
- vmk80xx_build_int_urb(urb, URB_SND_FLAG);
-
- set_bit(TRANS_OUT_BUSY, &dev->flags);
-
- usb_anchor_urb(urb, &dev->tx_anchor);
-
- dev->usb_tx_buf[0] = cmd;
-
- retval = usb_submit_urb(urb, GFP_KERNEL);
- if (!retval)
- goto exit;
-
- clear_bit(TRANS_OUT_BUSY, &dev->flags);
- usb_unanchor_urb(urb);
-
-exit:
- usb_free_urb(urb);
-
- return retval;
+ usb = devpriv->usb;
+ ep = devpriv->ep_tx;
+ pipe = usb_sndintpipe(usb, ep->bEndpointAddress);
+ return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf,
+ le16_to_cpu(ep->wMaxPacketSize), NULL,
+ HZ * 10);
}
-#define DIR_IN 1
-#define DIR_OUT 2
-
-static int rudimentary_check(struct vmk80xx_usb *dev, int dir)
+static int vmk80xx_reset_device(struct vmk80xx_private *devpriv)
{
- if (!dev)
- return -EFAULT;
- if (!dev->probed)
- return -ENODEV;
- if (!dev->attached)
- return -ENODEV;
- if (dir & DIR_IN) {
- if (test_bit(TRANS_IN_BUSY, &dev->flags))
- return -EBUSY;
- }
- if (dir & DIR_OUT) {
- if (test_bit(TRANS_OUT_BUSY, &dev->flags))
- return -EBUSY;
- }
+ size_t size;
+ int retval;
- return 0;
+ size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
+ memset(devpriv->usb_tx_buf, 0, size);
+ retval = vmk80xx_write_packet(devpriv, VMK8055_CMD_RST);
+ if (retval)
+ return retval;
+ /* set outputs to known state as we cannot read them */
+ return vmk80xx_write_packet(devpriv, VMK8055_CMD_WRT_AD);
}
-static int vmk80xx_ai_rinsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
int chan;
int reg[2];
int n;
- n = rudimentary_check(dev, DIR_IN);
- if (n)
- return n;
-
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
- switch (dev->board.model) {
+ switch (devpriv->model) {
case VMK8055_MODEL:
if (!chan)
reg[0] = VMK8055_AI1_REG;
@@ -549,48 +332,45 @@ static int vmk80xx_ai_rinsn(struct comedi_device *cdev,
default:
reg[0] = VMK8061_AI_REG1;
reg[1] = VMK8061_AI_REG2;
- dev->usb_tx_buf[0] = VMK8061_CMD_RD_AI;
- dev->usb_tx_buf[VMK8061_CH_REG] = chan;
+ devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AI;
+ devpriv->usb_tx_buf[VMK8061_CH_REG] = chan;
break;
}
for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
+ if (vmk80xx_read_packet(devpriv))
break;
- if (dev->board.model == VMK8055_MODEL) {
- data[n] = dev->usb_rx_buf[reg[0]];
+ if (devpriv->model == VMK8055_MODEL) {
+ data[n] = devpriv->usb_rx_buf[reg[0]];
continue;
}
/* VMK8061_MODEL */
- data[n] = dev->usb_rx_buf[reg[0]] + 256 *
- dev->usb_rx_buf[reg[1]];
+ data[n] = devpriv->usb_rx_buf[reg[0]] + 256 *
+ devpriv->usb_rx_buf[reg[1]];
}
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return n;
}
-static int vmk80xx_ao_winsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
int chan;
int cmd;
int reg;
int n;
- n = rudimentary_check(dev, DIR_OUT);
- if (n)
- return n;
-
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
- switch (dev->board.model) {
+ switch (devpriv->model) {
case VMK8055_MODEL:
cmd = VMK8055_CMD_WRT_AD;
if (!chan)
@@ -601,82 +381,76 @@ static int vmk80xx_ao_winsn(struct comedi_device *cdev,
default: /* NOTE: avoid compiler warnings */
cmd = VMK8061_CMD_SET_AO;
reg = VMK8061_AO_REG;
- dev->usb_tx_buf[VMK8061_CH_REG] = chan;
+ devpriv->usb_tx_buf[VMK8061_CH_REG] = chan;
break;
}
for (n = 0; n < insn->n; n++) {
- dev->usb_tx_buf[reg] = data[n];
+ devpriv->usb_tx_buf[reg] = data[n];
- if (vmk80xx_write_packet(dev, cmd))
+ if (vmk80xx_write_packet(devpriv, cmd))
break;
}
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return n;
}
-static int vmk80xx_ao_rinsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
int chan;
int reg;
int n;
- n = rudimentary_check(dev, DIR_IN);
- if (n)
- return n;
-
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
reg = VMK8061_AO_REG - 1;
- dev->usb_tx_buf[0] = VMK8061_CMD_RD_AO;
+ devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AO;
for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
+ if (vmk80xx_read_packet(devpriv))
break;
- data[n] = dev->usb_rx_buf[reg + chan];
+ data[n] = devpriv->usb_rx_buf[reg + chan];
}
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return n;
}
-static int vmk80xx_di_bits(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
unsigned char *rx_buf;
int reg;
int retval;
- retval = rudimentary_check(dev, DIR_IN);
- if (retval)
- return retval;
-
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
- rx_buf = dev->usb_rx_buf;
+ rx_buf = devpriv->usb_rx_buf;
- if (dev->board.model == VMK8061_MODEL) {
+ if (devpriv->model == VMK8061_MODEL) {
reg = VMK8061_DI_REG;
- dev->usb_tx_buf[0] = VMK8061_CMD_RD_DI;
+ devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_DI;
} else {
reg = VMK8055_DI_REG;
}
- retval = vmk80xx_read_packet(dev);
+ retval = vmk80xx_read_packet(devpriv);
if (!retval) {
- if (dev->board.model == VMK8055_MODEL)
+ if (devpriv->model == VMK8055_MODEL)
data[1] = (((rx_buf[reg] >> 4) & 0x03) |
((rx_buf[reg] << 2) & 0x04) |
((rx_buf[reg] >> 3) & 0x18));
@@ -686,185 +460,48 @@ static int vmk80xx_di_bits(struct comedi_device *cdev,
retval = 2;
}
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return retval;
}
-static int vmk80xx_di_rinsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
- int chan;
- unsigned char *rx_buf;
- int reg;
- int inp;
- int n;
-
- n = rudimentary_check(dev, DIR_IN);
- if (n)
- return n;
-
- down(&dev->limit_sem);
- chan = CR_CHAN(insn->chanspec);
-
- rx_buf = dev->usb_rx_buf;
-
- if (dev->board.model == VMK8061_MODEL) {
- reg = VMK8061_DI_REG;
- dev->usb_tx_buf[0] = VMK8061_CMD_RD_DI;
- } else {
- reg = VMK8055_DI_REG;
- }
- for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
- break;
-
- if (dev->board.model == VMK8055_MODEL)
- inp = (((rx_buf[reg] >> 4) & 0x03) |
- ((rx_buf[reg] << 2) & 0x04) |
- ((rx_buf[reg] >> 3) & 0x18));
- else
- inp = rx_buf[reg];
-
- data[n] = (inp >> chan) & 1;
- }
-
- up(&dev->limit_sem);
-
- return n;
-}
-
-static int vmk80xx_do_winsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct vmk80xx_usb *dev = cdev->private;
- int chan;
- unsigned char *tx_buf;
- int reg;
- int cmd;
- int n;
-
- n = rudimentary_check(dev, DIR_OUT);
- if (n)
- return n;
-
- down(&dev->limit_sem);
- chan = CR_CHAN(insn->chanspec);
-
- tx_buf = dev->usb_tx_buf;
-
- for (n = 0; n < insn->n; n++) {
- if (dev->board.model == VMK8055_MODEL) {
- reg = VMK8055_DO_REG;
- cmd = VMK8055_CMD_WRT_AD;
- if (data[n] == 1)
- tx_buf[reg] |= (1 << chan);
- else
- tx_buf[reg] ^= (1 << chan);
- } else { /* VMK8061_MODEL */
- reg = VMK8061_DO_REG;
- if (data[n] == 1) {
- cmd = VMK8061_CMD_SET_DO;
- tx_buf[reg] = 1 << chan;
- } else {
- cmd = VMK8061_CMD_CLR_DO;
- tx_buf[reg] = 0xff - (1 << chan);
- }
- }
-
- if (vmk80xx_write_packet(dev, cmd))
- break;
- }
-
- up(&dev->limit_sem);
-
- return n;
-}
-
-static int vmk80xx_do_rinsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct vmk80xx_usb *dev = cdev->private;
- int chan;
- int reg;
- int n;
-
- n = rudimentary_check(dev, DIR_IN);
- if (n)
- return n;
-
- down(&dev->limit_sem);
- chan = CR_CHAN(insn->chanspec);
-
- reg = VMK8061_DO_REG;
-
- dev->usb_tx_buf[0] = VMK8061_CMD_RD_DO;
-
- for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
- break;
-
- data[n] = (dev->usb_rx_buf[reg] >> chan) & 1;
- }
-
- up(&dev->limit_sem);
-
- return n;
-}
-
-static int vmk80xx_do_bits(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
unsigned char *rx_buf, *tx_buf;
- int dir, reg, cmd;
+ int reg, cmd;
int retval;
- dir = 0;
-
- if (data[0])
- dir |= DIR_OUT;
-
- if (dev->board.model == VMK8061_MODEL)
- dir |= DIR_IN;
-
- retval = rudimentary_check(dev, dir);
- if (retval)
- return retval;
+ if (devpriv->model == VMK8061_MODEL) {
+ reg = VMK8061_DO_REG;
+ cmd = VMK8061_CMD_DO;
+ } else { /* VMK8055_MODEL */
+ reg = VMK8055_DO_REG;
+ cmd = VMK8055_CMD_WRT_AD;
+ }
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
- rx_buf = dev->usb_rx_buf;
- tx_buf = dev->usb_tx_buf;
+ rx_buf = devpriv->usb_rx_buf;
+ tx_buf = devpriv->usb_tx_buf;
if (data[0]) {
- if (dev->board.model == VMK8055_MODEL) {
- reg = VMK8055_DO_REG;
- cmd = VMK8055_CMD_WRT_AD;
- } else { /* VMK8061_MODEL */
- reg = VMK8061_DO_REG;
- cmd = VMK8061_CMD_DO;
- }
-
tx_buf[reg] &= ~data[0];
tx_buf[reg] |= (data[0] & data[1]);
- retval = vmk80xx_write_packet(dev, cmd);
+ retval = vmk80xx_write_packet(devpriv, cmd);
if (retval)
goto out;
}
- if (dev->board.model == VMK8061_MODEL) {
- reg = VMK8061_DO_REG;
+ if (devpriv->model == VMK8061_MODEL) {
tx_buf[0] = VMK8061_CMD_RD_DO;
- retval = vmk80xx_read_packet(dev);
+ retval = vmk80xx_read_packet(devpriv);
if (!retval) {
data[1] = rx_buf[reg];
@@ -876,28 +513,25 @@ static int vmk80xx_do_bits(struct comedi_device *cdev,
}
out:
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return retval;
}
-static int vmk80xx_cnt_rinsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_cnt_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
int chan;
int reg[2];
int n;
- n = rudimentary_check(dev, DIR_IN);
- if (n)
- return n;
-
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
- switch (dev->board.model) {
+ switch (devpriv->model) {
case VMK8055_MODEL:
if (!chan)
reg[0] = VMK8055_CNT1_REG;
@@ -908,50 +542,47 @@ static int vmk80xx_cnt_rinsn(struct comedi_device *cdev,
default:
reg[0] = VMK8061_CNT_REG;
reg[1] = VMK8061_CNT_REG;
- dev->usb_tx_buf[0] = VMK8061_CMD_RD_CNT;
+ devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_CNT;
break;
}
for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
+ if (vmk80xx_read_packet(devpriv))
break;
- if (dev->board.model == VMK8055_MODEL)
- data[n] = dev->usb_rx_buf[reg[0]];
+ if (devpriv->model == VMK8055_MODEL)
+ data[n] = devpriv->usb_rx_buf[reg[0]];
else /* VMK8061_MODEL */
- data[n] = dev->usb_rx_buf[reg[0] * (chan + 1) + 1]
- + 256 * dev->usb_rx_buf[reg[1] * 2 + 2];
+ data[n] = devpriv->usb_rx_buf[reg[0] * (chan + 1) + 1]
+ + 256 * devpriv->usb_rx_buf[reg[1] * 2 + 2];
}
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return n;
}
-static int vmk80xx_cnt_cinsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_cnt_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
unsigned int insn_cmd;
int chan;
int cmd;
int reg;
int n;
- n = rudimentary_check(dev, DIR_OUT);
- if (n)
- return n;
-
insn_cmd = data[0];
if (insn_cmd != INSN_CONFIG_RESET && insn_cmd != GPCT_RESET)
return -EINVAL;
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
- if (dev->board.model == VMK8055_MODEL) {
+ if (devpriv->model == VMK8055_MODEL) {
if (!chan) {
cmd = VMK8055_CMD_RST_CNT1;
reg = VMK8055_CNT1_REG;
@@ -960,36 +591,33 @@ static int vmk80xx_cnt_cinsn(struct comedi_device *cdev,
reg = VMK8055_CNT2_REG;
}
- dev->usb_tx_buf[reg] = 0x00;
+ devpriv->usb_tx_buf[reg] = 0x00;
} else {
cmd = VMK8061_CMD_RST_CNT;
}
for (n = 0; n < insn->n; n++)
- if (vmk80xx_write_packet(dev, cmd))
+ if (vmk80xx_write_packet(devpriv, cmd))
break;
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return n;
}
-static int vmk80xx_cnt_winsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_cnt_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
unsigned long debtime;
unsigned long val;
int chan;
int cmd;
int n;
- n = rudimentary_check(dev, DIR_OUT);
- if (n)
- return n;
-
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
chan = CR_CHAN(insn->chanspec);
if (!chan)
@@ -1010,65 +638,64 @@ static int vmk80xx_cnt_winsn(struct comedi_device *cdev,
if (((val + 1) * val) < debtime * 1000 / 115)
val += 1;
- dev->usb_tx_buf[6 + chan] = val;
+ devpriv->usb_tx_buf[6 + chan] = val;
- if (vmk80xx_write_packet(dev, cmd))
+ if (vmk80xx_write_packet(devpriv, cmd))
break;
}
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return n;
}
-static int vmk80xx_pwm_rinsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_pwm_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
+ unsigned char *tx_buf;
+ unsigned char *rx_buf;
int reg[2];
int n;
- n = rudimentary_check(dev, DIR_IN);
- if (n)
- return n;
+ down(&devpriv->limit_sem);
- down(&dev->limit_sem);
+ tx_buf = devpriv->usb_tx_buf;
+ rx_buf = devpriv->usb_rx_buf;
reg[0] = VMK8061_PWM_REG1;
reg[1] = VMK8061_PWM_REG2;
- dev->usb_tx_buf[0] = VMK8061_CMD_RD_PWM;
+ tx_buf[0] = VMK8061_CMD_RD_PWM;
for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
+ if (vmk80xx_read_packet(devpriv))
break;
- data[n] = dev->usb_rx_buf[reg[0]] + 4 * dev->usb_rx_buf[reg[1]];
+ data[n] = rx_buf[reg[0]] + 4 * rx_buf[reg[1]];
}
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return n;
}
-static int vmk80xx_pwm_winsn(struct comedi_device *cdev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int vmk80xx_pwm_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct vmk80xx_usb *dev = cdev->private;
+ struct vmk80xx_private *devpriv = dev->private;
unsigned char *tx_buf;
int reg[2];
int cmd;
int n;
- n = rudimentary_check(dev, DIR_OUT);
- if (n)
- return n;
-
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
- tx_buf = dev->usb_tx_buf;
+ tx_buf = devpriv->usb_tx_buf;
reg[0] = VMK8061_PWM_REG1;
reg[1] = VMK8061_PWM_REG2;
@@ -1092,341 +719,236 @@ static int vmk80xx_pwm_winsn(struct comedi_device *cdev,
tx_buf[reg[0]] = (unsigned char)(data[n] & 0x03);
tx_buf[reg[1]] = (unsigned char)(data[n] >> 2) & 0xff;
- if (vmk80xx_write_packet(dev, cmd))
+ if (vmk80xx_write_packet(devpriv, cmd))
break;
}
- up(&dev->limit_sem);
+ up(&devpriv->limit_sem);
return n;
}
-static int vmk80xx_attach_common(struct comedi_device *cdev,
- struct vmk80xx_usb *dev)
+static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
{
- int n_subd;
- struct comedi_subdevice *s;
- int ret;
+ struct vmk80xx_private *devpriv = dev->private;
+ struct usb_interface *intf = devpriv->intf;
+ struct usb_host_interface *iface_desc = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i;
- down(&dev->limit_sem);
- cdev->board_name = dev->board.name;
- cdev->private = dev;
- if (dev->board.model == VMK8055_MODEL)
- n_subd = 5;
- else
- n_subd = 6;
- ret = comedi_alloc_subdevices(cdev, n_subd);
- if (ret) {
- up(&dev->limit_sem);
- return ret;
- }
- /* Analog input subdevice */
- s = &cdev->subdevices[VMK80XX_SUBD_AI];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = dev->board.ai_chans;
- s->maxdata = (1 << dev->board.ai_bits) - 1;
- s->range_table = dev->board.range;
- s->insn_read = vmk80xx_ai_rinsn;
- /* Analog output subdevice */
- s = &cdev->subdevices[VMK80XX_SUBD_AO];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
- s->n_chan = dev->board.ao_chans;
- s->maxdata = (1 << dev->board.ao_bits) - 1;
- s->range_table = dev->board.range;
- s->insn_write = vmk80xx_ao_winsn;
- if (dev->board.model == VMK8061_MODEL) {
- s->subdev_flags |= SDF_READABLE;
- s->insn_read = vmk80xx_ao_rinsn;
- }
- /* Digital input subdevice */
- s = &cdev->subdevices[VMK80XX_SUBD_DI];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = dev->board.di_chans;
- s->maxdata = 1;
- s->insn_read = vmk80xx_di_rinsn;
- s->insn_bits = vmk80xx_di_bits;
- /* Digital output subdevice */
- s = &cdev->subdevices[VMK80XX_SUBD_DO];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
- s->n_chan = dev->board.do_chans;
- s->maxdata = 1;
- s->insn_write = vmk80xx_do_winsn;
- s->insn_bits = vmk80xx_do_bits;
- if (dev->board.model == VMK8061_MODEL) {
- s->subdev_flags |= SDF_READABLE;
- s->insn_read = vmk80xx_do_rinsn;
- }
- /* Counter subdevice */
- s = &cdev->subdevices[VMK80XX_SUBD_CNT];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = dev->board.cnt_chans;
- s->insn_read = vmk80xx_cnt_rinsn;
- s->insn_config = vmk80xx_cnt_cinsn;
- if (dev->board.model == VMK8055_MODEL) {
- s->subdev_flags |= SDF_WRITEABLE;
- s->maxdata = (1 << dev->board.cnt_bits) - 1;
- s->insn_write = vmk80xx_cnt_winsn;
- }
- /* PWM subdevice */
- if (dev->board.model == VMK8061_MODEL) {
- s = &cdev->subdevices[VMK80XX_SUBD_PWM];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
- s->n_chan = dev->board.pwm_chans;
- s->maxdata = (1 << dev->board.pwm_bits) - 1;
- s->insn_read = vmk80xx_pwm_rinsn;
- s->insn_write = vmk80xx_pwm_winsn;
- }
- dev->attached = 1;
- dev_info(cdev->class_dev, "vmk80xx: board #%d [%s] attached\n",
- dev->count, dev->board.name);
- up(&dev->limit_sem);
- return 0;
-}
+ if (iface_desc->desc.bNumEndpoints != 2)
+ return -ENODEV;
-/* called for COMEDI_DEVCONFIG ioctl for board_name "vmk80xx" */
-static int vmk80xx_attach(struct comedi_device *cdev,
- struct comedi_devconfig *it)
-{
- int i;
- int ret;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &iface_desc->endpoint[i].desc;
- mutex_lock(&glb_mutex);
- for (i = 0; i < VMK80XX_MAX_BOARDS; i++)
- if (vmb[i].probed && !vmb[i].attached)
- break;
- if (i == VMK80XX_MAX_BOARDS)
- ret = -ENODEV;
- else
- ret = vmk80xx_attach_common(cdev, &vmb[i]);
- mutex_unlock(&glb_mutex);
- return ret;
-}
+ if (usb_endpoint_is_int_in(ep_desc) ||
+ usb_endpoint_is_bulk_in(ep_desc)) {
+ if (!devpriv->ep_rx)
+ devpriv->ep_rx = ep_desc;
+ continue;
+ }
-/* called via comedi_usb_auto_config() */
-static int vmk80xx_auto_attach(struct comedi_device *cdev,
- unsigned long context_unused)
-{
- struct usb_interface *intf = comedi_to_usb_interface(cdev);
- int i;
- int ret;
+ if (usb_endpoint_is_int_out(ep_desc) ||
+ usb_endpoint_is_bulk_out(ep_desc)) {
+ if (!devpriv->ep_tx)
+ devpriv->ep_tx = ep_desc;
+ continue;
+ }
+ }
- mutex_lock(&glb_mutex);
- for (i = 0; i < VMK80XX_MAX_BOARDS; i++)
- if (vmb[i].probed && vmb[i].intf == intf)
- break;
- if (i == VMK80XX_MAX_BOARDS)
- ret = -ENODEV;
- else if (vmb[i].attached)
- ret = -EBUSY;
- else
- ret = vmk80xx_attach_common(cdev, &vmb[i]);
- mutex_unlock(&glb_mutex);
- return ret;
+ if (!devpriv->ep_rx || !devpriv->ep_tx)
+ return -ENODEV;
+
+ return 0;
}
-static void vmk80xx_detach(struct comedi_device *dev)
+static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
{
- struct vmk80xx_usb *usb = dev->private;
+ struct vmk80xx_private *devpriv = dev->private;
+ size_t size;
+
+ size = le16_to_cpu(devpriv->ep_rx->wMaxPacketSize);
+ devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL);
+ if (!devpriv->usb_rx_buf)
+ return -ENOMEM;
- if (usb) {
- down(&usb->limit_sem);
- dev->private = NULL;
- usb->attached = 0;
- up(&usb->limit_sem);
+ size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
+ devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
+ if (!devpriv->usb_tx_buf) {
+ kfree(devpriv->usb_rx_buf);
+ return -ENOMEM;
}
-}
-static struct comedi_driver vmk80xx_driver = {
- .module = THIS_MODULE,
- .driver_name = "vmk80xx",
- .attach = vmk80xx_attach,
- .detach = vmk80xx_detach,
- .auto_attach = vmk80xx_auto_attach,
-};
+ return 0;
+}
-static int vmk80xx_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int vmk80xx_init_subdevices(struct comedi_device *dev)
{
- int i;
- struct vmk80xx_usb *dev;
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *ep_desc;
- size_t size;
-
- mutex_lock(&glb_mutex);
+ const struct vmk80xx_board *boardinfo = comedi_board(dev);
+ struct vmk80xx_private *devpriv = dev->private;
+ struct comedi_subdevice *s;
+ int n_subd;
+ int ret;
- for (i = 0; i < VMK80XX_MAX_BOARDS; i++)
- if (!vmb[i].probed)
- break;
+ down(&devpriv->limit_sem);
- if (i == VMK80XX_MAX_BOARDS) {
- mutex_unlock(&glb_mutex);
- return -EMFILE;
+ if (devpriv->model == VMK8055_MODEL)
+ n_subd = 5;
+ else
+ n_subd = 6;
+ ret = comedi_alloc_subdevices(dev, n_subd);
+ if (ret) {
+ up(&devpriv->limit_sem);
+ return ret;
}
- dev = &vmb[i];
+ /* Analog input subdevice */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = boardinfo->ai_nchans;
+ s->maxdata = boardinfo->ai_maxdata;
+ s->range_table = boardinfo->range;
+ s->insn_read = vmk80xx_ai_insn_read;
- memset(dev, 0x00, sizeof(struct vmk80xx_usb));
- dev->count = i;
+ /* Analog output subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
+ s->n_chan = boardinfo->ao_nchans;
+ s->maxdata = 0x00ff;
+ s->range_table = boardinfo->range;
+ s->insn_write = vmk80xx_ao_insn_write;
+ if (devpriv->model == VMK8061_MODEL) {
+ s->subdev_flags |= SDF_READABLE;
+ s->insn_read = vmk80xx_ao_insn_read;
+ }
- iface_desc = intf->cur_altsetting;
- if (iface_desc->desc.bNumEndpoints != 2)
- goto error;
+ /* Digital input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = boardinfo->di_nchans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = vmk80xx_di_insn_bits;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
+ /* Digital output subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = vmk80xx_do_insn_bits;
- if (usb_endpoint_is_int_in(ep_desc)) {
- dev->ep_rx = ep_desc;
- continue;
- }
+ /* Counter subdevice */
+ s = &dev->subdevices[4];
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 2;
+ s->maxdata = boardinfo->cnt_maxdata;
+ s->insn_read = vmk80xx_cnt_insn_read;
+ s->insn_config = vmk80xx_cnt_insn_config;
+ if (devpriv->model == VMK8055_MODEL) {
+ s->subdev_flags |= SDF_WRITEABLE;
+ s->insn_write = vmk80xx_cnt_insn_write;
+ }
- if (usb_endpoint_is_int_out(ep_desc)) {
- dev->ep_tx = ep_desc;
- continue;
- }
+ /* PWM subdevice */
+ if (devpriv->model == VMK8061_MODEL) {
+ s = &dev->subdevices[5];
+ s->type = COMEDI_SUBD_PWM;
+ s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
+ s->n_chan = boardinfo->pwm_nchans;
+ s->maxdata = boardinfo->pwm_maxdata;
+ s->insn_read = vmk80xx_pwm_insn_read;
+ s->insn_write = vmk80xx_pwm_insn_write;
+ }
- if (usb_endpoint_is_bulk_in(ep_desc)) {
- dev->ep_rx = ep_desc;
- continue;
- }
+ up(&devpriv->limit_sem);
- if (usb_endpoint_is_bulk_out(ep_desc)) {
- dev->ep_tx = ep_desc;
- continue;
- }
- }
+ return 0;
+}
- if (!dev->ep_rx || !dev->ep_tx)
- goto error;
+static int vmk80xx_auto_attach(struct comedi_device *dev,
+ unsigned long context)
+{
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ const struct vmk80xx_board *boardinfo;
+ struct vmk80xx_private *devpriv;
+ int ret;
- size = le16_to_cpu(dev->ep_rx->wMaxPacketSize);
- dev->usb_rx_buf = kmalloc(size, GFP_KERNEL);
- if (!dev->usb_rx_buf) {
- mutex_unlock(&glb_mutex);
- return -ENOMEM;
- }
+ boardinfo = &vmk80xx_boardinfo[context];
+ dev->board_ptr = boardinfo;
+ dev->board_name = boardinfo->name;
- size = le16_to_cpu(dev->ep_tx->wMaxPacketSize);
- dev->usb_tx_buf = kmalloc(size, GFP_KERNEL);
- if (!dev->usb_tx_buf) {
- kfree(dev->usb_rx_buf);
- mutex_unlock(&glb_mutex);
+ devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ if (!devpriv)
return -ENOMEM;
- }
-
- dev->udev = interface_to_usbdev(intf);
- dev->intf = intf;
-
- sema_init(&dev->limit_sem, 8);
- init_waitqueue_head(&dev->read_wait);
- init_waitqueue_head(&dev->write_wait);
-
- init_usb_anchor(&dev->rx_anchor);
- init_usb_anchor(&dev->tx_anchor);
-
- usb_set_intfdata(intf, dev);
-
- switch (id->driver_info) {
- case DEVICE_VMK8055:
- dev->board.name = "K8055 (VM110)";
- dev->board.model = VMK8055_MODEL;
- dev->board.range = &vmk8055_range;
- dev->board.ai_chans = 2;
- dev->board.ai_bits = 8;
- dev->board.ao_chans = 2;
- dev->board.ao_bits = 8;
- dev->board.di_chans = 5;
- dev->board.di_bits = 1;
- dev->board.do_chans = 8;
- dev->board.do_bits = 1;
- dev->board.cnt_chans = 2;
- dev->board.cnt_bits = 16;
- dev->board.pwm_chans = 0;
- dev->board.pwm_bits = 0;
- break;
- case DEVICE_VMK8061:
- dev->board.name = "K8061 (VM140)";
- dev->board.model = VMK8061_MODEL;
- dev->board.range = &vmk8061_range;
- dev->board.ai_chans = 8;
- dev->board.ai_bits = 10;
- dev->board.ao_chans = 8;
- dev->board.ao_bits = 8;
- dev->board.di_chans = 8;
- dev->board.di_bits = 1;
- dev->board.do_chans = 8;
- dev->board.do_bits = 1;
- dev->board.cnt_chans = 2;
- dev->board.cnt_bits = 0;
- dev->board.pwm_chans = 1;
- dev->board.pwm_bits = 10;
- break;
- }
+ dev->private = devpriv;
- if (dev->board.model == VMK8061_MODEL) {
- vmk80xx_read_eeprom(dev, IC3_VERSION);
- dev_info(&intf->dev, "%s\n", dev->fw.ic3_vers);
+ devpriv->usb = interface_to_usbdev(intf);
+ devpriv->intf = intf;
+ devpriv->model = boardinfo->model;
- if (vmk80xx_check_data_link(dev)) {
- vmk80xx_read_eeprom(dev, IC6_VERSION);
- dev_info(&intf->dev, "%s\n", dev->fw.ic6_vers);
- } else {
- dbgcm("comedi#: vmk80xx: no conn. to CPU\n");
- }
- }
+ ret = vmk80xx_find_usb_endpoints(dev);
+ if (ret)
+ return ret;
- if (dev->board.model == VMK8055_MODEL)
- vmk80xx_reset_device(dev);
+ ret = vmk80xx_alloc_usb_buffers(dev);
+ if (ret)
+ return ret;
- dev->probed = 1;
+ sema_init(&devpriv->limit_sem, 8);
- dev_info(&intf->dev, "board #%d [%s] now attached\n",
- dev->count, dev->board.name);
+ usb_set_intfdata(intf, devpriv);
- mutex_unlock(&glb_mutex);
+ if (devpriv->model == VMK8061_MODEL) {
+ vmk80xx_read_eeprom(devpriv, IC3_VERSION);
+ dev_info(&intf->dev, "%s\n", devpriv->fw.ic3_vers);
- comedi_usb_auto_config(intf, &vmk80xx_driver);
+ if (vmk80xx_check_data_link(devpriv)) {
+ vmk80xx_read_eeprom(devpriv, IC6_VERSION);
+ dev_info(&intf->dev, "%s\n", devpriv->fw.ic6_vers);
+ }
+ }
- return 0;
-error:
- mutex_unlock(&glb_mutex);
+ if (devpriv->model == VMK8055_MODEL)
+ vmk80xx_reset_device(devpriv);
- return -ENODEV;
+ return vmk80xx_init_subdevices(dev);
}
-static void vmk80xx_usb_disconnect(struct usb_interface *intf)
+static void vmk80xx_detach(struct comedi_device *dev)
{
- struct vmk80xx_usb *dev = usb_get_intfdata(intf);
+ struct vmk80xx_private *devpriv = dev->private;
- if (!dev)
+ if (!devpriv)
return;
- comedi_usb_auto_unconfig(intf);
-
- mutex_lock(&glb_mutex);
- down(&dev->limit_sem);
+ down(&devpriv->limit_sem);
- dev->probed = 0;
- usb_set_intfdata(dev->intf, NULL);
+ usb_set_intfdata(devpriv->intf, NULL);
- usb_kill_anchored_urbs(&dev->rx_anchor);
- usb_kill_anchored_urbs(&dev->tx_anchor);
+ kfree(devpriv->usb_rx_buf);
+ kfree(devpriv->usb_tx_buf);
- kfree(dev->usb_rx_buf);
- kfree(dev->usb_tx_buf);
+ up(&devpriv->limit_sem);
+}
- dev_info(&intf->dev, "board #%d [%s] now detached\n",
- dev->count, dev->board.name);
+static struct comedi_driver vmk80xx_driver = {
+ .module = THIS_MODULE,
+ .driver_name = "vmk80xx",
+ .auto_attach = vmk80xx_auto_attach,
+ .detach = vmk80xx_detach,
+};
- up(&dev->limit_sem);
- mutex_unlock(&glb_mutex);
+static int vmk80xx_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return comedi_usb_auto_config(intf, &vmk80xx_driver, id->driver_info);
}
static const struct usb_device_id vmk80xx_usb_id_table[] = {
@@ -1446,13 +968,11 @@ static const struct usb_device_id vmk80xx_usb_id_table[] = {
};
MODULE_DEVICE_TABLE(usb, vmk80xx_usb_id_table);
-/* TODO: Add support for suspend, resume, pre_reset,
- * post_reset and flush */
static struct usb_driver vmk80xx_usb_driver = {
.name = "vmk80xx",
- .probe = vmk80xx_usb_probe,
- .disconnect = vmk80xx_usb_disconnect,
.id_table = vmk80xx_usb_id_table,
+ .probe = vmk80xx_usb_probe,
+ .disconnect = comedi_usb_auto_unconfig,
};
module_comedi_usb_driver(vmk80xx_driver, vmk80xx_usb_driver);
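The vmk80xx rewrite above is the standard comedi conversion: the per-device switch in the USB probe becomes a static board table indexed by the id table's driver_info (passed through comedi_usb_auto_config() and received as the auto_attach context), and attach/detach move into comedi's auto_attach/detach hooks while the USB driver shrinks to comedi_usb_auto_config()/comedi_usb_auto_unconfig(). A minimal sketch of the table-driven half, with made-up vendor/product ids and a reduced board struct (not the driver's real fields):

    #include <linux/module.h>
    #include <linux/usb.h>

    enum { BOARD_MODEL_A, BOARD_MODEL_B };

    struct demo_board {
            const char *name;
            int ai_chans;
            int ao_chans;
    };

    /* One entry per board type; looked up via usb_device_id.driver_info. */
    static const struct demo_board demo_boards[] = {
            [BOARD_MODEL_A] = { .name = "model-a", .ai_chans = 2, .ao_chans = 2 },
            [BOARD_MODEL_B] = { .name = "model-b", .ai_chans = 8, .ao_chans = 8 },
    };

    static const struct usb_device_id demo_usb_ids[] = {
            { USB_DEVICE(0x1234, 0x0001), .driver_info = BOARD_MODEL_A },
            { USB_DEVICE(0x1234, 0x0002), .driver_info = BOARD_MODEL_B },
            { }
    };
    MODULE_DEVICE_TABLE(usb, demo_usb_ids);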
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index 4dc09a210883..8932a510d96c 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -42,7 +42,6 @@ MODULE_LICENSE("GPL");
struct comedi_device *comedi_open(const char *filename)
{
- struct comedi_device_file_info *dev_file_info;
struct comedi_device *dev;
unsigned int minor;
@@ -54,12 +53,9 @@ struct comedi_device *comedi_open(const char *filename)
if (minor >= COMEDI_NUM_BOARD_MINORS)
return NULL;
- dev_file_info = comedi_get_device_file_info(minor);
- if (dev_file_info == NULL)
- return NULL;
- dev = dev_file_info->device;
+ dev = comedi_dev_from_minor(minor);
- if (dev == NULL || !dev->attached)
+ if (!dev || !dev->attached)
return NULL;
if (!try_module_get(dev->driver->module))
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index 01acbe97653c..362c214bcc0b 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -33,7 +33,6 @@
#include <linux/proc_fs.h>
#include <linux/string.h>
-#ifdef CONFIG_PROC_FS
static int comedi_read(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
@@ -49,13 +48,10 @@ static int comedi_read(char *buf, char **start, off_t offset, int len,
"driver_name, board_name, n_subdevices");
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(i);
- struct comedi_device *dev;
+ struct comedi_device *dev = comedi_dev_from_minor(i);
- if (dev_file_info == NULL)
+ if (!dev)
continue;
- dev = dev_file_info->device;
if (dev->attached) {
devices_q = 1;
@@ -95,4 +91,3 @@ void comedi_proc_cleanup(void)
{
remove_proc_entry("comedi", NULL);
}
-#endif
diff --git a/drivers/staging/cptm1217/clearpad_tm1217.c b/drivers/staging/cptm1217/clearpad_tm1217.c
index a49b0da60049..31fb5d31bb3a 100644
--- a/drivers/staging/cptm1217/clearpad_tm1217.c
+++ b/drivers/staging/cptm1217/clearpad_tm1217.c
@@ -421,11 +421,8 @@ static int cp_tm1217_probe(struct i2c_client *client,
pdata = client->dev.platform_data;
ts = kzalloc(sizeof(struct cp_tm1217_device), GFP_KERNEL);
- if (!ts) {
- dev_err(&client->dev,
- "cp_tm1217: Private Device Struct alloc failed\n");
+ if (!ts)
return -ENOMEM;
- }
ts->client = client;
ts->dev = &client->dev;
diff --git a/drivers/staging/csr/bh.c b/drivers/staging/csr/bh.c
index 1a1f5c79822a..7b133597e923 100644
--- a/drivers/staging/csr/bh.c
+++ b/drivers/staging/csr/bh.c
@@ -15,7 +15,7 @@
*/
#include "csr_wifi_hip_unifi.h"
#include "unifi_priv.h"
-
+#include <linux/sched/rt.h>
/*
* ---------------------------------------------------------------------------
diff --git a/drivers/staging/csr/drv.c b/drivers/staging/csr/drv.c
index 4780c32c2fe3..3bd52fdeac3b 100644
--- a/drivers/staging/csr/drv.c
+++ b/drivers/staging/csr/drv.c
@@ -819,15 +819,15 @@ unifi_write(struct file *filp, const char *p, size_t len, loff_t *poff)
unifi_trace(priv, UDBG2, "unifi_write: signal 0x%.4X len:%d\n",
sig_id, signal_size);
- /* Allocate a buffer for the signal */
- signal_buf = kmalloc(signal_size, GFP_KERNEL);
+ /* Allocate a buffer for the signal */
+ signal_buf = kmemdup(bulkdata.d[0].os_data_ptr, signal_size,
+ GFP_KERNEL);
if (!signal_buf) {
unifi_net_data_free(priv, &bulkdata.d[0]);
return -ENOMEM;
}
/* Get the signal from the os_data_ptr */
- memcpy(signal_buf, bulkdata.d[0].os_data_ptr, signal_size);
signal_buf[5] = (pcli->sender_id >> 8) & 0xff;
if (signal_size < len) {
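The unifi_write() hunk is the usual kmemdup() cleanup: allocate and copy in one call, with the copy length tied to the allocation length, so the separate memcpy() disappears. Reduced to a self-contained helper (names here are illustrative, the real error path frees its bulk data exactly as before):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static int handle_signal(const u8 *os_data_ptr, size_t signal_size)
    {
            u8 *signal_buf;

            signal_buf = kmemdup(os_data_ptr, signal_size, GFP_KERNEL);
            if (!signal_buf)
                    return -ENOMEM;         /* nothing was copied, nothing to unwind */

            /* ... patch the sender id and queue the private copy ... */
            kfree(signal_buf);
            return 0;
    }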
diff --git a/drivers/staging/csr/sme_sys.c b/drivers/staging/csr/sme_sys.c
index 2b068197ed44..b1151a28d8e3 100644
--- a/drivers/staging/csr/sme_sys.c
+++ b/drivers/staging/csr/sme_sys.c
@@ -280,7 +280,7 @@ void CsrWifiRouterCtrlHipReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
CSR_SIGNAL *signal;
u16 interfaceTag = 0;
CSR_MA_PACKET_REQUEST *req;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
+ netInterface_priv_t *interfacePriv;
if (priv == NULL) {
return;
@@ -294,6 +294,8 @@ void CsrWifiRouterCtrlHipReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
return;
}
+ interfacePriv = priv->interfacePriv[interfaceTag];
+
/* Initialize bulkdata to avoid os_net_buf is garbage */
memset(&bulkdata, 0, sizeof(bulk_data_param_t));
@@ -1498,7 +1500,7 @@ void CsrWifiRouterMaPacketReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
u8 *daddr, *saddr;
u16 interfaceTag = mareq->interfaceTag & 0x00ff;
int queue;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
+ netInterface_priv_t *interfacePriv;
if (!mareq->frame || !priv || !priv->smepriv)
{
@@ -1510,6 +1512,8 @@ void CsrWifiRouterMaPacketReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
unifi_error(priv, "CsrWifiRouterMaPacketReqHandler: interfaceID >= CSR_WIFI_NUM_INTERFACES.\n");
return;
}
+
+ interfacePriv = priv->interfacePriv[interfaceTag];
/* get a pointer to dest & source Mac address */
daddr = mareq->frame;
saddr = (mareq->frame + ETH_ALEN);
@@ -2056,9 +2060,9 @@ void CsrWifiRouterCtrlPeerDelReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
CsrWifiRouterCtrlPeerDelReq* req = (CsrWifiRouterCtrlPeerDelReq*)msg;
CsrResult status = CSR_RESULT_SUCCESS;
unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
+ netInterface_priv_t *interfacePriv;
- unifi_trace(priv, UDBG2, "entering CsrWifiRouterCtrlPeerDelReqHandler \n");
+ unifi_trace(priv, UDBG2, "entering CsrWifiRouterCtrlPeerDelReqHandler\n");
if (priv == NULL)
{
unifi_error(priv, "CsrWifiRouterCtrlPeerDelReqHandler: invalid smepriv\n");
@@ -2071,6 +2075,8 @@ void CsrWifiRouterCtrlPeerDelReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
return;
}
+ interfacePriv = priv->interfacePriv[req->interfaceTag];
+
switch(interfacePriv->interfaceMode)
{
case CSR_WIFI_ROUTER_CTRL_MODE_AP:
@@ -2471,7 +2477,7 @@ void CsrWifiRouterCtrlPeerAddReqHandler(void* drvpriv,CsrWifiFsmEvent* msg)
CsrResult status = CSR_RESULT_SUCCESS;
unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
u32 handle = 0;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
+ netInterface_priv_t *interfacePriv;
unifi_trace(priv, UDBG2, "entering CsrWifiRouterCtrlPeerAddReqHandler \n");
if (priv == NULL)
@@ -2486,6 +2492,8 @@ void CsrWifiRouterCtrlPeerAddReqHandler(void* drvpriv,CsrWifiFsmEvent* msg)
return;
}
+ interfacePriv = priv->interfacePriv[req->interfaceTag];
+
switch(interfacePriv->interfaceMode)
{
case CSR_WIFI_ROUTER_CTRL_MODE_AP:
@@ -3036,21 +3044,24 @@ void CsrWifiRouterCtrlWapiRxPktReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
ul_client_t *client;
CSR_SIGNAL signal;
CSR_MA_PACKET_INDICATION *pkt_ind;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
+ netInterface_priv_t *interfacePriv;
+
+ if (priv == NULL) {
+ unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq : invalid priv\n", __func__);
+ return;
+ }
+
+ if (priv->smepriv == NULL) {
+ unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq : invalid sme priv\n", __func__);
+ return;
+ }
+
+ interfacePriv = priv->interfacePriv[req->interfaceTag];
if (CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) {
unifi_trace(priv, UDBG6, ">>%s\n", __FUNCTION__);
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq : invalid priv\n",__FUNCTION__);
- return;
- }
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq : invalid sme priv\n",__FUNCTION__);
- return;
- }
if (req->dataLength == 0 || req->data == NULL) {
unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq: invalid request\n",__FUNCTION__);
diff --git a/drivers/staging/csr/unifi_sme.c b/drivers/staging/csr/unifi_sme.c
index 7c6c4138fc76..7d19e632a5e4 100644
--- a/drivers/staging/csr/unifi_sme.c
+++ b/drivers/staging/csr/unifi_sme.c
@@ -15,7 +15,7 @@
#include "unifi_priv.h"
#include "csr_wifi_hip_unifi.h"
#include "csr_wifi_hip_conversions.h"
-
+#include <linux/sched/rt.h>
@@ -1196,7 +1196,6 @@ void uf_send_pkt_to_encrypt(struct work_struct *work)
if (pktBulkDataLength > 0) {
pktBulkData = kmalloc(pktBulkDataLength, GFP_KERNEL);
- memset(pktBulkData, 0, pktBulkDataLength);
} else {
unifi_error(priv, "uf_send_pkt_to_encrypt() : invalid buffer\n");
return;
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 0ff2865edec8..a829b6231a66 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -773,7 +773,9 @@ do_del_chan (struct net_device * musycc_dev, void *data)
if (copy_from_user (&cp, data,
sizeof (struct sbecom_chan_param)))
return -EFAULT;
- sprintf (buf, CHANNAME "%d", cp.channum);
+ if (cp.channum > 999)
+ return -EINVAL;
+ snprintf (buf, sizeof(buf), CHANNAME "%d", cp.channum);
if (!(dev = dev_get_by_name (&init_net, buf)))
return -ENOENT;
dev_put (dev);
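do_del_chan() previously sprintf()'d an unbounded integer after CHANNAME into a fixed stack buffer; the fix both rejects channel numbers above 999 and switches to snprintf() so the destination size is always honoured. The same guard in isolation (the CHANNAME value below is a placeholder, not the driver's):

    #include <linux/errno.h>
    #include <linux/kernel.h>

    #define CHANNAME "hdlc"

    static int build_chan_name(char *buf, size_t len, unsigned int channum)
    {
            if (channum > 999)      /* keep the numeric suffix to three digits */
                    return -EINVAL;
            snprintf(buf, len, CHANNAME "%u", channum);
            return 0;
    }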
diff --git a/drivers/staging/dgrp/Kconfig b/drivers/staging/dgrp/Kconfig
index 39f4bb65ec83..e4c41552923a 100644
--- a/drivers/staging/dgrp/Kconfig
+++ b/drivers/staging/dgrp/Kconfig
@@ -1,7 +1,7 @@
config DGRP
tristate "Digi Realport driver"
default n
- depends on SYSFS
+ depends on SYSFS && TTY
---help---
Support for Digi Realport devices. These devices allow you to
access remote serial ports as if they are local tty devices. This
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 2d1bbfd5b67c..e6018823b9de 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -37,6 +37,7 @@
#include <linux/proc_fs.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spinlock.h>
@@ -211,7 +212,7 @@ static void dgrp_input(struct ch_struct *ch)
data_len = (ch->ch_rin - ch->ch_rout) & RBUF_MASK;
/* len is the amount of data we are going to transfer here */
- len = tty_buffer_request_room(tty, data_len);
+ len = tty_buffer_request_room(&ch->port, data_len);
/* Check DPA flow control */
if ((nd->nd_dpa_debug) &&
@@ -232,9 +233,9 @@ static void dgrp_input(struct ch_struct *ch)
(nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(tty)))))
dgrp_dpa_data(nd, 1, myflipbuf, len);
- tty_insert_flip_string_flags(tty, myflipbuf,
+ tty_insert_flip_string_flags(&ch->port, myflipbuf,
myflipflagbuf, len);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&ch->port);
ch->ch_rxcount += len;
}
@@ -2956,9 +2957,9 @@ check_query:
I_BRKINT(ch->ch_tun.un_tty) &&
!(I_IGNBRK(ch->ch_tun.un_tty))) {
- tty_buffer_request_room(ch->ch_tun.un_tty, 1);
- tty_insert_flip_char(ch->ch_tun.un_tty, 0, TTY_BREAK);
- tty_flip_buffer_push(ch->ch_tun.un_tty);
+ tty_buffer_request_room(&ch->port, 1);
+ tty_insert_flip_char(&ch->port, 0, TTY_BREAK);
+ tty_flip_buffer_push(&ch->port);
}
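The dgrp conversion follows the tree-wide move of the flip-buffer API from struct tty_struct to struct tty_port: tty_buffer_request_room(), tty_insert_flip_string_flags()/tty_insert_flip_char() and tty_flip_buffer_push() now take the port, so the receive buffering no longer depends on the tty_struct lifetime. A condensed sketch of a receive path using the port-based calls (the surrounding driver structures are omitted):

    #include <linux/tty.h>
    #include <linux/tty_flip.h>

    static void push_rx_data(struct tty_port *port, const unsigned char *data,
                             size_t count)
    {
            int room = tty_buffer_request_room(port, count);

            tty_insert_flip_string(port, data, room);
            tty_flip_buffer_push(port);     /* hand the data to the line discipline */
    }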
diff --git a/drivers/staging/dgrp/dgrp_specproc.c b/drivers/staging/dgrp/dgrp_specproc.c
index c214078a89e9..13c7ccf163c5 100644
--- a/drivers/staging/dgrp/dgrp_specproc.c
+++ b/drivers/staging/dgrp/dgrp_specproc.c
@@ -81,33 +81,34 @@ static struct dgrp_proc_entry dgrp_mon_table[];
static struct dgrp_proc_entry dgrp_ports_table[];
static struct dgrp_proc_entry dgrp_dpa_table[];
-static ssize_t config_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos);
+static ssize_t dgrp_config_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *pos);
-static int nodeinfo_proc_open(struct inode *inode, struct file *file);
-static int info_proc_open(struct inode *inode, struct file *file);
-static int config_proc_open(struct inode *inode, struct file *file);
+static int dgrp_nodeinfo_proc_open(struct inode *inode, struct file *file);
+static int dgrp_info_proc_open(struct inode *inode, struct file *file);
+static int dgrp_config_proc_open(struct inode *inode, struct file *file);
static struct file_operations config_proc_file_ops = {
.owner = THIS_MODULE,
- .open = config_proc_open,
+ .open = dgrp_config_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
- .write = config_proc_write
+ .write = dgrp_config_proc_write,
};
static struct file_operations info_proc_file_ops = {
.owner = THIS_MODULE,
- .open = info_proc_open,
+ .open = dgrp_info_proc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
static struct file_operations nodeinfo_proc_file_ops = {
.owner = THIS_MODULE,
- .open = nodeinfo_proc_open,
+ .open = dgrp_nodeinfo_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
@@ -181,13 +182,13 @@ static struct dgrp_proc_entry dgrp_dpa_table[] = {
void dgrp_unregister_proc(void)
{
- unregister_proc_table(dgrp_table, dgrp_proc_dir_entry);
net_entry_pointer = NULL;
mon_entry_pointer = NULL;
dpa_entry_pointer = NULL;
ports_entry_pointer = NULL;
if (dgrp_proc_dir_entry) {
+ unregister_proc_table(dgrp_table, dgrp_proc_dir_entry);
remove_proc_entry(dgrp_proc_dir_entry->name,
dgrp_proc_dir_entry->parent);
dgrp_proc_dir_entry = NULL;
@@ -231,6 +232,8 @@ static void register_proc_table(struct dgrp_proc_entry *table,
if (table == NULL)
return;
+ if (root == NULL)
+ return;
for (; table->id; table++) {
/* Can't do anything without a proc name. */
@@ -403,21 +406,21 @@ done:
return 0;
}
-static void *config_proc_start(struct seq_file *m, loff_t *pos)
+static void *dgrp_config_proc_start(struct seq_file *m, loff_t *pos)
{
return seq_list_start_head(&nd_struct_list, *pos);
}
-static void *config_proc_next(struct seq_file *p, void *v, loff_t *pos)
+static void *dgrp_config_proc_next(struct seq_file *p, void *v, loff_t *pos)
{
return seq_list_next(v, &nd_struct_list, pos);
}
-static void config_proc_stop(struct seq_file *m, void *v)
+static void dgrp_config_proc_stop(struct seq_file *m, void *v)
{
}
-static int config_proc_show(struct seq_file *m, void *v)
+static int dgrp_config_proc_show(struct seq_file *m, void *v)
{
struct nd_struct *nd;
char tmp_id[4];
@@ -443,13 +446,13 @@ static int config_proc_show(struct seq_file *m, void *v)
}
static const struct seq_operations proc_config_ops = {
- .start = config_proc_start,
- .next = config_proc_next,
- .stop = config_proc_stop,
- .show = config_proc_show
+ .start = dgrp_config_proc_start,
+ .next = dgrp_config_proc_next,
+ .stop = dgrp_config_proc_stop,
+ .show = dgrp_config_proc_show,
};
-static int config_proc_open(struct inode *inode, struct file *file)
+static int dgrp_config_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_config_ops);
}
@@ -460,8 +463,9 @@ static int config_proc_open(struct inode *inode, struct file *file)
* write) is treated as an independent request. See the "parse"
* description for more details.
*/
-static ssize_t config_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
+static ssize_t dgrp_config_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *pos)
{
ssize_t retval;
char *inbuf, *sp;
@@ -625,7 +629,7 @@ static int parse_write_config(char *buf)
return retval;
}
-static int info_proc_show(struct seq_file *m, void *v)
+static int dgrp_info_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "version: %s\n", DIGI_VERSION);
seq_puts(m, "register_with_sysfs: 1\n");
@@ -635,27 +639,27 @@ static int info_proc_show(struct seq_file *m, void *v)
return 0;
}
-static int info_proc_open(struct inode *inode, struct file *file)
+static int dgrp_info_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, info_proc_show, NULL);
+ return single_open(file, dgrp_info_proc_show, NULL);
}
-static void *nodeinfo_start(struct seq_file *m, loff_t *pos)
+static void *dgrp_nodeinfo_start(struct seq_file *m, loff_t *pos)
{
return seq_list_start_head(&nd_struct_list, *pos);
}
-static void *nodeinfo_next(struct seq_file *p, void *v, loff_t *pos)
+static void *dgrp_nodeinfo_next(struct seq_file *p, void *v, loff_t *pos)
{
return seq_list_next(v, &nd_struct_list, pos);
}
-static void nodeinfo_stop(struct seq_file *m, void *v)
+static void dgrp_nodeinfo_stop(struct seq_file *m, void *v)
{
}
-static int nodeinfo_show(struct seq_file *m, void *v)
+static int dgrp_nodeinfo_show(struct seq_file *m, void *v)
{
struct nd_struct *nd;
char hwver[8];
@@ -697,13 +701,13 @@ static int nodeinfo_show(struct seq_file *m, void *v)
static const struct seq_operations nodeinfo_ops = {
- .start = nodeinfo_start,
- .next = nodeinfo_next,
- .stop = nodeinfo_stop,
- .show = nodeinfo_show
+ .start = dgrp_nodeinfo_start,
+ .next = dgrp_nodeinfo_next,
+ .stop = dgrp_nodeinfo_stop,
+ .show = dgrp_nodeinfo_show,
};
-static int nodeinfo_proc_open(struct inode *inode, struct file *file)
+static int dgrp_nodeinfo_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &nodeinfo_ops);
}
@@ -773,14 +777,11 @@ static int dgrp_remove_nd(struct nd_struct *nd)
dgrp_remove_node_class_sysfs_files(nd);
}
- if (nd->nd_mon_de)
- unregister_dgrp_device(nd->nd_mon_de);
+ unregister_dgrp_device(nd->nd_mon_de);
- if (nd->nd_ports_de)
- unregister_dgrp_device(nd->nd_ports_de);
+ unregister_dgrp_device(nd->nd_ports_de);
- if (nd->nd_dpa_de)
- unregister_dgrp_device(nd->nd_dpa_de);
+ unregister_dgrp_device(nd->nd_dpa_de);
dgrp_tty_uninit(nd);
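Besides giving the static proc helpers a dgrp_ prefix, the specproc hunks pair the single_open()-based info file with single_release(): single_open() allocates its own seq_file bookkeeping, and releasing it with plain seq_release(), as the code did before, leaked that allocation on every close. The canonical pairing, as a stand-alone example:

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "hello\n");
            return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, demo_show, NULL);
    }

    static const struct file_operations demo_fops = {
            .owner   = THIS_MODULE,
            .open    = demo_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,      /* frees the state single_open() allocated */
    };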
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 51d3ed3dca27..654f6010b473 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/device.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index ca87ce9874b1..5882139d49af 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -119,7 +119,6 @@
static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift)
{
int i;
- int j;
int offset1;
int offset2;
int factor;
@@ -142,7 +141,7 @@ static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift)
/* asm("st:"); */
n = ec->taps;
- for (i = 0, j = offset2; i < n; i++, j++) {
+ for (i = 0; i < n; i++) {
exp = *phist++ * factor;
ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
}
@@ -230,6 +229,7 @@ struct oslec_state *oslec_create(int len, int adaption_mode)
{
struct oslec_state *ec;
int i;
+ const int16_t *history;
ec = kzalloc(sizeof(*ec), GFP_KERNEL);
if (!ec)
@@ -239,15 +239,22 @@ struct oslec_state *oslec_create(int len, int adaption_mode)
ec->log2taps = top_bit(len);
ec->curr_pos = ec->taps - 1;
- for (i = 0; i < 2; i++) {
- ec->fir_taps16[i] =
- kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
- if (!ec->fir_taps16[i])
- goto error_oom;
- }
+ ec->fir_taps16[0] =
+ kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
+ if (!ec->fir_taps16[0])
+ goto error_oom_0;
+
+ ec->fir_taps16[1] =
+ kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
+ if (!ec->fir_taps16[1])
+ goto error_oom_1;
- fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps);
- fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps);
+ history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps);
+ if (!history)
+ goto error_state;
+ history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps);
+ if (!history)
+ goto error_state_bg;
for (i = 0; i < 5; i++)
ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
@@ -257,7 +264,7 @@ struct oslec_state *oslec_create(int len, int adaption_mode)
ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
if (!ec->snapshot)
- goto error_oom;
+ goto error_snap;
ec->cond_met = 0;
ec->Pstates = 0;
@@ -270,10 +277,15 @@ struct oslec_state *oslec_create(int len, int adaption_mode)
return ec;
-error_oom:
- for (i = 0; i < 2; i++)
- kfree(ec->fir_taps16[i]);
-
+error_snap:
+ fir16_free(&ec->fir_state_bg);
+error_state_bg:
+ fir16_free(&ec->fir_state);
+error_state:
+ kfree(ec->fir_taps16[1]);
+error_oom_1:
+ kfree(ec->fir_taps16[0]);
+error_oom_0:
kfree(ec);
return NULL;
}
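oslec_create() now checks the fir16_create() return values and unwinds with one label per allocation, freeing in the reverse order things were set up, instead of a single error_oom label that only knew about the tap arrays. The general ladder, with generic resource names:

    #include <linux/slab.h>

    struct demo_state {
            void *first;
            void *second;
    };

    static struct demo_state *demo_create(void)
    {
            struct demo_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

            if (!s)
                    return NULL;

            s->first = kzalloc(64, GFP_KERNEL);
            if (!s->first)
                    goto err_free_state;

            s->second = kzalloc(64, GFP_KERNEL);
            if (!s->second)
                    goto err_free_first;

            return s;

    err_free_first:
            kfree(s->first);        /* undo only what already succeeded */
    err_free_state:
            kfree(s);
            return NULL;
    }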
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 38537d4c4e14..05ad08501663 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -9,6 +9,10 @@ driver as they did not build properly at the time.
TODO:
- some rx packets have CRC/code/frame errors
+ - Look at reducing the number of spinlocks
+ - Simplify code in nic_rx_pkts(), when determining multicast_pkts_rcvd
+ - Implement NAPI support
+ - In et131x_tx(), don't return NETDEV_TX_BUSY; just drop the packet with kfree_skb().
Please send patches to:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 84bbcd48e264..42ae5e83f907 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -1,5 +1,4 @@
-/*
- * Agere Systems Inc.
+/* Agere Systems Inc.
* 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
*
* Copyright © 2005 Agere Systems Inc.
@@ -50,7 +49,6 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -102,8 +100,7 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere S
#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */
/* ISR defines */
-/*
- * For interrupts, normal running is:
+/* For interrupts, normal running is:
* rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
* watchdog_interrupt & txdma_xfer_done
*
@@ -139,19 +136,19 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere S
#define NIC_SEND_HANG_THRESHOLD 0
/* MP_TCB flags */
-#define fMP_DEST_MULTI 0x00000001
-#define fMP_DEST_BROAD 0x00000002
+#define FMP_DEST_MULTI 0x00000001
+#define FMP_DEST_BROAD 0x00000002
/* MP_ADAPTER flags */
-#define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
+#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
/* MP_SHARED flags */
-#define fMP_ADAPTER_LOWER_POWER 0x00200000
+#define FMP_ADAPTER_LOWER_POWER 0x00200000
-#define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
-#define fMP_ADAPTER_HARDWARE_ERROR 0x04000000
+#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
+#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000
-#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
+#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS 0xA4
@@ -245,8 +242,7 @@ struct pkt_stat_desc {
/* Typedefs for the RX DMA status word */
-/*
- * rx status word 0 holds part of the status bits of the Rx DMA engine
+/* rx status word 0 holds part of the status bits of the Rx DMA engine
* that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
* which contains the Free Buffer ring 0 and 1 available offset.
*
@@ -256,8 +252,7 @@ struct pkt_stat_desc {
* bit 26 Wrap flag for FBR0
*/
-/*
- * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
+/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
* that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
* which contains the Packet Status Ring available offset.
*
@@ -267,8 +262,7 @@ struct pkt_stat_desc {
* bit 29-31 unused
*/
-/*
- * struct rx_status_block is a structure representing the status of the Rx
+/* struct rx_status_block is a structure representing the status of the Rx
* DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020
*/
struct rx_status_block {
@@ -276,8 +270,7 @@ struct rx_status_block {
u32 word1;
};
-/*
- * Structure for look-up table holding free buffer ring pointers, addresses
+/* Structure for look-up table holding free buffer ring pointers, addresses
* and state.
*/
struct fbr_lookup {
@@ -293,8 +286,7 @@ struct fbr_lookup {
dma_addr_t buffsize;
};
-/*
- * struct rx_ring is the sructure representing the adaptor's local
+/* struct rx_ring is the structure representing the adaptor's local
* reference(s) to the rings
*/
struct rx_ring {
@@ -317,8 +309,7 @@ struct rx_ring {
};
/* TX defines */
-/*
- * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
+/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
*
* 0-15: length of packet
* 16-27: VLAN tag
@@ -344,6 +335,10 @@ struct rx_ring {
* 14: UDP checksum assist
*/
+#define TXDESC_FLAG_LASTPKT 0x0001
+#define TXDESC_FLAG_FIRSTPKT 0x0002
+#define TXDESC_FLAG_INTPROC 0x0004
+
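The three TXDESC_FLAG_* values name the low bits of the descriptor flags word documented in the comment above (first/last fragment of a packet, interrupt on completion), replacing bare 1/2/4 constants at the call sites. Typical use on the final descriptor of a frame would be along these lines (a sketch, not a quote from the driver):

    static void mark_last_fragment(struct tx_desc *desc)
    {
            /* last fragment of the frame; interrupt the host when the DMA completes */
            desc->flags |= TXDESC_FLAG_LASTPKT | TXDESC_FLAG_INTPROC;
    }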
/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
u32 addr_hi;
@@ -352,8 +347,7 @@ struct tx_desc {
u32 flags; /* data (detailed above) */
};
-/*
- * The status of the Tx DMA engine it sits in free memory, and is pointed to
+/* The status of the Tx DMA engine; it sits in free memory, and is pointed to
* by 0x101c / 0x1020. This is a DMA10 type
*/
@@ -402,15 +396,13 @@ struct tx_ring {
int since_irq;
};
-/*
- * Do not change these values: if changed, then change also in respective
+/* Do not change these values: if changed, then change also in respective
* TXdma and Rxdma engines
*/
#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
#define NUM_TCB 64
-/*
- * These values are all superseded by registry entries to facilitate tuning.
+/* These values are all superseded by registry entries to facilitate tuning.
* Once the desired performance has been achieved, the optimal registry values
* should be re-populated to these #defines:
*/
@@ -555,8 +547,7 @@ static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
u32 reg;
int i;
- /*
- * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
+ /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
* bits 7,1:0 both equal to 1, at least once after reset.
* Subsequent operations need only to check that bits 1:0 are equal
* to 1 prior to starting a single byte read/write
@@ -577,9 +568,7 @@ static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
return -ETIMEDOUT;
}
-
-/**
- * eeprom_write - Write a byte to the ET1310's EEPROM
+/* eeprom_write - Write a byte to the ET1310's EEPROM
* @adapter: pointer to our private adapter structure
* @addr: the address to write
* @data: the value to write
@@ -597,8 +586,7 @@ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
u32 status;
u32 val = 0;
- /*
- * For an EEPROM, an I2C single byte write is defined as a START
+ /* For an EEPROM, an I2C single byte write is defined as a START
* condition followed by the device address, EEPROM address, one byte
* of data and a STOP condition. The STOP condition will trigger the
* EEPROM's internally timed write cycle to the nonvolatile memory.
@@ -610,12 +598,11 @@ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
if (err)
return err;
- /*
- * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
- * and bits 1:0 both =0. Bit 5 should be set according to the
- * type of EEPROM being accessed (1=two byte addressing, 0=one
- * byte addressing).
- */
+ /* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
+ * and bits 1:0 both =0. Bit 5 should be set according to the
+ * type of EEPROM being accessed (1=two byte addressing, 0=one
+ * byte addressing).
+ */
if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
return -EIO;
@@ -628,14 +615,12 @@ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
/* Write the address to the LBCIF Address Register */
if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
break;
- /*
- * Write the data to the LBCIF Data Register (the I2C write
+ /* Write the data to the LBCIF Data Register (the I2C write
* will begin).
*/
if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
break;
- /*
- * Monitor bit 1:0 of the LBCIF Status Register. When bits
+ /* Monitor bit 1:0 of the LBCIF Status Register. When bits
* 1:0 are both equal to 1, the I2C write has completed and the
* internal write cycle of the EEPROM is about to start.
* (bits 1:0 = 01 is a legal state while waiting from both
@@ -646,8 +631,7 @@ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
if (err < 0)
return 0;
- /*
- * Check bit 3 of the LBCIF Status Register. If equal to 1,
+ /* Check bit 3 of the LBCIF Status Register. If equal to 1,
* an error has occurred.Don't break here if we are revision
* 1, this is so we do a blind write for load bug.
*/
@@ -655,8 +639,7 @@ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
&& adapter->pdev->revision == 0)
break;
- /*
- * Check bit 2 of the LBCIF Status Register. If equal to 1 an
+ /* Check bit 2 of the LBCIF Status Register. If equal to 1 an
* ACK error has occurred on the address phase of the write.
* This could be due to an actual hardware failure or the
* EEPROM may still be in its internal write cycle from a
@@ -664,8 +647,7 @@ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
*repeated later.
*/
if (status & LBCIF_STATUS_ACK_ERROR) {
- /*
- * This could be due to an actual hardware failure
+ /* This could be due to an actual hardware failure
* or the EEPROM may still be in its internal write
* cycle from a previous write. This write operation
* was ignored and must be repeated later.
@@ -678,8 +660,7 @@ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
break;
}
- /*
- * Set bit 6 of the LBCIF Control Register = 0.
+ /* Set bit 6 of the LBCIF Control Register = 0.
*/
udelay(10);
@@ -708,8 +689,7 @@ static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
return writeok ? 0 : -EIO;
}
-/**
- * eeprom_read - Read a byte from the ET1310's EEPROM
+/* eeprom_read - Read a byte from the ET1310's EEPROM
* @adapter: pointer to our private adapter structure
* @addr: the address from which to read
* @pdata: a pointer to a byte in which to store the value of the read
@@ -724,16 +704,14 @@ static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
int err;
u32 status;
- /*
- * A single byte read is similar to the single byte write, with the
+ /* A single byte read is similar to the single byte write, with the
* exception of the data flow:
*/
err = eeprom_wait_ready(pdev, NULL);
if (err)
return err;
- /*
- * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
+ /* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
* and bits 1:0 both =0. Bit 5 should be set according to the type
* of EEPROM being accessed (1=two byte addressing, 0=one byte
* addressing).
@@ -741,27 +719,23 @@ static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
LBCIF_CONTROL_LBCIF_ENABLE))
return -EIO;
- /*
- * Write the address to the LBCIF Address Register (I2C read will
+ /* Write the address to the LBCIF Address Register (I2C read will
* begin).
*/
if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
return -EIO;
- /*
- * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
+ /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
* is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
* has occurred).
*/
err = eeprom_wait_ready(pdev, &status);
if (err < 0)
return err;
- /*
- * Regardless of error status, read data byte from LBCIF Data
+ /* Regardless of error status, read data byte from LBCIF Data
* Register.
*/
*pdata = err;
- /*
- * Check bit 2 of the LBCIF Status Register. If = 1,
+ /* Check bit 2 of the LBCIF Status Register. If = 1,
* then an error has occurred.
*/
return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
@@ -775,13 +749,12 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
/* We first need to check the EEPROM Status code located at offset
* 0xB2 of config space
*/
- pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
- &eestatus);
+ pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);
/* THIS IS A WORKAROUND:
* I need to call this function twice to get my card in a
* LG M1 Express Dual running. I tried also a msleep before this
- * function, because I thought there could be some time condidions
+ * function, because I thought there could be some time conditions
* but it didn't work. Call the whole function twice also work.
*/
if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
@@ -836,36 +809,35 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
return 0;
}
-/**
- * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
+/* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
* @adapter: pointer to our adapter structure
*/
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the receive dma configuration register for normal operation */
- u32 csr = 0x2000; /* FBR1 enable */
+ u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
if (adapter->rx_ring.fbr[1]->buffsize == 4096)
- csr |= 0x0800;
+ csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
else if (adapter->rx_ring.fbr[1]->buffsize == 8192)
- csr |= 0x1000;
+ csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
else if (adapter->rx_ring.fbr[1]->buffsize == 16384)
- csr |= 0x1800;
+ csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
- csr |= 0x0400; /* FBR0 enable */
+ csr |= ET_RXDMA_CSR_FBR0_ENABLE;
if (adapter->rx_ring.fbr[0]->buffsize == 256)
- csr |= 0x0100;
+ csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
else if (adapter->rx_ring.fbr[0]->buffsize == 512)
- csr |= 0x0200;
+ csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
else if (adapter->rx_ring.fbr[0]->buffsize == 1024)
- csr |= 0x0300;
+ csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
writel(csr, &adapter->regs->rxdma.csr);
csr = readl(&adapter->regs->rxdma.csr);
- if (csr & 0x00020000) {
+ if (csr & ET_RXDMA_CSR_HALT_STATUS) {
udelay(5);
csr = readl(&adapter->regs->rxdma.csr);
- if (csr & 0x00020000) {
+ if (csr & ET_RXDMA_CSR_HALT_STATUS) {
dev_err(&adapter->pdev->dev,
"RX Dma failed to exit halt state. CSR 0x%08x\n",
csr);
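Magic CSR values give way to ET_* bit names throughout this file; the enable path above also shows the usual pattern for kicking a DMA engine: write the control word, read it back, and only report failure if the halt-status bit is still set after a short delay. Condensed into a helper (ET_RXDMA_CSR_HALT_STATUS as defined in the driver's register header):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    static int rxdma_wait_running(void __iomem *csr_reg)
    {
            if (readl(csr_reg) & ET_RXDMA_CSR_HALT_STATUS) {
                    udelay(5);              /* give the engine a moment to leave halt */
                    if (readl(csr_reg) & ET_RXDMA_CSR_HALT_STATUS)
                            return -EIO;    /* still halted: the enable failed */
            }
            return 0;
    }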
@@ -873,28 +845,27 @@ static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
}
}
-/**
- * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
+/* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
* @adapter: pointer to our adapter structure
*/
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
u32 csr;
/* Setup the receive dma configuration register */
- writel(0x00002001, &adapter->regs->rxdma.csr);
+ writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
+ &adapter->regs->rxdma.csr);
csr = readl(&adapter->regs->rxdma.csr);
- if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */
+ if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
udelay(5);
csr = readl(&adapter->regs->rxdma.csr);
- if ((csr & 0x00020000) == 0)
+ if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
dev_err(&adapter->pdev->dev,
- "RX Dma failed to enter halt state. CSR 0x%08x\n",
- csr);
+ "RX Dma failed to enter halt state. CSR 0x%08x\n",
+ csr);
}
}
-/**
- * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
+/* et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
* @adapter: pointer to our adapter structure
*
* Mainly used after a return to the D0 (full-power) state from a lower state.
@@ -918,8 +889,7 @@ static inline void add_12bit(u32 *v, int n)
*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}
-/**
- * et1310_config_mac_regs1 - Initialize the first part of MAC regs
+/* et1310_config_mac_regs1 - Initialize the first part of MAC regs
* @adapter: pointer to our adapter structure
*/
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
@@ -932,7 +902,10 @@ static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
/* First we need to reset everything. Write to MAC configuration
* register 1 to perform reset.
*/
- writel(0xC00F0000, &macregs->cfg1);
+ writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
+ ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
+ ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
+ &macregs->cfg1);
/* Next lets configure the MAC Inter-packet gap register */
ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
@@ -947,7 +920,7 @@ static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
writel(0, &macregs->if_ctrl);
/* Let's move on to setting up the mii management configuration */
- writel(0x07, &macregs->mii_mgmt_cfg); /* Clock reset 0x7 */
+ writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);
/* Next lets configure the MAC Station Address register. These
* values are read from the EEPROM during initialization and stored
@@ -978,8 +951,7 @@ static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
writel(0, &macregs->cfg1);
}
-/**
- * et1310_config_mac_regs2 - Initialize the second part of MAC regs
+/* et1310_config_mac_regs2 - Initialize the second part of MAC regs
* @adapter: pointer to our adapter structure
*/
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
@@ -998,38 +970,44 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
ifctrl = readl(&mac->if_ctrl);
/* Set up the if mode bits */
- cfg2 &= ~0x300;
+ cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
if (phydev && phydev->speed == SPEED_1000) {
- cfg2 |= 0x200;
+ cfg2 |= ET_MAC_CFG2_IFMODE_1000;
/* Phy mode bit */
- ifctrl &= ~(1 << 24);
+ ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
} else {
- cfg2 |= 0x100;
- ifctrl |= (1 << 24);
+ cfg2 |= ET_MAC_CFG2_IFMODE_100;
+ ifctrl |= ET_MAC_IFCTRL_PHYMODE;
}
/* We need to enable Rx/Tx */
- cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
+ cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
+ ET_MAC_CFG1_TX_FLOW;
/* Initialize loop back to off */
- cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
+ cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
if (adapter->flowcontrol == FLOW_RXONLY ||
adapter->flowcontrol == FLOW_BOTH)
- cfg1 |= CFG1_RX_FLOW;
+ cfg1 |= ET_MAC_CFG1_RX_FLOW;
writel(cfg1, &mac->cfg1);
/* Now we need to initialize the MAC Configuration 2 register */
/* preamble 7, check length, huge frame off, pad crc, crc enable
- full duplex off */
- cfg2 |= 0x7016;
- cfg2 &= ~0x0021;
+ * full duplex off
+ */
+ cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
+ cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
+ cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
+ cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
+ cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
+ cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;
/* Turn on duplex if needed */
if (phydev && phydev->duplex == DUPLEX_FULL)
- cfg2 |= 0x01;
+ cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;
- ifctrl &= ~(1 << 26);
+ ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
if (phydev && phydev->duplex == DUPLEX_HALF)
- ifctrl |= (1<<26); /* Enable ghd */
+ ifctrl |= ET_MAC_IFCTRL_GHDMODE;
writel(ifctrl, &mac->if_ctrl);
writel(cfg2, &mac->cfg2);
@@ -1038,7 +1016,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
udelay(10);
delay++;
cfg1 = readl(&mac->cfg1);
- } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
+ } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);
if (delay == 100) {
dev_warn(&adapter->pdev->dev,
@@ -1047,18 +1025,17 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
}
/* Enable txmac */
- ctl |= 0x09; /* TX mac enable, FC disable */
+ ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
writel(ctl, &adapter->regs->txmac.ctl);
/* Ready to start the RXDMA/TXDMA engine */
- if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
+ if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
et131x_rx_dma_enable(adapter);
et131x_tx_dma_enable(adapter);
}
}
-/**
- * et1310_in_phy_coma - check if the device is in phy coma
+/* et1310_in_phy_coma - check if the device is in phy coma
* @adapter: pointer to our adapter structure
*
* Returns 0 if the device is not in phy coma, 1 if it is in phy coma
@@ -1139,19 +1116,19 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
* Set up unicast packet filter reg 3 to be the octets 2 - 5 of the
* MAC address for first address
*/
- uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
- (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
- (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
+ uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
+ (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
+ (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
adapter->addr[1];
- uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
- (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
- (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
+ uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
+ (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
+ (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
adapter->addr[5];
- uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
- (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
- (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
+ uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
+ (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
+ (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
adapter->addr[5];
pm_csr = readl(&adapter->regs->global.pm_csr);
@@ -1208,13 +1185,13 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0, &rxmac->mask4_word3);
/* Lets setup the WOL Source Address */
- sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
- (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
- (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
+ sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
+ (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
+ (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
adapter->addr[5];
writel(sa_lo, &rxmac->sa_lo);
- sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
+ sa_hi = (u32) (adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
adapter->addr[1];
writel(sa_hi, &rxmac->sa_hi);
@@ -1224,7 +1201,7 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
/* Let's initialize the Unicast Packet filtering address */
if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
et1310_setup_device_for_unicast(adapter);
- pf_ctrl |= 4; /* Unicast filter */
+ pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
} else {
writel(0, &rxmac->uni_pf_addr1);
writel(0, &rxmac->uni_pf_addr2);
@@ -1233,13 +1210,13 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
/* Let's initialize the Multicast hash */
if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
- pf_ctrl |= 2; /* Multicast filter */
+ pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
et1310_setup_device_for_multicast(adapter);
}
/* Runt packet filtering. Didn't work in version A silicon. */
- pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
- pf_ctrl |= 8; /* Fragment filter */
+ pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
+ pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;
if (adapter->registry_jumbo_packet > 8192)
/* In order to transmit jumbo packets greater than 8k, the
@@ -1290,7 +1267,7 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
* but we still leave the packet filter on.
*/
writel(pf_ctrl, &rxmac->pf_ctrl);
- writel(0x9, &rxmac->ctrl);
+ writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}
static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
@@ -1372,8 +1349,7 @@ static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}
-/**
- * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
+/* et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
* @adapter: pointer to our private adapter structure
* @addr: the address of the transceiver
* @reg: the register to read
@@ -1401,7 +1377,7 @@ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
writel(0, &mac->mii_mgmt_cmd);
/* Set up the register we need to read from on the correct PHY */
- writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
+ writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
writel(0x1, &mac->mii_mgmt_cmd);
@@ -1409,7 +1385,7 @@ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
udelay(50);
delay++;
mii_indicator = readl(&mac->mii_mgmt_indicator);
- } while ((mii_indicator & MGMT_WAIT) && delay < 50);
+ } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);
/* If we hit the max delay, we could not read the register */
if (delay == 50) {
@@ -1422,8 +1398,9 @@ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
}
/* If we hit here we were able to read the register and we need to
- * return the value to the caller */
- *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
+ * return the value to the caller
+ */
+ *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;
/* Stop the read operation */
writel(0, &mac->mii_mgmt_cmd);
@@ -1447,8 +1424,7 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}
-/**
- * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
+/* et131x_mii_write - Write to a PHY reg through the MII interface of the MAC
* @adapter: pointer to our private adapter structure
* @reg: the register to read
* @value: 16-bit value to write
@@ -1483,7 +1459,7 @@ static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
writel(0, &mac->mii_mgmt_cmd);
/* Set up the register we need to write to on the correct PHY */
- writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
+ writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
/* Add the value to write to the registers to the mac */
writel(value, &mac->mii_mgmt_ctrl);
@@ -1492,7 +1468,7 @@ static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
udelay(50);
delay++;
mii_indicator = readl(&mac->mii_mgmt_indicator);
- } while ((mii_indicator & MGMT_BUSY) && delay < 100);
+ } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);
/* If we hit the max delay, we could not write the register */
if (delay == 100) {
@@ -1512,8 +1488,7 @@ static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
/* Stop the write operation */
writel(0, &mac->mii_mgmt_cmd);
- /*
- * set the registers we touched back to the state at which we entered
+ /* set the registers we touched back to the state at which we entered
* this function
*/
writel(mii_addr, &mac->mii_mgmt_addr);
@@ -1528,7 +1503,7 @@ static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
u8 *value)
{
u16 reg;
- u16 mask = 0x0001 << bitnum;
+ u16 mask = 1 << bitnum;
/* Read the requested register */
et131x_mii_read(adapter, regnum, &reg);
@@ -1579,7 +1554,8 @@ static void et1310_config_flow_control(struct et131x_adapter *adapter)
(remote_async_pause == TRUEPHY_BIT_CLEAR)) {
adapter->flowcontrol = FLOW_NONE;
} else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
- remote_async_pause == TRUEPHY_SET_BIT) */
+ * remote_async_pause == TRUEPHY_SET_BIT)
+ */
if (adapter->wanted_flow == FLOW_BOTH)
adapter->flowcontrol = FLOW_RXONLY;
else
@@ -1588,8 +1564,7 @@ static void et1310_config_flow_control(struct et131x_adapter *adapter)
}
}
-/**
- * et1310_update_macstat_host_counters - Update the local copy of the statistics
+/* et1310_update_macstat_host_counters - Update the local copy of the statistics
* @adapter: pointer to the adapter structure
*/
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
@@ -1616,8 +1591,7 @@ static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}
-/**
- * et1310_handle_macstat_interrupt
+/* et1310_handle_macstat_interrupt
* @adapter: pointer to the adapter structure
*
* One of the MACSTAT counters has wrapped. Update the local copy of
@@ -1708,8 +1682,7 @@ static int et131x_mdio_reset(struct mii_bus *bus)
return 0;
}
-/**
- * et1310_phy_power_down - PHY power control
+/* et1310_phy_power_down - PHY power control
* @adapter: device to control
* @down: true for off/false for back on
*
@@ -1729,8 +1702,7 @@ static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
et131x_mii_write(adapter, MII_BMCR, data);
}
-/**
- * et131x_xcvr_init - Init the phy if we are setting it into force mode
+/* et131x_xcvr_init - Init the phy if we are setting it into force mode
* @adapter: pointer to our private adapter structure
*
*/
@@ -1761,8 +1733,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter)
}
}
-/**
- * et131x_configure_global_regs - configure JAGCore global regs
+/* et131x_configure_global_regs - configure JAGCore global regs
* @adapter: pointer to our adapter structure
*
* Used to configure the global registers on the JAGCore
@@ -1808,8 +1779,7 @@ static void et131x_configure_global_regs(struct et131x_adapter *adapter)
writel(0, &regs->watchdog_timer);
}
-/**
- * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
+/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
* @adapter: pointer to our adapter structure
*/
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
@@ -1839,7 +1809,7 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
writel(0, &rx_dma->psr_full_offset);
- psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
+ psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
&rx_dma->psr_min_des);
@@ -1849,11 +1819,11 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
rx_local->local_psr_full = 0;
for (id = 0; id < NUM_FBRS; id++) {
- u32 *num_des;
- u32 *full_offset;
- u32 *min_des;
- u32 *base_hi;
- u32 *base_lo;
+ u32 __iomem *num_des;
+ u32 __iomem *full_offset;
+ u32 __iomem *min_des;
+ u32 __iomem *base_hi;
+ u32 __iomem *base_lo;
if (id == 0) {
num_des = &rx_dma->fbr0_num_des;
@@ -1916,8 +1886,7 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}
-/**
- * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
+/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
* @adapter: pointer to our private adapter structure
*
* Configure the transmit engine with the ring buffers we have created
@@ -1948,8 +1917,7 @@ static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
adapter->tx_ring.send_idx = 0;
}
-/**
- * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
+/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation
* @adapter: pointer to our private adapter structure
*
* Returns 0 on success, errno on failure (as defined in errno.h)
@@ -1977,23 +1945,29 @@ static void et131x_adapter_setup(struct et131x_adapter *adapter)
et131x_xcvr_init(adapter);
}
-/**
- * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
+/* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
* @adapter: pointer to our private adapter structure
*/
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
- /* Disable MAC Core */
- writel(0xc00f0000, &adapter->regs->mac.cfg1);
+ u32 reg;
- /* Set everything to a reset value */
- writel(0x7F, &adapter->regs->global.sw_reset);
- writel(0x000f0000, &adapter->regs->mac.cfg1);
- writel(0x00000000, &adapter->regs->mac.cfg1);
+ /* Disable MAC Core */
+ reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
+ ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
+ ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
+ writel(reg, &adapter->regs->mac.cfg1);
+
+ reg = ET_RESET_ALL;
+ writel(reg, &adapter->regs->global.sw_reset);
+
+ reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
+ ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
+ writel(reg, &adapter->regs->mac.cfg1);
+ writel(0, &adapter->regs->mac.cfg1);
}
-/**
- * et131x_enable_interrupts - enable interrupt
+/* et131x_enable_interrupts - enable interrupt
* @adapter: et131x device
*
* Enable the appropriate interrupts on the ET131x according to our
@@ -2013,8 +1987,7 @@ static void et131x_enable_interrupts(struct et131x_adapter *adapter)
writel(mask, &adapter->regs->global.int_mask);
}
-/**
- * et131x_disable_interrupts - interrupt disable
+/* et131x_disable_interrupts - interrupt disable
* @adapter: et131x device
*
* Block all interrupts from the et131x device at the device itself
@@ -2025,19 +1998,17 @@ static void et131x_disable_interrupts(struct et131x_adapter *adapter)
writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
-/**
- * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
+/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
* @adapter: pointer to our adapter structure
*/
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
/* Setup the tramsmit dma configuration register */
- writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
+ writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
&adapter->regs->txdma.csr);
}
-/**
- * et131x_enable_txrx - Enable tx/rx queues
+/* et131x_enable_txrx - Enable tx/rx queues
* @netdev: device to be enabled
*/
static void et131x_enable_txrx(struct net_device *netdev)
@@ -2049,15 +2020,14 @@ static void et131x_enable_txrx(struct net_device *netdev)
et131x_tx_dma_enable(adapter);
/* Enable device interrupts */
- if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
+ if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
et131x_enable_interrupts(adapter);
/* We're ready to move some data, so start the queue */
netif_start_queue(netdev);
}
-/**
- * et131x_disable_txrx - Disable tx/rx queues
+/* et131x_disable_txrx - Disable tx/rx queues
* @netdev: device to be disabled
*/
static void et131x_disable_txrx(struct net_device *netdev)
@@ -2075,8 +2045,7 @@ static void et131x_disable_txrx(struct net_device *netdev)
et131x_disable_interrupts(adapter);
}
-/**
- * et131x_init_send - Initialize send data structures
+/* et131x_init_send - Initialize send data structures
* @adapter: pointer to our private adapter structure
*/
static void et131x_init_send(struct et131x_adapter *adapter)
@@ -2109,8 +2078,7 @@ static void et131x_init_send(struct et131x_adapter *adapter)
tx_ring->send_tail = NULL;
}
-/**
- * et1310_enable_phy_coma - called when network cable is unplugged
+/* et1310_enable_phy_coma - called when network cable is unplugged
* @adapter: pointer to our adapter structure
*
* driver receive an phy status change interrupt while in D0 and check that
@@ -2139,8 +2107,7 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
/* Save the GbE PHY speed and duplex modes. Need to restore this
* when cable is plugged back in
*/
- /*
- * TODO - when PM is re-enabled, check if we need to
+ /* TODO - when PM is re-enabled, check if we need to
* perform a similar task as this -
* adapter->pdown_speed = adapter->ai_force_speed;
* adapter->pdown_duplex = adapter->ai_force_duplex;
@@ -2148,7 +2115,7 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
/* Stop sending packets. */
spin_lock_irqsave(&adapter->send_hw_lock, flags);
- adapter->flags |= fMP_ADAPTER_LOWER_POWER;
+ adapter->flags |= FMP_ADAPTER_LOWER_POWER;
spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
/* Wait for outstanding Receive packets */
@@ -2164,8 +2131,7 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
writel(pmcsr, &adapter->regs->global.pm_csr);
}
-/**
- * et1310_disable_phy_coma - Disable the Phy Coma Mode
+/* et1310_disable_phy_coma - Disable the Phy Coma Mode
* @adapter: pointer to our adapter structure
*/
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
@@ -2201,7 +2167,7 @@ static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
et131x_adapter_setup(adapter);
/* Allow Tx to restart */
- adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
+ adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;
et131x_enable_txrx(adapter->netdev);
}
@@ -2211,9 +2177,10 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
u32 tmp_free_buff_ring = *free_buff_ring;
tmp_free_buff_ring++;
/* This works for all cases where limit < 1024. The 1023 case
- works because 1023++ is 1024 which means the if condition is not
- taken but the carry of the bit into the wrap bit toggles the wrap
- value correctly */
+ * works because 1023++ is 1024 which means the if condition is not
+ * taken but the carry of the bit into the wrap bit toggles the wrap
+ * value correctly
+ */
if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
tmp_free_buff_ring &= ~ET_DMA10_MASK;
tmp_free_buff_ring ^= ET_DMA10_WRAP;
@@ -2224,8 +2191,7 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
return tmp_free_buff_ring;
}
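The wrap arithmetic above is terse; a standalone sketch of the same logic may help. The mask and wrap values are assumptions (a 10-bit index in the low bits with the wrap toggle in the bit above), since ET_DMA10_MASK and ET_DMA10_WRAP are not defined in this hunk:

    #include <stdint.h>

    #define DMA10_MASK 0x03FF   /* assumed: low 10 bits hold the ring index */
    #define DMA10_WRAP 0x0400   /* assumed: bit 10 is the wrap toggle */

    static uint32_t bump_ring_index(uint32_t val, uint32_t limit)
    {
            val++;
            /* For limit < 1023 the test below fires; for limit == 1023 the
             * increment carries straight into the wrap bit instead. */
            if ((val & DMA10_MASK) > limit) {
                    val &= ~DMA10_MASK;  /* index wraps back to zero */
                    val ^= DMA10_WRAP;   /* wrap bit toggles */
            }
            return val;
    }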
-/**
- * et131x_rx_dma_memory_alloc
+/* et131x_rx_dma_memory_alloc
* @adapter: pointer to our private adapter structure
*
* Returns 0 on success and errno on failure (as defined in errno.h)
@@ -2365,8 +2331,7 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
pr_info("Packet Status Ring %llx\n",
(unsigned long long) rx_ring->ps_ring_physaddr);
- /*
- * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
+ /* NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
* ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
* are ever returned, make sure the high part is retrieved here before
* storing the adjusted address.
@@ -2392,8 +2357,7 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
return 0;
}
-/**
- * et131x_rx_dma_memory_free - Free all memory allocated within this module.
+/* et131x_rx_dma_memory_free - Free all memory allocated within this module.
* @adapter: pointer to our private adapter structure
*/
static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
@@ -2480,8 +2444,7 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
rx_ring->num_ready_recv = 0;
}
-/**
- * et131x_init_recv - Initialize receive data structures.
+/* et131x_init_recv - Initialize receive data structures.
* @adapter: pointer to our private adapter structure
*
* Returns 0 on success and errno on failure (as defined in errno.h)
@@ -2499,11 +2462,8 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
/* Setup each RFD */
for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
rfd = kzalloc(sizeof(struct rfd), GFP_ATOMIC | GFP_DMA);
-
- if (!rfd) {
- dev_err(&adapter->pdev->dev, "Couldn't alloc RFD\n");
+ if (!rfd)
return -ENOMEM;
- }
rfd->skb = NULL;
@@ -2518,8 +2478,7 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
return 0;
}
-/**
- * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
+/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
* @adapter: pointer to our adapter structure
*/
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
@@ -2538,8 +2497,7 @@ static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
}
}
-/**
- * NICReturnRFD - Recycle a RFD and put it back onto the receive list
+/* NICReturnRFD - Recycle a RFD and put it back onto the receive list
* @adapter: pointer to our adapter
* @rfd: pointer to the RFD
*/
@@ -2555,7 +2513,7 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
* need to clean up OOB data
*/
if (buff_index < rx_local->fbr[ring_index]->num_entries) {
- u32 *offset;
+ u32 __iomem *offset;
struct fbr_desc *next;
spin_lock_irqsave(&adapter->fbr_lock, flags);
@@ -2599,8 +2557,7 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
-/**
- * nic_rx_pkts - Checks the hardware for available packets
+/* nic_rx_pkts - Checks the hardware for available packets
* @adapter: pointer to our adapter
*
* Returns rfd, a pointer to our MPRFD.
@@ -2773,7 +2730,6 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
rx_local->fbr[ring_index]->virt[buff_index],
rfd->len);
- skb->dev = adapter->netdev;
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->ip_summed = CHECKSUM_NONE;
netif_rx_ni(skb);
@@ -2783,8 +2739,7 @@ out:
return rfd;
}
-/**
- * et131x_handle_recv_interrupt - Interrupt handler for receive processing
+/* et131x_handle_recv_interrupt - Interrupt handler for receive processing
* @adapter: pointer to our adapter
*
* Assumption, Rcv spinlock has been acquired.
@@ -2838,8 +2793,7 @@ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
adapter->rx_ring.unfinished_receives = false;
}
-/**
- * et131x_tx_dma_memory_alloc
+/* et131x_tx_dma_memory_alloc
* @adapter: pointer to our private adapter structure
*
* Returns 0 on success and errno on failure (as defined in errno.h).
@@ -2856,12 +2810,10 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
struct tx_ring *tx_ring = &adapter->tx_ring;
/* Allocate memory for the TCB's (Transmit Control Block) */
- adapter->tx_ring.tcb_ring =
- kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
- if (!adapter->tx_ring.tcb_ring) {
- dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
+ adapter->tx_ring.tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
+ GFP_ATOMIC | GFP_DMA);
+ if (!adapter->tx_ring.tcb_ring)
return -ENOMEM;
- }
desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
tx_ring->tx_desc_ring =
@@ -2895,8 +2847,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
return 0;
}
-/**
- * et131x_tx_dma_memory_free - Free all memory allocated within this module
+/* et131x_tx_dma_memory_free - Free all memory allocated within this module
* @adapter: pointer to our private adapter structure
*
* Returns 0 on success and errno on failure (as defined in errno.h).
@@ -2928,8 +2879,7 @@ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
kfree(adapter->tx_ring.tcb_ring);
}
-/**
- * nic_send_packet - NIC specific send handler for version B silicon.
+/* nic_send_packet - NIC specific send handler for version B silicon.
* @adapter: pointer to our adapter
* @tcb: pointer to struct tcb
*
@@ -2977,7 +2927,8 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
*/
if (skb_headlen(skb) <= 1514) {
/* Low 16bits are length, high is vlan and
- unused currently so zero */
+ * unused currently so zero
+ */
desc[frag].len_vlan = skb_headlen(skb);
dma_addr = dma_map_single(&adapter->pdev->dev,
skb->data,
@@ -3022,23 +2973,24 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
if (phydev && phydev->speed == SPEED_1000) {
if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
/* Last element & Interrupt flag */
- desc[frag - 1].flags = 0x5;
+ desc[frag - 1].flags =
+ TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
adapter->tx_ring.since_irq = 0;
} else { /* Last element */
- desc[frag - 1].flags = 0x1;
+ desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
}
} else
- desc[frag - 1].flags = 0x5;
+ desc[frag - 1].flags =
+ TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
- desc[0].flags |= 2; /* First element flag */
+ desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
tcb->index_start = adapter->tx_ring.send_idx;
tcb->stale = 0;
spin_lock_irqsave(&adapter->send_hw_lock, flags);
- thiscopy = NUM_DESC_PER_RING_TX -
- INDEX10(adapter->tx_ring.send_idx);
+ thiscopy = NUM_DESC_PER_RING_TX - INDEX10(adapter->tx_ring.send_idx);
if (thiscopy >= frag) {
remainder = 0;
@@ -3106,8 +3058,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
return 0;
}
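The magic numbers replaced in this function decode into three single-bit descriptor flags. The values below are inferred from the literals they replace (0x1, 0x2, and 0x5 == 0x4 | 0x1) rather than copied from the driver header, so treat them as assumptions:

    #define TXDESC_FLAG_LASTPKT  0x0001  /* last descriptor of the packet */
    #define TXDESC_FLAG_FIRSTPKT 0x0002  /* first descriptor of the packet */
    #define TXDESC_FLAG_INTPROC  0x0004  /* interrupt the host on completion */

With these assumed values, the old 0x5 is INTPROC | LASTPKT, 0x1 is LASTPKT alone, and 0x2 is FIRSTPKT.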
-/**
- * send_packet - Do the work to send a packet
+/* send_packet - Do the work to send a packet
* @skb: the packet(s) to send
* @adapter: a pointer to the device's private adapter structure
*
@@ -3150,9 +3101,9 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
if ((shbufva[0] == 0xffff) &&
(shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
- tcb->flags |= fMP_DEST_BROAD;
+ tcb->flags |= FMP_DEST_BROAD;
} else if ((shbufva[0] & 0x3) == 0x0001) {
- tcb->flags |= fMP_DEST_MULTI;
+ tcb->flags |= FMP_DEST_MULTI;
}
}
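For comparison only, the open-coded 16-bit word tests above classify the destination MAC roughly the way the kernel's etherdevice helpers would; this is not what the driver does, just an equivalent sketch, assuming skb->data still points at the Ethernet header as it does on entry to ndo_start_xmit():

    #include <linux/etherdevice.h>

    /* first six bytes of the frame are the destination address */
    if (is_broadcast_ether_addr(skb->data))
            tcb->flags |= FMP_DEST_BROAD;
    else if (is_multicast_ether_addr(skb->data))
            tcb->flags |= FMP_DEST_MULTI;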
@@ -3178,8 +3129,7 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
return 0;
}
-/**
- * et131x_send_packets - This function is called by the OS to send packets
+/* et131x_send_packets - This function is called by the OS to send packets
* @skb: the packet(s) to send
* @netdev: device on which to TX the above packet(s)
*
@@ -3207,7 +3157,7 @@ static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
/* We need to see if the link is up; if it's not, make the
* netif layer think we're good and drop the packet
*/
- if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
+ if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
!netif_carrier_ok(netdev)) {
dev_kfree_skb_any(skb);
skb = NULL;
@@ -3228,8 +3178,7 @@ static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
return status;
}
-/**
- * free_send_packet - Recycle a struct tcb
+/* free_send_packet - Recycle a struct tcb
* @adapter: pointer to our adapter
* @tcb: pointer to struct tcb
*
@@ -3244,9 +3193,9 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
struct net_device_stats *stats = &adapter->net_stats;
u64 dma_addr;
- if (tcb->flags & fMP_DEST_BROAD)
+ if (tcb->flags & FMP_DEST_BROAD)
atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
- else if (tcb->flags & fMP_DEST_MULTI)
+ else if (tcb->flags & FMP_DEST_MULTI)
atomic_inc(&adapter->stats.multicast_pkts_xmtd);
else
atomic_inc(&adapter->stats.unicast_pkts_xmtd);
@@ -3301,8 +3250,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
WARN_ON(adapter->tx_ring.used < 0);
}
-/**
- * et131x_free_busy_send_packets - Free and complete the stopped active sends
+/* et131x_free_busy_send_packets - Free and complete the stopped active sends
* @adapter: pointer to our adapter
*
* Assumption - Send spinlock has been acquired
@@ -3345,8 +3293,7 @@ static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
adapter->tx_ring.used = 0;
}
-/**
- * et131x_handle_send_interrupt - Interrupt handler for sending processing
+/* et131x_handle_send_interrupt - Interrupt handler for sending processing
* @adapter: pointer to our adapter
*
* Re-claim the send resources, complete sends and get more to send from
@@ -3438,6 +3385,7 @@ static void et131x_get_regs(struct net_device *netdev,
struct address_map __iomem *aregs = adapter->regs;
u32 *regs_buff = regs_data;
u32 num = 0;
+ u16 tmp;
memset(regs_data, 0, et131x_get_regs_len(netdev));
@@ -3445,44 +3393,68 @@ static void et131x_get_regs(struct net_device *netdev,
adapter->pdev->device;
/* PHY regs */
- et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_BMCR, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_BMSR, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_PHYSID1, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_PHYSID2, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_LPA, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_EXPANSION, &tmp);
+ regs_buff[num++] = tmp;
/* Autoneg next page transmit reg */
- et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, 0x07, &tmp);
+ regs_buff[num++] = tmp;
/* Link partner next page reg */
- et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, 0x0b, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, 0x0c, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_MMD_CTRL, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_MMD_DATA, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
-
- et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
- (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
- (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
- (u16 *)&regs_buff[num++]);
-
- et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
- (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
- (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
- et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, 0x08, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_CTRL1000, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_STAT1000, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, 0x0b, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, 0x0c, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, MII_ESTATUS, &tmp);
+ regs_buff[num++] = tmp;
+
+ et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
+ regs_buff[num++] = tmp;
+
+ et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_CONFIG, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_LED_1, &tmp);
+ regs_buff[num++] = tmp;
+ et131x_mii_read(adapter, PHY_LED_2, &tmp);
+ regs_buff[num++] = tmp;
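The temporary exists because regs_buff is an array of u32 while et131x_mii_read() fills a u16; the old cast wrote only half of each 32-bit slot and left the other half stale, with the affected half depending on endianness. A minimal userspace illustration (names made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t slot = 0xdeadbeef;       /* stale contents */
            uint16_t val = 0x1234;

            *(uint16_t *)&slot = val;         /* old style: half-written slot */
            printf("%08x\n", (unsigned)slot); /* 0xdead1234 (LE) or 0x1234beef (BE) */

            slot = val;                       /* new style via a u16 temporary */
            printf("%08x\n", (unsigned)slot); /* always 0x00001234 */
            return 0;
    }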
/* Global regs */
regs_buff[num++] = readl(&aregs->global.txq_start_addr);
@@ -3560,15 +3532,15 @@ static void et131x_get_regs(struct net_device *netdev,
regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
}
-#define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
static void et131x_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
- strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
- strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
}
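strlcpy() (the kernel's bounded string copy, later superseded by strscpy()) always NUL-terminates and is sized from the destination field itself, while strncpy() leaves the buffer unterminated whenever the source is at least as long as the limit. A small sketch with an assumed 8-byte field:

    char bus_info[8];

    strncpy(bus_info, "0000:01:00.0", sizeof(bus_info));
    /* bus_info holds "0000:01:" with no terminating NUL */

    strlcpy(bus_info, "0000:01:00.0", sizeof(bus_info));
    /* bus_info holds "0000:01" plus a terminating NUL */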
static struct ethtool_ops et131x_ethtool_ops = {
@@ -3579,8 +3551,8 @@ static struct ethtool_ops et131x_ethtool_ops = {
.get_regs = et131x_get_regs,
.get_link = ethtool_op_get_link,
};
-/**
- * et131x_hwaddr_init - set up the MAC Address on the ET1310
+
+/* et131x_hwaddr_init - set up the MAC Address on the ET1310
* @adapter: pointer to our private adapter structure
*/
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
@@ -3590,14 +3562,12 @@ static void et131x_hwaddr_init(struct et131x_adapter *adapter)
* device
*/
if (is_zero_ether_addr(adapter->rom_addr)) {
- /*
- * We need to randomly generate the last octet so we
+ /* We need to randomly generate the last octet so we
* decrease our chances of setting the mac address to
* same as another one of our cards in the system
*/
get_random_bytes(&adapter->addr[5], 1);
- /*
- * We have the default value in the register we are
+ /* We have the default value in the register we are
* working with so we need to copy the current
* address into the permanent address
*/
@@ -3613,8 +3583,7 @@ static void et131x_hwaddr_init(struct et131x_adapter *adapter)
}
}
-/**
- * et131x_pci_init - initial PCI setup
+/* et131x_pci_init - initial PCI setup
* @adapter: pointer to our private adapter structure
* @pdev: our PCI device
*
@@ -3706,8 +3675,7 @@ err_out:
goto out;
}
-/**
- * et131x_error_timer_handler
+/* et131x_error_timer_handler
* @data: timer-specific variable; here a pointer to our adapter structure
*
* The routine called when the error timer expires, to track the number of
@@ -3721,7 +3689,8 @@ static void et131x_error_timer_handler(unsigned long data)
if (et1310_in_phy_coma(adapter)) {
/* Bring the device immediately out of coma, to
* prevent it from sleeping indefinitely, this
- * mechanism could be improved! */
+ * mechanism could be improved!
+ */
et1310_disable_phy_coma(adapter);
adapter->boot_coma = 20;
} else {
@@ -3747,8 +3716,7 @@ static void et131x_error_timer_handler(unsigned long data)
mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
}
-/**
- * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
+/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
* @adapter: pointer to our private adapter structure
*/
static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
@@ -3758,8 +3726,7 @@ static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
et131x_rx_dma_memory_free(adapter);
}
-/**
- * et131x_adapter_memory_alloc
+/* et131x_adapter_memory_alloc
* @adapter: pointer to our private adapter structure
*
* Returns 0 on success, errno on failure (as defined in errno.h).
@@ -3801,60 +3768,54 @@ static void et131x_adjust_link(struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = adapter->phydev;
- if (netif_carrier_ok(netdev)) {
- adapter->boot_coma = 20;
-
- if (phydev && phydev->speed == SPEED_10) {
- /*
- * NOTE - Is there a way to query this without
- * TruePHY?
- * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
- * EMI_TRUEPHY_A13O) {
- */
- u16 register18;
-
- et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
- &register18);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18 | 0x4);
- et131x_mii_write(adapter, PHY_INDEX_REG,
- register18 | 0x8402);
- et131x_mii_write(adapter, PHY_DATA_REG,
- register18 | 511);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18);
- }
-
- et1310_config_flow_control(adapter);
-
- if (phydev && phydev->speed == SPEED_1000 &&
- adapter->registry_jumbo_packet > 2048) {
- u16 reg;
-
- et131x_mii_read(adapter, PHY_CONFIG, &reg);
- reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
- reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
- et131x_mii_write(adapter, PHY_CONFIG, reg);
- }
-
- et131x_set_rx_dma_timer(adapter);
- et1310_config_mac_regs2(adapter);
- }
-
if (phydev && phydev->link != adapter->link) {
- /*
- * Check to see if we are in coma mode and if
+ /* Check to see if we are in coma mode and if
* so, disable it because we will not be able
* to read PHY values until we are out.
*/
if (et1310_in_phy_coma(adapter))
et1310_disable_phy_coma(adapter);
+ adapter->link = phydev->link;
+ phy_print_status(phydev);
+
if (phydev->link) {
adapter->boot_coma = 20;
+ if (phydev && phydev->speed == SPEED_10) {
+ /* NOTE - Is there a way to query this without
+ * TruePHY?
+ * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
+ * EMI_TRUEPHY_A13O) {
+ */
+ u16 register18;
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18 | 0x4);
+ et131x_mii_write(adapter, PHY_INDEX_REG,
+ register18 | 0x8402);
+ et131x_mii_write(adapter, PHY_DATA_REG,
+ register18 | 511);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18);
+ }
+
+ et1310_config_flow_control(adapter);
+
+ if (phydev && phydev->speed == SPEED_1000 &&
+ adapter->registry_jumbo_packet > 2048) {
+ u16 reg;
+
+ et131x_mii_read(adapter, PHY_CONFIG, &reg);
+ reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
+ reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
+ et131x_mii_write(adapter, PHY_CONFIG, reg);
+ }
+
+ et131x_set_rx_dma_timer(adapter);
+ et1310_config_mac_regs2(adapter);
} else {
- dev_warn(&adapter->pdev->dev,
- "Link down - cable problem ?\n");
adapter->boot_coma = 0;
if (phydev->speed == SPEED_10) {
@@ -3883,8 +3844,7 @@ static void et131x_adjust_link(struct net_device *netdev)
/* Re-initialize the send structures */
et131x_init_send(adapter);
- /*
- * Bring the device back to the state it was during
+ /* Bring the device back to the state it was during
* init prior to autonegotiation being complete. This
* way, when we get the auto-neg complete interrupt,
* we can complete init by calling config_mac_regs2.
@@ -3899,9 +3859,6 @@ static void et131x_adjust_link(struct net_device *netdev)
et131x_enable_txrx(netdev);
}
- adapter->link = phydev->link;
-
- phy_print_status(phydev);
}
}
@@ -3917,7 +3874,7 @@ static int et131x_mii_probe(struct net_device *netdev)
}
phydev = phy_connect(netdev, dev_name(&phydev->dev),
- &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
@@ -3944,8 +3901,7 @@ static int et131x_mii_probe(struct net_device *netdev)
return 0;
}
-/**
- * et131x_adapter_init
+/* et131x_adapter_init
* @adapter: pointer to the private adapter struct
* @pdev: pointer to the PCI device
*
@@ -3982,8 +3938,7 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
return adapter;
}
-/**
- * et131x_pci_remove
+/* et131x_pci_remove
* @pdev: a pointer to the device's pci_dev structure
*
* Registered in the pci_driver structure, this function is called when the
@@ -4010,8 +3965,7 @@ static void et131x_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/**
- * et131x_up - Bring up a device for use.
+/* et131x_up - Bring up a device for use.
* @netdev: device to be opened
*/
static void et131x_up(struct net_device *netdev)
@@ -4022,8 +3976,7 @@ static void et131x_up(struct net_device *netdev)
phy_start(adapter->phydev);
}
-/**
- * et131x_down - Bring down the device
+/* et131x_down - Bring down the device
* @netdev: device to be brought down
*/
static void et131x_down(struct net_device *netdev)
@@ -4072,14 +4025,13 @@ static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
#define ET131X_PM_OPS NULL
#endif
-/**
- * et131x_isr - The Interrupt Service Routine for the driver.
+/* et131x_isr - The Interrupt Service Routine for the driver.
* @irq: the IRQ on which the interrupt was received.
* @dev_id: device-specific info (here a pointer to a net_device struct)
*
* Returns a value indicating if the interrupt was handled.
*/
-irqreturn_t et131x_isr(int irq, void *dev_id)
+static irqreturn_t et131x_isr(int irq, void *dev_id)
{
bool handled = true;
struct net_device *netdev = (struct net_device *)dev_id;
@@ -4161,8 +4113,7 @@ out:
return IRQ_RETVAL(handled);
}
-/**
- * et131x_isr_handler - The ISR handler
+/* et131x_isr_handler - The ISR handler
* @p_adapter, a pointer to the device's private adapter structure
*
* scheduled to run in a deferred context by the ISR. This is where the ISR's
@@ -4175,8 +4126,7 @@ static void et131x_isr_handler(struct work_struct *work)
u32 status = adapter->stats.interrupt_status;
struct address_map __iomem *iomem = adapter->regs;
- /*
- * These first two are by far the most common. Once handled, we clear
+ /* These first two are by far the most common. Once handled, we clear
* their two bits in the status word. If the word is now zero, we
* exit.
*/
@@ -4207,8 +4157,7 @@ static void et131x_isr_handler(struct work_struct *work)
/* Handle Free Buffer Ring 0 and 1 Low interrupt */
if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
- /*
- * This indicates the number of unused buffers in RXDMA free
+ /* This indicates the number of unused buffers in RXDMA free
* buffer ring 0 is <= the limit you programmed. Free buffer
* resources need to be returned. Free buffers are consumed as
* packets are passed from the network to the host. The host
@@ -4220,16 +4169,14 @@ static void et131x_isr_handler(struct work_struct *work)
* method of returning resources.
*/
- /*
- * If the user has flow control on, then we will
+ /* If the user has flow control on, then we will
* send a pause packet, otherwise just exit
*/
if (adapter->flowcontrol == FLOW_TXONLY ||
adapter->flowcontrol == FLOW_BOTH) {
u32 pm_csr;
- /*
- * Tell the device to send a pause packet via the back
+ /* Tell the device to send a pause packet via the back
* pressure register (bp req and bp xon/xoff)
*/
pm_csr = readl(&iomem->global.pm_csr);
@@ -4240,8 +4187,7 @@ static void et131x_isr_handler(struct work_struct *work)
/* Handle Packet Status Ring Low Interrupt */
if (status & ET_INTR_RXDMA_STAT_LOW) {
- /*
- * Same idea as with the two Free Buffer Rings. Packets going
+ /* Same idea as with the two Free Buffer Rings. Packets going
* from the network to the host each consume a free buffer
* resource and a packet status resource. These resources are
* passed to the OS. When the OS is done with the resources,
@@ -4252,8 +4198,7 @@ static void et131x_isr_handler(struct work_struct *work)
/* Handle RXDMA Error Interrupt */
if (status & ET_INTR_RXDMA_ERR) {
- /*
- * The rxdma_error interrupt is sent when a time-out on a
+ /* The rxdma_error interrupt is sent when a time-out on a
* request issued by the JAGCore has occurred or a completion is
* returned with an unsuccessful status. In both cases the
* request is considered complete. The JAGCore will
@@ -4276,8 +4221,7 @@ static void et131x_isr_handler(struct work_struct *work)
/* Handle the Wake on LAN Event */
if (status & ET_INTR_WOL) {
- /*
- * This is a secondary interrupt for wake on LAN. The driver
+ /* This is a secondary interrupt for wake on LAN. The driver
* should never see this, if it does, something serious is
* wrong. We will TRAP the message when we are in DBG mode,
* otherwise we will ignore it.
@@ -4289,8 +4233,7 @@ static void et131x_isr_handler(struct work_struct *work)
if (status & ET_INTR_TXMAC) {
u32 err = readl(&iomem->txmac.err);
- /*
- * When any of the errors occur and TXMAC generates an
+ /* When any of the errors occur and TXMAC generates an
* interrupt to report these errors, it usually means that
* TXMAC has detected an error in the data stream retrieved
* from the on-chip Tx Q. All of these errors are catastrophic
@@ -4302,20 +4245,18 @@ static void et131x_isr_handler(struct work_struct *work)
"TXMAC interrupt, error 0x%08x\n",
err);
- /*
- * If we are debugging, we want to see this error, otherwise we
+ /* If we are debugging, we want to see this error, otherwise we
* just want the device to be reset and continue
*/
}
/* Handle RXMAC Interrupt */
if (status & ET_INTR_RXMAC) {
- /*
- * These interrupts are catastrophic to the device, what we need
+ /* These interrupts are catastrophic to the device; what we need
* to do is disable the interrupts and set the flag to cause us
* to reset so we can solve this issue.
*/
- /* MP_SET_FLAG( adapter, fMP_ADAPTER_HARDWARE_ERROR); */
+ /* MP_SET_FLAG( adapter, FMP_ADAPTER_HARDWARE_ERROR); */
dev_warn(&adapter->pdev->dev,
"RXMAC interrupt, error 0x%08x. Requesting reset\n",
@@ -4326,16 +4267,14 @@ static void et131x_isr_handler(struct work_struct *work)
readl(&iomem->rxmac.ctrl),
readl(&iomem->rxmac.rxq_diag));
- /*
- * If we are debugging, we want to see this error, otherwise we
+ /* If we are debugging, we want to see this error, otherwise we
* just want the device to be reset and continue
*/
}
/* Handle MAC_STAT Interrupt */
if (status & ET_INTR_MAC_STAT) {
- /*
- * This means at least one of the un-masked counters in the
+ /* This means at least one of the un-masked counters in the
* MAC_STAT block has rolled over. Use this to maintain the top,
* software managed bits of the counter(s).
*/
@@ -4344,8 +4283,7 @@ static void et131x_isr_handler(struct work_struct *work)
/* Handle SLV Timeout Interrupt */
if (status & ET_INTR_SLV_TIMEOUT) {
- /*
- * This means a timeout has occurred on a read or write request
+ /* This means a timeout has occurred on a read or write request
* to one of the JAGCore registers. The Global Resources block
* has terminated the request and on a read request, returned a
* "fake" value. The most likely reasons are: Bad Address or the
@@ -4356,8 +4294,7 @@ out:
et131x_enable_interrupts(adapter);
}
-/**
- * et131x_stats - Return the current device statistics.
+/* et131x_stats - Return the current device statistics.
* @netdev: device whose stats are being queried
*
* Returns 0 on success, errno on failure (as defined in errno.h)
@@ -4403,8 +4340,7 @@ static struct net_device_stats *et131x_stats(struct net_device *netdev)
return stats;
}
-/**
- * et131x_open - Open the device for use.
+/* et131x_open - Open the device for use.
* @netdev: device to be opened
*
* Returns 0 on success, errno on failure (as defined in errno.h)
@@ -4430,15 +4366,14 @@ static int et131x_open(struct net_device *netdev)
return result;
}
- adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
+ adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
et131x_up(netdev);
return result;
}
-/**
- * et131x_close - Close the device
+/* et131x_close - Close the device
* @netdev: device to be closed
*
* Returns 0 on success, errno on failure (as defined in errno.h)
@@ -4449,15 +4384,14 @@ static int et131x_close(struct net_device *netdev)
et131x_down(netdev);
- adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
+ adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
free_irq(adapter->pdev->irq, netdev);
/* Stop the error timer */
return del_timer_sync(&adapter->error_timer);
}
-/**
- * et131x_ioctl - The I/O Control handler for the driver
+/* et131x_ioctl - The I/O Control handler for the driver
* @netdev: device on which the control request is being made
* @reqbuf: a pointer to the IOCTL request buffer
* @cmd: the IOCTL command code
@@ -4475,8 +4409,7 @@ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
}
-/**
- * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
+/* et131x_set_packet_filter - Configures the Rx Packet filtering on the device
* @adapter: pointer to our private adapter structure
*
* FIXME: lot of dups with MAC code
@@ -4504,8 +4437,7 @@ static int et131x_set_packet_filter(struct et131x_adapter *adapter)
if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
pf_ctrl &= ~7; /* Clear filter bits */
else {
- /*
- * Set us up with Multicast packet filtering. Three cases are
+ /* Set us up with Multicast packet filtering. Three cases are
* possible - (1) we have a multi-cast list, (2) we receive ALL
* multicast entries or (3) we receive none.
*/
@@ -4541,8 +4473,7 @@ static int et131x_set_packet_filter(struct et131x_adapter *adapter)
return status;
}
-/**
- * et131x_multicast - The handler to configure multicasting on the interface
+/* et131x_multicast - The handler to configure multicasting on the interface
* @netdev: a pointer to a net_device struct representing the device
*/
static void et131x_multicast(struct net_device *netdev)
@@ -4611,8 +4542,7 @@ static void et131x_multicast(struct net_device *netdev)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-/**
- * et131x_tx - The handler to tx a packet on the device
+/* et131x_tx - The handler to tx a packet on the device
* @skb: data to be Tx'd
* @netdev: device on which data is to be Tx'd
*
@@ -4644,8 +4574,7 @@ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
return status;
}
-/**
- * et131x_tx_timeout - Timeout handler
+/* et131x_tx_timeout - Timeout handler
* @netdev: a pointer to a net_device struct representing the device
*
* The handler called when a Tx request times out. The timeout period is
@@ -4659,17 +4588,17 @@ static void et131x_tx_timeout(struct net_device *netdev)
unsigned long flags;
/* If the device is closed, ignore the timeout */
- if (~(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
+ if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
return;
/* Any nonrecoverable hardware error?
* Checks adapter->flags for any failure in phy reading
*/
- if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
+ if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
return;
/* Hardware failure? */
- if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
+ if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
dev_err(&adapter->pdev->dev, "hardware error - reset\n");
return;
}
@@ -4703,8 +4632,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-/**
- * et131x_change_mtu - The handler called to change the MTU for the device
+/* et131x_change_mtu - The handler called to change the MTU for the device
* @netdev: device whose MTU is to be changed
* @new_mtu: the desired MTU
*
@@ -4754,8 +4682,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
return result;
}
-/**
- * et131x_set_mac_addr - handler to change the MAC address for the device
+/* et131x_set_mac_addr - handler to change the MAC address for the device
* @netdev: device whose MAC is to be changed
* @new_mac: the desired MAC address
*
@@ -4828,8 +4755,7 @@ static const struct net_device_ops et131x_netdev_ops = {
.ndo_do_ioctl = et131x_ioctl,
};
-/**
- * et131x_pci_setup - Perform device initialization
+/* et131x_pci_setup - Perform device initialization
* @pdev: a pointer to the device's pci_dev structure
* @ent: this device's entry in the pci_device_id table
*
@@ -4963,11 +4889,10 @@ static int et131x_pci_setup(struct pci_dev *pdev,
adapter->mii_bus->read = et131x_mdio_read;
adapter->mii_bus->write = et131x_mdio_write;
adapter->mii_bus->reset = et131x_mdio_reset;
- adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
- if (!adapter->mii_bus->irq) {
- dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
+ adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
+ GFP_KERNEL);
+ if (!adapter->mii_bus->irq)
goto err_mdio_free;
- }
for (ii = 0; ii < PHY_MAX_ADDR; ii++)
adapter->mii_bus->irq[ii] = PHY_POLL;
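kmalloc_array(n, size, flags) is the overflow-checked form of kmalloc(n * size, flags): if n * size would overflow it fails with NULL rather than allocating a short buffer, and the explicit out-of-memory message can be dropped because allocation failures are already logged. A minimal sketch of the pattern (error label taken from the hunk above):

    int *irqs;

    irqs = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
    if (!irqs)
            goto err_mdio_free;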
diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h
index 347e63ddde1f..bbe78a703a23 100644
--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h
@@ -145,6 +145,8 @@
*31: selfclr_disable
*/
+#define ET_RESET_ALL 0x007F
+
/*
* SLV Timer reg at address 0x002C (low 24 bits)
*/
@@ -317,6 +319,14 @@ struct txdma_regs { /* Location: */
* 18-31: unused
*/
+#define ET_RXDMA_CSR_HALT 0x0001
+#define ET_RXDMA_CSR_FBR0_SIZE_LO 0x0100
+#define ET_RXDMA_CSR_FBR0_SIZE_HI 0x0200
+#define ET_RXDMA_CSR_FBR0_ENABLE 0x0400
+#define ET_RXDMA_CSR_FBR1_SIZE_LO 0x0800
+#define ET_RXDMA_CSR_FBR1_SIZE_HI 0x1000
+#define ET_RXDMA_CSR_FBR1_ENABLE 0x2000
+#define ET_RXDMA_CSR_HALT_STATUS 0x00020000
/*
* structure for dma writeback lo reg in rxdma address map
@@ -384,6 +394,8 @@ struct txdma_regs { /* Location: */
* 11-0: psr ndes
*/
+#define ET_RXDMA_PSR_NUM_DES_MASK 0xFFF
+
/*
* structure for packet status ring available offset reg in rxdma address map
* located at address 0x202C
@@ -559,6 +571,9 @@ struct rxdma_regs { /* Location: */
* 0: txmac_en
*/
+#define ET_TX_CTRL_FC_DISABLE 0x0008
+#define ET_TX_CTRL_TXMAC_ENABLE 0x0001
+
/*
* structure for shadow pointer reg in txmac address map
* located at address 0x3004
@@ -674,6 +689,9 @@ struct txmac_regs { /* Location: */
* 0: rxmac_en
*/
+#define ET_RX_CTRL_WOL_DISABLE 0x0008
+#define ET_RX_CTRL_RXMAC_ENABLE 0x0001
+
/*
* structure for Wake On Lan Control and CRC 0 reg in rxmac address map
* located at address 0x4004
@@ -715,9 +733,9 @@ struct txmac_regs { /* Location: */
* 7-0: sa6
*/
-#define ET_WOL_LO_SA3_SHIFT 24
-#define ET_WOL_LO_SA4_SHIFT 16
-#define ET_WOL_LO_SA5_SHIFT 8
+#define ET_RX_WOL_LO_SA3_SHIFT 24
+#define ET_RX_WOL_LO_SA4_SHIFT 16
+#define ET_RX_WOL_LO_SA5_SHIFT 8
/*
* structure for Wake On Lan Source Address Hi reg in rxmac address map
@@ -728,7 +746,7 @@ struct txmac_regs { /* Location: */
* 7-0: sa2
*/
-#define ET_WOL_HI_SA1_SHIFT 8
+#define ET_RX_WOL_HI_SA1_SHIFT 8
/*
* structure for Wake On Lan mask reg in rxmac address map
@@ -746,9 +764,9 @@ struct txmac_regs { /* Location: */
* 7-0: addr1_6
*/
-#define ET_UNI_PF_ADDR1_3_SHIFT 24
-#define ET_UNI_PF_ADDR1_4_SHIFT 16
-#define ET_UNI_PF_ADDR1_5_SHIFT 8
+#define ET_RX_UNI_PF_ADDR1_3_SHIFT 24
+#define ET_RX_UNI_PF_ADDR1_4_SHIFT 16
+#define ET_RX_UNI_PF_ADDR1_5_SHIFT 8
/*
* structure for Unicast Packet Filter Address 2 reg in rxmac address map
@@ -760,9 +778,9 @@ struct txmac_regs { /* Location: */
* 7-0: addr2_6
*/
-#define ET_UNI_PF_ADDR2_3_SHIFT 24
-#define ET_UNI_PF_ADDR2_4_SHIFT 16
-#define ET_UNI_PF_ADDR2_5_SHIFT 8
+#define ET_RX_UNI_PF_ADDR2_3_SHIFT 24
+#define ET_RX_UNI_PF_ADDR2_4_SHIFT 16
+#define ET_RX_UNI_PF_ADDR2_5_SHIFT 8
/*
* structure for Unicast Paket Filter Address 1 & 2 reg in rxmac address map
@@ -774,10 +792,9 @@ struct txmac_regs { /* Location: */
* 7-0: addr1_2
*/
-#define ET_UNI_PF_ADDR2_1_SHIFT 24
-#define ET_UNI_PF_ADDR2_2_SHIFT 16
-#define ET_UNI_PF_ADDR1_1_SHIFT 8
-
+#define ET_RX_UNI_PF_ADDR2_1_SHIFT 24
+#define ET_RX_UNI_PF_ADDR2_2_SHIFT 16
+#define ET_RX_UNI_PF_ADDR1_1_SHIFT 8
/*
* structure for Multicast Hash reg in rxmac address map
@@ -798,6 +815,12 @@ struct txmac_regs { /* Location: */
* 0: filter_broad_en
*/
+#define ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT 16
+#define ET_RX_PFCTRL_FRAG_FILTER_ENABLE 0x0008
+#define ET_RX_PFCTRL_UNICST_FILTER_ENABLE 0x0004
+#define ET_RX_PFCTRL_MLTCST_FILTER_ENABLE 0x0002
+#define ET_RX_PFCTRL_BRDCST_FILTER_ENABLE 0x0001
+
/*
* structure for Memory Controller Interface Control Max Segment reg in rxmac
* address map. Located at address 0x4088
@@ -808,6 +831,10 @@ struct txmac_regs { /* Location: */
* 0: seg_en
*/
+#define ET_RX_MCIF_CTRL_MAX_SEG_SIZE_SHIFT 2
+#define ET_RX_MCIF_CTRL_MAX_SEG_FC_ENABLE 0x0002
+#define ET_RX_MCIF_CTRL_MAX_SEG_ENABLE 0x0001
+
/*
* structure for Memory Controller Interface Water Mark reg in rxmac address
* map. Located at address 0x408C
@@ -907,7 +934,6 @@ struct rxmac_regs { /* Location: */
/* END OF RXMAC REGISTER ADDRESS MAP */
-
/* START OF MAC REGISTER ADDRESS MAP */
/*
@@ -932,12 +958,18 @@ struct rxmac_regs { /* Location: */
* 0: tx enable
*/
-#define CFG1_LOOPBACK 0x00000100
-#define CFG1_RX_FLOW 0x00000020
-#define CFG1_TX_FLOW 0x00000010
-#define CFG1_RX_ENABLE 0x00000004
-#define CFG1_TX_ENABLE 0x00000001
-#define CFG1_WAIT 0x0000000A /* RX & TX syncd */
+#define ET_MAC_CFG1_SOFT_RESET 0x80000000
+#define ET_MAC_CFG1_SIM_RESET 0x40000000
+#define ET_MAC_CFG1_RESET_RXMC 0x00080000
+#define ET_MAC_CFG1_RESET_TXMC 0x00040000
+#define ET_MAC_CFG1_RESET_RXFUNC 0x00020000
+#define ET_MAC_CFG1_RESET_TXFUNC 0x00010000
+#define ET_MAC_CFG1_LOOPBACK 0x00000100
+#define ET_MAC_CFG1_RX_FLOW 0x00000020
+#define ET_MAC_CFG1_TX_FLOW 0x00000010
+#define ET_MAC_CFG1_RX_ENABLE 0x00000004
+#define ET_MAC_CFG1_TX_ENABLE 0x00000001
+#define ET_MAC_CFG1_WAIT 0x0000000A /* RX & TX syncd */
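The new ET_MAC_CFG1_* names cover the soft/sim reset and per-block reset bits alongside the existing enable and flow-control bits. A hedged sketch of how a full MAC reset could be spelled with them; the register pointer macregs->cfg1 is an assumption, not something defined in this patch:

    /* assert every reset bit, then release them all */
    writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
           ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
           ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
           &macregs->cfg1);
    writel(0, &macregs->cfg1);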
/*
* structure for configuration #2 reg in mac address map.
@@ -955,6 +987,15 @@ struct rxmac_regs { /* Location: */
* 0: full duplex
*/
+#define ET_MAC_CFG2_PREAMBLE_SHIFT 12
+#define ET_MAC_CFG2_IFMODE_MASK 0x0300
+#define ET_MAC_CFG2_IFMODE_1000 0x0200
+#define ET_MAC_CFG2_IFMODE_100 0x0100
+#define ET_MAC_CFG2_IFMODE_HUGE_FRAME 0x0020
+#define ET_MAC_CFG2_IFMODE_LEN_CHECK 0x0010
+#define ET_MAC_CFG2_IFMODE_PAD_CRC 0x0004
+#define ET_MAC_CFG2_IFMODE_CRC_ENABLE 0x0002
+#define ET_MAC_CFG2_IFMODE_FULL_DPLX 0x0001
/*
* structure for Interpacket gap reg in mac address map.
@@ -1009,6 +1050,8 @@ struct rxmac_regs { /* Location: */
* 2-0: mgmt clock reset
*/
+#define ET_MAC_MIIMGMT_CLK_RST 0x0007
+
/*
* structure for MII Management Command reg in mac address map.
* located at address 0x5024
@@ -1025,7 +1068,7 @@ struct rxmac_regs { /* Location: */
* 4-0: register
*/
-#define MII_ADDR(phy, reg) ((phy) << 8 | (reg))
+#define ET_MAC_MII_ADDR(phy, reg) ((phy) << 8 | (reg))
/*
* structure for MII Management Control reg in mac address map.
@@ -1041,6 +1084,8 @@ struct rxmac_regs { /* Location: */
* 15-0: phy control
*/
+#define ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK 0xFFFF
+
/*
* structure for MII Management Indicators reg in mac address map.
* located at address 0x5034
@@ -1050,8 +1095,8 @@ struct rxmac_regs { /* Location: */
* 0: busy
*/
-#define MGMT_BUSY 0x00000001 /* busy */
-#define MGMT_WAIT 0x00000005 /* busy | not valid */
+#define ET_MAC_MGMT_BUSY 0x00000001 /* busy */
+#define ET_MAC_MGMT_WAIT 0x00000005 /* busy | not valid */
/*
* structure for Interface Control reg in mac address map.
@@ -1076,6 +1121,9 @@ struct rxmac_regs { /* Location: */
* 0: enable jabber protection
*/
+#define ET_MAC_IFCTRL_GHDMODE (1 << 26)
+#define ET_MAC_IFCTRL_PHYMODE (1 << 24)
+
/*
* structure for Interface Status reg in mac address map.
* located at address 0x503C
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 33085782689e..ea9362d7e589 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -678,10 +678,9 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&intf->dev, "Out of memory\n");
+ if (dev == NULL)
goto exit;
- }
+
mutex_init(&dev->mtx);
dev->intf = intf;
init_waitqueue_head(&dev->read_wait);
@@ -721,28 +720,21 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
/* FIXME - there are more usb_alloc routines for dma correctness.
Needed? */
- dev->ring_buffer =
- kmalloc((true_size * sizeof(struct alphatrack_icmd)), GFP_KERNEL);
-
- if (!dev->ring_buffer) {
- dev_err(&intf->dev,
- "Couldn't allocate input ring_buffer of size %d\n",
- true_size);
+ dev->ring_buffer = kmalloc_array(true_size,
+ sizeof(struct alphatrack_icmd),
+ GFP_KERNEL);
+ if (!dev->ring_buffer)
goto error;
- }
- dev->interrupt_in_buffer =
- kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
-
- if (!dev->interrupt_in_buffer) {
- dev_err(&intf->dev, "Couldn't allocate interrupt_in_buffer\n");
+ dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size,
+ GFP_KERNEL);
+ if (!dev->interrupt_in_buffer)
goto error;
- }
+
dev->oldi_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
- if (!dev->oldi_buffer) {
- dev_err(&intf->dev, "Couldn't allocate old buffer\n");
+ if (!dev->oldi_buffer)
goto error;
- }
+
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb) {
dev_err(&intf->dev, "Couldn't allocate interrupt_in_urb\n");
@@ -764,20 +756,17 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
true_size = min(write_buffer_size, WRITE_BUFFER_SIZE);
dev->interrupt_out_buffer =
- kmalloc(true_size * dev->interrupt_out_endpoint_size, GFP_KERNEL);
-
- if (!dev->interrupt_out_buffer) {
- dev_err(&intf->dev, "Couldn't allocate interrupt_out_buffer\n");
+ kmalloc_array(true_size,
+ dev->interrupt_out_endpoint_size,
+ GFP_KERNEL);
+ if (!dev->interrupt_out_buffer)
goto error;
- }
-
- dev->write_buffer =
- kmalloc(true_size * sizeof(struct alphatrack_ocmd), GFP_KERNEL);
- if (!dev->write_buffer) {
- dev_err(&intf->dev, "Couldn't allocate write_buffer\n");
+ dev->write_buffer = kmalloc_array(true_size,
+ sizeof(struct alphatrack_ocmd),
+ GFP_KERNEL);
+ if (!dev->write_buffer)
goto error;
- }
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb) {
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 5196a4e053e6..04b5e66d9861 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -803,10 +803,9 @@ static int usb_tranzport_probe(struct usb_interface *intf,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&intf->dev, "Out of memory\n");
+ if (dev == NULL)
goto exit;
- }
+
mutex_init(&dev->mtx);
dev->intf = intf;
init_waitqueue_head(&dev->read_wait);
@@ -848,18 +847,14 @@ static int usb_tranzport_probe(struct usb_interface *intf,
dev->ring_buffer =
kmalloc((true_size * sizeof(struct tranzport_cmd)) + 8, GFP_KERNEL);
-
- if (!dev->ring_buffer) {
- dev_err(&intf->dev,
- "Couldn't allocate ring_buffer size %d\n", true_size);
+ if (!dev->ring_buffer)
goto error;
- }
+
dev->interrupt_in_buffer =
kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
- if (!dev->interrupt_in_buffer) {
- dev_err(&intf->dev, "Couldn't allocate interrupt_in_buffer\n");
+ if (!dev->interrupt_in_buffer)
goto error;
- }
+
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb) {
dev_err(&intf->dev, "Couldn't allocate interrupt_in_urb\n");
@@ -875,12 +870,11 @@ static int usb_tranzport_probe(struct usb_interface *intf,
"Interrupt out endpoint size is not 8!)\n");
dev->interrupt_out_buffer =
- kmalloc(write_buffer_size * dev->interrupt_out_endpoint_size,
- GFP_KERNEL);
- if (!dev->interrupt_out_buffer) {
- dev_err(&intf->dev, "Couldn't allocate interrupt_out_buffer\n");
+ kmalloc_array(write_buffer_size,
+ dev->interrupt_out_endpoint_size, GFP_KERNEL);
+ if (!dev->interrupt_out_buffer)
goto error;
- }
+
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb) {
dev_err(&intf->dev, "Couldn't allocate interrupt_out_urb\n");
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
index adb436ed2511..65f7ab6cb467 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
@@ -31,41 +31,10 @@
#define SUCCESS 0x00
#define FAILURE 0x01
-struct ft1000_info {
- struct net_device_stats stats;
- u16 DrvErrNum;
- u16 AsicID;
+struct ft1000_pcmcia {
int PktIntfErr;
- int CardReady;
- int registered;
- int mediastate;
u16 packetseqnum;
- u8 squeseqnum; /* sequence number on slow queue */
- spinlock_t dpram_lock;
- u16 fifo_cnt;
- u8 DspVer[DSPVERSZ]; /* DSP version number */
- u8 HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
- u8 Sku[SKUSZ]; /* SKU */
- u8 eui64[EUISZ]; /* EUI64 */
- time_t ConTm; /* Connection Time */
- u16 LedStat;
- u16 ConStat;
- u16 ProgConStat;
- u8 ProductMode[MODESZ];
- u8 RfCalVer[CALVERSZ];
- u8 RfCalDate[CALDATESZ];
- u16 DSP_TIME[4];
- struct list_head prov_list;
- u16 DSPInfoBlklen;
- int (*ft1000_reset)(void *);
void *link;
- u16 DSPInfoBlk[MAX_DSP_SESS_REC];
- union {
- u16 Rec[MAX_DSP_SESS_REC];
- u32 MagRec[MAX_DSP_SESS_REC/2];
- } DSPSess;
- struct proc_dir_entry *proc_ft1000;
- char netdevname[IFNAMSIZ];
};
struct pcmcia_device;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index 86a680c09ba2..29d0a72f0d65 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -328,11 +328,12 @@ static void ft1000_disable_interrupts(struct net_device *dev)
static void ft1000_reset_asic(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
+ struct ft1000_pcmcia *pcmcia = info->priv;
u16 tempword;
DEBUG(1, "ft1000_hw:ft1000_reset_asic called\n");
- (*info->ft1000_reset) (info->link);
+ (*info->ft1000_reset) (pcmcia->link);
// Let's use the register provided by the Magnemite ASIC to reset the
// ASIC and DSP.
@@ -1397,12 +1398,13 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
{
struct ft1000_info *info = netdev_priv(dev);
+ struct ft1000_pcmcia *pcmcia = info->priv;
u16 i;
u32 templong;
u16 tempword;
DEBUG(1, "ft1000:ft1000_hw:ft1000_flush_fifo called\n");
- if (info->PktIntfErr > MAX_PH_ERR) {
+ if (pcmcia->PktIntfErr > MAX_PH_ERR) {
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -1491,7 +1493,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
FIFO_FLUSH_BADCNT;
} else {
// Let's assume that we really flush the FIFO
- info->PktIntfErr++;
+ pcmcia->PktIntfErr++;
return;
}
} else {
@@ -1522,7 +1524,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
DEBUG(0, "FT1000_REG_MAG_DFSR = 0x%x\n", tempword);
}
if (DrvErrNum) {
- info->PktIntfErr++;
+ pcmcia->PktIntfErr++;
}
}
}
@@ -1731,6 +1733,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
{
struct ft1000_info *info = netdev_priv(dev);
+ struct ft1000_pcmcia *pcmcia = info->priv;
union {
struct pseudo_hdr blk;
u16 buff[sizeof(struct pseudo_hdr) >> 1];
@@ -1780,7 +1783,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
pseudo.blk.control = 0;
pseudo.blk.rsvd1 = 0;
pseudo.blk.seq_num = 0;
- pseudo.blk.rsvd2 = info->packetseqnum++;
+ pseudo.blk.rsvd2 = pcmcia->packetseqnum++;
pseudo.blk.qos_class = 0;
/* Calculate pseudo header checksum */
pseudo.blk.checksum = pseudo.buff[0];
@@ -2058,6 +2061,8 @@ void stop_ft1000_card(struct net_device *dev)
kfree(ptr);
}
+ kfree(info->priv);
+
if (info->registered) {
unregister_netdev(dev);
info->registered = 0;
@@ -2077,11 +2082,12 @@ static void ft1000_get_drvinfo(struct net_device *dev,
struct ft1000_info *ft_info;
ft_info = netdev_priv(dev);
- snprintf(info->driver, 32, "ft1000");
- snprintf(info->bus_info, ETHTOOL_BUSINFO_LEN, "PCMCIA 0x%lx",
+ strlcpy(info->driver, "ft1000", sizeof(info->driver));
+ snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx",
dev->base_addr);
- snprintf(info->fw_version, 32, "%d.%d.%d.%d", ft_info->DspVer[0],
- ft_info->DspVer[1], ft_info->DspVer[2], ft_info->DspVer[3]);
+ snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d.%d.%d",
+ ft_info->DspVer[0], ft_info->DspVer[1], ft_info->DspVer[2],
+ ft_info->DspVer[3]);
}
static u32 ft1000_get_link(struct net_device *dev)
@@ -2100,6 +2106,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
void *ft1000_reset)
{
struct ft1000_info *info;
+ struct ft1000_pcmcia *pcmcia;
struct net_device *dev;
static const struct net_device_ops ft1000ops = // Slavius 21.10.2009 due to kernel changes
@@ -2141,10 +2148,13 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
memset(&info->stats, 0, sizeof(struct net_device_stats));
+ info->priv = kzalloc(sizeof(struct ft1000_pcmcia), GFP_KERNEL);
+ pcmcia = info->priv;
+ pcmcia->link = link;
+
spin_lock_init(&info->dpram_lock);
info->DrvErrNum = 0;
info->registered = 1;
- info->link = link;
info->ft1000_reset = ft1000_reset;
info->mediastate = 0;
info->fifo_cnt = 0;
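The hunk above stores the kzalloc() result in info->priv and dereferences it immediately without a NULL check. A more defensive sketch of the same setup, using only names visible in this patch (the error label is hypothetical):

    info = netdev_priv(dev);

    info->priv = kzalloc(sizeof(struct ft1000_pcmcia), GFP_KERNEL);
    if (!info->priv)
            goto err_free_dev;   /* hypothetical error path */

    pcmcia = info->priv;
    pcmcia->link = link;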
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 72727c6b9e2e..5337b415d450 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -175,8 +175,8 @@ static int ft1000NotifyProc(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_CHANGENAME:
- remove_proc_entry(info->netdevname, info->proc_ft1000);
- create_proc_read_entry(dev->name, 0644, info->proc_ft1000,
+ remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
+ create_proc_read_entry(dev->name, 0644, info->ft1000_proc_dir,
ft1000ReadProc, dev);
snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
break;
@@ -194,8 +194,8 @@ void ft1000InitProc(struct net_device *dev)
info = netdev_priv(dev);
- info->proc_ft1000 = proc_mkdir(FT1000_PROC, init_net.proc_net);
- create_proc_read_entry(dev->name, 0644, info->proc_ft1000,
+ info->ft1000_proc_dir = proc_mkdir(FT1000_PROC, init_net.proc_net);
+ create_proc_read_entry(dev->name, 0644, info->ft1000_proc_dir,
ft1000ReadProc, dev);
snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
register_netdevice_notifier(&ft1000_netdev_notifier);
@@ -207,7 +207,7 @@ void ft1000CleanupProc(struct net_device *dev)
info = netdev_priv(dev);
- remove_proc_entry(dev->name, info->proc_ft1000);
+ remove_proc_entry(dev->name, info->ft1000_proc_dir);
remove_proc_entry(FT1000_PROC, init_net.proc_net);
unregister_netdevice_notifier(&ft1000_netdev_notifier);
}
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 6d911fda47fb..297389e8c608 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -137,29 +137,28 @@ void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist)
// Notes: Only called by init_module().
//
//---------------------------------------------------------------------------
-int ft1000_create_dev(struct ft1000_device *dev)
+int ft1000_create_dev(struct ft1000_usb *dev)
{
- struct ft1000_info *info = netdev_priv(dev->net);
int result;
int i;
struct dentry *dir, *file;
struct ft1000_debug_dirs *tmp;
// make a new device name
- sprintf(info->DeviceName, "%s%d", "FT1000_", info->CardNumber);
+ sprintf(dev->DeviceName, "%s%d", "FT1000_", dev->CardNumber);
DEBUG("%s: number of instance = %d\n", __func__, ft1000_flarion_cnt);
- DEBUG("DeviceCreated = %x\n", info->DeviceCreated);
+ DEBUG("DeviceCreated = %x\n", dev->DeviceCreated);
- if (info->DeviceCreated)
+ if (dev->DeviceCreated)
{
- DEBUG("%s: \"%s\" already registered\n", __func__, info->DeviceName);
+ DEBUG("%s: \"%s\" already registered\n", __func__, dev->DeviceName);
return -EIO;
}
// register the device
- DEBUG("%s: \"%s\" debugfs device registration\n", __func__, info->DeviceName);
+ DEBUG("%s: \"%s\" debugfs device registration\n", __func__, dev->DeviceName);
tmp = kmalloc(sizeof(struct ft1000_debug_dirs), GFP_KERNEL);
if (tmp == NULL) {
@@ -167,7 +166,7 @@ int ft1000_create_dev(struct ft1000_device *dev)
goto fail;
}
- dir = debugfs_create_dir(info->DeviceName, NULL);
+ dir = debugfs_create_dir(dev->DeviceName, NULL);
if (IS_ERR(dir)) {
result = PTR_ERR(dir);
goto debug_dir_fail;
@@ -182,27 +181,27 @@ int ft1000_create_dev(struct ft1000_device *dev)
tmp->dent = dir;
tmp->file = file;
- tmp->int_number = info->CardNumber;
- list_add(&(tmp->list), &(info->nodes.list));
+ tmp->int_number = dev->CardNumber;
+ list_add(&(tmp->list), &(dev->nodes.list));
- DEBUG("%s: registered debugfs directory \"%s\"\n", __func__, info->DeviceName);
+ DEBUG("%s: registered debugfs directory \"%s\"\n", __func__, dev->DeviceName);
// initialize application information
- info->appcnt = 0;
+ dev->appcnt = 0;
for (i=0; i<MAX_NUM_APP; i++) {
- info->app_info[i].nTxMsg = 0;
- info->app_info[i].nRxMsg = 0;
- info->app_info[i].nTxMsgReject = 0;
- info->app_info[i].nRxMsgMiss = 0;
- info->app_info[i].fileobject = NULL;
- info->app_info[i].app_id = i+1;
- info->app_info[i].DspBCMsgFlag = 0;
- info->app_info[i].NumOfMsg = 0;
- init_waitqueue_head(&info->app_info[i].wait_dpram_msg);
- INIT_LIST_HEAD (&info->app_info[i].app_sqlist);
+ dev->app_info[i].nTxMsg = 0;
+ dev->app_info[i].nRxMsg = 0;
+ dev->app_info[i].nTxMsgReject = 0;
+ dev->app_info[i].nRxMsgMiss = 0;
+ dev->app_info[i].fileobject = NULL;
+ dev->app_info[i].app_id = i+1;
+ dev->app_info[i].DspBCMsgFlag = 0;
+ dev->app_info[i].NumOfMsg = 0;
+ init_waitqueue_head(&dev->app_info[i].wait_dpram_msg);
+ INIT_LIST_HEAD (&dev->app_info[i].app_sqlist);
}
- info->DeviceCreated = TRUE;
+ dev->DeviceCreated = TRUE;
ft1000_flarion_cnt++;
return 0;
@@ -225,9 +224,10 @@ fail:
// Notes: Only called by cleanup_module().
//
//---------------------------------------------------------------------------
-void ft1000_destroy_dev(struct net_device *dev)
+void ft1000_destroy_dev(struct net_device *netdev)
{
- struct ft1000_info *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(netdev);
+ struct ft1000_usb *dev = info->priv;
int i;
struct dpram_blk *pdpram_blk;
struct dpram_blk *ptr;
@@ -238,12 +238,12 @@ void ft1000_destroy_dev(struct net_device *dev)
- if (info->DeviceCreated)
+ if (dev->DeviceCreated)
{
ft1000_flarion_cnt--;
- list_for_each_safe(pos, q, &info->nodes.list) {
+ list_for_each_safe(pos, q, &dev->nodes.list) {
dir = list_entry(pos, struct ft1000_debug_dirs, list);
- if (dir->int_number == info->CardNumber) {
+ if (dir->int_number == dev->CardNumber) {
debugfs_remove(dir->file);
debugfs_remove(dir->dent);
list_del(pos);
@@ -251,17 +251,17 @@ void ft1000_destroy_dev(struct net_device *dev)
}
}
DEBUG("%s: unregistered device \"%s\"\n", __func__,
- info->DeviceName);
+ dev->DeviceName);
// Make sure we free any memory reserve for slow Queue
for (i=0; i<MAX_NUM_APP; i++) {
- while (list_empty(&info->app_info[i].app_sqlist) == 0) {
- pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
+ pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
list_del(&pdpram_blk->list);
ft1000_free_buffer(pdpram_blk, &freercvpool);
}
- wake_up_interruptible(&info->app_info[i].wait_dpram_msg);
+ wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
}
// Remove buffer allocated for receive command data
@@ -273,7 +273,7 @@ void ft1000_destroy_dev(struct net_device *dev)
kfree(ptr);
}
}
- info->DeviceCreated = FALSE;
+ dev->DeviceCreated = FALSE;
}
@@ -292,7 +292,7 @@ void ft1000_destroy_dev(struct net_device *dev)
static int ft1000_open (struct inode *inode, struct file *file)
{
struct ft1000_info *info;
- struct ft1000_device *dev = (struct ft1000_device *)inode->i_private;
+ struct ft1000_usb *dev = (struct ft1000_usb *)inode->i_private;
int i,num;
DEBUG("%s called\n", __func__);
@@ -301,17 +301,17 @@ static int ft1000_open (struct inode *inode, struct file *file)
info = file->private_data = netdev_priv(dev->net);
- DEBUG("f_owner = %p number of application = %d\n", (&file->f_owner), info->appcnt );
+ DEBUG("f_owner = %p number of application = %d\n", (&file->f_owner), dev->appcnt );
// Check if maximum number of application exceeded
- if (info->appcnt > MAX_NUM_APP) {
+ if (dev->appcnt > MAX_NUM_APP) {
DEBUG("Maximum number of application exceeded\n");
return -EACCES;
}
// Search for available application info block
for (i=0; i<MAX_NUM_APP; i++) {
- if ( (info->app_info[i].fileobject == NULL) ) {
+ if ( (dev->app_info[i].fileobject == NULL) ) {
break;
}
}
@@ -322,12 +322,12 @@ static int ft1000_open (struct inode *inode, struct file *file)
return -EACCES;
}
- info->appcnt++;
- info->app_info[i].fileobject = &file->f_owner;
- info->app_info[i].nTxMsg = 0;
- info->app_info[i].nRxMsg = 0;
- info->app_info[i].nTxMsgReject = 0;
- info->app_info[i].nRxMsgMiss = 0;
+ dev->appcnt++;
+ dev->app_info[i].fileobject = &file->f_owner;
+ dev->app_info[i].nTxMsg = 0;
+ dev->app_info[i].nRxMsg = 0;
+ dev->app_info[i].nTxMsgReject = 0;
+ dev->app_info[i].nRxMsgMiss = 0;
nonseekable_open(inode, file);
return 0;
@@ -347,8 +347,9 @@ static int ft1000_open (struct inode *inode, struct file *file)
static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
{
- struct net_device *dev = file->private_data;
- struct ft1000_info *info;
+ struct net_device *netdev = file->private_data;
+ struct ft1000_info *info = netdev_priv(netdev);
+ struct ft1000_usb *dev = info->priv;
int i;
//DEBUG("ft1000_poll_dev called\n");
@@ -357,12 +358,10 @@ static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
return (-EBADF);
}
- info = netdev_priv(dev);
-
// Search for matching file object
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &file->f_owner) {
- //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+ if ( dev->app_info[i].fileobject == &file->f_owner) {
+ //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", dev->app_info[i].app_id);
break;
}
}
@@ -373,12 +372,12 @@ static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
return ( -EACCES );
}
- if (list_empty(&info->app_info[i].app_sqlist) == 0) {
+ if (list_empty(&dev->app_info[i].app_sqlist) == 0) {
DEBUG("FT1000:ft1000_poll_dev:Message detected in slow queue\n");
return(POLLIN | POLLRDNORM | POLLPRI);
}
- poll_wait (file, &info->app_info[i].wait_dpram_msg, wait);
+ poll_wait (file, &dev->app_info[i].wait_dpram_msg, wait);
//DEBUG("FT1000:ft1000_poll_dev:Polling for data from DSP\n");
return (0);
@@ -399,7 +398,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
{
void __user *argp = (void __user *)argument;
struct ft1000_info *info;
- struct ft1000_device *ft1000dev;
+ struct ft1000_usb *ft1000dev;
int result=0;
int cmd;
int i;
@@ -428,7 +427,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
//DEBUG("FT1000:ft1000_ioctl:command = 0x%x argument = 0x%8x\n", command, (u32)argument);
info = file->private_data;
- ft1000dev = info->pFt1000Dev;
+ ft1000dev = info->priv;
cmd = _IOC_NR(command);
//DEBUG("FT1000:ft1000_ioctl:cmd = 0x%x\n", cmd);
@@ -444,8 +443,8 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
if (tempword == DSPBCMSGID) {
// Search for matching file object
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &file->f_owner) {
- info->app_info[i].DspBCMsgFlag = 1;
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ ft1000dev->app_info[i].DspBCMsgFlag = 1;
DEBUG("FT1000:ft1000_ioctl:Registered for broadcast messages\n");
break;
}
@@ -534,15 +533,15 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
return (-EBADF);
}
- if (info->DrvMsgPend) {
+ if (ft1000dev->DrvMsgPend) {
return (-ENOTTY);
}
- if ( (info->DspAsicReset) || (info->fProvComplete == 0) ) {
+ if (ft1000dev->fProvComplete == 0) {
return (-EACCES);
}
- info->fAppMsgPend = 1;
+ ft1000dev->fAppMsgPend = 1;
if (info->CardReady) {
@@ -571,7 +570,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
else {
// Check if this message came from a registered application
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &file->f_owner) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
break;
}
}
@@ -632,7 +631,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
// Insert slow queue sequence number
ppseudo_hdr->seq_num = info->squeseqnum++;
- ppseudo_hdr->portsrc = info->app_info[app_index].app_id;
+ ppseudo_hdr->portsrc = ft1000dev->app_info[app_index].app_id;
// Calculate new checksum
ppseudo_hdr->checksum = *pmsg++;
//DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum);
@@ -645,7 +644,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
card_send_command(ft1000dev,(unsigned short*)dpram_data,total_len+2);
- info->app_info[app_index].nTxMsg++;
+ ft1000dev->app_info[app_index].nTxMsg++;
}
else {
result = -EINVAL;
@@ -675,8 +674,8 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
// Search for matching file object
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &file->f_owner) {
- //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id);
break;
}
}
@@ -690,13 +689,13 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
result = 0;
pioctl_dpram = argp;
- if (list_empty(&info->app_info[i].app_sqlist) == 0) {
+ if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
//DEBUG("FT1000:ft1000_ioctl:Message detected in slow queue\n");
spin_lock_irqsave(&free_buff_lock, flags);
- pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
list_del(&pdpram_blk->list);
- info->app_info[i].NumOfMsg--;
- //DEBUG("FT1000:ft1000_ioctl:NumOfMsg for app %d = %d\n", i, info->app_info[i].NumOfMsg);
+ ft1000dev->app_info[i].NumOfMsg--;
+ //DEBUG("FT1000:ft1000_ioctl:NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg);
spin_unlock_irqrestore(&free_buff_lock, flags);
msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
result = get_user(msglen, &pioctl_dpram->total_len);
@@ -723,7 +722,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
result = -ENOTTY;
break;
}
- info->fAppMsgPend = 0;
+ ft1000dev->fAppMsgPend = 0;
return result;
}
@@ -741,6 +740,7 @@ static int ft1000_release (struct inode *inode, struct file *file)
{
struct ft1000_info *info;
struct net_device *dev;
+ struct ft1000_usb *ft1000dev;
int i;
struct dpram_blk *pdpram_blk;
@@ -748,16 +748,17 @@ static int ft1000_release (struct inode *inode, struct file *file)
dev = file->private_data;
info = netdev_priv(dev);
+ ft1000dev = info->priv;
if (ft1000_flarion_cnt == 0) {
- info->appcnt--;
+ ft1000dev->appcnt--;
return (-EBADF);
}
// Search for matching file object
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &file->f_owner) {
- //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+ if ( ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id);
break;
}
}
@@ -765,17 +766,17 @@ static int ft1000_release (struct inode *inode, struct file *file)
if (i==MAX_NUM_APP)
return 0;
- while (list_empty(&info->app_info[i].app_sqlist) == 0) {
+ while (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
DEBUG("Remove and free memory queue up on slow queue\n");
- pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
list_del(&pdpram_blk->list);
ft1000_free_buffer(pdpram_blk, &freercvpool);
}
// initialize application information
- info->appcnt--;
- DEBUG("ft1000_chdev:%s:appcnt = %d\n", __FUNCTION__, info->appcnt);
- info->app_info[i].fileobject = NULL;
+ ft1000dev->appcnt--;
+ DEBUG("ft1000_chdev:%s:appcnt = %d\n", __FUNCTION__, ft1000dev->appcnt);
+ ft1000dev->app_info[i].fileobject = NULL;
return 0;
}
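
The char-device hunks above all follow the same access pattern: the per-application state now lives in struct ft1000_usb, which file and netdev paths reach through the ft1000_info private area instead of the old info->pFt1000Dev back-pointer. A minimal sketch of that two-step lookup, with the helper name ft1000_priv() being purely illustrative (it is not part of this patch, and ft1000_usb.h is assumed to provide both structure definitions), could look like:

#include <linux/netdevice.h>
#include "ft1000_usb.h"		/* struct ft1000_info, struct ft1000_usb (assumed) */

/*
 * Illustrative helper only: the net_device private area holds
 * struct ft1000_info, and its ->priv member now points at the
 * USB-specific struct ft1000_usb.
 */
static inline struct ft1000_usb *ft1000_priv(struct net_device *netdev)
{
	struct ft1000_info *info = netdev_priv(netdev);

	return info->priv;
}

ft1000_destroy_dev() and ft1000_poll_dev() above open-code exactly this chain.
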
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 1972b72450d4..5190c8ac4e0a 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -114,7 +114,7 @@ struct dsp_image_info {
//---------------------------------------------------------------------------
// Function: check_usb_db
//
-// Parameters: struct ft1000_device - device structure
+// Parameters: struct ft1000_usb - device structure
//
// Returns: 0 - success
//
@@ -123,7 +123,7 @@ struct dsp_image_info {
// Notes:
//
//---------------------------------------------------------------------------
-static u32 check_usb_db (struct ft1000_device *ft1000dev)
+static u32 check_usb_db (struct ft1000_usb *ft1000dev)
{
int loopcnt;
u16 temp;
@@ -172,7 +172,7 @@ static u32 check_usb_db (struct ft1000_device *ft1000dev)
//---------------------------------------------------------------------------
// Function: get_handshake
//
-// Parameters: struct ft1000_device - device structure
+// Parameters: struct ft1000_usb - device structure
// u16 expected_value - the handshake value expected
//
// Returns: handshakevalue - success
@@ -183,12 +183,11 @@ static u32 check_usb_db (struct ft1000_device *ft1000dev)
// Notes:
//
//---------------------------------------------------------------------------
-static u16 get_handshake(struct ft1000_device *ft1000dev, u16 expected_value)
+static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
{
u16 handshake;
int loopcnt;
u32 status = 0;
- struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
loopcnt = 0;
@@ -196,10 +195,10 @@ static u16 get_handshake(struct ft1000_device *ft1000dev, u16 expected_value)
/* Need to clear downloader doorbell if Hartley ASIC */
status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_RX,
FT1000_REG_DOORBELL);
- if (pft1000info->fcodeldr) {
+ if (ft1000dev->fcodeldr) {
DEBUG(" get_handshake: fcodeldr is %d\n",
- pft1000info->fcodeldr);
- pft1000info->fcodeldr = 0;
+ ft1000dev->fcodeldr);
+ ft1000dev->fcodeldr = 0;
status = check_usb_db(ft1000dev);
if (status != STATUS_SUCCESS) {
DEBUG("get_handshake: check_usb_db failed\n");
@@ -233,7 +232,7 @@ static u16 get_handshake(struct ft1000_device *ft1000dev, u16 expected_value)
//---------------------------------------------------------------------------
// Function: put_handshake
//
-// Parameters: struct ft1000_device - device structure
+// Parameters: struct ft1000_usb - device structure
// u16 handshake_value - handshake to be written
//
// Returns: none
@@ -244,7 +243,7 @@ static u16 get_handshake(struct ft1000_device *ft1000dev, u16 expected_value)
// Notes:
//
//---------------------------------------------------------------------------
-static void put_handshake(struct ft1000_device *ft1000dev,u16 handshake_value)
+static void put_handshake(struct ft1000_usb *ft1000dev,u16 handshake_value)
{
u32 tempx;
u16 tempword;
@@ -263,36 +262,35 @@ static void put_handshake(struct ft1000_device *ft1000dev,u16 handshake_value)
FT1000_REG_DOORBELL);
}
-static u16 get_handshake_usb(struct ft1000_device *ft1000dev, u16 expected_value)
+static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
{
u16 handshake;
int loopcnt;
u16 temp;
u32 status = 0;
- struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
loopcnt = 0;
handshake = 0;
while (loopcnt < 100) {
- if (pft1000info->usbboot == 2) {
+ if (ft1000dev->usbboot == 2) {
status = ft1000_read_dpram32(ft1000dev, 0,
- (u8 *)&(pft1000info->tempbuf[0]), 64);
+ (u8 *)&(ft1000dev->tempbuf[0]), 64);
for (temp = 0; temp < 16; temp++) {
DEBUG("tempbuf %d = 0x%x\n", temp,
- pft1000info->tempbuf[temp]);
+ ft1000dev->tempbuf[temp]);
}
status = ft1000_read_dpram16(ft1000dev,
DWNLD_MAG1_HANDSHAKE_LOC,
(u8 *)&handshake, 1);
DEBUG("handshake from read_dpram16 = 0x%x\n",
handshake);
- if (pft1000info->dspalive == pft1000info->tempbuf[6]) {
+ if (ft1000dev->dspalive == ft1000dev->tempbuf[6]) {
handshake = 0;
} else {
- handshake = pft1000info->tempbuf[1];
- pft1000info->dspalive =
- pft1000info->tempbuf[6];
+ handshake = ft1000dev->tempbuf[1];
+ ft1000dev->dspalive =
+ ft1000dev->tempbuf[6];
}
} else {
status = ft1000_read_dpram16(ft1000dev,
@@ -311,7 +309,7 @@ static u16 get_handshake_usb(struct ft1000_device *ft1000dev, u16 expected_value
return HANDSHAKE_TIMEOUT_VALUE;
}
-static void put_handshake_usb(struct ft1000_device *ft1000dev,u16 handshake_value)
+static void put_handshake_usb(struct ft1000_usb *ft1000dev,u16 handshake_value)
{
int i;
@@ -321,7 +319,7 @@ static void put_handshake_usb(struct ft1000_device *ft1000dev,u16 handshake_valu
//---------------------------------------------------------------------------
// Function: get_request_type
//
-// Parameters: struct ft1000_device - device structure
+// Parameters: struct ft1000_usb - device structure
//
// Returns: request type - success
//
@@ -330,15 +328,14 @@ static void put_handshake_usb(struct ft1000_device *ft1000dev,u16 handshake_valu
// Notes:
//
//---------------------------------------------------------------------------
-static u16 get_request_type(struct ft1000_device *ft1000dev)
+static u16 get_request_type(struct ft1000_usb *ft1000dev)
{
u16 request_type;
u32 status;
u16 tempword;
u32 tempx;
- struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
- if (pft1000info->bootmode == 1) {
+ if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
tempx = ntohl(tempx);
@@ -354,22 +351,21 @@ static u16 get_request_type(struct ft1000_device *ft1000dev)
return request_type;
}
-static u16 get_request_type_usb(struct ft1000_device *ft1000dev)
+static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
{
u16 request_type;
u32 status;
u16 tempword;
u32 tempx;
- struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
- if (pft1000info->bootmode == 1) {
+ if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
tempx = ntohl(tempx);
} else {
- if (pft1000info->usbboot == 2) {
- tempx = pft1000info->tempbuf[2];
- tempword = pft1000info->tempbuf[3];
+ if (ft1000dev->usbboot == 2) {
+ tempx = ft1000dev->tempbuf[2];
+ tempword = ft1000dev->tempbuf[3];
} else {
tempx = 0;
status = ft1000_read_dpram16(ft1000dev,
@@ -387,7 +383,7 @@ static u16 get_request_type_usb(struct ft1000_device *ft1000dev)
//---------------------------------------------------------------------------
// Function: get_request_value
//
-// Parameters: struct ft1000_device - device structure
+// Parameters: struct ft1000_usb - device structure
//
// Returns: request value - success
//
@@ -396,14 +392,13 @@ static u16 get_request_type_usb(struct ft1000_device *ft1000dev)
// Notes:
//
//---------------------------------------------------------------------------
-static long get_request_value(struct ft1000_device *ft1000dev)
+static long get_request_value(struct ft1000_usb *ft1000dev)
{
u32 value;
u16 tempword;
u32 status;
- struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
- if (pft1000info->bootmode == 1) {
+ if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
value = ntohl(value);
@@ -424,7 +419,7 @@ static long get_request_value(struct ft1000_device *ft1000dev)
//---------------------------------------------------------------------------
// Function: put_request_value
//
-// Parameters: struct ft1000_device - device structure
+// Parameters: struct ft1000_usb - device structure
// long lvalue - value to be put into DPRAM location DWNLD_MAG1_SIZE_LOC
//
// Returns: none
@@ -434,7 +429,7 @@ static long get_request_value(struct ft1000_device *ft1000dev)
// Notes:
//
//---------------------------------------------------------------------------
-static void put_request_value(struct ft1000_device *ft1000dev, long lvalue)
+static void put_request_value(struct ft1000_usb *ft1000dev, long lvalue)
{
u32 tempx;
u32 status;
@@ -485,7 +480,7 @@ static int check_buffers(u16 *buff_w, u16 *buff_r, int len, int offset)
//---------------------------------------------------------------------------
// Function: write_blk
//
-// Parameters: struct ft1000_device - device structure
+// Parameters: struct ft1000_usb - device structure
// u16 **pUsFile - DSP image file pointer in u16
// u8 **pUcFile - DSP image file pointer in u8
// long word_length - length of the buffer to be written
@@ -499,7 +494,7 @@ static int check_buffers(u16 *buff_w, u16 *buff_r, int len, int offset)
// Notes:
//
//---------------------------------------------------------------------------
-static u32 write_blk (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length)
+static u32 write_blk (struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length)
{
u32 Status = STATUS_SUCCESS;
u16 dpram;
@@ -507,7 +502,6 @@ static u32 write_blk (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFi
u16 tempword;
u16 tempbuffer[64];
u16 resultbuffer[64];
- struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
//DEBUG("FT1000:download:start word_length = %d\n",(int)word_length);
dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
@@ -548,7 +542,7 @@ static u32 write_blk (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFi
//DEBUG("write_blk: loopcnt is %d\n", loopcnt);
//DEBUG("write_blk: bootmode = %d\n", bootmode);
//DEBUG("write_blk: dpram = %x\n", dpram);
- if (pft1000info->bootmode == 0)
+ if (ft1000dev->bootmode == 0)
{
if (dpram >= 0x3F4)
Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 8);
@@ -625,7 +619,7 @@ static void usb_dnld_complete (struct urb *urb)
//---------------------------------------------------------------------------
// Function: write_blk_fifo
//
-// Parameters: struct ft1000_device - device structure
+// Parameters: struct ft1000_usb - device structure
// u16 **pUsFile - DSP image file pointer in u16
// u8 **pUcFile - DSP image file pointer in u8
// long word_length - length of the buffer to be written
@@ -639,7 +633,7 @@ static void usb_dnld_complete (struct urb *urb)
// Notes:
//
//---------------------------------------------------------------------------
-static u32 write_blk_fifo(struct ft1000_device *ft1000dev, u16 **pUsFile,
+static u32 write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
u8 **pUcFile, long word_length)
{
u32 Status = STATUS_SUCCESS;
@@ -682,7 +676,7 @@ static u32 write_blk_fifo(struct ft1000_device *ft1000dev, u16 **pUsFile,
// Returns: status - return code
//---------------------------------------------------------------------------
-u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
+u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
u32 FileLength)
{
u16 status = STATUS_SUCCESS;
@@ -718,9 +712,9 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
DEBUG("Entered scram_dnldr...\n");
- pft1000info->fcodeldr = 0;
- pft1000info->usbboot = 0;
- pft1000info->dspalive = 0xffff;
+ ft1000dev->fcodeldr = 0;
+ ft1000dev->usbboot = 0;
+ ft1000dev->dspalive = 0xffff;
//
// Get version id of file, at first 4 bytes of file, for newer files.
@@ -745,7 +739,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
switch (state) {
case STATE_START_DWNLD:
DEBUG("FT1000:STATE_START_DWNLD\n");
- if (pft1000info->usbboot)
+ if (ft1000dev->usbboot)
handshake =
get_handshake_usb(ft1000dev,
HANDSHAKE_DSP_BL_READY);
@@ -771,7 +765,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
case STATE_BOOT_DWNLD:
DEBUG("FT1000:STATE_BOOT_DWNLD\n");
- pft1000info->bootmode = 1;
+ ft1000dev->bootmode = 1;
handshake = get_handshake(ft1000dev, HANDSHAKE_REQUEST);
if (handshake == HANDSHAKE_REQUEST) {
/*
@@ -797,7 +791,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
//DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file);
//DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file);
state = STATE_CODE_DWNLD;
- pft1000info->fcodeldr = 1;
+ ft1000dev->fcodeldr = 1;
break;
case REQUEST_CODE_SEGMENT:
//DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");
@@ -842,7 +836,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
status = STATUS_FAILURE;
break;
}
- if (pft1000info->usbboot)
+ if (ft1000dev->usbboot)
put_handshake_usb(ft1000dev,
HANDSHAKE_RESPONSE);
else
@@ -858,8 +852,8 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
case STATE_CODE_DWNLD:
//DEBUG("FT1000:STATE_CODE_DWNLD\n");
- pft1000info->bootmode = 0;
- if (pft1000info->usbboot)
+ ft1000dev->bootmode = 0;
+ if (ft1000dev->usbboot)
handshake =
get_handshake_usb(ft1000dev,
HANDSHAKE_REQUEST);
@@ -870,7 +864,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
/*
* Get type associated with the request.
*/
- if (pft1000info->usbboot)
+ if (ft1000dev->usbboot)
request =
get_request_type_usb(ft1000dev);
else
@@ -916,7 +910,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
}
break;
case REQUEST_DONE_CL:
- pft1000info->usbboot = 3;
+ ft1000dev->usbboot = 3;
/* Reposition ptrs to beginning of provisioning section */
s_file =
(u16 *) (pFileStart +
@@ -965,9 +959,9 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
write_blk_fifo(ft1000dev, &s_file,
&c_file, word_length);
- if (pft1000info->usbboot == 0)
- pft1000info->usbboot++;
- if (pft1000info->usbboot == 1) {
+ if (ft1000dev->usbboot == 0)
+ ft1000dev->usbboot++;
+ if (ft1000dev->usbboot == 1) {
tempword = 0;
ft1000_write_dpram16(ft1000dev,
DWNLD_MAG1_PS_HDR_LOC,
@@ -1117,7 +1111,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
status = STATUS_FAILURE;
break;
}
- if (pft1000info->usbboot)
+ if (ft1000dev->usbboot)
put_handshake_usb(ft1000dev,
HANDSHAKE_RESPONSE);
else
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 809fa4886961..9b8fed7b405b 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -29,12 +29,12 @@
//#define JDEBUG
-static int ft1000_reset(struct net_device *ft1000dev);
+static int ft1000_reset(void *ft1000dev);
static int ft1000_submit_rx_urb(struct ft1000_info *info);
static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ft1000_open (struct net_device *dev);
static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev);
-static int ft1000_chkcard (struct ft1000_device *dev);
+static int ft1000_chkcard (struct ft1000_usb *dev);
static u8 tempbuffer[1600];
@@ -43,7 +43,7 @@ static u8 tempbuffer[1600];
//---------------------------------------------------------------------------
// Function: ft1000_control
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// pipe - usb control message pipe
// request - control request
// requesttype - control message request type
@@ -61,7 +61,7 @@ static u8 tempbuffer[1600];
// Notes:
//
//---------------------------------------------------------------------------
-static int ft1000_control(struct ft1000_device *ft1000dev, unsigned int pipe,
+static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size, int timeout)
{
@@ -84,7 +84,7 @@ static int ft1000_control(struct ft1000_device *ft1000dev, unsigned int pipe,
//---------------------------------------------------------------------------
// Function: ft1000_read_register
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// Data - data buffer to hold the value read
// nRegIndex - register index
//
@@ -97,7 +97,7 @@ static int ft1000_control(struct ft1000_device *ft1000dev, unsigned int pipe,
//
//---------------------------------------------------------------------------
-int ft1000_read_register(struct ft1000_device *ft1000dev, u16* Data,
+int ft1000_read_register(struct ft1000_usb *ft1000dev, u16* Data,
u16 nRegIndx)
{
int ret = STATUS_SUCCESS;
@@ -118,7 +118,7 @@ int ft1000_read_register(struct ft1000_device *ft1000dev, u16* Data,
//---------------------------------------------------------------------------
// Function: ft1000_write_register
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// value - value to write into a register
// nRegIndex - register index
//
@@ -130,7 +130,7 @@ int ft1000_read_register(struct ft1000_device *ft1000dev, u16* Data,
// Notes:
//
//---------------------------------------------------------------------------
-int ft1000_write_register(struct ft1000_device *ft1000dev, u16 value,
+int ft1000_write_register(struct ft1000_usb *ft1000dev, u16 value,
u16 nRegIndx)
{
int ret = STATUS_SUCCESS;
@@ -151,7 +151,7 @@ int ft1000_write_register(struct ft1000_device *ft1000dev, u16 value,
//---------------------------------------------------------------------------
// Function: ft1000_read_dpram32
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// indx - starting address to read
// buffer - data buffer to hold the data read
// cnt - number of byte read from DPRAM
@@ -165,7 +165,7 @@ int ft1000_write_register(struct ft1000_device *ft1000dev, u16 value,
//
//---------------------------------------------------------------------------
-int ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
+int ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u16 cnt)
{
int ret = STATUS_SUCCESS;
@@ -186,7 +186,7 @@ int ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
//---------------------------------------------------------------------------
// Function: ft1000_write_dpram32
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// indx - starting address to write the data
// buffer - data buffer to write into DPRAM
// cnt - number of bytes to write
@@ -199,7 +199,7 @@ int ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
// Notes:
//
//---------------------------------------------------------------------------
-int ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
+int ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u16 cnt)
{
int ret = STATUS_SUCCESS;
@@ -223,7 +223,7 @@ int ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
//---------------------------------------------------------------------------
// Function: ft1000_read_dpram16
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// indx - starting address to read
// buffer - data buffer to hold the data read
// hightlow - high or low 16 bit word
@@ -236,7 +236,7 @@ int ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
// Notes:
//
//---------------------------------------------------------------------------
-int ft1000_read_dpram16(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
+int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u8 highlow)
{
int ret = STATUS_SUCCESS;
@@ -263,7 +263,7 @@ int ft1000_read_dpram16(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
//---------------------------------------------------------------------------
// Function: ft1000_write_dpram16
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// indx - starting address to write the data
// value - 16bits value to write
// hightlow - high or low 16 bit word
@@ -276,7 +276,7 @@ int ft1000_read_dpram16(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer,
// Notes:
//
//---------------------------------------------------------------------------
-int ft1000_write_dpram16(struct ft1000_device *ft1000dev, u16 indx, u16 value, u8 highlow)
+int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value, u8 highlow)
{
int ret = STATUS_SUCCESS;
u8 request;
@@ -302,7 +302,7 @@ int ft1000_write_dpram16(struct ft1000_device *ft1000dev, u16 indx, u16 value, u
//---------------------------------------------------------------------------
// Function: fix_ft1000_read_dpram32
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// indx - starting address to read
// buffer - data buffer to hold the data read
//
@@ -315,7 +315,7 @@ int ft1000_write_dpram16(struct ft1000_device *ft1000dev, u16 indx, u16 value, u
// Notes:
//
//---------------------------------------------------------------------------
-int fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx,
+int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx,
u8 *buffer)
{
u8 buf[16];
@@ -346,7 +346,7 @@ int fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx,
//---------------------------------------------------------------------------
// Function: fix_ft1000_write_dpram32
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// indx - starting address to write
// buffer - data buffer to write
//
@@ -359,7 +359,7 @@ int fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx,
// Notes:
//
//---------------------------------------------------------------------------
-int fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer)
+int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
{
u16 pos1;
u16 pos2;
@@ -426,7 +426,7 @@ int fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buff
//
// Returns: None
//-----------------------------------------------------------------------
-static void card_reset_dsp(struct ft1000_device *ft1000dev, bool value)
+static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
{
u16 status = STATUS_SUCCESS;
u16 tempword;
@@ -465,7 +465,7 @@ static void card_reset_dsp(struct ft1000_device *ft1000dev, bool value)
//---------------------------------------------------------------------------
// Function: card_send_command
//
-// Parameters: ft1000_device - device structure
+// Parameters: ft1000_usb - device structure
// ptempbuffer - command buffer
// size - command buffer size
//
@@ -477,7 +477,7 @@ static void card_reset_dsp(struct ft1000_device *ft1000dev, bool value)
// Notes:
//
//---------------------------------------------------------------------------
-void card_send_command(struct ft1000_device *ft1000dev, void *ptempbuffer,
+void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
int size)
{
unsigned short temp;
@@ -524,7 +524,7 @@ void card_send_command(struct ft1000_device *ft1000dev, void *ptempbuffer,
//
// Returns: None
//-----------------------------------------------------------------------
-int dsp_reload(struct ft1000_device *ft1000dev)
+int dsp_reload(struct ft1000_usb *ft1000dev)
{
u16 status;
u16 tempword;
@@ -588,7 +588,7 @@ int dsp_reload(struct ft1000_device *ft1000dev)
static void ft1000_reset_asic(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
- struct ft1000_device *ft1000dev = info->pFt1000Dev;
+ struct ft1000_usb *ft1000dev = info->priv;
u16 tempword;
DEBUG("ft1000_hw:ft1000_reset_asic called\n");
@@ -627,15 +627,15 @@ static void ft1000_reset_asic(struct net_device *dev)
static int ft1000_reset_card(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
- struct ft1000_device *ft1000dev = info->pFt1000Dev;
+ struct ft1000_usb *ft1000dev = info->priv;
u16 tempword;
struct prov_record *ptr;
DEBUG("ft1000_hw:ft1000_reset_card called.....\n");
- info->fCondResetPend = 1;
+ ft1000dev->fCondResetPend = 1;
info->CardReady = 0;
- info->fProvComplete = 0;
+ ft1000dev->fProvComplete = 0;
/* Make sure we free any memory reserve for provisioning */
while (list_empty(&info->prov_list) == 0) {
@@ -666,7 +666,7 @@ static int ft1000_reset_card(struct net_device *dev)
info->CardReady = 1;
- info->fCondResetPend = 0;
+ ft1000dev->fCondResetPend = 0;
return TRUE;
}
@@ -694,7 +694,7 @@ static const struct net_device_ops ftnet_ops =
// Notes:
//
//---------------------------------------------------------------------------
-int init_ft1000_netdev(struct ft1000_device *ft1000dev)
+int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
{
struct net_device *netdev;
struct ft1000_info *pInfo = NULL;
@@ -702,7 +702,7 @@ int init_ft1000_netdev(struct ft1000_device *ft1000dev)
int i, ret_val;
struct list_head *cur, *tmp;
char card_nr[2];
- unsigned long gCardIndex = 0;
+ u8 gCardIndex = 0;
DEBUG("Enter init_ft1000_netdev...\n");
@@ -723,14 +723,14 @@ int init_ft1000_netdev(struct ft1000_device *ft1000dev)
if (strncmp(netdev->name, "eth", 3) == 0) {
card_nr[0] = netdev->name[3];
card_nr[1] = '\0';
- ret_val = strict_strtoul(card_nr, 10, &gCardIndex);
+ ret_val = kstrtou8(card_nr, 10, &gCardIndex);
if (ret_val) {
printk(KERN_ERR "Can't parse netdev\n");
goto err_net;
}
- pInfo->CardNumber = gCardIndex;
- DEBUG("card number = %d\n", pInfo->CardNumber);
+ ft1000dev->CardNumber = gCardIndex;
+ DEBUG("card number = %d\n", ft1000dev->CardNumber);
} else {
printk(KERN_ERR "ft1000: Invalid device name\n");
ret_val = -ENXIO;
@@ -740,27 +740,27 @@ int init_ft1000_netdev(struct ft1000_device *ft1000dev)
memset(&pInfo->stats, 0, sizeof(struct net_device_stats));
spin_lock_init(&pInfo->dpram_lock);
- pInfo->pFt1000Dev = ft1000dev;
+ pInfo->priv = ft1000dev;
pInfo->DrvErrNum = 0;
pInfo->registered = 1;
pInfo->ft1000_reset = ft1000_reset;
pInfo->mediastate = 0;
pInfo->fifo_cnt = 0;
- pInfo->DeviceCreated = FALSE;
+ ft1000dev->DeviceCreated = FALSE;
pInfo->CardReady = 0;
pInfo->DSP_TIME[0] = 0;
pInfo->DSP_TIME[1] = 0;
pInfo->DSP_TIME[2] = 0;
pInfo->DSP_TIME[3] = 0;
- pInfo->fAppMsgPend = 0;
- pInfo->fCondResetPend = 0;
- pInfo->usbboot = 0;
- pInfo->dspalive = 0;
- memset(&pInfo->tempbuf[0], 0, sizeof(pInfo->tempbuf));
+ ft1000dev->fAppMsgPend = 0;
+ ft1000dev->fCondResetPend = 0;
+ ft1000dev->usbboot = 0;
+ ft1000dev->dspalive = 0;
+ memset(&ft1000dev->tempbuf[0], 0, sizeof(ft1000dev->tempbuf));
INIT_LIST_HEAD(&pInfo->prov_list);
- INIT_LIST_HEAD(&pInfo->nodes.list);
+ INIT_LIST_HEAD(&ft1000dev->nodes.list);
netdev->netdev_ops = &ftnet_ops;
@@ -822,7 +822,7 @@ err_net:
// Notes:
//
//---------------------------------------------------------------------------
-int reg_ft1000_netdev(struct ft1000_device *ft1000dev,
+int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
struct usb_interface *intf)
{
struct net_device *netdev;
@@ -854,7 +854,7 @@ int reg_ft1000_netdev(struct ft1000_device *ft1000dev,
return 0;
}
-static int ft1000_reset(struct net_device *dev)
+int ft1000_reset(void *dev)
{
ft1000_reset_card(dev);
return 0;
@@ -876,7 +876,7 @@ static int ft1000_reset(struct net_device *dev)
static void ft1000_usb_transmit_complete(struct urb *urb)
{
- struct ft1000_device *ft1000dev = urb->context;
+ struct ft1000_usb *ft1000dev = urb->context;
if (urb->status)
pr_err("%s: TX status %d\n", ft1000dev->net->name, urb->status);
@@ -902,7 +902,7 @@ static void ft1000_usb_transmit_complete(struct urb *urb)
static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len)
{
struct ft1000_info *pInfo = netdev_priv(netdev);
- struct ft1000_device *pFt1000Dev = pInfo->pFt1000Dev;
+ struct ft1000_usb *pFt1000Dev = pInfo->priv;
int count, ret;
u8 *t;
@@ -981,7 +981,7 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len)
static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ft1000_info *pInfo = netdev_priv(dev);
- struct ft1000_device *pFt1000Dev = pInfo->pFt1000Dev;
+ struct ft1000_usb *pFt1000Dev = pInfo->priv;
u8 *pdata;
int maxlen, pipe;
@@ -1039,7 +1039,7 @@ err:
static int ft1000_copy_up_pkt(struct urb *urb)
{
struct ft1000_info *info = urb->context;
- struct ft1000_device *ft1000dev = info->pFt1000Dev;
+ struct ft1000_usb *ft1000dev = info->priv;
struct net_device *net = ft1000dev->net;
u16 tempword;
@@ -1134,7 +1134,7 @@ static int ft1000_copy_up_pkt(struct urb *urb)
static int ft1000_submit_rx_urb(struct ft1000_info *info)
{
int result;
- struct ft1000_device *pFt1000Dev = info->pFt1000Dev;
+ struct ft1000_usb *pFt1000Dev = info->priv;
if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
DEBUG("network driver is closed, return\n");
@@ -1177,9 +1177,10 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
static int ft1000_open(struct net_device *dev)
{
struct ft1000_info *pInfo = netdev_priv(dev);
+ struct ft1000_usb *pFt1000Dev = pInfo->priv;
struct timeval tv;
- DEBUG("ft1000_open is called for card %d\n", pInfo->CardNumber);
+ DEBUG("ft1000_open is called for card %d\n", pFt1000Dev->CardNumber);
pInfo->stats.rx_bytes = 0;
pInfo->stats.tx_bytes = 0;
@@ -1213,7 +1214,7 @@ static int ft1000_open(struct net_device *dev)
int ft1000_close(struct net_device *net)
{
struct ft1000_info *pInfo = netdev_priv(net);
- struct ft1000_device *ft1000dev = pInfo->pFt1000Dev;
+ struct ft1000_usb *ft1000dev = pInfo->priv;
ft1000dev->status |= FT1000_STATUS_CLOSING;
@@ -1247,13 +1248,12 @@ static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev)
// TRUE (device is present)
//
//---------------------------------------------------------------------------
-static int ft1000_chkcard(struct ft1000_device *dev)
+static int ft1000_chkcard(struct ft1000_usb *dev)
{
u16 tempword;
u16 status;
- struct ft1000_info *info = netdev_priv(dev->net);
- if (info->fCondResetPend) {
+ if (dev->fCondResetPend) {
DEBUG
("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n");
return TRUE;
@@ -1293,7 +1293,7 @@ static int ft1000_chkcard(struct ft1000_device *dev)
// = 1 (successful)
//
//---------------------------------------------------------------------------
-static bool ft1000_receive_cmd(struct ft1000_device *dev, u16 *pbuffer,
+static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer,
int maxsz, u16 *pnxtph)
{
u16 size, ret;
@@ -1360,7 +1360,7 @@ static bool ft1000_receive_cmd(struct ft1000_device *dev, u16 *pbuffer,
static int ft1000_dsp_prov(void *arg)
{
- struct ft1000_device *dev = (struct ft1000_device *)arg;
+ struct ft1000_usb *dev = (struct ft1000_usb *)arg;
struct ft1000_info *info = netdev_priv(dev->net);
u16 tempword;
u16 len;
@@ -1441,13 +1441,13 @@ static int ft1000_dsp_prov(void *arg)
msleep(100);
- info->fProvComplete = 1;
+ dev->fProvComplete = 1;
info->CardReady = 1;
return STATUS_SUCCESS;
}
-static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
+static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
{
struct ft1000_info *info = netdev_priv(dev->net);
u16 msgtype;
@@ -1498,7 +1498,7 @@ static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
if (pmediamsg->state) {
DEBUG("Media is up\n");
if (info->mediastate == 0) {
- if (info->NetDevRegDone) {
+ if (dev->NetDevRegDone) {
netif_wake_queue(dev->
net);
}
@@ -1508,7 +1508,7 @@ static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
DEBUG("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
- if (info->NetDevRegDone) {
+ if (dev->NetDevRegDone) {
}
info->ConTm = 0;
}
@@ -1567,12 +1567,12 @@ static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
* Send provisioning data to DSP
*/
if (list_empty(&info->prov_list) == 0) {
- info->fProvComplete = 0;
+ dev->fProvComplete = 0;
status = ft1000_dsp_prov(dev);
if (status != STATUS_SUCCESS)
goto out;
} else {
- info->fProvComplete = 1;
+ dev->fProvComplete = 1;
status =
ft1000_write_register(dev, FT1000_DB_HB,
FT1000_REG_DOORBELL);
@@ -1605,7 +1605,7 @@ static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
case DSP_GET_INFO:{
DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n");
/* copy dsp info block to dsp */
- info->DrvMsgPend = 1;
+ dev->DrvMsgPend = 1;
/* allow any outstanding ioctl to finish */
mdelay(10);
status =
@@ -1667,7 +1667,7 @@ static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
status =
ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
FT1000_REG_DOORBELL);
- info->DrvMsgPend = 0;
+ dev->DrvMsgPend = 0;
break;
}
@@ -1675,7 +1675,7 @@ static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
case GET_DRV_ERR_RPT_MSG:{
DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
/* copy driver error message to dsp */
- info->DrvMsgPend = 1;
+ dev->DrvMsgPend = 1;
/* allow any outstanding ioctl to finish */
mdelay(10);
status =
@@ -1735,7 +1735,7 @@ static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
(u16) (0x0012 + PSEUDOSZ));
info->DrvErrNum = 0;
}
- info->DrvMsgPend = 0;
+ dev->DrvMsgPend = 0;
break;
}
@@ -1753,7 +1753,7 @@ out:
int ft1000_poll(void* dev_id)
{
- struct ft1000_device *dev = (struct ft1000_device *)dev_id;
+ struct ft1000_usb *dev = (struct ft1000_usb *)dev_id;
struct ft1000_info *info = netdev_priv(dev->net);
u16 tempword;
@@ -1804,8 +1804,8 @@ int ft1000_poll(void* dev_id)
// Check which application has registered for dsp broadcast messages
for (i=0; i<MAX_NUM_APP; i++) {
- if ( (info->app_info[i].DspBCMsgFlag) && (info->app_info[i].fileobject) &&
- (info->app_info[i].NumOfMsg < MAX_MSG_LIMIT) )
+ if ( (dev->app_info[i].DspBCMsgFlag) && (dev->app_info[i].fileobject) &&
+ (dev->app_info[i].NumOfMsg < MAX_MSG_LIMIT) )
{
nxtph = FT1000_DPRAM_RX_BASE + 2;
pdpram_blk = ft1000_get_buffer (&freercvpool);
@@ -1813,15 +1813,15 @@ int ft1000_poll(void* dev_id)
if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) {
ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer;
// Put message into the appropriate application block
- info->app_info[i].nRxMsg++;
+ dev->app_info[i].nRxMsg++;
spin_lock_irqsave(&free_buff_lock, flags);
- list_add_tail(&pdpram_blk->list, &info->app_info[i].app_sqlist);
- info->app_info[i].NumOfMsg++;
+ list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
+ dev->app_info[i].NumOfMsg++;
spin_unlock_irqrestore(&free_buff_lock, flags);
- wake_up_interruptible(&info->app_info[i].wait_dpram_msg);
+ wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
}
else {
- info->app_info[i].nRxMsgMiss++;
+ dev->app_info[i].nRxMsgMiss++;
// Put memory back to free pool
ft1000_free_buffer(pdpram_blk, &freercvpool);
DEBUG("pdpram_blk::ft1000_get_buffer NULL\n");
@@ -1829,7 +1829,7 @@ int ft1000_poll(void* dev_id)
}
else {
DEBUG("Out of memory in free receive command pool\n");
- info->app_info[i].nRxMsgMiss++;
+ dev->app_info[i].nRxMsgMiss++;
}
}
}
@@ -1842,7 +1842,7 @@ int ft1000_poll(void* dev_id)
ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer;
// Search for correct application block
for (i=0; i<MAX_NUM_APP; i++) {
- if (info->app_info[i].app_id == ppseudo_hdr->portdest) {
+ if (dev->app_info[i].app_id == ppseudo_hdr->portdest) {
break;
}
}
@@ -1853,15 +1853,15 @@ int ft1000_poll(void* dev_id)
ft1000_free_buffer(pdpram_blk, &freercvpool);
}
else {
- if (info->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
+ if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
// Put memory back to free pool
ft1000_free_buffer(pdpram_blk, &freercvpool);
}
else {
- info->app_info[i].nRxMsg++;
+ dev->app_info[i].nRxMsg++;
// Put message into the appropriate application block
- list_add_tail(&pdpram_blk->list, &info->app_info[i].app_sqlist);
- info->app_info[i].NumOfMsg++;
+ list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
+ dev->app_info[i].NumOfMsg++;
}
}
}
@@ -1921,7 +1921,7 @@ int ft1000_poll(void* dev_id)
else if (tempword & FT1000_DB_COND_RESET) {
DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
- if (info->fAppMsgPend == 0) {
+ if (dev->fAppMsgPend == 0) {
// Reset ASIC and DSP
status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (u8 *)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX);
@@ -1934,8 +1934,8 @@ int ft1000_poll(void* dev_id)
info->ft1000_reset(dev->net);
}
else {
- info->fProvComplete = 0;
- info->fCondResetPend = 1;
+ dev->fProvComplete = 0;
+ dev->fCondResetPend = 1;
}
ft1000_write_register(dev, FT1000_DB_COND_RESET, FT1000_REG_DOORBELL);
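
One behavioural detail in the ft1000_hw.c hunks is the switch from strict_strtoul() to kstrtou8() when parsing the card number out of the "ethN" interface name, matching the narrowing of gCardIndex to u8. A standalone sketch of that step, with parse_card_number() being an invented name for illustration only:

#include <linux/errno.h>
#include <linux/kernel.h>	/* kstrtou8() */
#include <linux/string.h>
#include <linux/types.h>

/*
 * Illustration of the card_nr handling in init_ft1000_netdev(): a single
 * digit is copied out of an "ethN" name and range-checked by kstrtou8(),
 * which returns 0 on success or a negative errno.
 */
static int parse_card_number(const char *ifname, u8 *card)
{
	char card_nr[2];

	if (strncmp(ifname, "eth", 3) != 0)
		return -ENXIO;

	card_nr[0] = ifname[3];
	card_nr[1] = '\0';

	return kstrtou8(card_nr, 10, card);
}
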
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
index 1edaddba816f..b99640637fe0 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
@@ -51,7 +51,7 @@
#define FTNET_PROC init_net.proc_net
-int ft1000_read_dpram16 (struct ft1000_device *ft1000dev, u16 indx,
+int ft1000_read_dpram16 (struct ft1000_usb *ft1000dev, u16 indx,
u8 *buffer, u8 highlow);
@@ -94,11 +94,11 @@ ft1000ReadProc(char *page, char **start, off_t off, int count, int *eof,
if (info->ProgConStat != 0xFF) {
- ft1000_read_dpram16(info->pFt1000Dev, FT1000_MAG_DSP_LED,
+ ft1000_read_dpram16(info->priv, FT1000_MAG_DSP_LED,
(u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
info->LedStat = ntohs(ledStat);
- ft1000_read_dpram16(info->pFt1000Dev, FT1000_MAG_DSP_CON_STATE,
+ ft1000_read_dpram16(info->priv, FT1000_MAG_DSP_CON_STATE,
(u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
info->ConStat = ntohs(conStat);
do_gettimeofday(&tv);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index b2ecd0e6780e..614db55a8171 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -63,16 +63,13 @@ static int ft1000_probe(struct usb_interface *interface,
unsigned numaltsetting;
int i, ret = 0, size;
- struct ft1000_device *ft1000dev;
+ struct ft1000_usb *ft1000dev;
struct ft1000_info *pft1000info = NULL;
const struct firmware *dsp_fw;
- ft1000dev = kzalloc(sizeof(struct ft1000_device), GFP_KERNEL);
-
- if (!ft1000dev) {
- pr_err("out of memory allocating device structure\n");
+ ft1000dev = kzalloc(sizeof(struct ft1000_usb), GFP_KERNEL);
+ if (!ft1000dev)
return -ENOMEM;
- }
dev = interface_to_usbdev(interface);
DEBUG("ft1000_probe: usb device descriptor info:\n");
@@ -171,11 +168,11 @@ static int ft1000_probe(struct usb_interface *interface,
}
gPollingfailed = FALSE;
- pft1000info->pPollThread =
+ ft1000dev->pPollThread =
kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
- if (IS_ERR(pft1000info->pPollThread)) {
- ret = PTR_ERR(pft1000info->pPollThread);
+ if (IS_ERR(ft1000dev->pPollThread)) {
+ ret = PTR_ERR(ft1000dev->pPollThread);
goto err_load;
}
@@ -200,7 +197,7 @@ static int ft1000_probe(struct usb_interface *interface,
if (ret)
goto err_proc;
- pft1000info->NetDevRegDone = 1;
+ ft1000dev->NetDevRegDone = 1;
return 0;
@@ -208,7 +205,7 @@ err_proc:
unregister_netdev(ft1000dev->net);
free_netdev(ft1000dev->net);
err_thread:
- kthread_stop(pft1000info->pPollThread);
+ kthread_stop(ft1000dev->pPollThread);
err_load:
kfree(pFileStart);
err_fw:
@@ -219,6 +216,7 @@ err_fw:
static void ft1000_disconnect(struct usb_interface *interface)
{
struct ft1000_info *pft1000info;
+ struct ft1000_usb *ft1000dev;
DEBUG("ft1000_disconnect is called\n");
@@ -226,28 +224,29 @@ static void ft1000_disconnect(struct usb_interface *interface)
DEBUG("In disconnect pft1000info=%p\n", pft1000info);
if (pft1000info) {
+ ft1000dev = pft1000info->priv;
ft1000_cleanup_proc(pft1000info);
- if (pft1000info->pPollThread)
- kthread_stop(pft1000info->pPollThread);
+ if (ft1000dev->pPollThread)
+ kthread_stop(ft1000dev->pPollThread);
DEBUG("ft1000_disconnect: threads are terminated\n");
- if (pft1000info->pFt1000Dev->net) {
+ if (ft1000dev->net) {
DEBUG("ft1000_disconnect: destroy char driver\n");
- ft1000_destroy_dev(pft1000info->pFt1000Dev->net);
- unregister_netdev(pft1000info->pFt1000Dev->net);
+ ft1000_destroy_dev(ft1000dev->net);
+ unregister_netdev(ft1000dev->net);
DEBUG
("ft1000_disconnect: network device unregistered\n");
- free_netdev(pft1000info->pFt1000Dev->net);
+ free_netdev(ft1000dev->net);
}
- usb_free_urb(pft1000info->pFt1000Dev->rx_urb);
- usb_free_urb(pft1000info->pFt1000Dev->tx_urb);
+ usb_free_urb(ft1000dev->rx_urb);
+ usb_free_urb(ft1000dev->tx_urb);
DEBUG("ft1000_disconnect: urb freed\n");
- kfree(pft1000info->pFt1000Dev);
+ kfree(ft1000dev);
}
kfree(pFileStart);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index 2aa6a1c7fd38..bd1da1f19cd2 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -55,7 +55,14 @@ struct app_info_block {
#define MAX_BUF_SIZE 4096
-struct ft1000_device {
+struct ft1000_debug_dirs {
+ struct list_head list;
+ struct dentry *dent;
+ struct dentry *file;
+ int int_number;
+};
+
+struct ft1000_usb {
struct usb_device *dev;
struct net_device *net;
@@ -69,71 +76,26 @@ struct ft1000_device {
u8 bulk_in_endpointAddr;
u8 bulk_out_endpointAddr;
-} __packed;
-
-struct ft1000_debug_dirs {
- struct list_head list;
- struct dentry *dent;
- struct dentry *file;
- int int_number;
-};
-
-struct ft1000_info {
- struct ft1000_device *pFt1000Dev;
- struct net_device_stats stats;
struct task_struct *pPollThread;
-
unsigned char fcodeldr;
unsigned char bootmode;
unsigned char usbboot;
unsigned short dspalive;
- u16 ASIC_ID;
bool fProvComplete;
bool fCondResetPend;
bool fAppMsgPend;
- u16 DrvErrNum;
- u16 AsicID;
- int DspAsicReset;
int DeviceCreated;
- int CardReady;
int NetDevRegDone;
u8 CardNumber;
u8 DeviceName[15];
struct ft1000_debug_dirs nodes;
- int registered;
- int mediastate;
- u8 squeseqnum; /* sequence number on slow queue */
- spinlock_t dpram_lock;
spinlock_t fifo_lock;
- u16 fifo_cnt;
- u8 DspVer[DSPVERSZ]; /* DSP version number */
- u8 HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
- u8 Sku[SKUSZ]; /* SKU */
- u8 eui64[EUISZ]; /* EUI64 */
- time_t ConTm; /* Connection Time */
- u8 ProductMode[MODESZ];
- u8 RfCalVer[CALVERSZ];
- u8 RfCalDate[CALDATESZ];
- u16 DSP_TIME[4];
- u16 LedStat;
- u16 ConStat;
- u16 ProgConStat;
- struct list_head prov_list;
int appcnt;
struct app_info_block app_info[MAX_NUM_APP];
- u16 DSPInfoBlklen;
u16 DrvMsgPend;
- int (*ft1000_reset)(struct net_device *dev);
- u16 DSPInfoBlk[MAX_DSP_SESS_REC];
- union {
- u16 Rec[MAX_DSP_SESS_REC];
- u32 MagRec[MAX_DSP_SESS_REC/2];
- } DSPSess;
unsigned short tempbuf[32];
- char netdevname[IFNAMSIZ];
- struct proc_dir_entry *ft1000_proc_dir;
-};
+} __packed;
struct dpram_blk {
@@ -141,21 +103,21 @@ struct dpram_blk {
u16 *pbuffer;
} __packed;
-int ft1000_read_register(struct ft1000_device *ft1000dev,
+int ft1000_read_register(struct ft1000_usb *ft1000dev,
u16 *Data, u16 nRegIndx);
-int ft1000_write_register(struct ft1000_device *ft1000dev,
+int ft1000_write_register(struct ft1000_usb *ft1000dev,
u16 value, u16 nRegIndx);
-int ft1000_read_dpram32(struct ft1000_device *ft1000dev,
+int ft1000_read_dpram32(struct ft1000_usb *ft1000dev,
u16 indx, u8 *buffer, u16 cnt);
-int ft1000_write_dpram32(struct ft1000_device *ft1000dev,
+int ft1000_write_dpram32(struct ft1000_usb *ft1000dev,
u16 indx, u8 *buffer, u16 cnt);
-int ft1000_read_dpram16(struct ft1000_device *ft1000dev,
+int ft1000_read_dpram16(struct ft1000_usb *ft1000dev,
u16 indx, u8 *buffer, u8 highlow);
-int ft1000_write_dpram16(struct ft1000_device *ft1000dev,
+int ft1000_write_dpram16(struct ft1000_usb *ft1000dev,
u16 indx, u16 value, u8 highlow);
-int fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev,
+int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev,
u16 indx, u8 *buffer);
-int fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev,
+int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev,
u16 indx, u8 *buffer);
extern void *pFileStart;
@@ -163,25 +125,25 @@ extern size_t FileLength;
extern int numofmsgbuf;
int ft1000_close(struct net_device *dev);
-u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
+u16 scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
u32 FileLength);
extern struct list_head freercvpool;
extern spinlock_t free_buff_lock; /* lock to arbitrate free buffer list for receive command data */
-int ft1000_create_dev(struct ft1000_device *dev);
+int ft1000_create_dev(struct ft1000_usb *dev);
void ft1000_destroy_dev(struct net_device *dev);
-extern void card_send_command(struct ft1000_device *ft1000dev,
+extern void card_send_command(struct ft1000_usb *ft1000dev,
void *ptempbuffer, int size);
struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist);
void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist);
-int dsp_reload(struct ft1000_device *ft1000dev);
-int init_ft1000_netdev(struct ft1000_device *ft1000dev);
+int dsp_reload(struct ft1000_usb *ft1000dev);
+int init_ft1000_netdev(struct ft1000_usb *ft1000dev);
struct usb_interface;
-int reg_ft1000_netdev(struct ft1000_device *ft1000dev,
+int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
struct usb_interface *intf);
int ft1000_poll(void *dev_id);
diff --git a/drivers/staging/ft1000/ft1000.h b/drivers/staging/ft1000/ft1000.h
index 03baa5779234..175abfa7682e 100644
--- a/drivers/staging/ft1000/ft1000.h
+++ b/drivers/staging/ft1000/ft1000.h
@@ -250,3 +250,38 @@ struct prov_record {
struct list_head list;
u8 *pprov_data;
};
+
+struct ft1000_info {
+ void *priv;
+ struct net_device_stats stats;
+ u16 DrvErrNum;
+ u16 AsicID;
+ int CardReady;
+ int registered;
+ int mediastate;
+ u8 squeseqnum; /* sequence number on slow queue */
+ spinlock_t dpram_lock;
+ u16 fifo_cnt;
+ u8 DspVer[DSPVERSZ]; /* DSP version number */
+ u8 HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
+ u8 Sku[SKUSZ]; /* SKU */
+ u8 eui64[EUISZ]; /* EUI64 */
+ time_t ConTm; /* Connection Time */
+ u8 ProductMode[MODESZ];
+ u8 RfCalVer[CALVERSZ];
+ u8 RfCalDate[CALDATESZ];
+ u16 DSP_TIME[4];
+ u16 LedStat;
+ u16 ConStat;
+ u16 ProgConStat;
+ struct list_head prov_list;
+ u16 DSPInfoBlklen;
+ int (*ft1000_reset)(void *);
+ u16 DSPInfoBlk[MAX_DSP_SESS_REC];
+ union {
+ u16 Rec[MAX_DSP_SESS_REC];
+ u32 MagRec[MAX_DSP_SESS_REC/2];
+ } DSPSess;
+ struct proc_dir_entry *ft1000_proc_dir;
+ char netdevname[IFNAMSIZ];
+};
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index 580406cb1808..a0812d99136f 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -1,9 +1,11 @@
config FIREWIRE_SERIAL
tristate "TTY over Firewire"
- depends on FIREWIRE
+ depends on FIREWIRE && TTY
help
This enables TTY over IEEE 1394, providing high-speed serial
- connectivity to cabled peers.
+ connectivity to cabled peers. This driver implements an
+ ad-hoc transport protocol and is currently limited to
+ Linux-to-Linux communication.
To compile this driver as a module, say M here: the module will
be called firewire-serial.
diff --git a/drivers/staging/fwserial/TODO b/drivers/staging/fwserial/TODO
index 726900548eae..382a7959407c 100644
--- a/drivers/staging/fwserial/TODO
+++ b/drivers/staging/fwserial/TODO
@@ -1,5 +1,5 @@
-TODOs
------
+TODOs prior to this driver moving out of staging
+------------------------------------------------
1. Implement retries for RCODE_BUSY, RCODE_NO_ACK and RCODE_SEND_ERROR
- I/O is handled asynchronously which presents some issues when error
conditions occur.
@@ -11,27 +11,4 @@ TODOs
-- Issues with firewire stack --
1. This driver uses the same unregistered vendor id that the firewire core does
(0xd00d1e). Perhaps this could be exposed as a define in
- firewire-constants.h?
-2. MAX_ASYNC_PAYLOAD needs to be publicly exposed by core/ohci
- - otherwise how will this driver know the max size of address window to
- open for one packet write?
-3. Maybe device_max_receive() and link_speed_to_max_payload() should be
- taken up by the firewire core?
-4. To avoid dropping rx data while still limiting the maximum buffering,
- the size of the AR context must be known. How to expose this to drivers?
-5. Explore if bigger AR context will reduce RCODE_BUSY responses
- (or auto-grow to certain max size -- but this would require major surgery
- as the current AR is contiguously mapped)
-
--- Issues with TTY core --
- 1. Hack for alternate device name scheme
- - because udev no longer allows device renaming, devices should have
- their proper names on creation. This is an issue for creating the
- fwloop<n> device with the fwtty<n> devices because although duplicating
- roughly the same operations as tty_port_register_device() isn't difficult,
- access to the tty_class & tty_fops is restricted in scope.
-
- This is currently being worked around in create_loop_device() by
- extracting the tty_class ptr and tty_fops ptr from the previously created
- tty devices. Perhaps an add'l api can be added -- eg.,
- tty_{port_}register_named_device().
+ firewire.h?
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 61ee29083b26..5a6fb44f38a8 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -40,12 +40,10 @@ static int num_ttys = 4; /* # of std ttys to create per fw_card */
/* - doubles as loopback port index */
static bool auto_connect = true; /* try to VIRT_CABLE to every peer */
static bool create_loop_dev = true; /* create a loopback device for each card */
-bool limit_bw; /* limit async bandwidth to 20% of max */
module_param_named(ttys, num_ttys, int, S_IRUGO | S_IWUSR);
module_param_named(auto, auto_connect, bool, S_IRUGO | S_IWUSR);
module_param_named(loop, create_loop_dev, bool, S_IRUGO | S_IWUSR);
-module_param(limit_bw, bool, S_IRUGO | S_IWUSR);
/*
* Threshold below which the tty is woken for writing
@@ -74,12 +72,20 @@ static DEFINE_MUTEX(port_table_lock);
static bool port_table_corrupt;
#define FWTTY_INVALID_INDEX MAX_TOTAL_PORTS
+#define loop_idx(port) (((port)->index) / num_ports)
+#define table_idx(loop) ((loop) * num_ports + num_ttys)
+
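For illustration only (not part of the patch): a minimal userspace sketch of the index arithmetic behind loop_idx()/table_idx(), assuming the default module parameters (ttys = 4, loop = Y), so each fw_card owns a block of num_ports = 5 consecutive port indices with its loopback port last.

#include <stdio.h>

#define NUM_TTYS  4                      /* assumed "ttys" module parameter  */
#define NUM_LOOPS 1                      /* assumed create_loop_dev == true  */
#define NUM_PORTS (NUM_TTYS + NUM_LOOPS) /* ports allocated per fw_card      */

static int loop_idx(int port_index) { return port_index / NUM_PORTS; }
static int table_idx(int loop)      { return loop * NUM_PORTS + NUM_TTYS; }

int main(void)
{
	int loop;

	/* card 0's loopback port sits at table index 4, card 1's at 9, ... */
	for (loop = 0; loop < 3; loop++)
		printf("fwloop%d -> table index %d -> loop_idx %d\n",
		       loop, table_idx(loop), loop_idx(table_idx(loop)));
	return 0;
}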
/* total # of tty ports created per fw_card */
static int num_ports;
/* slab used as pool for struct fwtty_transactions */
static struct kmem_cache *fwtty_txn_cache;
+struct tty_driver *fwtty_driver;
+static struct tty_driver *fwloop_driver;
+
+static struct dentry *fwserial_debugfs;
+
struct fwtty_transaction;
typedef void (*fwtty_transaction_cb)(struct fw_card *card, int rcode,
void *data, size_t length,
@@ -176,10 +182,15 @@ static void dump_profile(struct seq_file *m, struct stats *stats)
#define dump_profile(m, stats)
#endif
-/* Returns the max receive packet size for the given card */
+/*
+ * Returns the max receive packet size for the given node
+ * Devices which are OHCI v1.0/v1.1/v1.2-draft or RFC 2734 compliant
+ * are required by specification to support max_rec of 8 (512 bytes) or more.
+ */
static inline int device_max_receive(struct fw_device *fw_device)
{
- return 1 << (clamp_t(int, fw_device->max_rec, 8U, 13U) + 1);
+ /* see IEEE 1394-2008 table 8-8 */
+ return min(2 << fw_device->max_rec, 4096);
}
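As a hedged aside (not part of the patch), the arithmetic above can be checked in isolation: the 1394 config-ROM max_rec field encodes the largest receivable block as 2^(max_rec + 1) bytes, and the new code additionally caps the result at the 4096-byte asynchronous maximum.

#include <stdio.h>

/* sketch of device_max_receive(): 2 << max_rec, capped at 4096 bytes */
static int max_receive_sketch(unsigned int max_rec)
{
	int bytes = 2 << max_rec;            /* == 1 << (max_rec + 1) */

	return bytes < 4096 ? bytes : 4096;  /* async payload ceiling */
}

int main(void)
{
	unsigned int rec;

	for (rec = 8; rec <= 13; rec++)      /* spec minimum is max_rec = 8 */
		printf("max_rec=%u -> %d bytes\n", rec, max_receive_sketch(rec));
	return 0;
}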
static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
@@ -489,16 +500,11 @@ static void fwtty_do_hangup(struct work_struct *work)
static void fwtty_emit_breaks(struct work_struct *work)
{
struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks);
- struct tty_struct *tty;
static const char buf[16];
unsigned long now = jiffies;
unsigned long elapsed = now - port->break_last;
int n, t, c, brk = 0;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
/* generate breaks at the line rate (but at least 1) */
n = (elapsed * port->cps) / HZ + 1;
port->break_last = now;
@@ -507,15 +513,14 @@ static void fwtty_emit_breaks(struct work_struct *work)
while (n) {
t = min(n, 16);
- c = tty_insert_flip_string_fixed_flag(tty, buf, TTY_BREAK, t);
+ c = tty_insert_flip_string_fixed_flag(&port->port, buf,
+ TTY_BREAK, t);
n -= c;
brk += c;
if (c < t)
break;
}
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
if (port->mstatus & (UART_LSR_BI << 24))
schedule_delayed_work(&port->emit_breaks, FREQ_BREAKS);
@@ -529,13 +534,9 @@ static void fwtty_pushrx(struct work_struct *work)
struct buffered_rx *buf, *next;
int n, c = 0;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
spin_lock_bh(&port->lock);
list_for_each_entry_safe(buf, next, &port->buf_list, list) {
- n = tty_insert_flip_string_fixed_flag(tty, buf->data,
+ n = tty_insert_flip_string_fixed_flag(&port->port, buf->data,
TTY_NORMAL, buf->n);
c += n;
port->buffered -= n;
@@ -544,7 +545,11 @@ static void fwtty_pushrx(struct work_struct *work)
memmove(buf->data, buf->data + n, buf->n - n);
buf->n -= n;
}
- __fwtty_throttle(port, tty);
+ tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ __fwtty_throttle(port, tty);
+ tty_kref_put(tty);
+ }
break;
} else {
list_del(&buf->list);
@@ -552,13 +557,11 @@ static void fwtty_pushrx(struct work_struct *work)
}
}
if (c > 0)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
if (list_empty(&port->buf_list))
clear_bit(BUFFERING_RX, &port->flags);
spin_unlock_bh(&port->lock);
-
- tty_kref_put(tty);
}
static int fwtty_buffer_rx(struct fwtty_port *port, unsigned char *d, size_t n)
@@ -566,8 +569,11 @@ static int fwtty_buffer_rx(struct fwtty_port *port, unsigned char *d, size_t n)
struct buffered_rx *buf;
size_t size = (n + sizeof(struct buffered_rx) + 0xFF) & ~0xFF;
- if (port->buffered + n > HIGH_WATERMARK)
+ if (port->buffered + n > HIGH_WATERMARK) {
+ fwtty_err_ratelimited(port, "overflowed rx buffer: buffered: %d new: %zu wtrmk: %d",
+ port->buffered, n, HIGH_WATERMARK);
return 0;
+ }
buf = kmalloc(size, GFP_ATOMIC);
if (!buf)
return 0;
@@ -593,10 +599,6 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
unsigned lsr;
int err = 0;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return -ENOENT;
-
fwtty_dbg(port, "%d", n);
profile_size_distrib(port->stats.reads, n);
@@ -616,7 +618,7 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
lsr &= port->status_mask;
if (lsr & ~port->ignore_mask & UART_LSR_OE) {
- if (!tty_insert_flip_char(tty, 0, TTY_OVERRUN)) {
+ if (!tty_insert_flip_char(&port->port, 0, TTY_OVERRUN)) {
err = -EIO;
goto out;
}
@@ -630,18 +632,23 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
}
if (!test_bit(BUFFERING_RX, &port->flags)) {
- c = tty_insert_flip_string_fixed_flag(tty, data, TTY_NORMAL, n);
+ c = tty_insert_flip_string_fixed_flag(&port->port, data,
+ TTY_NORMAL, n);
if (c > 0)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
n -= c;
if (n) {
/* start buffering and throttling */
n -= fwtty_buffer_rx(port, &data[c], n);
- spin_lock_bh(&port->lock);
- __fwtty_throttle(port, tty);
- spin_unlock_bh(&port->lock);
+ tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ spin_lock_bh(&port->lock);
+ __fwtty_throttle(port, tty);
+ spin_unlock_bh(&port->lock);
+ tty_kref_put(tty);
+ }
}
} else
n -= fwtty_buffer_rx(port, data, n);
@@ -652,8 +659,6 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
}
out:
- tty_kref_put(tty);
-
port->icount.rx += len;
port->stats.lost += n;
return err;
@@ -1160,6 +1165,19 @@ static int fwtty_install(struct tty_driver *driver, struct tty_struct *tty)
return err;
}
+static int fwloop_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct fwtty_port *port = fwtty_port_get(table_idx(tty->index));
+ int err;
+
+ err = tty_standard_install(driver, tty);
+ if (!err)
+ tty->driver_data = port;
+ else
+ fwtty_port_put(port);
+ return err;
+}
+
static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct fwtty_port *port = tty->driver_data;
@@ -1487,17 +1505,26 @@ static void fwtty_proc_show_port(struct seq_file *m, struct fwtty_port *port)
if (port->port.console)
(*port->fwcon_ops->stats)(&stats, port->con_data);
- seq_printf(m, " tx:%d rx:%d", port->icount.tx + stats.xchars,
- port->icount.rx);
+ seq_printf(m, " addr:%012llx tx:%d rx:%d", port->rx_handler.offset,
+ port->icount.tx + stats.xchars, port->icount.rx);
seq_printf(m, " cts:%d dsr:%d rng:%d dcd:%d", port->icount.cts,
port->icount.dsr, port->icount.rng, port->icount.dcd);
seq_printf(m, " fe:%d oe:%d pe:%d brk:%d", port->icount.frame,
port->icount.overrun, port->icount.parity, port->icount.brk);
+}
+
+static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port)
+{
+ struct stats stats;
+
+ memcpy(&stats, &port->stats, sizeof(stats));
+ if (port->port.console)
+ (*port->fwcon_ops->stats)(&stats, port->con_data);
+
seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped,
stats.tx_stall, stats.fifo_errs, stats.lost);
seq_printf(m, " pkts:%d thr:%d wtrmk:%d", stats.sent, stats.throttled,
stats.watermark);
- seq_printf(m, " addr:%012llx", port->rx_handler.offset);
if (port->port.console) {
seq_printf(m, "\n ");
@@ -1507,7 +1534,7 @@ static void fwtty_proc_show_port(struct seq_file *m, struct fwtty_port *port)
dump_profile(m, &port->stats);
}
-static void fwtty_proc_show_peer(struct seq_file *m, struct fwtty_peer *peer)
+static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
{
int generation = peer->generation;
@@ -1516,21 +1543,14 @@ static void fwtty_proc_show_peer(struct seq_file *m, struct fwtty_peer *peer)
seq_printf(m, " node:%04x gen:%d", peer->node_id, generation);
seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed,
peer->max_payload, (unsigned long long) peer->guid);
-
- if (capable(CAP_SYS_ADMIN)) {
- seq_printf(m, " mgmt:%012llx",
- (unsigned long long) peer->mgmt_addr);
- seq_printf(m, " addr:%012llx",
- (unsigned long long) peer->status_addr);
- }
+ seq_printf(m, " mgmt:%012llx", (unsigned long long) peer->mgmt_addr);
+ seq_printf(m, " addr:%012llx", (unsigned long long) peer->status_addr);
seq_putc(m, '\n');
}
static int fwtty_proc_show(struct seq_file *m, void *v)
{
struct fwtty_port *port;
- struct fw_serial *serial;
- struct fwtty_peer *peer;
int i;
seq_puts(m, "fwserinfo: 1.0 driver: 1.0\n");
@@ -1541,16 +1561,39 @@ static int fwtty_proc_show(struct seq_file *m, void *v)
fwtty_port_put(port);
seq_printf(m, "\n");
}
- seq_putc(m, '\n');
+ return 0;
+}
- rcu_read_lock();
- list_for_each_entry_rcu(serial, &fwserial_list, list) {
- seq_printf(m, "card: %s guid: %016llx\n",
- dev_name(serial->card->device),
- (unsigned long long) serial->card->guid);
- list_for_each_entry_rcu(peer, &serial->peer_list, list)
- fwtty_proc_show_peer(m, peer);
+static int fwtty_debugfs_stats_show(struct seq_file *m, void *v)
+{
+ struct fw_serial *serial = m->private;
+ struct fwtty_port *port;
+ int i;
+
+ for (i = 0; i < num_ports; ++i) {
+ port = fwtty_port_get(serial->ports[i]->index);
+ if (port) {
+ seq_printf(m, "%2d:", port->index);
+ fwtty_proc_show_port(m, port);
+ fwtty_debugfs_show_port(m, port);
+ fwtty_port_put(port);
+ seq_printf(m, "\n");
+ }
}
+ return 0;
+}
+
+static int fwtty_debugfs_peers_show(struct seq_file *m, void *v)
+{
+ struct fw_serial *serial = m->private;
+ struct fwtty_peer *peer;
+
+ rcu_read_lock();
+ seq_printf(m, "card: %s guid: %016llx\n",
+ dev_name(serial->card->device),
+ (unsigned long long) serial->card->guid);
+ list_for_each_entry_rcu(peer, &serial->peer_list, list)
+ fwtty_debugfs_show_peer(m, peer);
rcu_read_unlock();
return 0;
}
@@ -1560,6 +1603,32 @@ static int fwtty_proc_open(struct inode *inode, struct file *fp)
return single_open(fp, fwtty_proc_show, NULL);
}
+static int fwtty_stats_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, fwtty_debugfs_stats_show, inode->i_private);
+}
+
+static int fwtty_peers_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, fwtty_debugfs_peers_show, inode->i_private);
+}
+
+static const struct file_operations fwtty_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = fwtty_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations fwtty_peers_fops = {
+ .owner = THIS_MODULE,
+ .open = fwtty_peers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static const struct file_operations fwtty_proc_fops = {
.owner = THIS_MODULE,
.open = fwtty_proc_open,
@@ -1596,6 +1665,26 @@ static const struct tty_operations fwtty_ops = {
.proc_fops = &fwtty_proc_fops,
};
+static const struct tty_operations fwloop_ops = {
+ .open = fwtty_open,
+ .close = fwtty_close,
+ .hangup = fwtty_hangup,
+ .cleanup = fwtty_cleanup,
+ .install = fwloop_install,
+ .write = fwtty_write,
+ .write_room = fwtty_write_room,
+ .chars_in_buffer = fwtty_chars_in_buffer,
+ .send_xchar = fwtty_send_xchar,
+ .throttle = fwtty_throttle,
+ .unthrottle = fwtty_unthrottle,
+ .ioctl = fwtty_ioctl,
+ .set_termios = fwtty_set_termios,
+ .break_ctl = fwtty_break_ctl,
+ .tiocmget = fwtty_tiocmget,
+ .tiocmset = fwtty_tiocmset,
+ .get_icount = fwtty_get_icount,
+};
+
static inline int mgmt_pkt_expected_len(__be16 code)
{
static const struct fwserial_mgmt_pkt pkt;
@@ -1685,8 +1774,7 @@ static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
/* reconfigure tx_fifo optimally for this peer */
spin_lock_bh(&port->lock);
- port->max_payload = min3(peer->max_payload, peer->fifo_len,
- MAX_ASYNC_PAYLOAD);
+ port->max_payload = min(peer->max_payload, peer->fifo_len);
dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
spin_unlock_bh(&peer->port->lock);
@@ -1781,10 +1869,11 @@ static struct fwtty_port *fwserial_find_port(struct fwtty_peer *peer)
return NULL;
}
-static void fwserial_release_port(struct fwtty_port *port)
+static void fwserial_release_port(struct fwtty_port *port, bool reset)
{
/* drop carrier (and all other line status) */
- fwtty_update_port_status(port, 0);
+ if (reset)
+ fwtty_update_port_status(port, 0);
spin_lock_bh(&port->lock);
@@ -1814,7 +1903,7 @@ static void fwserial_plug_timeout(unsigned long data)
spin_unlock_bh(&peer->lock);
if (port)
- fwserial_release_port(port);
+ fwserial_release_port(port, false);
}
/**
@@ -1877,7 +1966,7 @@ cancel_timer:
peer_revert_state(peer);
release_port:
spin_unlock_bh(&peer->lock);
- fwserial_release_port(port);
+ fwserial_release_port(port, false);
free_pkt:
kfree(pkt);
return err;
@@ -1892,7 +1981,8 @@ free_pkt:
* The port reference is put by fwtty_cleanup (if a reference was
* ever taken).
*/
-static void fwserial_close_port(struct fwtty_port *port)
+static void fwserial_close_port(struct tty_driver *driver,
+ struct fwtty_port *port)
{
struct tty_struct *tty;
@@ -1904,7 +1994,10 @@ static void fwserial_close_port(struct fwtty_port *port)
}
mutex_unlock(&port->port.mutex);
- tty_unregister_device(fwtty_driver, port->index);
+ if (driver == fwloop_driver)
+ tty_unregister_device(driver, loop_idx(port));
+ else
+ tty_unregister_device(driver, port->index);
}
/**
@@ -2155,85 +2248,13 @@ static void fwserial_remove_peer(struct fwtty_peer *peer)
spin_unlock_bh(&peer->lock);
if (port)
- fwserial_release_port(port);
+ fwserial_release_port(port, true);
synchronize_rcu();
kfree(peer);
}
/**
- * create_loop_device - create a loopback tty device
- * @tty_driver: tty_driver to own loopback device
- * @prototype: ptr to already-assigned 'prototype' tty port
- * @index: index to associate this device with the tty port
- * @parent: device to child to
- *
- * HACK - this is basically tty_port_register_device() with an
- * alternate naming scheme. Suggest tty_port_register_named_device()
- * helper api.
- *
- * Creates a loopback tty device named 'fwloop<n>' which is attached to
- * the local unit in fwserial_add_peer(). Note that <n> in the device
- * name advances in increments of port allocation blocks, ie., for port
- * indices 0..3, the device name will be 'fwloop0'; for 4..7, 'fwloop1',
- * and so on.
- *
- * Only one loopback device should be created per fw_card.
- */
-static void release_loop_device(struct device *dev)
-{
- kfree(dev);
-}
-
-static struct device *create_loop_device(struct tty_driver *driver,
- struct fwtty_port *prototype,
- struct fwtty_port *port,
- struct device *parent)
-{
- char name[64];
- int index = port->index;
- dev_t devt = MKDEV(driver->major, driver->minor_start) + index;
- struct device *dev = NULL;
- int err;
-
- if (index >= fwtty_driver->num)
- return ERR_PTR(-EINVAL);
-
- snprintf(name, 64, "%s%d", loop_dev_name, index / num_ports);
-
- tty_port_link_device(&port->port, driver, index);
-
- cdev_init(&driver->cdevs[index], driver->cdevs[prototype->index].ops);
- driver->cdevs[index].owner = driver->owner;
- err = cdev_add(&driver->cdevs[index], devt, 1);
- if (err)
- return ERR_PTR(err);
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- cdev_del(&driver->cdevs[index]);
- return ERR_PTR(-ENOMEM);
- }
-
- dev->devt = devt;
- dev->class = prototype->device->class;
- dev->parent = parent;
- dev->release = release_loop_device;
- dev_set_name(dev, "%s", name);
- dev->groups = NULL;
- dev_set_drvdata(dev, NULL);
-
- err = device_register(dev);
- if (err) {
- put_device(dev);
- cdev_del(&driver->cdevs[index]);
- return ERR_PTR(err);
- }
-
- return dev;
-}
-
-/**
* fwserial_create - init everything to create TTYs for a specific fw_card
* @unit: fw_unit for first 'serial' unit device probed for this fw_card
*
@@ -2331,17 +2352,28 @@ static int fwserial_create(struct fw_unit *unit)
if (create_loop_dev) {
struct device *loop_dev;
- loop_dev = create_loop_device(fwtty_driver,
- serial->ports[0],
- serial->ports[num_ttys],
- card->device);
+ loop_dev = tty_port_register_device(&serial->ports[j]->port,
+ fwloop_driver,
+ loop_idx(serial->ports[j]),
+ card->device);
if (IS_ERR(loop_dev)) {
err = PTR_ERR(loop_dev);
fwtty_err(&unit, "create loop device failed (%d)", err);
goto unregister_ttys;
}
- serial->ports[num_ttys]->device = loop_dev;
- serial->ports[num_ttys]->loopback = true;
+ serial->ports[j]->device = loop_dev;
+ serial->ports[j]->loopback = true;
+ }
+
+ if (!IS_ERR_OR_NULL(fwserial_debugfs)) {
+ serial->debugfs = debugfs_create_dir(dev_name(&unit->device),
+ fwserial_debugfs);
+ if (!IS_ERR_OR_NULL(serial->debugfs)) {
+ debugfs_create_file("peers", 0444, serial->debugfs,
+ serial, &fwtty_peers_fops);
+ debugfs_create_file("stats", 0444, serial->debugfs,
+ serial, &fwtty_stats_fops);
+ }
}
list_add_rcu(&serial->list, &fwserial_list);
@@ -2356,7 +2388,11 @@ static int fwserial_create(struct fw_unit *unit)
fwtty_err(&unit, "unable to add peer unit device (%d)", err);
/* fall-through to error processing */
+ debugfs_remove_recursive(serial->debugfs);
+
list_del_rcu(&serial->list);
+ if (create_loop_dev)
+ tty_unregister_device(fwloop_driver, loop_idx(serial->ports[j]));
unregister_ttys:
for (--j; j >= 0; --j)
tty_unregister_device(fwtty_driver, serial->ports[j]->index);
@@ -2445,8 +2481,12 @@ static int fwserial_remove(struct device *dev)
/* unlink from the fwserial_list here */
list_del_rcu(&serial->list);
- for (i = 0; i < num_ports; ++i)
- fwserial_close_port(serial->ports[i]);
+ debugfs_remove_recursive(serial->debugfs);
+
+ for (i = 0; i < num_ttys; ++i)
+ fwserial_close_port(fwtty_driver, serial->ports[i]);
+ if (create_loop_dev)
+ fwserial_close_port(fwloop_driver, serial->ports[i]);
kref_put(&serial->kref, fwserial_destroy);
}
mutex_unlock(&fwserial_list_mutex);
@@ -2510,26 +2550,25 @@ static struct fw_driver fwserial_driver = {
/* XXX: config ROM definitions could be improved with semi-automated offset
* and length calculation
*/
+#define FW_ROM_LEN(quads) ((quads) << 16)
#define FW_ROM_DESCRIPTOR(ofs) (((CSR_LEAF | CSR_DESCRIPTOR) << 24) | (ofs))
struct fwserial_unit_directory_data {
- u16 crc;
- u16 len;
+ u32 len_crc;
u32 unit_specifier;
u32 unit_sw_version;
u32 unit_addr_offset;
u32 desc1_ofs;
- u16 desc1_crc;
- u16 desc1_len;
+ u32 desc1_len_crc;
u32 desc1_data[5];
} __packed;
static struct fwserial_unit_directory_data fwserial_unit_directory_data = {
- .len = 4,
+ .len_crc = FW_ROM_LEN(4),
.unit_specifier = FW_UNIT_SPECIFIER(LINUX_VENDOR_ID),
.unit_sw_version = FW_UNIT_VERSION(FWSERIAL_VERSION),
.desc1_ofs = FW_ROM_DESCRIPTOR(1),
- .desc1_len = 5,
+ .desc1_len_crc = FW_ROM_LEN(5),
.desc1_data = {
0x00000000, /* type = text */
0x00000000, /* enc = ASCII, lang EN */
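An illustrative sketch of the new len_crc encoding (an assumption, not part of the patch): a 1394 configuration-ROM directory header carries the length in quadlets in its upper 16 bits and a CRC-16 in the lower 16, so FW_ROM_LEN() fills in only the length half, with the CRC presumably computed by the firewire core when the descriptor is registered.

#include <stdint.h>
#include <stdio.h>

#define FW_ROM_LEN(quads) ((uint32_t)(quads) << 16)

int main(void)
{
	uint32_t len_crc = FW_ROM_LEN(4);    /* CRC half left zero here */

	printf("quadlets=%u crc=0x%04x\n", len_crc >> 16, len_crc & 0xffff);
	return 0;
}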
@@ -2549,7 +2588,7 @@ static struct fw_descriptor fwserial_unit_directory = {
* The management address is in the unit space region but above other known
* address users (to keep wild writes from causing havoc)
*/
-const struct fw_address_region fwserial_mgmt_addr_region = {
+static const struct fw_address_region fwserial_mgmt_addr_region = {
.start = CSR_REGISTER_BASE + 0x1e0000ULL,
.end = 0x1000000000000ULL,
};
@@ -2615,7 +2654,7 @@ static void fwserial_handle_plug_req(struct work_struct *work)
spin_unlock_bh(&peer->lock);
if (port)
- fwserial_release_port(port);
+ fwserial_release_port(port, false);
rcode = fwserial_send_mgmt_sync(peer, pkt);
@@ -2637,7 +2676,7 @@ static void fwserial_handle_plug_req(struct work_struct *work)
cleanup:
spin_unlock_bh(&peer->lock);
if (port)
- fwserial_release_port(port);
+ fwserial_release_port(port, false);
kfree(pkt);
return;
}
@@ -2681,15 +2720,14 @@ static void fwserial_handle_unplug_req(struct work_struct *work)
spin_lock_bh(&peer->lock);
if (peer->state == FWPS_UNPLUG_RESPONDING) {
- if (rcode == RCODE_COMPLETE)
- port = peer_revert_state(peer);
- else
+ if (rcode != RCODE_COMPLETE)
fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)", rcode);
+ port = peer_revert_state(peer);
}
cleanup:
spin_unlock_bh(&peer->lock);
if (port)
- fwserial_release_port(port);
+ fwserial_release_port(port, true);
kfree(pkt);
return;
}
@@ -2700,6 +2738,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
size_t len)
{
struct fwtty_port *port = NULL;
+ bool reset = false;
int rcode;
if (addr != fwserial_mgmt_addr_handler.offset || len < sizeof(pkt->hdr))
@@ -2775,6 +2814,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK)
fwtty_notice(&peer->unit, "NACK unplug?");
port = peer_revert_state(peer);
+ reset = true;
}
break;
@@ -2786,7 +2826,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
spin_unlock_bh(&peer->lock);
if (port)
- fwserial_release_port(port);
+ fwserial_release_port(port, reset);
return rcode;
}
@@ -2836,14 +2876,18 @@ static int __init fwserial_init(void)
{
int err, num_loops = !!(create_loop_dev);
+ /* XXX: placeholder for a "firewire" debugfs node */
+ fwserial_debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
/* num_ttys/num_ports must not be set above the static alloc avail */
if (num_ttys + num_loops > MAX_CARD_PORTS)
num_ttys = MAX_CARD_PORTS - num_loops;
num_ports = num_ttys + num_loops;
- fwtty_driver = alloc_tty_driver(MAX_TOTAL_PORTS);
- if (!fwtty_driver) {
- err = -ENOMEM;
+ fwtty_driver = tty_alloc_driver(MAX_TOTAL_PORTS, TTY_DRIVER_REAL_RAW
+ | TTY_DRIVER_DYNAMIC_DEV);
+ if (IS_ERR(fwtty_driver)) {
+ err = PTR_ERR(fwtty_driver);
return err;
}
@@ -2853,9 +2897,6 @@ static int __init fwserial_init(void)
fwtty_driver->minor_start = 0;
fwtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
fwtty_driver->subtype = SERIAL_TYPE_NORMAL;
- fwtty_driver->flags = TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV;
-
fwtty_driver->init_termios = tty_std_termios;
fwtty_driver->init_termios.c_cflag |= CLOCAL;
tty_set_operations(fwtty_driver, &fwtty_ops);
@@ -2866,12 +2907,38 @@ static int __init fwserial_init(void)
goto put_tty;
}
+ if (create_loop_dev) {
+ fwloop_driver = tty_alloc_driver(MAX_TOTAL_PORTS / num_ports,
+ TTY_DRIVER_REAL_RAW
+ | TTY_DRIVER_DYNAMIC_DEV);
+ if (IS_ERR(fwloop_driver)) {
+ err = PTR_ERR(fwloop_driver);
+ goto unregister_driver;
+ }
+
+ fwloop_driver->driver_name = KBUILD_MODNAME "_loop";
+ fwloop_driver->name = loop_dev_name;
+ fwloop_driver->major = 0;
+ fwloop_driver->minor_start = 0;
+ fwloop_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ fwloop_driver->subtype = SERIAL_TYPE_NORMAL;
+ fwloop_driver->init_termios = tty_std_termios;
+ fwloop_driver->init_termios.c_cflag |= CLOCAL;
+ tty_set_operations(fwloop_driver, &fwloop_ops);
+
+ err = tty_register_driver(fwloop_driver);
+ if (err) {
+ driver_err("register loop driver failed (%d)", err);
+ goto put_loop;
+ }
+ }
+
fwtty_txn_cache = kmem_cache_create("fwtty_txn_cache",
sizeof(struct fwtty_transaction),
0, 0, fwtty_txn_constructor);
if (!fwtty_txn_cache) {
err = -ENOMEM;
- goto unregister_driver;
+ goto unregister_loop;
}
/*
@@ -2913,10 +2980,17 @@ remove_handler:
fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
destroy_cache:
kmem_cache_destroy(fwtty_txn_cache);
+unregister_loop:
+ if (create_loop_dev)
+ tty_unregister_driver(fwloop_driver);
+put_loop:
+ if (create_loop_dev)
+ put_tty_driver(fwloop_driver);
unregister_driver:
tty_unregister_driver(fwtty_driver);
put_tty:
put_tty_driver(fwtty_driver);
+ debugfs_remove_recursive(fwserial_debugfs);
return err;
}
@@ -2926,8 +3000,13 @@ static void __exit fwserial_exit(void)
fw_core_remove_descriptor(&fwserial_unit_directory);
fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
kmem_cache_destroy(fwtty_txn_cache);
+ if (create_loop_dev) {
+ tty_unregister_driver(fwloop_driver);
+ put_tty_driver(fwloop_driver);
+ }
tty_unregister_driver(fwtty_driver);
put_tty_driver(fwtty_driver);
+ debugfs_remove_recursive(fwserial_debugfs);
}
module_init(fwserial_init);
@@ -2940,4 +3019,3 @@ MODULE_DEVICE_TABLE(ieee1394, fwserial_id_table);
MODULE_PARM_DESC(ttys, "Number of ttys to create for each local firewire node");
MODULE_PARM_DESC(auto, "Auto-connect a tty to each firewire node discovered");
MODULE_PARM_DESC(loop, "Create a loopback device, fwloop<n>, with ttys");
-MODULE_PARM_DESC(limit_bw, "Limit bandwidth utilization to 20%.");
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 8b572edf9563..514f57173259 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -15,6 +15,7 @@
#include <linux/serial_reg.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include "dma_fifo.h"
@@ -193,7 +194,7 @@ struct buffered_rx {
* @port: underlying tty_port
* @device: tty device
* @index: index into port_table for this particular port
- * note: minor = index + FWSERIAL_TTY_START_MINOR
+ * note: minor = index + minor_start assigned by tty_alloc_driver()
* @serial: back pointer to the containing fw_serial
* @rx_handler: bus address handler for unique addr region used by remotes
* to communicate with this port. Every port uses
@@ -279,7 +280,7 @@ struct fwtty_port {
loopback:1;
unsigned long flags;
- struct fwtty_peer *peer;
+ struct fwtty_peer __rcu *peer;
struct async_icount icount;
struct stats stats;
@@ -338,6 +339,7 @@ struct fw_serial {
struct fw_card *card;
struct kref kref;
+ struct dentry *debugfs;
struct fwtty_peer *self;
struct list_head list;
@@ -351,9 +353,8 @@ struct fw_serial {
#define TTY_DEV_NAME "fwtty" /* ttyFW was taken */
static const char tty_dev_name[] = TTY_DEV_NAME;
static const char loop_dev_name[] = "fwloop";
-extern bool limit_bw;
-struct tty_driver *fwtty_driver;
+extern struct tty_driver *fwtty_driver;
#define driver_err(s, v...) pr_err(KBUILD_MODNAME ": " s, ##v)
@@ -370,18 +371,16 @@ static inline void fwtty_bind_console(struct fwtty_port *port,
/*
* Returns the max send async payload size in bytes based on the unit device
- * link speed - if set to limit bandwidth to max 20%, use lookup table
+ * link speed. Self-limiting asynchronous bandwidth (via reducing the payload)
+ * is not necessary and does not work, because
+ * 1) asynchronous traffic will absorb all available bandwidth (less that
+ * being used for isochronous traffic)
+ * 2) isochronous arbitration always wins.
*/
static inline int link_speed_to_max_payload(unsigned speed)
{
- static const int max_async[] = { 307, 614, 1229, 2458, 4916, 9832, };
- BUILD_BUG_ON(ARRAY_SIZE(max_async) - 1 != SCODE_3200);
-
- speed = clamp(speed, (unsigned) SCODE_100, (unsigned) SCODE_3200);
- if (limit_bw)
- return max_async[speed];
- else
- return 1 << (speed + 9);
+ /* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */
+ return min(512 << speed, 4096);
}
#endif /* _FIREWIRE_FWSERIAL_H */
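A similar sketch (an assumption-labelled aside, not part of the patch) for link_speed_to_max_payload(): with the usual SCODE_* encoding (S100 = 0, S200 = 1, ...), the payload doubles per speed step from 512 bytes and saturates at the 4096-byte asynchronous maximum from S800 upwards.

#include <stdio.h>

int main(void)
{
	static const char *names[] = {
		"S100", "S200", "S400", "S800", "S1600", "S3200"
	};
	unsigned int speed;

	for (speed = 0; speed < 6; speed++) {
		int payload = 512 << speed;   /* 512 << speed, capped below */

		printf("%-5s -> %d bytes\n", names[speed],
		       payload < 4096 ? payload : 4096);
	}
	return 0;
}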
diff --git a/drivers/staging/gdm72xx/gdm_sdio.c b/drivers/staging/gdm72xx/gdm_sdio.c
index 8b8ed981d102..695762b0e942 100644
--- a/drivers/staging/gdm72xx/gdm_sdio.c
+++ b/drivers/staging/gdm72xx/gdm_sdio.c
@@ -156,10 +156,8 @@ static int init_sdio(struct sdiowm_dev *sdev)
spin_lock_init(&tx->lock);
tx->sdu_buf = kmalloc(SDU_TX_BUF_SIZE, GFP_KERNEL);
- if (tx->sdu_buf == NULL) {
- dev_err(&sdev->func->dev, "Failed to allocate SDU tx buffer.\n");
+ if (tx->sdu_buf == NULL)
goto fail;
- }
for (i = 0; i < MAX_NR_SDU_BUF; i++) {
t = alloc_tx_struct(tx);
@@ -185,10 +183,8 @@ static int init_sdio(struct sdiowm_dev *sdev)
}
rx->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
- if (rx->rx_buf == NULL) {
- dev_err(&sdev->func->dev, "Failed to allocate rx buffer.\n");
+ if (rx->rx_buf == NULL)
goto fail;
- }
return 0;
diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c
index 6291829dcdcc..93046dda78f0 100644
--- a/drivers/staging/gdm72xx/sdio_boot.c
+++ b/drivers/staging/gdm72xx/sdio_boot.c
@@ -72,10 +72,8 @@ static int download_image(struct sdio_func *func, const char *img_name)
}
buf = kmalloc(DOWNLOAD_SIZE + TYPE_A_HEADER_SIZE, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(&func->dev, "Error: kmalloc\n");
+ if (buf == NULL)
return -ENOMEM;
- }
img_len = firm->size;
@@ -141,11 +139,8 @@ int sdio_boot(struct sdio_func *func)
const char *rfs_name = FW_DIR FW_RFS;
tx_buf = kmalloc(YMEM0_SIZE, GFP_KERNEL);
- if (tx_buf == NULL) {
- dev_err(&func->dev, "Error: kmalloc: %s %d\n",
- __func__, __LINE__);
+ if (tx_buf == NULL)
return -ENOMEM;
- }
ret = download_image(func, krn_name);
if (ret)
diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c
index 3e2103ae4eae..0d45eb680be5 100644
--- a/drivers/staging/gdm72xx/usb_boot.c
+++ b/drivers/staging/gdm72xx/usb_boot.c
@@ -158,10 +158,8 @@ int usb_boot(struct usb_device *usbdev, u16 pid)
}
tx_buf = kmalloc(DOWNLOAD_SIZE, GFP_KERNEL);
- if (tx_buf == NULL) {
- dev_err(&usbdev->dev, "Error: kmalloc\n");
+ if (tx_buf == NULL)
return -ENOMEM;
- }
if (firm->size < sizeof(hdr)) {
dev_err(&usbdev->dev, "Cannot read the image info.\n");
@@ -301,10 +299,8 @@ static int em_download_image(struct usb_device *usbdev, const char *img_name,
}
buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(&usbdev->dev, "Error: kmalloc\n");
+ if (buf == NULL)
return -ENOMEM;
- }
strcpy(buf+pad_size, type_string);
ret = gdm_wibro_send(usbdev, buf, strlen(type_string)+pad_size);
diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig
new file mode 100644
index 000000000000..4e094602437c
--- /dev/null
+++ b/drivers/staging/goldfish/Kconfig
@@ -0,0 +1,13 @@
+config GOLDFISH_AUDIO
+ tristate "Goldfish AVD Audio Device"
+ depends on GOLDFISH
+ ---help---
+ Emulated audio channel for the Goldfish Android Virtual Device
+
+config MTD_GOLDFISH_NAND
+ tristate "Goldfish NAND device"
+ depends on GOLDFISH
+ depends on MTD
+ help
+ Drives the emulated NAND flash device on the Google Goldfish
+ Android virtual device.
diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile
new file mode 100644
index 000000000000..dec34ad58162
--- /dev/null
+++ b/drivers/staging/goldfish/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Goldfish audio driver
+#
+
+obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o
+obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o
diff --git a/drivers/staging/goldfish/README b/drivers/staging/goldfish/README
new file mode 100644
index 000000000000..93d65b0f0f83
--- /dev/null
+++ b/drivers/staging/goldfish/README
@@ -0,0 +1,12 @@
+Audio
+-----
+- Move to using the ALSA framework instead of faking it
+- Fix the wrong user page DMA (moving to ALSA may fix that too)
+
+NAND
+----
+- Switch from spinlock to mutex
+- Remove excess checking of parameters in calls
+- Use dma coherent memory, not kmalloc/__pa, for the memory (this is just
+  a cleanliness issue, not a correctness one)
+
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
new file mode 100644
index 000000000000..d3bed21f4072
--- /dev/null
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -0,0 +1,363 @@
+/* drivers/misc/goldfish_audio.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Android QEMU Audio Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+struct goldfish_audio {
+ char __iomem *reg_base;
+ int irq;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+
+ char __iomem *buffer_virt; /* combined buffer virtual address */
+ unsigned long buffer_phys; /* combined buffer physical address */
+
+ char __iomem *write_buffer1; /* write buffer 1 virtual address */
+ char __iomem *write_buffer2; /* write buffer 2 virtual address */
+ char __iomem *read_buffer; /* read buffer virtual address */
+ int buffer_status;
+ int read_supported; /* true if we have audio input support */
+};
+
+/* We will allocate two read buffers and two write buffers.
+ Having two read buffers facilitates stereo -> mono conversion.
+ Having two write buffers facilitates interleaved IO.
+*/
+#define READ_BUFFER_SIZE 16384
+#define WRITE_BUFFER_SIZE 16384
+#define COMBINED_BUFFER_SIZE ((2 * READ_BUFFER_SIZE) + \
+ (2 * WRITE_BUFFER_SIZE))
+
+#define AUDIO_READ(data, addr) (readl(data->reg_base + addr))
+#define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr))
+
+/* temporary variable used between goldfish_audio_probe() and
+ goldfish_audio_open() */
+static struct goldfish_audio *audio_data;
+
+enum {
+ /* audio status register */
+ AUDIO_INT_STATUS = 0x00,
+ /* set this to enable IRQ */
+ AUDIO_INT_ENABLE = 0x04,
+ /* set these to specify buffer addresses */
+ AUDIO_SET_WRITE_BUFFER_1 = 0x08,
+ AUDIO_SET_WRITE_BUFFER_2 = 0x0C,
+ /* set number of bytes in buffer to write */
+ AUDIO_WRITE_BUFFER_1 = 0x10,
+ AUDIO_WRITE_BUFFER_2 = 0x14,
+
+ /* true if audio input is supported */
+ AUDIO_READ_SUPPORTED = 0x18,
+ /* buffer to use for audio input */
+ AUDIO_SET_READ_BUFFER = 0x1C,
+
+ /* driver writes number of bytes to read */
+ AUDIO_START_READ = 0x20,
+
+ /* number of bytes available in read buffer */
+ AUDIO_READ_BUFFER_AVAILABLE = 0x24,
+
+ /* AUDIO_INT_STATUS bits */
+
+	/* this bit is set when it is safe to write more bytes to the buffer */
+ AUDIO_INT_WRITE_BUFFER_1_EMPTY = 1U << 0,
+ AUDIO_INT_WRITE_BUFFER_2_EMPTY = 1U << 1,
+ AUDIO_INT_READ_BUFFER_FULL = 1U << 2,
+
+ AUDIO_INT_MASK = AUDIO_INT_WRITE_BUFFER_1_EMPTY |
+ AUDIO_INT_WRITE_BUFFER_2_EMPTY |
+ AUDIO_INT_READ_BUFFER_FULL,
+};
+
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+
+static ssize_t goldfish_audio_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct goldfish_audio *data = fp->private_data;
+ int length;
+ int result = 0;
+
+ if (!data->read_supported)
+ return -ENODEV;
+
+ while (count > 0) {
+ length = (count > READ_BUFFER_SIZE ? READ_BUFFER_SIZE : count);
+ AUDIO_WRITE(data, AUDIO_START_READ, length);
+
+ wait_event_interruptible(data->wait,
+ (data->buffer_status & AUDIO_INT_READ_BUFFER_FULL));
+
+ length = AUDIO_READ(data,
+ AUDIO_READ_BUFFER_AVAILABLE);
+
+ /* copy data to user space */
+ if (copy_to_user(buf, data->read_buffer, length))
+ return -EFAULT;
+
+ result += length;
+ buf += length;
+ count -= length;
+ }
+ return result;
+}
+
+static ssize_t goldfish_audio_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct goldfish_audio *data = fp->private_data;
+ unsigned long irq_flags;
+ ssize_t result = 0;
+ char __iomem *kbuf;
+
+ while (count > 0) {
+ ssize_t copy = count;
+ if (copy > WRITE_BUFFER_SIZE)
+ copy = WRITE_BUFFER_SIZE;
+ wait_event_interruptible(data->wait, (data->buffer_status &
+ (AUDIO_INT_WRITE_BUFFER_1_EMPTY |
+ AUDIO_INT_WRITE_BUFFER_2_EMPTY)));
+
+ if ((data->buffer_status & AUDIO_INT_WRITE_BUFFER_1_EMPTY) != 0)
+ kbuf = data->write_buffer1;
+ else
+ kbuf = data->write_buffer2;
+
+ /* copy from user space to the appropriate buffer */
+ if (copy_from_user(kbuf, buf, copy)) {
+ result = -EFAULT;
+ break;
+ }
+
+ spin_lock_irqsave(&data->lock, irq_flags);
+ /* clear the buffer empty flag, and signal the emulator
+ * to start writing the buffer */
+ if (kbuf == data->write_buffer1) {
+ data->buffer_status &= ~AUDIO_INT_WRITE_BUFFER_1_EMPTY;
+ AUDIO_WRITE(data, AUDIO_WRITE_BUFFER_1, copy);
+ } else {
+ data->buffer_status &= ~AUDIO_INT_WRITE_BUFFER_2_EMPTY;
+ AUDIO_WRITE(data, AUDIO_WRITE_BUFFER_2, copy);
+ }
+ spin_unlock_irqrestore(&data->lock, irq_flags);
+
+ buf += copy;
+ result += copy;
+ count -= copy;
+ }
+ return result;
+}
+
+static int goldfish_audio_open(struct inode *ip, struct file *fp)
+{
+ if (!audio_data)
+ return -ENODEV;
+
+ if (atomic_inc_return(&open_count) == 1) {
+ fp->private_data = audio_data;
+ audio_data->buffer_status = (AUDIO_INT_WRITE_BUFFER_1_EMPTY |
+ AUDIO_INT_WRITE_BUFFER_2_EMPTY);
+ AUDIO_WRITE(audio_data, AUDIO_INT_ENABLE, AUDIO_INT_MASK);
+ return 0;
+ } else {
+ atomic_dec(&open_count);
+ return -EBUSY;
+ }
+}
+
+static int goldfish_audio_release(struct inode *ip, struct file *fp)
+{
+ atomic_dec(&open_count);
+ /* FIXME: surely this is wrong for the multi-opened case */
+ AUDIO_WRITE(audio_data, AUDIO_INT_ENABLE, 0);
+ return 0;
+}
+
+static long goldfish_audio_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ /* temporary workaround, until we switch to the ALSA API */
+ if (cmd == 315)
+ return -1;
+ else
+ return 0;
+}
+
+static irqreturn_t goldfish_audio_interrupt(int irq, void *dev_id)
+{
+ unsigned long irq_flags;
+ struct goldfish_audio *data = dev_id;
+ u32 status;
+
+ spin_lock_irqsave(&data->lock, irq_flags);
+
+ /* read buffer status flags */
+ status = AUDIO_READ(data, AUDIO_INT_STATUS);
+ status &= AUDIO_INT_MASK;
+ /* if buffers are newly empty, wake up blocked
+ goldfish_audio_write() call */
+ if (status) {
+ data->buffer_status = status;
+ wake_up(&data->wait);
+ }
+
+ spin_unlock_irqrestore(&data->lock, irq_flags);
+ return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/* file operations for /dev/eac */
+static const struct file_operations goldfish_audio_fops = {
+ .owner = THIS_MODULE,
+ .read = goldfish_audio_read,
+ .write = goldfish_audio_write,
+ .open = goldfish_audio_open,
+ .release = goldfish_audio_release,
+ .unlocked_ioctl = goldfish_audio_ioctl,
+};
+
+static struct miscdevice goldfish_audio_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "eac",
+ .fops = &goldfish_audio_fops,
+};
+
+static int goldfish_audio_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct goldfish_audio *data;
+ dma_addr_t buf_addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL) {
+ ret = -ENOMEM;
+ goto err_data_alloc_failed;
+ }
+ spin_lock_init(&data->lock);
+ init_waitqueue_head(&data->wait);
+ platform_set_drvdata(pdev, data);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "platform_get_resource failed\n");
+ ret = -ENODEV;
+ goto err_no_io_base;
+ }
+ data->reg_base = ioremap(r->start, PAGE_SIZE);
+ if (data->reg_base == NULL) {
+ ret = -ENOMEM;
+ goto err_no_io_base;
+ }
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ ret = -ENODEV;
+ goto err_no_irq;
+ }
+ data->buffer_virt = dma_alloc_coherent(&pdev->dev,
+ COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL);
+ if (data->buffer_virt == 0) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "allocate buffer failed\n");
+ goto err_alloc_write_buffer_failed;
+ }
+ data->buffer_phys = buf_addr;
+ data->write_buffer1 = data->buffer_virt;
+ data->write_buffer2 = data->buffer_virt + WRITE_BUFFER_SIZE;
+ data->read_buffer = data->buffer_virt + 2 * WRITE_BUFFER_SIZE;
+
+ ret = request_irq(data->irq, goldfish_audio_interrupt,
+ IRQF_SHARED, pdev->name, data);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto err_request_irq_failed;
+ }
+
+ ret = misc_register(&goldfish_audio_device);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "misc_register returned %d in goldfish_audio_init\n",
+ ret);
+ goto err_misc_register_failed;
+ }
+
+ AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_1, buf_addr);
+ AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_2,
+ buf_addr + WRITE_BUFFER_SIZE);
+
+ data->read_supported = AUDIO_READ(data, AUDIO_READ_SUPPORTED);
+ if (data->read_supported)
+ AUDIO_WRITE(data, AUDIO_SET_READ_BUFFER,
+ buf_addr + 2 * WRITE_BUFFER_SIZE);
+
+ audio_data = data;
+ return 0;
+
+err_misc_register_failed:
+err_request_irq_failed:
+ dma_free_coherent(&pdev->dev, COMBINED_BUFFER_SIZE,
+ data->buffer_virt, data->buffer_phys);
+err_alloc_write_buffer_failed:
+err_no_irq:
+ iounmap(data->reg_base);
+err_no_io_base:
+ kfree(data);
+err_data_alloc_failed:
+ return ret;
+}
+
+static int goldfish_audio_remove(struct platform_device *pdev)
+{
+ struct goldfish_audio *data = platform_get_drvdata(pdev);
+
+ misc_deregister(&goldfish_audio_device);
+ free_irq(data->irq, data);
+ dma_free_coherent(&pdev->dev, COMBINED_BUFFER_SIZE,
+ data->buffer_virt, data->buffer_phys);
+ iounmap(data->reg_base);
+ kfree(data);
+ audio_data = NULL;
+ return 0;
+}
+
+static struct platform_driver goldfish_audio_driver = {
+ .probe = goldfish_audio_probe,
+ .remove = goldfish_audio_remove,
+ .driver = {
+ .name = "goldfish_audio"
+ }
+};
+
+module_platform_driver(goldfish_audio_driver);
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
new file mode 100644
index 000000000000..ab1f01952b48
--- /dev/null
+++ b/drivers/staging/goldfish/goldfish_nand.c
@@ -0,0 +1,444 @@
+/*
+ * drivers/mtd/devices/goldfish_nand.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/platform_device.h>
+
+#include <asm/div64.h>
+
+#include "goldfish_nand_reg.h"
+
+struct goldfish_nand {
+ spinlock_t lock;
+ unsigned char __iomem *base;
+ struct cmd_params *cmd_params;
+ size_t mtd_count;
+ struct mtd_info mtd[0];
+};
+
+static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
+ enum nand_cmd cmd, u64 addr, u32 len,
+ void *ptr, u32 *rv)
+{
+ u32 cmdp;
+ struct goldfish_nand *nand = mtd->priv;
+ struct cmd_params *cps = nand->cmd_params;
+ unsigned char __iomem *base = nand->base;
+
+ if (cps == NULL)
+ return -1;
+
+ switch (cmd) {
+ case NAND_CMD_ERASE:
+ cmdp = NAND_CMD_ERASE_WITH_PARAMS;
+ break;
+ case NAND_CMD_READ:
+ cmdp = NAND_CMD_READ_WITH_PARAMS;
+ break;
+ case NAND_CMD_WRITE:
+ cmdp = NAND_CMD_WRITE_WITH_PARAMS;
+ break;
+ default:
+ return -1;
+ }
+ cps->dev = mtd - nand->mtd;
+ cps->addr_high = (u32)(addr >> 32);
+ cps->addr_low = (u32)addr;
+ cps->transfer_size = len;
+ cps->data = (u32)ptr;
+ writel(cmdp, base + NAND_COMMAND);
+ *rv = cps->result;
+ return 0;
+}
+
+static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
+ u64 addr, u32 len, void *ptr)
+{
+ struct goldfish_nand *nand = mtd->priv;
+ u32 rv;
+ unsigned long irq_flags;
+ unsigned char __iomem *base = nand->base;
+
+ spin_lock_irqsave(&nand->lock, irq_flags);
+ if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
+ writel(mtd - nand->mtd, base + NAND_DEV);
+ writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
+ writel((u32)addr, base + NAND_ADDR_LOW);
+ writel(len, base + NAND_TRANSFER_SIZE);
+ writel((u32)ptr, base + NAND_DATA);
+ writel(cmd, base + NAND_COMMAND);
+ rv = readl(base + NAND_RESULT);
+ }
+ spin_unlock_irqrestore(&nand->lock, irq_flags);
+ return rv;
+}
+
+static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ loff_t ofs = instr->addr;
+ u32 len = instr->len;
+ u32 rem;
+
+ if (ofs + len > mtd->size)
+ goto invalid_arg;
+ rem = do_div(ofs, mtd->writesize);
+ if (rem)
+ goto invalid_arg;
+ ofs *= (mtd->writesize + mtd->oobsize);
+
+ if (len % mtd->writesize)
+ goto invalid_arg;
+ len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);
+
+ if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
+ pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
+ ofs, len, mtd->size, mtd->erasesize);
+ return -EIO;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+
+invalid_arg:
+ pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
+ ofs, len, mtd->size, mtd->erasesize);
+ return -EINVAL;
+}
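For illustration (not part of the patch; geometry assumed): the emulated NAND stores each page's spare/OOB bytes inline after the page data, so the conversion above rescales a logical byte offset from units of writesize to units of writesize + oobsize.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t writesize = 2048, oobsize = 64;    /* assumed geometry */
	uint64_t logical = 3 * writesize;                 /* start of page 3  */
	uint64_t raw = (logical / writesize) * (writesize + oobsize);

	printf("logical %llu -> raw %llu\n",
	       (unsigned long long)logical, (unsigned long long)raw);
	return 0;
}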
+
+static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ u32 rem;
+
+ if (ofs + ops->len > mtd->size)
+ goto invalid_arg;
+ if (ops->datbuf && ops->len && ops->len != mtd->writesize)
+ goto invalid_arg;
+ if (ops->ooblen + ops->ooboffs > mtd->oobsize)
+ goto invalid_arg;
+
+ rem = do_div(ofs, mtd->writesize);
+ if (rem)
+ goto invalid_arg;
+ ofs *= (mtd->writesize + mtd->oobsize);
+
+ if (ops->datbuf)
+ ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
+ ops->len, ops->datbuf);
+ ofs += mtd->writesize + ops->ooboffs;
+ if (ops->oobbuf)
+ ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
+ ops->ooblen, ops->oobbuf);
+ return 0;
+
+invalid_arg:
+ pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
+ ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
+ return -EINVAL;
+}
+
+static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ u32 rem;
+
+ if (ofs + ops->len > mtd->size)
+ goto invalid_arg;
+ if (ops->len && ops->len != mtd->writesize)
+ goto invalid_arg;
+ if (ops->ooblen + ops->ooboffs > mtd->oobsize)
+ goto invalid_arg;
+
+ rem = do_div(ofs, mtd->writesize);
+ if (rem)
+ goto invalid_arg;
+ ofs *= (mtd->writesize + mtd->oobsize);
+
+ if (ops->datbuf)
+ ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
+ ops->len, ops->datbuf);
+ ofs += mtd->writesize + ops->ooboffs;
+ if (ops->oobbuf)
+ ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
+ ops->ooblen, ops->oobbuf);
+ return 0;
+
+invalid_arg:
+ pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
+ ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
+ return -EINVAL;
+}
+
+static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ u32 rem;
+
+ if (from + len > mtd->size)
+ goto invalid_arg;
+ if (len != mtd->writesize)
+ goto invalid_arg;
+
+ rem = do_div(from, mtd->writesize);
+ if (rem)
+ goto invalid_arg;
+ from *= (mtd->writesize + mtd->oobsize);
+
+ *retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
+ return 0;
+
+invalid_arg:
+ pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
+ from, len, mtd->size, mtd->writesize);
+ return -EINVAL;
+}
+
+static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ u32 rem;
+
+ if (to + len > mtd->size)
+ goto invalid_arg;
+ if (len != mtd->writesize)
+ goto invalid_arg;
+
+ rem = do_div(to, mtd->writesize);
+ if (rem)
+ goto invalid_arg;
+ to *= (mtd->writesize + mtd->oobsize);
+
+ *retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
+ return 0;
+
+invalid_arg:
+ pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
+ to, len, mtd->size, mtd->writesize);
+ return -EINVAL;
+}
+
+static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ u32 rem;
+
+ if (ofs >= mtd->size)
+ goto invalid_arg;
+
+ rem = do_div(ofs, mtd->erasesize);
+ if (rem)
+ goto invalid_arg;
+ ofs *= mtd->erasesize / mtd->writesize;
+ ofs *= (mtd->writesize + mtd->oobsize);
+
+ return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);
+
+invalid_arg:
+ pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
+ ofs, mtd->size, mtd->writesize);
+ return -EINVAL;
+}
+
+static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ u32 rem;
+
+ if (ofs >= mtd->size)
+ goto invalid_arg;
+
+ rem = do_div(ofs, mtd->erasesize);
+ if (rem)
+ goto invalid_arg;
+ ofs *= mtd->erasesize / mtd->writesize;
+ ofs *= (mtd->writesize + mtd->oobsize);
+
+ if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
+ return -EIO;
+ return 0;
+
+invalid_arg:
+ pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
+ ofs, mtd->size, mtd->writesize);
+ return -EINVAL;
+}
+
+static int nand_setup_cmd_params(struct platform_device *pdev,
+ struct goldfish_nand *nand)
+{
+ u64 paddr;
+ unsigned char __iomem *base = nand->base;
+
+ nand->cmd_params = devm_kzalloc(&pdev->dev,
+ sizeof(struct cmd_params), GFP_KERNEL);
+ if (!nand->cmd_params)
+ return -1;
+
+ paddr = __pa(nand->cmd_params);
+ writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
+ writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW);
+ return 0;
+}
+
+static int goldfish_nand_init_device(struct platform_device *pdev,
+ struct goldfish_nand *nand, int id)
+{
+ u32 name_len;
+ u32 result;
+ u32 flags;
+ unsigned long irq_flags;
+ unsigned char __iomem *base = nand->base;
+ struct mtd_info *mtd = &nand->mtd[id];
+ char *name;
+
+ spin_lock_irqsave(&nand->lock, irq_flags);
+ writel(id, base + NAND_DEV);
+ flags = readl(base + NAND_DEV_FLAGS);
+ name_len = readl(base + NAND_DEV_NAME_LEN);
+ mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
+ mtd->size = readl(base + NAND_DEV_SIZE_LOW);
+ mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
+ mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
+ mtd->oobavail = mtd->oobsize;
+ mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
+ (mtd->writesize + mtd->oobsize) * mtd->writesize;
+ do_div(mtd->size, mtd->writesize + mtd->oobsize);
+ mtd->size *= mtd->writesize;
+ dev_dbg(&pdev->dev,
+ "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
+ id, mtd->size, mtd->writesize, mtd->oobsize, mtd->erasesize);
+ spin_unlock_irqrestore(&nand->lock, irq_flags);
+
+ mtd->priv = nand;
+
+ mtd->name = name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
+ if (name == NULL)
+ return -ENOMEM;
+
+ result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
+ name);
+ if (result != name_len) {
+ dev_err(&pdev->dev,
+ "goldfish_nand_init_device failed to get dev name %d != %d\n",
+ result, name_len);
+ return -ENODEV;
+ }
+ ((char *) mtd->name)[name_len] = '\0';
+
+ /* Setup the MTD structure */
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ if (flags & NAND_DEV_FLAG_READ_ONLY)
+ mtd->flags &= ~MTD_WRITEABLE;
+ if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
+ nand_setup_cmd_params(pdev, nand);
+
+ mtd->owner = THIS_MODULE;
+ mtd->_erase = goldfish_nand_erase;
+ mtd->_read = goldfish_nand_read;
+ mtd->_write = goldfish_nand_write;
+ mtd->_read_oob = goldfish_nand_read_oob;
+ mtd->_write_oob = goldfish_nand_write_oob;
+ mtd->_block_isbad = goldfish_nand_block_isbad;
+ mtd->_block_markbad = goldfish_nand_block_markbad;
+
+ if (mtd_device_register(mtd, NULL, 0))
+ return -EIO;
+
+ return 0;
+}
+
+static int goldfish_nand_probe(struct platform_device *pdev)
+{
+ u32 num_dev;
+ int i;
+ int err;
+ u32 num_dev_working;
+ u32 version;
+ struct resource *r;
+ struct goldfish_nand *nand;
+ unsigned char __iomem *base;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL)
+ return -ENODEV;
+
+ base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
+ if (base == NULL)
+ return -ENOMEM;
+
+ version = readl(base + NAND_VERSION);
+ if (version != NAND_VERSION_CURRENT) {
+ dev_err(&pdev->dev,
+ "goldfish_nand_init: version mismatch, got %d, expected %d\n",
+ version, NAND_VERSION_CURRENT);
+ return -ENODEV;
+ }
+ num_dev = readl(base + NAND_NUM_DEV);
+ if (num_dev == 0)
+ return -ENODEV;
+
+ nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
+ sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
+ if (nand == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&nand->lock);
+ nand->base = base;
+ nand->mtd_count = num_dev;
+ platform_set_drvdata(pdev, nand);
+
+ num_dev_working = 0;
+ for (i = 0; i < num_dev; i++) {
+ err = goldfish_nand_init_device(pdev, nand, i);
+ if (err == 0)
+ num_dev_working++;
+ }
+ if (num_dev_working == 0)
+ return -ENODEV;
+ return 0;
+}
+
+static int goldfish_nand_remove(struct platform_device *pdev)
+{
+ struct goldfish_nand *nand = platform_get_drvdata(pdev);
+ int i;
+ for (i = 0; i < nand->mtd_count; i++) {
+ if (nand->mtd[i].name)
+ mtd_device_unregister(&nand->mtd[i]);
+ }
+ return 0;
+}
+
+static struct platform_driver goldfish_nand_driver = {
+ .probe = goldfish_nand_probe,
+ .remove = goldfish_nand_remove,
+ .driver = {
+ .name = "goldfish_nand"
+ }
+};
+
+module_platform_driver(goldfish_nand_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/goldfish/goldfish_nand_reg.h b/drivers/staging/goldfish/goldfish_nand_reg.h
new file mode 100644
index 000000000000..956c6c304b6e
--- /dev/null
+++ b/drivers/staging/goldfish/goldfish_nand_reg.h
@@ -0,0 +1,72 @@
+/* drivers/mtd/devices/goldfish_nand_reg.h
+**
+** Copyright (C) 2007 Google, Inc.
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+*/
+
+#ifndef GOLDFISH_NAND_REG_H
+#define GOLDFISH_NAND_REG_H
+
+enum nand_cmd {
+ NAND_CMD_GET_DEV_NAME, /* Write device name for NAND_DEV to NAND_DATA (vaddr) */
+ NAND_CMD_READ,
+ NAND_CMD_WRITE,
+ NAND_CMD_ERASE,
+ NAND_CMD_BLOCK_BAD_GET, /* NAND_RESULT is 1 if block is bad, 0 if it is not */
+ NAND_CMD_BLOCK_BAD_SET,
+ NAND_CMD_READ_WITH_PARAMS,
+ NAND_CMD_WRITE_WITH_PARAMS,
+ NAND_CMD_ERASE_WITH_PARAMS
+};
+
+enum nand_dev_flags {
+ NAND_DEV_FLAG_READ_ONLY = 0x00000001,
+ NAND_DEV_FLAG_CMD_PARAMS_CAP = 0x00000002,
+};
+
+#define NAND_VERSION_CURRENT (1)
+
+enum nand_reg {
+ /* Global */
+ NAND_VERSION = 0x000,
+ NAND_NUM_DEV = 0x004,
+ NAND_DEV = 0x008,
+
+ /* Dev info */
+ NAND_DEV_FLAGS = 0x010,
+ NAND_DEV_NAME_LEN = 0x014,
+ NAND_DEV_PAGE_SIZE = 0x018,
+ NAND_DEV_EXTRA_SIZE = 0x01c,
+ NAND_DEV_ERASE_SIZE = 0x020,
+ NAND_DEV_SIZE_LOW = 0x028,
+ NAND_DEV_SIZE_HIGH = 0x02c,
+
+ /* Command */
+ NAND_RESULT = 0x040,
+ NAND_COMMAND = 0x044,
+ NAND_DATA = 0x048,
+ NAND_TRANSFER_SIZE = 0x04c,
+ NAND_ADDR_LOW = 0x050,
+ NAND_ADDR_HIGH = 0x054,
+ NAND_CMD_PARAMS_ADDR_LOW = 0x058,
+ NAND_CMD_PARAMS_ADDR_HIGH = 0x05c,
+};
+
+struct cmd_params {
+ uint32_t dev;
+ uint32_t addr_low;
+ uint32_t addr_high;
+ uint32_t transfer_size;
+ uint32_t data;
+ uint32_t result;
+};
+#endif
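The register block declared above implies a simple doorbell protocol: the guest selects a device in NAND_DEV, programs the address, transfer size and data pointer, writes the command to NAND_COMMAND, and reads the outcome back from NAND_RESULT (devices advertising NAND_DEV_FLAG_CMD_PARAMS_CAP can pass the same information through a struct cmd_params block instead). A hedged sketch of that sequence, assuming 32-bit guest data pointers; the helper name is illustrative and is not the driver's actual function:

	#include <linux/io.h>
	#include <linux/kernel.h>

	/* Issue one command through the goldfish NAND register block above. */
	static u32 goldfish_nand_do_cmd(void __iomem *base, u32 dev,
					enum nand_cmd cmd, u64 addr,
					u32 len, void *data)
	{
		writel(dev, base + NAND_DEV);
		writel(lower_32_bits(addr), base + NAND_ADDR_LOW);
		writel(upper_32_bits(addr), base + NAND_ADDR_HIGH);
		writel(len, base + NAND_TRANSFER_SIZE);
		writel((u32)(unsigned long)data, base + NAND_DATA);
		writel(cmd, base + NAND_COMMAND);	/* doorbell: device acts now */
		return readl(base + NAND_RESULT);	/* bytes transferred or status */
	}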
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index ca56c75a35fc..dc267175a2b5 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -12,19 +12,6 @@ config IIO_ST_HWMON
map allows IIO devices to provide basic hwmon functionality
for those channels specified in the map.
-if IIO_BUFFER
-
-config IIO_SW_RING
- select IIO_TRIGGER
- tristate "Industrial I/O lock free software ring"
- help
- Example software ring buffer implementation. The design aim
- of this particular realization was to minimize write locking
- with the intention that some devices would be able to write
- in interrupt context.
-
-endif # IIO_BUFFER
-
source "drivers/staging/iio/accel/Kconfig"
source "drivers/staging/iio/adc/Kconfig"
source "drivers/staging/iio/addac/Kconfig"
@@ -32,7 +19,6 @@ source "drivers/staging/iio/cdc/Kconfig"
source "drivers/staging/iio/frequency/Kconfig"
source "drivers/staging/iio/gyro/Kconfig"
source "drivers/staging/iio/impedance-analyzer/Kconfig"
-source "drivers/staging/iio/imu/Kconfig"
source "drivers/staging/iio/light/Kconfig"
source "drivers/staging/iio/magnetometer/Kconfig"
source "drivers/staging/iio/meter/Kconfig"
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index fa6937d92ee3..158e0a017e7b 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -2,8 +2,6 @@
# Makefile for the industrial I/O core.
#
-obj-$(CONFIG_IIO_SW_RING) += ring_sw.o
-
obj-$(CONFIG_IIO_SIMPLE_DUMMY) += iio_dummy.o
iio_dummy-y := iio_simple_dummy.o
iio_dummy-$(CONFIG_IIO_SIMPLE_DUMMY_EVENTS) += iio_simple_dummy_events.o
@@ -20,7 +18,6 @@ obj-y += cdc/
obj-y += frequency/
obj-y += gyro/
obj-y += impedance-analyzer/
-obj-y += imu/
obj-y += light/
obj-y += magnetometer/
obj-y += meter/
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 2b54430f2d99..e2e786dc9c7b 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -56,45 +56,17 @@ config ADIS16240
Say yes here to build support for Analog Devices adis16240 programmable
impact Sensor and recorder.
-config KXSD9
- tristate "Kionix KXSD9 Accelerometer Driver"
- depends on SPI
- help
- Say yes here to build support for the Kionix KXSD9 accelerometer.
- Currently this only supports the device via an SPI interface.
-
config LIS3L02DQ
tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
depends on SPI
select IIO_TRIGGER if IIO_BUFFER
- depends on !IIO_BUFFER || IIO_KFIFO_BUF || IIO_SW_RING
+ depends on !IIO_BUFFER || IIO_KFIFO_BUF
depends on GENERIC_GPIO
help
Say yes here to build SPI support for the ST microelectronics
accelerometer. The driver supplies direct access via sysfs files
and an event interface via a character device.
-choice
- prompt "Buffer type"
- depends on LIS3L02DQ && IIO_BUFFER
-
-config LIS3L02DQ_BUF_KFIFO
- depends on IIO_KFIFO_BUF
- bool "Simple FIFO"
- help
- Kfifo based FIFO. Does not provide any events so it is up
- to userspace to ensure it reads often enough that data is not
- lost.
-
-config LIS3L02DQ_BUF_RING_SW
- depends on IIO_SW_RING
- bool "IIO Software Ring"
- help
- Original IIO ring buffer implementation. Provides simple
- buffer events, half full etc.
-
-endchoice
-
config SCA3000
depends on IIO_BUFFER
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 8e7ee0368519..1ed137f1a506 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -20,8 +20,6 @@ obj-$(CONFIG_ADIS16220) += adis16220.o
adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
-obj-$(CONFIG_KXSD9) += kxsd9.o
-
lis3l02dq-y := lis3l02dq_core.o
lis3l02dq-$(CONFIG_IIO_BUFFER) += lis3l02dq_ring.o
obj-$(CONFIG_LIS3L02DQ) += lis3l02dq.o
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 2bac7221837c..0a8ea8270866 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -185,14 +185,6 @@ int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
int lis3l02dq_configure_buffer(struct iio_dev *indio_dev);
void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
-#ifdef CONFIG_LIS3L02DQ_BUF_RING_SW
-#define lis3l02dq_free_buf iio_sw_rb_free
-#define lis3l02dq_alloc_buf iio_sw_rb_allocate
-#endif
-#ifdef CONFIG_LIS3L02DQ_BUF_KFIFO
-#define lis3l02dq_free_buf iio_kfifo_free
-#define lis3l02dq_alloc_buf iio_kfifo_allocate
-#endif
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
#define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 37ed1b8ebb6f..0e019306439c 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -53,7 +53,6 @@ int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
u8 reg_address, u8 *val)
{
struct lis3l02dq_state *st = iio_priv(indio_dev);
- struct spi_message msg;
int ret;
struct spi_transfer xfer = {
.tx_buf = st->tx,
@@ -66,9 +65,7 @@ int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
st->tx[0] = LIS3L02DQ_READ_REG(reg_address);
st->tx[1] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, &xfer, 1);
*val = st->rx[1];
mutex_unlock(&st->buf_lock);
@@ -109,7 +106,6 @@ static int lis3l02dq_spi_write_reg_s16(struct iio_dev *indio_dev,
s16 value)
{
int ret;
- struct spi_message msg;
struct lis3l02dq_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = { {
.tx_buf = st->tx,
@@ -129,10 +125,7 @@ static int lis3l02dq_spi_write_reg_s16(struct iio_dev *indio_dev,
st->tx[2] = LIS3L02DQ_WRITE_REG(lower_reg_address + 1);
st->tx[3] = (value >> 8) & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
mutex_unlock(&st->buf_lock);
return ret;
@@ -143,8 +136,6 @@ static int lis3l02dq_read_reg_s16(struct iio_dev *indio_dev,
int *val)
{
struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- struct spi_message msg;
int ret;
s16 tempval;
struct spi_transfer xfers[] = { {
@@ -167,10 +158,7 @@ static int lis3l02dq_read_reg_s16(struct iio_dev *indio_dev,
st->tx[2] = LIS3L02DQ_READ_REG(lower_reg_address + 1);
st->tx[3] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 16 bit register");
goto error_ret;
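This hunk, and several later ones in the series (sca3000, ad7280a, ad5930, ad9850, ad9852), replace the open-coded spi_message_init()/spi_message_add_tail()/spi_sync() sequence with spi_sync_transfer(). The helper from <linux/spi/spi.h> does exactly that wrapping for an array of transfers, so the conversion is mechanical. A simplified sketch of the equivalent open-coded form, for readers comparing the before and after:

	#include <linux/spi/spi.h>

	/* Simplified sketch of what spi_sync_transfer() does: wrap an on-stack
	 * array of transfers in a message and run it synchronously. */
	static int example_sync_transfer(struct spi_device *spi,
					 struct spi_transfer *xfers,
					 unsigned int num_xfers)
	{
		struct spi_message msg;
		unsigned int i;

		spi_message_init(&msg);
		for (i = 0; i < num_xfers; i++)
			spi_message_add_tail(&xfers[i], &msg);

		return spi_sync(spi, &msg);
	}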
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index bc38651c315e..e676403ea3ea 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -7,7 +7,6 @@
#include <linux/export.h>
#include <linux/iio/iio.h>
-#include "../ring_sw.h"
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
@@ -141,11 +140,8 @@ static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
char *data;
data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (data == NULL) {
- dev_err(indio_dev->dev.parent,
- "memory alloc failed in buffer bh");
+ if (data == NULL)
goto done;
- }
if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
len = lis3l02dq_get_buffer_element(indio_dev, data);
@@ -318,7 +314,7 @@ void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- lis3l02dq_free_buf(indio_dev->buffer);
+ iio_kfifo_free(indio_dev->buffer);
}
static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
@@ -401,7 +397,7 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
int ret;
struct iio_buffer *buffer;
- buffer = lis3l02dq_alloc_buf(indio_dev);
+ buffer = iio_kfifo_allocate(indio_dev);
if (!buffer)
return -ENOMEM;
@@ -427,6 +423,6 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
return 0;
error_iio_sw_rb_free:
- lis3l02dq_free_buf(indio_dev->buffer);
+ iio_kfifo_free(indio_dev->buffer);
return ret;
}
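With the software ring gone, the buffer hooks above pair iio_kfifo_allocate() with iio_kfifo_free() directly. A minimal sketch of that pairing under the kfifo buffer API used in this tree (where the allocator still takes the iio_dev); the pollfunc setup and the remaining registration steps of the real driver are omitted here:

	#include <linux/iio/iio.h>
	#include <linux/iio/kfifo_buf.h>

	/* Hedged sketch: allocate a kfifo-backed buffer for an IIO device... */
	static int example_configure_buffer(struct iio_dev *indio_dev)
	{
		struct iio_buffer *buffer;

		buffer = iio_kfifo_allocate(indio_dev);
		if (!buffer)
			return -ENOMEM;

		indio_dev->buffer = buffer;
		indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
		return 0;
	}

	/* ...and release it again on the teardown path. */
	static void example_unconfigure_buffer(struct iio_dev *indio_dev)
	{
		iio_kfifo_free(indio_dev->buffer);
	}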
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 414d3cad55a7..14683f583ccf 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -90,7 +90,6 @@ int sca3000_read_data_short(struct sca3000_state *st,
uint8_t reg_address_high,
int len)
{
- struct spi_message msg;
struct spi_transfer xfer[2] = {
{
.len = 1,
@@ -101,11 +100,8 @@ int sca3000_read_data_short(struct sca3000_state *st,
}
};
st->tx[0] = SCA3000_READ_REG(reg_address_high);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer[0], &msg);
- spi_message_add_tail(&xfer[1], &msg);
- return spi_sync(st->us, &msg);
+ return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
}
/**
@@ -133,7 +129,6 @@ static int sca3000_reg_lock_on(struct sca3000_state *st)
**/
static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
{
- struct spi_message msg;
struct spi_transfer xfer[3] = {
{
.len = 2,
@@ -154,12 +149,8 @@ static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
st->tx[3] = 0x50;
st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
st->tx[5] = 0xA0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer[0], &msg);
- spi_message_add_tail(&xfer[1], &msg);
- spi_message_add_tail(&xfer[2], &msg);
- return spi_sync(st->us, &msg);
+ return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
}
/**
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index cbec2f1665e5..3e5e860aa38e 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -39,7 +39,6 @@ static int sca3000_read_data(struct sca3000_state *st,
int len)
{
int ret;
- struct spi_message msg;
struct spi_transfer xfer[2] = {
{
.len = 1,
@@ -55,10 +54,7 @@ static int sca3000_read_data(struct sca3000_state *st,
}
xfer[1].rx_buf = *rx_p;
st->tx[0] = SCA3000_READ_REG(reg_address_high);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer[0], &msg);
- spi_message_add_tail(&xfer[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
if (ret) {
dev_err(get_device(&st->us->dev), "problem reading register");
goto error_free_rx;
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index fb8c239b0c88..7b2a01d64f5e 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -119,12 +119,12 @@ config LPC32XX_ADC
via sysfs.
config MXS_LRADC
- tristate "Freescale i.MX28 LRADC"
+ tristate "Freescale i.MX23/i.MX28 LRADC"
depends on ARCH_MXS
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for i.MX28 LRADC convertor
+	  Say yes here to build support for the i.MX23/i.MX28 LRADC converter
built into these chips.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index fa81a491e790..1f190c1b12a6 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -199,12 +199,8 @@ static int __ad7280_read32(struct spi_device *spi, unsigned *val)
.rx_buf = &rx_buf,
.len = 4,
};
- struct spi_message m;
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(spi, &m);
+ ret = spi_sync_transfer(spi, &t, 1);
if (ret)
return ret;
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index fb31b457a56a..55a459b61907 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -32,6 +33,8 @@
#include <linux/stmp_device.h>
#include <linux/bitops.h>
#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/input.h>
#include <mach/mxs.h>
#include <mach/common.h>
@@ -59,7 +62,39 @@
#define LRADC_DELAY_TIMER_PER 200
#define LRADC_DELAY_TIMER_LOOP 5
-static const char * const mxs_lradc_irq_name[] = {
+/*
+ * Once the pen touches the touchscreen, the touchscreen switches from
+ * IRQ-driven mode to polling mode to prevent an interrupt storm. The
+ * polling is done by a worker thread, which runs roughly every 20
+ * milliseconds. This keeps the touchscreen responsive enough without
+ * straining the system too much.
+ */
+#define LRADC_TS_SAMPLE_DELAY_MS 5
+
+/*
+ * The LRADC reads the following number of samples from each touchscreen
+ * channel and the driver then computes the average of them.
+ */
+#define LRADC_TS_SAMPLE_AMOUNT 4
+
+enum mxs_lradc_id {
+ IMX23_LRADC,
+ IMX28_LRADC,
+};
+
+static const char * const mx23_lradc_irq_names[] = {
+ "mxs-lradc-touchscreen",
+ "mxs-lradc-channel0",
+ "mxs-lradc-channel1",
+ "mxs-lradc-channel2",
+ "mxs-lradc-channel3",
+ "mxs-lradc-channel4",
+ "mxs-lradc-channel5",
+ "mxs-lradc-channel6",
+ "mxs-lradc-channel7",
+};
+
+static const char * const mx28_lradc_irq_names[] = {
"mxs-lradc-touchscreen",
"mxs-lradc-thresh0",
"mxs-lradc-thresh1",
@@ -75,9 +110,26 @@ static const char * const mxs_lradc_irq_name[] = {
"mxs-lradc-button1",
};
-struct mxs_lradc_chan {
- uint8_t slot;
- uint8_t flags;
+struct mxs_lradc_of_config {
+ const int irq_count;
+ const char * const *irq_name;
+};
+
+static const struct mxs_lradc_of_config mxs_lradc_of_config[] = {
+ [IMX23_LRADC] = {
+ .irq_count = ARRAY_SIZE(mx23_lradc_irq_names),
+ .irq_name = mx23_lradc_irq_names,
+ },
+ [IMX28_LRADC] = {
+ .irq_count = ARRAY_SIZE(mx28_lradc_irq_names),
+ .irq_name = mx28_lradc_irq_names,
+ },
+};
+
+enum mxs_lradc_ts {
+ MXS_LRADC_TOUCHSCREEN_NONE = 0,
+ MXS_LRADC_TOUCHSCREEN_4WIRE,
+ MXS_LRADC_TOUCHSCREEN_5WIRE,
};
struct mxs_lradc {
@@ -90,24 +142,70 @@ struct mxs_lradc {
struct mutex lock;
- uint8_t enable;
-
struct completion completion;
+
+ /*
+ * Touchscreen LRADC channels receive a private slot in the CTRL4
+ * register, slot #7. Therefore, only 7 of the 8 slots in the CTRL4
+ * register can be mapped to LRADC channels when the touchscreen is
+ * in use.
+ *
+ * Furthermore, certain LRADC channels are shared between the
+ * touchscreen and/or touch-buttons and the generic LRADC block.
+ * When either of these is in use, those channels are not available
+ * for regular sampling. The shared channels are as follows:
+ *
+ * CH0 -- Touch button #0
+ * CH1 -- Touch button #1
+ * CH2 -- Touch screen XPUL
+ * CH3 -- Touch screen YPLL
+ * CH4 -- Touch screen XNUL
+ * CH5 -- Touch screen YNLR
+ * CH6 -- Touch screen WIPER (5-wire only)
+ *
+ * The bitfields below represent which parts of the LRADC block are
+ * switched into the special mode of operation. These channels cannot
+ * be sampled as regular LRADC channels; the driver will refuse any
+ * attempt to sample them.
+ */
+#define CHAN_MASK_TOUCHBUTTON (0x3 << 0)
+#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 2)
+#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 2)
+ enum mxs_lradc_ts use_touchscreen;
+ bool stop_touchscreen;
+ bool use_touchbutton;
+
+ struct input_dev *ts_input;
+ struct work_struct ts_work;
};
#define LRADC_CTRL0 0x00
-#define LRADC_CTRL0_TOUCH_DETECT_ENABLE (1 << 23)
-#define LRADC_CTRL0_TOUCH_SCREEN_TYPE (1 << 22)
+#define LRADC_CTRL0_TOUCH_DETECT_ENABLE (1 << 23)
+#define LRADC_CTRL0_TOUCH_SCREEN_TYPE (1 << 22)
+#define LRADC_CTRL0_YNNSW /* YM */ (1 << 21)
+#define LRADC_CTRL0_YPNSW /* YP */ (1 << 20)
+#define LRADC_CTRL0_YPPSW /* YP */ (1 << 19)
+#define LRADC_CTRL0_XNNSW /* XM */ (1 << 18)
+#define LRADC_CTRL0_XNPSW /* XM */ (1 << 17)
+#define LRADC_CTRL0_XPPSW /* XP */ (1 << 16)
+#define LRADC_CTRL0_PLATE_MASK (0x3f << 16)
#define LRADC_CTRL1 0x10
-#define LRADC_CTRL1_LRADC_IRQ(n) (1 << (n))
-#define LRADC_CTRL1_LRADC_IRQ_MASK 0x1fff
+#define LRADC_CTRL1_TOUCH_DETECT_IRQ_EN (1 << 24)
#define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16))
#define LRADC_CTRL1_LRADC_IRQ_EN_MASK (0x1fff << 16)
+#define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET 16
+#define LRADC_CTRL1_TOUCH_DETECT_IRQ (1 << 8)
+#define LRADC_CTRL1_LRADC_IRQ(n) (1 << (n))
+#define LRADC_CTRL1_LRADC_IRQ_MASK 0x1fff
+#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0
#define LRADC_CTRL2 0x20
#define LRADC_CTRL2_TEMPSENSE_PWD (1 << 15)
+#define LRADC_STATUS 0x40
+#define LRADC_STATUS_TOUCH_DETECT_RAW (1 << 0)
+
#define LRADC_CH(n) (0x50 + (0x10 * (n)))
#define LRADC_CH_ACCUMULATE (1 << 29)
#define LRADC_CH_NUM_SAMPLES_MASK (0x1f << 24)
@@ -139,6 +237,7 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
{
struct mxs_lradc *lradc = iio_priv(iio_dev);
int ret;
+	unsigned long mask = 0;
if (m != IIO_CHAN_INFO_RAW)
return -EINVAL;
@@ -147,6 +246,12 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
if (chan->channel > LRADC_MAX_TOTAL_CHANS)
return -EINVAL;
+	/* Refuse the channel if it intersects with the reserved channels. */
+ bitmap_set(&mask, chan->channel, 1);
+ ret = iio_validate_scan_mask_onehot(iio_dev, &mask);
+ if (ret)
+ return -EINVAL;
+
/*
	 * See whether a buffered operation is in progress. If there is, simply
* bail out. This can be improved to support both buffered and raw IO at
@@ -168,7 +273,11 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
- writel(chan->channel, lradc->base + LRADC_CTRL4);
+	/* Clear the slot's previous content, then set the new one. */
+ writel(LRADC_CTRL4_LRADCSELECT_MASK(0),
+ lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_CLR);
+ writel(chan->channel, lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_SET);
+
writel(0, lradc->base + LRADC_CH(0));
/* Enable the IRQ and start sampling the channel. */
@@ -202,6 +311,269 @@ static const struct iio_info mxs_lradc_iio_info = {
};
/*
+ * Touchscreen handling
+ */
+enum lradc_ts_plate {
+ LRADC_SAMPLE_X,
+ LRADC_SAMPLE_Y,
+ LRADC_SAMPLE_PRESSURE,
+};
+
+static int mxs_lradc_ts_touched(struct mxs_lradc *lradc)
+{
+ uint32_t reg;
+
+ /* Enable touch detection. */
+ writel(LRADC_CTRL0_PLATE_MASK,
+ lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(LRADC_CTRL0_TOUCH_DETECT_ENABLE,
+ lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
+
+ msleep(LRADC_TS_SAMPLE_DELAY_MS);
+
+ reg = readl(lradc->base + LRADC_STATUS);
+
+ return reg & LRADC_STATUS_TOUCH_DETECT_RAW;
+}
+
+static int32_t mxs_lradc_ts_sample(struct mxs_lradc *lradc,
+ enum lradc_ts_plate plate, int change)
+{
+ unsigned long delay, jiff;
+ uint32_t reg, ctrl0 = 0, chan = 0;
+ /* The touchscreen always uses CTRL4 slot #7. */
+ const uint8_t slot = 7;
+ uint32_t val;
+
+ /*
+ * There are three correct configurations of the controller for sampling
+ * the touchscreen; each of these configurations provides different
+ * information from the touchscreen.
+ *
+ * The following table describes the sampling configurations:
+ * +-------------+-------+-------+-------+
+ * | Wire \ Axis |   X   |   Y   |   Z   |
+ * +-------------+-------+-------+-------+
+ * | X+ (CH2)    |  HI   |  TS   |  TS   |
+ * +-------------+-------+-------+-------+
+ * | X- (CH4)    |  LO   |  SH   |  HI   |
+ * +-------------+-------+-------+-------+
+ * | Y+ (CH3)    |  SH   |  HI   |  HI   |
+ * +-------------+-------+-------+-------+
+ * | Y- (CH5)    |  TS   |  LO   |  SH   |
+ * +-------------+-------+-------+-------+
+ *
+ * HI ... strong '1' ; LO ... strong '0'
+ * SH ... sample here ; TS ... tri-state
+ *
+ * There are a few other ways of obtaining the Z coordinate
+ * (aka. pressure), but the one in the table seems to be the
+ * most reliable one.
+ */
+ switch (plate) {
+ case LRADC_SAMPLE_X:
+ ctrl0 = LRADC_CTRL0_XPPSW | LRADC_CTRL0_XNNSW;
+ chan = 3;
+ break;
+ case LRADC_SAMPLE_Y:
+ ctrl0 = LRADC_CTRL0_YPPSW | LRADC_CTRL0_YNNSW;
+ chan = 4;
+ break;
+ case LRADC_SAMPLE_PRESSURE:
+ ctrl0 = LRADC_CTRL0_YPPSW | LRADC_CTRL0_XNNSW;
+ chan = 5;
+ break;
+ }
+
+ if (change) {
+ writel(LRADC_CTRL0_PLATE_MASK,
+ lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(ctrl0, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
+
+ writel(LRADC_CTRL4_LRADCSELECT_MASK(slot),
+ lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_CLR);
+ writel(chan << LRADC_CTRL4_LRADCSELECT_OFFSET(slot),
+ lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_SET);
+ }
+
+ writel(0xffffffff, lradc->base + LRADC_CH(slot) + STMP_OFFSET_REG_CLR);
+ writel(1 << slot, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
+
+ delay = jiffies + msecs_to_jiffies(LRADC_TS_SAMPLE_DELAY_MS);
+ do {
+ jiff = jiffies;
+ reg = readl_relaxed(lradc->base + LRADC_CTRL1);
+ if (reg & LRADC_CTRL1_LRADC_IRQ(slot))
+ break;
+ } while (time_before(jiff, delay));
+
+ writel(LRADC_CTRL1_LRADC_IRQ(slot),
+ lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+
+ if (time_after_eq(jiff, delay))
+ return -ETIMEDOUT;
+
+ val = readl(lradc->base + LRADC_CH(slot));
+ val &= LRADC_CH_VALUE_MASK;
+
+ return val;
+}
+
+static int32_t mxs_lradc_ts_sample_filter(struct mxs_lradc *lradc,
+ enum lradc_ts_plate plate)
+{
+ int32_t val, tot = 0;
+ int i;
+
+ val = mxs_lradc_ts_sample(lradc, plate, 1);
+
+ /* Delay a bit so the touchscreen is stable. */
+ mdelay(2);
+
+ for (i = 0; i < LRADC_TS_SAMPLE_AMOUNT; i++) {
+ val = mxs_lradc_ts_sample(lradc, plate, 0);
+ tot += val;
+ }
+
+ return tot / LRADC_TS_SAMPLE_AMOUNT;
+}
+
+static void mxs_lradc_ts_work(struct work_struct *ts_work)
+{
+ struct mxs_lradc *lradc = container_of(ts_work,
+ struct mxs_lradc, ts_work);
+ int val_x, val_y, val_p;
+ bool valid = false;
+
+ while (mxs_lradc_ts_touched(lradc)) {
+ /* Disable touch detector so we can sample the touchscreen. */
+ writel(LRADC_CTRL0_TOUCH_DETECT_ENABLE,
+ lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+
+ if (likely(valid)) {
+ input_report_abs(lradc->ts_input, ABS_X, val_x);
+ input_report_abs(lradc->ts_input, ABS_Y, val_y);
+ input_report_abs(lradc->ts_input, ABS_PRESSURE, val_p);
+ input_report_key(lradc->ts_input, BTN_TOUCH, 1);
+ input_sync(lradc->ts_input);
+ }
+
+ valid = false;
+
+ val_x = mxs_lradc_ts_sample_filter(lradc, LRADC_SAMPLE_X);
+ if (val_x < 0)
+ continue;
+ val_y = mxs_lradc_ts_sample_filter(lradc, LRADC_SAMPLE_Y);
+ if (val_y < 0)
+ continue;
+ val_p = mxs_lradc_ts_sample_filter(lradc, LRADC_SAMPLE_PRESSURE);
+ if (val_p < 0)
+ continue;
+
+ valid = true;
+ }
+
+ input_report_abs(lradc->ts_input, ABS_PRESSURE, 0);
+ input_report_key(lradc->ts_input, BTN_TOUCH, 0);
+ input_sync(lradc->ts_input);
+
+ /* Do not restart the TS IRQ if the driver is shutting down. */
+ if (lradc->stop_touchscreen)
+ return;
+
+ /* Restart the touchscreen interrupts. */
+ writel(LRADC_CTRL1_TOUCH_DETECT_IRQ,
+ lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+ writel(LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
+ lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_SET);
+}
+
+static int mxs_lradc_ts_open(struct input_dev *dev)
+{
+ struct mxs_lradc *lradc = input_get_drvdata(dev);
+
+ /* The touchscreen is starting. */
+ lradc->stop_touchscreen = false;
+
+ /* Enable the touch-detect circuitry. */
+ writel(LRADC_CTRL0_TOUCH_DETECT_ENABLE,
+ lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
+
+ /* Enable the touch-detect IRQ. */
+ writel(LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
+ lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_SET);
+
+ return 0;
+}
+
+static void mxs_lradc_ts_close(struct input_dev *dev)
+{
+ struct mxs_lradc *lradc = input_get_drvdata(dev);
+
+ /* Indicate the touchscreen is stopping. */
+ lradc->stop_touchscreen = true;
+ mb();
+
+	/* Wait until the touchscreen worker finishes any pending work. */
+ cancel_work_sync(&lradc->ts_work);
+
+ /* Disable touchscreen touch-detect IRQ. */
+ writel(LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
+ lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+
+ /* Power-down touchscreen touch-detect circuitry. */
+ writel(LRADC_CTRL0_TOUCH_DETECT_ENABLE,
+ lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+}
+
+static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
+{
+ struct input_dev *input;
+ struct device *dev = lradc->dev;
+ int ret;
+
+ if (!lradc->use_touchscreen)
+ return 0;
+
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(dev, "Failed to allocate TS device!\n");
+ return -ENOMEM;
+ }
+
+ input->name = DRIVER_NAME;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = dev;
+ input->open = mxs_lradc_ts_open;
+ input->close = mxs_lradc_ts_close;
+
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ input_set_abs_params(input, ABS_X, 0, LRADC_CH_VALUE_MASK, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, LRADC_CH_VALUE_MASK, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_CH_VALUE_MASK, 0, 0);
+
+ lradc->ts_input = input;
+ input_set_drvdata(input, lradc);
+ ret = input_register_device(input);
+ if (ret)
+ input_free_device(lradc->ts_input);
+
+ return ret;
+}
+
+static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc)
+{
+ if (!lradc->use_touchscreen)
+ return;
+
+ cancel_work_sync(&lradc->ts_work);
+
+ input_unregister_device(lradc->ts_input);
+}
+
+/*
* IRQ Handling
*/
static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
@@ -209,14 +581,24 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
struct iio_dev *iio = data;
struct mxs_lradc *lradc = iio_priv(iio);
unsigned long reg = readl(lradc->base + LRADC_CTRL1);
+ const uint32_t ts_irq_mask =
+ LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
+ LRADC_CTRL1_TOUCH_DETECT_IRQ;
if (!(reg & LRADC_CTRL1_LRADC_IRQ_MASK))
return IRQ_NONE;
/*
- * Touchscreen IRQ handling code shall probably have priority
- * and therefore shall be placed here.
+ * Touchscreen IRQ handling code has priority and is therefore
+ * placed here. When a touchscreen IRQ arrives, disable it as
+ * soon as possible.
*/
+ if (reg & LRADC_CTRL1_TOUCH_DETECT_IRQ) {
+ writel(ts_irq_mask,
+ lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
+ if (!lradc->stop_touchscreen)
+ schedule_work(&lradc->ts_work);
+ }
if (iio_buffer_enabled(iio))
iio_trigger_poll(iio->trig, iio_get_time_ns());
@@ -239,7 +621,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
struct mxs_lradc *lradc = iio_priv(iio);
const uint32_t chan_value = LRADC_CH_ACCUMULATE |
((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
- int i, j = 0;
+ unsigned int i, j = 0;
for_each_set_bit(i, iio->active_scan_mask, iio->masklength) {
lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
@@ -312,8 +694,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
{
struct mxs_lradc *lradc = iio_priv(iio);
struct iio_buffer *buffer = iio->buffer;
- int ret = 0, chan, ofs = 0, enable = 0;
- uint32_t ctrl4 = 0;
+ int ret = 0, chan, ofs = 0;
+ unsigned long enable = 0;
+ uint32_t ctrl4_set = 0;
+ uint32_t ctrl4_clr = 0;
uint32_t ctrl1_irq = 0;
const uint32_t chan_value = LRADC_CH_ACCUMULATE |
((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
@@ -345,17 +729,20 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
for_each_set_bit(chan, buffer->scan_mask, LRADC_MAX_TOTAL_CHANS) {
- ctrl4 |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
+ ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
+ ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs);
ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs);
writel(chan_value, lradc->base + LRADC_CH(ofs));
- enable |= 1 << ofs;
+ bitmap_set(&enable, ofs, 1);
ofs++;
}
writel(LRADC_DELAY_TRIGGER_LRADCS_MASK | LRADC_DELAY_KICK,
lradc->base + LRADC_DELAY(0) + STMP_OFFSET_REG_CLR);
- writel(ctrl4, lradc->base + LRADC_CTRL4);
+ writel(ctrl4_clr, lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_CLR);
+ writel(ctrl4_set, lradc->base + LRADC_CTRL4 + STMP_OFFSET_REG_SET);
+
writel(ctrl1_irq, lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_SET);
writel(enable << LRADC_DELAY_TRIGGER_LRADCS_OFFSET,
@@ -390,9 +777,33 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
const unsigned long *mask)
{
- const int mw = bitmap_weight(mask, iio->masklength);
-
- return mw <= LRADC_MAX_MAPPED_CHANS;
+ struct mxs_lradc *lradc = iio_priv(iio);
+ const int len = iio->masklength;
+ const int map_chans = bitmap_weight(mask, len);
+ int rsvd_chans = 0;
+ unsigned long rsvd_mask = 0;
+
+ if (lradc->use_touchbutton)
+ rsvd_mask |= CHAN_MASK_TOUCHBUTTON;
+ if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_4WIRE)
+ rsvd_mask |= CHAN_MASK_TOUCHSCREEN_4WIRE;
+ if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
+ rsvd_mask |= CHAN_MASK_TOUCHSCREEN_5WIRE;
+
+ if (lradc->use_touchbutton)
+ rsvd_chans++;
+ if (lradc->use_touchscreen)
+ rsvd_chans++;
+
+	/* Refuse attempts to map channels switched into the special mode. */
+ if (bitmap_intersects(mask, &rsvd_mask, len))
+ return false;
+
+	/* Refuse attempts to map more channels than there are available slots. */
+ if (map_chans + rsvd_chans > LRADC_MAX_MAPPED_CHANS)
+ return false;
+
+ return true;
}
static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
@@ -441,15 +852,29 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
static void mxs_lradc_hw_init(struct mxs_lradc *lradc)
{
- int i;
- const uint32_t cfg =
+ /* The ADC always uses DELAY CHANNEL 0. */
+ const uint32_t adc_cfg =
+ (1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) |
(LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET);
stmp_reset_block(lradc->base);
- for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++)
- writel(cfg | (1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + i)),
- lradc->base + LRADC_DELAY(i));
+ /* Configure DELAY CHANNEL 0 for generic ADC sampling. */
+ writel(adc_cfg, lradc->base + LRADC_DELAY(0));
+
+ /* Disable remaining DELAY CHANNELs */
+ writel(0, lradc->base + LRADC_DELAY(1));
+ writel(0, lradc->base + LRADC_DELAY(2));
+ writel(0, lradc->base + LRADC_DELAY(3));
+
+ /* Configure the touchscreen type */
+ writel(LRADC_CTRL0_TOUCH_SCREEN_TYPE,
+ lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
+
+ if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE) {
+ writel(LRADC_CTRL0_TOUCH_SCREEN_TYPE,
+ lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_SET);
+ }
/* Start internal temperature sensing. */
writel(0, lradc->base + LRADC_CTRL2);
@@ -466,12 +891,25 @@ static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
writel(0, lradc->base + LRADC_DELAY(i));
}
+static const struct of_device_id mxs_lradc_dt_ids[] = {
+ { .compatible = "fsl,imx23-lradc", .data = (void *)IMX23_LRADC, },
+ { .compatible = "fsl,imx28-lradc", .data = (void *)IMX28_LRADC, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_lradc_dt_ids);
+
static int mxs_lradc_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_lradc_dt_ids, &pdev->dev);
+ const struct mxs_lradc_of_config *of_cfg =
+ &mxs_lradc_of_config[(enum mxs_lradc_id)of_id->data];
struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct mxs_lradc *lradc;
struct iio_dev *iio;
struct resource *iores;
+ uint32_t ts_wires = 0;
int ret = 0;
int i;
@@ -487,14 +925,29 @@ static int mxs_lradc_probe(struct platform_device *pdev)
/* Grab the memory area */
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lradc->dev = &pdev->dev;
- lradc->base = devm_request_and_ioremap(dev, iores);
- if (!lradc->base) {
- ret = -EADDRNOTAVAIL;
+ lradc->base = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(lradc->base)) {
+ ret = PTR_ERR(lradc->base);
goto err_addr;
}
+ INIT_WORK(&lradc->ts_work, mxs_lradc_ts_work);
+
+ /* Check if touchscreen is enabled in DT. */
+ ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires",
+ &ts_wires);
+ if (ret)
+ dev_info(dev, "Touchscreen not enabled.\n");
+ else if (ts_wires == 4)
+ lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_4WIRE;
+ else if (ts_wires == 5)
+ lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_5WIRE;
+ else
+ dev_warn(dev, "Unsupported number of touchscreen wires (%d)\n",
+ ts_wires);
+
/* Grab all IRQ sources */
- for (i = 0; i < 13; i++) {
+ for (i = 0; i < of_cfg->irq_count; i++) {
lradc->irq[i] = platform_get_irq(pdev, i);
if (lradc->irq[i] < 0) {
ret = -EINVAL;
@@ -503,7 +956,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, lradc->irq[i],
mxs_lradc_handle_irq, 0,
- mxs_lradc_irq_name[i], iio);
+ of_cfg->irq_name[i], iio);
if (ret)
goto err_addr;
}
@@ -530,11 +983,16 @@ static int mxs_lradc_probe(struct platform_device *pdev)
if (ret)
goto err_trig;
+ /* Register the touchscreen input device. */
+ ret = mxs_lradc_ts_register(lradc);
+ if (ret)
+ goto err_dev;
+
/* Register IIO device. */
ret = iio_device_register(iio);
if (ret) {
dev_err(dev, "Failed to register IIO device\n");
- goto err_dev;
+ goto err_ts;
}
/* Configure the hardware. */
@@ -542,6 +1000,8 @@ static int mxs_lradc_probe(struct platform_device *pdev)
return 0;
+err_ts:
+ mxs_lradc_ts_unregister(lradc);
err_dev:
mxs_lradc_trigger_remove(iio);
err_trig:
@@ -556,6 +1016,8 @@ static int mxs_lradc_remove(struct platform_device *pdev)
struct iio_dev *iio = platform_get_drvdata(pdev);
struct mxs_lradc *lradc = iio_priv(iio);
+ mxs_lradc_ts_unregister(lradc);
+
mxs_lradc_hw_stop(lradc);
iio_device_unregister(iio);
@@ -566,12 +1028,6 @@ static int mxs_lradc_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mxs_lradc_dt_ids[] = {
- { .compatible = "fsl,imx28-lradc", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxs_lradc_dt_ids);
-
static struct platform_driver mxs_lradc_driver = {
.driver = {
.name = DRIVER_NAME,
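Both the raw read path and the buffer preenable path above program the CTRL4 virtual-slot mapping through the SET/CLR register aliases: the old channel-select field for the slot is cleared first, then the new channel number is written. The field macros themselves sit outside the hunks shown here; the following is a hedged sketch of the assumed layout (one 4-bit select field per slot; the register offset is taken from the i.MX28 reference manual and may differ from the driver's exact definitions):

	#include <linux/io.h>
	#include <linux/stmp_device.h>

	/* Assumed CTRL4 layout: eight 4-bit LRADCx SELECT fields, one per slot. */
	#define EXAMPLE_LRADC_CTRL4			0x140
	#define EXAMPLE_LRADC_CTRL4_SELECT_OFFSET(n)	((n) * 4)
	#define EXAMPLE_LRADC_CTRL4_SELECT_MASK(n)	(0xf << ((n) * 4))

	/* Map physical channel 'chan' into virtual slot 'slot', mirroring the
	 * clear-then-set sequence used in mxs_lradc_read_raw() above. */
	static void example_map_channel(void __iomem *base, unsigned int slot,
					unsigned int chan)
	{
		writel(EXAMPLE_LRADC_CTRL4_SELECT_MASK(slot),
		       base + EXAMPLE_LRADC_CTRL4 + STMP_OFFSET_REG_CLR);
		writel(chan << EXAMPLE_LRADC_CTRL4_SELECT_OFFSET(slot),
		       base + EXAMPLE_LRADC_CTRL4 + STMP_OFFSET_REG_SET);
	}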
diff --git a/drivers/staging/iio/frequency/ad5930.c b/drivers/staging/iio/frequency/ad5930.c
index 23777be38b18..69e90e9e60ea 100644
--- a/drivers/staging/iio/frequency/ad5930.c
+++ b/drivers/staging/iio/frequency/ad5930.c
@@ -44,7 +44,6 @@ static ssize_t ad5930_set_parameter(struct device *dev,
const char *buf,
size_t len)
{
- struct spi_message msg;
struct spi_transfer xfer;
int ret;
struct ad5903_config *config = (struct ad5903_config *)buf;
@@ -64,9 +63,7 @@ static ssize_t ad5930_set_parameter(struct device *dev,
xfer.tx_buf = config;
mutex_lock(&st->lock);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
error_ret:
diff --git a/drivers/staging/iio/frequency/ad9850.c b/drivers/staging/iio/frequency/ad9850.c
index 104f7a4905a3..01a8a93031f5 100644
--- a/drivers/staging/iio/frequency/ad9850.c
+++ b/drivers/staging/iio/frequency/ad9850.c
@@ -39,7 +39,6 @@ static ssize_t ad9850_set_parameter(struct device *dev,
const char *buf,
size_t len)
{
- struct spi_message msg;
struct spi_transfer xfer;
int ret;
struct ad9850_config *config = (struct ad9850_config *)buf;
@@ -50,9 +49,7 @@ static ssize_t ad9850_set_parameter(struct device *dev,
xfer.tx_buf = config;
mutex_lock(&st->lock);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
error_ret:
diff --git a/drivers/staging/iio/frequency/ad9852.c b/drivers/staging/iio/frequency/ad9852.c
index 17ac825b3d26..1344031232bc 100644
--- a/drivers/staging/iio/frequency/ad9852.c
+++ b/drivers/staging/iio/frequency/ad9852.c
@@ -183,7 +183,6 @@ static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9852_set_parameter, 0);
static void ad9852_init(struct ad9852_state *st)
{
- struct spi_message msg;
struct spi_transfer xfer;
int ret;
u8 config[5];
@@ -199,9 +198,7 @@ static void ad9852_init(struct ad9852_state *st)
xfer.len = 5;
xfer.tx_buf = &config;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret)
goto error_ret;
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index ea295b25308c..836066287192 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -10,13 +10,6 @@ config ADIS16060
Say yes here to build support for Analog Devices adis16060 wide bandwidth
yaw rate gyroscope with SPI.
-config ADIS16080
- tristate "Analog Devices ADIS16080/100 Yaw Rate Gyroscope with SPI driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices adis16080/100 Yaw Rate
- Gyroscope with SPI.
-
config ADIS16130
tristate "Analog Devices ADIS16130 High Precision Angular Rate Sensor driver"
depends on SPI
@@ -27,8 +20,8 @@ config ADIS16130
config ADIS16260
tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
depends on SPI
- select IIO_TRIGGER if IIO_BUFFER
- select IIO_SW_RING if IIO_BUFFER
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
Say yes here to build support for Analog Devices ADIS16260 ADIS16265
ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
@@ -36,14 +29,4 @@ config ADIS16260
This driver can also be built as a module. If so, the module
will be called adis16260.
-config ADXRS450
- tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices ADXRS450 and ADXRS453
- programmable digital output gyroscope.
-
- This driver can also be built as a module. If so, the module
- will be called adxrs450.
-
endmenu
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
index 1303569e5c8a..98e650061a3a 100644
--- a/drivers/staging/iio/gyro/Makefile
+++ b/drivers/staging/iio/gyro/Makefile
@@ -5,17 +5,8 @@
adis16060-y := adis16060_core.o
obj-$(CONFIG_ADIS16060) += adis16060.o
-adis16080-y := adis16080_core.o
-obj-$(CONFIG_ADIS16080) += adis16080.o
-
adis16130-y := adis16130_core.o
obj-$(CONFIG_ADIS16130) += adis16130.o
adis16260-y := adis16260_core.o
obj-$(CONFIG_ADIS16260) += adis16260.o
-
-adis16251-y := adis16251_core.o
-obj-$(CONFIG_ADIS16251) += adis16251.o
-
-adxrs450-y := adxrs450_core.o
-obj-$(CONFIG_ADXRS450) += adxrs450.o
diff --git a/drivers/staging/iio/gyro/adxrs450.h b/drivers/staging/iio/gyro/adxrs450.h
deleted file mode 100644
index f8cf21f02943..000000000000
--- a/drivers/staging/iio/gyro/adxrs450.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef SPI_ADXRS450_H_
-#define SPI_ADXRS450_H_
-
-#define ADXRS450_STARTUP_DELAY 50 /* ms */
-
-/* The MSB for the spi commands */
-#define ADXRS450_SENSOR_DATA 0x20
-#define ADXRS450_WRITE_DATA 0x40
-#define ADXRS450_READ_DATA 0x80
-
-#define ADXRS450_RATE1 0x00 /* Rate Registers */
-#define ADXRS450_TEMP1 0x02 /* Temperature Registers */
-#define ADXRS450_LOCST1 0x04 /* Low CST Memory Registers */
-#define ADXRS450_HICST1 0x06 /* High CST Memory Registers */
-#define ADXRS450_QUAD1 0x08 /* Quad Memory Registers */
-#define ADXRS450_FAULT1 0x0A /* Fault Registers */
-#define ADXRS450_PID1 0x0C /* Part ID Register 1 */
-#define ADXRS450_SNH 0x0E /* Serial Number Registers, 4 bytes */
-#define ADXRS450_SNL 0x10
-#define ADXRS450_DNC1 0x12 /* Dynamic Null Correction Registers */
-/* Check bits */
-#define ADXRS450_P 0x01
-#define ADXRS450_CHK 0x02
-#define ADXRS450_CST 0x04
-#define ADXRS450_PWR 0x08
-#define ADXRS450_POR 0x10
-#define ADXRS450_NVM 0x20
-#define ADXRS450_Q 0x40
-#define ADXRS450_PLL 0x80
-#define ADXRS450_UV 0x100
-#define ADXRS450_OV 0x200
-#define ADXRS450_AMP 0x400
-#define ADXRS450_FAIL 0x800
-
-#define ADXRS450_WRERR_MASK (0x7 << 29)
-
-#define ADXRS450_MAX_RX 4
-#define ADXRS450_MAX_TX 4
-
-#define ADXRS450_GET_ST(a) ((a >> 26) & 0x3)
-
-enum {
- ID_ADXRS450,
- ID_ADXRS453,
-};
-
-/**
- * struct adxrs450_state - device instance specific data
- * @us: actual spi_device
- * @buf_lock: mutex to protect tx and rx
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct adxrs450_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADXRS450_MAX_RX] ____cacheline_aligned;
- u8 rx[ADXRS450_MAX_TX];
-
-};
-
-#endif /* SPI_ADXRS450_H_ */
diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
index c7a5f97576c7..93af756ba48c 100644
--- a/drivers/staging/iio/iio_hwmon.c
+++ b/drivers/staging/iio/iio_hwmon.c
@@ -55,63 +55,58 @@ static ssize_t iio_hwmon_read_val(struct device *dev,
return sprintf(buf, "%d\n", result);
}
-static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
+static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- int i;
- struct sensor_device_attribute *a;
- for (i = 0; i < st->num_channels; i++)
- if (st->attrs[i]) {
- a = to_sensor_dev_attr(
- container_of(st->attrs[i],
- struct device_attribute,
- attr));
- kfree(a);
- }
+ return sprintf(buf, "iio_hwmon\n");
}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
static int iio_hwmon_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct iio_hwmon_state *st;
struct sensor_device_attribute *a;
int ret, i;
int in_i = 1, temp_i = 1, curr_i = 1;
enum iio_chan_type type;
+ struct iio_channel *channels;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ channels = iio_channel_get_all(dev);
+ if (IS_ERR(channels))
+ return PTR_ERR(channels);
- st->channels = iio_channel_get_all(dev_name(&pdev->dev));
- if (IS_ERR(st->channels)) {
- ret = PTR_ERR(st->channels);
- goto error_free_state;
- }
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ st->channels = channels;
/* count how many attributes we have */
while (st->channels[st->num_channels].indio_dev)
st->num_channels++;
- st->attrs = kzalloc(sizeof(st->attrs) * (st->num_channels + 1),
- GFP_KERNEL);
+ st->attrs = devm_kzalloc(dev,
+ sizeof(*st->attrs) * (st->num_channels + 2),
+ GFP_KERNEL);
if (st->attrs == NULL) {
ret = -ENOMEM;
goto error_release_channels;
}
+
for (i = 0; i < st->num_channels; i++) {
- a = kzalloc(sizeof(*a), GFP_KERNEL);
+ a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
if (a == NULL) {
ret = -ENOMEM;
- goto error_free_attrs;
+ goto error_release_channels;
}
sysfs_attr_init(&a->dev_attr.attr);
ret = iio_get_channel_type(&st->channels[i], &type);
- if (ret < 0) {
- kfree(a);
- goto error_free_attrs;
- }
+ if (ret < 0)
+ goto error_release_channels;
+
switch (type) {
case IIO_VOLTAGE:
a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
@@ -130,27 +125,25 @@ static int iio_hwmon_probe(struct platform_device *pdev)
break;
default:
ret = -EINVAL;
- kfree(a);
- goto error_free_attrs;
+ goto error_release_channels;
}
if (a->dev_attr.attr.name == NULL) {
- kfree(a);
ret = -ENOMEM;
- goto error_free_attrs;
+ goto error_release_channels;
}
a->dev_attr.show = iio_hwmon_read_val;
a->dev_attr.attr.mode = S_IRUGO;
a->index = i;
st->attrs[i] = &a->dev_attr.attr;
}
-
+ st->attrs[st->num_channels] = &dev_attr_name.attr;
st->attr_group.attrs = st->attrs;
platform_set_drvdata(pdev, st);
- ret = sysfs_create_group(&pdev->dev.kobj, &st->attr_group);
+ ret = sysfs_create_group(&dev->kobj, &st->attr_group);
if (ret < 0)
- goto error_free_attrs;
+ goto error_release_channels;
- st->hwmon_dev = hwmon_device_register(&pdev->dev);
+ st->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(st->hwmon_dev)) {
ret = PTR_ERR(st->hwmon_dev);
goto error_remove_group;
@@ -158,15 +151,9 @@ static int iio_hwmon_probe(struct platform_device *pdev)
return 0;
error_remove_group:
- sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
-error_free_attrs:
- iio_hwmon_free_attrs(st);
- kfree(st->attrs);
+ sysfs_remove_group(&dev->kobj, &st->attr_group);
error_release_channels:
iio_channel_release_all(st->channels);
-error_free_state:
- kfree(st);
-error_ret:
return ret;
}
@@ -176,17 +163,21 @@ static int iio_hwmon_remove(struct platform_device *pdev)
hwmon_device_unregister(st->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
- iio_hwmon_free_attrs(st);
- kfree(st->attrs);
iio_channel_release_all(st->channels);
return 0;
}
+static struct of_device_id iio_hwmon_of_match[] = {
+ { .compatible = "iio-hwmon", },
+ { }
+};
+
static struct platform_driver __refdata iio_hwmon_driver = {
.driver = {
.name = "iio_hwmon",
.owner = THIS_MODULE,
+ .of_match_table = iio_hwmon_of_match,
},
.probe = iio_hwmon_probe,
.remove = iio_hwmon_remove,
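One detail worth spelling out in the conversion above: the attribute array is now sized num_channels + 2 because hwmon expects a "name" attribute on the device and the attribute group must stay NULL-terminated. A comment-style sketch of the resulting layout, mirroring the indices used in the probe code above:

	/* Layout of st->attrs after iio_hwmon_probe():
	 *
	 *   st->attrs[0 .. num_channels - 1]  per-channel inX_input / tempX_input /
	 *                                     currX_input attributes
	 *   st->attrs[num_channels]           &dev_attr_name.attr ("name" file)
	 *   st->attrs[num_channels + 1]       NULL terminator, left zeroed by
	 *                                     devm_kzalloc() -- hence the "+ 2"
	 */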
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index a865adf81938..aee76c710a3b 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -54,7 +54,7 @@ struct iio_dummy_accel_calibscale {
static const struct iio_dummy_accel_calibscale dummy_scales[] = {
{ 0, 100, 0x8 }, /* 0.000100 */
{ 0, 133, 0x7 }, /* 0.000133 */
- { 733, 13, 0x9 }, /* 733.00013 */
+ { 733, 13, 0x9 }, /* 733.000013 */
};
/*
@@ -284,7 +284,7 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
/**
* iio_dummy_write_raw() - data write function.
* @indio_dev: the struct iio_dev associated with this device instance
- * @chan: the channel whose data is to be read
+ * @chan: the channel whose data is to be written
* @val: first element of value to set (typically INT)
* @val2: second element of value to set (typically MICRO)
* @mask: what we actually want to write. 0 is the channel, everything else
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index dee16f0e7570..72f400c3cbcb 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -155,7 +155,7 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev,
* occurs, this function is run. Typically this grabs data
* from the device.
*
- * NULL for the top half. This is normally implemented only if we
+ * NULL for the bottom half. This is normally implemented only if we
* either want to ping a capture now pin (no sleeping) or grab
* a timestamp as close as possible to a data ready trigger firing.
*
diff --git a/drivers/staging/iio/impedance-analyzer/Kconfig b/drivers/staging/iio/impedance-analyzer/Kconfig
index ad0ff765e4b2..dd97b6bb3fd0 100644
--- a/drivers/staging/iio/impedance-analyzer/Kconfig
+++ b/drivers/staging/iio/impedance-analyzer/Kconfig
@@ -7,7 +7,7 @@ config AD5933
tristate "Analog Devices AD5933, AD5934 driver"
depends on I2C
select IIO_BUFFER
- select IIO_SW_RING
+ select IIO_KFIFO_BUF
help
Say yes here to build support for Analog Devices Impedance Converter,
Network Analyzer, AD5933/4, provides direct access via sysfs.
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 779243d24dec..440e2261e8cb 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -22,7 +22,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include "../ring_sw.h"
+#include <linux/iio/kfifo_buf.h>
#include "ad5933.h"
@@ -630,7 +630,7 @@ static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
- indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ indio_dev->buffer = iio_kfifo_allocate(indio_dev);
if (!indio_dev->buffer)
return -ENOMEM;
@@ -774,7 +774,7 @@ static int ad5933_probe(struct i2c_client *client,
error_uninitialize_ring:
iio_buffer_unregister(indio_dev);
error_unreg_ring:
- iio_sw_rb_free(indio_dev->buffer);
+ iio_kfifo_free(indio_dev->buffer);
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
@@ -794,7 +794,7 @@ static int ad5933_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
iio_buffer_unregister(indio_dev);
- iio_sw_rb_free(indio_dev->buffer);
+ iio_kfifo_free(indio_dev->buffer);
if (!IS_ERR(st->reg)) {
regulator_disable(st->reg);
regulator_put(st->reg);
diff --git a/drivers/staging/iio/imu/Kconfig b/drivers/staging/iio/imu/Kconfig
deleted file mode 100644
index 2c2f47de2630..000000000000
--- a/drivers/staging/iio/imu/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# IIO imu drivers configuration
-#
-menu "Inertial measurement units"
-
-config ADIS16400
- tristate "Analog Devices ADIS16400 and similar IMU SPI driver"
- depends on SPI
- select IIO_SW_RING if IIO_BUFFER
- select IIO_TRIGGER if IIO_BUFFER
- help
- Say yes here to build support for Analog Devices adis16300, adis16344,
- adis16350, adis16354, adis16355, adis16360, adis16362, adis16364,
- adis16365, adis16400 and adis16405 triaxial inertial sensors
- (adis16400 series also have magnetometers).
-
-endmenu
diff --git a/drivers/staging/iio/imu/Makefile b/drivers/staging/iio/imu/Makefile
deleted file mode 100644
index 3400a13d1522..000000000000
--- a/drivers/staging/iio/imu/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for Inertial Measurement Units
-#
-
-adis16400-y := adis16400_core.o
-adis16400-$(CONFIG_IIO_BUFFER) += adis16400_ring.o adis16400_trigger.o
-obj-$(CONFIG_ADIS16400) += adis16400.o
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
deleted file mode 100644
index 9c8f5ab7e13b..000000000000
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ /dev/null
@@ -1,1320 +0,0 @@
-/*
- * adis16400.c support Analog Devices ADIS16400/5
- * 3d 2g Linear Accelerometers,
- * 3d Gyroscopes,
- * 3d Magnetometers via SPI
- *
- * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- * Copyright (c) 2011 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include "adis16400.h"
-
-enum adis16400_chip_variant {
- ADIS16300,
- ADIS16334,
- ADIS16350,
- ADIS16360,
- ADIS16362,
- ADIS16364,
- ADIS16400,
-};
-
-/**
- * adis16400_spi_write_reg_8() - write single byte to a register
- * @dev: device associated with child of actual device (iio_dev or iio_trig)
- * @reg_address: the address of the register to be written
- * @val: the value to write
- */
-static int adis16400_spi_write_reg_8(struct iio_dev *indio_dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct adis16400_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADIS16400_WRITE_REG(reg_address);
- st->tx[1] = val;
-
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-/**
- * adis16400_spi_write_reg_16() - write 2 bytes to a pair of registers
- * @dev: device associated with child of actual device (iio_dev or iio_trig)
- * @reg_address: the address of the lower of the two registers. Second register
- * is assumed to have address one greater.
- * @val: value to be written
- *
- * At the moment the spi framework doesn't allow global setting of cs_change.
- * This means that use cannot be made of spi_write.
- */
-static int adis16400_spi_write_reg_16(struct iio_dev *indio_dev,
- u8 lower_reg_address,
- u16 value)
-{
- int ret;
- struct spi_message msg;
- struct adis16400_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- }, {
- .tx_buf = st->tx + 2,
- .bits_per_word = 8,
- .len = 2,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADIS16400_WRITE_REG(lower_reg_address);
- st->tx[1] = value & 0xFF;
- st->tx[2] = ADIS16400_WRITE_REG(lower_reg_address + 1);
- st->tx[3] = (value >> 8) & 0xFF;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-/**
- * adis16400_spi_read_reg_16() - read 2 bytes from a 16-bit register
- * @indio_dev: iio device
- * @reg_address: the address of the lower of the two registers. Second register
- * is assumed to have address one greater.
- * @val: somewhere to pass back the value read
- *
- * At the moment the spi framework doesn't allow global setting of cs_change.
- * This means that use cannot be made of spi_read.
- **/
-static int adis16400_spi_read_reg_16(struct iio_dev *indio_dev,
- u8 lower_reg_address,
- u16 *val)
-{
- struct spi_message msg;
- struct adis16400_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADIS16400_READ_REG(lower_reg_address);
- st->tx[1] = 0;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
- if (ret) {
- dev_err(&st->us->dev,
- "problem when reading 16 bit register 0x%02X",
- lower_reg_address);
- goto error_ret;
- }
- *val = (st->rx[0] << 8) | st->rx[1];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int adis16334_get_freq(struct iio_dev *indio_dev)
-{
- int ret;
- u16 t;
-
- ret = adis16400_spi_read_reg_16(indio_dev, ADIS16400_SMPL_PRD, &t);
- if (ret < 0)
- return ret;
-
- t >>= ADIS16334_RATE_DIV_SHIFT;
-
- return (8192 >> t) / 10;
-}
-
-static int adis16334_set_freq(struct iio_dev *indio_dev, unsigned int freq)
-{
- unsigned int t;
-
- t = ilog2(8192 / (freq * 10));
-
- if (t > 0x31)
- t = 0x31;
-
- t <<= ADIS16334_RATE_DIV_SHIFT;
- t |= ADIS16334_RATE_INT_CLK;
-
- return adis16400_spi_write_reg_16(indio_dev, ADIS16400_SMPL_PRD, t);
-}
-
-static int adis16400_get_freq(struct iio_dev *indio_dev)
-{
- int sps, ret;
- u16 t;
-
- ret = adis16400_spi_read_reg_16(indio_dev, ADIS16400_SMPL_PRD, &t);
- if (ret < 0)
- return ret;
- sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 53 : 1638;
- sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
-
- return sps;
-}
-
-static int adis16400_set_freq(struct iio_dev *indio_dev, unsigned int freq)
-{
- struct adis16400_state *st = iio_priv(indio_dev);
- unsigned int t;
-
- t = 1638 / freq;
- if (t > 0)
- t--;
- t &= ADIS16400_SMPL_PRD_DIV_MASK;
- if ((t & ADIS16400_SMPL_PRD_DIV_MASK) >= 0x0A)
- st->us->max_speed_hz = ADIS16400_SPI_SLOW;
- else
- st->us->max_speed_hz = ADIS16400_SPI_FAST;
-
- return adis16400_spi_write_reg_8(indio_dev,
- ADIS16400_SMPL_PRD, t);
-}
-
-static ssize_t adis16400_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16400_state *st = iio_priv(indio_dev);
- int ret, len = 0;
-
- ret = st->variant->get_freq(indio_dev);
- if (ret < 0)
- return ret;
- len = sprintf(buf, "%d SPS\n", ret);
- return len;
-}
-
-static const unsigned adis16400_3db_divisors[] = {
- [0] = 2, /* Special case */
- [1] = 5,
- [2] = 10,
- [3] = 50,
- [4] = 200,
-};
-
-static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
-{
- int i, ret;
- u16 val16;
- for (i = ARRAY_SIZE(adis16400_3db_divisors) - 1; i >= 0; i--)
- if (sps/adis16400_3db_divisors[i] > val)
- break;
- if (i == -1)
- ret = -EINVAL;
- else {
- ret = adis16400_spi_read_reg_16(indio_dev,
- ADIS16400_SENS_AVG,
- &val16);
- if (ret < 0)
- goto error_ret;
-
- ret = adis16400_spi_write_reg_16(indio_dev,
- ADIS16400_SENS_AVG,
- (val16 & ~0x03) | i);
- }
-error_ret:
- return ret;
-}
-
-static ssize_t adis16400_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16400_state *st = iio_priv(indio_dev);
- long val;
- int ret;
-
- ret = strict_strtol(buf, 10, &val);
- if (ret)
- return ret;
- if (val == 0)
- return -EINVAL;
-
- mutex_lock(&indio_dev->mlock);
-
- st->variant->set_freq(indio_dev, val);
-
- /* Also update the filter */
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static int adis16400_reset(struct iio_dev *indio_dev)
-{
- int ret;
- ret = adis16400_spi_write_reg_8(indio_dev,
- ADIS16400_GLOB_CMD,
- ADIS16400_GLOB_CMD_SW_RESET);
- if (ret)
- dev_err(&indio_dev->dev, "problem resetting device");
-
- return ret;
-}
-
-int adis16400_set_irq(struct iio_dev *indio_dev, bool enable)
-{
- int ret;
- u16 msc;
-
- ret = adis16400_spi_read_reg_16(indio_dev, ADIS16400_MSC_CTRL, &msc);
- if (ret)
- goto error_ret;
-
- msc |= ADIS16400_MSC_CTRL_DATA_RDY_POL_HIGH;
- if (enable)
- msc |= ADIS16400_MSC_CTRL_DATA_RDY_EN;
- else
- msc &= ~ADIS16400_MSC_CTRL_DATA_RDY_EN;
-
- ret = adis16400_spi_write_reg_16(indio_dev, ADIS16400_MSC_CTRL, msc);
- if (ret)
- goto error_ret;
-
-error_ret:
- return ret;
-}
-
-/* Power down the device */
-static int adis16400_stop_device(struct iio_dev *indio_dev)
-{
- int ret;
- u16 val = ADIS16400_SLP_CNT_POWER_OFF;
-
- ret = adis16400_spi_write_reg_16(indio_dev, ADIS16400_SLP_CNT, val);
- if (ret)
- dev_err(&indio_dev->dev,
- "problem with turning device off: SLP_CNT");
-
- return ret;
-}
-
-static int adis16400_check_status(struct iio_dev *indio_dev)
-{
- u16 status;
- int ret;
- struct device *dev = &indio_dev->dev;
-
- ret = adis16400_spi_read_reg_16(indio_dev,
- ADIS16400_DIAG_STAT, &status);
-
- if (ret < 0) {
- dev_err(dev, "Reading status failed\n");
- goto error_ret;
- }
- ret = status;
- if (status & ADIS16400_DIAG_STAT_ZACCL_FAIL)
- dev_err(dev, "Z-axis accelerometer self-test failure\n");
- if (status & ADIS16400_DIAG_STAT_YACCL_FAIL)
- dev_err(dev, "Y-axis accelerometer self-test failure\n");
- if (status & ADIS16400_DIAG_STAT_XACCL_FAIL)
- dev_err(dev, "X-axis accelerometer self-test failure\n");
- if (status & ADIS16400_DIAG_STAT_XGYRO_FAIL)
- dev_err(dev, "X-axis gyroscope self-test failure\n");
- if (status & ADIS16400_DIAG_STAT_YGYRO_FAIL)
- dev_err(dev, "Y-axis gyroscope self-test failure\n");
- if (status & ADIS16400_DIAG_STAT_ZGYRO_FAIL)
- dev_err(dev, "Z-axis gyroscope self-test failure\n");
- if (status & ADIS16400_DIAG_STAT_ALARM2)
- dev_err(dev, "Alarm 2 active\n");
- if (status & ADIS16400_DIAG_STAT_ALARM1)
- dev_err(dev, "Alarm 1 active\n");
- if (status & ADIS16400_DIAG_STAT_FLASH_CHK)
- dev_err(dev, "Flash checksum error\n");
- if (status & ADIS16400_DIAG_STAT_SELF_TEST)
- dev_err(dev, "Self test error\n");
- if (status & ADIS16400_DIAG_STAT_OVERFLOW)
- dev_err(dev, "Sensor overrange\n");
- if (status & ADIS16400_DIAG_STAT_SPI_FAIL)
- dev_err(dev, "SPI failure\n");
- if (status & ADIS16400_DIAG_STAT_FLASH_UPT)
- dev_err(dev, "Flash update failed\n");
- if (status & ADIS16400_DIAG_STAT_POWER_HIGH)
- dev_err(dev, "Power supply above 5.25V\n");
- if (status & ADIS16400_DIAG_STAT_POWER_LOW)
- dev_err(dev, "Power supply below 4.75V\n");
-
-error_ret:
- return ret;
-}
-
-static int adis16400_self_test(struct iio_dev *indio_dev)
-{
- int ret;
- ret = adis16400_spi_write_reg_16(indio_dev,
- ADIS16400_MSC_CTRL,
- ADIS16400_MSC_CTRL_MEM_TEST);
- if (ret) {
- dev_err(&indio_dev->dev, "problem starting self test");
- goto err_ret;
- }
-
- msleep(ADIS16400_MTEST_DELAY);
- adis16400_check_status(indio_dev);
-
-err_ret:
- return ret;
-}
-
-static int adis16400_initial_setup(struct iio_dev *indio_dev)
-{
- int ret;
- u16 prod_id, smp_prd;
- unsigned int device_id;
- struct adis16400_state *st = iio_priv(indio_dev);
-
- /* use low spi speed for init if the device has a slow mode */
- if (st->variant->flags & ADIS16400_HAS_SLOW_MODE)
- st->us->max_speed_hz = ADIS16400_SPI_SLOW;
- else
- st->us->max_speed_hz = ADIS16400_SPI_FAST;
- st->us->mode = SPI_MODE_3;
- spi_setup(st->us);
-
- ret = adis16400_set_irq(indio_dev, false);
- if (ret) {
- dev_err(&indio_dev->dev, "disable irq failed");
- goto err_ret;
- }
-
- ret = adis16400_self_test(indio_dev);
- if (ret) {
- dev_err(&indio_dev->dev, "self test failure");
- goto err_ret;
- }
-
- ret = adis16400_check_status(indio_dev);
- if (ret) {
- adis16400_reset(indio_dev);
- dev_err(&indio_dev->dev, "device not playing ball -> reset");
- msleep(ADIS16400_STARTUP_DELAY);
- ret = adis16400_check_status(indio_dev);
- if (ret) {
- dev_err(&indio_dev->dev, "giving up");
- goto err_ret;
- }
- }
- if (st->variant->flags & ADIS16400_HAS_PROD_ID) {
- ret = adis16400_spi_read_reg_16(indio_dev,
- ADIS16400_PRODUCT_ID, &prod_id);
- if (ret)
- goto err_ret;
-
- sscanf(indio_dev->name, "adis%u\n", &device_id);
-
- if (prod_id != device_id)
- dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
- device_id, prod_id);
-
- dev_info(&indio_dev->dev, "%s: prod_id 0x%04x at CS%d (irq %d)\n",
- indio_dev->name, prod_id,
- st->us->chip_select, st->us->irq);
- }
- /* use high spi speed if possible */
- if (st->variant->flags & ADIS16400_HAS_SLOW_MODE) {
- ret = adis16400_spi_read_reg_16(indio_dev,
- ADIS16400_SMPL_PRD, &smp_prd);
- if (ret)
- goto err_ret;
-
- if ((smp_prd & ADIS16400_SMPL_PRD_DIV_MASK) < 0x0A) {
- st->us->max_speed_hz = ADIS16400_SPI_FAST;
- spi_setup(st->us);
- }
- }
-
-err_ret:
- return ret;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- adis16400_read_frequency,
- adis16400_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("409 546 819 1638");
-
-enum adis16400_chan {
- in_supply,
- gyro_x,
- gyro_y,
- gyro_z,
- accel_x,
- accel_y,
- accel_z,
- magn_x,
- magn_y,
- magn_z,
- temp,
- temp0, temp1, temp2,
- in1,
- in2,
- incli_x,
- incli_y,
-};
-
-static u8 adis16400_addresses[18][2] = {
- [in_supply] = { ADIS16400_SUPPLY_OUT },
- [gyro_x] = { ADIS16400_XGYRO_OUT, ADIS16400_XGYRO_OFF },
- [gyro_y] = { ADIS16400_YGYRO_OUT, ADIS16400_YGYRO_OFF },
- [gyro_z] = { ADIS16400_ZGYRO_OUT, ADIS16400_ZGYRO_OFF },
- [accel_x] = { ADIS16400_XACCL_OUT, ADIS16400_XACCL_OFF },
- [accel_y] = { ADIS16400_YACCL_OUT, ADIS16400_YACCL_OFF },
- [accel_z] = { ADIS16400_ZACCL_OUT, ADIS16400_ZACCL_OFF },
- [magn_x] = { ADIS16400_XMAGN_OUT },
- [magn_y] = { ADIS16400_YMAGN_OUT },
- [magn_z] = { ADIS16400_ZMAGN_OUT },
- [temp] = { ADIS16400_TEMP_OUT },
- [temp0] = { ADIS16350_XTEMP_OUT },
- [temp1] = { ADIS16350_YTEMP_OUT },
- [temp2] = { ADIS16350_ZTEMP_OUT },
- [in1] = { ADIS16300_AUX_ADC },
- [in2] = { ADIS16400_AUX_ADC },
- [incli_x] = { ADIS16300_PITCH_OUT },
- [incli_y] = { ADIS16300_ROLL_OUT }
-};
-
-
-static int adis16400_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis16400_state *st = iio_priv(indio_dev);
- int ret, sps;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&indio_dev->mlock);
- ret = adis16400_spi_write_reg_16(indio_dev,
- adis16400_addresses[chan->address][1],
- val);
- mutex_unlock(&indio_dev->mlock);
- return ret;
- case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- /* Need to cache values so we can update if the frequency
- changes */
- mutex_lock(&indio_dev->mlock);
- st->filt_int = val;
- /* Work out update to current value */
- sps = st->variant->get_freq(indio_dev);
- if (sps < 0) {
- mutex_unlock(&indio_dev->mlock);
- return sps;
- }
-
- ret = adis16400_set_filter(indio_dev, sps, val);
- mutex_unlock(&indio_dev->mlock);
- return ret;
- default:
- return -EINVAL;
- }
-}
-
-static int adis16400_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- struct adis16400_state *st = iio_priv(indio_dev);
- int ret, shift;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- ret = adis16400_spi_read_reg_16(indio_dev,
- adis16400_addresses[chan->address][0],
- &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << chan->scan_type.realbits) - 1;
- if (chan->scan_type.sign == 's') {
- shift = 16 - chan->scan_type.realbits;
- val16 = (s16)(val16 << shift) >> shift;
- }
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_ANGL_VEL:
- *val = 0;
- *val2 = st->variant->gyro_scale_micro;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_VOLTAGE:
- *val = 0;
- if (chan->channel == 0) {
- *val = 2;
- *val2 = 418000; /* 2.418 mV */
- } else {
- *val = 0;
- *val2 = 805800; /* 805.8 uV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = st->variant->accel_scale_micro;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_MAGN:
- *val = 0;
- *val2 = 500; /* 0.5 mgauss */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = st->variant->temp_scale_nano / 1000000;
- *val2 = (st->variant->temp_scale_nano % 1000000);
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&indio_dev->mlock);
- ret = adis16400_spi_read_reg_16(indio_dev,
- adis16400_addresses[chan->address][1],
- &val16);
- mutex_unlock(&indio_dev->mlock);
- if (ret)
- return ret;
- val16 = ((val16 & 0xFFF) << 4) >> 4;
- *val = val16;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_OFFSET:
- /* currently only temperature */
- *val = st->variant->temp_offset;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- mutex_lock(&indio_dev->mlock);
- /* Need both the number of taps and the sampling frequency */
- ret = adis16400_spi_read_reg_16(indio_dev,
- ADIS16400_SENS_AVG,
- &val16);
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 = st->variant->get_freq(indio_dev);
- if (ret > 0)
- *val = ret/adis16400_3db_divisors[val16 & 0x03];
- *val2 = 0;
- mutex_unlock(&indio_dev->mlock);
- if (ret < 0)
- return ret;
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
-}
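
For the IIO_CHAN_INFO_RAW path above, a small sketch of the mask-and-sign-extend step for a 14-bit signed sample (the sample value is chosen purely for illustration):

	u16 raw = 0x3FFF & ((1 << 14) - 1);		/* keep the 14 valid bits */
	int shift = 16 - 14;				/* two unused high bits   */
	s16 value = (s16)(raw << shift) >> shift;	/* 0x3FFF -> -1           */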
-
-static const struct iio_chan_spec adis16400_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "supply",
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = in_supply,
- .scan_index = ADIS16400_SCAN_SUPPLY,
- .scan_type = IIO_ST('u', 14, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_x,
- .scan_index = ADIS16400_SCAN_GYRO_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_y,
- .scan_index = ADIS16400_SCAN_GYRO_Y,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_z,
- .scan_index = ADIS16400_SCAN_GYRO_Z,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_x,
- .scan_index = ADIS16400_SCAN_ACC_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_y,
- .scan_index = ADIS16400_SCAN_ACC_Y,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_z,
- .scan_index = ADIS16400_SCAN_ACC_Z,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_MAGN,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = magn_x,
- .scan_index = ADIS16400_SCAN_MAGN_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_MAGN,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = magn_y,
- .scan_index = ADIS16400_SCAN_MAGN_Y,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_MAGN,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = magn_z,
- .scan_index = ADIS16400_SCAN_MAGN_Z,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = temp,
- .scan_index = ADIS16400_SCAN_TEMP,
- .scan_type = IIO_ST('s', 12, 16, 0),
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = in2,
- .scan_index = ADIS16400_SCAN_ADC_0,
- .scan_type = IIO_ST('s', 12, 16, 0),
- },
- IIO_CHAN_SOFT_TIMESTAMP(12)
-};
-
-static const struct iio_chan_spec adis16350_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "supply",
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = in_supply,
- .scan_index = ADIS16400_SCAN_SUPPLY,
- .scan_type = IIO_ST('u', 12, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_x,
- .scan_index = ADIS16400_SCAN_GYRO_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_y,
- .scan_index = ADIS16400_SCAN_GYRO_Y,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_z,
- .scan_index = ADIS16400_SCAN_GYRO_Z,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_x,
- .scan_index = ADIS16400_SCAN_ACC_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_y,
- .scan_index = ADIS16400_SCAN_ACC_Y,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_z,
- .scan_index = ADIS16400_SCAN_ACC_Z,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .extend_name = "x",
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = temp0,
- .scan_index = ADIS16350_SCAN_TEMP_X,
- .scan_type = IIO_ST('s', 12, 16, 0),
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 1,
- .extend_name = "y",
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = temp1,
- .scan_index = ADIS16350_SCAN_TEMP_Y,
- .scan_type = IIO_ST('s', 12, 16, 0),
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 2,
- .extend_name = "z",
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = temp2,
- .scan_index = ADIS16350_SCAN_TEMP_Z,
- .scan_type = IIO_ST('s', 12, 16, 0),
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = in1,
- .scan_index = ADIS16350_SCAN_ADC_0,
- .scan_type = IIO_ST('s', 12, 16, 0),
- },
- IIO_CHAN_SOFT_TIMESTAMP(11)
-};
-
-static const struct iio_chan_spec adis16300_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "supply",
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = in_supply,
- .scan_index = ADIS16400_SCAN_SUPPLY,
- .scan_type = IIO_ST('u', 12, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_x,
- .scan_index = ADIS16400_SCAN_GYRO_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_x,
- .scan_index = ADIS16400_SCAN_ACC_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_y,
- .scan_index = ADIS16400_SCAN_ACC_Y,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_z,
- .scan_index = ADIS16400_SCAN_ACC_Z,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = temp0,
- .scan_index = ADIS16400_SCAN_TEMP,
- .scan_type = IIO_ST('s', 12, 16, 0),
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- .address = in1,
- .scan_index = ADIS16350_SCAN_ADC_0,
- .scan_type = IIO_ST('s', 12, 16, 0),
- }, {
- .type = IIO_INCLI,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
- .address = incli_x,
- .scan_index = ADIS16300_SCAN_INCLI_X,
- .scan_type = IIO_ST('s', 13, 16, 0),
- }, {
- .type = IIO_INCLI,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
- .address = incli_y,
- .scan_index = ADIS16300_SCAN_INCLI_Y,
- .scan_type = IIO_ST('s', 13, 16, 0),
- },
- IIO_CHAN_SOFT_TIMESTAMP(14)
-};
-
-static const struct iio_chan_spec adis16334_channels[] = {
- {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_x,
- .scan_index = ADIS16400_SCAN_GYRO_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_y,
- .scan_index = ADIS16400_SCAN_GYRO_Y,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = gyro_z,
- .scan_index = ADIS16400_SCAN_GYRO_Z,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_x,
- .scan_index = ADIS16400_SCAN_ACC_X,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_y,
- .scan_index = ADIS16400_SCAN_ACC_Y,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_ACCEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT |
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
- .address = accel_z,
- .scan_index = ADIS16400_SCAN_ACC_Z,
- .scan_type = IIO_ST('s', 14, 16, 0),
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
- .address = temp0,
- .scan_index = ADIS16400_SCAN_TEMP,
- .scan_type = IIO_ST('s', 14, 16, 0),
- },
- IIO_CHAN_SOFT_TIMESTAMP(12)
-};
-
-static struct attribute *adis16400_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16400_attribute_group = {
- .attrs = adis16400_attributes,
-};
-
-static struct adis16400_chip_info adis16400_chips[] = {
- [ADIS16300] = {
- .channels = adis16300_channels,
- .num_channels = ARRAY_SIZE(adis16300_channels),
- .flags = ADIS16400_HAS_SLOW_MODE,
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
- .accel_scale_micro = 5884,
- .temp_scale_nano = 140000000, /* 0.14 C */
- .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
- .default_scan_mask = (1 << ADIS16400_SCAN_SUPPLY) |
- (1 << ADIS16400_SCAN_GYRO_X) | (1 << ADIS16400_SCAN_ACC_X) |
- (1 << ADIS16400_SCAN_ACC_Y) | (1 << ADIS16400_SCAN_ACC_Z) |
- (1 << ADIS16400_SCAN_TEMP) | (1 << ADIS16400_SCAN_ADC_0) |
- (1 << ADIS16300_SCAN_INCLI_X) | (1 << ADIS16300_SCAN_INCLI_Y) |
- (1 << 14),
- .set_freq = adis16400_set_freq,
- .get_freq = adis16400_get_freq,
- },
- [ADIS16334] = {
- .channels = adis16334_channels,
- .num_channels = ARRAY_SIZE(adis16334_channels),
- .flags = ADIS16400_HAS_PROD_ID,
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
- .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
- .temp_scale_nano = 67850000, /* 0.06785 C */
- .temp_offset = 25000000 / 67850, /* 25 C = 0x00 */
- .default_scan_mask = (1 << ADIS16400_SCAN_GYRO_X) |
- (1 << ADIS16400_SCAN_GYRO_Y) | (1 << ADIS16400_SCAN_GYRO_Z) |
- (1 << ADIS16400_SCAN_ACC_X) | (1 << ADIS16400_SCAN_ACC_Y) |
- (1 << ADIS16400_SCAN_ACC_Z),
- .set_freq = adis16334_set_freq,
- .get_freq = adis16334_get_freq,
- },
- [ADIS16350] = {
- .channels = adis16350_channels,
- .num_channels = ARRAY_SIZE(adis16350_channels),
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(73260), /* 0.07326 deg/s */
- .accel_scale_micro = IIO_G_TO_M_S_2(2522), /* 0.002522 g */
- .temp_scale_nano = 145300000, /* 0.1453 C */
- .temp_offset = 25000000 / 145300, /* 25 C = 0x00 */
- .default_scan_mask = 0x7FF,
- .flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
- .set_freq = adis16400_set_freq,
- .get_freq = adis16400_get_freq,
- },
- [ADIS16360] = {
- .channels = adis16350_channels,
- .num_channels = ARRAY_SIZE(adis16350_channels),
- .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE,
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
- .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
- .temp_scale_nano = 136000000, /* 0.136 C */
- .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
- .default_scan_mask = 0x7FF,
- .set_freq = adis16400_set_freq,
- .get_freq = adis16400_get_freq,
- },
- [ADIS16362] = {
- .channels = adis16350_channels,
- .num_channels = ARRAY_SIZE(adis16350_channels),
- .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE,
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
- .accel_scale_micro = IIO_G_TO_M_S_2(333), /* 0.333 mg */
- .temp_scale_nano = 136000000, /* 0.136 C */
- .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
- .default_scan_mask = 0x7FF,
- .set_freq = adis16400_set_freq,
- .get_freq = adis16400_get_freq,
- },
- [ADIS16364] = {
- .channels = adis16350_channels,
- .num_channels = ARRAY_SIZE(adis16350_channels),
- .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE,
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
- .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
- .temp_scale_nano = 136000000, /* 0.136 C */
- .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
- .default_scan_mask = 0x7FF,
- .set_freq = adis16400_set_freq,
- .get_freq = adis16400_get_freq,
- },
- [ADIS16400] = {
- .channels = adis16400_channels,
- .num_channels = ARRAY_SIZE(adis16400_channels),
- .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE,
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
- .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
- .default_scan_mask = 0xFFF,
- .temp_scale_nano = 140000000, /* 0.14 C */
- .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
- .set_freq = adis16400_set_freq,
- .get_freq = adis16400_get_freq,
- }
-};
-
-static const struct iio_info adis16400_info = {
- .driver_module = THIS_MODULE,
- .read_raw = &adis16400_read_raw,
- .write_raw = &adis16400_write_raw,
- .attrs = &adis16400_attribute_group,
-};
-
-static int adis16400_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16400_state *st;
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st->us = spi;
- mutex_init(&st->buf_lock);
-
- /* setup the industrialio driver allocated elements */
- st->variant = &adis16400_chips[spi_get_device_id(spi)->driver_data];
- indio_dev->dev.parent = &spi->dev;
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->channels = st->variant->channels;
- indio_dev->num_channels = st->variant->num_channels;
- indio_dev->info = &adis16400_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis16400_configure_ring(indio_dev);
- if (ret)
- goto error_free_dev;
-
- ret = iio_buffer_register(indio_dev,
- st->variant->channels,
- st->variant->num_channels);
- if (ret) {
- dev_err(&spi->dev, "failed to initialize the ring\n");
- goto error_unreg_ring_funcs;
- }
-
- if (spi->irq) {
- ret = adis16400_probe_trigger(indio_dev);
- if (ret)
- goto error_uninitialize_ring;
- }
-
- /* Get the device into a sane initial state */
- ret = adis16400_initial_setup(indio_dev);
- if (ret)
- goto error_remove_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- return 0;
-
-error_remove_trigger:
- if (spi->irq)
- adis16400_remove_trigger(indio_dev);
-error_uninitialize_ring:
- iio_buffer_unregister(indio_dev);
-error_unreg_ring_funcs:
- adis16400_unconfigure_ring(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
-}
-
-/* fixme, confirm ordering in this function */
-static int adis16400_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- adis16400_stop_device(indio_dev);
-
- adis16400_remove_trigger(indio_dev);
- iio_buffer_unregister(indio_dev);
- adis16400_unconfigure_ring(indio_dev);
- iio_device_free(indio_dev);
-
- return 0;
-}
-
-static const struct spi_device_id adis16400_id[] = {
- {"adis16300", ADIS16300},
- {"adis16334", ADIS16334},
- {"adis16350", ADIS16350},
- {"adis16354", ADIS16350},
- {"adis16355", ADIS16350},
- {"adis16360", ADIS16360},
- {"adis16362", ADIS16362},
- {"adis16364", ADIS16364},
- {"adis16365", ADIS16360},
- {"adis16400", ADIS16400},
- {"adis16405", ADIS16400},
- {}
-};
-MODULE_DEVICE_TABLE(spi, adis16400_id);
-
-static struct spi_driver adis16400_driver = {
- .driver = {
- .name = "adis16400",
- .owner = THIS_MODULE,
- },
- .id_table = adis16400_id,
- .probe = adis16400_probe,
- .remove = adis16400_remove,
-};
-module_spi_driver(adis16400_driver);
-
-MODULE_AUTHOR("Manuel Stahl <manuel.stahl@iis.fraunhofer.de>");
-MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
deleted file mode 100644
index d46c1e38cf7b..000000000000
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ /dev/null
@@ -1,204 +0,0 @@
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/export.h>
-
-#include <linux/iio/iio.h>
-#include "../ring_sw.h"
-#include <linux/iio/trigger_consumer.h>
-#include "adis16400.h"
-
-/**
- * adis16400_spi_read_burst() - read all data registers
- * @indio_dev: the IIO device
- * @rx: somewhere to pass back the value read (min size is 24 bytes)
- **/
-static int adis16400_spi_read_burst(struct iio_dev *indio_dev, u8 *rx)
-{
- struct spi_message msg;
- struct adis16400_state *st = iio_priv(indio_dev);
- u32 old_speed_hz = st->us->max_speed_hz;
- int ret;
-
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- }, {
- .rx_buf = rx,
- .bits_per_word = 8,
- .len = 24,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADIS16400_READ_REG(ADIS16400_GLOB_CMD);
- st->tx[1] = 0;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
-
- st->us->max_speed_hz = min(ADIS16400_SPI_BURST, old_speed_hz);
- spi_setup(st->us);
-
- ret = spi_sync(st->us, &msg);
- if (ret)
- dev_err(&st->us->dev, "problem when burst reading");
-
- st->us->max_speed_hz = old_speed_hz;
- spi_setup(st->us);
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static const u16 read_all_tx_array[] = {
- cpu_to_be16(ADIS16400_READ_REG(ADIS16400_SUPPLY_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XGYRO_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YGYRO_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZGYRO_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XACCL_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YACCL_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZACCL_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16350_XTEMP_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16350_YTEMP_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16350_ZTEMP_OUT)),
- cpu_to_be16(ADIS16400_READ_REG(ADIS16400_AUX_ADC)),
-};
-
-static int adis16350_spi_read_all(struct iio_dev *indio_dev, u8 *rx)
-{
- struct adis16400_state *st = iio_priv(indio_dev);
-
- struct spi_message msg;
- int i, j = 0, ret;
- struct spi_transfer *xfers;
- int scan_count = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
-
- xfers = kzalloc(sizeof(*xfers)*(scan_count + 1),
- GFP_KERNEL);
- if (xfers == NULL)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++)
- if (test_bit(i, indio_dev->active_scan_mask)) {
- xfers[j].tx_buf = &read_all_tx_array[i];
- xfers[j].bits_per_word = 16;
- xfers[j].len = 2;
- xfers[j + 1].rx_buf = rx + j*2;
- j++;
- }
- xfers[j].bits_per_word = 16;
- xfers[j].len = 2;
-
- spi_message_init(&msg);
- for (j = 0; j < scan_count + 1; j++)
- spi_message_add_tail(&xfers[j], &msg);
-
- ret = spi_sync(st->us, &msg);
- kfree(xfers);
-
- return ret;
-}
-
-/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too
- * device specific to be rolled into the core.
- */
-static irqreturn_t adis16400_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct adis16400_state *st = iio_priv(indio_dev);
- int i = 0, j, ret = 0;
- s16 *data;
-
-	/* Assumption that long is enough for maximum channels */
- unsigned long mask = *indio_dev->active_scan_mask;
- int scan_count = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
- data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (data == NULL) {
- dev_err(&st->us->dev, "memory alloc failed in ring bh");
- goto done;
- }
-
- if (scan_count) {
- if (st->variant->flags & ADIS16400_NO_BURST) {
- ret = adis16350_spi_read_all(indio_dev, st->rx);
- if (ret < 0)
- goto done;
- for (; i < scan_count; i++)
- data[i] = *(s16 *)(st->rx + i*2);
- } else {
- ret = adis16400_spi_read_burst(indio_dev, st->rx);
- if (ret < 0)
- goto done;
- for (; i < scan_count; i++) {
- j = __ffs(mask);
- mask &= ~(1 << j);
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[j*2]));
- }
- }
- }
- /* Guaranteed to be aligned with 8 byte boundary */
- if (indio_dev->scan_timestamp)
- *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;
- iio_push_to_buffers(indio_dev, (u8 *) data);
-
-done:
- kfree(data);
- iio_trigger_notify_done(indio_dev->trig);
-
- return IRQ_HANDLED;
-}
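
The buffer pushed above holds scan_count 16-bit samples followed by the 64-bit timestamp rounded up to the next 8-byte boundary; a small sketch of that offset calculation (function name illustrative):

	/* With 7 enabled channels the samples occupy bytes 0..13, bytes 14..15
	 * are padding and the s64 timestamp starts at byte 16. */
	static size_t scan_timestamp_offset(int scan_count)
	{
		return ((scan_count + 3) / 4) * 4 * sizeof(s16);
	}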
-
-void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
-{
- iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->buffer);
-}
-
-static const struct iio_buffer_setup_ops adis16400_ring_setup_ops = {
- .preenable = &iio_sw_buffer_preenable,
- .postenable = &iio_triggered_buffer_postenable,
- .predisable = &iio_triggered_buffer_predisable,
-};
-
-int adis16400_configure_ring(struct iio_dev *indio_dev)
-{
- int ret = 0;
- struct iio_buffer *ring;
-
- ring = iio_sw_rb_allocate(indio_dev);
- if (!ring) {
- ret = -ENOMEM;
- return ret;
- }
- indio_dev->buffer = ring;
- ring->scan_timestamp = true;
- indio_dev->setup_ops = &adis16400_ring_setup_ops;
-
- indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
- &adis16400_trigger_handler,
- IRQF_ONESHOT,
- indio_dev,
- "%s_consumer%d",
- indio_dev->name,
- indio_dev->id);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;
- }
-
- indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
- return 0;
-error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->buffer);
- return ret;
-}
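
For comparison, a sketch of the same allocation step using the kfifo buffer that the ade7758 conversion later in this diff switches to; the function name is illustrative and pollfunc/setup_ops handling is elided:

	static int example_configure_ring_kfifo(struct iio_dev *indio_dev)
	{
		/* iio_kfifo_allocate()/iio_kfifo_free() take over from the
		 * sw_ring calls above. */
		indio_dev->buffer = iio_kfifo_allocate(indio_dev);
		if (!indio_dev->buffer)
			return -ENOMEM;

		indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
		return 0;
	}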
diff --git a/drivers/staging/iio/imu/adis16400_trigger.c b/drivers/staging/iio/imu/adis16400_trigger.c
deleted file mode 100644
index 42a678e92fc6..000000000000
--- a/drivers/staging/iio/imu/adis16400_trigger.c
+++ /dev/null
@@ -1,74 +0,0 @@
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/export.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-#include "adis16400.h"
-
-/**
- * adis16400_data_rdy_trigger_set_state() - set data-ready interrupt state
- **/
-static int adis16400_data_rdy_trigger_set_state(struct iio_trigger *trig,
- bool state)
-{
- struct iio_dev *indio_dev = trig->private_data;
-
- dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
- return adis16400_set_irq(indio_dev, state);
-}
-
-static const struct iio_trigger_ops adis16400_trigger_ops = {
- .owner = THIS_MODULE,
- .set_trigger_state = &adis16400_data_rdy_trigger_set_state,
-};
-
-int adis16400_probe_trigger(struct iio_dev *indio_dev)
-{
- int ret;
- struct adis16400_state *st = iio_priv(indio_dev);
-
- st->trig = iio_trigger_alloc("%s-dev%d",
- indio_dev->name,
- indio_dev->id);
- if (st->trig == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- ret = request_irq(st->us->irq,
- &iio_trigger_generic_data_rdy_poll,
- IRQF_TRIGGER_RISING,
- "adis16400",
- st->trig);
- if (ret)
- goto error_free_trig;
- st->trig->dev.parent = &st->us->dev;
- st->trig->private_data = indio_dev;
- st->trig->ops = &adis16400_trigger_ops;
- ret = iio_trigger_register(st->trig);
-
- /* select default trigger */
- indio_dev->trig = st->trig;
- if (ret)
- goto error_free_irq;
-
- return 0;
-
-error_free_irq:
- free_irq(st->us->irq, st->trig);
-error_free_trig:
- iio_trigger_free(st->trig);
-error_ret:
- return ret;
-}
-
-void adis16400_remove_trigger(struct iio_dev *indio_dev)
-{
- struct adis16400_state *st = iio_priv(indio_dev);
-
- iio_trigger_unregister(st->trig);
- free_irq(st->us->irq, st->trig);
- iio_trigger_free(st->trig);
-}
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index 4bed30eac3ed..ca8d6e66c899 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -25,16 +25,6 @@ config SENSORS_ISL29028
Proximity value via iio. The ISL29028 provides the concurrent sensing
of ambient light and proximity.
-config SENSORS_TSL2563
- tristate "TAOS TSL2560, TSL2561, TSL2562 and TSL2563 ambient light sensors"
- depends on I2C
- help
- If you say yes here you get support for the Taos TSL2560,
- TSL2561, TSL2562 and TSL2563 ambient light sensors.
-
- This driver can also be built as a module. If so, the module
- will be called tsl2563.
-
config TSL2583
tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
depends on I2C
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
index 141af1eb164c..9960fdf7c15b 100644
--- a/drivers/staging/iio/light/Makefile
+++ b/drivers/staging/iio/light/Makefile
@@ -2,7 +2,6 @@
# Makefile for industrial I/O Light sensors
#
-obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
obj-$(CONFIG_TSL2583) += tsl2583.o
diff --git a/drivers/staging/iio/light/tsl2563.h b/drivers/staging/iio/light/tsl2563.h
deleted file mode 100644
index b97368bd7fff..000000000000
--- a/drivers/staging/iio/light/tsl2563.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __LINUX_TSL2563_H
-#define __LINUX_TSL2563_H
-
-struct tsl2563_platform_data {
- int cover_comp_gain;
-};
-
-#endif /* __LINUX_TSL2563_H */
-
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 9e50fbbadf9d..a58731e70bb9 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -292,59 +292,6 @@ static const u8 device_channel_config[] = {
};
/**
- * tsl2x7x_parse_buffer() - parse a decimal result from a buffer.
- * @*buf: pointer to char buffer to parse
- * @*result: pointer to buffer to contain
- * resulting interger / decimal as ints.
- *			resulting integer / decimal as ints.
- */
-static int
-tsl2x7x_parse_buffer(const char *buf, struct tsl2x7x_parse_result *result)
-{
- int integer = 0, fract = 0, fract_mult = 100000;
- bool integer_part = true, negative = false;
-
- if (buf[0] == '-') {
- negative = true;
- buf++;
- }
-
- while (*buf) {
- if ('0' <= *buf && *buf <= '9') {
- if (integer_part)
- integer = integer*10 + *buf - '0';
- else {
- fract += fract_mult*(*buf - '0');
- if (fract_mult == 1)
- break;
- fract_mult /= 10;
- }
- } else if (*buf == '\n') {
- if (*(buf + 1) == '\0')
- break;
- else
- return -EINVAL;
- } else if (*buf == '.') {
- integer_part = false;
- } else {
- return -EINVAL;
- }
- buf++;
- }
- if (negative) {
- if (integer)
- integer = -integer;
- else
- fract = -fract;
- }
-
- result->integer = integer;
- result->fract = fract;
-
- return 0;
-}
-
-/**
* tsl2x7x_i2c_read() - Read a byte from a register.
* @client: i2c client
* @reg: device register to read from
@@ -1036,13 +983,12 @@ static ssize_t tsl2x7x_als_time_store(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
struct tsl2x7x_parse_result result;
+ int ret;
- result.integer = 0;
- result.fract = 0;
-
- tsl2x7x_parse_buffer(buf, &result);
+ ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
+ if (ret)
+ return ret;
- result.fract /= 1000;
result.fract /= 3;
chip->tsl2x7x_settings.als_time =
(TSL2X7X_MAX_TIMER_CNT - (u8)result.fract);
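
Assuming iio_str_to_fixpoint() parses like the removed helper did for a given multiplier, a fract_mult of 100 returns the fractional part in thousandths, which is why the old result.fract /= 1000 step disappears; for example:

	int integer, fract, ret;

	/* "2.5" -> integer = 2, fract = 500, i.e. 2 + 500/1000 (assumed
	 * behaviour for the fract_mult = 100 used in the calls above). */
	ret = iio_str_to_fixpoint("2.5", 100, &integer, &fract);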
@@ -1109,12 +1055,12 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
struct tsl2x7x_parse_result result;
int y, z, filter_delay;
+ int ret;
- result.integer = 0;
- result.fract = 0;
- tsl2x7x_parse_buffer(buf, &result);
+ ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
+ if (ret)
+ return ret;
- result.fract /= 1000;
y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
z = y * TSL2X7X_MIN_ITIME;
@@ -1155,12 +1101,12 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
struct tsl2x7x_parse_result result;
int y, z, filter_delay;
+ int ret;
- result.integer = 0;
- result.fract = 0;
- tsl2x7x_parse_buffer(buf, &result);
+ ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
+ if (ret)
+ return ret;
- result.fract /= 1000;
y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
z = y * TSL2X7X_MIN_ITIME;
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
index d290d2738419..e53274b64ae1 100644
--- a/drivers/staging/iio/meter/Kconfig
+++ b/drivers/staging/iio/meter/Kconfig
@@ -21,7 +21,7 @@ config ADE7758
tristate "Analog Devices ADE7758 Poly Phase Multifunction Energy Metering IC Driver"
depends on SPI
select IIO_TRIGGER if IIO_BUFFER
- select IIO_SW_RING if IIO_BUFFER
+ select IIO_KFIFO_BUF if IIO_BUFFER
help
Say yes here to build support for Analog Devices ADE7758 Polyphase
Multifunction Energy Metering IC with Per Phase Information Driver.
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 51c3bdece785..e5943e2287cf 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -103,7 +103,6 @@ static int ade7753_spi_read_reg_24(struct device *dev,
u8 reg_address,
u32 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7753_state *st = iio_priv(indio_dev);
int ret;
@@ -122,10 +121,7 @@ static int ade7753_spi_read_reg_24(struct device *dev,
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7753_READ_REG(reg_address);
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
reg_address);
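
The conversions in this series rely on spi_sync_transfer(), which behaves roughly like the open-coded sequence it replaces; a sketch (not the exact in-kernel definition):

	static int example_sync_transfer(struct spi_device *spi,
					 struct spi_transfer *xfers, unsigned int n)
	{
		struct spi_message msg;
		unsigned int i;

		/* Build a one-shot message from the transfer array and run it
		 * synchronously, as the removed spi_message_init() and
		 * spi_message_add_tail() sequences did by hand. */
		spi_message_init(&msg);
		for (i = 0; i < n; i++)
			spi_message_add_tail(&xfers[i], &msg);

		return spi_sync(spi, &msg);
	}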
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index b50c89e93993..7b6503bf9a74 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -103,7 +103,6 @@ static int ade7754_spi_read_reg_24(struct device *dev,
u8 reg_address,
u32 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7754_state *st = iio_priv(indio_dev);
int ret;
@@ -122,9 +121,7 @@ static int ade7754_spi_read_reg_24(struct device *dev,
st->tx[2] = 0;
st->tx[3] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(xfers, &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
reg_address);
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 3454e5154ed2..53c68dcc4544 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -47,7 +47,6 @@ static int ade7758_spi_write_reg_16(struct device *dev,
u16 value)
{
int ret;
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
@@ -63,9 +62,7 @@ static int ade7758_spi_write_reg_16(struct device *dev,
st->tx[1] = (value >> 8) & 0xFF;
st->tx[2] = value & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(xfers, &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
mutex_unlock(&st->buf_lock);
return ret;
@@ -76,7 +73,6 @@ static int ade7758_spi_write_reg_24(struct device *dev,
u32 value)
{
int ret;
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
@@ -93,9 +89,7 @@ static int ade7758_spi_write_reg_24(struct device *dev,
st->tx[2] = (value >> 8) & 0xFF;
st->tx[3] = value & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(xfers, &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
mutex_unlock(&st->buf_lock);
return ret;
@@ -105,7 +99,6 @@ int ade7758_spi_read_reg_8(struct device *dev,
u8 reg_address,
u8 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
int ret;
@@ -128,10 +121,7 @@ int ade7758_spi_read_reg_8(struct device *dev,
st->tx[0] = ADE7758_READ_REG(reg_address);
st->tx[1] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
reg_address);
@@ -148,7 +138,6 @@ static int ade7758_spi_read_reg_16(struct device *dev,
u8 reg_address,
u16 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
int ret;
@@ -173,10 +162,7 @@ static int ade7758_spi_read_reg_16(struct device *dev,
st->tx[1] = 0;
st->tx[2] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
reg_address);
@@ -194,7 +180,6 @@ static int ade7758_spi_read_reg_24(struct device *dev,
u8 reg_address,
u32 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
int ret;
@@ -219,10 +204,7 @@ static int ade7758_spi_read_reg_24(struct device *dev,
st->tx[2] = 0;
st->tx[3] = 0;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
reg_address);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 4552a4c7fe33..b29e2d5d9937 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -13,7 +13,7 @@
#include <asm/unaligned.h>
#include <linux/iio/iio.h>
-#include "../ring_sw.h"
+#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
#include "ade7758.h"
@@ -119,7 +119,7 @@ static const struct iio_buffer_setup_ops ade7758_ring_setup_ops = {
void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_sw_rb_free(indio_dev->buffer);
+ iio_kfifo_free(indio_dev->buffer);
}
int ade7758_configure_ring(struct iio_dev *indio_dev)
@@ -127,7 +127,7 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
struct ade7758_state *st = iio_priv(indio_dev);
int ret = 0;
- indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
+ indio_dev->buffer = iio_kfifo_allocate(indio_dev);
if (!indio_dev->buffer) {
ret = -ENOMEM;
return ret;
@@ -143,7 +143,7 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
indio_dev->id);
if (indio_dev->pollfunc == NULL) {
ret = -ENOMEM;
- goto error_iio_sw_rb_free;
+ goto error_iio_kfifo_free;
}
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
@@ -183,8 +183,8 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
return 0;
-error_iio_sw_rb_free:
- iio_sw_rb_free(indio_dev->buffer);
+error_iio_kfifo_free:
+ iio_kfifo_free(indio_dev->buffer);
return ret;
}
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 10b911bd3853..17dc373e1082 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -103,7 +103,6 @@ static int ade7759_spi_read_reg_40(struct device *dev,
u8 reg_address,
u64 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7759_state *st = iio_priv(indio_dev);
int ret;
@@ -120,9 +119,7 @@ static int ade7759_spi_read_reg_40(struct device *dev,
st->tx[0] = ADE7759_READ_REG(reg_address);
memset(&st->tx[1], 0 , 5);
- spi_message_init(&msg);
- spi_message_add_tail(xfers, &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 40 bit register 0x%02X",
reg_address);
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index f0984fa1cbb9..a802cf2491d6 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -20,7 +20,6 @@ static int ade7854_spi_write_reg_8(struct device *dev,
u8 value)
{
int ret;
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
@@ -35,9 +34,7 @@ static int ade7854_spi_write_reg_8(struct device *dev,
st->tx[2] = reg_address & 0xFF;
st->tx[3] = value & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
mutex_unlock(&st->buf_lock);
return ret;
@@ -48,7 +45,6 @@ static int ade7854_spi_write_reg_16(struct device *dev,
u16 value)
{
int ret;
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
@@ -64,9 +60,7 @@ static int ade7854_spi_write_reg_16(struct device *dev,
st->tx[3] = (value >> 8) & 0xFF;
st->tx[4] = value & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
mutex_unlock(&st->buf_lock);
return ret;
@@ -77,7 +71,6 @@ static int ade7854_spi_write_reg_24(struct device *dev,
u32 value)
{
int ret;
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
@@ -94,9 +87,7 @@ static int ade7854_spi_write_reg_24(struct device *dev,
st->tx[4] = (value >> 8) & 0xFF;
st->tx[5] = value & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
mutex_unlock(&st->buf_lock);
return ret;
@@ -107,7 +98,6 @@ static int ade7854_spi_write_reg_32(struct device *dev,
u32 value)
{
int ret;
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
@@ -125,9 +115,7 @@ static int ade7854_spi_write_reg_32(struct device *dev,
st->tx[5] = (value >> 8) & 0xFF;
st->tx[6] = value & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
mutex_unlock(&st->buf_lock);
return ret;
@@ -137,7 +125,6 @@ static int ade7854_spi_read_reg_8(struct device *dev,
u16 reg_address,
u8 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
int ret;
@@ -159,10 +146,7 @@ static int ade7854_spi_read_reg_8(struct device *dev,
st->tx[1] = (reg_address >> 8) & 0xFF;
st->tx[2] = reg_address & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->spi->dev, "problem when reading 8 bit register 0x%02X",
reg_address);
@@ -179,7 +163,6 @@ static int ade7854_spi_read_reg_16(struct device *dev,
u16 reg_address,
u16 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
int ret;
@@ -200,10 +183,7 @@ static int ade7854_spi_read_reg_16(struct device *dev,
st->tx[1] = (reg_address >> 8) & 0xFF;
st->tx[2] = reg_address & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->spi->dev, "problem when reading 16 bit register 0x%02X",
reg_address);
@@ -220,7 +200,6 @@ static int ade7854_spi_read_reg_24(struct device *dev,
u16 reg_address,
u32 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
int ret;
@@ -242,10 +221,7 @@ static int ade7854_spi_read_reg_24(struct device *dev,
st->tx[1] = (reg_address >> 8) & 0xFF;
st->tx[2] = reg_address & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->spi->dev, "problem when reading 24 bit register 0x%02X",
reg_address);
@@ -262,7 +238,6 @@ static int ade7854_spi_read_reg_32(struct device *dev,
u16 reg_address,
u32 *val)
{
- struct spi_message msg;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
int ret;
@@ -284,10 +259,7 @@ static int ade7854_spi_read_reg_32(struct device *dev,
st->tx[1] = (reg_address >> 8) & 0xFF;
st->tx[2] = reg_address & 0xFF;
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->spi, &msg);
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->spi->dev, "problem when reading 32 bit register 0x%02X",
reg_address);
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index ed07a348eb55..53110b6a3c74 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -130,15 +130,12 @@ static int ad2s1210_config_read(struct ad2s1210_state *st,
.rx_buf = st->rx,
.tx_buf = st->tx,
};
- struct spi_message msg;
int ret = 0;
ad2s1210_set_mode(MOD_CONFIG, st);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
st->tx[1] = AD2S1210_REG_FAULT;
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_sync_transfer(st->sdev, &xfer, 1);
if (ret < 0)
return ret;
st->old_data = true;
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
deleted file mode 100644
index 3a45f9a52de8..000000000000
--- a/drivers/staging/iio/ring_sw.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/* The industrial I/O simple minimally locked ring buffer.
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-#include "ring_sw.h"
-#include <linux/iio/trigger.h>
-
-/**
- * struct iio_sw_ring_buffer - software ring buffer
- * @buf: generic ring buffer elements
- * @data: the ring buffer memory
- * @read_p: read pointer (oldest available)
- * @write_p: write pointer
- * @half_p: half buffer length behind write_p (event generation)
- * @update_needed: flag to indicate change in size requested
- *
- * Note that the first element of all ring buffers must be a
- * struct iio_buffer.
-**/
-struct iio_sw_ring_buffer {
- struct iio_buffer buf;
- unsigned char *data;
- unsigned char *read_p;
- unsigned char *write_p;
- /* used to act as a point at which to signal an event */
- unsigned char *half_p;
- int update_needed;
-};
-
-#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
-
-static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
- int bytes_per_datum, int length)
-{
- if ((length == 0) || (bytes_per_datum == 0))
- return -EINVAL;
- __iio_update_buffer(&ring->buf, bytes_per_datum, length);
- ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
- ring->read_p = NULL;
- ring->write_p = NULL;
- ring->half_p = NULL;
- return ring->data ? 0 : -ENOMEM;
-}
-
-static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
-{
- kfree(ring->data);
-}
-
-/* Ring buffer related functionality */
-/* Store to ring is typically called in the bh of a data ready interrupt handler
- * in the device driver */
-/* Lock always held if their is a chance this may be called */
-/* Only one of these per ring may run concurrently - enforced by drivers */
-static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
- unsigned char *data)
-{
- int ret = 0;
- unsigned char *temp_ptr, *change_test_ptr;
-
- /* initial store */
- if (unlikely(ring->write_p == NULL)) {
- ring->write_p = ring->data;
- /* Doesn't actually matter if this is out of the set
- * as long as the read pointer is valid before this
- * passes it - guaranteed as set later in this function.
- */
- ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
- }
- /* Copy data to where ever the current write pointer says */
- memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
- barrier();
- /* Update the pointer used to get most recent value.
- * Always valid as either points to latest or second latest value.
- * Before this runs it is null and read attempts fail with -EAGAIN.
- */
- barrier();
- /* temp_ptr used to ensure we never have an invalid pointer
- * it may be slightly lagging, but never invalid
- */
- temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
- /* End of ring, back to the beginning */
- if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
- temp_ptr = ring->data;
- /* Update the write pointer
- * always valid as long as this is the only function able to write.
- * Care needed with smp systems to ensure more than one ring fill
- * is never scheduled.
- */
- ring->write_p = temp_ptr;
-
- if (ring->read_p == NULL)
- ring->read_p = ring->data;
- /* Buffer full - move the read pointer and create / escalate
- * ring event */
- /* Tricky case - if the read pointer moves before we adjust it.
- * Handle by not pushing if it has moved - may result in occasional
- * unnecessary buffer full events when it wasn't quite true.
- */
- else if (ring->write_p == ring->read_p) {
- change_test_ptr = ring->read_p;
- temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
- if (temp_ptr
- == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
- temp_ptr = ring->data;
- }
- /* We are moving pointer on one because the ring is full. Any
- * change to the read pointer will be this or greater.
- */
- if (change_test_ptr == ring->read_p)
- ring->read_p = temp_ptr;
- }
- /* investigate if our event barrier has been passed */
- /* There are definite 'issues' with this and chances of
- * simultaneous read */
- /* Also need to use loop count to ensure this only happens once */
- ring->half_p += ring->buf.bytes_per_datum;
- if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
- ring->half_p = ring->data;
- if (ring->half_p == ring->read_p) {
- ring->buf.stufftoread = true;
- wake_up_interruptible(&ring->buf.pollq);
- }
- return ret;
-}
-
-static int iio_read_first_n_sw_rb(struct iio_buffer *r,
- size_t n, char __user *buf)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
-
- u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
- u8 *data;
- int ret, max_copied, bytes_to_rip, dead_offset;
- size_t data_available, buffer_size;
-
- /* A userspace program has probably made an error if it tries to
- * read something that is not a whole number of bpds.
- * Return an error.
- */
- if (n % ring->buf.bytes_per_datum) {
- ret = -EINVAL;
- printk(KERN_INFO "Ring buffer read request not whole number of"
- "samples: Request bytes %zd, Current bytes per datum %d\n",
- n, ring->buf.bytes_per_datum);
- goto error_ret;
- }
-
- buffer_size = ring->buf.bytes_per_datum*ring->buf.length;
-
- /* Limit size to whole of ring buffer */
- bytes_to_rip = min_t(size_t, buffer_size, n);
-
- data = kmalloc(bytes_to_rip, GFP_KERNEL);
- if (data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- /* build local copy */
- initial_read_p = ring->read_p;
- if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
- ret = 0;
- goto error_free_data_cpy;
- }
-
- initial_write_p = ring->write_p;
-
- /* Need a consistent pair */
- while ((initial_read_p != ring->read_p)
- || (initial_write_p != ring->write_p)) {
- initial_read_p = ring->read_p;
- initial_write_p = ring->write_p;
- }
- if (initial_write_p == initial_read_p) {
- /* No new data available.*/
- ret = 0;
- goto error_free_data_cpy;
- }
-
- if (initial_write_p >= initial_read_p)
- data_available = initial_write_p - initial_read_p;
- else
- data_available = buffer_size - (initial_read_p - initial_write_p);
-
- if (data_available < bytes_to_rip)
- bytes_to_rip = data_available;
-
- if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
- max_copied = ring->data + buffer_size - initial_read_p;
- memcpy(data, initial_read_p, max_copied);
- memcpy(data + max_copied, ring->data, bytes_to_rip - max_copied);
- end_read_p = ring->data + bytes_to_rip - max_copied;
- } else {
- memcpy(data, initial_read_p, bytes_to_rip);
- end_read_p = initial_read_p + bytes_to_rip;
- }
-
- /* Now to verify which section was cleanly copied - i.e. how far
- * read pointer has been pushed */
- current_read_p = ring->read_p;
-
- if (initial_read_p <= current_read_p)
- dead_offset = current_read_p - initial_read_p;
- else
- dead_offset = buffer_size - (initial_read_p - current_read_p);
-
- /* possible issue if the initial write has been lapped or indeed
- * the point we were reading to has been passed */
- /* No valid data read.
- * In this case the read pointer is already correct having been
- * pushed further than we would look. */
- if (bytes_to_rip - dead_offset < 0) {
- ret = 0;
- goto error_free_data_cpy;
- }
-
- /* setup the next read position */
- /* Beware, this may fail due to concurrency fun and games.
- * Possible that sufficient fill commands have run to push the read
- * pointer past where we would be after the rip. If this occurs, leave
- * it be.
- */
- /* Tricky - deal with loops */
-
- while (ring->read_p != end_read_p)
- ring->read_p = end_read_p;
-
- ret = bytes_to_rip - dead_offset;
-
- if (copy_to_user(buf, data + dead_offset, ret)) {
- ret = -EFAULT;
- goto error_free_data_cpy;
- }
-
- if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
- ring->buf.stufftoread = 0;
-
-error_free_data_cpy:
- kfree(data);
-error_ret:
-
- return ret;
-}
-
-static int iio_store_to_sw_rb(struct iio_buffer *r,
- u8 *data)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- return iio_store_to_sw_ring(ring, data);
-}
-
-static int iio_request_update_sw_rb(struct iio_buffer *r)
-{
- int ret = 0;
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
-
- r->stufftoread = false;
- if (!ring->update_needed)
- goto error_ret;
- __iio_free_sw_ring_buffer(ring);
- ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
- ring->buf.length);
-error_ret:
- return ret;
-}
-
-static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- return ring->buf.bytes_per_datum;
-}
-
-static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- ring->update_needed = true;
- return 0;
-}
-
-static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
-{
- if (r->bytes_per_datum != bpd) {
- r->bytes_per_datum = bpd;
- iio_mark_update_needed_sw_rb(r);
- }
- return 0;
-}
-
-static int iio_get_length_sw_rb(struct iio_buffer *r)
-{
- return r->length;
-}
-
-static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
-{
- if (r->length != length) {
- r->length = length;
- iio_mark_update_needed_sw_rb(r);
- }
- return 0;
-}
-
-static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_LENGTH_ATTR;
-
-/* Standard set of ring buffer attributes */
-static struct attribute *iio_ring_attributes[] = {
- &dev_attr_length.attr,
- &dev_attr_enable.attr,
- NULL,
-};
-
-static struct attribute_group iio_ring_attribute_group = {
- .attrs = iio_ring_attributes,
- .name = "buffer",
-};
-
-static const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .store_to = &iio_store_to_sw_rb,
- .read_first_n = &iio_read_first_n_sw_rb,
- .request_update = &iio_request_update_sw_rb,
- .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
- .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
- .get_length = &iio_get_length_sw_rb,
- .set_length = &iio_set_length_sw_rb,
-};
-
-struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buf;
- struct iio_sw_ring_buffer *ring;
-
- ring = kzalloc(sizeof *ring, GFP_KERNEL);
- if (!ring)
- return NULL;
- ring->update_needed = true;
- buf = &ring->buf;
- iio_buffer_init(buf);
- buf->attrs = &iio_ring_attribute_group;
- buf->access = &ring_sw_access_funcs;
-
- return buf;
-}
-EXPORT_SYMBOL(iio_sw_rb_allocate);
-
-void iio_sw_rb_free(struct iio_buffer *r)
-{
- kfree(iio_to_sw_ring(r));
-}
-EXPORT_SYMBOL(iio_sw_rb_free);
-
-MODULE_DESCRIPTION("Industrial I/O software ring buffer");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
deleted file mode 100644
index a5857aa7aefa..000000000000
--- a/drivers/staging/iio/ring_sw.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* The industrial I/O simple minimally locked ring buffer.
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This code is deliberately kept separate from the main industrialio I/O core
- * as it is intended that in the future a number of different software ring
- * buffer implementations will exist with different characteristics to suit
- * different applications.
- *
- * This particular one was designed for a data capture application where it was
- * particularly important that no userspace reads would interrupt the capture
- * process. To this end the ring is not locked during a read.
- *
- * Comments on this buffer design welcomed. It's far from efficient and some of
- * my understanding of the effects of scheduling on this are somewhat limited.
- * Frankly, to my mind, this is the current weak point in the industrial I/O
- * patch set.
- */
-
-#ifndef _IIO_RING_SW_H_
-#define _IIO_RING_SW_H_
-#include <linux/iio/buffer.h>
-
-struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
-void iio_sw_rb_free(struct iio_buffer *ring);
-#endif /* _IIO_RING_SW_H_ */
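
Editorial note: with the minimally locked software ring removed, IIO drivers of this generation typically attach the kfifo-backed buffer instead. The sketch below is illustrative only and assumes the 3.x-era iio_kfifo_allocate() signature (which takes the iio_dev) and direct assignment of indio_dev->buffer; it is not part of this patch.

#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>

/* Illustrative stand-in for the removed iio_sw_rb_allocate()/iio_sw_rb_free(). */
static int example_attach_kfifo(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = iio_kfifo_allocate(indio_dev);
	if (!buffer)
		return -ENOMEM;

	indio_dev->buffer = buffer;
	return 0;
}

static void example_detach_kfifo(struct iio_dev *indio_dev)
{
	iio_kfifo_free(indio_dev->buffer);
}
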
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
index 7d3207559265..d44d3ad26fa5 100644
--- a/drivers/staging/iio/trigger/Kconfig
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -21,7 +21,6 @@ config IIO_GPIO_TRIGGER
config IIO_SYSFS_TRIGGER
tristate "SYSFS trigger"
depends on SYSFS
- depends on HAVE_IRQ_WORK
select IRQ_WORK
help
Provides support for using SYSFS entry as IIO triggers.
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index ecf0f44bc70e..cec19f1cf56c 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -584,7 +584,6 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
ret = imx_drm_encoder_register(imx_drm_encoder);
if (ret) {
- kfree(imx_drm_encoder);
ret = -ENOMEM;
goto err_register;
}
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 677e665ca86d..366f259e3756 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -1104,7 +1104,9 @@ static int ipu_probe(struct platform_device *pdev)
if (ret)
goto out_failed_irq;
- ipu_reset(ipu);
+ ret = ipu_reset(ipu);
+ if (ret)
+ goto out_failed_reset;
/* Set MCU_T to divide MCU access window into 2 */
ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
@@ -1129,6 +1131,7 @@ failed_add_clients:
ipu_submodules_exit(ipu);
failed_submodules_init:
ipu_irq_exit(ipu);
+out_failed_reset:
out_failed_irq:
clk_disable_unprepare(ipu->clk);
failed_clk_get:
@@ -1139,9 +1142,6 @@ failed_ioremap:
static int ipu_remove(struct platform_device *pdev)
{
struct ipu_soc *ipu = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
platform_device_unregister_children(pdev);
ipu_submodules_exit(ipu);
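
Editorial note: the ipu_probe() change above checks the ipu_reset() return value and unwinds through the new out_failed_reset label. The error path follows the usual probe() convention of stacked labels that undo each completed step in reverse order; a schematic sketch with placeholder step names, not the driver's actual calls:

#include <linux/platform_device.h>

/* Placeholder steps standing in for irq setup, controller reset, etc. */
static int example_step(struct platform_device *pdev) { return 0; }
static void example_undo_step(struct platform_device *pdev) { }

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = example_step(pdev);		/* e.g. request the irq */
	if (ret)
		goto out_failed_irq;

	ret = example_step(pdev);		/* e.g. ipu_reset() */
	if (ret)
		goto out_failed_reset;

	ret = example_step(pdev);		/* e.g. register child devices */
	if (ret)
		goto out_failed_clients;

	return 0;

out_failed_clients:
	example_undo_step(pdev);		/* undo the reset-dependent setup */
out_failed_reset:
	example_undo_step(pdev);		/* release the irq */
out_failed_irq:
	return ret;
}
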
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-di.c b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
index 67d974f7be36..ec340da968ac 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-di.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
@@ -677,7 +677,7 @@ int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
goto failed_clk_register;
}
- dev_info(dev, "DI%d base: 0x%08lx remapped to %p\n",
+ dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n",
id, base, di->base);
di->inuse = false;
di->ipu = ipu;
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 1892006526b5..4b3a019409b5 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -452,7 +452,7 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
int ret;
ipu_crtc->ipu_ch = ipu_idmac_get(ipu, pdata->dma[0]);
- if (IS_ERR_OR_NULL(ipu_crtc->ipu_ch)) {
+ if (IS_ERR(ipu_crtc->ipu_ch)) {
ret = PTR_ERR(ipu_crtc->ipu_ch);
goto err_out;
}
@@ -472,7 +472,7 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
if (pdata->dp >= 0) {
ipu_crtc->dp = ipu_dp_get(ipu, pdata->dp);
if (IS_ERR(ipu_crtc->dp)) {
- ret = PTR_ERR(ipu_crtc->ipu_ch);
+ ret = PTR_ERR(ipu_crtc->dp);
goto err_out;
}
}
@@ -548,6 +548,8 @@ static int ipu_drm_probe(struct platform_device *pdev)
ipu_crtc->dev = &pdev->dev;
ret = ipu_crtc_init(ipu_crtc, pdata);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, ipu_crtc);
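
Editorial note: two fixes above are worth calling out: ipu_idmac_get() does not return NULL on failure, so IS_ERR() replaces IS_ERR_OR_NULL(), and the DP branch now propagates PTR_ERR() of the pointer that actually failed (ipu_crtc->dp) instead of a previously assigned one. The general ERR_PTR convention, sketched with a hypothetical getter:

#include <linux/err.h>

struct example_resource;
struct example_resource *example_get(int id);	/* hypothetical getter */

static int example_acquire(struct example_resource **out, int id)
{
	struct example_resource *res;

	res = example_get(id);
	if (IS_ERR(res))
		return PTR_ERR(res);	/* propagate the error encoded in res */

	*out = res;
	return 0;
}
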
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index 55a0b82c6391..f656f8aeeda3 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -42,19 +42,12 @@ static int eucr_suspend(struct usb_interface *iface, pm_message_t message)
/* Wait until no command is running */
mutex_lock(&us->dev_mutex);
- //US_DEBUGP("%s\n", __func__);
if (us->suspend_resume_hook)
(us->suspend_resume_hook)(us, US_SUSPEND);
- /* When runtime PM is working, we'll set a flag to indicate
- * whether we should autoresume when a SCSI request arrives. */
- // us->Power_IsResum = true;
- //us->SD_Status.Ready = 0;
-
mutex_unlock(&us->dev_mutex);
return 0;
}
-//EXPORT_SYMBOL_GPL(eucr_suspend);
static int eucr_resume(struct usb_interface *iface)
{
@@ -64,43 +57,40 @@ static int eucr_resume(struct usb_interface *iface)
pr_info("--- eucr_resume---\n");
mutex_lock(&us->dev_mutex);
- //US_DEBUGP("%s\n", __func__);
if (us->suspend_resume_hook)
(us->suspend_resume_hook)(us, US_RESUME);
mutex_unlock(&us->dev_mutex);
-
- us->Power_IsResum = true;
- //
- //us->SD_Status.Ready = 0; //??
- us->SM_Status = *(PSM_STATUS)&tmp;
-
+ us->Power_IsResum = true;
+
+ us->SM_Status = *(PSM_STATUS)&tmp;
+
return 0;
}
-//EXPORT_SYMBOL_GPL(eucr_resume);
+
static int eucr_reset_resume(struct usb_interface *iface)
{
BYTE tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
pr_info("--- eucr_reset_resume---\n");
- //US_DEBUGP("%s\n", __func__);
/* Report the reset to the SCSI core */
usb_stor_report_bus_reset(us);
- /* FIXME: Notify the subdrivers that they need to reinitialize
- * the device */
- //ENE_InitMedia(us);
+ /*
+ * FIXME: Notify the subdrivers that they need to reinitialize
+ * the device
+ */
+
us->Power_IsResum = true;
- //
- //us->SD_Status.Ready = 0; //??
- us->SM_Status = *(PSM_STATUS)&tmp;
+
+ us->SM_Status = *(PSM_STATUS)&tmp;
+
return 0;
}
-//EXPORT_SYMBOL_GPL(usb_stor_reset_resume);
#else
@@ -110,7 +100,6 @@ static int eucr_reset_resume(struct usb_interface *iface)
#endif
-//----- eucr_pre_reset() ---------------------
static int eucr_pre_reset(struct usb_interface *iface)
{
struct us_data *us = usb_get_intfdata(iface);
@@ -122,7 +111,6 @@ static int eucr_pre_reset(struct usb_interface *iface)
return 0;
}
-//----- eucr_post_reset() ---------------------
static int eucr_post_reset(struct usb_interface *iface)
{
struct us_data *us = usb_get_intfdata(iface);
@@ -136,19 +124,15 @@ static int eucr_post_reset(struct usb_interface *iface)
return 0;
}
-//----- fill_inquiry_response() ---------------------
void fill_inquiry_response(struct us_data *us, unsigned char *data, unsigned int data_len)
{
pr_info("usb --- fill_inquiry_response\n");
- if (data_len<36) // You lose.
+ if (data_len < 36) /* You lose. */
return;
- if (data[0]&0x20)
- {
+ if (data[0]&0x20) {
memset(data+8,0,28);
- }
- else
- {
+ } else {
u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
memcpy(data+8, us->unusual_dev->vendorName,
strlen(us->unusual_dev->vendorName) > 8 ? 8 :
@@ -164,18 +148,16 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data, unsigned int
usb_stor_set_xfer_buf(us, data, data_len, us->srb, TO_XFER_BUF);
}
-//----- usb_stor_control_thread() ---------------------
static int usb_stor_control_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
struct Scsi_Host *host = us_to_host(us);
pr_info("usb --- usb_stor_control_thread\n");
- for(;;)
- {
+ for (;;) {
if (wait_for_completion_interruptible(&us->cmnd_ready))
break;
-
+
/* lock the device pointers */
mutex_lock(&(us->dev_mutex));
@@ -189,44 +171,34 @@ static int usb_stor_control_thread(void * __us)
scsi_lock(host);
/* When we are called with no command pending, we're done */
- if (us->srb == NULL)
- {
+ if (us->srb == NULL) {
scsi_unlock(host);
mutex_unlock(&us->dev_mutex);
- //US_DEBUGP("-- exiting\n");
break;
}
/* has the command timed out *already* ? */
- if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags))
- {
+ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
us->srb->result = DID_ABORT << 16;
goto SkipForAbort;
}
scsi_unlock(host);
- if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL)
- {
+ if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
us->srb->result = DID_ERROR << 16;
- }
- else if (us->srb->device->id && !(us->fflags & US_FL_SCM_MULT_TARG))
- {
+ } else if (us->srb->device->id
+ && !(us->fflags & US_FL_SCM_MULT_TARG)) {
us->srb->result = DID_BAD_TARGET << 16;
- }
- else if (us->srb->device->lun > us->max_lun)
- {
+ } else if (us->srb->device->lun > us->max_lun) {
us->srb->result = DID_BAD_TARGET << 16;
- }
- else if ((us->srb->cmnd[0] == INQUIRY) && (us->fflags & US_FL_FIX_INQUIRY))
- {
+ } else if ((us->srb->cmnd[0] == INQUIRY)
+ && (us->fflags & US_FL_FIX_INQUIRY)) {
unsigned char data_ptr[36] = {0x00, 0x80, 0x02, 0x02, 0x1F, 0x00, 0x00, 0x00};
fill_inquiry_response(us, data_ptr, 36);
us->srb->result = SAM_STAT_GOOD;
- }
- else
- {
+ } else {
us->proto_handler(us->srb, us);
}
@@ -234,18 +206,14 @@ static int usb_stor_control_thread(void * __us)
scsi_lock(host);
/* indicate that the command is done */
- if (us->srb->result != DID_ABORT << 16)
- {
+ if (us->srb->result != DID_ABORT << 16) {
us->srb->scsi_done(us->srb);
- }
- else
- {
+ } else {
SkipForAbort:
pr_info("scsi command aborted\n");
}
- if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags))
- {
+ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
complete(&(us->notify));
/* Allow USB transfers to resume */
@@ -262,8 +230,7 @@ SkipForAbort:
} /* for (;;) */
/* Wait until we are told to stop */
- for (;;)
- {
+ for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
break;
@@ -271,9 +238,8 @@ SkipForAbort:
}
__set_current_state(TASK_RUNNING);
return 0;
-}
+}
-//----- associate_dev() ---------------------
static int associate_dev(struct us_data *us, struct usb_interface *intf)
{
pr_info("usb --- associate_dev\n");
@@ -288,29 +254,24 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
/* Allocate the device-related DMA-mapped buffers */
us->cr = usb_alloc_coherent(us->pusb_dev, sizeof(*us->cr), GFP_KERNEL, &us->cr_dma);
- if (!us->cr)
- {
+ if (!us->cr) {
pr_info("usb_ctrlrequest allocation failed\n");
return -ENOMEM;
}
us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE, GFP_KERNEL, &us->iobuf_dma);
- if (!us->iobuf)
- {
+ if (!us->iobuf) {
pr_info("I/O buffer allocation failed\n");
return -ENOMEM;
}
us->sensebuf = kmalloc(US_SENSE_SIZE, GFP_KERNEL);
if (!us->sensebuf)
- {
- pr_info("Sense buffer allocation failed\n");
return -ENOMEM;
- }
+
return 0;
}
-//----- get_device_info() ---------------------
static int get_device_info(struct us_data *us, const struct usb_device_id *id)
{
struct usb_device *dev = us->pusb_dev;
@@ -323,8 +284,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id)
us->fflags = id->driver_info;
us->Power_IsResum = false;
- if (us->fflags & US_FL_IGNORE_DEVICE)
- {
+ if (us->fflags & US_FL_IGNORE_DEVICE) {
pr_info("device ignored\n");
return -ENODEV;
}
@@ -335,7 +295,6 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id)
return 0;
}
-//----- get_transport() ---------------------
static int get_transport(struct us_data *us)
{
pr_info("usb --- get_transport\n");
@@ -349,7 +308,6 @@ static int get_transport(struct us_data *us)
default:
return -EIO;
}
- /* pr_info("Transport: %s\n", us->transport_name); */
/* fix for single-lun devices */
if (us->fflags & US_FL_SINGLE_LUN)
@@ -357,7 +315,6 @@ static int get_transport(struct us_data *us)
return 0;
}
-//----- get_protocol() ---------------------
static int get_protocol(struct us_data *us)
{
pr_info("usb --- get_protocol\n");
@@ -368,7 +325,8 @@ static int get_protocol(struct us_data *us)
switch (us->subclass) {
case USB_SC_SCSI:
us->protocol_name = "Transparent SCSI";
- if( (us->pusb_dev->descriptor.idVendor == 0x0CF2) && (us->pusb_dev->descriptor.idProduct == 0x6250) )
+ if ((us->pusb_dev->descriptor.idVendor == 0x0CF2)
+ && (us->pusb_dev->descriptor.idProduct == 0x6250))
us->proto_handler = ENE_stor_invoke_transport;
else
us->proto_handler = usb_stor_invoke_transport;
@@ -377,11 +335,9 @@ static int get_protocol(struct us_data *us)
default:
return -EIO;
}
- /* pr_info("Protocol: %s\n", us->protocol_name); */
return 0;
}
-//----- get_pipes() ---------------------
static int get_pipes(struct us_data *us)
{
struct usb_host_interface *altsetting = us->pusb_intf->cur_altsetting;
@@ -393,32 +349,24 @@ static int get_pipes(struct us_data *us)
pr_info("usb --- get_pipes\n");
- for (i = 0; i < altsetting->desc.bNumEndpoints; i++)
- {
+ for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
ep = &altsetting->endpoint[i].desc;
- if (usb_endpoint_xfer_bulk(ep))
- {
- if (usb_endpoint_dir_in(ep))
- {
+ if (usb_endpoint_xfer_bulk(ep)) {
+ if (usb_endpoint_dir_in(ep)) {
if (!ep_in)
ep_in = ep;
- }
- else
- {
+ } else {
if (!ep_out)
ep_out = ep;
}
- }
- else if (usb_endpoint_is_int_in(ep))
- {
+ } else if (usb_endpoint_is_int_in(ep)) {
if (!ep_int)
ep_int = ep;
}
}
- if (!ep_in || !ep_out || (us->protocol == USB_PR_CBI && !ep_int))
- {
+ if (!ep_in || !ep_out || (us->protocol == USB_PR_CBI && !ep_int)) {
pr_info("Endpoint sanity check failed! Rejecting dev.\n");
return -EIO;
}
@@ -428,31 +376,27 @@ static int get_pipes(struct us_data *us)
us->recv_ctrl_pipe = usb_rcvctrlpipe(us->pusb_dev, 0);
us->send_bulk_pipe = usb_sndbulkpipe(us->pusb_dev, ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
us->recv_bulk_pipe = usb_rcvbulkpipe(us->pusb_dev, ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- if (ep_int)
- {
+ if (ep_int) {
us->recv_intr_pipe = usb_rcvintpipe(us->pusb_dev, ep_int->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
us->ep_bInterval = ep_int->bInterval;
}
return 0;
}
-//----- usb_stor_acquire_resources() ---------------------
static int usb_stor_acquire_resources(struct us_data *us)
{
struct task_struct *th;
pr_info("usb --- usb_stor_acquire_resources\n");
us->current_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!us->current_urb)
- {
+ if (!us->current_urb) {
pr_info("URB allocation failed\n");
return -ENOMEM;
}
/* Start up our control thread */
th = kthread_run(usb_stor_control_thread, us, "eucr-storage");
- if (IS_ERR(th))
- {
+ if (IS_ERR(th)) {
pr_info("Unable to start control thread\n");
return PTR_ERR(th);
}
@@ -461,7 +405,6 @@ static int usb_stor_acquire_resources(struct us_data *us)
return 0;
}
-//----- usb_stor_release_resources() ---------------------
static void usb_stor_release_resources(struct us_data *us)
{
pr_info("usb --- usb_stor_release_resources\n");
@@ -473,8 +416,7 @@ static void usb_stor_release_resources(struct us_data *us)
kthread_stop(us->ctl_thread);
/* Call the destructor routine, if it exists */
- if (us->extra_destructor)
- {
+ if (us->extra_destructor) {
pr_info("-- calling extra_destructor()\n");
us->extra_destructor(us->extra);
}
@@ -484,7 +426,6 @@ static void usb_stor_release_resources(struct us_data *us)
usb_free_urb(us->current_urb);
}
-//----- dissociate_dev() ---------------------
static void dissociate_dev(struct us_data *us)
{
pr_info("usb --- dissociate_dev\n");
@@ -501,7 +442,6 @@ static void dissociate_dev(struct us_data *us)
usb_set_intfdata(us->pusb_intf, NULL);
}
-//----- quiesce_and_remove_host() ---------------------
static void quiesce_and_remove_host(struct us_data *us)
{
struct Scsi_Host *host = us_to_host(us);
@@ -512,19 +452,22 @@ static void quiesce_and_remove_host(struct us_data *us)
if (us->pusb_dev->state == USB_STATE_NOTATTACHED)
set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
- /* Prevent SCSI-scanning (if it hasn't started yet)
+ /*
+ * Prevent SCSI-scanning (if it hasn't started yet)
* and wait for the SCSI-scanning thread to stop.
*/
set_bit(US_FLIDX_DONT_SCAN, &us->dflags);
wake_up(&us->delay_wait);
wait_for_completion(&us->scanning_done);
- /* Removing the host will perform an orderly shutdown: caches
+ /*
+ * Removing the host will perform an orderly shutdown: caches
* synchronized, disks spun down, etc.
*/
scsi_remove_host(host);
- /* Prevent any new commands from being accepted and cut short
+ /*
+ * Prevent any new commands from being accepted and cut short
* reset delays.
*/
scsi_lock(host);
@@ -533,7 +476,6 @@ static void quiesce_and_remove_host(struct us_data *us)
wake_up(&us->delay_wait);
}
-//----- release_everything() ---------------------
static void release_everything(struct us_data *us)
{
pr_info("usb --- release_everything\n");
@@ -543,7 +485,6 @@ static void release_everything(struct us_data *us)
scsi_host_put(us_to_host(us));
}
-//----- usb_stor_scan_thread() ---------------------
static int usb_stor_scan_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
@@ -560,11 +501,10 @@ static int usb_stor_scan_thread(void * __us)
}
/* If the device is still connected, perform the scanning */
- if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags))
- {
+ if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) {
/* For bulk-only devices, determine the max LUN value */
- if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN))
- {
+ if (us->protocol == USB_PR_BULK
+ && !(us->fflags & US_FL_SINGLE_LUN)) {
mutex_lock(&us->dev_mutex);
us->max_lun = usb_stor_Bulk_max_lun(us);
mutex_unlock(&us->dev_mutex);
@@ -575,7 +515,6 @@ static int usb_stor_scan_thread(void * __us)
complete_and_exit(&us->scanning_done, 0);
}
-//----- eucr_probe() ---------------------
static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct Scsi_Host *host;
@@ -587,8 +526,7 @@ static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id
pr_info("usb --- eucr_probe\n");
host = scsi_host_alloc(&usb_stor_host_template, sizeof(*us));
- if (!host)
- {
+ if (!host) {
pr_info("Unable to allocate the scsi host\n");
return -ENOMEM;
}
@@ -630,8 +568,7 @@ static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id
goto BadDevice;
result = scsi_add_host(host, &intf->dev);
- if (result)
- {
+ if (result) {
pr_info("Unable to add the scsi host\n");
goto BadDevice;
}
@@ -673,7 +610,6 @@ BadDevice:
return result;
}
-//----- eucr_disconnect() ---------------------
static void eucr_disconnect(struct usb_interface *intf)
{
struct us_data *us = usb_get_intfdata(intf);
@@ -683,11 +619,7 @@ static void eucr_disconnect(struct usb_interface *intf)
release_everything(us);
}
-/***********************************************************************
- * Initialization and registration
- ***********************************************************************/
-
-//----- usb_storage_driver() ---------------------
+/* Initialization and registration */
static struct usb_driver usb_storage_driver = {
.name = "eucr",
.probe = eucr_probe,
diff --git a/drivers/staging/line6/Kconfig b/drivers/staging/line6/Kconfig
index b63543658b2e..4f1219b4c692 100644
--- a/drivers/staging/line6/Kconfig
+++ b/drivers/staging/line6/Kconfig
@@ -23,16 +23,6 @@ menuconfig LINE6_USB
if LINE6_USB
-config LINE6_USB_DUMP_PCM
- bool "dump PCM data"
- default n
- help
- Say Y here to write PCM data sent to and received from Line6
- devices to the syslog. This will produce a huge amount of
- syslog data during playback and capture.
-
- If unsure, say N.
-
config LINE6_USB_IMPULSE_RESPONSE
bool "measure impulse response"
default n
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 389c41fd1b74..f8316b71f13d 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -216,16 +216,6 @@ static void audio_in_callback(struct urb *urb)
if (urb == line6pcm->urb_audio_in[index])
break;
-#ifdef CONFIG_LINE6_USB_DUMP_PCM
- for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
- struct usb_iso_packet_descriptor *fout =
- &urb->iso_frame_desc[i];
- line6_write_hexdump(line6pcm->line6, 'C',
- urb->transfer_buffer + fout->offset,
- fout->length);
- }
-#endif
-
spin_lock_irqsave(&line6pcm->lock_audio_in, flags);
for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 1e4ce50069a9..6252aca82866 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -135,47 +135,6 @@ static void line6_stop_listen(struct usb_line6 *line6)
usb_kill_urb(line6->urb_listen);
}
-#ifdef CONFIG_LINE6_USB_DUMP_ANY
-/*
- Write hexdump to syslog.
-*/
-void line6_write_hexdump(struct usb_line6 *line6, char dir,
- const unsigned char *buffer, int size)
-{
- static const int BYTES_PER_LINE = 8;
- char hexdump[100];
- char asc[BYTES_PER_LINE + 1];
- int i, j;
-
- for (i = 0; i < size; i += BYTES_PER_LINE) {
- int hexdumpsize = sizeof(hexdump);
- char *p = hexdump;
- int n = min(size - i, BYTES_PER_LINE);
- asc[n] = 0;
-
- for (j = 0; j < BYTES_PER_LINE; ++j) {
- int bytes;
-
- if (j < n) {
- unsigned char val = buffer[i + j];
- bytes = snprintf(p, hexdumpsize, " %02X", val);
- asc[j] = ((val >= 0x20)
- && (val < 0x7f)) ? val : '.';
- } else
- bytes = snprintf(p, hexdumpsize, " ");
-
- if (bytes > hexdumpsize)
- break; /* buffer overflow */
-
- p += bytes;
- hexdumpsize -= bytes;
- }
-
- dev_info(line6->ifcdev, "%c%04X:%s %s\n", dir, i, hexdump, asc);
- }
-}
-#endif
-
/*
Send raw message in pieces of wMaxPacketSize bytes.
*/
@@ -274,11 +233,8 @@ int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer,
/* create message: */
msg = kmalloc(sizeof(struct message), GFP_ATOMIC);
-
- if (msg == NULL) {
- dev_err(line6->ifcdev, "Out of memory\n");
+ if (msg == NULL)
return -ENOMEM;
- }
/* create URB: */
urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -307,14 +263,13 @@ int line6_version_request_async(struct usb_line6 *line6)
char *buffer;
int retval;
- buffer = kmalloc(sizeof(line6_request_version), GFP_ATOMIC);
+ buffer = kmemdup(line6_request_version,
+ sizeof(line6_request_version), GFP_ATOMIC);
if (buffer == NULL) {
dev_err(line6->ifcdev, "Out of memory");
return -ENOMEM;
}
- memcpy(buffer, line6_request_version, sizeof(line6_request_version));
-
retval = line6_send_raw_message_async(line6, buffer,
sizeof(line6_request_version));
kfree(buffer);
@@ -333,17 +288,6 @@ int line6_send_sysex_message(struct usb_line6 *line6, const char *buffer,
}
/*
- Send sysex message in pieces of wMaxPacketSize bytes.
-*/
-int line6_send_sysex_message_async(struct usb_line6 *line6, const char *buffer,
- int size)
-{
- return line6_send_raw_message_async(line6, buffer,
- size + SYSEX_EXTRA_SIZE) -
- SYSEX_EXTRA_SIZE;
-}
-
-/*
Allocate buffer for sysex message and prepare header.
@param code sysex message code
@param size number of bytes between code and sysex end
@@ -353,10 +297,8 @@ char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, int code2,
{
char *buffer = kmalloc(size + SYSEX_EXTRA_SIZE, GFP_ATOMIC);
- if (!buffer) {
- dev_err(line6->ifcdev, "out of memory\n");
+ if (!buffer)
return NULL;
- }
buffer[0] = LINE6_SYSEX_BEGIN;
memcpy(buffer + 1, line6_midi_id, sizeof(line6_midi_id));
@@ -372,7 +314,7 @@ char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, int code2,
static void line6_data_received(struct urb *urb)
{
struct usb_line6 *line6 = (struct usb_line6 *)urb->context;
- struct MidiBuffer *mb = &line6->line6midi->midibuf_in;
+ struct midi_buffer *mb = &line6->line6midi->midibuf_in;
int done;
if (urb->status == -ESHUTDOWN)
@@ -456,11 +398,8 @@ int line6_send_program(struct usb_line6 *line6, u8 value)
int partial;
buffer = kmalloc(2, GFP_KERNEL);
-
- if (!buffer) {
- dev_err(line6->ifcdev, "out of memory\n");
+ if (!buffer)
return -ENOMEM;
- }
buffer[0] = LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST;
buffer[1] = value;
@@ -488,11 +427,8 @@ int line6_transmit_parameter(struct usb_line6 *line6, int param, u8 value)
int partial;
buffer = kmalloc(3, GFP_KERNEL);
-
- if (!buffer) {
- dev_err(line6->ifcdev, "out of memory\n");
+ if (!buffer)
return -ENOMEM;
- }
buffer[0] = LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST;
buffer[1] = param;
@@ -532,7 +468,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
return ret;
}
- /* Wait for data length. We'll get a couple of 0xff until length arrives. */
+ /* Wait for data length. We'll get 0xff until length arrives. */
do {
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
@@ -887,9 +823,7 @@ static int line6_probe(struct usb_interface *interface,
}
line6 = kzalloc(size, GFP_KERNEL);
-
if (line6 == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
ret = -ENODEV;
goto err_put;
}
@@ -928,18 +862,14 @@ static int line6_probe(struct usb_interface *interface,
/* initialize USB buffers: */
line6->buffer_listen =
kmalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
-
if (line6->buffer_listen == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
ret = -ENOMEM;
goto err_destruct;
}
line6->buffer_message =
kmalloc(LINE6_MESSAGE_MAXLEN, GFP_KERNEL);
-
if (line6->buffer_message == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
ret = -ENOMEM;
goto err_destruct;
}
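
Editorial note: among the driver.c cleanups above, line6_version_request_async() now uses kmemdup() instead of kmalloc() followed by memcpy(), and the redundant "Out of memory" prints after failed allocations are dropped (the allocator already warns on failure). A minimal sketch of the kmemdup() pattern with an illustrative source buffer:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>

static const u8 example_request[] = { 0xf0, 0x7e, 0x7f, 0x06, 0x01, 0xf7 };

static void *example_dup_request(gfp_t gfp)
{
	/* Allocate and copy in one step; returns NULL on failure. */
	return kmemdup(example_request, sizeof(example_request), gfp);
}
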
diff --git a/drivers/staging/line6/driver.h b/drivers/staging/line6/driver.h
index f0be5a2adaba..a8341f9fdb98 100644
--- a/drivers/staging/line6/driver.h
+++ b/drivers/staging/line6/driver.h
@@ -20,10 +20,6 @@
#define DRIVER_NAME "line6usb"
-#if defined(CONFIG_LINE6_USB_DUMP_PCM)
-#define CONFIG_LINE6_USB_DUMP_ANY
-#endif
-
#define LINE6_TIMEOUT 1
#define LINE6_BUFSIZE_LISTEN 32
#define LINE6_MESSAGE_MAXLEN 256
@@ -53,7 +49,7 @@
#define LINE6_CHANNEL_MASK 0x0f
#define MISSING_CASE \
- printk(KERN_ERR "line6usb driver bug: missing case in %s:%d\n", \
+ pr_err("line6usb driver bug: missing case in %s:%d\n", \
__FILE__, __LINE__)
#define CHECK_RETURN(x) \
@@ -208,8 +204,6 @@ extern int line6_send_raw_message_async(struct usb_line6 *line6,
const char *buffer, int size);
extern int line6_send_sysex_message(struct usb_line6 *line6,
const char *buffer, int size);
-extern int line6_send_sysex_message_async(struct usb_line6 *line6,
- const char *buffer, int size);
extern ssize_t line6_set_raw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
extern void line6_start_timer(struct timer_list *timer, unsigned int msecs,
@@ -221,9 +215,4 @@ extern int line6_version_request_async(struct usb_line6 *line6);
extern int line6_write_data(struct usb_line6 *line6, int address, void *data,
size_t datalen);
-#ifdef CONFIG_LINE6_USB_DUMP_ANY
-extern void line6_write_hexdump(struct usb_line6 *line6, char dir,
- const unsigned char *buffer, int size);
-#endif
-
#endif
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index 6982eca661bd..e3f9a53dbd96 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -45,7 +45,7 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
struct usb_line6 *line6 =
line6_rawmidi_substream_midi(substream)->line6;
struct snd_line6_midi *line6midi = line6->line6midi;
- struct MidiBuffer *mb = &line6midi->midibuf_out;
+ struct midi_buffer *mb = &line6midi->midibuf_out;
unsigned long flags;
unsigned char chunk[line6->max_packet_size];
int req, done;
diff --git a/drivers/staging/line6/midi.h b/drivers/staging/line6/midi.h
index 19dabd54051a..78f903fb4d41 100644
--- a/drivers/staging/line6/midi.h
+++ b/drivers/staging/line6/midi.h
@@ -57,12 +57,12 @@ struct snd_line6_midi {
/**
Buffer for incoming MIDI stream.
*/
- struct MidiBuffer midibuf_in;
+ struct midi_buffer midibuf_in;
/**
Buffer for outgoing MIDI stream.
*/
- struct MidiBuffer midibuf_out;
+ struct midi_buffer midibuf_out;
};
extern int line6_init_midi(struct usb_line6 *line6);
diff --git a/drivers/staging/line6/midibuf.c b/drivers/staging/line6/midibuf.c
index 968e0de83dab..f0adb7baa603 100644
--- a/drivers/staging/line6/midibuf.c
+++ b/drivers/staging/line6/midibuf.c
@@ -33,23 +33,23 @@ static int midibuf_message_length(unsigned char code)
}
}
-static int midibuf_is_empty(struct MidiBuffer *this)
+static int midibuf_is_empty(struct midi_buffer *this)
{
return (this->pos_read == this->pos_write) && !this->full;
}
-static int midibuf_is_full(struct MidiBuffer *this)
+static int midibuf_is_full(struct midi_buffer *this)
{
return this->full;
}
-void line6_midibuf_reset(struct MidiBuffer *this)
+void line6_midibuf_reset(struct midi_buffer *this)
{
this->pos_read = this->pos_write = this->full = 0;
this->command_prev = -1;
}
-int line6_midibuf_init(struct MidiBuffer *this, int size, int split)
+int line6_midibuf_init(struct midi_buffer *this, int size, int split)
{
this->buf = kmalloc(size, GFP_KERNEL);
@@ -62,14 +62,14 @@ int line6_midibuf_init(struct MidiBuffer *this, int size, int split)
return 0;
}
-void line6_midibuf_status(struct MidiBuffer *this)
+void line6_midibuf_status(struct midi_buffer *this)
{
pr_debug("midibuf size=%d split=%d pos_read=%d pos_write=%d full=%d command_prev=%02x\n",
this->size, this->split, this->pos_read, this->pos_write,
this->full, this->command_prev);
}
-int line6_midibuf_bytes_free(struct MidiBuffer *this)
+int line6_midibuf_bytes_free(struct midi_buffer *this)
{
return
midibuf_is_full(this) ?
@@ -78,7 +78,7 @@ int line6_midibuf_bytes_free(struct MidiBuffer *this)
1;
}
-int line6_midibuf_bytes_used(struct MidiBuffer *this)
+int line6_midibuf_bytes_used(struct midi_buffer *this)
{
return
midibuf_is_empty(this) ?
@@ -87,7 +87,7 @@ int line6_midibuf_bytes_used(struct MidiBuffer *this)
1;
}
-int line6_midibuf_write(struct MidiBuffer *this, unsigned char *data,
+int line6_midibuf_write(struct midi_buffer *this, unsigned char *data,
int length)
{
int bytes_free;
@@ -130,7 +130,8 @@ int line6_midibuf_write(struct MidiBuffer *this, unsigned char *data,
return length + skip_active_sense;
}
-int line6_midibuf_read(struct MidiBuffer *this, unsigned char *data, int length)
+int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
+ int length)
{
int bytes_used;
int length1, length2;
@@ -234,7 +235,7 @@ int line6_midibuf_read(struct MidiBuffer *this, unsigned char *data, int length)
return length + repeat;
}
-int line6_midibuf_ignore(struct MidiBuffer *this, int length)
+int line6_midibuf_ignore(struct midi_buffer *this, int length)
{
int bytes_used = line6_midibuf_bytes_used(this);
@@ -246,7 +247,7 @@ int line6_midibuf_ignore(struct MidiBuffer *this, int length)
return length;
}
-int line6_midibuf_skip_message(struct MidiBuffer *this, unsigned short mask)
+int line6_midibuf_skip_message(struct midi_buffer *this, unsigned short mask)
{
int cmd = this->command_prev;
@@ -257,7 +258,7 @@ int line6_midibuf_skip_message(struct MidiBuffer *this, unsigned short mask)
return 0;
}
-void line6_midibuf_destroy(struct MidiBuffer *this)
+void line6_midibuf_destroy(struct midi_buffer *this)
{
kfree(this->buf);
this->buf = NULL;
diff --git a/drivers/staging/line6/midibuf.h b/drivers/staging/line6/midibuf.h
index 444cb3a12d72..707482b940e4 100644
--- a/drivers/staging/line6/midibuf.h
+++ b/drivers/staging/line6/midibuf.h
@@ -12,7 +12,7 @@
#ifndef MIDIBUF_H
#define MIDIBUF_H
-struct MidiBuffer {
+struct midi_buffer {
unsigned char *buf;
int size;
int split;
@@ -21,18 +21,18 @@ struct MidiBuffer {
int command_prev;
};
-extern int line6_midibuf_bytes_used(struct MidiBuffer *mb);
-extern int line6_midibuf_bytes_free(struct MidiBuffer *mb);
-extern void line6_midibuf_destroy(struct MidiBuffer *mb);
-extern int line6_midibuf_ignore(struct MidiBuffer *mb, int length);
-extern int line6_midibuf_init(struct MidiBuffer *mb, int size, int split);
-extern int line6_midibuf_read(struct MidiBuffer *mb, unsigned char *data,
+extern int line6_midibuf_bytes_used(struct midi_buffer *mb);
+extern int line6_midibuf_bytes_free(struct midi_buffer *mb);
+extern void line6_midibuf_destroy(struct midi_buffer *mb);
+extern int line6_midibuf_ignore(struct midi_buffer *mb, int length);
+extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split);
+extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data,
int length);
-extern void line6_midibuf_reset(struct MidiBuffer *mb);
-extern int line6_midibuf_skip_message(struct MidiBuffer *mb,
+extern void line6_midibuf_reset(struct midi_buffer *mb);
+extern int line6_midibuf_skip_message(struct midi_buffer *mb,
unsigned short mask);
-extern void line6_midibuf_status(struct MidiBuffer *mb);
-extern int line6_midibuf_write(struct MidiBuffer *mb, unsigned char *data,
+extern void line6_midibuf_status(struct midi_buffer *mb);
+extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data,
int length);
#endif
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 6c1e31335d19..02f77d74809f 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -49,11 +49,11 @@ static ssize_t pcm_set_impulse_volume(struct device *dev,
{
struct snd_line6_pcm *line6pcm = dev2pcm(dev);
int value;
- int rv;
+ int ret;
- rv = kstrtoint(buf, 10, &value);
- if (rv < 0)
- return rv;
+ ret = kstrtoint(buf, 10, &value);
+ if (ret < 0)
+ return ret;
line6pcm->impulse_volume = value;
@@ -81,7 +81,14 @@ static ssize_t pcm_set_impulse_period(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- dev2pcm(dev)->impulse_period = simple_strtoul(buf, NULL, 10);
+ int value;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ dev2pcm(dev)->impulse_period = value;
return count;
}
@@ -114,10 +121,7 @@ int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels)
line6pcm->buffer_in =
kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
line6pcm->max_packet_size, GFP_KERNEL);
-
if (!line6pcm->buffer_in) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc capture buffer\n");
err = -ENOMEM;
goto pcm_acquire_error;
}
@@ -153,10 +157,7 @@ int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels)
line6pcm->buffer_out =
kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
line6pcm->max_packet_size, GFP_KERNEL);
-
if (!line6pcm->buffer_out) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc playback buffer\n");
err = -ENOMEM;
goto pcm_acquire_error;
}
@@ -455,13 +456,12 @@ int line6_init_pcm(struct usb_line6 *line6,
ep_write = 0x01;
break;
- /* this is for interface_number == 1:
- case LINE6_DEVID_TONEPORT_UX2:
- case LINE6_DEVID_PODSTUDIO_UX2:
- ep_read = 0x87;
- ep_write = 0x00;
- break;
- */
+ /* this is for interface_number == 1:
+ case LINE6_DEVID_TONEPORT_UX2:
+ case LINE6_DEVID_PODSTUDIO_UX2:
+ ep_read = 0x87;
+ ep_write = 0x00;
+ break; */
default:
MISSING_CASE;
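
Editorial note: the sysfs store callbacks above move from simple_strtoul() (which reports no errors) to kstrtoint(), which rejects malformed input and lets the error propagate back to the writer. A minimal sketch of the store-side pattern, with hypothetical attribute and field names:

#include <linux/kernel.h>
#include <linux/device.h>

/* Hypothetical sysfs store handler parsing a decimal integer. */
static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int value;
	int ret;

	ret = kstrtoint(buf, 10, &value);
	if (ret < 0)
		return ret;		/* e.g. -EINVAL on non-numeric input */

	/* ... store 'value' into the driver's private data ... */
	return count;
}
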
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 4cf23af9c627..f9135c7cb195 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -264,15 +264,6 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
}
#endif
}
-#ifdef CONFIG_LINE6_USB_DUMP_PCM
- for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
- struct usb_iso_packet_descriptor *fout =
- &urb_out->iso_frame_desc[i];
- line6_write_hexdump(line6pcm->line6, 'P',
- urb_out->transfer_buffer + fout->offset,
- fout->length);
- }
-#endif
ret = usb_submit_urb(urb_out, GFP_ATOMIC);
diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
index e542540d0db3..74898c3c9f90 100644
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -34,12 +34,14 @@ enum {
POD_SYSEX_DUMPMEM = 0x73,
POD_SYSEX_DUMP = 0x74,
POD_SYSEX_DUMPREQ = 0x75
- /* POD_SYSEX_DUMPMEM2 = 0x76 */ /* dumps entire internal memory of PODxt Pro */
+
+ /* dumps entire internal memory of PODxt Pro */
+ /* POD_SYSEX_DUMPMEM2 = 0x76 */
};
enum {
- POD_monitor_level = 0x04,
- POD_system_invalid = 0x10000
+ POD_MONITOR_LEVEL = 0x04,
+ POD_SYSTEM_INVALID = 0x10000
};
/* *INDENT-ON* */
@@ -133,84 +135,27 @@ void line6_pod_process_message(struct usb_line6_pod *pod)
{
const unsigned char *buf = pod->line6.buffer_message;
- /* filter messages by type */
- switch (buf[0] & 0xf0) {
- case LINE6_PARAM_CHANGE:
- case LINE6_PROGRAM_CHANGE:
- case LINE6_SYSEX_BEGIN:
- break; /* handle these further down */
+ if (memcmp(buf, pod_version_header, sizeof(pod_version_header)) == 0) {
+ pod->firmware_version = buf[13] * 100 + buf[14] * 10 + buf[15];
+ pod->device_id = ((int)buf[8] << 16) | ((int)buf[9] << 8) |
+ (int) buf[10];
+ pod_startup3(pod);
+ return;
+ }
- default:
- return; /* ignore all others */
+ /* Only look for sysex messages from this device */
+ if (buf[0] != (LINE6_SYSEX_BEGIN | LINE6_CHANNEL_DEVICE) &&
+ buf[0] != (LINE6_SYSEX_BEGIN | LINE6_CHANNEL_UNKNOWN)) {
+ return;
+ }
+ if (memcmp(buf + 1, line6_midi_id, sizeof(line6_midi_id)) != 0) {
+ return;
}
- /* process all remaining messages */
- switch (buf[0]) {
- case LINE6_PARAM_CHANGE | LINE6_CHANNEL_DEVICE:
- case LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST:
- break;
-
- case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_DEVICE:
- case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST:
- break;
-
- case LINE6_SYSEX_BEGIN | LINE6_CHANNEL_DEVICE:
- case LINE6_SYSEX_BEGIN | LINE6_CHANNEL_UNKNOWN:
- if (memcmp(buf + 1, line6_midi_id, sizeof(line6_midi_id)) == 0) {
- switch (buf[5]) {
- case POD_SYSEX_DUMP:
- break;
-
- case POD_SYSEX_SYSTEM:{
- short value =
- ((int)buf[7] << 12) | ((int)buf[8]
- << 8) |
- ((int)buf[9] << 4) | (int)buf[10];
-
- if (buf[6] == POD_monitor_level)
- pod->monitor_level = value;
- break;
- }
-
- case POD_SYSEX_FINISH:
- /* do we need to respond to this? */
- break;
-
- case POD_SYSEX_SAVE:
- break;
-
- case POD_SYSEX_STORE:
- dev_dbg(pod->line6.ifcdev,
- "message %02X not yet implemented\n",
- buf[5]);
- break;
-
- default:
- dev_dbg(pod->line6.ifcdev,
- "unknown sysex message %02X\n",
- buf[5]);
- }
- } else
- if (memcmp
- (buf, pod_version_header,
- sizeof(pod_version_header)) == 0) {
- pod->firmware_version =
- buf[13] * 100 + buf[14] * 10 + buf[15];
- pod->device_id =
- ((int)buf[8] << 16) | ((int)buf[9] << 8) | (int)
- buf[10];
- pod_startup3(pod);
- } else
- dev_dbg(pod->line6.ifcdev, "unknown sysex header\n");
-
- break;
-
- case LINE6_SYSEX_END:
- break;
-
- default:
- dev_dbg(pod->line6.ifcdev, "POD: unknown message %02X\n",
- buf[0]);
+ if (buf[5] == POD_SYSEX_SYSTEM && buf[6] == POD_MONITOR_LEVEL) {
+ short value = ((int)buf[7] << 12) | ((int)buf[8] << 8) |
+ ((int)buf[9] << 4) | (int)buf[10];
+ pod->monitor_level = value;
}
}
@@ -369,7 +314,7 @@ static int snd_pod_control_monitor_put(struct snd_kcontrol *kcontrol,
pod->monitor_level = ucontrol->value.integer.value[0];
pod_set_system_param_int(pod, ucontrol->value.integer.value[0],
- POD_monitor_level);
+ POD_MONITOR_LEVEL);
return 1;
}
@@ -460,7 +405,7 @@ static int pod_try_init(struct usb_interface *interface,
*/
if (pod->line6.properties->capabilities & LINE6_BIT_CONTROL) {
- pod->monitor_level = POD_system_invalid;
+ pod->monitor_level = POD_SYSTEM_INVALID;
/* initiate startup procedure: */
pod_startup1(pod);
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index a529dd3d604e..2f44d56700af 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -87,12 +87,10 @@ static struct line6_pcm_properties toneport_pcm_properties = {
static int led_red = 0x00;
static int led_green = 0x26;
-struct ToneportSourceInfo {
+static const struct {
const char *name;
int code;
-};
-
-static const struct ToneportSourceInfo toneport_source_info[] = {
+} toneport_source_info[] = {
{"Microphone", 0x0a01},
{"Line", 0x0801},
{"Instrument", 0x0b01},
diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c
index 4fca58f11245..bd0f694fa8d8 100644
--- a/drivers/staging/line6/variax.c
+++ b/drivers/staging/line6/variax.c
@@ -133,13 +133,6 @@ void line6_variax_process_message(struct usb_line6_variax *variax)
const unsigned char *buf = variax->line6.buffer_message;
switch (buf[0]) {
- case LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST:
- break;
-
- case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_DEVICE:
- case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST:
- break;
-
case LINE6_RESET:
dev_info(variax->line6.ifcdev, "VARIAX reset\n");
break;
@@ -154,13 +147,6 @@ void line6_variax_process_message(struct usb_line6_variax *variax)
variax_startup4((unsigned long)variax);
}
break;
-
- case LINE6_SYSEX_END:
- break;
-
- default:
- dev_dbg(variax->line6.ifcdev,
- "Variax: unknown message %02X\n", buf[0]);
}
}
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 238910373f5c..479c643da2f6 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -396,7 +396,9 @@ dt3155_open(struct file *filp)
pd->q->drv_priv = pd;
pd->curr_buf = NULL;
pd->field_count = 0;
- vb2_queue_init(pd->q); /* cannot fail */
+ ret = vb2_queue_init(pd->q);
+ if (ret < 0)
+ return ret;
INIT_LIST_HEAD(&pd->dmaq);
spin_lock_init(&pd->lock);
/* disable all irqs, clear all irq flags */
diff --git a/drivers/staging/media/go7007/go7007-fw.c b/drivers/staging/media/go7007/go7007-fw.c
index c9a6409edfe3..f99c05b454b0 100644
--- a/drivers/staging/media/go7007/go7007-fw.c
+++ b/drivers/staging/media/go7007/go7007-fw.c
@@ -382,8 +382,8 @@ static int gen_mjpeghdr_to_package(struct go7007 *go, __le16 *code, int space)
buf = kzalloc(4096, GFP_KERNEL);
if (buf == NULL) {
- printk(KERN_ERR "go7007: unable to allocate 4096 bytes for "
- "firmware construction\n");
+ dev_err(go->dev,
+ "unable to allocate 4096 bytes for firmware construction\n");
return -1;
}
@@ -652,8 +652,8 @@ static int gen_mpeg1hdr_to_package(struct go7007 *go,
buf = kzalloc(5120, GFP_KERNEL);
if (buf == NULL) {
- printk(KERN_ERR "go7007: unable to allocate 5120 bytes for "
- "firmware construction\n");
+ dev_err(go->dev,
+ "unable to allocate 5120 bytes for firmware construction\n");
return -1;
}
framelen[0] = mpeg1_frame_header(go, buf, 0, 1, PFRAME);
@@ -839,8 +839,8 @@ static int gen_mpeg4hdr_to_package(struct go7007 *go,
buf = kzalloc(5120, GFP_KERNEL);
if (buf == NULL) {
- printk(KERN_ERR "go7007: unable to allocate 5120 bytes for "
- "firmware construction\n");
+ dev_err(go->dev,
+ "unable to allocate 5120 bytes for firmware construction\n");
return -1;
}
framelen[0] = mpeg4_frame_header(go, buf, 0, PFRAME);
@@ -1545,9 +1545,8 @@ static int do_special(struct go7007 *go, u16 type, __le16 *code, int space,
case SPECIAL_MODET:
return modet_to_package(go, code, space);
}
- printk(KERN_ERR
- "go7007: firmware file contains unsupported feature %04x\n",
- type);
+ dev_err(go->dev,
+ "firmware file contains unsupported feature %04x\n", type);
return -1;
}
@@ -1577,15 +1576,16 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
return -1;
}
if (request_firmware(&fw_entry, go->board_info->firmware, go->dev)) {
- printk(KERN_ERR
- "go7007: unable to load firmware from file \"%s\"\n",
+ dev_err(go->dev,
+ "unable to load firmware from file \"%s\"\n",
go->board_info->firmware);
return -1;
}
code = kzalloc(codespace * 2, GFP_KERNEL);
if (code == NULL) {
- printk(KERN_ERR "go7007: unable to allocate %d bytes for "
- "firmware construction\n", codespace * 2);
+ dev_err(go->dev,
+ "unable to allocate %d bytes for firmware construction\n",
+ codespace * 2);
goto fw_failed;
}
src = (__le16 *)fw_entry->data;
@@ -1594,9 +1594,9 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
chunk_flags = __le16_to_cpu(src[0]);
chunk_len = __le16_to_cpu(src[1]);
if (chunk_len + 2 > srclen) {
- printk(KERN_ERR "go7007: firmware file \"%s\" "
- "appears to be corrupted\n",
- go->board_info->firmware);
+ dev_err(go->dev,
+ "firmware file \"%s\" appears to be corrupted\n",
+ go->board_info->firmware);
goto fw_failed;
}
if (chunk_flags & mode_flag) {
@@ -1604,17 +1604,15 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
ret = do_special(go, __le16_to_cpu(src[2]),
&code[i], codespace - i, framelen);
if (ret < 0) {
- printk(KERN_ERR "go7007: insufficient "
- "memory for firmware "
- "construction\n");
+ dev_err(go->dev,
+ "insufficient memory for firmware construction\n");
goto fw_failed;
}
i += ret;
} else {
if (codespace - i < chunk_len) {
- printk(KERN_ERR "go7007: insufficient "
- "memory for firmware "
- "construction\n");
+ dev_err(go->dev,
+ "insufficient memory for firmware construction\n");
goto fw_failed;
}
memcpy(&code[i], &src[2], chunk_len * 2);
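
Editorial note: the go7007-fw.c changes above replace bare printk(KERN_ERR "go7007: ...") calls with dev_err(go->dev, ...), which prefixes messages with the device name automatically and drops the hand-split format strings. The general shape of the pattern, assuming a device pointer is available in the calling context (the helper below is hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical helper: report an allocation failure against a device. */
static void *example_alloc_report(struct device *dev, size_t len)
{
	void *buf = kzalloc(len, GFP_KERNEL);

	if (!buf)
		dev_err(dev, "unable to allocate %zu bytes for firmware construction\n",
			len);
	return buf;
}
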
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index 980371b02749..a78133b67de2 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -812,7 +812,7 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return retval;
mutex_lock(&gofh->lock);
- if (buf->index < 0 || buf->index >= gofh->buf_count)
+ if (buf->index >= gofh->buf_count)
goto unlock_and_return;
gobuf = &gofh->bufs[buf->index];
diff --git a/drivers/staging/media/go7007/s2250-board.c b/drivers/staging/media/go7007/s2250-board.c
index 014d38410c99..b3974100c6cd 100644
--- a/drivers/staging/media/go7007/s2250-board.c
+++ b/drivers/staging/media/go7007/s2250-board.c
@@ -688,15 +688,4 @@ static struct i2c_driver s2250_driver = {
.id_table = s2250_id,
};
-static __init int init_s2250(void)
-{
- return i2c_add_driver(&s2250_driver);
-}
-
-static __exit void exit_s2250(void)
-{
- i2c_del_driver(&s2250_driver);
-}
-
-module_init(init_s2250);
-module_exit(exit_s2250);
+module_i2c_driver(s2250_driver);
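
Editorial note: this conversion (and the wis-* conversions that follow) replaces hand-written module_init()/module_exit() pairs that only call i2c_add_driver()/i2c_del_driver() with the module_i2c_driver() helper macro, which expands to exactly that boilerplate. A minimal sketch of an I2C driver registered this way, with hypothetical names:

#include <linux/module.h>
#include <linux/i2c.h>

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	return 0;
}

static int example_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example",
	},
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_id,
};

/* Expands to module_init()/module_exit() wrappers around
 * i2c_add_driver()/i2c_del_driver(). */
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL v2");
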
diff --git a/drivers/staging/media/go7007/wis-ov7640.c b/drivers/staging/media/go7007/wis-ov7640.c
index 6bc9470fecb6..9f01657f884a 100644
--- a/drivers/staging/media/go7007/wis-ov7640.c
+++ b/drivers/staging/media/go7007/wis-ov7640.c
@@ -29,8 +29,7 @@ struct wis_ov7640 {
int hue;
};
-static u8 initial_registers[] =
-{
+static u8 initial_registers[] = {
0x12, 0x80,
0x12, 0x54,
0x14, 0x24,
@@ -60,12 +59,12 @@ static int wis_ov7640_probe(struct i2c_client *client,
client->flags = I2C_CLIENT_SCCB;
- printk(KERN_DEBUG
+ dev_dbg(&client->dev,
"wis-ov7640: initializing OV7640 at address %d on %s\n",
client->addr, adapter->name);
if (write_regs(client, initial_registers) < 0) {
- printk(KERN_ERR "wis-ov7640: error initializing OV7640\n");
+ dev_err(&client->dev, "wis-ov7640: error initializing OV7640\n");
return -ENODEV;
}
@@ -92,17 +91,6 @@ static struct i2c_driver wis_ov7640_driver = {
.id_table = wis_ov7640_id,
};
-static int __init wis_ov7640_init(void)
-{
- return i2c_add_driver(&wis_ov7640_driver);
-}
-
-static void __exit wis_ov7640_cleanup(void)
-{
- i2c_del_driver(&wis_ov7640_driver);
-}
-
-module_init(wis_ov7640_init);
-module_exit(wis_ov7640_cleanup);
+module_i2c_driver(wis_ov7640_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-saa7113.c b/drivers/staging/media/go7007/wis-saa7113.c
index 05e0e1083864..8810c1e6e1ed 100644
--- a/drivers/staging/media/go7007/wis-saa7113.c
+++ b/drivers/staging/media/go7007/wis-saa7113.c
@@ -32,8 +32,7 @@ struct wis_saa7113 {
int hue;
};
-static u8 initial_registers[] =
-{
+static u8 initial_registers[] = {
0x01, 0x08,
0x02, 0xc0,
0x03, 0x33,
@@ -282,12 +281,12 @@ static int wis_saa7113_probe(struct i2c_client *client,
dec->hue = 0;
i2c_set_clientdata(client, dec);
- printk(KERN_DEBUG
+ dev_dbg(&client->dev,
"wis-saa7113: initializing SAA7113 at address %d on %s\n",
client->addr, adapter->name);
if (write_regs(client, initial_registers) < 0) {
- printk(KERN_ERR
+ dev_err(&client->dev,
"wis-saa7113: error initializing SAA7113\n");
kfree(dec);
return -ENODEV;
@@ -320,17 +319,6 @@ static struct i2c_driver wis_saa7113_driver = {
.id_table = wis_saa7113_id,
};
-static int __init wis_saa7113_init(void)
-{
- return i2c_add_driver(&wis_saa7113_driver);
-}
-
-static void __exit wis_saa7113_cleanup(void)
-{
- i2c_del_driver(&wis_saa7113_driver);
-}
-
-module_init(wis_saa7113_init);
-module_exit(wis_saa7113_cleanup);
+module_i2c_driver(wis_saa7113_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-saa7115.c b/drivers/staging/media/go7007/wis-saa7115.c
index 46cff59e28b7..fa86acd3fdf0 100644
--- a/drivers/staging/media/go7007/wis-saa7115.c
+++ b/drivers/staging/media/go7007/wis-saa7115.c
@@ -32,8 +32,7 @@ struct wis_saa7115 {
int hue;
};
-static u8 initial_registers[] =
-{
+static u8 initial_registers[] = {
0x01, 0x08,
0x02, 0xc0,
0x03, 0x20,
@@ -415,12 +414,12 @@ static int wis_saa7115_probe(struct i2c_client *client,
dec->hue = 0;
i2c_set_clientdata(client, dec);
- printk(KERN_DEBUG
+ dev_dbg(&client->dev,
"wis-saa7115: initializing SAA7115 at address %d on %s\n",
client->addr, adapter->name);
if (write_regs(client, initial_registers) < 0) {
- printk(KERN_ERR
+ dev_err(&client->dev,
"wis-saa7115: error initializing SAA7115\n");
kfree(dec);
return -ENODEV;
@@ -453,17 +452,6 @@ static struct i2c_driver wis_saa7115_driver = {
.id_table = wis_saa7115_id,
};
-static int __init wis_saa7115_init(void)
-{
- return i2c_add_driver(&wis_saa7115_driver);
-}
-
-static void __exit wis_saa7115_cleanup(void)
-{
- i2c_del_driver(&wis_saa7115_driver);
-}
-
-module_init(wis_saa7115_init);
-module_exit(wis_saa7115_cleanup);
+module_i2c_driver(wis_saa7115_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-sony-tuner.c b/drivers/staging/media/go7007/wis-sony-tuner.c
index 8f1b7d4f6a2e..1291ab79d2af 100644
--- a/drivers/staging/media/go7007/wis-sony-tuner.c
+++ b/drivers/staging/media/go7007/wis-sony-tuner.c
@@ -704,17 +704,6 @@ static struct i2c_driver wis_sony_tuner_driver = {
.id_table = wis_sony_tuner_id,
};
-static int __init wis_sony_tuner_init(void)
-{
- return i2c_add_driver(&wis_sony_tuner_driver);
-}
-
-static void __exit wis_sony_tuner_cleanup(void)
-{
- i2c_del_driver(&wis_sony_tuner_driver);
-}
-
-module_init(wis_sony_tuner_init);
-module_exit(wis_sony_tuner_cleanup);
+module_i2c_driver(wis_sony_tuner_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-tw2804.c b/drivers/staging/media/go7007/wis-tw2804.c
index 9134f03e3cf0..d6410ee01be8 100644
--- a/drivers/staging/media/go7007/wis-tw2804.c
+++ b/drivers/staging/media/go7007/wis-tw2804.c
@@ -341,17 +341,6 @@ static struct i2c_driver wis_tw2804_driver = {
.id_table = wis_tw2804_id,
};
-static int __init wis_tw2804_init(void)
-{
- return i2c_add_driver(&wis_tw2804_driver);
-}
-
-static void __exit wis_tw2804_cleanup(void)
-{
- i2c_del_driver(&wis_tw2804_driver);
-}
-
-module_init(wis_tw2804_init);
-module_exit(wis_tw2804_cleanup);
+module_i2c_driver(wis_tw2804_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-tw9903.c b/drivers/staging/media/go7007/wis-tw9903.c
index 9230f4a80529..94071def3bb4 100644
--- a/drivers/staging/media/go7007/wis-tw9903.c
+++ b/drivers/staging/media/go7007/wis-tw9903.c
@@ -325,17 +325,6 @@ static struct i2c_driver wis_tw9903_driver = {
.id_table = wis_tw9903_id,
};
-static int __init wis_tw9903_init(void)
-{
- return i2c_add_driver(&wis_tw9903_driver);
-}
-
-static void __exit wis_tw9903_cleanup(void)
-{
- i2c_del_driver(&wis_tw9903_driver);
-}
-
-module_init(wis_tw9903_init);
-module_exit(wis_tw9903_cleanup);
+module_i2c_driver(wis_tw9903_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-uda1342.c b/drivers/staging/media/go7007/wis-uda1342.c
index 0127be2f3be0..05ac798f35f7 100644
--- a/drivers/staging/media/go7007/wis-uda1342.c
+++ b/drivers/staging/media/go7007/wis-uda1342.c
@@ -98,17 +98,6 @@ static struct i2c_driver wis_uda1342_driver = {
.id_table = wis_uda1342_id,
};
-static int __init wis_uda1342_init(void)
-{
- return i2c_add_driver(&wis_uda1342_driver);
-}
-
-static void __exit wis_uda1342_cleanup(void)
-{
- i2c_del_driver(&wis_uda1342_driver);
-}
-
-module_init(wis_uda1342_init);
-module_exit(wis_uda1342_cleanup);
+module_i2c_driver(wis_uda1342_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 71e3bf2937f9..b5d0088f3102 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -1239,6 +1239,10 @@ static int __init lirc_serial_init_module(void)
}
}
+ /* make sure sense is either -1, 0, or 1 */
+ if (sense != -1)
+ sense = !!sense;
+
result = lirc_serial_init();
if (result)
return result;
@@ -1298,7 +1302,7 @@ MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
module_param(share_irq, bool, S_IRUGO);
MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
-module_param(sense, bool, S_IRUGO);
+module_param(sense, int, S_IRUGO);
MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit"
" (0 = active high, 1 = active low )");
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index f950ab890e2e..e5ae42a0b44a 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -1,9 +1,5 @@
ToDo list (incomplete, unordered)
- add compile as module support
- - fix clk usage
- should not be using clk_get_sys(), but clk_get(&pdev->dev, conn)
- where conn is either NULL if the device only has one clock, or
- the device specific name if it has multiple clocks.
- move half of the nvec init stuff to i2c-tegra.c
- move event handling to nvec_events
- finish suspend/resume support
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 2830946860d1..cf159365b0ee 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -37,8 +37,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-
-#include <mach/clk.h>
+#include <linux/clk/tegra.h>
#include "nvec.h"
@@ -72,9 +71,16 @@ enum nvec_msg_category {
NVEC_MSG_TX,
};
-static const unsigned char EC_DISABLE_EVENT_REPORTING[3] = "\x04\x00\x00";
-static const unsigned char EC_ENABLE_EVENT_REPORTING[3] = "\x04\x00\x01";
-static const unsigned char EC_GET_FIRMWARE_VERSION[2] = "\x07\x15";
+enum nvec_sleep_subcmds {
+ GLOBAL_EVENTS,
+ AP_PWR_DOWN,
+ AP_SUSPEND,
+};
+
+#define CNF_EVENT_REPORTING 0x01
+#define GET_FIRMWARE_VERSION 0x15
+#define LID_SWITCH BIT(1)
+#define PWR_BUTTON BIT(15)
static struct nvec_chip *nvec_power_handle;
@@ -318,6 +324,41 @@ struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
EXPORT_SYMBOL(nvec_write_sync);
/**
+ * nvec_toggle_global_events - enables or disables global event reporting
+ * @nvec: nvec handle
+ * @state: true for enable, false for disable
+ *
+ * This switches on/off global event reports by the embedded controller.
+ */
+static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
+{
+ unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };
+
+ nvec_write_async(nvec, global_events, 3);
+}
+
+/**
+ * nvec_event_mask - fill the command string with event bitfield
+ * @ev: points to the event command string
+ * @mask: bit to insert into the event mask
+ *
+ * The configure event command expects a 32-bit bitfield which describes
+ * which events to enable. The bitfield has the following structure
+ * (from highest byte to lowest):
+ * system state bits 7-0
+ * system state bits 15-8
+ * oem system state bits 7-0
+ * oem system state bits 15-8
+ */
+static void nvec_event_mask(char *ev, u32 mask)
+{
+ ev[3] = (mask >> 16) & 0xff;
+ ev[4] = (mask >> 24) & 0xff;
+ ev[5] = (mask >> 0) & 0xff;
+ ev[6] = (mask >> 8) & 0xff;
+}
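As a quick illustration (not part of the patch), the layout described above means a single event bit lands in exactly one of the four mask bytes, and each call rewrites all four:

	char enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };

	nvec_event_mask(enable_event, LID_SWITCH);  /* LID_SWITCH == BIT(1)  */
	/* enable_event[5] == 0x02, the other mask bytes are 0x00 */

	nvec_event_mask(enable_event, PWR_BUTTON);  /* PWR_BUTTON == BIT(15) */
	/* enable_event[6] == 0x80, enable_event[5] is rewritten to 0x00 */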
+
+/**
* nvec_request_master - Process outgoing messages
* @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
*
@@ -711,8 +752,10 @@ static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
static void nvec_power_off(void)
{
- nvec_write_async(nvec_power_handle, EC_DISABLE_EVENT_REPORTING, 3);
- nvec_write_async(nvec_power_handle, "\x04\x01", 2);
+ char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };
+
+ nvec_toggle_global_events(nvec_power_handle, false);
+ nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}
static int tegra_nvec_probe(struct platform_device *pdev)
@@ -724,6 +767,9 @@ static int tegra_nvec_probe(struct platform_device *pdev)
struct nvec_msg *msg;
struct resource *res;
void __iomem *base;
+ char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
+ unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
+ enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
if (nvec == NULL) {
@@ -759,11 +805,9 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return -ENODEV;
}
- base = devm_request_and_ioremap(&pdev->dev, res);
- if (!base) {
- dev_err(&pdev->dev, "Can't ioremap I2C region\n");
- return -ENOMEM;
- }
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
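The devm_ioremap_resource() conversion above (and the similar one in omap-bandgap.c further down) follows the now-common probe pattern. A minimal sketch with illustrative names, headers trimmed to the essentials:

	#include <linux/platform_device.h>
	#include <linux/err.h>
	#include <linux/io.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))	/* helper already printed an error */
			return PTR_ERR(base);

		return 0;
	}

The helper requests the memory region, ioremaps it, and logs its own error message, so the explicit dev_err() and -ENOMEM handling can be dropped.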
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -771,7 +815,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return -ENODEV;
}
- i2c_clk = clk_get_sys("tegra-i2c.2", "div-clk");
+ i2c_clk = clk_get(&pdev->dev, "div-clk");
if (IS_ERR(i2c_clk)) {
dev_err(nvec->dev, "failed to get controller clock\n");
return -ENODEV;
@@ -815,8 +859,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
/* enable event reporting */
- nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
- sizeof(EC_ENABLE_EVENT_REPORTING));
+ nvec_toggle_global_events(nvec, true);
nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
@@ -825,8 +868,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
pm_power_off = nvec_power_off;
/* Get Firmware Version */
- msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
- sizeof(EC_GET_FIRMWARE_VERSION));
+ msg = nvec_write_sync(nvec, get_firmware_version, 2);
if (msg) {
dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
@@ -841,13 +883,15 @@ static int tegra_nvec_probe(struct platform_device *pdev)
dev_err(nvec->dev, "error adding subdevices\n");
/* unmute speakers? */
- nvec_write_async(nvec, "\x0d\x10\x59\x95", 4);
+ nvec_write_async(nvec, unmute_speakers, 4);
/* enable lid switch event */
- nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);
+ nvec_event_mask(enable_event, LID_SWITCH);
+ nvec_write_async(nvec, enable_event, 7);
/* enable power button event */
- nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x80\x00", 7);
+ nvec_event_mask(enable_event, PWR_BUTTON);
+ nvec_write_async(nvec, enable_event, 7);
return 0;
}
@@ -856,7 +900,7 @@ static int tegra_nvec_remove(struct platform_device *pdev)
{
struct nvec_chip *nvec = platform_get_drvdata(pdev);
- nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
+ nvec_toggle_global_events(nvec, false);
mfd_remove_devices(nvec->dev);
cancel_work_sync(&nvec->rx_work);
cancel_work_sync(&nvec->tx_work);
@@ -870,13 +914,14 @@ static int nvec_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct nvec_chip *nvec = platform_get_drvdata(pdev);
struct nvec_msg *msg;
+ char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };
dev_dbg(nvec->dev, "suspending\n");
/* keep these sync or you'll break suspend */
- msg = nvec_write_sync(nvec, EC_DISABLE_EVENT_REPORTING, 3);
- nvec_msg_free(nvec, msg);
- msg = nvec_write_sync(nvec, "\x04\x02", 2);
+ nvec_toggle_global_events(nvec, false);
+
+ msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
nvec_msg_free(nvec, msg);
nvec_disable_i2c_slave(nvec);
@@ -891,7 +936,7 @@ static int nvec_resume(struct device *dev)
dev_dbg(nvec->dev, "resuming\n");
tegra_init_i2c_slave(nvec);
- nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 3);
+ nvec_toggle_global_events(nvec, true);
return 0;
}
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index ba6ed8f4e8a3..b7a14bc0ab91 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -71,9 +71,12 @@ enum nvec_event_size {
enum nvec_msg_type {
NVEC_SYS = 1,
NVEC_BAT,
- NVEC_KBD = 5,
+ NVEC_GPIO,
+ NVEC_SLEEP,
+ NVEC_KBD,
NVEC_PS2,
NVEC_CNTL,
+ NVEC_OEM0 = 0x0d,
NVEC_KB_EVT = 0x80,
NVEC_PS2_EVT,
};
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index 7cb149bf3d3f..7445ce6422bb 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -21,10 +21,14 @@
#include "nvec-keytable.h"
#include "nvec.h"
-#define ACK_KBD_EVENT {'\x05', '\xed', '\x01'}
+enum kbd_subcmds {
+ CNFG_WAKE = 3,
+ CNFG_WAKE_KEY_REPORTING,
+ SET_LEDS = 0xed,
+ ENABLE_KBD = 0xf4,
+ DISABLE_KBD,
+};
-static const char led_on[3] = "\x05\xed\x07";
-static const char led_off[3] = "\x05\xed\x00";
static unsigned char keycodes[ARRAY_SIZE(code_tab_102us)
+ ARRAY_SIZE(extcode_tab_us102)];
@@ -39,12 +43,15 @@ static struct nvec_keys keys_dev;
static void nvec_kbd_toggle_led(void)
{
+ char buf[] = { NVEC_KBD, SET_LEDS, 0 };
+
keys_dev.caps_lock = !keys_dev.caps_lock;
if (keys_dev.caps_lock)
- nvec_write_async(keys_dev.nvec, led_on, sizeof(led_on));
- else
- nvec_write_async(keys_dev.nvec, led_off, sizeof(led_off));
+ /* should be BIT(0) only, firmware bug? */
+ buf[2] = BIT(0) | BIT(1) | BIT(2);
+
+ nvec_write_async(keys_dev.nvec, buf, sizeof(buf));
}
static int nvec_keys_notifier(struct notifier_block *nb,
@@ -82,8 +89,8 @@ static int nvec_keys_notifier(struct notifier_block *nb,
static int nvec_kbd_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
- unsigned char buf[] = ACK_KBD_EVENT;
struct nvec_chip *nvec = keys_dev.nvec;
+ char buf[] = { NVEC_KBD, SET_LEDS, 0 };
if (type == EV_REP)
return 0;
@@ -105,6 +112,11 @@ static int nvec_kbd_probe(struct platform_device *pdev)
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
int i, j, err;
struct input_dev *idev;
+ char clear_leds[] = { NVEC_KBD, SET_LEDS, 0 },
+ enable_kbd[] = { NVEC_KBD, ENABLE_KBD },
+ cnfg_wake[] = { NVEC_KBD, CNFG_WAKE, true, true },
+ cnfg_wake_key_reporting[] = { NVEC_KBD, CNFG_WAKE_KEY_REPORTING,
+ true };
j = 0;
@@ -138,19 +150,15 @@ static int nvec_kbd_probe(struct platform_device *pdev)
nvec_register_notifier(nvec, &keys_dev.notifier, 0);
/* Enable keyboard */
- nvec_write_async(nvec, "\x05\xf4", 2);
+ nvec_write_async(nvec, enable_kbd, 2);
- /* keyboard reset? */
- nvec_write_async(nvec, "\x05\x03\x01\x01", 4);
- nvec_write_async(nvec, "\x05\x04\x01", 3);
- nvec_write_async(nvec, "\x06\x01\xff\x03", 4);
-/* FIXME
- wait until keyboard reset is finished
- or until we have a sync write */
- mdelay(1000);
+ /* configures wake on special keys */
+ nvec_write_async(nvec, cnfg_wake, 4);
+ /* enable wake key reporting */
+ nvec_write_async(nvec, cnfg_wake_key_reporting, 3);
/* Disable caps lock LED */
- nvec_write_async(nvec, led_off, sizeof(led_off));
+ nvec_write_async(nvec, clear_leds, sizeof(clear_leds));
return 0;
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index b7b6d54f58ec..296f7b9a8c8c 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -22,6 +22,8 @@
#include "nvec.h"
+#define GET_SYSTEM_STATUS 0x00
+
struct nvec_power {
struct notifier_block notifier;
struct delayed_work poller;
@@ -111,7 +113,7 @@ static const int bat_init[] = {
static void get_bat_mfg_data(struct nvec_power *power)
{
int i;
- char buf[] = { '\x02', '\x00' };
+ char buf[] = { NVEC_BAT, SLOT_STATUS };
for (i = 0; i < ARRAY_SIZE(bat_init); i++) {
buf[1] = bat_init[i];
@@ -348,7 +350,7 @@ static int const bat_iter[] = {
static void nvec_power_poll(struct work_struct *work)
{
- char buf[] = { '\x01', '\x00' };
+ char buf[] = { NVEC_SYS, GET_SYSTEM_STATUS };
struct nvec_power *power = container_of(work, struct nvec_power,
poller.work);
@@ -361,7 +363,7 @@ static void nvec_power_poll(struct work_struct *work)
/* select a battery request function via round robin
doing it all at once seems to overload the power supply */
- buf[0] = '\x02'; /* battery */
+ buf[0] = NVEC_BAT;
buf[1] = bat_iter[counter++];
nvec_write_async(power->nvec, buf, 2);
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 88dd288bf3d7..aff6b9b9f9aa 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -21,9 +21,11 @@
#include "nvec.h"
-#define START_STREAMING {'\x06', '\x03', '\x06'}
-#define STOP_STREAMING {'\x06', '\x04'}
-#define SEND_COMMAND {'\x06', '\x01', '\xf4', '\x01'}
+#define PACKET_SIZE 6
+
+#define ENABLE_MOUSE 0xf4
+#define DISABLE_MOUSE 0xf5
+#define PSMOUSE_RST 0xff
#ifdef NVEC_PS2_DEBUG
#define NVEC_PHD(str, buf, len) \
@@ -33,7 +35,12 @@
#define NVEC_PHD(str, buf, len)
#endif
-static const unsigned char MOUSE_RESET[] = {'\x06', '\x01', '\xff', '\x03'};
+enum ps2_subcmds {
+ SEND_COMMAND = 1,
+ RECEIVE_N,
+ AUTO_RECEIVE_N,
+ CANCEL_AUTO_RECEIVE,
+};
struct nvec_ps2 {
struct serio *ser_dev;
@@ -45,19 +52,19 @@ static struct nvec_ps2 ps2_dev;
static int ps2_startstreaming(struct serio *ser_dev)
{
- unsigned char buf[] = START_STREAMING;
+ unsigned char buf[] = { NVEC_PS2, AUTO_RECEIVE_N, PACKET_SIZE };
return nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
static void ps2_stopstreaming(struct serio *ser_dev)
{
- unsigned char buf[] = STOP_STREAMING;
+ unsigned char buf[] = { NVEC_PS2, CANCEL_AUTO_RECEIVE };
nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
static int ps2_sendcommand(struct serio *ser_dev, unsigned char cmd)
{
- unsigned char buf[] = SEND_COMMAND;
+ unsigned char buf[] = { NVEC_PS2, SEND_COMMAND, ENABLE_MOUSE, 1 };
buf[2] = cmd & 0xff;
@@ -97,6 +104,7 @@ static int nvec_mouse_probe(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev;
+ char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
if (ser_dev == NULL)
@@ -118,7 +126,7 @@ static int nvec_mouse_probe(struct platform_device *pdev)
serio_register_port(ser_dev);
/* mouse reset */
- nvec_write_async(nvec, MOUSE_RESET, 4);
+ nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
return 0;
}
@@ -133,27 +141,22 @@ static int nvec_mouse_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int nvec_mouse_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
-
/* disable mouse */
- nvec_write_async(nvec, "\x06\xf4", 2);
+ ps2_sendcommand(ps2_dev.ser_dev, DISABLE_MOUSE);
/* send cancel autoreceive */
- nvec_write_async(nvec, "\x06\x04", 2);
+ ps2_stopstreaming(ps2_dev.ser_dev);
return 0;
}
static int nvec_mouse_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
-
+ /* start streaming */
ps2_startstreaming(ps2_dev.ser_dev);
/* enable mouse */
- nvec_write_async(nvec, "\x06\xf5", 2);
+ ps2_sendcommand(ps2_dev.ser_dev, ENABLE_MOUSE);
return 0;
}
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index f15b31b37ca5..83b103091cf2 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -46,9 +46,9 @@
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "cavium-ethernet");
- strcpy(info->version, OCTEON_ETHERNET_VERSION);
- strcpy(info->bus_info, "Builtin");
+ strlcpy(info->driver, "cavium-ethernet", sizeof(info->driver));
+ strlcpy(info->version, OCTEON_ETHERNET_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
}
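The strcpy() to strlcpy() change is a bounds fix: the ethtool_drvinfo fields are fixed-size char arrays, and strlcpy() never writes past the given size and always NUL-terminates, truncating if necessary. A tiny illustration:

	char buf[8];

	strlcpy(buf, "cavium-ethernet", sizeof(buf));
	/* buf now holds "cavium-" plus the terminating NUL */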
static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ef32dc1bbc80..c3a90e7012af 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -72,7 +72,7 @@ int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
"\tPOW group to receive packets from. All ethernet hardware\n"
- "\twill be configured to send incomming packets to this POW\n"
+ "\twill be configured to send incoming packets to this POW\n"
"\tgroup. Also any other software can submit packets to this\n"
"\tgroup for the kernel to process.");
@@ -453,12 +453,10 @@ int cvm_oct_common_init(struct net_device *dev)
if (priv->of_node)
mac = of_get_mac_address(priv->of_node);
- if (mac && is_valid_ether_addr(mac)) {
+ if (mac && is_valid_ether_addr(mac))
memcpy(dev->dev_addr, mac, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
- } else {
+ else
eth_hw_addr_random(dev);
- }
/*
* Force the interface to use the POW send if always_use_pow
diff --git a/drivers/staging/omap-thermal/omap-bandgap.c b/drivers/staging/omap-thermal/omap-bandgap.c
index 8346e3450f83..dcc1448dbf8e 100644
--- a/drivers/staging/omap-thermal/omap-bandgap.c
+++ b/drivers/staging/omap-thermal/omap-bandgap.c
@@ -568,8 +568,6 @@ int omap_bandgap_read_update_interval(struct omap_bandgap *bg_ptr, int id,
tsr = bg_ptr->conf->sensors[id].registers;
time = omap_bandgap_readl(bg_ptr, tsr->bgap_counter);
- if (ret)
- return ret;
time = (time & tsr->counter_mask) >> __ffs(tsr->counter_mask);
time = time * 1000 / bg_ptr->clk_rate;
@@ -820,15 +818,12 @@ static struct omap_bandgap *omap_bandgap_build(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
break;
- chunk = devm_request_and_ioremap(&pdev->dev, res);
+ chunk = devm_ioremap_resource(&pdev->dev, res);
if (i == 0)
bg_ptr->base = chunk;
- if (!chunk) {
- dev_err(&pdev->dev,
- "failed to request the IO (%d:%pR).\n",
- i, res);
- return ERR_PTR(-EADDRNOTAVAIL);
- }
+ if (IS_ERR(chunk))
+ return ERR_CAST(chunk);
+
i++;
} while (res);
diff --git a/drivers/staging/omap-thermal/omap-thermal-common.c b/drivers/staging/omap-thermal/omap-thermal-common.c
index 61f1070c6667..79a55aaae5a3 100644
--- a/drivers/staging/omap-thermal/omap-thermal-common.c
+++ b/drivers/staging/omap-thermal/omap-thermal-common.c
@@ -260,7 +260,7 @@ int omap_thermal_expose_sensor(struct omap_bandgap *bg_ptr, int id,
data = omap_bandgap_get_sensor_data(bg_ptr, id);
- if (!data)
+ if (IS_ERR(data))
data = omap_thermal_build_data(bg_ptr, id);
if (!data)
@@ -309,7 +309,7 @@ int omap_thermal_register_cpu_cooling(struct omap_bandgap *bg_ptr, int id)
struct omap_thermal_data *data;
data = omap_bandgap_get_sensor_data(bg_ptr, id);
- if (!data)
+ if (IS_ERR(data))
data = omap_thermal_build_data(bg_ptr, id);
if (!data)
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/staging/omapdrm/Kconfig
index b724a4131435..09f65dc3d2c8 100644
--- a/drivers/staging/omapdrm/Kconfig
+++ b/drivers/staging/omapdrm/Kconfig
@@ -3,8 +3,8 @@ config DRM_OMAP
tristate "OMAP DRM"
depends on DRM && !CONFIG_FB_OMAP2
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+ depends on OMAP2_DSS
select DRM_KMS_HELPER
- select OMAP2_DSS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/staging/omapdrm/Makefile
index 1ca0e0016de4..d85e058f2845 100644
--- a/drivers/staging/omapdrm/Makefile
+++ b/drivers/staging/omapdrm/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -Iinclude/drm -Werror
omapdrm-y := omap_drv.o \
+ omap_irq.o \
omap_debugfs.o \
omap_crtc.o \
omap_plane.o \
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
index 938c7888ca31..abeeb00aaa12 100644
--- a/drivers/staging/omapdrm/TODO
+++ b/drivers/staging/omapdrm/TODO
@@ -17,9 +17,6 @@ TODO
. Revisit GEM sync object infrastructure.. TTM has some framework for this
already. Possibly this could be refactored out and made more common?
There should be some way to do this with less wheel-reinvention.
-. Review DSS vs KMS mismatches. The omap_dss_device is sort of part encoder,
- part connector. Which results in a bit of duct tape to fwd calls from
- encoder to connector. Possibly this could be done a bit better.
. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
access is made from any component in the system. Which means on suspend
CRTC's should be disabled, and on resume the LUT should be reprogrammed
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
index 91edb3f96972..8979c80adb5f 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -31,9 +31,10 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *dssdev;
+ struct drm_encoder *encoder;
};
-static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
struct omap_video_timings *timings)
{
mode->clock = timings->pixel_clock;
@@ -64,7 +65,7 @@ static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
mode->flags |= DRM_MODE_FLAG_NVSYNC;
}
-static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
struct drm_display_mode *mode)
{
timings->pixel_clock = mode->clock;
@@ -96,48 +97,7 @@ static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
}
-static void omap_connector_dpms(struct drm_connector *connector, int mode)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- int old_dpms;
-
- DBG("%s: %d", dssdev->name, mode);
-
- old_dpms = connector->dpms;
-
- /* from off to on, do from crtc to connector */
- if (mode < old_dpms)
- drm_helper_connector_dpms(connector, mode);
-
- if (mode == DRM_MODE_DPMS_ON) {
- /* store resume info for suspended displays */
- switch (dssdev->state) {
- case OMAP_DSS_DISPLAY_SUSPENDED:
- dssdev->activate_after_resume = true;
- break;
- case OMAP_DSS_DISPLAY_DISABLED: {
- int ret = dssdev->driver->enable(dssdev);
- if (ret) {
- DBG("%s: failed to enable: %d",
- dssdev->name, ret);
- dssdev->driver->disable(dssdev);
- }
- break;
- }
- default:
- break;
- }
- } else {
- /* TODO */
- }
-
- /* from on to off, do from connector to crtc */
- if (mode > old_dpms)
- drm_helper_connector_dpms(connector, mode);
-}
-
-enum drm_connector_status omap_connector_detect(
+static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
@@ -164,8 +124,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
- dssdev->driver->disable(dssdev);
-
DBG("%s", omap_connector->dssdev->name);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -261,36 +219,12 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector)
{
- int i;
struct omap_connector *omap_connector = to_omap_connector(connector);
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct drm_mode_object *obj;
-
- if (connector->encoder_ids[i] == 0)
- break;
-
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
-
- if (obj) {
- struct drm_encoder *encoder = obj_to_encoder(obj);
- struct omap_overlay_manager *mgr =
- omap_encoder_get_manager(encoder);
- DBG("%s: found %s", omap_connector->dssdev->name,
- mgr->name);
- return encoder;
- }
- }
-
- DBG("%s: no encoder", omap_connector->dssdev->name);
-
- return NULL;
+ return omap_connector->encoder;
}
static const struct drm_connector_funcs omap_connector_funcs = {
- .dpms = omap_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.detect = omap_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = omap_connector_destroy,
@@ -302,34 +236,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.best_encoder = omap_connector_attached_encoder,
};
-/* called from encoder when mode is set, to propagate settings to the dssdev */
-void omap_connector_mode_set(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = connector->dev;
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
- struct omap_video_timings timings = {0};
-
- copy_timings_drm_to_omap(&timings, mode);
-
- DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- omap_connector->dssdev->name,
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
-
- if (dssdrv->check_timings(dssdev, &timings)) {
- dev_err(dev->dev, "could not set timings\n");
- return;
- }
-
- dssdrv->set_timings(dssdev, &timings);
-}
-
/* flush an area of the framebuffer (in case of manual update display that
* is not automatically flushed)
*/
@@ -344,7 +250,8 @@ void omap_connector_flush(struct drm_connector *connector,
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev)
+ int connector_type, struct omap_dss_device *dssdev,
+ struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
@@ -354,12 +261,12 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
omap_dss_get_device(dssdev);
omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
- if (!omap_connector) {
- dev_err(dev->dev, "could not allocate connector\n");
+ if (!omap_connector)
goto fail;
- }
omap_connector->dssdev = dssdev;
+ omap_connector->encoder = encoder;
+
connector = &omap_connector->base;
drm_connector_init(dev, connector, &omap_connector_funcs,
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index d87bd84257bd..32109c09357c 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -28,19 +28,131 @@
struct omap_crtc {
struct drm_crtc base;
struct drm_plane *plane;
+
const char *name;
- int id;
+ int pipe;
+ enum omap_channel channel;
+ struct omap_overlay_manager_info info;
+
+ /*
+ * Temporary: eventually this will go away, but it is needed
+ * for now to keep the output's happy. (They only need
+ * mgr->id.) Eventually this will be replaced w/ something
+ * more common-panel-framework-y
+ */
+ struct omap_overlay_manager mgr;
+
+ struct omap_video_timings timings;
+ bool enabled;
+ bool full_update;
+
+ struct omap_drm_apply apply;
+
+ struct omap_drm_irq apply_irq;
+ struct omap_drm_irq error_irq;
+
+ /* list of in-progress apply's: */
+ struct list_head pending_applies;
+
+ /* list of queued apply's: */
+ struct list_head queued_applies;
+
+ /* for handling queued and in-progress applies: */
+ struct work_struct apply_work;
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
struct drm_framebuffer *old_fb;
+
+ /* for handling page flips without caring about what
+ * context the callback is called from. Possibly we should just
+ * make omap_gem always call the cb from the worker so
+ * we don't have to care about this..
+ *
+ * XXX maybe fold into apply_work??
+ */
+ struct work_struct page_flip_work;
+};
+
+/*
+ * Manager-ops, callbacks from the outputs when they need to configure
+ * the upstream part of the video pipe.
+ *
+ * Most of these we can ignore until we add support for command-mode
+ * panels.. for video-mode the crtc-helpers already do an adequate
+ * job of sequencing the setup of the video pipe in the proper order
+ */
+
+/* we can probably ignore these until we support command-mode panels: */
+static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
+{
+}
+
+static int omap_crtc_enable(struct omap_overlay_manager *mgr)
+{
+ return 0;
+}
+
+static void omap_crtc_disable(struct omap_overlay_manager *mgr)
+{
+}
+
+static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings)
+{
+ struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+ DBG("%s", omap_crtc->name);
+ omap_crtc->timings = *timings;
+ omap_crtc->full_update = true;
+}
+
+static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config)
+{
+ struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+ DBG("%s", omap_crtc->name);
+ dispc_mgr_set_lcd_config(omap_crtc->channel, config);
+}
+
+static int omap_crtc_register_framedone_handler(
+ struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ return 0;
+}
+
+static void omap_crtc_unregister_framedone_handler(
+ struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+}
+
+static const struct dss_mgr_ops mgr_ops = {
+ .start_update = omap_crtc_start_update,
+ .enable = omap_crtc_enable,
+ .disable = omap_crtc_disable,
+ .set_timings = omap_crtc_set_timings,
+ .set_lcd_config = omap_crtc_set_lcd_config,
+ .register_framedone_handler = omap_crtc_register_framedone_handler,
+ .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
};
+/*
+ * CRTC funcs:
+ */
+
static void omap_crtc_destroy(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s", omap_crtc->name);
+
+ WARN_ON(omap_crtc->apply_irq.registered);
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
omap_crtc->plane->funcs->destroy(omap_crtc->plane);
drm_crtc_cleanup(crtc);
+
kfree(omap_crtc);
}
@@ -48,14 +160,25 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
int i;
- WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+ DBG("%s: %d", omap_crtc->name, mode);
+
+ if (enabled != omap_crtc->enabled) {
+ omap_crtc->enabled = enabled;
+ omap_crtc->full_update = true;
+ omap_crtc_apply(crtc, &omap_crtc->apply);
- for (i = 0; i < priv->num_planes; i++) {
- struct drm_plane *plane = priv->planes[i];
- if (plane->crtc == crtc)
- WARN_ON(omap_plane_dpms(plane, mode));
+ /* also enable our private plane: */
+ WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+
+ /* and any attached overlay planes: */
+ for (i = 0; i < priv->num_planes; i++) {
+ struct drm_plane *plane = priv->planes[i];
+ if (plane->crtc == crtc)
+ WARN_ON(omap_plane_dpms(plane, mode));
+ }
}
}
@@ -73,12 +196,26 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_plane *plane = omap_crtc->plane;
- return omap_plane_mode_set(plane, crtc, crtc->fb,
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ omap_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+ omap_crtc->full_update = true;
+
+ return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ NULL, NULL);
}
static void omap_crtc_prepare(struct drm_crtc *crtc)
@@ -102,10 +239,11 @@ static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_plane *plane = omap_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
- return plane->funcs->update_plane(plane, crtc, crtc->fb,
+ return omap_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ NULL, NULL);
}
static void omap_crtc_load_lut(struct drm_crtc *crtc)
@@ -114,63 +252,54 @@ static void omap_crtc_load_lut(struct drm_crtc *crtc)
static void vblank_cb(void *arg)
{
- static uint32_t sequence;
struct drm_crtc *crtc = arg;
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_pending_vblank_event *event = omap_crtc->event;
unsigned long flags;
- struct timeval now;
- WARN_ON(!event);
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* wakeup userspace */
+ if (omap_crtc->event)
+ drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
omap_crtc->event = NULL;
+ omap_crtc->old_fb = NULL;
- /* wakeup userspace */
- if (event) {
- do_gettimeofday(&now);
-
- spin_lock_irqsave(&dev->event_lock, flags);
- /* TODO: we can't yet use the vblank time accounting,
- * because omapdss lower layer is the one that knows
- * the irq # and registers the handler, which more or
- * less defeats how drm_irq works.. for now just fake
- * the sequence number and use gettimeofday..
- *
- event->event.sequence = drm_vblank_count_and_time(
- dev, omap_crtc->id, &now);
- */
- event->event.sequence = sequence++;
- event->event.tv_sec = now.tv_sec;
- event->event.tv_usec = now.tv_usec;
- list_add_tail(&event->base.link,
- &event->base.file_priv->event_list);
- wake_up_interruptible(&event->base.file_priv->event_wait);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void page_flip_cb(void *arg)
+static void page_flip_worker(struct work_struct *work)
{
- struct drm_crtc *crtc = arg;
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_framebuffer *old_fb = omap_crtc->old_fb;
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, page_flip_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *mode = &crtc->mode;
struct drm_gem_object *bo;
- omap_crtc->old_fb = NULL;
-
- omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
-
- /* really we'd like to setup the callback atomically w/ setting the
- * new scanout buffer to avoid getting stuck waiting an extra vblank
- * cycle.. for now go for correctness and later figure out speed..
- */
- omap_plane_on_endwin(omap_crtc->plane, vblank_cb, crtc);
+ mutex_lock(&dev->mode_config.mutex);
+ omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ crtc->x << 16, crtc->y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ vblank_cb, crtc);
+ mutex_unlock(&dev->mode_config.mutex);
bo = omap_framebuffer_bo(crtc->fb, 0);
drm_gem_object_unreference_unlocked(bo);
}
+static void page_flip_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+
+ /* avoid assumptions about what ctxt we are called from: */
+ queue_work(priv->wq, &omap_crtc->page_flip_work);
+}
+
static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
@@ -179,14 +308,14 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_gem_object *bo;
- DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
+ DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+ fb->base.id, event);
- if (omap_crtc->event) {
+ if (omap_crtc->old_fb) {
dev_err(dev->dev, "already a pending flip\n");
return -EINVAL;
}
- omap_crtc->old_fb = crtc->fb;
omap_crtc->event = event;
crtc->fb = fb;
@@ -234,26 +363,283 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.load_lut = omap_crtc_load_lut,
};
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->channel;
+}
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, error_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+ /* avoid getting in a flood, unregister the irq until next vblank */
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, apply_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+
+ if (!omap_crtc->error_irq.registered)
+ omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+
+ if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+ struct omap_drm_private *priv =
+ crtc->dev->dev_private;
+ DBG("%s: apply done", omap_crtc->name);
+ omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+}
+
+static void apply_worker(struct work_struct *work)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, apply_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct omap_drm_apply *apply, *n;
+ bool need_apply;
+
+ /*
+ * Synchronize everything on mode_config.mutex, to keep
+ * the callbacks and list modification all serialized
+ * with respect to modesetting ioctls from userspace.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ dispc_runtime_get();
+
+ /*
+ * If we are still pending a previous update, wait.. when the
+ * pending update completes, we get kicked again.
+ */
+ if (omap_crtc->apply_irq.registered)
+ goto out;
+
+ /* finish up previous apply's: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->pending_applies, pending_node) {
+ apply->post_apply(apply);
+ list_del(&apply->pending_node);
+ }
+
+ need_apply = !list_empty(&omap_crtc->queued_applies);
+
+ /* then handle the next round of queued apply's: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->queued_applies, queued_node) {
+ apply->pre_apply(apply);
+ list_del(&apply->queued_node);
+ apply->queued = false;
+ list_add_tail(&apply->pending_node,
+ &omap_crtc->pending_applies);
+ }
+
+ if (need_apply) {
+ enum omap_channel channel = omap_crtc->channel;
+
+ DBG("%s: GO", omap_crtc->name);
+
+ if (dispc_mgr_is_enabled(channel)) {
+ omap_irq_register(dev, &omap_crtc->apply_irq);
+ dispc_mgr_go(channel);
+ } else {
+ struct omap_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+ }
+
+out:
+ dispc_runtime_put();
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ /* no need to queue it again if it is already queued: */
+ if (apply->queued)
+ return 0;
+
+ apply->queued = true;
+ list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+ /*
+ * If there are no currently pending updates, then go ahead and
+ * kick the worker immediately, otherwise it will run again when
+ * the current update finishes.
+ */
+ if (list_empty(&omap_crtc->pending_applies)) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+
+ return 0;
+}
+
+/* called only from apply */
+static void set_enabled(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ enum omap_channel channel = omap_crtc->channel;
+ struct omap_irq_wait *wait = NULL;
+
+ if (dispc_mgr_is_enabled(channel) == enable)
+ return;
+
+ /* ignore sync-lost irqs during enable/disable */
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+ if (dispc_mgr_get_framedone_irq(channel)) {
+ if (!enable) {
+ wait = omap_irq_wait_init(dev,
+ dispc_mgr_get_framedone_irq(channel), 1);
+ }
+ } else {
+ /*
+ * When we disable digit output, we need to wait until fields
+ * are done. Otherwise the DSS is still working, and turning
+ * off the clocks prevents DSS from going to OFF mode. And when
+ * enabling, we need to wait for the extra sync losts
+ */
+ wait = omap_irq_wait_init(dev,
+ dispc_mgr_get_vsync_irq(channel), 2);
+ }
+
+ dispc_mgr_enable(channel, enable);
+
+ if (wait) {
+ int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+ if (ret) {
+ dev_err(dev->dev, "%s: timeout waiting for %s\n",
+ omap_crtc->name, enable ? "enable" : "disable");
+ }
+ }
+
+ omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(apply, struct omap_crtc, apply);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_encoder *encoder = NULL;
+
+ DBG("%s: enabled=%d, full=%d", omap_crtc->name,
+ omap_crtc->enabled, omap_crtc->full_update);
+
+ if (omap_crtc->full_update) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ int i;
+ for (i = 0; i < priv->num_encoders; i++) {
+ if (priv->encoders[i]->crtc == crtc) {
+ encoder = priv->encoders[i];
+ break;
+ }
+ }
+ }
+
+ if (!omap_crtc->enabled) {
+ set_enabled(&omap_crtc->base, false);
+ if (encoder)
+ omap_encoder_set_enabled(encoder, false);
+ } else {
+ if (encoder) {
+ omap_encoder_set_enabled(encoder, false);
+ omap_encoder_update(encoder, &omap_crtc->mgr,
+ &omap_crtc->timings);
+ omap_encoder_set_enabled(encoder, true);
+ omap_crtc->full_update = false;
+ }
+
+ dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
+ dispc_mgr_set_timings(omap_crtc->channel,
+ &omap_crtc->timings);
+ set_enabled(&omap_crtc->base, true);
+ }
+
+ omap_crtc->full_update = false;
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+ /* nothing needed for post-apply */
+}
+
+static const char *channel_names[] = {
+ [OMAP_DSS_CHANNEL_LCD] = "lcd",
+ [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+ [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+};
+
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct omap_overlay *ovl, int id)
+ struct drm_plane *plane, enum omap_channel channel, int id)
{
struct drm_crtc *crtc = NULL;
- struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+ struct omap_crtc *omap_crtc;
+ struct omap_overlay_manager_info *info;
- DBG("%s", ovl->name);
+ DBG("%s", channel_names[channel]);
- if (!omap_crtc) {
- dev_err(dev->dev, "could not allocate CRTC\n");
+ omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+ if (!omap_crtc)
goto fail;
- }
crtc = &omap_crtc->base;
- omap_crtc->plane = omap_plane_init(dev, ovl, (1 << id), true);
+ INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
+ INIT_WORK(&omap_crtc->apply_work, apply_worker);
+
+ INIT_LIST_HEAD(&omap_crtc->pending_applies);
+ INIT_LIST_HEAD(&omap_crtc->queued_applies);
+
+ omap_crtc->apply.pre_apply = omap_crtc_pre_apply;
+ omap_crtc->apply.post_apply = omap_crtc_post_apply;
+
+ omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+ omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
+
+ omap_crtc->error_irq.irqmask =
+ dispc_mgr_get_sync_lost_irq(channel);
+ omap_crtc->error_irq.irq = omap_crtc_error_irq;
+ omap_irq_register(dev, &omap_crtc->error_irq);
+
+ omap_crtc->channel = channel;
+ omap_crtc->plane = plane;
omap_crtc->plane->crtc = crtc;
- omap_crtc->name = ovl->name;
- omap_crtc->id = id;
+ omap_crtc->name = channel_names[channel];
+ omap_crtc->pipe = id;
+
+ /* temporary: */
+ omap_crtc->mgr.id = channel;
+
+ dss_install_mgr_ops(&mgr_ops);
+
+ /* TODO: fix hard-coded setup.. add properties! */
+ info = &omap_crtc->info;
+ info->default_color = 0x00000000;
+ info->trans_key = 0x00000000;
+ info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ info->trans_enabled = false;
drm_crtc_init(dev, crtc, &omap_crtc_funcs);
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
index 273ec12c028a..58bcd6ae0255 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -118,6 +118,11 @@ struct pat {
#define DESCR_SIZE 128
#define REFILL_BUFFER_SIZE ((4 * 128 * 256) + (3 * DESCR_SIZE))
+/* For OMAP5, a fixed offset is added to all Y coordinates for 1D buffers.
+ * This is used in programming to address the upper portion of the LUT.
+ */
+#define OMAP5_LUT_OFFSET 128
+
struct dmm;
struct dmm_txn {
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 59bf43899fc0..9b794c933c81 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -213,6 +213,11 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
txn->last_pat->next_pa = (uint32_t)pat_pa;
pat->area = *area;
+
+ /* adjust Y coordinates based off of container parameters */
+ pat->area.y0 += engine->tcm->y_offset;
+ pat->area.y1 += engine->tcm->y_offset;
+
pat->ctrl = (struct pat_ctrl){
.start = 1,
.lut_id = engine->tcm->lut_id,
@@ -576,10 +581,8 @@ static int omap_dmm_probe(struct platform_device *dev)
struct resource *mem;
omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
- if (!omap_dmm) {
- dev_err(&dev->dev, "failed to allocate driver data section\n");
+ if (!omap_dmm)
goto fail;
- }
/* initialize lists */
INIT_LIST_HEAD(&omap_dmm->alloc_head);
@@ -622,6 +625,11 @@ static int omap_dmm_probe(struct platform_device *dev)
omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
+ /* On OMAP5 the LUT has twice the height and is split into a
+ * separate container, so increment the LUT count by one. */
+ if (omap_dmm->lut_height != omap_dmm->container_height)
+ omap_dmm->num_lut++;
+
/* initialize DMM registers */
writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
@@ -671,11 +679,9 @@ static int omap_dmm_probe(struct platform_device *dev)
}
/* alloc engines */
- omap_dmm->engines = kzalloc(
- omap_dmm->num_engines * sizeof(struct refill_engine),
- GFP_KERNEL);
+ omap_dmm->engines = kcalloc(omap_dmm->num_engines,
+ sizeof(struct refill_engine), GFP_KERNEL);
if (!omap_dmm->engines) {
- dev_err(&dev->dev, "could not allocate engines\n");
ret = -ENOMEM;
goto fail;
}
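The kzalloc(n * size) to kcalloc(n, size) conversions here use the preferred array-allocation form: kcalloc() zeroes the memory like kzalloc() and additionally returns NULL if n * size would overflow. A small sketch mirroring the names used above:

	struct refill_engine *engines;

	engines = kcalloc(num_engines, sizeof(*engines), GFP_KERNEL);
	if (!engines)
		return -ENOMEM;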
@@ -692,15 +698,17 @@ static int omap_dmm_probe(struct platform_device *dev)
list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
}
- omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
+ omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
GFP_KERNEL);
if (!omap_dmm->tcm) {
- dev_err(&dev->dev, "failed to allocate lut ptrs\n");
ret = -ENOMEM;
goto fail;
}
/* init containers */
+ /* Each LUT is associated with a TCM (container manager). The lut_id
+ identifies the correct LUT for programming during refill
+ operations. */
for (i = 0; i < omap_dmm->num_lut; i++) {
omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
omap_dmm->container_height,
@@ -717,13 +725,23 @@ static int omap_dmm_probe(struct platform_device *dev)
/* assign access mode containers to applicable tcm container */
/* OMAP 4 has 1 container for all 4 views */
+ /* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
containers[TILFMT_8BIT] = omap_dmm->tcm[0];
containers[TILFMT_16BIT] = omap_dmm->tcm[0];
containers[TILFMT_32BIT] = omap_dmm->tcm[0];
- containers[TILFMT_PAGE] = omap_dmm->tcm[0];
+
+ if (omap_dmm->container_height != omap_dmm->lut_height) {
+ /* second LUT is used for PAGE mode. Programming must use
+ y offset that is added to all y coordinates. LUT id is still
+ 0, because it is the same LUT, just the upper 128 lines */
+ containers[TILFMT_PAGE] = omap_dmm->tcm[1];
+ omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
+ omap_dmm->tcm[1]->lut_id = 0;
+ } else {
+ containers[TILFMT_PAGE] = omap_dmm->tcm[0];
+ }
area = (struct tcm_area) {
- .is2d = true,
.tcm = NULL,
.p1.x = omap_dmm->container_width - 1,
.p1.y = omap_dmm->container_height - 1,
@@ -835,64 +853,81 @@ int tiler_map_show(struct seq_file *s, void *arg)
int h_adj;
int w_adj;
unsigned long flags;
+ int lut_idx;
+
if (!omap_dmm) {
/* early return if dmm/tiler device is not initialized */
return 0;
}
- h_adj = omap_dmm->lut_height / ydiv;
- w_adj = omap_dmm->lut_width / xdiv;
+ h_adj = omap_dmm->container_height / ydiv;
+ w_adj = omap_dmm->container_width / xdiv;
- map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
- global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+ map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
+ global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
if (!map || !global_map)
goto error;
- memset(global_map, ' ', (w_adj + 1) * h_adj);
- for (i = 0; i < omap_dmm->lut_height; i++) {
- map[i] = global_map + i * (w_adj + 1);
- map[i][w_adj] = 0;
- }
- spin_lock_irqsave(&list_lock, flags);
+ for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
+ memset(map, 0, h_adj * sizeof(*map));
+ memset(global_map, ' ', (w_adj + 1) * h_adj);
- list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
- if (block->fmt != TILFMT_PAGE) {
- fill_map(map, xdiv, ydiv, &block->area, *m2dp, true);
- if (!*++a2dp)
- a2dp = a2d;
- if (!*++m2dp)
- m2dp = m2d;
- map_2d_info(map, xdiv, ydiv, nice, &block->area);
- } else {
- bool start = read_map_pt(map, xdiv, ydiv,
- &block->area.p0)
- == ' ';
- bool end = read_map_pt(map, xdiv, ydiv, &block->area.p1)
- == ' ';
- tcm_for_each_slice(a, block->area, p)
- fill_map(map, xdiv, ydiv, &a, '=', true);
- fill_map_pt(map, xdiv, ydiv, &block->area.p0,
+ for (i = 0; i < omap_dmm->container_height; i++) {
+ map[i] = global_map + i * (w_adj + 1);
+ map[i][w_adj] = 0;
+ }
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
+ if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
+ if (block->fmt != TILFMT_PAGE) {
+ fill_map(map, xdiv, ydiv, &block->area,
+ *m2dp, true);
+ if (!*++a2dp)
+ a2dp = a2d;
+ if (!*++m2dp)
+ m2dp = m2d;
+ map_2d_info(map, xdiv, ydiv, nice,
+ &block->area);
+ } else {
+ bool start = read_map_pt(map, xdiv,
+ ydiv, &block->area.p0) == ' ';
+ bool end = read_map_pt(map, xdiv, ydiv,
+ &block->area.p1) == ' ';
+
+ tcm_for_each_slice(a, block->area, p)
+ fill_map(map, xdiv, ydiv, &a,
+ '=', true);
+ fill_map_pt(map, xdiv, ydiv,
+ &block->area.p0,
start ? '<' : 'X');
- fill_map_pt(map, xdiv, ydiv, &block->area.p1,
+ fill_map_pt(map, xdiv, ydiv,
+ &block->area.p1,
end ? '>' : 'X');
- map_1d_info(map, xdiv, ydiv, nice, &block->area);
+ map_1d_info(map, xdiv, ydiv, nice,
+ &block->area);
+ }
+ }
}
- }
- spin_unlock_irqrestore(&list_lock, flags);
+ spin_unlock_irqrestore(&list_lock, flags);
- if (s) {
- seq_printf(s, "BEGIN DMM TILER MAP\n");
- for (i = 0; i < 128; i++)
- seq_printf(s, "%03d:%s\n", i, map[i]);
- seq_printf(s, "END TILER MAP\n");
- } else {
- dev_dbg(omap_dmm->dev, "BEGIN DMM TILER MAP\n");
- for (i = 0; i < 128; i++)
- dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
- dev_dbg(omap_dmm->dev, "END TILER MAP\n");
+ if (s) {
+ seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
+ for (i = 0; i < 128; i++)
+ seq_printf(s, "%03d:%s\n", i, map[i]);
+ seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
+ } else {
+ dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
+ lut_idx);
+ for (i = 0; i < 128; i++)
+ dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
+ dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
+ lut_idx);
+ }
}
error:
@@ -903,12 +938,45 @@ error:
}
#endif
+#ifdef CONFIG_PM
+static int omap_dmm_resume(struct device *dev)
+{
+ struct tcm_area area;
+ int i;
+
+ if (!omap_dmm)
+ return -ENODEV;
+
+ area = (struct tcm_area) {
+ .tcm = NULL,
+ .p1.x = omap_dmm->container_width - 1,
+ .p1.y = omap_dmm->container_height - 1,
+ };
+
+ /* initialize all LUTs to dummy page entries */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ area.tcm = omap_dmm->tcm[i];
+ if (fill(&area, NULL, 0, 0, true))
+ dev_err(dev, "refill failed");
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_dmm_pm_ops = {
+ .resume = omap_dmm_resume,
+};
+#endif
+
struct platform_driver omap_dmm_driver = {
.probe = omap_dmm_probe,
.remove = omap_dmm_remove,
.driver = {
.owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
+#ifdef CONFIG_PM
+ .pm = &omap_dmm_pm_ops,
+#endif
},
};
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index d4823fd67768..480dc343446c 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -74,320 +74,99 @@ static int get_connector_type(struct omap_dss_device *dssdev)
}
}
-#if 0 /* enable when dss2 supports hotplug */
-static int omap_drm_notifier(struct notifier_block *nb,
- unsigned long evt, void *arg)
-{
- switch (evt) {
- case OMAP_DSS_SIZE_CHANGE:
- case OMAP_DSS_HOTPLUG_CONNECT:
- case OMAP_DSS_HOTPLUG_DISCONNECT: {
- struct drm_device *dev = drm_device;
- DBG("hotplug event: evt=%d, dev=%p", evt, dev);
- if (dev)
- drm_sysfs_hotplug_event(dev);
-
- return NOTIFY_OK;
- }
- default: /* don't care about other events for now */
- return NOTIFY_DONE;
- }
-}
-#endif
-
-static void dump_video_chains(void)
-{
- int i;
-
- DBG("dumping video chains: ");
- for (i = 0; i < omap_dss_get_num_overlays(); i++) {
- struct omap_overlay *ovl = omap_dss_get_overlay(i);
- struct omap_overlay_manager *mgr = ovl->manager;
- struct omap_dss_device *dssdev = mgr ?
- mgr->get_device(mgr) : NULL;
- if (dssdev) {
- DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
- dssdev->name);
- } else if (mgr) {
- DBG("%d: %s -> %s", i, ovl->name, mgr->name);
- } else {
- DBG("%d: %s", i, ovl->name);
- }
- }
-}
-
-/* create encoders for each manager */
-static int create_encoder(struct drm_device *dev,
- struct omap_overlay_manager *mgr)
-{
- struct omap_drm_private *priv = dev->dev_private;
- struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
-
- if (!encoder) {
- dev_err(dev->dev, "could not create encoder: %s\n",
- mgr->name);
- return -ENOMEM;
- }
-
- BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
-
- priv->encoders[priv->num_encoders++] = encoder;
-
- return 0;
-}
-
-/* create connectors for each display device */
-static int create_connector(struct drm_device *dev,
- struct omap_dss_device *dssdev)
+static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- static struct notifier_block *notifier;
- struct drm_connector *connector;
- int j;
-
- if (!dssdev->driver) {
- dev_warn(dev->dev, "%s has no driver.. skipping it\n",
- dssdev->name);
- return 0;
- }
+ struct omap_dss_device *dssdev = NULL;
+ int num_ovls = dss_feat_get_num_ovls();
+ int id;
- if (!(dssdev->driver->get_timings ||
- dssdev->driver->read_edid)) {
- dev_warn(dev->dev, "%s driver does not support "
- "get_timings or read_edid.. skipping it!\n",
- dssdev->name);
- return 0;
- }
+ drm_mode_config_init(dev);
- connector = omap_connector_init(dev,
- get_connector_type(dssdev), dssdev);
+ omap_drm_irq_install(dev);
- if (!connector) {
- dev_err(dev->dev, "could not create connector: %s\n",
- dssdev->name);
- return -ENOMEM;
- }
-
- BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+ /*
+ * Create private planes and CRTCs for the last NUM_CRTCs
+ * overlay+manager pairs:
+ */
+ for (id = 0; id < min(num_crtc, num_ovls); id++) {
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
- priv->connectors[priv->num_connectors++] = connector;
+ plane = omap_plane_init(dev, id, true);
+ crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
-#if 0 /* enable when dss2 supports hotplug */
- notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
- notifier->notifier_call = omap_drm_notifier;
- omap_dss_add_notify(dssdev, notifier);
-#else
- notifier = NULL;
-#endif
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+ priv->crtcs[id] = crtc;
+ priv->num_crtcs++;
- for (j = 0; j < priv->num_encoders; j++) {
- struct omap_overlay_manager *mgr =
- omap_encoder_get_manager(priv->encoders[j]);
- if (mgr->get_device(mgr) == dssdev) {
- drm_mode_connector_attach_encoder(connector,
- priv->encoders[j]);
- }
+ priv->planes[id] = plane;
+ priv->num_planes++;
}
- return 0;
-}
-
-/* create up to max_overlays CRTCs mapping to overlays.. by default,
- * connect the overlays to different managers/encoders, giving priority
- * to encoders connected to connectors with a detected connection
- */
-static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
- int *j, unsigned int connected_connectors)
-{
- struct omap_drm_private *priv = dev->dev_private;
- struct omap_overlay_manager *mgr = NULL;
- struct drm_crtc *crtc;
-
- /* find next best connector, ones with detected connection first
+ /*
+ * Create normal planes for the remaining overlays:
*/
- while (*j < priv->num_connectors && !mgr) {
- if (connected_connectors & (1 << *j)) {
- struct drm_encoder *encoder =
- omap_connector_attached_encoder(
- priv->connectors[*j]);
- if (encoder)
- mgr = omap_encoder_get_manager(encoder);
+ for (; id < num_ovls; id++) {
+ struct drm_plane *plane = omap_plane_init(dev, id, false);
- }
- (*j)++;
+ BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+ priv->planes[priv->num_planes++] = plane;
}
- /* if we couldn't find another connected connector, lets start
- * looking at the unconnected connectors:
- *
- * note: it might not be immediately apparent, but thanks to
- * the !mgr check in both this loop and the one above, the only
- * way to enter this loop is with *j == priv->num_connectors,
- * so idx can never go negative.
- */
- while (*j < 2 * priv->num_connectors && !mgr) {
- int idx = *j - priv->num_connectors;
- if (!(connected_connectors & (1 << idx))) {
- struct drm_encoder *encoder =
- omap_connector_attached_encoder(
- priv->connectors[idx]);
- if (encoder)
- mgr = omap_encoder_get_manager(encoder);
+ for_each_dss_dev(dssdev) {
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ if (!dssdev->driver) {
+ dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+ dssdev->name);
+ return 0;
}
- (*j)++;
- }
-
- crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
-
- if (!crtc) {
- dev_err(dev->dev, "could not create CRTC: %s\n",
- ovl->name);
- return -ENOMEM;
- }
- BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
-
- priv->crtcs[priv->num_crtcs++] = crtc;
-
- return 0;
-}
-
-static int create_plane(struct drm_device *dev, struct omap_overlay *ovl,
- unsigned int possible_crtcs)
-{
- struct omap_drm_private *priv = dev->dev_private;
- struct drm_plane *plane =
- omap_plane_init(dev, ovl, possible_crtcs, false);
-
- if (!plane) {
- dev_err(dev->dev, "could not create plane: %s\n",
- ovl->name);
- return -ENOMEM;
- }
-
- BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
-
- priv->planes[priv->num_planes++] = plane;
-
- return 0;
-}
-
-static int match_dev_name(struct omap_dss_device *dssdev, void *data)
-{
- return !strcmp(dssdev->name, data);
-}
-
-static unsigned int detect_connectors(struct drm_device *dev)
-{
- struct omap_drm_private *priv = dev->dev_private;
- unsigned int connected_connectors = 0;
- int i;
-
- for (i = 0; i < priv->num_connectors; i++) {
- struct drm_connector *connector = priv->connectors[i];
- if (omap_connector_detect(connector, true) ==
- connector_status_connected) {
- connected_connectors |= (1 << i);
+ if (!(dssdev->driver->get_timings ||
+ dssdev->driver->read_edid)) {
+ dev_warn(dev->dev, "%s driver does not support "
+ "get_timings or read_edid.. skipping it!\n",
+ dssdev->name);
+ return 0;
}
- }
-
- return connected_connectors;
-}
-static int omap_modeset_init(struct drm_device *dev)
-{
- const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
- struct omap_kms_platform_data *kms_pdata = NULL;
- struct omap_drm_private *priv = dev->dev_private;
- struct omap_dss_device *dssdev = NULL;
- int i, j;
- unsigned int connected_connectors = 0;
+ encoder = omap_encoder_init(dev, dssdev);
- drm_mode_config_init(dev);
-
- if (pdata && pdata->kms_pdata) {
- kms_pdata = pdata->kms_pdata;
-
- /* if platform data is provided by the board file, use it to
- * control which overlays, managers, and devices we own.
- */
- for (i = 0; i < kms_pdata->mgr_cnt; i++) {
- struct omap_overlay_manager *mgr =
- omap_dss_get_overlay_manager(
- kms_pdata->mgr_ids[i]);
- create_encoder(dev, mgr);
- }
-
- for (i = 0; i < kms_pdata->dev_cnt; i++) {
- struct omap_dss_device *dssdev =
- omap_dss_find_device(
- (void *)kms_pdata->dev_names[i],
- match_dev_name);
- if (!dssdev) {
- dev_warn(dev->dev, "no such dssdev: %s\n",
- kms_pdata->dev_names[i]);
- continue;
- }
- create_connector(dev, dssdev);
+ if (!encoder) {
+ dev_err(dev->dev, "could not create encoder: %s\n",
+ dssdev->name);
+ return -ENOMEM;
}
- connected_connectors = detect_connectors(dev);
+ connector = omap_connector_init(dev,
+ get_connector_type(dssdev), dssdev, encoder);
- j = 0;
- for (i = 0; i < kms_pdata->ovl_cnt; i++) {
- struct omap_overlay *ovl =
- omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
- create_crtc(dev, ovl, &j, connected_connectors);
+ if (!connector) {
+ dev_err(dev->dev, "could not create connector: %s\n",
+ dssdev->name);
+ return -ENOMEM;
}
- for (i = 0; i < kms_pdata->pln_cnt; i++) {
- struct omap_overlay *ovl =
- omap_dss_get_overlay(kms_pdata->pln_ids[i]);
- create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
- }
- } else {
- /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
- * to make educated guesses about everything else
- */
- int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
+ BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+ BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
- for (i = 0; i < omap_dss_get_num_overlay_managers(); i++)
- create_encoder(dev, omap_dss_get_overlay_manager(i));
-
- for_each_dss_dev(dssdev) {
- create_connector(dev, dssdev);
- }
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
- connected_connectors = detect_connectors(dev);
+ drm_mode_connector_attach_encoder(connector, encoder);
- j = 0;
- for (i = 0; i < max_overlays; i++) {
- create_crtc(dev, omap_dss_get_overlay(i),
- &j, connected_connectors);
- }
-
- /* use any remaining overlays as drm planes */
- for (; i < omap_dss_get_num_overlays(); i++) {
- struct omap_overlay *ovl = omap_dss_get_overlay(i);
- create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
+ /* figure out which crtc's we can connect the encoder to: */
+ encoder->possible_crtcs = 0;
+ for (id = 0; id < priv->num_crtcs; id++) {
+ enum omap_dss_output_id supported_outputs =
+ dss_feat_get_supported_outputs(pipe2chan(id));
+ if (supported_outputs & dssdev->output->id)
+ encoder->possible_crtcs |= (1 << id);
}
}
- /* for now keep the mapping of CRTCs and encoders static.. */
- for (i = 0; i < priv->num_encoders; i++) {
- struct drm_encoder *encoder = priv->encoders[i];
- struct omap_overlay_manager *mgr =
- omap_encoder_get_manager(encoder);
-
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
-
- DBG("%s: possible_crtcs=%08x", mgr->name,
- encoder->possible_crtcs);
- }
-
- dump_video_chains();
-
dev->mode_config.min_width = 32;
dev->mode_config.min_height = 32;
@@ -450,7 +229,7 @@ static int ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_new *args = data;
- DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+ VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
args->size.bytes, args->flags);
return omap_gem_new_handle(dev, file_priv, args->size,
args->flags, &args->handle);
@@ -510,7 +289,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
int ret = 0;
- DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
+ VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj)
@@ -556,10 +335,8 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
DBG("load: dev=%p", dev);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev->dev, "could not allocate priv\n");
+ if (!priv)
return -ENOMEM;
- }
priv->omaprev = pdata->omaprev;
@@ -579,17 +356,20 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
return ret;
}
+ ret = drm_vblank_init(dev, priv->num_crtcs);
+ if (ret)
+ dev_warn(dev->dev, "could not init vblank\n");
+
priv->fbdev = omap_fbdev_init(dev);
if (!priv->fbdev) {
dev_warn(dev->dev, "omap_fbdev_init failed\n");
/* well, limp along without an fbdev.. maybe X11 will work? */
}
- drm_kms_helper_poll_init(dev);
+ /* store off drm_device for use in pm ops */
+ dev_set_drvdata(dev->dev, dev);
- ret = drm_vblank_init(dev, priv->num_crtcs);
- if (ret)
- dev_warn(dev->dev, "could not init vblank\n");
+ drm_kms_helper_poll_init(dev);
return 0;
}
@@ -600,8 +380,9 @@ static int dev_unload(struct drm_device *dev)
DBG("unload: dev=%p", dev);
- drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
+ drm_vblank_cleanup(dev);
+ omap_drm_irq_uninstall(dev);
omap_fbdev_free(dev);
omap_modeset_free(dev);
@@ -613,6 +394,8 @@ static int dev_unload(struct drm_device *dev)
kfree(dev->dev_private);
dev->dev_private = NULL;
+ dev_set_drvdata(dev->dev, NULL);
+
return 0;
}
@@ -669,7 +452,9 @@ static void dev_lastclose(struct drm_device *dev)
}
}
+ mutex_lock(&dev->mode_config.mutex);
ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ mutex_unlock(&dev->mode_config.mutex);
if (ret)
DBG("failed to restore crtc mode");
}
@@ -684,60 +469,6 @@ static void dev_postclose(struct drm_device *dev, struct drm_file *file)
DBG("postclose: dev=%p, file=%p", dev, file);
}
-/**
- * enable_vblank - enable vblank interrupt events
- * @dev: DRM device
- * @crtc: which irq to enable
- *
- * Enable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
- *
- * RETURNS
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
-static int dev_enable_vblank(struct drm_device *dev, int crtc)
-{
- DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
- return 0;
-}
-
-/**
- * disable_vblank - disable vblank interrupt events
- * @dev: DRM device
- * @crtc: which irq to enable
- *
- * Disable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
- */
-static void dev_disable_vblank(struct drm_device *dev, int crtc)
-{
- DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
-}
-
-static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
-{
- return IRQ_HANDLED;
-}
-
-static void dev_irq_preinstall(struct drm_device *dev)
-{
- DBG("irq_preinstall: dev=%p", dev);
-}
-
-static int dev_irq_postinstall(struct drm_device *dev)
-{
- DBG("irq_postinstall: dev=%p", dev);
- return 0;
-}
-
-static void dev_irq_uninstall(struct drm_device *dev)
-{
- DBG("irq_uninstall: dev=%p", dev);
-}
-
static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
@@ -767,12 +498,12 @@ static struct drm_driver omap_drm_driver = {
.preclose = dev_preclose,
.postclose = dev_postclose,
.get_vblank_counter = drm_vblank_count,
- .enable_vblank = dev_enable_vblank,
- .disable_vblank = dev_disable_vblank,
- .irq_preinstall = dev_irq_preinstall,
- .irq_postinstall = dev_irq_postinstall,
- .irq_uninstall = dev_irq_uninstall,
- .irq_handler = dev_irq_handler,
+ .enable_vblank = omap_irq_enable_vblank,
+ .disable_vblank = omap_irq_disable_vblank,
+ .irq_preinstall = omap_irq_preinstall,
+ .irq_postinstall = omap_irq_postinstall,
+ .irq_uninstall = omap_irq_uninstall,
+ .irq_handler = omap_irq_handler,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = omap_debugfs_init,
.debugfs_cleanup = omap_debugfs_cleanup,
@@ -830,10 +561,19 @@ static int pdev_remove(struct platform_device *device)
return 0;
}
+#ifdef CONFIG_PM
+static const struct dev_pm_ops omapdrm_pm_ops = {
+ .resume = omap_gem_resume,
+};
+#endif
+
struct platform_driver pdev = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &omapdrm_pm_ops,
+#endif
},
.probe = pdev_probe,
.remove = pdev_remove,
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index 1d4aea53b75d..f921027e7500 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -28,6 +28,7 @@
#include <linux/platform_data/omap_drm.h>
#include "omap_drm.h"
+
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -39,6 +40,51 @@
*/
#define MAX_MAPPERS 2
+/* parameters which describe (unrotated) coordinates of scanout within a fb: */
+struct omap_drm_window {
+ uint32_t rotation;
+ int32_t crtc_x, crtc_y; /* signed because can be offscreen */
+ uint32_t crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
+};
+
+/* Once GO bit is set, we can't make further updates to shadowed registers
+ * until the GO bit is cleared. So various parts in the kms code that need
+ * to update shadowed registers queue up a pair of callbacks, pre_apply
+ * which is called before setting GO bit, and post_apply that is called
+ * after GO bit is cleared. The crtc manages the queuing, and everyone
+ * else goes thru omap_crtc_apply() using these callbacks so that the
+ * code which has to deal w/ GO bit state is centralized.
+ */
+struct omap_drm_apply {
+ struct list_head pending_node, queued_node;
+ bool queued;
+ void (*pre_apply)(struct omap_drm_apply *apply);
+ void (*post_apply)(struct omap_drm_apply *apply);
+};
+
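A minimal usage sketch (not part of the patch; the example_* names are hypothetical): a KMS component embeds an omap_drm_apply, points the callbacks at its own handlers, and queues it on the CRTC via omap_crtc_apply(), declared further down in this header.

	struct example_component {
		struct drm_crtc *crtc;
		struct omap_drm_apply apply;
	};

	static void example_pre_apply(struct omap_drm_apply *apply)
	{
		/* container_of(apply, struct example_component, apply) gets the
		 * component back; runs before the GO bit is set, so shadowed
		 * registers may be written here.
		 */
	}

	static void example_post_apply(struct omap_drm_apply *apply)
	{
		/* runs after the GO bit clears: safe to unpin old buffers here */
	}

	static int example_queue_update(struct example_component *c)
	{
		c->apply.pre_apply = example_pre_apply;
		c->apply.post_apply = example_post_apply;
		return omap_crtc_apply(c->crtc, &c->apply);
	}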
+/* For transiently registering for different DSS irqs that various parts
+ * of the KMS code need during setup/configuration. We these are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct omap_drm_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
+};
+
+/* For KMS code that needs to wait for a certain # of IRQs:
+ */
+struct omap_irq_wait;
+struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
+ uint32_t irqmask, int count);
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+ unsigned long timeout);
+
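A minimal usage sketch (not part of the patch; DISPC_IRQ_VSYNC and msecs_to_jiffies() are assumed to come from omapdss and <linux/jiffies.h>): register a wait for a couple of interrupts, then block on it. omap_irq_wait() unregisters and frees the wait object itself, so no cleanup is needed by the caller.

	static int example_wait_for_vsyncs(struct drm_device *dev)
	{
		struct omap_irq_wait *wait =
				omap_irq_wait_init(dev, DISPC_IRQ_VSYNC, 2);

		return omap_irq_wait(dev, wait, msecs_to_jiffies(100));
	}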
struct omap_drm_private {
uint32_t omaprev;
@@ -58,6 +104,7 @@ struct omap_drm_private {
struct workqueue_struct *wq;
+ /* list of GEM objects: */
struct list_head obj_list;
bool has_dmm;
@@ -65,6 +112,11 @@ struct omap_drm_private {
/* properties: */
struct drm_property *rotation_prop;
struct drm_property *zorder_prop;
+
+ /* irq handling: */
+ struct list_head irq_list; /* list of omap_drm_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ struct omap_drm_irq error_handler;
};
/* this should probably be in drm-core to standardize amongst drivers */
@@ -75,15 +127,6 @@ struct omap_drm_private {
#define DRM_REFLECT_X 4
#define DRM_REFLECT_Y 5
-/* parameters which describe (unrotated) coordinates of scanout within a fb: */
-struct omap_drm_window {
- uint32_t rotation;
- int32_t crtc_x, crtc_y; /* signed because can be offscreen */
- uint32_t crtc_w, crtc_h;
- uint32_t src_x, src_y;
- uint32_t src_w, src_h;
-};
-
#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
void omap_debugfs_cleanup(struct drm_minor *minor);
@@ -92,23 +135,40 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif
+#ifdef CONFIG_PM
+int omap_gem_resume(struct device *dev);
+#endif
+
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc);
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+void omap_irq_preinstall(struct drm_device *dev);
+int omap_irq_postinstall(struct drm_device *dev);
+void omap_irq_uninstall(struct drm_device *dev);
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
+int omap_drm_irq_uninstall(struct drm_device *dev);
+int omap_drm_irq_install(struct drm_device *dev);
+
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
void omap_fbdev_free(struct drm_device *dev);
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct omap_overlay *ovl, int id);
+ struct drm_plane *plane, enum omap_channel channel, int id);
struct drm_plane *omap_plane_init(struct drm_device *dev,
- struct omap_overlay *ovl, unsigned int possible_crtcs,
- bool priv);
+ int plane_id, bool private_plane);
int omap_plane_dpms(struct drm_plane *plane, int mode);
int omap_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-void omap_plane_on_endwin(struct drm_plane *plane,
+ uint32_t src_w, uint32_t src_h,
void (*fxn)(void *), void *arg);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
@@ -116,21 +176,25 @@ int omap_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_overlay_manager *mgr);
-struct omap_overlay_manager *omap_encoder_get_manager(
+ struct omap_dss_device *dssdev);
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled);
+int omap_encoder_update(struct drm_encoder *encoder,
+ struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev,
struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
-enum drm_connector_status omap_connector_detect(
- struct drm_connector *connector, bool force);
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev);
-void omap_connector_mode_set(struct drm_connector *connector,
- struct drm_display_mode *mode);
void omap_connector_flush(struct drm_connector *connector,
int x, int y, int w, int h);
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+ struct omap_video_timings *timings);
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+ struct drm_display_mode *mode);
+
uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
@@ -207,6 +271,40 @@ static inline int align_pitch(int pitch, int width, int bpp)
return ALIGN(pitch, 8 * bytespp);
}
+static inline enum omap_channel pipe2chan(int pipe)
+{
+ int num_mgrs = dss_feat_get_num_mgrs();
+
+ /*
+ * We usually don't want to create a CRTC for each manager,
+ * at least not until we have a way to expose private planes
+ * to userspace. Otherwise there would not be enough video
+ * pipes left for drm planes. The higher #'d managers tend
+ * to have more features so start in reverse order.
+ */
+ return num_mgrs - pipe - 1;
+}
+
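As a quick worked example of the arithmetic above (assuming a SoC where dss_feat_get_num_mgrs() returns 3): pipe 0 maps to channel 2, pipe 1 to channel 1, and pipe 2 to channel 0, so the highest-numbered manager is handed out first.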
+/* map crtc to vblank mask */
+static inline uint32_t pipe2vbl(int crtc)
+{
+ enum omap_channel channel = pipe2chan(crtc);
+ return dispc_mgr_get_vsync_irq(channel);
+}
+
+static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
+ if (priv->crtcs[i] == crtc)
+ return i;
+
+ BUG(); /* bogus CRTC ptr */
+ return -1;
+}
+
/* should these be made into common util helpers?
*/
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c
index 5341d5e3e317..25fc0c7b4f6d 100644
--- a/drivers/staging/omapdrm/omap_encoder.c
+++ b/drivers/staging/omapdrm/omap_encoder.c
@@ -22,37 +22,56 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include <linux/list.h>
+
+
/*
* encoder funcs
*/
#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+/* The encoder and connector both map to the same dssdev.. the encoder
+ * handles the 'active' parts, i.e. anything that modifies the state
+ * of the hw, and the connector handles the 'read-only' parts, like
+ * detecting connection and reading edid.
+ */
struct omap_encoder {
struct drm_encoder base;
- struct omap_overlay_manager *mgr;
+ struct omap_dss_device *dssdev;
};
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- DBG("%s", omap_encoder->mgr->name);
drm_encoder_cleanup(encoder);
kfree(omap_encoder);
}
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+ .destroy = omap_encoder_destroy,
+};
+
+/*
+ * The CRTC drm_crtc_helper_set_mode() doesn't really give us the right
+ * order.. the easiest way to work around this for now is to make all
+ * the encoder helpers no-ops and have the omap_crtc code take care
+ * of the sequencing and call us at the right points.
+ *
+ * Eventually to handle connecting CRTCs to different encoders properly,
+ * either the CRTC helpers need to change or we need to replace
+ * drm_crtc_helper_set_mode(), but lets wait until atomic-modeset for
+ * that.
+ */
+
static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- DBG("%s: %d", omap_encoder->mgr->name, mode);
}
static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- DBG("%s", omap_encoder->mgr->name);
return true;
}
@@ -60,47 +79,16 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct omap_drm_private *priv = dev->dev_private;
- int i;
-
- mode = adjusted_mode;
-
- DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
- mode->hdisplay, mode->vdisplay);
-
- for (i = 0; i < priv->num_connectors; i++) {
- struct drm_connector *connector = priv->connectors[i];
- if (connector->encoder == encoder)
- omap_connector_mode_set(connector, mode);
-
- }
}
static void omap_encoder_prepare(struct drm_encoder *encoder)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- DBG("%s", omap_encoder->mgr->name);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void omap_encoder_commit(struct drm_encoder *encoder)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- DBG("%s", omap_encoder->mgr->name);
- omap_encoder->mgr->apply(omap_encoder->mgr);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
-static const struct drm_encoder_funcs omap_encoder_funcs = {
- .destroy = omap_encoder_destroy,
-};
-
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.dpms = omap_encoder_dpms,
.mode_fixup = omap_encoder_mode_fixup,
@@ -109,57 +97,67 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.commit = omap_encoder_commit,
};
-struct omap_overlay_manager *omap_encoder_get_manager(
- struct drm_encoder *encoder)
+/*
+ * Instead of relying on the helpers for modeset, the omap_crtc code
+ * calls these functions in the proper sequence.
+ */
+
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- return omap_encoder->mgr;
+ struct omap_dss_device *dssdev = omap_encoder->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+
+ if (enabled) {
+ return dssdrv->enable(dssdev);
+ } else {
+ dssdrv->disable(dssdev);
+ return 0;
+ }
+}
+
+int omap_encoder_update(struct drm_encoder *encoder,
+ struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings)
+{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ int ret;
+
+ dssdev->output->manager = mgr;
+
+ ret = dssdrv->check_timings(dssdev, timings);
+ if (ret) {
+ dev_err(dev->dev, "could not set timings: %d\n", ret);
+ return ret;
+ }
+
+ dssdrv->set_timings(dssdev, timings);
+
+ return 0;
}
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_overlay_manager *mgr)
+ struct omap_dss_device *dssdev)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
- struct omap_overlay_manager_info info;
- int ret;
-
- DBG("%s", mgr->name);
omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
- if (!omap_encoder) {
- dev_err(dev->dev, "could not allocate encoder\n");
+ if (!omap_encoder)
goto fail;
- }
- omap_encoder->mgr = mgr;
+ omap_encoder->dssdev = dssdev;
+
encoder = &omap_encoder->base;
drm_encoder_init(dev, encoder, &omap_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
- mgr->get_manager_info(mgr, &info);
-
- /* TODO: fix hard-coded setup.. */
- info.default_color = 0x00000000;
- info.trans_key = 0x00000000;
- info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
- info.trans_enabled = false;
-
- ret = mgr->set_manager_info(mgr, &info);
- if (ret) {
- dev_err(dev->dev, "could not set manager info\n");
- goto fail;
- }
-
- ret = mgr->apply(mgr);
- if (ret) {
- dev_err(dev->dev, "could not apply\n");
- goto fail;
- }
-
return encoder;
fail:
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
index 09028e9c1093..bb4969942148 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -418,7 +418,6 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
if (!omap_fb) {
- dev_err(dev->dev, "could not allocate fb\n");
ret = -ENOMEM;
goto fail;
}
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 8a027bb77d97..70f2d6ed2ed3 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -348,10 +348,8 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
int ret = 0;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
- if (!fbdev) {
- dev_err(dev->dev, "could not allocate fbdev\n");
+ if (!fbdev)
goto fail;
- }
INIT_WORK(&fbdev->work, pan_worker);
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index c38992b76fc9..518d03d4d4f3 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -964,6 +964,34 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
return omap_obj->vaddr;
}
+#ifdef CONFIG_PM
+/* re-pin objects in DMM in resume path: */
+int omap_gem_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct omap_drm_private *priv = drm_dev->dev_private;
+ struct omap_gem_object *omap_obj;
+ int ret = 0;
+
+ list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
+ if (omap_obj->block) {
+ struct drm_gem_object *obj = &omap_obj->base;
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ WARN_ON(!omap_obj->pages); /* this can't happen */
+ ret = tiler_pin(omap_obj->block,
+ omap_obj->pages, npages,
+ omap_obj->roll, true);
+ if (ret) {
+ dev_err(dev, "could not repin: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+#endif
+
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
@@ -1239,12 +1267,12 @@ int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
/* clearing a previously set syncobj */
- syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+ syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
+ GFP_ATOMIC);
if (!syncobj) {
ret = -ENOMEM;
goto unlock;
}
- memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
omap_obj->sync = syncobj;
} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
@@ -1374,10 +1402,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
}
omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
- if (!omap_obj) {
- dev_err(dev->dev, "could not allocate GEM object\n");
+ if (!omap_obj)
goto fail;
- }
list_add(&omap_obj->mm_list, &priv->obj_list);
@@ -1433,11 +1459,9 @@ void omap_gem_init(struct drm_device *dev)
return;
}
- usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
- if (!usergart) {
- dev_warn(dev->dev, "could not allocate usergart\n");
+ usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
+ if (!usergart)
return;
- }
/* reserve 4k aligned/wide regions for userspace mappings: */
for (i = 0; i < ARRAY_SIZE(fmts); i++) {
diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/staging/omapdrm/omap_gem_dmabuf.c
index 9a302062b031..a3236abfca3d 100644
--- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/staging/omapdrm/omap_gem_dmabuf.c
@@ -53,10 +53,10 @@ static struct sg_table *omap_gem_map_dma_buf(
/* this should be after _get_paddr() to ensure we have pages attached */
omap_gem_dma_sync(obj, dir);
-out:
- if (ret)
- return ERR_PTR(ret);
return sg;
+out:
+ kfree(sg);
+ return ERR_PTR(ret);
}
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
@@ -194,7 +194,7 @@ struct dma_buf_ops omap_dmabuf_ops = {
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, 0600);
+ return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
}
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
@@ -207,7 +207,12 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
obj = buffer->priv;
/* is it from our device? */
if (obj->dev == dev) {
+ /*
+ * Importing a dmabuf exported from our own gem increases the
+ * refcount on the gem object itself instead of the f_count of the dmabuf.
+ */
drm_gem_object_reference(obj);
+ dma_buf_put(buffer);
return obj;
}
}
diff --git a/drivers/staging/omapdrm/omap_irq.c b/drivers/staging/omapdrm/omap_irq.c
new file mode 100644
index 000000000000..2629ba7be6c8
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_irq.c
@@ -0,0 +1,322 @@
+/*
+ * drivers/staging/omapdrm/omap_irq.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void omap_irq_error_handler(struct omap_drm_irq *irq,
+ uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+/* call with list_lock and dispc runtime held */
+static void omap_irq_update(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *irq;
+ uint32_t irqmask = priv->vblank_mask;
+
+ BUG_ON(!spin_is_locked(&list_lock));
+
+ list_for_each_entry(irq, &priv->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ DBG("irqmask=%08x", irqmask);
+
+ dispc_write_irqenable(irqmask);
+ dispc_read_irqenable(); /* flush posted write */
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!WARN_ON(irq->registered)) {
+ irq->registered = true;
+ list_add(&irq->node, &priv->irq_list);
+ omap_irq_update(dev);
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ unsigned long flags;
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!WARN_ON(!irq->registered)) {
+ irq->registered = false;
+ list_del(&irq->node);
+ omap_irq_update(dev);
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+struct omap_irq_wait {
+ struct omap_drm_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_irq_wait *wait =
+ container_of(irq, struct omap_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
+ uint32_t irqmask, int count)
+{
+ struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ wait->irq.irq = wait_irq;
+ wait->irq.irqmask = irqmask;
+ wait->count = count;
+ omap_irq_register(dev, &wait->irq);
+ return wait;
+}
+
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+ unsigned long timeout)
+{
+ int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
+ omap_irq_unregister(dev, &wait->irq);
+ kfree(wait);
+ if (ret == 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ DBG("dev=%p, crtc=%d", dev, crtc);
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+ priv->vblank_mask |= pipe2vbl(crtc);
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+
+ return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ DBG("dev=%p, crtc=%d", dev, crtc);
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+ priv->vblank_mask &= ~pipe2vbl(crtc);
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *handler, *n;
+ unsigned long flags;
+ unsigned int id;
+ u32 irqstatus;
+
+ irqstatus = dispc_read_irqstatus();
+ dispc_clear_irqstatus(irqstatus);
+ dispc_read_irqstatus(); /* flush posted write */
+
+ VERB("irqs: %08x", irqstatus);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (irqstatus & pipe2vbl(id))
+ drm_handle_vblank(dev, id);
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
+ if (handler->irqmask & irqstatus) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & irqstatus);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+void omap_irq_preinstall(struct drm_device *dev)
+{
+ DBG("dev=%p", dev);
+ dispc_runtime_get();
+ dispc_clear_irqstatus(0xffffffff);
+ dispc_runtime_put();
+}
+
+int omap_irq_postinstall(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *error_handler = &priv->error_handler;
+
+ DBG("dev=%p", dev);
+
+ INIT_LIST_HEAD(&priv->irq_list);
+
+ error_handler->irq = omap_irq_error_handler;
+ error_handler->irqmask = DISPC_IRQ_OCP_ERR;
+
+ /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think
+ * we just need to ignore it while enabling tv-out
+ */
+ error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+
+ omap_irq_register(dev, error_handler);
+
+ return 0;
+}
+
+void omap_irq_uninstall(struct drm_device *dev)
+{
+ DBG("dev=%p", dev);
+ /* TODO: we probably need to call drm_irq_uninstall() somewhere too */
+}
+
+/*
+ * We need a special version, instead of just using drm_irq_install(),
+ * because we need to register the irq via omapdss. Once omapdss and
+ * omapdrm are merged together we can assign the dispc hwmod data to
+ * ourselves and drop these and just use drm_irq_{install,uninstall}()
+ */
+
+int omap_drm_irq_install(struct drm_device *dev)
+{
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (dev->irq_enabled) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
+ }
+ dev->irq_enabled = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Before installing handler */
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+
+ ret = dispc_request_irq(dev->driver->irq_handler, dev);
+
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /* After installing handler */
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ dispc_free_irq(dev);
+ }
+
+ return ret;
+}
+
+int omap_drm_irq_uninstall(struct drm_device *dev)
+{
+ unsigned long irqflags;
+ int irq_enabled, i;
+
+ mutex_lock(&dev->struct_mutex);
+ irq_enabled = dev->irq_enabled;
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * Wake up any waiters so they don't hang.
+ */
+ if (dev->num_crtcs) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ DRM_WAKEUP(&dev->vbl_queue[i]);
+ dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+
+ if (!irq_enabled)
+ return -EINVAL;
+
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+
+ dispc_free_irq(dev);
+
+ return 0;
+}
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/staging/omapdrm/omap_plane.c
index 2a8e5bab49c9..c063476db3bb 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/staging/omapdrm/omap_plane.c
@@ -41,12 +41,14 @@ struct callback {
struct omap_plane {
struct drm_plane base;
- struct omap_overlay *ovl;
+ int id; /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
+ const char *name;
struct omap_overlay_info info;
+ struct omap_drm_apply apply;
/* position/orientation of scanout within the fb: */
struct omap_drm_window win;
-
+ bool enabled;
/* last fb that we pinned: */
struct drm_framebuffer *pinned_fb;
@@ -54,189 +56,15 @@ struct omap_plane {
uint32_t nformats;
uint32_t formats[32];
- /* for synchronizing access to unpins fifo */
- struct mutex unpin_mutex;
+ struct omap_drm_irq error_irq;
- /* set of bo's pending unpin until next END_WIN irq */
+ /* set of bo's pending unpin until next post_apply() */
DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
- int num_unpins, pending_num_unpins;
-
- /* for deferred unpin when we need to wait for scanout complete irq */
- struct work_struct work;
-
- /* callback on next endwin irq */
- struct callback endwin;
-};
-/* map from ovl->id to the irq we are interested in for scanout-done */
-static const uint32_t id2irq[] = {
- [OMAP_DSS_GFX] = DISPC_IRQ_GFX_END_WIN,
- [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_END_WIN,
- [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_END_WIN,
- [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_END_WIN,
+ /* XXX maybe get rid of this and handle vblank in crtc too? */
+ struct callback apply_done_cb;
};
-static void dispc_isr(void *arg, uint32_t mask)
-{
- struct drm_plane *plane = arg;
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_drm_private *priv = plane->dev->dev_private;
-
- omap_dispc_unregister_isr(dispc_isr, plane,
- id2irq[omap_plane->ovl->id]);
-
- queue_work(priv->wq, &omap_plane->work);
-}
-
-static void unpin_worker(struct work_struct *work)
-{
- struct omap_plane *omap_plane =
- container_of(work, struct omap_plane, work);
- struct callback endwin;
-
- mutex_lock(&omap_plane->unpin_mutex);
- DBG("unpinning %d of %d", omap_plane->num_unpins,
- omap_plane->num_unpins + omap_plane->pending_num_unpins);
- while (omap_plane->num_unpins > 0) {
- struct drm_gem_object *bo = NULL;
- int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
- WARN_ON(!ret);
- omap_gem_put_paddr(bo);
- drm_gem_object_unreference_unlocked(bo);
- omap_plane->num_unpins--;
- }
- endwin = omap_plane->endwin;
- omap_plane->endwin.fxn = NULL;
- mutex_unlock(&omap_plane->unpin_mutex);
-
- if (endwin.fxn)
- endwin.fxn(endwin.arg);
-}
-
-static void install_irq(struct drm_plane *plane)
-{
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay *ovl = omap_plane->ovl;
- int ret;
-
- ret = omap_dispc_register_isr(dispc_isr, plane, id2irq[ovl->id]);
-
- /*
- * omapdss has upper limit on # of registered irq handlers,
- * which we shouldn't hit.. but if we do the limit should
- * be raised or bad things happen:
- */
- WARN_ON(ret == -EBUSY);
-}
-
-/* push changes down to dss2 */
-static int commit(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay *ovl = omap_plane->ovl;
- struct omap_overlay_info *info = &omap_plane->info;
- int ret;
-
- DBG("%s", ovl->name);
- DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
- info->out_height, info->screen_width);
- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
- info->paddr, info->p_uv_addr);
-
- /* NOTE: do we want to do this at all here, or just wait
- * for dpms(ON) since other CRTC's may not have their mode
- * set yet, so fb dimensions may still change..
- */
- ret = ovl->set_overlay_info(ovl, info);
- if (ret) {
- dev_err(dev->dev, "could not set overlay info\n");
- return ret;
- }
-
- mutex_lock(&omap_plane->unpin_mutex);
- omap_plane->num_unpins += omap_plane->pending_num_unpins;
- omap_plane->pending_num_unpins = 0;
- mutex_unlock(&omap_plane->unpin_mutex);
-
- /* our encoder doesn't necessarily get a commit() after this, in
- * particular in the dpms() and mode_set_base() cases, so force the
- * manager to update:
- *
- * could this be in the encoder somehow?
- */
- if (ovl->manager) {
- ret = ovl->manager->apply(ovl->manager);
- if (ret) {
- dev_err(dev->dev, "could not apply settings\n");
- return ret;
- }
-
- /*
- * NOTE: really this should be atomic w/ mgr->apply() but
- * omapdss does not expose such an API
- */
- if (omap_plane->num_unpins > 0)
- install_irq(plane);
-
- } else {
- struct omap_drm_private *priv = dev->dev_private;
- queue_work(priv->wq, &omap_plane->work);
- }
-
-
- if (ovl->is_enabled(ovl)) {
- omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
- info->out_width, info->out_height);
- }
-
- return 0;
-}
-
-/* when CRTC that we are attached to has potentially changed, this checks
- * if we are attached to proper manager, and if necessary updates.
- */
-static void update_manager(struct drm_plane *plane)
-{
- struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay *ovl = omap_plane->ovl;
- struct omap_overlay_manager *mgr = NULL;
- int i;
-
- if (plane->crtc) {
- for (i = 0; i < priv->num_encoders; i++) {
- struct drm_encoder *encoder = priv->encoders[i];
- if (encoder->crtc == plane->crtc) {
- mgr = omap_encoder_get_manager(encoder);
- break;
- }
- }
- }
-
- if (ovl->manager != mgr) {
- bool enabled = ovl->is_enabled(ovl);
-
- /* don't switch things around with enabled overlays: */
- if (enabled)
- omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
-
- if (ovl->manager) {
- DBG("disconnecting %s from %s", ovl->name,
- ovl->manager->name);
- ovl->unset_manager(ovl);
- }
-
- if (mgr) {
- DBG("connecting %s to %s", ovl->name, mgr->name);
- ovl->set_manager(ovl, mgr);
- }
-
- if (enabled && mgr)
- omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
- }
-}
-
static void unpin(void *arg, struct drm_gem_object *bo)
{
struct drm_plane *plane = arg;
@@ -244,7 +72,6 @@ static void unpin(void *arg, struct drm_gem_object *bo)
if (kfifo_put(&omap_plane->unpin_fifo,
(const struct drm_gem_object **)&bo)) {
- omap_plane->pending_num_unpins++;
/* also hold a ref so it isn't free'd while pinned */
drm_gem_object_reference(bo);
} else {
@@ -264,13 +91,19 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
DBG("%p -> %p", pinned_fb, fb);
- mutex_lock(&omap_plane->unpin_mutex);
+ if (fb)
+ drm_framebuffer_reference(fb);
+
ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
- mutex_unlock(&omap_plane->unpin_mutex);
+
+ if (pinned_fb)
+ drm_framebuffer_unreference(pinned_fb);
if (ret) {
dev_err(plane->dev->dev, "could not swap %p -> %p\n",
omap_plane->pinned_fb, fb);
+ if (fb)
+ drm_framebuffer_unreference(fb);
omap_plane->pinned_fb = NULL;
return ret;
}
@@ -281,31 +114,90 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
return 0;
}
-/* update parameters that are dependent on the framebuffer dimensions and
- * position within the fb that this plane scans out from. This is called
- * when framebuffer or x,y base may have changed.
- */
-static void update_scanout(struct drm_plane *plane)
+static void omap_plane_pre_apply(struct omap_drm_apply *apply)
{
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay_info *info = &omap_plane->info;
+ struct omap_plane *omap_plane =
+ container_of(apply, struct omap_plane, apply);
struct omap_drm_window *win = &omap_plane->win;
+ struct drm_plane *plane = &omap_plane->base;
+ struct drm_device *dev = plane->dev;
+ struct omap_overlay_info *info = &omap_plane->info;
+ struct drm_crtc *crtc = plane->crtc;
+ enum omap_channel channel;
+ bool enabled = omap_plane->enabled && crtc;
+ bool ilace, replication;
int ret;
- ret = update_pin(plane, plane->fb);
- if (ret) {
- dev_err(plane->dev->dev,
- "could not pin fb: %d\n", ret);
- omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ DBG("%s, enabled=%d", omap_plane->name, enabled);
+
+ /* if fb has changed, pin new fb: */
+ update_pin(plane, enabled ? plane->fb : NULL);
+
+ if (!enabled) {
+ dispc_ovl_enable(omap_plane->id, false);
return;
}
+ channel = omap_crtc_channel(crtc);
+
+ /* update scanout: */
omap_framebuffer_update_scanout(plane->fb, win, info);
- DBG("%s: %d,%d: %08x %08x (%d)", omap_plane->ovl->name,
- win->src_x, win->src_y,
- (u32)info->paddr, (u32)info->p_uv_addr,
+ DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+ info->out_width, info->out_height,
info->screen_width);
+ DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+ info->paddr, info->p_uv_addr);
+
+ /* TODO: */
+ ilace = false;
+ replication = false;
+
+ /* and finally, update omapdss: */
+ ret = dispc_ovl_setup(omap_plane->id, info,
+ replication, omap_crtc_timings(crtc), false);
+ if (ret) {
+ dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
+ return;
+ }
+
+ dispc_ovl_enable(omap_plane->id, true);
+ dispc_ovl_set_channel_out(omap_plane->id, channel);
+}
+
+static void omap_plane_post_apply(struct omap_drm_apply *apply)
+{
+ struct omap_plane *omap_plane =
+ container_of(apply, struct omap_plane, apply);
+ struct drm_plane *plane = &omap_plane->base;
+ struct omap_overlay_info *info = &omap_plane->info;
+ struct drm_gem_object *bo = NULL;
+ struct callback cb;
+
+ cb = omap_plane->apply_done_cb;
+ omap_plane->apply_done_cb.fxn = NULL;
+
+ while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
+ omap_gem_put_paddr(bo);
+ drm_gem_object_unreference_unlocked(bo);
+ }
+
+ if (cb.fxn)
+ cb.fxn(cb.arg);
+
+ if (omap_plane->enabled) {
+ omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
+ info->out_width, info->out_height);
+ }
+}
+
+static int apply(struct drm_plane *plane)
+{
+ if (plane->crtc) {
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ return omap_crtc_apply(plane->crtc, &omap_plane->apply);
+ }
+ return 0;
}
int omap_plane_mode_set(struct drm_plane *plane,
@@ -313,7 +205,8 @@ int omap_plane_mode_set(struct drm_plane *plane,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+ uint32_t src_w, uint32_t src_h,
+ void (*fxn)(void *), void *arg)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_drm_window *win = &omap_plane->win;
@@ -329,17 +222,20 @@ int omap_plane_mode_set(struct drm_plane *plane,
win->src_w = src_w >> 16;
win->src_h = src_h >> 16;
- /* note: this is done after this fxn returns.. but if we need
- * to do a commit/update_scanout, etc before this returns we
- * need the current value.
- */
+ if (fxn) {
+ /* omap_crtc should ensure that a new page flip
+ * isn't permitted while there is one pending:
+ */
+ BUG_ON(omap_plane->apply_done_cb.fxn);
+
+ omap_plane->apply_done_cb.fxn = fxn;
+ omap_plane->apply_done_cb.arg = arg;
+ }
+
plane->fb = fb;
plane->crtc = crtc;
- update_scanout(plane);
- update_manager(plane);
-
- return 0;
+ return apply(plane);
}
static int omap_plane_update(struct drm_plane *plane,
@@ -349,9 +245,12 @@ static int omap_plane_update(struct drm_plane *plane,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
- return omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ omap_plane->enabled = true;
+ return omap_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h,
+ NULL, NULL);
}
static int omap_plane_disable(struct drm_plane *plane)
@@ -364,48 +263,32 @@ static int omap_plane_disable(struct drm_plane *plane)
static void omap_plane_destroy(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- DBG("%s", omap_plane->ovl->name);
+
+ DBG("%s", omap_plane->name);
+
+ omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+
omap_plane_disable(plane);
drm_plane_cleanup(plane);
- WARN_ON(omap_plane->pending_num_unpins + omap_plane->num_unpins > 0);
+
+ WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
kfifo_free(&omap_plane->unpin_fifo);
+
kfree(omap_plane);
}
int omap_plane_dpms(struct drm_plane *plane, int mode)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay *ovl = omap_plane->ovl;
- int r;
-
- DBG("%s: %d", omap_plane->ovl->name, mode);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+ int ret = 0;
- if (mode == DRM_MODE_DPMS_ON) {
- update_scanout(plane);
- r = commit(plane);
- if (!r)
- r = ovl->enable(ovl);
- } else {
- struct omap_drm_private *priv = plane->dev->dev_private;
- r = ovl->disable(ovl);
- update_pin(plane, NULL);
- queue_work(priv->wq, &omap_plane->work);
+ if (enabled != omap_plane->enabled) {
+ omap_plane->enabled = enabled;
+ ret = apply(plane);
}
- return r;
-}
-
-void omap_plane_on_endwin(struct drm_plane *plane,
- void (*fxn)(void *), void *arg)
-{
- struct omap_plane *omap_plane = to_omap_plane(plane);
-
- mutex_lock(&omap_plane->unpin_mutex);
- omap_plane->endwin.fxn = fxn;
- omap_plane->endwin.arg = arg;
- mutex_unlock(&omap_plane->unpin_mutex);
-
- install_irq(plane);
+ return ret;
}
/* helper to install properties which are common to planes and crtcs */
@@ -454,25 +337,13 @@ int omap_plane_set_property(struct drm_plane *plane,
int ret = -EINVAL;
if (property == priv->rotation_prop) {
- struct omap_overlay *ovl = omap_plane->ovl;
-
- DBG("%s: rotation: %02x", ovl->name, (uint32_t)val);
+ DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
omap_plane->win.rotation = val;
-
- if (ovl->is_enabled(ovl))
- ret = omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
- else
- ret = 0;
+ ret = apply(plane);
} else if (property == priv->zorder_prop) {
- struct omap_overlay *ovl = omap_plane->ovl;
-
- DBG("%s: zorder: %d", ovl->name, (uint32_t)val);
+ DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
omap_plane->info.zorder = val;
-
- if (ovl->is_enabled(ovl))
- ret = omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
- else
- ret = 0;
+ ret = apply(plane);
}
return ret;
@@ -485,28 +356,42 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.set_property = omap_plane_set_property,
};
+static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_plane *omap_plane =
+ container_of(irq, struct omap_plane, error_irq);
+ DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+}
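
omap_plane_error_irq() above recovers its omap_plane from the embedded omap_drm_irq member via container_of(). A self-contained userspace sketch of the same trick, using only the standard container_of definition — the surrounding types and names are made up:

#include <stddef.h>
#include <stdio.h>

/* same idea as the kernel macro: recover the outer struct from a member pointer */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_handler {
	void (*fn)(struct irq_handler *h, unsigned status);
};

struct plane {
	const char *name;
	struct irq_handler error_irq;   /* embedded member handed to the IRQ core */
};

static void plane_error_irq(struct irq_handler *h, unsigned status)
{
	struct plane *p = container_of(h, struct plane, error_irq);
	printf("%s: errors: %08x\n", p->name, status);
}

int main(void)
{
	struct plane p = { .name = "vid1", .error_irq = { plane_error_irq } };
	/* the IRQ core only sees &p.error_irq, yet the handler finds its plane */
	p.error_irq.fn(&p.error_irq, 0x4000);
	return 0;
}
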
+
+static const char *plane_names[] = {
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
+};
+
+static const uint32_t error_irqs[] = {
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+};
+
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
- struct omap_overlay *ovl, unsigned int possible_crtcs,
- bool priv)
+ int id, bool private_plane)
{
+ struct omap_drm_private *priv = dev->dev_private;
struct drm_plane *plane = NULL;
struct omap_plane *omap_plane;
+ struct omap_overlay_info *info;
int ret;
- DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
- possible_crtcs, priv);
-
- /* friendly reminder to update table for future hw: */
- WARN_ON(ovl->id >= ARRAY_SIZE(id2irq));
+ DBG("%s: priv=%d", plane_names[id], private_plane);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
- if (!omap_plane) {
- dev_err(dev->dev, "could not allocate plane\n");
+ if (!omap_plane)
goto fail;
- }
-
- mutex_init(&omap_plane->unpin_mutex);
ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
if (ret) {
@@ -514,39 +399,44 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
goto fail;
}
- INIT_WORK(&omap_plane->work, unpin_worker);
-
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
- ovl->supported_modes);
- omap_plane->ovl = ovl;
+ dss_feat_get_supported_color_modes(id));
+ omap_plane->id = id;
+ omap_plane->name = plane_names[id];
+
plane = &omap_plane->base;
- drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
- omap_plane->formats, omap_plane->nformats, priv);
+ omap_plane->apply.pre_apply = omap_plane_pre_apply;
+ omap_plane->apply.post_apply = omap_plane_post_apply;
+
+ omap_plane->error_irq.irqmask = error_irqs[id];
+ omap_plane->error_irq.irq = omap_plane_error_irq;
+ omap_irq_register(dev, &omap_plane->error_irq);
+
+ drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
+ omap_plane->formats, omap_plane->nformats, private_plane);
omap_plane_install_properties(plane, &plane->base);
/* get our starting configuration, set defaults for parameters
* we don't currently use, etc:
*/
- ovl->get_overlay_info(ovl, &omap_plane->info);
- omap_plane->info.rotation_type = OMAP_DSS_ROT_DMA;
- omap_plane->info.rotation = OMAP_DSS_ROT_0;
- omap_plane->info.global_alpha = 0xff;
- omap_plane->info.mirror = 0;
+ info = &omap_plane->info;
+ info->rotation_type = OMAP_DSS_ROT_DMA;
+ info->rotation = OMAP_DSS_ROT_0;
+ info->global_alpha = 0xff;
+ info->mirror = 0;
/* Set defaults depending on whether we are a CRTC or overlay
* layer.
* TODO add ioctl to give userspace an API to change this.. this
* will come in a subsequent patch.
*/
- if (priv)
+ if (private_plane)
omap_plane->info.zorder = 0;
else
- omap_plane->info.zorder = ovl->id;
-
- update_manager(plane);
+ omap_plane->info.zorder = id;
return plane;
diff --git a/drivers/staging/omapdrm/tcm.h b/drivers/staging/omapdrm/tcm.h
index d273e3ee0b4c..a8d5ce47686f 100644
--- a/drivers/staging/omapdrm/tcm.h
+++ b/drivers/staging/omapdrm/tcm.h
@@ -59,6 +59,8 @@ struct tcm {
u16 width, height; /* container dimensions */
int lut_id; /* Lookup table identifier */
+ unsigned int y_offset; /* offset to use for y coordinates */
+
/* 'pvt' structure shall contain any tcm details (attr) along with
linked list of allocated areas and mutex for mutually exclusive access
to the list. It may also contain copies of width and height to notice
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
index b5db2456bffa..b4febd79a68d 100644
--- a/drivers/staging/ozwpan/TODO
+++ b/drivers/staging/ozwpan/TODO
@@ -10,6 +10,5 @@ TODO:
- testing with as many devices as possible.
Please send any patches for this driver to
-Rupesh Gujare <rgujare@ozmodevices.com>
-Chris Kelly <ckelly@ozmodevices.com>
+Rupesh Gujare <rupesh.gujare@atmel.com>
and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 64913aeb0bac..ba15aeb70672 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -19,6 +19,7 @@
#include "ozpd.h"
#include "ozproto.h"
#include "ozevent.h"
+#include "ozcdev.h"
/*------------------------------------------------------------------------------
*/
#define OZ_RD_BUF_SZ 256
@@ -43,7 +44,7 @@ struct oz_serial_ctx {
/*------------------------------------------------------------------------------
*/
static struct oz_cdev g_cdev;
-struct class *g_oz_class;
+static struct class *g_oz_class;
/*------------------------------------------------------------------------------
* Context: process and softirq
*/
@@ -70,7 +71,7 @@ static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
/*------------------------------------------------------------------------------
* Context: process
*/
-int oz_cdev_open(struct inode *inode, struct file *filp)
+static int oz_cdev_open(struct inode *inode, struct file *filp)
{
struct oz_cdev *dev;
oz_trace("oz_cdev_open()\n");
@@ -82,7 +83,7 @@ int oz_cdev_open(struct inode *inode, struct file *filp)
/*------------------------------------------------------------------------------
* Context: process
*/
-int oz_cdev_release(struct inode *inode, struct file *filp)
+static int oz_cdev_release(struct inode *inode, struct file *filp)
{
oz_trace("oz_cdev_release()\n");
return 0;
@@ -90,14 +91,14 @@ int oz_cdev_release(struct inode *inode, struct file *filp)
/*------------------------------------------------------------------------------
* Context: process
*/
-ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
+static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
loff_t *fpos)
{
int n;
int ix;
struct oz_pd *pd;
- struct oz_serial_ctx *ctx = 0;
+ struct oz_serial_ctx *ctx;
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
@@ -142,12 +143,12 @@ out2:
/*------------------------------------------------------------------------------
* Context: process
*/
-ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
- loff_t *fpos)
+static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *fpos)
{
struct oz_pd *pd;
struct oz_elt_buf *eb;
- struct oz_elt_info *ei = 0;
+ struct oz_elt_info *ei;
struct oz_elt *elt;
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
@@ -182,7 +183,7 @@ ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
ctx->tx_seq_num = 1;
spin_lock(&eb->lock);
if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
- ei = 0;
+ ei = NULL;
spin_unlock(&eb->lock);
}
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
@@ -199,7 +200,7 @@ out:
/*------------------------------------------------------------------------------
* Context: process
*/
-static int oz_set_active_pd(u8 *addr)
+static int oz_set_active_pd(const u8 *addr)
{
int rc = 0;
struct oz_pd *pd;
@@ -217,7 +218,7 @@ static int oz_set_active_pd(u8 *addr)
if (is_zero_ether_addr(addr)) {
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
- g_cdev.active_pd = 0;
+ g_cdev.active_pd = NULL;
memset(g_cdev.active_addr, 0,
sizeof(g_cdev.active_addr));
spin_unlock_bh(&g_cdev.lock);
@@ -232,7 +233,8 @@ static int oz_set_active_pd(u8 *addr)
/*------------------------------------------------------------------------------
* Context: process
*/
-long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
int rc = 0;
if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
@@ -296,7 +298,7 @@ long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*------------------------------------------------------------------------------
* Context: process
*/
-unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
+static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
{
unsigned int ret = 0;
struct oz_cdev *dev = filp->private_data;
@@ -317,7 +319,7 @@ unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
}
/*------------------------------------------------------------------------------
*/
-const struct file_operations oz_fops = {
+static const struct file_operations oz_fops = {
.owner = THIS_MODULE,
.open = oz_cdev_open,
.release = oz_cdev_release,
@@ -385,7 +387,7 @@ int oz_cdev_deregister(void)
*/
int oz_cdev_init(void)
{
- oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, 0, 0);
+ oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, NULL, 0);
oz_app_enable(OZ_APPID_SERIAL, 1);
return 0;
}
@@ -394,7 +396,7 @@ int oz_cdev_init(void)
*/
void oz_cdev_term(void)
{
- oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, 0, 0);
+ oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, NULL, 0);
oz_app_enable(OZ_APPID_SERIAL, 0);
}
/*------------------------------------------------------------------------------
@@ -403,8 +405,8 @@ void oz_cdev_term(void)
int oz_cdev_start(struct oz_pd *pd, int resume)
{
struct oz_serial_ctx *ctx;
- struct oz_serial_ctx *old_ctx = 0;
- oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, 0, resume);
+ struct oz_serial_ctx *old_ctx;
+ oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, NULL, resume);
if (resume) {
oz_trace("Serial service resumed.\n");
return 0;
@@ -440,22 +442,22 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
void oz_cdev_stop(struct oz_pd *pd, int pause)
{
struct oz_serial_ctx *ctx;
- oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, 0, pause);
+ oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, NULL, pause);
if (pause) {
oz_trace("Serial service paused.\n");
return;
}
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
- pd->app_ctx[OZ_APPID_SERIAL-1] = 0;
+ pd->app_ctx[OZ_APPID_SERIAL-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
if (ctx)
oz_cdev_release_ctx(ctx);
spin_lock(&g_cdev.lock);
if (pd == g_cdev.active_pd)
- g_cdev.active_pd = 0;
+ g_cdev.active_pd = NULL;
else
- pd = 0;
+ pd = NULL;
spin_unlock(&g_cdev.lock);
if (pd) {
oz_pd_put(pd);
@@ -523,9 +525,3 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
out:
oz_cdev_release_ctx(ctx);
}
-/*------------------------------------------------------------------------------
- * Context: softirq
- */
-void oz_cdev_heartbeat(struct oz_pd *pd)
-{
-}
diff --git a/drivers/staging/ozwpan/ozcdev.h b/drivers/staging/ozwpan/ozcdev.h
index 698014bb8d72..dd11935a093f 100644
--- a/drivers/staging/ozwpan/ozcdev.h
+++ b/drivers/staging/ozwpan/ozcdev.h
@@ -13,6 +13,5 @@ void oz_cdev_term(void);
int oz_cdev_start(struct oz_pd *pd, int resume);
void oz_cdev_stop(struct oz_pd *pd, int pause);
void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt);
-void oz_cdev_heartbeat(struct oz_pd *pd);
#endif /* _OZCDEV_H */
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
index 988f522475d9..ac90fc7f5441 100644
--- a/drivers/staging/ozwpan/ozeltbuf.c
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -64,7 +64,7 @@ void oz_elt_buf_term(struct oz_elt_buf *buf)
*/
struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
{
- struct oz_elt_info *ei = 0;
+ struct oz_elt_info *ei = NULL;
spin_lock_bh(&buf->lock);
if (buf->free_elts && buf->elt_pool) {
ei = container_of(buf->elt_pool, struct oz_elt_info, link);
@@ -82,9 +82,9 @@ struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
if (ei) {
ei->flags = 0;
ei->app_id = 0;
- ei->callback = 0;
+ ei->callback = NULL;
ei->context = 0;
- ei->stream = 0;
+ ei->stream = NULL;
ei->magic = OZ_ELT_INFO_MAGIC_USED;
INIT_LIST_HEAD(&ei->link);
INIT_LIST_HEAD(&ei->link_order);
@@ -135,7 +135,7 @@ int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
oz_trace("oz_elt_stream_create(0x%x)\n", id);
st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC | __GFP_ZERO);
- if (st == 0)
+ if (st == NULL)
return -ENOMEM;
atomic_set(&st->ref_count, 1);
st->id = id;
@@ -151,7 +151,7 @@ int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
{
struct list_head *e;
- struct oz_elt_stream *st;
+ struct oz_elt_stream *st = NULL;
oz_trace("oz_elt_stream_delete(0x%x)\n", id);
spin_lock_bh(&buf->lock);
e = buf->stream_list.next;
@@ -161,7 +161,7 @@ int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
list_del(e);
break;
}
- st = 0;
+ st = NULL;
}
if (!st) {
spin_unlock_bh(&buf->lock);
@@ -208,7 +208,7 @@ void oz_elt_stream_put(struct oz_elt_stream *st)
int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
struct oz_elt_info *ei)
{
- struct oz_elt_stream *st = 0;
+ struct oz_elt_stream *st = NULL;
struct list_head *e;
if (id) {
list_for_each(e, &buf->stream_list) {
@@ -297,7 +297,7 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
"Stream down: %d %d\n",
ei->stream->buf_count, ei->length);
oz_elt_stream_put(ei->stream);
- ei->stream = 0;
+ ei->stream = NULL;
}
INIT_LIST_HEAD(&ei->link_order);
list_add_tail(&ei->link, list);
@@ -319,7 +319,7 @@ int oz_are_elts_available(struct oz_elt_buf *buf)
*/
void oz_trim_elt_pool(struct oz_elt_buf *buf)
{
- struct list_head *free = 0;
+ struct list_head *free = NULL;
struct list_head *e;
spin_lock_bh(&buf->lock);
while (buf->free_elts > buf->max_free_elts) {
diff --git a/drivers/staging/ozwpan/ozevent.c b/drivers/staging/ozwpan/ozevent.c
index 50578ba0061c..77e86753610d 100644
--- a/drivers/staging/ozwpan/ozevent.c
+++ b/drivers/staging/ozwpan/ozevent.c
@@ -92,7 +92,7 @@ static void oz_events_clear(struct oz_evtdev *dev)
/*------------------------------------------------------------------------------
* Context: process
*/
-int oz_events_open(struct inode *inode, struct file *filp)
+static int oz_events_open(struct inode *inode, struct file *filp)
{
oz_trace("oz_evt_open()\n");
oz_trace("Open flags: 0x%x\n", filp->f_flags);
@@ -107,7 +107,7 @@ int oz_events_open(struct inode *inode, struct file *filp)
/*------------------------------------------------------------------------------
* Context: process
*/
-int oz_events_release(struct inode *inode, struct file *filp)
+static int oz_events_release(struct inode *inode, struct file *filp)
{
oz_events_clear(&g_evtdev);
atomic_dec(&g_evtdev.users);
@@ -118,7 +118,7 @@ int oz_events_release(struct inode *inode, struct file *filp)
/*------------------------------------------------------------------------------
* Context: process
*/
-ssize_t oz_events_read(struct file *filp, char __user *buf, size_t count,
+static ssize_t oz_events_read(struct file *filp, char __user *buf, size_t count,
loff_t *fpos)
{
struct oz_evtdev *dev = &g_evtdev;
@@ -157,7 +157,7 @@ out:
}
/*------------------------------------------------------------------------------
*/
-const struct file_operations oz_events_fops = {
+static const struct file_operations oz_events_fops = {
.owner = THIS_MODULE,
.open = oz_events_open,
.release = oz_events_release,
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index b2d77df2a526..8ac26f584fd4 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -36,6 +36,7 @@
#include "oztrace.h"
#include "ozurbparanoia.h"
#include "ozevent.h"
+#include "ozhcd.h"
/*------------------------------------------------------------------------------
* Number of units of buffering to capture for an isochronous IN endpoint before
* allowing data to be indicated up.
@@ -248,7 +249,7 @@ static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
*/
static struct oz_urb_link *oz_alloc_urb_link(void)
{
- struct oz_urb_link *urbl = 0;
+ struct oz_urb_link *urbl = NULL;
unsigned long irq_state;
spin_lock_irqsave(&g_link_lock, irq_state);
if (g_link_pool) {
@@ -257,7 +258,7 @@ static struct oz_urb_link *oz_alloc_urb_link(void)
--g_link_pool_size;
}
spin_unlock_irqrestore(&g_link_lock, irq_state);
- if (urbl == 0)
+ if (urbl == NULL)
urbl = kmalloc(sizeof(struct oz_urb_link), GFP_ATOMIC);
return urbl;
}
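
oz_alloc_urb_link() here (and oz_free_urb_link() just below) recycle links through a small lock-protected free list, falling back to the allocator when the list is empty and capping how many entries are kept. A rough userspace sketch of that bounded free-list pattern, with a pthread mutex standing in for the spinlock and invented names:

#include <pthread.h>
#include <stdlib.h>

#define POOL_MAX 16

struct link { struct link *next; };
struct item { struct link link; int payload; };

static struct link *pool;
static int pool_size;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static struct item *item_alloc(void)
{
	struct item *it = NULL;

	pthread_mutex_lock(&pool_lock);
	if (pool) {                       /* reuse a pooled item if we have one */
		it = (struct item *)pool; /* link is the first member, cast is safe */
		pool = pool->next;
		pool_size--;
	}
	pthread_mutex_unlock(&pool_lock);

	if (!it)                          /* otherwise fall back to the allocator */
		it = malloc(sizeof(*it));
	return it;
}

static void item_free(struct item *it)
{
	pthread_mutex_lock(&pool_lock);
	if (pool_size < POOL_MAX) {       /* keep it, up to a fixed cap */
		it->link.next = pool;
		pool = &it->link;
		pool_size++;
		it = NULL;
	}
	pthread_mutex_unlock(&pool_lock);
	free(it);                         /* free(NULL) is a no-op, like kfree(NULL) */
}

int main(void)
{
	struct item *a = item_alloc();
	item_free(a);                     /* goes back to the pool */
	return 0;
}
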
@@ -274,7 +275,7 @@ static void oz_free_urb_link(struct oz_urb_link *urbl)
if (g_link_pool_size < OZ_MAX_LINK_POOL_SIZE) {
urbl->link.next = g_link_pool;
g_link_pool = &urbl->link;
- urbl = 0;
+ urbl = NULL;
g_link_pool_size++;
}
spin_unlock_irqrestore(&g_link_lock, irq_state);
@@ -291,7 +292,7 @@ static void oz_empty_link_pool(void)
unsigned long irq_state;
spin_lock_irqsave(&g_link_lock, irq_state);
e = g_link_pool;
- g_link_pool = 0;
+ g_link_pool = NULL;
g_link_pool_size = 0;
spin_unlock_irqrestore(&g_link_lock, irq_state);
while (e) {
@@ -326,7 +327,7 @@ static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
* disabled.
* Context: softirq or process
*/
-struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
+static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
{
struct oz_urb_link *urbl;
struct list_head *e;
@@ -337,7 +338,7 @@ struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
return urbl;
}
}
- return 0;
+ return NULL;
}
/*------------------------------------------------------------------------------
* This is called when we have finished processing an urb. It unlinks it from
@@ -349,13 +350,13 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned long irq_state;
- struct oz_urb_link *cancel_urbl = 0;
+ struct oz_urb_link *cancel_urbl = NULL;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
usb_hcd_unlink_urb_from_ep(hcd, urb);
/* Clear hcpriv which will prevent it being put in the cancel list
* in the event that an attempt is made to cancel it.
*/
- urb->hcpriv = 0;
+ urb->hcpriv = NULL;
/* Walk the cancel list in case the urb is already sitting there.
* Since we process the cancel list in a tasklet rather than in
* the dequeue function this could happen.
@@ -416,7 +417,8 @@ static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
/*------------------------------------------------------------------------------
* Context: softirq
*/
-void oz_complete_buffered_urb(struct oz_port *port, struct oz_endpoint *ep,
+static void oz_complete_buffered_urb(struct oz_port *port,
+ struct oz_endpoint *ep,
struct urb *urb)
{
u8 data_len, available_space, copy_len;
@@ -507,7 +509,7 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
ep->last_jiffies = jiffies;
ep->credit = 0;
oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num,
- 0, 0, ep->credit);
+ 0, NULL, ep->credit);
}
} else {
err = -EPIPE;
@@ -525,7 +527,7 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
struct urb *urb)
{
- struct oz_urb_link *urbl = 0;
+ struct oz_urb_link *urbl = NULL;
struct oz_endpoint *ep;
spin_lock_bh(&port->ozhcd->hcd_lock);
if (in_dir)
@@ -540,7 +542,7 @@ static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
list_del_init(e);
break;
}
- urbl = 0;
+ urbl = NULL;
}
}
spin_unlock_bh(&port->ozhcd->hcd_lock);
@@ -556,8 +558,8 @@ static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
u8 req_id)
{
struct oz_hcd *ozhcd = port->ozhcd;
- struct urb *urb = 0;
- struct oz_urb_link *urbl = 0;
+ struct urb *urb = NULL;
+ struct oz_urb_link *urbl = NULL;
struct oz_endpoint *ep;
spin_lock_bh(&ozhcd->hcd_lock);
@@ -630,13 +632,13 @@ static inline void oz_hcd_put(struct oz_hcd *ozhcd)
void *oz_hcd_pd_arrived(void *hpd)
{
int i;
- void *hport = 0;
- struct oz_hcd *ozhcd = 0;
+ void *hport = NULL;
+ struct oz_hcd *ozhcd = NULL;
struct oz_endpoint *ep;
oz_trace("oz_hcd_pd_arrived()\n");
ozhcd = oz_hcd_claim();
- if (ozhcd == 0)
- return 0;
+ if (ozhcd == NULL)
+ return NULL;
/* Allocate an endpoint object in advance (before holding hcd lock) to
* use for out endpoint 0.
*/
@@ -663,7 +665,7 @@ void *oz_hcd_pd_arrived(void *hpd)
/* Attach out endpoint 0.
*/
ozhcd->ports[i].out_ep[0] = ep;
- ep = 0;
+ ep = NULL;
hport = &ozhcd->ports[i];
spin_unlock_bh(&ozhcd->hcd_lock);
if (ozhcd->flags & OZ_HDC_F_SUSPENDED) {
@@ -676,7 +678,7 @@ void *oz_hcd_pd_arrived(void *hpd)
}
out:
if (ep) /* ep is non-null if not used. */
- oz_ep_free(0, ep);
+ oz_ep_free(NULL, ep);
oz_hcd_put(ozhcd);
return hport;
}
@@ -691,15 +693,15 @@ void oz_hcd_pd_departed(void *hport)
struct oz_port *port = (struct oz_port *)hport;
struct oz_hcd *ozhcd;
void *hpd;
- struct oz_endpoint *ep = 0;
+ struct oz_endpoint *ep = NULL;
oz_trace("oz_hcd_pd_departed()\n");
- if (port == 0) {
+ if (port == NULL) {
oz_trace("oz_hcd_pd_departed() port = 0\n");
return;
}
ozhcd = port->ozhcd;
- if (ozhcd == 0)
+ if (ozhcd == NULL)
return;
/* Check if this is the connection port - if so clear it.
*/
@@ -717,7 +719,7 @@ void oz_hcd_pd_departed(void *hport)
oz_clean_endpoints_for_config(ozhcd->hcd, port);
spin_lock_bh(&port->port_lock);
hpd = port->hpd;
- port->hpd = 0;
+ port->hpd = NULL;
port->bus_addr = 0xff;
port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
port->flags |= OZ_PORT_F_CHANGED;
@@ -728,7 +730,7 @@ void oz_hcd_pd_departed(void *hport)
*/
if (port->out_ep[0]) {
ep = port->out_ep[0];
- port->out_ep[0] = 0;
+ port->out_ep[0] = NULL;
}
spin_unlock_bh(&port->port_lock);
if (ep)
@@ -757,14 +759,14 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
/*------------------------------------------------------------------------------
* Context: softirq
*/
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, u8 *desc,
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
int length, int offset, int total_size)
{
struct oz_port *port = (struct oz_port *)hport;
struct urb *urb;
int err = 0;
- oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, 0, status);
+ oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, NULL, status);
oz_trace("oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
length, offset, total_size);
urb = oz_find_urb_by_id(port, 0, req_id);
@@ -893,7 +895,7 @@ static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
/*------------------------------------------------------------------------------
* Context: softirq
*/
-void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, u8 *data,
+void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
int data_len)
{
struct oz_port *port = (struct oz_port *)hport;
@@ -903,7 +905,7 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, u8 *data,
unsigned windex;
unsigned wvalue;
- oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, 0, rcode);
+ oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, NULL, rcode);
oz_trace("oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb) {
@@ -946,7 +948,8 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, u8 *data,
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-static int oz_hcd_buffer_data(struct oz_endpoint *ep, u8 *data, int data_len)
+static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
+ int data_len)
{
int space;
int copy_len;
@@ -981,14 +984,14 @@ static int oz_hcd_buffer_data(struct oz_endpoint *ep, u8 *data, int data_len)
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-void oz_hcd_data_ind(void *hport, u8 endpoint, u8 *data, int data_len)
+void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
{
struct oz_port *port = (struct oz_port *)hport;
struct oz_endpoint *ep;
struct oz_hcd *ozhcd = port->ozhcd;
spin_lock_bh(&ozhcd->hcd_lock);
ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
- if (ep == 0)
+ if (ep == NULL)
goto done;
switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
@@ -1056,7 +1059,8 @@ int oz_hcd_heartbeat(void *hport)
ep->credit += jiffies_to_msecs(now - ep->last_jiffies);
if (ep->credit > ep->credit_ceiling)
ep->credit = ep->credit_ceiling;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, 0, ep->credit);
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, NULL,
+ ep->credit);
ep->last_jiffies = now;
while (ep->credit && !list_empty(&ep->urb_list)) {
urbl = list_first_entry(&ep->urb_list,
@@ -1065,8 +1069,8 @@ int oz_hcd_heartbeat(void *hport)
if ((ep->credit + 1) < urb->number_of_packets)
break;
ep->credit -= urb->number_of_packets;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, 0,
- ep->credit);
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, NULL,
+ ep->credit);
list_move_tail(&urbl->link, &xfr_list);
}
}
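
The heartbeat path above paces isochronous URBs with an elapsed-time credit: credit accrues in milliseconds, is clamped to a ceiling, and each URB costs its packet count. A tiny sketch of that crediting rule under assumed constants — all names and values below are illustrative, not the driver's:

#include <stdio.h>

#define CREDIT_CEILING 100

struct ep_sched {
	long credit;    /* milliseconds of transfer budget */
	long last_ms;   /* timestamp of the previous heartbeat */
};

/* returns how many queued URBs may be sent at time now_ms */
static int heartbeat(struct ep_sched *ep, long now_ms,
		     const int *urb_packets, int nurbs)
{
	int sent = 0;

	ep->credit += now_ms - ep->last_ms;       /* earn credit for elapsed time */
	if (ep->credit > CREDIT_CEILING)
		ep->credit = CREDIT_CEILING;      /* never bank more than the ceiling */
	ep->last_ms = now_ms;

	while (sent < nurbs && ep->credit > 0) {
		if (ep->credit + 1 < urb_packets[sent])
			break;                    /* not enough budget for the next URB */
		ep->credit -= urb_packets[sent];
		sent++;
	}
	return sent;
}

int main(void)
{
	struct ep_sched ep = { .credit = 0, .last_ms = 0 };
	int pkts[] = { 8, 8, 8 };
	printf("sent %d urbs\n", heartbeat(&ep, 20, pkts, 3));   /* prints: sent 2 urbs */
	return 0;
}
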
@@ -1091,22 +1095,22 @@ int oz_hcd_heartbeat(void *hport)
list_for_each(e, &port->isoc_in_ep) {
struct oz_endpoint *ep = ep_from_link(e);
if (ep->flags & OZ_F_EP_BUFFERING) {
- if (ep->buffered_units * OZ_IN_BUFFERING_UNITS) {
+ if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
ep->flags &= ~OZ_F_EP_BUFFERING;
ep->credit = 0;
oz_event_log(OZ_EVT_EP_CREDIT,
ep->ep_num | USB_DIR_IN,
- 0, 0, ep->credit);
+ 0, NULL, ep->credit);
ep->last_jiffies = now;
ep->start_frame = 0;
oz_event_log(OZ_EVT_EP_BUFFERING,
- ep->ep_num | USB_DIR_IN, 0, 0, 0);
+ ep->ep_num | USB_DIR_IN, 0, NULL, 0);
}
continue;
}
ep->credit += jiffies_to_msecs(now - ep->last_jiffies);
oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
- 0, 0, ep->credit);
+ 0, NULL, ep->credit);
ep->last_jiffies = now;
while (!list_empty(&ep->urb_list)) {
struct oz_urb_link *urbl =
@@ -1151,7 +1155,7 @@ int oz_hcd_heartbeat(void *hport)
list_move_tail(&urbl->link, &xfr_list);
ep->credit -= urb->number_of_packets;
oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
- 0, 0, ep->credit);
+ 0, NULL, ep->credit);
}
}
if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
@@ -1244,7 +1248,7 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
ep->flags |= OZ_F_EP_BUFFERING;
oz_event_log(OZ_EVT_EP_BUFFERING,
- ep->ep_num | USB_DIR_IN, 1, 0, 0);
+ ep->ep_num | USB_DIR_IN, 1, NULL, 0);
} else {
ep->flags |= OZ_F_EP_HAVE_STREAM;
if (oz_usb_stream_create(port->hpd, ep_num))
@@ -1300,7 +1304,7 @@ static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
*/
if ((mask & (1<<i)) && port->out_ep[i]) {
e = &port->out_ep[i]->link;
- port->out_ep[i] = 0;
+ port->out_ep[i] = NULL;
/* Remove from isoc list if present.
*/
list_move_tail(e, &ep_list);
@@ -1309,7 +1313,7 @@ static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
*/
if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
e = &port->in_ep[i]->link;
- port->in_ep[i] = 0;
+ port->in_ep[i] = NULL;
list_move_tail(e, &ep_list);
}
}
@@ -1370,7 +1374,7 @@ static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
if (port->iface) {
oz_trace("Freeing interfaces object.\n");
kfree(port->iface);
- port->iface = 0;
+ port->iface = NULL;
}
port->num_iface = 0;
spin_unlock_bh(&ozhcd->hcd_lock);
@@ -1380,7 +1384,7 @@ static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
*/
static void *oz_claim_hpd(struct oz_port *port)
{
- void *hpd = 0;
+ void *hpd = NULL;
struct oz_hcd *ozhcd = port->ozhcd;
spin_lock_bh(&ozhcd->hcd_lock);
hpd = port->hpd;
@@ -1399,13 +1403,13 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
unsigned windex;
unsigned wvalue;
unsigned wlength;
- void *hpd = 0;
+ void *hpd = NULL;
u8 req_id;
int rc = 0;
unsigned complete = 0;
int port_ix = -1;
- struct oz_port *port = 0;
+ struct oz_port *port = NULL;
oz_trace2(OZ_TRACE_URB, "%lu: oz_process_ep0_urb(%p)\n", jiffies, urb);
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
@@ -1437,7 +1441,7 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
req_id = port->next_req_id++;
hpd = oz_claim_hpd(port);
- if (hpd == 0) {
+ if (hpd == NULL) {
oz_trace("Cannot claim port\n");
rc = -EPIPE;
goto out;
@@ -1452,7 +1456,7 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
break;
case USB_REQ_SET_ADDRESS:
oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest,
- 0, 0, setup->bRequestType);
+ 0, NULL, setup->bRequestType);
oz_trace("USB_REQ_SET_ADDRESS - req\n");
oz_trace("Port %d address is 0x%x\n", ozhcd->conn_port,
(u8)le16_to_cpu(setup->wValue));
@@ -1473,8 +1477,8 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
/* We short circuit this case and reply directly since
* we have the selected configuration number cached.
*/
- oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0, 0,
- setup->bRequestType);
+ oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0,
+ NULL, setup->bRequestType);
oz_trace("USB_REQ_GET_CONFIGURATION - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
@@ -1489,8 +1493,8 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
/* We short circuit this case and reply directly since
* we have the selected interface alternative cached.
*/
- oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0, 0,
- setup->bRequestType);
+ oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0,
+ NULL, setup->bRequestType);
oz_trace("USB_REQ_GET_INTERFACE - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
@@ -1583,7 +1587,7 @@ static void oz_urb_process_tasklet(unsigned long unused)
struct urb *urb;
struct oz_hcd *ozhcd = oz_hcd_claim();
int rc = 0;
- if (ozhcd == 0)
+ if (ozhcd == NULL)
return;
/* This is called from a tasklet so is in softirq context but the urb
* list is filled from any context so we need to lock
@@ -1617,17 +1621,17 @@ static void oz_urb_process_tasklet(unsigned long unused)
*/
static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
{
- struct oz_urb_link *urbl = 0;
+ struct oz_urb_link *urbl = NULL;
struct list_head *e;
struct oz_hcd *ozhcd;
unsigned long irq_state;
u8 ix;
- if (port == 0) {
+ if (port == NULL) {
oz_trace("ERRORERROR: oz_urb_cancel(%p) port is null\n", urb);
return;
}
ozhcd = port->ozhcd;
- if (ozhcd == 0) {
+ if (ozhcd == NULL) {
oz_trace("ERRORERROR: oz_urb_cancel(%p) ozhcd is null\n", urb);
return;
}
@@ -1644,7 +1648,7 @@ static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
}
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- urbl = 0;
+ urbl = NULL;
/* Look in the orphanage.
*/
@@ -1658,7 +1662,7 @@ static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
}
}
ix = (ep_num & 0xf);
- urbl = 0;
+ urbl = NULL;
if ((ep_num & USB_DIR_IN) && ix)
urbl = oz_remove_urb(port->in_ep[ix], urb);
else
@@ -1680,7 +1684,7 @@ static void oz_urb_cancel_tasklet(unsigned long unused)
unsigned long irq_state;
struct urb *urb;
struct oz_hcd *ozhcd = oz_hcd_claim();
- if (ozhcd == 0)
+ if (ozhcd == NULL)
return;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
while (!list_empty(&ozhcd->urb_cancel_list)) {
@@ -1772,7 +1776,7 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
jiffies, urb);
oz_event_log(OZ_EVT_URB_SUBMIT, oz_get_irq_ctx(),
(u16)urb->number_of_packets, urb, urb->pipe);
- if (unlikely(ozhcd == 0)) {
+ if (unlikely(ozhcd == NULL)) {
oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not ozhcd.\n",
jiffies, urb);
return -EPIPE;
@@ -1786,7 +1790,7 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
if (port_ix < 0)
return -EPIPE;
port = &ozhcd->ports[port_ix];
- if (port == 0)
+ if (port == NULL)
return -EPIPE;
if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
oz_trace("Refusing URB port_ix = %d devnum = %d\n",
@@ -1797,7 +1801,7 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
/* Put request in queue for processing by tasklet.
*/
urbl = oz_alloc_urb_link();
- if (unlikely(urbl == 0))
+ if (unlikely(urbl == NULL))
return -ENOMEM;
urbl->urb = urb;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
@@ -1819,10 +1823,10 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb)
{
- struct oz_urb_link *urbl = 0;
+ struct oz_urb_link *urbl = NULL;
struct list_head *e;
- if (unlikely(ep == 0))
- return 0;
+ if (unlikely(ep == NULL))
+ return NULL;
list_for_each(e, &ep->urb_list) {
urbl = container_of(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
@@ -1834,12 +1838,12 @@ static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
oz_event_log(OZ_EVT_EP_CREDIT,
usb_pipein(urb->pipe) ?
(ep->ep_num | USB_DIR_IN) : ep->ep_num,
- 0, 0, ep->credit);
+ 0, NULL, ep->credit);
}
return urbl;
}
}
- return 0;
+ return NULL;
}
/*------------------------------------------------------------------------------
* Called to dequeue a previously submitted urb for the device.
@@ -1848,12 +1852,12 @@ static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- struct oz_urb_link *urbl = 0;
+ struct oz_urb_link *urbl = NULL;
int rc;
unsigned long irq_state;
oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_dequeue(%p)\n", jiffies, urb);
urbl = oz_alloc_urb_link();
- if (unlikely(urbl == 0))
+ if (unlikely(urbl == NULL))
return -ENOMEM;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
/* The following function checks the urb is still in the queue
@@ -2193,7 +2197,7 @@ static int oz_plat_probe(struct platform_device *dev)
struct oz_hcd *ozhcd;
oz_trace("oz_plat_probe()\n");
hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
- if (hcd == 0) {
+ if (hcd == NULL) {
oz_trace("Failed to created hcd object OK\n");
return -ENOMEM;
}
@@ -2232,12 +2236,12 @@ static int oz_plat_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct oz_hcd *ozhcd;
oz_trace("oz_plat_remove()\n");
- if (hcd == 0)
+ if (hcd == NULL)
return -1;
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&g_hcdlock);
if (ozhcd == g_ozhcd)
- g_ozhcd = 0;
+ g_ozhcd = NULL;
spin_unlock_bh(&g_hcdlock);
oz_trace("Clearing orphanage\n");
oz_hcd_clear_orphanage(ozhcd, -EPIPE);
@@ -2278,7 +2282,7 @@ int oz_hcd_init(void)
if (err)
goto error;
g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
- if (g_plat_dev == 0) {
+ if (g_plat_dev == NULL) {
err = -ENOMEM;
goto error1;
}
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
index ef6c5ab753ee..57a0cbd58551 100644
--- a/drivers/staging/ozwpan/ozmain.c
+++ b/drivers/staging/ozwpan/ozmain.c
@@ -22,7 +22,7 @@
* bind to nothing. '*' means bind to all netcards - this includes non-802.11
* netcards. Bindings can be added later using an IOCTL.
*/
-char *g_net_dev = "";
+static char *g_net_dev = "";
/*------------------------------------------------------------------------------
* Context: process
*/
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index 118a4db74dec..f8b9da080c4b 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -46,7 +46,7 @@ static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions.
*/
-static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
+static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
{oz_usb_init,
oz_usb_term,
oz_usb_start,
@@ -61,8 +61,8 @@ static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
oz_def_app_start,
oz_def_app_stop,
oz_def_app_rx,
- 0,
- 0,
+ NULL,
+ NULL,
OZ_APPID_UNUSED1},
{oz_def_app_init,
@@ -70,8 +70,8 @@ static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
oz_def_app_start,
oz_def_app_stop,
oz_def_app_rx,
- 0,
- 0,
+ NULL,
+ NULL,
OZ_APPID_UNUSED2},
{oz_cdev_init,
@@ -79,8 +79,8 @@ static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
oz_cdev_start,
oz_cdev_stop,
oz_cdev_rx,
- 0,
- 0,
+ NULL,
+ NULL,
OZ_APPID_SERIAL},
};
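
g_app_if[] above becomes a const table of per-application handlers, which oz_services_start()/oz_services_stop() later in this patch walk by function pointer. A compact userspace sketch of that dispatch-table shape — the handlers and ids below are placeholders, not the driver's:

#include <stdio.h>

struct app_if {
	int  (*start)(int resume);
	void (*stop)(int pause);
	void (*rx)(const char *msg);
	int  app_id;
};

static int  usb_start(int resume)      { printf("usb start %d\n", resume); return 0; }
static void usb_stop(int pause)        { printf("usb stop %d\n", pause); }
static void usb_rx(const char *msg)    { printf("usb rx: %s\n", msg); }
static int  serial_start(int resume)   { printf("serial start %d\n", resume); return 0; }
static void serial_stop(int pause)     { printf("serial stop %d\n", pause); }
static void serial_rx(const char *msg) { printf("serial rx: %s\n", msg); }

/* const table: one row per application id */
static const struct app_if app_if[] = {
	{ usb_start,    usb_stop,    usb_rx,    1 },
	{ serial_start, serial_stop, serial_rx, 4 },
};

int main(void)
{
	for (const struct app_if *ai = app_if;
	     ai < app_if + sizeof(app_if) / sizeof(app_if[0]); ai++) {
		ai->start(0);
		ai->rx("hello");
		ai->stop(0);
	}
	return 0;
}
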
/*------------------------------------------------------------------------------
@@ -121,7 +121,7 @@ static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
pd->state = state;
- oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
+ oz_event_log(OZ_EVT_PD_STATE, 0, 0, NULL, state);
#ifdef WANT_TRACE
switch (state) {
case OZ_PD_S_IDLE:
@@ -157,7 +157,7 @@ void oz_pd_put(struct oz_pd *pd)
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-struct oz_pd *oz_pd_alloc(u8 *mac_addr)
+struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
if (pd) {
@@ -171,7 +171,7 @@ struct oz_pd *oz_pd_alloc(u8 *mac_addr)
memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
if (0 != oz_elt_buf_init(&pd->elt_buff)) {
kfree(pd);
- pd = 0;
+ pd = NULL;
}
spin_lock_init(&pd->tx_frame_lock);
INIT_LIST_HEAD(&pd->tx_queue);
@@ -235,7 +235,7 @@ void oz_pd_destroy(struct oz_pd *pd)
*/
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
- struct oz_app_if *ai;
+ const struct oz_app_if *ai;
int rc = 0;
oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
@@ -260,7 +260,7 @@ int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
*/
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
- struct oz_app_if *ai;
+ const struct oz_app_if *ai;
oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
@@ -281,7 +281,7 @@ void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
*/
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
- struct oz_app_if *ai;
+ const struct oz_app_if *ai;
int more = 0;
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (ai->heartbeat && (apps & (1<<ai->app_id))) {
@@ -355,7 +355,7 @@ int oz_pd_sleep(struct oz_pd *pd)
*/
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
- struct oz_tx_frame *f = 0;
+ struct oz_tx_frame *f = NULL;
spin_lock_bh(&pd->tx_frame_lock);
if (pd->tx_pool) {
f = container_of(pd->tx_pool, struct oz_tx_frame, link);
@@ -363,7 +363,7 @@ static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
pd->tx_pool_count--;
}
spin_unlock_bh(&pd->tx_frame_lock);
- if (f == 0)
+ if (f == NULL)
f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
if (f) {
f->total_size = sizeof(struct oz_hdr);
@@ -399,7 +399,7 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
f->link.next = pd->tx_pool;
pd->tx_pool = &f->link;
pd->tx_pool_count++;
- f = 0;
+ f = NULL;
}
spin_unlock_bh(&pd->tx_frame_lock);
kfree(f);
@@ -407,7 +407,7 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-void oz_set_more_bit(struct sk_buff *skb)
+static void oz_set_more_bit(struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
oz_hdr->control |= OZ_F_MORE_DATA;
@@ -415,7 +415,7 @@ void oz_set_more_bit(struct sk_buff *skb)
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
+static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
@@ -433,7 +433,7 @@ int oz_prepare_frame(struct oz_pd *pd, int empty)
if (!empty && !oz_are_elts_available(&pd->elt_buff))
return -1;
f = oz_tx_frame_alloc(pd);
- if (f == 0)
+ if (f == NULL)
return -1;
f->skb = NULL;
f->hdr.control =
@@ -455,7 +455,7 @@ int oz_prepare_frame(struct oz_pd *pd, int empty)
*/
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
- struct sk_buff *skb = 0;
+ struct sk_buff *skb;
struct net_device *dev = pd->net_dev;
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
@@ -464,8 +464,8 @@ static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
* as the space we need.
*/
skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
- if (skb == 0)
- return 0;
+ if (skb == NULL)
+ return NULL;
/* Reserve the head room for lower layers.
*/
skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -492,7 +492,7 @@ static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
return skb;
fail:
kfree_skb(skb);
- return 0;
+ return NULL;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
@@ -544,7 +544,7 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
if (dev_queue_xmit(skb) < 0) {
oz_trace2(OZ_TRACE_TX_FRAMES,
"Dropping ISOC Frame\n");
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
+ oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
}
atomic_inc(&g_submitted_isoc);
@@ -555,7 +555,7 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
} else {
kfree_skb(skb);
oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
+ oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
}
}
@@ -570,7 +570,7 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
oz_event_log(OZ_EVT_TX_FRAME,
0,
(((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
- 0, f->hdr.pkt_num);
+ NULL, f->hdr.pkt_num);
if (dev_queue_xmit(skb) < 0)
return -1;
@@ -620,7 +620,7 @@ out: oz_prepare_frame(pd, 1);
*/
static int oz_send_isoc_frame(struct oz_pd *pd)
{
- struct sk_buff *skb = 0;
+ struct sk_buff *skb;
struct net_device *dev = pd->net_dev;
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
@@ -634,7 +634,7 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
if (list.next == &list)
return 0;
skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
- if (skb == 0) {
+ if (skb == NULL) {
oz_trace("Cannot alloc skb\n");
oz_elt_info_free_chain(&pd->elt_buff, &list);
return -1;
@@ -659,7 +659,7 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
memcpy(elt, ei->data, ei->length);
elt = oz_next_elt(elt);
}
- oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
+ oz_event_log(OZ_EVT_TX_ISOC, 0, 0, NULL, 0);
dev_queue_xmit(skb);
oz_elt_info_free_chain(&pd->elt_buff, &list);
return 0;
@@ -671,8 +671,8 @@ void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
struct list_head *e;
struct oz_tx_frame *f;
- struct list_head *first = 0;
- struct list_head *last = 0;
+ struct list_head *first = NULL;
+ struct list_head *last = NULL;
u8 diff;
u32 pkt_num;
@@ -686,7 +686,7 @@ void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
break;
oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
pkt_num, pd->nb_queued_frames);
- if (first == 0)
+ if (first == NULL)
first = e;
last = e;
e = e->next;
@@ -695,7 +695,7 @@ void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
if (first) {
last->next->prev = &pd->tx_queue;
pd->tx_queue.next = last->next;
- last->next = 0;
+ last->next = NULL;
}
pd->last_sent_frame = &pd->tx_queue;
spin_unlock(&pd->tx_frame_lock);
@@ -718,7 +718,7 @@ static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
if (st->ep_num == ep_num)
return st;
}
- return 0;
+ return NULL;
}
/*------------------------------------------------------------------------------
* Context: softirq
@@ -733,7 +733,7 @@ int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
spin_lock_bh(&pd->stream_lock);
if (!pd_stream_find(pd, ep_num)) {
list_add(&st->link, &pd->stream_list);
- st = 0;
+ st = NULL;
}
spin_unlock_bh(&pd->stream_lock);
kfree(st);
@@ -774,19 +774,19 @@ static void oz_isoc_destructor(struct sk_buff *skb)
/*------------------------------------------------------------------------------
* Context: softirq
*/
-int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
+int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
struct net_device *dev = pd->net_dev;
struct oz_isoc_stream *st;
u8 nb_units = 0;
- struct sk_buff *skb = 0;
- struct oz_hdr *oz_hdr = 0;
+ struct sk_buff *skb = NULL;
+ struct oz_hdr *oz_hdr = NULL;
int size = 0;
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st) {
skb = st->skb;
- st->skb = 0;
+ st->skb = NULL;
nb_units = st->nb_units;
st->nb_units = 0;
oz_hdr = st->oz_hdr;
@@ -799,7 +799,7 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
/* Allocate enough space for max size frame. */
skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
GFP_ATOMIC);
- if (skb == 0)
+ if (skb == NULL)
return 0;
/* Reserve the head room for lower layers. */
skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -874,13 +874,13 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
skb, atomic_read(&g_submitted_isoc));
if (dev_queue_xmit(skb) < 0) {
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
+ oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
} else
return 0;
}
-out: oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
+out: oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
kfree_skb(skb);
return -1;
@@ -913,7 +913,7 @@ void oz_apps_term(void)
*/
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
- struct oz_app_if *ai;
+ const struct oz_app_if *ai;
if (app_id == 0 || app_id > OZ_APPID_MAX)
return;
ai = &g_app_if[app_id-1];
@@ -925,7 +925,7 @@ void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
struct oz_farewell *f;
- struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
+ const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
while (1) {
oz_polling_lock_bh();
if (list_empty(&pd->farewell_list)) {
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
index d35b0ea44f67..fbf47cbab8a9 100644
--- a/drivers/staging/ozwpan/ozpd.h
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -99,7 +99,7 @@ struct oz_pd {
#define OZ_MAX_QUEUED_FRAMES 4
-struct oz_pd *oz_pd_alloc(u8 *mac_addr);
+struct oz_pd *oz_pd_alloc(const u8 *mac_addr);
void oz_pd_destroy(struct oz_pd *pd);
void oz_pd_get(struct oz_pd *pd);
void oz_pd_put(struct oz_pd *pd);
@@ -115,7 +115,7 @@ void oz_send_queued_frames(struct oz_pd *pd, int backlog);
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn);
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num);
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num);
-int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len);
+int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len);
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt);
void oz_apps_init(void);
void oz_apps_term(void);
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index e00a53915daa..3badf1537adb 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -98,7 +98,7 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
sizeof(struct oz_elt_connect_rsp);
skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
- if (skb == 0)
+ if (skb == NULL)
return;
skb_reserve(skb, LL_RESERVED_SPACE(dev));
skb_reset_network_header(skb);
@@ -116,7 +116,7 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
oz_hdr->last_pkt_num = 0;
put_unaligned(0, &oz_hdr->pkt_num);
- oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, 0, 0);
+ oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, NULL, 0);
elt->type = OZ_ELT_CONNECT_RSP;
elt->length = sizeof(struct oz_elt_connect_rsp);
memset(body, 0, sizeof(struct oz_elt_connect_rsp));
@@ -171,7 +171,7 @@ static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
* Context: softirq-serialized
*/
static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
- u8 *pd_addr, struct net_device *net_dev)
+ const u8 *pd_addr, struct net_device *net_dev)
{
struct oz_pd *pd;
struct oz_elt_connect_req *body =
@@ -179,17 +179,17 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
u8 rsp_status = OZ_STATUS_SUCCESS;
u8 stop_needed = 0;
u16 new_apps = g_apps;
- struct net_device *old_net_dev = 0;
- struct oz_pd *free_pd = 0;
+ struct net_device *old_net_dev = NULL;
+ struct oz_pd *free_pd = NULL;
if (cur_pd) {
pd = cur_pd;
spin_lock_bh(&g_polling_lock);
} else {
- struct oz_pd *pd2 = 0;
+ struct oz_pd *pd2 = NULL;
struct list_head *e;
pd = oz_pd_alloc(pd_addr);
- if (pd == 0)
- return 0;
+ if (pd == NULL)
+ return NULL;
pd->last_rx_time_j = jiffies;
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
@@ -203,9 +203,9 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
if (pd != pd2)
list_add_tail(&pd->link, &g_pd_list);
}
- if (pd == 0) {
+ if (pd == NULL) {
spin_unlock_bh(&g_polling_lock);
- return 0;
+ return NULL;
}
if (pd->net_dev != net_dev) {
old_net_dev = pd->net_dev;
@@ -294,7 +294,7 @@ done:
if (stop_needed)
oz_pd_stop(pd);
oz_pd_put(pd);
- pd = 0;
+ pd = NULL;
}
if (old_net_dev)
dev_put(old_net_dev);
@@ -306,7 +306,7 @@ done:
* Context: softirq-serialized
*/
static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
- u8 *report, u8 len)
+ const u8 *report, u8 len)
{
struct oz_farewell *f;
struct oz_farewell *f2;
@@ -340,14 +340,14 @@ static void oz_rx_frame(struct sk_buff *skb)
u8 *src_addr;
struct oz_elt *elt;
int length;
- struct oz_pd *pd = 0;
+ struct oz_pd *pd = NULL;
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
int dup = 0;
u32 pkt_num;
oz_event_log(OZ_EVT_RX_PROCESS, 0,
(((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
- 0, oz_hdr->pkt_num);
+ NULL, oz_hdr->pkt_num);
oz_trace2(OZ_TRACE_RX_FRAMES,
"RX frame PN=0x%x LPN=0x%x control=0x%x\n",
oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
@@ -402,7 +402,7 @@ static void oz_rx_frame(struct sk_buff *skb)
break;
switch (elt->type) {
case OZ_ELT_CONNECT_REQ:
- oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, 0, 0);
+ oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, NULL, 0);
oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
pd = oz_connect_req(pd, elt, src_addr, skb->dev);
break;
@@ -456,7 +456,7 @@ done:
*/
void oz_protocol_term(void)
{
- struct list_head *chain = 0;
+ struct list_head *chain;
del_timer_sync(&g_timer);
/* Walk the list of bindings and remove each one.
*/
@@ -487,7 +487,7 @@ void oz_protocol_term(void)
spin_lock_bh(&g_polling_lock);
}
chain = g_timer_pool;
- g_timer_pool = 0;
+ g_timer_pool = NULL;
spin_unlock_bh(&g_polling_lock);
while (chain) {
struct oz_timer *t = container_of(chain, struct oz_timer, link);
@@ -534,25 +534,25 @@ static void oz_protocol_timer(unsigned long arg)
/* This happens if we remove the current timer but can't stop
* the timer from firing. In this case just get out.
*/
- oz_event_log(OZ_EVT_TIMER, 0, 0, 0, 0);
+ oz_event_log(OZ_EVT_TIMER, 0, 0, NULL, 0);
spin_unlock_bh(&g_polling_lock);
return;
}
g_timer_state = OZ_TIMER_IN_HANDLER;
t = g_cur_timer;
- g_cur_timer = 0;
+ g_cur_timer = NULL;
list_del(&t->link);
spin_unlock_bh(&g_polling_lock);
do {
pd = t->pd;
- oz_event_log(OZ_EVT_TIMER, 0, t->type, 0, 0);
+ oz_event_log(OZ_EVT_TIMER, 0, t->type, NULL, 0);
oz_pd_handle_timer(pd, t->type);
spin_lock_bh(&g_polling_lock);
if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
t->link.next = g_timer_pool;
g_timer_pool = &t->link;
g_timer_pool_count++;
- t = 0;
+ t = NULL;
}
if (!list_empty(&g_timer_list)) {
t2 = container_of(g_timer_list.next,
@@ -560,9 +560,9 @@ static void oz_protocol_timer(unsigned long arg)
if (time_before_eq(t2->due_time, jiffies))
list_del(&t2->link);
else
- t2 = 0;
+ t2 = NULL;
} else {
- t2 = 0;
+ t2 = NULL;
}
spin_unlock_bh(&g_polling_lock);
oz_pd_put(pd);
@@ -583,12 +583,12 @@ static void oz_protocol_timer_start(void)
container_of(g_timer_list.next, struct oz_timer, link);
if (g_timer_state == OZ_TIMER_SET) {
oz_event_log(OZ_EVT_TIMER_CTRL, 3,
- (u16)g_cur_timer->type, 0,
+ (u16)g_cur_timer->type, NULL,
(unsigned)g_cur_timer->due_time);
mod_timer(&g_timer, g_cur_timer->due_time);
} else {
oz_event_log(OZ_EVT_TIMER_CTRL, 4,
- (u16)g_cur_timer->type, 0,
+ (u16)g_cur_timer->type, NULL,
(unsigned)g_cur_timer->due_time);
g_timer.expires = g_cur_timer->due_time;
g_timer.function = oz_protocol_timer;
@@ -608,9 +608,9 @@ void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
int remove)
{
struct list_head *e;
- struct oz_timer *t = 0;
+ struct oz_timer *t = NULL;
int restart_needed = 0;
- oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, 0, (unsigned)due_time);
+ oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, NULL, (unsigned)due_time);
spin_lock(&g_polling_lock);
if (remove) {
list_for_each(e, &g_timer_list) {
@@ -618,12 +618,12 @@ void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
if ((t->pd == pd) && (t->type == type)) {
if (g_cur_timer == t) {
restart_needed = 1;
- g_cur_timer = 0;
+ g_cur_timer = NULL;
}
list_del(e);
break;
}
- t = 0;
+ t = NULL;
}
}
if (!t) {
@@ -647,7 +647,7 @@ void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
t2 = container_of(e, struct oz_timer, link);
if (time_before(due_time, t2->due_time)) {
if (t2 == g_cur_timer) {
- g_cur_timer = 0;
+ g_cur_timer = NULL;
restart_needed = 1;
}
break;
@@ -668,18 +668,18 @@ void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
*/
void oz_timer_delete(struct oz_pd *pd, int type)
{
- struct list_head *chain = 0;
+ struct list_head *chain = NULL;
struct oz_timer *t;
struct oz_timer *n;
int restart_needed = 0;
int release = 0;
- oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, 0, 0);
+ oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, NULL, 0);
spin_lock(&g_polling_lock);
list_for_each_entry_safe(t, n, &g_timer_list, link) {
if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
if (g_cur_timer == t) {
restart_needed = 1;
- g_cur_timer = 0;
+ g_cur_timer = NULL;
del_timer(&g_timer);
}
list_del(&t->link);
@@ -734,7 +734,7 @@ void oz_pd_request_heartbeat(struct oz_pd *pd)
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
-struct oz_pd *oz_pd_find(u8 *mac_addr)
+struct oz_pd *oz_pd_find(const u8 *mac_addr)
{
struct oz_pd *pd;
struct list_head *e;
@@ -748,7 +748,7 @@ struct oz_pd *oz_pd_find(u8 *mac_addr)
}
}
spin_unlock_bh(&g_polling_lock);
- return 0;
+ return NULL;
}
/*------------------------------------------------------------------------------
* Context: process
@@ -770,9 +770,9 @@ void oz_app_enable(int app_id, int enable)
static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- oz_event_log(OZ_EVT_RX_FRAME, 0, 0, 0, 0);
+ oz_event_log(OZ_EVT_RX_FRAME, 0, 0, NULL, 0);
skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb == 0)
+ if (skb == NULL)
return 0;
spin_lock_bh(&g_rx_queue.lock);
if (g_processing_rx) {
@@ -815,14 +815,14 @@ void oz_binding_add(char *net_dev)
oz_trace("Adding binding: %s\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
- if (binding->ptype.dev == 0) {
+ if (binding->ptype.dev == NULL) {
oz_trace("Netdev %s not found\n", net_dev);
kfree(binding);
- binding = 0;
+ binding = NULL;
}
} else {
oz_trace("Binding to all netcards\n");
- binding->ptype.dev = 0;
+ binding->ptype.dev = NULL;
}
if (binding) {
dev_add_pack(&binding->ptype);
@@ -876,7 +876,7 @@ static void pd_stop_all_for_device(struct net_device *net_dev)
*/
void oz_binding_remove(char *net_dev)
{
- struct oz_binding *binding = 0;
+ struct oz_binding *binding;
struct oz_binding **link;
oz_trace("Removing binding: %s\n", net_dev);
spin_lock_bh(&g_binding_lock);
@@ -923,7 +923,7 @@ int oz_protocol_init(char *devs)
{
skb_queue_head_init(&g_rx_queue);
if (devs && (devs[0] == '*')) {
- oz_binding_add(0);
+ oz_binding_add(NULL);
} else {
char d[32];
while (*devs) {
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
index 755a08d0e1ca..93bb4c0172e0 100644
--- a/drivers/staging/ozwpan/ozproto.h
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -62,7 +62,7 @@ int oz_protocol_init(char *devs);
void oz_protocol_term(void);
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
void oz_app_enable(int app_id, int enable);
-struct oz_pd *oz_pd_find(u8 *mac_addr);
+struct oz_pd *oz_pd_find(const u8 *mac_addr);
void oz_binding_add(char *net_dev);
void oz_binding_remove(char *net_dev);
void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
index 3acf5980d7cc..8531438d7586 100644
--- a/drivers/staging/ozwpan/ozusbif.h
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -21,7 +21,7 @@ int oz_usb_stream_delete(void *hpd, u8 ep_num);
/* Request functions.
*/
int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
- u8 *data, int data_len);
+ const u8 *data, int data_len);
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
u8 index, u16 windex, int offset, int len);
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb);
@@ -30,13 +30,13 @@ void oz_usb_request_heartbeat(void *hpd);
/* Confirmation functions.
*/
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
- u8 *desc, int length, int offset, int total_size);
+ const u8 *desc, int length, int offset, int total_size);
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
- u8 *data, int data_len);
+ const u8 *data, int data_len);
/* Indication functions.
*/
-void oz_hcd_data_ind(void *hport, u8 endpoint, u8 *data, int data_len);
+void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len);
int oz_hcd_heartbeat(void *hport);
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
index 8fa7f256ad8c..543a9415975c 100644
--- a/drivers/staging/ozwpan/ozusbsvc.c
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -34,7 +34,7 @@
*/
int oz_usb_init(void)
{
- oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_USB, 0, 0);
+ oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_USB, NULL, 0);
return oz_hcd_init();
}
/*------------------------------------------------------------------------------
@@ -43,7 +43,7 @@ int oz_usb_init(void)
*/
void oz_usb_term(void)
{
- oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_USB, 0, 0);
+ oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_USB, NULL, 0);
oz_hcd_term();
}
/*------------------------------------------------------------------------------
@@ -54,8 +54,8 @@ int oz_usb_start(struct oz_pd *pd, int resume)
{
int rc = 0;
struct oz_usb_ctx *usb_ctx;
- struct oz_usb_ctx *old_ctx = 0;
- oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_USB, 0, resume);
+ struct oz_usb_ctx *old_ctx;
+ oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_USB, NULL, resume);
if (resume) {
oz_trace("USB service resumed.\n");
return 0;
@@ -65,7 +65,7 @@ int oz_usb_start(struct oz_pd *pd, int resume)
* has a USB context then we will destroy it.
*/
usb_ctx = kzalloc(sizeof(struct oz_usb_ctx), GFP_ATOMIC);
- if (usb_ctx == 0)
+ if (usb_ctx == NULL)
return -ENOMEM;
atomic_set(&usb_ctx->ref_count, 1);
usb_ctx->pd = pd;
@@ -76,7 +76,7 @@ int oz_usb_start(struct oz_pd *pd, int resume)
*/
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
old_ctx = pd->app_ctx[OZ_APPID_USB-1];
- if (old_ctx == 0)
+ if (old_ctx == NULL)
pd->app_ctx[OZ_APPID_USB-1] = usb_ctx;
oz_usb_get(pd->app_ctx[OZ_APPID_USB-1]);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
@@ -98,10 +98,10 @@ int oz_usb_start(struct oz_pd *pd, int resume)
oz_hcd_pd_reset(usb_ctx, usb_ctx->hport);
} else {
usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
- if (usb_ctx->hport == 0) {
+ if (usb_ctx->hport == NULL) {
oz_trace("USB hub returned null port.\n");
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
- pd->app_ctx[OZ_APPID_USB-1] = 0;
+ pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
oz_usb_put(usb_ctx);
rc = -1;
@@ -117,14 +117,14 @@ int oz_usb_start(struct oz_pd *pd, int resume)
void oz_usb_stop(struct oz_pd *pd, int pause)
{
struct oz_usb_ctx *usb_ctx;
- oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_USB, 0, pause);
+ oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_USB, NULL, pause);
if (pause) {
oz_trace("USB service paused.\n");
return;
}
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
- pd->app_ctx[OZ_APPID_USB-1] = 0;
+ pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (usb_ctx) {
unsigned long tout = jiffies + HZ;
@@ -182,7 +182,7 @@ int oz_usb_heartbeat(struct oz_pd *pd)
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
- if (usb_ctx == 0)
+ if (usb_ctx == NULL)
return rc;
if (usb_ctx->stopped)
goto done;
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index 66bd576bb5e9..4e4b650fee3f 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -71,7 +71,7 @@ int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
oz_trace(" len = 0x%x\n", len);
if (len > 200)
len = 200;
- if (ei == 0)
+ if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_get_desc_req);
@@ -97,7 +97,7 @@ static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_config_req *body;
- if (ei == 0)
+ if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_set_config_req);
@@ -118,7 +118,7 @@ static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_interface_req *body;
- if (ei == 0)
+ if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_set_interface_req);
@@ -141,7 +141,7 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_feature_req *body;
- if (ei == 0)
+ if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_feature_req);
@@ -157,7 +157,7 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
* Context: tasklet
*/
static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
- u8 request, __le16 value, __le16 index, u8 *data, int data_len)
+ u8 request, __le16 value, __le16 index, const u8 *data, int data_len)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
struct oz_pd *pd = usb_ctx->pd;
@@ -165,7 +165,7 @@ static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_vendor_class_req *body;
- if (ei == 0)
+ if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
@@ -184,7 +184,7 @@ static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
* Context: tasklet
*/
int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
- u8 *data, int data_len)
+ const u8 *data, int data_len)
{
unsigned wvalue = le16_to_cpu(setup->wValue);
unsigned windex = le16_to_cpu(setup->wIndex);
@@ -264,7 +264,7 @@ int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
int unit_count;
int unit_size;
int rem;
- if (ei == 0)
+ if (ei == NULL)
return -1;
rem = MAX_ISOC_FIXED_DATA;
elt = (struct oz_elt *)ei->data;
@@ -305,7 +305,7 @@ int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
+static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_usb_hdr *usb_hdr, int len)
{
struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
@@ -359,7 +359,7 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
- if (usb_ctx == 0)
+ if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (usb_ctx->stopped)
goto done;
@@ -391,14 +391,14 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
struct oz_set_config_rsp *body =
(struct oz_set_config_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
- body->rcode, 0, 0);
+ body->rcode, NULL, 0);
}
break;
case OZ_SET_INTERFACE_RSP: {
struct oz_set_interface_rsp *body =
(struct oz_set_interface_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport,
- body->req_id, body->rcode, 0, 0);
+ body->req_id, body->rcode, NULL, 0);
}
break;
case OZ_VENDOR_CLASS_RSP: {
@@ -427,7 +427,7 @@ void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
- if (usb_ctx == 0)
+ if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (!usb_ctx->stopped) {
oz_trace("Farewell indicated ep = 0x%x\n", ep_num);
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index e3113ecefefd..c54df3948e20 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -185,7 +185,7 @@ struct logical_input {
} u;
};
-LIST_HEAD(logical_inputs); /* list of all defined logical inputs */
+static LIST_HEAD(logical_inputs); /* list of all defined logical inputs */
/* physical contacts history
* Physical contacts are a 45 bits string of 9 groups of 5 bits each.
@@ -527,10 +527,10 @@ MODULE_PARM_DESC(lcd_cl_pin,
"# of the // port pin connected to serial LCD 'SCL' "
"signal, with polarity (-17..17)");
-static unsigned char *lcd_char_conv;
+static const unsigned char *lcd_char_conv;
/* for some LCD drivers (ks0074) we need a charset conversion table. */
-static unsigned char lcd_char_conv_ks0074[256] = {
+static const unsigned char lcd_char_conv_ks0074[256] = {
/* 0|8 1|9 2|A 3|B 4|C 5|D 6|E 7|F */
/* 0x00 */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
/* 0x08 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
@@ -566,7 +566,7 @@ static unsigned char lcd_char_conv_ks0074[256] = {
/* 0xF8 */ 0xac, 0xa6, 0xea, 0xef, 0x7e, 0xeb, 0xb2, 0x79,
};
-char old_keypad_profile[][4][9] = {
+static const char old_keypad_profile[][4][9] = {
{"S0", "Left\n", "Left\n", ""},
{"S1", "Down\n", "Down\n", ""},
{"S2", "Up\n", "Up\n", ""},
@@ -577,7 +577,7 @@ char old_keypad_profile[][4][9] = {
};
/* signals, press, repeat, release */
-char new_keypad_profile[][4][9] = {
+static const char new_keypad_profile[][4][9] = {
{"S0", "Left\n", "Left\n", ""},
{"S1", "Down\n", "Down\n", ""},
{"S2", "Up\n", "Up\n", ""},
@@ -590,7 +590,7 @@ char new_keypad_profile[][4][9] = {
};
/* signals, press, repeat, release */
-char nexcom_keypad_profile[][4][9] = {
+static const char nexcom_keypad_profile[][4][9] = {
{"a-p-e-", "Down\n", "Down\n", ""},
{"a-p-E-", "Ret\n", "Ret\n", ""},
{"a-P-E-", "Esc\n", "Esc\n", ""},
@@ -599,7 +599,7 @@ char nexcom_keypad_profile[][4][9] = {
{"", "", "", ""}
};
-static char (*keypad_profile)[4][9] = old_keypad_profile;
+static const char (*keypad_profile)[4][9] = old_keypad_profile;
/* FIXME: this should be converted to a bit array containing signals states */
static struct {
@@ -669,7 +669,7 @@ static void panel_set_bits(void)
* out(dport, in(dport) & d_val[2] | d_val[signal_state])
* out(cport, in(cport) & c_val[2] | c_val[signal_state])
*/
-void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val)
+static void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val)
{
int d_bit, c_bit, inv;
@@ -1372,14 +1372,14 @@ static struct miscdevice lcd_dev = {
};
/* public function usable from the kernel for any purpose */
-void panel_lcd_print(char *s)
+static void panel_lcd_print(const char *s)
{
if (lcd_enabled && lcd_initialized)
lcd_write(NULL, s, strlen(s), NULL);
}
/* initialize the LCD driver */
-void lcd_init(void)
+static void lcd_init(void)
{
switch (lcd_type) {
case LCD_TYPE_OLD:
@@ -1638,7 +1638,7 @@ static struct miscdevice keypad_dev = {
&keypad_fops
};
-static void keypad_send_key(char *string, int max_len)
+static void keypad_send_key(const char *string, int max_len)
{
if (init_in_progress)
return;
@@ -1929,7 +1929,7 @@ static void init_scan_timer(void)
* corresponding to out and in bits respectively.
* returns 1 if ok, 0 if error (in which case, nothing is written).
*/
-static int input_name2mask(char *name, pmask_t *mask, pmask_t *value,
+static int input_name2mask(const char *name, pmask_t *mask, pmask_t *value,
char *imask, char *omask)
{
static char sigtab[10] = "EeSsPpAaBb";
@@ -1977,8 +1977,9 @@ static int input_name2mask(char *name, pmask_t *mask, pmask_t *value,
* strings <press>, <repeat>, <release> for these respective events.
* Returns the pointer to the new key if ok, NULL if the key could not be bound.
*/
-static struct logical_input *panel_bind_key(char *name, char *press,
- char *repeat, char *release)
+static struct logical_input *panel_bind_key(const char *name, const char *press,
+ const char *repeat,
+ const char *release)
{
struct logical_input *key;
@@ -2178,7 +2179,7 @@ static struct parport_driver panel_driver = {
};
/* init function */
-int panel_init(void)
+static int panel_init(void)
{
/* for backwards compatibility */
if (keypad_type < 0)
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index cac320738142..adb8da564cf6 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -296,7 +296,7 @@ fail_config:
return ret;
}
-static int quickstart_acpi_remove(struct acpi_device *device, int type)
+static int quickstart_acpi_remove(struct acpi_device *device)
{
acpi_status status;
struct quickstart_acpi *quickstart;
diff --git a/drivers/staging/ramster/Kconfig b/drivers/staging/ramster/Kconfig
deleted file mode 100644
index 3abf6619dace..000000000000
--- a/drivers/staging/ramster/Kconfig
+++ /dev/null
@@ -1,31 +0,0 @@
-config ZCACHE2
- bool "Dynamic compression of swap pages and clean pagecache pages"
- depends on CRYPTO=y && SWAP=y && CLEANCACHE && FRONTSWAP && !ZCACHE
- select CRYPTO_LZO
- default n
- help
- Zcache2 doubles RAM efficiency while providing a significant
-	  performance boost on many workloads.  Zcache2 uses
- compression and an in-kernel implementation of transcendent
- memory to store clean page cache pages and swap in RAM,
- providing a noticeable reduction in disk I/O. Zcache2
- is a complete rewrite of the older zcache; it was intended to
- be a merge but that has been blocked due to political and
- technical disagreements. It is intended that they will merge
- again in the future. Until then, zcache2 is a single-node
- version of ramster.
-
-config RAMSTER
- bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
- depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE2=y
- depends on NET
- # must ensure struct page is 8-byte aligned
- select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT
- default n
- help
- RAMster allows RAM on other machines in a cluster to be utilized
- dynamically and symmetrically instead of swapping to a local swap
- disk, thus improving performance on memory-constrained workloads
- while minimizing total RAM across the cluster. RAMster, like
- zcache2, compresses swap pages into local RAM, but then remotifies
- the compressed pages to another node in the RAMster cluster.
diff --git a/drivers/staging/ramster/Makefile b/drivers/staging/ramster/Makefile
deleted file mode 100644
index 2d8b9d0a6a8b..000000000000
--- a/drivers/staging/ramster/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-zcache-y := zcache-main.o tmem.o zbud.o
-zcache-$(CONFIG_RAMSTER) += ramster/ramster.o ramster/r2net.o
-zcache-$(CONFIG_RAMSTER) += ramster/nodemanager.o ramster/tcp.o
-zcache-$(CONFIG_RAMSTER) += ramster/heartbeat.o ramster/masklog.o
-
-obj-$(CONFIG_ZCACHE2) += zcache.o
diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
deleted file mode 100644
index a2b7e03b6062..000000000000
--- a/drivers/staging/ramster/tmem.c
+++ /dev/null
@@ -1,894 +0,0 @@
-/*
- * In-kernel transcendent memory (generic implementation)
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- *
- * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
- * "handles" (triples containing a pool id, an object id, and an index) to
- * pages in a page-accessible memory (PAM). Tmem references the PAM pages via
- * an abstract "pampd" (PAM page-descriptor), which can be operated on by a
- * set of functions (pamops). Each pampd contains some representation of
- * PAGE_SIZE bytes worth of data. For those familiar with key-value stores,
- * the tmem handle is a three-level hierarchical key, and the value is always
- * reconstituted (but not necessarily stored) as PAGE_SIZE bytes and is
- * referenced in the datastore by the pampd. The hierarchy is required
- * to ensure that certain invalidation functions can be performed efficiently
- * (i.e. flush all indexes associated with this object_id, or
- * flush all objects associated with this pool).
- *
- * Tmem must support potentially millions of pages and must be able to insert,
- * find, and delete these pages at a potential frequency of thousands per
- * second concurrently across many CPUs, (and, if used with KVM, across many
- * vcpus across many guests). Tmem is tracked with a hierarchy of data
- * structures, organized by the elements in the handle-tuple: pool_id,
- * object_id, and page index. One or more "clients" (e.g. guests) each
- * provide one or more tmem_pools.  Each pool contains a hash table of
- * rb_trees of tmem_objs. Each tmem_obj contains a radix-tree-like tree
- * of pointers, with intermediate nodes called tmem_objnodes. Each leaf
- * pointer in this tree points to a pampd, which is accessible only through
- * a small set of callbacks registered by the PAM implementation (see
- * tmem_register_pamops). Tmem only needs memory allocation for objs
- * and objnodes, and this is done via a set of callbacks that must be
- * registered by the tmem host implementation (e.g. see tmem_register_hostops).
- */
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#ifdef CONFIG_RAMSTER
-#include <linux/delay.h>
-#endif
-
-#include "tmem.h"
-
-/* data structure sentinels used for debugging... see tmem.h */
-#define POOL_SENTINEL 0x87658765
-#define OBJ_SENTINEL 0x12345678
-#define OBJNODE_SENTINEL 0xfedcba09
-
-/*
- * A tmem host implementation must use this function to register callbacks
- * for memory allocation.
- */
-static struct tmem_hostops tmem_hostops;
-
-static void tmem_objnode_tree_init(void);
-
-void tmem_register_hostops(struct tmem_hostops *m)
-{
- tmem_objnode_tree_init();
- tmem_hostops = *m;
-}
-
-/*
- * A tmem host implementation must use this function to register
- * callbacks for a page-accessible memory (PAM) implementation.
- */
-static struct tmem_pamops tmem_pamops;
-
-void tmem_register_pamops(struct tmem_pamops *m)
-{
- tmem_pamops = *m;
-}
-
-/*
- * Oids are potentially very sparse and tmem_objs may have an indeterminately
- * short life, being added and deleted at a relatively high frequency.
- * So an rb_tree is an ideal data structure to manage tmem_objs. But because
- * of the potentially huge number of tmem_objs, each pool manages a hashtable
- * of rb_trees to reduce search, insert, delete, and rebalancing time.
- * Each hashbucket also has a lock to manage concurrent access and no
- * searches, inserts, or deletions can be performed unless the lock is held.
- * As a result, care must be taken to ensure tmem routines are not called
- * recursively; the vast majority of the time, a recursive call may work
- * but a deadlock will occur a small fraction of the time due to the
- * hashbucket lock.
- *
- * The following routines manage tmem_objs. In all of these routines,
- * the hashbucket lock is already held.
- */
-
-/* Search for object==oid in pool, returns object if found. */
-static struct tmem_obj *__tmem_obj_find(struct tmem_hashbucket *hb,
- struct tmem_oid *oidp,
- struct rb_node **parent,
- struct rb_node ***link)
-{
- struct rb_node *_parent = NULL, **rbnode;
- struct tmem_obj *obj = NULL;
-
- rbnode = &hb->obj_rb_root.rb_node;
- while (*rbnode) {
- BUG_ON(RB_EMPTY_NODE(*rbnode));
- _parent = *rbnode;
- obj = rb_entry(*rbnode, struct tmem_obj,
- rb_tree_node);
- switch (tmem_oid_compare(oidp, &obj->oid)) {
- case 0: /* equal */
- goto out;
- case -1:
- rbnode = &(*rbnode)->rb_left;
- break;
- case 1:
- rbnode = &(*rbnode)->rb_right;
- break;
- }
- }
-
- if (parent)
- *parent = _parent;
- if (link)
- *link = rbnode;
- obj = NULL;
-out:
- return obj;
-}
-
-static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
- struct tmem_oid *oidp)
-{
- return __tmem_obj_find(hb, oidp, NULL, NULL);
-}
-
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *, bool);
-
-/* Free an object that has no more pampds in it. */
-static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
-{
- struct tmem_pool *pool;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pampd_count > 0);
- pool = obj->pool;
- BUG_ON(pool == NULL);
- if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
- tmem_pampd_destroy_all_in_obj(obj, false);
- BUG_ON(obj->objnode_tree_root != NULL);
- BUG_ON((long)obj->objnode_count != 0);
- atomic_dec(&pool->obj_count);
- BUG_ON(atomic_read(&pool->obj_count) < 0);
- INVERT_SENTINEL(obj, OBJ);
- obj->pool = NULL;
- tmem_oid_set_invalid(&obj->oid);
- rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
-}
-
-/*
- * Initialize and insert a tmem_object_root (called only if find failed).
- */
-static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
- struct tmem_pool *pool,
- struct tmem_oid *oidp)
-{
- struct rb_root *root = &hb->obj_rb_root;
- struct rb_node **new = NULL, *parent = NULL;
-
- BUG_ON(pool == NULL);
- atomic_inc(&pool->obj_count);
- obj->objnode_tree_height = 0;
- obj->objnode_tree_root = NULL;
- obj->pool = pool;
- obj->oid = *oidp;
- obj->objnode_count = 0;
- obj->pampd_count = 0;
-#ifdef CONFIG_RAMSTER
- if (tmem_pamops.new_obj != NULL)
- (*tmem_pamops.new_obj)(obj);
-#endif
- SET_SENTINEL(obj, OBJ);
-
- if (__tmem_obj_find(hb, oidp, &parent, &new))
- BUG();
-
- rb_link_node(&obj->rb_tree_node, parent, new);
- rb_insert_color(&obj->rb_tree_node, root);
-}
-
-/*
- * Tmem is managed as a set of tmem_pools with certain attributes, such as
- * "ephemeral" vs "persistent". These attributes apply to all tmem_objs
- * and all pampds that belong to a tmem_pool. A tmem_pool is created
- * or deleted relatively rarely (for example, when a filesystem is
- * mounted or unmounted).
- */
-
-/* flush all data from a pool and, optionally, free it */
-static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
-{
- struct rb_node *rbnode;
- struct tmem_obj *obj;
- struct tmem_hashbucket *hb = &pool->hashbucket[0];
- int i;
-
- BUG_ON(pool == NULL);
- for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
- spin_lock(&hb->lock);
- rbnode = rb_first(&hb->obj_rb_root);
- while (rbnode != NULL) {
- obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
- rbnode = rb_next(rbnode);
- tmem_pampd_destroy_all_in_obj(obj, true);
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- }
- spin_unlock(&hb->lock);
- }
- if (destroy)
- list_del(&pool->pool_list);
-}
-
-/*
- * A tmem_obj contains a radix-tree-like tree in which the intermediate
- * nodes are called tmem_objnodes. (The kernel lib/radix-tree.c implementation
- * is very specialized and tuned for specific uses and is not particularly
- * suited for use from this code, though some code from the core algorithms has
- * been reused, thus the copyright notices below). Each tmem_objnode contains
- * a set of pointers which point to either a set of intermediate tmem_objnodes
- * or a set of pampds.
- *
- * Portions Copyright (C) 2001 Momchil Velikov
- * Portions Copyright (C) 2001 Christoph Hellwig
- * Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
- */
-
-struct tmem_objnode_tree_path {
- struct tmem_objnode *objnode;
- int offset;
-};
-
-/* objnode height_to_maxindex translation */
-static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];
-
-static void tmem_objnode_tree_init(void)
-{
- unsigned int ht, tmp;
-
- for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
- tmp = ht * OBJNODE_TREE_MAP_SHIFT;
- if (tmp >= OBJNODE_TREE_INDEX_BITS)
- tmem_objnode_tree_h2max[ht] = ~0UL;
- else
- tmem_objnode_tree_h2max[ht] =
- (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
- }
-}
-
-static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
-{
- struct tmem_objnode *objnode;
-
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
- if (unlikely(objnode == NULL))
- goto out;
- objnode->obj = obj;
- SET_SENTINEL(objnode, OBJNODE);
- memset(&objnode->slots, 0, sizeof(objnode->slots));
- objnode->slots_in_use = 0;
- obj->objnode_count++;
-out:
- return objnode;
-}
-
-static void tmem_objnode_free(struct tmem_objnode *objnode)
-{
- struct tmem_pool *pool;
- int i;
-
- BUG_ON(objnode == NULL);
- for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
- BUG_ON(objnode->slots[i] != NULL);
- ASSERT_SENTINEL(objnode, OBJNODE);
- INVERT_SENTINEL(objnode, OBJNODE);
- BUG_ON(objnode->obj == NULL);
- ASSERT_SENTINEL(objnode->obj, OBJ);
- pool = objnode->obj->pool;
- BUG_ON(pool == NULL);
- ASSERT_SENTINEL(pool, POOL);
- objnode->obj->objnode_count--;
- objnode->obj = NULL;
- (*tmem_hostops.objnode_free)(objnode, pool);
-}
-
-/*
- * Lookup index in object and return associated pampd (or NULL if not found).
- */
-static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
-{
- unsigned int height, shift;
- struct tmem_objnode **slot = NULL;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
-
- height = obj->objnode_tree_height;
- if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height])
- goto out;
- if (height == 0 && obj->objnode_tree_root) {
- slot = &obj->objnode_tree_root;
- goto out;
- }
- shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
- slot = &obj->objnode_tree_root;
- while (height > 0) {
- if (*slot == NULL)
- goto out;
- slot = (struct tmem_objnode **)
- ((*slot)->slots +
- ((index >> shift) & OBJNODE_TREE_MAP_MASK));
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- }
-out:
- return slot != NULL ? (void **)slot : NULL;
-}
-
-static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
-{
- struct tmem_objnode **slot;
-
- slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
- return slot != NULL ? *slot : NULL;
-}
-
-#ifdef CONFIG_RAMSTER
-static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
- void *new_pampd, bool no_free)
-{
- struct tmem_objnode **slot;
- void *ret = NULL;
-
- slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
- if ((slot != NULL) && (*slot != NULL)) {
- void *old_pampd = *(void **)slot;
- *(void **)slot = new_pampd;
- if (!no_free)
- (*tmem_pamops.free)(old_pampd, obj->pool,
- NULL, 0, false);
- ret = new_pampd;
- }
- return ret;
-}
-#endif
-
-static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
- void *pampd)
-{
- int ret = 0;
- struct tmem_objnode *objnode = NULL, *newnode, *slot;
- unsigned int height, shift;
- int offset = 0;
-
- /* if necessary, extend the tree to be higher */
- if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
- height = obj->objnode_tree_height + 1;
- if (index > tmem_objnode_tree_h2max[height])
- while (index > tmem_objnode_tree_h2max[height])
- height++;
- if (obj->objnode_tree_root == NULL) {
- obj->objnode_tree_height = height;
- goto insert;
- }
- do {
- newnode = tmem_objnode_alloc(obj);
- if (!newnode) {
- ret = -ENOMEM;
- goto out;
- }
- newnode->slots[0] = obj->objnode_tree_root;
- newnode->slots_in_use = 1;
- obj->objnode_tree_root = newnode;
- obj->objnode_tree_height++;
- } while (height > obj->objnode_tree_height);
- }
-insert:
- slot = obj->objnode_tree_root;
- height = obj->objnode_tree_height;
- shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
- while (height > 0) {
- if (slot == NULL) {
- /* add a child objnode. */
- slot = tmem_objnode_alloc(obj);
- if (!slot) {
- ret = -ENOMEM;
- goto out;
- }
- if (objnode) {
-
- objnode->slots[offset] = slot;
- objnode->slots_in_use++;
- } else
- obj->objnode_tree_root = slot;
- }
- /* go down a level */
- offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
- objnode = slot;
- slot = objnode->slots[offset];
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- }
- BUG_ON(slot != NULL);
- if (objnode) {
- objnode->slots_in_use++;
- objnode->slots[offset] = pampd;
- } else
- obj->objnode_tree_root = pampd;
- obj->pampd_count++;
-out:
- return ret;
-}
-
-static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
-{
- struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
- struct tmem_objnode_tree_path *pathp = path;
- struct tmem_objnode *slot = NULL;
- unsigned int height, shift;
- int offset;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
- height = obj->objnode_tree_height;
- if (index > tmem_objnode_tree_h2max[height])
- goto out;
- slot = obj->objnode_tree_root;
- if (height == 0 && obj->objnode_tree_root) {
- obj->objnode_tree_root = NULL;
- goto out;
- }
- shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
- pathp->objnode = NULL;
- do {
- if (slot == NULL)
- goto out;
- pathp++;
- offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
- pathp->offset = offset;
- pathp->objnode = slot;
- slot = slot->slots[offset];
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- } while (height > 0);
- if (slot == NULL)
- goto out;
- while (pathp->objnode) {
- pathp->objnode->slots[pathp->offset] = NULL;
- pathp->objnode->slots_in_use--;
- if (pathp->objnode->slots_in_use) {
- if (pathp->objnode == obj->objnode_tree_root) {
- while (obj->objnode_tree_height > 0 &&
- obj->objnode_tree_root->slots_in_use == 1 &&
- obj->objnode_tree_root->slots[0]) {
- struct tmem_objnode *to_free =
- obj->objnode_tree_root;
-
- obj->objnode_tree_root =
- to_free->slots[0];
- obj->objnode_tree_height--;
- to_free->slots[0] = NULL;
- to_free->slots_in_use = 0;
- tmem_objnode_free(to_free);
- }
- }
- goto out;
- }
- tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
- pathp--;
- }
- obj->objnode_tree_height = 0;
- obj->objnode_tree_root = NULL;
-
-out:
- if (slot != NULL)
- obj->pampd_count--;
- BUG_ON(obj->pampd_count < 0);
- return slot;
-}
-
-/* Recursively walk the objnode_tree destroying pampds and objnodes. */
-static void tmem_objnode_node_destroy(struct tmem_obj *obj,
- struct tmem_objnode *objnode,
- unsigned int ht)
-{
- int i;
-
- if (ht == 0)
- return;
- for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
- if (objnode->slots[i]) {
- if (ht == 1) {
- obj->pampd_count--;
- (*tmem_pamops.free)(objnode->slots[i],
- obj->pool, NULL, 0, true);
- objnode->slots[i] = NULL;
- continue;
- }
- tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
- tmem_objnode_free(objnode->slots[i]);
- objnode->slots[i] = NULL;
- }
- }
-}
-
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
- bool pool_destroy)
-{
- if (obj->objnode_tree_root == NULL)
- return;
- if (obj->objnode_tree_height == 0) {
- obj->pampd_count--;
- (*tmem_pamops.free)(obj->objnode_tree_root,
- obj->pool, NULL, 0, true);
- } else {
- tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
- obj->objnode_tree_height);
- tmem_objnode_free(obj->objnode_tree_root);
- obj->objnode_tree_height = 0;
- }
- obj->objnode_tree_root = NULL;
-#ifdef CONFIG_RAMSTER
- if (tmem_pamops.free_obj != NULL)
- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
-#endif
-}
-
-/*
- * Tmem is operated on by a set of well-defined actions:
- * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
- * (The tmem ABI allows for subpages and exchanges but these operations
- * are not included in this implementation.)
- *
- * These "tmem core" operations are implemented in the following functions.
- */
-
-/*
- * "Put" a page, e.g. associate the passed pampd with the passed handle.
- * Tmem_put is complicated by a corner case: What if a page with matching
- * handle already exists in tmem? To guarantee coherency, one of two
- * actions is necessary: Either the data for the page must be overwritten,
- * or the page must be "flushed" so that the data is not accessible to a
- * subsequent "get". Since these "duplicate puts" are relatively rare,
- * this implementation always flushes for simplicity.
- */
-int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- bool raw, void *pampd_to_use)
-{
- struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
- void *pampd = NULL, *pampd_del = NULL;
- int ret = -ENOMEM;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = objfound = tmem_obj_find(hb, oidp);
- if (obj != NULL) {
- pampd = tmem_pampd_lookup_in_obj(objfound, index);
- if (pampd != NULL) {
- /* if found, is a dup put, flush the old one */
- pampd_del = tmem_pampd_delete_from_obj(obj, index);
- BUG_ON(pampd_del != pampd);
- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
- if (obj->pampd_count == 0) {
- objnew = obj;
- objfound = NULL;
- }
- pampd = NULL;
- }
- } else {
- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
- if (unlikely(obj == NULL)) {
- ret = -ENOMEM;
- goto out;
- }
- tmem_obj_init(obj, hb, pool, oidp);
- }
- BUG_ON(obj == NULL);
- BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
- pampd = pampd_to_use;
- BUG_ON(pampd_to_use == NULL);
- ret = tmem_pampd_add_to_obj(obj, index, pampd);
- if (unlikely(ret == -ENOMEM))
- /* may have partially built objnode tree ("stump") */
- goto delete_and_free;
- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
- goto out;
-
-delete_and_free:
- (void)tmem_pampd_delete_from_obj(obj, index);
- if (pampd)
- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
- if (objnew) {
- tmem_obj_free(objnew, hb);
- (*tmem_hostops.obj_free)(objnew, pool);
- }
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-#ifdef CONFIG_RAMSTER
-/*
- * For ramster only: The following routines provide a two-step sequence
- * to allow the caller to replace a pampd in the tmem data structures with
- * another pampd.  Here, we look up the passed handle and, if found, return the
- * associated pampd and object, leaving the hashbucket locked and returning
- * a reference to it.  The caller is expected to immediately call the
- * matching tmem_localify_finish routine, which handles the replacement
- * and unlocks the hashbucket.
- */
-void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, struct tmem_obj **ret_obj,
- void **saved_hb)
-{
- struct tmem_hashbucket *hb;
- struct tmem_obj *obj = NULL;
- void *pampd = NULL;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (likely(obj != NULL))
- pampd = tmem_pampd_lookup_in_obj(obj, index);
- *ret_obj = obj;
- *saved_hb = (void *)hb;
- /* note, hashbucket remains locked */
- return pampd;
-}
-
-void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
- void *pampd, void *saved_hb, bool delete)
-{
- struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;
-
- BUG_ON(!spin_is_locked(&hb->lock));
- if (pampd != NULL) {
- BUG_ON(obj == NULL);
- (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
- } else if (delete) {
- BUG_ON(obj == NULL);
- (void)tmem_pampd_delete_from_obj(obj, index);
- }
- spin_unlock(&hb->lock);
-}
-
-/*
- * For ramster only. Helper function to support asynchronous tmem_get.
- */
-static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
- struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, bool free, char *data)
-{
- void *old_pampd = *ppampd, *new_pampd = NULL;
- bool intransit = false;
- int ret = 0;
-
- if (!is_ephemeral(pool))
- new_pampd = (*tmem_pamops.repatriate_preload)(
- old_pampd, pool, oidp, index, &intransit);
- if (intransit)
- ret = -EAGAIN;
- else if (new_pampd != NULL)
- *ppampd = new_pampd;
- /* must release the hb->lock else repatriate can't sleep */
- spin_unlock(&hb->lock);
- if (!intransit)
- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
- oidp, index, free, data);
- if (ret == -EAGAIN) {
- /* rare I think, but should cond_resched()??? */
- usleep_range(10, 1000);
- } else if (ret == -ENOTCONN || ret == -EHOSTDOWN) {
- ret = -1;
- } else if (ret != 0 && ret != -ENOENT) {
- ret = -1;
- }
- /* note hb->lock has now been unlocked */
- return ret;
-}
-
-/*
- * For ramster only. If a page in tmem matches the handle, replace the
- * page so that any subsequent "get" gets the new page. Returns 0 if
- * there was a page to replace, else returns -1.
- */
-int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, void *new_pampd)
-{
- struct tmem_obj *obj;
- int ret = -1;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
- /* if we bug here, pamops wasn't properly set up for ramster */
- BUG_ON(tmem_pamops.replace_in_obj == NULL);
- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-#endif
-
-/*
- * "Get" a page, e.g. if a pampd can be found matching the passed handle,
- * use a pamops callback to recreate the page from the pampd with the
- * matching handle. By tmem definition, when a "get" is successful on
- * an ephemeral page, the page is "flushed", and when a "get" is successful
- * on a persistent page, the page is retained in tmem. Note that to preserve
- * coherency, "get" can never be skipped if tmem contains the data.
- * That is, if a get is done with a certain handle and fails, any
- * subsequent "get" must also fail (unless of course there is a
- * "put" done with the same handle).
- */
-int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- char *data, size_t *sizep, bool raw, int get_and_free)
-{
- struct tmem_obj *obj;
- void *pampd = NULL;
- bool ephemeral = is_ephemeral(pool);
- int ret = -1;
- struct tmem_hashbucket *hb;
- bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
- bool lock_held = false;
- void **ppampd;
-
- do {
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- lock_held = true;
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- ppampd = __tmem_pampd_lookup_in_obj(obj, index);
- if (ppampd == NULL)
- goto out;
-#ifdef CONFIG_RAMSTER
- if ((tmem_pamops.is_remote != NULL) &&
- tmem_pamops.is_remote(*ppampd)) {
- ret = tmem_repatriate(ppampd, hb, pool, oidp,
- index, free, data);
- /* tmem_repatriate releases hb->lock */
- lock_held = false;
- *sizep = PAGE_SIZE;
- if (ret != -EAGAIN)
- goto out;
- }
-#endif
- } while (ret == -EAGAIN);
- if (free)
- pampd = tmem_pampd_delete_from_obj(obj, index);
- else
- pampd = tmem_pampd_lookup_in_obj(obj, index);
- if (pampd == NULL)
- goto out;
- if (free) {
- if (obj->pampd_count == 0) {
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- obj = NULL;
- }
- }
- if (free)
- ret = (*tmem_pamops.get_data_and_free)(
- data, sizep, raw, pampd, pool, oidp, index);
- else
- ret = (*tmem_pamops.get_data)(
- data, sizep, raw, pampd, pool, oidp, index);
- if (ret < 0)
- goto out;
- ret = 0;
-out:
- if (lock_held)
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
- * If a page in tmem matches the handle, "flush" this page from tmem such
- * that any subsequent "get" does not succeed (unless, of course, there
- * was another "put" with the same handle).
- */
-int tmem_flush_page(struct tmem_pool *pool,
- struct tmem_oid *oidp, uint32_t index)
-{
- struct tmem_obj *obj;
- void *pampd;
- int ret = -1;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- pampd = tmem_pampd_delete_from_obj(obj, index);
- if (pampd == NULL)
- goto out;
- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
- if (obj->pampd_count == 0) {
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- }
- ret = 0;
-
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
- * "Flush" all pages in tmem matching this oid.
- */
-int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
-{
- struct tmem_obj *obj;
- struct tmem_hashbucket *hb;
- int ret = -1;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- tmem_pampd_destroy_all_in_obj(obj, false);
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- ret = 0;
-
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
- * "Flush" all pages (and tmem_objs) from this tmem_pool and disable
- * all subsequent access to this tmem_pool.
- */
-int tmem_destroy_pool(struct tmem_pool *pool)
-{
- int ret = -1;
-
- if (pool == NULL)
- goto out;
- tmem_pool_flush(pool, 1);
- ret = 0;
-out:
- return ret;
-}
-
-static LIST_HEAD(tmem_global_pool_list);
-
-/*
- * Create a new tmem_pool with the provided flag and return
- * a pool id provided by the tmem host implementation.
- */
-void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
-{
- int persistent = flags & TMEM_POOL_PERSIST;
- int shared = flags & TMEM_POOL_SHARED;
- struct tmem_hashbucket *hb = &pool->hashbucket[0];
- int i;
-
- for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
- hb->obj_rb_root = RB_ROOT;
- spin_lock_init(&hb->lock);
- }
- INIT_LIST_HEAD(&pool->pool_list);
- atomic_set(&pool->obj_count, 0);
- SET_SENTINEL(pool, POOL);
- list_add_tail(&pool->pool_list, &tmem_global_pool_list);
- pool->persistent = persistent;
- pool->shared = shared;
-}
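The comments in the removed tmem.c above describe how a handle (pool id, 192-bit oid, index) is routed to one of TMEM_HASH_BUCKETS per-pool rb-trees before the per-object objnode tree is walked. Below is a minimal user-space sketch of just the bucket-selection step; it is illustrative only and not taken from the removed file, and hash64() is a stand-in for the kernel's hash_long().

/* Illustrative sketch only: mirrors the oid-to-bucket mapping described above. */
#include <stdint.h>
#include <stdio.h>

#define HASH_BUCKET_BITS 8                       /* mirrors TMEM_HASH_BUCKET_BITS */
#define HASH_BUCKETS (1u << HASH_BUCKET_BITS)    /* mirrors TMEM_HASH_BUCKETS */

struct oid { uint64_t w[3]; };                   /* 192-bit object id */

/* stand-in for the kernel's hash_long(): take the top 'bits' bits of a
 * Fibonacci-hashed 64-bit value */
static unsigned hash64(uint64_t v, unsigned bits)
{
	return (unsigned)((v * 0x9E3779B97F4A7C15ull) >> (64 - bits));
}

/* same shape as tmem_oid_hash(): fold the three oid words, then hash */
static unsigned oid_hash(const struct oid *o)
{
	return hash64(o->w[0] ^ o->w[1] ^ o->w[2], HASH_BUCKET_BITS);
}

int main(void)
{
	struct oid o = { { 42, 7, 0 } };
	printf("oid hashes to bucket %u of %u\n", oid_hash(&o), HASH_BUCKETS);
	return 0;
}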
diff --git a/drivers/staging/ramster/tmem.h b/drivers/staging/ramster/tmem.h
deleted file mode 100644
index adbe5a8f28aa..000000000000
--- a/drivers/staging/ramster/tmem.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * tmem.h
- *
- * Transcendent memory
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _TMEM_H_
-#define _TMEM_H_
-
-#include <linux/types.h>
-#include <linux/highmem.h>
-#include <linux/hash.h>
-#include <linux/atomic.h>
-
-/*
- * These are defined by the Xen<->Linux ABI so should remain consistent
- */
-#define TMEM_POOL_PERSIST 1
-#define TMEM_POOL_SHARED 2
-#define TMEM_POOL_PRECOMPRESSED 4
-#define TMEM_POOL_PAGESIZE_SHIFT 4
-#define TMEM_POOL_PAGESIZE_MASK 0xf
-#define TMEM_POOL_RESERVED_BITS 0x00ffff00
-
-/*
- * sentinels have proven very useful for debugging but can be removed
- * or disabled before final merge.
- */
-#undef SENTINELS
-#ifdef SENTINELS
-#define DECL_SENTINEL uint32_t sentinel;
-#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
-#define INVERT_SENTINEL(_x, _y) (_x->sentinel = ~_y##_SENTINEL)
-#define ASSERT_SENTINEL(_x, _y) WARN_ON(_x->sentinel != _y##_SENTINEL)
-#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON(_x->sentinel != ~_y##_SENTINEL)
-#else
-#define DECL_SENTINEL
-#define SET_SENTINEL(_x, _y) do { } while (0)
-#define INVERT_SENTINEL(_x, _y) do { } while (0)
-#define ASSERT_SENTINEL(_x, _y) do { } while (0)
-#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
-#endif
-
-#define ASSERT_SPINLOCK(_l) lockdep_assert_held(_l)
-
-/*
- * A pool is the highest-level data structure managed by tmem and
- * usually corresponds to a large independent set of pages such as
- * a filesystem. Each pool has an id, and certain attributes and counters.
- * It also contains a set of hash buckets, each of which contains an rbtree
- * of objects and a lock to manage concurrency within the pool.
- */
-
-#define TMEM_HASH_BUCKET_BITS 8
-#define TMEM_HASH_BUCKETS (1<<TMEM_HASH_BUCKET_BITS)
-
-struct tmem_hashbucket {
- struct rb_root obj_rb_root;
- spinlock_t lock;
-};
-
-struct tmem_pool {
- void *client; /* "up" for some clients, avoids table lookup */
- struct list_head pool_list;
- uint32_t pool_id;
- bool persistent;
- bool shared;
- atomic_t obj_count;
- atomic_t refcount;
- struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
- DECL_SENTINEL
-};
-
-#define is_persistent(_p) (_p->persistent)
-#define is_ephemeral(_p) (!(_p->persistent))
-
-/*
- * An object id ("oid") is large: 192-bits (to ensure, for example, files
- * in a modern filesystem can be uniquely identified).
- */
-
-struct tmem_oid {
- uint64_t oid[3];
-};
-
-static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
-{
- oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
-}
-
-static inline bool tmem_oid_valid(struct tmem_oid *oidp)
-{
- return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
- oidp->oid[2] != -1UL;
-}
-
-static inline int tmem_oid_compare(struct tmem_oid *left,
- struct tmem_oid *right)
-{
- int ret;
-
- if (left->oid[2] == right->oid[2]) {
- if (left->oid[1] == right->oid[1]) {
- if (left->oid[0] == right->oid[0])
- ret = 0;
- else if (left->oid[0] < right->oid[0])
- ret = -1;
- else
- return 1;
- } else if (left->oid[1] < right->oid[1])
- ret = -1;
- else
- ret = 1;
- } else if (left->oid[2] < right->oid[2])
- ret = -1;
- else
- ret = 1;
- return ret;
-}
-
-static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
-{
- return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
- TMEM_HASH_BUCKET_BITS);
-}
-
-#ifdef CONFIG_RAMSTER
-struct tmem_xhandle {
- uint8_t client_id;
- uint8_t xh_data_cksum;
- uint16_t xh_data_size;
- uint16_t pool_id;
- struct tmem_oid oid;
- uint32_t index;
- void *extra;
-};
-
-static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
- struct tmem_pool *pool,
- struct tmem_oid *oidp,
- uint32_t index)
-{
- struct tmem_xhandle xh;
- xh.client_id = client_id;
- xh.xh_data_cksum = (uint8_t)-1;
- xh.xh_data_size = (uint16_t)-1;
- xh.pool_id = pool->pool_id;
- xh.oid = *oidp;
- xh.index = index;
- return xh;
-}
-#endif
-
-
-/*
- * A tmem_obj contains an identifier (oid), pointers to the parent
- * pool and the rb_tree to which it belongs, counters, and an ordered
- * set of pampds, structured in a radix-tree-like tree. The intermediate
- * nodes of the tree are called tmem_objnodes.
- */
-
-struct tmem_objnode;
-
-struct tmem_obj {
- struct tmem_oid oid;
- struct tmem_pool *pool;
- struct rb_node rb_tree_node;
- struct tmem_objnode *objnode_tree_root;
- unsigned int objnode_tree_height;
- unsigned long objnode_count;
- long pampd_count;
-#ifdef CONFIG_RAMSTER
- /*
-	 * In the current design of ramster, all pages belonging to
-	 * an object reside on the same remote node; "extra" records
-	 * that node's number so a flush-object operation can
-	 * target it.
- */
- void *extra; /* for private use by pampd implementation */
-#endif
- DECL_SENTINEL
-};
-
-#define OBJNODE_TREE_MAP_SHIFT 6
-#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
-#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
-#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-#define OBJNODE_TREE_MAX_PATH \
- (OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)
-
-struct tmem_objnode {
- struct tmem_obj *obj;
- DECL_SENTINEL
- void *slots[OBJNODE_TREE_MAP_SIZE];
- unsigned int slots_in_use;
-};
-
-struct tmem_handle {
- struct tmem_oid oid; /* 24 bytes */
- uint32_t index;
- uint16_t pool_id;
- uint16_t client_id;
-};
-
-
-/* pampd abstract datatype methods provided by the PAM implementation */
-struct tmem_pamops {
- void (*create_finish)(void *, bool);
- int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t);
- int (*get_data_and_free)(char *, size_t *, bool, void *,
- struct tmem_pool *, struct tmem_oid *,
- uint32_t);
- void (*free)(void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool);
-#ifdef CONFIG_RAMSTER
- void (*new_obj)(struct tmem_obj *);
- void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool);
- void *(*repatriate_preload)(void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool *);
- int (*repatriate)(void *, void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool, void *);
- bool (*is_remote)(void *);
- int (*replace_in_obj)(void *, struct tmem_obj *);
-#endif
-};
-extern void tmem_register_pamops(struct tmem_pamops *m);
-
-/* memory allocation methods provided by the host implementation */
-struct tmem_hostops {
- struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
- void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
- struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
- void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
-};
-extern void tmem_register_hostops(struct tmem_hostops *m);
-
-/* core tmem accessor functions */
-extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- bool, void *);
-extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- char *, size_t *, bool, int);
-extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
- uint32_t index);
-extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
-extern int tmem_destroy_pool(struct tmem_pool *);
-extern void tmem_new_pool(struct tmem_pool *, uint32_t);
-#ifdef CONFIG_RAMSTER
-extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- void *);
-extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
- uint32_t index, struct tmem_obj **,
- void **);
-extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
- void *, void *, bool);
-#endif
-#endif /* _TMEM_H */
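The removed tmem.h above exposes tmem through two callback tables: tmem_hostops for memory allocation and tmem_pamops for page-descriptor operations, each registered by the host before use. The following user-space sketch shows only that register-a-struct-of-function-pointers pattern; demo_hostops, demo_register, my_alloc and my_free are hypothetical names, not part of the removed code.

/* Illustrative sketch only: the callback-table registration pattern. */
#include <stdio.h>
#include <stdlib.h>

struct demo_obj { int dummy; };

struct demo_hostops {
	struct demo_obj *(*obj_alloc)(void);
	void (*obj_free)(struct demo_obj *);
};

static struct demo_hostops registered_ops;      /* copied at registration time */

/* mirrors the shape of tmem_register_hostops(): copy the caller's table */
static void demo_register(const struct demo_hostops *ops)
{
	registered_ops = *ops;
}

static struct demo_obj *my_alloc(void) { return calloc(1, sizeof(struct demo_obj)); }
static void my_free(struct demo_obj *o) { free(o); }

int main(void)
{
	struct demo_hostops ops = { my_alloc, my_free };
	demo_register(&ops);

	/* core code would now allocate and free objects only through the table */
	struct demo_obj *o = registered_ops.obj_alloc();
	printf("allocated obj at %p via registered callback\n", (void *)o);
	registered_ops.obj_free(o);
	return 0;
}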
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
deleted file mode 100644
index a09dd5cc1cea..000000000000
--- a/drivers/staging/ramster/zcache-main.c
+++ /dev/null
@@ -1,1820 +0,0 @@
-/*
- * zcache.c
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- * Copyright (c) 2010,2011, Nitin Gupta
- *
- * Zcache provides an in-kernel "host implementation" for transcendent memory
- * ("tmem") and, thus indirectly, for cleancache and frontswap. Zcache uses
- * lzo1x compression to improve density and an embedded allocator called
- * "zbud" which "buddies" two compressed pages semi-optimally in each physical
- * pageframe. Zbud is integrally tied into tmem to allow pageframes to
- * be "reclaimed" efficiently.
- */
-
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/highmem.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/atomic.h>
-#include <linux/math64.h>
-#include <linux/crypto.h>
-
-#include <linux/cleancache.h>
-#include <linux/frontswap.h>
-#include "tmem.h"
-#include "zcache.h"
-#include "zbud.h"
-#include "ramster.h"
-#ifdef CONFIG_RAMSTER
-static int ramster_enabled;
-#else
-#define ramster_enabled 0
-#endif
-
-#ifndef __PG_WAS_ACTIVE
-static inline bool PageWasActive(struct page *page)
-{
- return true;
-}
-
-static inline void SetPageWasActive(struct page *page)
-{
-}
-#endif
-
-#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
-static bool frontswap_has_exclusive_gets __read_mostly = true;
-#else
-static bool frontswap_has_exclusive_gets __read_mostly;
-static inline void frontswap_tmem_exclusive_gets(bool b)
-{
-}
-#endif
-
-static int zcache_enabled __read_mostly;
-static int disable_cleancache __read_mostly;
-static int disable_frontswap __read_mostly;
-static int disable_frontswap_ignore_nonactive __read_mostly;
-static int disable_cleancache_ignore_nonactive __read_mostly;
-static char *namestr __read_mostly = "zcache";
-
-#define ZCACHE_GFP_MASK \
- (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
-
-MODULE_LICENSE("GPL");
-
-/* crypto API for zcache */
-#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
-static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
-static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
-
-enum comp_op {
- ZCACHE_COMPOP_COMPRESS,
- ZCACHE_COMPOP_DECOMPRESS
-};
-
-static inline int zcache_comp_op(enum comp_op op,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_comp *tfm;
- int ret = -1;
-
- BUG_ON(!zcache_comp_pcpu_tfms);
- tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
- BUG_ON(!tfm);
- switch (op) {
- case ZCACHE_COMPOP_COMPRESS:
- ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
- break;
- case ZCACHE_COMPOP_DECOMPRESS:
- ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
- break;
- default:
- ret = -EINVAL;
- }
- put_cpu();
- return ret;
-}
-
-/*
- * policy parameters
- */
-
-/*
- * byte count defining poor compression; pages with greater zsize will be
- * rejected
- */
-static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
-/*
- * byte count defining poor *mean* compression; pages with greater zsize
- * will be rejected until sufficient better-compressed pages are accepted
- * driving the mean below this threshold
- */
-static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
-
-/*
- * for now, use named slabs so we can easily track usage; later we can
- * either just use kmalloc, or perhaps add a slab-like allocator
- * to more carefully manage total memory utilization
- */
-static struct kmem_cache *zcache_objnode_cache;
-static struct kmem_cache *zcache_obj_cache;
-
-static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
-
-/* we try to keep these statistics SMP-consistent */
-static long zcache_obj_count;
-static atomic_t zcache_obj_atomic = ATOMIC_INIT(0);
-static long zcache_obj_count_max;
-static long zcache_objnode_count;
-static atomic_t zcache_objnode_atomic = ATOMIC_INIT(0);
-static long zcache_objnode_count_max;
-static u64 zcache_eph_zbytes;
-static atomic_long_t zcache_eph_zbytes_atomic = ATOMIC_INIT(0);
-static u64 zcache_eph_zbytes_max;
-static u64 zcache_pers_zbytes;
-static atomic_long_t zcache_pers_zbytes_atomic = ATOMIC_INIT(0);
-static u64 zcache_pers_zbytes_max;
-static long zcache_eph_pageframes;
-static atomic_t zcache_eph_pageframes_atomic = ATOMIC_INIT(0);
-static long zcache_eph_pageframes_max;
-static long zcache_pers_pageframes;
-static atomic_t zcache_pers_pageframes_atomic = ATOMIC_INIT(0);
-static long zcache_pers_pageframes_max;
-static long zcache_pageframes_alloced;
-static atomic_t zcache_pageframes_alloced_atomic = ATOMIC_INIT(0);
-static long zcache_pageframes_freed;
-static atomic_t zcache_pageframes_freed_atomic = ATOMIC_INIT(0);
-static long zcache_eph_zpages;
-static atomic_t zcache_eph_zpages_atomic = ATOMIC_INIT(0);
-static long zcache_eph_zpages_max;
-static long zcache_pers_zpages;
-static atomic_t zcache_pers_zpages_atomic = ATOMIC_INIT(0);
-static long zcache_pers_zpages_max;
-
-/* but for the rest of these, counting races are ok */
-static unsigned long zcache_flush_total;
-static unsigned long zcache_flush_found;
-static unsigned long zcache_flobj_total;
-static unsigned long zcache_flobj_found;
-static unsigned long zcache_failed_eph_puts;
-static unsigned long zcache_failed_pers_puts;
-static unsigned long zcache_failed_getfreepages;
-static unsigned long zcache_failed_alloc;
-static unsigned long zcache_put_to_flush;
-static unsigned long zcache_compress_poor;
-static unsigned long zcache_mean_compress_poor;
-static unsigned long zcache_eph_ate_tail;
-static unsigned long zcache_eph_ate_tail_failed;
-static unsigned long zcache_pers_ate_eph;
-static unsigned long zcache_pers_ate_eph_failed;
-static unsigned long zcache_evicted_eph_zpages;
-static unsigned long zcache_evicted_eph_pageframes;
-static unsigned long zcache_last_active_file_pageframes;
-static unsigned long zcache_last_inactive_file_pageframes;
-static unsigned long zcache_last_active_anon_pageframes;
-static unsigned long zcache_last_inactive_anon_pageframes;
-static unsigned long zcache_eph_nonactive_puts_ignored;
-static unsigned long zcache_pers_nonactive_puts_ignored;
-
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#define zdfs debugfs_create_size_t
-#define zdfs64 debugfs_create_u64
-static int zcache_debugfs_init(void)
-{
- struct dentry *root = debugfs_create_dir("zcache", NULL);
- if (root == NULL)
- return -ENXIO;
-
- zdfs("obj_count", S_IRUGO, root, &zcache_obj_count);
- zdfs("obj_count_max", S_IRUGO, root, &zcache_obj_count_max);
- zdfs("objnode_count", S_IRUGO, root, &zcache_objnode_count);
- zdfs("objnode_count_max", S_IRUGO, root, &zcache_objnode_count_max);
- zdfs("flush_total", S_IRUGO, root, &zcache_flush_total);
- zdfs("flush_found", S_IRUGO, root, &zcache_flush_found);
- zdfs("flobj_total", S_IRUGO, root, &zcache_flobj_total);
- zdfs("flobj_found", S_IRUGO, root, &zcache_flobj_found);
- zdfs("failed_eph_puts", S_IRUGO, root, &zcache_failed_eph_puts);
- zdfs("failed_pers_puts", S_IRUGO, root, &zcache_failed_pers_puts);
- zdfs("failed_get_free_pages", S_IRUGO, root,
- &zcache_failed_getfreepages);
- zdfs("failed_alloc", S_IRUGO, root, &zcache_failed_alloc);
- zdfs("put_to_flush", S_IRUGO, root, &zcache_put_to_flush);
- zdfs("compress_poor", S_IRUGO, root, &zcache_compress_poor);
- zdfs("mean_compress_poor", S_IRUGO, root, &zcache_mean_compress_poor);
- zdfs("eph_ate_tail", S_IRUGO, root, &zcache_eph_ate_tail);
- zdfs("eph_ate_tail_failed", S_IRUGO, root, &zcache_eph_ate_tail_failed);
- zdfs("pers_ate_eph", S_IRUGO, root, &zcache_pers_ate_eph);
- zdfs("pers_ate_eph_failed", S_IRUGO, root, &zcache_pers_ate_eph_failed);
- zdfs("evicted_eph_zpages", S_IRUGO, root, &zcache_evicted_eph_zpages);
- zdfs("evicted_eph_pageframes", S_IRUGO, root,
- &zcache_evicted_eph_pageframes);
- zdfs("eph_pageframes", S_IRUGO, root, &zcache_eph_pageframes);
- zdfs("eph_pageframes_max", S_IRUGO, root, &zcache_eph_pageframes_max);
- zdfs("pers_pageframes", S_IRUGO, root, &zcache_pers_pageframes);
- zdfs("pers_pageframes_max", S_IRUGO, root, &zcache_pers_pageframes_max);
- zdfs("eph_zpages", S_IRUGO, root, &zcache_eph_zpages);
- zdfs("eph_zpages_max", S_IRUGO, root, &zcache_eph_zpages_max);
- zdfs("pers_zpages", S_IRUGO, root, &zcache_pers_zpages);
- zdfs("pers_zpages_max", S_IRUGO, root, &zcache_pers_zpages_max);
- zdfs("last_active_file_pageframes", S_IRUGO, root,
- &zcache_last_active_file_pageframes);
- zdfs("last_inactive_file_pageframes", S_IRUGO, root,
- &zcache_last_inactive_file_pageframes);
- zdfs("last_active_anon_pageframes", S_IRUGO, root,
- &zcache_last_active_anon_pageframes);
- zdfs("last_inactive_anon_pageframes", S_IRUGO, root,
- &zcache_last_inactive_anon_pageframes);
- zdfs("eph_nonactive_puts_ignored", S_IRUGO, root,
- &zcache_eph_nonactive_puts_ignored);
- zdfs("pers_nonactive_puts_ignored", S_IRUGO, root,
- &zcache_pers_nonactive_puts_ignored);
- zdfs64("eph_zbytes", S_IRUGO, root, &zcache_eph_zbytes);
- zdfs64("eph_zbytes_max", S_IRUGO, root, &zcache_eph_zbytes_max);
- zdfs64("pers_zbytes", S_IRUGO, root, &zcache_pers_zbytes);
- zdfs64("pers_zbytes_max", S_IRUGO, root, &zcache_pers_zbytes_max);
- return 0;
-}
-#undef zdebugfs
-#undef zdfs64
-#endif
-
-#define ZCACHE_DEBUG
-#ifdef ZCACHE_DEBUG
-/* developers can call this in case of ooms, e.g. to find memory leaks */
-void zcache_dump(void)
-{
- pr_info("zcache: obj_count=%lu\n", zcache_obj_count);
- pr_info("zcache: obj_count_max=%lu\n", zcache_obj_count_max);
- pr_info("zcache: objnode_count=%lu\n", zcache_objnode_count);
- pr_info("zcache: objnode_count_max=%lu\n", zcache_objnode_count_max);
- pr_info("zcache: flush_total=%lu\n", zcache_flush_total);
- pr_info("zcache: flush_found=%lu\n", zcache_flush_found);
- pr_info("zcache: flobj_total=%lu\n", zcache_flobj_total);
- pr_info("zcache: flobj_found=%lu\n", zcache_flobj_found);
- pr_info("zcache: failed_eph_puts=%lu\n", zcache_failed_eph_puts);
- pr_info("zcache: failed_pers_puts=%lu\n", zcache_failed_pers_puts);
- pr_info("zcache: failed_get_free_pages=%lu\n",
- zcache_failed_getfreepages);
- pr_info("zcache: failed_alloc=%lu\n", zcache_failed_alloc);
- pr_info("zcache: put_to_flush=%lu\n", zcache_put_to_flush);
- pr_info("zcache: compress_poor=%lu\n", zcache_compress_poor);
- pr_info("zcache: mean_compress_poor=%lu\n",
- zcache_mean_compress_poor);
- pr_info("zcache: eph_ate_tail=%lu\n", zcache_eph_ate_tail);
- pr_info("zcache: eph_ate_tail_failed=%lu\n",
- zcache_eph_ate_tail_failed);
- pr_info("zcache: pers_ate_eph=%lu\n", zcache_pers_ate_eph);
- pr_info("zcache: pers_ate_eph_failed=%lu\n",
- zcache_pers_ate_eph_failed);
- pr_info("zcache: evicted_eph_zpages=%lu\n", zcache_evicted_eph_zpages);
- pr_info("zcache: evicted_eph_pageframes=%lu\n",
- zcache_evicted_eph_pageframes);
- pr_info("zcache: eph_pageframes=%lu\n", zcache_eph_pageframes);
- pr_info("zcache: eph_pageframes_max=%lu\n", zcache_eph_pageframes_max);
- pr_info("zcache: pers_pageframes=%lu\n", zcache_pers_pageframes);
- pr_info("zcache: pers_pageframes_max=%lu\n",
- zcache_pers_pageframes_max);
- pr_info("zcache: eph_zpages=%lu\n", zcache_eph_zpages);
- pr_info("zcache: eph_zpages_max=%lu\n", zcache_eph_zpages_max);
- pr_info("zcache: pers_zpages=%lu\n", zcache_pers_zpages);
- pr_info("zcache: pers_zpages_max=%lu\n", zcache_pers_zpages_max);
- pr_info("zcache: eph_zbytes=%llu\n",
- (unsigned long long)zcache_eph_zbytes);
- pr_info("zcache: eph_zbytes_max=%llu\n",
- (unsigned long long)zcache_eph_zbytes_max);
- pr_info("zcache: pers_zbytes=%llu\n",
- (unsigned long long)zcache_pers_zbytes);
- pr_info("zcache: pers_zbytes_max=%llu\n",
- (unsigned long long)zcache_pers_zbytes_max);
-}
-#endif
-
-/*
- * zcache core code starts here
- */
-
-static struct zcache_client zcache_host;
-static struct zcache_client zcache_clients[MAX_CLIENTS];
-
-static inline bool is_local_client(struct zcache_client *cli)
-{
- return cli == &zcache_host;
-}
-
-static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
-{
- struct zcache_client *cli = &zcache_host;
-
- if (cli_id != LOCAL_CLIENT) {
- if (cli_id >= MAX_CLIENTS)
- goto out;
- cli = &zcache_clients[cli_id];
- }
-out:
- return cli;
-}
-
-/*
- * Tmem operations assume the poolid implies the invoking client.
- * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
- * RAMster has each client numbered by cluster node, and a KVM version
- * of zcache would have one client per guest and each client might
- * have a poolid==N.
- */
-struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
-{
- struct tmem_pool *pool = NULL;
- struct zcache_client *cli = NULL;
-
- cli = zcache_get_client_by_id(cli_id);
- if (cli == NULL)
- goto out;
- if (!is_local_client(cli))
- atomic_inc(&cli->refcount);
- if (poolid < MAX_POOLS_PER_CLIENT) {
- pool = cli->tmem_pools[poolid];
- if (pool != NULL)
- atomic_inc(&pool->refcount);
- }
-out:
- return pool;
-}
-
-void zcache_put_pool(struct tmem_pool *pool)
-{
- struct zcache_client *cli = NULL;
-
- if (pool == NULL)
- BUG();
- cli = pool->client;
- atomic_dec(&pool->refcount);
- if (!is_local_client(cli))
- atomic_dec(&cli->refcount);
-}
-
-int zcache_new_client(uint16_t cli_id)
-{
- struct zcache_client *cli;
- int ret = -1;
-
- cli = zcache_get_client_by_id(cli_id);
- if (cli == NULL)
- goto out;
- if (cli->allocated)
- goto out;
- cli->allocated = 1;
- ret = 0;
-out:
- return ret;
-}
-
-/*
- * zcache implementation for tmem host ops
- */
-
-static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
-{
- struct tmem_objnode *objnode = NULL;
- struct zcache_preload *kp;
- int i;
-
- kp = &__get_cpu_var(zcache_preloads);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- objnode = kp->objnodes[i];
- if (objnode != NULL) {
- kp->objnodes[i] = NULL;
- break;
- }
- }
- BUG_ON(objnode == NULL);
- zcache_objnode_count = atomic_inc_return(&zcache_objnode_atomic);
- if (zcache_objnode_count > zcache_objnode_count_max)
- zcache_objnode_count_max = zcache_objnode_count;
- return objnode;
-}
-
-static void zcache_objnode_free(struct tmem_objnode *objnode,
- struct tmem_pool *pool)
-{
- zcache_objnode_count =
- atomic_dec_return(&zcache_objnode_atomic);
- BUG_ON(zcache_objnode_count < 0);
- kmem_cache_free(zcache_objnode_cache, objnode);
-}
-
-static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
-{
- struct tmem_obj *obj = NULL;
- struct zcache_preload *kp;
-
- kp = &__get_cpu_var(zcache_preloads);
- obj = kp->obj;
- BUG_ON(obj == NULL);
- kp->obj = NULL;
- zcache_obj_count = atomic_inc_return(&zcache_obj_atomic);
- if (zcache_obj_count > zcache_obj_count_max)
- zcache_obj_count_max = zcache_obj_count;
- return obj;
-}
-
-static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
-{
- zcache_obj_count =
- atomic_dec_return(&zcache_obj_atomic);
- BUG_ON(zcache_obj_count < 0);
- kmem_cache_free(zcache_obj_cache, obj);
-}
-
-static struct tmem_hostops zcache_hostops = {
- .obj_alloc = zcache_obj_alloc,
- .obj_free = zcache_obj_free,
- .objnode_alloc = zcache_objnode_alloc,
- .objnode_free = zcache_objnode_free,
-};
-
-static struct page *zcache_alloc_page(void)
-{
- struct page *page = alloc_page(ZCACHE_GFP_MASK);
-
- if (page != NULL)
- zcache_pageframes_alloced =
- atomic_inc_return(&zcache_pageframes_alloced_atomic);
- return page;
-}
-
-#ifdef FRONTSWAP_HAS_UNUSE
-static void zcache_unacct_page(void)
-{
- zcache_pageframes_freed =
- atomic_inc_return(&zcache_pageframes_freed_atomic);
-}
-#endif
-
-static void zcache_free_page(struct page *page)
-{
- long curr_pageframes;
- static long max_pageframes, min_pageframes;
-
- if (page == NULL)
- BUG();
- __free_page(page);
- zcache_pageframes_freed =
- atomic_inc_return(&zcache_pageframes_freed_atomic);
- curr_pageframes = zcache_pageframes_alloced -
- atomic_read(&zcache_pageframes_freed_atomic) -
- atomic_read(&zcache_eph_pageframes_atomic) -
- atomic_read(&zcache_pers_pageframes_atomic);
- if (curr_pageframes > max_pageframes)
- max_pageframes = curr_pageframes;
- if (curr_pageframes < min_pageframes)
- min_pageframes = curr_pageframes;
-#ifdef ZCACHE_DEBUG
- if (curr_pageframes > 2L || curr_pageframes < -2L) {
- /* pr_info here */
- }
-#endif
-}
-
-/*
- * zcache implementations for PAM page descriptor ops
- */
-
-/* forward reference */
-static void zcache_compress(struct page *from,
- void **out_va, unsigned *out_len);
-
-static struct page *zcache_evict_eph_pageframe(void);
-
-static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
- struct tmem_handle *th)
-{
- void *pampd = NULL, *cdata = data;
- unsigned clen = size;
- struct page *page = (struct page *)(data), *newpage;
-
- if (!raw) {
- zcache_compress(page, &cdata, &clen);
- if (clen > zbud_max_buddy_size()) {
- zcache_compress_poor++;
- goto out;
- }
- } else {
- BUG_ON(clen > zbud_max_buddy_size());
- }
-
- /* look for space via an existing match first */
- pampd = (void *)zbud_match_prep(th, true, cdata, clen);
- if (pampd != NULL)
- goto got_pampd;
-
- /* no match, now we need to find (or free up) a full page */
- newpage = zcache_alloc_page();
- if (newpage != NULL)
- goto create_in_new_page;
-
- zcache_failed_getfreepages++;
- /* can't allocate a page, evict an ephemeral page via LRU */
- newpage = zcache_evict_eph_pageframe();
- if (newpage == NULL) {
- zcache_eph_ate_tail_failed++;
- goto out;
- }
- zcache_eph_ate_tail++;
-
-create_in_new_page:
- pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
- BUG_ON(pampd == NULL);
- zcache_eph_pageframes =
- atomic_inc_return(&zcache_eph_pageframes_atomic);
- if (zcache_eph_pageframes > zcache_eph_pageframes_max)
- zcache_eph_pageframes_max = zcache_eph_pageframes;
-
-got_pampd:
- zcache_eph_zbytes =
- atomic_long_add_return(clen, &zcache_eph_zbytes_atomic);
- if (zcache_eph_zbytes > zcache_eph_zbytes_max)
- zcache_eph_zbytes_max = zcache_eph_zbytes;
- zcache_eph_zpages = atomic_inc_return(&zcache_eph_zpages_atomic);
- if (zcache_eph_zpages > zcache_eph_zpages_max)
- zcache_eph_zpages_max = zcache_eph_zpages;
- if (ramster_enabled && raw)
- ramster_count_foreign_pages(true, 1);
-out:
- return pampd;
-}
-
-static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
- struct tmem_handle *th)
-{
- void *pampd = NULL, *cdata = data;
- unsigned clen = size;
- struct page *page = (struct page *)(data), *newpage;
- unsigned long zbud_mean_zsize;
- unsigned long curr_pers_zpages, total_zsize;
-
- if (data == NULL) {
- BUG_ON(!ramster_enabled);
- goto create_pampd;
- }
- curr_pers_zpages = zcache_pers_zpages;
-/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
- if (!raw)
- zcache_compress(page, &cdata, &clen);
- /* reject if compression is too poor */
- if (clen > zbud_max_zsize) {
- zcache_compress_poor++;
- goto out;
- }
- /* reject if mean compression is too poor */
- if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
- total_zsize = zcache_pers_zbytes;
- if ((long)total_zsize < 0)
- total_zsize = 0;
- zbud_mean_zsize = div_u64(total_zsize,
- curr_pers_zpages);
- if (zbud_mean_zsize > zbud_max_mean_zsize) {
- zcache_mean_compress_poor++;
- goto out;
- }
- }
-
-create_pampd:
- /* look for space via an existing match first */
- pampd = (void *)zbud_match_prep(th, false, cdata, clen);
- if (pampd != NULL)
- goto got_pampd;
-
- /* no match, now we need to find (or free up) a full page */
- newpage = zcache_alloc_page();
- if (newpage != NULL)
- goto create_in_new_page;
- /*
- * FIXME do the following only if eph is oversized?
- * if (zcache_eph_pageframes >
- * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
- * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
- */
- zcache_failed_getfreepages++;
- /* can't allocate a page, evict an ephemeral page via LRU */
- newpage = zcache_evict_eph_pageframe();
- if (newpage == NULL) {
- zcache_pers_ate_eph_failed++;
- goto out;
- }
- zcache_pers_ate_eph++;
-
-create_in_new_page:
- pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
- BUG_ON(pampd == NULL);
- zcache_pers_pageframes =
- atomic_inc_return(&zcache_pers_pageframes_atomic);
- if (zcache_pers_pageframes > zcache_pers_pageframes_max)
- zcache_pers_pageframes_max = zcache_pers_pageframes;
-
-got_pampd:
- zcache_pers_zpages = atomic_inc_return(&zcache_pers_zpages_atomic);
- if (zcache_pers_zpages > zcache_pers_zpages_max)
- zcache_pers_zpages_max = zcache_pers_zpages;
- zcache_pers_zbytes =
- atomic_long_add_return(clen, &zcache_pers_zbytes_atomic);
- if (zcache_pers_zbytes > zcache_pers_zbytes_max)
- zcache_pers_zbytes_max = zcache_pers_zbytes;
- if (ramster_enabled && raw)
- ramster_count_foreign_pages(false, 1);
-out:
- return pampd;
-}
-
-/*
- * This is called directly from zcache_put_page to pre-allocate space
- * to store a zpage.
- */
-void *zcache_pampd_create(char *data, unsigned int size, bool raw,
- int eph, struct tmem_handle *th)
-{
- void *pampd = NULL;
- struct zcache_preload *kp;
- struct tmem_objnode *objnode;
- struct tmem_obj *obj;
- int i;
-
- BUG_ON(!irqs_disabled());
- /* pre-allocate per-cpu metadata */
- BUG_ON(zcache_objnode_cache == NULL);
- BUG_ON(zcache_obj_cache == NULL);
- kp = &__get_cpu_var(zcache_preloads);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- objnode = kp->objnodes[i];
- if (objnode == NULL) {
- objnode = kmem_cache_alloc(zcache_objnode_cache,
- ZCACHE_GFP_MASK);
- if (unlikely(objnode == NULL)) {
- zcache_failed_alloc++;
- goto out;
- }
- kp->objnodes[i] = objnode;
- }
- }
- if (kp->obj == NULL) {
- obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
- kp->obj = obj;
- }
- if (unlikely(kp->obj == NULL)) {
- zcache_failed_alloc++;
- goto out;
- }
- /*
- * ok, have all the metadata pre-allocated, now do the data
- * but since how we allocate the data is dependent on ephemeral
- * or persistent, we split the call here to different sub-functions
- */
- if (eph)
- pampd = zcache_pampd_eph_create(data, size, raw, th);
- else
- pampd = zcache_pampd_pers_create(data, size, raw, th);
-out:
- return pampd;
-}
-
-/*
- * This is a pamops called via tmem_put and is necessary to "finish"
- * a pampd creation.
- */
-void zcache_pampd_create_finish(void *pampd, bool eph)
-{
- zbud_create_finish((struct zbudref *)pampd, eph);
-}
-
-/*
- * This is passed as a function parameter to zbud_decompress so that
- * zbud need not be familiar with the details of crypto. It assumes that
- * the bytes from_va and to_va through from_va+size-1 and to_va+size-1 are
- * kmapped. It must be successful, else there is a logic bug somewhere.
- */
-static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
-{
- int ret;
- unsigned int outlen = PAGE_SIZE;
-
- ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
- to_va, &outlen);
- BUG_ON(ret);
- BUG_ON(outlen != PAGE_SIZE);
-}
-
-/*
- * Decompress from the kernel va to a pageframe
- */
-void zcache_decompress_to_page(char *from_va, unsigned int size,
- struct page *to_page)
-{
- char *to_va = kmap_atomic(to_page);
- zcache_decompress(from_va, size, to_va);
- kunmap_atomic(to_va);
-}
-
-/*
- * fill the pageframe corresponding to the struct page with the data
- * from the passed pampd
- */
-static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
- void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index)
-{
- int ret;
- bool eph = !is_persistent(pool);
-
- BUG_ON(preemptible());
- BUG_ON(eph); /* fix later if shared pools get implemented */
- BUG_ON(pampd_is_remote(pampd));
- if (raw)
- ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
- sizep, eph);
- else {
- ret = zbud_decompress((struct page *)(data),
- (struct zbudref *)pampd, false,
- zcache_decompress);
- *sizep = PAGE_SIZE;
- }
- return ret;
-}
-
-/*
- * fill the pageframe corresponding to the struct page with the data
- * from the passed pampd
- */
-static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
- void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index)
-{
- int ret;
- bool eph = !is_persistent(pool);
- struct page *page = NULL;
- unsigned int zsize, zpages;
-
- BUG_ON(preemptible());
- BUG_ON(pampd_is_remote(pampd));
- if (raw)
- ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
- sizep, eph);
- else {
- ret = zbud_decompress((struct page *)(data),
- (struct zbudref *)pampd, eph,
- zcache_decompress);
- *sizep = PAGE_SIZE;
- }
- page = zbud_free_and_delist((struct zbudref *)pampd, eph,
- &zsize, &zpages);
- if (eph) {
- if (page)
- zcache_eph_pageframes =
- atomic_dec_return(&zcache_eph_pageframes_atomic);
- zcache_eph_zpages =
- atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
- zcache_eph_zbytes =
- atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
- } else {
- if (page)
- zcache_pers_pageframes =
- atomic_dec_return(&zcache_pers_pageframes_atomic);
- zcache_pers_zpages =
- atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
- zcache_pers_zbytes =
- atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
- }
- if (!is_local_client(pool->client))
- ramster_count_foreign_pages(eph, -1);
- if (page)
- zcache_free_page(page);
- return ret;
-}
-
-/*
- * free the pampd and remove it from any zcache lists
- * pampd must no longer be pointed to from any tmem data structures!
- */
-static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index, bool acct)
-{
- struct page *page = NULL;
- unsigned int zsize, zpages;
-
- BUG_ON(preemptible());
- if (pampd_is_remote(pampd)) {
- BUG_ON(!ramster_enabled);
- pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
- if (pampd == NULL)
- return;
- }
- if (is_ephemeral(pool)) {
- page = zbud_free_and_delist((struct zbudref *)pampd,
- true, &zsize, &zpages);
- if (page)
- zcache_eph_pageframes =
- atomic_dec_return(&zcache_eph_pageframes_atomic);
- zcache_eph_zpages =
- atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
- zcache_eph_zbytes =
- atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
- /* FIXME CONFIG_RAMSTER... check acct parameter? */
- } else {
- page = zbud_free_and_delist((struct zbudref *)pampd,
- false, &zsize, &zpages);
- if (page)
- zcache_pers_pageframes =
- atomic_dec_return(&zcache_pers_pageframes_atomic);
- zcache_pers_zpages =
- atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
- zcache_pers_zbytes =
- atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
- }
- if (!is_local_client(pool->client))
- ramster_count_foreign_pages(is_ephemeral(pool), -1);
- if (page)
- zcache_free_page(page);
-}
-
-static struct tmem_pamops zcache_pamops = {
- .create_finish = zcache_pampd_create_finish,
- .get_data = zcache_pampd_get_data,
- .get_data_and_free = zcache_pampd_get_data_and_free,
- .free = zcache_pampd_free,
-};
-
-/*
- * zcache compression/decompression and related per-cpu stuff
- */
-
-static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
-#define ZCACHE_DSTMEM_ORDER 1
-
-static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
-{
- int ret;
- unsigned char *dmem = __get_cpu_var(zcache_dstmem);
- char *from_va;
-
- BUG_ON(!irqs_disabled());
- /* no buffer or no compressor so can't compress */
- BUG_ON(dmem == NULL);
- *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
- from_va = kmap_atomic(from);
- mb();
- ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
- out_len);
- BUG_ON(ret);
- *out_va = dmem;
- kunmap_atomic(from_va);
-}
-
-static int zcache_comp_cpu_up(int cpu)
-{
- struct crypto_comp *tfm;
-
- tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
- if (IS_ERR(tfm))
- return NOTIFY_BAD;
- *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
- return NOTIFY_OK;
-}
-
-static void zcache_comp_cpu_down(int cpu)
-{
- struct crypto_comp *tfm;
-
- tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
- crypto_free_comp(tfm);
- *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
-}
-
-static int zcache_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
-{
- int ret, i, cpu = (long)pcpu;
- struct zcache_preload *kp;
-
- switch (action) {
- case CPU_UP_PREPARE:
- ret = zcache_comp_cpu_up(cpu);
- if (ret != NOTIFY_OK) {
- pr_err("%s: can't allocate compressor xform\n",
- namestr);
- return ret;
- }
- per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
- if (ramster_enabled)
- ramster_cpu_up(cpu);
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- zcache_comp_cpu_down(cpu);
- free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
- ZCACHE_DSTMEM_ORDER);
- per_cpu(zcache_dstmem, cpu) = NULL;
- kp = &per_cpu(zcache_preloads, cpu);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- if (kp->objnodes[i])
- kmem_cache_free(zcache_objnode_cache,
- kp->objnodes[i]);
- }
- if (kp->obj) {
- kmem_cache_free(zcache_obj_cache, kp->obj);
- kp->obj = NULL;
- }
- if (ramster_enabled)
- ramster_cpu_down(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block zcache_cpu_notifier_block = {
- .notifier_call = zcache_cpu_notifier
-};
-
-/*
- * The following code interacts with the zbud eviction and zbud
- * zombify code to access LRU pages
- */
-
-static struct page *zcache_evict_eph_pageframe(void)
-{
- struct page *page;
- unsigned int zsize = 0, zpages = 0;
-
- page = zbud_evict_pageframe_lru(&zsize, &zpages);
- if (page == NULL)
- goto out;
- zcache_eph_zbytes = atomic_long_sub_return(zsize,
- &zcache_eph_zbytes_atomic);
- zcache_eph_zpages = atomic_sub_return(zpages,
- &zcache_eph_zpages_atomic);
- zcache_evicted_eph_zpages++;
- zcache_eph_pageframes =
- atomic_dec_return(&zcache_eph_pageframes_atomic);
- zcache_evicted_eph_pageframes++;
-out:
- return page;
-}
-
-#ifdef FRONTSWAP_HAS_UNUSE
-static void unswiz(struct tmem_oid oid, u32 index,
- unsigned *type, pgoff_t *offset);
-
-/*
- * Choose an LRU persistent pageframe and attempt to "unuse" it by
- * calling frontswap_unuse on both zpages.
- *
- * This is work-in-progress.
- */
-
-static int zcache_frontswap_unuse(void)
-{
- struct tmem_handle th[2];
- int ret = -ENOMEM;
- int nzbuds, unuse_ret;
- unsigned type;
- struct page *newpage1 = NULL, *newpage2 = NULL;
- struct page *evictpage1 = NULL, *evictpage2 = NULL;
- pgoff_t offset;
-
- newpage1 = alloc_page(ZCACHE_GFP_MASK);
- newpage2 = alloc_page(ZCACHE_GFP_MASK);
- if (newpage1 == NULL)
- evictpage1 = zcache_evict_eph_pageframe();
- if (newpage2 == NULL)
- evictpage2 = zcache_evict_eph_pageframe();
- if (evictpage1 == NULL || evictpage2 == NULL)
- goto free_and_out;
- /* ok, we have two pages pre-allocated */
- nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
- if (nzbuds == 0) {
- ret = -ENOENT;
- goto free_and_out;
- }
- unswiz(th[0].oid, th[0].index, &type, &offset);
- unuse_ret = frontswap_unuse(type, offset,
- newpage1 != NULL ? newpage1 : evictpage1,
- ZCACHE_GFP_MASK);
- if (unuse_ret != 0)
- goto free_and_out;
- else if (evictpage1 != NULL)
- zcache_unacct_page();
- newpage1 = NULL;
- evictpage1 = NULL;
- if (nzbuds == 2) {
- unswiz(th[1].oid, th[1].index, &type, &offset);
- unuse_ret = frontswap_unuse(type, offset,
- newpage2 != NULL ? newpage2 : evictpage2,
- ZCACHE_GFP_MASK);
- if (unuse_ret != 0) {
- goto free_and_out;
- } else if (evictpage2 != NULL) {
- zcache_unacct_page();
- }
- }
- ret = 0;
- goto out;
-
-free_and_out:
- if (newpage1 != NULL)
- __free_page(newpage1);
- if (newpage2 != NULL)
- __free_page(newpage2);
- if (evictpage1 != NULL)
- zcache_free_page(evictpage1);
- if (evictpage2 != NULL)
- zcache_free_page(evictpage2);
-out:
- return ret;
-}
-#endif
-
-/*
- * When zcache is disabled ("frozen"), pools can be created and destroyed,
- * but all puts (and thus all other operations that require memory allocation)
- * must fail. If zcache is unfrozen, accepts puts, then frozen again,
- * data consistency requires all puts while frozen to be converted into
- * flushes.
- */
-static bool zcache_freeze;
-
-/*
- * This zcache shrinker interface reduces the number of ephemeral pageframes
- * used by zcache to approximately the same as the total number of LRU_FILE
- * pageframes in use.
- */
-static int shrink_zcache_memory(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- static bool in_progress;
- int ret = -1;
- int nr = sc->nr_to_scan;
- int nr_evict = 0;
- int nr_unuse = 0;
- struct page *page;
-#ifdef FRONTSWAP_HAS_UNUSE
- int unuse_ret;
-#endif
-
- if (nr <= 0)
- goto skip_evict;
-
- /* don't allow more than one eviction thread at a time */
- if (in_progress)
- goto skip_evict;
-
- in_progress = true;
-
- /* we are going to ignore nr, and target a different value */
- zcache_last_active_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
- zcache_last_inactive_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
- nr_evict = zcache_eph_pageframes - zcache_last_active_file_pageframes +
- zcache_last_inactive_file_pageframes;
- while (nr_evict-- > 0) {
- page = zcache_evict_eph_pageframe();
- if (page == NULL)
- break;
- zcache_free_page(page);
- }
-
- zcache_last_active_anon_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
- zcache_last_inactive_anon_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
- nr_unuse = zcache_pers_pageframes - zcache_last_active_anon_pageframes +
- zcache_last_inactive_anon_pageframes;
-#ifdef FRONTSWAP_HAS_UNUSE
- /* rate limit for testing */
- if (nr_unuse > 32)
- nr_unuse = 32;
- while (nr_unuse-- > 0) {
- unuse_ret = zcache_frontswap_unuse();
- if (unuse_ret == -ENOMEM)
- break;
- }
-#endif
- in_progress = false;
-
-skip_evict:
- /* resample: has changed, but maybe not all the way yet */
- zcache_last_active_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
- zcache_last_inactive_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
- ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
- zcache_last_inactive_file_pageframes;
- if (ret < 0)
- ret = 0;
- return ret;
-}
-
-static struct shrinker zcache_shrinker = {
- .shrink = shrink_zcache_memory,
- .seeks = DEFAULT_SEEKS,
-};
-
-/*
- * zcache shims between cleancache/frontswap ops and tmem
- */
-
-/* FIXME rename these core routines to zcache_tmemput etc? */
-int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, void *page,
- unsigned int size, bool raw, int ephemeral)
-{
- struct tmem_pool *pool;
- struct tmem_handle th;
- int ret = -1;
- void *pampd = NULL;
-
- BUG_ON(!irqs_disabled());
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (unlikely(pool == NULL))
- goto out;
- if (!zcache_freeze) {
- ret = 0;
- th.client_id = cli_id;
- th.pool_id = pool_id;
- th.oid = *oidp;
- th.index = index;
- pampd = zcache_pampd_create((char *)page, size, raw,
- ephemeral, &th);
- if (pampd == NULL) {
- ret = -ENOMEM;
- if (ephemeral)
- zcache_failed_eph_puts++;
- else
- zcache_failed_pers_puts++;
- } else {
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- ret = tmem_put(pool, oidp, index, 0, pampd);
- if (ret < 0)
- BUG();
- }
- zcache_put_pool(pool);
- } else {
- zcache_put_to_flush++;
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (atomic_read(&pool->obj_count) > 0)
- /* the put fails whether the flush succeeds or not */
- (void)tmem_flush_page(pool, oidp, index);
- zcache_put_pool(pool);
- }
-out:
- return ret;
-}
-
-int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, void *page,
- size_t *sizep, bool raw, int get_and_free)
-{
- struct tmem_pool *pool;
- int ret = -1;
- bool eph;
-
- if (!raw) {
- BUG_ON(irqs_disabled());
- BUG_ON(in_softirq());
- }
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- eph = is_ephemeral(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_get(pool, oidp, index, (char *)(page),
- sizep, raw, get_and_free);
- zcache_put_pool(pool);
- }
- WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
- "zcache_get fails on persistent pool, "
- "bad things are very likely to happen soon\n");
-#ifdef RAMSTER_TESTING
- if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
- pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
-#endif
- return ret;
-}
-
-int zcache_flush_page(int cli_id, int pool_id,
- struct tmem_oid *oidp, uint32_t index)
-{
- struct tmem_pool *pool;
- int ret = -1;
- unsigned long flags;
-
- local_irq_save(flags);
- zcache_flush_total++;
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_flush_page(pool, oidp, index);
- zcache_put_pool(pool);
- }
- if (ret >= 0)
- zcache_flush_found++;
- local_irq_restore(flags);
- return ret;
-}
-
-int zcache_flush_object(int cli_id, int pool_id,
- struct tmem_oid *oidp)
-{
- struct tmem_pool *pool;
- int ret = -1;
- unsigned long flags;
-
- local_irq_save(flags);
- zcache_flobj_total++;
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_flush_object(pool, oidp);
- zcache_put_pool(pool);
- }
- if (ret >= 0)
- zcache_flobj_found++;
- local_irq_restore(flags);
- return ret;
-}
-
-static int zcache_client_destroy_pool(int cli_id, int pool_id)
-{
- struct tmem_pool *pool = NULL;
- struct zcache_client *cli = NULL;
- int ret = -1;
-
- if (pool_id < 0)
- goto out;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
- if (cli == NULL)
- goto out;
- atomic_inc(&cli->refcount);
- pool = cli->tmem_pools[pool_id];
- if (pool == NULL)
- goto out;
- cli->tmem_pools[pool_id] = NULL;
- /* wait for pool activity on other cpus to quiesce */
- while (atomic_read(&pool->refcount) != 0)
- ;
- atomic_dec(&cli->refcount);
- local_bh_disable();
- ret = tmem_destroy_pool(pool);
- local_bh_enable();
- kfree(pool);
- if (cli_id == LOCAL_CLIENT)
- pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
- else
- pr_info("%s: destroyed pool id=%d, client=%d\n",
- namestr, pool_id, cli_id);
-out:
- return ret;
-}
-
-int zcache_new_pool(uint16_t cli_id, uint32_t flags)
-{
- int poolid = -1;
- struct tmem_pool *pool;
- struct zcache_client *cli = NULL;
-
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
- if (cli == NULL)
- goto out;
- atomic_inc(&cli->refcount);
- pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
- if (pool == NULL) {
- pr_info("%s: pool creation failed: out of memory\n", namestr);
- goto out;
- }
-
- for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
- if (cli->tmem_pools[poolid] == NULL)
- break;
- if (poolid >= MAX_POOLS_PER_CLIENT) {
- pr_info("%s: pool creation failed: max exceeded\n", namestr);
- kfree(pool);
- poolid = -1;
- goto out;
- }
- atomic_set(&pool->refcount, 0);
- pool->client = cli;
- pool->pool_id = poolid;
- tmem_new_pool(pool, flags);
- cli->tmem_pools[poolid] = pool;
- if (cli_id == LOCAL_CLIENT)
- pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
- flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- poolid);
- else
- pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
- flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- poolid, cli_id);
-out:
- if (cli != NULL)
- atomic_dec(&cli->refcount);
- return poolid;
-}
-
-static int zcache_local_new_pool(uint32_t flags)
-{
- return zcache_new_pool(LOCAL_CLIENT, flags);
-}
-
-int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
-{
- struct tmem_pool *pool;
- struct zcache_client *cli;
- uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
- int ret = -1;
-
- BUG_ON(!ramster_enabled);
- if (cli_id == LOCAL_CLIENT)
- goto out;
- if (pool_id >= MAX_POOLS_PER_CLIENT)
- goto out;
- if (cli_id >= MAX_CLIENTS)
- goto out;
-
- cli = &zcache_clients[cli_id];
- if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
- pr_err("zcache_autocreate_pool: pool type disabled\n");
- goto out;
- }
- if (!cli->allocated) {
- if (zcache_new_client(cli_id)) {
- pr_err("zcache_autocreate_pool: can't create client\n");
- goto out;
- }
- cli = &zcache_clients[cli_id];
- }
- atomic_inc(&cli->refcount);
- pool = cli->tmem_pools[pool_id];
- if (pool != NULL) {
- if (pool->persistent && eph) {
- pr_err("zcache_autocreate_pool: type mismatch\n");
- goto out;
- }
- ret = 0;
- goto out;
- }
- pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
- if (pool == NULL) {
- pr_info("%s: pool creation failed: out of memory\n", namestr);
- goto out;
- }
- atomic_set(&pool->refcount, 0);
- pool->client = cli;
- pool->pool_id = pool_id;
- tmem_new_pool(pool, flags);
- cli->tmem_pools[pool_id] = pool;
- pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
- namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- pool_id, cli_id);
- ret = 0;
-out:
- if (cli != NULL)
- atomic_dec(&cli->refcount);
- return ret;
-}
-
-/**********
- * Two kernel functionalities currently can be layered on top of tmem.
- * These are "cleancache" which is used as a second-chance cache for clean
- * page cache pages; and "frontswap" which is used for swap pages
- * to avoid writes to disk. A generic "shim" is provided here for each
- * to translate in-kernel semantics to zcache semantics.
- */
-
-static void zcache_cleancache_put_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index, struct page *page)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
- zcache_eph_nonactive_puts_ignored++;
- return;
- }
- if (likely(ind == index))
- (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
- page, PAGE_SIZE, false, 1);
-}
-
-static int zcache_cleancache_get_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index, struct page *page)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
- size_t size;
- int ret = -1;
-
- if (likely(ind == index)) {
- ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
- page, &size, false, 0);
- BUG_ON(ret >= 0 && size != PAGE_SIZE);
- if (ret == 0)
- SetPageWasActive(page);
- }
- return ret;
-}
-
-static void zcache_cleancache_flush_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (likely(ind == index))
- (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
-}
-
-static void zcache_cleancache_flush_inode(int pool_id,
- struct cleancache_filekey key)
-{
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
-}
-
-static void zcache_cleancache_flush_fs(int pool_id)
-{
- if (pool_id >= 0)
- (void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
-}
-
-static int zcache_cleancache_init_fs(size_t pagesize)
-{
- BUG_ON(sizeof(struct cleancache_filekey) !=
- sizeof(struct tmem_oid));
- BUG_ON(pagesize != PAGE_SIZE);
- return zcache_local_new_pool(0);
-}
-
-static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
-{
- /* shared pools are unsupported and map to private */
- BUG_ON(sizeof(struct cleancache_filekey) !=
- sizeof(struct tmem_oid));
- BUG_ON(pagesize != PAGE_SIZE);
- return zcache_local_new_pool(0);
-}
-
-static struct cleancache_ops zcache_cleancache_ops = {
- .put_page = zcache_cleancache_put_page,
- .get_page = zcache_cleancache_get_page,
- .invalidate_page = zcache_cleancache_flush_page,
- .invalidate_inode = zcache_cleancache_flush_inode,
- .invalidate_fs = zcache_cleancache_flush_fs,
- .init_shared_fs = zcache_cleancache_init_shared_fs,
- .init_fs = zcache_cleancache_init_fs
-};
-
-struct cleancache_ops zcache_cleancache_register_ops(void)
-{
- struct cleancache_ops old_ops =
- cleancache_register_ops(&zcache_cleancache_ops);
-
- return old_ops;
-}
-
-/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
-static int zcache_frontswap_poolid __read_mostly = -1;
-
-/*
- * Swizzling increases objects per swaptype, increasing tmem concurrency
- * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
- * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
- */
-#define SWIZ_BITS 8
-#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
-#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
-#define iswiz(_ind) (_ind >> SWIZ_BITS)
-
-static inline struct tmem_oid oswiz(unsigned type, u32 ind)
-{
- struct tmem_oid oid = { .oid = { 0 } };
- oid.oid[0] = _oswiz(type, ind);
- return oid;
-}
-
-#ifdef FRONTSWAP_HAS_UNUSE
-static void unswiz(struct tmem_oid oid, u32 index,
- unsigned *type, pgoff_t *offset)
-{
- *type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
- *offset = (pgoff_t)((index << SWIZ_BITS) |
- (oid.oid[0] & SWIZ_MASK));
-}
-#endif
-
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
- struct page *page)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
- int ret = -1;
- unsigned long flags;
-
- BUG_ON(!PageLocked(page));
- if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
- zcache_pers_nonactive_puts_ignored++;
- ret = -ERANGE;
- goto out;
- }
- if (likely(ind64 == ind)) {
- local_irq_save(flags);
- ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind),
- page, PAGE_SIZE, false, 0);
- local_irq_restore(flags);
- }
-out:
- return ret;
-}
-
-/* returns 0 if the page was successfully gotten from frontswap, -1 if
- * was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
- struct page *page)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
- size_t size;
- int ret = -1, get_and_free;
-
- if (frontswap_has_exclusive_gets)
- get_and_free = 1;
- else
- get_and_free = -1;
- BUG_ON(!PageLocked(page));
- if (likely(ind64 == ind)) {
- ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind),
- page, &size, false, get_and_free);
- BUG_ON(ret >= 0 && size != PAGE_SIZE);
- }
- return ret;
-}
-
-/* flush a single page from frontswap */
-static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
-
- if (likely(ind64 == ind))
- (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind));
-}
-
-/* flush all pages from the passed swaptype */
-static void zcache_frontswap_flush_area(unsigned type)
-{
- struct tmem_oid oid;
- int ind;
-
- for (ind = SWIZ_MASK; ind >= 0; ind--) {
- oid = oswiz(type, ind);
- (void)zcache_flush_object(LOCAL_CLIENT,
- zcache_frontswap_poolid, &oid);
- }
-}
-
-static void zcache_frontswap_init(unsigned ignored)
-{
- /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
- if (zcache_frontswap_poolid < 0)
- zcache_frontswap_poolid =
- zcache_local_new_pool(TMEM_POOL_PERSIST);
-}
-
-static struct frontswap_ops zcache_frontswap_ops = {
- .store = zcache_frontswap_put_page,
- .load = zcache_frontswap_get_page,
- .invalidate_page = zcache_frontswap_flush_page,
- .invalidate_area = zcache_frontswap_flush_area,
- .init = zcache_frontswap_init
-};
-
-struct frontswap_ops zcache_frontswap_register_ops(void)
-{
- struct frontswap_ops old_ops =
- frontswap_register_ops(&zcache_frontswap_ops);
-
- return old_ops;
-}
-
-/*
- * zcache initialization
- * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
- * OR NOTHING HAPPENS!
- */
-
-static int __init enable_zcache(char *s)
-{
- zcache_enabled = 1;
- return 1;
-}
-__setup("zcache", enable_zcache);
-
-static int __init enable_ramster(char *s)
-{
- zcache_enabled = 1;
-#ifdef CONFIG_RAMSTER
- ramster_enabled = 1;
-#endif
- return 1;
-}
-__setup("ramster", enable_ramster);
-
-/* allow independent dynamic disabling of cleancache and frontswap */
-
-static int __init no_cleancache(char *s)
-{
- disable_cleancache = 1;
- return 1;
-}
-
-__setup("nocleancache", no_cleancache);
-
-static int __init no_frontswap(char *s)
-{
- disable_frontswap = 1;
- return 1;
-}
-
-__setup("nofrontswap", no_frontswap);
-
-static int __init no_frontswap_exclusive_gets(char *s)
-{
- frontswap_has_exclusive_gets = false;
- return 1;
-}
-
-__setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);
-
-static int __init no_frontswap_ignore_nonactive(char *s)
-{
- disable_frontswap_ignore_nonactive = 1;
- return 1;
-}
-
-__setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);
-
-static int __init no_cleancache_ignore_nonactive(char *s)
-{
- disable_cleancache_ignore_nonactive = 1;
- return 1;
-}
-
-__setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
-
-static int __init enable_zcache_compressor(char *s)
-{
- strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
- zcache_enabled = 1;
- return 1;
-}
-__setup("zcache=", enable_zcache_compressor);
-
-
-static int __init zcache_comp_init(void)
-{
- int ret = 0;
-
- /* check crypto algorithm */
- if (*zcache_comp_name != '\0') {
- ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret)
- pr_info("zcache: %s not supported\n",
- zcache_comp_name);
- }
- if (!ret)
- strcpy(zcache_comp_name, "lzo");
- ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret) {
- ret = 1;
- goto out;
- }
- pr_info("zcache: using %s compressor\n", zcache_comp_name);
-
- /* alloc percpu transforms */
- ret = 0;
- zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
- if (!zcache_comp_pcpu_tfms)
- ret = 1;
-out:
- return ret;
-}
-
-static int __init zcache_init(void)
-{
- int ret = 0;
-
- if (ramster_enabled) {
- namestr = "ramster";
- ramster_register_pamops(&zcache_pamops);
- }
-#ifdef CONFIG_DEBUG_FS
- zcache_debugfs_init();
-#endif
- if (zcache_enabled) {
- unsigned int cpu;
-
- tmem_register_hostops(&zcache_hostops);
- tmem_register_pamops(&zcache_pamops);
- ret = register_cpu_notifier(&zcache_cpu_notifier_block);
- if (ret) {
- pr_err("%s: can't register cpu notifier\n", namestr);
- goto out;
- }
- ret = zcache_comp_init();
- if (ret) {
- pr_err("%s: compressor initialization failed\n",
- namestr);
- goto out;
- }
- for_each_online_cpu(cpu) {
- void *pcpu = (void *)(long)cpu;
- zcache_cpu_notifier(&zcache_cpu_notifier_block,
- CPU_UP_PREPARE, pcpu);
- }
- }
- zcache_objnode_cache = kmem_cache_create("zcache_objnode",
- sizeof(struct tmem_objnode), 0, 0, NULL);
- zcache_obj_cache = kmem_cache_create("zcache_obj",
- sizeof(struct tmem_obj), 0, 0, NULL);
- ret = zcache_new_client(LOCAL_CLIENT);
- if (ret) {
- pr_err("%s: can't create client\n", namestr);
- goto out;
- }
- zbud_init();
- if (zcache_enabled && !disable_cleancache) {
- struct cleancache_ops old_ops;
-
- register_shrinker(&zcache_shrinker);
- old_ops = zcache_cleancache_register_ops();
- pr_info("%s: cleancache enabled using kernel transcendent "
- "memory and compression buddies\n", namestr);
-#ifdef ZCACHE_DEBUG
- pr_info("%s: cleancache: ignorenonactive = %d\n",
- namestr, !disable_cleancache_ignore_nonactive);
-#endif
- if (old_ops.init_fs != NULL)
- pr_warn("%s: cleancache_ops overridden\n", namestr);
- }
- if (zcache_enabled && !disable_frontswap) {
- struct frontswap_ops old_ops;
-
- old_ops = zcache_frontswap_register_ops();
- if (frontswap_has_exclusive_gets)
- frontswap_tmem_exclusive_gets(true);
- pr_info("%s: frontswap enabled using kernel transcendent "
- "memory and compression buddies\n", namestr);
-#ifdef ZCACHE_DEBUG
- pr_info("%s: frontswap: excl gets = %d active only = %d\n",
- namestr, frontswap_has_exclusive_gets,
- !disable_frontswap_ignore_nonactive);
-#endif
- if (old_ops.init != NULL)
- pr_warn("%s: frontswap_ops overridden\n", namestr);
- }
- if (ramster_enabled)
- ramster_init(!disable_cleancache, !disable_frontswap,
- frontswap_has_exclusive_gets);
-out:
- return ret;
-}
-
-late_initcall(zcache_init);
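
For reference, the removed zcache code above multiplexes every frontswap "type" (swapfile) into one persistent tmem pool by swizzling: oswiz() folds the low SWIZ_BITS of the swap offset into the object id, iswiz() keeps the remaining offset bits as the per-object index, and unswiz() reverses both. Below is a minimal standalone sketch of that arithmetic (plain userspace C, for illustration only and not part of the patch), showing that (type, offset) round-trips exactly as long as the type fits above SWIZ_BITS in the first oid word:

	/* sketch only: mirrors the removed SWIZ_BITS/oswiz/iswiz/unswiz logic */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SWIZ_BITS 8
	#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)

	int main(void)
	{
		unsigned int type = 3;      /* which swapfile */
		uint64_t offset = 0x12345;  /* page offset within it */

		/* oswiz(): low SWIZ_BITS of the offset go into the object id */
		uint64_t oid0 = ((uint64_t)type << SWIZ_BITS) | (offset & SWIZ_MASK);
		/* iswiz(): the remaining offset bits become the per-object index */
		uint32_t index = (uint32_t)(offset >> SWIZ_BITS);

		/* unswiz(): recover (type, offset) from (oid0, index) */
		unsigned int rtype = (unsigned int)(oid0 >> SWIZ_BITS);
		uint64_t roffset = ((uint64_t)index << SWIZ_BITS) | (oid0 & SWIZ_MASK);

		assert(rtype == type && roffset == offset);
		printf("type=%u offset=%#llx -> oid=%#llx index=%u\n",
		       type, (unsigned long long)offset,
		       (unsigned long long)oid0, index);
		return 0;
	}

With SWIZ_BITS at 8, each swapfile fans out across 256 tmem objects, which is what the removed comment means by increasing objects per swaptype: more objects for concurrency under heavy swap, without encoding the entire swap entry into the oid.
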
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.c b/drivers/staging/rtl8187se/ieee80211/dot11d.c
index 0e93eb0735a7..9d2d5c58add2 100644
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8187se/ieee80211/dot11d.c
@@ -39,12 +39,11 @@ Dot11d_Reset(struct ieee80211_device *ieee)
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
// Set new channel map
- for (i=1; i<=11; i++) {
+ for (i = 1; i <= 11; i++)
(pDot11dInfo->channel_map)[i] = 1;
- }
- for (i=12; i<=14; i++) {
+
+ for (i = 12; i <= 14; i++)
(pDot11dInfo->channel_map)[i] = 2;
- }
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
@@ -68,17 +67,16 @@ Dot11d_Reset(struct ieee80211_device *ieee)
void
Dot11d_UpdateCountryIe(
struct ieee80211_device *dev,
- u8 * pTaddr,
+ u8 *pTaddr,
u16 CoutryIeLen,
- u8 * pCoutryIe
+ u8 *pCoutryIe
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 i, j, NumTriples, MaxChnlNum;
PCHNL_TXPOWER_TRIPLE pTriple;
- if((CoutryIeLen - 3)%3 != 0)
- {
+ if ((CoutryIeLen - 3)%3 != 0) {
printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
Dot11d_Reset(dev);
return;
@@ -89,35 +87,33 @@ Dot11d_UpdateCountryIe(
MaxChnlNum = 0;
NumTriples = (CoutryIeLen - 3) / 3; // skip 3-byte country string.
pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
- for(i = 0; i < NumTriples; i++)
- {
- if(MaxChnlNum >= pTriple->FirstChnl)
- { // It is not in a monotonically increasing order, so stop processing.
+ for (i = 0; i < NumTriples; i++) {
+ if (MaxChnlNum >= pTriple->FirstChnl) {
+ // It is not in a monotonically increasing order, so stop processing.
printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
Dot11d_Reset(dev);
return;
}
- if(MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls))
- { // It is not a valid set of channel id, so stop processing.
+ if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls)) {
+ // It is not a valid set of channel id, so stop processing.
printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
Dot11d_Reset(dev);
return;
}
- for(j = 0 ; j < pTriple->NumChnls; j++)
- {
+ for (j = 0 ; j < pTriple->NumChnls; j++) {
pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] = pTriple->MaxTxPowerInDbm;
MaxChnlNum = pTriple->FirstChnl + j;
}
- pTriple = (PCHNL_TXPOWER_TRIPLE)((u8*)pTriple + 3);
+ pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
}
#if 1
//printk("Dot11d_UpdateCountryIe(): Channel List:\n");
printk("Channel List:");
- for(i=1; i<= MAX_CHANNEL_NUMBER; i++)
- if(pDot11dInfo->channel_map[i] > 0)
+ for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
+ if (pDot11dInfo->channel_map[i] > 0)
printk(" %d", i);
printk("\n");
#endif
@@ -125,7 +121,7 @@ Dot11d_UpdateCountryIe(
UPDATE_CIE_SRC(dev, pTaddr);
pDot11dInfo->CountryIeLen = CoutryIeLen;
- memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe,CoutryIeLen);
+ memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
pDot11dInfo->State = DOT11D_STATE_LEARNED;
}
@@ -138,13 +134,11 @@ DOT11D_GetMaxTxPwrInDbm(
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 MaxTxPwrInDbm = 255;
- if(MAX_CHANNEL_NUMBER < Channel)
- {
+ if (MAX_CHANNEL_NUMBER < Channel) {
printk("DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
- if(pDot11dInfo->channel_map[Channel])
- {
+ if (pDot11dInfo->channel_map[Channel]) {
MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
}
@@ -154,20 +148,19 @@ DOT11D_GetMaxTxPwrInDbm(
void
DOT11D_ScanComplete(
- struct ieee80211_device * dev
+ struct ieee80211_device *dev
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- switch(pDot11dInfo->State)
- {
+ switch (pDot11dInfo->State) {
case DOT11D_STATE_LEARNED:
pDot11dInfo->State = DOT11D_STATE_DONE;
break;
case DOT11D_STATE_DONE:
- if( GET_CIE_WATCHDOG(dev) == 0 )
- { // Reset country IE if previous one is gone.
+ if (GET_CIE_WATCHDOG(dev) == 0) {
+ // Reset country IE if previous one is gone.
Dot11d_Reset(dev);
}
break;
@@ -177,24 +170,23 @@ DOT11D_ScanComplete(
}
int IsLegalChannel(
- struct ieee80211_device * dev,
+ struct ieee80211_device *dev,
u8 channel
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- if(MAX_CHANNEL_NUMBER < channel)
- {
+ if (MAX_CHANNEL_NUMBER < channel) {
printk("IsLegalChannel(): Invalid Channel\n");
return 0;
}
- if(pDot11dInfo->channel_map[channel] > 0)
+ if (pDot11dInfo->channel_map[channel] > 0)
return 1;
return 0;
}
int ToLegalChannel(
- struct ieee80211_device * dev,
+ struct ieee80211_device *dev,
u8 channel
)
{
@@ -202,22 +194,19 @@ int ToLegalChannel(
u8 default_chn = 0;
u32 i = 0;
- for (i=1; i<= MAX_CHANNEL_NUMBER; i++)
- {
- if(pDot11dInfo->channel_map[i] > 0)
- {
+ for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
+ if (pDot11dInfo->channel_map[i] > 0) {
default_chn = i;
break;
}
}
- if(MAX_CHANNEL_NUMBER < channel)
- {
+ if (MAX_CHANNEL_NUMBER < channel) {
printk("IsLegalChannel(): Invalid Channel\n");
return default_chn;
}
- if(pDot11dInfo->channel_map[channel] > 0)
+ if (pDot11dInfo->channel_map[channel] > 0)
return channel;
return default_chn;
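
For reference, the country information element that Dot11d_UpdateCountryIe() above walks is a 3-byte country string followed by 3-byte triples of (first channel, number of channels, max TX power in dBm); each triple must start above any channel already seen and stay within MAX_CHANNEL_NUMBER. A minimal standalone sketch of that parse (plain userspace C, not part of the patch; MAX_CHANNEL_NUMBER assumed to be 14 here for a 2.4 GHz channel map):

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_CHANNEL_NUMBER 14	/* assumed value for this sketch */

	int main(void)
	{
		/* "GB" country string plus one triple: channels 1..13 at 20 dBm */
		const uint8_t ie[] = { 'G', 'B', ' ', 1, 13, 20 };
		uint8_t channel_map[MAX_CHANNEL_NUMBER + 1] = { 0 };
		uint8_t max_pwr_dbm[MAX_CHANNEL_NUMBER + 1] = { 0 };
		unsigned int ntriples = (sizeof(ie) - 3) / 3;
		unsigned int max_seen = 0, i, j;

		for (i = 0; i < ntriples; i++) {
			const uint8_t *t = &ie[3 + 3 * i];
			unsigned int first = t[0], num = t[1], dbm = t[2];

			/* same sanity checks as the driver: monotonic and in range */
			if (max_seen >= first || first + num > MAX_CHANNEL_NUMBER)
				return 1;
			for (j = 0; j < num; j++) {
				channel_map[first + j] = 1;
				max_pwr_dbm[first + j] = dbm;
				max_seen = first + j;
			}
		}
		for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
			if (channel_map[i])
				printf("channel %u legal, max %u dBm\n",
				       i, (unsigned int)max_pwr_dbm[i]);
		return 0;
	}

IsLegalChannel() and ToLegalChannel() above then simply consult the resulting channel_map.
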
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
index 4358c4b0ca60..07a1fbb6678e 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
@@ -68,10 +68,8 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
ieee->networks = kcalloc(
MAX_NETWORK_COUNT, sizeof(struct ieee80211_network),
GFP_KERNEL);
- if (!ieee->networks) {
- netdev_warn(ieee->dev, "Out of memory allocating beacons\n");
+ if (!ieee->networks)
return -ENOMEM;
- }
return 0;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index 446f15ec6396..e30315997bbe 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -408,11 +408,9 @@ static int is_duplicate_packet(struct ieee80211_device *ieee,
// if (memcmp(entry->mac, mac, ETH_ALEN)){
if (p == &ieee->ibss_mac_hash[index]) {
entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC);
- if (!entry) {
- netdev_warn(ieee->dev,
- "Cannot malloc new mac entry\n");
+ if (!entry)
return 0;
- }
+
memcpy(entry->mac, mac, ETH_ALEN);
entry->seq_num[tid] = seq;
entry->frag_num[tid] = frag;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index c7917b24425c..e014f7e74397 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -41,9 +41,9 @@ static const char *ieee80211_modes[] = {
#define MAX_CUSTOM_LEN 64
static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
- char *start, char *stop,
- struct ieee80211_network *network,
- struct iw_request_info *info)
+ char *start, char *stop,
+ struct ieee80211_network *network,
+ struct iw_request_info *info)
{
char custom[MAX_CUSTOM_LEN];
char *p;
@@ -78,9 +78,9 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", ieee80211_modes[network->mode]);
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
- /* Add mode */
- iwe.cmd = SIOCGIWMODE;
- if (network->capability &
+ /* Add mode */
+ iwe.cmd = SIOCGIWMODE;
+ if (network->capability &
(WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) {
if (network->capability & WLAN_CAPABILITY_BSS)
iwe.u.mode = IW_MODE_MASTER;
@@ -90,7 +90,7 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
}
- /* Add frequency/channel */
+ /* Add frequency/channel */
iwe.cmd = SIOCGIWFREQ;
/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
iwe.u.freq.e = 3; */
@@ -168,23 +168,23 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
- memset(&iwe, 0, sizeof(iwe));
- if (network->wpa_ie_len) {
+ memset(&iwe, 0, sizeof(iwe));
+ if (network->wpa_ie_len) {
// printk("wpa_ie_len:%d\n", network->wpa_ie_len);
- char buf[MAX_WPA_IE_LEN];
- memcpy(buf, network->wpa_ie, network->wpa_ie_len);
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = network->wpa_ie_len;
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
-
- memset(&iwe, 0, sizeof(iwe));
- if (network->rsn_ie_len) {
+ char buf[MAX_WPA_IE_LEN];
+ memcpy(buf, network->wpa_ie, network->wpa_ie_len);
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = network->wpa_ie_len;
+ start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+ }
+
+ memset(&iwe, 0, sizeof(iwe));
+ if (network->rsn_ie_len) {
// printk("=====>rsn_ie_len:\n", network->rsn_ie_len);
- char buf[MAX_WPA_IE_LEN];
- memcpy(buf, network->rsn_ie, network->rsn_ie_len);
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = network->rsn_ie_len;
+ char buf[MAX_WPA_IE_LEN];
+ memcpy(buf, network->rsn_ie, network->rsn_ie_len);
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = network->rsn_ie_len;
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
@@ -217,22 +217,18 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
down(&ieee->wx_sem);
spin_lock_irqsave(&ieee->lock, flags);
- if(!ieee->bHwRadioOff)
- {
+ if (!ieee->bHwRadioOff) {
list_for_each_entry(network, &ieee->network_list, list) {
i++;
- if((stop-ev)<200)
- {
+ if ((stop-ev) < 200) {
err = -E2BIG;
break;
}
if (ieee->scan_age == 0 ||
- time_after(network->last_scanned + ieee->scan_age, jiffies))
- {
+ time_after(network->last_scanned + ieee->scan_age, jiffies)) {
ev = rtl818x_translate_scan(ieee, ev, stop, network, info);
- }
- else
+ } else
IEEE80211_DEBUG_SCAN(
"Not showing network '%s ("
"%pM)' due to age (%lums).\n",
@@ -340,9 +336,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
kfree(new_crypt);
new_crypt = NULL;
- printk(KERN_WARNING "%s: could not initialize WEP: "
- "load module ieee80211_crypt_wep\n",
- dev->name);
+ netdev_warn(ieee->dev,
+ "could not initialize WEP: load module ieee80211_crypt_wep\n");
return -EOPNOTSUPP;
}
*crypt = new_crypt;
@@ -359,7 +354,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
key, escape_essid(sec.keys[key], len),
erq->length, len);
sec.key_sizes[key] = len;
- (*crypt)->ops->set_key(sec.keys[key], len, NULL,
+ (*crypt)->ops->set_key(sec.keys[key], len, NULL,
(*crypt)->priv);
sec.flags |= (1 << key);
/* This ensures a key will be activated if no key is
@@ -414,7 +409,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
- printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
+ netdev_dbg(ieee->dev, "reset_port failed\n");
return -EINVAL;
}
return 0;
@@ -430,7 +425,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
IEEE80211_DEBUG_WX("GET_ENCODE\n");
- if(ieee->iw_mode == IW_MODE_MONITOR)
+ if (ieee->iw_mode == IW_MODE_MONITOR)
return -1;
key = erq->flags & IW_ENCODE_INDEX;
@@ -472,240 +467,240 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
}
int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct net_device *dev = ieee->dev;
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int i, idx, ret = 0;
- int group_key = 0;
- const char *alg;
- struct ieee80211_crypto_ops *ops;
- struct ieee80211_crypt_data **crypt;
-
- struct ieee80211_security sec = {
- .flags = 0,
- };
+ struct iw_point *encoding = &wrqu->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ int i, idx, ret = 0;
+ int group_key = 0;
+ const char *alg;
+ struct ieee80211_crypto_ops *ops;
+ struct ieee80211_crypt_data **crypt;
+
+ struct ieee80211_security sec = {
+ .flags = 0,
+ };
//printk("======>encoding flag:%x,ext flag:%x, ext alg:%d\n", encoding->flags,ext->ext_flags, ext->alg);
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
- return -EINVAL;
- idx--;
- } else
- idx = ieee->tx_keyidx;
-
- if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- crypt = &ieee->crypt[idx];
- group_key = 1;
- } else {
- /* some Cisco APs use idx>0 for unicast in dynamic WEP */
+ idx = encoding->flags & IW_ENCODE_INDEX;
+ if (idx) {
+ if (idx < 1 || idx > WEP_KEYS)
+ return -EINVAL;
+ idx--;
+ } else
+ idx = ieee->tx_keyidx;
+
+ if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ crypt = &ieee->crypt[idx];
+ group_key = 1;
+ } else {
+ /* some Cisco APs use idx>0 for unicast in dynamic WEP */
//printk("not group key, flags:%x, ext->alg:%d\n", ext->ext_flags, ext->alg);
- if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
- return -EINVAL;
- if (ieee->iw_mode == IW_MODE_INFRA)
- crypt = &ieee->crypt[idx];
- else
- return -EINVAL;
- }
-
- sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
- if ((encoding->flags & IW_ENCODE_DISABLED) ||
- ext->alg == IW_ENCODE_ALG_NONE) {
- if (*crypt)
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
- for (i = 0; i < WEP_KEYS; i++)
- if (ieee->crypt[i] != NULL)
- break;
-
- if (i == WEP_KEYS) {
- sec.enabled = 0;
- // sec.encrypt = 0;
- sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_LEVEL;
- }
+ if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
+ return -EINVAL;
+ if (ieee->iw_mode == IW_MODE_INFRA)
+ crypt = &ieee->crypt[idx];
+ else
+ return -EINVAL;
+ }
+
+ sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
+ if ((encoding->flags & IW_ENCODE_DISABLED) ||
+ ext->alg == IW_ENCODE_ALG_NONE) {
+ if (*crypt)
+ ieee80211_crypt_delayed_deinit(ieee, crypt);
+
+ for (i = 0; i < WEP_KEYS; i++)
+ if (ieee->crypt[i] != NULL)
+ break;
+
+ if (i == WEP_KEYS) {
+ sec.enabled = 0;
+ // sec.encrypt = 0;
+ sec.level = SEC_LEVEL_0;
+ sec.flags |= SEC_LEVEL;
+ }
//printk("disabled: flag:%x\n", encoding->flags);
- goto done;
- }
+ goto done;
+ }
sec.enabled = 1;
// sec.encrypt = 1;
- switch (ext->alg) {
- case IW_ENCODE_ALG_WEP:
- alg = "WEP";
- break;
- case IW_ENCODE_ALG_TKIP:
- alg = "TKIP";
- break;
- case IW_ENCODE_ALG_CCMP:
- alg = "CCMP";
- break;
- default:
- IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
- dev->name, ext->alg);
- ret = -EINVAL;
- goto done;
- }
+ switch (ext->alg) {
+ case IW_ENCODE_ALG_WEP:
+ alg = "WEP";
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ alg = "TKIP";
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ alg = "CCMP";
+ break;
+ default:
+ IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
+ dev->name, ext->alg);
+ ret = -EINVAL;
+ goto done;
+ }
// printk("8-09-08-9=====>%s, alg name:%s\n",__func__, alg);
- ops = ieee80211_get_crypto_ops(alg);
- if (ops == NULL)
- ops = ieee80211_get_crypto_ops(alg);
- if (ops == NULL) {
- IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
- dev->name, ext->alg);
+ ops = ieee80211_get_crypto_ops(alg);
+ if (ops == NULL)
+ ops = ieee80211_get_crypto_ops(alg);
+ if (ops == NULL) {
+ IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
+ dev->name, ext->alg);
printk("========>unknown crypto alg %d\n", ext->alg);
- ret = -EINVAL;
- goto done;
- }
-
- if (*crypt == NULL || (*crypt)->ops != ops) {
- struct ieee80211_crypt_data *new_crypt;
-
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
- new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
- if (new_crypt == NULL) {
- ret = -ENOMEM;
- goto done;
- }
- new_crypt->ops = ops;
- if (new_crypt->ops)
- new_crypt->priv = new_crypt->ops->init(idx);
- if (new_crypt->priv == NULL) {
- kfree(new_crypt);
- ret = -EINVAL;
- goto done;
- }
- *crypt = new_crypt;
-
- }
-
- if (ext->key_len > 0 && (*crypt)->ops->set_key &&
- (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
- (*crypt)->priv) < 0) {
- IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (*crypt == NULL || (*crypt)->ops != ops) {
+ struct ieee80211_crypt_data *new_crypt;
+
+ ieee80211_crypt_delayed_deinit(ieee, crypt);
+
+ new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
+ if (new_crypt == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ new_crypt->ops = ops;
+ if (new_crypt->ops)
+ new_crypt->priv = new_crypt->ops->init(idx);
+ if (new_crypt->priv == NULL) {
+ kfree(new_crypt);
+ ret = -EINVAL;
+ goto done;
+ }
+ *crypt = new_crypt;
+
+ }
+
+ if (ext->key_len > 0 && (*crypt)->ops->set_key &&
+ (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
+ (*crypt)->priv) < 0) {
+ IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
printk("key setting failed\n");
- ret = -EINVAL;
- goto done;
- }
+ ret = -EINVAL;
+ goto done;
+ }
#if 1
//skip_host_crypt:
//printk("skip_host_crypt:ext_flags:%x\n", ext->ext_flags);
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- ieee->tx_keyidx = idx;
- sec.active_key = idx;
- sec.flags |= SEC_ACTIVE_KEY;
- }
-
- if (ext->alg != IW_ENCODE_ALG_NONE) {
- memcpy(sec.keys[idx], ext->key, ext->key_len);
- sec.key_sizes[idx] = ext->key_len;
- sec.flags |= (1 << idx);
- if (ext->alg == IW_ENCODE_ALG_WEP) {
- // sec.encode_alg[idx] = SEC_ALG_WEP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- } else if (ext->alg == IW_ENCODE_ALG_TKIP) {
- // sec.encode_alg[idx] = SEC_ALG_TKIP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_2;
- } else if (ext->alg == IW_ENCODE_ALG_CCMP) {
- // sec.encode_alg[idx] = SEC_ALG_CCMP;
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_3;
- }
- /* Don't set sec level for group keys. */
- if (group_key)
- sec.flags &= ~SEC_LEVEL;
- }
+ if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ ieee->tx_keyidx = idx;
+ sec.active_key = idx;
+ sec.flags |= SEC_ACTIVE_KEY;
+ }
+
+ if (ext->alg != IW_ENCODE_ALG_NONE) {
+ memcpy(sec.keys[idx], ext->key, ext->key_len);
+ sec.key_sizes[idx] = ext->key_len;
+ sec.flags |= (1 << idx);
+ if (ext->alg == IW_ENCODE_ALG_WEP) {
+ // sec.encode_alg[idx] = SEC_ALG_WEP;
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_1;
+ } else if (ext->alg == IW_ENCODE_ALG_TKIP) {
+ // sec.encode_alg[idx] = SEC_ALG_TKIP;
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_2;
+ } else if (ext->alg == IW_ENCODE_ALG_CCMP) {
+ // sec.encode_alg[idx] = SEC_ALG_CCMP;
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_3;
+ }
+ /* Don't set sec level for group keys. */
+ if (group_key)
+ sec.flags &= ~SEC_LEVEL;
+ }
#endif
done:
- if (ieee->set_security)
- ieee->set_security(ieee->dev, &sec);
+ if (ieee->set_security)
+ ieee->set_security(ieee->dev, &sec);
if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
+ ieee->iw_mode != IW_MODE_INFRA &&
+ ieee->reset_port && ieee->reset_port(dev)) {
+ IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
+ return -EINVAL;
+ }
- return ret;
+ return ret;
}
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct iw_mlme *mlme = (struct iw_mlme *) extra;
// printk("\ndkgadfslkdjgalskdf===============>%s(), cmd:%x\n", __func__, mlme->cmd);
#if 1
switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
+ case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
// printk("disassoc now\n");
ieee80211_disassociate(ieee);
break;
default:
- return -EOPNOTSUPP;
- }
+ return -EOPNOTSUPP;
+ }
#endif
return 0;
}
int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
{
/*
struct ieee80211_security sec = {
- .flags = SEC_AUTH_MODE,
+ .flags = SEC_AUTH_MODE,
}
*/
//printk("set auth:flag:%x, data value:%x\n", data->flags, data->value);
switch (data->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
+ case IW_AUTH_WPA_VERSION:
/*need to support wpa2 here*/
//printk("wpa version:%x\n", data->value);
break;
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_KEY_MGMT:
- /*
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP:
+ case IW_AUTH_KEY_MGMT:
+ /*
* * Host AP driver does not use these parameters and allows
* * wpa_supplicant to control them internally.
* */
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- ieee->tkip_countermeasures = data->value;
- break;
- case IW_AUTH_DROP_UNENCRYPTED:
- ieee->drop_unencrypted = data->value;
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ ieee->tkip_countermeasures = data->value;
+ break;
+ case IW_AUTH_DROP_UNENCRYPTED:
+ ieee->drop_unencrypted = data->value;
break;
case IW_AUTH_80211_AUTH_ALG:
- ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM)?1:0;
+		ieee->open_wep = (data->value & IW_AUTH_ALG_OPEN_SYSTEM) ? 1 : 0;
//printk("open_wep:%d\n", ieee->open_wep);
break;
#if 1
case IW_AUTH_WPA_ENABLED:
- ieee->wpa_enabled = (data->value)?1:0;
+ ieee->wpa_enabled = (data->value) ? 1 : 0;
//printk("enable wpa:%d\n", ieee->wpa_enabled);
break;
#endif
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- ieee->ieee802_1x = data->value;
+ ieee->ieee802_1x = data->value;
break;
case IW_AUTH_PRIVACY_INVOKED:
ieee->privacy_invoked = data->value;
break;
default:
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
}
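ieee80211_wx_set_encode_ext() above derives the key slot from the low bits of encoding->flags: user space passes 1..WEP_KEYS (0 meaning "use the current TX key") and the driver turns that into a 0-based index. A standalone sketch of that decode, assuming WEP_KEYS is 4 as in this header:

#include <stdio.h>

#define IW_ENCODE_INDEX 0x00FF  /* low byte of the wext flags carries the slot */
#define WEP_KEYS        4

/* Return the 0-based key slot, or -1 for an out-of-range request
 * (-EINVAL in the driver); tx_keyidx is the current default TX key. */
static int decode_key_index(unsigned int flags, int tx_keyidx)
{
	int idx = flags & IW_ENCODE_INDEX;

	if (idx) {
		if (idx < 1 || idx > WEP_KEYS)
			return -1;
		return idx - 1;         /* user space counts from 1 */
	}
	return tx_keyidx;               /* 0 means "current TX key" */
}

int main(void)
{
	printf("%d %d %d\n",
	       decode_key_index(0, 2),   /* -> 2 */
	       decode_key_index(1, 2),   /* -> 0 */
	       decode_key_index(5, 2));  /* -> -1 */
	return 0;
}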
@@ -715,15 +710,13 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
u8 *buf = NULL;
- if (len>MAX_WPA_IE_LEN || (len && ie == NULL))
- {
+ if (len > MAX_WPA_IE_LEN || (len && ie == NULL)) {
printk("return error out, len:%zu\n", len);
return -EINVAL;
}
- if (len)
- {
- if (len != ie[1]+2){
+ if (len) {
+ if (len != ie[1]+2) {
printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
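ieee80211_wx_set_gen_ie() accepts the buffer only if it holds a single well-formed information element: one ID byte, one length byte, then ie[1] bytes of payload, hence the len == ie[1] + 2 test. A standalone sketch of that validation (MAX_WPA_IE_LEN is the driver's cap; 64 is assumed here):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_WPA_IE_LEN 64

static bool gen_ie_is_valid(const uint8_t *ie, size_t len)
{
	if (len > MAX_WPA_IE_LEN || (len && ie == NULL))
		return false;           /* oversized, or length without data */
	if (len && len != (size_t)ie[1] + 2)
		return false;           /* TLV length byte disagrees with len */
	return true;
}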
@@ -733,8 +726,7 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
- }
- else{
+ } else {
kfree(ieee->wpa_ie);
ieee->wpa_ie = NULL;
ieee->wpa_ie_len = 0;
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index ae38475854b5..d10d75e8a33f 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -937,7 +937,8 @@ short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
dma_tmp = pci_map_single(pdev, buf, bufsize * sizeof(u8),
PCI_DMA_FROMDEVICE);
-
+ if (pci_dma_mapping_error(pdev, dma_tmp))
+ return -1;
if (-1 == buffer_add(&(priv->rxbuffer), buf, dma_tmp,
&(priv->rxbufferhead))) {
DMESGE("Unable to allocate mem RX buf");
diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
index f1db9e401c87..978dc5f4f929 100644
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ b/drivers/staging/rtl8187se/r8185b_init.c
@@ -115,21 +115,24 @@ static u8 OFDM_CONFIG[] = {
*---------------------------------------------------------------
*/
-void PlatformIOWrite1Byte(struct net_device *dev, u32 offset, u8 data)
+static u8 PlatformIORead1Byte(struct net_device *dev, u32 offset)
+{
+ return read_nic_byte(dev, offset);
+}
+
+static void PlatformIOWrite1Byte(struct net_device *dev, u32 offset, u8 data)
{
write_nic_byte(dev, offset, data);
read_nic_byte(dev, offset); /* To make sure write operation is completed, 2005.11.09, by rcnjko. */
}
-void PlatformIOWrite2Byte(struct net_device *dev, u32 offset, u16 data)
+static void PlatformIOWrite2Byte(struct net_device *dev, u32 offset, u16 data)
{
write_nic_word(dev, offset, data);
read_nic_word(dev, offset); /* To make sure write operation is completed, 2005.11.09, by rcnjko. */
}
-u8 PlatformIORead1Byte(struct net_device *dev, u32 offset);
-
-void PlatformIOWrite4Byte(struct net_device *dev, u32 offset, u32 data)
+static void PlatformIOWrite4Byte(struct net_device *dev, u32 offset, u32 data)
{
if (offset == PhyAddr) {
/* For Base Band configuration. */
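PlatformIOWrite1Byte() and friends pair every register write with a dummy read of the same offset so the posted MMIO write actually reaches the device before the caller continues. A short sketch of that pattern in terms of this driver's accessors:

/* sketch: flush a posted register write by reading the register back */
static void write_reg_flushed(struct net_device *dev, u32 offset, u8 data)
{
	write_nic_byte(dev, offset, data);
	(void)read_nic_byte(dev, offset);  /* forces the write to complete */
}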
@@ -172,37 +175,7 @@ void PlatformIOWrite4Byte(struct net_device *dev, u32 offset, u32 data)
}
}
-u8 PlatformIORead1Byte(struct net_device *dev, u32 offset)
-{
- u8 data = 0;
-
- data = read_nic_byte(dev, offset);
-
-
- return data;
-}
-
-u16 PlatformIORead2Byte(struct net_device *dev, u32 offset)
-{
- u16 data = 0;
-
- data = read_nic_word(dev, offset);
-
-
- return data;
-}
-
-u32 PlatformIORead4Byte(struct net_device *dev, u32 offset)
-{
- u32 data = 0;
-
- data = read_nic_dword(dev, offset);
-
-
- return data;
-}
-
-void SetOutputEnableOfRfPins(struct net_device *dev)
+static void SetOutputEnableOfRfPins(struct net_device *dev)
{
write_nic_word(dev, RFPinsEnable, 0x1bff);
}
@@ -287,35 +260,19 @@ u16 RF_ReadReg(struct net_device *dev, u8 offset)
return reg;
}
+static u8 ReadBBPortUchar(struct net_device *dev, u32 addr)
+{
+ PlatformIOWrite4Byte(dev, PhyAddr, addr & 0xffffff7f);
+ return PlatformIORead1Byte(dev, PhyDataR);
+}
/* by Owen on 04/07/14 for writing BB register successfully */
-void WriteBBPortUchar(struct net_device *dev, u32 Data)
+static void WriteBBPortUchar(struct net_device *dev, u32 Data)
{
- /* u8 TimeoutCounter; */
- u8 RegisterContent;
- u8 UCharData;
-
- UCharData = (u8)((Data & 0x0000ff00) >> 8);
PlatformIOWrite4Byte(dev, PhyAddr, Data);
- /* for(TimeoutCounter = 10; TimeoutCounter > 0; TimeoutCounter--) */
- {
- PlatformIOWrite4Byte(dev, PhyAddr, Data & 0xffffff7f);
- RegisterContent = PlatformIORead1Byte(dev, PhyDataR);
- /*if(UCharData == RegisterContent) */
- /* break; */
- }
+ ReadBBPortUchar(dev, Data);
}
-u8 ReadBBPortUchar(struct net_device *dev, u32 addr)
-{
- /*u8 TimeoutCounter; */
- u8 RegisterContent;
-
- PlatformIOWrite4Byte(dev, PhyAddr, addr & 0xffffff7f);
- RegisterContent = PlatformIORead1Byte(dev, PhyDataR);
-
- return RegisterContent;
-}
/*
* Description:
* Perform Antenna settings with antenna diversity on 87SE.
@@ -327,62 +284,38 @@ bool SetAntennaConfig87SE(struct net_device *dev,
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bAntennaSwitched = true;
+ u8 ant_diversity_offset = 0x00; /* 0x00 = disabled, 0x80 = enabled */
/* printk("SetAntennaConfig87SE(): DefaultAnt(%d), bAntDiversity(%d)\n", DefaultAnt, bAntDiversity); */
/* Threshold for antenna diversity. */
write_phy_cck(dev, 0x0c, 0x09); /* Reg0c : 09 */
- if (bAntDiversity) { /* Enable Antenna Diversity. */
- if (DefaultAnt == 1) { /* aux antenna */
-
- /* Mac register, aux antenna */
- write_nic_byte(dev, ANTSEL, 0x00);
-
- /* Config CCK RX antenna. */
- write_phy_cck(dev, 0x11, 0xbb); /* Reg11 : bb */
- write_phy_cck(dev, 0x01, 0xc7); /* Reg01 : c7 */
-
- /* Config OFDM RX antenna. */
- write_phy_ofdm(dev, 0x0D, 0x54); /* Reg0d : 54 */
- write_phy_ofdm(dev, 0x18, 0xb2); /* Reg18 : b2 */
- } else { /* use main antenna */
- /* Mac register, main antenna */
- write_nic_byte(dev, ANTSEL, 0x03);
- /* base band */
- /* Config CCK RX antenna. */
- write_phy_cck(dev, 0x11, 0x9b); /* Reg11 : 9b */
- write_phy_cck(dev, 0x01, 0xc7); /* Reg01 : c7 */
-
- /* Config OFDM RX antenna. */
- write_phy_ofdm(dev, 0x0d, 0x5c); /* Reg0d : 5c */
- write_phy_ofdm(dev, 0x18, 0xb2); /* Reg18 : b2 */
- }
- } else {
- /* Disable Antenna Diversity. */
- if (DefaultAnt == 1) { /* aux Antenna */
- /* Mac register, aux antenna */
- write_nic_byte(dev, ANTSEL, 0x00);
-
- /* Config CCK RX antenna. */
- write_phy_cck(dev, 0x11, 0xbb); /* Reg11 : bb */
- write_phy_cck(dev, 0x01, 0x47); /* Reg01 : 47 */
-
- /* Config OFDM RX antenna. */
- write_phy_ofdm(dev, 0x0D, 0x54); /* Reg0d : 54 */
- write_phy_ofdm(dev, 0x18, 0x32); /* Reg18 : 32 */
- } else { /* main Antenna */
- /* Mac register, main antenna */
- write_nic_byte(dev, ANTSEL, 0x03);
-
- /* Config CCK RX antenna. */
- write_phy_cck(dev, 0x11, 0x9b); /* Reg11 : 9b */
- write_phy_cck(dev, 0x01, 0x47); /* Reg01 : 47 */
-
- /* Config OFDM RX antenna. */
- write_phy_ofdm(dev, 0x0D, 0x5c); /* Reg0d : 5c */
- write_phy_ofdm(dev, 0x18, 0x32); /*Reg18 : 32 */
- }
+ if (bAntDiversity) /* Enable Antenna Diversity. */
+ ant_diversity_offset = 0x80;
+
+ if (DefaultAnt == 1) { /* aux Antenna */
+ /* Mac register, aux antenna */
+ write_nic_byte(dev, ANTSEL, 0x00);
+
+ /* Config CCK RX antenna. */
+ write_phy_cck(dev, 0x11, 0xbb); /* Reg11 : bb */
+ write_phy_cck(dev, 0x01, 0x47|ant_diversity_offset); /* Reg01 : 47 | ant_diversity_offset */
+
+ /* Config OFDM RX antenna. */
+ write_phy_ofdm(dev, 0x0D, 0x54); /* Reg0d : 54 */
+		write_phy_ofdm(dev, 0x18, 0x32|ant_diversity_offset); /* Reg18 : 32 | ant_diversity_offset */
+ } else { /* main Antenna */
+ /* Mac register, main antenna */
+ write_nic_byte(dev, ANTSEL, 0x03);
+
+ /* Config CCK RX antenna. */
+ write_phy_cck(dev, 0x11, 0x9b); /* Reg11 : 9b */
+		write_phy_cck(dev, 0x01, 0x47|ant_diversity_offset); /* Reg01 : 47 | ant_diversity_offset */
+
+ /* Config OFDM RX antenna. */
+ write_phy_ofdm(dev, 0x0D, 0x5c); /* Reg0d : 5c */
+		write_phy_ofdm(dev, 0x18, 0x32|ant_diversity_offset); /* Reg18 : 32 | ant_diversity_offset */
}
priv->CurrAntennaIndex = DefaultAnt; /* Update default settings. */
return bAntennaSwitched;
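The rewrite above folds the diversity-on and diversity-off branches into one path: for a given antenna the CCK Reg01 and OFDM Reg18 values differ only in bit 7, so that bit is OR'd in as ant_diversity_offset. A standalone sketch of the value selection:

#include <stdint.h>

static void antenna_reg_values(int diversity_enabled,
			       uint8_t *reg01, uint8_t *reg18)
{
	uint8_t offset = diversity_enabled ? 0x80 : 0x00;

	*reg01 = 0x47 | offset;	/* 0xc7 with diversity, 0x47 without */
	*reg18 = 0x32 | offset;	/* 0xb2 with diversity, 0x32 without */
}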
@@ -394,7 +327,7 @@ bool SetAntennaConfig87SE(struct net_device *dev,
*--------------------------------------------------------------
*/
-void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
+static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -695,7 +628,7 @@ void UpdateInitialGain(struct net_device *dev)
* Tx Power tracking mechanism routine on 87SE.
* Created by Roger, 2007.12.11.
*/
-void InitTxPwrTracking87SE(struct net_device *dev)
+static void InitTxPwrTracking87SE(struct net_device *dev)
{
u32 u4bRfReg;
@@ -705,7 +638,7 @@ void InitTxPwrTracking87SE(struct net_device *dev)
RF_WriteReg(dev, 0x02, u4bRfReg|PWR_METER_EN); mdelay(1);
}
-void PhyConfig8185(struct net_device *dev)
+static void PhyConfig8185(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
write_nic_dword(dev, RCR, priv->ReceiveConfig);
@@ -732,7 +665,7 @@ void PhyConfig8185(struct net_device *dev)
return;
}
-void HwConfigureRTL8185(struct net_device *dev)
+static void HwConfigureRTL8185(struct net_device *dev)
{
/* RTL8185_TODO: Determine Retrylimit, TxAGC, AutoRateFallback control. */
u8 bUNIVERSAL_CONTROL_RL = 0;
@@ -857,21 +790,16 @@ static void MacConfig_85BASIC(struct net_device *dev)
write_nic_byte(dev, 0x24E, 0x01);
}
-u8 GetSupportedWirelessMode8185(struct net_device *dev)
+static u8 GetSupportedWirelessMode8185(struct net_device *dev)
{
return WIRELESS_MODE_B | WIRELESS_MODE_G;
}
-void ActUpdateChannelAccessSetting(struct net_device *dev,
+static void ActUpdateChannelAccessSetting(struct net_device *dev,
WIRELESS_MODE WirelessMode,
PCHANNEL_ACCESS_SETTING ChnlAccessSetting)
{
- struct r8180_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
AC_CODING eACI;
- AC_PARAM AcParam;
- u8 bFollowLegacySetting = 0;
- u8 u1bAIFS;
/*
* <RJ_TODO_8185B>
@@ -893,131 +821,16 @@ void ActUpdateChannelAccessSetting(struct net_device *dev,
write_nic_byte(dev, SIFS, ChnlAccessSetting->SIFS_Timer);
write_nic_byte(dev, SLOT, ChnlAccessSetting->SlotTimeTimer); /* Rewrited from directly use PlatformEFIOWrite1Byte(), by Annie, 2006-03-29. */
- u1bAIFS = aSifsTime + (2 * ChnlAccessSetting->SlotTimeTimer);
-
write_nic_byte(dev, EIFS, ChnlAccessSetting->EIFS_Timer);
write_nic_byte(dev, AckTimeOutReg, 0x5B); /* <RJ_EXPR_QOS> Suggested by wcchu, it is the default value of EIFS register, 2005.12.08. */
- { /* Legacy 802.11. */
- bFollowLegacySetting = 1;
-
- }
-
- /* this setting is copied from rtl8187B. xiong-2006-11-13 */
- if (bFollowLegacySetting) {
-
- /*
- * Follow 802.11 seeting to AC parameter, all AC shall use the same parameter.
- * 2005.12.01, by rcnjko.
- */
- AcParam.longData = 0;
- AcParam.f.AciAifsn.f.AIFSN = 2; /* Follow 802.11 DIFS. */
- AcParam.f.AciAifsn.f.ACM = 0;
- AcParam.f.Ecw.f.ECWmin = ChnlAccessSetting->CWminIndex; /* Follow 802.11 CWmin. */
- AcParam.f.Ecw.f.ECWmax = ChnlAccessSetting->CWmaxIndex; /* Follow 802.11 CWmax. */
- AcParam.f.TXOPLimit = 0;
-
- /* lzm reserved 080826 */
- /* For turbo mode setting. port from 87B by Isaiah 2008-08-01 */
- if (ieee->current_network.Turbo_Enable == 1)
- AcParam.f.TXOPLimit = 0x01FF;
- /* For 87SE with Intel 4965 Ad-Hoc mode have poor throughput (19MB) */
- if (ieee->iw_mode == IW_MODE_ADHOC)
- AcParam.f.TXOPLimit = 0x0020;
-
- for (eACI = 0; eACI < AC_MAX; eACI++) {
- AcParam.f.AciAifsn.f.ACI = (u8)eACI;
- {
- PAC_PARAM pAcParam = (PAC_PARAM)(&AcParam);
- AC_CODING eACI;
- u8 u1bAIFS;
- u32 u4bAcParam;
-
- /* Retrieve parameters to update. */
- eACI = pAcParam->f.AciAifsn.f.ACI;
- u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * ChnlAccessSetting->SlotTimeTimer + aSifsTime;
- u4bAcParam = ((((u32)(pAcParam->f.TXOPLimit)) << AC_PARAM_TXOP_LIMIT_OFFSET) |
- (((u32)(pAcParam->f.Ecw.f.ECWmax)) << AC_PARAM_ECW_MAX_OFFSET) |
- (((u32)(pAcParam->f.Ecw.f.ECWmin)) << AC_PARAM_ECW_MIN_OFFSET) |
- (((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
-
- switch (eACI) {
- case AC1_BK:
- /* write_nic_dword(dev, AC_BK_PARAM, u4bAcParam); */
- break;
-
- case AC0_BE:
- /* write_nic_dword(dev, AC_BK_PARAM, u4bAcParam); */
- break;
-
- case AC2_VI:
- /* write_nic_dword(dev, AC_BK_PARAM, u4bAcParam); */
- break;
-
- case AC3_VO:
- /* write_nic_dword(dev, AC_BK_PARAM, u4bAcParam); */
- break;
-
- default:
- DMESGW("SetHwReg8185(): invalid ACI: %d !\n", eACI);
- break;
- }
-
- /* Cehck ACM bit. */
- /* If it is set, immediately set ACM control bit to downgrading AC for passing WMM testplan. Annie, 2005-12-13. */
- {
- PACI_AIFSN pAciAifsn = (PACI_AIFSN)(&pAcParam->f.AciAifsn);
- AC_CODING eACI = pAciAifsn->f.ACI;
-
- /*for 8187B AsynIORead issue */
- u8 AcmCtrl = 0;
- if (pAciAifsn->f.ACM) {
- /* ACM bit is 1. */
- switch (eACI) {
- case AC0_BE:
- AcmCtrl |= (BEQ_ACM_EN|BEQ_ACM_CTL|ACM_HW_EN); /* or 0x21 */
- break;
-
- case AC2_VI:
- AcmCtrl |= (VIQ_ACM_EN|VIQ_ACM_CTL|ACM_HW_EN); /* or 0x42 */
- break;
-
- case AC3_VO:
- AcmCtrl |= (VOQ_ACM_EN|VOQ_ACM_CTL|ACM_HW_EN); /* or 0x84 */
- break;
-
- default:
- DMESGW("SetHwReg8185(): [HW_VAR_ACM_CTRL] ACM set failed: eACI is %d\n", eACI);
- break;
- }
- } else {
- /* ACM bit is 0. */
- switch (eACI) {
- case AC0_BE:
- AcmCtrl &= ((~BEQ_ACM_EN) & (~BEQ_ACM_CTL) & (~ACM_HW_EN)); /* and 0xDE */
- break;
-
- case AC2_VI:
- AcmCtrl &= ((~VIQ_ACM_EN) & (~VIQ_ACM_CTL) & (~ACM_HW_EN)); /* and 0xBD */
- break;
-
- case AC3_VO:
- AcmCtrl &= ((~VOQ_ACM_EN) & (~VOQ_ACM_CTL) & (~ACM_HW_EN)); /* and 0x7B */
- break;
-
- default:
- break;
- }
- }
- write_nic_byte(dev, ACM_CONTROL, 0);
- }
- }
- }
+ for (eACI = 0; eACI < AC_MAX; eACI++) {
+ write_nic_byte(dev, ACM_CONTROL, 0);
}
}
-void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
+static void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
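The block removed from ActUpdateChannelAccessSetting() above used to assemble a per-AC parameter dword out of AIFS, ECWmin/ECWmax and the TXOP limit before the (already commented-out) register writes. A standalone sketch of that packing; the bit offsets follow the usual rtl8187-family layout and are an assumption here:

#include <stdint.h>

/* AIFS in bits 0-7, ECWmin in 8-11, ECWmax in 12-15, TXOP limit from bit 16 */
static uint32_t pack_ac_param(uint8_t aifsn, uint8_t slot_time, uint8_t sifs,
			      uint8_t ecw_min, uint8_t ecw_max,
			      uint16_t txop_limit)
{
	uint8_t aifs = aifsn * slot_time + sifs;   /* AIFS in microseconds */

	return ((uint32_t)txop_limit << 16) |
	       ((uint32_t)(ecw_max & 0x0f) << 12) |
	       ((uint32_t)(ecw_min & 0x0f) << 8) |
	       aifs;
}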
@@ -1074,7 +887,7 @@ void rtl8185b_irq_enable(struct net_device *dev)
write_nic_dword(dev, IMR, priv->IntrMask);
}
-void MgntDisconnectIBSS(struct net_device *dev)
+static void MgntDisconnectIBSS(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
u8 i;
@@ -1100,7 +913,7 @@ void MgntDisconnectIBSS(struct net_device *dev)
notify_wx_assoc_event(priv->ieee80211);
}
-void MlmeDisassociateRequest(struct net_device *dev, u8 *asSta, u8 asRsn)
+static void MlmeDisassociateRequest(struct net_device *dev, u8 *asSta, u8 asRsn)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
u8 i;
@@ -1117,7 +930,7 @@ void MlmeDisassociateRequest(struct net_device *dev, u8 *asSta, u8 asRsn)
}
}
-void MgntDisconnectAP(struct net_device *dev, u8 asRsn)
+static void MgntDisconnectAP(struct net_device *dev, u8 asRsn)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -1135,7 +948,7 @@ void MgntDisconnectAP(struct net_device *dev, u8 asRsn)
priv->ieee80211->state = IEEE80211_NOLINK;
}
-bool MgntDisconnect(struct net_device *dev, u8 asRsn)
+static bool MgntDisconnect(struct net_device *dev, u8 asRsn)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
/*
@@ -1171,7 +984,7 @@ bool MgntDisconnect(struct net_device *dev, u8 asRsn)
* Assumption:
* PASSIVE LEVEL.
*/
-bool SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState)
+static bool SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult = false;
@@ -1275,7 +1088,7 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u
return bActionAllowed;
}
-void InactivePowerSave(struct net_device *dev)
+static void InactivePowerSave(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
/*
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 808aab6fa5ef..a9d78e9651c6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1183,6 +1183,8 @@ void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo->TxRate,
cb_desc);
+ if (pci_dma_mapping_error(priv->pdev, mapping))
+		RT_TRACE(COMP_ERR, "DMA Mapping error\n");
if (cb_desc->bAMPDUEnable) {
pTxFwInfo->AllowAggregation = 1;
pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1280,6 +1282,8 @@ void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping))
+		RT_TRACE(COMP_ERR, "DMA Mapping error\n");
memset(entry, 0, 12);
entry->LINIP = cb_desc->bLastIniPkt;
entry->FirstSeg = 1;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 1a70f324552f..4ebf99b30975 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2104,7 +2104,10 @@ static short rtl8192_alloc_rx_desc_ring(struct net_device *dev)
skb_tail_pointer_rsl(skb),
priv->rxbuffersize,
PCI_DMA_FROMDEVICE);
-
+ if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
entry->BufferAddress = cpu_to_le32(*mapping);
entry->Length = priv->rxbuffersize;
@@ -2397,7 +2400,11 @@ static void rtl8192_rx_normal(struct net_device *dev)
skb_tail_pointer_rsl(skb),
priv->rxbuffersize,
PCI_DMA_FROMDEVICE);
-
+ if (pci_dma_mapping_error(priv->pdev,
+ *((dma_addr_t *)skb->cb))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
}
done:
pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
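The rtl8192e hunks above add pci_dma_mapping_error() checks after pci_map_single(). A hedged sketch of the RX-side pattern, dropping the skb rather than programming an invalid bus address into the descriptor (kernel-style sketch against the legacy pci_* DMA API; skb_tail_pointer_rsl() is this driver's wrapper around skb_tail_pointer()):

static int map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
		      u32 bufsize, dma_addr_t *mapping)
{
	*mapping = pci_map_single(pdev, skb_tail_pointer(skb), bufsize,
				  PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *mapping)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}
	return 0;
}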
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index 36452fb7cef8..0cfb3ecaadee 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
@@ -34,9 +34,9 @@ static void rtl819x_ethtool_get_drvinfo(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(priv->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}
static u32 rtl819x_ethtool_get_link(struct net_device *dev)
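The strcpy() calls on the fixed-size ethtool_drvinfo strings become strlcpy(), which bounds the copy to the destination and always NUL-terminates. A userspace sketch with a local stand-in for the kernel helper:

#include <stdio.h>
#include <string.h>

/* stand-in for the kernel's strlcpy(): copy at most size-1 bytes,
 * always terminate, return the full source length */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char driver[8];

	my_strlcpy(driver, "rtl8192e_pci", sizeof(driver));
	printf("%s\n", driver);	/* "rtl8192" - truncated but terminated */
	return 0;
}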
diff --git a/drivers/staging/rtl8192u/changes b/drivers/staging/rtl8192u/changes
index 87c33fdb9526..0485d6eec7b5 100644
--- a/drivers/staging/rtl8192u/changes
+++ b/drivers/staging/rtl8192u/changes
@@ -2,4 +2,3 @@ v 0.1
First version.
This is based on the rtl8180-sa2400 pre-0.22-CVS code..
-
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
index 51effd6412ac..b5d0c2eb045b 100644
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ b/drivers/staging/rtl8192u/ieee80211/Makefile
@@ -25,4 +25,3 @@ obj-m +=ieee80211_crypt-rsl.o
obj-m +=ieee80211_crypt_wep-rsl.o
obj-m +=ieee80211_crypt_tkip-rsl.o
obj-m +=ieee80211_crypt_ccmp-rsl.o
-
diff --git a/drivers/staging/rtl8192u/ieee80211/aes.c b/drivers/staging/rtl8192u/ieee80211/aes.c
index a6bb6c9207d3..abc1023cef65 100644
--- a/drivers/staging/rtl8192u/ieee80211/aes.c
+++ b/drivers/staging/rtl8192u/ieee80211/aes.c
@@ -443,7 +443,7 @@ static struct crypto_alg aes_alg = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
+ .cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
@@ -466,4 +466,3 @@ module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("Dual BSD/GPL");
-
diff --git a/drivers/staging/rtl8192u/ieee80211/arc4.c b/drivers/staging/rtl8192u/ieee80211/arc4.c
index e3ad8d2f415e..b790e9ad104c 100644
--- a/drivers/staging/rtl8192u/ieee80211/arc4.c
+++ b/drivers/staging/rtl8192u/ieee80211/arc4.c
@@ -79,7 +79,7 @@ static struct crypto_alg arc4_alg = {
.cra_u = { .cipher = {
.cia_min_keysize = ARC4_MIN_KEY_SIZE,
.cia_max_keysize = ARC4_MAX_KEY_SIZE,
- .cia_setkey = arc4_set_key,
+ .cia_setkey = arc4_set_key,
.cia_encrypt = arc4_crypt,
.cia_decrypt = arc4_crypt } }
};
diff --git a/drivers/staging/rtl8192u/ieee80211/crypto_compat.h b/drivers/staging/rtl8192u/ieee80211/crypto_compat.h
index da486588f1c0..2ba374a64178 100644
--- a/drivers/staging/rtl8192u/ieee80211/crypto_compat.h
+++ b/drivers/staging/rtl8192u/ieee80211/crypto_compat.h
@@ -56,5 +56,3 @@ static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
}
//EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
//EXPORT_SYMBOL_GPL(crypto_free_tfm);
-
-
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index ce63fc341c6e..f10fd5a93c38 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -218,4 +218,3 @@ EXPORT_SYMBOL(DOT11D_GetMaxTxPwrInDbm);
EXPORT_SYMBOL(DOT11D_ScanComplete);
EXPORT_SYMBOL(IsLegalChannel);
EXPORT_SYMBOL(ToLegalChannel);
-
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 502bfdbcc84b..210898c8e66c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -64,7 +64,7 @@
#endif
#define KEY_TYPE_NA 0x0
-#define KEY_TYPE_WEP40 0x1
+#define KEY_TYPE_WEP40 0x1
#define KEY_TYPE_TKIP 0x2
#define KEY_TYPE_CCMP 0x4
#define KEY_TYPE_WEP104 0x5
@@ -195,21 +195,21 @@ enum _ReasonCode{
auth_not_valid = 0x2,
deauth_lv_ss = 0x3,
inactivity = 0x4,
- ap_overload = 0x5,
+ ap_overload = 0x5,
class2_err = 0x6,
class3_err = 0x7,
- disas_lv_ss = 0x8,
+ disas_lv_ss = 0x8,
asoc_not_auth = 0x9,
//----MIC_CHECK
- mic_failure = 0xe,
+ mic_failure = 0xe,
//----END MIC_CHECK
// Reason code defined in 802.11i D10.0 p.28.
invalid_IE = 0x0d,
four_way_tmout = 0x0f,
two_way_tmout = 0x10,
- IE_dismatch = 0x11,
+ IE_dismatch = 0x11,
invalid_Gcipher = 0x12,
invalid_Pcipher = 0x13,
invalid_AKMP = 0x14,
@@ -222,7 +222,7 @@ enum _ReasonCode{
QoS_unspec = 0x20, // 32
QAP_bandwidth = 0x21, // 33
poor_condition = 0x22, // 34
- no_facility = 0x23, // 35
+ no_facility = 0x23, // 35
// Where is 36???
req_declined = 0x25, // 37
invalid_param = 0x26, // 38
@@ -265,7 +265,7 @@ enum _ReasonCode{
#define IEEE_WPAX_USEGROUP 0
#define IEEE_WPAX_WEP40 1
#define IEEE_WPAX_TKIP 2
-#define IEEE_WPAX_WRAP 3
+#define IEEE_WPAX_WRAP 3
#define IEEE_WPAX_CCMP 4
#define IEEE_WPAX_WEP104 5
@@ -289,12 +289,12 @@ enum _ReasonCode{
#define MAX_IE_LEN 0xff
// added for kernel conflict
-#define ieee80211_crypt_deinit_entries ieee80211_crypt_deinit_entries_rsl
-#define ieee80211_crypt_deinit_handler ieee80211_crypt_deinit_handler_rsl
-#define ieee80211_crypt_delayed_deinit ieee80211_crypt_delayed_deinit_rsl
-#define ieee80211_register_crypto_ops ieee80211_register_crypto_ops_rsl
+#define ieee80211_crypt_deinit_entries ieee80211_crypt_deinit_entries_rsl
+#define ieee80211_crypt_deinit_handler ieee80211_crypt_deinit_handler_rsl
+#define ieee80211_crypt_delayed_deinit ieee80211_crypt_delayed_deinit_rsl
+#define ieee80211_register_crypto_ops ieee80211_register_crypto_ops_rsl
#define ieee80211_unregister_crypto_ops ieee80211_unregister_crypto_ops_rsl
-#define ieee80211_get_crypto_ops ieee80211_get_crypto_ops_rsl
+#define ieee80211_get_crypto_ops ieee80211_get_crypto_ops_rsl
#define ieee80211_ccmp_null ieee80211_ccmp_null_rsl
@@ -302,10 +302,10 @@ enum _ReasonCode{
#define ieee80211_wep_null ieee80211_wep_null_rsl
-#define free_ieee80211 free_ieee80211_rsl
-#define alloc_ieee80211 alloc_ieee80211_rsl
+#define free_ieee80211 free_ieee80211_rsl
+#define alloc_ieee80211 alloc_ieee80211_rsl
-#define ieee80211_rx ieee80211_rx_rsl
+#define ieee80211_rx ieee80211_rx_rsl
#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl
#define ieee80211_get_beacon ieee80211_get_beacon_rsl
@@ -450,7 +450,7 @@ typedef struct ieee_param {
/* management */
#define IEEE80211_STYPE_ASSOC_REQ 0x0000
-#define IEEE80211_STYPE_ASSOC_RESP 0x0010
+#define IEEE80211_STYPE_ASSOC_RESP 0x0010
#define IEEE80211_STYPE_REASSOC_REQ 0x0020
#define IEEE80211_STYPE_REASSOC_RESP 0x0030
#define IEEE80211_STYPE_PROBE_REQ 0x0040
@@ -536,7 +536,7 @@ do { if (ieee80211_debug_level & (level)) \
//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
#define IEEE80211_DEBUG_DATA(level, data, datalen) \
do{ if ((ieee80211_debug_level & (level)) == (level)) \
- { \
+ { \
int i; \
u8* pdata = (u8*) data; \
printk(KERN_DEBUG "ieee80211: %s()\n", __FUNCTION__); \
@@ -623,20 +623,20 @@ do { if (ieee80211_debug_level & (level)) \
#define MAX_STR_LEN 64
/* I want to see ASCII 33 to 126 only. Otherwise, I print '?'. Annie, 2005-11-22.*/
#define PRINTABLE(_ch) (_ch>'!' && _ch<'~')
-#define IEEE80211_PRINT_STR(_Comp, _TitleString, _Ptr, _Len) \
- if((_Comp) & level) \
+#define IEEE80211_PRINT_STR(_Comp, _TitleString, _Ptr, _Len) \
+ if((_Comp) & level) \
{ \
int __i; \
- u8 buffer[MAX_STR_LEN]; \
- int length = (_Len<MAX_STR_LEN)? _Len : (MAX_STR_LEN-1) ; \
- memset(buffer, 0, MAX_STR_LEN); \
- memcpy(buffer, (u8 *)_Ptr, length ); \
+ u8 buffer[MAX_STR_LEN]; \
+ int length = (_Len<MAX_STR_LEN)? _Len : (MAX_STR_LEN-1) ; \
+ memset(buffer, 0, MAX_STR_LEN); \
+ memcpy(buffer, (u8 *)_Ptr, length ); \
for( __i=0; __i<MAX_STR_LEN; __i++ ) \
{ \
- if( !PRINTABLE(buffer[__i]) ) buffer[__i] = '?'; \
+ if( !PRINTABLE(buffer[__i]) ) buffer[__i] = '?'; \
} \
buffer[length] = '\0'; \
- printk("Rtl819x: "); \
+ printk("Rtl819x: "); \
printk(_TitleString); \
printk(": %d, <%s>\n", _Len, buffer); \
}
@@ -785,12 +785,12 @@ enum ieee80211_reasoncode {
#define IEEE80211_24GHZ_BAND (1<<0)
#define IEEE80211_52GHZ_BAND (1<<1)
-#define IEEE80211_CCK_RATE_LEN 4
+#define IEEE80211_CCK_RATE_LEN 4
#define IEEE80211_CCK_RATE_1MB 0x02
#define IEEE80211_CCK_RATE_2MB 0x04
#define IEEE80211_CCK_RATE_5MB 0x0B
#define IEEE80211_CCK_RATE_11MB 0x16
-#define IEEE80211_OFDM_RATE_LEN 8
+#define IEEE80211_OFDM_RATE_LEN 8
#define IEEE80211_OFDM_RATE_6MB 0x0C
#define IEEE80211_OFDM_RATE_9MB 0x12
#define IEEE80211_OFDM_RATE_12MB 0x18
@@ -919,10 +919,10 @@ struct ieee80211_rx_stats {
u16 fraglength; // FragLength should equal to PacketLength in non-fragment case
u16 fragoffset; // Data offset for this fragment
u16 ntotalfrag;
- bool bisrxaggrsubframe;
+ bool bisrxaggrsubframe;
bool bPacketBeacon; //cosa add for rssi
bool bToSelfBA; //cosa add for rssi
- char cck_adc_pwdb[4]; //cosa add for rx path selection
+ char cck_adc_pwdb[4]; //cosa add for rx path selection
u16 Seq_Num;
};
@@ -992,7 +992,7 @@ struct ieee80211_device;
#define SEC_ALG_TKIP 2
#define SEC_ALG_CCMP 3
-#define WEP_KEYS 4
+#define WEP_KEYS 4
#define WEP_KEY_LEN 13
#define SCM_KEY_LEN 32
#define SCM_TEMPORAL_KEY_LENGTH 16
@@ -1205,7 +1205,7 @@ struct ieee80211_drv_agg_txb {
struct sk_buff *tx_agg_frames[MAX_TX_AGG_COUNT];
}__attribute__((packed));
-#define MAX_SUBFRAME_COUNT 64
+#define MAX_SUBFRAME_COUNT 64
struct ieee80211_rxb {
u8 nr_subframes;
struct sk_buff *subframes[MAX_SUBFRAME_COUNT];
@@ -1534,7 +1534,7 @@ struct ieee80211_network {
bool bWithAironetIE;
bool bCkipSupported;
bool bCcxRmEnable;
- u16 CcxRmState[2];
+ u16 CcxRmState[2];
// CCXv4 S59, MBSSID.
bool bMBssidValid;
u8 MBssidMask;
@@ -1655,8 +1655,7 @@ typedef struct tx_pending_t{
struct ieee80211_txb *txb;
}tx_pending_t;
-typedef struct _bandwidth_autoswitch
-{
+typedef struct _bandwidth_autoswitch {
long threshold_20Mhzto40Mhz;
long threshold_40Mhzto20Mhz;
bool bforced_tx20Mhz;
@@ -1668,8 +1667,7 @@ typedef struct _bandwidth_autoswitch
#define REORDER_WIN_SIZE 128
#define REORDER_ENTRY_NUM 128
-typedef struct _RX_REORDER_ENTRY
-{
+typedef struct _RX_REORDER_ENTRY {
struct list_head List;
u16 SeqNum;
struct ieee80211_rxb* prxb;
@@ -1709,15 +1707,13 @@ typedef struct _IbssParms{
#define MAX_NUM_RATES 264 // Max num of support rates element: 8, Max num of ext. support rate: 255. 061122, by rcnjko.
// RF state.
-typedef enum _RT_RF_POWER_STATE
-{
+typedef enum _RT_RF_POWER_STATE {
eRfOn,
eRfSleep,
eRfOff
}RT_RF_POWER_STATE;
-typedef struct _RT_POWER_SAVE_CONTROL
-{
+typedef struct _RT_POWER_SAVE_CONTROL {
//
// Inactive Power Save(IPS) : Disable RF when disconnected
@@ -1726,7 +1722,7 @@ typedef struct _RT_POWER_SAVE_CONTROL
bool bIPSModeBackup;
bool bSwRfProcessing;
RT_RF_POWER_STATE eInactivePowerState;
- struct work_struct InactivePsWorkItem;
+ struct work_struct InactivePsWorkItem;
struct timer_list InactivePsTimer;
// Return point for join action
@@ -1837,11 +1833,11 @@ struct ieee80211_device {
u8 HTHighestOperaRate;
//wb added for rate operation mode to firmware
u8 bTxDisableRateFallBack;
- u8 bTxUseDriverAssingedRate;
+ u8 bTxUseDriverAssingedRate;
atomic_t atm_chnlop;
atomic_t atm_swbw;
// u8 HTHighestOperaRate;
-// u8 HTCurrentOperaRate;
+// u8 HTCurrentOperaRate;
// 802.11e and WMM Traffic Stream Info (TX)
struct list_head Tx_TS_Admit_List;
@@ -2055,9 +2051,9 @@ struct ieee80211_device {
bool bdynamic_txpower_enable;
bool bCTSToSelfEnable;
- u8 CTSToSelfTH;
+ u8 CTSToSelfTH;
- u32 fsync_time_interval;
+ u32 fsync_time_interval;
u32 fsync_rate_bitmap;
u8 fsync_rssi_threshold;
bool bfsync_enable;
@@ -2092,10 +2088,10 @@ struct ieee80211_device {
struct delayed_work start_ibss_wq;
struct work_struct wx_sync_scan_wq;
struct workqueue_struct *wq;
- // Qos related. Added by Annie, 2005-11-01.
- //STA_QOS StaQos;
+ // Qos related. Added by Annie, 2005-11-01.
+ //STA_QOS StaQos;
- //u32 STA_EDCA_PARAM[4];
+ //u32 STA_EDCA_PARAM[4];
//CHANNEL_ACCESS_SETTING ChannelAccessSetting;
@@ -2111,11 +2107,11 @@ struct ieee80211_device {
struct net_device *dev);
int (*reset_port)(struct net_device *dev);
- int (*is_queue_full) (struct net_device * dev, int pri);
+ int (*is_queue_full) (struct net_device * dev, int pri);
- int (*handle_management) (struct net_device * dev,
- struct ieee80211_network * network, u16 type);
- int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
+ int (*handle_management) (struct net_device * dev,
+ struct ieee80211_network * network, u16 type);
+ int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
/* Softmac-generated frames (management) are TXed via this
* callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
@@ -2214,7 +2210,7 @@ struct ieee80211_device {
#define IEEE_A (1<<0)
#define IEEE_B (1<<1)
#define IEEE_G (1<<2)
-#define IEEE_N_24G (1<<4)
+#define IEEE_N_24G (1<<4)
#define IEEE_N_5G (1<<5)
#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 82d4bf6a86a5..76c56e5aed79 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -220,7 +220,7 @@ void free_ieee80211(struct net_device *dev)
#ifdef CONFIG_IEEE80211_DEBUG
-u32 ieee80211_debug_level = 0;
+u32 ieee80211_debug_level;
static int debug = \
// IEEE80211_DL_INFO |
// IEEE80211_DL_WX |
@@ -233,15 +233,15 @@ static int debug = \
// IEEE80211_DL_TX |
// IEEE80211_DL_RX |
//IEEE80211_DL_QOS |
- // IEEE80211_DL_HT |
+ // IEEE80211_DL_HT |
// IEEE80211_DL_TS |
-// IEEE80211_DL_BA |
+// IEEE80211_DL_BA |
// IEEE80211_DL_REORDER|
// IEEE80211_DL_TRACE |
//IEEE80211_DL_DATA |
IEEE80211_DL_ERR //awayls open this flags to show error out
;
-struct proc_dir_entry *ieee80211_proc = NULL;
+struct proc_dir_entry *ieee80211_proc;
static int show_debug_level(char *page, char **start, off_t offset,
int count, int *eof, void *data)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index e3cf7a45b900..ee7ce5fca462 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -52,7 +52,7 @@ static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
u16 fc = le16_to_cpu(hdr->frame_ctl);
skb->dev = ieee->dev;
- skb_reset_mac_header(skb);
+ skb_reset_mac_header(skb);
skb_pull(skb, ieee80211_get_hdrlen(fc));
skb->pkt_type = PACKET_OTHERHOST;
@@ -218,16 +218,16 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
* this is not mandatory.... but seems that the probe
* response parser uses it
*/
- struct ieee80211_hdr_3addr * hdr = (struct ieee80211_hdr_3addr *)skb->data;
+ struct ieee80211_hdr_3addr * hdr = (struct ieee80211_hdr_3addr *)skb->data;
rx_stats->len = skb->len;
ieee80211_rx_mgt(ieee,(struct ieee80211_hdr_4addr *)skb->data,rx_stats);
- //if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN)))
- if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))//use ADDR1 to perform address matching for Management frames
- {
- dev_kfree_skb_any(skb);
- return 0;
- }
+ //if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN)))
+ if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))//use ADDR1 to perform address matching for Management frames
+ {
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype);
@@ -608,7 +608,7 @@ void RxReorderIndicatePacket( struct ieee80211_device *ieee,
u16 SeqNum)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- PRX_REORDER_ENTRY pReorderEntry = NULL;
+ PRX_REORDER_ENTRY pReorderEntry = NULL;
struct ieee80211_rxb* prxbIndicateArray[REORDER_WIN_SIZE];
u8 WinSize = pHTInfo->RxReorderWinSize;
u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096;
@@ -773,7 +773,7 @@ void RxReorderIndicatePacket( struct ieee80211_device *ieee,
}
u8 parse_subframe(struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats,
+ struct ieee80211_rx_stats *rx_stats,
struct ieee80211_rxb *rxb,u8* src,u8* dst)
{
struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr* )skb->data;
@@ -1043,7 +1043,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
{
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->RxLastFragNum is %d,frag is %d,pRxTS->RxLastSeqNum is %d,seq is %d\n",__FUNCTION__,pRxTS->RxLastFragNum,frag,pRxTS->RxLastSeqNum,WLAN_GET_SEQ_SEQ(sc));
- if( (fc & (1<<11)) &&
+ if( (fc & (1<<11)) &&
(frag == pRxTS->RxLastFragNum) &&
(WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum) )
{
@@ -1154,8 +1154,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
type, stype, skb->len);
goto rx_dropped;
}
- if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
- goto rx_dropped;
+ if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
+ goto rx_dropped;
/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
@@ -1402,19 +1402,19 @@ static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
* the right values
*/
static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
- *info_element, int sub_type)
+ *info_element, int sub_type)
{
- if (info_element->qui_subtype != sub_type)
- return -1;
- if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
- return -1;
- if (info_element->qui_type != QOS_OUI_TYPE)
- return -1;
- if (info_element->version != QOS_VERSION_1)
- return -1;
+ if (info_element->qui_subtype != sub_type)
+ return -1;
+ if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
+ return -1;
+ if (info_element->qui_type != QOS_OUI_TYPE)
+ return -1;
+ if (info_element->version != QOS_VERSION_1)
+ return -1;
- return 0;
+ return 0;
}
@@ -1422,56 +1422,56 @@ static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
* Parse a QoS parameter element
*/
static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
- *element_param, struct ieee80211_info_element
- *info_element)
+ *element_param, struct ieee80211_info_element
+ *info_element)
{
- int ret = 0;
- u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;
-
- if ((info_element == NULL) || (element_param == NULL))
- return -1;
-
- if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
- memcpy(element_param->info_element.qui, info_element->data,
- info_element->len);
- element_param->info_element.elementID = info_element->id;
- element_param->info_element.length = info_element->len;
- } else
- ret = -1;
- if (ret == 0)
- ret = ieee80211_verify_qos_info(&element_param->info_element,
- QOS_OUI_PARAM_SUB_TYPE);
- return ret;
+ int ret = 0;
+ u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;
+
+ if ((info_element == NULL) || (element_param == NULL))
+ return -1;
+
+ if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
+ memcpy(element_param->info_element.qui, info_element->data,
+ info_element->len);
+ element_param->info_element.elementID = info_element->id;
+ element_param->info_element.length = info_element->len;
+ } else
+ ret = -1;
+ if (ret == 0)
+ ret = ieee80211_verify_qos_info(&element_param->info_element,
+ QOS_OUI_PARAM_SUB_TYPE);
+ return ret;
}
/*
* Parse a QoS information element
*/
static int ieee80211_read_qos_info_element(struct
- ieee80211_qos_information_element
- *element_info, struct ieee80211_info_element
- *info_element)
+ ieee80211_qos_information_element
+ *element_info, struct ieee80211_info_element
+ *info_element)
{
- int ret = 0;
- u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
-
- if (element_info == NULL)
- return -1;
- if (info_element == NULL)
- return -1;
-
- if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
- memcpy(element_info->qui, info_element->data,
- info_element->len);
- element_info->elementID = info_element->id;
- element_info->length = info_element->len;
- } else
- ret = -1;
-
- if (ret == 0)
- ret = ieee80211_verify_qos_info(element_info,
- QOS_OUI_INFO_SUB_TYPE);
- return ret;
+ int ret = 0;
+ u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
+
+ if (element_info == NULL)
+ return -1;
+ if (info_element == NULL)
+ return -1;
+
+ if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
+ memcpy(element_info->qui, info_element->data,
+ info_element->len);
+ element_info->elementID = info_element->id;
+ element_info->length = info_element->len;
+ } else
+ ret = -1;
+
+ if (ret == 0)
+ ret = ieee80211_verify_qos_info(element_info,
+ QOS_OUI_INFO_SUB_TYPE);
+ return ret;
}
@@ -1479,39 +1479,39 @@ static int ieee80211_read_qos_info_element(struct
* Write QoS parameters from the ac parameters.
*/
static int ieee80211_qos_convert_ac_to_parameters(struct
- ieee80211_qos_parameter_info
- *param_elm, struct
- ieee80211_qos_parameters
- *qos_param)
+ ieee80211_qos_parameter_info
+ *param_elm, struct
+ ieee80211_qos_parameters
+ *qos_param)
{
- int rc = 0;
- int i;
- struct ieee80211_qos_ac_parameter *ac_params;
+ int rc = 0;
+ int i;
+ struct ieee80211_qos_ac_parameter *ac_params;
u8 aci;
- //u8 cw_min;
- //u8 cw_max;
+ //u8 cw_min;
+ //u8 cw_max;
- for (i = 0; i < QOS_QUEUE_NUM; i++) {
- ac_params = &(param_elm->ac_params_record[i]);
+ for (i = 0; i < QOS_QUEUE_NUM; i++) {
+ ac_params = &(param_elm->ac_params_record[i]);
aci = (ac_params->aci_aifsn & 0x60) >> 5;
if(aci >= QOS_QUEUE_NUM)
continue;
- qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;
+ qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;
/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
- qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci];
+ qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci];
- qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
+ qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
- qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
+ qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
- qos_param->flag[aci] =
- (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
- qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
- }
- return rc;
+ qos_param->flag[aci] =
+ (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
+ qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
+ }
+ return rc;
}
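ieee80211_qos_convert_ac_to_parameters() above unpacks each WMM AC record: ACI, the ACM bit and AIFSN share one byte, ECWmin/ECWmax share another, and the TXOP limit follows as a little-endian u16. A standalone sketch of the field extraction, including the AIFSN >= 2 clamp the comment cites:

#include <stdint.h>

struct ac_fields {
	uint8_t aci;	/* access category index, 0..3 */
	uint8_t acm;	/* admission control mandatory flag */
	uint8_t aifsn;	/* clamped to the WMM minimum of 2 */
	uint8_t cw_min;	/* exponent form (ECWmin) */
	uint8_t cw_max;	/* exponent form (ECWmax) */
};

static struct ac_fields parse_ac_record(uint8_t aci_aifsn, uint8_t ecw_min_max)
{
	struct ac_fields f;

	f.aci    = (aci_aifsn & 0x60) >> 5;
	f.acm    = (aci_aifsn & 0x10) ? 1 : 0;
	f.aifsn  = aci_aifsn & 0x0f;
	if (f.aifsn < 2)
		f.aifsn = 2;
	f.cw_min = ecw_min_max & 0x0f;
	f.cw_max = (ecw_min_max & 0xf0) >> 4;
	return f;
}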
/*
@@ -1520,38 +1520,38 @@ static int ieee80211_qos_convert_ac_to_parameters(struct
* which type to read
*/
static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
- *info_element,
- struct ieee80211_network *network)
+ *info_element,
+ struct ieee80211_network *network)
{
- int rc = 0;
- struct ieee80211_qos_parameters *qos_param = NULL;
- struct ieee80211_qos_information_element qos_info_element;
-
- rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
-
- if (rc == 0) {
- network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
- network->flags |= NETWORK_HAS_QOS_INFORMATION;
- } else {
- struct ieee80211_qos_parameter_info param_element;
-
- rc = ieee80211_read_qos_param_element(&param_element,
- info_element);
- if (rc == 0) {
- qos_param = &(network->qos_data.parameters);
- ieee80211_qos_convert_ac_to_parameters(&param_element,
- qos_param);
- network->flags |= NETWORK_HAS_QOS_PARAMETERS;
- network->qos_data.param_count =
- param_element.info_element.ac_info & 0x0F;
- }
- }
-
- if (rc == 0) {
- IEEE80211_DEBUG_QOS("QoS is supported\n");
- network->qos_data.supported = 1;
- }
- return rc;
+ int rc = 0;
+ struct ieee80211_qos_parameters *qos_param = NULL;
+ struct ieee80211_qos_information_element qos_info_element;
+
+ rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
+
+ if (rc == 0) {
+ network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
+ network->flags |= NETWORK_HAS_QOS_INFORMATION;
+ } else {
+ struct ieee80211_qos_parameter_info param_element;
+
+ rc = ieee80211_read_qos_param_element(&param_element,
+ info_element);
+ if (rc == 0) {
+ qos_param = &(network->qos_data.parameters);
+ ieee80211_qos_convert_ac_to_parameters(&param_element,
+ qos_param);
+ network->flags |= NETWORK_HAS_QOS_PARAMETERS;
+ network->qos_data.param_count =
+ param_element.info_element.ac_info & 0x0F;
+ }
+ }
+
+ if (rc == 0) {
+ IEEE80211_DEBUG_QOS("QoS is supported\n");
+ network->qos_data.supported = 1;
+ }
+ return rc;
}
#ifdef CONFIG_IEEE80211_DEBUG
@@ -1559,37 +1559,37 @@ static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
static const char *get_info_element_string(u16 id)
{
- switch (id) {
- MFIE_STRING(SSID);
- MFIE_STRING(RATES);
- MFIE_STRING(FH_SET);
- MFIE_STRING(DS_SET);
- MFIE_STRING(CF_SET);
- MFIE_STRING(TIM);
- MFIE_STRING(IBSS_SET);
- MFIE_STRING(COUNTRY);
- MFIE_STRING(HOP_PARAMS);
- MFIE_STRING(HOP_TABLE);
- MFIE_STRING(REQUEST);
- MFIE_STRING(CHALLENGE);
- MFIE_STRING(POWER_CONSTRAINT);
- MFIE_STRING(POWER_CAPABILITY);
- MFIE_STRING(TPC_REQUEST);
- MFIE_STRING(TPC_REPORT);
- MFIE_STRING(SUPP_CHANNELS);
- MFIE_STRING(CSA);
- MFIE_STRING(MEASURE_REQUEST);
- MFIE_STRING(MEASURE_REPORT);
- MFIE_STRING(QUIET);
- MFIE_STRING(IBSS_DFS);
- // MFIE_STRING(ERP_INFO);
- MFIE_STRING(RSN);
- MFIE_STRING(RATES_EX);
- MFIE_STRING(GENERIC);
- MFIE_STRING(QOS_PARAMETER);
- default:
- return "UNKNOWN";
- }
+ switch (id) {
+ MFIE_STRING(SSID);
+ MFIE_STRING(RATES);
+ MFIE_STRING(FH_SET);
+ MFIE_STRING(DS_SET);
+ MFIE_STRING(CF_SET);
+ MFIE_STRING(TIM);
+ MFIE_STRING(IBSS_SET);
+ MFIE_STRING(COUNTRY);
+ MFIE_STRING(HOP_PARAMS);
+ MFIE_STRING(HOP_TABLE);
+ MFIE_STRING(REQUEST);
+ MFIE_STRING(CHALLENGE);
+ MFIE_STRING(POWER_CONSTRAINT);
+ MFIE_STRING(POWER_CAPABILITY);
+ MFIE_STRING(TPC_REQUEST);
+ MFIE_STRING(TPC_REPORT);
+ MFIE_STRING(SUPP_CHANNELS);
+ MFIE_STRING(CSA);
+ MFIE_STRING(MEASURE_REQUEST);
+ MFIE_STRING(MEASURE_REPORT);
+ MFIE_STRING(QUIET);
+ MFIE_STRING(IBSS_DFS);
+ // MFIE_STRING(ERP_INFO);
+ MFIE_STRING(RSN);
+ MFIE_STRING(RATES_EX);
+ MFIE_STRING(GENERIC);
+ MFIE_STRING(QOS_PARAMETER);
+ default:
+ return "UNKNOWN";
+ }
}
#endif
@@ -1634,7 +1634,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
{
u8 i;
short offset;
- u16 tmp_htcap_len=0;
+ u16 tmp_htcap_len=0;
u16 tmp_htinfo_len=0;
u16 ht_realtek_agg_len=0;
u8 ht_realtek_agg_buf[MAX_IE_LEN];
@@ -1752,34 +1752,34 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
network->tim.tim_count = info_element->data[0];
network->tim.tim_period = info_element->data[1];
- network->dtim_period = info_element->data[1];
- if(ieee->state != IEEE80211_LINKED)
- break;
+ network->dtim_period = info_element->data[1];
+ if(ieee->state != IEEE80211_LINKED)
+ break;
- network->last_dtim_sta_time[0] = stats->mac_time[0];
- network->last_dtim_sta_time[1] = stats->mac_time[1];
+ network->last_dtim_sta_time[0] = stats->mac_time[0];
+ network->last_dtim_sta_time[1] = stats->mac_time[1];
- network->dtim_data = IEEE80211_DTIM_VALID;
+ network->dtim_data = IEEE80211_DTIM_VALID;
- if(info_element->data[0] != 0)
- break;
+ if(info_element->data[0] != 0)
+ break;
- if(info_element->data[2] & 1)
- network->dtim_data |= IEEE80211_DTIM_MBCAST;
+ if(info_element->data[2] & 1)
+ network->dtim_data |= IEEE80211_DTIM_MBCAST;
- offset = (info_element->data[2] >> 1)*2;
+ offset = (info_element->data[2] >> 1)*2;
- //printk("offset1:%x aid:%x\n",offset, ieee->assoc_id);
+ //printk("offset1:%x aid:%x\n",offset, ieee->assoc_id);
- if(ieee->assoc_id < 8*offset ||
- ieee->assoc_id > 8*(offset + info_element->len -3))
+ if(ieee->assoc_id < 8*offset ||
+ ieee->assoc_id > 8*(offset + info_element->len -3))
- break;
+ break;
- offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ;
+ offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ;
- if(info_element->data[3+offset] & (1<<(ieee->assoc_id%8)))
- network->dtim_data |= IEEE80211_DTIM_UCAST;
+ if(info_element->data[3+offset] & (1<<(ieee->assoc_id%8)))
+ network->dtim_data |= IEEE80211_DTIM_UCAST;
//IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n");
break;
@@ -1820,17 +1820,17 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
}
#ifdef THOMAS_TURBO
- if (info_element->len == 7 &&
- info_element->data[0] == 0x00 &&
- info_element->data[1] == 0xe0 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x01 &&
- info_element->data[4] == 0x02) {
- network->Turbo_Enable = 1;
- }
+ if (info_element->len == 7 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0xe0 &&
+ info_element->data[2] == 0x4c &&
+ info_element->data[3] == 0x01 &&
+ info_element->data[4] == 0x02) {
+ network->Turbo_Enable = 1;
+ }
#endif
- //for HTcap and HTinfo parameters
+ //for HTcap and HTinfo parameters
if(tmp_htcap_len == 0){
if(info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
@@ -1839,12 +1839,12 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[3] == 0x033){
tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htcap_len != 0){
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
+ if(tmp_htcap_len != 0){
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\
sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len;
memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen);
- }
+ }
}
if(tmp_htcap_len != 0)
network->bssht.bdSupportHT = true;
@@ -1856,9 +1856,9 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
if(tmp_htinfo_len == 0){
if(info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
- info_element->data[1] == 0x90 &&
- info_element->data[2] == 0x4c &&
- info_element->data[3] == 0x034){
+ info_element->data[1] == 0x90 &&
+ info_element->data[2] == 0x4c &&
+ info_element->data[3] == 0x034){
tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN);
if(tmp_htinfo_len != 0){
@@ -2014,7 +2014,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
network->rsn_ie_len);
break;
- //HT related element.
+ //HT related element.
case MFIE_TYPE_HT_CAP:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n",
info_element->len);
@@ -2027,7 +2027,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
//If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT()
// windows driver will update WMM parameters each beacon received once connected
- // Linux driver is a bit different.
+ // Linux driver is a bit different.
network->bssht.bdSupportHT = true;
}
else
@@ -2060,7 +2060,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
if( (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_MIC) ||
(info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_PK) )
{
- network->bCkipSupported = true;
+ network->bCkipSupported = true;
}
else
{
@@ -2070,7 +2070,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
else
{
network->bWithAironetIE = false;
- network->bCkipSupported = false;
+ network->bCkipSupported = false;
}
break;
case MFIE_TYPE_QOS_PARAMETER:
@@ -2189,10 +2189,10 @@ static inline int ieee80211_network_init(
//char *p;
#endif
- network->qos_data.active = 0;
- network->qos_data.supported = 0;
- network->qos_data.param_count = 0;
- network->qos_data.old_param_count = 0;
+ network->qos_data.active = 0;
+ network->qos_data.supported = 0;
+ network->qos_data.param_count = 0;
+ network->qos_data.old_param_count = 0;
/* Pull out fixed field data */
memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
@@ -2209,9 +2209,9 @@ static inline int ieee80211_network_init(
network->flags = 0;
network->atim_window = 0;
network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
- 0x3 : 0x0;
+ 0x3 : 0x0;
network->berp_info_valid = false;
- network->broadcom_cap_exist = false;
+ network->broadcom_cap_exist = false;
network->ralink_cap_exist = false;
network->atheros_cap_exist = false;
network->cisco_cap_exist = false;
@@ -2230,12 +2230,12 @@ static inline int ieee80211_network_init(
} else
network->flags |= NETWORK_HAS_CCK;
- network->wpa_ie_len = 0;
- network->rsn_ie_len = 0;
+ network->wpa_ie_len = 0;
+ network->rsn_ie_len = 0;
- if (ieee80211_parse_info_param
- (ieee,beacon->info_element, stats->len - sizeof(*beacon), network, stats))
- return 1;
+ if (ieee80211_parse_info_param
+ (ieee,beacon->info_element, stats->len - sizeof(*beacon), network, stats))
+ return 1;
network->mode = 0;
if (stats->freq == IEEE80211_52GHZ_BAND)
@@ -2329,7 +2329,7 @@ static inline void update_network(struct ieee80211_network *dst,
dst->last_dtim_sta_time[1] = src->last_dtim_sta_time[1];
memcpy(&dst->tim, &src->tim, sizeof(struct ieee80211_tim_parameters));
- dst->bssht.bdSupportHT = src->bssht.bdSupportHT;
+ dst->bssht.bdSupportHT = src->bssht.bdSupportHT;
dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation;
dst->bssht.bdHTCapLen= src->bssht.bdHTCapLen;
memcpy(dst->bssht.bdHTCapBuf,src->bssht.bdHTCapBuf,src->bssht.bdHTCapLen);
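The reindented MFIE_TYPE_TIM hunk near the top of this file's diff walks the element's partial virtual bitmap to decide whether the AP has buffered unicast traffic for this station. A minimal standalone sketch of that check, assuming the standard TIM layout (DTIM count, DTIM period, bitmap control, then the partial virtual bitmap); the helper name is illustrative, not driver code:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: mirrors the bounds and bit test in the hunk above. */
static bool tim_has_unicast_for_aid(const uint8_t *tim, uint8_t tim_len,
				    uint16_t aid)
{
	uint16_t offset;

	if (tim_len < 4)	/* count, period, bitmap control, >= 1 octet */
		return false;

	/* Bitmap control bits 1..7 give the bitmap offset in 2-octet units. */
	offset = (tim[2] >> 1) * 2;

	/* The AID must fall inside the partial virtual bitmap carried here. */
	if (aid < 8 * offset || aid > 8 * (offset + tim_len - 3))
		return false;

	return tim[3 + aid / 8 - offset] & (1 << (aid % 8));
}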
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 7a0707810fd0..454f8895d211 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -498,7 +498,7 @@ void ieee80211_softmac_scan_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
- static short watchdog = 0;
+ static short watchdog;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
if(!ieee->ieee_up)
@@ -1948,166 +1948,166 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
- case IEEE80211_STYPE_ASSOC_RESP:
- case IEEE80211_STYPE_REASSOC_RESP:
-
- IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
- ieee->iw_mode == IW_MODE_INFRA){
- struct ieee80211_network network_resp;
- struct ieee80211_network *network = &network_resp;
-
- if (0 == (errcode=assoc_parse(ieee,skb, &aid))){
- ieee->state=IEEE80211_LINKED;
- ieee->assoc_id = aid;
- ieee->softmac_stats.rx_ass_ok++;
- /* station support qos */
- /* Let the register setting defaultly with Legacy station */
- if(ieee->qos_support) {
- assoc_resp = (struct ieee80211_assoc_response_frame*)skb->data;
- memset(network, 0, sizeof(*network));
- if (ieee80211_parse_info_param(ieee,assoc_resp->info_element,\
- rx_stats->len - sizeof(*assoc_resp),\
- network,rx_stats)){
- return 1;
- }
- else
- { //filling the PeerHTCap. //maybe not necessary as we can get its info from current_network.
- memcpy(ieee->pHTInfo->PeerHTCapBuf, network->bssht.bdHTCapBuf, network->bssht.bdHTCapLen);
- memcpy(ieee->pHTInfo->PeerHTInfoBuf, network->bssht.bdHTInfoBuf, network->bssht.bdHTInfoLen);
- }
- if (ieee->handle_assoc_response != NULL)
- ieee->handle_assoc_response(ieee->dev, (struct ieee80211_assoc_response_frame*)header, network);
+ case IEEE80211_STYPE_ASSOC_RESP:
+ case IEEE80211_STYPE_REASSOC_RESP:
+
+ IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
+ WLAN_FC_GET_STYPE(header->frame_ctl));
+ if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
+ ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
+ ieee->iw_mode == IW_MODE_INFRA){
+ struct ieee80211_network network_resp;
+ struct ieee80211_network *network = &network_resp;
+
+ if (0 == (errcode=assoc_parse(ieee,skb, &aid))){
+ ieee->state=IEEE80211_LINKED;
+ ieee->assoc_id = aid;
+ ieee->softmac_stats.rx_ass_ok++;
+ /* station support qos */
+ /* Let the register setting defaultly with Legacy station */
+ if(ieee->qos_support) {
+ assoc_resp = (struct ieee80211_assoc_response_frame*)skb->data;
+ memset(network, 0, sizeof(*network));
+ if (ieee80211_parse_info_param(ieee,assoc_resp->info_element,\
+ rx_stats->len - sizeof(*assoc_resp),\
+ network,rx_stats)){
+ return 1;
}
- ieee80211_associate_complete(ieee);
- } else {
- /* aid could not been allocated */
- ieee->softmac_stats.rx_ass_err++;
- printk(
- "Association response status code 0x%x\n",
- errcode);
- IEEE80211_DEBUG_MGMT(
- "Association response status code 0x%x\n",
- errcode);
- if(ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) {
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
- } else {
- ieee80211_associate_abort(ieee);
+ else
+ { //filling the PeerHTCap. //maybe not necessary as we can get its info from current_network.
+ memcpy(ieee->pHTInfo->PeerHTCapBuf, network->bssht.bdHTCapBuf, network->bssht.bdHTCapLen);
+ memcpy(ieee->pHTInfo->PeerHTInfoBuf, network->bssht.bdHTInfoBuf, network->bssht.bdHTInfoLen);
}
+ if (ieee->handle_assoc_response != NULL)
+ ieee->handle_assoc_response(ieee->dev, (struct ieee80211_assoc_response_frame*)header, network);
+ }
+ ieee80211_associate_complete(ieee);
+ } else {
+ /* aid could not been allocated */
+ ieee->softmac_stats.rx_ass_err++;
+ printk(
+ "Association response status code 0x%x\n",
+ errcode);
+ IEEE80211_DEBUG_MGMT(
+ "Association response status code 0x%x\n",
+ errcode);
+ if(ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) {
+ queue_work(ieee->wq, &ieee->associate_procedure_wq);
+ } else {
+ ieee80211_associate_abort(ieee);
}
}
- break;
+ }
+ break;
- case IEEE80211_STYPE_ASSOC_REQ:
- case IEEE80211_STYPE_REASSOC_REQ:
+ case IEEE80211_STYPE_ASSOC_REQ:
+ case IEEE80211_STYPE_REASSOC_REQ:
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->iw_mode == IW_MODE_MASTER)
+ if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
+ ieee->iw_mode == IW_MODE_MASTER)
- ieee80211_rx_assoc_rq(ieee, skb);
- break;
+ ieee80211_rx_assoc_rq(ieee, skb);
+ break;
- case IEEE80211_STYPE_AUTH:
+ case IEEE80211_STYPE_AUTH:
- if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE){
- if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING &&
- ieee->iw_mode == IW_MODE_INFRA){
+ if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE){
+ if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING &&
+ ieee->iw_mode == IW_MODE_INFRA){
- IEEE80211_DEBUG_MGMT("Received authentication response");
+ IEEE80211_DEBUG_MGMT("Received authentication response");
- if (0 == (errcode=auth_parse(skb, &challenge, &chlen))){
- if(ieee->open_wep || !challenge){
- ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
- ieee->softmac_stats.rx_auth_rs_ok++;
- if(!(ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE))
+ if (0 == (errcode=auth_parse(skb, &challenge, &chlen))){
+ if(ieee->open_wep || !challenge){
+ ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
+ ieee->softmac_stats.rx_auth_rs_ok++;
+ if(!(ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE))
+ {
+ if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
{
- if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
+ // WEP or TKIP encryption
+ if(IsHTHalfNmodeAPs(ieee))
{
- // WEP or TKIP encryption
- if(IsHTHalfNmodeAPs(ieee))
- {
- bSupportNmode = true;
- bHalfSupportNmode = true;
- }
- else
- {
- bSupportNmode = false;
- bHalfSupportNmode = false;
- }
- printk("==========>to link with AP using SEC(%d, %d)", bSupportNmode, bHalfSupportNmode);
+ bSupportNmode = true;
+ bHalfSupportNmode = true;
}
+ else
+ {
+ bSupportNmode = false;
+ bHalfSupportNmode = false;
+ }
+ printk("==========>to link with AP using SEC(%d, %d)", bSupportNmode, bHalfSupportNmode);
}
- /* Dummy wirless mode setting to avoid encryption issue */
- if(bSupportNmode) {
- //N mode setting
- ieee->SetWirelessMode(ieee->dev, \
- ieee->current_network.mode);
- }else{
- //b/g mode setting
- /*TODO*/
- ieee->SetWirelessMode(ieee->dev, IEEE_G);
- }
-
- if (ieee->current_network.mode == IEEE_N_24G && bHalfSupportNmode == true)
- {
- printk("===============>entern half N mode\n");
- ieee->bHalfWirelessN24GMode = true;
- }
- else
- ieee->bHalfWirelessN24GMode = false;
-
- ieee80211_associate_step2(ieee);
+ }
+ /* Dummy wirless mode setting to avoid encryption issue */
+ if(bSupportNmode) {
+ //N mode setting
+ ieee->SetWirelessMode(ieee->dev, \
+ ieee->current_network.mode);
}else{
- ieee80211_auth_challenge(ieee, challenge, chlen);
+ //b/g mode setting
+ /*TODO*/
+ ieee->SetWirelessMode(ieee->dev, IEEE_G);
}
+
+ if (ieee->current_network.mode == IEEE_N_24G && bHalfSupportNmode == true)
+ {
+ printk("===============>entern half N mode\n");
+ ieee->bHalfWirelessN24GMode = true;
+ }
+ else
+ ieee->bHalfWirelessN24GMode = false;
+
+ ieee80211_associate_step2(ieee);
}else{
- ieee->softmac_stats.rx_auth_rs_err++;
- IEEE80211_DEBUG_MGMT("Authentication response status code 0x%x",errcode);
- ieee80211_associate_abort(ieee);
+ ieee80211_auth_challenge(ieee, challenge, chlen);
}
-
- }else if (ieee->iw_mode == IW_MODE_MASTER){
- ieee80211_rx_auth_rq(ieee, skb);
+ }else{
+ ieee->softmac_stats.rx_auth_rs_err++;
+ IEEE80211_DEBUG_MGMT("Authentication response status code 0x%x",errcode);
+ ieee80211_associate_abort(ieee);
}
+
+ }else if (ieee->iw_mode == IW_MODE_MASTER){
+ ieee80211_rx_auth_rq(ieee, skb);
}
- break;
+ }
+ break;
- case IEEE80211_STYPE_PROBE_REQ:
+ case IEEE80211_STYPE_PROBE_REQ:
- if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
- ((ieee->iw_mode == IW_MODE_ADHOC ||
- ieee->iw_mode == IW_MODE_MASTER) &&
- ieee->state == IEEE80211_LINKED)){
- ieee80211_rx_probe_rq(ieee, skb);
- }
- break;
+ if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
+ ((ieee->iw_mode == IW_MODE_ADHOC ||
+ ieee->iw_mode == IW_MODE_MASTER) &&
+ ieee->state == IEEE80211_LINKED)){
+ ieee80211_rx_probe_rq(ieee, skb);
+ }
+ break;
- case IEEE80211_STYPE_DISASSOC:
- case IEEE80211_STYPE_DEAUTH:
- /* FIXME for now repeat all the association procedure
- * both for disassociation and deauthentication
- */
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == IEEE80211_LINKED &&
- ieee->iw_mode == IW_MODE_INFRA){
-
- ieee->state = IEEE80211_ASSOCIATING;
- ieee->softmac_stats.reassoc++;
-
- notify_wx_assoc_event(ieee);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- RemovePeerTS(ieee, header->addr2);
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
- }
- break;
- case IEEE80211_STYPE_MANAGE_ACT:
- ieee80211_process_action(ieee,skb);
- break;
- default:
- return -1;
- break;
+ case IEEE80211_STYPE_DISASSOC:
+ case IEEE80211_STYPE_DEAUTH:
+ /* FIXME for now repeat all the association procedure
+ * both for disassociation and deauthentication
+ */
+ if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
+ ieee->state == IEEE80211_LINKED &&
+ ieee->iw_mode == IW_MODE_INFRA){
+
+ ieee->state = IEEE80211_ASSOCIATING;
+ ieee->softmac_stats.reassoc++;
+
+ notify_wx_assoc_event(ieee);
+ //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+ RemovePeerTS(ieee, header->addr2);
+ queue_work(ieee->wq, &ieee->associate_procedure_wq);
+ }
+ break;
+ case IEEE80211_STYPE_MANAGE_ACT:
+ ieee80211_process_action(ieee,skb);
+ break;
+ default:
+ return -1;
+ break;
}
//dev_kfree_skb_any(skb);
@@ -2503,8 +2503,8 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
}
void ieee80211_associate_retry_wq(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
+ struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
unsigned long flags;
down(&ieee->wx_sem);
@@ -3124,7 +3124,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
void
SendDisassociation(
struct ieee80211_device *ieee,
- u8* asSta,
+ u8* asSta,
u8 asRsn
)
{
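The "static short watchdog = 0;" change at the top of this file's diff follows the checkpatch rule that objects with static storage duration are already zero-initialized by the C standard, so the explicit initializer is redundant:

/* Both declarations place the variable in zero-initialized storage;
 * checkpatch flags the first form for its needless explicit initializer. */
static short watchdog_old_style = 0;	/* warned about */
static short watchdog;			/* preferred */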
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 421da8a07697..60746b8b1eb0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -302,7 +302,7 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
HT_EXTCHNL_OFFSET chan_offset=0;
HT_CHANNEL_WIDTH bandwidth=0;
int b40M = 0;
- static int count = 0;
+ static int count;
chan = ieee->current_network.channel;
netif_carrier_off(ieee->dev);
@@ -482,22 +482,23 @@ int ieee80211_wx_get_name(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- strcpy(wrqu->name, "802.11");
- if(ieee->modulation & IEEE80211_CCK_MODULATION){
- strcat(wrqu->name, "b");
- if(ieee->modulation & IEEE80211_OFDM_MODULATION)
- strcat(wrqu->name, "/g");
- }else if(ieee->modulation & IEEE80211_OFDM_MODULATION)
- strcat(wrqu->name, "g");
- if (ieee->mode & (IEEE_N_24G | IEEE_N_5G))
- strcat(wrqu->name, "/n");
+ strlcpy(wrqu->name, "802.11", IFNAMSIZ);
+ if (ieee->modulation & IEEE80211_CCK_MODULATION) {
+ strlcat(wrqu->name, "b", IFNAMSIZ);
+ if (ieee->modulation & IEEE80211_OFDM_MODULATION)
+ strlcat(wrqu->name, "/g", IFNAMSIZ);
+ } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
+ strlcat(wrqu->name, "g", IFNAMSIZ);
+ }
- if((ieee->state == IEEE80211_LINKED) ||
- (ieee->state == IEEE80211_LINKED_SCANNING))
- strcat(wrqu->name," linked");
- else if(ieee->state != IEEE80211_NOLINK)
- strcat(wrqu->name," link..");
+ if (ieee->mode & (IEEE_N_24G | IEEE_N_5G))
+ strlcat(wrqu->name, "/n", IFNAMSIZ);
+ if ((ieee->state == IEEE80211_LINKED) ||
+ (ieee->state == IEEE80211_LINKED_SCANNING))
+ strlcat(wrqu->name, " linked", IFNAMSIZ);
+ else if (ieee->state != IEEE80211_NOLINK)
+ strlcat(wrqu->name, " link..", IFNAMSIZ);
return 0;
}
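The ieee80211_wx_get_name() hunk above is the one functional change in this file: the unbounded strcpy()/strcat() chain is replaced by strlcpy()/strlcat() clamped to IFNAMSIZ, the size of wrqu->name. A sketch of the bounded pattern, with a hypothetical helper name:

#include <linux/if.h>		/* IFNAMSIZ */
#include <linux/string.h>	/* strlcpy(), strlcat() */
#include <linux/types.h>	/* bool */

/* Illustrative helper, not driver code: every append is bounded by the
 * destination size, so a long suffix is truncated instead of overflowing. */
static void build_proto_name(char name[IFNAMSIZ], bool cck, bool ofdm, bool ht)
{
	strlcpy(name, "802.11", IFNAMSIZ);
	if (cck)
		strlcat(name, "b", IFNAMSIZ);
	if (ofdm)
		strlcat(name, cck ? "/g" : "g", IFNAMSIZ);
	if (ht)
		strlcat(name, "/n", IFNAMSIZ);
}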
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 3f5ceeb88b6c..c39e680bb0ac 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -70,7 +70,7 @@ val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x
desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
| | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
'-----------------------------------------------------------------------------------------'
- /\
+ /\
|
802.11 Data Frame |
,--------- 'ctrl' expands to >-----------'
@@ -283,22 +283,22 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
ip = ip_hdr(skb);
switch (ip->tos & 0xfc) {
- case 0x20:
- return 2;
- case 0x40:
- return 1;
- case 0x60:
- return 3;
- case 0x80:
- return 4;
- case 0xa0:
- return 5;
- case 0xc0:
- return 6;
- case 0xe0:
- return 7;
- default:
- return 0;
+ case 0x20:
+ return 2;
+ case 0x40:
+ return 1;
+ case 0x60:
+ return 3;
+ case 0x80:
+ return 4;
+ case 0xa0:
+ return 5;
+ case 0xc0:
+ return 6;
+ case 0xe0:
+ return 7;
+ default:
+ return 0;
}
}
@@ -395,7 +395,7 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- tcb_desc->bUseShortGI = false;
+ tcb_desc->bUseShortGI = false;
if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
return;
@@ -514,7 +514,7 @@ void ieee80211_query_protectionmode(struct ieee80211_device* ieee, cb_desc* tcb_
if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
{
tcb_desc->bCTSEnable = true;
- tcb_desc->rts_rate = MGN_24M;
+ tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = true;
break;
}
@@ -527,7 +527,7 @@ void ieee80211_query_protectionmode(struct ieee80211_device* ieee, cb_desc* tcb_
{
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
- tcb_desc->bRTSEnable = true;
+ tcb_desc->bRTSEnable = true;
}
if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
tcb_desc->bUseShortPreamble = true;
@@ -656,17 +656,17 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
memcpy(&dest, skb->data, ETH_ALEN);
memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);
- /* Advance the SKB to the start of the payload */
- skb_pull(skb, sizeof(struct ethhdr));
+ /* Advance the SKB to the start of the payload */
+ skb_pull(skb, sizeof(struct ethhdr));
- /* Determine total amount of storage required for TXB packets */
- bytes = skb->len + SNAP_SIZE + sizeof(u16);
+ /* Determine total amount of storage required for TXB packets */
+ bytes = skb->len + SNAP_SIZE + sizeof(u16);
if (encrypt)
fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
else
- fc = IEEE80211_FTYPE_DATA;
+ fc = IEEE80211_FTYPE_DATA;
//if(ieee->current_network.QoS_Enable)
if(qos_actived)
@@ -689,7 +689,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
}
- header.frame_ctl = cpu_to_le16(fc);
+ header.frame_ctl = cpu_to_le16(fc);
/* Determine fragmentation size based on destination (multicast
* and broadcast are not fragmented) */
@@ -833,7 +833,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
else
ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
} else {
- if (ieee->seq_ctrl[0] == 0xFFF)
+ if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
else
ieee->seq_ctrl[0]++;
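The ieee80211_classify() switch reindented above maps the IP precedence bits of the TOS byte onto an 802.11 user priority, swapping precedences 1 and 2 and leaving every other DSCP codepoint at best effort. A compact equivalent, kept only as a sketch of what the switch computes:

#include <stdint.h>

/* Equivalent to the switch above: a TOS byte gets a non-default priority
 * only when its low DSCP bits are clear; precedence values 1 and 2 map to
 * priorities 2 and 1 respectively. */
static unsigned int tos_to_up(uint8_t tos)
{
	static const unsigned int prec_to_up[8] = { 0, 2, 1, 3, 4, 5, 6, 7 };
	uint8_t dscp = tos & 0xfc;

	if (dscp & 0x1c)
		return 0;
	return prec_to_up[dscp >> 5];
}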
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index f0ba7f467493..e1fe54acb4b8 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -761,7 +761,7 @@ int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
case IW_MLME_DISASSOC:
ieee80211_disassociate(ieee);
break;
- default:
+ default:
return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/internal.h b/drivers/staging/rtl8192u/ieee80211/internal.h
index bebe13ac53b7..6f54cfe8a467 100644
--- a/drivers/staging/rtl8192u/ieee80211/internal.h
+++ b/drivers/staging/rtl8192u/ieee80211/internal.h
@@ -79,4 +79,3 @@ void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);
#endif /* _CRYPTO_INTERNAL_H */
-
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
index 8ddc8bf9dc26..2c398ca9a8ac 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
@@ -1,7 +1,7 @@
#ifndef _BATYPE_H_
#define _BATYPE_H_
-#define TOTAL_TXBA_NUM 16
+#define TOTAL_TXBA_NUM 16
#define TOTAL_RXBA_NUM 16
#define BA_SETUP_TIMEOUT 200
@@ -28,8 +28,7 @@ struct ieee80211_ADDBA_Req{
//Is this need?I put here just to make it easier to define structure BA_RECORD //WB
typedef union _SEQUENCE_CONTROL{
u16 ShortData;
- struct
- {
+ struct {
u16 FragNum:4;
u16 SeqNum:12;
}field;
@@ -66,4 +65,3 @@ typedef struct _BA_RECORD {
} BA_RECORD, *PBA_RECORD;
#endif //end _BATYPE_H_
-
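The SEQUENCE_CONTROL union reformatted above packs the 802.11 sequence control word: a 4-bit fragment number in the low bits and a 12-bit sequence number above it. Since C bit-field ordering is implementation-defined, a portable reader would use explicit shifts, as in this sketch:

#include <stdint.h>

static uint16_t seq_ctrl_fragnum(uint16_t seq_ctrl)
{
	return seq_ctrl & 0x000f;	/* bits 0..3: fragment number */
}

static uint16_t seq_ctrl_seqnum(uint16_t seq_ctrl)
{
	return seq_ctrl >> 4;		/* bits 4..15: sequence number */
}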
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 1ebea3daea2d..69735d320315 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -9,8 +9,8 @@
/********************************************************************************************************************
*function: Activate BA entry. And if Time is nozero, start timer.
- * input: PBA_RECORD pBA //BA entry to be enabled
- * u16 Time //indicate time delay.
+ * input: PBA_RECORD pBA //BA entry to be enabled
+ * u16 Time //indicate time delay.
* output: none
********************************************************************************************************************/
void ActivateBAEntry(struct ieee80211_device* ieee, PBA_RECORD pBA, u16 Time)
@@ -22,7 +22,7 @@ void ActivateBAEntry(struct ieee80211_device* ieee, PBA_RECORD pBA, u16 Time)
/********************************************************************************************************************
*function: deactivate BA entry, including its timer.
- * input: PBA_RECORD pBA //BA entry to be disabled
+ * input: PBA_RECORD pBA //BA entry to be disabled
* output: none
********************************************************************************************************************/
void DeActivateBAEntry( struct ieee80211_device* ieee, PBA_RECORD pBA)
@@ -33,7 +33,7 @@ void DeActivateBAEntry( struct ieee80211_device* ieee, PBA_RECORD pBA)
/********************************************************************************************************************
*function: deactivete BA entry in Tx Ts, and send DELBA.
* input:
- * PTX_TS_RECORD pTxTs //Tx Ts which is to deactivate BA entry.
+ * PTX_TS_RECORD pTxTs //Tx Ts which is to deactivate BA entry.
* output: none
* notice: As PTX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME
********************************************************************************************************************/
@@ -63,7 +63,7 @@ u8 TxTsDeleteBA( struct ieee80211_device* ieee, PTX_TS_RECORD pTxTs)
/********************************************************************************************************************
*function: deactivete BA entry in Tx Ts, and send DELBA.
* input:
- * PRX_TS_RECORD pRxTs //Rx Ts which is to deactivate BA entry.
+ * PRX_TS_RECORD pRxTs //Rx Ts which is to deactivate BA entry.
* output: none
* notice: As PRX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME, same with above
********************************************************************************************************************/
@@ -84,7 +84,7 @@ u8 RxTsDeleteBA( struct ieee80211_device* ieee, PRX_TS_RECORD pRxTs)
/********************************************************************************************************************
*function: reset BA entry
* input:
- * PBA_RECORD pBA //entry to be reset
+ * PBA_RECORD pBA //entry to be reset
* output: none
********************************************************************************************************************/
void ResetBaEntry( PBA_RECORD pBA)
@@ -98,12 +98,12 @@ void ResetBaEntry( PBA_RECORD pBA)
//These functions need porting here or not?
/*******************************************************************************************************************************
*function: construct ADDBAREQ and ADDBARSP frame here together.
- * input: u8* Dst //ADDBA frame's destination
- * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA.
- * u16 StatusCode //status code in RSP and I will use it to indicate whether it's RSP or REQ(will I?)
- * u8 type //indicate whether it's RSP(ACT_ADDBARSP) ow REQ(ACT_ADDBAREQ)
+ * input: u8* Dst //ADDBA frame's destination
+ * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA.
+ * u16 StatusCode //status code in RSP and I will use it to indicate whether it's RSP or REQ(will I?)
+ * u8 type //indicate whether it's RSP(ACT_ADDBARSP) ow REQ(ACT_ADDBAREQ)
* output: none
- * return: sk_buff* skb //return constructed skb to xmit
+ * return: sk_buff* skb //return constructed skb to xmit
*******************************************************************************************************************************/
static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, PBA_RECORD pBA, u16 StatusCode, u8 type)
{
@@ -126,7 +126,7 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
return NULL;
}
- memset(skb->data, 0, sizeof( struct ieee80211_hdr_3addr)); //I wonder whether it's necessary. Apparently kernel will not do it when alloc a skb.
+ memset(skb->data, 0, sizeof( struct ieee80211_hdr_3addr)); //I wonder whether it's necessary. Apparently kernel will not do it when alloc a skb.
skb_reserve(skb, ieee->tx_headroom);
BAReq = ( struct ieee80211_hdr_3addr *) skb_put(skb,sizeof( struct ieee80211_hdr_3addr));
@@ -177,12 +177,12 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
/********************************************************************************************************************
*function: construct DELBA frame
- * input: u8* dst //DELBA frame's destination
- * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
- * TR_SELECT TxRxSelect //TX RX direction
- * u16 ReasonCode //status code.
+ * input: u8* dst //DELBA frame's destination
+ * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
+ * TR_SELECT TxRxSelect //TX RX direction
+ * u16 ReasonCode //status code.
* output: none
- * return: sk_buff* skb //return constructed skb to xmit
+ * return: sk_buff* skb //return constructed skb to xmit
********************************************************************************************************************/
static struct sk_buff* ieee80211_DELBA(
struct ieee80211_device* ieee,
@@ -246,8 +246,8 @@ static struct sk_buff* ieee80211_DELBA(
/********************************************************************************************************************
*function: send ADDBAReq frame out
- * input: u8* dst //ADDBAReq frame's destination
- * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
+ * input: u8* dst //ADDBAReq frame's destination
+ * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
* output: none
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
@@ -272,9 +272,9 @@ void ieee80211_send_ADDBAReq(struct ieee80211_device* ieee, u8* dst, PBA_RECORD
/********************************************************************************************************************
*function: send ADDBARSP frame out
- * input: u8* dst //DELBA frame's destination
- * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
- * u16 StatusCode //RSP StatusCode
+ * input: u8* dst //DELBA frame's destination
+ * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
+ * u16 StatusCode //RSP StatusCode
* output: none
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
@@ -297,10 +297,10 @@ void ieee80211_send_ADDBARsp(struct ieee80211_device* ieee, u8* dst, PBA_RECORD
}
/********************************************************************************************************************
*function: send ADDBARSP frame out
- * input: u8* dst //DELBA frame's destination
- * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
- * TR_SELECT TxRxSelect //TX or RX
- * u16 ReasonCode //DEL ReasonCode
+ * input: u8* dst //DELBA frame's destination
+ * PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
+ * TR_SELECT TxRxSelect //TX or RX
+ * u16 ReasonCode //DEL ReasonCode
* output: none
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
@@ -340,7 +340,7 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
return -1;
}
@@ -439,7 +439,7 @@ int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
return -1;
}
rsp = ( struct ieee80211_hdr_3addr*)skb->data;
@@ -569,7 +569,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
return -1;
}
@@ -589,7 +589,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
if(pDelBaParamSet->field.Initiator == 1)
{
- PRX_TS_RECORD pRxTs;
+ PRX_TS_RECORD pRxTs;
if( !GetTs(
ieee,
@@ -657,7 +657,7 @@ TsInitAddBA(
// BufferSize: This need to be set according to A-MPDU vector
pBA->BaParamSet.field.BufferSize = 32; // BufferSize: This need to be set according to A-MPDU vector
pBA->BaTimeoutValue = 0; // Timeout value: Set 0 to disable Timer
- pBA->BaStartSeqCtrl.field.SeqNum = (pTS->TxCurSeq + 3) % 4096; // Block Ack will start after 3 packets later.
+ pBA->BaStartSeqCtrl.field.SeqNum = (pTS->TxCurSeq + 3) % 4096; // Block Ack will start after 3 packets later.
ActivateBAEntry(ieee, pBA, BA_SETUP_TIMEOUT);
@@ -734,4 +734,3 @@ void RxBaInactTimeout(unsigned long data)
DELBA_REASON_TIMEOUT);
return ;
}
-
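The BAProc hunks above only touch comment alignment and the debug-format strings, but the guard they sit next to is worth spelling out: before parsing an ADDBA request the driver requires nine octets beyond the 3-address management header, which matches the fixed ADDBA request body (category, action, dialog token, 2-byte parameter set, 2-byte timeout, 2-byte starting sequence control). A standalone sketch of that bound, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

#define HDR_3ADDR_LEN		24	/* 3-address 802.11 management header */
#define ADDBA_REQ_BODY_LEN	9	/* category + action + dialog token +
					 * param set(2) + timeout(2) + SSC(2) */

static bool addba_req_len_ok(size_t frame_len)
{
	return frame_len >= HDR_3ADDR_LEN + ADDBA_REQ_BODY_LEN;
}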
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
index a60b39cdb472..2b8283534be4 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
@@ -121,7 +121,7 @@ typedef union _HT_CAPABILITY_MACPARA{
typedef enum _HT_ACTION{
ACT_RECOMMAND_WIDTH = 0,
- ACT_MIMO_PWR_SAVE = 1,
+ ACT_MIMO_PWR_SAVE = 1,
ACT_PSMP = 2,
ACT_SET_PCO_PHASE = 3,
ACT_MIMO_CHL_MEASURE = 4,
@@ -398,9 +398,9 @@ typedef struct _BSS_HT{
typedef struct _MIMO_RSSI{
u32 EnableAntenna;
u32 AntennaA;
- u32 AntennaB;
- u32 AntennaC;
- u32 AntennaD;
+ u32 AntennaB;
+ u32 AntennaC;
+ u32 AntennaD;
u32 Average;
}MIMO_RSSI, *PMIMO_RSSI;
@@ -436,11 +436,11 @@ extern u8 MCS_FILTER_1SS[16];
// MCS Bw 40 {1~7, 12~15,32}
-#define RATE_ADPT_1SS_MASK 0xFF
+#define RATE_ADPT_1SS_MASK 0xFF
#define RATE_ADPT_2SS_MASK 0xF0 //Skip MCS8~11 because mcs7 > mcs6, 9, 10, 11. 2007.01.16 by Emily
#define RATE_ADPT_MCS32_MASK 0x01
-#define IS_11N_MCS_RATE(rate) (rate&0x80)
+#define IS_11N_MCS_RATE(rate) (rate&0x80)
typedef enum _HT_AGGRE_SIZE{
HT_AGG_SIZE_8K = 0,
@@ -478,4 +478,3 @@ typedef enum _HT_IOT_ACTION{
}HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
#endif //_RTL819XU_HTTYPE_H_
-
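The realigned macros above reflect the driver's rate convention: an 11n MCS rate is flagged by bit 7 of the rate byte, with the low bits holding the MCS index, and the RATE_ADPT_*SS values appear to be bitmaps over MCS indices used when picking a rate-adaptive table. A small sketch under that assumption (helper names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define IS_11N_MCS_RATE(rate)	((rate) & 0x80)	/* bit 7 flags an MCS rate */

/* Illustrative: recover the MCS index from a flagged rate byte. */
static uint8_t mcs_index(uint8_t rate)
{
	return rate & 0x7f;
}

static bool is_ht_rate(uint8_t rate)
{
	return IS_11N_MCS_RATE(rate) != 0;
}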
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index ebb523904edc..268b270e9495 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -21,7 +21,7 @@ u16 MCS_DATA_RATE[2][2][77] =
81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080,
12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405,
405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540,
- 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz
+ 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz
{30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600,
90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200,
13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450,
@@ -88,7 +88,7 @@ void HTUpdateDefaultSetting(struct ieee80211_device* ieee)
ieee->bTxDisableRateFallBack = 0;
ieee->bTxUseDriverAssingedRate = 0;
-#ifdef TO_DO_LIST
+#ifdef TO_DO_LIST
// 8190 only. Assign duration operation mode to firmware
pMgntInfo->bTxEnableFwCalcDur = (BOOLEAN)pNdisCommon->bRegTxEnableFwCalcDur;
#endif
@@ -116,7 +116,7 @@ void HTUpdateDefaultSetting(struct ieee80211_device* ieee)
/********************************************************************************************************************
*function: This function print out each field on HT capability IE mainly from (Beacon/ProbeRsp/AssocReq)
* input: u8* CapIE //Capability IE to be printed out
- * u8* TitleString //mainly print out caller function
+ * u8* TitleString //mainly print out caller function
* output: none
* return: none
* notice: Driver should not print out this message by default.
@@ -125,7 +125,7 @@ void HTDebugHTCapability(u8* CapIE, u8* TitleString )
{
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
- PHT_CAPABILITY_ELE pCapELE;
+ PHT_CAPABILITY_ELE pCapELE;
if(!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap)))
{
@@ -153,7 +153,7 @@ void HTDebugHTCapability(u8* CapIE, u8* TitleString )
/********************************************************************************************************************
*function: This function print out each field on HT Information IE mainly from (Beacon/ProbeRsp)
* input: u8* InfoIE //Capability IE to be printed out
- * u8* TitleString //mainly print out caller function
+ * u8* TitleString //mainly print out caller function
* output: none
* return: none
* notice: Driver should not print out this message by default.
@@ -217,7 +217,7 @@ void HTDebugHTInfo(u8* InfoIE, u8* TitleString)
}
/*
-* Return: true if station in half n mode and AP supports 40 bw
+* Return: true if station in half n mode and AP supports 40 bw
*/
bool IsHTHalfNmode40Bandwidth(struct ieee80211_device* ieee)
{
@@ -228,7 +228,7 @@ bool IsHTHalfNmode40Bandwidth(struct ieee80211_device* ieee)
retValue = false;
else if(pHTInfo->bRegBW40MHz == false) // station supports 40 bw
retValue = false;
- else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
+ else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
retValue = false;
else if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ChlWidth) // ap support 40 bw
retValue = true;
@@ -245,7 +245,7 @@ bool IsHTHalfNmodeSGI(struct ieee80211_device* ieee, bool is40MHz)
if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode
retValue = false;
- else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
+ else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
retValue = false;
else if(is40MHz) // ap support 40 bw
{
@@ -265,7 +265,7 @@ bool IsHTHalfNmodeSGI(struct ieee80211_device* ieee, bool is40MHz)
return retValue;
}
-u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate)
+u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate)
{
u8 is40MHz;
@@ -291,8 +291,8 @@ u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate)
/********************************************************************************************************************
*function: This function returns current datarate.
- * input: struct ieee80211_device* ieee
- * u8 nDataRate
+ * input: struct ieee80211_device* ieee
+ * u8 nDataRate
* output: none
* return: tx rate
* notice: quite unsure about how to use this function //wb
@@ -371,7 +371,7 @@ bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee)
/********************************************************************************************************************
*function: This function returns peer IOT.
- * input: struct ieee80211_device* ieee
+ * input: struct ieee80211_device* ieee
* output: none
* return:
* notice:
@@ -408,8 +408,8 @@ void HTIOTPeerDetermine(struct ieee80211_device* ieee)
/********************************************************************************************************************
*function: Check whether driver should declare received rate up to MCS13 only since some chipset is not good
* at receiving MCS14~15 frame from some AP.
- * input: struct ieee80211_device* ieee
- * u8 * PeerMacAddr
+ * input: struct ieee80211_device* ieee
+ * u8 * PeerMacAddr
* output: none
* return: return 1 if driver should declare MCS13 only(otherwise return 0)
* *****************************************************************************************************************/
@@ -429,7 +429,7 @@ u8 HTIOTActIsDisableMCS14(struct ieee80211_device* ieee, u8* PeerMacAddr)
* PADAPTER Adapter,
*
* Output: None
-* Return: true if driver should disable MCS15
+* Return: true if driver should disable MCS15
* 2008.04.15 Emily
*/
bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee)
@@ -466,7 +466,7 @@ bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee)
* PADAPTER Adapter,
*
* Output: None
-* Return: true if driver should disable all two spatial stream packet
+* Return: true if driver should disable all two spatial stream packet
* 2008.04.21 Emily
*/
bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device* ieee, u8 *PeerMacAddr)
@@ -481,12 +481,12 @@ bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device* ieee, u8 *Pee
/********************************************************************************************************************
*function: Check whether driver should disable EDCA turbo mode
- * input: struct ieee80211_device* ieee
- * u8* PeerMacAddr
+ * input: struct ieee80211_device* ieee
+ * u8* PeerMacAddr
* output: none
* return: return 1 if driver should disable EDCA turbo mode(otherwise return 0)
* *****************************************************************************************************************/
-u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device* ieee, u8* PeerMacAddr)
+u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device* ieee, u8* PeerMacAddr)
{
u8 retValue = false; // default enable EDCA Turbo mode.
// Set specific EDCA parameter for different AP in DM handler.
@@ -539,10 +539,10 @@ void HTResetIOTSetting(
/********************************************************************************************************************
*function: Construct Capablility Element in Beacon... if HTEnable is turned on
- * input: struct ieee80211_device* ieee
- * u8* posHTCap //pointer to store Capability Ele
- * u8* len //store length of CE
- * u8 IsEncrypt //whether encrypt, needed further
+ * input: struct ieee80211_device* ieee
+ * u8* posHTCap //pointer to store Capability Ele
+ * u8* len //store length of CE
+ * u8 IsEncrypt //whether encrypt, needed further
* output: none
* return: none
* notice: posHTCap can't be null and should be initialized before.
@@ -550,7 +550,7 @@ void HTResetIOTSetting(
void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u8* len, u8 IsEncrypt)
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
- PHT_CAPABILITY_ELE pCapELE = NULL;
+ PHT_CAPABILITY_ELE pCapELE = NULL;
//u8 bIsDeclareMCS13;
if ((posHTCap == NULL) || (pHT == NULL))
@@ -571,7 +571,7 @@ void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u
//HT capability info
- pCapELE->AdvCoding = 0; // This feature is not supported now!!
+ pCapELE->AdvCoding = 0; // This feature is not supported now!!
if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
{
pCapELE->ChlWidth = 0;
@@ -581,18 +581,18 @@ void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u
pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
}
-// pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
- pCapELE->MimoPwrSave = pHT->SelfMimoPs;
+// pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
+ pCapELE->MimoPwrSave = pHT->SelfMimoPs;
pCapELE->GreenField = 0; // This feature is not supported now!!
pCapELE->ShortGI20Mhz = 1; // We can receive Short GI!!
pCapELE->ShortGI40Mhz = 1; // We can receive Short GI!!
//DbgPrint("TX HT cap/info ele BW=%d SG20=%d SG40=%d\n\r",
//pCapELE->ChlWidth, pCapELE->ShortGI20Mhz, pCapELE->ShortGI40Mhz);
- pCapELE->TxSTBC = 1;
- pCapELE->RxSTBC = 0;
+ pCapELE->TxSTBC = 1;
+ pCapELE->RxSTBC = 0;
pCapELE->DelayBA = 0; // Do not support now!!
pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE>=7935)?1:0;
- pCapELE->DssCCk = ((pHT->bRegBW40MHz)?(pHT->bRegSuppCCK?1:0):0);
+ pCapELE->DssCCk = ((pHT->bRegBW40MHz)?(pHT->bRegSuppCCK?1:0):0);
pCapELE->PSMP = 0; // Do not support now!!
pCapELE->LSigTxopProtect = 0; // Do not support now!!
@@ -603,13 +603,13 @@ void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u
if( IsEncrypt)
{
- pCapELE->MPDUDensity = 7; // 8us
- pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
+ pCapELE->MPDUDensity = 7; // 8us
+ pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
}
else
{
- pCapELE->MaxRxAMPDUFactor = 3; // 2 is for 32 K and 3 is 64K
- pCapELE->MPDUDensity = 0; // no density
+ pCapELE->MaxRxAMPDUFactor = 3; // 2 is for 32 K and 3 is 64K
+ pCapELE->MPDUDensity = 0; // no density
}
//Supported MCS set
@@ -658,10 +658,10 @@ void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u
}
/********************************************************************************************************************
*function: Construct Information Element in Beacon... if HTEnable is turned on
- * input: struct ieee80211_device* ieee
- * u8* posHTCap //pointer to store Information Ele
- * u8* len //store len of
- * u8 IsEncrypt //whether encrypt, needed further
+ * input: struct ieee80211_device* ieee
+ * u8* posHTCap //pointer to store Information Ele
+ * u8* len //store len of
+ * u8 IsEncrypt //whether encrypt, needed further
* output: none
* return: none
* notice: posHTCap can't be null and be initialized before. only AP and IBSS sta should do this
@@ -679,12 +679,12 @@ void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* le
memset(posHTInfo, 0, *len);
if ( (ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) //ap mode is not currently supported
{
- pHTInfoEle->ControlChl = ieee->current_network.channel;
- pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false)?HT_EXTCHNL_OFFSET_NO_EXT:
+ pHTInfoEle->ControlChl = ieee->current_network.channel;
+ pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false)?HT_EXTCHNL_OFFSET_NO_EXT:
(ieee->current_network.channel<=6)?
HT_EXTCHNL_OFFSET_UPPER:HT_EXTCHNL_OFFSET_LOWER);
pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
- pHTInfoEle->RIFS = 0;
+ pHTInfoEle->RIFS = 0;
pHTInfoEle->PSMPAccessOnly = 0;
pHTInfoEle->SrvIntGranularity = 0;
pHTInfoEle->OptMode = pHT->CurrentOpMode;
@@ -723,17 +723,17 @@ void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* le
* Element ID Length OUI Type1 Reserved
* 1 byte 1 byte 3 bytes 1 byte 1 byte
*
- * OUI = 0x00, 0xe0, 0x4c,
- * Type = 0x02
- * Reserved = 0x00
+ * OUI = 0x00, 0xe0, 0x4c,
+ * Type = 0x02
+ * Reserved = 0x00
*
* 2007.8.21 by Emily
*/
/********************************************************************************************************************
*function: Construct Information Element in Beacon... in RT2RT condition
- * input: struct ieee80211_device* ieee
- * u8* posRT2RTAgg //pointer to store Information Ele
- * u8* len //store len
+ * input: struct ieee80211_device* ieee
+ * u8* posRT2RTAgg //pointer to store Information Ele
+ * u8* len //store len
* output: none
* return: none
* notice:
@@ -787,8 +787,8 @@ void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg,
/********************************************************************************************************************
*function: Pick the right Rate Adaptive table to use
- * input: struct ieee80211_device* ieee
- * u8* pOperateMCS //A pointer to MCS rate bitmap
+ * input: struct ieee80211_device* ieee
+ * u8* pOperateMCS //A pointer to MCS rate bitmap
* return: always we return true
* notice:
* *****************************************************************************************************************/
@@ -840,7 +840,7 @@ u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
* Description:
* This function will get the highest speed rate in input MCS set.
*
-* /param Adapter Pionter to Adapter entity
+* /param Adapter Pionter to Adapter entity
* pMCSRateSet Pointer to MCS rate bitmap
* pMCSFilter Pointer to MCS rate filter
*
@@ -849,9 +849,9 @@ u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
*/
/********************************************************************************************************************
*function: This function will get the highest speed rate in input MCS set.
- * input: struct ieee80211_device* ieee
- * u8* pMCSRateSet //Pointer to MCS rate bitmap
- * u8* pMCSFilter //Pointer to MCS rate filter
+ * input: struct ieee80211_device* ieee
+ * u8* pMCSRateSet //Pointer to MCS rate bitmap
+ * u8* pMCSFilter //Pointer to MCS rate filter
* return: Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter
* notice:
* *****************************************************************************************************************/
@@ -1062,7 +1062,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
else
pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
if(ieee->pairwise_key_type != KEY_TYPE_NA )
- pHTInfo->CurrentMPDUDensity = 7; // 8us
+ pHTInfo->CurrentMPDUDensity = 7; // 8us
// Force TX AMSDU
// Lanhsin: mark for tmp to avoid deauth by ap from s3
@@ -1118,7 +1118,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
void HTSetConnectBwModeCallback(struct ieee80211_device* ieee);
/********************************************************************************************************************
*function: initialize HT info(struct PRT_HIGH_THROUGHPUT)
- * input: struct ieee80211_device* ieee
+ * input: struct ieee80211_device* ieee
* output: none
* return: none
* notice: This function is called when * (1) MPInitialization Phase * (2) Receiving of Deauthentication from AP
@@ -1208,13 +1208,13 @@ void HTInitializeBssDesc(PBSS_HT pBssHT)
}
/********************************************************************************************************************
*function: initialize Bss HT structure(struct PBSS_HT)
- * input: struct ieee80211_device *ieee
- * struct ieee80211_network *pNetwork //usually current network we are live in
+ * input: struct ieee80211_device *ieee
+ * struct ieee80211_network *pNetwork //usually current network we are live in
* output: none
* return: none
* notice: This function should ONLY be called before association
********************************************************************************************************************/
-void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
+void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u16 nMaxAMSDUSize;
@@ -1235,7 +1235,7 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80
pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer;
// Save HTCap and HTInfo information Element
- if(pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
+ if(pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
memcpy(pHTInfo->PeerHTCapBuf, pNetwork->bssht.bdHTCapBuf, pNetwork->bssht.bdHTCapLen);
if(pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf))
@@ -1297,7 +1297,7 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80
}
-void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
+void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf;
@@ -1364,8 +1364,8 @@ void HTUseDefaultSetting(struct ieee80211_device* ieee)
}
/********************************************************************************************************************
*function: check whether HT control field exists
- * input: struct ieee80211_device *ieee
- * u8* pFrame //coming skb->data
+ * input: struct ieee80211_device *ieee
+ * u8* pFrame //coming skb->data
* output: none
* return: return true if HT control field exists(false otherwise)
* notice:
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
index 9e4ced15edf5..2348ccd70be0 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
@@ -81,8 +81,7 @@ typedef union _QOS_CTRL_FIELD{
u16 shortData;
// WMM spec
- struct
- {
+ struct {
u8 UP:3;
u8 usRsvd1:1;
u8 EOSP:1;
@@ -92,8 +91,7 @@ typedef union _QOS_CTRL_FIELD{
}WMM;
// 802.11e: QoS data type frame sent by non-AP QSTAs.
- struct
- {
+ struct {
u8 TID:4;
u8 bIsQsize:1;// 0: BIT[8:15] is TXOP Duration Requested, 1: BIT[8:15] is Queue Size.
u8 AckPolicy:2;
@@ -102,8 +100,7 @@ typedef union _QOS_CTRL_FIELD{
}BySta;
// 802.11e: QoS data, QoS Null, and QoS Data+CF-Ack frames sent by HC.
- struct
- {
+ struct {
u8 TID:4;
u8 EOSP:1;
u8 AckPolicy:2;
@@ -112,8 +109,7 @@ typedef union _QOS_CTRL_FIELD{
}ByHc_Data;
// 802.11e: QoS (+) CF-Poll frames sent by HC.
- struct
- {
+ struct {
u8 TID:4;
u8 EOSP:1;
u8 AckPolicy:2;
@@ -133,14 +129,12 @@ typedef union _QOS_CTRL_FIELD{
typedef union _QOS_INFO_FIELD{
u8 charData;
- struct
- {
+ struct {
u8 ucParameterSetCount:4;
u8 ucReserved:4;
}WMM;
- struct
- {
+ struct {
//Ref WMM_Specification_1-1.pdf, 2006-06-13 Isaiah
u8 ucAC_VO_UAPSD:1;
u8 ucAC_VI_UAPSD:1;
@@ -152,16 +146,14 @@ typedef union _QOS_INFO_FIELD{
}ByWmmPsSta;
- struct
- {
+ struct {
//Ref WMM_Specification_1-1.pdf, 2006-06-13 Isaiah
u8 ucParameterSetCount:4;
u8 ucReserved:3;
u8 ucApUapsd:1;
}ByWmmPsAp;
- struct
- {
+ struct {
u8 ucAC3_UAPSD:1;
u8 ucAC2_UAPSD:1;
u8 ucAC1_UAPSD:1;
@@ -171,8 +163,7 @@ typedef union _QOS_INFO_FIELD{
u8 ucMoreDataAck:1;
} By11eSta;
- struct
- {
+ struct {
u8 ucParameterSetCount:4;
u8 ucQAck:1;
u8 ucQueueReq:1;
@@ -180,16 +171,14 @@ typedef union _QOS_INFO_FIELD{
u8 ucReserved:1;
} By11eAp;
- struct
- {
+ struct {
u8 ucReserved1:4;
u8 ucQAck:1;
u8 ucReserved2:2;
u8 ucMoreDataAck:1;
} ByWmmsaSta;
- struct
- {
+ struct {
u8 ucReserved1:4;
u8 ucQAck:1;
u8 ucQueueReq:1;
@@ -197,8 +186,7 @@ typedef union _QOS_INFO_FIELD{
u8 ucReserved2:1;
} ByWmmsaAp;
- struct
- {
+ struct {
u8 ucAC3_UAPSD:1;
u8 ucAC2_UAPSD:1;
u8 ucAC1_UAPSD:1;
@@ -208,8 +196,7 @@ typedef union _QOS_INFO_FIELD{
u8 ucMoreDataAck:1;
} ByAllSta;
- struct
- {
+ struct {
u8 ucParameterSetCount:4;
u8 ucQAck:1;
u8 ucQueueReq:1;
@@ -246,8 +233,7 @@ typedef u32 AC_CODING;
typedef union _ACI_AIFSN{
u8 charData;
- struct
- {
+ struct {
u8 AIFSN:4;
u8 ACM:1;
u8 ACI:2;
@@ -261,8 +247,7 @@ typedef union _ACI_AIFSN{
//
typedef union _ECW{
u8 charData;
- struct
- {
+ struct {
u8 ECWmin:4;
u8 ECWmax:4;
}f; // Field
@@ -276,8 +261,7 @@ typedef union _AC_PARAM{
u32 longData;
u8 charData[4];
- struct
- {
+ struct {
ACI_AIFSN AciAifsn;
ECW Ecw;
u16 TXOPLimit;
@@ -336,8 +320,7 @@ typedef union _QOS_TSINFO{
typedef union _TSPEC_BODY{
u8 charData[55];
- struct
- {
+ struct {
QOS_TSINFO TSInfo; //u8 TSInfo[3];
u16 NominalMSDUsize;
u16 MaxMSDUsize;
@@ -412,14 +395,14 @@ typedef union _QOS_TCLAS{
struct _TYPE_GENERAL{
u8 Priority;
- u8 ClassifierType;
- u8 Mask;
+ u8 ClassifierType;
+ u8 Mask;
} TYPE_GENERAL;
struct _TYPE0_ETH{
u8 Priority;
- u8 ClassifierType;
- u8 Mask;
+ u8 ClassifierType;
+ u8 Mask;
u8 SrcAddr[6];
u8 DstAddr[6];
u16 Type;
@@ -427,9 +410,9 @@ typedef union _QOS_TCLAS{
struct _TYPE1_IPV4{
u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 Version;
+ u8 ClassifierType;
+ u8 Mask;
+ u8 Version;
u8 SrcIP[4];
u8 DstIP[4];
u16 SrcPort;
@@ -441,9 +424,9 @@ typedef union _QOS_TCLAS{
struct _TYPE1_IPV6{
u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 Version;
+ u8 ClassifierType;
+ u8 Mask;
+ u8 Version;
u8 SrcIP[16];
u8 DstIP[16];
u16 SrcPort;
@@ -453,8 +436,8 @@ typedef union _QOS_TCLAS{
struct _TYPE2_8021Q{
u8 Priority;
- u8 ClassifierType;
- u8 Mask;
+ u8 ClassifierType;
+ u8 Mask;
u16 TagType;
} TYPE2_8021Q;
} QOS_TCLAS, *PQOS_TCLAS;
@@ -481,7 +464,7 @@ typedef struct _QOS_TSTREAM{
// "Qos control field" and "Qos info field"
//typedef struct _QOS_UAPSD{
// u8 bTriggerEnable[4];
-// u8 MaxSPLength;
+// u8 MaxSPLength;
// u8 HighestBufAC;
//} QOS_UAPSD, *PQOS_APSD;
@@ -489,7 +472,7 @@ typedef struct _QOS_TSTREAM{
// 802.11 Management frame Status Code field
//----------------------------------------------------------------------------
typedef struct _OCTET_STRING{
- u8 *Octet;
+ u8 *Octet;
u16 Length;
}OCTET_STRING, *POCTET_STRING;
@@ -512,7 +495,7 @@ typedef struct _STA_QOS{
AC_UAPSD Curr4acUapsd;
u8 bInServicePeriod;
u8 MaxSPLength;
- int NumBcnBeforeTrigger;
+ int NumBcnBeforeTrigger;
// Part 2. EDCA Parameter (perAC)
u8 * pWMMInfoEle;
@@ -523,7 +506,7 @@ typedef struct _STA_QOS{
//2 ToDo: remove the Qos Info Field and replace it by the above WMM Info element.
// By Bruce, 2008-01-30.
// Part 2. EDCA Parameter (perAC)
- QOS_INFO_FIELD QosInfoField_STA; // Maintained by STA
+ QOS_INFO_FIELD QosInfoField_STA; // Maintained by STA
QOS_INFO_FIELD QosInfoField_AP; // Retrieved from AP
AC_PARAM CurAcParameters[4];
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
index e7e26fd96395..7ed7243b1fb0 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
@@ -28,7 +28,7 @@ typedef struct _TS_COMMON_INFO{
typedef struct _TX_TS_RECORD{
TS_COMMON_INFO TsCommonInfo;
u16 TxCurSeq;
- BA_RECORD TxPendingBARecord; // For BA Originator
+ BA_RECORD TxPendingBARecord; // For BA Originator
BA_RECORD TxAdmittedBARecord; // For BA Originator
// QOS_DL_RECORD DLRecord;
u8 bAddBaReqInProgress;
@@ -53,4 +53,3 @@ typedef struct _RX_TS_RECORD {
#endif
-
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 06a9824bbff1..0310d07287a1 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -27,7 +27,7 @@ void RxPktPendingTimeout(unsigned long data)
PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
- PRX_REORDER_ENTRY pReorderEntry = NULL;
+ PRX_REORDER_ENTRY pReorderEntry = NULL;
//u32 flags = 0;
unsigned long flags = 0;
@@ -236,8 +236,8 @@ void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 I
PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8 TID, TR_SELECT TxRxSelect)
{
- //DIRECTION_VALUE dir;
- u8 dir;
+ //DIRECTION_VALUE dir;
+ u8 dir;
bool search_dir[4] = {0, 0, 0, 0};
struct list_head* psearch_list; //FIXME
PTS_COMMON_INFO pRet = NULL;
@@ -250,14 +250,14 @@ PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8
}
else
{
- search_dir[DIR_UP] = true;
+ search_dir[DIR_UP] = true;
search_dir[DIR_BI_DIR]= true;
}
}
else if(ieee->iw_mode == IW_MODE_ADHOC)
{
if(TxRxSelect == TX_DIR)
- search_dir[DIR_UP] = true;
+ search_dir[DIR_UP] = true;
else
search_dir[DIR_DOWN] = true;
}
@@ -265,7 +265,7 @@ PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8
{
if(TxRxSelect == TX_DIR)
{
- search_dir[DIR_UP] = true;
+ search_dir[DIR_UP] = true;
search_dir[DIR_BI_DIR]= true;
search_dir[DIR_DIRECT]= true;
}
@@ -450,8 +450,8 @@ bool GetTs(
pTSInfo->field.ucTSID = UP; // TSID
pTSInfo->field.ucDirection = Dir; // Direction: if there is DirectLink, this need additional consideration.
pTSInfo->field.ucAccessPolicy = 1; // Access policy
- pTSInfo->field.ucAggregation = 0; // Aggregation
- pTSInfo->field.ucPSB = 0; // Aggregation
+ pTSInfo->field.ucAggregation = 0; // Aggregation
+ pTSInfo->field.ucPSB = 0; // Aggregation
pTSInfo->field.ucUP = UP; // User priority
pTSInfo->field.ucTSInfoAckPolicy = 0; // Ack policy
pTSInfo->field.ucSchedule = 0; // Schedule
@@ -488,7 +488,7 @@ void RemoveTsEntry(
{
//#ifdef TO_DO_LIST
PRX_REORDER_ENTRY pRxReorderEntry;
- PRX_TS_RECORD pRxTS = (PRX_TS_RECORD)pTs;
+ PRX_TS_RECORD pRxTS = (PRX_TS_RECORD)pTs;
if(timer_pending(&pRxTS->RxPktPendingTimer))
del_timer_sync(&pRxTS->RxPktPendingTimer);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h b/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h
index ccf6ae763572..c3c87108ee9e 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h
@@ -52,10 +52,10 @@
#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
+#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
+#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
+#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
+#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
/*
* Miscellaneous stuff.
@@ -73,7 +73,7 @@ struct cipher_alg {
unsigned int cia_min_keysize;
unsigned int cia_max_keysize;
int (*cia_setkey)(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
+ unsigned int keylen, u32 *flags);
void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src);
};
@@ -84,16 +84,16 @@ struct digest_alg {
void (*dia_update)(void *ctx, const u8 *data, unsigned int len);
void (*dia_final)(void *ctx, u8 *out);
int (*dia_setkey)(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
+ unsigned int keylen, u32 *flags);
};
struct compress_alg {
int (*coa_init)(void *ctx);
void (*coa_exit)(void *ctx);
int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
+ u8 *dst, unsigned int *dlen);
int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
+ u8 *dst, unsigned int *dlen);
};
#define cra_cipher cra_u.cipher
@@ -139,15 +139,15 @@ struct cipher_tfm {
unsigned int cit_ivsize;
u32 cit_mode;
int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
+ const u8 *key, unsigned int keylen);
int (*cit_encrypt)(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes);
int (*cit_encrypt_iv)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv);
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, u8 *iv);
int (*cit_decrypt)(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
@@ -162,12 +162,12 @@ struct cipher_tfm {
struct digest_tfm {
void (*dit_init)(struct crypto_tfm *tfm);
void (*dit_update)(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
+ struct scatterlist *sg, unsigned int nsg);
void (*dit_final)(struct crypto_tfm *tfm, u8 *out);
void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
- unsigned int nsg, u8 *out);
+ unsigned int nsg, u8 *out);
int (*dit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
+ const u8 *key, unsigned int keylen);
#ifdef CONFIG_CRYPTO_HMAC
void *dit_hmac_block;
#endif
@@ -175,11 +175,11 @@ struct digest_tfm {
struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
};
#define crt_cipher crt_u.cipher
@@ -277,8 +277,8 @@ static inline void crypto_digest_init(struct crypto_tfm *tfm)
}
static inline void crypto_digest_update(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg)
+ struct scatterlist *sg,
+ unsigned int nsg)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
tfm->crt_digest.dit_update(tfm, sg, nsg);
@@ -291,15 +291,15 @@ static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
}
static inline void crypto_digest_digest(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg, u8 *out)
+ struct scatterlist *sg,
+ unsigned int nsg, u8 *out)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
}
static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
if (tfm->crt_digest.dit_setkey == NULL)
@@ -308,25 +308,25 @@ static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
}
static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->crt_cipher.cit_setkey(tfm, key, keylen);
}
static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
}
static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, u8 *iv)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
@@ -334,18 +334,18 @@ static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
}
static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
}
static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, u8 *iv)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
@@ -353,30 +353,30 @@ static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
}
static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
- const u8 *src, unsigned int len)
+ const u8 *src, unsigned int len)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
memcpy(tfm->crt_cipher.cit_iv, src, len);
}
static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
- u8 *dst, unsigned int len)
+ u8 *dst, unsigned int len)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
memcpy(dst, tfm->crt_cipher.cit_iv, len);
}
static inline int crypto_comp_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen);
}
static inline int crypto_comp_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen);
@@ -388,12 +388,11 @@ static inline int crypto_comp_decompress(struct crypto_tfm *tfm,
#ifdef CONFIG_CRYPTO_HMAC
void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen);
void crypto_hmac_update(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
+ struct scatterlist *sg, unsigned int nsg);
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
- unsigned int *keylen, u8 *out);
+ unsigned int *keylen, u8 *out);
void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
- struct scatterlist *sg, unsigned int nsg, u8 *out);
+ struct scatterlist *sg, unsigned int nsg, u8 *out);
#endif /* CONFIG_CRYPTO_HMAC */
#endif /* _LINUX_CRYPTO_H */
-
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index 3c515b7bc542..7e49ad8f48f6 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -95,7 +95,7 @@ u32 eprom_read(struct net_device *dev, u32 addr)
u32 ret;
ret=0;
- //enable EPROM programming
+ //enable EPROM programming
write_nic_byte_E(dev, EPROM_CMD,
(EPROM_CMD_PROGRAM<<EPROM_CMD_OPERATING_MODE_SHIFT));
force_pci_posting(dev);
diff --git a/drivers/staging/rtl8192u/r8180_pm.h b/drivers/staging/rtl8192u/r8180_pm.h
index c7d18a8b79a1..52d6fba99deb 100644
--- a/drivers/staging/rtl8192u/r8180_pm.h
+++ b/drivers/staging/rtl8192u/r8180_pm.h
@@ -1,5 +1,5 @@
/*
- Power management interface routines.
+ Power management interface routines.
Written by Mariusz Matuszek.
This code is currently just a placeholder for later work and
does not do anything useful.
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 74ff337b0583..cf9713fa8b9d 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -16,9 +16,9 @@
#include "r8190_rtl8256.h"
/*--------------------------------------------------------------------------
- * Overview: set RF band width (20M or 40M)
+ * Overview: set RF band width (20M or 40M)
* Input: struct net_device* dev
- * WIRELESS_BANDWIDTH_E Bandwidth //20M or 40M
+ * WIRELESS_BANDWIDTH_E Bandwidth //20M or 40M
* Output: NONE
* Return: NONE
* Note: 8226 support both 20M and 40 MHz
@@ -106,16 +106,16 @@ void PHY_RF8256_Config(struct net_device* dev)
*---------------------------------------------------------------------------*/
void phy_RF8256_Config_ParaFile(struct net_device* dev)
{
- u32 u4RegValue = 0;
+ u32 u4RegValue = 0;
//static s1Byte szRadioAFile[] = RTL819X_PHY_RADIO_A;
//static s1Byte szRadioBFile[] = RTL819X_PHY_RADIO_B;
//static s1Byte szRadioCFile[] = RTL819X_PHY_RADIO_C;
//static s1Byte szRadioDFile[] = RTL819X_PHY_RADIO_D;
- u8 eRFPath;
+ u8 eRFPath;
BB_REGISTER_DEFINITION_T *pPhyReg;
struct r8192_priv *priv = ieee80211_priv(dev);
u32 RegOffSetToBeCheck = 0x3;
- u32 RegValueToBeCheck = 0x7f1;
+ u32 RegValueToBeCheck = 0x7f1;
u32 RF3_Final_Value = 0;
u8 ConstRetryTimes = 5, RetryTimes = 5;
u8 ret = 0;
@@ -152,7 +152,7 @@ void phy_RF8256_Config_ParaFile(struct net_device* dev)
rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
/* Set bit number of Address and Data for RF register */
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); // Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258
+ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); // Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); // Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ???
rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E) eRFPath, 0x0, bMask12Bits, 0xbf);
@@ -309,4 +309,3 @@ void PHY_SetRF8256OFDMTxPower(struct net_device* dev, u8 powerlevel)
return;
}
-
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 57e3383cc935..e538e026b512 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -110,7 +110,7 @@ do { if(rt_global_debug_component & component) \
#define COMP_RATE BIT12 // For Rate Adaptive mechanism, 2006.07.02, by rcnjko.
#define COMP_RM BIT13 // For Radio Measurement.
#define COMP_DIG BIT14 // For DIG, 2006.09.25, by rcnjko.
-#define COMP_PHY BIT15
+#define COMP_PHY BIT15
#define COMP_CH BIT16 //channel setting debug
#define COMP_TXAGC BIT17 // For Tx power, 060928, by rcnjko.
#define COMP_HIPWR BIT18 // For High Power Mechanism, 060928, by rcnjko.
@@ -136,26 +136,26 @@ do { if(rt_global_debug_component & component) \
#define RTL819x_DEBUG
#ifdef RTL819x_DEBUG
#define assert(expr) \
- if (!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__FUNCTION__,__LINE__); \
- }
+ if (!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+ }
//wb added to debug out data buf
//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
#define RT_DEBUG_DATA(level, data, datalen) \
- do{ if ((rt_global_debug_component & (level)) == (level)) \
- { \
- int i; \
- u8* pdata = (u8*) data; \
- printk(KERN_DEBUG RTL819xU_MODULE_NAME ": %s()\n", __FUNCTION__); \
- for(i=0; i<(int)(datalen); i++) \
- { \
- printk("%2x ", pdata[i]); \
- if ((i+1)%16 == 0) printk("\n"); \
- } \
- printk("\n"); \
- } \
- } while (0)
+ do{ if ((rt_global_debug_component & (level)) == (level)) \
+ { \
+ int i; \
+ u8* pdata = (u8*) data; \
+ printk(KERN_DEBUG RTL819xU_MODULE_NAME ": %s()\n", __FUNCTION__); \
+ for(i=0; i<(int)(datalen); i++) \
+ { \
+ printk("%2x ", pdata[i]); \
+ if ((i+1)%16 == 0) printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
+ } while (0)
#else
#define assert(expr) do {} while (0)
#define RT_DEBUG_DATA(level, data, datalen) do {} while(0)
@@ -209,47 +209,47 @@ do { if(rt_global_debug_component & component) \
#define IEEE80211_WATCH_DOG_TIME 2000
#define PHY_Beacon_RSSI_SLID_WIN_MAX 10
//for txpowertracking by amy
-#define OFDM_Table_Length 19
+#define OFDM_Table_Length 19
#define CCK_Table_length 12
/* for rtl819x */
typedef struct _tx_desc_819x_usb {
- //DWORD 0
- u16 PktSize;
- u8 Offset;
- u8 Reserved0:3;
- u8 CmdInit:1;
- u8 LastSeg:1;
- u8 FirstSeg:1;
- u8 LINIP:1;
- u8 OWN:1;
-
- //DWORD 1
- u8 TxFWInfoSize;
- u8 RATid:3;
- u8 DISFB:1;
- u8 USERATE:1;
- u8 MOREFRAG:1;
- u8 NoEnc:1;
- u8 PIFS:1;
- u8 QueueSelect:5;
- u8 NoACM:1;
- u8 Reserved1:2;
- u8 SecCAMID:5;
- u8 SecDescAssign:1;
- u8 SecType:2;
-
- //DWORD 2
- u16 TxBufferSize;
- //u16 Reserved2;
- u8 ResvForPaddingLen:7;
- u8 Reserved3:1;
- u8 Reserved4;
-
- //DWORD 3, 4, 5
- u32 Reserved5;
- u32 Reserved6;
- u32 Reserved7;
+ //DWORD 0
+ u16 PktSize;
+ u8 Offset;
+ u8 Reserved0:3;
+ u8 CmdInit:1;
+ u8 LastSeg:1;
+ u8 FirstSeg:1;
+ u8 LINIP:1;
+ u8 OWN:1;
+
+ //DWORD 1
+ u8 TxFWInfoSize;
+ u8 RATid:3;
+ u8 DISFB:1;
+ u8 USERATE:1;
+ u8 MOREFRAG:1;
+ u8 NoEnc:1;
+ u8 PIFS:1;
+ u8 QueueSelect:5;
+ u8 NoACM:1;
+ u8 Reserved1:2;
+ u8 SecCAMID:5;
+ u8 SecDescAssign:1;
+ u8 SecType:2;
+
+ //DWORD 2
+ u16 TxBufferSize;
+ //u16 Reserved2;
+ u8 ResvForPaddingLen:7;
+ u8 Reserved3:1;
+ u8 Reserved4;
+
+ //DWORD 3, 4, 5
+ u32 Reserved5;
+ u32 Reserved6;
+ u32 Reserved7;
}tx_desc_819x_usb, *ptx_desc_819x_usb;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
@@ -280,7 +280,7 @@ typedef struct _tx_desc_819x_usb_aggr_subframe {
typedef struct _tx_desc_cmd_819x_usb {
- //DWORD 0
+ //DWORD 0
u16 Reserved0;
u8 Reserved1;
u8 Reserved2:3;
@@ -290,15 +290,15 @@ typedef struct _tx_desc_cmd_819x_usb {
u8 LINIP:1;
u8 OWN:1;
- //DOWRD 1
+ //DOWRD 1
//u32 Reserved3;
u8 TxFWInfoSize;
u8 Reserved3;
u8 QueueSelect;
u8 Reserved4;
- //DOWRD 2
- u16 TxBufferSize;
+ //DOWRD 2
+ u16 TxBufferSize;
u16 Reserved5;
//DWORD 3,4,5
@@ -311,34 +311,34 @@ typedef struct _tx_desc_cmd_819x_usb {
typedef struct _tx_fwinfo_819x_usb {
- //DOWRD 0
- u8 TxRate:7;
- u8 CtsEnable:1;
- u8 RtsRate:7;
- u8 RtsEnable:1;
- u8 TxHT:1;
- u8 Short:1; //Short PLCP for CCK, or short GI for 11n MCS
- u8 TxBandwidth:1; // This is used for HT MCS rate only.
- u8 TxSubCarrier:2; // This is used for legacy OFDM rate only.
- u8 STBC:2;
- u8 AllowAggregation:1;
- u8 RtsHT:1; //Interpret RtsRate field as high throughput data rate
- u8 RtsShort:1; //Short PLCP for CCK, or short GI for 11n MCS
- u8 RtsBandwidth:1; // This is used for HT MCS rate only.
- u8 RtsSubcarrier:2; // This is used for legacy OFDM rate only.
- u8 RtsSTBC:2;
- u8 EnableCPUDur:1; //Enable firmware to recalculate and assign packet duration
-
- //DWORD 1
- u32 RxMF:2;
- u32 RxAMD:3;
- u32 TxPerPktInfoFeedback:1;//1 indicate Tx info gathtered by firmware and returned by Rx Cmd
- u32 Reserved1:2;
- u32 TxAGCOffSet:4;
- u32 TxAGCSign:1;
- u32 Tx_INFO_RSVD:6;
+ //DOWRD 0
+ u8 TxRate:7;
+ u8 CtsEnable:1;
+ u8 RtsRate:7;
+ u8 RtsEnable:1;
+ u8 TxHT:1;
+ u8 Short:1; //Short PLCP for CCK, or short GI for 11n MCS
+ u8 TxBandwidth:1; // This is used for HT MCS rate only.
+ u8 TxSubCarrier:2; // This is used for legacy OFDM rate only.
+ u8 STBC:2;
+ u8 AllowAggregation:1;
+ u8 RtsHT:1; //Interpret RtsRate field as high throughput data rate
+ u8 RtsShort:1; //Short PLCP for CCK, or short GI for 11n MCS
+ u8 RtsBandwidth:1; // This is used for HT MCS rate only.
+ u8 RtsSubcarrier:2; // This is used for legacy OFDM rate only.
+ u8 RtsSTBC:2;
+ u8 EnableCPUDur:1; //Enable firmware to recalculate and assign packet duration
+
+ //DWORD 1
+ u32 RxMF:2;
+ u32 RxAMD:3;
+ u32 TxPerPktInfoFeedback:1;//1 indicate Tx info gathtered by firmware and returned by Rx Cmd
+ u32 Reserved1:2;
+ u32 TxAGCOffSet:4;
+ u32 TxAGCSign:1;
+ u32 Tx_INFO_RSVD:6;
u32 PacketID:13;
- //u32 Reserved;
+ //u32 Reserved;
}tx_fwinfo_819x_usb, *ptx_fwinfo_819x_usb;
typedef struct rtl8192_rx_info {
@@ -391,7 +391,7 @@ typedef struct _rx_desc_819x_usb_aggr_subframe{
//DWORD 2
//u4Byte Reserved3;
//DWORD 3
- //u4Byte BufferAddress;
+ //u4Byte BufferAddress;
}rx_desc_819x_usb_aggr_subframe, *prx_desc_819x_usb_aggr_subframe;
#endif
@@ -424,7 +424,7 @@ typedef struct rx_drvinfo_819x_usb{
#define MAX_802_11_HEADER_LENGTH (40 + MAX_FIRMWARE_INFORMATION_SIZE)
#define ENCRYPTION_MAX_OVERHEAD 128
#define USB_HWDESC_HEADER_LEN sizeof(tx_desc_819x_usb)
-#define TX_PACKET_SHIFT_BYTES (USB_HWDESC_HEADER_LEN + sizeof(tx_fwinfo_819x_usb))
+#define TX_PACKET_SHIFT_BYTES (USB_HWDESC_HEADER_LEN + sizeof(tx_fwinfo_819x_usb))
#define MAX_FRAGMENT_COUNT 8
#ifdef RTL8192U
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
@@ -433,7 +433,7 @@ typedef struct rx_drvinfo_819x_usb{
#define MAX_TRANSMIT_BUFFER_SIZE 8000
#endif
#else
-#define MAX_TRANSMIT_BUFFER_SIZE (1600+(MAX_802_11_HEADER_LENGTH+ENCRYPTION_MAX_OVERHEAD)*MAX_FRAGMENT_COUNT)
+#define MAX_TRANSMIT_BUFFER_SIZE (1600+(MAX_802_11_HEADER_LENGTH+ENCRYPTION_MAX_OVERHEAD)*MAX_FRAGMENT_COUNT)
#endif
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
#define TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES (sizeof(tx_desc_819x_usb_aggr_subframe) + sizeof(tx_fwinfo_819x_usb))
@@ -559,22 +559,21 @@ typedef enum _WIRELESS_MODE {
#define RTL_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
-typedef struct buffer
-{
+typedef struct buffer {
struct buffer *next;
u32 *buf;
} buffer;
typedef struct rtl_reg_debug{
- unsigned int cmd;
- struct {
- unsigned char type;
- unsigned char addr;
- unsigned char page;
- unsigned char length;
- } head;
- unsigned char buf[0xff];
+ unsigned int cmd;
+ struct {
+ unsigned char type;
+ unsigned char addr;
+ unsigned char page;
+ unsigned char length;
+ } head;
+ unsigned char buf[0xff];
}rtl_reg_debug;
@@ -600,8 +599,7 @@ typedef struct _RT_SMOOTH_DATA_4RF {
#define MAX_8192U_RX_SIZE 8192 // This maybe changed for D-cut larger aggregation size
//stats seems messed up, clean it ASAP
-typedef struct Stats
-{
+typedef struct Stats {
unsigned long txrdu;
// unsigned long rxrdu;
//unsigned long rxnolast;
@@ -711,7 +709,7 @@ typedef struct Stats
//+by amy 080507
-typedef struct ChnlAccessSetting {
+typedef struct ChnlAccessSetting {
u16 SIFS_Timer;
u16 DIFS_Timer;
u16 SlotTimeTimer;
@@ -721,35 +719,34 @@ typedef struct ChnlAccessSetting {
}*PCHANNEL_ACCESS_SETTING,CHANNEL_ACCESS_SETTING;
typedef struct _BB_REGISTER_DEFINITION{
- u32 rfintfs; // set software control: // 0x870~0x877[8 bytes]
- u32 rfintfi; // readback data: // 0x8e0~0x8e7[8 bytes]
- u32 rfintfo; // output data: // 0x860~0x86f [16 bytes]
- u32 rfintfe; // output enable: // 0x860~0x86f [16 bytes]
- u32 rf3wireOffset; // LSSI data: // 0x840~0x84f [16 bytes]
- u32 rfLSSI_Select; // BB Band Select: // 0x878~0x87f [8 bytes]
+ u32 rfintfs; // set software control: // 0x870~0x877[8 bytes]
+ u32 rfintfi; // readback data: // 0x8e0~0x8e7[8 bytes]
+ u32 rfintfo; // output data: // 0x860~0x86f [16 bytes]
+ u32 rfintfe; // output enable: // 0x860~0x86f [16 bytes]
+ u32 rf3wireOffset; // LSSI data: // 0x840~0x84f [16 bytes]
+ u32 rfLSSI_Select; // BB Band Select: // 0x878~0x87f [8 bytes]
u32 rfTxGainStage; // Tx gain stage: // 0x80c~0x80f [4 bytes]
- u32 rfHSSIPara1; // wire parameter control1 : // 0x820~0x823,0x828~0x82b, 0x830~0x833, 0x838~0x83b [16 bytes]
- u32 rfHSSIPara2; // wire parameter control2 : // 0x824~0x827,0x82c~0x82f, 0x834~0x837, 0x83c~0x83f [16 bytes]
- u32 rfSwitchControl; //Tx Rx antenna control : // 0x858~0x85f [16 bytes]
- u32 rfAGCControl1; //AGC parameter control1 : // 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63, 0xc68~0xc6b [16 bytes]
- u32 rfAGCControl2; //AGC parameter control2 : // 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67, 0xc6c~0xc6f [16 bytes]
- u32 rfRxIQImbalance; //OFDM Rx IQ imbalance matrix : // 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27, 0xc2c~0xc2f [16 bytes]
- u32 rfRxAFE; //Rx IQ DC ofset and Rx digital filter, Rx DC notch filter : // 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23, 0xc28~0xc2b [16 bytes]
- u32 rfTxIQImbalance; //OFDM Tx IQ imbalance matrix // 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93, 0xc98~0xc9b [16 bytes]
- u32 rfTxAFE; //Tx IQ DC Offset and Tx DFIR type // 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97, 0xc9c~0xc9f [16 bytes]
- u32 rfLSSIReadBack; //LSSI RF readback data // 0x8a0~0x8af [16 bytes]
+ u32 rfHSSIPara1; // wire parameter control1 : // 0x820~0x823,0x828~0x82b, 0x830~0x833, 0x838~0x83b [16 bytes]
+ u32 rfHSSIPara2; // wire parameter control2 : // 0x824~0x827,0x82c~0x82f, 0x834~0x837, 0x83c~0x83f [16 bytes]
+ u32 rfSwitchControl; //Tx Rx antenna control : // 0x858~0x85f [16 bytes]
+ u32 rfAGCControl1; //AGC parameter control1 : // 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63, 0xc68~0xc6b [16 bytes]
+ u32 rfAGCControl2; //AGC parameter control2 : // 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67, 0xc6c~0xc6f [16 bytes]
+ u32 rfRxIQImbalance; //OFDM Rx IQ imbalance matrix : // 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27, 0xc2c~0xc2f [16 bytes]
+ u32 rfRxAFE; //Rx IQ DC ofset and Rx digital filter, Rx DC notch filter : // 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23, 0xc28~0xc2b [16 bytes]
+ u32 rfTxIQImbalance; //OFDM Tx IQ imbalance matrix // 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93, 0xc98~0xc9b [16 bytes]
+ u32 rfTxAFE; //Tx IQ DC Offset and Tx DFIR type // 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97, 0xc9c~0xc9f [16 bytes]
+ u32 rfLSSIReadBack; //LSSI RF readback data // 0x8a0~0x8af [16 bytes]
}BB_REGISTER_DEFINITION_T, *PBB_REGISTER_DEFINITION_T;
typedef enum _RT_RF_TYPE_819xU{
- RF_TYPE_MIN = 0,
- RF_8225,
- RF_8256,
- RF_8258,
- RF_PSEUDO_11N = 4,
+ RF_TYPE_MIN = 0,
+ RF_8225,
+ RF_8256,
+ RF_8258,
+ RF_PSEUDO_11N = 4,
}RT_RF_TYPE_819xU, *PRT_RF_TYPE_819xU;
-typedef struct _rate_adaptive
-{
+typedef struct _rate_adaptive {
u8 rate_adaptive_disabled;
u8 ratr_state;
u16 reserve;
@@ -775,21 +772,18 @@ typedef struct _rate_adaptive
#define TxBBGainTableLength 37
#define CCKTxBBGainTableLength 23
-typedef struct _txbbgain_struct
-{
+typedef struct _txbbgain_struct {
long txbb_iq_amplifygain;
u32 txbbgain_value;
} txbbgain_struct, *ptxbbgain_struct;
-typedef struct _ccktxbbgain_struct
-{
+typedef struct _ccktxbbgain_struct {
//The Value is from a22 to a29 one Byte one time is much Safer
u8 ccktxbb_valuearray[8];
} ccktxbbgain_struct,*pccktxbbgain_struct;
-typedef struct _init_gain
-{
+typedef struct _init_gain {
u8 xaagccore1;
u8 xbagccore1;
u8 xcagccore1;
@@ -799,8 +793,7 @@ typedef struct _init_gain
} init_gain, *pinit_gain;
//by amy 0606
-typedef struct _phy_ofdm_rx_status_report_819xusb
-{
+typedef struct _phy_ofdm_rx_status_report_819xusb {
u8 trsw_gain_X[4];
u8 pwdb_all;
u8 cfosho_X[4];
@@ -816,8 +809,7 @@ typedef struct _phy_ofdm_rx_status_report_819xusb
u8 rxsc_sgien_exflg;
}phy_sts_ofdm_819xusb_t;
-typedef struct _phy_cck_rx_status_report_819xusb
-{
+typedef struct _phy_cck_rx_status_report_819xusb {
/* For CCK rate descriptor. This is a unsigned 8:1 variable. LSB bit presend
0.5. And MSB 7 bts presend a signed value. Range from -64~+63.5. */
u8 adc_pwdb_X[4];
@@ -881,8 +873,7 @@ typedef enum _tag_TxCmd_Config_Index{
TXCMD_XXXX_CTRL,
}DCMD_TXCMD_OP;
-typedef struct r8192_priv
-{
+typedef struct r8192_priv {
struct usb_device *udev;
//added for maintain info from eeprom
short epromtype;
@@ -907,7 +898,7 @@ typedef struct r8192_priv
spinlock_t irq_lock;
// spinlock_t irq_th_lock;
spinlock_t tx_lock;
- struct mutex mutex;
+ struct mutex mutex;
//spinlock_t rf_lock; //used to lock rf write operation added by wb
u16 irq_mask;
@@ -970,8 +961,8 @@ typedef struct r8192_priv
atomic_t irt_counter;//count for irq_rx_tasklet
#endif
#ifdef JACKSON_NEW_RX
- struct sk_buff **pp_rxskb;
- int rx_inx;
+ struct sk_buff **pp_rxskb;
+ int rx_inx;
#endif
/* modified by davad for Rx process */
@@ -1006,7 +997,7 @@ typedef struct r8192_priv
u8 retry_rts;
u16 rts;
- struct ChnlAccessSetting ChannelAccessSetting;
+ struct ChnlAccessSetting ChannelAccessSetting;
struct work_struct reset_wq;
/**********************************************************/
@@ -1014,7 +1005,7 @@ typedef struct r8192_priv
u16 basic_rate;
u8 short_preamble;
u8 slot_time;
- bool bDcut;
+ bool bDcut;
bool bCurrentRxAggrEnable;
u8 Rf_Mode; //add for Firmware RF -R/W switch
prt_firmware pFirmware;
@@ -1050,7 +1041,7 @@ typedef struct r8192_priv
//for set channel
u8 SwChnlInProgress;
- u8 SwChnlStage;
+ u8 SwChnlStage;
u8 SwChnlStep;
u8 SetBWModeInProgress;
HT_CHANNEL_WIDTH CurrentChannelBW;
@@ -1062,8 +1053,8 @@ typedef struct r8192_priv
// We save RF reg0 in this variable to reduce RF reading.
//
u32 RfReg0Value[4];
- u8 NumTotalRFPath;
- bool brfpath_rxenable[4];
+ u8 NumTotalRFPath;
+ bool brfpath_rxenable[4];
//RF set related
bool SetRFPowerStateInProgress;
//+by amy 080507
@@ -1104,7 +1095,7 @@ typedef struct r8192_priv
bool btxpower_tracking;
bool bcck_in_ch14;
bool btxpowerdata_readfromEEPORM;
- u16 TSSI_13dBm;
+ u16 TSSI_13dBm;
//For Backup Initial Gain
init_gain initgain_backup;
u8 DefaultInitialGain[4];
@@ -1114,17 +1105,17 @@ typedef struct r8192_priv
bool bis_cur_rdlstate;
struct timer_list fsync_timer;
bool bfsync_processing; // 500ms Fsync timer is active or not
- u32 rate_record;
- u32 rateCountDiffRecord;
+ u32 rate_record;
+ u32 rateCountDiffRecord;
u32 ContinueDiffCount;
bool bswitch_fsync;
u8 framesync;
- u32 framesyncC34;
- u8 framesyncMonitor;
- //Added by amy 080516 for RX related
- u16 nrxAMPDU_size;
- u8 nrxAMPDU_aggr_num;
+ u32 framesyncC34;
+ u8 framesyncMonitor;
+ //Added by amy 080516 for RX related
+ u16 nrxAMPDU_size;
+ u8 nrxAMPDU_aggr_num;
//by amy for gpio
bool bHwRadioOff;
@@ -1204,7 +1195,7 @@ typedef enum{
#ifdef JOHN_HWSEC
struct ssid_thread {
struct net_device *dev;
- u8 name[IW_ESSID_MAX_SIZE + 1];
+ u8 name[IW_ESSID_MAX_SIZE + 1];
};
#endif
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 56367f23112f..f7de2f6d49a5 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -80,9 +80,9 @@ double __extendsfdf2(float a) {return a;}
#include "dot11d.h"
//set here to open your trace code. //WB
u32 rt_global_debug_component = \
- // COMP_INIT |
+ // COMP_INIT |
// COMP_DBG |
- // COMP_EPROM |
+ // COMP_EPROM |
// COMP_PHY |
// COMP_RF |
// COMP_FIRMWARE |
@@ -159,23 +159,22 @@ static struct usb_driver rtl8192_usb_driver = {
.resume = rtl8192_resume, /* PM resume fn */
#else
.suspend = NULL, /* PM suspend fn */
- .resume = NULL, /* PM resume fn */
+ .resume = NULL, /* PM resume fn */
#endif
};
-typedef struct _CHANNEL_LIST
-{
+typedef struct _CHANNEL_LIST {
u8 Channel[32];
u8 Len;
}CHANNEL_LIST, *PCHANNEL_LIST;
static CHANNEL_LIST ChannelPlan[] = {
- {{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64,149,153,157,161,165},24}, //FCC
- {{1,2,3,4,5,6,7,8,9,10,11},11}, //IC
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //ETSI
+ {{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64,149,153,157,161,165},24}, //FCC
+ {{1,2,3,4,5,6,7,8,9,10,11},11}, //IC
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //ETSI
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //Spain. Change to ETSI.
- {{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //France. Change to ETSI.
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //France. Change to ETSI.
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22}, //MKK //MKK
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22},//MKK1
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //Israel.
@@ -190,57 +189,51 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
struct ieee80211_device* ieee = priv->ieee80211;
switch (channel_plan)
{
- case COUNTRY_CODE_FCC:
- case COUNTRY_CODE_IC:
- case COUNTRY_CODE_ETSI:
- case COUNTRY_CODE_SPAIN:
- case COUNTRY_CODE_FRANCE:
- case COUNTRY_CODE_MKK:
- case COUNTRY_CODE_MKK1:
- case COUNTRY_CODE_ISRAEL:
- case COUNTRY_CODE_TELEC:
- case COUNTRY_CODE_MIC:
- {
- Dot11d_Init(ieee);
- ieee->bGlobalDomain = false;
- //actually 8225 & 8256 rf chips only support B,G,24N mode
- if ((priv->rf_chip == RF_8225) || (priv->rf_chip == RF_8256))
- {
- min_chan = 1;
- max_chan = 14;
- }
- else
- {
- RT_TRACE(COMP_ERR, "unknown rf chip, can't set channel map in function:%s()\n", __FUNCTION__);
- }
- if (ChannelPlan[channel_plan].Len != 0){
- // Clear old channel map
- memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
- // Set new channel map
- for (i=0;i<ChannelPlan[channel_plan].Len;i++)
- {
- if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
+ case COUNTRY_CODE_FCC:
+ case COUNTRY_CODE_IC:
+ case COUNTRY_CODE_ETSI:
+ case COUNTRY_CODE_SPAIN:
+ case COUNTRY_CODE_FRANCE:
+ case COUNTRY_CODE_MKK:
+ case COUNTRY_CODE_MKK1:
+ case COUNTRY_CODE_ISRAEL:
+ case COUNTRY_CODE_TELEC:
+ case COUNTRY_CODE_MIC:
+ Dot11d_Init(ieee);
+ ieee->bGlobalDomain = false;
+ //actually 8225 & 8256 rf chips only support B,G,24N mode
+ if ((priv->rf_chip == RF_8225) || (priv->rf_chip == RF_8256)) {
+ min_chan = 1;
+ max_chan = 14;
+ }
+ else {
+ RT_TRACE(COMP_ERR, "unknown rf chip, can't set channel map in function:%s()\n", __FUNCTION__);
+ }
+ if (ChannelPlan[channel_plan].Len != 0) {
+ // Clear old channel map
+ memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
+ // Set new channel map
+ for (i=0;i<ChannelPlan[channel_plan].Len;i++) {
+ if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
break;
- GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
- }
+ GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
}
- break;
}
- case COUNTRY_CODE_GLOBAL_DOMAIN:
- {
- GET_DOT11D_INFO(ieee)->bEnabled = 0;//this flag enabled to follow 11d country IE setting, otherwise, it shall follow global domain settings.
- Dot11d_Reset(ieee);
- ieee->bGlobalDomain = true;
- break;
- }
- default:
- break;
+ break;
+
+ case COUNTRY_CODE_GLOBAL_DOMAIN:
+ GET_DOT11D_INFO(ieee)->bEnabled = 0;//this flag enabled to follow 11d country IE setting, otherwise, it shall follow global domain settings.
+ Dot11d_Reset(ieee);
+ ieee->bGlobalDomain = true;
+ break;
+
+ default:
+ break;
}
- return;
}
-#define rx_hal_is_cck_rate(_pdrvinfo)\
+#define rx_hal_is_cck_rate(_pdrvinfo)\
(_pdrvinfo->RxRate == DESC90_RATE1M ||\
_pdrvinfo->RxRate == DESC90_RATE2M ||\
_pdrvinfo->RxRate == DESC90_RATE5_5M ||\
@@ -516,55 +509,50 @@ static int proc_get_registers(char *page, char **start,
int max=0xff;
/* This dump the current register page */
-len += snprintf(page + len, count - len,
+ len += snprintf(page + len, count - len,
"\n####################page 0##################\n ");
- for(n=0;n<=max;)
- {
+ for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
len += snprintf(page + len, count - len,
"\nD: %2x > ",n);
- for(i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(dev,0x000|n));
+ for (i=0;i<16 && n<=max;i++,n++)
+ len += snprintf(page + len, count - len,
+ "%2x ",read_nic_byte(dev,0x000|n));
// printk("%2x ",read_nic_byte(dev,n));
}
-len += snprintf(page + len, count - len,
+ len += snprintf(page + len, count - len,
"\n####################page 1##################\n ");
- for(n=0;n<=max;)
- {
+ for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
len += snprintf(page + len, count - len,
- "\nD: %2x > ",n);
+ "\nD: %2x > ",n);
- for(i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(dev,0x100|n));
+ for (i=0;i<16 && n<=max;i++,n++)
+ len += snprintf(page + len, count - len,
+ "%2x ",read_nic_byte(dev,0x100|n));
// printk("%2x ",read_nic_byte(dev,n));
}
-len += snprintf(page + len, count - len,
+ len += snprintf(page + len, count - len,
"\n####################page 3##################\n ");
- for(n=0;n<=max;)
- {
+ for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
len += snprintf(page + len, count - len,
"\nD: %2x > ",n);
for(i=0;i<16 && n<=max;i++,n++)
- len += snprintf(page + len, count - len,
- "%2x ",read_nic_byte(dev,0x300|n));
+ len += snprintf(page + len, count - len,
+ "%2x ",read_nic_byte(dev,0x300|n));
// printk("%2x ",read_nic_byte(dev,n));
}
-
len += snprintf(page + len, count - len,"\n");
*eof = 1;
return len;
-
}
@@ -1272,8 +1260,8 @@ struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct r8192_priv *priv = ieee80211_priv(dev);
- cb_desc *tcb_desc = NULL;
- u8 i;
+ cb_desc *tcb_desc = NULL;
+ u8 i;
u32 TotalLength;
struct sk_buff *skb;
struct sk_buff *agg_skb;
@@ -1444,7 +1432,7 @@ u8 DrvAggr_GetAggregatibleList(struct net_device *dev, struct sk_buff *skb,
struct ieee80211_device *ieee = netdev_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u16 nMaxAggrNum = pHTInfo->UsbTxAggrNum;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
+ cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 QueueID = tcb_desc->queue_index;
do {
@@ -1812,7 +1800,7 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
int status;
struct urb *tx_urb;
//int urb_buf_len;
- unsigned int idx_pipe;
+ unsigned int idx_pipe;
tx_desc_cmd_819x_usb *pdesc = (tx_desc_cmd_819x_usb *)skb->data;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
@@ -1876,43 +1864,43 @@ u8 MapHwQueueToFirmwareQueue(u8 QueueID)
u8 QueueSelect = 0x0; //defualt set to
switch(QueueID) {
- case BE_QUEUE:
- QueueSelect = QSLT_BE; //or QSelect = pTcb->priority;
- break;
+ case BE_QUEUE:
+ QueueSelect = QSLT_BE; //or QSelect = pTcb->priority;
+ break;
- case BK_QUEUE:
- QueueSelect = QSLT_BK; //or QSelect = pTcb->priority;
- break;
+ case BK_QUEUE:
+ QueueSelect = QSLT_BK; //or QSelect = pTcb->priority;
+ break;
- case VO_QUEUE:
- QueueSelect = QSLT_VO; //or QSelect = pTcb->priority;
- break;
+ case VO_QUEUE:
+ QueueSelect = QSLT_VO; //or QSelect = pTcb->priority;
+ break;
- case VI_QUEUE:
- QueueSelect = QSLT_VI; //or QSelect = pTcb->priority;
- break;
- case MGNT_QUEUE:
- QueueSelect = QSLT_MGNT;
- break;
+ case VI_QUEUE:
+ QueueSelect = QSLT_VI; //or QSelect = pTcb->priority;
+ break;
+ case MGNT_QUEUE:
+ QueueSelect = QSLT_MGNT;
+ break;
- case BEACON_QUEUE:
- QueueSelect = QSLT_BEACON;
- break;
+ case BEACON_QUEUE:
+ QueueSelect = QSLT_BEACON;
+ break;
- // TODO: 2006.10.30 mark other queue selection until we verify it is OK
- // TODO: Remove Assertions
+ // TODO: 2006.10.30 mark other queue selection until we verify it is OK
+ // TODO: Remove Assertions
//#if (RTL819X_FPGA_VER & RTL819X_FPGA_GUANGAN_070502)
- case TXCMD_QUEUE:
- QueueSelect = QSLT_CMD;
- break;
+ case TXCMD_QUEUE:
+ QueueSelect = QSLT_CMD;
+ break;
//#endif
- case HIGH_QUEUE:
- QueueSelect = QSLT_HIGH;
- break;
+ case HIGH_QUEUE:
+ QueueSelect = QSLT_HIGH;
+ break;
- default:
- RT_TRACE(COMP_ERR, "TransmitTCB(): Impossible Queue Selection: %d \n", QueueID);
- break;
+ default:
+ RT_TRACE(COMP_ERR, "TransmitTCB(): Impossible Queue Selection: %d \n", QueueID);
+ break;
}
return QueueSelect;
}
@@ -1922,39 +1910,39 @@ u8 MRateToHwRate8190Pci(u8 rate)
u8 ret = DESC90_RATE1M;
switch(rate) {
- case MGN_1M: ret = DESC90_RATE1M; break;
- case MGN_2M: ret = DESC90_RATE2M; break;
- case MGN_5_5M: ret = DESC90_RATE5_5M; break;
- case MGN_11M: ret = DESC90_RATE11M; break;
- case MGN_6M: ret = DESC90_RATE6M; break;
- case MGN_9M: ret = DESC90_RATE9M; break;
- case MGN_12M: ret = DESC90_RATE12M; break;
- case MGN_18M: ret = DESC90_RATE18M; break;
- case MGN_24M: ret = DESC90_RATE24M; break;
- case MGN_36M: ret = DESC90_RATE36M; break;
- case MGN_48M: ret = DESC90_RATE48M; break;
- case MGN_54M: ret = DESC90_RATE54M; break;
-
- // HT rate since here
- case MGN_MCS0: ret = DESC90_RATEMCS0; break;
- case MGN_MCS1: ret = DESC90_RATEMCS1; break;
- case MGN_MCS2: ret = DESC90_RATEMCS2; break;
- case MGN_MCS3: ret = DESC90_RATEMCS3; break;
- case MGN_MCS4: ret = DESC90_RATEMCS4; break;
- case MGN_MCS5: ret = DESC90_RATEMCS5; break;
- case MGN_MCS6: ret = DESC90_RATEMCS6; break;
- case MGN_MCS7: ret = DESC90_RATEMCS7; break;
- case MGN_MCS8: ret = DESC90_RATEMCS8; break;
- case MGN_MCS9: ret = DESC90_RATEMCS9; break;
- case MGN_MCS10: ret = DESC90_RATEMCS10; break;
- case MGN_MCS11: ret = DESC90_RATEMCS11; break;
- case MGN_MCS12: ret = DESC90_RATEMCS12; break;
- case MGN_MCS13: ret = DESC90_RATEMCS13; break;
- case MGN_MCS14: ret = DESC90_RATEMCS14; break;
- case MGN_MCS15: ret = DESC90_RATEMCS15; break;
- case (0x80|0x20): ret = DESC90_RATEMCS32; break;
-
- default: break;
+ case MGN_1M: ret = DESC90_RATE1M; break;
+ case MGN_2M: ret = DESC90_RATE2M; break;
+ case MGN_5_5M: ret = DESC90_RATE5_5M; break;
+ case MGN_11M: ret = DESC90_RATE11M; break;
+ case MGN_6M: ret = DESC90_RATE6M; break;
+ case MGN_9M: ret = DESC90_RATE9M; break;
+ case MGN_12M: ret = DESC90_RATE12M; break;
+ case MGN_18M: ret = DESC90_RATE18M; break;
+ case MGN_24M: ret = DESC90_RATE24M; break;
+ case MGN_36M: ret = DESC90_RATE36M; break;
+ case MGN_48M: ret = DESC90_RATE48M; break;
+ case MGN_54M: ret = DESC90_RATE54M; break;
+
+ // HT rate since here
+ case MGN_MCS0: ret = DESC90_RATEMCS0; break;
+ case MGN_MCS1: ret = DESC90_RATEMCS1; break;
+ case MGN_MCS2: ret = DESC90_RATEMCS2; break;
+ case MGN_MCS3: ret = DESC90_RATEMCS3; break;
+ case MGN_MCS4: ret = DESC90_RATEMCS4; break;
+ case MGN_MCS5: ret = DESC90_RATEMCS5; break;
+ case MGN_MCS6: ret = DESC90_RATEMCS6; break;
+ case MGN_MCS7: ret = DESC90_RATEMCS7; break;
+ case MGN_MCS8: ret = DESC90_RATEMCS8; break;
+ case MGN_MCS9: ret = DESC90_RATEMCS9; break;
+ case MGN_MCS10: ret = DESC90_RATEMCS10; break;
+ case MGN_MCS11: ret = DESC90_RATEMCS11; break;
+ case MGN_MCS12: ret = DESC90_RATEMCS12; break;
+ case MGN_MCS13: ret = DESC90_RATEMCS13; break;
+ case MGN_MCS14: ret = DESC90_RATEMCS14; break;
+ case MGN_MCS15: ret = DESC90_RATEMCS15; break;
+ case (0x80|0x20): ret = DESC90_RATEMCS32; break;
+
+ default: break;
}
return ret;
}
@@ -2182,7 +2170,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
dev->trans_start = jiffies;
atomic_inc(&priv->tx_pending[tcb_desc->queue_index]);
return 0;
- }else{
+ } else {
RT_TRACE(COMP_ERR, "Error TX URB %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
return -1;
@@ -2320,33 +2308,33 @@ void rtl8192_link_change(struct net_device *dev)
// RT_TRACE(COMP_CH, "========>%s(), chan:%d\n", __FUNCTION__, priv->chan);
// rtl8192_set_chan(dev, priv->chan);
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
- {
- u32 reg = 0;
- reg = read_nic_dword(dev, RCR);
- if (priv->ieee80211->state == IEEE80211_LINKED)
- priv->ReceiveConfig = reg |= RCR_CBSSID;
- else
- priv->ReceiveConfig = reg &= ~RCR_CBSSID;
- write_nic_dword(dev, RCR, reg);
- }
+ {
+ u32 reg = 0;
+ reg = read_nic_dword(dev, RCR);
+ if (priv->ieee80211->state == IEEE80211_LINKED)
+ priv->ReceiveConfig = reg |= RCR_CBSSID;
+ else
+ priv->ReceiveConfig = reg &= ~RCR_CBSSID;
+ write_nic_dword(dev, RCR, reg);
+ }
// rtl8192_set_rxconf(dev);
}
static struct ieee80211_qos_parameters def_qos_parameters = {
- {3,3,3,3},/* cw_min */
- {7,7,7,7},/* cw_max */
- {2,2,2,2},/* aifs */
- {0,0,0,0},/* flags */
- {0,0,0,0} /* tx_op_limit */
+ {3,3,3,3},/* cw_min */
+ {7,7,7,7},/* cw_max */
+ {2,2,2,2},/* aifs */
+ {0,0,0,0},/* flags */
+ {0,0,0,0} /* tx_op_limit */
};
void rtl8192_update_beacon(struct work_struct * work)
{
- struct r8192_priv *priv = container_of(work, struct r8192_priv, update_beacon_wq.work);
- struct net_device *dev = priv->ieee80211->dev;
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct r8192_priv *priv = container_of(work, struct r8192_priv, update_beacon_wq.work);
+ struct net_device *dev = priv->ieee80211->dev;
+ struct ieee80211_device* ieee = priv->ieee80211;
struct ieee80211_network* net = &ieee->current_network;
if (ieee->pHTInfo->bCurrentHTSupport)
@@ -2717,7 +2705,7 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
priv->bDisableNormalResetCheck = false;
priv->force_reset = false;
- priv->ieee80211->FwRWRF = 0; //we don't use FW read/write RF until stable firmware is available.
+ priv->ieee80211->FwRWRF = 0; //we don't use FW read/write RF until stable firmware is available.
priv->ieee80211->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
priv->ieee80211->iw_mode = IW_MODE_INFRA;
priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
@@ -2778,11 +2766,11 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
#ifdef TO_DO_LIST
if(Adapter->bInHctTest)
pHalData->ReceiveConfig = pHalData->CSMethod |
- RCR_AMF | RCR_ADF | //RCR_AAP | //accept management/data
+ RCR_AMF | RCR_ADF | //RCR_AAP | //accept management/data
//guangan200710
RCR_ACF | //accept control frame for SW AP needs PS-poll, 2005.07.07, by rcnjko.
RCR_AB | RCR_AM | RCR_APM | //accept BC/MC/UC
- RCR_AICV | RCR_ACRC32 | //accept ICV/CRC error packet
+ RCR_AICV | RCR_ACRC32 | //accept ICV/CRC error packet
((u32)7<<RCR_MXDMA_OFFSET) | // Max DMA Burst Size per Rx DMA Burst, 7: unlimited.
(pHalData->EarlyRxThreshold<<RCR_FIFO_OFFSET) | // Rx FIFO Threshold, 7: No Rx threshold.
(pHalData->EarlyRxThreshold == 7 ? RCR_OnlyErlPkt:0);
@@ -2793,7 +2781,7 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
RCR_AMF | RCR_ADF | //accept management/data
RCR_ACF | //accept control frame for SW AP needs PS-poll, 2005.07.07, by rcnjko.
RCR_AB | RCR_AM | RCR_APM | //accept BC/MC/UC
- //RCR_AICV | RCR_ACRC32 | //accept ICV/CRC error packet
+ //RCR_AICV | RCR_ACRC32 | //accept ICV/CRC error packet
((u32)7<<RCR_MXDMA_OFFSET)| // Max DMA Burst Size per Rx DMA Burst, 7: unlimited.
(priv->EarlyRxThreshold<<RX_FIFO_THRESHOLD_SHIFT) | // Rx FIFO Threshold, 7: No Rx threshold.
(priv->EarlyRxThreshold == 7 ? RCR_ONLYERLPKT:0);
@@ -3549,7 +3537,7 @@ HalTxCheckStuck819xUsb(
)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u16 RegTxCounter = read_nic_word(dev, 0x128);
+ u16 RegTxCounter = read_nic_word(dev, 0x128);
bool bStuck = FALSE;
RT_TRACE(COMP_RESET,"%s():RegTxCounter is %d,TxCounter is %d\n",__FUNCTION__,RegTxCounter,priv->TxCounter);
if(priv->TxCounter==RegTxCounter)
@@ -3583,16 +3571,16 @@ TxCheckStuck(struct net_device *dev)
// spin_lock_irqsave(&priv->ieee80211->lock,flags);
for (QueueID = 0; QueueID<=BEACON_QUEUE;QueueID ++)
{
- if(QueueID == TXCMD_QUEUE)
- continue;
+ if(QueueID == TXCMD_QUEUE)
+ continue;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
if((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_drv_aggQ[QueueID]) == 0))
#else
- if((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0))
+ if((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0))
#endif
- continue;
+ continue;
- bCheckFwTxCnt = true;
+ bCheckFwTxCnt = true;
}
// PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
// spin_unlock_irqrestore(&priv->ieee80211->lock,flags);
@@ -3611,10 +3599,10 @@ TxCheckStuck(struct net_device *dev)
bool
HalRxCheckStuck819xUsb(struct net_device *dev)
{
- u16 RegRxCounter = read_nic_word(dev, 0x130);
+ u16 RegRxCounter = read_nic_word(dev, 0x130);
struct r8192_priv *priv = ieee80211_priv(dev);
bool bStuck = FALSE;
- static u8 rx_chk_cnt = 0;
+ static u8 rx_chk_cnt;
RT_TRACE(COMP_RESET,"%s(): RegRxCounter is %d,RxCounter is %d\n",__FUNCTION__,RegRxCounter,priv->RxCounter);
// If rssi is small, we should check rx for long time because of bad rx.
// or maybe it will continuous silent reset every 2 seconds.
@@ -3718,7 +3706,7 @@ rtl819x_ifcheck_resetornot(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
RESET_TYPE TxResetType = RESET_TYPE_NORESET;
RESET_TYPE RxResetType = RESET_TYPE_NORESET;
- RT_RF_POWER_STATE rfState;
+ RT_RF_POWER_STATE rfState;
rfState = priv->ieee80211->eRFPowerState;
@@ -4006,18 +3994,18 @@ RESET_START:
void CAM_read_entry(
struct net_device *dev,
- u32 iIndex
+ u32 iIndex
)
{
- u32 target_command=0;
+ u32 target_command=0;
u32 target_content=0;
u8 entry_i=0;
u32 ulStatus;
s32 i=100;
// printk("=======>start read CAM\n");
- for(entry_i=0;entry_i<CAM_CONTENT_COUNT;entry_i++)
- {
- // polling bit, and No Write enable, and address
+ for(entry_i=0;entry_i<CAM_CONTENT_COUNT;entry_i++)
+ {
+ // polling bit, and No Write enable, and address
target_command= entry_i+CAM_CONTENT_COUNT*iIndex;
target_command= target_command | BIT31;
@@ -4049,7 +4037,7 @@ void rtl819x_update_rxcounts(
u32* TotalRxDataNum
)
{
- u16 SlotIndex;
+ u16 SlotIndex;
u8 i;
*TotalRxBcnNum = 0;
@@ -4072,7 +4060,7 @@ extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
struct net_device *dev = priv->ieee80211->dev;
struct ieee80211_device* ieee = priv->ieee80211;
RESET_TYPE ResetType = RESET_TYPE_NORESET;
- static u8 check_reset_cnt=0;
+ static u8 check_reset_cnt;
bool bBusyTraffic = false;
if(!priv->up)
@@ -4111,7 +4099,7 @@ extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
notify_wx_assoc_event(priv->ieee80211);
RemovePeerTS(priv->ieee80211,priv->ieee80211->current_network.bssid);
priv->ieee80211->link_change(dev);
- queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
+ queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
}
}
@@ -4122,7 +4110,7 @@ extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
//check if reset the driver
if(check_reset_cnt++ >= 3)
{
- ResetType = rtl819x_ifcheck_resetornot(dev);
+ ResetType = rtl819x_ifcheck_resetornot(dev);
check_reset_cnt = 3;
//DbgPrint("Start to check silent reset\n");
}
@@ -4365,66 +4353,66 @@ int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
switch (cmd) {
- case RTL_IOCTL_WPA_SUPPLICANT:
+ case RTL_IOCTL_WPA_SUPPLICANT:
//parse here for HW security
- if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION)
+ if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION)
+ {
+ if (ipw->u.crypt.set_tx)
{
- if (ipw->u.crypt.set_tx)
+ if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
+ ieee->pairwise_key_type = KEY_TYPE_CCMP;
+ else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
+ ieee->pairwise_key_type = KEY_TYPE_TKIP;
+ else if (strcmp(ipw->u.crypt.alg, "WEP") == 0)
{
- if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
- ieee->pairwise_key_type = KEY_TYPE_CCMP;
- else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
- ieee->pairwise_key_type = KEY_TYPE_TKIP;
- else if (strcmp(ipw->u.crypt.alg, "WEP") == 0)
- {
- if (ipw->u.crypt.key_len == 13)
- ieee->pairwise_key_type = KEY_TYPE_WEP104;
- else if (ipw->u.crypt.key_len == 5)
- ieee->pairwise_key_type = KEY_TYPE_WEP40;
- }
- else
- ieee->pairwise_key_type = KEY_TYPE_NA;
-
- if (ieee->pairwise_key_type)
- {
- memcpy((u8*)key, ipw->u.crypt.key, 16);
- EnableHWSecurityConfig8192(dev);
- //we fill both index entry and 4th entry for pairwise key as in IPW interface, adhoc will only get here, so we need index entry for its default key serching!
- //added by WB.
- setKey(dev, 4, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8*)ieee->ap_mac_addr, 0, key);
- if (ieee->auth_mode != 2)
- setKey(dev, ipw->u.crypt.idx, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8*)ieee->ap_mac_addr, 0, key);
- }
+ if (ipw->u.crypt.key_len == 13)
+ ieee->pairwise_key_type = KEY_TYPE_WEP104;
+ else if (ipw->u.crypt.key_len == 5)
+ ieee->pairwise_key_type = KEY_TYPE_WEP40;
}
- else //if (ipw->u.crypt.idx) //group key use idx > 0
+ else
+ ieee->pairwise_key_type = KEY_TYPE_NA;
+
+ if (ieee->pairwise_key_type)
{
memcpy((u8*)key, ipw->u.crypt.key, 16);
- if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
- ieee->group_key_type= KEY_TYPE_CCMP;
- else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
- ieee->group_key_type = KEY_TYPE_TKIP;
- else if (strcmp(ipw->u.crypt.alg, "WEP") == 0)
- {
- if (ipw->u.crypt.key_len == 13)
- ieee->group_key_type = KEY_TYPE_WEP104;
- else if (ipw->u.crypt.key_len == 5)
- ieee->group_key_type = KEY_TYPE_WEP40;
- }
- else
- ieee->group_key_type = KEY_TYPE_NA;
+ EnableHWSecurityConfig8192(dev);
+ //we fill both index entry and 4th entry for pairwise key as in IPW interface, adhoc will only get here, so we need index entry for its default key serching!
+ //added by WB.
+ setKey(dev, 4, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8*)ieee->ap_mac_addr, 0, key);
+ if (ieee->auth_mode != 2)
+ setKey(dev, ipw->u.crypt.idx, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8*)ieee->ap_mac_addr, 0, key);
+ }
+ }
+ else //if (ipw->u.crypt.idx) //group key use idx > 0
+ {
+ memcpy((u8*)key, ipw->u.crypt.key, 16);
+ if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
+ ieee->group_key_type= KEY_TYPE_CCMP;
+ else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
+ ieee->group_key_type = KEY_TYPE_TKIP;
+ else if (strcmp(ipw->u.crypt.alg, "WEP") == 0)
+ {
+ if (ipw->u.crypt.key_len == 13)
+ ieee->group_key_type = KEY_TYPE_WEP104;
+ else if (ipw->u.crypt.key_len == 5)
+ ieee->group_key_type = KEY_TYPE_WEP40;
+ }
+ else
+ ieee->group_key_type = KEY_TYPE_NA;
- if (ieee->group_key_type)
- {
- setKey( dev,
- ipw->u.crypt.idx,
- ipw->u.crypt.idx, //KeyIndex
- ieee->group_key_type, //KeyType
- broadcast_addr, //MacAddr
- 0, //DefaultKey
- key); //KeyContent
- }
+ if (ieee->group_key_type)
+ {
+ setKey( dev,
+ ipw->u.crypt.idx,
+ ipw->u.crypt.idx, //KeyIndex
+ ieee->group_key_type, //KeyType
+ broadcast_addr, //MacAddr
+ 0, //DefaultKey
+ key); //KeyContent
}
}
+ }
#ifdef JOHN_HWSEC_DEBUG
//john's test 0711
printk("@@ wrq->u pointer = ");
@@ -4437,7 +4425,7 @@ int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = ieee80211_wpa_supplicant_ioctl(priv->ieee80211, &wrq->u.data);
break;
- default:
+ default:
ret = -EOPNOTSUPP;
break;
}
@@ -4454,49 +4442,49 @@ u8 HwRateToMRate90(bool bIsHT, u8 rate)
if(!bIsHT) {
switch(rate) {
- case DESC90_RATE1M: ret_rate = MGN_1M; break;
- case DESC90_RATE2M: ret_rate = MGN_2M; break;
- case DESC90_RATE5_5M: ret_rate = MGN_5_5M; break;
- case DESC90_RATE11M: ret_rate = MGN_11M; break;
- case DESC90_RATE6M: ret_rate = MGN_6M; break;
- case DESC90_RATE9M: ret_rate = MGN_9M; break;
- case DESC90_RATE12M: ret_rate = MGN_12M; break;
- case DESC90_RATE18M: ret_rate = MGN_18M; break;
- case DESC90_RATE24M: ret_rate = MGN_24M; break;
- case DESC90_RATE36M: ret_rate = MGN_36M; break;
- case DESC90_RATE48M: ret_rate = MGN_48M; break;
- case DESC90_RATE54M: ret_rate = MGN_54M; break;
-
- default:
- ret_rate = 0xff;
- RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n", rate, bIsHT);
- break;
+ case DESC90_RATE1M: ret_rate = MGN_1M; break;
+ case DESC90_RATE2M: ret_rate = MGN_2M; break;
+ case DESC90_RATE5_5M: ret_rate = MGN_5_5M; break;
+ case DESC90_RATE11M: ret_rate = MGN_11M; break;
+ case DESC90_RATE6M: ret_rate = MGN_6M; break;
+ case DESC90_RATE9M: ret_rate = MGN_9M; break;
+ case DESC90_RATE12M: ret_rate = MGN_12M; break;
+ case DESC90_RATE18M: ret_rate = MGN_18M; break;
+ case DESC90_RATE24M: ret_rate = MGN_24M; break;
+ case DESC90_RATE36M: ret_rate = MGN_36M; break;
+ case DESC90_RATE48M: ret_rate = MGN_48M; break;
+ case DESC90_RATE54M: ret_rate = MGN_54M; break;
+
+ default:
+ ret_rate = 0xff;
+ RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n", rate, bIsHT);
+ break;
}
} else {
switch(rate) {
- case DESC90_RATEMCS0: ret_rate = MGN_MCS0; break;
- case DESC90_RATEMCS1: ret_rate = MGN_MCS1; break;
- case DESC90_RATEMCS2: ret_rate = MGN_MCS2; break;
- case DESC90_RATEMCS3: ret_rate = MGN_MCS3; break;
- case DESC90_RATEMCS4: ret_rate = MGN_MCS4; break;
- case DESC90_RATEMCS5: ret_rate = MGN_MCS5; break;
- case DESC90_RATEMCS6: ret_rate = MGN_MCS6; break;
- case DESC90_RATEMCS7: ret_rate = MGN_MCS7; break;
- case DESC90_RATEMCS8: ret_rate = MGN_MCS8; break;
- case DESC90_RATEMCS9: ret_rate = MGN_MCS9; break;
- case DESC90_RATEMCS10: ret_rate = MGN_MCS10; break;
- case DESC90_RATEMCS11: ret_rate = MGN_MCS11; break;
- case DESC90_RATEMCS12: ret_rate = MGN_MCS12; break;
- case DESC90_RATEMCS13: ret_rate = MGN_MCS13; break;
- case DESC90_RATEMCS14: ret_rate = MGN_MCS14; break;
- case DESC90_RATEMCS15: ret_rate = MGN_MCS15; break;
- case DESC90_RATEMCS32: ret_rate = (0x80|0x20); break;
-
- default:
- ret_rate = 0xff;
- RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n",rate, bIsHT);
- break;
+ case DESC90_RATEMCS0: ret_rate = MGN_MCS0; break;
+ case DESC90_RATEMCS1: ret_rate = MGN_MCS1; break;
+ case DESC90_RATEMCS2: ret_rate = MGN_MCS2; break;
+ case DESC90_RATEMCS3: ret_rate = MGN_MCS3; break;
+ case DESC90_RATEMCS4: ret_rate = MGN_MCS4; break;
+ case DESC90_RATEMCS5: ret_rate = MGN_MCS5; break;
+ case DESC90_RATEMCS6: ret_rate = MGN_MCS6; break;
+ case DESC90_RATEMCS7: ret_rate = MGN_MCS7; break;
+ case DESC90_RATEMCS8: ret_rate = MGN_MCS8; break;
+ case DESC90_RATEMCS9: ret_rate = MGN_MCS9; break;
+ case DESC90_RATEMCS10: ret_rate = MGN_MCS10; break;
+ case DESC90_RATEMCS11: ret_rate = MGN_MCS11; break;
+ case DESC90_RATEMCS12: ret_rate = MGN_MCS12; break;
+ case DESC90_RATEMCS13: ret_rate = MGN_MCS13; break;
+ case DESC90_RATEMCS14: ret_rate = MGN_MCS14; break;
+ case DESC90_RATEMCS15: ret_rate = MGN_MCS15; break;
+ case DESC90_RATEMCS32: ret_rate = (0x80|0x20); break;
+
+ default:
+ ret_rate = 0xff;
+ RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n",rate, bIsHT);
+ break;
}
}
@@ -4555,12 +4543,12 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
u8 rfpath;
u32 nspatial_stream, tmp_val;
//u8 i;
- static u32 slide_rssi_index=0, slide_rssi_statistics=0;
- static u32 slide_evm_index=0, slide_evm_statistics=0;
- static u32 last_rssi=0, last_evm=0;
+ static u32 slide_rssi_index, slide_rssi_statistics;
+ static u32 slide_evm_index, slide_evm_statistics;
+ static u32 last_rssi, last_evm;
- static u32 slide_beacon_adc_pwdb_index=0, slide_beacon_adc_pwdb_statistics=0;
- static u32 last_beacon_adc_pwdb=0;
+ static u32 slide_beacon_adc_pwdb_index, slide_beacon_adc_pwdb_statistics;
+ static u32 last_beacon_adc_pwdb;
struct ieee80211_hdr_3addr *hdr;
u16 sc ;
@@ -4578,11 +4566,8 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
{
// if previous packet is not aggregated packet
bcheck = true;
- }else
- {
}
-
if(slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX)
{
slide_rssi_statistics = PHY_RSSI_SLID_WIN_MAX;
@@ -4808,9 +4793,9 @@ rtl819x_evm_dbtopercentage(
ret_val = value;
if(ret_val >= 0)
- ret_val = 0;
+ ret_val = 0;
if(ret_val <= -33)
- ret_val = -33;
+ ret_val = -33;
ret_val = 0 - ret_val;
ret_val*=3;
if(ret_val == 99)
@@ -4819,7 +4804,7 @@ rtl819x_evm_dbtopercentage(
}
//
// Description:
-// We want good-looking for signal strength/quality
+// We want good-looking for signal strength/quality
// 2007/7/19 01:09, by cosa.
//
long
@@ -5203,7 +5188,7 @@ void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
* Overview: Record the received data rate
*
* Input:
-* struct net_device *dev
+* struct net_device *dev
* struct ieee80211_rx_stats *stats
*
* Output:
@@ -5581,7 +5566,7 @@ rtl819xusb_process_received_packet(
)
{
// bool bfreerfd=false, bqueued=false;
- u8* frame;
+ u8* frame;
u16 frame_len=0;
struct r8192_priv *priv = ieee80211_priv(dev);
// u8 index = 0;
@@ -5682,10 +5667,6 @@ void rtl8192_rx_cmd(struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
- else
- ;
-
-
}
void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
@@ -5697,25 +5678,25 @@ void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
info = (struct rtl8192_rx_info *)skb->cb;
switch (info->out_pipe) {
/* Nomal packet pipe */
- case 3:
- //RT_TRACE(COMP_RECV, "normal in-pipe index(%d)\n",info->out_pipe);
- priv->IrpPendingCount--;
- rtl8192_rx_nomal(skb);
- break;
+ case 3:
+ //RT_TRACE(COMP_RECV, "normal in-pipe index(%d)\n",info->out_pipe);
+ priv->IrpPendingCount--;
+ rtl8192_rx_nomal(skb);
+ break;
- /* Command packet pipe */
- case 9:
- RT_TRACE(COMP_RECV, "command in-pipe index(%d)\n",\
- info->out_pipe);
+ /* Command packet pipe */
+ case 9:
+ RT_TRACE(COMP_RECV, "command in-pipe index(%d)\n",\
+ info->out_pipe);
- rtl8192_rx_cmd(skb);
- break;
+ rtl8192_rx_cmd(skb);
+ break;
- default: /* should never get here! */
- RT_TRACE(COMP_ERR, "Unknown in-pipe index(%d)\n",\
- info->out_pipe);
- dev_kfree_skb(skb);
- break;
+ default: /* should never get here! */
+ RT_TRACE(COMP_ERR, "Unknown in-pipe index(%d)\n",\
+ info->out_pipe);
+ dev_kfree_skb(skb);
+ break;
}
}
@@ -6007,7 +5988,7 @@ void setKey( struct net_device *dev,
// printk("setkey cam =%8x\n", read_cam(dev, i+6*EntryNo));
}
else if(i==1){//MAC
- TargetContent = (u32)(*(MacAddr+2)) |
+ TargetContent = (u32)(*(MacAddr+2)) |
(u32)(*(MacAddr+3)) << 8|
(u32)(*(MacAddr+4)) << 16|
(u32)(*(MacAddr+5)) << 24;
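
A recurring cleanup in the hunks above and below is dropping explicit `= 0` initializers from function-local static variables. This is an illustrative, standalone C sketch (not part of the patch) of why that is safe: objects with static storage duration are zero-initialized by the language, so the explicit initializer is redundant.

#include <stdio.h>

static unsigned long counter_a;     /* implicitly zero-initialized */
static unsigned long counter_b = 0; /* explicit, but redundant */

static void bump(void)
{
	/* Function-local statics start at zero on first use as well. */
	static unsigned int calls;

	calls++;
	printf("calls=%u a=%lu b=%lu\n", calls, counter_a, counter_b);
}

int main(void)
{
	bump();
	bump();	/* prints calls=2, both counters still 0 */
	return 0;
}
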
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index cd8dc85e9c0f..ea46717f1fad 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -8,7 +8,7 @@ Abstract:
HW dynamic mechanism.
Major Change History:
- When Who What
+ When Who What
---------- --------------- -------------------------------
2008-05-14 amy create version 0 porting from windows code.
@@ -25,9 +25,9 @@ Major Change History:
// Indicate different AP vendor for IOT issue.
//
static u32 edca_setting_DL[HT_IOT_PEER_MAX] =
- { 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5ea44f};
+ { 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5ea44f};
static u32 edca_setting_UL[HT_IOT_PEER_MAX] =
- { 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f};
+ { 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f};
#define RTK_UL_EDCA 0xa44f
@@ -134,7 +134,7 @@ static void dm_check_pbc_gpio(struct net_device *dev);
// DM --> Check current RX RF path state
static void dm_check_rx_path_selection(struct net_device *dev);
-static void dm_init_rxpath_selection(struct net_device *dev);
+static void dm_init_rxpath_selection(struct net_device *dev);
static void dm_rxpath_sel_byrssi(struct net_device *dev);
@@ -201,8 +201,8 @@ extern void deinit_hal_dm(struct net_device *dev)
void dm_CheckRxAggregation(struct net_device *dev) {
struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- static unsigned long lastTxOkCnt = 0;
- static unsigned long lastRxOkCnt = 0;
+ static unsigned long lastTxOkCnt;
+ static unsigned long lastRxOkCnt;
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
@@ -259,7 +259,7 @@ extern void hal_dm_watchdog(struct net_device *dev)
{
//struct r8192_priv *priv = ieee80211_priv(dev);
- //static u8 previous_bssid[6] ={0};
+ //static u8 previous_bssid[6] ={0};
/*Add by amy 2008/05/15 ,porting from windows code.*/
dm_check_rate_adaptive(dev);
@@ -315,21 +315,21 @@ extern void init_rate_adaptive(struct net_device * dev)
{
// 07/10/08 MH Modify for RA smooth scheme.
/* 2008/01/11 MH Modify 2T RATR table for different RSSI. 080515 porting by amy from windows code.*/
- pra->upper_rssi_threshold_ratr = 0x8f0f0000;
- pra->middle_rssi_threshold_ratr = 0x8f0ff000;
- pra->low_rssi_threshold_ratr = 0x8f0ff001;
- pra->low_rssi_threshold_ratr_40M = 0x8f0ff005;
- pra->low_rssi_threshold_ratr_20M = 0x8f0ff001;
- pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
+ pra->upper_rssi_threshold_ratr = 0x8f0f0000;
+ pra->middle_rssi_threshold_ratr = 0x8f0ff000;
+ pra->low_rssi_threshold_ratr = 0x8f0ff001;
+ pra->low_rssi_threshold_ratr_40M = 0x8f0ff005;
+ pra->low_rssi_threshold_ratr_20M = 0x8f0ff001;
+ pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
}
else if (priv->rf_type == RF_1T2R)
{
- pra->upper_rssi_threshold_ratr = 0x000f0000;
- pra->middle_rssi_threshold_ratr = 0x000ff000;
- pra->low_rssi_threshold_ratr = 0x000ff001;
- pra->low_rssi_threshold_ratr_40M = 0x000ff005;
- pra->low_rssi_threshold_ratr_20M = 0x000ff001;
- pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
+ pra->upper_rssi_threshold_ratr = 0x000f0000;
+ pra->middle_rssi_threshold_ratr = 0x000ff000;
+ pra->low_rssi_threshold_ratr = 0x000ff001;
+ pra->low_rssi_threshold_ratr_40M = 0x000ff005;
+ pra->low_rssi_threshold_ratr_20M = 0x000ff001;
+ pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
}
} // InitRateAdaptive
@@ -348,7 +348,7 @@ extern void init_rate_adaptive(struct net_device * dev)
*
* Revised History:
* When Who Remark
- * 05/26/08 amy Create version 0 porting from windows code.
+ * 05/26/08 amy Create version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void dm_check_rate_adaptive(struct net_device * dev)
@@ -359,7 +359,7 @@ static void dm_check_rate_adaptive(struct net_device * dev)
u32 currentRATR, targetRATR = 0;
u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
bool bshort_gi_enabled = false;
- static u8 ping_rssi_state=0;
+ static u8 ping_rssi_state;
if(!priv->up)
@@ -413,14 +413,14 @@ static void dm_check_rate_adaptive(struct net_device * dev)
to prevent jumping frequently. */
if (pra->ratr_state == DM_RATR_STA_HIGH)
{
- HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
+ HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
(pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
}
else if (pra->ratr_state == DM_RATR_STA_LOW)
{
HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
+ LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
(pra->low2high_rssi_thresh_for_ra40M):(pra->low2high_rssi_thresh_for_ra20M);
}
else
@@ -599,7 +599,7 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device * dev)
u32 Value;
u8 Pwr_Flag;
u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver=0;
- //RT_STATUS rtStatus = RT_STATUS_SUCCESS;
+ //RT_STATUS rtStatus = RT_STATUS_SUCCESS;
bool rtStatus = true;
u32 delta=0;
@@ -954,79 +954,79 @@ static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
//Initial the Tx BB index and mapping value
- priv->txbbgain_table[0].txbb_iq_amplifygain = 12;
+ priv->txbbgain_table[0].txbb_iq_amplifygain = 12;
priv->txbbgain_table[0].txbbgain_value=0x7f8001fe;
- priv->txbbgain_table[1].txbb_iq_amplifygain = 11;
+ priv->txbbgain_table[1].txbb_iq_amplifygain = 11;
priv->txbbgain_table[1].txbbgain_value=0x788001e2;
- priv->txbbgain_table[2].txbb_iq_amplifygain = 10;
+ priv->txbbgain_table[2].txbb_iq_amplifygain = 10;
priv->txbbgain_table[2].txbbgain_value=0x71c001c7;
- priv->txbbgain_table[3].txbb_iq_amplifygain = 9;
+ priv->txbbgain_table[3].txbb_iq_amplifygain = 9;
priv->txbbgain_table[3].txbbgain_value=0x6b8001ae;
- priv->txbbgain_table[4].txbb_iq_amplifygain = 8;
+ priv->txbbgain_table[4].txbb_iq_amplifygain = 8;
priv->txbbgain_table[4].txbbgain_value=0x65400195;
- priv->txbbgain_table[5].txbb_iq_amplifygain = 7;
+ priv->txbbgain_table[5].txbb_iq_amplifygain = 7;
priv->txbbgain_table[5].txbbgain_value=0x5fc0017f;
- priv->txbbgain_table[6].txbb_iq_amplifygain = 6;
+ priv->txbbgain_table[6].txbb_iq_amplifygain = 6;
priv->txbbgain_table[6].txbbgain_value=0x5a400169;
- priv->txbbgain_table[7].txbb_iq_amplifygain = 5;
+ priv->txbbgain_table[7].txbb_iq_amplifygain = 5;
priv->txbbgain_table[7].txbbgain_value=0x55400155;
- priv->txbbgain_table[8].txbb_iq_amplifygain = 4;
+ priv->txbbgain_table[8].txbb_iq_amplifygain = 4;
priv->txbbgain_table[8].txbbgain_value=0x50800142;
- priv->txbbgain_table[9].txbb_iq_amplifygain = 3;
+ priv->txbbgain_table[9].txbb_iq_amplifygain = 3;
priv->txbbgain_table[9].txbbgain_value=0x4c000130;
- priv->txbbgain_table[10].txbb_iq_amplifygain = 2;
+ priv->txbbgain_table[10].txbb_iq_amplifygain = 2;
priv->txbbgain_table[10].txbbgain_value=0x47c0011f;
- priv->txbbgain_table[11].txbb_iq_amplifygain = 1;
+ priv->txbbgain_table[11].txbb_iq_amplifygain = 1;
priv->txbbgain_table[11].txbbgain_value=0x43c0010f;
- priv->txbbgain_table[12].txbb_iq_amplifygain = 0;
+ priv->txbbgain_table[12].txbb_iq_amplifygain = 0;
priv->txbbgain_table[12].txbbgain_value=0x40000100;
- priv->txbbgain_table[13].txbb_iq_amplifygain = -1;
+ priv->txbbgain_table[13].txbb_iq_amplifygain = -1;
priv->txbbgain_table[13].txbbgain_value=0x3c8000f2;
- priv->txbbgain_table[14].txbb_iq_amplifygain = -2;
+ priv->txbbgain_table[14].txbb_iq_amplifygain = -2;
priv->txbbgain_table[14].txbbgain_value=0x390000e4;
- priv->txbbgain_table[15].txbb_iq_amplifygain = -3;
+ priv->txbbgain_table[15].txbb_iq_amplifygain = -3;
priv->txbbgain_table[15].txbbgain_value=0x35c000d7;
- priv->txbbgain_table[16].txbb_iq_amplifygain = -4;
+ priv->txbbgain_table[16].txbb_iq_amplifygain = -4;
priv->txbbgain_table[16].txbbgain_value=0x32c000cb;
- priv->txbbgain_table[17].txbb_iq_amplifygain = -5;
+ priv->txbbgain_table[17].txbb_iq_amplifygain = -5;
priv->txbbgain_table[17].txbbgain_value=0x300000c0;
- priv->txbbgain_table[18].txbb_iq_amplifygain = -6;
+ priv->txbbgain_table[18].txbb_iq_amplifygain = -6;
priv->txbbgain_table[18].txbbgain_value=0x2d4000b5;
- priv->txbbgain_table[19].txbb_iq_amplifygain = -7;
+ priv->txbbgain_table[19].txbb_iq_amplifygain = -7;
priv->txbbgain_table[19].txbbgain_value=0x2ac000ab;
- priv->txbbgain_table[20].txbb_iq_amplifygain = -8;
+ priv->txbbgain_table[20].txbb_iq_amplifygain = -8;
priv->txbbgain_table[20].txbbgain_value=0x288000a2;
- priv->txbbgain_table[21].txbb_iq_amplifygain = -9;
+ priv->txbbgain_table[21].txbb_iq_amplifygain = -9;
priv->txbbgain_table[21].txbbgain_value=0x26000098;
- priv->txbbgain_table[22].txbb_iq_amplifygain = -10;
+ priv->txbbgain_table[22].txbb_iq_amplifygain = -10;
priv->txbbgain_table[22].txbbgain_value=0x24000090;
- priv->txbbgain_table[23].txbb_iq_amplifygain = -11;
+ priv->txbbgain_table[23].txbb_iq_amplifygain = -11;
priv->txbbgain_table[23].txbbgain_value=0x22000088;
- priv->txbbgain_table[24].txbb_iq_amplifygain = -12;
+ priv->txbbgain_table[24].txbb_iq_amplifygain = -12;
priv->txbbgain_table[24].txbbgain_value=0x20000080;
- priv->txbbgain_table[25].txbb_iq_amplifygain = -13;
+ priv->txbbgain_table[25].txbb_iq_amplifygain = -13;
priv->txbbgain_table[25].txbbgain_value=0x1a00006c;
- priv->txbbgain_table[26].txbb_iq_amplifygain = -14;
+ priv->txbbgain_table[26].txbb_iq_amplifygain = -14;
priv->txbbgain_table[26].txbbgain_value=0x1c800072;
- priv->txbbgain_table[27].txbb_iq_amplifygain = -15;
+ priv->txbbgain_table[27].txbb_iq_amplifygain = -15;
priv->txbbgain_table[27].txbbgain_value=0x18000060;
- priv->txbbgain_table[28].txbb_iq_amplifygain = -16;
+ priv->txbbgain_table[28].txbb_iq_amplifygain = -16;
priv->txbbgain_table[28].txbbgain_value=0x19800066;
- priv->txbbgain_table[29].txbb_iq_amplifygain = -17;
+ priv->txbbgain_table[29].txbb_iq_amplifygain = -17;
priv->txbbgain_table[29].txbbgain_value=0x15800056;
- priv->txbbgain_table[30].txbb_iq_amplifygain = -18;
+ priv->txbbgain_table[30].txbb_iq_amplifygain = -18;
priv->txbbgain_table[30].txbbgain_value=0x26c0005b;
- priv->txbbgain_table[31].txbb_iq_amplifygain = -19;
+ priv->txbbgain_table[31].txbb_iq_amplifygain = -19;
priv->txbbgain_table[31].txbbgain_value=0x14400051;
- priv->txbbgain_table[32].txbb_iq_amplifygain = -20;
+ priv->txbbgain_table[32].txbb_iq_amplifygain = -20;
priv->txbbgain_table[32].txbbgain_value=0x24400051;
- priv->txbbgain_table[33].txbb_iq_amplifygain = -21;
+ priv->txbbgain_table[33].txbb_iq_amplifygain = -21;
priv->txbbgain_table[33].txbbgain_value=0x1300004c;
- priv->txbbgain_table[34].txbb_iq_amplifygain = -22;
+ priv->txbbgain_table[34].txbb_iq_amplifygain = -22;
priv->txbbgain_table[34].txbbgain_value=0x12000048;
- priv->txbbgain_table[35].txbb_iq_amplifygain = -23;
+ priv->txbbgain_table[35].txbb_iq_amplifygain = -23;
priv->txbbgain_table[35].txbbgain_value=0x11000044;
- priv->txbbgain_table[36].txbb_iq_amplifygain = -24;
+ priv->txbbgain_table[36].txbb_iq_amplifygain = -24;
priv->txbbgain_table[36].txbbgain_value=0x10000040;
//ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29
@@ -1486,7 +1486,7 @@ void dm_initialize_txpower_tracking(struct net_device *dev)
static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- static u32 tx_power_track_counter = 0;
+ static u32 tx_power_track_counter;
if(!priv->btxpower_tracking)
return;
@@ -1505,7 +1505,7 @@ static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- static u8 TM_Trigger=0;
+ static u8 TM_Trigger;
//DbgPrint("dm_CheckTXPowerTracking() \n");
if(!priv->btxpower_tracking)
return;
@@ -1564,40 +1564,40 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
TempVal = 0;
if(!bInCH14){
//Write 0xa22 0xa23
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
+ TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1,bMaskHWord, TempVal);
//Write 0xa24 ~ 0xa27
TempVal = 0;
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
+ TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16 )+
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2,bMaskDWord, TempVal);
//Write 0xa28 0xa29
TempVal = 0;
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
+ TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
rtl8192_setBBreg(dev, rCCK0_DebugPort,bMaskLWord, TempVal);
}
else
{
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
+ TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1,bMaskHWord, TempVal);
//Write 0xa24 ~ 0xa27
TempVal = 0;
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
+ TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16 )+
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2,bMaskDWord, TempVal);
//Write 0xa28 0xa29
TempVal = 0;
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
+ TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
rtl8192_setBBreg(dev, rCCK0_DebugPort,bMaskLWord, TempVal);
@@ -1615,14 +1615,14 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
if(!bInCH14)
{
//Write 0xa22 0xa23
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
//Write 0xa24 ~ 0xa27
TempVal = 0;
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16 )+
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][5]<<24);
@@ -1631,7 +1631,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rCCK0_TxFilter2, TempVal);
//Write 0xa28 0xa29
TempVal = 0;
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
@@ -1642,7 +1642,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
{
// priv->CCKTxPowerAdjustCntNotCh14++; //cosa add for debug.
//Write 0xa22 0xa23
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
+ TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
(CCKSwingTable_Ch14[priv->CCK_index][1]<<8) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
@@ -1650,7 +1650,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rCCK0_TxFilter1, TempVal);
//Write 0xa24 ~ 0xa27
TempVal = 0;
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
+ TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch14[priv->CCK_index][4]<<16 )+
(CCKSwingTable_Ch14[priv->CCK_index][5]<<24);
@@ -1659,7 +1659,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rCCK0_TxFilter2, TempVal);
//Write 0xa28 0xa29
TempVal = 0;
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
+ TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
@@ -1713,7 +1713,7 @@ static void dm_txpower_reset_recovery(
extern void dm_restore_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u32 reg_ratr = priv->rate_adaptive.last_ratr;
+ u32 reg_ratr = priv->rate_adaptive.last_ratr;
if(!priv->up)
{
@@ -1934,7 +1934,7 @@ dm_change_rxpath_selection_setting(
s32 DM_Value)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- prate_adaptive pRA = (prate_adaptive)&(priv->rate_adaptive);
+ prate_adaptive pRA = (prate_adaptive)&(priv->rate_adaptive);
if(DM_Type == 0)
@@ -2036,8 +2036,8 @@ static void dm_dig_init(struct net_device *dev)
dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
dm_digtable.initialgain_lowerbound_state = false;
- dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
- dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;
+ dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
+ dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;
dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
@@ -2091,7 +2091,7 @@ static void dm_ctrl_initgain_byrssi_by_driverrssi(
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 i;
- static u8 fw_dig=0;
+ static u8 fw_dig;
if (dm_digtable.dig_enable_flag == false)
return;
@@ -2131,7 +2131,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- static u32 reset_cnt = 0;
+ static u32 reset_cnt;
u8 i;
if (dm_digtable.dig_enable_flag == false)
@@ -2319,7 +2319,7 @@ static void dm_ctrl_initgain_byrssi_highpwr(
struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- static u32 reset_cnt_highpwr = 0;
+ static u32 reset_cnt_highpwr;
// For smooth, we can not change high power DIG state in the range.
if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_high_power_lowthresh) &&
@@ -2395,8 +2395,8 @@ static void dm_initial_gain(
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 initial_gain=0;
- static u8 initialized=0, force_write=0;
- static u32 reset_cnt=0;
+ static u8 initialized, force_write;
+ static u32 reset_cnt;
if(dm_digtable.dig_algorithm_switch)
{
@@ -2462,8 +2462,8 @@ static void dm_pd_th(
struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- static u8 initialized=0, force_write=0;
- static u32 reset_cnt = 0;
+ static u8 initialized, force_write;
+ static u32 reset_cnt;
if(dm_digtable.dig_algorithm_switch)
{
@@ -2574,8 +2574,8 @@ static void dm_cs_ratio(
struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- static u8 initialized=0,force_write=0;
- static u32 reset_cnt = 0;
+ static u8 initialized,force_write;
+ static u32 reset_cnt;
if(dm_digtable.dig_algorithm_switch)
{
@@ -2651,8 +2651,8 @@ static void dm_check_edca_turbo(
//PSTA_QOS pStaQos = pMgntInfo->pStaQos;
// Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.
- static unsigned long lastTxOkCnt = 0;
- static unsigned long lastRxOkCnt = 0;
+ static unsigned long lastTxOkCnt;
+ static unsigned long lastRxOkCnt;
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
@@ -2785,8 +2785,8 @@ static void dm_ctstoself(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- static unsigned long lastTxOkCnt = 0;
- static unsigned long lastRxOkCnt = 0;
+ static unsigned long lastTxOkCnt;
+ static unsigned long lastRxOkCnt;
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
@@ -2871,7 +2871,7 @@ static void dm_check_rfctrl_gpio(struct net_device * dev)
*
* Revised History:
* When Who Remark
- * 05/28/2008 amy Create Version 0 porting from windows code.
+ * 05/28/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void dm_check_pbc_gpio(struct net_device *dev)
@@ -3044,7 +3044,7 @@ static void dm_rxpath_sel_byrssi(struct net_device * dev)
u8 cck_rx_ver2_max_index=0, cck_rx_ver2_min_index=0, cck_rx_ver2_sec_index=0;
u8 cur_rf_rssi;
long cur_cck_pwdb;
- static u8 disabled_rf_cnt=0, cck_Rx_Path_initialized=0;
+ static u8 disabled_rf_cnt, cck_Rx_Path_initialized;
u8 update_cck_rx_path;
if(priv->rf_type != RF_2T4R)
@@ -3517,8 +3517,8 @@ static void dm_EndSWFsync(struct net_device *dev)
static void dm_StartSWFsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u32 rateIndex;
- u32 rateBitmap;
+ u32 rateIndex;
+ u32 rateBitmap;
RT_TRACE(COMP_HALDM,"%s\n", __FUNCTION__);
// Initial rate record to zero, start to record.
@@ -3569,9 +3569,9 @@ void dm_check_fsync(struct net_device *dev)
#define RegC38_NonFsync_Other_AP 1
#define RegC38_Fsync_AP_BCM 2
struct r8192_priv *priv = ieee80211_priv(dev);
- //u32 framesyncC34;
+ //u32 framesyncC34;
static u8 reg_c38_State=RegC38_Default;
- static u32 reset_cnt=0;
+ static u32 reset_cnt;
RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval %d\n", priv->ieee80211->fsync_rssi_threshold, priv->ieee80211->fsync_time_interval, priv->ieee80211->fsync_multiple_timeinterval);
RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n", priv->ieee80211->fsync_rate_bitmap, priv->ieee80211->fsync_firstdiff_ratethreshold, priv->ieee80211->fsync_seconddiff_ratethreshold);
@@ -3887,4 +3887,3 @@ static void dm_send_rssi_tofw(struct net_device *dev)
}
/*---------------------------Define function prototype------------------------*/
-
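
The dm_check_rate_adaptive() hunks above pick different RSSI comparison thresholds depending on the current RATR state (high2low vs. low2high) so the state does not flap around a single boundary value. A hedged standalone sketch of that hysteresis idea; the threshold names and numbers here are placeholders, the real driver reads per-bandwidth values from its rate_adaptive structure.

#include <stdio.h>

enum ra_state { RA_STATE_HIGH, RA_STATE_MIDDLE, RA_STATE_LOW };

/* Hypothetical thresholds, chosen only to show the hysteresis gap. */
#define RA_HIGH_TO_LOW_RSSI	40
#define RA_LOW_TO_HIGH_RSSI	45
#define RA_HIGH_RSSI		50
#define RA_LOW_RSSI		35

static enum ra_state ra_update(enum ra_state cur, int rssi)
{
	int high_thresh, low_thresh;

	/* Loosen the threshold in the direction we came from, so a noisy
	 * RSSI near the boundary cannot toggle the state every sample. */
	if (cur == RA_STATE_HIGH) {
		high_thresh = RA_HIGH_TO_LOW_RSSI;
		low_thresh = RA_LOW_RSSI;
	} else if (cur == RA_STATE_LOW) {
		high_thresh = RA_HIGH_RSSI;
		low_thresh = RA_LOW_TO_HIGH_RSSI;
	} else {
		high_thresh = RA_HIGH_RSSI;
		low_thresh = RA_LOW_RSSI;
	}

	if (rssi > high_thresh)
		return RA_STATE_HIGH;
	if (rssi < low_thresh)
		return RA_STATE_LOW;
	return RA_STATE_MIDDLE;
}

int main(void)
{
	enum ra_state s = RA_STATE_MIDDLE;
	int samples[6] = { 55, 48, 42, 38, 44, 47 };

	for (int i = 0; i < 6; i++) {
		s = ra_update(s, samples[i]);
		printf("rssi=%d -> state=%d\n", samples[i], s);
	}
	return 0;
}
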
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 3ceb59b9eca7..ffb083c958ad 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -13,7 +13,7 @@
*
* History:
* Data Who Remark
- * 10/04/2007 MHC Create initial version.
+ * 10/04/2007 MHC Create initial version.
*
*****************************************************************************/
/* Check to see if the file has been included already. */
@@ -55,20 +55,19 @@
#define TX_POWER_NEAR_FIELD_THRESH_LOW 62
//added by amy for atheros AP
#define TX_POWER_ATHEROAP_THRESH_HIGH 78
-#define TX_POWER_ATHEROAP_THRESH_LOW 72
+#define TX_POWER_ATHEROAP_THRESH_LOW 72
//defined by vivi, for showing on UI
-#define Current_Tx_Rate_Reg 0x1b8
-#define Initial_Tx_Rate_Reg 0x1b9
-#define Tx_Retry_Count_Reg 0x1ac
+#define Current_Tx_Rate_Reg 0x1b8
+#define Initial_Tx_Rate_Reg 0x1b9
+#define Tx_Retry_Count_Reg 0x1ac
#define RegC38_TH 20
/*--------------------------Define Parameters-------------------------------*/
/*------------------------------Define structure----------------------------*/
/* 2007/10/04 MH Define upper and lower threshold of DIG enable or disable. */
-typedef struct _dynamic_initial_gain_threshold_
-{
+typedef struct _dynamic_initial_gain_threshold_ {
u8 dig_enable_flag;
u8 dig_algorithm;
u8 dbg_mode;
@@ -132,8 +131,8 @@ typedef enum tag_dynamic_init_gain_operation_type_definition
DIG_TYPE_PWDB_FACTOR = 8,
DIG_TYPE_RX_GAIN_MIN = 9,
DIG_TYPE_RX_GAIN_MAX = 10,
- DIG_TYPE_ENABLE = 20,
- DIG_TYPE_DISABLE = 30,
+ DIG_TYPE_ENABLE = 20,
+ DIG_TYPE_DISABLE = 30,
DIG_OP_TYPE_MAX
}dm_dig_op_e;
@@ -172,8 +171,7 @@ typedef enum tag_dig_cck_cs_ratio_state_definition
DIG_CS_RATIO_HIGHER = 1,
DIG_CS_MAX
}dm_dig_cs_ratio_e;
-typedef struct _Dynamic_Rx_Path_Selection_
-{
+typedef struct _Dynamic_Rx_Path_Selection_ {
u8 Enable;
u8 DbgMode;
u8 cck_method;
@@ -203,8 +201,7 @@ typedef enum tag_DM_DbgMode_Definition
DM_DBG_MAX
}DM_DBG_E;
-typedef struct tag_Tx_Config_Cmd_Format
-{
+typedef struct tag_Tx_Config_Cmd_Format {
u32 Op; /* Command packet type. */
u32 Length; /* Command packet length. */
u32 Value;
@@ -251,4 +248,3 @@ extern void dm_initialize_txpower_tracking(struct net_device *dev);
/* End of r8192U_dm.h */
-
diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h
index 1bfe871dcfb2..15b0423356f8 100644
--- a/drivers/staging/rtl8192u/r8192U_hw.h
+++ b/drivers/staging/rtl8192u/r8192U_hw.h
@@ -122,7 +122,7 @@ enum _RTL8192Usb_HW {
SIFS = 0x03E, // SIFS register
TCR = 0x040, // Transmit Configuration Register
-#define TCR_MXDMA_2048 7
+#define TCR_MXDMA_2048 7
#define TCR_LRL_OFFSET 0
#define TCR_SRL_OFFSET 8
#define TCR_MXDMA_OFFSET 21
@@ -379,7 +379,7 @@ enum _RTL8192Usb_HW {
// IMR_POLL = 0x360,
MacBlkCtrl = 0x403, // Mac block on/off control register
- EPROM_CMD = 0xfe58,
+ EPROM_CMD = 0xfe58,
#define Cmd9346CR_9356SEL (1<<4)
#define EPROM_CMD_RESERVED_MASK (1<<5)
#define EPROM_CMD_OPERATING_MODE_SHIFT 6
@@ -392,12 +392,12 @@ enum _RTL8192Usb_HW {
#define EPROM_CK_SHIFT 2
#define EPROM_W_SHIFT 1
#define EPROM_R_SHIFT 0
- MAC0 = 0x000,
- MAC1 = 0x001,
- MAC2 = 0x002,
- MAC3 = 0x003,
- MAC4 = 0x004,
- MAC5 = 0x005,
+ MAC0 = 0x000,
+ MAC1 = 0x001,
+ MAC2 = 0x002,
+ MAC3 = 0x003,
+ MAC4 = 0x004,
+ MAC5 = 0x005,
};
//----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 71f2d2349c38..c904aa8cc0a6 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -354,8 +354,7 @@ static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
return ret;
}
-struct iw_range_with_scan_capa
-{
+struct iw_range_with_scan_capa {
/* Informative stuff (to choose between different interface) */
__u32 throughput; /* To give an idea... */
/* In theory this value should be the maximum benchmarked
@@ -711,12 +710,12 @@ static int r8192_wx_set_enc(struct net_device *dev,
#define CONF_WEP104 0x14
switch(wrqu->encoding.flags & IW_ENCODE_INDEX){
- case 0: key_idx = ieee->tx_keyidx; break;
- case 1: key_idx = 0; break;
- case 2: key_idx = 1; break;
- case 3: key_idx = 2; break;
- case 4: key_idx = 3; break;
- default: break;
+ case 0: key_idx = ieee->tx_keyidx; break;
+ case 1: key_idx = 0; break;
+ case 2: key_idx = 1; break;
+ case 3: key_idx = 2; break;
+ case 4: key_idx = 3; break;
+ default: break;
}
if(wrqu->encoding.length==0x5){
@@ -1021,7 +1020,7 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
static iw_handler r8192_wx_handlers[] =
{
NULL, /* SIOCSIWCOMMIT */
- r8192_wx_get_name, /* SIOCGIWNAME */
+ r8192_wx_get_name, /* SIOCGIWNAME */
dummy, /* SIOCSIWNWID */
dummy, /* SIOCGIWNWID */
r8192_wx_set_freq, /* SIOCSIWFREQ */
@@ -1040,7 +1039,7 @@ static iw_handler r8192_wx_handlers[] =
dummy, /* SIOCGIWSPY */
NULL, /* SIOCGIWTHRSPY */
NULL, /* SIOCWIWTHRSPY */
- r8192_wx_set_wap, /* SIOCSIWAP */
+ r8192_wx_set_wap, /* SIOCSIWAP */
r8192_wx_get_wap, /* SIOCGIWAP */
#if (WIRELESS_EXT >= 18)
r8192_wx_set_mlme, /* MLME-- */
@@ -1071,23 +1070,23 @@ static iw_handler r8192_wx_handlers[] =
r8192_wx_set_power, /* SIOCSIWPOWER */
r8192_wx_get_power, /* SIOCGIWPOWER */
NULL, /*---hole---*/
- NULL, /*---hole---*/
- r8192_wx_set_gen_ie,//NULL, /* SIOCSIWGENIE */
- NULL, /* SIOCSIWGENIE */
+ NULL, /*---hole---*/
+ r8192_wx_set_gen_ie,//NULL, /* SIOCSIWGENIE */
+ NULL, /* SIOCSIWGENIE */
#if (WIRELESS_EXT >= 18)
- r8192_wx_set_auth,//NULL, /* SIOCSIWAUTH */
- NULL,//r8192_wx_get_auth,//NULL, /* SIOCSIWAUTH */
- r8192_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
- NULL,//r8192_wx_get_enc_ext,//NULL, /* SIOCSIWENCODEEXT */
+ r8192_wx_set_auth,//NULL, /* SIOCSIWAUTH */
+ NULL,//r8192_wx_get_auth,//NULL, /* SIOCSIWAUTH */
+ r8192_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
+ NULL,//r8192_wx_get_enc_ext,//NULL, /* SIOCSIWENCODEEXT */
#else
NULL,
NULL,
NULL,
NULL,
#endif
- NULL, /* SIOCSIWPMKSA */
- NULL, /*---hole---*/
+ NULL, /* SIOCSIWPMKSA */
+ NULL, /*---hole---*/
};
diff --git a/drivers/staging/rtl8192u/r819xU_HTGen.h b/drivers/staging/rtl8192u/r819xU_HTGen.h
index f37b6d69b404..6a4678f7da5f 100644
--- a/drivers/staging/rtl8192u/r819xU_HTGen.h
+++ b/drivers/staging/rtl8192u/r819xU_HTGen.h
@@ -10,4 +10,3 @@ typedef enum _HT_IOT_ACTION{
HT_IOT_ACT_CDD_FSYNC = 0x00000020,
HT_IOT_ACT_PURE_N_MODE = 0x00000040,
}HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
-
diff --git a/drivers/staging/rtl8192u/r819xU_HTType.h b/drivers/staging/rtl8192u/r819xU_HTType.h
index 6c1d05e1e820..19a7bdd1973a 100644
--- a/drivers/staging/rtl8192u/r819xU_HTType.h
+++ b/drivers/staging/rtl8192u/r819xU_HTType.h
@@ -89,7 +89,7 @@ typedef enum _CHNLOP{
typedef enum _HT_ACTION{
ACT_RECOMMAND_WIDTH = 0,
- ACT_MIMO_PWR_SAVE = 1,
+ ACT_MIMO_PWR_SAVE = 1,
ACT_PSMP = 2,
ACT_SET_PCO_PHASE = 3,
ACT_MIMO_CHL_MEASURE = 4,
@@ -367,9 +367,9 @@ typedef struct _BSS_HT{
typedef struct _MIMO_RSSI{
u32 EnableAntenna;
u32 AntennaA;
- u32 AntennaB;
- u32 AntennaC;
- u32 AntennaD;
+ u32 AntennaB;
+ u32 AntennaC;
+ u32 AntennaD;
u32 Average;
}MIMO_RSSI, *PMIMO_RSSI;
@@ -388,4 +388,3 @@ typedef struct _FALSE_ALARM_STATISTICS{
#endif //__INC_HTTYPE_H
-
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index a8a6dc2c365f..b755eb46341f 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -18,7 +18,7 @@
History:
Data Who Remark
- 05/06/2008 amy Create initial version porting from windows driver.
+ 05/06/2008 amy Create initial version porting from windows driver.
******************************************************************************/
#include "r8192U.h"
@@ -41,7 +41,7 @@
rt_status
SendTxCommandPacket(
struct net_device *dev,
- void* pData,
+ void* pData,
u32 DataLen
)
{
@@ -106,7 +106,7 @@ SendTxCommandPacket(
u32 buffer_len)
{
- bool rt_status = true;
+ bool rt_status = true;
#ifdef RTL8192U
return rt_status;
#else
@@ -188,7 +188,7 @@ SendTxCommandPacket(
*
* Overview:
*
- * Input: PADAPTER pAdapter - .
+ * Input: PADAPTER pAdapter - .
* CMPK_TXFB_T *psTx_FB - .
*
* Output: NONE
@@ -197,7 +197,7 @@ SendTxCommandPacket(
*
* Revised History:
* When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void
@@ -289,7 +289,7 @@ cmpk_count_txstatistic(
* in the command packet.
*
* Input: struct net_device * dev
- * u8 * pmsg - Msg Ptr of the command packet.
+ * u8 * pmsg - Msg Ptr of the command packet.
*
* Output: NONE
*
@@ -369,7 +369,7 @@ cmdpkt_beacontimerinterrupt_819xusb(
* Overview: The function is responsible for extract the message from
* firmware. It will contain dedicated info in
* ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
- * Please refer to chapter "Interrupt Status Element".
+ * Please refer to chapter "Interrupt Status Element".
*
* Input: struct net_device *dev,
* u8* pmsg - Message Pointer of the command packet.
@@ -400,8 +400,8 @@ cmpk_handle_interrupt_status(
/* It seems that FW use big endian(MIPS) and DRV use little endian in
windows OS. So we have to read the content byte by byte or transfer
endian type before copy the message copy. */
- //rx_bcn_state.Element_ID = pMsg[0];
- //rx_bcn_state.Length = pMsg[1];
+ //rx_bcn_state.Element_ID = pMsg[0];
+ //rx_bcn_state.Length = pMsg[1];
rx_intr_status.length = pmsg[1];
if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2))
{
@@ -478,16 +478,16 @@ cmpk_handle_query_config_rx(
/* It seems that FW use big endian(MIPS) and DRV use little endian in
windows OS. So we have to read the content byte by byte or transfer
endian type before copy the message copy. */
- //rx_query_cfg.Element_ID = pMsg[0];
- //rx_query_cfg.Length = pMsg[1];
- rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31;
- rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
- rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
- rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
- rx_query_cfg.cfg_offset = pmsg[7];
- rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
+ //rx_query_cfg.Element_ID = pMsg[0];
+ //rx_query_cfg.Length = pMsg[1];
+ rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31;
+ rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
+ rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
+ rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
+ rx_query_cfg.cfg_offset = pmsg[7];
+ rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
(pmsg[10] << 8) | (pmsg[11] << 0);
- rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
+ rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
(pmsg[14] << 8) | (pmsg[15] << 0);
} /* cmpk_Handle_Query_Config_Rx */
@@ -511,7 +511,7 @@ cmpk_handle_query_config_rx(
*
*---------------------------------------------------------------------------*/
static void cmpk_count_tx_status( struct net_device *dev,
- cmpk_tx_status_t *pstx_status)
+ cmpk_tx_status_t *pstx_status)
{
struct r8192_priv *priv = ieee80211_priv(dev);
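
The query-config hunks above rebuild 32-bit fields from the firmware message one byte at a time because, as the in-code comment notes, the firmware side is big-endian while the host driver is little-endian. A self-contained sketch of the same byte-by-byte decode; the helper name is mine, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Assemble a big-endian 32-bit value from a byte stream, independent of
 * the host CPU's endianness. */
static uint32_t be32_from_bytes(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  | (uint32_t)p[3];
}

int main(void)
{
	const uint8_t msg[4] = { 0x12, 0x34, 0x56, 0x78 };

	printf("0x%08x\n", be32_from_bytes(msg));	/* 0x12345678 */
	return 0;
}
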
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.h b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
index a8855e61b0e5..59caa4e05323 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
@@ -19,8 +19,7 @@
/*------------------------------Define structure----------------------------*/
/* Define different command packet structure. */
/* 1. RX side: TX feedback packet. */
-typedef struct tag_cmd_pkt_tx_feedback
-{
+typedef struct tag_cmd_pkt_tx_feedback {
// DWORD 0
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
@@ -57,8 +56,7 @@ typedef struct tag_cmd_pkt_tx_feedback
/* 2. RX side: Interrupt status packet. It includes Beacon State,
Beacon Timer Interrupt and other useful informations in MAC ISR Reg. */
-typedef struct tag_cmd_pkt_interrupt_status
-{
+typedef struct tag_cmd_pkt_interrupt_status {
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
u16 reserve;
@@ -67,12 +65,11 @@ typedef struct tag_cmd_pkt_interrupt_status
/* 3. TX side: Set configuration packet. */
-typedef struct tag_cmd_pkt_set_configuration
-{
+typedef struct tag_cmd_pkt_set_configuration {
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
u16 reserve1; /* */
- u8 cfg_reserve1:3;
+ u8 cfg_reserve1:3;
u8 cfg_size:2; /* Configuration info. */
u8 cfg_type:2; /* Configuration info. */
u8 cfg_action:1; /* Configuration info. */
@@ -89,25 +86,24 @@ typedef struct tag_cmd_pkt_set_configuration
#define cmpk_query_cfg_t cmpk_set_cfg_t
/* 5. Multi packet feedback status. */
-typedef struct tag_tx_stats_feedback // PJ quick rxcmd 09042007
-{
+typedef struct tag_tx_stats_feedback { // PJ quick rxcmd 09042007
// For endian transfer --> Driver will not the same as firmware structure.
// DW 0
u16 reserve1;
- u8 length; // Command packet length
- u8 element_id; // Command packet type
+ u8 length; // Command packet length
+ u8 element_id; // Command packet type
// DW 1
u16 txfail; // Tx Fail count
- u16 txok; // Tx ok count
+ u16 txok; // Tx ok count
// DW 2
- u16 txmcok; // tx multicast
- u16 txretry; // Tx Retry count
+ u16 txmcok; // tx multicast
+ u16 txretry; // Tx Retry count
// DW 3
u16 txucok; // tx unicast
- u16 txbcok; // tx broadcast
+ u16 txbcok; // tx broadcast
// DW 4
u16 txbcfail; //
@@ -130,13 +126,12 @@ typedef struct tag_tx_stats_feedback // PJ quick rxcmd 09042007
/* 6. Debug feedback message. */
/* 2007/10/23 MH Define RX debug message */
-typedef struct tag_rx_debug_message_feedback
-{
+typedef struct tag_rx_debug_message_feedback {
// For endian transfer --> for driver
// DW 0
u16 reserve1;
- u8 length; // Command packet length
- u8 element_id; // Command packet type
+ u8 length; // Command packet length
+ u8 element_id; // Command packet type
// DW 1-??
// Variable debug message.
@@ -144,19 +139,18 @@ typedef struct tag_rx_debug_message_feedback
}cmpk_rx_dbginfo_t;
/* 2008/03/20 MH Define transmit rate history. For big endian format. */
-typedef struct tag_tx_rate_history
-{
+typedef struct tag_tx_rate_history {
// For endian transfer --> for driver
// DW 0
- u8 element_id; // Command packet type
- u8 length; // Command packet length
+ u8 element_id; // Command packet type
+ u8 length; // Command packet length
u16 reserved1;
// DW 1-2 CCK rate counter
- u16 cck[4];
+ u16 cck[4];
// DW 3-6
- u16 ofdm[8];
+ u16 ofdm[8];
// DW 7-14
//UINT16 MCS_BW0_SG0[16];
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index b12d19079798..573e9cd68509 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -2,7 +2,7 @@
* Procedure: Init boot code/firmware code/data session
*
* Description: This routine will initialize firmware. If any error occurs during the initialization
- * process, the routine shall terminate immediately and return fail.
+ * process, the routine shall terminate immediately and return fail.
* NIC driver should call NdisOpenFile only from MiniportInitialize.
*
* Arguments: The pointer of the adapter
@@ -19,7 +19,7 @@
#include <linux/firmware.h>
void firmware_init_param(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv(dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
rt_firmware *pfirmware = priv->pFirmware;
pfirmware->cmdpacket_frag_thresold = GET_COMMAND_PACKET_FRAG_THRESHOLD(MAX_TRANSMIT_BUFFER_SIZE);
@@ -32,7 +32,7 @@ void firmware_init_param(struct net_device *dev)
bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buffer_len)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- bool rt_status = true;
+ bool rt_status = true;
u16 frag_threshold;
u16 frag_length, frag_offset = 0;
//u16 total_size;
@@ -241,17 +241,17 @@ CPUCheckFirmwareReady_Fail:
bool init_firmware(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv(dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
bool rt_status = TRUE;
u32 file_length = 0;
u8 *mapped_file = NULL;
u32 init_step = 0;
opt_rst_type_e rst_opt = OPT_SYSTEM_RESET;
- firmware_init_step_e starting_state = FW_INIT_STEP0_BOOT;
+ firmware_init_step_e starting_state = FW_INIT_STEP0_BOOT;
rt_firmware *pfirmware = priv->pFirmware;
- const struct firmware *fw_entry;
+ const struct firmware *fw_entry;
const char *fw_name[3] = { "RTL8192U/boot.img",
"RTL8192U/main.img",
"RTL8192U/data.img"};
@@ -334,56 +334,56 @@ bool init_firmware(struct net_device *dev)
}
switch(init_step) {
- case FW_INIT_STEP0_BOOT:
- /* Download boot
- * initialize command descriptor.
- * will set polling bit when firmware code is also configured
- */
- pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE;
+ case FW_INIT_STEP0_BOOT:
+ /* Download boot
+ * initialize command descriptor.
+ * will set polling bit when firmware code is also configured
+ */
+ pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE;
#ifdef RTL8190P
- // To initialize IMEM, CPU move code from 0x80000080, hence, we send 0x80 byte packet
- rt_status = fwSendNullPacket(dev, RTL8190_CPU_START_OFFSET);
- if(rt_status != true)
- {
- RT_TRACE(COMP_INIT, "fwSendNullPacket() fail ! \n");
- goto download_firmware_fail;
- }
+ // To initialize IMEM, CPU move code from 0x80000080, hence, we send 0x80 byte packet
+ rt_status = fwSendNullPacket(dev, RTL8190_CPU_START_OFFSET);
+ if(rt_status != true)
+ {
+ RT_TRACE(COMP_INIT, "fwSendNullPacket() fail ! \n");
+ goto download_firmware_fail;
+ }
#endif
- //mdelay(1000);
- /*
- * To initialize IMEM, CPU move code from 0x80000080,
- * hence, we send 0x80 byte packet
- */
- break;
-
- case FW_INIT_STEP1_MAIN:
- /* Download firmware code. Wait until Boot Ready and Turn on CPU */
- pfirmware->firmware_status = FW_STATUS_2_MOVE_MAIN_CODE;
-
- /* Check Put Code OK and Turn On CPU */
- rt_status = CPUcheck_maincodeok_turnonCPU(dev);
- if(rt_status != TRUE) {
- RT_TRACE(COMP_ERR, "CPUcheck_maincodeok_turnonCPU fail!\n");
- goto download_firmware_fail;
- }
-
- pfirmware->firmware_status = FW_STATUS_3_TURNON_CPU;
- break;
-
- case FW_INIT_STEP2_DATA:
- /* download initial data code */
- pfirmware->firmware_status = FW_STATUS_4_MOVE_DATA_CODE;
- mdelay(1);
-
- rt_status = CPUcheck_firmware_ready(dev);
- if(rt_status != TRUE) {
- RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n",rt_status);
- goto download_firmware_fail;
- }
-
- /* wait until data code is initialized ready.*/
- pfirmware->firmware_status = FW_STATUS_5_READY;
- break;
+ //mdelay(1000);
+ /*
+ * To initialize IMEM, CPU move code from 0x80000080,
+ * hence, we send 0x80 byte packet
+ */
+ break;
+
+ case FW_INIT_STEP1_MAIN:
+ /* Download firmware code. Wait until Boot Ready and Turn on CPU */
+ pfirmware->firmware_status = FW_STATUS_2_MOVE_MAIN_CODE;
+
+ /* Check Put Code OK and Turn On CPU */
+ rt_status = CPUcheck_maincodeok_turnonCPU(dev);
+ if(rt_status != TRUE) {
+ RT_TRACE(COMP_ERR, "CPUcheck_maincodeok_turnonCPU fail!\n");
+ goto download_firmware_fail;
+ }
+
+ pfirmware->firmware_status = FW_STATUS_3_TURNON_CPU;
+ break;
+
+ case FW_INIT_STEP2_DATA:
+ /* download initial data code */
+ pfirmware->firmware_status = FW_STATUS_4_MOVE_DATA_CODE;
+ mdelay(1);
+
+ rt_status = CPUcheck_firmware_ready(dev);
+ if(rt_status != TRUE) {
+ RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n",rt_status);
+ goto download_firmware_fail;
+ }
+
+ /* wait until data code is initialized ready.*/
+ pfirmware->firmware_status = FW_STATUS_5_READY;
+ break;
}
}
@@ -402,4 +402,3 @@ download_firmware_fail:
MODULE_FIRMWARE("RTL8192U/boot.img");
MODULE_FIRMWARE("RTL8192U/main.img");
MODULE_FIRMWARE("RTL8192U/data.img");
-
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.h b/drivers/staging/rtl8192u/r819xU_firmware.h
index a4bceeef33d3..c48c884aa1af 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.h
+++ b/drivers/staging/rtl8192u/r819xU_firmware.h
@@ -24,4 +24,3 @@ typedef enum _opt_rst_type{
}opt_rst_type_e;
#endif
-
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index dd1954daea2d..17fac41c12d9 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -124,10 +124,10 @@ static void phy_FwRFSerialWrite( struct net_device* dev, RF90_RADIO_PATH_E
/******************************************************************************
*function: This function read register from RF chip
* input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
* u32 Offset //target address to be read
* output: none
- * return: u32 readback value
+ * return: u32 readback value
* notice: There are three types of serial operations:(1) Software serial write.(2)Hardware LSSI-Low Speed Serial Interface.(3)Hardware HSSI-High speed serial write. Driver here need to implement (1) and (2)---need more spec for this information.
* ****************************************************************************/
u32 rtl8192_phy_RFSerialRead(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset)
@@ -201,7 +201,7 @@ u32 rtl8192_phy_RFSerialRead(struct net_device* dev, RF90_RADIO_PATH_E eRFPath,
/******************************************************************************
*function: This function write data to RF register
* input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
* u32 Offset //target address to be written
* u32 Data //The new register data to be written
* output: none
@@ -283,7 +283,7 @@ void rtl8192_phy_RFSerialWrite(struct net_device* dev, RF90_RADIO_PATH_E eRFPath
/******************************************************************************
*function: This function set specific bits to RF register
* input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
* u32 RegAddr //target addr to be modified
* u32 BitMask //taget bit pos in the addr to be modified
* u32 Data //value to be write
@@ -684,8 +684,8 @@ void rtl8192_InitBBRFRegDef(struct net_device* dev)
/******************************************************************************
*function: This function is to write register and then readback to make sure whether BB and RF is OK
* input: net_device dev
- * HW90_BLOCK_E CheckBlock
- * RF90_RADIO_PATH_E eRFPath //only used when checkblock is HW90_BLOCK_RF
+ * HW90_BLOCK_E CheckBlock
+ * RF90_RADIO_PATH_E eRFPath //only used when checkblock is HW90_BLOCK_RF
* output: none
* return: return whether BB and RF is ok(0:OK; 1:Fail)
* notice: This function may be removed in the ASIC
@@ -957,56 +957,56 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E
u8 ret = 0;
switch(eRFPath){
- case RF90_PATH_A:
- for(i = 0;i<RadioA_ArrayLength; i=i+2){
-
- if(rtl819XRadioA_Array[i] == 0xfe){
- mdelay(100);
- continue;
- }
- rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioA_Array[i], bMask12Bits, rtl819XRadioA_Array[i+1]);
- mdelay(1);
+ case RF90_PATH_A:
+ for(i = 0;i<RadioA_ArrayLength; i=i+2){
+ if(rtl819XRadioA_Array[i] == 0xfe){
+ mdelay(100);
+ continue;
}
- break;
- case RF90_PATH_B:
- for(i = 0;i<RadioB_ArrayLength; i=i+2){
+ rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioA_Array[i], bMask12Bits, rtl819XRadioA_Array[i+1]);
+ mdelay(1);
- if(rtl819XRadioB_Array[i] == 0xfe){
- mdelay(100);
- continue;
- }
- rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioB_Array[i], bMask12Bits, rtl819XRadioB_Array[i+1]);
- mdelay(1);
+ }
+ break;
+ case RF90_PATH_B:
+ for(i = 0;i<RadioB_ArrayLength; i=i+2){
+ if(rtl819XRadioB_Array[i] == 0xfe){
+ mdelay(100);
+ continue;
}
- break;
- case RF90_PATH_C:
- for(i = 0;i<RadioC_ArrayLength; i=i+2){
+ rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioB_Array[i], bMask12Bits, rtl819XRadioB_Array[i+1]);
+ mdelay(1);
- if(rtl819XRadioC_Array[i] == 0xfe){
- mdelay(100);
- continue;
- }
- rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioC_Array[i], bMask12Bits, rtl819XRadioC_Array[i+1]);
- mdelay(1);
+ }
+ break;
+ case RF90_PATH_C:
+ for(i = 0;i<RadioC_ArrayLength; i=i+2){
+ if(rtl819XRadioC_Array[i] == 0xfe){
+ mdelay(100);
+ continue;
}
- break;
- case RF90_PATH_D:
- for(i = 0;i<RadioD_ArrayLength; i=i+2){
+ rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioC_Array[i], bMask12Bits, rtl819XRadioC_Array[i+1]);
+ mdelay(1);
- if(rtl819XRadioD_Array[i] == 0xfe){
- mdelay(100);
- continue;
- }
- rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioD_Array[i], bMask12Bits, rtl819XRadioD_Array[i+1]);
- mdelay(1);
+ }
+ break;
+ case RF90_PATH_D:
+ for(i = 0;i<RadioD_ArrayLength; i=i+2){
+ if(rtl819XRadioD_Array[i] == 0xfe){
+ mdelay(100);
+ continue;
}
- break;
- default:
- break;
+ rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioD_Array[i], bMask12Bits, rtl819XRadioD_Array[i+1]);
+ mdelay(1);
+
+ }
+ break;
+ default:
+ break;
}
return ret;
@@ -1015,7 +1015,7 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E
/******************************************************************************
*function: This function set Tx Power of the channel
* input: struct net_device *dev
- * u8 channel
+ * u8 channel
* output: none
* return: none
* Note:
@@ -1052,7 +1052,7 @@ void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
/******************************************************************************
*function: This function set RF state on or off
* input: struct net_device *dev
- * RT_RF_POWER_STATE eRFPowerState //Power State to set
+ * RT_RF_POWER_STATE eRFPowerState //Power State to set
* output: none
* return: none
* Note:
@@ -1183,10 +1183,10 @@ bool rtl8192_SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerS
/****************************************************************************************
*function: This function set command table variable(struct SwChnlCmd).
- * input: SwChnlCmd* CmdTable //table to be set.
- * u32 CmdTableIdx //variable index in table to be set
- * u32 CmdTableSz //table size.
- * SwChnlCmdID CmdID //command ID to set.
+ * input: SwChnlCmd* CmdTable //table to be set.
+ * u32 CmdTableIdx //variable index in table to be set
+ * u32 CmdTableSz //table size.
+ * SwChnlCmdID CmdID //command ID to set.
* u32 Para1
* u32 Para2
* u32 msDelay
@@ -1229,10 +1229,10 @@ u8 rtl8192_phy_SetSwChnlCmdArray(
/******************************************************************************
*function: This function set channel step by step
* input: struct net_device *dev
- * u8 channel
- * u8* stage //3 stages
- * u8* step //
- * u32* delay //whether need to delay
+ * u8 channel
+ * u8* stage //3 stages
+ * u8* step //
+ * u32* delay //whether need to delay
* output: store new stage, step and delay for next step(combine with function above)
* return: true if finished, false otherwise
* Note: Wait for simpler function to replace it //wb
@@ -1386,7 +1386,7 @@ u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8* stage, u
/******************************************************************************
*function: This function does actually set channel work
* input: struct net_device *dev
- * u8 channel
+ * u8 channel
* output: none
* return: noin
* Note: We should not call this function directly
@@ -1427,7 +1427,7 @@ void rtl8192_SwChnl_WorkItem(struct net_device *dev)
/******************************************************************************
*function: This function scheduled actual work item to set channel
* input: net_device dev
- * u8 channel //channel to set
+ * u8 channel //channel to set
* output: none
* return: return code show if workitem is scheduled(1:pass, 0:fail)
* Note: Delay may be required for RF configuration
@@ -1501,12 +1501,12 @@ if (0) //to test current channel from RF reg 0x7.
/******************************************************************************
*function: Callback routine of the work item for set bandwidth mode.
* input: struct net_device *dev
- * HT_CHANNEL_WIDTH Bandwidth //20M or 40M
- * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care
+ * HT_CHANNEL_WIDTH Bandwidth //20M or 40M
+ * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care
* output: none
* return: none
* Note: I doubt whether SetBWModeInProgress flag is necessary as we can
- * test whether current work in the queue or not.//do I?
+ * test whether current work in the queue or not.//do I?
* ***************************************************************************/
void rtl8192_SetBWModeWorkItem(struct net_device *dev)
{
@@ -1649,12 +1649,12 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
/******************************************************************************
*function: This function schedules bandwidth switch work.
* input: struct net_device *dev
- * HT_CHANNEL_WIDTH Bandwidth //20M or 40M
- * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care
+ * HT_CHANNEL_WIDTH Bandwidth //20M or 40M
+ * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care
* output: none
* return: none
* Note: I doubt whether SetBWModeInProgress flag is necessary as we can
- * test whether current work in the queue or not.//do I?
+ * test whether current work in the queue or not.//do I?
* ***************************************************************************/
void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
{
@@ -1770,4 +1770,3 @@ extern void InitialGainOperateWorkItemCallBack(struct work_struct *work)
break;
}
}
-
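
rtl8192_phy_ConfigRFWithHeaderFile(), re-indented above, walks an (address, value) table two entries at a time and treats address 0xfe as a "delay 100 ms" marker rather than a register write. A hedged standalone sketch of that table-driven pattern; the callbacks below are placeholders standing in for rtl8192_phy_SetRFReg() and mdelay().

#include <stdint.h>
#include <stddef.h>

#define RF_DELAY_MARKER 0xfe	/* sentinel address: sleep instead of write */

/* Placeholder callbacks for illustration only. */
static void rf_write(uint32_t addr, uint32_t val) { (void)addr; (void)val; }
static void delay_ms(unsigned int ms) { (void)ms; }

static void rf_load_table(const uint32_t *table, size_t len)
{
	size_t i;

	/* Entries come in (address, value) pairs. */
	for (i = 0; i + 1 < len; i += 2) {
		if (table[i] == RF_DELAY_MARKER) {
			delay_ms(100);
			continue;
		}
		rf_write(table[i], table[i + 1]);
		delay_ms(1);
	}
}
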
diff --git a/drivers/staging/rtl8192u/r819xU_phyreg.h b/drivers/staging/rtl8192u/r819xU_phyreg.h
index cca34c05f6a5..64285d6a33f8 100644
--- a/drivers/staging/rtl8192u/r819xU_phyreg.h
+++ b/drivers/staging/rtl8192u/r819xU_phyreg.h
@@ -6,185 +6,185 @@
//Register //duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF
//page 1
-#define rPMAC_Reset 0x100
-#define rPMAC_TxStart 0x104
-#define rPMAC_TxLegacySIG 0x108
-#define rPMAC_TxHTSIG1 0x10c
-#define rPMAC_TxHTSIG2 0x110
-#define rPMAC_PHYDebug 0x114
-#define rPMAC_TxPacketNum 0x118
-#define rPMAC_TxIdle 0x11c
-#define rPMAC_TxMACHeader0 0x120
-#define rPMAC_TxMACHeader1 0x124
-#define rPMAC_TxMACHeader2 0x128
-#define rPMAC_TxMACHeader3 0x12c
-#define rPMAC_TxMACHeader4 0x130
-#define rPMAC_TxMACHeader5 0x134
-#define rPMAC_TxDataType 0x138
-#define rPMAC_TxRandomSeed 0x13c
-#define rPMAC_CCKPLCPPreamble 0x140
-#define rPMAC_CCKPLCPHeader 0x144
-#define rPMAC_CCKCRC16 0x148
-#define rPMAC_OFDMRxCRC32OK 0x170
-#define rPMAC_OFDMRxCRC32Er 0x174
-#define rPMAC_OFDMRxParityEr 0x178
-#define rPMAC_OFDMRxCRC8Er 0x17c
-#define rPMAC_CCKCRxRC16Er 0x180
-#define rPMAC_CCKCRxRC32Er 0x184
-#define rPMAC_CCKCRxRC32OK 0x188
-#define rPMAC_TxStatus 0x18c
+#define rPMAC_Reset 0x100
+#define rPMAC_TxStart 0x104
+#define rPMAC_TxLegacySIG 0x108
+#define rPMAC_TxHTSIG1 0x10c
+#define rPMAC_TxHTSIG2 0x110
+#define rPMAC_PHYDebug 0x114
+#define rPMAC_TxPacketNum 0x118
+#define rPMAC_TxIdle 0x11c
+#define rPMAC_TxMACHeader0 0x120
+#define rPMAC_TxMACHeader1 0x124
+#define rPMAC_TxMACHeader2 0x128
+#define rPMAC_TxMACHeader3 0x12c
+#define rPMAC_TxMACHeader4 0x130
+#define rPMAC_TxMACHeader5 0x134
+#define rPMAC_TxDataType 0x138
+#define rPMAC_TxRandomSeed 0x13c
+#define rPMAC_CCKPLCPPreamble 0x140
+#define rPMAC_CCKPLCPHeader 0x144
+#define rPMAC_CCKCRC16 0x148
+#define rPMAC_OFDMRxCRC32OK 0x170
+#define rPMAC_OFDMRxCRC32Er 0x174
+#define rPMAC_OFDMRxParityEr 0x178
+#define rPMAC_OFDMRxCRC8Er 0x17c
+#define rPMAC_CCKCRxRC16Er 0x180
+#define rPMAC_CCKCRxRC32Er 0x184
+#define rPMAC_CCKCRxRC32OK 0x188
+#define rPMAC_TxStatus 0x18c
//page8
-#define rFPGA0_RFMOD 0x800 //RF mode & CCK TxSC
-#define rFPGA0_TxInfo 0x804
-#define rFPGA0_PSDFunction 0x808
-#define rFPGA0_TxGainStage 0x80c
-#define rFPGA0_RFTiming1 0x810
-#define rFPGA0_RFTiming2 0x814
-//#define rFPGA0_XC_RFTiming 0x818
-//#define rFPGA0_XD_RFTiming 0x81c
-#define rFPGA0_XA_HSSIParameter1 0x820
-#define rFPGA0_XA_HSSIParameter2 0x824
-#define rFPGA0_XB_HSSIParameter1 0x828
-#define rFPGA0_XB_HSSIParameter2 0x82c
-#define rFPGA0_XC_HSSIParameter1 0x830
-#define rFPGA0_XC_HSSIParameter2 0x834
-#define rFPGA0_XD_HSSIParameter1 0x838
-#define rFPGA0_XD_HSSIParameter2 0x83c
-#define rFPGA0_XA_LSSIParameter 0x840
-#define rFPGA0_XB_LSSIParameter 0x844
-#define rFPGA0_XC_LSSIParameter 0x848
-#define rFPGA0_XD_LSSIParameter 0x84c
-#define rFPGA0_RFWakeUpParameter 0x850
-#define rFPGA0_RFSleepUpParameter 0x854
-#define rFPGA0_XAB_SwitchControl 0x858
-#define rFPGA0_XCD_SwitchControl 0x85c
-#define rFPGA0_XA_RFInterfaceOE 0x860
-#define rFPGA0_XB_RFInterfaceOE 0x864
-#define rFPGA0_XC_RFInterfaceOE 0x868
-#define rFPGA0_XD_RFInterfaceOE 0x86c
-#define rFPGA0_XAB_RFInterfaceSW 0x870
-#define rFPGA0_XCD_RFInterfaceSW 0x874
-#define rFPGA0_XAB_RFParameter 0x878
-#define rFPGA0_XCD_RFParameter 0x87c
-#define rFPGA0_AnalogParameter1 0x880
-#define rFPGA0_AnalogParameter2 0x884
-#define rFPGA0_AnalogParameter3 0x888
-#define rFPGA0_AnalogParameter4 0x88c
-#define rFPGA0_XA_LSSIReadBack 0x8a0
-#define rFPGA0_XB_LSSIReadBack 0x8a4
-#define rFPGA0_XC_LSSIReadBack 0x8a8
-#define rFPGA0_XD_LSSIReadBack 0x8ac
-#define rFPGA0_PSDReport 0x8b4
-#define rFPGA0_XAB_RFInterfaceRB 0x8e0
-#define rFPGA0_XCD_RFInterfaceRB 0x8e4
+#define rFPGA0_RFMOD 0x800 //RF mode & CCK TxSC
+#define rFPGA0_TxInfo 0x804
+#define rFPGA0_PSDFunction 0x808
+#define rFPGA0_TxGainStage 0x80c
+#define rFPGA0_RFTiming1 0x810
+#define rFPGA0_RFTiming2 0x814
+//#define rFPGA0_XC_RFTiming 0x818
+//#define rFPGA0_XD_RFTiming 0x81c
+#define rFPGA0_XA_HSSIParameter1 0x820
+#define rFPGA0_XA_HSSIParameter2 0x824
+#define rFPGA0_XB_HSSIParameter1 0x828
+#define rFPGA0_XB_HSSIParameter2 0x82c
+#define rFPGA0_XC_HSSIParameter1 0x830
+#define rFPGA0_XC_HSSIParameter2 0x834
+#define rFPGA0_XD_HSSIParameter1 0x838
+#define rFPGA0_XD_HSSIParameter2 0x83c
+#define rFPGA0_XA_LSSIParameter 0x840
+#define rFPGA0_XB_LSSIParameter 0x844
+#define rFPGA0_XC_LSSIParameter 0x848
+#define rFPGA0_XD_LSSIParameter 0x84c
+#define rFPGA0_RFWakeUpParameter 0x850
+#define rFPGA0_RFSleepUpParameter 0x854
+#define rFPGA0_XAB_SwitchControl 0x858
+#define rFPGA0_XCD_SwitchControl 0x85c
+#define rFPGA0_XA_RFInterfaceOE 0x860
+#define rFPGA0_XB_RFInterfaceOE 0x864
+#define rFPGA0_XC_RFInterfaceOE 0x868
+#define rFPGA0_XD_RFInterfaceOE 0x86c
+#define rFPGA0_XAB_RFInterfaceSW 0x870
+#define rFPGA0_XCD_RFInterfaceSW 0x874
+#define rFPGA0_XAB_RFParameter 0x878
+#define rFPGA0_XCD_RFParameter 0x87c
+#define rFPGA0_AnalogParameter1 0x880
+#define rFPGA0_AnalogParameter2 0x884
+#define rFPGA0_AnalogParameter3 0x888
+#define rFPGA0_AnalogParameter4 0x88c
+#define rFPGA0_XA_LSSIReadBack 0x8a0
+#define rFPGA0_XB_LSSIReadBack 0x8a4
+#define rFPGA0_XC_LSSIReadBack 0x8a8
+#define rFPGA0_XD_LSSIReadBack 0x8ac
+#define rFPGA0_PSDReport 0x8b4
+#define rFPGA0_XAB_RFInterfaceRB 0x8e0
+#define rFPGA0_XCD_RFInterfaceRB 0x8e4
//page 9
-#define rFPGA1_RFMOD 0x900 //RF mode & OFDM TxSC
-#define rFPGA1_TxBlock 0x904
-#define rFPGA1_DebugSelect 0x908
-#define rFPGA1_TxInfo 0x90c
+#define rFPGA1_RFMOD 0x900 //RF mode & OFDM TxSC
+#define rFPGA1_TxBlock 0x904
+#define rFPGA1_DebugSelect 0x908
+#define rFPGA1_TxInfo 0x90c
//page a
-#define rCCK0_System 0xa00
-#define rCCK0_AFESetting 0xa04
-#define rCCK0_CCA 0xa08
-#define rCCK0_RxAGC1 0xa0c //AGC default value, saturation level
-#define rCCK0_RxAGC2 0xa10 //AGC & DAGC
-#define rCCK0_RxHP 0xa14
-#define rCCK0_DSPParameter1 0xa18 //Timing recovery & Channel estimation threshold
-#define rCCK0_DSPParameter2 0xa1c //SQ threshold
-#define rCCK0_TxFilter1 0xa20
-#define rCCK0_TxFilter2 0xa24
-#define rCCK0_DebugPort 0xa28 //debug port and Tx filter3
-#define rCCK0_FalseAlarmReport 0xa2c //0xa2d
-#define rCCK0_TRSSIReport 0xa50
-#define rCCK0_RxReport 0xa54 //0xa57
-#define rCCK0_FACounterLower 0xa5c //0xa5b
-#define rCCK0_FACounterUpper 0xa58 //0xa5c
+#define rCCK0_System 0xa00
+#define rCCK0_AFESetting 0xa04
+#define rCCK0_CCA 0xa08
+#define rCCK0_RxAGC1 0xa0c //AGC default value, saturation level
+#define rCCK0_RxAGC2 0xa10 //AGC & DAGC
+#define rCCK0_RxHP 0xa14
+#define rCCK0_DSPParameter1 0xa18 //Timing recovery & Channel estimation threshold
+#define rCCK0_DSPParameter2 0xa1c //SQ threshold
+#define rCCK0_TxFilter1 0xa20
+#define rCCK0_TxFilter2 0xa24
+#define rCCK0_DebugPort 0xa28 //debug port and Tx filter3
+#define rCCK0_FalseAlarmReport 0xa2c //0xa2d
+#define rCCK0_TRSSIReport 0xa50
+#define rCCK0_RxReport 0xa54 //0xa57
+#define rCCK0_FACounterLower 0xa5c //0xa5b
+#define rCCK0_FACounterUpper 0xa58 //0xa5c
//page c
-#define rOFDM0_LSTF 0xc00
-#define rOFDM0_TRxPathEnable 0xc04
-#define rOFDM0_TRMuxPar 0xc08
-#define rOFDM0_TRSWIsolation 0xc0c
-#define rOFDM0_XARxAFE 0xc10 //RxIQ DC offset, Rx digital filter, DC notch filter
-#define rOFDM0_XARxIQImbalance 0xc14 //RxIQ imblance matrix
-#define rOFDM0_XBRxAFE 0xc18
-#define rOFDM0_XBRxIQImbalance 0xc1c
-#define rOFDM0_XCRxAFE 0xc20
-#define rOFDM0_XCRxIQImbalance 0xc24
-#define rOFDM0_XDRxAFE 0xc28
-#define rOFDM0_XDRxIQImbalance 0xc2c
-#define rOFDM0_RxDetector1 0xc30 //PD,BW & SBD
-#define rOFDM0_RxDetector2 0xc34 //SBD & Fame Sync.
-#define rOFDM0_RxDetector3 0xc38 //Frame Sync.
-#define rOFDM0_RxDetector4 0xc3c //PD, SBD, Frame Sync & Short-GI
-#define rOFDM0_RxDSP 0xc40 //Rx Sync Path
-#define rOFDM0_CFOandDAGC 0xc44 //CFO & DAGC
-#define rOFDM0_CCADropThreshold 0xc48 //CCA Drop threshold
-#define rOFDM0_ECCAThreshold 0xc4c // energy CCA
-#define rOFDM0_XAAGCCore1 0xc50
-#define rOFDM0_XAAGCCore2 0xc54
-#define rOFDM0_XBAGCCore1 0xc58
-#define rOFDM0_XBAGCCore2 0xc5c
-#define rOFDM0_XCAGCCore1 0xc60
-#define rOFDM0_XCAGCCore2 0xc64
-#define rOFDM0_XDAGCCore1 0xc68
-#define rOFDM0_XDAGCCore2 0xc6c
-#define rOFDM0_AGCParameter1 0xc70
-#define rOFDM0_AGCParameter2 0xc74
-#define rOFDM0_AGCRSSITable 0xc78
-#define rOFDM0_HTSTFAGC 0xc7c
-#define rOFDM0_XATxIQImbalance 0xc80
-#define rOFDM0_XATxAFE 0xc84
-#define rOFDM0_XBTxIQImbalance 0xc88
-#define rOFDM0_XBTxAFE 0xc8c
-#define rOFDM0_XCTxIQImbalance 0xc90
-#define rOFDM0_XCTxAFE 0xc94
-#define rOFDM0_XDTxIQImbalance 0xc98
-#define rOFDM0_XDTxAFE 0xc9c
-#define rOFDM0_RxHPParameter 0xce0
-#define rOFDM0_TxPseudoNoiseWgt 0xce4
-#define rOFDM0_FrameSync 0xcf0
-#define rOFDM0_DFSReport 0xcf4
-#define rOFDM0_TxCoeff1 0xca4
-#define rOFDM0_TxCoeff2 0xca8
-#define rOFDM0_TxCoeff3 0xcac
-#define rOFDM0_TxCoeff4 0xcb0
-#define rOFDM0_TxCoeff5 0xcb4
-#define rOFDM0_TxCoeff6 0xcb8
+#define rOFDM0_LSTF 0xc00
+#define rOFDM0_TRxPathEnable 0xc04
+#define rOFDM0_TRMuxPar 0xc08
+#define rOFDM0_TRSWIsolation 0xc0c
+#define rOFDM0_XARxAFE 0xc10 //RxIQ DC offset, Rx digital filter, DC notch filter
+#define rOFDM0_XARxIQImbalance 0xc14 //RxIQ imblance matrix
+#define rOFDM0_XBRxAFE 0xc18
+#define rOFDM0_XBRxIQImbalance 0xc1c
+#define rOFDM0_XCRxAFE 0xc20
+#define rOFDM0_XCRxIQImbalance 0xc24
+#define rOFDM0_XDRxAFE 0xc28
+#define rOFDM0_XDRxIQImbalance 0xc2c
+#define rOFDM0_RxDetector1 0xc30 //PD,BW & SBD
+#define rOFDM0_RxDetector2 0xc34 //SBD & Fame Sync.
+#define rOFDM0_RxDetector3 0xc38 //Frame Sync.
+#define rOFDM0_RxDetector4 0xc3c //PD, SBD, Frame Sync & Short-GI
+#define rOFDM0_RxDSP 0xc40 //Rx Sync Path
+#define rOFDM0_CFOandDAGC 0xc44 //CFO & DAGC
+#define rOFDM0_CCADropThreshold 0xc48 //CCA Drop threshold
+#define rOFDM0_ECCAThreshold 0xc4c // energy CCA
+#define rOFDM0_XAAGCCore1 0xc50
+#define rOFDM0_XAAGCCore2 0xc54
+#define rOFDM0_XBAGCCore1 0xc58
+#define rOFDM0_XBAGCCore2 0xc5c
+#define rOFDM0_XCAGCCore1 0xc60
+#define rOFDM0_XCAGCCore2 0xc64
+#define rOFDM0_XDAGCCore1 0xc68
+#define rOFDM0_XDAGCCore2 0xc6c
+#define rOFDM0_AGCParameter1 0xc70
+#define rOFDM0_AGCParameter2 0xc74
+#define rOFDM0_AGCRSSITable 0xc78
+#define rOFDM0_HTSTFAGC 0xc7c
+#define rOFDM0_XATxIQImbalance 0xc80
+#define rOFDM0_XATxAFE 0xc84
+#define rOFDM0_XBTxIQImbalance 0xc88
+#define rOFDM0_XBTxAFE 0xc8c
+#define rOFDM0_XCTxIQImbalance 0xc90
+#define rOFDM0_XCTxAFE 0xc94
+#define rOFDM0_XDTxIQImbalance 0xc98
+#define rOFDM0_XDTxAFE 0xc9c
+#define rOFDM0_RxHPParameter 0xce0
+#define rOFDM0_TxPseudoNoiseWgt 0xce4
+#define rOFDM0_FrameSync 0xcf0
+#define rOFDM0_DFSReport 0xcf4
+#define rOFDM0_TxCoeff1 0xca4
+#define rOFDM0_TxCoeff2 0xca8
+#define rOFDM0_TxCoeff3 0xcac
+#define rOFDM0_TxCoeff4 0xcb0
+#define rOFDM0_TxCoeff5 0xcb4
+#define rOFDM0_TxCoeff6 0xcb8
//page d
-#define rOFDM1_LSTF 0xd00
-#define rOFDM1_TRxPathEnable 0xd04
-#define rOFDM1_CFO 0xd08
-#define rOFDM1_CSI1 0xd10
-#define rOFDM1_SBD 0xd14
-#define rOFDM1_CSI2 0xd18
-#define rOFDM1_CFOTracking 0xd2c
-#define rOFDM1_TRxMesaure1 0xd34
-#define rOFDM1_IntfDet 0xd3c
+#define rOFDM1_LSTF 0xd00
+#define rOFDM1_TRxPathEnable 0xd04
+#define rOFDM1_CFO 0xd08
+#define rOFDM1_CSI1 0xd10
+#define rOFDM1_SBD 0xd14
+#define rOFDM1_CSI2 0xd18
+#define rOFDM1_CFOTracking 0xd2c
+#define rOFDM1_TRxMesaure1 0xd34
+#define rOFDM1_IntfDet 0xd3c
#define rOFDM1_PseudoNoiseStateAB 0xd50
#define rOFDM1_PseudoNoiseStateCD 0xd54
#define rOFDM1_RxPseudoNoiseWgt 0xd58
-#define rOFDM_PHYCounter1 0xda0 //cca, parity fail
-#define rOFDM_PHYCounter2 0xda4 //rate illegal, crc8 fail
-#define rOFDM_PHYCounter3 0xda8 //MCS not support
-#define rOFDM_ShortCFOAB 0xdac
-#define rOFDM_ShortCFOCD 0xdb0
-#define rOFDM_LongCFOAB 0xdb4
-#define rOFDM_LongCFOCD 0xdb8
-#define rOFDM_TailCFOAB 0xdbc
-#define rOFDM_TailCFOCD 0xdc0
-#define rOFDM_PWMeasure1 0xdc4
-#define rOFDM_PWMeasure2 0xdc8
-#define rOFDM_BWReport 0xdcc
-#define rOFDM_AGCReport 0xdd0
-#define rOFDM_RxSNR 0xdd4
-#define rOFDM_RxEVMCSI 0xdd8
-#define rOFDM_SIGReport 0xddc
+#define rOFDM_PHYCounter1 0xda0 //cca, parity fail
+#define rOFDM_PHYCounter2 0xda4 //rate illegal, crc8 fail
+#define rOFDM_PHYCounter3 0xda8 //MCS not support
+#define rOFDM_ShortCFOAB 0xdac
+#define rOFDM_ShortCFOCD 0xdb0
+#define rOFDM_LongCFOAB 0xdb4
+#define rOFDM_LongCFOCD 0xdb8
+#define rOFDM_TailCFOAB 0xdbc
+#define rOFDM_TailCFOCD 0xdc0
+#define rOFDM_PWMeasure1 0xdc4
+#define rOFDM_PWMeasure2 0xdc8
+#define rOFDM_BWReport 0xdcc
+#define rOFDM_AGCReport 0xdd0
+#define rOFDM_RxSNR 0xdd4
+#define rOFDM_RxEVMCSI 0xdd8
+#define rOFDM_SIGReport 0xddc
//page e
#define rTxAGC_Rate18_06 0xe00
@@ -198,373 +198,373 @@
//RF
//Zebra1
-#define rZebra1_HSSIEnable 0x0
-#define rZebra1_TRxEnable1 0x1
-#define rZebra1_TRxEnable2 0x2
-#define rZebra1_AGC 0x4
-#define rZebra1_ChargePump 0x5
-#define rZebra1_Channel 0x7
-#define rZebra1_TxGain 0x8
-#define rZebra1_TxLPF 0x9
-#define rZebra1_RxLPF 0xb
-#define rZebra1_RxHPFCorner 0xc
+#define rZebra1_HSSIEnable 0x0
+#define rZebra1_TRxEnable1 0x1
+#define rZebra1_TRxEnable2 0x2
+#define rZebra1_AGC 0x4
+#define rZebra1_ChargePump 0x5
+#define rZebra1_Channel 0x7
+#define rZebra1_TxGain 0x8
+#define rZebra1_TxLPF 0x9
+#define rZebra1_RxLPF 0xb
+#define rZebra1_RxHPFCorner 0xc
//Zebra4
-#define rGlobalCtrl 0
-#define rRTL8256_TxLPF 19
-#define rRTL8256_RxLPF 11
+#define rGlobalCtrl 0
+#define rRTL8256_TxLPF 19
+#define rRTL8256_RxLPF 11
//RTL8258
-#define rRTL8258_TxLPF 0x11
-#define rRTL8258_RxLPF 0x13
-#define rRTL8258_RSSILPF 0xa
+#define rRTL8258_TxLPF 0x11
+#define rRTL8258_RxLPF 0x13
+#define rRTL8258_RSSILPF 0xa
//Bit Mask
//page-1
-#define bBBResetB 0x100
-#define bGlobalResetB 0x200
-#define bOFDMTxStart 0x4
-#define bCCKTxStart 0x8
-#define bCRC32Debug 0x100
-#define bPMACLoopback 0x10
-#define bTxLSIG 0xffffff
-#define bOFDMTxRate 0xf
-#define bOFDMTxReserved 0x10
-#define bOFDMTxLength 0x1ffe0
-#define bOFDMTxParity 0x20000
-#define bTxHTSIG1 0xffffff
-#define bTxHTMCSRate 0x7f
-#define bTxHTBW 0x80
-#define bTxHTLength 0xffff00
-#define bTxHTSIG2 0xffffff
-#define bTxHTSmoothing 0x1
-#define bTxHTSounding 0x2
-#define bTxHTReserved 0x4
-#define bTxHTAggreation 0x8
-#define bTxHTSTBC 0x30
-#define bTxHTAdvanceCoding 0x40
-#define bTxHTShortGI 0x80
-#define bTxHTNumberHT_LTF 0x300
-#define bTxHTCRC8 0x3fc00
-#define bCounterReset 0x10000
-#define bNumOfOFDMTx 0xffff
-#define bNumOfCCKTx 0xffff0000
-#define bTxIdleInterval 0xffff
-#define bOFDMService 0xffff0000
-#define bTxMACHeader 0xffffffff
-#define bTxDataInit 0xff
-#define bTxHTMode 0x100
-#define bTxDataType 0x30000
-#define bTxRandomSeed 0xffffffff
-#define bCCKTxPreamble 0x1
-#define bCCKTxSFD 0xffff0000
-#define bCCKTxSIG 0xff
-#define bCCKTxService 0xff00
-#define bCCKLengthExt 0x8000
-#define bCCKTxLength 0xffff0000
-#define bCCKTxCRC16 0xffff
-#define bCCKTxStatus 0x1
-#define bOFDMTxStatus 0x2
+#define bBBResetB 0x100
+#define bGlobalResetB 0x200
+#define bOFDMTxStart 0x4
+#define bCCKTxStart 0x8
+#define bCRC32Debug 0x100
+#define bPMACLoopback 0x10
+#define bTxLSIG 0xffffff
+#define bOFDMTxRate 0xf
+#define bOFDMTxReserved 0x10
+#define bOFDMTxLength 0x1ffe0
+#define bOFDMTxParity 0x20000
+#define bTxHTSIG1 0xffffff
+#define bTxHTMCSRate 0x7f
+#define bTxHTBW 0x80
+#define bTxHTLength 0xffff00
+#define bTxHTSIG2 0xffffff
+#define bTxHTSmoothing 0x1
+#define bTxHTSounding 0x2
+#define bTxHTReserved 0x4
+#define bTxHTAggreation 0x8
+#define bTxHTSTBC 0x30
+#define bTxHTAdvanceCoding 0x40
+#define bTxHTShortGI 0x80
+#define bTxHTNumberHT_LTF 0x300
+#define bTxHTCRC8 0x3fc00
+#define bCounterReset 0x10000
+#define bNumOfOFDMTx 0xffff
+#define bNumOfCCKTx 0xffff0000
+#define bTxIdleInterval 0xffff
+#define bOFDMService 0xffff0000
+#define bTxMACHeader 0xffffffff
+#define bTxDataInit 0xff
+#define bTxHTMode 0x100
+#define bTxDataType 0x30000
+#define bTxRandomSeed 0xffffffff
+#define bCCKTxPreamble 0x1
+#define bCCKTxSFD 0xffff0000
+#define bCCKTxSIG 0xff
+#define bCCKTxService 0xff00
+#define bCCKLengthExt 0x8000
+#define bCCKTxLength 0xffff0000
+#define bCCKTxCRC16 0xffff
+#define bCCKTxStatus 0x1
+#define bOFDMTxStatus 0x2
//page-8
-#define bRFMOD 0x1
-#define bJapanMode 0x2
-#define bCCKTxSC 0x30
-#define bCCKEn 0x1000000
-#define bOFDMEn 0x2000000
-#define bOFDMRxADCPhase 0x10000
-#define bOFDMTxDACPhase 0x40000
-#define bXATxAGC 0x3f
-#define bXBTxAGC 0xf00
-#define bXCTxAGC 0xf000
-#define bXDTxAGC 0xf0000
-#define bPAStart 0xf0000000
-#define bTRStart 0x00f00000
-#define bRFStart 0x0000f000
-#define bBBStart 0x000000f0
-#define bBBCCKStart 0x0000000f
-#define bPAEnd 0xf //Reg0x814
-#define bTREnd 0x0f000000
-#define bRFEnd 0x000f0000
-#define bCCAMask 0x000000f0 //T2R
-#define bR2RCCAMask 0x00000f00
-#define bHSSI_R2TDelay 0xf8000000
-#define bHSSI_T2RDelay 0xf80000
-#define bContTxHSSI 0x400 //chane gain at continue Tx
-#define bIGFromCCK 0x200
-#define bAGCAddress 0x3f
-#define bRxHPTx 0x7000
-#define bRxHPT2R 0x38000
-#define bRxHPCCKIni 0xc0000
-#define bAGCTxCode 0xc00000
-#define bAGCRxCode 0x300000
-#define b3WireDataLength 0x800
-#define b3WireAddressLength 0x400
-#define b3WireRFPowerDown 0x1
-//#define bHWSISelect 0x8
-#define b5GPAPEPolarity 0x40000000
-#define b2GPAPEPolarity 0x80000000
-#define bRFSW_TxDefaultAnt 0x3
-#define bRFSW_TxOptionAnt 0x30
-#define bRFSW_RxDefaultAnt 0x300
-#define bRFSW_RxOptionAnt 0x3000
-#define bRFSI_3WireData 0x1
-#define bRFSI_3WireClock 0x2
-#define bRFSI_3WireLoad 0x4
-#define bRFSI_3WireRW 0x8
-#define bRFSI_3Wire 0xf //3-wire total control
-#define bRFSI_RFENV 0x10
-#define bRFSI_TRSW 0x20
-#define bRFSI_TRSWB 0x40
-#define bRFSI_ANTSW 0x100
-#define bRFSI_ANTSWB 0x200
-#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
-#define bBandSelect 0x1
-#define bHTSIG2_GI 0x80
-#define bHTSIG2_Smoothing 0x01
-#define bHTSIG2_Sounding 0x02
-#define bHTSIG2_Aggreaton 0x08
-#define bHTSIG2_STBC 0x30
-#define bHTSIG2_AdvCoding 0x40
-#define bHTSIG2_NumOfHTLTF 0x300
-#define bHTSIG2_CRC8 0x3fc
-#define bHTSIG1_MCS 0x7f
-#define bHTSIG1_BandWidth 0x80
-#define bHTSIG1_HTLength 0xffff
-#define bLSIG_Rate 0xf
-#define bLSIG_Reserved 0x10
-#define bLSIG_Length 0x1fffe
-#define bLSIG_Parity 0x20
-#define bCCKRxPhase 0x4
-#define bLSSIReadAddress 0x3f000000 //LSSI "Read" Address
-#define bLSSIReadEdge 0x80000000 //LSSI "Read" edge signal
-#define bLSSIReadBackData 0xfff
-#define bLSSIReadOKFlag 0x1000
-#define bCCKSampleRate 0x8 //0: 44MHz, 1:88MHz
-
-#define bRegulator0Standby 0x1
-#define bRegulatorPLLStandby 0x2
-#define bRegulator1Standby 0x4
-#define bPLLPowerUp 0x8
-#define bDPLLPowerUp 0x10
-#define bDA10PowerUp 0x20
-#define bAD7PowerUp 0x200
-#define bDA6PowerUp 0x2000
-#define bXtalPowerUp 0x4000
-#define b40MDClkPowerUP 0x8000
-#define bDA6DebugMode 0x20000
-#define bDA6Swing 0x380000
-#define bADClkPhase 0x4000000
-#define b80MClkDelay 0x18000000
-#define bAFEWatchDogEnable 0x20000000
-#define bXtalCap 0x0f000000
-#define bIntDifClkEnable 0x400
-#define bExtSigClkEnable 0x800
-#define bBandgapMbiasPowerUp 0x10000
-#define bAD11SHGain 0xc0000
-#define bAD11InputRange 0x700000
-#define bAD11OPCurrent 0x3800000
-#define bIPathLoopback 0x4000000
-#define bQPathLoopback 0x8000000
-#define bAFELoopback 0x10000000
-#define bDA10Swing 0x7e0
-#define bDA10Reverse 0x800
-#define bDAClkSource 0x1000
-#define bAD7InputRange 0x6000
-#define bAD7Gain 0x38000
-#define bAD7OutputCMMode 0x40000
-#define bAD7InputCMMode 0x380000
-#define bAD7Current 0xc00000
-#define bRegulatorAdjust 0x7000000
-#define bAD11PowerUpAtTx 0x1
-#define bDA10PSAtTx 0x10
-#define bAD11PowerUpAtRx 0x100
-#define bDA10PSAtRx 0x1000
-
-#define bCCKRxAGCFormat 0x200
-
-#define bPSDFFTSamplepPoint 0xc000
-#define bPSDAverageNum 0x3000
-#define bIQPathControl 0xc00
-#define bPSDFreq 0x3ff
-#define bPSDAntennaPath 0x30
-#define bPSDIQSwitch 0x40
-#define bPSDRxTrigger 0x400000
-#define bPSDTxTrigger 0x80000000
-#define bPSDSineToneScale 0x7f000000
-#define bPSDReport 0xffff
+#define bRFMOD 0x1
+#define bJapanMode 0x2
+#define bCCKTxSC 0x30
+#define bCCKEn 0x1000000
+#define bOFDMEn 0x2000000
+#define bOFDMRxADCPhase 0x10000
+#define bOFDMTxDACPhase 0x40000
+#define bXATxAGC 0x3f
+#define bXBTxAGC 0xf00
+#define bXCTxAGC 0xf000
+#define bXDTxAGC 0xf0000
+#define bPAStart 0xf0000000
+#define bTRStart 0x00f00000
+#define bRFStart 0x0000f000
+#define bBBStart 0x000000f0
+#define bBBCCKStart 0x0000000f
+#define bPAEnd 0xf //Reg0x814
+#define bTREnd 0x0f000000
+#define bRFEnd 0x000f0000
+#define bCCAMask 0x000000f0 //T2R
+#define bR2RCCAMask 0x00000f00
+#define bHSSI_R2TDelay 0xf8000000
+#define bHSSI_T2RDelay 0xf80000
+#define bContTxHSSI 0x400 //chane gain at continue Tx
+#define bIGFromCCK 0x200
+#define bAGCAddress 0x3f
+#define bRxHPTx 0x7000
+#define bRxHPT2R 0x38000
+#define bRxHPCCKIni 0xc0000
+#define bAGCTxCode 0xc00000
+#define bAGCRxCode 0x300000
+#define b3WireDataLength 0x800
+#define b3WireAddressLength 0x400
+#define b3WireRFPowerDown 0x1
+//#define bHWSISelect 0x8
+#define b5GPAPEPolarity 0x40000000
+#define b2GPAPEPolarity 0x80000000
+#define bRFSW_TxDefaultAnt 0x3
+#define bRFSW_TxOptionAnt 0x30
+#define bRFSW_RxDefaultAnt 0x300
+#define bRFSW_RxOptionAnt 0x3000
+#define bRFSI_3WireData 0x1
+#define bRFSI_3WireClock 0x2
+#define bRFSI_3WireLoad 0x4
+#define bRFSI_3WireRW 0x8
+#define bRFSI_3Wire 0xf //3-wire total control
+#define bRFSI_RFENV 0x10
+#define bRFSI_TRSW 0x20
+#define bRFSI_TRSWB 0x40
+#define bRFSI_ANTSW 0x100
+#define bRFSI_ANTSWB 0x200
+#define bRFSI_PAPE 0x400
+#define bRFSI_PAPE5G 0x800
+#define bBandSelect 0x1
+#define bHTSIG2_GI 0x80
+#define bHTSIG2_Smoothing 0x01
+#define bHTSIG2_Sounding 0x02
+#define bHTSIG2_Aggreaton 0x08
+#define bHTSIG2_STBC 0x30
+#define bHTSIG2_AdvCoding 0x40
+#define bHTSIG2_NumOfHTLTF 0x300
+#define bHTSIG2_CRC8 0x3fc
+#define bHTSIG1_MCS 0x7f
+#define bHTSIG1_BandWidth 0x80
+#define bHTSIG1_HTLength 0xffff
+#define bLSIG_Rate 0xf
+#define bLSIG_Reserved 0x10
+#define bLSIG_Length 0x1fffe
+#define bLSIG_Parity 0x20
+#define bCCKRxPhase 0x4
+#define bLSSIReadAddress 0x3f000000 //LSSI "Read" Address
+#define bLSSIReadEdge 0x80000000 //LSSI "Read" edge signal
+#define bLSSIReadBackData 0xfff
+#define bLSSIReadOKFlag 0x1000
+#define bCCKSampleRate 0x8 //0: 44MHz, 1:88MHz
+
+#define bRegulator0Standby 0x1
+#define bRegulatorPLLStandby 0x2
+#define bRegulator1Standby 0x4
+#define bPLLPowerUp 0x8
+#define bDPLLPowerUp 0x10
+#define bDA10PowerUp 0x20
+#define bAD7PowerUp 0x200
+#define bDA6PowerUp 0x2000
+#define bXtalPowerUp 0x4000
+#define b40MDClkPowerUP 0x8000
+#define bDA6DebugMode 0x20000
+#define bDA6Swing 0x380000
+#define bADClkPhase 0x4000000
+#define b80MClkDelay 0x18000000
+#define bAFEWatchDogEnable 0x20000000
+#define bXtalCap 0x0f000000
+#define bIntDifClkEnable 0x400
+#define bExtSigClkEnable 0x800
+#define bBandgapMbiasPowerUp 0x10000
+#define bAD11SHGain 0xc0000
+#define bAD11InputRange 0x700000
+#define bAD11OPCurrent 0x3800000
+#define bIPathLoopback 0x4000000
+#define bQPathLoopback 0x8000000
+#define bAFELoopback 0x10000000
+#define bDA10Swing 0x7e0
+#define bDA10Reverse 0x800
+#define bDAClkSource 0x1000
+#define bAD7InputRange 0x6000
+#define bAD7Gain 0x38000
+#define bAD7OutputCMMode 0x40000
+#define bAD7InputCMMode 0x380000
+#define bAD7Current 0xc00000
+#define bRegulatorAdjust 0x7000000
+#define bAD11PowerUpAtTx 0x1
+#define bDA10PSAtTx 0x10
+#define bAD11PowerUpAtRx 0x100
+#define bDA10PSAtRx 0x1000
+
+#define bCCKRxAGCFormat 0x200
+
+#define bPSDFFTSamplepPoint 0xc000
+#define bPSDAverageNum 0x3000
+#define bIQPathControl 0xc00
+#define bPSDFreq 0x3ff
+#define bPSDAntennaPath 0x30
+#define bPSDIQSwitch 0x40
+#define bPSDRxTrigger 0x400000
+#define bPSDTxTrigger 0x80000000
+#define bPSDSineToneScale 0x7f000000
+#define bPSDReport 0xffff
//page-9
-#define bOFDMTxSC 0x30000000
-#define bCCKTxOn 0x1
-#define bOFDMTxOn 0x2
-#define bDebugPage 0xfff //reset debug page and also HWord, LWord
-#define bDebugItem 0xff //reset debug page and LWord
-#define bAntL 0x10
-#define bAntNonHT 0x100
-#define bAntHT1 0x1000
-#define bAntHT2 0x10000
-#define bAntHT1S1 0x100000
-#define bAntNonHTS1 0x1000000
+#define bOFDMTxSC 0x30000000
+#define bCCKTxOn 0x1
+#define bOFDMTxOn 0x2
+#define bDebugPage 0xfff //reset debug page and also HWord, LWord
+#define bDebugItem 0xff //reset debug page and LWord
+#define bAntL 0x10
+#define bAntNonHT 0x100
+#define bAntHT1 0x1000
+#define bAntHT2 0x10000
+#define bAntHT1S1 0x100000
+#define bAntNonHTS1 0x1000000
//page-a
-#define bCCKBBMode 0x3
-#define bCCKTxPowerSaving 0x80
-#define bCCKRxPowerSaving 0x40
-#define bCCKSideBand 0x10
-#define bCCKScramble 0x8
-#define bCCKAntDiversity 0x8000
-#define bCCKCarrierRecovery 0x4000
-#define bCCKTxRate 0x3000
-#define bCCKDCCancel 0x0800
-#define bCCKISICancel 0x0400
-#define bCCKMatchFilter 0x0200
-#define bCCKEqualizer 0x0100
-#define bCCKPreambleDetect 0x800000
-#define bCCKFastFalseCCA 0x400000
-#define bCCKChEstStart 0x300000
-#define bCCKCCACount 0x080000
-#define bCCKcs_lim 0x070000
-#define bCCKBistMode 0x80000000
-#define bCCKCCAMask 0x40000000
-#define bCCKTxDACPhase 0x4
-#define bCCKRxADCPhase 0x20000000 //r_rx_clk
-#define bCCKr_cp_mode0 0x0100
-#define bCCKTxDCOffset 0xf0
-#define bCCKRxDCOffset 0xf
-#define bCCKCCAMode 0xc000
-#define bCCKFalseCS_lim 0x3f00
-#define bCCKCS_ratio 0xc00000
-#define bCCKCorgBit_sel 0x300000
-#define bCCKPD_lim 0x0f0000
-#define bCCKNewCCA 0x80000000
-#define bCCKRxHPofIG 0x8000
-#define bCCKRxIG 0x7f00
-#define bCCKLNAPolarity 0x800000
-#define bCCKRx1stGain 0x7f0000
-#define bCCKRFExtend 0x20000000 //CCK Rx initial gain polarity
-#define bCCKRxAGCSatLevel 0x1f000000
-#define bCCKRxAGCSatCount 0xe0
-#define bCCKRxRFSettle 0x1f //AGCsamp_dly
-#define bCCKFixedRxAGC 0x8000
-//#define bCCKRxAGCFormat 0x4000 //remove to HSSI register 0x824
-#define bCCKAntennaPolarity 0x2000
-#define bCCKTxFilterType 0x0c00
-#define bCCKRxAGCReportType 0x0300
-#define bCCKRxDAGCEn 0x80000000
-#define bCCKRxDAGCPeriod 0x20000000
-#define bCCKRxDAGCSatLevel 0x1f000000
-#define bCCKTimingRecovery 0x800000
-#define bCCKTxC0 0x3f0000
-#define bCCKTxC1 0x3f000000
-#define bCCKTxC2 0x3f
-#define bCCKTxC3 0x3f00
-#define bCCKTxC4 0x3f0000
-#define bCCKTxC5 0x3f000000
-#define bCCKTxC6 0x3f
-#define bCCKTxC7 0x3f00
-#define bCCKDebugPort 0xff0000
-#define bCCKDACDebug 0x0f000000
-#define bCCKFalseAlarmEnable 0x8000
-#define bCCKFalseAlarmRead 0x4000
-#define bCCKTRSSI 0x7f
-#define bCCKRxAGCReport 0xfe
-#define bCCKRxReport_AntSel 0x80000000
-#define bCCKRxReport_MFOff 0x40000000
-#define bCCKRxRxReport_SQLoss 0x20000000
-#define bCCKRxReport_Pktloss 0x10000000
-#define bCCKRxReport_Lockedbit 0x08000000
-#define bCCKRxReport_RateError 0x04000000
-#define bCCKRxReport_RxRate 0x03000000
-#define bCCKRxFACounterLower 0xff
-#define bCCKRxFACounterUpper 0xff000000
-#define bCCKRxHPAGCStart 0xe000
-#define bCCKRxHPAGCFinal 0x1c00
-
-#define bCCKRxFalseAlarmEnable 0x8000
-#define bCCKFACounterFreeze 0x4000
-
-#define bCCKTxPathSel 0x10000000
-#define bCCKDefaultRxPath 0xc000000
-#define bCCKOptionRxPath 0x3000000
+#define bCCKBBMode 0x3
+#define bCCKTxPowerSaving 0x80
+#define bCCKRxPowerSaving 0x40
+#define bCCKSideBand 0x10
+#define bCCKScramble 0x8
+#define bCCKAntDiversity 0x8000
+#define bCCKCarrierRecovery 0x4000
+#define bCCKTxRate 0x3000
+#define bCCKDCCancel 0x0800
+#define bCCKISICancel 0x0400
+#define bCCKMatchFilter 0x0200
+#define bCCKEqualizer 0x0100
+#define bCCKPreambleDetect 0x800000
+#define bCCKFastFalseCCA 0x400000
+#define bCCKChEstStart 0x300000
+#define bCCKCCACount 0x080000
+#define bCCKcs_lim 0x070000
+#define bCCKBistMode 0x80000000
+#define bCCKCCAMask 0x40000000
+#define bCCKTxDACPhase 0x4
+#define bCCKRxADCPhase 0x20000000 //r_rx_clk
+#define bCCKr_cp_mode0 0x0100
+#define bCCKTxDCOffset 0xf0
+#define bCCKRxDCOffset 0xf
+#define bCCKCCAMode 0xc000
+#define bCCKFalseCS_lim 0x3f00
+#define bCCKCS_ratio 0xc00000
+#define bCCKCorgBit_sel 0x300000
+#define bCCKPD_lim 0x0f0000
+#define bCCKNewCCA 0x80000000
+#define bCCKRxHPofIG 0x8000
+#define bCCKRxIG 0x7f00
+#define bCCKLNAPolarity 0x800000
+#define bCCKRx1stGain 0x7f0000
+#define bCCKRFExtend 0x20000000 //CCK Rx initial gain polarity
+#define bCCKRxAGCSatLevel 0x1f000000
+#define bCCKRxAGCSatCount 0xe0
+#define bCCKRxRFSettle 0x1f //AGCsamp_dly
+#define bCCKFixedRxAGC 0x8000
+//#define bCCKRxAGCFormat 0x4000 //remove to HSSI register 0x824
+#define bCCKAntennaPolarity 0x2000
+#define bCCKTxFilterType 0x0c00
+#define bCCKRxAGCReportType 0x0300
+#define bCCKRxDAGCEn 0x80000000
+#define bCCKRxDAGCPeriod 0x20000000
+#define bCCKRxDAGCSatLevel 0x1f000000
+#define bCCKTimingRecovery 0x800000
+#define bCCKTxC0 0x3f0000
+#define bCCKTxC1 0x3f000000
+#define bCCKTxC2 0x3f
+#define bCCKTxC3 0x3f00
+#define bCCKTxC4 0x3f0000
+#define bCCKTxC5 0x3f000000
+#define bCCKTxC6 0x3f
+#define bCCKTxC7 0x3f00
+#define bCCKDebugPort 0xff0000
+#define bCCKDACDebug 0x0f000000
+#define bCCKFalseAlarmEnable 0x8000
+#define bCCKFalseAlarmRead 0x4000
+#define bCCKTRSSI 0x7f
+#define bCCKRxAGCReport 0xfe
+#define bCCKRxReport_AntSel 0x80000000
+#define bCCKRxReport_MFOff 0x40000000
+#define bCCKRxRxReport_SQLoss 0x20000000
+#define bCCKRxReport_Pktloss 0x10000000
+#define bCCKRxReport_Lockedbit 0x08000000
+#define bCCKRxReport_RateError 0x04000000
+#define bCCKRxReport_RxRate 0x03000000
+#define bCCKRxFACounterLower 0xff
+#define bCCKRxFACounterUpper 0xff000000
+#define bCCKRxHPAGCStart 0xe000
+#define bCCKRxHPAGCFinal 0x1c00
+
+#define bCCKRxFalseAlarmEnable 0x8000
+#define bCCKFACounterFreeze 0x4000
+
+#define bCCKTxPathSel 0x10000000
+#define bCCKDefaultRxPath 0xc000000
+#define bCCKOptionRxPath 0x3000000
//page c
-#define bNumOfSTF 0x3
-#define bShift_L 0xc0
-#define bGI_TH 0xc
-#define bRxPathA 0x1
-#define bRxPathB 0x2
-#define bRxPathC 0x4
-#define bRxPathD 0x8
-#define bTxPathA 0x1
-#define bTxPathB 0x2
-#define bTxPathC 0x4
-#define bTxPathD 0x8
-#define bTRSSIFreq 0x200
-#define bADCBackoff 0x3000
-#define bDFIRBackoff 0xc000
-#define bTRSSILatchPhase 0x10000
-#define bRxIDCOffset 0xff
-#define bRxQDCOffset 0xff00
-#define bRxDFIRMode 0x1800000
-#define bRxDCNFType 0xe000000
-#define bRXIQImb_A 0x3ff
-#define bRXIQImb_B 0xfc00
-#define bRXIQImb_C 0x3f0000
-#define bRXIQImb_D 0xffc00000
-#define bDC_dc_Notch 0x60000
-#define bRxNBINotch 0x1f000000
-#define bPD_TH 0xf
-#define bPD_TH_Opt2 0xc000
-#define bPWED_TH 0x700
-#define bIfMF_Win_L 0x800
-#define bPD_Option 0x1000
-#define bMF_Win_L 0xe000
-#define bBW_Search_L 0x30000
-#define bwin_enh_L 0xc0000
-#define bBW_TH 0x700000
-#define bED_TH2 0x3800000
-#define bBW_option 0x4000000
-#define bRatio_TH 0x18000000
-#define bWindow_L 0xe0000000
-#define bSBD_Option 0x1
-#define bFrame_TH 0x1c
-#define bFS_Option 0x60
-#define bDC_Slope_check 0x80
-#define bFGuard_Counter_DC_L 0xe00
-#define bFrame_Weight_Short 0x7000
-#define bSub_Tune 0xe00000
-#define bFrame_DC_Length 0xe000000
-#define bSBD_start_offset 0x30000000
-#define bFrame_TH_2 0x7
-#define bFrame_GI2_TH 0x38
-#define bGI2_Sync_en 0x40
-#define bSarch_Short_Early 0x300
-#define bSarch_Short_Late 0xc00
-#define bSarch_GI2_Late 0x70000
-#define bCFOAntSum 0x1
-#define bCFOAcc 0x2
-#define bCFOStartOffset 0xc
-#define bCFOLookBack 0x70
-#define bCFOSumWeight 0x80
-#define bDAGCEnable 0x10000
-#define bTXIQImb_A 0x3ff
-#define bTXIQImb_B 0xfc00
-#define bTXIQImb_C 0x3f0000
-#define bTXIQImb_D 0xffc00000
-#define bTxIDCOffset 0xff
-#define bTxQDCOffset 0xff00
-#define bTxDFIRMode 0x10000
-#define bTxPesudoNoiseOn 0x4000000
-#define bTxPesudoNoise_A 0xff
-#define bTxPesudoNoise_B 0xff00
-#define bTxPesudoNoise_C 0xff0000
-#define bTxPesudoNoise_D 0xff000000
-#define bCCADropOption 0x20000
-#define bCCADropThres 0xfff00000
-#define bEDCCA_H 0xf
-#define bEDCCA_L 0xf0
+#define bNumOfSTF 0x3
+#define bShift_L 0xc0
+#define bGI_TH 0xc
+#define bRxPathA 0x1
+#define bRxPathB 0x2
+#define bRxPathC 0x4
+#define bRxPathD 0x8
+#define bTxPathA 0x1
+#define bTxPathB 0x2
+#define bTxPathC 0x4
+#define bTxPathD 0x8
+#define bTRSSIFreq 0x200
+#define bADCBackoff 0x3000
+#define bDFIRBackoff 0xc000
+#define bTRSSILatchPhase 0x10000
+#define bRxIDCOffset 0xff
+#define bRxQDCOffset 0xff00
+#define bRxDFIRMode 0x1800000
+#define bRxDCNFType 0xe000000
+#define bRXIQImb_A 0x3ff
+#define bRXIQImb_B 0xfc00
+#define bRXIQImb_C 0x3f0000
+#define bRXIQImb_D 0xffc00000
+#define bDC_dc_Notch 0x60000
+#define bRxNBINotch 0x1f000000
+#define bPD_TH 0xf
+#define bPD_TH_Opt2 0xc000
+#define bPWED_TH 0x700
+#define bIfMF_Win_L 0x800
+#define bPD_Option 0x1000
+#define bMF_Win_L 0xe000
+#define bBW_Search_L 0x30000
+#define bwin_enh_L 0xc0000
+#define bBW_TH 0x700000
+#define bED_TH2 0x3800000
+#define bBW_option 0x4000000
+#define bRatio_TH 0x18000000
+#define bWindow_L 0xe0000000
+#define bSBD_Option 0x1
+#define bFrame_TH 0x1c
+#define bFS_Option 0x60
+#define bDC_Slope_check 0x80
+#define bFGuard_Counter_DC_L 0xe00
+#define bFrame_Weight_Short 0x7000
+#define bSub_Tune 0xe00000
+#define bFrame_DC_Length 0xe000000
+#define bSBD_start_offset 0x30000000
+#define bFrame_TH_2 0x7
+#define bFrame_GI2_TH 0x38
+#define bGI2_Sync_en 0x40
+#define bSarch_Short_Early 0x300
+#define bSarch_Short_Late 0xc00
+#define bSarch_GI2_Late 0x70000
+#define bCFOAntSum 0x1
+#define bCFOAcc 0x2
+#define bCFOStartOffset 0xc
+#define bCFOLookBack 0x70
+#define bCFOSumWeight 0x80
+#define bDAGCEnable 0x10000
+#define bTXIQImb_A 0x3ff
+#define bTXIQImb_B 0xfc00
+#define bTXIQImb_C 0x3f0000
+#define bTXIQImb_D 0xffc00000
+#define bTxIDCOffset 0xff
+#define bTxQDCOffset 0xff00
+#define bTxDFIRMode 0x10000
+#define bTxPesudoNoiseOn 0x4000000
+#define bTxPesudoNoise_A 0xff
+#define bTxPesudoNoise_B 0xff00
+#define bTxPesudoNoise_C 0xff0000
+#define bTxPesudoNoise_D 0xff000000
+#define bCCADropOption 0x20000
+#define bCCADropThres 0xfff00000
+#define bEDCCA_H 0xf
+#define bEDCCA_L 0xf0
#define bLambda_ED 0x300
#define bRxInitialGain 0x7f
#define bRxAntDivEn 0x80
@@ -862,10 +862,10 @@
#define PathD 0x3
#define rRTL8256RxMixerPole 0xb
-#define bZebraRxMixerPole 0x6
-#define rRTL8256TxBBOPBias 0x9
-#define bRTL8256TxBBOPBias 0x400
-#define rRTL8256TxBBBW 19
-#define bRTL8256TxBBBW 0x18
+#define bZebraRxMixerPole 0x6
+#define rRTL8256TxBBOPBias 0x9
+#define bRTL8256TxBBOPBias 0x400
+#define rRTL8256TxBBBW 19
+#define bRTL8256TxBBBW 0x18
#endif //__INC_HAL8190PCIPHYREG_H
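The register offsets and b*-prefixed bitmasks defined above are consumed through read-modify-write helpers: the mask selects a field inside a 32-bit baseband register, and data is shifted up to the mask's lowest set bit. A minimal sketch of that arithmetic (the driver's own register accessors handle the actual I/O; the helper names here are illustrative):

	#include <linux/bitops.h>	/* ffs() */
	#include <linux/types.h>

	static u32 bb_field_get(u32 reg_val, u32 bitmask)
	{
		/* Extract the masked field and shift it down to bit 0. */
		return (reg_val & bitmask) >> (ffs(bitmask) - 1);
	}

	static u32 bb_field_set(u32 reg_val, u32 bitmask, u32 data)
	{
		/* Replace only the field selected by the mask. */
		return (reg_val & ~bitmask) | ((data << (ffs(bitmask) - 1)) & bitmask);
	}
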
diff --git a/drivers/staging/rtl8712/ethernet.h b/drivers/staging/rtl8712/ethernet.h
index 90954203776d..fad173f4097e 100644
--- a/drivers/staging/rtl8712/ethernet.h
+++ b/drivers/staging/rtl8712/ethernet.h
@@ -26,15 +26,8 @@
#ifndef __INC_ETHERNET_H
#define __INC_ETHERNET_H
-#define ETHERNET_ADDRESS_LENGTH 6 /*!< Ethernet Address Length*/
#define ETHERNET_HEADER_SIZE 14 /*!< Ethernet Header Length*/
#define LLC_HEADER_SIZE 6 /*!< LLC Header Length*/
-#define TYPE_LENGTH_FIELD_SIZE 2 /*!< Type/Length Size*/
-#define MINIMUM_ETHERNET_PACKET_SIZE 60 /*!< Min Ethernet Packet Size*/
-#define MAXIMUM_ETHERNET_PACKET_SIZE 1514 /*!< Max Ethernet Packet Size*/
-
-/*!< Is Multicast Address? */
-#define RT_ETH_IS_MULTICAST(_pAddr) ((((u8 *)(_pAddr))[0]&0x01) != 0)
#endif /* #ifndef __INC_ETHERNET_H */
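The macros removed above duplicate generic kernel facilities: ETHERNET_ADDRESS_LENGTH is ETH_ALEN, and RT_ETH_IS_MULTICAST() matches is_multicast_ether_addr(), which tests bit 0 of the first address octet. A minimal sketch of the generic replacement:

	#include <linux/etherdevice.h>	/* ETH_ALEN, is_multicast_ether_addr() */

	static bool dest_is_multicast(const u8 addr[ETH_ALEN])
	{
		/* Equivalent to the old RT_ETH_IS_MULTICAST(): addr[0] & 0x01. */
		return is_multicast_ether_addr(addr);
	}
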
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index cb9d4cfe8fe4..d801c5af006a 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -49,7 +49,7 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
if (!firmware) {
struct usb_device *udev = padapter->dvobjpriv.pusbdev;
struct usb_interface *pusb_intf = padapter->pusb_intf;
- printk(KERN_ERR "r8712u: Firmware request failed\n");
+ dev_err(&udev->dev, "r8712u: Firmware request failed\n");
padapter->fw_found = false;
usb_put_dev(udev);
usb_set_intfdata(pusb_intf, NULL);
@@ -69,12 +69,11 @@ int rtl871x_load_fw(struct _adapter *padapter)
int rc;
init_completion(&padapter->rtl8712_fw_ready);
- printk(KERN_INFO "r8712u: Loading firmware from \"%s\"\n",
- firmware_file);
+ dev_info(dev, "r8712u: Loading firmware from \"%s\"\n", firmware_file);
rc = request_firmware_nowait(THIS_MODULE, 1, firmware_file, dev,
GFP_KERNEL, padapter, rtl871x_load_fw_cb);
if (rc)
- printk(KERN_ERR "r8712u: Firmware request error %d\n", rc);
+ dev_err(dev, "r8712u: Firmware request error %d\n", rc);
return rc;
}
MODULE_FIRMWARE("rtlwifi/rtl8712u.bin");
@@ -84,8 +83,8 @@ static u32 rtl871x_open_fw(struct _adapter *padapter, const u8 **ppmappedfw)
const struct firmware **praw = &padapter->fw;
if (padapter->fw->size > 200000) {
- printk(KERN_ERR "r8172u: Badfw->size of %d\n",
- (int)padapter->fw->size);
+ dev_err(&padapter->pnetdev->dev, "r8172u: Badfw->size of %d\n",
+ (int)padapter->fw->size);
return 0;
}
*ppmappedfw = (u8 *)((*praw)->data);
@@ -334,11 +333,13 @@ uint rtl8712_hal_init(struct _adapter *padapter)
if (rtl8712_dl_fw(padapter) != _SUCCESS)
return _FAIL;
- printk(KERN_INFO "r8712u: 1 RCR=0x%x\n", r8712_read32(padapter, RCR));
+ netdev_info(padapter->pnetdev, "1 RCR=0x%x\n",
+ r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
r8712_write32(padapter, RCR, (val32 | BIT(26))); /* Enable RX TCP
Checksum offload */
- printk(KERN_INFO "r8712u: 2 RCR=0x%x\n", r8712_read32(padapter, RCR));
+ netdev_info(padapter->pnetdev, "2 RCR=0x%x\n",
+ r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
r8712_write32(padapter, RCR, (val32|BIT(25))); /* Append PHY status */
val32 = 0;
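The printk() conversions above all follow one pattern: pick the nearest struct device or net_device and use the corresponding dev_*()/netdev_*() helper so each message is prefixed with the device name instead of a hand-written "r8712u:" tag. A minimal sketch with illustrative arguments:

	#include <linux/device.h>
	#include <linux/netdevice.h>

	static void report_fw_state(struct device *dev, struct net_device *ndev,
				    int rc, u32 rcr)
	{
		if (rc)
			dev_err(dev, "Firmware request error %d\n", rc);	/* "usb 1-1.2: ..." prefix */
		netdev_info(ndev, "RCR=0x%x\n", rcr);				/* "wlan0: ..." prefix */
	}
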
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index 21515c31b373..da4000e49da6 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -777,7 +777,7 @@ extern inline int ieee80211_get_hdrlen(u16 fc)
struct registry_priv;
u8 *r8712_set_ie(u8 *pbuf, sint index, uint len, u8 *source, uint *frlen);
-u8 *r8712_get_ie(u8*pbuf, sint index, sint *len, sint limit);
+u8 *r8712_get_ie(u8 *pbuf, sint index, sint *len, sint limit);
unsigned char *r8712_get_wpa_ie(unsigned char *pie, int *rsn_ie_len, int limit);
unsigned char *r8712_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len,
int limit);
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index 7279854c86aa..f569a7081be5 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -106,8 +106,6 @@ void r8712_os_indicate_disconnect(struct _adapter *adapter)
* disconnect with AP for 60 seconds.
*/
- memset(&backupPMKIDList[0], 0x00, sizeof(
- struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
memcpy(&backupPMKIDList[0], &adapter->securitypriv.
PMKIDList[0], sizeof(struct RT_PMKID_LIST) *
NUM_PMKID_CACHE);
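The memset() dropped above was redundant because the memcpy() immediately after it writes the entire region, so zero-filling first has no observable effect; the same reasoning applies to the rtl871x_cmd.c hunk further down. A minimal sketch of the pattern:

	#include <linux/string.h>

	static void copy_whole_region(void *dst, const void *src, size_t len)
	{
		/* A memset(dst, 0, len) here would be dead work: */
		memcpy(dst, src, len);	/* every byte of dst is overwritten */
	}
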
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index e00f7918d261..b65bf5e177a8 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -224,8 +224,7 @@ struct net_device *r8712_init_netdev(void)
}
padapter = (struct _adapter *) netdev_priv(pnetdev);
padapter->pnetdev = pnetdev;
- printk(KERN_INFO "r8712u: register rtl8712_netdev_ops to"
- " netdev_ops\n");
+ pr_info("r8712u: register rtl8712_netdev_ops to netdev_ops\n");
pnetdev->netdev_ops = &rtl8712_netdev_ops;
pnetdev->watchdog_timeo = HZ; /* 1 second timeout */
pnetdev->wireless_handlers = (struct iw_handler_def *)
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index c76732cdb183..d59a74aa3048 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -115,11 +115,11 @@ void r8712_free_recv_priv(struct recv_priv *precvpriv)
kfree(precvpriv->pallocated_recv_buf);
skb_queue_purge(&precvpriv->rx_skb_queue);
if (skb_queue_len(&precvpriv->rx_skb_queue))
- printk(KERN_WARNING "r8712u: rx_skb_queue not empty\n");
+ netdev_warn(padapter->pnetdev, "r8712u: rx_skb_queue not empty\n");
skb_queue_purge(&precvpriv->free_recv_skb_queue);
if (skb_queue_len(&precvpriv->free_recv_skb_queue))
- printk(KERN_WARNING "r8712u: free_recv_skb_queue not empty "
- "%d\n", skb_queue_len(&precvpriv->free_recv_skb_queue));
+ netdev_warn(padapter->pnetdev, "r8712u: free_recv_skb_queue not empty %d\n",
+ skb_queue_len(&precvpriv->free_recv_skb_queue));
}
int r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
@@ -364,9 +364,8 @@ static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
nSubframe_Length = (nSubframe_Length >> 8) +
(nSubframe_Length << 8);
if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
- printk(KERN_WARNING "r8712u: nRemain_Length is %d and"
- " nSubframe_Length is: %d\n",
- a_len, nSubframe_Length);
+ netdev_warn(padapter->pnetdev, "r8712u: nRemain_Length is %d and nSubframe_Length is: %d\n",
+ a_len, nSubframe_Length);
goto exit;
}
/* move the data point to data content */
@@ -381,8 +380,7 @@ static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
memcpy(data_ptr, pdata, nSubframe_Length);
subframes[nr_subframes++] = sub_skb;
if (nr_subframes >= MAX_SUBFRAME_COUNT) {
- printk(KERN_WARNING "r8712u: ParseSubframe(): Too"
- " many Subframes! Packets dropped!\n");
+ netdev_warn(padapter->pnetdev, "r8712u: ParseSubframe(): Too many Subframes! Packets dropped!\n");
break;
}
pdata += nSubframe_Length;
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 31f31dbf7f31..f16307f5d827 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -525,7 +525,6 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
kfree(pcmd);
return _FAIL;
}
- memset(psecnetwork, 0, t_len);
memcpy(psecnetwork, &pnetwork->network, t_len);
auth = &psecuritypriv->authenticator_ie[0];
psecuritypriv->authenticator_ie[0] = (unsigned char)
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 9d93189d8700..0ce79b1c4ee2 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -749,7 +749,7 @@ u8 r8712_setopmode_cmd(struct _adapter *padapter,
u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset);
u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan);
u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset);
-u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 * pval);
+u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval);
u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode);
u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val);
u8 r8712_setrttbl_cmd(struct _adapter *padapter,
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 3a6479064519..f034567122d9 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -188,8 +188,7 @@ static inline char *translate_scan(struct _adapter *padapter,
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
- iwe.u.data.length = (u16)min((u16)pnetwork->network.Ssid.SsidLength,
- (u16)32);
+ iwe.u.data.length = min_t(u32, pnetwork->network.Ssid.SsidLength, 32);
start = iwe_stream_add_point(info, start, stop, &iwe,
pnetwork->network.Ssid.Ssid);
/* parsing HT_CAP_IE */
@@ -415,8 +414,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
} else
return -EINVAL;
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- printk(KERN_INFO "r8712u: wpa_set_encryption, crypt.alg ="
- " WEP\n");
+ netdev_info(dev, "r8712u: %s: crypt.alg = WEP\n", __func__);
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.PrivacyAlgrthm = _WEP40_;
@@ -608,8 +606,7 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
if ((eid == _VENDOR_SPECIFIC_IE_) &&
(!memcmp(&buf[cnt+2], wps_oui, 4))) {
- printk(KERN_INFO "r8712u: "
- "SET WPS_IE\n");
+ netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE\n");
padapter->securitypriv.wps_ie_len =
((buf[cnt+1] + 2) <
(MAX_WPA_IE_LEN << 2)) ?
@@ -620,8 +617,7 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
padapter->securitypriv.wps_ie_len);
padapter->securitypriv.wps_phase =
true;
- printk(KERN_INFO "r8712u: SET WPS_IE,"
- " wps_phase==true\n");
+ netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n");
cnt += buf[cnt+1]+2;
break;
} else
@@ -829,8 +825,8 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => rewrite
* with new PMKID. */
- printk(KERN_INFO "r8712u: r871x_wx_set_pmkid:"
- " BSSID exists in the PMKList.\n");
+ netdev_info(dev, "r8712u: %s: BSSID exists in the PMKList.\n",
+ __func__);
memcpy(psecuritypriv->PMKIDList[j].PMKID,
pPMK->pmkid, IW_PMKID_LEN);
psecuritypriv->PMKIDList[j].bUsed = true;
@@ -841,9 +837,8 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
}
if (!blInserted) {
/* Find a new entry */
- printk(KERN_INFO "r8712u: r871x_wx_set_pmkid: Use the"
- " new entry index = %d for this PMKID.\n",
- psecuritypriv->PMKIDIndex);
+ netdev_info(dev, "r8712u: %s: Use the new entry index = %d for this PMKID.\n",
+ __func__, psecuritypriv->PMKIDIndex);
memcpy(psecuritypriv->PMKIDList[psecuritypriv->
PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
memcpy(psecuritypriv->PMKIDList[psecuritypriv->
@@ -876,8 +871,7 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
intReturn = true;
break;
default:
- printk(KERN_INFO "r8712u: r871x_wx_set_pmkid: "
- "unknown Command\n");
+ netdev_info(dev, "r8712u: %s: unknown Command\n", __func__);
intReturn = false;
break;
}
@@ -1045,8 +1039,8 @@ static int r871x_wx_set_priv(struct net_device *dev,
);
sprintf(ext, "OK");
} else {
- printk(KERN_INFO "r8712u: r871x_wx_set_priv: unknown Command"
- " %s.\n", ext);
+ netdev_info(dev, "r8712u: %s: unknown Command %s.\n",
+ __func__, ext);
goto FREE_EXT;
}
if (copy_to_user(dwrq->pointer, ext,
@@ -1131,11 +1125,11 @@ static int r8711_wx_get_wap(struct net_device *dev,
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
- memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
- if (check_fwstate(pmlmepriv, _FW_LINKED |
- WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE |
+ WIFI_AP_STATE))
memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN);
- }
+ else
+ memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
return 0;
}
@@ -1183,8 +1177,8 @@ static int r8711_wx_set_scan(struct net_device *dev,
u8 status = true;
if (padapter->bDriverStopped == true) {
- printk(KERN_WARNING "r8712u: in r8711_wx_set_scan: "
- "bDriverStopped=%d\n", padapter->bDriverStopped);
+ netdev_info(dev, "In %s: bDriverStopped=%d\n",
+ __func__, padapter->bDriverStopped);
return -1;
}
if (padapter->bup == false)
@@ -1199,8 +1193,7 @@ static int r8711_wx_set_scan(struct net_device *dev,
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
struct ndis_802_11_ssid ssid;
unsigned long irqL;
- u32 len = (u32) min((u8)req->essid_len,
- (u8)IW_ESSID_MAX_SIZE);
+ u32 len = min_t(u8, req->essid_len, IW_ESSID_MAX_SIZE);
memset((unsigned char *)&ssid, 0,
sizeof(struct ndis_802_11_ssid));
memcpy(ssid.Ssid, req->essid, len);
@@ -1556,8 +1549,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
key = erq->flags & IW_ENCODE_INDEX;
memset(&wep, 0, sizeof(struct NDIS_802_11_WEP));
if (erq->flags & IW_ENCODE_DISABLED) {
- printk(KERN_INFO "r8712u: r8711_wx_set_enc: "
- "EncryptionDisabled\n");
+ netdev_info(dev, "r8712u: %s: EncryptionDisabled\n", __func__);
padapter->securitypriv.ndisencryptstatus =
Ndis802_11EncryptionDisabled;
padapter->securitypriv.PrivacyAlgrthm = _NO_PRIVACY_;
@@ -1578,8 +1570,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
}
/* set authentication mode */
if (erq->flags & IW_ENCODE_OPEN) {
- printk(KERN_INFO "r8712u: r8711_wx_set_enc: "
- "IW_ENCODE_OPEN\n");
+ netdev_info(dev, "r8712u: %s: IW_ENCODE_OPEN\n", __func__);
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.AuthAlgrthm = 0; /* open system */
@@ -1588,8 +1579,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
authmode = Ndis802_11AuthModeOpen;
padapter->securitypriv.ndisauthtype = authmode;
} else if (erq->flags & IW_ENCODE_RESTRICTED) {
- printk(KERN_INFO "r8712u: r8711_wx_set_enc: "
- "IW_ENCODE_RESTRICTED\n");
+ netdev_info(dev, "r8712u: %s: IW_ENCODE_RESTRICTED\n", __func__);
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.AuthAlgrthm = 1; /* shared system */
@@ -1977,9 +1967,9 @@ static int r871x_mp_ioctl_hdl(struct net_device *dev,
status = phandler->handler(&oid_par);
/* todo:check status, BytesNeeded, etc. */
} else {
- printk(KERN_INFO "r8712u: r871x_mp_ioctl_hdl(): err!,"
- " subcode=%d, oid=%d, handler=%p\n",
- poidparam->subcode, phandler->oid, phandler->handler);
+ netdev_info(dev, "r8712u: %s: err!, subcode=%d, oid=%d, handler=%p\n",
+ __func__, poidparam->subcode, phandler->oid,
+ phandler->handler);
ret = -EFAULT;
goto _r871x_mp_ioctl_hdl_exit;
}
@@ -2034,13 +2024,13 @@ static int r871x_get_ap_info(struct net_device *dev,
break;
pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
if (hwaddr_aton_i(data, bssid)) {
- printk(KERN_INFO "r8712u: Invalid BSSID '%s'.\n",
- (u8 *)data);
+ netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
+ (u8 *)data);
spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock),
- irqL);
+ irqL);
return -EINVAL;
}
- printk(KERN_INFO "r8712u: BSSID:%pM\n", bssid);
+ netdev_info(dev, "r8712u: BSSID:%pM\n", bssid);
if (!memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN)) {
/* BSSID match, then check if supporting wpa/wpa2 */
pbuf = r8712_get_wpa_ie(&pnetwork->network.IEs[12],
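The min()-to-min_t() conversions above exist because min() warns when its two arguments have different types; min_t(type, a, b) casts both sides to the named type before comparing, which is what the old explicit (u16)/(u8) cast pairs were doing by hand. A minimal sketch:

	#include <linux/kernel.h>	/* min_t() */

	static u32 clamp_essid_len(u32 requested)
	{
		/* Both operands are compared as u32, regardless of their declared types. */
		return min_t(u32, requested, 32);
	}
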
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
index 9a33eaee879b..5d6d55e7b389 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
@@ -28,6 +28,7 @@
#define _RTL871X_IOCTL_RTL_C_
+#include <linux/rndis.h>
#include "osdep_service.h"
#include "drv_types.h"
#include "wlan_bssdef.h"
@@ -42,8 +43,8 @@
uint oid_rt_get_signal_quality_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
@@ -52,14 +53,14 @@ uint oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
padapter->recvpriv.rx_smallpacket_crcerr;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
@@ -68,14 +69,14 @@ uint oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
padapter->recvpriv.rx_middlepacket_crcerr;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
@@ -84,29 +85,29 @@ uint oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
padapter->recvpriv.rx_largepacket_crcerr;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_tx_retry_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_rx_retry_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv)
@@ -115,29 +116,29 @@ uint oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
padapter->recvpriv.rx_pkts +
padapter->recvpriv.rx_drop;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_tx_beacon_ok_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_tx_beacon_err_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv)
@@ -146,22 +147,22 @@ uint oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
*(uint *)poid_par_priv->information_buf =
padapter->recvpriv.rx_icv_err;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH ;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH ;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_set_encryption_algorithm_hdl(struct oid_par_priv
*poid_par_priv)
{
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv)
@@ -171,7 +172,7 @@ uint oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv)
u32 preamblemode = 0 ;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
if (padapter->registrypriv.preamble == PREAMBLE_LONG)
preamblemode = 0;
@@ -182,15 +183,15 @@ uint oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv)
*(u32 *)poid_par_priv->information_buf = preamblemode;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_ap_ip_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv)
@@ -200,10 +201,10 @@ uint oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv)
struct eeprom_priv *peeprompriv = &padapter->eeprompriv;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
*(u16 *)poid_par_priv->information_buf = peeprompriv->channel_plan;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_set_channelplan_hdl(struct oid_par_priv
@@ -214,9 +215,9 @@ uint oid_rt_set_channelplan_hdl(struct oid_par_priv
struct eeprom_priv *peeprompriv = &padapter->eeprompriv;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
peeprompriv->channel_plan = *(u16 *)poid_par_priv->information_buf;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_set_preamble_mode_hdl(struct oid_par_priv
@@ -227,7 +228,7 @@ uint oid_rt_set_preamble_mode_hdl(struct oid_par_priv
u32 preamblemode = 0;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
preamblemode = *(u32 *)poid_par_priv->information_buf;
if (preamblemode == 0)
@@ -239,21 +240,21 @@ uint oid_rt_set_preamble_mode_hdl(struct oid_par_priv
*(u32 *)poid_par_priv->information_buf = preamblemode;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_set_bcn_intvl_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_dedicate_probe_hdl(struct oid_par_priv
*poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv
@@ -263,14 +264,14 @@ uint oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
padapter->xmitpriv.tx_bytes;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv
@@ -280,37 +281,37 @@ uint oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
padapter->recvpriv.rx_bytes;
*poid_par_priv->bytes_rw = poid_par_priv->
information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_current_tx_power_level_hdl(struct oid_par_priv
*poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_enc_key_mismatch_count_hdl(struct oid_par_priv
*poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_enc_key_match_count_hdl(struct oid_par_priv
*poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv)
@@ -322,7 +323,7 @@ uint oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv)
u32 channelnum;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true))
pnic_Config = &pmlmepriv->cur_network.network.Configuration;
@@ -332,22 +333,22 @@ uint oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv)
channelnum = pnic_Config->DSConfig;
*(u32 *)poid_par_priv->information_buf = channelnum;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_hardware_radio_off_hdl(struct oid_par_priv
*poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_key_mismatch_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_supported_wireless_mode_hdl(struct oid_par_priv
@@ -356,7 +357,7 @@ uint oid_rt_supported_wireless_mode_hdl(struct oid_par_priv
u32 ulInfo = 0;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
ulInfo |= 0x0100; /* WIRELESS_MODE_B */
ulInfo |= 0x0200; /* WIRELESS_MODE_G */
@@ -364,108 +365,108 @@ uint oid_rt_supported_wireless_mode_hdl(struct oid_par_priv
*(u32 *) poid_par_priv->information_buf = ulInfo;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- return NDIS_STATUS_INVALID_LENGTH;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_channel_list_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_scan_in_progress_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_forced_data_rate_hdl(struct oid_par_priv *poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_wireless_mode_for_scan_list_hdl(struct oid_par_priv
*poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_bss_wireless_mode_hdl(struct oid_par_priv
*poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_scan_with_magic_packet_hdl(struct oid_par_priv
*poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_ap_get_associated_station_list_hdl(struct oid_par_priv
*poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_ap_switch_into_ap_mode_hdl(struct oid_par_priv*
poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_ap_supported_hdl(struct oid_par_priv *poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_ap_set_passphrase_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv*
poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != SET_OID) /* QUERY_OID */
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len ==
(sizeof(unsigned long) * 3)) {
if (!r8712_setrfreg_cmd(Adapter,
*(unsigned char *)poid_par_priv->information_buf,
(unsigned long)(*((unsigned long *)
poid_par_priv->information_buf + 2))))
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
} else
- status = NDIS_STATUS_INVALID_LENGTH;
+ status = RNDIS_STATUS_INVALID_LENGTH;
return status;
}
uint oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != SET_OID) /* QUERY_OID */
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len == (sizeof(unsigned long)*3)) {
if (Adapter->mppriv.act_in_progress == true)
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
else {
/* init workparam */
Adapter->mppriv.act_in_progress = true;
@@ -486,10 +487,10 @@ uint oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv)
*(unsigned char *)poid_par_priv->information_buf,
(unsigned char *)&Adapter->mppriv.workparam.
io_value))
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
}
} else
- status = NDIS_STATUS_INVALID_LENGTH;
+ status = RNDIS_STATUS_INVALID_LENGTH;
return status;
}
@@ -508,7 +509,7 @@ uint oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv)
u32 ulInfo;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
/* nStatus==0 CheckingStatus
* nStatus==1 Associated
* nStatus==2 AdHocMode
@@ -524,12 +525,12 @@ uint oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv)
ulInfo = NOTASSOCIATED ;
*(u32 *)poid_par_priv->information_buf = ulInfo;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_set_default_key_id_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
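
[Editor's note] The hunks above are a mechanical substitution: every OID handler keeps its control flow and only the returned status constants change from the driver-local NDIS_STATUS_* names to the generic RNDIS_STATUS_* ones from <linux/rndis.h>. A minimal sketch of the recurring handler shape, assuming the types and constants shown in the diff (the handler name and the stored value are made up for illustration):

    #include <linux/rndis.h>

    /* Hypothetical query handler illustrating the common pattern. */
    uint oid_rt_example_query_hdl(struct oid_par_priv *poid_par_priv)
    {
            /* Reject anything that is not a query request. */
            if (poid_par_priv->type_of_oid != QUERY_OID)
                    return RNDIS_STATUS_NOT_ACCEPTED;
            /* The caller must pass a buffer large enough for the result. */
            if (poid_par_priv->information_buf_len < sizeof(u32))
                    return RNDIS_STATUS_INVALID_LENGTH;
            /* Write the value and report how many bytes were produced. */
            *(u32 *)poid_par_priv->information_buf = 0; /* placeholder value */
            *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
            return RNDIS_STATUS_SUCCESS;
    }
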
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index d3ab24e34e3d..53a7c8c1bb40 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -97,8 +97,6 @@ static u8 do_join(struct _adapter *padapter)
pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
pibss = padapter->registrypriv.dev_network.
MacAddress;
- memset(&pdev_network->Ssid, 0,
- sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid,
&pmlmepriv->assoc_ssid,
sizeof(struct ndis_802_11_ssid));
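
[Editor's note] The memset() dropped here (and in the two rtl871x_mlme.c hunks below) appears redundant rather than functional: the memcpy() that immediately follows writes sizeof(struct ndis_802_11_ssid) bytes to the same destination, so every byte of pdev_network->Ssid is overwritten anyway. The surviving code path, taken from the context lines:

    /* Zeroing first is unnecessary: the copy below covers the whole struct. */
    memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid,
           sizeof(struct ndis_802_11_ssid));
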
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index c51ad9ed4b52..659615481f6f 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -604,9 +604,6 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
adapter->registrypriv.
dev_network.MacAddress;
pmlmepriv->fw_state ^= _FW_UNDER_SURVEY;
- memset(&pdev_network->Ssid, 0,
- sizeof(struct
- ndis_802_11_ssid));
memcpy(&pdev_network->Ssid,
&pmlmepriv->assoc_ssid,
sizeof(struct
@@ -1006,8 +1003,6 @@ void r8712_stadel_event_callback(struct _adapter *adapter, u8 *pbuf)
memcpy(pdev_network, &tgt_network->network,
r8712_get_ndis_wlan_bssid_ex_sz(&tgt_network->
network));
- memset(&pdev_network->Ssid, 0,
- sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid,
&pmlmepriv->assoc_ssid,
sizeof(struct ndis_802_11_ssid));
@@ -1048,8 +1043,8 @@ void r8712_got_addbareq_event_callback(struct _adapter *adapter, u8 *pbuf)
struct sta_priv *pstapriv = &adapter->stapriv;
struct recv_reorder_ctrl *precvreorder_ctrl = NULL;
- printk(KERN_INFO "r8712u: [%s] mac = %pM, seq = %d, tid = %d\n",
- __func__, pAddbareq_pram->MacAddress,
+ netdev_info(adapter->pnetdev, "%s: mac = %pM, seq = %d, tid = %d\n",
+ __func__, pAddbareq_pram->MacAddress,
pAddbareq_pram->StartSeqNum, pAddbareq_pram->tid);
psta = r8712_get_stainfo(pstapriv, pAddbareq_pram->MacAddress);
if (psta) {
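
[Editor's note] Besides the SSID cleanup, this file switches its log message from a raw printk() to netdev_info(). The wrapper prefixes the output with the driver and interface name, so the hand-written "r8712u:" tag becomes redundant. A before/after sketch (the message text is illustrative; adapter->pnetdev is the net_device pointer used in the hunk above):

    printk(KERN_INFO "r8712u: %s: got ADDBA req\n", __func__);       /* old */
    netdev_info(adapter->pnetdev, "%s: got ADDBA req\n", __func__);  /* new */
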
diff --git a/drivers/staging/rtl8712/rtl871x_mp.h b/drivers/staging/rtl8712/rtl871x_mp.h
index 255dc94f0901..51395d1a3c7e 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.h
+++ b/drivers/staging/rtl8712/rtl871x_mp.h
@@ -26,62 +26,6 @@
#ifndef __RTL871X_MP_H_
#define __RTL871X_MP_H_
-/* 00 - Success */
-/* 11 - Error */
-#define STATUS_SUCCESS (0x00000000L)
-#define STATUS_PENDING (0x00000103L)
-#define STATUS_UNSUCCESSFUL (0xC0000001L)
-#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
-#define STATUS_NOT_SUPPORTED (0xC00000BBL)
-#define NDIS_STATUS_SUCCESS ((uint)STATUS_SUCCESS)
-#define NDIS_STATUS_PENDING ((uint) STATUS_PENDING)
-#define NDIS_STATUS_NOT_RECOGNIZED ((uint)0x00010001L)
-#define NDIS_STATUS_NOT_COPIED ((uint)0x00010002L)
-#define NDIS_STATUS_NOT_ACCEPTED ((uint)0x00010003L)
-#define NDIS_STATUS_CALL_ACTIVE ((uint)0x00010007L)
-#define NDIS_STATUS_FAILURE ((uint) STATUS_UNSUCCESSFUL)
-#define NDIS_STATUS_RESOURCES ((uint)\
- STATUS_INSUFFICIENT_RESOURCES)
-#define NDIS_STATUS_CLOSING ((uint)0xC0010002L)
-#define NDIS_STATUS_BAD_VERSION ((uint)0xC0010004L)
-#define NDIS_STATUS_BAD_CHARACTERISTICS ((uint)0xC0010005L)
-#define NDIS_STATUS_ADAPTER_NOT_FOUND ((uint)0xC0010006L)
-#define NDIS_STATUS_OPEN_FAILED ((uint)0xC0010007L)
-#define NDIS_STATUS_DEVICE_FAILED ((uint)0xC0010008L)
-#define NDIS_STATUS_MULTICAST_FULL ((uint)0xC0010009L)
-#define NDIS_STATUS_MULTICAST_EXISTS ((uint)0xC001000AL)
-#define NDIS_STATUS_MULTICAST_NOT_FOUND ((uint)0xC001000BL)
-#define NDIS_STATUS_REQUEST_ABORTED ((uint)0xC001000CL)
-#define NDIS_STATUS_RESET_IN_PROGRESS ((uint)0xC001000DL)
-#define NDIS_STATUS_CLOSING_INDICATING ((uint)0xC001000EL)
-#define NDIS_STATUS_NOT_SUPPORTED ((uint)STATUS_NOT_SUPPORTED)
-#define NDIS_STATUS_INVALID_PACKET ((uint)0xC001000FL)
-#define NDIS_STATUS_OPEN_LIST_FULL ((uint)0xC0010010L)
-#define NDIS_STATUS_ADAPTER_NOT_READY ((uint)0xC0010011L)
-#define NDIS_STATUS_ADAPTER_NOT_OPEN ((uint)0xC0010012L)
-#define NDIS_STATUS_NOT_INDICATING ((uint)0xC0010013L)
-#define NDIS_STATUS_INVALID_LENGTH ((uint)0xC0010014L)
-#define NDIS_STATUS_INVALID_DATA ((uint)0xC0010015L)
-#define NDIS_STATUS_BUFFER_TOO_SHORT ((uint)0xC0010016L)
-#define NDIS_STATUS_INVALID_OID ((uint)0xC0010017L)
-#define NDIS_STATUS_ADAPTER_REMOVED ((uint)0xC0010018L)
-#define NDIS_STATUS_UNSUPPORTED_MEDIA ((uint)0xC0010019L)
-#define NDIS_STATUS_GROUP_ADDRESS_IN_USE ((uint)0xC001001AL)
-#define NDIS_STATUS_FILE_NOT_FOUND ((uint)0xC001001BL)
-#define NDIS_STATUS_ERROR_READING_FILE ((uint)0xC001001CL)
-#define NDIS_STATUS_ALREADY_MAPPED ((uint)0xC001001DL)
-#define NDIS_STATUS_RESOURCE_CONFLICT ((uint)0xC001001EL)
-#define NDIS_STATUS_NO_CABLE ((uint)0xC001001FL)
-#define NDIS_STATUS_INVALID_SAP ((uint)0xC0010020L)
-#define NDIS_STATUS_SAP_IN_USE ((uint)0xC0010021L)
-#define NDIS_STATUS_INVALID_ADDRESS ((uint)0xC0010022L)
-#define NDIS_STATUS_VC_NOT_ACTIVATED ((uint)0xC0010023L)
-#define NDIS_STATUS_DEST_OUT_OF_ORDER ((uint)0xC0010024L) /* cause 27*/
-#define NDIS_STATUS_VC_NOT_AVAILABLE ((uint)0xC0010025L) /* 35,45*/
-#define NDIS_STATUS_CELLRATE_NOT_AVAILABLE ((uint)0xC0010026L) /* 37*/
-#define NDIS_STATUS_INCOMPATABLE_QOS ((uint)0xC0010027L) /* 49*/
-#define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED ((uint)0xC0010028L) /* 93*/
-#define NDIS_STATUS_NO_ROUTE_TO_DESTINATION ((uint)0xC0010029L) /* 3*/
#define MPT_NOOP 0
#define MPT_READ_MAC_1BYTE 1
#define MPT_READ_MAC_2BYTE 2
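
[Editor's note] The block deleted from rtl871x_mp.h was the driver's private copy of the NDIS status codes; the handlers now rely on the generic constants, with the needed #include <linux/rndis.h> added in rtl871x_mp_ioctl.c below. Rough correspondence for the constants these handlers actually use (numeric values as in the deleted defines; the generic header is expected to carry the same values):

    /*
     * NDIS_STATUS_SUCCESS        (0x00000000) -> RNDIS_STATUS_SUCCESS
     * NDIS_STATUS_NOT_ACCEPTED   (0x00010003) -> RNDIS_STATUS_NOT_ACCEPTED
     * NDIS_STATUS_FAILURE        (0xC0000001) -> RNDIS_STATUS_FAILURE
     * NDIS_STATUS_INVALID_LENGTH (0xC0010014) -> RNDIS_STATUS_INVALID_LENGTH
     * NDIS_STATUS_INVALID_DATA   (0xC0010015) -> RNDIS_STATUS_INVALID_DATA
     */
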
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index 5eb461b4a491..5bd42966fd5c 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -26,6 +26,7 @@
*
******************************************************************************/
+#include <linux/rndis.h>
#include "osdep_service.h"
#include "drv_types.h"
#include "mlme_osdep.h"
@@ -34,12 +35,12 @@
uint oid_null_function(struct oid_par_priv *poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -48,7 +49,7 @@ uint oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv)
Adapter->registrypriv.wireless_mode =
*(u8 *)poid_par_priv->information_buf;
else
- status = NDIS_STATUS_INVALID_LENGTH;
+ status = RNDIS_STATUS_INVALID_LENGTH;
} else if (poid_par_priv->type_of_oid == QUERY_OID) {
if (poid_par_priv->information_buf_len >= sizeof(u8)) {
*(u8 *)poid_par_priv->information_buf =
@@ -56,16 +57,16 @@ uint oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv)
*poid_par_priv->bytes_rw =
poid_par_priv->information_buf_len;
} else
- status = NDIS_STATUS_INVALID_LENGTH;
+ status = RNDIS_STATUS_INVALID_LENGTH;
} else {
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
}
return status;
}
uint oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
struct bb_reg_param *pbbreg;
@@ -73,9 +74,9 @@ uint oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
u32 value;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf);
offset = (u16)(pbbreg->offset) & 0xFFF; /*0ffset :0x800~0xfff*/
if (offset < BB_REG_BASE_ADDR)
@@ -87,7 +88,7 @@ uint oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
struct bb_reg_param *pbbreg;
@@ -95,9 +96,9 @@ uint oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
u32 value;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf);
offset = (u16)(pbbreg->offset) & 0xFFF; /*0ffset :0x800~0xfff*/
if (offset < BB_REG_BASE_ADDR)
@@ -110,7 +111,7 @@ uint oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
struct rf_reg_param *pbbreg;
@@ -119,13 +120,13 @@ uint oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
u32 value;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf);
path = (u8)pbbreg->path;
if (path > RF_PATH_B)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
offset = (u8)pbbreg->offset;
value = pbbreg->value;
r8712_rf_reg_write(Adapter, path, offset, value);
@@ -136,20 +137,20 @@ uint oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct rf_reg_param *pbbreg;
u8 path;
u8 offset;
u32 value;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf);
path = (u8)pbbreg->path;
if (path > RF_PATH_B) /* 1T2R path_a /path_b */
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
offset = (u8)pbbreg->offset;
value = r8712_rf_reg_read(Adapter, path, offset);
pbbreg->value = value;
@@ -265,16 +266,16 @@ uint oid_rt_pro_set_data_rate_hdl(struct oid_par_priv
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u32 ratevalue;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
ratevalue = *((u32 *)poid_par_priv->information_buf);
if (ratevalue >= MPT_RATE_LAST)
- return NDIS_STATUS_INVALID_DATA;
+ return RNDIS_STATUS_INVALID_DATA;
Adapter->mppriv.curr_rateidx = ratevalue;
r8712_SetDataRate(Adapter);
return status;
@@ -284,16 +285,16 @@ uint oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u32 mode;
u8 val8;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
mode = *((u32 *)poid_par_priv->information_buf);
Adapter->mppriv.mode = mode;/* 1 for loopback*/
if (mp_start_test(Adapter) == _FAIL)
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
r8712_write8(Adapter, MSR, 1); /* Link in ad hoc network, 0x1025004C */
r8712_write8(Adapter, RCR, 0); /* RCR : disable all pkt, 0x10250048 */
/* RCR disable Check BSSID, 0x1025004a */
@@ -313,12 +314,12 @@ uint oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (mp_stop_test(Adapter) == _FAIL)
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
@@ -327,16 +328,16 @@ uint oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u32 Channel;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
Channel = *((u32 *)poid_par_priv->information_buf);
if (Channel > 14)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
Adapter->mppriv.curr_ch = Channel;
r8712_SetChannel(Adapter);
return status;
@@ -346,13 +347,13 @@ uint oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u32 antenna;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
antenna = *((u32 *)poid_par_priv->information_buf);
Adapter->mppriv.antenna_tx = (u16)((antenna & 0xFFFF0000) >> 16);
Adapter->mppriv.antenna_rx = (u16)(antenna & 0x0000FFFF);
@@ -365,16 +366,16 @@ uint oid_rt_pro_set_tx_power_control_hdl(
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u32 tx_pwr_idx;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
tx_pwr_idx = *((u32 *)poid_par_priv->information_buf);
if (tx_pwr_idx > MAX_TX_PWR_INDEX_N_MODE)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
Adapter->mppriv.curr_txpoweridx = (u8)tx_pwr_idx;
r8712_SetTxPower(Adapter);
return status;
@@ -383,12 +384,12 @@ uint oid_rt_pro_set_tx_power_control_hdl(
uint oid_rt_pro_query_tx_packet_sent_hdl(
struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
if (poid_par_priv->information_buf_len == sizeof(u32)) {
@@ -396,19 +397,19 @@ uint oid_rt_pro_query_tx_packet_sent_hdl(
Adapter->mppriv.tx_pktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- status = NDIS_STATUS_INVALID_LENGTH;
+ status = RNDIS_STATUS_INVALID_LENGTH;
return status;
}
uint oid_rt_pro_query_rx_packet_received_hdl(
struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
if (poid_par_priv->information_buf_len == sizeof(u32)) {
@@ -416,19 +417,19 @@ uint oid_rt_pro_query_rx_packet_received_hdl(
Adapter->mppriv.rx_pktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- status = NDIS_STATUS_INVALID_LENGTH;
+ status = RNDIS_STATUS_INVALID_LENGTH;
return status;
}
uint oid_rt_pro_query_rx_packet_crc32_error_hdl(
struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
if (poid_par_priv->information_buf_len == sizeof(u32)) {
@@ -436,7 +437,7 @@ uint oid_rt_pro_query_rx_packet_crc32_error_hdl(
Adapter->mppriv.rx_crcerrpktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else
- status = NDIS_STATUS_INVALID_LENGTH;
+ status = RNDIS_STATUS_INVALID_LENGTH;
return status;
}
@@ -447,25 +448,25 @@ uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
Adapter->mppriv.tx_pktcount = 0;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv
*poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len == sizeof(u32)) {
Adapter->mppriv.rx_pktcount = 0;
Adapter->mppriv.rx_crcerrpktcount = 0;
} else
- status = NDIS_STATUS_INVALID_LENGTH;
+ status = RNDIS_STATUS_INVALID_LENGTH;
return status;
}
@@ -476,9 +477,9 @@ uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
r8712_ResetPhyRxPktCount(Adapter);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv
@@ -488,13 +489,13 @@ uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
*(u32 *)poid_par_priv->information_buf =
r8712_GetPhyRxPktReceived(Adapter);
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv
@@ -504,13 +505,13 @@ uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
*(u32 *)poid_par_priv->information_buf =
r8712_GetPhyRxPktCRC32Error(Adapter);
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv
@@ -520,10 +521,10 @@ uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
Adapter->mppriv.curr_modem = *((u8 *)poid_par_priv->information_buf);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv
@@ -534,10 +535,10 @@ uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv
u32 bStartTest;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
bStartTest = *((u32 *)poid_par_priv->information_buf);
r8712_SetContinuousTx(Adapter, (u8)bStartTest);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv
@@ -548,10 +549,10 @@ uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv
u32 bStartTest;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
bStartTest = *((u32 *)poid_par_priv->information_buf);
r8712_SetSingleCarrierTx(Adapter, (u8)bStartTest);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv
@@ -562,10 +563,10 @@ uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv
u32 bStartTest;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
bStartTest = *((u32 *)poid_par_priv->information_buf);
r8712_SetCarrierSuppressionTx(Adapter, (u8)bStartTest);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
@@ -576,28 +577,28 @@ uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
u32 bStartTest;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
bStartTest = *((u32 *)poid_par_priv->information_buf);
r8712_SetSingleToneTx(Adapter, (u8)bStartTest);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct ndis_802_11_ssid *pssid;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_needed = (u32)sizeof(struct ndis_802_11_ssid);
*poid_par_priv->bytes_rw = 0;
if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
pssid = (struct ndis_802_11_ssid *)poid_par_priv->information_buf;
if (mp_start_joinbss(Adapter, pssid) == _FAIL)
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_rw = sizeof(struct ndis_802_11_ssid);
return status;
}
@@ -607,12 +608,12 @@ uint oid_rt_pro_read_register_hdl(struct oid_par_priv
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct mp_rw_reg *RegRWStruct;
u16 offset;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf;
if ((RegRWStruct->offset >= 0x10250800) &&
(RegRWStruct->offset <= 0x10250FFF)) {
@@ -635,7 +636,7 @@ uint oid_rt_pro_read_register_hdl(struct oid_par_priv
RegRWStruct->offset);
break;
default:
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
break;
}
}
@@ -647,14 +648,14 @@ uint oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct mp_rw_reg *RegRWStruct;
u16 offset;
u32 value;
u32 oldValue = 0;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf;
if ((RegRWStruct->offset >= 0x10250800) &&
(RegRWStruct->offset <= 0x10250FFF)) {
@@ -691,11 +692,11 @@ uint oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
(unsigned int)RegRWStruct->value);
break;
default:
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
break;
}
- if ((status == NDIS_STATUS_SUCCESS) &&
+ if ((status == RNDIS_STATUS_SUCCESS) &&
(RegRWStruct->offset == HIMR) &&
(RegRWStruct->width == 4))
Adapter->ImrContent = RegRWStruct->value;
@@ -711,12 +712,12 @@ uint oid_rt_pro_burst_read_register_hdl(struct oid_par_priv
struct burst_rw_reg *pBstRwReg;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
pBstRwReg = (struct burst_rw_reg *)poid_par_priv->information_buf;
r8712_read_mem(Adapter, pBstRwReg->offset, (u32)pBstRwReg->len,
pBstRwReg->Data);
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_burst_write_register_hdl(struct oid_par_priv
@@ -727,16 +728,16 @@ uint oid_rt_pro_burst_write_register_hdl(struct oid_par_priv
struct burst_rw_reg *pBstRwReg;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
pBstRwReg = (struct burst_rw_reg *)poid_par_priv->information_buf;
r8712_write_mem(Adapter, pBstRwReg->offset, (u32)pBstRwReg->len,
pBstRwReg->Data);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
@@ -746,12 +747,12 @@ uint oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
struct eeprom_rw_param *pEEPROM;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
pEEPROM = (struct eeprom_rw_param *)poid_par_priv->information_buf;
pEEPROM->value = r8712_eeprom_read16(Adapter,
(u16)(pEEPROM->offset >> 1));
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
@@ -761,12 +762,12 @@ uint oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
struct eeprom_rw_param *pEEPROM;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
pEEPROM = (struct eeprom_rw_param *)poid_par_priv->information_buf;
r8712_eeprom_write16(Adapter, (u16)(pEEPROM->offset >> 1),
pEEPROM->value);
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv)
@@ -776,17 +777,17 @@ uint oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv)
struct mp_wiparam *pwi_param;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(struct mp_wiparam))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
if (Adapter->mppriv.workparam.bcompleted == false)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
pwi_param = (struct mp_wiparam *)poid_par_priv->information_buf;
memcpy(pwi_param, &Adapter->mppriv.workparam,
sizeof(struct mp_wiparam));
Adapter->mppriv.act_in_progress = false;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv)
@@ -795,42 +796,42 @@ uint oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(uint) * 2)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
if (*(uint *)poid_par_priv->information_buf == 1)
Adapter->mppriv.rx_pktloss = 0;
*((uint *)poid_par_priv->information_buf+1) =
Adapter->mppriv.rx_pktloss;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
{
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (r8712_setrfintfs_cmd(Adapter, *(unsigned char *)
poid_par_priv->information_buf) == _FAIL)
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
@@ -838,10 +839,10 @@ uint oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
memcpy(poid_par_priv->information_buf,
(unsigned char *)&Adapter->mppriv.rxstat,
sizeof(struct recv_stat));
@@ -852,7 +853,7 @@ uint oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv
*poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv
@@ -860,13 +861,13 @@ uint oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (r8712_setdatarate_cmd(Adapter,
poid_par_priv->information_buf) != _SUCCESS)
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
@@ -874,16 +875,16 @@ uint oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (Adapter->mppriv.act_in_progress == true)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u8))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
/*init workparam*/
Adapter->mppriv.act_in_progress = true;
Adapter->mppriv.workparam.bcompleted = false;
@@ -904,14 +905,14 @@ uint oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u8))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
if (!r8712_setptm_cmd(Adapter, *((u8 *)poid_par_priv->information_buf)))
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
@@ -920,13 +921,13 @@ uint oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv)
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u32 ratevalue;
u8 datarates[NumRates];
int i;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
ratevalue = *((u32 *)poid_par_priv->information_buf);
for (i = 0; i < NumRates; i++) {
if (ratevalue == mpdatarate[i])
@@ -935,7 +936,7 @@ uint oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv)
datarates[i] = 0xff;
}
if (r8712_setbasicrate_cmd(Adapter, datarates) != _SUCCESS)
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
@@ -945,14 +946,14 @@ uint oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < 8)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
*poid_par_priv->bytes_rw = 8;
memcpy(poid_par_priv->information_buf,
&(Adapter->pwrctrlpriv.pwr_mode), 8);
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
@@ -962,18 +963,18 @@ uint oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
uint pwr_mode, smart_ps;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_rw = 0;
*poid_par_priv->bytes_needed = 8;
if (poid_par_priv->information_buf_len < 8)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
pwr_mode = *(uint *)(poid_par_priv->information_buf);
smart_ps = *(uint *)((addr_t)poid_par_priv->information_buf + 4);
if (pwr_mode != Adapter->pwrctrlpriv.pwr_mode || smart_ps !=
Adapter->pwrctrlpriv.smart_ps)
r8712_set_ps_mode(Adapter, pwr_mode, smart_ps);
*poid_par_priv->bytes_rw = 8;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv
@@ -981,20 +982,20 @@ uint oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct setratable_parm *prate_table;
u8 res;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_needed = sizeof(struct setratable_parm);
if (poid_par_priv->information_buf_len <
sizeof(struct setratable_parm))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
prate_table = (struct setratable_parm *)poid_par_priv->information_buf;
res = r8712_setrttbl_cmd(Adapter, prate_table);
if (res == _FAIL)
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
return status;
}
@@ -1002,8 +1003,8 @@ uint oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv
*poid_par_priv)
{
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv
@@ -1016,7 +1017,7 @@ uint oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv
*poid_par_priv->bytes_needed = sizeof(u8);
if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
if (poid_par_priv->type_of_oid == SET_OID) {
encry_mode = *((u8 *)poid_par_priv->information_buf);
@@ -1054,7 +1055,7 @@ uint oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv
*(u8 *)poid_par_priv->information_buf = encry_mode;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
}
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
/*----------------------------------------------------------------------*/
uint oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv)
@@ -1062,24 +1063,24 @@ uint oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv)
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct sta_info *psta = NULL;
u8 *macaddr;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_needed = ETH_ALEN;
if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
macaddr = (u8 *) poid_par_priv->information_buf;
psta = r8712_get_stainfo(&Adapter->stapriv, macaddr);
if (psta == NULL) { /* the sta in sta_info_queue => do nothing*/
psta = r8712_alloc_stainfo(&Adapter->stapriv, macaddr);
if (psta == NULL)
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
}
return status;
}
@@ -1090,18 +1091,18 @@ uint oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
unsigned long irqL;
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct sta_info *psta = NULL;
u8 *macaddr;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_needed = ETH_ALEN;
if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
macaddr = (u8 *)poid_par_priv->information_buf;
@@ -1125,15 +1126,15 @@ uint oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv)
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct DR_VARIABLE_STRUCT *pdrv_var;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
*poid_par_priv->bytes_needed = sizeof(struct DR_VARIABLE_STRUCT);
if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
pdrv_var = (struct DR_VARIABLE_STRUCT *)poid_par_priv->information_buf;
pdrv_var->variable = mp_query_drv_var(Adapter, pdrv_var->offset,
pdrv_var->variable);
@@ -1144,7 +1145,7 @@ uint oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv)
/*--------------------------------------------------------------------------*/
uint oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
{
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
/*------------------------------------------------------------------------*/
uint oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
@@ -1152,17 +1153,17 @@ uint oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct EFUSE_ACCESS_STRUCT *pefuse;
u8 *data;
u16 addr = 0, cnts = 0;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len <
sizeof(struct EFUSE_ACCESS_STRUCT))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
pefuse = (struct EFUSE_ACCESS_STRUCT *)poid_par_priv->information_buf;
addr = pefuse->start_addr;
cnts = pefuse->cnts;
@@ -1170,9 +1171,9 @@ uint oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
memset(data, 0xFF, cnts);
if ((addr > 511) || (cnts < 1) || (cnts > 512) || (addr + cnts) >
EFUSE_MAX_SIZE)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (r8712_efuse_access(Adapter, true, addr, cnts, data) == false)
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
return status;
}
@@ -1182,14 +1183,14 @@ uint oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct EFUSE_ACCESS_STRUCT *pefuse;
u8 *data;
u16 addr = 0, cnts = 0;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
pefuse = (struct EFUSE_ACCESS_STRUCT *)poid_par_priv->information_buf;
addr = pefuse->start_addr;
@@ -1198,9 +1199,9 @@ uint oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
if ((addr > 511) || (cnts < 1) || (cnts > 512) ||
(addr + cnts) > r8712_efuse_get_max_size(Adapter))
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (r8712_efuse_access(Adapter, false, addr, cnts, data) == false)
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
return status;
}
/*----------------------------------------------------------------------*/
@@ -1208,12 +1209,12 @@ uint oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct PGPKT_STRUCT *ppgpkt;
*poid_par_priv->bytes_rw = 0;
if (poid_par_priv->information_buf_len < sizeof(struct PGPKT_STRUCT))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
ppgpkt = (struct PGPKT_STRUCT *)poid_par_priv->information_buf;
if (poid_par_priv->type_of_oid == QUERY_OID) {
if (r8712_efuse_pg_packet_read(Adapter, ppgpkt->offset,
@@ -1221,7 +1222,7 @@ uint oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
*poid_par_priv->bytes_rw =
poid_par_priv->information_buf_len;
else
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
} else {
if (r8712_efuse_reg_init(Adapter) == true) {
if (r8712_efuse_pg_packet_write(Adapter, ppgpkt->offset,
@@ -1229,10 +1230,10 @@ uint oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
*poid_par_priv->bytes_rw =
poid_par_priv->information_buf_len;
else
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
r8712_efuse_reg_uninit(Adapter);
} else
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
}
return status;
}
@@ -1242,12 +1243,12 @@ uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(int))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
r8712_efuse_reg_init(Adapter);
*(int *)poid_par_priv->information_buf =
r8712_efuse_get_current_size(Adapter);
@@ -1260,12 +1261,12 @@ uint oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
*(int *)poid_par_priv->information_buf =
r8712_efuse_get_max_size(Adapter);
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
@@ -1274,7 +1275,7 @@ uint oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid == QUERY_OID)
status = oid_rt_pro_read_efuse_hdl(poid_par_priv);
@@ -1287,18 +1288,18 @@ uint oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u8 *data;
*poid_par_priv->bytes_rw = 0;
if (poid_par_priv->information_buf_len < EFUSE_MAP_MAX_SIZE)
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
data = (u8 *)poid_par_priv->information_buf;
if (poid_par_priv->type_of_oid == QUERY_OID) {
if (r8712_efuse_map_read(Adapter, 0, EFUSE_MAP_MAX_SIZE, data))
*poid_par_priv->bytes_rw = EFUSE_MAP_MAX_SIZE;
else
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
} else {
/* SET_OID */
if (r8712_efuse_reg_init(Adapter) == true) {
@@ -1306,10 +1307,10 @@ uint oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv)
EFUSE_MAP_MAX_SIZE, data))
*poid_par_priv->bytes_rw = EFUSE_MAP_MAX_SIZE;
else
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
r8712_efuse_reg_uninit(Adapter);
} else {
- status = NDIS_STATUS_FAILURE;
+ status = RNDIS_STATUS_FAILURE;
}
}
return status;
@@ -1319,13 +1320,13 @@ uint oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u32 bandwidth;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
bandwidth = *((u32 *)poid_par_priv->information_buf);/*4*/
if (bandwidth != HT_CHANNEL_WIDTH_20)
bandwidth = HT_CHANNEL_WIDTH_40;
@@ -1338,16 +1339,16 @@ uint oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
u32 crystal_cap = 0;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
crystal_cap = *((u32 *)poid_par_priv->information_buf);/*4*/
if (crystal_cap > 0xf)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
Adapter->mppriv.curr_crystalcap = crystal_cap;
r8712_SetCrystalCap(Adapter);
return status;
@@ -1362,9 +1363,9 @@ uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv
u32 rcr_val32;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u8))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
rx_pkt_type = *((u8 *)poid_par_priv->information_buf);/*4*/
rcr_val32 = r8712_read32(Adapter, RCR);/*RCR = 0x10250048*/
rcr_val32 &= ~(RCR_CBSSID | RCR_AB | RCR_AM | RCR_APM | RCR_AAP);
@@ -1391,7 +1392,7 @@ uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv
else
Adapter->mppriv.check_mp_pkt = 0;
r8712_write32(Adapter, RCR, rcr_val32);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv
@@ -1402,12 +1403,12 @@ uint oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv
u32 txagc;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
txagc = *(u32 *)poid_par_priv->information_buf;
r8712_SetTxAGCOffset(Adapter, txagc);
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
uint oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv
@@ -1415,16 +1416,16 @@ uint oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = NDIS_STATUS_SUCCESS;
+ uint status = RNDIS_STATUS_SUCCESS;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
struct mp_priv *pmppriv = &Adapter->mppriv;
u32 type;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
type = *(u32 *)poid_par_priv->information_buf;
@@ -1435,7 +1436,7 @@ uint oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv
pmppriv->mode = type;
_clr_fwstate_(pmlmepriv, WIFI_MP_LPBK_STATE);
} else
- status = NDIS_STATUS_NOT_ACCEPTED;
+ status = RNDIS_STATUS_NOT_ACCEPTED;
return status;
}
/*--------------------------------------------------------------------------*/
@@ -1450,10 +1451,10 @@ uint oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv)
u8 bpwrup;
if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
bpwrup = *(u8 *)poid_par_priv->information_buf;
/*CALL the power_down function*/
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
/*-------------------------------------------------------------------------- */
@@ -1463,11 +1464,11 @@ uint oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv)
(poid_par_priv->adapter_context);
if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_NOT_ACCEPTED;
if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
*(int *)poid_par_priv->information_buf =
Adapter->registrypriv.low_power ? POWER_LOW : POWER_NORMAL;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- return NDIS_STATUS_SUCCESS;
+ return RNDIS_STATUS_SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl871x_security.h b/drivers/staging/rtl8712/rtl871x_security.h
index a13395fe21d7..c732aeab8d2c 100644
--- a/drivers/staging/rtl8712/rtl871x_security.h
+++ b/drivers/staging/rtl8712/rtl871x_security.h
@@ -207,9 +207,9 @@ void seccalctkipmic(
u8 *Miccode,
u8 priority);
-void r8712_secmicsetkey(struct mic_data *pmicdata, u8 * key);
-void r8712_secmicappend(struct mic_data *pmicdata, u8 * src, u32 nBytes);
-void r8712_secgetmic(struct mic_data *pmicdata, u8 * dst);
+void r8712_secmicsetkey(struct mic_data *pmicdata, u8 *key);
+void r8712_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nBytes);
+void r8712_secgetmic(struct mic_data *pmicdata, u8 *dst);
u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe);
u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe);
void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe);
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index f8016e9abffd..c4e0ef2f52c6 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -140,7 +140,7 @@ void r8712_free_all_stainfo(struct _adapter *padapter);
struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr);
void r8712_init_bcmc_stainfo(struct _adapter *padapter);
struct sta_info *r8712_get_bcmc_stainfo(struct _adapter *padapter);
-u8 r8712_access_ctrl(struct wlan_acl_pool *pacl_list, u8 * mac_addr);
+u8 r8712_access_ctrl(struct wlan_acl_pool *pacl_list, u8 *mac_addr);
#endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 6b73843e580a..c812d6c7dc31 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -63,6 +63,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
/* Belkin */
{USB_DEVICE(0x050D, 0x945A)},
+ /* ISY IWL - Belkin clone */
+ {USB_DEVICE(0x050D, 0x11F1)},
/* Corega */
{USB_DEVICE(0x07AA, 0x0047)},
/* D-Link */
@@ -203,9 +205,9 @@ static int r871x_suspend(struct usb_interface *pusb_intf, pm_message_t state)
{
struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
- printk(KERN_INFO "r8712: suspending...\n");
+ netdev_info(pnetdev, "Suspending...\n");
if (!pnetdev || !netif_running(pnetdev)) {
- printk(KERN_INFO "r8712: unable to suspend\n");
+ netdev_info(pnetdev, "Unable to suspend\n");
return 0;
}
if (pnetdev->netdev_ops->ndo_stop)
@@ -219,9 +221,9 @@ static int r871x_resume(struct usb_interface *pusb_intf)
{
struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
- printk(KERN_INFO "r8712: resuming...\n");
+ netdev_info(pnetdev, "Resuming...\n");
if (!pnetdev || !netif_running(pnetdev)) {
- printk(KERN_INFO "r8712: unable to resume\n");
+ netdev_info(pnetdev, "Unable to resume\n");
return 0;
}
netif_device_attach(pnetdev);
@@ -271,12 +273,12 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
if (pusbd->speed == USB_SPEED_HIGH) {
pdvobjpriv->ishighspeed = true;
- printk(KERN_INFO "r8712u: USB_SPEED_HIGH with %d endpoints\n",
- pdvobjpriv->nr_endpoint);
+ dev_info(&pusbd->dev, "r8712u: USB_SPEED_HIGH with %d endpoints\n",
+ pdvobjpriv->nr_endpoint);
} else {
pdvobjpriv->ishighspeed = false;
- printk(KERN_INFO "r8712u: USB_SPEED_LOW with %d endpoints\n",
- pdvobjpriv->nr_endpoint);
+ dev_info(&pusbd->dev, "r8712u: USB_SPEED_LOW with %d endpoints\n",
+ pdvobjpriv->nr_endpoint);
}
if ((r8712_alloc_io_queue(padapter)) == _FAIL)
status = _FAIL;
@@ -421,9 +423,9 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
tmpU1b = r8712_read8(padapter, EE_9346CR);/*CR9346*/
/* To check system boot selection.*/
- printk(KERN_INFO "r8712u: Boot from %s: Autoload %s\n",
- (tmpU1b & _9356SEL) ? "EEPROM" : "EFUSE",
- (tmpU1b & _EEPROM_EN) ? "OK" : "Failed");
+ dev_info(&udev->dev, "r8712u: Boot from %s: Autoload %s\n",
+ (tmpU1b & _9356SEL) ? "EEPROM" : "EFUSE",
+ (tmpU1b & _EEPROM_EN) ? "OK" : "Failed");
/* To check autoload success or not.*/
if (tmpU1b & _EEPROM_EN) {
@@ -531,8 +533,8 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
RT_CID_DEFAULT;
break;
}
- printk(KERN_INFO "r8712u: CustomerID = 0x%.4x\n",
- padapter->eeprompriv.CustomerID);
+ dev_info(&udev->dev, "r8712u: CustomerID = 0x%.4x\n",
+ padapter->eeprompriv.CustomerID);
/* Led mode */
switch (padapter->eeprompriv.CustomerID) {
case RT_CID_DEFAULT:
@@ -588,11 +590,9 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
* address by setting bit 1 of first octet.
*/
mac[0] &= 0xFE;
- printk(KERN_INFO "r8712u: MAC Address from user = "
- "%pM\n", mac);
+ dev_info(&udev->dev, "r8712u: MAC Address from user = %pM\n", mac);
} else
- printk(KERN_INFO "r8712u: MAC Address from efuse = "
- "%pM\n", mac);
+ dev_info(&udev->dev, "r8712u: MAC Address from efuse = %pM\n", mac);
memcpy(pnetdev->dev_addr, mac, ETH_ALEN);
}
/* step 6. Load the firmware asynchronously */
@@ -659,7 +659,6 @@ static void __exit r8712u_drv_halt(void)
{
drvpriv.drv_registered = false;
usb_deregister(&drvpriv.r871xu_drv);
- printk(KERN_INFO "r8712u: Driver unloaded\n");
}
module_init(r8712u_drv_entry);
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 24e1ec5f0060..dca398a0656b 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -45,9 +45,6 @@ struct zero_bulkout_context {
void *padapter;
};
-#define usb_write_cmd r8712_usb_write_mem
-#define usb_write_cmd_complete usb_write_mem_complete
-
uint r8712_usb_init_intf_priv(struct intf_priv *pintfpriv)
{
pintfpriv->piorw_urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -243,8 +240,7 @@ static void r8712_usb_read_port_complete(struct urb *purb)
(unsigned char *)precvbuf);
break;
case -EINPROGRESS:
- printk(KERN_ERR "r8712u: ERROR: URB IS IN"
- " PROGRESS!/n");
+ netdev_err(padapter->pnetdev, "ERROR: URB IS IN PROGRESS!\n");
break;
default:
break;
@@ -336,8 +332,7 @@ void r8712_xmit_bh(void *priv)
if ((padapter->bDriverStopped == true) ||
(padapter->bSurpriseRemoved == true)) {
- printk(KERN_ERR "r8712u: xmit_bh => bDriverStopped"
- " or bSurpriseRemoved\n");
+ netdev_err(padapter->pnetdev, "xmit_bh => bDriverStopped or bSurpriseRemoved\n");
return;
}
ret = r8712_xmitframe_complete(padapter, pxmitpriv, NULL);
@@ -387,7 +382,7 @@ static void usb_write_port_complete(struct urb *purb)
case 0:
break;
default:
- printk(KERN_WARNING "r8712u: pipe error: (%d)\n", purb->status);
+ netdev_warn(padapter->pnetdev, "r8712u: pipe error: (%d)\n", purb->status);
break;
}
/* not to consider tx fragment */
@@ -502,8 +497,8 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
palloc_buf = _malloc((u32) len + 16);
if (palloc_buf == NULL) {
- printk(KERN_ERR "r8712u: [%s] Can't alloc memory for vendor"
- " request\n", __func__);
+ dev_err(&udev->dev, "%s: Can't alloc memory for vendor request\n",
+ __func__);
return -1;
}
pIo_buf = palloc_buf + 16 - ((addr_t)(palloc_buf) & 0x0f);
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 793443e758ac..73d7cd280607 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -159,99 +159,85 @@ enum WIFI_REG_DOMAIN {
#define _PRIVACY_ BIT(14)
#define _ORDER_ BIT(15)
-#define SetToDs(pbuf) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(_TO_DS_); \
- } while (0)
+#define SetToDs(pbuf) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_TO_DS_); \
+})
#define GetToDs(pbuf) (((*(unsigned short *)(pbuf)) & \
le16_to_cpu(_TO_DS_)) != 0)
-#define ClearToDs(pbuf) \
- do { \
- *(unsigned short *)(pbuf) &= (~cpu_to_le16(_TO_DS_)); \
- } while (0)
+#define ClearToDs(pbuf) ({ \
+ *(unsigned short *)(pbuf) &= (~cpu_to_le16(_TO_DS_)); \
+})
-#define SetFrDs(pbuf) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(_FROM_DS_); \
- } while (0)
+#define SetFrDs(pbuf) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_FROM_DS_); \
+})
#define GetFrDs(pbuf) (((*(unsigned short *)(pbuf)) & \
le16_to_cpu(_FROM_DS_)) != 0)
-#define ClearFrDs(pbuf) \
- do { \
- *(unsigned short *)(pbuf) &= (~cpu_to_le16(_FROM_DS_)); \
- } while (0)
+#define ClearFrDs(pbuf) ({ \
+ *(unsigned short *)(pbuf) &= (~cpu_to_le16(_FROM_DS_)); \
+})
#define get_tofr_ds(pframe) ((GetToDs(pframe) << 1) | GetFrDs(pframe))
-#define SetMFrag(pbuf) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(_MORE_FRAG_); \
- } while (0)
+#define SetMFrag(pbuf) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_MORE_FRAG_); \
+})
#define GetMFrag(pbuf) (((*(unsigned short *)(pbuf)) & \
le16_to_cpu(_MORE_FRAG_)) != 0)
-#define ClearMFrag(pbuf) \
- do { \
- *(unsigned short *)(pbuf) &= (~cpu_to_le16(_MORE_FRAG_)); \
- } while (0)
+#define ClearMFrag(pbuf) ({ \
+ *(unsigned short *)(pbuf) &= (~cpu_to_le16(_MORE_FRAG_)); \
+})
-#define SetRetry(pbuf) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(_RETRY_); \
- } while (0)
+#define SetRetry(pbuf) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_RETRY_); \
+})
#define GetRetry(pbuf) (((*(unsigned short *)(pbuf)) & \
le16_to_cpu(_RETRY_)) != 0)
-#define ClearRetry(pbuf) \
- do { \
- *(unsigned short *)(pbuf) &= (~cpu_to_le16(_RETRY_)); \
- } while (0)
+#define ClearRetry(pbuf) ({ \
+ *(unsigned short *)(pbuf) &= (~cpu_to_le16(_RETRY_)); \
+})
-#define SetPwrMgt(pbuf) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(_PWRMGT_); \
- } while (0)
+#define SetPwrMgt(pbuf) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_PWRMGT_); \
+})
#define GetPwrMgt(pbuf) (((*(unsigned short *)(pbuf)) & \
le16_to_cpu(_PWRMGT_)) != 0)
-#define ClearPwrMgt(pbuf) \
- do { \
- *(unsigned short *)(pbuf) &= (~cpu_to_le16(_PWRMGT_)); \
- } while (0)
+#define ClearPwrMgt(pbuf) ({ \
+ *(unsigned short *)(pbuf) &= (~cpu_to_le16(_PWRMGT_)); \
+})
-#define SetMData(pbuf) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(_MORE_DATA_); \
- } while (0)
+#define SetMData(pbuf) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_MORE_DATA_); \
+})
#define GetMData(pbuf) (((*(unsigned short *)(pbuf)) & \
le16_to_cpu(_MORE_DATA_)) != 0)
-#define ClearMData(pbuf) \
- do { \
- *(unsigned short *)(pbuf) &= (~cpu_to_le16(_MORE_DATA_)); \
- } while (0)
+#define ClearMData(pbuf) ({ \
+ *(unsigned short *)(pbuf) &= (~cpu_to_le16(_MORE_DATA_)); \
+})
-#define SetPrivacy(pbuf) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(_PRIVACY_); \
- } while (0)
+#define SetPrivacy(pbuf) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_PRIVACY_); \
+})
#define GetPrivacy(pbuf) (((*(unsigned short *)(pbuf)) & \
le16_to_cpu(_PRIVACY_)) != 0)
-#define ClearPrivacy(pbuf) \
- do { \
- *(unsigned short *)(pbuf) &= (~cpu_to_le16(_PRIVACY_)); \
- } while (0)
+#define ClearPrivacy(pbuf) ({ \
+ *(unsigned short *)(pbuf) &= (~cpu_to_le16(_PRIVACY_)); \
+})
#define GetOrder(pbuf) (((*(unsigned short *)(pbuf)) & \
@@ -287,48 +273,42 @@ enum WIFI_REG_DOMAIN {
#define GetTupleCache(pbuf) (cpu_to_le16(*(unsigned short *)\
((addr_t)(pbuf) + 22)))
-#define SetFragNum(pbuf, num) \
- do { \
- *(unsigned short *)((addr_t)(pbuf) + 22) = \
- ((*(unsigned short *)((addr_t)(pbuf) + 22)) & \
- le16_to_cpu(~(0x000f))) | \
- cpu_to_le16(0x0f & (num)); \
- } while (0)
-
-#define SetSeqNum(pbuf, num) \
- do { \
- *(unsigned short *)((addr_t)(pbuf) + 22) = \
- ((*(unsigned short *)((addr_t)(pbuf) + 22)) & \
- le16_to_cpu((unsigned short)0x000f)) | \
- le16_to_cpu((unsigned short)(0xfff0 & (num << 4))); \
- } while (0)
-
-#define SetDuration(pbuf, dur) \
- do { \
- *(unsigned short *)((addr_t)(pbuf) + 2) |= \
- cpu_to_le16(0xffff & (dur)); \
- } while (0)
-
-#define SetPriority(pbuf, tid) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(tid & 0xf); \
- } while (0)
+#define SetFragNum(pbuf, num) ({ \
+ *(unsigned short *)((addr_t)(pbuf) + 22) = \
+ ((*(unsigned short *)((addr_t)(pbuf) + 22)) & \
+ le16_to_cpu(~(0x000f))) | \
+ cpu_to_le16(0x0f & (num)); \
+})
+
+#define SetSeqNum(pbuf, num) ({ \
+ *(unsigned short *)((addr_t)(pbuf) + 22) = \
+ ((*(unsigned short *)((addr_t)(pbuf) + 22)) & \
+ le16_to_cpu((unsigned short)0x000f)) | \
+ le16_to_cpu((unsigned short)(0xfff0 & (num << 4))); \
+})
+
+#define SetDuration(pbuf, dur) ({ \
+ *(unsigned short *)((addr_t)(pbuf) + 2) |= \
+ cpu_to_le16(0xffff & (dur)); \
+})
+
+#define SetPriority(pbuf, tid) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(tid & 0xf); \
+})
#define GetPriority(pbuf) ((le16_to_cpu(*(unsigned short *)(pbuf))) & 0xf)
-#define SetAckpolicy(pbuf, ack) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16((ack & 3) << 5); \
- } while (0)
+#define SetAckpolicy(pbuf, ack) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16((ack & 3) << 5); \
+})
#define GetAckpolicy(pbuf) (((le16_to_cpu(*(unsigned short *)pbuf)) >> 5) & 0x3)
#define GetAMsdu(pbuf) (((le16_to_cpu(*(unsigned short *)pbuf)) >> 7) & 0x1)
-#define SetAMsdu(pbuf, amsdu) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7); \
- } while (0)
+#define SetAMsdu(pbuf, amsdu) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7); \
+})
#define GetAid(pbuf) (cpu_to_le16(*(unsigned short *)((addr_t)(pbuf) + 2)) \
& 0x3fff)
@@ -457,11 +437,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define _SSID_IE_ 0
#define _SUPPORTEDRATES_IE_ 1
#define _DSSET_IE_ 3
-#define _TIM_IE_ 5
#define _IBSS_PARA_IE_ 6
-#define _CHLGETXT_IE_ 16
-#define _RSN_IE_2_ 48
-#define _SSN_IE_1_ 221
#define _ERPINFO_IE_ 42
#define _EXT_SUPPORTEDRATES_IE_ 50
@@ -526,10 +502,9 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
-#define SetOrderBit(pbuf) \
- do { \
- *(unsigned short *)(pbuf) |= cpu_to_le16(_ORDER_); \
- } while (0)
+#define SetOrderBit(pbuf) ({ \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_ORDER_); \
+})
#define GetOrderBit(pbuf) (((*(unsigned short *)(pbuf)) & \
le16_to_cpu(_ORDER_)) != 0)
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 65542cb7168f..4d22bb7008f8 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -134,8 +134,7 @@ int r8712_xmit_resource_alloc(struct _adapter *padapter,
for (i = 0; i < 8; i++) {
pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
if (pxmitbuf->pxmit_urb[i] == NULL) {
- printk(KERN_ERR "r8712u: pxmitbuf->pxmit_urb[i]"
- " == NULL");
+ netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n");
return _FAIL;
}
}
diff --git a/drivers/staging/sb105x/Kconfig b/drivers/staging/sb105x/Kconfig
index ac87c5e38dee..245e7847a354 100644
--- a/drivers/staging/sb105x/Kconfig
+++ b/drivers/staging/sb105x/Kconfig
@@ -1,7 +1,7 @@
config SB105X
tristate "SystemBase PCI Multiport UART"
select SERIAL_CORE
- depends on PCI
+ depends on PCI && X86 && TTY && BROKEN
help
A driver for the SystemBase Multi-2/PCI serial card
diff --git a/drivers/staging/sb105x/sb_mp_register.h b/drivers/staging/sb105x/sb_mp_register.h
index 5480ae11368f..a2087f5b0d1a 100644
--- a/drivers/staging/sb105x/sb_mp_register.h
+++ b/drivers/staging/sb105x/sb_mp_register.h
@@ -45,7 +45,7 @@
#define IIR_RS232 0x00 /* RS232 type */
#define IIR_RS422 0x10 /* RS422 type */
#define IIR_RS485 0x20 /* RS485 type */
-#define IIR_UNKNOWN 0x30 /* unknown type */
+#define IIR_TYPE_MASK 0x30
/* Interrrupt Mask Register */
#define MP_OPTR_IMR0 0x0C /* port0 ~ port8 */
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index edb2a85b9d52..f75ee1dd475c 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -1563,13 +1563,13 @@ static int mp_open(struct tty_struct *tty, struct file *filp)
state = uart_get(drv, line);
- mtpt = (struct mp_port *)state->port;
-
if (IS_ERR(state)) {
retval = PTR_ERR(state);
goto fail;
}
+ mtpt = (struct mp_port *)state->port;
+
tty->driver_data = state;
tty->low_latency = (state->port->flags & UPF_LOW_LATENCY) ? 1 : 0;
tty->alt_speed = 0;
@@ -2851,18 +2851,12 @@ static void __init multi_init_ports(void)
printk("IIR_RET = %x\n",b_ret);
}
- if(IIR_RS232 == (b_ret & IIR_RS232))
- {
- mtpt->interface = RS232;
- }
- if(IIR_RS422 == (b_ret & IIR_RS422))
- {
+ /* default to RS232 */
+ mtpt->interface = RS232;
+ if (IIR_RS422 == (b_ret & IIR_TYPE_MASK))
mtpt->interface = RS422PTP;
- }
- if(IIR_RS485 == (b_ret & IIR_RS485))
- {
+ if (IIR_RS485 == (b_ret & IIR_TYPE_MASK))
mtpt->interface = RS485NE;
- }
}
}
}
@@ -3054,6 +3048,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16);
}
break;
+#ifdef CONFIG_PARPORT_PC
case PCI_DEVICE_ID_MP2S1P :
sbdev->nr_ports = 2;
@@ -3073,6 +3068,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
/* add PC compatible parallel port */
parport_pc_probe_port(pcidev->resource[2].start, pcidev->resource[3].start, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &pcidev->dev, 0);
break;
+#endif
}
ret = request_region(sbdev->uart_access_addr, (8*sbdev->nr_ports), sbdev->name);
diff --git a/drivers/staging/sbe-2t3e3/dc.c b/drivers/staging/sbe-2t3e3/dc.c
index daadd6ea4978..f207b9e015ce 100644
--- a/drivers/staging/sbe-2t3e3/dc.c
+++ b/drivers/staging/sbe-2t3e3/dc.c
@@ -315,20 +315,17 @@ static int dc_init_descriptor_list(struct channel *sc)
struct sk_buff *m;
if (sc->ether.rx_ring == NULL)
- sc->ether.rx_ring = kzalloc(SBE_2T3E3_RX_DESC_RING_SIZE *
+ sc->ether.rx_ring = kcalloc(SBE_2T3E3_RX_DESC_RING_SIZE,
sizeof(t3e3_rx_desc_t), GFP_KERNEL);
- if (sc->ether.rx_ring == NULL) {
- dev_err(&sc->pdev->dev, "SBE 2T3E3: no buffer space for RX ring\n");
+ if (sc->ether.rx_ring == NULL)
return -ENOMEM;
- }
if (sc->ether.tx_ring == NULL)
- sc->ether.tx_ring = kzalloc(SBE_2T3E3_TX_DESC_RING_SIZE *
+ sc->ether.tx_ring = kcalloc(SBE_2T3E3_TX_DESC_RING_SIZE,
sizeof(t3e3_tx_desc_t), GFP_KERNEL);
if (sc->ether.tx_ring == NULL) {
kfree(sc->ether.rx_ring);
sc->ether.rx_ring = NULL;
- dev_err(&sc->pdev->dev, "SBE 2T3E3: no buffer space for RX ring\n");
return -ENOMEM;
}
diff --git a/drivers/staging/sbe-2t3e3/module.c b/drivers/staging/sbe-2t3e3/module.c
index ae7af397a992..0e32be5c2471 100644
--- a/drivers/staging/sbe-2t3e3/module.c
+++ b/drivers/staging/sbe-2t3e3/module.c
@@ -154,11 +154,10 @@ static int t3e3_init_card(struct pci_dev *pdev, const struct pci_device_id *ent)
/* holds the reference for pdev1 */
}
- card = kzalloc(sizeof(struct card) + channels * sizeof(struct channel), GFP_KERNEL);
- if (!card) {
- dev_err(&pdev->dev, "Out of memory\n");
+ card = kzalloc(sizeof(struct card) + channels * sizeof(struct channel),
+ GFP_KERNEL);
+ if (!card)
return -ENOBUFS;
- }
spin_lock_init(&card->bootrom_lock);
card->bootrom_addr = pci_resource_start(pdev, 0);
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
index 34710ce56004..cd3bb39e4255 100644
--- a/drivers/staging/sep/sep_crypto.c
+++ b/drivers/staging/sep/sep_crypto.c
@@ -178,11 +178,9 @@ static struct scatterlist *sep_alloc_sg_buf(
nbr_pages += 1;
}
- sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
- if (!sg) {
- dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");
+ sg = kmalloc_array(nbr_pages, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!sg)
return NULL;
- }
sg_init_table(sg, nbr_pages);
@@ -3908,13 +3906,9 @@ int sep_crypto_setup(void)
return -ENOMEM;
}
- i = 0;
- j = 0;
-
spin_lock_init(&queue_lock);
err = 0;
-
for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
err = crypto_register_ahash(&hash_algs[i]);
if (err)
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
index 15c6e3d9437c..30e8d25113e4 100644
--- a/drivers/staging/sep/sep_main.c
+++ b/drivers/staging/sep/sep_main.c
@@ -219,12 +219,8 @@ static int sep_allocate_dmatables_region(struct sep_device *sep,
dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
dma_ctx->dmatables_len);
tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
- if (!tmp_region) {
- dev_warn(&sep->pdev->dev,
- "[PID%d] no mem for dma tables region\n",
- current->pid);
+ if (!tmp_region)
return -ENOMEM;
- }
/* Were there any previous tables that need to be preserved ? */
if (*dmatables_region) {
@@ -1245,27 +1241,23 @@ static int sep_lock_user_pages(struct sep_device *sep,
current->pid, num_pages);
/* Allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
+ page_array = kmalloc_array(num_pages, sizeof(struct page *),
+ GFP_ATOMIC);
if (!page_array) {
error = -ENOMEM;
goto end_function;
}
- map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
+
+ map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
+ GFP_ATOMIC);
if (!map_array) {
- dev_warn(&sep->pdev->dev,
- "[PID%d] kmalloc for map_array failed\n",
- current->pid);
error = -ENOMEM;
goto end_function_with_error1;
}
- lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
- GFP_ATOMIC);
-
+ lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
+ GFP_ATOMIC);
if (!lli_array) {
- dev_warn(&sep->pdev->dev,
- "[PID%d] kmalloc for lli_array failed\n",
- current->pid);
error = -ENOMEM;
goto end_function_with_error2;
}
@@ -1448,15 +1440,10 @@ static int sep_lli_table_secure_dma(struct sep_device *sep,
dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
current->pid, num_pages);
- lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
- GFP_ATOMIC);
-
- if (!lli_array) {
- dev_warn(&sep->pdev->dev,
- "[PID%d] kmalloc for lli_array failed\n",
- current->pid);
+ lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
+ GFP_ATOMIC);
+ if (!lli_array)
return -ENOMEM;
- }
/*
* Fill the lli_array
@@ -3419,11 +3406,9 @@ static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
goto end_function;
}
- dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
+ dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
GFP_KERNEL);
if (!dcb_args) {
- dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
- current->pid);
error = -ENOMEM;
goto end_function;
}
@@ -3610,9 +3595,6 @@ static ssize_t sep_create_msgarea_context(struct sep_device *sep,
/* Allocate thread-specific memory for message buffer */
*msg_region = kzalloc(msg_len, GFP_KERNEL);
if (!(*msg_region)) {
- dev_warn(&sep->pdev->dev,
- "[PID%d] no mem for msgarea context\n",
- current->pid);
error = -ENOMEM;
goto end_function;
}
@@ -4133,8 +4115,6 @@ static int sep_probe(struct pci_dev *pdev,
/* Allocate the sep_device structure for this device */
sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
if (sep_dev == NULL) {
- dev_warn(&pdev->dev,
- "can't kmalloc the sep_device structure\n");
error = -ENOMEM;
goto end_function_disable_device;
}
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 1b3e995d3a27..b1bb1a6abe81 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -255,12 +255,11 @@ static void ProcessModemStatus(struct quatech_port *qt_port,
wake_up_interruptible(&qt_port->wait);
}
-static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
- unsigned char data)
+static void ProcessRxChar(struct usb_serial_port *port, unsigned char data)
{
struct urb *urb = port->read_urb;
if (urb->actual_length)
- tty_insert_flip_char(tty, data, TTY_NORMAL);
+ tty_insert_flip_char(&port->port, data, TTY_NORMAL);
}
static void qt_write_bulk_callback(struct urb *urb)
@@ -291,8 +290,7 @@ static void qt_interrupt_callback(struct urb *urb)
/* FIXME */
}
-static void qt_status_change_check(struct tty_struct *tty,
- struct urb *urb,
+static void qt_status_change_check(struct urb *urb,
struct quatech_port *qt_port,
struct usb_serial_port *port)
{
@@ -335,8 +333,8 @@ static void qt_status_change_check(struct tty_struct *tty,
case 0xff:
dev_dbg(&port->dev, "No status sequence.\n");
- ProcessRxChar(tty, port, data[i]);
- ProcessRxChar(tty, port, data[i + 1]);
+ ProcessRxChar(port, data[i]);
+ ProcessRxChar(port, data[i + 1]);
i += 2;
break;
@@ -345,11 +343,11 @@ static void qt_status_change_check(struct tty_struct *tty,
continue;
}
- if (tty && urb->actual_length)
- tty_insert_flip_char(tty, data[i], TTY_NORMAL);
+ if (urb->actual_length)
+ tty_insert_flip_char(&port->port, data[i], TTY_NORMAL);
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
static void qt_read_bulk_callback(struct urb *urb)
@@ -358,7 +356,6 @@ static void qt_read_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
- struct tty_struct *tty;
int result;
if (urb->status) {
@@ -369,27 +366,23 @@ static void qt_read_bulk_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
dev_dbg(&port->dev,
"%s - port->RxHolding = %d\n", __func__, qt_port->RxHolding);
if (port_paranoia_check(port, __func__) != 0) {
qt_port->ReadBulkStopped = 1;
- goto exit;
+ return;
}
if (!serial)
- goto exit;
+ return;
if (qt_port->closePending == 1) {
/* Were closing , stop reading */
dev_dbg(&port->dev,
"%s - (qt_port->closepending == 1\n", __func__);
qt_port->ReadBulkStopped = 1;
- goto exit;
+ return;
}
/*
@@ -399,7 +392,7 @@ static void qt_read_bulk_callback(struct urb *urb)
*/
if (qt_port->RxHolding == 1) {
qt_port->ReadBulkStopped = 1;
- goto exit;
+ return;
}
if (urb->status) {
@@ -408,11 +401,11 @@ static void qt_read_bulk_callback(struct urb *urb)
dev_dbg(&port->dev,
"%s - nonzero read bulk status received: %d\n",
__func__, urb->status);
- goto exit;
+ return;
}
if (urb->actual_length)
- qt_status_change_check(tty, urb, qt_port, port);
+ qt_status_change_check(urb, qt_port, port);
/* Continue trying to always read */
usb_fill_bulk_urb(port->read_urb, serial->dev,
@@ -428,14 +421,12 @@ static void qt_read_bulk_callback(struct urb *urb)
__func__, result);
else {
if (urb->actual_length) {
- tty_flip_buffer_push(tty);
- tty_schedule_flip(tty);
+ tty_flip_buffer_push(&port->port);
+ tty_schedule_flip(&port->port);
}
}
schedule_work(&port->work);
-exit:
- tty_kref_put(tty);
}
/*
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
index ebdcc6f91fdc..4c7822bd5358 100644
--- a/drivers/staging/slicoss/slic.h
+++ b/drivers/staging/slicoss/slic.h
@@ -43,15 +43,15 @@
/* firmware stuff */
#define OASIS_UCODE_VERS_STRING "1.2"
-#define OASIS_UCODE_VERS_DATE "2006/03/27 15:10:37"
-#define OASIS_UCODE_HOSTIF_ID 3
+#define OASIS_UCODE_VERS_DATE "2006/03/27 15:10:37"
+#define OASIS_UCODE_HOSTIF_ID 3
#define MOJAVE_UCODE_VERS_STRING "1.2"
-#define MOJAVE_UCODE_VERS_DATE "2006/03/27 15:12:22"
-#define MOJAVE_UCODE_HOSTIF_ID 3
+#define MOJAVE_UCODE_VERS_DATE "2006/03/27 15:12:22"
+#define MOJAVE_UCODE_HOSTIF_ID 3
#define GB_RCVUCODE_VERS_STRING "1.2"
-#define GB_RCVUCODE_VERS_DATE "2006/03/27 15:12:15"
+#define GB_RCVUCODE_VERS_DATE "2006/03/27 15:12:15"
static u32 OasisRcvUCodeLen = 512;
static u32 GBRcvUCodeLen = 512;
#define SECTION_SIZE 65536
@@ -65,12 +65,12 @@ struct slic_spinlock {
#define SLIC_RSPQ_BUFSINPAGE (PAGE_SIZE / SLIC_RSPBUF_SIZE)
struct slic_rspqueue {
- u32 offset;
- u32 pageindex;
- u32 num_pages;
- struct slic_rspbuf *rspbuf;
- u32 *vaddr[SLIC_RSPQ_PAGES_GB];
- dma_addr_t paddr[SLIC_RSPQ_PAGES_GB];
+ u32 offset;
+ u32 pageindex;
+ u32 num_pages;
+ struct slic_rspbuf *rspbuf;
+ u32 *vaddr[SLIC_RSPQ_PAGES_GB];
+ dma_addr_t paddr[SLIC_RSPQ_PAGES_GB];
};
#define SLIC_RCVQ_EXPANSION 1
@@ -82,20 +82,20 @@ struct slic_rspqueue {
#define SLIC_RCVQ_FILLTHRESH (SLIC_RCVQ_ENTRIES - SLIC_RCVQ_FILLENTRIES)
struct slic_rcvqueue {
- struct sk_buff *head;
- struct sk_buff *tail;
- u32 count;
- u32 size;
- u32 errors;
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ u32 count;
+ u32 size;
+ u32 errors;
};
struct slic_rcvbuf_info {
- u32 id;
- u32 starttime;
- u32 stoptime;
- u32 slicworld;
- u32 lasttime;
- u32 lastid;
+ u32 id;
+ u32 starttime;
+ u32 stoptime;
+ u32 slicworld;
+ u32 lasttime;
+ u32 lastid;
};
/*
SLIC Handle structure. Used to restrict handle values to
@@ -113,12 +113,12 @@ struct slic_handle_word {
};
struct slic_handle {
- struct slic_handle_word token; /* token passed between host and card*/
- ushort type;
- void *address; /* actual address of the object*/
- ushort offset;
- struct slic_handle *other_handle;
- struct slic_handle *next;
+ struct slic_handle_word token; /* token passed between host and card*/
+ ushort type;
+ void *address; /* actual address of the object*/
+ ushort offset;
+ struct slic_handle *other_handle;
+ struct slic_handle *next;
};
#define SLIC_HANDLE_FREE 0x0000
@@ -134,17 +134,17 @@ struct slic_handle {
#define SLIC_HOSTCMD_SIZE 512
struct slic_hostcmd {
- struct slic_host64_cmd cmd64;
- u32 type;
- struct sk_buff *skb;
- u32 paddrl;
- u32 paddrh;
- u32 busy;
- u32 cmdsize;
- ushort numbufs;
- struct slic_handle *pslic_handle;/* handle associated with command */
- struct slic_hostcmd *next;
- struct slic_hostcmd *next_all;
+ struct slic_host64_cmd cmd64;
+ u32 type;
+ struct sk_buff *skb;
+ u32 paddrl;
+ u32 paddrh;
+ u32 busy;
+ u32 cmdsize;
+ ushort numbufs;
+ struct slic_handle *pslic_handle;/* handle associated with command */
+ struct slic_hostcmd *next;
+ struct slic_hostcmd *next_all;
};
#define SLIC_CMDQ_CMDSINPAGE (PAGE_SIZE / SLIC_HOSTCMD_SIZE)
@@ -228,35 +228,35 @@ struct mcast_address {
#define SLIC_CARD_STATE(x) ((x == CARD_UP) ? "UP" : "Down")
struct slic_iface_stats {
- /*
- * Stats
- */
- u64 xmt_bytes;
- u64 xmt_ucast;
- u64 xmt_mcast;
- u64 xmt_bcast;
- u64 xmt_errors;
- u64 xmt_discards;
- u64 xmit_collisions;
- u64 xmit_excess_xmit_collisions;
- u64 rcv_bytes;
- u64 rcv_ucast;
- u64 rcv_mcast;
- u64 rcv_bcast;
- u64 rcv_errors;
- u64 rcv_discards;
+ /*
+ * Stats
+ */
+ u64 xmt_bytes;
+ u64 xmt_ucast;
+ u64 xmt_mcast;
+ u64 xmt_bcast;
+ u64 xmt_errors;
+ u64 xmt_discards;
+ u64 xmit_collisions;
+ u64 xmit_excess_xmit_collisions;
+ u64 rcv_bytes;
+ u64 rcv_ucast;
+ u64 rcv_mcast;
+ u64 rcv_bcast;
+ u64 rcv_errors;
+ u64 rcv_discards;
};
struct sliccp_stats {
- u64 xmit_tcp_segs;
- u64 xmit_tcp_bytes;
- u64 rcv_tcp_segs;
- u64 rcv_tcp_bytes;
+ u64 xmit_tcp_segs;
+ u64 xmit_tcp_bytes;
+ u64 rcv_tcp_segs;
+ u64 rcv_tcp_bytes;
};
struct slicnet_stats {
- struct sliccp_stats tcp;
- struct slic_iface_stats iface;
+ struct sliccp_stats tcp;
+ struct slic_iface_stats iface;
};
#define SLIC_LOADTIMER_PERIOD 1
@@ -285,51 +285,51 @@ struct slicnet_stats {
#define SLIC_INTAGG_5GB 100
struct ether_header {
- unsigned char ether_dhost[6];
- unsigned char ether_shost[6];
- ushort ether_type;
+ unsigned char ether_dhost[6];
+ unsigned char ether_shost[6];
+ ushort ether_type;
};
struct sliccard {
- uint busnumber;
- uint slotnumber;
- uint state;
- uint cardnum;
- uint card_size;
- uint adapters_activated;
- uint adapters_allocated;
- uint adapters_sleeping;
- uint gennumber;
- u32 events;
- u32 loadlevel_current;
- u32 load;
- uint reset_in_progress;
- u32 pingstatus;
- u32 bad_pingstatus;
- struct timer_list loadtimer;
- u32 loadtimerset;
- uint config_set;
- struct slic_config config;
- struct dentry *debugfs_dir;
- struct dentry *debugfs_cardinfo;
- struct adapter *master;
- struct adapter *adapter[SLIC_MAX_PORTS];
- struct sliccard *next;
- u32 error_interrupts;
- u32 error_rmiss_interrupts;
- u32 rcv_interrupts;
- u32 xmit_interrupts;
- u32 num_isrs;
- u32 false_interrupts;
- u32 max_isr_rcvs;
- u32 max_isr_xmits;
- u32 rcv_interrupt_yields;
- u32 tx_packets;
- u32 debug_ix;
- ushort reg_type[32];
- ushort reg_offset[32];
- u32 reg_value[32];
- u32 reg_valueh[32];
+ uint busnumber;
+ uint slotnumber;
+ uint state;
+ uint cardnum;
+ uint card_size;
+ uint adapters_activated;
+ uint adapters_allocated;
+ uint adapters_sleeping;
+ uint gennumber;
+ u32 events;
+ u32 loadlevel_current;
+ u32 load;
+ uint reset_in_progress;
+ u32 pingstatus;
+ u32 bad_pingstatus;
+ struct timer_list loadtimer;
+ u32 loadtimerset;
+ uint config_set;
+ struct slic_config config;
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_cardinfo;
+ struct adapter *master;
+ struct adapter *adapter[SLIC_MAX_PORTS];
+ struct sliccard *next;
+ u32 error_interrupts;
+ u32 error_rmiss_interrupts;
+ u32 rcv_interrupts;
+ u32 xmit_interrupts;
+ u32 num_isrs;
+ u32 false_interrupts;
+ u32 max_isr_rcvs;
+ u32 max_isr_xmits;
+ u32 rcv_interrupt_yields;
+ u32 tx_packets;
+ u32 debug_ix;
+ ushort reg_type[32];
+ ushort reg_offset[32];
+ u32 reg_value[32];
+ u32 reg_valueh[32];
};
#define NUM_CFG_SPACES 2
@@ -337,182 +337,182 @@ struct sliccard {
#define NUM_CFG_REG_ULONGS (NUM_CFG_REGS / sizeof(u32))
struct physcard {
- struct adapter *adapter[SLIC_MAX_PORTS];
- struct physcard *next;
- uint adapters_allocd;
+ struct adapter *adapter[SLIC_MAX_PORTS];
+ struct physcard *next;
+ uint adapters_allocd;
- /* the following is not currently needed
- u32 bridge_busnum;
- u32 bridge_cfg[NUM_CFG_SPACES][NUM_CFG_REG_ULONGS];
- */
+/* the following is not currently needed
+ u32 bridge_busnum;
+ u32 bridge_cfg[NUM_CFG_SPACES][NUM_CFG_REG_ULONGS];
+*/
};
struct base_driver {
- struct slic_spinlock driver_lock;
- u32 num_slic_cards;
- u32 num_slic_ports;
- u32 num_slic_ports_active;
- u32 dynamic_intagg;
- struct sliccard *slic_card;
- struct physcard *phys_card;
- uint cardnuminuse[SLIC_MAX_CARDS];
+ struct slic_spinlock driver_lock;
+ u32 num_slic_cards;
+ u32 num_slic_ports;
+ u32 num_slic_ports_active;
+ u32 dynamic_intagg;
+ struct sliccard *slic_card;
+ struct physcard *phys_card;
+ uint cardnuminuse[SLIC_MAX_CARDS];
};
struct slic_shmem {
- volatile u32 isr;
- volatile u32 linkstatus;
- volatile struct slic_stats inicstats;
+ volatile u32 isr;
+ volatile u32 linkstatus;
+ volatile struct slic_stats inicstats;
};
struct slic_reg_params {
- u32 linkspeed;
- u32 linkduplex;
- u32 fail_on_bad_eeprom;
+ u32 linkspeed;
+ u32 linkduplex;
+ u32 fail_on_bad_eeprom;
};
struct slic_upr {
- uint adapter;
- u32 upr_request;
- u32 upr_data;
- u32 upr_data_h;
- u32 upr_buffer;
- u32 upr_buffer_h;
- struct slic_upr *next;
+ uint adapter;
+ u32 upr_request;
+ u32 upr_data;
+ u32 upr_data_h;
+ u32 upr_buffer;
+ u32 upr_buffer_h;
+ struct slic_upr *next;
};
struct slic_ifevents {
- uint oflow802;
- uint uflow802;
- uint Tprtoflow;
- uint rcvearly;
- uint Bufov;
- uint Carre;
- uint Longe;
- uint Invp;
- uint Crc;
- uint Drbl;
- uint Code;
- uint IpHlen;
- uint IpLen;
- uint IpCsum;
- uint TpCsum;
- uint TpHlen;
+ uint oflow802;
+ uint uflow802;
+ uint Tprtoflow;
+ uint rcvearly;
+ uint Bufov;
+ uint Carre;
+ uint Longe;
+ uint Invp;
+ uint Crc;
+ uint Drbl;
+ uint Code;
+ uint IpHlen;
+ uint IpLen;
+ uint IpCsum;
+ uint TpCsum;
+ uint TpHlen;
};
struct adapter {
- void *ifp;
- struct sliccard *card;
- uint port;
- struct physcard *physcard;
- uint physport;
- uint cardindex;
- uint card_size;
- uint chipid;
- struct net_device *netdev;
- struct net_device *next_netdevice;
- struct slic_spinlock adapter_lock;
- struct slic_spinlock reset_lock;
- struct pci_dev *pcidev;
- uint busnumber;
- uint slotnumber;
- uint functionnumber;
- ushort vendid;
- ushort devid;
- ushort subsysid;
- u32 irq;
- void __iomem *memorybase;
- u32 memorylength;
- u32 drambase;
- u32 dramlength;
- uint queues_initialized;
- uint allocated;
- uint activated;
- u32 intrregistered;
- uint isp_initialized;
- uint gennumber;
- u32 curaddrupper;
- struct slic_shmem *pshmem;
- dma_addr_t phys_shmem;
- u32 isrcopy;
- __iomem struct slic_regs *slic_regs;
- unsigned char state;
- unsigned char linkstate;
- unsigned char linkspeed;
- unsigned char linkduplex;
- uint flags;
- unsigned char macaddr[6];
- unsigned char currmacaddr[6];
- u32 macopts;
- ushort devflags_prev;
- u64 mcastmask;
- struct mcast_address *mcastaddrs;
- struct slic_upr *upr_list;
- uint upr_busy;
- struct timer_list pingtimer;
- u32 pingtimerset;
- struct timer_list loadtimer;
- u32 loadtimerset;
- struct dentry *debugfs_entry;
- struct slic_spinlock upr_lock;
- struct slic_spinlock bit64reglock;
- struct slic_rspqueue rspqueue;
- struct slic_rcvqueue rcvqueue;
- struct slic_cmdqueue cmdq_free;
- struct slic_cmdqueue cmdq_done;
- struct slic_cmdqueue cmdq_all;
- struct slic_cmdqmem cmdqmem;
- /*
- * SLIC Handles
- */
- struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS+1]; /* Object handles*/
- struct slic_handle *pfree_slic_handles; /* Free object handles*/
- struct slic_spinlock handle_lock; /* Object handle list lock*/
- ushort slic_handle_ix;
-
- u32 xmitq_full;
- u32 all_reg_writes;
- u32 icr_reg_writes;
- u32 isr_reg_writes;
- u32 error_interrupts;
- u32 error_rmiss_interrupts;
- u32 rx_errors;
- u32 rcv_drops;
- u32 rcv_interrupts;
- u32 xmit_interrupts;
- u32 linkevent_interrupts;
- u32 upr_interrupts;
- u32 num_isrs;
- u32 false_interrupts;
- u32 tx_packets;
- u32 xmit_completes;
- u32 tx_drops;
- u32 rcv_broadcasts;
- u32 rcv_multicasts;
- u32 rcv_unicasts;
- u32 max_isr_rcvs;
- u32 max_isr_xmits;
- u32 rcv_interrupt_yields;
- u32 intagg_period;
- struct inicpm_state *inicpm_info;
- void *pinicpm_info;
- struct slic_reg_params reg_params;
- struct slic_ifevents if_events;
- struct slic_stats inicstats_prev;
- struct slicnet_stats slic_stats;
+ void *ifp;
+ struct sliccard *card;
+ uint port;
+ struct physcard *physcard;
+ uint physport;
+ uint cardindex;
+ uint card_size;
+ uint chipid;
+ struct net_device *netdev;
+ struct net_device *next_netdevice;
+ struct slic_spinlock adapter_lock;
+ struct slic_spinlock reset_lock;
+ struct pci_dev *pcidev;
+ uint busnumber;
+ uint slotnumber;
+ uint functionnumber;
+ ushort vendid;
+ ushort devid;
+ ushort subsysid;
+ u32 irq;
+ void __iomem *memorybase;
+ u32 memorylength;
+ u32 drambase;
+ u32 dramlength;
+ uint queues_initialized;
+ uint allocated;
+ uint activated;
+ u32 intrregistered;
+ uint isp_initialized;
+ uint gennumber;
+ u32 curaddrupper;
+ struct slic_shmem *pshmem;
+ dma_addr_t phys_shmem;
+ u32 isrcopy;
+ __iomem struct slic_regs *slic_regs;
+ unsigned char state;
+ unsigned char linkstate;
+ unsigned char linkspeed;
+ unsigned char linkduplex;
+ uint flags;
+ unsigned char macaddr[6];
+ unsigned char currmacaddr[6];
+ u32 macopts;
+ ushort devflags_prev;
+ u64 mcastmask;
+ struct mcast_address *mcastaddrs;
+ struct slic_upr *upr_list;
+ uint upr_busy;
+ struct timer_list pingtimer;
+ u32 pingtimerset;
+ struct timer_list loadtimer;
+ u32 loadtimerset;
+ struct dentry *debugfs_entry;
+ struct slic_spinlock upr_lock;
+ struct slic_spinlock bit64reglock;
+ struct slic_rspqueue rspqueue;
+ struct slic_rcvqueue rcvqueue;
+ struct slic_cmdqueue cmdq_free;
+ struct slic_cmdqueue cmdq_done;
+ struct slic_cmdqueue cmdq_all;
+ struct slic_cmdqmem cmdqmem;
+ /*
+ * SLIC Handles
+ */
+ struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS+1]; /* Object handles*/
+ struct slic_handle *pfree_slic_handles; /* Free object handles*/
+ struct slic_spinlock handle_lock; /* Object handle list lock*/
+ ushort slic_handle_ix;
+
+ u32 xmitq_full;
+ u32 all_reg_writes;
+ u32 icr_reg_writes;
+ u32 isr_reg_writes;
+ u32 error_interrupts;
+ u32 error_rmiss_interrupts;
+ u32 rx_errors;
+ u32 rcv_drops;
+ u32 rcv_interrupts;
+ u32 xmit_interrupts;
+ u32 linkevent_interrupts;
+ u32 upr_interrupts;
+ u32 num_isrs;
+ u32 false_interrupts;
+ u32 tx_packets;
+ u32 xmit_completes;
+ u32 tx_drops;
+ u32 rcv_broadcasts;
+ u32 rcv_multicasts;
+ u32 rcv_unicasts;
+ u32 max_isr_rcvs;
+ u32 max_isr_xmits;
+ u32 rcv_interrupt_yields;
+ u32 intagg_period;
+ struct inicpm_state *inicpm_info;
+ void *pinicpm_info;
+ struct slic_reg_params reg_params;
+ struct slic_ifevents if_events;
+ struct slic_stats inicstats_prev;
+ struct slicnet_stats slic_stats;
};
#define UPDATE_STATS(largestat, newstat, oldstat) \
{ \
- if ((newstat) < (oldstat)) \
- (largestat) += ((newstat) + (0xFFFFFFFF - oldstat + 1)); \
- else \
- (largestat) += ((newstat) - (oldstat)); \
+ if ((newstat) < (oldstat)) \
+ (largestat) += ((newstat) + (0xFFFFFFFF - oldstat + 1)); \
+ else \
+ (largestat) += ((newstat) - (oldstat)); \
}
#define UPDATE_STATS_GB(largestat, newstat, oldstat) \
{ \
- (largestat) += ((newstat) - (oldstat)); \
+ (largestat) += ((newstat) - (oldstat)); \
}
#if BITS_PER_LONG == 64
diff --git a/drivers/staging/slicoss/slichw.h b/drivers/staging/slicoss/slichw.h
index 6275d4529676..21cd02b8b7eb 100644
--- a/drivers/staging/slicoss/slichw.h
+++ b/drivers/staging/slicoss/slichw.h
@@ -450,7 +450,7 @@ struct slic_regs {
u32 pad34;
#define SLIC_DBAR64 0x0108
- u32 slic_cbar64; /* 64 bit Xmt Cmd buf addr regs. */
+ u32 slic_cbar64; /* 64 bit Xmt Cmd buf addr regs. */
u32 pad35;
#define SLIC_CBAR64 0x0110
@@ -478,11 +478,11 @@ struct slic_regs {
u32 slic_read_xf_info; /* Read Transformer info */
u32 pad41;
-#define SLIC_READ_XF_INFO 0x0140
+#define SLIC_READ_XF_INFO 0x0140
u32 slic_write_xf_info; /* Write Transformer info */
u32 pad42;
-#define SLIC_WRITE_XF_INFO 0x0148
+#define SLIC_WRITE_XF_INFO 0x0148
u32 RSVD1; /* TOE Only */
u32 pad43;
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 78578ee59557..76fc2e554f35 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -144,24 +144,24 @@ MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
#define SLIC_GET_SLIC_HANDLE(_adapter, _pslic_handle) \
{ \
- spin_lock_irqsave(&_adapter->handle_lock.lock, \
+ spin_lock_irqsave(&_adapter->handle_lock.lock, \
_adapter->handle_lock.flags); \
- _pslic_handle = _adapter->pfree_slic_handles; \
- if (_pslic_handle) { \
- _adapter->pfree_slic_handles = _pslic_handle->next; \
- } \
- spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
+ _pslic_handle = _adapter->pfree_slic_handles; \
+ if (_pslic_handle) { \
+ _adapter->pfree_slic_handles = _pslic_handle->next; \
+ } \
+ spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
_adapter->handle_lock.flags); \
}
#define SLIC_FREE_SLIC_HANDLE(_adapter, _pslic_handle) \
{ \
- _pslic_handle->type = SLIC_HANDLE_FREE; \
- spin_lock_irqsave(&_adapter->handle_lock.lock, \
+ _pslic_handle->type = SLIC_HANDLE_FREE; \
+ spin_lock_irqsave(&_adapter->handle_lock.lock, \
_adapter->handle_lock.flags); \
- _pslic_handle->next = _adapter->pfree_slic_handles; \
- _adapter->pfree_slic_handles = _pslic_handle; \
- spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
+ _pslic_handle->next = _adapter->pfree_slic_handles; \
+ _adapter->pfree_slic_handles = _pslic_handle; \
+ spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
_adapter->handle_lock.flags); \
}
@@ -209,7 +209,7 @@ static u32 slic_crc_init; /* Is table initialized */
*/
static void slic_mcast_init_crc32(void)
{
- u32 c; /* CRC shit reg */
+ u32 c; /* CRC reg */
u32 e = 0; /* Poly X-or pattern */
int i; /* counter */
int k; /* byte being shifted into crc */
@@ -2930,11 +2930,14 @@ static int slic_if_init(struct adapter *adapter)
}
if (!adapter->queues_initialized) {
- if ((rc = slic_rspqueue_init(adapter)))
+ rc = slic_rspqueue_init(adapter);
+ if (rc)
goto err;
- if ((rc = slic_cmdq_init(adapter)))
+ rc = slic_cmdq_init(adapter);
+ if (rc)
goto err;
- if ((rc = slic_rcvqueue_init(adapter)))
+ rc = slic_rcvqueue_init(adapter);
+ if (rc)
goto err;
adapter->queues_initialized = 1;
}
@@ -3437,7 +3440,7 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter)
(eecodesize - 2));
/*
if the ucdoe chksum flag bit worked,
- we wouldn't need this shit
+ we wouldn't need this
*/
if (ee_chksum == calc_chksum)
card->config.EepromValid = true;
diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig
index d288cf03e14b..b416aceb13f2 100644
--- a/drivers/staging/speakup/Kconfig
+++ b/drivers/staging/speakup/Kconfig
@@ -8,7 +8,7 @@ config SPEAKUP
video console for blind people. If built in to the
kernel, it can speak everything on the text console from
boot up to shutdown. For more information on Speakup,
- point your browser at http://www.linux-speakup.org/.
+ point your browser at <http://www.linux-speakup.org/>.
There is also a mailing list at the above url that you
can subscribe to.
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index a2db956edd54..382973e8b80f 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -5,12 +5,12 @@
#include "speakup.h"
#include "spk_priv.h"
-#define synthBufferSize 8192 /* currently 8K bytes */
+#define SYNTH_BUF_SIZE 8192 /* currently 8K bytes */
-static u_char synth_buffer[synthBufferSize]; /* guess what this is for! */
+static u_char synth_buffer[SYNTH_BUF_SIZE]; /* guess what this is for! */
static u_char *buff_in = synth_buffer;
static u_char *buff_out = synth_buffer;
-static u_char *buffer_end = synth_buffer+synthBufferSize-1;
+static u_char *buffer_end = synth_buffer + SYNTH_BUF_SIZE - 1;
/* These try to throttle applications by stopping the TTYs
* Note: we need to make sure that we will restart them eventually, which is
@@ -44,13 +44,13 @@ static void speakup_stop_ttys(void)
static int synth_buffer_free(void)
{
- int bytesFree;
+ int bytes_free;
if (buff_in >= buff_out)
- bytesFree = synthBufferSize - (buff_in - buff_out);
+ bytes_free = SYNTH_BUF_SIZE - (buff_in - buff_out);
else
- bytesFree = buff_out - buff_in;
- return bytesFree;
+ bytes_free = buff_out - buff_in;
+ return bytes_free;
}
int synth_buffer_empty(void)
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 1b34a8771641..4299cf45f947 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -28,7 +28,7 @@
#define PRESSED 1
#define RELEASED 0
-DEFINE_PER_CPU(bool, reporting_keystroke);
+static DEFINE_PER_CPU(bool, reporting_keystroke);
static struct input_dev *virt_keyboard;
diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c
index 7c1658b971dc..2add1fcfd122 100644
--- a/drivers/staging/speakup/i18n.c
+++ b/drivers/staging/speakup/i18n.c
@@ -390,7 +390,7 @@ static struct msg_group_t all_groups[] = {
static const int num_groups = sizeof(all_groups) / sizeof(struct msg_group_t);
-char *msg_get(enum msg_index_t index)
+char *spk_msg_get(enum msg_index_t index)
{
char *ch;
@@ -540,7 +540,7 @@ static int fmt_validate(char *template, char *user)
* -EINVAL - Invalid format specifiers in formatted message or illegal index.
* -ENOMEM - Unable to allocate memory.
*/
-ssize_t msg_set(enum msg_index_t index, char *text, size_t length)
+ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length)
{
int rc = 0;
char *newstr = NULL;
@@ -576,7 +576,7 @@ ssize_t msg_set(enum msg_index_t index, char *text, size_t length)
* Find a message group, given its name. Return a pointer to the structure
* if found, or NULL otherwise.
*/
-struct msg_group_t *find_msg_group(const char *group_name)
+struct msg_group_t *spk_find_msg_group(const char *group_name)
{
struct msg_group_t *group = NULL;
int i;
@@ -590,7 +590,7 @@ struct msg_group_t *find_msg_group(const char *group_name)
return group;
}
-void reset_msg_group(struct msg_group_t *group)
+void spk_reset_msg_group(struct msg_group_t *group)
{
unsigned long flags;
enum msg_index_t i;
@@ -606,14 +606,14 @@ void reset_msg_group(struct msg_group_t *group)
}
/* Called at initialization time, to establish default messages. */
-void initialize_msgs(void)
+void spk_initialize_msgs(void)
{
memcpy(speakup_msgs, speakup_default_msgs,
sizeof(speakup_default_msgs));
}
/* Free user-supplied strings when module is unloaded: */
-void free_user_msgs(void)
+void spk_free_user_msgs(void)
{
enum msg_index_t index;
unsigned long flags;
diff --git a/drivers/staging/speakup/i18n.h b/drivers/staging/speakup/i18n.h
index 65caa8010776..dd338f4218de 100644
--- a/drivers/staging/speakup/i18n.h
+++ b/drivers/staging/speakup/i18n.h
@@ -218,11 +218,11 @@ struct msg_group_t {
enum msg_index_t end;
};
-extern char *msg_get(enum msg_index_t index);
-extern ssize_t msg_set(enum msg_index_t index, char *text, size_t length);
-extern struct msg_group_t *find_msg_group(const char *group_name);
-extern void reset_msg_group(struct msg_group_t *group);
-extern void initialize_msgs(void);
-extern void free_user_msgs(void);
+extern char *spk_msg_get(enum msg_index_t index);
+extern ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length);
+extern struct msg_group_t *spk_find_msg_group(const char *group_name);
+extern void spk_reset_msg_group(struct msg_group_t *group);
+extern void spk_initialize_msgs(void);
+extern void spk_free_user_msgs(void);
#endif
diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c
index 170f38815ffd..5091625a4901 100644
--- a/drivers/staging/speakup/keyhelp.c
+++ b/drivers/staging/speakup/keyhelp.c
@@ -115,10 +115,11 @@ static void say_key(int key)
key &= 0xff;
for (i = 0; i < 6; i++) {
if (state & masks[i])
- synth_printf(" %s", msg_get(MSG_STATES_START + i));
+ synth_printf(" %s", spk_msg_get(MSG_STATES_START + i));
}
if ((key > 0) && (key <= num_key_names))
- synth_printf(" %s\n", msg_get(MSG_KEYNAMES_START + (key - 1)));
+ synth_printf(" %s\n",
+ spk_msg_get(MSG_KEYNAMES_START + (key - 1)));
}
static int help_init(void)
@@ -126,9 +127,9 @@ static int help_init(void)
char start = SPACE;
int i;
int num_funcs = MSG_FUNCNAMES_END - MSG_FUNCNAMES_START + 1;
-state_tbl = our_keys[0]+SHIFT_TBL_SIZE+2;
+state_tbl = spk_our_keys[0]+SHIFT_TBL_SIZE+2;
for (i = 0; i < num_funcs; i++) {
- char *cur_funcname = msg_get(MSG_FUNCNAMES_START + i);
+ char *cur_funcname = spk_msg_get(MSG_FUNCNAMES_START + i);
if (start == *cur_funcname)
continue;
start = *cur_funcname;
@@ -137,7 +138,7 @@ state_tbl = our_keys[0]+SHIFT_TBL_SIZE+2;
return 0;
}
-int handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
+int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
{
int i, n;
char *name;
@@ -147,15 +148,15 @@ int handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
help_init();
if (type == KT_LATIN) {
if (ch == SPACE) {
- special_handler = NULL;
- synth_printf("%s\n", msg_get(MSG_LEAVING_HELP));
+ spk_special_handler = NULL;
+ synth_printf("%s\n", spk_msg_get(MSG_LEAVING_HELP));
return 1;
}
ch |= 32; /* lower case */
if (ch < 'a' || ch > 'z')
return -1;
if (letter_offsets[ch-'a'] == -1) {
- synth_printf(msg_get(MSG_NO_COMMAND), ch);
+ synth_printf(spk_msg_get(MSG_NO_COMMAND), ch);
synth_printf("\n");
return 1;
}
@@ -169,47 +170,49 @@ int handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
cur_item--;
else
return -1;
- } else if (type == KT_SPKUP && ch == SPEAKUP_HELP && !special_handler) {
- special_handler = handle_help;
- synth_printf("%s\n", msg_get(MSG_HELP_INFO));
+ } else if (type == KT_SPKUP
+ && ch == SPEAKUP_HELP
+ && !spk_special_handler) {
+ spk_special_handler = spk_handle_help;
+ synth_printf("%s\n", spk_msg_get(MSG_HELP_INFO));
build_key_data(); /* rebuild each time in case new mapping */
return 1;
} else {
name = NULL;
if ((type != KT_SPKUP) && (key > 0) && (key <= num_key_names)) {
synth_printf("%s\n",
- msg_get(MSG_KEYNAMES_START + key-1));
+ spk_msg_get(MSG_KEYNAMES_START + key-1));
return 1;
}
for (i = 0; funcvals[i] != 0 && !name; i++) {
if (ch == funcvals[i])
- name = msg_get(MSG_FUNCNAMES_START + i);
+ name = spk_msg_get(MSG_FUNCNAMES_START + i);
}
if (!name)
return -1;
- kp = our_keys[key]+1;
+ kp = spk_our_keys[key]+1;
for (i = 0; i < nstates; i++) {
if (ch == kp[i])
break;
}
key += (state_tbl[i] << 8);
say_key(key);
- synth_printf(msg_get(MSG_KEYDESC), name);
+ synth_printf(spk_msg_get(MSG_KEYDESC), name);
synth_printf("\n");
return 1;
}
- name = msg_get(MSG_FUNCNAMES_START + cur_item);
+ name = spk_msg_get(MSG_FUNCNAMES_START + cur_item);
func = funcvals[cur_item];
synth_printf("%s", name);
if (key_offsets[func] == 0) {
- synth_printf(" %s\n", msg_get(MSG_IS_UNASSIGNED));
+ synth_printf(" %s\n", spk_msg_get(MSG_IS_UNASSIGNED));
return 1;
}
p_keys = key_data + key_offsets[func];
for (n = 0; p_keys[n]; n++) {
val = p_keys[n];
if (n > 0)
- synth_printf("%s ", msg_get(MSG_DISJUNCTION));
+ synth_printf("%s ", spk_msg_get(MSG_DISJUNCTION));
say_key(val);
}
return 1;
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 2093896c546b..35f647ce1f1e 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -41,7 +41,7 @@ static ssize_t chars_chartab_show(struct kobject *kobj,
break;
if (strcmp("characters", attr->attr.name) == 0) {
len = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
- i, characters[i]);
+ i, spk_characters[i]);
} else { /* show chartab entry */
if (IS_TYPE(i, B_CTL))
cp = "B_CTL";
@@ -185,12 +185,12 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
outptr[desc_length] = '\0';
if (do_characters) {
- if (characters[index] != default_chars[index])
- kfree(characters[index]);
- characters[index] = desc;
+ if (spk_characters[index] != spk_default_chars[index])
+ kfree(spk_characters[index]);
+ spk_characters[index] = desc;
used++;
} else {
- charclass = chartab_get_value(keyword);
+ charclass = spk_chartab_get_value(keyword);
if (charclass == 0) {
rejected++;
cp = linefeed + 1;
@@ -206,9 +206,9 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
if (reset) {
if (do_characters)
- reset_default_chars();
+ spk_reset_default_chars();
else
- reset_default_chartab();
+ spk_reset_default_chartab();
}
spk_unlock(flags);
@@ -232,7 +232,7 @@ static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
u_char ch;
unsigned long flags;
spk_lock(flags);
- cp1 = key_buf + SHIFT_TBL_SIZE;
+ cp1 = spk_key_buf + SHIFT_TBL_SIZE;
num_keys = (int)(*cp1);
nstates = (int)cp1[1];
cp += sprintf(cp, "%d, %d, %d,\n", KEY_MAP_VER, num_keys, nstates);
@@ -271,7 +271,7 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
return -ENOMEM;
}
if (strchr("dDrR", *in_buff)) {
- set_key_info(key_defaults, key_buf);
+ spk_set_key_info(spk_key_defaults, spk_key_buf);
pr_info("keymap set to default values\n");
kfree(in_buff);
spk_unlock(flags);
@@ -282,14 +282,14 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
cp = in_buff;
cp1 = (u_char *)in_buff;
for (i = 0; i < 3; i++) {
- cp = s2uchar(cp, cp1);
+ cp = spk_s2uchar(cp, cp1);
cp1++;
}
i = (int)cp1[-2]+1;
i *= (int)cp1[-1]+1;
i += 2; /* 0 and last map ver */
if (cp1[-3] != KEY_MAP_VER || cp1[-1] > 10 ||
- i+SHIFT_TBL_SIZE+4 >= sizeof(key_buf)) {
+ i+SHIFT_TBL_SIZE+4 >= sizeof(spk_key_buf)) {
pr_warn("i %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
kfree(in_buff);
@@ -297,7 +297,7 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
}
while (--i >= 0) {
- cp = s2uchar(cp, cp1);
+ cp = spk_s2uchar(cp, cp1);
cp1++;
if (!(*cp))
break;
@@ -307,8 +307,8 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
pr_warn("end %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
} else {
- if (set_key_info(in_buff, key_buf)) {
- set_key_info(key_defaults, key_buf);
+ if (spk_set_key_info(in_buff, spk_key_buf)) {
+ spk_set_key_info(spk_key_defaults, spk_key_buf);
ret = -EINVAL;
pr_warn("set key failed\n");
}
@@ -343,7 +343,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
spk_lock(flags);
if (ch&2) {
shut = 1;
- do_flush();
+ spk_do_flush();
} else {
shut = 0;
}
@@ -388,7 +388,7 @@ static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
if (new_synth_name[len - 1] == '\n')
len--;
new_synth_name[len] = '\0';
- strlwr(new_synth_name);
+ spk_strlwr(new_synth_name);
if ((synth != NULL) && (!strcmp(new_synth_name, synth->name))) {
pr_warn("%s already in use\n", new_synth_name);
} else if (synth_init(new_synth_name) != 0) {
@@ -417,7 +417,7 @@ static ssize_t synth_direct_store(struct kobject *kobj,
bytes = min_t(size_t, len, 250);
strncpy(tmp, ptr, bytes);
tmp[bytes] = '\0';
- xlate(tmp);
+ spk_xlate(tmp);
synth_printf("%s", tmp);
ptr += bytes;
len -= bytes;
@@ -455,14 +455,14 @@ static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
short mask;
unsigned long flags;
- p_header = var_header_by_name(attr->attr.name);
+ p_header = spk_var_header_by_name(attr->attr.name);
if (p_header == NULL) {
pr_warn("p_header is null, attr->attr.name is %s\n",
attr->attr.name);
return -EINVAL;
}
- var = get_punc_var(p_header->var_id);
+ var = spk_get_punc_var(p_header->var_id);
if (var == NULL) {
pr_warn("var is null, p_header->var_id is %i\n",
p_header->var_id);
@@ -470,7 +470,7 @@ static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
}
spk_lock(flags);
- pb = (struct st_bits_data *) &punc_info[var->value];
+ pb = (struct st_bits_data *) &spk_punc_info[var->value];
mask = pb->mask;
for (i = 33; i < 128; i++) {
if (!(spk_chartab[i]&mask))
@@ -497,14 +497,14 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
if (x < 1 || x > 99)
return -EINVAL;
- p_header = var_header_by_name(attr->attr.name);
+ p_header = spk_var_header_by_name(attr->attr.name);
if (p_header == NULL) {
pr_warn("p_header is null, attr->attr.name is %s\n",
attr->attr.name);
return -EINVAL;
}
- var = get_punc_var(p_header->var_id);
+ var = spk_get_punc_var(p_header->var_id);
if (var == NULL) {
pr_warn("var is null, p_header->var_id is %i\n",
p_header->var_id);
@@ -520,9 +520,9 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
spk_lock(flags);
if (*punc_buf == 'd' || *punc_buf == 'r')
- x = set_mask_bits(0, var->value, 3);
+ x = spk_set_mask_bits(0, var->value, 3);
else
- x = set_mask_bits(punc_buf, var->value, 3);
+ x = spk_set_mask_bits(punc_buf, var->value, 3);
spk_unlock(flags);
return count;
@@ -542,7 +542,7 @@ ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
char ch;
unsigned long flags;
- param = var_header_by_name(attr->attr.name);
+ param = spk_var_header_by_name(attr->attr.name);
if (param == NULL)
return -EINVAL;
@@ -599,13 +599,13 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
int value;
unsigned long flags;
- param = var_header_by_name(attr->attr.name);
+ param = spk_var_header_by_name(attr->attr.name);
if (param == NULL)
return -EINVAL;
if (param->data == NULL)
return 0;
ret = 0;
- cp = xlate((char *) buf);
+ cp = spk_xlate((char *) buf);
spk_lock(flags);
switch (param->var_type) {
@@ -618,7 +618,7 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
else
len = E_SET;
speakup_s2i(cp, &value);
- ret = set_num_var(value, param, len);
+ ret = spk_set_num_var(value, param, len);
if (ret == E_RANGE) {
var_data = param->data;
pr_warn("value for %s out of range, expect %d to %d\n",
@@ -636,7 +636,7 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
}
cp = (char *) buf;
cp[len] = '\0';
- ret = set_string_var(buf, param, len);
+ ret = spk_set_string_var(buf, param, len);
if (ret == E_TOOLONG)
pr_warn("value too long for %s\n",
attr->attr.name);
@@ -652,19 +652,19 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
*/
if (strcmp(attr->attr.name, "voice") == 0) {
if (synth && synth->default_pitch) {
- param = var_header_by_name("pitch");
+ param = spk_var_header_by_name("pitch");
if (param) {
- set_num_var(synth->default_pitch[value], param,
- E_NEW_DEFAULT);
- set_num_var(0, param, E_DEFAULT);
+ spk_set_num_var(synth->default_pitch[value],
+ param, E_NEW_DEFAULT);
+ spk_set_num_var(0, param, E_DEFAULT);
}
}
if (synth && synth->default_vol) {
- param = var_header_by_name("vol");
+ param = spk_var_header_by_name("vol");
if (param) {
- set_num_var(synth->default_vol[value], param,
- E_NEW_DEFAULT);
- set_num_var(0, param, E_DEFAULT);
+ spk_set_num_var(synth->default_vol[value],
+ param, E_NEW_DEFAULT);
+ spk_set_num_var(0, param, E_DEFAULT);
}
}
}
@@ -694,7 +694,7 @@ static ssize_t message_show_helper(char *buf, enum msg_index_t first,
if (bufsize <= 1)
break;
printed = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
- index, msg_get(cursor));
+ index, spk_msg_get(cursor));
buf_pointer += printed;
bufsize -= printed;
}
@@ -788,7 +788,7 @@ static ssize_t message_store_helper(const char *buf, size_t count,
continue;
}
- msg_stored = msg_set(curmessage, temp, desc_length);
+ msg_stored = spk_msg_set(curmessage, temp, desc_length);
if (msg_stored < 0) {
retval = msg_stored;
if (msg_stored == -ENOMEM)
@@ -802,7 +802,7 @@ static ssize_t message_store_helper(const char *buf, size_t count,
}
if (reset)
- reset_msg_group(group);
+ spk_reset_msg_group(group);
report_msg_status(reset, received, used, rejected, group->name);
return retval;
@@ -812,7 +812,7 @@ static ssize_t message_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
ssize_t retval = 0;
- struct msg_group_t *group = find_msg_group(attr->attr.name);
+ struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
unsigned long flags;
BUG_ON(!group);
@@ -826,7 +826,7 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
ssize_t retval = 0;
- struct msg_group_t *group = find_msg_group(attr->attr.name);
+ struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
BUG_ON(!group);
retval = message_store_helper(buf, count, group);
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 40e2488b9679..9916e94aa361 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -65,23 +65,23 @@ MODULE_VERSION(SPEAKUP_VERSION);
char *synth_name;
module_param_named(synth, synth_name, charp, S_IRUGO);
-module_param_named(quiet, quiet_boot, bool, S_IRUGO);
+module_param_named(quiet, spk_quiet_boot, bool, S_IRUGO);
MODULE_PARM_DESC(synth, "Synth to start if speakup is built in.");
MODULE_PARM_DESC(quiet, "Do not announce when the synthesizer is found.");
-special_func special_handler;
+special_func spk_special_handler;
-short pitch_shift, synth_flags;
+short spk_pitch_shift, synth_flags;
static char buf[256];
-int attrib_bleep, bleeps, bleep_time = 10;
-int no_intr, spell_delay;
-int key_echo, say_word_ctl;
-int say_ctrl, bell_pos;
-short punc_mask;
-int punc_level, reading_punc;
-char str_caps_start[MAXVARLEN + 1] = "\0", str_caps_stop[MAXVARLEN + 1] = "\0";
-const struct st_bits_data punc_info[] = {
+int spk_attrib_bleep, spk_bleeps, spk_bleep_time = 10;
+int spk_no_intr, spk_spell_delay;
+int spk_key_echo, spk_say_word_ctl;
+int spk_say_ctrl, spk_bell_pos;
+short spk_punc_mask;
+int spk_punc_level, spk_reading_punc;
+char spk_str_caps_start[MAXVARLEN + 1] = "\0", spk_str_caps_stop[MAXVARLEN + 1] = "\0";
+const struct st_bits_data spk_punc_info[] = {
{"none", "", 0},
{"some", "/$%&@", SOME},
{"most", "$%&#()=+*/@^<>|\\", MOST},
@@ -95,9 +95,9 @@ const struct st_bits_data punc_info[] = {
static char mark_cut_flag;
#define MAX_KEY 160
-u_char *our_keys[MAX_KEY], *shift_table;
-u_char key_buf[600];
-const u_char key_defaults[] = {
+u_char *spk_our_keys[MAX_KEY], *spk_shift_table;
+u_char spk_key_buf[600];
+const u_char spk_key_defaults[] = {
#include "speakupmap.h"
};
@@ -129,9 +129,9 @@ static char *phonetic[] = {
/* array of 256 char pointers (one for each character description)
* initialized to default_chars and user selectable via
* /proc/speakup/characters */
-char *characters[256];
+char *spk_characters[256];
-char *default_chars[256] = {
+char *spk_default_chars[256] = {
/*000*/ "null", "^a", "^b", "^c", "^d", "^e", "^f", "^g",
/*008*/ "^h", "^i", "^j", "^k", "^l", "^m", "^n", "^o",
/*016*/ "^p", "^q", "^r", "^s", "^t", "^u", "^v", "^w",
@@ -238,7 +238,7 @@ static u_short default_chartab[256] = {
};
struct task_struct *speakup_task;
-struct bleep unprocessed_sound;
+struct bleep spk_unprocessed_sound;
static int spk_keydown;
static u_char spk_lastkey, spk_close_press, keymap_flags;
static u_char last_keycode, this_speakup_key;
@@ -251,14 +251,14 @@ DEFINE_MUTEX(spk_mutex);
static int keyboard_notifier_call(struct notifier_block *,
unsigned long code, void *param);
-struct notifier_block keyboard_notifier_block = {
+static struct notifier_block keyboard_notifier_block = {
.notifier_call = keyboard_notifier_call,
};
static int vt_notifier_call(struct notifier_block *,
unsigned long code, void *param);
-struct notifier_block vt_notifier_block = {
+static struct notifier_block vt_notifier_block = {
.notifier_call = vt_notifier_call,
};
@@ -282,13 +282,13 @@ static void bleep(u_short val)
350, 370, 392, 414, 440, 466, 491, 523, 554, 587, 619, 659
};
short freq;
- int time = bleep_time;
+ int time = spk_bleep_time;
freq = vals[val % 12];
if (val > 11)
freq *= (1 << (val / 12));
- unprocessed_sound.freq = freq;
- unprocessed_sound.jiffies = msecs_to_jiffies(time);
- unprocessed_sound.active = 1;
+ spk_unprocessed_sound.freq = freq;
+ spk_unprocessed_sound.jiffies = msecs_to_jiffies(time);
+ spk_unprocessed_sound.active = 1;
/* We can only have 1 active sound at a time. */
}
@@ -300,7 +300,7 @@ static void speakup_shut_up(struct vc_data *vc)
spk_parked &= 0xfe;
speakup_date(vc);
if (synth != NULL)
- do_flush();
+ spk_do_flush();
}
static void speech_kill(struct vc_data *vc)
@@ -313,9 +313,9 @@ static void speech_kill(struct vc_data *vc)
if (val == 2 || spk_killed) {
/* dead */
spk_shut_up &= ~0x40;
- synth_printf("%s\n", msg_get(MSG_IAM_ALIVE));
+ synth_printf("%s\n", spk_msg_get(MSG_IAM_ALIVE));
} else {
- synth_printf("%s\n", msg_get(MSG_YOU_KILLED_SPEAKUP));
+ synth_printf("%s\n", spk_msg_get(MSG_YOU_KILLED_SPEAKUP));
spk_shut_up |= 0x40;
}
}
@@ -324,10 +324,10 @@ static void speakup_off(struct vc_data *vc)
{
if (spk_shut_up & 0x80) {
spk_shut_up &= 0x7f;
- synth_printf("%s\n", msg_get(MSG_HEY_THATS_BETTER));
+ synth_printf("%s\n", spk_msg_get(MSG_HEY_THATS_BETTER));
} else {
spk_shut_up |= 0x80;
- synth_printf("%s\n", msg_get(MSG_YOU_TURNED_ME_OFF));
+ synth_printf("%s\n", spk_msg_get(MSG_YOU_TURNED_ME_OFF));
}
speakup_date(vc);
}
@@ -336,10 +336,10 @@ static void speakup_parked(struct vc_data *vc)
{
if (spk_parked & 0x80) {
spk_parked = 0;
- synth_printf("%s\n", msg_get(MSG_UNPARKED));
+ synth_printf("%s\n", spk_msg_get(MSG_UNPARKED));
} else {
spk_parked |= 0x80;
- synth_printf("%s\n", msg_get(MSG_PARKED));
+ synth_printf("%s\n", spk_msg_get(MSG_PARKED));
}
}
@@ -350,16 +350,16 @@ static void speakup_cut(struct vc_data *vc)
if (!mark_cut_flag) {
mark_cut_flag = 1;
- xs = (u_short) spk_x;
- ys = (u_short) spk_y;
+ spk_xs = (u_short) spk_x;
+ spk_ys = (u_short) spk_y;
spk_sel_cons = vc;
- synth_printf("%s\n", msg_get(MSG_MARK));
+ synth_printf("%s\n", spk_msg_get(MSG_MARK));
return;
}
- xe = (u_short) spk_x;
- ye = (u_short) spk_y;
+ spk_xe = (u_short) spk_x;
+ spk_ye = (u_short) spk_y;
mark_cut_flag = 0;
- synth_printf("%s\n", msg_get(MSG_CUT));
+ synth_printf("%s\n", spk_msg_get(MSG_CUT));
speakup_clear_selection();
ret = speakup_set_selection(tty);
@@ -383,9 +383,9 @@ static void speakup_paste(struct vc_data *vc)
{
if (mark_cut_flag) {
mark_cut_flag = 0;
- synth_printf("%s\n", msg_get(MSG_MARK_CLEARED));
+ synth_printf("%s\n", spk_msg_get(MSG_MARK_CLEARED));
} else {
- synth_printf("%s\n", msg_get(MSG_PASTE));
+ synth_printf("%s\n", spk_msg_get(MSG_PASTE));
speakup_paste_selection(tty);
}
}
@@ -395,16 +395,16 @@ static void say_attributes(struct vc_data *vc)
int fg = spk_attr & 0x0f;
int bg = spk_attr >> 4;
if (fg > 8) {
- synth_printf("%s ", msg_get(MSG_BRIGHT));
+ synth_printf("%s ", spk_msg_get(MSG_BRIGHT));
fg -= 8;
}
- synth_printf("%s", msg_get(MSG_COLORS_START + fg));
+ synth_printf("%s", spk_msg_get(MSG_COLORS_START + fg));
if (bg > 7) {
- synth_printf(" %s ", msg_get(MSG_ON_BLINKING));
+ synth_printf(" %s ", spk_msg_get(MSG_ON_BLINKING));
bg -= 8;
} else
- synth_printf(" %s ", msg_get(MSG_ON));
- synth_printf("%s\n", msg_get(MSG_COLORS_START + bg));
+ synth_printf(" %s ", spk_msg_get(MSG_ON));
+ synth_printf("%s\n", spk_msg_get(MSG_COLORS_START + bg));
}
enum {
@@ -417,24 +417,24 @@ enum {
static void announce_edge(struct vc_data *vc, int msg_id)
{
- if (bleeps & 1)
+ if (spk_bleeps & 1)
bleep(spk_y);
- if ((bleeps & 2) && (msg_id < edge_quiet))
- synth_printf("%s\n", msg_get(MSG_EDGE_MSGS_START + msg_id - 1));
+ if ((spk_bleeps & 2) && (msg_id < edge_quiet))
+ synth_printf("%s\n", spk_msg_get(MSG_EDGE_MSGS_START + msg_id - 1));
}
static void speak_char(u_char ch)
{
- char *cp = characters[ch];
- struct var_t *direct = get_var(DIRECT);
+ char *cp = spk_characters[ch];
+ struct var_t *direct = spk_get_var(DIRECT);
if (direct && direct->u.n.value) {
if (IS_CHAR(ch, B_CAP)) {
- pitch_shift++;
- synth_printf("%s", str_caps_start);
+ spk_pitch_shift++;
+ synth_printf("%s", spk_str_caps_start);
}
synth_printf("%c", ch);
if (IS_CHAR(ch, B_CAP))
- synth_printf("%s", str_caps_stop);
+ synth_printf("%s", spk_str_caps_stop);
return;
}
if (cp == NULL) {
@@ -443,13 +443,13 @@ static void speak_char(u_char ch)
}
synth_buffer_add(SPACE);
if (IS_CHAR(ch, B_CAP)) {
- pitch_shift++;
- synth_printf("%s", str_caps_start);
+ spk_pitch_shift++;
+ synth_printf("%s", spk_str_caps_start);
synth_printf("%s", cp);
- synth_printf("%s", str_caps_stop);
+ synth_printf("%s", spk_str_caps_stop);
} else {
if (*cp == '^') {
- synth_printf("%s", msg_get(MSG_CTRL));
+ synth_printf("%s", spk_msg_get(MSG_CTRL));
cp++;
}
synth_printf("%s", cp);
@@ -479,9 +479,9 @@ static void say_char(struct vc_data *vc)
spk_old_attr = spk_attr;
ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
if (spk_attr != spk_old_attr) {
- if (attrib_bleep & 1)
+ if (spk_attrib_bleep & 1)
bleep(spk_y);
- if (attrib_bleep & 2)
+ if (spk_attrib_bleep & 2)
say_attributes(vc);
}
speak_char(ch & 0xff);
@@ -497,7 +497,7 @@ static void say_phonetic_char(struct vc_data *vc)
synth_printf("%s\n", phonetic[--ch]);
} else {
if (IS_CHAR(ch, B_NUM))
- synth_printf("%s ", msg_get(MSG_NUMBER));
+ synth_printf("%s ", spk_msg_get(MSG_NUMBER));
speak_char(ch);
}
}
@@ -527,8 +527,8 @@ static void say_next_char(struct vc_data *vc)
}
/* get_word - will first check to see if the character under the
- * reading cursor is a space and if say_word_ctl is true it will
- * return the word space. If say_word_ctl is not set it will check to
+ * reading cursor is a space and if spk_say_word_ctl is true it will
+ * return the word space. If spk_say_word_ctl is not set it will check to
* see if there is a word starting on the next position to the right
* and return that word if it exists. If it does not exist it will
* move left to the beginning of any previous word on the line or the
@@ -544,9 +544,9 @@ static u_long get_word(struct vc_data *vc)
ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);
/* decided to take out the sayword if on a space (mis-information */
- if (say_word_ctl && ch == SPACE) {
+ if (spk_say_word_ctl && ch == SPACE) {
*buf = '\0';
- synth_printf("%s\n", msg_get(MSG_SPACE));
+ synth_printf("%s\n", spk_msg_get(MSG_SPACE));
return 0;
} else if ((tmpx < vc->vc_cols - 2)
&& (ch == SPACE || ch == 0 || IS_WDLM(ch))
@@ -582,13 +582,13 @@ static u_long get_word(struct vc_data *vc)
static void say_word(struct vc_data *vc)
{
u_long cnt = get_word(vc);
- u_short saved_punc_mask = punc_mask;
+ u_short saved_punc_mask = spk_punc_mask;
if (cnt == 0)
return;
- punc_mask = PUNC;
+ spk_punc_mask = PUNC;
buf[cnt++] = SPACE;
spkup_write(buf, cnt);
- punc_mask = saved_punc_mask;
+ spk_punc_mask = saved_punc_mask;
}
static void say_prev_word(struct vc_data *vc)
@@ -686,22 +686,22 @@ static void say_next_word(struct vc_data *vc)
static void spell_word(struct vc_data *vc)
{
static char *delay_str[] = { "", ",", ".", ". .", ". . ." };
- char *cp = buf, *str_cap = str_caps_stop;
- char *cp1, *last_cap = str_caps_stop;
+ char *cp = buf, *str_cap = spk_str_caps_stop;
+ char *cp1, *last_cap = spk_str_caps_stop;
u_char ch;
if (!get_word(vc))
return;
while ((ch = (u_char) *cp)) {
if (cp != buf)
- synth_printf(" %s ", delay_str[spell_delay]);
+ synth_printf(" %s ", delay_str[spk_spell_delay]);
if (IS_CHAR(ch, B_CAP)) {
- str_cap = str_caps_start;
- if (*str_caps_stop)
- pitch_shift++;
+ str_cap = spk_str_caps_start;
+ if (*spk_str_caps_stop)
+ spk_pitch_shift++;
else /* synth has no pitch */
- last_cap = str_caps_stop;
+ last_cap = spk_str_caps_stop;
} else
- str_cap = str_caps_stop;
+ str_cap = spk_str_caps_stop;
if (str_cap != last_cap) {
synth_printf("%s", str_cap);
last_cap = str_cap;
@@ -711,17 +711,17 @@ static void spell_word(struct vc_data *vc)
ch &= 31;
cp1 = phonetic[--ch];
} else {
- cp1 = characters[ch];
+ cp1 = spk_characters[ch];
if (*cp1 == '^') {
- synth_printf("%s", msg_get(MSG_CTRL));
+ synth_printf("%s", spk_msg_get(MSG_CTRL));
cp1++;
}
}
synth_printf("%s", cp1);
cp++;
}
- if (str_cap != str_caps_stop)
- synth_printf("%s", str_caps_stop);
+ if (str_cap != spk_str_caps_stop)
+ synth_printf("%s", spk_str_caps_stop);
}
static int get_line(struct vc_data *vc)
@@ -746,9 +746,9 @@ static void say_line(struct vc_data *vc)
{
int i = get_line(vc);
char *cp;
- u_short saved_punc_mask = punc_mask;
+ u_short saved_punc_mask = spk_punc_mask;
if (i == 0) {
- synth_printf("%s\n", msg_get(MSG_BLANK));
+ synth_printf("%s\n", spk_msg_get(MSG_BLANK));
return;
}
buf[i++] = '\n';
@@ -758,9 +758,9 @@ static void say_line(struct vc_data *vc)
cp++;
synth_printf("%d, ", (cp - buf) + 1);
}
- punc_mask = punc_masks[reading_punc];
+ spk_punc_mask = spk_punc_masks[spk_reading_punc];
spkup_write(buf, i);
- punc_mask = saved_punc_mask;
+ spk_punc_mask = saved_punc_mask;
}
static void say_prev_line(struct vc_data *vc)
@@ -792,7 +792,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
{
int i = 0;
u_char tmp;
- u_short saved_punc_mask = punc_mask;
+ u_short saved_punc_mask = spk_punc_mask;
spk_old_attr = spk_attr;
spk_attr = get_attributes((u_short *) from);
while (from < to) {
@@ -809,10 +809,10 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
if (i < 1)
return i;
if (read_punc)
- punc_mask = punc_info[reading_punc].mask;
+ spk_punc_mask = spk_punc_info[spk_reading_punc].mask;
spkup_write(buf, i);
if (read_punc)
- punc_mask = saved_punc_mask;
+ spk_punc_mask = saved_punc_mask;
return i - 1;
}
@@ -824,7 +824,7 @@ static void say_line_from_to(struct vc_data *vc, u_long from, u_long to,
start += from * 2;
if (say_from_to(vc, start, end, read_punc) <= 0)
if (cursor_track != read_all_mode)
- synth_printf("%s\n", msg_get(MSG_BLANK));
+ synth_printf("%s\n", spk_msg_get(MSG_BLANK));
}
/* Sentence Reading Commands */
@@ -924,7 +924,7 @@ static void speakup_win_say(struct vc_data *vc)
{
u_long start, end, from, to;
if (win_start < 2) {
- synth_printf("%s\n", msg_get(MSG_NO_WINDOW));
+ synth_printf("%s\n", spk_msg_get(MSG_NO_WINDOW));
return;
}
start = vc->vc_origin + (win_top * vc->vc_size_row);
@@ -975,7 +975,7 @@ static void say_first_char(struct vc_data *vc)
u_char ch;
spk_parked |= 0x01;
if (len == 0) {
- synth_printf("%s\n", msg_get(MSG_BLANK));
+ synth_printf("%s\n", spk_msg_get(MSG_BLANK));
return;
}
for (i = 0; i < len; i++)
@@ -994,7 +994,7 @@ static void say_last_char(struct vc_data *vc)
u_char ch;
spk_parked |= 0x01;
if (len == 0) {
- synth_printf("%s\n", msg_get(MSG_BLANK));
+ synth_printf("%s\n", spk_msg_get(MSG_BLANK));
return;
}
ch = buf[--len];
@@ -1006,7 +1006,7 @@ static void say_last_char(struct vc_data *vc)
static void say_position(struct vc_data *vc)
{
- synth_printf(msg_get(MSG_POS_INFO), spk_y + 1, spk_x + 1,
+ synth_printf(spk_msg_get(MSG_POS_INFO), spk_y + 1, spk_x + 1,
vc->vc_num + 1);
synth_printf("\n");
}
@@ -1017,7 +1017,7 @@ static void say_char_num(struct vc_data *vc)
u_char tmp;
u_short ch = get_char(vc, (u_short *) spk_pos, &tmp);
ch &= 0xff;
- synth_printf(msg_get(MSG_CHAR_INFO), ch, ch);
+ synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch);
}
/* these are stub functions to keep keyboard.c happy. */
@@ -1066,7 +1066,7 @@ static void spkup_write(const char *in_buf, int count)
} else {
if ((last_type & CH_RPT) && rep_count > 2) {
synth_printf(" ");
- synth_printf(msg_get(MSG_REPEAT_DESC),
+ synth_printf(spk_msg_get(MSG_REPEAT_DESC),
++rep_count);
synth_printf(" ");
}
@@ -1074,7 +1074,7 @@ static void spkup_write(const char *in_buf, int count)
}
if (ch == spk_lastkey) {
rep_count = 0;
- if (key_echo == 1 && ch >= MINECHOCHAR)
+ if (spk_key_echo == 1 && ch >= MINECHOCHAR)
speak_char(ch);
} else if (char_type & B_ALPHA) {
if ((synth_flags & SF_DEC) && (last_type & PUNC))
@@ -1083,7 +1083,7 @@ static void spkup_write(const char *in_buf, int count)
} else if (char_type & B_NUM) {
rep_count = 0;
synth_printf("%c", ch);
- } else if (char_type & punc_mask) {
+ } else if (char_type & spk_punc_mask) {
speak_char(ch);
char_type &= ~PUNC; /* for dec nospell processing */
} else if (char_type & SYNTH_OK) {
@@ -1111,7 +1111,7 @@ static void spkup_write(const char *in_buf, int count)
if (in_count > 2 && rep_count > 2) {
if (last_type & CH_RPT) {
synth_printf(" ");
- synth_printf(msg_get(MSG_REPEAT_DESC2), ++rep_count);
+ synth_printf(spk_msg_get(MSG_REPEAT_DESC2), ++rep_count);
synth_printf(" ");
}
rep_count = 0;
@@ -1135,22 +1135,22 @@ static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
case KVAL(K_SHIFT):
del_timer(&cursor_timer);
spk_shut_up &= 0xfe;
- do_flush();
+ spk_do_flush();
read_all_doc(vc);
break;
case KVAL(K_CTRL):
del_timer(&cursor_timer);
cursor_track = prev_cursor_track;
spk_shut_up &= 0xfe;
- do_flush();
+ spk_do_flush();
break;
}
} else {
spk_shut_up &= 0xfe;
- do_flush();
+ spk_do_flush();
}
- if (say_ctrl && value < NUM_CTL_LABELS)
- synth_printf("%s", msg_get(MSG_CTL_START + value));
+ if (spk_say_ctrl && value < NUM_CTL_LABELS)
+ synth_printf("%s", spk_msg_get(MSG_CTL_START + value));
spk_unlock(flags);
}
@@ -1171,12 +1171,12 @@ static void do_handle_latin(struct vc_data *vc, u_char value, char up_flag)
spk_lastkey = value;
spk_keydown++;
spk_parked &= 0xfe;
- if (key_echo == 2 && value >= MINECHOCHAR)
+ if (spk_key_echo == 2 && value >= MINECHOCHAR)
speak_char(value);
spk_unlock(flags);
}
-int set_key_info(const u_char *key_info, u_char *k_buffer)
+int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
{
int i = 0, states, key_data_len;
const u_char *cp = key_info;
@@ -1188,12 +1188,12 @@ int set_key_info(const u_char *key_info, u_char *k_buffer)
num_keys = *cp;
states = (int)cp[1];
key_data_len = (states + 1) * (num_keys + 1);
- if (key_data_len + SHIFT_TBL_SIZE + 4 >= sizeof(key_buf))
+ if (key_data_len + SHIFT_TBL_SIZE + 4 >= sizeof(spk_key_buf))
return -2;
memset(k_buffer, 0, SHIFT_TBL_SIZE);
- memset(our_keys, 0, sizeof(our_keys));
- shift_table = k_buffer;
- our_keys[0] = shift_table;
+ memset(spk_our_keys, 0, sizeof(spk_our_keys));
+ spk_shift_table = k_buffer;
+ spk_our_keys[0] = spk_shift_table;
cp1 += SHIFT_TBL_SIZE;
memcpy(cp1, cp, key_data_len + 3);
/* get num_keys, states and data */
@@ -1202,13 +1202,13 @@ int set_key_info(const u_char *key_info, u_char *k_buffer)
ch = *cp1++;
if (ch >= SHIFT_TBL_SIZE)
return -3;
- shift_table[ch] = i;
+ spk_shift_table[ch] = i;
}
keymap_flags = *cp1++;
while ((ch = *cp1)) {
if (ch >= MAX_KEY)
return -4;
- our_keys[ch] = cp1;
+ spk_our_keys[ch] = cp1;
cp1 += states + 1;
}
return 0;
@@ -1237,24 +1237,24 @@ static void toggle_cursoring(struct vc_data *vc)
cursor_track = prev_cursor_track;
if (++cursor_track >= CT_Max)
cursor_track = 0;
- synth_printf("%s\n", msg_get(MSG_CURSOR_MSGS_START + cursor_track));
+ synth_printf("%s\n", spk_msg_get(MSG_CURSOR_MSGS_START + cursor_track));
}
-void reset_default_chars(void)
+void spk_reset_default_chars(void)
{
int i;
/* First, free any non-default */
for (i = 0; i < 256; i++) {
- if ((characters[i] != NULL)
- && (characters[i] != default_chars[i]))
- kfree(characters[i]);
+ if ((spk_characters[i] != NULL)
+ && (spk_characters[i] != spk_default_chars[i]))
+ kfree(spk_characters[i]);
}
- memcpy(characters, default_chars, sizeof(default_chars));
+ memcpy(spk_characters, spk_default_chars, sizeof(spk_default_chars));
}
-void reset_default_chartab(void)
+void spk_reset_default_chartab(void)
{
memcpy(spk_chartab, default_chartab, sizeof(default_chartab));
}
@@ -1267,8 +1267,8 @@ static int edit_bits(struct vc_data *vc, u_char type, u_char ch, u_short key)
if (type != KT_LATIN || (ch_type & B_NUM) || ch < SPACE)
return -1;
if (ch == SPACE) {
- synth_printf("%s\n", msg_get(MSG_EDIT_DONE));
- special_handler = NULL;
+ synth_printf("%s\n", spk_msg_get(MSG_EDIT_DONE));
+ spk_special_handler = NULL;
return 1;
}
if (mask < PUNC && !(ch_type & PUNC))
@@ -1276,8 +1276,8 @@ static int edit_bits(struct vc_data *vc, u_char type, u_char ch, u_short key)
spk_chartab[ch] ^= mask;
speak_char(ch);
synth_printf(" %s\n",
- (spk_chartab[ch] & mask) ? msg_get(MSG_ON) :
- msg_get(MSG_OFF));
+ (spk_chartab[ch] & mask) ? spk_msg_get(MSG_ON) :
+ spk_msg_get(MSG_OFF));
return 1;
}
@@ -1346,7 +1346,7 @@ static void read_all_doc(struct vc_data *vc)
if (cursor_track != read_all_mode)
prev_cursor_track = cursor_track;
cursor_track = read_all_mode;
- reset_index_count(0);
+ spk_reset_index_count(0);
if (get_sentence_buf(vc, 0) == -1)
kbd_fakekey2(vc, RA_DOWN_ARROW);
else {
@@ -1361,7 +1361,7 @@ static void stop_read_all(struct vc_data *vc)
del_timer(&cursor_timer);
cursor_track = prev_cursor_track;
spk_shut_up &= 0xfe;
- do_flush();
+ spk_do_flush();
}
static void start_read_all_timer(struct vc_data *vc, int command)
@@ -1370,7 +1370,7 @@ static void start_read_all_timer(struct vc_data *vc, int command)
cursor_con = vc->vc_num;
read_all_key = command;
- cursor_timeout = get_var(CURSOR_TIME);
+ cursor_timeout = spk_get_var(CURSOR_TIME);
mod_timer(&cursor_timer,
jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
}
@@ -1382,9 +1382,9 @@ static void handle_cursor_read_all(struct vc_data *vc, int command)
switch (command) {
case RA_NEXT_SENT:
/* Get Current Sentence */
- get_index_count(&indcount, &sentcount);
+ spk_get_index_count(&indcount, &sentcount);
/*printk("%d %d ", indcount, sentcount); */
- reset_index_count(sentcount + 1);
+ spk_reset_index_count(sentcount + 1);
if (indcount == 1) {
if (!say_sentence_num(sentcount + 1, 0)) {
kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
@@ -1395,7 +1395,7 @@ static void handle_cursor_read_all(struct vc_data *vc, int command)
sn = 0;
if (!say_sentence_num(sentcount + 1, 1)) {
sn = 1;
- reset_index_count(sn);
+ spk_reset_index_count(sn);
} else
synth_insert_next_index(0);
if (!say_sentence_num(sn, 0)) {
@@ -1437,7 +1437,7 @@ static void handle_cursor_read_all(struct vc_data *vc, int command)
case RA_FIND_PREV_SENT:
break;
case RA_TIMER:
- get_index_count(&indcount, &sentcount);
+ spk_get_index_count(&indcount, &sentcount);
if (indcount < 2)
kbd_fakekey2(vc, RA_DOWN_ARROW);
else
@@ -1458,7 +1458,7 @@ static int pre_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
}
del_timer(&cursor_timer);
spk_shut_up &= 0xfe;
- do_flush();
+ spk_do_flush();
start_read_all_timer(vc, value + 1);
spk_unlock(flags);
return NOTIFY_STOP;
@@ -1479,8 +1479,8 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
return;
}
spk_shut_up &= 0xfe;
- if (no_intr)
- do_flush();
+ if (spk_no_intr)
+ spk_do_flush();
/* the key press flushes if !no_inter but we want to flush on cursor
* moves regardless of no_inter state */
is_cursor = value + 1;
@@ -1491,7 +1491,7 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
cursor_con = vc->vc_num;
if (cursor_track == CT_Highlight)
reset_highlight_buffers(vc);
- cursor_timeout = get_var(CURSOR_TIME);
+ cursor_timeout = spk_get_var(CURSOR_TIME);
mod_timer(&cursor_timer,
jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
spk_unlock(flags);
@@ -1603,7 +1603,7 @@ static int speak_highlight(struct vc_data *vc)
if (speakup_console[vc_num]->ht.ry[hc] != vc->vc_y)
return 0;
spk_parked |= 0x01;
- do_flush();
+ spk_do_flush();
spkup_write(speakup_console[vc_num]->ht.highbuf[hc],
speakup_console[vc_num]->ht.highsize[hc]);
spk_pos = spk_cp = speakup_console[vc_num]->ht.rpos[hc];
@@ -1685,7 +1685,7 @@ static void speakup_con_write(struct vc_data *vc, const char *str, int len)
if (!spk_trylock(flags))
/* Speakup output, discard */
return;
- if (bell_pos && spk_keydown && (vc->vc_x == bell_pos - 1))
+ if (spk_bell_pos && spk_keydown && (vc->vc_x == spk_bell_pos - 1))
bleep(3);
if ((is_cursor) || (cursor_track == read_all_mode)) {
if (cursor_track == CT_Highlight)
@@ -1726,19 +1726,19 @@ static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
return;
spk_lock(flags);
spk_shut_up &= 0xfe;
- if (no_intr)
- do_flush();
+ if (spk_no_intr)
+ spk_do_flush();
switch (value) {
case KVAL(K_CAPS):
- label = msg_get(MSG_KEYNAME_CAPSLOCK);
+ label = spk_msg_get(MSG_KEYNAME_CAPSLOCK);
on_off = vt_get_leds(fg_console, VC_CAPSLOCK);
break;
case KVAL(K_NUM):
- label = msg_get(MSG_KEYNAME_NUMLOCK);
+ label = spk_msg_get(MSG_KEYNAME_NUMLOCK);
on_off = vt_get_leds(fg_console, VC_NUMLOCK);
break;
case KVAL(K_HOLD):
- label = msg_get(MSG_KEYNAME_SCROLLLOCK);
+ label = spk_msg_get(MSG_KEYNAME_SCROLLLOCK);
on_off = vt_get_leds(fg_console, VC_SCROLLOCK);
if (speakup_console[vc->vc_num])
speakup_console[vc->vc_num]->tty_stopped = on_off;
@@ -1750,7 +1750,7 @@ static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
}
if (on_off < 2)
synth_printf("%s %s\n",
- label, msg_get(MSG_STATUS_START + on_off));
+ label, spk_msg_get(MSG_STATUS_START + on_off));
spk_unlock(flags);
}
@@ -1764,13 +1764,13 @@ static int inc_dec_var(u_char value)
int var_id = (int)value - VAR_START;
int how = (var_id & 1) ? E_INC : E_DEC;
var_id = var_id / 2 + FIRST_SET_VAR;
- p_header = get_var_header(var_id);
+ p_header = spk_get_var_header(var_id);
if (p_header == NULL)
return -1;
if (p_header->var_type != VAR_NUM)
return -1;
var_data = p_header->data;
- if (set_num_var(1, p_header, how) != 0)
+ if (spk_set_num_var(1, p_header, how) != 0)
return -1;
if (!spk_close_press) {
for (pn = p_header->name; *pn; pn++) {
@@ -1790,18 +1790,18 @@ static void speakup_win_set(struct vc_data *vc)
{
char info[40];
if (win_start > 1) {
- synth_printf("%s\n", msg_get(MSG_WINDOW_ALREADY_SET));
+ synth_printf("%s\n", spk_msg_get(MSG_WINDOW_ALREADY_SET));
return;
}
if (spk_x < win_left || spk_y < win_top) {
- synth_printf("%s\n", msg_get(MSG_END_BEFORE_START));
+ synth_printf("%s\n", spk_msg_get(MSG_END_BEFORE_START));
return;
}
if (win_start && spk_x == win_left && spk_y == win_top) {
win_left = 0;
win_right = vc->vc_cols - 1;
win_bottom = spk_y;
- snprintf(info, sizeof(info), msg_get(MSG_WINDOW_LINE),
+ snprintf(info, sizeof(info), spk_msg_get(MSG_WINDOW_LINE),
(int)win_top + 1);
} else {
if (!win_start) {
@@ -1811,8 +1811,8 @@ static void speakup_win_set(struct vc_data *vc)
win_bottom = spk_y;
win_right = spk_x;
}
- snprintf(info, sizeof(info), msg_get(MSG_WINDOW_BOUNDARY),
- (win_start) ? msg_get(MSG_END) : msg_get(MSG_START),
+ snprintf(info, sizeof(info), spk_msg_get(MSG_WINDOW_BOUNDARY),
+ (win_start) ? spk_msg_get(MSG_END) : spk_msg_get(MSG_START),
(int)spk_y + 1, (int)spk_x + 1);
}
synth_printf("%s\n", info);
@@ -1824,32 +1824,32 @@ static void speakup_win_clear(struct vc_data *vc)
win_top = win_bottom = 0;
win_left = win_right = 0;
win_start = 0;
- synth_printf("%s\n", msg_get(MSG_WINDOW_CLEARED));
+ synth_printf("%s\n", spk_msg_get(MSG_WINDOW_CLEARED));
}
static void speakup_win_enable(struct vc_data *vc)
{
if (win_start < 2) {
- synth_printf("%s\n", msg_get(MSG_NO_WINDOW));
+ synth_printf("%s\n", spk_msg_get(MSG_NO_WINDOW));
return;
}
win_enabled ^= 1;
if (win_enabled)
- synth_printf("%s\n", msg_get(MSG_WINDOW_SILENCED));
+ synth_printf("%s\n", spk_msg_get(MSG_WINDOW_SILENCED));
else
- synth_printf("%s\n", msg_get(MSG_WINDOW_SILENCE_DISABLED));
+ synth_printf("%s\n", spk_msg_get(MSG_WINDOW_SILENCE_DISABLED));
}
static void speakup_bits(struct vc_data *vc)
{
int val = this_speakup_key - (FIRST_EDIT_BITS - 1);
- if (special_handler != NULL || val < 1 || val > 6) {
- synth_printf("%s\n", msg_get(MSG_ERROR));
+ if (spk_special_handler != NULL || val < 1 || val > 6) {
+ synth_printf("%s\n", spk_msg_get(MSG_ERROR));
return;
}
- pb_edit = &punc_info[val];
- synth_printf(msg_get(MSG_EDIT_PROMPT), pb_edit->name);
- special_handler = edit_bits;
+ pb_edit = &spk_punc_info[val];
+ synth_printf(spk_msg_get(MSG_EDIT_PROMPT), pb_edit->name);
+ spk_special_handler = edit_bits;
}
static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
@@ -1887,9 +1887,9 @@ static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
if (ch < 'x' || ch > 'y') {
oops:
if (!spk_killed)
- synth_printf(" %s\n", msg_get(MSG_GOTO_CANCELED));
+ synth_printf(" %s\n", spk_msg_get(MSG_GOTO_CANCELED));
goto_buf[num = 0] = '\0';
- special_handler = NULL;
+ spk_special_handler = NULL;
return 1;
}
cp = speakup_s2i(goto_buf, &go_pos);
@@ -1917,7 +1917,7 @@ oops:
}
goto_buf[num = 0] = '\0';
do_goto:
- special_handler = NULL;
+ spk_special_handler = NULL;
spk_parked |= 0x01;
if (goto_x) {
spk_pos -= spk_x * 2;
@@ -1934,18 +1934,18 @@ do_goto:
static void speakup_goto(struct vc_data *vc)
{
- if (special_handler != NULL) {
- synth_printf("%s\n", msg_get(MSG_ERROR));
+ if (spk_special_handler != NULL) {
+ synth_printf("%s\n", spk_msg_get(MSG_ERROR));
return;
}
- synth_printf("%s\n", msg_get(MSG_GOTO));
- special_handler = handle_goto;
+ synth_printf("%s\n", spk_msg_get(MSG_GOTO));
+ spk_special_handler = handle_goto;
return;
}
static void speakup_help(struct vc_data *vc)
{
- handle_help(vc, KT_SPKUP, SPEAKUP_HELP, 0);
+ spk_handle_help(vc, KT_SPKUP, SPEAKUP_HELP, 0);
}
static void do_nothing(struct vc_data *vc)
@@ -1992,7 +1992,7 @@ static void do_spkup(struct vc_data *vc, u_char value)
spk_shut_up &= 0xfe;
this_speakup_key = value;
if (value < SPKUP_MAX_FUNC && spkup_handler[value]) {
- do_flush();
+ spk_do_flush();
(*spkup_handler[value]) (vc);
} else {
if (inc_dec_var(value) < 0)
@@ -2032,7 +2032,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
}
if (keycode >= MAX_KEY)
goto no_map;
- key_info = our_keys[keycode];
+ key_info = spk_our_keys[keycode];
if (key_info == 0)
goto no_map;
/* Check valid read all mode keys */
@@ -2051,7 +2051,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
}
}
shift_info = (shift_state & 0x0f) + key_speakup;
- offset = shift_table[shift_info];
+ offset = spk_shift_table[shift_info];
if (offset) {
new_key = key_info[offset];
if (new_key) {
@@ -2062,7 +2062,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
if (up_flag || spk_killed)
goto out;
spk_shut_up &= 0xfe;
- do_flush();
+ spk_do_flush();
goto out;
}
if (up_flag)
@@ -2070,7 +2070,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
if (last_keycode == keycode &&
last_spk_jiffy + MAX_DELAY > jiffies) {
spk_close_press = 1;
- offset = shift_table[shift_info + 32];
+ offset = spk_shift_table[shift_info + 32];
/* double press? */
if (offset && key_info[offset])
new_key = key_info[offset];
@@ -2082,7 +2082,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
}
}
no_map:
- if (type == KT_SPKUP && special_handler == NULL) {
+ if (type == KT_SPKUP && spk_special_handler == NULL) {
do_spkup(vc, new_key);
spk_close_press = 0;
ret = 1;
@@ -2096,9 +2096,9 @@ no_map:
|| (value == KVAL(K_LEFT))
|| (value == KVAL(K_RIGHT));
if ((cursor_track != read_all_mode) || !kh)
- if (!no_intr)
- do_flush();
- if (special_handler) {
+ if (!spk_no_intr)
+ spk_do_flush();
+ if (spk_special_handler) {
if (type == KT_SPEC && value == 1) {
value = '\n';
type = KT_LATIN;
@@ -2106,7 +2106,7 @@ no_map:
type = KT_LATIN;
else if (value == 0x7f)
value = 8; /* make del = backspace */
- ret = (*special_handler) (vc, type, value, keycode);
+ ret = (*spk_special_handler) (vc, type, value, keycode);
spk_close_press = 0;
if (ret < 0)
bleep(9);
@@ -2237,11 +2237,11 @@ static void __exit speakup_exit(void)
speakup_unregister_var(i);
for (i = 0; i < 256; i++) {
- if (characters[i] != default_chars[i])
- kfree(characters[i]);
+ if (spk_characters[i] != spk_default_chars[i])
+ kfree(spk_characters[i]);
}
- free_user_msgs();
+ spk_free_user_msgs();
}
/* call by: module_init() */
@@ -2254,20 +2254,20 @@ static int __init speakup_init(void)
struct var_t *var;
/* These first few initializations cannot fail. */
- initialize_msgs(); /* Initialize arrays for i18n. */
- reset_default_chars();
- reset_default_chartab();
- strlwr(synth_name);
+ spk_initialize_msgs(); /* Initialize arrays for i18n. */
+ spk_reset_default_chars();
+ spk_reset_default_chartab();
+ spk_strlwr(synth_name);
spk_vars[0].u.n.high = vc->vc_cols;
for (var = spk_vars; var->var_id != MAXVARS; var++)
speakup_register_var(var);
for (var = synth_time_vars;
(var->var_id >= 0) && (var->var_id < MAXVARS); var++)
speakup_register_var(var);
- for (i = 1; punc_info[i].mask != 0; i++)
- set_mask_bits(0, i, 2);
+ for (i = 1; spk_punc_info[i].mask != 0; i++)
+ spk_set_mask_bits(0, i, 2);
- set_key_info(key_defaults, key_buf);
+ spk_set_key_info(spk_key_defaults, spk_key_buf);
/* From here on out, initializations can fail. */
err = speakup_add_virtual_keyboard();
@@ -2290,7 +2290,7 @@ static int __init speakup_init(void)
goto error_kobjects;
}
- if (quiet_boot)
+ if (spk_quiet_boot)
spk_shut_up |= 0x01;
err = speakup_kobj_init();
@@ -2352,11 +2352,11 @@ error_virtkeyboard:
speakup_unregister_var(i);
for (i = 0; i < 256; i++) {
- if (characters[i] != default_chars[i])
- kfree(characters[i]);
+ if (spk_characters[i] != spk_default_chars[i])
+ kfree(spk_characters[i]);
}
- free_user_msgs();
+ spk_free_user_msgs();
out:
return err;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 0612df06a4bf..775af26b9914 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -2,6 +2,7 @@
#include <linux/consolemap.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/device.h> /* for dev_warn */
#include <linux/selection.h>
#include "speakup.h"
@@ -10,7 +11,7 @@
/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
#define ishardspace(c) ((c) == ' ')
-unsigned short xs, ys, xe, ye; /* our region points */
+unsigned short spk_xs, spk_ys, spk_xe, spk_ye; /* our region points */
/* Variables for selection control. */
/* must not be disallocated */
@@ -51,12 +52,12 @@ int speakup_set_selection(struct tty_struct *tty)
int i, ps, pe;
struct vc_data *vc = vc_cons[fg_console].d;
- xs = limit(xs, vc->vc_cols - 1);
- ys = limit(ys, vc->vc_rows - 1);
- xe = limit(xe, vc->vc_cols - 1);
- ye = limit(ye, vc->vc_rows - 1);
- ps = ys * vc->vc_size_row + (xs << 1);
- pe = ye * vc->vc_size_row + (xe << 1);
+ spk_xs = limit(spk_xs, vc->vc_cols - 1);
+ spk_ys = limit(spk_ys, vc->vc_rows - 1);
+ spk_xe = limit(spk_xe, vc->vc_cols - 1);
+ spk_ye = limit(spk_ye, vc->vc_rows - 1);
+ ps = spk_ys * vc->vc_size_row + (spk_xs << 1);
+ pe = spk_ye * vc->vc_size_row + (spk_xe << 1);
if (ps > pe) {
/* make sel_start <= sel_end */
@@ -95,7 +96,6 @@ int speakup_set_selection(struct tty_struct *tty)
/* Allocate a new buffer before freeing the old one ... */
bp = kmalloc((sel_end-sel_start)/2+1, GFP_ATOMIC);
if (!bp) {
- dev_warn(tty->dev, "selection: kmalloc() failed\n");
speakup_clear_selection();
return -ENOMEM;
}
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index a97d3d5b58a4..e4d27aa2898f 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -116,7 +116,7 @@ static void start_serial_interrupt(int irq)
outb(1, speakup_info.port_tts + UART_FCR); /* Turn FIFO On */
}
-void stop_serial_interrupt(void)
+void spk_stop_serial_interrupt(void)
{
if (speakup_info.port_tts == 0)
return;
@@ -130,7 +130,7 @@ void stop_serial_interrupt(void)
free_irq(serstate->irq, (void *) synth_readbuf_handler);
}
-int wait_for_xmitr(void)
+int spk_wait_for_xmitr(void)
{
int tmout = SPK_XMITR_TIMEOUT;
if ((synth->alive) && (timeouts >= NUM_DISABLE_TIMEOUTS)) {
@@ -195,7 +195,7 @@ EXPORT_SYMBOL_GPL(spk_serial_in_nowait);
int spk_serial_out(const char ch)
{
- if (synth->alive && wait_for_xmitr()) {
+ if (synth->alive && spk_wait_for_xmitr()) {
outb_p(ch, speakup_info.port_tts);
return 1;
}
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index e66579e6147a..22f0fbb85f42 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -50,34 +50,34 @@
#define E_UNDEF -1
extern int speakup_thread(void *data);
-extern void reset_default_chars(void);
-extern void reset_default_chartab(void);
+extern void spk_reset_default_chars(void);
+extern void spk_reset_default_chartab(void);
extern void synth_start(void);
void synth_insert_next_index(int sent_num);
-void reset_index_count(int sc);
-void get_index_count(int *linecount, int *sentcount);
-extern int set_key_info(const u_char *key_info, u_char *k_buffer);
-extern char *strlwr(char *s);
+void spk_reset_index_count(int sc);
+void spk_get_index_count(int *linecount, int *sentcount);
+extern int spk_set_key_info(const u_char *key_info, u_char *k_buffer);
+extern char *spk_strlwr(char *s);
extern char *speakup_s2i(char *start, int *dest);
-extern char *s2uchar(char *start, char *dest);
-extern char *xlate(char *s);
+extern char *spk_s2uchar(char *start, char *dest);
+extern char *spk_xlate(char *s);
extern int speakup_kobj_init(void);
extern void speakup_kobj_exit(void);
-extern int chartab_get_value(char *keyword);
+extern int spk_chartab_get_value(char *keyword);
extern void speakup_register_var(struct var_t *var);
extern void speakup_unregister_var(enum var_id_t var_id);
-extern struct st_var_header *get_var_header(enum var_id_t var_id);
-extern struct st_var_header *var_header_by_name(const char *name);
-extern struct punc_var_t *get_punc_var(enum var_id_t var_id);
-extern int set_num_var(int val, struct st_var_header *var, int how);
-extern int set_string_var(const char *page, struct st_var_header *var, int len);
-extern int set_mask_bits(const char *input, const int which, const int how);
-extern special_func special_handler;
-extern int handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key);
+extern struct st_var_header *spk_get_var_header(enum var_id_t var_id);
+extern struct st_var_header *spk_var_header_by_name(const char *name);
+extern struct punc_var_t *spk_get_punc_var(enum var_id_t var_id);
+extern int spk_set_num_var(int val, struct st_var_header *var, int how);
+extern int spk_set_string_var(const char *page, struct st_var_header *var, int len);
+extern int spk_set_mask_bits(const char *input, const int which, const int how);
+extern special_func spk_special_handler;
+extern int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key);
extern int synth_init(char *name);
extern void synth_release(void);
-extern void do_flush(void);
+extern void spk_do_flush(void);
extern void speakup_start_ttys(void);
extern void synth_buffer_add(char ch);
extern void synth_buffer_clear(void);
@@ -90,35 +90,35 @@ extern void synth_write(const char *buf, size_t count);
extern int synth_supports_indexing(void);
extern struct vc_data *spk_sel_cons;
-extern unsigned short xs, ys, xe, ye; /* our region points */
+extern unsigned short spk_xs, spk_ys, spk_xe, spk_ye; /* our region points */
extern wait_queue_head_t speakup_event;
extern struct kobject *speakup_kobj;
extern struct task_struct *speakup_task;
-extern const u_char key_defaults[];
+extern const u_char spk_key_defaults[];
/* Protect speakup synthesizer list */
extern struct mutex spk_mutex;
extern struct st_spk_t *speakup_console[];
extern struct spk_synth *synth;
-extern char pitch_buff[];
-extern u_char *our_keys[];
-extern short punc_masks[];
-extern char str_caps_start[], str_caps_stop[];
-extern const struct st_bits_data punc_info[];
-extern u_char key_buf[600];
-extern char *characters[];
-extern char *default_chars[];
+extern char spk_pitch_buff[];
+extern u_char *spk_our_keys[];
+extern short spk_punc_masks[];
+extern char spk_str_caps_start[], spk_str_caps_stop[];
+extern const struct st_bits_data spk_punc_info[];
+extern u_char spk_key_buf[600];
+extern char *spk_characters[];
+extern char *spk_default_chars[];
extern u_short spk_chartab[];
-extern int no_intr, say_ctrl, say_word_ctl, punc_level;
-extern int reading_punc, attrib_bleep, bleeps;
-extern int bleep_time, bell_pos;
-extern int spell_delay, key_echo;
-extern short punc_mask;
-extern short pitch_shift, synth_flags;
-extern bool quiet_boot;
+extern int spk_no_intr, spk_say_ctrl, spk_say_word_ctl, spk_punc_level;
+extern int spk_reading_punc, spk_attrib_bleep, spk_bleeps;
+extern int spk_bleep_time, spk_bell_pos;
+extern int spk_spell_delay, spk_key_echo;
+extern short spk_punc_mask;
+extern short spk_pitch_shift, synth_flags;
+extern bool spk_quiet_boot;
extern char *synth_name;
-extern struct bleep unprocessed_sound;
+extern struct bleep spk_unprocessed_sound;
/* Prototypes from fakekey.c. */
int speakup_add_virtual_keyboard(void);
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index bbe28b6809e0..1c1f0d560449 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -182,9 +182,9 @@ static void do_catch_up(struct spk_synth *synth)
struct var_t *full_time;
struct var_t *jiffy_delta;
- jiffy_delta = get_var(JIFFY);
- delay_time = get_var(DELAY);
- full_time = get_var(FULL);
+ jiffy_delta = spk_get_var(JIFFY);
+ delay_time = spk_get_var(DELAY);
+ full_time = spk_get_var(FULL);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index 590fa6bb0ed4..22a8b7291098 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -128,7 +128,7 @@ static int synth_probe(struct spk_synth *synth)
{
int failed;
- failed = serial_synth_probe(synth);
+ failed = spk_serial_synth_probe(synth);
if (failed == 0) {
spk_synth_immediate(synth, "\033=R\r");
mdelay(100);
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 00d5cedd00ab..3e450ccbda66 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -112,7 +112,7 @@ static struct spk_synth synth_apollo = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
- .probe = serial_synth_probe,
+ .probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = do_catch_up,
@@ -145,9 +145,9 @@ static void do_catch_up(struct spk_synth *synth)
int delay_time_val = 0;
int jiffy_delta_val = 0;
- jiffy_delta = get_var(JIFFY);
- delay_time = get_var(DELAY);
- full_time = get_var(FULL);
+ jiffy_delta = spk_get_var(JIFFY);
+ delay_time = spk_get_var(DELAY);
+ full_time = spk_get_var(FULL);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spk_unlock(flags);
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index 94e509992c8b..3508aee98ab0 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -162,7 +162,7 @@ static int synth_probe(struct spk_synth *synth)
{
int failed = 0;
- failed = serial_synth_probe(synth);
+ failed = spk_serial_synth_probe(synth);
if (failed == 0)
synth_version(synth);
synth->alive = !failed;
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index 43e5b54f344c..4bfe3d458dc0 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -100,7 +100,7 @@ static struct spk_synth synth_bns = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
- .probe = serial_synth_probe,
+ .probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = spk_do_catch_up,
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index b4ef9153f42e..d39a0de286fb 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -130,7 +130,7 @@ static struct spk_synth synth_decext = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
- .probe = serial_synth_probe,
+ .probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = do_catch_up,
@@ -162,8 +162,8 @@ static void do_catch_up(struct spk_synth *synth)
int jiffy_delta_val = 0;
int delay_time_val = 0;
- jiffy_delta = get_var(JIFFY);
- delay_time = get_var(DELAY);
+ jiffy_delta = spk_get_var(JIFFY);
+ delay_time = spk_get_var(DELAY);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index a09a0c9975df..6c88b55bdac8 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -375,8 +375,8 @@ static void do_catch_up(struct spk_synth *synth)
int jiffy_delta_val;
int delay_time_val;
- jiffy_delta = get_var(JIFFY);
- delay_time = get_var(DELAY);
+ jiffy_delta = spk_get_var(JIFFY);
+ delay_time = spk_get_var(DELAY);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spk_unlock(flags);
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index daff3b9a4a6d..0dd2eb96cb28 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -134,7 +134,7 @@ static struct spk_synth synth_dectlk = {
.vars = vars,
.default_pitch = ap_defaults,
.default_vol = g5_defaults,
- .probe = serial_synth_probe,
+ .probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = do_catch_up,
@@ -214,8 +214,8 @@ static void do_catch_up(struct spk_synth *synth)
int jiffy_delta_val;
int delay_time_val;
- jiffy_delta = get_var(JIFFY);
- delay_time = get_var(DELAY);
+ jiffy_delta = spk_get_var(JIFFY);
+ delay_time = spk_get_var(DELAY);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spk_unlock(flags);
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index 97bc476746cd..a9cefbd3ea93 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -198,8 +198,8 @@ static void do_catch_up(struct spk_synth *synth)
int jiffy_delta_val;
int delay_time_val;
- jiffy_delta = get_var(JIFFY);
- delay_time = get_var(DELAY);
+ jiffy_delta = spk_get_var(JIFFY);
+ delay_time = spk_get_var(DELAY);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spk_unlock(flags);
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index c20f41188be1..4a24b9c1e8e3 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -102,7 +102,7 @@ static struct spk_synth synth_dummy = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
- .probe = serial_synth_probe,
+ .probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = spk_do_catch_up,
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 496e01481f9e..feb5f22cc169 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -184,9 +184,9 @@ static void do_catch_up(struct spk_synth *synth)
int full_time_val;
int jiffy_delta_val;
- jiffy_delta = get_var(JIFFY);
- delay_time = get_var(DELAY);
- full_time = get_var(FULL);
+ jiffy_delta = spk_get_var(JIFFY);
+ delay_time = spk_get_var(DELAY);
+ full_time = spk_get_var(FULL);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spk_unlock(flags);
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index 971de1a13712..326f94d6b079 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -161,7 +161,7 @@ static int synth_probe(struct spk_synth *synth)
{
int failed = 0;
- failed = serial_synth_probe(synth);
+ failed = spk_serial_synth_probe(synth);
if (failed == 0)
synth_interrogate(synth);
synth->alive = !failed;
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index 9a3a80d9701e..e74f85620c68 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -107,7 +107,7 @@ static struct spk_synth synth_spkout = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
- .probe = serial_synth_probe,
+ .probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = spk_do_catch_up,
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index 5d5bf7c3d0b1..5a29b9fcc930 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -100,7 +100,7 @@ static struct spk_synth synth_txprt = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
- .probe = serial_synth_probe,
+ .probe = spk_serial_synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = spk_do_catch_up,
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index a47c5b78d57d..303105b46013 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -45,8 +45,8 @@
#define KT_SPKUP 15
extern const struct old_serial_port *spk_serial_init(int index);
-extern void stop_serial_interrupt(void);
-extern int wait_for_xmitr(void);
+extern void spk_stop_serial_interrupt(void);
+extern int spk_wait_for_xmitr(void);
extern unsigned char spk_serial_in(void);
extern unsigned char spk_serial_in_nowait(void);
extern int spk_serial_out(const char ch);
@@ -55,13 +55,13 @@ extern void spk_serial_release(void);
extern char synth_buffer_getc(void);
extern char synth_buffer_peek(void);
extern int synth_buffer_empty(void);
-extern struct var_t *get_var(enum var_id_t var_id);
+extern struct var_t *spk_get_var(enum var_id_t var_id);
extern ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf);
extern ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count);
-extern int serial_synth_probe(struct spk_synth *synth);
+extern int spk_serial_synth_probe(struct spk_synth *synth);
extern const char *spk_synth_immediate(struct spk_synth *synth, const char *buff);
extern void spk_do_catch_up(struct spk_synth *synth);
extern void spk_synth_flush(struct spk_synth *synth);
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index df9533798095..d867dd9109ed 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -20,9 +20,9 @@
#define MAXSYNTHS 16 /* Max number of synths in array. */
static struct spk_synth *synths[MAXSYNTHS];
struct spk_synth *synth;
-char pitch_buff[32] = "";
+char spk_pitch_buff[32] = "";
static int module_status;
-bool quiet_boot;
+bool spk_quiet_boot;
struct speakup_info_t speakup_info = {
.spinlock = __SPIN_LOCK_UNLOCKED(speakup_info.spinlock),
@@ -32,7 +32,7 @@ EXPORT_SYMBOL_GPL(speakup_info);
static int do_synth_init(struct spk_synth *in_synth);
-int serial_synth_probe(struct spk_synth *synth)
+int spk_serial_synth_probe(struct spk_synth *synth)
{
const struct old_serial_port *ser;
int failed = 0;
@@ -59,7 +59,7 @@ int serial_synth_probe(struct spk_synth *synth)
synth->alive = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(serial_synth_probe);
+EXPORT_SYMBOL_GPL(spk_serial_synth_probe);
/* Main loop of the progression thread: keep eating from the buffer
* and push to the serial port, waiting as needed
@@ -79,9 +79,9 @@ void spk_do_catch_up(struct spk_synth *synth)
int delay_time_val;
int full_time_val;
- jiffy_delta = get_var(JIFFY);
- full_time = get_var(FULL);
- delay_time = get_var(DELAY);
+ jiffy_delta = spk_get_var(JIFFY);
+ full_time = spk_get_var(FULL);
+ delay_time = spk_get_var(DELAY);
spk_lock(flags);
jiffy_delta_val = jiffy_delta->u.n.value;
@@ -139,7 +139,7 @@ const char *spk_synth_immediate(struct spk_synth *synth, const char *buff)
while ((ch = *buff)) {
if (ch == '\n')
ch = synth->procspeech;
- if (wait_for_xmitr())
+ if (spk_wait_for_xmitr())
outb(ch, speakup_info.port_tts);
else
return buff;
@@ -166,7 +166,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
{
if (synth->alive)
return 1;
- if (!synth->alive && wait_for_xmitr() > 0) {
+ if (!synth->alive && spk_wait_for_xmitr() > 0) {
/* restart */
synth->alive = 1;
synth_printf("%s", synth->init);
@@ -192,20 +192,20 @@ void synth_start(void)
synth_buffer_clear();
return;
}
- trigger_time = get_var(TRIGGER);
+ trigger_time = spk_get_var(TRIGGER);
if (!timer_pending(&thread_timer))
mod_timer(&thread_timer, jiffies +
msecs_to_jiffies(trigger_time->u.n.value));
}
-void do_flush(void)
+void spk_do_flush(void)
{
speakup_info.flushing = 1;
synth_buffer_clear();
if (synth->alive) {
- if (pitch_shift) {
- synth_printf("%s", pitch_buff);
- pitch_shift = 0;
+ if (spk_pitch_shift) {
+ synth_printf("%s", spk_pitch_buff);
+ spk_pitch_shift = 0;
}
}
wake_up_interruptible_all(&speakup_event);
@@ -241,7 +241,7 @@ EXPORT_SYMBOL_GPL(synth_printf);
static int index_count;
static int sentence_count;
-void reset_index_count(int sc)
+void spk_reset_index_count(int sc)
{
static int first = 1;
if (first)
@@ -277,7 +277,7 @@ void synth_insert_next_index(int sent_num)
}
}
-void get_index_count(int *linecount, int *sentcount)
+void spk_get_index_count(int *linecount, int *sentcount)
{
int ind = synth->get_index();
if (ind) {
@@ -342,7 +342,7 @@ int synth_init(char *synth_name)
mutex_lock(&spk_mutex);
/* First, check if we already have it loaded. */
- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
+ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
if (strcmp(synths[i]->name, synth_name) == 0)
synth = synths[i];
@@ -384,7 +384,7 @@ static int do_synth_init(struct spk_synth *in_synth)
for (var = synth->vars;
(var->var_id >= 0) && (var->var_id < MAXVARS); var++)
speakup_register_var(var);
- if (!quiet_boot)
+ if (!spk_quiet_boot)
synth_printf("%s found\n", synth->long_name);
if (synth->attributes.name
&& sysfs_create_group(speakup_kobj, &(synth->attributes)) < 0)
@@ -412,7 +412,7 @@ void synth_release(void)
sysfs_remove_group(speakup_kobj, &(synth->attributes));
for (var = synth->vars; var->var_id != MAXVARS; var++)
speakup_unregister_var(var->var_id);
- stop_serial_interrupt();
+ spk_stop_serial_interrupt();
synth->release();
synth = NULL;
}
@@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
int i;
int status = 0;
mutex_lock(&spk_mutex);
- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
+ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
/* synth_remove() is responsible for rotating the array down */
if (in_synth == synths[i]) {
mutex_unlock(&spk_mutex);
@@ -460,4 +460,4 @@ void synth_remove(struct spk_synth *in_synth)
}
EXPORT_SYMBOL_GPL(synth_remove);
-short punc_masks[] = { 0, SOME, MOST, PUNC, PUNC|B_SYM };
+short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC|B_SYM };
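[Editor's sketch] The two loop hunks above reorder the condition so the index is bounded before the array slot is read. A minimal illustration of the idiom, using invented names (MAX_ENTRIES, table, find_entry) rather than anything from this patch:

	#include <string.h>	/* <linux/string.h> in-kernel */

	#define MAX_ENTRIES 16
	static const char *table[MAX_ENTRIES];

	static int find_entry(const char *name)
	{
		int i;

		/* Bound check first: the old ordering dereferenced
		 * table[MAX_ENTRIES], one slot past the end, on the
		 * final pass before noticing i was out of range. */
		for (i = 0; i < MAX_ENTRIES && table[i] != NULL; i++)
			if (strcmp(table[i], name) == 0)
				return i;
		return -1;
	}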
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index 103c5c81ee85..42fa660a7e0d 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -23,8 +23,8 @@ int speakup_thread(void *data)
DEFINE_WAIT(wait);
while (1) {
spk_lock(flags);
- our_sound = unprocessed_sound;
- unprocessed_sound.active = 0;
+ our_sound = spk_unprocessed_sound;
+ spk_unprocessed_sound.active = 0;
prepare_to_wait(&speakup_event, &wait,
TASK_INTERRUPTIBLE);
should_break = kthread_should_stop() ||
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index ab7de9389dd6..f8c1e457d389 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -16,24 +16,24 @@ static struct st_var_header var_headers[] = {
{ "ex_num", EXNUMBER, VAR_PROC, NULL, NULL },
{ "characters", CHARS, VAR_PROC, NULL, NULL },
{ "synth_direct", SYNTH_DIRECT, VAR_PROC, NULL, NULL },
- { "caps_start", CAPS_START, VAR_STRING, str_caps_start, NULL },
- { "caps_stop", CAPS_STOP, VAR_STRING, str_caps_stop, NULL },
+ { "caps_start", CAPS_START, VAR_STRING, spk_str_caps_start, NULL },
+ { "caps_stop", CAPS_STOP, VAR_STRING, spk_str_caps_stop, NULL },
{ "delay_time", DELAY, VAR_TIME, NULL, NULL },
{ "trigger_time", TRIGGER, VAR_TIME, NULL, NULL },
{ "jiffy_delta", JIFFY, VAR_TIME, NULL, NULL },
{ "full_time", FULL, VAR_TIME, NULL, NULL },
- { "spell_delay", SPELL_DELAY, VAR_NUM, &spell_delay, NULL },
- { "bleeps", BLEEPS, VAR_NUM, &bleeps, NULL },
- { "attrib_bleep", ATTRIB_BLEEP, VAR_NUM, &attrib_bleep, NULL },
- { "bleep_time", BLEEP_TIME, VAR_TIME, &bleep_time, NULL },
+ { "spell_delay", SPELL_DELAY, VAR_NUM, &spk_spell_delay, NULL },
+ { "bleeps", BLEEPS, VAR_NUM, &spk_bleeps, NULL },
+ { "attrib_bleep", ATTRIB_BLEEP, VAR_NUM, &spk_attrib_bleep, NULL },
+ { "bleep_time", BLEEP_TIME, VAR_TIME, &spk_bleep_time, NULL },
{ "cursor_time", CURSOR_TIME, VAR_TIME, NULL, NULL },
- { "punc_level", PUNC_LEVEL, VAR_NUM, &punc_level, NULL },
- { "reading_punc", READING_PUNC, VAR_NUM, &reading_punc, NULL },
- { "say_control", SAY_CONTROL, VAR_NUM, &say_ctrl, NULL },
- { "say_word_ctl", SAY_WORD_CTL, VAR_NUM, &say_word_ctl, NULL },
- { "no_interrupt", NO_INTERRUPT, VAR_NUM, &no_intr, NULL },
- { "key_echo", KEY_ECHO, VAR_NUM, &key_echo, NULL },
- { "bell_pos", BELL_POS, VAR_NUM, &bell_pos, NULL },
+ { "punc_level", PUNC_LEVEL, VAR_NUM, &spk_punc_level, NULL },
+ { "reading_punc", READING_PUNC, VAR_NUM, &spk_reading_punc, NULL },
+ { "say_control", SAY_CONTROL, VAR_NUM, &spk_say_ctrl, NULL },
+ { "say_word_ctl", SAY_WORD_CTL, VAR_NUM, &spk_say_word_ctl, NULL },
+ { "no_interrupt", NO_INTERRUPT, VAR_NUM, &spk_no_intr, NULL },
+ { "key_echo", KEY_ECHO, VAR_NUM, &spk_key_echo, NULL },
+ { "bell_pos", BELL_POS, VAR_NUM, &spk_bell_pos, NULL },
{ "rate", RATE, VAR_NUM, NULL, NULL },
{ "pitch", PITCH, VAR_NUM, NULL, NULL },
{ "vol", VOL, VAR_NUM, NULL, NULL },
@@ -58,7 +58,7 @@ static struct punc_var_t punc_vars[] = {
{ -1, -1 },
};
-int chartab_get_value(char *keyword)
+int spk_chartab_get_value(char *keyword)
{
int value = 0;
@@ -103,11 +103,11 @@ void speakup_register_var(struct var_t *var)
p_header->data = var;
switch (p_header->var_type) {
case VAR_STRING:
- set_string_var(nothing, p_header, 0);
+ spk_set_string_var(nothing, p_header, 0);
break;
case VAR_NUM:
case VAR_TIME:
- set_num_var(0, p_header, E_DEFAULT);
+ spk_set_num_var(0, p_header, E_DEFAULT);
break;
default:
break;
@@ -123,7 +123,7 @@ void speakup_unregister_var(enum var_id_t var_id)
p_header->data = NULL;
}
-struct st_var_header *get_var_header(enum var_id_t var_id)
+struct st_var_header *spk_get_var_header(enum var_id_t var_id)
{
struct st_var_header *p_header;
if (var_id < 0 || var_id >= MAXVARS)
@@ -134,7 +134,7 @@ struct st_var_header *get_var_header(enum var_id_t var_id)
return p_header;
}
-struct st_var_header *var_header_by_name(const char *name)
+struct st_var_header *spk_var_header_by_name(const char *name)
{
int i;
struct st_var_header *where = NULL;
@@ -151,15 +151,15 @@ struct st_var_header *var_header_by_name(const char *name)
return where;
}
-struct var_t *get_var(enum var_id_t var_id)
+struct var_t *spk_get_var(enum var_id_t var_id)
{
BUG_ON(var_id < 0 || var_id >= MAXVARS);
BUG_ON(!var_ptrs[var_id]);
return var_ptrs[var_id]->data;
}
-EXPORT_SYMBOL_GPL(get_var);
+EXPORT_SYMBOL_GPL(spk_get_var);
-struct punc_var_t *get_punc_var(enum var_id_t var_id)
+struct punc_var_t *spk_get_punc_var(enum var_id_t var_id)
{
struct punc_var_t *rv = NULL;
struct punc_var_t *where;
@@ -175,7 +175,7 @@ struct punc_var_t *get_punc_var(enum var_id_t var_id)
}
/* handlers for setting vars */
-int set_num_var(int input, struct st_var_header *var, int how)
+int spk_set_num_var(int input, struct st_var_header *var, int how)
{
int val;
short ret = 0;
@@ -217,7 +217,7 @@ int set_num_var(int input, struct st_var_header *var, int how)
if (p_val != NULL)
*p_val = val;
if (var->var_id == PUNC_LEVEL) {
- punc_mask = punc_masks[val];
+ spk_punc_mask = spk_punc_masks[val];
return ret;
}
if (var_data->u.n.multiplier != 0)
@@ -232,7 +232,7 @@ int set_num_var(int input, struct st_var_header *var, int how)
if (!var_data->u.n.synth_fmt)
return ret;
if (var->var_id == PITCH)
- cp = pitch_buff;
+ cp = spk_pitch_buff;
else
cp = buf;
if (!var_data->u.n.out_str)
@@ -244,7 +244,7 @@ int set_num_var(int input, struct st_var_header *var, int how)
return ret;
}
-int set_string_var(const char *page, struct st_var_header *var, int len)
+int spk_set_string_var(const char *page, struct st_var_header *var, int len)
{
int ret = 0;
struct var_t *var_data = var->data;
@@ -267,21 +267,21 @@ int set_string_var(const char *page, struct st_var_header *var, int len)
return ret;
}
-/* set_mask_bits sets or clears the punc/delim/repeat bits,
+/* spk_set_mask_bits sets or clears the punc/delim/repeat bits,
* if input is null uses the defaults.
* values for how: 0 clears bits of chars supplied,
* 1 clears all, 2 sets bits for chars */
-int set_mask_bits(const char *input, const int which, const int how)
+int spk_set_mask_bits(const char *input, const int which, const int how)
{
u_char *cp;
- short mask = punc_info[which].mask;
+ short mask = spk_punc_info[which].mask;
if (how&1) {
- for (cp = (u_char *)punc_info[3].value; *cp; cp++)
+ for (cp = (u_char *)spk_punc_info[3].value; *cp; cp++)
spk_chartab[*cp] &= ~mask;
}
cp = (u_char *)input;
if (cp == 0)
- cp = punc_info[which].value;
+ cp = spk_punc_info[which].value;
else {
for ( ; *cp; cp++) {
if (*cp < SPACE)
@@ -308,7 +308,7 @@ int set_mask_bits(const char *input, const int which, const int how)
return 0;
}
-char *strlwr(char *s)
+char *spk_strlwr(char *s)
{
char *p;
if (s == NULL)
@@ -341,7 +341,7 @@ char *speakup_s2i(char *start, int *dest)
return start;
}
-char *s2uchar(char *start, char *dest)
+char *spk_s2uchar(char *start, char *dest)
{
int val = 0;
while (*start && *start <= SPACE)
@@ -357,7 +357,7 @@ char *s2uchar(char *start, char *dest)
return start;
}
-char *xlate(char *s)
+char *spk_xlate(char *s)
{
static const char finds[] = "nrtvafe";
static const char subs[] = "\n\r\t\013\001\014\033";
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 299f51810199..6a21f67af086 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -742,13 +742,9 @@ static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
case SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM:
if (rmi_fd.intr_src_count) {
rfi = kmalloc(sizeof(*rfi),
- GFP_KERNEL);
- if (!rfi) {
- dev_err(&client->dev,
- "%s:kmalloc failed\n",
- __func__);
- return -ENOMEM;
- }
+ GFP_KERNEL);
+ if (!rfi)
+ return -ENOMEM;
retval = synpatics_rmi4_touchpad_detect
(pdata, rfi,
&rmi_fd,
@@ -900,12 +896,10 @@ static int synaptics_rmi4_probe
}
/* Allocate and initialize the instance data for this client */
- rmi4_data = kzalloc(sizeof(struct synaptics_rmi4_data) * 2,
- GFP_KERNEL);
- if (!rmi4_data) {
- dev_err(&client->dev, "%s: no memory allocated\n", __func__);
+ rmi4_data = kcalloc(2, sizeof(struct synaptics_rmi4_data),
+ GFP_KERNEL);
+ if (!rmi4_data)
return -ENOMEM;
- }
rmi4_data->input_dev = input_allocate_device();
if (rmi4_data->input_dev == NULL) {
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 0dd479f5638d..60848f198b48 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -4,7 +4,7 @@
menuconfig TIDSPBRIDGE
tristate "DSP Bridge driver"
- depends on ARCH_OMAP3
+ depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM
select OMAP_MBOX_FWK
help
DSP/BIOS Bridge is designed for platforms that contain a GPP and
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index 543a127c7d4d..b783bfa59b1c 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -31,7 +31,7 @@
* driver should read or write to PRM/CM registers directly; they
* should rely on OMAP core code to do this.
*/
-#include <mach-omap2/cm2xxx_3xxx.h>
+#include <mach-omap2/cm3xxx.h>
#include <mach-omap2/prm-regbits-34xx.h>
#include <mach-omap2/cm-regbits-34xx.h>
#include <dspbridge/devdefs.h>
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index b647207928b1..2f084e181d39 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -121,9 +121,13 @@ void dsp_clk_exit(void)
for (i = 0; i < DM_TIMER_CLOCKS; i++)
omap_dm_timer_free(timer[i]);
+ clk_unprepare(iva2_clk);
clk_put(iva2_clk);
+ clk_unprepare(ssi.sst_fck);
clk_put(ssi.sst_fck);
+ clk_unprepare(ssi.ssr_fck);
clk_put(ssi.ssr_fck);
+ clk_unprepare(ssi.ick);
clk_put(ssi.ick);
}
@@ -145,14 +149,21 @@ void dsp_clk_init(void)
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
+ else
+ clk_prepare(iva2_clk);
ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
- if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
+ if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick)) {
dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
ssi.sst_fck, ssi.ssr_fck, ssi.ick);
+ } else {
+ clk_prepare(ssi.sst_fck);
+ clk_prepare(ssi.ssr_fck);
+ clk_prepare(ssi.ick);
+ }
}
/**
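[Editor's sketch] The dsp-clock.c hunks pair clk_prepare() with clk_get() and clk_unprepare() with clk_put(), as the common clock framework requires before clk_enable() can be used. A hedged sketch of the pattern with placeholder names (mydev_clk_init, "fck"), not code from this patch:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static struct clk *fck;

	static int mydev_clk_init(struct device *dev)
	{
		fck = clk_get(dev, "fck");
		if (IS_ERR(fck))
			return PTR_ERR(fck);
		return clk_prepare(fck);	/* required before clk_enable() */
	}

	static void mydev_clk_exit(void)
	{
		clk_unprepare(fck);	/* undo clk_prepare() ...            */
		clk_put(fck);		/* ... before dropping the reference */
	}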
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
index ce9557e16eb0..7b517eb827fe 100644
--- a/drivers/staging/tidspbridge/core/msg_sm.c
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -198,8 +198,7 @@ out_err:
*/
void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
{
- if (hmsg_mgr)
- delete_msg_mgr(hmsg_mgr);
+ delete_msg_mgr(hmsg_mgr);
}
/*
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f619fb3c56d2..b770b2281ce8 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -70,14 +70,9 @@
#define PAGES_II_LVL_TABLE 512
#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
-/*
- * This is a totally ugly layer violation, but needed until
- * omap_ctrl_set_dsp_boot*() are provided.
- */
-#define OMAP3_IVA2_BOOTMOD_IDLE 1
-#define OMAP2_CONTROL_GENERAL 0x270
-#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
-#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
+/* IVA Boot modes */
+#define DIRECT 0
+#define IDLE 1
/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -423,29 +418,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Assert RST1 i.e only the RST only for DSP megacell */
if (!status) {
- /*
- * XXX: OMAP343X_CTRL_BASE ioremapping MUST be removed once ctrl
- * function is made available.
- */
- void __iomem *ctrl = ioremap(0x48002000, SZ_4K);
- if (!ctrl) {
- iounmap(sync_addr);
- return -ENOMEM;
- }
-
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
OMAP2_RM_RSTCTRL);
- /* Mask address with 1K for compatibility */
- __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
- ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR);
- /*
- * Set bootmode to self loop if dsp_debug flag is true
- */
- __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
- ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD);
- iounmap(ctrl);
+ /* Mask address with 1K for compatibility */
+ pdata->set_bootaddr(dsp_addr &
+ OMAP3_IVA2_BOOTADDR_MASK);
+ pdata->set_bootmode(dsp_debug ? IDLE : DIRECT);
}
}
if (!status) {
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 1dce36fb828f..7ff0e6c98039 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -63,11 +63,15 @@ int dsp_wdt_init(void)
dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
if (!IS_ERR(dsp_wdt.fclk)) {
+ clk_prepare(dsp_wdt.fclk);
+
dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
if (IS_ERR(dsp_wdt.iclk)) {
clk_put(dsp_wdt.fclk);
dsp_wdt.fclk = NULL;
ret = -EFAULT;
+ } else {
+ clk_prepare(dsp_wdt.iclk);
}
} else
ret = -EFAULT;
@@ -95,10 +99,14 @@ void dsp_wdt_exit(void)
free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
tasklet_kill(&dsp_wdt.wdt3_tasklet);
- if (dsp_wdt.fclk)
+ if (dsp_wdt.fclk) {
+ clk_unprepare(dsp_wdt.fclk);
clk_put(dsp_wdt.fclk);
- if (dsp_wdt.iclk)
+ }
+ if (dsp_wdt.iclk) {
+ clk_unprepare(dsp_wdt.iclk);
clk_put(dsp_wdt.iclk);
+ }
dsp_wdt.fclk = NULL;
dsp_wdt.iclk = NULL;
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 851b356d7a51..774a3f6ff201 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -23,8 +23,6 @@
#include <dspbridge/devdefs.h>
#include <dspbridge/drv.h>
-extern char *iva_img;
-
/*
* ======== proc_attach ========
* Purpose:
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c
index 4007826f7abc..6c29379baf60 100644
--- a/drivers/staging/tidspbridge/pmgr/cod.c
+++ b/drivers/staging/tidspbridge/pmgr/cod.c
@@ -289,7 +289,7 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
int status = 0;
if (usize <= COD_MAXPATHLENGTH)
- strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize);
+ strlcpy(sz_name, cod_mgr_obj->sz_zl_file, usize);
else
status = -EPERM;
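[Editor's sketch] The strncpy()-to-strlcpy() conversions above rely on strlcpy() always NUL-terminating the destination and returning the length of the source, so truncation is detectable. Illustrative only; copy_name is an invented helper:

	#include <linux/string.h>

	static int copy_name(char *dst, const char *src, size_t dst_size)
	{
		/* strlcpy() NUL-terminates dst; a return value >= dst_size
		 * means the copy was truncated. */
		return strlcpy(dst, src, dst_size) >= dst_size ? -1 : 0;
	}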
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index 9f07036cd411..c191ae203565 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -1382,7 +1382,7 @@ void find_symbol_callback(void *elem, void *user_data)
offset < context->cur_best_offset) {
context->cur_best_offset = offset;
context->sym_addr = symbol_addr;
- strncpy(context->name, symbol->name, sizeof(context->name));
+ strlcpy(context->name, symbol->name, sizeof(context->name));
}
return;
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 9ef1ad9527af..70db4ff99ec6 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -414,10 +414,13 @@ u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt)
CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1);
if (status)
goto func_end;
- /* path_size is increased by 1 to accommodate NULL */
path_size = strlen_user((char *)
- args->args_mgr_registerobject.sz_path_name) +
- 1;
+ args->args_mgr_registerobject.sz_path_name);
+ if (!path_size) {
+ status = -EINVAL;
+ goto func_end;
+ }
+
psz_path_name = kmalloc(path_size, GFP_KERNEL);
if (!psz_path_name) {
status = -ENOMEM;
@@ -1540,7 +1543,7 @@ u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt)
if (num_bufs > MAX_BUFS)
return -EINVAL;
- ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL);
+ ap_buffer = kmalloc_array(num_bufs, sizeof(u8 *), GFP_KERNEL);
if (ap_buffer == NULL)
return -ENOMEM;
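[Editor's sketch] kmalloc_array() (and its zeroing counterpart kcalloc(), used in the synaptics hunk earlier) fails cleanly if the element count times the element size would overflow, instead of silently allocating a short buffer the way an open-coded kmalloc(n * size, ...) can. A minimal sketch, with an invented helper name:

	#include <linux/slab.h>
	#include <linux/types.h>

	static u8 **alloc_buffer_table(unsigned int num_bufs)
	{
		/* Returns NULL on allocation failure or on n * size overflow. */
		return kmalloc_array(num_bufs, sizeof(u8 *), GFP_KERNEL);
	}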
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index 9d52c3cb92f0..3d2a26f1efe5 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -852,8 +852,7 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
goto func_end;
}
- dcd_key->path = kmalloc(strlen(sz_reg_key) + 1,
- GFP_KERNEL);
+ dcd_key->path = kmalloc(dw_path_size, GFP_KERNEL);
if (!dcd_key->path) {
kfree(dcd_key);
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index e6f31d817d6b..df0f37ea1ee5 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -65,7 +65,6 @@ static struct class *bridge_class;
static u32 driver_context;
static s32 driver_major;
static char *base_img;
-char *iva_img;
static s32 shm_size = 0x500000; /* 5 MB */
static int tc_wordswapon; /* Default value is always false */
#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index 6309221b64a5..ca3805046a73 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -1802,8 +1802,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
bool status1 = false;
s32 i = 0;
struct lib_node root = { NULL, 0, NULL };
- pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
- sym_addr, offset_range, (u32) offset_output, sym_name);
if (nldr_node->dynamic && *nldr_node->phase_split) {
switch (nldr_node->phase) {
@@ -1852,6 +1850,10 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
pr_debug("%s: Address 0x%x not found in range %d.\n",
__func__, sym_addr, offset_range);
status = -ESPIPE;
+ } else {
+ pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n",
+ __func__, (u32) nldr_node, sym_addr, offset_range,
+ (u32) offset_output, sym_name);
}
return status;
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 737f4a9d86a3..87dfa92ab45b 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -3012,16 +3012,16 @@ int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
struct node_object *node_obj;
int status = -ENOENT;
- pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
- (unsigned int) node_mgr,
- sym_addr, offset_range,
- (unsigned int) sym_addr_output, sym_name);
-
list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) {
status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
offset_range, sym_addr_output, sym_name);
- if (!status)
+ if (!status) {
+ pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
+ (unsigned int) node_mgr,
+ sym_addr, offset_range,
+ (unsigned int) sym_addr_output, sym_name);
break;
+ }
}
return status;
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 5e43938ab7fa..0df55bd5bde4 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -119,16 +119,14 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
dsp_addr, size);
map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
- if (!map_obj) {
- pr_err("%s: kzalloc failed\n", __func__);
+ if (!map_obj)
return NULL;
- }
+
INIT_LIST_HEAD(&map_obj->link);
map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!map_obj->pages) {
- pr_err("%s: kzalloc failed\n", __func__);
kfree(map_obj);
return NULL;
}
@@ -382,7 +380,6 @@ static int get_exec_file(struct cfg_devnode *dev_node_obj,
u32 size, char *exec_file)
{
u8 dev_type;
- s32 len;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
@@ -394,13 +391,10 @@ static int get_exec_file(struct cfg_devnode *dev_node_obj,
if (!drv_datap || !drv_datap->base_img)
return -EFAULT;
- if (strlen(drv_datap->base_img) > size)
+ if (strlen(drv_datap->base_img) >= size)
return -EINVAL;
strcpy(exec_file, drv_datap->base_img);
- } else if (dev_type == IVA_UNIT && iva_img) {
- len = strlen(iva_img);
- strncpy(exec_file, iva_img, len + 1);
} else {
return -ENOENT;
}
@@ -697,7 +691,6 @@ static int memory_give_ownership(struct dmm_map_object *map_obj,
sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
if (!sg) {
- pr_err("%s: kcalloc failed\n", __func__);
ret = -ENOMEM;
goto out;
}
@@ -1231,12 +1224,8 @@ int proc_load(void *hprocessor, const s32 argc_index,
(p_proc_object->bridge_context, &brd_state))) {
pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
kfree(drv_datap->base_img);
- drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
- GFP_KERNEL);
- if (drv_datap->base_img)
- strncpy(drv_datap->base_img, pargv0,
- strlen(pargv0) + 1);
- else
+ drv_datap->base_img = kstrdup(pargv0, GFP_KERNEL);
+ if (!drv_datap->base_img)
status = -ENOMEM;
}
}
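[Editor's sketch] The proc_load() hunk replaces kmalloc(strlen()+1) plus strncpy() with kstrdup(), which allocates and copies (including the terminating NUL) in one call. A small sketch of the idiom; base_img and remember_image_name are placeholders here:

	#include <linux/slab.h>
	#include <linux/string.h>

	static char *base_img;

	static int remember_image_name(const char *name)
	{
		kfree(base_img);			/* drop any previous copy */
		base_img = kstrdup(name, GFP_KERNEL);	/* alloc + copy + NUL     */
		return base_img ? 0 : -ENOMEM;
	}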
diff --git a/drivers/staging/usbip/Kconfig b/drivers/staging/usbip/Kconfig
index 199b1d4c0b85..886000980474 100644
--- a/drivers/staging/usbip/Kconfig
+++ b/drivers/staging/usbip/Kconfig
@@ -8,7 +8,7 @@ config USBIP_CORE
USB/IP core that is required by both drivers.
For more details, and to get the userspace utility
- programs, please see http://usbip.sourceforge.net/.
+ programs, please see <http://usbip.sourceforge.net/>.
To compile this as a module, choose M here: the module will
be called usbip-core.
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index ee36415eb26d..67556acd1514 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -67,9 +67,9 @@ static ssize_t show_status(struct device *dev, struct device_attribute *attr,
return -ENODEV;
}
- spin_lock(&sdev->ud.lock);
+ spin_lock_irq(&sdev->ud.lock);
status = sdev->ud.status;
- spin_unlock(&sdev->ud.lock);
+ spin_unlock_irq(&sdev->ud.lock);
return snprintf(buf, PAGE_SIZE, "%d\n", status);
}
@@ -97,39 +97,39 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
if (sockfd != -1) {
dev_info(dev, "stub up\n");
- spin_lock(&sdev->ud.lock);
+ spin_lock_irq(&sdev->ud.lock);
if (sdev->ud.status != SDEV_ST_AVAILABLE) {
dev_err(dev, "not ready\n");
- spin_unlock(&sdev->ud.lock);
+ spin_unlock_irq(&sdev->ud.lock);
return -EINVAL;
}
socket = sockfd_to_socket(sockfd);
if (!socket) {
- spin_unlock(&sdev->ud.lock);
+ spin_unlock_irq(&sdev->ud.lock);
return -EINVAL;
}
sdev->ud.tcp_socket = socket;
- spin_unlock(&sdev->ud.lock);
+ spin_unlock_irq(&sdev->ud.lock);
sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, "stub_rx");
sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, "stub_tx");
- spin_lock(&sdev->ud.lock);
+ spin_lock_irq(&sdev->ud.lock);
sdev->ud.status = SDEV_ST_USED;
- spin_unlock(&sdev->ud.lock);
+ spin_unlock_irq(&sdev->ud.lock);
} else {
dev_info(dev, "stub down\n");
- spin_lock(&sdev->ud.lock);
+ spin_lock_irq(&sdev->ud.lock);
if (sdev->ud.status != SDEV_ST_USED) {
- spin_unlock(&sdev->ud.lock);
+ spin_unlock_irq(&sdev->ud.lock);
return -EINVAL;
}
- spin_unlock(&sdev->ud.lock);
+ spin_unlock_irq(&sdev->ud.lock);
usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
}
@@ -241,9 +241,9 @@ static void stub_device_reset(struct usbip_device *ud)
ret = usb_lock_device_for_reset(udev, sdev->interface);
if (ret < 0) {
dev_err(&udev->dev, "lock for reset\n");
- spin_lock(&ud->lock);
+ spin_lock_irq(&ud->lock);
ud->status = SDEV_ST_ERROR;
- spin_unlock(&ud->lock);
+ spin_unlock_irq(&ud->lock);
return;
}
@@ -251,7 +251,7 @@ static void stub_device_reset(struct usbip_device *ud)
ret = usb_reset_device(udev);
usb_unlock_device(udev);
- spin_lock(&ud->lock);
+ spin_lock_irq(&ud->lock);
if (ret) {
dev_err(&udev->dev, "device reset\n");
ud->status = SDEV_ST_ERROR;
@@ -259,14 +259,14 @@ static void stub_device_reset(struct usbip_device *ud)
dev_info(&udev->dev, "device reset\n");
ud->status = SDEV_ST_AVAILABLE;
}
- spin_unlock(&ud->lock);
+ spin_unlock_irq(&ud->lock);
}
static void stub_device_unusable(struct usbip_device *ud)
{
- spin_lock(&ud->lock);
+ spin_lock_irq(&ud->lock);
ud->status = SDEV_ST_ERROR;
- spin_unlock(&ud->lock);
+ spin_unlock_irq(&ud->lock);
}
/**
@@ -286,10 +286,8 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
/* yes, it's a new device */
sdev = kzalloc(sizeof(struct stub_device), GFP_KERNEL);
- if (!sdev) {
- dev_err(&interface->dev, "no memory for stub_device\n");
+ if (!sdev)
return NULL;
- }
sdev->interface = usb_get_intf(interface);
sdev->udev = usb_get_dev(udev);
@@ -528,13 +526,13 @@ static void stub_disconnect(struct usb_interface *interface)
* when the device is being reset
*/
-int stub_pre_reset(struct usb_interface *interface)
+static int stub_pre_reset(struct usb_interface *interface)
{
dev_dbg(&interface->dev, "pre_reset\n");
return 0;
}
-int stub_post_reset(struct usb_interface *interface)
+static int stub_post_reset(struct usb_interface *interface)
{
dev_dbg(&interface->dev, "post_reset\n");
return 0;
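[Editor's sketch] The stub_dev.c hunks switch ud->lock to the _irq locking variants because the same lock is also taken from completion paths; in a sysfs handler, where interrupts are known to be enabled, spin_lock_irq() both takes the lock and disables local interrupts so those paths cannot deadlock against it. A hedged sketch with invented names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(status_lock);
	static int status;

	static int read_status(void)
	{
		int s;

		spin_lock_irq(&status_lock);	/* process context, IRQs known on */
		s = status;
		spin_unlock_irq(&status_lock);	/* re-enables local interrupts    */
		return s;
	}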
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 0572a15242b5..715e8a79fb4e 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -307,12 +307,12 @@ static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
int valid = 0;
if (pdu->base.devid == sdev->devid) {
- spin_lock(&ud->lock);
+ spin_lock_irq(&ud->lock);
if (ud->status == SDEV_ST_USED) {
/* A request is valid. */
valid = 1;
}
- spin_unlock(&ud->lock);
+ spin_unlock_irq(&ud->lock);
}
return valid;
@@ -485,7 +485,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
GFP_KERNEL);
if (!priv->urb->transfer_buffer) {
- dev_err(&sdev->interface->dev, "malloc x_buff\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c
index 513961fef055..cd5326ae38cc 100644
--- a/drivers/staging/usbip/stub_tx.c
+++ b/drivers/staging/usbip/stub_tx.c
@@ -42,7 +42,6 @@ void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
if (!unlink) {
- dev_err(&sdev->interface->dev, "alloc stub_unlink\n");
usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
return;
}
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 75189feac380..75aa5bfcb8dd 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -672,9 +672,8 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
return 0;
/* my Bluetooth dongle gets ISO URBs which are np = 0 */
- if (np == 0) {
+ if (np == 0)
return 0;
- }
buff = kzalloc(size, GFP_KERNEL);
if (!buff)
diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
index d332a34ddb6d..82123be8732d 100644
--- a/drivers/staging/usbip/usbip_event.c
+++ b/drivers/staging/usbip/usbip_event.c
@@ -105,10 +105,12 @@ EXPORT_SYMBOL_GPL(usbip_stop_eh);
void usbip_event_add(struct usbip_device *ud, unsigned long event)
{
- spin_lock(&ud->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ud->lock, flags);
ud->event |= event;
wake_up(&ud->eh_waitq);
- spin_unlock(&ud->lock);
+ spin_unlock_irqrestore(&ud->lock, flags);
}
EXPORT_SYMBOL_GPL(usbip_event_add);
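[Editor's sketch] usbip_event_add() can be reached with interrupts either enabled or disabled, so the hunk uses spin_lock_irqsave(), which records and later restores the caller's interrupt state rather than unconditionally re-enabling it. A minimal illustration with placeholder names (ev_lock, post_event):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(ev_lock);
	static unsigned long pending;

	static void post_event(unsigned long event)
	{
		unsigned long flags;

		spin_lock_irqsave(&ev_lock, flags);	/* works from any context */
		pending |= event;
		spin_unlock_irqrestore(&ev_lock, flags);
	}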
diff --git a/drivers/staging/usbip/userspace/.gitignore b/drivers/staging/usbip/userspace/.gitignore
new file mode 100644
index 000000000000..9aad9e30a8ba
--- /dev/null
+++ b/drivers/staging/usbip/userspace/.gitignore
@@ -0,0 +1,28 @@
+Makefile
+Makefile.in
+aclocal.m4
+autom4te.cache/
+config.guess
+config.h
+config.h.in
+config.log
+config.status
+config.sub
+configure
+depcomp
+install-sh
+libsrc/Makefile
+libsrc/Makefile.in
+libtool
+ltmain.sh
+missing
+src/Makefile
+src/Makefile.in
+stamp-h1
+libsrc/libusbip.la
+libsrc/libusbip_la-names.lo
+libsrc/libusbip_la-usbip_common.lo
+libsrc/libusbip_la-usbip_host_driver.lo
+libsrc/libusbip_la-vhci_driver.lo
+src/usbip
+src/usbipd
diff --git a/drivers/staging/usbip/userspace/Makefile.am b/drivers/staging/usbip/userspace/Makefile.am
index 9ab19499fe00..66f8bf038c9f 100644
--- a/drivers/staging/usbip/userspace/Makefile.am
+++ b/drivers/staging/usbip/userspace/Makefile.am
@@ -3,4 +3,4 @@ includedir = @includedir@/usbip
include_HEADERS := $(addprefix libsrc/, \
usbip_common.h vhci_driver.h usbip_host_driver.h)
-dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8 usbip_bind_driver.8)
+dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8)
diff --git a/drivers/staging/usbip/userspace/README b/drivers/staging/usbip/userspace/README
index 63cd10719059..233d1d7aef92 100644
--- a/drivers/staging/usbip/userspace/README
+++ b/drivers/staging/usbip/userspace/README
@@ -17,8 +17,6 @@
- gcc >= 4.0
- - libglib2.0-dev >= 2.6.0
-
- libtool, automake >= 1.9, autoconf >= 2.5.0, pkg-config
diff --git a/drivers/staging/usbip/userspace/configure.ac b/drivers/staging/usbip/userspace/configure.ac
index 43e641e5ac06..2be4060f9036 100644
--- a/drivers/staging/usbip/userspace/configure.ac
+++ b/drivers/staging/usbip/userspace/configure.ac
@@ -91,10 +91,22 @@ AC_ARG_WITH([usbids-dir],
[USBIDS_DIR=$withval], [USBIDS_DIR="/usr/share/hwdata/"])
AC_SUBST([USBIDS_DIR])
-GLIB2_REQUIRED=2.6.0
-PKG_CHECK_MODULES([PACKAGE], [glib-2.0 >= $GLIB2_REQUIRED])
-AC_SUBST([PACKAGE_CFLAGS])
-AC_SUBST([PACKAGE_LIBS])
+# use _FORTIFY_SOURCE
+AC_MSG_CHECKING([whether to use fortify])
+AC_ARG_WITH([fortify],
+ [AS_HELP_STRING([--with-fortify],
+ [use _FORTIFY_SOURCE option when compiling])],
+ dnl [ACTION-IF-GIVEN]
+ [if test "$withval" = "yes"; then
+ AC_MSG_RESULT([yes])
+ CFLAGS="$CFLAGS -D_FORTIFY_SOURCE -O"
+ else
+ AC_MSG_RESULT([no])
+ CFLAGS="$CFLAGS -U_FORTIFY_SOURCE"
+ fi
+ ],
+ dnl [ACTION-IF-NOT-GIVEN]
+ [AC_MSG_RESULT([default])])
AC_CONFIG_FILES([Makefile libsrc/Makefile src/Makefile])
AC_OUTPUT
diff --git a/drivers/staging/usbip/userspace/src/Makefile.am b/drivers/staging/usbip/userspace/src/Makefile.am
index c365a3fada90..a11300361392 100644
--- a/drivers/staging/usbip/userspace/src/Makefile.am
+++ b/drivers/staging/usbip/userspace/src/Makefile.am
@@ -1,6 +1,6 @@
AM_CPPFLAGS = -I$(top_srcdir)/libsrc -DUSBIDS_FILE='"@USBIDS_DIR@/usb.ids"'
-AM_CFLAGS = @EXTRA_CFLAGS@ @PACKAGE_CFLAGS@
-LDADD = $(top_builddir)/libsrc/libusbip.la @PACKAGE_LIBS@
+AM_CFLAGS = @EXTRA_CFLAGS@
+LDADD = $(top_builddir)/libsrc/libusbip.la
sbin_PROGRAMS := usbip usbipd
diff --git a/drivers/staging/usbip/userspace/src/usbip_attach.c b/drivers/staging/usbip/userspace/src/usbip_attach.c
index bdf61c0fe699..2da4e44e1633 100644
--- a/drivers/staging/usbip/userspace/src/usbip_attach.c
+++ b/drivers/staging/usbip/userspace/src/usbip_attach.c
@@ -27,6 +27,7 @@
#include <fcntl.h>
#include <getopt.h>
#include <unistd.h>
+#include <errno.h>
#include "vhci_driver.h"
#include "usbip_common.h"
@@ -52,8 +53,18 @@ static int record_connection(char *host, char *port, char *busid, int rhport)
int ret;
ret = mkdir(VHCI_STATE_PATH, 0700);
- if (ret < 0)
- return -1;
+ if (ret < 0) {
+ /* if VHCI_STATE_PATH exists, then it better be a directory */
+ if (errno == EEXIST) {
+ struct stat s;
+ ret = stat(VHCI_STATE_PATH, &s);
+ if (ret < 0)
+ return -1;
+ if (!(s.st_mode & S_IFDIR))
+ return -1;
+ } else
+ return -1;
+ }
snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
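[Editor's sketch] The usbip_attach.c hunk treats EEXIST from mkdir() as success only after confirming the existing path is a directory. A self-contained userspace sketch of the same check (the real code tests s.st_mode & S_IFDIR; S_ISDIR() below is the equivalent macro, and ensure_state_dir is an invented name):

	#include <errno.h>
	#include <sys/stat.h>
	#include <sys/types.h>

	static int ensure_state_dir(const char *path)
	{
		struct stat s;

		if (mkdir(path, 0700) == 0)
			return 0;
		if (errno != EEXIST)
			return -1;
		/* Path already exists: accept it only if it is a directory. */
		if (stat(path, &s) < 0 || !S_ISDIR(s.st_mode))
			return -1;
		return 0;
	}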
diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c
index 8668a8092d4d..34760cc1d10e 100644
--- a/drivers/staging/usbip/userspace/src/usbipd.c
+++ b/drivers/staging/usbip/userspace/src/usbipd.c
@@ -20,6 +20,7 @@
#include "../config.h"
#endif
+#define _GNU_SOURCE
#include <errno.h>
#include <unistd.h>
#include <netdb.h>
@@ -35,10 +36,9 @@
#include <tcpd.h>
#endif
-#define _GNU_SOURCE
#include <getopt.h>
-#include <glib.h>
#include <signal.h>
+#include <poll.h>
#include "usbip_host_driver.h"
#include "usbip_common.h"
@@ -48,7 +48,7 @@
#define PROGNAME "usbipd"
#define MAXSOCKFD 20
-GMainLoop *main_loop;
+#define MAIN_LOOP_TIMEOUT 10
static const char usbip_version_string[] = PACKAGE_STRING;
@@ -310,30 +310,22 @@ static int do_accept(int listenfd)
return connfd;
}
-gboolean process_request(GIOChannel *gio, GIOCondition condition,
- gpointer unused_data)
+int process_request(int listenfd)
{
- int listenfd;
+ pid_t childpid;
int connfd;
- (void) unused_data;
-
- if (condition & (G_IO_ERR | G_IO_HUP | G_IO_NVAL)) {
- err("unknown condition");
- BUG();
- }
-
- if (condition & G_IO_IN) {
- listenfd = g_io_channel_unix_get_fd(gio);
- connfd = do_accept(listenfd);
- if (connfd < 0)
- return TRUE;
-
+ connfd = do_accept(listenfd);
+ if (connfd < 0)
+ return -1;
+ childpid = fork();
+ if (childpid == 0) {
+ close(listenfd);
recv_pdu(connfd);
- close(connfd);
+ exit(0);
}
-
- return TRUE;
+ close(connfd);
+ return 0;
}
static void log_addrinfo(struct addrinfo *ai)
@@ -418,10 +410,7 @@ static struct addrinfo *do_getaddrinfo(char *host, int ai_family)
static void signal_handler(int i)
{
- dbg("received signal: code %d", i);
-
- if (main_loop)
- g_main_loop_quit(main_loop);
+ dbg("received '%s' signal", strsignal(i));
}
static void set_signal(void)
@@ -433,14 +422,19 @@ static void set_signal(void)
sigemptyset(&act.sa_mask);
sigaction(SIGTERM, &act, NULL);
sigaction(SIGINT, &act, NULL);
+ act.sa_handler = SIG_IGN;
+ sigaction(SIGCLD, &act, NULL);
}
-static int do_standalone_mode(gboolean daemonize)
+static int do_standalone_mode(int daemonize)
{
struct addrinfo *ai_head;
int sockfdlist[MAXSOCKFD];
int nsockfd;
- int i;
+ int i, terminate;
+ struct pollfd *fds;
+ struct timespec timeout;
+ sigset_t sigmask;
if (usbip_names_init(USBIDS_FILE))
err("failed to open %s", USBIDS_FILE);
@@ -456,7 +450,7 @@ static int do_standalone_mode(gboolean daemonize)
err("daemonizing failed: %s", strerror(errno));
return -1;
}
-
+ umask(0);
usbip_use_syslog = 1;
}
set_signal();
@@ -472,20 +466,40 @@ static int do_standalone_mode(gboolean daemonize)
err("failed to open a listening socket");
return -1;
}
-
+ fds = calloc(nsockfd, sizeof(struct pollfd));
for (i = 0; i < nsockfd; i++) {
- GIOChannel *gio;
-
- gio = g_io_channel_unix_new(sockfdlist[i]);
- g_io_add_watch(gio, (G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL),
- process_request, NULL);
+ fds[i].fd = sockfdlist[i];
+ fds[i].events = POLLIN;
+ }
+ timeout.tv_sec = MAIN_LOOP_TIMEOUT;
+ timeout.tv_nsec = 0;
+
+ sigfillset(&sigmask);
+ sigdelset(&sigmask, SIGTERM);
+ sigdelset(&sigmask, SIGINT);
+
+ terminate = 0;
+ while (!terminate) {
+ int r;
+
+ r = ppoll(fds, nsockfd, &timeout, &sigmask);
+ if (r < 0) {
+ dbg("%s", strerror(errno));
+ terminate = 1;
+ } else if (r) {
+ for (i = 0; i < nsockfd; i++) {
+ if (fds[i].revents & POLLIN) {
+ dbg("read event on fd[%d]=%d",
+ i, sockfdlist[i]);
+ process_request(sockfdlist[i]);
+ }
+ }
+ } else
+ dbg("heartbeat timeout on ppoll()");
}
-
- main_loop = g_main_loop_new(FALSE, FALSE);
- g_main_loop_run(main_loop);
info("shutting down " PROGNAME);
-
+ free(fds);
freeaddrinfo(ai_head);
usbip_host_driver_close();
usbip_names_free();
@@ -509,7 +523,7 @@ int main(int argc, char *argv[])
cmd_version
} cmd;
- gboolean daemonize = FALSE;
+ int daemonize = 0;
int opt, rc = -1;
usbip_use_stderr = 1;
@@ -527,7 +541,7 @@ int main(int argc, char *argv[])
switch (opt) {
case 'D':
- daemonize = TRUE;
+ daemonize = 1;
break;
case 'd':
usbip_use_debug = 1;
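[Editor's sketch] The usbipd.c rewrite drops the glib main loop in favour of a ppoll() loop that waits on the listening sockets with a heartbeat timeout while keeping SIGTERM/SIGINT deliverable. A hedged, self-contained sketch of that structure; accept_one() stands in for the real process_request() and serve() is an invented wrapper:

	#define _GNU_SOURCE
	#include <poll.h>
	#include <signal.h>
	#include <time.h>

	static int accept_one(int fd) { (void)fd; return 0; }

	static void serve(struct pollfd *fds, int nfds)
	{
		struct timespec timeout = { .tv_sec = 10, .tv_nsec = 0 };
		sigset_t sigmask;

		sigfillset(&sigmask);
		sigdelset(&sigmask, SIGTERM);	/* let these interrupt ppoll() */
		sigdelset(&sigmask, SIGINT);

		for (;;) {
			int i, r = ppoll(fds, nfds, &timeout, &sigmask);

			if (r < 0)
				break;		/* signal or error: shut down */
			for (i = 0; r > 0 && i < nfds; i++)
				if (fds[i].revents & POLLIN)
					accept_one(fds[i].fd);
		}
	}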
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index c3aa2195f1a8..f1ca08478da8 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -121,11 +121,9 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
void rh_port_connect(int rhport, enum usb_device_speed speed)
{
- unsigned long flags;
-
usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock(&the_controller->lock);
the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
| (1 << USB_PORT_FEAT_C_CONNECTION);
@@ -141,24 +139,22 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
break;
}
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
}
static void rh_port_disconnect(int rhport)
{
- unsigned long flags;
-
usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock(&the_controller->lock);
the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
the_controller->port_status[rhport] |=
(1 << USB_PORT_FEAT_C_CONNECTION);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
}
@@ -183,7 +179,6 @@ static void rh_port_disconnect(int rhport)
static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
{
struct vhci_hcd *vhci;
- unsigned long flags;
int retval;
int rhport;
int changed = 0;
@@ -193,7 +188,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
vhci = hcd_to_vhci(hcd);
- spin_lock_irqsave(&vhci->lock, flags);
+ spin_lock(&vhci->lock);
if (!HCD_HW_ACCESSIBLE(hcd)) {
usbip_dbg_vhci_rh("hw accessible flag not on?\n");
goto done;
@@ -216,7 +211,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
usb_hcd_resume_root_hub(hcd);
done:
- spin_unlock_irqrestore(&vhci->lock, flags);
+ spin_unlock(&vhci->lock);
return changed ? retval : 0;
}
@@ -237,7 +232,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
{
struct vhci_hcd *dum;
int retval = 0;
- unsigned long flags;
int rhport;
u32 prev_port_status[VHCI_NPORTS];
@@ -257,7 +251,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dum = hcd_to_vhci(hcd);
- spin_lock_irqsave(&dum->lock, flags);
+ spin_lock(&dum->lock);
/* store old status and compare now and old later */
if (usbip_dbg_flag_vhci_rh) {
@@ -410,7 +404,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
usbip_dbg_vhci_rh(" bye\n");
- spin_unlock_irqrestore(&dum->lock, flags);
+ spin_unlock(&dum->lock);
return retval;
}
@@ -433,7 +427,6 @@ static void vhci_tx_urb(struct urb *urb)
{
struct vhci_device *vdev = get_vdev(urb->dev);
struct vhci_priv *priv;
- unsigned long flag;
if (!vdev) {
pr_err("could not get virtual device");
@@ -441,16 +434,13 @@ static void vhci_tx_urb(struct urb *urb)
}
priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
-
- spin_lock_irqsave(&vdev->priv_lock, flag);
-
if (!priv) {
- dev_err(&urb->dev->dev, "malloc vhci_priv\n");
- spin_unlock_irqrestore(&vdev->priv_lock, flag);
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
return;
}
+ spin_lock(&vdev->priv_lock);
+
priv->seqnum = atomic_inc_return(&the_controller->seqnum);
if (priv->seqnum == 0xffff)
dev_info(&urb->dev->dev, "seqnum max\n");
@@ -463,7 +453,7 @@ static void vhci_tx_urb(struct urb *urb)
list_add_tail(&priv->list, &vdev->priv_tx);
wake_up(&vdev->waitq_tx);
- spin_unlock_irqrestore(&vdev->priv_lock, flag);
+ spin_unlock(&vdev->priv_lock);
}
static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
@@ -471,7 +461,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
{
struct device *dev = &urb->dev->dev;
int ret = 0;
- unsigned long flags;
struct vhci_device *vdev;
usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
@@ -480,11 +469,11 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
/* patch to usb_sg_init() is in 2.5.60 */
BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock(&the_controller->lock);
if (urb->status != -EINPROGRESS) {
dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
return urb->status;
}
@@ -496,7 +485,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
vdev->ud.status == VDEV_ST_ERROR) {
dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
spin_unlock(&vdev->ud.lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
return -ENODEV;
}
spin_unlock(&vdev->ud.lock);
@@ -571,14 +560,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
out:
vhci_tx_urb(urb);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
return 0;
no_need_xmit:
usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
return ret;
}
@@ -631,19 +620,18 @@ no_need_unlink:
*/
static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
- unsigned long flags;
struct vhci_priv *priv;
struct vhci_device *vdev;
pr_info("dequeue a urb %p\n", urb);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock(&the_controller->lock);
priv = urb->hcpriv;
if (!priv) {
/* URB was never linked! or will be soon given back by
* vhci_rx. */
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
return 0;
}
@@ -651,7 +639,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
int ret = 0;
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret) {
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
return ret;
}
}
@@ -661,16 +649,14 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (!vdev->ud.tcp_socket) {
/* tcp connection is closed */
- unsigned long flags2;
-
- spin_lock_irqsave(&vdev->priv_lock, flags2);
+ spin_lock(&vdev->priv_lock);
pr_info("device %p seems to be disconnected\n", vdev);
list_del(&priv->list);
kfree(priv);
urb->hcpriv = NULL;
- spin_unlock_irqrestore(&vdev->priv_lock, flags2);
+ spin_unlock(&vdev->priv_lock);
/*
* If tcp connection is alive, we have sent CMD_UNLINK.
@@ -681,24 +667,22 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock(&the_controller->lock);
} else {
/* tcp connection is alive */
- unsigned long flags2;
struct vhci_unlink *unlink;
- spin_lock_irqsave(&vdev->priv_lock, flags2);
+ spin_lock(&vdev->priv_lock);
/* setup CMD_UNLINK pdu */
unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
if (!unlink) {
- pr_err("malloc vhci_unlink\n");
- spin_unlock_irqrestore(&vdev->priv_lock, flags2);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&vdev->priv_lock);
+ spin_unlock(&the_controller->lock);
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
return -ENOMEM;
}
@@ -716,10 +700,10 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
list_add_tail(&unlink->list, &vdev->unlink_tx);
wake_up(&vdev->waitq_tx);
- spin_unlock_irqrestore(&vdev->priv_lock, flags2);
+ spin_unlock(&vdev->priv_lock);
}
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
usbip_dbg_vhci_hc("leave\n");
return 0;
@@ -957,9 +941,9 @@ static int vhci_bus_suspend(struct usb_hcd *hcd)
dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
- spin_lock_irq(&vhci->lock);
+ spin_lock(&vhci->lock);
hcd->state = HC_STATE_SUSPENDED;
- spin_unlock_irq(&vhci->lock);
+ spin_unlock(&vhci->lock);
return 0;
}
@@ -971,13 +955,13 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
- spin_lock_irq(&vhci->lock);
+ spin_lock(&vhci->lock);
if (!HCD_HW_ACCESSIBLE(hcd)) {
rc = -ESHUTDOWN;
} else {
hcd->state = HC_STATE_RUNNING;
}
- spin_unlock_irq(&vhci->lock);
+ spin_unlock(&vhci->lock);
return rc;
}
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index ba5f1c079b69..faf8e607c5c6 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -68,7 +68,6 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
- unsigned long flags;
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +100,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock(&the_controller->lock);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
@@ -141,7 +140,6 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
{
struct vhci_unlink *unlink;
struct urb *urb;
- unsigned long flags;
usbip_dump_header(pdu);
@@ -171,9 +169,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock(&the_controller->lock);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock(&the_controller->lock);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
diff --git a/drivers/staging/usbip/vhci_tx.c b/drivers/staging/usbip/vhci_tx.c
index b1f0dcd68f55..409fd99f3257 100644
--- a/drivers/staging/usbip/vhci_tx.c
+++ b/drivers/staging/usbip/vhci_tx.c
@@ -46,18 +46,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb)
static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
{
- unsigned long flags;
struct vhci_priv *priv, *tmp;
- spin_lock_irqsave(&vdev->priv_lock, flags);
+ spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
list_move_tail(&priv->list, &vdev->priv_rx);
- spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ spin_unlock(&vdev->priv_lock);
return priv;
}
- spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ spin_unlock(&vdev->priv_lock);
return NULL;
}
@@ -136,18 +135,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
{
- unsigned long flags;
struct vhci_unlink *unlink, *tmp;
- spin_lock_irqsave(&vdev->priv_lock, flags);
+ spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
list_move_tail(&unlink->list, &vdev->unlink_rx);
- spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ spin_unlock(&vdev->priv_lock);
return unlink;
}
- spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ spin_unlock(&vdev->priv_lock);
return NULL;
}
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index d0cab1766190..8e8bbb1dcd9b 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -6,7 +6,7 @@ config VME_USER
help
If you say Y here you want to be able to access a limited number of
VME windows in a manner at least semi-compatible with the interface
- provided with the original driver at http://vmelinux.org/.
+ provided with the original driver at <http://www.vmelinux.org/>.
config VME_PIO2
tristate "GE PIO2 VME"
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 0331178ca3b3..fd19c257f533 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -162,11 +162,9 @@ static struct vme_driver pio2_driver = {
static int __init pio2_init(void)
{
- int retval = 0;
-
if (bus_num == 0) {
pr_err("No cards, skipping registration\n");
- goto err_nocard;
+ return -ENODEV;
}
if (bus_num > PIO2_CARDS_MAX) {
@@ -176,15 +174,7 @@ static int __init pio2_init(void)
}
/* Register the PIO2 driver */
- retval = vme_register_driver(&pio2_driver, bus_num);
- if (retval != 0)
- goto err_reg;
-
- return retval;
-
-err_reg:
-err_nocard:
- return retval;
+ return vme_register_driver(&pio2_driver, bus_num);
}
static int pio2_match(struct vme_dev *vdev)
@@ -232,7 +222,6 @@ static int pio2_probe(struct vme_dev *vdev)
card = kzalloc(sizeof(struct pio2_card), GFP_KERNEL);
if (card == NULL) {
- dev_err(&vdev->dev, "Unable to allocate card structure\n");
retval = -ENOMEM;
goto err_struct;
}
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index 69d880517e07..2a2d920d980b 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -192,10 +192,8 @@ int pio2_gpio_init(struct pio2_card *card)
char *label;
label = kmalloc(PIO2_NUM_CHANNELS, GFP_KERNEL);
- if (label == NULL) {
- dev_err(&card->vdev->dev, "Unable to allocate GPIO label\n");
+ if (label == NULL)
return -ENOMEM;
- }
sprintf(label, "%s@%s", driver_name, dev_name(&card->vdev->dev));
card->gc.label = label;
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 4ef852c4c4e1..57474cff51f0 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -761,8 +761,6 @@ static int vme_user_probe(struct vme_dev *vdev)
image[i].size_buf = PCI_BUF_SIZE;
image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
if (image[i].kern_buf == NULL) {
- dev_warn(&vdev->dev,
- "Unable to allocate memory for master window buffers\n");
err = -ENOMEM;
goto err_master_buf;
}
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 47c156bb70a9..aa76e39a46f4 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -749,9 +749,9 @@ bool get_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
unsigned char *pbyChannelNumber, unsigned char *pbyMap)
{
- if (uChannelIndex > CB_MAX_CHANNEL) {
+ if (uChannelIndex > CB_MAX_CHANNEL)
return false;
- }
+
*pbyChannelNumber = sChannelTbl[uChannelIndex].byChannelNumber;
*pbyMap = sChannelTbl[uChannelIndex].byMAP;
return sChannelTbl[uChannelIndex].bValid;
@@ -761,9 +761,9 @@ void set_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
unsigned char byMap)
{
- if (uChannelIndex > CB_MAX_CHANNEL) {
+ if (uChannelIndex > CB_MAX_CHANNEL)
return;
- }
+
sChannelTbl[uChannelIndex].byMAP |= byMap;
}
@@ -771,9 +771,8 @@ void clear_channel_map_info(void *pDeviceHandler)
{
unsigned int ii = 0;
- for (ii = 1; ii <= CB_MAX_CHANNEL; ii++) {
+ for (ii = 1; ii <= CB_MAX_CHANNEL; ii++)
sChannelTbl[ii].byMAP = 0;
- }
}
unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index e54e00bc5665..e27244ce383e 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -881,26 +881,14 @@ inline static bool device_get_ip(PSDevice pInfo) {
-static inline PDEVICE_RD_INFO alloc_rd_info(void) {
- PDEVICE_RD_INFO ptr;
- ptr = (PDEVICE_RD_INFO)kmalloc((int)sizeof(DEVICE_RD_INFO), (int)GFP_ATOMIC);
- if (ptr == NULL)
- return NULL;
- else {
- memset(ptr,0,sizeof(DEVICE_RD_INFO));
- return ptr;
- }
+static inline PDEVICE_RD_INFO alloc_rd_info(void)
+{
+ return kzalloc(sizeof(DEVICE_RD_INFO), GFP_ATOMIC);
}
-static inline PDEVICE_TD_INFO alloc_td_info(void) {
- PDEVICE_TD_INFO ptr;
- ptr = (PDEVICE_TD_INFO)kmalloc((int)sizeof(DEVICE_TD_INFO), (int)GFP_ATOMIC);
- if (ptr == NULL)
- return NULL;
- else {
- memset(ptr,0,sizeof(DEVICE_TD_INFO));
- return ptr;
- }
+static inline PDEVICE_TD_INFO alloc_td_info(void)
+{
+ return kzalloc(sizeof(DEVICE_TD_INFO), GFP_ATOMIC);
}
/*--------------------- Export Functions --------------------------*/
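[Editor's sketch] kzalloc() returns zeroed memory, so the kmalloc()+memset() pairs (and the casts) in the old inline helpers above are unnecessary. A tiny sketch; struct rd_info is a stand-in type, not the driver's real structure:

	#include <linux/slab.h>

	struct rd_info { int dummy; };

	static struct rd_info *alloc_rd_info(void)
	{
		/* One call: allocate and zero, or NULL on failure. */
		return kzalloc(sizeof(struct rd_info), GFP_ATOMIC);
	}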
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 875ee4442386..d66854f5b304 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -2421,7 +2421,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
// Notes:
- // Although spec says MMPDU can be fragmented; In most case,
+ // Although spec says MMPDU can be fragmented; In most cases,
// no one will send a MMPDU under fragmentation. With RTS may occur.
pDevice->bAES = false; //Set FRAGCTL_WEPTYP
@@ -2510,7 +2510,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
pbyPayloadHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
//Fill TXKEY
- //Kyle: Need fix: TKIP and AES did't encryt Mnt Packet.
+ //Kyle: Need fix: TKIP and AES didn't encrypt Mnt Packet.
//s_vFillTxKey(pDevice, (unsigned char *)pTxBufHead->adwTxKey, NULL);
//Fill IV(ExtIV,RSNHDR)
@@ -2957,7 +2957,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)cbMacHdLen << 10);
// Notes:
- // Although spec says MMPDU can be fragmented; In most casses,
+ // Although spec says MMPDU can be fragmented; In most cases,
// no one will send a MMPDU under fragmentation. With RTS may occur.
pDevice->bAES = false; //Set FRAGCTL_WEPTYP
diff --git a/drivers/staging/vt6655/wcmd.c b/drivers/staging/vt6655/wcmd.c
index 6d0b87a14267..101c7359f414 100644
--- a/drivers/staging/vt6655/wcmd.c
+++ b/drivers/staging/vt6655/wcmd.c
@@ -720,7 +720,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
pDevice->nTxDataTimeCout = 0;
}
else {
- // printk("mike:-->First time triger TimerTxData InSleep\n");
+ // printk("mike:-->First time trigger TimerTxData InSleep\n");
}
pDevice->IsTxDataTrigger = true;
add_timer(&pDevice->sTimerTxData);
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index b6f99ecbbeb5..b08a611a184a 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -2068,7 +2068,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==true)
if (pBSSList != NULL) {
- // Compare PHY paramater setting
+ // Compare PHY parameter setting
if (pMgmt->wCurrCapInfo != pBSSList->wCapInfo) {
bUpdatePhyParameter = true;
pMgmt->wCurrCapInfo = pBSSList->wCapInfo;
diff --git a/drivers/staging/vt6656/80211mgr.c b/drivers/staging/vt6656/80211mgr.c
index e6ced95e6fa7..534d490539b6 100644
--- a/drivers/staging/vt6656/80211mgr.c
+++ b/drivers/staging/vt6656/80211mgr.c
@@ -96,9 +96,10 @@ vMgrEncodeBeacon(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- /* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_TS);
+ /* Fixed Fields */
+ pFrame->pqwTimestamp =
+ (u64 *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_TS);
pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_BCN_INT);
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -130,9 +131,10 @@ vMgrDecodeBeacon(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- /* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_TS);
+ /* Fixed Fields */
+ pFrame->pqwTimestamp =
+ (u64 *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_TS);
pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_BCN_INT);
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -178,7 +180,7 @@ vMgrDecodeBeacon(
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
}
break;
@@ -391,7 +393,7 @@ vMgrDecodeAssocRequest(
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
}
break;
@@ -561,7 +563,7 @@ vMgrDecodeReassocRequest(
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL)
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
break;
@@ -671,9 +673,10 @@ vMgrEncodeProbeResponse(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- /* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_TS);
+ /* Fixed Fields */
+ pFrame->pqwTimestamp =
+ (u64 *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_TS);
pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_BCN_INT);
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -706,9 +709,10 @@ vMgrDecodeProbeResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- /* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_TS);
+ /* Fixed Fields */
+ pFrame->pqwTimestamp =
+ (u64 *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_TS);
pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_BCN_INT);
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -749,7 +753,7 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
}
break;
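
Editor's note: the 80211mgr.c hunks above drop the driver-private PQWORD typedef and point at the beacon/probe-response timestamp with a plain u64 pointer into the frame body. A hedged sketch of reading that field portably (the helper name is made up; per the header below, WLAN_BEACON_OFF_TS is 0):

#include <linux/types.h>
#include <asm/unaligned.h>

/*
 * The 802.11 timestamp is an 8-byte little-endian field at the start of a
 * beacon body.  A raw u64 * works where unaligned loads are permitted;
 * get_unaligned_le64() is the portable way to read it.
 */
static u64 example_read_beacon_timestamp(const u8 *frame_body)
{
	return get_unaligned_le64(frame_body);	/* offset WLAN_BEACON_OFF_TS == 0 */
}
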
diff --git a/drivers/staging/vt6656/80211mgr.h b/drivers/staging/vt6656/80211mgr.h
index e5db73be0e71..f8e16d8989ea 100644
--- a/drivers/staging/vt6656/80211mgr.h
+++ b/drivers/staging/vt6656/80211mgr.h
@@ -38,7 +38,7 @@
#define WLAN_MIN_ARRAY 1
-// Information Element ID value
+/* Information Element ID value */
#define WLAN_EID_SSID 0
#define WLAN_EID_SUPP_RATES 1
#define WLAN_EID_FH_PARMS 2
@@ -59,10 +59,10 @@
#define WLAN_EID_QUIET 40
#define WLAN_EID_IBSS_DFS 41
#define WLAN_EID_ERP 42
-// reference 802.11i 7.3.2 table 20
+/* reference 802.11i 7.3.2 table 20 */
#define WLAN_EID_RSN 48
#define WLAN_EID_EXTSUPP_RATES 50
-// reference WiFi WPA spec.
+/* reference WiFi WPA spec */
#define WLAN_EID_RSN_WPA 221
#ifdef Cisco_ccx
@@ -75,7 +75,7 @@
#define WLAN_EID_ERP_USE_PROTECTION 0x02
#define WLAN_EID_ERP_BARKER_MODE 0x04
-// Reason Codes
+/* reason codes */
#define WLAN_MGMT_REASON_RSVD 0
#define WLAN_MGMT_REASON_UNSPEC 1
#define WLAN_MGMT_REASON_PRIOR_AUTH_INVALID 2
@@ -100,7 +100,7 @@
#define WLAN_MGMT_REASON_RSNE_CAP_INVALID 22
#define WLAN_MGMT_REASON_80211X_AUTH_FAILED 23
-// Status Codes
+/* status codes */
#define WLAN_MGMT_STATUS_SUCCESS 0
#define WLAN_MGMT_STATUS_UNSPEC_FAILURE 1
#define WLAN_MGMT_STATUS_CAPS_UNSUPPORTED 10
@@ -116,19 +116,16 @@
#define WLAN_MGMT_STATUS_ASSOC_DENIED_PBCC 20
#define WLAN_MGMT_STATUS_ASSOC_DENIED_AGILITY 21
-// reference 802.11h 7.3.1.9
-//
+/* reference 802.11h 7.3.1.9 */
#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_SPECTRUM_MNG 22
#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_PWR_CAP 23
#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_SUPP_CH 24
-//
-// reference 802.11g 7.3.1.9
-//
+
+/* reference 802.11g 7.3.1.9 */
#define WLAN_MGMT_STATUS_SHORTSLOTTIME_UNSUPPORTED 25
#define WLAN_MGMT_STATUS_DSSSOFDM_UNSUPPORTED 26
-//
-// reference 802.11i 7.3.1.9 table 19
-//
+
+/* reference 802.11i 7.3.1.9 table 19 */
#define WLAN_MGMT_STATUS_INVALID_IE 40
#define WLAN_MGMT_STATUS_GROUP_CIPHER_INVALID 41
#define WLAN_MGMT_STATUS_PAIRWISE_CIPHER_INVALID 42
@@ -137,17 +134,16 @@
#define WLAN_MGMT_STATUS_INVALID_RSN_IE_CAP 45
#define WLAN_MGMT_STATUS_CIPHER_REJECT 46
-
-
-// Auth Algorithm
+/* auth algorithm */
#define WLAN_AUTH_ALG_OPENSYSTEM 0
#define WLAN_AUTH_ALG_SHAREDKEY 1
+/* management frame field offsets */
-
-// Management Frame Field Offsets
-// Note: Not all fields are listed because of variable lengths.
-// Note: These offsets are from the start of the frame data
+/*
+ * Note: Not all fields are listed because of variable lengths
+ * Note: These offsets are from the start of the frame data
+ */
#define WLAN_BEACON_OFF_TS 0
#define WLAN_BEACON_OFF_BCN_INT 8
@@ -189,10 +185,7 @@
#define WLAN_DEAUTHEN_OFF_REASON 0
-
-//
-// Cipher Suite Selectors defined in 802.11i
-//
+/* cipher suite selectors defined in 802.11i */
#define WLAN_11i_CSS_USE_GROUP 0
#define WLAN_11i_CSS_WEP40 1
#define WLAN_11i_CSS_TKIP 2
@@ -200,24 +193,22 @@
#define WLAN_11i_CSS_WEP104 5
#define WLAN_11i_CSS_UNKNOWN 255
-//
-// Authentication and Key Management Suite Selectors defined in 802.11i
-//
+/* authentication and key management suite selectors defined in 802.11i */
#define WLAN_11i_AKMSS_802_1X 1
#define WLAN_11i_AKMSS_PSK 2
#define WLAN_11i_AKMSS_UNKNOWN 255
-// Measurement type definitions reference ieee 802.11h Table 20b
+/* measurement type definitions reference IEEE 802.11h table 20b */
#define MEASURE_TYPE_BASIC 0
#define MEASURE_TYPE_CCA 1
#define MEASURE_TYPE_RPI 2
-// Measurement request mode definitions reference ieee 802.11h Figure 46h
+/* measurement request mode definitions reference IEEE 802.11h figure 46h */
#define MEASURE_MODE_ENABLE 0x02
#define MEASURE_MODE_REQ 0x04
#define MEASURE_MODE_REP 0x08
-// Measurement report mode definitions reference ieee 802.11h Figure 46m
+/* measurement report mode definitions reference IEEE 802.11h figure 46m */
#define MEASURE_MODE_LATE 0x01
#define MEASURE_MODE_INCAPABLE 0x02
#define MEASURE_MODE_REFUSED 0x04
@@ -228,7 +219,7 @@
/*--------------------- Export Types ------------------------------*/
-// Information Element Types
+/* Information Element types */
#pragma pack(1)
typedef struct tagWLAN_IE {
@@ -237,7 +228,7 @@ typedef struct tagWLAN_IE {
} __attribute__ ((__packed__))
WLAN_IE, *PWLAN_IE;
-// Service Set Identity (SSID)
+/* Service Set IDentity (SSID) */
#pragma pack(1)
typedef struct tagWLAN_IE_SSID {
BYTE byElementID;
@@ -246,7 +237,7 @@ typedef struct tagWLAN_IE_SSID {
} __attribute__ ((__packed__))
WLAN_IE_SSID, *PWLAN_IE_SSID;
-// Supported Rates
+/* Supported Rates */
#pragma pack(1)
typedef struct tagWLAN_IE_SUPP_RATES {
BYTE byElementID;
@@ -255,7 +246,7 @@ typedef struct tagWLAN_IE_SUPP_RATES {
} __attribute__ ((__packed__))
WLAN_IE_SUPP_RATES, *PWLAN_IE_SUPP_RATES;
-// FH Parameter Set
+/* FH Parameter Set */
#pragma pack(1)
typedef struct _WLAN_IE_FH_PARMS {
BYTE byElementID;
@@ -266,7 +257,7 @@ typedef struct _WLAN_IE_FH_PARMS {
BYTE byHopIndex;
} WLAN_IE_FH_PARMS, *PWLAN_IE_FH_PARMS;
-// DS Parameter Set
+/* DS Parameter Set */
#pragma pack(1)
typedef struct tagWLAN_IE_DS_PARMS {
BYTE byElementID;
@@ -275,7 +266,7 @@ typedef struct tagWLAN_IE_DS_PARMS {
} __attribute__ ((__packed__))
WLAN_IE_DS_PARMS, *PWLAN_IE_DS_PARMS;
-// CF Parameter Set
+/* CF Parameter Set */
#pragma pack(1)
typedef struct tagWLAN_IE_CF_PARMS {
BYTE byElementID;
@@ -287,7 +278,7 @@ typedef struct tagWLAN_IE_CF_PARMS {
} __attribute__ ((__packed__))
WLAN_IE_CF_PARMS, *PWLAN_IE_CF_PARMS;
-// TIM
+/* TIM */
#pragma pack(1)
typedef struct tagWLAN_IE_TIM {
BYTE byElementID;
@@ -299,7 +290,7 @@ typedef struct tagWLAN_IE_TIM {
} __attribute__ ((__packed__))
WLAN_IE_TIM, *PWLAN_IE_TIM;
-// IBSS Parameter Set
+/* IBSS Parameter Set */
#pragma pack(1)
typedef struct tagWLAN_IE_IBSS_PARMS {
BYTE byElementID;
@@ -308,7 +299,7 @@ typedef struct tagWLAN_IE_IBSS_PARMS {
} __attribute__ ((__packed__))
WLAN_IE_IBSS_PARMS, *PWLAN_IE_IBSS_PARMS;
-// Challenge Text
+/* Challenge Text */
#pragma pack(1)
typedef struct tagWLAN_IE_CHALLENGE {
BYTE byElementID;
@@ -325,10 +316,10 @@ typedef struct tagWLAN_IE_RSN_EXT {
WORD wVersion;
BYTE abyMulticast[4];
WORD wPKCount;
- struct {
- BYTE abyOUI[4];
- } PKSList[1]; // the rest is variable so need to
- // overlay ieauth structure
+ struct {
+ BYTE abyOUI[4];
+ } PKSList[1];
+ /* the rest is variable so need to overlay ieauth structure */
} WLAN_IE_RSN_EXT, *PWLAN_IE_RSN_EXT;
#pragma pack(1)
@@ -339,7 +330,7 @@ typedef struct tagWLAN_IE_RSN_AUTH {
} AuthKSList[1];
} WLAN_IE_RSN_AUTH, *PWLAN_IE_RSN_AUTH;
-// RSN Identity
+/* RSN Identity */
#pragma pack(1)
typedef struct tagWLAN_IE_RSN {
BYTE byElementID;
@@ -348,8 +339,7 @@ typedef struct tagWLAN_IE_RSN {
BYTE abyRSN[WLAN_MIN_ARRAY];
} WLAN_IE_RSN, *PWLAN_IE_RSN;
-
-// CCX Identity DavidWang
+/* CCX Identity DavidWang */
#pragma pack(1)
typedef struct tagWLAN_IE_CCX {
BYTE byElementID;
@@ -371,9 +361,7 @@ BYTE len;
BYTE abyCCXVer[5];
} WLAN_IE_CCX_Ver, *PWLAN_IE_CCX_Ver;
-
-
-// ERP
+/* ERP */
#pragma pack(1)
typedef struct tagWLAN_IE_ERP {
BYTE byElementID;
@@ -505,10 +493,9 @@ typedef struct _WLAN_IE_IBSS_DFS {
#pragma pack()
+/* frame types */
-
-// Frame Types
-// prototype structure, all mgmt frame types will start with these members
+/* prototype structure, all mgmt frame types will start with these members */
typedef struct tagWLAN_FR_MGMT {
unsigned int uType;
@@ -518,21 +505,21 @@ typedef struct tagWLAN_FR_MGMT {
} WLAN_FR_MGMT, *PWLAN_FR_MGMT;
-// Beacon frame
+/* beacon frame */
typedef struct tagWLAN_FR_BEACON {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- // fixed fields
- PQWORD pqwTimestamp;
+ /* fixed fields */
+ u64 *pqwTimestamp;
PWORD pwBeaconInterval;
PWORD pwCapInfo;
- /*-- info elements ----------*/
+ /* info elements */
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
-// PWLAN_IE_FH_PARMS pFHParms;
+/* PWLAN_IE_FH_PARMS pFHParms; */
PWLAN_IE_DS_PARMS pDSParms;
PWLAN_IE_CF_PARMS pCFParms;
PWLAN_IE_TIM pTIM;
@@ -549,8 +536,7 @@ typedef struct tagWLAN_FR_BEACON {
} WLAN_FR_BEACON, *PWLAN_FR_BEACON;
-
-// IBSS ATIM frame
+/* IBSS ATIM frame */
typedef struct tagWLAN_FR_IBSSATIM {
unsigned int uType;
@@ -558,36 +544,36 @@ typedef struct tagWLAN_FR_IBSSATIM {
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- // fixed fields
- // info elements
- // this frame type has a null body
+ /* fixed fields */
+ /* info elements */
+ /* this frame type has a null body */
} WLAN_FR_IBSSATIM, *PWLAN_FR_IBSSATIM;
-// Disassociation
+/* disassociation */
typedef struct tagWLAN_FR_DISASSOC {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
+ /* fixed fields */
PWORD pwReason;
- /*-- info elements ----------*/
+ /* info elements */
} WLAN_FR_DISASSOC, *PWLAN_FR_DISASSOC;
-// Association Request
+/* association request */
typedef struct tagWLAN_FR_ASSOCREQ {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
+ /* fixed fields */
PWORD pwCapInfo;
PWORD pwListenInterval;
- /*-- info elements ----------*/
+ /* info elements */
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
PWLAN_IE_RSN pRSN;
@@ -601,24 +587,24 @@ typedef struct tagWLAN_FR_ASSOCREQ {
} WLAN_FR_ASSOCREQ, *PWLAN_FR_ASSOCREQ;
-// Association Response
+/* association response */
typedef struct tagWLAN_FR_ASSOCRESP {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
+ /* fixed fields */
PWORD pwCapInfo;
PWORD pwStatus;
PWORD pwAid;
- /*-- info elements ----------*/
+ /* info elements */
PWLAN_IE_SUPP_RATES pSuppRates;
PWLAN_IE_SUPP_RATES pExtSuppRates;
} WLAN_FR_ASSOCRESP, *PWLAN_FR_ASSOCRESP;
-// Reassociation Request
+/* reassociation request */
typedef struct tagWLAN_FR_REASSOCREQ {
unsigned int uType;
@@ -626,12 +612,12 @@ typedef struct tagWLAN_FR_REASSOCREQ {
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
+ /* fixed fields */
PWORD pwCapInfo;
PWORD pwListenInterval;
PIEEE_ADDR pAddrCurrAP;
- /*-- info elements ----------*/
+ /* info elements */
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
PWLAN_IE_RSN pRSN;
@@ -643,50 +629,50 @@ typedef struct tagWLAN_FR_REASSOCREQ {
} WLAN_FR_REASSOCREQ, *PWLAN_FR_REASSOCREQ;
-// Reassociation Response
+/* reassociation response */
typedef struct tagWLAN_FR_REASSOCRESP {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
+ /* fixed fields */
PWORD pwCapInfo;
PWORD pwStatus;
PWORD pwAid;
- /*-- info elements ----------*/
+ /* info elements */
PWLAN_IE_SUPP_RATES pSuppRates;
PWLAN_IE_SUPP_RATES pExtSuppRates;
} WLAN_FR_REASSOCRESP, *PWLAN_FR_REASSOCRESP;
-// Probe Request
+/* probe request */
typedef struct tagWLAN_FR_PROBEREQ {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
- /*-- info elements ----------*/
+ /* fixed fields */
+ /* info elements */
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
PWLAN_IE_SUPP_RATES pExtSuppRates;
} WLAN_FR_PROBEREQ, *PWLAN_FR_PROBEREQ;
-// Probe Response
+/* probe response */
typedef struct tagWLAN_FR_PROBERESP {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
- PQWORD pqwTimestamp;
+ /* fixed fields */
+ u64 *pqwTimestamp;
PWORD pwBeaconInterval;
PWORD pwCapInfo;
- /*-- info elements ----------*/
+ /* info elements */
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
PWLAN_IE_DS_PARMS pDSParms;
@@ -704,37 +690,38 @@ typedef struct tagWLAN_FR_PROBERESP {
} WLAN_FR_PROBERESP, *PWLAN_FR_PROBERESP;
-// Authentication
+/* authentication */
typedef struct tagWLAN_FR_AUTHEN {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
+ /* fixed fields */
PWORD pwAuthAlgorithm;
PWORD pwAuthSequence;
PWORD pwStatus;
- /*-- info elements ----------*/
+ /* info elements */
PWLAN_IE_CHALLENGE pChallenge;
} WLAN_FR_AUTHEN, *PWLAN_FR_AUTHEN;
-// Deauthentication
+/* deauthentication */
typedef struct tagWLAN_FR_DEAUTHEN {
unsigned int uType;
unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
- /*-- fixed fields -----------*/
+ /* fixed fields */
PWORD pwReason;
- /*-- info elements ----------*/
+ /* info elements */
} WLAN_FR_DEAUTHEN, *PWLAN_FR_DEAUTHEN;
/*--------------------- Export Functions --------------------------*/
+
void
vMgrEncodeBeacon(
PWLAN_FR_BEACON pFrame
@@ -845,4 +832,4 @@ vMgrDecodeReassocResponse(
PWLAN_FR_REASSOCRESP pFrame
);
-#endif// __80211MGR_H__
+#endif /* __80211MGR_H__ */
diff --git a/drivers/staging/vt6656/aes_ccmp.c b/drivers/staging/vt6656/aes_ccmp.c
index f7a3b8f8da70..fb6124d9082a 100644
--- a/drivers/staging/vt6656/aes_ccmp.c
+++ b/drivers/staging/vt6656/aes_ccmp.c
@@ -231,7 +231,7 @@ void AESv128(BYTE *key, BYTE *data, BYTE *ciphertext)
*
*/
-BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize)
+bool AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize)
{
BYTE abyNonce[13];
BYTE MIC_IV[16];
@@ -249,7 +249,7 @@ BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize)
WORD wHLen = 22;
/* 8 is IV, 8 is MIC, 4 is CRC */
WORD wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;
- BOOL bA4 = FALSE;
+ bool bA4 = false;
BYTE byTmp;
WORD wCnt;
int ii, jj, kk;
@@ -257,7 +257,7 @@ BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize)
pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
if (WLAN_GET_FC_TODS(*(PWORD) pbyFrame) &&
WLAN_GET_FC_FROMDS(*(PWORD) pbyFrame)) {
- bA4 = TRUE;
+ bA4 = true;
pbyIV += 6; /* 6 is 802.11 address4 */
wHLen += 6;
wPayloadSize -= 6;
@@ -380,7 +380,7 @@ BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize)
/* => above is the packet dec-MIC */
if (!memcmp(abyMIC, abyTmp, 8))
- return TRUE;
+ return true;
else
- return FALSE;
+ return false;
}
diff --git a/drivers/staging/vt6656/aes_ccmp.h b/drivers/staging/vt6656/aes_ccmp.h
index 353bd210a502..a2e2c4e9a5c9 100644
--- a/drivers/staging/vt6656/aes_ccmp.h
+++ b/drivers/staging/vt6656/aes_ccmp.h
@@ -41,6 +41,6 @@
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize);
+bool AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize);
#endif /* __AES_CCMP_H__ */
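
Editor's note: the BOOL/TRUE/FALSE to bool/true/false conversion running through aes_ccmp.c and the rest of the series uses the kernel's native bool from <linux/types.h>. A minimal illustration of the same idiom (the function name is made up):

#include <linux/types.h>	/* bool, true, false */
#include <linux/string.h>	/* memcmp */

/* true when two 8-byte MIC values match, in the style AESbGenCCMP now returns */
static bool example_mic_matches(const u8 *calc_mic, const u8 *recv_mic)
{
	return !memcmp(calc_mic, recv_mic, 8);
}
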
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 385501595b4d..a9f525e9d16e 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -690,7 +690,7 @@ s_vClearSQ3Value(PSDevice pDevice);
*
*/
unsigned int
-BBuGetFrameTime (
+BBuGetFrameTime(
BYTE byPreambleType,
BYTE byPktType,
unsigned int cbFrameLength,
@@ -756,26 +756,19 @@ BBuGetFrameTime (
* Return Value: none
*
*/
-void
-BBvCalculateParameter (
- PSDevice pDevice,
- unsigned int cbFrameLength,
- WORD wRate,
- BYTE byPacketType,
- PWORD pwPhyLen,
- PBYTE pbyPhySrv,
- PBYTE pbyPhySgn
- )
+void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
+ u16 wRate, u8 byPacketType, u16 *pwPhyLen, u8 *pbyPhySrv,
+ u8 *pbyPhySgn)
{
- unsigned int cbBitCount;
- unsigned int cbUsCount = 0;
- unsigned int cbTmp;
- BOOL bExtBit;
- BYTE byPreambleType = pDevice->byPreambleType;
- BOOL bCCK = pDevice->bCCK;
+ u32 cbBitCount;
+ u32 cbUsCount = 0;
+ u32 cbTmp;
+ int bExtBit;
+ u8 byPreambleType = pDevice->byPreambleType;
+ int bCCK = pDevice->bCCK;
cbBitCount = cbFrameLength * 8;
- bExtBit = FALSE;
+ bExtBit = false;
switch (wRate) {
case RATE_1M :
@@ -792,7 +785,7 @@ BBvCalculateParameter (
break;
case RATE_5M :
- if (bCCK == FALSE)
+ if (bCCK == false)
cbBitCount ++;
cbUsCount = (cbBitCount * 10) / 55;
cbTmp = (cbUsCount * 55) / 10;
@@ -806,14 +799,14 @@ BBvCalculateParameter (
case RATE_11M :
- if (bCCK == FALSE)
+ if (bCCK == false)
cbBitCount ++;
cbUsCount = cbBitCount / 11;
cbTmp = cbUsCount * 11;
if (cbTmp != cbBitCount) {
cbUsCount ++;
if ((cbBitCount - cbTmp) <= 3)
- bExtBit = TRUE;
+ bExtBit = true;
}
if (byPreambleType == 1)
*pbyPhySgn = 0x0b;
@@ -929,8 +922,7 @@ BBvCalculateParameter (
* Return Value: none
*
*/
-void
-BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
+void BBvSetAntennaMode(struct vnt_private *pDevice, u8 byAntennaMode)
{
switch (byAntennaMode) {
case ANT_TXA:
@@ -968,7 +960,7 @@ BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
*
*/
-BOOL BBbVT3184Init(PSDevice pDevice)
+int BBbVT3184Init(struct vnt_private *pDevice)
{
int ntStatus;
WORD wLength;
@@ -984,12 +976,12 @@ BOOL BBbVT3184Init(PSDevice pDevice)
EEP_MAX_CONTEXT_SIZE,
pDevice->abyEEPROM);
if (ntStatus != STATUS_SUCCESS) {
- return FALSE;
+ return false;
}
// if ((pDevice->abyEEPROM[EEP_OFS_RADIOCTL]&0x06)==0x04)
-// return FALSE;
+// return false;
//zonetype initial
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
@@ -1105,7 +1097,7 @@ else {
MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
//}}
} else {
- return TRUE;
+ return true;
}
memcpy(abyArray, pbyAddr, wLength);
@@ -1144,7 +1136,7 @@ else {
ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01);
RFbRFTableDownload(pDevice);
- return TRUE;//ntStatus;
+ return true;//ntStatus;
}
@@ -1161,7 +1153,7 @@ else {
* Return Value: none
*
*/
-void BBvLoopbackOn (PSDevice pDevice)
+void BBvLoopbackOn(struct vnt_private *pDevice)
{
BYTE byData;
@@ -1214,9 +1206,9 @@ void BBvLoopbackOn (PSDevice pDevice)
* Return Value: none
*
*/
-void BBvLoopbackOff (PSDevice pDevice)
+void BBvLoopbackOff(struct vnt_private *pDevice)
{
- BYTE byData;
+ u8 byData;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, pDevice->byBBCRc9);//CR201
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, pDevice->byBBCR88);//CR136
@@ -1249,8 +1241,7 @@ void BBvLoopbackOff (PSDevice pDevice)
* Return Value: none
*
*/
-void
-BBvSetShortSlotTime (PSDevice pDevice)
+void BBvSetShortSlotTime(struct vnt_private *pDevice)
{
BYTE byBBVGA=0;
@@ -1267,7 +1258,7 @@ BBvSetShortSlotTime (PSDevice pDevice)
}
-void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
+void BBvSetVGAGainOffset(struct vnt_private *pDevice, BYTE byData)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, byData);
@@ -1294,8 +1285,7 @@ void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
* Return Value: none
*
*/
-void
-BBvSoftwareReset (PSDevice pDevice)
+void BBvSoftwareReset(struct vnt_private *pDevice)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x50, 0x40);
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x50, 0);
@@ -1315,22 +1305,20 @@ BBvSoftwareReset (PSDevice pDevice)
* Return Value: none
*
*/
-void
-BBvSetDeepSleep (PSDevice pDevice)
+void BBvSetDeepSleep(struct vnt_private *pDevice)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0c, 0x17);//CR12
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0D, 0xB9);//CR13
}
-void
-BBvExitDeepSleep (PSDevice pDevice)
+void BBvExitDeepSleep(struct vnt_private *pDevice)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0C, 0x00);//CR12
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0D, 0x01);//CR13
}
-static unsigned long s_ulGetLowSQ3(PSDevice pDevice)
+static unsigned long s_ulGetLowSQ3(struct vnt_private *pDevice)
{
int ii;
unsigned long ulSQ3 = 0;
@@ -1349,7 +1337,7 @@ static unsigned long s_ulGetLowSQ3(PSDevice pDevice)
return ulSQ3;
}
-static unsigned long s_ulGetRatio(PSDevice pDevice)
+static unsigned long s_ulGetRatio(struct vnt_private *pDevice)
{
int ii, jj;
unsigned long ulRatio = 0;
@@ -1377,9 +1365,7 @@ static unsigned long s_ulGetRatio(PSDevice pDevice)
}
-static
-void
-s_vClearSQ3Value (PSDevice pDevice)
+static void s_vClearSQ3Value(struct vnt_private *pDevice)
{
int ii;
pDevice->uDiversityCnt = 0;
@@ -1406,8 +1392,8 @@ s_vClearSQ3Value (PSDevice pDevice)
*
*/
-void
-BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
+void BBvAntennaDiversity(struct vnt_private *pDevice,
+ u8 byRxRate, u8 bySQ3)
{
pDevice->uDiversityCnt++;
@@ -1541,9 +1527,8 @@ BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
*
-*/
-void TimerSQ3CallBack(void *hDeviceContext)
+void TimerSQ3CallBack(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TimerSQ3CallBack...");
spin_lock_irq(&pDevice->lock);
@@ -1579,9 +1564,8 @@ void TimerSQ3CallBack(void *hDeviceContext)
*
-*/
-void TimerSQ3Tmax3CallBack(void *hDeviceContext)
+void TimerSQ3Tmax3CallBack(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TimerSQ3Tmax3CallBack...");
spin_lock_irq(&pDevice->lock);
@@ -1607,10 +1591,7 @@ void TimerSQ3Tmax3CallBack(void *hDeviceContext)
spin_unlock_irq(&pDevice->lock);
}
-void
-BBvUpdatePreEDThreshold(
- PSDevice pDevice,
- BOOL bScanning)
+void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
{
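
Editor's note: the baseband.c rewrites replace the private PSDevice/BYTE/WORD/BOOL typedefs with struct vnt_private * and the kernel's u8/u16/u32/bool. A small sketch of the resulting signature style, with illustrative names only (this is not the driver's real API):

#include <linux/types.h>

struct example_private {
	u8 preamble_type;
	bool cck;
};

/* explicit struct pointer plus fixed-width scalars instead of typedef'd ones */
static void example_calc_parameter(struct example_private *priv, u32 frame_len,
				   u16 rate, u16 *phy_len)
{
	u32 bits = frame_len * 8;

	if (!priv->cck)		/* loosely mirrors the bCCK adjustment above */
		bits++;
	*phy_len = (u16)(bits / (rate ? rate : 1));	/* placeholder math */
}
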
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index 844d5a8b13e5..fba61605a692 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -42,31 +42,30 @@
#define PREAMBLE_LONG 0
#define PREAMBLE_SHORT 1
-//
-// Registers in the BASEBAND
-//
+/*
+ * Registers in the BASEBAND
+ */
#define BB_MAX_CONTEXT_SIZE 256
-#define C_SIFS_A 16 // micro sec.
+#define C_SIFS_A 16 /* usec */
#define C_SIFS_BG 10
-#define C_EIFS 80 // micro sec.
+#define C_EIFS 80 /* usec */
-
-#define C_SLOT_SHORT 9 // micro sec.
+#define C_SLOT_SHORT 9 /* usec */
#define C_SLOT_LONG 20
-#define C_CWMIN_A 15 // slot time
+#define C_CWMIN_A 15 /* slot time */
#define C_CWMIN_B 31
-#define C_CWMAX 1023 // slot time
+#define C_CWMAX 1023 /* slot time */
-//0:11A 1:11B 2:11G
+/* 0:11A 1:11B 2:11G */
#define BB_TYPE_11A 0
#define BB_TYPE_11B 1
#define BB_TYPE_11G 2
-//0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
+/* 0:11a, 1:11b, 2:11gb (only CCK in BasicRate), 3:11ga (OFDM in BasicRate) */
#define PK_TYPE_11A 0
#define PK_TYPE_11B 1
#define PK_TYPE_11GB 2
@@ -104,33 +103,26 @@ BBuGetFrameTime(
WORD wRate
);
-void BBvCalculateParameter(PSDevice pDevice,
- unsigned int cbFrameLength,
- WORD wRate,
- BYTE byPacketType,
- PWORD pwPhyLen,
- PBYTE pbyPhySrv,
- PBYTE pbyPhySgn);
-
-// timer for antenna diversity
-
-void TimerSQ3CallBack(void *hDeviceContext);
-void TimerSQ3Tmax3CallBack(void *hDeviceContext);
-
-void BBvAntennaDiversity(PSDevice pDevice, BYTE byRxRate, BYTE bySQ3);
-void BBvLoopbackOn(PSDevice pDevice);
-void BBvLoopbackOff(PSDevice pDevice);
-void BBvSoftwareReset(PSDevice pDevice);
-
-void BBvSetShortSlotTime(PSDevice pDevice);
-void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData);
-void BBvSetAntennaMode(PSDevice pDevice, BYTE byAntennaMode);
-BOOL BBbVT3184Init(PSDevice pDevice);
-void BBvSetDeepSleep(PSDevice pDevice);
-void BBvExitDeepSleep(PSDevice pDevice);
-void BBvUpdatePreEDThreshold(
- PSDevice pDevice,
- BOOL bScanning
- );
+void BBvCalculateParameter(struct vnt_private *, u32 cbFrameLength,
+ u16 wRate, u8 byPacketType, u16 *pwPhyLen, u8 *pbyPhySrv,
+ u8 *pbyPhySgn);
+
+/* timer for antenna diversity */
+
+void TimerSQ3CallBack(struct vnt_private *);
+void TimerSQ3Tmax3CallBack(struct vnt_private *);
+
+void BBvAntennaDiversity(struct vnt_private *, u8 byRxRate, u8 bySQ3);
+void BBvLoopbackOn(struct vnt_private *);
+void BBvLoopbackOff(struct vnt_private *);
+void BBvSoftwareReset(struct vnt_private *);
+
+void BBvSetShortSlotTime(struct vnt_private *);
+void BBvSetVGAGainOffset(struct vnt_private *, u8 byData);
+void BBvSetAntennaMode(struct vnt_private *, u8 byAntennaMode);
+int BBbVT3184Init(struct vnt_private *);
+void BBvSetDeepSleep(struct vnt_private *);
+void BBvExitDeepSleep(struct vnt_private *);
+void BBvUpdatePreEDThreshold(struct vnt_private *, int bScanning);
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index 6a1394192248..e214fcf83868 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -91,9 +91,9 @@ const WORD awHWRetry1[5][5] = {
/*--------------------- Static Functions --------------------------*/
-void s_vCheckSensitivity(void *hDeviceContext);
-void s_vCheckPreEDThreshold(void *hDeviceContext);
-void s_uCalculateLinkQual(void *hDeviceContext);
+static void s_vCheckSensitivity(struct vnt_private *pDevice);
+static void s_vCheckPreEDThreshold(struct vnt_private *pDevice);
+static void s_uCalculateLinkQual(struct vnt_private *pDevice);
/*--------------------- Export Variables --------------------------*/
@@ -114,20 +114,19 @@ void s_uCalculateLinkQual(void *hDeviceContext);
*
-*/
-PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
- PBYTE pbyDesireBSSID,
- PBYTE pbyDesireSSID,
- CARD_PHY_TYPE ePhyType)
+PKnownBSS BSSpSearchBSSList(struct vnt_private *pDevice,
+ u8 *pbyDesireBSSID, u8 *pbyDesireSSID,
+ CARD_PHY_TYPE ePhyType)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PBYTE pbyBSSID = NULL;
- PWLAN_IE_SSID pSSID = NULL;
- PKnownBSS pCurrBSS = NULL;
- PKnownBSS pSelect = NULL;
- BYTE ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00};
- unsigned int ii = 0;
- unsigned int jj = 0;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u8 *pbyBSSID = NULL;
+ PWLAN_IE_SSID pSSID = NULL;
+ PKnownBSS pCurrBSS = NULL;
+ PKnownBSS pSelect = NULL;
+ u8 ZeroBSSID[WLAN_BSSID_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int ii = 0;
+ int jj = 0;
+
if (pbyDesireBSSID != NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"BSSpSearchBSSList BSSID[%pM]\n", pbyDesireBSSID);
@@ -142,15 +141,15 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
}
}
- if ((pbyBSSID != NULL)&&(pDevice->bRoaming == FALSE)) {
+ if ((pbyBSSID != NULL)&&(pDevice->bRoaming == false)) {
// match BSSID first
for (ii = 0; ii <MAX_BSS_NUM; ii++) {
pCurrBSS = &(pMgmt->sBSSList[ii]);
- pCurrBSS->bSelected = FALSE;
+ pCurrBSS->bSelected = false;
if ((pCurrBSS->bActive) &&
- (pCurrBSS->bSelected == FALSE)) {
+ (pCurrBSS->bSelected == false)) {
if (!compare_ether_addr(pCurrBSS->abyBSSID, pbyBSSID)) {
if (pSSID != NULL) {
// compare ssid
@@ -161,7 +160,7 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
) {
- pCurrBSS->bSelected = TRUE;
+ pCurrBSS->bSelected = true;
return(pCurrBSS);
}
}
@@ -170,7 +169,7 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
) {
- pCurrBSS->bSelected = TRUE;
+ pCurrBSS->bSelected = true;
return(pCurrBSS);
}
}
@@ -184,9 +183,9 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
//2007-0721-01<Mark>by MikeLiu
// if ((pCurrBSS->bActive) &&
- // (pCurrBSS->bSelected == FALSE)) {
+ // (pCurrBSS->bSelected == false)) {
- pCurrBSS->bSelected = FALSE;
+ pCurrBSS->bSelected = false;
if (pCurrBSS->bActive) {
if (pSSID != NULL) {
@@ -237,10 +236,9 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
pDevice->bSameBSSMaxNum = jj;
if (pSelect != NULL) {
- pSelect->bSelected = TRUE;
- if (pDevice->bRoaming == FALSE) {
+ pSelect->bSelected = true;
+ if (pDevice->bRoaming == false) {
// Einsn Add @20070907
- memset(pbyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
memcpy(pbyDesireSSID,pCurrBSS->abySSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1) ;
}
@@ -263,11 +261,10 @@ pDevice->bSameBSSMaxNum = jj;
-*/
-void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID)
+void BSSvClearBSSList(struct vnt_private *pDevice, int bKeepCurrBSSID)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int ii;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (bKeepCurrBSSID) {
@@ -277,12 +274,12 @@ void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID)
//mike mark: there are two BSSID's in list. If that AP is in hidden ssid mode, one SSID is null,
// but other's might not be obvious, so if it associate's with your STA,
// you must keep the two of them!!
- // bKeepCurrBSSID = FALSE;
+ // bKeepCurrBSSID = false;
continue;
}
}
- pMgmt->sBSSList[ii].bActive = FALSE;
+ pMgmt->sBSSList[ii].bActive = false;
memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
}
BSSvClearAnyBSSJoinRecord(pDevice);
@@ -296,17 +293,15 @@ void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID)
* search BSS list by BSSID & SSID if matched
*
* Return Value:
- * TRUE if found.
+ * true if found.
*
-*/
-PKnownBSS BSSpAddrIsInBSSList(void *hDeviceContext,
- PBYTE abyBSSID,
- PWLAN_IE_SSID pSSID)
+PKnownBSS BSSpAddrIsInBSSList(struct vnt_private *pDevice,
+ u8 *abyBSSID, PWLAN_IE_SSID pSSID)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PKnownBSS pBSSList = NULL;
- unsigned int ii;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ PKnownBSS pBSSList = NULL;
+ int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pBSSList = &(pMgmt->sBSSList[ii]);
@@ -333,36 +328,34 @@ PKnownBSS BSSpAddrIsInBSSList(void *hDeviceContext,
* Insert a BSS set into known BSS list
*
* Return Value:
- * TRUE if success.
+ * true if success.
*
-*/
-BOOL BSSbInsertToBSSList(void *hDeviceContext,
- PBYTE abyBSSIDAddr,
- QWORD qwTimestamp,
- WORD wBeaconInterval,
- WORD wCapInfo,
- BYTE byCurrChannel,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- unsigned int uIELength,
- PBYTE pbyIEs,
- void *pRxPacketContext)
+int BSSbInsertToBSSList(struct vnt_private *pDevice,
+ u8 *abyBSSIDAddr,
+ u64 qwTimestamp,
+ u16 wBeaconInterval,
+ u16 wCapInfo,
+ u8 byCurrChannel,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ u32 uIELength,
+ u8 *pbyIEs,
+ void *pRxPacketContext)
{
-
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
- PKnownBSS pBSSList = NULL;
- unsigned int ii;
- BOOL bParsingQuiet = FALSE;
-
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_rx_mgmt *pRxPacket =
+ (struct vnt_rx_mgmt *)pRxPacketContext;
+ PKnownBSS pBSSList = NULL;
+ unsigned int ii;
+ bool bParsingQuiet = false;
pBSSList = (PKnownBSS)&(pMgmt->sBSSList[0]);
@@ -375,13 +368,12 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
if (ii == MAX_BSS_NUM){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get free KnowBSS node failed.\n");
- return FALSE;
+ return false;
}
// save the BSS info
- pBSSList->bActive = TRUE;
+ pBSSList->bActive = true;
memcpy( pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
- HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp));
- LODWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(LODWORD(qwTimestamp));
+ pBSSList->qwBSSTimestamp = cpu_to_le64(qwTimestamp);
pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
pBSSList->uClearCount = 0;
@@ -412,7 +404,7 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
} else {
- if (pBSSList->sERP.bERPExist == TRUE) {
+ if (pBSSList->sERP.bERPExist == true) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
} else {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
@@ -428,7 +420,7 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
// assoc with BSS
if (pBSSList == pMgmt->pCurrBSS) {
- bParsingQuiet = TRUE;
+ bParsingQuiet = true;
}
}
@@ -458,27 +450,27 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
}
}
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == TRUE)) {
+ if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == true)) {
PSKeyItem pTransmitKey = NULL;
- BOOL bIs802_1x = FALSE;
+ bool bIs802_1x = false;
for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii ++) {
if (pBSSList->abyAKMSSAuthType[ii] == WLAN_11i_AKMSS_802_1X) {
- bIs802_1x = TRUE;
+ bIs802_1x = true;
break;
}
}
- if ((bIs802_1x == TRUE) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
+ if ((bIs802_1x == true) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
bAdd_PMKID_Candidate((void *) pDevice,
pBSSList->abyBSSID,
&pBSSList->sRSNCapObj);
- if ((pDevice->bLinkPass == TRUE) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == TRUE) ||
- (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == TRUE)) {
+ if ((pDevice->bLinkPass == true) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
+ if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == true) ||
+ (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == true)) {
pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
pDevice->gsPMKIDCandidate.Version = 1;
@@ -503,7 +495,7 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
- return TRUE;
+ return true;
}
@@ -513,43 +505,43 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
* Update BSS set in known BSS list
*
* Return Value:
- * TRUE if success.
+ * true if success.
*
-*/
// TODO: input structure modify
-BOOL BSSbUpdateToBSSList(void *hDeviceContext,
- QWORD qwTimestamp,
- WORD wBeaconInterval,
- WORD wCapInfo,
- BYTE byCurrChannel,
- BOOL bChannelHit,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- PKnownBSS pBSSList,
- unsigned int uIELength,
- PBYTE pbyIEs,
- void *pRxPacketContext)
+int BSSbUpdateToBSSList(struct vnt_private *pDevice,
+ u64 qwTimestamp,
+ u16 wBeaconInterval,
+ u16 wCapInfo,
+ u8 byCurrChannel,
+ int bChannelHit,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ PKnownBSS pBSSList,
+ u32 uIELength,
+ u8 *pbyIEs,
+ void *pRxPacketContext)
{
- int ii, jj;
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
- signed long ldBm, ldBmSum;
- BOOL bParsingQuiet = FALSE;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_rx_mgmt *pRxPacket =
+ (struct vnt_rx_mgmt *)pRxPacketContext;
+ int ii, jj;
+ signed long ldBm, ldBmSum;
+ bool bParsingQuiet = false;
if (pBSSList == NULL)
- return FALSE;
+ return false;
- HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp));
- LODWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(LODWORD(qwTimestamp));
+ pBSSList->qwBSSTimestamp = cpu_to_le64(qwTimestamp);
+
pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
pBSSList->uClearCount = 0;
@@ -574,7 +566,7 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
} else {
- if (pBSSList->sERP.bERPExist == TRUE) {
+ if (pBSSList->sERP.bERPExist == true) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
} else {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
@@ -591,7 +583,7 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
// assoc with BSS
if (pBSSList == pMgmt->pCurrBSS) {
- bParsingQuiet = TRUE;
+ bParsingQuiet = true;
}
}
@@ -643,7 +635,7 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
- return TRUE;
+ return true;
}
@@ -660,13 +652,11 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
*
-*/
-BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
- PBYTE abyDstAddr,
- unsigned int *puNodeIndex)
+int BSSbIsSTAInNodeDB(struct vnt_private *pDevice,
+ u8 *abyDstAddr, u32 *puNodeIndex)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int ii;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ unsigned int ii;
// Index = 0 reserved for AP Node
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
@@ -674,12 +664,12 @@ BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
if (!compare_ether_addr(abyDstAddr,
pMgmt->sNodeDBTable[ii].abyMACAddr)) {
*puNodeIndex = ii;
- return TRUE;
+ return true;
}
}
}
- return FALSE;
+ return false;
};
@@ -694,15 +684,14 @@ BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
* None
*
-*/
-void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
+void BSSvCreateOneNode(struct vnt_private *pDevice, u32 *puNodeIndex)
{
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int ii;
+ u32 BigestCount = 0;
+ u32 SelectIndex;
+ struct sk_buff *skb;
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int ii;
- unsigned int BigestCount = 0;
- unsigned int SelectIndex;
- struct sk_buff *skb;
// Index = 0 reserved for AP Node (In STA mode)
// Index = 0 reserved for Broadcast/MultiCast (In AP mode)
SelectIndex = 1;
@@ -733,7 +722,7 @@ void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
}
memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
- pMgmt->sNodeDBTable[*puNodeIndex].bActive = TRUE;
+ pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
// for AP mode PS queue
skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
@@ -755,13 +744,11 @@ void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
*
-*/
-void BSSvRemoveOneNode(void *hDeviceContext, unsigned int uNodeIndex)
+void BSSvRemoveOneNode(struct vnt_private *pDevice, u32 uNodeIndex)
{
-
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- struct sk_buff *skb;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ struct sk_buff *skb;
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL)
@@ -782,18 +769,15 @@ void BSSvRemoveOneNode(void *hDeviceContext, unsigned int uNodeIndex)
*
-*/
-void BSSvUpdateAPNode(void *hDeviceContext,
- PWORD pwCapInfo,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates)
+void BSSvUpdateAPNode(struct vnt_private *pDevice, u16 *pwCapInfo,
+ PWLAN_IE_SUPP_RATES pSuppRates, PWLAN_IE_SUPP_RATES pExtSuppRates)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u32 uRateLen = WLAN_RATES_MAXLEN;
memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
- pMgmt->sNodeDBTable[0].bActive = TRUE;
+ pMgmt->sNodeDBTable[0].bActive = true;
if (pDevice->byBBType == BB_TYPE_11B) {
uRateLen = WLAN_RATES_MAXLEN_11B;
}
@@ -806,7 +790,7 @@ void BSSvUpdateAPNode(void *hDeviceContext,
RATEvParseMaxRate((void *) pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[0].wMaxBasicRate),
&(pMgmt->sNodeDBTable[0].wMaxSuppRate),
&(pMgmt->sNodeDBTable[0].wSuppRate),
@@ -834,21 +818,20 @@ void BSSvUpdateAPNode(void *hDeviceContext,
*
-*/
-void BSSvAddMulticastNode(void *hDeviceContext)
+void BSSvAddMulticastNode(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
if (!pDevice->bEnableHostWEP)
memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].bActive = TRUE;
- pMgmt->sNodeDBTable[0].bPSEnable = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = true;
+ pMgmt->sNodeDBTable[0].bPSEnable = false;
skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
RATEvParseMaxRate((void *) pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[0].wMaxBasicRate),
&(pMgmt->sNodeDBTable[0].wMaxSuppRate),
&(pMgmt->sNodeDBTable[0].wSuppRate),
@@ -873,26 +856,25 @@ void BSSvAddMulticastNode(void *hDeviceContext)
*
-*/
-void BSSvSecondCallBack(void *hDeviceContext)
+void BSSvSecondCallBack(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int ii;
- PWLAN_IE_SSID pItemSSID, pCurrSSID;
- unsigned int uSleepySTACnt = 0;
- unsigned int uNonShortSlotSTACnt = 0;
- unsigned int uLongPreambleSTACnt = 0;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int ii;
+ PWLAN_IE_SSID pItemSSID, pCurrSSID;
+ u32 uSleepySTACnt = 0;
+ u32 uNonShortSlotSTACnt = 0;
+ u32 uLongPreambleSTACnt = 0;
spin_lock_irq(&pDevice->lock);
pDevice->uAssocCount = 0;
//Power Saving Mode Tx Burst
- if ( pDevice->bEnablePSMode == TRUE ) {
+ if ( pDevice->bEnablePSMode == true ) {
pDevice->ulPSModeWaitTx++;
if ( pDevice->ulPSModeWaitTx >= 2 ) {
pDevice->ulPSModeWaitTx = 0;
- pDevice->bPSModeTxBurst = FALSE;
+ pDevice->bPSModeTxBurst = false;
}
}
@@ -909,10 +891,10 @@ void BSSvSecondCallBack(void *hDeviceContext)
if(pDevice->byReAssocCount > 0) {
pDevice->byReAssocCount++;
- if((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != TRUE)) { //10 sec timeout
+ if((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout
printk("Re-association timeout!!!\n");
pDevice->byReAssocCount = 0;
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -921,13 +903,13 @@ if(pDevice->byReAssocCount > 0) {
wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
}
}
- else if(pDevice->bLinkPass == TRUE)
+ else if(pDevice->bLinkPass == true)
pDevice->byReAssocCount = 0;
}
pMgmt->eLastState = pMgmt->eCurrState ;
- s_uCalculateLinkQual((void *)pDevice);
+ s_uCalculateLinkQual(pDevice);
for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
@@ -1006,27 +988,27 @@ if(pDevice->byReAssocCount > 0) {
if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
if (!pDevice->bProtectMode) {
MACvEnableProtectMD(pDevice);
- pDevice->bProtectMode = TRUE;
+ pDevice->bProtectMode = true;
}
}
else {
if (pDevice->bProtectMode) {
MACvDisableProtectMD(pDevice);
- pDevice->bProtectMode = FALSE;
+ pDevice->bProtectMode = false;
}
}
// on/off short slot time
if (uNonShortSlotSTACnt > 0) {
if (pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = FALSE;
+ pDevice->bShortSlotTime = false;
BBvSetShortSlotTime(pDevice);
vUpdateIFS((void *)pDevice);
}
}
else {
if (!pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = TRUE;
+ pDevice->bShortSlotTime = true;
BBvSetShortSlotTime(pDevice);
vUpdateIFS((void *)pDevice);
}
@@ -1037,13 +1019,13 @@ if(pDevice->byReAssocCount > 0) {
if (uLongPreambleSTACnt > 0) {
if (!pDevice->bBarkerPreambleMd) {
MACvEnableBarkerPreambleMd(pDevice);
- pDevice->bBarkerPreambleMd = TRUE;
+ pDevice->bBarkerPreambleMd = true;
}
}
else {
if (pDevice->bBarkerPreambleMd) {
MACvDisableBarkerPreambleMd(pDevice);
- pDevice->bBarkerPreambleMd = FALSE;
+ pDevice->bBarkerPreambleMd = false;
}
}
@@ -1053,9 +1035,9 @@ if(pDevice->byReAssocCount > 0) {
// Check if any STA in PS mode, enable DTIM multicast deliver
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (uSleepySTACnt > 0)
- pMgmt->sNodeDBTable[0].bPSEnable = TRUE;
+ pMgmt->sNodeDBTable[0].bPSEnable = true;
else
- pMgmt->sNodeDBTable[0].bPSEnable = FALSE;
+ pMgmt->sNodeDBTable[0].bPSEnable = false;
}
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
@@ -1067,8 +1049,8 @@ if(pDevice->byReAssocCount > 0) {
if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
if (pDevice->bUpdateBBVGA) {
- /* s_vCheckSensitivity((void *) pDevice); */
- s_vCheckPreEDThreshold((void *) pDevice);
+ s_vCheckSensitivity(pDevice);
+ s_vCheckPreEDThreshold(pDevice);
}
if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
@@ -1080,14 +1062,14 @@ if(pDevice->byReAssocCount > 0) {
}
if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
- pMgmt->sNodeDBTable[0].bActive = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- pDevice->bRoaming = TRUE;
- pDevice->bIsRoaming = FALSE;
+ pDevice->bRoaming = true;
+ pDevice->bIsRoaming = false;
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost AP beacon [%d] sec, disconnected !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
/* let wpa supplicant know AP may disconnect */
@@ -1102,10 +1084,10 @@ if(pDevice->byReAssocCount > 0) {
}
else if (pItemSSID->len != 0) {
//Davidwang
- if ((pDevice->bEnableRoaming == TRUE)&&(!(pMgmt->Cisco_cckm))) {
+ if ((pDevice->bEnableRoaming == true)&&(!(pMgmt->Cisco_cckm))) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bRoaming %d, !\n", pDevice->bRoaming );
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
- if ((pDevice->bRoaming == TRUE)&&(pDevice->bIsRoaming == TRUE)){
+ if ((pDevice->bRoaming == true)&&(pDevice->bIsRoaming == true)){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fast Roaming ...\n");
BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
bScheduleCommand((void *) pDevice,
@@ -1116,12 +1098,12 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
pMgmt->abyDesireSSID);
pDevice->uAutoReConnectTime = 0;
pDevice->uIsroamingTime = 0;
- pDevice->bRoaming = FALSE;
+ pDevice->bRoaming = false;
}
- else if ((pDevice->bRoaming == FALSE)&&(pDevice->bIsRoaming == TRUE)) {
+ else if ((pDevice->bRoaming == false)&&(pDevice->bIsRoaming == true)) {
pDevice->uIsroamingTime++;
if (pDevice->uIsroamingTime >= 20)
- pDevice->bIsRoaming = FALSE;
+ pDevice->bIsRoaming = false;
}
}
@@ -1129,7 +1111,7 @@ else {
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
//network manager support need not do Roaming scan???
- if(pDevice->bWPASuppWextEnabled ==TRUE)
+ if(pDevice->bWPASuppWextEnabled ==true)
pDevice->uAutoReConnectTime = 0;
}
else {
@@ -1169,21 +1151,21 @@ else {
if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
if (pDevice->bUpdateBBVGA) {
- /* s_vCheckSensitivity((void *) pDevice); */
- s_vCheckPreEDThreshold((void *) pDevice);
+ s_vCheckSensitivity(pDevice);
+ s_vCheckPreEDThreshold(pDevice);
}
if (pMgmt->sNodeDBTable[0].uInActiveCount >=ADHOC_LOST_BEACON_COUNT) {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
pMgmt->eCurrState = WMAC_STATE_STARTED;
netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
}
}
}
- if (pDevice->bLinkPass == TRUE) {
+ if (pDevice->bLinkPass == true) {
if (netif_queue_stopped(pDevice->dev))
netif_wake_queue(pDevice->dev);
}
@@ -1207,22 +1189,19 @@ else {
*
-*/
-void BSSvUpdateNodeTxCounter(void *hDeviceContext,
- PSStatCounter pStatistic,
- BYTE byTSR,
- BYTE byPktNO)
+void BSSvUpdateNodeTxCounter(struct vnt_private *pDevice,
+ PSStatCounter pStatistic, u8 byTSR, u8 byPktNO)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int uNodeIndex = 0;
- BYTE byTxRetry;
- WORD wRate;
- WORD wFallBackRate = RATE_1M;
- BYTE byFallBack;
- unsigned int ii;
- PBYTE pbyDestAddr;
- BYTE byPktNum;
- WORD wFIFOCtl;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u32 uNodeIndex = 0;
+ u8 byTxRetry;
+ u16 wRate;
+ u16 wFallBackRate = RATE_1M;
+ u8 byFallBack;
+ int ii;
+ u8 *pbyDestAddr;
+ u8 byPktNum;
+ u16 wFIFOCtl;
byPktNum = (byPktNO & 0x0F) >> 4;
byTxRetry = (byTSR & 0xF0) >> 4;
@@ -1370,13 +1349,11 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
*
-*/
-void BSSvClearNodeDBTable(void *hDeviceContext,
- unsigned int uStartIndex)
+void BSSvClearNodeDBTable(struct vnt_private *pDevice, u32 uStartIndex)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- struct sk_buff *skb;
- unsigned int ii;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct sk_buff *skb;
+ int ii;
for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
@@ -1392,12 +1369,11 @@ void BSSvClearNodeDBTable(void *hDeviceContext,
}
};
-void s_vCheckSensitivity(void *hDeviceContext)
+static void s_vCheckSensitivity(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PKnownBSS pBSSList = NULL;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ii;
+ PKnownBSS pBSSList = NULL;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int ii;
if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
@@ -1424,7 +1400,7 @@ void s_vCheckSensitivity(void *hDeviceContext)
if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
pDevice->uBBVGADiffCount++;
if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
- bScheduleCommand((void *) pDevice,
+ bScheduleCommand(pDevice,
WLAN_CMD_CHANGE_BBSENSITIVITY,
NULL);
} else {
@@ -1435,13 +1411,12 @@ void s_vCheckSensitivity(void *hDeviceContext)
}
}
-void s_uCalculateLinkQual(void *hDeviceContext)
+static void s_uCalculateLinkQual(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- unsigned long TxOkRatio, TxCnt;
- unsigned long RxOkRatio, RxCnt;
- unsigned long RssiRatio;
- long ldBm;
+ unsigned long TxOkRatio, TxCnt;
+ unsigned long RxOkRatio, RxCnt;
+ unsigned long RssiRatio;
+ long ldBm;
TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
pDevice->scStatistic.TxRetryOkCount +
@@ -1451,7 +1426,7 @@ RxCnt = pDevice->scStatistic.RxFcsErrCnt +
TxOkRatio = (TxCnt < 6) ? 4000:((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
RxOkRatio = (RxCnt < 6) ? 2000:((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
//decide link quality
-if(pDevice->bLinkPass !=TRUE)
+if(pDevice->bLinkPass !=true)
{
pDevice->scStatistic.LinkQuality = 0;
pDevice->scStatistic.SignalStren = 0;
@@ -1478,28 +1453,28 @@ else
pDevice->scStatistic.TxRetryOkCount = 0;
}
-void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
+void BSSvClearAnyBSSJoinRecord(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int ii;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++)
- pMgmt->sBSSList[ii].bSelected = FALSE;
+ pMgmt->sBSSList[ii].bSelected = false;
+
+ return;
}
-void s_vCheckPreEDThreshold(void *hDeviceContext)
+static void s_vCheckPreEDThreshold(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PKnownBSS pBSSList = NULL;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ PKnownBSS pBSSList = NULL;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
if (pBSSList != NULL) {
pDevice->byBBPreEDRSSI = (BYTE) (~(pBSSList->ldBmAverRange) + 1);
- BBvUpdatePreEDThreshold(pDevice, FALSE);
+ BBvUpdatePreEDThreshold(pDevice, false);
}
}
}
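
Editor's note: the bssdb.c hunks replace the paired HIDWORD()/LODWORD() cpu_to_le32() stores with a single cpu_to_le64(). A minimal sketch of the resulting pattern with made-up names (the driver keeps the field as a plain u64; the __le64 annotation here is just the sparse-friendly spelling):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_bss {
	__le64 timestamp;	/* kept little-endian, as received over the air */
};

/* one 64-bit byte-order conversion instead of two 32-bit halves */
static void example_store_timestamp(struct example_bss *bss, u64 host_ts)
{
	bss->timestamp = cpu_to_le64(host_ts);
}
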
diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
index 6b2ec390e775..08091a0a7c40 100644
--- a/drivers/staging/vt6656/bssdb.h
+++ b/drivers/staging/vt6656/bssdb.h
@@ -79,21 +79,20 @@
//
typedef struct tagSERPObject {
- BOOL bERPExist;
+ bool bERPExist;
BYTE byERP;
} ERPObject, *PERPObject;
typedef struct tagSRSNCapObject {
- BOOL bRSNCapExist;
+ bool bRSNCapExist;
WORD wRSNCap;
} SRSNCapObject, *PSRSNCapObject;
// BSS info(AP)
-#pragma pack(1)
typedef struct tagKnownBSS {
// BSS info
- BOOL bActive;
+ bool bActive;
BYTE abyBSSID[WLAN_BSSID_LEN];
unsigned int uChannel;
BYTE abySuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
@@ -111,10 +110,10 @@ typedef struct tagKnownBSS {
signed long ldBmAverage[RSSI_STAT_COUNT];
signed long ldBmAverRange;
//For any BSSID selection improvment
- BOOL bSelected;
+ bool bSelected;
//++ WPA informations
- BOOL bWPAValid;
+ bool bWPAValid;
BYTE byGKType;
BYTE abyPKType[4];
WORD wPKCount;
@@ -125,7 +124,7 @@ typedef struct tagKnownBSS {
//--
//++ WPA2 informations
- BOOL bWPA2Valid;
+ bool bWPA2Valid;
BYTE byCSSGK;
WORD wCSSPKCount;
BYTE abyCSSPK[4];
@@ -142,8 +141,8 @@ typedef struct tagKnownBSS {
unsigned int uClearCount;
// BYTE abyIEs[WLAN_BEACON_FR_MAXLEN];
unsigned int uIELength;
- QWORD qwBSSTimestamp;
- QWORD qwLocalTSF; // local TSF timer
+ u64 qwBSSTimestamp;
+ u64 qwLocalTSF;/* local TSF timer */
CARD_PHY_TYPE eNetworkTypeInUse;
@@ -168,14 +167,14 @@ typedef enum tagNODE_STATE {
// STA node info
typedef struct tagKnownNodeDB {
// STA info
- BOOL bActive;
+ bool bActive;
BYTE abyMACAddr[WLAN_ADDR_LEN];
BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
WORD wTxDataRate;
- BOOL bShortPreamble;
- BOOL bERPExist;
- BOOL bShortSlotTime;
+ bool bShortPreamble;
+ bool bERPExist;
+ bool bShortSlotTime;
unsigned int uInActiveCount;
WORD wMaxBasicRate; //Get from byTopOFDMBasicRate or byTopCCKBasicRate which depends on packetTyp.
WORD wMaxSuppRate; //Records the highest supported rate getting from SuppRates IE and ExtSuppRates IE in Beacon.
@@ -189,15 +188,15 @@ typedef struct tagKnownNodeDB {
WORD wListenInterval;
WORD wAID;
NODE_STATE eNodeState;
- BOOL bPSEnable;
- BOOL bRxPSPoll;
+ bool bPSEnable;
+ bool bRxPSPoll;
BYTE byAuthSequence;
unsigned long ulLastRxJiffer;
BYTE bySuppRate;
DWORD dwFlags;
WORD wEnQueueCnt;
- BOOL bOnFly;
+ bool bOnFly;
unsigned long long KeyRSC;
BYTE byKeyIndex;
DWORD dwKeyIndex;
@@ -208,7 +207,7 @@ typedef struct tagKnownNodeDB {
BYTE abyWepKey[WLAN_WEPMAX_KEYLEN];
//
// Auto rate fallback vars
- BOOL bIsInFallback;
+ bool bIsInFallback;
unsigned int uAverageRSSI;
unsigned int uRateRecoveryTimeout;
unsigned int uRatePollTimeout;
@@ -226,80 +225,70 @@ typedef struct tagKnownNodeDB {
/*--------------------- Export Functions --------------------------*/
-PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
- PBYTE pbyDesireBSSID,
- PBYTE pbyDesireSSID,
- CARD_PHY_TYPE ePhyType);
-
-PKnownBSS BSSpAddrIsInBSSList(void *hDeviceContext,
- PBYTE abyBSSID,
- PWLAN_IE_SSID pSSID);
-
-void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID);
-
-BOOL BSSbInsertToBSSList(void *hDeviceContext,
- PBYTE abyBSSIDAddr,
- QWORD qwTimestamp,
- WORD wBeaconInterval,
- WORD wCapInfo,
- BYTE byCurrChannel,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- unsigned int uIELength,
- PBYTE pbyIEs,
- void *pRxPacketContext);
-
-BOOL BSSbUpdateToBSSList(void *hDeviceContext,
- QWORD qwTimestamp,
- WORD wBeaconInterval,
- WORD wCapInfo,
- BYTE byCurrChannel,
- BOOL bChannelHit,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- PKnownBSS pBSSList,
- unsigned int uIELength,
- PBYTE pbyIEs,
- void *pRxPacketContext);
-
-BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
- PBYTE abyDstAddr,
- unsigned int *puNodeIndex);
-
-void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex);
-
-void BSSvUpdateAPNode(void *hDeviceContext,
- PWORD pwCapInfo,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates);
-
-void BSSvSecondCallBack(void *hDeviceContext);
-
-void BSSvUpdateNodeTxCounter(void *hDeviceContext,
- PSStatCounter pStatistic,
- BYTE byTSR,
- BYTE byPktNO);
-
-void BSSvRemoveOneNode(void *hDeviceContext,
- unsigned int uNodeIndex);
-
-void BSSvAddMulticastNode(void *hDeviceContext);
-
-void BSSvClearNodeDBTable(void *hDeviceContext,
- unsigned int uStartIndex);
-
-void BSSvClearAnyBSSJoinRecord(void *hDeviceContext);
+PKnownBSS BSSpSearchBSSList(struct vnt_private *, u8 *pbyDesireBSSID,
+ u8 *pbyDesireSSID, CARD_PHY_TYPE ePhyType);
+
+PKnownBSS BSSpAddrIsInBSSList(struct vnt_private *, u8 *abyBSSID,
+ PWLAN_IE_SSID pSSID);
+
+void BSSvClearBSSList(struct vnt_private *, int bKeepCurrBSSID);
+
+int BSSbInsertToBSSList(struct vnt_private *,
+ u8 *abyBSSIDAddr,
+ u64 qwTimestamp,
+ u16 wBeaconInterval,
+ u16 wCapInfo,
+ u8 byCurrChannel,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ u32 uIELength,
+ u8 *pbyIEs,
+ void *pRxPacketContext);
+
+int BSSbUpdateToBSSList(struct vnt_private *,
+ u64 qwTimestamp,
+ u16 wBeaconInterval,
+ u16 wCapInfo,
+ u8 byCurrChannel,
+ int bChannelHit,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ PKnownBSS pBSSList,
+ u32 uIELength,
+ u8 *pbyIEs,
+ void *pRxPacketContext);
+
+int BSSbIsSTAInNodeDB(struct vnt_private *, PBYTE abyDstAddr,
+ u32 *puNodeIndex);
+
+void BSSvCreateOneNode(struct vnt_private *, u32 *puNodeIndex);
+
+void BSSvUpdateAPNode(struct vnt_private *, u16 *pwCapInfo,
+ PWLAN_IE_SUPP_RATES pItemRates, PWLAN_IE_SUPP_RATES pExtSuppRates);
+
+void BSSvSecondCallBack(struct vnt_private *);
+
+void BSSvUpdateNodeTxCounter(struct vnt_private *, PSStatCounter pStatistic,
+ u8 byTSR, u8 byPktNO);
+
+void BSSvRemoveOneNode(struct vnt_private *, u32 uNodeIndex);
+
+void BSSvAddMulticastNode(struct vnt_private *);
+
+void BSSvClearNodeDBTable(struct vnt_private *, u32 uStartIndex);
+
+void BSSvClearAnyBSSJoinRecord(struct vnt_private *);
#endif /* __BSSDB_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 826520b03383..22918a106d73 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -45,6 +45,7 @@
*
*/
+#include "device.h"
#include "tmacro.h"
#include "card.h"
#include "baseband.h"
@@ -91,9 +92,8 @@ const WORD cwRXBCNTSFOff[MAX_RATE] =
* Out:
* none
*/
-void CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel)
+void CARDbSetMediaChannel(struct vnt_private *pDevice, u32 uConnectionChannel)
{
-PSDevice pDevice = (PSDevice) pDeviceHandler;
if (pDevice->byBBType == BB_TYPE_11A) { // 15 ~ 38
if ((uConnectionChannel < (CB_MAX_CHANNEL_24G+1)) || (uConnectionChannel > CB_MAX_CHANNEL))
@@ -149,17 +149,17 @@ PSDevice pDevice = (PSDevice) pDeviceHandler;
* Return Value: response Control frame rate
*
*/
-static WORD swGetCCKControlRate(void *pDeviceHandler, WORD wRateIdx)
+static u16 swGetCCKControlRate(struct vnt_private *pDevice, u16 wRateIdx)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- unsigned int ui = (unsigned int)wRateIdx;
- while (ui > RATE_1M) {
- if (pDevice->wBasicRate & ((WORD)1 << ui)) {
- return (WORD)ui;
- }
- ui --;
- }
- return (WORD)RATE_1M;
+ u16 ui = wRateIdx;
+
+ while (ui > RATE_1M) {
+ if (pDevice->wBasicRate & (1 << ui))
+ return ui;
+ ui--;
+ }
+
+ return RATE_1M;
}
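The reworked swGetCCKControlRate() walks down from the requested index and returns the highest rate at or below it whose bit is set in pDevice->wBasicRate, falling back to RATE_1M. A standalone illustration of that lookup (the enum mirrors the RATE_* indices from datarate.h for the CCK rates; the basic-rate mask is an assumed value chosen for the demo):

	#include <stdio.h>
	#include <stdint.h>

	/* CCK rate indices, as in datarate.h: RATE_1M = 0, RATE_2M = 1, ... */
	enum { RATE_1M, RATE_2M, RATE_5M, RATE_11M };

	/* Same walk-down logic as swGetCCKControlRate(), lifted out for the demo. */
	static uint16_t cck_control_rate(uint16_t basic_rate_mask, uint16_t rate_idx)
	{
		uint16_t ui = rate_idx;

		while (ui > RATE_1M) {
			if (basic_rate_mask & (1 << ui))
				return ui;
			ui--;
		}
		return RATE_1M;
	}

	int main(void)
	{
		/* assumed mask: 1M, 2M and 11M are basic rates */
		uint16_t mask = (1 << RATE_1M) | (1 << RATE_2M) | (1 << RATE_11M);

		/* 5.5M is not basic, so the lookup drops to 2M (index 1) */
		printf("%u\n", (unsigned)cck_control_rate(mask, RATE_5M));
		return 0;
	}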
/*
@@ -175,28 +175,33 @@ static WORD swGetCCKControlRate(void *pDeviceHandler, WORD wRateIdx)
* Return Value: response Control frame rate
*
*/
-static WORD swGetOFDMControlRate(void *pDeviceHandler, WORD wRateIdx)
+static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- unsigned int ui = (unsigned int)wRateIdx;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BASIC RATE: %X\n", pDevice->wBasicRate);
-
- if (!CARDbIsOFDMinBasicRate(pDevice)) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
- if (wRateIdx > RATE_24M)
- wRateIdx = RATE_24M;
- return wRateIdx;
- }
- while (ui > RATE_11M) {
- if (pDevice->wBasicRate & ((WORD)1 << ui)) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"swGetOFDMControlRate : %d\n", ui);
- return (WORD)ui;
- }
- ui --;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"swGetOFDMControlRate: 6M\n");
- return (WORD)RATE_24M;
+ u16 ui = wRateIdx;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BASIC RATE: %X\n",
+ pDevice->wBasicRate);
+
+ if (!CARDbIsOFDMinBasicRate(pDevice)) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
+ if (wRateIdx > RATE_24M)
+ wRateIdx = RATE_24M;
+ return wRateIdx;
+ }
+
+ while (ui > RATE_11M) {
+ if (pDevice->wBasicRate & (1 << ui)) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "swGetOFDMControlRate: %d\n", ui);
+ return ui;
+ }
+ ui--;
+ }
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"swGetOFDMControlRate: 6M\n");
+
+ return RATE_24M;
}
/*
@@ -325,16 +330,15 @@ CARDvCalculateOFDMRParameter (
* Return Value: None.
*
*/
-void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType)
+void CARDvSetRSPINF(struct vnt_private *pDevice, u8 byBBType)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- BYTE abyServ[4] = {0,0,0,0}; // For CCK
- BYTE abySignal[4] = {0,0,0,0};
- WORD awLen[4] = {0,0,0,0};
- BYTE abyTxRate[9] = {0,0,0,0,0,0,0,0,0}; // For OFDM
- BYTE abyRsvTime[9] = {0,0,0,0,0,0,0,0,0};
- BYTE abyData[34];
- int i;
+ u8 abyServ[4] = {0, 0, 0, 0}; /* For CCK */
+ u8 abySignal[4] = {0, 0, 0, 0};
+ u16 awLen[4] = {0, 0, 0, 0};
+ u8 abyTxRate[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; /* For OFDM */
+ u8 abyRsvTime[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+ u8 abyData[34];
+ int i;
//RSPINF_b_1
BBvCalculateParameter(pDevice,
@@ -476,12 +480,10 @@ void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType)
* Return Value: None.
*
*/
-void vUpdateIFS(void *pDeviceHandler)
+void vUpdateIFS(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- //Set SIFS, DIFS, EIFS, SlotTime, CwMin
- BYTE byMaxMin = 0;
- BYTE byData[4];
+ u8 byMaxMin = 0;
+ u8 byData[4];
if (pDevice->byPacketType==PK_TYPE_11A) {//0000 0000 0000 0000,11a
pDevice->uSlot = C_SLOT_SHORT;
@@ -499,7 +501,7 @@ void vUpdateIFS(void *pDeviceHandler)
}
else {// PK_TYPE_11GA & PK_TYPE_11GB
BYTE byRate = 0;
- BOOL bOFDMRate = FALSE;
+ bool bOFDMRate = false;
unsigned int ii = 0;
PWLAN_IE_SUPP_RATES pItemRates = NULL;
@@ -511,25 +513,26 @@ void vUpdateIFS(void *pDeviceHandler)
}
pDevice->uDIFS = C_SIFS_BG + 2*pDevice->uSlot;
- pItemRates = (PWLAN_IE_SUPP_RATES)pDevice->sMgmtObj.abyCurrSuppRates;
+ pItemRates = (PWLAN_IE_SUPP_RATES)pDevice->vnt_mgmt.abyCurrSuppRates;
for (ii = 0; ii < pItemRates->len; ii++) {
byRate = (BYTE)(pItemRates->abyRates[ii]&0x7F);
if (RATEwGetRateIdx(byRate) > RATE_11M) {
- bOFDMRate = TRUE;
+ bOFDMRate = true;
break;
}
}
- if (bOFDMRate == FALSE) {
- pItemRates = (PWLAN_IE_SUPP_RATES)pDevice->sMgmtObj.abyCurrExtSuppRates;
+ if (bOFDMRate == false) {
+ pItemRates = (PWLAN_IE_SUPP_RATES)pDevice->vnt_mgmt
+ .abyCurrExtSuppRates;
for (ii = 0; ii < pItemRates->len; ii++) {
byRate = (BYTE)(pItemRates->abyRates[ii]&0x7F);
if (RATEwGetRateIdx(byRate) > RATE_11M) {
- bOFDMRate = TRUE;
+ bOFDMRate = true;
break;
}
}
}
- if (bOFDMRate == TRUE) {
+ if (bOFDMRate == true) {
pDevice->uCwMin = C_CWMIN_A;
byMaxMin = 4;
} else {
@@ -561,11 +564,10 @@ void vUpdateIFS(void *pDeviceHandler)
&byMaxMin);
}
-void CARDvUpdateBasicTopRate(void *pDeviceHandler)
+void CARDvUpdateBasicTopRate(struct vnt_private *pDevice)
{
-PSDevice pDevice = (PSDevice) pDeviceHandler;
-BYTE byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
-BYTE ii;
+ u8 byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
+ u8 ii;
//Determines the highest basic rate.
for (ii = RATE_54M; ii >= RATE_6M; ii --) {
@@ -597,13 +599,12 @@ BYTE ii;
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx)
+void CARDbAddBasicRate(struct vnt_private *pDevice, u16 wRateIdx)
{
-PSDevice pDevice = (PSDevice) pDeviceHandler;
-WORD wRate = (WORD)(1<<wRateIdx);
+ u16 wRate = (1 << wRateIdx);
pDevice->wBasicRate |= wRate;
@@ -611,21 +612,19 @@ WORD wRate = (WORD)(1<<wRateIdx);
CARDvUpdateBasicTopRate(pDevice);
}
-BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler)
+int CARDbIsOFDMinBasicRate(struct vnt_private *pDevice)
{
-PSDevice pDevice = (PSDevice) pDeviceHandler;
-int ii;
+ int ii;
for (ii = RATE_54M; ii >= RATE_6M; ii --) {
if ((pDevice->wBasicRate) & ((WORD)(1<<ii)))
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
-BYTE CARDbyGetPktType(void *pDeviceHandler)
+u8 CARDbyGetPktType(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
if (pDevice->byBBType == BB_TYPE_11A || pDevice->byBBType == BB_TYPE_11B) {
return (BYTE)pDevice->byBBType;
@@ -654,28 +653,18 @@ BYTE CARDbyGetPktType(void *pDeviceHandler)
* Return Value: TSF Offset value
*
*/
-QWORD CARDqGetTSFOffset (BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2)
+u64 CARDqGetTSFOffset(BYTE byRxRate, u64 qwTSF1, u64 qwTSF2)
{
- QWORD qwTSFOffset;
- WORD wRxBcnTSFOffst = 0;
+ u64 qwTSFOffset = 0;
+ WORD wRxBcnTSFOffst = 0;
- HIDWORD(qwTSFOffset) = 0;
- LODWORD(qwTSFOffset) = 0;
+ wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate % MAX_RATE];
- wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate%MAX_RATE];
- (qwTSF2).u.dwLowDword += (DWORD)(wRxBcnTSFOffst);
- if ((qwTSF2).u.dwLowDword < (DWORD)(wRxBcnTSFOffst)) {
- (qwTSF2).u.dwHighDword++;
- }
- LODWORD(qwTSFOffset) = LODWORD(qwTSF1) - LODWORD(qwTSF2);
- if (LODWORD(qwTSF1) < LODWORD(qwTSF2)) {
- // if borrow needed
- HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2) - 1 ;
- }
- else {
- HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2);
- };
- return (qwTSFOffset);
+ qwTSF2 += (u64)wRxBcnTSFOffst;
+
+ qwTSFOffset = qwTSF1 - qwTSF2;
+
+ return qwTSFOffset;
}
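The old CARDqGetTSFOffset() emulated a 64-bit subtraction with explicit low/high DWORD carry and borrow handling; once the TSF values are native u64, the same result is a single add and subtract. A standalone check of that equivalence (the borrow logic is reproduced here only for comparison, and an arbitrary per-rate offset stands in for the cwRXBCNTSFOff[] entry):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* old-style computation: split into 32-bit halves with manual carry/borrow */
	static uint64_t tsf_offset_split(uint32_t bcn_off, uint64_t tsf1, uint64_t tsf2)
	{
		uint32_t lo1 = (uint32_t)tsf1, hi1 = (uint32_t)(tsf1 >> 32);
		uint32_t lo2 = (uint32_t)tsf2, hi2 = (uint32_t)(tsf2 >> 32);
		uint32_t lo, hi;

		lo2 += bcn_off;
		if (lo2 < bcn_off)		/* carry into the high dword */
			hi2++;

		lo = lo1 - lo2;
		hi = (lo1 < lo2) ? hi1 - hi2 - 1 : hi1 - hi2;	/* borrow */

		return ((uint64_t)hi << 32) | lo;
	}

	/* new-style computation, as in the patched CARDqGetTSFOffset() */
	static uint64_t tsf_offset_u64(uint32_t bcn_off, uint64_t tsf1, uint64_t tsf2)
	{
		return tsf1 - (tsf2 + bcn_off);
	}

	int main(void)
	{
		uint64_t tsf1 = 0x100000200ULL;	/* crosses the 32-bit boundary */
		uint64_t tsf2 = 0xFFFFFFF0ULL;
		uint32_t off = 192;		/* arbitrary beacon TSF offset */

		assert(tsf_offset_split(off, tsf1, tsf2) ==
		       tsf_offset_u64(off, tsf1, tsf2));
		printf("offset = %llu\n",	/* prints 336 */
		       (unsigned long long)tsf_offset_u64(off, tsf1, tsf2));
		return 0;
	}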
@@ -695,33 +684,25 @@ QWORD CARDqGetTSFOffset (BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2)
* Return Value: none
*
*/
-void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate,
- QWORD qwBSSTimestamp, QWORD qwLocalTSF)
+void CARDvAdjustTSF(struct vnt_private *pDevice, u8 byRxRate,
+ u64 qwBSSTimestamp, u64 qwLocalTSF)
{
+ u64 qwTSFOffset = 0;
+ u8 pbyData[8];
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- QWORD qwTSFOffset;
- DWORD dwTSFOffset1,dwTSFOffset2;
- BYTE pbyData[8];
-
- HIDWORD(qwTSFOffset) = 0;
- LODWORD(qwTSFOffset) = 0;
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF);
// adjust TSF
// HW's TSF add TSF Offset reg
- dwTSFOffset1 = LODWORD(qwTSFOffset);
- dwTSFOffset2 = HIDWORD(qwTSFOffset);
-
- pbyData[0] = (BYTE)dwTSFOffset1;
- pbyData[1] = (BYTE)(dwTSFOffset1>>8);
- pbyData[2] = (BYTE)(dwTSFOffset1>>16);
- pbyData[3] = (BYTE)(dwTSFOffset1>>24);
- pbyData[4] = (BYTE)dwTSFOffset2;
- pbyData[5] = (BYTE)(dwTSFOffset2>>8);
- pbyData[6] = (BYTE)(dwTSFOffset2>>16);
- pbyData[7] = (BYTE)(dwTSFOffset2>>24);
+ pbyData[0] = (u8)qwTSFOffset;
+ pbyData[1] = (u8)(qwTSFOffset >> 8);
+ pbyData[2] = (u8)(qwTSFOffset >> 16);
+ pbyData[3] = (u8)(qwTSFOffset >> 24);
+ pbyData[4] = (u8)(qwTSFOffset >> 32);
+ pbyData[5] = (u8)(qwTSFOffset >> 40);
+ pbyData[6] = (u8)(qwTSFOffset >> 48);
+ pbyData[7] = (u8)(qwTSFOffset >> 56);
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_SET_TSFTBTT,
@@ -742,17 +723,15 @@ void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate,
* Out:
* qwCurrTSF - Current TSF counter
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL CARDbGetCurrentTSF(void *pDeviceHandler, PQWORD pqwCurrTSF)
+bool CARDbGetCurrentTSF(struct vnt_private *pDevice, u64 *pqwCurrTSF)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- LODWORD(*pqwCurrTSF) = LODWORD(pDevice->qwCurrTSF);
- HIDWORD(*pqwCurrTSF) = HIDWORD(pDevice->qwCurrTSF);
+ *pqwCurrTSF = pDevice->qwCurrTSF;
- return(TRUE);
+ return true;
}
@@ -764,19 +743,17 @@ BOOL CARDbGetCurrentTSF(void *pDeviceHandler, PQWORD pqwCurrTSF)
* In:
* pDevice - The adapter to be read
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL CARDbClearCurrentTSF(void *pDeviceHandler)
+bool CARDbClearCurrentTSF(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- MACvRegBitsOn(pDevice,MAC_REG_TFTCTL,TFTCTL_TSFCNTRST);
+ MACvRegBitsOn(pDevice, MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
- LODWORD(pDevice->qwCurrTSF) = 0;
- HIDWORD(pDevice->qwCurrTSF) = 0;
+ pDevice->qwCurrTSF = 0;
- return(TRUE);
+ return true;
}
/*
@@ -793,7 +770,7 @@ BOOL CARDbClearCurrentTSF(void *pDeviceHandler)
* Return Value: TSF value of next Beacon
*
*/
-QWORD CARDqGetNextTBTT (QWORD qwTSF, WORD wBeaconInterval)
+u64 CARDqGetNextTBTT(u64 qwTSF, WORD wBeaconInterval)
{
unsigned int uLowNextTBTT;
@@ -802,18 +779,19 @@ QWORD CARDqGetNextTBTT (QWORD qwTSF, WORD wBeaconInterval)
uBeaconInterval = wBeaconInterval * 1024;
// Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval
- uLowNextTBTT = (LODWORD(qwTSF) >> 10) << 10;
- uLowRemain = (uLowNextTBTT) % uBeaconInterval;
- uHighRemain = ((0x80000000 % uBeaconInterval)* 2 * HIDWORD(qwTSF))
- % uBeaconInterval;
- uLowRemain = (uHighRemain + uLowRemain) % uBeaconInterval;
- uLowRemain = uBeaconInterval - uLowRemain;
+ uLowNextTBTT = ((qwTSF & 0xffffffffU) >> 10) << 10;
+ uLowRemain = (uLowNextTBTT) % uBeaconInterval;
+ uHighRemain = ((0x80000000 % uBeaconInterval) * 2 * (u32)(qwTSF >> 32))
+ % uBeaconInterval;
+ uLowRemain = (uHighRemain + uLowRemain) % uBeaconInterval;
+ uLowRemain = uBeaconInterval - uLowRemain;
// check if carry when add one beacon interval
- if ((~uLowNextTBTT) < uLowRemain)
- HIDWORD(qwTSF) ++ ;
+ if ((~uLowNextTBTT) < uLowRemain)
+ qwTSF = ((qwTSF >> 32) + 1) << 32;
- LODWORD(qwTSF) = uLowNextTBTT + uLowRemain;
+ qwTSF = (qwTSF & 0xffffffff00000000UL) |
+ (u64)(uLowNextTBTT + uLowRemain);
return (qwTSF);
}
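CARDqGetNextTBTT() keeps the original split low/high remainder arithmetic, only recoded on top of the new u64 parameter. Because the beacon interval in microseconds is always a multiple of 1024, the formula quoted in the comment can also be evaluated directly once the TSF is a native 64-bit value; a sketch of that equivalent form (illustration only, not part of this patch):

	#include <linux/math64.h>	/* div_u64() */

	/* Direct form of "next TBTT = ((TSF / interval) + 1) * interval".
	 * wBeaconInterval is in TUs (1 TU = 1024 usec), so interval is the
	 * step the TSF counter is measured against.
	 */
	static u64 next_tbtt_direct(u64 qwTSF, u16 wBeaconInterval)
	{
		u32 interval = (u32)wBeaconInterval * 1024;

		return (div_u64(qwTSF, interval) + 1) * interval;
	}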
@@ -833,32 +811,24 @@ QWORD CARDqGetNextTBTT (QWORD qwTSF, WORD wBeaconInterval)
* Return Value: none
*
*/
-void CARDvSetFirstNextTBTT(void *pDeviceHandler, WORD wBeaconInterval)
+void CARDvSetFirstNextTBTT(struct vnt_private *pDevice, WORD wBeaconInterval)
{
+ u64 qwNextTBTT = 0;
+ u8 pbyData[8];
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- QWORD qwNextTBTT;
- DWORD dwLoTBTT,dwHiTBTT;
- BYTE pbyData[8];
-
- HIDWORD(qwNextTBTT) = 0;
- LODWORD(qwNextTBTT) = 0;
- CARDbClearCurrentTSF(pDevice);
+ CARDbClearCurrentTSF(pDevice);
//CARDbGetCurrentTSF(pDevice, &qwNextTBTT); //Get Local TSF counter
- qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
+ qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
// Set NextTBTT
- dwLoTBTT = LODWORD(qwNextTBTT);
- dwHiTBTT = HIDWORD(qwNextTBTT);
-
- pbyData[0] = (BYTE)dwLoTBTT;
- pbyData[1] = (BYTE)(dwLoTBTT>>8);
- pbyData[2] = (BYTE)(dwLoTBTT>>16);
- pbyData[3] = (BYTE)(dwLoTBTT>>24);
- pbyData[4] = (BYTE)dwHiTBTT;
- pbyData[5] = (BYTE)(dwHiTBTT>>8);
- pbyData[6] = (BYTE)(dwHiTBTT>>16);
- pbyData[7] = (BYTE)(dwHiTBTT>>24);
+ pbyData[0] = (u8)qwNextTBTT;
+ pbyData[1] = (u8)(qwNextTBTT >> 8);
+ pbyData[2] = (u8)(qwNextTBTT >> 16);
+ pbyData[3] = (u8)(qwNextTBTT >> 24);
+ pbyData[4] = (u8)(qwNextTBTT >> 32);
+ pbyData[5] = (u8)(qwNextTBTT >> 40);
+ pbyData[6] = (u8)(qwNextTBTT >> 48);
+ pbyData[7] = (u8)(qwNextTBTT >> 56);
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_SET_TSFTBTT,
@@ -887,27 +857,23 @@ void CARDvSetFirstNextTBTT(void *pDeviceHandler, WORD wBeaconInterval)
* Return Value: none
*
*/
-void CARDvUpdateNextTBTT(void *pDeviceHandler, QWORD qwTSF,
- WORD wBeaconInterval)
+void CARDvUpdateNextTBTT(struct vnt_private *pDevice, u64 qwTSF,
+ u16 wBeaconInterval)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- DWORD dwLoTBTT,dwHiTBTT;
- BYTE pbyData[8];
+ u8 pbyData[8];
qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
// Set NextTBTT
- dwLoTBTT = LODWORD(qwTSF);
- dwHiTBTT = HIDWORD(qwTSF);
-
- pbyData[0] = (BYTE)dwLoTBTT;
- pbyData[1] = (BYTE)(dwLoTBTT>>8);
- pbyData[2] = (BYTE)(dwLoTBTT>>16);
- pbyData[3] = (BYTE)(dwLoTBTT>>24);
- pbyData[4] = (BYTE)dwHiTBTT;
- pbyData[5] = (BYTE)(dwHiTBTT>>8);
- pbyData[6] = (BYTE)(dwHiTBTT>>16);
- pbyData[7] = (BYTE)(dwHiTBTT>>24);
+
+ pbyData[0] = (u8)qwTSF;
+ pbyData[1] = (u8)(qwTSF >> 8);
+ pbyData[2] = (u8)(qwTSF >> 16);
+ pbyData[3] = (u8)(qwTSF >> 24);
+ pbyData[4] = (u8)(qwTSF >> 32);
+ pbyData[5] = (u8)(qwTSF >> 40);
+ pbyData[6] = (u8)(qwTSF >> 48);
+ pbyData[7] = (u8)(qwTSF >> 56);
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_SET_TSFTBTT,
@@ -918,7 +884,8 @@ void CARDvUpdateNextTBTT(void *pDeviceHandler, QWORD qwTSF,
);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Card:Update Next TBTT[%8xh:%8xh] \n",(int)HIDWORD(qwTSF), (int)LODWORD(qwTSF));
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Card:Update Next TBTT[%8lx]\n", (unsigned long)qwTSF);
return;
}
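The eight byte-wise shifts above (repeated in CARDvAdjustTSF(), CARDvSetFirstNextTBTT() and here) are an open-coded little-endian store of the 64-bit value into the firmware message buffer. Assuming that byte order is indeed what MESSAGE_TYPE_SET_TSFTBTT expects, which is what the shifts imply, the kernel's unaligned helper expresses the same store in one call; a sketch (illustration only, not part of this patch):

	#include <asm/unaligned.h>	/* put_unaligned_le64() */

	/* Store the 64-bit TSF/TBTT value little-endian into the message
	 * payload; equivalent to the eight manual shifts above.
	 */
	static void tsf_to_payload(u8 *pbyData, u64 val)
	{
		put_unaligned_le64(val, pbyData);
	}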
@@ -932,18 +899,17 @@ void CARDvUpdateNextTBTT(void *pDeviceHandler, QWORD qwTSF,
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL CARDbRadioPowerOff(void *pDeviceHandler)
+int CARDbRadioPowerOff(struct vnt_private *pDevice)
{
-PSDevice pDevice = (PSDevice) pDeviceHandler;
-BOOL bResult = TRUE;
+ int bResult = true;
- //if (pDevice->bRadioOff == TRUE)
- // return TRUE;
+ //if (pDevice->bRadioOff == true)
+ // return true;
- pDevice->bRadioOff = TRUE;
+ pDevice->bRadioOff = true;
switch (pDevice->byRFType) {
case RF_AL2230:
@@ -973,23 +939,21 @@ BOOL bResult = TRUE;
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL CARDbRadioPowerOn(void *pDeviceHandler)
+int CARDbRadioPowerOn(struct vnt_private *pDevice)
{
-PSDevice pDevice = (PSDevice) pDeviceHandler;
-BOOL bResult = TRUE;
-
+ int bResult = true;
- if ((pDevice->bHWRadioOff == TRUE) || (pDevice->bRadioControlOff == TRUE)) {
- return FALSE;
+ if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
+ return false;
}
- //if (pDevice->bRadioOff == FALSE)
- // return TRUE;
+ //if (pDevice->bRadioOff == false)
+ // return true;
- pDevice->bRadioOff = FALSE;
+ pDevice->bRadioOff = false;
BBvExitDeepSleep(pDevice);
@@ -1009,9 +973,8 @@ BOOL bResult = TRUE;
return bResult;
}
-void CARDvSetBSSMode(void *pDeviceHandler)
+void CARDvSetBSSMode(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
// Set BB and packet type at the same time.//{{RobertYu:20050222, AL7230 have two TX PA output, only connet to b/g now
// so in 11a mode need to set the MAC Reg0x4C to 11b/g mode to turn on PA
if( (pDevice->byRFType == RF_AIROHA7230 ) && (pDevice->byBBType == BB_TYPE_11A) )
@@ -1068,30 +1031,23 @@ void CARDvSetBSSMode(void *pDeviceHandler)
* Return Value: none.
*
-*/
-BOOL
-CARDbChannelSwitch (
- void *pDeviceHandler,
- BYTE byMode,
- BYTE byNewChannel,
- BYTE byCount
- )
+int CARDbChannelSwitch(struct vnt_private *pDevice, u8 byMode,
+ u8 byNewChannel, u8 byCount)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- BOOL bResult = TRUE;
+ int bResult = true;
- if (byCount == 0) {
- pDevice->sMgmtObj.uCurrChannel = byNewChannel;
- CARDbSetMediaChannel(pDevice, byNewChannel);
-
- return bResult;
- }
+ if (byCount == 0) {
+ pDevice->vnt_mgmt.uCurrChannel = byNewChannel;
+ CARDbSetMediaChannel(pDevice, byNewChannel);
+ return bResult;
+ }
pDevice->byChannelSwitchCount = byCount;
pDevice->byNewChannel = byNewChannel;
- pDevice->bChannelSwitch = TRUE;
+ pDevice->bChannelSwitch = true;
if (byMode == 1) {
//bResult=CARDbStopTxPacket(pDevice, PKT_TYPE_802_11_ALL);
- pDevice->bStopDataPkt = TRUE;
+ pDevice->bStopDataPkt = true;
}
return bResult;
}
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index 55962b198831..5123bc7d0dcd 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -28,7 +28,7 @@
#ifndef __CARD_H__
#define __CARD_H__
-
+#include "device.h"
#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
@@ -58,31 +58,28 @@ typedef enum _CARD_OP_MODE {
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
+struct vnt_private;
-void CARDbSetMediaChannel(void *pDeviceHandler,
- unsigned int uConnectionChannel);
-void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType);
-void vUpdateIFS(void *pDeviceHandler);
-void CARDvUpdateBasicTopRate(void *pDeviceHandler);
-void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
-BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler);
-void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate,
- QWORD qwBSSTimestamp, QWORD qwLocalTSF);
-BOOL CARDbGetCurrentTSF(void *pDeviceHandler, PQWORD pqwCurrTSF);
-BOOL CARDbClearCurrentTSF(void *pDeviceHandler);
-void CARDvSetFirstNextTBTT(void *pDeviceHandler, WORD wBeaconInterval);
-void CARDvUpdateNextTBTT(void *pDeviceHandler, QWORD qwTSF,
+void CARDbSetMediaChannel(struct vnt_private *pDevice, u32 uConnectionChannel);
+void CARDvSetRSPINF(struct vnt_private *pDevice, u8 byBBType);
+void vUpdateIFS(struct vnt_private *pDevice);
+void CARDvUpdateBasicTopRate(struct vnt_private *pDevice);
+void CARDbAddBasicRate(struct vnt_private *pDevice, u16 wRateIdx);
+int CARDbIsOFDMinBasicRate(struct vnt_private *pDevice);
+void CARDvAdjustTSF(struct vnt_private *pDevice, u8 byRxRate,
+ u64 qwBSSTimestamp, u64 qwLocalTSF);
+bool CARDbGetCurrentTSF(struct vnt_private *pDevice, u64 *pqwCurrTSF);
+bool CARDbClearCurrentTSF(struct vnt_private *pDevice);
+void CARDvSetFirstNextTBTT(struct vnt_private *pDevice, WORD wBeaconInterval);
+void CARDvUpdateNextTBTT(struct vnt_private *pDevice, u64 qwTSF,
WORD wBeaconInterval);
-QWORD CARDqGetNextTBTT(QWORD qwTSF, WORD wBeaconInterval);
-QWORD CARDqGetTSFOffset(BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2);
-BOOL CARDbRadioPowerOff(void *pDeviceHandler);
-BOOL CARDbRadioPowerOn(void *pDeviceHandler);
-BYTE CARDbyGetPktType(void *pDeviceHandler);
-void CARDvSetBSSMode(void *pDeviceHandler);
-
-BOOL CARDbChannelSwitch(void *pDeviceHandler,
- BYTE byMode,
- BYTE byNewChannel,
- BYTE byCount);
+u64 CARDqGetNextTBTT(u64 qwTSF, WORD wBeaconInterval);
+u64 CARDqGetTSFOffset(BYTE byRxRate, u64 qwTSF1, u64 qwTSF2);
+int CARDbRadioPowerOff(struct vnt_private *pDevice);
+int CARDbRadioPowerOn(struct vnt_private *pDevice);
+u8 CARDbyGetPktType(struct vnt_private *pDevice);
+void CARDvSetBSSMode(struct vnt_private *pDevice);
+int CARDbChannelSwitch(struct vnt_private *pDevice, u8 byMode,
+ u8 byNewChannel, u8 byCount);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index 650217607858..4181e3e12ea9 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -50,63 +50,63 @@ static int msglevel = MSG_LEVEL_INFO;
static SChannelTblElement sChannelTbl[CB_MAX_CHANNEL+1] =
{
- {0, 0, FALSE},
- {1, 2412, TRUE},
- {2, 2417, TRUE},
- {3, 2422, TRUE},
- {4, 2427, TRUE},
- {5, 2432, TRUE},
- {6, 2437, TRUE},
- {7, 2442, TRUE},
- {8, 2447, TRUE},
- {9, 2452, TRUE},
- {10, 2457, TRUE},
- {11, 2462, TRUE},
- {12, 2467, TRUE},
- {13, 2472, TRUE},
- {14, 2484, TRUE},
- {183, 4915, TRUE}, //15
- {184, 4920, TRUE}, //16
- {185, 4925, TRUE}, //17
- {187, 4935, TRUE}, //18
- {188, 4940, TRUE}, //19
- {189, 4945, TRUE}, //20
- {192, 4960, TRUE}, //21
- {196, 4980, TRUE}, //22
- {7, 5035, TRUE}, //23
- {8, 5040, TRUE}, //24
- {9, 5045, TRUE}, //25
- {11, 5055, TRUE}, //26
- {12, 5060, TRUE}, //27
- {16, 5080, TRUE}, //28
- {34, 5170, TRUE}, //29
- {36, 5180, TRUE}, //30
- {38, 5190, TRUE}, //31
- {40, 5200, TRUE}, //32
- {42, 5210, TRUE}, //33
- {44, 5220, TRUE}, //34
- {46, 5230, TRUE}, //35
- {48, 5240, TRUE}, //36
- {52, 5260, TRUE}, //37
- {56, 5280, TRUE}, //38
- {60, 5300, TRUE}, //39
- {64, 5320, TRUE}, //40
- {100, 5500, TRUE}, //41
- {104, 5520, TRUE}, //42
- {108, 5540, TRUE}, //43
- {112, 5560, TRUE}, //44
- {116, 5580, TRUE}, //45
- {120, 5600, TRUE}, //46
- {124, 5620, TRUE}, //47
- {128, 5640, TRUE}, //48
- {132, 5660, TRUE}, //49
- {136, 5680, TRUE}, //50
- {140, 5700, TRUE}, //51
- {149, 5745, TRUE}, //52
- {153, 5765, TRUE}, //53
- {157, 5785, TRUE}, //54
- {161, 5805, TRUE}, //55
- {165, 5825, TRUE} //56
+ {0, 0, false},
+ {1, 2412, true},
+ {2, 2417, true},
+ {3, 2422, true},
+ {4, 2427, true},
+ {5, 2432, true},
+ {6, 2437, true},
+ {7, 2442, true},
+ {8, 2447, true},
+ {9, 2452, true},
+ {10, 2457, true},
+ {11, 2462, true},
+ {12, 2467, true},
+ {13, 2472, true},
+ {14, 2484, true},
+ {183, 4915, true}, //15
+ {184, 4920, true}, //16
+ {185, 4925, true}, //17
+ {187, 4935, true}, //18
+ {188, 4940, true}, //19
+ {189, 4945, true}, //20
+ {192, 4960, true}, //21
+ {196, 4980, true}, //22
+ {7, 5035, true}, //23
+ {8, 5040, true}, //24
+ {9, 5045, true}, //25
+ {11, 5055, true}, //26
+ {12, 5060, true}, //27
+ {16, 5080, true}, //28
+ {34, 5170, true}, //29
+ {36, 5180, true}, //30
+ {38, 5190, true}, //31
+ {40, 5200, true}, //32
+ {42, 5210, true}, //33
+ {44, 5220, true}, //34
+ {46, 5230, true}, //35
+ {48, 5240, true}, //36
+ {52, 5260, true}, //37
+ {56, 5280, true}, //38
+ {60, 5300, true}, //39
+ {64, 5320, true}, //40
+ {100, 5500, true}, //41
+ {104, 5520, true}, //42
+ {108, 5540, true}, //43
+ {112, 5560, true}, //44
+ {116, 5580, true}, //45
+ {120, 5600, true}, //46
+ {124, 5620, true}, //47
+ {128, 5640, true}, //48
+ {132, 5660, true}, //49
+ {136, 5680, true}, //50
+ {140, 5700, true}, //51
+ {149, 5745, true}, //52
+ {153, 5765, true}, //53
+ {157, 5785, true}, //54
+ {161, 5805, true}, //55
+ {165, 5825, true} //56
};
@@ -380,26 +380,26 @@ static struct
* 15 = 4.9G channel 183
* 16 = 4.9G channel 184
* .....
- * Output: TRUE if the specified 5GHz band is allowed to be used.
+ * Output: true if the specified 5GHz band is allowed to be used.
False otherwise.
// 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
// 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
************************************************************************/
-BOOL
+bool
ChannelValid(unsigned int CountryCode, unsigned int ChannelIndex)
{
- BOOL bValid;
+ bool bValid;
- bValid = FALSE;
+ bValid = false;
/*
* If Channel Index is invalid, return invalid
*/
if ((ChannelIndex > CB_MAX_CHANNEL) ||
(ChannelIndex == 0))
{
- bValid = FALSE;
+ bValid = false;
goto exit;
}
@@ -422,48 +422,47 @@ exit:
* 0x0000000000000003 means channel 1,2 are supported
* 0x000000000000000F means channel 1,2,..15 are supported
************************************************************************/
-BOOL
+bool
CHvChannelGetList (
unsigned int uCountryCodeIdx,
PBYTE pbyChannelTable
)
{
if (uCountryCodeIdx >= CCODE_MAX) {
- return (FALSE);
+ return (false);
}
memcpy(pbyChannelTable, ChannelRuleTab[uCountryCodeIdx].bChannelIdxList, CB_MAX_CHANNEL);
- return (TRUE);
+ return (true);
}
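CHvChannelGetList() copies the country's per-channel rule bytes into a caller-supplied CB_MAX_CHANNEL-byte table and fails only for an out-of-range country index. A hypothetical call, for illustration (the buffer name is made up; byZoneType is the zone index that CHvInitChannelTable() below checks against CCODE_MAX):

	u8 abyChannelTable[CB_MAX_CHANNEL];

	/* Non-zero entries mark allowed channels: entry ii describes
	 * channel index ii + 1, matching CHvInitChannelTable() below.
	 */
	if (CHvChannelGetList(pDevice->byZoneType, abyChannelTable)) {
		/* ... consult abyChannelTable[] ... */
	}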
-void CHvInitChannelTable(void *pDeviceHandler)
+void CHvInitChannelTable(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- BOOL bMultiBand = FALSE;
- unsigned int ii;
+ int bMultiBand = false;
+ int ii;
for (ii = 1; ii <= CB_MAX_CHANNEL; ii++)
- sChannelTbl[ii].bValid = FALSE;
+ sChannelTbl[ii].bValid = false;
switch (pDevice->byRFType) {
case RF_AL2230:
case RF_AL2230S:
case RF_VT3226:
case RF_VT3226D0:
- bMultiBand = FALSE;
+ bMultiBand = false;
break;
case RF_AIROHA7230:
case RF_VT3342A0:
default :
- bMultiBand = TRUE;
+ bMultiBand = true;
break;
}
if ((pDevice->dwDiagRefCount != 0) ||
- (pDevice->b11hEable == TRUE)) {
- if (bMultiBand == TRUE) {
+ (pDevice->b11hEable == true)) {
+ if (bMultiBand == true) {
for (ii = 0; ii < CB_MAX_CHANNEL; ii++) {
- sChannelTbl[ii+1].bValid = TRUE;
+ sChannelTbl[ii+1].bValid = true;
//pDevice->abyRegPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
//pDevice->abyLocalPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
}
@@ -473,16 +472,16 @@ void CHvInitChannelTable(void *pDeviceHandler)
}
} else {
for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
- sChannelTbl[ii+1].bValid = TRUE;
+ sChannelTbl[ii+1].bValid = true;
//pDevice->abyRegPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
//pDevice->abyLocalPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
}
}
} else if (pDevice->byZoneType <= CCODE_MAX) {
- if (bMultiBand == TRUE) {
+ if (bMultiBand == true) {
for (ii = 0; ii < CB_MAX_CHANNEL; ii++) {
if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii+1].bValid = TRUE;
+ sChannelTbl[ii+1].bValid = true;
//pDevice->abyRegPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
//pDevice->abyLocalPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
}
@@ -490,7 +489,7 @@ void CHvInitChannelTable(void *pDeviceHandler)
} else {
for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii+1].bValid = TRUE;
+ sChannelTbl[ii+1].bValid = true;
//pDevice->abyRegPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
//pDevice->abyLocalPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
}
diff --git a/drivers/staging/vt6656/channel.h b/drivers/staging/vt6656/channel.h
index e7b3c1231825..9914dba0ba0c 100644
--- a/drivers/staging/vt6656/channel.h
+++ b/drivers/staging/vt6656/channel.h
@@ -30,6 +30,7 @@
#ifndef _CHANNEL_H_
#define _CHANNEL_H_
+#include "device.h"
#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
@@ -39,17 +40,17 @@
typedef struct tagSChannelTblElement {
BYTE byChannelNumber;
unsigned int uFrequency;
- BOOL bValid;
+ bool bValid;
} SChannelTblElement, *PSChannelTblElement;
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-BOOL ChannelValid(unsigned int CountryCode, unsigned int ChannelNum);
-void CHvInitChannelTable(void *pDeviceHandler);
+bool ChannelValid(unsigned int CountryCode, unsigned int ChannelNum);
+void CHvInitChannelTable(struct vnt_private *pDevice);
BYTE CHbyGetChannelMapping(BYTE byChannelNumber);
-BOOL CHvChannelGetList(unsigned int uCountryCodeIdx, PBYTE pbyChannelTable);
+bool CHvChannelGetList(unsigned int uCountryCodeIdx, PBYTE pbyChannelTable);
#endif /* _CHANNEL_H_ */
diff --git a/drivers/staging/vt6656/control.c b/drivers/staging/vt6656/control.c
index 5d8c5719419b..743ef5fb7fe7 100644
--- a/drivers/staging/vt6656/control.c
+++ b/drivers/staging/vt6656/control.c
@@ -56,43 +56,34 @@
/*--------------------- Export Functions --------------------------*/
-void ControlvWriteByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
- BYTE byData)
+void ControlvWriteByte(struct vnt_private *pDevice, u8 reg, u8 reg_off,
+ u8 data)
{
- BYTE byData1;
- byData1 = byData;
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- byRegOfs,
- byRegType,
- 1,
- &byData1);
+
+ CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, reg_off, reg,
+ sizeof(u8), &data);
+
+ return;
}
-void ControlvReadByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
- PBYTE pbyData)
+void ControlvReadByte(struct vnt_private *pDevice, u8 reg, u8 reg_off,
+ u8 *data)
{
- int ntStatus;
- BYTE byData1;
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- byRegOfs,
- byRegType,
- 1,
- &byData1);
- *pbyData = byData1;
+ CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ,
+ reg_off, reg, sizeof(u8), data);
+ return;
}
-void ControlvMaskByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
- BYTE byMask, BYTE byData)
+void ControlvMaskByte(struct vnt_private *pDevice, u8 reg_type, u8 reg_off,
+ u8 reg_mask, u8 data)
{
- BYTE pbyData[2];
- pbyData[0] = byData;
- pbyData[1] = byMask;
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE_MASK,
- byRegOfs,
- byRegType,
- 2,
- pbyData);
+ u8 reg_data[2];
+
+ reg_data[0] = data;
+ reg_data[1] = reg_mask;
+
+ CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE_MASK, reg_off,
+ reg_type, ARRAY_SIZE(reg_data), reg_data);
+
+ return;
}
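ControlvMaskByte() now packs the payload as reg_data[0] = data, reg_data[1] = mask and sends it with MESSAGE_TYPE_WRITE_MASK, so only the masked bits of the target register are updated. A hypothetical call site, for illustration only (MESSAGE_REQUEST_MACREG is assumed to be the driver's MAC-register request type; MAC_REG_TFTCTL and TFTCTL_TSFCNTRST appear above in CARDbClearCurrentTSF(), and this exact call is not part of the patch):

	/* Set the TSF-counter-reset bit of MAC_REG_TFTCTL without touching
	 * the register's other bits; data and mask carry the same bit here.
	 */
	ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_TFTCTL,
			 TFTCTL_TSFCNTRST, TFTCTL_TSFCNTRST);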
diff --git a/drivers/staging/vt6656/control.h b/drivers/staging/vt6656/control.h
index bbe610fd8b5a..76ce0244e100 100644
--- a/drivers/staging/vt6656/control.h
+++ b/drivers/staging/vt6656/control.h
@@ -51,28 +51,14 @@
/*--------------------- Export Functions --------------------------*/
-void ControlvWriteByte(
- PSDevice pDevice,
- BYTE byRegType,
- BYTE byRegOfs,
- BYTE byData
- );
+void ControlvWriteByte(struct vnt_private *pDevice, u8 reg, u8 reg_off,
+ u8 data);
+void ControlvReadByte(struct vnt_private *pDevice, u8 reg, u8 reg_off,
+ u8 *data);
-void ControlvReadByte(
- PSDevice pDevice,
- BYTE byRegType,
- BYTE byRegOfs,
- PBYTE pbyData
- );
+void ControlvMaskByte(struct vnt_private *pDevice, u8 reg_type, u8 reg_off,
+ u8 reg_mask, u8 data);
-void ControlvMaskByte(
- PSDevice pDevice,
- BYTE byRegType,
- BYTE byRegOfs,
- BYTE byMask,
- BYTE byData
- );
-
#endif /* __CONTROL_H__ */
diff --git a/drivers/staging/vt6656/datarate.c b/drivers/staging/vt6656/datarate.c
index 5c2719fa72f7..77464e819f6d 100644
--- a/drivers/staging/vt6656/datarate.c
+++ b/drivers/staging/vt6656/datarate.c
@@ -54,7 +54,8 @@
/*--------------------- Static Variables --------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
+
+/* static int msglevel = MSG_LEVEL_DEBUG; */
static int msglevel =MSG_LEVEL_INFO;
const BYTE acbyIERate[MAX_RATE] =
{0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
@@ -71,7 +72,7 @@ void s_vResetCounter(PKnownNodeDB psNodeDBTable)
{
BYTE ii;
- // clear statistic counter for auto_rate
+ /* clear statistics counter for auto_rate */
for (ii = 0; ii <= MAX_RATE; ii++) {
psNodeDBTable->uTxOk[ii] = 0;
psNodeDBTable->uTxFail[ii] = 0;
@@ -105,8 +106,8 @@ DATARATEbyGetRateIdx (
{
BYTE ii;
- //Erase basicRate flag.
- byRate = byRate & 0x7F;//0111 1111
+ /* erase BasicRate flag */
+ byRate = byRate & 0x7F;
for (ii = 0; ii < MAX_RATE; ii ++) {
if (acbyIERate[ii] == byRate)
@@ -159,8 +160,8 @@ RATEwGetRateIdx(
{
WORD ii;
- //Erase basicRate flag.
- byRate = byRate & 0x7F;//0111 1111
+ /* erase BasicRate flag */
+ byRate = byRate & 0x7F;
for (ii = 0; ii < MAX_RATE; ii ++) {
if (acbyIERate[ii] == byRate)
@@ -188,28 +189,19 @@ RATEwGetRateIdx(
* Return Value: none
*
-*/
-void RATEvParseMaxRate(
- void *pDeviceHandler,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pItemExtRates,
- BOOL bUpdateBasicRate,
- PWORD pwMaxBasicRate,
- PWORD pwMaxSuppRate,
- PWORD pwSuppRate,
- PBYTE pbyTopCCKRate,
- PBYTE pbyTopOFDMRate
- )
-{
-PSDevice pDevice = (PSDevice) pDeviceHandler;
-unsigned int ii;
-BYTE byHighSuppRate = 0;
-BYTE byRate = 0;
-WORD wOldBasicRate = pDevice->wBasicRate;
-unsigned int uRateLen;
+void RATEvParseMaxRate(struct vnt_private *pDevice,
+ PWLAN_IE_SUPP_RATES pItemRates, PWLAN_IE_SUPP_RATES pItemExtRates,
+ int bUpdateBasicRate, u16 *pwMaxBasicRate, u16 *pwMaxSuppRate,
+ u16 *pwSuppRate, u8 *pbyTopCCKRate, u8 *pbyTopOFDMRate)
+{
+ int ii;
+ u8 byHighSuppRate = 0, byRate = 0;
+ u16 wOldBasicRate = pDevice->wBasicRate;
+ u32 uRateLen;
- if (pItemRates == NULL)
- return;
+ if (pItemRates == NULL)
+ return;
*pwSuppRate = 0;
uRateLen = pItemRates->len;
@@ -226,8 +218,11 @@ unsigned int uRateLen;
for (ii = 0; ii < uRateLen; ii++) {
byRate = (BYTE)(pItemRates->abyRates[ii]);
if (WLAN_MGMT_IS_BASICRATE(byRate) &&
- (bUpdateBasicRate == TRUE)) {
- // Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
+ (bUpdateBasicRate == true)) {
+ /*
+ * add to basic rate set, update pDevice->byTopCCKBasicRate and
+ * pDevice->byTopOFDMBasicRate
+ */
CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
}
@@ -248,9 +243,12 @@ unsigned int uRateLen;
for (ii = 0; ii < uExtRateLen ; ii++) {
byRate = (BYTE)(pItemExtRates->abyRates[ii]);
- // select highest basic rate
+ /* select highest basic rate */
if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
- // Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
+ /*
+ * add to basic rate set, update pDevice->byTopCCKBasicRate and
+ * pDevice->byTopOFDMBasicRate
+ */
CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
}
@@ -260,9 +258,11 @@ unsigned int uRateLen;
if (byRate > byHighSuppRate)
byHighSuppRate = byRate;
*pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
- //DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n", RATEwGetRateIdx(byRate), byRate));
+
+ /* DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n",
+ RATEwGetRateIdx(byRate), byRate)); */
}
- } //if(pItemExtRates != NULL)
+ }
if ((pDevice->byPacketType == PK_TYPE_11GB)
&& CARDbIsOFDMinBasicRate((void *)pDevice)) {
@@ -301,27 +301,24 @@ unsigned int uRateLen;
#define AUTORATE_TXCNT_THRESHOLD 20
#define AUTORATE_INC_THRESHOLD 30
-void
-RATEvTxRateFallBack(
- void *pDeviceHandler,
- PKnownNodeDB psNodeDBTable
- )
+void RATEvTxRateFallBack(struct vnt_private *pDevice,
+ PKnownNodeDB psNodeDBTable)
{
-PSDevice pDevice = (PSDevice) pDeviceHandler;
-PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-WORD wIdxDownRate = 0;
-unsigned int ii;
-BOOL bAutoRate[MAX_RATE] = {TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE};
-DWORD dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
-DWORD dwThroughput = 0;
-WORD wIdxUpRate = 0;
-DWORD dwTxDiff = 0;
-
- if (pMgmt->eScanState != WMAC_NO_SCANNING) {
- // Don't do Fallback when scanning Channel
- return;
- }
- psNodeDBTable->uTimeCount ++;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u16 wIdxDownRate = 0;
+ int ii;
+ int bAutoRate[MAX_RATE] = {true, true, true, true, false, false, true,
+ true, true, true, true, true};
+ u32 dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180,
+ 240, 360, 480, 540};
+ u32 dwThroughput = 0;
+ u16 wIdxUpRate = 0;
+ u32 dwTxDiff = 0;
+
+ if (pMgmt->eScanState != WMAC_NO_SCANNING)
+ return; /* Don't do Fallback when scanning Channel */
+
+ psNodeDBTable->uTimeCount++;
if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE])
dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE];
@@ -338,11 +335,11 @@ DWORD dwTxDiff = 0;
for (ii = 0; ii < MAX_RATE; ii++) {
if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
- if (bAutoRate[ii] == TRUE) {
+ if (bAutoRate[ii] == true) {
wIdxUpRate = (WORD) ii;
}
} else {
- bAutoRate[ii] = FALSE;
+ bAutoRate[ii] = false;
}
}
@@ -364,7 +361,7 @@ DWORD dwTxDiff = 0;
for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
ii--;
if ( (dwThroughputTbl[ii] > dwThroughput) &&
- (bAutoRate[ii]==TRUE) ) {
+ (bAutoRate[ii]==true) ) {
dwThroughput = dwThroughputTbl[ii];
wIdxDownRate = (WORD) ii;
}
@@ -375,7 +372,7 @@ DWORD dwTxDiff = 0;
(psNodeDBTable->uTxFail[MAX_RATE] * 4) ) {
psNodeDBTable->wTxDataRate = wIdxUpRate;
}
- }else { // adhoc, if uTxOk(total) =0 & uTxFail(total) = 0
+ } else { /* adhoc, if uTxOk(total) == 0 & uTxFail(total) == 0 */
if (psNodeDBTable->uTxFail[MAX_RATE] == 0)
psNodeDBTable->wTxDataRate = wIdxUpRate;
}
diff --git a/drivers/staging/vt6656/datarate.h b/drivers/staging/vt6656/datarate.h
index c6f5163ff9b8..8dc55bd61669 100644
--- a/drivers/staging/vt6656/datarate.h
+++ b/drivers/staging/vt6656/datarate.h
@@ -29,18 +29,18 @@
#ifndef __DATARATE_H__
#define __DATARATE_H__
-/*--------------------- Export Definitions -------------------------*/
-#define FALLBACK_PKT_COLLECT_TR_H 50 // pkts
-#define FALLBACK_PKT_COLLECT_TR_L 10 // pkts
-#define FALLBACK_POLL_SECOND 5 // 5 sec
-#define FALLBACK_RECOVER_SECOND 30 // 30 sec
-#define FALLBACK_THRESHOLD 15 // percent
-#define UPGRADE_THRESHOLD 5 // percent
-#define UPGRADE_CNT_THRD 3 // times
-#define RETRY_TIMES_THRD_H 2 // times
-#define RETRY_TIMES_THRD_L 1 // times
+/*--------------------- Export Definitions -------------------------*/
+#define FALLBACK_PKT_COLLECT_TR_H 50 /* pkts */
+#define FALLBACK_PKT_COLLECT_TR_L 10 /* pkts */
+#define FALLBACK_POLL_SECOND 5 /* 5 sec */
+#define FALLBACK_RECOVER_SECOND 30 /* 30 sec */
+#define FALLBACK_THRESHOLD 15 /* percent */
+#define UPGRADE_THRESHOLD 5 /* percent */
+#define UPGRADE_CNT_THRD 3 /* times */
+#define RETRY_TIMES_THRD_H 2 /* times */
+#define RETRY_TIMES_THRD_L 1 /* times */
#define RATE_1M 0
#define RATE_2M 1
@@ -69,24 +69,13 @@
-void
-RATEvParseMaxRate(
- void *pDeviceHandler,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pItemExtRates,
- BOOL bUpdateBasicRate,
- PWORD pwMaxBasicRate,
- PWORD pwMaxSuppRate,
- PWORD pwSuppRate,
- PBYTE pbyTopCCKRate,
- PBYTE pbyTopOFDMRate
- );
+void RATEvParseMaxRate(struct vnt_private *, PWLAN_IE_SUPP_RATES pItemRates,
+ PWLAN_IE_SUPP_RATES pItemExtRates, int bUpdateBasicRate,
+ u16 *pwMaxBasicRate, u16 *pwMaxSuppRate, u16 *pwSuppRate,
+ u8 *pbyTopCCKRate, u8 *pbyTopOFDMRate);
-void
-RATEvTxRateFallBack(
- void *pDeviceHandler,
- PKnownNodeDB psNodeDBTable
- );
+void RATEvTxRateFallBack(struct vnt_private *pDevice,
+ PKnownNodeDB psNodeDBTable);
BYTE
RATEuSetIE(
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index 5007e98d1b0e..0c0b614aaa11 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -36,92 +36,95 @@
#include "ttype.h"
#include "tether.h"
-// max transmit or receive buffer size
-#define CB_MAX_BUF_SIZE 2900U // max buffer size
- // NOTE: must be multiple of 4
+/* max transmit or receive buffer size */
+#define CB_MAX_BUF_SIZE 2900U /* NOTE: must be multiple of 4 */
-#define CB_MAX_TX_BUF_SIZE CB_MAX_BUF_SIZE // max Tx buffer size
-#define CB_MAX_RX_BUF_SIZE_NORMAL CB_MAX_BUF_SIZE // max Rx buffer size when not use Multi-RD
+/* max TX buffer size */
+#define CB_MAX_TX_BUF_SIZE CB_MAX_BUF_SIZE
+/* max RX buffer size when not use Multi-RD */
+#define CB_MAX_RX_BUF_SIZE_NORMAL CB_MAX_BUF_SIZE
-#define CB_BEACON_BUF_SIZE 512U // default beacon buffer size
+#define CB_BEACON_BUF_SIZE 512U /* default beacon buffer size */
#define MAX_TOTAL_SIZE_WITH_ALL_HEADERS CB_MAX_BUF_SIZE
#define MAX_INTERRUPT_SIZE 32
-#define RX_BLOCKS 64 // form 0x60 to 0xA0
-#define TX_BLOCKS 32 // from 0xA0 to 0xC0
-
-#define CB_MAX_RX_DESC 128 // max # of descriptor
-#define CB_MIN_RX_DESC 16 // min # of rx descriptor
-#define CB_MAX_TX_DESC 128 // max # of descriptor
-#define CB_MIN_TX_DESC 16 // min # of tx descriptor
-
-#define CB_RD_NUM 64 // default # of RD
-#define CB_TD_NUM 64 // default # of TD
-
-//
-// Bits in the RSR register
-//
-#define RSR_ADDRBROAD 0x80 // 1000 0000
-#define RSR_ADDRMULTI 0x40 // 0100 0000
-#define RSR_ADDRUNI 0x00 // 0000 0000
-#define RSR_IVLDTYP 0x20 // 0010 0000 , invalid packet type
-#define RSR_IVLDLEN 0x10 // 0001 0000 , invalid len (> 2312 byte)
-#define RSR_BSSIDOK 0x08 // 0000 1000
-#define RSR_CRCOK 0x04 // 0000 0100
-#define RSR_BCNSSIDOK 0x02 // 0000 0010
-#define RSR_ADDROK 0x01 // 0000 0001
-
-//
-// Bits in the new RSR register
-//
-#define NEWRSR_DECRYPTOK 0x10 // 0001 0000
-#define NEWRSR_CFPIND 0x08 // 0000 1000
-#define NEWRSR_HWUTSF 0x04 // 0000 0100
-#define NEWRSR_BCNHITAID 0x02 // 0000 0010
-#define NEWRSR_BCNHITAID0 0x01 // 0000 0001
-
-//
-// Bits in the TSR register
-//
-#define TSR_RETRYTMO 0x08 // 0000 1000
-#define TSR_TMO 0x04 // 0000 0100
-#define TSR_ACKDATA 0x02 // 0000 0010
-#define TSR_VALID 0x01 // 0000 0001
+#define RX_BLOCKS 64 /* from 0x60 to 0xA0 */
+#define TX_BLOCKS 32 /* from 0xA0 to 0xC0 */
+
+#define CB_MAX_RX_DESC 128 /* max # of descriptors */
+#define CB_MIN_RX_DESC 16 /* min # of RX descriptors */
+#define CB_MAX_TX_DESC 128 /* max # of descriptors */
+#define CB_MIN_TX_DESC 16 /* min # of TX descriptors */
+
+#define CB_RD_NUM 64 /* default # of RD */
+#define CB_TD_NUM 64 /* default # of TD */
+
+/*
+ * bits in the RSR register
+ */
+#define RSR_ADDRBROAD 0x80
+#define RSR_ADDRMULTI 0x40
+#define RSR_ADDRUNI 0x00
+#define RSR_IVLDTYP 0x20 /* invalid packet type */
+#define RSR_IVLDLEN 0x10 /* invalid len (> 2312 byte) */
+#define RSR_BSSIDOK 0x08
+#define RSR_CRCOK 0x04
+#define RSR_BCNSSIDOK 0x02
+#define RSR_ADDROK 0x01
+
+/*
+ * bits in the new RSR register
+ */
+#define NEWRSR_DECRYPTOK 0x10
+#define NEWRSR_CFPIND 0x08
+#define NEWRSR_HWUTSF 0x04
+#define NEWRSR_BCNHITAID 0x02
+#define NEWRSR_BCNHITAID0 0x01
+
+/*
+ * bits in the TSR register
+ */
+#define TSR_RETRYTMO 0x08
+#define TSR_TMO 0x04
+#define TSR_ACKDATA 0x02
+#define TSR_VALID 0x01
#define CB_PROTOCOL_RESERVED_SECTION 16
-// if retrys excess 15 times , tx will abort, and
-// if tx fifo underflow, tx will fail
-// we should try to resend it
+/*
+ * if retries exceed 15 times, TX will abort, and
+ * if TX fifo underflow, TX will fail
+ * we should try to resend it
+ */
#define CB_MAX_TX_ABORT_RETRY 3
-#define FIFOCTL_AUTO_FB_1 0x1000 // 0001 0000 0000 0000
-#define FIFOCTL_AUTO_FB_0 0x0800 // 0000 1000 0000 0000
-#define FIFOCTL_GRPACK 0x0400 // 0000 0100 0000 0000
-#define FIFOCTL_11GA 0x0300 // 0000 0011 0000 0000
-#define FIFOCTL_11GB 0x0200 // 0000 0010 0000 0000
-#define FIFOCTL_11B 0x0100 // 0000 0001 0000 0000
-#define FIFOCTL_11A 0x0000 // 0000 0000 0000 0000
-#define FIFOCTL_RTS 0x0080 // 0000 0000 1000 0000
-#define FIFOCTL_ISDMA0 0x0040 // 0000 0000 0100 0000
-#define FIFOCTL_GENINT 0x0020 // 0000 0000 0010 0000
-#define FIFOCTL_TMOEN 0x0010 // 0000 0000 0001 0000
-#define FIFOCTL_LRETRY 0x0008 // 0000 0000 0000 1000
-#define FIFOCTL_CRCDIS 0x0004 // 0000 0000 0000 0100
-#define FIFOCTL_NEEDACK 0x0002 // 0000 0000 0000 0010
-#define FIFOCTL_LHEAD 0x0001 // 0000 0000 0000 0001
-
-//WMAC definition Frag Control
-#define FRAGCTL_AES 0x0300 // 0000 0011 0000 0000
-#define FRAGCTL_TKIP 0x0200 // 0000 0010 0000 0000
-#define FRAGCTL_LEGACY 0x0100 // 0000 0001 0000 0000
-#define FRAGCTL_NONENCRYPT 0x0000 // 0000 0000 0000 0000
-#define FRAGCTL_ENDFRAG 0x0003 // 0000 0000 0000 0011
-#define FRAGCTL_MIDFRAG 0x0002 // 0000 0000 0000 0010
-#define FRAGCTL_STAFRAG 0x0001 // 0000 0000 0000 0001
-#define FRAGCTL_NONFRAG 0x0000 // 0000 0000 0000 0000
+#define FIFOCTL_AUTO_FB_1 0x1000
+#define FIFOCTL_AUTO_FB_0 0x0800
+#define FIFOCTL_GRPACK 0x0400
+#define FIFOCTL_11GA 0x0300
+#define FIFOCTL_11GB 0x0200
+#define FIFOCTL_11B 0x0100
+#define FIFOCTL_11A 0x0000
+#define FIFOCTL_RTS 0x0080
+#define FIFOCTL_ISDMA0 0x0040
+#define FIFOCTL_GENINT 0x0020
+#define FIFOCTL_TMOEN 0x0010
+#define FIFOCTL_LRETRY 0x0008
+#define FIFOCTL_CRCDIS 0x0004
+#define FIFOCTL_NEEDACK 0x0002
+#define FIFOCTL_LHEAD 0x0001
+
+/* WMAC definition Frag Control */
+#define FRAGCTL_AES 0x0300
+#define FRAGCTL_TKIP 0x0200
+#define FRAGCTL_LEGACY 0x0100
+#define FRAGCTL_NONENCRYPT 0x0000
+#define FRAGCTL_ENDFRAG 0x0003
+#define FRAGCTL_MIDFRAG 0x0002
+#define FRAGCTL_STAFRAG 0x0001
+#define FRAGCTL_NONFRAG 0x0000
#define TYPE_TXDMA0 0
#define TYPE_AC0DMA 1
@@ -135,14 +138,14 @@
#define TYPE_RXDMA1 1
#define TYPE_MAXRD 2
-// TD_INFO flags control bit
-#define TD_FLAGS_NETIF_SKB 0x01 // check if need release skb
-#define TD_FLAGS_PRIV_SKB 0x02 // check if called from private skb(hostap)
-#define TD_FLAGS_PS_RETRY 0x04 // check if PS STA frame re-transmit
+/* TD_INFO flags control bit */
+#define TD_FLAGS_NETIF_SKB 0x01 /* check if need release skb */
+#define TD_FLAGS_PRIV_SKB 0x02 /* check if called from private skb(hostap) */
+#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
-//
-// RsvTime buffer header
-//
+/*
+ * RsvTime buffer header
+ */
typedef struct tagSRrvTime_gRTS {
WORD wRTSTxRrvTime_ba;
WORD wRTSTxRrvTime_aa;
@@ -181,9 +184,9 @@ SRrvTime_atim, *PSRrvTime_atim;
typedef const SRrvTime_atim *PCSRrvTime_atim;
-//
-// RTS buffer header
-//
+/*
+ * RTS buffer header
+ */
typedef struct tagSRTSData {
WORD wFrameControl;
WORD wDurationID;
@@ -257,9 +260,9 @@ SRTS_a_FB, *PSRTS_a_FB;
typedef const SRTS_a_FB *PCSRTS_a_FB;
-//
-// CTS buffer header
-//
+/*
+ * CTS buffer header
+ */
typedef struct tagSCTSData {
WORD wFrameControl;
WORD wDurationID;
@@ -294,9 +297,9 @@ SCTS_FB, *PSCTS_FB;
typedef const SCTS_FB *PCSCTS_FB;
-//
-// Tx FIFO header
-//
+/*
+ * TX FIFO header
+ */
typedef struct tagSTxBufHead {
u32 adwTxKey[4];
WORD wFIFOCtl;
@@ -314,9 +317,9 @@ typedef struct tagSTxShortBufHead {
STxShortBufHead, *PSTxShortBufHead;
typedef const STxShortBufHead *PCSTxShortBufHead;
-//
-// Tx data header
-//
+/*
+ * TX data header
+ */
typedef struct tagSTxDataHead_g {
BYTE bySignalField_b;
BYTE byServiceField_b;
@@ -372,9 +375,9 @@ typedef struct tagSTxDataHead_a_FB {
STxDataHead_a_FB, *PSTxDataHead_a_FB;
typedef const STxDataHead_a_FB *PCSTxDataHead_a_FB;
-//
-// MICHDR data header
-//
+/*
+ * MICHDR data header
+ */
typedef struct tagSMICHDRHead {
u32 adwHDR0[4];
u32 adwHDR1[4];
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 25bf03af7733..6bba2e06fa64 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -53,7 +53,7 @@
#undef DEVICE_ETHTOOL_IOCTL_SUPPORT
#endif
-//please copy below macro to driver_event.c for API
+/* please copy below macro to driver_event.c for API */
#define RT_INSMOD_EVENT_FLAG 0x0101
#define RT_UPDEV_EVENT_FLAG 0x0102
#define RT_DISCONNECTED_EVENT_FLAG 0x0103
@@ -61,9 +61,9 @@
#define RT_DOWNDEV_EVENT_FLAG 0x0105
#define RT_RMMOD_EVENT_FLAG 0x0106
-//
-// device specific
-//
+/*
+ * device specific
+ */
#include "device_cfg.h"
#include "ttype.h"
@@ -110,7 +110,7 @@
#define FB_RATE0 0
#define FB_RATE1 1
-// Antenna Mode
+/* Antenna Mode */
#define ANT_A 0
#define ANT_B 1
#define ANT_DIVERSITY 2
@@ -125,7 +125,7 @@
#define MAXCHECKHANGCNT 4
-//Packet type
+/* Packet type */
#define TX_PKT_UNI 0x00
#define TX_PKT_MULTI 0x01
#define TX_PKT_BROAD 0x02
@@ -137,7 +137,7 @@
#define RUN_AT(x) (jiffies+(x))
#endif
-// DMA related
+/* DMA related */
#define RESERV_AC0DMA 4
#define PRIVATE_Message 0
@@ -161,30 +161,30 @@ typedef enum __device_init_type {
DEVICE_INIT_DXPL /* Dx to D0 power lost init */
} DEVICE_INIT_TYPE, *PDEVICE_INIT_TYPE;
-//USB
+/* USB */
-//
-// Enum of context types for SendPacket
-//
+/*
+ * Enum of context types for SendPacket
+ */
typedef enum _CONTEXT_TYPE {
CONTEXT_DATA_PACKET = 1,
CONTEXT_MGMT_PACKET
} CONTEXT_TYPE;
-// RCB (Receive Control Block)
+/* RCB (Receive Control Block) */
typedef struct _RCB
{
- void *Next;
- signed long Ref;
- void *pDevice;
- struct urb *pUrb;
- SRxMgmtPacket sMngPacket;
- struct sk_buff* skb;
- BOOL bBoolInUse;
+ void *Next;
+ signed long Ref;
+ void *pDevice;
+ struct urb *pUrb;
+ struct vnt_rx_mgmt sMngPacket;
+ struct sk_buff *skb;
+ int bBoolInUse;
} RCB, *PRCB;
-// used to track bulk out irps
+/* used to track bulk out irps */
typedef struct _USB_SEND_CONTEXT {
void *pDevice;
struct sk_buff *pPacket;
@@ -193,7 +193,7 @@ typedef struct _USB_SEND_CONTEXT {
CONTEXT_TYPE Type;
SEthernetHeader sEthHeader;
void *Next;
- BOOL bBoolInUse;
+ bool bBoolInUse;
unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
} USB_SEND_CONTEXT, *PUSB_SEND_CONTEXT;
@@ -207,17 +207,17 @@ typedef struct _DEFAULT_CONFIG {
signed int eEncryptionStatus;
} DEFAULT_CONFIG, *PDEFAULT_CONFIG;
-//
-// Structure to keep track of usb interrupt packets
-//
+/*
+ * Structure to keep track of USB interrupt packets
+ */
typedef struct {
unsigned int uDataLen;
PBYTE pDataBuf;
-// struct urb *pUrb;
- BOOL bInUse;
+ /* struct urb *pUrb; */
+ bool bInUse;
} INT_BUFFER, *PINT_BUFFER;
-//0:11A 1:11B 2:11G
+/* 0:11A 1:11B 2:11G */
typedef enum _VIA_BB_TYPE
{
BB_TYPE_11A = 0,
@@ -225,7 +225,7 @@ typedef enum _VIA_BB_TYPE
BB_TYPE_11G
} VIA_BB_TYPE, *PVIA_BB_TYPE;
-//0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
+/* 0:11a, 1:11b, 2:11gb (only CCK in BasicRate), 3:11ga(OFDM in BasicRate) */
typedef enum _VIA_PKT_TYPE
{
PK_TYPE_11A = 0,
@@ -234,7 +234,7 @@ typedef enum _VIA_PKT_TYPE
PK_TYPE_11GA
} VIA_PKT_TYPE, *PVIA_PKT_TYPE;
-//++ NDIS related
+/*++ NDIS related */
typedef enum __DEVICE_NDIS_STATUS {
STATUS_SUCCESS = 0,
@@ -245,10 +245,10 @@ typedef enum __DEVICE_NDIS_STATUS {
#define MAX_BSSIDINFO_4_PMKID 16
#define MAX_PMKIDLIST 5
-//Flags for PMKID Candidate list structure
+/* flags for PMKID Candidate list structure */
#define NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED 0x01
-// PMKID Structures
+/* PMKID Structures */
typedef unsigned char NDIS_802_11_PMKID_VALUE[16];
@@ -272,13 +272,13 @@ typedef enum _NDIS_802_11_WEP_STATUS
typedef enum _NDIS_802_11_STATUS_TYPE
{
- Ndis802_11StatusType_Authentication,
- Ndis802_11StatusType_MediaStreamMode,
- Ndis802_11StatusType_PMKID_CandidateList,
- Ndis802_11StatusTypeMax // not a real type, defined as an upper bound
+ Ndis802_11StatusType_Authentication,
+ Ndis802_11StatusType_MediaStreamMode,
+ Ndis802_11StatusType_PMKID_CandidateList,
+ Ndis802_11StatusTypeMax, /* not a real type, defined as upper bound */
} NDIS_802_11_STATUS_TYPE, *PNDIS_802_11_STATUS_TYPE;
-//Added new types for PMKID Candidate lists.
+/* added new types for PMKID Candidate lists */
typedef struct _PMKID_CANDIDATE {
NDIS_802_11_MAC_ADDRESS BSSID;
unsigned long Flags;
@@ -304,22 +304,17 @@ typedef struct tagSPMKIDCandidateEvent {
PMKID_CANDIDATE CandidateList[MAX_PMKIDLIST];
} SPMKIDCandidateEvent, *PSPMKIDCandidateEvent;
-//--
-
-//++ 802.11h related
+/*++ 802.11h related */
#define MAX_QUIET_COUNT 8
typedef struct tagSQuietControl {
- BOOL bEnable;
+ bool bEnable;
DWORD dwStartTime;
BYTE byPeriod;
WORD wDuration;
} SQuietControl, *PSQuietControl;
-//--
-
-
-// The receive duplicate detection cache entry
+/* The receive duplicate detection cache entry */
typedef struct tagSCacheEntry{
WORD wFmSequence;
BYTE abyAddr2[ETH_ALEN];
@@ -329,13 +324,15 @@ typedef struct tagSCacheEntry{
typedef struct tagSCache{
/* The receive cache is updated circularly. The next entry to be written is
* indexed by the "InPtr".
-*/
+ */
unsigned int uInPtr; /* Place to use next */
SCacheEntry asCacheEntry[DUPLICATE_RX_CACHE_LENGTH];
} SCache, *PSCache;
#define CB_MAX_RX_FRAG 64
-// DeFragment Control Block, used for collecting fragments prior to reassembly
+/*
+ * DeFragment Control Block, used for collecting fragments prior to reassembly
+ */
typedef struct tagSDeFragControlBlock
{
WORD wSequence;
@@ -345,27 +342,25 @@ typedef struct tagSDeFragControlBlock
struct sk_buff* skb;
PBYTE pbyRxBuffer;
unsigned int cbFrameLength;
- BOOL bInUse;
+ bool bInUse;
} SDeFragControlBlock, *PSDeFragControlBlock;
-
-
-//flags for options
+/* flags for options */
#define DEVICE_FLAGS_UNPLUG 0x00000001UL
#define DEVICE_FLAGS_PREAMBLE_TYPE 0x00000002UL
#define DEVICE_FLAGS_OP_MODE 0x00000004UL
#define DEVICE_FLAGS_PS_MODE 0x00000008UL
#define DEVICE_FLAGS_80211h_MODE 0x00000010UL
-//flags for driver status
+/* flags for driver status */
#define DEVICE_FLAGS_OPENED 0x00010000UL
#define DEVICE_FLAGS_WOL_ENABLED 0x00080000UL
-//flags for capbilities
+/* flags for capabilities */
#define DEVICE_FLAGS_TX_ALIGN 0x01000000UL
#define DEVICE_FLAGS_HAVE_CAM 0x02000000UL
#define DEVICE_FLAGS_FLOW_CTRL 0x04000000UL
-//flags for MII status
+/* flags for MII status */
#define DEVICE_LINK_FAIL 0x00000001UL
#define DEVICE_SPEED_10 0x00000002UL
#define DEVICE_SPEED_100 0x00000004UL
@@ -373,14 +368,14 @@ typedef struct tagSDeFragControlBlock
#define DEVICE_DUPLEX_FULL 0x00000010UL
#define DEVICE_AUTONEG_ENABLE 0x00000020UL
#define DEVICE_FORCED_BY_EEPROM 0x00000040UL
-//for device_set_media_duplex
+/* for device_set_media_duplex */
#define DEVICE_LINK_CHANGE 0x00000001UL
typedef struct __device_opt {
- int nRxDescs0; //Number of RX descriptors0
- int nTxDescs0; //Number of TX descriptors 0, 1, 2, 3
- int rts_thresh; //rts threshold
+ int nRxDescs0; /* number of RX descriptors 0 */
+ int nTxDescs0; /* number of TX descriptors 0, 1, 2, 3 */
+ int rts_thresh; /* RTS threshold */
int frag_thresh;
int OpMode;
int data_rate;
@@ -392,429 +387,406 @@ typedef struct __device_opt {
} OPTIONS, *POPTIONS;
-typedef struct __device_info {
+struct vnt_private {
+ /* netdev */
+ struct usb_device *usb;
+ struct net_device *dev;
+ struct net_device_stats stats;
-// netdev
- struct usb_device* usb;
- struct net_device* dev;
- struct net_device_stats stats;
+ OPTIONS sOpts;
+ struct tasklet_struct CmdWorkItem;
+ struct tasklet_struct EventWorkItem;
+ struct tasklet_struct ReadWorkItem;
+ struct tasklet_struct RxMngWorkItem;
- OPTIONS sOpts;
+ u32 rx_buf_sz;
+ int multicast_limit;
+ u8 byRxMode;
- struct tasklet_struct CmdWorkItem;
- struct tasklet_struct EventWorkItem;
- struct tasklet_struct ReadWorkItem;
- struct tasklet_struct RxMngWorkItem;
+ spinlock_t lock;
- u32 rx_buf_sz;
- int multicast_limit;
- BYTE byRxMode;
+ u32 rx_bytes;
- spinlock_t lock;
+ u8 byRevId;
- u32 rx_bytes;
+ u32 flags;
+ unsigned long Flags;
- BYTE byRevId;
+ SCache sDupRxCache;
- u32 flags;
- unsigned long Flags;
+ SDeFragControlBlock sRxDFCB[CB_MAX_RX_FRAG];
+ u32 cbDFCB;
+ u32 cbFreeDFCB;
+ u32 uCurrentDFCBIdx;
- SCache sDupRxCache;
- SDeFragControlBlock sRxDFCB[CB_MAX_RX_FRAG];
- unsigned int cbDFCB;
- unsigned int cbFreeDFCB;
- unsigned int uCurrentDFCBIdx;
+ /* USB */
+ struct urb *pControlURB;
+ struct urb *pInterruptURB;
+ struct usb_ctrlrequest sUsbCtlRequest;
+ u32 int_interval;
- // +++USB
-
- struct urb *pControlURB;
- struct urb *pInterruptURB;
- struct usb_ctrlrequest sUsbCtlRequest;
-
- unsigned int int_interval;
- //
- // Variables to track resources for the BULK In Pipe
- //
- PRCB pRCBMem;
- PRCB apRCB[CB_MAX_RX_DESC];
- unsigned int cbRD;
- PRCB FirstRecvFreeList;
- PRCB LastRecvFreeList;
- unsigned int NumRecvFreeList;
- PRCB FirstRecvMngList;
- PRCB LastRecvMngList;
- unsigned int NumRecvMngList;
- BOOL bIsRxWorkItemQueued;
- BOOL bIsRxMngWorkItemQueued;
+ /* Variables to track resources for the BULK In Pipe */
+ PRCB pRCBMem;
+ PRCB apRCB[CB_MAX_RX_DESC];
+ u32 cbRD;
+ PRCB FirstRecvFreeList;
+ PRCB LastRecvFreeList;
+ u32 NumRecvFreeList;
+ PRCB FirstRecvMngList;
+ PRCB LastRecvMngList;
+ u32 NumRecvMngList;
+ int bIsRxWorkItemQueued;
+ int bIsRxMngWorkItemQueued;
unsigned long ulRcvRefCount; /* packets that have not returned back */
- //
- // Variables to track resources for the BULK Out Pipe
- //
-
- PUSB_SEND_CONTEXT apTD[CB_MAX_TX_DESC];
- unsigned int cbTD;
-
- //
- // Variables to track resources for the Interrupt In Pipe
- //
- INT_BUFFER intBuf;
- BOOL fKillEventPollingThread;
- BOOL bEventAvailable;
-
-
- //default config from file by user setting
- DEFAULT_CONFIG config_file;
-
-
- //
- // Statistic for USB
- // protect with spinlock
- unsigned long ulBulkInPosted;
- unsigned long ulBulkInError;
- unsigned long ulBulkInContCRCError;
- unsigned long ulBulkInBytesRead;
-
- unsigned long ulBulkOutPosted;
- unsigned long ulBulkOutError;
- unsigned long ulBulkOutContCRCError;
- unsigned long ulBulkOutBytesWrite;
-
- unsigned long ulIntInPosted;
- unsigned long ulIntInError;
- unsigned long ulIntInContCRCError;
- unsigned long ulIntInBytesRead;
-
-
- // Version control
- WORD wFirmwareVersion;
- BYTE byLocalID;
- BYTE byRFType;
- BYTE byBBRxConf;
-
-
- BYTE byZoneType;
- BOOL bZoneRegExist;
-
- BYTE byOriginalZonetype;
-
- BOOL bLinkPass; // link status: OK or fail
- BYTE abyCurrentNetAddr[ETH_ALEN];
- BYTE abyPermanentNetAddr[ETH_ALEN];
- // SW network address
- /* u8 abySoftwareNetAddr[ETH_ALEN]; */
- BOOL bExistSWNetAddr;
-
- // Adapter statistics
- SStatCounter scStatistic;
- // 802.11 counter
- SDot11Counters s802_11Counter;
-
- //
- // Maintain statistical debug info.
- //
- unsigned long packetsReceived;
- unsigned long packetsReceivedDropped;
- unsigned long packetsReceivedOverflow;
- unsigned long packetsSent;
- unsigned long packetsSentDropped;
- unsigned long SendContextsInUse;
- unsigned long RcvBuffersInUse;
-
-
- // 802.11 management
- SMgmtObject sMgmtObj;
-
- QWORD qwCurrTSF;
- unsigned int cbBulkInMax;
- BOOL bPSRxBeacon;
-
- // 802.11 MAC specific
- unsigned int uCurrRSSI;
- BYTE byCurrSQ;
-
-
- //Antenna Diversity
- BOOL bTxRxAntInv;
- DWORD dwRxAntennaSel;
- DWORD dwTxAntennaSel;
- BYTE byAntennaCount;
- BYTE byRxAntennaMode;
- BYTE byTxAntennaMode;
- BYTE byRadioCtl;
- BYTE bHWRadioOff;
-
- //SQ3 functions for antenna diversity
- struct timer_list TimerSQ3Tmax1;
- struct timer_list TimerSQ3Tmax2;
- struct timer_list TimerSQ3Tmax3;
-
- BOOL bDiversityRegCtlON;
- BOOL bDiversityEnable;
- unsigned long ulDiversityNValue;
- unsigned long ulDiversityMValue;
- BYTE byTMax;
- BYTE byTMax2;
- BYTE byTMax3;
- unsigned long ulSQ3TH;
-
- unsigned long uDiversityCnt;
- BYTE byAntennaState;
- unsigned long ulRatio_State0;
- unsigned long ulRatio_State1;
- unsigned long ulSQ3_State0;
- unsigned long ulSQ3_State1;
-
- unsigned long aulSQ3Val[MAX_RATE];
- unsigned long aulPktNum[MAX_RATE];
+ /* Variables to track resources for the BULK Out Pipe */
+ PUSB_SEND_CONTEXT apTD[CB_MAX_TX_DESC];
+ u32 cbTD;
+
+ /* Variables to track resources for the Interrupt In Pipe */
+ INT_BUFFER intBuf;
+ int fKillEventPollingThread;
+ int bEventAvailable;
+
+ /* default config from file by user setting */
+ DEFAULT_CONFIG config_file;
+
+
+ /* Statistic for USB */
+ unsigned long ulBulkInPosted;
+ unsigned long ulBulkInError;
+ unsigned long ulBulkInContCRCError;
+ unsigned long ulBulkInBytesRead;
+
+ unsigned long ulBulkOutPosted;
+ unsigned long ulBulkOutError;
+ unsigned long ulBulkOutContCRCError;
+ unsigned long ulBulkOutBytesWrite;
+
+ unsigned long ulIntInPosted;
+ unsigned long ulIntInError;
+ unsigned long ulIntInContCRCError;
+ unsigned long ulIntInBytesRead;
+
+
+ /* Version control */
+ u16 wFirmwareVersion;
+ u8 byLocalID;
+ u8 byRFType;
+ u8 byBBRxConf;
+
+
+ u8 byZoneType;
+ int bZoneRegExist;
+
+ u8 byOriginalZonetype;
+
+ int bLinkPass; /* link status: OK or fail */
+ u8 abyCurrentNetAddr[ETH_ALEN];
+ u8 abyPermanentNetAddr[ETH_ALEN];
+
+ int bExistSWNetAddr;
+
+ /* Adapter statistics */
+ SStatCounter scStatistic;
+ /* 802.11 counter */
+ SDot11Counters s802_11Counter;
+
+ /* Maintain statistical debug info. */
+ unsigned long packetsReceived;
+ unsigned long packetsReceivedDropped;
+ unsigned long packetsReceivedOverflow;
+ unsigned long packetsSent;
+ unsigned long packetsSentDropped;
+ unsigned long SendContextsInUse;
+ unsigned long RcvBuffersInUse;
+
+ /* 802.11 management */
+ struct vnt_manager vnt_mgmt;
+
+ u64 qwCurrTSF;
+ u32 cbBulkInMax;
+ int bPSRxBeacon;
+
+ /* 802.11 MAC specific */
+ u32 uCurrRSSI;
+ u8 byCurrSQ;
+
+ /* Antenna Diversity */
+ int bTxRxAntInv;
+ u32 dwRxAntennaSel;
+ u32 dwTxAntennaSel;
+ u8 byAntennaCount;
+ u8 byRxAntennaMode;
+ u8 byTxAntennaMode;
+ u8 byRadioCtl;
+ u8 bHWRadioOff;
+
+ /* SQ3 functions for antenna diversity */
+ struct timer_list TimerSQ3Tmax1;
+ struct timer_list TimerSQ3Tmax2;
+ struct timer_list TimerSQ3Tmax3;
+
+ int bDiversityRegCtlON;
+ int bDiversityEnable;
+ unsigned long ulDiversityNValue;
+ unsigned long ulDiversityMValue;
+ u8 byTMax;
+ u8 byTMax2;
+ u8 byTMax3;
+ unsigned long ulSQ3TH;
+
+ unsigned long uDiversityCnt;
+ u8 byAntennaState;
+ unsigned long ulRatio_State0;
+ unsigned long ulRatio_State1;
+ unsigned long ulSQ3_State0;
+ unsigned long ulSQ3_State1;
+
+ unsigned long aulSQ3Val[MAX_RATE];
+ unsigned long aulPktNum[MAX_RATE];
/* IFS & Cw */
- unsigned int uSIFS; /* Current SIFS */
- unsigned int uDIFS; /* Current DIFS */
- unsigned int uEIFS; /* Current EIFS */
- unsigned int uSlot; /* Current SlotTime */
- unsigned int uCwMin; /* Current CwMin */
- unsigned int uCwMax; /* CwMax is fixed on 1023 */
-
- // PHY parameter
- BYTE bySIFS;
- BYTE byDIFS;
- BYTE byEIFS;
- BYTE bySlot;
- BYTE byCWMaxMin;
-
- // Rate
- VIA_BB_TYPE byBBType; //0: 11A, 1:11B, 2:11G
- VIA_PKT_TYPE byPacketType; //0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
- WORD wBasicRate;
- BYTE byACKRate;
- BYTE byTopOFDMBasicRate;
- BYTE byTopCCKBasicRate;
-
-
- DWORD dwAotoRateTxOkCnt;
- DWORD dwAotoRateTxFailCnt;
- DWORD dwErrorRateThreshold[13];
- DWORD dwTPTable[MAX_RATE];
- BYTE abyEEPROM[EEP_MAX_CONTEXT_SIZE]; //DWORD alignment
-
- BYTE byMinChannel;
- BYTE byMaxChannel;
- unsigned int uConnectionRate;
-
- BYTE byPreambleType;
- BYTE byShortPreamble;
- // CARD_PHY_TYPE
- BYTE eConfigPHYMode;
-
- // For RF Power table
- BYTE byCCKPwr;
- BYTE byOFDMPwrG;
- BYTE byOFDMPwrA;
- BYTE byCurPwr;
- BYTE abyCCKPwrTbl[14];
- BYTE abyOFDMPwrTbl[14];
- BYTE abyOFDMAPwrTbl[42];
-
- WORD wCurrentRate;
- WORD wRTSThreshold;
- WORD wFragmentationThreshold;
- BYTE byShortRetryLimit;
- BYTE byLongRetryLimit;
- CARD_OP_MODE eOPMode;
- BOOL bBSSIDFilter;
- WORD wMaxTransmitMSDULifetime;
- BYTE abyBSSID[ETH_ALEN];
- BYTE abyDesireBSSID[ETH_ALEN];
- WORD wCTSDuration; // update while speed change
- WORD wACKDuration; // update while speed change
- WORD wRTSTransmitLen; // update while speed change
- BYTE byRTSServiceField; // update while speed change
- BYTE byRTSSignalField; // update while speed change
-
- DWORD dwMaxReceiveLifetime; // dot11MaxReceiveLifetime
-
- BOOL bCCK;
- BOOL bEncryptionEnable;
- BOOL bLongHeader;
- BOOL bSoftwareGenCrcErr;
- BOOL bShortSlotTime;
- BOOL bProtectMode;
- BOOL bNonERPPresent;
- BOOL bBarkerPreambleMd;
-
- BYTE byERPFlag;
- WORD wUseProtectCntDown;
-
- BOOL bRadioControlOff;
- BOOL bRadioOff;
-
- // Power save
- BOOL bEnablePSMode;
- WORD wListenInterval;
- BOOL bPWBitOn;
- WMAC_POWER_MODE ePSMode;
- unsigned long ulPSModeWaitTx;
- BOOL bPSModeTxBurst;
-
- // Beacon releated
- WORD wSeqCounter;
- BOOL bBeaconBufReady;
- BOOL bBeaconSent;
- BOOL bFixRate;
- BYTE byCurrentCh;
- unsigned int uScanTime;
-
- CMD_STATE eCommandState;
-
- CMD_CODE eCommand;
- BOOL bBeaconTx;
- BYTE byScanBBType;
-
- BOOL bStopBeacon;
- BOOL bStopDataPkt;
- BOOL bStopTx0Pkt;
- unsigned int uAutoReConnectTime;
- unsigned int uIsroamingTime;
-
- // 802.11 counter
-
- CMD_ITEM eCmdQueue[CMD_Q_SIZE];
- unsigned int uCmdDequeueIdx;
- unsigned int uCmdEnqueueIdx;
- unsigned int cbFreeCmdQueue;
- BOOL bCmdRunning;
- BOOL bCmdClear;
- BOOL bNeedRadioOFF;
-
- BOOL bEnableRoaming;
- BOOL bIsRoaming;
- BOOL bFastRoaming;
- BYTE bSameBSSMaxNum;
- BYTE bSameBSSCurNum;
- BOOL bRoaming;
- BOOL b11hEable;
- unsigned long ulTxPower;
-
- // Encryption
- NDIS_802_11_WEP_STATUS eEncryptionStatus;
- BOOL bTransmitKey;
-
-//mike add :save old Encryption
- NDIS_802_11_WEP_STATUS eOldEncryptionStatus;
-
- SKeyManagement sKey;
- DWORD dwIVCounter;
-
-
- RC4Ext SBox;
- BYTE abyPRNG[WLAN_WEPMAX_KEYLEN+3];
- BYTE byKeyIndex;
-
- BOOL bAES;
-
- unsigned int uKeyLength;
- BYTE abyKey[WLAN_WEP232_KEYLEN];
-
- // for AP mode
- unsigned int uAssocCount;
- BOOL bMoreData;
-
- // QoS
- BOOL bGrpAckPolicy;
-
-
- BYTE byAutoFBCtrl;
-
- BOOL bTxMICFail;
- BOOL bRxMICFail;
-
-
- // For Update BaseBand VGA Gain Offset
- BOOL bUpdateBBVGA;
- unsigned int uBBVGADiffCount;
- BYTE byBBVGANew;
- BYTE byBBVGACurrent;
- BYTE abyBBVGA[BB_VGA_LEVEL];
- signed long ldBmThreshold[BB_VGA_LEVEL];
-
- BYTE byBBPreEDRSSI;
- BYTE byBBPreEDIndex;
-
-
- BOOL bRadioCmd;
- DWORD dwDiagRefCount;
-
- // For FOE Tuning
- BYTE byFOETuning;
+ u32 uSIFS; /* Current SIFS */
+ u32 uDIFS; /* Current DIFS */
+ u32 uEIFS; /* Current EIFS */
+ u32 uSlot; /* Current SlotTime */
+ u32 uCwMin; /* Current CwMin */
+ u32 uCwMax; /* CwMax is fixed on 1023 */
+
+ /* PHY parameter */
+ u8 bySIFS;
+ u8 byDIFS;
+ u8 byEIFS;
+ u8 bySlot;
+ u8 byCWMaxMin;
+
+ /* Rate */
+ VIA_BB_TYPE byBBType; /* 0: 11A, 1:11B, 2:11G */
+ VIA_PKT_TYPE byPacketType; /* 0:11a 1:11b 2:11gb 3:11ga */
+ u16 wBasicRate;
+ u8 byACKRate;
+ u8 byTopOFDMBasicRate;
+ u8 byTopCCKBasicRate;
+
+
+ u32 dwAotoRateTxOkCnt;
+ u32 dwAotoRateTxFailCnt;
+ u32 dwErrorRateThreshold[13];
+ u32 dwTPTable[MAX_RATE];
+ u8 abyEEPROM[EEP_MAX_CONTEXT_SIZE]; /* u32 alignment */
+
+ u8 byMinChannel;
+ u8 byMaxChannel;
+ u32 uConnectionRate;
+
+ u8 byPreambleType;
+ u8 byShortPreamble;
+ /* CARD_PHY_TYPE */
+ u8 eConfigPHYMode;
+
+ /* For RF Power table */
+ u8 byCCKPwr;
+ u8 byOFDMPwrG;
+ u8 byOFDMPwrA;
+ u8 byCurPwr;
+ u8 abyCCKPwrTbl[14];
+ u8 abyOFDMPwrTbl[14];
+ u8 abyOFDMAPwrTbl[42];
+
+ u16 wCurrentRate;
+ u16 wRTSThreshold;
+ u16 wFragmentationThreshold;
+ u8 byShortRetryLimit;
+ u8 byLongRetryLimit;
+ CARD_OP_MODE eOPMode;
+ int bBSSIDFilter;
+ u16 wMaxTransmitMSDULifetime;
+ u8 abyBSSID[ETH_ALEN];
+ u8 abyDesireBSSID[ETH_ALEN];
+
+ u16 wCTSDuration; /* update while speed change */
+ u16 wACKDuration;
+ u16 wRTSTransmitLen;
+ u8 byRTSServiceField;
+ u8 byRTSSignalField;
+
+ u32 dwMaxReceiveLifetime; /* dot11MaxReceiveLifetime */
+
+ int bCCK;
+ int bEncryptionEnable;
+ int bLongHeader;
+ int bSoftwareGenCrcErr;
+ int bShortSlotTime;
+ int bProtectMode;
+ int bNonERPPresent;
+ int bBarkerPreambleMd;
+
+ u8 byERPFlag;
+ u16 wUseProtectCntDown;
+
+ int bRadioControlOff;
+ int bRadioOff;
+
+ /* Power save */
+ int bEnablePSMode;
+ u16 wListenInterval;
+ int bPWBitOn;
+ WMAC_POWER_MODE ePSMode;
+ unsigned long ulPSModeWaitTx;
+ int bPSModeTxBurst;
+
+ /* Beacon related */
+ u16 wSeqCounter;
+ int bBeaconBufReady;
+ int bBeaconSent;
+ int bFixRate;
+ u8 byCurrentCh;
+ u32 uScanTime;
+
+ CMD_STATE eCommandState;
+
+ CMD_CODE eCommand;
+ int bBeaconTx;
+ u8 byScanBBType;
+
+ int bStopBeacon;
+ int bStopDataPkt;
+ int bStopTx0Pkt;
+ u32 uAutoReConnectTime;
+ u32 uIsroamingTime;
+
+ /* 802.11 counter */
+
+ CMD_ITEM eCmdQueue[CMD_Q_SIZE];
+ u32 uCmdDequeueIdx;
+ u32 uCmdEnqueueIdx;
+ u32 cbFreeCmdQueue;
+ int bCmdRunning;
+ int bCmdClear;
+ int bNeedRadioOFF;
+
+ int bEnableRoaming;
+ int bIsRoaming;
+ int bFastRoaming;
+ u8 bSameBSSMaxNum;
+ u8 bSameBSSCurNum;
+ int bRoaming;
+ int b11hEable;
+ unsigned long ulTxPower;
+
+ /* Encryption */
+ NDIS_802_11_WEP_STATUS eEncryptionStatus;
+ int bTransmitKey;
+ NDIS_802_11_WEP_STATUS eOldEncryptionStatus;
+ SKeyManagement sKey;
+ u32 dwIVCounter;
+
+
+ RC4Ext SBox;
+ u8 abyPRNG[WLAN_WEPMAX_KEYLEN+3];
+ u8 byKeyIndex;
+
+ int bAES;
+
+ u32 uKeyLength;
+ u8 abyKey[WLAN_WEP232_KEYLEN];
+
+ /* for AP mode */
+ u32 uAssocCount;
+ int bMoreData;
+
+ /* QoS */
+ int bGrpAckPolicy;
+
+
+ u8 byAutoFBCtrl;
+
+ int bTxMICFail;
+ int bRxMICFail;
+
+
+ /* For Update BaseBand VGA Gain Offset */
+ int bUpdateBBVGA;
+ u32 uBBVGADiffCount;
+ u8 byBBVGANew;
+ u8 byBBVGACurrent;
+ u8 abyBBVGA[BB_VGA_LEVEL];
+ signed long ldBmThreshold[BB_VGA_LEVEL];
+
+ u8 byBBPreEDRSSI;
+ u8 byBBPreEDIndex;
+
+
+ int bRadioCmd;
+ u32 dwDiagRefCount;
+
+ /* For FOE Tuning */
+ u8 byFOETuning;
+
+ /* For Auto Power Tuning */
+ u8 byAutoPwrTunning;
+
+ /* BaseBand Loopback Use */
+ u8 byBBCR4d;
+ u8 byBBCRc9;
+ u8 byBBCR88;
+ u8 byBBCR09;
+
+ /* command timer */
+ struct timer_list sTimerCommand;
- // For Auto Power Tunning
+ struct timer_list sTimerTxData;
+ unsigned long nTxDataTimeCout;
+ int fTxDataInSleep;
+ int IsTxDataTrigger;
- BYTE byAutoPwrTunning;
+ int fWPA_Authened; /* is WPA/WPA-PSK or WPA2/WPA2-PSK authenticated? */
+ u8 byReAssocCount;
+ u8 byLinkWaitCount;
- // BaseBand Loopback Use
- BYTE byBBCR4d;
- BYTE byBBCRc9;
- BYTE byBBCR88;
- BYTE byBBCR09;
+ SEthernetHeader sTxEthHeader;
+ SEthernetHeader sRxEthHeader;
+ u8 abyBroadcastAddr[ETH_ALEN];
+ u8 abySNAP_RFC1042[ETH_ALEN];
+ u8 abySNAP_Bridgetunnel[ETH_ALEN];
- // command timer
- struct timer_list sTimerCommand;
+ /* Pre-Authentication & PMK cache */
+ SPMKID gsPMKID;
+ SPMKIDCandidateEvent gsPMKIDCandidate;
- struct timer_list sTimerTxData;
- unsigned long nTxDataTimeCout;
- BOOL fTxDataInSleep;
- BOOL IsTxDataTrigger;
- BOOL fWPA_Authened; //is WPA/WPA-PSK or WPA2/WPA2-PSK authen??
- BYTE byReAssocCount; //mike add:re-association retry times!
- BYTE byLinkWaitCount;
+ /* for 802.11h */
+ int b11hEnable;
- SEthernetHeader sTxEthHeader;
- SEthernetHeader sRxEthHeader;
- BYTE abyBroadcastAddr[ETH_ALEN];
- BYTE abySNAP_RFC1042[ETH_ALEN];
- BYTE abySNAP_Bridgetunnel[ETH_ALEN];
+ int bChannelSwitch;
+ u8 byNewChannel;
+ u8 byChannelSwitchCount;
- // Pre-Authentication & PMK cache
- SPMKID gsPMKID;
- SPMKIDCandidateEvent gsPMKIDCandidate;
+ /* WPA supplicant daemon */
+ int bWPADEVUp;
+ int bwextstep0;
+ int bwextstep1;
+ int bwextstep2;
+ int bwextstep3;
+ int bWPASuppWextEnabled;
+ /* user space daemon: hostapd, used for HOSTAP mode */
+ int bEnableHostapd;
+ int bEnable8021x;
+ int bEnableHostWEP;
+ struct net_device *apdev;
+ int (*tx_80211)(struct sk_buff *skb, struct net_device *dev);
- // for 802.11h
- BOOL b11hEnable;
-
- BOOL bChannelSwitch;
- BYTE byNewChannel;
- BYTE byChannelSwitchCount;
+ u32 uChannel;
- //WPA supplicant daemon
- struct net_device *wpadev;
- BOOL bWPADEVUp;
- //--
+ struct iw_statistics wstats; /* wireless stats */
- BOOL bwextstep0;
- BOOL bwextstep1;
- BOOL bwextstep2;
- BOOL bwextstep3;
- BOOL bWPASuppWextEnabled;
+ int bCommit;
-#ifdef HOSTAP
- // user space daemon: hostapd, is used for HOSTAP
- BOOL bEnableHostapd;
- BOOL bEnable8021x;
- BOOL bEnableHostWEP;
- struct net_device *apdev;
- int (*tx_80211)(struct sk_buff *skb, struct net_device *dev);
-#endif
- unsigned int uChannel;
-
- struct iw_statistics wstats; // wireless stats
- BOOL bCommit;
-
-} DEVICE_INFO, *PSDevice;
+};
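Editorial sketch, not part of the patch: after this conversion, code that previously went through PSDevice and sMgmtObj reaches the same state through struct vnt_private and the renamed vnt_mgmt member, as the dpc.c hunks further down show. A minimal usage example under that assumption:

	/* illustrative only; mirrors the access pattern used in dpc.c below */
	static int example_sta_is_associated(struct vnt_private *priv)
	{
		struct vnt_manager *mgmt = &priv->vnt_mgmt;	/* was &pDevice->sMgmtObj */

		return mgmt->eCurrMode == WMAC_MODE_ESS_STA &&
		       mgmt->eCurrState == WMAC_STATE_ASSOC;
	}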
@@ -871,9 +843,6 @@ typedef struct __device_info {
/*--------------------- Export Functions --------------------------*/
-/* BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb,
- * unsigned int uNodeIndex);
- */
-BOOL device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF);
+int device_alloc_frag_buf(struct vnt_private *, PSDeFragControlBlock pDeF);
#endif
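Editorial sketch, not part of the patch: the converted prototype above keeps the same contract, returning nonzero on success. A plausible minimal body, consistent with how the function is called from dpc.c (it re-arms a defragment control block with a fresh receive skb); the committed implementation lives elsewhere in the driver and may differ:

	/* illustrative only; assumed behaviour, not the committed body */
	int device_alloc_frag_buf(struct vnt_private *pDevice,
				  PSDeFragControlBlock pDeF)
	{
		pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
		if (!pDeF->skb)
			return false;
		pDeF->skb->dev = pDevice->dev;
		return true;
	}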
diff --git a/drivers/staging/vt6656/device_cfg.h b/drivers/staging/vt6656/device_cfg.h
index a0b82169dad3..62290d0ac195 100644
--- a/drivers/staging/vt6656/device_cfg.h
+++ b/drivers/staging/vt6656/device_cfg.h
@@ -38,12 +38,12 @@ struct _version {
unsigned char build;
} version_t, *pversion_t;
-#ifndef FALSE
-#define FALSE (0)
+#ifndef false
+#define false (0)
#endif
-#ifndef TRUE
-#define TRUE (!(FALSE))
+#ifndef true
+#define true (!(false))
#endif
#define VID_TABLE_SIZE 64
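Editorial note, not part of the patch: the lowercase guards above are only a transitional fallback; kernel code already gets bool, true and false from <linux/types.h>, which is what allows BOOL/TRUE/FALSE to be dropped throughout this series. A tiny stand-alone illustration, using <stdbool.h> as a user-space stand-in:

	/* illustrative only; stdbool.h stands in for <linux/types.h> here */
	#include <stdbool.h>

	static bool flag_demo(void)
	{
		bool bInUse = false;	/* replaces: BOOL bInUse = FALSE; */

		bInUse = true;
		return bInUse;
	}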
@@ -67,14 +67,14 @@ struct _version {
#define DEVICE_VERSION "1.19_12"
#endif
-//config file
+/* config file */
#include <linux/fs.h>
#include <linux/fcntl.h>
#ifndef CONFIG_PATH
#define CONFIG_PATH "/etc/vntconfiguration.dat"
#endif
-//Max: 2378=2312Payload + 30HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
+/* Max: 2378 = 2312 Payload + 30HD + 4CRC + 2Padding + 4Len + 8TSF + 4RSR */
#define PKT_BUF_SZ 2390
#define MAX_UINTS 8
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index e94f6a1647a3..e83f95e1d9a8 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -84,61 +84,23 @@ s_vGetDASA(
PSEthernetHeader psEthHeader
);
-static
-void
-s_vProcessRxMACHeader (
- PSDevice pDevice,
- PBYTE pbyRxBufferAddr,
- unsigned int cbPacketSize,
- BOOL bIsWEP,
- BOOL bExtIV,
- unsigned int *pcbHeadSize
- );
-
-static BOOL s_bAPModeRxCtl(
- PSDevice pDevice,
- PBYTE pbyFrame,
- signed int iSANodeIndex
- );
-
-
-
-static BOOL s_bAPModeRxData (
- PSDevice pDevice,
- struct sk_buff *skb,
- unsigned int FrameSize,
- unsigned int cbHeaderOffset,
- signed int iSANodeIndex,
- signed int iDANodeIndex
- );
+static void s_vProcessRxMACHeader(struct vnt_private *pDevice,
+ u8 *pbyRxBufferAddr, u32 cbPacketSize, int bIsWEP, int bExtIV,
+ u32 *pcbHeadSize);
+static int s_bAPModeRxCtl(struct vnt_private *pDevice, u8 *pbyFrame,
+ s32 iSANodeIndex);
-static BOOL s_bHandleRxEncryption(
- PSDevice pDevice,
- PBYTE pbyFrame,
- unsigned int FrameSize,
- PBYTE pbyRsr,
- PBYTE pbyNewRsr,
- PSKeyItem * pKeyOut,
- int * pbExtIV,
- PWORD pwRxTSC15_0,
- PDWORD pdwRxTSC47_16
- );
-
-static BOOL s_bHostWepRxEncryption(
+static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
+ u32 FrameSize, u32 cbHeaderOffset, s32 iSANodeIndex, s32 iDANodeIndex);
- PSDevice pDevice,
- PBYTE pbyFrame,
- unsigned int FrameSize,
- PBYTE pbyRsr,
- BOOL bOnFly,
- PSKeyItem pKey,
- PBYTE pbyNewRsr,
- int * pbExtIV,
- PWORD pwRxTSC15_0,
- PDWORD pdwRxTSC47_16
+static int s_bHandleRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
+ u32 FrameSize, u8 *pbyRsr, u8 *pbyNewRsr, PSKeyItem *pKeyOut,
+ s32 *pbExtIV, u16 *pwRxTSC15_0, u32 *pdwRxTSC47_16);
- );
+static int s_bHostWepRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
+ u32 FrameSize, u8 *pbyRsr, int bOnFly, PSKeyItem pKey, u8 *pbyNewRsr,
+ s32 *pbExtIV, u16 *pwRxTSC15_0, u32 *pdwRxTSC47_16);
/*--------------------- Export Variables --------------------------*/
@@ -159,22 +121,16 @@ static BOOL s_bHostWepRxEncryption(
* Return Value: None
*
-*/
-static
-void
-s_vProcessRxMACHeader (
- PSDevice pDevice,
- PBYTE pbyRxBufferAddr,
- unsigned int cbPacketSize,
- BOOL bIsWEP,
- BOOL bExtIV,
- unsigned int *pcbHeadSize
- )
+
+static void s_vProcessRxMACHeader(struct vnt_private *pDevice,
+ u8 *pbyRxBufferAddr, u32 cbPacketSize, int bIsWEP, int bExtIV,
+ u32 *pcbHeadSize)
{
- PBYTE pbyRxBuffer;
- unsigned int cbHeaderSize = 0;
- PWORD pwType;
- PS802_11Header pMACHeader;
- int ii;
+ u8 *pbyRxBuffer;
+ u32 cbHeaderSize = 0;
+ u16 *pwType;
+ PS802_11Header pMACHeader;
+ int ii;
pMACHeader = (PS802_11Header) (pbyRxBufferAddr + cbHeaderSize);
@@ -310,56 +266,39 @@ s_vGetDASA (
}
-
-
-BOOL
-RXbBulkInProcessData (
- PSDevice pDevice,
- PRCB pRCB,
- unsigned long BytesToIndicate
- )
+int RXbBulkInProcessData(struct vnt_private *pDevice, PRCB pRCB,
+ unsigned long BytesToIndicate)
{
-
- struct net_device_stats* pStats=&pDevice->stats;
- struct sk_buff* skb;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PSRxMgmtPacket pRxPacket = &(pMgmt->sRxPacket);
- PS802_11Header p802_11Header;
- PBYTE pbyRsr;
- PBYTE pbyNewRsr;
- PBYTE pbyRSSI;
- PQWORD pqwTSFTime;
- PBYTE pbyFrame;
- BOOL bDeFragRx = FALSE;
- unsigned int cbHeaderOffset;
+ struct net_device_stats *pStats = &pDevice->stats;
+ struct sk_buff *skb;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_rx_mgmt *pRxPacket = &pMgmt->sRxPacket;
+ PS802_11Header p802_11Header;
+ u8 *pbyRsr, *pbyNewRsr, *pbyRSSI, *pbyFrame;
+ u64 *pqwTSFTime;
+ u32 bDeFragRx = false;
+ u32 cbHeaderOffset, cbIVOffset;
u32 FrameSize;
- WORD wEtherType = 0;
- signed int iSANodeIndex = -1;
- signed int iDANodeIndex = -1;
- unsigned int ii;
- unsigned int cbIVOffset;
- PBYTE pbyRxSts;
- PBYTE pbyRxRate;
- PBYTE pbySQ;
- PBYTE pby3SQ;
- unsigned int cbHeaderSize;
- PSKeyItem pKey = NULL;
- WORD wRxTSC15_0 = 0;
- DWORD dwRxTSC47_16 = 0;
- SKeyItem STempKey;
- // 802.11h RPI
- /* signed long ldBm = 0; */
- BOOL bIsWEP = FALSE;
- BOOL bExtIV = FALSE;
+ u16 wEtherType = 0;
+ s32 iSANodeIndex = -1, iDANodeIndex = -1;
+ int ii;
+ u8 *pbyRxSts, *pbyRxRate, *pbySQ, *pby3SQ;
+ u32 cbHeaderSize;
+ PSKeyItem pKey = NULL;
+ u16 wRxTSC15_0 = 0;
+ u32 dwRxTSC47_16 = 0;
+ SKeyItem STempKey;
+ /* signed long ldBm = 0; */
+ int bIsWEP = false;
+ int bExtIV = false;
u32 dwWbkStatus;
- PRCB pRCBIndicate = pRCB;
- PBYTE pbyDAddress;
- PWORD pwPLCP_Length;
- BYTE abyVaildRate[MAX_RATE] = {2,4,11,22,12,18,24,36,48,72,96,108};
- WORD wPLCPwithPadding;
- PS802_11Header pMACHeader;
- BOOL bRxeapol_key = FALSE;
-
+ PRCB pRCBIndicate = pRCB;
+ u8 *pbyDAddress;
+ u16 *pwPLCP_Length;
+ u8 abyVaildRate[MAX_RATE]
+ = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
+ u16 wPLCPwithPadding;
+ PS802_11Header pMACHeader;
+ int bRxeapol_key = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---------- RXbBulkInProcessData---\n");
@@ -373,13 +312,13 @@ RXbBulkInProcessData (
if (BytesToIndicate != FrameSize) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"------- WRONG Length 1\n");
- return FALSE;
+ return false;
}
if ((BytesToIndicate > 2372) || (BytesToIndicate <= 40)) {
// Frame Size error drop this packet.
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 2\n");
- return FALSE;
+ return false;
}
pbyDAddress = (PBYTE)(skb->data);
@@ -397,7 +336,7 @@ RXbBulkInProcessData (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Wrong PLCP Length %x\n", (int) *pwPLCP_Length);
ASSERT(0);
- return FALSE;
+ return false;
}
for ( ii=RATE_1M;ii<MAX_RATE;ii++) {
if ( *pbyRxRate == abyVaildRate[ii] ) {
@@ -406,12 +345,12 @@ RXbBulkInProcessData (
}
if ( ii==MAX_RATE ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Wrong RxRate %x\n",(int) *pbyRxRate);
- return FALSE;
+ return false;
}
wPLCPwithPadding = ( (*pwPLCP_Length / 4) + ( (*pwPLCP_Length % 4) ? 1:0 ) ) *4;
- pqwTSFTime = (PQWORD) (pbyDAddress + 8 + wPLCPwithPadding);
+ pqwTSFTime = (u64 *)(pbyDAddress + 8 + wPLCPwithPadding);
if(pDevice->byBBType == BB_TYPE_11G) {
pby3SQ = pbyDAddress + 8 + wPLCPwithPadding + 12;
pbySQ = pby3SQ;
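Editorial note, not part of the patch: the wPLCPwithPadding computation in the hunk above simply rounds the PLCP length up to the next multiple of 4. An equivalent stand-alone sketch for reference:

	/* illustrative only; equivalent to ((len / 4) + (len % 4 ? 1 : 0)) * 4 */
	static unsigned int plcp_with_padding(unsigned int len)
	{
		return (len + 3) & ~3U;		/* e.g. 13 -> 16, 16 -> 16 */
	}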
@@ -455,12 +394,12 @@ RXbBulkInProcessData (
if (!is_multicast_ether_addr(pMACHeader->abyAddr1)) {
if ( WCTLbIsDuplicate(&(pDevice->sDupRxCache), (PS802_11Header) pbyFrame) ) {
pDevice->s802_11Counter.FrameDuplicateCount++;
- return FALSE;
+ return false;
}
if (compare_ether_addr(pDevice->abyCurrentNetAddr,
pMACHeader->abyAddr1)) {
- return FALSE;
+ return false;
}
}
@@ -470,7 +409,7 @@ RXbBulkInProcessData (
if (!compare_ether_addr((PBYTE)&(pDevice->sRxEthHeader.abySrcAddr[0]),
pDevice->abyCurrentNetAddr))
- return FALSE;
+ return false;
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
@@ -484,17 +423,17 @@ RXbBulkInProcessData (
}
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex) == TRUE) {
- return FALSE;
+ if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex) == true) {
+ return false;
}
}
if (IS_FC_WEP(pbyFrame)) {
- BOOL bRxDecryOK = FALSE;
+ bool bRxDecryOK = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"rx WEP pkt\n");
- bIsWEP = TRUE;
+ bIsWEP = true;
if ((pDevice->bEnableHostWEP) && (iSANodeIndex >= 0)) {
pKey = &STempKey;
pKey->byCipherSuite = pMgmt->sNodeDBTable[iSANodeIndex].byCipherSuite;
@@ -546,11 +485,11 @@ RXbBulkInProcessData (
// pDevice->s802_11Counter.WEPICVErrorCount.QuadPart++;
}
}
- return FALSE;
+ return false;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"WEP Func Fail\n");
- return FALSE;
+ return false;
}
if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
FrameSize -= 8; // Message Integrity Code
@@ -579,14 +518,14 @@ RXbBulkInProcessData (
pbyFrame = skb->data + 8;
}
else {
- return FALSE;
+ return false;
}
}
//
// Management & Control frame Handle
//
- if ((IS_TYPE_DATA((pbyFrame))) == FALSE) {
+ if ((IS_TYPE_DATA((pbyFrame))) == false) {
// Handle Control & Manage Frame
if (IS_TYPE_MGMT((pbyFrame))) {
@@ -598,8 +537,7 @@ RXbBulkInProcessData (
pRxPacket->cbMPDULen = FrameSize;
pRxPacket->uRSSI = *pbyRSSI;
pRxPacket->bySQ = *pbySQ;
- HIDWORD(pRxPacket->qwLocalTSF) = cpu_to_le32(HIDWORD(*pqwTSFTime));
- LODWORD(pRxPacket->qwLocalTSF) = cpu_to_le32(LODWORD(*pqwTSFTime));
+ pRxPacket->qwLocalTSF = cpu_to_le64(*pqwTSFTime);
if (bIsWEP) {
// strip IV
pbyData1 = WLAN_HDR_A3_DATA_PTR(pbyFrame);
@@ -617,7 +555,7 @@ RXbBulkInProcessData (
//Discard beacon packet which channel is 0
if ( (WLAN_GET_FC_FSTYPE((pRxPacket->p80211Header->sA3.wFrameCtl)) == WLAN_FSTYPE_BEACON) ||
(WLAN_GET_FC_FSTYPE((pRxPacket->p80211Header->sA3.wFrameCtl)) == WLAN_FSTYPE_PROBERESP) ) {
- return FALSE;
+ return false;
}
}
pRxPacket->byRxChannel = (*pbyRxSts) >> 2;
@@ -635,7 +573,7 @@ RXbBulkInProcessData (
skb->protocol = htons(ETH_P_802_2);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
- return TRUE;
+ return true;
}
//
@@ -643,11 +581,11 @@ RXbBulkInProcessData (
//
EnqueueRCB(pDevice->FirstRecvMngList, pDevice->LastRecvMngList, pRCBIndicate);
pDevice->NumRecvMngList++;
- if ( bDeFragRx == FALSE) {
+ if ( bDeFragRx == false) {
pRCB->Ref++;
}
- if (pDevice->bIsRxMngWorkItemQueued == FALSE) {
- pDevice->bIsRxMngWorkItemQueued = TRUE;
+ if (pDevice->bIsRxMngWorkItemQueued == false) {
+ pDevice->bIsRxMngWorkItemQueued = true;
tasklet_schedule(&pDevice->RxMngWorkItem);
}
@@ -655,7 +593,7 @@ RXbBulkInProcessData (
else {
// Control Frame
};
- return FALSE;
+ return false;
}
else {
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -667,12 +605,12 @@ RXbBulkInProcessData (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
}
else {
// discard DATA packet while not associate || BSSID error
- if ((pDevice->bLinkPass == FALSE) ||
+ if ((pDevice->bLinkPass == false) ||
!(*pbyRsr & RSR_BSSIDOK)) {
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
@@ -680,7 +618,7 @@ RXbBulkInProcessData (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
//mike add:station mode check eapol-key challenge--->
{
@@ -699,7 +637,7 @@ RXbBulkInProcessData (
if (wEtherType == ETH_P_PAE) { //Protocol Type in LLC-Header
if(((Protocol_Version==1) ||(Protocol_Version==2)) &&
(Packet_Type==3)) { //802.1x OR eapol-key challenge frame receive
- bRxeapol_key = TRUE;
+ bRxeapol_key = true;
Descriptor_type = skb->data[cbIVOffset + 8 + 24 + 6 + 1 +1+1+1+2];
Key_info = (skb->data[cbIVOffset + 8 + 24 + 6 + 1 +1+1+1+2+1]<<8) |skb->data[cbIVOffset + 8 + 24 + 6 + 1 +1+1+1+2+2] ;
if(Descriptor_type==2) { //RSN
@@ -726,8 +664,8 @@ RXbBulkInProcessData (
}
}
else {
- if (pMgmt->bInTIMWake == TRUE) {
- pMgmt->bInTIMWake = FALSE;
+ if (pMgmt->bInTIMWake == true) {
+ pMgmt->bInTIMWake = false;
}
}
}
@@ -735,7 +673,7 @@ RXbBulkInProcessData (
// Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps
if (pDevice->bDiversityEnable && (FrameSize>50) &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
- (pDevice->bLinkPass == TRUE)) {
+ (pDevice->bLinkPass == true)) {
BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
}
@@ -764,7 +702,7 @@ RXbBulkInProcessData (
// -----------------------------------------------
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnable8021x == TRUE)){
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnable8021x == true)){
BYTE abyMacHdr[24];
// Only 802.1x packet incoming allowed
@@ -779,7 +717,7 @@ RXbBulkInProcessData (
if (wEtherType == ETH_P_PAE) {
skb->dev = pDevice->apdev;
- if (bIsWEP == TRUE) {
+ if (bIsWEP == true) {
// strip IV header(8)
memcpy(&abyMacHdr[0], (skb->data + 8), 24);
memcpy((skb->data + 8 + cbIVOffset), &abyMacHdr[0], 24);
@@ -793,12 +731,12 @@ RXbBulkInProcessData (
skb->protocol = htons(ETH_P_802_2);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
- return TRUE;
+ return true;
}
// check if 802.1x authorized
if (!(pMgmt->sNodeDBTable[iSANodeIndex].dwFlags & WLAN_STA_AUTHORIZED))
- return FALSE;
+ return false;
}
@@ -852,9 +790,9 @@ RXbBulkInProcessData (
if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) || (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
- (pDevice->bRxMICFail == TRUE)) {
+ (pDevice->bRxMICFail == true)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC comparison is fail!\n");
- pDevice->bRxMICFail = FALSE;
+ pDevice->bRxMICFail = false;
//pDevice->s802_11Counter.TKIPLocalMICFailures.QuadPart++;
pDevice->s802_11Counter.TKIPLocalMICFailures++;
if (bDeFragRx) {
@@ -864,7 +802,7 @@ RXbBulkInProcessData (
}
}
//send event to wpa_supplicant
- //if(pDevice->bWPASuppWextEnabled == TRUE)
+ //if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
struct iw_michaelmicfailure ev;
@@ -888,7 +826,7 @@ RXbBulkInProcessData (
}
- return FALSE;
+ return false;
}
}
@@ -910,11 +848,11 @@ RXbBulkInProcessData (
RSC = dwRxTSC47_16;
RSC <<= 16;
RSC += wRxTSC15_0;
- memcpy(&(pKey->KeyRSC), &RSC, sizeof(QWORD));
+ memcpy(&(pKey->KeyRSC), &RSC, sizeof(u64));
- if ( (pDevice->sMgmtObj.eCurrMode == WMAC_MODE_ESS_STA) &&
- (pDevice->sMgmtObj.eCurrState == WMAC_STATE_ASSOC)) {
- // check RSC
+ if (pDevice->vnt_mgmt.eCurrMode == WMAC_MODE_ESS_STA &&
+ pDevice->vnt_mgmt.eCurrState == WMAC_STATE_ASSOC) {
+ /* check RSC */
if ( (wRxTSC15_0 < wLocalTSC15_0) &&
(dwRxTSC47_16 <= dwLocalTSC47_16) &&
!((dwRxTSC47_16 == 0) && (dwLocalTSC47_16 == 0xFFFFFFFF))) {
@@ -932,7 +870,7 @@ RXbBulkInProcessData (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
}
}
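Editorial note, not part of the patch: the hunks above rebuild the 48-bit TKIP receive sequence counter from its 16-bit and 32-bit halves and reject frames whose counter has not advanced. A simplified stand-alone sketch of that idea (deliberately not the driver's exact comparison, which also special-cases rollover):

	/* illustrative only; simplified replay check, ignores rollover handling */
	#include <stdint.h>

	static int tsc_is_replay(uint16_t rx15_0, uint32_t rx47_16,
				 uint16_t local15_0, uint32_t local47_16)
	{
		uint64_t rx    = ((uint64_t)rx47_16 << 16) | rx15_0;
		uint64_t local = ((uint64_t)local47_16 << 16) | local15_0;

		return rx <= local;	/* the counter must strictly increase */
	}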
@@ -945,7 +883,7 @@ RXbBulkInProcessData (
// Null data, framesize = 12
if (FrameSize < 12)
- return FALSE;
+ return false;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (s_bAPModeRxData(pDevice,
@@ -954,7 +892,7 @@ RXbBulkInProcessData (
cbHeaderOffset,
iSANodeIndex,
iDANodeIndex
- ) == FALSE) {
+ ) == false) {
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
@@ -962,7 +900,7 @@ RXbBulkInProcessData (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
}
@@ -980,22 +918,18 @@ RXbBulkInProcessData (
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
pDevice->dev->name);
}
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
-
-static BOOL s_bAPModeRxCtl (
- PSDevice pDevice,
- PBYTE pbyFrame,
- signed int iSANodeIndex
- )
+static int s_bAPModeRxCtl(struct vnt_private *pDevice, u8 *pbyFrame,
+ s32 iSANodeIndex)
{
- PS802_11Header p802_11Header;
- CMD_STATUS Status;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ PS802_11Header p802_11Header;
+ CMD_STATUS Status;
if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
@@ -1017,7 +951,7 @@ static BOOL s_bAPModeRxCtl (
&Status
);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDeAuthenBeginSta 1\n");
- return TRUE;
+ return true;
}
if (pMgmt->sNodeDBTable[iSANodeIndex].eNodeState < NODE_ASSOC) {
// send deassoc notification
@@ -1029,13 +963,13 @@ static BOOL s_bAPModeRxCtl (
&Status
);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDisassocBeginSta 2\n");
- return TRUE;
+ return true;
}
if (pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable) {
// delcare received ps-poll event
if (IS_CTL_PSPOLL(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
bScheduleCommand((void *) pDevice,
WLAN_CMD_RX_PSPOLL,
NULL);
@@ -1045,8 +979,8 @@ static BOOL s_bAPModeRxCtl (
// check Data PS state
// if PW bit off, send out all PS bufferring packets.
if (!IS_FC_POWERMGT(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = FALSE;
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
+ pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
bScheduleCommand((void *) pDevice,
WLAN_CMD_RX_PSPOLL,
NULL);
@@ -1056,15 +990,15 @@ static BOOL s_bAPModeRxCtl (
}
else {
if (IS_FC_POWERMGT(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = TRUE;
+ pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = true;
// Once if STA in PS state, enable multicast bufferring
- pMgmt->sNodeDBTable[0].bPSEnable = TRUE;
+ pMgmt->sNodeDBTable[0].bPSEnable = true;
}
else {
// clear all pending PS frame.
if (pMgmt->sNodeDBTable[iSANodeIndex].wEnQueueCnt > 0) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = FALSE;
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
+ pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
bScheduleCommand((void *) pDevice,
WLAN_CMD_RX_PSPOLL,
NULL);
@@ -1089,32 +1023,24 @@ static BOOL s_bAPModeRxCtl (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR1:%pM\n",
p802_11Header->abyAddr1);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: wFrameCtl= %x\n", p802_11Header->wFrameCtl );
- return TRUE;
+ return true;
}
}
}
- return FALSE;
+ return false;
}
-static BOOL s_bHandleRxEncryption (
- PSDevice pDevice,
- PBYTE pbyFrame,
- unsigned int FrameSize,
- PBYTE pbyRsr,
- PBYTE pbyNewRsr,
- PSKeyItem * pKeyOut,
- int * pbExtIV,
- PWORD pwRxTSC15_0,
- PDWORD pdwRxTSC47_16
- )
+static int s_bHandleRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
+ u32 FrameSize, u8 *pbyRsr, u8 *pbyNewRsr, PSKeyItem *pKeyOut,
+ s32 *pbExtIV, u16 *pwRxTSC15_0, u32 *pdwRxTSC47_16)
{
- unsigned int PayloadLen = FrameSize;
- PBYTE pbyIV;
- BYTE byKeyIdx;
- PSKeyItem pKey = NULL;
- BYTE byDecMode = KEY_CTL_WEP;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u32 PayloadLen = FrameSize;
+ u8 *pbyIV;
+ u8 byKeyIdx;
+ PSKeyItem pKey = NULL;
+ u8 byDecMode = KEY_CTL_WEP;
*pwRxTSC15_0 = 0;
@@ -1139,7 +1065,7 @@ static BOOL s_bHandleRxEncryption (
(pMgmt->byCSSPK != KEY_CTL_NONE)) {
// unicast pkt use pairwise key
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"unicast pkt\n");
- if (KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, 0xFFFFFFFF, &pKey) == TRUE) {
+ if (KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, 0xFFFFFFFF, &pKey) == true) {
if (pMgmt->byCSSPK == KEY_CTL_TKIP)
byDecMode = KEY_CTL_TKIP;
else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
@@ -1173,24 +1099,24 @@ static BOOL s_bHandleRxEncryption (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey == NULL\n");
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == TRUE) {
+ } else if (pDevice->bLinkPass == true) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
- return FALSE;
+ return false;
}
if (byDecMode != pKey->byCipherSuite) {
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == TRUE) {
+ } else if (pDevice->bLinkPass == true) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
*pKeyOut = NULL;
- return FALSE;
+ return false;
}
if (byDecMode == KEY_CTL_WEP) {
// handle WEP
if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
- (((PSKeyTable)(&pKey->pvKeyTable))->bSoftWEP == TRUE)) {
+ (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true)) {
// Software WEP
// 1. 3253A
// 2. WEP 256
@@ -1238,35 +1164,23 @@ static BOOL s_bHandleRxEncryption (
}// end of TKIP/AES
if ((*(pbyIV+3) & 0x20) != 0)
- *pbExtIV = TRUE;
- return TRUE;
+ *pbExtIV = true;
+ return true;
}
-
-static BOOL s_bHostWepRxEncryption (
- PSDevice pDevice,
- PBYTE pbyFrame,
- unsigned int FrameSize,
- PBYTE pbyRsr,
- BOOL bOnFly,
- PSKeyItem pKey,
- PBYTE pbyNewRsr,
- int * pbExtIV,
- PWORD pwRxTSC15_0,
- PDWORD pdwRxTSC47_16
- )
+static int s_bHostWepRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
+ u32 FrameSize, u8 *pbyRsr, int bOnFly, PSKeyItem pKey, u8 *pbyNewRsr,
+ s32 *pbExtIV, u16 *pwRxTSC15_0, u32 *pdwRxTSC47_16)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int PayloadLen = FrameSize;
- PBYTE pbyIV;
- BYTE byKeyIdx;
- BYTE byDecMode = KEY_CTL_WEP;
- PS802_11Header pMACHeader;
-
-
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ PS802_11Header pMACHeader;
+ u32 PayloadLen = FrameSize;
+ u8 *pbyIV;
+ u8 byKeyIdx;
+ u8 byDecMode = KEY_CTL_WEP;
- *pwRxTSC15_0 = 0;
- *pdwRxTSC47_16 = 0;
+ *pwRxTSC15_0 = 0;
+ *pdwRxTSC47_16 = 0;
pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
if ( WLAN_GET_FC_TODS(*(PWORD)pbyFrame) &&
@@ -1289,18 +1203,18 @@ static BOOL s_bHostWepRxEncryption (
if (byDecMode != pKey->byCipherSuite) {
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == TRUE) {
+ } else if (pDevice->bLinkPass == true) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
- return FALSE;
+ return false;
}
if (byDecMode == KEY_CTL_WEP) {
// handle WEP
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"byDecMode == KEY_CTL_WEP\n");
if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
- (((PSKeyTable)(&pKey->pvKeyTable))->bSoftWEP == TRUE) ||
- (bOnFly == FALSE)) {
+ (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true) ||
+ (bOnFly == false)) {
// Software WEP
// 1. 3253A
// 2. WEP 256
@@ -1333,7 +1247,7 @@ static BOOL s_bHostWepRxEncryption (
if (byDecMode == KEY_CTL_TKIP) {
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || (bOnFly == FALSE)) {
+ if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || (bOnFly == false)) {
// Software TKIP
// 1. 3253 A
// 2. NotOnFly
@@ -1353,7 +1267,7 @@ static BOOL s_bHostWepRxEncryption (
}
if (byDecMode == KEY_CTL_CCMP) {
- if (bOnFly == FALSE) {
+ if (bOnFly == false) {
// Software CCMP
// NotOnFly
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"soft KEY_CTL_CCMP\n");
@@ -1369,33 +1283,23 @@ static BOOL s_bHostWepRxEncryption (
}// end of TKIP/AES
if ((*(pbyIV+3) & 0x20) != 0)
- *pbExtIV = TRUE;
- return TRUE;
+ *pbExtIV = true;
+ return true;
}
-
-
-static BOOL s_bAPModeRxData (
- PSDevice pDevice,
- struct sk_buff *skb,
- unsigned int FrameSize,
- unsigned int cbHeaderOffset,
- signed int iSANodeIndex,
- signed int iDANodeIndex
- )
-
+static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
+ u32 FrameSize, u32 cbHeaderOffset, s32 iSANodeIndex, s32 iDANodeIndex)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- BOOL bRelayAndForward = FALSE;
- BOOL bRelayOnly = FALSE;
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- WORD wAID;
-
+ struct sk_buff *skbcpy;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int bRelayAndForward = false;
+ int bRelayOnly = false;
+ u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ u16 wAID;
- struct sk_buff* skbcpy = NULL;
if (FrameSize > CB_MAX_BUF_SIZE)
- return FALSE;
+ return false;
// check DA
if (is_multicast_ether_addr((PBYTE)(skb->data+cbHeaderOffset))) {
if (pMgmt->sNodeDBTable[0].bPSEnable) {
@@ -1417,7 +1321,7 @@ static BOOL s_bAPModeRxData (
}
}
else {
- bRelayAndForward = TRUE;
+ bRelayAndForward = true;
}
}
else {
@@ -1437,10 +1341,10 @@ static BOOL s_bAPModeRxData (
pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "relay: index= %d, pMgmt->abyPSTxMap[%d]= %d\n",
iDANodeIndex, (wAID >> 3), pMgmt->abyPSTxMap[wAID >> 3]);
- return TRUE;
+ return true;
}
else {
- bRelayOnly = TRUE;
+ bRelayOnly = true;
}
}
}
@@ -1457,23 +1361,22 @@ static BOOL s_bAPModeRxData (
}
if (bRelayOnly)
- return FALSE;
+ return false;
}
// none associate, don't forward
if (pDevice->uAssocCount == 0)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
-void RXvWorkItem(void *Context)
+void RXvWorkItem(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice) Context;
- int ntStatus;
- PRCB pRCB=NULL;
+ int ntStatus;
+ PRCB pRCB = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
spin_lock_irq(&pDevice->lock);
@@ -1487,19 +1390,15 @@ void RXvWorkItem(void *Context)
DequeueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList);
ntStatus = PIPEnsBulkInUsbRead(pDevice, pRCB);
}
- pDevice->bIsRxWorkItemQueued = FALSE;
+ pDevice->bIsRxWorkItemQueued = false;
spin_unlock_irq(&pDevice->lock);
}
-void
-RXvFreeRCB(
- PRCB pRCB,
- BOOL bReAllocSkb
- )
+void RXvFreeRCB(PRCB pRCB, int bReAllocSkb)
{
- PSDevice pDevice = (PSDevice)pRCB->pDevice;
+ struct vnt_private *pDevice = pRCB->pDevice;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->RXvFreeRCB\n");
@@ -1507,12 +1406,12 @@ RXvFreeRCB(
ASSERT(!pRCB->Ref); // should be 0
ASSERT(pRCB->pDevice); // shouldn't be NULL
- if (bReAllocSkb == FALSE) {
+ if (bReAllocSkb == false) {
kfree_skb(pRCB->skb);
- bReAllocSkb = TRUE;
+ bReAllocSkb = true;
}
- if (bReAllocSkb == TRUE) {
+ if (bReAllocSkb == true) {
pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
// todo error handling
if (pRCB->skb == NULL) {
@@ -1529,21 +1428,20 @@ RXvFreeRCB(
if ((pDevice->Flags & fMP_POST_READS) && MP_IS_READY(pDevice) &&
- (pDevice->bIsRxWorkItemQueued == FALSE) ) {
+ (pDevice->bIsRxWorkItemQueued == false) ) {
- pDevice->bIsRxWorkItemQueued = TRUE;
+ pDevice->bIsRxWorkItemQueued = true;
tasklet_schedule(&pDevice->ReadWorkItem);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----RXFreeRCB %d %d\n",pDevice->NumRecvFreeList, pDevice->NumRecvMngList);
}
-void RXvMngWorkItem(void *Context)
+void RXvMngWorkItem(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice) Context;
- PRCB pRCB=NULL;
- PSRxMgmtPacket pRxPacket;
- BOOL bReAllocSkb = FALSE;
+ PRCB pRCB = NULL;
+ struct vnt_rx_mgmt *pRxPacket;
+ int bReAllocSkb = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Mng Thread\n");
@@ -1558,7 +1456,7 @@ void RXvMngWorkItem(void *Context)
}
ASSERT(pRCB);// cannot be NULL
pRxPacket = &(pRCB->sMngPacket);
- vMgrRxManagePacket((void *) pDevice, &(pDevice->sMgmtObj), pRxPacket);
+ vMgrRxManagePacket(pDevice, &pDevice->vnt_mgmt, pRxPacket);
pRCB->Ref--;
if(pRCB->Ref == 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeMng %d %d\n",pDevice->NumRecvFreeList, pDevice->NumRecvMngList);
@@ -1568,7 +1466,7 @@ void RXvMngWorkItem(void *Context)
}
}
- pDevice->bIsRxMngWorkItemQueued = FALSE;
+ pDevice->bIsRxMngWorkItemQueued = false;
spin_unlock_irq(&pDevice->lock);
}
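Editorial note, not part of the patch: RXvFreeRCB and RXvMngWorkItem above both use the same re-arm idiom, a "queued" flag tested and set while pDevice->lock is held, followed by tasklet_schedule(). Reduced to its core (the caller is assumed to hold the lock, as in the code above):

	/* illustrative only; field and tasklet names taken from struct vnt_private */
	static void rx_work_kick(struct vnt_private *pDevice)
	{
		if (!pDevice->bIsRxWorkItemQueued) {
			pDevice->bIsRxWorkItemQueued = true;
			tasklet_schedule(&pDevice->ReadWorkItem);
		}
	}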
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index d4fca43af4fe..786c523f5479 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -45,17 +45,9 @@ void RXvWorkItem(void *Context);
void RXvMngWorkItem(void *Context);
-void
-RXvFreeRCB(
- PRCB pRCB,
- BOOL bReAllocSkb
- );
-
-BOOL
-RXbBulkInProcessData(
- PSDevice pDevice,
- PRCB pRCB,
- unsigned long BytesToIndicate
- );
+void RXvFreeRCB(PRCB pRCB, int bReAllocSkb);
+
+int RXbBulkInProcessData(struct vnt_private *, PRCB pRCB,
+ unsigned long BytesToIndicate);
#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index 8831ea03c001..4371a77e9adc 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -56,16 +56,13 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Export Functions --------------------------*/
-BOOL
-FIRMWAREbDownload(
- PSDevice pDevice
- )
+int FIRMWAREbDownload(struct vnt_private *pDevice)
{
struct device *dev = &pDevice->usb->dev;
const struct firmware *fw;
int NdisStatus;
void *pBuffer = NULL;
- BOOL result = FALSE;
+ bool result = false;
u16 wLength;
int ii, rc;
@@ -102,7 +99,7 @@ FIRMWAREbDownload(
goto free_fw;
}
- result = TRUE;
+ result = true;
free_fw:
release_firmware(fw);
@@ -114,12 +111,9 @@ out:
}
MODULE_FIRMWARE(FIRMWARE_NAME);
-BOOL
-FIRMWAREbBrach2Sram(
- PSDevice pDevice
- )
+int FIRMWAREbBrach2Sram(struct vnt_private *pDevice)
{
- int NdisStatus;
+ int NdisStatus;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Branch to Sram\n");
@@ -132,17 +126,14 @@ FIRMWAREbBrach2Sram(
);
if (NdisStatus != STATUS_SUCCESS) {
- return (FALSE);
+ return (false);
} else {
- return (TRUE);
+ return (true);
}
}
-BOOL
-FIRMWAREbCheckVersion(
- PSDevice pDevice
- )
+int FIRMWAREbCheckVersion(struct vnt_private *pDevice)
{
int ntStatus;
@@ -156,17 +147,17 @@ FIRMWAREbCheckVersion(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
if (ntStatus != STATUS_SUCCESS) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Invalid.\n");
- return FALSE;
+ return false;
}
if (pDevice->wFirmwareVersion == 0xFFFF) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"In Loader.\n");
- return FALSE;
+ return false;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
if (pDevice->wFirmwareVersion < FIRMWARE_VERSION) {
// branch to loader for download new firmware
FIRMWAREbBrach2Sram(pDevice);
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
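Editorial sketch, not part of the patch: the three firmware helpers above are typically used together at start-up; the exact ordering lives in main_usb.c, which this hunk does not touch, so the sequence below is an assumption for illustration only:

	/* illustrative only; assumed bring-up order, not the driver's code */
	static int firmware_bring_up(struct vnt_private *pDevice)
	{
		if (FIRMWAREbCheckVersion(pDevice))
			return 0;		/* running image is already current */

		if (!FIRMWAREbDownload(pDevice))
			return -EIO;

		if (!FIRMWAREbBrach2Sram(pDevice))
			return -EIO;

		return 0;
	}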
diff --git a/drivers/staging/vt6656/firmware.h b/drivers/staging/vt6656/firmware.h
index b2f5b5818a93..ebab3a6351b3 100644
--- a/drivers/staging/vt6656/firmware.h
+++ b/drivers/staging/vt6656/firmware.h
@@ -41,19 +41,8 @@
/*--------------------- Export Functions --------------------------*/
-BOOL
-FIRMWAREbDownload(
- PSDevice pDevice
- );
-
-BOOL
-FIRMWAREbBrach2Sram(
- PSDevice pDevice
- );
-
-BOOL
-FIRMWAREbCheckVersion(
- PSDevice pDevice
- );
+int FIRMWAREbDownload(struct vnt_private *);
+int FIRMWAREbBrach2Sram(struct vnt_private *);
+int FIRMWAREbCheckVersion(struct vnt_private *);
#endif /* __FIRMWARE_H__ */
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index 26a7d0e4b048..bc5e9da47586 100644
--- a/drivers/staging/vt6656/hostap.c
+++ b/drivers/staging/vt6656/hostap.c
@@ -60,13 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
*
*/
-static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
{
- PSDevice apdev_priv;
+ struct vnt_private *apdev_priv;
struct net_device *dev = pDevice->dev;
int ret;
const struct net_device_ops apdev_netdev_ops = {
- .ndo_start_xmit = pDevice->tx_80211,
+ .ndo_start_xmit = pDevice->tx_80211,
};
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
@@ -120,7 +120,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
*
*/
-static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
+static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
{
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: disabling hostapd mode\n", pDevice->dev->name);
@@ -135,9 +135,9 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
}
kfree(pDevice->apdev);
pDevice->apdev = NULL;
- pDevice->bEnable8021x = FALSE;
- pDevice->bEnableHostWEP = FALSE;
- pDevice->bEncryptionEnable = FALSE;
+ pDevice->bEnable8021x = false;
+ pDevice->bEnableHostWEP = false;
+ pDevice->bEncryptionEnable = false;
return 0;
}
@@ -157,7 +157,8 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
*
*/
-int vt6656_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
+int vt6656_hostap_set_hostapd(struct vnt_private *pDevice,
+ int val, int rtnl_locked)
{
if (val < 0 || val > 1)
return -EINVAL;
@@ -187,8 +188,8 @@ int vt6656_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
* Return Value:
*
*/
-static int hostap_remove_sta(PSDevice pDevice,
- struct viawget_hostapd_param *param)
+static int hostap_remove_sta(struct vnt_private *pDevice,
+ struct viawget_hostapd_param *param)
{
unsigned int uNodeIndex;
@@ -215,22 +216,21 @@ static int hostap_remove_sta(PSDevice pDevice,
* Return Value:
*
*/
-static int hostap_add_sta(PSDevice pDevice,
- struct viawget_hostapd_param *param)
+static int hostap_add_sta(struct vnt_private *pDevice,
+ struct viawget_hostapd_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
unsigned int uNodeIndex;
+ if (!BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex))
+ BSSvCreateOneNode(pDevice, &uNodeIndex);
- if (!BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
- BSSvCreateOneNode((PSDevice)pDevice, &uNodeIndex);
- }
memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, param->sta_addr, WLAN_ADDR_LEN);
pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = param->u.add_sta.capability;
// TODO listenInterval
// pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = 1;
- pMgmt->sNodeDBTable[uNodeIndex].bPSEnable = FALSE;
+ pMgmt->sNodeDBTable[uNodeIndex].bPSEnable = false;
pMgmt->sNodeDBTable[uNodeIndex].bySuppRate = param->u.add_sta.tx_supp_rates;
// set max tx rate
@@ -275,10 +275,10 @@ static int hostap_add_sta(PSDevice pDevice,
*
*/
-static int hostap_get_info_sta(PSDevice pDevice,
- struct viawget_hostapd_param *param)
+static int hostap_get_info_sta(struct vnt_private *pDevice,
+ struct viawget_hostapd_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
unsigned int uNodeIndex;
if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
@@ -308,10 +308,10 @@ static int hostap_get_info_sta(PSDevice pDevice,
* Return Value:
*
*/
-static int hostap_set_flags_sta(PSDevice pDevice,
- struct viawget_hostapd_param *param)
+static int hostap_set_flags_sta(struct vnt_private *pDevice,
+ struct viawget_hostapd_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
unsigned int uNodeIndex;
if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
@@ -342,10 +342,10 @@ static int hostap_set_flags_sta(PSDevice pDevice,
* Return Value:
*
*/
-static int hostap_set_generic_element(PSDevice pDevice,
+static int hostap_set_generic_element(struct vnt_private *pDevice,
struct viawget_hostapd_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
@@ -388,7 +388,7 @@ static int hostap_set_generic_element(PSDevice pDevice,
*
*/
-static void hostap_flush_sta(PSDevice pDevice)
+static void hostap_flush_sta(struct vnt_private *pDevice)
{
// reserved node index =0 for multicast node.
BSSvClearNodeDBTable(pDevice, 1);
@@ -410,21 +410,20 @@ static void hostap_flush_sta(PSDevice pDevice)
* Return Value:
*
*/
-static int hostap_set_encryption(PSDevice pDevice,
- struct viawget_hostapd_param *param,
- int param_len)
+static int hostap_set_encryption(struct vnt_private *pDevice,
+ struct viawget_hostapd_param *param, int param_len)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- DWORD dwKeyIndex = 0;
- BYTE abyKey[MAX_KEY_LEN];
- BYTE abySeq[MAX_KEY_LEN];
- NDIS_802_11_KEY_RSC KeyRSC;
- BYTE byKeyDecMode = KEY_CTL_WEP;
- int ret = 0;
- int iNodeIndex = -1;
- int ii;
- BOOL bKeyTableFull = FALSE;
- WORD wKeyCtl = 0;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u32 dwKeyIndex = 0;
+ u8 abyKey[MAX_KEY_LEN];
+ u8 abySeq[MAX_KEY_LEN];
+ NDIS_802_11_KEY_RSC KeyRSC;
+ u8 byKeyDecMode = KEY_CTL_WEP;
+ int ret = 0;
+ s32 iNodeIndex = -1;
+ int ii;
+ int bKeyTableFull = false;
+ u16 wKeyCtl = 0;
param->u.crypt.err = 0;
@@ -445,7 +444,7 @@ static int hostap_set_encryption(PSDevice pDevice,
iNodeIndex = 0;
} else {
- if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &iNodeIndex) == FALSE) {
+ if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &iNodeIndex) == false) {
param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
return -EINVAL;
@@ -456,15 +455,15 @@ static int hostap_set_encryption(PSDevice pDevice,
if (param->u.crypt.alg == WPA_ALG_NONE) {
- if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly == TRUE) {
+ if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly == true) {
if (KeybRemoveKey( pDevice,
&(pDevice->sKey),
param->sta_addr,
pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex
- ) == FALSE) {
+ ) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybRemoveKey fail \n");
}
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
}
pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = 0;
pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = 0;
@@ -493,13 +492,13 @@ static int hostap_set_encryption(PSDevice pDevice,
dwKeyIndex = (DWORD)(param->u.crypt.idx);
if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
pDevice->byKeyIndex = (BYTE)dwKeyIndex;
- pDevice->bTransmitKey = TRUE;
+ pDevice->bTransmitKey = true;
dwKeyIndex |= (1 << 31);
}
if (param->u.crypt.alg == WPA_ALG_WEP) {
- if ((pDevice->bEnable8021x == FALSE) || (iNodeIndex == 0)) {
+ if ((pDevice->bEnable8021x == false) || (iNodeIndex == 0)) {
KeybSetDefaultKey( pDevice,
&(pDevice->sKey),
dwKeyIndex & ~(BIT30 | USE_KEYRSC),
@@ -512,27 +511,25 @@ static int hostap_set_encryption(PSDevice pDevice,
} else {
// 8021x enable, individual key
dwKeyIndex |= (1 << 30); // set pairwise key
- if (KeybSetKey(pDevice,
- &(pDevice->sKey),
- &param->sta_addr[0],
- dwKeyIndex & ~(USE_KEYRSC),
- param->u.crypt.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- KEY_CTL_WEP
- ) == TRUE) {
+ if (KeybSetKey(pDevice, &(pDevice->sKey),
+ &param->sta_addr[0],
+ dwKeyIndex & ~(USE_KEYRSC),
+ param->u.crypt.key_len,
+ &KeyRSC, (PBYTE)abyKey,
+ KEY_CTL_WEP
+ ) == true) {
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
// Key Table Full
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
- bKeyTableFull = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
+ bKeyTableFull = true;
}
}
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
pMgmt->byCSSPK = KEY_CTL_WEP;
pMgmt->byCSSGK = KEY_CTL_WEP;
pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = KEY_CTL_WEP;
@@ -574,11 +571,11 @@ static int hostap_set_encryption(PSDevice pDevice,
&(pDevice->sKey),
dwKeyIndex,
param->u.crypt.key_len,
- (PQWORD) &(KeyRSC),
+ &KeyRSC,
abyKey,
byKeyDecMode
);
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
dwKeyIndex |= (1 << 30); // set pairwise key
@@ -587,23 +584,23 @@ static int hostap_set_encryption(PSDevice pDevice,
&param->sta_addr[0],
dwKeyIndex,
param->u.crypt.key_len,
- (PQWORD) &(KeyRSC),
+ &KeyRSC,
(PBYTE)abyKey,
byKeyDecMode
- ) == TRUE) {
+ ) == true) {
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
// Key Table Full
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
- bKeyTableFull = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
+ bKeyTableFull = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Key Table Full\n");
}
}
- if (bKeyTableFull == TRUE) {
+ if (bKeyTableFull == true) {
wKeyCtl &= 0x7F00; // clear all key control filed
wKeyCtl |= (byKeyDecMode << 4);
wKeyCtl |= (byKeyDecMode);
@@ -625,7 +622,7 @@ static int hostap_set_encryption(PSDevice pDevice,
);
// set wep key
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = byKeyDecMode;
pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex;
pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0;
@@ -649,14 +646,14 @@ static int hostap_set_encryption(PSDevice pDevice,
* Return Value:
*
*/
-static int hostap_get_encryption(PSDevice pDevice,
+static int hostap_get_encryption(struct vnt_private *pDevice,
struct viawget_hostapd_param *param,
int param_len)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- int ii;
- int iNodeIndex =0;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int ret = 0;
+ int ii;
+ s32 iNodeIndex = 0;
param->u.crypt.err = 0;
@@ -664,7 +661,7 @@ static int hostap_get_encryption(PSDevice pDevice,
if (is_broadcast_ether_addr(param->sta_addr)) {
iNodeIndex = 0;
} else {
- if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &iNodeIndex) == FALSE) {
+ if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &iNodeIndex) == false) {
param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "hostap_get_encryption: HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
return -EINVAL;
@@ -694,7 +691,7 @@ static int hostap_get_encryption(PSDevice pDevice,
*
*/
-int vt6656_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
+int vt6656_hostap_ioctl(struct vnt_private *pDevice, struct iw_point *p)
{
struct viawget_hostapd_param *param;
int ret = 0;
diff --git a/drivers/staging/vt6656/hostap.h b/drivers/staging/vt6656/hostap.h
index b660aee1ca0e..f5656cd96a8f 100644
--- a/drivers/staging/vt6656/hostap.h
+++ b/drivers/staging/vt6656/hostap.h
@@ -61,7 +61,7 @@
#define ARPHRD_IEEE80211 801
#endif
-int vt6656_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked);
-int vt6656_hostap_ioctl(PSDevice pDevice, struct iw_point *p);
+int vt6656_hostap_set_hostapd(struct vnt_private *, int val, int rtnl_locked);
+int vt6656_hostap_ioctl(struct vnt_private *, struct iw_point *p);
#endif /* __HOSTAP_H__ */
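
The prototype changes above (and the matching ones in key.h further down) replace the opaque PSDevice/void * handle with a typed struct vnt_private * parameter. A minimal sketch of the pattern, with illustrative names only (key_init_old/key_init_new are not functions from this driver, and the struct is left opaque here):

struct vnt_private;	/* opaque here; the real definition lives elsewhere in the driver */

/* old style: callers pass an untyped handle and the callee casts it back */
static void key_init_old(void *pDeviceHandler)
{
	struct vnt_private *pDevice = pDeviceHandler;
	(void)pDevice;		/* ... use pDevice ... */
}

/* new style: the compiler checks the argument type at every call site */
static void key_init_new(struct vnt_private *pDevice)
{
	(void)pDevice;		/* ... use pDevice ... */
}
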
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index bba31caae036..51990bd3dd45 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -75,23 +75,22 @@ static int msglevel = MSG_LEVEL_INFO; /* MSG_LEVEL_DEBUG */
* if we've gotten no data
*
-*/
-void INTvWorkItem(void *Context)
+void INTvWorkItem(struct vnt_private *pDevice)
{
- PSDevice pDevice = Context;
int ntStatus;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Interrupt Polling Thread\n");
spin_lock_irq(&pDevice->lock);
- if (pDevice->fKillEventPollingThread != TRUE)
+ if (pDevice->fKillEventPollingThread != true)
ntStatus = PIPEnsInterruptRead(pDevice);
spin_unlock_irq(&pDevice->lock);
}
-void INTnsProcessData(PSDevice pDevice)
+void INTnsProcessData(struct vnt_private *pDevice)
{
PSINTData pINTData;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct net_device_stats *pStats = &pDevice->stats;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptProcessData\n");
@@ -147,12 +146,12 @@ void INTnsProcessData(PSDevice pDevice)
if (pMgmt->byDTIMCount > 0) {
pMgmt->byDTIMCount--;
pMgmt->sNodeDBTable[0].bRxPSPoll =
- FALSE;
+ false;
} else if (pMgmt->byDTIMCount == 0) {
/* check if multicast tx buffering */
pMgmt->byDTIMCount =
pMgmt->byDTIMPeriod-1;
- pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[0].bRxPSPoll = true;
if (pMgmt->sNodeDBTable[0].bPSEnable)
bScheduleCommand((void *) pDevice,
WLAN_CMD_RX_PSPOLL,
@@ -162,9 +161,9 @@ void INTnsProcessData(PSDevice pDevice)
WLAN_CMD_BECON_SEND,
NULL);
} /* if (pDevice->eOPMode == OP_MODE_AP) */
- pDevice->bBeaconSent = TRUE;
+ pDevice->bBeaconSent = true;
} else {
- pDevice->bBeaconSent = FALSE;
+ pDevice->bBeaconSent = false;
}
if (pINTData->byISR0 & ISR_TBTT) {
if (pDevice->bEnablePSMode)
@@ -179,8 +178,7 @@ void INTnsProcessData(PSDevice pDevice)
NULL);
}
}
- LODWORD(pDevice->qwCurrTSF) = pINTData->dwLoTSF;
- HIDWORD(pDevice->qwCurrTSF) = pINTData->dwHiTSF;
+ pDevice->qwCurrTSF = cpu_to_le64(pINTData->qwTSF);
/*DBG_PRN_GRP01(("ISR0 = %02x ,
LoTsf = %08x,
HiTsf = %08x\n",
@@ -204,7 +202,7 @@ void INTnsProcessData(PSDevice pDevice)
WLAN_CMD_RADIO,
NULL);
pDevice->intBuf.uDataLen = 0;
- pDevice->intBuf.bInUse = FALSE;
+ pDevice->intBuf.bInUse = false;
pStats->tx_packets = pDevice->scStatistic.ullTsrOK;
pStats->tx_bytes = pDevice->scStatistic.ullTxDirectedBytes +
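
The TSF handling above moves from two u32 halves assembled with the LODWORD()/HIDWORD() macros to a single u64 taken straight from the interrupt buffer (the patch additionally passes it through cpu_to_le64(); whether a byte swap is really wanted depends on how that buffer is declared, which is outside this sketch). A rough sketch of the difference, using illustrative structure names rather than the driver's own:

#include <stdint.h>

struct int_data_old {
	uint32_t dwLoTSF;	/* low 32 bits of the TSF timer */
	uint32_t dwHiTSF;	/* high 32 bits of the TSF timer */
};

struct int_data_new {
	uint64_t qwTSF;		/* full 64-bit TSF value */
};

/* old pattern: rebuild the 64-bit timestamp from its two halves */
static uint64_t tsf_old(const struct int_data_old *d)
{
	return ((uint64_t)d->dwHiTSF << 32) | d->dwLoTSF;
}

/* new pattern: the buffer already carries one 64-bit field */
static uint64_t tsf_new(const struct int_data_new *d)
{
	return d->qwTSF;
}
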
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 5d8faf9f96ec..27c725f1ce11 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -34,7 +34,6 @@
#include "device.h"
/*--------------------- Export Definitions -------------------------*/
-#pragma pack(1)
typedef struct tagSINTData {
BYTE byTSR0;
BYTE byPkt0;
@@ -48,8 +47,7 @@ typedef struct tagSINTData {
BYTE byTSR3;
BYTE byPkt3;
WORD wTime3;
- u32 dwLoTSF;
- u32 dwHiTSF;
+ u64 qwTSF;
BYTE byISR0;
BYTE byISR1;
BYTE byRTSSuccess;
@@ -66,7 +64,7 @@ SINTData, *PSINTData;
/*--------------------- Export Functions --------------------------*/
-void INTvWorkItem(void *Context);
-void INTnsProcessData(PSDevice pDevice);
+void INTvWorkItem(struct vnt_private *);
+void INTnsProcessData(struct vnt_private *);
#endif /* __INT_H__ */
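
The int.h hunk above also drops the file-wide #pragma pack(1) in front of tagSINTData, and the visible context does not show a __packed attribute being added in its place, so whether the new u64 member ends up padded to an 8-byte boundary depends on changes outside this hunk. A hedged way to guard a wire-format structure against such drift (sketch only, not something this patch adds; the struct below is a cut-down stand-in for tagSINTData):

#include <stdint.h>

/* stand-in for part of tagSINTData; only a few fields are shown */
struct int_data {
	uint8_t  byTSR0;
	uint8_t  byPkt0;
	uint16_t wTime0;
	uint64_t qwTSF;
	uint8_t  byISR0;
	uint8_t  byISR1;
} __attribute__((packed));

/* trips at compile time if alignment ever changes the wire layout */
_Static_assert(sizeof(struct int_data) == 14,
	       "interrupt buffer layout changed");
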
diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h
index 22710cef751d..c354a77964d8 100644
--- a/drivers/staging/vt6656/iocmd.h
+++ b/drivers/staging/vt6656/iocmd.h
@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {
// Ioctl interface structure
// Command structure
//
-#pragma pack(1)
typedef struct tagSCmdRequest {
u8 name[16];
void *data;
u16 wResult;
u16 wCmdCode;
-} SCmdRequest, *PSCmdRequest;
+} __packed SCmdRequest, *PSCmdRequest;
//
// Scan
@@ -111,7 +110,7 @@ typedef struct tagSCmdScan {
u8 ssid[SSID_MAXLEN + 2];
-} SCmdScan, *PSCmdScan;
+} __packed SCmdScan, *PSCmdScan;
//
// BSS Join
@@ -123,10 +122,10 @@ typedef struct tagSCmdBSSJoin {
u16 wBBPType;
u8 ssid[SSID_MAXLEN + 2];
u32 uChannel;
- BOOL bPSEnable;
- BOOL bShareKeyAuth;
+ bool bPSEnable;
+ bool bShareKeyAuth;
-} SCmdBSSJoin, *PSCmdBSSJoin;
+} __packed SCmdBSSJoin, *PSCmdBSSJoin;
//
// Zonetype Setting
@@ -134,18 +133,18 @@ typedef struct tagSCmdBSSJoin {
typedef struct tagSCmdZoneTypeSet {
- BOOL bWrite;
+ bool bWrite;
WZONETYPE ZoneType;
-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
+} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;
typedef struct tagSWPAResult {
char ifname[100];
u8 proto;
u8 key_mgmt;
u8 eap_type;
- BOOL authenticated;
-} SWPAResult, *PSWPAResult;
+ bool authenticated;
+} __packed SWPAResult, *PSWPAResult;
typedef struct tagSCmdStartAP {
@@ -154,20 +153,20 @@ typedef struct tagSCmdStartAP {
u8 ssid[SSID_MAXLEN + 2];
u32 uChannel;
u32 uBeaconInt;
- BOOL bShareKeyAuth;
+ bool bShareKeyAuth;
u8 byBasicRate;
-} SCmdStartAP, *PSCmdStartAP;
+} __packed SCmdStartAP, *PSCmdStartAP;
typedef struct tagSCmdSetWEP {
- BOOL bEnableWep;
+ bool bEnableWep;
u8 byKeyIndex;
u8 abyWepKey[WEP_NKEYS][WEP_KEYMAXLEN];
- BOOL bWepKeyAvailable[WEP_NKEYS];
+ bool bWepKeyAvailable[WEP_NKEYS];
u32 auWepKeyLength[WEP_NKEYS];
-} SCmdSetWEP, *PSCmdSetWEP;
+} __packed SCmdSetWEP, *PSCmdSetWEP;
typedef struct tagSBSSIDItem {
@@ -177,17 +176,17 @@ typedef struct tagSBSSIDItem {
u16 wBeaconInterval;
u16 wCapInfo;
u8 byNetType;
- BOOL bWEPOn;
+ bool bWEPOn;
u32 uRSSI;
-} SBSSIDItem;
+} __packed SBSSIDItem;
typedef struct tagSBSSIDList {
u32 uItem;
SBSSIDItem sBSSIDList[0];
-} SBSSIDList, *PSBSSIDList;
+} __packed SBSSIDList, *PSBSSIDList;
typedef struct tagSNodeItem {
@@ -198,17 +197,17 @@ typedef struct tagSNodeItem {
u16 wInActiveCount;
u16 wEnQueueCnt;
u16 wFlags;
- BOOL bPWBitOn;
+ bool bPWBitOn;
u8 byKeyIndex;
u16 wWepKeyLength;
u8 abyWepKey[WEP_KEYMAXLEN];
// Auto rate fallback vars
- BOOL bIsInFallback;
+ bool bIsInFallback;
u32 uTxFailures;
u32 uTxAttempts;
u16 wFailureRatio;
-} SNodeItem;
+} __packed SNodeItem;
typedef struct tagSNodeList {
@@ -216,12 +215,12 @@ typedef struct tagSNodeList {
u32 uItem;
SNodeItem sNodeList[0];
-} SNodeList, *PSNodeList;
+} __packed SNodeList, *PSNodeList;
typedef struct tagSCmdLinkStatus {
- BOOL bLink;
+ bool bLink;
u16 wBSSType;
u8 byState;
u8 abyBSSID[BSSID_LEN];
@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {
u32 uChannel;
u32 uLinkRate;
-} SCmdLinkStatus, *PSCmdLinkStatus;
+} __packed SCmdLinkStatus, *PSCmdLinkStatus;
//
// 802.11 counter
@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {
u32 ReceivedFragmentCount;
u32 MulticastReceivedFrameCount;
u32 FCSErrorCount;
-} SDot11MIBCount, *PSDot11MIBCount;
+} __packed SDot11MIBCount, *PSDot11MIBCount;
@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {
u32 ullTxBroadcastBytes[2];
u32 ullTxMulticastBytes[2];
u32 ullTxDirectedBytes[2];
-} SStatMIBCount, *PSStatMIBCount;
+} __packed SStatMIBCount, *PSStatMIBCount;
typedef struct tagSCmdValue {
u32 dwValue;
-} SCmdValue, *PSCmdValue;
+} __packed SCmdValue, *PSCmdValue;
//
// hostapd & viawget ioctl related
@@ -431,7 +430,7 @@ struct viawget_hostapd_param {
u8 ssid[32];
} scan_req;
} u;
-};
+} __packed;
/*--------------------- Export Classes ----------------------------*/
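
Throughout iocmd.h the patch trades the file-wide #pragma pack(1) for a per-structure __packed annotation (the kernel defines __packed as __attribute__((packed))). The pragma silently applies to every declaration that follows it unless it is reset, while the attribute is scoped to one type; the hunks here only show the pragma being removed. A minimal sketch of the two forms with a made-up structure (cmd_request_old/new are illustrative, not the driver's types):

#include <stdint.h>

#ifndef __packed
#define __packed __attribute__((packed))	/* kernel-style shorthand */
#endif

/* old form: the pragma changes packing for every later declaration */
#pragma pack(1)
struct cmd_request_old {
	uint8_t  name[16];
	uint16_t result;
	uint16_t cmd_code;
};
#pragma pack()

/* new form: the attribute is scoped to this one structure only */
struct cmd_request_new {
	uint8_t  name[16];
	uint16_t result;
	uint16_t cmd_code;
} __packed;
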
diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h
index 959c8868f6e2..2522ddec718d 100644
--- a/drivers/staging/vt6656/iowpa.h
+++ b/drivers/staging/vt6656/iowpa.h
@@ -67,12 +67,11 @@ enum {
-#pragma pack(1)
typedef struct viawget_wpa_header {
u8 type;
u16 req_ie_len;
u16 resp_ie_len;
-} viawget_wpa_header;
+} __packed viawget_wpa_header;
struct viawget_wpa_param {
u32 cmd;
@@ -113,9 +112,8 @@ struct viawget_wpa_param {
u8 *buf;
} scan_results;
} u;
-};
+} __packed;
-#pragma pack(1)
struct viawget_scan_result {
u8 bssid[6];
u8 ssid[32];
@@ -130,7 +128,7 @@ struct viawget_scan_result {
int noise;
int level;
int maxrate;
-};
+} __packed;
/*--------------------- Export Classes ----------------------------*/
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 52fce6902508..69971f35e490 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -55,7 +55,7 @@ static int msglevel = MSG_LEVEL_INFO;
struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
long ldBm;
pDevice->wstats.status = pDevice->eOPMode;
@@ -91,9 +91,9 @@ int iwctl_giwname(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwscan(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_point *wrq = &wrqu->data;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_scan_req *req = (struct iw_scan_req *)extra;
BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
PWLAN_IE_SSID pItemSSID = NULL;
@@ -169,8 +169,8 @@ int iwctl_giwscan(struct net_device *dev, struct iw_request_info *info,
int ii;
int jj;
int kk;
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
PKnownBSS pBSS;
PWLAN_IE_SSID pItemSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
@@ -309,7 +309,7 @@ int iwctl_giwscan(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwfreq(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_freq *wrq = &wrqu->freq;
int rc = 0;
@@ -348,9 +348,9 @@ int iwctl_siwfreq(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwfreq(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_freq *wrq = &wrqu->freq;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWFREQ\n");
@@ -379,9 +379,9 @@ int iwctl_giwfreq(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
__u32 *wmode = &wrqu->mode;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int rc = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMODE\n");
@@ -400,7 +400,7 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) {
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to ad-hoc \n");
break;
@@ -409,7 +409,7 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) {
pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to infrastructure \n");
break;
@@ -422,7 +422,7 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
if (pMgmt->eConfigMode != WMAC_CONFIG_AP) {
pMgmt->eConfigMode = WMAC_CONFIG_AP;
if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to Access Point \n");
break;
@@ -455,7 +455,7 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
bScheduleCommand((void *) pDevice,
WLAN_CMD_DISASSOCIATE, NULL);
} else {
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
pMgmt->eCurrState = WMAC_STATE_IDLE;
memset(pMgmt->abyCurrBSSID, 0, 6);
}
@@ -479,7 +479,7 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
spin_unlock_irq(&pDevice->lock);
}
- pDevice->bCommit = FALSE;
+ pDevice->bCommit = false;
}
@@ -492,9 +492,9 @@ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwmode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
__u32 *wmode = &wrqu->mode;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWMODE\n");
@@ -631,9 +631,9 @@ int iwctl_giwrange(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwap(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct sockaddr *wrq = &wrqu->ap_addr;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int rc = 0;
BYTE ZeroBSSID[WLAN_BSSID_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
@@ -672,7 +672,7 @@ int iwctl_siwap(struct net_device *dev, struct iw_request_info *info,
}
if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
return rc;
}
@@ -683,9 +683,9 @@ int iwctl_siwap(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwap(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct sockaddr *wrq = &wrqu->ap_addr;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWAP\n");
@@ -694,7 +694,7 @@ int iwctl_giwap(struct net_device *dev, struct iw_request_info *info,
memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
- if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
+ if ((pDevice->bLinkPass == false) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
memset(wrq->sa_data, 0, 6);
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)
@@ -713,8 +713,8 @@ int iwctl_giwaplist(struct net_device *dev, struct iw_request_info *info,
struct iw_point *wrq = &wrqu->data;
struct sockaddr *sock;
struct iw_quality *qual;
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
PKnownBSS pBSS = &pMgmt->sBSSList[0];
int ii;
int jj;
@@ -771,9 +771,9 @@ int iwctl_giwaplist(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_point *wrq = &wrqu->essid;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
PWLAN_IE_SSID pItemSSID;
if (pMgmt == NULL)
@@ -784,7 +784,7 @@ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWESSID :\n");
- pDevice->fWPA_Authened = FALSE;
+ pDevice->fWPA_Authened = false;
// Check if we asked for `any'
if (wrq->flags == 0) {
// Just send an empty SSID list
@@ -816,7 +816,7 @@ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
// Wext wil order another command of siwap to link
// with desired AP, so here need not associate??
- if (pDevice->bWPASuppWextEnabled == TRUE) {
+ if (pDevice->bWPASuppWextEnabled == true) {
/*******search if in hidden ssid mode ****/
PKnownBSS pCurr = NULL;
BYTE abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
@@ -867,7 +867,7 @@ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
}
if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
return 0;
}
@@ -878,9 +878,9 @@ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwessid(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_point *wrq = &wrqu->essid;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
PWLAN_IE_SSID pItemSSID;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWESSID\n");
@@ -908,7 +908,7 @@ int iwctl_giwessid(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->bitrate;
int rc = 0;
u8 brate = 0;
@@ -965,7 +965,7 @@ int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info,
if (wrq->fixed != 0) {
// Fixed mode
// One rate, fixed
- pDevice->bFixRate = TRUE;
+ pDevice->bFixRate = true;
if ((pDevice->byBBType == BB_TYPE_11B) && (brate > 3)) {
pDevice->uConnectionRate = 3;
} else {
@@ -973,7 +973,7 @@ int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fixed to Rate %d \n", pDevice->uConnectionRate);
}
} else {
- pDevice->bFixRate = FALSE;
+ pDevice->bFixRate = false;
pDevice->uConnectionRate = 13;
}
@@ -986,9 +986,9 @@ int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwrate(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->bitrate;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRATE\n");
@@ -1024,8 +1024,8 @@ int iwctl_giwrate(struct net_device *dev, struct iw_request_info *info,
brate = abySupportedRates[pDevice->wCurrentRate];
wrq->value = brate * 500000;
// If more than one rate, set auto
- if (pDevice->bFixRate == TRUE)
- wrq->fixed = TRUE;
+ if (pDevice->bFixRate == true)
+ wrq->fixed = true;
}
return 0;
@@ -1037,7 +1037,7 @@ int iwctl_giwrate(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwrts(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->rts;
if ((wrq->value < 0 || wrq->value > 2312) && !wrq->disabled)
@@ -1057,7 +1057,7 @@ int iwctl_siwrts(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwrts(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->rts;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRTS\n");
@@ -1073,7 +1073,7 @@ int iwctl_giwrts(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwfrag(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->frag;
int rc = 0;
int fthr = wrq->value;
@@ -1097,7 +1097,7 @@ int iwctl_siwfrag(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwfrag(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->frag;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWFRAG\n");
@@ -1113,7 +1113,7 @@ int iwctl_giwfrag(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwretry(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->retry;
int rc = 0;
@@ -1146,7 +1146,7 @@ int iwctl_siwretry(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwretry(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->retry;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRETRY\n");
wrq->disabled = 0; // Can't be disabled
@@ -1173,8 +1173,8 @@ int iwctl_giwretry(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->encoding;
u32 dwKeyIndex = (u32)(wrq->flags & IW_ENCODE_INDEX);
int ii;
@@ -1229,8 +1229,8 @@ int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info,
}
pDevice->byKeyIndex = (BYTE)dwKeyIndex;
pDevice->uKeyLength = wrq->length;
- pDevice->bTransmitKey = TRUE;
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bTransmitKey = true;
+ pDevice->bEncryptionEnable = true;
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
// Do we want to just set the transmit key index?
@@ -1244,8 +1244,8 @@ int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info,
// Read the flags
if (wrq->flags & IW_ENCODE_DISABLED) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable WEP function\n");
- pMgmt->bShareKeyAlgorithm = FALSE;
- pDevice->bEncryptionEnable = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
+ pDevice->bEncryptionEnable = false;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
spin_lock_irq(&pDevice->lock);
@@ -1256,11 +1256,11 @@ int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info,
}
if (wrq->flags & IW_ENCODE_RESTRICTED) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & ShareKey System\n");
- pMgmt->bShareKeyAlgorithm = TRUE;
+ pMgmt->bShareKeyAlgorithm = true;
}
if (wrq->flags & IW_ENCODE_OPEN) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & Open System\n");
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
}
memset(pMgmt->abyDesireBSSID, 0xFF, 6);
@@ -1271,8 +1271,8 @@ int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwencode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->encoding;
char abyKey[WLAN_WEP232_KEYLEN];
@@ -1333,8 +1333,8 @@ int iwctl_giwencode(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_param *wrq = &wrqu->power;
int rc = 0;
@@ -1385,8 +1385,8 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwpower(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_param *wrq = &wrqu->power;
int mode = pDevice->ePSMode;
@@ -1418,12 +1418,12 @@ int iwctl_giwpower(struct net_device *dev, struct iw_request_info *info,
int iwctl_giwsens(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iw_param *wrq = &wrqu->sens;
long ldBm;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSENS\n");
- if (pDevice->bLinkPass == TRUE) {
+ if (pDevice->bLinkPass == true) {
RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
wrq->value = ldBm;
} else {
@@ -1437,8 +1437,8 @@ int iwctl_giwsens(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwauth(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_param *wrq = &wrqu->param;
int ret = 0;
static int wpa_version = 0; // must be static to save the last value, einsn liu
@@ -1508,9 +1508,9 @@ int iwctl_siwauth(struct net_device *dev, struct iw_request_info *info,
case IW_AUTH_80211_AUTH_ALG:
PRINT_K("iwctl_siwauth:set AUTH_ALG=%d\n", wrq->value);
if (wrq->value == IW_AUTH_ALG_OPEN_SYSTEM)
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
else if (wrq->value == IW_AUTH_ALG_SHARED_KEY)
- pMgmt->bShareKeyAlgorithm = TRUE;
+ pMgmt->bShareKeyAlgorithm = true;
break;
case IW_AUTH_WPA_ENABLED:
break;
@@ -1521,11 +1521,11 @@ int iwctl_siwauth(struct net_device *dev, struct iw_request_info *info,
break;
case IW_AUTH_PRIVACY_INVOKED:
pDevice->bEncryptionEnable = !!wrq->value;
- if (pDevice->bEncryptionEnable == FALSE) {
+ if (pDevice->bEncryptionEnable == false) {
wpa_version = 0;
pairwise = 0;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
PRINT_K("iwctl_siwauth:set WPADEV to disaable at 2?????\n");
}
@@ -1547,8 +1547,8 @@ int iwctl_giwauth(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwgenie(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->data;
int ret = 0;
@@ -1582,8 +1582,8 @@ out: // not completely ...not necessary in wpa_supplicant 0.5.8
int iwctl_giwgenie(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->data;
int ret = 0;
int space = wrq->length;
@@ -1608,8 +1608,8 @@ int iwctl_giwgenie(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_point *wrq = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext*)extra;
struct viawget_wpa_param *param=NULL;
@@ -1697,28 +1697,28 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
/****this method is so foolish,but there is no other way??? */
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
if (param->u.wpa_key.key_index ==0) {
- pDevice->bwextstep0 = TRUE;
+ pDevice->bwextstep0 = true;
}
- if ((pDevice->bwextstep0 == TRUE) && (param->u.wpa_key.key_index == 1)) {
- pDevice->bwextstep0 = FALSE;
- pDevice->bwextstep1 = TRUE;
+ if ((pDevice->bwextstep0 == true) && (param->u.wpa_key.key_index == 1)) {
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = true;
}
- if ((pDevice->bwextstep1 == TRUE) && (param->u.wpa_key.key_index == 2)) {
- pDevice->bwextstep1 = FALSE;
- pDevice->bwextstep2 = TRUE;
+ if ((pDevice->bwextstep1 == true) && (param->u.wpa_key.key_index == 2)) {
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = true;
}
- if ((pDevice->bwextstep2 == TRUE) && (param->u.wpa_key.key_index == 3)) {
- pDevice->bwextstep2 = FALSE;
- pDevice->bwextstep3 = TRUE;
+ if ((pDevice->bwextstep2 == true) && (param->u.wpa_key.key_index == 3)) {
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = true;
}
}
- if (pDevice->bwextstep3 == TRUE) {
+ if (pDevice->bwextstep3 == true) {
PRINT_K("SIOCSIWENCODEEXT:Enable WPA WEXT SUPPORT!!!!!\n");
- pDevice->bwextstep0 = FALSE;
- pDevice->bwextstep1 = FALSE;
- pDevice->bwextstep2 = FALSE;
- pDevice->bwextstep3 = FALSE;
- pDevice->bWPASuppWextEnabled = TRUE;
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = true;
memset(pMgmt->abyDesireBSSID, 0xFF, 6);
KeyvInitTable(pDevice, &pDevice->sKey);
}
@@ -1741,8 +1741,8 @@ int iwctl_giwencodeext(struct net_device *dev, struct iw_request_info *info,
int iwctl_siwmlme(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- PSDevice pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct iw_mlme *mlme = (struct iw_mlme *)extra;
int ret = 0;
@@ -1758,7 +1758,7 @@ int iwctl_siwmlme(struct net_device *dev, struct iw_request_info *info,
switch (mlme->cmd){
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
- if (pDevice->bLinkPass == TRUE) {
+ if (pDevice->bLinkPass == true) {
PRINT_K("iwctl_siwmlme--->send DISASSOCIATE\n");
bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE,
NULL);
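
Most of the churn in hostap.c, int.c and iwctl.c above is the mechanical TRUE/FALSE to true/false rename. Beyond style, one reason the kernel prefers the C99 type is that an int-based BOOL compared against TRUE misfires for any non-zero value other than 1, while _Bool normalizes every non-zero assignment to 1. A small sketch (the BOOL/TRUE definitions mimic the old driver headers; the check functions are illustrative) — note the patch keeps the explicit "== true" comparisons, presumably to stay a one-for-one rename, where idiomatic kernel code would usually just test the flag directly:

#include <stdbool.h>

typedef int BOOL;		/* legacy flag type from the old headers */
#define TRUE  1
#define FALSE 0

static int legacy_check(BOOL flag)
{
	/* flag == 2 is "true" in spirit, but this comparison fails */
	return flag == TRUE;
}

static int native_check(bool flag)
{
	/* assigning any non-zero value to a bool stores exactly 1 */
	return flag == true;
}
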
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index 8c78b86b5c80..416175e8ba53 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -60,26 +60,25 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
-static void s_vCheckKeyTableValid(void *pDeviceHandler,
- PSKeyManagement pTable)
+static void s_vCheckKeyTableValid(struct vnt_private *pDevice,
+ PSKeyManagement pTable)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- int i;
- WORD wLength = 0;
- BYTE pbyData[MAX_KEY_TABLE];
+ int i;
+ u16 wLength = 0;
+ u8 pbyData[MAX_KEY_TABLE];
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- (pTable->KeyTable[i].PairwiseKey.bKeyValid == FALSE) &&
- (pTable->KeyTable[i].GroupKey[0].bKeyValid == FALSE) &&
- (pTable->KeyTable[i].GroupKey[1].bKeyValid == FALSE) &&
- (pTable->KeyTable[i].GroupKey[2].bKeyValid == FALSE) &&
- (pTable->KeyTable[i].GroupKey[3].bKeyValid == FALSE)
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ (pTable->KeyTable[i].PairwiseKey.bKeyValid == false) &&
+ (pTable->KeyTable[i].GroupKey[0].bKeyValid == false) &&
+ (pTable->KeyTable[i].GroupKey[1].bKeyValid == false) &&
+ (pTable->KeyTable[i].GroupKey[2].bKeyValid == false) &&
+ (pTable->KeyTable[i].GroupKey[3].bKeyValid == false)
) {
- pTable->KeyTable[i].bInUse = FALSE;
+ pTable->KeyTable[i].bInUse = false;
pTable->KeyTable[i].wKeyCtl = 0;
- pTable->KeyTable[i].bSoftWEP = FALSE;
+ pTable->KeyTable[i].bSoftWEP = false;
pbyData[wLength++] = (BYTE) i;
//MACvDisableKeyEntry(pDevice, i);
}
@@ -112,27 +111,25 @@ static void s_vCheckKeyTableValid(void *pDeviceHandler,
* Return Value: none
*
*/
-void KeyvInitTable(void *pDeviceHandler, PSKeyManagement pTable)
+void KeyvInitTable(struct vnt_private *pDevice, PSKeyManagement pTable)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- int i;
- int jj;
- BYTE pbyData[MAX_KEY_TABLE+1];
+ int i, jj;
+ u8 pbyData[MAX_KEY_TABLE+1];
spin_lock_irq(&pDevice->lock);
for (i=0;i<MAX_KEY_TABLE;i++) {
- pTable->KeyTable[i].bInUse = FALSE;
- pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
+ pTable->KeyTable[i].bInUse = false;
+ pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
pTable->KeyTable[i].PairwiseKey.pvKeyTable =
(void *)&pTable->KeyTable[i];
for (jj=0; jj < MAX_GROUP_KEY; jj++) {
- pTable->KeyTable[i].GroupKey[jj].bKeyValid = FALSE;
+ pTable->KeyTable[i].GroupKey[jj].bKeyValid = false;
pTable->KeyTable[i].GroupKey[jj].pvKeyTable =
(void *) &(pTable->KeyTable[i]);
}
pTable->KeyTable[i].wKeyCtl = 0;
pTable->KeyTable[i].dwGTKeyIndex = 0;
- pTable->KeyTable[i].bSoftWEP = FALSE;
+ pTable->KeyTable[i].bSoftWEP = false;
pbyData[i] = (BYTE) i;
}
pbyData[i] = (BYTE) i;
@@ -161,43 +158,43 @@ void KeyvInitTable(void *pDeviceHandler, PSKeyManagement pTable)
* Out:
* pKey - Key return
*
- * Return Value: TRUE if found otherwise FALSE
+ * Return Value: true if found otherwise false
*
*/
-BOOL KeybGetKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex,
- PSKeyItem *pKey)
+int KeybGetKey(PSKeyManagement pTable, u8 *pbyBSSID, u32 dwKeyIndex,
+ PSKeyItem *pKey)
{
- int i;
+ int i;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetKey() \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetKey()\n");
*pKey = NULL;
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
+ if ((pTable->KeyTable[i].bInUse == true) &&
!compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyIndex == 0xFFFFFFFF) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) {
+ if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
- return (TRUE);
+ return (true);
}
else {
- return (FALSE);
+ return (false);
}
} else if (dwKeyIndex < MAX_GROUP_KEY) {
- if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid == TRUE) {
+ if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex]);
- return (TRUE);
+ return (true);
}
else {
- return (FALSE);
+ return (false);
}
}
else {
- return (FALSE);
+ return (false);
}
}
}
- return (FALSE);
+ return (false);
}
@@ -215,37 +212,28 @@ BOOL KeybGetKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex,
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybSetKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex,
- u32 uKeyLength,
- PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode
- )
+int KeybSetKey(struct vnt_private *pDevice, PSKeyManagement pTable,
+ u8 *pbyBSSID, u32 dwKeyIndex, u32 uKeyLength, u64 *KeyRSC, u8 *pbyKey,
+ u8 byKeyDecMode)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- int i,j;
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
+ PSKeyItem pKey;
+ int i, j, ii;
+ u32 uKeyIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"Enter KeybSetKey: %X\n", dwKeyIndex);
j = (MAX_KEY_TABLE-1);
for (i=0;i<(MAX_KEY_TABLE-1);i++) {
- if ((pTable->KeyTable[i].bInUse == FALSE) &&
+ if ((pTable->KeyTable[i].bInUse == false) &&
(j == (MAX_KEY_TABLE-1))) {
// found empty table
j = i;
}
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
+ if ((pTable->KeyTable[i].bInUse == true) &&
!compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
// found table already exist
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
@@ -257,7 +245,7 @@ BOOL KeybSetKey(
} else {
// Group key
if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return (FALSE);
+ return (false);
pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
// Group transmit key
@@ -273,7 +261,7 @@ BOOL KeybSetKey(
}
pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly
- pKey->bKeyValid = TRUE;
+ pKey->bKeyValid = true;
pKey->uKeyLength = uKeyLength;
pKey->dwKeyIndex = dwKeyIndex;
pKey->byCipherSuite = byKeyDecMode;
@@ -286,13 +274,11 @@ BOOL KeybSetKey(
}
MACvSetKeyEntry(pDevice, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (PDWORD)pKey->abyKey);
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- memset(&(pKey->KeyRSC), 0, sizeof(QWORD));
- }
- else {
- memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD));
- }
+ if ((dwKeyIndex & USE_KEYRSC) == 0)
+ pKey->KeyRSC = 0; /* RSC set by NIC */
+ else
+ pKey->KeyRSC = *KeyRSC;
+
pKey->dwTSC47_16 = 0;
pKey->wTSC15_0 = 0;
@@ -312,12 +298,12 @@ BOOL KeybSetKey(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
pKey->dwKeyIndex);
- return (TRUE);
+ return (true);
}
}
if (j < (MAX_KEY_TABLE-1)) {
memcpy(pTable->KeyTable[j].abyBSSID, pbyBSSID, ETH_ALEN);
- pTable->KeyTable[j].bInUse = TRUE;
+ pTable->KeyTable[j].bInUse = true;
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
// Pairwise key
pKey = &(pTable->KeyTable[j].PairwiseKey);
@@ -327,7 +313,7 @@ BOOL KeybSetKey(
} else {
// Group key
if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return (FALSE);
+ return (false);
pKey = &(pTable->KeyTable[j].GroupKey[dwKeyIndex & 0x000000FF]);
if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
// Group transmit key
@@ -343,7 +329,7 @@ BOOL KeybSetKey(
}
pTable->KeyTable[j].wKeyCtl |= 0x8000; // enable on-fly
- pKey->bKeyValid = TRUE;
+ pKey->bKeyValid = true;
pKey->uKeyLength = uKeyLength;
pKey->dwKeyIndex = dwKeyIndex;
pKey->byCipherSuite = byKeyDecMode;
@@ -356,13 +342,11 @@ BOOL KeybSetKey(
}
MACvSetKeyEntry(pDevice, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (PDWORD)pKey->abyKey);
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- memset(&(pKey->KeyRSC), 0, sizeof(QWORD));
- }
- else {
- memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD));
- }
+ if ((dwKeyIndex & USE_KEYRSC) == 0)
+ pKey->KeyRSC = 0; /* RSC set by NIC */
+ else
+ pKey->KeyRSC = *KeyRSC;
+
pKey->dwTSC47_16 = 0;
pKey->wTSC15_0 = 0;
@@ -381,9 +365,9 @@ BOOL KeybSetKey(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
pKey->dwKeyIndex);
- return (TRUE);
+ return (true);
}
- return (FALSE);
+ return (false);
}
@@ -398,68 +382,64 @@ BOOL KeybSetKey(
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybRemoveKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex
- )
+
+int KeybRemoveKey(struct vnt_private *pDevice, PSKeyManagement pTable,
+ u8 *pbyBSSID, u32 dwKeyIndex)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- int i;
- BOOL bReturnValue = FALSE;
+ int i;
+ int bReturnValue = false;
if (is_broadcast_ether_addr(pbyBSSID)) {
// delete all keys
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
for (i=0;i<MAX_KEY_TABLE;i++) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
+ pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
}
- bReturnValue = TRUE;
+ bReturnValue = true;
}
else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
for (i=0;i<MAX_KEY_TABLE;i++) {
- pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
+ pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
// remove Group transmit key
pTable->KeyTable[i].dwGTKeyIndex = 0;
}
}
- bReturnValue = TRUE;
+ bReturnValue = true;
}
else {
- bReturnValue = FALSE;
+ bReturnValue = false;
}
} else {
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ( (pTable->KeyTable[i].bInUse == TRUE) &&
+ if ( (pTable->KeyTable[i].bInUse == true) &&
!compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
- bReturnValue = TRUE;
+ pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
+ bReturnValue = true;
break;
}
else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
+ pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
// remove Group transmit key
pTable->KeyTable[i].dwGTKeyIndex = 0;
}
- bReturnValue = TRUE;
+ bReturnValue = true;
break;
}
else {
- bReturnValue = FALSE;
+ bReturnValue = false;
break;
}
- } //pTable->KeyTable[i].bInUse == TRUE
+ } //pTable->KeyTable[i].bInUse == true
} //for
- bReturnValue = TRUE;
+ bReturnValue = true;
}
s_vCheckKeyTableValid(pDevice,pTable);
@@ -479,31 +459,27 @@ BOOL KeybRemoveKey(
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybRemoveAllKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- PBYTE pbyBSSID
- )
+int KeybRemoveAllKey(struct vnt_private *pDevice, PSKeyManagement pTable,
+ u8 *pbyBSSID)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- int i,u;
+ int i, u;
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
+ if ((pTable->KeyTable[i].bInUse == true) &&
!compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
+ pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
for (u = 0; u < MAX_GROUP_KEY; u++)
- pTable->KeyTable[i].GroupKey[u].bKeyValid = FALSE;
+ pTable->KeyTable[i].GroupKey[u].bKeyValid = false;
pTable->KeyTable[i].dwGTKeyIndex = 0;
s_vCheckKeyTableValid(pDevice, pTable);
- return (TRUE);
+ return (true);
}
}
- return (FALSE);
+ return (false);
}
/*
@@ -515,21 +491,17 @@ BOOL KeybRemoveAllKey(
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-void KeyvRemoveWEPKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- DWORD dwKeyIndex
- )
+void KeyvRemoveWEPKey(struct vnt_private *pDevice, PSKeyManagement pTable,
+ u32 dwKeyIndex)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse == TRUE) {
+ if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse == true) {
if (pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].byCipherSuite == KEY_CTL_WEP) {
- pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
+ pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex & 0x7FFFFFFF)) {
// remove Group transmit key
pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = 0;
@@ -541,9 +513,8 @@ void KeyvRemoveWEPKey(
return;
}
-void KeyvRemoveAllWEPKey(void *pDeviceHandler, PSKeyManagement pTable)
+void KeyvRemoveAllWEPKey(struct vnt_private *pDevice, PSKeyManagement pTable)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
int i;
for (i = 0; i < MAX_GROUP_KEY; i++)
@@ -560,22 +531,23 @@ void KeyvRemoveAllWEPKey(void *pDeviceHandler, PSKeyManagement pTable)
* Out:
* pKey - Key return
*
- * Return Value: TRUE if found otherwise FALSE
+ * Return Value: true if found otherwise false
*
*/
-BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
- PSKeyItem *pKey)
+int KeybGetTransmitKey(PSKeyManagement pTable, u8 *pbyBSSID, u32 dwKeyType,
+ PSKeyItem *pKey)
{
- int i, ii;
+ int i, ii;
+
+ *pKey = NULL;
- *pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
+ if ((pTable->KeyTable[i].bInUse == true) &&
!compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyType == PAIRWISE_KEY) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) {
+ if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetTransmitKey:");
@@ -586,19 +558,19 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
- return (TRUE);
+ return (true);
}
else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"PairwiseKey.bKeyValid == FALSE\n");
- return (FALSE);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"PairwiseKey.bKeyValid == false\n");
+ return (false);
}
} // End of Type == PAIRWISE
else {
if (pTable->KeyTable[i].dwGTKeyIndex == 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ERROR: dwGTKeyIndex == 0 !!!\n");
- return FALSE;
+ return false;
}
- if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid == TRUE) {
+ if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)]);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetTransmitKey:");
@@ -610,11 +582,11 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n",
pTable->KeyTable[i].dwGTKeyIndex);
- return (TRUE);
+ return (true);
}
else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GroupKey.bKeyValid == FALSE\n");
- return (FALSE);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GroupKey.bKeyValid == false\n");
+ return (false);
}
} // End of Type = GROUP
} // BSSID match
@@ -624,7 +596,7 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *(pbyBSSID+ii));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
- return (FALSE);
+ return (false);
}
@@ -637,22 +609,23 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
* Out:
* none
*
- * Return Value: TRUE if found otherwise FALSE
+ * Return Value: true if found otherwise false
*
*/
-BOOL KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey)
+int KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey)
{
- int i;
+ int i;
+
+ *pKey = NULL;
- *pKey = NULL;
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE)) {
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ (pTable->KeyTable[i].PairwiseKey.bKeyValid == true)) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
- return (TRUE);
+ return (true);
}
}
- return (FALSE);
+ return (false);
}
/*
@@ -668,37 +641,31 @@ BOOL KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey)
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybSetDefaultKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- DWORD dwKeyIndex,
- u32 uKeyLength,
- PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode
- )
+
+int KeybSetDefaultKey(struct vnt_private *pDevice, PSKeyManagement pTable,
+ u32 dwKeyIndex, u32 uKeyLength, u64 *KeyRSC, u8 *pbyKey,
+ u8 byKeyDecMode)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
+ int ii;
+ PSKeyItem pKey;
+ u32 uKeyIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enter KeybSetDefaultKey: %1x, %d\n",
(int) dwKeyIndex, (int) uKeyLength);
if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
- return (FALSE);
+ return (false);
} else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) {
- return (FALSE);
+ return (false);
}
if (uKeyLength > MAX_KEY_LEN)
return false;
- pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE;
+ pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = true;
for (ii = 0; ii < ETH_ALEN; ii++)
pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
@@ -722,13 +689,13 @@ BOOL KeybSetDefaultKey(
if ((uKeyLength == WLAN_WEP232_KEYLEN) &&
(byKeyDecMode == KEY_CTL_WEP)) {
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x4000; // disable on-fly disable address match
- pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = TRUE;
+ pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = true;
} else {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP == FALSE)
+ if (pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP == false)
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0xC000; // enable on-fly disable address match
}
- pKey->bKeyValid = TRUE;
+ pKey->bKeyValid = true;
pKey->uKeyLength = uKeyLength;
pKey->dwKeyIndex = dwKeyIndex;
pKey->byCipherSuite = byKeyDecMode;
@@ -742,12 +709,12 @@ BOOL KeybSetDefaultKey(
MACvSetKeyEntry(pDevice, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (PDWORD) pKey->abyKey);
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- memset(&(pKey->KeyRSC), 0, sizeof(QWORD));
- } else {
- memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD));
- }
+ if ((dwKeyIndex & USE_KEYRSC) == 0)
+ pKey->KeyRSC = 0; /* RSC set by NIC */
+ else
+ pKey->KeyRSC = *KeyRSC;
+
+
pKey->dwTSC47_16 = 0;
pKey->wTSC15_0 = 0;
@@ -767,7 +734,7 @@ BOOL KeybSetDefaultKey(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n",
pKey->dwKeyIndex);
- return (TRUE);
+ return (true);
}
@@ -784,37 +751,30 @@ BOOL KeybSetDefaultKey(
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybSetAllGroupKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- DWORD dwKeyIndex,
- u32 uKeyLength,
- PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode
- )
+
+int KeybSetAllGroupKey(struct vnt_private *pDevice, PSKeyManagement pTable,
+ u32 dwKeyIndex, u32 uKeyLength, u64 *KeyRSC, u8 *pbyKey,
+ u8 byKeyDecMode)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- int i;
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
+ int i, ii;
+ PSKeyItem pKey;
+ u32 uKeyIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n",
dwKeyIndex);
if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
- return (FALSE);
+ return (false);
} else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) {
- return (FALSE);
+ return (false);
}
for (i=0; i < MAX_KEY_TABLE-1; i++) {
- if (pTable->KeyTable[i].bInUse == TRUE) {
+ if (pTable->KeyTable[i].bInUse == true) {
// found table already exist
// Group key
pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
@@ -833,7 +793,7 @@ BOOL KeybSetAllGroupKey(
pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly
- pKey->bKeyValid = TRUE;
+ pKey->bKeyValid = true;
pKey->uKeyLength = uKeyLength;
pKey->dwKeyIndex = dwKeyIndex;
pKey->byCipherSuite = byKeyDecMode;
@@ -847,13 +807,11 @@ BOOL KeybSetAllGroupKey(
MACvSetKeyEntry(pDevice, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (PDWORD) pKey->abyKey);
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- memset(&(pKey->KeyRSC), 0, sizeof(QWORD));
- }
- else {
- memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD));
- }
+ if ((dwKeyIndex & USE_KEYRSC) == 0)
+ pKey->KeyRSC = 0; /* RSC set by NIC */
+ else
+ pKey->KeyRSC = *KeyRSC;
+
pKey->dwTSC47_16 = 0;
pKey->wTSC15_0 = 0;
@@ -870,7 +828,7 @@ BOOL KeybSetAllGroupKey(
//DBG_PRN_GRP12(("pKey->wTSC15_0: %X\n ", pKey->wTSC15_0));
//DBG_PRN_GRP12(("pKey->dwKeyIndex: %lX\n ", pKey->dwKeyIndex));
- } // (pTable->KeyTable[i].bInUse == TRUE)
+ } // (pTable->KeyTable[i].bInUse == true)
}
- return (TRUE);
+ return (true);
}
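
With KeyRSC switched from the QWORD typedef to a plain u64 (see the key.h hunk below), the memset()/memcpy() pair collapses into a single assignment, which is what the three KeybSet*Key hunks above do. A compact before/after sketch with illustrative names:

#include <stdint.h>
#include <string.h>

struct key_item {
	uint64_t key_rsc;	/* receive sequence counter */
};

/* old pattern: the counter is an opaque QWORD, so copy it byte-wise */
static void set_rsc_old(struct key_item *key, const uint64_t *rsc, int use_rsc)
{
	if (!use_rsc)
		memset(&key->key_rsc, 0, sizeof(key->key_rsc));
	else
		memcpy(&key->key_rsc, rsc, sizeof(key->key_rsc));
}

/* new pattern: a u64 can simply be assigned or zeroed */
static void set_rsc_new(struct key_item *key, const uint64_t *rsc, int use_rsc)
{
	key->key_rsc = use_rsc ? *rsc : 0;
}
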
diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
index bd35d39621ae..7ecddcd6bcfa 100644
--- a/drivers/staging/vt6656/key.h
+++ b/drivers/staging/vt6656/key.h
@@ -57,10 +57,10 @@
typedef struct tagSKeyItem
{
- BOOL bKeyValid;
+ bool bKeyValid;
u32 uKeyLength;
BYTE abyKey[MAX_KEY_LEN];
- QWORD KeyRSC;
+ u64 KeyRSC;
DWORD dwTSC47_16;
WORD wTSC15_0;
BYTE byCipherSuite;
@@ -76,9 +76,9 @@ typedef struct tagSKeyTable
SKeyItem PairwiseKey;
SKeyItem GroupKey[MAX_GROUP_KEY]; //64*5 = 320, 320+8=328
DWORD dwGTKeyIndex; // GroupTransmitKey Index
- BOOL bInUse;
+ bool bInUse;
WORD wKeyCtl;
- BOOL bSoftWEP;
+ bool bSoftWEP;
BYTE byReserved1[6];
} SKeyTable, *PSKeyTable; //352
@@ -97,69 +97,37 @@ typedef struct tagSKeyManagement
/*--------------------- Export Functions --------------------------*/
-void KeyvInitTable(void *pDeviceHandler, PSKeyManagement pTable);
-
-BOOL KeybGetKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex,
- PSKeyItem *pKey);
-
-BOOL KeybSetKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex,
- u32 uKeyLength,
- PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode
- );
-
-BOOL KeybRemoveKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex
- );
-
-BOOL KeybRemoveAllKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- PBYTE pbyBSSID
- );
-
-void KeyvRemoveWEPKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- DWORD dwKeyIndex
- );
-
-void KeyvRemoveAllWEPKey(
- void *pDeviceHandler,
- PSKeyManagement pTable
- );
-
-BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
- PSKeyItem *pKey);
-
-BOOL KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey);
-
-BOOL KeybSetDefaultKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- DWORD dwKeyIndex,
- u32 uKeyLength,
- PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode
- );
-
-BOOL KeybSetAllGroupKey(
- void *pDeviceHandler,
- PSKeyManagement pTable,
- DWORD dwKeyIndex,
- u32 uKeyLength,
- PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode
- );
+void KeyvInitTable(struct vnt_private *, PSKeyManagement pTable);
+
+int KeybGetKey(PSKeyManagement pTable, u8 *pbyBSSID, u32 dwKeyIndex,
+ PSKeyItem *pKey);
+
+int KeybSetKey(struct vnt_private *, PSKeyManagement pTable, u8 *pbyBSSID,
+ u32 dwKeyIndex, u32 uKeyLength, u64 *KeyRSC, u8 *pbyKey,
+ u8 byKeyDecMode);
+
+int KeybRemoveKey(struct vnt_private *, PSKeyManagement pTable,
+ u8 *pbyBSSID, u32 dwKeyIndex);
+
+int KeybRemoveAllKey(struct vnt_private *, PSKeyManagement pTable,
+ u8 *pbyBSSID);
+
+void KeyvRemoveWEPKey(struct vnt_private *, PSKeyManagement pTable,
+ u32 dwKeyIndex);
+
+void KeyvRemoveAllWEPKey(struct vnt_private *, PSKeyManagement pTable);
+
+int KeybGetTransmitKey(PSKeyManagement pTable, u8 *pbyBSSID, u32 dwKeyType,
+ PSKeyItem *pKey);
+
+int KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey);
+
+int KeybSetDefaultKey(struct vnt_private *, PSKeyManagement pTable,
+ u32 dwKeyIndex, u32 uKeyLength, u64 *KeyRSC, u8 *pbyKey,
+ u8 byKeyDecMode);
+
+int KeybSetAllGroupKey(struct vnt_private *, PSKeyManagement pTable,
+ u32 dwKeyIndex, u32 uKeyLength, u64 *KeyRSC, u8 *pbyKey,
+ u8 byKeyDecMode);
#endif /* __KEY_H__ */
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 8fddc7b3930b..76d307b58d52 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -68,11 +68,11 @@ static int msglevel =MSG_LEVEL_INFO;
* Return Value: none
*
*/
-void MACvSetMultiAddrByHash (PSDevice pDevice, BYTE byHashIdx)
+void MACvSetMultiAddrByHash(struct vnt_private *pDevice, u8 byHashIdx)
{
- unsigned int uByteIdx;
- BYTE byBitMask;
- BYTE pbyData[2];
+ u8 uByteIdx;
+ u8 byBitMask;
+ u8 pbyData[2];
// calculate byte position
@@ -110,9 +110,9 @@ void MACvSetMultiAddrByHash (PSDevice pDevice, BYTE byHashIdx)
* Return Value: none
*
*/
-void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData)
+void MACvWriteMultiAddr(struct vnt_private *pDevice, u32 uByteIdx, u8 byData)
{
- BYTE byData1;
+ u8 byData1;
byData1 = byData;
CONTROLnsRequestOut(pDevice,
@@ -135,7 +135,7 @@ void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData)
*
*
*/
-void MACbShutdown(PSDevice pDevice)
+void MACbShutdown(struct vnt_private *pDevice)
{
CONTROLnsRequestOutAsyn(pDevice,
MESSAGE_TYPE_MACSHUTDOWN,
@@ -146,9 +146,9 @@ void MACbShutdown(PSDevice pDevice)
);
}
-void MACvSetBBType(PSDevice pDevice,BYTE byType)
+void MACvSetBBType(struct vnt_private *pDevice, u8 byType)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
pbyData[0] = byType;
@@ -163,9 +163,9 @@ BYTE pbyData[2];
);
}
-void MACvSetMISCFifo (PSDevice pDevice, WORD wOffset, DWORD dwData)
+void MACvSetMISCFifo(struct vnt_private *pDevice, u16 wOffset, u32 dwData)
{
-BYTE pbyData[4];
+ u8 pbyData[4];
if (wOffset > 273)
return;
@@ -197,10 +197,10 @@ BYTE pbyData[4];
* Return Value: none
*
*/
-void MACvDisableKeyEntry(PSDevice pDevice, unsigned int uEntryIdx)
+void MACvDisableKeyEntry(struct vnt_private *pDevice, u32 uEntryIdx)
{
-WORD wOffset;
-BYTE byData;
+ u16 wOffset;
+ u8 byData;
byData = (BYTE) uEntryIdx;
@@ -237,20 +237,18 @@ BYTE byData;
* Return Value: none
*
*/
-void MACvSetKeyEntry(PSDevice pDevice, WORD wKeyCtl,
- unsigned int uEntryIdx, unsigned int uKeyIdx,
- PBYTE pbyAddr, PDWORD pdwKey)
+void MACvSetKeyEntry(struct vnt_private *pDevice, u16 wKeyCtl, u32 uEntryIdx,
+ u32 uKeyIdx, u8 *pbyAddr, u32 *pdwKey)
{
-PBYTE pbyKey;
-WORD wOffset;
-DWORD dwData1,dwData2;
-int ii;
-BYTE pbyData[24];
-
- if ( pDevice->byLocalID <= MAC_REVISION_A1 ) {
- if ( pDevice->sMgmtObj.byCSSPK == KEY_CTL_CCMP )
- return;
- }
+ u8 *pbyKey;
+ u16 wOffset;
+ u32 dwData1, dwData2;
+ int ii;
+ u8 pbyData[24];
+
+ if (pDevice->byLocalID <= MAC_REVISION_A1)
+ if (pDevice->vnt_mgmt.byCSSPK == KEY_CTL_CCMP)
+ return;
wOffset = MISCFIFO_KEYETRY0;
wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
@@ -321,9 +319,9 @@ BYTE pbyData[24];
}
-void MACvRegBitsOff(PSDevice pDevice, BYTE byRegOfs, BYTE byBits)
+void MACvRegBitsOff(struct vnt_private *pDevice, u8 byRegOfs, u8 byBits)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
pbyData[0] = 0;
pbyData[1] = byBits;
@@ -338,9 +336,9 @@ BYTE pbyData[2];
}
-void MACvRegBitsOn(PSDevice pDevice, BYTE byRegOfs, BYTE byBits)
+void MACvRegBitsOn(struct vnt_private *pDevice, u8 byRegOfs, u8 byBits)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
pbyData[0] = byBits;
@@ -355,9 +353,9 @@ BYTE pbyData[2];
);
}
-void MACvWriteWord(PSDevice pDevice, BYTE byRegOfs, WORD wData)
+void MACvWriteWord(struct vnt_private *pDevice, u8 byRegOfs, u16 wData)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
pbyData[0] = (BYTE)(wData & 0xff);
@@ -373,9 +371,9 @@ BYTE pbyData[2];
}
-void MACvWriteBSSIDAddress(PSDevice pDevice, PBYTE pbyEtherAddr)
+void MACvWriteBSSIDAddress(struct vnt_private *pDevice, u8 *pbyEtherAddr)
{
-BYTE pbyData[6];
+ u8 pbyData[6];
pbyData[0] = *((PBYTE)pbyEtherAddr);
@@ -394,9 +392,9 @@ BYTE pbyData[6];
);
}
-void MACvEnableProtectMD(PSDevice pDevice)
+void MACvEnableProtectMD(struct vnt_private *pDevice)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
pbyData[0] = EnCFG_ProtectMd;
@@ -411,9 +409,9 @@ BYTE pbyData[2];
);
}
-void MACvDisableProtectMD(PSDevice pDevice)
+void MACvDisableProtectMD(struct vnt_private *pDevice)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
pbyData[0] = 0;
@@ -428,9 +426,9 @@ BYTE pbyData[2];
);
}
-void MACvEnableBarkerPreambleMd(PSDevice pDevice)
+void MACvEnableBarkerPreambleMd(struct vnt_private *pDevice)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
pbyData[0] = EnCFG_BarkerPream;
@@ -445,9 +443,9 @@ BYTE pbyData[2];
);
}
-void MACvDisableBarkerPreambleMd(PSDevice pDevice)
+void MACvDisableBarkerPreambleMd(struct vnt_private *pDevice)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
pbyData[0] = 0;
@@ -463,12 +461,12 @@ BYTE pbyData[2];
}
-void MACvWriteBeaconInterval(PSDevice pDevice, WORD wInterval)
+void MACvWriteBeaconInterval(struct vnt_private *pDevice, u16 wInterval)
{
-BYTE pbyData[2];
+ u8 pbyData[2];
- pbyData[0] = (BYTE) (wInterval & 0xff);
- pbyData[1] = (BYTE) (wInterval >> 8);
+ pbyData[0] = (u8)(wInterval & 0xff);
+ pbyData[1] = (u8)(wInterval >> 8);
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_WRITE,
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 147ac50218d3..6e28500ae5f8 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -420,24 +420,21 @@
/*--------------------- Export Functions --------------------------*/
-void MACvSetMultiAddrByHash(PSDevice pDevice, BYTE byHashIdx);
-void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData);
-void MACbShutdown(PSDevice pDevice);
-void MACvSetBBType(PSDevice pDevice, BYTE byType);
-void MACvSetMISCFifo(PSDevice pDevice, WORD wOffset, DWORD dwData);
-void MACvDisableKeyEntry(PSDevice pDevice, unsigned int uEntryIdx);
-void MACvSetKeyEntry(PSDevice pDevice, WORD wKeyCtl, unsigned int uEntryIdx,
- unsigned int uKeyIdx, PBYTE pbyAddr, PDWORD pdwKey);
-
-void MACvRegBitsOff(PSDevice pDevice, BYTE byRegOfs, BYTE byBits);
-void MACvRegBitsOn(PSDevice pDevice, BYTE byRegOfs, BYTE byBits);
-void MACvWriteWord(PSDevice pDevice, BYTE byRegOfs, WORD wData);
-
-void MACvWriteBSSIDAddress(PSDevice pDevice, PBYTE pbyEtherAddr);
-void MACvEnableProtectMD(PSDevice pDevice);
-void MACvDisableProtectMD(PSDevice pDevice);
-void MACvEnableBarkerPreambleMd(PSDevice pDevice);
-void MACvDisableBarkerPreambleMd(PSDevice pDevice);
-void MACvWriteBeaconInterval(PSDevice pDevice, WORD wInterval);
+void MACvSetMultiAddrByHash(struct vnt_private *, u8);
+void MACvWriteMultiAddr(struct vnt_private *, u32, u8);
+void MACbShutdown(struct vnt_private *);
+void MACvSetBBType(struct vnt_private *, u8);
+void MACvSetMISCFifo(struct vnt_private *pDevice, u16, u32);
+void MACvDisableKeyEntry(struct vnt_private *, u32);
+void MACvSetKeyEntry(struct vnt_private *, u16, u32, u32, u8 *, u32 *);
+void MACvRegBitsOff(struct vnt_private *, u8, u8);
+void MACvRegBitsOn(struct vnt_private *, u8, u8);
+void MACvWriteWord(struct vnt_private *, u8, u16);
+void MACvWriteBSSIDAddress(struct vnt_private *, u8 *);
+void MACvEnableProtectMD(struct vnt_private *);
+void MACvDisableProtectMD(struct vnt_private *);
+void MACvEnableBarkerPreambleMd(struct vnt_private *);
+void MACvDisableBarkerPreambleMd(struct vnt_private *);
+void MACvWriteBeaconInterval(struct vnt_private *, u16);
#endif /* __MAC_H__ */
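/*
 * Editor's sketch, not part of the patch: the register-write pattern
 * that repeats throughout mac.c above (MACvWriteWord,
 * MACvWriteBeaconInterval, ...), shown once as a standalone helper
 * using the converted u8/u16 types.  The helper name is hypothetical,
 * and the exact roles of the value/index arguments to
 * CONTROLnsRequestOut() are assumed from the surrounding calls.
 */
static void example_write_mac_word(struct vnt_private *priv, u8 reg, u16 val)
{
        u8 data[2];

        data[0] = (u8)(val & 0xff);     /* low byte first */
        data[1] = (u8)(val >> 8);       /* high byte second */

        CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE, reg,
                            MESSAGE_REQUEST_MACREG, 2, data);
}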
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index f33086d66496..d5f53e1a74a2 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -73,15 +73,16 @@
#include "iowpa.h"
/*--------------------- Static Definitions -------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
+/* static int msglevel = MSG_LEVEL_DEBUG; */
static int msglevel =MSG_LEVEL_INFO;
-//
-// Define module options
-//
+/*
+ * define module options
+ */
-// Version Information
-#define DRIVER_AUTHOR "VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>"
+/* version information */
+#define DRIVER_AUTHOR \
+ "VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DEVICE_FULL_DRV_NAM);
@@ -184,16 +185,16 @@ DEVICE_PARAM(BasebandType, "baseband type");
DEVICE_PARAM(b80211hEnable, "802.11h mode");
-//
-// Static vars definitions
-//
+/*
+ * Static vars definitions
+ */
static struct usb_device_id vt6656_table[] = {
{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
{}
};
-// Frequency list (map channels to frequencies)
+/* frequency list (map channels to frequencies) */
/*
static const long frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484,
@@ -225,26 +226,27 @@ static void device_set_multi(struct net_device *dev);
static int device_close(struct net_device *dev);
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType);
-static BOOL device_init_defrag_cb(PSDevice pDevice);
-static void device_init_diversity_timer(PSDevice pDevice);
+static int device_init_registers(struct vnt_private *pDevice,
+ DEVICE_INIT_TYPE InitType);
+static bool device_init_defrag_cb(struct vnt_private *pDevice);
+static void device_init_diversity_timer(struct vnt_private *pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
static int ethtool_ioctl(struct net_device *dev, void *useraddr);
-static void device_free_tx_bufs(PSDevice pDevice);
-static void device_free_rx_bufs(PSDevice pDevice);
-static void device_free_int_bufs(PSDevice pDevice);
-static void device_free_frag_bufs(PSDevice pDevice);
-static BOOL device_alloc_bufs(PSDevice pDevice);
-
-static int Read_config_file(PSDevice pDevice);
-static unsigned char *Config_FileOperation(PSDevice pDevice);
+static void device_free_tx_bufs(struct vnt_private *pDevice);
+static void device_free_rx_bufs(struct vnt_private *pDevice);
+static void device_free_int_bufs(struct vnt_private *pDevice);
+static void device_free_frag_bufs(struct vnt_private *pDevice);
+static bool device_alloc_bufs(struct vnt_private *pDevice);
+
+static int Read_config_file(struct vnt_private *pDevice);
+static unsigned char *Config_FileOperation(struct vnt_private *pDevice);
static int Config_FileGetParameter(unsigned char *string,
unsigned char *dest,
unsigned char *source);
-static void usb_device_reset(PSDevice pDevice);
+static void usb_device_reset(struct vnt_private *pDevice);
@@ -254,7 +256,7 @@ static void usb_device_reset(PSDevice pDevice);
static void
-device_set_options(PSDevice pDevice) {
+device_set_options(struct vnt_private *pDevice) {
BYTE abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
BYTE abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
@@ -277,22 +279,22 @@ device_set_options(PSDevice pDevice) {
pDevice->b11hEnable = X80211h_MODE_DEF;
pDevice->eOPMode = OP_MODE_DEF;
pDevice->uConnectionRate = DATA_RATE_DEF;
- if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = TRUE;
+ if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = true;
pDevice->byBBType = BBP_TYPE_DEF;
pDevice->byPacketType = pDevice->byBBType;
pDevice->byAutoFBCtrl = AUTO_FB_0;
- pDevice->bUpdateBBVGA = TRUE;
+ pDevice->bUpdateBBVGA = true;
pDevice->byFOETuning = 0;
pDevice->byAutoPwrTunning = 0;
pDevice->wCTSDuration = 0;
pDevice->byPreambleType = 0;
- pDevice->bExistSWNetAddr = FALSE;
-// pDevice->bDiversityRegCtlON = TRUE;
- pDevice->bDiversityRegCtlON = FALSE;
+ pDevice->bExistSWNetAddr = false;
+ /* pDevice->bDiversityRegCtlON = true; */
+ pDevice->bDiversityRegCtlON = false;
}
-static void device_init_diversity_timer(PSDevice pDevice)
+static void device_init_diversity_timer(struct vnt_private *pDevice)
{
init_timer(&pDevice->TimerSQ3Tmax1);
pDevice->TimerSQ3Tmax1.data = (unsigned long)pDevice;
@@ -313,25 +315,25 @@ static void device_init_diversity_timer(PSDevice pDevice)
}
-//
-// Initialization of MAC & BBP registers
-//
+/*
+ * initialization of MAC & BBP registers
+ */
-static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
+static int device_init_registers(struct vnt_private *pDevice,
+ DEVICE_INIT_TYPE InitType)
{
- u8 abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- u8 abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
- BYTE byAntenna;
- unsigned int ii;
- CMD_CARD_INIT sInitCmd;
- int ntStatus = STATUS_SUCCESS;
- RSP_CARD_INIT sInitRsp;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- BYTE byTmp;
- BYTE byCalibTXIQ = 0;
- BYTE byCalibTXDC = 0;
- BYTE byCalibRXIQ = 0;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u8 abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 abySNAP_RFC1042[ETH_ALEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+ u8 abySNAP_Bridgetunnel[ETH_ALEN]
+ = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
+ u8 byAntenna;
+ int ii;
+ CMD_CARD_INIT sInitCmd;
+ int ntStatus = STATUS_SUCCESS;
+ RSP_CARD_INIT sInitRsp;
+ u8 byTmp;
+ u8 byCalibTXIQ = 0, byCalibTXDC = 0, byCalibRXIQ = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n", InitType, pDevice->byPacketType);
spin_lock_irq(&pDevice->lock);
@@ -343,24 +345,24 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
ETH_ALEN);
if ( !FIRMWAREbCheckVersion(pDevice) ) {
- if (FIRMWAREbDownload(pDevice) == TRUE) {
- if (FIRMWAREbBrach2Sram(pDevice) == FALSE) {
+ if (FIRMWAREbDownload(pDevice) == true) {
+ if (FIRMWAREbBrach2Sram(pDevice) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbBrach2Sram fail \n");
spin_unlock_irq(&pDevice->lock);
- return FALSE;
+ return false;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbDownload fail \n");
spin_unlock_irq(&pDevice->lock);
- return FALSE;
+ return false;
}
}
if ( !BBbVT3184Init(pDevice) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" BBbVT3184Init fail \n");
spin_unlock_irq(&pDevice->lock);
- return FALSE;
+ return false;
}
}
@@ -371,7 +373,7 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
sInitCmd.byShortRetryLimit = pDevice->byShortRetryLimit;
sInitCmd.byLongRetryLimit = pDevice->byLongRetryLimit;
- //issue Card_init command to device
+ /* issue card_init command to device */
ntStatus = CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_CARDINIT,
0,
@@ -382,7 +384,7 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
if ( ntStatus != STATUS_SUCCESS ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Issue Card init fail \n");
spin_unlock_irq(&pDevice->lock);
- return FALSE;
+ return false;
}
if (InitType == DEVICE_INIT_COLD) {
@@ -391,10 +393,10 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
if (ntStatus != STATUS_SUCCESS) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Cardinit request in status fail!\n");
spin_unlock_irq(&pDevice->lock);
- return FALSE;
+ return false;
}
- //Local ID for AES functions
+ /* local ID for AES functions */
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
MAC_REG_LOCALID,
@@ -404,15 +406,17 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
if ( ntStatus != STATUS_SUCCESS ) {
spin_unlock_irq(&pDevice->lock);
- return FALSE;
+ return false;
}
- // Do MACbSoftwareReset in MACvInitialize
- // force CCK
- pDevice->bCCK = TRUE;
- pDevice->bProtectMode = FALSE; //Only used in 11g type, sync with ERP IE
- pDevice->bNonERPPresent = FALSE;
- pDevice->bBarkerPreambleMd = FALSE;
+ /* do MACbSoftwareReset in MACvInitialize */
+
+ /* force CCK */
+ pDevice->bCCK = true;
+ pDevice->bProtectMode = false;
+ /* only used in 11g type, sync with ERP IE */
+ pDevice->bNonERPPresent = false;
+ pDevice->bBarkerPreambleMd = false;
if ( pDevice->bFixRate ) {
pDevice->wCurrentRate = (WORD) pDevice->uConnectionRate;
} else {
@@ -426,13 +430,14 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->byTopOFDMBasicRate = RATE_24M;
pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byRevId = 0; //Target to IF pin while programming to RF chip.
+ pDevice->byRevId = 0;
+ /* target to IF pin while programming to RF chip */
pDevice->byCurPwr = 0xFF;
pDevice->byCCKPwr = pDevice->abyEEPROM[EEP_OFS_PWR_CCK];
pDevice->byOFDMPwrG = pDevice->abyEEPROM[EEP_OFS_PWR_OFDMG];
- // Load power Table
- for (ii=0;ii<14;ii++) {
+ /* load power table */
+ for (ii = 0; ii < 14; ii++) {
pDevice->abyCCKPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_CCK_PWR_TBL];
if (pDevice->abyCCKPwrTbl[ii] == 0)
pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr;
@@ -441,8 +446,10 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG;
}
- //original zonetype is USA,but customize zonetype is europe,
- // then need recover 12,13 ,14 channel with 11 channel
+ /*
+ * original zonetype is USA, but custom zonetype is Europe,
+ * then need to recover 12, 13, 14 channels with 11 channel
+ */
if(((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe))&&
(pDevice->byOriginalZonetype == ZoneType_USA)) {
@@ -452,25 +459,24 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
}
}
- //{{ RobertYu: 20041124
- pDevice->byOFDMPwrA = 0x34; // same as RFbMA2829SelectChannel
- // Load OFDM A Power Table
- for (ii=0;ii<CB_MAX_CHANNEL_5G;ii++) { //RobertYu:20041224, bug using CB_MAX_CHANNEL
+ pDevice->byOFDMPwrA = 0x34; /* same as RFbMA2829SelectChannel */
+
+ /* load OFDM A power table */
+ for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
pDevice->abyOFDMAPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDMA_PWR_TBL];
if (pDevice->abyOFDMAPwrTbl[ii] == 0)
pDevice->abyOFDMAPwrTbl[ii] = pDevice->byOFDMPwrA;
}
- //}} RobertYu
byAntenna = pDevice->abyEEPROM[EEP_OFS_ANTENNA];
if (byAntenna & EEP_ANTINV)
- pDevice->bTxRxAntInv = TRUE;
+ pDevice->bTxRxAntInv = true;
else
- pDevice->bTxRxAntInv = FALSE;
+ pDevice->bTxRxAntInv = false;
byAntenna &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
- if (byAntenna == 0) // if not set default is All
+ if (byAntenna == 0) /* if not set default is both */
byAntenna = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
if (byAntenna == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
@@ -478,29 +484,29 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->byTxAntennaMode = ANT_B;
pDevice->dwTxAntennaSel = 1;
pDevice->dwRxAntennaSel = 1;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
if (pDevice->bDiversityRegCtlON)
- pDevice->bDiversityEnable = TRUE;
+ pDevice->bDiversityEnable = true;
else
- pDevice->bDiversityEnable = FALSE;
+ pDevice->bDiversityEnable = false;
} else {
- pDevice->bDiversityEnable = FALSE;
+ pDevice->bDiversityEnable = false;
pDevice->byAntennaCount = 1;
pDevice->dwTxAntennaSel = 0;
pDevice->dwRxAntennaSel = 0;
if (byAntenna & EEP_ANTENNA_AUX) {
pDevice->byTxAntennaMode = ANT_A;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_B;
else
pDevice->byRxAntennaMode = ANT_A;
} else {
pDevice->byTxAntennaMode = ANT_B;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
@@ -512,35 +518,34 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->byTMax2 = 4;
pDevice->ulSQ3TH = 0;
pDevice->byTMax3 = 64;
- // -----------------------------------------------------------------
- //Get Auto Fall Back Type
+ /* get Auto Fall Back type */
pDevice->byAutoFBCtrl = AUTO_FB_0;
- // Set SCAN Time
+ /* set SCAN Time */
pDevice->uScanTime = WLAN_SCAN_MINITIME;
- // default Auto Mode
- //pDevice->NetworkType = Ndis802_11Automode;
+ /* default Auto Mode */
+ /* pDevice->NetworkType = Ndis802_11Automode; */
pDevice->eConfigPHYMode = PHY_TYPE_AUTO;
pDevice->byBBType = BB_TYPE_11G;
- // initialize BBP registers
+ /* initialize BBP registers */
pDevice->ulTxPower = 25;
- // Get Channel range
+ /* get channel range */
pDevice->byMinChannel = 1;
pDevice->byMaxChannel = CB_MAX_CHANNEL;
- // Get RFType
+ /* get RFType */
pDevice->byRFType = sInitRsp.byRFType;
if ((pDevice->byRFType & RF_EMU) != 0) {
- // force change RevID for VT3253 emu
- pDevice->byRevId = 0x80;
+ /* force change RevID for VT3253 emu */
+ pDevice->byRevId = 0x80;
}
- // Load EEPROM calibrated vt3266 parameters
+ /* load vt3266 calibration parameters in EEPROM */
if (pDevice->byRFType == RF_VT3226D0) {
if((pDevice->abyEEPROM[EEP_OFS_MAJOR_VER] == 0x1) &&
(pDevice->abyEEPROM[EEP_OFS_MINOR_VER] >= 0x4)) {
@@ -548,13 +553,32 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
byCalibTXDC = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_DC];
byCalibRXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_RX_IQ];
if( (byCalibTXIQ || byCalibTXDC || byCalibRXIQ) ) {
- ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFF, 0x03); // CR255, Set BB to support TX/RX IQ and DC compensation Mode
- ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFB, byCalibTXIQ); // CR251, TX I/Q Imbalance Calibration
- ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFC, byCalibTXDC); // CR252, TX DC-Offset Calibration
- ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFD, byCalibRXIQ); // CR253, RX I/Q Imbalance Calibration
+ /* CR255, enable TX/RX IQ and DC compensation mode */
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xFF,
+ 0x03);
+ /* CR251, TX I/Q Imbalance Calibration */
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xFB,
+ byCalibTXIQ);
+ /* CR252, TX DC-Offset Calibration */
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xFC,
+ byCalibTXDC);
+ /* CR253, RX I/Q Imbalance Calibration */
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xFD,
+ byCalibRXIQ);
} else {
- // turn off BB Calibration compensation
- ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFF, 0x0); // CR255
+ /* CR255, turn off BB Calibration compensation */
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xFF,
+ 0x0);
}
}
}
@@ -563,26 +587,27 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pMgmt->uIBSSChannel = pDevice->uChannel;
CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
- // get Permanent network address
+ /* get permanent network address */
memcpy(pDevice->abyPermanentNetAddr,&(sInitRsp.byNetAddr[0]),6);
memcpy(pDevice->abyCurrentNetAddr,
pDevice->abyPermanentNetAddr,
ETH_ALEN);
- // if exist SW network address, use SW network address.
-
+ /* if exist SW network address, use it */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %pM\n",
pDevice->abyCurrentNetAddr);
}
- // Set BB and packet type at the same time.
- // Set Short Slot Time, xIFS, and RSPINF.
+ /*
+ * set BB and packet type at the same time
+ * set Short Slot Time, xIFS, and RSPINF
+ */
if (pDevice->byBBType == BB_TYPE_11A) {
CARDbAddBasicRate(pDevice, RATE_6M);
- pDevice->bShortSlotTime = TRUE;
+ pDevice->bShortSlotTime = true;
} else {
CARDbAddBasicRate(pDevice, RATE_1M);
- pDevice->bShortSlotTime = FALSE;
+ pDevice->bShortSlotTime = false;
}
BBvSetShortSlotTime(pDevice);
CARDvSetBSSMode(pDevice);
@@ -594,7 +619,7 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
}
pDevice->byRadioCtl = pDevice->abyEEPROM[EEP_OFS_RADIOCTL];
- pDevice->bHWRadioOff = FALSE;
+ pDevice->bHWRadioOff = false;
if ( (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) != 0 ) {
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
@@ -605,23 +630,23 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
if ( ntStatus != STATUS_SUCCESS ) {
spin_unlock_irq(&pDevice->lock);
- return FALSE;
+ return false;
}
if ( (byTmp & GPIO3_DATA) == 0 ) {
- pDevice->bHWRadioOff = TRUE;
+ pDevice->bHWRadioOff = true;
MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
} else {
MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- pDevice->bHWRadioOff = FALSE;
+ pDevice->bHWRadioOff = false;
}
- } //EEP_RADIOCTL_ENABLE
+ }
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_TMLEN,0x38);
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL0,0x01);
- if ((pDevice->bHWRadioOff == TRUE) || (pDevice->bRadioControlOff == TRUE)) {
+ if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
CARDbRadioPowerOff(pDevice);
} else {
CARDbRadioPowerOn(pDevice);
@@ -629,14 +654,14 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
spin_unlock_irq(&pDevice->lock);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----INIbInitAdapter Exit\n");
- return TRUE;
+ return true;
}
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
static int vt6656_suspend(struct usb_interface *intf, pm_message_t message)
{
- PSDevice device = usb_get_intfdata(intf);
+ struct vnt_private *device = usb_get_intfdata(intf);
if (!device || !device->dev)
return -ENODEV;
@@ -651,7 +676,7 @@ static int vt6656_suspend(struct usb_interface *intf, pm_message_t message)
static int vt6656_resume(struct usb_interface *intf)
{
- PSDevice device = usb_get_intfdata(intf);
+ struct vnt_private *device = usb_get_intfdata(intf);
if (!device || !device->dev)
return -ENODEV;
@@ -682,13 +707,13 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
struct usb_device *udev = interface_to_usbdev(intf);
int rc = 0;
struct net_device *netdev = NULL;
- PSDevice pDevice = NULL;
+ struct vnt_private *pDevice;
printk(KERN_NOTICE "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
printk(KERN_NOTICE "Copyright (c) 2004 VIA Networking Technologies, Inc.\n");
udev = usb_get_dev(udev);
- netdev = alloc_etherdev(sizeof(DEVICE_INFO));
+ netdev = alloc_etherdev(sizeof(struct vnt_private));
if (!netdev) {
printk(KERN_ERR DEVICE_NAME ": allocate net device failed\n");
rc = -ENOMEM;
@@ -696,7 +721,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
pDevice = netdev_priv(netdev);
- memset(pDevice, 0, sizeof(DEVICE_INFO));
+ memset(pDevice, 0, sizeof(struct vnt_private));
pDevice->dev = netdev;
pDevice->usb = udev;
@@ -705,7 +730,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
spin_lock_init(&pDevice->lock);
pDevice->tx_80211 = device_dma0_tx_80211;
- pDevice->sMgmtObj.pAdapter = (void *) pDevice;
+ pDevice->vnt_mgmt.pAdapter = (void *) pDevice;
netdev->netdev_ops = &device_netdev_ops;
netdev->wireless_handlers =
@@ -732,7 +757,7 @@ err_nomem:
return rc;
}
-static void device_free_tx_bufs(PSDevice pDevice)
+static void device_free_tx_bufs(struct vnt_private *pDevice)
{
PUSB_SEND_CONTEXT pTxContext;
int ii;
@@ -740,7 +765,7 @@ static void device_free_tx_bufs(PSDevice pDevice)
for (ii = 0; ii < pDevice->cbTD; ii++) {
pTxContext = pDevice->apTD[ii];
- //de-allocate URBs
+ /* deallocate URBs */
if (pTxContext->pUrb) {
usb_kill_urb(pTxContext->pUrb);
usb_free_urb(pTxContext->pUrb);
@@ -751,7 +776,7 @@ static void device_free_tx_bufs(PSDevice pDevice)
}
-static void device_free_rx_bufs(PSDevice pDevice)
+static void device_free_rx_bufs(struct vnt_private *pDevice)
{
PRCB pRCB;
int ii;
@@ -759,12 +784,12 @@ static void device_free_rx_bufs(PSDevice pDevice)
for (ii = 0; ii < pDevice->cbRD; ii++) {
pRCB = pDevice->apRCB[ii];
- //de-allocate URBs
+ /* deallocate URBs */
if (pRCB->pUrb) {
usb_kill_urb(pRCB->pUrb);
usb_free_urb(pRCB->pUrb);
}
- //de-allocate skb
+ /* deallocate skb */
if (pRCB->skb)
dev_kfree_skb(pRCB->skb);
}
@@ -773,7 +798,7 @@ static void device_free_rx_bufs(PSDevice pDevice)
return;
}
-static void usb_device_reset(PSDevice pDevice)
+static void usb_device_reset(struct vnt_private *pDevice)
{
int status;
status = usb_reset_device(pDevice->usb);
@@ -782,14 +807,15 @@ static void usb_device_reset(PSDevice pDevice)
return ;
}
-static void device_free_int_bufs(PSDevice pDevice)
+static void device_free_int_bufs(struct vnt_private *pDevice)
{
kfree(pDevice->intBuf.pDataBuf);
return;
}
-static BOOL device_alloc_bufs(PSDevice pDevice) {
+static bool device_alloc_bufs(struct vnt_private *pDevice)
+{
PUSB_SEND_CONTEXT pTxContext;
PRCB pRCB;
@@ -805,16 +831,16 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
}
pDevice->apTD[ii] = pTxContext;
pTxContext->pDevice = (void *) pDevice;
- //allocate URBs
+ /* allocate URBs */
pTxContext->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
if (pTxContext->pUrb == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "alloc tx urb failed\n");
goto free_tx;
}
- pTxContext->bBoolInUse = FALSE;
+ pTxContext->bBoolInUse = false;
}
- // allocate rcb mem
+ /* allocate RCB mem */
pDevice->pRCBMem = kzalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
if (pDevice->pRCBMem == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
@@ -833,7 +859,7 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
pDevice->apRCB[ii] = pRCB;
pRCB->pDevice = (void *) pDevice;
- //allocate URBs
+ /* allocate URBs */
pRCB->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
if (pRCB->pUrb == NULL) {
@@ -846,7 +872,7 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
goto free_rx_tx;
}
pRCB->skb->dev = pDevice->dev;
- pRCB->bBoolInUse = FALSE;
+ pRCB->bBoolInUse = false;
EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB);
pDevice->NumRecvFreeList++;
pRCB++;
@@ -874,7 +900,7 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
goto free_rx_tx;
}
- return TRUE;
+ return true;
free_rx_tx:
device_free_rx_bufs(pDevice);
@@ -882,15 +908,16 @@ free_rx_tx:
free_tx:
device_free_tx_bufs(pDevice);
- return FALSE;
+ return false;
}
-static BOOL device_init_defrag_cb(PSDevice pDevice) {
- int i;
- PSDeFragControlBlock pDeF;
+static bool device_init_defrag_cb(struct vnt_private *pDevice)
+{
+ int i;
+ PSDeFragControlBlock pDeF;
/* Init the fragment ctl entries */
for (i = 0; i < CB_MAX_RX_FRAG; i++) {
@@ -903,18 +930,19 @@ static BOOL device_init_defrag_cb(PSDevice pDevice) {
}
pDevice->cbDFCB = CB_MAX_RX_FRAG;
pDevice->cbFreeDFCB = pDevice->cbDFCB;
- return TRUE;
+ return true;
free_frag:
device_free_frag_bufs(pDevice);
- return FALSE;
+ return false;
}
-static void device_free_frag_bufs(PSDevice pDevice) {
- PSDeFragControlBlock pDeF;
- int i;
+static void device_free_frag_bufs(struct vnt_private *pDevice)
+{
+ PSDeFragControlBlock pDeF;
+ int i;
for (i = 0; i < CB_MAX_RX_FRAG; i++) {
@@ -927,36 +955,39 @@ static void device_free_frag_bufs(PSDevice pDevice) {
-BOOL device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF) {
+int device_alloc_frag_buf(struct vnt_private *pDevice,
+ PSDeFragControlBlock pDeF)
+{
pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pDeF->skb == NULL)
- return FALSE;
+ return false;
ASSERT(pDeF->skb);
pDeF->skb->dev = pDevice->dev;
- return TRUE;
+ return true;
}
/*-----------------------------------------------------------------*/
-static int device_open(struct net_device *dev) {
- PSDevice pDevice=(PSDevice) netdev_priv(dev);
+static int device_open(struct net_device *dev)
+{
+ struct vnt_private *pDevice = netdev_priv(dev);
- pDevice->fWPA_Authened = FALSE;
+ pDevice->fWPA_Authened = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_open...\n");
pDevice->rx_buf_sz = MAX_TOTAL_SIZE_WITH_ALL_HEADERS;
- if (device_alloc_bufs(pDevice) == FALSE) {
+ if (device_alloc_bufs(pDevice) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_alloc_bufs fail... \n");
return -ENOMEM;
}
- if (device_init_defrag_cb(pDevice)== FALSE) {
+ if (device_init_defrag_cb(pDevice)== false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Initial defragment cb fail \n");
goto free_rx_tx;
}
@@ -967,25 +998,26 @@ static int device_open(struct net_device *dev) {
MP_SET_FLAG(pDevice, fMP_POST_READS);
MP_SET_FLAG(pDevice, fMP_POST_WRITES);
- //read config file
+ /* read config file */
Read_config_file(pDevice);
- if (device_init_registers(pDevice, DEVICE_INIT_COLD) == FALSE) {
+ if (device_init_registers(pDevice, DEVICE_INIT_COLD) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " init register fail\n");
goto free_all;
}
device_set_multi(pDevice->dev);
- // Init for Key Management
+ /* init for key management */
KeyvInitTable(pDevice,&pDevice->sKey);
- memcpy(pDevice->sMgmtObj.abyMACAddr, pDevice->abyCurrentNetAddr, ETH_ALEN);
+ memcpy(pDevice->vnt_mgmt.abyMACAddr,
+ pDevice->abyCurrentNetAddr, ETH_ALEN);
memcpy(pDevice->dev->dev_addr, pDevice->abyCurrentNetAddr, ETH_ALEN);
- pDevice->bStopTx0Pkt = FALSE;
- pDevice->bStopDataPkt = FALSE;
- pDevice->bRoaming = FALSE;
- pDevice->bIsRoaming = FALSE;
- pDevice->bEnableRoaming = FALSE;
+ pDevice->bStopTx0Pkt = false;
+ pDevice->bStopDataPkt = false;
+ pDevice->bRoaming = false;
+ pDevice->bIsRoaming = false;
+ pDevice->bEnableRoaming = false;
if (pDevice->bDiversityRegCtlON) {
device_init_diversity_timer(pDevice);
}
@@ -994,27 +1026,27 @@ static int device_open(struct net_device *dev) {
tasklet_init(&pDevice->RxMngWorkItem, (void *)RXvMngWorkItem, (unsigned long)pDevice);
tasklet_init(&pDevice->ReadWorkItem, (void *)RXvWorkItem, (unsigned long)pDevice);
tasklet_init(&pDevice->EventWorkItem, (void *)INTvWorkItem, (unsigned long)pDevice);
- add_timer(&(pDevice->sMgmtObj.sTimerSecondCallback));
- pDevice->int_interval = 100; //Max 100 microframes.
+ add_timer(&pDevice->vnt_mgmt.sTimerSecondCallback);
+ pDevice->int_interval = 100; /* max 100 microframes */
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pDevice->bIsRxWorkItemQueued = TRUE;
- pDevice->fKillEventPollingThread = FALSE;
- pDevice->bEventAvailable = FALSE;
+ pDevice->bIsRxWorkItemQueued = true;
+ pDevice->fKillEventPollingThread = false;
+ pDevice->bEventAvailable = false;
- pDevice->bWPADEVUp = FALSE;
- pDevice->bwextstep0 = FALSE;
- pDevice->bwextstep1 = FALSE;
- pDevice->bwextstep2 = FALSE;
- pDevice->bwextstep3 = FALSE;
- pDevice->bWPASuppWextEnabled = FALSE;
+ pDevice->bWPADEVUp = false;
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = false;
pDevice->byReAssocCount = 0;
RXvWorkItem(pDevice);
INTvWorkItem(pDevice);
- // Patch: if WEP key already set by iwconfig but device not yet open
- if ((pDevice->bEncryptionEnable == TRUE) && (pDevice->bTransmitKey == TRUE)) {
+ /* if WEP key already set by iwconfig but device not yet open */
+ if ((pDevice->bEncryptionEnable == true) && (pDevice->bTransmitKey == true)) {
spin_lock_irq(&pDevice->lock);
KeybSetDefaultKey( pDevice,
&(pDevice->sKey),
@@ -1028,14 +1060,10 @@ static int device_open(struct net_device *dev) {
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
}
- if (pDevice->sMgmtObj.eConfigMode == WMAC_CONFIG_AP) {
+ if (pDevice->vnt_mgmt.eConfigMode == WMAC_CONFIG_AP)
bScheduleCommand((void *) pDevice, WLAN_CMD_RUN_AP, NULL);
- }
- else {
- //mike:mark@2008-11-10
- bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- /* bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL); */
- }
+ else
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
netif_stop_queue(pDevice->dev);
@@ -1061,13 +1089,13 @@ free_rx_tx:
-static int device_close(struct net_device *dev) {
- PSDevice pDevice=(PSDevice) netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- int uu;
+static int device_close(struct net_device *dev)
+{
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int uu;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close1 \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close1\n");
if (pDevice == NULL)
return -ENODEV;
@@ -1078,22 +1106,22 @@ static int device_close(struct net_device *dev) {
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pMgmt->bShareKeyAlgorithm = FALSE;
- pDevice->bEncryptionEnable = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
+ pDevice->bEncryptionEnable = false;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
spin_lock_irq(&pDevice->lock);
for (uu = 0; uu < MAX_KEY_TABLE; uu++)
MACvDisableKeyEntry(pDevice,uu);
spin_unlock_irq(&pDevice->lock);
- if ((pDevice->flags & DEVICE_FLAGS_UNPLUG) == FALSE) {
+ if ((pDevice->flags & DEVICE_FLAGS_UNPLUG) == false) {
MACbShutdown(pDevice);
}
netif_stop_queue(pDevice->dev);
MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
MP_CLEAR_FLAG(pDevice, fMP_POST_WRITES);
MP_CLEAR_FLAG(pDevice, fMP_POST_READS);
- pDevice->fKillEventPollingThread = TRUE;
+ pDevice->fKillEventPollingThread = true;
del_timer(&pDevice->sTimerCommand);
del_timer(&pMgmt->sTimerSecondCallback);
@@ -1108,11 +1136,11 @@ static int device_close(struct net_device *dev) {
tasklet_kill(&pDevice->ReadWorkItem);
tasklet_kill(&pDevice->EventWorkItem);
- pDevice->bRoaming = FALSE;
- pDevice->bIsRoaming = FALSE;
- pDevice->bEnableRoaming = FALSE;
- pDevice->bCmdRunning = FALSE;
- pDevice->bLinkPass = FALSE;
+ pDevice->bRoaming = false;
+ pDevice->bIsRoaming = false;
+ pDevice->bEnableRoaming = false;
+ pDevice->bCmdRunning = false;
+ pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -1136,7 +1164,7 @@ static int device_close(struct net_device *dev) {
static void vt6656_disconnect(struct usb_interface *intf)
{
- PSDevice device = usb_get_intfdata(intf);
+ struct vnt_private *device = usb_get_intfdata(intf);
if (!device)
return;
@@ -1156,7 +1184,7 @@ static void vt6656_disconnect(struct usb_interface *intf)
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
spin_lock_irq(&pDevice->lock);
@@ -1172,7 +1200,7 @@ static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev)
static int device_xmit(struct sk_buff *skb, struct net_device *dev)
{
- PSDevice pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct net_device_stats *stats = &pDevice->stats;
spin_lock_irq(&pDevice->lock);
@@ -1217,7 +1245,7 @@ static inline u32 ether_crc(int length, unsigned char *data)
return crc;
}
-//find out the start position of str2 from str1
+/* find out the start position of str2 from str1 */
static unsigned char *kstrstr(const unsigned char *str1,
const unsigned char *str2) {
int str1_len = strlen(str1);
@@ -1246,37 +1274,37 @@ static int Config_FileGetParameter(unsigned char *string,
strcat(buf1, "=");
source+=strlen(buf1);
-//find target string start point
+ /* find target string start point */
start_p = kstrstr(source,buf1);
if (start_p == NULL)
- return FALSE;
+ return false;
-//check if current config line is marked by "#" ??
+ /* check if current config line is marked by "#" */
for (ii = 1; ; ii++) {
if (memcmp(start_p - ii, "\n", 1) == 0)
break;
if (memcmp(start_p - ii, "#", 1) == 0)
- return FALSE;
+ return false;
}
-//find target string end point
+ /* find target string end point */
end_p = kstrstr(start_p,"\n");
- if (end_p == NULL) { //can't find "\n",but don't care
- end_p=start_p+strlen(start_p); //no include "\n"
- }
+ if (end_p == NULL) { /* can't find "\n", but don't care */
+ end_p = start_p + strlen(start_p); /* no include "\n" */
+ }
memset(buf2,0,100);
- memcpy(buf2,start_p,end_p-start_p); //get the target line
+ memcpy(buf2, start_p, end_p-start_p); /* get the target line */
buf2[end_p-start_p]='\0';
- //find value
+ /* find value */
start_p = kstrstr(buf2,"=");
if (start_p == NULL)
- return FALSE;
+ return false;
memset(buf1,0,100);
strcpy(buf1,start_p+1);
- //except space
+ /* except space */
tmp_p = buf1;
while(*tmp_p != 0x00) {
if(*tmp_p==' ')
@@ -1286,29 +1314,22 @@ static int Config_FileGetParameter(unsigned char *string,
}
memcpy(dest,tmp_p,strlen(tmp_p));
- return TRUE;
+ return true;
}
-//if read fail,return NULL,or return data pointer;
-static unsigned char *Config_FileOperation(PSDevice pDevice)
+/* if read fails, return NULL, or return data pointer */
+static unsigned char *Config_FileOperation(struct vnt_private *pDevice)
{
unsigned char *config_path = CONFIG_PATH;
unsigned char *buffer = NULL;
struct file *filp=NULL;
mm_segment_t old_fs = get_fs();
- //int oldfsuid=0,oldfsgid=0;
+
int result = 0;
set_fs (KERNEL_DS);
- /* Can't do this anymore, so we rely on correct filesystem permissions:
- //Make sure a caller can read or write power as root
- oldfsuid=current->fsuid;
- oldfsgid=current->fsgid;
- current->fsuid = 0;
- current->fsgid = 0;
- */
-
- //open file
+
+ /* open file */
filp = filp_open(config_path, O_RDWR, 0);
if (IS_ERR(filp)) {
printk("Config_FileOperation file Not exist\n");
@@ -1341,11 +1362,6 @@ error1:
error2:
set_fs (old_fs);
- /*
- current->fsuid=oldfsuid;
- current->fsgid=oldfsgid;
- */
-
if(result!=0) {
kfree(buffer);
buffer=NULL;
@@ -1353,13 +1369,14 @@ if(result!=0) {
return buffer;
}
-//return --->-1:fail; >=0:successful
-static int Read_config_file(PSDevice pDevice) {
- int result = 0;
- unsigned char tmpbuffer[100];
- unsigned char *buffer = NULL;
+/* return --->-1:fail; >=0:successful */
+static int Read_config_file(struct vnt_private *pDevice)
+{
+ int result = 0;
+ unsigned char tmpbuffer[100];
+ unsigned char *buffer = NULL;
- //init config setting
+ /* init config setting */
pDevice->config_file.ZoneType = -1;
pDevice->config_file.eAuthenMode = -1;
pDevice->config_file.eEncryptionStatus = -1;
@@ -1370,10 +1387,10 @@ static int Read_config_file(PSDevice pDevice) {
return result;
}
-//get zonetype
+/* get zonetype */
{
memset(tmpbuffer,0,sizeof(tmpbuffer));
- if(Config_FileGetParameter("ZONETYPE",tmpbuffer,buffer) ==TRUE) {
+ if(Config_FileGetParameter("ZONETYPE",tmpbuffer,buffer) ==true) {
if(memcmp(tmpbuffer,"USA",3)==0) {
pDevice->config_file.ZoneType=ZoneType_USA;
}
@@ -1389,15 +1406,15 @@ static int Read_config_file(PSDevice pDevice) {
}
}
-//get other parameter
+/* get other parameter */
{
memset(tmpbuffer,0,sizeof(tmpbuffer));
- if(Config_FileGetParameter("AUTHENMODE",tmpbuffer,buffer)==TRUE) {
+ if(Config_FileGetParameter("AUTHENMODE",tmpbuffer,buffer)==true) {
pDevice->config_file.eAuthenMode = (int) simple_strtol(tmpbuffer, NULL, 10);
}
memset(tmpbuffer,0,sizeof(tmpbuffer));
- if(Config_FileGetParameter("ENCRYPTIONMODE",tmpbuffer,buffer)==TRUE) {
+ if(Config_FileGetParameter("ENCRYPTIONMODE",tmpbuffer,buffer)==true) {
pDevice->config_file.eEncryptionStatus= (int) simple_strtol(tmpbuffer, NULL, 10);
}
}
@@ -1406,15 +1423,16 @@ static int Read_config_file(PSDevice pDevice) {
return result;
}
-static void device_set_multi(struct net_device *dev) {
- PSDevice pDevice = (PSDevice) netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- u32 mc_filter[2];
- int ii;
- struct netdev_hw_addr *ha;
- BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
- BYTE byTmpMode = 0;
- int rc;
+static void device_set_multi(struct net_device *dev)
+{
+ struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[2];
+ int ii;
+ u8 pbyData[8] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 byTmpMode = 0;
+ int rc;
spin_lock_irq(&pDevice->lock);
@@ -1429,9 +1447,9 @@ static void device_set_multi(struct net_device *dev) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode in= %x\n", pDevice->byRxMode);
- if (dev->flags & IFF_PROMISC) { // Set promiscuous.
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
DBG_PRT(MSG_LEVEL_ERR,KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
- // Unconditionally log net taps.
+ /* unconditionally log net taps */
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
@@ -1460,7 +1478,10 @@ static void device_set_multi(struct net_device *dev) {
}
if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- // If AP mode, don't enable RCR_UNICAST. Since hw only compare addr1 with local mac.
+ /*
+ * If AP mode, don't enable RCR_UNICAST since HW only compares
+ * addr1 with local MAC
+ */
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
pDevice->byRxMode &= ~(RCR_UNICAST);
}
@@ -1472,14 +1493,14 @@ static void device_set_multi(struct net_device *dev) {
static struct net_device_stats *device_get_stats(struct net_device *dev)
{
- PSDevice pDevice=(PSDevice) netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
- return &pDevice->stats;
+ return &pDevice->stats;
}
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
+ struct vnt_private *pDevice = netdev_priv(dev);
struct iwreq *wrq = (struct iwreq *) rq;
int rc = 0;
@@ -1524,9 +1545,6 @@ static int ethtool_ioctl(struct net_device *dev, void *useraddr)
return -EOPNOTSUPP;
}
-
-/*------------------------------------------------------------------*/
-
MODULE_DEVICE_TABLE(usb, vt6656_table);
static struct usb_driver vt6656_driver = {
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index ab3a55462056..527c259f6758 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -70,12 +70,10 @@ static int msglevel = MSG_LEVEL_INFO;
*
*/
-void PSvEnablePowerSaving(void *hDeviceContext,
- WORD wListenInterval)
+void PSvEnablePowerSaving(struct vnt_private *pDevice, u16 wListenInterval)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- WORD wAID = pMgmt->wCurrAID | BIT14 | BIT15;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u16 wAID = pMgmt->wCurrAID | BIT14 | BIT15;
/* set period of power up before TBTT */
MACvWriteWord(pDevice, MAC_REG_PWBT, C_PWBT);
@@ -116,13 +114,13 @@ void PSvEnablePowerSaving(void *hDeviceContext,
pMgmt->wCountToWakeUp = 0;
}
- pDevice->bEnablePSMode = TRUE;
+ pDevice->bEnablePSMode = true;
/* We don't send null pkt in ad hoc mode since beacon will handle this. */
if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
PSbSendNullPacket(pDevice);
- pDevice->bPWBitOn = TRUE;
+ pDevice->bPWBitOn = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS:Power Saving Mode Enable...\n");
}
@@ -136,10 +134,8 @@ void PSvEnablePowerSaving(void *hDeviceContext,
*
*/
-void PSvDisablePowerSaving(void *hDeviceContext)
+void PSvDisablePowerSaving(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- /* PSMgmtObject pMgmt = &(pDevice->sMgmtObj); */
/* disable power saving hw function */
CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_DISABLE_PS, 0,
@@ -150,12 +146,12 @@ void PSvDisablePowerSaving(void *hDeviceContext)
/* set always listen beacon */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_ALBCN);
- pDevice->bEnablePSMode = FALSE;
+ pDevice->bEnablePSMode = false;
if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
PSbSendNullPacket(pDevice);
- pDevice->bPWBitOn = FALSE;
+ pDevice->bPWBitOn = false;
}
/*
@@ -164,38 +160,36 @@ void PSvDisablePowerSaving(void *hDeviceContext)
* Consider to power down when no more packets to tx or rx.
*
* Return Value:
- * TRUE, if power down success
- * FALSE, if fail
+ * true, if power down success
+ * false, if fail
*/
-BOOL PSbConsiderPowerDown(void *hDeviceContext,
- BOOL bCheckRxDMA,
- BOOL bCheckCountToWakeUp)
+int PSbConsiderPowerDown(struct vnt_private *pDevice, int bCheckRxDMA,
+ int bCheckCountToWakeUp)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- BYTE byData;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u8 byData;
/* check if already in Doze mode */
ControlvReadByte(pDevice, MESSAGE_REQUEST_MACREG,
MAC_REG_PSCTL, &byData);
if ((byData & PSCTL_PS) != 0)
- return TRUE;
+ return true;
if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
/* check if in TIM wake period */
if (pMgmt->bInTIMWake)
- return FALSE;
+ return false;
}
/* check scan state */
if (pDevice->bCmdRunning)
- return FALSE;
+ return false;
/* Tx Burst */
if (pDevice->bPSModeTxBurst)
- return FALSE;
+ return false;
/* Froce PSEN on */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_PSEN);
@@ -203,16 +197,16 @@ BOOL PSbConsiderPowerDown(void *hDeviceContext,
if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
if (bCheckCountToWakeUp && (pMgmt->wCountToWakeUp == 0
|| pMgmt->wCountToWakeUp == 1)) {
- return FALSE;
+ return false;
}
}
- pDevice->bPSRxBeacon = TRUE;
+ pDevice->bPSRxBeacon = true;
/* no Tx, no Rx isr, now go to Doze */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_GO2DOZE);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Go to Doze ZZZZZZZZZZZZZZZ\n");
- return TRUE;
+ return true;
}
/*
@@ -225,15 +219,17 @@ BOOL PSbConsiderPowerDown(void *hDeviceContext,
*
*/
-void PSvSendPSPOLL(void *hDeviceContext)
+void PSvSendPSPOLL(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PSTxMgmtPacket pTxPacket = NULL;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+
+ memset(pMgmt->pbyPSPacketPool, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_HDR_ADDR2_LEN);
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyPSPacketPool;
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
- memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_HDR_ADDR2_LEN);
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
pTxPacket->p80211Header->sA2.wFrameCtl = cpu_to_le16(
(
WLAN_SET_FC_FTYPE(WLAN_TYPE_CTL) |
@@ -263,24 +259,25 @@ void PSvSendPSPOLL(void *hDeviceContext)
*
*/
-BOOL PSbSendNullPacket(void *hDeviceContext)
+int PSbSendNullPacket(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSTxMgmtPacket pTxPacket = NULL;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u16 flags = 0;
- if (pDevice->bLinkPass == FALSE)
- return FALSE;
+ if (pDevice->bLinkPass == false)
+ return false;
- if ((pDevice->bEnablePSMode == FALSE) &&
- (pDevice->fTxDataInSleep == FALSE)) {
- return FALSE;
+ if ((pDevice->bEnablePSMode == false) &&
+ (pDevice->fTxDataInSleep == false)) {
+ return false;
}
- memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN);
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ memset(pMgmt->pbyPSPacketPool, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_NULLDATA_FR_MAXLEN);
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyPSPacketPool;
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
flags = WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL);
@@ -303,9 +300,9 @@ BOOL PSbSendNullPacket(void *hDeviceContext)
/* log error if sending failed */
if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet failed !\n");
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
/*
@@ -318,11 +315,10 @@ BOOL PSbSendNullPacket(void *hDeviceContext)
*
*/
-BOOL PSbIsNextTBTTWakeUp(void *hDeviceContext)
+int PSbIsNextTBTTWakeUp(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- BOOL bWakeUp = FALSE;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int bWakeUp = false;
if (pMgmt->wListenInterval >= 2) {
if (pMgmt->wCountToWakeUp == 0)
@@ -333,8 +329,8 @@ BOOL PSbIsNextTBTTWakeUp(void *hDeviceContext)
if (pMgmt->wCountToWakeUp == 1) {
/* Turn on wake up to listen next beacon */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_LNBCN);
- pDevice->bPSRxBeacon = FALSE;
- bWakeUp = TRUE;
+ pDevice->bPSRxBeacon = false;
+ bWakeUp = true;
} else if (!pDevice->bPSRxBeacon) {
/* Listen until RxBeacon */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_LNBCN);
diff --git a/drivers/staging/vt6656/power.h b/drivers/staging/vt6656/power.h
index 41bffe528b44..879b10c0d42e 100644
--- a/drivers/staging/vt6656/power.h
+++ b/drivers/staging/vt6656/power.h
@@ -48,14 +48,12 @@
/* PSDevice pDevice */
/* PSDevice hDeviceContext */
-BOOL PSbConsiderPowerDown(void *hDeviceContext,
- BOOL bCheckRxDMA,
- BOOL bCheckCountToWakeUp);
-
-void PSvDisablePowerSaving(void *hDeviceContext);
-void PSvEnablePowerSaving(void *hDeviceContext, WORD wListenInterval);
-void PSvSendPSPOLL(void *hDeviceContext);
-BOOL PSbSendNullPacket(void *hDeviceContext);
-BOOL PSbIsNextTBTTWakeUp(void *hDeviceContext);
+int PSbConsiderPowerDown(struct vnt_private *, int bCheckRxDMA,
+ int bCheckCountToWakeUp);
+void PSvDisablePowerSaving(struct vnt_private *);
+void PSvEnablePowerSaving(struct vnt_private *, u16 wListenInterval);
+void PSvSendPSPOLL(struct vnt_private *);
+int PSbSendNullPacket(struct vnt_private *);
+int PSbIsNextTBTTWakeUp(struct vnt_private *);
#endif /* __POWER_H__ */
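/*
 * Editor's sketch, not part of the patch: driving the converted
 * power-save helpers above.  The void *hDeviceContext casts are gone,
 * so callers pass the struct vnt_private * directly, and the former
 * BOOL results are ints holding true/false.  The wrapper and its call
 * order are illustrative only, not the driver's actual flow.
 */
static void example_power_calls(struct vnt_private *priv, u16 listen_interval)
{
        /* the listen interval now travels as a plain u16 */
        PSvEnablePowerSaving(priv, listen_interval);

        /* int result holding true/false: may the device drop into doze?
         * (also check rx DMA and the wake-up countdown) */
        if (PSbConsiderPowerDown(priv, true, true))
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "going to doze\n");
}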
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 74c0598e37b7..a415705297b2 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -69,7 +69,7 @@ static int msglevel =MSG_LEVEL_INFO;
-BYTE abyAL2230InitTable[CB_AL2230_INIT_SEQ][3] = {
+u8 abyAL2230InitTable[CB_AL2230_INIT_SEQ][3] = {
{0x03, 0xF7, 0x90},
{0x03, 0x33, 0x31},
{0x01, 0xB8, 0x02},
@@ -87,7 +87,7 @@ BYTE abyAL2230InitTable[CB_AL2230_INIT_SEQ][3] = {
{0x00, 0x58, 0x0F}
};
-BYTE abyAL2230ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
+u8 abyAL2230ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
{0x03, 0xF7, 0x90}, // channel = 1, Tf = 2412MHz
{0x03, 0xF7, 0x90}, // channel = 2, Tf = 2417MHz
{0x03, 0xE7, 0x90}, // channel = 3, Tf = 2422MHz
@@ -104,7 +104,7 @@ BYTE abyAL2230ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
{0x03, 0xE7, 0xC0} // channel = 14, Tf = 2412M
};
-BYTE abyAL2230ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
+u8 abyAL2230ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
{0x03, 0x33, 0x31}, // channel = 1, Tf = 2412MHz
{0x0B, 0x33, 0x31}, // channel = 2, Tf = 2417MHz
{0x03, 0x33, 0x31}, // channel = 3, Tf = 2422MHz
@@ -123,7 +123,7 @@ BYTE abyAL2230ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
// 40MHz reference frequency
// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
-BYTE abyAL7230InitTable[CB_AL7230_INIT_SEQ][3] = {
+u8 abyAL7230InitTable[CB_AL7230_INIT_SEQ][3] = {
{0x20, 0x37, 0x90}, // Channel1 // Need modify for 11a
{0x13, 0x33, 0x31}, // Channel1 // Need modify for 11a
{0x84, 0x1F, 0xF2}, // Need modify for 11a: 451FE2
@@ -146,7 +146,7 @@ BYTE abyAL7230InitTable[CB_AL7230_INIT_SEQ][3] = {
{0x1A, 0xBA, 0x8F} // Need modify for 11a: 12BACF
};
-BYTE abyAL7230InitTableAMode[CB_AL7230_INIT_SEQ][3] = {
+u8 abyAL7230InitTableAMode[CB_AL7230_INIT_SEQ][3] = {
{0x2F, 0xF5, 0x20}, // Channel184 // Need modify for 11b/g
{0x00, 0x00, 0x01}, // Channel184 // Need modify for 11b/g
{0x45, 0x1F, 0xE2}, // Need modify for 11b/g
@@ -165,7 +165,7 @@ BYTE abyAL7230InitTableAMode[CB_AL7230_INIT_SEQ][3] = {
{0x12, 0xBA, 0xCF} // Need modify for 11b/g
};
-BYTE abyAL7230ChannelTable0[CB_MAX_CHANNEL][3] = {
+u8 abyAL7230ChannelTable0[CB_MAX_CHANNEL][3] = {
{0x20, 0x37, 0x90}, // channel = 1, Tf = 2412MHz
{0x20, 0x37, 0x90}, // channel = 2, Tf = 2417MHz
{0x20, 0x37, 0x90}, // channel = 3, Tf = 2422MHz
@@ -231,7 +231,7 @@ BYTE abyAL7230ChannelTable0[CB_MAX_CHANNEL][3] = {
{0x2F, 0xF6, 0x10} // channel = 165, Tf = 5825MHz (56)
};
-BYTE abyAL7230ChannelTable1[CB_MAX_CHANNEL][3] = {
+u8 abyAL7230ChannelTable1[CB_MAX_CHANNEL][3] = {
{0x13, 0x33, 0x31}, // channel = 1, Tf = 2412MHz
{0x1B, 0x33, 0x31}, // channel = 2, Tf = 2417MHz
{0x03, 0x33, 0x31}, // channel = 3, Tf = 2422MHz
@@ -295,7 +295,7 @@ BYTE abyAL7230ChannelTable1[CB_MAX_CHANNEL][3] = {
{0x02, 0xAA, 0xB1} // channel = 165, Tf = 5825MHz (56)
};
-BYTE abyAL7230ChannelTable2[CB_MAX_CHANNEL][3] = {
+u8 abyAL7230ChannelTable2[CB_MAX_CHANNEL][3] = {
{0x7F, 0xD7, 0x84}, // channel = 1, Tf = 2412MHz
{0x7F, 0xD7, 0x84}, // channel = 2, Tf = 2417MHz
{0x7F, 0xD7, 0x84}, // channel = 3, Tf = 2422MHz
@@ -360,7 +360,7 @@ BYTE abyAL7230ChannelTable2[CB_MAX_CHANNEL][3] = {
};
///{{RobertYu:20051111
-BYTE abyVT3226_InitTable[CB_VT3226_INIT_SEQ][3] = {
+u8 abyVT3226_InitTable[CB_VT3226_INIT_SEQ][3] = {
{0x03, 0xFF, 0x80},
{0x02, 0x82, 0xA1},
{0x03, 0xC6, 0xA2},
@@ -374,7 +374,7 @@ BYTE abyVT3226_InitTable[CB_VT3226_INIT_SEQ][3] = {
{0x02, 0x00, 0x2A}
};
-BYTE abyVT3226D0_InitTable[CB_VT3226_INIT_SEQ][3] = {
+u8 abyVT3226D0_InitTable[CB_VT3226_INIT_SEQ][3] = {
{0x03, 0xFF, 0x80},
{0x03, 0x02, 0x21}, //RobertYu:20060327
{0x03, 0xC6, 0xA2},
@@ -389,7 +389,7 @@ BYTE abyVT3226D0_InitTable[CB_VT3226_INIT_SEQ][3] = {
};
-BYTE abyVT3226_ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
+u8 abyVT3226_ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
{0x01, 0x97, 0x83}, // channel = 1, Tf = 2412MHz
{0x01, 0x97, 0x83}, // channel = 2, Tf = 2417MHz
{0x01, 0x97, 0x93}, // channel = 3, Tf = 2422MHz
@@ -406,7 +406,7 @@ BYTE abyVT3226_ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
{0x03, 0x37, 0xC3} // channel = 14, Tf = 2484MHz
};
-BYTE abyVT3226_ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
+u8 abyVT3226_ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
{0x02, 0x66, 0x64}, // channel = 1, Tf = 2412MHz
{0x03, 0x66, 0x64}, // channel = 2, Tf = 2417MHz
{0x00, 0x66, 0x64}, // channel = 3, Tf = 2422MHz
@@ -426,7 +426,7 @@ BYTE abyVT3226_ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
//{{RobertYu:20060502, TWIF 1.14, LO Current for 11b mode
-DWORD dwVT3226D0LoCurrentTable[CB_MAX_CHANNEL_24G] = {
+u32 dwVT3226D0LoCurrentTable[CB_MAX_CHANNEL_24G] = {
0x0135C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x0135C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x0235C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -446,7 +446,7 @@ DWORD dwVT3226D0LoCurrentTable[CB_MAX_CHANNEL_24G] = {
//{{RobertYu:20060609
-BYTE abyVT3342A0_InitTable[CB_VT3342_INIT_SEQ][3] = { // 11b/g mode
+u8 abyVT3342A0_InitTable[CB_VT3342_INIT_SEQ][3] = { /* 11b/g mode */
{0x03, 0xFF, 0x80}, //update for mode//
{0x02, 0x08, 0x81},
{0x00, 0xC6, 0x02},
@@ -469,7 +469,7 @@ BYTE abyVT3342A0_InitTable[CB_VT3342_INIT_SEQ][3] = { // 11b/g mode
// channel56, 5280MHz 0x00C402 for disable Frac
// other channels 0x00C602
-BYTE abyVT3342_ChannelTable0[CB_MAX_CHANNEL][3] = {
+u8 abyVT3342_ChannelTable0[CB_MAX_CHANNEL][3] = {
{0x02, 0x05, 0x03}, // channel = 1, Tf = 2412MHz
{0x01, 0x15, 0x03}, // channel = 2, Tf = 2417MHz
{0x03, 0xC5, 0x03}, // channel = 3, Tf = 2422MHz
@@ -535,7 +535,7 @@ BYTE abyVT3342_ChannelTable0[CB_MAX_CHANNEL][3] = {
{0x00, 0x06, 0x03} // channel = 165, Tf = 5825MHz (56), TBD
};
-BYTE abyVT3342_ChannelTable1[CB_MAX_CHANNEL][3] = {
+u8 abyVT3342_ChannelTable1[CB_MAX_CHANNEL][3] = {
{0x01, 0x99, 0x94}, // channel = 1, Tf = 2412MHz
{0x02, 0x44, 0x44}, // channel = 2, Tf = 2417MHz
{0x02, 0xEE, 0xE4}, // channel = 3, Tf = 2422MHz
@@ -606,7 +606,7 @@ BYTE abyVT3342_ChannelTable1[CB_MAX_CHANNEL][3] = {
*
-*/
-const DWORD dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
+const u32 dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
@@ -682,7 +682,7 @@ const DWORD dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
// 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
-const BYTE RFaby11aChannelIndex[200] = {
+const u8 RFaby11aChannelIndex[200] = {
// 1 2 3 4 5 6 7 8 9 10
00, 00, 00, 00, 00, 00, 23, 24, 25, 00, // 10
26, 27, 00, 00, 00, 28, 00, 00, 00, 00, // 20
@@ -719,27 +719,23 @@ const BYTE RFaby11aChannelIndex[200] = {
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL IFRFbWriteEmbedded (PSDevice pDevice, DWORD dwData)
+int IFRFbWriteEmbedded(struct vnt_private *pDevice, u32 dwData)
{
- BYTE pbyData[4];
+ u8 pbyData[4];
- pbyData[0] = (BYTE)dwData;
- pbyData[1] = (BYTE)(dwData>>8);
- pbyData[2] = (BYTE)(dwData>>16);
- pbyData[3] = (BYTE)(dwData>>24);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE_IFRF,
- 0,
- 0,
- 4,
- pbyData
- );
+ pbyData[0] = (u8)dwData;
+ pbyData[1] = (u8)(dwData >> 8);
+ pbyData[2] = (u8)(dwData >> 16);
+ pbyData[3] = (u8)(dwData >> 24);
+
+ CONTROLnsRequestOut(pDevice,
+ MESSAGE_TYPE_WRITE_IFRF, 0, 0, 4, pbyData);
- return TRUE;
+ return true;
}
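
IFRFbWriteEmbedded() serializes the 32-bit RF register word into four bytes, least-significant byte first, before handing the buffer to the USB control request. A minimal standalone sketch of that packing (hypothetical helper name; the driver keeps the shifts inline):

#include <stdint.h>

/* Pack a 32-bit RF register word LSB-first, as IFRFbWriteEmbedded() does. */
static void pack_rf_word_le(uint32_t word, uint8_t out[4])
{
	out[0] = (uint8_t)word;
	out[1] = (uint8_t)(word >> 8);
	out[2] = (uint8_t)(word >> 16);
	out[3] = (uint8_t)(word >> 24);
}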
@@ -753,21 +749,16 @@ BOOL IFRFbWriteEmbedded (PSDevice pDevice, DWORD dwData)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL RFbSetPower (
- PSDevice pDevice,
- unsigned int uRATE,
- unsigned int uCH
- )
+int RFbSetPower(struct vnt_private *pDevice, u32 uRATE, u32 uCH)
{
-BOOL bResult = TRUE;
-BYTE byPwr = pDevice->byCCKPwr;
+ int bResult = true;
+ u8 byPwr = pDevice->byCCKPwr;
- if (pDevice->dwDiagRefCount != 0) {
- return TRUE;
- }
+ if (pDevice->dwDiagRefCount)
+ return true;
if (uCH == 0)
return -EINVAL;
@@ -810,19 +801,16 @@ BYTE byPwr = pDevice->byCCKPwr;
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL RFbRawSetPower (
- PSDevice pDevice,
- BYTE byPwr,
- unsigned int uRATE
- )
+
+int RFbRawSetPower(struct vnt_private *pDevice, u8 byPwr, u32 uRATE)
{
-BOOL bResult = TRUE;
+ int bResult = true;
if (pDevice->byCurPwr == byPwr)
- return TRUE;
+ return true;
pDevice->byCurPwr = byPwr;
@@ -830,7 +818,7 @@ BOOL bResult = TRUE;
case RF_AL2230 :
if (pDevice->byCurPwr >= AL2230_PWR_IDX_LEN)
- return FALSE;
+ return false;
bResult &= IFRFbWriteEmbedded(pDevice, dwAL2230PowerTable[pDevice->byCurPwr]);
if (uRATE <= RATE_11M)
bResult &= IFRFbWriteEmbedded(pDevice, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
@@ -840,7 +828,7 @@ BOOL bResult = TRUE;
case RF_AL2230S :
if (pDevice->byCurPwr >= AL2230_PWR_IDX_LEN)
- return FALSE;
+ return false;
bResult &= IFRFbWriteEmbedded(pDevice, dwAL2230PowerTable[pDevice->byCurPwr]);
if (uRATE <= RATE_11M) {
bResult &= IFRFbWriteEmbedded(pDevice, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
@@ -863,7 +851,7 @@ BOOL bResult = TRUE;
bResult &= IFRFbWriteEmbedded(pDevice, 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW);
}
- if (pDevice->byCurPwr > AL7230_PWR_IDX_LEN) return FALSE;
+ if (pDevice->byCurPwr > AL7230_PWR_IDX_LEN) return false;
// 0x080F1B00 for 3 wire control TxGain(D10) and 0x31 as TX Gain value
dwMax7230Pwr = 0x080C0B00 | ( (pDevice->byCurPwr) << 12 ) |
@@ -879,7 +867,7 @@ BOOL bResult = TRUE;
DWORD dwVT3226Pwr;
if (pDevice->byCurPwr >= VT3226_PWR_IDX_LEN)
- return FALSE;
+ return false;
dwVT3226Pwr = ((0x3F-pDevice->byCurPwr) << 20 ) | ( 0x17 << 8 ) /* Reg7 */ |
(BY_VT3226_REG_LEN << 3 ) | IFREGCTL_REGW;
bResult &= IFRFbWriteEmbedded(pDevice, dwVT3226Pwr);
@@ -891,7 +879,7 @@ BOOL bResult = TRUE;
DWORD dwVT3226Pwr;
if (pDevice->byCurPwr >= VT3226_PWR_IDX_LEN)
- return FALSE;
+ return false;
if (uRATE <= RATE_11M) {
@@ -900,14 +888,22 @@ BOOL bResult = TRUE;
bResult &= IFRFbWriteEmbedded(pDevice, dwVT3226Pwr);
bResult &= IFRFbWriteEmbedded(pDevice, 0x03C6A200+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW);
- if (pDevice->sMgmtObj.eScanState != WMAC_NO_SCANNING) {
- // scanning, the channel number is pDevice->uScanChannel
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"@@@@ RFbRawSetPower> 11B mode uCurrChannel[%d]\n", pDevice->sMgmtObj.uScanChannel);
- bResult &= IFRFbWriteEmbedded(pDevice, dwVT3226D0LoCurrentTable[pDevice->sMgmtObj.uScanChannel-1]); //RobertYu:20060420, sometimes didn't change channel just set power with different rate
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"@@@@ RFbRawSetPower> 11B mode uCurrChannel[%d]\n", pDevice->sMgmtObj.uCurrChannel);
- bResult &= IFRFbWriteEmbedded(pDevice, dwVT3226D0LoCurrentTable[pDevice->sMgmtObj.uCurrChannel-1]); //RobertYu:20060420, sometimes didn't change channel just set power with different rate
- }
+ if (pDevice->vnt_mgmt.eScanState != WMAC_NO_SCANNING) {
+ /* scanning, channel number is pDevice->uScanChannel */
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "RFbRawSetPower> 11B mode uCurrChannel[%d]\n",
+ pDevice->vnt_mgmt.uScanChannel);
+ bResult &= IFRFbWriteEmbedded(pDevice,
+ dwVT3226D0LoCurrentTable[pDevice->
+ vnt_mgmt.uScanChannel - 1]);
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "RFbRawSetPower> 11B mode uCurrChannel[%d]\n",
+ pDevice->vnt_mgmt.uCurrChannel);
+ bResult &= IFRFbWriteEmbedded(pDevice,
+ dwVT3226D0LoCurrentTable[pDevice->
+ vnt_mgmt.uCurrChannel - 1]);
+ }
bResult &= IFRFbWriteEmbedded(pDevice, 0x015C0800+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW); //RobertYu:20060420, ok now, new switching power (mini-pci can have bigger power consumption)
} else {
@@ -928,7 +924,7 @@ BOOL bResult = TRUE;
DWORD dwVT3342Pwr;
if (pDevice->byCurPwr >= VT3342_PWR_IDX_LEN)
- return FALSE;
+ return false;
dwVT3342Pwr = ((0x3F-pDevice->byCurPwr) << 20 ) | ( 0x27 << 8 ) /* Reg7 */ |
(BY_VT3342_REG_LEN << 3 ) | IFREGCTL_REGW;
@@ -957,17 +953,12 @@ BOOL bResult = TRUE;
* Return Value: none
*
-*/
-void
-RFvRSSITodBm (
- PSDevice pDevice,
- BYTE byCurrRSSI,
- long * pldBm
- )
+void RFvRSSITodBm(struct vnt_private *pDevice, u8 byCurrRSSI, long *pldBm)
{
- BYTE byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03);
- signed long b = (byCurrRSSI & 0x3F);
- signed long a = 0;
- BYTE abyAIROHARF[4] = {0, 18, 0, 40};
+ u8 byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03);
+ signed long b = (byCurrRSSI & 0x3F);
+ signed long a = 0;
+ u8 abyAIROHARF[4] = {0, 18, 0, 40};
switch (pDevice->byRFType) {
case RF_AL2230:
@@ -987,15 +978,12 @@ RFvRSSITodBm (
-void
-RFbRFTableDownload (
- PSDevice pDevice
- )
+void RFbRFTableDownload(struct vnt_private *pDevice)
{
-WORD wLength1 = 0,wLength2 = 0 ,wLength3 = 0;
-PBYTE pbyAddr1 = NULL,pbyAddr2 = NULL,pbyAddr3 = NULL;
-WORD wLength,wValue;
-BYTE abyArray[256];
+ u16 wLength1 = 0, wLength2 = 0, wLength3 = 0;
+ u8 *pbyAddr1 = NULL, *pbyAddr2 = NULL, *pbyAddr3 = NULL;
+ u16 wLength, wValue;
+ u8 abyArray[256];
switch ( pDevice->byRFType ) {
case RF_AL2230:
@@ -1134,21 +1122,19 @@ BYTE abyArray[256];
}
-// RobertYu:20060412, TWIF1.11 adjust LO Current for 11b mode
-BOOL s_bVT3226D0_11bLoCurrentAdjust(
- PSDevice pDevice,
- BYTE byChannel,
- BOOL b11bMode)
+int s_bVT3226D0_11bLoCurrentAdjust(struct vnt_private *pDevice, u8 byChannel,
+ int b11bMode)
{
- BOOL bResult;
+ int bResult = true;
- bResult = TRUE;
- if( b11bMode )
- bResult &= IFRFbWriteEmbedded(pDevice, dwVT3226D0LoCurrentTable[byChannel-1]);
- else
- bResult &= IFRFbWriteEmbedded(pDevice, 0x016BC600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW); //RobertYu:20060412
+ if (b11bMode)
+ bResult &= IFRFbWriteEmbedded(pDevice,
+ dwVT3226D0LoCurrentTable[byChannel-1]);
+ else
+ bResult &= IFRFbWriteEmbedded(pDevice, 0x016bc600 +
+ (BY_VT3226_REG_LEN << 3) + IFREGCTL_REGW);
- return bResult;
+ return bResult;
}
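
The RF power routines above accumulate success with bResult &= IFRFbWriteEmbedded(...), so once the BOOL return becomes a plain int, a single failed register write still makes the whole call report false. A short sketch of that idiom, reusing the names from this file and assuming the driver's own headers (illustrative only, not part of the patch):

/* Write a table of RF register words; any single failure clears the
 * accumulated result, mirroring RFbSetPower()/RFbRawSetPower().
 */
static int rf_write_table(struct vnt_private *pDevice, const u32 *table,
	int count)
{
	int bResult = true;
	int i;

	for (i = 0; i < count; i++)
		bResult &= IFRFbWriteEmbedded(pDevice, table[i]);

	return bResult;
}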
diff --git a/drivers/staging/vt6656/rf.h b/drivers/staging/vt6656/rf.h
index 72eb27ac436b..9f70cf740bae 100644
--- a/drivers/staging/vt6656/rf.h
+++ b/drivers/staging/vt6656/rf.h
@@ -60,25 +60,15 @@
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
-extern const BYTE RFaby11aChannelIndex[200];
+extern const u8 RFaby11aChannelIndex[200];
/*--------------------- Export Functions --------------------------*/
-BOOL IFRFbWriteEmbedded(PSDevice pDevice, DWORD dwData);
-BOOL RFbSetPower(PSDevice pDevice, unsigned int uRATE, unsigned int uCH);
-
-BOOL RFbRawSetPower(
- PSDevice pDevice,
- BYTE byPwr,
- unsigned int uRATE
- );
-
-void RFvRSSITodBm(PSDevice pDevice, BYTE byCurrRSSI, long *pldBm);
-void RFbRFTableDownload(PSDevice pDevice);
-
-BOOL s_bVT3226D0_11bLoCurrentAdjust(
- PSDevice pDevice,
- BYTE byChannel,
- BOOL b11bMode
- );
+int IFRFbWriteEmbedded(struct vnt_private *, u32 dwData);
+int RFbSetPower(struct vnt_private *, u32 uRATE, u32 uCH);
+int RFbRawSetPower(struct vnt_private *, u8 byPwr, u32 uRATE);
+void RFvRSSITodBm(struct vnt_private *, u8 byCurrRSSI, long *pldBm);
+void RFbRFTableDownload(struct vnt_private *pDevice);
+int s_bVT3226D0_11bLoCurrentAdjust(struct vnt_private *, u8 byChannel,
+ int b11bMode);
#endif /* __RF_H__ */
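
The prototypes above follow the type mapping used throughout this series: the driver-private BYTE/WORD/DWORD aliases are replaced by the kernel's fixed-width u8/u16/u32, and BOOL/TRUE/FALSE by int (or bool) with true/false. A sketch of the assumed equivalences (the original aliases come from the driver's own headers, not from this patch):

#include <linux/types.h>	/* u8, u16, u32, bool, true, false */

/* Illustrative equivalences only: */
typedef u8  BYTE;
typedef u16 WORD;
typedef u32 DWORD;
typedef int BOOL;	/* TRUE/FALSE -> true/false */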
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 83c04e120935..b939dcf689d6 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -106,181 +106,71 @@ const WORD wFB_Opt1[2][5] = {
/*--------------------- Static Functions --------------------------*/
-static
-void
-s_vSaveTxPktInfo(
- PSDevice pDevice,
- BYTE byPktNum,
- PBYTE pbyDestAddr,
- WORD wPktLength,
- WORD wFIFOCtl
-);
-
-static
-void *
-s_vGetFreeContext(
- PSDevice pDevice
- );
-
-
-static
-void
-s_vGenerateTxParameter(
- PSDevice pDevice,
- BYTE byPktType,
- WORD wCurrentRate,
- void *pTxBufHead,
- void *pvRrvTime,
- void *pvRTS,
- void *pvCTS,
- unsigned int cbFrameSize,
- BOOL bNeedACK,
- unsigned int uDMAIdx,
- PSEthernetHeader psEthHeader
- );
-
-
-static unsigned int s_uFillDataHead(
- PSDevice pDevice,
- BYTE byPktType,
- WORD wCurrentRate,
- void *pTxDataHead,
- unsigned int cbFrameLength,
- unsigned int uDMAIdx,
- BOOL bNeedAck,
- unsigned int uFragIdx,
- unsigned int cbLastFragmentSize,
- unsigned int uMACfragNum,
- BYTE byFBOption
- );
-
-
-
-
-static
-void
-s_vGenerateMACHeader (
- PSDevice pDevice,
- PBYTE pbyBufferAddr,
- WORD wDuration,
- PSEthernetHeader psEthHeader,
- BOOL bNeedEncrypt,
- WORD wFragType,
- unsigned int uDMAIdx,
- unsigned int uFragIdx
- );
-
-static
-void
-s_vFillTxKey(
- PSDevice pDevice,
- PBYTE pbyBuf,
- PBYTE pbyIVHead,
- PSKeyItem pTransmitKey,
- PBYTE pbyHdrBuf,
- WORD wPayloadLen,
- PBYTE pMICHDR
- );
-
-static
-void
-s_vSWencryption (
- PSDevice pDevice,
- PSKeyItem pTransmitKey,
- PBYTE pbyPayloadHead,
- WORD wPayloadSize
- );
-
-static unsigned int s_uGetTxRsvTime(
- PSDevice pDevice,
- BYTE byPktType,
- unsigned int cbFrameLength,
- WORD wRate,
- BOOL bNeedAck
- );
-
-
-static unsigned int s_uGetRTSCTSRsvTime(
- PSDevice pDevice,
- BYTE byRTSRsvType,
- BYTE byPktType,
- unsigned int cbFrameLength,
- WORD wCurrentRate
- );
-
-static
-void
-s_vFillCTSHead (
- PSDevice pDevice,
- unsigned int uDMAIdx,
- BYTE byPktType,
- void *pvCTS,
- unsigned int cbFrameLength,
- BOOL bNeedAck,
- BOOL bDisCRC,
- WORD wCurrentRate,
- BYTE byFBOption
- );
-
-static
-void
-s_vFillRTSHead(
- PSDevice pDevice,
- BYTE byPktType,
- void *pvRTS,
- unsigned int cbFrameLength,
- BOOL bNeedAck,
- BOOL bDisCRC,
- PSEthernetHeader psEthHeader,
- WORD wCurrentRate,
- BYTE byFBOption
- );
-
-static unsigned int s_uGetDataDuration(
- PSDevice pDevice,
- BYTE byDurType,
- unsigned int cbFrameLength,
- BYTE byPktType,
- WORD wRate,
- BOOL bNeedAck,
- unsigned int uFragIdx,
- unsigned int cbLastFragmentSize,
- unsigned int uMACfragNum,
- BYTE byFBOption
- );
-
-
-static
-unsigned int
-s_uGetRTSCTSDuration (
- PSDevice pDevice,
- BYTE byDurType,
- unsigned int cbFrameLength,
- BYTE byPktType,
- WORD wRate,
- BOOL bNeedAck,
- BYTE byFBOption
- );
+static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
+ u8 *pbyDestAddr, u16 wPktLength, u16 wFIFOCtl);
+
+static void *s_vGetFreeContext(struct vnt_private *pDevice);
+
+static void s_vGenerateTxParameter(struct vnt_private *pDevice,
+ u8 byPktType, u16 wCurrentRate, void *pTxBufHead, void *pvRrvTime,
+ void *pvRTS, void *pvCTS, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
+ PSEthernetHeader psEthHeader);
+
+static u32 s_uFillDataHead(struct vnt_private *pDevice,
+ u8 byPktType, u16 wCurrentRate, void *pTxDataHead, u32 cbFrameLength,
+ u32 uDMAIdx, int bNeedAck, u32 uFragIdx, u32 cbLastFragmentSize,
+ u32 uMACfragNum, u8 byFBOption);
+
+
+static void s_vGenerateMACHeader(struct vnt_private *pDevice,
+ u8 *pbyBufferAddr, u16 wDuration, PSEthernetHeader psEthHeader,
+ int bNeedEncrypt, u16 wFragType, u32 uDMAIdx, u32 uFragIdx);
+
+static void s_vFillTxKey(struct vnt_private *pDevice, u8 *pbyBuf,
+ u8 *pbyIVHead, PSKeyItem pTransmitKey, u8 *pbyHdrBuf, u16 wPayloadLen,
+ u8 *pMICHDR);
+
+static void s_vSWencryption(struct vnt_private *pDevice,
+ PSKeyItem pTransmitKey, u8 *pbyPayloadHead, u16 wPayloadSize);
+
+static unsigned int s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
+ u32 cbFrameLength, u16 wRate, int bNeedAck);
+
+static u32 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice, u8 byRTSRsvType,
+ u8 byPktType, u32 cbFrameLength, u16 wCurrentRate);
+
+static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
+ u8 byPktType, void *pvCTS, u32 cbFrameLength, int bNeedAck,
+ int bDisCRC, u16 wCurrentRate, u8 byFBOption);
+
+static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
+ void *pvRTS, u32 cbFrameLength, int bNeedAck, int bDisCRC,
+ PSEthernetHeader psEthHeader, u16 wCurrentRate, u8 byFBOption);
+
+static u32 s_uGetDataDuration(struct vnt_private *pDevice, u8 byDurType,
+ u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
+ u32 uFragIdx, u32 cbLastFragmentSize, u32 uMACfragNum,
+ u8 byFBOption);
+
+static unsigned int s_uGetRTSCTSDuration(struct vnt_private *pDevice,
+ u8 byDurType, u32 cbFrameLength, u8 byPktType, u16 wRate,
+ int bNeedAck, u8 byFBOption);
/*--------------------- Export Variables --------------------------*/
-static
-void *
-s_vGetFreeContext(
- PSDevice pDevice
- )
+static void *s_vGetFreeContext(struct vnt_private *pDevice)
{
- PUSB_SEND_CONTEXT pContext = NULL;
- PUSB_SEND_CONTEXT pReturnContext = NULL;
- unsigned int ii;
+ PUSB_SEND_CONTEXT pContext = NULL;
+ PUSB_SEND_CONTEXT pReturnContext = NULL;
+ int ii;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
for (ii = 0; ii < pDevice->cbTD; ii++) {
pContext = pDevice->apTD[ii];
- if (pContext->bBoolInUse == FALSE) {
- pContext->bBoolInUse = TRUE;
+ if (pContext->bBoolInUse == false) {
+ pContext->bBoolInUse = true;
pReturnContext = pContext;
break;
}
@@ -292,11 +182,10 @@ s_vGetFreeContext(
}
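
s_vGetFreeContext() walks the device's array of USB send contexts and claims the first slot whose bBoolInUse flag is clear, returning NULL when every context is busy. A trimmed sketch of that scan after the bool conversion (field and type names as in the driver; illustrative only):

static void *get_free_context_sketch(struct vnt_private *pDevice)
{
	PUSB_SEND_CONTEXT pContext;
	int ii;

	for (ii = 0; ii < pDevice->cbTD; ii++) {
		pContext = pDevice->apTD[ii];
		if (!pContext->bBoolInUse) {
			pContext->bBoolInUse = true;	/* claim the slot */
			return pContext;
		}
	}
	return NULL;	/* no free TX context */
}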
-static
-void
-s_vSaveTxPktInfo(PSDevice pDevice, BYTE byPktNum, PBYTE pbyDestAddr, WORD wPktLength, WORD wFIFOCtl)
+static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
+ u8 *pbyDestAddr, u16 wPktLength, u16 wFIFOCtl)
{
- PSStatCounter pStatistic=&(pDevice->scStatistic);
+ PSStatCounter pStatistic = &pDevice->scStatistic;
if (is_broadcast_ether_addr(pbyDestAddr))
pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_BROAD;
@@ -312,24 +201,15 @@ s_vSaveTxPktInfo(PSDevice pDevice, BYTE byPktNum, PBYTE pbyDestAddr, WORD wPktLe
ETH_ALEN);
}
-static
-void
-s_vFillTxKey (
- PSDevice pDevice,
- PBYTE pbyBuf,
- PBYTE pbyIVHead,
- PSKeyItem pTransmitKey,
- PBYTE pbyHdrBuf,
- WORD wPayloadLen,
- PBYTE pMICHDR
- )
+static void s_vFillTxKey(struct vnt_private *pDevice, u8 *pbyBuf,
+ u8 *pbyIVHead, PSKeyItem pTransmitKey, u8 *pbyHdrBuf,
+ u16 wPayloadLen, u8 *pMICHDR)
{
- PDWORD pdwIV = (PDWORD) pbyIVHead;
- PDWORD pdwExtIV = (PDWORD) ((PBYTE)pbyIVHead+4);
- WORD wValue;
- PS802_11Header pMACHeader = (PS802_11Header)pbyHdrBuf;
- DWORD dwRevIVCounter;
-
+ u32 *pdwIV = (u32 *)pbyIVHead;
+ u32 *pdwExtIV = (u32 *)((u8 *)pbyIVHead + 4);
+ u16 wValue;
+ PS802_11Header pMACHeader = (PS802_11Header)pbyHdrBuf;
+ u32 dwRevIVCounter;
//Fill TXKEY
@@ -430,18 +310,12 @@ s_vFillTxKey (
}
-static
-void
-s_vSWencryption (
- PSDevice pDevice,
- PSKeyItem pTransmitKey,
- PBYTE pbyPayloadHead,
- WORD wPayloadSize
- )
+static void s_vSWencryption(struct vnt_private *pDevice,
+ PSKeyItem pTransmitKey, u8 *pbyPayloadHead, u16 wPayloadSize)
{
- unsigned int cbICVlen = 4;
- DWORD dwICV = 0xFFFFFFFFL;
- PDWORD pdwICV;
+ u32 cbICVlen = 4;
+ u32 dwICV = 0xffffffff;
+ u32 *pdwICV;
if (pTransmitKey == NULL)
return;
@@ -479,17 +353,10 @@ s_vSWencryption (
PK_TYPE_11GB 2
PK_TYPE_11GA 3
*/
-static
-unsigned int
-s_uGetTxRsvTime (
- PSDevice pDevice,
- BYTE byPktType,
- unsigned int cbFrameLength,
- WORD wRate,
- BOOL bNeedAck
- )
+static u32 s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
+ u32 cbFrameLength, u16 wRate, int bNeedAck)
{
- unsigned int uDataTime, uAckTime;
+ u32 uDataTime, uAckTime;
uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
if (byPktType == PK_TYPE_11B) {//llb,CCK mode
@@ -507,17 +374,10 @@ s_uGetTxRsvTime (
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static
-unsigned int
-s_uGetRTSCTSRsvTime (
- PSDevice pDevice,
- BYTE byRTSRsvType,
- BYTE byPktType,
- unsigned int cbFrameLength,
- WORD wCurrentRate
- )
+static u32 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
+ u8 byRTSRsvType, u8 byPktType, u32 cbFrameLength, u16 wCurrentRate)
{
- unsigned int uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
+ u32 uRrvTime, uRTSTime, uCTSTime, uAckTime, uDataTime;
uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
@@ -549,23 +409,13 @@ s_uGetRTSCTSRsvTime (
}
//byFreqType 0: 5GHz, 1:2.4Ghz
-static
-unsigned int
-s_uGetDataDuration (
- PSDevice pDevice,
- BYTE byDurType,
- unsigned int cbFrameLength,
- BYTE byPktType,
- WORD wRate,
- BOOL bNeedAck,
- unsigned int uFragIdx,
- unsigned int cbLastFragmentSize,
- unsigned int uMACfragNum,
- BYTE byFBOption
- )
+static u32 s_uGetDataDuration(struct vnt_private *pDevice, u8 byDurType,
+ u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
+ u32 uFragIdx, u32 cbLastFragmentSize, u32 uMACfragNum,
+ u8 byFBOption)
{
- BOOL bLastFrag = 0;
- unsigned int uAckTime = 0, uNextPktTime = 0;
+ int bLastFrag = 0;
+ u32 uAckTime = 0, uNextPktTime = 0;
if (uFragIdx == (uMACfragNum-1)) {
bLastFrag = 1;
@@ -712,25 +562,17 @@ s_uGetDataDuration (
break;
}
- ASSERT(FALSE);
+ ASSERT(false);
return 0;
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static
-unsigned int
-s_uGetRTSCTSDuration (
- PSDevice pDevice,
- BYTE byDurType,
- unsigned int cbFrameLength,
- BYTE byPktType,
- WORD wRate,
- BOOL bNeedAck,
- BYTE byFBOption
- )
+static u32 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
+ u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
+ u8 byFBOption)
{
- unsigned int uCTSTime = 0, uDurTime = 0;
+ u32 uCTSTime = 0, uDurTime = 0;
switch (byDurType) {
@@ -814,24 +656,10 @@ s_uGetRTSCTSDuration (
}
-
-
-
-static
-unsigned int
-s_uFillDataHead (
- PSDevice pDevice,
- BYTE byPktType,
- WORD wCurrentRate,
- void *pTxDataHead,
- unsigned int cbFrameLength,
- unsigned int uDMAIdx,
- BOOL bNeedAck,
- unsigned int uFragIdx,
- unsigned int cbLastFragmentSize,
- unsigned int uMACfragNum,
- BYTE byFBOption
- )
+static u32 s_uFillDataHead(struct vnt_private *pDevice,
+ u8 byPktType, u16 wCurrentRate, void *pTxDataHead, u32 cbFrameLength,
+ u32 uDMAIdx, int bNeedAck, u32 uFragIdx, u32 cbLastFragmentSize,
+ u32 uMACfragNum, u8 byFBOption)
{
if (pTxDataHead == NULL) {
@@ -959,25 +787,12 @@ s_uFillDataHead (
return 0;
}
-
-
-
-static
-void
-s_vFillRTSHead (
- PSDevice pDevice,
- BYTE byPktType,
- void *pvRTS,
- unsigned int cbFrameLength,
- BOOL bNeedAck,
- BOOL bDisCRC,
- PSEthernetHeader psEthHeader,
- WORD wCurrentRate,
- BYTE byFBOption
- )
+static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
+ void *pvRTS, u32 cbFrameLength, int bNeedAck, int bDisCRC,
+ PSEthernetHeader psEthHeader, u16 wCurrentRate, u8 byFBOption)
{
- unsigned int uRTSFrameLen = 20;
- WORD wLen = 0x0000;
+ u32 uRTSFrameLen = 20;
+ u16 wLen = 0;
if (pvRTS == NULL)
return;
@@ -1190,22 +1005,12 @@ s_vFillRTSHead (
}
}
-static
-void
-s_vFillCTSHead (
- PSDevice pDevice,
- unsigned int uDMAIdx,
- BYTE byPktType,
- void *pvCTS,
- unsigned int cbFrameLength,
- BOOL bNeedAck,
- BOOL bDisCRC,
- WORD wCurrentRate,
- BYTE byFBOption
- )
+static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
+ u8 byPktType, void *pvCTS, u32 cbFrameLength, int bNeedAck,
+ int bDisCRC, u16 wCurrentRate, u8 byFBOption)
{
- unsigned int uCTSFrameLen = 14;
- WORD wLen = 0x0000;
+ u32 uCTSFrameLen = 14;
+ u16 wLen = 0;
if (pvCTS == NULL) {
return;
@@ -1290,27 +1095,15 @@ s_vFillCTSHead (
*
-*/
-static
-void
-s_vGenerateTxParameter (
- PSDevice pDevice,
- BYTE byPktType,
- WORD wCurrentRate,
- void *pTxBufHead,
- void *pvRrvTime,
- void *pvRTS,
- void *pvCTS,
- unsigned int cbFrameSize,
- BOOL bNeedACK,
- unsigned int uDMAIdx,
- PSEthernetHeader psEthHeader
- )
+static void s_vGenerateTxParameter(struct vnt_private *pDevice,
+ u8 byPktType, u16 wCurrentRate, void *pTxBufHead, void *pvRrvTime,
+ void *pvRTS, void *pvCTS, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
+ PSEthernetHeader psEthHeader)
{
- unsigned int cbMACHdLen = WLAN_HDR_ADDR3_LEN; /* 24 */
- WORD wFifoCtl;
- BOOL bDisCRC = FALSE;
- BYTE byFBOption = AUTO_FB_NONE;
-// WORD wCurrentRate = pDevice->wCurrentRate;
+ u32 cbMACHdLen = WLAN_HDR_ADDR3_LEN; /* 24 */
+ u16 wFifoCtl;
+ int bDisCRC = false;
+ u8 byFBOption = AUTO_FB_NONE;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter...\n");
PSTxBufHead pFifoHead = (PSTxBufHead)pTxBufHead;
@@ -1318,7 +1111,7 @@ s_vGenerateTxParameter (
wFifoCtl = pFifoHead->wFIFOCtl;
if (wFifoCtl & FIFOCTL_CRCDIS) {
- bDisCRC = TRUE;
+ bDisCRC = true;
}
if (wFifoCtl & FIFOCTL_AUTO_FB_0) {
@@ -1407,55 +1200,40 @@ s_vGenerateTxParameter (
    unsigned int cbFragmentSize,//Hdr+payload+FCS
*/
-
-BOOL
-s_bPacketToWirelessUsb(
- PSDevice pDevice,
- BYTE byPktType,
- PBYTE usbPacketBuf,
- BOOL bNeedEncryption,
- unsigned int uSkbPacketLen,
- unsigned int uDMAIdx,
- PSEthernetHeader psEthHeader,
- PBYTE pPacket,
- PSKeyItem pTransmitKey,
- unsigned int uNodeIndex,
- WORD wCurrentRate,
- unsigned int *pcbHeaderLen,
- unsigned int *pcbTotalLen
- )
+static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
+ u8 *usbPacketBuf, int bNeedEncryption, u32 uSkbPacketLen, u32 uDMAIdx,
+ PSEthernetHeader psEthHeader, u8 *pPacket, PSKeyItem pTransmitKey,
+ u32 uNodeIndex, u16 wCurrentRate, u32 *pcbHeaderLen, u32 *pcbTotalLen)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int cbFrameSize, cbFrameBodySize;
- PTX_BUFFER pTxBufHead;
- unsigned int cb802_1_H_len;
- unsigned int cbIVlen = 0, cbICVlen = 0, cbMIClen = 0,
- cbMACHdLen = 0, cbFCSlen = 4;
- unsigned int cbMICHDR = 0;
- BOOL bNeedACK,bRTS;
- PBYTE pbyType,pbyMacHdr,pbyIVHead,pbyPayloadHead,pbyTxBufferAddr;
- BYTE abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- BYTE abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
- unsigned int uDuration;
- unsigned int cbHeaderLength = 0, uPadding = 0;
- void *pvRrvTime;
- PSMICHDRHead pMICHDR;
- void *pvRTS;
- void *pvCTS;
- void *pvTxDataHd;
- BYTE byFBOption = AUTO_FB_NONE,byFragType;
- WORD wTxBufSize;
- DWORD dwMICKey0,dwMICKey1,dwMIC_Priority,dwCRC;
- PDWORD pdwMIC_L,pdwMIC_R;
- BOOL bSoftWEP = FALSE;
-
-
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u32 cbFrameSize, cbFrameBodySize;
+ PTX_BUFFER pTxBufHead;
+ u32 cb802_1_H_len;
+ u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbMACHdLen = 0;
+ u32 cbFCSlen = 4, cbMICHDR = 0;
+ int bNeedACK, bRTS;
+ u8 *pbyType, *pbyMacHdr, *pbyIVHead, *pbyPayloadHead, *pbyTxBufferAddr;
+ u8 abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
+ u8 abySNAP_Bridgetunnel[ETH_ALEN]
+ = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
+ u32 uDuration;
+ u32 cbHeaderLength = 0, uPadding = 0;
+ void *pvRrvTime;
+ PSMICHDRHead pMICHDR;
+ void *pvRTS;
+ void *pvCTS;
+ void *pvTxDataHd;
+ u8 byFBOption = AUTO_FB_NONE, byFragType;
+ u16 wTxBufSize;
+ u32 dwMICKey0, dwMICKey1, dwMIC_Priority, dwCRC;
+ u32 *pdwMIC_L, *pdwMIC_R;
+ int bSoftWEP = false;
+
+ pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
-
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
if (bNeedEncryption && pTransmitKey->pvKeyTable) {
- if (((PSKeyTable)&pTransmitKey->pvKeyTable)->bSoftWEP == TRUE)
- bSoftWEP = TRUE; /* WEP 256 */
+ if (((PSKeyTable)pTransmitKey->pvKeyTable)->bSoftWEP == true)
+ bSoftWEP = true; /* WEP 256 */
}
pTxBufHead = (PTX_BUFFER) usbPacketBuf;
@@ -1478,23 +1256,23 @@ s_bPacketToWirelessUsb(
pTxBufHead->wFIFOCtl |= (WORD)(byPktType<<8);
if (pDevice->dwDiagRefCount != 0) {
- bNeedACK = FALSE;
+ bNeedACK = false;
pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
} else { //if (pDevice->dwDiagRefCount != 0) {
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
if (is_multicast_ether_addr(psEthHeader->abyDstAddr)) {
- bNeedACK = FALSE;
+ bNeedACK = false;
pTxBufHead->wFIFOCtl =
pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
} else {
- bNeedACK = TRUE;
+ bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
}
else {
// MSDUs in Infra mode always need ACK
- bNeedACK = TRUE;
+ bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
} //if (pDevice->dwDiagRefCount != 0) {
@@ -1518,7 +1296,7 @@ s_bPacketToWirelessUsb(
pTxBufHead->wFragCtl |= (WORD)(cbMACHdLen << 10);
//Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
+ if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
@@ -1533,7 +1311,7 @@ s_bPacketToWirelessUsb(
}
}
- if (bSoftWEP != TRUE) {
+ if (bSoftWEP != true) {
if ((bNeedEncryption) && (pTransmitKey != NULL)) { //WEP enabled
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { //WEP40 or WEP104
pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
@@ -1564,7 +1342,7 @@ s_bPacketToWirelessUsb(
cbICVlen = 8;//MIC
cbMICHDR = sizeof(SMICHDRHead);
}
- if (bSoftWEP == FALSE) {
+ if (bSoftWEP == false) {
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMACHdLen%4);
uPadding %= 4;
@@ -1573,10 +1351,10 @@ s_bPacketToWirelessUsb(
cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
- if ( (bNeedACK == FALSE) ||(cbFrameSize < pDevice->wRTSThreshold) ) {
- bRTS = FALSE;
+ if ( (bNeedACK == false) ||(cbFrameSize < pDevice->wRTSThreshold) ) {
+ bRTS = false;
} else {
- bRTS = TRUE;
+ bRTS = true;
pTxBufHead->wFIFOCtl |= (FIFOCTL_RTS | FIFOCTL_LRETRY);
}
@@ -1584,7 +1362,7 @@ s_bPacketToWirelessUsb(
wTxBufSize = sizeof(STxBufHead);
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
if (byFBOption == AUTO_FB_NONE) {
- if (bRTS == TRUE) {//RTS_need
+ if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
pvRTS = (PSRTS_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
@@ -1602,7 +1380,7 @@ s_bPacketToWirelessUsb(
}
} else {
// Auto Fall Back
- if (bRTS == TRUE) {//RTS_need
+ if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
pvRTS = (PSRTS_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
@@ -1610,7 +1388,7 @@ s_bPacketToWirelessUsb(
pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB) + sizeof(STxDataHead_g_FB);
}
- else if (bRTS == FALSE) { //RTS_needless
+ else if (bRTS == false) { //RTS_needless
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvRTS = NULL;
@@ -1622,7 +1400,7 @@ s_bPacketToWirelessUsb(
}
else {//802.11a/b packet
if (byFBOption == AUTO_FB_NONE) {
- if (bRTS == TRUE) {//RTS_need
+ if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = (PSRTS_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
@@ -1630,7 +1408,7 @@ s_bPacketToWirelessUsb(
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab));
cbHeaderLength = wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab) + sizeof(STxDataHead_ab);
}
- else if (bRTS == FALSE) { //RTS_needless, no MICHDR
+ else if (bRTS == false) { //RTS_needless, no MICHDR
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = NULL;
@@ -1640,7 +1418,7 @@ s_bPacketToWirelessUsb(
}
} else {
// Auto Fall Back
- if (bRTS == TRUE) {//RTS_need
+ if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = (PSRTS_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
@@ -1648,7 +1426,7 @@ s_bPacketToWirelessUsb(
pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB));
cbHeaderLength = wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB) + sizeof(STxDataHead_a_FB);
}
- else if (bRTS == FALSE) { //RTS_needless
+ else if (bRTS == false) { //RTS_needless
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = NULL;
@@ -1684,7 +1462,7 @@ s_bPacketToWirelessUsb(
s_vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncryption,
byFragType, uDMAIdx, 0);
- if (bNeedEncryption == TRUE) {
+ if (bNeedEncryption == true) {
//Fill TXKEY
s_vFillTxKey(pDevice, (PBYTE)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (WORD)cbFrameBodySize, (PBYTE)pMICHDR);
@@ -1729,14 +1507,14 @@ s_bPacketToWirelessUsb(
ASSERT(uLength == cbNdisBodySize);
- if ((bNeedEncryption == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
+ if ((bNeedEncryption == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
///////////////////////////////////////////////////////////////////
- if (pDevice->sMgmtObj.eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[20]);
- }
+ if (pDevice->vnt_mgmt.eAuthenMode == WMAC_AUTH_WPANONE) {
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
+ }
else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) {
dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[16]);
dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[20]);
@@ -1769,10 +1547,10 @@ s_bPacketToWirelessUsb(
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
- if (pDevice->bTxMICFail == TRUE) {
+ if (pDevice->bTxMICFail == true) {
*pdwMIC_L = 0;
*pdwMIC_R = 0;
- pDevice->bTxMICFail = FALSE;
+ pDevice->bTxMICFail = false;
}
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderLength, uPadding, cbIVlen);
@@ -1780,17 +1558,17 @@ s_bPacketToWirelessUsb(
}
- if (bSoftWEP == TRUE) {
+ if (bSoftWEP == true) {
s_vSWencryption(pDevice, pTransmitKey, (pbyPayloadHead), (WORD)(cbFrameBodySize + cbMIClen));
- } else if ( ((pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) && (bNeedEncryption == TRUE)) ||
- ((pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) && (bNeedEncryption == TRUE)) ||
- ((pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) && (bNeedEncryption == TRUE)) ) {
+ } else if ( ((pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) && (bNeedEncryption == true)) ||
+ ((pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) && (bNeedEncryption == true)) ||
+ ((pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) && (bNeedEncryption == true)) ) {
cbFrameSize -= cbICVlen;
}
- if (pDevice->bSoftwareGenCrcErr == TRUE) {
+ if (pDevice->bSoftwareGenCrcErr == true) {
unsigned int cbLen;
PDWORD pdwCRC;
@@ -1815,7 +1593,7 @@ s_bPacketToWirelessUsb(
pTxBufHead->wFragCtl |= (WORD)byFragType;
- return TRUE;
+ return true;
}
@@ -1839,19 +1617,11 @@ s_bPacketToWirelessUsb(
*
-*/
-void
-s_vGenerateMACHeader (
- PSDevice pDevice,
- PBYTE pbyBufferAddr,
- WORD wDuration,
- PSEthernetHeader psEthHeader,
- BOOL bNeedEncrypt,
- WORD wFragType,
- unsigned int uDMAIdx,
- unsigned int uFragIdx
- )
+static void s_vGenerateMACHeader(struct vnt_private *pDevice,
+ u8 *pbyBufferAddr, u16 wDuration, PSEthernetHeader psEthHeader,
+ int bNeedEncrypt, u16 wFragType, u32 uDMAIdx, u32 uFragIdx)
{
- PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
+ PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
memset(pMACHeader, 0, (sizeof(S802_11Header))); //- sizeof(pMACHeader->dwIV)));
@@ -1936,43 +1706,29 @@ s_vGenerateMACHeader (
* Out:
* none
*
- * Return Value: CMD_STATUS_PENDING if MAC Tx resource available; otherwise FALSE
+ * Return Value: CMD_STATUS_PENDING if MAC Tx resource available; otherwise false
*
-*/
-CMD_STATUS csMgmt_xmit(
- PSDevice pDevice,
- PSTxMgmtPacket pPacket
- )
+CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
+ struct vnt_tx_mgmt *pPacket)
{
- BYTE byPktType;
- PBYTE pbyTxBufferAddr;
- void *pvRTS;
- PSCTS pCTS;
- void *pvTxDataHd;
- unsigned int uDuration;
- unsigned int cbReqCount;
- PS802_11Header pMACHeader;
- unsigned int cbHeaderSize;
- unsigned int cbFrameBodySize;
- BOOL bNeedACK;
- BOOL bIsPSPOLL = FALSE;
- PSTxBufHead pTxBufHead;
- unsigned int cbFrameSize;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int uPadding = 0;
- WORD wTxBufSize;
- unsigned int cbMacHdLen;
- SEthernetHeader sEthHeader;
- void *pvRrvTime;
- void *pMICHDR;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- WORD wCurrentRate = RATE_1M;
- PTX_BUFFER pTX_Buffer;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ PTX_BUFFER pTX_Buffer;
+ PSTxBufHead pTxBufHead;
+ PUSB_SEND_CONTEXT pContext;
+ PS802_11Header pMACHeader;
+ PSCTS pCTS;
+ SEthernetHeader sEthHeader;
+ u8 byPktType, *pbyTxBufferAddr;
+ void *pvRTS, *pvTxDataHd, *pvRrvTime, *pMICHDR;
+ u32 uDuration, cbReqCount, cbHeaderSize, cbFrameBodySize, cbFrameSize;
+ int bNeedACK, bIsPSPOLL = false;
+ u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbFCSlen = 4;
+ u32 uPadding = 0;
+ u16 wTxBufSize;
+ u32 cbMacHdLen;
+ u16 wCurrentRate = RATE_1M;
@@ -2028,10 +1784,10 @@ CMD_STATUS csMgmt_xmit(
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
if (is_multicast_ether_addr(pPacket->p80211Header->sA3.abyAddr1)) {
- bNeedACK = FALSE;
+ bNeedACK = false;
}
else {
- bNeedACK = TRUE;
+ bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
};
@@ -2043,7 +1799,7 @@ CMD_STATUS csMgmt_xmit(
//pDevice->byPreambleType = PREAMBLE_LONG;
// probe-response don't retry
//if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_MGMT_PROBE_RSP) {
- // bNeedACK = FALSE;
+ // bNeedACK = false;
// pTxBufHead->wFIFOCtl &= (~FIFOCTL_NEEDACK);
//}
}
@@ -2051,7 +1807,7 @@ CMD_STATUS csMgmt_xmit(
pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
- bIsPSPOLL = TRUE;
+ bIsPSPOLL = true;
cbMacHdLen = WLAN_HDR_ADDR2_LEN;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
@@ -2063,7 +1819,7 @@ CMD_STATUS csMgmt_xmit(
// Notes:
// Although spec says MMPDU can be fragmented; In most case,
// no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = FALSE; //Set FRAGCTL_WEPTYP
+ pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
@@ -2084,7 +1840,7 @@ CMD_STATUS csMgmt_xmit(
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = TRUE;
+ pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
@@ -2094,7 +1850,7 @@ CMD_STATUS csMgmt_xmit(
cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen;
//Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
+ if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
@@ -2155,12 +1911,12 @@ CMD_STATUS csMgmt_xmit(
pbyPayloadHead = (PBYTE)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
do {
if ((pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
- (pDevice->bLinkPass == TRUE)) {
+ (pDevice->bLinkPass == true)) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == FALSE) {
+ if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
// get group key
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == TRUE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n");
break;
}
@@ -2171,13 +1927,13 @@ CMD_STATUS csMgmt_xmit(
}
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == FALSE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KEY is NULL. OP Mode[%d]\n", pDevice->eOPMode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n");
}
- } while(FALSE);
+ } while(false);
//Fill TXKEY
s_vFillTxKey(pDevice, (PBYTE)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
(PBYTE)pMACHeader, (WORD)cbFrameBodySize, NULL);
@@ -2231,26 +1987,22 @@ CMD_STATUS csMgmt_xmit(
}
-CMD_STATUS
-csBeacon_xmit(
- PSDevice pDevice,
- PSTxMgmtPacket pPacket
- )
+CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
+ struct vnt_tx_mgmt *pPacket)
{
-
- unsigned int cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
- unsigned int cbHeaderSize = 0;
- WORD wTxBufSize = sizeof(STxShortBufHead);
- PSTxShortBufHead pTxBufHead;
- PS802_11Header pMACHeader;
- PSTxDataHead_ab pTxDataHead;
- WORD wCurrentRate;
- unsigned int cbFrameBodySize;
- unsigned int cbReqCount;
- PBEACON_BUFFER pTX_Buffer;
- PBYTE pbyTxBufferAddr;
- PUSB_SEND_CONTEXT pContext;
- CMD_STATUS status;
+ u32 cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
+ u32 cbHeaderSize = 0;
+ u16 wTxBufSize = sizeof(STxShortBufHead);
+ PSTxShortBufHead pTxBufHead;
+ PS802_11Header pMACHeader;
+ PSTxDataHead_ab pTxDataHead;
+ u16 wCurrentRate;
+ u32 cbFrameBodySize;
+ u32 cbReqCount;
+ PBEACON_BUFFER pTX_Buffer;
+ u8 *pbyTxBufferAddr;
+ PUSB_SEND_CONTEXT pContext;
+ CMD_STATUS status;
pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
@@ -2277,7 +2029,7 @@ csBeacon_xmit(
);
//Get Duration and TimeStampOff
pTxDataHead->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameSize, PK_TYPE_11A,
- wCurrentRate, FALSE, 0, 0, 1, AUTO_FB_NONE));
+ wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
pTxDataHead->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);
} else {
@@ -2290,7 +2042,7 @@ csBeacon_xmit(
);
//Get Duration and TimeStampOff
pTxDataHead->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameSize, PK_TYPE_11B,
- wCurrentRate, FALSE, 0, 0, 1, AUTO_FB_NONE));
+ wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
pTxDataHead->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);
}
@@ -2321,56 +2073,38 @@ csBeacon_xmit(
}
-
-
-
-void
-vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
-
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- BYTE byPktType;
- PBYTE pbyTxBufferAddr;
- void *pvRTS;
- void *pvCTS;
- void *pvTxDataHd;
- unsigned int uDuration;
- unsigned int cbReqCount;
- PS802_11Header pMACHeader;
- unsigned int cbHeaderSize;
- unsigned int cbFrameBodySize;
- BOOL bNeedACK;
- BOOL bIsPSPOLL = FALSE;
- PSTxBufHead pTxBufHead;
- unsigned int cbFrameSize;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int uPadding = 0;
- unsigned int cbMICHDR = 0;
- unsigned int uLength = 0;
- DWORD dwMICKey0, dwMICKey1;
- DWORD dwMIC_Priority;
- PDWORD pdwMIC_L;
- PDWORD pdwMIC_R;
- WORD wTxBufSize;
- unsigned int cbMacHdLen;
- SEthernetHeader sEthHeader;
- void *pvRrvTime;
- void *pMICHDR;
- WORD wCurrentRate = RATE_1M;
- PUWLAN_80211HDR p80211Header;
- unsigned int uNodeIndex = 0;
- BOOL bNodeExist = FALSE;
- SKeyItem STempKey;
- PSKeyItem pTransmitKey = NULL;
- PBYTE pbyIVHead;
- PBYTE pbyPayloadHead;
- PBYTE pbyMacHdr;
- unsigned int cbExtSuppRate = 0;
- PTX_BUFFER pTX_Buffer;
- PUSB_SEND_CONTEXT pContext;
-// PWLAN_IE pItem;
+void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
+{
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u8 byPktType;
+ u8 *pbyTxBufferAddr;
+ void *pvRTS, *pvCTS, *pvTxDataHd;
+ u32 uDuration, cbReqCount;
+ PS802_11Header pMACHeader;
+ u32 cbHeaderSize, cbFrameBodySize;
+ int bNeedACK, bIsPSPOLL = false;
+ PSTxBufHead pTxBufHead;
+ u32 cbFrameSize;
+ u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbFCSlen = 4;
+ u32 uPadding = 0;
+ u32 cbMICHDR = 0, uLength = 0;
+ u32 dwMICKey0, dwMICKey1;
+ u32 dwMIC_Priority;
+ u32 *pdwMIC_L, *pdwMIC_R;
+ u16 wTxBufSize;
+ u32 cbMacHdLen;
+ SEthernetHeader sEthHeader;
+ void *pvRrvTime, *pMICHDR;
+ u32 wCurrentRate = RATE_1M;
+ PUWLAN_80211HDR p80211Header;
+ u32 uNodeIndex = 0;
+ int bNodeExist = false;
+ SKeyItem STempKey;
+ PSKeyItem pTransmitKey = NULL;
+ u8 *pbyIVHead, *pbyPayloadHead, *pbyMacHdr;
+ u32 cbExtSuppRate = 0;
+ PTX_BUFFER pTX_Buffer;
+ PUSB_SEND_CONTEXT pContext;
pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
@@ -2435,18 +2169,18 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
if (is_multicast_ether_addr(p80211Header->sA3.abyAddr1)) {
- bNeedACK = FALSE;
+ bNeedACK = false;
if (pDevice->bEnableHostWEP) {
uNodeIndex = 0;
- bNodeExist = TRUE;
+ bNodeExist = true;
}
}
else {
if (pDevice->bEnableHostWEP) {
if (BSSbIsSTAInNodeDB(pDevice, (PBYTE)(p80211Header->sA3.abyAddr1), &uNodeIndex))
- bNodeExist = TRUE;
+ bNodeExist = true;
}
- bNeedACK = TRUE;
+ bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
};
@@ -2459,7 +2193,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
// probe-response don't retry
//if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_MGMT_PROBE_RSP) {
- // bNeedACK = FALSE;
+ // bNeedACK = false;
// pTxBufHead->wFIFOCtl &= (~FIFOCTL_NEEDACK);
//}
}
@@ -2467,7 +2201,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
- bIsPSPOLL = TRUE;
+ bIsPSPOLL = true;
cbMacHdLen = WLAN_HDR_ADDR2_LEN;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
@@ -2496,7 +2230,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
// Notes:
// Although spec says MMPDU can be fragmented; In most case,
// no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = FALSE; //Set FRAGCTL_WEPTYP
+ pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
@@ -2519,7 +2253,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
cbICVlen = 8;//MIC
cbMICHDR = sizeof(SMICHDRHead);
pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = TRUE;
+ pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
@@ -2529,7 +2263,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen + cbExtSuppRate;
//Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
+ if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
@@ -2646,10 +2380,10 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
- if (pDevice->bTxMICFail == TRUE) {
+ if (pDevice->bTxMICFail == true) {
*pdwMIC_L = 0;
*pdwMIC_R = 0;
- pDevice->bTxMICFail = FALSE;
+ pDevice->bTxMICFail = false;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
@@ -2729,29 +2463,30 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
* Return Value: NULL
*/
-int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+int nsDMA_tx_packet(struct vnt_private *pDevice,
+ u32 uDMAIdx, struct sk_buff *skb)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int BytesToWrite = 0, uHeaderLen = 0;
- unsigned int uNodeIndex = 0;
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- WORD wAID;
- BYTE byPktType;
- BOOL bNeedEncryption = FALSE;
- PSKeyItem pTransmitKey = NULL;
- SKeyItem STempKey;
- unsigned int ii;
- BOOL bTKIP_UseGTK = FALSE;
- BOOL bNeedDeAuth = FALSE;
- PBYTE pbyBSSID;
- BOOL bNodeExist = FALSE;
- PUSB_SEND_CONTEXT pContext;
- BOOL fConvertedPacket;
- PTX_BUFFER pTX_Buffer;
- unsigned int status;
- WORD wKeepRate = pDevice->wCurrentRate;
- struct net_device_stats* pStats = &pDevice->stats;
- BOOL bTxeapol_key = FALSE;
+ struct net_device_stats *pStats = &pDevice->stats;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u32 BytesToWrite = 0, uHeaderLen = 0;
+ u32 uNodeIndex = 0;
+ u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ u16 wAID;
+ u8 byPktType;
+ int bNeedEncryption = false;
+ PSKeyItem pTransmitKey = NULL;
+ SKeyItem STempKey;
+ int ii;
+ int bTKIP_UseGTK = false;
+ int bNeedDeAuth = false;
+ u8 *pbyBSSID;
+ int bNodeExist = false;
+ PUSB_SEND_CONTEXT pContext;
+ bool fConvertedPacket;
+ PTX_BUFFER pTX_Buffer;
+ u32 status;
+ u16 wKeepRate = pDevice->wCurrentRate;
+ int bTxeapol_key = false;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -2763,7 +2498,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
if (is_multicast_ether_addr((PBYTE)(skb->data))) {
uNodeIndex = 0;
- bNodeExist = TRUE;
+ bNodeExist = true;
if (pMgmt->sNodeDBTable[0].bPSEnable) {
skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skb);
@@ -2808,11 +2543,11 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
}else {
pDevice->byPreambleType = PREAMBLE_LONG;
}
- bNodeExist = TRUE;
+ bNodeExist = true;
}
}
- if (bNodeExist == FALSE) {
+ if (bNodeExist == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Unknown STA not found in node DB \n");
dev_kfree_skb_irq(skb);
return 0;
@@ -2844,22 +2579,22 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
/* 802.1x OR eapol-key challenge frame transfer */
if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
(Packet_Type == 3)) {
- bTxeapol_key = TRUE;
+ bTxeapol_key = true;
if(!(Key_info & BIT3) && //WPA or RSN group-key challenge
(Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
if(Descriptor_type==254) {
- pDevice->fWPA_Authened = TRUE;
+ pDevice->fWPA_Authened = true;
PRINT_K("WPA ");
}
else {
- pDevice->fWPA_Authened = TRUE;
+ pDevice->fWPA_Authened = true;
PRINT_K("WPA2(re-keying) ");
}
PRINT_K("Authentication completed!!\n");
}
else if((Key_info & BIT3) && (Descriptor_type==2) && //RSN pairwise-key challenge
(Key_info & BIT8) && (Key_info & BIT9)) {
- pDevice->fWPA_Authened = TRUE;
+ pDevice->fWPA_Authened = true;
PRINT_K("WPA2 Authentication completed!!\n");
}
}
@@ -2867,18 +2602,18 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
}
//mike add:station mode check eapol-key challenge<---
- if (pDevice->bEncryptionEnable == TRUE) {
- bNeedEncryption = TRUE;
+ if (pDevice->bEncryptionEnable == true) {
+ bNeedEncryption = true;
// get Transmit key
do {
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == FALSE) {
+ if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
// get group key
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == TRUE) {
- bTKIP_UseGTK = TRUE;
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
+ bTKIP_UseGTK = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Get GTK.\n");
break;
}
@@ -2895,12 +2630,12 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"\n");
// get pairwise key
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == TRUE)
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == true)
break;
}
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == FALSE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"IBSS and KEY is NULL. [%d]\n", pMgmt->eCurrMode);
@@ -2908,15 +2643,15 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
else
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"NOT IBSS and KEY is NULL. [%d]\n", pMgmt->eCurrMode);
} else {
- bTKIP_UseGTK = TRUE;
+ bTKIP_UseGTK = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Get GTK.\n");
}
- } while(FALSE);
+ } while(false);
}
if (pDevice->bEnableHostWEP) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"acdma0: STA index %d\n", uNodeIndex);
- if (pDevice->bEncryptionEnable == TRUE) {
+ if (pDevice->bEncryptionEnable == true) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
@@ -3015,23 +2750,23 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
byPktType = PK_TYPE_11B;
}
- if (bNeedEncryption == TRUE) {
+ if (bNeedEncryption == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) {
- bNeedEncryption = FALSE;
+ bNeedEncryption = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Don't Find TX KEY\n");
}
else {
- if (bTKIP_UseGTK == TRUE) {
+ if (bTKIP_UseGTK == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n");
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
pTransmitKey->dwKeyIndex);
- bNeedEncryption = TRUE;
+ bNeedEncryption = true;
}
}
}
@@ -3041,7 +2776,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
(pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
pTransmitKey->dwKeyIndex);
- bNeedEncryption = TRUE;
+ bNeedEncryption = true;
}
}
}
@@ -3049,7 +2784,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"return no tx key\n");
- pContext->bBoolInUse = FALSE;
+ pContext->bBoolInUse = false;
dev_kfree_skb_irq(skb);
pStats->tx_dropped++;
return STATUS_FAILURE;
@@ -3065,18 +2800,18 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
&uHeaderLen, &BytesToWrite
);
- if (fConvertedPacket == FALSE) {
- pContext->bBoolInUse = FALSE;
+ if (fConvertedPacket == false) {
+ pContext->bBoolInUse = false;
dev_kfree_skb_irq(skb);
return STATUS_FAILURE;
}
- if ( pDevice->bEnablePSMode == TRUE ) {
+ if ( pDevice->bEnablePSMode == true ) {
if ( !pDevice->bPSModeTxBurst ) {
bScheduleCommand((void *) pDevice,
WLAN_CMD_MAC_DISPOWERSAVING,
NULL);
- pDevice->bPSModeTxBurst = TRUE;
+ pDevice->bPSModeTxBurst = true;
}
}
@@ -3092,14 +2827,14 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
status = PIPEnsSendBulkOut(pDevice,pContext);
- if (bNeedDeAuth == TRUE) {
+ if (bNeedDeAuth == true) {
WORD wReason = WLAN_MGMT_REASON_MIC_FAILURE;
bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (PBYTE) &wReason);
}
if(status!=STATUS_PENDING) {
- pContext->bBoolInUse = FALSE;
+ pContext->bBoolInUse = false;
dev_kfree_skb_irq(skb);
return STATUS_FAILURE;
}
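
The failure branches of nsDMA_tx_packet() above always hand the claimed USB context back and drop the frame before returning STATUS_FAILURE. A compact sketch of that error path (hypothetical helper; field names as in the driver):

/* Release a claimed TX context and drop the frame, mirroring the error
 * branches of nsDMA_tx_packet(). Sketch only.
 */
static int vnt_tx_drop(PUSB_SEND_CONTEXT pContext, struct sk_buff *skb)
{
	pContext->bBoolInUse = false;	/* hand the context back */
	dev_kfree_skb_irq(skb);		/* free the queued frame */
	return STATUS_FAILURE;
}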
@@ -3120,49 +2855,43 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
* pPacket - Pointer to rx packet
* cbPacketSize - rx ethernet frame size
* Out:
- * TURE, FALSE
+ * true, false
*
- * Return Value: Return TRUE if packet is copy to dma1; otherwise FALSE
+ * Return Value: Return true if the packet was copied to dma1; otherwise false
*/
-
-BOOL
-bRelayPacketSend (
- PSDevice pDevice,
- PBYTE pbySkbData,
- unsigned int uDataLen,
- unsigned int uNodeIndex
- )
+int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
+ u32 uNodeIndex)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int BytesToWrite = 0, uHeaderLen = 0;
- BYTE byPktType = PK_TYPE_11B;
- BOOL bNeedEncryption = FALSE;
- SKeyItem STempKey;
- PSKeyItem pTransmitKey = NULL;
- PBYTE pbyBSSID;
- PUSB_SEND_CONTEXT pContext;
- BYTE byPktTyp;
- BOOL fConvertedPacket;
- PTX_BUFFER pTX_Buffer;
- unsigned int status;
- WORD wKeepRate = pDevice->wCurrentRate;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u32 BytesToWrite = 0, uHeaderLen = 0;
+ u8 byPktType = PK_TYPE_11B;
+ int bNeedEncryption = false;
+ SKeyItem STempKey;
+ PSKeyItem pTransmitKey = NULL;
+ u8 *pbyBSSID;
+ PUSB_SEND_CONTEXT pContext;
+ u8 byPktTyp;
+ int fConvertedPacket;
+ PTX_BUFFER pTX_Buffer;
+ u32 status;
+ u16 wKeepRate = pDevice->wCurrentRate;
pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
- return FALSE;
+ return false;
}
memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)pbySkbData, ETH_HLEN);
- if (pDevice->bEncryptionEnable == TRUE) {
- bNeedEncryption = TRUE;
+ if (pDevice->bEncryptionEnable == true) {
+ bNeedEncryption = true;
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == FALSE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"KEY is NULL. [%d]\n", pMgmt->eCurrMode);
} else {
@@ -3186,8 +2915,8 @@ bRelayPacketSend (
}
if ( bNeedEncryption && (pTransmitKey == NULL) ) {
- pContext->bBoolInUse = FALSE;
- return FALSE;
+ pContext->bBoolInUse = false;
+ return false;
}
byPktTyp = (BYTE)pDevice->byPacketType;
@@ -3235,9 +2964,9 @@ bRelayPacketSend (
&uHeaderLen, &BytesToWrite
);
- if (fConvertedPacket == FALSE) {
- pContext->bBoolInUse = FALSE;
- return FALSE;
+ if (fConvertedPacket == false) {
+ pContext->bBoolInUse = false;
+ return false;
}
pTX_Buffer = (PTX_BUFFER)&(pContext->Data[0]);
@@ -3252,6 +2981,6 @@ bRelayPacketSend (
status = PIPEnsSendBulkOut(pDevice,pContext);
- return TRUE;
+ return true;
}
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index dd2198acc636..9f537022cdd1 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -665,30 +665,11 @@ typedef struct tagSBEACON_BUFFER
/*--------------------- Export Functions --------------------------*/
-BOOL
-bPacketToWirelessUsb(
- PSDevice pDevice,
- BYTE byPktType,
- PBYTE usbPacketBuf,
- BOOL bNeedEncrypt,
- unsigned int cbPayloadSize,
- unsigned int uDMAIdx,
- PSEthernetHeader psEthHeader,
- PBYTE pPacket,
- PSKeyItem pTransmitKey,
- unsigned int uNodeIndex,
- WORD wCurrentRate,
- unsigned int *pcbHeaderLen,
- unsigned int *pcbTotalLen
- );
-
-void vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb);
-int nsDMA_tx_packet(PSDevice pDevice,
- unsigned int uDMAIdx,
- struct sk_buff *skb);
-CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
-CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
-BOOL bRelayPacketSend(PSDevice pDevice, PBYTE pbySkbData,
- unsigned int uDataLen, unsigned int uNodeIndex);
+void vDMA0_tx_80211(struct vnt_private *, struct sk_buff *skb);
+int nsDMA_tx_packet(struct vnt_private *, u32 uDMAIdx, struct sk_buff *skb);
+CMD_STATUS csMgmt_xmit(struct vnt_private *, struct vnt_tx_mgmt *);
+CMD_STATUS csBeacon_xmit(struct vnt_private *, struct vnt_tx_mgmt *);
+int bRelayPacketSend(struct vnt_private *, u8 *pbySkbData, u32 uDataLen,
+ u32 uNodeIndex);
#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/tether.c b/drivers/staging/vt6656/tether.c
index 083b2153a271..95286c4d5572 100644
--- a/drivers/staging/vt6656/tether.c
+++ b/drivers/staging/vt6656/tether.c
@@ -93,16 +93,16 @@ BYTE ETHbyGetHashIndexByCrc32(PBYTE pbyMultiAddr)
* Out:
* none
*
- * Return Value: TRUE if ok; FALSE if error.
+ * Return Value: true if ok; false if error.
*
*/
-BOOL ETHbIsBufferCrc32Ok(PBYTE pbyBuffer, unsigned int cbFrameLength)
+bool ETHbIsBufferCrc32Ok(PBYTE pbyBuffer, unsigned int cbFrameLength)
{
DWORD dwCRC;
dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4);
if (cpu_to_le32(*((PDWORD)(pbyBuffer + cbFrameLength - 4))) != dwCRC)
- return FALSE;
- return TRUE;
+ return false;
+ return true;
}
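
[Editor's note] ETHbIsBufferCrc32Ok() above recomputes the CRC over everything but the last four bytes and compares it with the little-endian trailer. A minimal sketch of the same check written against the kernel's generic CRC helpers follows; it assumes the trailer is a standard Ethernet-style FCS (seed ~0, final complement, stored little endian), which the driver's own CRCdwGetCrc32() may or may not match, and the function name is hypothetical.

#include <linux/crc32.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Sketch only: validate a trailing 32-bit FCS, assuming Ethernet CRC-32
 * semantics (seed ~0, final bit-complement, little-endian trailer). */
static bool frame_fcs_ok(const u8 *buf, unsigned int len)
{
	u32 fcs;

	if (len <= 4)
		return false;

	fcs = ~crc32_le(~0, buf, len - 4);		/* computed FCS */
	return get_unaligned_le32(buf + len - 4) == fcs;/* compare trailer */
}
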
diff --git a/drivers/staging/vt6656/tether.h b/drivers/staging/vt6656/tether.h
index 8c1f5d253f88..2f8f4853fd9d 100644
--- a/drivers/staging/vt6656/tether.h
+++ b/drivers/staging/vt6656/tether.h
@@ -161,6 +161,6 @@ S802_11Header, *PS802_11Header;
BYTE ETHbyGetHashIndexByCrc32(PBYTE pbyMultiAddr);
//BYTE ETHbyGetHashIndexByCrc(PBYTE pbyMultiAddr);
-BOOL ETHbIsBufferCrc32Ok(PBYTE pbyBuffer, unsigned int cbFrameLength);
+bool ETHbIsBufferCrc32Ok(PBYTE pbyBuffer, unsigned int cbFrameLength);
#endif /* __TETHER_H__ */
diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h
index dfbf74713a80..d7b648945316 100644
--- a/drivers/staging/vt6656/ttype.h
+++ b/drivers/staging/vt6656/ttype.h
@@ -33,33 +33,12 @@
/******* Common definitions and typedefs ***********************************/
-typedef int BOOL;
-
-#if !defined(TRUE)
-#define TRUE 1
-#endif
-#if !defined(FALSE)
-#define FALSE 0
-#endif
-
/****** Simple typedefs ***************************************************/
typedef u8 BYTE;
typedef u16 WORD;
typedef u32 DWORD;
-// QWORD is for those situation that we want
-// an 8-byte-aligned 8 byte long structure
-// which is NOT really a floating point number.
-typedef union tagUQuadWord {
- struct {
- u32 dwLowDword;
- u32 dwHighDword;
- } u;
- double DoNotUseThisField;
-} UQuadWord;
-typedef UQuadWord QWORD; // 64-bit
-
/****** Common pointer types ***********************************************/
typedef u32 ULONG_PTR;
@@ -73,6 +52,4 @@ typedef WORD * PWORD;
typedef DWORD * PDWORD;
-typedef QWORD * PQWORD;
-
#endif /* __TTYPE_H__ */
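
[Editor's note] The ttype.h hunk above drops the driver-private BOOL/TRUE/FALSE macros and the QWORD union in favour of the types the kernel already provides. A minimal sketch of the replacement style, with hypothetical structure and function names:

#include <linux/types.h>	/* bool, u8/u16/u32/u64 */

struct vnt_example_state {
	bool link_pass;		/* was BOOL bLinkPass, set with TRUE/FALSE */
	u64 tsf;		/* was QWORD, a union of two DWORDs */
};

static bool vnt_example_link_up(const struct vnt_example_state *s)
{
	return s->link_pass;	/* plain C99 bool, no TRUE/FALSE macros */
}

Using u64 instead of the two-DWORD union also lets the compiler provide the 8-byte alignment that the removed DoNotUseThisField double was trying to force.
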
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index fc68518526e0..00fd0f8a58c2 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -67,51 +67,18 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
-static
-void
-s_nsInterruptUsbIoCompleteRead(
- struct urb *urb
- );
-
-
-static
-void
-s_nsBulkInUsbIoCompleteRead(
- struct urb *urb
- );
-
-
-static
-void
-s_nsBulkOutIoCompleteWrite(
- struct urb *urb
- );
-
-
-static
-void
-s_nsControlInUsbIoCompleteRead(
- struct urb *urb
- );
-
-static
-void
-s_nsControlInUsbIoCompleteWrite(
- struct urb *urb
- );
+static void s_nsInterruptUsbIoCompleteRead(struct urb *urb);
+static void s_nsBulkInUsbIoCompleteRead(struct urb *urb);
+static void s_nsBulkOutIoCompleteWrite(struct urb *urb);
+static void s_nsControlInUsbIoCompleteRead(struct urb *urb);
+static void s_nsControlInUsbIoCompleteWrite(struct urb *urb);
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-int PIPEnsControlOutAsyn(
- PSDevice pDevice,
- BYTE byRequest,
- WORD wValue,
- WORD wIndex,
- WORD wLength,
- PBYTE pbyBuffer
- )
+int PIPEnsControlOutAsyn(struct vnt_private *pDevice, u8 byRequest,
+ u16 wValue, u16 wIndex, u16 wLength, u8 *pbyBuffer)
{
int ntStatus;
@@ -147,17 +114,11 @@ int PIPEnsControlOutAsyn(
return ntStatus;
}
-int PIPEnsControlOut(
- PSDevice pDevice,
- BYTE byRequest,
- WORD wValue,
- WORD wIndex,
- WORD wLength,
- PBYTE pbyBuffer
- )
+int PIPEnsControlOut(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
+ u16 wIndex, u16 wLength, u8 *pbyBuffer)
{
int ntStatus = 0;
- int ii;
+ int ii;
if (pDevice->Flags & fMP_DISCONNECTED)
return STATUS_FAILURE;
@@ -165,6 +126,11 @@ int PIPEnsControlOut(
if (pDevice->Flags & fMP_CONTROL_WRITES)
return STATUS_FAILURE;
+ if (pDevice->Flags & fMP_CONTROL_READS)
+ return STATUS_FAILURE;
+
+ MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
+
pDevice->sUsbCtlRequest.bRequestType = 0x40;
pDevice->sUsbCtlRequest.bRequest = byRequest;
pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
@@ -179,12 +145,13 @@ int PIPEnsControlOut(
ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "control send request submission failed: %d\n",
+ ntStatus);
+ MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
return STATUS_FAILURE;
}
- else {
- MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
- }
+
spin_unlock_irq(&pDevice->lock);
for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
@@ -206,17 +173,11 @@ int PIPEnsControlOut(
return STATUS_SUCCESS;
}
-int PIPEnsControlIn(
- PSDevice pDevice,
- BYTE byRequest,
- WORD wValue,
- WORD wIndex,
- WORD wLength,
- PBYTE pbyBuffer
- )
+int PIPEnsControlIn(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
+ u16 wIndex, u16 wLength, u8 *pbyBuffer)
{
int ntStatus = 0;
- int ii;
+ int ii;
if (pDevice->Flags & fMP_DISCONNECTED)
return STATUS_FAILURE;
@@ -224,6 +185,11 @@ int PIPEnsControlIn(
if (pDevice->Flags & fMP_CONTROL_READS)
return STATUS_FAILURE;
+ if (pDevice->Flags & fMP_CONTROL_WRITES)
+ return STATUS_FAILURE;
+
+ MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
+
pDevice->sUsbCtlRequest.bRequestType = 0xC0;
pDevice->sUsbCtlRequest.bRequest = byRequest;
pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
@@ -237,10 +203,11 @@ int PIPEnsControlIn(
ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus);
- }else {
- MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "control request submission failed: %d\n", ntStatus);
+ MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
+ return STATUS_FAILURE;
+ }
spin_unlock_irq(&pDevice->lock);
for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
@@ -263,13 +230,9 @@ int PIPEnsControlIn(
return ntStatus;
}
-static
-void
-s_nsControlInUsbIoCompleteWrite(
- struct urb *urb
- )
+static void s_nsControlInUsbIoCompleteWrite(struct urb *urb)
{
- PSDevice pDevice;
+ struct vnt_private *pDevice = (struct vnt_private *)urb->context;
pDevice = urb->context;
switch (urb->status) {
@@ -304,15 +267,11 @@ s_nsControlInUsbIoCompleteWrite(
* Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
*
*/
-static
-void
-s_nsControlInUsbIoCompleteRead(
- struct urb *urb
- )
+
+static void s_nsControlInUsbIoCompleteRead(struct urb *urb)
{
- PSDevice pDevice;
+ struct vnt_private *pDevice = (struct vnt_private *)urb->context;
- pDevice = urb->context;
switch (urb->status) {
case 0:
break;
@@ -345,17 +304,18 @@ s_nsControlInUsbIoCompleteRead(
* Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
*
*/
-int PIPEnsInterruptRead(PSDevice pDevice)
+
+int PIPEnsInterruptRead(struct vnt_private *pDevice)
{
- int ntStatus = STATUS_FAILURE;
+ int ntStatus = STATUS_FAILURE;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartInterruptUsbRead()\n");
- if(pDevice->intBuf.bInUse == TRUE){
+ if(pDevice->intBuf.bInUse == true){
return (STATUS_FAILURE);
}
- pDevice->intBuf.bInUse = TRUE;
-// pDevice->bEventAvailable = FALSE;
+ pDevice->intBuf.bInUse = true;
+// pDevice->bEventAvailable = false;
pDevice->ulIntInPosted++;
//
@@ -396,21 +356,16 @@ usb_fill_bulk_urb(pDevice->pInterruptURB,
* Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
*
*/
-static
-void
-s_nsInterruptUsbIoCompleteRead(
- struct urb *urb
- )
+static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
{
- PSDevice pDevice;
- int ntStatus;
+ struct vnt_private *pDevice = (struct vnt_private *)urb->context;
+ int ntStatus;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptUsbIoCompleteRead\n");
//
// The context given to IoSetCompletionRoutine is the receive buffer object
//
- pDevice = (PSDevice)urb->context;
//
// We have a number of cases:
@@ -428,7 +383,7 @@ s_nsInterruptUsbIoCompleteRead(
// otherwise interrupt data handler will free int buffer after it handle it.
if (( ntStatus != STATUS_SUCCESS )) {
pDevice->ulBulkInError++;
- pDevice->intBuf.bInUse = FALSE;
+ pDevice->intBuf.bInUse = false;
// if (ntStatus == USBD_STATUS_CRC) {
// pDevice->ulIntInContCRCError++;
@@ -436,20 +391,20 @@ s_nsInterruptUsbIoCompleteRead(
// if (ntStatus == STATUS_NOT_CONNECTED )
// {
- pDevice->fKillEventPollingThread = TRUE;
+ pDevice->fKillEventPollingThread = true;
// }
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"IntUSBIoCompleteControl STATUS = %d\n", ntStatus );
} else {
pDevice->ulIntInBytesRead += (unsigned long) urb->actual_length;
pDevice->ulIntInContCRCError = 0;
- pDevice->bEventAvailable = TRUE;
+ pDevice->bEventAvailable = true;
INTnsProcessData(pDevice);
}
STAvUpdateUSBCounter(&pDevice->scStatistic.USB_InterruptStat, ntStatus);
- if (pDevice->fKillEventPollingThread != TRUE) {
+ if (pDevice->fKillEventPollingThread != true) {
usb_fill_bulk_urb(pDevice->pInterruptURB,
pDevice->usb,
usb_rcvbulkpipe(pDevice->usb, 1),
@@ -483,10 +438,11 @@ s_nsInterruptUsbIoCompleteRead(
* Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
*
*/
-int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB)
+
+int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, PRCB pRCB)
{
int ntStatus = 0;
- struct urb *pUrb;
+ struct urb *pUrb;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartBulkInUsbRead\n");
@@ -521,7 +477,7 @@ int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB)
return STATUS_FAILURE ;
}
pRCB->Ref = 1;
- pRCB->bBoolInUse= TRUE;
+ pRCB->bBoolInUse= true;
return ntStatus;
}
@@ -543,19 +499,15 @@ int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB)
* Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
*
*/
-static
-void
-s_nsBulkInUsbIoCompleteRead(
- struct urb *urb
- )
+static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
{
- PRCB pRCB = (PRCB)urb->context;
- PSDevice pDevice = (PSDevice)pRCB->pDevice;
- unsigned long bytesRead;
- BOOL bIndicateReceive = FALSE;
- BOOL bReAllocSkb = FALSE;
- int status;
+ PRCB pRCB = (PRCB)urb->context;
+ struct vnt_private *pDevice = pRCB->pDevice;
+ unsigned long bytesRead;
+ int bIndicateReceive = false;
+ int bReAllocSkb = false;
+ int status;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
status = urb->status;
@@ -576,7 +528,7 @@ s_nsBulkInUsbIoCompleteRead(
// }
} else {
if (bytesRead)
- bIndicateReceive = TRUE;
+ bIndicateReceive = true;
pDevice->ulBulkInContCRCError = 0;
pDevice->ulBulkInBytesRead += bytesRead;
@@ -588,8 +540,8 @@ s_nsBulkInUsbIoCompleteRead(
if (bIndicateReceive) {
spin_lock(&pDevice->lock);
- if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == TRUE)
- bReAllocSkb = TRUE;
+ if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == true)
+ bReAllocSkb = true;
spin_unlock(&pDevice->lock);
}
pRCB->Ref--;
@@ -618,18 +570,15 @@ s_nsBulkInUsbIoCompleteRead(
* Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
*
*/
-int
-PIPEnsSendBulkOut(
- PSDevice pDevice,
- PUSB_SEND_CONTEXT pContext
- )
+
+int PIPEnsSendBulkOut(struct vnt_private *pDevice, PUSB_SEND_CONTEXT pContext)
{
- int status;
- struct urb *pUrb;
+ int status;
+ struct urb *pUrb;
- pDevice->bPWBitOn = FALSE;
+ pDevice->bPWBitOn = false;
/*
if (pDevice->pPendingBulkOutContext != NULL) {
@@ -661,13 +610,13 @@ PIPEnsSendBulkOut(
if (status != 0)
{
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Tx URB failed %d\n", status);
- pContext->bBoolInUse = FALSE;
+ pContext->bBoolInUse = false;
return STATUS_FAILURE;
}
return STATUS_PENDING;
}
else {
- pContext->bBoolInUse = FALSE;
+ pContext->bBoolInUse = false;
return STATUS_RESOURCES;
}
}
@@ -699,17 +648,14 @@ PIPEnsSendBulkOut(
* (IofCompleteRequest) to stop working on the irp.
*
*/
-static
-void
-s_nsBulkOutIoCompleteWrite(
- struct urb *urb
- )
+
+static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
{
- PSDevice pDevice;
- int status;
- CONTEXT_TYPE ContextType;
- unsigned long ulBufLen;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_private *pDevice;
+ int status;
+ CONTEXT_TYPE ContextType;
+ unsigned long ulBufLen;
+ PUSB_SEND_CONTEXT pContext;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
@@ -769,11 +715,11 @@ s_nsBulkOutIoCompleteWrite(
}
}
- if (pDevice->bLinkPass == TRUE) {
+ if (pDevice->bLinkPass == true) {
if (netif_queue_stopped(pDevice->dev))
netif_wake_queue(pDevice->dev);
}
- pContext->bBoolInUse = FALSE;
+ pContext->bBoolInUse = false;
return;
}
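
[Editor's note] The PIPEnsControlOut/PIPEnsControlIn hunks above change two things: a control transfer is now refused while one is already pending in the opposite direction, and the fMP_CONTROL_* busy flag is set before usb_submit_urb() and cleared again if submission fails, rather than being set only after a successful submit. A rough sketch of that ordering with hypothetical names (the real driver keeps the flags in pDevice->Flags under its spinlock):

#include <linux/usb.h>

#define EX_CTRL_WRITE	0x01
#define EX_CTRL_READ	0x02

struct example_dev {
	unsigned long flags;	/* protected by the device lock in the driver */
};

static int example_control_out(struct example_dev *dev, struct urb *urb)
{
	if (dev->flags & (EX_CTRL_WRITE | EX_CTRL_READ))
		return -EBUSY;			/* one control transfer at a time */

	dev->flags |= EX_CTRL_WRITE;		/* mark busy before submitting */

	if (usb_submit_urb(urb, GFP_ATOMIC)) {
		dev->flags &= ~EX_CTRL_WRITE;	/* roll back on failure */
		return -EIO;
	}

	return 0;	/* completion handler clears EX_CTRL_WRITE later */
}

Setting the flag before submission closes the window in which the completion handler could run and find the flag still clear.
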
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index b3673474a9e1..b3023559c15b 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -41,35 +41,15 @@
/*--------------------- Export Functions --------------------------*/
-int PIPEnsControlOut(
- PSDevice pDevice,
- BYTE byRequest,
- WORD wValue,
- WORD wIndex,
- WORD wLength,
- PBYTE pbyBuffer
- );
-
-int PIPEnsControlOutAsyn(
- PSDevice pDevice,
- BYTE byRequest,
- WORD wValue,
- WORD wIndex,
- WORD wLength,
- PBYTE pbyBuffer
- );
-
-int PIPEnsControlIn(
- PSDevice pDevice,
- BYTE byRequest,
- WORD wValue,
- WORD wIndex,
- WORD wLength,
- PBYTE pbyBuffer
- );
-
-int PIPEnsInterruptRead(PSDevice pDevice);
-int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB);
-int PIPEnsSendBulkOut(PSDevice pDevice, PUSB_SEND_CONTEXT pContext);
+int PIPEnsControlOut(struct vnt_private *, u8 byRequest, u16 wValue,
+ u16 wIndex, u16 wLength, u8 *pbyBuffer);
+int PIPEnsControlOutAsyn(struct vnt_private *, u8 byRequest,
+ u16 wValue, u16 wIndex, u16 wLength, u8 *pbyBuffer);
+int PIPEnsControlIn(struct vnt_private *, u8 byRequest, u16 wValue,
+ u16 wIndex, u16 wLength, u8 *pbyBuffer);
+
+int PIPEnsInterruptRead(struct vnt_private *);
+int PIPEnsBulkInUsbRead(struct vnt_private *, PRCB pRCB);
+int PIPEnsSendBulkOut(struct vnt_private *, PUSB_SEND_CONTEXT pContext);
#endif /* __USBPIPE_H__ */
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 22f6b41cfd19..4bb652bf7cf6 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -68,33 +68,17 @@ static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
/*--------------------- Static Functions --------------------------*/
-static
-void
-s_vProbeChannel(
- PSDevice pDevice
- );
+static void s_vProbeChannel(struct vnt_private *);
+static struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *,
+ struct vnt_manager *pMgmt, u8 *pScanBSSID, PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates);
-static
-PSTxMgmtPacket
-s_MgrMakeProbeRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PBYTE pScanBSSID,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- );
+static int s_bCommandComplete(struct vnt_private *);
-static
-BOOL
-s_bCommandComplete (
- PSDevice pDevice
- );
-
-static BOOL s_bClearBSSID_SCAN(void *hDeviceContext);
+static int s_bClearBSSID_SCAN(struct vnt_private *);
/*--------------------- Export Variables --------------------------*/
@@ -114,13 +98,10 @@ static BOOL s_bClearBSSID_SCAN(void *hDeviceContext);
*
*/
-static
-void
-vAdHocBeaconStop(PSDevice pDevice)
+static void vAdHocBeaconStop(struct vnt_private *pDevice)
{
-
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- BOOL bStop;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int bStop;
/*
* temporarily stop Beacon packet for AdHoc Server
@@ -133,18 +114,18 @@ vAdHocBeaconStop(PSDevice pDevice)
* or
* (3.2) AdHoc channel is in A mode
*/
- bStop = FALSE;
+ bStop = false;
if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
(pMgmt->eCurrState >= WMAC_STATE_STARTED))
{
if ((pMgmt->uIBSSChannel <= CB_MAX_CHANNEL_24G) &&
(pMgmt->uScanChannel > CB_MAX_CHANNEL_24G))
{
- bStop = TRUE;
+ bStop = true;
}
if (pMgmt->uIBSSChannel > CB_MAX_CHANNEL_24G)
{
- bStop = TRUE;
+ bStop = true;
}
}
@@ -171,11 +152,9 @@ vAdHocBeaconStop(PSDevice pDevice)
* Return Value: none
*
*/
-static
-void
-vAdHocBeaconRestart(PSDevice pDevice)
+static void vAdHocBeaconRestart(struct vnt_private *pDevice)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
/*
* Restart Beacon packet for AdHoc Server
@@ -204,22 +183,22 @@ vAdHocBeaconRestart(PSDevice pDevice)
*
-*/
-static
-void
-s_vProbeChannel(
- PSDevice pDevice
- )
+static void s_vProbeChannel(struct vnt_private *pDevice)
{
- //1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M
- BYTE abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
- BYTE abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
- //6M, 9M, 12M, 48M
- BYTE abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- BYTE abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- PBYTE pbyRate;
- PSTxMgmtPacket pTxPacket;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned int ii;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_mgmt *pTxPacket;
+ u8 abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES,
+ 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
+ /* 1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M*/
+ u8 abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES,
+ 4, 0x0C, 0x12, 0x18, 0x60};
+ /* 6M, 9M, 12M, 48M*/
+ u8 abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES,
+ 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
+ u8 abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES,
+ 4, 0x02, 0x04, 0x0B, 0x16};
+ u8 *pbyRate;
+ int ii;
if (pDevice->byBBType == BB_TYPE_11A) {
@@ -268,24 +247,19 @@ s_vProbeChannel(
-*/
-PSTxMgmtPacket
-s_MgrMakeProbeRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PBYTE pScanBSSID,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-
- )
+struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u8 *pScanBSSID, PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_PROBEREQ sFrame;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ WLAN_FR_PROBEREQ sFrame;
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBEREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_PROBEREQ_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_PROBEREQ_FR_MAXLEN;
vMgrEncodeProbeRequest(&sFrame);
@@ -316,9 +290,8 @@ s_MgrMakeProbeRequest(
return pTxPacket;
}
-void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond)
+void vCommandTimerWait(struct vnt_private *pDevice, unsigned long MSecond)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
init_timer(&pDevice->sTimerCommand);
@@ -331,23 +304,22 @@ void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond)
return;
}
-void vRunCommand(void *hDeviceContext)
+void vRunCommand(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- PWLAN_IE_SSID pItemSSIDCurr;
- CMD_STATUS Status;
- unsigned int ii;
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- struct sk_buff *skb;
- BYTE byData;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ PWLAN_IE_SSID pItemSSID;
+ PWLAN_IE_SSID pItemSSIDCurr;
+ CMD_STATUS Status;
+ struct sk_buff *skb;
union iwreq_data wrqu;
+ int ii;
+ u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ u8 byData;
if (pDevice->dwDiagRefCount != 0)
return;
- if (pDevice->bCmdRunning != TRUE)
+ if (pDevice->bCmdRunning != true)
return;
spin_lock_irq(&pDevice->lock);
@@ -357,7 +329,7 @@ void vRunCommand(void *hDeviceContext)
case WLAN_CMD_SCAN_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == TRUE) {
+ if (pDevice->bRadioOff == true) {
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -385,7 +357,7 @@ void vRunCommand(void *hDeviceContext)
if (pDevice->bUpdateBBVGA) {
BBvSetShortSlotTime(pDevice);
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
- BBvUpdatePreEDThreshold(pDevice, FALSE);
+ BBvUpdatePreEDThreshold(pDevice, false);
}
// Set channel back
vAdHocBeaconRestart(pDevice);
@@ -397,7 +369,7 @@ void vRunCommand(void *hDeviceContext)
pDevice->byRxMode |= RCR_BSSID;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
- pDevice->bStopDataPkt = FALSE;
+ pDevice->bStopDataPkt = false;
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -423,7 +395,7 @@ void vRunCommand(void *hDeviceContext)
pDevice->bLinkPass); */
pMgmt->eScanState = WMAC_IS_SCANNING;
pDevice->byScanBBType = pDevice->byBBType; //lucas
- pDevice->bStopDataPkt = TRUE;
+ pDevice->bStopDataPkt = true;
// Turn off RCR_BSSID filter every time
MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID);
pDevice->byRxMode &= ~RCR_BSSID;
@@ -447,7 +419,7 @@ void vRunCommand(void *hDeviceContext)
if (pDevice->bUpdateBBVGA) {
BBvSetShortSlotTime(pDevice);
BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
- BBvUpdatePreEDThreshold(pDevice, TRUE);
+ BBvUpdatePreEDThreshold(pDevice, true);
}
pMgmt->uScanChannel++;
@@ -461,7 +433,7 @@ void vRunCommand(void *hDeviceContext)
pDevice->eCommandState = WLAN_CMD_SCAN_END;
}
- if ((pMgmt->b11hEnable == FALSE) ||
+ if ((pMgmt->b11hEnable == false) ||
(pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
s_vProbeChannel(pDevice);
spin_unlock_irq(&pDevice->lock);
@@ -488,7 +460,7 @@ void vRunCommand(void *hDeviceContext)
if (pDevice->bUpdateBBVGA) {
BBvSetShortSlotTime(pDevice);
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
- BBvUpdatePreEDThreshold(pDevice, FALSE);
+ BBvUpdatePreEDThreshold(pDevice, false);
}
// Set channel back
@@ -502,7 +474,7 @@ void vRunCommand(void *hDeviceContext)
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
pMgmt->eScanState = WMAC_NO_SCANNING;
- pDevice->bStopDataPkt = FALSE;
+ pDevice->bStopDataPkt = false;
/*send scan event to wpa_Supplicant*/
PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
@@ -521,12 +493,12 @@ void vRunCommand(void *hDeviceContext)
return;
} else {
- pDevice->bwextstep0 = FALSE;
- pDevice->bwextstep1 = FALSE;
- pDevice->bwextstep2 = FALSE;
- pDevice->bwextstep3 = FALSE;
- pDevice->bWPASuppWextEnabled = FALSE;
- pDevice->fWPA_Authened = FALSE;
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = false;
+ pDevice->fWPA_Authened = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
// reason = 8 : disassoc because sta has left
@@ -535,18 +507,18 @@ void vRunCommand(void *hDeviceContext)
pMgmt->abyCurrBSSID,
(8),
&Status);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
// unlock command busy
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
pItemSSID->len = 0;
memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = FALSE;
-// pDevice->bBeaconBufReady = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = false;
+// pDevice->bBeaconBufReady = false;
}
netif_stop_queue(pDevice->dev);
- if (pDevice->bNeedRadioOFF == TRUE)
+ if (pDevice->bNeedRadioOFF == true)
CARDbRadioPowerOff(pDevice);
s_bCommandComplete(pDevice);
break;
@@ -555,7 +527,7 @@ void vRunCommand(void *hDeviceContext)
case WLAN_CMD_SSID_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == TRUE) {
+ if (pDevice->bRadioOff == true) {
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -588,7 +560,7 @@ void vRunCommand(void *hDeviceContext)
}
}
netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
}
// set initial state
@@ -625,9 +597,9 @@ void vRunCommand(void *hDeviceContext)
if (netif_queue_stopped(pDevice->dev)){
netif_wake_queue(pDevice->dev);
}
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- pMgmt->sNodeDBTable[0].bActive = TRUE;
+ pMgmt->sNodeDBTable[0].bActive = true;
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
}
else {
@@ -658,7 +630,7 @@ void vRunCommand(void *hDeviceContext)
BSSvAddMulticastNode(pDevice);
s_bClearBSSID_SCAN(pDevice);
/*
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
if (netif_queue_stopped(pDevice->dev)){
netif_wake_queue(pDevice->dev);
@@ -668,7 +640,7 @@ void vRunCommand(void *hDeviceContext)
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -726,7 +698,7 @@ void vRunCommand(void *hDeviceContext)
*/
pDevice->byLinkWaitCount = 0;
pDevice->byReAssocCount = 0;
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
s_bClearBSSID_SCAN(pDevice);
@@ -734,20 +706,20 @@ void vRunCommand(void *hDeviceContext)
netif_wake_queue(pDevice->dev);
}
- if(pDevice->IsTxDataTrigger != FALSE) { //TxDataTimer is not triggered at the first time
+ if(pDevice->IsTxDataTrigger != false) { //TxDataTimer is not triggered at the first time
// printk("Re-initial TxDataTimer****\n");
del_timer(&pDevice->sTimerTxData);
init_timer(&pDevice->sTimerTxData);
pDevice->sTimerTxData.data = (unsigned long) pDevice;
pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = FALSE;
+ pDevice->fTxDataInSleep = false;
pDevice->nTxDataTimeCout = 0;
}
else {
// printk("mike:-->First time trigger TimerTxData InSleep\n");
}
- pDevice->IsTxDataTrigger = TRUE;
+ pDevice->IsTxDataTrigger = true;
add_timer(&pDevice->sTimerTxData);
}
@@ -773,15 +745,15 @@ void vRunCommand(void *hDeviceContext)
del_timer(&pMgmt->sTimerSecondCallback);
pMgmt->eCurrState = WMAC_STATE_IDLE;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- if (pDevice->bEnableHostWEP == TRUE)
+ if (pDevice->bEnableHostWEP == true)
BSSvClearNodeDBTable(pDevice, 1);
else
BSSvClearNodeDBTable(pDevice, 0);
pDevice->uAssocCount = 0;
pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bFixRate = FALSE;
+ pDevice->bFixRate = false;
vMgrCreateOwnIBSS((void *) pDevice, &Status);
if (Status != CMD_STATUS_SUCCESS) {
@@ -796,7 +768,7 @@ void vRunCommand(void *hDeviceContext)
if (netif_queue_stopped(pDevice->dev)){
netif_wake_queue(pDevice->dev);
}
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
add_timer(&pMgmt->sTimerSecondCallback);
}
@@ -809,10 +781,10 @@ void vRunCommand(void *hDeviceContext)
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
pMgmt->abyPSTxMap[0] &= ~byMask[0];
- pDevice->bMoreData = FALSE;
+ pDevice->bMoreData = false;
}
else {
- pDevice->bMoreData = TRUE;
+ pDevice->bMoreData = true;
}
if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) {
@@ -834,10 +806,10 @@ void vRunCommand(void *hDeviceContext)
// clear tx map
pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- pDevice->bMoreData = FALSE;
+ pDevice->bMoreData = false;
}
else {
- pDevice->bMoreData = TRUE;
+ pDevice->bMoreData = true;
}
if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) {
@@ -856,7 +828,7 @@ void vRunCommand(void *hDeviceContext)
~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii);
}
- pMgmt->sNodeDBTable[ii].bRxPSPoll = FALSE;
+ pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
}
}
@@ -866,7 +838,7 @@ void vRunCommand(void *hDeviceContext)
case WLAN_CMD_RADIO_START:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_RADIO_START\n");
- // if (pDevice->bRadioCmd == TRUE)
+ // if (pDevice->bRadioCmd == true)
// CARDbRadioPowerOn(pDevice);
// else
// CARDbRadioPowerOff(pDevice);
@@ -894,31 +866,31 @@ void vRunCommand(void *hDeviceContext)
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
pDevice->uCmdDequeueIdx = 0;
pDevice->uCmdEnqueueIdx = 0;
- //0415pDevice->bCmdRunning = FALSE;
- pDevice->bCmdClear = TRUE;
- pDevice->bStopTx0Pkt = FALSE;
- pDevice->bStopDataPkt = TRUE;
+ //0415pDevice->bCmdRunning = false;
+ pDevice->bCmdClear = true;
+ pDevice->bStopTx0Pkt = false;
+ pDevice->bStopDataPkt = true;
pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = FALSE;
+ pDevice->bTransmitKey = false;
spin_unlock_irq(&pDevice->lock);
KeyvInitTable(pDevice,&pDevice->sKey);
spin_lock_irq(&pDevice->lock);
pMgmt->byCSSPK = KEY_CTL_NONE;
pMgmt->byCSSGK = KEY_CTL_NONE;
- if (pDevice->bLinkPass == TRUE) {
+ if (pDevice->bLinkPass == true) {
// reason = 8 : disassoc because sta has left
vMgrDisassocBeginSta((void *) pDevice,
pMgmt,
pMgmt->abyCurrBSSID,
(8),
&Status);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
// unlock command busy
pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = FALSE;
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ pMgmt->sNodeDBTable[0].bActive = false;
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -927,11 +899,11 @@ void vRunCommand(void *hDeviceContext)
wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
}
}
- pDevice->bwextstep0 = FALSE;
- pDevice->bwextstep1 = FALSE;
- pDevice->bwextstep2 = FALSE;
- pDevice->bwextstep3 = FALSE;
- pDevice->bWPASuppWextEnabled = FALSE;
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = false;
//clear current SSID
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
pItemSSID->len = 0;
@@ -945,10 +917,10 @@ void vRunCommand(void *hDeviceContext)
CARDbRadioPowerOff(pDevice);
MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_OFF);
- pDevice->bHWRadioOff = TRUE;
+ pDevice->bHWRadioOff = true;
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_ON........................\n");
- pDevice->bHWRadioOff = FALSE;
+ pDevice->bHWRadioOff = false;
CARDbRadioPowerOn(pDevice);
MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_ON);
@@ -961,11 +933,11 @@ void vRunCommand(void *hDeviceContext)
case WLAN_CMD_CHANGE_BBSENSITIVITY_START:
- pDevice->bStopDataPkt = TRUE;
+ pDevice->bStopDataPkt = true;
pDevice->byBBVGACurrent = pDevice->byBBVGANew;
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change sensitivity pDevice->byBBVGACurrent = %x\n", pDevice->byBBVGACurrent);
- pDevice->bStopDataPkt = FALSE;
+ pDevice->bStopDataPkt = false;
s_bCommandComplete(pDevice);
break;
@@ -990,13 +962,13 @@ void vRunCommand(void *hDeviceContext)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change from Antenna%d to", (int)pDevice->dwRxAntennaSel);
if ( pDevice->dwRxAntennaSel == 0) {
pDevice->dwRxAntennaSel=1;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
BBvSetAntennaMode(pDevice, ANT_RXA);
else
BBvSetAntennaMode(pDevice, ANT_RXB);
} else {
pDevice->dwRxAntennaSel=0;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
BBvSetAntennaMode(pDevice, ANT_RXB);
else
BBvSetAntennaMode(pDevice, ANT_RXA);
@@ -1027,9 +999,9 @@ void vRunCommand(void *hDeviceContext)
case WLAN_CMD_11H_CHSW_START:
CARDbSetMediaChannel(pDevice, pDevice->byNewChannel);
- pDevice->bChannelSwitch = FALSE;
+ pDevice->bChannelSwitch = false;
pMgmt->uCurrChannel = pDevice->byNewChannel;
- pDevice->bStopDataPkt = FALSE;
+ pDevice->bStopDataPkt = false;
s_bCommandComplete(pDevice);
break;
@@ -1043,24 +1015,19 @@ void vRunCommand(void *hDeviceContext)
}
-static
-BOOL
-s_bCommandComplete (
- PSDevice pDevice
- )
+static int s_bCommandComplete(struct vnt_private *pDevice)
{
- PWLAN_IE_SSID pSSID;
- BOOL bRadioCmd = FALSE;
- //WORD wDeAuthenReason = 0;
- BOOL bForceSCAN = TRUE;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ PWLAN_IE_SSID pSSID;
+ int bRadioCmd = false;
+ int bForceSCAN = true;
pDevice->eCommandState = WLAN_CMD_IDLE;
if (pDevice->cbFreeCmdQueue == CMD_Q_SIZE) {
//Command Queue Empty
- pDevice->bCmdRunning = FALSE;
- return TRUE;
+ pDevice->bCmdRunning = false;
+ return true;
}
else {
pDevice->eCommand = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].eCmd;
@@ -1069,7 +1036,7 @@ s_bCommandComplete (
bForceSCAN = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bForceSCAN;
ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdDequeueIdx, CMD_Q_SIZE);
pDevice->cbFreeCmdQueue++;
- pDevice->bCmdRunning = TRUE;
+ pDevice->bCmdRunning = true;
switch ( pDevice->eCommand ) {
case WLAN_CMD_BSSID_SCAN:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_BSSID_SCAN\n");
@@ -1081,7 +1048,7 @@ s_bCommandComplete (
memset(pMgmt->abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
}
/*
- if ((bForceSCAN == FALSE) && (pDevice->bLinkPass == TRUE)) {
+ if ((bForceSCAN == false) && (pDevice->bLinkPass == true)) {
if ((pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) &&
( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, pSSID->len))) {
pDevice->eCommandState = WLAN_CMD_IDLE;
@@ -1146,29 +1113,26 @@ s_bCommandComplete (
break;
}
- vCommandTimerWait((void *) pDevice, 0);
+ vCommandTimerWait(pDevice, 0);
}
- return TRUE;
+ return true;
}
-BOOL bScheduleCommand(void *hDeviceContext,
- CMD_CODE eCommand,
- PBYTE pbyItem0)
+int bScheduleCommand(struct vnt_private *pDevice,
+ CMD_CODE eCommand, u8 *pbyItem0)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
-
if (pDevice->cbFreeCmdQueue == 0) {
- return (FALSE);
+ return (false);
}
pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].eCmd = eCommand;
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = TRUE;
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = true;
memset(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID, 0 , WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
if (pbyItem0 != NULL) {
switch (eCommand) {
case WLAN_CMD_BSSID_SCAN:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = FALSE;
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = false;
memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
break;
@@ -1199,12 +1163,12 @@ BOOL bScheduleCommand(void *hDeviceContext,
ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
pDevice->cbFreeCmdQueue--;
- if (pDevice->bCmdRunning == FALSE) {
+ if (pDevice->bCmdRunning == false) {
s_bCommandComplete(pDevice);
}
else {
}
- return (TRUE);
+ return (true);
}
@@ -1219,14 +1183,13 @@ BOOL bScheduleCommand(void *hDeviceContext,
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-static BOOL s_bClearBSSID_SCAN(void *hDeviceContext)
+static int s_bClearBSSID_SCAN(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- unsigned int uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
- unsigned int ii;
+ unsigned int uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
+ unsigned int ii;
if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii ++) {
@@ -1237,14 +1200,13 @@ static BOOL s_bClearBSSID_SCAN(void *hDeviceContext)
break;
}
}
- return TRUE;
+ return true;
}
//mike add:reset command timer
-void vResetCommandTimer(void *hDeviceContext)
+void vResetCommandTimer(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
//delete timer
del_timer(&pDevice->sTimerCommand);
@@ -1257,14 +1219,13 @@ void vResetCommandTimer(void *hDeviceContext)
pDevice->uCmdDequeueIdx = 0;
pDevice->uCmdEnqueueIdx = 0;
pDevice->eCommandState = WLAN_CMD_IDLE;
- pDevice->bCmdRunning = FALSE;
- pDevice->bCmdClear = FALSE;
+ pDevice->bCmdRunning = false;
+ pDevice->bCmdClear = false;
}
-void BSSvSecondTxData(void *hDeviceContext)
+void BSSvSecondTxData(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
pDevice->nTxDataTimeCout++;
@@ -1278,13 +1239,13 @@ void BSSvSecondTxData(void *hDeviceContext)
spin_lock_irq(&pDevice->lock);
//is wap_supplicant running successful OR only open && sharekey mode!
- if (((pDevice->bLinkPass == TRUE) &&
+ if (((pDevice->bLinkPass == true) &&
(pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
- (pDevice->fWPA_Authened == TRUE)) { //wpa linking
+ (pDevice->fWPA_Authened == true)) { //wpa linking
// printk("mike:%s-->InSleep Tx Data Procedure\n",__FUNCTION__);
- pDevice->fTxDataInSleep = TRUE;
+ pDevice->fTxDataInSleep = true;
PSbSendNullPacket(pDevice); //send null packet
- pDevice->fTxDataInSleep = FALSE;
+ pDevice->fTxDataInSleep = false;
}
spin_unlock_irq(&pDevice->lock);
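
[Editor's note] bScheduleCommand() and s_bCommandComplete() above operate a fixed-size command ring: enqueue advances uCmdEnqueueIdx with ADD_ONE_WITH_WRAP_AROUND and decrements cbFreeCmdQueue, while completion dequeues from uCmdDequeueIdx and re-arms vRunCommand through vCommandTimerWait. A small self-contained sketch of that bookkeeping (names hypothetical, locking omitted):

#include <linux/types.h>

#define CMDQ_SIZE 32

struct cmd_queue {
	unsigned int enq;	/* next free slot */
	unsigned int deq;	/* oldest queued command */
	unsigned int free;	/* starts at CMDQ_SIZE */
	int cmds[CMDQ_SIZE];
};

/* Same effect as the driver's ADD_ONE_WITH_WRAP_AROUND() macro. */
static void wrap_inc(unsigned int *idx)
{
	*idx = (*idx + 1) % CMDQ_SIZE;
}

static bool cmd_enqueue(struct cmd_queue *q, int cmd)
{
	if (q->free == 0)
		return false;		/* queue full, caller must retry */
	q->cmds[q->enq] = cmd;
	wrap_inc(&q->enq);
	q->free--;
	return true;
}

static bool cmd_dequeue(struct cmd_queue *q, int *cmd)
{
	if (q->free == CMDQ_SIZE)
		return false;		/* queue empty */
	*cmd = q->cmds[q->deq];
	wrap_inc(&q->deq);
	q->free++;
	return true;
}
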
diff --git a/drivers/staging/vt6656/wcmd.h b/drivers/staging/vt6656/wcmd.h
index d24a79dce61a..c40e6baa0b5d 100644
--- a/drivers/staging/vt6656/wcmd.h
+++ b/drivers/staging/vt6656/wcmd.h
@@ -28,7 +28,6 @@
#ifndef __WCMD_H__
#define __WCMD_H__
-
#include "ttype.h"
#include "80211hdr.h"
#include "80211mgr.h"
@@ -75,9 +74,9 @@ typedef enum tagCMD_STATUS {
typedef struct tagCMD_ITEM {
CMD_CODE eCmd;
BYTE abyCmdDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BOOL bNeedRadioOFF;
- BOOL bRadioCmd;
- BOOL bForceSCAN;
+ bool bNeedRadioOFF;
+ bool bRadioCmd;
+ bool bForceSCAN;
WORD wDeAuthenReason;
} CMD_ITEM, *PCMD_ITEM;
@@ -112,14 +111,13 @@ typedef enum tagCMD_STATE {
/*--------------------- Export Types ------------------------------*/
/*--------------------- Export Functions --------------------------*/
+struct vnt_private;
-void vResetCommandTimer(void *hDeviceContext);
+void vResetCommandTimer(struct vnt_private *);
-BOOL bScheduleCommand(void *hDeviceContext,
- CMD_CODE eCommand,
- PBYTE pbyItem0);
+int bScheduleCommand(struct vnt_private *, CMD_CODE eCommand, u8 *pbyItem0);
-void vRunCommand(void *hDeviceContext);
+void vRunCommand(struct vnt_private *);
/*
void
@@ -128,6 +126,6 @@ WCMDvCommandThread(
);
*/
-void BSSvSecondTxData(void *hDeviceContext);
+void BSSvSecondTxData(struct vnt_private *);
#endif /* __WCMD_H__ */
diff --git a/drivers/staging/vt6656/wctl.c b/drivers/staging/vt6656/wctl.c
index 9249263b2da8..baa48a1f0d36 100644
--- a/drivers/staging/vt6656/wctl.c
+++ b/drivers/staging/vt6656/wctl.c
@@ -53,8 +53,8 @@
/*
* Description:
- * Scan Rx cache. Return TRUE if packet is duplicate, else
- * inserts in receive cache and returns FALSE.
+ * Scan Rx cache. Return true if packet is duplicate, else
+ * inserts in receive cache and returns false.
*
* Parameters:
* In:
@@ -63,11 +63,11 @@
* Out:
* none
*
- * Return Value: TRUE if packet duplicate; otherwise FALSE
+ * Return Value: true if packet duplicate; otherwise false
*
*/
-BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
+bool WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
{
unsigned int uIndex;
unsigned int ii;
@@ -84,7 +84,7 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
(LOBYTE(pCacheEntry->wFrameCtl) == LOBYTE(pMACHeader->wFrameCtl))
) {
/* Duplicate match */
- return TRUE;
+ return true;
}
ADD_ONE_WITH_WRAP_AROUND(uIndex, DUPLICATE_RX_CACHE_LENGTH);
}
@@ -95,7 +95,7 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
memcpy(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
pCacheEntry->wFrameCtl = pMACHeader->wFrameCtl;
ADD_ONE_WITH_WRAP_AROUND(pCache->uInPtr, DUPLICATE_RX_CACHE_LENGTH);
- return FALSE;
+ return false;
}
/*
@@ -113,12 +113,13 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
*
*/
-unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
+unsigned int WCTLuSearchDFCB(struct vnt_private *pDevice,
+ PS802_11Header pMACHeader)
{
unsigned int ii;
for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if ((pDevice->sRxDFCB[ii].bInUse == TRUE) &&
+ if ((pDevice->sRxDFCB[ii].bInUse == true) &&
(!compare_ether_addr(&(pDevice->sRxDFCB[ii].abyAddr2[0]),
&(pMACHeader->abyAddr2[0])))) {
return ii;
@@ -141,17 +142,18 @@ unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
* Return Value: index number in Defragment Database
*
*/
-unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
+unsigned int WCTLuInsertDFCB(struct vnt_private *pDevice,
+ PS802_11Header pMACHeader)
{
unsigned int ii;
if (pDevice->cbFreeDFCB == 0)
return(pDevice->cbDFCB);
for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if (pDevice->sRxDFCB[ii].bInUse == FALSE) {
+ if (pDevice->sRxDFCB[ii].bInUse == false) {
pDevice->cbFreeDFCB--;
pDevice->sRxDFCB[ii].uLifetime = pDevice->dwMaxReceiveLifetime;
- pDevice->sRxDFCB[ii].bInUse = TRUE;
+ pDevice->sRxDFCB[ii].bInUse = true;
pDevice->sRxDFCB[ii].wSequence = (pMACHeader->wSeqCtl >> 4);
pDevice->sRxDFCB[ii].wFragNum = (pMACHeader->wSeqCtl & 0x000F);
memcpy(&(pDevice->sRxDFCB[ii].abyAddr2[0]),
@@ -177,16 +179,16 @@ unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
* Out:
* none
*
- * Return Value: TRUE if it is valid fragment packet and we have resource to defragment; otherwise FALSE
+ * Return Value: true if it is valid fragment packet and we have resource to defragment; otherwise false
*
*/
-BOOL WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader,
- unsigned int cbFrameLength, BOOL bWEP, BOOL bExtIV)
+bool WCTLbHandleFragment(struct vnt_private *pDevice, PS802_11Header pMACHeader,
+ unsigned int cbFrameLength, bool bWEP, bool bExtIV)
{
-unsigned int uHeaderSize;
+ unsigned int uHeaderSize;
- if (bWEP == TRUE) {
+ if (bWEP == true) {
uHeaderSize = 28;
if (bExtIV)
// ExtIV
@@ -207,7 +209,7 @@ unsigned int uHeaderSize;
else {
pDevice->uCurrentDFCBIdx = WCTLuInsertDFCB(pDevice, pMACHeader);
if (pDevice->uCurrentDFCBIdx == pDevice->cbDFCB) {
- return(FALSE);
+ return(false);
}
}
// reserve 8 byte to match MAC RX Buffer
@@ -218,7 +220,7 @@ unsigned int uHeaderSize;
pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer += cbFrameLength;
pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum++;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "First pDevice->uCurrentDFCBIdx= %d\n", pDevice->uCurrentDFCBIdx);
- return(FALSE);
+ return(false);
}
else {
pDevice->uCurrentDFCBIdx = WCTLuSearchDFCB(pDevice, pMACHeader);
@@ -236,21 +238,21 @@ unsigned int uHeaderSize;
else {
// seq error or frag # error flush DFCB
pDevice->cbFreeDFCB++;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = FALSE;
- return(FALSE);
+ pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = false;
+ return(false);
}
}
else {
- return(FALSE);
+ return(false);
}
if (IS_LAST_FRAGMENT_PKT(pMACHeader)) {
//enq defragcontrolblock
pDevice->cbFreeDFCB++;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = FALSE;
+ pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = false;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Last pDevice->uCurrentDFCBIdx= %d\n", pDevice->uCurrentDFCBIdx);
- return(TRUE);
+ return(true);
}
- return(FALSE);
+ return(false);
}
}
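
[Editor's note] WCTLbIsDuplicate() at the top of this file keeps a small ring of recently seen (addr2, sequence-control, frame-control) tuples: a hit means the frame is a retransmission and is dropped, a miss overwrites the oldest slot. A simplified sketch of that cache (hypothetical names and size; the driver compares only the low byte of the frame-control word):

#include <linux/if_ether.h>	/* ETH_ALEN */
#include <linux/string.h>
#include <linux/types.h>

#define DUP_CACHE_LEN	5

struct dup_entry {
	u8  addr2[ETH_ALEN];
	u16 seq_ctl;
	u16 frame_ctl;
};

struct dup_cache {
	struct dup_entry e[DUP_CACHE_LEN];
	unsigned int in;	/* next slot to overwrite */
};

/* Return true for a duplicate; otherwise record the frame and return false. */
static bool dup_cache_check(struct dup_cache *c, const u8 *addr2,
			    u16 seq_ctl, u16 frame_ctl)
{
	unsigned int i, idx = c->in;	/* scan starting at the oldest entry */

	for (i = 0; i < DUP_CACHE_LEN; i++) {
		struct dup_entry *e = &c->e[idx];

		if (e->seq_ctl == seq_ctl && e->frame_ctl == frame_ctl &&
		    !memcmp(e->addr2, addr2, ETH_ALEN))
			return true;
		idx = (idx + 1) % DUP_CACHE_LEN;
	}

	/* Not found: insert over the oldest slot. */
	memcpy(c->e[c->in].addr2, addr2, ETH_ALEN);
	c->e[c->in].seq_ctl = seq_ctl;
	c->e[c->in].frame_ctl = frame_ctl;
	c->in = (c->in + 1) % DUP_CACHE_LEN;
	return false;
}
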
diff --git a/drivers/staging/vt6656/wctl.h b/drivers/staging/vt6656/wctl.h
index 7270af68c89d..1b21e32e99e5 100644
--- a/drivers/staging/vt6656/wctl.h
+++ b/drivers/staging/vt6656/wctl.h
@@ -96,10 +96,10 @@
/*--------------------- Export Functions --------------------------*/
-BOOL WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader);
-BOOL WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader,
- unsigned int cbFrameLength, BOOL bWEP, BOOL bExtIV);
-unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
-unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
+bool WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader);
+bool WCTLbHandleFragment(struct vnt_private *, PS802_11Header pMACHeader,
+ unsigned int cbFrameLength, bool bWEP, bool bExtIV);
+unsigned int WCTLuSearchDFCB(struct vnt_private *, PS802_11Header pMACHeader);
+unsigned int WCTLuInsertDFCB(struct vnt_private *, PS802_11Header pMACHeader);
#endif /* __WCTL_H__ */
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index 95ddc8303bb3..5dced0a43797 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -93,230 +93,101 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
-static BOOL ChannelExceedZoneType(
- PSDevice pDevice,
- BYTE byCurrChannel
- );
+static int ChannelExceedZoneType(struct vnt_private *, u8 byCurrChannel);
-// Association/diassociation functions
-static
-PSTxMgmtPacket
-s_MgrMakeAssocRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PBYTE pDAddr,
- WORD wCurrCapInfo,
- WORD wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- );
+/* Association/diassociation functions */
+static struct vnt_tx_mgmt *s_MgrMakeAssocRequest(struct vnt_private *,
+ struct vnt_manager *pMgmt, u8 *pDAddr, u16 wCurrCapInfo,
+ u16 wListenInterval, PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates);
-static
-void
-s_vMgrRxAssocRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
- );
+static void s_vMgrRxAssocRequest(struct vnt_private *,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket,
+ u32 uNodeIndex);
-static
-PSTxMgmtPacket
-s_MgrMakeReAssocRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PBYTE pDAddr,
- WORD wCurrCapInfo,
- WORD wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- );
+static struct vnt_tx_mgmt *s_MgrMakeReAssocRequest(struct vnt_private *,
+ struct vnt_manager *pMgmt, u8 *pDAddr, u16 wCurrCapInfo,
+ u16 wListenInterval, PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates);
-static
-void
-s_vMgrRxAssocResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- BOOL bReAssocType
- );
+static void s_vMgrRxAssocResponse(struct vnt_private *,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket,
+ int bReAssocType);
-static
-void
-s_vMgrRxDisassociation(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- );
+static void s_vMgrRxDisassociation(struct vnt_private *,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket);
-// Authentication/deauthen functions
-static
-void
-s_vMgrRxAuthenSequence_1(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
- );
+/* Authentication/deauthen functions */
+static void s_vMgrRxAuthenSequence_1(struct vnt_private *,
+ struct vnt_manager *pMgmt, PWLAN_FR_AUTHEN pFrame);
-static
-void
-s_vMgrRxAuthenSequence_2(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
- );
+static void s_vMgrRxAuthenSequence_2(struct vnt_private *,
+ struct vnt_manager *pMgmt, PWLAN_FR_AUTHEN pFrame);
-static
-void
-s_vMgrRxAuthenSequence_3(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
- );
+static void s_vMgrRxAuthenSequence_3(struct vnt_private *,
+ struct vnt_manager *pMgmt, PWLAN_FR_AUTHEN pFrame);
-static
-void
-s_vMgrRxAuthenSequence_4(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
- );
+static void s_vMgrRxAuthenSequence_4(struct vnt_private *,
+ struct vnt_manager *pMgmt, PWLAN_FR_AUTHEN pFrame);
-static
-void
-s_vMgrRxAuthentication(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- );
+static void s_vMgrRxAuthentication(struct vnt_private *,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket);
-static
-void
-s_vMgrRxDeauthentication(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- );
+static void s_vMgrRxDeauthentication(struct vnt_private *,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket);
-// Scan functions
-// probe request/response functions
-static
-void
-s_vMgrRxProbeRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- );
+/* Scan functions
+* probe request/response functions */
-static
-void
-s_vMgrRxProbeResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- );
+static void s_vMgrRxProbeRequest(struct vnt_private *,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket);
-// beacon functions
-static
-void
-s_vMgrRxBeacon(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- BOOL bInScan
- );
+static void s_vMgrRxProbeResponse(struct vnt_private *,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket);
-static
-void
-s_vMgrFormatTIM(
- PSMgmtObject pMgmt,
- PWLAN_IE_TIM pTIM
- );
+/* beacon functions */
+static void s_vMgrRxBeacon(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket,
+ int bInScan);
-static
-PSTxMgmtPacket
-s_MgrMakeBeacon(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- WORD wCurrATIMWinodw,
- PWLAN_IE_SSID pCurrSSID,
- PBYTE pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- );
+static void s_vMgrFormatTIM(struct vnt_manager *pMgmt, PWLAN_IE_TIM pTIM);
+static struct vnt_tx_mgmt *s_MgrMakeBeacon(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wCurrBeaconPeriod,
+ u32 uCurrChannel, u16 wCurrATIMWinodw, PWLAN_IE_SSID pCurrSSID,
+ u8 *pCurrBSSID, PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates);
-// Association response
-static
-PSTxMgmtPacket
-s_MgrMakeAssocResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wAssocStatus,
- WORD wAssocAID,
- PBYTE pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- );
-// ReAssociation response
-static
-PSTxMgmtPacket
-s_MgrMakeReAssocResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wAssocStatus,
- WORD wAssocAID,
- PBYTE pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- );
+/* Association response */
+static struct vnt_tx_mgmt *s_MgrMakeAssocResponse(struct vnt_private *,
+ struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wAssocStatus,
+ u16 wAssocAID, u8 *pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates);
-// Probe response
-static
-PSTxMgmtPacket
-s_MgrMakeProbeResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- WORD wCurrATIMWinodw,
- PBYTE pDstAddr,
- PWLAN_IE_SSID pCurrSSID,
- PBYTE pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- BYTE byPHYType
- );
+/* ReAssociation response */
+static struct vnt_tx_mgmt *s_MgrMakeReAssocResponse(struct vnt_private *,
+ struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wAssocStatus,
+ u16 wAssocAID, u8 *pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates);
-// received status
-static
-void
-s_vMgrLogStatus(
- PSMgmtObject pMgmt,
- WORD wStatus
- );
+/* Probe response */
+static struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *,
+ struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wCurrBeaconPeriod,
+ u32 uCurrChannel, u16 wCurrATIMWinodw, u8 *pDstAddr,
+ PWLAN_IE_SSID pCurrSSID, u8 *pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates, u8 byPHYType);
+/* received status */
+static void s_vMgrLogStatus(struct vnt_manager *pMgmt, u16 wStatus);
-static
-void
-s_vMgrSynchBSS (
- PSDevice pDevice,
- unsigned int uBSSMode,
- PKnownBSS pCurr,
- PCMD_STATUS pStatus
- );
+static void s_vMgrSynchBSS(struct vnt_private *, u32 uBSSMode,
+ PKnownBSS pCurr, PCMD_STATUS pStatus);
-static BOOL
+
+static bool
s_bCipherMatch (
PKnownBSS pBSSNode,
NDIS_802_11_ENCRYPTION_STATUS EncStatus,
@@ -324,10 +195,7 @@ s_bCipherMatch (
PBYTE pbyCCSGK
);
- static void Encyption_Rebuild(
- PSDevice pDevice,
- PKnownBSS pCurr
- );
+static void Encyption_Rebuild(struct vnt_private *, PKnownBSS pCurr);
/*--------------------- Export Variables --------------------------*/
@@ -343,11 +211,10 @@ s_bCipherMatch (
*
-*/
-void vMgrObjectInit(void *hDeviceContext)
+void vMgrObjectInit(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ii;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ int ii;
pMgmt->pbyPSPacketPool = &pMgmt->byPSPacketPool[0];
@@ -361,7 +228,7 @@ void vMgrObjectInit(void *hDeviceContext)
pMgmt->byCSSPK = KEY_CTL_NONE;
pMgmt->byCSSGK = KEY_CTL_NONE;
pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
- BSSvClearBSSList((void *) pDevice, FALSE);
+ BSSvClearBSSList((void *) pDevice, false);
init_timer(&pMgmt->sTimerSecondCallback);
pMgmt->sTimerSecondCallback.data = (unsigned long)pDevice;
@@ -377,16 +244,16 @@ void vMgrObjectInit(void *hDeviceContext)
pDevice->sTimerTxData.data = (unsigned long)pDevice;
pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = FALSE;
- pDevice->IsTxDataTrigger = FALSE;
+ pDevice->fTxDataInSleep = false;
+ pDevice->IsTxDataTrigger = false;
pDevice->nTxDataTimeCout = 0;
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
pDevice->uCmdDequeueIdx = 0;
pDevice->uCmdEnqueueIdx = 0;
pDevice->eCommandState = WLAN_CMD_IDLE;
- pDevice->bCmdRunning = FALSE;
- pDevice->bCmdClear = FALSE;
+ pDevice->bCmdRunning = false;
+ pDevice->bCmdClear = false;
return;
}
@@ -402,12 +269,10 @@ void vMgrObjectInit(void *hDeviceContext)
*
-*/
-void vMgrAssocBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus)
+void vMgrAssocBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, PCMD_STATUS pStatus)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSTxMgmtPacket pTxPacket;
+ struct vnt_tx_mgmt *pTxPacket;
pMgmt->wCurrCapInfo = 0;
@@ -426,7 +291,7 @@ void vMgrAssocBeginSta(void *hDeviceContext,
// ERP Phy (802.11g) should support short preamble.
if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (pDevice->bShortSlotTime == TRUE)
+ if (pDevice->bShortSlotTime == true)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
} else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
@@ -434,7 +299,7 @@ void vMgrAssocBeginSta(void *hDeviceContext,
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
}
}
- if (pMgmt->b11hEnable == TRUE)
+ if (pMgmt->b11hEnable == true)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
// build an assocreq frame and send it
@@ -475,14 +340,10 @@ void vMgrAssocBeginSta(void *hDeviceContext,
*
-*/
-void vMgrReAssocBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus)
+void vMgrReAssocBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, PCMD_STATUS pStatus)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSTxMgmtPacket pTxPacket;
-
-
+ struct vnt_tx_mgmt *pTxPacket;
pMgmt->wCurrCapInfo = 0;
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
@@ -502,7 +363,7 @@ void vMgrReAssocBeginSta(void *hDeviceContext,
// ERP Phy (802.11g) should support short preamble.
if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (pDevice->bShortSlotTime == TRUE)
+ if (pDevice->bShortSlotTime == true)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
} else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
@@ -510,7 +371,7 @@ void vMgrReAssocBeginSta(void *hDeviceContext,
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
}
}
- if (pMgmt->b11hEnable == TRUE)
+ if (pMgmt->b11hEnable == true)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
@@ -551,19 +412,18 @@ void vMgrReAssocBeginSta(void *hDeviceContext,
*
-*/
-void vMgrDisassocBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PBYTE abyDestAddress,
- WORD wReason,
- PCMD_STATUS pStatus)
+void vMgrDisassocBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u8 *abyDestAddress, u16 wReason,
+ PCMD_STATUS pStatus)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_DISASSOC sFrame;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ WLAN_FR_DISASSOC sFrame;
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DISASSOC_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_DISASSOC_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
// Setup the sFrame structure
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
@@ -610,23 +470,18 @@ void vMgrDisassocBeginSta(void *hDeviceContext,
*
-*/
-static
-void
-s_vMgrRxAssocRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
- )
+static void s_vMgrRxAssocRequest(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket,
+ u32 uNodeIndex)
{
- WLAN_FR_ASSOCREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- WORD wAssocStatus = 0;
- WORD wAssocAID = 0;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ WLAN_FR_ASSOCREQ sFrame;
+ CMD_STATUS Status;
+ struct vnt_tx_mgmt *pTxPacket;
+ u16 wAssocStatus = 0;
+ u16 wAssocAID = 0;
+ u32 uRateLen = WLAN_RATES_MAXLEN;
+ u8 abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ u8 abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
@@ -650,7 +505,7 @@ s_vMgrRxAssocRequest(
pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
- WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? TRUE : FALSE;
+ WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? true : false;
// Todo: check sta basic rate, if ap can't support, set status code
if (pDevice->byBBType == BB_TYPE_11B) {
uRateLen = WLAN_RATES_MAXLEN_11B;
@@ -672,7 +527,7 @@ s_vMgrRxAssocRequest(
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- FALSE, // do not change our basic rate
+ false, // do not change our basic rate
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
@@ -693,15 +548,15 @@ s_vMgrRxAssocRequest(
wAssocAID = (WORD)uNodeIndex;
// check if ERP support
if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE;
+ pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
// B only STA join
- pDevice->bProtectMode = TRUE;
- pDevice->bNonERPPresent = TRUE;
+ pDevice->bProtectMode = true;
+ pDevice->bNonERPPresent = true;
}
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == FALSE) {
- pDevice->bBarkerPreambleMd = TRUE;
+ if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == false) {
+ pDevice->bBarkerPreambleMd = true;
}
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Associate AID= %d \n", wAssocAID);
@@ -766,23 +621,18 @@ s_vMgrRxAssocRequest(
*
-*/
-static
-void
-s_vMgrRxReAssocRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
- )
+static void s_vMgrRxReAssocRequest(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket,
+ u32 uNodeIndex)
{
- WLAN_FR_REASSOCREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- WORD wAssocStatus = 0;
- WORD wAssocAID = 0;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ WLAN_FR_REASSOCREQ sFrame;
+ CMD_STATUS Status;
+ struct vnt_tx_mgmt *pTxPacket;
+ u16 wAssocStatus = 0;
+ u16 wAssocAID = 0;
+ u32 uRateLen = WLAN_RATES_MAXLEN;
+ u8 abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ u8 abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
return;
@@ -801,7 +651,7 @@ s_vMgrRxReAssocRequest(
pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
- WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? TRUE : FALSE;
+ WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? true : false;
// Todo: check sta basic rate, if ap can't support, set status code
if (pDevice->byBBType == BB_TYPE_11B) {
@@ -825,7 +675,7 @@ s_vMgrRxReAssocRequest(
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- FALSE, // do not change our basic rate
+ false, // do not change our basic rate
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
@@ -847,15 +697,15 @@ s_vMgrRxReAssocRequest(
// if suppurt ERP
if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE;
+ pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
// B only STA join
- pDevice->bProtectMode = TRUE;
- pDevice->bNonERPPresent = TRUE;
+ pDevice->bProtectMode = true;
+ pDevice->bNonERPPresent = true;
}
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == FALSE) {
- pDevice->bBarkerPreambleMd = TRUE;
+ if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == false) {
+ pDevice->bBarkerPreambleMd = true;
}
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Rx ReAssociate AID= %d \n", wAssocAID);
@@ -913,18 +763,13 @@ s_vMgrRxReAssocRequest(
*
-*/
-static
-void
-s_vMgrRxAssocResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- BOOL bReAssocType
- )
+static void s_vMgrRxAssocResponse(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket,
+ int bReAssocType)
{
- WLAN_FR_ASSOCRESP sFrame;
- PWLAN_IE_SSID pItemSSID;
- PBYTE pbyIEs;
+ WLAN_FR_ASSOCRESP sFrame;
+ PWLAN_IE_SSID pItemSSID;
+ u8 *pbyIEs;
@@ -970,10 +815,10 @@ s_vMgrRxAssocResponse(
sFrame.pExtSuppRates);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Link with AP(SSID): %s\n", pItemSSID->abySSID);
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- //if(pDevice->bWPASuppWextEnabled == TRUE)
+ //if(pDevice->bWPASuppWextEnabled == true)
{
BYTE buf[512];
size_t len;
@@ -1027,11 +872,11 @@ s_vMgrRxAssocResponse(
}
//need clear flags related to Networkmanager
- pDevice->bwextstep0 = FALSE;
- pDevice->bwextstep1 = FALSE;
- pDevice->bwextstep2 = FALSE;
- pDevice->bwextstep3 = FALSE;
- pDevice->bWPASuppWextEnabled = FALSE;
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = false;
if(pMgmt->eCurrState == WMAC_STATE_ASSOC)
timer_expire(pDevice->sTimerCommand, 0);
@@ -1050,17 +895,17 @@ if(pMgmt->eCurrState == WMAC_STATE_ASSOC)
*
-*/
-void vMgrAuthenBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus)
+void vMgrAuthenBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, PCMD_STATUS pStatus)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- WLAN_FR_AUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ WLAN_FR_AUTHEN sFrame;
+ struct vnt_tx_mgmt *pTxPacket =
+ (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_AUTHEN_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
vMgrEncodeAuthen(&sFrame);
@@ -1103,20 +948,18 @@ void vMgrAuthenBeginSta(void *hDeviceContext,
*
-*/
-void vMgrDeAuthenBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PBYTE abyDestAddress,
- WORD wReason,
- PCMD_STATUS pStatus)
+void vMgrDeAuthenBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u8 *abyDestAddress, u16 wReason,
+ PCMD_STATUS pStatus)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- WLAN_FR_DEAUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DEAUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ WLAN_FR_DEAUTHEN sFrame;
+ struct vnt_tx_mgmt *pTxPacket =
+ (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_DEAUTHEN_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_DEAUTHEN_FR_MAXLEN;
vMgrEncodeDeauthen(&sFrame);
@@ -1156,15 +999,10 @@ void vMgrDeAuthenBeginSta(void *hDeviceContext,
*
-*/
-static
-void
-s_vMgrRxAuthentication(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- )
+static void s_vMgrRxAuthentication(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket)
{
- WLAN_FR_AUTHEN sFrame;
+ WLAN_FR_AUTHEN sFrame;
// we better be an AP or a STA in AUTHPENDING otherwise ignore
if (!(pMgmt->eCurrMode == WMAC_MODE_ESS_AP ||
@@ -1214,25 +1052,21 @@ s_vMgrRxAuthentication(
-*/
-static
-void
-s_vMgrRxAuthenSequence_1(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
- )
+static void s_vMgrRxAuthenSequence_1(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, PWLAN_FR_AUTHEN pFrame)
{
- PSTxMgmtPacket pTxPacket = NULL;
- unsigned int uNodeIndex;
- WLAN_FR_AUTHEN sFrame;
- PSKeyItem pTransmitKey;
-
- // Insert a Node entry
- if (!BSSbIsSTAInNodeDB(pDevice, pFrame->pHdr->sA3.abyAddr2, &uNodeIndex)) {
- BSSvCreateOneNode((PSDevice)pDevice, &uNodeIndex);
- memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, pFrame->pHdr->sA3.abyAddr2,
- WLAN_ADDR_LEN);
- }
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ u32 uNodeIndex;
+ WLAN_FR_AUTHEN sFrame;
+ PSKeyItem pTransmitKey;
+
+ /* Insert a Node entry */
+ if (!BSSbIsSTAInNodeDB(pDevice, pFrame->pHdr->sA3.abyAddr2,
+ &uNodeIndex)) {
+ BSSvCreateOneNode(pDevice, &uNodeIndex);
+ memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr,
+ pFrame->pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
+ }
if (pMgmt->bShareKeyAlgorithm) {
pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_KNOWN;
@@ -1243,9 +1077,11 @@ s_vMgrRxAuthenSequence_1(
}
// send auth reply
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_AUTHEN_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
// format buffer structure
@@ -1285,7 +1121,7 @@ s_vMgrRxAuthenSequence_1(
sFrame.pChallenge->len = WLAN_CHALLENGE_LEN;
memset(pMgmt->abyChallenge, 0, WLAN_CHALLENGE_LEN);
// get group key
- if(KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, GROUP_KEY, &pTransmitKey) == TRUE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, GROUP_KEY, &pTransmitKey) == true) {
rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength+3);
rc4_encrypt(&pDevice->SBox, pMgmt->abyChallenge, pMgmt->abyChallenge, WLAN_CHALLENGE_LEN);
}
@@ -1320,16 +1156,11 @@ s_vMgrRxAuthenSequence_1(
*
-*/
-static
-void
-s_vMgrRxAuthenSequence_2(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
- )
+static void s_vMgrRxAuthenSequence_2(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, PWLAN_FR_AUTHEN pFrame)
{
- WLAN_FR_AUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
+ WLAN_FR_AUTHEN sFrame;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
switch (cpu_to_le16((*(pFrame->pwAuthAlgorithm))))
@@ -1355,9 +1186,13 @@ s_vMgrRxAuthenSequence_2(
case WLAN_AUTH_ALG_SHAREDKEY:
if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)
+ pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_AUTHEN_FR_MAXLEN);
+ pTxPacket->p80211Header
+ = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
// format buffer structure
@@ -1421,18 +1256,13 @@ s_vMgrRxAuthenSequence_2(
*
-*/
-static
-void
-s_vMgrRxAuthenSequence_3(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
- )
+static void s_vMgrRxAuthenSequence_3(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, PWLAN_FR_AUTHEN pFrame)
{
- PSTxMgmtPacket pTxPacket = NULL;
- unsigned int uStatusCode = 0 ;
- unsigned int uNodeIndex = 0;
- WLAN_FR_AUTHEN sFrame;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ u32 uStatusCode = 0 ;
+ u32 uNodeIndex = 0;
+ WLAN_FR_AUTHEN sFrame;
if (!WLAN_GET_FC_ISWEP(pFrame->pHdr->sA3.wFrameCtl)) {
uStatusCode = WLAN_MGMT_STATUS_CHALLENGE_FAIL;
@@ -1462,9 +1292,11 @@ s_vMgrRxAuthenSequence_3(
reply:
// send auth reply
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_AUTHEN_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
// format buffer structure
@@ -1509,13 +1341,8 @@ reply:
* None.
*
-*/
-static
-void
-s_vMgrRxAuthenSequence_4(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
- )
+static void s_vMgrRxAuthenSequence_4(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, PWLAN_FR_AUTHEN pFrame)
{
if ( cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS ){
@@ -1547,17 +1374,12 @@ s_vMgrRxAuthenSequence_4(
*
-*/
-static
-void
-s_vMgrRxDisassociation(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- )
+static void s_vMgrRxDisassociation(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket)
{
- WLAN_FR_DISASSOC sFrame;
- unsigned int uNodeIndex = 0;
- CMD_STATUS CmdStatus;
+ WLAN_FR_DISASSOC sFrame;
+ u32 uNodeIndex = 0;
+ CMD_STATUS CmdStatus;
if ( pMgmt->eCurrMode == WMAC_MODE_ESS_AP ){
// if is acting an AP..
@@ -1577,24 +1399,24 @@ s_vMgrRxDisassociation(
vMgrDecodeDisassociation(&sFrame);
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP disassociated me, reason=%d.\n", cpu_to_le16(*(sFrame.pwReason)));
- pDevice->fWPA_Authened = FALSE;
+ pDevice->fWPA_Authened = false;
//TODO: do something let upper layer know or
//try to send associate packet again because of inactivity timeout
if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- pDevice->bLinkPass = FALSE;
- pMgmt->sNodeDBTable[0].bActive = FALSE;
+ pDevice->bLinkPass = false;
+ pMgmt->sNodeDBTable[0].bActive = false;
pDevice->byReAssocCount = 0;
pMgmt->eCurrState = WMAC_STATE_AUTH; // jump back to the auth state!
pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
- vMgrReAssocBeginSta((PSDevice)pDevice, pMgmt, &CmdStatus);
+ vMgrReAssocBeginSta(pDevice, pMgmt, &CmdStatus);
if(CmdStatus == CMD_STATUS_PENDING) {
pDevice->byReAssocCount ++;
return; //mike add: you'll retry for many times, so it cann't be regarded as disconnected!
}
}
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1620,16 +1442,11 @@ s_vMgrRxDisassociation(
*
-*/
-static
-void
-s_vMgrRxDeauthentication(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- )
+static void s_vMgrRxDeauthentication(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket)
{
- WLAN_FR_DEAUTHEN sFrame;
- unsigned int uNodeIndex = 0;
+ WLAN_FR_DEAUTHEN sFrame;
+ u32 uNodeIndex = 0;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP ){
@@ -1650,22 +1467,22 @@ s_vMgrRxDeauthentication(
sFrame.len = pRxPacket->cbMPDULen;
sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
vMgrDecodeDeauthen(&sFrame);
- pDevice->fWPA_Authened = FALSE;
+ pDevice->fWPA_Authened = false;
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP deauthed me, reason=%d.\n", cpu_to_le16((*(sFrame.pwReason))));
// TODO: update BSS list for specific BSSID if pre-authentication case
if (!compare_ether_addr(sFrame.pHdr->sA3.abyAddr3,
pMgmt->abyCurrBSSID)) {
if (pMgmt->eCurrState >= WMAC_STATE_AUTHPENDING) {
- pMgmt->sNodeDBTable[0].bActive = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
}
}
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1692,23 +1509,19 @@ s_vMgrRxDeauthentication(
* True:exceed;
* False:normal case
-*/
-static BOOL
-ChannelExceedZoneType(
- PSDevice pDevice,
- BYTE byCurrChannel
- )
+static int ChannelExceedZoneType(struct vnt_private *pDevice, u8 byCurrChannel)
{
- BOOL exceed=FALSE;
+ int exceed = false;
switch(pDevice->byZoneType) {
case 0x00: //USA:1~11
if((byCurrChannel<1) ||(byCurrChannel>11))
- exceed = TRUE;
+ exceed = true;
break;
case 0x01: //Japan:1~13
case 0x02: //Europe:1~13
if((byCurrChannel<1) ||(byCurrChannel>13))
- exceed = TRUE;
+ exceed = true;
break;
default: //reserve for other zonetype
break;
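
For reference, a standalone sketch of the zone-type channel check this hunk converts from BOOL/TRUE to plain int/true. The zone codes and ranges (0x00 = USA 1-11, 0x01/0x02 = Japan/Europe 1-13) are taken from the hunk itself; the function name channel_exceeds_zone() and the small test harness are hypothetical, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool channel_exceeds_zone(uint8_t zone_type, uint8_t channel)
{
	switch (zone_type) {
	case 0x00:			/* USA: channels 1..11 */
		return channel < 1 || channel > 11;
	case 0x01:			/* Japan: channels 1..13 */
	case 0x02:			/* Europe: channels 1..13 */
		return channel < 1 || channel > 13;
	default:			/* reserved zone types: accept */
		return false;
	}
}

int main(void)
{
	printf("ch 12 in USA exceeds: %d\n", channel_exceeds_zone(0x00, 12));
	printf("ch 12 in EU  exceeds: %d\n", channel_exceeds_zone(0x02, 12));
	return 0;
}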
@@ -1728,39 +1541,33 @@ ChannelExceedZoneType(
*
-*/
-static
-void
-s_vMgrRxBeacon(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- BOOL bInScan
- )
+static void s_vMgrRxBeacon(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket,
+ int bInScan)
{
-
- PKnownBSS pBSSList;
- WLAN_FR_BEACON sFrame;
- QWORD qwTSFOffset;
- BOOL bIsBSSIDEqual = FALSE;
- BOOL bIsSSIDEqual = FALSE;
- BOOL bTSFLargeDiff = FALSE;
- BOOL bTSFOffsetPostive = FALSE;
- BOOL bUpdateTSF = FALSE;
- BOOL bIsAPBeacon = FALSE;
- BOOL bIsChannelEqual = FALSE;
- unsigned int uLocateByteIndex;
- BYTE byTIMBitOn = 0;
- WORD wAIDNumber = 0;
- unsigned int uNodeIndex;
- QWORD qwTimestamp, qwLocalTSF;
- QWORD qwCurrTSF;
- WORD wStartIndex = 0;
- WORD wAIDIndex = 0;
- BYTE byCurrChannel = pRxPacket->byRxChannel;
- ERPObject sERP;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- BOOL bChannelHit = FALSE;
- BYTE byOldPreambleType;
+ PKnownBSS pBSSList;
+ WLAN_FR_BEACON sFrame;
+ u64 qwTSFOffset;
+ int bIsBSSIDEqual = false;
+ int bIsSSIDEqual = false;
+ int bTSFLargeDiff = false;
+ int bTSFOffsetPostive = false;
+ int bUpdateTSF = false;
+ int bIsAPBeacon = false;
+ int bIsChannelEqual = false;
+ u32 uLocateByteIndex;
+ u8 byTIMBitOn = 0;
+ u16 wAIDNumber = 0;
+ u32 uNodeIndex;
+ u64 qwTimestamp, qwLocalTSF;
+ u64 qwCurrTSF;
+ u16 wStartIndex = 0;
+ u16 wAIDIndex = 0;
+ u8 byCurrChannel = pRxPacket->byRxChannel;
+ ERPObject sERP;
+ u32 uRateLen = WLAN_RATES_MAXLEN;
+ int bChannelHit = false;
+ u8 byOldPreambleType;
@@ -1787,31 +1594,31 @@ s_vMgrRxBeacon(
{
if (sFrame.pDSParms != NULL) {
if (byCurrChannel == RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1])
- bChannelHit = TRUE;
+ bChannelHit = true;
byCurrChannel = RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1];
} else {
- bChannelHit = TRUE;
+ bChannelHit = true;
}
} else {
if (sFrame.pDSParms != NULL) {
if (byCurrChannel == sFrame.pDSParms->byCurrChannel)
- bChannelHit = TRUE;
+ bChannelHit = true;
byCurrChannel = sFrame.pDSParms->byCurrChannel;
} else {
- bChannelHit = TRUE;
+ bChannelHit = true;
}
}
-if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
+if(ChannelExceedZoneType(pDevice,byCurrChannel)==true)
return;
if (sFrame.pERP != NULL) {
sERP.byERP = sFrame.pERP->byContext;
- sERP.bERPExist = TRUE;
+ sERP.bERPExist = true;
} else {
- sERP.bERPExist = FALSE;
+ sERP.bERPExist = false;
sERP.byERP = 0;
}
@@ -1866,7 +1673,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
}
if(byCurrChannel == (BYTE)pMgmt->uCurrChannel)
- bIsChannelEqual = TRUE;
+ bIsChannelEqual = true;
if (bIsChannelEqual && (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
@@ -1895,7 +1702,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->abyCurrBSSID,
WLAN_BSSID_LEN) == 0) {
- bIsBSSIDEqual = TRUE;
+ bIsBSSIDEqual = true;
pDevice->uCurrRSSI = pRxPacket->uRSSI;
pDevice->byCurrSQ = pRxPacket->bySQ;
if (pMgmt->sNodeDBTable[0].uInActiveCount != 0) {
@@ -1909,22 +1716,22 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID,
sFrame.pSSID->len
) == 0) {
- bIsSSIDEqual = TRUE;
+ bIsSSIDEqual = true;
}
}
- if ((WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo)== TRUE) &&
- (bIsBSSIDEqual == TRUE) &&
- (bIsSSIDEqual == TRUE) &&
+ if ((WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo)== true) &&
+ (bIsBSSIDEqual == true) &&
+ (bIsSSIDEqual == true) &&
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
// add state check to prevent reconnect fail since we'll receive Beacon
- bIsAPBeacon = TRUE;
+ bIsAPBeacon = true;
if (pBSSList != NULL) {
// Sync ERP field
- if ((pBSSList->sERP.bERPExist == TRUE) && (pDevice->byBBType == BB_TYPE_11G)) {
+ if ((pBSSList->sERP.bERPExist == true) && (pDevice->byBBType == BB_TYPE_11G)) {
if ((pBSSList->sERP.byERP & WLAN_EID_ERP_USE_PROTECTION) != pDevice->bProtectMode) {//0000 0010
pDevice->bProtectMode = (pBSSList->sERP.byERP & WLAN_EID_ERP_USE_PROTECTION);
if (pDevice->bProtectMode) {
@@ -1949,16 +1756,16 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
}
// Sync Short Slot Time
if (WLAN_GET_CAP_INFO_SHORTSLOTTIME(pBSSList->wCapInfo) != pDevice->bShortSlotTime) {
- BOOL bShortSlotTime;
+ bool bShortSlotTime;
bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(pBSSList->wCapInfo);
//DBG_PRN_WLAN05(("Set Short Slot Time: %d\n", pDevice->bShortSlotTime));
//Kyle check if it is OK to set G.
if (pDevice->byBBType == BB_TYPE_11A) {
- bShortSlotTime = TRUE;
+ bShortSlotTime = true;
}
else if (pDevice->byBBType == BB_TYPE_11B) {
- bShortSlotTime = FALSE;
+ bShortSlotTime = false;
}
if (bShortSlotTime != pDevice->bShortSlotTime) {
pDevice->bShortSlotTime = bShortSlotTime;
@@ -1994,7 +1801,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[0].wMaxBasicRate),
&(pMgmt->sNodeDBTable[0].wMaxSuppRate),
&(pMgmt->sNodeDBTable[0].wSuppRate),
@@ -2013,26 +1820,14 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
}
}
- HIDWORD(qwTimestamp) = cpu_to_le32(HIDWORD(*sFrame.pqwTimestamp));
- LODWORD(qwTimestamp) = cpu_to_le32(LODWORD(*sFrame.pqwTimestamp));
- HIDWORD(qwLocalTSF) = HIDWORD(pRxPacket->qwLocalTSF);
- LODWORD(qwLocalTSF) = LODWORD(pRxPacket->qwLocalTSF);
+ qwTimestamp = cpu_to_le64(*sFrame.pqwTimestamp);
+ qwLocalTSF = pRxPacket->qwLocalTSF;
// check if beacon TSF larger or small than our local TSF
- if (HIDWORD(qwTimestamp) == HIDWORD(qwLocalTSF)) {
- if (LODWORD(qwTimestamp) >= LODWORD(qwLocalTSF)) {
- bTSFOffsetPostive = TRUE;
- }
- else {
- bTSFOffsetPostive = FALSE;
- }
- }
- else if (HIDWORD(qwTimestamp) > HIDWORD(qwLocalTSF)) {
- bTSFOffsetPostive = TRUE;
- }
- else if (HIDWORD(qwTimestamp) < HIDWORD(qwLocalTSF)) {
- bTSFOffsetPostive = FALSE;
- }
+ if (qwTimestamp >= qwLocalTSF)
+ bTSFOffsetPostive = true;
+ else
+ bTSFOffsetPostive = false;
if (bTSFOffsetPostive) {
qwTSFOffset = CARDqGetTSFOffset(pRxPacket->byRxRate, (qwTimestamp), (qwLocalTSF));
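
A minimal sketch of the 64-bit TSF comparison this hunk collapses the HIDWORD/LODWORD logic into. The rate-dependent adjustment done by CARDqGetTSFOffset() in the driver is approximated here by a plain subtraction, and the TRIVIAL_SYNC_DIFFERENCE value below is a placeholder, not the driver's constant.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRIVIAL_SYNC_DIFFERENCE 128ULL	/* placeholder threshold */

int main(void)
{
	uint64_t beacon_tsf = 0x0000000100000010ULL;	/* from the beacon frame */
	uint64_t local_tsf  = 0x00000000fffffff0ULL;	/* from the local MAC */
	bool offset_positive = beacon_tsf >= local_tsf;
	uint64_t offset = offset_positive ? beacon_tsf - local_tsf
					  : local_tsf - beacon_tsf;
	bool large_diff = offset > TRIVIAL_SYNC_DIFFERENCE;

	printf("offset positive: %d, offset: %llu, large diff: %d\n",
	       offset_positive, (unsigned long long)offset, large_diff);
	return 0;
}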
@@ -2041,23 +1836,21 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
qwTSFOffset = CARDqGetTSFOffset(pRxPacket->byRxRate, (qwLocalTSF), (qwTimestamp));
}
- if (HIDWORD(qwTSFOffset) != 0 ||
- (LODWORD(qwTSFOffset) > TRIVIAL_SYNC_DIFFERENCE )) {
- bTSFLargeDiff = TRUE;
- }
+ if (qwTSFOffset > TRIVIAL_SYNC_DIFFERENCE)
+ bTSFLargeDiff = true;
// if infra mode
- if (bIsAPBeacon == TRUE) {
+ if (bIsAPBeacon == true) {
// Infra mode: Local TSF always follow AP's TSF if Difference huge.
if (bTSFLargeDiff)
- bUpdateTSF = TRUE;
+ bUpdateTSF = true;
- if ((pDevice->bEnablePSMode == TRUE) && (sFrame.pTIM)) {
+ if ((pDevice->bEnablePSMode == true) && (sFrame.pTIM)) {
/* deal with DTIM, analysis TIM */
- pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? TRUE : FALSE ;
+ pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? true : false ;
pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
pMgmt->byDTIMPeriod = sFrame.pTIM->byDTIMPeriod;
wAIDNumber = pMgmt->wCurrAID & ~(BIT14|BIT15);
@@ -2072,36 +1865,33 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// len = byDTIMCount + byDTIMPeriod + byDTIMPeriod + byVirtBitMap[0~250]
if (sFrame.pTIM->len >= (uLocateByteIndex + 4)) {
byTIMBitOn = (0x01) << ((wAIDNumber) % 8);
- pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? TRUE : FALSE;
+ pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? true : false;
}
else {
- pMgmt->bInTIM = FALSE;
+ pMgmt->bInTIM = false;
};
}
else {
- pMgmt->bInTIM = FALSE;
+ pMgmt->bInTIM = false;
};
if (pMgmt->bInTIM ||
(pMgmt->bMulticastTIM && (pMgmt->byDTIMCount == 0))) {
- pMgmt->bInTIMWake = TRUE;
- // send out ps-poll packet
-// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN:In TIM\n");
- if (pMgmt->bInTIM) {
- PSvSendPSPOLL((PSDevice)pDevice);
-// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN:PS-POLL sent..\n");
- }
+ pMgmt->bInTIMWake = true;
+ /* send out ps-poll packet */
+ if (pMgmt->bInTIM)
+ PSvSendPSPOLL(pDevice);
}
else {
- pMgmt->bInTIMWake = FALSE;
+ pMgmt->bInTIMWake = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Not In TIM..\n");
- if (pDevice->bPWBitOn == FALSE) {
+ if (pDevice->bPWBitOn == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Send Null Packet\n");
if (PSbSendNullPacket(pDevice))
- pDevice->bPWBitOn = TRUE;
+ pDevice->bPWBitOn = true;
}
- if(PSbConsiderPowerDown(pDevice, FALSE, FALSE)) {
+ if(PSbConsiderPowerDown(pDevice, false, false)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Power down now...\n");
}
}
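
A sketch of the TIM lookup behind pMgmt->bInTIM in the hunk above: given the station's AID and the beacon's TIM element, test whether the AID's bit is set in the partial virtual bitmap. The field layout follows IEEE 802.11 (DTIM count, DTIM period, bitmap control, partial bitmap); the struct and helper names are illustrative and not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tim_ie {
	uint8_t dtim_count;
	uint8_t dtim_period;
	uint8_t bitmap_ctl;	/* bit 0: multicast, bits 1-7: bitmap offset / 2 */
	uint8_t virt_bitmap[251];
	uint8_t bitmap_len;	/* valid bytes in virt_bitmap */
};

static bool aid_in_tim(const struct tim_ie *tim, uint16_t aid)
{
	uint16_t n1 = tim->bitmap_ctl & 0xFE;	/* index of first carried octet */
	uint16_t byte_index = (aid & 0x3FFF) / 8;	/* strip BIT14|BIT15 as the driver does */

	if (byte_index < n1 || byte_index >= n1 + tim->bitmap_len)
		return false;
	return tim->virt_bitmap[byte_index - n1] & (1u << (aid % 8));
}

int main(void)
{
	struct tim_ie tim = { .bitmap_ctl = 0, .bitmap_len = 2 };

	tim.virt_bitmap[0] = 0x08;	/* AID 3 buffered */
	printf("AID 3 in TIM: %d\n", aid_in_tim(&tim, 3));
	printf("AID 5 in TIM: %d\n", aid_in_tim(&tim, 5));
	return 0;
}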
@@ -2119,7 +1909,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// adhoc mode:TSF updated only when beacon larger then local TSF
if (bTSFLargeDiff && bTSFOffsetPostive &&
(pMgmt->eCurrState == WMAC_STATE_JOINTED))
- bUpdateTSF = TRUE;
+ bUpdateTSF = true;
// During dpc, already in spinlocked.
if (BSSbIsSTAInNodeDB(pDevice, sFrame.pHdr->sA3.abyAddr2, &uNodeIndex)) {
@@ -2132,7 +1922,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
@@ -2144,8 +1934,8 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->sNodeDBTable[uNodeIndex].uInActiveCount = 0;
}
else {
- // Todo, initial Node content
- BSSvCreateOneNode((PSDevice)pDevice, &uNodeIndex);
+ /* Todo, initial Node content */
+ BSSvCreateOneNode(pDevice, &uNodeIndex);
pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
@@ -2153,7 +1943,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
@@ -2167,7 +1957,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
/*
pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE;
+ pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
*/
}
@@ -2175,12 +1965,12 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
if (pMgmt->eCurrState == WMAC_STATE_STARTED) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Current IBSS State: [Started]........to: [Jointed] \n");
pMgmt->eCurrState = WMAC_STATE_JOINTED;
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
if (netif_queue_stopped(pDevice->dev)){
netif_wake_queue(pDevice->dev);
}
- pMgmt->sNodeDBTable[0].bActive = TRUE;
+ pMgmt->sNodeDBTable[0].bActive = true;
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
}
@@ -2256,26 +2046,22 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
*
-*/
-void vMgrCreateOwnIBSS(void *hDeviceContext,
- PCMD_STATUS pStatus)
+void vMgrCreateOwnIBSS(struct vnt_private *pDevice, PCMD_STATUS pStatus)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- WORD wMaxBasicRate;
- WORD wMaxSuppRate;
- BYTE byTopCCKBasicRate;
- BYTE byTopOFDMBasicRate;
- QWORD qwCurrTSF;
- unsigned int ii;
- BYTE abyRATE[] = {0x82, 0x84, 0x8B, 0x96, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60};
- BYTE abyCCK_RATE[] = {0x82, 0x84, 0x8B, 0x96};
- BYTE abyOFDM_RATE[] = {0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- WORD wSuppRate;
-
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u16 wMaxBasicRate;
+ u16 wMaxSuppRate;
+ u8 byTopCCKBasicRate;
+ u8 byTopOFDMBasicRate;
+ u64 qwCurrTSF = 0;
+ int ii;
+ u8 abyRATE[] = {0x82, 0x84, 0x8B, 0x96, 0x24, 0x30, 0x48, 0x6C, 0x0C,
+ 0x12, 0x18, 0x60};
+ u8 abyCCK_RATE[] = {0x82, 0x84, 0x8B, 0x96};
+ u8 abyOFDM_RATE[] = {0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
+ u16 wSuppRate;
- HIDWORD(qwCurrTSF) = 0;
- LODWORD(qwCurrTSF) = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create Basic Service Set .......\n");
@@ -2366,16 +2152,16 @@ void vMgrCreateOwnIBSS(void *hDeviceContext,
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, TRUE,
+ (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, true,
&wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
if (pDevice->byBBType == BB_TYPE_11A) {
- pDevice->bShortSlotTime = TRUE;
+ pDevice->bShortSlotTime = true;
} else {
- pDevice->bShortSlotTime = FALSE;
+ pDevice->bShortSlotTime = false;
}
BBvSetShortSlotTime(pDevice);
// vUpdateIFS() use pDevice->bShortSlotTime as parameter so it must be called
@@ -2420,12 +2206,12 @@ void vMgrCreateOwnIBSS(void *hDeviceContext,
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
// BSSID selected must be randomized as spec 11.1.3
- pMgmt->abyCurrBSSID[5] = (BYTE) (LODWORD(qwCurrTSF)& 0x000000ff);
- pMgmt->abyCurrBSSID[4] = (BYTE)((LODWORD(qwCurrTSF)& 0x0000ff00) >> 8);
- pMgmt->abyCurrBSSID[3] = (BYTE)((LODWORD(qwCurrTSF)& 0x00ff0000) >> 16);
- pMgmt->abyCurrBSSID[2] = (BYTE)((LODWORD(qwCurrTSF)& 0x00000ff0) >> 4);
- pMgmt->abyCurrBSSID[1] = (BYTE)((LODWORD(qwCurrTSF)& 0x000ff000) >> 12);
- pMgmt->abyCurrBSSID[0] = (BYTE)((LODWORD(qwCurrTSF)& 0x0ff00000) >> 20);
+ pMgmt->abyCurrBSSID[5] = (u8)(qwCurrTSF & 0x000000ff);
+ pMgmt->abyCurrBSSID[4] = (u8)((qwCurrTSF & 0x0000ff00) >> 8);
+ pMgmt->abyCurrBSSID[3] = (u8)((qwCurrTSF & 0x00ff0000) >> 16);
+ pMgmt->abyCurrBSSID[2] = (u8)((qwCurrTSF & 0x00000ff0) >> 4);
+ pMgmt->abyCurrBSSID[1] = (u8)((qwCurrTSF & 0x000ff000) >> 12);
+ pMgmt->abyCurrBSSID[0] = (u8)((qwCurrTSF & 0x0ff00000) >> 20);
pMgmt->abyCurrBSSID[5] ^= pMgmt->abyMACAddr[0];
pMgmt->abyCurrBSSID[4] ^= pMgmt->abyMACAddr[1];
pMgmt->abyCurrBSSID[3] ^= pMgmt->abyMACAddr[2];
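
A sketch of the IBSS BSSID randomization shown in this hunk: the low TSF bits are spread across the address bytes and XORed with the station MAC, then the result is forced to be an individual, locally administered address as 802.11 11.1.3 requires. The XOR of the last three MAC bytes and the final bit fix-up fall outside the hunk, so those lines extrapolate the visible pattern and are an assumption, not a copy of the driver code.

#include <stdint.h>
#include <stdio.h>

static void make_ibss_bssid(uint8_t bssid[6], uint64_t tsf, const uint8_t mac[6])
{
	bssid[5] = (uint8_t)(tsf & 0x000000ff);
	bssid[4] = (uint8_t)((tsf & 0x0000ff00) >> 8);
	bssid[3] = (uint8_t)((tsf & 0x00ff0000) >> 16);
	bssid[2] = (uint8_t)((tsf & 0x00000ff0) >> 4);
	bssid[1] = (uint8_t)((tsf & 0x000ff000) >> 12);
	bssid[0] = (uint8_t)((tsf & 0x0ff00000) >> 20);
	for (int i = 0; i < 6; i++)
		bssid[5 - i] ^= mac[i];	/* mix in the station MAC */
	bssid[0] &= ~0x01;		/* individual (not group) address */
	bssid[0] |= 0x02;		/* locally administered */
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t bssid[6];

	make_ibss_bssid(bssid, 0x123456789aULL, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
	return 0;
}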
@@ -2446,7 +2232,7 @@ void vMgrCreateOwnIBSS(void *hDeviceContext,
MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
pDevice->byRxMode |= RCR_BSSID;
- pMgmt->bCurrBSSIDFilterOn = TRUE;
+ pMgmt->bCurrBSSIDFilterOn = true;
// Set Capability Info
pMgmt->wCurrCapInfo = 0;
@@ -2511,26 +2297,25 @@ void vMgrCreateOwnIBSS(void *hDeviceContext,
*
-*/
-void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
+void vMgrJoinBSSBegin(struct vnt_private *pDevice, PCMD_STATUS pStatus)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PKnownBSS pCurr = NULL;
- unsigned int ii, uu;
- PWLAN_IE_SUPP_RATES pItemRates = NULL;
- PWLAN_IE_SUPP_RATES pItemExtRates = NULL;
- PWLAN_IE_SSID pItemSSID;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- WORD wMaxBasicRate = RATE_1M;
- WORD wMaxSuppRate = RATE_1M;
- WORD wSuppRate;
- BYTE byTopCCKBasicRate = RATE_1M;
- BYTE byTopOFDMBasicRate = RATE_1M;
- BOOL bShortSlotTime = FALSE;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ PKnownBSS pCurr = NULL;
+ int ii, uu;
+ PWLAN_IE_SUPP_RATES pItemRates = NULL;
+ PWLAN_IE_SUPP_RATES pItemExtRates = NULL;
+ PWLAN_IE_SSID pItemSSID;
+ u32 uRateLen = WLAN_RATES_MAXLEN;
+ u16 wMaxBasicRate = RATE_1M;
+ u16 wMaxSuppRate = RATE_1M;
+ u16 wSuppRate;
+ u8 byTopCCKBasicRate = RATE_1M;
+ u8 byTopOFDMBasicRate = RATE_1M;
+ u8 bShortSlotTime = false;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive == TRUE)
+ if (pMgmt->sBSSList[ii].bActive == true)
break;
}
@@ -2564,14 +2349,14 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
(pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
/*
if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == FALSE) {
+ if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No match RSN info. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
}
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == FALSE) {
+ if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No match RSN info. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -2581,7 +2366,7 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
*/
}
- //if(pDevice->bWPASuppWextEnabled == TRUE)
+ //if(pDevice->bWPASuppWextEnabled == true)
Encyption_Rebuild(pDevice, pCurr);
// Infrastructure BSS
@@ -2637,7 +2422,7 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
}
}
- RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, TRUE,
+ RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, true,
&wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
vUpdateIFS(pDevice);
@@ -2658,11 +2443,11 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
// Add current BSS to Candidate list
// This should only work for WPA2 BSS, and WPA2 BSS check must be done before.
if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
- BOOL bResult = bAdd_PMKID_Candidate((void *) pDevice,
+ bool bResult = bAdd_PMKID_Candidate((void *) pDevice,
pMgmt->abyCurrBSSID,
&pCurr->sRSNCapObj);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate: 1(%d)\n", bResult);
- if (bResult == FALSE) {
+ if (bResult == false) {
vFlush_PMKID_Candidate((void *) pDevice);
DBG_PRT(MSG_LEVEL_DEBUG,
KERN_INFO "vFlush_PMKID_Candidate: 4\n");
@@ -2712,10 +2497,10 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
//DBG_PRN_WLAN05(("wCapInfo: %X\n", pCurr->wCapInfo));
if (WLAN_GET_CAP_INFO_SHORTSLOTTIME(pCurr->wCapInfo) != pDevice->bShortSlotTime) {
if (pDevice->byBBType == BB_TYPE_11A) {
- bShortSlotTime = TRUE;
+ bShortSlotTime = true;
}
else if (pDevice->byBBType == BB_TYPE_11B) {
- bShortSlotTime = FALSE;
+ bShortSlotTime = false;
}
else {
bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(pCurr->wCapInfo);
@@ -2742,7 +2527,7 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
/*
- if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == FALSE) {
+ if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == false) {
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
@@ -2750,7 +2535,7 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
*/
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
/*
- if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == FALSE) {
+ if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == false) {
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
@@ -2783,7 +2568,7 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
// set basic rate
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- NULL, TRUE, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
+ NULL, true, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
vUpdateIFS(pDevice);
pMgmt->wCurrCapInfo = pCurr->wCapInfo;
@@ -2796,7 +2581,7 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
pMgmt->eCurrState = WMAC_STATE_STARTED;
// Adopt BSS state in Adapter Device Object
pDevice->eOPMode = OP_MODE_ADHOC;
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
@@ -2836,30 +2621,27 @@ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
* PCM_STATUS
*
-*/
-static
-void
-s_vMgrSynchBSS (
- PSDevice pDevice,
- unsigned int uBSSMode,
- PKnownBSS pCurr,
- PCMD_STATUS pStatus
- )
+static void s_vMgrSynchBSS(struct vnt_private *pDevice, u32 uBSSMode,
+ PKnownBSS pCurr, PCMD_STATUS pStatus)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- //1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M
- BYTE abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
- BYTE abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
- //6M, 9M, 12M, 48M
- BYTE abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- BYTE abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
-
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ u8 abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES,
+ 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
+ /* 1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M*/
+ u8 abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES,
+ 4, 0x0C, 0x12, 0x18, 0x60};
+ /* 6M, 9M, 12M, 48M*/
+ u8 abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES,
+ 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
+ u8 abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES,
+ 4, 0x02, 0x04, 0x0B, 0x16};
*pStatus = CMD_STATUS_FAILURE;
if (s_bCipherMatch(pCurr,
pDevice->eEncryptionStatus,
&(pMgmt->byCSSPK),
- &(pMgmt->byCSSGK)) == FALSE) {
+ &(pMgmt->byCSSGK)) == false) {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "s_bCipherMatch Fail .......\n");
return;
}
@@ -2872,12 +2654,12 @@ s_vMgrSynchBSS (
}
// Init the BSS informations
- pDevice->bCCK = TRUE;
- pDevice->bProtectMode = FALSE;
+ pDevice->bCCK = true;
+ pDevice->bProtectMode = false;
MACvDisableProtectMD(pDevice);
- pDevice->bBarkerPreambleMd = FALSE;
+ pDevice->bBarkerPreambleMd = false;
MACvDisableBarkerPreambleMd(pDevice);
- pDevice->bNonERPPresent = FALSE;
+ pDevice->bNonERPPresent = false;
pDevice->byPreambleType = 0;
pDevice->wBasicRate = 0;
// Set Basic Rate
@@ -2907,7 +2689,7 @@ s_vMgrSynchBSS (
(pDevice->eConfigPHYMode == PHY_TYPE_AUTO)) {
pDevice->byBBType = BB_TYPE_11A;
pMgmt->eCurrentPHYMode = PHY_TYPE_11A;
- pDevice->bShortSlotTime = TRUE;
+ pDevice->bShortSlotTime = true;
BBvSetShortSlotTime(pDevice);
CARDvSetBSSMode(pDevice);
} else {
@@ -2919,7 +2701,7 @@ s_vMgrSynchBSS (
(pDevice->eConfigPHYMode == PHY_TYPE_AUTO)) {
pDevice->byBBType = BB_TYPE_11B;
pMgmt->eCurrentPHYMode = PHY_TYPE_11B;
- pDevice->bShortSlotTime = FALSE;
+ pDevice->bShortSlotTime = false;
BBvSetShortSlotTime(pDevice);
CARDvSetBSSMode(pDevice);
} else {
@@ -2930,12 +2712,12 @@ s_vMgrSynchBSS (
(pDevice->eConfigPHYMode == PHY_TYPE_AUTO)) {
pDevice->byBBType = BB_TYPE_11G;
pMgmt->eCurrentPHYMode = PHY_TYPE_11G;
- pDevice->bShortSlotTime = TRUE;
+ pDevice->bShortSlotTime = true;
BBvSetShortSlotTime(pDevice);
CARDvSetBSSMode(pDevice);
} else if (pDevice->eConfigPHYMode == PHY_TYPE_11B) {
pDevice->byBBType = BB_TYPE_11B;
- pDevice->bShortSlotTime = FALSE;
+ pDevice->bShortSlotTime = false;
BBvSetShortSlotTime(pDevice);
CARDvSetBSSMode(pDevice);
} else {
@@ -2947,7 +2729,7 @@ s_vMgrSynchBSS (
MACvRegBitsOff(pDevice, MAC_REG_HOSTCR, HOSTCR_ADHOC);
MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
pDevice->byRxMode |= RCR_BSSID;
- pMgmt->bCurrBSSIDFilterOn = TRUE;
+ pMgmt->bCurrBSSIDFilterOn = true;
}
// set channel and clear NAV
@@ -2971,7 +2753,7 @@ s_vMgrSynchBSS (
MACvRegBitsOn(pDevice, MAC_REG_HOSTCR, HOSTCR_ADHOC);
MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
pDevice->byRxMode |= RCR_BSSID;
- pMgmt->bCurrBSSIDFilterOn = TRUE;
+ pMgmt->bCurrBSSIDFilterOn = true;
}
if (pDevice->byBBType == BB_TYPE_11A) {
@@ -2991,21 +2773,13 @@ s_vMgrSynchBSS (
return;
};
-
-//mike add: fix NetworkManager 0.7.0 hidden ssid mode in WPA encryption
-// ,need reset eAuthenMode and eEncryptionStatus
- static void Encyption_Rebuild(
- PSDevice pDevice,
- PKnownBSS pCurr
- )
+static void Encyption_Rebuild(struct vnt_private *pDevice, PKnownBSS pCurr)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- /* unsigned int ii, uSameBssidNum=0; */
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- // if( uSameBssidNum>=2) { //we only check AP in hidden sssid mode
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || //networkmanager 0.7.0 does not give the pairwise-key selsection,
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { // so we need re-selsect it according to real pairwise-key info.
- if(pCurr->bWPAValid == TRUE) { //WPA-PSK
+ if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
+ (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
+ if (pCurr->bWPAValid == true) { /*WPA-PSK */
pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
if(pCurr->abyPKType[0] == WPA_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
@@ -3016,7 +2790,7 @@ s_vMgrSynchBSS (
PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-AES]\n");
}
}
- else if(pCurr->bWPA2Valid == TRUE) { //WPA2-PSK
+ else if(pCurr->bWPA2Valid == true) { //WPA2-PSK
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
if(pCurr->abyCSSPK[0] == WLAN_11i_CSS_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
@@ -3044,20 +2818,15 @@ s_vMgrSynchBSS (
*
-*/
-static
-void
-s_vMgrFormatTIM(
- PSMgmtObject pMgmt,
- PWLAN_IE_TIM pTIM
- )
+static void s_vMgrFormatTIM(struct vnt_manager *pMgmt, PWLAN_IE_TIM pTIM)
{
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- BYTE byMap;
- unsigned int ii, jj;
- BOOL bStartFound = FALSE;
- BOOL bMulticast = FALSE;
- WORD wStartIndex = 0;
- WORD wEndIndex = 0;
+ u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ u8 byMap;
+ int ii, jj;
+ int bStartFound = false;
+ int bMulticast = false;
+ u16 wStartIndex = 0;
+ u16 wEndIndex = 0;
// Find size of partial virtual bitmap
@@ -3067,13 +2836,13 @@ s_vMgrFormatTIM(
// Mask out the broadcast bit which is indicated separately.
bMulticast = (byMap & byMask[0]) != 0;
if(bMulticast) {
- pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[0].bRxPSPoll = true;
}
byMap = 0;
}
if (byMap) {
if (!bStartFound) {
- bStartFound = TRUE;
+ bStartFound = true;
wStartIndex = (WORD)ii;
}
wEndIndex = (WORD)ii;
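
A sketch of what s_vMgrFormatTIM assembles: scan the per-AID bitmap for the first and last non-zero octets, note the group-traffic bit (bit 0), and emit the TIM bitmap-control byte plus the partial virtual bitmap. The structure below is illustrative; the driver writes into PWLAN_IE_TIM, and the even-offset rounding follows the 802.11 encoding rather than being copied from the hunk.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITMAP_OCTETS 251

struct tim_out {
	uint8_t bitmap_ctl;
	uint8_t partial[BITMAP_OCTETS];
	uint8_t partial_len;
};

static void format_tim(const uint8_t bitmap[BITMAP_OCTETS], struct tim_out *out)
{
	int start = 0, end = 0;
	bool found = false;
	bool multicast = bitmap[0] & 0x01;

	for (int i = 0; i < BITMAP_OCTETS; i++) {
		uint8_t octet = bitmap[i];

		if (i == 0)
			octet &= ~0x01;	/* broadcast bit is signalled separately */
		if (!octet)
			continue;
		if (!found) {
			found = true;
			start = i & ~1;	/* bitmap offset must be even */
		}
		end = i;
	}
	out->partial_len = found ? end - start + 1 : 1;
	memcpy(out->partial, &bitmap[found ? start : 0], out->partial_len);
	out->bitmap_ctl = (uint8_t)start | (multicast ? 0x01 : 0x00);
}

int main(void)
{
	uint8_t bitmap[BITMAP_OCTETS] = { 0 };
	struct tim_out out;

	bitmap[0] = 0x01;	/* group traffic buffered */
	bitmap[4] = 0x20;	/* AID 37 buffered */
	format_tim(bitmap, &out);
	printf("ctl=0x%02x len=%u\n", out.bitmap_ctl, out.partial_len);
	return 0;
}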
@@ -3119,30 +2888,23 @@ s_vMgrFormatTIM(
*
-*/
-static
-PSTxMgmtPacket
-s_MgrMakeBeacon(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- WORD wCurrATIMWinodw,
- PWLAN_IE_SSID pCurrSSID,
- PBYTE pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- )
+static struct vnt_tx_mgmt *s_MgrMakeBeacon(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wCurrBeaconPeriod,
+ u32 uCurrChannel, u16 wCurrATIMWinodw, PWLAN_IE_SSID pCurrSSID,
+ u8 *pCurrBSSID, PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_BEACON sFrame;
- BYTE abyBroadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ WLAN_FR_BEACON sFrame;
+ u8 abyBroadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- // prepare beacon frame
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_BEACON_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ /* prepare beacon frame */
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_BEACON_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
// Setup the sFrame structure.
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_BEACON_FR_MAXLEN;
@@ -3243,11 +3005,11 @@ s_MgrMakeBeacon(
sFrame.pERP->byElementID = WLAN_EID_ERP;
sFrame.pERP->len = 1;
sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode == TRUE)
+ if (pDevice->bProtectMode == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent == TRUE)
+ if (pDevice->bNonERPPresent == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd == TRUE)
+ if (pDevice->bBarkerPreambleMd == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
}
if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
@@ -3259,7 +3021,7 @@ s_MgrMakeBeacon(
);
}
// hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == TRUE)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == true)) {
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pMgmt->wWPAIELen != 0) {
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3294,30 +3056,22 @@ s_MgrMakeBeacon(
-PSTxMgmtPacket
-s_MgrMakeProbeResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- WORD wCurrATIMWinodw,
- PBYTE pDstAddr,
- PWLAN_IE_SSID pCurrSSID,
- PBYTE pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- BYTE byPHYType
- )
+struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wCurrBeaconPeriod,
+ u32 uCurrChannel, u16 wCurrATIMWinodw, u8 *pDstAddr,
+ PWLAN_IE_SSID pCurrSSID, u8 *pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates, u8 byPHYType)
{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_PROBERESP sFrame;
-
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ WLAN_FR_PROBERESP sFrame;
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBERESP_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_PROBERESP_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
// Setup the sFrame structure.
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_PROBERESP_FR_MAXLEN;
@@ -3377,11 +3131,11 @@ s_MgrMakeProbeResponse(
sFrame.pERP->byElementID = WLAN_EID_ERP;
sFrame.pERP->len = 1;
sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode == TRUE)
+ if (pDevice->bProtectMode == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent == TRUE)
+ if (pDevice->bNonERPPresent == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd == TRUE)
+ if (pDevice->bBarkerPreambleMd == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
}
@@ -3395,7 +3149,7 @@ s_MgrMakeProbeResponse(
}
// hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == TRUE)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == true)) {
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pMgmt->wWPAIELen != 0) {
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3426,27 +3180,24 @@ s_MgrMakeProbeResponse(
-*/
-PSTxMgmtPacket
-s_MgrMakeAssocRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PBYTE pDAddr,
- WORD wCurrCapInfo,
- WORD wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- )
+struct vnt_tx_mgmt *s_MgrMakeAssocRequest(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u8 *pDAddr, u16 wCurrCapInfo,
+ u16 wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_ASSOCREQ sFrame;
- PBYTE pbyIEs;
- PBYTE pbyRSN;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ WLAN_FR_ASSOCREQ sFrame;
+ u8 *pbyIEs;
+ u8 *pbyRSN;
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_ASSOCREQ_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
// Setup the sFrame structure.
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_ASSOCREQ_FR_MAXLEN;
@@ -3625,7 +3376,7 @@ s_MgrMakeAssocRequest(
sFrame.pRSN->len +=6;
// RSN Capabilites
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
sFrame.pRSN->abyRSN[16] = 0;
@@ -3633,7 +3384,7 @@ s_MgrMakeAssocRequest(
}
sFrame.pRSN->len +=2;
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == TRUE) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
+ if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == true) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pbyRSN = &sFrame.pRSN->abyRSN[18];
pwPMKID = (PWORD)pbyRSN; // Point to PMKID count
@@ -3688,27 +3439,23 @@ s_MgrMakeAssocRequest(
-*/
-PSTxMgmtPacket
-s_MgrMakeReAssocRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PBYTE pDAddr,
- WORD wCurrCapInfo,
- WORD wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- )
+struct vnt_tx_mgmt *s_MgrMakeReAssocRequest(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u8 *pDAddr, u16 wCurrCapInfo,
+ u16 wListenInterval, PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_REASSOCREQ sFrame;
- PBYTE pbyIEs;
- PBYTE pbyRSN;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ WLAN_FR_REASSOCREQ sFrame;
+ u8 *pbyIEs;
+ u8 *pbyRSN;
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset( pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_REASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_REASSOCREQ_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
/* Setup the sFrame structure. */
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_REASSOCREQ_FR_MAXLEN;
@@ -3885,7 +3632,7 @@ s_MgrMakeReAssocRequest(
sFrame.pRSN->len +=6;
// RSN Capabilites
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
sFrame.pRSN->abyRSN[16] = 0;
@@ -3893,7 +3640,7 @@ s_MgrMakeReAssocRequest(
}
sFrame.pRSN->len +=2;
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == TRUE) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
+ if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == true) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pbyRSN = &sFrame.pRSN->abyRSN[18];
pwPMKID = (PWORD)pbyRSN; // Point to PMKID count
@@ -3942,25 +3689,20 @@ s_MgrMakeReAssocRequest(
*
-*/
-PSTxMgmtPacket
-s_MgrMakeAssocResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wAssocStatus,
- WORD wAssocAID,
- PBYTE pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- )
+struct vnt_tx_mgmt *s_MgrMakeAssocResponse(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wAssocStatus,
+ u16 wAssocAID, u8 *pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_ASSOCRESP sFrame;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ WLAN_FR_ASSOCRESP sFrame;
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_ASSOCREQ_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
// Setup the sFrame structure
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN;
@@ -4016,25 +3758,20 @@ s_MgrMakeAssocResponse(
-*/
-PSTxMgmtPacket
-s_MgrMakeReAssocResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wAssocStatus,
- WORD wAssocAID,
- PBYTE pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
- )
+struct vnt_tx_mgmt *s_MgrMakeReAssocResponse(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wAssocStatus,
+ u16 wAssocAID, u8 *pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_REASSOCRESP sFrame;
+ struct vnt_tx_mgmt *pTxPacket = NULL;
+ WLAN_FR_REASSOCRESP sFrame;
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket = (struct vnt_tx_mgmt *)pMgmt->pbyMgmtPacketPool;
+ memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt)
+ + WLAN_ASSOCREQ_FR_MAXLEN);
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ + sizeof(struct vnt_tx_mgmt));
// Setup the sFrame structure
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN;
@@ -4089,19 +3826,14 @@ s_MgrMakeReAssocResponse(
*
-*/
-static
-void
-s_vMgrRxProbeResponse(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- )
+static void s_vMgrRxProbeResponse(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket)
{
- PKnownBSS pBSSList = NULL;
- WLAN_FR_PROBERESP sFrame;
- BYTE byCurrChannel = pRxPacket->byRxChannel;
- ERPObject sERP;
- BOOL bChannelHit = TRUE;
+ PKnownBSS pBSSList = NULL;
+ WLAN_FR_PROBERESP sFrame;
+ u8 byCurrChannel = pRxPacket->byRxChannel;
+ ERPObject sERP;
+ int bChannelHit = true;
memset(&sFrame, 0, sizeof(WLAN_FR_PROBERESP));
@@ -4132,31 +3864,31 @@ s_vMgrRxProbeResponse(
if (sFrame.pDSParms) {
if (byCurrChannel ==
RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1])
- bChannelHit = TRUE;
+ bChannelHit = true;
byCurrChannel =
RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1];
} else {
- bChannelHit = TRUE;
+ bChannelHit = true;
}
} else {
if (sFrame.pDSParms) {
if (byCurrChannel == sFrame.pDSParms->byCurrChannel)
- bChannelHit = TRUE;
+ bChannelHit = true;
byCurrChannel = sFrame.pDSParms->byCurrChannel;
} else {
- bChannelHit = TRUE;
+ bChannelHit = true;
}
}
//RobertYu:20050201
-if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
+if(ChannelExceedZoneType(pDevice,byCurrChannel)==true)
return;
if (sFrame.pERP) {
sERP.byERP = sFrame.pERP->byContext;
- sERP.bERPExist = TRUE;
+ sERP.bERPExist = true;
} else {
- sERP.bERPExist = FALSE;
+ sERP.bERPExist = false;
sERP.byERP = 0;
}
@@ -4221,18 +3953,13 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
-*/
-static
-void
-s_vMgrRxProbeRequest(
- PSDevice pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
- )
+static void s_vMgrRxProbeRequest(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt, struct vnt_rx_mgmt *pRxPacket)
{
- WLAN_FR_PROBEREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- BYTE byPHYType = BB_TYPE_11B;
+ WLAN_FR_PROBEREQ sFrame;
+ CMD_STATUS Status;
+ struct vnt_tx_mgmt *pTxPacket;
+ u8 byPHYType = BB_TYPE_11B;
	// STA in Ad-hoc mode: when the latest TBTT beacon transmits successfully,
	// the STA has to respond to this request.
@@ -4307,15 +4034,13 @@ s_vMgrRxProbeRequest(
*
-*/
-void vMgrRxManagePacket(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket)
+void vMgrRxManagePacket(struct vnt_private *pDevice, struct vnt_manager *pMgmt,
+ struct vnt_rx_mgmt *pRxPacket)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- BOOL bInScan = FALSE;
- unsigned int uNodeIndex = 0;
- NODE_STATE eNodeState = 0;
- CMD_STATUS Status;
+ int bInScan = false;
+ u32 uNodeIndex = 0;
+ NODE_STATE eNodeState = 0;
+ CMD_STATUS Status;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -4348,7 +4073,7 @@ void vMgrRxManagePacket(void *hDeviceContext,
case WLAN_FSTYPE_ASSOCRESP:
		// Frame Class = 2
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx assocresp1\n");
- s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, FALSE);
+ s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, false);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx assocresp2\n");
break;
@@ -4375,7 +4100,7 @@ void vMgrRxManagePacket(void *hDeviceContext,
case WLAN_FSTYPE_REASSOCRESP:
		// Frame Class = 2
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx reassocresp\n");
- s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, TRUE);
+ s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, true);
break;
case WLAN_FSTYPE_PROBEREQ:
@@ -4395,7 +4120,7 @@ void vMgrRxManagePacket(void *hDeviceContext,
		// Frame Class = 0
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx beacon\n");
if (pMgmt->eScanState != WMAC_NO_SCANNING) {
- bInScan = TRUE;
+ bInScan = true;
}
s_vMgrRxBeacon(pDevice, pMgmt, pRxPacket, bInScan);
break;
@@ -4450,15 +4175,15 @@ void vMgrRxManagePacket(void *hDeviceContext,
* Prepare beacon to send
*
* Return Value:
- * TRUE if success; FALSE if failed.
+ * true if success; false if failed.
*
-*/
-BOOL bMgrPrepareBeaconToSend(void *hDeviceContext, PSMgmtObject pMgmt)
+int bMgrPrepareBeaconToSend(struct vnt_private *pDevice,
+ struct vnt_manager *pMgmt)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PSTxMgmtPacket pTxPacket;
+ struct vnt_tx_mgmt *pTxPacket;
-// pDevice->bBeaconBufReady = FALSE;
+// pDevice->bBeaconBufReady = false;
if (pDevice->bEncryptionEnable || pDevice->bEnable8021x){
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
}
@@ -4481,12 +4206,12 @@ BOOL bMgrPrepareBeaconToSend(void *hDeviceContext, PSMgmtObject pMgmt)
if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
(pMgmt->abyCurrBSSID[0] == 0))
- return FALSE;
+ return false;
csBeacon_xmit(pDevice, pTxPacket);
MACvRegBitsOn(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
- return TRUE;
+ return true;
}
@@ -4504,12 +4229,7 @@ BOOL bMgrPrepareBeaconToSend(void *hDeviceContext, PSMgmtObject pMgmt)
* none.
*
-*/
-static
-void
-s_vMgrLogStatus(
- PSMgmtObject pMgmt,
- WORD wStatus
- )
+static void s_vMgrLogStatus(struct vnt_manager *pMgmt, u16 wStatus)
{
switch( wStatus ){
case WLAN_MGMT_STATUS_UNSPEC_FAILURE:
@@ -4574,21 +4294,19 @@ s_vMgrLogStatus(
*
-*/
-BOOL bAdd_PMKID_Candidate(void *hDeviceContext,
- PBYTE pbyBSSID,
- PSRSNCapObject psRSNCapObj)
+int bAdd_PMKID_Candidate(struct vnt_private *pDevice, u8 *pbyBSSID,
+ PSRSNCapObject psRSNCapObj)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
- PPMKID_CANDIDATE pCandidateList;
- unsigned int ii = 0;
+ PPMKID_CANDIDATE pCandidateList;
+ int ii = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate START: (%d)\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
if ((pDevice == NULL) || (pbyBSSID == NULL) || (psRSNCapObj == NULL))
- return FALSE;
+ return false;
if (pDevice->gsPMKIDCandidate.NumCandidates >= MAX_PMKIDLIST)
- return FALSE;
+ return false;
@@ -4596,7 +4314,7 @@ BOOL bAdd_PMKID_Candidate(void *hDeviceContext,
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if ((psRSNCapObj->bRSNCapExist == TRUE)
+ if ((psRSNCapObj->bRSNCapExist == true)
&& (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |=
NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
@@ -4604,13 +4322,13 @@ BOOL bAdd_PMKID_Candidate(void *hDeviceContext,
pCandidateList->Flags &=
~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
}
- return TRUE;
+ return true;
}
}
// New Candidate
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if ((psRSNCapObj->bRSNCapExist == TRUE) && (psRSNCapObj->wRSNCap & BIT0)) {
+ if ((psRSNCapObj->bRSNCapExist == true) && (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -4618,7 +4336,7 @@ BOOL bAdd_PMKID_Candidate(void *hDeviceContext,
memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
pDevice->gsPMKIDCandidate.NumCandidates++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"NumCandidates:%d\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
- return TRUE;
+ return true;
}
/*
@@ -4636,17 +4354,17 @@ BOOL bAdd_PMKID_Candidate(void *hDeviceContext,
*
-*/
-void vFlush_PMKID_Candidate(void *hDeviceContext)
+void vFlush_PMKID_Candidate(struct vnt_private *pDevice)
{
- PSDevice pDevice = (PSDevice)hDeviceContext;
+ if (pDevice == NULL)
+ return;
- if (pDevice == NULL)
- return;
+ memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent));
- memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent));
+ return;
}
-static BOOL
+static bool
s_bCipherMatch (
PKnownBSS pBSSNode,
NDIS_802_11_ENCRYPTION_STATUS EncStatus,
@@ -4659,7 +4377,7 @@ s_bCipherMatch (
int i;
if (pBSSNode == NULL)
- return FALSE;
+ return false;
// check cap. of BSS
if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
@@ -4669,7 +4387,7 @@ s_bCipherMatch (
}
if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (pBSSNode->bWPA2Valid == TRUE) &&
+ (pBSSNode->bWPA2Valid == true) &&
((EncStatus == Ndis802_11Encryption3Enabled) ||
(EncStatus == Ndis802_11Encryption2Enabled))) {
@@ -4704,7 +4422,7 @@ s_bCipherMatch (
}
} else if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (pBSSNode->bWPAValid == TRUE) &&
+ (pBSSNode->bWPAValid == true) &&
((EncStatus == Ndis802_11Encryption2Enabled) || (EncStatus == Ndis802_11Encryption3Enabled))) {
//WPA
// check Group Key Cipher
@@ -4746,9 +4464,9 @@ s_bCipherMatch (
(byCipherMask == 0)) {
*pbyCCSGK = KEY_CTL_WEP;
*pbyCCSPK = KEY_CTL_NONE;
- return TRUE;
+ return true;
} else {
- return FALSE;
+ return false;
}
} else if (EncStatus == Ndis802_11Encryption2Enabled) {
@@ -4756,45 +4474,45 @@ s_bCipherMatch (
(byCipherMask == 0)) {
*pbyCCSGK = KEY_CTL_TKIP;
*pbyCCSPK = KEY_CTL_NONE;
- return TRUE;
+ return true;
} else if ((byMulticastCipher == KEY_CTL_WEP) &&
((byCipherMask & 0x02) != 0)) {
*pbyCCSGK = KEY_CTL_WEP;
*pbyCCSPK = KEY_CTL_TKIP;
- return TRUE;
+ return true;
} else if ((byMulticastCipher == KEY_CTL_TKIP) &&
((byCipherMask & 0x02) != 0)) {
*pbyCCSGK = KEY_CTL_TKIP;
*pbyCCSPK = KEY_CTL_TKIP;
- return TRUE;
+ return true;
} else {
- return FALSE;
+ return false;
}
} else if (EncStatus == Ndis802_11Encryption3Enabled) {
if ((byMulticastCipher == KEY_CTL_CCMP) &&
(byCipherMask == 0)) {
			// When CCMP is enabled, "Use group cipher suite" shall not be a valid option.
- return FALSE;
+ return false;
} else if ((byMulticastCipher == KEY_CTL_WEP) &&
((byCipherMask & 0x04) != 0)) {
*pbyCCSGK = KEY_CTL_WEP;
*pbyCCSPK = KEY_CTL_CCMP;
- return TRUE;
+ return true;
} else if ((byMulticastCipher == KEY_CTL_TKIP) &&
((byCipherMask & 0x04) != 0)) {
*pbyCCSGK = KEY_CTL_TKIP;
*pbyCCSPK = KEY_CTL_CCMP;
- return TRUE;
+ return true;
} else if ((byMulticastCipher == KEY_CTL_CCMP) &&
((byCipherMask & 0x04) != 0)) {
*pbyCCSGK = KEY_CTL_CCMP;
*pbyCCSPK = KEY_CTL_CCMP;
- return TRUE;
+ return true;
} else {
- return FALSE;
+ return false;
}
}
- return TRUE;
+ return true;
}
diff --git a/drivers/staging/vt6656/wmgr.h b/drivers/staging/vt6656/wmgr.h
index 52b1b562b141..83aed45f68a3 100644
--- a/drivers/staging/vt6656/wmgr.h
+++ b/drivers/staging/vt6656/wmgr.h
@@ -218,216 +218,199 @@ typedef enum tagWMAC_POWER_MODE {
-// Tx Management Packet descriptor
-typedef struct tagSTxMgmtPacket {
-
- PUWLAN_80211HDR p80211Header;
- unsigned int cbMPDULen;
- unsigned int cbPayloadLen;
-
-} STxMgmtPacket, *PSTxMgmtPacket;
-
-
-// Rx Management Packet descriptor
-typedef struct tagSRxMgmtPacket {
-
- PUWLAN_80211HDR p80211Header;
- QWORD qwLocalTSF;
- unsigned int cbMPDULen;
- unsigned int cbPayloadLen;
- unsigned int uRSSI;
- BYTE bySQ;
- BYTE byRxRate;
- BYTE byRxChannel;
-
-} SRxMgmtPacket, *PSRxMgmtPacket;
-
-
-
-typedef struct tagSMgmtObject
-{
+/* Tx Management Packet descriptor */
+struct vnt_tx_mgmt {
+ PUWLAN_80211HDR p80211Header;
+ u32 cbMPDULen;
+ u32 cbPayloadLen;
+};
+
+
+/* Rx Management Packet descriptor */
+struct vnt_rx_mgmt {
+ PUWLAN_80211HDR p80211Header;
+ u64 qwLocalTSF;
+ u32 cbMPDULen;
+ u32 cbPayloadLen;
+ u32 uRSSI;
+ u8 bySQ;
+ u8 byRxRate;
+ u8 byRxChannel;
+};
+
+
+struct vnt_manager {
void *pAdapter;
- // MAC address
- BYTE abyMACAddr[WLAN_ADDR_LEN];
-
- // Configuration Mode
- WMAC_CONFIG_MODE eConfigMode; // MAC pre-configed mode
-
- CARD_PHY_TYPE eCurrentPHYMode;
-
-
- // Operation state variables
- WMAC_CURRENT_MODE eCurrMode; // MAC current connection mode
- WMAC_BSS_STATE eCurrState; // MAC current BSS state
- WMAC_BSS_STATE eLastState; // MAC last BSS state
-
- PKnownBSS pCurrBSS;
- BYTE byCSSGK;
- BYTE byCSSPK;
-
-// BYTE abyNewSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
-// BYTE abyNewExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
- BOOL bCurrBSSIDFilterOn;
-
- // Current state vars
- unsigned int uCurrChannel;
- BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyCurrSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BYTE abyCurrBSSID[WLAN_BSSID_LEN];
- WORD wCurrCapInfo;
- WORD wCurrAID;
- unsigned int uRSSITrigger;
- WORD wCurrATIMWindow;
- WORD wCurrBeaconPeriod;
- BOOL bIsDS;
- BYTE byERPContext;
-
- CMD_STATE eCommandState;
- unsigned int uScanChannel;
-
- // Desire joinning BSS vars
- BYTE abyDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BYTE abyDesireBSSID[WLAN_BSSID_LEN];
-
-//restore BSS info for Ad-Hoc mode
- BYTE abyAdHocSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
-
- // Adhoc or AP configuration vars
- WORD wIBSSBeaconPeriod;
- WORD wIBSSATIMWindow;
- unsigned int uIBSSChannel;
- BYTE abyIBSSSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE byAPBBType;
- BYTE abyWPAIE[MAX_WPA_IE_LEN];
- WORD wWPAIELen;
-
- unsigned int uAssocCount;
- BOOL bMoreData;
-
- // Scan state vars
- WMAC_SCAN_STATE eScanState;
- WMAC_SCAN_TYPE eScanType;
- unsigned int uScanStartCh;
- unsigned int uScanEndCh;
- WORD wScanSteps;
- unsigned int uScanBSSType;
- // Desire scannig vars
- BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BYTE abyScanBSSID[WLAN_BSSID_LEN];
-
- // Privacy
- WMAC_AUTHENTICATION_MODE eAuthenMode;
- BOOL bShareKeyAlgorithm;
- BYTE abyChallenge[WLAN_CHALLENGE_LEN];
- BOOL bPrivacyInvoked;
-
- // Received beacon state vars
- BOOL bInTIM;
- BOOL bMulticastTIM;
- BYTE byDTIMCount;
- BYTE byDTIMPeriod;
-
- // Power saving state vars
- WMAC_POWER_MODE ePSMode;
- WORD wListenInterval;
- WORD wCountToWakeUp;
- BOOL bInTIMWake;
- PBYTE pbyPSPacketPool;
- BYTE byPSPacketPool[sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN];
- BOOL bRxBeaconInTBTTWake;
- BYTE abyPSTxMap[MAX_NODE_NUM + 1];
-
- // management command related
- unsigned int uCmdBusy;
- unsigned int uCmdHostAPBusy;
-
- // management packet pool
- PBYTE pbyMgmtPacketPool;
- BYTE byMgmtPacketPool[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
-
-
- // One second callback timer
- struct timer_list sTimerSecondCallback;
-
- // Temporarily Rx Mgmt Packet Descriptor
- SRxMgmtPacket sRxPacket;
-
- // link list of known bss's (scan results)
- KnownBSS sBSSList[MAX_BSS_NUM];
- /* link list of same bss's */
- KnownBSS pSameBSS[6] ;
- BOOL Cisco_cckm ;
- BYTE Roam_dbm;
-
- // table list of known node
- // sNodeDBList[0] is reserved for AP under Infra mode
- // sNodeDBList[0] is reserved for Multicast under adhoc/AP mode
- KnownNodeDB sNodeDBTable[MAX_NODE_NUM + 1];
-
-
- // WPA2 PMKID Cache
- SPMKIDCache gsPMKIDCache;
- BOOL bRoaming;
-
- // rate fall back vars
-
-
-
- // associate info
- SAssocInfo sAssocInfo;
-
-
- // for 802.11h
- BOOL b11hEnable;
- BOOL bSwitchChannel;
- BYTE byNewChannel;
- PWLAN_IE_MEASURE_REP pCurrMeasureEIDRep;
- unsigned int uLengthOfRepEIDs;
- BYTE abyCurrentMSRReq[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
- BYTE abyCurrentMSRRep[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
- BYTE abyIECountry[WLAN_A3FR_MAXLEN];
- BYTE abyIBSSDFSOwner[6];
- BYTE byIBSSDFSRecovery;
-
- struct sk_buff skb;
-
-} SMgmtObject, *PSMgmtObject;
+ /* MAC address */
+ u8 abyMACAddr[WLAN_ADDR_LEN];
+
+ /* Configuration Mode */
+	WMAC_CONFIG_MODE eConfigMode;	/* MAC pre-configured mode */
+
+ CARD_PHY_TYPE eCurrentPHYMode;
+
+ /* Operation state variables */
+ WMAC_CURRENT_MODE eCurrMode; /* MAC current connection mode */
+ WMAC_BSS_STATE eCurrState; /* MAC current BSS state */
+ WMAC_BSS_STATE eLastState; /* MAC last BSS state */
+
+ PKnownBSS pCurrBSS;
+ u8 byCSSGK;
+ u8 byCSSPK;
+
+ int bCurrBSSIDFilterOn;
+
+ /* Current state vars */
+ u32 uCurrChannel;
+ u8 abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ u8 abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ u8 abyCurrSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ u8 abyCurrBSSID[WLAN_BSSID_LEN];
+ u16 wCurrCapInfo;
+ u16 wCurrAID;
+ u32 uRSSITrigger;
+ u16 wCurrATIMWindow;
+ u16 wCurrBeaconPeriod;
+ int bIsDS;
+ u8 byERPContext;
+
+ CMD_STATE eCommandState;
+ u32 uScanChannel;
+
+	/* Desire joining BSS vars */
+ u8 abyDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ u8 abyDesireBSSID[WLAN_BSSID_LEN];
+
+	/* restore BSS info for Ad-Hoc mode */
+ u8 abyAdHocSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+
+ /* Adhoc or AP configuration vars */
+ u16 wIBSSBeaconPeriod;
+ u16 wIBSSATIMWindow;
+ u32 uIBSSChannel;
+ u8 abyIBSSSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ u8 byAPBBType;
+ u8 abyWPAIE[MAX_WPA_IE_LEN];
+ u16 wWPAIELen;
+
+ u32 uAssocCount;
+ int bMoreData;
+
+ /* Scan state vars */
+ WMAC_SCAN_STATE eScanState;
+ WMAC_SCAN_TYPE eScanType;
+ u32 uScanStartCh;
+ u32 uScanEndCh;
+ u16 wScanSteps;
+ u32 uScanBSSType;
+	/* Desire scanning vars */
+ u8 abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ u8 abyScanBSSID[WLAN_BSSID_LEN];
+
+ /* Privacy */
+ WMAC_AUTHENTICATION_MODE eAuthenMode;
+ int bShareKeyAlgorithm;
+ u8 abyChallenge[WLAN_CHALLENGE_LEN];
+ int bPrivacyInvoked;
+
+ /* Received beacon state vars */
+ int bInTIM;
+ int bMulticastTIM;
+ u8 byDTIMCount;
+ u8 byDTIMPeriod;
+
+ /* Power saving state vars */
+ WMAC_POWER_MODE ePSMode;
+ u16 wListenInterval;
+ u16 wCountToWakeUp;
+ int bInTIMWake;
+ u8 *pbyPSPacketPool;
+ u8 byPSPacketPool[sizeof(struct vnt_tx_mgmt)
+ + WLAN_NULLDATA_FR_MAXLEN];
+ int bRxBeaconInTBTTWake;
+ u8 abyPSTxMap[MAX_NODE_NUM + 1];
+
+ /* management command related */
+ u32 uCmdBusy;
+ u32 uCmdHostAPBusy;
+
+ /* management packet pool */
+ u8 *pbyMgmtPacketPool;
+ u8 byMgmtPacketPool[sizeof(struct vnt_tx_mgmt)
+ + WLAN_A3FR_MAXLEN];
+
+
+ /* One second callback timer */
+ struct timer_list sTimerSecondCallback;
+
+	/* Temporary Rx Mgmt Packet Descriptor */
+ struct vnt_rx_mgmt sRxPacket;
+
+	/* linked list of known bss's (scan results) */
+ KnownBSS sBSSList[MAX_BSS_NUM];
+	/* linked list of same bss's */
+ KnownBSS pSameBSS[6];
+ int Cisco_cckm;
+ u8 Roam_dbm;
+
+ /* table list of known node */
+ /* sNodeDBList[0] is reserved for AP under Infra mode */
+ /* sNodeDBList[0] is reserved for Multicast under adhoc/AP mode */
+ KnownNodeDB sNodeDBTable[MAX_NODE_NUM + 1];
+
+ /* WPA2 PMKID Cache */
+ SPMKIDCache gsPMKIDCache;
+ int bRoaming;
+
+ /* associate info */
+ SAssocInfo sAssocInfo;
+
+ /* for 802.11h */
+ int b11hEnable;
+ int bSwitchChannel;
+ u8 byNewChannel;
+ PWLAN_IE_MEASURE_REP pCurrMeasureEIDRep;
+ u32 uLengthOfRepEIDs;
+ u8 abyCurrentMSRReq[sizeof(struct vnt_tx_mgmt)
+ + WLAN_A3FR_MAXLEN];
+ u8 abyCurrentMSRRep[sizeof(struct vnt_tx_mgmt)
+ + WLAN_A3FR_MAXLEN];
+ u8 abyIECountry[WLAN_A3FR_MAXLEN];
+ u8 abyIBSSDFSOwner[6];
+ u8 byIBSSDFSRecovery;
+
+ struct sk_buff skb;
+
+};
/*--------------------- Export Macros ------------------------------*/
/*--------------------- Export Functions --------------------------*/
-void vMgrObjectInit(void *hDeviceContext);
+void vMgrObjectInit(struct vnt_private *pDevice);
-void vMgrAssocBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus);
+void vMgrAssocBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *, PCMD_STATUS pStatus);
-void vMgrReAssocBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus);
+void vMgrReAssocBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *, PCMD_STATUS pStatus);
-void vMgrDisassocBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PBYTE abyDestAddress,
- WORD wReason,
- PCMD_STATUS pStatus);
+void vMgrDisassocBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *, u8 *abyDestAddress, u16 wReason,
+ PCMD_STATUS pStatus);
-void vMgrAuthenBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus);
+void vMgrAuthenBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *, PCMD_STATUS pStatus);
-void vMgrCreateOwnIBSS(void *hDeviceContext,
- PCMD_STATUS pStatus);
+void vMgrCreateOwnIBSS(struct vnt_private *pDevice,
+ PCMD_STATUS pStatus);
-void vMgrJoinBSSBegin(void *hDeviceContext,
- PCMD_STATUS pStatus);
+void vMgrJoinBSSBegin(struct vnt_private *pDevice,
+ PCMD_STATUS pStatus);
-void vMgrRxManagePacket(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket);
+void vMgrRxManagePacket(struct vnt_private *pDevice,
+ struct vnt_manager *, struct vnt_rx_mgmt *);
/*
void
@@ -437,19 +420,16 @@ vMgrScanBegin(
);
*/
-void vMgrDeAuthenBeginSta(void *hDeviceContext,
- PSMgmtObject pMgmt,
- PBYTE abyDestAddress,
- WORD wReason,
- PCMD_STATUS pStatus);
+void vMgrDeAuthenBeginSta(struct vnt_private *pDevice,
+ struct vnt_manager *, u8 *abyDestAddress, u16 wReason,
+ PCMD_STATUS pStatus);
-BOOL bMgrPrepareBeaconToSend(void *hDeviceContext,
- PSMgmtObject pMgmt);
+int bMgrPrepareBeaconToSend(struct vnt_private *pDevice,
+ struct vnt_manager *);
-BOOL bAdd_PMKID_Candidate(void *hDeviceContext,
- PBYTE pbyBSSID,
- PSRSNCapObject psRSNCapObj);
+int bAdd_PMKID_Candidate(struct vnt_private *pDevice,
+ u8 *pbyBSSID, PSRSNCapObject psRSNCapObj);
-void vFlush_PMKID_Candidate(void *hDeviceContext);
+void vFlush_PMKID_Candidate(struct vnt_private *pDevice);
#endif /* __WMGR_H__ */
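Reading aside, not part of the patch: the repeated pattern in the wmgr.c hunks above -- memset(pTxPacket, 0, sizeof(struct vnt_tx_mgmt) + WLAN_*_FR_MAXLEN) followed by pointing p80211Header just past the descriptor -- relies on the pool layout declared in this header: byMgmtPacketPool holds one struct vnt_tx_mgmt descriptor with the raw 802.11 frame placed immediately behind it. A minimal stand-alone sketch of that layout follows; the types, sizes and names here are simplified stand-ins for illustration, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the driver types -- illustration only. */
struct fake_80211_hdr { uint8_t bytes[24]; };

struct tx_mgmt_sketch {
	struct fake_80211_hdr *p80211Header;
	uint32_t cbMPDULen;
	uint32_t cbPayloadLen;
};

#define FRAME_MAXLEN 512	/* stand-in for WLAN_A3FR_MAXLEN */

/* One descriptor plus one maximum-size frame, mirroring
 * byMgmtPacketPool[sizeof(struct vnt_tx_mgmt) + WLAN_A3FR_MAXLEN]. */
static uint8_t mgmt_packet_pool[sizeof(struct tx_mgmt_sketch) + FRAME_MAXLEN];

static struct tx_mgmt_sketch *prepare_tx_mgmt(void)
{
	struct tx_mgmt_sketch *pkt = (struct tx_mgmt_sketch *)mgmt_packet_pool;

	/* Zero descriptor and frame area in one call, as the patched helpers do. */
	memset(pkt, 0, sizeof(*pkt) + FRAME_MAXLEN);

	/* The 802.11 header starts directly after the descriptor in the pool. */
	pkt->p80211Header = (struct fake_80211_hdr *)((uint8_t *)pkt + sizeof(*pkt));

	return pkt;
}

int main(void)
{
	struct tx_mgmt_sketch *pkt = prepare_tx_mgmt();

	printf("frame body starts %u bytes into the pool\n",
	       (unsigned int)((uint8_t *)pkt->p80211Header - mgmt_packet_pool));
	return 0;
}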
diff --git a/drivers/staging/vt6656/wpa.c b/drivers/staging/vt6656/wpa.c
index f6429a26ae0f..f037be3aa164 100644
--- a/drivers/staging/vt6656/wpa.c
+++ b/drivers/staging/vt6656/wpa.c
@@ -83,9 +83,9 @@ WPA_ClearRSN(
pBSSList->wAuthCount = 0;
pBSSList->byDefaultK_as_PK = 0;
pBSSList->byReplayIdx = 0;
- pBSSList->sRSNCapObj.bRSNCapExist = FALSE;
+ pBSSList->sRSNCapObj.bRSNCapExist = false;
pBSSList->sRSNCapObj.wRSNCap = 0;
- pBSSList->bWPAValid = FALSE;
+ pBSSList->bWPAValid = false;
}
@@ -212,14 +212,14 @@ WPA_ParseRSN(
pbyCaps = (PBYTE)pIE_RSN_Auth->AuthKSList[n].abyOUI;
pBSSList->byDefaultK_as_PK = (*pbyCaps) & WPA_GROUPFLAG;
pBSSList->byReplayIdx = 2 << ((*pbyCaps >> WPA_REPLAYBITSSHIFT) & WPA_REPLAYBITS);
- pBSSList->sRSNCapObj.bRSNCapExist = TRUE;
+ pBSSList->sRSNCapObj.bRSNCapExist = true;
pBSSList->sRSNCapObj.wRSNCap = *(PWORD)pbyCaps;
//DBG_PRN_GRP14(("pbyCaps: %X\n", *pbyCaps));
//DBG_PRN_GRP14(("byDefaultK_as_PK: %X\n", pBSSList->byDefaultK_as_PK));
//DBG_PRN_GRP14(("byReplayIdx: %X\n", pBSSList->byReplayIdx));
}
}
- pBSSList->bWPAValid = TRUE;
+ pBSSList->bWPAValid = true;
}
}
@@ -239,7 +239,7 @@ WPA_ParseRSN(
* Return Value: none.
*
-*/
-BOOL
+bool
WPA_SearchRSN(
BYTE byCmd,
BYTE byEncrypt,
@@ -249,14 +249,14 @@ WPA_SearchRSN(
int ii;
BYTE byPKType = WPA_NONE;
- if (pBSSList->bWPAValid == FALSE)
- return FALSE;
+ if (pBSSList->bWPAValid == false)
+ return false;
switch(byCmd) {
case 0:
if (byEncrypt != pBSSList->byGKType)
- return FALSE;
+ return false;
if (pBSSList->wPKCount > 0) {
for (ii = 0; ii < pBSSList->wPKCount; ii ++) {
@@ -270,9 +270,9 @@ WPA_SearchRSN(
byPKType = WPA_WEP104;
}
if (byEncrypt != byPKType)
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
// if (pBSSList->wAuthCount > 0)
// for (ii=0; ii < pBSSList->wAuthCount; ii ++)
// if (byAuth == pBSSList->abyAuthType[ii])
@@ -282,7 +282,7 @@ WPA_SearchRSN(
default:
break;
}
- return FALSE;
+ return false;
}
/*+
@@ -299,20 +299,20 @@ WPA_SearchRSN(
* Return Value: none.
*
-*/
-BOOL
+bool
WPAb_Is_RSN(
PWLAN_IE_RSN_EXT pRSN
)
{
if (pRSN == NULL)
- return FALSE;
+ return false;
if ((pRSN->len >= 6) && // oui1(4)+ver(2)
(pRSN->byElementID == WLAN_EID_RSN_WPA) && !memcmp(pRSN->abyOUI, abyOUI01, 4) &&
(pRSN->wVersion == 1)) {
- return TRUE;
+ return true;
}
else
- return FALSE;
+ return false;
}
diff --git a/drivers/staging/vt6656/wpa.h b/drivers/staging/vt6656/wpa.h
index 889489adbb81..0369cbf32c49 100644
--- a/drivers/staging/vt6656/wpa.h
+++ b/drivers/staging/vt6656/wpa.h
@@ -69,14 +69,14 @@ WPA_ParseRSN(
PWLAN_IE_RSN_EXT pRSN
);
-BOOL
+bool
WPA_SearchRSN(
BYTE byCmd,
BYTE byEncrypt,
PKnownBSS pBSSList
);
-BOOL
+bool
WPAb_Is_RSN(
PWLAN_IE_RSN_EXT pRSN
);
diff --git a/drivers/staging/vt6656/wpa2.c b/drivers/staging/vt6656/wpa2.c
index 616e24dcf42b..a89456a9137a 100644
--- a/drivers/staging/vt6656/wpa2.c
+++ b/drivers/staging/vt6656/wpa2.c
@@ -78,7 +78,7 @@ WPA2_ClearRSN (
{
int ii;
- pBSSNode->bWPA2Valid = FALSE;
+ pBSSNode->bWPA2Valid = false;
pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
for (ii=0; ii < 4; ii ++)
@@ -87,7 +87,7 @@ WPA2_ClearRSN (
for (ii=0; ii < 4; ii ++)
pBSSNode->abyAKMSSAuthType[ii] = WLAN_11i_AKMSS_802_1X;
pBSSNode->wAKMSSAuthCount = 1;
- pBSSNode->sRSNCapObj.bRSNCapExist = FALSE;
+ pBSSNode->sRSNCapObj.bRSNCapExist = false;
pBSSNode->sRSNCapObj.wRSNCap = 0;
}
@@ -115,7 +115,7 @@ WPA2vParseRSN (
int i, j;
WORD m = 0, n = 0;
PBYTE pbyOUI;
- BOOL bUseGK = FALSE;
+ bool bUseGK = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"WPA2_ParseRSN: [%d]\n", pRSN->len);
@@ -123,7 +123,7 @@ WPA2vParseRSN (
if (pRSN->len == 2) { // ver(2)
if ((pRSN->byElementID == WLAN_EID_RSN) && (pRSN->wVersion == 1)) {
- pBSSNode->bWPA2Valid = TRUE;
+ pBSSNode->bWPA2Valid = true;
}
return;
}
@@ -158,7 +158,7 @@ WPA2vParseRSN (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"802.11i CSS: %X\n", pBSSNode->byCSSGK);
if (pRSN->len == 6) {
- pBSSNode->bWPA2Valid = TRUE;
+ pBSSNode->bWPA2Valid = true;
return;
}
@@ -172,7 +172,7 @@ WPA2vParseRSN (
if (pRSN->len >= 8+i*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*i)
if ( !memcmp(pbyOUI, abyOUIGK, 4)) {
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_USE_GROUP;
- bUseGK = TRUE;
+ bUseGK = true;
} else if ( !memcmp(pbyOUI, abyOUIWEP40, 4)) {
// Invalid CSS, continue parsing
} else if ( !memcmp(pbyOUI, abyOUITKIP, 4)) {
@@ -194,7 +194,7 @@ WPA2vParseRSN (
break;
} //for
- if (bUseGK == TRUE) {
+ if (bUseGK == true) {
if (j != 1) {
// invalid CSS, This should be only PK CSS.
return;
@@ -236,12 +236,12 @@ WPA2vParseRSN (
n = *((PWORD) &(pRSN->abyRSN[6+4*m]));
if (pRSN->len >= 12+4*m+4*n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
- pBSSNode->sRSNCapObj.bRSNCapExist = TRUE;
+ pBSSNode->sRSNCapObj.bRSNCapExist = true;
pBSSNode->sRSNCapObj.wRSNCap = *((PWORD) &(pRSN->abyRSN[8+4*m+4*n]));
}
}
		// ignore PMKID lists because only (Re)AssocRequest has this field
- pBSSNode->bWPA2Valid = TRUE;
+ pBSSNode->bWPA2Valid = true;
}
}
@@ -260,19 +260,16 @@ WPA2vParseRSN (
* Return Value: length of IEs.
*
-*/
-unsigned int
-WPA2uSetIEs(void *pMgmtHandle,
- PWLAN_IE_RSN pRSNIEs
- )
+unsigned int WPA2uSetIEs(void *pMgmtHandle, PWLAN_IE_RSN pRSNIEs)
{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
- PBYTE pbyBuffer = NULL;
- unsigned int ii = 0;
- PWORD pwPMKID = NULL;
+ struct vnt_manager *pMgmt = (struct vnt_manager *)pMgmtHandle;
+ u8 *pbyBuffer = NULL;
+ int ii = 0;
+ u16 *pwPMKID = NULL;
+
+ if (pRSNIEs == NULL)
+ return 0;
- if (pRSNIEs == NULL) {
- return(0);
- }
if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
(pMgmt->pCurrBSS != NULL)) {
@@ -328,7 +325,7 @@ WPA2uSetIEs(void *pMgmtHandle,
pRSNIEs->len +=6;
	// RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
memcpy(&pRSNIEs->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
pRSNIEs->abyRSN[16] = 0;
@@ -337,7 +334,7 @@ WPA2uSetIEs(void *pMgmtHandle,
pRSNIEs->len +=2;
if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
- (pMgmt->bRoaming == TRUE) &&
+ (pMgmt->bRoaming == true) &&
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
/* RSN PMKID, pointer to PMKID count */
pwPMKID = (PWORD)(&pRSNIEs->abyRSN[18]);
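Reading aside for the WPA2vParseRSN() and WPA2uSetIEs() hunks above, not part of the patch: the offset arithmetic -- n read at abyRSN[6+4*m], the capability word at abyRSN[8+4*m+4*n], and the length checks commented ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2) -- walks the RSN element body sketched below. The helper is a hypothetical reading aid derived from those comments; with one pairwise suite and one AKM suite it reproduces the abyRSN[16] capability and abyRSN[18] PMKID-count offsets seen in the WPA2uSetIEs() hunk.

#include <stdio.h>

/* Offsets into abyRSN[] as WPA2vParseRSN() walks them; element ID, length
 * and version live in the separate fields of PWLAN_IE_RSN_EXT.
 * m = pairwise cipher suite count, n = AKM suite count. */
static void print_rsn_offsets(unsigned int m, unsigned int n)
{
	printf("group cipher suite : abyRSN[0..3]\n");
	printf("pairwise count (m) : abyRSN[4..5]\n");
	printf("pairwise suites    : abyRSN[6..%u]\n", 6 + 4 * m - 1);
	printf("AKM count (n)      : abyRSN[%u..%u]\n", 6 + 4 * m, 7 + 4 * m);
	printf("AKM suites         : abyRSN[%u..%u]\n", 8 + 4 * m, 8 + 4 * m + 4 * n - 1);
	printf("RSN capabilities   : abyRSN[%u..%u]\n", 8 + 4 * m + 4 * n, 9 + 4 * m + 4 * n);
}

int main(void)
{
	print_rsn_offsets(1, 1);	/* one pairwise suite, one AKM suite */
	return 0;
}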
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index cc1d48bced2d..53629b26f24d 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -67,14 +67,14 @@ static int msglevel = MSG_LEVEL_INFO;
* Return Value:
*
*/
-int wpa_set_keys(PSDevice pDevice, void *ctx)
+int wpa_set_keys(struct vnt_private *pDevice, void *ctx)
{
struct viawget_wpa_param *param = ctx;
- PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
DWORD dwKeyIndex = 0;
BYTE abyKey[MAX_KEY_LEN];
BYTE abySeq[MAX_KEY_LEN];
- QWORD KeyRSC;
+ u64 KeyRSC;
BYTE byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
int uu;
@@ -87,9 +87,9 @@ int wpa_set_keys(PSDevice pDevice, void *ctx)
param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pDevice->bEncryptionEnable = FALSE;
+ pDevice->bEncryptionEnable = false;
pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = FALSE;
+ pDevice->bTransmitKey = false;
for (uu=0; uu<MAX_KEY_TABLE; uu++) {
MACvDisableKeyEntry(pDevice, uu);
}
@@ -109,7 +109,7 @@ int wpa_set_keys(PSDevice pDevice, void *ctx)
} else {
if (param->u.wpa_key.set_tx) {
pDevice->byKeyIndex = (BYTE)dwKeyIndex;
- pDevice->bTransmitKey = TRUE;
+ pDevice->bTransmitKey = true;
dwKeyIndex |= (1 << 31);
}
KeybSetDefaultKey( pDevice,
@@ -123,7 +123,7 @@ int wpa_set_keys(PSDevice pDevice, void *ctx)
}
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
return ret;
}
@@ -136,9 +136,9 @@ int wpa_set_keys(PSDevice pDevice, void *ctx)
if (param->u.wpa_key.seq_len > 0) {
for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) {
if (ii < 4)
- LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
+ KeyRSC |= (abySeq[ii] << (ii * 8));
else
- HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
+ KeyRSC |= (abySeq[ii] << ((ii-4) * 8));
}
dwKeyIndex |= 1 << 29;
}
@@ -203,18 +203,18 @@ int wpa_set_keys(PSDevice pDevice, void *ctx)
if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex,
param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
+ &KeyRSC,
(PBYTE)abyKey,
byKeyDecMode
- ) == TRUE) &&
+ ) == true) &&
(KeybSetDefaultKey(pDevice,
&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
+ &KeyRSC,
(PBYTE)abyKey,
byKeyDecMode
- ) == TRUE) ) {
+ ) == true) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
} else {
return -EINVAL;
@@ -234,8 +234,8 @@ int wpa_set_keys(PSDevice pDevice, void *ctx)
}
if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0],
dwKeyIndex, param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode
- ) == TRUE) {
+ &KeyRSC, (PBYTE)abyKey, byKeyDecMode
+ ) == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
} else {
// Key Table Full
@@ -251,9 +251,9 @@ int wpa_set_keys(PSDevice pDevice, void *ctx)
} // BSSID not 0xffffffffffff
if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
- pDevice->bTransmitKey = TRUE;
+ pDevice->bTransmitKey = true;
}
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
return ret;
}
diff --git a/drivers/staging/vt6656/wpactl.h b/drivers/staging/vt6656/wpactl.h
index b4ec6b0e1c67..2235ee97012e 100644
--- a/drivers/staging/vt6656/wpactl.h
+++ b/drivers/staging/vt6656/wpactl.h
@@ -52,6 +52,6 @@ typedef unsigned long long NDIS_802_11_KEY_RSC;
/*--------------------- Export Functions --------------------------*/
-int wpa_set_keys(PSDevice pDevice, void *ctx);
+int wpa_set_keys(struct vnt_private *, void *ctx);
#endif /* __WPACL_H__ */
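Side note on the wpa_set_keys() hunk above in drivers/staging/vt6656/wpactl.c: the patch turns KeyRSC from a QWORD manipulated through LODWORD()/HIDWORD() into a plain u64 filled by shifting the sequence bytes into place. A minimal user-space sketch of that byte assembly follows; the helper name is made up for illustration, and the explicit 64-bit cast before the shift is an assumption added here so that shifts of 32 bits or more stay well defined.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Assemble a little-endian 64-bit receive sequence counter from up to
 * eight sequence bytes, mirroring the loop in the patched wpa_set_keys(). */
static uint64_t build_key_rsc(const uint8_t *seq, size_t seq_len)
{
	uint64_t rsc = 0;
	size_t i;

	for (i = 0; i < seq_len && i < 8; i++)
		rsc |= (uint64_t)seq[i] << (i * 8);	/* cast keeps the shift 64-bit */

	return rsc;
}

int main(void)
{
	const uint8_t seq[6] = { 0x21, 0x43, 0x65, 0x87, 0xa9, 0xcb };

	printf("KeyRSC = 0x%016llx\n",
	       (unsigned long long)build_key_rsc(seq, sizeof(seq)));
	return 0;
}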
diff --git a/drivers/staging/winbond/Kconfig b/drivers/staging/winbond/Kconfig
index a29f60836b77..db5b053d9bc2 100644
--- a/drivers/staging/winbond/Kconfig
+++ b/drivers/staging/winbond/Kconfig
@@ -8,4 +8,4 @@ config W35UND
Hardware is present in some Kohjinsha subnotebooks, and in some
stand-alone USB modules. Chipset name seems to be w89c35d.
- Check http://code.google.com/p/winbondport/ for new version.
+	  Check <http://code.google.com/p/winbondport/> for a newer version.
diff --git a/drivers/staging/wlags49_h2/ap_h2.c b/drivers/staging/wlags49_h2/ap_h2.c
index e524153e925d..3a08d421c735 100644
--- a/drivers/staging/wlags49_h2/ap_h2.c
+++ b/drivers/staging/wlags49_h2/ap_h2.c
@@ -3256,7 +3256,7 @@ static const CFG_PROG_STRCT fw_image_code[] = {
0x0146, /* sizeof(fw_image_1_data), */
0x00000060, /* Target address in NIC Memory */
0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
- (hcf_8 FAR *) fw_image_1_data
+ (hcf_8 *)fw_image_1_data
},
{
8,
@@ -3265,7 +3265,7 @@ static const CFG_PROG_STRCT fw_image_code[] = {
0x1918, /* sizeof(fw_image_2_data), */
0x00000C16, /* Target address in NIC Memory */
0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
- (hcf_8 FAR *) fw_image_2_data
+ (hcf_8 *)fw_image_2_data
},
{
8,
@@ -3274,7 +3274,7 @@ static const CFG_PROG_STRCT fw_image_code[] = {
0x01bc, /* sizeof(fw_image_3_data), */
0x001E252E, /* Target address in NIC Memory */
0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
- (hcf_8 FAR *) fw_image_3_data
+ (hcf_8 *)fw_image_3_data
},
{
8,
@@ -3283,7 +3283,7 @@ static const CFG_PROG_STRCT fw_image_code[] = {
0xab28, /* sizeof(fw_image_4_data), */
0x001F4000, /* Target address in NIC Memory */
0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
- (hcf_8 FAR *) fw_image_4_data
+ (hcf_8 *)fw_image_4_data
},
{
5,
diff --git a/drivers/staging/wlags49_h2/ap_h25.c b/drivers/staging/wlags49_h2/ap_h25.c
index f4491cbd08d3..d3a0faa3ab97 100644
--- a/drivers/staging/wlags49_h2/ap_h25.c
+++ b/drivers/staging/wlags49_h2/ap_h25.c
@@ -24,10 +24,10 @@
*/
-#include "hcfcfg.h" // to get hcf_16 etc defined as well as
- // possible settings which inluence mdd.h or dhf.h
-#include "mdd.h" //to get COMP_ID_STA etc defined
-#include "dhf.h" //used to be "fhfmem.h", to get memblock,plugrecord,
+#include "hcfcfg.h" /* to get hcf_16 etc defined as well as */
+			/* possible settings which influence mdd.h or dhf.h */
+#include "mdd.h" /* to get COMP_ID_STA etc defined */
+#include "dhf.h" /* used to be fhfmem.h, to get memblock,plugrecord, */
static const hcf_8 fw_image_1_data[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3996,59 +3996,59 @@ static const hcf_8 fw_image_4_data[] = {
static const CFG_IDENTITY_STRCT fw_image_infoidentity[] = {
{
- sizeof( CFG_IDENTITY_STRCT ) / sizeof(hcf_16) - 1,
+ sizeof(CFG_IDENTITY_STRCT) / sizeof(hcf_16) - 1,
CFG_FW_IDENTITY,
COMP_ID_FW_AP,
- 3, //Variant
- 1, //Major
- 24 //Minor
+ 3, /* Variant */
+ 1, /* Major */
+ 24 /* Minor */
},
- { 0000, 0000, 0000, 0000, 0000, 0000 } //endsentinel
+ { 0000, 0000, 0000, 0000, 0000, 0000 } /* endsentinel */
};
static const CFG_PROG_STRCT fw_image_code[] = {
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x0148, // sizeof(fw_image_1_data),
- 0x00000060, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_1_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x0148, /* sizeof(fw_image_1_data), */
+ 0x00000060, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 *)fw_image_1_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x2432, // sizeof(fw_image_2_data),
- 0x00000C16, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_2_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x2432, /* sizeof(fw_image_2_data), */
+ 0x00000C16, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 *)fw_image_2_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x194c, // sizeof(fw_image_3_data),
- 0x001E3048, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_3_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x194c, /* sizeof(fw_image_3_data), */
+ 0x001E3048, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 *)fw_image_3_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0xb7e4, // sizeof(fw_image_4_data),
- 0x001F4000, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_4_data
+		CFG_PROG_VOLATILE,	/* mode */
+		0xb7e4,			/* sizeof(fw_image_4_data), */
+		0x001F4000,		/* Target address in NIC Memory */
+		0x0000,			/* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 *)fw_image_4_data
},
{
5,
CFG_PROG,
- CFG_PROG_STOP, // mode
+		CFG_PROG_STOP,		/* mode */
0000,
- 0x000F2101, // Start execution address
+		0x000F2101,		/* Start execution address */
},
{ 0000, 0000, 0000, 0000, 00000000, 0000, 00000000}
};
@@ -4059,7 +4059,7 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_SUPL,
COMP_ID_APF,
{
- { 4, 1, 1 } //variant, bottom, top
+			{ 4, 1, 1 }	/* variant, bottom, top */
}
},
{ 3 + ((20 * sizeof(CFG_RANGE_SPEC_STRCT)) / sizeof(hcf_16)),
@@ -4067,8 +4067,8 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_ACT,
COMP_ID_MFI,
{
- { 7, 3, 3 }, //variant, bottom, top
- { 8, 1, 1 } //variant, bottom, top
+ { 7, 3, 3 }, /* variant, bottom, top */
+ { 8, 1, 1 } /* variant, bottom, top */
}
},
{ 3 + ((20 * sizeof(CFG_RANGE_SPEC_STRCT)) / sizeof(hcf_16)),
@@ -4076,18 +4076,18 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_ACT,
COMP_ID_CFI,
{
- { 4, 1, 2 } //variant, bottom, top
+ { 4, 1, 2 } /* variant, bottom, top */
}
},
- { 0000, 0000, 0000, 0000, { { 0000, 0000, 0000 } } } //endsentinel
+ { 0000, 0000, 0000, 0000, { { 0000, 0000, 0000 } } } /* endsentinel */
};
memimage fw_image = {
- "FUPU7D37dhfwci\001C", //signature, <format number>, C/Bin type
+ "FUPU7D37dhfwci\001C", /* signature, <format number>, C/Bin type */
(CFG_PROG_STRCT *) fw_image_code,
0x000F2101,
- 00000000, //(dummy) pdaplug
- 00000000, //(dummy) priplug
+ 00000000, /* (dummy) pdaplug */
+ 00000000, /* (dummy) priplug */
(CFG_RANGE20_STRCT *) fw_image_infocompat,
(CFG_IDENTITY_STRCT *) fw_image_infoidentity,
};
diff --git a/drivers/staging/wlags49_h2/sta_h2.c b/drivers/staging/wlags49_h2/sta_h2.c
index 00dffe2ed8f1..19bed819df1e 100644
--- a/drivers/staging/wlags49_h2/sta_h2.c
+++ b/drivers/staging/wlags49_h2/sta_h2.c
@@ -25,10 +25,10 @@
*/
-#include "hcfcfg.h" // to get hcf_16 etc defined as well as
- // possible settings which influence mdd.h or dhf.h
-#include "mdd.h" //to get COMP_ID_STA etc defined
-#include "dhf.h" //used to be "fhfmem.h", to get memblock,plugrecord,
+#include "hcfcfg.h" /* to get hcf_16 etc defined as well as */
+ /* possible settings which influence mdd.h or dhf.h */
+#include "mdd.h" /* to get COMP_ID_STA etc defined */
+#include "dhf.h" /* used to be fhfmem.h, to get memblock,plugrecord, */
static const hcf_8 fw_image_1_data[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -4381,59 +4381,59 @@ static const hcf_8 fw_image_4_data[] = {
static const CFG_IDENTITY_STRCT fw_image_infoidentity[] = {
{
- sizeof( CFG_IDENTITY_STRCT ) / sizeof(hcf_16) - 1,
+ sizeof(CFG_IDENTITY_STRCT) / sizeof(hcf_16) - 1,
CFG_FW_IDENTITY,
COMP_ID_FW_STA,
- 3, //Variant
- 2, //Major
- 36 //Minor
+ 3, /* Variant */
+ 2, /* Major */
+ 36 /* Minor */
},
- { 0000, 0000, 0000, 0000, 0000, 0000 } //endsentinel
+ { 0000, 0000, 0000, 0000, 0000, 0000 } /* endsentinel */
};
static const CFG_PROG_STRCT fw_image_code[] = {
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x0186, // sizeof(fw_image_1_data),
- 0x00000060, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_1_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x0186, /* sizeof(fw_image_1_data), */
+ 0x00000060, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 *)fw_image_1_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x2518, // sizeof(fw_image_2_data),
- 0x00000C16, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_2_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x2518, /* sizeof(fw_image_2_data), */
+ 0x00000C16, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 *)fw_image_2_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x3daa, // sizeof(fw_image_3_data),
- 0x001E312E, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_3_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x3daa, /* sizeof(fw_image_3_data), */
+ 0x001E312E, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 *)fw_image_3_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0xaa66, // sizeof(fw_image_4_data),
- 0x001F4000, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_4_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0xaa66, /* sizeof(fw_image_4_data), */
+ 0x001F4000, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 *)fw_image_4_data
},
{
5,
CFG_PROG,
- CFG_PROG_STOP, // mode
+ CFG_PROG_STOP, /* mode */
0000,
- 0x000F368E, // Start execution address
+ 0x000F368E, /* Start execution address */
},
{ 0000, 0000, 0000, 0000, 00000000, 0000, 00000000}
};
@@ -4444,7 +4444,7 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_SUPL,
COMP_ID_STA,
{
- { 2, 2, 5 } //variant, bottom, top
+ { 2, 2, 5 } /* variant, bottom, top */
}
},
{ 3 + ((20 * sizeof(CFG_RANGE_SPEC_STRCT)) / sizeof(hcf_16)),
@@ -4452,9 +4452,9 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_ACT,
COMP_ID_MFI,
{
- { 4, 6, 7 }, //variant, bottom, top
- { 5, 6, 7 }, //variant, bottom, top
- { 6, 6, 7 } //variant, bottom, top
+ { 4, 6, 7 }, /* variant, bottom, top */
+ { 5, 6, 7 }, /* variant, bottom, top */
+ { 6, 6, 7 } /* variant, bottom, top */
}
},
{ 3 + ((20 * sizeof(CFG_RANGE_SPEC_STRCT)) / sizeof(hcf_16)),
@@ -4462,18 +4462,18 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_ACT,
COMP_ID_CFI,
{
- { 2, 1, 2 } //variant, bottom, top
+ { 2, 1, 2 } /* variant, bottom, top */
}
},
- { 0000, 0000, 0000, 0000, { { 0000, 0000, 0000 } } } //endsentinel
+ { 0000, 0000, 0000, 0000, { { 0000, 0000, 0000 } } } /* endsentinel */
};
memimage fw_image = {
- "FUPU7D37dhfwci\001C", //signature, <format number>, C/Bin type
+ "FUPU7D37dhfwci\001C", /* signature, <format number>, C/Bin type */
(CFG_PROG_STRCT *) fw_image_code,
0x000F368E,
- 00000000, //(dummy) pdaplug
- 00000000, //(dummy) priplug
+ 00000000, /* (dummy) pdaplug */
+ 00000000, /* (dummy) priplug */
(CFG_RANGE20_STRCT *) fw_image_infocompat,
(CFG_IDENTITY_STRCT *) fw_image_infoidentity,
};
diff --git a/drivers/staging/wlags49_h2/wl_enc.c b/drivers/staging/wlags49_h2/wl_enc.c
index 4c6f776cc4da..51293d9f2be9 100644
--- a/drivers/staging/wlags49_h2/wl_enc.c
+++ b/drivers/staging/wlags49_h2/wl_enc.c
@@ -105,57 +105,57 @@ extern dbg_info_t *DbgInfo;
* OK
*
******************************************************************************/
-int wl_wep_code( char *szCrypt, char *szDest, void *Data, int nLen )
+int wl_wep_code(char *szCrypt, char *szDest, void *Data, int nLen)
{
- int i;
- int t;
- int k ;
- char bits;
- char *szData = (char *) Data;
- /*------------------------------------------------------------------------*/
+ int i;
+ int t;
+ int k ;
+ char bits;
+ char *szData = (char *) Data;
+ /*------------------------------------------------------------------------*/
- for( i = bits = 0 ; i < MACADDRESS_STR_LEN; i++ ) {
- bits ^= szCrypt[i];
- bits += szCrypt[i];
- }
+ for (i = bits = 0; i < MACADDRESS_STR_LEN; i++) {
+ bits ^= szCrypt[i];
+ bits += szCrypt[i];
+ }
- for( i = t = *szDest = 0; i < nLen; i++, t++ ) {
- k = szData[i] ^ ( bits + i );
+ for (i = t = *szDest = 0; i < nLen; i++, t++) {
+ k = szData[i] ^ (bits + i);
- switch( i % 3 ) {
+ switch (i % 3) {
- case 0 :
+ case 0:
- szDest[t] = ((k & 0xFC) >> 2) + CH_START ;
- szDest[t+1] = ((k & 0x03) << 4) + CH_START ;
- szDest[t+2] = '\0';
+ szDest[t] = ((k & 0xFC) >> 2) + CH_START ;
+ szDest[t+1] = ((k & 0x03) << 4) + CH_START ;
+ szDest[t+2] = '\0';
- break;
+ break;
- case 1 :
+ case 1:
- szDest[t] += (( k & 0xF0 ) >> 4 );
- szDest[t+1] = (( k & 0x0F ) << 2 ) + CH_START ;
- szDest[t+2] = '\0';
+ szDest[t] += ((k & 0xF0) >> 4);
+ szDest[t+1] = ((k & 0x0F) << 2) + CH_START ;
+ szDest[t+2] = '\0';
- break;
+ break;
- case 2 :
+ case 2:
- szDest[t] += (( k & 0xC0 ) >> 6 );
- szDest[t+1] = ( k & 0x3F ) + CH_START ;
- szDest[t+2] = '\0';
- t++;
+ szDest[t] += ((k & 0xC0) >> 6);
+ szDest[t+1] = (k & 0x3F) + CH_START ;
+ szDest[t+2] = '\0';
+ t++;
- break;
- }
- }
+ break;
+ }
+ }
- return( strlen( szDest )) ;
+ return strlen(szDest);
}
/*============================================================================*/
@@ -182,50 +182,50 @@ int wl_wep_code( char *szCrypt, char *szDest, void *Data, int nLen )
* OK
*
******************************************************************************/
-int wl_wep_decode( char *szCrypt, void *Dest, char *szData )
+int wl_wep_decode(char *szCrypt, void *Dest, char *szData)
{
- int i;
- int t;
- int nLen;
- char bits;
- char *szDest = Dest;
- /*------------------------------------------------------------------------*/
+ int i;
+ int t;
+ int nLen;
+ char bits;
+ char *szDest = Dest;
+ /*------------------------------------------------------------------------*/
- for( i = bits = 0 ; i < 12; i++ ) {
- bits ^= szCrypt[i] ;
- bits += szCrypt[i] ;
- }
+ for (i = bits = 0; i < 12; i++) {
+ bits ^= szCrypt[i] ;
+ bits += szCrypt[i] ;
+ }
- nLen = ( strlen( szData ) * 3) / 4 ;
+ nLen = (strlen(szData) * 3) / 4 ;
- for( i = t = 0; i < nLen; i++, t++ ) {
- switch( i % 3 ) {
- case 0 :
+ for (i = t = 0; i < nLen; i++, t++) {
+ switch (i % 3) {
+ case 0:
- szDest[i] = ((( szData[t]-CH_START ) & 0x3f ) << 2 ) +
- ((( szData[t+1]-CH_START ) & 0x30 ) >> 4 );
- break;
+ szDest[i] = (((szData[t] - CH_START) & 0x3f) << 2) +
+ (((szData[t+1] - CH_START) & 0x30) >> 4);
+ break;
- case 1 :
- szDest[i] = ((( szData[t]-CH_START ) & 0x0f ) << 4 ) +
- ((( szData[t+1]-CH_START ) & 0x3c ) >> 2 );
- break;
+ case 1:
+ szDest[i] = (((szData[t] - CH_START) & 0x0f) << 4) +
+ (((szData[t+1] - CH_START) & 0x3c) >> 2);
+ break;
- case 2 :
- szDest[i] = ((( szData[t]-CH_START ) & 0x03 ) << 6 ) +
- (( szData[t+1]-CH_START ) & 0x3f );
- t++;
- break;
- }
+ case 2:
+ szDest[i] = (((szData[t] - CH_START) & 0x03) << 6) +
+ ((szData[t+1] - CH_START) & 0x3f);
+ t++;
+ break;
+ }
- szDest[i] ^= ( bits + i ) ;
+ szDest[i] ^= (bits + i);
- }
+ }
- return( i ) ;
+ return i;
}
/*============================================================================*/
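Usage aside for the reworked wl_wep_code()/wl_wep_decode() pair above, not part of the patch: the two helpers obfuscate and recover a key using a MAC-address string as the scrambling seed. The sketch below assumes it is compiled and linked together with wl_enc.c, that MACADDRESS_STR_LEN is 12 (matching the hard-coded bound in wl_wep_decode()), and that a 12-character MAC string is therefore a valid seed; the sample strings and buffer sizes are illustrative assumptions only.

#include <stdio.h>
#include <string.h>

/* Prototypes as they appear in wl_enc.c after this patch. */
int wl_wep_code(char *szCrypt, char *szDest, void *Data, int nLen);
int wl_wep_decode(char *szCrypt, void *Dest, char *szData);

int main(void)
{
	char crypt[] = "00022DAABBCC";		/* 12-char MAC string used as seed */
	char key[] = "example-key";		/* data to obfuscate */
	char encoded[64] = { 0 };
	char decoded[64] = { 0 };

	int enc_len = wl_wep_code(crypt, encoded, key, (int)strlen(key));
	int dec_len = wl_wep_decode(crypt, decoded, encoded);

	printf("encoded %d chars, decoded %d bytes: %s\n",
	       enc_len, dec_len, decoded);
	return 0;
}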
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index fb421407e106..235cc2a7ffe6 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -457,17 +457,17 @@ int wl_close( struct net_device *dev )
static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
- strncpy(info->version, DRV_VERSION_STR, sizeof(info->version) - 1);
-// strncpy(info.fw_version, priv->fw_name,
-// sizeof(info.fw_version) - 1);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION_STR, sizeof(info->version));
+// strlcpy(info.fw_version, priv->fw_name,
+// sizeof(info.fw_version));
if (dev->dev.parent) {
dev_set_name(dev->dev.parent, "%s", info->bus_info);
- //strncpy(info->bus_info, dev->dev.parent->bus_id,
- // sizeof(info->bus_info) - 1);
+ //strlcpy(info->bus_info, dev->dev.parent->bus_id,
+ // sizeof(info->bus_info));
} else {
- snprintf(info->bus_info, sizeof(info->bus_info) - 1,
+ snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA FIXME");
// "PCMCIA 0x%lx", priv->hw.iobase);
}
diff --git a/drivers/staging/wlags49_h2/wl_netdev.h b/drivers/staging/wlags49_h2/wl_netdev.h
index 61f040f26d97..95bfbebf35d6 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.h
+++ b/drivers/staging/wlags49_h2/wl_netdev.h
@@ -68,87 +68,87 @@
/*******************************************************************************
* function prototypes
******************************************************************************/
-int wl_init( struct net_device *dev );
+int wl_init(struct net_device *dev);
-int wl_config( struct net_device *dev, struct ifmap *map );
+int wl_config(struct net_device *dev, struct ifmap *map);
-struct net_device *wl_device_alloc( void );
+struct net_device *wl_device_alloc(void);
-void wl_device_dealloc( struct net_device *dev );
+void wl_device_dealloc(struct net_device *dev);
-int wl_open( struct net_device *dev );
+int wl_open(struct net_device *dev);
-int wl_close( struct net_device *dev );
+int wl_close(struct net_device *dev);
-int wl_ioctl( struct net_device *dev, struct ifreq *rq, int cmd );
+int wl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-int wl_tx( struct sk_buff *skb, struct net_device *dev, int port );
+int wl_tx(struct sk_buff *skb, struct net_device *dev, int port);
-int wl_send( struct wl_private *lp );
+int wl_send(struct wl_private *lp);
-int wl_rx( struct net_device *dev );
+int wl_rx(struct net_device *dev);
-void wl_tx_timeout( struct net_device *dev );
+void wl_tx_timeout(struct net_device *dev);
-struct net_device_stats *wl_stats( struct net_device *dev );
+struct net_device_stats *wl_stats(struct net_device *dev);
#ifdef ENABLE_DMA
-int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port );
-int wl_rx_dma( struct net_device *dev );
+int wl_send_dma(struct wl_private *lp, struct sk_buff *skb, int port);
+int wl_rx_dma(struct net_device *dev);
#endif
#ifdef NEW_MULTICAST
-void wl_multicast( struct net_device *dev );
+void wl_multicast(struct net_device *dev);
#else
-void wl_multicast( struct net_device *dev, int num_addrs, void *addrs );
-#endif // NEW_MULTICAST
+void wl_multicast(struct net_device *dev, int num_addrs, void *addrs);
+#endif /* NEW_MULTICAST */
-int wl_tx_port0( struct sk_buff *skb, struct net_device *dev );
+int wl_tx_port0(struct sk_buff *skb, struct net_device *dev);
#ifdef USE_WDS
-int wl_tx_port1( struct sk_buff *skb, struct net_device *dev );
-int wl_tx_port2( struct sk_buff *skb, struct net_device *dev );
-int wl_tx_port3( struct sk_buff *skb, struct net_device *dev );
-int wl_tx_port4( struct sk_buff *skb, struct net_device *dev );
-int wl_tx_port5( struct sk_buff *skb, struct net_device *dev );
-int wl_tx_port6( struct sk_buff *skb, struct net_device *dev );
-
-void wl_wds_device_alloc( struct wl_private *lp );
-void wl_wds_device_dealloc( struct wl_private *lp );
-void wl_wds_netif_start_queue( struct wl_private *lp );
-void wl_wds_netif_stop_queue( struct wl_private *lp );
-void wl_wds_netif_wake_queue( struct wl_private *lp );
-void wl_wds_netif_carrier_on( struct wl_private *lp );
-void wl_wds_netif_carrier_off( struct wl_private *lp );
+int wl_tx_port1(struct sk_buff *skb, struct net_device *dev);
+int wl_tx_port2(struct sk_buff *skb, struct net_device *dev);
+int wl_tx_port3(struct sk_buff *skb, struct net_device *dev);
+int wl_tx_port4(struct sk_buff *skb, struct net_device *dev);
+int wl_tx_port5(struct sk_buff *skb, struct net_device *dev);
+int wl_tx_port6(struct sk_buff *skb, struct net_device *dev);
+
+void wl_wds_device_alloc(struct wl_private *lp);
+void wl_wds_device_dealloc(struct wl_private *lp);
+void wl_wds_netif_start_queue(struct wl_private *lp);
+void wl_wds_netif_stop_queue(struct wl_private *lp);
+void wl_wds_netif_wake_queue(struct wl_private *lp);
+void wl_wds_netif_carrier_on(struct wl_private *lp);
+void wl_wds_netif_carrier_off(struct wl_private *lp);
#endif /* USE_WDS */
#ifdef USE_WDS
-#define WL_WDS_DEVICE_ALLOC( ARG ) wl_wds_device_alloc( ARG )
-#define WL_WDS_DEVICE_DEALLOC( ARG ) wl_wds_device_dealloc( ARG )
-#define WL_WDS_NETIF_START_QUEUE( ARG ) wl_wds_netif_start_queue( ARG )
-#define WL_WDS_NETIF_STOP_QUEUE( ARG ) wl_wds_netif_stop_queue( ARG )
-#define WL_WDS_NETIF_WAKE_QUEUE( ARG ) wl_wds_netif_wake_queue( ARG )
-#define WL_WDS_NETIF_CARRIER_ON( ARG ) wl_wds_netif_carrier_on( ARG )
-#define WL_WDS_NETIF_CARRIER_OFF( ARG ) wl_wds_netif_carrier_off( ARG )
+#define WL_WDS_DEVICE_ALLOC(ARG) wl_wds_device_alloc(ARG)
+#define WL_WDS_DEVICE_DEALLOC(ARG) wl_wds_device_dealloc(ARG)
+#define WL_WDS_NETIF_START_QUEUE(ARG) wl_wds_netif_start_queue(ARG)
+#define WL_WDS_NETIF_STOP_QUEUE(ARG) wl_wds_netif_stop_queue(ARG)
+#define WL_WDS_NETIF_WAKE_QUEUE(ARG) wl_wds_netif_wake_queue(ARG)
+#define WL_WDS_NETIF_CARRIER_ON(ARG) wl_wds_netif_carrier_on(ARG)
+#define WL_WDS_NETIF_CARRIER_OFF(ARG) wl_wds_netif_carrier_off(ARG)
#else
-#define WL_WDS_DEVICE_ALLOC( ARG )
-#define WL_WDS_DEVICE_DEALLOC( ARG )
-#define WL_WDS_NETIF_START_QUEUE( ARG )
-#define WL_WDS_NETIF_STOP_QUEUE( ARG )
-#define WL_WDS_NETIF_WAKE_QUEUE( ARG )
-#define WL_WDS_NETIF_CARRIER_ON( ARG )
-#define WL_WDS_NETIF_CARRIER_OFF( ARG )
+#define WL_WDS_DEVICE_ALLOC(ARG)
+#define WL_WDS_DEVICE_DEALLOC(ARG)
+#define WL_WDS_NETIF_START_QUEUE(ARG)
+#define WL_WDS_NETIF_STOP_QUEUE(ARG)
+#define WL_WDS_NETIF_WAKE_QUEUE(ARG)
+#define WL_WDS_NETIF_CARRIER_ON(ARG)
+#define WL_WDS_NETIF_CARRIER_OFF(ARG)
#endif /* USE_WDS */
-#endif // __WL_NETDEV_H__
+#endif /* __WL_NETDEV_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index 87e1e4123126..c97e0e154d28 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -67,7 +67,7 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <debug.h>
#include <hcf.h>
@@ -81,17 +81,17 @@
#include <wl_util.h>
#include <wl_netdev.h>
-int wvlan_uil_connect( struct uilreq *urq, struct wl_private *lp );
-int wvlan_uil_disconnect( struct uilreq *urq, struct wl_private *lp );
-int wvlan_uil_action( struct uilreq *urq, struct wl_private *lp );
-int wvlan_uil_block( struct uilreq *urq, struct wl_private *lp );
-int wvlan_uil_unblock( struct uilreq *urq, struct wl_private *lp );
-int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp );
-int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp );
-int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp );
+int wvlan_uil_connect(struct uilreq *urq, struct wl_private *lp);
+int wvlan_uil_disconnect(struct uilreq *urq, struct wl_private *lp);
+int wvlan_uil_action(struct uilreq *urq, struct wl_private *lp);
+int wvlan_uil_block(struct uilreq *urq, struct wl_private *lp);
+int wvlan_uil_unblock(struct uilreq *urq, struct wl_private *lp);
+int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp);
+int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp);
+int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp);
-int cfg_driver_info( struct uilreq *urq, struct wl_private *lp );
-int cfg_driver_identity( struct uilreq *urq, struct wl_private *lp );
+int cfg_driver_info(struct uilreq *urq, struct wl_private *lp);
+int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp);
/*******************************************************************************
@@ -99,7 +99,7 @@ int cfg_driver_identity( struct uilreq *urq, struct wl_private *lp );
******************************************************************************/
#if DBG
extern dbg_info_t *DbgInfo;
-#endif // DBG
+#endif /* DBG */
@@ -127,47 +127,47 @@ extern dbg_info_t *DbgInfo;
* errno value otherwise
*
******************************************************************************/
-int wvlan_uil( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil(struct uilreq *urq, struct wl_private *lp)
{
int ioctl_ret = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil");
+ DBG_ENTER(DbgInfo);
- switch( urq->command ) {
- case UIL_FUN_CONNECT:
+ switch (urq->command) {
+ case UIL_FUN_CONNECT:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_CONNECT\n");
- ioctl_ret = wvlan_uil_connect( urq, lp );
+ ioctl_ret = wvlan_uil_connect(urq, lp);
break;
- case UIL_FUN_DISCONNECT:
+ case UIL_FUN_DISCONNECT:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_DISCONNECT\n");
- ioctl_ret = wvlan_uil_disconnect( urq, lp );
+ ioctl_ret = wvlan_uil_disconnect(urq, lp);
break;
- case UIL_FUN_ACTION:
- DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_ACTION\n" );
- ioctl_ret = wvlan_uil_action( urq, lp );
+ case UIL_FUN_ACTION:
+ DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_ACTION\n");
+ ioctl_ret = wvlan_uil_action(urq, lp);
break;
- case UIL_FUN_SEND_DIAG_MSG:
+ case UIL_FUN_SEND_DIAG_MSG:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_SEND_DIAG_MSG\n");
- ioctl_ret = wvlan_uil_send_diag_msg( urq, lp );
+ ioctl_ret = wvlan_uil_send_diag_msg(urq, lp);
break;
- case UIL_FUN_GET_INFO:
+ case UIL_FUN_GET_INFO:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_GET_INFO\n");
- ioctl_ret = wvlan_uil_get_info( urq, lp );
+ ioctl_ret = wvlan_uil_get_info(urq, lp);
break;
- case UIL_FUN_PUT_INFO:
+ case UIL_FUN_PUT_INFO:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_PUT_INFO\n");
- ioctl_ret = wvlan_uil_put_info( urq, lp );
+ ioctl_ret = wvlan_uil_put_info(urq, lp);
break;
default:
- DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- UNSUPPORTED UIL CODE: 0x%X", urq->command );
+ DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- UNSUPPORTED UIL CODE: 0x%X", urq->command);
ioctl_ret = -EOPNOTSUPP;
break;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return ioctl_ret;
-} // wvlan_uil
+} /* wvlan_uil */
/*============================================================================*/
@@ -192,28 +192,28 @@ int wvlan_uil( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int wvlan_uil_connect( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil_connect(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil_connect" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil_connect");
+ DBG_ENTER(DbgInfo);
- if( !( lp->flags & WVLAN2_UIL_CONNECTED )) {
+ if (!(lp->flags & WVLAN2_UIL_CONNECTED)) {
lp->flags |= WVLAN2_UIL_CONNECTED;
- urq->hcfCtx = &( lp->hcfCtx );
+ urq->hcfCtx = &(lp->hcfCtx);
urq->result = UIL_SUCCESS;
} else {
- DBG_WARNING( DbgInfo, "UIL_ERR_IN_USE\n" );
+ DBG_WARNING(DbgInfo, "UIL_ERR_IN_USE\n");
urq->result = UIL_ERR_IN_USE;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // wvlan_uil_connect
+} /* wvlan_uil_connect */
/*============================================================================*/
@@ -238,17 +238,17 @@ int wvlan_uil_connect( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int wvlan_uil_disconnect( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil_disconnect(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil_disconnect" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil_disconnect");
+ DBG_ENTER(DbgInfo);
- if( urq->hcfCtx == &( lp->hcfCtx )) {
+ if (urq->hcfCtx == &(lp->hcfCtx)) {
if (lp->flags & WVLAN2_UIL_CONNECTED) {
lp->flags &= ~WVLAN2_UIL_CONNECTED;
/*
@@ -262,13 +262,13 @@ int wvlan_uil_disconnect( struct uilreq *urq, struct wl_private *lp )
urq->hcfCtx = NULL;
urq->result = UIL_SUCCESS;
} else {
- DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" );
+ DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // wvlan_uil_disconnect
+} /* wvlan_uil_disconnect */
/*============================================================================*/
@@ -293,60 +293,60 @@ int wvlan_uil_disconnect( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int wvlan_uil_action( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil_action(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
ltv_t *ltv;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil_action" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil_action");
+ DBG_ENTER(DbgInfo);
- if( urq->hcfCtx == &( lp->hcfCtx )) {
+ if (urq->hcfCtx == &(lp->hcfCtx)) {
/* Make sure there's an LTV in the request buffer */
ltv = (ltv_t *)urq->data;
- if( ltv != NULL ) {
+ if (ltv != NULL) {
/* Switch on the Type field of the LTV contained in the request
buffer */
- switch( ltv->typ ) {
+ switch (ltv->typ) {
case UIL_ACT_BLOCK:
- DBG_TRACE( DbgInfo, "UIL_ACT_BLOCK\n" );
- result = wvlan_uil_block( urq, lp );
+ DBG_TRACE(DbgInfo, "UIL_ACT_BLOCK\n");
+ result = wvlan_uil_block(urq, lp);
break;
case UIL_ACT_UNBLOCK:
- DBG_TRACE( DbgInfo, "UIL_ACT_UNBLOCK\n" );
- result = wvlan_uil_unblock( urq, lp );
+ DBG_TRACE(DbgInfo, "UIL_ACT_UNBLOCK\n");
+ result = wvlan_uil_unblock(urq, lp);
break;
case UIL_ACT_SCAN:
- DBG_TRACE( DbgInfo, "UIL_ACT_SCAN\n" );
- urq->result = hcf_action( &( lp->hcfCtx ), MDD_ACT_SCAN );
+ DBG_TRACE(DbgInfo, "UIL_ACT_SCAN\n");
+ urq->result = hcf_action(&(lp->hcfCtx), MDD_ACT_SCAN);
break;
case UIL_ACT_APPLY:
- DBG_TRACE( DbgInfo, "UIL_ACT_APPLY\n" );
- urq->result = wl_apply( lp );
+ DBG_TRACE(DbgInfo, "UIL_ACT_APPLY\n");
+ urq->result = wl_apply(lp);
break;
case UIL_ACT_RESET:
- DBG_TRACE( DbgInfo, "UIL_ACT_RESET\n" );
- urq->result = wl_go( lp );
+ DBG_TRACE(DbgInfo, "UIL_ACT_RESET\n");
+ urq->result = wl_go(lp);
break;
default:
- DBG_WARNING( DbgInfo, "Unknown action code: 0x%x\n", ltv->typ );
+ DBG_WARNING(DbgInfo, "Unknown action code: 0x%x\n", ltv->typ);
break;
}
} else {
- DBG_ERROR( DbgInfo, "Bad LTV for this action\n" );
+ DBG_ERROR(DbgInfo, "Bad LTV for this action\n");
urq->result = UIL_ERR_LEN;
}
} else {
- DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" );
+ DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // wvlan_uil_action
+} /* wvlan_uil_action */
/*============================================================================*/
@@ -373,34 +373,34 @@ int wvlan_uil_action( struct uilreq *urq, struct wl_private *lp )
*
******************************************************************************/
-int wvlan_uil_block( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil_block(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil_block" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil_block");
+ DBG_ENTER(DbgInfo);
- if( urq->hcfCtx == &( lp->hcfCtx )) {
- if( capable( CAP_NET_ADMIN )) {
+ if (urq->hcfCtx == &(lp->hcfCtx)) {
+ if (capable(CAP_NET_ADMIN)) {
lp->flags |= WVLAN2_UIL_BUSY;
netif_stop_queue(lp->dev);
- WL_WDS_NETIF_STOP_QUEUE( lp );
+ WL_WDS_NETIF_STOP_QUEUE(lp);
urq->result = UIL_SUCCESS;
} else {
- DBG_ERROR( DbgInfo, "EPERM\n" );
+ DBG_ERROR(DbgInfo, "EPERM\n");
urq->result = UIL_FAILURE;
result = -EPERM;
}
} else {
- DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" );
+ DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // wvlan_uil_block
+} /* wvlan_uil_block */
/*============================================================================*/
@@ -425,35 +425,35 @@ int wvlan_uil_block( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int wvlan_uil_unblock( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil_unblock(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil_unblock" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil_unblock");
+ DBG_ENTER(DbgInfo);
- if( urq->hcfCtx == &( lp->hcfCtx )) {
- if( capable( CAP_NET_ADMIN )) {
+ if (urq->hcfCtx == &(lp->hcfCtx)) {
+ if (capable(CAP_NET_ADMIN)) {
if (lp->flags & WVLAN2_UIL_BUSY) {
lp->flags &= ~WVLAN2_UIL_BUSY;
netif_wake_queue(lp->dev);
- WL_WDS_NETIF_WAKE_QUEUE( lp );
+ WL_WDS_NETIF_WAKE_QUEUE(lp);
}
} else {
- DBG_ERROR( DbgInfo, "EPERM\n" );
+ DBG_ERROR(DbgInfo, "EPERM\n");
urq->result = UIL_FAILURE;
result = -EPERM;
}
} else {
- DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" );
+ DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // wvlan_uil_unblock
+} /* wvlan_uil_unblock */
/*============================================================================*/
@@ -478,47 +478,47 @@ int wvlan_uil_unblock( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
DESC_STRCT Descp[1];
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil_send_diag_msg" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil_send_diag_msg");
+ DBG_ENTER(DbgInfo);
- if( urq->hcfCtx == &( lp->hcfCtx )) {
- if( capable( CAP_NET_ADMIN )) {
+ if (urq->hcfCtx == &(lp->hcfCtx)) {
+ if (capable(CAP_NET_ADMIN)) {
if ((urq->data != NULL) && (urq->len != 0)) {
if (lp->hcfCtx.IFB_RscInd != 0) {
u_char *data;
- // Verify the user buffer
+ /* Verify the user buffer */
result = verify_area(VERIFY_READ, urq->data, urq->len);
if (result != 0) {
- DBG_ERROR( DbgInfo, "verify_area failed, result: %d\n", result );
+ DBG_ERROR(DbgInfo, "verify_area failed, result: %d\n", result);
urq->result = UIL_FAILURE;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
data = kmalloc(urq->len, GFP_KERNEL);
if (data != NULL) {
- memset( Descp, 0, sizeof( DESC_STRCT ));
- memcpy( data, urq->data, urq->len );
+ memset(Descp, 0, sizeof(DESC_STRCT));
+ memcpy(data, urq->data, urq->len);
Descp[0].buf_addr = (wci_bufp)data;
Descp[0].BUF_CNT = urq->len;
- Descp[0].next_desc_addr = 0; // terminate list
+ Descp[0].next_desc_addr = 0; /* terminate list */
- hcf_send_msg( &(lp->hcfCtx), &Descp[0], HCF_PORT_0 );
- kfree( data );
+ hcf_send_msg(&(lp->hcfCtx), &Descp[0], HCF_PORT_0);
+ kfree(data);
} else {
- DBG_ERROR( DbgInfo, "ENOMEM\n" );
+ DBG_ERROR(DbgInfo, "ENOMEM\n");
urq->result = UIL_FAILURE;
result = -ENOMEM;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
@@ -530,18 +530,18 @@ int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp )
urq->result = UIL_FAILURE;
}
} else {
- DBG_ERROR( DbgInfo, "EPERM\n" );
+ DBG_ERROR(DbgInfo, "EPERM\n");
urq->result = UIL_FAILURE;
result = -EPERM;
}
} else {
- DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" );
+ DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // wvlan_uil_send_diag_msg
+} /* wvlan_uil_send_diag_msg */
/*============================================================================*/
@@ -564,7 +564,7 @@ int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
ltv_t *pLtv;
@@ -575,94 +575,94 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
hcf_16 hcfPort = HCF_PORT_0;
#endif /* USE_WDS */
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil_put_info" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil_put_info");
+ DBG_ENTER(DbgInfo);
- if( urq->hcfCtx == &( lp->hcfCtx )) {
- if( capable( CAP_NET_ADMIN )) {
- if(( urq->data != NULL ) && ( urq->len != 0 )) {
+ if (urq->hcfCtx == &(lp->hcfCtx)) {
+ if (capable(CAP_NET_ADMIN)) {
+ if ((urq->data != NULL) && (urq->len != 0)) {
/* Make sure that we have at least a command and length to send. */
- if( urq->len < ( sizeof( hcf_16 ) * 2 )) {
- urq->len = sizeof( lp->ltvRecord );
+ if (urq->len < (sizeof(hcf_16) * 2)) {
+ urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
- DBG_ERROR( DbgInfo, "No Length/Type in LTV!!!\n" );
- DBG_ERROR( DbgInfo, "UIL_ERR_LEN\n" );
- DBG_LEAVE( DbgInfo );
+ DBG_ERROR(DbgInfo, "No Length/Type in LTV!!!\n");
+ DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
+ DBG_LEAVE(DbgInfo);
return result;
}
/* Verify the user buffer */
- result = verify_area( VERIFY_READ, urq->data, urq->len );
- if( result != 0 ) {
+ result = verify_area(VERIFY_READ, urq->data, urq->len);
+ if (result != 0) {
urq->result = UIL_FAILURE;
- DBG_ERROR( DbgInfo, "verify_area(), VERIFY_READ FAILED\n" );
- DBG_LEAVE( DbgInfo );
+ DBG_ERROR(DbgInfo, "verify_area(), VERIFY_READ FAILED\n");
+ DBG_LEAVE(DbgInfo);
return result;
}
/* Get only the command and length information. */
- copy_from_user( &( lp->ltvRecord ), urq->data, sizeof( hcf_16 ) * 2 );
+ copy_from_user(&(lp->ltvRecord), urq->data, sizeof(hcf_16) * 2);
/* Make sure the incoming LTV record length is within the bounds of the
IOCTL length */
- if((( lp->ltvRecord.len + 1 ) * sizeof( hcf_16 )) > urq->len ) {
- urq->len = sizeof( lp->ltvRecord );
+ if (((lp->ltvRecord.len + 1) * sizeof(hcf_16)) > urq->len) {
+ urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
- DBG_ERROR( DbgInfo, "UIL_ERR_LEN\n" );
- DBG_LEAVE( DbgInfo );
+ DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
+ DBG_LEAVE(DbgInfo);
return result;
}
/* If the requested length is greater than the size of our local
LTV record, try to allocate it from the kernel stack.
Otherwise, we just use our local LTV record. */
- if( urq->len > sizeof( lp->ltvRecord )) {
+ if (urq->len > sizeof(lp->ltvRecord)) {
pLtv = kmalloc(urq->len, GFP_KERNEL);
if (pLtv != NULL) {
ltvAllocated = TRUE;
} else {
- DBG_ERROR( DbgInfo, "Alloc FAILED\n" );
- urq->len = sizeof( lp->ltvRecord );
+ DBG_ERROR(DbgInfo, "Alloc FAILED\n");
+ urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
result = -ENOMEM;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
} else {
- pLtv = &( lp->ltvRecord );
+ pLtv = &(lp->ltvRecord);
}
/* Copy the data from the user's buffer into the local LTV
record data area. */
- copy_from_user( pLtv, urq->data, urq->len );
+ copy_from_user(pLtv, urq->data, urq->len);
/* We need to snoop the commands to see if there is anything we
need to store for the purposes of a reset or start/stop
sequence. Perform endian translation as needed */
- switch( pLtv->typ ) {
+ switch (pLtv->typ) {
case CFG_CNF_PORT_TYPE:
lp->PortType = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_OWN_MAC_ADDR:
/* TODO: determine if we are going to store anything based on this */
break;
case CFG_CNF_OWN_CHANNEL:
lp->Channel = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
/* CFG_CNF_OWN_SSID currently same as CNF_DESIRED_SSID. Do we
need separate storage for this? */
- //case CFG_CNF_OWN_SSID:
+ /* case CFG_CNF_OWN_SSID: */
case CFG_CNF_OWN_ATIM_WINDOW:
lp->atimWindow = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_SYSTEM_SCALE:
lp->DistanceBetweenAPs = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
case CFG_CNF_MAX_DATA_LEN:
/* TODO: determine if we are going to store anything based
@@ -670,163 +670,163 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
break;
case CFG_CNF_PM_ENABLED:
lp->PMEnabled = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_MCAST_RX:
lp->MulticastReceive = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_MAX_SLEEP_DURATION:
lp->MaxSleepDuration = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_HOLDOVER_DURATION:
lp->holdoverDuration = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_OWN_NAME:
- memset( lp->StationName, 0, sizeof( lp->StationName ));
- memcpy( (void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ memset(lp->StationName, 0, sizeof(lp->StationName));
+ memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_LOAD_BALANCING:
lp->loadBalancing = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_MEDIUM_DISTRIBUTION:
lp->mediumDistribution = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#ifdef WARP
case CFG_CNF_TX_POW_LVL:
lp->txPowLevel = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
- //case CFG_CNF_SHORT_RETRY_LIMIT: // Short Retry Limit
- //case 0xFC33: // Long Retry Limit
- case CFG_SUPPORTED_RATE_SET_CNTL: // Supported Rate Set Control
+ /* case CFG_CNF_SHORT_RETRY_LIMIT: */ /* Short Retry Limit */
+ /* case 0xFC33: */ /* Long Retry Limit */
+ case CFG_SUPPORTED_RATE_SET_CNTL: /* Supported Rate Set Control */
lp->srsc[0] = pLtv->u.u16[0];
lp->srsc[1] = pLtv->u.u16[1];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
- pLtv->u.u16[1] = CNV_INT_TO_LITTLE( pLtv->u.u16[1] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
+ pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
break;
- case CFG_BASIC_RATE_SET_CNTL: // Basic Rate Set Control
+ case CFG_BASIC_RATE_SET_CNTL: /* Basic Rate Set Control */
lp->brsc[0] = pLtv->u.u16[0];
lp->brsc[1] = pLtv->u.u16[1];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
- pLtv->u.u16[1] = CNV_INT_TO_LITTLE( pLtv->u.u16[1] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
+ pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
break;
case CFG_CNF_CONNECTION_CNTL:
lp->connectionControl = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
- //case CFG_PROBE_DATA_RATE:
-#endif // HERMES25
+ /* case CFG_PROBE_DATA_RATE: */
+#endif /* HERMES25 */
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
- //;?should we restore this to allow smaller memory footprint
+#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
+ /* ;?should we restore this to allow smaller memory footprint */
case CFG_CNF_OWN_DTIM_PERIOD:
lp->DTIMPeriod = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#ifdef WARP
- case CFG_CNF_OWN_BEACON_INTERVAL: // Own Beacon Interval
+ case CFG_CNF_OWN_BEACON_INTERVAL: /* Own Beacon Interval */
lp->ownBeaconInterval = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
-#endif // WARP
- case CFG_COEXISTENSE_BEHAVIOUR: // Coexistence behavior
+#endif /* WARP */
+ case CFG_COEXISTENSE_BEHAVIOUR: /* Coexistence behavior */
lp->coexistence = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#ifdef USE_WDS
case CFG_CNF_WDS_ADDR1:
- memcpy( &lp->wds_port[0].wdsAddress, &pLtv->u.u8[0], ETH_ALEN );
+ memcpy(&lp->wds_port[0].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_1;
break;
case CFG_CNF_WDS_ADDR2:
- memcpy( &lp->wds_port[1].wdsAddress, &pLtv->u.u8[0], ETH_ALEN );
+ memcpy(&lp->wds_port[1].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_2;
break;
case CFG_CNF_WDS_ADDR3:
- memcpy( &lp->wds_port[2].wdsAddress, &pLtv->u.u8[0], ETH_ALEN );
+ memcpy(&lp->wds_port[2].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_3;
break;
case CFG_CNF_WDS_ADDR4:
- memcpy( &lp->wds_port[3].wdsAddress, &pLtv->u.u8[0], ETH_ALEN );
+ memcpy(&lp->wds_port[3].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_4;
break;
case CFG_CNF_WDS_ADDR5:
- memcpy( &lp->wds_port[4].wdsAddress, &pLtv->u.u8[0], ETH_ALEN );
+ memcpy(&lp->wds_port[4].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_5;
break;
case CFG_CNF_WDS_ADDR6:
- memcpy( &lp->wds_port[5].wdsAddress, &pLtv->u.u8[0], ETH_ALEN );
+ memcpy(&lp->wds_port[5].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
hcfPort = HCF_PORT_6;
break;
#endif /* USE_WDS */
case CFG_CNF_MCAST_PM_BUF:
lp->multicastPMBuffering = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_REJECT_ANY:
lp->RejectAny = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#endif
case CFG_CNF_ENCRYPTION:
lp->EnableEncryption = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_AUTHENTICATION:
lp->authentication = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
- //;?should we restore this to allow smaller memory footprint
+#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
+ /* ;?should we restore this to allow smaller memory footprint */
- //case CFG_CNF_EXCL_UNENCRYPTED:
- //lp->ExcludeUnencrypted = pLtv->u.u16[0];
- //pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
- //break;
+ /* case CFG_CNF_EXCL_UNENCRYPTED:
+ lp->ExcludeUnencrypted = pLtv->u.u16[0];
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
+ break; */
case CFG_CNF_MCAST_RATE:
/* TODO: determine if we are going to store anything based on this */
break;
case CFG_CNF_INTRA_BSS_RELAY:
lp->intraBSSRelay = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#endif
case CFG_CNF_MICRO_WAVE:
/* TODO: determine if we are going to store anything based on this */
break;
- //case CFG_CNF_LOAD_BALANCING:
+ /*case CFG_CNF_LOAD_BALANCING:*/
/* TODO: determine if we are going to store anything based on this */
- //break;
- //case CFG_CNF_MEDIUM_DISTRIBUTION:
+ /* break; */
+ /* case CFG_CNF_MEDIUM_DISTRIBUTION: */
/* TODO: determine if we are going to store anything based on this */
- //break;
- //case CFG_CNF_RX_ALL_GROUP_ADDRESS:
- // TODO: determine if we are going to store anything based on this
- //break;
- //case CFG_CNF_COUNTRY_INFO:
+ /* break; */
+ /* case CFG_CNF_RX_ALL_GROUP_ADDRESS: */
+ /* TODO: determine if we are going to store anything based on this */
+ /* break; */
+ /* case CFG_CNF_COUNTRY_INFO: */
/* TODO: determine if we are going to store anything based on this */
- //break;
+ /* break; */
case CFG_CNF_OWN_SSID:
- //case CNF_DESIRED_SSID:
+ /* case CNF_DESIRED_SSID: */
case CFG_DESIRED_SSID:
- memset( lp->NetworkName, 0, sizeof( lp->NetworkName ));
- memcpy( (void *)lp->NetworkName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0] );
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ memset(lp->NetworkName, 0, sizeof(lp->NetworkName));
+ memcpy((void *)lp->NetworkName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
/* take care of the special network name "ANY" case */
- if(( strlen( &pLtv->u.u8[2] ) == 0 ) ||
- ( strcmp( &pLtv->u.u8[2], "ANY" ) == 0 ) ||
- ( strcmp( &pLtv->u.u8[2], "any" ) == 0 )) {
+ if ((strlen(&pLtv->u.u8[2]) == 0) ||
+ (strcmp(&pLtv->u.u8[2], "ANY") == 0) ||
+ (strcmp(&pLtv->u.u8[2], "any") == 0)) {
/* set the SSID_STRCT llen field (u16[0]) to zero, and the
effectually null the string u8[2] */
pLtv->u.u16[0] = 0;
@@ -838,93 +838,93 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
break;
case CFG_CREATE_IBSS:
lp->CreateIBSS = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_RTS_THRH:
lp->RTSThreshold = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_TX_RATE_CNTL:
lp->TxRateControl[0] = pLtv->u.u16[0];
lp->TxRateControl[1] = pLtv->u.u16[1];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
- pLtv->u.u16[1] = CNV_INT_TO_LITTLE( pLtv->u.u16[1] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
+ pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
break;
case CFG_PROMISCUOUS_MODE:
/* TODO: determine if we are going to store anything based on this */
break;
- //case CFG_WAKE_ON_LAN:
+ /* case CFG_WAKE_ON_LAN: */
/* TODO: determine if we are going to store anything based on this */
- //break;
-#if 1 //;? #if (HCF_TYPE) & HCF_TYPE_AP
- //;?should we restore this to allow smaller memory footprint
+ /* break; */
+#if 1 /* ;? #if (HCF_TYPE) & HCF_TYPE_AP */
+ /* ;?should we restore this to allow smaller memory footprint */
case CFG_RTS_THRH0:
lp->RTSThreshold = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_TX_RATE_CNTL0:
-//;?no idea what this should be, get going so comment it out lp->TxRateControl = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+/*;?no idea what this should be, get going so comment it out lp->TxRateControl = pLtv->u.u16[0];*/
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
#ifdef USE_WDS
case CFG_RTS_THRH1:
lp->wds_port[0].rtsThreshold = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_1;
break;
case CFG_RTS_THRH2:
lp->wds_port[1].rtsThreshold = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_2;
break;
case CFG_RTS_THRH3:
lp->wds_port[2].rtsThreshold = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_3;
break;
case CFG_RTS_THRH4:
lp->wds_port[3].rtsThreshold = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_4;
break;
case CFG_RTS_THRH5:
lp->wds_port[4].rtsThreshold = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_5;
break;
case CFG_RTS_THRH6:
lp->wds_port[5].rtsThreshold = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_6;
break;
case CFG_TX_RATE_CNTL1:
lp->wds_port[0].txRateCntl = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_1;
break;
case CFG_TX_RATE_CNTL2:
lp->wds_port[1].txRateCntl = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_2;
break;
case CFG_TX_RATE_CNTL3:
lp->wds_port[2].txRateCntl = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_3;
break;
case CFG_TX_RATE_CNTL4:
lp->wds_port[3].txRateCntl = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_4;
break;
case CFG_TX_RATE_CNTL5:
lp->wds_port[4].txRateCntl = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_5;
break;
case CFG_TX_RATE_CNTL6:
lp->wds_port[5].txRateCntl = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
hcfPort = HCF_PORT_6;
break;
#endif /* USE_WDS */
@@ -934,18 +934,18 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
{
CFG_DEFAULT_KEYS_STRCT *pKeys = (CFG_DEFAULT_KEYS_STRCT *)pLtv;
- pKeys->key[0].len = CNV_INT_TO_LITTLE( pKeys->key[0].len );
- pKeys->key[1].len = CNV_INT_TO_LITTLE( pKeys->key[1].len );
- pKeys->key[2].len = CNV_INT_TO_LITTLE( pKeys->key[2].len );
- pKeys->key[3].len = CNV_INT_TO_LITTLE( pKeys->key[3].len );
+ pKeys->key[0].len = CNV_INT_TO_LITTLE(pKeys->key[0].len);
+ pKeys->key[1].len = CNV_INT_TO_LITTLE(pKeys->key[1].len);
+ pKeys->key[2].len = CNV_INT_TO_LITTLE(pKeys->key[2].len);
+ pKeys->key[3].len = CNV_INT_TO_LITTLE(pKeys->key[3].len);
- memcpy( (void *)&(lp->DefaultKeys), (void *)pKeys,
- sizeof( CFG_DEFAULT_KEYS_STRCT ));
+ memcpy((void *)&(lp->DefaultKeys), (void *)pKeys,
+ sizeof(CFG_DEFAULT_KEYS_STRCT));
}
break;
case CFG_TX_KEY_ID:
lp->TransmitKeyID = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_SCAN_SSID:
/* TODO: determine if we are going to store anything based on this */
@@ -956,7 +956,7 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
/* these RIDS are Info RIDs, and should they be allowed for puts??? */
case CFG_MAX_LOAD_TIME:
case CFG_DL_BUF:
- //case CFG_HSI_SUP_RANGE:
+ /* case CFG_HSI_SUP_RANGE: */
case CFG_NIC_SERIAL_NUMBER:
case CFG_NIC_IDENTITY:
case CFG_NIC_MFI_SUP_RANGE:
@@ -982,35 +982,35 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
case CFG_CF_POLLABLE:
case CFG_AUTHENTICATION_ALGORITHMS:
case CFG_PRIVACY_OPT_IMPLEMENTED:
- //case CFG_CURRENT_REMOTE_RATES:
- //case CFG_CURRENT_USED_RATES:
- //case CFG_CURRENT_SYSTEM_SCALE:
- //case CFG_CURRENT_TX_RATE1:
- //case CFG_CURRENT_TX_RATE2:
- //case CFG_CURRENT_TX_RATE3:
- //case CFG_CURRENT_TX_RATE4:
- //case CFG_CURRENT_TX_RATE5:
- //case CFG_CURRENT_TX_RATE6:
+ /* case CFG_CURRENT_REMOTE_RATES: */
+ /* case CFG_CURRENT_USED_RATES: */
+ /* case CFG_CURRENT_SYSTEM_SCALE: */
+ /* case CFG_CURRENT_TX_RATE1: */
+ /* case CFG_CURRENT_TX_RATE2: */
+ /* case CFG_CURRENT_TX_RATE3: */
+ /* case CFG_CURRENT_TX_RATE4: */
+ /* case CFG_CURRENT_TX_RATE5: */
+ /* case CFG_CURRENT_TX_RATE6: */
case CFG_NIC_MAC_ADDR:
case CFG_PCF_INFO:
- //case CFG_CURRENT_COUNTRY_INFO:
+ /* case CFG_CURRENT_COUNTRY_INFO: */
case CFG_PHY_TYPE:
case CFG_CUR_CHANNEL:
- //case CFG_CURRENT_POWER_STATE:
- //case CFG_CCAMODE:
+ /* case CFG_CURRENT_POWER_STATE: */
+ /* case CFG_CCAMODE: */
case CFG_SUPPORTED_DATA_RATES:
break;
case CFG_AP_MODE:
-//;? lp->DownloadFirmware = ( pLtv->u.u16[0] ) + 1;
- DBG_ERROR( DbgInfo, "set CFG_AP_MODE no longer supported\n" );
+/*;? lp->DownloadFirmware = (pLtv->u.u16[0]) + 1; */
+ DBG_ERROR(DbgInfo, "set CFG_AP_MODE no longer supported\n");
break;
case CFG_ENCRYPT_STRING:
/* TODO: ENDIAN TRANSLATION HERE??? */
- memset( lp->szEncryption, 0, sizeof( lp->szEncryption ));
- memcpy( (void *)lp->szEncryption, (void *)&pLtv->u.u8[0],
- ( pLtv->len * sizeof( hcf_16 )) );
- wl_wep_decode( CRYPT_CODE, &sEncryption,
- lp->szEncryption );
+ memset(lp->szEncryption, 0, sizeof(lp->szEncryption));
+ memcpy((void *)lp->szEncryption, (void *)&pLtv->u.u8[0],
+ (pLtv->len * sizeof(hcf_16)));
+ wl_wep_decode(CRYPT_CODE, &sEncryption,
+ lp->szEncryption);
/* the Linux driver likes to use 1-4 for the key IDs, and then
convert to 0-3 when sending to the card. The Windows code
@@ -1022,34 +1022,34 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
lp->TransmitKeyID = sEncryption.wTxKeyID + 1;
lp->EnableEncryption = sEncryption.wEnabled;
- memcpy( &lp->DefaultKeys, &sEncryption.EncStr,
- sizeof( CFG_DEFAULT_KEYS_STRCT ));
+ memcpy(&lp->DefaultKeys, &sEncryption.EncStr,
+ sizeof(CFG_DEFAULT_KEYS_STRCT));
break;
/*case CFG_COUNTRY_STRING:
- memset( lp->countryString, 0, sizeof( lp->countryString ));
- memcpy( (void *)lp->countryString, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
+ memset(lp->countryString, 0, sizeof(lp->countryString));
+ memcpy((void *)lp->countryString, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
break;
*/
case CFG_DRIVER_ENABLE:
lp->driverEnable = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_WOLAS_ENABLE:
lp->wolasEnable = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_SET_WPA_AUTH_KEY_MGMT_SUITE:
lp->AuthKeyMgmtSuite = pLtv->u.u16[0];
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_DISASSOCIATE_ADDR:
- pLtv->u.u16[ETH_ALEN / 2] = CNV_INT_TO_LITTLE( pLtv->u.u16[ETH_ALEN / 2] );
+ pLtv->u.u16[ETH_ALEN / 2] = CNV_INT_TO_LITTLE(pLtv->u.u16[ETH_ALEN / 2]);
break;
case CFG_ADD_TKIP_DEFAULT_KEY:
case CFG_REMOVE_TKIP_DEFAULT_KEY:
/* Endian convert the Tx Key Information */
- pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_ADD_TKIP_MAPPED_KEY:
break;
@@ -1066,7 +1066,7 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
being sent to the card, as they require a call to
UIL_ACT_APPLY to take effect. Dynamic Entities will be sent
immediately */
- switch( pLtv->typ ) {
+ switch (pLtv->typ) {
case CFG_CNF_PORT_TYPE:
case CFG_CNF_OWN_MAC_ADDR:
case CFG_CNF_OWN_CHANNEL:
@@ -1084,14 +1084,14 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
#ifdef WARP
case CFG_CNF_TX_POW_LVL:
case CFG_CNF_CONNECTION_CNTL:
- //case CFG_PROBE_DATA_RATE:
-#endif // HERMES25
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
- //;?should we restore this to allow smaller memory footprint
+ /*case CFG_PROBE_DATA_RATE: */
+#endif /* HERMES25 */
+#if 1 /*;? (HCF_TYPE) & HCF_TYPE_AP */
+ /*;?should we restore this to allow smaller memory footprint */
case CFG_CNF_OWN_DTIM_PERIOD:
#ifdef WARP
- case CFG_CNF_OWN_BEACON_INTERVAL: // Own Beacon Interval
-#endif // WARP
+ case CFG_CNF_OWN_BEACON_INTERVAL: /* Own Beacon Interval */
+#endif /* WARP */
#ifdef USE_WDS
case CFG_CNF_WDS_ADDR1:
case CFG_CNF_WDS_ADDR2:
@@ -1106,8 +1106,8 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
case CFG_CNF_ENCRYPTION:
case CFG_CNF_AUTHENTICATION:
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
- //;?should we restore this to allow smaller memory footprint
+#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
+ /* ;?should we restore this to allow smaller memory footprint */
case CFG_CNF_EXCL_UNENCRYPTED:
case CFG_CNF_MCAST_RATE:
@@ -1115,68 +1115,54 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
#endif
case CFG_CNF_MICRO_WAVE:
- //case CFG_CNF_LOAD_BALANCING:
- //case CFG_CNF_MEDIUM_DISTRIBUTION:
- //case CFG_CNF_RX_ALL_GROUP_ADDRESS:
- //case CFG_CNF_COUNTRY_INFO:
- //case CFG_COUNTRY_STRING:
+ /* case CFG_CNF_LOAD_BALANCING: */
+ /* case CFG_CNF_MEDIUM_DISTRIBUTION: */
+ /* case CFG_CNF_RX_ALL_GROUP_ADDRESS: */
+ /* case CFG_CNF_COUNTRY_INFO: */
+ /* case CFG_COUNTRY_STRING: */
case CFG_AP_MODE:
case CFG_ENCRYPT_STRING:
- //case CFG_DRIVER_ENABLE:
+ /* case CFG_DRIVER_ENABLE: */
case CFG_WOLAS_ENABLE:
case CFG_MB_INFO:
case CFG_IFB:
break;
/* Deal with this dynamic MSF RID, as it's required for WPA */
case CFG_DRIVER_ENABLE:
- if( lp->driverEnable ) {
- //hcf_cntl_port( &( lp->hcfCtx ),
- // HCF_PORT_ENABLE | HCF_PORT_0 );
- // //hcf_cntl( &( lp->hcfCtx ),
- // // HCF_PORT_ENABLE | HCF_PORT_0 );
- //hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_ENABLE );
- // //hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_CONNECT );
-
- hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_ENABLE | HCF_PORT_0 );
- hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_CONNECT );
+ if (lp->driverEnable) {
+ hcf_cntl(&(lp->hcfCtx), HCF_CNTL_ENABLE | HCF_PORT_0);
+ hcf_cntl(&(lp->hcfCtx), HCF_CNTL_CONNECT);
} else {
- //hcf_cntl_port( &( lp->hcfCtx ),
- // HCF_PORT_DISABLE | HCF_PORT_0 );
- // //hcf_cntl( &( lp->hcfCtx ),
- // // HCF_PORT_DISABLE | HCF_PORT_0 );
- //hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_DISABLE );
- // //hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_DISCONNECT );
-
- hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_DISABLE | HCF_PORT_0 );
- hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_DISCONNECT );
+ hcf_cntl(&(lp->hcfCtx), HCF_CNTL_DISABLE | HCF_PORT_0);
+ hcf_cntl(&(lp->hcfCtx), HCF_CNTL_DISCONNECT);
}
break;
default:
- wl_act_int_off( lp );
+ wl_act_int_off(lp);
urq->result = hcf_put_info(&(lp->hcfCtx), (LTVP) pLtv);
- wl_act_int_on( lp );
+ wl_act_int_on(lp);
break;
}
- if( ltvAllocated ) {
- kfree( pLtv );
- }
+ if (ltvAllocated)
+ kfree(pLtv);
} else {
urq->result = UIL_FAILURE;
}
} else {
- DBG_ERROR( DbgInfo, "EPERM\n" );
+ DBG_ERROR(DbgInfo, "EPERM\n");
urq->result = UIL_FAILURE;
result = -EPERM;
}
} else {
- DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" );
+ DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // wvlan_uil_put_info
+} /* wvlan_uil_put_info */
+
/*============================================================================*/
/*******************************************************************************
@@ -1199,97 +1185,97 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
+int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
int i;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_uil_get_info" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_uil_get_info");
+ DBG_ENTER(DbgInfo);
- if( urq->hcfCtx == &( lp->hcfCtx )) {
- if(( urq->data != NULL ) && ( urq->len != 0 )) {
+ if (urq->hcfCtx == &(lp->hcfCtx)) {
+ if ((urq->data != NULL) && (urq->len != 0)) {
ltv_t *pLtv;
bool_t ltvAllocated = FALSE;
/* Make sure that we have at least a command and length */
- if( urq->len < ( sizeof( hcf_16 ) * 2 )) {
- urq->len = sizeof( lp->ltvRecord );
- DBG_ERROR( DbgInfo, "No Length/Type in LTV!!!\n" );
- DBG_ERROR( DbgInfo, "UIL_ERR_LEN\n" );
+ if (urq->len < (sizeof(hcf_16) * 2)) {
+ urq->len = sizeof(lp->ltvRecord);
+ DBG_ERROR(DbgInfo, "No Length/Type in LTV!!!\n");
+ DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
urq->result = UIL_ERR_LEN;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
/* Verify the user's LTV record header. */
- result = verify_area( VERIFY_READ, urq->data, sizeof( hcf_16 ) * 2 );
- if( result != 0 ) {
- DBG_ERROR( DbgInfo, "verify_area(), VERIFY_READ FAILED\n" );
+ result = verify_area(VERIFY_READ, urq->data, sizeof(hcf_16) * 2);
+ if (result != 0) {
+ DBG_ERROR(DbgInfo, "verify_area(), VERIFY_READ FAILED\n");
urq->result = UIL_FAILURE;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
/* Get only the command and length information. */
- result = copy_from_user( &( lp->ltvRecord ), urq->data, sizeof( hcf_16 ) * 2 );
+ result = copy_from_user(&(lp->ltvRecord), urq->data, sizeof(hcf_16) * 2);
/* Make sure the incoming LTV record length is within the bounds of
the IOCTL length. */
- if((( lp->ltvRecord.len + 1 ) * sizeof( hcf_16 )) > urq->len ) {
- DBG_ERROR( DbgInfo, "Incoming LTV too big\n" );
- urq->len = sizeof( lp->ltvRecord );
+ if (((lp->ltvRecord.len + 1) * sizeof(hcf_16)) > urq->len) {
+ DBG_ERROR(DbgInfo, "Incoming LTV too big\n");
+ urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
/* Determine if hcf_get_info() is needed or not */
- switch ( lp->ltvRecord.typ ) {
+ switch (lp->ltvRecord.typ) {
case CFG_NIC_IDENTITY:
- memcpy( &lp->ltvRecord.u.u8[0], &lp->NICIdentity, sizeof( lp->NICIdentity ));
+ memcpy(&lp->ltvRecord.u.u8[0], &lp->NICIdentity, sizeof(lp->NICIdentity));
break;
case CFG_PRI_IDENTITY:
- memcpy( &lp->ltvRecord.u.u8[0], &lp->PrimaryIdentity, sizeof( lp->PrimaryIdentity ));
+ memcpy(&lp->ltvRecord.u.u8[0], &lp->PrimaryIdentity, sizeof(lp->PrimaryIdentity));
break;
case CFG_AP_MODE:
- DBG_ERROR( DbgInfo, "set CFG_AP_MODE no longer supported, so is get useful ????\n" );
+ DBG_ERROR(DbgInfo, "set CFG_AP_MODE no longer supported, so is get useful ????\n");
lp->ltvRecord.u.u16[0] =
- CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP;
+ CNV_INT_TO_LITTLE(lp->hcfCtx.IFB_FWIdentity.comp_id) == COMP_ID_FW_AP;
break;
- //case CFG_DRV_INFO:
+ /* case CFG_DRV_INFO: */
case CFG_ENCRYPT_STRING:
case CFG_COUNTRY_STRING:
case CFG_DRIVER_ENABLE:
case CFG_WOLAS_ENABLE:
- // TODO: determine if we're going to support these
+ /* TODO: determine if we're going to support these */
urq->result = UIL_FAILURE;
break;
case CFG_DRV_INFO:
- DBG_TRACE( DbgInfo, "Intercept CFG_DRV_INFO\n" );
- result = cfg_driver_info( urq, lp );
+ DBG_TRACE(DbgInfo, "Intercept CFG_DRV_INFO\n");
+ result = cfg_driver_info(urq, lp);
break;
case CFG_DRV_IDENTITY:
- DBG_TRACE( DbgInfo, "Intercept CFG_DRV_IDENTITY\n" );
- result = cfg_driver_identity( urq, lp );
+ DBG_TRACE(DbgInfo, "Intercept CFG_DRV_IDENTITY\n");
+ result = cfg_driver_identity(urq, lp);
break;
case CFG_IFB:
/* IFB can be a security hole */
- if( !capable( CAP_NET_ADMIN )) {
+ if (!capable(CAP_NET_ADMIN)) {
result = -EPERM;
break;
}
/* Else fall through to the default */
- case CFG_FW_IDENTITY: // For Hermes-1, this is cached
+ case CFG_FW_IDENTITY: /* For Hermes-1, this is cached */
default:
/* Verify the user buffer */
- result = verify_area( VERIFY_WRITE, urq->data, urq->len );
- if( result != 0 ) {
- DBG_ERROR( DbgInfo, "verify_area(), VERIFY_WRITE FAILED\n" );
+ result = verify_area(VERIFY_WRITE, urq->data, urq->len);
+ if (result != 0) {
+ DBG_ERROR(DbgInfo, "verify_area(), VERIFY_WRITE FAILED\n");
urq->result = UIL_FAILURE;
break;
}
@@ -1297,43 +1283,43 @@ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
/* If the requested length is greater than the size of our local
LTV record, try to allocate it from the kernel stack.
Otherwise, we just use our local LTV record. */
- if( urq->len > sizeof( lp->ltvRecord )) {
+ if (urq->len > sizeof(lp->ltvRecord)) {
pLtv = kmalloc(urq->len, GFP_KERNEL);
if (pLtv != NULL) {
ltvAllocated = TRUE;
/* Copy the command/length information into the new buffer. */
- memcpy( pLtv, &( lp->ltvRecord ), sizeof( hcf_16 ) * 2 );
+ memcpy(pLtv, &(lp->ltvRecord), sizeof(hcf_16) * 2);
} else {
- urq->len = sizeof( lp->ltvRecord );
+ urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
- DBG_ERROR( DbgInfo, "kmalloc FAILED\n" );
- DBG_ERROR( DbgInfo, "UIL_ERR_LEN\n" );
+ DBG_ERROR(DbgInfo, "kmalloc FAILED\n");
+ DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
result = -ENOMEM;
break;
}
} else {
- pLtv = &( lp->ltvRecord );
+ pLtv = &(lp->ltvRecord);
}
- wl_act_int_off( lp );
- urq->result = hcf_get_info( &( lp->hcfCtx ), (LTVP) pLtv );
- wl_act_int_on( lp );
+ wl_act_int_off(lp);
+ urq->result = hcf_get_info(&(lp->hcfCtx), (LTVP) pLtv);
+ wl_act_int_on(lp);
- // Copy the LTV into the user's buffer.
- //copy_to_user( urq->data, pLtv, urq->len );
+ /* Copy the LTV into the user's buffer. */
+ /*copy_to_user(urq->data, pLtv, urq->len); */
- //if( ltvAllocated )
- //{
- // kfree( pLtv );
- //}
+ /*if(ltvAllocated)
+ {
+ kfree(pLtv);
+ }*/
- //urq->result = UIL_SUCCESS;
+ /* urq->result = UIL_SUCCESS; */
break;
}
/* Handle endian conversion of special fields */
- switch( lp->ltvRecord.typ ) {
+ switch (lp->ltvRecord.typ) {
/* simple int gets just need the first hcf_16 byte flipped */
case CFG_CNF_PORT_TYPE:
case CFG_CNF_OWN_CHANNEL:
@@ -1357,14 +1343,14 @@ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
#ifdef WARP
case CFG_CNF_TX_POW_LVL:
case CFG_CNF_CONNECTION_CNTL:
- case CFG_CNF_OWN_BEACON_INTERVAL: // Own Beacon Interval
- case CFG_COEXISTENSE_BEHAVIOUR: // Coexistence Behavior
- //case CFG_CNF_RX_ALL_GROUP_ADDRESS:
-#endif // HERMES25
+ case CFG_CNF_OWN_BEACON_INTERVAL: /* Own Beacon Interval */
+ case CFG_COEXISTENSE_BEHAVIOUR: /* Coexistence Behavior */
+ /*case CFG_CNF_RX_ALL_GROUP_ADDRESS: */
+#endif /* HERMES25 */
case CFG_CREATE_IBSS:
case CFG_RTS_THRH:
case CFG_PROMISCUOUS_MODE:
- //case CFG_WAKE_ON_LAN:
+ /*case CFG_WAKE_ON_LAN: */
case CFG_RTS_THRH0:
case CFG_RTS_THRH1:
case CFG_RTS_THRH2:
@@ -1393,29 +1379,29 @@ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
case CFG_MAX_RX_LIFETIME:
case CFG_CF_POLLABLE:
case CFG_PRIVACY_OPT_IMPLEMENTED:
- //case CFG_CURRENT_REMOTE_RATES:
- //case CFG_CURRENT_USED_RATES:
- //case CFG_CURRENT_SYSTEM_SCALE:
- //case CFG_CURRENT_TX_RATE1:
- //case CFG_CURRENT_TX_RATE2:
- //case CFG_CURRENT_TX_RATE3:
- //case CFG_CURRENT_TX_RATE4:
- //case CFG_CURRENT_TX_RATE5:
- //case CFG_CURRENT_TX_RATE6:
+ /* case CFG_CURRENT_REMOTE_RATES: */
+ /* case CFG_CURRENT_USED_RATES: */
+ /* case CFG_CURRENT_SYSTEM_SCALE: */
+ /* case CFG_CURRENT_TX_RATE1: */
+ /* case CFG_CURRENT_TX_RATE2: */
+ /* case CFG_CURRENT_TX_RATE3: */
+ /* case CFG_CURRENT_TX_RATE4: */
+ /* case CFG_CURRENT_TX_RATE5: */
+ /* case CFG_CURRENT_TX_RATE6: */
case CFG_PHY_TYPE:
case CFG_CUR_CHANNEL:
- //case CFG_CURRENT_POWER_STATE:
- //case CFG_CCAMODE:
- // lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] );
- // break;
+ /* case CFG_CURRENT_POWER_STATE: */
+ /* case CFG_CCAMODE: */
+ /* lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[0]); */
+ /* break; */
/* name string gets just need the first hcf_16 byte flipped (length of string) */
case CFG_CNF_OWN_SSID:
case CFG_CNF_OWN_NAME:
- //case CNF_DESIRED_SSID:
+ /* case CNF_DESIRED_SSID: */
case CFG_DESIRED_SSID:
case CFG_SCAN_SSID:
case CFG_CUR_SSID:
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] );
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[0]);
break;
/* non-length counted strings need no byte flipping */
case CFG_CNF_OWN_MAC_ADDR:
@@ -1432,14 +1418,14 @@ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
case CFG_NIC_MAC_ADDR:
case CFG_SUPPORTED_DATA_RATES: /* need to ensure we can treat this as a string */
break;
- //case CFG_CNF_COUNTRY_INFO: /* special case, see page 75 of 022486, Rev C. */
- //case CFG_CURRENT_COUNTRY_INFO: /* special case, see page 101 of 022486, Rev C. */
+ /* case CFG_CNF_COUNTRY_INFO: */ /* special case, see page 75 of 022486, Rev C. */
+ /* case CFG_CURRENT_COUNTRY_INFO: */ /* special case, see page 101 of 022486, Rev C. */
/*
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] );
- lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[3] );
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[0]);
+ lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[3]);
- for( i = 4; i < lp->ltvRecord.len; i++ ) {
- lp->ltvRecord.u.u16[i] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[i] );
+ for(i = 4; i < lp->ltvRecord.len; i++) {
+ lp->ltvRecord.u.u16[i] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[i]);
}
break;
*/
@@ -1448,57 +1434,56 @@ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
{
CFG_DEFAULT_KEYS_STRCT *pKeys = (CFG_DEFAULT_KEYS_STRCT *)&lp->ltvRecord.u.u8[0];
- pKeys[0].len = CNV_INT_TO_LITTLE( pKeys[0].len );
- pKeys[1].len = CNV_INT_TO_LITTLE( pKeys[1].len );
- pKeys[2].len = CNV_INT_TO_LITTLE( pKeys[2].len );
- pKeys[3].len = CNV_INT_TO_LITTLE( pKeys[3].len );
+ pKeys[0].len = CNV_INT_TO_LITTLE(pKeys[0].len);
+ pKeys[1].len = CNV_INT_TO_LITTLE(pKeys[1].len);
+ pKeys[2].len = CNV_INT_TO_LITTLE(pKeys[2].len);
+ pKeys[3].len = CNV_INT_TO_LITTLE(pKeys[3].len);
}
break;
case CFG_CNF_MCAST_RATE:
case CFG_TX_RATE_CNTL:
- case CFG_SUPPORTED_RATE_SET_CNTL: // Supported Rate Set Control
- case CFG_BASIC_RATE_SET_CNTL: // Basic Rate Set Control
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] );
- lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[1] );
+ case CFG_SUPPORTED_RATE_SET_CNTL: /* Supported Rate Set Control */
+ case CFG_BASIC_RATE_SET_CNTL: /* Basic Rate Set Control */
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[0]);
+ lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[1]);
break;
case CFG_DL_BUF:
case CFG_NIC_IDENTITY:
case CFG_COMMS_QUALITY:
case CFG_PCF_INFO:
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] );
- lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[1] );
- lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[2] );
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[0]);
+ lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[1]);
+ lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[2]);
break;
case CFG_FW_IDENTITY:
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] );
- lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[1] );
- lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[2] );
- lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[3] );
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[0]);
+ lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[1]);
+ lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[2]);
+ lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[3]);
break;
- //case CFG_HSI_SUP_RANGE:
+ /* case CFG_HSI_SUP_RANGE: */
case CFG_NIC_MFI_SUP_RANGE:
case CFG_NIC_CFI_SUP_RANGE:
case CFG_NIC_PROFILE:
case CFG_FW_SUP_RANGE:
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] );
- lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[1] );
- lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[2] );
- lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[3] );
- lp->ltvRecord.u.u16[4] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[4] );
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[0]);
+ lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[1]);
+ lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[2]);
+ lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[3]);
+ lp->ltvRecord.u.u16[4] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[4]);
break;
case CFG_MFI_ACT_RANGES_STA:
case CFG_CFI_ACT_RANGES_STA:
case CFG_CUR_SCALE_THRH:
case CFG_AUTHENTICATION_ALGORITHMS:
- for( i = 0; i < ( lp->ltvRecord.len - 1 ); i++ ) {
- lp->ltvRecord.u.u16[i] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[i] );
- }
+ for (i = 0; i < (lp->ltvRecord.len - 1); i++)
+ lp->ltvRecord.u.u16[i] = CNV_INT_TO_LITTLE(lp->ltvRecord.u.u16[i]);
break;
/* done at init time, and endian handled then */
case CFG_PRI_IDENTITY:
break;
case CFG_MB_INFO:
- //wvlanEndianTranslateMailbox( pLtv );
+ /* wvlanEndianTranslateMailbox(pLtv); */
break;
/* MSF and HCF RIDS */
case CFG_IFB:
@@ -1512,25 +1497,23 @@ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
break;
}
- // Copy the LTV into the user's buffer.
- copy_to_user( urq->data, &( lp->ltvRecord ), urq->len );
-
- if( ltvAllocated ) {
- kfree( &( lp->ltvRecord ));
- }
+ /* Copy the LTV into the user's buffer. */
+ copy_to_user(urq->data, &(lp->ltvRecord), urq->len);
+ if (ltvAllocated)
+ kfree(&(lp->ltvRecord));
urq->result = UIL_SUCCESS;
} else {
urq->result = UIL_FAILURE;
}
} else {
- DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" );
+ DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // wvlan_uil_get_info
+} /* wvlan_uil_get_info */
/*============================================================================*/
@@ -1556,41 +1539,41 @@ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int cfg_driver_info( struct uilreq *urq, struct wl_private *lp )
+int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "cfg_driver_info" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("cfg_driver_info");
+ DBG_ENTER(DbgInfo);
/* Make sure that user buffer can handle the driver information buffer */
- if( urq->len < sizeof( lp->driverInfo )) {
- urq->len = sizeof( lp->driverInfo );
+ if (urq->len < sizeof(lp->driverInfo)) {
+ urq->len = sizeof(lp->driverInfo);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
/* Verify the user buffer. */
- result = verify_area( VERIFY_WRITE, urq->data, sizeof( lp->driverInfo ));
- if( result != 0 ) {
+ result = verify_area(VERIFY_WRITE, urq->data, sizeof(lp->driverInfo));
+ if (result != 0) {
urq->result = UIL_FAILURE;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
lp->driverInfo.card_stat = lp->hcfCtx.IFB_CardStat;
- // Copy the driver information into the user's buffer.
+ /* Copy the driver information into the user's buffer. */
urq->result = UIL_SUCCESS;
- copy_to_user( urq->data, &( lp->driverInfo ), sizeof( lp->driverInfo ));
+ copy_to_user(urq->data, &(lp->driverInfo), sizeof(lp->driverInfo));
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // cfg_driver_info
+} /* cfg_driver_info */
/*============================================================================*/
@@ -1615,39 +1598,39 @@ int cfg_driver_info( struct uilreq *urq, struct wl_private *lp )
* UIL_ERR_xxx value otherwise
*
******************************************************************************/
-int cfg_driver_identity( struct uilreq *urq, struct wl_private *lp )
+int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_driver_identity" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_driver_identity");
+ DBG_ENTER(DbgInfo);
/* Make sure that user buffer can handle the driver identity structure. */
- if( urq->len < sizeof( lp->driverIdentity )) {
- urq->len = sizeof( lp->driverIdentity );
+ if (urq->len < sizeof(lp->driverIdentity)) {
+ urq->len = sizeof(lp->driverIdentity);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
/* Verify the user buffer. */
- result = verify_area( VERIFY_WRITE, urq->data, sizeof( lp->driverIdentity ));
- if( result != 0 ) {
+ result = verify_area(VERIFY_WRITE, urq->data, sizeof(lp->driverIdentity));
+ if (result != 0) {
urq->result = UIL_FAILURE;
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
}
/* Copy the driver identity into the user's buffer. */
urq->result = UIL_SUCCESS;
- copy_to_user( urq->data, &( lp->driverIdentity ), sizeof( lp->driverIdentity ));
+ copy_to_user(urq->data, &(lp->driverIdentity), sizeof(lp->driverIdentity));
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return result;
-} // cfg_driver_identity
+} /* cfg_driver_identity */
/*============================================================================*/
@@ -1684,27 +1667,27 @@ int wvlan_set_netname(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
int ret = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_set_netname" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_set_netname");
+ DBG_ENTER(DbgInfo);
- wl_lock(lp, &flags);
+ wl_lock(lp, &flags);
- memset( lp->NetworkName, 0, sizeof( lp->NetworkName ));
- memcpy( lp->NetworkName, extra, wrqu->data.length);
+ memset(lp->NetworkName, 0, sizeof(lp->NetworkName));
+ memcpy(lp->NetworkName, extra, wrqu->data.length);
/* Commit the adapter parameters */
wl_apply(lp);
- wl_unlock(lp, &flags);
+ wl_unlock(lp, &flags);
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return ret;
-} // wvlan_set_netname
+} /* wvlan_set_netname */
/*============================================================================*/
@@ -1734,41 +1717,41 @@ int wvlan_get_netname(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- int ret = 0;
- int status = -1;
- wvName_t *pName;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ int ret = 0;
+ int status = -1;
+ wvName_t *pName;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_get_netname" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_get_netname");
+ DBG_ENTER(DbgInfo);
- wl_lock(lp, &flags);
+ wl_lock(lp, &flags);
- /* Get the current network name */
- lp->ltvRecord.len = 1 + ( sizeof( *pName ) / sizeof( hcf_16 ));
- lp->ltvRecord.typ = CFG_CUR_SSID;
+ /* Get the current network name */
+ lp->ltvRecord.len = 1 + (sizeof(*pName) / sizeof(hcf_16));
+ lp->ltvRecord.typ = CFG_CUR_SSID;
- status = hcf_get_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
+ status = hcf_get_info(&(lp->hcfCtx), (LTVP)&(lp->ltvRecord));
- if( status == HCF_SUCCESS ) {
- pName = (wvName_t *)&( lp->ltvRecord.u.u32 );
+ if (status == HCF_SUCCESS) {
+ pName = (wvName_t *)&(lp->ltvRecord.u.u32);
memset(extra, '\0', HCF_MAX_NAME_LEN);
wrqu->data.length = pName->length;
- memcpy(extra, pName->name, pName->length);
- } else {
- ret = -EFAULT;
+ memcpy(extra, pName->name, pName->length);
+ } else {
+ ret = -EFAULT;
}
- wl_unlock(lp, &flags);
+ wl_unlock(lp, &flags);
- DBG_LEAVE( DbgInfo );
- return ret;
-} // wvlan_get_netname
+ DBG_LEAVE(DbgInfo);
+ return ret;
+} /* wvlan_get_netname */
/*============================================================================*/
@@ -1798,28 +1781,28 @@ int wvlan_set_station_nickname(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- int ret = 0;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ int ret = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_set_station_nickname" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_set_station_nickname");
+ DBG_ENTER(DbgInfo);
- wl_lock(lp, &flags);
+ wl_lock(lp, &flags);
- memset( lp->StationName, 0, sizeof( lp->StationName ));
+ memset(lp->StationName, 0, sizeof(lp->StationName));
- memcpy( lp->StationName, extra, wrqu->data.length);
+ memcpy(lp->StationName, extra, wrqu->data.length);
- /* Commit the adapter parameters */
- wl_apply( lp );
- wl_unlock(lp, &flags);
+ /* Commit the adapter parameters */
+ wl_apply(lp);
+ wl_unlock(lp, &flags);
- DBG_LEAVE( DbgInfo );
- return ret;
-} // wvlan_set_station_nickname
+ DBG_LEAVE(DbgInfo);
+ return ret;
+} /* wvlan_set_station_nickname */
/*============================================================================*/
@@ -1849,41 +1832,41 @@ int wvlan_get_station_nickname(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
int ret = 0;
int status = -1;
wvName_t *pName;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_get_station_nickname" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_get_station_nickname");
+ DBG_ENTER(DbgInfo);
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
- /* Get the current station name */
- lp->ltvRecord.len = 1 + ( sizeof( *pName ) / sizeof( hcf_16 ));
- lp->ltvRecord.typ = CFG_CNF_OWN_NAME;
+ /* Get the current station name */
+ lp->ltvRecord.len = 1 + (sizeof(*pName) / sizeof(hcf_16));
+ lp->ltvRecord.typ = CFG_CNF_OWN_NAME;
- status = hcf_get_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
+ status = hcf_get_info(&(lp->hcfCtx), (LTVP)&(lp->ltvRecord));
- if( status == HCF_SUCCESS ) {
- pName = (wvName_t *)&( lp->ltvRecord.u.u32 );
+ if (status == HCF_SUCCESS) {
+ pName = (wvName_t *)&(lp->ltvRecord.u.u32);
memset(extra, '\0', HCF_MAX_NAME_LEN);
wrqu->data.length = pName->length;
memcpy(extra, pName->name, pName->length);
- } else {
- ret = -EFAULT;
- }
+ } else {
+ ret = -EFAULT;
+ }
- wl_unlock(lp, &flags);
+ wl_unlock(lp, &flags);
-//out:
- DBG_LEAVE( DbgInfo );
+/* out: */
+ DBG_LEAVE(DbgInfo);
return ret;
-} // wvlan_get_station_nickname
+} /* wvlan_get_station_nickname */
/*============================================================================*/
@@ -1913,37 +1896,37 @@ int wvlan_set_porttype(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- int ret = 0;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ int ret = 0;
hcf_16 portType;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_set_porttype" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_set_porttype");
+ DBG_ENTER(DbgInfo);
- wl_lock(lp, &flags);
+ wl_lock(lp, &flags);
- /* Validate the new value */
- portType = *((__u32 *)extra);
+ /* Validate the new value */
+ portType = *((__u32 *)extra);
- if( !(( portType == 1 ) || ( portType == 3 ))) {
- ret = -EINVAL;
+ if (!((portType == 1) || (portType == 3))) {
+ ret = -EINVAL;
goto out_unlock;
- }
+ }
- lp->PortType = portType;
+ lp->PortType = portType;
- /* Commit the adapter parameters */
- wl_apply( lp );
+ /* Commit the adapter parameters */
+ wl_apply(lp);
out_unlock:
- wl_unlock(lp, &flags);
+ wl_unlock(lp, &flags);
-//out:
- DBG_LEAVE( DbgInfo );
- return ret;
+/* out: */
+ DBG_LEAVE(DbgInfo);
+ return ret;
}
/*============================================================================*/
@@ -1973,43 +1956,43 @@ int wvlan_get_porttype(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- int ret = 0;
- int status = -1;
- hcf_16 *pPortType;
- __u32 *pData = (__u32 *)extra;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ int ret = 0;
+ int status = -1;
+ hcf_16 *pPortType;
+ __u32 *pData = (__u32 *)extra;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_get_porttype" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_get_porttype");
+ DBG_ENTER(DbgInfo);
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
- /* Get the current port type */
- lp->ltvRecord.len = 1 + ( sizeof( *pPortType ) / sizeof( hcf_16 ));
- lp->ltvRecord.typ = CFG_CNF_PORT_TYPE;
+ /* Get the current port type */
+ lp->ltvRecord.len = 1 + (sizeof(*pPortType) / sizeof(hcf_16));
+ lp->ltvRecord.typ = CFG_CNF_PORT_TYPE;
- status = hcf_get_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
+ status = hcf_get_info(&(lp->hcfCtx), (LTVP)&(lp->ltvRecord));
- if( status == HCF_SUCCESS ) {
- pPortType = (hcf_16 *)&( lp->ltvRecord.u.u32 );
+ if (status == HCF_SUCCESS) {
+ pPortType = (hcf_16 *)&(lp->ltvRecord.u.u32);
- *pData = CNV_LITTLE_TO_INT( *pPortType );
- } else {
- ret = -EFAULT;
+ *pData = CNV_LITTLE_TO_INT(*pPortType);
+ } else {
+ ret = -EFAULT;
}
- wl_unlock(lp, &flags);
+ wl_unlock(lp, &flags);
-//out:
- DBG_LEAVE( DbgInfo );
- return ret;
-} // wvlan_get_porttype
+/* out: */
+ DBG_LEAVE(DbgInfo);
+ return ret;
+} /* wvlan_get_porttype */
/*============================================================================*/
-#endif // WIRELESS_EXT
+#endif /* WIRELESS_EXT */
@@ -2034,49 +2017,49 @@ int wvlan_get_porttype(struct net_device *dev,
* errno value otherwise
*
******************************************************************************/
-int wvlan_rts( struct rtsreq *rrq, __u32 io_base )
+int wvlan_rts(struct rtsreq *rrq, __u32 io_base)
{
int ioctl_ret = 0;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wvlan_rts" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("wvlan_rts");
+ DBG_ENTER(DbgInfo);
- DBG_PRINT( "io_base: 0x%08x\n", io_base );
+ DBG_PRINT("io_base: 0x%08x\n", io_base);
- switch( rrq->typ ) {
- case WL_IOCTL_RTS_READ:
+ switch (rrq->typ) {
+ case WL_IOCTL_RTS_READ:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- WL_IOCTL_RTS_READ\n");
- rrq->data[0] = IN_PORT_WORD( io_base + rrq->reg );
- DBG_TRACE( DbgInfo, " reg 0x%04x ==> 0x%04x\n", rrq->reg, CNV_LITTLE_TO_SHORT( rrq->data[0] ) );
+ rrq->data[0] = IN_PORT_WORD(io_base + rrq->reg);
+ DBG_TRACE(DbgInfo, " reg 0x%04x ==> 0x%04x\n", rrq->reg, CNV_LITTLE_TO_SHORT(rrq->data[0]));
break;
- case WL_IOCTL_RTS_WRITE:
+ case WL_IOCTL_RTS_WRITE:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- WL_IOCTL_RTS_WRITE\n");
- OUT_PORT_WORD( io_base + rrq->reg, rrq->data[0] );
- DBG_TRACE( DbgInfo, " reg 0x%04x <== 0x%04x\n", rrq->reg, CNV_LITTLE_TO_SHORT( rrq->data[0] ) );
+ OUT_PORT_WORD(io_base + rrq->reg, rrq->data[0]);
+ DBG_TRACE(DbgInfo, " reg 0x%04x <== 0x%04x\n", rrq->reg, CNV_LITTLE_TO_SHORT(rrq->data[0]));
break;
- case WL_IOCTL_RTS_BATCH_READ:
+ case WL_IOCTL_RTS_BATCH_READ:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- WL_IOCTL_RTS_BATCH_READ\n");
- IN_PORT_STRING_16( io_base + rrq->reg, rrq->data, rrq->len );
- DBG_TRACE( DbgInfo, " reg 0x%04x ==> %d bytes\n", rrq->reg, rrq->len * sizeof (__u16 ) );
+ IN_PORT_STRING_16(io_base + rrq->reg, rrq->data, rrq->len);
+ DBG_TRACE(DbgInfo, " reg 0x%04x ==> %d bytes\n", rrq->reg, rrq->len * sizeof(__u16));
break;
- case WL_IOCTL_RTS_BATCH_WRITE:
+ case WL_IOCTL_RTS_BATCH_WRITE:
DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- WL_IOCTL_RTS_BATCH_WRITE\n");
- OUT_PORT_STRING_16( io_base + rrq->reg, rrq->data, rrq->len );
- DBG_TRACE( DbgInfo, " reg 0x%04x <== %d bytes\n", rrq->reg, rrq->len * sizeof (__u16) );
+ OUT_PORT_STRING_16(io_base + rrq->reg, rrq->data, rrq->len);
+ DBG_TRACE(DbgInfo, " reg 0x%04x <== %d bytes\n", rrq->reg, rrq->len * sizeof(__u16));
break;
default:
- DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- UNSUPPORTED RTS CODE: 0x%X", rrq->typ );
+ DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- UNSUPPORTED RTS CODE: 0x%X", rrq->typ);
ioctl_ret = -EOPNOTSUPP;
break;
}
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return ioctl_ret;
-} // wvlan_rts
+} /* wvlan_rts */
/*============================================================================*/
#endif /* USE_RTS */
diff --git a/drivers/staging/wlags49_h2/wl_priv.h b/drivers/staging/wlags49_h2/wl_priv.h
index b647bfd90098..f35e79486428 100644
--- a/drivers/staging/wlags49_h2/wl_priv.h
+++ b/drivers/staging/wlags49_h2/wl_priv.h
@@ -70,52 +70,58 @@
#ifdef WIRELESS_EXT
-int wvlan_set_netname( struct net_device *, struct iw_request_info *, union iwreq_data *, char *extra );
+int wvlan_set_netname(struct net_device *, struct iw_request_info *,
+ union iwreq_data *, char *extra);
-int wvlan_get_netname( struct net_device *, struct iw_request_info *, union iwreq_data *, char *extra );
+int wvlan_get_netname(struct net_device *, struct iw_request_info *,
+ union iwreq_data *, char *extra);
-int wvlan_set_station_nickname( struct net_device *, struct iw_request_info *, union iwreq_data *, char *extra );
+int wvlan_set_station_nickname(struct net_device *, struct iw_request_info *,
+ union iwreq_data *, char *extra);
-int wvlan_get_station_nickname( struct net_device *, struct iw_request_info *, union iwreq_data *, char *extra );
+int wvlan_get_station_nickname(struct net_device *, struct iw_request_info *,
+ union iwreq_data *, char *extra);
-int wvlan_set_porttype( struct net_device *, struct iw_request_info *, union iwreq_data *, char *extra );
+int wvlan_set_porttype(struct net_device *, struct iw_request_info *,
+ union iwreq_data *, char *extra);
-int wvlan_get_porttype( struct net_device *, struct iw_request_info *, union iwreq_data *, char *extra );
+int wvlan_get_porttype(struct net_device *, struct iw_request_info *,
+ union iwreq_data *, char *extra);
-#endif // WIRELESS_EXT
+#endif /* WIRELESS_EXT */
#ifdef USE_UIL
-int wvlan_uil( struct uilreq *urq, struct wl_private *lp );
+int wvlan_uil(struct uilreq *urq, struct wl_private *lp);
-// int wvlan_uil_connect( struct uilreq *urq, struct wl_private *lp );
-// int wvlan_uil_disconnect( struct uilreq *urq, struct wl_private *lp );
-// int wvlan_uil_action( struct uilreq *urq, struct wl_private *lp );
-// int wvlan_uil_block( struct uilreq *urq, struct wl_private *lp );
-// int wvlan_uil_unblock( struct uilreq *urq, struct wl_private *lp );
-// int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp );
-// int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp );
-// int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp );
+/* int wvlan_uil_connect( struct uilreq *urq, struct wl_private *lp ); */
+/* int wvlan_uil_disconnect( struct uilreq *urq, struct wl_private *lp ); */
+/* int wvlan_uil_action( struct uilreq *urq, struct wl_private *lp ); */
+/* int wvlan_uil_block( struct uilreq *urq, struct wl_private *lp ); */
+/* int wvlan_uil_unblock( struct uilreq *urq, struct wl_private *lp ); */
+/* int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp ); */
+/* int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp ); */
+/* int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp ); */
-//int cfg_driver_info( struct uilreq *urq, struct wl_private *lp );
-//int cfg_driver_identity( struct uilreq *urq, struct wl_private *lp );
+/* int cfg_driver_info( struct uilreq *urq, struct wl_private *lp ); */
+/* int cfg_driver_identity( struct uilreq *urq, struct wl_private *lp ); */
-#endif // USE_UIL
+#endif /* USE_UIL */
#ifdef USE_RTS
-int wvlan_rts( struct rtsreq *rrq, __u32 io_base );
-int wvlan_rts_read( __u16 reg, __u16 *val, __u32 io_base );
-int wvlan_rts_write( __u16 reg, __u16 val, __u32 io_base );
-int wvlan_rts_batch_read( struct rtsreq *rrq, __u32 io_base );
-int wvlan_rts_batch_write( struct rtsreq *rrq, __u32 io_base );
+int wvlan_rts(struct rtsreq *rrq, __u32 io_base);
+int wvlan_rts_read(__u16 reg, __u16 *val, __u32 io_base);
+int wvlan_rts_write(__u16 reg, __u16 val, __u32 io_base);
+int wvlan_rts_batch_read(struct rtsreq *rrq, __u32 io_base);
+int wvlan_rts_batch_write(struct rtsreq *rrq, __u32 io_base);
-#endif // USE_RTS
+#endif /* USE_RTS */
-#endif // __WL_PRIV_H__
+#endif /* __WL_PRIV_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_profile.h b/drivers/staging/wlags49_h2/wl_profile.h
index f81df51d2216..d615c836f950 100644
--- a/drivers/staging/wlags49_h2/wl_profile.h
+++ b/drivers/staging/wlags49_h2/wl_profile.h
@@ -73,15 +73,15 @@
/*******************************************************************************
* function prototypes
******************************************************************************/
-void parse_config( struct net_device *dev );
+void parse_config(struct net_device *dev);
-int readline( int filedesc, char *buffer );
+int readline(int filedesc, char *buffer);
-void translate_option( char *buffer, struct wl_private *lp );
+void translate_option(char *buffer, struct wl_private *lp);
-int parse_mac_address( char *value, u_char *byte_array );
+int parse_mac_address(char *value, u_char *byte_array);
-void ParseConfigLine( char *pszLine, char **ppszLVal, char **ppszRVal );
+void ParseConfigLine(char *pszLine, char **ppszLVal, char **ppszRVal);
-#endif // __WL_PROFILE_H__
+#endif /* __WL_PROFILE_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_util.h b/drivers/staging/wlags49_h2/wl_util.h
index 946b1b64c46f..57bfd7fac6fa 100644
--- a/drivers/staging/wlags49_h2/wl_util.h
+++ b/drivers/staging/wlags49_h2/wl_util.h
@@ -65,32 +65,32 @@
/*******************************************************************************
* function prototypes
******************************************************************************/
-int dbm( int value );
+int dbm(int value);
-int is_valid_key_string( char *s );
+int is_valid_key_string(char *s);
-void key_string2key( char *ks, KEY_STRCT *key );
+void key_string2key(char *ks, KEY_STRCT *key);
-void wl_hcf_error( struct net_device *dev, int hcfStatus );
+void wl_hcf_error(struct net_device *dev, int hcfStatus);
-void wl_endian_translate_event( ltv_t *pLtv );
+void wl_endian_translate_event(ltv_t *pLtv);
-int wl_has_wep( IFBP ifbp );
+int wl_has_wep(IFBP ifbp);
-hcf_8 wl_parse_ds_ie( PROBE_RESP *probe_rsp );
-hcf_8 * wl_parse_wpa_ie( PROBE_RESP *probe_rsp, hcf_16 *length );
-hcf_8 * wl_print_wpa_ie( hcf_8 *buffer, int length );
+hcf_8 wl_parse_ds_ie(PROBE_RESP *probe_rsp);
+hcf_8 *wl_parse_wpa_ie(PROBE_RESP *probe_rsp, hcf_16 *length);
+hcf_8 *wl_print_wpa_ie(hcf_8 *buffer, int length);
int wl_get_tallies(struct wl_private *, CFG_HERMES_TALLIES_STRCT *);
-int wl_is_a_valid_chan( int channel );
-int wl_is_a_valid_freq( long frequency );
-long wl_get_freq_from_chan( int channel );
-int wl_get_chan_from_freq( long frequency );
+int wl_is_a_valid_chan(int channel);
+int wl_is_a_valid_freq(long frequency);
+long wl_get_freq_from_chan(int channel);
+int wl_get_chan_from_freq(long frequency);
-void wl_process_link_status( struct wl_private *lp );
-void wl_process_probe_response( struct wl_private *lp );
-void wl_process_updated_record( struct wl_private *lp );
-void wl_process_assoc_status( struct wl_private *lp );
-void wl_process_security_status( struct wl_private *lp );
+void wl_process_link_status(struct wl_private *lp);
+void wl_process_probe_response(struct wl_private *lp);
+void wl_process_updated_record(struct wl_private *lp);
+void wl_process_assoc_status(struct wl_private *lp);
+void wl_process_security_status(struct wl_private *lp);
-#endif // __WL_UTIL_H__
+#endif /* __WL_UTIL_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index f553366cccc5..c731ff2a6aa1 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -796,8 +796,6 @@ static int wireless_get_bssid(struct net_device *dev, struct iw_request_info *in
wl_act_int_off( lp );
- memset( &ap_addr->sa_data, 0, ETH_ALEN );
-
ap_addr->sa_family = ARPHRD_ETHER;
/* Assume AP mode here, which means the BSSID is our own MAC address. In
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 18c06a59c091..f1bce18ea828 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -424,7 +424,7 @@ int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
goto exit;
}
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
}
if (result)
@@ -638,8 +638,8 @@ int prism2_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
}
-int prism2_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type,
- int mbm)
+int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
wlandevice_t *wlandev = priv->wlandev;
@@ -665,7 +665,8 @@ exit:
return err;
}
-int prism2_get_tx_power(struct wiphy *wiphy, int *dbm)
+int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
wlandevice_t *wlandev = priv->wlandev;
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 5631ad0a7237..3dfa85ccc504 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -59,6 +59,7 @@
#define HFA384x_FIRMWARE_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#include <linux/if_ether.h>
+#include <linux/usb.h>
/*--- Mins & Maxs -----------------------------------*/
#define HFA384x_PORTID_MAX ((u16)7)
@@ -81,8 +82,8 @@
#define HFA384x_WEPFLAGS_EXCLUDE ((u16)BIT(1))
#define HFA384x_WEPFLAGS_DISABLE_TXCRYPT ((u16)BIT(4))
#define HFA384x_WEPFLAGS_DISABLE_RXCRYPT ((u16)BIT(7))
-#define HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM ((u16)3)
-#define HFA384x_PORTSTATUS_DISABLED ((u16)1)
+#define HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM ((u16)3)
+#define HFA384x_PORTSTATUS_DISABLED ((u16)1)
#define HFA384x_RATEBIT_1 ((u16)1)
#define HFA384x_RATEBIT_2 ((u16)2)
#define HFA384x_RATEBIT_5dot5 ((u16)4)
@@ -164,7 +165,7 @@
#define HFA384x_CMDCODE_DOWNLD ((u16)0x22)
/*--- Debugging Commands -----------------------------*/
-#define HFA384x_CMDCODE_MONITOR ((u16)(0x38))
+#define HFA384x_CMDCODE_MONITOR ((u16)(0x38))
#define HFA384x_MONITOR_ENABLE ((u16)(0x0b))
#define HFA384x_MONITOR_DISABLE ((u16)(0x0f))
@@ -275,15 +276,15 @@ API ENHANCEMENTS (NOT ALREADY IMPLEMENTED)
#define HFA384x_RID_CNFAUTHENTICATION ((u16)0xFC2A)
#define HFA384x_RID_CNFROAMINGMODE ((u16)0xFC2D)
#define HFA384x_RID_CNFAPBCNint ((u16)0xFC33)
-#define HFA384x_RID_CNFDBMADJUST ((u16)0xFC46)
-#define HFA384x_RID_CNFWPADATA ((u16)0xFC48)
+#define HFA384x_RID_CNFDBMADJUST ((u16)0xFC46)
+#define HFA384x_RID_CNFWPADATA ((u16)0xFC48)
#define HFA384x_RID_CNFBASICRATES ((u16)0xFCB3)
#define HFA384x_RID_CNFSUPPRATES ((u16)0xFCB4)
#define HFA384x_RID_CNFPASSIVESCANCTRL ((u16)0xFCBA)
-#define HFA384x_RID_TXPOWERMAX ((u16)0xFCBE)
+#define HFA384x_RID_TXPOWERMAX ((u16)0xFCBE)
#define HFA384x_RID_JOINREQUEST ((u16)0xFCE2)
#define HFA384x_RID_AUTHENTICATESTA ((u16)0xFCE3)
-#define HFA384x_RID_HOSTSCAN ((u16)0xFCE5)
+#define HFA384x_RID_HOSTSCAN ((u16)0xFCE5)
#define HFA384x_RID_CNFWEPDEFAULTKEY_LEN ((u16)6)
#define HFA384x_RID_CNFWEP128DEFAULTKEY_LEN ((u16)14)
@@ -311,7 +312,7 @@ PD Record codes
#define HFA384x_PDR_HFA3861_IFRF ((u16)0x0204)
#define HFA384x_PDR_HFA3861_CHCALSP ((u16)0x0300)
#define HFA384x_PDR_HFA3861_CHCALI ((u16)0x0301)
-#define HFA384x_PDR_MAX_TX_POWER ((u16)0x0302)
+#define HFA384x_PDR_MAX_TX_POWER ((u16)0x0302)
#define HFA384x_PDR_MASTER_CHAN_LIST ((u16)0x0303)
#define HFA384x_PDR_3842_NIC_CONFIG ((u16)0x0400)
#define HFA384x_PDR_USB_ID ((u16)0x0401)
@@ -322,10 +323,10 @@ PD Record codes
#define HFA384x_PDR_USB_POWER_TYPE ((u16)0x0407)
#define HFA384x_PDR_USB_MAX_POWER ((u16)0x0409)
#define HFA384x_PDR_USB_MANUFACTURER ((u16)0x0410)
-#define HFA384x_PDR_USB_PRODUCT ((u16)0x0411)
-#define HFA384x_PDR_ANT_DIVERSITY ((u16)0x0412)
-#define HFA384x_PDR_HFO_DELAY ((u16)0x0413)
-#define HFA384x_PDR_SCALE_THRESH ((u16)0x0414)
+#define HFA384x_PDR_USB_PRODUCT ((u16)0x0411)
+#define HFA384x_PDR_ANT_DIVERSITY ((u16)0x0412)
+#define HFA384x_PDR_HFO_DELAY ((u16)0x0413)
+#define HFA384x_PDR_SCALE_THRESH ((u16)0x0414)
#define HFA384x_PDR_HFA3861_MANF_TESTSP ((u16)0x0900)
#define HFA384x_PDR_HFA3861_MANF_TESTI ((u16)0x0901)
@@ -383,7 +384,7 @@ typedef struct hfa384x_caplevel {
/*-- Configuration Record: cnfAuthentication --*/
#define HFA384x_CNFAUTHENTICATION_OPENSYSTEM 0x0001
#define HFA384x_CNFAUTHENTICATION_SHAREDKEY 0x0002
-#define HFA384x_CNFAUTHENTICATION_LEAP 0x0004
+#define HFA384x_CNFAUTHENTICATION_LEAP 0x0004
/*--------------------------------------------------------------------
Configuration Record Structures:
@@ -575,8 +576,8 @@ Information Types
#define HFA384x_IT_AUTHREQ ((u16)0xF202UL)
#define HFA384x_IT_PSUSERCNT ((u16)0xF203UL)
#define HFA384x_IT_KEYIDCHANGED ((u16)0xF204UL)
-#define HFA384x_IT_ASSOCREQ ((u16)0xF205UL)
-#define HFA384x_IT_MICFAILURE ((u16)0xF206UL)
+#define HFA384x_IT_ASSOCREQ ((u16)0xF205UL)
+#define HFA384x_IT_MICFAILURE ((u16)0xF206UL)
/*--------------------------------------------------------------------
Information Frames Structures
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 750330f064f9..0039e082507d 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -351,6 +351,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
union p80211_hdr p80211_hdr;
struct p80211_metawep p80211_wep;
+ p80211_wep.data = NULL;
+
if (skb == NULL)
return NETDEV_TX_OK;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 4efa9bc0fcf0..d22db43e8031 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
/* SSID */
req->ssid.status = P80211ENUM_msgitem_status_data_ok;
req->ssid.data.len = le16_to_cpu(item->ssid.len);
- req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
+ req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);
memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
/* supported rates */
@@ -415,11 +415,14 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
break;
#define REQBASICRATE(N) \
- if ((count >= N) && DOT11_RATE5_ISBASIC_GET(item->supprates[(N)-1])) { \
- req->basicrate ## N .data = item->supprates[(N)-1]; \
- req->basicrate ## N .status = \
- P80211ENUM_msgitem_status_data_ok; \
- }
+ do { \
+ if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \
+ item->supprates[(N)-1])) { \
+ req->basicrate ## N .data = item->supprates[(N)-1]; \
+ req->basicrate ## N .status = \
+ P80211ENUM_msgitem_status_data_ok; \
+ } \
+ } while (0)
REQBASICRATE(1);
REQBASICRATE(2);
@@ -431,11 +434,13 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
REQBASICRATE(8);
#define REQSUPPRATE(N) \
- if (count >= N) { \
- req->supprate ## N .data = item->supprates[(N)-1]; \
- req->supprate ## N .status = \
- P80211ENUM_msgitem_status_data_ok; \
- }
+ do { \
+ if (count >= N) { \
+ req->supprate ## N .data = item->supprates[(N)-1]; \
+ req->supprate ## N .status = \
+ P80211ENUM_msgitem_status_data_ok; \
+ } \
+ } while (0)
REQSUPPRATE(1);
REQSUPPRATE(2);
@@ -1139,9 +1144,8 @@ int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
/* Enable the port */
result = hfa384x_drvr_enable(hw, 0);
if (result) {
- pr_debug
- ("failed to enable port to presniff setting, result=%d\n",
- result);
+ pr_debug("failed to enable port to presniff setting, result=%d\n",
+ result);
goto failed;
}
} else {
@@ -1181,18 +1185,16 @@ int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
hfa384x_drvr_stop(hw);
result = hfa384x_drvr_start(hw);
if (result) {
- pr_debug
- ("failed to restart the card for sniffing, result=%d\n",
- result);
+ pr_debug("failed to restart the card for sniffing, result=%d\n",
+ result);
goto failed;
}
} else {
/* Disable the port */
result = hfa384x_drvr_disable(hw, 0);
if (result) {
- pr_debug
- ("failed to enable port for sniffing, result=%d\n",
- result);
+ pr_debug("failed to enable port for sniffing, result=%d\n",
+ result);
goto failed;
}
}
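The REQBASICRATE/REQSUPPRATE hunks above wrap each multi-statement macro body in do { ... } while (0) so that an expansion followed by a semicolon parses as exactly one statement, which keeps the macros safe inside if/else. A minimal self-contained sketch of the failure mode the wrapper avoids (hypothetical FILL_* macro names, illustration only, not part of prism2mgmt.c):

/* Illustration only -- hypothetical macro names, not from the driver. */
#include <stdio.h>

#define FILL_BARE(v)  printf("a=%d\n", (v)); printf("b=%d\n", (v))
#define FILL_SAFE(v)  do { printf("a=%d\n", (v)); printf("b=%d\n", (v)); } while (0)

int main(void)
{
	int ok = 0;

	if (ok)
		FILL_SAFE(1);	/* expands to one statement; the else below binds correctly */
	else
		printf("skipped\n");

	/*
	 * With FILL_BARE the same construct breaks: the first printf() is
	 * guarded by the if, the second always executes, and the trailing
	 * ';' leaves the else with no matching if (a compile error).
	 */
	return 0;
}

The same reasoning applies to both macros in the hunk: the surrounding REQBASICRATE(1); ... REQSUPPRATE(8); call sites keep their trailing semicolons unchanged, and the do/while wrapper makes those semicolons harmless.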
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index e0f745de7e7a..801ac4053a7a 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -91,11 +91,10 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
unsigned short ModeIdIndex, index = 0;
unsigned short RefreshRateTableIndex = 0;
- unsigned short VRE, VBE, VRS, VBS, VDE, VT;
- unsigned short HRE, HBE, HRS, HBS, HDE, HT;
+ unsigned short VRE, VBE, VRS, VDE;
+ unsigned short HRE, HBE, HRS, HDE;
unsigned char sr_data, cr_data, cr_data2;
- unsigned long cr_data3;
- int A, B, C, D, E, F, temp, j;
+ int B, C, D, F, temp, j;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
if (!XGI_SearchModeID(ModeNo, &ModeIdIndex, XGI_Pr))
return 0;
@@ -105,25 +104,13 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
sr_data = XGI_CRT1Table[index].CR[5];
- cr_data = XGI_CRT1Table[index].CR[0];
-
- /* Horizontal total */
- HT = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x03) << 8);
- A = HT + 5;
-
- HDE = (XGI330_RefIndex[RefreshRateTableIndex].XRes >> 3) - 1;
- E = HDE + 1;
+ HDE = (XGI330_RefIndex[RefreshRateTableIndex].XRes >> 3);
cr_data = XGI_CRT1Table[index].CR[3];
/* Horizontal retrace (=sync) start */
HRS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0xC0) << 2);
- F = HRS - E - 3;
-
- cr_data = XGI_CRT1Table[index].CR[1];
-
- /* Horizontal blank start */
- HBS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x30) << 4);
+ F = HRS - HDE - 3;
sr_data = XGI_CRT1Table[index].CR[6];
@@ -138,10 +125,10 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
/* Horizontal retrace (=sync) end */
HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3);
- temp = HBE - ((E - 1) & 255);
+ temp = HBE - ((HDE - 1) & 255);
B = (temp > 0) ? temp : (temp + 256);
- temp = HRE - ((E + F + 3) & 63);
+ temp = HRE - ((HDE + F + 3) & 63);
C = (temp > 0) ? temp : (temp + 64);
D = B - F - C;
@@ -152,18 +139,9 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
sr_data = XGI_CRT1Table[index].CR[14];
- cr_data = XGI_CRT1Table[index].CR[8];
-
cr_data2 = XGI_CRT1Table[index].CR[9];
- /* Vertical total */
- VT = (cr_data & 0xFF) | ((unsigned short) (cr_data2 & 0x01) << 8)
- | ((unsigned short) (cr_data2 & 0x20) << 4)
- | ((unsigned short) (sr_data & 0x01) << 10);
- A = VT + 2;
-
- VDE = XGI330_RefIndex[RefreshRateTableIndex].YRes - 1;
- E = VDE + 1;
+ VDE = XGI330_RefIndex[RefreshRateTableIndex].YRes;
cr_data = XGI_CRT1Table[index].CR[10];
@@ -171,29 +149,20 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
VRS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x04) << 6)
| ((unsigned short) (cr_data2 & 0x80) << 2)
| ((unsigned short) (sr_data & 0x08) << 7);
- F = VRS + 1 - E;
-
- cr_data = XGI_CRT1Table[index].CR[12];
-
- cr_data3 = (XGI_CRT1Table[index].CR[14] & 0x80) << 5;
-
- /* Vertical blank start */
- VBS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x08) << 5)
- | ((unsigned short) (cr_data3 & 0x20) << 4)
- | ((unsigned short) (sr_data & 0x04) << 8);
+ F = VRS + 1 - VDE;
cr_data = XGI_CRT1Table[index].CR[13];
/* Vertical blank end */
VBE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x10) << 4);
- temp = VBE - ((E - 1) & 511);
+ temp = VBE - ((VDE - 1) & 511);
B = (temp > 0) ? temp : (temp + 512);
cr_data = XGI_CRT1Table[index].CR[11];
/* Vertical retrace (=sync) end */
VRE = (cr_data & 0x0f) | ((sr_data & 0x20) >> 1);
- temp = VRE - ((E + F - 1) & 31);
+ temp = VRE - ((VDE + F - 1) & 31);
C = (temp > 0) ? temp : (temp + 32);
D = B - F - C;
@@ -233,13 +202,14 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
return 1;
}
-static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
+void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
{
XGI_Pr->P3c4 = BaseAddr + 0x14;
XGI_Pr->P3d4 = BaseAddr + 0x24;
XGI_Pr->P3c0 = BaseAddr + 0x10;
XGI_Pr->P3ce = BaseAddr + 0x1e;
XGI_Pr->P3c2 = BaseAddr + 0x12;
+ XGI_Pr->P3cc = BaseAddr + 0x1c;
XGI_Pr->P3ca = BaseAddr + 0x1a;
XGI_Pr->P3c6 = BaseAddr + 0x16;
XGI_Pr->P3c7 = BaseAddr + 0x17;
@@ -1160,22 +1130,10 @@ static int XGIfb_release(struct fb_info *info, int user)
return 0;
}
+/* similar to sisfb_get_cmap_len */
static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var)
{
- int rc = 16;
-
- switch (var->bits_per_pixel) {
- case 8:
- rc = 256;
- break;
- case 16:
- rc = 16;
- break;
- case 32:
- rc = 16;
- break;
- }
- return rc;
+ return (var->bits_per_pixel == 8) ? 256 : 16;
}
static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -1362,12 +1320,6 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
/* Adapt RGB settings */
XGIfb_bpp_to_var(xgifb_info, var);
- /* Sanity check for offsets */
- if (var->xoffset < 0)
- var->xoffset = 0;
- if (var->yoffset < 0)
- var->yoffset = 0;
-
if (!XGIfb_ypan) {
if (var->xres != var->xres_virtual)
var->xres_virtual = var->xres;
@@ -1402,8 +1354,7 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
return -EINVAL;
if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0 || var->yoffset >= info->var.yres_virtual
- || var->xoffset)
+ if (var->yoffset >= info->var.yres_virtual || var->xoffset)
return -EINVAL;
} else if (var->xoffset + info->var.xres > info->var.xres_virtual
|| var->yoffset + info->var.yres
@@ -1838,7 +1789,7 @@ static int xgifb_probe(struct pci_dev *pdev,
if (!XGIInitNew(pdev))
dev_err(&pdev->dev, "XGIInitNew() failed!\n");
- xgifb_info->mtrr = (unsigned int) 0;
+ xgifb_info->mtrr = -1;
xgifb_info->hasVB = HASVB_NONE;
if ((xgifb_info->chip == XG20) ||
@@ -1957,6 +1908,7 @@ static int xgifb_probe(struct pci_dev *pdev,
if (xgifb_info->mode_idx < 0) {
dev_err(&pdev->dev, "No supported video mode found\n");
+ ret = -EINVAL;
goto error_1;
}
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 80547983759b..af50362395d5 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -67,7 +67,7 @@ struct xgifb_video_info {
unsigned long mmio_size;
void __iomem *mmio_vbase;
unsigned long vga_base;
- unsigned long mtrr;
+ int mtrr;
int video_bpp;
int video_cmap_len;
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 2b791c10eb15..df127e406952 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -131,22 +131,6 @@ static void XGINew_SetMemoryClock(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4,
0x30,
XGI340_ECLKData[pVBInfo->ram_type].SR30);
-
- /* When XG42 ECLK = MCLK = 207MHz, Set SR32 D[1:0] = 10b */
- /* Modify SR32 value, when MCLK=207MHZ, ELCK=250MHz,
- * Set SR32 D[1:0] = 10b */
- if (HwDeviceExtension->jChipType == XG42) {
- if ((pVBInfo->MCLKData[pVBInfo->ram_type].SR28 == 0x1C) &&
- (pVBInfo->MCLKData[pVBInfo->ram_type].SR29 == 0x01) &&
- (((XGI340_ECLKData[pVBInfo->ram_type].SR2E == 0x1C) &&
- (XGI340_ECLKData[pVBInfo->ram_type].SR2F == 0x01)) ||
- ((XGI340_ECLKData[pVBInfo->ram_type].SR2E == 0x22) &&
- (XGI340_ECLKData[pVBInfo->ram_type].SR2F == 0x01))))
- xgifb_reg_set(pVBInfo->P3c4,
- 0x32,
- ((unsigned char) xgifb_reg_get(
- pVBInfo->P3c4, 0x32) & 0xFC) | 0x02);
- }
}
static void XGINew_DDRII_Bootup_XG27(
@@ -413,11 +397,24 @@ static void XGINew_DDR2_DefaultRegister(
XGINew_DDR2_MRS_XG20(HwDeviceExtension, P3c4, pVBInfo);
}
+static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg,
+ u8 shift_factor, u8 mask1, u8 mask2)
+{
+ u8 j;
+ for (j = 0; j < 4; j++) {
+ temp2 |= (((seed >> (2 * j)) & 0x03) << shift_factor);
+ xgifb_reg_set(P3d4, reg, temp2);
+ xgifb_reg_get(P3d4, reg);
+ temp2 &= mask1;
+ temp2 += mask2;
+ }
+}
+
static void XGINew_SetDRAMDefaultRegister340(
struct xgi_hw_device_info *HwDeviceExtension,
unsigned long Port, struct vb_device_info *pVBInfo)
{
- unsigned char temp, temp1, temp2, temp3, i, j, k;
+ unsigned char temp, temp1, temp2, temp3, j, k;
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
@@ -426,54 +423,18 @@ static void XGINew_SetDRAMDefaultRegister340(
xgifb_reg_set(P3d4, 0x69, pVBInfo->CR40[6][pVBInfo->ram_type]);
xgifb_reg_set(P3d4, 0x6A, pVBInfo->CR40[7][pVBInfo->ram_type]);
- temp2 = 0;
- for (i = 0; i < 4; i++) {
- /* CR6B DQS fine tune delay */
- temp = XGI340_CR6B[pVBInfo->ram_type][i];
- for (j = 0; j < 4; j++) {
- temp1 = ((temp >> (2 * j)) & 0x03) << 2;
- temp2 |= temp1;
- xgifb_reg_set(P3d4, 0x6B, temp2);
- /* Insert read command for delay */
- xgifb_reg_get(P3d4, 0x6B);
- temp2 &= 0xF0;
- temp2 += 0x10;
- }
- }
+ /* CR6B DQS fine tune delay */
+ temp = 0xaa;
+ XGI_SetDRAM_Helper(P3d4, temp, 0, 0x6B, 2, 0xF0, 0x10);
- temp2 = 0;
- for (i = 0; i < 4; i++) {
- /* CR6E DQM fine tune delay */
- temp = 0;
- for (j = 0; j < 4; j++) {
- temp1 = ((temp >> (2 * j)) & 0x03) << 2;
- temp2 |= temp1;
- xgifb_reg_set(P3d4, 0x6E, temp2);
- /* Insert read command for delay */
- xgifb_reg_get(P3d4, 0x6E);
- temp2 &= 0xF0;
- temp2 += 0x10;
- }
- }
+ /* CR6E DQM fine tune delay */
+ XGI_SetDRAM_Helper(P3d4, 0, 0, 0x6E, 2, 0xF0, 0x10);
temp3 = 0;
for (k = 0; k < 4; k++) {
/* CR6E_D[1:0] select channel */
xgifb_reg_and_or(P3d4, 0x6E, 0xFC, temp3);
- temp2 = 0;
- for (i = 0; i < 8; i++) {
- /* CR6F DQ fine tune delay */
- temp = 0;
- for (j = 0; j < 4; j++) {
- temp1 = (temp >> (2 * j)) & 0x03;
- temp2 |= temp1;
- xgifb_reg_set(P3d4, 0x6F, temp2);
- /* Insert read command for delay */
- xgifb_reg_get(P3d4, 0x6F);
- temp2 &= 0xF8;
- temp2 += 0x08;
- }
- }
+ XGI_SetDRAM_Helper(P3d4, 0, 0, 0x6F, 0, 0xF8, 0x08);
temp3 += 0x01;
}
@@ -486,15 +447,7 @@ static void XGINew_SetDRAMDefaultRegister340(
temp2 = 0x80;
/* CR89 terminator type select */
- temp = 0;
- for (j = 0; j < 4; j++) {
- temp1 = (temp >> (2 * j)) & 0x03;
- temp2 |= temp1;
- xgifb_reg_set(P3d4, 0x89, temp2);
- xgifb_reg_get(P3d4, 0x89); /* Insert read command for delay */
- temp2 &= 0xF0;
- temp2 += 0x10;
- }
+ XGI_SetDRAM_Helper(P3d4, 0, temp2, 0x89, 0, 0xF0, 0x10);
temp = 0;
temp1 = temp & 0x03;
@@ -1286,36 +1239,14 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
- pVBInfo->BaseAddr = xgifb_info->vga_base;
-
if (pVBInfo->FBAddr == NULL) {
dev_dbg(&pdev->dev, "pVBInfo->FBAddr == 0\n");
return 0;
}
- if (pVBInfo->BaseAddr == 0) {
- dev_dbg(&pdev->dev, "pVBInfo->BaseAddr == 0\n");
- return 0;
- }
- outb(0x67, (pVBInfo->BaseAddr + 0x12)); /* 3c2 <- 67 ,ynlai */
-
- pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14;
- pVBInfo->P3d4 = pVBInfo->BaseAddr + 0x24;
- pVBInfo->P3c0 = pVBInfo->BaseAddr + 0x10;
- pVBInfo->P3ce = pVBInfo->BaseAddr + 0x1e;
- pVBInfo->P3c2 = pVBInfo->BaseAddr + 0x12;
- pVBInfo->P3ca = pVBInfo->BaseAddr + 0x1a;
- pVBInfo->P3c6 = pVBInfo->BaseAddr + 0x16;
- pVBInfo->P3c7 = pVBInfo->BaseAddr + 0x17;
- pVBInfo->P3c8 = pVBInfo->BaseAddr + 0x18;
- pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
- pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
- pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
- pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
- pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
- pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
+ XGIRegInit(pVBInfo, xgifb_info->vga_base);
+
+ outb(0x67, pVBInfo->P3c2);
if (HwDeviceExtension->jChipType < XG20)
/* Run XGI_GetVBType before InitTo330Pointer */
@@ -1410,7 +1341,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
xgifb_reg_and_or(pVBInfo->Part0Port, 0x3F, 0xEF, 0x00);
xgifb_reg_set(pVBInfo->Part1Port, 0x00, 0x00);
/* chk if BCLK>=100MHz */
- temp1 = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x7B);
+ temp1 = xgifb_reg_get(pVBInfo->P3d4, 0x7B);
temp = (unsigned char) ((temp1 >> 4) & 0x0F);
xgifb_reg_set(pVBInfo->Part1Port,
diff --git a/drivers/staging/xgifb/vb_init.h b/drivers/staging/xgifb/vb_init.h
index d54898322548..24573026a7c0 100644
--- a/drivers/staging/xgifb/vb_init.h
+++ b/drivers/staging/xgifb/vb_init.h
@@ -1,5 +1,6 @@
#ifndef _VBINIT_
#define _VBINIT_
extern unsigned char XGIInitNew(struct pci_dev *pdev);
+extern void XGIRegInit(struct vb_device_info *, unsigned long);
#endif
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index d723a2571995..dfa5303379e9 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -2,6 +2,7 @@
#include "XGIfb.h"
#include "vb_def.h"
+#include "vb_init.h"
#include "vb_util.h"
#include "vb_table.h"
#include "vb_setmode.h"
@@ -63,29 +64,15 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- unsigned char tempah, SRdata;
- unsigned short i, modeflag;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
+ unsigned char SRdata, i;
xgifb_reg_set(pVBInfo->P3c4, 0x00, 0x03); /* Set SR0 */
- tempah = XGI330_StandTable.SR[0];
-
- i = XGI_SetCRT2ToLCDA;
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- tempah |= 0x01;
- } else if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD)) {
- if (pVBInfo->VBInfo & SetInSlaveMode)
- tempah |= 0x01;
- }
- tempah |= 0x20; /* screen off */
- xgifb_reg_set(pVBInfo->P3c4, 0x01, tempah); /* Set SR1 */
-
- for (i = 02; i <= 04; i++) {
- /* Get SR2,3,4 from file */
- SRdata = XGI330_StandTable.SR[i - 1];
- xgifb_reg_set(pVBInfo->P3c4, i, SRdata); /* Set SR2 3 4 */
+ for (i = 0; i < 4; i++) {
+ /* Get SR1,2,3,4 from file */
+ /* SR1 is with screen off 0x20 */
+ SRdata = XGI330_StandTable.SR[i];
+ xgifb_reg_set(pVBInfo->P3c4, i+1, SRdata); /* Set SR 1 2 3 4 */
}
}
@@ -95,7 +82,7 @@ static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
unsigned char CRTCdata;
unsigned short i;
- CRTCdata = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
+ CRTCdata = xgifb_reg_get(pVBInfo->P3d4, 0x11);
CRTCdata &= 0x7f;
xgifb_reg_set(pVBInfo->P3d4, 0x11, CRTCdata); /* Unlock CRTC */
@@ -152,7 +139,7 @@ static void XGI_SetGRCRegs(struct vb_device_info *pVBInfo)
}
if (pVBInfo->ModeType > ModeVGA) {
- GRdata = (unsigned char) xgifb_reg_get(pVBInfo->P3ce, 0x05);
+ GRdata = xgifb_reg_get(pVBInfo->P3ce, 0x05);
GRdata &= 0xBF; /* 256 color disable */
xgifb_reg_set(pVBInfo->P3ce, 0x05, GRdata);
}
@@ -300,7 +287,7 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
unsigned short i, j;
/* unlock cr0-7 */
- data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
+ data = xgifb_reg_get(pVBInfo->P3d4, 0x11);
data &= 0x7F;
xgifb_reg_set(pVBInfo->P3d4, 0x11, data);
@@ -317,7 +304,7 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i + 6), data);
}
- j = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x0e);
+ j = xgifb_reg_get(pVBInfo->P3c4, 0x0e);
j &= 0x1F;
data = pVBInfo->TimingH.data[7];
data &= 0xE0;
@@ -325,17 +312,16 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
xgifb_reg_set(pVBInfo->P3c4, 0x0e, data);
if (HwDeviceExtension->jChipType >= XG20) {
- data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x04);
+ data = xgifb_reg_get(pVBInfo->P3d4, 0x04);
data = data - 1;
xgifb_reg_set(pVBInfo->P3d4, 0x04, data);
- data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x05);
+ data = xgifb_reg_get(pVBInfo->P3d4, 0x05);
data1 = data;
data1 &= 0xE0;
data &= 0x1F;
if (data == 0) {
pushax = data;
- data = (unsigned char) xgifb_reg_get(pVBInfo->P3c4,
- 0x0c);
+ data = xgifb_reg_get(pVBInfo->P3c4, 0x0c);
data &= 0xFB;
xgifb_reg_set(pVBInfo->P3c4, 0x0c, data);
data = pushax;
@@ -343,7 +329,7 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
data = data - 1;
data |= data1;
xgifb_reg_set(pVBInfo->P3d4, 0x05, data);
- data = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x0e);
+ data = xgifb_reg_get(pVBInfo->P3c4, 0x0e);
data = data >> 5;
data = data + 3;
if (data > 7)
@@ -375,7 +361,7 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x11), data);
}
- j = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x0a);
+ j = xgifb_reg_get(pVBInfo->P3c4, 0x0a);
j &= 0xC0;
data = pVBInfo->TimingV.data[6];
data &= 0x3F;
@@ -391,7 +377,7 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
if (i)
data |= 0x80;
- j = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x09);
+ j = xgifb_reg_get(pVBInfo->P3d4, 0x09);
j &= 0x5F;
data |= j;
xgifb_reg_set(pVBInfo->P3d4, 0x09, data);
@@ -409,7 +395,7 @@ static void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
index = index & IndexMask;
- data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
+ data = xgifb_reg_get(pVBInfo->P3d4, 0x11);
data &= 0x7F;
xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
@@ -640,10 +626,7 @@ static void xgifb_set_lcd(int chip_id,
unsigned short RefreshRateTableIndex,
unsigned short ModeNo)
{
- unsigned short Data, Temp;
- unsigned short XGI_P3cc;
-
- XGI_P3cc = pVBInfo->P3cc;
+ unsigned short temp;
xgifb_reg_set(pVBInfo->P3d4, 0x2E, 0x00);
xgifb_reg_set(pVBInfo->P3d4, 0x2F, 0x00);
@@ -651,8 +634,8 @@ static void xgifb_set_lcd(int chip_id,
xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x00);
if (chip_id == XG27) {
- Temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- if ((Temp & 0x03) == 0) { /* dual 12 */
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
+ if ((temp & 0x03) == 0) { /* dual 12 */
xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x13);
xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x13);
}
@@ -661,8 +644,8 @@ static void xgifb_set_lcd(int chip_id,
if (chip_id == XG27) {
XGI_SetXG27FPBits(pVBInfo);
} else {
- Temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- if (Temp & 0x01) {
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
+ if (temp & 0x01) {
/* 18 bits FP */
xgifb_reg_or(pVBInfo->P3c4, 0x06, 0x40);
xgifb_reg_or(pVBInfo->P3c4, 0x09, 0x40);
@@ -674,11 +657,11 @@ static void xgifb_set_lcd(int chip_id,
xgifb_reg_and(pVBInfo->P3c4, 0x30, ~0x20); /* Hsync polarity */
xgifb_reg_and(pVBInfo->P3c4, 0x35, ~0x80); /* Vsync polarity */
- Data = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
- if (Data & 0x4000)
+ temp = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
+ if (temp & 0x4000)
/* Hsync polarity */
xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
- if (Data & 0x8000)
+ if (temp & 0x8000)
/* Vsync polarity */
xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
}
@@ -757,8 +740,8 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
tempax -= 1;
tempbx -= 1;
tempcx = tempax;
- temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
- data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x11);
+ data = xgifb_reg_get(pVBInfo->P3d4, 0x11);
data &= 0x7F;
xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short) (tempcx & 0xff));
@@ -775,7 +758,7 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
tempax |= 0x40;
xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x42, tempax);
- data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x07);
+ data = xgifb_reg_get(pVBInfo->P3d4, 0x07);
data &= 0xFF;
tempax = 0;
@@ -876,62 +859,47 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
- unsigned short CRT2Index, VCLKIndex;
- unsigned short modeflag, resinfo;
+ unsigned short VCLKIndex, modeflag;
/* si+Ext_ResInfo */
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- CRT2Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
- if (pVBInfo->IF_DEF_LVDS == 0) {
- CRT2Index = CRT2Index >> 6; /* for LCD */
- if (pVBInfo->VBInfo &
- (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
- if (pVBInfo->LCDResInfo != Panel_1024x768)
- /* LCDXlat2VCLK */
- VCLKIndex = VCLK108_2_315 + 5;
- else
- VCLKIndex = VCLK65_315 + 2; /* LCDXlat1VCLK */
- } else if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if (pVBInfo->SetFlag & RPLLDIV2XO)
- VCLKIndex = TVCLKBASE_315_25 + HiTVVCLKDIV2;
- else
- VCLKIndex = TVCLKBASE_315_25 + HiTVVCLK;
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
+ if (pVBInfo->LCDResInfo != Panel_1024x768)
+ /* LCDXlat2VCLK */
+ VCLKIndex = VCLK108_2_315 + 5;
+ else
+ VCLKIndex = VCLK65_315 + 2; /* LCDXlat1VCLK */
+ } else if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->SetFlag & RPLLDIV2XO)
+ VCLKIndex = TVCLKBASE_315_25 + HiTVVCLKDIV2;
+ else
+ VCLKIndex = TVCLKBASE_315_25 + HiTVVCLK;
- if (pVBInfo->SetFlag & TVSimuMode) {
- if (modeflag & Charx8Dot) {
- VCLKIndex = TVCLKBASE_315_25 +
- HiTVSimuVCLK;
- } else {
- VCLKIndex = TVCLKBASE_315_25 +
- HiTVTextVCLK;
- }
+ if (pVBInfo->SetFlag & TVSimuMode) {
+ if (modeflag & Charx8Dot) {
+ VCLKIndex = TVCLKBASE_315_25 + HiTVSimuVCLK;
+ } else {
+ VCLKIndex = TVCLKBASE_315_25 + HiTVTextVCLK;
}
+ }
- /* 301lv */
- if (pVBInfo->VBType & VB_SIS301LV) {
- if (pVBInfo->SetFlag & RPLLDIV2XO)
- VCLKIndex = YPbPr525iVCLK_2;
- else
- VCLKIndex = YPbPr525iVCLK;
- }
- } else if (pVBInfo->VBInfo & SetCRT2ToTV) {
+ /* 301lv */
+ if (pVBInfo->VBType & VB_SIS301LV) {
if (pVBInfo->SetFlag & RPLLDIV2XO)
- VCLKIndex = TVCLKBASE_315_25 + TVVCLKDIV2;
+ VCLKIndex = YPbPr525iVCLK_2;
else
- VCLKIndex = TVCLKBASE_315_25 + TVVCLK;
- } else { /* for CRT2 */
- /* di+Ext_CRTVCLK */
- VCLKIndex = XGI330_RefIndex[RefreshRateTableIndex].
- Ext_CRTVCLK;
- VCLKIndex &= IndexMask;
+ VCLKIndex = YPbPr525iVCLK;
}
- } else if ((pVBInfo->LCDResInfo == Panel_800x600) ||
- (pVBInfo->LCDResInfo == Panel_320x480)) { /* LVDS */
- VCLKIndex = VCLK40; /* LVDSXlat1VCLK */
- } else {
- VCLKIndex = VCLK65_315 + 2; /* LVDSXlat2VCLK, LVDSXlat3VCLK */
+ } else if (pVBInfo->VBInfo & SetCRT2ToTV) {
+ if (pVBInfo->SetFlag & RPLLDIV2XO)
+ VCLKIndex = TVCLKBASE_315_25 + TVVCLKDIV2;
+ else
+ VCLKIndex = TVCLKBASE_315_25 + TVVCLK;
+ } else { /* for CRT2 */
+ /* di+Ext_CRTVCLK */
+ VCLKIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
+ VCLKIndex &= IndexMask;
}
return VCLKIndex;
@@ -1103,10 +1071,8 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
data = 0x0048;
}
- data2 = data & 0x00FF;
- xgifb_reg_and_or(pVBInfo->P3d4, 0x19, 0xFF, data2);
- data2 = (data & 0xFF00) >> 8;
- xgifb_reg_and_or(pVBInfo->P3d4, 0x19, 0xFC, data2);
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x19, 0xFF, data);
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x19, 0xFC, 0);
if (modeflag & HalfDCLK)
xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xF7, 0x08);
@@ -1389,14 +1355,10 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
- unsigned char index;
unsigned short i;
struct XGI_LVDSCRT1HDataStruct const *LCDPtr = NULL;
struct XGI_LVDSCRT1VDataStruct const *LCDPtr1 = NULL;
- index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
- index = index & IndexMask;
-
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr = XGI_GetLcdPtr(xgifb_epllcd_crt1_h, ModeNo, ModeIdIndex,
RefreshRateTableIndex, pVBInfo);
@@ -1496,18 +1458,11 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
{
unsigned short tempbx, tempax, tempcx, tempdx, push1, push2, modeflag;
unsigned long temp, temp1, temp2, temp3, push3;
- struct XGI_LCDDesStruct const *LCDPtr = NULL;
struct XGI330_LCDDataDesStruct2 const *LCDPtr1 = NULL;
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- LCDPtr1 = XGI_GetLcdPtr(XGI_EPLLCDDesDataPtr, ModeNo,
- ModeIdIndex, RefreshRateTableIndex,
- pVBInfo);
- else
- LCDPtr = XGI_GetLcdPtr(XGI_EPLLCDDesDataPtr, ModeNo,
- ModeIdIndex, RefreshRateTableIndex,
- pVBInfo);
+ LCDPtr1 = XGI_GetLcdPtr(XGI_EPLLCDDesDataPtr, ModeNo, ModeIdIndex,
+ RefreshRateTableIndex, pVBInfo);
XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
push1 = tempbx;
@@ -1539,10 +1494,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = pVBInfo->HT;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempbx = LCDPtr1->LCDHDES;
- else
- tempbx = LCDPtr->LCDHDES;
+ tempbx = LCDPtr1->LCDHDES;
tempcx = pVBInfo->HDE;
tempbx = tempbx & 0x0fff;
@@ -1563,10 +1515,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = pVBInfo->HT;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempbx = LCDPtr1->LCDHRS;
- else
- tempbx = LCDPtr->LCDHRS;
+ tempbx = LCDPtr1->LCDHRS;
tempcx = push2;
@@ -1591,10 +1540,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
(unsigned short) (tempbx & 0xff));
tempax = pVBInfo->VT;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempbx = LCDPtr1->LCDVDES;
- else
- tempbx = LCDPtr->LCDVDES;
+ tempbx = LCDPtr1->LCDVDES;
tempcx = pVBInfo->VDE;
tempbx = tempbx & 0x0fff;
@@ -1615,10 +1561,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
| tempbx));
tempax = pVBInfo->VT;
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempbx = LCDPtr1->LCDVRS;
- else
- tempbx = LCDPtr->LCDVRS;
+ tempbx = LCDPtr1->LCDVRS;
tempcx = push1;
@@ -1835,14 +1778,7 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
}
} /* {End of VB} */
- tempal = (unsigned char) inb((pVBInfo->P3ca + 0x02));
- tempal = tempal >> 2;
- tempal &= 0x03;
-
- /* for Dot8 Scaling LCD */
- if ((pVBInfo->LCDInfo & EnableScalingLCD) && (modeflag & Charx8Dot))
- tempal = tempal ^ tempal; /* ; set to VCLK25MHz always */
-
+ inb((pVBInfo->P3ca + 0x02));
tempal = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
return tempal;
}
@@ -2050,40 +1986,28 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->VBType &
- (VB_SIS302B |
- VB_SIS301LV |
- VB_SIS302LV |
- VB_XGI301C)) {
- if (temp & EnableDualEdge) {
- tempbx |= SetCRT2ToDualEdge;
- if (temp & SetToLCDA)
- tempbx |= XGI_SetCRT2ToLCDA;
- }
+ if (pVBInfo->VBType & (VB_SIS302B | VB_SIS301LV | VB_SIS302LV |
+ VB_XGI301C)) {
+ if (temp & EnableDualEdge) {
+ tempbx |= SetCRT2ToDualEdge;
+ if (temp & SetToLCDA)
+ tempbx |= XGI_SetCRT2ToLCDA;
}
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (((pVBInfo->IF_DEF_LVDS == 0) &&
- ((pVBInfo->VBType & VB_SIS301LV) ||
- (pVBInfo->VBType & VB_SIS302LV) ||
- (pVBInfo->VBType & VB_XGI301C)))) {
+ if (pVBInfo->VBType & (VB_SIS301LV|VB_SIS302LV|VB_XGI301C)) {
if (temp & SetYPbPr) {
if (pVBInfo->IF_DEF_HiVision == 1) {
- /* shampoo add for new
- * scratch */
- temp = xgifb_reg_get(
- pVBInfo->P3d4,
- 0x35);
+ /* shampoo add for new scratch */
+ temp = xgifb_reg_get(pVBInfo->P3d4,
+ 0x35);
temp &= YPbPrMode;
tempbx |= SetCRT2ToHiVision;
if (temp != YPbPrMode1080i) {
- tempbx &=
- (~SetCRT2ToHiVision);
- tempbx |=
- SetCRT2ToYPbPr525750;
+ tempbx &= (~SetCRT2ToHiVision);
+ tempbx |= SetCRT2ToYPbPr525750;
}
}
}
@@ -2092,19 +2016,15 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = push; /* restore CR31 */
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (pVBInfo->IF_DEF_HiVision == 1)
- temp = 0x09FC;
- else
- temp = 0x097C;
- } else if (pVBInfo->IF_DEF_HiVision == 1) {
- temp = 0x01FC;
- } else {
- temp = 0x017C;
- }
- } else { /* 3rd party chip */
- temp = SetCRT2ToLCD;
+ if (pVBInfo->IF_DEF_YPbPr == 1) {
+ if (pVBInfo->IF_DEF_HiVision == 1)
+ temp = 0x09FC;
+ else
+ temp = 0x097C;
+ } else if (pVBInfo->IF_DEF_HiVision == 1) {
+ temp = 0x01FC;
+ } else {
+ temp = 0x017C;
}
if (!(tempbx & temp)) {
@@ -2115,14 +2035,11 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->VBType & VB_NoLCD)) {
if (tempbx & XGI_SetCRT2ToLCDA) {
if (tempbx & SetSimuScanMode)
- tempbx &= (~(SetCRT2ToLCD |
- SetCRT2ToRAMDAC |
+ tempbx &= (~(SetCRT2ToLCD | SetCRT2ToRAMDAC |
SwitchCRT2));
else
- tempbx &= (~(SetCRT2ToLCD |
- SetCRT2ToRAMDAC |
- SetCRT2ToTV |
- SwitchCRT2));
+ tempbx &= (~(SetCRT2ToLCD | SetCRT2ToRAMDAC |
+ SetCRT2ToTV | SwitchCRT2));
}
}
@@ -2131,49 +2048,38 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(tempbx & (SwitchCRT2 | SetSimuScanMode))) {
if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
if (tempbx & SetCRT2ToRAMDAC) {
- tempbx &= (0xFF00 |
- SetCRT2ToRAMDAC |
- SwitchCRT2 |
- SetSimuScanMode);
+ tempbx &= (0xFF00 | SetCRT2ToRAMDAC |
+ SwitchCRT2 | SetSimuScanMode);
tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
} else {
- tempbx &= (~(SetCRT2ToRAMDAC |
- SetCRT2ToLCD |
+ tempbx &= (~(SetCRT2ToRAMDAC | SetCRT2ToLCD |
SetCRT2ToTV));
}
}
if (!(pVBInfo->VBType & VB_NoLCD)) {
if (tempbx & SetCRT2ToLCD) {
- tempbx &= (0xFF00 |
- SetCRT2ToLCD |
- SwitchCRT2 |
+ tempbx &= (0xFF00 | SetCRT2ToLCD | SwitchCRT2 |
SetSimuScanMode);
tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
}
if (tempbx & SetCRT2ToSCART) {
- tempbx &= (0xFF00 |
- SetCRT2ToSCART |
- SwitchCRT2 |
+ tempbx &= (0xFF00 | SetCRT2ToSCART | SwitchCRT2 |
SetSimuScanMode);
tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
if (tempbx & SetCRT2ToYPbPr525750)
- tempbx &= (0xFF00 |
- SwitchCRT2 |
- SetSimuScanMode);
+ tempbx &= (0xFF00 | SwitchCRT2 | SetSimuScanMode);
}
if (pVBInfo->IF_DEF_HiVision == 1) {
if (tempbx & SetCRT2ToHiVision)
- tempbx &= (0xFF00 |
- SetCRT2ToHiVision |
- SwitchCRT2 |
+ tempbx &= (0xFF00 | SetCRT2ToHiVision | SwitchCRT2 |
SetSimuScanMode);
}
@@ -2183,19 +2089,15 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
}
if (!(tempbx & DisableCRT2Display)) {
- if ((!(tempbx & DriverMode)) ||
- (!(modeflag & CRT2Mode))) {
+ if ((!(tempbx & DriverMode)) || (!(modeflag & CRT2Mode))) {
if (!(tempbx & XGI_SetCRT2ToLCDA))
- tempbx |= (SetInSlaveMode |
- SetSimuScanMode);
+ tempbx |= (SetInSlaveMode | SetSimuScanMode);
}
/* LCD+TV can't support in slave mode
* (Force LCDA+TV->LCDB) */
- if ((tempbx & SetInSlaveMode) &&
- (tempbx & XGI_SetCRT2ToLCDA)) {
- tempbx ^= (SetCRT2ToLCD |
- XGI_SetCRT2ToLCDA |
+ if ((tempbx & SetInSlaveMode) && (tempbx & XGI_SetCRT2ToLCDA)) {
+ tempbx ^= (SetCRT2ToLCD | XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge);
pVBInfo->SetFlag |= ReserveTVOption;
}
@@ -2207,36 +2109,28 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- unsigned short temp, tempbx = 0, resinfo = 0, modeflag, index1;
-
- tempbx = 0;
- resinfo = 0;
+ unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
- tempbx = temp;
- if (tempbx & TVSetPAL) {
- tempbx &= (SetCHTVOverScan |
- TVSetPALM |
- TVSetPALN |
- TVSetPAL);
- if (tempbx & TVSetPALM)
- /* set to NTSC if PAL-M */
- tempbx &= ~TVSetPAL;
- } else
- tempbx &= (SetCHTVOverScan |
- TVSetNTSCJ |
- TVSetPAL);
- }
+ tempbx = xgifb_reg_get(pVBInfo->P3d4, 0x35);
+ if (tempbx & TVSetPAL) {
+ tempbx &= (SetCHTVOverScan |
+ TVSetPALM |
+ TVSetPALN |
+ TVSetPAL);
+ if (tempbx & TVSetPALM)
+ /* set to NTSC if PAL-M */
+ tempbx &= ~TVSetPAL;
+ } else
+ tempbx &= (SetCHTVOverScan |
+ TVSetNTSCJ |
+ TVSetPAL);
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->VBInfo & SetCRT2ToSCART)
- tempbx |= TVSetPAL;
- }
+ if (pVBInfo->VBInfo & SetCRT2ToSCART)
+ tempbx |= TVSetPAL;
if (pVBInfo->IF_DEF_YPbPr == 1) {
if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
@@ -2258,33 +2152,26 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = tempbx | TVSetHiVision | TVSetPAL;
}
- if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
- if ((pVBInfo->VBInfo & SetInSlaveMode) &&
- (!(pVBInfo->VBInfo & SetNotSimuMode)))
- tempbx |= TVSimuMode;
+ if ((pVBInfo->VBInfo & SetInSlaveMode) &&
+ (!(pVBInfo->VBInfo & SetNotSimuMode)))
+ tempbx |= TVSimuMode;
- if (!(tempbx & TVSetPAL) &&
- (modeflag > 13) &&
- (resinfo == 8)) /* NTSC 1024x768, */
- tempbx |= NTSC1024x768;
+ if (!(tempbx & TVSetPAL) && (modeflag > 13) && (resinfo == 8))
+ /* NTSC 1024x768, */
+ tempbx |= NTSC1024x768;
- tempbx |= RPLLDIV2XO;
+ tempbx |= RPLLDIV2XO;
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if (pVBInfo->VBInfo & SetInSlaveMode)
- tempbx &= (~RPLLDIV2XO);
- } else if (tempbx &
- (TVSetYPbPr525p | TVSetYPbPr750p)) {
- tempbx &= (~RPLLDIV2XO);
- } else if (!(pVBInfo->VBType &
- (VB_SIS301B |
- VB_SIS302B |
- VB_SIS301LV |
- VB_SIS302LV |
- VB_XGI301C))) {
- if (tempbx & TVSimuMode)
- tempbx &= (~RPLLDIV2XO);
- }
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBInfo & SetInSlaveMode)
+ tempbx &= (~RPLLDIV2XO);
+ } else if (tempbx & (TVSetYPbPr525p | TVSetYPbPr750p)) {
+ tempbx &= (~RPLLDIV2XO);
+ } else if (!(pVBInfo->VBType & (VB_SIS301B | VB_SIS302B |
+ VB_SIS301LV | VB_SIS302LV |
+ VB_XGI301C))) {
+ if (tempbx & TVSimuMode)
+ tempbx &= (~RPLLDIV2XO);
}
}
pVBInfo->TVInfo = tempbx;
@@ -2293,13 +2180,12 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- unsigned short temp, tempax, tempbx, modeflag, resinfo = 0, LCDIdIndex;
+ unsigned short temp, tempax, tempbx, resinfo = 0, LCDIdIndex;
pVBInfo->LCDResInfo = 0;
pVBInfo->LCDTypeInfo = 0;
pVBInfo->LCDInfo = 0;
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
/* si+Ext_ResInfo // */
resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */
@@ -2346,23 +2232,18 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
tempax = pVBInfo->LCDCapList[LCDIdIndex].LCD_Capability;
- if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
- if (((pVBInfo->VBType & VB_SIS302LV) || (pVBInfo->VBType
- & VB_XGI301C)) && (tempax & XGI_LCDDualLink)) {
- tempbx |= SetLCDDualLink;
- }
- }
+ if (((pVBInfo->VBType & VB_SIS302LV) ||
+ (pVBInfo->VBType & VB_XGI301C)) && (tempax & XGI_LCDDualLink))
+ tempbx |= SetLCDDualLink;
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if ((pVBInfo->LCDResInfo == Panel_1400x1050) && (pVBInfo->VBInfo
- & SetCRT2ToLCD) && (resinfo == 9) &&
- (!(tempbx & EnableScalingLCD)))
- /*
- * set to center in 1280x1024 LCDB
- * for Panel_1400x1050
- */
- tempbx |= SetLCDtoNonExpanding;
- }
+ if ((pVBInfo->LCDResInfo == Panel_1400x1050) &&
+ (pVBInfo->VBInfo & SetCRT2ToLCD) && (resinfo == 9) &&
+ (!(tempbx & EnableScalingLCD)))
+ /*
+ * set to center in 1280x1024 LCDB
+ * for Panel_1400x1050
+ */
+ tempbx |= SetLCDtoNonExpanding;
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (pVBInfo->VBInfo & SetNotSimuMode)
@@ -2637,36 +2518,34 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
if (!(pVBInfo->VBInfo & SetCRT2ToLCD))
goto exit;
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->LCDResInfo == Panel_1600x1200) {
- if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
- if (yres == 1024)
- yres = 1056;
- }
+ if (pVBInfo->LCDResInfo == Panel_1600x1200) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
+ if (yres == 1024)
+ yres = 1056;
}
+ }
- if (pVBInfo->LCDResInfo == Panel_1280x1024) {
- if (yres == 400)
- yres = 405;
- else if (yres == 350)
- yres = 360;
+ if (pVBInfo->LCDResInfo == Panel_1280x1024) {
+ if (yres == 400)
+ yres = 405;
+ else if (yres == 350)
+ yres = 360;
- if (pVBInfo->LCDInfo & XGI_LCDVESATiming) {
- if (yres == 360)
- yres = 375;
- }
+ if (pVBInfo->LCDInfo & XGI_LCDVESATiming) {
+ if (yres == 360)
+ yres = 375;
}
+ }
- if (pVBInfo->LCDResInfo == Panel_1024x768) {
- if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
- if (!(pVBInfo->LCDInfo & LCDNonExpanding)) {
- if (yres == 350)
- yres = 357;
- else if (yres == 400)
- yres = 420;
- else if (yres == 480)
- yres = 525;
- }
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & LCDNonExpanding)) {
+ if (yres == 350)
+ yres = 357;
+ else if (yres == 400)
+ yres = 420;
+ else if (yres == 480)
+ yres = 525;
}
}
}
@@ -2981,10 +2860,8 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
temp = 0x6B;
if (infoflag & InterlaceMode)
temp = temp << 1;
- return temp * colordepth;
- } else {
- return temp * colordepth;
}
+ return temp * colordepth;
}
static void XGI_SetCRT2Offset(unsigned short ModeNo,
@@ -3022,11 +2899,7 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
- unsigned short tempcx = 0, CRT1Index = 0, resinfo = 0;
-
- CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
- CRT1Index &= IndexMask;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
+ u8 tempcx;
XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
HwDeviceExtension, pVBInfo);
@@ -3045,11 +2918,10 @@ static void XGI_SetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0,
- pushbx = 0, CRT1Index = 0, modeflag, resinfo = 0;
+ pushbx = 0, CRT1Index, modeflag;
CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
CRT1Index &= IndexMask;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
/* bainy change table name */
@@ -3204,13 +3076,11 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
- modeflag, CRT1Index;
+ modeflag;
/* si+Ext_ResInfo */
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
- CRT1Index &= IndexMask;
if (!(pVBInfo->VBInfo & SetInSlaveMode))
return;
@@ -3501,15 +3371,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
- modeflag, resinfo, crt2crtc;
+ modeflag;
unsigned char const *TimingPoint;
unsigned long longtemp, tempeax, tempebx, temp2, tempecx;
/* si+Ext_ResInfo */
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- crt2crtc = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
tempax = 0;
@@ -3918,8 +3786,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
}
if (pVBInfo->TVInfo & TVSetPALM) {
- tempax = (unsigned char) xgifb_reg_get(pVBInfo->Part2Port,
- 0x01);
+ tempax = xgifb_reg_get(pVBInfo->Part2Port, 0x01);
tempax--;
xgifb_reg_and(pVBInfo->Part2Port, 0x01, tempax);
@@ -3940,17 +3807,12 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
- unsigned short push1, push2, pushbx, tempax, tempbx, tempcx, temp,
- tempah, tempbh, tempch, resinfo, modeflag, CRT1Index;
+ unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
+ tempbh, tempch;
struct XGI_LCDDesStruct const *LCDBDesPtr = NULL;
/* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
- CRT1Index &= IndexMask;
-
if (!(pVBInfo->VBInfo & SetCRT2ToLCD))
return;
@@ -3969,7 +3831,6 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x0B, temp);
tempbx = pVBInfo->VDE; /* RTVACTEO=(VDE-1)&0xFF */
- push1 = tempbx;
tempbx--;
temp = tempbx & 0x00FF;
xgifb_reg_set(pVBInfo->Part2Port, 0x03, temp);
@@ -3977,7 +3838,6 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x0C, ~0x07, temp);
tempcx = pVBInfo->VT - 1;
- push2 = tempcx + 1;
temp = tempcx & 0x00FF; /* RVTVT=VT-1 */
xgifb_reg_set(pVBInfo->Part2Port, 0x19, temp);
temp = (tempcx & 0xFF00) >> 8;
@@ -4459,10 +4319,6 @@ static void XGINew_EnableCRT2(struct vb_device_info *pVBInfo)
static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- unsigned short Pindex, Pdata;
-
- Pindex = pVBInfo->Part5Port;
- Pdata = pVBInfo->Part5Port + 1;
if (pVBInfo->ModeType == ModeVGA) {
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
| DisableCRT2Display))) {
@@ -4538,7 +4394,7 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability &
(LCDPolarity << 8)) >> 8);
temp &= LCDPolarity;
- Miscdata = (unsigned char) inb(pVBInfo->P3cc);
+ Miscdata = inb(pVBInfo->P3cc);
outb((Miscdata & 0x3F) | temp, pVBInfo->P3c2);
@@ -4598,7 +4454,7 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
LVDSVBE = LVDSVBS + LVDSVT - xgifb_info->lvds_data.LVDSVDE;
- temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x11);
xgifb_reg_set(pVBInfo->P3d4, 0x11, temp & 0x7f); /* Unlock CRTC */
if (!(modeflag & Charx8Dot))
@@ -4737,43 +4593,21 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
}
/* --------------------------------------------------------------------- */
-/* Function : XGI_DisableChISLCD */
-/* Input : */
-/* Output : 0 -> Not LCD Mode */
-/* Description : */
-/* --------------------------------------------------------------------- */
-static unsigned char XGI_DisableChISLCD(struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx, tempah;
-
- tempbx = pVBInfo->SetFlag & (DisableChA | DisableChB);
- tempah = ~((unsigned short) xgifb_reg_get(pVBInfo->Part1Port, 0x2E));
-
- if (tempbx & (EnableChA | DisableChA)) {
- if (!(tempah & 0x08)) /* Chk LCDA Mode */
- return 0;
- }
-
- if (!(tempbx & (EnableChB | DisableChB)))
- return 0;
-
- if (tempah & 0x01) /* Chk LCDB Mode */
- return 1;
-
- return 0;
-}
-
-/* --------------------------------------------------------------------- */
/* Function : XGI_EnableChISLCD */
/* Input : */
/* Output : 0 -> Not LCD mode */
-/* Description : */
+/* Description : if bool enable = true -> enable, else disable */
/* --------------------------------------------------------------------- */
-static unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo)
+static unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo,
+ bool enable)
{
unsigned short tempbx, tempah;
- tempbx = pVBInfo->SetFlag & (EnableChA | EnableChB);
+ if (enable)
+ tempbx = pVBInfo->SetFlag & (EnableChA | EnableChB);
+ else
+ tempbx = pVBInfo->SetFlag & (DisableChA | DisableChB);
+
tempah = ~((unsigned short) xgifb_reg_get(pVBInfo->Part1Port, 0x2E));
if (tempbx & (EnableChA | DisableChA)) {
@@ -4825,9 +4659,9 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (((pVBInfo->VBInfo &
- (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
- || (XGI_DisableChISLCD(pVBInfo))
- || (XGI_IsLCDON(pVBInfo)))
+ (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))) ||
+ (XGI_EnableChISLCD(pVBInfo, false)) ||
+ (XGI_IsLCDON(pVBInfo)))
/* LVDS Driver power down */
xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x80);
}
@@ -5018,16 +4852,6 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
}
xgifb_reg_set(pVBInfo->Part1Port, 0x2D, tempah);
}
- } else if (pVBInfo->IF_DEF_LVDS == 1) {
- tempbl = 0;
- tempbh = 0;
- if (pVBInfo->VBInfo & SetCRT2ToLCD) {
- tempah = XGI301LCDDelay;
- tempah &= 0x0f;
- tempah = tempah << 4;
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x2D, 0x0f,
- tempah);
- }
}
}
@@ -5118,12 +4942,8 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
tempcx = pVBInfo->LCDCapList[XGI_GetLCDCapPtr(pVBInfo)].LCD_Capability;
- if (pVBInfo->VBType &
- (VB_SIS301B |
- VB_SIS302B |
- VB_SIS301LV |
- VB_SIS302LV |
- VB_XGI301C)) { /* 301LV/302LV only */
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV |
+ VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBType &
(VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
/* Set 301LV Capability */
@@ -5135,10 +4955,7 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
(unsigned short) ((tempcx & (EnableVBCLKDRVLOW
| EnablePLLSPLOW)) >> 8));
- }
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo & SetCRT2ToLCD)
XGI_SetLCDCap_B(tempcx, pVBInfo);
else if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
@@ -5510,13 +5327,10 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- short LCDRefreshIndex[] = { 0x00, 0x00, 0x03, 0x01 },
- LCDARefreshIndex[] = { 0x00, 0x00, 0x03, 0x01, 0x01,
- 0x01, 0x01 };
+ const u8 LCDARefreshIndex[] = {
+ 0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };
- unsigned short RefreshRateTableIndex, i, modeflag, index, temp;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
+ unsigned short RefreshRateTableIndex, i, index, temp;
index = xgifb_reg_get(pVBInfo->P3d4, 0x33);
index = index >> pVBInfo->SelectCRT2Rate;
@@ -5531,15 +5345,8 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
if (pVBInfo->SetFlag & ProgrammingCRT2) {
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
- | VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C))
- /* 301b */
- temp = LCDARefreshIndex[
- pVBInfo->LCDResInfo & 0x0F];
- else
- temp = LCDRefreshIndex[
- pVBInfo->LCDResInfo & 0x0F];
+ temp = LCDARefreshIndex[
+ pVBInfo->LCDResInfo & 0x07];
if (index > temp)
index = temp;
@@ -5617,9 +5424,8 @@ static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
- unsigned short tempbx, ModeIdIndex, RefreshRateTableIndex;
+ unsigned short ModeIdIndex, RefreshRateTableIndex;
- tempbx = pVBInfo->VBInfo;
pVBInfo->SetFlag |= ProgrammingCRT2;
XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
pVBInfo->SelectCRT2Rate = 4;
@@ -5658,32 +5464,31 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
unsigned char CR17, CR63, SR31;
unsigned short temp;
- unsigned char DAC_TEST_PARMS[3] = { 0x0F, 0x0F, 0x0F };
int i;
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
/* to fix XG42 single LCD sense to CRT+LCD */
xgifb_reg_set(pVBInfo->P3d4, 0x57, 0x4A);
- xgifb_reg_set(pVBInfo->P3d4, 0x53, (unsigned char) (xgifb_reg_get(
+ xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get(
pVBInfo->P3d4, 0x53) | 0x02));
- SR31 = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x31);
- CR63 = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x63);
- SR01 = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x01);
+ SR31 = xgifb_reg_get(pVBInfo->P3c4, 0x31);
+ CR63 = xgifb_reg_get(pVBInfo->P3d4, 0x63);
+ SR01 = xgifb_reg_get(pVBInfo->P3c4, 0x01);
xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char) (SR01 & 0xDF));
xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char) (CR63 & 0xBF));
- CR17 = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x17);
+ CR17 = xgifb_reg_get(pVBInfo->P3d4, 0x17);
xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char) (CR17 | 0x80));
- SR1F = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x1F);
+ SR1F = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) (SR1F | 0x04));
- SR07 = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x07);
+ SR07 = xgifb_reg_get(pVBInfo->P3c4, 0x07);
xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char) (SR07 & 0xFB));
- SR06 = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x06);
+ SR06 = xgifb_reg_get(pVBInfo->P3c4, 0x06);
xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char) (SR06 & 0xC3));
xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00);
@@ -5712,10 +5517,8 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
outb(0x00, pVBInfo->P3c8);
- for (i = 0; i < 256; i++) {
- outb((unsigned char) DAC_TEST_PARMS[0], (pVBInfo->P3c8 + 1));
- outb((unsigned char) DAC_TEST_PARMS[1], (pVBInfo->P3c8 + 1));
- outb((unsigned char) DAC_TEST_PARMS[2], (pVBInfo->P3c8 + 1));
+ for (i = 0; i < 256 * 3; i++) {
+ outb(0x0F, (pVBInfo->P3c8 + 1)); /* DAC_TEST_PARMS */
}
mdelay(1);
@@ -5731,9 +5534,7 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
/* avoid display something, set BLACK DAC if not restore DAC */
outb(0x00, pVBInfo->P3c8);
- for (i = 0; i < 256; i++) {
- outb(0, (pVBInfo->P3c8 + 1));
- outb(0, (pVBInfo->P3c8 + 1));
+ for (i = 0; i < 256 * 3; i++) {
outb(0, (pVBInfo->P3c8 + 1));
}
@@ -5741,7 +5542,7 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->P3d4, 0x63, CR63);
xgifb_reg_set(pVBInfo->P3c4, 0x31, SR31);
- xgifb_reg_set(pVBInfo->P3d4, 0x53, (unsigned char) (xgifb_reg_get(
+ xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get(
pVBInfo->P3d4, 0x53) & 0xFD));
xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F);
}
@@ -5755,13 +5556,10 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
| VB_SIS302LV | VB_XGI301C)) {
if (!(pVBInfo->SetFlag & DisableChA)) {
- if (pVBInfo->SetFlag & EnableChA) {
+ if ((pVBInfo->SetFlag & EnableChA) ||
+ (pVBInfo->VBInfo & SetCRT2ToDualEdge)) {
/* Power on */
xgifb_reg_set(pVBInfo->Part1Port, 0x1E, 0x20);
- } else if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
- /* Power on */
- xgifb_reg_set(pVBInfo->Part1Port,
- 0x1E, 0x20);
}
}
@@ -5769,8 +5567,7 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->SetFlag & EnableChB) || (pVBInfo->VBInfo
& (SetCRT2ToLCD | SetCRT2ToTV
| SetCRT2ToRAMDAC))) {
- tempah = (unsigned char) xgifb_reg_get(
- pVBInfo->P3c4, 0x32);
+ tempah = xgifb_reg_get(pVBInfo->P3c4, 0x32);
tempah &= 0xDF;
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (!(pVBInfo->VBInfo &
@@ -5780,8 +5577,8 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
xgifb_reg_set(pVBInfo->P3c4, 0x32, tempah);
xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x20);
- tempah = (unsigned char) xgifb_reg_get(
- pVBInfo->Part1Port, 0x2E);
+ tempah = xgifb_reg_get(pVBInfo->Part1Port,
+ 0x2E);
if (!(tempah & 0x80))
xgifb_reg_or(pVBInfo->Part1Port,
@@ -5795,8 +5592,8 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
0x20); /* shampoo 0129 */
if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
- if (!XGI_DisableChISLCD(pVBInfo)) {
- if (XGI_EnableChISLCD(pVBInfo) ||
+ if (!XGI_EnableChISLCD(pVBInfo, false)) {
+ if (XGI_EnableChISLCD(pVBInfo, true) ||
(pVBInfo->VBInfo &
(SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
/* LVDS PLL power on */
@@ -5854,8 +5651,7 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
/* enable CRT2 */
xgifb_reg_or(pVBInfo->Part1Port, 0x1E, 0x20);
- tempah = (unsigned char) xgifb_reg_get(pVBInfo->Part1Port,
- 0x2E);
+ tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x2E);
if (!(tempah & 0x80))
xgifb_reg_or(pVBInfo->Part1Port, 0x2E, 0x80);
@@ -5947,7 +5743,6 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
unsigned short ModeIdIndex;
struct vb_device_info VBINF;
struct vb_device_info *pVBInfo = &VBINF;
- pVBInfo->BaseAddr = xgifb_info->vga_base;
pVBInfo->IF_DEF_LVDS = 0;
if (HwDeviceExtension->jChipType >= XG20) {
@@ -5961,24 +5756,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
pVBInfo->IF_DEF_CRT2Monitor = 1;
}
- pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14;
- pVBInfo->P3d4 = pVBInfo->BaseAddr + 0x24;
- pVBInfo->P3c0 = pVBInfo->BaseAddr + 0x10;
- pVBInfo->P3ce = pVBInfo->BaseAddr + 0x1e;
- pVBInfo->P3c2 = pVBInfo->BaseAddr + 0x12;
- pVBInfo->P3cc = pVBInfo->BaseAddr + 0x1C;
- pVBInfo->P3ca = pVBInfo->BaseAddr + 0x1a;
- pVBInfo->P3c6 = pVBInfo->BaseAddr + 0x16;
- pVBInfo->P3c7 = pVBInfo->BaseAddr + 0x17;
- pVBInfo->P3c8 = pVBInfo->BaseAddr + 0x18;
- pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
- pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
- pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
- pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
- pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
- pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
+ XGIRegInit(pVBInfo, xgifb_info->vga_base);
/* for x86 Linux, XG21 LVDS */
if (HwDeviceExtension->jChipType == XG21) {
@@ -6011,7 +5789,8 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
- if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
+ (!(pVBInfo->VBInfo & SwitchCRT2))) {
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
@@ -6019,24 +5798,11 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
HwDeviceExtension, pVBInfo);
}
- } else if (!(pVBInfo->VBInfo & SwitchCRT2)) {
- XGI_SetCRT1Group(xgifb_info,
- HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
- HwDeviceExtension,
- pVBInfo);
- }
}
if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchCRT2)) {
switch (HwDeviceExtension->ujVBChipID) {
- case VB_CHIP_301:
- XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
- pVBInfo); /*add for CRT2 */
- break;
-
+ case VB_CHIP_301: /* fall through */
case VB_CHIP_302:
XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
pVBInfo); /*add for CRT2 */
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index acf6e7fbbaed..ae0c18b320a4 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -156,10 +156,9 @@ struct vb_device_info {
unsigned short SelectCRT2Rate;
void __iomem *FBAddr;
- unsigned long BaseAddr;
- unsigned char const (*SR15)[8];
- unsigned char const (*CR40)[8];
+ unsigned char const (*SR15)[3];
+ unsigned char const (*CR40)[3];
struct SiS_MCLKData const *MCLKData;
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 39f528b14f01..b4c05c80b937 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -4,114 +4,88 @@ static const struct SiS_MCLKData XGI340New_MCLKData[] = {
{0x16, 0x01, 0x01, 166},
{0x19, 0x02, 0x01, 124},
{0x7C, 0x08, 0x01, 200},
- {0x79, 0x06, 0x01, 250},
- {0x29, 0x01, 0x81, 301},
- {0x5c, 0x23, 0x01, 166},
- {0x5c, 0x23, 0x01, 166},
- {0x5c, 0x23, 0x01, 166}
};
static const struct SiS_MCLKData XGI27New_MCLKData[] = {
{0x5c, 0x23, 0x01, 166},
{0x19, 0x02, 0x01, 124},
{0x7C, 0x08, 0x80, 200},
- {0x79, 0x06, 0x80, 250},
- {0x29, 0x01, 0x81, 300},
- {0x5c, 0x23, 0x01, 166},
- {0x5c, 0x23, 0x01, 166},
- {0x5c, 0x23, 0x01, 166}
};
const struct XGI_ECLKDataStruct XGI340_ECLKData[] = {
{0x5c, 0x23, 0x01, 166},
{0x55, 0x84, 0x01, 123},
{0x7C, 0x08, 0x01, 200},
- {0x79, 0x06, 0x01, 250},
- {0x29, 0x01, 0x81, 301},
- {0x5c, 0x23, 0x01, 166},
- {0x5c, 0x23, 0x01, 166},
- {0x5c, 0x23, 0x01, 166}
-};
-
-static const unsigned char XG27_SR13[4][8] = {
- {0x35, 0x45, 0xb1, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR13 */
- {0x41, 0x51, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR14 */
- {0x32, 0x32, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR18 */
- {0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00} /* SR1B */
-};
-
-static const unsigned char XGI340_SR13[4][8] = {
- {0x35, 0x45, 0xb1, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR13 */
- {0x41, 0x51, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR14 */
- {0x31, 0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR18 */
- {0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00} /* SR1B */
-};
-
-static const unsigned char XGI340_cr41[24][8] = {
- {0x20, 0x50, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 0 CR41 */
- {0xc4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 1 CR8A */
- {0xc4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 2 CR8B */
- {0xb5, 0xa4, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0xf0, 0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x90, 0x90, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 5 CR68 */
- {0x77, 0x77, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 6 CR69 */
- {0x77, 0x77, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 7 CR6A */
- {0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 8 CR6D */
- {0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 9 CR80 */
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 10 CR81 */
- {0x88, 0xa8, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 11 CR82 */
- {0x44, 0x44, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 12 CR85 */
- {0x48, 0x48, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 13 CR86 */
- {0x54, 0x54, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 14 CR90 */
- {0x54, 0x54, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 15 CR91 */
- {0x0a, 0x0a, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 16 CR92 */
- {0x44, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 17 CR93 */
- {0x10, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 18 CR94 */
- {0x11, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 19 CR95 */
- {0x05, 0x05, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 20 CR96 */
- {0xf0, 0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 21 CRC3 */
- {0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 22 CRC4 */
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} /* 23 CRC5 */
-};
-
-static const unsigned char XGI27_cr41[24][8] = {
- {0x20, 0x40, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 0 CR41 */
- {0xC4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 1 CR8A */
- {0xC4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 2 CR8B */
- {0xB3, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 3 CR40[7],
- CR99[2:0],
- CR45[3:0]*/
- {0xf0, 0xf5, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 4 CR59 */
- {0x90, 0x90, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 5 CR68 */
- {0x77, 0x67, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 6 CR69 */
- {0x77, 0x77, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 7 CR6A */
- {0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 8 CR6D */
- {0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 9 CR80 */
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 10 CR81 */
- {0x88, 0xcc, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 11 CR82 */
- {0x44, 0x88, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 12 CR85 */
- {0x48, 0x88, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 13 CR86 */
- {0x54, 0x32, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 14 CR90 */
- {0x54, 0x33, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 15 CR91 */
- {0x0a, 0x07, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 16 CR92 */
- {0x44, 0x63, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 17 CR93 */
- {0x10, 0x14, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 18 CR94 */
- {0x11, 0x0B, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 19 CR95 */
- {0x05, 0x22, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 20 CR96 */
- {0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 21 CRC3 */
- {0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 22 CRC4 */
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} /* 23 CRC5 */
-};
-
-const unsigned char XGI340_CR6B[8][4] = {
- {0xaa, 0xaa, 0xaa, 0xaa},
- {0xaa, 0xaa, 0xaa, 0xaa},
- {0xaa, 0xaa, 0xaa, 0xaa},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00}
+};
+
+static const unsigned char XG27_SR13[4][3] = {
+ {0x35, 0x45, 0xb1}, /* SR13 */
+ {0x41, 0x51, 0x5c}, /* SR14 */
+ {0x32, 0x32, 0x42}, /* SR18 */
+ {0x03, 0x03, 0x03} /* SR1B */
+};
+
+static const unsigned char XGI340_SR13[4][3] = {
+ {0x35, 0x45, 0xb1}, /* SR13 */
+ {0x41, 0x51, 0x5c}, /* SR14 */
+ {0x31, 0x42, 0x42}, /* SR18 */
+ {0x03, 0x03, 0x03} /* SR1B */
+};
+
+static const unsigned char XGI340_cr41[24][3] = {
+ {0x20, 0x50, 0x60}, /* 0 CR41 */
+ {0xc4, 0x40, 0x84}, /* 1 CR8A */
+ {0xc4, 0x40, 0x84}, /* 2 CR8B */
+ {0xb5, 0xa4, 0xa4},
+ {0xf0, 0xf0, 0xf0},
+ {0x90, 0x90, 0x24}, /* 5 CR68 */
+ {0x77, 0x77, 0x44}, /* 6 CR69 */
+ {0x77, 0x77, 0x44}, /* 7 CR6A */
+ {0xff, 0xff, 0xff}, /* 8 CR6D */
+ {0x55, 0x55, 0x55}, /* 9 CR80 */
+ {0x00, 0x00, 0x00}, /* 10 CR81 */
+ {0x88, 0xa8, 0x48}, /* 11 CR82 */
+ {0x44, 0x44, 0x77}, /* 12 CR85 */
+ {0x48, 0x48, 0x88}, /* 13 CR86 */
+ {0x54, 0x54, 0x44}, /* 14 CR90 */
+ {0x54, 0x54, 0x44}, /* 15 CR91 */
+ {0x0a, 0x0a, 0x07}, /* 16 CR92 */
+ {0x44, 0x44, 0x44}, /* 17 CR93 */
+ {0x10, 0x10, 0x0A}, /* 18 CR94 */
+ {0x11, 0x11, 0x0a}, /* 19 CR95 */
+ {0x05, 0x05, 0x05}, /* 20 CR96 */
+ {0xf0, 0xf0, 0xf0}, /* 21 CRC3 */
+ {0x05, 0x00, 0x02}, /* 22 CRC4 */
+ {0x00, 0x00, 0x00} /* 23 CRC5 */
+};
+
+static const unsigned char XGI27_cr41[24][3] = {
+ {0x20, 0x40, 0x60}, /* 0 CR41 */
+ {0xC4, 0x40, 0x84}, /* 1 CR8A */
+ {0xC4, 0x40, 0x84}, /* 2 CR8B */
+ {0xB3, 0x13, 0xa4}, /* 3 CR40[7],
+ CR99[2:0],
+ CR45[3:0]*/
+ {0xf0, 0xf5, 0xf0}, /* 4 CR59 */
+ {0x90, 0x90, 0x24}, /* 5 CR68 */
+ {0x77, 0x67, 0x44}, /* 6 CR69 */
+ {0x77, 0x77, 0x44}, /* 7 CR6A */
+ {0xff, 0xff, 0xff}, /* 8 CR6D */
+ {0x55, 0x55, 0x55}, /* 9 CR80 */
+ {0x00, 0x00, 0x00}, /* 10 CR81 */
+ {0x88, 0xcc, 0x48}, /* 11 CR82 */
+ {0x44, 0x88, 0x77}, /* 12 CR85 */
+ {0x48, 0x88, 0x88}, /* 13 CR86 */
+ {0x54, 0x32, 0x44}, /* 14 CR90 */
+ {0x54, 0x33, 0x44}, /* 15 CR91 */
+ {0x0a, 0x07, 0x07}, /* 16 CR92 */
+ {0x44, 0x63, 0x44}, /* 17 CR93 */
+ {0x10, 0x14, 0x0A}, /* 18 CR94 */
+ {0x11, 0x0B, 0x0C}, /* 19 CR95 */
+ {0x05, 0x22, 0x05}, /* 20 CR96 */
+ {0xf0, 0xf0, 0x00}, /* 21 CRC3 */
+ {0x05, 0x00, 0x02}, /* 22 CRC4 */
+ {0x00, 0x00, 0x00} /* 23 CRC5 */
};
/* CR47,CR48,CR49,CR4A,CR4B,CR4C,CR70,CR71,CR74,CR75,CR76,CR77 */
@@ -195,7 +169,7 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
static const struct SiS_StandTable_S XGI330_StandTable = {
/* ExtVGATable */
0x00, 0x00, 0x00, 0x0000,
- {0x01, 0x0f, 0x00, 0x0e},
+ {0x21, 0x0f, 0x00, 0x0e}, /* 0x21 = 0x01 | (0x20 = screen off) */
0x23,
{0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e,
0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 4881839be625..73582705e8c5 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -1,6 +1,6 @@
config ZCACHE
bool "Dynamic compression of swap pages and clean pagecache pages"
- depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && ZSMALLOC=y
+ depends on CRYPTO=y && SWAP=y && CLEANCACHE && FRONTSWAP
select CRYPTO_LZO
default n
help
@@ -9,3 +9,35 @@ config ZCACHE
compression and an in-kernel implementation of transcendent
memory to store clean page cache pages and swap in RAM,
providing a noticeable reduction in disk I/O.
+
+config RAMSTER
+ bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
+ depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y
+ depends on NET
+ # must ensure struct page is 8-byte aligned
+	select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
+ default n
+ help
+ RAMster allows RAM on other machines in a cluster to be utilized
+ dynamically and symmetrically instead of swapping to a local swap
+ disk, thus improving performance on memory-constrained workloads
+ while minimizing total RAM across the cluster. RAMster, like
+ zcache2, compresses swap pages into local RAM, but then remotifies
+ the compressed pages to another node in the RAMster cluster.
+
+# Depends on not-yet-upstreamed mm patches to export end_swap_bio_write and
+# __add_to_swap_cache, and implement __swap_writepage (which is swap_writepage
+# without the frontswap call). When these are in-tree, the dependency on
+# BROKEN can be removed.
+config ZCACHE_WRITEBACK
+	bool "Allow compressed swap pages to be written back to swap disk"
+ depends on ZCACHE=y && BROKEN
+ default n
+ help
+ Zcache caches compressed swap pages (and other data) in RAM which
+ often improves performance by avoiding I/O's due to swapping.
+ In some workloads with very long-lived large processes, it can
+ instead reduce performance. Writeback decompresses zcache-compressed
+ pages (in LRU order) when under memory pressure and writes them to
+ the backing swap disk to ameliorate this problem. Policy driving
+ writeback is still under development.
diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile
index 60daa272c204..471104957dad 100644
--- a/drivers/staging/zcache/Makefile
+++ b/drivers/staging/zcache/Makefile
@@ -1,3 +1,6 @@
-zcache-y := zcache-main.o tmem.o
+zcache-y := zcache-main.o tmem.o zbud.o
+zcache-$(CONFIG_RAMSTER) += ramster/ramster.o ramster/r2net.o
+zcache-$(CONFIG_RAMSTER) += ramster/nodemanager.o ramster/tcp.o
+zcache-$(CONFIG_RAMSTER) += ramster/heartbeat.o ramster/masklog.o
obj-$(CONFIG_ZCACHE) += zcache.o
diff --git a/drivers/staging/zcache/TODO b/drivers/staging/zcache/TODO
new file mode 100644
index 000000000000..c1e26d4973dc
--- /dev/null
+++ b/drivers/staging/zcache/TODO
@@ -0,0 +1,69 @@
+
+** ZCACHE PLAN FOR PROMOTION FROM STAGING **
+
+Last updated: Feb 13, 2013
+
+PLAN STEPS
+
+1. merge zcache and ramster to eliminate horrible code duplication
+2. converge on a predictable, writeback-capable allocator
+3. use debugfs instead of sysfs (per akpm feedback in 2011)
+4. zcache side of cleancache/mm WasActive patch
+5. zcache side of frontswap exclusive gets
+6. zcache must be able to writeback to physical swap disk
+ (per Andrea Arcangeli feedback in 2011)
+7. implement adequate policy for writeback
+8. frontswap/cleancache work to allow zcache to be loaded
+ as a module
+9. get core mm developer to review
+10. incorporate feedback from review
+11. get review/acks from 1-2 additional mm developers
+12. incorporate any feedback from additional mm reviews
+13. propose location/file-naming in mm tree
+14. repeat 9-13 as necessary until akpm is happy and merges
+
+STATUS/OWNERSHIP
+
+1. DONE as part of "new" zcache; in staging/zcache for 3.9
+2. DONE as part of "new" zcache (cf zbud.[ch]); in staging/zcache for 3.9
+ (this was the core of the zcache1 vs zcache2 flail)
+3. DONE as part of "new" zcache; in staging/zcache for 3.9
+4. DONE (w/caveats) as part of "new" zcache; per cleancache performance
+ feedback see https://lkml.org/lkml/2011/8/17/351, in
+ staging/zcache for 3.9; dependent on proposed mm patch, see
+ https://lkml.org/lkml/2012/1/25/300
+5. DONE as part of "new" zcache; performance tuning only,
+ in staging/zcache for 3.9; dependent on frontswap patch
+ merged in 3.7 (33c2a174)
+6. DONE (w/caveats), prototyped as part of "new" zcache, had
+ bad memory leak; reimplemented to use sjennings clever tricks
+ and proposed mm patches with new version in staging/zcache
+ for 3.9, see https://lkml.org/lkml/2013/2/6/437;
+7. PROTOTYPED as part of "new" zcache; in staging/zcache for 3.9;
+ needs more review (plan to discuss at LSF/MM 2013)
+8. IN PROGRESS; owned by Konrad Wilk; v2 recently posted
+ http://lkml.org/lkml/2013/2/1/542
+9. IN PROGRESS; owned by Konrad Wilk; Mel Gorman provided
+ great feedback in August 2012 (unfortunately of "old"
+ zcache)
+10. Konrad posted series of fixes (that now need rebasing)
+ https://lkml.org/lkml/2013/2/1/566
+11. NOT DONE; owned by Konrad Wilk
+12. TBD (depends on quantity of feedback)
+13. PROPOSED; one suggestion proposed by Dan; needs more ideas/feedback
+14. TBD (depends on feedback)
+
+WHO NEEDS TO AGREE
+
+Not sure. Seth Jennings is now pursuing a separate but semi-parallel
+track. Akpm clearly has to approve for any mm merge to happen. Minchan
+Kim has interest but may be happy if/when zram is merged into mm. Konrad
+Wilk may be maintainer if akpm decides compression is maintainable
+separately from the rest of mm. (More LSF/MM 2013 discussion.)
+
+ZCACHE FUTURE NEW FUNCTIONALITY
+
+A. Support zsmalloc as an alternative high-density allocator
+ (See https://lkml.org/lkml/2013/1/23/511)
+B. Support zero-filled pages more efficiently
+C. Possibly support three zbuds per pageframe when space allows
diff --git a/drivers/staging/ramster/ramster.h b/drivers/staging/zcache/ramster.h
index 1b71aea2ff62..1b71aea2ff62 100644
--- a/drivers/staging/ramster/ramster.h
+++ b/drivers/staging/zcache/ramster.h
diff --git a/drivers/staging/ramster/ramster/heartbeat.c b/drivers/staging/zcache/ramster/heartbeat.c
index 75d3fe80b055..75d3fe80b055 100644
--- a/drivers/staging/ramster/ramster/heartbeat.c
+++ b/drivers/staging/zcache/ramster/heartbeat.c
diff --git a/drivers/staging/ramster/ramster/heartbeat.h b/drivers/staging/zcache/ramster/heartbeat.h
index 6cbc775bd63b..6cbc775bd63b 100644
--- a/drivers/staging/ramster/ramster/heartbeat.h
+++ b/drivers/staging/zcache/ramster/heartbeat.h
diff --git a/drivers/staging/ramster/ramster/masklog.c b/drivers/staging/zcache/ramster/masklog.c
index 1261d8579aae..1261d8579aae 100644
--- a/drivers/staging/ramster/ramster/masklog.c
+++ b/drivers/staging/zcache/ramster/masklog.c
diff --git a/drivers/staging/ramster/ramster/masklog.h b/drivers/staging/zcache/ramster/masklog.h
index 918ae110b699..918ae110b699 100644
--- a/drivers/staging/ramster/ramster/masklog.h
+++ b/drivers/staging/zcache/ramster/masklog.h
diff --git a/drivers/staging/ramster/ramster/nodemanager.c b/drivers/staging/zcache/ramster/nodemanager.c
index c0f48158735d..c0f48158735d 100644
--- a/drivers/staging/ramster/ramster/nodemanager.c
+++ b/drivers/staging/zcache/ramster/nodemanager.c
diff --git a/drivers/staging/ramster/ramster/nodemanager.h b/drivers/staging/zcache/ramster/nodemanager.h
index 41a04df5842c..41a04df5842c 100644
--- a/drivers/staging/ramster/ramster/nodemanager.h
+++ b/drivers/staging/zcache/ramster/nodemanager.h
diff --git a/drivers/staging/ramster/ramster/r2net.c b/drivers/staging/zcache/ramster/r2net.c
index 34818dc65612..34818dc65612 100644
--- a/drivers/staging/ramster/ramster/r2net.c
+++ b/drivers/staging/zcache/ramster/r2net.c
diff --git a/drivers/staging/ramster/ramster/ramster.c b/drivers/staging/zcache/ramster/ramster.c
index c06709f39682..bf96a1cbf7c1 100644
--- a/drivers/staging/ramster/ramster/ramster.c
+++ b/drivers/staging/zcache/ramster/ramster.c
@@ -67,25 +67,25 @@ static int ramster_remote_target_nodenum __read_mostly = -1;
static long ramster_flnodes;
static atomic_t ramster_flnodes_atomic = ATOMIC_INIT(0);
static unsigned long ramster_flnodes_max;
-static long ramster_foreign_eph_pages;
+static ssize_t ramster_foreign_eph_pages;
static atomic_t ramster_foreign_eph_pages_atomic = ATOMIC_INIT(0);
-static unsigned long ramster_foreign_eph_pages_max;
-static long ramster_foreign_pers_pages;
+static ssize_t ramster_foreign_eph_pages_max;
+static ssize_t ramster_foreign_pers_pages;
static atomic_t ramster_foreign_pers_pages_atomic = ATOMIC_INIT(0);
-static unsigned long ramster_foreign_pers_pages_max;
-static unsigned long ramster_eph_pages_remoted;
-static unsigned long ramster_pers_pages_remoted;
-static unsigned long ramster_eph_pages_remote_failed;
-static unsigned long ramster_pers_pages_remote_failed;
-static unsigned long ramster_remote_eph_pages_succ_get;
-static unsigned long ramster_remote_pers_pages_succ_get;
-static unsigned long ramster_remote_eph_pages_unsucc_get;
-static unsigned long ramster_remote_pers_pages_unsucc_get;
-static unsigned long ramster_pers_pages_remote_nomem;
-static unsigned long ramster_remote_objects_flushed;
-static unsigned long ramster_remote_object_flushes_failed;
-static unsigned long ramster_remote_pages_flushed;
-static unsigned long ramster_remote_page_flushes_failed;
+static ssize_t ramster_foreign_pers_pages_max;
+static ssize_t ramster_eph_pages_remoted;
+static ssize_t ramster_pers_pages_remoted;
+static ssize_t ramster_eph_pages_remote_failed;
+static ssize_t ramster_pers_pages_remote_failed;
+static ssize_t ramster_remote_eph_pages_succ_get;
+static ssize_t ramster_remote_pers_pages_succ_get;
+static ssize_t ramster_remote_eph_pages_unsucc_get;
+static ssize_t ramster_remote_pers_pages_unsucc_get;
+static ssize_t ramster_pers_pages_remote_nomem;
+static ssize_t ramster_remote_objects_flushed;
+static ssize_t ramster_remote_object_flushes_failed;
+static ssize_t ramster_remote_pages_flushed;
+static ssize_t ramster_remote_page_flushes_failed;
/* FIXME frontswap selfshrinking knobs in debugfs? */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/staging/ramster/ramster/ramster.h b/drivers/staging/zcache/ramster/ramster.h
index 12ae56f09ca4..12ae56f09ca4 100644
--- a/drivers/staging/ramster/ramster/ramster.h
+++ b/drivers/staging/zcache/ramster/ramster.h
diff --git a/drivers/staging/ramster/ramster/ramster_nodemanager.h b/drivers/staging/zcache/ramster/ramster_nodemanager.h
index 49f879d943ab..49f879d943ab 100644
--- a/drivers/staging/ramster/ramster/ramster_nodemanager.h
+++ b/drivers/staging/zcache/ramster/ramster_nodemanager.h
diff --git a/drivers/staging/ramster/ramster/tcp.c b/drivers/staging/zcache/ramster/tcp.c
index aa2a1a763aa4..aa2a1a763aa4 100644
--- a/drivers/staging/ramster/ramster/tcp.c
+++ b/drivers/staging/zcache/ramster/tcp.c
diff --git a/drivers/staging/ramster/ramster/tcp.h b/drivers/staging/zcache/ramster/tcp.h
index 9d05833452b5..9d05833452b5 100644
--- a/drivers/staging/ramster/ramster/tcp.h
+++ b/drivers/staging/zcache/ramster/tcp.h
diff --git a/drivers/staging/ramster/ramster/tcp_internal.h b/drivers/staging/zcache/ramster/tcp_internal.h
index 4d8cc9f96fd2..4d8cc9f96fd2 100644
--- a/drivers/staging/ramster/ramster/tcp_internal.h
+++ b/drivers/staging/zcache/ramster/tcp_internal.h
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index 56c8e606ad1c..a2b7e03b6062 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -1,32 +1,43 @@
/*
* In-kernel transcendent memory (generic implementation)
*
- * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
*
- * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
+ * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
* "handles" (triples containing a pool id, and object id, and an index), to
* pages in a page-accessible memory (PAM). Tmem references the PAM pages via
* an abstract "pampd" (PAM page-descriptor), which can be operated on by a
* set of functions (pamops). Each pampd contains some representation of
- * PAGE_SIZE bytes worth of data. Tmem must support potentially millions of
- * pages and must be able to insert, find, and delete these pages at a
- * potential frequency of thousands per second concurrently across many CPUs,
- * (and, if used with KVM, across many vcpus across many guests).
- * Tmem is tracked with a hierarchy of data structures, organized by
- * the elements in a handle-tuple: pool_id, object_id, and page index.
- * One or more "clients" (e.g. guests) each provide one or more tmem_pools.
- * Each pool, contains a hash table of rb_trees of tmem_objs. Each
- * tmem_obj contains a radix-tree-like tree of pointers, with intermediate
- * nodes called tmem_objnodes. Each leaf pointer in this tree points to
- * a pampd, which is accessible only through a small set of callbacks
- * registered by the PAM implementation (see tmem_register_pamops). Tmem
- * does all memory allocation via a set of callbacks registered by the tmem
- * host implementation (e.g. see tmem_register_hostops).
+ * PAGE_SIZE bytes worth of data. For those familiar with key-value stores,
+ * the tmem handle is a three-level hierarchical key, and the value is always
+ * reconstituted (but not necessarily stored) as PAGE_SIZE bytes and is
+ * referenced in the datastore by the pampd. The hierarchy is required
+ * to ensure that certain invalidation functions can be performed efficiently
+ * (i.e. flush all indexes associated with this object_id, or
+ * flush all objects associated with this pool).
+ *
+ * Tmem must support potentially millions of pages and must be able to insert,
+ * find, and delete these pages at a potential frequency of thousands per
+ * second concurrently across many CPUs, (and, if used with KVM, across many
+ * vcpus across many guests). Tmem is tracked with a hierarchy of data
+ * structures, organized by the elements in the handle-tuple: pool_id,
+ * object_id, and page index. One or more "clients" (e.g. guests) each
+ * provide one or more tmem_pools. Each pool, contains a hash table of
+ * rb_trees of tmem_objs. Each tmem_obj contains a radix-tree-like tree
+ * of pointers, with intermediate nodes called tmem_objnodes. Each leaf
+ * pointer in this tree points to a pampd, which is accessible only through
+ * a small set of callbacks registered by the PAM implementation (see
+ * tmem_register_pamops). Tmem only needs to do memory allocation for objs
+ * and objnodes and this is done via a set of callbacks that must be
+ * registered by the tmem host implementation (e.g. see tmem_register_hostops).
*/
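
A rough sketch of the lookup hierarchy described in the comment above (illustration only, not part of this patch): the wrapper name below is made up, but the helpers it calls are the internal routines defined later in this file.

/*
 * Sketch: how one handle (pool, oid, index) resolves to a pampd.
 * Hypothetical wrapper; assumes the hashbucket lock rules described below.
 */
static void *tmem_lookup_sketch(struct tmem_pool *pool,
				struct tmem_oid *oidp, uint32_t index)
{
	struct tmem_hashbucket *hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	struct tmem_obj *obj;
	void *pampd = NULL;

	spin_lock(&hb->lock);		/* bucket lock guards this rb_tree */
	obj = tmem_obj_find(hb, oidp);	/* rb_tree search for the object id */
	if (obj != NULL)		/* radix-like objnode tree walk by index */
		pampd = tmem_pampd_lookup_in_obj(obj, index);
	spin_unlock(&hb->lock);
	return pampd;
}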
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#ifdef CONFIG_RAMSTER
+#include <linux/delay.h>
+#endif
#include "tmem.h"
@@ -51,7 +62,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
/*
* A tmem host implementation must use this function to register
- * callbacks for a page-accessible memory (PAM) implementation
+ * callbacks for a page-accessible memory (PAM) implementation.
*/
static struct tmem_pamops tmem_pamops;
@@ -66,15 +77,22 @@ void tmem_register_pamops(struct tmem_pamops *m)
* So an rb_tree is an ideal data structure to manage tmem_objs. But because
* of the potentially huge number of tmem_objs, each pool manages a hashtable
* of rb_trees to reduce search, insert, delete, and rebalancing time.
- * Each hashbucket also has a lock to manage concurrent access.
+ * Each hashbucket also has a lock to manage concurrent access and no
+ * searches, inserts, or deletions can be performed unless the lock is held.
+ * As a result, care must be taken to ensure tmem routines are not called
+ * recursively; the vast majority of the time, a recursive call may work
+ * but a deadlock will occur a small fraction of the time due to the
+ * hashbucket lock.
*
- * The following routines manage tmem_objs. When any tmem_obj is accessed,
- * the hashbucket lock must be held.
+ * The following routines manage tmem_objs. In all of these routines,
+ * the hashbucket lock is already held.
*/
-static struct tmem_obj
-*__tmem_obj_find(struct tmem_hashbucket*hb, struct tmem_oid *oidp,
- struct rb_node **parent, struct rb_node ***link)
+/* Search for object==oid in pool, returns object if found. */
+static struct tmem_obj *__tmem_obj_find(struct tmem_hashbucket *hb,
+ struct tmem_oid *oidp,
+ struct rb_node **parent,
+ struct rb_node ***link)
{
struct rb_node *_parent = NULL, **rbnode;
struct tmem_obj *obj = NULL;
@@ -101,23 +119,20 @@ static struct tmem_obj
*parent = _parent;
if (link)
*link = rbnode;
-
obj = NULL;
out:
return obj;
}
-
-/* searches for object==oid in pool, returns locked object if found */
static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
struct tmem_oid *oidp)
{
return __tmem_obj_find(hb, oidp, NULL, NULL);
}
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
+static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *, bool);
-/* free an object that has no more pampds in it */
+/* Free an object that has no more pampds in it. */
static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
{
struct tmem_pool *pool;
@@ -128,7 +143,7 @@ static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
pool = obj->pool;
BUG_ON(pool == NULL);
if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
- tmem_pampd_destroy_all_in_obj(obj);
+ tmem_pampd_destroy_all_in_obj(obj, false);
BUG_ON(obj->objnode_tree_root != NULL);
BUG_ON((long)obj->objnode_count != 0);
atomic_dec(&pool->obj_count);
@@ -140,7 +155,7 @@ static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
}
/*
- * initialize, and insert an tmem_object_root (called only if find failed)
+ * Initialize and insert a tmem_object_root (called only if find failed).
*/
static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
struct tmem_pool *pool,
@@ -157,7 +172,10 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
obj->oid = *oidp;
obj->objnode_count = 0;
obj->pampd_count = 0;
- (*tmem_pamops.new_obj)(obj);
+#ifdef CONFIG_RAMSTER
+ if (tmem_pamops.new_obj != NULL)
+ (*tmem_pamops.new_obj)(obj);
+#endif
SET_SENTINEL(obj, OBJ);
if (__tmem_obj_find(hb, oidp, &parent, &new))
@@ -172,7 +190,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
* "ephemeral" vs "persistent". These attributes apply to all tmem_objs
* and all pampds that belong to a tmem_pool. A tmem_pool is created
* or deleted relatively rarely (for example, when a filesystem is
- * mounted or unmounted.
+ * mounted or unmounted).
*/
/* flush all data from a pool and, optionally, free it */
@@ -190,7 +208,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
while (rbnode != NULL) {
obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
rbnode = rb_next(rbnode);
- tmem_pampd_destroy_all_in_obj(obj);
+ tmem_pampd_destroy_all_in_obj(obj, true);
tmem_obj_free(obj, hb);
(*tmem_hostops.obj_free)(obj, pool);
}
@@ -276,7 +294,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
}
/*
- * lookup index in object and return associated pampd (or NULL if not found)
+ * Lookup index in object and return associated pampd (or NULL if not found).
*/
static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
@@ -318,8 +336,9 @@ static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
return slot != NULL ? *slot : NULL;
}
+#ifdef CONFIG_RAMSTER
static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
- void *new_pampd)
+ void *new_pampd, bool no_free)
{
struct tmem_objnode **slot;
void *ret = NULL;
@@ -328,11 +347,14 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
if ((slot != NULL) && (*slot != NULL)) {
void *old_pampd = *(void **)slot;
*(void **)slot = new_pampd;
- (*tmem_pamops.free)(old_pampd, obj->pool, NULL, 0);
+ if (!no_free)
+ (*tmem_pamops.free)(old_pampd, obj->pool,
+ NULL, 0, false);
ret = new_pampd;
}
return ret;
}
+#endif
static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
void *pampd)
@@ -470,7 +492,7 @@ out:
return slot;
}
-/* recursively walk the objnode_tree destroying pampds and objnodes */
+/* Recursively walk the objnode_tree destroying pampds and objnodes. */
static void tmem_objnode_node_destroy(struct tmem_obj *obj,
struct tmem_objnode *objnode,
unsigned int ht)
@@ -484,7 +506,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
if (ht == 1) {
obj->pampd_count--;
(*tmem_pamops.free)(objnode->slots[i],
- obj->pool, NULL, 0);
+ obj->pool, NULL, 0, true);
objnode->slots[i] = NULL;
continue;
}
@@ -495,13 +517,15 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
}
}
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
+static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
+ bool pool_destroy)
{
if (obj->objnode_tree_root == NULL)
return;
if (obj->objnode_tree_height == 0) {
obj->pampd_count--;
- (*tmem_pamops.free)(obj->objnode_tree_root, obj->pool, NULL, 0);
+ (*tmem_pamops.free)(obj->objnode_tree_root,
+ obj->pool, NULL, 0, true);
} else {
tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
obj->objnode_tree_height);
@@ -509,7 +533,10 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
obj->objnode_tree_height = 0;
}
obj->objnode_tree_root = NULL;
- (*tmem_pamops.free_obj)(obj->pool, obj);
+#ifdef CONFIG_RAMSTER
+ if (tmem_pamops.free_obj != NULL)
+ (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
+#endif
}
/*
@@ -522,17 +549,16 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
*/
/*
- * "Put" a page, e.g. copy a page from the kernel into newly allocated
- * PAM space (if such space is available). Tmem_put is complicated by
- * a corner case: What if a page with matching handle already exists in
- * tmem? To guarantee coherency, one of two actions is necessary: Either
- * the data for the page must be overwritten, or the page must be
- * "flushed" so that the data is not accessible to a subsequent "get".
- * Since these "duplicate puts" are relatively rare, this implementation
- * always flushes for simplicity.
+ * "Put" a page, e.g. associate the passed pampd with the passed handle.
+ * Tmem_put is complicated by a corner case: What if a page with matching
+ * handle already exists in tmem? To guarantee coherency, one of two
+ * actions is necessary: Either the data for the page must be overwritten,
+ * or the page must be "flushed" so that the data is not accessible to a
+ * subsequent "get". Since these "duplicate puts" are relatively rare,
+ * this implementation always flushes for simplicity.
*/
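
A minimal caller-side sketch of the reworked put interface (hypothetical host code, not from this patch): the host now builds the pampd itself, typically via its own pamops create path, and hands it to tmem_put() instead of passing raw data for tmem to copy. my_host_create_pampd() is a made-up stand-in.

static int host_put_page_sketch(struct tmem_pool *pool, struct tmem_oid *oidp,
				uint32_t index, struct page *page)
{
	/* stand-in for the host's pampd creation (e.g. its pamops create) */
	void *pampd = my_host_create_pampd(page, pool, oidp, index);

	if (pampd == NULL)
		return -ENOMEM;
	/* the meaning of the 'raw' flag is defined by the host's pamops */
	return tmem_put(pool, oidp, index, false, pampd);
}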
int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- char *data, size_t size, bool raw, bool ephemeral)
+ bool raw, void *pampd_to_use)
{
struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
void *pampd = NULL, *pampd_del = NULL;
@@ -548,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
/* if found, is a dup put, flush the old one */
pampd_del = tmem_pampd_delete_from_obj(obj, index);
BUG_ON(pampd_del != pampd);
- (*tmem_pamops.free)(pampd, pool, oidp, index);
+ (*tmem_pamops.free)(pampd, pool, oidp, index, true);
if (obj->pampd_count == 0) {
objnew = obj;
objfound = NULL;
@@ -565,21 +591,19 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
}
BUG_ON(obj == NULL);
BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
- pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
- obj->pool, &obj->oid, index);
- if (unlikely(pampd == NULL))
- goto free;
+ pampd = pampd_to_use;
+ BUG_ON(pampd_to_use == NULL);
ret = tmem_pampd_add_to_obj(obj, index, pampd);
if (unlikely(ret == -ENOMEM))
/* may have partially built objnode tree ("stump") */
goto delete_and_free;
+ (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
goto out;
delete_and_free:
(void)tmem_pampd_delete_from_obj(obj, index);
-free:
if (pampd)
- (*tmem_pamops.free)(pampd, pool, NULL, 0);
+ (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
if (objnew) {
tmem_obj_free(objnew, hb);
(*tmem_hostops.obj_free)(objnew, pool);
@@ -589,35 +613,160 @@ out:
return ret;
}
+#ifdef CONFIG_RAMSTER
+/*
+ * For ramster only: The following routines provide a two-step sequence
+ * to allow the caller to replace a pampd in the tmem data structures with
+ * another pampd. Here, we look up the passed handle and, if found, return the
+ * associated pampd and object, leaving the hashbucket locked and returning
+ * a reference to it. The caller is expected to immediately call the
+ * matching tmem_localify_finish routine, which will handle the replacement
+ * and unlock the hashbucket.
+ */
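
A caller-side sketch of that two-step sequence (hypothetical ramster-side code, not from this patch); the hashbucket remains locked between the two calls, so nothing may sleep in between.

static void localify_sketch(struct tmem_pool *pool, struct tmem_oid *oidp,
			    uint32_t index, void *local_pampd)
{
	struct tmem_obj *obj;
	void *saved_hb;
	void *remote_pampd;

	remote_pampd = tmem_localify_get_pampd(pool, oidp, index,
					       &obj, &saved_hb);
	if (remote_pampd != NULL)
		/* replace the remote/placeholder pampd with the local one */
		tmem_localify_finish(obj, index, local_pampd, saved_hb, false);
	else
		/* nothing to localify; just release the still-held lock */
		tmem_localify_finish(obj, index, NULL, saved_hb, false);
}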
+void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, struct tmem_obj **ret_obj,
+ void **saved_hb)
+{
+ struct tmem_hashbucket *hb;
+ struct tmem_obj *obj = NULL;
+ void *pampd = NULL;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (likely(obj != NULL))
+ pampd = tmem_pampd_lookup_in_obj(obj, index);
+ *ret_obj = obj;
+ *saved_hb = (void *)hb;
+ /* note, hashbucket remains locked */
+ return pampd;
+}
+
+void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
+ void *pampd, void *saved_hb, bool delete)
+{
+ struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;
+
+ BUG_ON(!spin_is_locked(&hb->lock));
+ if (pampd != NULL) {
+ BUG_ON(obj == NULL);
+ (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
+ (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
+ } else if (delete) {
+ BUG_ON(obj == NULL);
+ (void)tmem_pampd_delete_from_obj(obj, index);
+ }
+ spin_unlock(&hb->lock);
+}
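
A hypothetical caller of this two-step sequence is sketched below purely for
illustration (ramster's real localify path lives outside this file, and the
data-movement step in the middle is elided):

/* hypothetical illustration of the get_pampd/finish pairing */
static void example_localify(struct tmem_pool *pool, struct tmem_oid *oidp,
			     uint32_t index, void *new_pampd)
{
	struct tmem_obj *obj;
	void *saved_hb, *old_pampd;

	/* step 1: returns with the hashbucket still locked */
	old_pampd = tmem_localify_get_pampd(pool, oidp, index,
					    &obj, &saved_hb);
	if (old_pampd == NULL) {
		/* nothing to replace; still must unlock via _finish */
		tmem_localify_finish(obj, index, NULL, saved_hb, false);
		return;
	}
	/* ... move/decompress the remote data into new_pampd here ... */
	/* step 2: swap in the new pampd and unlock the hashbucket */
	tmem_localify_finish(obj, index, new_pampd, saved_hb, false);
}
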
+
+/*
+ * For ramster only. Helper function to support asynchronous tmem_get.
+ */
+static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
+ struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, bool free, char *data)
+{
+ void *old_pampd = *ppampd, *new_pampd = NULL;
+ bool intransit = false;
+ int ret = 0;
+
+ if (!is_ephemeral(pool))
+ new_pampd = (*tmem_pamops.repatriate_preload)(
+ old_pampd, pool, oidp, index, &intransit);
+ if (intransit)
+ ret = -EAGAIN;
+ else if (new_pampd != NULL)
+ *ppampd = new_pampd;
+ /* must release the hb->lock else repatriate can't sleep */
+ spin_unlock(&hb->lock);
+ if (!intransit)
+ ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
+ oidp, index, free, data);
+ if (ret == -EAGAIN) {
+ /* rare, I think, but perhaps this should cond_resched()? */
+ usleep_range(10, 1000);
+ } else if (ret == -ENOTCONN || ret == -EHOSTDOWN) {
+ ret = -1;
+ } else if (ret != 0 && ret != -ENOENT) {
+ ret = -1;
+ }
+ /* note hb->lock has now been unlocked */
+ return ret;
+}
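
The error handling above is terse; restating it (no behavior added):

/*
 * Return values from tmem_repatriate() as seen by tmem_get():
 *   -EAGAIN         pampd still in transit; tmem_get()'s do/while retries
 *   0 or -ENOENT    passed through unchanged
 *   anything else   (including -ENOTCONN/-EHOSTDOWN) collapsed to -1
 * In every case hb->lock has already been dropped by the time it returns.
 */
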
+
/*
- * "Get" a page, e.g. if one can be found, copy the tmem page with the
- * matching handle from PAM space to the kernel. By tmem definition,
- * when a "get" is successful on an ephemeral page, the page is "flushed",
- * and when a "get" is successful on a persistent page, the page is retained
- * in tmem. Note that to preserve
+ * For ramster only. If a page in tmem matches the handle, replace the
+ * page so that any subsequent "get" gets the new page. Returns 0 if
+ * there was a page to replace, else returns -1.
+ */
+int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, void *new_pampd)
+{
+ struct tmem_obj *obj;
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
+ /* if we bug here, pamops wasn't properly set up for ramster */
+ BUG_ON(tmem_pamops.replace_in_obj == NULL);
+ ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+#endif
+
+/*
+ * "Get" a page, e.g. if a pampd can be found matching the passed handle,
+ * use a pamops callback to recreate the page from the pampd with the
+ * matching handle. By tmem definition, when a "get" is successful on
+ * an ephemeral page, the page is "flushed", and when a "get" is successful
+ * on a persistent page, the page is retained in tmem. Note that to preserve
* coherency, "get" can never be skipped if tmem contains the data.
* That is, if a get is done with a certain handle and fails, any
* subsequent "get" must also fail (unless of course there is a
* "put" done with the same handle).
-
*/
int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- char *data, size_t *size, bool raw, int get_and_free)
+ char *data, size_t *sizep, bool raw, int get_and_free)
{
struct tmem_obj *obj;
- void *pampd;
+ void *pampd = NULL;
bool ephemeral = is_ephemeral(pool);
int ret = -1;
struct tmem_hashbucket *hb;
bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
bool lock_held = false;
+ void **ppampd;
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- lock_held = true;
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
+ do {
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ lock_held = true;
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ ppampd = __tmem_pampd_lookup_in_obj(obj, index);
+ if (ppampd == NULL)
+ goto out;
+#ifdef CONFIG_RAMSTER
+ if ((tmem_pamops.is_remote != NULL) &&
+ tmem_pamops.is_remote(*ppampd)) {
+ ret = tmem_repatriate(ppampd, hb, pool, oidp,
+ index, free, data);
+ /* tmem_repatriate releases hb->lock */
+ lock_held = false;
+ *sizep = PAGE_SIZE;
+ if (ret != -EAGAIN)
+ goto out;
+ }
+#endif
+ } while (ret == -EAGAIN);
if (free)
pampd = tmem_pampd_delete_from_obj(obj, index);
else
@@ -631,16 +780,12 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
obj = NULL;
}
}
- if (tmem_pamops.is_remote(pampd)) {
- lock_held = false;
- spin_unlock(&hb->lock);
- }
if (free)
ret = (*tmem_pamops.get_data_and_free)(
- data, size, raw, pampd, pool, oidp, index);
+ data, sizep, raw, pampd, pool, oidp, index);
else
ret = (*tmem_pamops.get_data)(
- data, size, raw, pampd, pool, oidp, index);
+ data, sizep, raw, pampd, pool, oidp, index);
if (ret < 0)
goto out;
ret = 0;
@@ -671,7 +816,7 @@ int tmem_flush_page(struct tmem_pool *pool,
pampd = tmem_pampd_delete_from_obj(obj, index);
if (pampd == NULL)
goto out;
- (*tmem_pamops.free)(pampd, pool, oidp, index);
+ (*tmem_pamops.free)(pampd, pool, oidp, index, true);
if (obj->pampd_count == 0) {
tmem_obj_free(obj, hb);
(*tmem_hostops.obj_free)(obj, pool);
@@ -684,30 +829,6 @@ out:
}
/*
- * If a page in tmem matches the handle, replace the page so that any
- * subsequent "get" gets the new page. Returns 0 if
- * there was a page to replace, else returns -1.
- */
-int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, void *new_pampd)
-{
- struct tmem_obj *obj;
- int ret = -1;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd);
- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
* "Flush" all pages in tmem matching this oid.
*/
int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
@@ -721,7 +842,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
obj = tmem_obj_find(hb, oidp);
if (obj == NULL)
goto out;
- tmem_pampd_destroy_all_in_obj(obj);
+ tmem_pampd_destroy_all_in_obj(obj, false);
tmem_obj_free(obj, hb);
(*tmem_hostops.obj_free)(obj, pool);
ret = 0;
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
index 0d4aa82706b3..adbe5a8f28aa 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
@@ -3,7 +3,7 @@
*
* Transcendent memory
*
- * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
*/
#ifndef _TMEM_H_
@@ -15,12 +15,8 @@
#include <linux/atomic.h>
/*
- * These are pre-defined by the Xen<->Linux ABI
+ * These are defined by the Xen<->Linux ABI so should remain consistent
*/
-#define TMEM_PUT_PAGE 4
-#define TMEM_GET_PAGE 5
-#define TMEM_FLUSH_PAGE 6
-#define TMEM_FLUSH_OBJECT 7
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
#define TMEM_POOL_PRECOMPRESSED 4
@@ -32,7 +28,7 @@
* sentinels have proven very useful for debugging but can be removed
* or disabled before final merge.
*/
-#define SENTINELS
+#undef SENTINELS
#ifdef SENTINELS
#define DECL_SENTINEL uint32_t sentinel;
#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
@@ -130,6 +126,34 @@ static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
TMEM_HASH_BUCKET_BITS);
}
+#ifdef CONFIG_RAMSTER
+struct tmem_xhandle {
+ uint8_t client_id;
+ uint8_t xh_data_cksum;
+ uint16_t xh_data_size;
+ uint16_t pool_id;
+ struct tmem_oid oid;
+ uint32_t index;
+ void *extra;
+};
+
+static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
+ struct tmem_pool *pool,
+ struct tmem_oid *oidp,
+ uint32_t index)
+{
+ struct tmem_xhandle xh;
+ xh.client_id = client_id;
+ xh.xh_data_cksum = (uint8_t)-1;
+ xh.xh_data_size = (uint16_t)-1;
+ xh.pool_id = pool->pool_id;
+ xh.oid = *oidp;
+ xh.index = index;
+ return xh;
+}
+#endif
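
A cross-node handle built by tmem_xhandle_fill() would typically be shipped
to a remote node together with the page data. The sketch below is
hypothetical; remote_put() is a placeholder for whatever transport ramster
provides, not a symbol defined in this patch:

/* hypothetical illustration only; remote_put() is a placeholder */
static inline int example_ship_page(uint16_t client_id, struct tmem_pool *pool,
				    struct tmem_oid *oidp, uint32_t index,
				    void *data, uint16_t size)
{
	struct tmem_xhandle xh = tmem_xhandle_fill(client_id, pool,
						   oidp, index);

	xh.xh_data_size = size;		/* overrides the (uint16_t)-1 default */
	return remote_put(&xh, data);	/* placeholder transport call */
}
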
+
+
/*
* A tmem_obj contains an identifier (oid), pointers to the parent
* pool and the rb_tree to which it belongs, counters, and an ordered
@@ -147,7 +171,15 @@ struct tmem_obj {
unsigned int objnode_tree_height;
unsigned long objnode_count;
long pampd_count;
+#ifdef CONFIG_RAMSTER
+ /*
+ * for current design of ramster, all pages belonging to
+ * an object reside on the same remotenode and extra is
+ * used to record the number of the remotenode so a
+ * flush-object operation can specify it
+ */
void *extra; /* for private use by pampd implementation */
+#endif
DECL_SENTINEL
};
@@ -165,20 +197,34 @@ struct tmem_objnode {
unsigned int slots_in_use;
};
+struct tmem_handle {
+ struct tmem_oid oid; /* 24 bytes */
+ uint32_t index;
+ uint16_t pool_id;
+ uint16_t client_id;
+};
+
+
/* pampd abstract datatype methods provided by the PAM implementation */
struct tmem_pamops {
- void *(*create)(char *, size_t, bool, int,
- struct tmem_pool *, struct tmem_oid *, uint32_t);
+ void (*create_finish)(void *, bool);
int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
struct tmem_oid *, uint32_t);
int (*get_data_and_free)(char *, size_t *, bool, void *,
struct tmem_pool *, struct tmem_oid *,
uint32_t);
- void (*free)(void *, struct tmem_pool *, struct tmem_oid *, uint32_t);
- void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
- bool (*is_remote)(void *);
+ void (*free)(void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool);
+#ifdef CONFIG_RAMSTER
void (*new_obj)(struct tmem_obj *);
+ void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool);
+ void *(*repatriate_preload)(void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool *);
+ int (*repatriate)(void *, void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool, void *);
+ bool (*is_remote)(void *);
int (*replace_in_obj)(void *, struct tmem_obj *);
+#endif
};
extern void tmem_register_pamops(struct tmem_pamops *m);
@@ -193,14 +239,21 @@ extern void tmem_register_hostops(struct tmem_hostops *m);
/* core tmem accessor functions */
extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- char *, size_t, bool, bool);
+ bool, void *);
extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
char *, size_t *, bool, int);
-extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- void *);
extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
uint32_t index);
extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
extern int tmem_destroy_pool(struct tmem_pool *);
extern void tmem_new_pool(struct tmem_pool *, uint32_t);
+#ifdef CONFIG_RAMSTER
+extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
+ void *);
+extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
+ uint32_t index, struct tmem_obj **,
+ void **);
+extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
+ void *, void *, bool);
+#endif
#endif /* _TMEM_H */
diff --git a/drivers/staging/ramster/zbud.c b/drivers/staging/zcache/zbud.c
index a7c436127aa1..fdff5c6a0239 100644
--- a/drivers/staging/ramster/zbud.c
+++ b/drivers/staging/zcache/zbud.c
@@ -103,8 +103,8 @@ struct zbudpage {
struct {
unsigned long space_for_flags;
struct {
- unsigned zbud0_size:12;
- unsigned zbud1_size:12;
+ unsigned zbud0_size:PAGE_SHIFT;
+ unsigned zbud1_size:PAGE_SHIFT;
unsigned unevictable:2;
};
struct list_head budlist;
@@ -112,6 +112,9 @@ struct zbudpage {
};
};
};
+#if (PAGE_SHIFT * 2) + 2 > BITS_PER_LONG
+#error "zbud won't work for this arch, PAGE_SIZE is too large"
+#endif
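
The compile-time guard above can be sanity-checked with concrete page sizes
(illustrative arithmetic only):

/*
 * With 4 KiB pages (PAGE_SHIFT == 12) the two zbud size fields plus the
 * 2-bit unevictable field need 12 + 12 + 2 = 26 bits, which fits in a
 * 32-bit or 64-bit long.  With 64 KiB pages (PAGE_SHIFT == 16) they need
 * 16 + 16 + 2 = 34 bits, so a 32-bit arch would trip the #error above.
 */
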
struct zbudref {
union {
@@ -278,26 +281,26 @@ static inline char *zbud_data(void *zbpg,
* debugfs viewers, some of these should also be atomic_long_t, but
* I don't know how to expose atomics via debugfs either...
*/
-static unsigned long zbud_eph_pageframes;
-static unsigned long zbud_pers_pageframes;
-static unsigned long zbud_eph_zpages;
-static unsigned long zbud_pers_zpages;
+static ssize_t zbud_eph_pageframes;
+static ssize_t zbud_pers_pageframes;
+static ssize_t zbud_eph_zpages;
+static ssize_t zbud_pers_zpages;
static u64 zbud_eph_zbytes;
static u64 zbud_pers_zbytes;
-static unsigned long zbud_eph_evicted_pageframes;
-static unsigned long zbud_pers_evicted_pageframes;
-static unsigned long zbud_eph_cumul_zpages;
-static unsigned long zbud_pers_cumul_zpages;
+static ssize_t zbud_eph_evicted_pageframes;
+static ssize_t zbud_pers_evicted_pageframes;
+static ssize_t zbud_eph_cumul_zpages;
+static ssize_t zbud_pers_cumul_zpages;
static u64 zbud_eph_cumul_zbytes;
static u64 zbud_pers_cumul_zbytes;
-static unsigned long zbud_eph_cumul_chunk_counts[NCHUNKS];
-static unsigned long zbud_pers_cumul_chunk_counts[NCHUNKS];
-static unsigned long zbud_eph_buddied_count;
-static unsigned long zbud_pers_buddied_count;
-static unsigned long zbud_eph_unbuddied_count;
-static unsigned long zbud_pers_unbuddied_count;
-static unsigned long zbud_eph_zombie_count;
-static unsigned long zbud_pers_zombie_count;
+static ssize_t zbud_eph_cumul_chunk_counts[NCHUNKS];
+static ssize_t zbud_pers_cumul_chunk_counts[NCHUNKS];
+static ssize_t zbud_eph_buddied_count;
+static ssize_t zbud_pers_buddied_count;
+static ssize_t zbud_eph_unbuddied_count;
+static ssize_t zbud_pers_unbuddied_count;
+static ssize_t zbud_eph_zombie_count;
+static ssize_t zbud_pers_zombie_count;
static atomic_t zbud_eph_zombie_atomic;
static atomic_t zbud_pers_zombie_atomic;
@@ -401,7 +404,7 @@ static inline struct page *zbud_unuse_zbudpage(struct zbudpage *zbudpage,
else
zbud_pers_pageframes--;
zbudpage_spin_unlock(zbudpage);
- reset_page_mapcount(page);
+ page_mapcount_reset(page);
init_page_count(page);
page->index = 0;
return page;
@@ -1044,7 +1047,7 @@ out:
return ret;
}
-void __init zbud_init(void)
+void zbud_init(void)
{
int i;
diff --git a/drivers/staging/ramster/zbud.h b/drivers/staging/zcache/zbud.h
index 891e8a7d5aa5..891e8a7d5aa5 100644
--- a/drivers/staging/ramster/zbud.h
+++ b/drivers/staging/zcache/zbud.h
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 52b43b7b83d7..328898ea76c3 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1,23 +1,15 @@
/*
* zcache.c
*
- * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
* Copyright (c) 2010,2011, Nitin Gupta
*
* Zcache provides an in-kernel "host implementation" for transcendent memory
- * and, thus indirectly, for cleancache and frontswap. Zcache includes two
- * page-accessible memory [1] interfaces, both utilizing the crypto compression
- * API:
- * 1) "compression buddies" ("zbud") is used for ephemeral pages
- * 2) zsmalloc is used for persistent pages.
- * Xvmalloc (based on the TLSF allocator) has very low fragmentation
- * so maximizes space efficiency, while zbud allows pairs (and potentially,
- * in the future, more than a pair of) compressed pages to be closely linked
- * so that reclaiming can be done via the kernel's physical-page-oriented
- * "shrinker" interface.
- *
- * [1] For a definition of page-accessible memory (aka PAM), see:
- * http://marc.info/?l=linux-mm&m=127811271605009
+ * ("tmem") and, thus indirectly, for cleancache and frontswap. Zcache uses
+ * lzo1x compression to improve density and an embedded allocator called
+ * "zbud" which "buddies" two compressed pages semi-optimally in each physical
+ * pageframe. Zbud is integrally tied into tmem to allow pageframes to
+ * be "reclaimed" efficiently.
*/
#include <linux/module.h>
@@ -30,70 +22,62 @@
#include <linux/atomic.h>
#include <linux/math64.h>
#include <linux/crypto.h>
-#include <linux/string.h>
-#include <linux/idr.h>
-#include "tmem.h"
-
-#include "../zsmalloc/zsmalloc.h"
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
-#ifdef CONFIG_CLEANCACHE
#include <linux/cleancache.h>
-#endif
-#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
-#endif
-
-#if 0
-/* this is more aggressive but may cause other problems? */
-#define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
+#include "tmem.h"
+#include "zcache.h"
+#include "zbud.h"
+#include "ramster.h"
+#ifdef CONFIG_RAMSTER
+static int ramster_enabled;
#else
-#define ZCACHE_GFP_MASK \
- (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
+#define ramster_enabled 0
#endif
-#define MAX_CLIENTS 16
-#define LOCAL_CLIENT ((uint16_t)-1)
-
-MODULE_LICENSE("GPL");
-
-struct zcache_client {
- struct idr tmem_pools;
- struct zs_pool *zspool;
- bool allocated;
- atomic_t refcount;
-};
-
-static struct zcache_client zcache_host;
-static struct zcache_client zcache_clients[MAX_CLIENTS];
-
-static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
+#ifndef __PG_WAS_ACTIVE
+static inline bool PageWasActive(struct page *page)
{
- BUG_ON(cli == NULL);
- if (cli == &zcache_host)
- return LOCAL_CLIENT;
- return cli - &zcache_clients[0];
+ return true;
}
-static struct zcache_client *get_zcache_client(uint16_t cli_id)
+static inline void SetPageWasActive(struct page *page)
{
- if (cli_id == LOCAL_CLIENT)
- return &zcache_host;
-
- if ((unsigned int)cli_id < MAX_CLIENTS)
- return &zcache_clients[cli_id];
-
- return NULL;
}
+#endif
-static inline bool is_local_client(struct zcache_client *cli)
+#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
+static bool frontswap_has_exclusive_gets __read_mostly = true;
+#else
+static bool frontswap_has_exclusive_gets __read_mostly;
+static inline void frontswap_tmem_exclusive_gets(bool b)
{
- return cli == &zcache_host;
}
+#endif
+
+/* enable (or fix code) when Seth's patches are accepted upstream */
+#define zcache_writeback_enabled 0
+
+static int zcache_enabled __read_mostly;
+static int disable_cleancache __read_mostly;
+static int disable_frontswap __read_mostly;
+static int disable_frontswap_ignore_nonactive __read_mostly;
+static int disable_cleancache_ignore_nonactive __read_mostly;
+static char *namestr __read_mostly = "zcache";
+
+#define ZCACHE_GFP_MASK \
+ (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
+
+MODULE_LICENSE("GPL");
/* crypto API for zcache */
#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
-static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
-static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
+static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
+static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
enum comp_op {
ZCACHE_COMPOP_COMPRESS,
@@ -105,7 +89,7 @@ static inline int zcache_comp_op(enum comp_op op,
u8 *dst, unsigned int *dlen)
{
struct crypto_comp *tfm;
- int ret;
+ int ret = -1;
BUG_ON(!zcache_comp_pcpu_tfms);
tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
@@ -124,816 +108,247 @@ static inline int zcache_comp_op(enum comp_op op,
return ret;
}
-/**********
- * Compression buddies ("zbud") provides for packing two (or, possibly
- * in the future, more) compressed ephemeral pages into a single "raw"
- * (physical) page and tracking them with data structures so that
- * the raw pages can be easily reclaimed.
- *
- * A zbud page ("zbpg") is an aligned page containing a list_head,
- * a lock, and two "zbud headers". The remainder of the physical
- * page is divided up into aligned 64-byte "chunks" which contain
- * the compressed data for zero, one, or two zbuds. Each zbpg
- * resides on: (1) an "unused list" if it has no zbuds; (2) a
- * "buddied" list if it is fully populated with two zbuds; or
- * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
- * the one unbuddied zbud uses. The data inside a zbpg cannot be
- * read or written unless the zbpg's lock is held.
- */
-
-#define ZBH_SENTINEL 0x43214321
-#define ZBPG_SENTINEL 0xdeadbeef
-
-#define ZBUD_MAX_BUDS 2
-
-struct zbud_hdr {
- uint16_t client_id;
- uint16_t pool_id;
- struct tmem_oid oid;
- uint32_t index;
- uint16_t size; /* compressed size in bytes, zero means unused */
- DECL_SENTINEL
-};
-
-struct zbud_page {
- struct list_head bud_list;
- spinlock_t lock;
- struct zbud_hdr buddy[ZBUD_MAX_BUDS];
- DECL_SENTINEL
- /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
-};
-
-#define CHUNK_SHIFT 6
-#define CHUNK_SIZE (1 << CHUNK_SHIFT)
-#define CHUNK_MASK (~(CHUNK_SIZE-1))
-#define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \
- CHUNK_MASK) >> CHUNK_SHIFT)
-#define MAX_CHUNK (NCHUNKS-1)
-
-static struct {
- struct list_head list;
- unsigned count;
-} zbud_unbuddied[NCHUNKS];
-/* list N contains pages with N chunks USED and NCHUNKS-N unused */
-/* element 0 is never used but optimizing that isn't worth it */
-static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
-
-struct list_head zbud_buddied_list;
-static unsigned long zcache_zbud_buddied_count;
-
-/* protects the buddied list and all unbuddied lists */
-static DEFINE_SPINLOCK(zbud_budlists_spinlock);
-
-static LIST_HEAD(zbpg_unused_list);
-static unsigned long zcache_zbpg_unused_list_count;
-
-/* protects the unused page list */
-static DEFINE_SPINLOCK(zbpg_unused_list_spinlock);
-
-static atomic_t zcache_zbud_curr_raw_pages;
-static atomic_t zcache_zbud_curr_zpages;
-static unsigned long zcache_zbud_curr_zbytes;
-static unsigned long zcache_zbud_cumul_zpages;
-static unsigned long zcache_zbud_cumul_zbytes;
-static unsigned long zcache_compress_poor;
-static unsigned long zcache_mean_compress_poor;
-
-/* forward references */
-static void *zcache_get_free_page(void);
-static void zcache_free_page(void *p);
-
-/*
- * zbud helper functions
- */
-
-static inline unsigned zbud_max_buddy_size(void)
-{
- return MAX_CHUNK << CHUNK_SHIFT;
-}
-
-static inline unsigned zbud_size_to_chunks(unsigned size)
-{
- BUG_ON(size == 0 || size > zbud_max_buddy_size());
- return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
-}
-
-static inline int zbud_budnum(struct zbud_hdr *zh)
-{
- unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
- struct zbud_page *zbpg = NULL;
- unsigned budnum = -1U;
- int i;
-
- for (i = 0; i < ZBUD_MAX_BUDS; i++)
- if (offset == offsetof(typeof(*zbpg), buddy[i])) {
- budnum = i;
- break;
- }
- BUG_ON(budnum == -1U);
- return budnum;
-}
-
-static char *zbud_data(struct zbud_hdr *zh, unsigned size)
-{
- struct zbud_page *zbpg;
- char *p;
- unsigned budnum;
-
- ASSERT_SENTINEL(zh, ZBH);
- budnum = zbud_budnum(zh);
- BUG_ON(size == 0 || size > zbud_max_buddy_size());
- zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
- ASSERT_SPINLOCK(&zbpg->lock);
- p = (char *)zbpg;
- if (budnum == 0)
- p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
- CHUNK_MASK);
- else if (budnum == 1)
- p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
- return p;
-}
-
-/*
- * zbud raw page management
- */
-
-static struct zbud_page *zbud_alloc_raw_page(void)
-{
- struct zbud_page *zbpg = NULL;
- struct zbud_hdr *zh0, *zh1;
- bool recycled = 0;
-
- /* if any pages on the zbpg list, use one */
- spin_lock(&zbpg_unused_list_spinlock);
- if (!list_empty(&zbpg_unused_list)) {
- zbpg = list_first_entry(&zbpg_unused_list,
- struct zbud_page, bud_list);
- list_del_init(&zbpg->bud_list);
- zcache_zbpg_unused_list_count--;
- recycled = 1;
- }
- spin_unlock(&zbpg_unused_list_spinlock);
- if (zbpg == NULL)
- /* none on zbpg list, try to get a kernel page */
- zbpg = zcache_get_free_page();
- if (likely(zbpg != NULL)) {
- INIT_LIST_HEAD(&zbpg->bud_list);
- zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
- spin_lock_init(&zbpg->lock);
- if (recycled) {
- ASSERT_INVERTED_SENTINEL(zbpg, ZBPG);
- SET_SENTINEL(zbpg, ZBPG);
- BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
- BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
- } else {
- atomic_inc(&zcache_zbud_curr_raw_pages);
- INIT_LIST_HEAD(&zbpg->bud_list);
- SET_SENTINEL(zbpg, ZBPG);
- zh0->size = 0; zh1->size = 0;
- tmem_oid_set_invalid(&zh0->oid);
- tmem_oid_set_invalid(&zh1->oid);
- }
- }
- return zbpg;
-}
-
-static void zbud_free_raw_page(struct zbud_page *zbpg)
-{
- struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
-
- ASSERT_SENTINEL(zbpg, ZBPG);
- BUG_ON(!list_empty(&zbpg->bud_list));
- ASSERT_SPINLOCK(&zbpg->lock);
- BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
- BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
- INVERT_SENTINEL(zbpg, ZBPG);
- spin_unlock(&zbpg->lock);
- spin_lock(&zbpg_unused_list_spinlock);
- list_add(&zbpg->bud_list, &zbpg_unused_list);
- zcache_zbpg_unused_list_count++;
- spin_unlock(&zbpg_unused_list_spinlock);
-}
-
-/*
- * core zbud handling routines
- */
-
-static unsigned zbud_free(struct zbud_hdr *zh)
-{
- unsigned size;
-
- ASSERT_SENTINEL(zh, ZBH);
- BUG_ON(!tmem_oid_valid(&zh->oid));
- size = zh->size;
- BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
- zh->size = 0;
- tmem_oid_set_invalid(&zh->oid);
- INVERT_SENTINEL(zh, ZBH);
- zcache_zbud_curr_zbytes -= size;
- atomic_dec(&zcache_zbud_curr_zpages);
- return size;
-}
-
-static void zbud_free_and_delist(struct zbud_hdr *zh)
-{
- unsigned chunks;
- struct zbud_hdr *zh_other;
- unsigned budnum = zbud_budnum(zh), size;
- struct zbud_page *zbpg =
- container_of(zh, struct zbud_page, buddy[budnum]);
-
- spin_lock(&zbud_budlists_spinlock);
- spin_lock(&zbpg->lock);
- if (list_empty(&zbpg->bud_list)) {
- /* ignore zombie page... see zbud_evict_pages() */
- spin_unlock(&zbpg->lock);
- spin_unlock(&zbud_budlists_spinlock);
- return;
- }
- size = zbud_free(zh);
- ASSERT_SPINLOCK(&zbpg->lock);
- zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
- if (zh_other->size == 0) { /* was unbuddied: unlist and free */
- chunks = zbud_size_to_chunks(size) ;
- BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
- list_del_init(&zbpg->bud_list);
- zbud_unbuddied[chunks].count--;
- spin_unlock(&zbud_budlists_spinlock);
- zbud_free_raw_page(zbpg);
- } else { /* was buddied: move remaining buddy to unbuddied list */
- chunks = zbud_size_to_chunks(zh_other->size) ;
- list_del_init(&zbpg->bud_list);
- zcache_zbud_buddied_count--;
- list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
- zbud_unbuddied[chunks].count++;
- spin_unlock(&zbud_budlists_spinlock);
- spin_unlock(&zbpg->lock);
- }
-}
-
-static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
- struct tmem_oid *oid,
- uint32_t index, struct page *page,
- void *cdata, unsigned size)
-{
- struct zbud_hdr *zh0, *zh1, *zh = NULL;
- struct zbud_page *zbpg = NULL, *ztmp;
- unsigned nchunks;
- char *to;
- int i, found_good_buddy = 0;
-
- nchunks = zbud_size_to_chunks(size) ;
- for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
- spin_lock(&zbud_budlists_spinlock);
- if (!list_empty(&zbud_unbuddied[i].list)) {
- list_for_each_entry_safe(zbpg, ztmp,
- &zbud_unbuddied[i].list, bud_list) {
- if (spin_trylock(&zbpg->lock)) {
- found_good_buddy = i;
- goto found_unbuddied;
- }
- }
- }
- spin_unlock(&zbud_budlists_spinlock);
- }
- /* didn't find a good buddy, try allocating a new page */
- zbpg = zbud_alloc_raw_page();
- if (unlikely(zbpg == NULL))
- goto out;
- /* ok, have a page, now compress the data before taking locks */
- spin_lock(&zbud_budlists_spinlock);
- spin_lock(&zbpg->lock);
- list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
- zbud_unbuddied[nchunks].count++;
- zh = &zbpg->buddy[0];
- goto init_zh;
-
-found_unbuddied:
- ASSERT_SPINLOCK(&zbpg->lock);
- zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
- BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
- if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
- ASSERT_SENTINEL(zh0, ZBH);
- zh = zh1;
- } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
- ASSERT_SENTINEL(zh1, ZBH);
- zh = zh0;
- } else
- BUG();
- list_del_init(&zbpg->bud_list);
- zbud_unbuddied[found_good_buddy].count--;
- list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
- zcache_zbud_buddied_count++;
-
-init_zh:
- SET_SENTINEL(zh, ZBH);
- zh->size = size;
- zh->index = index;
- zh->oid = *oid;
- zh->pool_id = pool_id;
- zh->client_id = client_id;
- to = zbud_data(zh, size);
- memcpy(to, cdata, size);
- spin_unlock(&zbpg->lock);
- spin_unlock(&zbud_budlists_spinlock);
-
- zbud_cumul_chunk_counts[nchunks]++;
- atomic_inc(&zcache_zbud_curr_zpages);
- zcache_zbud_cumul_zpages++;
- zcache_zbud_curr_zbytes += size;
- zcache_zbud_cumul_zbytes += size;
-out:
- return zh;
-}
-
-static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
-{
- struct zbud_page *zbpg;
- unsigned budnum = zbud_budnum(zh);
- unsigned int out_len = PAGE_SIZE;
- char *to_va, *from_va;
- unsigned size;
- int ret = 0;
-
- zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
- spin_lock(&zbpg->lock);
- if (list_empty(&zbpg->bud_list)) {
- /* ignore zombie page... see zbud_evict_pages() */
- ret = -EINVAL;
- goto out;
- }
- ASSERT_SENTINEL(zh, ZBH);
- BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
- to_va = kmap_atomic(page);
- size = zh->size;
- from_va = zbud_data(zh, size);
- ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
- to_va, &out_len);
- BUG_ON(ret);
- BUG_ON(out_len != PAGE_SIZE);
- kunmap_atomic(to_va);
-out:
- spin_unlock(&zbpg->lock);
- return ret;
-}
-
/*
- * The following routines handle shrinking of ephemeral pages by evicting
- * pages "least valuable" first.
+ * policy parameters
*/
-static unsigned long zcache_evicted_raw_pages;
-static unsigned long zcache_evicted_buddied_pages;
-static unsigned long zcache_evicted_unbuddied_pages;
-
-static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
- uint16_t poolid);
-static void zcache_put_pool(struct tmem_pool *pool);
-
-/*
- * Flush and free all zbuds in a zbpg, then free the pageframe
- */
-static void zbud_evict_zbpg(struct zbud_page *zbpg)
-{
- struct zbud_hdr *zh;
- int i, j;
- uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
- uint32_t index[ZBUD_MAX_BUDS];
- struct tmem_oid oid[ZBUD_MAX_BUDS];
- struct tmem_pool *pool;
-
- ASSERT_SPINLOCK(&zbpg->lock);
- BUG_ON(!list_empty(&zbpg->bud_list));
- for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
- zh = &zbpg->buddy[i];
- if (zh->size) {
- client_id[j] = zh->client_id;
- pool_id[j] = zh->pool_id;
- oid[j] = zh->oid;
- index[j] = zh->index;
- j++;
- zbud_free(zh);
- }
- }
- spin_unlock(&zbpg->lock);
- for (i = 0; i < j; i++) {
- pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
- if (pool != NULL) {
- tmem_flush_page(pool, &oid[i], index[i]);
- zcache_put_pool(pool);
- }
- }
- ASSERT_SENTINEL(zbpg, ZBPG);
- spin_lock(&zbpg->lock);
- zbud_free_raw_page(zbpg);
-}
-
-/*
- * Free nr pages. This code is funky because we want to hold the locks
- * protecting various lists for as short a time as possible, and in some
- * circumstances the list may change asynchronously when the list lock is
- * not held. In some cases we also trylock not only to avoid waiting on a
- * page in use by another cpu, but also to avoid potential deadlock due to
- * lock inversion.
- */
-static void zbud_evict_pages(int nr)
-{
- struct zbud_page *zbpg;
- int i;
-
- /* first try freeing any pages on unused list */
-retry_unused_list:
- spin_lock_bh(&zbpg_unused_list_spinlock);
- if (!list_empty(&zbpg_unused_list)) {
- /* can't walk list here, since it may change when unlocked */
- zbpg = list_first_entry(&zbpg_unused_list,
- struct zbud_page, bud_list);
- list_del_init(&zbpg->bud_list);
- zcache_zbpg_unused_list_count--;
- atomic_dec(&zcache_zbud_curr_raw_pages);
- spin_unlock_bh(&zbpg_unused_list_spinlock);
- zcache_free_page(zbpg);
- zcache_evicted_raw_pages++;
- if (--nr <= 0)
- goto out;
- goto retry_unused_list;
- }
- spin_unlock_bh(&zbpg_unused_list_spinlock);
-
- /* now try freeing unbuddied pages, starting with least space avail */
- for (i = 0; i < MAX_CHUNK; i++) {
-retry_unbud_list_i:
- spin_lock_bh(&zbud_budlists_spinlock);
- if (list_empty(&zbud_unbuddied[i].list)) {
- spin_unlock_bh(&zbud_budlists_spinlock);
- continue;
- }
- list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
- if (unlikely(!spin_trylock(&zbpg->lock)))
- continue;
- list_del_init(&zbpg->bud_list);
- zbud_unbuddied[i].count--;
- spin_unlock(&zbud_budlists_spinlock);
- zcache_evicted_unbuddied_pages++;
- /* want budlists unlocked when doing zbpg eviction */
- zbud_evict_zbpg(zbpg);
- local_bh_enable();
- if (--nr <= 0)
- goto out;
- goto retry_unbud_list_i;
- }
- spin_unlock_bh(&zbud_budlists_spinlock);
- }
-
- /* as a last resort, free buddied pages */
-retry_bud_list:
- spin_lock_bh(&zbud_budlists_spinlock);
- if (list_empty(&zbud_buddied_list)) {
- spin_unlock_bh(&zbud_budlists_spinlock);
- goto out;
- }
- list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
- if (unlikely(!spin_trylock(&zbpg->lock)))
- continue;
- list_del_init(&zbpg->bud_list);
- zcache_zbud_buddied_count--;
- spin_unlock(&zbud_budlists_spinlock);
- zcache_evicted_buddied_pages++;
- /* want budlists unlocked when doing zbpg eviction */
- zbud_evict_zbpg(zbpg);
- local_bh_enable();
- if (--nr <= 0)
- goto out;
- goto retry_bud_list;
- }
- spin_unlock_bh(&zbud_budlists_spinlock);
-out:
- return;
-}
-
-static void __init zbud_init(void)
-{
- int i;
-
- INIT_LIST_HEAD(&zbud_buddied_list);
-
- for (i = 0; i < NCHUNKS; i++)
- INIT_LIST_HEAD(&zbud_unbuddied[i].list);
-}
-
-#ifdef CONFIG_SYSFS
-/*
- * These sysfs routines show a nice distribution of how many zbpg's are
- * currently (and have ever been placed) in each unbuddied list. It's fun
- * to watch but can probably go away before final merge.
- */
-static int zbud_show_unbuddied_list_counts(char *buf)
-{
- int i;
- char *p = buf;
-
- for (i = 0; i < NCHUNKS; i++)
- p += sprintf(p, "%u ", zbud_unbuddied[i].count);
- return p - buf;
-}
-
-static int zbud_show_cumul_chunk_counts(char *buf)
-{
- unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
- unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
- unsigned long total_chunks_lte_42 = 0;
- char *p = buf;
-
- for (i = 0; i < NCHUNKS; i++) {
- p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
- chunks += zbud_cumul_chunk_counts[i];
- total_chunks += zbud_cumul_chunk_counts[i];
- sum_total_chunks += i * zbud_cumul_chunk_counts[i];
- if (i == 21)
- total_chunks_lte_21 = total_chunks;
- if (i == 32)
- total_chunks_lte_32 = total_chunks;
- if (i == 42)
- total_chunks_lte_42 = total_chunks;
- }
- p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
- total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
- chunks == 0 ? 0 : sum_total_chunks / chunks);
- return p - buf;
-}
-#endif
-
-/**********
- * This "zv" PAM implementation combines the slab-based zsmalloc
- * with the crypto compression API to maximize the amount of data that can
- * be packed into a physical page.
- *
- * Zv represents a PAM page with the index and object (plus a "size" value
- * necessary for decompression) immediately preceding the compressed data.
- */
-
-#define ZVH_SENTINEL 0x43214321
-
-struct zv_hdr {
- uint32_t pool_id;
- struct tmem_oid oid;
- uint32_t index;
- size_t size;
- DECL_SENTINEL
-};
-
-/* rudimentary policy limits */
-/* total number of persistent pages may not exceed this percentage */
-static unsigned int zv_page_count_policy_percent = 75;
/*
* byte count defining poor compression; pages with greater zsize will be
* rejected
*/
-static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
+static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
/*
* byte count defining poor *mean* compression; pages with greater zsize
* will be rejected until sufficient better-compressed pages are accepted
* driving the mean below this threshold
*/
-static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
-
-static atomic_t zv_curr_dist_counts[NCHUNKS];
-static atomic_t zv_cumul_dist_counts[NCHUNKS];
+static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
-static unsigned long zv_create(struct zs_pool *pool, uint32_t pool_id,
- struct tmem_oid *oid, uint32_t index,
- void *cdata, unsigned clen)
-{
- struct zv_hdr *zv;
- u32 size = clen + sizeof(struct zv_hdr);
- int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
- unsigned long handle = 0;
-
- BUG_ON(!irqs_disabled());
- BUG_ON(chunks >= NCHUNKS);
- handle = zs_malloc(pool, size);
- if (!handle)
- goto out;
- atomic_inc(&zv_curr_dist_counts[chunks]);
- atomic_inc(&zv_cumul_dist_counts[chunks]);
- zv = zs_map_object(pool, handle, ZS_MM_WO);
- zv->index = index;
- zv->oid = *oid;
- zv->pool_id = pool_id;
- zv->size = clen;
- SET_SENTINEL(zv, ZVH);
- memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
- zs_unmap_object(pool, handle);
-out:
- return handle;
-}
-
-static void zv_free(struct zs_pool *pool, unsigned long handle)
-{
- unsigned long flags;
- struct zv_hdr *zv;
- uint16_t size;
- int chunks;
-
- zv = zs_map_object(pool, handle, ZS_MM_RW);
- ASSERT_SENTINEL(zv, ZVH);
- size = zv->size + sizeof(struct zv_hdr);
- INVERT_SENTINEL(zv, ZVH);
- zs_unmap_object(pool, handle);
-
- chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
- BUG_ON(chunks >= NCHUNKS);
- atomic_dec(&zv_curr_dist_counts[chunks]);
-
- local_irq_save(flags);
- zs_free(pool, handle);
- local_irq_restore(flags);
-}
-
-static void zv_decompress(struct page *page, unsigned long handle)
-{
- unsigned int clen = PAGE_SIZE;
- char *to_va;
- int ret;
- struct zv_hdr *zv;
-
- zv = zs_map_object(zcache_host.zspool, handle, ZS_MM_RO);
- BUG_ON(zv->size == 0);
- ASSERT_SENTINEL(zv, ZVH);
- to_va = kmap_atomic(page);
- ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
- zv->size, to_va, &clen);
- kunmap_atomic(to_va);
- zs_unmap_object(zcache_host.zspool, handle);
- BUG_ON(ret);
- BUG_ON(clen != PAGE_SIZE);
-}
-
-#ifdef CONFIG_SYSFS
/*
- * show a distribution of compression stats for zv pages.
+ * for now, use named slabs so we can easily track usage; later we can
+ * either just use kmalloc, or perhaps add a slab-like allocator
+ * to more carefully manage total memory utilization
*/
+static struct kmem_cache *zcache_objnode_cache;
+static struct kmem_cache *zcache_obj_cache;
-static int zv_curr_dist_counts_show(char *buf)
-{
- unsigned long i, n, chunks = 0, sum_total_chunks = 0;
- char *p = buf;
-
- for (i = 0; i < NCHUNKS; i++) {
- n = atomic_read(&zv_curr_dist_counts[i]);
- p += sprintf(p, "%lu ", n);
- chunks += n;
- sum_total_chunks += i * n;
- }
- p += sprintf(p, "mean:%lu\n",
- chunks == 0 ? 0 : sum_total_chunks / chunks);
- return p - buf;
-}
-
-static int zv_cumul_dist_counts_show(char *buf)
-{
- unsigned long i, n, chunks = 0, sum_total_chunks = 0;
- char *p = buf;
-
- for (i = 0; i < NCHUNKS; i++) {
- n = atomic_read(&zv_cumul_dist_counts[i]);
- p += sprintf(p, "%lu ", n);
- chunks += n;
- sum_total_chunks += i * n;
- }
- p += sprintf(p, "mean:%lu\n",
- chunks == 0 ? 0 : sum_total_chunks / chunks);
- return p - buf;
-}
+static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
-/*
- * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
- * pages that don't compress to less than this value (including metadata
- * overhead) to be rejected. We don't allow the value to get too close
- * to PAGE_SIZE.
- */
-static ssize_t zv_max_zsize_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", zv_max_zsize);
+/* we try to keep these statistics SMP-consistent */
+static ssize_t zcache_obj_count;
+static atomic_t zcache_obj_atomic = ATOMIC_INIT(0);
+static ssize_t zcache_obj_count_max;
+static ssize_t zcache_objnode_count;
+static atomic_t zcache_objnode_atomic = ATOMIC_INIT(0);
+static ssize_t zcache_objnode_count_max;
+static u64 zcache_eph_zbytes;
+static atomic_long_t zcache_eph_zbytes_atomic = ATOMIC_INIT(0);
+static u64 zcache_eph_zbytes_max;
+static u64 zcache_pers_zbytes;
+static atomic_long_t zcache_pers_zbytes_atomic = ATOMIC_INIT(0);
+static u64 zcache_pers_zbytes_max;
+static ssize_t zcache_eph_pageframes;
+static atomic_t zcache_eph_pageframes_atomic = ATOMIC_INIT(0);
+static ssize_t zcache_eph_pageframes_max;
+static ssize_t zcache_pers_pageframes;
+static atomic_t zcache_pers_pageframes_atomic = ATOMIC_INIT(0);
+static ssize_t zcache_pers_pageframes_max;
+static ssize_t zcache_pageframes_alloced;
+static atomic_t zcache_pageframes_alloced_atomic = ATOMIC_INIT(0);
+static ssize_t zcache_pageframes_freed;
+static atomic_t zcache_pageframes_freed_atomic = ATOMIC_INIT(0);
+static ssize_t zcache_eph_zpages;
+static atomic_t zcache_eph_zpages_atomic = ATOMIC_INIT(0);
+static ssize_t zcache_eph_zpages_max;
+static ssize_t zcache_pers_zpages;
+static atomic_t zcache_pers_zpages_atomic = ATOMIC_INIT(0);
+static ssize_t zcache_pers_zpages_max;
+
+/* but for the rest of these, counting races are ok */
+static ssize_t zcache_flush_total;
+static ssize_t zcache_flush_found;
+static ssize_t zcache_flobj_total;
+static ssize_t zcache_flobj_found;
+static ssize_t zcache_failed_eph_puts;
+static ssize_t zcache_failed_pers_puts;
+static ssize_t zcache_failed_getfreepages;
+static ssize_t zcache_failed_alloc;
+static ssize_t zcache_put_to_flush;
+static ssize_t zcache_compress_poor;
+static ssize_t zcache_mean_compress_poor;
+static ssize_t zcache_eph_ate_tail;
+static ssize_t zcache_eph_ate_tail_failed;
+static ssize_t zcache_pers_ate_eph;
+static ssize_t zcache_pers_ate_eph_failed;
+static ssize_t zcache_evicted_eph_zpages;
+static ssize_t zcache_evicted_eph_pageframes;
+static ssize_t zcache_last_active_file_pageframes;
+static ssize_t zcache_last_inactive_file_pageframes;
+static ssize_t zcache_last_active_anon_pageframes;
+static ssize_t zcache_last_inactive_anon_pageframes;
+static ssize_t zcache_eph_nonactive_puts_ignored;
+static ssize_t zcache_pers_nonactive_puts_ignored;
+static ssize_t zcache_writtenback_pages;
+static ssize_t zcache_outstanding_writeback_pages;
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#define zdfs debugfs_create_size_t
+#define zdfs64 debugfs_create_u64
+static int zcache_debugfs_init(void)
+{
+ struct dentry *root = debugfs_create_dir("zcache", NULL);
+ if (root == NULL)
+ return -ENXIO;
+
+ zdfs("obj_count", S_IRUGO, root, &zcache_obj_count);
+ zdfs("obj_count_max", S_IRUGO, root, &zcache_obj_count_max);
+ zdfs("objnode_count", S_IRUGO, root, &zcache_objnode_count);
+ zdfs("objnode_count_max", S_IRUGO, root, &zcache_objnode_count_max);
+ zdfs("flush_total", S_IRUGO, root, &zcache_flush_total);
+ zdfs("flush_found", S_IRUGO, root, &zcache_flush_found);
+ zdfs("flobj_total", S_IRUGO, root, &zcache_flobj_total);
+ zdfs("flobj_found", S_IRUGO, root, &zcache_flobj_found);
+ zdfs("failed_eph_puts", S_IRUGO, root, &zcache_failed_eph_puts);
+ zdfs("failed_pers_puts", S_IRUGO, root, &zcache_failed_pers_puts);
+ zdfs("failed_get_free_pages", S_IRUGO, root,
+ &zcache_failed_getfreepages);
+ zdfs("failed_alloc", S_IRUGO, root, &zcache_failed_alloc);
+ zdfs("put_to_flush", S_IRUGO, root, &zcache_put_to_flush);
+ zdfs("compress_poor", S_IRUGO, root, &zcache_compress_poor);
+ zdfs("mean_compress_poor", S_IRUGO, root, &zcache_mean_compress_poor);
+ zdfs("eph_ate_tail", S_IRUGO, root, &zcache_eph_ate_tail);
+ zdfs("eph_ate_tail_failed", S_IRUGO, root, &zcache_eph_ate_tail_failed);
+ zdfs("pers_ate_eph", S_IRUGO, root, &zcache_pers_ate_eph);
+ zdfs("pers_ate_eph_failed", S_IRUGO, root, &zcache_pers_ate_eph_failed);
+ zdfs("evicted_eph_zpages", S_IRUGO, root, &zcache_evicted_eph_zpages);
+ zdfs("evicted_eph_pageframes", S_IRUGO, root,
+ &zcache_evicted_eph_pageframes);
+ zdfs("eph_pageframes", S_IRUGO, root, &zcache_eph_pageframes);
+ zdfs("eph_pageframes_max", S_IRUGO, root, &zcache_eph_pageframes_max);
+ zdfs("pers_pageframes", S_IRUGO, root, &zcache_pers_pageframes);
+ zdfs("pers_pageframes_max", S_IRUGO, root, &zcache_pers_pageframes_max);
+ zdfs("eph_zpages", S_IRUGO, root, &zcache_eph_zpages);
+ zdfs("eph_zpages_max", S_IRUGO, root, &zcache_eph_zpages_max);
+ zdfs("pers_zpages", S_IRUGO, root, &zcache_pers_zpages);
+ zdfs("pers_zpages_max", S_IRUGO, root, &zcache_pers_zpages_max);
+ zdfs("last_active_file_pageframes", S_IRUGO, root,
+ &zcache_last_active_file_pageframes);
+ zdfs("last_inactive_file_pageframes", S_IRUGO, root,
+ &zcache_last_inactive_file_pageframes);
+ zdfs("last_active_anon_pageframes", S_IRUGO, root,
+ &zcache_last_active_anon_pageframes);
+ zdfs("last_inactive_anon_pageframes", S_IRUGO, root,
+ &zcache_last_inactive_anon_pageframes);
+ zdfs("eph_nonactive_puts_ignored", S_IRUGO, root,
+ &zcache_eph_nonactive_puts_ignored);
+ zdfs("pers_nonactive_puts_ignored", S_IRUGO, root,
+ &zcache_pers_nonactive_puts_ignored);
+ zdfs64("eph_zbytes", S_IRUGO, root, &zcache_eph_zbytes);
+ zdfs64("eph_zbytes_max", S_IRUGO, root, &zcache_eph_zbytes_max);
+ zdfs64("pers_zbytes", S_IRUGO, root, &zcache_pers_zbytes);
+ zdfs64("pers_zbytes_max", S_IRUGO, root, &zcache_pers_zbytes_max);
+ zdfs("outstanding_writeback_pages", S_IRUGO, root,
+ &zcache_outstanding_writeback_pages);
+ zdfs("writtenback_pages", S_IRUGO, root, &zcache_writtenback_pages);
+ return 0;
}
+#undef zdfs
+#undef zdfs64
+#endif
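
When debugfs is available, the counters registered above show up as
read-only files under the "zcache" debugfs directory, for example
(assuming the usual mount point):

/*
 *   cat /sys/kernel/debug/zcache/eph_pageframes
 *   cat /sys/kernel/debug/zcache/pers_zbytes_max
 */
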
-static ssize_t zv_max_zsize_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- err = kstrtoul(buf, 10, &val);
- if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
- return -EINVAL;
- zv_max_zsize = val;
- return count;
+#define ZCACHE_DEBUG
+#ifdef ZCACHE_DEBUG
+/* developers can call this in case of OOMs, e.g. to find memory leaks */
+void zcache_dump(void)
+{
+ pr_info("zcache: obj_count=%zd\n", zcache_obj_count);
+ pr_info("zcache: obj_count_max=%zd\n", zcache_obj_count_max);
+ pr_info("zcache: objnode_count=%zd\n", zcache_objnode_count);
+ pr_info("zcache: objnode_count_max=%zd\n", zcache_objnode_count_max);
+ pr_info("zcache: flush_total=%zd\n", zcache_flush_total);
+ pr_info("zcache: flush_found=%zd\n", zcache_flush_found);
+ pr_info("zcache: flobj_total=%zd\n", zcache_flobj_total);
+ pr_info("zcache: flobj_found=%zd\n", zcache_flobj_found);
+ pr_info("zcache: failed_eph_puts=%zd\n", zcache_failed_eph_puts);
+ pr_info("zcache: failed_pers_puts=%zd\n", zcache_failed_pers_puts);
+ pr_info("zcache: failed_get_free_pages=%zd\n",
+ zcache_failed_getfreepages);
+ pr_info("zcache: failed_alloc=%zd\n", zcache_failed_alloc);
+ pr_info("zcache: put_to_flush=%zd\n", zcache_put_to_flush);
+ pr_info("zcache: compress_poor=%zd\n", zcache_compress_poor);
+ pr_info("zcache: mean_compress_poor=%zd\n",
+ zcache_mean_compress_poor);
+ pr_info("zcache: eph_ate_tail=%zd\n", zcache_eph_ate_tail);
+ pr_info("zcache: eph_ate_tail_failed=%zd\n",
+ zcache_eph_ate_tail_failed);
+ pr_info("zcache: pers_ate_eph=%zd\n", zcache_pers_ate_eph);
+ pr_info("zcache: pers_ate_eph_failed=%zd\n",
+ zcache_pers_ate_eph_failed);
+ pr_info("zcache: evicted_eph_zpages=%zd\n", zcache_evicted_eph_zpages);
+ pr_info("zcache: evicted_eph_pageframes=%zd\n",
+ zcache_evicted_eph_pageframes);
+ pr_info("zcache: eph_pageframes=%zd\n", zcache_eph_pageframes);
+ pr_info("zcache: eph_pageframes_max=%zd\n", zcache_eph_pageframes_max);
+ pr_info("zcache: pers_pageframes=%zd\n", zcache_pers_pageframes);
+ pr_info("zcache: pers_pageframes_max=%zd\n",
+ zcache_pers_pageframes_max);
+ pr_info("zcache: eph_zpages=%zd\n", zcache_eph_zpages);
+ pr_info("zcache: eph_zpages_max=%zd\n", zcache_eph_zpages_max);
+ pr_info("zcache: pers_zpages=%zd\n", zcache_pers_zpages);
+ pr_info("zcache: pers_zpages_max=%zd\n", zcache_pers_zpages_max);
+ pr_info("zcache: last_active_file_pageframes=%zd\n",
+ zcache_last_active_file_pageframes);
+ pr_info("zcache: last_inactive_file_pageframes=%zd\n",
+ zcache_last_inactive_file_pageframes);
+ pr_info("zcache: last_active_anon_pageframes=%zd\n",
+ zcache_last_active_anon_pageframes);
+ pr_info("zcache: last_inactive_anon_pageframes=%zd\n",
+ zcache_last_inactive_anon_pageframes);
+ pr_info("zcache: eph_nonactive_puts_ignored=%zd\n",
+ zcache_eph_nonactive_puts_ignored);
+ pr_info("zcache: pers_nonactive_puts_ignored=%zd\n",
+ zcache_pers_nonactive_puts_ignored);
+ pr_info("zcache: eph_zbytes=%llu\n",
+ zcache_eph_zbytes);
+ pr_info("zcache: eph_zbytes_max=%llu\n",
+ zcache_eph_zbytes_max);
+ pr_info("zcache: pers_zbytes=%llu\n",
+ zcache_pers_zbytes);
+ pr_info("zcache: pers_zbytes_max=%llu\n",
+ zcache_pers_zbytes_max);
+ pr_info("zcache: outstanding_writeback_pages=%zd\n",
+ zcache_outstanding_writeback_pages);
+ pr_info("zcache: writtenback_pages=%zd\n", zcache_writtenback_pages);
}
+#endif
/*
- * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
- * pages that don't compress to less than this value (including metadata
- * overhead) to be rejected UNLESS the mean compression is also smaller
- * than this value. In other words, we are load-balancing-by-zsize the
- * accepted pages. Again, we don't allow the value to get too close
- * to PAGE_SIZE.
+ * zcache core code starts here
*/
-static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", zv_max_mean_zsize);
-}
-
-static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long val;
- int err;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- err = kstrtoul(buf, 10, &val);
- if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
- return -EINVAL;
- zv_max_mean_zsize = val;
- return count;
-}
+static struct zcache_client zcache_host;
+static struct zcache_client zcache_clients[MAX_CLIENTS];
-/*
- * setting zv_page_count_policy_percent via sysfs sets an upper bound of
- * persistent (e.g. swap) pages that will be retained according to:
- * (zv_page_count_policy_percent * totalram_pages) / 100)
- * when that limit is reached, further puts will be rejected (until
- * some pages have been flushed). Note that, due to compression,
- * this number may exceed 100; it defaults to 75 and we set an
- * arbitary limit of 150. A poor choice will almost certainly result
- * in OOM's, so this value should only be changed prudently.
- */
-static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static inline bool is_local_client(struct zcache_client *cli)
{
- return sprintf(buf, "%u\n", zv_page_count_policy_percent);
+ return cli == &zcache_host;
}
-static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
{
- unsigned long val;
- int err;
+ struct zcache_client *cli = &zcache_host;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- err = kstrtoul(buf, 10, &val);
- if (err || (val == 0) || (val > 150))
- return -EINVAL;
- zv_page_count_policy_percent = val;
- return count;
+ if (cli_id != LOCAL_CLIENT) {
+ if (cli_id >= MAX_CLIENTS)
+ goto out;
+ cli = &zcache_clients[cli_id];
+ }
+out:
+ return cli;
}
-static struct kobj_attribute zcache_zv_max_zsize_attr = {
- .attr = { .name = "zv_max_zsize", .mode = 0644 },
- .show = zv_max_zsize_show,
- .store = zv_max_zsize_store,
-};
-
-static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
- .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
- .show = zv_max_mean_zsize_show,
- .store = zv_max_mean_zsize_store,
-};
-
-static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
- .attr = { .name = "zv_page_count_policy_percent",
- .mode = 0644 },
- .show = zv_page_count_policy_percent_show,
- .store = zv_page_count_policy_percent_store,
-};
-#endif
-
-/*
- * zcache core code starts here
- */
-
-/* useful stats not collected by cleancache or frontswap */
-static unsigned long zcache_flush_total;
-static unsigned long zcache_flush_found;
-static unsigned long zcache_flobj_total;
-static unsigned long zcache_flobj_found;
-static unsigned long zcache_failed_eph_puts;
-static unsigned long zcache_failed_pers_puts;
-
/*
* Tmem operations assume the poolid implies the invoking client.
* Zcache only has one client (the kernel itself): LOCAL_CLIENT.
@@ -941,24 +356,26 @@ static unsigned long zcache_failed_pers_puts;
* of zcache would have one client per guest and each client might
* have a poolid==N.
*/
-static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
+struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
{
struct tmem_pool *pool = NULL;
struct zcache_client *cli = NULL;
- cli = get_zcache_client(cli_id);
- if (!cli)
+ cli = zcache_get_client_by_id(cli_id);
+ if (cli == NULL)
goto out;
-
- atomic_inc(&cli->refcount);
- pool = idr_find(&cli->tmem_pools, poolid);
- if (pool != NULL)
- atomic_inc(&pool->refcount);
+ if (!is_local_client(cli))
+ atomic_inc(&cli->refcount);
+ if (poolid < MAX_POOLS_PER_CLIENT) {
+ pool = cli->tmem_pools[poolid];
+ if (pool != NULL)
+ atomic_inc(&pool->refcount);
+ }
out:
return pool;
}
-static void zcache_put_pool(struct tmem_pool *pool)
+void zcache_put_pool(struct tmem_pool *pool)
{
struct zcache_client *cli = NULL;
@@ -966,7 +383,8 @@ static void zcache_put_pool(struct tmem_pool *pool)
BUG();
cli = pool->client;
atomic_dec(&pool->refcount);
- atomic_dec(&cli->refcount);
+ if (!is_local_client(cli))
+ atomic_dec(&cli->refcount);
}
int zcache_new_client(uint16_t cli_id)
@@ -974,120 +392,17 @@ int zcache_new_client(uint16_t cli_id)
struct zcache_client *cli;
int ret = -1;
- cli = get_zcache_client(cli_id);
-
+ cli = zcache_get_client_by_id(cli_id);
if (cli == NULL)
goto out;
if (cli->allocated)
goto out;
cli->allocated = 1;
-#ifdef CONFIG_FRONTSWAP
- cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
- if (cli->zspool == NULL)
- goto out;
- idr_init(&cli->tmem_pools);
-#endif
- ret = 0;
-out:
- return ret;
-}
-
-/* counters for debugging */
-static unsigned long zcache_failed_get_free_pages;
-static unsigned long zcache_failed_alloc;
-static unsigned long zcache_put_to_flush;
-
-/*
- * for now, used named slabs so can easily track usage; later can
- * either just use kmalloc, or perhaps add a slab-like allocator
- * to more carefully manage total memory utilization
- */
-static struct kmem_cache *zcache_objnode_cache;
-static struct kmem_cache *zcache_obj_cache;
-static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
-static unsigned long zcache_curr_obj_count_max;
-static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
-static unsigned long zcache_curr_objnode_count_max;
-
-/*
- * to avoid memory allocation recursion (e.g. due to direct reclaim), we
- * preload all necessary data structures so the hostops callbacks never
- * actually do a malloc
- */
-struct zcache_preload {
- void *page;
- struct tmem_obj *obj;
- int nr;
- struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
-};
-static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
-
-static int zcache_do_preload(struct tmem_pool *pool)
-{
- struct zcache_preload *kp;
- struct tmem_objnode *objnode;
- struct tmem_obj *obj;
- void *page;
- int ret = -ENOMEM;
-
- if (unlikely(zcache_objnode_cache == NULL))
- goto out;
- if (unlikely(zcache_obj_cache == NULL))
- goto out;
-
- /* IRQ has already been disabled. */
- kp = &__get_cpu_var(zcache_preloads);
- while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
- objnode = kmem_cache_alloc(zcache_objnode_cache,
- ZCACHE_GFP_MASK);
- if (unlikely(objnode == NULL)) {
- zcache_failed_alloc++;
- goto out;
- }
-
- kp->objnodes[kp->nr++] = objnode;
- }
-
- if (!kp->obj) {
- obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
- if (unlikely(obj == NULL)) {
- zcache_failed_alloc++;
- goto out;
- }
- kp->obj = obj;
- }
-
- if (!kp->page) {
- page = (void *)__get_free_page(ZCACHE_GFP_MASK);
- if (unlikely(page == NULL)) {
- zcache_failed_get_free_pages++;
- goto out;
- }
- kp->page = page;
- }
-
ret = 0;
out:
return ret;
}
-static void *zcache_get_free_page(void)
-{
- struct zcache_preload *kp;
- void *page;
-
- kp = &__get_cpu_var(zcache_preloads);
- page = kp->page;
- BUG_ON(page == NULL);
- kp->page = NULL;
- return page;
-}
-
-static void zcache_free_page(void *p)
-{
- free_page((unsigned long)p);
-}
-
/*
* zcache implementation for tmem host ops
*/
@@ -1095,51 +410,53 @@ static void zcache_free_page(void *p)
static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
{
struct tmem_objnode *objnode = NULL;
- unsigned long count;
struct zcache_preload *kp;
+ int i;
kp = &__get_cpu_var(zcache_preloads);
- if (kp->nr <= 0)
- goto out;
- objnode = kp->objnodes[kp->nr - 1];
+ for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
+ objnode = kp->objnodes[i];
+ if (objnode != NULL) {
+ kp->objnodes[i] = NULL;
+ break;
+ }
+ }
BUG_ON(objnode == NULL);
- kp->objnodes[kp->nr - 1] = NULL;
- kp->nr--;
- count = atomic_inc_return(&zcache_curr_objnode_count);
- if (count > zcache_curr_objnode_count_max)
- zcache_curr_objnode_count_max = count;
-out:
+ zcache_objnode_count = atomic_inc_return(&zcache_objnode_atomic);
+ if (zcache_objnode_count > zcache_objnode_count_max)
+ zcache_objnode_count_max = zcache_objnode_count;
return objnode;
}
static void zcache_objnode_free(struct tmem_objnode *objnode,
struct tmem_pool *pool)
{
- atomic_dec(&zcache_curr_objnode_count);
- BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
+ zcache_objnode_count =
+ atomic_dec_return(&zcache_objnode_atomic);
+ BUG_ON(zcache_objnode_count < 0);
kmem_cache_free(zcache_objnode_cache, objnode);
}
static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
{
struct tmem_obj *obj = NULL;
- unsigned long count;
struct zcache_preload *kp;
kp = &__get_cpu_var(zcache_preloads);
obj = kp->obj;
BUG_ON(obj == NULL);
kp->obj = NULL;
- count = atomic_inc_return(&zcache_curr_obj_count);
- if (count > zcache_curr_obj_count_max)
- zcache_curr_obj_count_max = count;
+ zcache_obj_count = atomic_inc_return(&zcache_obj_atomic);
+ if (zcache_obj_count > zcache_obj_count_max)
+ zcache_obj_count_max = zcache_obj_count;
return obj;
}
static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
{
- atomic_dec(&zcache_curr_obj_count);
- BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
+ zcache_obj_count =
+ atomic_dec_return(&zcache_obj_atomic);
+ BUG_ON(zcache_obj_count < 0);
kmem_cache_free(zcache_obj_cache, obj);
}
@@ -1150,96 +467,302 @@ static struct tmem_hostops zcache_hostops = {
.objnode_free = zcache_objnode_free,
};
+static struct page *zcache_alloc_page(void)
+{
+ struct page *page = alloc_page(ZCACHE_GFP_MASK);
+
+ if (page != NULL)
+ zcache_pageframes_alloced =
+ atomic_inc_return(&zcache_pageframes_alloced_atomic);
+ return page;
+}
+
+static void zcache_free_page(struct page *page)
+{
+ long curr_pageframes;
+ static long max_pageframes, min_pageframes;
+
+ if (page == NULL)
+ BUG();
+ __free_page(page);
+ zcache_pageframes_freed =
+ atomic_inc_return(&zcache_pageframes_freed_atomic);
+ curr_pageframes = zcache_pageframes_alloced -
+ atomic_read(&zcache_pageframes_freed_atomic) -
+ atomic_read(&zcache_eph_pageframes_atomic) -
+ atomic_read(&zcache_pers_pageframes_atomic);
+ if (curr_pageframes > max_pageframes)
+ max_pageframes = curr_pageframes;
+ if (curr_pageframes < min_pageframes)
+ min_pageframes = curr_pageframes;
+#ifdef ZCACHE_DEBUG
+ if (curr_pageframes > 2L || curr_pageframes < -2L) {
+ /* pr_info here */
+ }
+#endif
+}
+
/*
* zcache implementations for PAM page descriptor ops
*/
-static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
-static unsigned long zcache_curr_eph_pampd_count_max;
-static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
-static unsigned long zcache_curr_pers_pampd_count_max;
-
/* forward reference */
-static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
+static void zcache_compress(struct page *from,
+ void **out_va, unsigned *out_len);
+
+static struct page *zcache_evict_eph_pageframe(void);
-static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
- struct tmem_pool *pool, struct tmem_oid *oid,
- uint32_t index)
+static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
+ struct tmem_handle *th)
{
- void *pampd = NULL, *cdata;
- unsigned clen;
- int ret;
- unsigned long count;
- struct page *page = (struct page *)(data);
- struct zcache_client *cli = pool->client;
- uint16_t client_id = get_client_id_from_client(cli);
- unsigned long zv_mean_zsize;
- unsigned long curr_pers_pampd_count;
- u64 total_zsize;
+ void *pampd = NULL, *cdata = data;
+ unsigned clen = size;
+ struct page *page = (struct page *)(data), *newpage;
- if (eph) {
- ret = zcache_compress(page, &cdata, &clen);
- if (ret == 0)
- goto out;
- if (clen == 0 || clen > zbud_max_buddy_size()) {
+ if (!raw) {
+ zcache_compress(page, &cdata, &clen);
+ if (clen > zbud_max_buddy_size()) {
zcache_compress_poor++;
goto out;
}
- pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
- index, page, cdata, clen);
- if (pampd != NULL) {
- count = atomic_inc_return(&zcache_curr_eph_pampd_count);
- if (count > zcache_curr_eph_pampd_count_max)
- zcache_curr_eph_pampd_count_max = count;
- }
} else {
- curr_pers_pampd_count =
- atomic_read(&zcache_curr_pers_pampd_count);
- if (curr_pers_pampd_count >
- (zv_page_count_policy_percent * totalram_pages) / 100)
- goto out;
- ret = zcache_compress(page, &cdata, &clen);
- if (ret == 0)
- goto out;
- /* reject if compression is too poor */
- if (clen > zv_max_zsize) {
- zcache_compress_poor++;
+ BUG_ON(clen > zbud_max_buddy_size());
+ }
+
+ /* look for space via an existing match first */
+ pampd = (void *)zbud_match_prep(th, true, cdata, clen);
+ if (pampd != NULL)
+ goto got_pampd;
+
+ /* no match, now we need to find (or free up) a full page */
+ newpage = zcache_alloc_page();
+ if (newpage != NULL)
+ goto create_in_new_page;
+
+ zcache_failed_getfreepages++;
+ /* can't allocate a page, evict an ephemeral page via LRU */
+ newpage = zcache_evict_eph_pageframe();
+ if (newpage == NULL) {
+ zcache_eph_ate_tail_failed++;
+ goto out;
+ }
+ zcache_eph_ate_tail++;
+
+create_in_new_page:
+ pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
+ BUG_ON(pampd == NULL);
+ zcache_eph_pageframes =
+ atomic_inc_return(&zcache_eph_pageframes_atomic);
+ if (zcache_eph_pageframes > zcache_eph_pageframes_max)
+ zcache_eph_pageframes_max = zcache_eph_pageframes;
+
+got_pampd:
+ zcache_eph_zbytes =
+ atomic_long_add_return(clen, &zcache_eph_zbytes_atomic);
+ if (zcache_eph_zbytes > zcache_eph_zbytes_max)
+ zcache_eph_zbytes_max = zcache_eph_zbytes;
+ zcache_eph_zpages = atomic_inc_return(&zcache_eph_zpages_atomic);
+ if (zcache_eph_zpages > zcache_eph_zpages_max)
+ zcache_eph_zpages_max = zcache_eph_zpages;
+ if (ramster_enabled && raw)
+ ramster_count_foreign_pages(true, 1);
+out:
+ return pampd;
+}
+
+static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
+ struct tmem_handle *th)
+{
+ void *pampd = NULL, *cdata = data;
+ unsigned clen = size;
+ struct page *page = (struct page *)(data), *newpage;
+ unsigned long zbud_mean_zsize;
+ unsigned long curr_pers_zpages, total_zsize;
+
+ if (data == NULL) {
+ BUG_ON(!ramster_enabled);
+ goto create_pampd;
+ }
+ curr_pers_zpages = zcache_pers_zpages;
+/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
+ if (!raw)
+ zcache_compress(page, &cdata, &clen);
+ /* reject if compression is too poor */
+ if (clen > zbud_max_zsize) {
+ zcache_compress_poor++;
+ goto out;
+ }
+ /* reject if mean compression is too poor */
+ if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
+ total_zsize = zcache_pers_zbytes;
+ if ((long)total_zsize < 0)
+ total_zsize = 0;
+ zbud_mean_zsize = div_u64(total_zsize,
+ curr_pers_zpages);
+ if (zbud_mean_zsize > zbud_max_mean_zsize) {
+ zcache_mean_compress_poor++;
goto out;
}
- /* reject if mean compression is too poor */
- if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
- total_zsize = zs_get_total_size_bytes(cli->zspool);
- zv_mean_zsize = div_u64(total_zsize,
- curr_pers_pampd_count);
- if (zv_mean_zsize > zv_max_mean_zsize) {
- zcache_mean_compress_poor++;
+ }
+
+create_pampd:
+ /* look for space via an existing match first */
+ pampd = (void *)zbud_match_prep(th, false, cdata, clen);
+ if (pampd != NULL)
+ goto got_pampd;
+
+ /* no match, now we need to find (or free up) a full page */
+ newpage = zcache_alloc_page();
+ if (newpage != NULL)
+ goto create_in_new_page;
+ /*
+ * FIXME do the following only if eph is oversized?
+ * if (zcache_eph_pageframes >
+ * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
+ * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
+ */
+ zcache_failed_getfreepages++;
+ /* can't allocate a page, evict an ephemeral page via LRU */
+ newpage = zcache_evict_eph_pageframe();
+ if (newpage == NULL) {
+ zcache_pers_ate_eph_failed++;
+ goto out;
+ }
+ zcache_pers_ate_eph++;
+
+create_in_new_page:
+ pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
+ BUG_ON(pampd == NULL);
+ zcache_pers_pageframes =
+ atomic_inc_return(&zcache_pers_pageframes_atomic);
+ if (zcache_pers_pageframes > zcache_pers_pageframes_max)
+ zcache_pers_pageframes_max = zcache_pers_pageframes;
+
+got_pampd:
+ zcache_pers_zpages = atomic_inc_return(&zcache_pers_zpages_atomic);
+ if (zcache_pers_zpages > zcache_pers_zpages_max)
+ zcache_pers_zpages_max = zcache_pers_zpages;
+ zcache_pers_zbytes =
+ atomic_long_add_return(clen, &zcache_pers_zbytes_atomic);
+ if (zcache_pers_zbytes > zcache_pers_zbytes_max)
+ zcache_pers_zbytes_max = zcache_pers_zbytes;
+ if (ramster_enabled && raw)
+ ramster_count_foreign_pages(false, 1);
+out:
+ return pampd;
+}
+
+/*
+ * This is called directly from zcache_put_page to pre-allocate space
+ * to store a zpage.
+ */
+void *zcache_pampd_create(char *data, unsigned int size, bool raw,
+ int eph, struct tmem_handle *th)
+{
+ void *pampd = NULL;
+ struct zcache_preload *kp;
+ struct tmem_objnode *objnode;
+ struct tmem_obj *obj;
+ int i;
+
+ BUG_ON(!irqs_disabled());
+ /* pre-allocate per-cpu metadata */
+ BUG_ON(zcache_objnode_cache == NULL);
+ BUG_ON(zcache_obj_cache == NULL);
+ kp = &__get_cpu_var(zcache_preloads);
+ for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
+ objnode = kp->objnodes[i];
+ if (objnode == NULL) {
+ objnode = kmem_cache_alloc(zcache_objnode_cache,
+ ZCACHE_GFP_MASK);
+ if (unlikely(objnode == NULL)) {
+ zcache_failed_alloc++;
goto out;
}
+ kp->objnodes[i] = objnode;
}
- pampd = (void *)zv_create(cli->zspool, pool->pool_id,
- oid, index, cdata, clen);
- if (pampd == NULL)
- goto out;
- count = atomic_inc_return(&zcache_curr_pers_pampd_count);
- if (count > zcache_curr_pers_pampd_count_max)
- zcache_curr_pers_pampd_count_max = count;
}
+ if (kp->obj == NULL) {
+ obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
+ kp->obj = obj;
+ }
+ if (unlikely(kp->obj == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ /*
+	 * ok, have all the metadata pre-allocated, now do the data.
+	 * Since data allocation depends on whether the page is ephemeral
+	 * or persistent, we split the call here into different sub-functions.
+ */
+ if (eph)
+ pampd = zcache_pampd_eph_create(data, size, raw, th);
+ else
+ pampd = zcache_pampd_pers_create(data, size, raw, th);
out:
return pampd;
}
/*
+ * This pamops op is called via tmem_put and is necessary to "finish"
+ * a pampd creation.
+ */
+void zcache_pampd_create_finish(void *pampd, bool eph)
+{
+ zbud_create_finish((struct zbudref *)pampd, eph);
+}
+
+/*
+ * This is passed as a function parameter to zbud_decompress so that
+ * zbud need not be familiar with the details of crypto.  It assumes that
+ * the byte ranges from_va..from_va+size-1 and to_va..to_va+size-1 are
+ * kmapped.  It must succeed, else there is a logic bug somewhere.
+ */
+static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
+{
+ int ret;
+ unsigned int outlen = PAGE_SIZE;
+
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
+ to_va, &outlen);
+ BUG_ON(ret);
+ BUG_ON(outlen != PAGE_SIZE);
+}
+
+/*
+ * Decompress from the kernel va to a pageframe
+ */
+void zcache_decompress_to_page(char *from_va, unsigned int size,
+ struct page *to_page)
+{
+ char *to_va = kmap_atomic(to_page);
+ zcache_decompress(from_va, size, to_va);
+ kunmap_atomic(to_va);
+}
+
+/*
* fill the pageframe corresponding to the struct page with the data
* from the passed pampd
*/
-static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
+static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
void *pampd, struct tmem_pool *pool,
struct tmem_oid *oid, uint32_t index)
{
- int ret = 0;
-
- BUG_ON(is_ephemeral(pool));
- zv_decompress((struct page *)(data), (unsigned long)pampd);
+ int ret;
+ bool eph = !is_persistent(pool);
+
+ BUG_ON(preemptible());
+ BUG_ON(eph); /* fix later if shared pools get implemented */
+ BUG_ON(pampd_is_remote(pampd));
+ if (raw)
+ ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
+ sizep, eph);
+ else {
+ ret = zbud_decompress((struct page *)(data),
+ (struct zbudref *)pampd, false,
+ zcache_decompress);
+ *sizep = PAGE_SIZE;
+ }
return ret;
}
@@ -1247,16 +770,50 @@ static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
* fill the pageframe corresponding to the struct page with the data
* from the passed pampd
*/
-static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
+static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
void *pampd, struct tmem_pool *pool,
struct tmem_oid *oid, uint32_t index)
{
- BUG_ON(!is_ephemeral(pool));
- if (zbud_decompress((struct page *)(data), pampd) < 0)
- return -EINVAL;
- zbud_free_and_delist((struct zbud_hdr *)pampd);
- atomic_dec(&zcache_curr_eph_pampd_count);
- return 0;
+ int ret;
+ bool eph = !is_persistent(pool);
+ struct page *page = NULL;
+ unsigned int zsize, zpages;
+
+ BUG_ON(preemptible());
+ BUG_ON(pampd_is_remote(pampd));
+ if (raw)
+ ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
+ sizep, eph);
+ else {
+ ret = zbud_decompress((struct page *)(data),
+ (struct zbudref *)pampd, eph,
+ zcache_decompress);
+ *sizep = PAGE_SIZE;
+ }
+ page = zbud_free_and_delist((struct zbudref *)pampd, eph,
+ &zsize, &zpages);
+ if (eph) {
+ if (page)
+ zcache_eph_pageframes =
+ atomic_dec_return(&zcache_eph_pageframes_atomic);
+ zcache_eph_zpages =
+ atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
+ zcache_eph_zbytes =
+ atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
+ } else {
+ if (page)
+ zcache_pers_pageframes =
+ atomic_dec_return(&zcache_pers_pageframes_atomic);
+ zcache_pers_zpages =
+ atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
+ zcache_pers_zbytes =
+ atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
+ }
+ if (!is_local_client(pool->client))
+ ramster_count_foreign_pages(eph, -1);
+ if (page)
+ zcache_free_page(page);
+ return ret;
}
/*
@@ -1264,48 +821,51 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
* pampd must no longer be pointed to from any tmem data structures!
*/
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index)
+ struct tmem_oid *oid, uint32_t index, bool acct)
{
- struct zcache_client *cli = pool->client;
+ struct page *page = NULL;
+ unsigned int zsize, zpages;
+ BUG_ON(preemptible());
+ if (pampd_is_remote(pampd)) {
+ BUG_ON(!ramster_enabled);
+ pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
+ if (pampd == NULL)
+ return;
+ }
if (is_ephemeral(pool)) {
- zbud_free_and_delist((struct zbud_hdr *)pampd);
- atomic_dec(&zcache_curr_eph_pampd_count);
- BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
+ page = zbud_free_and_delist((struct zbudref *)pampd,
+ true, &zsize, &zpages);
+ if (page)
+ zcache_eph_pageframes =
+ atomic_dec_return(&zcache_eph_pageframes_atomic);
+ zcache_eph_zpages =
+ atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
+ zcache_eph_zbytes =
+ atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
+ /* FIXME CONFIG_RAMSTER... check acct parameter? */
} else {
- zv_free(cli->zspool, (unsigned long)pampd);
- atomic_dec(&zcache_curr_pers_pampd_count);
- BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
+ page = zbud_free_and_delist((struct zbudref *)pampd,
+ false, &zsize, &zpages);
+ if (page)
+ zcache_pers_pageframes =
+ atomic_dec_return(&zcache_pers_pageframes_atomic);
+ zcache_pers_zpages =
+ atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
+ zcache_pers_zbytes =
+ atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
}
-}
-
-static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj)
-{
-}
-
-static void zcache_pampd_new_obj(struct tmem_obj *obj)
-{
-}
-
-static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj)
-{
- return -1;
-}
-
-static bool zcache_pampd_is_remote(void *pampd)
-{
- return 0;
+ if (!is_local_client(pool->client))
+ ramster_count_foreign_pages(is_ephemeral(pool), -1);
+ if (page)
+ zcache_free_page(page);
}
static struct tmem_pamops zcache_pamops = {
- .create = zcache_pampd_create,
+ .create_finish = zcache_pampd_create_finish,
.get_data = zcache_pampd_get_data,
.get_data_and_free = zcache_pampd_get_data_and_free,
.free = zcache_pampd_free,
- .free_obj = zcache_pampd_free_obj,
- .new_obj = zcache_pampd_new_obj,
- .replace_in_obj = zcache_pampd_replace_in_obj,
- .is_remote = zcache_pampd_is_remote,
};
/*
@@ -1315,15 +875,15 @@ static struct tmem_pamops zcache_pamops = {
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
#define ZCACHE_DSTMEM_ORDER 1
-static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
+static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
- int ret = 0;
+ int ret;
unsigned char *dmem = __get_cpu_var(zcache_dstmem);
char *from_va;
BUG_ON(!irqs_disabled());
- if (unlikely(dmem == NULL))
- goto out; /* no buffer or no compressor so can't compress */
+ /* no buffer or no compressor so can't compress */
+ BUG_ON(dmem == NULL);
*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
from_va = kmap_atomic(from);
mb();
@@ -1332,9 +892,6 @@ static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
BUG_ON(ret);
*out_va = dmem;
kunmap_atomic(from_va);
- ret = 1;
-out:
- return ret;
}
static int zcache_comp_cpu_up(int cpu)
@@ -1360,18 +917,21 @@ static void zcache_comp_cpu_down(int cpu)
static int zcache_cpu_notifier(struct notifier_block *nb,
unsigned long action, void *pcpu)
{
- int ret, cpu = (long)pcpu;
+ int ret, i, cpu = (long)pcpu;
struct zcache_preload *kp;
switch (action) {
case CPU_UP_PREPARE:
ret = zcache_comp_cpu_up(cpu);
if (ret != NOTIFY_OK) {
- pr_err("zcache: can't allocate compressor transform\n");
+ pr_err("%s: can't allocate compressor xform\n",
+ namestr);
return ret;
}
per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
+ if (ramster_enabled)
+ ramster_cpu_up(cpu);
break;
case CPU_DEAD:
case CPU_UP_CANCELED:
@@ -1380,20 +940,17 @@ static int zcache_cpu_notifier(struct notifier_block *nb,
ZCACHE_DSTMEM_ORDER);
per_cpu(zcache_dstmem, cpu) = NULL;
kp = &per_cpu(zcache_preloads, cpu);
- while (kp->nr) {
- kmem_cache_free(zcache_objnode_cache,
- kp->objnodes[kp->nr - 1]);
- kp->objnodes[kp->nr - 1] = NULL;
- kp->nr--;
+ for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
+ if (kp->objnodes[i])
+ kmem_cache_free(zcache_objnode_cache,
+ kp->objnodes[i]);
}
if (kp->obj) {
kmem_cache_free(zcache_obj_cache, kp->obj);
kp->obj = NULL;
}
- if (kp->page) {
- free_page((unsigned long)kp->page);
- kp->page = NULL;
- }
+ if (ramster_enabled)
+ ramster_cpu_down(cpu);
break;
default:
break;
@@ -1405,116 +962,279 @@ static struct notifier_block zcache_cpu_notifier_block = {
.notifier_call = zcache_cpu_notifier
};
-#ifdef CONFIG_SYSFS
-#define ZCACHE_SYSFS_RO(_name) \
- static ssize_t zcache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", zcache_##_name); \
- } \
- static struct kobj_attribute zcache_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = zcache_##_name##_show, \
- }
+/*
+ * The following code interacts with the zbud eviction and zbud
+ * zombify code to access LRU pages
+ */
+
+static struct page *zcache_evict_eph_pageframe(void)
+{
+ struct page *page;
+ unsigned int zsize = 0, zpages = 0;
+
+ page = zbud_evict_pageframe_lru(&zsize, &zpages);
+ if (page == NULL)
+ goto out;
+ zcache_eph_zbytes = atomic_long_sub_return(zsize,
+ &zcache_eph_zbytes_atomic);
+ zcache_eph_zpages = atomic_sub_return(zpages,
+ &zcache_eph_zpages_atomic);
+ zcache_evicted_eph_zpages += zpages;
+ zcache_eph_pageframes =
+ atomic_dec_return(&zcache_eph_pageframes_atomic);
+ zcache_evicted_eph_pageframes++;
+out:
+ return page;
+}
+
+#ifdef CONFIG_ZCACHE_WRITEBACK
+
+static atomic_t zcache_outstanding_writeback_pages_atomic = ATOMIC_INIT(0);
+
+static void unswiz(struct tmem_oid oid, u32 index,
+ unsigned *type, pgoff_t *offset);
+
+/*
+ * Choose an LRU persistent pageframe and attempt to write it back to
+ * the backing swap disk by calling frontswap_writeback on both zpages.
+ *
+ * This is work-in-progress.
+ */
-#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
- static ssize_t zcache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
- } \
- static struct kobj_attribute zcache_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = zcache_##_name##_show, \
+static void zcache_end_swap_write(struct bio *bio, int err)
+{
+ end_swap_bio_write(bio, err);
+ zcache_outstanding_writeback_pages =
+ atomic_dec_return(&zcache_outstanding_writeback_pages_atomic);
+ zcache_writtenback_pages++;
+}
+
+/*
+ * zcache_get_swap_cache_page
+ *
+ * This is an adaptation of read_swap_cache_async()
+ *
+ * The caller supplies new_page rather than having a page returned.
+ * Returns 0 if the page was already in the swap cache; the page is not locked.
+ * Returns 1 if the new page needs to be populated; the page is locked.
+ */
+static int zcache_get_swap_cache_page(int type, pgoff_t offset,
+ struct page *new_page)
+{
+ struct page *found_page;
+ swp_entry_t entry = swp_entry(type, offset);
+ int err;
+
+ BUG_ON(new_page == NULL);
+ do {
+ /*
+ * First check the swap cache. Since this is normally
+ * called after lookup_swap_cache() failed, re-calling
+ * that would confuse statistics.
+ */
+ found_page = find_get_page(&swapper_space, entry.val);
+ if (found_page)
+ return 0;
+
+ /*
+ * call radix_tree_preload() while we can wait.
+ */
+ err = radix_tree_preload(GFP_KERNEL);
+ if (err)
+ break;
+
+ /*
+ * Swap entry may have been freed since our caller observed it.
+ */
+ err = swapcache_prepare(entry);
+ if (err == -EEXIST) { /* seems racy */
+ radix_tree_preload_end();
+ continue;
+ }
+ if (err) { /* swp entry is obsolete ? */
+ radix_tree_preload_end();
+ break;
+ }
+
+ /* May fail (-ENOMEM) if radix-tree node allocation failed. */
+ __set_page_locked(new_page);
+ SetPageSwapBacked(new_page);
+ err = __add_to_swap_cache(new_page, entry);
+ if (likely(!err)) {
+ radix_tree_preload_end();
+ lru_cache_add_anon(new_page);
+ return 1;
+ }
+ radix_tree_preload_end();
+ ClearPageSwapBacked(new_page);
+ __clear_page_locked(new_page);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
+ */
+ swapcache_free(entry, NULL);
+ /* FIXME: is it possible to get here without err==-ENOMEM?
+ * If not, we can dispense with the do loop, use goto retry */
+ } while (err != -ENOMEM);
+
+ return -ENOMEM;
+}
+
+/*
+ * Given a frontswap zpage in zcache (identified by type/offset) and
+ * an empty page, put the page into the swap cache, use frontswap
+ * to get the page from zcache into the empty page, then give it
+ * to the swap subsystem to send to disk (carefully avoiding the
+ * possibility that frontswap might snatch it back).
+ * Returns < 0 on error, 0 if successful, and 1 if successful but
+ * the newpage passed in is not needed and should be freed.
+ */
+static int zcache_frontswap_writeback_zpage(int type, pgoff_t offset,
+ struct page *newpage)
+{
+ struct page *page = newpage;
+ int ret;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ };
+
+ ret = zcache_get_swap_cache_page(type, offset, page);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0) {
+ /* more uptodate page is already in swapcache */
+ __frontswap_invalidate_page(type, offset);
+ return 1;
}
-#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
- static ssize_t zcache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return _func(buf); \
- } \
- static struct kobj_attribute zcache_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = zcache_##_name##_show, \
+ BUG_ON(!frontswap_has_exclusive_gets); /* load must also invalidate */
+ /* FIXME: how is it possible to get here when page is unlocked? */
+ __frontswap_load(page);
+ SetPageUptodate(page); /* above does SetPageDirty, is that enough? */
+
+ /* start writeback */
+ SetPageReclaim(page);
+ /*
+ * Return value is ignored here because it doesn't change anything
+ * for us. Page is returned unlocked.
+ */
+ (void)__swap_writepage(page, &wbc, zcache_end_swap_write);
+ page_cache_release(page);
+ zcache_outstanding_writeback_pages =
+ atomic_inc_return(&zcache_outstanding_writeback_pages_atomic);
+
+ return 0;
+}
+
+/*
+ * The following is still a magic number... we want to allow forward progress
+ * for writeback because it frees up needed RAM when under pressure, but
+ * we don't want to allow writeback to absorb and queue too many GFP_KERNEL
+ * pages if the swap device is very slow.
+ */
+#define ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES 6400
+
+/*
+ * Try to allocate two free pages, first using a non-aggressive alloc,
+ * then by evicting zcache ephemeral (clean pagecache) pages, and last
+ * by aggressive GFP_KERNEL alloc. We allow zbud to choose a pageframe
+ * consisting of 1-2 zbuds/zpages, then call the writeback_zpage helper
+ * function above for each.
+ */
+static int zcache_frontswap_writeback(void)
+{
+ struct tmem_handle th[2];
+ int ret = 0;
+ int nzbuds, writeback_ret;
+ unsigned type;
+ struct page *znewpage1 = NULL, *znewpage2 = NULL;
+ struct page *evictpage1 = NULL, *evictpage2 = NULL;
+ struct page *newpage1 = NULL, *newpage2 = NULL;
+ struct page *page1 = NULL, *page2 = NULL;
+ pgoff_t offset;
+
+ znewpage1 = alloc_page(ZCACHE_GFP_MASK);
+ znewpage2 = alloc_page(ZCACHE_GFP_MASK);
+ if (znewpage1 == NULL)
+ evictpage1 = zcache_evict_eph_pageframe();
+ if (znewpage2 == NULL)
+ evictpage2 = zcache_evict_eph_pageframe();
+
+ if ((evictpage1 == NULL || evictpage2 == NULL) &&
+ atomic_read(&zcache_outstanding_writeback_pages_atomic) >
+ ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES) {
+ goto free_and_out;
+ }
+ if (znewpage1 == NULL && evictpage1 == NULL)
+ newpage1 = alloc_page(GFP_KERNEL);
+ if (znewpage2 == NULL && evictpage2 == NULL)
+ newpage2 = alloc_page(GFP_KERNEL);
+ if (newpage1 == NULL || newpage2 == NULL)
+ goto free_and_out;
+
+ /* ok, we have two pageframes pre-allocated, get a pair of zbuds */
+ nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
+ if (nzbuds == 0) {
+ ret = -ENOENT;
+ goto free_and_out;
}
-ZCACHE_SYSFS_RO(curr_obj_count_max);
-ZCACHE_SYSFS_RO(curr_objnode_count_max);
-ZCACHE_SYSFS_RO(flush_total);
-ZCACHE_SYSFS_RO(flush_found);
-ZCACHE_SYSFS_RO(flobj_total);
-ZCACHE_SYSFS_RO(flobj_found);
-ZCACHE_SYSFS_RO(failed_eph_puts);
-ZCACHE_SYSFS_RO(failed_pers_puts);
-ZCACHE_SYSFS_RO(zbud_curr_zbytes);
-ZCACHE_SYSFS_RO(zbud_cumul_zpages);
-ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
-ZCACHE_SYSFS_RO(zbud_buddied_count);
-ZCACHE_SYSFS_RO(zbpg_unused_list_count);
-ZCACHE_SYSFS_RO(evicted_raw_pages);
-ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
-ZCACHE_SYSFS_RO(evicted_buddied_pages);
-ZCACHE_SYSFS_RO(failed_get_free_pages);
-ZCACHE_SYSFS_RO(failed_alloc);
-ZCACHE_SYSFS_RO(put_to_flush);
-ZCACHE_SYSFS_RO(compress_poor);
-ZCACHE_SYSFS_RO(mean_compress_poor);
-ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
-ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
-ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
-ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
-ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
- zbud_show_unbuddied_list_counts);
-ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
- zbud_show_cumul_chunk_counts);
-ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
- zv_curr_dist_counts_show);
-ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
- zv_cumul_dist_counts_show);
-
-static struct attribute *zcache_attrs[] = {
- &zcache_curr_obj_count_attr.attr,
- &zcache_curr_obj_count_max_attr.attr,
- &zcache_curr_objnode_count_attr.attr,
- &zcache_curr_objnode_count_max_attr.attr,
- &zcache_flush_total_attr.attr,
- &zcache_flobj_total_attr.attr,
- &zcache_flush_found_attr.attr,
- &zcache_flobj_found_attr.attr,
- &zcache_failed_eph_puts_attr.attr,
- &zcache_failed_pers_puts_attr.attr,
- &zcache_compress_poor_attr.attr,
- &zcache_mean_compress_poor_attr.attr,
- &zcache_zbud_curr_raw_pages_attr.attr,
- &zcache_zbud_curr_zpages_attr.attr,
- &zcache_zbud_curr_zbytes_attr.attr,
- &zcache_zbud_cumul_zpages_attr.attr,
- &zcache_zbud_cumul_zbytes_attr.attr,
- &zcache_zbud_buddied_count_attr.attr,
- &zcache_zbpg_unused_list_count_attr.attr,
- &zcache_evicted_raw_pages_attr.attr,
- &zcache_evicted_unbuddied_pages_attr.attr,
- &zcache_evicted_buddied_pages_attr.attr,
- &zcache_failed_get_free_pages_attr.attr,
- &zcache_failed_alloc_attr.attr,
- &zcache_put_to_flush_attr.attr,
- &zcache_zbud_unbuddied_list_counts_attr.attr,
- &zcache_zbud_cumul_chunk_counts_attr.attr,
- &zcache_zv_curr_dist_counts_attr.attr,
- &zcache_zv_cumul_dist_counts_attr.attr,
- &zcache_zv_max_zsize_attr.attr,
- &zcache_zv_max_mean_zsize_attr.attr,
- &zcache_zv_page_count_policy_percent_attr.attr,
- NULL,
-};
+ /* process the first zbud */
+ unswiz(th[0].oid, th[0].index, &type, &offset);
+ page1 = (znewpage1 != NULL) ? znewpage1 :
+ ((newpage1 != NULL) ? newpage1 : evictpage1);
+ writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page1);
+ if (writeback_ret < 0) {
+ ret = -ENOMEM;
+ goto free_and_out;
+ }
+ if (evictpage1 != NULL)
+ zcache_pageframes_freed =
+ atomic_inc_return(&zcache_pageframes_freed_atomic);
+ if (writeback_ret == 0) {
+ /* zcache_get_swap_cache_page will free, don't double free */
+ znewpage1 = NULL;
+ newpage1 = NULL;
+ evictpage1 = NULL;
+ }
+ if (nzbuds < 2)
+ goto free_and_out;
+
+ /* if there is a second zbud, process it */
+ unswiz(th[1].oid, th[1].index, &type, &offset);
+ page2 = (znewpage2 != NULL) ? znewpage2 :
+ ((newpage2 != NULL) ? newpage2 : evictpage2);
+ writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page2);
+ if (writeback_ret < 0) {
+ ret = -ENOMEM;
+ goto free_and_out;
+ }
+ if (evictpage2 != NULL)
+ zcache_pageframes_freed =
+ atomic_inc_return(&zcache_pageframes_freed_atomic);
+ if (writeback_ret == 0) {
+ znewpage2 = NULL;
+ newpage2 = NULL;
+ evictpage2 = NULL;
+ }
-static struct attribute_group zcache_attr_group = {
- .attrs = zcache_attrs,
- .name = "zcache",
-};
+free_and_out:
+ if (znewpage1 != NULL)
+ page_cache_release(znewpage1);
+ if (znewpage2 != NULL)
+ page_cache_release(znewpage2);
+ if (newpage1 != NULL)
+ page_cache_release(newpage1);
+ if (newpage2 != NULL)
+ page_cache_release(newpage2);
+ if (evictpage1 != NULL)
+ zcache_free_page(evictpage1);
+ if (evictpage2 != NULL)
+ zcache_free_page(evictpage2);
+ return ret;
+}
+#endif /* CONFIG_ZCACHE_WRITEBACK */
-#endif /* CONFIG_SYSFS */
/*
* When zcache is disabled ("frozen"), pools can be created and destroyed,
* but all puts (and thus all other operations that require memory allocation)
@@ -1525,23 +1245,81 @@ static struct attribute_group zcache_attr_group = {
static bool zcache_freeze;
/*
- * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
+ * This zcache shrinker interface reduces the number of ephemeral pageframes
+ * used by zcache to approximately the same as the total number of LRU_FILE
+ * pageframes in use, and now also reduces the number of persistent pageframes
+ * used by zcache to approximately the same as the total number of LRU_ANON
+ * pageframes in use. FIXME POLICY: Probably the writeback should only occur
+ * if the eviction doesn't free enough pages.
*/
static int shrink_zcache_memory(struct shrinker *shrink,
struct shrink_control *sc)
{
+ static bool in_progress;
int ret = -1;
int nr = sc->nr_to_scan;
- gfp_t gfp_mask = sc->gfp_mask;
+ int nr_evict = 0;
+ int nr_writeback = 0;
+ struct page *page;
+ int file_pageframes_inuse, anon_pageframes_inuse;
+
+ if (nr <= 0)
+ goto skip_evict;
+
+ /* don't allow more than one eviction thread at a time */
+ if (in_progress)
+ goto skip_evict;
+
+ in_progress = true;
+
+ /* we are going to ignore nr, and target a different value */
+ zcache_last_active_file_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
+ zcache_last_inactive_file_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
+ file_pageframes_inuse = zcache_last_active_file_pageframes +
+ zcache_last_inactive_file_pageframes;
+ if (zcache_eph_pageframes > file_pageframes_inuse)
+ nr_evict = zcache_eph_pageframes - file_pageframes_inuse;
+ else
+ nr_evict = 0;
+ while (nr_evict-- > 0) {
+ page = zcache_evict_eph_pageframe();
+ if (page == NULL)
+ break;
+ zcache_free_page(page);
+ }
- if (nr >= 0) {
- if (!(gfp_mask & __GFP_FS))
- /* does this case really need to be skipped? */
- goto out;
- zbud_evict_pages(nr);
+ zcache_last_active_anon_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
+ zcache_last_inactive_anon_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
+ anon_pageframes_inuse = zcache_last_active_anon_pageframes +
+ zcache_last_inactive_anon_pageframes;
+ if (zcache_pers_pageframes > anon_pageframes_inuse)
+ nr_writeback = zcache_pers_pageframes - anon_pageframes_inuse;
+ else
+ nr_writeback = 0;
+ while (nr_writeback-- > 0) {
+#ifdef CONFIG_ZCACHE_WRITEBACK
+ int writeback_ret;
+ writeback_ret = zcache_frontswap_writeback();
+ if (writeback_ret == -ENOMEM)
+#endif
+ break;
}
- ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
-out:
+ in_progress = false;
+
+skip_evict:
+ /* resample: has changed, but maybe not all the way yet */
+ zcache_last_active_file_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
+ zcache_last_inactive_file_pageframes =
+ global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
+ ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
+ zcache_last_inactive_file_pageframes;
+ if (ret < 0)
+ ret = 0;
return ret;
}
@@ -1554,59 +1332,86 @@ static struct shrinker zcache_shrinker = {
* zcache shims between cleancache/frontswap ops and tmem
*/
-static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, struct page *page)
+/* FIXME rename these core routines to zcache_tmemput etc? */
+int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
+ uint32_t index, void *page,
+ unsigned int size, bool raw, int ephemeral)
{
struct tmem_pool *pool;
+ struct tmem_handle th;
int ret = -1;
+ void *pampd = NULL;
BUG_ON(!irqs_disabled());
pool = zcache_get_pool_by_id(cli_id, pool_id);
if (unlikely(pool == NULL))
goto out;
- if (!zcache_freeze && zcache_do_preload(pool) == 0) {
- /* preload does preempt_disable on success */
- ret = tmem_put(pool, oidp, index, (char *)(page),
- PAGE_SIZE, 0, is_ephemeral(pool));
- if (ret < 0) {
- if (is_ephemeral(pool))
+ if (!zcache_freeze) {
+ ret = 0;
+ th.client_id = cli_id;
+ th.pool_id = pool_id;
+ th.oid = *oidp;
+ th.index = index;
+ pampd = zcache_pampd_create((char *)page, size, raw,
+ ephemeral, &th);
+ if (pampd == NULL) {
+ ret = -ENOMEM;
+ if (ephemeral)
zcache_failed_eph_puts++;
else
zcache_failed_pers_puts++;
+ } else {
+ if (ramster_enabled)
+ ramster_do_preload_flnode(pool);
+ ret = tmem_put(pool, oidp, index, 0, pampd);
+ if (ret < 0)
+ BUG();
}
+ zcache_put_pool(pool);
} else {
zcache_put_to_flush++;
+ if (ramster_enabled)
+ ramster_do_preload_flnode(pool);
if (atomic_read(&pool->obj_count) > 0)
/* the put fails whether the flush succeeds or not */
(void)tmem_flush_page(pool, oidp, index);
+ zcache_put_pool(pool);
}
-
- zcache_put_pool(pool);
out:
return ret;
}
-static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, struct page *page)
+int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
+ uint32_t index, void *page,
+ size_t *sizep, bool raw, int get_and_free)
{
struct tmem_pool *pool;
int ret = -1;
- unsigned long flags;
- size_t size = PAGE_SIZE;
+ bool eph;
- local_irq_save(flags);
+ if (!raw) {
+ BUG_ON(irqs_disabled());
+ BUG_ON(in_softirq());
+ }
pool = zcache_get_pool_by_id(cli_id, pool_id);
+ eph = is_ephemeral(pool);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
ret = tmem_get(pool, oidp, index, (char *)(page),
- &size, 0, is_ephemeral(pool));
+ sizep, raw, get_and_free);
zcache_put_pool(pool);
}
- local_irq_restore(flags);
+ WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
+ "zcache_get fails on persistent pool, "
+ "bad things are very likely to happen soon\n");
+#ifdef RAMSTER_TESTING
+ if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
+ pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
+#endif
return ret;
}
-static int zcache_flush_page(int cli_id, int pool_id,
+int zcache_flush_page(int cli_id, int pool_id,
struct tmem_oid *oidp, uint32_t index)
{
struct tmem_pool *pool;
@@ -1616,6 +1421,8 @@ static int zcache_flush_page(int cli_id, int pool_id,
local_irq_save(flags);
zcache_flush_total++;
pool = zcache_get_pool_by_id(cli_id, pool_id);
+ if (ramster_enabled)
+ ramster_do_preload_flnode(pool);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
ret = tmem_flush_page(pool, oidp, index);
@@ -1627,7 +1434,7 @@ static int zcache_flush_page(int cli_id, int pool_id,
return ret;
}
-static int zcache_flush_object(int cli_id, int pool_id,
+int zcache_flush_object(int cli_id, int pool_id,
struct tmem_oid *oidp)
{
struct tmem_pool *pool;
@@ -1637,6 +1444,8 @@ static int zcache_flush_object(int cli_id, int pool_id,
local_irq_save(flags);
zcache_flobj_total++;
pool = zcache_get_pool_by_id(cli_id, pool_id);
+ if (ramster_enabled)
+ ramster_do_preload_flnode(pool);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
ret = tmem_flush_object(pool, oidp);
@@ -1648,24 +1457,25 @@ static int zcache_flush_object(int cli_id, int pool_id,
return ret;
}
-static int zcache_destroy_pool(int cli_id, int pool_id)
+static int zcache_client_destroy_pool(int cli_id, int pool_id)
{
struct tmem_pool *pool = NULL;
- struct zcache_client *cli;
+ struct zcache_client *cli = NULL;
int ret = -1;
if (pool_id < 0)
goto out;
-
- cli = get_zcache_client(cli_id);
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
if (cli == NULL)
goto out;
-
atomic_inc(&cli->refcount);
- pool = idr_find(&cli->tmem_pools, pool_id);
+ pool = cli->tmem_pools[pool_id];
if (pool == NULL)
goto out;
- idr_remove(&cli->tmem_pools, pool_id);
+ cli->tmem_pools[pool_id] = NULL;
/* wait for pool activity on other cpus to quiesce */
while (atomic_read(&pool->refcount) != 0)
;
@@ -1674,56 +1484,119 @@ static int zcache_destroy_pool(int cli_id, int pool_id)
ret = tmem_destroy_pool(pool);
local_bh_enable();
kfree(pool);
- pr_info("zcache: destroyed pool id=%d, cli_id=%d\n",
- pool_id, cli_id);
+ if (cli_id == LOCAL_CLIENT)
+ pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
+ else
+ pr_info("%s: destroyed pool id=%d, client=%d\n",
+ namestr, pool_id, cli_id);
out:
return ret;
}
-static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
+int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
int poolid = -1;
struct tmem_pool *pool;
struct zcache_client *cli = NULL;
- int r;
- cli = get_zcache_client(cli_id);
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
if (cli == NULL)
goto out;
-
atomic_inc(&cli->refcount);
pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
- if (pool == NULL) {
- pr_info("zcache: pool creation failed: out of memory\n");
+ if (pool == NULL)
+ goto out;
+
+ for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
+ if (cli->tmem_pools[poolid] == NULL)
+ break;
+ if (poolid >= MAX_POOLS_PER_CLIENT) {
+ pr_info("%s: pool creation failed: max exceeded\n", namestr);
+ kfree(pool);
+ poolid = -1;
goto out;
}
+ atomic_set(&pool->refcount, 0);
+ pool->client = cli;
+ pool->pool_id = poolid;
+ tmem_new_pool(pool, flags);
+ cli->tmem_pools[poolid] = pool;
+ if (cli_id == LOCAL_CLIENT)
+ pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
+ flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ poolid);
+ else
+ pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
+ flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ poolid, cli_id);
+out:
+ if (cli != NULL)
+ atomic_dec(&cli->refcount);
+ return poolid;
+}
- do {
- r = idr_pre_get(&cli->tmem_pools, GFP_ATOMIC);
- if (r != 1) {
- kfree(pool);
- pr_info("zcache: pool creation failed: out of memory\n");
+static int zcache_local_new_pool(uint32_t flags)
+{
+ return zcache_new_pool(LOCAL_CLIENT, flags);
+}
+
+int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
+{
+ struct tmem_pool *pool;
+ struct zcache_client *cli;
+ uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
+ int ret = -1;
+
+ BUG_ON(!ramster_enabled);
+ if (cli_id == LOCAL_CLIENT)
+ goto out;
+ if (pool_id >= MAX_POOLS_PER_CLIENT)
+ goto out;
+ if (cli_id >= MAX_CLIENTS)
+ goto out;
+
+ cli = &zcache_clients[cli_id];
+ if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
+ pr_err("zcache_autocreate_pool: pool type disabled\n");
+ goto out;
+ }
+ if (!cli->allocated) {
+ if (zcache_new_client(cli_id)) {
+ pr_err("zcache_autocreate_pool: can't create client\n");
goto out;
}
- r = idr_get_new(&cli->tmem_pools, pool, &poolid);
- } while (r == -EAGAIN);
- if (r) {
- pr_info("zcache: pool creation failed: error %d\n", r);
- kfree(pool);
+ cli = &zcache_clients[cli_id];
+ }
+ atomic_inc(&cli->refcount);
+ pool = cli->tmem_pools[pool_id];
+ if (pool != NULL) {
+ if (pool->persistent && eph) {
+ pr_err("zcache_autocreate_pool: type mismatch\n");
+ goto out;
+ }
+ ret = 0;
goto out;
}
+ pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
+ if (pool == NULL)
+ goto out;
atomic_set(&pool->refcount, 0);
pool->client = cli;
- pool->pool_id = poolid;
+ pool->pool_id = pool_id;
tmem_new_pool(pool, flags);
- pr_info("zcache: created %s tmem pool, id=%d, client=%d\n",
- flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- poolid, cli_id);
+ cli->tmem_pools[pool_id] = pool;
+ pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
+ namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ pool_id, cli_id);
+ ret = 0;
out:
if (cli != NULL)
atomic_dec(&cli->refcount);
- return poolid;
+ return ret;
}
/**********
@@ -1734,7 +1607,6 @@ out:
* to translate in-kernel semantics to zcache semantics.
*/
-#ifdef CONFIG_CLEANCACHE
static void zcache_cleancache_put_page(int pool_id,
struct cleancache_filekey key,
pgoff_t index, struct page *page)
@@ -1742,8 +1614,13 @@ static void zcache_cleancache_put_page(int pool_id,
u32 ind = (u32) index;
struct tmem_oid oid = *(struct tmem_oid *)&key;
+ if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
+ zcache_eph_nonactive_puts_ignored++;
+ return;
+ }
if (likely(ind == index))
- (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page);
+ (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
+ page, PAGE_SIZE, false, 1);
}
static int zcache_cleancache_get_page(int pool_id,
@@ -1752,10 +1629,16 @@ static int zcache_cleancache_get_page(int pool_id,
{
u32 ind = (u32) index;
struct tmem_oid oid = *(struct tmem_oid *)&key;
+ size_t size;
int ret = -1;
- if (likely(ind == index))
- ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page);
+ if (likely(ind == index)) {
+ ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
+ page, &size, false, 0);
+ BUG_ON(ret >= 0 && size != PAGE_SIZE);
+ if (ret == 0)
+ SetPageWasActive(page);
+ }
return ret;
}
@@ -1781,7 +1664,7 @@ static void zcache_cleancache_flush_inode(int pool_id,
static void zcache_cleancache_flush_fs(int pool_id)
{
if (pool_id >= 0)
- (void)zcache_destroy_pool(LOCAL_CLIENT, pool_id);
+ (void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
}
static int zcache_cleancache_init_fs(size_t pagesize)
@@ -1789,7 +1672,7 @@ static int zcache_cleancache_init_fs(size_t pagesize)
BUG_ON(sizeof(struct cleancache_filekey) !=
sizeof(struct tmem_oid));
BUG_ON(pagesize != PAGE_SIZE);
- return zcache_new_pool(LOCAL_CLIENT, 0);
+ return zcache_local_new_pool(0);
}
static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
@@ -1798,7 +1681,7 @@ static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
BUG_ON(sizeof(struct cleancache_filekey) !=
sizeof(struct tmem_oid));
BUG_ON(pagesize != PAGE_SIZE);
- return zcache_new_pool(LOCAL_CLIENT, 0);
+ return zcache_local_new_pool(0);
}
static struct cleancache_ops zcache_cleancache_ops = {
@@ -1818,17 +1701,15 @@ struct cleancache_ops zcache_cleancache_register_ops(void)
return old_ops;
}
-#endif
-#ifdef CONFIG_FRONTSWAP
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
-static int zcache_frontswap_poolid = -1;
+static int zcache_frontswap_poolid __read_mostly = -1;
/*
* Swizzling increases objects per swaptype, increasing tmem concurrency
* for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
* Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_load(), but has side-effects. Hence using 8.
+ * frontswap_get_page(), but has side-effects. Hence using 8.
*/
#define SWIZ_BITS 8
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
@@ -1842,8 +1723,18 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
return oid;
}
-static int zcache_frontswap_store(unsigned type, pgoff_t offset,
- struct page *page)
+#ifdef CONFIG_ZCACHE_WRITEBACK
+static void unswiz(struct tmem_oid oid, u32 index,
+ unsigned *type, pgoff_t *offset)
+{
+ *type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
+ *offset = (pgoff_t)((index << SWIZ_BITS) |
+ (oid.oid[0] & SWIZ_MASK));
+}
+#endif
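+/*
+ * Illustrative note (not part of the original patch): as unswiz() above
+ * implies, oswiz()/iswiz() pack a swap entry with SWIZ_BITS == 8 as
+ *   oid.oid[0] = (type << SWIZ_BITS) | (offset & SWIZ_MASK);
+ *   index     = offset >> SWIZ_BITS;
+ * e.g. type=1, offset=0x12345 gives oid.oid[0]=0x145 and index=0x123,
+ * and unswiz() recovers type=1, offset=0x12345 from that pair.
+ */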
+
+static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+ struct page *page)
{
u64 ind64 = (u64)offset;
u32 ind = (u32)offset;
@@ -1852,29 +1743,44 @@ static int zcache_frontswap_store(unsigned type, pgoff_t offset,
unsigned long flags;
BUG_ON(!PageLocked(page));
+ if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
+ zcache_pers_nonactive_puts_ignored++;
+ ret = -ERANGE;
+ goto out;
+ }
if (likely(ind64 == ind)) {
local_irq_save(flags);
ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind), page);
+ &oid, iswiz(ind),
+ page, PAGE_SIZE, false, 0);
local_irq_restore(flags);
}
+out:
return ret;
}
/* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!) */
-static int zcache_frontswap_load(unsigned type, pgoff_t offset,
- struct page *page)
+static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+ struct page *page)
{
u64 ind64 = (u64)offset;
u32 ind = (u32)offset;
struct tmem_oid oid = oswiz(type, ind);
- int ret = -1;
+ size_t size;
+ int ret = -1, get_and_free;
+ if (frontswap_has_exclusive_gets)
+ get_and_free = 1;
+ else
+ get_and_free = -1;
BUG_ON(!PageLocked(page));
- if (likely(ind64 == ind))
+ if (likely(ind64 == ind)) {
ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind), page);
+ &oid, iswiz(ind),
+ page, &size, false, get_and_free);
+ BUG_ON(ret >= 0 && size != PAGE_SIZE);
+ }
return ret;
}
@@ -1908,12 +1814,12 @@ static void zcache_frontswap_init(unsigned ignored)
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
if (zcache_frontswap_poolid < 0)
zcache_frontswap_poolid =
- zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST);
+ zcache_local_new_pool(TMEM_POOL_PERSIST);
}
static struct frontswap_ops zcache_frontswap_ops = {
- .store = zcache_frontswap_store,
- .load = zcache_frontswap_load,
+ .store = zcache_frontswap_put_page,
+ .load = zcache_frontswap_get_page,
.invalidate_page = zcache_frontswap_flush_page,
.invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
@@ -1926,16 +1832,13 @@ struct frontswap_ops zcache_frontswap_register_ops(void)
return old_ops;
}
-#endif
/*
* zcache initialization
- * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
- * NOTHING HAPPENS!
+ * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
+ * OR NOTHING HAPPENS!
*/
-static int zcache_enabled;
-
static int __init enable_zcache(char *s)
{
zcache_enabled = 1;
@@ -1943,28 +1846,58 @@ static int __init enable_zcache(char *s)
}
__setup("zcache", enable_zcache);
-/* allow independent dynamic disabling of cleancache and frontswap */
+static int __init enable_ramster(char *s)
+{
+ zcache_enabled = 1;
+#ifdef CONFIG_RAMSTER
+ ramster_enabled = 1;
+#endif
+ return 1;
+}
+__setup("ramster", enable_ramster);
-static int use_cleancache = 1;
+/* allow independent dynamic disabling of cleancache and frontswap */
static int __init no_cleancache(char *s)
{
- use_cleancache = 0;
+ disable_cleancache = 1;
return 1;
}
__setup("nocleancache", no_cleancache);
-static int use_frontswap = 1;
-
static int __init no_frontswap(char *s)
{
- use_frontswap = 0;
+ disable_frontswap = 1;
return 1;
}
__setup("nofrontswap", no_frontswap);
+static int __init no_frontswap_exclusive_gets(char *s)
+{
+ frontswap_has_exclusive_gets = false;
+ return 1;
+}
+
+__setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);
+
+static int __init no_frontswap_ignore_nonactive(char *s)
+{
+ disable_frontswap_ignore_nonactive = 1;
+ return 1;
+}
+
+__setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);
+
+static int __init no_cleancache_ignore_nonactive(char *s)
+{
+ disable_cleancache_ignore_nonactive = 1;
+ return 1;
+}
+
+__setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
+
static int __init enable_zcache_compressor(char *s)
{
strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
@@ -2007,14 +1940,13 @@ static int __init zcache_init(void)
{
int ret = 0;
-#ifdef CONFIG_SYSFS
- ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
- if (ret) {
- pr_err("zcache: can't create sysfs\n");
- goto out;
+ if (ramster_enabled) {
+ namestr = "ramster";
+ ramster_register_pamops(&zcache_pamops);
}
-#endif /* CONFIG_SYSFS */
-
+#ifdef CONFIG_DEBUG_FS
+ zcache_debugfs_init();
+#endif
if (zcache_enabled) {
unsigned int cpu;
@@ -2022,12 +1954,13 @@ static int __init zcache_init(void)
tmem_register_pamops(&zcache_pamops);
ret = register_cpu_notifier(&zcache_cpu_notifier_block);
if (ret) {
- pr_err("zcache: can't register cpu notifier\n");
+ pr_err("%s: can't register cpu notifier\n", namestr);
goto out;
}
ret = zcache_comp_init();
if (ret) {
- pr_err("zcache: compressor initialization failed\n");
+ pr_err("%s: compressor initialization failed\n",
+ namestr);
goto out;
}
for_each_online_cpu(cpu) {
@@ -2042,36 +1975,45 @@ static int __init zcache_init(void)
sizeof(struct tmem_obj), 0, 0, NULL);
ret = zcache_new_client(LOCAL_CLIENT);
if (ret) {
- pr_err("zcache: can't create client\n");
+ pr_err("%s: can't create client\n", namestr);
goto out;
}
-
-#ifdef CONFIG_CLEANCACHE
- if (zcache_enabled && use_cleancache) {
+ zbud_init();
+ if (zcache_enabled && !disable_cleancache) {
struct cleancache_ops old_ops;
- zbud_init();
register_shrinker(&zcache_shrinker);
old_ops = zcache_cleancache_register_ops();
- pr_info("zcache: cleancache enabled using kernel "
- "transcendent memory and compression buddies\n");
+ pr_info("%s: cleancache enabled using kernel transcendent "
+ "memory and compression buddies\n", namestr);
+#ifdef ZCACHE_DEBUG
+ pr_info("%s: cleancache: ignorenonactive = %d\n",
+ namestr, !disable_cleancache_ignore_nonactive);
+#endif
if (old_ops.init_fs != NULL)
- pr_warning("zcache: cleancache_ops overridden");
+ pr_warn("%s: cleancache_ops overridden\n", namestr);
}
-#endif
-#ifdef CONFIG_FRONTSWAP
- if (zcache_enabled && use_frontswap) {
+ if (zcache_enabled && !disable_frontswap) {
struct frontswap_ops old_ops;
old_ops = zcache_frontswap_register_ops();
- pr_info("zcache: frontswap enabled using kernel "
- "transcendent memory and zsmalloc\n");
+ if (frontswap_has_exclusive_gets)
+ frontswap_tmem_exclusive_gets(true);
+ pr_info("%s: frontswap enabled using kernel transcendent "
+ "memory and compression buddies\n", namestr);
+#ifdef ZCACHE_DEBUG
+ pr_info("%s: frontswap: excl gets = %d active only = %d\n",
+ namestr, frontswap_has_exclusive_gets,
+ !disable_frontswap_ignore_nonactive);
+#endif
if (old_ops.init != NULL)
- pr_warning("zcache: frontswap_ops overridden");
+ pr_warn("%s: frontswap_ops overridden\n", namestr);
}
-#endif
+ if (ramster_enabled)
+ ramster_init(!disable_cleancache, !disable_frontswap,
+ frontswap_has_exclusive_gets);
out:
return ret;
}
-module_init(zcache_init)
+late_initcall(zcache_init);
diff --git a/drivers/staging/ramster/zcache.h b/drivers/staging/zcache/zcache.h
index 81722b33b087..81722b33b087 100644
--- a/drivers/staging/ramster/zcache.h
+++ b/drivers/staging/zcache/zcache.h
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index be5abe8e7943..983314c41349 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -14,7 +14,7 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
- Project home: http://compcache.googlecode.com/
+ Project home: <https://compcache.googlecode.com/>
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/zram.txt b/drivers/staging/zram/zram.txt
index 5f75d2987564..765d790ae831 100644
--- a/drivers/staging/zram/zram.txt
+++ b/drivers/staging/zram/zram.txt
@@ -23,17 +23,17 @@ Following shows a typical sequence of steps for using zram.
This creates 4 devices: /dev/zram{0,1,2,3}
(num_devices parameter is optional. Default: 1)
-2) Set Disksize (Optional):
- Set disk size by writing the value to sysfs node 'disksize'
- (in bytes). If disksize is not given, default value of 25%
- of RAM is used.
-
- # Initialize /dev/zram0 with 50MB disksize
- echo $((50*1024*1024)) > /sys/block/zram0/disksize
-
- NOTE: disksize cannot be changed if the disk contains any
- data. So, for such a disk, you need to issue 'reset' (see below)
- before you can change its disksize.
+2) Set Disksize
+ Set disk size by writing the value to sysfs node 'disksize'.
+	The value can be given either in bytes or with memory suffixes (K, M, G).
+ Examples:
+ # Initialize /dev/zram0 with 50MB disksize
+ echo $((50*1024*1024)) > /sys/block/zram0/disksize
+
+ # Using mem suffixes
+ echo 256K > /sys/block/zram0/disksize
+ echo 512M > /sys/block/zram0/disksize
+ echo 1G > /sys/block/zram0/disksize
3) Activate:
mkswap /dev/zram0
@@ -65,8 +65,9 @@ Following shows a typical sequence of steps for using zram.
echo 1 > /sys/block/zram0/reset
echo 1 > /sys/block/zram1/reset
- (This frees all the memory allocated for the given device).
-
+ This frees all the memory allocated for the given device and
+ resets the disksize to zero. You must set the disksize again
+ before reusing the device.
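+
+	Example (a sketch; adjust the device name and size to your setup):
+
+	swapoff /dev/zram0
+	echo 1 > /sys/block/zram0/reset
+
+	# set a new disksize before reusing the device
+	echo 256M > /sys/block/zram0/disksize
+	mkswap /dev/zram0
+	swapon /dev/zram0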
Please report any problems at:
- Mailing list: linux-mm-cc at laptop dot org
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index fb4a7c94aed3..5918fd7d7e36 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -40,17 +40,7 @@ static int zram_major;
struct zram *zram_devices;
/* Module params (documentation at end) */
-static unsigned int num_devices;
-
-static void zram_stat_inc(u32 *v)
-{
- *v = *v + 1;
-}
-
-static void zram_stat_dec(u32 *v)
-{
- *v = *v - 1;
-}
+static unsigned int num_devices = 1;
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
@@ -71,22 +61,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v)
zram_stat64_add(zram, v, 1);
}
-static int zram_test_flag(struct zram *zram, u32 index,
+static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- return zram->table[index].flags & BIT(flag);
+ return meta->table[index].flags & BIT(flag);
}
-static void zram_set_flag(struct zram *zram, u32 index,
+static void zram_set_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].flags |= BIT(flag);
+ meta->table[index].flags |= BIT(flag);
}
-static void zram_clear_flag(struct zram *zram, u32 index,
+static void zram_clear_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].flags &= ~BIT(flag);
+ meta->table[index].flags &= ~BIT(flag);
}
static int page_zero_filled(void *ptr)
@@ -104,66 +94,38 @@ static int page_zero_filled(void *ptr)
return 1;
}
-static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
-{
- if (!zram->disksize) {
- pr_info(
- "disk size not provided. You can use disksize_kb module "
- "param to specify size.\nUsing default: (%u%% of RAM).\n",
- default_disksize_perc_ram
- );
- zram->disksize = default_disksize_perc_ram *
- (totalram_bytes / 100);
- }
-
- if (zram->disksize > 2 * (totalram_bytes)) {
- pr_info(
- "There is little point creating a zram of greater than "
- "twice the size of memory since we expect a 2:1 compression "
- "ratio. Note that zram uses about 0.1%% of the size of "
- "the disk when not in use so a huge zram is "
- "wasteful.\n"
- "\tMemory Size: %zu kB\n"
- "\tSize you selected: %llu kB\n"
- "Continuing anyway ...\n",
- totalram_bytes >> 10, zram->disksize
- );
- }
-
- zram->disksize &= PAGE_MASK;
-}
-
static void zram_free_page(struct zram *zram, size_t index)
{
- unsigned long handle = zram->table[index].handle;
- u16 size = zram->table[index].size;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
+ u16 size = meta->table[index].size;
if (unlikely(!handle)) {
/*
* No memory is allocated for zero filled pages.
* Simply clear zero page flag.
*/
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- zram_clear_flag(zram, index, ZRAM_ZERO);
- zram_stat_dec(&zram->stats.pages_zero);
+ if (zram_test_flag(meta, index, ZRAM_ZERO)) {
+ zram_clear_flag(meta, index, ZRAM_ZERO);
+ zram->stats.pages_zero--;
}
return;
}
if (unlikely(size > max_zpage_size))
- zram_stat_dec(&zram->stats.bad_compress);
+ zram->stats.bad_compress--;
- zs_free(zram->mem_pool, handle);
+ zs_free(meta->mem_pool, handle);
if (size <= PAGE_SIZE / 2)
- zram_stat_dec(&zram->stats.good_compress);
+ zram->stats.good_compress--;
zram_stat64_sub(zram, &zram->stats.compr_size,
- zram->table[index].size);
- zram_stat_dec(&zram->stats.pages_stored);
+ meta->table[index].size);
+ zram->stats.pages_stored--;
- zram->table[index].handle = 0;
- zram->table[index].size = 0;
+ meta->table[index].handle = 0;
+ meta->table[index].size = 0;
}
static void handle_zero_page(struct bio_vec *bvec)
@@ -188,20 +150,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
int ret = LZO_E_OK;
size_t clen = PAGE_SIZE;
unsigned char *cmem;
- unsigned long handle = zram->table[index].handle;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
- if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
memset(mem, 0, PAGE_SIZE);
return 0;
}
- cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
- if (zram->table[index].size == PAGE_SIZE)
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (meta->table[index].size == PAGE_SIZE)
memcpy(mem, cmem, PAGE_SIZE);
else
- ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
+ ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
mem, &clen);
- zs_unmap_object(zram->mem_pool, handle);
+ zs_unmap_object(meta->mem_pool, handle);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -219,20 +182,21 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
int ret;
struct page *page;
unsigned char *user_mem, *uncmem = NULL;
-
+ struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- if (unlikely(!zram->table[index].handle) ||
- zram_test_flag(zram, index, ZRAM_ZERO)) {
+ if (unlikely(!meta->table[index].handle) ||
+ zram_test_flag(meta, index, ZRAM_ZERO)) {
handle_zero_page(bvec);
return 0;
}
- user_mem = kmap_atomic(page);
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
- else
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+
+ user_mem = kmap_atomic(page);
+ if (!is_partial_io(bvec))
uncmem = user_mem;
if (!uncmem) {
@@ -265,65 +229,68 @@ out_cleanup:
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
- int ret;
+ int ret = 0;
size_t clen;
unsigned long handle;
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- src = zram->compress_buffer;
+ src = meta->compress_buffer;
if (is_partial_io(bvec)) {
/*
* This is a partial IO. We need to read the full page
* before to write the changes.
*/
- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
if (!uncmem) {
- pr_info("Error allocating temp memory!\n");
ret = -ENOMEM;
goto out;
}
ret = zram_decompress_page(zram, uncmem, index);
- if (ret) {
- kfree(uncmem);
+ if (ret)
goto out;
- }
}
/*
* System overwrites unused sectors. Free memory associated
* with this sector now.
*/
- if (zram->table[index].handle ||
- zram_test_flag(zram, index, ZRAM_ZERO))
+ if (meta->table[index].handle ||
+ zram_test_flag(meta, index, ZRAM_ZERO))
zram_free_page(zram, index);
user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
+ if (is_partial_io(bvec)) {
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
bvec->bv_len);
- else
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ } else {
uncmem = user_mem;
+ }
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
if (is_partial_io(bvec))
kfree(uncmem);
- zram_stat_inc(&zram->stats.pages_zero);
- zram_set_flag(zram, index, ZRAM_ZERO);
+ zram->stats.pages_zero++;
+ zram_set_flag(meta, index, ZRAM_ZERO);
ret = 0;
goto out;
}
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
- zram->compress_workmem);
+ meta->compress_workmem);
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ if (!is_partial_io(bvec)) {
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ uncmem = NULL;
+ }
if (unlikely(ret != LZO_E_OK)) {
pr_err("Compression failed! err=%d\n", ret);
@@ -331,36 +298,43 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
if (unlikely(clen > max_zpage_size)) {
- zram_stat_inc(&zram->stats.bad_compress);
- src = uncmem;
+ zram->stats.bad_compress++;
clen = PAGE_SIZE;
+ src = NULL;
+ if (is_partial_io(bvec))
+ src = uncmem;
}
- handle = zs_malloc(zram->mem_pool, clen);
+ handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
ret = -ENOMEM;
goto out;
}
- cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ src = kmap_atomic(page);
memcpy(cmem, src, clen);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ kunmap_atomic(src);
- zs_unmap_object(zram->mem_pool, handle);
+ zs_unmap_object(meta->mem_pool, handle);
- zram->table[index].handle = handle;
- zram->table[index].size = clen;
+ meta->table[index].handle = handle;
+ meta->table[index].size = clen;
/* Update stats */
zram_stat64_add(zram, &zram->stats.compr_size, clen);
- zram_stat_inc(&zram->stats.pages_stored);
+ zram->stats.pages_stored++;
if (clen <= PAGE_SIZE / 2)
- zram_stat_inc(&zram->stats.good_compress);
-
- return 0;
+ zram->stats.good_compress++;
out:
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+
if (ret)
zram_stat64_inc(zram, &zram->stats.failed_writes);
return ret;
@@ -470,16 +444,13 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
- if (unlikely(!zram->init_done) && zram_init_device(zram))
- goto error;
-
down_read(&zram->init_lock);
if (unlikely(!zram->init_done))
- goto error_unlock;
+ goto error;
if (!valid_io_request(zram, bio)) {
zram_stat64_inc(zram, &zram->stats.invalid_io);
- goto error_unlock;
+ goto error;
}
__zram_make_request(zram, bio, bio_data_dir(bio));
@@ -487,44 +458,38 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
return;
-error_unlock:
- up_read(&zram->init_lock);
error:
+ up_read(&zram->init_lock);
bio_io_error(bio);
}
-void __zram_reset_device(struct zram *zram)
+static void __zram_reset_device(struct zram *zram)
{
size_t index;
+ struct zram_meta *meta;
- zram->init_done = 0;
-
- /* Free various per-device buffers */
- kfree(zram->compress_workmem);
- free_pages((unsigned long)zram->compress_buffer, 1);
+ if (!zram->init_done)
+ return;
- zram->compress_workmem = NULL;
- zram->compress_buffer = NULL;
+ meta = zram->meta;
+ zram->init_done = 0;
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- unsigned long handle = zram->table[index].handle;
+ unsigned long handle = meta->table[index].handle;
if (!handle)
continue;
- zs_free(zram->mem_pool, handle);
+ zs_free(meta->mem_pool, handle);
}
- vfree(zram->table);
- zram->table = NULL;
-
- zs_destroy_pool(zram->mem_pool);
- zram->mem_pool = NULL;
-
+ zram_meta_free(zram->meta);
+ zram->meta = NULL;
/* Reset stats */
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
+ set_capacity(zram->disk, 0);
}
void zram_reset_device(struct zram *zram)
@@ -534,69 +499,84 @@ void zram_reset_device(struct zram *zram)
up_write(&zram->init_lock);
}
-int zram_init_device(struct zram *zram)
+void zram_meta_free(struct zram_meta *meta)
{
- int ret;
- size_t num_pages;
-
- down_write(&zram->init_lock);
-
- if (zram->init_done) {
- up_write(&zram->init_lock);
- return 0;
- }
+ zs_destroy_pool(meta->mem_pool);
+ kfree(meta->compress_workmem);
+ free_pages((unsigned long)meta->compress_buffer, 1);
+ vfree(meta->table);
+ kfree(meta);
+}
- zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
+struct zram_meta *zram_meta_alloc(u64 disksize)
+{
+ size_t num_pages;
+ struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ goto out;
- zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!zram->compress_workmem) {
- pr_err("Error allocating compressor working memory!\n");
- ret = -ENOMEM;
- goto fail_no_table;
- }
+ meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!meta->compress_workmem)
+ goto free_meta;
- zram->compress_buffer =
+ meta->compress_buffer =
(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!zram->compress_buffer) {
+ if (!meta->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
- ret = -ENOMEM;
- goto fail_no_table;
+ goto free_workmem;
}
- num_pages = zram->disksize >> PAGE_SHIFT;
- zram->table = vzalloc(num_pages * sizeof(*zram->table));
- if (!zram->table) {
+ num_pages = disksize >> PAGE_SHIFT;
+ meta->table = vzalloc(num_pages * sizeof(*meta->table));
+ if (!meta->table) {
pr_err("Error allocating zram address table\n");
- ret = -ENOMEM;
- goto fail_no_table;
+ goto free_buffer;
}
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+ if (!meta->mem_pool) {
+ pr_err("Error creating memory pool\n");
+ goto free_table;
+ }
- /* zram devices sort of resembles non-rotational disks */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+ return meta;
- zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
- if (!zram->mem_pool) {
- pr_err("Error creating memory pool\n");
- ret = -ENOMEM;
- goto fail;
+free_table:
+ vfree(meta->table);
+free_buffer:
+ free_pages((unsigned long)meta->compress_buffer, 1);
+free_workmem:
+ kfree(meta->compress_workmem);
+free_meta:
+ kfree(meta);
+ meta = NULL;
+out:
+ return meta;
+}
+
+void zram_init_device(struct zram *zram, struct zram_meta *meta)
+{
+ if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
+ pr_info(
+ "There is little point creating a zram of greater than "
+ "twice the size of memory since we expect a 2:1 compression "
+ "ratio. Note that zram uses about 0.1%% of the size of "
+ "the disk when not in use so a huge zram is "
+ "wasteful.\n"
+ "\tMemory Size: %lu kB\n"
+ "\tSize you selected: %llu kB\n"
+ "Continuing anyway ...\n",
+ (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
+ );
}
+ /* zram devices sort of resembles non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+
+ zram->meta = meta;
zram->init_done = 1;
- up_write(&zram->init_lock);
pr_debug("Initialization done!\n");
- return 0;
-
-fail_no_table:
- /* To prevent accessing table entries during cleanup */
- zram->disksize = 0;
-fail:
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
- pr_err("Initialization failed: err=%d\n", ret);
- return ret;
}
static void zram_slot_free_notify(struct block_device *bdev,
@@ -715,13 +695,7 @@ static int __init zram_init(void)
goto out;
}
- if (!num_devices) {
- pr_info("num_devices not specified. Using default: 1\n");
- num_devices = 1;
- }
-
/* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", num_devices);
zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
ret = -ENOMEM;
@@ -734,6 +708,8 @@ static int __init zram_init(void)
goto free_devices;
}
+ pr_info("Created %u device(s) ...\n", num_devices);
+
return 0;
free_devices:
@@ -755,8 +731,7 @@ static void __exit zram_exit(void)
zram = &zram_devices[i];
destroy_device(zram);
- if (zram->init_done)
- zram_reset_device(zram);
+ zram_reset_device(zram);
}
unregister_blkdev(zram_major, "zram");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index df2eec407db6..2d1a3f1e8edb 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -28,9 +28,6 @@ static const unsigned max_num_devices = 32;
/*-- Configurable parameters */
-/* Default zram disk size: 25% of total RAM */
-static const unsigned default_disksize_perc_ram = 25;
-
/*
* Pages that compress to size greater than this are stored
* uncompressed in memory.
@@ -86,11 +83,15 @@ struct zram_stats {
u32 bad_compress; /* % of pages with compression ratio>=75% */
};
-struct zram {
- struct zs_pool *mem_pool;
+struct zram_meta {
void *compress_workmem;
void *compress_buffer;
struct table *table;
+ struct zs_pool *mem_pool;
+};
+
+struct zram {
+ struct zram_meta *meta;
spinlock_t stat64_lock; /* protect 64-bit stats */
struct rw_semaphore lock; /* protect compression buffers and table
* against concurrent read and writes */
@@ -114,7 +115,9 @@ unsigned int zram_get_num_devices(void);
extern struct attribute_group zram_disk_attr_group;
#endif
-extern int zram_init_device(struct zram *zram);
-extern void __zram_reset_device(struct zram *zram);
+extern void zram_reset_device(struct zram *zram);
+extern struct zram_meta *zram_meta_alloc(u64 disksize);
+extern void zram_meta_free(struct zram_meta *meta);
+extern void zram_init_device(struct zram *zram, struct zram_meta *meta);
#endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index de1eacf65dbd..e6a929d452f7 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -56,21 +56,26 @@ static ssize_t disksize_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
u64 disksize;
+ struct zram_meta *meta;
struct zram *zram = dev_to_zram(dev);
disksize = memparse(buf, NULL);
if (!disksize)
return -EINVAL;
+ disksize = PAGE_ALIGN(disksize);
+ meta = zram_meta_alloc(disksize);
down_write(&zram->init_lock);
if (zram->init_done) {
up_write(&zram->init_lock);
+ zram_meta_free(meta);
pr_info("Cannot change disksize for initialized device\n");
return -EBUSY;
}
- zram->disksize = PAGE_ALIGN(disksize);
+ zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ zram_init_device(zram, meta);
up_write(&zram->init_lock);
return len;
@@ -110,11 +115,7 @@ static ssize_t reset_store(struct device *dev,
if (bdev)
fsync_bdev(bdev);
- down_write(&zram->init_lock);
- if (zram->init_done)
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
-
+ zram_reset_device(zram);
return len;
}
@@ -185,9 +186,10 @@ static ssize_t mem_used_total_show(struct device *dev,
{
u64 val = 0;
struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta = zram->meta;
if (zram->init_done)
- val = zs_get_total_size_bytes(zram->mem_pool);
+ val = zs_get_total_size_bytes(meta->mem_pool);
return sprintf(buf, "%llu\n", val);
}
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 09a9d35d436f..e78d262c5249 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -141,7 +141,7 @@
* ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
* (reason above)
*/
-#define ZS_SIZE_CLASS_DELTA 16
+#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
ZS_SIZE_CLASS_DELTA + 1)
@@ -207,7 +207,6 @@ struct zs_pool {
struct size_class size_class[ZS_SIZE_CLASSES];
gfp_t flags; /* allocation flags used when growing pool */
- const char *name;
};
/*
@@ -222,11 +221,9 @@ struct zs_pool {
/*
* By default, zsmalloc uses a copy-based object mapping method to access
* allocations that span two pages. However, if a particular architecture
- * 1) Implements local_flush_tlb_kernel_range() and 2) Performs VM mapping
- * faster than copying, then it should be added here so that
- * USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use page table
- * mapping rather than copying
- * for object mapping.
+ * performs VM mapping faster than copying, then it should be added here
+ * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use
+ * page table mapping rather than copying for object mapping.
*/
#if defined(CONFIG_ARM)
#define USE_PGTABLE_MAPPING
@@ -475,7 +472,7 @@ static void reset_page(struct page *page)
set_page_private(page, 0);
page->mapping = NULL;
page->freelist = NULL;
- reset_page_mapcount(page);
+ page_mapcount_reset(page);
}
static void free_zspage(struct page *first_page)
@@ -663,7 +660,7 @@ static inline void __zs_unmap_object(struct mapping_area *area,
flush_cache_vunmap(addr, end);
unmap_kernel_range_noflush(addr, PAGE_SIZE * 2);
- local_flush_tlb_kernel_range(addr, end);
+ flush_tlb_kernel_range(addr, end);
}
#else /* USE_PGTABLE_MAPPING */
@@ -798,14 +795,21 @@ fail:
return notifier_to_errno(ret);
}
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+/**
+ * zs_create_pool - Creates an allocation pool to work from.
+ * @flags: allocation flags used to allocate pool metadata
+ *
+ * This function must be called before anything when using
+ * the zsmalloc allocator.
+ *
+ * On success, a pointer to the newly created pool is returned,
+ * otherwise NULL.
+ */
+struct zs_pool *zs_create_pool(gfp_t flags)
{
int i, ovhd_size;
struct zs_pool *pool;
- if (!name)
- return NULL;
-
ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
pool = kzalloc(ovhd_size, GFP_KERNEL);
if (!pool)
@@ -828,7 +832,6 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
}
pool->flags = flags;
- pool->name = name;
return pool;
}
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
index de2e8bfbcc06..46dbd0558d86 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -28,7 +28,7 @@ enum zs_mapmode {
struct zs_pool;
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size);
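
[Editor's note] With the name argument dropped from zs_create_pool() in the hunks above, a zsmalloc user now passes only the allocation flags. Below is a minimal kernel-style sketch of the pool lifecycle using only the calls visible in this patch (zs_create_pool, zs_malloc, zs_map_object, zs_unmap_object, zs_free, zs_destroy_pool); the function zs_roundtrip_example() is hypothetical and purely illustrative, not part of the patch.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include "zsmalloc.h"

static int zs_roundtrip_example(void)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *obj;

	/* No pool name any more: only the growth flags are passed. */
	pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!pool)
		return -ENOMEM;

	/* Allocate a 128-byte object; zs_malloc() returns an opaque handle. */
	handle = zs_malloc(pool, 128);
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* Map the object for writing, fill it, then unmap it. */
	obj = zs_map_object(pool, handle, ZS_MM_WO);
	memset(obj, 0xaa, 128);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}
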
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 035c2c762537..339f97f7085b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -735,7 +735,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
spin_lock(&cmd->istate_lock);
if ((cmd->i_state == ISTATE_SENT_STATUS) &&
- (cmd->stat_sn < exp_statsn)) {
+ iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
cmd->i_state = ISTATE_REMOVE;
spin_unlock(&cmd->istate_lock);
iscsit_add_cmd_to_immediate_queue(cmd, conn,
@@ -767,9 +767,8 @@ static int iscsit_handle_scsi_cmd(
struct iscsi_conn *conn,
unsigned char *buf)
{
- int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
- int dump_immediate_data = 0, send_check_condition = 0, payload_length;
- struct iscsi_cmd *cmd = NULL;
+ int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
+ struct iscsi_cmd *cmd = NULL;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
int sam_task_attr;
@@ -956,38 +955,26 @@ done:
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
- /*
- * The CDB is going to an se_device_t.
- */
- ret = transport_lookup_cmd_lun(&cmd->se_cmd,
- scsilun_to_int(&hdr->lun));
- if (ret < 0) {
- if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
- pr_debug("Responding to non-acl'ed,"
- " non-existent or non-exported iSCSI LUN:"
- " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+ cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ scsilun_to_int(&hdr->lun));
+ if (cmd->sense_reason)
+ goto attach_cmd;
+
+ cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
+ if (cmd->sense_reason) {
+ if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
}
- send_check_condition = 1;
+
goto attach_cmd;
}
- transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
- if (transport_ret == -ENOMEM) {
+ if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
- } else if (transport_ret < 0) {
- /*
- * Unsupported SAM Opcode. CHECK_CONDITION will be sent
- * in iscsit_execute_cmd() during the CmdSN OOO Execution
- * Mechinism.
- */
- send_check_condition = 1;
- } else {
- if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
}
attach_cmd:
@@ -1000,11 +987,12 @@ attach_cmd:
*/
core_alua_check_nonop_delay(&cmd->se_cmd);
- ret = iscsit_allocate_iovecs(cmd);
- if (ret < 0)
+ if (iscsit_allocate_iovecs(cmd) < 0) {
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 0, buf, cmd);
+ }
+
/*
* Check the CmdSN against ExpCmdSN/MaxCmdSN here if
* the Immediate Bit is not set, and no Immediate
@@ -1031,10 +1019,7 @@ attach_cmd:
* If no Immediate Data is attached, it's OK to return now.
*/
if (!cmd->immediate_data) {
- if (send_check_condition)
- return 0;
-
- if (cmd->unsolicited_data) {
+ if (!cmd->sense_reason && cmd->unsolicited_data) {
iscsit_set_dataout_sequence_values(cmd);
spin_lock_bh(&cmd->dataout_timeout_lock);
@@ -1050,19 +1035,17 @@ attach_cmd:
* thread. They are processed in CmdSN order by
* iscsit_check_received_cmdsn() below.
*/
- if (send_check_condition) {
+ if (cmd->sense_reason) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
- dump_immediate_data = 1;
goto after_immediate_data;
}
/*
* Call directly into transport_generic_new_cmd() to perform
* the backend memory allocation.
*/
- ret = transport_generic_new_cmd(&cmd->se_cmd);
- if (ret < 0) {
+ cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
+ if (cmd->sense_reason) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
- dump_immediate_data = 1;
goto after_immediate_data;
}
@@ -1079,7 +1062,7 @@ after_immediate_data:
* Special case for Unsupported SAM WRITE Opcodes
* and ImmediateData=Yes.
*/
- if (dump_immediate_data) {
+ if (cmd->sense_reason) {
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return -1;
} else if (cmd->unsolicited_data) {
@@ -1272,8 +1255,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+ if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -1742,7 +1724,6 @@ static int iscsit_handle_task_mgt_cmd(
ret = transport_lookup_tmr_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
if (ret < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
goto attach;
}
@@ -1751,10 +1732,8 @@ static int iscsit_handle_task_mgt_cmd(
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
- if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ if (se_tmr->response)
goto attach;
- }
break;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
case ISCSI_TM_FUNC_CLEAR_ACA:
@@ -1763,14 +1742,12 @@ static int iscsit_handle_task_mgt_cmd(
break;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
break;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
@@ -1781,7 +1758,7 @@ static int iscsit_handle_task_mgt_cmd(
* Perform sanity checks on the ExpDataSN only if the
* TASK_REASSIGN was successful.
*/
- if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+ if (se_tmr->response)
break;
if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
@@ -1792,7 +1769,6 @@ static int iscsit_handle_task_mgt_cmd(
default:
pr_err("Unknown TMR function: 0x%02x, protocol"
" error.\n", function);
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
goto attach;
}
@@ -2360,7 +2336,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
if (!conn_p)
return;
- cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0f03b7919d7c..78d75c8567d0 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -754,9 +754,33 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
+static ssize_t lio_target_nacl_show_tag(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s", se_nacl->acl_tag);
+}
+
+static ssize_t lio_target_nacl_store_tag(
+ struct se_node_acl *se_nacl,
+ const char *page,
+ size_t count)
+{
+ int ret;
+
+ ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+TF_NACL_BASE_ATTR(lio_target, tag, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *lio_target_initiator_attrs[] = {
&lio_target_nacl_info.attr,
&lio_target_nacl_cmdsn_depth.attr,
+ &lio_target_nacl_tag.attr,
NULL,
};
@@ -803,7 +827,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
stats_cg = &se_nacl->acl_fabric_stat_group;
- stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
@@ -1268,7 +1292,7 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
*/
stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
- stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+ stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 21048dbf7d13..7a333d28d9a2 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -474,7 +474,7 @@ struct iscsi_cmd {
struct scatterlist *first_data_sg;
u32 first_data_sg_off;
u32 kmapped_nents;
-
+ sense_reason_t sense_reason;
} ____cacheline_aligned;
struct iscsi_tmr_req {
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 21f29d91a8cb..0b52a2371305 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -929,11 +929,10 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
case ISCSI_OP_SCSI_CMD:
/*
* Go ahead and send the CHECK_CONDITION status for
- * any SCSI CDB exceptions that may have occurred, also
- * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+ * any SCSI CDB exceptions that may have occurred.
*/
- if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
+ if (cmd->sense_reason) {
+ if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
@@ -956,7 +955,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* exception
*/
return transport_send_check_condition_and_sense(se_cmd,
- se_cmd->scsi_sense_reason, 0);
+ cmd->sense_reason, 0);
}
/*
* Special case for delayed CmdSN with Immediate
@@ -1013,7 +1012,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
break;
case ISCSI_OP_SCSI_TMFUNC:
- if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ if (cmd->se_cmd.se_tmr_req->response) {
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
cmd->i_state);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 17d8c20094fd..ba6091bf93fc 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
* made generic here.
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
- (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+ iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index f8dbec05d5e5..fdb632f0ab85 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -127,13 +127,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
initiatorname_param = iscsi_find_param_from_key(
INITIATORNAME, conn->param_list);
- if (!initiatorname_param)
- return -1;
-
sessiontype_param = iscsi_find_param_from_key(
SESSIONTYPE, conn->param_list);
- if (!sessiontype_param)
+ if (!initiatorname_param || !sessiontype_param) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
return -1;
+ }
sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
@@ -254,9 +254,9 @@ static int iscsi_login_zero_tsih_s1(
kfree(sess);
return -ENOMEM;
}
- spin_lock(&sess_idr_lock);
+ spin_lock_bh(&sess_idr_lock);
ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
- spin_unlock(&sess_idr_lock);
+ spin_unlock_bh(&sess_idr_lock);
if (ret < 0) {
pr_err("idr_get_new() for sess_idr failed\n");
@@ -1118,10 +1118,8 @@ new_sess_out:
idr_remove(&sess_idr, conn->sess->session_index);
spin_unlock_bh(&sess_idr_lock);
}
- if (conn->sess->sess_ops)
- kfree(conn->sess->sess_ops);
- if (conn->sess)
- kfree(conn->sess);
+ kfree(conn->sess->sess_ops);
+ kfree(conn->sess);
old_sess_out:
iscsi_stop_login_thread_timer(np);
/*
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index e9053a04f24c..9d902aefe01a 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -620,8 +620,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
login->req_buf,
payload_length,
conn);
- if (ret < 0)
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
+ }
if (login->first_request)
if (iscsi_target_check_first_request(conn, login) < 0)
@@ -636,8 +639,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
login->rsp_buf,
&login->rsp_length,
conn->param_list);
- if (ret < 0)
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
+ }
if (!login->auth_complete &&
ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 1bf7432bfcbc..d89164287d00 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -154,22 +154,18 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
}
INIT_LIST_HEAD(&param->p_list);
- param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+ param->name = kstrdup(name, GFP_KERNEL);
if (!param->name) {
pr_err("Unable to allocate memory for parameter name.\n");
goto out;
}
- param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for parameter value.\n");
goto out;
}
- memcpy(param->name, name, strlen(name));
- param->name[strlen(name)] = '\0';
- memcpy(param->value, value, strlen(value));
- param->value[strlen(value)] = '\0';
param->phase = phase;
param->scope = scope;
param->sender = sender;
@@ -635,11 +631,8 @@ void iscsi_release_param_list(struct iscsi_param_list *param_list)
list_del(&param->p_list);
kfree(param->name);
- param->name = NULL;
kfree(param->value);
- param->value = NULL;
kfree(param);
- param = NULL;
}
iscsi_release_extra_responses(param_list);
@@ -687,15 +680,12 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
{
kfree(param->value);
- param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for value.\n");
return -ENOMEM;
}
- memcpy(param->value, value, strlen(value));
- param->value[strlen(value)] = '\0';
-
pr_debug("iSCSI Parameter updated to %s=%s\n",
param->name, param->value);
return 0;
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 4a99820d063b..9d4417aae921 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -50,8 +50,8 @@ u8 iscsit_tmr_abort_task(
if (!ref_cmd) {
pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
" %hu.\n", hdr->rtt, conn->cid);
- return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
- be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
+ return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
+ iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
}
if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
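
[Editor's note] Several of the iSCSI hunks above replace plain integer comparisons of StatSN/CmdSN values with iscsi_sna_lt(), iscsi_sna_lte() and iscsi_sna_gte(), because these sequence numbers are 32-bit counters that wrap around and must be compared with serial number arithmetic (RFC 1982, as required by RFC 3720). The sketch below shows why a plain "<" misbehaves at the wrap point; sna_lt_example() is a generic illustration, not the kernel's iscsi_sna_lt() implementation.

#include <linux/types.h>

/*
 * Wrap-safe "a precedes b" for 32-bit serial numbers. Correct as long as
 * the two values are less than 2^31 apart, which the iSCSI CmdSN/StatSN
 * windows keep true in practice.
 */
static inline bool sna_lt_example(u32 a, u32 b)
{
	return a != b && (s32)(a - b) < 0;
}

/*
 * Example: ExpStatSN has just wrapped.
 *   a = 0xfffffffe (status sent before the wrap), b = 0x00000001
 *   Plain "a < b" is false, so the command would never be retired.
 *   sna_lt_example(a, b) is true, because (s32)(a - b) == -3.
 */
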
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 9d881a000e42..81289520f96b 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -66,8 +66,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
return NULL;
}
- list_for_each_entry(ts, &inactive_ts_list, ts_list)
- break;
+ ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
list_del(&ts->ts_list);
iscsit_global->inactive_ts--;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 69e0cfd98870..7ce350578c82 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -500,8 +500,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
spin_unlock_bh(&conn->immed_queue_lock);
return NULL;
}
- list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
- break;
+ qr = list_first_entry(&conn->immed_queue_list,
+ struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
@@ -575,8 +575,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
return NULL;
}
- list_for_each_entry(qr, &conn->response_queue_list, qr_list)
- break;
+ qr = list_first_entry(&conn->response_queue_list,
+ struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7b54893db665..dd7a84ee78e1 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -53,7 +53,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
- struct se_device_s *se_dev_hba_ptr;
struct tcm_loop_nexus *tl_nexus;
struct device dev;
struct Scsi_Host *sh;
diff --git a/drivers/target/sbp/Kconfig b/drivers/target/sbp/Kconfig
index 132da544eafc..1614bc710d4e 100644
--- a/drivers/target/sbp/Kconfig
+++ b/drivers/target/sbp/Kconfig
@@ -1,6 +1,6 @@
config SBP_TARGET
tristate "FireWire SBP-2 fabric module"
- depends on FIREWIRE && EXPERIMENTAL
+ depends on FIREWIRE
help
Say Y or M here to enable SCSI target functionality over FireWire.
This enables you to expose SCSI devices to other nodes on the FireWire
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 0d6d7c1f025e..6917a9e938e7 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -704,16 +704,17 @@ static void session_maintenance_work(struct work_struct *work)
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
struct sbp_target_agent *agent)
{
- __be32 state;
+ int state;
switch (tcode) {
case TCODE_READ_QUADLET_REQUEST:
pr_debug("tgt_agent AGENT_STATE READ\n");
spin_lock_bh(&agent->lock);
- state = cpu_to_be32(agent->state);
+ state = agent->state;
spin_unlock_bh(&agent->lock);
- memcpy(data, &state, sizeof(state));
+
+ *(__be32 *)data = cpu_to_be32(state);
return RCODE_COMPLETE;
@@ -1718,7 +1719,7 @@ static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
if (!nacl) {
- pr_err("Unable to alocate struct sbp_nacl\n");
+ pr_err("Unable to allocate struct sbp_nacl\n");
return NULL;
}
@@ -2207,20 +2208,23 @@ static struct se_portal_group *sbp_make_tpg(
tport->mgt_agt = sbp_management_agent_register(tport);
if (IS_ERR(tport->mgt_agt)) {
ret = PTR_ERR(tport->mgt_agt);
- kfree(tpg);
- return ERR_PTR(ret);
+ goto out_free_tpg;
}
ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, (void *)tpg,
TRANSPORT_TPG_TYPE_NORMAL);
- if (ret < 0) {
- sbp_management_agent_unregister(tport->mgt_agt);
- kfree(tpg);
- return ERR_PTR(ret);
- }
+ if (ret < 0)
+ goto out_unreg_mgt_agt;
return &tpg->se_tpg;
+
+out_unreg_mgt_agt:
+ sbp_management_agent_unregister(tport->mgt_agt);
+out_free_tpg:
+ tport->tpg = NULL;
+ kfree(tpg);
+ return ERR_PTR(ret);
}
static void sbp_drop_tpg(struct se_portal_group *se_tpg)
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 9a5f9a7aecd2..7d4ec02e29a9 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -3,8 +3,7 @@
*
* This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA)
*
- * Copyright (c) 2009-2010 Rising Tide Systems
- * Copyright (c) 2009-2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -41,7 +40,7 @@
#include "target_core_alua.h"
#include "target_core_ua.h"
-static int core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explict, int offline);
@@ -59,15 +58,17 @@ struct t10_alua_lu_gp *default_lu_gp;
*
* See spc4r17 section 6.27
*/
-int target_emulate_report_target_port_groups(struct se_cmd *cmd)
+sense_reason_t
+target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *buf;
u32 rd_len = 0, off;
int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
+
/*
* Skip over RESERVED area to first Target port group descriptor
* depending on the PARAMETER DATA FORMAT type..
@@ -81,13 +82,14 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
" small for %s header\n", cmd->data_length,
(ext_hdr) ? "extended" : "normal");
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/*
* Check if the Target port group and Target port descriptor list
@@ -160,7 +162,7 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* Set the RETURN DATA LENGTH set in the header of the DataIN Payload
*/
@@ -200,32 +202,33 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
*
* See spc4r17 section 6.35
*/
-int target_emulate_set_target_port_groups(struct se_cmd *cmd)
+sense_reason_t
+target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_port *port, *l_port = cmd->se_lun->lun_sep;
struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf;
unsigned char *ptr;
+ sense_reason_t rc = TCM_NO_SENSE;
u32 len = 4; /* Skip over RESERVED area in header */
- int alua_access_state, primary = 0, rc;
+ int alua_access_state, primary = 0;
u16 tg_pt_id, rtpi;
- if (!l_port) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
+ if (!l_port)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
if (cmd->data_length < 4) {
pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
" small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
@@ -234,8 +237,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -243,24 +245,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
- rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!rc) {
+ if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
ptr = &buf[4]; /* Skip over RESERVED area in header */
while (len < cmd->data_length) {
+ bool found = false;
alua_access_state = (ptr[0] & 0x0f);
/*
* Check the received ALUA access state, and determine if
@@ -268,7 +268,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
* access state.
*/
rc = core_alua_check_transition(alua_access_state, &primary);
- if (rc != 0) {
+ if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
* an invalid combination of target port asymmetric
@@ -279,11 +279,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
goto out;
}
- rc = -1;
+
/*
* If the ASYMMETRIC ACCESS STATE field (see table 267)
* specifies a primary target port asymmetric access state,
@@ -303,9 +301,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
* Locate the matching target port group ID from
* the global tg_pt_gp list
*/
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &su_dev->t10_alua.tg_pt_gps_list,
+ &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
@@ -315,27 +313,20 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
- rc = core_alua_do_port_transition(tg_pt_gp,
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+ if (!core_alua_do_port_transition(tg_pt_gp,
dev, l_port, nacl,
- alua_access_state, 1);
+ alua_access_state, 1))
+ found = true;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
break;
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
- /*
- * If not matching target port group ID can be located
- * throw an exception with ASCQ: INVALID_PARAMETER_LIST
- */
- if (rc != 0) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
- goto out;
- }
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
/*
* Extact the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -354,25 +345,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
continue;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+
spin_unlock(&dev->se_port_lock);
- rc = core_alua_set_tg_pt_secondary_state(
- tg_pt_gp_mem, port, 1, 1);
+ if (!core_alua_set_tg_pt_secondary_state(
+ tg_pt_gp_mem, port, 1, 1))
+ found = true;
spin_lock(&dev->se_port_lock);
break;
}
spin_unlock(&dev->se_port_lock);
- /*
- * If not matching relative target port identifier can
- * be located, throw an exception with ASCQ:
- * INVALID_PARAMETER_LIST
- */
- if (rc != 0) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
- goto out;
- }
+ }
+
+ if (!found) {
+ rc = TCM_INVALID_PARAMETER_LIST;
+ goto out;
}
ptr += 4;
@@ -523,40 +511,27 @@ static inline int core_alua_state_transition(
}
/*
- * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
- * in transport_cmd_sequencer(). This function is assigned to
- * struct t10_alua *->state_check() in core_setup_alua()
- */
-static int core_alua_state_check_nop(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
-{
- return 0;
-}
-
-/*
- * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
- * This function is assigned to struct t10_alua *->state_check() in
- * core_setup_alua()
- *
- * Also, this function can return three different return codes to
- * signal transport_generic_cmd_sequencer()
- *
* return 1: Is used to signal LUN not accecsable, and check condition/not ready
* return 0: Used to signal success
* reutrn -1: Used to signal failure, and invalid cdb field
*/
-static int core_alua_state_check(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+sense_reason_t
+target_alua_state_check(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
struct se_lun *lun = cmd->se_lun;
struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
+ u8 alua_ascq;
+ int ret;
+
+ if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+ return 0;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return 0;
if (!port)
return 0;
@@ -565,11 +540,11 @@ static int core_alua_state_check(
* access state: OFFLINE
*/
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
- *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
- *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
- return 1;
+ alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+ ret = 1;
+ goto out;
}
/*
* Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -594,14 +569,18 @@ static int core_alua_state_check(
switch (out_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
- return core_alua_state_nonoptimized(cmd, cdb,
- nonop_delay_msecs, alua_ascq);
+ ret = core_alua_state_nonoptimized(cmd, cdb,
+ nonop_delay_msecs, &alua_ascq);
+ break;
case ALUA_ACCESS_STATE_STANDBY:
- return core_alua_state_standby(cmd, cdb, alua_ascq);
+ ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+ break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+ ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+ break;
case ALUA_ACCESS_STATE_TRANSITION:
- return core_alua_state_transition(cmd, cdb, alua_ascq);
+ ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+ break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
* handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -610,7 +589,24 @@ static int core_alua_state_check(
default:
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+out:
+ if (ret > 0) {
+ /*
+ * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+ * The ALUA additional sense code qualifier (ASCQ) is determined
+ * by the ALUA primary or secondary access state..
+ */
+ pr_debug("[%s]: ALUA TG Port not available, "
+ "SenseKey: NOT_READY, ASC/ASCQ: "
+ "0x04/0x%02x\n",
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+ cmd->scsi_asc = 0x04;
+ cmd->scsi_ascq = alua_ascq;
+ return TCM_CHECK_CONDITION_NOT_READY;
}
return 0;
@@ -619,7 +615,8 @@ static int core_alua_state_check(
/*
* Check implict and explict ALUA state change request.
*/
-static int core_alua_check_transition(int state, int *primary)
+static sense_reason_t
+core_alua_check_transition(int state, int *primary)
{
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
@@ -641,7 +638,7 @@ static int core_alua_check_transition(int state, int *primary)
break;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
return 0;
@@ -758,8 +755,7 @@ static int core_alua_update_tpg_primary_metadata(
int primary_state,
unsigned char *md_buf)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
- struct t10_wwn *wwn = &su_dev->t10_wwn;
+ struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
char path[ALUA_METADATA_PATH_LEN];
int len;
@@ -899,7 +895,6 @@ int core_alua_do_port_transition(
{
struct se_device *dev;
struct se_port *port;
- struct se_subsystem_dev *su_dev;
struct se_node_acl *nacl;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
@@ -949,14 +944,13 @@ int core_alua_do_port_transition(
lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
- su_dev = dev->se_sub_dev;
atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&lu_gp->lu_gp_lock);
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &su_dev->t10_alua.tg_pt_gps_list,
+ &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
@@ -981,7 +975,7 @@ int core_alua_do_port_transition(
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@@ -989,11 +983,11 @@ int core_alua_do_port_transition(
core_alua_do_transition_tg_pt(tg_pt_gp, port,
nacl, md_buf, new_state, explict);
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -1268,14 +1262,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return;
-
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
return;
@@ -1358,10 +1347,8 @@ void __core_alua_drop_lu_gp_mem(
spin_unlock(&lu_gp->lu_gp_lock);
}
-struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
- struct se_subsystem_dev *su_dev,
- const char *name,
- int def_group)
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
+ const char *name, int def_group)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -1375,7 +1362,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
- tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+ tg_pt_gp->tg_pt_gp_dev = dev;
tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
@@ -1392,14 +1379,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
if (def_group) {
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_id =
- su_dev->t10_alua.alua_tg_pt_gps_counter++;
+ dev->t10_alua.alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1;
- su_dev->t10_alua.alua_tg_pt_gps_count++;
+ dev->t10_alua.alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &su_dev->t10_alua.tg_pt_gps_list);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ &dev->t10_alua.tg_pt_gps_list);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
return tg_pt_gp;
@@ -1409,9 +1396,10 @@ int core_alua_set_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
u16 tg_pt_gp_id)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
u16 tg_pt_gp_id_tmp;
+
/*
* The tg_pt_gp->tg_pt_gp_id may only be set once..
*/
@@ -1421,19 +1409,19 @@ int core_alua_set_tg_pt_gp_id(
return -EINVAL;
}
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
return -ENOSPC;
}
again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
- su_dev->t10_alua.alua_tg_pt_gps_counter++;
+ dev->t10_alua.alua_tg_pt_gps_counter++;
- list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
+ list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
if (!tg_pt_gp_id)
@@ -1441,7 +1429,7 @@ again:
pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return -EINVAL;
}
}
@@ -1449,9 +1437,9 @@ again:
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &su_dev->t10_alua.tg_pt_gps_list);
- su_dev->t10_alua.alua_tg_pt_gps_count++;
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ &dev->t10_alua.tg_pt_gps_list);
+ dev->t10_alua.alua_tg_pt_gps_count++;
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return 0;
}
@@ -1480,8 +1468,9 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
void core_alua_free_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+
/*
* Once we have reached this point, config_item_put() has already
* been called from target_core_alua_drop_tg_pt_gp().
@@ -1490,10 +1479,11 @@ void core_alua_free_tg_pt_gp(
* no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_del(&tg_pt_gp->tg_pt_gp_list);
- su_dev->t10_alua.alua_tg_pt_gps_counter--;
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ dev->t10_alua.alua_tg_pt_gps_counter--;
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
@@ -1502,6 +1492,7 @@ void core_alua_free_tg_pt_gp(
*/
while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
cpu_relax();
+
/*
* Release reference to struct t10_alua_tg_pt_gp from all associated
* struct se_port.
@@ -1525,9 +1516,9 @@ void core_alua_free_tg_pt_gp(
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
+ if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
} else
tg_pt_gp_mem->tg_pt_gp = NULL;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1541,14 +1532,9 @@ void core_alua_free_tg_pt_gp(
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return;
-
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
return;
@@ -1574,25 +1560,24 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
}
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
- struct se_subsystem_dev *su_dev,
- const char *name)
+ struct se_device *dev, const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp;
}
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return NULL;
}
@@ -1600,11 +1585,11 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
static void core_alua_put_tg_pt_gp_from_name(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
/*
@@ -1640,16 +1625,11 @@ static void __core_alua_drop_tg_pt_gp_mem(
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct config_item *tg_pt_ci;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return len;
-
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
return len;
@@ -1683,7 +1663,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
{
struct se_portal_group *tpg;
struct se_lun *lun;
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct se_device *dev = port->sep_lun->lun_se_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
@@ -1692,13 +1672,9 @@ ssize_t core_alua_store_tg_pt_gp_info(
tpg = port->sep_tpg;
lun = port->sep_lun;
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
- pr_warn("SPC3_ALUA_EMULATED not enabled for"
- " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
- tpg->se_tpg_tfo->tpg_get_tag(tpg),
- config_item_name(&lun->lun_group.cg_item));
- return -EINVAL;
- }
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ return 0;
if (count > TG_PT_GROUP_NAME_BUF) {
pr_err("ALUA Target Port Group alias too large!\n");
@@ -1716,18 +1692,11 @@ ssize_t core_alua_store_tg_pt_gp_info(
* struct t10_alua_tg_pt_gp. This reference is released with
* core_alua_put_tg_pt_gp_from_name() below.
*/
- tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+ tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
strstrip(buf));
if (!tg_pt_gp_new)
return -ENODEV;
}
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem) {
- if (tg_pt_gp_new)
- core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
- pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
- return -EINVAL;
- }
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
@@ -1750,7 +1719,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return count;
@@ -2054,32 +2023,12 @@ ssize_t core_alua_store_secondary_write_metadata(
return count;
}
-int core_setup_alua(struct se_device *dev, int force_pt)
+int core_setup_alua(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
- struct t10_alua_lu_gp_member *lu_gp_mem;
- /*
- * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
- * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
- * cause a problem because libata and some SATA RAID HBAs appear
- * under Linux/SCSI, but emulate SCSI logic themselves.
- */
- if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
- alua->alua_type = SPC_ALUA_PASSTHROUGH;
- alua->alua_state_check = &core_alua_state_check_nop;
- pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
- " emulation\n", dev->transport->name);
- return 0;
- }
- /*
- * If SPC-3 or above is reported by real or emulated struct se_device,
- * use emulated ALUA.
- */
- if (dev->transport->get_device_rev(dev) >= SCSI_3) {
- pr_debug("%s: Enabling ALUA Emulation for SPC-3"
- " device\n", dev->transport->name);
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+ !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+
/*
* Associate this struct se_device with the default ALUA
* LUN Group.
@@ -2088,8 +2037,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
if (IS_ERR(lu_gp_mem))
return PTR_ERR(lu_gp_mem);
- alua->alua_type = SPC3_ALUA_EMULATED;
- alua->alua_state_check = &core_alua_state_check;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem,
default_lu_gp);
@@ -2098,11 +2045,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
dev->transport->name);
- } else {
- alua->alua_type = SPC2_ALUA_DISABLED;
- alua->alua_state_check = &core_alua_state_check_nop;
- pr_debug("%s: Disabling ALUA Emulation for SPC-2"
- " device\n", dev->transport->name);
}
return 0;
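The rewritten core_setup_alua() reduces to one gating condition: default LU group membership is only set up for devices that are neither pSCSI passthrough nor sitting on an internal-use HBA. The same test, pulled out as a stand-alone predicate for clarity (the helper name is illustrative):

static bool example_dev_wants_alua(struct se_device *dev)
{
        /* mirrors the condition in the core_setup_alua() hunk above */
        return dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
               !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
}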
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index f920c170d47b..e539c3e7f4ad 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -72,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
-extern int target_emulate_report_target_port_groups(struct se_cmd *);
-extern int target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
- struct se_subsystem_dev *, const char *, int);
+ struct se_device *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
struct se_port *);
@@ -131,6 +131,7 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
const char *, size_t);
-extern int core_setup_alua(struct se_device *, int);
+extern int core_setup_alua(struct se_device *);
+extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
#endif /* TARGET_CORE_ALUA_H */
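With the prototype changes above, the REPORT/SET TARGET PORT GROUPS emulation entry points hand back a sense_reason_t instead of an errno. A hedged sketch of how a CDB dispatcher could consume the new return type; the dispatcher itself is illustrative, and MI_REPORT_TARGET_PGS plus TCM_UNSUPPORTED_SCSI_OPCODE are assumed from the surrounding tree:

static sense_reason_t example_dispatch_maintenance_in(struct se_cmd *cmd,
                                                      unsigned char service_action)
{
        switch (service_action) {
        case MI_REPORT_TARGET_PGS:
                /* the reason code propagates straight back to the caller */
                return target_emulate_report_target_port_groups(cmd);
        default:
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
}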
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c123327499a3..4efb61b8d001 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3,8 +3,7 @@
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
- * Copyright (c) 2008-2011 Rising Tide Systems
- * Copyright (c) 2008-2011 Linux-iSCSI.org
+ * (c) Copyright 2008-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -565,21 +564,8 @@ static ssize_t target_core_dev_show_attr_##_name( \
struct se_dev_attrib *da, \
char *page) \
{ \
- struct se_device *dev; \
- struct se_subsystem_dev *se_dev = da->da_sub_dev; \
- ssize_t rb; \
- \
- spin_lock(&se_dev->se_dev_lock); \
- dev = se_dev->se_dev_ptr; \
- if (!dev) { \
- spin_unlock(&se_dev->se_dev_lock); \
- return -ENODEV; \
- } \
- rb = snprintf(page, PAGE_SIZE, "%u\n", \
- (u32)dev->se_sub_dev->se_dev_attrib._name); \
- spin_unlock(&se_dev->se_dev_lock); \
- \
- return rb; \
+ return snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)da->da_dev->dev_attrib._name); \
}
#define DEF_DEV_ATTRIB_STORE(_name) \
@@ -588,26 +574,16 @@ static ssize_t target_core_dev_store_attr_##_name( \
const char *page, \
size_t count) \
{ \
- struct se_device *dev; \
- struct se_subsystem_dev *se_dev = da->da_sub_dev; \
unsigned long val; \
int ret; \
\
- spin_lock(&se_dev->se_dev_lock); \
- dev = se_dev->se_dev_ptr; \
- if (!dev) { \
- spin_unlock(&se_dev->se_dev_lock); \
- return -ENODEV; \
- } \
ret = strict_strtoul(page, 0, &val); \
if (ret < 0) { \
- spin_unlock(&se_dev->se_dev_lock); \
pr_err("strict_strtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
- ret = se_dev_set_##_name(dev, (u32)val); \
- spin_unlock(&se_dev->se_dev_lock); \
+ ret = se_dev_set_##_name(da->da_dev, (u32)val); \
\
return (!ret) ? count : -EINVAL; \
}
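The slimmed-down attribute macros above no longer take se_dev_lock or test a se_dev_ptr. Expanding the show macro by hand for one attribute (block_size, chosen purely as an example) now yields roughly:

static ssize_t target_core_dev_show_attr_block_size(
        struct se_dev_attrib *da, char *page)
{
        /* direct read through da->da_dev; no locking or -ENODEV path remains */
        return snprintf(page, PAGE_SIZE, "%u\n",
                        (u32)da->da_dev->dev_attrib.block_size);
}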
@@ -699,6 +675,9 @@ SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(unmap_granularity_alignment);
SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(max_write_same_len);
+SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
+
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
@@ -724,6 +703,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_max_unmap_block_desc_count.attr,
&target_core_dev_attrib_unmap_granularity.attr,
&target_core_dev_attrib_unmap_granularity_alignment.attr,
+ &target_core_dev_attrib_max_write_same_len.attr,
NULL,
};
@@ -764,13 +744,6 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
struct t10_wwn *t10_wwn,
char *page)
{
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
-
- dev = se_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
&t10_wwn->unit_serial[0]);
}
@@ -780,8 +753,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
const char *page,
size_t count)
{
- struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
+ struct se_device *dev = t10_wwn->t10_dev;
unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
/*
@@ -794,7 +766,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* it is doing 'the right thing' wrt a world wide unique
* VPD Unit Serial Number that OS dependent multipath can depend on.
*/
- if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+ if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
pr_err("Underlying SCSI device firmware provided VPD"
" Unit Serial, ignoring request\n");
return -EOPNOTSUPP;
@@ -811,15 +783,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* (underneath the initiator side OS dependent multipath code)
* could cause negative effects.
*/
- dev = su_dev->se_dev_ptr;
- if (dev) {
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- pr_err("Unable to set VPD Unit Serial while"
- " active %d $FABRIC_MOD exports exist\n",
- atomic_read(&dev->dev_export_obj.obj_access_count));
- return -EINVAL;
- }
+ if (dev->export_count) {
+ pr_err("Unable to set VPD Unit Serial while"
+ " active %d $FABRIC_MOD exports exist\n",
+ dev->export_count);
+ return -EINVAL;
}
+
/*
* This currently assumes ASCII encoding for emulated VPD Unit Serial.
*
@@ -828,12 +798,12 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
*/
memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
- snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+ snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
"%s", strstrip(buf));
- su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+ dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
- " %s\n", su_dev->t10_wwn.unit_serial);
+ " %s\n", dev->t10_wwn.unit_serial);
return count;
}
@@ -847,16 +817,10 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
struct t10_wwn *t10_wwn,
char *page)
{
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
struct t10_vpd *vpd;
unsigned char buf[VPD_TMP_BUF_SIZE];
ssize_t len = 0;
- dev = se_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
memset(buf, 0, VPD_TMP_BUF_SIZE);
spin_lock(&t10_wwn->t10_vpd_lock);
@@ -894,16 +858,10 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
struct t10_wwn *t10_wwn, \
char *page) \
{ \
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
- struct se_device *dev; \
struct t10_vpd *vpd; \
unsigned char buf[VPD_TMP_BUF_SIZE]; \
ssize_t len = 0; \
\
- dev = se_dev->se_dev_ptr; \
- if (!dev) \
- return -ENODEV; \
- \
spin_lock(&t10_wwn->t10_vpd_lock); \
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
if (vpd->association != _assoc) \
@@ -1003,7 +961,7 @@ static struct config_item_type target_core_dev_wwn_cit = {
/* Start functions for struct config_item_type target_core_dev_pr_cit */
-CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
#define SE_DEV_PR_ATTR(_name, _mode) \
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
@@ -1015,13 +973,8 @@ static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_pr_show_attr_##_name);
-/*
- * res_holder
- */
-static ssize_t target_core_dev_pr_show_spc3_res(
- struct se_device *dev,
- char *page,
- ssize_t *len)
+static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
+ char *page)
{
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
@@ -1030,134 +983,82 @@ static ssize_t target_core_dev_pr_show_spc3_res(
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
- *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return *len;
- }
+ if (!pr_reg)
+ return sprintf(page, "No SPC-3 Reservation holder\n");
+
se_nacl = pr_reg->pr_reg_nacl;
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+ return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
- spin_unlock(&dev->dev_reservation_lock);
-
- return *len;
}
-static ssize_t target_core_dev_pr_show_spc2_res(
- struct se_device *dev,
- char *page,
- ssize_t *len)
+static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
+ char *page)
{
struct se_node_acl *se_nacl;
+ ssize_t len;
- spin_lock(&dev->dev_reservation_lock);
se_nacl = dev->dev_reserved_node_acl;
- if (!se_nacl) {
- *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return *len;
+ if (se_nacl) {
+ len = sprintf(page,
+ "SPC-2 Reservation: %s Initiator: %s\n",
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_nacl->initiatorname);
+ } else {
+ len = sprintf(page, "No SPC-2 Reservation holder\n");
}
- *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
- se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
- se_nacl->initiatorname);
- spin_unlock(&dev->dev_reservation_lock);
-
- return *len;
+ return len;
}
-static ssize_t target_core_dev_pr_show_attr_res_holder(
- struct se_subsystem_dev *su_dev,
- char *page)
+static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
+ char *page)
{
- ssize_t len = 0;
+ int ret;
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- switch (su_dev->t10_pr.res_type) {
- case SPC3_PERSISTENT_RESERVATIONS:
- target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
- page, &len);
- break;
- case SPC2_RESERVATIONS:
- target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
- page, &len);
- break;
- case SPC_PASSTHROUGH:
- len += sprintf(page+len, "Passthrough\n");
- break;
- default:
- len += sprintf(page+len, "Unknown\n");
- break;
- }
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return sprintf(page, "Passthrough\n");
- return len;
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ ret = target_core_dev_pr_show_spc2_res(dev, page);
+ else
+ ret = target_core_dev_pr_show_spc3_res(dev, page);
+ spin_unlock(&dev->dev_reservation_lock);
+ return ret;
}
SE_DEV_PR_ATTR_RO(res_holder);
-/*
- * res_pr_all_tgt_pts
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
- struct t10_pr_registration *pr_reg;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
- pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
+ if (!dev->dev_pr_res_holder) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
- }
- /*
- * See All Target Ports (ALL_TG_PT) bit in spcr17, section 6.14.3
- * Basic PERSISTENT RESERVER OUT parameter list, page 290
- */
- if (pr_reg->pr_reg_all_tg_pt)
+ } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
len = sprintf(page, "SPC-3 Reservation: All Target"
" Ports registration\n");
- else
+ } else {
len = sprintf(page, "SPC-3 Reservation: Single"
" Target Port registration\n");
- spin_unlock(&dev->dev_reservation_lock);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
-/*
- * res_pr_generation
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return 0;
-
- return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
+ return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
}
SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1166,10 +1067,8 @@ SE_DEV_PR_ATTR_RO(res_pr_generation);
* res_pr_holder_tg_port
*/
static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
struct se_node_acl *se_nacl;
struct se_lun *lun;
struct se_portal_group *se_tpg;
@@ -1177,20 +1076,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
struct target_core_fabric_ops *tfo;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
+ goto out_unlock;
}
+
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
lun = pr_reg->pr_reg_tg_pt_lun;
@@ -1204,19 +1096,16 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
" %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
tfo->get_fabric_name(), lun->unpacked_lun);
- spin_unlock(&dev->dev_reservation_lock);
+out_unlock:
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
-/*
- * res_pr_registered_i_pts
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
struct target_core_fabric_ops *tfo;
struct t10_pr_registration *pr_reg;
@@ -1225,16 +1114,10 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
ssize_t len = 0;
int reg_count = 0, prf_isid;
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
memset(buf, 0, 384);
@@ -1254,7 +1137,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
len += sprintf(page+len, "%s", buf);
reg_count++;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
if (!reg_count)
len += sprintf(page+len, "None\n");
@@ -1264,88 +1147,48 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
-/*
- * res_pr_type
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_type(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
struct t10_pr_registration *pr_reg;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
+ if (pr_reg) {
+ len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+ } else {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
}
- len = sprintf(page, "SPC-3 Reservation Type: %s\n",
- core_scsi3_pr_dump_type(pr_reg->pr_res_type));
- spin_unlock(&dev->dev_reservation_lock);
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_type);
-/*
- * res_type
- */
static ssize_t target_core_dev_pr_show_attr_res_type(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- ssize_t len = 0;
-
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- switch (su_dev->t10_pr.res_type) {
- case SPC3_PERSISTENT_RESERVATIONS:
- len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
- break;
- case SPC2_RESERVATIONS:
- len = sprintf(page, "SPC2_RESERVATIONS\n");
- break;
- case SPC_PASSTHROUGH:
- len = sprintf(page, "SPC_PASSTHROUGH\n");
- break;
- default:
- len = sprintf(page, "UNKNOWN\n");
- break;
- }
-
- return len;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return sprintf(page, "SPC_PASSTHROUGH\n");
+ else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ return sprintf(page, "SPC2_RESERVATIONS\n");
+ else
+ return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
SE_DEV_PR_ATTR_RO(res_type);
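As the res_type hunk shows, the reservations model is now derived on the fly from the transport type and dev_reservation_flags rather than read from a cached res_type field. The same decision as a stand-alone helper (the name is illustrative):

static const char *example_reservation_model(struct se_device *dev)
{
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return "SPC_PASSTHROUGH";
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return "SPC2_RESERVATIONS";
        return "SPC3_PERSISTENT_RESERVATIONS";
}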
-/*
- * res_aptpl_active
- */
-
static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
- (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
+ (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1354,13 +1197,9 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
* res_aptpl_metadata
*/
static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1392,11 +1231,10 @@ static match_table_t tokens = {
};
static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
- struct se_subsystem_dev *su_dev,
+ struct se_device *dev,
const char *page,
size_t count)
{
- struct se_device *dev;
unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
unsigned char *t_fabric = NULL, *t_port = NULL;
char *orig, *ptr, *arg_p, *opts;
@@ -1408,14 +1246,12 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u16 port_rpti = 0, tpgt = 0;
u8 type = 0, scope;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return 0;
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0;
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_debug("Unable to process APTPL metadata while"
" active fabric exports exist\n");
return -EINVAL;
@@ -1558,7 +1394,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
goto out;
}
- ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
+ ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
@@ -1573,7 +1409,7 @@ out:
SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
-CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
&target_core_dev_pr_res_holder.attr,
@@ -1605,18 +1441,14 @@ static struct config_item_type target_core_dev_pr_cit = {
static ssize_t target_core_show_dev_info(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
+ struct se_subsystem_api *t = dev->transport;
int bl = 0;
ssize_t read_bytes = 0;
- if (!se_dev->se_dev_ptr)
- return -ENODEV;
-
- transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+ transport_dump_dev_state(dev, page, &bl);
read_bytes += bl;
- read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+ read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
return read_bytes;
}
@@ -1633,17 +1465,10 @@ static ssize_t target_core_store_dev_control(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
+ struct se_subsystem_api *t = dev->transport;
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate struct se_subsystem_dev>se"
- "_dev_su_ptr\n");
- return -EINVAL;
- }
-
- return t->set_configfs_dev_params(hba, se_dev, page, count);
+ return t->set_configfs_dev_params(dev, page, count);
}
static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -1656,12 +1481,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
static ssize_t target_core_show_dev_alias(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
+ struct se_device *dev = p;
- if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+ if (!(dev->dev_flags & DF_USING_ALIAS))
return 0;
- return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+ return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}
static ssize_t target_core_store_dev_alias(
@@ -1669,8 +1494,8 @@ static ssize_t target_core_store_dev_alias(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
if (count > (SE_DEV_ALIAS_LEN-1)) {
@@ -1680,19 +1505,18 @@ static ssize_t target_core_store_dev_alias(
return -EINVAL;
}
- read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
- "%s", page);
+ read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
if (!read_bytes)
return -EINVAL;
- if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
- se_dev->se_dev_alias[read_bytes - 1] = '\0';
+ if (dev->dev_alias[read_bytes - 1] == '\n')
+ dev->dev_alias[read_bytes - 1] = '\0';
- se_dev->su_dev_flags |= SDF_USING_ALIAS;
+ dev->dev_flags |= DF_USING_ALIAS;
pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&se_dev->se_dev_group.cg_item),
- se_dev->se_dev_alias);
+ config_item_name(&dev->dev_group.cg_item),
+ dev->dev_alias);
return read_bytes;
}
@@ -1707,12 +1531,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
+ struct se_device *dev = p;
- if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+ if (!(dev->dev_flags & DF_USING_UDEV_PATH))
return 0;
- return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+ return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}
static ssize_t target_core_store_dev_udev_path(
@@ -1720,8 +1544,8 @@ static ssize_t target_core_store_dev_udev_path(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
if (count > (SE_UDEV_PATH_LEN-1)) {
@@ -1731,19 +1555,19 @@ static ssize_t target_core_store_dev_udev_path(
return -EINVAL;
}
- read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+ read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
if (!read_bytes)
return -EINVAL;
- if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
- se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+ if (dev->udev_path[read_bytes - 1] == '\n')
+ dev->udev_path[read_bytes - 1] = '\0';
- se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+ dev->dev_flags |= DF_USING_UDEV_PATH;
pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&se_dev->se_dev_group.cg_item),
- se_dev->se_dev_udev_path);
+ config_item_name(&dev->dev_group.cg_item),
+ dev->udev_path);
return read_bytes;
}
@@ -1761,11 +1585,9 @@ static ssize_t target_core_store_dev_enable(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_device *dev;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
char *ptr;
+ int ret;
ptr = strstr(page, "1");
if (!ptr) {
@@ -1773,25 +1595,10 @@ static ssize_t target_core_store_dev_enable(
" is \"1\"\n");
return -EINVAL;
}
- if (se_dev->se_dev_ptr) {
- pr_err("se_dev->se_dev_ptr already set for storage"
- " object\n");
- return -EEXIST;
- }
-
- if (t->check_configfs_dev_params(hba, se_dev) < 0)
- return -EINVAL;
-
- dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
- else if (!dev)
- return -EINVAL;
-
- se_dev->se_dev_ptr = dev;
- pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
- " %p\n", se_dev->se_dev_ptr);
+ ret = target_configure_device(dev);
+ if (ret)
+ return ret;
return count;
}
@@ -1805,26 +1612,15 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
- struct se_device *dev;
- struct se_subsystem_dev *su_dev = p;
+ struct se_device *dev = p;
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
- return len;
-
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!lu_gp_mem) {
- pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
- " pointer\n");
- return -EINVAL;
- }
+ if (!lu_gp_mem)
+ return 0;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
@@ -1843,24 +1639,17 @@ static ssize_t target_core_store_alua_lu_gp(
const char *page,
size_t count)
{
- struct se_device *dev;
- struct se_subsystem_dev *su_dev = p;
- struct se_hba *hba = su_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
unsigned char buf[LU_GROUP_NAME_BUF];
int move = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!lu_gp_mem)
+ return 0;
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
- pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
- config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item));
- return -EINVAL;
- }
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
@@ -1881,14 +1670,6 @@ static ssize_t target_core_store_alua_lu_gp(
if (!lu_gp_new)
return -ENODEV;
}
- lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!lu_gp_mem) {
- if (lu_gp_new)
- core_alua_put_lu_gp_from_name(lu_gp_new);
- pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
- " pointer\n");
- return -EINVAL;
- }
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
@@ -1902,7 +1683,7 @@ static ssize_t target_core_store_alua_lu_gp(
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp->lu_gp_group.cg_item),
lu_gp->lu_gp_id);
@@ -1927,7 +1708,7 @@ static ssize_t target_core_store_alua_lu_gp(
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp_new->lu_gp_group.cg_item),
lu_gp_new->lu_gp_id);
@@ -1955,69 +1736,44 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
static void target_core_dev_release(struct config_item *item)
{
- struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
- struct se_subsystem_dev, se_dev_group);
- struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
- struct se_subsystem_api *t = hba->transport;
- struct config_group *dev_cg = &se_dev->se_dev_group;
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
kfree(dev_cg->default_groups);
- /*
- * This pointer will set when the storage is enabled with:
- *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
- */
- if (se_dev->se_dev_ptr) {
- pr_debug("Target_Core_ConfigFS: Calling se_free_"
- "virtual_device() for se_dev_ptr: %p\n",
- se_dev->se_dev_ptr);
-
- se_free_virtual_device(se_dev->se_dev_ptr, hba);
- } else {
- /*
- * Release struct se_subsystem_dev->se_dev_su_ptr..
- */
- pr_debug("Target_Core_ConfigFS: Calling t->free_"
- "device() for se_dev_su_ptr: %p\n",
- se_dev->se_dev_su_ptr);
-
- t->free_device(se_dev->se_dev_su_ptr);
- }
-
- pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
- "_dev_t: %p\n", se_dev);
- kfree(se_dev);
+ target_free_device(dev);
}
static ssize_t target_core_dev_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(item), struct se_subsystem_dev,
- se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
if (!tc_attr->show)
return -EINVAL;
- return tc_attr->show(se_dev, page);
+ return tc_attr->show(dev, page);
}
static ssize_t target_core_dev_store(struct config_item *item,
struct configfs_attribute *attr,
const char *page, size_t count)
{
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(item), struct se_subsystem_dev,
- se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
if (!tc_attr->store)
return -EINVAL;
- return tc_attr->store(se_dev, page, count);
+ return tc_attr->store(dev, page, count);
}
static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2107,7 +1863,6 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
{
struct se_device *dev;
struct se_hba *hba;
- struct se_subsystem_dev *su_dev;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[LU_GROUP_NAME_BUF];
@@ -2117,12 +1872,11 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
- su_dev = dev->se_sub_dev;
- hba = su_dev->se_dev_hba;
+ hba = dev->se_hba;
cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item));
+ config_item_name(&dev->dev_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
@@ -2260,7 +2014,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
const char *page,
size_t count)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
unsigned long tmp;
int new_state, ret;
@@ -2284,7 +2038,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
return -EINVAL;
}
- ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+ ret = core_alua_do_port_transition(tg_pt_gp, dev,
NULL, NULL, new_state, 0);
return (!ret) ? count : -EINVAL;
}
@@ -2620,11 +2374,10 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
struct t10_alua *alua = container_of(group, struct t10_alua,
alua_tg_pt_gps_group);
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
struct config_group *alua_tg_pt_gp_cg = NULL;
struct config_item *alua_tg_pt_gp_ci = NULL;
- tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
if (!tg_pt_gp)
return NULL;
@@ -2721,10 +2474,10 @@ static struct config_group *target_core_make_subdev(
const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_subsystem_dev *se_dev;
struct se_subsystem_api *t;
struct config_item *hba_ci = &group->cg_item;
struct se_hba *hba = item_to_hba(hba_ci);
+ struct se_device *dev;
struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
struct config_group *dev_stat_grp = NULL;
int errno = -ENOMEM, ret;
@@ -2737,120 +2490,80 @@ static struct config_group *target_core_make_subdev(
*/
t = hba->transport;
- se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!se_dev) {
- pr_err("Unable to allocate memory for"
- " struct se_subsystem_dev\n");
- goto unlock;
- }
- INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
- spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_pr.registration_lock);
- spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
- INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
- spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
- spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
- se_dev->t10_wwn.t10_sub_dev = se_dev;
- se_dev->t10_alua.t10_sub_dev = se_dev;
- se_dev->se_dev_attrib.da_sub_dev = se_dev;
-
- se_dev->se_dev_hba = hba;
- dev_cg = &se_dev->se_dev_group;
-
- dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
+ dev = target_alloc_device(hba, name);
+ if (!dev)
+ goto out_unlock;
+
+ dev_cg = &dev->dev_group;
+
+ dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
GFP_KERNEL);
if (!dev_cg->default_groups)
- goto out;
- /*
- * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
- * for ->allocate_virtdevice()
- *
- * se_dev->se_dev_ptr will be set after ->create_virtdev()
- * has been called successfully in the next level up in the
- * configfs tree for device object's struct config_group.
- */
- se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate subsystem dependent pointer"
- " from allocate_virtdevice()\n");
- goto out;
- }
+ goto out_free_device;
- config_group_init_type_name(&se_dev->se_dev_group, name,
- &target_core_dev_cit);
- config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+ config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
+ config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
&target_core_dev_attrib_cit);
- config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+ config_group_init_type_name(&dev->dev_pr_group, "pr",
&target_core_dev_pr_cit);
- config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+ config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
&target_core_dev_wwn_cit);
- config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+ config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
"alua", &target_core_alua_tg_pt_gps_cit);
- config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
+ config_group_init_type_name(&dev->dev_stat_grps.stat_group,
"statistics", &target_core_stat_cit);
- dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
- dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
- dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
- dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
- dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
+ dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
+ dev_cg->default_groups[1] = &dev->dev_pr_group;
+ dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
+ dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
+ dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
dev_cg->default_groups[5] = NULL;
/*
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
- tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
if (!tg_pt_gp)
- goto out;
+ goto out_free_dev_cg_default_groups;
+ dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
- tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
- tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
+ tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!tg_pt_gp_cg->default_groups) {
pr_err("Unable to allocate tg_pt_gp_cg->"
"default_groups\n");
- goto out;
+ goto out_free_tg_pt_gp;
}
config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
- se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
- dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
- dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
+ dev_stat_grp = &dev->dev_stat_grps.stat_group;
+ dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
GFP_KERNEL);
if (!dev_stat_grp->default_groups) {
pr_err("Unable to allocate dev_stat_grp->default_groups\n");
- goto out;
+ goto out_free_tg_pt_gp_cg_default_groups;
}
- target_stat_setup_dev_default_groups(se_dev);
-
- pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
- " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+ target_stat_setup_dev_default_groups(dev);
mutex_unlock(&hba->hba_access_mutex);
- return &se_dev->se_dev_group;
-out:
- if (se_dev->t10_alua.default_tg_pt_gp) {
- core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
- se_dev->t10_alua.default_tg_pt_gp = NULL;
- }
- if (dev_stat_grp)
- kfree(dev_stat_grp->default_groups);
- if (tg_pt_gp_cg)
- kfree(tg_pt_gp_cg->default_groups);
- if (dev_cg)
- kfree(dev_cg->default_groups);
- if (se_dev->se_dev_su_ptr)
- t->free_device(se_dev->se_dev_su_ptr);
- kfree(se_dev);
-unlock:
+ return dev_cg;
+
+out_free_tg_pt_gp_cg_default_groups:
+ kfree(tg_pt_gp_cg->default_groups);
+out_free_tg_pt_gp:
+ core_alua_free_tg_pt_gp(tg_pt_gp);
+out_free_dev_cg_default_groups:
+ kfree(dev_cg->default_groups);
+out_free_device:
+ target_free_device(dev);
+out_unlock:
mutex_unlock(&hba->hba_access_mutex);
return ERR_PTR(errno);
}
@@ -2859,18 +2572,19 @@ static void target_core_drop_subdev(
struct config_group *group,
struct config_item *item)
{
- struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
- struct se_subsystem_dev, se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct se_hba *hba;
struct config_item *df_item;
- struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
+ struct config_group *tg_pt_gp_cg, *dev_stat_grp;
int i;
- hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+ hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
mutex_lock(&hba->hba_access_mutex);
- dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
+ dev_stat_grp = &dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
dev_stat_grp->default_groups[i] = NULL;
@@ -2878,7 +2592,7 @@ static void target_core_drop_subdev(
}
kfree(dev_stat_grp->default_groups);
- tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2889,17 +2603,15 @@ static void target_core_drop_subdev(
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
- se_dev->t10_alua.default_tg_pt_gp = NULL;
+ dev->t10_alua.default_tg_pt_gp = NULL;
- dev_cg = &se_dev->se_dev_group;
for (i = 0; dev_cg->default_groups[i]; i++) {
df_item = &dev_cg->default_groups[i]->cg_item;
dev_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
/*
- * The releasing of se_dev and associated se_dev->se_dev_ptr is done
- * from target_core_dev_item_ops->release() ->target_core_dev_release().
+ * se_dev is released from target_core_dev_item_ops->release()
*/
config_item_put(item);
mutex_unlock(&hba->hba_access_mutex);
@@ -2962,13 +2674,10 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
return -EINVAL;
}
- spin_lock(&hba->device_lock);
- if (!list_empty(&hba->hba_dev_list)) {
+ if (hba->dev_count) {
pr_err("Unable to set hba_mode with active devices\n");
- spin_unlock(&hba->device_lock);
return -EINVAL;
}
- spin_unlock(&hba->device_lock);
ret = transport->pmode_enable_hba(hba, mode_flag);
if (ret < 0)
@@ -3120,7 +2829,7 @@ static int __init target_core_init_configfs(void)
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
target_cg = &subsys->su_group;
- target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!target_cg->default_groups) {
pr_err("Unable to allocate target_cg->default_groups\n");
@@ -3136,7 +2845,7 @@ static int __init target_core_init_configfs(void)
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
hba_cg = &target_core_hbagroup;
- hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!hba_cg->default_groups) {
pr_err("Unable to allocate hba_cg->default_groups\n");
@@ -3152,7 +2861,7 @@ static int __init target_core_init_configfs(void)
* groups under /sys/kernel/config/target/core/alua/
*/
alua_cg = &alua_group;
- alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!alua_cg->default_groups) {
pr_err("Unable to allocate alua_cg->default_groups\n");
@@ -3174,7 +2883,7 @@ static int __init target_core_init_configfs(void)
}
lu_gp_cg = &alua_lu_gps_group;
- lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lu_gp_cg->default_groups) {
pr_err("Unable to allocate lu_gp_cg->default_groups\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 9abef9f8eb76..f2aa7543d20a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -4,10 +4,7 @@
* This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -50,26 +47,20 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
-static void se_dev_start(struct se_device *dev);
-static void se_dev_stop(struct se_device *dev);
-
static struct se_hba *lun0_hba;
-static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
-int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+sense_reason_t
+transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
struct se_device *dev;
unsigned long flags;
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
+ return TCM_NON_EXISTENT_LUN;
spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
@@ -81,14 +72,12 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
- return -EACCES;
+ return TCM_WRITE_PROTECTED;
}
if (se_cmd->data_direction == DMA_TO_DEVICE)
@@ -113,38 +102,24 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
* MappedLUN=0 exists for this Initiator Port.
*/
if (unpacked_lun != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- return -ENODEV;
+ return TCM_NON_EXISTENT_LUN;
}
/*
* Force WRITE PROTECT for virtual LUN 0
*/
if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
- (se_cmd->data_direction != DMA_NONE)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -EACCES;
- }
+ (se_cmd->data_direction != DMA_NONE))
+ return TCM_WRITE_PROTECTED;
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
- /*
- * Determine if the struct se_lun is online.
- * FIXME: Check for LUN_RESET + UNIT Attention
- */
- if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }
/* Directly associate cmd with se_dev */
se_cmd->se_dev = se_lun->lun_se_dev;
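transport_lookup_cmd_lun() now reports failures as a sense_reason_t (TCM_NON_EXISTENT_LUN, TCM_WRITE_PROTECTED) rather than setting scsi_sense_reason and returning an errno. A sketch of the resulting caller pattern; the wrapper is illustrative and assumes the sense_reason_t-based transport_send_check_condition_and_sense() from the same series:

static void example_setup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
        sense_reason_t ret;

        ret = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
        if (ret)
                /* turn the reason code into a CHECK CONDITION for the fabric */
                transport_send_check_condition_and_sense(se_cmd, ret, 0);
}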
@@ -175,11 +150,8 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
unsigned long flags;
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
return -ENODEV;
- }
spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
@@ -199,15 +171,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }
- /*
- * Determine if the struct se_lun is online.
- * FIXME: Check for LUN_RESET + UNIT Attention
- */
- if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -ENODEV;
}
@@ -565,7 +528,6 @@ static void core_export_port(
struct se_port *port,
struct se_lun *lun)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
spin_lock(&dev->se_port_lock);
@@ -578,7 +540,8 @@ static void core_export_port(
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
- if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+ !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
pr_err("Unable to allocate t10_alua_tg_pt"
@@ -587,7 +550,7 @@ static void core_export_port(
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
@@ -625,6 +588,7 @@ int core_dev_export(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ struct se_hba *hba = dev->se_hba;
struct se_port *port;
port = core_alloc_port(dev);
@@ -632,9 +596,11 @@ int core_dev_export(
return PTR_ERR(port);
lun->lun_se_dev = dev;
- se_dev_start(dev);
- atomic_inc(&dev->dev_export_obj.obj_access_count);
+ spin_lock(&hba->device_lock);
+ dev->export_count++;
+ spin_unlock(&hba->device_lock);
+
core_export_port(dev, tpg, port, lun);
return 0;
}
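The export hunks replace the atomic dev_export_obj access counter with a plain dev->export_count manipulated under hba->device_lock. A sketch of a consistent read (the helper name is illustrative; post-patch code also reads the counter unlocked where an exact value is not needed):

static bool example_dev_is_exported(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;
        bool exported;

        spin_lock(&hba->device_lock);
        exported = dev->export_count != 0;
        spin_unlock(&hba->device_lock);

        return exported;
}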
@@ -644,6 +610,7 @@ void core_dev_unexport(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ struct se_hba *hba = dev->se_hba;
struct se_port *port = lun->lun_sep;
spin_lock(&lun->lun_sep_lock);
@@ -654,198 +621,27 @@ void core_dev_unexport(
spin_unlock(&lun->lun_sep_lock);
spin_lock(&dev->se_port_lock);
- atomic_dec(&dev->dev_export_obj.obj_access_count);
core_release_port(dev, port);
spin_unlock(&dev->se_port_lock);
- se_dev_stop(dev);
- lun->lun_se_dev = NULL;
-}
-
-int target_report_luns(struct se_cmd *se_cmd)
-{
- struct se_dev_entry *deve;
- struct se_session *se_sess = se_cmd->se_sess;
- unsigned char *buf;
- u32 lun_count = 0, offset = 8, i;
-
- if (se_cmd->data_length < 16) {
- pr_warn("REPORT LUNS allocation length %u too small\n",
- se_cmd->data_length);
- se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
-
- buf = transport_kmap_data_sg(se_cmd);
- if (!buf)
- return -ENOMEM;
-
- /*
- * If no struct se_session pointer is present, this struct se_cmd is
- * coming via a target_core_mod PASSTHROUGH op, and not through
- * a $FABRIC_MOD. In that case, report LUN=0 only.
- */
- if (!se_sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
- goto done;
- }
-
- spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = se_sess->se_node_acl->device_list[i];
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
- /*
- * We determine the correct LUN LIST LENGTH even once we
- * have reached the initial allocation length.
- * See SPC2-R20 7.19.
- */
- lun_count++;
- if ((offset + 8) > se_cmd->data_length)
- continue;
-
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
- offset += 8;
- }
- spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
-
- /*
- * See SPC3 r07, page 159.
- */
-done:
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
- transport_kunmap_data_sg(se_cmd);
-
- target_complete_cmd(se_cmd, GOOD);
- return 0;
-}
-
-/* se_release_device_for_hba():
- *
- *
- */
-void se_release_device_for_hba(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
- (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
- se_dev_stop(dev);
-
- if (dev->dev_ptr) {
- destroy_workqueue(dev->tmr_wq);
- if (dev->transport->free_device)
- dev->transport->free_device(dev->dev_ptr);
- }
-
spin_lock(&hba->device_lock);
- list_del(&dev->dev_list);
- hba->dev_count--;
+ dev->export_count--;
spin_unlock(&hba->device_lock);
- core_scsi3_free_all_registrations(dev);
- se_release_vpd_for_dev(dev);
-
- kfree(dev);
+ lun->lun_se_dev = NULL;
}
-void se_release_vpd_for_dev(struct se_device *dev)
+static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
- spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
+ spin_lock(&dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
- &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
+ &dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
- spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
-}
-
-/* se_free_virtual_device():
- *
- * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
- */
-int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
-{
- if (!list_empty(&dev->dev_sep_list))
- dump_stack();
-
- core_alua_free_lu_gp_mem(dev);
- se_release_device_for_hba(dev);
-
- return 0;
-}
-
-static void se_dev_start(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- spin_lock(&hba->device_lock);
- atomic_inc(&dev->dev_obj.obj_access_count);
- if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
- if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
- } else if (dev->dev_status &
- TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
- dev->dev_status &=
- ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
- }
- }
- spin_unlock(&hba->device_lock);
-}
-
-static void se_dev_stop(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- spin_lock(&hba->device_lock);
- atomic_dec(&dev->dev_obj.obj_access_count);
- if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
- if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- } else if (dev->dev_status &
- TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
- }
- }
- spin_unlock(&hba->device_lock);
-}
-
-int se_dev_check_online(struct se_device *dev)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dev->dev_status_lock, flags);
- ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
- spin_unlock_irqrestore(&dev->dev_status_lock, flags);
-
- return ret;
-}
-
-int se_dev_check_shutdown(struct se_device *dev)
-{
- int ret;
-
- spin_lock_irq(&dev->dev_status_lock);
- ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
- spin_unlock_irq(&dev->dev_status_lock);
-
- return ret;
+ spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
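
se_release_vpd_for_dev() above uses list_for_each_entry_safe() because entries are freed while the list is being walked. A user-space analogue of the same safe-iteration idiom (hypothetical types, not the kernel list API):

/* Grab the next pointer before freeing the current node, which is what
 * the "safe" iterator's extra cursor does in the kernel version. */
#include <stdio.h>
#include <stdlib.h>

struct vpd { int page; struct vpd *next; };

static void release_all(struct vpd **head)
{
	struct vpd *cur = *head, *tmp;

	while (cur) {
		tmp = cur->next;	/* save before the free */
		printf("freeing VPD page 0x%02x\n", cur->page);
		free(cur);
		cur = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct vpd *head = NULL;
	int pages[] = { 0x80, 0x83 };

	for (int i = 0; i < 2; i++) {
		struct vpd *v = malloc(sizeof(*v));
		v->page = pages[i];
		v->next = head;
		head = v;
	}
	release_all(&head);
	return 0;
}
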
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
@@ -866,72 +662,13 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
return aligned_max_sectors;
}
-void se_dev_set_default_attribs(
- struct se_device *dev,
- struct se_dev_limits *dev_limits)
-{
- struct queue_limits *limits = &dev_limits->limits;
-
- dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
- dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
- dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
- dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
- dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
- dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
- dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
- dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
- dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
- dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
- /*
- * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
- * iblock_create_virtdevice() from struct queue_limits values
- * if blk_queue_discard()==1
- */
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
- DA_MAX_UNMAP_BLOCK_DESC_COUNT;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
- DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
- /*
- * block_size is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
- dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
- /*
- * Align max_hw_sectors down to PAGE_SIZE I/O transfers
- */
- limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
- limits->logical_block_size);
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-
- /*
- * Set fabric_max_sectors, which is reported in block limits
- * VPD page (B0h).
- */
- dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
- /*
- * Set optimal_sectors from fabric_max_sectors, which can be
- * lowered via configfs.
- */
- dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
- /*
- * queue_depth is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
- dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
-}
-
int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+ dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
+ dev, dev->dev_attrib.max_unmap_lba_count);
return 0;
}
@@ -939,10 +676,10 @@ int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ dev->dev_attrib.max_unmap_block_desc_count =
max_unmap_block_desc_count;
pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
+ dev, dev->dev_attrib.max_unmap_block_desc_count);
return 0;
}
@@ -950,9 +687,9 @@ int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
- dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+ dev->dev_attrib.unmap_granularity = unmap_granularity;
pr_debug("dev[%p]: Set unmap_granularity: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
+ dev, dev->dev_attrib.unmap_granularity);
return 0;
}
@@ -960,9 +697,19 @@ int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+ dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
+ dev, dev->dev_attrib.unmap_granularity_alignment);
+ return 0;
+}
+
+int se_dev_set_max_write_same_len(
+ struct se_device *dev,
+ u32 max_write_same_len)
+{
+ dev->dev_attrib.max_write_same_len = max_write_same_len;
+ pr_debug("dev[%p]: Set max_write_same_len: %u\n",
+ dev, dev->dev_attrib.max_write_same_len);
return 0;
}
@@ -993,9 +740,9 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
pr_err("emulate_fua_write not supported for pSCSI\n");
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+ dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
+ dev, dev->dev_attrib.emulate_fua_write);
return 0;
}
@@ -1025,9 +772,9 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("emulate_write_cache not supported for pSCSI\n");
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+ dev->dev_attrib.emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
+ dev, dev->dev_attrib.emulate_write_cache);
return 0;
}
@@ -1038,16 +785,15 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
return -EINVAL;
}
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " UA_INTRLCK_CTRL while dev_export_obj: %d count"
- " exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " UA_INTRLCK_CTRL while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
+ dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
return 0;
}
@@ -1059,15 +805,15 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
return -EINVAL;
}
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TAS while"
- " dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+ dev->dev_attrib.emulate_tas = flag;
pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
+ dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
return 0;
}
@@ -1082,12 +828,12 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected by iblock_create_virtdevice().
*/
- if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+ dev->dev_attrib.emulate_tpu = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
@@ -1103,12 +849,12 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected by iblock_create_virtdevice().
*/
- if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+ dev->dev_attrib.emulate_tpws = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
@@ -1120,9 +866,9 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+ dev->dev_attrib.enforce_pr_isids = flag;
pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+ (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
@@ -1132,7 +878,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
printk(KERN_ERR "Illegal value %d\n", flag);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+ dev->dev_attrib.is_nonrot = flag;
pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
dev, flag);
return 0;
@@ -1145,7 +891,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
" reordering not implemented\n", dev);
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+ dev->dev_attrib.emulate_rest_reord = flag;
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
@@ -1155,10 +901,10 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
- " dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (!queue_depth) {
@@ -1168,26 +914,26 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
} else {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ if (queue_depth > dev->dev_attrib.queue_depth) {
+ if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
}
}
- dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
+ dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
@@ -1195,10 +941,12 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ int block_size = dev->dev_attrib.block_size;
+
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " fabric_max_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ " fabric_max_sectors while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (!fabric_max_sectors) {
@@ -1213,11 +961,11 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+ if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed fabric_max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
} else {
@@ -1232,10 +980,14 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
/*
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
*/
+ if (!block_size) {
+ block_size = 512;
+ pr_warn("Defaulting to 512 for zero block_size\n");
+ }
fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.block_size);
+ block_size);
- dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+ dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
dev, fabric_max_sectors);
return 0;
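
The block_size fallback added above feeds se_dev_align_max_sectors(), which per the "Align max_sectors down to PAGE_SIZE" comments rounds the sector count down so the resulting byte count stays page aligned. A hedged user-space sketch of that arithmetic (the exact kernel helper is not reproduced here):

/* Assumption: the helper rounds max_sectors down to a multiple of
 * PAGE_SIZE / block_size; treat this as an illustration only. */
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int align_max_sectors(unsigned int max_sectors,
				      unsigned int block_size)
{
	unsigned int alignment = PAGE_SIZE / block_size;

	if (!alignment)
		alignment = 1;	/* block_size larger than a page */
	return max_sectors - (max_sectors % alignment);
}

int main(void)
{
	/* 1023 sectors of 512 bytes is not page aligned; 1016 is. */
	printf("%u\n", align_max_sectors(1023, 512));	/* prints 1016 */
	return 0;
}
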
@@ -1243,10 +995,10 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " optimal_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ " optimal_sectors while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
@@ -1254,14 +1006,14 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
- if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+ if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than fabric_max_sectors: %u\n", dev,
- optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
+ optimal_sectors, dev->dev_attrib.fabric_max_sectors);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+ dev->dev_attrib.optimal_sectors = optimal_sectors;
pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
@@ -1269,10 +1021,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device block_size"
- " while dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
@@ -1293,7 +1045,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+ dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
return 0;
@@ -1307,12 +1059,6 @@ struct se_lun *core_dev_add_lun(
struct se_lun *lun_p;
int rc;
- if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
- pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
- atomic_read(&dev->dev_access_obj.obj_access_count));
- return ERR_PTR(-EACCES);
- }
-
lun_p = core_tpg_pre_addlun(tpg, lun);
if (IS_ERR(lun_p))
return lun_p;
@@ -1568,12 +1314,211 @@ void core_dev_free_initiator_node_lun_acl(
kfree(lacl);
}
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+ struct t10_wwn *wwn = &dev->t10_wwn;
+ char buf[17];
+ int i, device_type;
+ /*
+ * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+ */
+ for (i = 0; i < 8; i++)
+ if (wwn->vendor[i] >= 0x20)
+ buf[i] = wwn->vendor[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Vendor: %s\n", buf);
+
+ for (i = 0; i < 16; i++)
+ if (wwn->model[i] >= 0x20)
+ buf[i] = wwn->model[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Model: %s\n", buf);
+
+ for (i = 0; i < 4; i++)
+ if (wwn->revision[i] >= 0x20)
+ buf[i] = wwn->revision[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Revision: %s\n", buf);
+
+ device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Type: %s ", scsi_device_type(device_type));
+}
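+
For reference, the scrubbing loops in scsi_dump_inquiry() pad any byte below 0x20 to a space and NUL-terminate before printing. A standalone user-space equivalent of that loop:

/* User-space sketch of the INQUIRY field scrubbing shown above. */
#include <stdio.h>

static void scrub_inquiry_field(const unsigned char *src, unsigned len,
				char *dst)
{
	unsigned i;

	for (i = 0; i < len; i++)
		dst[i] = (src[i] >= 0x20) ? src[i] : ' ';
	dst[len] = '\0';
}

int main(void)
{
	unsigned char vendor[8] = { 'L', 'I', 'O', '-', 'O', 'R', 'G', 0 };
	char buf[17];

	scrub_inquiry_field(vendor, 8, buf);
	printf("  Vendor: %s\n", buf);	/* "LIO-ORG " */
	return 0;
}
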
+
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+{
+ struct se_device *dev;
+
+ dev = hba->transport->alloc_device(hba, name);
+ if (!dev)
+ return NULL;
+
+ dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+ dev->se_hba = hba;
+ dev->transport = hba->transport;
+
+ INIT_LIST_HEAD(&dev->dev_list);
+ INIT_LIST_HEAD(&dev->dev_sep_list);
+ INIT_LIST_HEAD(&dev->dev_tmr_list);
+ INIT_LIST_HEAD(&dev->delayed_cmd_list);
+ INIT_LIST_HEAD(&dev->state_list);
+ INIT_LIST_HEAD(&dev->qf_cmd_list);
+ spin_lock_init(&dev->stats_lock);
+ spin_lock_init(&dev->execute_task_lock);
+ spin_lock_init(&dev->delayed_cmd_lock);
+ spin_lock_init(&dev->dev_reservation_lock);
+ spin_lock_init(&dev->se_port_lock);
+ spin_lock_init(&dev->se_tmr_lock);
+ spin_lock_init(&dev->qf_cmd_lock);
+ atomic_set(&dev->dev_ordered_id, 0);
+ INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
+ spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
+ INIT_LIST_HEAD(&dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&dev->t10_pr.registration_lock);
+ spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
+ INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
+ spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+
+ dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ dev->t10_wwn.t10_dev = dev;
+ dev->t10_alua.t10_dev = dev;
+
+ dev->dev_attrib.da_dev = dev;
+ dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+ dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
+ dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+ dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->dev_attrib.is_nonrot = DA_IS_NONROT;
+ dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
+ dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ dev->dev_attrib.max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ dev->dev_attrib.unmap_granularity_alignment =
+ DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+ dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+ dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+
+ return dev;
+}
+
+int target_configure_device(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+ int ret;
+
+ if (dev->dev_flags & DF_CONFIGURED) {
+ pr_err("se_dev->se_dev_ptr already set for storage"
+ " object\n");
+ return -EEXIST;
+ }
+
+ ret = dev->transport->configure_device(dev);
+ if (ret)
+ goto out;
+ dev->dev_flags |= DF_CONFIGURED;
+
+ /*
+ * XXX: there is not much point to have two different values here..
+ */
+ dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
+ dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
+
+ /*
+ * Align max_hw_sectors down to PAGE_SIZE I/O transfers
+ */
+ dev->dev_attrib.hw_max_sectors =
+ se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+ dev->dev_attrib.hw_block_size);
+
+ dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ dev->creation_time = get_jiffies_64();
+
+ ret = core_setup_alua(dev);
+ if (ret)
+ goto out;
+
+ /*
+ * Startup the struct se_device processing thread
+ */
+ dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+ dev->transport->name);
+ if (!dev->tmr_wq) {
+ pr_err("Unable to create tmr workqueue for %s\n",
+ dev->transport->name);
+ ret = -ENOMEM;
+ goto out_free_alua;
+ }
+
+ /*
+ * Setup work_queue for QUEUE_FULL
+ */
+ INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
+
+ /*
+ * Preload the initial INQUIRY const values if we are doing
+ * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+ * passthrough because this is being provided by the backend LLD.
+ */
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+ strncpy(&dev->t10_wwn.model[0],
+ dev->transport->inquiry_prod, 16);
+ strncpy(&dev->t10_wwn.revision[0],
+ dev->transport->inquiry_rev, 4);
+ }
+
+ scsi_dump_inquiry(dev);
+
+ spin_lock(&hba->device_lock);
+ hba->dev_count++;
+ spin_unlock(&hba->device_lock);
+ return 0;
+
+out_free_alua:
+ core_alua_free_lu_gp_mem(dev);
+out:
+ se_release_vpd_for_dev(dev);
+ return ret;
+}
+
+void target_free_device(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ WARN_ON(!list_empty(&dev->dev_sep_list));
+
+ if (dev->dev_flags & DF_CONFIGURED) {
+ destroy_workqueue(dev->tmr_wq);
+
+ spin_lock(&hba->device_lock);
+ hba->dev_count--;
+ spin_unlock(&hba->device_lock);
+ }
+
+ core_alua_free_lu_gp_mem(dev);
+ core_scsi3_free_all_registrations(dev);
+ se_release_vpd_for_dev(dev);
+
+ dev->transport->free_device(dev);
+}
+
int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
- struct se_subsystem_dev *se_dev = NULL;
- struct se_subsystem_api *t;
char buf[16];
int ret;
@@ -1581,60 +1526,28 @@ int core_dev_setup_virtual_lun0(void)
if (IS_ERR(hba))
return PTR_ERR(hba);
- lun0_hba = hba;
- t = hba->transport;
-
- se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!se_dev) {
- pr_err("Unable to allocate memory for"
- " struct se_subsystem_dev\n");
+ dev = target_alloc_device(hba, "virt_lun0");
+ if (!dev) {
ret = -ENOMEM;
- goto out;
+ goto out_free_hba;
}
- INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
- spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_pr.registration_lock);
- spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
- INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
- spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
- spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
- se_dev->t10_wwn.t10_sub_dev = se_dev;
- se_dev->t10_alua.t10_sub_dev = se_dev;
- se_dev->se_dev_attrib.da_sub_dev = se_dev;
- se_dev->se_dev_hba = hba;
-
- se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate subsystem dependent pointer"
- " from allocate_virtdevice()\n");
- ret = -ENOMEM;
- goto out;
- }
- lun0_su_dev = se_dev;
memset(buf, 0, 16);
sprintf(buf, "rd_pages=8");
- t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+ hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
- dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto out;
- }
- se_dev->se_dev_ptr = dev;
- g_lun0_dev = dev;
+ ret = target_configure_device(dev);
+ if (ret)
+ goto out_free_se_dev;
+ lun0_hba = hba;
+ g_lun0_dev = dev;
return 0;
-out:
- lun0_su_dev = NULL;
- kfree(se_dev);
- if (lun0_hba) {
- core_delete_hba(lun0_hba);
- lun0_hba = NULL;
- }
+
+out_free_se_dev:
+ target_free_device(dev);
+out_free_hba:
+ core_delete_hba(hba);
return ret;
}
@@ -1642,14 +1555,11 @@ out:
void core_dev_release_virtual_lun0(void)
{
struct se_hba *hba = lun0_hba;
- struct se_subsystem_dev *su_dev = lun0_su_dev;
if (!hba)
return;
if (g_lun0_dev)
- se_free_virtual_device(g_lun0_dev, hba);
-
- kfree(su_dev);
+ target_free_device(g_lun0_dev);
core_delete_hba(hba);
}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index bca737bb813d..c57bbbc7a7d1 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -4,10 +4,9 @@
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
- * Copyright (c) 2010,2011 Rising Tide Systems
- * Copyright (c) 2010,2011 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
*
- * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -71,6 +70,12 @@ static int target_fabric_mappedlun_link(
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
int ret = 0, lun_access;
+
+ if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
+ pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
+ " %p to struct lun: %p\n", lun_ci, lun);
+ return -EFAULT;
+ }
/*
* Ensure that the source port exists
*/
@@ -358,7 +363,7 @@ static struct config_group *target_fabric_make_mappedlun(
}
lacl_cg = &lacl->se_lun_group;
- lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
pr_err("Unable to allocate lacl_cg->default_groups\n");
@@ -374,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups[1] = NULL;
ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+ ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
pr_err("Unable to allocate ml_stat_grp->default_groups\n");
@@ -734,17 +739,26 @@ static int target_fabric_port_link(
struct config_item *se_dev_ci)
{
struct config_item *tpg_ci;
- struct se_device *dev;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_lun *lun_p;
struct se_portal_group *se_tpg;
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(se_dev_ci), struct se_subsystem_dev,
- se_dev_group);
+ struct se_device *dev =
+ container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
struct target_fabric_configfs *tf;
int ret;
+ if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
+ pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
+ " %p to struct se_device: %p\n", se_dev_ci, dev);
+ return -EFAULT;
+ }
+
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("se_device not configured yet, cannot port link\n");
+ return -ENODEV;
+ }
+
tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
se_tpg = container_of(to_config_group(tpg_ci),
struct se_portal_group, tpg_group);
@@ -755,14 +769,6 @@ static int target_fabric_port_link(
return -EEXIST;
}
- dev = se_dev->se_dev_ptr;
- if (!dev) {
- pr_err("Unable to locate struct se_device pointer from"
- " %s\n", config_item_name(se_dev_ci));
- ret = -ENODEV;
- goto out;
- }
-
lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
if (IS_ERR(lun_p)) {
pr_err("core_dev_add_lun() failed\n");
@@ -869,7 +875,7 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);
lun_cg = &lun->lun_group;
- lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
pr_err("Unable to allocate lun_cg->default_groups\n");
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index e460d6233a0a..687b0b0a4aa6 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -4,8 +4,7 @@
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
- * Copyright (c) 2010 Rising Tide Systems, Inc.
- * Copyright (c) 2010 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0360383dfb94..b9c88497e8f0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -3,10 +3,7 @@
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
- * Copyright (c) 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2005-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -41,7 +38,10 @@
#include "target_core_file.h"
-static struct se_subsystem_api fileio_template;
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct fd_dev, dev);
+}
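+
The FD_DEV() helper above reflects the new backend layout in this series: struct se_device is embedded in the backend device and recovered with container_of(), replacing the old void *dev_ptr indirection. A user-space analogue of the same idea (generic_dev/backend_dev are invented names for illustration):

/* Embedded-struct pattern: the core layer only sees the inner struct,
 * the backend recovers its own wrapper with container_of(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_dev {
	int index;
};

struct backend_dev {
	struct generic_dev dev;		/* embedded, like struct se_device */
	char path[64];
};

static struct backend_dev *BACKEND_DEV(struct generic_dev *d)
{
	return container_of(d, struct backend_dev, dev);
}

int main(void)
{
	struct backend_dev bd = { { 7 }, "/tmp/backing_file" };
	struct generic_dev *d = &bd.dev;	/* what the core layer sees */

	printf("index=%d path=%s\n", d->index, BACKEND_DEV(d)->path);
	return 0;
}
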
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -82,7 +82,7 @@ static void fd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL;
}
-static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
struct fd_host *fd_host = hba->hba_ptr;
@@ -97,34 +97,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
- return fd_dev;
+ return &fd_dev->dev;
}
-/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static struct se_device *fd_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int fd_configure_device(struct se_device *dev)
{
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct queue_limits *limits;
- struct fd_dev *fd_dev = p;
- struct fd_host *fd_host = hba->hba_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct fd_host *fd_host = dev->se_hba->hba_ptr;
struct file *file;
struct inode *inode = NULL;
- int dev_flags = 0, flags, ret = -EINVAL;
+ int flags, ret = -EINVAL;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+ pr_err("Missing fd_dev_name=\n");
+ return -EINVAL;
+ }
/*
* Use O_DSYNC by default instead of O_SYNC to forgo syncing
* of pure timestamp updates.
*/
flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+
/*
* Optionally allow fd_buffered_io=1 to be enabled for people
* who want to use the fs buffer cache as a WriteCache mechanism.
@@ -154,22 +148,17 @@ static struct se_device *fd_create_virtdevice(
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q;
+ struct request_queue *q = bdev_get_queue(inode->i_bdev);
unsigned long long dev_size;
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = bdev_get_queue(inode->i_bdev);
- limits = &dev_limits.limits;
- limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
- limits->max_hw_sectors = queue_max_hw_sectors(q);
- limits->max_sectors = queue_max_sectors(q);
+
+ dev->dev_attrib.hw_block_size =
+ bdev_logical_block_size(inode->i_bdev);
+ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
*/
- fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
@@ -185,26 +174,18 @@ static struct se_device *fd_create_virtdevice(
goto fail;
}
- limits = &dev_limits.limits;
- limits->logical_block_size = FD_BLOCKSIZE;
- limits->max_hw_sectors = FD_MAX_SECTORS;
- limits->max_sectors = FD_MAX_SECTORS;
- fd_dev->fd_block_size = FD_BLOCKSIZE;
+ dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
}
- dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+ fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
- dev = transport_add_device_to_core_hba(hba, &fileio_template,
- se_dev, dev_flags, fd_dev,
- &dev_limits, "FILEIO", FD_VERSION);
- if (!dev)
- goto fail;
+ dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
" with FDBD_HAS_BUFFERED_IO_WCE\n");
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
+ dev->dev_attrib.emulate_write_cache = 1;
}
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
@@ -214,22 +195,18 @@ static struct se_device *fd_create_virtdevice(
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
- return dev;
+ return 0;
fail:
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
- return ERR_PTR(ret);
+ return ret;
}
-/* fd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_device(void *p)
+static void fd_free_device(struct se_device *dev)
{
- struct fd_dev *fd_dev = p;
+ struct fd_dev *fd_dev = FD_DEV(dev);
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
@@ -239,17 +216,16 @@ static void fd_free_device(void *p)
kfree(fd_dev);
}
-static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents)
+static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
+ u32 sgl_nents, int is_write)
{
struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *dev = se_dev->dev_ptr;
+ struct fd_dev *dev = FD_DEV(se_dev);
struct file *fd = dev->fd_file;
struct scatterlist *sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (cmd->t_task_lba *
- se_dev->se_sub_dev->se_dev_attrib.block_size);
+ loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -260,81 +236,58 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
for_each_sg(sgl, sg, sgl_nents, i) {
iov[i].iov_len = sg->length;
- iov[i].iov_base = sg_virt(sg);
+ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
+ if (is_write)
+ ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+ else
+ ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
set_fs(old_fs);
+ for_each_sg(sgl, sg, sgl_nents, i)
+ kunmap(sg_page(sg));
+
kfree(iov);
- /*
- * Return zeros and GOOD status even if the READ did not return
- * the expected virt_size for struct file w/o a backing struct
- * block_device.
- */
- if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+
+ if (is_write) {
if (ret < 0 || ret != cmd->data_length) {
- pr_err("vfs_readv() returned %d,"
- " expecting %d for S_ISBLK\n", ret,
- (int)cmd->data_length);
+ pr_err("%s() write returned %d\n", __func__, ret);
return (ret < 0 ? ret : -EINVAL);
}
} else {
- if (ret < 0) {
- pr_err("vfs_readv() returned %d for non"
- " S_ISBLK\n", ret);
- return ret;
+ /*
+ * Return zeros and GOOD status even if the READ did not return
+ * the expected virt_size for struct file w/o a backing struct
+ * block_device.
+ */
+ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+ if (ret < 0 || ret != cmd->data_length) {
+ pr_err("%s() returned %d, expecting %u for "
+ "S_ISBLK\n", __func__, ret,
+ cmd->data_length);
+ return (ret < 0 ? ret : -EINVAL);
+ }
+ } else {
+ if (ret < 0) {
+ pr_err("%s() returned %d for non S_ISBLK\n",
+ __func__, ret);
+ return ret;
+ }
}
}
-
- return 1;
-}
-
-static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents)
-{
- struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *dev = se_dev->dev_ptr;
- struct file *fd = dev->fd_file;
- struct scatterlist *sg;
- struct iovec *iov;
- mm_segment_t old_fs;
- loff_t pos = (cmd->t_task_lba *
- se_dev->se_sub_dev->se_dev_attrib.block_size);
- int ret, i = 0;
-
- iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
- if (!iov) {
- pr_err("Unable to allocate fd_do_writev iov[]\n");
- return -ENOMEM;
- }
-
- for_each_sg(sgl, sg, sgl_nents, i) {
- iov[i].iov_len = sg->length;
- iov[i].iov_base = sg_virt(sg);
- }
-
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
- set_fs(old_fs);
-
- kfree(iov);
-
- if (ret < 0 || ret != cmd->data_length) {
- pr_err("vfs_writev() returned %d\n", ret);
- return (ret < 0 ? ret : -EINVAL);
- }
-
return 1;
}
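
fd_do_rw() now maps each scatterlist page with kmap() and builds a struct iovec array for a single vectored call instead of relying on sg_virt(). A user-space analogue of the vectored I/O using writev() (the /tmp demo path is an assumption for illustration):

/* Build an iovec array from discontiguous buffers and submit it with
 * one writev() call, mirroring the vectored submission above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char a[] = "first chunk ", b[] = "second chunk\n";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};
	int fd = open("/tmp/fd_do_rw_demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	ssize_t ret;

	if (fd < 0)
		return 1;
	ret = writev(fd, iov, 2);	/* one syscall, two buffers */
	printf("writev wrote %zd bytes\n", ret);
	close(fd);
	return 0;
}
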
-static int fd_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_sync_cache(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct fd_dev *fd_dev = dev->dev_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
@@ -353,7 +306,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
start = 0;
end = LLONG_MAX;
} else {
- start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ start = cmd->t_task_lba * dev->dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
@@ -367,17 +320,16 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
if (immed)
return 0;
- if (ret) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (ret)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
- } else {
+ else
target_complete_cmd(cmd, SAM_STAT_GOOD);
- }
return 0;
}
-static int fd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
@@ -390,30 +342,29 @@ static int fd_execute_rw(struct se_cmd *cmd)
* physical memory addresses to struct iovec virtual memory.
*/
if (data_direction == DMA_FROM_DEVICE) {
- ret = fd_do_readv(cmd, sgl, sgl_nents);
+ ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
} else {
- ret = fd_do_writev(cmd, sgl, sgl_nents);
+ ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
/*
* Perform implicit vfs_fsync_range() for fd_do_writev() ops
* for SCSI WRITEs with Forced Unit Access (FUA) set.
* Allow this to happen independent of WCE=0 setting.
*/
if (ret > 0 &&
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ dev->dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)) {
- struct fd_dev *fd_dev = dev->dev_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
loff_t start = cmd->t_task_lba *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ dev->dev_attrib.block_size;
loff_t end = start + cmd->data_length;
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
}
- if (ret < 0) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return ret;
- }
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
if (ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
@@ -430,12 +381,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t fd_set_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page, ssize_t count)
+static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -502,24 +451,9 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
-
- if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
- pr_err("Missing fd_dev_name=\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t fd_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
@@ -530,27 +464,9 @@ static ssize_t fd_show_configfs_dev_params(
return bl;
}
-/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_rev(struct se_device *dev)
-{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-/* fd_get_device_type(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_type(struct se_device *dev)
-{
- return TYPE_DISK;
-}
-
static sector_t fd_get_blocks(struct se_device *dev)
{
- struct fd_dev *fd_dev = dev->dev_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
struct file *f = fd_dev->fd_file;
struct inode *i = f->f_mapping->host;
unsigned long long dev_size;
@@ -564,34 +480,35 @@ static sector_t fd_get_blocks(struct se_device *dev)
else
dev_size = fd_dev->fd_dev_size;
- return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
+ return div_u64(dev_size, dev->dev_attrib.block_size);
}
-static struct spc_ops fd_spc_ops = {
+static struct sbc_ops fd_sbc_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
};
-static int fd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+fd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &fd_spc_ops);
+ return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
static struct se_subsystem_api fileio_template = {
.name = "fileio",
+ .inquiry_prod = "FILEIO",
+ .inquiry_rev = FD_VERSION,
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
- .allocate_virtdevice = fd_allocate_virtdevice,
- .create_virtdevice = fd_create_virtdevice,
+ .alloc_device = fd_alloc_device,
+ .configure_device = fd_configure_device,
.free_device = fd_free_device,
.parse_cdb = fd_parse_cdb,
- .check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
- .get_device_rev = fd_get_device_rev,
- .get_device_type = fd_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = fd_get_blocks,
};
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 876ae53ef5b8..bc02b018ae46 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -17,6 +17,8 @@
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
struct fd_dev {
+ struct se_device dev;
+
u32 fbd_flags;
unsigned char fd_dev_name[FD_MAX_DEV_NAME];
/* Unique Ramdisk Device ID in Ramdisk HBA */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 3dd1bd4b6f71..d2616cd48f1e 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -3,10 +3,7 @@
*
* This file contains the TCM HBA Transport related functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -113,7 +110,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
mutex_init(&hba->hba_access_mutex);
@@ -152,8 +148,7 @@ out_free_hba:
int
core_delete_hba(struct se_hba *hba)
{
- if (!list_empty(&hba->hba_dev_list))
- dump_stack();
+ WARN_ON(hba->dev_count);
hba->transport->detach_hba(hba);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 57d7674c5013..b526d23dcd4f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -4,10 +4,7 @@
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -50,9 +47,13 @@
#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE 128
-static struct se_subsystem_api iblock_template;
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct iblock_dev, dev);
+}
+
-static void iblock_bio_done(struct bio *, int);
+static struct se_subsystem_api iblock_template;
/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -70,7 +71,7 @@ static void iblock_detach_hba(struct se_hba *hba)
{
}
-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
struct iblock_dev *ib_dev = NULL;
@@ -82,40 +83,28 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
- return ib_dev;
+ return &ib_dev->dev;
}
-static struct se_device *iblock_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int iblock_configure_device(struct se_device *dev)
{
- struct iblock_dev *ib_dev = p;
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct block_device *bd = NULL;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
- struct queue_limits *limits;
- u32 dev_flags = 0;
+ struct block_device *bd = NULL;
fmode_t mode;
- int ret = -EINVAL;
+ int ret = -ENOMEM;
- if (!ib_dev) {
- pr_err("Unable to locate struct iblock_dev parameter\n");
- return ERR_PTR(ret);
+ if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+ pr_err("Missing udev_path= parameters for IBLOCK\n");
+ return -EINVAL;
}
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
if (!ib_dev->ibd_bio_set) {
- pr_err("IBLOCK: Unable to create bioset()\n");
- return ERR_PTR(-ENOMEM);
+ pr_err("IBLOCK: Unable to create bioset\n");
+ goto out;
}
- pr_debug("IBLOCK: Created bio_set()\n");
- /*
- * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
- * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
- */
+
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
@@ -126,27 +115,15 @@ static struct se_device *iblock_create_virtdevice(
bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
- goto failed;
+ goto out_free_bioset;
}
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = bdev_get_queue(bd);
- limits = &dev_limits.limits;
- limits->logical_block_size = bdev_logical_block_size(bd);
- limits->max_hw_sectors = UINT_MAX;
- limits->max_sectors = UINT_MAX;
- dev_limits.hw_queue_depth = q->nr_requests;
- dev_limits.queue_depth = q->nr_requests;
-
ib_dev->ibd_bd = bd;
- dev = transport_add_device_to_core_hba(hba,
- &iblock_template, se_dev, dev_flags, ib_dev,
- &dev_limits, "IBLOCK", IBLOCK_VERSION);
- if (!dev)
- goto failed;
+ q = bdev_get_queue(bd);
+
+ dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+ dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
* Check if the underlying struct block_device request_queue supports
@@ -154,38 +131,41 @@ static struct se_device *iblock_create_virtdevice(
* in ATA and we need to set TPE=1
*/
if (blk_queue_discard(q)) {
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+ dev->dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
+
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity =
q->limits.discard_granularity >> 9;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+ dev->dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
+ /*
+ * Enable write same emulation for IBLOCK and use 0xFFFF as
+ * the smaller WRITE_SAME(10) only has a two-byte block count.
+ */
+ dev->dev_attrib.max_write_same_len = 0xFFFF;
if (blk_queue_nonrot(q))
- dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
-
- return dev;
+ dev->dev_attrib.is_nonrot = 1;
+ return 0;
-failed:
- if (ib_dev->ibd_bio_set) {
- bioset_free(ib_dev->ibd_bio_set);
- ib_dev->ibd_bio_set = NULL;
- }
- ib_dev->ibd_bd = NULL;
- return ERR_PTR(ret);
+out_free_bioset:
+ bioset_free(ib_dev->ibd_bio_set);
+ ib_dev->ibd_bio_set = NULL;
+out:
+ return ret;
}
-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
{
- struct iblock_dev *ib_dev = p;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
@@ -203,12 +183,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
- if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+ if (block_size == dev->dev_attrib.block_size)
return blocks_long;
switch (block_size) {
case 4096:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
@@ -222,7 +202,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 2048:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
@@ -237,7 +217,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 1024:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
@@ -252,7 +232,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 512:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
@@ -273,6 +253,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
return blocks_long;
}
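
iblock_emulate_read_cap_with_block_size() above rescales the block count when the backing device's logical block size differs from the exported block_size. A worked user-space example of that conversion (the kernel version works on the last-LBA value, i.e. count minus one, and only handles the power-of-two cases shown; this sketch ignores those details):

/* Illustrative rescaling of a block count between block sizes. */
#include <stdio.h>

static unsigned long long convert_blocks(unsigned long long blocks,
					 unsigned int hw_bs,
					 unsigned int exported_bs)
{
	if (hw_bs == exported_bs)
		return blocks;
	if (hw_bs > exported_bs)
		return blocks * (hw_bs / exported_bs);
	return blocks / (exported_bs / hw_bs);
}

int main(void)
{
	/* 1 GiB of 4096-byte blocks re-exported with a 512-byte block_size */
	printf("%llu\n", convert_blocks(262144, 4096, 512)); /* 2097152 */
	return 0;
}
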
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+ struct iblock_req *ibr = cmd->priv;
+ u8 status;
+
+ if (!atomic_dec_and_test(&ibr->pending))
+ return;
+
+ if (atomic_read(&ibr->ib_bio_err_cnt))
+ status = SAM_STAT_CHECK_CONDITION;
+ else
+ status = SAM_STAT_GOOD;
+
+ target_complete_cmd(cmd, status);
+ kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+ struct se_cmd *cmd = bio->bi_private;
+ struct iblock_req *ibr = cmd->priv;
+
+ /*
+ * Set -EIO if !BIO_UPTODATE and the passed is still err=0
+ */
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+ err = -EIO;
+
+ if (err != 0) {
+ pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+ " err: %d\n", bio, err);
+ /*
+ * Bump the ib_bio_err_cnt and release bio.
+ */
+ atomic_inc(&ibr->ib_bio_err_cnt);
+ smp_mb__after_atomic_inc();
+ }
+
+ bio_put(bio);
+
+ iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+ struct bio *bio;
+
+ /*
+ * Only allocate as many vector entries as the bio code allows us to,
+ * we'll loop later on until we have handled the whole request.
+ */
+ if (sg_num > BIO_MAX_PAGES)
+ sg_num = BIO_MAX_PAGES;
+
+ bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+ if (!bio) {
+ pr_err("Unable to allocate memory for bio\n");
+ return NULL;
+ }
+
+ bio->bi_bdev = ib_dev->ibd_bd;
+ bio->bi_private = cmd;
+ bio->bi_end_io = &iblock_bio_done;
+ bio->bi_sector = lba;
+
+ return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+ struct blk_plug plug;
+ struct bio *bio;
+
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(list)))
+ submit_bio(rw, bio);
+ blk_finish_plug(&plug);
+}
+
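The pattern tying iblock_complete_cmd() and iblock_bio_done() together is a simple completion reference count: ibr->pending starts at one for the first bio, is bumped once for every additional bio, and the command is completed by whichever bio drops the count to zero. A minimal stand-alone sketch of that pattern in plain C11 atomics (illustrative only, not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int pending;
	int nr_bios = 3;

	atomic_store(&pending, 1);		/* reference for the first bio */
	for (int i = 1; i < nr_bios; i++)
		atomic_fetch_add(&pending, 1);	/* one per additional bio */

	for (int i = 0; i < nr_bios; i++) {	/* each bio completion */
		if (atomic_fetch_sub(&pending, 1) == 1)
			printf("last completion: finish the command\n");
	}
	return 0;
}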
static void iblock_end_io_flush(struct bio *bio, int err)
{
struct se_cmd *cmd = bio->bi_private;
@@ -281,13 +342,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
pr_err("IBLOCK: cache flush failed: %d\n", err);
if (cmd) {
- if (err) {
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (err)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
- } else {
+ else
target_complete_cmd(cmd, SAM_STAT_GOOD);
- }
}
bio_put(bio);
@@ -297,9 +355,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
 * Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must
* always flush the whole cache.
*/
-static int iblock_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
{
- struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
struct bio *bio;
@@ -319,25 +378,27 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
return 0;
}
-static int iblock_execute_unmap(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct iblock_dev *ibd = dev->dev_ptr;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
unsigned char *buf, *ptr = NULL;
sector_t lba;
int size;
u32 range;
- int ret = 0;
- int dl, bd_dl;
+ sense_reason_t ret = 0;
+ int dl, bd_dl, err;
if (cmd->data_length < 8) {
pr_warn("UNMAP parameter list length %u too small\n",
cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
dl = get_unaligned_be16(&buf[0]);
bd_dl = get_unaligned_be16(&buf[2]);
@@ -349,9 +410,8 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
else
size = bd_dl;
- if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+ ret = TCM_INVALID_PARAMETER_LIST;
goto err;
}
@@ -366,23 +426,22 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
- if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ if (range > dev->dev_attrib.max_unmap_lba_count) {
+ ret = TCM_INVALID_PARAMETER_LIST;
goto err;
}
if (lba + range > dev->transport->get_blocks(dev) + 1) {
- cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
- ret = -EINVAL;
+ ret = TCM_ADDRESS_OUT_OF_RANGE;
goto err;
}
- ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
+ err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
GFP_KERNEL, 0);
- if (ret < 0) {
+ if (err < 0) {
pr_err("blkdev_issue_discard() failed: %d\n",
- ret);
+ err);
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto err;
}
@@ -397,23 +456,86 @@ err:
return ret;
}
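iblock_execute_unmap() above walks the UNMAP parameter list one 16-byte block descriptor at a time; the layout it assumes (the standard SBC UNMAP block descriptor, shown here only as an illustration) is:

	/*
	 * parameter list header:
	 *   bytes 0..1   UNMAP data length (dl)
	 *   bytes 2..3   block descriptor data length (bd_dl)
	 *   bytes 4..7   reserved
	 * each 16-byte block descriptor:
	 *   bytes 0..7   starting LBA       (big-endian 64-bit)
	 *   bytes 8..11  number of blocks   (big-endian 32-bit)
	 *   bytes 12..15 reserved
	 */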
-static int iblock_execute_write_same(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
- struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
- int ret;
-
- ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
- spc_get_write_same_sectors(cmd), GFP_KERNEL,
- 0);
- if (ret < 0) {
- pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
- return ret;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+ int rc;
+
+ rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
+ spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+ if (rc < 0) {
+ pr_warn("blkdev_issue_discard() failed: %d\n", rc);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
target_complete_cmd(cmd, GOOD);
return 0;
}
+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+ struct iblock_req *ibr;
+ struct scatterlist *sg;
+ struct bio *bio;
+ struct bio_list list;
+ sector_t block_lba = cmd->t_task_lba;
+ sector_t sectors = spc_get_write_same_sectors(cmd);
+
+ sg = &cmd->t_data_sg[0];
+
+ if (cmd->t_data_nents > 1 ||
+ sg->length != cmd->se_dev->dev_attrib.block_size) {
+ pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+ " block_size: %u\n", cmd->t_data_nents, sg->length,
+ cmd->se_dev->dev_attrib.block_size);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+ if (!ibr)
+ goto fail;
+ cmd->priv = ibr;
+
+ bio = iblock_get_bio(cmd, block_lba, 1);
+ if (!bio)
+ goto fail_free_ibr;
+
+ bio_list_init(&list);
+ bio_list_add(&list, bio);
+
+ atomic_set(&ibr->pending, 1);
+
+ while (sectors) {
+ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ != sg->length) {
+
+ bio = iblock_get_bio(cmd, block_lba, 1);
+ if (!bio)
+ goto fail_put_bios;
+
+ atomic_inc(&ibr->pending);
+ bio_list_add(&list, bio);
+ }
+
+ /* Always in 512 byte units for Linux/Block */
+ block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ sectors -= 1;
+ }
+
+ iblock_submit_bios(&list, WRITE);
+ return 0;
+
+fail_put_bios:
+ while ((bio = bio_list_pop(&list)))
+ bio_put(bio);
+fail_free_ibr:
+ kfree(ibr);
+fail:
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
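In the WRITE_SAME loop above, each iteration adds the single one-block payload page to a bio and advances the 512-byte LBA by sg->length >> IBLOCK_LBA_SHIFT while sectors counts down one logical block at a time. A worked example, assuming IBLOCK_LBA_SHIFT is the usual 512-byte shift of 9:

	/* block_size == 4096: sg->length == 4096, so every iteration
	 * writes one logical block and advances block_lba by
	 * 4096 >> 9 == 8 sectors; a WRITE_SAME of 16 blocks therefore
	 * spans 16 * 8 == 128 sectors of 512 bytes. */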
enum {
Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};
@@ -425,11 +547,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
@@ -491,43 +612,26 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t iblock_check_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
- if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
- pr_err("Missing udev_path= parameters for IBLOCK\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
- struct block_device *bd = ibd->ibd_bd;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
char buf[BDEVNAME_SIZE];
ssize_t bl = 0;
if (bd)
bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf));
- if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
+ if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
bl += sprintf(b + bl, " UDEV PATH: %s",
- ibd->ibd_udev_path);
- bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly);
+ ib_dev->ibd_udev_path);
+ bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
- "" : (bd->bd_holder == ibd) ?
+ "" : (bd->bd_holder == ib_dev) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -536,61 +640,8 @@ static ssize_t iblock_show_configfs_dev_params(
return bl;
}
-static void iblock_complete_cmd(struct se_cmd *cmd)
-{
- struct iblock_req *ibr = cmd->priv;
- u8 status;
-
- if (!atomic_dec_and_test(&ibr->pending))
- return;
-
- if (atomic_read(&ibr->ib_bio_err_cnt))
- status = SAM_STAT_CHECK_CONDITION;
- else
- status = SAM_STAT_GOOD;
-
- target_complete_cmd(cmd, status);
- kfree(ibr);
-}
-
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
-{
- struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
- struct bio *bio;
-
- /*
- * Only allocate as many vector entries as the bio code allows us to,
- * we'll loop later on until we have handled the whole request.
- */
- if (sg_num > BIO_MAX_PAGES)
- sg_num = BIO_MAX_PAGES;
-
- bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
- if (!bio) {
- pr_err("Unable to allocate memory for bio\n");
- return NULL;
- }
-
- bio->bi_bdev = ib_dev->ibd_bd;
- bio->bi_private = cmd;
- bio->bi_end_io = &iblock_bio_done;
- bio->bi_sector = lba;
- return bio;
-}
-
-static void iblock_submit_bios(struct bio_list *list, int rw)
-{
- struct blk_plug plug;
- struct bio *bio;
-
- blk_start_plug(&plug);
- while ((bio = bio_list_pop(list)))
- submit_bio(rw, bio);
- blk_finish_plug(&plug);
-}
-
-static int iblock_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
@@ -611,8 +662,8 @@ static int iblock_execute_rw(struct se_cmd *cmd)
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ if (dev->dev_attrib.emulate_write_cache == 0 ||
+ (dev->dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
@@ -625,19 +676,18 @@ static int iblock_execute_rw(struct se_cmd *cmd)
* Convert the blocksize advertised to the initiator to the 512 byte
* units unconditionally used by the Linux block layer.
*/
- if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
+ if (dev->dev_attrib.block_size == 4096)
block_lba = (cmd->t_task_lba << 3);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
+ else if (dev->dev_attrib.block_size == 2048)
block_lba = (cmd->t_task_lba << 2);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
+ else if (dev->dev_attrib.block_size == 1024)
block_lba = (cmd->t_task_lba << 1);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
+ else if (dev->dev_attrib.block_size == 512)
block_lba = cmd->t_task_lba;
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOSYS;
+ " %u\n", dev->dev_attrib.block_size);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -697,83 +747,48 @@ fail_put_bios:
bio_put(bio);
fail_free_ibr:
kfree(ibr);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
- return -ENOMEM;
-}
-
-static u32 iblock_get_device_rev(struct se_device *dev)
-{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 iblock_get_device_type(struct se_device *dev)
-{
- return TYPE_DISK;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
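The block-size conversion near the top of iblock_execute_rw() maps the LBA the initiator used into the 512-byte sectors the block layer expects. An equivalent helper, written out as a sketch (hypothetical function, not part of the patch):

static inline unsigned long long iblock_lba_to_512(unsigned long long lba,
						   unsigned int block_size)
{
	switch (block_size) {
	case 4096: return lba << 3;	/* 4096 / 512 == 8 */
	case 2048: return lba << 2;	/* 2048 / 512 == 4 */
	case 1024: return lba << 1;	/* 1024 / 512 == 2 */
	case 512:  return lba;		/* already 512-byte units */
	default:   return ~0ULL;	/* unsupported, caller must reject */
	}
}

For example, with a 4096-byte block size, SCSI LBA 10 lands at 512-byte sector 80.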
static sector_t iblock_get_blocks(struct se_device *dev)
{
- struct iblock_dev *ibd = dev->dev_ptr;
- struct block_device *bd = ibd->ibd_bd;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
struct request_queue *q = bdev_get_queue(bd);
return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
-static void iblock_bio_done(struct bio *bio, int err)
-{
- struct se_cmd *cmd = bio->bi_private;
- struct iblock_req *ibr = cmd->priv;
-
- /*
- * Set -EIO if !BIO_UPTODATE and the passed is still err=0
- */
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
- err = -EIO;
-
- if (err != 0) {
- pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
- " err: %d\n", bio, err);
- /*
- * Bump the ib_bio_err_cnt and release bio.
- */
- atomic_inc(&ibr->ib_bio_err_cnt);
- smp_mb__after_atomic_inc();
- }
-
- bio_put(bio);
-
- iblock_complete_cmd(cmd);
-}
-
-static struct spc_ops iblock_spc_ops = {
+static struct sbc_ops iblock_sbc_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same,
+ .execute_write_same_unmap = iblock_execute_write_same_unmap,
.execute_unmap = iblock_execute_unmap,
};
-static int iblock_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+iblock_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &iblock_spc_ops);
+ return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
static struct se_subsystem_api iblock_template = {
.name = "iblock",
+ .inquiry_prod = "IBLOCK",
+ .inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
- .allocate_virtdevice = iblock_allocate_virtdevice,
- .create_virtdevice = iblock_create_virtdevice,
+ .alloc_device = iblock_alloc_device,
+ .configure_device = iblock_configure_device,
.free_device = iblock_free_device,
.parse_cdb = iblock_parse_cdb,
- .check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
- .get_device_rev = iblock_get_device_rev,
- .get_device_type = iblock_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
};
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 533627ae79ec..01c2afd81500 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -14,6 +14,7 @@ struct iblock_req {
#define IBDF_HAS_UDEV_PATH 0x01
struct iblock_dev {
+ struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
u32 ibd_flags;
struct bio_set *ibd_bio_set;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0fd428225d11..93e9c1f580b0 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -19,18 +19,12 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
-int target_report_luns(struct se_cmd *);
-void se_release_device_for_hba(struct se_device *);
-void se_release_vpd_for_dev(struct se_device *);
-int se_free_virtual_device(struct se_device *, struct se_hba *);
-int se_dev_check_online(struct se_device *);
-int se_dev_check_shutdown(struct se_device *);
-void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
int se_dev_set_task_timeout(struct se_device *, u32);
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_max_write_same_len(struct se_device *, u32);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -60,6 +54,9 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void);
void core_dev_release_virtual_lun0(void);
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
+int target_configure_device(struct se_device *dev);
+void target_free_device(struct se_device *);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
@@ -105,10 +102,11 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
-int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+void target_qf_do_work(struct work_struct *work);
/* target_core_stat.c */
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void target_stat_setup_dev_default_groups(struct se_device *);
void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8c323a98c4a0..8e0290b38e43 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -4,8 +4,7 @@
* This file contains SPC-3 compliant persistent reservations and
* legacy SPC-2 reservations with compatible reservation handling (CRH=1)
*
- * Copyright (c) 2009, 2010 Rising Tide Systems
- * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -68,49 +67,33 @@ int core_pr_dump_initiator_port(
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
struct t10_pr_registration *, int);
-static int core_scsi2_reservation_seq_non_holder(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u32 pr_reg_type)
+static sense_reason_t
+target_scsi2_reservation_check(struct se_cmd *cmd)
{
- switch (cdb[0]) {
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case RELEASE:
case RELEASE_10:
return 0;
default:
- return 1;
+ break;
}
- return 1;
-}
-
-static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_session *sess = cmd->se_sess;
- int ret;
-
- if (!sess)
+ if (!dev->dev_reserved_node_acl || !sess)
return 0;
- spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_reserved_node_acl || !sess) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- if (dev->dev_reserved_node_acl != sess->se_node_acl) {
- spin_unlock(&dev->dev_reservation_lock);
- return -EINVAL;
- }
- if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
+ if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ return TCM_RESERVATION_CONFLICT;
+
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+ if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+ return TCM_RESERVATION_CONFLICT;
}
- ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
- spin_unlock(&dev->dev_reservation_lock);
- return ret;
+ return 0;
}
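Read as a decision table, the reworked target_scsi2_reservation_check() above boils down to (illustrative summary only):

	/*
	 * INQUIRY, RELEASE(6), RELEASE(10)              -> allow
	 * no reservation held, or no session            -> allow
	 * holder ACL != this session's ACL              -> RESERVATION CONFLICT
	 * ISID-bound reservation with mismatched ISID   -> RESERVATION CONFLICT
	 * otherwise                                     -> allow
	 */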
static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
@@ -120,15 +103,11 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
{
struct se_session *se_sess = cmd->se_sess;
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
- struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
- int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
int conflict = 0;
- if (!crh)
- return -EINVAL;
-
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
if (pr_reg) {
@@ -186,32 +165,28 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EBUSY;
}
return 0;
}
-int target_scsi2_reservation_release(struct se_cmd *cmd)
+sense_reason_t
+target_scsi2_reservation_release(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg;
- int ret = 0, rc;
+ int rc;
if (!sess || !sess->se_tpg)
goto out;
rc = target_check_scsi2_reservation_conflict(cmd);
if (rc == 1)
goto out;
- else if (rc < 0) {
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
- goto out;
- }
+ if (rc < 0)
+ return TCM_RESERVATION_CONFLICT;
- ret = 0;
spin_lock(&dev->dev_reservation_lock);
if (!dev->dev_reserved_node_acl || !sess)
goto out_unlock;
@@ -223,10 +198,10 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
goto out_unlock;
dev->dev_reserved_node_acl = NULL;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
- if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
dev->dev_res_bin_isid = 0;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
}
tpg = sess->se_tpg;
pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
@@ -237,25 +212,24 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
- if (!ret)
- target_complete_cmd(cmd, GOOD);
- return ret;
+ target_complete_cmd(cmd, GOOD);
+ return 0;
}
-int target_scsi2_reservation_reserve(struct se_cmd *cmd)
+sense_reason_t
+target_scsi2_reservation_reserve(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg;
- int ret = 0, rc;
+ sense_reason_t ret = 0;
+ int rc;
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- ret = -EINVAL;
- goto out;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
* This is currently the case for target_core_mod passthrough struct se_cmd
@@ -266,13 +240,10 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
rc = target_check_scsi2_reservation_conflict(cmd);
if (rc == 1)
goto out;
- else if (rc < 0) {
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
+ if (rc < 0)
+ return TCM_RESERVATION_CONFLICT;
+
tpg = sess->se_tpg;
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reserved_node_acl &&
@@ -286,16 +257,15 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out_unlock;
}
dev->dev_reserved_node_acl = sess->se_node_acl;
- dev->dev_flags |= DF_SPC2_RESERVATIONS;
+ dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
if (sess->sess_bin_isid != 0) {
dev->dev_res_bin_isid = sess->sess_bin_isid;
- dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+ dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
}
pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -319,9 +289,9 @@ out:
*/
static int core_scsi3_pr_seq_non_holder(
struct se_cmd *cmd,
- unsigned char *cdb,
u32 pr_reg_type)
{
+ unsigned char *cdb = cmd->t_task_cdb;
struct se_dev_entry *se_deve;
struct se_session *se_sess = cmd->se_sess;
int other_cdb = 0, ignore_reg;
@@ -330,17 +300,11 @@ static int core_scsi3_pr_seq_non_holder(
int we = 0; /* Write Exclusive */
int legacy = 0; /* Act like a legacy device and return
* RESERVATION CONFLICT on some CDBs */
- /*
- * A legacy SPC-2 reservation is being held.
- */
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
- return core_scsi2_reservation_seq_non_holder(cmd,
- cdb, pr_reg_type);
se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Determine if the registration should be ignored due to
- * non-matching ISIDs in core_scsi3_pr_reservation_check().
+ * non-matching ISIDs in target_scsi3_pr_reservation_check().
*/
ignore_reg = (pr_reg_type & 0x80000000);
if (ignore_reg)
@@ -563,10 +527,41 @@ static int core_scsi3_pr_seq_non_holder(
return 1; /* Conflict by default */
}
+static sense_reason_t
+target_scsi3_pr_reservation_check(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ u32 pr_reg_type;
+
+ if (!dev->dev_pr_res_holder)
+ return 0;
+
+ pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+ cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+ if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl)
+ goto check_nonholder;
+
+ if (dev->dev_pr_res_holder->isid_present_at_reg) {
+ if (dev->dev_pr_res_holder->pr_reg_bin_isid !=
+ sess->sess_bin_isid) {
+ pr_reg_type |= 0x80000000;
+ goto check_nonholder;
+ }
+ }
+
+ return 0;
+
+check_nonholder:
+ if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type))
+ return TCM_RESERVATION_CONFLICT;
+ return 0;
+}
+
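Similarly, the new target_scsi3_pr_reservation_check() above can be summarized as (illustrative only):

	/*
	 * no PR holder                                   -> allow
	 * holder registered by this session's ACL, and
	 *   either no ISID recorded or the ISIDs match   -> allow
	 * otherwise core_scsi3_pr_seq_non_holder() decides, with bit 31
	 *   of pr_reg_type flagging an ISID mismatch for that helper
	 */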
static u32 core_scsi3_pr_generation(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
u32 prg;
+
/*
* PRGeneration field shall contain the value of a 32-bit wrapping
 * counter maintained by the device server.
@@ -577,56 +572,12 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
* See spc4r17 section 6.3.12 READ_KEYS service action
*/
spin_lock(&dev->dev_reservation_lock);
- prg = su_dev->t10_pr.pr_generation++;
+ prg = dev->t10_pr.pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
return prg;
}
-static int core_scsi3_pr_reservation_check(
- struct se_cmd *cmd,
- u32 *pr_reg_type)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_session *sess = cmd->se_sess;
- int ret;
-
- if (!sess)
- return 0;
- /*
- * A legacy SPC-2 reservation is being held.
- */
- if (dev->dev_flags & DF_SPC2_RESERVATIONS)
- return core_scsi2_reservation_check(cmd, pr_reg_type);
-
- spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_pr_res_holder) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- *pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
- cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
- if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
- spin_unlock(&dev->dev_reservation_lock);
- return -EINVAL;
- }
- if (!dev->dev_pr_res_holder->isid_present_at_reg) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
- sess->sess_bin_isid) ? 0 : -EINVAL;
- /*
- * Use bit in *pr_reg_type to notify ISID mismatch in
- * core_scsi3_pr_seq_non_holder().
- */
- if (ret != 0)
- *pr_reg_type |= 0x80000000;
- spin_unlock(&dev->dev_reservation_lock);
-
- return ret;
-}
-
static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
@@ -636,7 +587,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
int all_tg_pt,
int aptpl)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
@@ -645,7 +595,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
return NULL;
}
- pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
+ pr_reg->pr_aptpl_buf = kzalloc(dev->t10_pr.pr_aptpl_buf_len,
GFP_ATOMIC);
if (!pr_reg->pr_aptpl_buf) {
pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
@@ -929,7 +879,7 @@ static int __core_scsi3_check_aptpl_registration(
struct se_dev_entry *deve)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
u16 tpgt;
@@ -996,11 +946,10 @@ int core_scsi3_check_aptpl_registration(
struct se_lun *lun,
struct se_lun_acl *lun_acl)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_node_acl *nacl = lun_acl->se_lun_nacl;
struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0;
return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1051,10 +1000,9 @@ static void __core_scsi3_add_registration(
int register_type,
int register_move)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1066,7 +1014,7 @@ static void __core_scsi3_add_registration(
* for the REGISTER.
*/
pr_reg->pr_res_generation = (register_move) ?
- su_dev->t10_pr.pr_generation++ :
+ dev->t10_pr.pr_generation++ :
core_scsi3_pr_generation(dev);
spin_lock(&pr_tmpl->registration_lock);
@@ -1135,7 +1083,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
struct se_node_acl *nacl,
unsigned char *isid)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct se_portal_group *tpg;
@@ -1160,7 +1108,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* for fabric modules (iSCSI) requiring them.
*/
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
- if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
+ if (dev->dev_attrib.enforce_pr_isids)
continue;
}
atomic_inc(&pr_reg->pr_res_holders);
@@ -1274,7 +1222,7 @@ static void __core_scsi3_free_registration(
{
struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
@@ -1335,7 +1283,7 @@ void core_scsi3_free_pr_reg_from_nacl(
struct se_device *dev,
struct se_node_acl *nacl)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
/*
* If the passed se_node_acl matches the reservation holder,
@@ -1365,7 +1313,7 @@ void core_scsi3_free_pr_reg_from_nacl(
void core_scsi3_free_all_registrations(
struct se_device *dev)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
spin_lock(&dev->dev_reservation_lock);
@@ -1479,7 +1427,8 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
smp_mb__after_atomic_dec();
}
-static int core_scsi3_decode_spec_i_port(
+static sense_reason_t
+core_scsi3_decode_spec_i_port(
struct se_cmd *cmd,
struct se_portal_group *tpg,
unsigned char *l_isid,
@@ -1501,8 +1450,9 @@ static int core_scsi3_decode_spec_i_port(
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ sense_reason_t ret;
u32 tpdl, tid_len = 0;
- int ret, dest_local_nexus, prf_isid;
+ int dest_local_nexus, prf_isid;
u32 dest_rtpi = 0;
memset(dest_iport, 0, 64);
@@ -1517,8 +1467,7 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1530,8 +1479,7 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1545,12 +1493,16 @@ static int core_scsi3_decode_spec_i_port(
if (cmd->data_length < 28) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
/*
* For a PERSISTENT RESERVE OUT specify initiator ports payload,
* first extract TransportID Parameter Data Length, and make sure
@@ -1565,9 +1517,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
/*
* Start processing the received transport IDs using the
@@ -1610,16 +1561,13 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_inc();
spin_unlock(&dev->se_port_lock);
- ret = core_scsi3_tpg_depend_item(tmp_tpg);
- if (ret != 0) {
+ if (core_scsi3_tpg_depend_item(tmp_tpg)) {
pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
/*
* Locate the destination initiator ACL to be registered
@@ -1641,17 +1589,14 @@ static int core_scsi3_decode_spec_i_port(
continue;
}
- ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
- if (ret != 0) {
+ if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
dest_tpg = tmp_tpg;
@@ -1668,9 +1613,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
@@ -1683,9 +1627,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
/*
 * Locate the destination struct se_dev_entry pointer for matching
@@ -1702,23 +1645,19 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
- ret = core_scsi3_lunacl_depend_item(dest_se_deve);
- if (ret < 0) {
+ if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
@@ -1754,10 +1693,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -ENOMEM;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = dest_tpg;
@@ -1788,9 +1725,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
@@ -1848,8 +1784,9 @@ static int core_scsi3_decode_spec_i_port(
}
return 0;
-out:
+out_unmap:
transport_kunmap_data_sg(cmd);
+out:
/*
* For the failure case, release everything from tid_dest_list
 * including *dest_pr_reg and the configfs dependencies.
@@ -1899,7 +1836,6 @@ static int __core_scsi3_update_aptpl_buf(
{
struct se_lun *lun;
struct se_portal_group *tpg;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
ssize_t len = 0;
@@ -1917,8 +1853,8 @@ static int __core_scsi3_update_aptpl_buf(
/*
* Walk the registration list..
*/
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
tmp[0] = '\0';
@@ -1963,7 +1899,7 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
@@ -1981,13 +1917,13 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
reg_count++;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
if (!reg_count)
len += sprintf(buf+len, "No Registrations or Reservations");
@@ -2019,7 +1955,7 @@ static int __core_scsi3_write_aptpl_to_file(
unsigned char *buf,
u32 pr_aptpl_buf_len)
{
- struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+ struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
struct iovec iov[1];
mm_segment_t old_fs;
@@ -2065,14 +2001,15 @@ static int __core_scsi3_write_aptpl_to_file(
return 0;
}
-static int core_scsi3_update_and_write_aptpl(
- struct se_device *dev,
- unsigned char *in_buf,
- u32 in_pr_aptpl_buf_len)
+static int
+core_scsi3_update_and_write_aptpl(struct se_device *dev, unsigned char *in_buf,
+ u32 in_pr_aptpl_buf_len)
{
unsigned char null_buf[64], *buf;
u32 pr_aptpl_buf_len;
- int ret, clear_aptpl_metadata = 0;
+ int clear_aptpl_metadata = 0;
+ int ret;
+
/*
* Can be called with a NULL pointer from PROUT service action CLEAR
*/
@@ -2094,25 +2031,17 @@ static int core_scsi3_update_and_write_aptpl(
clear_aptpl_metadata);
if (ret != 0)
return ret;
+
/*
* __core_scsi3_write_aptpl_to_file() will call strlen()
* on the passed buf to determine pr_aptpl_buf_len.
*/
- ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
- if (ret != 0)
- return ret;
-
- return ret;
+ return __core_scsi3_write_aptpl_to_file(dev, buf, 0);
}
-static int core_scsi3_emulate_pro_register(
- struct se_cmd *cmd,
- u64 res_key,
- u64 sa_res_key,
- int aptpl,
- int all_tg_pt,
- int spec_i_pt,
- int ignore_key)
+static sense_reason_t
+core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ int aptpl, int all_tg_pt, int spec_i_pt, int ignore_key)
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
@@ -2120,16 +2049,16 @@ static int core_scsi3_emulate_pro_register(
struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
/* Used for APTPL metadata w/ UNREGISTER */
unsigned char *pr_aptpl_buf = NULL;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
- int pr_holder = 0, ret = 0, type;
+ sense_reason_t ret = TCM_NO_SENSE;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
se_tpg = se_sess->se_tpg;
se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2148,8 +2077,7 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* Do nothing but return GOOD status.
@@ -2163,15 +2091,13 @@ static int core_scsi3_emulate_pro_register(
* Port Endpoint that the PRO was received from on the
* Logical Unit of the SCSI device server.
*/
- ret = core_scsi3_alloc_registration(cmd->se_dev,
+ if (core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, se_deve, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
- ignore_key, 0);
- if (ret != 0) {
+ ignore_key, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
} else {
/*
@@ -2205,201 +2131,192 @@ static int core_scsi3_emulate_pro_register(
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_tmpl->pr_aptpl_active = 1;
pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
}
- core_scsi3_put_pr_reg(pr_reg);
- return ret;
- } else {
- /*
- * Locate the existing *pr_reg via struct se_node_acl pointers
- */
- pr_reg = pr_reg_e;
- type = pr_reg->pr_res_type;
-
- if (!ignore_key) {
- if (res_key != pr_reg->pr_res_key) {
- pr_err("SPC-3 PR REGISTER: Received"
- " res_key: 0x%016Lx does not match"
- " existing SA REGISTER res_key:"
- " 0x%016Lx\n", res_key,
- pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
- }
+ goto out_put_pr_reg;
+ }
+
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = pr_reg_e;
+ type = pr_reg->pr_res_type;
+
+ if (!ignore_key) {
+ if (res_key != pr_reg->pr_res_key) {
+ pr_err("SPC-3 PR REGISTER: Received"
+ " res_key: 0x%016Lx does not match"
+ " existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key,
+ pr_reg->pr_res_key);
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
- if (spec_i_pt) {
- pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
- " set while sa_res_key=0\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ }
+
+ if (spec_i_pt) {
+ pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
+ " set while sa_res_key=0\n");
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
+ }
+
+ /*
+ * An existing ALL_TG_PT=1 registration being released
+ * must also set ALL_TG_PT=1 in the incoming PROUT.
+ */
+ if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+ pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+ " registration exists, but ALL_TG_PT=1 bit not"
+ " present in received PROUT\n");
+ ret = TCM_INVALID_CDB_FIELD;
+ goto out_put_pr_reg;
+ }
+
+ /*
+ * Allocate APTPL metadata buffer used for UNREGISTER ops
+ */
+ if (aptpl) {
+ pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+ GFP_KERNEL);
+ if (!pr_aptpl_buf) {
+ pr_err("Unable to allocate"
+ " pr_aptpl_buf\n");
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
}
- /*
- * An existing ALL_TG_PT=1 registration being released
- * must also set ALL_TG_PT=1 in the incoming PROUT.
- */
- if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
- pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
- " registration exists, but ALL_TG_PT=1 bit not"
- " present in received PROUT\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ }
+
+ /*
+	 * sa_res_key=0: Unregister Reservation Key for registered I_T
+	 * Nexus.  sa_res_key=1: Change Reservation Key for registered I_T
+	 * Nexus.
+ */
+ if (!sa_res_key) {
+ pr_holder = core_scsi3_check_implict_release(
+ cmd->se_dev, pr_reg);
+ if (pr_holder < 0) {
+ kfree(pr_aptpl_buf);
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
+
+ spin_lock(&pr_tmpl->registration_lock);
/*
- * Allocate APTPL metadata buffer used for UNREGISTER ops
+ * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+ * and matching pr_res_key.
*/
- if (aptpl) {
- pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
- GFP_KERNEL);
- if (!pr_aptpl_buf) {
- pr_err("Unable to allocate"
- " pr_aptpl_buf\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ if (pr_reg->pr_reg_all_tg_pt) {
+ list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ if (!pr_reg_p->pr_reg_all_tg_pt)
+ continue;
+ if (pr_reg_p->pr_res_key != res_key)
+ continue;
+ if (pr_reg == pr_reg_p)
+ continue;
+ if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg_p->pr_reg_nacl->initiatorname))
+ continue;
+
+ __core_scsi3_free_registration(dev,
+ pr_reg_p, NULL, 0);
}
}
+
/*
- * sa_res_key=0 Unregister Reservation Key for registered I_T
- * Nexus sa_res_key=1 Change Reservation Key for registered I_T
- * Nexus.
+ * Release the calling I_T Nexus registration now..
*/
- if (!sa_res_key) {
- pr_holder = core_scsi3_check_implict_release(
- cmd->se_dev, pr_reg);
- if (pr_holder < 0) {
- kfree(pr_aptpl_buf);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
- }
-
- spin_lock(&pr_tmpl->registration_lock);
- /*
- * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
- * and matching pr_res_key.
- */
- if (pr_reg->pr_reg_all_tg_pt) {
- list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
- &pr_tmpl->registration_list,
- pr_reg_list) {
-
- if (!pr_reg_p->pr_reg_all_tg_pt)
- continue;
+ __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
- if (pr_reg_p->pr_res_key != res_key)
- continue;
-
- if (pr_reg == pr_reg_p)
- continue;
-
- if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
- pr_reg_p->pr_reg_nacl->initiatorname))
- continue;
-
- __core_scsi3_free_registration(dev,
- pr_reg_p, NULL, 0);
- }
- }
- /*
- * Release the calling I_T Nexus registration now..
- */
- __core_scsi3_free_registration(cmd->se_dev, pr_reg,
- NULL, 1);
- /*
- * From spc4r17, section 5.7.11.3 Unregistering
- *
- * If the persistent reservation is a registrants only
- * type, the device server shall establish a unit
- * attention condition for the initiator port associated
- * with every registered I_T nexus except for the I_T
- * nexus on which the PERSISTENT RESERVE OUT command was
- * received, with the additional sense code set to
- * RESERVATIONS RELEASED.
- */
- if (pr_holder &&
- ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
- (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
- list_for_each_entry(pr_reg_p,
- &pr_tmpl->registration_list,
- pr_reg_list) {
-
- core_scsi3_ua_allocate(
- pr_reg_p->pr_reg_nacl,
- pr_reg_p->pr_res_mapped_lun,
- 0x2A,
- ASCQ_2AH_RESERVATIONS_RELEASED);
- }
+ /*
+ * From spc4r17, section 5.7.11.3 Unregistering
+ *
+ * If the persistent reservation is a registrants only
+ * type, the device server shall establish a unit
+ * attention condition for the initiator port associated
+ * with every registered I_T nexus except for the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command was
+ * received, with the additional sense code set to
+ * RESERVATIONS RELEASED.
+ */
+ if (pr_holder &&
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ list_for_each_entry(pr_reg_p,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ core_scsi3_ua_allocate(
+ pr_reg_p->pr_reg_nacl,
+ pr_reg_p->pr_res_mapped_lun,
+ 0x2A,
+ ASCQ_2AH_RESERVATIONS_RELEASED);
}
- spin_unlock(&pr_tmpl->registration_lock);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
- if (!aptpl) {
- pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
- " for UNREGISTER\n");
- return 0;
- }
+ if (!aptpl) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
+ " for UNREGISTER\n");
+ return 0;
+ }
- ret = core_scsi3_update_and_write_aptpl(dev,
- &pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
- pr_tmpl->pr_aptpl_active = 1;
- pr_debug("SPC-3 PR: Set APTPL Bit Activated"
- " for UNREGISTER\n");
- }
+ if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
+ " for UNREGISTER\n");
+ }
- kfree(pr_aptpl_buf);
- return ret;
- } else {
- /*
- * Increment PRgeneration counter for struct se_device"
- * upon a successful REGISTER, see spc4r17 section 6.3.2
- * READ_KEYS service action.
- */
- pr_reg->pr_res_generation = core_scsi3_pr_generation(
- cmd->se_dev);
- pr_reg->pr_res_key = sa_res_key;
- pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
- " Key for %s to: 0x%016Lx PRgeneration:"
- " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
- (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
- pr_reg->pr_reg_nacl->initiatorname,
- pr_reg->pr_res_key, pr_reg->pr_res_generation);
-
- if (!aptpl) {
- pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- core_scsi3_put_pr_reg(pr_reg);
- pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
- " for REGISTER\n");
- return 0;
- }
+ goto out_free_aptpl_buf;
+ }
- ret = core_scsi3_update_and_write_aptpl(dev,
- &pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
- pr_tmpl->pr_aptpl_active = 1;
- pr_debug("SPC-3 PR: Set APTPL Bit Activated"
- " for REGISTER\n");
- }
+ /*
+	 * Increment PRgeneration counter for struct se_device
+ * upon a successful REGISTER, see spc4r17 section 6.3.2
+ * READ_KEYS service action.
+ */
+ pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev);
+ pr_reg->pr_res_key = sa_res_key;
+ pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+ " Key for %s to: 0x%016Lx PRgeneration:"
+ " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
+ (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+ pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg->pr_res_key, pr_reg->pr_res_generation);
- kfree(pr_aptpl_buf);
- core_scsi3_put_pr_reg(pr_reg);
- }
+ if (!aptpl) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
+ " for REGISTER\n");
+ ret = 0;
+ goto out_put_pr_reg;
}
- return 0;
+
+ if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
+ " for REGISTER\n");
+ }
+
+out_free_aptpl_buf:
+ kfree(pr_aptpl_buf);
+ ret = 0;
+out_put_pr_reg:
+ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
}
unsigned char *core_scsi3_pr_dump_type(int type)
@@ -2424,26 +2341,23 @@ unsigned char *core_scsi3_pr_dump_type(int type)
return "Unknown SPC-3 PR Type";
}
-static int core_scsi3_pro_reserve(
- struct se_cmd *cmd,
- struct se_device *dev,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
{
+ struct se_device *dev = cmd->se_dev;
struct se_session *se_sess = cmd->se_sess;
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
- int ret, prf_isid;
+ sense_reason_t ret;
+ int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2453,8 +2367,7 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2469,9 +2382,8 @@ static int core_scsi3_pro_reserve(
pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2485,9 +2397,8 @@ static int core_scsi3_pro_reserve(
*/
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2518,9 +2429,8 @@ static int core_scsi3_pro_reserve(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2542,9 +2452,8 @@ static int core_scsi3_pro_reserve(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2557,8 +2466,8 @@ static int core_scsi3_pro_reserve(
 * shall complete the command with GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ ret = 0;
+ goto out_put_pr_reg;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2582,27 +2491,24 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata"
" for RESERVE\n");
+ }
}
+ ret = 0;
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ return ret;
}
-static int core_scsi3_emulate_pro_reserve(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
+ u64 res_key)
{
- struct se_device *dev = cmd->se_dev;
- int ret = 0;
-
switch (type) {
case PR_TYPE_WRITE_EXCLUSIVE:
case PR_TYPE_EXCLUSIVE_ACCESS:
@@ -2610,16 +2516,12 @@ static int core_scsi3_emulate_pro_reserve(
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
- ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
- break;
+ return core_scsi3_pro_reserve(cmd, type, scope, res_key);
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
-
- return ret;
}
/*
@@ -2657,23 +2559,21 @@ static void __core_scsi3_complete_pro_release(
pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
}
-static int core_scsi3_emulate_pro_release(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
+ u64 res_key)
{
struct se_device *dev = cmd->se_dev;
struct se_session *se_sess = cmd->se_sess;
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
- int ret, all_reg = 0;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ int all_reg = 0;
+ sense_reason_t ret = 0;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2682,8 +2582,7 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2704,8 +2603,7 @@ static int core_scsi3_emulate_pro_release(
* No persistent reservation, return GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ goto out_put_pr_reg;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2718,9 +2616,9 @@ static int core_scsi3_emulate_pro_release(
* persistent reservation holder. return GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ goto out_put_pr_reg;
}
+
/*
* From spc4r17 Section 5.7.11.2 Releasing:
*
@@ -2740,9 +2638,8 @@ static int core_scsi3_emulate_pro_release(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2763,9 +2660,8 @@ static int core_scsi3_emulate_pro_release(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* In response to a persistent reservation release request from the
@@ -2818,25 +2714,23 @@ static int core_scsi3_emulate_pro_release(
write_aptpl:
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
- &pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+ }
}
-
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ return ret;
}
-static int core_scsi3_emulate_pro_clear(
- struct se_cmd *cmd,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
struct se_session *se_sess = cmd->se_sess;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
u32 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
@@ -2848,8 +2742,7 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2868,8 +2761,7 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* a) Release the persistent reservation, if any;
@@ -2993,28 +2885,22 @@ static void core_scsi3_release_preempt_and_abort(
}
}
-static int core_scsi3_pro_preempt(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key,
- u64 sa_res_key,
- int abort)
+static sense_reason_t
+core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
+ u64 sa_res_key, int abort)
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
struct se_session *se_sess = cmd->se_sess;
LIST_HEAD(preempt_and_abort_list);
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
u32 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
- int prh_type = 0, prh_scope = 0, ret;
+ int prh_type = 0, prh_scope = 0;
- if (!se_sess) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
+ if (!se_sess)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
@@ -3022,19 +2908,16 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
spin_lock(&dev->dev_reservation_lock);
@@ -3047,8 +2930,7 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3142,8 +3024,7 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* For an existing all registrants type reservation
@@ -3162,13 +3043,13 @@ static int core_scsi3_pro_preempt(
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL"
" metadata for PREEMPT%s\n", (abort) ?
"_AND_ABORT" : "");
+ }
}
core_scsi3_put_pr_reg(pr_reg_n);
@@ -3298,12 +3179,12 @@ static int core_scsi3_pro_preempt(
}
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
- "%s\n", (abort) ? "_AND_ABORT" : "");
+ "%s\n", abort ? "_AND_ABORT" : "");
+ }
}
core_scsi3_put_pr_reg(pr_reg_n);
@@ -3311,16 +3192,10 @@ static int core_scsi3_pro_preempt(
return 0;
}
-static int core_scsi3_emulate_pro_preempt(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key,
- u64 sa_res_key,
- int abort)
+static sense_reason_t
+core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
+ u64 res_key, u64 sa_res_key, int abort)
{
- int ret = 0;
-
switch (type) {
case PR_TYPE_WRITE_EXCLUSIVE:
case PR_TYPE_EXCLUSIVE_ACCESS:
@@ -3328,26 +3203,19 @@ static int core_scsi3_emulate_pro_preempt(
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
- ret = core_scsi3_pro_preempt(cmd, type, scope,
- res_key, sa_res_key, abort);
- break;
+ return core_scsi3_pro_preempt(cmd, type, scope, res_key,
+ sa_res_key, abort);
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
-
- return ret;
}
-static int core_scsi3_emulate_pro_register_and_move(
- struct se_cmd *cmd,
- u64 res_key,
- u64 sa_res_key,
- int aptpl,
- int unreg)
+static sense_reason_t
+core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
+ u64 sa_res_key, int aptpl, int unreg)
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
@@ -3358,20 +3226,21 @@ static int core_scsi3_emulate_pro_register_and_move(
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
- int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+ int new_reg = 0, type, scope, matching_iname, prf_isid;
+ sense_reason_t ret;
unsigned short rtpi;
unsigned char proto_ident;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
@@ -3387,8 +3256,7 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* The provided reservation key must match the existing reservation key
@@ -3398,9 +3266,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* The service action reservation key needs to be non-zero
@@ -3408,9 +3275,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!sa_res_key) {
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
/*
@@ -3419,6 +3285,11 @@ static int core_scsi3_emulate_pro_register_and_move(
* information.
*/
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
+ }
+
rtpi = (buf[18] & 0xff) << 8;
rtpi |= buf[19] & 0xff;
tid_len = (buf[20] & 0xff) << 24;
@@ -3432,9 +3303,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
spin_lock(&dev->se_port_lock);
@@ -3452,15 +3322,13 @@ static int core_scsi3_emulate_pro_register_and_move(
smp_mb__after_atomic_inc();
spin_unlock(&dev->se_port_lock);
- ret = core_scsi3_tpg_depend_item(dest_se_tpg);
- if (ret != 0) {
+ if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n");
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
}
spin_lock(&dev->se_port_lock);
@@ -3472,12 +3340,15 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
+ }
proto_ident = (buf[24] & 0x0f);
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
@@ -3489,16 +3360,14 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3506,8 +3375,7 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
@@ -3536,8 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3545,8 +3412,7 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
after_iport_check:
@@ -3566,19 +3432,17 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
- if (ret != 0) {
+
+ if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
@@ -3594,19 +3458,16 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- ret = core_scsi3_lunacl_depend_item(dest_se_deve);
- if (ret < 0) {
+ if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item() failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
@@ -3625,8 +3486,7 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
+ ret = TCM_INVALID_CDB_FIELD;
goto out;
}
/*
@@ -3639,8 +3499,7 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out;
}
/*
@@ -3658,8 +3517,7 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3691,13 +3549,11 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- ret = core_scsi3_alloc_registration(cmd->se_dev,
+ if (core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
- sa_res_key, 0, aptpl, 2, 1);
- if (ret != 0) {
+ sa_res_key, 0, aptpl, 2, 1)) {
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3768,12 +3624,12 @@ after_iport_check:
" REGISTER_AND_MOVE\n");
} else {
pr_tmpl->pr_aptpl_active = 1;
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&dest_pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
" REGISTER_AND_MOVE\n");
+ }
}
transport_kunmap_data_sg(cmd);
@@ -3788,6 +3644,8 @@ out:
if (dest_node_acl)
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_se_tpg);
+
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
return ret;
}
@@ -3805,14 +3663,15 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
/*
* See spc4r17 section 6.14 Table 170
*/
-int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
+sense_reason_t
+target_scsi3_emulate_pr_out(struct se_cmd *cmd)
{
unsigned char *cdb = &cmd->t_task_cdb[0];
unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
- int ret;
+ sense_reason_t ret;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -3823,32 +3682,26 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
- goto out;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* FIXME: A NULL struct se_session pointer means this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
- }
+ if (!cmd->se_sess)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ return TCM_INVALID_PARAMETER_LIST;
}
+
/*
* From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
*/
@@ -3857,6 +3710,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
type = (cdb[2] & 0x0f);
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
@@ -3880,11 +3736,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
- if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
- }
+ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+ return TCM_INVALID_PARAMETER_LIST;
/*
* From spc4r17 section 6.14:
@@ -3899,10 +3752,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ return TCM_INVALID_PARAMETER_LIST;
}
+
/*
* (core_scsi3_emulate_pro_* function parameters
* are defined by spc4r17 Table 174:
@@ -3941,12 +3793,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
- break;
+ return TCM_INVALID_CDB_FIELD;
}
-out:
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
@@ -3957,10 +3806,10 @@ out:
*
* See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
*/
-static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_keys(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u32 add_len = 0, off = 8;
@@ -3968,18 +3817,20 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
/*
* Check for overflow of 8byte PRI READ_KEYS payload and
@@ -3999,7 +3850,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
add_len += 8;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
buf[4] = ((add_len >> 24) & 0xff);
buf[5] = ((add_len >> 16) & 0xff);
@@ -4016,10 +3867,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
*
* See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
*/
-static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_reservation(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u64 pr_res_key;
@@ -4028,18 +3879,20 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- spin_lock(&se_dev->dev_reservation_lock);
- pr_reg = se_dev->dev_pr_res_holder;
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
if (pr_reg) {
/*
* Set the hardcoded Additional Length
@@ -4090,7 +3943,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
}
err:
- spin_unlock(&se_dev->dev_reservation_lock);
+ spin_unlock(&dev->dev_reservation_lock);
transport_kunmap_data_sg(cmd);
return 0;
@@ -4101,21 +3954,23 @@ err:
*
* See spc4r17 section 6.13.4 Table 165
*/
-static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
u16 add_len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
buf[0] = ((add_len << 8) & 0xff);
buf[1] = (add_len & 0xff);
@@ -4157,14 +4012,14 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
*
* See spc4r17 section 6.13.5 Table 168 and 169
*/
-static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_full_status(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
+ struct se_device *dev = cmd->se_dev;
struct se_node_acl *se_nacl;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
@@ -4173,16 +4028,17 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4303,9 +4159,10 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return 0;
}
-int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
+sense_reason_t
+target_scsi3_emulate_pr_in(struct se_cmd *cmd)
{
- int ret;
+ sense_reason_t ret;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -4316,12 +4173,11 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4340,9 +4196,7 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
- break;
+ return TCM_INVALID_CDB_FIELD;
}
if (!ret)
@@ -4350,56 +4204,25 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
return ret;
}
-static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
-{
- return 0;
-}
-
-static int core_pt_seq_non_holder(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u32 pr_reg_type)
+sense_reason_t
+target_check_reservation(struct se_cmd *cmd)
{
- return 0;
-}
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret;
-int core_setup_reservations(struct se_device *dev, int force_pt)
-{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_reservation *rest = &su_dev->t10_pr;
- /*
- * If this device is from Target_Core_Mod/pSCSI, use the reservations
- * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
- * cause a problem because libata and some SATA RAID HBAs appear
- * under Linux/SCSI, but to emulate reservations themselves.
- */
- if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
- rest->res_type = SPC_PASSTHROUGH;
- rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
- rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
- pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
- " emulation\n", dev->transport->name);
+ if (!cmd->se_sess)
+ return 0;
+ if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+ return 0;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
- }
- /*
- * If SPC-3 or above is reported by real or emulated struct se_device,
- * use emulated Persistent Reservations.
- */
- if (dev->transport->get_device_rev(dev) >= SCSI_3) {
- rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
- rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
- rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
- pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
- " emulation\n", dev->transport->name);
- } else {
- rest->res_type = SPC2_RESERVATIONS;
- rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
- rest->pr_ops.t10_seq_non_holder =
- &core_scsi2_reservation_seq_non_holder;
- pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
- dev->transport->name);
- }
- return 0;
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ ret = target_scsi2_reservation_check(cmd);
+ else
+ ret = target_scsi3_pr_reservation_check(cmd);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
}
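A minimal sketch of the error-handling shape this conversion applies throughout target_core_pr.c: each service-action handler now returns a sense_reason_t directly instead of stuffing cmd->scsi_sense_reason and returning -EINVAL, and every early exit funnels through a single out_put_pr_reg label so the registration reference is dropped exactly once. The function below is illustrative only and is not part of the patch; it reuses helpers and sense codes that appear above.

static sense_reason_t
example_pr_service_action(struct se_cmd *cmd, u64 res_key)
{
	struct t10_pr_registration *pr_reg;
	sense_reason_t ret = 0;

	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
				cmd->se_sess->se_node_acl, cmd->se_sess);
	if (!pr_reg)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (pr_reg->pr_res_key != res_key) {
		/* The sense code is the return value; no errno is mixed in. */
		ret = TCM_RESERVATION_CONFLICT;
		goto out_put_pr_reg;
	}

	/* ... service-action specific processing ... */

out_put_pr_reg:
	core_scsi3_put_pr_reg(pr_reg);	/* single release point */
	return ret;
}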
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index af6c460d886d..b4a004247ab2 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
-extern int target_scsi2_reservation_release(struct se_cmd *);
-extern int target_scsi2_reservation_reserve(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
@@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
-extern int target_scsi3_emulate_pr_in(struct se_cmd *);
-extern int target_scsi3_emulate_pr_out(struct se_cmd *);
-extern int core_setup_reservations(struct se_device *, int);
+extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
+extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
+extern sense_reason_t target_check_reservation(struct se_cmd *);
#endif /* TARGET_CORE_PR_H */
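With the target_core_pr.h prototypes switched to sense_reason_t, callers no longer inspect cmd->scsi_sense_reason; success or failure travels in the return value, and on success the PR emulation completes the command itself via target_complete_cmd(cmd, GOOD) as seen above. A hedged sketch of a call site under that convention follows; the dispatch itself is illustrative, not taken from the patch.

	sense_reason_t ret;

	ret = target_scsi3_emulate_pr_out(cmd);
	if (ret) {
		/* A non-zero sense_reason_t is turned into CHECK CONDITION. */
		transport_send_check_condition_and_sense(cmd, ret, 0);
		return;
	}
	/* On success the emulation has already completed the command. */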
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 617c086a8a02..2bcfd79cf595 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -3,10 +3,7 @@
*
* This file contains the generic target mode <-> Linux SCSI subsystem plugin.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -53,9 +50,14 @@
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
+static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct pscsi_dev_virt, dev);
+}
+
static struct se_subsystem_api pscsi_template;
-static int pscsi_execute_cmd(struct se_cmd *cmd);
+static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, int);
/* pscsi_attach_hba():
@@ -219,7 +221,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
- wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+ wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
kfree(buf);
return 0;
@@ -299,23 +301,13 @@ out:
kfree(buf);
}
-/* pscsi_add_device_to_list():
- *
- *
- */
-static struct se_device *pscsi_add_device_to_list(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- struct pscsi_dev_virt *pdv,
- struct scsi_device *sd,
- int dev_flags)
+static int pscsi_add_device_to_list(struct se_device *dev,
+ struct scsi_device *sd)
{
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct request_queue *q;
- struct queue_limits *limits;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct request_queue *q = sd->request_queue;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ pdv->pdv_sd = sd;
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
@@ -324,54 +316,27 @@ static struct se_device *pscsi_add_device_to_list(
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = sd->request_queue;
- limits = &dev_limits.limits;
- limits->logical_block_size = sd->sector_size;
- limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
- limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
- dev_limits.hw_queue_depth = sd->queue_depth;
- dev_limits.queue_depth = sd->queue_depth;
- /*
- * Setup our standard INQUIRY info into se_dev->t10_wwn
- */
- pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_max_sectors =
+ min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
- * Set the pointer pdv->pdv_sd to from passed struct scsi_device,
- * which has already been referenced with Linux SCSI code with
- * scsi_device_get() in this file's pscsi_create_virtdevice().
- *
- * The passthrough operations called by the transport_add_device_*
- * function below will require this pointer to be set for passthroug
- * ops.
- *
- * For the shutdown case in pscsi_free_device(), this struct
- * scsi_device reference is released with Linux SCSI code
- * scsi_device_put() and the pdv->pdv_sd cleared.
+ * Setup our standard INQUIRY info into se_dev->t10_wwn
*/
- pdv->pdv_sd = sd;
- dev = transport_add_device_to_core_hba(hba, &pscsi_template,
- se_dev, dev_flags, pdv,
- &dev_limits, NULL, NULL);
- if (!dev) {
- pdv->pdv_sd = NULL;
- return NULL;
- }
+ pscsi_set_inquiry_info(sd, &dev->t10_wwn);
/*
* Locate VPD WWN Information used for various purposes within
* the Storage Engine.
*/
- if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+ if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
/*
* If VPD Unit Serial returned GOOD status, try
* VPD Device Identification page (0x83).
*/
- pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+ pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
}
/*
@@ -379,10 +344,11 @@ static struct se_device *pscsi_add_device_to_list(
*/
if (sd->type == TYPE_TAPE)
pscsi_tape_read_blocksize(dev, sd);
- return dev;
+ return 0;
}
-static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *pscsi_alloc_device(struct se_hba *hba,
+ const char *name)
{
struct pscsi_dev_virt *pdv;
@@ -391,139 +357,125 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
- pdv->pdv_se_hba = hba;
pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
- return pdv;
+ return &pdv->dev;
}
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static struct se_device *pscsi_create_type_disk(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
struct block_device *bd;
- u32 dev_flags = 0;
+ int ret;
if (scsi_device_get(sd)) {
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
- return NULL;
+ return -EIO;
}
spin_unlock_irq(sh->host_lock);
/*
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK using supplied udev_path
*/
- bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+ bd = blkdev_get_by_path(dev->udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (IS_ERR(bd)) {
pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
- return NULL;
+ return PTR_ERR(bd);
}
pdv->pdv_bd = bd;
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev) {
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
- return NULL;
+ return ret;
}
+
pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
-
- return dev;
+ return 0;
}
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static struct se_device *pscsi_create_type_rom(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
- u32 dev_flags = 0;
+ int ret;
if (scsi_device_get(sd)) {
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
- return NULL;
+ return -EIO;
}
spin_unlock_irq(sh->host_lock);
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev) {
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret) {
scsi_device_put(sd);
- return NULL;
+ return ret;
}
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
- return dev;
+ return 0;
}
/*
- *Called with struct Scsi_Host->host_lock called.
+ * Called with struct Scsi_Host->host_lock called.
*/
-static struct se_device *pscsi_create_type_other(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_other(struct se_device *dev,
+ struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
- u32 dev_flags = 0;
+ int ret;
spin_unlock_irq(sh->host_lock);
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev)
- return NULL;
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret)
+ return ret;
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
-
- return dev;
+ return 0;
}
-static struct se_device *pscsi_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int pscsi_configure_device(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = p;
- struct se_device *dev;
+ struct se_hba *hba = dev->se_hba;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd;
- struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
+ int ret;
- if (!pdv) {
- pr_err("Unable to locate struct pscsi_dev_virt"
- " parameter\n");
- return ERR_PTR(-EINVAL);
+ if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+ pr_err("Missing scsi_channel_id=, scsi_target_id= and"
+ " scsi_lun_id= parameters\n");
+ return -EINVAL;
}
+
/*
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
@@ -532,16 +484,16 @@ static struct se_device *pscsi_create_virtdevice(
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
/*
* For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
* reference, we enforce that udev_path has been set
*/
- if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+ if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/*
* If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
@@ -549,17 +501,14 @@ static struct se_device *pscsi_create_virtdevice(
* and enable for PHV_LLD_SCSI_HOST_NO mode.
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
- spin_lock(&hba->device_lock);
- if (!list_empty(&hba->hba_dev_list)) {
+ if (hba->dev_count) {
pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
- spin_unlock(&hba->device_lock);
- return ERR_PTR(-EEXIST);
+ return -EEXIST;
}
- spin_unlock(&hba->device_lock);
if (pscsi_pmode_enable_hba(hba, 1) != 1)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
legacy_mode_enable = 1;
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
@@ -569,14 +518,14 @@ static struct se_device *pscsi_create_virtdevice(
if (IS_ERR(sh)) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return ERR_CAST(sh);
+ return PTR_ERR(sh);
}
}
} else {
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
" struct Scsi_Host exists\n");
- return ERR_PTR(-EEXIST);
+ return -EEXIST;
}
}
@@ -593,17 +542,17 @@ static struct se_device *pscsi_create_virtdevice(
*/
switch (sd->type) {
case TYPE_DISK:
- dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_disk(dev, sd);
break;
case TYPE_ROM:
- dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_rom(dev, sd);
break;
default:
- dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_other(dev, sd);
break;
}
- if (!dev) {
+ if (ret) {
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
@@ -611,9 +560,9 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
pdv->pdv_sd = NULL;
- return ERR_PTR(-ENODEV);
+ return ret;
}
- return dev;
+ return 0;
}
spin_unlock_irq(sh->host_lock);
@@ -627,17 +576,13 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
-/* pscsi_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void pscsi_free_device(void *p)
+static void pscsi_free_device(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = p;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct scsi_device *sd = pdv->pdv_sd;
if (sd) {
@@ -670,7 +615,7 @@ static void pscsi_free_device(void *p)
static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
unsigned char *sense_buffer)
{
- struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = cmd->priv;
@@ -694,7 +639,11 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
- unsigned char *buf = transport_kmap_data_sg(cmd);
+ unsigned char *buf;
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -770,13 +719,11 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page,
- ssize_t count)
+static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
- struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -841,29 +788,10 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t pscsi_check_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev)
+static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
-
- if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
- !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
- !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
- pr_err("Missing scsi_channel_id=, scsi_target_id= and"
- " scsi_lun_id= parameters\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct pscsi_hba_virt *phv = hba->hba_ptr;
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
unsigned char host_id[16];
ssize_t bl;
@@ -929,11 +857,11 @@ static inline struct bio *pscsi_get_bio(int sg_num)
return bio;
}
-static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction,
- struct bio **hbio)
+static sense_reason_t
+pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction, struct bio **hbio)
{
- struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct bio *bio = NULL, *tbio = NULL;
struct page *page;
struct scatterlist *sg;
@@ -1019,7 +947,7 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
}
}
- return sgl_nents;
+ return 0;
fail:
while (*hbio) {
bio = *hbio;
@@ -1027,8 +955,7 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
@@ -1055,17 +982,13 @@ static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
}
}
-static int pscsi_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+pscsi_parse_cdb(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
- unsigned int dummy_size;
- int ret;
- if (cmd->se_cmd_flags & SCF_BIDI) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
- }
+ if (cmd->se_cmd_flags & SCF_BIDI)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
pscsi_clear_cdb_lun(cdb);
@@ -1076,10 +999,8 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
*/
switch (cdb[0]) {
case REPORT_LUNS:
- ret = spc_parse_cdb(cmd, &dummy_size);
- if (ret)
- return ret;
- break;
+ cmd->execute_cmd = spc_emulate_report_luns;
+ return 0;
case READ_6:
case READ_10:
case READ_12:
@@ -1093,22 +1014,21 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
/* FALLTHROUGH*/
default:
cmd->execute_cmd = pscsi_execute_cmd;
- break;
+ return 0;
}
-
- return 0;
}
-static int pscsi_execute_cmd(struct se_cmd *cmd)
+static sense_reason_t
+pscsi_execute_cmd(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
- struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct pscsi_plugin_task *pt;
struct request *req;
struct bio *hbio;
- int ret;
+ sense_reason_t ret;
/*
* Dynamically alloc cdb space, since it may be larger than
@@ -1116,8 +1036,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
*/
pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
if (!pt) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
cmd->priv = pt;
@@ -1131,24 +1050,21 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail;
}
} else {
BUG_ON(!cmd->data_length);
ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
- if (ret < 0) {
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (ret)
goto fail;
- }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("pSCSI: blk_make_request() failed\n");
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail_free_bio;
}
}
@@ -1179,22 +1095,10 @@ fail_free_bio:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
kfree(pt);
- return -ENOMEM;
-}
-
-/* pscsi_get_device_rev():
- *
- *
- */
-static u32 pscsi_get_device_rev(struct se_device *dev)
-{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
- struct scsi_device *sd = pdv->pdv_sd;
-
- return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+ return ret;
}
/* pscsi_get_device_type():
@@ -1203,7 +1107,7 @@ static u32 pscsi_get_device_rev(struct se_device *dev)
*/
static u32 pscsi_get_device_type(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
return sd->type;
@@ -1211,7 +1115,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
static sector_t pscsi_get_blocks(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
@@ -1243,7 +1147,6 @@ static void pscsi_req_done(struct request *req, int uptodate)
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
pt->pscsi_result);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
@@ -1259,15 +1162,13 @@ static struct se_subsystem_api pscsi_template = {
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
- .allocate_virtdevice = pscsi_allocate_virtdevice,
- .create_virtdevice = pscsi_create_virtdevice,
+ .alloc_device = pscsi_alloc_device,
+ .configure_device = pscsi_configure_device,
.free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete,
.parse_cdb = pscsi_parse_cdb,
- .check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
- .get_device_rev = pscsi_get_device_rev,
.get_device_type = pscsi_get_device_type,
.get_blocks = pscsi_get_blocks,
};
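For context, a sketch of the backend shape this series moves pSCSI (and rd_mcp below) towards: the driver-private structure embeds struct se_device, a container_of() helper recovers it, and device setup is split into alloc_device()/configure_device(). The struct and PSCSI_DEV() mirror the patch; the example_* functions are illustrative assumptions, not the actual implementation.

struct pscsi_dev_virt {
	struct se_device dev;		/* embedded core device, new in this series */
	struct scsi_device *pdv_sd;
	/* ... */
};

static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
{
	/* Recover the backend-private container from the embedded se_device. */
	return container_of(dev, struct pscsi_dev_virt, dev);
}

static struct se_device *example_alloc_device(struct se_hba *hba, const char *name)
{
	struct pscsi_dev_virt *pdv = kzalloc(sizeof(*pdv), GFP_KERNEL);

	if (!pdv)
		return NULL;
	return &pdv->dev;		/* the core only ever sees the embedded member */
}

static int example_configure_device(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);

	/* Validate configfs parameters and fill dev->dev_attrib.hw_* here. */
	return 0;
}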
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index bc1e5e11eca0..1bd757dff8ee 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
#define PDF_HAS_VIRT_HOST_ID 0x20
struct pscsi_dev_virt {
+ struct se_device dev;
int pdv_flags;
int pdv_host_id;
int pdv_channel_id;
@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
int pdv_lun_id;
struct block_device *pdv_bd;
struct scsi_device *pdv_sd;
- struct se_hba *pdv_se_hba;
} ____cacheline_aligned;
typedef enum phv_modes {
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index d00bbe33ff8b..0457de362e68 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -4,10 +4,7 @@
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -41,7 +38,10 @@
#include "target_core_rd.h"
-static struct se_subsystem_api rd_mcp_template;
+static inline struct rd_dev *RD_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct rd_dev, dev);
+}
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -196,7 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
return 0;
}
-static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
@@ -209,39 +209,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
rd_dev->rd_host = rd_host;
- return rd_dev;
+ return &rd_dev->dev;
}
-static struct se_device *rd_create_virtdevice(struct se_hba *hba,
- struct se_subsystem_dev *se_dev, void *p)
+static int rd_configure_device(struct se_device *dev)
{
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct rd_dev *rd_dev = p;
- struct rd_host *rd_host = hba->hba_ptr;
- int dev_flags = 0, ret;
- char prod[16], rev[4];
+ struct rd_dev *rd_dev = RD_DEV(dev);
+ struct rd_host *rd_host = dev->se_hba->hba_ptr;
+ int ret;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+ pr_debug("Missing rd_pages= parameter\n");
+ return -EINVAL;
+ }
ret = rd_build_device_space(rd_dev);
if (ret < 0)
goto fail;
- snprintf(prod, 16, "RAMDISK-MCP");
- snprintf(rev, 4, "%s", RD_MCP_VERSION);
-
- dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
- dev_limits.limits.max_hw_sectors = UINT_MAX;
- dev_limits.limits.max_sectors = UINT_MAX;
- dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
-
- dev = transport_add_device_to_core_hba(hba,
- &rd_mcp_template, se_dev, dev_flags, rd_dev,
- &dev_limits, prod, rev);
- if (!dev)
- goto fail;
+ dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
+ dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
@@ -251,16 +239,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
- return dev;
+ return 0;
fail:
rd_release_device_space(rd_dev);
- return ERR_PTR(ret);
+ return ret;
}
-static void rd_free_device(void *p)
+static void rd_free_device(struct se_device *dev)
{
- struct rd_dev *rd_dev = p;
+ struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
kfree(rd_dev);
@@ -284,13 +272,14 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-static int rd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+rd_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev;
- struct rd_dev *dev = se_dev->dev_ptr;
+ struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
@@ -300,14 +289,14 @@ static int rd_execute_rw(struct se_cmd *cmd)
u32 src_len;
u64 tmp;
- tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp;
rd_size = cmd->data_length;
table = rd_get_sg_table(dev, rd_page);
if (!table)
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
rd_sg = &table->sg_table[rd_page - table->page_start_offset];
@@ -357,7 +346,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
table = rd_get_sg_table(dev, rd_page);
if (!table) {
sg_miter_stop(&m);
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/* since we increment, the first sg entry is correct */
@@ -378,13 +367,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t rd_set_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page,
- ssize_t count)
+static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -417,24 +403,10 @@ static ssize_t rd_set_configfs_dev_params(
return (!ret) ? count : ret;
}
-static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ struct rd_dev *rd_dev = RD_DEV(dev);
- if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
- pr_debug("Missing rd_pages= parameter\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t rd_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
@@ -443,48 +415,40 @@ static ssize_t rd_show_configfs_dev_params(
return bl;
}
-static u32 rd_get_device_rev(struct se_device *dev)
-{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 rd_get_device_type(struct se_device *dev)
-{
- return TYPE_DISK;
-}
-
static sector_t rd_get_blocks(struct se_device *dev)
{
- struct rd_dev *rd_dev = dev->dev_ptr;
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
- dev->se_sub_dev->se_dev_attrib.block_size) - 1;
+ dev->dev_attrib.block_size) - 1;
return blocks_long;
}
-static struct spc_ops rd_spc_ops = {
+static struct sbc_ops rd_sbc_ops = {
.execute_rw = rd_execute_rw,
};
-static int rd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+rd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &rd_spc_ops);
+ return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
+ .inquiry_prod = "RAMDISK-MCP",
+ .inquiry_rev = RD_MCP_VERSION,
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
- .allocate_virtdevice = rd_allocate_virtdevice,
- .create_virtdevice = rd_create_virtdevice,
+ .alloc_device = rd_alloc_device,
+ .configure_device = rd_configure_device,
.free_device = rd_free_device,
.parse_cdb = rd_parse_cdb,
- .check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_device_rev = rd_get_device_rev,
- .get_device_type = rd_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = rd_get_blocks,
};
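
[Editor's note: a minimal userspace sketch of the embedded-struct/container_of pattern the rd_mcp hunks above switch to, where the backend embeds the generic device and recovers its private state from a pointer to that member instead of a void *dev_ptr cookie. The names demo_dev and ramdisk_dev are illustrative stand-ins, not kernel definitions.]

/* Sketch only: backend struct embeds the generic device and uses
 * container_of() to get back to itself. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_dev {			/* stands in for struct se_device */
	unsigned int dev_index;
};

struct ramdisk_dev {			/* stands in for struct rd_dev */
	struct demo_dev dev;		/* embedded, not pointed to */
	unsigned int rd_page_count;
};

static struct ramdisk_dev *RD_DEV(struct demo_dev *dev)
{
	return container_of(dev, struct ramdisk_dev, dev);
}

int main(void)
{
	struct ramdisk_dev rd = { .dev = { .dev_index = 1 }, .rd_page_count = 64 };
	struct demo_dev *generic = &rd.dev;	/* what core code passes around */

	/* Backend code recovers its private state without a void * cookie. */
	printf("pages=%u\n", RD_DEV(generic)->rd_page_count);
	return 0;
}
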
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 21458125fe51..933b38b6e563 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01
struct rd_dev {
+ struct se_device dev;
u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a6e27d967c7b..a664c664a31a 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1,10 +1,7 @@
/*
* SCSI Block Commands (SBC) parsing and emulation.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -37,7 +34,8 @@
#include "target_core_ua.h"
-static int sbc_emulate_readcapacity(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
@@ -54,10 +52,10 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
- buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+ buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
+ buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
+ buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
+ buf[7] = dev->dev_attrib.block_size & 0xff;
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
@@ -69,7 +67,8 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
return 0;
}
-static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *rbuf;
@@ -85,15 +84,15 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
- buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+ buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
+ buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
+ buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
+ buf[11] = dev->dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
buf[14] = 0x80;
rbuf = transport_kmap_data_sg(cmd);
@@ -106,7 +105,7 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
return 0;
}
-int spc_get_write_same_sectors(struct se_cmd *cmd)
+sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
{
u32 num_blocks;
@@ -129,13 +128,8 @@ int spc_get_write_same_sectors(struct se_cmd *cmd)
}
EXPORT_SYMBOL(spc_get_write_same_sectors);
-static int sbc_emulate_verify(struct se_cmd *cmd)
-{
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-static int sbc_emulate_noop(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_noop(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
return 0;
@@ -143,7 +137,7 @@ static int sbc_emulate_noop(struct se_cmd *cmd)
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
- return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+ return cmd->se_dev->dev_attrib.block_size * sectors;
}
static int sbc_check_valid_sectors(struct se_cmd *cmd)
@@ -152,7 +146,7 @@ static int sbc_check_valid_sectors(struct se_cmd *cmd)
unsigned long long end_lba;
u32 sectors;
- sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
+ sectors = cmd->data_length / dev->dev_attrib.block_size;
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
@@ -236,26 +230,37 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
-static int sbc_write_same_supported(struct se_device *dev,
- unsigned char *flags)
+static sense_reason_t
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
+ unsigned int sectors = spc_get_write_same_sectors(cmd);
+
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
- return -ENOSYS;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+ if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
+ pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
+ sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+ return TCM_INVALID_CDB_FIELD;
}
-
/*
- * Currently for the emulated case we only accept
- * tpws with the UNMAP=1 bit set.
+ * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
+ * translated into block discard requests within backend code.
*/
- if (!(flags[0] & 0x08)) {
- pr_err("WRITE_SAME w/o UNMAP bit not"
- " supported for Block Discard Emulation\n");
- return -ENOSYS;
+ if (flags[0] & 0x08) {
+ if (!ops->execute_write_same_unmap)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ cmd->execute_cmd = ops->execute_write_same_unmap;
+ return 0;
}
+ if (!ops->execute_write_same)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ cmd->execute_cmd = ops->execute_write_same;
return 0;
}
@@ -313,14 +318,14 @@ out:
kfree(buf);
}
-int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
+sense_reason_t
+sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
unsigned int size;
u32 sectors = 0;
- int ret;
+ sense_reason_t ret;
switch (cdb[0]) {
case READ_6:
@@ -379,9 +384,9 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
cmd->execute_cmd = ops->execute_rw;
break;
case XDWRITEREAD_10:
- if ((cmd->data_direction != DMA_TO_DEVICE) ||
+ if (cmd->data_direction != DMA_TO_DEVICE ||
!(cmd->se_cmd_flags & SCF_BIDI))
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
@@ -419,27 +424,24 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
- if (!ops->execute_write_same)
- goto out_unsupported_cdb;
-
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- if (sbc_write_same_supported(dev, &cdb[10]) < 0)
- goto out_unsupported_cdb;
- cmd->execute_cmd = ops->execute_write_same;
+ ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+ if (ret)
+ return ret;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
- goto out_unsupported_cdb;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
}
break;
}
@@ -455,7 +457,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
@@ -463,7 +465,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
if (!ops->execute_sync_cache)
- goto out_unsupported_cdb;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
@@ -484,42 +486,36 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
*/
if (cmd->t_task_lba || sectors) {
if (sbc_check_valid_sectors(cmd) < 0)
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
cmd->execute_cmd = ops->execute_sync_cache;
break;
case UNMAP:
if (!ops->execute_unmap)
- goto out_unsupported_cdb;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = ops->execute_unmap;
break;
case WRITE_SAME_16:
- if (!ops->execute_write_same)
- goto out_unsupported_cdb;
-
sectors = transport_get_sectors_16(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- if (sbc_write_same_supported(dev, &cdb[1]) < 0)
- goto out_unsupported_cdb;
- cmd->execute_cmd = ops->execute_write_same;
+ ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ if (ret)
+ return ret;
break;
case WRITE_SAME:
- if (!ops->execute_write_same)
- goto out_unsupported_cdb;
-
sectors = transport_get_sectors_10(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
@@ -529,13 +525,13 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
- if (sbc_write_same_supported(dev, &cdb[1]) < 0)
- goto out_unsupported_cdb;
- cmd->execute_cmd = ops->execute_write_same;
+ ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ if (ret)
+ return ret;
break;
case VERIFY:
size = 0;
- cmd->execute_cmd = sbc_emulate_verify;
+ cmd->execute_cmd = sbc_emulate_noop;
break;
case REZERO_UNIT:
case SEEK_6:
@@ -557,24 +553,24 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
/* reject any command that we don't have a handler for */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
- goto out_unsupported_cdb;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
- if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
+ if (sectors > dev->dev_attrib.fabric_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors,
- su_dev->se_dev_attrib.fabric_max_sectors);
- goto out_invalid_cdb_field;
+ dev->dev_attrib.fabric_max_sectors);
+ return TCM_INVALID_CDB_FIELD;
}
- if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
+ if (sectors > dev->dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors,
- su_dev->se_dev_attrib.hw_max_sectors);
- goto out_invalid_cdb_field;
+ dev->dev_attrib.hw_max_sectors);
+ return TCM_INVALID_CDB_FIELD;
}
end_lba = dev->transport->get_blocks(dev) + 1;
@@ -582,25 +578,18 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
pr_err("cmd exceeds last lba %llu "
"(lba %llu, sectors %u)\n",
end_lba, cmd->t_task_lba, sectors);
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, sectors);
}
- ret = target_cmd_size_check(cmd, size);
- if (ret < 0)
- return ret;
-
- return 0;
-
-out_unsupported_cdb:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
-out_invalid_cdb_field:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
+
+u32 sbc_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+EXPORT_SYMBOL(sbc_get_device_type);
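
[Editor's note: a hedged sketch of the error-handling shape adopted in the sbc hunks above — parse/setup helpers return a sense-style value directly and assign the chosen handler into the command, rather than returning -EINVAL and stashing cmd->scsi_sense_reason. The enum values and struct names below are illustrative, not the kernel's definitions; the flow mirrors sbc_setup_write_same() picking an UNMAP or plain WRITE_SAME handler only if the backend provides one.]

#include <stdio.h>

enum demo_sense_reason {
	DEMO_NO_SENSE = 0,
	DEMO_UNSUPPORTED_OPCODE,
	DEMO_INVALID_CDB_FIELD,
};

struct demo_cmd;
typedef enum demo_sense_reason (*demo_exec_fn)(struct demo_cmd *);

struct demo_ops {
	demo_exec_fn execute_write_same;
	demo_exec_fn execute_write_same_unmap;
};

struct demo_cmd {
	unsigned int sectors;
	unsigned int max_write_same_len;
	demo_exec_fn execute_cmd;
};

/* Validate the request, then select the UNMAP or plain handler. */
static enum demo_sense_reason
demo_setup_write_same(struct demo_cmd *cmd, unsigned char flags,
		      const struct demo_ops *ops)
{
	if (cmd->sectors > cmd->max_write_same_len)
		return DEMO_INVALID_CDB_FIELD;

	if (flags & 0x08) {			/* UNMAP=1 */
		if (!ops->execute_write_same_unmap)
			return DEMO_UNSUPPORTED_OPCODE;
		cmd->execute_cmd = ops->execute_write_same_unmap;
		return DEMO_NO_SENSE;
	}
	if (!ops->execute_write_same)
		return DEMO_UNSUPPORTED_OPCODE;
	cmd->execute_cmd = ops->execute_write_same;
	return DEMO_NO_SENSE;
}

static enum demo_sense_reason demo_ws(struct demo_cmd *cmd)
{
	(void)cmd;
	return DEMO_NO_SENSE;
}

int main(void)
{
	struct demo_ops ops = { .execute_write_same = demo_ws };
	struct demo_cmd cmd = { .sectors = 8, .max_write_same_len = 4096 };

	if (demo_setup_write_same(&cmd, 0x00, &ops) == DEMO_NO_SENSE)
		printf("handler selected: %s\n",
		       cmd.execute_cmd == demo_ws ? "write_same" : "?");
	return 0;
}
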
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 6fd434d3d7e4..2d88f087d961 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1,10 +1,7 @@
/*
* SCSI Primary Commands (SPC) parsing and emulation.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -69,7 +66,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
-static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+static sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
@@ -78,7 +76,7 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
buf[1] = 0x80;
- buf[2] = dev->transport->get_device_rev(dev);
+ buf[2] = 0x05; /* SPC-3 */
/*
* NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
@@ -95,34 +93,32 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
- if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
- spc_fill_alua_data(lun->lun_sep, buf);
+ spc_fill_alua_data(lun->lun_sep, buf);
buf[7] = 0x2; /* CmdQue=1 */
snprintf(&buf[8], 8, "LIO-ORG");
- snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
- snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
+ snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
+ snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
buf[4] = 31; /* Set additional length to 31 */
return 0;
}
/* unit serial number */
-static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u16 len = 0;
- if (dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
+ if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
- unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
+ unit_serial_len = strlen(dev->t10_wwn.unit_serial);
unit_serial_len++; /* For NULL Terminator */
- len += sprintf(&buf[4], "%s",
- dev->se_sub_dev->t10_wwn.unit_serial);
+ len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -132,7 +128,7 @@ static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
unsigned char *buf)
{
- unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+ unsigned char *p = &dev->t10_wwn.unit_serial[0];
int cnt;
bool next = true;
@@ -164,7 +160,8 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
-static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun;
@@ -173,7 +170,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
+ unsigned char *prod = &dev->t10_wwn.model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
u16 len = 0, id_len;
@@ -188,7 +185,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
* value in order to return the NAA id.
*/
- if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+ if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
goto check_t10_vend_desc;
/* CODE SET == Binary */
@@ -236,14 +233,12 @@ check_t10_vend_desc:
prod_len += strlen(prod);
prod_len++; /* For : */
- if (dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
- unit_serial_len =
- strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+ unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
id_len += sprintf(&buf[off+12], "%s:%s", prod,
- &dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ &dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -298,10 +293,6 @@ check_t10_vend_desc:
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
- if (dev->se_sub_dev->t10_alua.alua_type !=
- SPC3_ALUA_EMULATED)
- goto check_scsi_name;
-
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
goto check_lu_gp;
@@ -415,20 +406,22 @@ check_scsi_name:
}
/* Extended INQUIRY Data VPD Page */
-static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+ if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
buf[6] = 0x01;
return 0;
}
/* Block Limits VPD page */
-static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
@@ -439,7 +432,7 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 * emulate_tpu=1 or emulate_tpws=1 we expect a
* different page length for Thin Provisioning.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
have_tp = 1;
buf[0] = dev->transport->get_device_type(dev);
@@ -456,62 +449,70 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+ dev->dev_attrib.hw_max_sectors);
put_unaligned_be32(max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
+ put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP.
*/
if (!have_tp)
- return 0;
+ goto max_write_same;
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
+ put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
+ put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
&buf[24]);
/*
* Set OPTIMAL UNMAP GRANULARITY
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
+ put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
/*
* UNMAP GRANULARITY ALIGNMENT
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
+ put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
&buf[32]);
- if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
+ if (dev->dev_attrib.unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */
+ /*
+ * MAXIMUM WRITE SAME LENGTH
+ */
+max_write_same:
+ put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+
return 0;
}
/* Block Device Characteristics VPD page */
-static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
buf[0] = dev->transport->get_device_type(dev);
buf[3] = 0x3c;
- buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0;
+ buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
return 0;
}
/* Thin Provisioning VPD */
-static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
@@ -546,7 +547,7 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
+ if (dev->dev_attrib.emulate_tpu != 0)
buf[5] = 0x80;
/*
@@ -555,17 +556,18 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
+ if (dev->dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40;
return 0;
}
-static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
static struct {
uint8_t page;
- int (*emulate)(struct se_cmd *, unsigned char *);
+ sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
@@ -577,7 +579,8 @@ static struct {
};
/* supported vital product data pages */
-static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
int p;
@@ -586,8 +589,7 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
* Registered Extended LUN WWN has been set via ConfigFS
* during device creation/restart.
*/
- if (cmd->se_dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
+ if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
buf[3] = ARRAY_SIZE(evpd_handlers);
for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
buf[p + 4] = evpd_handlers[p].page;
@@ -596,14 +598,16 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-static int spc_emulate_inquiry(struct se_cmd *cmd)
+static sense_reason_t
+spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char buf[SE_INQUIRY_BUF];
- int p, ret;
+ sense_reason_t ret;
+ int p;
memset(buf, 0, SE_INQUIRY_BUF);
@@ -616,8 +620,7 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
if (cdb[2]) {
pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
cdb[2]);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
+ ret = TCM_INVALID_CDB_FIELD;
goto out;
}
@@ -634,8 +637,7 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
+ ret = TCM_INVALID_CDB_FIELD;
out:
rbuf = transport_kmap_data_sg(cmd);
@@ -649,18 +651,28 @@ out:
return ret;
}
-static int spc_modesense_rwrecovery(unsigned char *p)
+static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
{
p[0] = 0x01;
p[1] = 0x0a;
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+out:
return 12;
}
-static int spc_modesense_control(struct se_device *dev, unsigned char *p)
+static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
{
p[0] = 0x0a;
p[1] = 0x0a;
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
p[2] = 2;
/*
* From spc4r23, 7.4.7 Control mode page
@@ -690,7 +702,7 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* command sequence order shall be explicitly handled by the application client
 * through the selection of appropriate commands and task attributes.
*/
- p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+ p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -720,8 +732,8 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
- p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+ (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -734,25 +746,56 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
+ p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
+out:
return 12;
}
-static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
+static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
{
p[0] = 0x08;
p[1] = 0x12;
- if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+ if (dev->dev_attrib.emulate_write_cache > 0)
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
+out:
return 20;
}
+static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+{
+ p[0] = 0x1c;
+ p[1] = 0x0a;
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+out:
+ return 12;
+}
+
+static struct {
+ uint8_t page;
+ uint8_t subpage;
+ int (*emulate)(struct se_device *, u8, unsigned char *);
+} modesense_handlers[] = {
+ { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
+ { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
+ { .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
+ { .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
+};
+
static void spc_modesense_write_protect(unsigned char *buf, int type)
{
/*
@@ -779,74 +822,146 @@ static void spc_modesense_dpofua(unsigned char *buf, int type)
}
}
-static int spc_emulate_modesense(struct se_cmd *cmd)
+static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+ *buf++ = 8;
+ put_unaligned_be32(min(blocks, 0xffffffffull), buf);
+ buf += 4;
+ put_unaligned_be32(block_size, buf);
+ return 9;
+}
+
+static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+ if (blocks <= 0xffffffff)
+ return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
+
+ *buf++ = 1; /* LONGLBA */
+ buf += 2;
+ *buf++ = 16;
+ put_unaligned_be64(blocks, buf);
+ buf += 12;
+ put_unaligned_be32(block_size, buf);
+
+ return 17;
+}
+
+static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
- unsigned char *rbuf;
+ unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
int type = dev->transport->get_device_type(dev);
int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
- u32 offset = ten ? 8 : 4;
+ bool dbd = !!(cdb[1] & 0x08);
+ bool llba = ten ? !!(cdb[1] & 0x10) : false;
+ u8 pc = cdb[2] >> 6;
+ u8 page = cdb[2] & 0x3f;
+ u8 subpage = cdb[3];
int length = 0;
- unsigned char buf[SE_MODE_PAGE_BUF];
+ int ret;
+ int i;
memset(buf, 0, SE_MODE_PAGE_BUF);
- switch (cdb[2] & 0x3f) {
- case 0x01:
- length = spc_modesense_rwrecovery(&buf[offset]);
- break;
- case 0x08:
- length = spc_modesense_caching(dev, &buf[offset]);
- break;
- case 0x0a:
- length = spc_modesense_control(dev, &buf[offset]);
- break;
- case 0x3f:
- length = spc_modesense_rwrecovery(&buf[offset]);
- length += spc_modesense_caching(dev, &buf[offset+length]);
- length += spc_modesense_control(dev, &buf[offset+length]);
- break;
- default:
- pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
- cdb[2] & 0x3f, cdb[3]);
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- return -EINVAL;
- }
- offset += length;
-
- if (ten) {
- offset -= 2;
- buf[0] = (offset >> 8) & 0xff;
- buf[1] = offset & 0xff;
- offset += 2;
-
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
- (cmd->se_deve &&
- (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- spc_modesense_write_protect(&buf[3], type);
-
- if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- spc_modesense_dpofua(&buf[3], type);
+ /*
+ * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
+ * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
+ */
+ length = ten ? 3 : 2;
+
+ /* DEVICE-SPECIFIC PARAMETER */
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+ (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ spc_modesense_write_protect(&buf[length], type);
+
+ if ((dev->dev_attrib.emulate_write_cache > 0) &&
+ (dev->dev_attrib.emulate_fua_write > 0))
+ spc_modesense_dpofua(&buf[length], type);
+
+ ++length;
+
+ /* BLOCK DESCRIPTOR */
+
+ /*
+ * For now we only include a block descriptor for disk (SBC)
+ * devices; other command sets use a slightly different format.
+ */
+ if (!dbd && type == TYPE_DISK) {
+ u64 blocks = dev->transport->get_blocks(dev);
+ u32 block_size = dev->dev_attrib.block_size;
+
+ if (ten) {
+ if (llba) {
+ length += spc_modesense_long_blockdesc(&buf[length],
+ blocks, block_size);
+ } else {
+ length += 3;
+ length += spc_modesense_blockdesc(&buf[length],
+ blocks, block_size);
+ }
+ } else {
+ length += spc_modesense_blockdesc(&buf[length], blocks,
+ block_size);
+ }
} else {
- offset -= 1;
- buf[0] = offset & 0xff;
- offset += 1;
-
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
- (cmd->se_deve &&
- (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- spc_modesense_write_protect(&buf[2], type);
-
- if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- spc_modesense_dpofua(&buf[2], type);
+ if (ten)
+ length += 4;
+ else
+ length += 1;
+ }
+
+ if (page == 0x3f) {
+ if (subpage != 0x00 && subpage != 0xff) {
+ pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
+ /*
+ * Tricky way to say all subpage 00h for
+ * subpage==0, all subpages for subpage==0xff
+ * (and we just checked above that those are
+ * the only two possibilities).
+ */
+ if ((modesense_handlers[i].subpage & ~subpage) == 0) {
+ ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+ if (!ten && length + ret >= 255)
+ break;
+ length += ret;
+ }
+ }
+
+ goto set_length;
}
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+ if (modesense_handlers[i].page == page &&
+ modesense_handlers[i].subpage == subpage) {
+ length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+ goto set_length;
+ }
+
+ /*
+ * We don't intend to implement:
+ * - obsolete page 03h "format parameters" (checked by Solaris)
+ */
+ if (page != 0x03)
+ pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+ page, subpage);
+
+ return TCM_UNKNOWN_MODE_PAGE;
+
+set_length:
+ if (ten)
+ put_unaligned_be16(length - 2, buf);
+ else
+ buf[0] = length - 1;
+
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
- memcpy(rbuf, buf, min(offset, cmd->data_length));
+ memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
@@ -854,7 +969,56 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
return 0;
}
-static int spc_emulate_request_sense(struct se_cmd *cmd)
+static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ char *cdb = cmd->t_task_cdb;
+ bool ten = cdb[0] == MODE_SELECT_10;
+ int off = ten ? 8 : 4;
+ bool pf = !!(cdb[1] & 0x10);
+ u8 page, subpage;
+ unsigned char *buf;
+ unsigned char tbuf[SE_MODE_PAGE_BUF];
+ int length;
+ int ret = 0;
+ int i;
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ if (!pf) {
+ ret = TCM_INVALID_CDB_FIELD;
+ goto out;
+ }
+
+ page = buf[off] & 0x3f;
+ subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
+
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+ if (modesense_handlers[i].page == page &&
+ modesense_handlers[i].subpage == subpage) {
+ memset(tbuf, 0, SE_MODE_PAGE_BUF);
+ length = modesense_handlers[i].emulate(dev, 0, tbuf);
+ goto check_contents;
+ }
+
+ ret = TCM_UNKNOWN_MODE_PAGE;
+ goto out;
+
+check_contents:
+ if (memcmp(buf + off, tbuf, length))
+ ret = TCM_INVALID_PARAMETER_LIST;
+
+out:
+ transport_kunmap_data_sg(cmd);
+
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
+ return ret;
+}
+
+static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *rbuf;
@@ -866,19 +1030,14 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -ENOSYS;
+ return TCM_INVALID_CDB_FIELD;
}
rbuf = transport_kmap_data_sg(cmd);
- if (cmd->scsi_sense_reason != 0) {
- /*
- * Out of memory. We will fail with CHECK CONDITION, so
- * we must not clear the unit attention condition.
- */
- target_complete_cmd(cmd, CHECK_CONDITION);
- return 0;
- } else if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+ if (!rbuf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
@@ -905,33 +1064,97 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
buf[7] = 0x0A;
}
- if (rbuf) {
- memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
- transport_kunmap_data_sg(cmd);
+ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
+{
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ unsigned char *buf;
+ u32 lun_count = 0, offset = 8, i;
+
+ if (cmd->data_length < 16) {
+ pr_warn("REPORT LUNS allocation length %u too small\n",
+ cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
}
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ /*
+ * If no struct se_session pointer is present, this struct se_cmd is
+ * coming via a target_core_mod PASSTHROUGH op, and not through
+ * a $FABRIC_MOD. In that case, report LUN=0 only.
+ */
+ if (!sess) {
+ int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
+ lun_count = 1;
+ goto done;
+ }
+
+ spin_lock_irq(&sess->se_node_acl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = sess->se_node_acl->device_list[i];
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+ /*
+ * We determine the correct LUN LIST LENGTH even once we
+ * have reached the initial allocation length.
+ * See SPC2-R20 7.19.
+ */
+ lun_count++;
+ if ((offset + 8) > cmd->data_length)
+ continue;
+
+ int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ offset += 8;
+ }
+ spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+
+ /*
+ * See SPC3 r07, page 159.
+ */
+done:
+ lun_count *= 8;
+ buf[0] = ((lun_count >> 24) & 0xff);
+ buf[1] = ((lun_count >> 16) & 0xff);
+ buf[2] = ((lun_count >> 8) & 0xff);
+ buf[3] = (lun_count & 0xff);
+ transport_kunmap_data_sg(cmd);
+
target_complete_cmd(cmd, GOOD);
return 0;
}
+EXPORT_SYMBOL(spc_emulate_report_luns);
-static int spc_emulate_testunitready(struct se_cmd *cmd)
+static sense_reason_t
+spc_emulate_testunitready(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
return 0;
}
-int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
+sense_reason_t
+spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_device *dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
unsigned char *cdb = cmd->t_task_cdb;
switch (cdb[0]) {
case MODE_SELECT:
*size = cdb[4];
+ cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SELECT_10:
*size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SENSE:
*size = cdb[4];
@@ -946,14 +1169,12 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[7] << 8) + cdb[8];
break;
case PERSISTENT_RESERVE_IN:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_cmd = target_scsi3_emulate_pr_in;
*size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = target_scsi3_emulate_pr_in;
break;
case PERSISTENT_RESERVE_OUT:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_cmd = target_scsi3_emulate_pr_out;
*size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = target_scsi3_emulate_pr_out;
break;
case RELEASE:
case RELEASE_10:
@@ -962,8 +1183,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
else
*size = cmd->data_length;
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_cmd = target_scsi2_reservation_release;
+ cmd->execute_cmd = target_scsi2_reservation_release;
break;
case RESERVE:
case RESERVE_10:
@@ -976,15 +1196,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
else
*size = cmd->data_length;
- /*
- * Setup the legacy emulated handler for SPC-2 and
- * >= SPC-3 compatible reservation handling (CRH=1)
- * Otherwise, we assume the underlying SCSI logic is
- * is running in SPC_PASSTHROUGH, and wants reservations
- * emulation disabled.
- */
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_cmd = target_scsi2_reservation_reserve;
+ cmd->execute_cmd = target_scsi2_reservation_reserve;
break;
case REQUEST_SENSE:
*size = cdb[4];
@@ -997,8 +1209,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->execute_cmd = spc_emulate_inquiry;
break;
case SECURITY_PROTOCOL_IN:
@@ -1020,14 +1231,13 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
break;
case REPORT_LUNS:
- cmd->execute_cmd = target_report_luns;
+ cmd->execute_cmd = spc_emulate_report_luns;
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
break;
case TEST_UNIT_READY:
cmd->execute_cmd = spc_emulate_testunitready;
@@ -1039,8 +1249,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* MAINTENANCE_IN from SCC-2
* Check for emulated MI_REPORT_TARGET_PGS
*/
- if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
cmd->execute_cmd =
target_emulate_report_target_port_groups;
}
@@ -1058,8 +1267,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* MAINTENANCE_OUT from SCC-2
* Check for emulated MO_SET_TARGET_PGS.
*/
- if (cdb[1] == MO_SET_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ if (cdb[1] == MO_SET_TARGET_PGS) {
cmd->execute_cmd =
target_emulate_set_target_port_groups;
}
@@ -1075,9 +1283,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->get_fabric_name(), cdb[0]);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
}
return 0;
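
[Editor's note: a hedged sketch of the MODE SENSE length bookkeeping used in spc_emulate_modesense() above — the MODE DATA LENGTH field excludes its own bytes, so the 6-byte CDB variant stores (total - 1) in byte 0 while the 10-byte variant stores (total - 2) as a big-endian 16-bit value in bytes 0..1. The page size below is illustrative only.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

int main(void)
{
	uint8_t buf[64];
	int ten = 1;			/* emulate MODE_SENSE_10 */
	int length;

	memset(buf, 0, sizeof(buf));
	length = ten ? 3 : 2;		/* skip MODE DATA LENGTH + MEDIUM TYPE */
	++length;			/* DEVICE-SPECIFIC PARAMETER byte */
	if (ten)
		length += 4;		/* reserved + BLOCK DESCRIPTOR LENGTH, no descriptor */
	else
		length += 1;		/* BLOCK DESCRIPTOR LENGTH, no descriptor */
	length += 12;			/* e.g. one 12-byte control mode page */

	if (ten)
		put_be16(buf, length - 2);
	else
		buf[0] = length - 1;

	printf("total=%d mode data length=%d\n", length,
	       ten ? (buf[0] << 8 | buf[1]) : buf[0]);
	return 0;
}
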
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index cb6b0036ae95..d154ce797180 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -1,13 +1,10 @@
/*******************************************************************************
* Filename: target_core_stat.c
*
- * Copyright (c) 2011 Rising Tide Systems
- * Copyright (c) 2011 Linux-iSCSI.org
- *
* Modern ConfigFS group context specific statistics based on original
* target_core_mib.c code
*
- * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
+ * (c) Copyright 2006-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -80,13 +77,9 @@ static struct target_stat_scsi_dev_attribute \
static ssize_t target_stat_scsi_dev_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -95,12 +88,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(inst);
static ssize_t target_stat_scsi_dev_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -109,13 +98,6 @@ DEV_STAT_SCSI_DEV_ATTR_RO(indx);
static ssize_t target_stat_scsi_dev_show_attr_role(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "Target\n");
}
DEV_STAT_SCSI_DEV_ATTR_RO(role);
@@ -123,12 +105,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(role);
static ssize_t target_stat_scsi_dev_show_attr_ports(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
}
@@ -176,13 +154,9 @@ static struct target_stat_scsi_tgt_dev_attribute \
static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -191,12 +165,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -205,13 +175,6 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
@@ -219,60 +182,27 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
- char status[16];
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
- if (!dev)
- return -ENODEV;
-
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
- strcpy(status, "activated");
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
- strcpy(status, "deactivated");
- break;
- case TRANSPORT_DEVICE_SHUTDOWN:
- strcpy(status, "shutdown");
- break;
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- strcpy(status, "offline");
- break;
- default:
- sprintf(status, "unknown(%d)", dev->dev_status);
- break;
- }
-
- return snprintf(page, PAGE_SIZE, "%s\n", status);
+ if (dev->export_count)
+ return snprintf(page, PAGE_SIZE, "activated");
+ else
+ return snprintf(page, PAGE_SIZE, "deactivated");
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int non_accessible_lus;
- if (!dev)
- return -ENODEV;
-
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
+ if (dev->export_count)
non_accessible_lus = 0;
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
- case TRANSPORT_DEVICE_SHUTDOWN:
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- default:
+ else
non_accessible_lus = 1;
- break;
- }
return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
}
@@ -281,12 +211,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
}
@@ -335,13 +261,9 @@ static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
static ssize_t target_stat_scsi_lu_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -350,12 +272,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(inst);
static ssize_t target_stat_scsi_lu_show_attr_dev(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -364,13 +282,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev);
static ssize_t target_stat_scsi_lu_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
}
DEV_STAT_SCSI_LU_ATTR_RO(indx);
@@ -378,12 +289,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(indx);
static ssize_t target_stat_scsi_lu_show_attr_lun(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
/* FIXME: scsiLuDefaultLun */
return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
}
@@ -392,35 +297,28 @@ DEV_STAT_SCSI_LU_ATTR_RO(lun);
static ssize_t target_stat_scsi_lu_show_attr_lu_name(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
- if (!dev)
- return -ENODEV;
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
- (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
- dev->se_sub_dev->t10_wwn.unit_serial : "None");
+ (strlen(dev->t10_wwn.unit_serial)) ?
+ dev->t10_wwn.unit_serial : "None");
}
DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
static ssize_t target_stat_scsi_lu_show_attr_vend(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.vendor)+1];
/* scsiLuVendorId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
- dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
+ dev->t10_wwn.vendor[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -429,19 +327,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(vend);
static ssize_t target_stat_scsi_lu_show_attr_prod(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.model)+1];
/* scsiLuProductId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
- dev->se_sub_dev->t10_wwn.model[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
+ dev->t10_wwn.model[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -450,19 +344,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(prod);
static ssize_t target_stat_scsi_lu_show_attr_rev(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.revision)+1];
/* scsiLuRevisionId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
- dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
+ str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
+ dev->t10_wwn.revision[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -471,12 +361,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(rev);
static ssize_t target_stat_scsi_lu_show_attr_dev_type(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
@@ -487,30 +373,18 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
static ssize_t target_stat_scsi_lu_show_attr_status(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuStatus */
return snprintf(page, PAGE_SIZE, "%s\n",
- (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
- "available" : "notavailable");
+ (dev->export_count) ? "available" : "notavailable");
}
DEV_STAT_SCSI_LU_ATTR_RO(status);
static ssize_t target_stat_scsi_lu_show_attr_state_bit(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* scsiLuState */
return snprintf(page, PAGE_SIZE, "exposed\n");
}
@@ -519,12 +393,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuNumCommands */
return snprintf(page, PAGE_SIZE, "%llu\n",
@@ -535,12 +405,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuReadMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
@@ -550,12 +416,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuWrittenMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
@@ -565,12 +427,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
static ssize_t target_stat_scsi_lu_show_attr_resets(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuInResets */
return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
@@ -580,13 +438,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(resets);
static ssize_t target_stat_scsi_lu_show_attr_full_stat(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* FIXME: scsiLuOutTaskSetFullStatus */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
@@ -595,13 +446,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* FIXME: scsiLuHSInCommands */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
@@ -610,12 +454,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
static ssize_t target_stat_scsi_lu_show_attr_creation_time(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuCreationTime */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
@@ -662,20 +502,20 @@ static struct config_item_type target_stat_scsi_lu_cit = {
* Called from target_core_configfs.c:target_core_make_subdev() to setup
* the target statistics groups + configfs CITs located in target_core_stat.c
*/
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
+void target_stat_setup_dev_default_groups(struct se_device *dev)
{
- struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
+ struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
- dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
- dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
- dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
+ dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
+ dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
+ dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
dev_stat_grp->default_groups[3] = NULL;
}
@@ -1161,7 +1001,7 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
return -ENODEV;
}
tpg = sep->sep_tpg;
- wwn = &dev->se_sub_dev->t10_wwn;
+ wwn = &dev->t10_wwn;
/* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
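
The show handlers above now derive the struct se_device directly from the embedded se_dev_stat_grps member via container_of(), which is why the se_subsystem_dev indirection and the "if (!dev)" checks disappear. A minimal, self-contained sketch of that idiom (plain userspace C with hypothetical struct names, not the kernel definitions):

    /* Recover the enclosing object from a pointer to an embedded member.
     * Struct and field names here are hypothetical stand-ins.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct stat_grps { int dummy; };

    struct demo_device {
            int index;
            struct stat_grps grps;  /* embedded member, like dev_stat_grps */
    };

    int main(void)
    {
            struct demo_device dev = { .index = 7 };
            struct stat_grps *g = &dev.grps;
            /* Walk back from the embedded member to the containing device. */
            struct demo_device *back = container_of(g, struct demo_device, grps);

            printf("index = %d\n", back->index);    /* prints 7 */
            return 0;
    }
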
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index be75c4331a92..c6e0293ffdb0 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -3,8 +3,7 @@
*
* This file contains SPC-3 task management infrastructure
*
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -371,7 +370,7 @@ int core_tmr_lun_reset(
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+ tas = dev->dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
@@ -399,10 +398,10 @@ int core_tmr_lun_reset(
* LOGICAL UNIT RESET
*/
if (!preempt_and_abort_list &&
- (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+ (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index a531fe282b1e..5192ac0337f7 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -3,10 +3,7 @@
*
* This file contains generic Target Portal Group related functions.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -619,6 +616,29 @@ int core_tpg_set_initiator_node_queue_depth(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+/* core_tpg_set_initiator_node_tag():
+ *
+ * Initiator nodeacl tags are not used internally, but may be used by
+ * userspace to emulate aliases or groups.
+ * Returns length of newly-set tag or -EINVAL.
+ */
+int core_tpg_set_initiator_node_tag(
+ struct se_portal_group *tpg,
+ struct se_node_acl *acl,
+ const char *new_tag)
+{
+ if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
+ return -EINVAL;
+
+ if (!strncmp("NULL", new_tag, 4)) {
+ acl->acl_tag[0] = '\0';
+ return 0;
+ }
+
+ return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
@@ -672,6 +692,7 @@ int core_tpg_register(
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = se_tpg->tpg_lun_list[i];
lun->unpacked_lun = i;
+ lun->lun_link_magic = SE_LUN_LINK_MAGIC;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
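
The newly exported core_tpg_set_initiator_node_tag() is meant to be driven from a fabric module's configfs "tag" attribute. A hedged, kernel-context sketch of such a caller (the wrapper name and its arguments are hypothetical; only the core_tpg_set_initiator_node_tag() call and its return convention come from this patch, and the declaration is assumed to live in target_core_fabric.h alongside the other core_tpg_* exports):

    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    /* Hypothetical fabric-side helper: store a user-supplied ACL tag.
     * Writing "NULL" clears the tag; anything else is copied, bounded by
     * MAX_ACL_TAG_SIZE, and the stored length is returned by the core.
     * A real caller would strip the trailing newline from 'buf' first.
     */
    static ssize_t demo_fabric_store_acl_tag(struct se_portal_group *tpg,
                                             struct se_node_acl *acl,
                                             const char *buf, size_t count)
    {
            int ret;

            ret = core_tpg_set_initiator_node_tag(tpg, acl, buf);
            if (ret < 0)
                    return ret;     /* -EINVAL: tag too long */

            return count;           /* tell configfs the write was consumed */
    }
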
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index dcecbfb17243..bd587b70661a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3,10 +3,7 @@
*
* This file contains the Generic Target Engine Core.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -70,7 +67,6 @@ static void transport_handle_queue_full(struct se_cmd *cmd,
static int transport_generic_get_mem(struct se_cmd *cmd);
static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
static void transport_put_cmd(struct se_cmd *cmd);
-static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
@@ -297,7 +293,7 @@ void transport_register_session(
}
EXPORT_SYMBOL(transport_register_session);
-void target_release_session(struct kref *kref)
+static void target_release_session(struct kref *kref)
{
struct se_session *se_sess = container_of(kref,
struct se_session, sess_kref);
@@ -545,9 +541,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
- if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- transport_lun_remove_cmd(cmd);
-
if (transport_cmd_check_stop_to_fabric(cmd))
return;
if (remove)
@@ -558,7 +551,8 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd);
+ transport_generic_request_failure(cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}
/*
@@ -626,7 +620,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
complete(&cmd->t_transport_stop_comp);
return;
} else if (cmd->transport_state & CMD_T_FAILED) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -659,7 +652,7 @@ static void target_add_to_state_list(struct se_cmd *cmd)
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);
-static void target_qf_do_work(struct work_struct *work)
+void target_qf_do_work(struct work_struct *work)
{
struct se_device *dev = container_of(work, struct se_device,
qf_work_queue);
@@ -712,29 +705,15 @@ void transport_dump_dev_state(
int *bl)
{
*bl += sprintf(b + *bl, "Status: ");
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
+ if (dev->export_count)
*bl += sprintf(b + *bl, "ACTIVATED");
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
+ else
*bl += sprintf(b + *bl, "DEACTIVATED");
- break;
- case TRANSPORT_DEVICE_SHUTDOWN:
- *bl += sprintf(b + *bl, "SHUTDOWN");
- break;
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- *bl += sprintf(b + *bl, "OFFLINE");
- break;
- default:
- *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
- break;
- }
*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
- dev->se_sub_dev->se_dev_attrib.block_size,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ dev->dev_attrib.block_size,
+ dev->dev_attrib.hw_max_sectors);
*bl += sprintf(b + *bl, " ");
}
@@ -991,186 +970,8 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
}
EXPORT_SYMBOL(transport_set_vpd_ident);
-static void core_setup_task_attr_emulation(struct se_device *dev)
-{
- /*
- * If this device is from Target_Core_Mod/pSCSI, disable the
- * SAM Task Attribute emulation.
- *
- * This is currently not available in upsream Linux/SCSI Target
- * mode code, and is assumed to be disabled while using TCM/pSCSI.
- */
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
- return;
- }
-
- dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
- pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
- " device\n", dev->transport->name,
- dev->transport->get_device_rev(dev));
-}
-
-static void scsi_dump_inquiry(struct se_device *dev)
-{
- struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
- char buf[17];
- int i, device_type;
- /*
- * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
- */
- for (i = 0; i < 8; i++)
- if (wwn->vendor[i] >= 0x20)
- buf[i] = wwn->vendor[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Vendor: %s\n", buf);
-
- for (i = 0; i < 16; i++)
- if (wwn->model[i] >= 0x20)
- buf[i] = wwn->model[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Model: %s\n", buf);
-
- for (i = 0; i < 4; i++)
- if (wwn->revision[i] >= 0x20)
- buf[i] = wwn->revision[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Revision: %s\n", buf);
-
- device_type = dev->transport->get_device_type(dev);
- pr_debug(" Type: %s ", scsi_device_type(device_type));
- pr_debug(" ANSI SCSI revision: %02x\n",
- dev->transport->get_device_rev(dev));
-}
-
-struct se_device *transport_add_device_to_core_hba(
- struct se_hba *hba,
- struct se_subsystem_api *transport,
- struct se_subsystem_dev *se_dev,
- u32 device_flags,
- void *transport_dev,
- struct se_dev_limits *dev_limits,
- const char *inquiry_prod,
- const char *inquiry_rev)
-{
- int force_pt;
- struct se_device *dev;
-
- dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
- if (!dev) {
- pr_err("Unable to allocate memory for se_dev_t\n");
- return NULL;
- }
-
- dev->dev_flags = device_flags;
- dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_ptr = transport_dev;
- dev->se_hba = hba;
- dev->se_sub_dev = se_dev;
- dev->transport = transport;
- INIT_LIST_HEAD(&dev->dev_list);
- INIT_LIST_HEAD(&dev->dev_sep_list);
- INIT_LIST_HEAD(&dev->dev_tmr_list);
- INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->state_list);
- INIT_LIST_HEAD(&dev->qf_cmd_list);
- spin_lock_init(&dev->execute_task_lock);
- spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->dev_reservation_lock);
- spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->se_port_lock);
- spin_lock_init(&dev->se_tmr_lock);
- spin_lock_init(&dev->qf_cmd_lock);
- atomic_set(&dev->dev_ordered_id, 0);
-
- se_dev_set_default_attribs(dev, dev_limits);
-
- dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
- dev->creation_time = get_jiffies_64();
- spin_lock_init(&dev->stats_lock);
-
- spin_lock(&hba->device_lock);
- list_add_tail(&dev->dev_list, &hba->hba_dev_list);
- hba->dev_count++;
- spin_unlock(&hba->device_lock);
- /*
- * Setup the SAM Task Attribute emulation for struct se_device
- */
- core_setup_task_attr_emulation(dev);
- /*
- * Force PR and ALUA passthrough emulation with internal object use.
- */
- force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
- /*
- * Setup the Reservations infrastructure for struct se_device
- */
- core_setup_reservations(dev, force_pt);
- /*
- * Setup the Asymmetric Logical Unit Assignment for struct se_device
- */
- if (core_setup_alua(dev, force_pt) < 0)
- goto err_dev_list;
-
- /*
- * Startup the struct se_device processing thread
- */
- dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
- dev->transport->name);
- if (!dev->tmr_wq) {
- pr_err("Unable to create tmr workqueue for %s\n",
- dev->transport->name);
- goto err_dev_list;
- }
- /*
- * Setup work_queue for QUEUE_FULL
- */
- INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
- /*
- * Preload the initial INQUIRY const values if we are doing
- * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
- * passthrough because this is being provided by the backend LLD.
- * This is required so that transport_get_inquiry() copies these
- * originals once back into DEV_T10_WWN(dev) for the virtual device
- * setup.
- */
- if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (!inquiry_prod || !inquiry_rev) {
- pr_err("All non TCM/pSCSI plugins require"
- " INQUIRY consts\n");
- goto err_wq;
- }
-
- strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
- strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
- strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
- }
- scsi_dump_inquiry(dev);
-
- return dev;
-
-err_wq:
- destroy_workqueue(dev->tmr_wq);
-err_dev_list:
- spin_lock(&hba->device_lock);
- list_del(&dev->dev_list);
- hba->dev_count--;
- spin_unlock(&hba->device_lock);
-
- se_release_vpd_for_dev(dev);
-
- kfree(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(transport_add_device_to_core_hba);
-
-int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
+sense_reason_t
+target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
struct se_device *dev = cmd->se_dev;
@@ -1185,18 +986,18 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
if (cmd->data_direction == DMA_TO_DEVICE) {
pr_err("Rejecting underflow/overflow"
" WRITE data\n");
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* Reject READ_* or WRITE_* with overflow/underflow for
* type SCF_SCSI_DATA_CDB.
*/
- if (dev->se_sub_dev->se_dev_attrib.block_size != 512) {
+ if (dev->dev_attrib.block_size != 512) {
pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
" CDB on non 512-byte sector setup subsystem"
" plugin: %s\n", dev->transport->name);
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* For the overflow case keep the existing fabric provided
@@ -1216,10 +1017,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
return 0;
-out_invalid_cdb_field:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
}
/*
@@ -1259,45 +1056,41 @@ void transport_init_se_cmd(
}
EXPORT_SYMBOL(transport_init_se_cmd);
-static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+static sense_reason_t
+transport_check_alloc_task_attr(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+
/*
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
if (cmd->sam_task_attr == MSG_ACA_TAG) {
pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
+ cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
smp_mb__after_atomic_inc();
pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
- cmd->se_dev->transport->name);
+ dev->transport->name);
return 0;
}
-/* target_setup_cmd_from_cdb():
- *
- * Called from fabric RX Thread.
- */
-int target_setup_cmd_from_cdb(
- struct se_cmd *cmd,
- unsigned char *cdb)
+sense_reason_t
+target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
- u32 pr_reg_type = 0;
- u8 alua_ascq = 0;
+ struct se_device *dev = cmd->se_dev;
unsigned long flags;
- int ret;
+ sense_reason_t ret;
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
@@ -1307,9 +1100,7 @@ int target_setup_cmd_from_cdb(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1324,10 +1115,7 @@ int target_setup_cmd_from_cdb(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_OUT_OF_RESOURCES;
}
} else
cmd->t_task_cdb = &cmd->__t_task_cdb[0];
@@ -1339,70 +1127,30 @@ int target_setup_cmd_from_cdb(
/*
* Check for an existing UNIT ATTENTION condition
*/
- if (core_scsi3_ua_check(cmd, cdb) < 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
- return -EINVAL;
- }
+ ret = target_scsi3_ua_check(cmd);
+ if (ret)
+ return ret;
- ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
- if (ret != 0) {
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
- if (ret > 0) {
- pr_debug("[%s]: ALUA TG Port not available, "
- "SenseKey: NOT_READY, ASC/ASCQ: "
- "0x04/0x%02x\n",
- cmd->se_tfo->get_fabric_name(), alua_ascq);
-
- transport_set_sense_codes(cmd, 0x04, alua_ascq);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
- return -EINVAL;
- }
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
+ ret = target_alua_state_check(cmd);
+ if (ret)
+ return ret;
- /*
- * Check status for SPC-3 Persistent Reservations
- */
- if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
- if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EBUSY;
- }
- /*
- * This means the CDB is allowed for the SCSI Initiator port
- * when said port is *NOT* holding the legacy SPC-2 or
- * SPC-3 Persistent Reservation.
- */
- }
+ ret = target_check_reservation(cmd);
+ if (ret)
+ return ret;
- ret = cmd->se_dev->transport->parse_cdb(cmd);
- if (ret < 0)
+ ret = dev->transport->parse_cdb(cmd);
+ if (ret)
+ return ret;
+
+ ret = transport_check_alloc_task_attr(cmd);
+ if (ret)
return ret;
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- /*
- * Check for SAM Task Attribute Emulation
- */
- if (transport_check_alloc_task_attr(cmd) < 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep)
cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
@@ -1418,7 +1166,7 @@ EXPORT_SYMBOL(target_setup_cmd_from_cdb);
int transport_handle_cdb_direct(
struct se_cmd *cmd)
{
- int ret;
+ sense_reason_t ret;
if (!cmd->se_lun) {
dump_stack();
@@ -1448,13 +1196,41 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0)
- transport_generic_request_failure(cmd);
-
+ if (ret)
+ transport_generic_request_failure(cmd, ret);
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
+static sense_reason_t
+transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+ u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+{
+ if (!sgl || !sgl_count)
+ return 0;
+
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ cmd->t_data_sg = sgl;
+ cmd->t_data_nents = sgl_count;
+
+ if (sgl_bidi && sgl_bidi_count) {
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
+ }
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ return 0;
+}
+
/*
* target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
* se_cmd + use pre-allocated SGL memory.
@@ -1487,7 +1263,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
struct se_portal_group *se_tpg;
- int rc;
+ sense_reason_t rc;
+ int ret;
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
@@ -1508,9 +1285,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
* for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
- if (rc)
- return rc;
+ ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ if (ret)
+ return ret;
/*
* Signal bidirectional data payloads to target-core
*/
@@ -1519,16 +1296,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
/*
* Locate se_lun pointer and attach it to struct se_cmd
*/
- if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->scsi_sense_reason, 0);
+ rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+ if (rc) {
+ transport_send_check_condition_and_sense(se_cmd, rc, 0);
target_put_sess_cmd(se_sess, se_cmd);
return 0;
}
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) {
- transport_generic_request_failure(se_cmd);
+ transport_generic_request_failure(se_cmd, rc);
return 0;
}
/*
@@ -1563,7 +1340,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
sgl_bidi, sgl_bidi_count);
if (rc != 0) {
- transport_generic_request_failure(se_cmd);
+ transport_generic_request_failure(se_cmd, rc);
return 0;
}
}
@@ -1616,6 +1393,8 @@ static void target_complete_tmr_failure(struct work_struct *work)
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+
+ transport_cmd_check_stop_to_fabric(se_cmd);
}
/**
@@ -1709,16 +1488,17 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-void transport_generic_request_failure(struct se_cmd *cmd)
+void transport_generic_request_failure(struct se_cmd *cmd,
+ sense_reason_t sense_reason)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, cmd->scsi_sense_reason);
+ cmd->t_state, sense_reason);
pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
(cmd->transport_state & CMD_T_ACTIVE) != 0,
(cmd->transport_state & CMD_T_STOP) != 0,
@@ -1727,10 +1507,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
- switch (cmd->scsi_sense_reason) {
+ switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_INVALID_CDB_FIELD:
@@ -1743,6 +1522,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
break;
+ case TCM_OUT_OF_RESOURCES:
+ sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ break;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
@@ -1759,7 +1541,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
@@ -1770,13 +1552,12 @@ void transport_generic_request_failure(struct se_cmd *cmd)
goto check_stop;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0], cmd->scsi_sense_reason);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ cmd->t_task_cdb[0], sense_reason);
+ sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+ ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@@ -1794,69 +1575,30 @@ EXPORT_SYMBOL(transport_generic_request_failure);
static void __target_execute_cmd(struct se_cmd *cmd)
{
- int error = 0;
+ sense_reason_t ret;
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
spin_unlock_irq(&cmd->t_state_lock);
- if (cmd->execute_cmd)
- error = cmd->execute_cmd(cmd);
-
- if (error) {
- spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
- spin_unlock_irq(&cmd->t_state_lock);
+ if (cmd->execute_cmd) {
+ ret = cmd->execute_cmd(cmd);
+ if (ret) {
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
- transport_generic_request_failure(cmd);
+ transport_generic_request_failure(cmd, ret);
+ }
}
}
-void target_execute_cmd(struct se_cmd *cmd)
+static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- /*
- * If the received CDB has aleady been aborted stop processing it here.
- */
- if (transport_check_aborted_status(cmd, 1)) {
- complete(&cmd->t_transport_stop_comp);
- return;
- }
-
- /*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- spin_lock_irq(&cmd->t_state_lock);
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_ACTIVE;
- spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->transport_lun_stop_comp);
- return;
- }
- /*
- * Determine if frontend context caller is requesting the stopping of
- * this command for frontend exceptions.
- */
- if (cmd->transport_state & CMD_T_STOP) {
- pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
- __func__, __LINE__,
- cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->t_transport_stop_comp);
- return;
- }
-
- cmd->t_state = TRANSPORT_PROCESSING;
- spin_unlock_irq(&cmd->t_state_lock);
-
- if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
- goto execute;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return false;
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
@@ -1867,7 +1609,7 @@ void target_execute_cmd(struct se_cmd *cmd)
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
"se_ordered_id: %u\n",
cmd->t_task_cdb[0], cmd->se_ordered_id);
- goto execute;
+ return false;
case MSG_ORDERED_TAG:
atomic_inc(&dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -1881,7 +1623,7 @@ void target_execute_cmd(struct se_cmd *cmd)
* exist that need to be completed first.
*/
if (!atomic_read(&dev->simple_cmds))
- goto execute;
+ return false;
break;
default:
/*
@@ -1892,23 +1634,64 @@ void target_execute_cmd(struct se_cmd *cmd)
break;
}
- if (atomic_read(&dev->dev_ordered_sync) != 0) {
- spin_lock(&dev->delayed_cmd_lock);
- list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
- spin_unlock(&dev->delayed_cmd_lock);
+ if (atomic_read(&dev->dev_ordered_sync) == 0)
+ return false;
- pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
- " delayed CMD list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->sam_task_attr,
- cmd->se_ordered_id);
+ spin_lock(&dev->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+ spin_unlock(&dev->delayed_cmd_lock);
+
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ " delayed CMD list, se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->se_ordered_id);
+ return true;
+}
+
+void target_execute_cmd(struct se_cmd *cmd)
+{
+ /*
+	 * If the received CDB has already been aborted, stop processing it here.
+ */
+ if (transport_check_aborted_status(cmd, 1)) {
+ complete(&cmd->transport_lun_stop_comp);
return;
}
-execute:
/*
- * Otherwise, no ORDERED task attributes exist..
+	 * Determine if IOCTL context caller is requesting the stopping of this
+ * command for LUN shutdown purposes.
+ */
+ spin_lock_irq(&cmd->t_state_lock);
+ if (cmd->transport_state & CMD_T_LUN_STOP) {
+ pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
+
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete(&cmd->transport_lun_stop_comp);
+ return;
+ }
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
*/
- __target_execute_cmd(cmd);
+ if (cmd->transport_state & CMD_T_STOP) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__,
+ cmd->se_tfo->get_task_tag(cmd));
+
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete(&cmd->t_transport_stop_comp);
+ return;
+ }
+
+ cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ if (!target_handle_task_attr(cmd))
+ __target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);
@@ -1947,6 +1730,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return;
+
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
@@ -1975,8 +1761,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
{
int ret = 0;
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
ret = cmd->se_tfo->queue_status(cmd);
@@ -2034,8 +1819,8 @@ static void target_complete_ok_work(struct work_struct *work)
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
* Attribute.
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
+
/*
* Check to schedule QUEUE_FULL work, or execute an existing
* cmd->transport_qf_callback()
@@ -2183,9 +1968,10 @@ static void transport_put_cmd(struct se_cmd *cmd)
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (atomic_read(&cmd->t_fe_count)) {
- if (!atomic_dec_and_test(&cmd->t_fe_count))
- goto out_busy;
+ if (atomic_read(&cmd->t_fe_count) &&
+ !atomic_dec_and_test(&cmd->t_fe_count)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
}
if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
@@ -2197,57 +1983,8 @@ static void transport_put_cmd(struct se_cmd *cmd)
transport_free_pages(cmd);
transport_release_cmd(cmd);
return;
-out_busy:
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
-/*
- * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
- * allocating in the core.
- * @cmd: Associated se_cmd descriptor
- * @mem: SGL style memory for TCM WRITE / READ
- * @sg_mem_num: Number of SGL elements
- * @mem_bidi_in: SGL style memory for TCM BIDI READ
- * @sg_mem_bidi_num: Number of BIDI READ SGL elements
- *
- * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage
- * of parameters.
- */
-int transport_generic_map_mem_to_cmd(
- struct se_cmd *cmd,
- struct scatterlist *sgl,
- u32 sgl_count,
- struct scatterlist *sgl_bidi,
- u32 sgl_bidi_count)
-{
- if (!sgl || !sgl_count)
- return 0;
-
- /*
- * Reject SCSI data overflow with map_mem_to_cmd() as incoming
- * scatterlists already have been set to follow what the fabric
- * passes for the original expected data transfer length.
- */
- if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
- pr_warn("Rejecting SCSI DATA overflow for fabric using"
- " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
-
- cmd->t_data_sg = sgl;
- cmd->t_data_nents = sgl_count;
-
- if (sgl_bidi && sgl_bidi_count) {
- cmd->t_bidi_data_sg = sgl_bidi;
- cmd->t_bidi_data_nents = sgl_bidi_count;
- }
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
-
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -2268,10 +2005,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
/* >1 page. use vmap */
pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
- if (!pages) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (!pages)
return NULL;
- }
/* convert sg[] to pages[] */
for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
@@ -2280,10 +2015,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
kfree(pages);
- if (!cmd->t_data_vmap) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (!cmd->t_data_vmap)
return NULL;
- }
return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
@@ -2349,7 +2082,8 @@ out:
* might not have the payload yet, so notify the fabric via a call to
* ->write_pending instead. Otherwise place it on the execution queue.
*/
-int transport_generic_new_cmd(struct se_cmd *cmd)
+sense_reason_t
+transport_generic_new_cmd(struct se_cmd *cmd)
{
int ret = 0;
@@ -2362,7 +2096,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- goto out_fail;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
atomic_inc(&cmd->t_fe_count);
@@ -2388,14 +2122,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
- if (ret < 0)
- return ret;
- return 1;
+ /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
+ WARN_ON(ret);
+
+ return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-out_fail:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
@@ -2839,21 +2570,9 @@ static int transport_get_sense_codes(
return 0;
}
-static int transport_set_sense_codes(
- struct se_cmd *cmd,
- u8 asc,
- u8 ascq)
-{
- cmd->scsi_asc = asc;
- cmd->scsi_ascq = ascq;
-
- return 0;
-}
-
-int transport_send_check_condition_and_sense(
- struct se_cmd *cmd,
- u8 reason,
- int from_transport)
+int
+transport_send_check_condition_and_sense(struct se_cmd *cmd,
+ sense_reason_t reason, int from_transport)
{
unsigned char *buffer = cmd->sense_buffer;
unsigned long flags;
@@ -2878,6 +2597,16 @@ int transport_send_check_condition_and_sense(
* SENSE KEY values from include/scsi/scsi.h
*/
switch (reason) {
+ case TCM_NO_SENSE:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* Not Ready */
+ buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ /* NO ADDITIONAL SENSE INFORMATION */
+ buffer[SPC_ASC_KEY_OFFSET] = 0;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0;
+ break;
case TCM_NON_EXISTENT_LUN:
/* CURRENT ERROR */
buffer[0] = 0x70;
@@ -3024,7 +2753,7 @@ int transport_send_check_condition_and_sense(
/* ILLEGAL REQUEST */
buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT COMMUNICATION FAILURE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x80;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x08;
break;
}
/*
@@ -3044,23 +2773,19 @@ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
- int ret = 0;
+ if (!(cmd->transport_state & CMD_T_ABORTED))
+ return 0;
- if (cmd->transport_state & CMD_T_ABORTED) {
- if (!send_status ||
- (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
- return 1;
+ if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+ return 1;
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
- " status for CDB: 0x%02x ITT: 0x%08x\n",
- cmd->t_task_cdb[0],
- cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+ cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
- cmd->se_tfo->queue_status(cmd);
- ret = 1;
- }
- return ret;
+ cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+ cmd->se_tfo->queue_status(cmd);
+
+ return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);
@@ -3089,6 +2814,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ transport_lun_remove_cmd(cmd);
+
pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
" ITT: 0x%08x\n", cmd->t_task_cdb[0],
cmd->se_tfo->get_task_tag(cmd));
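
The bulk of this file's changes convert the submission path to return a sense_reason_t instead of stashing a reason in cmd->scsi_sense_reason and returning -EINVAL, with the top-level caller passing the first failure to transport_generic_request_failure(). The shape of that control flow, modeled as a small self-contained userspace program (enum values and function names are stand-ins, not the kernel's):

    #include <stdio.h>

    typedef enum {
            DEMO_NO_SENSE = 0,
            DEMO_UNIT_ATTENTION,
            DEMO_OUT_OF_RESOURCES,
    } demo_sense_reason_t;

    static demo_sense_reason_t demo_ua_check(int has_ua)
    {
            return has_ua ? DEMO_UNIT_ATTENTION : DEMO_NO_SENSE;
    }

    static demo_sense_reason_t demo_alloc_check(int can_alloc)
    {
            return can_alloc ? DEMO_NO_SENSE : DEMO_OUT_OF_RESOURCES;
    }

    /* Each check returns 0 on success or a specific reason; the caller
     * forwards the first failure unchanged instead of flattening it
     * to a generic errno.
     */
    static demo_sense_reason_t demo_setup_cmd(int has_ua, int can_alloc)
    {
            demo_sense_reason_t ret;

            ret = demo_ua_check(has_ua);
            if (ret)
                    return ret;             /* first failure wins */

            ret = demo_alloc_check(can_alloc);
            if (ret)
                    return ret;

            return DEMO_NO_SENSE;
    }

    int main(void)
    {
            /* The reason reaches the top-level caller intact, which is
             * where the CHECK CONDITION response would be built.
             */
            printf("result = %d\n", demo_setup_cmd(0, 0));  /* 2: out of resources */
            return 0;
    }
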
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 6666a0c74f60..bf0e390ce2d7 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -3,8 +3,7 @@
*
* This file contains logic for SPC-3 Unit Attention emulation
*
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -38,9 +37,8 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
-int core_scsi3_ua_check(
- struct se_cmd *cmd,
- unsigned char *cdb)
+sense_reason_t
+target_scsi3_ua_check(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
@@ -71,16 +69,14 @@ int core_scsi3_ua_check(
* was received, then the device server shall process the command
* and either:
*/
- switch (cdb[0]) {
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case REQUEST_SENSE:
return 0;
default:
- return -EINVAL;
+ return TCM_CHECK_CONDITION_UNIT_ATTENTION;
}
-
- return -EINVAL;
}
int core_scsi3_ua_allocate(
@@ -237,7 +233,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
+ if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -265,8 +261,8 @@ void core_scsi3_ua_for_check_condition(
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
- (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+ (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+ "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
}
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 6e6b03460a1a..0204952fe4d3 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -26,7 +26,7 @@
extern struct kmem_cache *se_ua_cache;
-extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 9585010964ec..6659dd36e806 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -355,11 +355,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
tport = ft_tport_create(rdata->local_port);
if (!tport)
- return 0; /* not a target for this local port */
+ goto not_target; /* not a target for this local port */
acl = ft_acl_get(tport->tpg, rdata);
if (!acl)
- return 0;
+ goto not_target; /* no target for this remote */
if (!rspp)
goto fill;
@@ -396,12 +396,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
/*
* OR in our service parameters with other provider (initiator), if any.
- * TBD XXX - indicate RETRY capability?
*/
fill:
fcp_parm = ntohl(spp->spp_params);
+ fcp_parm &= ~FCP_SPPF_RETRY;
spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
return FC_SPP_RESP_ACK;
+
+not_target:
+ fcp_parm = ntohl(spp->spp_params);
+ fcp_parm &= ~FCP_SPPF_TARG_FCN;
+ spp->spp_params = htonl(fcp_parm);
+ return 0;
}
/**
@@ -430,7 +436,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
{
struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
- transport_deregister_session(sess->se_sess);
kfree(sess);
}
@@ -438,6 +443,7 @@ static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
+ transport_deregister_session(sess->se_sess);
call_rcu(&sess->rcu, ft_sess_rcu_free);
}
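
The not_target path added to ft_prli_locked() answers a PRLI without advertising target capability: it pulls the service parameters out of network byte order, clears the target-function bit, and writes them back. A runnable userspace model of that byte-order dance (the bit value below is a placeholder, not the kernel's FCP_SPPF_TARG_FCN definition):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SPPF_TARG_FCN 0x0010u      /* hypothetical bit position */

    int main(void)
    {
            uint32_t spp_params = htonl(0x0013);    /* wire order, target bit set */
            uint32_t fcp_parm;

            fcp_parm = ntohl(spp_params);           /* to host order */
            fcp_parm &= ~DEMO_SPPF_TARG_FCN;        /* we are not a target here */
            spp_params = htonl(fcp_parm);           /* back to wire order */

            printf("params = 0x%08x\n", (unsigned)ntohl(spp_params));  /* 0x00000003 */
            return 0;
    }
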
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index 7772d1603769..bada1308318b 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -832,7 +832,7 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
return (struct exynos_tmu_platform_data *)
platform_get_device_id(pdev)->driver_data;
}
-static int __devinit exynos_tmu_probe(struct platform_device *pdev)
+static int exynos_tmu_probe(struct platform_device *pdev)
{
struct exynos_tmu_data *data;
struct exynos_tmu_platform_data *pdata = pdev->dev.platform_data;
@@ -866,11 +866,9 @@ static int __devinit exynos_tmu_probe(struct platform_device *pdev)
return -ENOENT;
}
- data->base = devm_request_and_ioremap(&pdev->dev, data->mem);
- if (!data->base) {
- dev_err(&pdev->dev, "Failed to ioremap memory\n");
- return -ENODEV;
- }
+ data->base = devm_ioremap_resource(&pdev->dev, data->mem);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
IRQF_TRIGGER_RISING, "exynos-tmu", data);
@@ -937,7 +935,7 @@ err_clk:
return ret;
}
-static int __devexit exynos_tmu_remove(struct platform_device *pdev)
+static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
@@ -985,7 +983,7 @@ static struct platform_driver exynos_tmu_driver = {
.of_match_table = exynos_tmu_match,
},
.probe = exynos_tmu_probe,
- .remove = __devexit_p(exynos_tmu_remove),
+ .remove = exynos_tmu_remove,
.id_table = exynos_tmu_driver_ids,
};
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 0ecf22b6a38e..978db344bda0 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -1,3 +1,14 @@
+config TTY
+ bool "Enable TTY" if EXPERT
+ default y
+ ---help---
+ Allows you to remove TTY support which can save space, and
+ blocks features that require TTY from inclusion in the kernel.
+ TTY is required for any text terminals or serial port
+ communication. Most users should leave this enabled.
+
+if TTY
+
config VT
bool "Virtual terminal" if EXPERT
depends on !S390 && !UML
@@ -388,3 +399,24 @@ config PPC_EARLY_DEBUG_EHV_BC_HANDLE
If the number you specify is not a valid byte channel handle, then
there simply will be no early console output. This is true also
if you don't boot under a hypervisor at all.
+
+config GOLDFISH_TTY
+ tristate "Goldfish TTY Driver"
+ depends on GOLDFISH
+ help
+ Console and system TTY driver for the Goldfish virtual platform.
+
+config DA_TTY
+ bool "DA TTY"
+ depends on METAG_DA
+ select SERIAL_NONSTANDARD
+ help
+ This enables a TTY on a Dash channel.
+
+config DA_CONSOLE
+ bool "DA Console"
+ depends on DA_TTY
+ help
+ This enables a console on a Dash channel.
+
+endif # TTY
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 2953059530e4..6b78399bc7c9 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -1,4 +1,4 @@
-obj-y += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
+obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
@@ -27,5 +27,7 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
+obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
+obj-$(CONFIG_DA_TTY) += metag_da.o
obj-y += ipwireless/
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 9d7d00cdfecb..fc700342d43f 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -251,7 +251,6 @@ static void receive_chars(struct serial_state *info)
{
int status;
int serdatr;
- struct tty_struct *tty = info->tport.tty;
unsigned char ch, flag;
struct async_icount *icount;
int oe = 0;
@@ -314,7 +313,7 @@ static void receive_chars(struct serial_state *info)
#endif
flag = TTY_BREAK;
if (info->tport.flags & ASYNC_SAK)
- do_SAK(tty);
+ do_SAK(info->tport.tty);
} else if (status & UART_LSR_PE)
flag = TTY_PARITY;
else if (status & UART_LSR_FE)
@@ -328,10 +327,10 @@ static void receive_chars(struct serial_state *info)
oe = 1;
}
}
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&info->tport, ch, flag);
if (oe == 1)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(&info->tport, 0, TTY_OVERRUN);
+ tty_flip_buffer_push(&info->tport);
out:
return;
}
@@ -394,11 +393,6 @@ static void check_modem_status(struct serial_state *info)
icount->dsr++;
if (dstatus & SER_DCD) {
icount->dcd++;
-#ifdef CONFIG_HARD_PPS
- if ((port->flags & ASYNC_HARDPPS_CD) &&
- !(status & SER_DCD))
- hardpps();
-#endif
}
if (dstatus & SER_CTS)
icount->cts++;
@@ -1099,7 +1093,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
state->custom_divisor = new_serial.custom_divisor;
port->close_delay = new_serial.close_delay * HZ/100;
port->closing_wait = new_serial.closing_wait * HZ/100;
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
if (port->flags & ASYNC_INITIALIZED) {
@@ -1528,7 +1522,7 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
if (serial_paranoia_check(info, tty->name, "rs_open"))
return -ENODEV;
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
retval = startup(tty, info);
if (retval) {
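
The amiserial changes above are part of the tree-wide switch of the flip-buffer helpers from struct tty_struct to struct tty_port, so receive paths no longer need a tty_port_tty_get()/tty_kref_put() pair just to queue data. A hedged kernel-context sketch of an RX handler using the new signatures (handler name hypothetical):

    #include <linux/tty.h>
    #include <linux/tty_flip.h>

    /* Queue received characters on the tty_port's flip buffer and push
     * them to the line discipline.
     */
    static void demo_receive_chars(struct tty_port *port,
                                   const unsigned char *buf, int count)
    {
            int i;

            for (i = 0; i < count; i++)
                    tty_insert_flip_char(port, buf[i], TTY_NORMAL);

            tty_flip_buffer_push(port);     /* hand the data to the ldisc */
    }
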
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index 1cfcdbf1d0cc..a93a424873fa 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -95,18 +95,16 @@ bfin_jc_emudat_manager(void *arg)
/* if incoming data is ready, eat it */
if (bfin_read_DBGSTAT() & EMUDIF) {
- if (tty != NULL) {
- uint32_t emudat = bfin_read_emudat();
- if (inbound_len == 0) {
- pr_debug("incoming length: 0x%08x\n", emudat);
- inbound_len = emudat;
- } else {
- size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
- pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
- inbound_len -= num_chars;
- tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
- tty_flip_buffer_push(tty);
- }
+ uint32_t emudat = bfin_read_emudat();
+ if (inbound_len == 0) {
+ pr_debug("incoming length: 0x%08x\n", emudat);
+ inbound_len = emudat;
+ } else {
+ size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
+ pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ inbound_len -= num_chars;
+ tty_insert_flip_string(&port, (unsigned char *)&emudat, num_chars);
+ tty_flip_buffer_push(&port);
}
}
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index b09c8d1f9a66..345bd0e0884e 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -441,7 +441,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
void __iomem *base_addr)
{
struct cyclades_port *info;
- struct tty_struct *tty;
+ struct tty_port *port;
int len, index = cinfo->bus_index;
u8 ivr, save_xir, channel, save_car, data, char_count;
@@ -452,22 +452,11 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
save_xir = readb(base_addr + (CyRIR << index));
channel = save_xir & CyIRChannel;
info = &cinfo->ports[channel + chip * 4];
+ port = &info->port;
save_car = cyy_readb(info, CyCAR);
cyy_writeb(info, CyCAR, save_xir);
ivr = cyy_readb(info, CyRIVR) & CyIVRMask;
- tty = tty_port_tty_get(&info->port);
- /* if there is nowhere to put the data, discard it */
- if (tty == NULL) {
- if (ivr == CyIVRRxEx) { /* exception */
- data = cyy_readb(info, CyRDSR);
- } else { /* normal character reception */
- char_count = cyy_readb(info, CyRDCR);
- while (char_count--)
- data = cyy_readb(info, CyRDSR);
- }
- goto end;
- }
/* there is an open port for this data */
if (ivr == CyIVRRxEx) { /* exception */
data = cyy_readb(info, CyRDSR);
@@ -484,40 +473,45 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
if (data & info->ignore_status_mask) {
info->icount.rx++;
- tty_kref_put(tty);
return;
}
- if (tty_buffer_request_room(tty, 1)) {
+ if (tty_buffer_request_room(port, 1)) {
if (data & info->read_status_mask) {
if (data & CyBREAK) {
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(port,
cyy_readb(info, CyRDSR),
TTY_BREAK);
info->icount.rx++;
- if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
+ if (port->flags & ASYNC_SAK) {
+ struct tty_struct *tty =
+ tty_port_tty_get(port);
+ if (tty) {
+ do_SAK(tty);
+ tty_kref_put(tty);
+ }
+ }
} else if (data & CyFRAME) {
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(port,
cyy_readb(info, CyRDSR),
TTY_FRAME);
info->icount.rx++;
info->idle_stats.frame_errs++;
} else if (data & CyPARITY) {
/* Pieces of seven... */
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(port,
cyy_readb(info, CyRDSR),
TTY_PARITY);
info->icount.rx++;
info->idle_stats.parity_errs++;
} else if (data & CyOVERRUN) {
- tty_insert_flip_char(tty, 0,
+ tty_insert_flip_char(port, 0,
TTY_OVERRUN);
info->icount.rx++;
/* If the flip buffer itself is
overflowing, we still lose
the next incoming character.
*/
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(port,
cyy_readb(info, CyRDSR),
TTY_FRAME);
info->icount.rx++;
@@ -527,12 +521,12 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
/* } else if(data & CyTIMEOUT) { */
/* } else if(data & CySPECHAR) { */
} else {
- tty_insert_flip_char(tty, 0,
+ tty_insert_flip_char(port, 0,
TTY_NORMAL);
info->icount.rx++;
}
} else {
- tty_insert_flip_char(tty, 0, TTY_NORMAL);
+ tty_insert_flip_char(port, 0, TTY_NORMAL);
info->icount.rx++;
}
} else {
@@ -552,10 +546,10 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
info->mon.char_max = char_count;
info->mon.char_last = char_count;
#endif
- len = tty_buffer_request_room(tty, char_count);
+ len = tty_buffer_request_room(port, char_count);
while (len--) {
data = cyy_readb(info, CyRDSR);
- tty_insert_flip_char(tty, data, TTY_NORMAL);
+ tty_insert_flip_char(port, data, TTY_NORMAL);
info->idle_stats.recv_bytes++;
info->icount.rx++;
#ifdef CY_16Y_HACK
@@ -564,9 +558,8 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
}
info->idle_stats.recv_idle = jiffies;
}
- tty_schedule_flip(tty);
- tty_kref_put(tty);
-end:
+ tty_schedule_flip(port);
+
/* end of service */
cyy_writeb(info, CyRIR, save_xir & 0x3f);
cyy_writeb(info, CyCAR, save_car);
@@ -924,10 +917,11 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
return 0;
} /* cyz_issue_cmd */
-static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty)
+static void cyz_handle_rx(struct cyclades_port *info)
{
struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
struct cyclades_card *cinfo = info->card;
+ struct tty_port *port = &info->port;
unsigned int char_count;
int len;
#ifdef BLOCKMOVE
@@ -946,80 +940,77 @@ static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty)
else
char_count = rx_put - rx_get + rx_bufsize;
- if (char_count) {
+ if (!char_count)
+ return;
+
#ifdef CY_ENABLE_MONITORING
- info->mon.int_count++;
- info->mon.char_count += char_count;
- if (char_count > info->mon.char_max)
- info->mon.char_max = char_count;
- info->mon.char_last = char_count;
+ info->mon.int_count++;
+ info->mon.char_count += char_count;
+ if (char_count > info->mon.char_max)
+ info->mon.char_max = char_count;
+ info->mon.char_last = char_count;
#endif
- if (tty == NULL) {
- /* flush received characters */
- new_rx_get = (new_rx_get + char_count) &
- (rx_bufsize - 1);
- info->rflush_count++;
- } else {
+
#ifdef BLOCKMOVE
- /* we'd like to use memcpy(t, f, n) and memset(s, c, count)
- for performance, but because of buffer boundaries, there
- may be several steps to the operation */
- while (1) {
- len = tty_prepare_flip_string(tty, &buf,
- char_count);
- if (!len)
- break;
+ /* we'd like to use memcpy(t, f, n) and memset(s, c, count)
+ for performance, but because of buffer boundaries, there
+ may be several steps to the operation */
+ while (1) {
+ len = tty_prepare_flip_string(port, &buf,
+ char_count);
+ if (!len)
+ break;
- len = min_t(unsigned int, min(len, char_count),
- rx_bufsize - new_rx_get);
+ len = min_t(unsigned int, min(len, char_count),
+ rx_bufsize - new_rx_get);
- memcpy_fromio(buf, cinfo->base_addr +
- rx_bufaddr + new_rx_get, len);
+ memcpy_fromio(buf, cinfo->base_addr +
+ rx_bufaddr + new_rx_get, len);
- new_rx_get = (new_rx_get + len) &
- (rx_bufsize - 1);
- char_count -= len;
- info->icount.rx += len;
- info->idle_stats.recv_bytes += len;
- }
+ new_rx_get = (new_rx_get + len) &
+ (rx_bufsize - 1);
+ char_count -= len;
+ info->icount.rx += len;
+ info->idle_stats.recv_bytes += len;
+ }
#else
- len = tty_buffer_request_room(tty, char_count);
- while (len--) {
- data = readb(cinfo->base_addr + rx_bufaddr +
- new_rx_get);
- new_rx_get = (new_rx_get + 1) &
- (rx_bufsize - 1);
- tty_insert_flip_char(tty, data, TTY_NORMAL);
- info->idle_stats.recv_bytes++;
- info->icount.rx++;
- }
+ len = tty_buffer_request_room(port, char_count);
+ while (len--) {
+ data = readb(cinfo->base_addr + rx_bufaddr +
+ new_rx_get);
+ new_rx_get = (new_rx_get + 1) &
+ (rx_bufsize - 1);
+ tty_insert_flip_char(port, data, TTY_NORMAL);
+ info->idle_stats.recv_bytes++;
+ info->icount.rx++;
+ }
#endif
#ifdef CONFIG_CYZ_INTR
- /* Recalculate the number of chars in the RX buffer and issue
- a cmd in case it's higher than the RX high water mark */
- rx_put = readl(&buf_ctrl->rx_put);
- if (rx_put >= rx_get)
- char_count = rx_put - rx_get;
- else
- char_count = rx_put - rx_get + rx_bufsize;
- if (char_count >= readl(&buf_ctrl->rx_threshold) &&
- !timer_pending(&cyz_rx_full_timer[
- info->line]))
- mod_timer(&cyz_rx_full_timer[info->line],
- jiffies + 1);
+ /* Recalculate the number of chars in the RX buffer and issue
+ a cmd in case it's higher than the RX high water mark */
+ rx_put = readl(&buf_ctrl->rx_put);
+ if (rx_put >= rx_get)
+ char_count = rx_put - rx_get;
+ else
+ char_count = rx_put - rx_get + rx_bufsize;
+ if (char_count >= readl(&buf_ctrl->rx_threshold) &&
+ !timer_pending(&cyz_rx_full_timer[
+ info->line]))
+ mod_timer(&cyz_rx_full_timer[info->line],
+ jiffies + 1);
#endif
- info->idle_stats.recv_idle = jiffies;
- tty_schedule_flip(tty);
- }
- /* Update rx_get */
- cy_writel(&buf_ctrl->rx_get, new_rx_get);
- }
+ info->idle_stats.recv_idle = jiffies;
+ tty_schedule_flip(&info->port);
+
+ /* Update rx_get */
+ cy_writel(&buf_ctrl->rx_get, new_rx_get);
}
-static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty)
+static void cyz_handle_tx(struct cyclades_port *info)
{
struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
struct cyclades_card *cinfo = info->card;
+ struct tty_struct *tty;
u8 data;
unsigned int char_count;
#ifdef BLOCKMOVE
@@ -1039,63 +1030,63 @@ static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty)
else
char_count = tx_get - tx_put - 1;
- if (char_count) {
-
- if (tty == NULL)
- goto ztxdone;
+ if (!char_count)
+ return;
+
+ tty = tty_port_tty_get(&info->port);
+ if (tty == NULL)
+ goto ztxdone;
- if (info->x_char) { /* send special char */
- data = info->x_char;
+ if (info->x_char) { /* send special char */
+ data = info->x_char;
- cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
- tx_put = (tx_put + 1) & (tx_bufsize - 1);
- info->x_char = 0;
- char_count--;
- info->icount.tx++;
- }
+ cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
+ tx_put = (tx_put + 1) & (tx_bufsize - 1);
+ info->x_char = 0;
+ char_count--;
+ info->icount.tx++;
+ }
#ifdef BLOCKMOVE
- while (0 < (small_count = min_t(unsigned int,
- tx_bufsize - tx_put, min_t(unsigned int,
- (SERIAL_XMIT_SIZE - info->xmit_tail),
- min_t(unsigned int, info->xmit_cnt,
- char_count))))) {
-
- memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr +
- tx_put),
- &info->port.xmit_buf[info->xmit_tail],
- small_count);
-
- tx_put = (tx_put + small_count) & (tx_bufsize - 1);
- char_count -= small_count;
- info->icount.tx += small_count;
- info->xmit_cnt -= small_count;
- info->xmit_tail = (info->xmit_tail + small_count) &
- (SERIAL_XMIT_SIZE - 1);
- }
+ while (0 < (small_count = min_t(unsigned int,
+ tx_bufsize - tx_put, min_t(unsigned int,
+ (SERIAL_XMIT_SIZE - info->xmit_tail),
+ min_t(unsigned int, info->xmit_cnt,
+ char_count))))) {
+
+ memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr + tx_put),
+ &info->port.xmit_buf[info->xmit_tail],
+ small_count);
+
+ tx_put = (tx_put + small_count) & (tx_bufsize - 1);
+ char_count -= small_count;
+ info->icount.tx += small_count;
+ info->xmit_cnt -= small_count;
+ info->xmit_tail = (info->xmit_tail + small_count) &
+ (SERIAL_XMIT_SIZE - 1);
+ }
#else
- while (info->xmit_cnt && char_count) {
- data = info->port.xmit_buf[info->xmit_tail];
- info->xmit_cnt--;
- info->xmit_tail = (info->xmit_tail + 1) &
- (SERIAL_XMIT_SIZE - 1);
-
- cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
- tx_put = (tx_put + 1) & (tx_bufsize - 1);
- char_count--;
- info->icount.tx++;
- }
+ while (info->xmit_cnt && char_count) {
+ data = info->port.xmit_buf[info->xmit_tail];
+ info->xmit_cnt--;
+ info->xmit_tail = (info->xmit_tail + 1) &
+ (SERIAL_XMIT_SIZE - 1);
+
+ cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
+ tx_put = (tx_put + 1) & (tx_bufsize - 1);
+ char_count--;
+ info->icount.tx++;
+ }
#endif
- tty_wakeup(tty);
+ tty_wakeup(tty);
+ tty_kref_put(tty);
ztxdone:
- /* Update tx_put */
- cy_writel(&buf_ctrl->tx_put, tx_put);
- }
+ /* Update tx_put */
+ cy_writel(&buf_ctrl->tx_put, tx_put);
}
static void cyz_handle_cmd(struct cyclades_card *cinfo)
{
struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
- struct tty_struct *tty;
struct cyclades_port *info;
__u32 channel, param, fw_ver;
__u8 cmd;
@@ -1108,23 +1099,20 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
special_count = 0;
delta_count = 0;
info = &cinfo->ports[channel];
- tty = tty_port_tty_get(&info->port);
- if (tty == NULL)
- continue;
switch (cmd) {
case C_CM_PR_ERROR:
- tty_insert_flip_char(tty, 0, TTY_PARITY);
+ tty_insert_flip_char(&info->port, 0, TTY_PARITY);
info->icount.rx++;
special_count++;
break;
case C_CM_FR_ERROR:
- tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_insert_flip_char(&info->port, 0, TTY_FRAME);
info->icount.rx++;
special_count++;
break;
case C_CM_RXBRK:
- tty_insert_flip_char(tty, 0, TTY_BREAK);
+ tty_insert_flip_char(&info->port, 0, TTY_BREAK);
info->icount.rx++;
special_count++;
break;
@@ -1136,8 +1124,14 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
readl(&info->u.cyz.ch_ctrl->rs_status);
if (dcd & C_RS_DCD)
wake_up_interruptible(&info->port.open_wait);
- else
- tty_hangup(tty);
+ else {
+ struct tty_struct *tty;
+ tty = tty_port_tty_get(&info->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ }
}
break;
case C_CM_MCTS:
@@ -1166,7 +1160,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, "
"port %ld\n", info->card, channel);
#endif
- cyz_handle_rx(info, tty);
+ cyz_handle_rx(info);
break;
case C_CM_TXBEMPTY:
case C_CM_TXLOWWM:
@@ -1176,7 +1170,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, "
"port %ld\n", info->card, channel);
#endif
- cyz_handle_tx(info, tty);
+ cyz_handle_tx(info);
break;
#endif /* CONFIG_CYZ_INTR */
case C_CM_FATAL:
@@ -1188,8 +1182,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
if (delta_count)
wake_up_interruptible(&info->port.delta_msr_wait);
if (special_count)
- tty_schedule_flip(tty);
- tty_kref_put(tty);
+ tty_schedule_flip(&info->port);
}
}
@@ -1255,17 +1248,11 @@ static void cyz_poll(unsigned long arg)
cyz_handle_cmd(cinfo);
for (port = 0; port < cinfo->nports; port++) {
- struct tty_struct *tty;
-
info = &cinfo->ports[port];
- tty = tty_port_tty_get(&info->port);
- /* OK to pass NULL to the handle functions below.
- They need to drop the data in that case. */
if (!info->throttle)
- cyz_handle_rx(info, tty);
- cyz_handle_tx(info, tty);
- tty_kref_put(tty);
+ cyz_handle_rx(info);
+ cyz_handle_tx(info);
}
/* poll every 'cyz_polling_cycle' period */
expires = jiffies + cyz_polling_cycle;
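The cyclades hunks above all make the same change: the flip-buffer helpers (tty_buffer_request_room, tty_insert_flip_char, tty_prepare_flip_string, tty_schedule_flip) now take the struct tty_port rather than a struct tty_struct, so received data can be queued even while no tty is attached, and tty_port_tty_get() is only taken for operations that genuinely need a tty (do_SAK, tty_hangup, tty_wakeup). A minimal sketch of the resulting receive pattern, assuming a hypothetical mydev structure; only the tty core calls are taken from the diff:

#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Hypothetical per-device state; a real driver reads its own FIFO. */
struct mydev {
	struct tty_port port;
	bool break_seen;
};

static void mydev_rx(struct mydev *dev, const unsigned char *buf, int count)
{
	struct tty_port *port = &dev->port;
	int i;

	for (i = 0; i < count; i++)
		if (!tty_insert_flip_char(port, buf[i], TTY_NORMAL))
			break;			/* port buffers full, drop the rest */

	tty_schedule_flip(port);		/* push to the ldisc from a workqueue */

	if (dev->break_seen && (port->flags & ASYNC_SAK)) {
		/* do_SAK() still needs a real tty reference */
		struct tty_struct *tty = tty_port_tty_get(port);

		if (tty) {
			do_SAK(tty);
			tty_kref_put(tty);
		}
	}
}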
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index c117d775a22f..ed92622b8949 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -371,22 +371,17 @@ console_initcall(ehv_bc_console_init);
static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
{
struct ehv_bc_data *bc = data;
- struct tty_struct *ttys = tty_port_tty_get(&bc->port);
unsigned int rx_count, tx_count, len;
int count;
char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
int ret;
- /* ttys could be NULL during a hangup */
- if (!ttys)
- return IRQ_HANDLED;
-
/* Find out how much data needs to be read, and then ask the TTY layer
* if it can handle that much. We want to ensure that every byte we
* read from the byte channel will be accepted by the TTY layer.
*/
ev_byte_channel_poll(bc->handle, &rx_count, &tx_count);
- count = tty_buffer_request_room(ttys, rx_count);
+ count = tty_buffer_request_room(&bc->port, rx_count);
/* 'count' is the maximum amount of data the TTY layer can accept at
* this time. However, during testing, I was never able to get 'count'
@@ -407,7 +402,7 @@ static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
*/
/* Pass the received data to the tty layer. */
- ret = tty_insert_flip_string(ttys, buffer, len);
+ ret = tty_insert_flip_string(&bc->port, buffer, len);
/* 'ret' is the number of bytes that the TTY layer accepted.
* If it's not equal to 'len', then it means the buffer is
@@ -422,9 +417,7 @@ static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
}
/* Tell the tty layer that we're done. */
- tty_flip_buffer_push(ttys);
-
- tty_kref_put(ttys);
+ tty_flip_buffer_push(&bc->port);
return IRQ_HANDLED;
}
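ehv_bytechan shows the bulk form of the same conversion: tty_buffer_request_room(), tty_insert_flip_string() and tty_flip_buffer_push() all operate on &bc->port, so the old NULL-tty early return during hangup is no longer needed. A small sketch of that call sequence, assuming the caller already has the received bytes in a linear buffer (the helper name is invented):

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Queue a block of received bytes into a tty_port; this works even while
 * the port has no tty attached, the data just waits in the port's buffers. */
static int push_rx_block(struct tty_port *port, const unsigned char *buf,
			 size_t len)
{
	int space, copied;

	/* ask the tty layer how much it can accept right now */
	space = tty_buffer_request_room(port, len);
	if (space == 0)
		return 0;

	copied = tty_insert_flip_string(port, buf, space);
	tty_flip_buffer_push(port);	/* hand the data to the line discipline */

	return copied;
}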
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
new file mode 100644
index 000000000000..f17d2e4ee2ca
--- /dev/null
+++ b/drivers/tty/goldfish.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+enum {
+ GOLDFISH_TTY_PUT_CHAR = 0x00,
+ GOLDFISH_TTY_BYTES_READY = 0x04,
+ GOLDFISH_TTY_CMD = 0x08,
+
+ GOLDFISH_TTY_DATA_PTR = 0x10,
+ GOLDFISH_TTY_DATA_LEN = 0x14,
+
+ GOLDFISH_TTY_CMD_INT_DISABLE = 0,
+ GOLDFISH_TTY_CMD_INT_ENABLE = 1,
+ GOLDFISH_TTY_CMD_WRITE_BUFFER = 2,
+ GOLDFISH_TTY_CMD_READ_BUFFER = 3,
+};
+
+struct goldfish_tty {
+ struct tty_port port;
+ spinlock_t lock;
+ void __iomem *base;
+ u32 irq;
+ int opencount;
+ struct console console;
+};
+
+static DEFINE_MUTEX(goldfish_tty_lock);
+static struct tty_driver *goldfish_tty_driver;
+static u32 goldfish_tty_line_count = 8;
+static u32 goldfish_tty_current_line_count;
+static struct goldfish_tty *goldfish_ttys;
+
+static void goldfish_tty_do_write(int line, const char *buf, unsigned count)
+{
+ unsigned long irq_flags;
+ struct goldfish_tty *qtty = &goldfish_ttys[line];
+ void __iomem *base = qtty->base;
+ spin_lock_irqsave(&qtty->lock, irq_flags);
+ writel((u32)buf, base + GOLDFISH_TTY_DATA_PTR);
+ writel(count, base + GOLDFISH_TTY_DATA_LEN);
+ writel(GOLDFISH_TTY_CMD_WRITE_BUFFER, base + GOLDFISH_TTY_CMD);
+ spin_unlock_irqrestore(&qtty->lock, irq_flags);
+}
+
+static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct goldfish_tty *qtty = &goldfish_ttys[pdev->id];
+ void __iomem *base = qtty->base;
+ unsigned long irq_flags;
+ unsigned char *buf;
+ u32 count;
+
+ count = readl(base + GOLDFISH_TTY_BYTES_READY);
+ if(count == 0)
+ return IRQ_NONE;
+
+ count = tty_prepare_flip_string(&qtty->port, &buf, count);
+ spin_lock_irqsave(&qtty->lock, irq_flags);
+ writel((u32)buf, base + GOLDFISH_TTY_DATA_PTR);
+ writel(count, base + GOLDFISH_TTY_DATA_LEN);
+ writel(GOLDFISH_TTY_CMD_READ_BUFFER, base + GOLDFISH_TTY_CMD);
+ spin_unlock_irqrestore(&qtty->lock, irq_flags);
+ tty_schedule_flip(&qtty->port);
+ return IRQ_HANDLED;
+}
+
+static int goldfish_tty_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct goldfish_tty *qtty = container_of(port, struct goldfish_tty, port);
+ writel(GOLDFISH_TTY_CMD_INT_ENABLE, qtty->base + GOLDFISH_TTY_CMD);
+ return 0;
+}
+
+static void goldfish_tty_shutdown(struct tty_port *port)
+{
+ struct goldfish_tty *qtty = container_of(port, struct goldfish_tty, port);
+ writel(GOLDFISH_TTY_CMD_INT_DISABLE, qtty->base + GOLDFISH_TTY_CMD);
+}
+
+static int goldfish_tty_open(struct tty_struct * tty, struct file * filp)
+{
+ struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
+ return tty_port_open(&qtty->port, tty, filp);
+}
+
+static void goldfish_tty_close(struct tty_struct * tty, struct file * filp)
+{
+ tty_port_close(tty->port, tty, filp);
+}
+
+static void goldfish_tty_hangup(struct tty_struct *tty)
+{
+ tty_port_hangup(tty->port);
+}
+
+static int goldfish_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ goldfish_tty_do_write(tty->index, buf, count);
+ return count;
+}
+
+static int goldfish_tty_write_room(struct tty_struct *tty)
+{
+ return 0x10000;
+}
+
+static int goldfish_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
+ void __iomem *base = qtty->base;
+ return readl(base + GOLDFISH_TTY_BYTES_READY);
+}
+
+static void goldfish_tty_console_write(struct console *co, const char *b, unsigned count)
+{
+ goldfish_tty_do_write(co->index, b, count);
+}
+
+static struct tty_driver *goldfish_tty_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return goldfish_tty_driver;
+}
+
+static int goldfish_tty_console_setup(struct console *co, char *options)
+{
+	if ((unsigned)co->index >= goldfish_tty_line_count)
+ return -ENODEV;
+ if(goldfish_ttys[co->index].base == 0)
+ return -ENODEV;
+ return 0;
+}
+
+static struct tty_port_operations goldfish_port_ops = {
+ .activate = goldfish_tty_activate,
+ .shutdown = goldfish_tty_shutdown
+};
+
+static struct tty_operations goldfish_tty_ops = {
+ .open = goldfish_tty_open,
+ .close = goldfish_tty_close,
+ .hangup = goldfish_tty_hangup,
+ .write = goldfish_tty_write,
+ .write_room = goldfish_tty_write_room,
+ .chars_in_buffer = goldfish_tty_chars_in_buffer,
+};
+
+static int goldfish_tty_create_driver(void)
+{
+ int ret;
+ struct tty_driver *tty;
+
+ goldfish_ttys = kzalloc(sizeof(*goldfish_ttys) * goldfish_tty_line_count, GFP_KERNEL);
+ if(goldfish_ttys == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_goldfish_ttys_failed;
+ }
+ tty = alloc_tty_driver(goldfish_tty_line_count);
+ if(tty == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_tty_driver_failed;
+ }
+ tty->driver_name = "goldfish";
+ tty->name = "ttyGF";
+ tty->type = TTY_DRIVER_TYPE_SERIAL;
+ tty->subtype = SERIAL_TYPE_NORMAL;
+ tty->init_termios = tty_std_termios;
+ tty->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(tty, &goldfish_tty_ops);
+ ret = tty_register_driver(tty);
+ if(ret)
+ goto err_tty_register_driver_failed;
+
+ goldfish_tty_driver = tty;
+ return 0;
+
+err_tty_register_driver_failed:
+ put_tty_driver(tty);
+err_alloc_tty_driver_failed:
+ kfree(goldfish_ttys);
+ goldfish_ttys = NULL;
+err_alloc_goldfish_ttys_failed:
+ return ret;
+}
+
+static void goldfish_tty_delete_driver(void)
+{
+ tty_unregister_driver(goldfish_tty_driver);
+ put_tty_driver(goldfish_tty_driver);
+ goldfish_tty_driver = NULL;
+ kfree(goldfish_ttys);
+ goldfish_ttys = NULL;
+}
+
+static int goldfish_tty_probe(struct platform_device *pdev)
+{
+ struct goldfish_tty *qtty;
+ int ret = -EINVAL;
+ struct resource *r;
+ struct device *ttydev;
+ void __iomem *base;
+ u32 irq;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if(r == NULL)
+ return -EINVAL;
+
+ base = ioremap(r->start, 0x1000);
+	if (base == NULL) {
+		pr_err("goldfish_tty: unable to remap base\n");
+		return -ENOMEM;
+	}
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if(r == NULL)
+ goto err_unmap;
+
+ irq = r->start;
+
+ if(pdev->id >= goldfish_tty_line_count)
+ goto err_unmap;
+
+ mutex_lock(&goldfish_tty_lock);
+ if(goldfish_tty_current_line_count == 0) {
+ ret = goldfish_tty_create_driver();
+ if(ret)
+ goto err_create_driver_failed;
+ }
+ goldfish_tty_current_line_count++;
+
+ qtty = &goldfish_ttys[pdev->id];
+ spin_lock_init(&qtty->lock);
+ tty_port_init(&qtty->port);
+ qtty->port.ops = &goldfish_port_ops;
+ qtty->base = base;
+ qtty->irq = irq;
+
+ writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_CMD);
+
+ ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED, "goldfish_tty", pdev);
+ if(ret)
+ goto err_request_irq_failed;
+
+
+ ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver,
+ pdev->id, &pdev->dev);
+ if(IS_ERR(ttydev)) {
+ ret = PTR_ERR(ttydev);
+ goto err_tty_register_device_failed;
+ }
+
+ strcpy(qtty->console.name, "ttyGF");
+ qtty->console.write = goldfish_tty_console_write;
+ qtty->console.device = goldfish_tty_console_device;
+ qtty->console.setup = goldfish_tty_console_setup;
+ qtty->console.flags = CON_PRINTBUFFER;
+ qtty->console.index = pdev->id;
+ register_console(&qtty->console);
+
+ mutex_unlock(&goldfish_tty_lock);
+ return 0;
+
+err_tty_register_device_failed:
+ free_irq(irq, pdev);
+err_request_irq_failed:
+ goldfish_tty_current_line_count--;
+ if(goldfish_tty_current_line_count == 0)
+ goldfish_tty_delete_driver();
+err_create_driver_failed:
+ mutex_unlock(&goldfish_tty_lock);
+err_unmap:
+ iounmap(base);
+ return ret;
+}
+
+static int goldfish_tty_remove(struct platform_device *pdev)
+{
+ struct goldfish_tty *qtty;
+
+ mutex_lock(&goldfish_tty_lock);
+
+ qtty = &goldfish_ttys[pdev->id];
+ unregister_console(&qtty->console);
+ tty_unregister_device(goldfish_tty_driver, pdev->id);
+ iounmap(qtty->base);
+	qtty->base = NULL;
+ free_irq(qtty->irq, pdev);
+ goldfish_tty_current_line_count--;
+ if(goldfish_tty_current_line_count == 0)
+ goldfish_tty_delete_driver();
+ mutex_unlock(&goldfish_tty_lock);
+ return 0;
+}
+
+static struct platform_driver goldfish_tty_platform_driver = {
+ .probe = goldfish_tty_probe,
+ .remove = goldfish_tty_remove,
+ .driver = {
+ .name = "goldfish_tty"
+ }
+};
+
+module_platform_driver(goldfish_tty_platform_driver);
+
+MODULE_LICENSE("GPL v2");
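The new goldfish driver also shows the zero-copy receive path: tty_prepare_flip_string() reserves space directly in the port's flip buffer, the hardware is pointed at it, and tty_schedule_flip() pushes the result later. A rough sketch of that shape, with hw_read_into() standing in for the device-specific copy (an assumption, not part of the driver):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void zero_copy_rx(struct tty_port *port, size_t avail,
			 void (*hw_read_into)(unsigned char *dst, size_t len))
{
	unsigned char *buf;
	int len;

	/* reserve a contiguous area inside the port's flip buffer */
	len = tty_prepare_flip_string(port, &buf, avail);
	if (len <= 0)
		return;

	hw_read_into(buf, len);		/* device fills the buffer directly */
	tty_schedule_flip(port);	/* deliver it to the ldisc later */
}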
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index f47b734c6a7a..8902f9b4df71 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -1,3 +1,5 @@
+if TTY
+
config HVC_DRIVER
bool
help
@@ -119,3 +121,4 @@ config HVCS
which will also be compiled when this driver is built as a
module.
+endif # TTY
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 13ee53bd0bf6..eb255e807c06 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -629,7 +629,7 @@ int hvc_poll(struct hvc_struct *hp)
/* Read data if any */
for (;;) {
- int count = tty_buffer_request_room(tty, N_INBUF);
+ int count = tty_buffer_request_room(&hp->port, N_INBUF);
/* If flip is full, just reschedule a later read */
if (count == 0) {
@@ -672,7 +672,7 @@ int hvc_poll(struct hvc_struct *hp)
}
}
#endif /* CONFIG_MAGIC_SYSRQ */
- tty_insert_flip_char(tty, buf[i], 0);
+ tty_insert_flip_char(&hp->port, buf[i], 0);
}
read_total += n;
@@ -691,7 +691,7 @@ int hvc_poll(struct hvc_struct *hp)
a minimum for performance. */
timeout = MIN_TIMEOUT;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&hp->port);
}
tty_kref_put(tty);
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 877635733952..1956593ee89d 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -609,11 +609,11 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
/* remove the read masks */
hvcsd->todo_mask &= ~(HVCS_READ_MASK);
- if (tty_buffer_request_room(tty, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
+ if (tty_buffer_request_room(&hvcsd->port, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
got = hvc_get_chars(unit_address,
&buf[0],
HVCS_BUFF_LEN);
- tty_insert_flip_string(tty, buf, got);
+ tty_insert_flip_string(&hvcsd->port, buf, got);
}
/* Give the TTY time to process the data we just sent. */
@@ -623,7 +623,7 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
spin_unlock_irqrestore(&hvcsd->lock, flags);
/* This is synch because tty->low_latency == 1 */
if(got)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&hvcsd->port);
if (!got) {
/* Do this _after_ the flip_buffer_push */
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 68357a6e4de9..ef95a154854a 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -329,8 +329,7 @@ static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
}
}
-static void hvsi_insert_chars(struct hvsi_struct *hp, struct tty_struct *tty,
- const char *buf, int len)
+static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
int i;
@@ -346,7 +345,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, struct tty_struct *tty,
continue;
}
#endif /* CONFIG_MAGIC_SYSRQ */
- tty_insert_flip_char(tty, c, 0);
+ tty_insert_flip_char(&hp->port, c, 0);
}
}
@@ -359,8 +358,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, struct tty_struct *tty,
* revisited.
*/
#define TTY_THRESHOLD_THROTTLE 128
-static bool hvsi_recv_data(struct hvsi_struct *hp, struct tty_struct *tty,
- const uint8_t *packet)
+static bool hvsi_recv_data(struct hvsi_struct *hp, const uint8_t *packet)
{
const struct hvsi_header *header = (const struct hvsi_header *)packet;
const uint8_t *data = packet + sizeof(struct hvsi_header);
@@ -377,7 +375,7 @@ static bool hvsi_recv_data(struct hvsi_struct *hp, struct tty_struct *tty,
datalen = TTY_THRESHOLD_THROTTLE;
}
- hvsi_insert_chars(hp, tty, data, datalen);
+ hvsi_insert_chars(hp, data, datalen);
if (overflow > 0) {
/*
@@ -438,9 +436,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty,
case VS_DATA_PACKET_HEADER:
if (!is_open(hp))
break;
- if (tty == NULL)
- break; /* no tty buffer to put data in */
- flip = hvsi_recv_data(hp, tty, packet);
+ flip = hvsi_recv_data(hp, packet);
break;
case VS_CONTROL_PACKET_HEADER:
hvsi_recv_control(hp, packet, tty, handshake);
@@ -469,17 +465,17 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty,
compact_inbuf(hp, packet);
if (flip)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&hp->port);
return 1;
}
-static void hvsi_send_overflow(struct hvsi_struct *hp, struct tty_struct *tty)
+static void hvsi_send_overflow(struct hvsi_struct *hp)
{
pr_debug("%s: delivering %i bytes overflow\n", __func__,
hp->n_throttle);
- hvsi_insert_chars(hp, tty, hp->throttle_buf, hp->n_throttle);
+ hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
hp->n_throttle = 0;
}
@@ -514,8 +510,8 @@ static irqreturn_t hvsi_interrupt(int irq, void *arg)
if (tty && hp->n_throttle && !test_bit(TTY_THROTTLED, &tty->flags)) {
/* we weren't hung up and we weren't throttled, so we can
* deliver the rest now */
- hvsi_send_overflow(hp, tty);
- tty_flip_buffer_push(tty);
+ hvsi_send_overflow(hp);
+ tty_flip_buffer_push(&hp->port);
}
spin_unlock_irqrestore(&hp->lock, flags);
@@ -1001,8 +997,8 @@ static void hvsi_unthrottle(struct tty_struct *tty)
spin_lock_irqsave(&hp->lock, flags);
if (hp->n_throttle) {
- hvsi_send_overflow(hp, tty);
- tty_flip_buffer_push(tty);
+ hvsi_send_overflow(hp);
+ tty_flip_buffer_push(&hp->port);
}
spin_unlock_irqrestore(&hp->lock, flags);
@@ -1187,9 +1183,7 @@ static int __init hvsi_console_init(void)
hvsi_wait = poll_for_state; /* no irqs yet; must poll */
/* search device tree for vty nodes */
- for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol");
- vty != NULL;
- vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
+ for_each_compatible_node(vty, "serial", "hvterm-protocol") {
struct hvsi_struct *hp;
const uint32_t *vtermno, *irq;
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index b4ba0670dc54..97a511f4185d 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -646,7 +646,7 @@ static void queue_received_packet(struct ipw_hardware *hw,
(*assem) = pool_allocate(hw, *assem, length);
if (!(*assem)) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
- ": no memory for incomming data packet, dropped!\n");
+ ": no memory for incoming data packet, dropped!\n");
return;
}
(*assem)->protocol = protocol;
@@ -670,7 +670,7 @@ static void queue_received_packet(struct ipw_hardware *hw,
packet = pool_allocate(hw, NULL, length);
if (!packet) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
- ": no memory for incomming ctrl packet, dropped!\n");
+ ": no memory for incoming ctrl packet, dropped!\n");
return;
}
packet->protocol = protocol;
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 2cde13ddf9fc..8fd72ff9436e 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -106,7 +106,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
tty->port.tty = linux_tty;
linux_tty->driver_data = tty;
- linux_tty->low_latency = 1;
+ tty->port.low_latency = 1;
if (tty->tty_type == TTYTYPE_MODEM)
ipwireless_ppp_open(tty->network);
@@ -160,15 +160,9 @@ static void ipw_close(struct tty_struct *linux_tty, struct file *filp)
void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
unsigned int length)
{
- struct tty_struct *linux_tty;
int work = 0;
mutex_lock(&tty->ipw_tty_mutex);
- linux_tty = tty->port.tty;
- if (linux_tty == NULL) {
- mutex_unlock(&tty->ipw_tty_mutex);
- return;
- }
if (!tty->port.count) {
mutex_unlock(&tty->ipw_tty_mutex);
@@ -176,7 +170,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
}
mutex_unlock(&tty->ipw_tty_mutex);
- work = tty_insert_flip_string(linux_tty, data, length);
+ work = tty_insert_flip_string(&tty->port, data, length);
if (work != length)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
@@ -187,7 +181,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
* This may sleep if ->low_latency is set
*/
if (work)
- tty_flip_buffer_push(linux_tty);
+ tty_flip_buffer_push(&tty->port);
}
static void ipw_write_packet_sent_callback(void *callback_data,
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 3205b2e9090b..858291ca889c 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -634,10 +634,10 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
break;
case 1: /* Received Break !!! */
- tty_insert_flip_char(tty, 0, TTY_BREAK);
+ tty_insert_flip_char(&port->port, 0, TTY_BREAK);
if (port->port.flags & ASYNC_SAK)
do_SAK(tty);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
break;
case 2: /* Statistics */
@@ -650,15 +650,15 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
break;
}
} else { /* Data Packet */
-
- count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
+ count = tty_prepare_flip_string(&port->port, &rp,
+ byte_count & ~1);
pr_debug("%s: Can rx %d of %d bytes.\n",
__func__, count, byte_count);
word_count = count >> 1;
insw(base, rp, word_count);
byte_count -= (word_count << 1);
if (count & 0x0001) {
- tty_insert_flip_char(tty, inw(base) & 0xff,
+ tty_insert_flip_char(&port->port, inw(base) & 0xff,
TTY_NORMAL);
byte_count -= 2;
}
@@ -671,7 +671,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
byte_count -= 2;
}
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
outw(0x0000, base+0x04); /* enable interrupts */
spin_unlock(&card->card_lock);
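isicom, like cyclades above and moxa below, now reports line-status errors to the tty_port as per-character flags. The sketch below shows the usual mapping; the MY_RX_* status bits are invented for illustration, only the TTY_* flags and the tty_port calls come from the tty layer.

#include <linux/tty.h>
#include <linux/tty_flip.h>

#define MY_RX_BREAK	0x01	/* hypothetical UART status bits */
#define MY_RX_FRAME	0x02
#define MY_RX_PARITY	0x04
#define MY_RX_OVERRUN	0x08

static void queue_rx_char(struct tty_port *port, unsigned char ch,
			  unsigned int status)
{
	char flag = TTY_NORMAL;

	if (status & MY_RX_BREAK)
		flag = TTY_BREAK;
	else if (status & MY_RX_FRAME)
		flag = TTY_FRAME;
	else if (status & MY_RX_PARITY)
		flag = TTY_PARITY;

	tty_insert_flip_char(port, ch, flag);

	/* overruns are conventionally reported as an extra NUL */
	if (status & MY_RX_OVERRUN)
		tty_insert_flip_char(port, 0, TTY_OVERRUN);

	tty_flip_buffer_push(port);
}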
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
new file mode 100644
index 000000000000..0e888621f484
--- /dev/null
+++ b/drivers/tty/metag_da.c
@@ -0,0 +1,677 @@
+/*
+ * dashtty.c - tty driver for Dash channels interface.
+ *
+ * Copyright (C) 2007,2008,2012 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/uaccess.h>
+
+#include <asm/da.h>
+
+/* Channel error codes */
+#define CONAOK 0
+#define CONERR 1
+#define CONBAD 2
+#define CONPRM 3
+#define CONADR 4
+#define CONCNT 5
+#define CONCBF 6
+#define CONCBE 7
+#define CONBSY 8
+
+/* Default channel for the console */
+#define CONSOLE_CHANNEL 1
+
+#define NUM_TTY_CHANNELS 6
+
+/* Auto allocate */
+#define DA_TTY_MAJOR 0
+
+/* A speedy poll rate helps the userland debug process connection response.
+ * But, if you set it too high then no other userland processes get much
+ * of a look in.
+ */
+#define DA_TTY_POLL (HZ / 50)
+
+/*
+ * A short put delay improves latency but has a high throughput overhead
+ */
+#define DA_TTY_PUT_DELAY (HZ / 100)
+
+static atomic_t num_channels_need_poll = ATOMIC_INIT(0);
+
+static struct timer_list poll_timer;
+
+static struct tty_driver *channel_driver;
+
+static struct timer_list put_timer;
+static struct task_struct *dashtty_thread;
+
+#define RX_BUF_SIZE 1024
+
+enum {
+ INCHR = 1,
+ OUTCHR,
+ RDBUF,
+ WRBUF,
+ RDSTAT
+};
+
+/**
+ * struct dashtty_port - Wrapper struct for dashtty tty_port.
+ * @port: TTY port data
+ * @rx_lock: Lock for rx_buf.
+ * This protects between the poll timer and user context.
+ * It's also held during read SWITCH operations.
+ * @rx_buf: Read buffer
+ * @xmit_lock: Lock for xmit_*, and port.xmit_buf.
+ * This protects between user context and kernel thread.
+ * It's also held during write SWITCH operations.
+ * @xmit_cnt: Size of xmit buffer contents
+ * @xmit_head: Head of xmit buffer where data is written
+ * @xmit_tail: Tail of xmit buffer where data is read
+ * @xmit_empty: Completion for xmit buffer being empty
+ */
+struct dashtty_port {
+ struct tty_port port;
+ spinlock_t rx_lock;
+ void *rx_buf;
+ struct mutex xmit_lock;
+ unsigned int xmit_cnt;
+ unsigned int xmit_head;
+ unsigned int xmit_tail;
+ struct completion xmit_empty;
+};
+
+static struct dashtty_port dashtty_ports[NUM_TTY_CHANNELS];
+
+static atomic_t dashtty_xmit_cnt = ATOMIC_INIT(0);
+static wait_queue_head_t dashtty_waitqueue;
+
+/*
+ * Low-level DA channel access routines
+ */
+static int chancall(int in_bios_function, int in_channel,
+ int in_arg2, void *in_arg3,
+ void *in_arg4)
+{
+ register int bios_function asm("D1Ar1") = in_bios_function;
+ register int channel asm("D0Ar2") = in_channel;
+ register int arg2 asm("D1Ar3") = in_arg2;
+ register void *arg3 asm("D0Ar4") = in_arg3;
+ register void *arg4 asm("D1Ar5") = in_arg4;
+ register int bios_call asm("D0Ar6") = 3;
+ register int result asm("D0Re0");
+
+ asm volatile (
+ "MSETL [A0StP++], %6,%4,%2\n\t"
+ "ADD A0StP, A0StP, #8\n\t"
+ "SWITCH #0x0C30208\n\t"
+ "GETD %0, [A0StP+#-8]\n\t"
+ "SUB A0StP, A0StP, #(4*6)+8\n\t"
+ : "=d" (result) /* outs */
+ : "d" (bios_function),
+ "d" (channel),
+ "d" (arg2),
+ "d" (arg3),
+ "d" (arg4),
+ "d" (bios_call) /* ins */
+ : "memory");
+
+ return result;
+}
+
+/*
+ * Attempts to fetch count bytes from channel and returns actual count.
+ */
+static int fetch_data(unsigned int channel)
+{
+ struct dashtty_port *dport = &dashtty_ports[channel];
+ int received = 0;
+
+ spin_lock_bh(&dport->rx_lock);
+ /* check the port isn't being shut down */
+ if (!dport->rx_buf)
+ goto unlock;
+ if (chancall(RDBUF, channel, RX_BUF_SIZE,
+ (void *)dport->rx_buf, &received) == CONAOK) {
+ if (received) {
+ int space;
+ unsigned char *cbuf;
+
+ space = tty_prepare_flip_string(&dport->port, &cbuf,
+ received);
+
+ if (space <= 0)
+ goto unlock;
+
+ memcpy(cbuf, dport->rx_buf, space);
+ tty_flip_buffer_push(&dport->port);
+ }
+ }
+unlock:
+ spin_unlock_bh(&dport->rx_lock);
+
+ return received;
+}
+
+/**
+ * find_channel_to_poll() - Returns number of the next channel to poll.
+ * Returns: The number of the next channel to poll, or -1 if none need
+ * polling.
+ */
+static int find_channel_to_poll(void)
+{
+ static int last_polled_channel;
+ int last = last_polled_channel;
+ int chan;
+ struct dashtty_port *dport;
+
+ for (chan = last + 1; ; ++chan) {
+ if (chan >= NUM_TTY_CHANNELS)
+ chan = 0;
+
+ dport = &dashtty_ports[chan];
+ if (dport->rx_buf) {
+ last_polled_channel = chan;
+ return chan;
+ }
+
+ if (chan == last)
+ break;
+ }
+ return -1;
+}
+
+/**
+ * put_channel_data() - Write out a block of channel data.
+ * @chan: DA channel number.
+ *
+ * Write a single block of data out to the debug adapter. If the circular buffer
+ * is wrapped then only the first block is written.
+ *
+ * Returns: 1 if the remote buffer was too full to accept data.
+ * 0 otherwise.
+ */
+static int put_channel_data(unsigned int chan)
+{
+ struct dashtty_port *dport;
+ struct tty_struct *tty;
+ int number_written;
+ unsigned int count = 0;
+
+ dport = &dashtty_ports[chan];
+ mutex_lock(&dport->xmit_lock);
+ if (dport->xmit_cnt) {
+ count = min((unsigned int)(SERIAL_XMIT_SIZE - dport->xmit_tail),
+ dport->xmit_cnt);
+ chancall(WRBUF, chan, count,
+ dport->port.xmit_buf + dport->xmit_tail,
+ &number_written);
+ dport->xmit_cnt -= number_written;
+ if (!dport->xmit_cnt) {
+ /* reset pointers to avoid wraps */
+ dport->xmit_head = 0;
+ dport->xmit_tail = 0;
+ complete(&dport->xmit_empty);
+ } else {
+ dport->xmit_tail += number_written;
+ if (dport->xmit_tail >= SERIAL_XMIT_SIZE)
+ dport->xmit_tail -= SERIAL_XMIT_SIZE;
+ }
+ atomic_sub(number_written, &dashtty_xmit_cnt);
+ }
+ mutex_unlock(&dport->xmit_lock);
+
+ /* if we've made more data available, wake up tty */
+ if (count && number_written) {
+ tty = tty_port_tty_get(&dport->port);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ }
+
+ /* did the write fail? */
+ return count && !number_written;
+}
+
+/**
+ * put_data() - Kernel thread to write out blocks of channel data to DA.
+ * @arg: Unused.
+ *
+ * This kernel thread runs while @dashtty_xmit_cnt != 0, and loops over the
+ * channels to write out any buffered data. If any of the channels stall due to
+ * the remote buffer being full, a hold off happens to allow the debugger to
+ * drain the buffer.
+ */
+static int put_data(void *arg)
+{
+ unsigned int chan, stall;
+
+ __set_current_state(TASK_RUNNING);
+ while (!kthread_should_stop()) {
+ /*
+ * For each channel see if there's anything to transmit in the
+ * port's xmit_buf.
+ */
+ stall = 0;
+ for (chan = 0; chan < NUM_TTY_CHANNELS; ++chan)
+ stall += put_channel_data(chan);
+
+ /*
+ * If some of the buffers are full, hold off for a short while
+ * to allow them to empty.
+ */
+ if (stall)
+ msleep(25);
+
+ wait_event_interruptible(dashtty_waitqueue,
+ atomic_read(&dashtty_xmit_cnt));
+ }
+
+ return 0;
+}
+
+/*
+ * This gets called every DA_TTY_POLL and polls the channels for data
+ */
+static void dashtty_timer(unsigned long ignored)
+{
+ int channel;
+
+ /* If there are no ports open do nothing and don't poll again. */
+ if (!atomic_read(&num_channels_need_poll))
+ return;
+
+ channel = find_channel_to_poll();
+
+ /* Did we find a channel to poll? */
+ if (channel >= 0)
+ fetch_data(channel);
+
+ mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
+}
+
+static void add_poll_timer(struct timer_list *poll_timer)
+{
+ setup_timer(poll_timer, dashtty_timer, 0);
+ poll_timer->expires = jiffies + DA_TTY_POLL;
+
+ /*
+ * Always attach the timer to the boot CPU. The DA channels are per-CPU
+ * so all polling should be from a single CPU.
+ */
+ add_timer_on(poll_timer, 0);
+}
+
+static int dashtty_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct dashtty_port *dport = container_of(port, struct dashtty_port,
+ port);
+ void *rx_buf;
+
+ /* Allocate the buffer we use for writing data */
+ if (tty_port_alloc_xmit_buf(port) < 0)
+ goto err;
+
+ /* Allocate the buffer we use for reading data */
+ rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
+ if (!rx_buf)
+ goto err_free_xmit;
+
+ spin_lock_bh(&dport->rx_lock);
+ dport->rx_buf = rx_buf;
+ spin_unlock_bh(&dport->rx_lock);
+
+ /*
+ * Don't add the poll timer if we're opening a console. This
+ * avoids the overhead of polling the Dash but means it is not
+ * possible to have a login on /dev/console.
+ *
+ */
+ if (dport != &dashtty_ports[CONSOLE_CHANNEL])
+ if (atomic_inc_return(&num_channels_need_poll) == 1)
+ add_poll_timer(&poll_timer);
+
+ return 0;
+err_free_xmit:
+ tty_port_free_xmit_buf(port);
+err:
+ return -ENOMEM;
+}
+
+static void dashtty_port_shutdown(struct tty_port *port)
+{
+ struct dashtty_port *dport = container_of(port, struct dashtty_port,
+ port);
+ void *rx_buf;
+ unsigned int count;
+
+ /* stop reading */
+ if (dport != &dashtty_ports[CONSOLE_CHANNEL])
+ if (atomic_dec_and_test(&num_channels_need_poll))
+ del_timer_sync(&poll_timer);
+
+ mutex_lock(&dport->xmit_lock);
+ count = dport->xmit_cnt;
+ mutex_unlock(&dport->xmit_lock);
+ if (count) {
+ /*
+ * There's still data to write out, so wake and wait for the
+ * writer thread to drain the buffer.
+ */
+ del_timer(&put_timer);
+ wake_up_interruptible(&dashtty_waitqueue);
+ wait_for_completion(&dport->xmit_empty);
+ }
+
+ /* Null the read buffer (timer could still be running!) */
+ spin_lock_bh(&dport->rx_lock);
+ rx_buf = dport->rx_buf;
+ dport->rx_buf = NULL;
+ spin_unlock_bh(&dport->rx_lock);
+ /* Free the read buffer */
+ kfree(rx_buf);
+
+ /* Free the write buffer */
+ tty_port_free_xmit_buf(port);
+}
+
+static const struct tty_port_operations dashtty_port_ops = {
+ .activate = dashtty_port_activate,
+ .shutdown = dashtty_port_shutdown,
+};
+
+static int dashtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ return tty_port_install(&dashtty_ports[tty->index].port, driver, tty);
+}
+
+static int dashtty_open(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_open(tty->port, tty, filp);
+}
+
+static void dashtty_close(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_close(tty->port, tty, filp);
+}
+
+static void dashtty_hangup(struct tty_struct *tty)
+{
+ int channel;
+ struct dashtty_port *dport;
+
+ channel = tty->index;
+ dport = &dashtty_ports[channel];
+
+ /* drop any data in the xmit buffer */
+ mutex_lock(&dport->xmit_lock);
+ if (dport->xmit_cnt) {
+ atomic_sub(dport->xmit_cnt, &dashtty_xmit_cnt);
+ dport->xmit_cnt = 0;
+ dport->xmit_head = 0;
+ dport->xmit_tail = 0;
+ complete(&dport->xmit_empty);
+ }
+ mutex_unlock(&dport->xmit_lock);
+
+ tty_port_hangup(tty->port);
+}
+
+/**
+ * dashtty_put_timer() - Delayed wake up of kernel thread.
+ * @ignored: unused
+ *
+ * This timer function wakes up the kernel thread if any data exists in the
+ * buffers. It is used to delay the expensive writeout until the writer has
+ * stopped writing.
+ */
+static void dashtty_put_timer(unsigned long ignored)
+{
+ if (atomic_read(&dashtty_xmit_cnt))
+ wake_up_interruptible(&dashtty_waitqueue);
+}
+
+static int dashtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int total)
+{
+ int channel, count, block;
+ struct dashtty_port *dport;
+
+ /* Determine the channel */
+ channel = tty->index;
+ dport = &dashtty_ports[channel];
+
+ /*
+ * Write to output buffer.
+ *
+	 * The reason we write the buffer asynchronously is that DA channels
+	 * are per-CPU, so a synchronous write would go out on the channel of
+	 * whichever CPU we happen to be running on.
+ *
+ * What we actually want to happen is have all input and output done on
+ * one CPU.
+ */
+ mutex_lock(&dport->xmit_lock);
+ /* work out how many bytes we can write to the xmit buffer */
+ total = min(total, (int)(SERIAL_XMIT_SIZE - dport->xmit_cnt));
+ atomic_add(total, &dashtty_xmit_cnt);
+ dport->xmit_cnt += total;
+ /* write the actual bytes (may need splitting if it wraps) */
+ for (count = total; count; count -= block) {
+ block = min(count, (int)(SERIAL_XMIT_SIZE - dport->xmit_head));
+ memcpy(dport->port.xmit_buf + dport->xmit_head, buf, block);
+ dport->xmit_head += block;
+ if (dport->xmit_head >= SERIAL_XMIT_SIZE)
+ dport->xmit_head -= SERIAL_XMIT_SIZE;
+ buf += block;
+ }
+ count = dport->xmit_cnt;
+ /* xmit buffer no longer empty? */
+ if (count)
+ INIT_COMPLETION(dport->xmit_empty);
+ mutex_unlock(&dport->xmit_lock);
+
+ if (total) {
+ /*
+ * If the buffer is full, wake up the kthread, otherwise allow
+ * some more time for the buffer to fill up a bit before waking
+ * it.
+ */
+ if (count == SERIAL_XMIT_SIZE) {
+ del_timer(&put_timer);
+ wake_up_interruptible(&dashtty_waitqueue);
+ } else {
+ mod_timer(&put_timer, jiffies + DA_TTY_PUT_DELAY);
+ }
+ }
+ return total;
+}
+
+static int dashtty_write_room(struct tty_struct *tty)
+{
+ struct dashtty_port *dport;
+ int channel;
+ int room;
+
+ channel = tty->index;
+ dport = &dashtty_ports[channel];
+
+ /* report the space in the xmit buffer */
+ mutex_lock(&dport->xmit_lock);
+ room = SERIAL_XMIT_SIZE - dport->xmit_cnt;
+ mutex_unlock(&dport->xmit_lock);
+
+ return room;
+}
+
+static int dashtty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct dashtty_port *dport;
+ int channel;
+ int chars;
+
+ channel = tty->index;
+ dport = &dashtty_ports[channel];
+
+ /* report the number of bytes in the xmit buffer */
+ mutex_lock(&dport->xmit_lock);
+ chars = dport->xmit_cnt;
+ mutex_unlock(&dport->xmit_lock);
+
+ return chars;
+}
+
+static const struct tty_operations dashtty_ops = {
+ .install = dashtty_install,
+ .open = dashtty_open,
+ .close = dashtty_close,
+ .hangup = dashtty_hangup,
+ .write = dashtty_write,
+ .write_room = dashtty_write_room,
+ .chars_in_buffer = dashtty_chars_in_buffer,
+};
+
+static int __init dashtty_init(void)
+{
+ int ret;
+ int nport;
+ struct dashtty_port *dport;
+
+ if (!metag_da_enabled())
+ return -ENODEV;
+
+ channel_driver = tty_alloc_driver(NUM_TTY_CHANNELS,
+ TTY_DRIVER_REAL_RAW);
+ if (IS_ERR(channel_driver))
+ return PTR_ERR(channel_driver);
+
+ channel_driver->driver_name = "metag_da";
+ channel_driver->name = "ttyDA";
+ channel_driver->major = DA_TTY_MAJOR;
+ channel_driver->minor_start = 0;
+ channel_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ channel_driver->subtype = SERIAL_TYPE_NORMAL;
+ channel_driver->init_termios = tty_std_termios;
+ channel_driver->init_termios.c_cflag |= CLOCAL;
+
+ tty_set_operations(channel_driver, &dashtty_ops);
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &dashtty_ports[nport];
+ tty_port_init(&dport->port);
+ dport->port.ops = &dashtty_port_ops;
+ spin_lock_init(&dport->rx_lock);
+ mutex_init(&dport->xmit_lock);
+ /* the xmit buffer starts empty, i.e. completely written */
+ init_completion(&dport->xmit_empty);
+ complete(&dport->xmit_empty);
+ }
+
+ setup_timer(&put_timer, dashtty_put_timer, 0);
+
+ init_waitqueue_head(&dashtty_waitqueue);
+ dashtty_thread = kthread_create(put_data, NULL, "ttyDA");
+ if (IS_ERR(dashtty_thread)) {
+ pr_err("Couldn't create dashtty thread\n");
+ ret = PTR_ERR(dashtty_thread);
+ goto err_destroy_ports;
+ }
+ /*
+ * Bind the writer thread to the boot CPU so it can't migrate.
+ * DA channels are per-CPU and we want all channel I/O to be on a single
+ * predictable CPU.
+ */
+ kthread_bind(dashtty_thread, 0);
+ wake_up_process(dashtty_thread);
+
+ ret = tty_register_driver(channel_driver);
+
+ if (ret < 0) {
+ pr_err("Couldn't install dashtty driver: err %d\n",
+ ret);
+ goto err_stop_kthread;
+ }
+
+ return 0;
+
+err_stop_kthread:
+ kthread_stop(dashtty_thread);
+err_destroy_ports:
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &dashtty_ports[nport];
+ tty_port_destroy(&dport->port);
+ }
+ put_tty_driver(channel_driver);
+ return ret;
+}
+
+static void dashtty_exit(void)
+{
+ int nport;
+ struct dashtty_port *dport;
+
+ del_timer_sync(&put_timer);
+ kthread_stop(dashtty_thread);
+ del_timer_sync(&poll_timer);
+ tty_unregister_driver(channel_driver);
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &dashtty_ports[nport];
+ tty_port_destroy(&dport->port);
+ }
+ put_tty_driver(channel_driver);
+}
+
+module_init(dashtty_init);
+module_exit(dashtty_exit);
+
+#ifdef CONFIG_DA_CONSOLE
+
+static void dash_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ int actually_written;
+
+ chancall(WRBUF, CONSOLE_CHANNEL, count, (void *)s, &actually_written);
+}
+
+static struct tty_driver *dash_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return channel_driver;
+}
+
+struct console dash_console = {
+ .name = "ttyDA",
+ .write = dash_console_write,
+ .device = dash_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = 1,
+};
+
+#endif
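metag_da buffers writes in the port's SERIAL_XMIT_SIZE circular xmit buffer and drains it from a kernel thread. The wrap handling in dashtty_write() reduces to the helper sketched below, a simplified and hypothetical refactoring of the same arithmetic rather than code from the driver:

#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/string.h>

/* Copy data into a SERIAL_XMIT_SIZE circular buffer, splitting at the
 * wrap point. Returns the number of bytes actually accepted. */
static int xmit_buf_put(unsigned char *xmit_buf, unsigned int *head,
			unsigned int *cnt, const unsigned char *buf, int total)
{
	int count, block;

	total = min(total, (int)(SERIAL_XMIT_SIZE - *cnt));
	*cnt += total;

	for (count = total; count; count -= block) {
		block = min(count, (int)(SERIAL_XMIT_SIZE - *head));
		memcpy(xmit_buf + *head, buf, block);
		*head = (*head + block) % SERIAL_XMIT_SIZE;
		buf += block;
	}
	return total;
}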
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index f9d28503bdec..adeac255e526 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -1405,7 +1405,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
if (inited && !test_bit(TTY_THROTTLED, &tty->flags) &&
MoxaPortRxQueue(p) > 0) { /* RX */
MoxaPortReadData(p);
- tty_schedule_flip(tty);
+ tty_schedule_flip(&p->port);
}
} else {
clear_bit(EMPTYWAIT, &p->statusflags);
@@ -1429,8 +1429,8 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
goto put;
if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
- tty_insert_flip_char(tty, 0, TTY_BREAK);
- tty_schedule_flip(tty);
+ tty_insert_flip_char(&p->port, 0, TTY_BREAK);
+ tty_schedule_flip(&p->port);
}
if (intr & IntrLine)
@@ -1966,7 +1966,7 @@ static int MoxaPortReadData(struct moxa_port *port)
ofs = baseAddr + DynPage_addr + bufhead + head;
len = (tail >= head) ? (tail - head) :
(rx_mask + 1 - head);
- len = tty_prepare_flip_string(tty, &dst,
+ len = tty_prepare_flip_string(&port->port, &dst,
min(len, count));
memcpy_fromio(dst, ofs, len);
head = (head + len) & rx_mask;
@@ -1978,7 +1978,7 @@ static int MoxaPortReadData(struct moxa_port *port)
while (count > 0) {
writew(pageno, baseAddr + Control_reg);
ofs = baseAddr + DynPage_addr + pageofs;
- len = tty_prepare_flip_string(tty, &dst,
+ len = tty_prepare_flip_string(&port->port, &dst,
min(Page_size - pageofs, count));
memcpy_fromio(dst, ofs, len);
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 40113868bec2..484b6a3c9b03 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1264,7 +1264,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
(new_serial.flags & ASYNC_FLAGS));
port->close_delay = new_serial.close_delay * HZ / 100;
port->closing_wait = new_serial.closing_wait * HZ / 100;
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
(new_serial.baud_base != info->baud_base ||
new_serial.custom_divisor !=
@@ -2079,7 +2079,7 @@ static void mxser_receive_chars(struct tty_struct *tty,
}
while (gdl--) {
ch = inb(port->ioaddr + UART_RX);
- tty_insert_flip_char(tty, ch, 0);
+ tty_insert_flip_char(&port->port, ch, 0);
cnt++;
}
goto end_intr;
@@ -2118,7 +2118,7 @@ intr_old:
} else
flag = TTY_BREAK;
}
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&port->port, ch, flag);
cnt++;
if (cnt >= recv_room) {
if (!port->ldisc_stop_rx)
@@ -2145,7 +2145,7 @@ end_intr:
* recursive locking.
*/
spin_unlock(&port->slock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
spin_lock(&port->slock);
}
@@ -2364,7 +2364,6 @@ static void mxser_release_vector(struct mxser_board *brd)
static void mxser_release_ISA_res(struct mxser_board *brd)
{
- free_irq(brd->irq, brd);
release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
mxser_release_vector(brd);
}
@@ -2430,6 +2429,7 @@ static void mxser_board_remove(struct mxser_board *brd)
tty_unregister_device(mxvar_sdriver, brd->idx + i);
tty_port_destroy(&brd->ports[i].port);
}
+ free_irq(brd->irq, brd);
}
static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
@@ -2554,6 +2554,7 @@ static int mxser_probe(struct pci_dev *pdev,
struct mxser_board *brd;
unsigned int i, j;
unsigned long ioaddress;
+ struct device *tty_dev;
int retval = -EINVAL;
for (i = 0; i < MXSER_BOARDS; i++)
@@ -2637,13 +2638,25 @@ static int mxser_probe(struct pci_dev *pdev,
if (retval)
goto err_rel3;
- for (i = 0; i < brd->info->nports; i++)
- tty_port_register_device(&brd->ports[i].port, mxvar_sdriver,
- brd->idx + i, &pdev->dev);
+ for (i = 0; i < brd->info->nports; i++) {
+ tty_dev = tty_port_register_device(&brd->ports[i].port,
+ mxvar_sdriver, brd->idx + i, &pdev->dev);
+ if (IS_ERR(tty_dev)) {
+ retval = PTR_ERR(tty_dev);
+			while (i--)
+ tty_unregister_device(mxvar_sdriver,
+ brd->idx + i);
+ goto err_relbrd;
+ }
+ }
pci_set_drvdata(pdev, brd);
return 0;
+err_relbrd:
+ for (i = 0; i < brd->info->nports; i++)
+ tty_port_destroy(&brd->ports[i].port);
+ free_irq(brd->irq, brd);
err_rel3:
pci_release_region(pdev, 3);
err_zero:
@@ -2665,7 +2678,6 @@ static void mxser_remove(struct pci_dev *pdev)
mxser_board_remove(brd);
- free_irq(pdev->irq, brd);
pci_release_region(pdev, 2);
pci_release_region(pdev, 3);
pci_disable_device(pdev);
@@ -2683,6 +2695,7 @@ static struct pci_driver mxser_driver = {
static int __init mxser_module_init(void)
{
struct mxser_board *brd;
+ struct device *tty_dev;
unsigned int b, i, m;
int retval;
@@ -2728,14 +2741,29 @@ static int __init mxser_module_init(void)
/* mxser_initbrd will hook ISR. */
if (mxser_initbrd(brd, NULL) < 0) {
+ mxser_release_ISA_res(brd);
brd->info = NULL;
continue;
}
brd->idx = m * MXSER_PORTS_PER_BOARD;
- for (i = 0; i < brd->info->nports; i++)
- tty_port_register_device(&brd->ports[i].port,
+ for (i = 0; i < brd->info->nports; i++) {
+ tty_dev = tty_port_register_device(&brd->ports[i].port,
mxvar_sdriver, brd->idx + i, NULL);
+ if (IS_ERR(tty_dev)) {
+				while (i--)
+ tty_unregister_device(mxvar_sdriver,
+ brd->idx + i);
+ for (i = 0; i < brd->info->nports; i++)
+ tty_port_destroy(&brd->ports[i].port);
+ free_irq(brd->irq, brd);
+ mxser_release_ISA_res(brd);
+ brd->info = NULL;
+ break;
+ }
+ }
+ if (brd->info == NULL)
+ continue;
m++;
}
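The mxser changes add proper unwinding when tty_port_register_device() fails part-way through a board. A condensed sketch of that unwind pattern, with the driver, port array and index base assumed to be supplied by the caller rather than taken from mxser's own structures:

#include <linux/err.h>
#include <linux/tty.h>

static int register_board_ports(struct tty_driver *drv, struct tty_port *ports,
				unsigned int nports, unsigned int idx,
				struct device *parent)
{
	struct device *tty_dev;
	unsigned int i;

	for (i = 0; i < nports; i++) {
		tty_dev = tty_port_register_device(&ports[i], drv, idx + i,
						   parent);
		if (IS_ERR(tty_dev))
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* unregister only the devices that were actually created */
	while (i--)
		tty_unregister_device(drv, idx + i);
	return PTR_ERR(tty_dev);
}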
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index dcc0430a49c8..4a43ef5d7962 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1067,9 +1067,9 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
if (!(tty->termios.c_cflag & CLOCAL))
tty_hangup(tty);
- if (brk & 0x01)
- tty_insert_flip_char(tty, 0, TTY_BREAK);
}
+ if (brk & 0x01)
+ tty_insert_flip_char(&dlci->port, 0, TTY_BREAK);
dlci->modem_rx = mlines;
}
@@ -1137,7 +1137,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
{
- struct tty_struct *tty;
+ struct tty_port *port;
unsigned int addr = 0 ;
u8 bits;
int len = clen;
@@ -1160,19 +1160,18 @@ static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
bits = *dp;
if ((bits & 1) == 0)
return;
- /* See if we have an uplink tty */
- tty = tty_port_tty_get(&gsm->dlci[addr]->port);
- if (tty) {
- if (bits & 2)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- if (bits & 4)
- tty_insert_flip_char(tty, 0, TTY_PARITY);
- if (bits & 8)
- tty_insert_flip_char(tty, 0, TTY_FRAME);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ port = &gsm->dlci[addr]->port;
+
+ if (bits & 2)
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
+ if (bits & 4)
+ tty_insert_flip_char(port, 0, TTY_PARITY);
+ if (bits & 8)
+ tty_insert_flip_char(port, 0, TTY_FRAME);
+
+ tty_flip_buffer_push(port);
+
gsm_control_reply(gsm, CMD_RLS, data, clen);
}
@@ -1545,36 +1544,37 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
{
/* krefs .. */
struct tty_port *port = &dlci->port;
- struct tty_struct *tty = tty_port_tty_get(port);
+ struct tty_struct *tty;
unsigned int modem = 0;
int len = clen;
if (debug & 16)
- pr_debug("%d bytes for tty %p\n", len, tty);
- if (tty) {
- switch (dlci->adaption) {
- /* Unsupported types */
- /* Packetised interruptible data */
- case 4:
- break;
- /* Packetised uininterruptible voice/data */
- case 3:
- break;
- /* Asynchronous serial with line state in each frame */
- case 2:
- while (gsm_read_ea(&modem, *data++) == 0) {
- len--;
- if (len == 0)
- return;
- }
+ pr_debug("%d bytes for tty\n", len);
+ switch (dlci->adaption) {
+ /* Unsupported types */
+ /* Packetised interruptible data */
+ case 4:
+ break;
+	/* Packetised uninterruptible voice/data */
+ case 3:
+ break;
+ /* Asynchronous serial with line state in each frame */
+ case 2:
+ while (gsm_read_ea(&modem, *data++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ tty = tty_port_tty_get(port);
+ if (tty) {
gsm_process_modem(tty, dlci, modem, clen);
- /* Line state will go via DLCI 0 controls only */
- case 1:
- default:
- tty_insert_flip_string(tty, data, len);
- tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
- tty_kref_put(tty);
+ /* Line state will go via DLCI 0 controls only */
+ case 1:
+ default:
+ tty_insert_flip_string(port, data, len);
+ tty_flip_buffer_push(port);
}
}
@@ -1689,6 +1689,8 @@ static inline void dlci_put(struct gsm_dlci *dlci)
tty_port_put(&dlci->port);
}
+static void gsm_destroy_network(struct gsm_dlci *dlci);
+
/**
* gsm_dlci_release - release DLCI
* @dlci: DLCI to destroy
@@ -1702,9 +1704,19 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
{
struct tty_struct *tty = tty_port_tty_get(&dlci->port);
if (tty) {
+ mutex_lock(&dlci->mutex);
+ gsm_destroy_network(dlci);
+ mutex_unlock(&dlci->mutex);
+
+ /* tty_vhangup needs the tty_lock, so unlock and
+ relock after doing the hangup. */
+ tty_unlock(tty);
tty_vhangup(tty);
+ tty_lock(tty);
+ tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
}
+ dlci->state = DLCI_CLOSED;
dlci_put(dlci);
}
@@ -2947,6 +2959,8 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
if (dlci == NULL)
return;
+ if (dlci->state == DLCI_CLOSED)
+ return;
mutex_lock(&dlci->mutex);
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
@@ -2965,6 +2979,8 @@ out:
static void gsmtty_hangup(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
tty_port_hangup(&dlci->port);
gsm_dlci_begin_close(dlci);
}
@@ -2972,9 +2988,12 @@ static void gsmtty_hangup(struct tty_struct *tty)
static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
int len)
{
+ int sent;
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
/* Stuff the bytes into the fifo queue */
- int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
/* Need to kick the channel */
gsm_dlci_data_kick(dlci);
return sent;
@@ -2983,18 +3002,24 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
static int gsmtty_write_room(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
return TX_SIZE - kfifo_len(dlci->fifo);
}
static int gsmtty_chars_in_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
return kfifo_len(dlci->fifo);
}
static void gsmtty_flush_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
/* Caution needed: If we implement reliable transport classes
then the data being transmitted can't simply be junked once
it has first hit the stack. Until then we can just blow it
@@ -3013,6 +3038,8 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
static int gsmtty_tiocmget(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
return dlci->modem_rx;
}
@@ -3022,6 +3049,8 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
struct gsm_dlci *dlci = tty->driver_data;
unsigned int modem_tx = dlci->modem_tx;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
modem_tx &= ~clear;
modem_tx |= set;
@@ -3040,6 +3069,8 @@ static int gsmtty_ioctl(struct tty_struct *tty,
struct gsm_netconfig nc;
int index;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
switch (cmd) {
case GSMIOC_ENABLE_NET:
if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
@@ -3066,6 +3097,9 @@ static int gsmtty_ioctl(struct tty_struct *tty,
static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
/* For the moment it's fixed. In actual fact the speed information
for the virtual channel can be propagated in both directions by
the RPN control message. This however rapidly gets nasty as we
@@ -3077,6 +3111,8 @@ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
static void gsmtty_throttle(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
if (tty->termios.c_cflag & CRTSCTS)
dlci->modem_tx &= ~TIOCM_DTR;
dlci->throttled = 1;
@@ -3087,6 +3123,8 @@ static void gsmtty_throttle(struct tty_struct *tty)
static void gsmtty_unthrottle(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
if (tty->termios.c_cflag & CRTSCTS)
dlci->modem_tx |= TIOCM_DTR;
dlci->throttled = 0;
@@ -3098,6 +3136,8 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
{
struct gsm_dlci *dlci = tty->driver_data;
int encode = 0; /* Off */
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
if (state == -1) /* "On indefinitely" - we can't encode this
properly */
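
The n_gsm hunks above move the receive path from tty_struct to tty_port based flip buffers, so received data and line-status flags can be queued even while no tty is attached. A minimal sketch of that receive pattern; the function and variable names are illustrative and not part of this patch:

	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	/* Illustrative only: stage received bytes (or a line-status flag)
	 * through a tty_port, then push them to the line discipline.  No
	 * tty_struct reference is taken at all. */
	static void example_rx(struct tty_port *port, const unsigned char *buf,
			       size_t len, bool parity_error)
	{
		if (parity_error)
			tty_insert_flip_char(port, 0, TTY_PARITY);
		else
			tty_insert_flip_string(port, buf, len);

		tty_flip_buffer_push(port);
	}
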
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 19083efa2314..05e72bea9b07 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -49,6 +49,7 @@
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/ratelimit.h>
/* number of characters left in xmit buffer before select says we have room */
@@ -100,7 +101,7 @@ struct n_tty_data {
struct mutex atomic_read_lock;
struct mutex output_lock;
struct mutex echo_lock;
- spinlock_t read_lock;
+ raw_spinlock_t read_lock;
};
static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
@@ -182,9 +183,9 @@ static void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
* The problem of stomping on the buffers ends here.
* Why didn't anyone see this one coming? --AJK
*/
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
put_tty_queue_nolock(c, ldata);
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
}
/**
@@ -218,9 +219,9 @@ static void reset_buffer_flags(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
unsigned long flags;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_head = ldata->read_tail = ldata->read_cnt = 0;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
mutex_lock(&ldata->echo_lock);
ldata->echo_pos = ldata->echo_cnt = ldata->echo_overrun = 0;
@@ -276,7 +277,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
unsigned long flags;
ssize_t n = 0;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
if (!ldata->icanon) {
n = ldata->read_cnt;
} else if (ldata->canon_data) {
@@ -284,7 +285,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
ldata->canon_head - ldata->read_tail :
ldata->canon_head + (N_TTY_BUF_SIZE - ldata->read_tail);
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return n;
}
@@ -915,19 +916,19 @@ static void eraser(unsigned char c, struct tty_struct *tty)
kill_type = WERASE;
else {
if (!L_ECHO(tty)) {
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
(N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return;
}
if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
(N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
finish_erasing(ldata);
echo_char(KILL_CHAR(tty), tty);
/* Add a newline if ECHOK is on and ECHOKE is off. */
@@ -961,10 +962,10 @@ static void eraser(unsigned char c, struct tty_struct *tty)
break;
}
cnt = (ldata->read_head - head) & (N_TTY_BUF_SIZE-1);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_head = head;
ldata->read_cnt -= cnt;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (L_ECHO(tty)) {
if (L_ECHOPRT(tty)) {
if (!ldata->erasing) {
@@ -1344,12 +1345,12 @@ send_signal:
put_tty_queue(c, ldata);
handle_newline:
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
set_bit(ldata->read_head, ldata->read_flags);
put_tty_queue_nolock(c, ldata);
ldata->canon_head = ldata->read_head;
ldata->canon_data++;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
@@ -1423,7 +1424,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
unsigned long cpuflags;
if (ldata->real_raw) {
- spin_lock_irqsave(&ldata->read_lock, cpuflags);
+ raw_spin_lock_irqsave(&ldata->read_lock, cpuflags);
i = min(N_TTY_BUF_SIZE - ldata->read_cnt,
N_TTY_BUF_SIZE - ldata->read_head);
i = min(count, i);
@@ -1439,7 +1440,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
memcpy(ldata->read_buf + ldata->read_head, cp, i);
ldata->read_head = (ldata->read_head + i) & (N_TTY_BUF_SIZE-1);
ldata->read_cnt += i;
- spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
} else {
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
@@ -1635,7 +1636,7 @@ static int n_tty_open(struct tty_struct *tty)
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
mutex_init(&ldata->echo_lock);
- spin_lock_init(&ldata->read_lock);
+ raw_spin_lock_init(&ldata->read_lock);
/* These are ugly. Currently a malloc failure here can panic */
ldata->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
@@ -1703,10 +1704,10 @@ static int copy_from_read_buf(struct tty_struct *tty,
bool is_eof;
retval = 0;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
n = min(ldata->read_cnt, N_TTY_BUF_SIZE - ldata->read_tail);
n = min(*nr, n);
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (n) {
retval = copy_to_user(*b, &ldata->read_buf[ldata->read_tail], n);
n -= retval;
@@ -1714,13 +1715,13 @@ static int copy_from_read_buf(struct tty_struct *tty,
ldata->read_buf[ldata->read_tail] == EOF_CHAR(tty);
tty_audit_add_data(tty, &ldata->read_buf[ldata->read_tail], n,
ldata->icanon);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_tail = (ldata->read_tail + n) & (N_TTY_BUF_SIZE-1);
ldata->read_cnt -= n;
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof && !ldata->read_cnt)
n = 0;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
*b += n;
*nr -= n;
}
@@ -1900,7 +1901,7 @@ do_it_again:
if (ldata->icanon && !L_EXTPROC(tty)) {
/* N.B. avoid overrun if nr == 0 */
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
while (nr && ldata->read_cnt) {
int eol;
@@ -1918,25 +1919,25 @@ do_it_again:
if (--ldata->canon_data < 0)
ldata->canon_data = 0;
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (!eol || (c != __DISABLED_CHAR)) {
if (tty_put_user(tty, c, b++)) {
retval = -EFAULT;
b--;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
nr--;
}
if (eol) {
tty_audit_push(tty);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (retval)
break;
} else {
@@ -2188,7 +2189,7 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
* n_tty_inherit_ops - inherit N_TTY methods
* @ops: struct tty_ldisc_ops where to save N_TTY methods
*
- * Used by a generic struct tty_ldisc_ops to easily inherit N_TTY
+ * Enables a 'subclass' line discipline to 'inherit' N_TTY
* methods.
*/
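
The n_tty hunks convert read_lock from spinlock_t to raw_spinlock_t, which remains a true spinning lock even on preempt-rt kernels where spinlock_t can sleep. A minimal sketch of the converted locking pattern, with illustrative names:

	#include <linux/spinlock.h>

	struct example_buf {
		raw_spinlock_t lock;	/* never becomes a sleeping lock */
		unsigned int count;
	};

	static void example_init(struct example_buf *b)
	{
		raw_spin_lock_init(&b->lock);
		b->count = 0;
	}

	/* Safe from hard-IRQ context: interrupts are disabled while held */
	static void example_add(struct example_buf *b)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&b->lock, flags);
		b->count++;
		raw_spin_unlock_irqrestore(&b->lock, flags);
	}
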
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index a0c69ab04399..2dff19796157 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -827,15 +827,10 @@ static int receive_data(enum port_type index, struct nozomi *dc)
struct tty_struct *tty = tty_port_tty_get(&port->port);
int i, ret;
- if (unlikely(!tty)) {
- DBG1("tty not open for port: %d?", index);
- return 1;
- }
-
read_mem32((u32 *) &size, addr, 4);
/* DBG1( "%d bytes port: %d", size, index); */
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
DBG1("No room in tty, don't read data, don't ack interrupt, "
"disable interrupt");
@@ -855,13 +850,14 @@ static int receive_data(enum port_type index, struct nozomi *dc)
read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
if (size == 1) {
- tty_insert_flip_char(tty, buf[0], TTY_NORMAL);
+ tty_insert_flip_char(&port->port, buf[0], TTY_NORMAL);
size = 0;
} else if (size < RECEIVE_BUF_MAX) {
- size -= tty_insert_flip_string(tty, (char *) buf, size);
+ size -= tty_insert_flip_string(&port->port,
+ (char *)buf, size);
} else {
- i = tty_insert_flip_string(tty, \
- (char *) buf, RECEIVE_BUF_MAX);
+ i = tty_insert_flip_string(&port->port,
+ (char *)buf, RECEIVE_BUF_MAX);
size -= i;
offset += i;
}
@@ -1276,15 +1272,11 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
exit_handler:
spin_unlock(&dc->spin_mutex);
- for (a = 0; a < NOZOMI_MAX_PORTS; a++) {
- struct tty_struct *tty;
- if (test_and_clear_bit(a, &dc->flip)) {
- tty = tty_port_tty_get(&dc->port[a].port);
- if (tty)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
- }
+
+ for (a = 0; a < NOZOMI_MAX_PORTS; a++)
+ if (test_and_clear_bit(a, &dc->flip))
+ tty_flip_buffer_push(&dc->port[a].port);
+
return IRQ_HANDLED;
none:
spin_unlock(&dc->spin_mutex);
@@ -1687,12 +1679,6 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count);
- /* notify card */
- if (unlikely(dc == NULL)) {
- DBG1("No device context?");
- goto exit;
- }
-
spin_lock_irqsave(&dc->spin_mutex, flags);
/* CTS is only valid on the modem channel */
if (port == &(dc->port[PORT_MDM])) {
@@ -1708,7 +1694,6 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
}
spin_unlock_irqrestore(&dc->spin_mutex, flags);
-exit:
return rval;
}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index be6a373601b7..c24b4db243b9 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -38,16 +38,18 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
if (tty->driver->subtype == PTY_TYPE_MASTER)
WARN_ON(tty->count > 1);
else {
+ if (test_bit(TTY_IO_ERROR, &tty->flags))
+ return;
if (tty->count > 2)
return;
}
+ set_bit(TTY_IO_ERROR, &tty->flags);
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
tty->packet = 0;
/* Review - krefs on tty_link ?? */
if (!tty->link)
return;
- tty->link->packet = 0;
set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
wake_up_interruptible(&tty->link->read_wait);
wake_up_interruptible(&tty->link->write_wait);
@@ -55,9 +57,10 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
set_bit(TTY_OTHER_CLOSED, &tty->flags);
#ifdef CONFIG_UNIX98_PTYS
if (tty->driver == ptm_driver) {
- mutex_lock(&devpts_mutex);
- devpts_pty_kill(tty->link->driver_data);
- mutex_unlock(&devpts_mutex);
+ mutex_lock(&devpts_mutex);
+ if (tty->link->driver_data)
+ devpts_pty_kill(tty->link->driver_data);
+ mutex_unlock(&devpts_mutex);
}
#endif
tty_unlock(tty);
@@ -120,10 +123,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
if (c > 0) {
/* Stuff the data into the input queue of the other end */
- c = tty_insert_flip_string(to, buf, c);
+ c = tty_insert_flip_string(to->port, buf, c);
/* And shovel */
if (c) {
- tty_flip_buffer_push(to);
+ tty_flip_buffer_push(to->port);
tty_wakeup(tty);
}
}
@@ -246,14 +249,17 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
if (!tty || !tty->link)
goto out;
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
retval = -EIO;
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
goto out;
if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
goto out;
- if (tty->link->count != 1)
+ if (tty->driver->subtype == PTY_TYPE_SLAVE && tty->link->count != 1)
goto out;
+ clear_bit(TTY_IO_ERROR, &tty->flags);
clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
set_bit(TTY_THROTTLED, &tty->flags);
retval = 0;
@@ -441,6 +447,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty,
return pty_get_pktmode(tty, (int __user *)arg);
case TIOCSIG: /* Send signal to other side of pty */
return pty_signal(tty, (int) arg);
+ case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */
+ return -EINVAL;
}
return -ENOIOCTLCMD;
}
@@ -661,7 +669,7 @@ static const struct tty_operations pty_unix98_ops = {
* Allocate a unix98 pty master device from the ptmx driver.
*
* Locking: tty_mutex protects the init_dev work. tty->count should
- * protect the rest.
+ * protect the rest.
* allocated_ptys_lock handles the list of free pty numbers
*/
@@ -702,6 +710,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
mutex_unlock(&tty_mutex);
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = inode;
tty_add_file(tty, filp);
@@ -712,14 +721,13 @@ static int ptmx_open(struct inode *inode, struct file *filp)
retval = PTR_ERR(slave_inode);
goto err_release;
}
+ tty->link->driver_data = slave_inode;
retval = ptm_driver->ops->open(tty, filp);
if (retval)
goto err_release;
tty_unlock(tty);
- tty->driver_data = inode;
- tty->link->driver_data = slave_inode;
return 0;
err_release:
tty_unlock(tty);
@@ -795,7 +803,7 @@ static void __init unix98_pty_init(void)
cdev_init(&ptmx_cdev, &ptmx_fops);
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
- panic("Couldn't register /dev/ptmx driver\n");
+ panic("Couldn't register /dev/ptmx driver");
device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
}
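
The pty hunks gate I/O on TTY_IO_ERROR while a master is being opened or torn down. A minimal sketch of how such a flag is typically toggled and tested; the helper names are illustrative:

	#include <linux/tty.h>

	/* Illustrative only: refuse I/O until setup has completed */
	static int example_start_io(struct tty_struct *tty)
	{
		if (test_bit(TTY_IO_ERROR, &tty->flags))
			return -EIO;	/* port not (yet) usable */
		return 0;
	}

	static void example_begin_setup(struct tty_struct *tty)
	{
		set_bit(TTY_IO_ERROR, &tty->flags);	/* block I/O */
	}

	static void example_setup_done(struct tty_struct *tty)
	{
		clear_bit(TTY_IO_ERROR, &tty->flags);	/* allow I/O */
	}
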
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index e42009a00529..1d270034bfc3 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -55,7 +55,7 @@
#undef REV_PCI_ORDER
#undef ROCKET_DEBUG_IO
-#define POLL_PERIOD HZ/100 /* Polling period .01 seconds (10ms) */
+#define POLL_PERIOD (HZ/100) /* Polling period .01 seconds (10ms) */
/****** Kernel includes ******/
@@ -315,9 +315,8 @@ static inline int rocket_paranoia_check(struct r_port *info,
* that receive data is present on a serial port. Pulls data from FIFO, moves it into the
* tty layer.
*/
-static void rp_do_receive(struct r_port *info,
- struct tty_struct *tty,
- CHANNEL_t * cp, unsigned int ChanStatus)
+static void rp_do_receive(struct r_port *info, CHANNEL_t *cp,
+ unsigned int ChanStatus)
{
unsigned int CharNStat;
int ToRecv, wRecv, space;
@@ -379,7 +378,8 @@ static void rp_do_receive(struct r_port *info,
flag = TTY_OVERRUN;
else
flag = TTY_NORMAL;
- tty_insert_flip_char(tty, CharNStat & 0xff, flag);
+ tty_insert_flip_char(&info->port, CharNStat & 0xff,
+ flag);
ToRecv--;
}
@@ -399,7 +399,7 @@ static void rp_do_receive(struct r_port *info,
* characters at time by doing repeated word IO
* transfer.
*/
- space = tty_prepare_flip_string(tty, &cbuf, ToRecv);
+ space = tty_prepare_flip_string(&info->port, &cbuf, ToRecv);
if (space < ToRecv) {
#ifdef ROCKET_DEBUG_RECEIVE
printk(KERN_INFO "rp_do_receive:insufficient space ToRecv=%d space=%d\n", ToRecv, space);
@@ -415,7 +415,7 @@ static void rp_do_receive(struct r_port *info,
cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp));
}
/* Push the data up to the tty layer */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
/*
@@ -494,7 +494,6 @@ static void rp_do_transmit(struct r_port *info)
static void rp_handle_port(struct r_port *info)
{
CHANNEL_t *cp;
- struct tty_struct *tty;
unsigned int IntMask, ChanStatus;
if (!info)
@@ -505,12 +504,7 @@ static void rp_handle_port(struct r_port *info)
"info->flags & NOT_INIT\n");
return;
}
- tty = tty_port_tty_get(&info->port);
- if (!tty) {
- printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
- "tty==NULL\n");
- return;
- }
+
cp = &info->channel;
IntMask = sGetChanIntID(cp) & info->intmask;
@@ -519,7 +513,7 @@ static void rp_handle_port(struct r_port *info)
#endif
ChanStatus = sGetChanStatus(cp);
if (IntMask & RXF_TRIG) { /* Rx FIFO trigger level */
- rp_do_receive(info, tty, cp, ChanStatus);
+ rp_do_receive(info, cp, ChanStatus);
}
if (IntMask & DELTA_CD) { /* CD change */
#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_INTR) || defined(ROCKET_DEBUG_HANGUP))
@@ -527,10 +521,15 @@ static void rp_handle_port(struct r_port *info)
(ChanStatus & CD_ACT) ? "on" : "off");
#endif
if (!(ChanStatus & CD_ACT) && info->cd_status) {
+ struct tty_struct *tty;
#ifdef ROCKET_DEBUG_HANGUP
printk(KERN_INFO "CD drop, calling hangup.\n");
#endif
- tty_hangup(tty);
+ tty = tty_port_tty_get(&info->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
}
info->cd_status = (ChanStatus & CD_ACT) ? 1 : 0;
wake_up_interruptible(&info->port.open_wait);
@@ -543,7 +542,6 @@ static void rp_handle_port(struct r_port *info)
printk(KERN_INFO "DSR change...\n");
}
#endif
- tty_kref_put(tty);
}
/*
@@ -1758,8 +1756,29 @@ static void rp_flush_buffer(struct tty_struct *tty)
#ifdef CONFIG_PCI
-static struct pci_device_id __used rocket_pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_ANY_ID) },
+static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4QUAD) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8OCTA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP8OCTA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP8INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8J) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4J) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8SNI) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP16SNI) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP16INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP16INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_CRP16INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP32INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP32INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RPP4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RPP8) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP2_232) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP2_422) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP6M) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4M) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_UPCI_RM3_8PORT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_UPCI_RM3_4PORT) },
{ }
};
MODULE_DEVICE_TABLE(pci, rocket_pci_ids);
@@ -1781,7 +1800,8 @@ static __init int register_PCI(int i, struct pci_dev *dev)
WordIO_t ConfigIO = 0;
ByteIO_t UPCIRingInd = 0;
- if (!dev || pci_enable_device(dev))
+ if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
+ pci_enable_device(dev))
return 0;
rcktpt_io_addr[i] = pci_resource_start(dev, 0);
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index a44345a2dbb4..c7e8b60b6177 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -85,7 +85,6 @@ static void serial21285_enable_ms(struct uart_port *port)
static irqreturn_t serial21285_rx_chars(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty = port->state->port.tty;
unsigned int status, ch, flag, rxs, max_count = 256;
status = *CSR_UARTFLG;
@@ -115,7 +114,7 @@ static irqreturn_t serial21285_rx_chars(int irq, void *dev_id)
status = *CSR_UARTFLG;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index f99a84526f82..49399470794d 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -262,8 +262,7 @@ static void rs_start(struct tty_struct *tty)
local_irq_restore(flags);
}
-static void receive_chars(struct m68k_serial *info, struct tty_struct *tty,
- unsigned short rx)
+static void receive_chars(struct m68k_serial *info, unsigned short rx)
{
m68328_uart *uart = &uart_addr[info->line];
unsigned char ch, flag;
@@ -293,9 +292,6 @@ static void receive_chars(struct m68k_serial *info, struct tty_struct *tty,
}
}
- if(!tty)
- goto clear_and_exit;
-
flag = TTY_NORMAL;
if (rx & URX_PARITY_ERROR)
@@ -305,15 +301,12 @@ static void receive_chars(struct m68k_serial *info, struct tty_struct *tty,
else if (rx & URX_FRAME_ERROR)
flag = TTY_FRAME;
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&info->tport, ch, flag);
#ifndef CONFIG_XCOPILOT_BUGS
} while((rx = uart->urx.w) & URX_DATA_READY);
#endif
- tty_schedule_flip(tty);
-
-clear_and_exit:
- return;
+ tty_schedule_flip(&info->tport);
}
static void transmit_chars(struct m68k_serial *info, struct tty_struct *tty)
@@ -367,11 +360,11 @@ irqreturn_t rs_interrupt(int irq, void *dev_id)
tx = uart->utx.w;
if (rx & URX_DATA_READY)
- receive_chars(info, tty, rx);
+ receive_chars(info, rx);
if (tx & UTX_TX_AVAIL)
transmit_chars(info, tty);
#else
- receive_chars(info, tty, rx);
+ receive_chars(info, rx);
#endif
tty_kref_put(tty);
@@ -1009,7 +1002,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
m68328_uart *uart = &uart_addr[info->line];
unsigned long flags;
- if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
+ if (serial_paranoia_check(info, tty->name, "rs_close"))
return;
local_irq_save(flags);
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index d085e3a8ec06..0efc815a4968 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -239,13 +239,6 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
},
- [PORT_RM9000] = {
- .name = "RM9000",
- .fifo_size = 16,
- .tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO,
- },
[PORT_OCTEON] = {
.name = "OCTEON",
.fifo_size = 64,
@@ -300,6 +293,12 @@ static const struct serial8250_config uart_config[] = {
UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
.flags = UART_CAP_FIFO,
},
+ [PORT_BRCM_TRUMANAGE] = {
+ .name = "TruManage",
+ .fifo_size = 1,
+ .tx_loadsz = 1024,
+ .flags = UART_CAP_HFIFO,
+ },
[PORT_8250_CIR] = {
.name = "CIR port"
}
@@ -318,9 +317,9 @@ static void default_serial_dl_write(struct uart_8250_port *up, int value)
serial_out(up, UART_DLM, value >> 8 & 0xff);
}
-#ifdef CONFIG_MIPS_ALCHEMY
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
-/* Au1x00 UART hardware has a weird register layout */
+/* Au1x00/RT288x UART hardware has a weird register layout */
static const u8 au_io_in_map[] = {
[UART_RX] = 0,
[UART_IER] = 2,
@@ -364,56 +363,6 @@ static void au_serial_dl_write(struct uart_8250_port *up, int value)
#endif
-#ifdef CONFIG_SERIAL_8250_RM9K
-
-static const u8
- regmap_in[8] = {
- [UART_RX] = 0x00,
- [UART_IER] = 0x0c,
- [UART_IIR] = 0x14,
- [UART_LCR] = 0x1c,
- [UART_MCR] = 0x20,
- [UART_LSR] = 0x24,
- [UART_MSR] = 0x28,
- [UART_SCR] = 0x2c
- },
- regmap_out[8] = {
- [UART_TX] = 0x04,
- [UART_IER] = 0x0c,
- [UART_FCR] = 0x18,
- [UART_LCR] = 0x1c,
- [UART_MCR] = 0x20,
- [UART_LSR] = 0x24,
- [UART_MSR] = 0x28,
- [UART_SCR] = 0x2c
- };
-
-static unsigned int rm9k_serial_in(struct uart_port *p, int offset)
-{
- offset = regmap_in[offset] << p->regshift;
- return readl(p->membase + offset);
-}
-
-static void rm9k_serial_out(struct uart_port *p, int offset, int value)
-{
- offset = regmap_out[offset] << p->regshift;
- writel(value, p->membase + offset);
-}
-
-static int rm9k_serial_dl_read(struct uart_8250_port *up)
-{
- return ((__raw_readl(up->port.membase + 0x10) << 8) |
- (__raw_readl(up->port.membase + 0x08) & 0xff)) & 0xffff;
-}
-
-static void rm9k_serial_dl_write(struct uart_8250_port *up, int value)
-{
- __raw_writel(value, up->port.membase + 0x08);
- __raw_writel(value >> 8, up->port.membase + 0x10);
-}
-
-#endif
-
static unsigned int hub6_serial_in(struct uart_port *p, int offset)
{
offset = offset << p->regshift;
@@ -491,16 +440,7 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = mem32_serial_out;
break;
-#ifdef CONFIG_SERIAL_8250_RM9K
- case UPIO_RM9000:
- p->serial_in = rm9k_serial_in;
- p->serial_out = rm9k_serial_out;
- up->dl_read = rm9k_serial_dl_read;
- up->dl_write = rm9k_serial_dl_write;
- break;
-#endif
-
-#ifdef CONFIG_MIPS_ALCHEMY
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
case UPIO_AU:
p->serial_in = au_serial_in;
p->serial_out = au_serial_out;
@@ -1335,7 +1275,9 @@ static void serial8250_start_tx(struct uart_port *port)
struct uart_8250_port *up =
container_of(port, struct uart_8250_port, port);
- if (!(up->ier & UART_IER_THRI)) {
+ if (up->dma && !serial8250_tx_dma(up)) {
+ return;
+ } else if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_port_out(port, UART_IER, up->ier);
@@ -1343,9 +1285,7 @@ static void serial8250_start_tx(struct uart_port *port)
unsigned char lsr;
lsr = serial_in(up, UART_LSR);
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
- if ((port->type == PORT_RM9000) ?
- (lsr & UART_LSR_THRE) :
- (lsr & UART_LSR_TEMT))
+ if (lsr & UART_LSR_TEMT)
serial8250_tx_chars(up);
}
}
@@ -1391,7 +1331,6 @@ unsigned char
serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
{
struct uart_port *port = &up->port;
- struct tty_struct *tty = port->state->port.tty;
unsigned char ch;
int max_count = 256;
char flag;
@@ -1456,7 +1395,7 @@ ignore_char:
lsr = serial_in(up, UART_LSR);
} while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
spin_unlock(&port->lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
return lsr;
}
@@ -1490,6 +1429,11 @@ void serial8250_tx_chars(struct uart_8250_port *up)
port->icount.tx++;
if (uart_circ_empty(xmit))
break;
+ if (up->capabilities & UART_CAP_HFIFO) {
+ if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
+ BOTH_EMPTY)
+ break;
+ }
} while (--count > 0);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1536,6 +1480,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
unsigned long flags;
struct uart_8250_port *up =
container_of(port, struct uart_8250_port, port);
+ int dma_err = 0;
if (iir & UART_IIR_NO_INT)
return 0;
@@ -1546,8 +1491,13 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
DEBUG_INTR("status = %x...", status);
- if (status & (UART_LSR_DR | UART_LSR_BI))
- status = serial8250_rx_chars(up, status);
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ if (up->dma)
+ dma_err = serial8250_rx_dma(up, iir);
+
+ if (!up->dma || dma_err)
+ status = serial8250_rx_chars(up, status);
+ }
serial8250_modem_status(up);
if (status & UART_LSR_THRE)
serial8250_tx_chars(up);
@@ -1980,9 +1930,12 @@ static int serial8250_startup(struct uart_port *port)
if (port->type == PORT_8250_CIR)
return -ENODEV;
- port->fifosize = uart_config[up->port.type].fifo_size;
- up->tx_loadsz = uart_config[up->port.type].tx_loadsz;
- up->capabilities = uart_config[up->port.type].flags;
+ if (!port->fifosize)
+ port->fifosize = uart_config[port->type].fifo_size;
+ if (!up->tx_loadsz)
+ up->tx_loadsz = uart_config[port->type].tx_loadsz;
+ if (!up->capabilities)
+ up->capabilities = uart_config[port->type].flags;
up->mcr = 0;
if (port->iotype != up->cur_iotype)
@@ -2187,6 +2140,18 @@ dont_test_tx_en:
up->msr_saved_flags = 0;
/*
+ * Request DMA channels for both RX and TX.
+ */
+ if (up->dma) {
+ retval = serial8250_request_dma(up);
+ if (retval) {
+ pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
+ serial_index(port));
+ up->dma = NULL;
+ }
+ }
+
+ /*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
@@ -2219,6 +2184,9 @@ static void serial8250_shutdown(struct uart_port *port)
up->ier = 0;
serial_port_out(port, UART_IER, 0);
+ if (up->dma)
+ serial8250_release_dma(up);
+
spin_lock_irqsave(&port->lock, flags);
if (port->flags & UPF_FOURPORT) {
/* reset interrupts on the AST Fourport board */
@@ -2815,9 +2783,12 @@ static void
serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
{
up->port.type = type;
- up->port.fifosize = uart_config[type].fifo_size;
- up->capabilities = uart_config[type].flags;
- up->tx_loadsz = uart_config[type].tx_loadsz;
+ if (!up->port.fifosize)
+ up->port.fifosize = uart_config[type].fifo_size;
+ if (!up->tx_loadsz)
+ up->tx_loadsz = uart_config[type].tx_loadsz;
+ if (!up->capabilities)
+ up->capabilities = uart_config[type].flags;
}
static void __init
@@ -3251,6 +3222,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->bugs = up->bugs;
uart->port.mapbase = up->port.mapbase;
uart->port.private_data = up->port.private_data;
+ uart->port.fifosize = up->port.fifosize;
+ uart->tx_loadsz = up->tx_loadsz;
+ uart->capabilities = up->capabilities;
+
if (up->port.dev)
uart->port.dev = up->port.dev;
@@ -3276,6 +3251,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->dl_read = up->dl_read;
if (up->dl_write)
uart->dl_write = up->dl_write;
+ if (up->dma)
+ uart->dma = up->dma;
if (serial8250_isa_config != NULL)
serial8250_isa_config(0, &uart->port,
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 3b4ea84898c2..34eb676916fe 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -12,6 +12,35 @@
*/
#include <linux/serial_8250.h>
+#include <linux/dmaengine.h>
+
+struct uart_8250_dma {
+ dma_filter_fn fn;
+ void *rx_param;
+ void *tx_param;
+
+ int rx_chan_id;
+ int tx_chan_id;
+
+ struct dma_slave_config rxconf;
+ struct dma_slave_config txconf;
+
+ struct dma_chan *rxchan;
+ struct dma_chan *txchan;
+
+ dma_addr_t rx_addr;
+ dma_addr_t tx_addr;
+
+ dma_cookie_t rx_cookie;
+ dma_cookie_t tx_cookie;
+
+ void *rx_buf;
+
+ size_t rx_size;
+ size_t tx_size;
+
+ unsigned char tx_running:1;
+};
struct old_serial_port {
unsigned int uart;
@@ -40,6 +69,7 @@ struct serial8250_config {
#define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */
#define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */
#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
+#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */
#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
@@ -142,3 +172,24 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
return 0;
}
#endif
+
+#ifdef CONFIG_SERIAL_8250_DMA
+extern int serial8250_tx_dma(struct uart_8250_port *);
+extern int serial8250_rx_dma(struct uart_8250_port *, unsigned int iir);
+extern int serial8250_request_dma(struct uart_8250_port *);
+extern void serial8250_release_dma(struct uart_8250_port *);
+#else
+static inline int serial8250_tx_dma(struct uart_8250_port *p)
+{
+ return -1;
+}
+static inline int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+{
+ return -1;
+}
+static inline int serial8250_request_dma(struct uart_8250_port *p)
+{
+ return -1;
+}
+static inline void serial8250_release_dma(struct uart_8250_port *p) { }
+#endif
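
The !CONFIG_SERIAL_8250_DMA stubs above return -1, so callers may attempt DMA unconditionally and fall back to PIO. A minimal sketch of that call pattern, mirroring the serial8250_handle_irq() hunk earlier in this series; the wrapper name is illustrative:

	#include <linux/serial_8250.h>

	#include "8250.h"

	/* Illustrative only: try DMA first, read the FIFO by hand when DMA
	 * fails or is compiled out (the stubs return -1 in that case). */
	static void example_rx_path(struct uart_8250_port *up, unsigned int iir,
				    unsigned char lsr)
	{
		int dma_err = 0;

		if (up->dma)
			dma_err = serial8250_rx_dma(up, iir);

		if (!up->dma || dma_err)
			serial8250_rx_chars(up, lsr);
	}
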
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
new file mode 100644
index 000000000000..b9f7fd28112e
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -0,0 +1,216 @@
+/*
+ * 8250_dma.c - DMA Engine API support for 8250.c
+ *
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/dma-mapping.h>
+
+#include "8250.h"
+
+static void __dma_tx_complete(void *param)
+{
+ struct uart_8250_port *p = param;
+ struct uart_8250_dma *dma = p->dma;
+ struct circ_buf *xmit = &p->port.state->xmit;
+
+ dma->tx_running = 0;
+
+ dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ xmit->tail += dma->tx_size;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+ p->port.icount.tx += dma->tx_size;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&p->port);
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) {
+ serial8250_tx_dma(p);
+ uart_write_wakeup(&p->port);
+ }
+}
+
+static void __dma_rx_complete(void *param)
+{
+ struct uart_8250_port *p = param;
+ struct uart_8250_dma *dma = p->dma;
+ struct tty_port *tty_port = &p->port.state->port;
+ struct dma_tx_state state;
+ int count;
+
+ dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ dmaengine_terminate_all(dma->rxchan);
+
+ count = dma->rx_size - state.residue;
+
+ tty_insert_flip_string(tty_port, dma->rx_buf, count);
+ p->port.icount.rx += count;
+
+ tty_flip_buffer_push(tty_port);
+}
+
+int serial8250_tx_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+ struct circ_buf *xmit = &p->port.state->xmit;
+ struct dma_async_tx_descriptor *desc;
+
+ if (dma->tx_running)
+ return -EBUSY;
+
+ dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (!dma->tx_size)
+ return -EINVAL;
+
+ desc = dmaengine_prep_slave_single(dma->txchan,
+ dma->tx_addr + xmit->tail,
+ dma->tx_size, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EBUSY;
+
+ dma->tx_running = 1;
+
+ desc->callback = __dma_tx_complete;
+ desc->callback_param = p;
+
+ dma->tx_cookie = dmaengine_submit(desc);
+
+ dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ dma_async_issue_pending(dma->txchan);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_tx_dma);
+
+int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+{
+ struct uart_8250_dma *dma = p->dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_tx_state state;
+ int dma_status;
+
+ /*
+ * If RCVR FIFO trigger level was not reached, complete the transfer and
+ * let 8250.c copy the remaining data.
+ */
+ if ((iir & 0x3f) == UART_IIR_RX_TIMEOUT) {
+ dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie,
+ &state);
+ if (dma_status == DMA_IN_PROGRESS) {
+ dmaengine_pause(dma->rxchan);
+ __dma_rx_complete(p);
+ }
+ return -ETIMEDOUT;
+ }
+
+ desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
+ dma->rx_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = __dma_rx_complete;
+ desc->callback_param = p;
+
+ dma->rx_cookie = dmaengine_submit(desc);
+
+ dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ dma_async_issue_pending(dma->rxchan);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_rx_dma);
+
+int serial8250_request_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+ dma_cap_mask_t mask;
+
+ dma->rxconf.src_addr = p->port.mapbase + UART_RX;
+ dma->txconf.dst_addr = p->port.mapbase + UART_TX;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Get a channel for RX */
+ dma->rxchan = dma_request_channel(mask, dma->fn, dma->rx_param);
+ if (!dma->rxchan)
+ return -ENODEV;
+
+ dmaengine_slave_config(dma->rxchan, &dma->rxconf);
+
+ /* Get a channel for TX */
+ dma->txchan = dma_request_channel(mask, dma->fn, dma->tx_param);
+ if (!dma->txchan) {
+ dma_release_channel(dma->rxchan);
+ return -ENODEV;
+ }
+
+ dmaengine_slave_config(dma->txchan, &dma->txconf);
+
+ /* RX buffer */
+ if (!dma->rx_size)
+ dma->rx_size = PAGE_SIZE;
+
+ dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
+ &dma->rx_addr, GFP_KERNEL);
+ if (!dma->rx_buf) {
+ dma_release_channel(dma->rxchan);
+ dma_release_channel(dma->txchan);
+ return -ENOMEM;
+ }
+
+ /* TX buffer */
+ dma->tx_addr = dma_map_single(dma->txchan->device->dev,
+ p->port.state->xmit.buf,
+ UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+
+ dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_request_dma);
+
+void serial8250_release_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (!dma)
+ return;
+
+ /* Release RX resources */
+ dmaengine_terminate_all(dma->rxchan);
+ dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
+ dma->rx_addr);
+ dma_release_channel(dma->rxchan);
+ dma->rxchan = NULL;
+
+ /* Release TX resources */
+ dmaengine_terminate_all(dma->txchan);
+ dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->txchan);
+ dma->txchan = NULL;
+ dma->tx_running = 0;
+
+ dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
+}
+EXPORT_SYMBOL_GPL(serial8250_release_dma);
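
A board or bus driver is expected to fill in struct uart_8250_dma before registering the port; serial8250_startup() then requests the channels through serial8250_request_dma(). A minimal sketch under that assumption, following the pattern the dw8250 ACPI path uses below; all names and channel ids here are illustrative:

	#include <linux/dmaengine.h>
	#include <linux/serial_8250.h>

	#include "8250.h"

	/* Illustrative filter: match a fixed DMA channel id */
	static bool example_dma_filter(struct dma_chan *chan, void *param)
	{
		return chan->chan_id == *(int *)param;
	}

	static void example_attach_dma(struct uart_8250_port *up,
				       struct uart_8250_dma *dma,
				       int rx_chan, int tx_chan)
	{
		dma->fn = example_dma_filter;

		dma->rx_chan_id = rx_chan;
		dma->rx_param = &dma->rx_chan_id;
		dma->rxconf.direction = DMA_DEV_TO_MEM;
		dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

		dma->tx_chan_id = tx_chan;
		dma->tx_param = &dma->tx_chan_id;
		dma->txconf.direction = DMA_MEM_TO_DEV;
		dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

		up->dma = dma;	/* channels are requested at startup time */
	}
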
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 1d0dba2d562d..db0e66f6dd0e 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -2,6 +2,7 @@
* Synopsys DesignWare 8250 driver.
*
* Copyright 2011 Picochip, Jamie Iles.
+ * Copyright 2013 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +25,34 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+
+#include "8250.h"
+
+/* Offsets for the DesignWare specific registers */
+#define DW_UART_USR 0x1f /* UART Status Register */
+#define DW_UART_CPR 0xf4 /* Component Parameter Register */
+#define DW_UART_UCV 0xf8 /* UART Component Version */
+
+/* Intel Low Power Subsystem specific */
+#define LPSS_PRV_CLOCK_PARAMS 0x800
+
+/* Component Parameter Register bits */
+#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
+#define DW_UART_CPR_AFCE_MODE (1 << 4)
+#define DW_UART_CPR_THRE_MODE (1 << 5)
+#define DW_UART_CPR_SIR_MODE (1 << 6)
+#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
+#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
+#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
+#define DW_UART_CPR_FIFO_STAT (1 << 10)
+#define DW_UART_CPR_SHADOW (1 << 11)
+#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
+#define DW_UART_CPR_DMA_EXTRA (1 << 13)
+#define DW_UART_CPR_FIFO_MODE (0xff << 16)
+/* Helper for fifo size calculation */
+#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+
struct dw8250_data {
int last_lcr;
@@ -66,9 +95,6 @@ static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
return readl(p->membase + offset);
}
-/* Offset for the DesignWare's UART Status Register. */
-#define UART_USR 0x1f
-
static int dw8250_handle_irq(struct uart_port *p)
{
struct dw8250_data *d = p->private_data;
@@ -78,8 +104,8 @@ static int dw8250_handle_irq(struct uart_port *p)
return 1;
} else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
/* Clear the USR and write the LCR again. */
- (void)p->serial_in(p, UART_USR);
- p->serial_out(p, d->last_lcr, UART_LCR);
+ (void)p->serial_in(p, DW_UART_USR);
+ p->serial_out(p, UART_LCR, d->last_lcr);
return 1;
}
@@ -87,61 +113,210 @@ static int dw8250_handle_irq(struct uart_port *p)
return 0;
}
+static int dw8250_probe_of(struct uart_port *p)
+{
+ struct device_node *np = p->dev->of_node;
+ u32 val;
+
+ if (!of_property_read_u32(np, "reg-io-width", &val)) {
+ switch (val) {
+ case 1:
+ break;
+ case 4:
+ p->iotype = UPIO_MEM32;
+ p->serial_in = dw8250_serial_in32;
+ p->serial_out = dw8250_serial_out32;
+ break;
+ default:
+ dev_err(p->dev, "unsupported reg-io-width (%u)\n", val);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(np, "reg-shift", &val))
+ p->regshift = val;
+
+ if (of_property_read_u32(np, "clock-frequency", &val)) {
+ dev_err(p->dev, "no clock-frequency property set\n");
+ return -EINVAL;
+ }
+ p->uartclk = val;
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static bool dw8250_acpi_dma_filter(struct dma_chan *chan, void *parm)
+{
+ return chan->chan_id == *(int *)parm;
+}
+
+static acpi_status
+dw8250_acpi_walk_resource(struct acpi_resource *res, void *data)
+{
+ struct uart_port *p = data;
+ struct uart_8250_port *port;
+ struct uart_8250_dma *dma;
+ struct acpi_resource_fixed_dma *fixed_dma;
+ struct dma_slave_config *slave;
+
+ port = container_of(p, struct uart_8250_port, port);
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_FIXED_DMA:
+ fixed_dma = &res->data.fixed_dma;
+
+ /* TX comes first */
+ if (!port->dma) {
+ dma = devm_kzalloc(p->dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return AE_NO_MEMORY;
+
+ port->dma = dma;
+ slave = &dma->txconf;
+
+ slave->direction = DMA_MEM_TO_DEV;
+ slave->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave->slave_id = fixed_dma->request_lines;
+ slave->dst_maxburst = port->tx_loadsz / 4;
+
+ dma->tx_chan_id = fixed_dma->channels;
+ dma->tx_param = &dma->tx_chan_id;
+ dma->fn = dw8250_acpi_dma_filter;
+ } else {
+ dma = port->dma;
+ slave = &dma->rxconf;
+
+ slave->direction = DMA_DEV_TO_MEM;
+ slave->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave->slave_id = fixed_dma->request_lines;
+ slave->src_maxburst = p->fifosize / 4;
+
+ dma->rx_chan_id = fixed_dma->channels;
+ dma->rx_param = &dma->rx_chan_id;
+ }
+
+ break;
+ }
+
+ return AE_OK;
+}
+
+static int dw8250_probe_acpi(struct uart_port *p)
+{
+ const struct acpi_device_id *id;
+ acpi_status status;
+ u32 reg;
+
+ id = acpi_match_device(p->dev->driver->acpi_match_table, p->dev);
+ if (!id)
+ return -ENODEV;
+
+ p->iotype = UPIO_MEM32;
+ p->serial_in = dw8250_serial_in32;
+ p->serial_out = dw8250_serial_out32;
+ p->regshift = 2;
+ p->uartclk = (unsigned int)id->driver_data;
+
+ status = acpi_walk_resources(ACPI_HANDLE(p->dev), METHOD_NAME__CRS,
+ dw8250_acpi_walk_resource, p);
+ if (ACPI_FAILURE(status)) {
+ dev_err_ratelimited(p->dev, "%s failed \"%s\"\n", __func__,
+ acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ /* Fix Haswell issue where the clocks do not get enabled */
+ if (!strcmp(id->id, "INT33C4") || !strcmp(id->id, "INT33C5")) {
+ reg = readl(p->membase + LPSS_PRV_CLOCK_PARAMS);
+ writel(reg | 1, p->membase + LPSS_PRV_CLOCK_PARAMS);
+ }
+
+ return 0;
+}
+#else
+static inline int dw8250_probe_acpi(struct uart_port *p)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ACPI */
+
+static void dw8250_setup_port(struct uart_8250_port *up)
+{
+ struct uart_port *p = &up->port;
+ u32 reg = readl(p->membase + DW_UART_UCV);
+
+ /*
+ * If the Component Version Register returns zero, we know that
+ * ADDITIONAL_FEATURES are not enabled. No need to go any further.
+ */
+ if (!reg)
+ return;
+
+ dev_dbg_ratelimited(p->dev, "Designware UART version %c.%c%c\n",
+ (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
+
+ reg = readl(p->membase + DW_UART_CPR);
+ if (!reg)
+ return;
+
+ /* Select the type based on fifo */
+ if (reg & DW_UART_CPR_FIFO_MODE) {
+ p->type = PORT_16550A;
+ p->flags |= UPF_FIXED_TYPE;
+ p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
+ up->tx_loadsz = p->fifosize;
+ }
+}
+
static int dw8250_probe(struct platform_device *pdev)
{
struct uart_8250_port uart = {};
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- struct device_node *np = pdev->dev.of_node;
- u32 val;
struct dw8250_data *data;
+ int err;
if (!regs || !irq) {
dev_err(&pdev->dev, "no registers/irq defined\n");
return -EINVAL;
}
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- uart.port.private_data = data;
-
spin_lock_init(&uart.port.lock);
uart.port.mapbase = regs->start;
uart.port.irq = irq->start;
uart.port.handle_irq = dw8250_handle_irq;
uart.port.type = PORT_8250;
- uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP |
- UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
uart.port.dev = &pdev->dev;
+ uart.port.membase = ioremap(regs->start, resource_size(regs));
+ if (!uart.port.membase)
+ return -ENOMEM;
+
uart.port.iotype = UPIO_MEM;
uart.port.serial_in = dw8250_serial_in;
uart.port.serial_out = dw8250_serial_out;
- if (!of_property_read_u32(np, "reg-io-width", &val)) {
- switch (val) {
- case 1:
- break;
- case 4:
- uart.port.iotype = UPIO_MEM32;
- uart.port.serial_in = dw8250_serial_in32;
- uart.port.serial_out = dw8250_serial_out32;
- break;
- default:
- dev_err(&pdev->dev, "unsupported reg-io-width (%u)\n",
- val);
- return -EINVAL;
- }
+
+ dw8250_setup_port(&uart);
+
+ if (pdev->dev.of_node) {
+ err = dw8250_probe_of(&uart.port);
+ if (err)
+ return err;
+ } else if (ACPI_HANDLE(&pdev->dev)) {
+ err = dw8250_probe_acpi(&uart.port);
+ if (err)
+ return err;
+ } else {
+ return -ENODEV;
}
- if (!of_property_read_u32(np, "reg-shift", &val))
- uart.port.regshift = val;
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- if (of_property_read_u32(np, "clock-frequency", &val)) {
- dev_err(&pdev->dev, "no clock-frequency property set\n");
- return -EINVAL;
- }
- uart.port.uartclk = val;
+ uart.port.private_data = data;
data->line = serial8250_register_8250_port(&uart);
if (data->line < 0)
@@ -184,17 +359,25 @@ static int dw8250_resume(struct platform_device *pdev)
#define dw8250_resume NULL
#endif /* CONFIG_PM */
-static const struct of_device_id dw8250_match[] = {
+static const struct of_device_id dw8250_of_match[] = {
{ .compatible = "snps,dw-apb-uart" },
{ /* Sentinel */ }
};
-MODULE_DEVICE_TABLE(of, dw8250_match);
+MODULE_DEVICE_TABLE(of, dw8250_of_match);
+
+static const struct acpi_device_id dw8250_acpi_match[] = {
+ { "INT33C4", 100000000 },
+ { "INT33C5", 100000000 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
static struct platform_driver dw8250_platform_driver = {
.driver = {
.name = "dw-apb-uart",
.owner = THIS_MODULE,
- .of_match_table = dw8250_match,
+ .of_match_table = dw8250_of_match,
+ .acpi_match_table = ACPI_PTR(dw8250_acpi_match),
},
.probe = dw8250_probe,
.remove = dw8250_remove,
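
DW_UART_CPR_FIFO_SIZE() decodes the FIFO depth from CPR bits 16..23 in units of 16 bytes. A short, self-contained check of that decoding with a hypothetical CPR read-back value:

	#include <stdio.h>

	#define DW_UART_CPR_FIFO_MODE		(0xff << 16)
	#define DW_UART_CPR_FIFO_SIZE(a)	(((a >> 16) & 0xff) * 16)

	int main(void)
	{
		unsigned int cpr = 0x00043f32;	/* hypothetical CPR value */

		if (cpr & DW_UART_CPR_FIFO_MODE)
			/* bits 16..23 = 0x04, so 4 * 16 = 64-byte FIFOs */
			printf("fifo size: %u\n", DW_UART_CPR_FIFO_SIZE(cpr));
		return 0;
	}
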
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index f53a7db4350d..721904f8efa9 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -194,7 +194,7 @@ static int __init parse_options(struct early_serial8250_device *device,
options++;
device->baud = simple_strtoul(options, NULL, 0);
length = min(strcspn(options, " "), sizeof(device->options));
- strncpy(device->options, options, length);
+ strlcpy(device->options, options, length);
} else {
device->baud = probe_baud(port);
snprintf(device->options, sizeof(device->options), "%u",
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 26b9dc012ed0..791c5a77ec61 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1040,6 +1040,253 @@ static int pci_asix_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+/* Quatech devices have their own extra interface features */
+
+struct quatech_feature {
+ u16 devid;
+ bool amcc;
+};
+
+#define QPCR_TEST_FOR1 0x3F
+#define QPCR_TEST_GET1 0x00
+#define QPCR_TEST_FOR2 0x40
+#define QPCR_TEST_GET2 0x40
+#define QPCR_TEST_FOR3 0x80
+#define QPCR_TEST_GET3 0x40
+#define QPCR_TEST_FOR4 0xC0
+#define QPCR_TEST_GET4 0x80
+
+#define QOPR_CLOCK_X1 0x0000
+#define QOPR_CLOCK_X2 0x0001
+#define QOPR_CLOCK_X4 0x0002
+#define QOPR_CLOCK_X8 0x0003
+#define QOPR_CLOCK_RATE_MASK 0x0003
+
+
+static struct quatech_feature quatech_cards[] = {
+ { PCI_DEVICE_ID_QUATECH_QSC100, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSC100, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSC100E, 0 },
+ { PCI_DEVICE_ID_QUATECH_DSC200, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSC200E, 0 },
+ { PCI_DEVICE_ID_QUATECH_ESC100D, 1 },
+ { PCI_DEVICE_ID_QUATECH_ESC100M, 1 },
+ { PCI_DEVICE_ID_QUATECH_QSCP100, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSCP100, 1 },
+ { PCI_DEVICE_ID_QUATECH_QSCP200, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSCP200, 1 },
+ { PCI_DEVICE_ID_QUATECH_ESCLP100, 0 },
+ { PCI_DEVICE_ID_QUATECH_QSCLP100, 0 },
+ { PCI_DEVICE_ID_QUATECH_DSCLP100, 0 },
+ { PCI_DEVICE_ID_QUATECH_SSCLP100, 0 },
+ { PCI_DEVICE_ID_QUATECH_QSCLP200, 0 },
+ { PCI_DEVICE_ID_QUATECH_DSCLP200, 0 },
+ { PCI_DEVICE_ID_QUATECH_SSCLP200, 0 },
+ { PCI_DEVICE_ID_QUATECH_SPPXP_100, 0 },
+ { 0, }
+};
+
+static int pci_quatech_amcc(u16 devid)
+{
+ struct quatech_feature *qf = &quatech_cards[0];
+ while (qf->devid) {
+ if (qf->devid == devid)
+ return qf->amcc;
+ qf++;
+ }
+ pr_err("quatech: unknown port type '0x%04X'.\n", devid);
+ return 0;
+};
+
+static int pci_quatech_rqopr(struct uart_8250_port *port)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ outb(LCR, base + UART_LCR);
+ return val;
+}
+
+static void pci_quatech_wqopr(struct uart_8250_port *port, u8 qopr)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ outb(qopr, base + UART_SCR);
+ outb(LCR, base + UART_LCR);
+}
+
+static int pci_quatech_rqmcr(struct uart_8250_port *port)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val, qmcr;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ outb(val | 0x10, base + UART_SCR);
+ qmcr = inb(base + UART_MCR);
+ outb(val, base + UART_SCR);
+ outb(LCR, base + UART_LCR);
+
+ return qmcr;
+}
+
+static void pci_quatech_wqmcr(struct uart_8250_port *port, u8 qmcr)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ outb(val | 0x10, base + UART_SCR);
+ outb(qmcr, base + UART_MCR);
+ outb(val, base + UART_SCR);
+ outb(LCR, base + UART_LCR);
+}
+
+static int pci_quatech_has_qmcr(struct uart_8250_port *port)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ if (val & 0x20) {
+ outb(0x80, base + UART_LCR);
+ if (!(inb(base + UART_SCR) & 0x20)) {
+ outb(LCR, base + UART_LCR);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int pci_quatech_test(struct uart_8250_port *port)
+{
+ u8 reg;
+ u8 qopr = pci_quatech_rqopr(port);
+ pci_quatech_wqopr(port, qopr & QPCR_TEST_FOR1);
+ reg = pci_quatech_rqopr(port) & 0xC0;
+ if (reg != QPCR_TEST_GET1)
+ return -EINVAL;
+ pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR2);
+ reg = pci_quatech_rqopr(port) & 0xC0;
+ if (reg != QPCR_TEST_GET2)
+ return -EINVAL;
+ pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR3);
+ reg = pci_quatech_rqopr(port) & 0xC0;
+ if (reg != QPCR_TEST_GET3)
+ return -EINVAL;
+ pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR4);
+ reg = pci_quatech_rqopr(port) & 0xC0;
+ if (reg != QPCR_TEST_GET4)
+ return -EINVAL;
+
+ pci_quatech_wqopr(port, qopr);
+ return 0;
+}
+
+static int pci_quatech_clock(struct uart_8250_port *port)
+{
+ u8 qopr, reg, set;
+ unsigned long clock;
+
+ if (pci_quatech_test(port) < 0)
+ return 1843200;
+
+ qopr = pci_quatech_rqopr(port);
+
+ pci_quatech_wqopr(port, qopr & ~QOPR_CLOCK_X8);
+ reg = pci_quatech_rqopr(port);
+ if (reg & QOPR_CLOCK_X8) {
+ clock = 1843200;
+ goto out;
+ }
+ pci_quatech_wqopr(port, qopr | QOPR_CLOCK_X8);
+ reg = pci_quatech_rqopr(port);
+ if (!(reg & QOPR_CLOCK_X8)) {
+ clock = 1843200;
+ goto out;
+ }
+ reg &= QOPR_CLOCK_X8;
+ if (reg == QOPR_CLOCK_X2) {
+ clock = 3686400;
+ set = QOPR_CLOCK_X2;
+ } else if (reg == QOPR_CLOCK_X4) {
+ clock = 7372800;
+ set = QOPR_CLOCK_X4;
+ } else if (reg == QOPR_CLOCK_X8) {
+ clock = 14745600;
+ set = QOPR_CLOCK_X8;
+ } else {
+ clock = 1843200;
+ set = QOPR_CLOCK_X1;
+ }
+ qopr &= ~QOPR_CLOCK_RATE_MASK;
+ qopr |= set;
+
+out:
+ pci_quatech_wqopr(port, qopr);
+ return clock;
+}
+
+static int pci_quatech_rs422(struct uart_8250_port *port)
+{
+ u8 qmcr;
+ int rs422 = 0;
+
+ if (!pci_quatech_has_qmcr(port))
+ return 0;
+ qmcr = pci_quatech_rqmcr(port);
+ pci_quatech_wqmcr(port, 0xFF);
+ if (pci_quatech_rqmcr(port))
+ rs422 = 1;
+ pci_quatech_wqmcr(port, qmcr);
+ return rs422;
+}
+
+static int pci_quatech_init(struct pci_dev *dev)
+{
+ if (pci_quatech_amcc(dev->device)) {
+ unsigned long base = pci_resource_start(dev, 0);
+ if (base) {
+ u32 tmp;
+ outl(inl(base + 0x38), base + 0x38);
+ tmp = inl(base + 0x3c);
+ outl(tmp | 0x01000000, base + 0x3c);
+ outl(tmp, base + 0x3c);
+ }
+ }
+ return 0;
+}
+
+static int pci_quatech_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ /* Needed by pci_quatech calls below */
+ port->port.iobase = pci_resource_start(priv->dev, FL_GET_BASE(board->flags));
+ /* Set up the clocking */
+ port->port.uartclk = pci_quatech_clock(port);
+ /* For now just warn about RS422 */
+ if (pci_quatech_rs422(port))
+ pr_warn("quatech: software control of RS422 features not currently supported.\n");
+ return pci_default_setup(priv, board, port, idx);
+}
+
+static void pci_quatech_exit(struct pci_dev *dev)
+{
+}
+
static int pci_default_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1085,6 +1332,18 @@ pci_omegapci_setup(struct serial_private *priv,
return setup_port(priv, port, 2, idx * 8, 0);
}
+static int
+pci_brcm_trumanage_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ int ret = pci_default_setup(priv, board, port, idx);
+
+ port->port.type = PORT_BRCM_TRUMANAGE;
+ port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
+ return ret;
+}
+
static int skip_tx_en_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1301,9 +1560,13 @@ pci_wch_ch353_setup(struct serial_private *priv,
#define PCI_VENDOR_ID_AGESTAR 0x5372
#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
#define PCI_VENDOR_ID_ASIX 0x9710
-#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0019
#define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020
#define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021
+#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
+#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+
+#define PCI_VENDOR_ID_SUNIX 0x1fd4
+#define PCI_DEVICE_ID_SUNIX_1999 0x1999
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
@@ -1528,6 +1791,16 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_ni8430_setup,
.exit = pci_ni8430_exit,
},
+ /* Quatech */
+ {
+ .vendor = PCI_VENDOR_ID_QUATECH,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_quatech_init,
+ .setup = pci_quatech_setup,
+ .exit = pci_quatech_exit,
+ },
/*
* Panacom
*/
@@ -1691,6 +1964,23 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_timedia_setup,
},
/*
+ * SUNIX (Timedia) cards
+ * Do not "probe" for these cards as there is at least one combination
+ * card that should be handled by parport_pc that doesn't match the
+ * rule in pci_timedia_probe.
+ * Its part number is MIO5079A but its subdevice ID is 0x0102.
+ * There are some boards with part number SER5037AL that report
+ * subdevice ID 0x0002.
+ */
+ {
+ .vendor = PCI_VENDOR_ID_SUNIX,
+ .device = PCI_DEVICE_ID_SUNIX_1999,
+ .subvendor = PCI_VENDOR_ID_SUNIX,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_timedia_init,
+ .setup = pci_timedia_setup,
+ },
+ /*
* Exar cards
*/
{
@@ -1954,6 +2244,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_xr17v35x_setup,
},
/*
+ * Broadcom TruManage (NetXtreme)
+ */
+ {
+ .vendor = PCI_VENDOR_ID_BROADCOM,
+ .device = PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_brcm_trumanage_setup,
+ },
+
+ /*
* Default "match everything" terminator entry
*/
{
@@ -2148,6 +2449,7 @@ enum pci_board_num_t {
pbn_ce4100_1_115200,
pbn_omegapci,
pbn_NETMOS9900_2s_115200,
+ pbn_brcm_trumanage,
};
/*
@@ -2246,7 +2548,7 @@ static struct pciserial_board pci_boards[] = {
[pbn_b0_8_1152000_200] = {
.flags = FL_BASE0,
- .num_ports = 2,
+ .num_ports = 8,
.base_baud = 1152000,
.uart_offset = 0x200,
},
@@ -2892,6 +3194,12 @@ static struct pciserial_board pci_boards[] = {
.num_ports = 2,
.base_baud = 115200,
},
+ [pbn_brcm_trumanage] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .reg_shift = 2,
+ .base_baud = 115200,
+ },
};
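The pbn_brcm_trumanage entry above relies on reg_shift = 2: the 8250 core computes a register's byte offset by shifting the register index left by regshift, so on this device consecutive UART registers sit on 32-bit boundaries. A minimal sketch of that address math (illustrative only; the function name is made up):

/* Illustrative only: how an 8250-style accessor spaces registers.
 * With regshift = 2 (as in pbn_brcm_trumanage), register 1 (IER) lands
 * at byte offset 1 << 2 = 4 from the mapped base.
 */
static unsigned int reg_offset(unsigned int reg, unsigned int regshift)
{
	return reg << regshift;
}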
static const struct pci_device_id blacklist[] = {
@@ -3475,18 +3783,70 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_ROMULUS,
0x10b5, 0x106a, 0, 0,
pbn_plx_romulus },
+ /*
+ * Quatech cards. These actually have configurable clocks but for
+ * now we just use the default.
+ *
+ * 100 series are RS232, 200 series are RS422.
+ */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_4_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC200E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_4_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100D,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_8_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100M,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_8_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCLP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCLP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SSCLP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCLP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCLP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SSCLP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESCLP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_8_115200 },
+
{ PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4,
0, 0,
@@ -3871,6 +4231,19 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_b0_bt_1_921600 },
/*
+ * SUNIX (TIMEDIA)
+ */
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xffff00,
+ pbn_b0_bt_1_921600 },
+
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b0_bt_1_921600 },
+
+ /*
* AFAVLAB serial card, from Harald Welte <laforge@gnumonks.org>
*/
{ PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P028,
@@ -4471,6 +4844,13 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_omegapci },
/*
+ * Broadcom TruManage
+ */
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_brcm_trumanage },
+
+ /*
* AgeStar as-prs2-009
*/
{ PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375,
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index c31133a6ea8e..2ef9537bcb2c 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -84,6 +84,14 @@ config SERIAL_8250_GSC
depends on SERIAL_8250 && GSC
default SERIAL_8250
+config SERIAL_8250_DMA
+ bool "DMA support for 16550 compatible UART controllers" if EXPERT
+ depends on SERIAL_8250 && DMADEVICES=y
+ default SERIAL_8250
+ help
+ This builds DMA support that can be used with 8250/16650
+ compatible UART controllers that support DMA signaling.
+
config SERIAL_8250_PCI
tristate "8250/16550 PCI device support" if EXPERT
depends on SERIAL_8250 && PCI
@@ -249,15 +257,6 @@ config SERIAL_8250_ACORN
system, say Y to this option. The driver can handle 1, 2, or 3 port
cards. If unsure, say N.
-config SERIAL_8250_RM9K
- bool "Support for MIPS RM9xxx integrated serial port"
- depends on SERIAL_8250 != n && SERIAL_RM9000
- select SERIAL_8250_SHARE_IRQ
- help
- Selecting this option will add support for the integrated serial
- port hardware found on MIPS RM9122 and similar processors.
- If unsure, say N.
-
config SERIAL_8250_FSL
bool
depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550
@@ -265,7 +264,7 @@ config SERIAL_8250_FSL
config SERIAL_8250_DW
tristate "Support for Synopsys DesignWare 8250 quirks"
- depends on SERIAL_8250 && OF
+ depends on SERIAL_8250
help
Selecting this option will enable handling of the extra features
present in the Synopsys DesignWare APB UART.
@@ -277,3 +276,11 @@ config SERIAL_8250_EM
Selecting this option will add support for the integrated serial
port hardware found on the Emma Mobile line of processors.
If unsure, say N.
+
+config SERIAL_8250_RT288X
+ bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
+ depends on SERIAL_8250 && (SOC_RT288X || SOC_RT305X || SOC_RT3883)
+ help
+ If you have a Ralink RT288x/RT305x SoC based board and want to use the
+ serial port, say Y to this option. The driver can handle up to 2 serial
+ ports. If unsure, say N.
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 108fe7fe13e2..a23838a4d535 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_SERIAL_8250) += 8250_core.o
8250_core-y := 8250.o
8250_core-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
+8250_core-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 59c23d038106..a0162cbf0557 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -2,8 +2,10 @@
# Serial device configuration
#
+if TTY
+
menu "Serial drivers"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && GENERIC_HARDIRQS
source "drivers/tty/serial/8250/Kconfig"
@@ -269,6 +271,17 @@ config SERIAL_SIRFSOC_CONSOLE
your boot loader about how to pass options to the kernel at
boot time.)
+config SERIAL_TEGRA
+ tristate "NVIDIA Tegra20/30 SoC serial controller"
+ depends on ARCH_TEGRA && TEGRA20_APB_DMA
+ select SERIAL_CORE
+ help
+ Support for the on-chip UARTs on the NVIDIA Tegra series SOCs
+ providing /dev/ttyHS0, 1, 2, 3 and 4 (note, some machines may not
+ provide all of these ports, depending on how the serial ports
+ are enabled). This driver uses the APB DMA to achieve higher baud
+ rates and better performance.
+
config SERIAL_MAX3100
tristate "MAX3100 support"
depends on SPI
@@ -1447,4 +1460,30 @@ config SERIAL_ARC_NR_PORTS
Set this to the number of serial ports you want the driver
to support.
+config SERIAL_RP2
+ tristate "Comtrol RocketPort EXPRESS/INFINITY support"
+ depends on PCI
+ select SERIAL_CORE
+ help
+ This driver supports the Comtrol RocketPort EXPRESS and
+ RocketPort INFINITY families of PCI/PCIe multiport serial adapters.
+ These adapters use a "RocketPort 2" ASIC that is not compatible
+ with the original RocketPort driver (CONFIG_ROCKETPORT).
+
+ To compile this driver as a module, choose M here: the
+ module will be called rp2.
+
+ If you want to compile this driver into the kernel, say Y here. If
+ you don't have a suitable RocketPort card installed, say N.
+
+config SERIAL_RP2_NR_UARTS
+ int "Maximum number of RocketPort EXPRESS/INFINITY ports"
+ depends on SERIAL_RP2
+ default "32"
+ help
+ If multiple cards are present, the default limit of 32 ports may
+ need to be increased.
+
endmenu
+
+endif # TTY
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index df1b998c436b..eedfec40e3dd 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -80,6 +80,8 @@ obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
obj-$(CONFIG_SERIAL_SIRFSOC) += sirfsoc_uart.o
+obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o
obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o
obj-$(CONFIG_SERIAL_EFM32_UART) += efm32-uart.o
obj-$(CONFIG_SERIAL_ARC) += arc_uart.o
+obj-$(CONFIG_SERIAL_RP2) += rp2.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 872f14ae43d2..c6bdb943726b 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -139,7 +139,7 @@ static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp)
uart_insert_char(port, 0, 0, ch, flag);
}
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
}
static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
@@ -493,11 +493,9 @@ static int __init altera_jtaguart_init(void)
if (rc)
return rc;
rc = platform_driver_register(&altera_jtaguart_platform_driver);
- if (rc) {
+ if (rc)
uart_unregister_driver(&altera_jtaguart_driver);
- return rc;
- }
- return 0;
+ return rc;
}
static void __exit altera_jtaguart_exit(void)
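The rx hunk above shows the conversion applied throughout the remaining drivers in this patch: the flip-buffer helpers now take the struct tty_port embedded in uart_state rather than a struct tty_struct, so drivers no longer need to look up and NULL-check the tty. A minimal sketch of the resulting receive-path shape (a generic skeleton for illustration, not code from this patch):

#include <linux/serial_core.h>
#include <linux/tty_flip.h>

/* Sketch of the post-conversion receive path: tty_insert_flip_char() and
 * tty_flip_buffer_push() operate directly on the tty_port, so no
 * tty_struct reference counting is needed here.
 */
static void example_push_chars(struct uart_port *up,
			       const unsigned char *buf, size_t n)
{
	struct tty_port *tport = &up->state->port;
	size_t i;

	for (i = 0; i < n; i++)
		tty_insert_flip_char(tport, buf[i], TTY_NORMAL);
	tty_flip_buffer_push(tport);
}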
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 684a0808e1c7..13471dd95793 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -231,7 +231,7 @@ static void altera_uart_rx_chars(struct altera_uart *pp)
flag);
}
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
}
static void altera_uart_tx_chars(struct altera_uart *pp)
@@ -637,11 +637,9 @@ static int __init altera_uart_init(void)
if (rc)
return rc;
rc = platform_driver_register(&altera_uart_platform_driver);
- if (rc) {
+ if (rc)
uart_unregister_driver(&altera_uart_driver);
- return rc;
- }
- return 0;
+ return rc;
}
static void __exit altera_uart_exit(void)
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 22317dd16474..c36840519527 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -116,7 +116,6 @@ static void pl010_enable_ms(struct uart_port *port)
static void pl010_rx_chars(struct uart_amba_port *uap)
{
- struct tty_struct *tty = uap->port.state->port.tty;
unsigned int status, ch, flag, rsr, max_count = 256;
status = readb(uap->port.membase + UART01x_FR);
@@ -165,7 +164,7 @@ static void pl010_rx_chars(struct uart_amba_port *uap)
status = readb(uap->port.membase + UART01x_FR);
}
spin_unlock(&uap->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uap->port.state->port);
spin_lock(&uap->port.lock);
}
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 7fca4022a8b2..3ea5408fcbeb 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -698,7 +698,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
u32 pending, bool use_buf_b,
bool readfifo)
{
- struct tty_struct *tty = uap->port.state->port.tty;
+ struct tty_port *port = &uap->port.state->port;
struct pl011_sgbuf *sgbuf = use_buf_b ?
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
struct device *dev = uap->dmarx.chan->device->dev;
@@ -715,8 +715,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
- dma_count = tty_insert_flip_string(uap->port.state->port.tty,
- sgbuf->buf, pending);
+ dma_count = tty_insert_flip_string(port, sgbuf->buf, pending);
/* Return buffer to device */
dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
@@ -754,7 +753,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
dev_vdbg(uap->port.dev,
"Took %d chars from DMA buffer and %d chars from the FIFO\n",
dma_count, fifotaken);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
spin_lock(&uap->port.lock);
}
@@ -1076,12 +1075,10 @@ static void pl011_enable_ms(struct uart_port *port)
static void pl011_rx_chars(struct uart_amba_port *uap)
{
- struct tty_struct *tty = uap->port.state->port.tty;
-
pl011_fifo_to_tty(uap);
spin_unlock(&uap->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uap->port.state->port);
/*
* If we were temporarily out of DMA mode for a while,
* attempt to switch back to DMA mode again.
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 59ae2b53e765..6331464d9101 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -78,7 +78,6 @@ static void apbuart_enable_ms(struct uart_port *port)
static void apbuart_rx_chars(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
unsigned int status, ch, rsr, flag;
unsigned int max_chars = port->fifosize;
@@ -126,7 +125,7 @@ static void apbuart_rx_chars(struct uart_port *port)
status = UART_GET_STATUS(port);
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
}
static void apbuart_tx_chars(struct uart_port *port)
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 505c490c0b44..27f20c57abed 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -297,10 +297,9 @@ static void ar933x_uart_set_termios(struct uart_port *port,
static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
{
- struct tty_struct *tty;
+ struct tty_port *port = &up->port.state->port;
int max_count = 256;
- tty = tty_port_tty_get(&up->port.state->port);
do {
unsigned int rdata;
unsigned char ch;
@@ -313,11 +312,6 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
ar933x_uart_write(up, AR933X_UART_DATA_REG,
AR933X_UART_DATA_RX_CSR);
- if (!tty) {
- /* discard the data if no tty available */
- continue;
- }
-
up->port.icount.rx++;
ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
@@ -325,13 +319,10 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
continue;
if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ tty_insert_flip_char(port, ch, TTY_NORMAL);
} while (max_count-- > 0);
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_flip_buffer_push(port);
}
static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 3e0b3fac6a0e..d97e194b6bc5 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -37,6 +37,8 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
/*************************************
* ARC UART Hardware Specs
@@ -209,12 +211,8 @@ static void arc_serial_start_tx(struct uart_port *port)
static void arc_serial_rx_chars(struct arc_uart_port *uart)
{
- struct tty_struct *tty = tty_port_tty_get(&uart->port.state->port);
unsigned int status, ch, flg = 0;
- if (!tty)
- return;
-
/*
* UART has 4 deep RX-FIFO. Driver's recognition of this fact
* is very subtle. Here's how ...
@@ -250,10 +248,8 @@ static void arc_serial_rx_chars(struct arc_uart_port *uart)
uart_insert_char(&uart->port, status, RXOERR, ch, flg);
done:
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uart->port.state->port);
}
-
- tty_kref_put(tty);
}
/*
@@ -526,18 +522,37 @@ static struct uart_ops arc_serial_pops = {
};
static int
-arc_uart_init_one(struct platform_device *pdev, struct arc_uart_port *uart)
+arc_uart_init_one(struct platform_device *pdev, int dev_id)
{
struct resource *res, *res2;
unsigned long *plat_data;
-
- if (pdev->id < 0 || pdev->id >= CONFIG_SERIAL_ARC_NR_PORTS) {
- dev_err(&pdev->dev, "Wrong uart platform device id.\n");
- return -ENOENT;
- }
+ struct arc_uart_port *uart = &arc_uart_ports[dev_id];
plat_data = ((unsigned long *)(pdev->dev.platform_data));
- uart->baud = plat_data[0];
+ if (!plat_data)
+ return -ENODEV;
+
+ uart->is_emulated = !!plat_data[0]; /* workaround ISS bug */
+
+ if (is_early_platform_device(pdev)) {
+ uart->port.uartclk = plat_data[1];
+ uart->baud = plat_data[2];
+ } else {
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+
+ if (of_property_read_u32(np, "clock-frequency", &val)) {
+ dev_err(&pdev->dev, "clock-frequency property NOTset\n");
+ return -EINVAL;
+ }
+ uart->port.uartclk = val;
+
+ if (of_property_read_u32(np, "current-speed", &val)) {
+ dev_err(&pdev->dev, "current-speed property NOT set\n");
+ return -EINVAL;
+ }
+ uart->baud = val;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -557,10 +572,9 @@ arc_uart_init_one(struct platform_device *pdev, struct arc_uart_port *uart)
uart->port.dev = &pdev->dev;
uart->port.iotype = UPIO_MEM;
uart->port.flags = UPF_BOOT_AUTOCONF;
- uart->port.line = pdev->id;
+ uart->port.line = dev_id;
uart->port.ops = &arc_serial_pops;
- uart->port.uartclk = plat_data[1];
uart->port.fifosize = ARC_UART_TX_FIFO_SIZE;
/*
@@ -569,9 +583,6 @@ arc_uart_init_one(struct platform_device *pdev, struct arc_uart_port *uart)
*/
uart->port.ignore_status_mask = 0;
- /* Real Hardware vs. emulated to work around a bug */
- uart->is_emulated = !!plat_data[2];
-
return 0;
}
@@ -648,45 +659,50 @@ static __init void early_serial_write(struct console *con, const char *s,
}
}
-static struct __initdata console arc_early_serial_console = {
+static struct console arc_early_serial_console __initdata = {
.name = "early_ARCuart",
.write = early_serial_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1
};
-static int arc_serial_probe_earlyprintk(struct platform_device *pdev)
+static int __init arc_serial_probe_earlyprintk(struct platform_device *pdev)
{
- arc_early_serial_console.index = pdev->id;
+ int dev_id = pdev->id < 0 ? 0 : pdev->id;
+ int rc;
+
+ arc_early_serial_console.index = dev_id;
- arc_uart_init_one(pdev, &arc_uart_ports[pdev->id]);
+ rc = arc_uart_init_one(pdev, dev_id);
+ if (rc)
+ panic("early console init failed\n");
arc_serial_console_setup(&arc_early_serial_console, NULL);
register_console(&arc_early_serial_console);
return 0;
}
-#else
-static int arc_serial_probe_earlyprintk(struct platform_device *pdev)
-{
- return -ENODEV;
-}
#endif /* CONFIG_SERIAL_ARC_CONSOLE */
static int arc_serial_probe(struct platform_device *pdev)
{
- struct arc_uart_port *uart;
- int rc;
+ int rc, dev_id;
+ struct device_node *np = pdev->dev.of_node;
+
+ /* no device tree device */
+ if (!np)
+ return -ENODEV;
- if (is_early_platform_device(pdev))
- return arc_serial_probe_earlyprintk(pdev);
+ dev_id = of_alias_get_id(np, "serial");
+ if (dev_id < 0)
+ dev_id = 0;
- uart = &arc_uart_ports[pdev->id];
- rc = arc_uart_init_one(pdev, uart);
+ rc = arc_uart_init_one(pdev, dev_id);
if (rc)
return rc;
- return uart_add_one_port(&arc_uart_driver, &uart->port);
+ rc = uart_add_one_port(&arc_uart_driver, &arc_uart_ports[dev_id].port);
+ return rc;
}
static int arc_serial_remove(struct platform_device *pdev)
@@ -695,16 +711,32 @@ static int arc_serial_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id arc_uart_dt_ids[] = {
+ { .compatible = "snps,arc-uart" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, arc_uart_dt_ids);
+
static struct platform_driver arc_platform_driver = {
.probe = arc_serial_probe,
.remove = arc_serial_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = arc_uart_dt_ids,
},
};
#ifdef CONFIG_SERIAL_ARC_CONSOLE
+
+static struct platform_driver early_arc_platform_driver __initdata = {
+ .probe = arc_serial_probe_earlyprintk,
+ .remove = arc_serial_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
/*
* Register an early platform driver of "earlyprintk" class.
* ARCH platform code installs the driver and probes the early devices
@@ -712,7 +744,7 @@ static struct platform_driver arc_platform_driver = {
* or it could be done independently, for all "earlyprintk" class drivers.
* [see arch/arc/plat-arcfpga/platform.c]
*/
-early_platform_init("earlyprintk", &arc_platform_driver);
+early_platform_init("earlyprintk", &early_arc_platform_driver);
#endif /* CONFIG_SERIAL_ARC_CONSOLE */
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 922e85aeb63a..d4a7c241b751 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -774,14 +774,14 @@ static void atmel_rx_from_ring(struct uart_port *port)
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
}
static void atmel_rx_from_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct atmel_dma_buffer *pdc;
int rx_idx = atmel_port->pdc_rx_idx;
unsigned int head;
@@ -820,7 +820,8 @@ static void atmel_rx_from_dma(struct uart_port *port)
*/
count = head - tail;
- tty_insert_flip_string(tty, pdc->buf + pdc->ofs, count);
+ tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
+ count);
dma_sync_single_for_device(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
@@ -848,7 +849,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
spin_lock(&port->lock);
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index c76a226080f2..719594e5fc21 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -235,14 +235,13 @@ static const char *bcm_uart_type(struct uart_port *port)
*/
static void bcm_uart_do_rx(struct uart_port *port)
{
- struct tty_struct *tty;
+ struct tty_port *tty_port = &port->state->port;
unsigned int max_count;
/* limit number of char read in interrupt, should not be
* higher than fifo size anyway since we're much faster than
* serial port */
max_count = 32;
- tty = port->state->port.tty;
do {
unsigned int iestat, c, cstat;
char flag;
@@ -261,7 +260,7 @@ static void bcm_uart_do_rx(struct uart_port *port)
bcm_uart_writel(port, val, UART_CTL_REG);
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tty_port, 0, TTY_OVERRUN);
}
if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
@@ -300,11 +299,11 @@ static void bcm_uart_do_rx(struct uart_port *port)
if ((cstat & port->ignore_status_mask) == 0)
- tty_insert_flip_char(tty, c, flag);
+ tty_insert_flip_char(tty_port, c, flag);
} while (--max_count);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tty_port);
}
/*
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index f5d117379b60..487c173b0f72 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -149,7 +149,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
{
struct sport_uart_port *up = dev_id;
- struct tty_struct *tty = up->port.state->port.tty;
+ struct tty_port *port = &up->port.state->port;
unsigned int ch;
spin_lock(&up->port.lock);
@@ -159,9 +159,10 @@ static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
up->port.icount.rx++;
if (!uart_handle_sysrq_char(&up->port, ch))
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ tty_insert_flip_char(port, ch, TTY_NORMAL);
}
- tty_flip_buffer_push(tty);
+ /* XXX this won't deadlock with lowlat? */
+ tty_flip_buffer_push(port);
spin_unlock(&up->port.lock);
@@ -182,7 +183,6 @@ static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
{
struct sport_uart_port *up = dev_id;
- struct tty_struct *tty = up->port.state->port.tty;
unsigned int stat = SPORT_GET_STAT(up);
spin_lock(&up->port.lock);
@@ -190,7 +190,7 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
/* Overflow in RX FIFO */
if (stat & ROVF) {
up->port.icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&up->port.state->port, 0, TTY_OVERRUN);
SPORT_PUT_STAT(up, ROVF); /* Clear ROVF bit */
}
/* These should not happen */
@@ -205,6 +205,8 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
SSYNC();
spin_unlock(&up->port.lock);
+ /* XXX we don't push the overrun bit to TTY? */
+
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 2e2b2c1cb722..12dceda9db33 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -223,7 +223,6 @@ static void bfin_serial_enable_ms(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_PIO
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
- struct tty_struct *tty = NULL;
unsigned int status, ch, flg;
static struct timeval anomaly_start = { .tv_sec = 0 };
@@ -242,11 +241,9 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
return;
}
- if (!uart->port.state || !uart->port.state->port.tty)
+ if (!uart->port.state)
return;
#endif
- tty = uart->port.state->port.tty;
-
if (ANOMALY_05000363) {
/* The BF533 (and BF561) family of processors have a nice anomaly
* where they continuously generate characters for a "single" break.
@@ -325,7 +322,7 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
uart_insert_char(&uart->port, status, OE, ch, flg);
ignore_char:
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uart->port.state->port);
}
static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
@@ -426,7 +423,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
- struct tty_struct *tty = uart->port.state->port.tty;
int i, flg, status;
status = UART_GET_LSR(uart);
@@ -471,7 +467,7 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
}
dma_ignore_char:
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uart->port.state->port);
}
void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 3fd2526d121e..bfb17968c8db 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -85,12 +85,8 @@ static void uart_clps711x_enable_ms(struct uart_port *port)
static irqreturn_t uart_clps711x_int_rx(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
unsigned int status, ch, flg;
- if (!tty)
- return IRQ_HANDLED;
-
for (;;) {
status = clps_readl(SYSFLG(port));
if (status & SYSFLG_URXFE)
@@ -130,9 +126,7 @@ static irqreturn_t uart_clps711x_int_rx(int irq, void *dev_id)
uart_insert_char(port, status, UARTDR_OVERR, ch, flg);
}
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index ad0caf176808..97f4e1858649 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -245,7 +245,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
int i;
unsigned char ch;
u8 *cp;
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
cbd_t __iomem *bdp;
u16 status;
@@ -276,7 +276,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
/* If we do not have enough room in the tty flip buffer, we try
* again later, at the next rx-interrupt or a timeout
*/
- if(tty_buffer_request_room(tty, i) < i) {
+ if (tty_buffer_request_room(tport, i) < i) {
printk(KERN_WARNING "No room in flip buffer\n");
return;
}
@@ -302,7 +302,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
}
#endif
error_return:
- tty_insert_flip_char(tty, ch, flg);
+ tty_insert_flip_char(tport, ch, flg);
} /* End while (i--) */
@@ -322,7 +322,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
pinfo->rx_cur = bdp;
/* activate BH processing */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
return;
@@ -507,7 +507,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
if (baud < HW_BUF_SPD_THRESHOLD ||
- (pinfo->port.state && pinfo->port.state->port.tty->low_latency))
+ (pinfo->port.state && pinfo->port.state->port.low_latency))
pinfo->rx_fifosize = 1;
else
pinfo->rx_fifosize = RX_BUF_SIZE;
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 35ee6a2c6877..5f37c31e32bc 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -1760,8 +1760,7 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl
info->icount.rx++;
} else {
- struct tty_struct *tty = info->port.tty;
- tty_insert_flip_char(tty, data, flag);
+ tty_insert_flip_char(&info->port, data, flag);
info->icount.rx++;
}
@@ -2105,22 +2104,15 @@ static int force_eop_if_needed(struct e100_serial *info)
static void flush_to_flip_buffer(struct e100_serial *info)
{
- struct tty_struct *tty;
struct etrax_recv_buffer *buffer;
unsigned long flags;
local_irq_save(flags);
- tty = info->port.tty;
-
- if (!tty) {
- local_irq_restore(flags);
- return;
- }
while ((buffer = info->first_recv_buffer) != NULL) {
unsigned int count = buffer->length;
- tty_insert_flip_string(tty, buffer->buffer, count);
+ tty_insert_flip_string(&info->port, buffer->buffer, count);
info->recv_cnt -= count;
if (count == buffer->length) {
@@ -2139,7 +2131,7 @@ static void flush_to_flip_buffer(struct e100_serial *info)
local_irq_restore(flags);
/* This includes a check for low-latency */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
static void check_flush_timeout(struct e100_serial *info)
@@ -2275,12 +2267,6 @@ static
struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
{
unsigned long data_read;
- struct tty_struct *tty = info->port.tty;
-
- if (!tty) {
- printk("!NO TTY!\n");
- return info;
- }
/* Read data and status at the same time */
data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
@@ -2338,8 +2324,7 @@ more_data:
data_in, data_read);
char flag = TTY_NORMAL;
if (info->errorcode == ERRCODE_INSERT_BREAK) {
- struct tty_struct *tty = info->port.tty;
- tty_insert_flip_char(tty, 0, flag);
+ tty_insert_flip_char(&info->port, 0, flag);
info->icount.rx++;
}
@@ -2353,7 +2338,7 @@ more_data:
info->icount.frame++;
flag = TTY_FRAME;
}
- tty_insert_flip_char(tty, data, flag);
+ tty_insert_flip_char(&info->port, data, flag);
info->errorcode = 0;
}
info->break_detected_cnt = 0;
@@ -2369,7 +2354,7 @@ more_data:
log_int(rdpc(), 0, 0);
}
);
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(&info->port,
IO_EXTRACT(R_SERIAL0_READ, data_in, data_read),
TTY_NORMAL);
} else {
@@ -2384,7 +2369,7 @@ more_data:
goto more_data;
}
- tty_flip_buffer_push(info->port.tty);
+ tty_flip_buffer_push(&info->port);
return info;
}
@@ -3137,7 +3122,7 @@ static int rs_raw_write(struct tty_struct *tty,
/* first some sanity checks */
- if (!tty || !info->xmit.buf)
+ if (!info->xmit.buf)
return 0;
#ifdef SERIAL_DEBUG_DATA
@@ -3464,7 +3449,7 @@ set_serial_info(struct e100_serial *info,
info->type = new_serial.type;
info->close_delay = new_serial.close_delay;
info->closing_wait = new_serial.closing_wait;
- info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->port.low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
if (info->flags & ASYNC_INITIALIZED) {
@@ -4108,7 +4093,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
tty->driver_data = info;
info->port.tty = tty;
- tty->low_latency = !!(info->flags & ASYNC_LOW_LATENCY);
+ info->port.low_latency = !!(info->flags & ASYNC_LOW_LATENCY);
/*
* If the port is in the middle of closing, bail out now
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 6491b8644a7f..2f2b2e538a54 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -187,7 +187,6 @@ static inline void dz_receive_chars(struct dz_mux *mux)
{
struct uart_port *uport;
struct dz_port *dport = &mux->dport[0];
- struct tty_struct *tty = NULL;
struct uart_icount *icount;
int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
unsigned char ch, flag;
@@ -197,7 +196,6 @@ static inline void dz_receive_chars(struct dz_mux *mux)
while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) {
dport = &mux->dport[LINE(status)];
uport = &dport->port;
- tty = uport->state->port.tty; /* point to the proper dev */
ch = UCHAR(status); /* grab the char */
flag = TTY_NORMAL;
@@ -249,7 +247,7 @@ static inline void dz_receive_chars(struct dz_mux *mux)
}
for (i = 0; i < DZ_NB_PORT; i++)
if (lines_rx[i])
- tty_flip_buffer_push(mux->dport[i].port.state->port.tty);
+ tty_flip_buffer_push(&mux->dport[i].port.state->port);
}
/*
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index a8cbb2670521..7d199c8e1a75 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -81,6 +81,7 @@ struct efm32_uart_port {
struct uart_port port;
unsigned int txirq;
struct clk *clk;
+ struct efm32_uart_pdata pdata;
};
#define to_efm_port(_port) container_of(_port, struct efm32_uart_port, port)
#define efm_debug(efm_port, format, arg...) \
@@ -194,8 +195,7 @@ static void efm32_uart_break_ctl(struct uart_port *port, int ctl)
/* not possible without fiddling with gpios */
}
-static void efm32_uart_rx_chars(struct efm32_uart_port *efm_port,
- struct tty_struct *tty)
+static void efm32_uart_rx_chars(struct efm32_uart_port *efm_port)
{
struct uart_port *port = &efm_port->port;
@@ -237,8 +237,8 @@ static void efm32_uart_rx_chars(struct efm32_uart_port *efm_port,
rxdata & UARTn_RXDATAX_RXDATA__MASK))
continue;
- if (tty && (rxdata & port->ignore_status_mask) == 0)
- tty_insert_flip_char(tty,
+ if ((rxdata & port->ignore_status_mask) == 0)
+ tty_insert_flip_char(&port->state->port,
rxdata & UARTn_RXDATAX_RXDATA__MASK, flag);
}
}
@@ -249,15 +249,13 @@ static irqreturn_t efm32_uart_rxirq(int irq, void *data)
u32 irqflag = efm32_uart_read32(efm_port, UARTn_IF);
int handled = IRQ_NONE;
struct uart_port *port = &efm_port->port;
- struct tty_struct *tty;
+ struct tty_port *tport = &port->state->port;
spin_lock(&port->lock);
- tty = tty_kref_get(port->state->port.tty);
-
if (irqflag & UARTn_IF_RXDATAV) {
efm32_uart_write32(efm_port, UARTn_IF_RXDATAV, UARTn_IFC);
- efm32_uart_rx_chars(efm_port, tty);
+ efm32_uart_rx_chars(efm_port);
handled = IRQ_HANDLED;
}
@@ -265,16 +263,12 @@ static irqreturn_t efm32_uart_rxirq(int irq, void *data)
if (irqflag & UARTn_IF_RXOF) {
efm32_uart_write32(efm_port, UARTn_IF_RXOF, UARTn_IFC);
port->icount.overrun++;
- if (tty)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
handled = IRQ_HANDLED;
}
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_flip_buffer_push(tport);
spin_unlock(&port->lock);
@@ -300,13 +294,8 @@ static irqreturn_t efm32_uart_txirq(int irq, void *data)
static int efm32_uart_startup(struct uart_port *port)
{
struct efm32_uart_port *efm_port = to_efm_port(port);
- u32 location = 0;
- struct efm32_uart_pdata *pdata = dev_get_platdata(port->dev);
int ret;
- if (pdata)
- location = UARTn_ROUTE_LOCATION(pdata->location);
-
ret = clk_enable(efm_port->clk);
if (ret) {
efm_debug(efm_port, "failed to enable clk\n");
@@ -315,7 +304,9 @@ static int efm32_uart_startup(struct uart_port *port)
port->uartclk = clk_get_rate(efm_port->clk);
/* Enable pins at configured location */
- efm32_uart_write32(efm_port, location | UARTn_ROUTE_RXPEN | UARTn_ROUTE_TXPEN,
+ efm32_uart_write32(efm_port,
+ UARTn_ROUTE_LOCATION(efm_port->pdata.location) |
+ UARTn_ROUTE_RXPEN | UARTn_ROUTE_TXPEN,
UARTn_ROUTE);
ret = request_irq(port->irq, efm32_uart_rxirq, 0,
@@ -674,11 +665,24 @@ static int efm32_uart_probe_dt(struct platform_device *pdev,
struct efm32_uart_port *efm_port)
{
struct device_node *np = pdev->dev.of_node;
+ u32 location;
int ret;
if (!np)
return 1;
+ ret = of_property_read_u32(np, "location", &location);
+ if (!ret) {
+ if (location > 5) {
+ dev_err(&pdev->dev, "invalid location\n");
+ return -EINVAL;
+ }
+ efm_debug(efm_port, "using location %u\n", location);
+ efm_port->pdata.location = location;
+ } else {
+ efm_debug(efm_port, "fall back to location 0\n");
+ }
+
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id: %d\n", ret);
@@ -738,10 +742,16 @@ static int efm32_uart_probe(struct platform_device *pdev)
efm_port->port.flags = UPF_BOOT_AUTOCONF;
ret = efm32_uart_probe_dt(pdev, efm_port);
- if (ret > 0)
+ if (ret > 0) {
/* not created by device tree */
+ const struct efm32_uart_pdata *pdata = dev_get_platdata(&pdev->dev);
+
efm_port->port.line = pdev->id;
+ if (pdata)
+ efm_port->pdata = *pdata;
+ }
+
if (efm_port->port.line >= 0 &&
efm_port->port.line < ARRAY_SIZE(efm32_uart_ports))
efm32_uart_ports[efm_port->port.line] = efm_port;
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 72b6334bcf1a..bc9e6b017b05 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -734,7 +734,7 @@ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
{
short int count, rcv_buff;
- struct tty_struct *tty = icom_port->uart_port.state->port.tty;
+ struct tty_port *port = &icom_port->uart_port.state->port;
unsigned short int status;
struct uart_icount *icount;
unsigned long offset;
@@ -761,7 +761,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
/* Block copy all but the last byte as this may have status */
if (count > 0) {
first = icom_port->recv_buf[offset];
- tty_insert_flip_string(tty, icom_port->recv_buf + offset, count - 1);
+ tty_insert_flip_string(port, icom_port->recv_buf + offset, count - 1);
}
icount = &icom_port->uart_port.icount;
@@ -812,7 +812,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
}
- tty_insert_flip_char(tty, *(icom_port->recv_buf + offset + count - 1), flag);
+ tty_insert_flip_char(port, *(icom_port->recv_buf + offset + count - 1), flag);
if (status & SA_FLAGS_OVERRUN)
/*
@@ -820,7 +820,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
* reported immediately, and doesn't
* affect the current character
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
ignore_char:
icom_port->statStg->rcv[rcv_buff].flags = 0;
icom_port->statStg->rcv[rcv_buff].leLength = 0;
@@ -834,7 +834,7 @@ ignore_char:
status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
}
icom_port->next_rcv = rcv_buff;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
static void process_interrupt(u16 port_int_reg,
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 675d94ab0aff..68d7ce997ede 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -481,7 +481,6 @@ static int ifx_spi_prepare_tx_buffer(struct ifx_spi_device *ifx_dev)
unsigned char *tx_buffer;
tx_buffer = ifx_dev->tx_buffer;
- memset(tx_buffer, 0, IFX_SPI_TRANSFER_SIZE);
/* make room for required SPI header */
tx_buffer += IFX_SPI_HEADER_OVERHEAD;
@@ -615,7 +614,7 @@ static int ifx_port_activate(struct tty_port *port, struct tty_struct *tty)
tty->driver_data = ifx_dev;
/* allows flip string push from int context */
- tty->low_latency = 1;
+ port->low_latency = 1;
/* set flag to allows data transfer */
set_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);
@@ -637,6 +636,7 @@ static void ifx_port_shutdown(struct tty_port *port)
clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);
mrdy_set_low(ifx_dev);
+ del_timer(&ifx_dev->spi_timer);
clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
tasklet_kill(&ifx_dev->io_work_tasklet);
}
@@ -669,12 +669,8 @@ static const struct tty_operations ifx_spi_serial_ops = {
static void ifx_spi_insert_flip_string(struct ifx_spi_device *ifx_dev,
unsigned char *chars, size_t size)
{
- struct tty_struct *tty = tty_port_tty_get(&ifx_dev->tty_port);
- if (!tty)
- return;
- tty_insert_flip_string(tty, chars, size);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&ifx_dev->tty_port, chars, size);
+ tty_flip_buffer_push(&ifx_dev->tty_port);
}
/**
@@ -810,7 +806,8 @@ static void ifx_spi_io(unsigned long data)
ifx_dev->spi_xfer.cs_change = 0;
ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz;
/* ifx_dev->spi_xfer.speed_hz = 390625; */
- ifx_dev->spi_xfer.bits_per_word = spi_bpw;
+ ifx_dev->spi_xfer.bits_per_word =
+ ifx_dev->spi_dev->bits_per_word;
ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 59819121fe9b..147c9e193595 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -48,8 +48,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include <linux/platform_data/serial-imx.h>
@@ -73,102 +73,102 @@
#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
/* UART Control Register Bit Fields.*/
-#define URXD_CHARRDY (1<<15)
-#define URXD_ERR (1<<14)
-#define URXD_OVRRUN (1<<13)
-#define URXD_FRMERR (1<<12)
-#define URXD_BRK (1<<11)
-#define URXD_PRERR (1<<10)
-#define UCR1_ADEN (1<<15) /* Auto detect interrupt */
-#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
-#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
-#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
-#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
-#define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
-#define UCR1_IREN (1<<7) /* Infrared interface enable */
-#define UCR1_TXMPTYEN (1<<6) /* Transimitter empty interrupt enable */
-#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
-#define UCR1_SNDBRK (1<<4) /* Send break */
-#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
-#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
-#define UCR1_DOZE (1<<1) /* Doze */
-#define UCR1_UARTEN (1<<0) /* UART enabled */
-#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
-#define UCR2_IRTS (1<<14) /* Ignore RTS pin */
-#define UCR2_CTSC (1<<13) /* CTS pin control */
-#define UCR2_CTS (1<<12) /* Clear to send */
-#define UCR2_ESCEN (1<<11) /* Escape enable */
-#define UCR2_PREN (1<<8) /* Parity enable */
-#define UCR2_PROE (1<<7) /* Parity odd/even */
-#define UCR2_STPB (1<<6) /* Stop */
-#define UCR2_WS (1<<5) /* Word size */
-#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
-#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
-#define UCR2_TXEN (1<<2) /* Transmitter enabled */
-#define UCR2_RXEN (1<<1) /* Receiver enabled */
-#define UCR2_SRST (1<<0) /* SW reset */
-#define UCR3_DTREN (1<<13) /* DTR interrupt enable */
-#define UCR3_PARERREN (1<<12) /* Parity enable */
-#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
-#define UCR3_DSR (1<<10) /* Data set ready */
-#define UCR3_DCD (1<<9) /* Data carrier detect */
-#define UCR3_RI (1<<8) /* Ring indicator */
-#define UCR3_TIMEOUTEN (1<<7) /* Timeout interrupt enable */
-#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
-#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
-#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
-#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
-#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
-#define UCR3_BPEN (1<<0) /* Preset registers enable */
-#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
-#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
-#define UCR4_INVR (1<<9) /* Inverted infrared reception */
-#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
-#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
-#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
-#define UCR4_IRSC (1<<5) /* IR special case */
-#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
-#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
-#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
-#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
-#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
-#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
-#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
-#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
-#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
-#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
-#define USR1_RTSS (1<<14) /* RTS pin status */
-#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
-#define USR1_RTSD (1<<12) /* RTS delta */
-#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
-#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
-#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
-#define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */
-#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
-#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
-#define USR1_AWAKE (1<<4) /* Aysnc wake interrupt flag */
-#define USR2_ADET (1<<15) /* Auto baud rate detect complete */
-#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
-#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
-#define USR2_IDLE (1<<12) /* Idle condition */
-#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
-#define USR2_WAKE (1<<7) /* Wake */
-#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
-#define USR2_TXDC (1<<3) /* Transmitter complete */
-#define USR2_BRCD (1<<2) /* Break condition */
-#define USR2_ORE (1<<1) /* Overrun error */
-#define USR2_RDR (1<<0) /* Recv data ready */
-#define UTS_FRCPERR (1<<13) /* Force parity error */
-#define UTS_LOOP (1<<12) /* Loop tx and rx */
-#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
-#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
-#define UTS_TXFULL (1<<4) /* TxFIFO full */
-#define UTS_RXFULL (1<<3) /* RxFIFO full */
-#define UTS_SOFTRST (1<<0) /* Software reset */
+#define URXD_CHARRDY (1<<15)
+#define URXD_ERR (1<<14)
+#define URXD_OVRRUN (1<<13)
+#define URXD_FRMERR (1<<12)
+#define URXD_BRK (1<<11)
+#define URXD_PRERR (1<<10)
+#define UCR1_ADEN (1<<15) /* Auto detect interrupt */
+#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
+#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
+#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
+#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
+#define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
+#define UCR1_IREN (1<<7) /* Infrared interface enable */
+#define UCR1_TXMPTYEN (1<<6) /* Transimitter empty interrupt enable */
+#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
+#define UCR1_SNDBRK (1<<4) /* Send break */
+#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
+#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
+#define UCR1_DOZE (1<<1) /* Doze */
+#define UCR1_UARTEN (1<<0) /* UART enabled */
+#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
+#define UCR2_IRTS (1<<14) /* Ignore RTS pin */
+#define UCR2_CTSC (1<<13) /* CTS pin control */
+#define UCR2_CTS (1<<12) /* Clear to send */
+#define UCR2_ESCEN (1<<11) /* Escape enable */
+#define UCR2_PREN (1<<8) /* Parity enable */
+#define UCR2_PROE (1<<7) /* Parity odd/even */
+#define UCR2_STPB (1<<6) /* Stop */
+#define UCR2_WS (1<<5) /* Word size */
+#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
+#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
+#define UCR2_TXEN (1<<2) /* Transmitter enabled */
+#define UCR2_RXEN (1<<1) /* Receiver enabled */
+#define UCR2_SRST (1<<0) /* SW reset */
+#define UCR3_DTREN (1<<13) /* DTR interrupt enable */
+#define UCR3_PARERREN (1<<12) /* Parity enable */
+#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
+#define UCR3_DSR (1<<10) /* Data set ready */
+#define UCR3_DCD (1<<9) /* Data carrier detect */
+#define UCR3_RI (1<<8) /* Ring indicator */
+#define UCR3_TIMEOUTEN (1<<7) /* Timeout interrupt enable */
+#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
+#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
+#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
+#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
+#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
+#define UCR3_BPEN (1<<0) /* Preset registers enable */
+#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
+#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
+#define UCR4_INVR (1<<9) /* Inverted infrared reception */
+#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
+#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
+#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
+#define UCR4_IRSC (1<<5) /* IR special case */
+#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
+#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
+#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
+#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
+#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
+#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
+#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
+#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
+#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
+#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
+#define USR1_RTSS (1<<14) /* RTS pin status */
+#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
+#define USR1_RTSD (1<<12) /* RTS delta */
+#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
+#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
+#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
+#define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */
+#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
+#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
+#define USR1_AWAKE (1<<4) /* Aysnc wake interrupt flag */
+#define USR2_ADET (1<<15) /* Auto baud rate detect complete */
+#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
+#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
+#define USR2_IDLE (1<<12) /* Idle condition */
+#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
+#define USR2_WAKE (1<<7) /* Wake */
+#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
+#define USR2_TXDC (1<<3) /* Transmitter complete */
+#define USR2_BRCD (1<<2) /* Break condition */
+#define USR2_ORE (1<<1) /* Overrun error */
+#define USR2_RDR (1<<0) /* Recv data ready */
+#define UTS_FRCPERR (1<<13) /* Force parity error */
+#define UTS_LOOP (1<<12) /* Loop tx and rx */
+#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
+#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
+#define UTS_TXFULL (1<<4) /* TxFIFO full */
+#define UTS_RXFULL (1<<3) /* RxFIFO full */
+#define UTS_SOFTRST (1<<0) /* Software reset */
/* We've been assigned a range on the "Low-density serial ports" major */
-#define SERIAL_IMX_MAJOR 207
-#define MINOR_START 16
+#define SERIAL_IMX_MAJOR 207
+#define MINOR_START 16
#define DEV_NAME "ttymxc"
/*
@@ -199,7 +199,7 @@ struct imx_port {
struct uart_port port;
struct timer_list timer;
unsigned int old_status;
- int txirq,rxirq,rtsirq;
+ int txirq, rxirq, rtsirq;
unsigned int have_rtscts:1;
unsigned int use_irda:1;
unsigned int irda_inv_rx:1;
@@ -397,7 +397,7 @@ static void imx_stop_rx(struct uart_port *port)
unsigned long temp;
temp = readl(sport->port.membase + UCR2);
- writel(temp &~ UCR2_RXEN, sport->port.membase + UCR2);
+ writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
}
/*
@@ -490,9 +490,8 @@ static irqreturn_t imx_txint(int irq, void *dev_id)
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock,flags);
- if (sport->port.x_char)
- {
+ spin_lock_irqsave(&sport->port.lock, flags);
+ if (sport->port.x_char) {
/* Send next char */
writel(sport->port.x_char, sport->port.membase + URTX0);
goto out;
@@ -509,18 +508,18 @@ static irqreturn_t imx_txint(int irq, void *dev_id)
uart_write_wakeup(&sport->port);
out:
- spin_unlock_irqrestore(&sport->port.lock,flags);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t imx_rxint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
- unsigned int rx,flg,ignored = 0;
- struct tty_struct *tty = sport->port.state->port.tty;
+ unsigned int rx, flg, ignored = 0;
+ struct tty_port *port = &sport->port.state->port;
unsigned long flags, temp;
- spin_lock_irqsave(&sport->port.lock,flags);
+ spin_lock_irqsave(&sport->port.lock, flags);
while (readl(sport->port.membase + USR2) & USR2_RDR) {
flg = TTY_NORMAL;
@@ -570,12 +569,12 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
#endif
}
- tty_insert_flip_char(tty, rx, flg);
+ tty_insert_flip_char(port, rx, flg);
}
out:
- spin_unlock_irqrestore(&sport->port.lock,flags);
- tty_flip_buffer_push(tty);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ tty_flip_buffer_push(port);
return IRQ_HANDLED;
}
@@ -654,7 +653,7 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK;
- if ( break_state != 0 )
+ if (break_state != 0)
temp |= UCR1_SNDBRK;
writel(temp, sport->port.membase + UCR1);
@@ -696,8 +695,8 @@ static int imx_startup(struct uart_port *port)
temp |= UCR4_IRSC;
/* set the trigger level for CTS */
- temp &= ~(UCR4_CTSTL_MASK<< UCR4_CTSTL_SHF);
- temp |= CTSTL<< UCR4_CTSTL_SHF;
+ temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
+ temp |= CTSTL << UCR4_CTSTL_SHF;
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
@@ -799,7 +798,7 @@ static int imx_startup(struct uart_port *port)
* Enable modem status interrupts
*/
imx_enable_ms(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock,flags);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
if (USE_IRDA(sport)) {
struct imxuart_platform_data *pdata;
@@ -909,7 +908,7 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
ucr2 = UCR2_SRST | UCR2_IRTS;
if (termios->c_cflag & CRTSCTS) {
- if( sport->have_rtscts ) {
+ if (sport->have_rtscts) {
ucr2 &= ~UCR2_IRTS;
ucr2 |= UCR2_CTSC;
} else {
@@ -969,12 +968,12 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
sport->port.membase + UCR1);
- while ( !(readl(sport->port.membase + USR2) & USR2_TXDC))
+ while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
barrier();
/* then, disable everything */
old_txrxen = readl(sport->port.membase + UCR2);
- writel(old_txrxen & ~( UCR2_TXEN | UCR2_RXEN),
+ writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
sport->port.membase + UCR2);
old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
@@ -1212,9 +1211,15 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
struct imx_port *sport = imx_ports[co->index];
struct imx_port_ucrs old_ucr;
unsigned int ucr1;
- unsigned long flags;
+ unsigned long flags = 0;
+ int locked = 1;
- spin_lock_irqsave(&sport->port.lock, flags);
+ if (sport->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ else
+ spin_lock_irqsave(&sport->port.lock, flags);
/*
* First, save UCR1/2/3 and then disable interrupts
@@ -1241,7 +1246,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
imx_port_ucrs_restore(&sport->port, &old_ucr);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ if (locked)
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
/*
@@ -1255,7 +1261,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
/* ok, the port was enabled */
- unsigned int ucr2, ubir,ubmr, uartclk;
+ unsigned int ucr2, ubir, ubmr, uartclk;
unsigned int baud_raw;
unsigned int ucfr_rfdiv;
@@ -1301,8 +1307,8 @@ imx_console_get_options(struct imx_port *sport, int *baud,
*baud = (baud_raw + 50) / 100 * 100;
}
- if(*baud != baud_raw)
- printk(KERN_INFO "Serial: Console IMX rounded baud rate from %d to %d\n",
+ if (*baud != baud_raw)
+ pr_info("Console IMX rounded baud rate from %d to %d\n",
baud_raw, *baud);
}
}
@@ -1324,7 +1330,7 @@ imx_console_setup(struct console *co, char *options)
if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
co->index = 0;
sport = imx_ports[co->index];
- if(sport == NULL)
+ if (sport == NULL)
return -ENODEV;
if (options)
@@ -1462,7 +1468,7 @@ static int serial_imx_probe(struct platform_device *pdev)
struct resource *res;
struct pinctrl *pinctrl;
- sport = kzalloc(sizeof(*sport), GFP_KERNEL);
+ sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
if (!sport)
return -ENOMEM;
@@ -1470,19 +1476,15 @@ static int serial_imx_probe(struct platform_device *pdev)
if (ret > 0)
serial_imx_probe_pdata(sport, pdev);
else if (ret < 0)
- goto free;
+ return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto free;
- }
+ if (!res)
+ return -ENODEV;
- base = ioremap(res->start, PAGE_SIZE);
- if (!base) {
- ret = -ENOMEM;
- goto free;
- }
+ base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
+ if (!base)
+ return -ENOMEM;
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
@@ -1504,21 +1506,21 @@ static int serial_imx_probe(struct platform_device *pdev)
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
dev_err(&pdev->dev, "failed to get default pinctrl: %d\n", ret);
- goto unmap;
+ return ret;
}
sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->clk_ipg)) {
ret = PTR_ERR(sport->clk_ipg);
dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
- goto unmap;
+ return ret;
}
sport->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(sport->clk_per)) {
ret = PTR_ERR(sport->clk_per);
dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
- goto unmap;
+ return ret;
}
clk_prepare_enable(sport->clk_per);
@@ -1547,11 +1549,6 @@ deinit:
clkput:
clk_disable_unprepare(sport->clk_per);
clk_disable_unprepare(sport->clk_ipg);
-unmap:
- iounmap(sport->port.membase);
-free:
- kfree(sport);
-
return ret;
}
@@ -1572,9 +1569,6 @@ static int serial_imx_remove(struct platform_device *pdev)
if (pdata && pdata->exit)
pdata->exit(pdev);
- iounmap(sport->port.membase);
- kfree(sport);
-
return 0;
}
@@ -1596,7 +1590,7 @@ static int __init imx_serial_init(void)
{
int ret;
- printk(KERN_INFO "Serial: IMX driver\n");
+ pr_info("Serial: IMX driver\n");
ret = uart_register_driver(&imx_reg);
if (ret)
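The serial_imx_probe() hunks above trade open-coded ioremap()/kfree() unwinding for device-managed allocations, which is why the unmap/free error labels disappear from the probe and remove paths. A minimal sketch of that pattern for a hypothetical platform driver follows; the foo_* names are illustrative only and not part of this patch.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_uart {
	void __iomem *base;
};

static int foo_uart_probe(struct platform_device *pdev)
{
	struct foo_uart *fu;
	struct resource *res;

	/* freed automatically when the device is unbound or probe fails */
	fu = devm_kzalloc(&pdev->dev, sizeof(*fu), GFP_KERNEL);
	if (!fu)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* unmapped automatically, so early returns need no goto labels */
	fu->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!fu->base)
		return -ENOMEM;

	platform_set_drvdata(pdev, fu);
	return 0;
}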
diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index d8f1d1d54471..6e4c715c5d26 100644
--- a/drivers/tty/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
@@ -1000,7 +1000,7 @@ ioc3_change_speed(struct uart_port *the_port,
the_port->ignore_status_mask = N_ALL_INPUT;
- state->port.tty->low_latency = 1;
+ state->port.low_latency = 1;
if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~(N_PARITY_ERROR
@@ -1393,7 +1393,6 @@ static inline int do_read(struct uart_port *the_port, char *buf, int len)
*/
static int receive_chars(struct uart_port *the_port)
{
- struct tty_struct *tty;
unsigned char ch[MAX_CHARS];
int read_count = 0, read_room, flip = 0;
struct uart_state *state = the_port->state;
@@ -1403,25 +1402,23 @@ static int receive_chars(struct uart_port *the_port)
/* Make sure all the pointers are "good" ones */
if (!state)
return 0;
- if (!state->port.tty)
- return 0;
if (!(port->ip_flags & INPUT_ENABLE))
return 0;
spin_lock_irqsave(&the_port->lock, pflags);
- tty = state->port.tty;
read_count = do_read(the_port, ch, MAX_CHARS);
if (read_count > 0) {
flip = 1;
- read_room = tty_insert_flip_string(tty, ch, read_count);
+ read_room = tty_insert_flip_string(&state->port, ch,
+ read_count);
the_port->icount.rx += read_count;
}
spin_unlock_irqrestore(&the_port->lock, pflags);
if (flip)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&state->port);
return read_count;
}
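The ioc3 hunk above is one instance of the conversion that runs through the rest of this series: tty_insert_flip_string(), tty_insert_flip_char() and tty_flip_buffer_push() now take the struct tty_port embedded in the uart_state, so RX paths no longer fetch and NULL-check a tty_struct. A generic sketch of the converted receive loop, with foo_rx_ready()/foo_read_char() standing in for device-specific register accessors that are assumed rather than taken from any driver here:

#include <linux/serial_core.h>
#include <linux/tty_flip.h>

/* device-specific register accessors, assumed to exist elsewhere */
bool foo_rx_ready(struct uart_port *up);
unsigned char foo_read_char(struct uart_port *up);

static void foo_receive_chars(struct uart_port *up)
{
	struct tty_port *tport = &up->state->port;
	unsigned char ch;

	while (foo_rx_ready(up)) {
		ch = foo_read_char(up);
		up->icount.rx++;
		/* a flag other than TTY_NORMAL would mark parity/frame/break */
		tty_insert_flip_char(tport, ch, TTY_NORMAL);
	}
	/* hand everything queued so far to the line discipline */
	tty_flip_buffer_push(tport);
}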
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index 3e7da10cebba..e2520abcb1c4 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -1740,7 +1740,7 @@ ioc4_change_speed(struct uart_port *the_port,
the_port->ignore_status_mask = N_ALL_INPUT;
- state->port.tty->low_latency = 1;
+ state->port.low_latency = 1;
if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~(N_PARITY_ERROR
@@ -2340,7 +2340,6 @@ static inline int do_read(struct uart_port *the_port, unsigned char *buf,
*/
static void receive_chars(struct uart_port *the_port)
{
- struct tty_struct *tty;
unsigned char ch[IOC4_MAX_CHARS];
int read_count, request_count = IOC4_MAX_CHARS;
struct uart_icount *icount;
@@ -2350,26 +2349,23 @@ static void receive_chars(struct uart_port *the_port)
/* Make sure all the pointers are "good" ones */
if (!state)
return;
- if (!state->port.tty)
- return;
spin_lock_irqsave(&the_port->lock, pflags);
- tty = state->port.tty;
- request_count = tty_buffer_request_room(tty, IOC4_MAX_CHARS);
+ request_count = tty_buffer_request_room(&state->port, IOC4_MAX_CHARS);
if (request_count > 0) {
icount = &the_port->icount;
read_count = do_read(the_port, ch, request_count);
if (read_count > 0) {
- tty_insert_flip_string(tty, ch, read_count);
+ tty_insert_flip_string(&state->port, ch, read_count);
icount->rx += read_count;
}
}
spin_unlock_irqrestore(&the_port->lock, pflags);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&state->port);
}
/**
@@ -2883,6 +2879,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
/* error exits that give back resources */
out5:
ioc4_serial_remove_one(idd);
+ return ret;
out4:
kfree(soft);
out3:
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 7b1cda59ebb5..cb3c81eb0996 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -248,17 +248,12 @@ static void ip22zilog_maybe_update_regs(struct uart_ip22zilog_port *up,
#define Rx_BRK 0x0100 /* BREAK event software flag. */
#define Rx_SYS 0x0200 /* SysRq event software flag. */
-static struct tty_struct *ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
+static bool ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
struct zilog_channel *channel)
{
- struct tty_struct *tty;
unsigned char ch, flag;
unsigned int r1;
-
- tty = NULL;
- if (up->port.state != NULL &&
- up->port.state->port.tty != NULL)
- tty = up->port.state->port.tty;
+ bool push = up->port.state != NULL;
for (;;) {
ch = readb(&channel->control);
@@ -312,10 +307,10 @@ static struct tty_struct *ip22zilog_receive_chars(struct uart_ip22zilog_port *up
if (uart_handle_sysrq_char(&up->port, ch))
continue;
- if (tty)
+ if (push)
uart_insert_char(&up->port, r1, Rx_OVR, ch, flag);
}
- return tty;
+ return push;
}
static void ip22zilog_status_handle(struct uart_ip22zilog_port *up,
@@ -438,21 +433,20 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
while (up) {
struct zilog_channel *channel
= ZILOG_CHANNEL_FROM_PORT(&up->port);
- struct tty_struct *tty;
unsigned char r3;
+ bool push = false;
spin_lock(&up->port.lock);
r3 = read_zsreg(channel, R3);
/* Channel A */
- tty = NULL;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHARxIP)
- tty = ip22zilog_receive_chars(up, channel);
+ push = ip22zilog_receive_chars(up, channel);
if (r3 & CHAEXT)
ip22zilog_status_handle(up, channel);
if (r3 & CHATxIP)
@@ -460,22 +454,22 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
}
spin_unlock(&up->port.lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&up->port.state->port);
/* Channel B */
up = up->next;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ push = false;
spin_lock(&up->port.lock);
- tty = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHBRxIP)
- tty = ip22zilog_receive_chars(up, channel);
+ push = ip22zilog_receive_chars(up, channel);
if (r3 & CHBEXT)
ip22zilog_status_handle(up, channel);
if (r3 & CHBTxIP)
@@ -483,8 +477,8 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
}
spin_unlock(&up->port.lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&up->port.state->port);
up = up->next;
}
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 4c00c5550b1a..00f250ae14c5 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -521,6 +521,7 @@ void jsm_input(struct jsm_channel *ch)
{
struct jsm_board *bd;
struct tty_struct *tp;
+ struct tty_port *port;
u32 rmask;
u16 head;
u16 tail;
@@ -536,7 +537,8 @@ void jsm_input(struct jsm_channel *ch)
if (!ch)
return;
- tp = ch->uart_port.state->port.tty;
+ port = &ch->uart_port.state->port;
+ tp = port->tty;
bd = ch->ch_bd;
if(!bd)
@@ -600,7 +602,7 @@ void jsm_input(struct jsm_channel *ch)
return;
}
- len = tty_buffer_request_room(tp, data_len);
+ len = tty_buffer_request_room(port, data_len);
n = len;
/*
@@ -629,16 +631,16 @@ void jsm_input(struct jsm_channel *ch)
* format it likes.
*/
if (*(ch->ch_equeue +tail +i) & UART_LSR_BI)
- tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_BREAK);
+ tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_BREAK);
else if (*(ch->ch_equeue +tail +i) & UART_LSR_PE)
- tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_PARITY);
+ tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_PARITY);
else if (*(ch->ch_equeue +tail +i) & UART_LSR_FE)
- tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_FRAME);
+ tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_FRAME);
else
- tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_NORMAL);
+ tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_NORMAL);
}
} else {
- tty_insert_flip_string(tp, ch->ch_rqueue + tail, s) ;
+ tty_insert_flip_string(port, ch->ch_rqueue + tail, s);
}
tail += s;
n -= s;
@@ -652,7 +654,7 @@ void jsm_input(struct jsm_channel *ch)
spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
/* Tell the tty layer its okay to "eat" the data now */
- tty_flip_buffer_push(tp);
+ tty_flip_buffer_push(port);
jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
}
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 6ac2b797a764..5dafcf1c227b 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -23,6 +23,7 @@
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
@@ -202,7 +203,6 @@ bool kgdb_nmi_poll_knock(void)
static void kgdb_nmi_tty_receiver(unsigned long data)
{
struct kgdb_nmi_tty_priv *priv = (void *)data;
- struct tty_struct *tty;
char ch;
tasklet_schedule(&priv->tlet);
@@ -210,16 +210,9 @@ static void kgdb_nmi_tty_receiver(unsigned long data)
if (likely(!kgdb_nmi_tty_enabled || !kfifo_len(&priv->fifo)))
return;
- /* Port is there, but tty might be hung up, check. */
- tty = tty_port_tty_get(kgdb_nmi_port);
- if (!tty)
- return;
-
while (kfifo_out(&priv->fifo, &ch, 1))
- tty_insert_flip_char(priv->port.tty, ch, TTY_NORMAL);
- tty_flip_buffer_push(priv->port.tty);
-
- tty_kref_put(tty);
+ tty_insert_flip_char(&priv->port, ch, TTY_NORMAL);
+ tty_flip_buffer_push(&priv->port);
}
static int kgdb_nmi_tty_activate(struct tty_port *port, struct tty_struct *tty)
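With the flip-buffer helpers keyed off the tty_port, the kgdb_nmi tasklet above can drain its kfifo even while no tty is attached; buffered bytes are simply delivered once the port is opened. A hedged sketch of the same idea, where the port/fifo pairing is assumed rather than copied from this driver:

#include <linux/kfifo.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void foo_drain_fifo(struct tty_port *port, struct kfifo *fifo)
{
	char ch;

	/* move every buffered byte into the port's flip buffer */
	while (kfifo_out(fifo, &ch, 1))
		tty_insert_flip_char(port, ch, TTY_NORMAL);

	tty_flip_buffer_push(port);
}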
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 02da071fe1e7..15733da757c6 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -162,21 +162,16 @@ lqasc_enable_ms(struct uart_port *port)
static int
lqasc_rx_chars(struct uart_port *port)
{
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ struct tty_port *tport = &port->state->port;
unsigned int ch = 0, rsr = 0, fifocnt;
- if (!tty) {
- dev_dbg(port->dev, "%s:tty is busy now", __func__);
- return -EBUSY;
- }
- fifocnt =
- ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
+ fifocnt = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
while (fifocnt--) {
u8 flag = TTY_NORMAL;
ch = ltq_r8(port->membase + LTQ_ASC_RBUF);
rsr = (ltq_r32(port->membase + LTQ_ASC_STATE)
& ASCSTATE_ANY) | UART_DUMMY_UER_RX;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
port->icount.rx++;
/*
@@ -208,7 +203,7 @@ lqasc_rx_chars(struct uart_port *port)
}
if ((rsr & port->ignore_status_mask) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
if (rsr & ASCSTATE_ROE)
/*
@@ -216,11 +211,12 @@ lqasc_rx_chars(struct uart_port *port)
* immediately, and doesn't affect the current
* character
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
}
+
if (ch != 0)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(tport);
+
return 0;
}
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index 0e86bff3fe2a..dffea6b2cd7d 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -257,17 +257,8 @@ static void __serial_uart_flush(struct uart_port *port)
static void __serial_lpc32xx_rx(struct uart_port *port)
{
+ struct tty_port *tport = &port->state->port;
unsigned int tmp, flag;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
-
- if (!tty) {
- /* Discard data: no tty available */
- while (!(readl(LPC32XX_HSUART_FIFO(port->membase)) &
- LPC32XX_HSU_RX_EMPTY))
- ;
-
- return;
- }
/* Read data from FIFO and push into terminal */
tmp = readl(LPC32XX_HSUART_FIFO(port->membase));
@@ -281,15 +272,14 @@ static void __serial_lpc32xx_rx(struct uart_port *port)
LPC32XX_HSUART_IIR(port->membase));
port->icount.frame++;
flag = TTY_FRAME;
- tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_insert_flip_char(tport, 0, TTY_FRAME);
}
- tty_insert_flip_char(tty, (tmp & 0xFF), flag);
+ tty_insert_flip_char(tport, (tmp & 0xFF), flag);
tmp = readl(LPC32XX_HSUART_FIFO(port->membase));
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(tport);
}
static void __serial_lpc32xx_tx(struct uart_port *port)
@@ -332,7 +322,7 @@ exit_tx:
static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ struct tty_port *tport = &port->state->port;
u32 status;
spin_lock(&port->lock);
@@ -356,17 +346,14 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
writel(LPC32XX_HSU_RX_OE_INT,
LPC32XX_HSUART_IIR(port->membase));
port->icount.overrun++;
- if (tty) {
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_schedule_flip(tty);
- }
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ tty_schedule_flip(tport);
}
/* Data received? */
if (status & (LPC32XX_HSU_RX_TIMEOUT_INT | LPC32XX_HSU_RX_TRIG_INT)) {
__serial_lpc32xx_rx(port);
- if (tty)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
}
/* Transmit data request? */
@@ -376,7 +363,6 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
}
spin_unlock(&port->lock);
- tty_kref_put(tty);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index b13949ad3408..bb1afa0922e1 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -300,7 +300,7 @@ static void m32r_sio_enable_ms(struct uart_port *port)
static void receive_chars(struct uart_sio_port *up, int *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
+ struct tty_port *port = &up->port.state->port;
unsigned char ch;
unsigned char flag;
int max_count = 256;
@@ -355,7 +355,7 @@ static void receive_chars(struct uart_sio_port *up, int *status)
if (uart_handle_sysrq_char(&up->port, ch))
goto ignore_char;
if ((*status & up->port.ignore_status_mask) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
if (*status & UART_LSR_OE) {
/*
@@ -363,12 +363,12 @@ static void receive_chars(struct uart_sio_port *up, int *status)
* immediately, and doesn't affect the current
* character.
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
static void transmit_chars(struct uart_sio_port *up)
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 7ce3197087bb..32517d4bceab 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -179,8 +179,7 @@ static void max3100_work(struct work_struct *w);
static void max3100_dowork(struct max3100_port *s)
{
- if (!s->force_end_work && !work_pending(&s->work) &&
- !freezing(current) && !s->suspending)
+ if (!s->force_end_work && !freezing(current) && !s->suspending)
queue_work(s->workqueue, &s->work);
}
@@ -311,8 +310,8 @@ static void max3100_work(struct work_struct *w)
}
}
- if (rxchars > 16 && s->port.state->port.tty != NULL) {
- tty_flip_buffer_push(s->port.state->port.tty);
+ if (rxchars > 16) {
+ tty_flip_buffer_push(&s->port.state->port);
rxchars = 0;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -324,8 +323,8 @@ static void max3100_work(struct work_struct *w)
(!uart_circ_empty(xmit) &&
!uart_tx_stopped(&s->port))));
- if (rxchars > 0 && s->port.state->port.tty != NULL)
- tty_flip_buffer_push(s->port.state->port.tty);
+ if (rxchars > 0)
+ tty_flip_buffer_push(&s->port.state->port);
}
static irqreturn_t max3100_irq(int irqno, void *dev_id)
@@ -530,7 +529,7 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios,
MAX3100_STATUS_OE;
/* we are sending char from a workqueue so enable */
- s->port.state->port.tty->low_latency = 1;
+ s->port.state->port.low_latency = 1;
if (s->poll_time > 0)
del_timer_sync(&s->timer);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index a801f6872cad..0c2422cb04ea 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -460,10 +460,6 @@ static int max310x_set_ref_clk(struct max310x_port *s)
static void max310x_handle_rx(struct max310x_port *s, unsigned int rxlen)
{
unsigned int sts = 0, ch = 0, flag;
- struct tty_struct *tty = tty_port_tty_get(&s->port.state->port);
-
- if (!tty)
- return;
if (unlikely(rxlen >= MAX310X_FIFO_SIZE)) {
dev_warn(s->port.dev, "Possible RX FIFO overrun %d\n", rxlen);
@@ -516,9 +512,7 @@ static void max310x_handle_rx(struct max310x_port *s, unsigned int rxlen)
ch, flag);
}
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&s->port.state->port);
}
static void max310x_handle_tx(struct max310x_port *s)
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index fcd56ab6053f..e956377a38fe 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -23,6 +23,7 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/io.h>
+#include <linux/uaccess.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
@@ -55,6 +56,7 @@ struct mcf_uart {
struct uart_port port;
unsigned int sigs; /* Local copy of line sigs */
unsigned char imr; /* Local IMR mirror */
+ struct serial_rs485 rs485; /* RS485 settings */
};
/****************************************************************************/
@@ -101,6 +103,12 @@ static void mcf_start_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ if (pp->rs485.flags & SER_RS485_ENABLED) {
+ /* Enable Transmitter */
+ writeb(MCFUART_UCR_TXENABLE, port->membase + MCFUART_UCR);
+ /* Manually assert RTS */
+ writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
+ }
pp->imr |= MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
@@ -196,6 +204,7 @@ static void mcf_shutdown(struct uart_port *port)
static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
unsigned int baud, baudclk;
#if defined(CONFIG_M5272)
@@ -248,6 +257,11 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
mr2 |= MCFUART_MR2_TXCTS;
}
+ if (pp->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ mr2 |= MCFUART_MR2_TXRTS;
+ }
+
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
@@ -310,7 +324,7 @@ static void mcf_rx_chars(struct mcf_uart *pp)
uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag);
}
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
}
/****************************************************************************/
@@ -342,6 +356,10 @@ static void mcf_tx_chars(struct mcf_uart *pp)
if (xmit->head == xmit->tail) {
pp->imr &= ~MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
+ /* Disable TX to negate RTS automatically */
+ if (pp->rs485.flags & SER_RS485_ENABLED)
+ writeb(MCFUART_UCR_TXDISABLE,
+ port->membase + MCFUART_UCR);
}
}
@@ -418,6 +436,58 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/****************************************************************************/
+/* Enable or disable the RS485 support */
+static void mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ unsigned long flags;
+ unsigned char mr1, mr2;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* Get mode registers */
+ mr1 = readb(port->membase + MCFUART_UMR);
+ mr2 = readb(port->membase + MCFUART_UMR);
+ if (rs485->flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ /* Automatically negate RTS after TX completes */
+ mr2 |= MCFUART_MR2_TXRTS;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ mr2 &= ~MCFUART_MR2_TXRTS;
+ }
+ writeb(mr1, port->membase + MCFUART_UMR);
+ writeb(mr2, port->membase + MCFUART_UMR);
+ pp->rs485 = *rs485;
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int mcf_ioctl(struct uart_port *port, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case TIOCSRS485: {
+ struct serial_rs485 rs485;
+ if (copy_from_user(&rs485, (struct serial_rs485 *)arg,
+ sizeof(struct serial_rs485)))
+ return -EFAULT;
+ mcf_config_rs485(port, &rs485);
+ break;
+ }
+ case TIOCGRS485: {
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ if (copy_to_user((struct serial_rs485 *)arg, &pp->rs485,
+ sizeof(struct serial_rs485)))
+ return -EFAULT;
+ break;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/****************************************************************************/
+
/*
* Define the basic serial functions we support.
*/
@@ -438,6 +508,7 @@ static const struct uart_ops mcf_uart_ops = {
.release_port = mcf_release_port,
.config_port = mcf_config_port,
.verify_port = mcf_verify_port,
+ .ioctl = mcf_ioctl,
};
static struct mcf_uart mcf_ports[4];
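The mcf additions above expose RS485 control through the standard TIOCSRS485/TIOCGRS485 ioctls. From user space, switching a port into RS485 mode then looks roughly like the sketch below; the device path and error handling are illustrative only.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

/* Returns an open fd with RS485 enabled, or -1 on failure. */
int open_rs485(const char *dev)		/* e.g. "/dev/ttyS0" */
{
	struct serial_rs485 rs485;
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED;	/* driver toggles RTS around TX */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}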
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 2c01344dc332..5f4765a7a5c5 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -387,12 +387,9 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
struct hsu_dma_buffer *dbuf = &up->rxbuf;
struct hsu_dma_chan *chan = up->rxc;
struct uart_port *port = &up->port;
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
int count;
- if (!tty)
- return;
-
/*
* First need to know how many is already transferred,
* then check if its a timeout DMA irq, and return
@@ -423,7 +420,7 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
- tty_insert_flip_string(tty, dbuf->buf, count);
+ tty_insert_flip_string(tport, dbuf->buf, count);
port->icount.rx += count;
dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
@@ -437,7 +434,7 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
| (0x1 << 16)
| (0x1 << 24) /* timeout bit, see HSU Errata 1 */
);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
chan_writel(chan, HSU_CH_CR, 0x3);
@@ -460,13 +457,9 @@ static void serial_hsu_stop_rx(struct uart_port *port)
static inline void receive_chars(struct uart_hsu_port *up, int *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
unsigned int ch, flag;
unsigned int max_count = 256;
- if (!tty)
- return;
-
do {
ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
@@ -522,7 +515,7 @@ static inline void receive_chars(struct uart_hsu_port *up, int *status)
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && max_count--);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
}
static void transmit_chars(struct uart_hsu_port *up)
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 7c23c4f4c58d..c0e1fad51be7 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -941,7 +941,7 @@ static struct uart_ops mpc52xx_uart_ops = {
static inline int
mpc52xx_uart_int_rx_chars(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
unsigned char ch, flag;
unsigned short status;
@@ -986,20 +986,20 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
out_8(&PSC(port)->command, MPC52xx_PSC_RST_ERR_STAT);
}
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
if (status & MPC52xx_PSC_SR_OE) {
/*
* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
port->icount.overrun++;
}
}
spin_unlock(&port->lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
spin_lock(&port->lock);
return psc_ops->raw_rx_rdy(port);
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 6a9c6605666a..bc24f4931670 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -937,7 +937,7 @@ static int serial_polled;
static int mpsc_rx_intr(struct mpsc_port_info *pi)
{
struct mpsc_rx_desc *rxre;
- struct tty_struct *tty = pi->port.state->port.tty;
+ struct tty_port *port = &pi->port.state->port;
u32 cmdstat, bytes_in, i;
int rc = 0;
u8 *bp;
@@ -968,10 +968,9 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
}
#endif
/* Following use of tty struct directly is deprecated */
- if (unlikely(tty_buffer_request_room(tty, bytes_in)
- < bytes_in)) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
+ if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
+ if (port->low_latency)
+ tty_flip_buffer_push(port);
/*
* If this failed then we will throw away the bytes
* but must do so to clear interrupts.
@@ -1040,10 +1039,10 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
| SDMA_DESC_CMDSTAT_FR
| SDMA_DESC_CMDSTAT_OR)))
&& !(cmdstat & pi->port.ignore_status_mask)) {
- tty_insert_flip_char(tty, *bp, flag);
+ tty_insert_flip_char(port, *bp, flag);
} else {
for (i=0; i<bytes_in; i++)
- tty_insert_flip_char(tty, *bp++, TTY_NORMAL);
+ tty_insert_flip_char(port, *bp++, TTY_NORMAL);
pi->port.icount.rx += bytes_in;
}
@@ -1081,7 +1080,7 @@ next_frame:
if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
mpsc_start_rx(pi);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
return rc;
}
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 58734d7e746d..f641c232beca 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -339,7 +339,7 @@ static int
receive_chars(struct uart_max3110 *max, unsigned short *str, int len)
{
struct uart_port *port = &max->port;
- struct tty_struct *tty;
+ struct tty_port *tport;
char buf[M3110_RX_FIFO_DEPTH];
int r, w, usable;
@@ -347,9 +347,7 @@ receive_chars(struct uart_max3110 *max, unsigned short *str, int len)
if (!port->state)
return 0;
- tty = tty_port_tty_get(&port->state->port);
- if (!tty)
- return 0;
+ tport = &port->state->port;
for (r = 0, w = 0; r < len; r++) {
if (str[r] & MAX3110_BREAK &&
@@ -364,20 +362,17 @@ receive_chars(struct uart_max3110 *max, unsigned short *str, int len)
}
}
- if (!w) {
- tty_kref_put(tty);
+ if (!w)
return 0;
- }
for (r = 0; w; r += usable, w -= usable) {
- usable = tty_buffer_request_room(tty, w);
+ usable = tty_buffer_request_room(tport, w);
if (usable) {
- tty_insert_flip_string(tty, buf + r, usable);
+ tty_insert_flip_string(tport, buf + r, usable);
port->icount.rx += usable;
}
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(tport);
return r;
}
@@ -493,7 +488,7 @@ static int serial_m3110_startup(struct uart_port *port)
| WC_BAUD_DR2;
/* as we use thread to handle tx/rx, need set low latency */
- port->state->port.tty->low_latency = 1;
+ port->state->port.low_latency = 1;
if (max->irq) {
max->read_thread = NULL;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 95fd39be2934..b11e99797fd8 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -91,14 +91,14 @@ static void msm_enable_ms(struct uart_port *port)
static void handle_rx_dm(struct uart_port *port, unsigned int misr)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
unsigned int sr;
int count = 0;
struct msm_port *msm_port = UART_TO_MSM(port);
if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
}
@@ -132,12 +132,12 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
port->icount.frame++;
/* TODO: handle sysrq */
- tty_insert_flip_string(tty, (char *) &c,
+ tty_insert_flip_string(tport, (char *)&c,
(count > 4) ? 4 : count);
count -= 4;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
if (misr & (UART_IMR_RXSTALE))
msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
@@ -146,7 +146,7 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
static void handle_rx(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
unsigned int sr;
/*
@@ -155,7 +155,7 @@ static void handle_rx(struct uart_port *port)
*/
if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
}
@@ -186,10 +186,10 @@ static void handle_rx(struct uart_port *port)
}
if (!uart_handle_sysrq_char(port, c))
- tty_insert_flip_char(tty, c, flag);
+ tty_insert_flip_char(tport, c, flag);
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
}
static void reset_dm_count(struct uart_port *port)
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 1fa92284ade0..4a942c78347e 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -908,6 +908,7 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
unsigned long flags;
unsigned int flush;
struct tty_struct *tty;
+ struct tty_port *port;
struct uart_port *uport;
struct msm_hs_port *msm_uport;
@@ -917,7 +918,8 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
spin_lock_irqsave(&uport->lock, flags);
clk_enable(msm_uport->clk);
- tty = uport->state->port.tty;
+ port = &uport->state->port;
+ tty = port->tty;
msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
@@ -926,7 +928,7 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
/* overflow is not connect to data in a FIFO */
if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
(uport->read_status_mask & CREAD))) {
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
uport->icount.buf_overrun++;
error_f = 1;
}
@@ -939,7 +941,7 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
uport->icount.parity++;
error_f = 1;
if (uport->ignore_status_mask & IGNPAR)
- tty_insert_flip_char(tty, 0, TTY_PARITY);
+ tty_insert_flip_char(port, 0, TTY_PARITY);
}
if (error_f)
@@ -959,7 +961,7 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
if (0 != (uport->read_status_mask & CREAD)) {
- retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
+ retval = tty_insert_flip_string(port, msm_uport->rx.buffer,
rx_count);
BUG_ON(retval != rx_count);
}
@@ -979,9 +981,8 @@ static void msm_hs_tty_flip_buffer_work(struct work_struct *work)
{
struct msm_hs_port *msm_uport =
container_of(work, struct msm_hs_port, rx.tty_work);
- struct tty_struct *tty = msm_uport->uport.state->port.tty;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&msm_uport->uport.state->port);
}
/*
@@ -1344,7 +1345,6 @@ static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
unsigned long flags;
struct msm_hs_port *msm_uport = dev;
struct uart_port *uport = &msm_uport->uport;
- struct tty_struct *tty = NULL;
spin_lock_irqsave(&uport->lock, flags);
if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
@@ -1361,8 +1361,7 @@ static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
* optionally inject char into tty rx */
msm_hs_request_clock_on_locked(uport);
if (msm_uport->rx_wakeup.inject_rx) {
- tty = uport->state->port.tty;
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(&uport->state->port,
msm_uport->rx_wakeup.rx_to_inject,
TTY_NORMAL);
queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
@@ -1400,7 +1399,7 @@ static int msm_hs_startup(struct uart_port *uport)
/* do not let tty layer execute RX in global workqueue, use a
* dedicated workqueue managed by this driver */
- uport->state->port.tty->low_latency = 1;
+ uport->state->port.low_latency = 1;
/* turn on uart clk */
ret = msm_hs_init_clk_locked(uport);
diff --git a/drivers/tty/serial/msm_smd_tty.c b/drivers/tty/serial/msm_smd_tty.c
index 925d1fa153db..e722ff163d91 100644
--- a/drivers/tty/serial/msm_smd_tty.c
+++ b/drivers/tty/serial/msm_smd_tty.c
@@ -70,7 +70,7 @@ static void smd_tty_notify(void *priv, unsigned event)
if (avail == 0)
break;
- avail = tty_prepare_flip_string(tty, &ptr, avail);
+ avail = tty_prepare_flip_string(&info->port, &ptr, avail);
if (smd_read(info->ch, ptr, avail) != avail) {
/* shouldn't be possible since we're in interrupt
@@ -80,7 +80,7 @@ static void smd_tty_notify(void *priv, unsigned event)
pr_err("OOPS - smd_tty_buffer mismatch?!");
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
/* XXX only when writable and necessary */
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index e2775b6df5a5..7fd6aaaacd8e 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -242,8 +242,8 @@ static void mux_write(struct uart_port *port)
*/
static void mux_read(struct uart_port *port)
{
+ struct tty_port *tport = &port->state->port;
int data;
- struct tty_struct *tty = port->state->port.tty;
__u32 start_count = port->icount.rx;
while(1) {
@@ -266,12 +266,11 @@ static void mux_read(struct uart_port *port)
if (uart_handle_sysrq_char(port, data & 0xffu))
continue;
- tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
+ tty_insert_flip_char(tport, data & 0xFF, TTY_NORMAL);
}
- if (start_count != port->icount.rx) {
- tty_flip_buffer_push(tty);
- }
+ if (start_count != port->icount.rx)
+ tty_flip_buffer_push(tport);
}
/**
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 6db23b035efe..d549fe1fa42a 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -253,7 +253,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
struct circ_buf *xmit = &s->port.state->xmit;
if (auart_dma_enabled(s)) {
- int i = 0;
+ u32 i = 0;
int size;
void *buffer = s->tx_dma_buf;
@@ -364,7 +364,6 @@ out:
static void mxs_auart_rx_chars(struct mxs_auart_port *s)
{
- struct tty_struct *tty = s->port.state->port.tty;
u32 stat = 0;
for (;;) {
@@ -375,7 +374,7 @@ static void mxs_auart_rx_chars(struct mxs_auart_port *s)
}
writel(stat, s->port.membase + AUART_STAT);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&s->port.state->port);
}
static int mxs_auart_request_port(struct uart_port *u)
@@ -412,10 +411,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
u32 ctrl = readl(u->membase + AUART_CTRL2);
- ctrl &= ~AUART_CTRL2_RTSEN;
+ ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS);
if (mctrl & TIOCM_RTS) {
if (tty_port_cts_enabled(&u->state->port))
ctrl |= AUART_CTRL2_RTSEN;
+ else
+ ctrl |= AUART_CTRL2_RTS;
}
s->ctrl = mctrl;
@@ -457,7 +458,7 @@ static int mxs_auart_dma_prep_rx(struct mxs_auart_port *s);
static void dma_rx_callback(void *arg)
{
struct mxs_auart_port *s = (struct mxs_auart_port *) arg;
- struct tty_struct *tty = s->port.state->port.tty;
+ struct tty_port *port = &s->port.state->port;
int count;
u32 stat;
@@ -468,10 +469,10 @@ static void dma_rx_callback(void *arg)
AUART_STAT_PERR | AUART_STAT_FERR);
count = stat & AUART_STAT_RXCOUNT_MASK;
- tty_insert_flip_string(tty, s->rx_dma_buf, count);
+ tty_insert_flip_string(port, s->rx_dma_buf, count);
writel(stat, s->port.membase + AUART_STAT);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
/* start the next DMA for RX. */
mxs_auart_dma_prep_rx(s);
@@ -550,7 +551,7 @@ static int mxs_auart_dma_init(struct mxs_auart_port *s)
return 0;
/* We do not get the right DMA channels. */
- if (s->dma_channel_rx == -1 || s->dma_channel_rx == -1)
+ if (s->dma_channel_rx == -1 || s->dma_channel_tx == -1)
return -EINVAL;
/* init for RX */
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index d40da78e7c85..b9a40ed70be2 100644
--- a/drivers/tty/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
@@ -199,7 +199,6 @@ static void netx_txint(struct uart_port *port)
static void netx_rxint(struct uart_port *port)
{
unsigned char rx, flg, status;
- struct tty_struct *tty = port->state->port.tty;
while (!(readl(port->membase + UART_FR) & FR_RXFE)) {
rx = readl(port->membase + UART_DR);
@@ -237,8 +236,7 @@ static void netx_rxint(struct uart_port *port)
uart_insert_char(port, status, SR_OE, rx, flg);
}
- tty_flip_buffer_push(tty);
- return;
+ tty_flip_buffer_push(&port->state->port);
}
static irqreturn_t netx_int(int irq, void *dev_id)
diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index dd4c31d1aee5..77287c54f331 100644
--- a/drivers/tty/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
@@ -128,7 +128,7 @@ static void nwpserial_config_port(struct uart_port *port, int flags)
static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
{
struct nwpserial_port *up = dev_id;
- struct tty_struct *tty = up->port.state->port.tty;
+ struct tty_port *port = &up->port.state->port;
irqreturn_t ret;
unsigned int iir;
unsigned char ch;
@@ -146,10 +146,10 @@ static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
up->port.icount.rx++;
ch = dcr_read(up->dcr_host, UART_RX);
if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID)
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ tty_insert_flip_char(port, ch, TTY_NORMAL);
} while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
ret = IRQ_HANDLED;
/* clear interrupt */
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index e7cae1c2d7d2..d5874605682b 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -18,7 +18,6 @@
#include <linux/serial_reg.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_serial.h>
#include <linux/of_platform.h>
#include <linux/nwpserial.h>
#include <linux/clk.h>
@@ -45,8 +44,10 @@ void tegra_serial_handle_break(struct uart_port *p)
udelay(1);
} while (1);
}
-/* FIXME remove this export when tegra finishes conversion to open firmware */
-EXPORT_SYMBOL_GPL(tegra_serial_handle_break);
+#else
+static inline void tegra_serial_handle_break(struct uart_port *port)
+{
+}
#endif
/*
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 23f797eb7a28..4dc41408ecb7 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -41,8 +41,7 @@
#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/pinctrl/consumer.h>
-
-#include <plat/omap-serial.h>
+#include <linux/platform_data/serial-omap.h>
#define OMAP_MAX_HSUART_PORTS 6
@@ -60,6 +59,7 @@
/* SCR register bitmasks */
#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
+#define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK (1 << 6)
#define OMAP_UART_SCR_TX_EMPTY (1 << 3)
/* FCR register bitmasks */
@@ -233,24 +233,42 @@ static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
}
/*
+ * serial_omap_baud_is_mode16 - check if baud rate is MODE16X
+ * @port: uart port info
+ * @baud: baudrate for which mode needs to be determined
+ *
+ * Returns true if baud rate is MODE16X and false if MODE13X
+ * The table "UART Mode Baud Rates, Divisor Values, and Error Rates" in
+ * the OMAP TRM does not cover all common baud rates; e.g. a 1000000
+ * baud rate must use 16x mode, but that table would select 13x.
+ */
+static bool
+serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
+{
+ unsigned int n13 = port->uartclk / (13 * baud);
+ unsigned int n16 = port->uartclk / (16 * baud);
+ int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
+ int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
+ if (baudAbsDiff13 < 0)
+ baudAbsDiff13 = -baudAbsDiff13;
+ if (baudAbsDiff16 < 0)
+ baudAbsDiff16 = -baudAbsDiff16;
+
+ return (baudAbsDiff13 > baudAbsDiff16);
+}
+
+/*
* serial_omap_get_divisor - calculate divisor value
* @port: uart port info
* @baud: baudrate for which divisor needs to be calculated.
- *
- * We have written our own function to get the divisor so as to support
- * 13x mode. 3Mbps Baudrate as an different divisor.
- * Reference OMAP TRM Chapter 17:
- * Table 17-1. UART Mode Baud Rates, Divisor Values, and Error Rates
- * referring to oversampling - divisor value
- * baudrate 460,800 to 3,686,400 all have divisor 13
- * except 3,000,000 which has divisor value 16
*/
static unsigned int
serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
{
unsigned int divisor;
- if (baud > OMAP_MODE13X_SPEED && baud != 3000000)
+ if (!serial_omap_baud_is_mode16(port, baud))
divisor = 13;
else
divisor = 16;
@@ -303,9 +321,6 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
struct circ_buf *xmit = &up->port.state->xmit;
int count;
- if (!(lsr & UART_LSR_THRE))
- return;
-
if (up->port.x_char) {
serial_out(up, UART_TX, up->port.x_char);
up->port.icount.tx++;
@@ -484,7 +499,6 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
static irqreturn_t serial_omap_irq(int irq, void *dev_id)
{
struct uart_omap_port *up = dev_id;
- struct tty_struct *tty = up->port.state->port.tty;
unsigned int iir, lsr;
unsigned int type;
irqreturn_t ret = IRQ_NONE;
@@ -531,7 +545,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
spin_unlock(&up->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
@@ -777,6 +791,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
+ if (termios->c_cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
/*
* Ask the core to calculate the divisor for us.
@@ -846,7 +862,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval;
- up->scr = OMAP_UART_SCR_TX_EMPTY;
+ up->scr = 0;
/* FIFOs and DMA Settings */
@@ -870,8 +886,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
- up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
-
/* Set receive FIFO threshold to 16 characters and
* transmit FIFO threshold to 16 spaces
*/
@@ -916,7 +930,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, cval);
- if (baud > 230400 && baud != 3000000)
+ if (!serial_omap_baud_is_mode16(port, baud))
up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
up->mdr1 = UART_OMAP_MDR1_16X_MODE;
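serial_omap_baud_is_mode16(), added above, chooses between the 13x and 16x oversampling modes by checking which integer divisor lands closer to the requested rate, rather than relying on the fixed TRM table. A standalone arithmetic sketch of the same comparison, assuming the usual 48 MHz OMAP UART functional clock:

#include <stdio.h>
#include <stdlib.h>

/* Returns 16 or 13: the oversampling mode whose integer divisor gets
 * closest to the requested rate.  Keep baud below uartclk/16, otherwise
 * the divisors reach zero. */
static int pick_mode(unsigned int uartclk, unsigned int baud)
{
	unsigned int n13 = uartclk / (13 * baud);
	unsigned int n16 = uartclk / (16 * baud);
	int err13 = abs((int)baud - (int)(uartclk / (13 * n13)));
	int err16 = abs((int)baud - (int)(uartclk / (16 * n16)));

	return err13 > err16 ? 16 : 13;
}

int main(void)
{
	/* 1000000 baud: 13x gives 48e6/(13*3) = 1230769, while 16x gives
	 * 48e6/(16*3) = 1000000 exactly, so 16x wins */
	printf("1000000 baud -> %dx mode\n", pick_mode(48000000, 1000000));
	/* 460800 baud: 13x gives 461538 (off by 738), 16x gives 500000
	 * (off by 39200), so 13x wins */
	printf("460800 baud -> %dx mode\n", pick_mode(48000000, 460800));
	return 0;
}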
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 8318925fbf6b..7a6c989924b3 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -14,18 +14,21 @@
*along with this program; if not, write to the Free Software
*Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
+#if defined(CONFIG_SERIAL_PCH_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
#include <linux/kernel.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/dmi.h>
-#include <linux/console.h>
#include <linux/nmi.h>
#include <linux/delay.h>
@@ -553,12 +556,26 @@ static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
{
int i;
u8 rbr, lsr;
+ struct uart_port *port = &priv->port;
lsr = ioread8(priv->membase + UART_LSR);
for (i = 0, lsr = ioread8(priv->membase + UART_LSR);
- i < rx_size && lsr & UART_LSR_DR;
+ i < rx_size && lsr & (UART_LSR_DR | UART_LSR_BI);
lsr = ioread8(priv->membase + UART_LSR)) {
rbr = ioread8(priv->membase + PCH_UART_RBR);
+
+ if (lsr & UART_LSR_BI) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ }
+#ifdef SUPPORT_SYSRQ
+ if (port->sysrq) {
+ if (uart_handle_sysrq_char(port, rbr))
+ continue;
+ }
+#endif
+
buf[i++] = rbr;
}
return i;
@@ -591,19 +608,11 @@ static void pch_uart_hal_set_break(struct eg20t_port *priv, int on)
static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
int size)
{
- struct uart_port *port;
- struct tty_struct *tty;
-
- port = &priv->port;
- tty = tty_port_tty_get(&port->state->port);
- if (!tty) {
- dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
- return -EBUSY;
- }
+ struct uart_port *port = &priv->port;
+ struct tty_port *tport = &port->state->port;
- tty_insert_flip_string(tty, buf, size);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(tport, buf, size);
+ tty_flip_buffer_push(tport);
return 0;
}
@@ -629,15 +638,16 @@ static int dma_push_rx(struct eg20t_port *priv, int size)
struct tty_struct *tty;
int room;
struct uart_port *port = &priv->port;
+ struct tty_port *tport = &port->state->port;
port = &priv->port;
- tty = tty_port_tty_get(&port->state->port);
+ tty = tty_port_tty_get(tport);
if (!tty) {
dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
return 0;
}
- room = tty_buffer_request_room(tty, size);
+ room = tty_buffer_request_room(tport, size);
if (room < size)
dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
@@ -645,7 +655,7 @@ static int dma_push_rx(struct eg20t_port *priv, int size)
if (!room)
return room;
- tty_insert_flip_string(tty, sg_virt(&priv->sg_rx), size);
+ tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
port->icount.rx += room;
tty_kref_put(tty);
@@ -743,19 +753,12 @@ static void pch_dma_rx_complete(void *arg)
{
struct eg20t_port *priv = arg;
struct uart_port *port = &priv->port;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
int count;
- if (!tty) {
- dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
- return;
- }
-
dma_sync_sg_for_cpu(port->dev, &priv->sg_rx, 1, DMA_FROM_DEVICE);
count = dma_push_rx(priv, priv->trigger_level);
if (count)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->state->port);
async_tx_ack(priv->desc_rx);
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
PCH_UART_HAL_RX_ERR_INT);
@@ -1037,23 +1040,33 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
{
- u8 fcr = ioread8(priv->membase + UART_FCR);
-
- /* Reset FIFO */
- fcr |= UART_FCR_CLEAR_RCVR;
- iowrite8(fcr, priv->membase + UART_FCR);
+ struct uart_port *port = &priv->port;
+ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ char *error_msg[5] = {};
+ int i = 0;
if (lsr & PCH_UART_LSR_ERR)
- dev_err(&priv->pdev->dev, "Error data in FIFO\n");
+ error_msg[i++] = "Error data in FIFO\n";
- if (lsr & UART_LSR_FE)
- dev_err(&priv->pdev->dev, "Framing Error\n");
+ if (lsr & UART_LSR_FE) {
+ port->icount.frame++;
+ error_msg[i++] = " Framing Error\n";
+ }
- if (lsr & UART_LSR_PE)
- dev_err(&priv->pdev->dev, "Parity Error\n");
+ if (lsr & UART_LSR_PE) {
+ port->icount.parity++;
+ error_msg[i++] = " Parity Error\n";
+ }
- if (lsr & UART_LSR_OE)
- dev_err(&priv->pdev->dev, "Overrun Error\n");
+ if (lsr & UART_LSR_OE) {
+ port->icount.overrun++;
+ error_msg[i++] = " Overrun Error\n";
+ }
+
+ if (tty == NULL) {
+ for (i = 0; error_msg[i] != NULL; i++)
+ dev_err(&priv->pdev->dev, error_msg[i]);
+ }
}
static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
@@ -1564,7 +1577,8 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
local_irq_save(flags);
if (priv->port.sysrq) {
- spin_lock(&priv->lock);
+ /* call to uart_handle_sysrq_char already took the priv lock */
+ priv_locked = 0;
/* serial8250_handle_port() already took the port lock */
port_locked = 0;
} else if (oops_in_progress) {
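The pch_console_write() and imx_console_write() hunks in this series follow the usual serial-console locking convention: skip the port lock when sysrq handling already holds it, only try-lock while an oops is being printed, and lock normally otherwise. A hedged sketch of that pattern, where foo_console_port() and foo_putchars() are stand-ins for driver-specific code:

#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/serial_core.h>

struct uart_port *foo_console_port(struct console *co);	/* assumed lookup */
void foo_putchars(struct uart_port *port, const char *s, unsigned int n);

static void foo_console_write(struct console *co, const char *s,
			      unsigned int count)
{
	struct uart_port *port = foo_console_port(co);
	unsigned long flags = 0;
	int locked = 1;

	if (port->sysrq)
		locked = 0;		/* interrupt handler already owns the lock */
	else if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	foo_putchars(port, s, count);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}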
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 333c8d012b0e..b1785f58b6e3 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -227,19 +227,19 @@ static void pmz_interrupt_control(struct uart_pmac_port *uap, int enable)
write_zsreg(uap, R1, uap->curregs[1]);
}
-static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
+static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
- struct tty_struct *tty = NULL;
+ struct tty_port *port;
unsigned char ch, r1, drop, error, flag;
int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
- if (uap->port.state == NULL || uap->port.state->port.tty == NULL) {
+ if (uap->port.state == NULL) {
WARN_ON(1);
(void)read_zsdata(uap);
- return NULL;
+ return false;
}
- tty = uap->port.state->port.tty;
+ port = &uap->port.state->port;
while (1) {
error = 0;
@@ -309,10 +309,10 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
if (uap->port.ignore_status_mask == 0xff ||
(r1 & uap->port.ignore_status_mask) == 0) {
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
}
if (r1 & Rx_OVR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
/* We can get stuck in an infinite loop getting char 0 when the
* line is in a wrong HW state, we break that here.
@@ -328,11 +328,11 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
break;
}
- return tty;
+ return true;
flood:
pmz_interrupt_control(uap, 0);
pmz_error("pmz: rx irq flood !\n");
- return tty;
+ return true;
}
static void pmz_status_handle(struct uart_pmac_port *uap)
@@ -453,7 +453,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
struct uart_pmac_port *uap_a;
struct uart_pmac_port *uap_b;
int rc = IRQ_NONE;
- struct tty_struct *tty;
+ bool push;
u8 r3;
uap_a = pmz_get_port_A(uap);
@@ -466,7 +466,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
pmz_debug("irq, r3: %x\n", r3);
#endif
/* Channel A */
- tty = NULL;
+ push = false;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
if (!ZS_IS_OPEN(uap_a)) {
pmz_debug("ChanA interrupt while not open !\n");
@@ -477,21 +477,21 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
if (r3 & CHAEXT)
pmz_status_handle(uap_a);
if (r3 & CHARxIP)
- tty = pmz_receive_chars(uap_a);
+ push = pmz_receive_chars(uap_a);
if (r3 & CHATxIP)
pmz_transmit_chars(uap_a);
rc = IRQ_HANDLED;
}
skip_a:
spin_unlock(&uap_a->port.lock);
- if (tty != NULL)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&uap->port.state->port);
if (!uap_b)
goto out;
spin_lock(&uap_b->port.lock);
- tty = NULL;
+ push = false;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
if (!ZS_IS_OPEN(uap_b)) {
pmz_debug("ChanB interrupt while not open !\n");
@@ -502,15 +502,15 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
if (r3 & CHBEXT)
pmz_status_handle(uap_b);
if (r3 & CHBRxIP)
- tty = pmz_receive_chars(uap_b);
+ push = pmz_receive_chars(uap_b);
if (r3 & CHBTxIP)
pmz_transmit_chars(uap_b);
rc = IRQ_HANDLED;
}
skip_b:
spin_unlock(&uap_b->port.lock);
- if (tty != NULL)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&uap->port.state->port);
out:
return rc;
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 0aa75a97531c..7e277a5384a7 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -181,7 +181,6 @@ static void pnx8xxx_enable_ms(struct uart_port *port)
static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
{
- struct tty_struct *tty = sport->port.state->port.tty;
unsigned int status, ch, flg;
status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
@@ -238,7 +237,7 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&sport->port.state->port);
}
static void pnx8xxx_tx_chars(struct pnx8xxx_port *sport)
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 2764828251f5..05f504e0c271 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -98,7 +98,6 @@ static void serial_pxa_stop_rx(struct uart_port *port)
static inline void receive_chars(struct uart_pxa_port *up, int *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
unsigned int ch, flag;
int max_count = 256;
@@ -168,7 +167,7 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
/* work around Errata #20 according to
* Intel(R) PXA27x Processor Family
@@ -673,8 +672,7 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
unsigned long flags;
int locked = 1;
- clk_prepare_enable(up->clk);
-
+ clk_enable(up->clk);
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
@@ -701,8 +699,8 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
+ clk_disable(up->clk);
- clk_disable_unprepare(up->clk);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -899,6 +897,12 @@ static int serial_pxa_probe(struct platform_device *dev)
goto err_free;
}
+ ret = clk_prepare(sport->clk);
+ if (ret) {
+ clk_put(sport->clk);
+ goto err_free;
+ }
+
sport->port.type = PORT_PXA;
sport->port.iotype = UPIO_MEM;
sport->port.mapbase = mmres->start;
@@ -930,6 +934,7 @@ static int serial_pxa_probe(struct platform_device *dev)
return 0;
err_clk:
+ clk_unprepare(sport->clk);
clk_put(sport->clk);
err_free:
kfree(sport);
@@ -943,6 +948,8 @@ static int serial_pxa_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
uart_remove_one_port(&serial_pxa_reg, &sport->port);
+
+ clk_unprepare(sport->clk);
clk_put(sport->clk);
kfree(sport);
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
new file mode 100644
index 000000000000..a314a943f124
--- /dev/null
+++ b/drivers/tty/serial/rp2.c
@@ -0,0 +1,885 @@
+/*
+ * Driver for Comtrol RocketPort EXPRESS/INFINITY cards
+ *
+ * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
+ *
+ * Inspired by, and loosely based on:
+ *
+ * ar933x_uart.c
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * rocketport_infinity_express-linux-1.20.tar.gz
+ * Copyright (C) 2004-2011 Comtrol, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/types.h>
+
+#define DRV_NAME "rp2"
+
+#define RP2_FW_NAME "rp2.fw"
+#define RP2_UCODE_BYTES 0x3f
+
+#define PORTS_PER_ASIC 16
+#define ALL_PORTS_MASK (BIT(PORTS_PER_ASIC) - 1)
+
+#define UART_CLOCK 44236800
+#define DEFAULT_BAUD_DIV (UART_CLOCK / (9600 * 16))
+#define FIFO_SIZE 512
+
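+/*
+ * With the 44.2368 MHz UART_CLOCK above, DEFAULT_BAUD_DIV works out to
+ * 44236800 / (9600 * 16) = 288, i.e. the 16x-oversampling divisor for
+ * the default 9600 baud setup; __rp2_uart_set_termios() below programs
+ * the divisor minus one into RP2_BAUD.
+ */
+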
+/* BAR0 registers */
+#define RP2_FPGA_CTL0 0x110
+#define RP2_FPGA_CTL1 0x11c
+#define RP2_IRQ_MASK 0x1ec
+#define RP2_IRQ_MASK_EN_m BIT(0)
+#define RP2_IRQ_STATUS 0x1f0
+
+/* BAR1 registers */
+#define RP2_ASIC_SPACING 0x1000
+#define RP2_ASIC_OFFSET(i) ((i) << ilog2(RP2_ASIC_SPACING))
+
+#define RP2_PORT_BASE 0x000
+#define RP2_PORT_SPACING 0x040
+
+#define RP2_UCODE_BASE 0x400
+#define RP2_UCODE_SPACING 0x80
+
+#define RP2_CLK_PRESCALER 0xc00
+#define RP2_CH_IRQ_STAT 0xc04
+#define RP2_CH_IRQ_MASK 0xc08
+#define RP2_ASIC_IRQ 0xd00
+#define RP2_ASIC_IRQ_EN_m BIT(20)
+#define RP2_GLOBAL_CMD 0xd0c
+#define RP2_ASIC_CFG 0xd04
+
+/* port registers */
+#define RP2_DATA_DWORD 0x000
+
+#define RP2_DATA_BYTE 0x008
+#define RP2_DATA_BYTE_ERR_PARITY_m BIT(8)
+#define RP2_DATA_BYTE_ERR_OVERRUN_m BIT(9)
+#define RP2_DATA_BYTE_ERR_FRAMING_m BIT(10)
+#define RP2_DATA_BYTE_BREAK_m BIT(11)
+
+/* This lets uart_insert_char() drop bytes received on a !CREAD port */
+#define RP2_DUMMY_READ BIT(16)
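+
+/*
+ * Example: when CREAD is clear, rp2_uart_set_termios() puts
+ * RP2_DUMMY_READ into port->ignore_status_mask, and rp2_rx_chars()
+ * ORs RP2_DUMMY_READ into every received word, so uart_insert_char()
+ * silently discards the data.
+ */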
+
+#define RP2_DATA_BYTE_EXCEPTION_MASK (RP2_DATA_BYTE_ERR_PARITY_m | \
+ RP2_DATA_BYTE_ERR_OVERRUN_m | \
+ RP2_DATA_BYTE_ERR_FRAMING_m | \
+ RP2_DATA_BYTE_BREAK_m)
+
+#define RP2_RX_FIFO_COUNT 0x00c
+#define RP2_TX_FIFO_COUNT 0x00e
+
+#define RP2_CHAN_STAT 0x010
+#define RP2_CHAN_STAT_RXDATA_m BIT(0)
+#define RP2_CHAN_STAT_DCD_m BIT(3)
+#define RP2_CHAN_STAT_DSR_m BIT(4)
+#define RP2_CHAN_STAT_CTS_m BIT(5)
+#define RP2_CHAN_STAT_RI_m BIT(6)
+#define RP2_CHAN_STAT_OVERRUN_m BIT(13)
+#define RP2_CHAN_STAT_DSR_CHANGED_m BIT(16)
+#define RP2_CHAN_STAT_CTS_CHANGED_m BIT(17)
+#define RP2_CHAN_STAT_CD_CHANGED_m BIT(18)
+#define RP2_CHAN_STAT_RI_CHANGED_m BIT(22)
+#define RP2_CHAN_STAT_TXEMPTY_m BIT(25)
+
+#define RP2_CHAN_STAT_MS_CHANGED_MASK (RP2_CHAN_STAT_DSR_CHANGED_m | \
+ RP2_CHAN_STAT_CTS_CHANGED_m | \
+ RP2_CHAN_STAT_CD_CHANGED_m | \
+ RP2_CHAN_STAT_RI_CHANGED_m)
+
+#define RP2_TXRX_CTL 0x014
+#define RP2_TXRX_CTL_MSRIRQ_m BIT(0)
+#define RP2_TXRX_CTL_RXIRQ_m BIT(2)
+#define RP2_TXRX_CTL_RX_TRIG_s 3
+#define RP2_TXRX_CTL_RX_TRIG_m (0x3 << RP2_TXRX_CTL_RX_TRIG_s)
+#define RP2_TXRX_CTL_RX_TRIG_1 (0x1 << RP2_TXRX_CTL_RX_TRIG_s)
+#define RP2_TXRX_CTL_RX_TRIG_256 (0x2 << RP2_TXRX_CTL_RX_TRIG_s)
+#define RP2_TXRX_CTL_RX_TRIG_448 (0x3 << RP2_TXRX_CTL_RX_TRIG_s)
+#define RP2_TXRX_CTL_RX_EN_m BIT(5)
+#define RP2_TXRX_CTL_RTSFLOW_m BIT(6)
+#define RP2_TXRX_CTL_DTRFLOW_m BIT(7)
+#define RP2_TXRX_CTL_TX_TRIG_s 16
+#define RP2_TXRX_CTL_TX_TRIG_m (0x3 << RP2_TXRX_CTL_TX_TRIG_s)
+#define RP2_TXRX_CTL_DSRFLOW_m BIT(18)
+#define RP2_TXRX_CTL_TXIRQ_m BIT(19)
+#define RP2_TXRX_CTL_CTSFLOW_m BIT(23)
+#define RP2_TXRX_CTL_TX_EN_m BIT(24)
+#define RP2_TXRX_CTL_RTS_m BIT(25)
+#define RP2_TXRX_CTL_DTR_m BIT(26)
+#define RP2_TXRX_CTL_LOOP_m BIT(27)
+#define RP2_TXRX_CTL_BREAK_m BIT(28)
+#define RP2_TXRX_CTL_CMSPAR_m BIT(29)
+#define RP2_TXRX_CTL_nPARODD_m BIT(30)
+#define RP2_TXRX_CTL_PARENB_m BIT(31)
+
+#define RP2_UART_CTL 0x018
+#define RP2_UART_CTL_MODE_s 0
+#define RP2_UART_CTL_MODE_m (0x7 << RP2_UART_CTL_MODE_s)
+#define RP2_UART_CTL_MODE_rs232 (0x1 << RP2_UART_CTL_MODE_s)
+#define RP2_UART_CTL_FLUSH_RX_m BIT(3)
+#define RP2_UART_CTL_FLUSH_TX_m BIT(4)
+#define RP2_UART_CTL_RESET_CH_m BIT(5)
+#define RP2_UART_CTL_XMIT_EN_m BIT(6)
+#define RP2_UART_CTL_DATABITS_s 8
+#define RP2_UART_CTL_DATABITS_m (0x3 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_DATABITS_8 (0x3 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_DATABITS_7 (0x2 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_DATABITS_6 (0x1 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_DATABITS_5 (0x0 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_STOPBITS_m BIT(10)
+
+#define RP2_BAUD 0x01c
+
+/* ucode registers */
+#define RP2_TX_SWFLOW 0x02
+#define RP2_TX_SWFLOW_ena 0x81
+#define RP2_TX_SWFLOW_dis 0x9d
+
+#define RP2_RX_SWFLOW 0x0c
+#define RP2_RX_SWFLOW_ena 0x81
+#define RP2_RX_SWFLOW_dis 0x8d
+
+#define RP2_RX_FIFO 0x37
+#define RP2_RX_FIFO_ena 0x08
+#define RP2_RX_FIFO_dis 0x81
+
+static struct uart_driver rp2_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = DRV_NAME,
+ .dev_name = "ttyRP",
+ .nr = CONFIG_SERIAL_RP2_NR_UARTS,
+};
+
+struct rp2_card;
+
+struct rp2_uart_port {
+ struct uart_port port;
+ int idx;
+ int ignore_rx;
+ struct rp2_card *card;
+ void __iomem *asic_base;
+ void __iomem *base;
+ void __iomem *ucode;
+};
+
+struct rp2_card {
+ struct pci_dev *pdev;
+ struct rp2_uart_port *ports;
+ int n_ports;
+ int initialized_ports;
+ int minor_start;
+ int smpte;
+ void __iomem *bar0;
+ void __iomem *bar1;
+ spinlock_t card_lock;
+ struct completion fw_loaded;
+};
+
+#define RP_ID(prod) PCI_VDEVICE(RP, (prod))
+#define RP_CAP(ports, smpte) (((ports) << 8) | ((smpte) << 0))
+
+static inline void rp2_decode_cap(const struct pci_device_id *id,
+ int *ports, int *smpte)
+{
+ *ports = id->driver_data >> 8;
+ *smpte = id->driver_data & 0xff;
+}
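+
+/*
+ * For example, RP_CAP(8, 0) packs driver_data = 0x0800, which
+ * rp2_decode_cap() splits back into *ports = 8 and *smpte = 0.
+ */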
+
+static DEFINE_SPINLOCK(rp2_minor_lock);
+static int rp2_minor_next;
+
+static int rp2_alloc_ports(int n_ports)
+{
+ int ret = -ENOSPC;
+
+ spin_lock(&rp2_minor_lock);
+ if (rp2_minor_next + n_ports <= CONFIG_SERIAL_RP2_NR_UARTS) {
+ /* sorry, no support for hot unplugging individual cards */
+ ret = rp2_minor_next;
+ rp2_minor_next += n_ports;
+ }
+ spin_unlock(&rp2_minor_lock);
+
+ return ret;
+}
+
+static inline struct rp2_uart_port *port_to_up(struct uart_port *port)
+{
+ return container_of(port, struct rp2_uart_port, port);
+}
+
+static void rp2_rmw(struct rp2_uart_port *up, int reg,
+ u32 clr_bits, u32 set_bits)
+{
+ u32 tmp = readl(up->base + reg);
+ tmp &= ~clr_bits;
+ tmp |= set_bits;
+ writel(tmp, up->base + reg);
+}
+
+static void rp2_rmw_clr(struct rp2_uart_port *up, int reg, u32 val)
+{
+ rp2_rmw(up, reg, val, 0);
+}
+
+static void rp2_rmw_set(struct rp2_uart_port *up, int reg, u32 val)
+{
+ rp2_rmw(up, reg, 0, val);
+}
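+
+/*
+ * rp2_rmw() does a read-modify-write: clr_bits are cleared first, then
+ * set_bits are ORed in. For instance,
+ * rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_RX_TRIG_m, RP2_TXRX_CTL_RX_TRIG_256)
+ * replaces the current rx trigger level with the 256-byte threshold
+ * while leaving the remaining bits untouched.
+ */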
+
+static void rp2_mask_ch_irq(struct rp2_uart_port *up, int ch_num,
+ int is_enabled)
+{
+ unsigned long flags, irq_mask;
+
+ spin_lock_irqsave(&up->card->card_lock, flags);
+
+ irq_mask = readl(up->asic_base + RP2_CH_IRQ_MASK);
+ if (is_enabled)
+ irq_mask &= ~BIT(ch_num);
+ else
+ irq_mask |= BIT(ch_num);
+ writel(irq_mask, up->asic_base + RP2_CH_IRQ_MASK);
+
+ spin_unlock_irqrestore(&up->card->card_lock, flags);
+}
+
+static unsigned int rp2_uart_tx_empty(struct uart_port *port)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+ unsigned long tx_fifo_bytes, flags;
+
+ /*
+ * This should probably check the transmitter, not the FIFO.
+ * But the TXEMPTY bit doesn't seem to work unless the TX IRQ is
+ * enabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+ tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return tx_fifo_bytes ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int rp2_uart_get_mctrl(struct uart_port *port)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+ u32 status;
+
+ status = readl(up->base + RP2_CHAN_STAT);
+ return ((status & RP2_CHAN_STAT_DCD_m) ? TIOCM_CAR : 0) |
+ ((status & RP2_CHAN_STAT_DSR_m) ? TIOCM_DSR : 0) |
+ ((status & RP2_CHAN_STAT_CTS_m) ? TIOCM_CTS : 0) |
+ ((status & RP2_CHAN_STAT_RI_m) ? TIOCM_RI : 0);
+}
+
+static void rp2_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ rp2_rmw(port_to_up(port), RP2_TXRX_CTL,
+ RP2_TXRX_CTL_DTR_m | RP2_TXRX_CTL_RTS_m | RP2_TXRX_CTL_LOOP_m,
+ ((mctrl & TIOCM_DTR) ? RP2_TXRX_CTL_DTR_m : 0) |
+ ((mctrl & TIOCM_RTS) ? RP2_TXRX_CTL_RTS_m : 0) |
+ ((mctrl & TIOCM_LOOP) ? RP2_TXRX_CTL_LOOP_m : 0));
+}
+
+static void rp2_uart_start_tx(struct uart_port *port)
+{
+ rp2_rmw_set(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_TXIRQ_m);
+}
+
+static void rp2_uart_stop_tx(struct uart_port *port)
+{
+ rp2_rmw_clr(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_TXIRQ_m);
+}
+
+static void rp2_uart_stop_rx(struct uart_port *port)
+{
+ rp2_rmw_clr(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_RXIRQ_m);
+}
+
+static void rp2_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m,
+ break_state ? RP2_TXRX_CTL_BREAK_m : 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void rp2_uart_enable_ms(struct uart_port *port)
+{
+ rp2_rmw_set(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_MSRIRQ_m);
+}
+
+static void __rp2_uart_set_termios(struct rp2_uart_port *up,
+ unsigned long cfl,
+ unsigned long ifl,
+ unsigned int baud_div)
+{
+ /* baud rate divisor (calculated elsewhere). 0 = divide-by-1 */
+ writew(baud_div - 1, up->base + RP2_BAUD);
+
+ /* data bits and stop bits */
+ rp2_rmw(up, RP2_UART_CTL,
+ RP2_UART_CTL_STOPBITS_m | RP2_UART_CTL_DATABITS_m,
+ ((cfl & CSTOPB) ? RP2_UART_CTL_STOPBITS_m : 0) |
+ (((cfl & CSIZE) == CS8) ? RP2_UART_CTL_DATABITS_8 : 0) |
+ (((cfl & CSIZE) == CS7) ? RP2_UART_CTL_DATABITS_7 : 0) |
+ (((cfl & CSIZE) == CS6) ? RP2_UART_CTL_DATABITS_6 : 0) |
+ (((cfl & CSIZE) == CS5) ? RP2_UART_CTL_DATABITS_5 : 0));
+
+ /* parity and hardware flow control */
+ rp2_rmw(up, RP2_TXRX_CTL,
+ RP2_TXRX_CTL_PARENB_m | RP2_TXRX_CTL_nPARODD_m |
+ RP2_TXRX_CTL_CMSPAR_m | RP2_TXRX_CTL_DTRFLOW_m |
+ RP2_TXRX_CTL_DSRFLOW_m | RP2_TXRX_CTL_RTSFLOW_m |
+ RP2_TXRX_CTL_CTSFLOW_m,
+ ((cfl & PARENB) ? RP2_TXRX_CTL_PARENB_m : 0) |
+ ((cfl & PARODD) ? 0 : RP2_TXRX_CTL_nPARODD_m) |
+ ((cfl & CMSPAR) ? RP2_TXRX_CTL_CMSPAR_m : 0) |
+ ((cfl & CRTSCTS) ? (RP2_TXRX_CTL_RTSFLOW_m |
+ RP2_TXRX_CTL_CTSFLOW_m) : 0));
+
+ /* XON/XOFF software flow control */
+ writeb((ifl & IXON) ? RP2_TX_SWFLOW_ena : RP2_TX_SWFLOW_dis,
+ up->ucode + RP2_TX_SWFLOW);
+ writeb((ifl & IXOFF) ? RP2_RX_SWFLOW_ena : RP2_RX_SWFLOW_dis,
+ up->ucode + RP2_RX_SWFLOW);
+}
+
+static void rp2_uart_set_termios(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+ unsigned long flags;
+ unsigned int baud, baud_div;
+
+ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+ baud_div = uart_get_divisor(port, baud);
+
+ if (tty_termios_baud_rate(new))
+ tty_termios_encode_baud_rate(new, baud, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* ignore all characters if CREAD is not set */
+ port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ;
+
+ __rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div);
+ uart_update_timeout(port, new->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void rp2_rx_chars(struct rp2_uart_port *up)
+{
+ u16 bytes = readw(up->base + RP2_RX_FIFO_COUNT);
+ struct tty_port *port = &up->port.state->port;
+
+ for (; bytes != 0; bytes--) {
+ u32 byte = readw(up->base + RP2_DATA_BYTE) | RP2_DUMMY_READ;
+ char ch = byte & 0xff;
+
+ if (likely(!(byte & RP2_DATA_BYTE_EXCEPTION_MASK))) {
+ if (!uart_handle_sysrq_char(&up->port, ch))
+ uart_insert_char(&up->port, byte, 0, ch,
+ TTY_NORMAL);
+ } else {
+ char flag = TTY_NORMAL;
+
+ if (byte & RP2_DATA_BYTE_BREAK_m)
+ flag = TTY_BREAK;
+ else if (byte & RP2_DATA_BYTE_ERR_FRAMING_m)
+ flag = TTY_FRAME;
+ else if (byte & RP2_DATA_BYTE_ERR_PARITY_m)
+ flag = TTY_PARITY;
+ uart_insert_char(&up->port, byte,
+ RP2_DATA_BYTE_ERR_OVERRUN_m, ch, flag);
+ }
+ up->port.icount.rx++;
+ }
+
+ tty_flip_buffer_push(port);
+}
+
+static void rp2_tx_chars(struct rp2_uart_port *up)
+{
+ u16 max_tx = FIFO_SIZE - readw(up->base + RP2_TX_FIFO_COUNT);
+ struct circ_buf *xmit = &up->port.state->xmit;
+
+ if (uart_tx_stopped(&up->port)) {
+ rp2_uart_stop_tx(&up->port);
+ return;
+ }
+
+ for (; max_tx != 0; max_tx--) {
+ if (up->port.x_char) {
+ writeb(up->port.x_char, up->base + RP2_DATA_BYTE);
+ up->port.x_char = 0;
+ up->port.icount.tx++;
+ continue;
+ }
+ if (uart_circ_empty(xmit)) {
+ rp2_uart_stop_tx(&up->port);
+ break;
+ }
+ writeb(xmit->buf[xmit->tail], up->base + RP2_DATA_BYTE);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+}
+
+static void rp2_ch_interrupt(struct rp2_uart_port *up)
+{
+ u32 status;
+
+ spin_lock(&up->port.lock);
+
+ /*
+ * The IRQ status bits are clear-on-write. Other status bits in
+ * this register aren't, so it's harmless to write to them.
+ */
+ status = readl(up->base + RP2_CHAN_STAT);
+ writel(status, up->base + RP2_CHAN_STAT);
+
+ if (status & RP2_CHAN_STAT_RXDATA_m)
+ rp2_rx_chars(up);
+ if (status & RP2_CHAN_STAT_TXEMPTY_m)
+ rp2_tx_chars(up);
+ if (status & RP2_CHAN_STAT_MS_CHANGED_MASK)
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+
+ spin_unlock(&up->port.lock);
+}
+
+static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id)
+{
+ void __iomem *base = card->bar1 + RP2_ASIC_OFFSET(asic_id);
+ int ch, handled = 0;
+ unsigned long status = readl(base + RP2_CH_IRQ_STAT) &
+ ~readl(base + RP2_CH_IRQ_MASK);
+
+ for_each_set_bit(ch, &status, PORTS_PER_ASIC) {
+ rp2_ch_interrupt(&card->ports[ch]);
+ handled++;
+ }
+ return handled;
+}
+
+static irqreturn_t rp2_uart_interrupt(int irq, void *dev_id)
+{
+ struct rp2_card *card = dev_id;
+ int handled;
+
+ handled = rp2_asic_interrupt(card, 0);
+ if (card->n_ports >= PORTS_PER_ASIC)
+ handled += rp2_asic_interrupt(card, 1);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static inline void rp2_flush_fifos(struct rp2_uart_port *up)
+{
+ rp2_rmw_set(up, RP2_UART_CTL,
+ RP2_UART_CTL_FLUSH_RX_m | RP2_UART_CTL_FLUSH_TX_m);
+ readl(up->base + RP2_UART_CTL);
+ udelay(10);
+ rp2_rmw_clr(up, RP2_UART_CTL,
+ RP2_UART_CTL_FLUSH_RX_m | RP2_UART_CTL_FLUSH_TX_m);
+}
+
+static int rp2_uart_startup(struct uart_port *port)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+
+ rp2_flush_fifos(up);
+ rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_MSRIRQ_m, RP2_TXRX_CTL_RXIRQ_m);
+ rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_RX_TRIG_m,
+ RP2_TXRX_CTL_RX_TRIG_1);
+ rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
+ rp2_mask_ch_irq(up, up->idx, 1);
+
+ return 0;
+}
+
+static void rp2_uart_shutdown(struct uart_port *port)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+ unsigned long flags;
+
+ rp2_uart_break_ctl(port, 0);
+
+ spin_lock_irqsave(&port->lock, flags);
+ rp2_mask_ch_irq(up, up->idx, 0);
+ rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *rp2_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_RP2) ? "RocketPort 2 UART" : NULL;
+}
+
+static void rp2_uart_release_port(struct uart_port *port)
+{
+ /* Nothing to release ... */
+}
+
+static int rp2_uart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void rp2_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_RP2;
+}
+
+static int rp2_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_RP2)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct uart_ops rp2_uart_ops = {
+ .tx_empty = rp2_uart_tx_empty,
+ .set_mctrl = rp2_uart_set_mctrl,
+ .get_mctrl = rp2_uart_get_mctrl,
+ .stop_tx = rp2_uart_stop_tx,
+ .start_tx = rp2_uart_start_tx,
+ .stop_rx = rp2_uart_stop_rx,
+ .enable_ms = rp2_uart_enable_ms,
+ .break_ctl = rp2_uart_break_ctl,
+ .startup = rp2_uart_startup,
+ .shutdown = rp2_uart_shutdown,
+ .set_termios = rp2_uart_set_termios,
+ .type = rp2_uart_type,
+ .release_port = rp2_uart_release_port,
+ .request_port = rp2_uart_request_port,
+ .config_port = rp2_uart_config_port,
+ .verify_port = rp2_uart_verify_port,
+};
+
+static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id)
+{
+ void __iomem *base = card->bar1 + RP2_ASIC_OFFSET(asic_id);
+ u32 clk_cfg;
+
+ writew(1, base + RP2_GLOBAL_CMD);
+ readw(base + RP2_GLOBAL_CMD);
+ msleep(100);
+ writel(0, base + RP2_CLK_PRESCALER);
+
+ /* TDM clock configuration */
+ clk_cfg = readw(base + RP2_ASIC_CFG);
+ clk_cfg = (clk_cfg & ~BIT(8)) | BIT(9);
+ writew(clk_cfg, base + RP2_ASIC_CFG);
+
+ /* IRQ routing */
+ writel(ALL_PORTS_MASK, base + RP2_CH_IRQ_MASK);
+ writel(RP2_ASIC_IRQ_EN_m, base + RP2_ASIC_IRQ);
+}
+
+static void rp2_init_card(struct rp2_card *card)
+{
+ writel(4, card->bar0 + RP2_FPGA_CTL0);
+ writel(0, card->bar0 + RP2_FPGA_CTL1);
+
+ rp2_reset_asic(card, 0);
+ if (card->n_ports >= PORTS_PER_ASIC)
+ rp2_reset_asic(card, 1);
+
+ writel(RP2_IRQ_MASK_EN_m, card->bar0 + RP2_IRQ_MASK);
+}
+
+static void rp2_init_port(struct rp2_uart_port *up, const struct firmware *fw)
+{
+ int i;
+
+ writel(RP2_UART_CTL_RESET_CH_m, up->base + RP2_UART_CTL);
+ readl(up->base + RP2_UART_CTL);
+ udelay(1);
+
+ writel(0, up->base + RP2_TXRX_CTL);
+ writel(0, up->base + RP2_UART_CTL);
+ readl(up->base + RP2_UART_CTL);
+ udelay(1);
+
+ rp2_flush_fifos(up);
+
+ for (i = 0; i < min_t(int, fw->size, RP2_UCODE_BYTES); i++)
+ writeb(fw->data[i], up->ucode + i);
+
+ __rp2_uart_set_termios(up, CS8 | CREAD | CLOCAL, 0, DEFAULT_BAUD_DIV);
+ rp2_uart_set_mctrl(&up->port, 0);
+
+ writeb(RP2_RX_FIFO_ena, up->ucode + RP2_RX_FIFO);
+ rp2_rmw(up, RP2_UART_CTL, RP2_UART_CTL_MODE_m,
+ RP2_UART_CTL_XMIT_EN_m | RP2_UART_CTL_MODE_rs232);
+ rp2_rmw_set(up, RP2_TXRX_CTL,
+ RP2_TXRX_CTL_TX_EN_m | RP2_TXRX_CTL_RX_EN_m);
+}
+
+static void rp2_remove_ports(struct rp2_card *card)
+{
+ int i;
+
+ for (i = 0; i < card->initialized_ports; i++)
+ uart_remove_one_port(&rp2_uart_driver, &card->ports[i].port);
+ card->initialized_ports = 0;
+}
+
+static void rp2_fw_cb(const struct firmware *fw, void *context)
+{
+ struct rp2_card *card = context;
+ resource_size_t phys_base;
+ int i, rc = -ENOENT;
+
+ if (!fw) {
+ dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n",
+ RP2_FW_NAME);
+ goto no_fw;
+ }
+
+ phys_base = pci_resource_start(card->pdev, 1);
+
+ for (i = 0; i < card->n_ports; i++) {
+ struct rp2_uart_port *rp = &card->ports[i];
+ struct uart_port *p;
+ int j = (unsigned)i % PORTS_PER_ASIC;
+
+ rp->asic_base = card->bar1;
+ rp->base = card->bar1 + RP2_PORT_BASE + j*RP2_PORT_SPACING;
+ rp->ucode = card->bar1 + RP2_UCODE_BASE + j*RP2_UCODE_SPACING;
+ rp->card = card;
+ rp->idx = j;
+
+ p = &rp->port;
+ p->line = card->minor_start + i;
+ p->dev = &card->pdev->dev;
+ p->type = PORT_RP2;
+ p->iotype = UPIO_MEM32;
+ p->uartclk = UART_CLOCK;
+ p->regshift = 2;
+ p->fifosize = FIFO_SIZE;
+ p->ops = &rp2_uart_ops;
+ p->irq = card->pdev->irq;
+ p->membase = rp->base;
+ p->mapbase = phys_base + RP2_PORT_BASE + j*RP2_PORT_SPACING;
+
+ if (i >= PORTS_PER_ASIC) {
+ rp->asic_base += RP2_ASIC_SPACING;
+ rp->base += RP2_ASIC_SPACING;
+ rp->ucode += RP2_ASIC_SPACING;
+ p->mapbase += RP2_ASIC_SPACING;
+ }
+
+ rp2_init_port(rp, fw);
+ rc = uart_add_one_port(&rp2_uart_driver, p);
+ if (rc) {
+ dev_err(&card->pdev->dev,
+ "error registering port %d: %d\n", i, rc);
+ rp2_remove_ports(card);
+ break;
+ }
+ card->initialized_ports++;
+ }
+
+ release_firmware(fw);
+no_fw:
+ /*
+ * rp2_fw_cb() is called from a workqueue long after rp2_probe()
+ * has already returned success. So if something failed here,
+ * we'll just leave the now-dormant device in place until somebody
+ * unbinds it.
+ */
+ if (rc)
+ dev_warn(&card->pdev->dev, "driver initialization failed\n");
+
+ complete(&card->fw_loaded);
+}
+
+static int rp2_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct rp2_card *card;
+ struct rp2_uart_port *ports;
+ void __iomem * const *bars;
+ int rc;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, card);
+ spin_lock_init(&card->card_lock);
+ init_completion(&card->fw_loaded);
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions_request_all(pdev, 0x03, DRV_NAME);
+ if (rc)
+ return rc;
+
+ bars = pcim_iomap_table(pdev);
+ card->bar0 = bars[0];
+ card->bar1 = bars[1];
+ card->pdev = pdev;
+
+ rp2_decode_cap(id, &card->n_ports, &card->smpte);
+ dev_info(&pdev->dev, "found new card with %d ports\n", card->n_ports);
+
+ card->minor_start = rp2_alloc_ports(card->n_ports);
+ if (card->minor_start < 0) {
+ dev_err(&pdev->dev,
+ "too many ports (try increasing CONFIG_SERIAL_RP2_NR_UARTS)\n");
+ return -EINVAL;
+ }
+
+ rp2_init_card(card);
+
+ ports = devm_kzalloc(&pdev->dev, sizeof(*ports) * card->n_ports,
+ GFP_KERNEL);
+ if (!ports)
+ return -ENOMEM;
+ card->ports = ports;
+
+ rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
+ IRQF_SHARED, DRV_NAME, card);
+ if (rc)
+ return rc;
+
+ /*
+ * Only catastrophic errors (e.g. ENOMEM) are reported here.
+ * If the FW image is missing, we'll find out in rp2_fw_cb()
+ * and print an error message.
+ */
+ rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev,
+ GFP_KERNEL, card, rp2_fw_cb);
+ if (rc)
+ return rc;
+ dev_dbg(&pdev->dev, "waiting for firmware blob...\n");
+
+ return 0;
+}
+
+static void rp2_remove(struct pci_dev *pdev)
+{
+ struct rp2_card *card = pci_get_drvdata(pdev);
+
+ wait_for_completion(&card->fw_loaded);
+ rp2_remove_ports(card);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(rp2_pci_tbl) = {
+
+ /* RocketPort INFINITY cards */
+
+ { RP_ID(0x0040), RP_CAP(8, 0) }, /* INF Octa, RJ45, selectable */
+ { RP_ID(0x0041), RP_CAP(32, 0) }, /* INF 32, ext interface */
+ { RP_ID(0x0042), RP_CAP(8, 0) }, /* INF Octa, ext interface */
+ { RP_ID(0x0043), RP_CAP(16, 0) }, /* INF 16, ext interface */
+ { RP_ID(0x0044), RP_CAP(4, 0) }, /* INF Quad, DB, selectable */
+ { RP_ID(0x0045), RP_CAP(8, 0) }, /* INF Octa, DB, selectable */
+ { RP_ID(0x0046), RP_CAP(4, 0) }, /* INF Quad, ext interface */
+ { RP_ID(0x0047), RP_CAP(4, 0) }, /* INF Quad, RJ45 */
+ { RP_ID(0x004a), RP_CAP(4, 0) }, /* INF Plus, Quad */
+ { RP_ID(0x004b), RP_CAP(8, 0) }, /* INF Plus, Octa */
+ { RP_ID(0x004c), RP_CAP(8, 0) }, /* INF III, Octa */
+ { RP_ID(0x004d), RP_CAP(4, 0) }, /* INF III, Quad */
+ { RP_ID(0x004e), RP_CAP(2, 0) }, /* INF Plus, 2, RS232 */
+ { RP_ID(0x004f), RP_CAP(2, 1) }, /* INF Plus, 2, SMPTE */
+ { RP_ID(0x0050), RP_CAP(4, 0) }, /* INF Plus, Quad, RJ45 */
+ { RP_ID(0x0051), RP_CAP(8, 0) }, /* INF Plus, Octa, RJ45 */
+ { RP_ID(0x0052), RP_CAP(8, 1) }, /* INF Octa, SMPTE */
+
+ /* RocketPort EXPRESS cards */
+
+ { RP_ID(0x0060), RP_CAP(8, 0) }, /* EXP Octa, RJ45, selectable */
+ { RP_ID(0x0061), RP_CAP(32, 0) }, /* EXP 32, ext interface */
+ { RP_ID(0x0062), RP_CAP(8, 0) }, /* EXP Octa, ext interface */
+ { RP_ID(0x0063), RP_CAP(16, 0) }, /* EXP 16, ext interface */
+ { RP_ID(0x0064), RP_CAP(4, 0) }, /* EXP Quad, DB, selectable */
+ { RP_ID(0x0065), RP_CAP(8, 0) }, /* EXP Octa, DB, selectable */
+ { RP_ID(0x0066), RP_CAP(4, 0) }, /* EXP Quad, ext interface */
+ { RP_ID(0x0067), RP_CAP(4, 0) }, /* EXP Quad, RJ45 */
+ { RP_ID(0x0068), RP_CAP(8, 0) }, /* EXP Octa, RJ11 */
+ { RP_ID(0x0072), RP_CAP(8, 1) }, /* EXP Octa, SMPTE */
+ { }
+};
+MODULE_DEVICE_TABLE(pci, rp2_pci_tbl);
+
+static struct pci_driver rp2_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = rp2_pci_tbl,
+ .probe = rp2_probe,
+ .remove = rp2_remove,
+};
+
+static int __init rp2_uart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&rp2_uart_driver);
+ if (rc)
+ return rc;
+
+ rc = pci_register_driver(&rp2_pci_driver);
+ if (rc) {
+ uart_unregister_driver(&rp2_uart_driver);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit rp2_uart_exit(void)
+{
+ pci_unregister_driver(&rp2_pci_driver);
+ uart_unregister_driver(&rp2_uart_driver);
+}
+
+module_init(rp2_uart_init);
+module_exit(rp2_uart_exit);
+
+MODULE_DESCRIPTION("Comtrol RocketPort EXPRESS/INFINITY driver");
+MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(RP2_FW_NAME);
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 5d4b9b449b4a..af6b3e3ad24d 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -188,7 +188,6 @@ static void sa1100_enable_ms(struct uart_port *port)
static void
sa1100_rx_chars(struct sa1100_port *sport)
{
- struct tty_struct *tty = sport->port.state->port.tty;
unsigned int status, ch, flg;
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
@@ -233,7 +232,7 @@ sa1100_rx_chars(struct sa1100_port *sport)
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
UTSR0_TO_SM(UART_GET_UTSR0(sport));
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&sport->port.state->port);
}
static void sa1100_tx_chars(struct sa1100_port *sport)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 12e5249d053e..2769a38d15b6 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -47,7 +47,6 @@
#include <asm/irq.h>
#include <mach/hardware.h>
-#include <mach/map.h>
#include <plat/regs-serial.h>
#include <plat/clock.h>
@@ -221,7 +220,6 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id)
{
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
- struct tty_struct *tty = port->state->port.tty;
unsigned int ufcon, ch, flag, ufstat, uerstat;
unsigned long flags;
int max_count = 64;
@@ -299,7 +297,7 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id)
ignore_char:
continue;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
out:
spin_unlock_irqrestore(&port->lock, flags);
@@ -1006,7 +1004,6 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
ucon &= ucon_mask;
wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
/* reset both fifos */
wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
@@ -1144,8 +1141,13 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
dbg("resource %p (%lx..%lx)\n", res, res->start, res->end);
+ port->membase = devm_ioremap(port->dev, res->start, resource_size(res));
+ if (!port->membase) {
+ dev_err(port->dev, "failed to remap controller address\n");
+ return -EBUSY;
+ }
+
port->mapbase = res->start;
- port->membase = S3C_VA_UART + (res->start & 0xfffff);
ret = platform_get_irq(platdev, 0);
if (ret < 0)
port->irq = 0;
@@ -1725,8 +1727,6 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
-#else
-#define s3c24xx_uart_dt_match NULL
#endif
static struct platform_driver samsung_serial_driver = {
@@ -1737,7 +1737,7 @@ static struct platform_driver samsung_serial_driver = {
.name = "samsung-uart",
.owner = THIS_MODULE,
.pm = SERIAL_SAMSUNG_PM_OPS,
- .of_match_table = s3c24xx_uart_dt_match,
+ .of_match_table = of_match_ptr(s3c24xx_uart_dt_match),
},
};
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index f76b1688c5c8..a7cdec2962dd 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -384,7 +384,7 @@ static void sbd_receive_chars(struct sbd_port *sport)
uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
}
- tty_flip_buffer_push(uport->state->port.tty);
+ tty_flip_buffer_push(&uport->state->port);
}
static void sbd_transmit_chars(struct sbd_port *sport)
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index aced1dd923d8..c9735680762d 100644
--- a/drivers/tty/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
@@ -136,16 +136,17 @@ static void sc26xx_disable_irq(struct uart_port *port, int mask)
WRITE_SC(port, IMR, up->imr);
}
-static struct tty_struct *receive_chars(struct uart_port *port)
+static bool receive_chars(struct uart_port *port)
{
- struct tty_struct *tty = NULL;
+ struct tty_port *tport = NULL;
int limit = 10000;
unsigned char ch;
char flag;
u8 status;
+ /* FIXME what is this trying to achieve? */
if (port->state != NULL) /* Unopened serial console */
- tty = port->state->port.tty;
+ tport = &port->state->port;
while (limit-- > 0) {
status = READ_SC_PORT(port, SR);
@@ -185,9 +186,9 @@ static struct tty_struct *receive_chars(struct uart_port *port)
if (status & port->ignore_status_mask)
continue;
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
}
- return tty;
+ return !!tport;
}
static void transmit_chars(struct uart_port *port)
@@ -217,36 +218,36 @@ static void transmit_chars(struct uart_port *port)
static irqreturn_t sc26xx_interrupt(int irq, void *dev_id)
{
struct uart_sc26xx_port *up = dev_id;
- struct tty_struct *tty;
unsigned long flags;
+ bool push;
u8 isr;
spin_lock_irqsave(&up->port[0].lock, flags);
- tty = NULL;
+ push = false;
isr = READ_SC(&up->port[0], ISR);
if (isr & ISR_TXRDYA)
transmit_chars(&up->port[0]);
if (isr & ISR_RXRDYA)
- tty = receive_chars(&up->port[0]);
+ push = receive_chars(&up->port[0]);
spin_unlock(&up->port[0].lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&up->port[0].state->port);
spin_lock(&up->port[1].lock);
- tty = NULL;
+ push = false;
if (isr & ISR_TXRDYB)
transmit_chars(&up->port[1]);
if (isr & ISR_RXRDYB)
- tty = receive_chars(&up->port[1]);
+ push = receive_chars(&up->port[1]);
spin_unlock_irqrestore(&up->port[1].lock, flags);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&up->port[1].state->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 418b495e3233..08dbfb88d42c 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -15,6 +15,7 @@
#define SUPPORT_SYSRQ
#endif
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/console.h>
@@ -23,8 +24,9 @@
#include <linux/io.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/sccnxp.h>
+#include <linux/platform_data/serial-sccnxp.h>
#define SCCNXP_NAME "uart-sccnxp"
#define SCCNXP_MAJOR 204
@@ -106,6 +108,7 @@ enum {
struct sccnxp_port {
struct uart_driver uart;
struct uart_port port[SCCNXP_MAX_UARTS];
+ bool opened[SCCNXP_MAX_UARTS];
const char *name;
int irq;
@@ -122,7 +125,10 @@ struct sccnxp_port {
struct console console;
#endif
- struct mutex sccnxp_mutex;
+ spinlock_t lock;
+
+ bool poll;
+ struct timer_list timer;
struct sccnxp_pdata pdata;
};
@@ -174,14 +180,12 @@ static int sccnxp_update_best_err(int a, int b, int *besterr)
return 1;
}
-struct baud_table {
+static const struct {
u8 csr;
u8 acr;
u8 mr0;
int baud;
-};
-
-const struct baud_table baud_std[] = {
+} baud_std[] = {
{ 0, ACR_BAUD0, MR0_BAUD_NORMAL, 50, },
{ 0, ACR_BAUD1, MR0_BAUD_NORMAL, 75, },
{ 1, ACR_BAUD0, MR0_BAUD_NORMAL, 110, },
@@ -285,10 +289,6 @@ static void sccnxp_handle_rx(struct uart_port *port)
{
u8 sr;
unsigned int ch, flag;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
-
- if (!tty)
- return;
for (;;) {
sr = sccnxp_port_read(port, SCCNXP_SR_REG);
@@ -304,14 +304,19 @@ static void sccnxp_handle_rx(struct uart_port *port)
if (unlikely(sr)) {
if (sr & SR_BRK) {
port->icount.brk++;
+ sccnxp_port_write(port, SCCNXP_CR_REG,
+ CR_CMD_BREAK_RESET);
if (uart_handle_break(port))
continue;
} else if (sr & SR_PE)
port->icount.parity++;
else if (sr & SR_FE)
port->icount.frame++;
- else if (sr & SR_OVR)
+ else if (sr & SR_OVR) {
port->icount.overrun++;
+ sccnxp_port_write(port, SCCNXP_CR_REG,
+ CR_CMD_STATUS_RESET);
+ }
sr &= port->read_status_mask;
if (sr & SR_BRK)
@@ -333,9 +338,7 @@ static void sccnxp_handle_rx(struct uart_port *port)
uart_insert_char(port, sr, SR_OVR, ch, flag);
}
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->state->port);
}
static void sccnxp_handle_tx(struct uart_port *port)
@@ -377,31 +380,48 @@ static void sccnxp_handle_tx(struct uart_port *port)
uart_write_wakeup(port);
}
-static irqreturn_t sccnxp_ist(int irq, void *dev_id)
+static void sccnxp_handle_events(struct sccnxp_port *s)
{
int i;
u8 isr;
- struct sccnxp_port *s = (struct sccnxp_port *)dev_id;
-
- mutex_lock(&s->sccnxp_mutex);
- for (;;) {
+ do {
isr = sccnxp_read(&s->port[0], SCCNXP_ISR_REG);
isr &= s->imr;
if (!isr)
break;
- dev_dbg(s->port[0].dev, "IRQ status: 0x%02x\n", isr);
-
for (i = 0; i < s->uart.nr; i++) {
- if (isr & ISR_RXRDY(i))
+ if (s->opened[i] && (isr & ISR_RXRDY(i)))
sccnxp_handle_rx(&s->port[i]);
- if (isr & ISR_TXRDY(i))
+ if (s->opened[i] && (isr & ISR_TXRDY(i)))
sccnxp_handle_tx(&s->port[i]);
}
- }
+ } while (1);
+}
+
+static void sccnxp_timer(unsigned long data)
+{
+ struct sccnxp_port *s = (struct sccnxp_port *)data;
+ unsigned long flags;
- mutex_unlock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
+ sccnxp_handle_events(s);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ if (!timer_pending(&s->timer))
+ mod_timer(&s->timer, jiffies +
+ usecs_to_jiffies(s->pdata.poll_time_us));
+}
+
+static irqreturn_t sccnxp_ist(int irq, void *dev_id)
+{
+ struct sccnxp_port *s = (struct sccnxp_port *)dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ sccnxp_handle_events(s);
+ spin_unlock_irqrestore(&s->lock, flags);
return IRQ_HANDLED;
}
@@ -409,8 +429,9 @@ static irqreturn_t sccnxp_ist(int irq, void *dev_id)
static void sccnxp_start_tx(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
/* Set direction to output */
if (s->flags & SCCNXP_HAVE_IO)
@@ -418,7 +439,7 @@ static void sccnxp_start_tx(struct uart_port *port)
sccnxp_enable_irq(port, IMR_TXRDY);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static void sccnxp_stop_tx(struct uart_port *port)
@@ -429,20 +450,22 @@ static void sccnxp_stop_tx(struct uart_port *port)
static void sccnxp_stop_rx(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static unsigned int sccnxp_tx_empty(struct uart_port *port)
{
u8 val;
+ unsigned long flags;
struct sccnxp_port *s = dev_get_drvdata(port->dev);
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
val = sccnxp_port_read(port, SCCNXP_SR_REG);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
return (val & SR_TXEMT) ? TIOCSER_TEMT : 0;
}
@@ -455,28 +478,30 @@ static void sccnxp_enable_ms(struct uart_port *port)
static void sccnxp_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
if (!(s->flags & SCCNXP_HAVE_IO))
return;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
sccnxp_set_bit(port, DTR_OP, mctrl & TIOCM_DTR);
sccnxp_set_bit(port, RTS_OP, mctrl & TIOCM_RTS);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static unsigned int sccnxp_get_mctrl(struct uart_port *port)
{
u8 bitmask, ipr;
+ unsigned long flags;
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
if (!(s->flags & SCCNXP_HAVE_IO))
return mctrl;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
ipr = ~sccnxp_read(port, SCCNXP_IPCR_REG);
@@ -505,7 +530,7 @@ static unsigned int sccnxp_get_mctrl(struct uart_port *port)
mctrl |= (ipr & bitmask) ? TIOCM_RNG : 0;
}
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
return mctrl;
}
@@ -513,21 +538,23 @@ static unsigned int sccnxp_get_mctrl(struct uart_port *port)
static void sccnxp_break_ctl(struct uart_port *port, int break_state)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
sccnxp_port_write(port, SCCNXP_CR_REG, break_state ?
CR_CMD_START_BREAK : CR_CMD_STOP_BREAK);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static void sccnxp_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
u8 mr1, mr2;
int baud;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
@@ -594,20 +621,22 @@ static void sccnxp_set_termios(struct uart_port *port,
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
+ /* Report actual baudrate back to core */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
/* Enable RX & TX */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int sccnxp_startup(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
if (s->flags & SCCNXP_HAVE_IO) {
/* Outputs are controlled manually */
@@ -626,7 +655,9 @@ static int sccnxp_startup(struct uart_port *port)
/* Enable RX interrupt */
sccnxp_enable_irq(port, IMR_RXRDY);
- mutex_unlock(&s->sccnxp_mutex);
+ s->opened[port->line] = 1;
+
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -634,8 +665,11 @@ static int sccnxp_startup(struct uart_port *port)
static void sccnxp_shutdown(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
+
+ s->opened[port->line] = 0;
/* Disable interrupts */
sccnxp_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
@@ -647,7 +681,7 @@ static void sccnxp_shutdown(struct uart_port *port)
if (s->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(port, DIR_OP, 0);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static const char *sccnxp_type(struct uart_port *port)
@@ -721,10 +755,11 @@ static void sccnxp_console_write(struct console *co, const char *c, unsigned n)
{
struct sccnxp_port *s = (struct sccnxp_port *)co->data;
struct uart_port *port = &s->port[co->index];
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
uart_console_write(port, c, n, sccnxp_console_putchar);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int sccnxp_console_setup(struct console *co, char *options)
@@ -763,7 +798,7 @@ static int sccnxp_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, s);
- mutex_init(&s->sccnxp_mutex);
+ spin_lock_init(&s->lock);
/* Individual chip settings */
switch (chiptype) {
@@ -860,11 +895,19 @@ static int sccnxp_probe(struct platform_device *pdev)
} else
memcpy(&s->pdata, pdata, sizeof(struct sccnxp_pdata));
- s->irq = platform_get_irq(pdev, 0);
- if (s->irq <= 0) {
- dev_err(&pdev->dev, "Missing irq resource data\n");
- ret = -ENXIO;
- goto err_out;
+ if (s->pdata.poll_time_us) {
+ dev_info(&pdev->dev, "Using poll mode, resolution %u usecs\n",
+ s->pdata.poll_time_us);
+ s->poll = 1;
+ }
+
+ if (!s->poll) {
+ s->irq = platform_get_irq(pdev, 0);
+ if (s->irq < 0) {
+ dev_err(&pdev->dev, "Missing irq resource data\n");
+ ret = -ENXIO;
+ goto err_out;
+ }
}
/* Check input frequency */
@@ -875,10 +918,9 @@ static int sccnxp_probe(struct platform_device *pdev)
goto err_out;
}
- membase = devm_request_and_ioremap(&pdev->dev, res);
- if (!membase) {
- dev_err(&pdev->dev, "Failed to ioremap\n");
- ret = -EIO;
+ membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(membase)) {
+ ret = PTR_ERR(membase);
goto err_out;
}
@@ -929,13 +971,23 @@ static int sccnxp_probe(struct platform_device *pdev)
if (s->pdata.init)
s->pdata.init();
- ret = devm_request_threaded_irq(&pdev->dev, s->irq, NULL, sccnxp_ist,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(&pdev->dev), s);
- if (!ret)
+ if (!s->poll) {
+ ret = devm_request_threaded_irq(&pdev->dev, s->irq, NULL,
+ sccnxp_ist,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev_name(&pdev->dev), s);
+ if (!ret)
+ return 0;
+
+ dev_err(&pdev->dev, "Unable to reguest IRQ %i\n", s->irq);
+ } else {
+ init_timer(&s->timer);
+ setup_timer(&s->timer, sccnxp_timer, (unsigned long)s);
+ mod_timer(&s->timer, jiffies +
+ usecs_to_jiffies(s->pdata.poll_time_us));
return 0;
-
- dev_err(&pdev->dev, "Unable to reguest IRQ %i\n", s->irq);
+ }
err_out:
platform_set_drvdata(pdev, NULL);
@@ -948,7 +1000,10 @@ static int sccnxp_remove(struct platform_device *pdev)
int i;
struct sccnxp_port *s = platform_get_drvdata(pdev);
- devm_free_irq(&pdev->dev, s->irq, s);
+ if (!s->poll)
+ devm_free_irq(&pdev->dev, s->irq, s);
+ else
+ del_timer_sync(&s->timer);
for (i = 0; i < s->uart.nr; i++)
uart_remove_one_port(&s->uart, &s->port[i]);
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
new file mode 100644
index 000000000000..372de8ade76a
--- /dev/null
+++ b/drivers/tty/serial/serial-tegra.c
@@ -0,0 +1,1401 @@
+/*
+ * serial_tegra.c
+ *
+ * High-speed serial driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <linux/clk/tegra.h>
+
+#define TEGRA_UART_TYPE "TEGRA_UART"
+#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
+#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
+
+#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
+#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
+#define TEGRA_UART_IER_EORD 0x20
+#define TEGRA_UART_MCR_RTS_EN 0x40
+#define TEGRA_UART_MCR_CTS_EN 0x20
+#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
+ UART_LSR_PE | UART_LSR_FE)
+#define TEGRA_UART_IRDA_CSR 0x08
+#define TEGRA_UART_SIR_ENABLED 0x80
+
+#define TEGRA_UART_TX_PIO 1
+#define TEGRA_UART_TX_DMA 2
+#define TEGRA_UART_MIN_DMA 16
+#define TEGRA_UART_FIFO_SIZE 32
+
+/*
+ * The Tx fifo trigger level setting in the tegra uart is the
+ * reverse of a conventional uart.
+ */
+#define TEGRA_UART_TX_TRIG_16B 0x00
+#define TEGRA_UART_TX_TRIG_8B 0x10
+#define TEGRA_UART_TX_TRIG_4B 0x20
+#define TEGRA_UART_TX_TRIG_1B 0x30
+
+#define TEGRA_UART_MAXIMUM 5
+
+/* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
+#define TEGRA_UART_DEFAULT_BAUD 115200
+#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
+
+/* Tx transfer mode */
+#define TEGRA_TX_PIO 1
+#define TEGRA_TX_DMA 2
+
+/**
+ * tegra_uart_chip_data: SOC specific data.
+ *
+ * @tx_fifo_full_status: Status flag available for checking tx fifo full.
+ * @allow_txfifo_reset_fifo_mode: allow tx fifo reset while in fifo mode or not.
+ * Tegra30 does not allow this.
+ * @support_clk_src_div: Clock source supports the clock divider.
+ */
+struct tegra_uart_chip_data {
+ bool tx_fifo_full_status;
+ bool allow_txfifo_reset_fifo_mode;
+ bool support_clk_src_div;
+};
+
+struct tegra_uart_port {
+ struct uart_port uport;
+ const struct tegra_uart_chip_data *cdata;
+
+ struct clk *uart_clk;
+ unsigned int current_baud;
+
+ /* Register shadow */
+ unsigned long fcr_shadow;
+ unsigned long mcr_shadow;
+ unsigned long lcr_shadow;
+ unsigned long ier_shadow;
+ bool rts_active;
+
+ int tx_in_progress;
+ unsigned int tx_bytes;
+
+ bool enable_modem_interrupt;
+
+ bool rx_timeout;
+ int rx_in_progress;
+ int symb_bit;
+ int dma_req_sel;
+
+ struct dma_chan *rx_dma_chan;
+ struct dma_chan *tx_dma_chan;
+ dma_addr_t rx_dma_buf_phys;
+ dma_addr_t tx_dma_buf_phys;
+ unsigned char *rx_dma_buf_virt;
+ unsigned char *tx_dma_buf_virt;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+ dma_cookie_t tx_cookie;
+ dma_cookie_t rx_cookie;
+ int tx_bytes_requested;
+ int rx_bytes_requested;
+};
+
+static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
+static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
+
+static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
+ unsigned long reg)
+{
+ return readl(tup->uport.membase + (reg << tup->uport.regshift));
+}
+
+static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
+ unsigned long reg)
+{
+ writel(val, tup->uport.membase + (reg << tup->uport.regshift));
+}
+
+static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
+{
+ return container_of(u, struct tegra_uart_port, uport);
+}
+
+static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ /*
+ * RI - Ring detector is active
+ * CD/DCD/CAR - Carrier detect is always active. For some reason
+ * linux has different names for carrier detect.
+ * DSR - Data Set Ready is reported as active since the hardware
+ * doesn't support it; it is unclear whether Linux supports DSR yet.
+ * CTS - Clear to send. Always set to active, as the hardware handles
+ * CTS automatically.
+ */
+ if (tup->enable_modem_interrupt)
+ return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
+ return TIOCM_CTS;
+}
+
+static void set_rts(struct tegra_uart_port *tup, bool active)
+{
+ unsigned long mcr;
+
+ mcr = tup->mcr_shadow;
+ if (active)
+ mcr |= TEGRA_UART_MCR_RTS_EN;
+ else
+ mcr &= ~TEGRA_UART_MCR_RTS_EN;
+ if (mcr != tup->mcr_shadow) {
+ tegra_uart_write(tup, mcr, UART_MCR);
+ tup->mcr_shadow = mcr;
+ }
+ return;
+}
+
+static void set_dtr(struct tegra_uart_port *tup, bool active)
+{
+ unsigned long mcr;
+
+ mcr = tup->mcr_shadow;
+ if (active)
+ mcr |= UART_MCR_DTR;
+ else
+ mcr &= ~UART_MCR_DTR;
+ if (mcr != tup->mcr_shadow) {
+ tegra_uart_write(tup, mcr, UART_MCR);
+ tup->mcr_shadow = mcr;
+ }
+ return;
+}
+
+static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned long mcr;
+ int dtr_enable;
+
+ mcr = tup->mcr_shadow;
+ tup->rts_active = !!(mctrl & TIOCM_RTS);
+ set_rts(tup, tup->rts_active);
+
+ dtr_enable = !!(mctrl & TIOCM_DTR);
+ set_dtr(tup, dtr_enable);
+ return;
+}
+
+static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned long lcr;
+
+ lcr = tup->lcr_shadow;
+ if (break_ctl)
+ lcr |= UART_LCR_SBC;
+ else
+ lcr &= ~UART_LCR_SBC;
+ tegra_uart_write(tup, lcr, UART_LCR);
+ tup->lcr_shadow = lcr;
+}
+
+/* Wait for a symbol-time. */
+static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
+ unsigned int syms)
+{
+ if (tup->current_baud)
+ udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
+ tup->current_baud));
+}
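+
+/*
+ * For example, at 115200 baud with symb_bit = 10 (assuming an 8N1
+ * frame: start + 8 data + stop), one symbol takes roughly
+ * 10 * 1000000 / 115200 ~= 87 usecs, so a two-symbol wait delays
+ * about 174 usecs.
+ */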
+
+static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
+{
+ unsigned long fcr = tup->fcr_shadow;
+
+ if (tup->cdata->allow_txfifo_reset_fifo_mode) {
+ fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ tegra_uart_write(tup, fcr, UART_FCR);
+ } else {
+ fcr &= ~UART_FCR_ENABLE_FIFO;
+ tegra_uart_write(tup, fcr, UART_FCR);
+ udelay(60);
+ fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ tegra_uart_write(tup, fcr, UART_FCR);
+ fcr |= UART_FCR_ENABLE_FIFO;
+ tegra_uart_write(tup, fcr, UART_FCR);
+ }
+
+ /* Dummy read to ensure the write is posted */
+ tegra_uart_read(tup, UART_SCR);
+
+ /* Wait for the flush to propagate. */
+ tegra_uart_wait_sym_time(tup, 1);
+}
+
+static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
+{
+ unsigned long rate;
+ unsigned int divisor;
+ unsigned long lcr;
+ int ret;
+
+ if (tup->current_baud == baud)
+ return 0;
+
+ if (tup->cdata->support_clk_src_div) {
+ rate = baud * 16;
+ ret = clk_set_rate(tup->uart_clk, rate);
+ if (ret < 0) {
+ dev_err(tup->uport.dev,
+ "clk_set_rate() failed for rate %lu\n", rate);
+ return ret;
+ }
+ divisor = 1;
+ } else {
+ rate = clk_get_rate(tup->uart_clk);
+ divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
+ }
+
+ lcr = tup->lcr_shadow;
+ lcr |= UART_LCR_DLAB;
+ tegra_uart_write(tup, lcr, UART_LCR);
+
+ tegra_uart_write(tup, divisor & 0xFF, UART_TX);
+ tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
+
+ lcr &= ~UART_LCR_DLAB;
+ tegra_uart_write(tup, lcr, UART_LCR);
+
+ /* Dummy read to ensure the write is posted */
+ tegra_uart_read(tup, UART_SCR);
+
+ tup->current_baud = baud;
+
+ /* wait two character intervals at new rate */
+ tegra_uart_wait_sym_time(tup, 2);
+ return 0;
+}
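+
+/*
+ * Example: without clk_src_div support and assuming a fixed 216 MHz
+ * uart clock, a 115200 baud request gives
+ * divisor = DIV_ROUND_CLOSEST(216000000, 115200 * 16) = 117, which is
+ * then written as DLL/DLM through the UART_TX/UART_IER offsets while
+ * UART_LCR_DLAB is set.
+ */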
+
+static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
+ unsigned long lsr)
+{
+ char flag = TTY_NORMAL;
+
+ if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
+ if (lsr & UART_LSR_OE) {
+ /* Overrun error */
+ flag |= TTY_OVERRUN;
+ tup->uport.icount.overrun++;
+ dev_err(tup->uport.dev, "Got overrun errors\n");
+ } else if (lsr & UART_LSR_PE) {
+ /* Parity error */
+ flag |= TTY_PARITY;
+ tup->uport.icount.parity++;
+ dev_err(tup->uport.dev, "Got Parity errors\n");
+ } else if (lsr & UART_LSR_FE) {
+ flag |= TTY_FRAME;
+ tup->uport.icount.frame++;
+ dev_err(tup->uport.dev, "Got frame errors\n");
+ } else if (lsr & UART_LSR_BI) {
+ dev_err(tup->uport.dev, "Got Break\n");
+ tup->uport.icount.brk++;
+ /* If FIFO read error without any data, reset Rx FIFO */
+ if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
+ tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
+ }
+ }
+ return flag;
+}
+
+static int tegra_uart_request_port(struct uart_port *u)
+{
+ return 0;
+}
+
+static void tegra_uart_release_port(struct uart_port *u)
+{
+ /* Nothing to do here */
+}
+
+static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
+{
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+ int i;
+
+ for (i = 0; i < max_bytes; i++) {
+ BUG_ON(uart_circ_empty(xmit));
+ if (tup->cdata->tx_fifo_full_status) {
+ unsigned long lsr = tegra_uart_read(tup, UART_LSR);
+ if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
+ break;
+ }
+ tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ tup->uport.icount.tx++;
+ }
+}
+
+static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
+ unsigned int bytes)
+{
+ if (bytes > TEGRA_UART_MIN_DMA)
+ bytes = TEGRA_UART_MIN_DMA;
+
+ tup->tx_in_progress = TEGRA_UART_TX_PIO;
+ tup->tx_bytes = bytes;
+ tup->ier_shadow |= UART_IER_THRI;
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+}
+
+static void tegra_uart_tx_dma_complete(void *args)
+{
+ struct tegra_uart_port *tup = args;
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+ struct dma_tx_state state;
+ unsigned long flags;
+ int count;
+
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+ spin_lock_irqsave(&tup->uport.lock, flags);
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ tup->tx_in_progress = 0;
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&tup->uport);
+ tegra_uart_start_next_tx(tup);
+ spin_unlock_irqrestore(&tup->uport.lock, flags);
+}
+
+static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
+ unsigned long count)
+{
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+ dma_addr_t tx_phys_addr;
+
+ dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ tup->tx_bytes = count & ~(0xF);
+ tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
+ tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
+ tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tup->tx_dma_desc) {
+ dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
+ tup->tx_dma_desc->callback_param = tup;
+ tup->tx_in_progress = TEGRA_UART_TX_DMA;
+ tup->tx_bytes_requested = tup->tx_bytes;
+ tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
+ dma_async_issue_pending(tup->tx_dma_chan);
+ return 0;
+}
+
+static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
+{
+ unsigned long tail;
+ unsigned long count;
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+
+ tail = (unsigned long)&xmit->buf[xmit->tail];
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (!count)
+ return;
+
+ if (count < TEGRA_UART_MIN_DMA)
+ tegra_uart_start_pio_tx(tup, count);
+ else if (BYTES_TO_ALIGN(tail) > 0)
+ tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
+ else
+ tegra_uart_start_tx_dma(tup, count);
+}
+
+/* Called by serial core driver with u->lock taken. */
+static void tegra_uart_start_tx(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ struct circ_buf *xmit = &u->state->xmit;
+
+ if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
+ tegra_uart_start_next_tx(tup);
+}
+
+static unsigned int tegra_uart_tx_empty(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&u->lock, flags);
+ if (!tup->tx_in_progress) {
+ unsigned long lsr = tegra_uart_read(tup, UART_LSR);
+ if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
+ ret = TIOCSER_TEMT;
+ }
+ spin_unlock_irqrestore(&u->lock, flags);
+ return ret;
+}
+
+static void tegra_uart_stop_tx(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+ struct dma_tx_state state;
+ int count;
+
+ dmaengine_terminate_all(tup->tx_dma_chan);
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ tup->tx_in_progress = 0;
+ return;
+}
+
+static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
+{
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+
+ tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
+ tup->tx_in_progress = 0;
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&tup->uport);
+ tegra_uart_start_next_tx(tup);
+ return;
+}
+
+static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
+ struct tty_port *tty)
+{
+ do {
+ char flag = TTY_NORMAL;
+ unsigned long lsr = 0;
+ unsigned char ch;
+
+ lsr = tegra_uart_read(tup, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ break;
+
+ flag = tegra_uart_decode_rx_error(tup, lsr);
+ ch = (unsigned char) tegra_uart_read(tup, UART_RX);
+ tup->uport.icount.rx++;
+
+ if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
+ tty_insert_flip_char(tty, ch, flag);
+ } while (1);
+
+ return;
+}
+
+static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
+ struct tty_port *tty, int count)
+{
+ int copied;
+
+ tup->uport.icount.rx += count;
+ if (!tty) {
+ dev_err(tup->uport.dev, "No tty port\n");
+ return;
+ }
+ dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ copied = tty_insert_flip_string(tty,
+ ((unsigned char *)(tup->rx_dma_buf_virt)), count);
+ if (copied != count) {
+ WARN_ON(1);
+ dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
+ }
+ dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+}
+
+static void tegra_uart_rx_dma_complete(void *args)
+{
+ struct tegra_uart_port *tup = args;
+ struct uart_port *u = &tup->uport;
+ int count = tup->rx_bytes_requested;
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &u->state->port;
+ unsigned long flags;
+
+ async_tx_ack(tup->rx_dma_desc);
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Deactivate flow control to stop sender */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ /* If we are here, DMA is stopped */
+ if (count)
+ tegra_uart_copy_rx_to_tty(tup, port, count);
+
+ tegra_uart_handle_rx_pio(tup, port);
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+ tegra_uart_start_rx_dma(tup);
+
+ /* Activate flow control to start transfer */
+ if (tup->rts_active)
+ set_rts(tup, true);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+}
+
+static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+{
+ struct dma_tx_state state;
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &tup->uport.state->port;
+ int count;
+
+ /* Deactivate flow control to stop sender */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ dmaengine_terminate_all(tup->rx_dma_chan);
+ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+ count = tup->rx_bytes_requested - state.residue;
+
+ /* If we are here, DMA is stopped */
+ if (count)
+ tegra_uart_copy_rx_to_tty(tup, port, count);
+
+ tegra_uart_handle_rx_pio(tup, port);
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+ tegra_uart_start_rx_dma(tup);
+
+ if (tup->rts_active)
+ set_rts(tup, true);
+}
+
+static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
+{
+ unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
+
+ tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
+ tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!tup->rx_dma_desc) {
+ dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
+ tup->rx_dma_desc->callback_param = tup;
+ dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
+ count, DMA_TO_DEVICE);
+ tup->rx_bytes_requested = count;
+ tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
+ dma_async_issue_pending(tup->rx_dma_chan);
+ return 0;
+}
+
+static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned long msr;
+
+ msr = tegra_uart_read(tup, UART_MSR);
+ if (!(msr & UART_MSR_ANY_DELTA))
+ return;
+
+ if (msr & UART_MSR_TERI)
+ tup->uport.icount.rng++;
+ if (msr & UART_MSR_DDSR)
+ tup->uport.icount.dsr++;
+ /* We may only get DDCD during HW init and reset */
+ if (msr & UART_MSR_DDCD)
+ uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
+ /* Will start/stop_tx accordingly */
+ if (msr & UART_MSR_DCTS)
+ uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
+ return;
+}
+
+static irqreturn_t tegra_uart_isr(int irq, void *data)
+{
+ struct tegra_uart_port *tup = data;
+ struct uart_port *u = &tup->uport;
+ unsigned long iir;
+ unsigned long ier;
+ bool is_rx_int = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&u->lock, flags);
+ while (1) {
+ iir = tegra_uart_read(tup, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ if (is_rx_int) {
+ tegra_uart_handle_rx_dma(tup);
+ if (tup->rx_in_progress) {
+ ier = tup->ier_shadow;
+ ier |= (UART_IER_RLSI | UART_IER_RTOIE |
+ TEGRA_UART_IER_EORD);
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
+ }
+ }
+ spin_unlock_irqrestore(&u->lock, flags);
+ return IRQ_HANDLED;
+ }
+
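+ /* IIR bits 3:1 identify the highest-priority pending interrupt source. */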
+ switch ((iir >> 1) & 0x7) {
+ case 0: /* Modem signal change interrupt */
+ tegra_uart_handle_modem_signal_change(u);
+ break;
+
+ case 1: /* Transmit interrupt only triggered when using PIO */
+ tup->ier_shadow &= ~UART_IER_THRI;
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ tegra_uart_handle_tx_pio(tup);
+ break;
+
+ case 4: /* End of data */
+ case 6: /* Rx timeout */
+ case 2: /* Receive */
+ if (!is_rx_int) {
+ is_rx_int = true;
+ /* Disable Rx interrupts */
+ ier = tup->ier_shadow;
+ ier |= UART_IER_RDI;
+ tegra_uart_write(tup, ier, UART_IER);
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI |
+ UART_IER_RTOIE | TEGRA_UART_IER_EORD);
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
+ }
+ break;
+
+ case 3: /* Receive error */
+ tegra_uart_decode_rx_error(tup,
+ tegra_uart_read(tup, UART_LSR));
+ break;
+
+ case 5: /* break nothing to handle */
+ case 7: /* break nothing to handle */
+ break;
+ }
+ }
+}
+
+static void tegra_uart_stop_rx(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &u->state->port;
+ struct dma_tx_state state;
+ unsigned long ier;
+ int count;
+
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ if (!tup->rx_in_progress)
+ return;
+
+ tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
+
+ ier = tup->ier_shadow;
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
+ TEGRA_UART_IER_EORD);
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
+ tup->rx_in_progress = 0;
+ if (tup->rx_dma_chan) {
+ dmaengine_terminate_all(tup->rx_dma_chan);
+ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+ async_tx_ack(tup->rx_dma_desc);
+ count = tup->rx_bytes_requested - state.residue;
+ tegra_uart_copy_rx_to_tty(tup, port, count);
+ tegra_uart_handle_rx_pio(tup, port);
+ } else {
+ tegra_uart_handle_rx_pio(tup, port);
+ }
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+ return;
+}
+
+static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
+{
+ unsigned long flags;
+ unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
+ unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
+ unsigned long wait_time;
+ unsigned long lsr;
+ unsigned long msr;
+ unsigned long mcr;
+
+ /* Disable interrupts */
+ tegra_uart_write(tup, 0, UART_IER);
+
+ lsr = tegra_uart_read(tup, UART_LSR);
+ if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
+ msr = tegra_uart_read(tup, UART_MSR);
+ mcr = tegra_uart_read(tup, UART_MCR);
+ if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
+ dev_err(tup->uport.dev,
+ "Tx Fifo not empty, CTS disabled, waiting\n");
+
+ /* Wait for Tx fifo to be empty */
+ while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
+ wait_time = min(fifo_empty_time, 100lu);
+ udelay(wait_time);
+ fifo_empty_time -= wait_time;
+ if (!fifo_empty_time) {
+ msr = tegra_uart_read(tup, UART_MSR);
+ mcr = tegra_uart_read(tup, UART_MCR);
+ if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
+ (msr & UART_MSR_CTS))
+ dev_err(tup->uport.dev,
+ "Slave not ready\n");
+ break;
+ }
+ lsr = tegra_uart_read(tup, UART_LSR);
+ }
+ }
+
+ spin_lock_irqsave(&tup->uport.lock, flags);
+ /* Reset the Rx and Tx FIFOs */
+ tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
+ tup->current_baud = 0;
+ spin_unlock_irqrestore(&tup->uport.lock, flags);
+
+ clk_disable_unprepare(tup->uart_clk);
+}
+
+static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+{
+ int ret;
+
+ tup->fcr_shadow = 0;
+ tup->mcr_shadow = 0;
+ tup->lcr_shadow = 0;
+ tup->ier_shadow = 0;
+ tup->current_baud = 0;
+
+ clk_prepare_enable(tup->uart_clk);
+
+ /* Reset the UART controller to clear all previous status. */
+ tegra_periph_reset_assert(tup->uart_clk);
+ udelay(10);
+ tegra_periph_reset_deassert(tup->uart_clk);
+
+ tup->rx_in_progress = 0;
+ tup->tx_in_progress = 0;
+
+ /*
+ * Set the trigger level
+ *
+ * For PIO mode:
+ *
+ * For receive, this will interrupt the CPU after that many bytes have
+ * been received; the remaining bytes are collected by the receive
+ * timeout interrupt. Rx high watermark is set to 4.
+ *
+ * For transmit, if the transmit interrupt is enabled, this will
+ * interrupt the CPU when the number of entries in the FIFO reaches the
+ * low watermark. Tx low watermark is set to 16 bytes.
+ *
+ * For DMA mode:
+ *
+ * Set the Tx trigger to 16. This should match the DMA burst size
+ * programmed in the DMA registers.
+ */
+ tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
+ tup->fcr_shadow |= UART_FCR_R_TRIG_01;
+ tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
+ tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
+
+ /*
+ * Initialize the UART with default configuration
+ * (115200, N, 8, 1) so that the receive DMA buffer may be
+ * enqueued
+ */
+ tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
+ tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
+ tup->fcr_shadow |= UART_FCR_DMA_SELECT;
+ tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
+
+ ret = tegra_uart_start_rx_dma(tup);
+ if (ret < 0) {
+ dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
+ return ret;
+ }
+ tup->rx_in_progress = 1;
+
+ /*
+ * Enable IE_RXS for the receive status interrupts like line errors.
+ * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
+ *
+ * If using DMA mode, enable EORD instead of receive interrupt which
+ * will interrupt after the UART is done with the receive instead of
+ * the interrupt when the FIFO "threshold" is reached.
+ *
+ * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
+ * when data is sitting in the FIFO and cannot be transferred to the
+ * DMA because the DMA size alignment (4 bytes) is not met. EORD is
+ * triggered when the incoming data stream pauses for 4 character
+ * times.
+ *
+ * For pauses in data that is not aligned to 4 bytes, we get both
+ * EORD and RX_TIMEOUT - SW sees RX_TIMEOUT first, then EORD.
+ */
+ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ return 0;
+}
+
+static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ unsigned char *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!dma_chan) {
+ dev_err(tup->uport.dev,
+ "Dma channel is not available, will try later\n");
+ return -EPROBE_DEFER;
+ }
+
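+ /* Rx uses a dedicated coherent buffer; Tx maps the circular xmit buffer directly. */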
+ if (dma_to_memory) {
+ dma_buf = dma_alloc_coherent(tup->uport.dev,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tup->uport.dev,
+ "Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+ } else {
+ dma_phys = dma_map_single(tup->uport.dev,
+ tup->uport.state->xmit.buf, UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ dma_buf = tup->uport.state->xmit.buf;
+ }
+
+ dma_sconfig.slave_id = tup->dma_req_sel;
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tup->uport.mapbase;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_sconfig.src_maxburst = 4;
+ } else {
+ dma_sconfig.dst_addr = tup->uport.mapbase;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_sconfig.dst_maxburst = 16;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tup->uport.dev,
+ "Dma slave config failed, err = %d\n", ret);
+ goto scrub;
+ }
+
+ if (dma_to_memory) {
+ tup->rx_dma_chan = dma_chan;
+ tup->rx_dma_buf_virt = dma_buf;
+ tup->rx_dma_buf_phys = dma_phys;
+ } else {
+ tup->tx_dma_chan = dma_chan;
+ tup->tx_dma_buf_virt = dma_buf;
+ tup->tx_dma_buf_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
+ tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
+ dma_chan = tup->rx_dma_chan;
+ tup->rx_dma_chan = NULL;
+ tup->rx_dma_buf_phys = 0;
+ tup->rx_dma_buf_virt = NULL;
+ } else {
+ dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_chan = tup->tx_dma_chan;
+ tup->tx_dma_chan = NULL;
+ tup->tx_dma_buf_phys = 0;
+ tup->tx_dma_buf_virt = NULL;
+ }
+ dma_release_channel(dma_chan);
+}
+
+static int tegra_uart_startup(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ int ret;
+
+ ret = tegra_uart_dma_channel_allocate(tup, false);
+ if (ret < 0) {
+ dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = tegra_uart_dma_channel_allocate(tup, true);
+ if (ret < 0) {
+ dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
+ goto fail_rx_dma;
+ }
+
+ ret = tegra_uart_hw_init(tup);
+ if (ret < 0) {
+ dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
+ goto fail_hw_init;
+ }
+
+ ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
+ dev_name(u->dev), tup);
+ if (ret < 0) {
+ dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
+ goto fail_hw_init;
+ }
+ return 0;
+
+fail_hw_init:
+ tegra_uart_dma_channel_free(tup, true);
+fail_rx_dma:
+ tegra_uart_dma_channel_free(tup, false);
+ return ret;
+}
+
+static void tegra_uart_shutdown(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ tegra_uart_hw_deinit(tup);
+
+ tup->rx_in_progress = 0;
+ tup->tx_in_progress = 0;
+
+ tegra_uart_dma_channel_free(tup, true);
+ tegra_uart_dma_channel_free(tup, false);
+ free_irq(u->irq, tup);
+}
+
+static void tegra_uart_enable_ms(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ if (tup->enable_modem_interrupt) {
+ tup->ier_shadow |= UART_IER_MSI;
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ }
+}
+
+static void tegra_uart_set_termios(struct uart_port *u,
+ struct ktermios *termios, struct ktermios *oldtermios)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned int baud;
+ unsigned long flags;
+ unsigned int lcr;
+ int symb_bit = 1;
+ struct clk *parent_clk = clk_get_parent(tup->uart_clk);
+ unsigned long parent_clk_rate = clk_get_rate(parent_clk);
+ int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
+
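+ /* Account for the fixed /16 oversampling when computing the lowest usable baud rate. */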
+ max_divider *= 16;
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Changing configuration, it is safe to stop any rx now */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ /* Clear all interrupts as configuration is going to be changed */
+ tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
+ tegra_uart_read(tup, UART_IER);
+ tegra_uart_write(tup, 0, UART_IER);
+ tegra_uart_read(tup, UART_IER);
+
+ /* Parity */
+ lcr = tup->lcr_shadow;
+ lcr &= ~UART_LCR_PARITY;
+
+ /* CMSPAR isn't supported by this driver */
+ termios->c_cflag &= ~CMSPAR;
+
+ if ((termios->c_cflag & PARENB) == PARENB) {
+ symb_bit++;
+ if (termios->c_cflag & PARODD) {
+ lcr |= UART_LCR_PARITY;
+ lcr &= ~UART_LCR_EPAR;
+ lcr &= ~UART_LCR_SPAR;
+ } else {
+ lcr |= UART_LCR_PARITY;
+ lcr |= UART_LCR_EPAR;
+ lcr &= ~UART_LCR_SPAR;
+ }
+ }
+
+ lcr &= ~UART_LCR_WLEN8;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ symb_bit += 5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ symb_bit += 6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ symb_bit += 7;
+ break;
+ default:
+ lcr |= UART_LCR_WLEN8;
+ symb_bit += 8;
+ break;
+ }
+
+ /* Stop bits */
+ if (termios->c_cflag & CSTOPB) {
+ lcr |= UART_LCR_STOP;
+ symb_bit += 2;
+ } else {
+ lcr &= ~UART_LCR_STOP;
+ symb_bit++;
+ }
+
+ tegra_uart_write(tup, lcr, UART_LCR);
+ tup->lcr_shadow = lcr;
+ tup->symb_bit = symb_bit;
+
+ /* Baud rate. */
+ baud = uart_get_baud_rate(u, termios, oldtermios,
+ parent_clk_rate/max_divider,
+ parent_clk_rate/16);
+ spin_unlock_irqrestore(&u->lock, flags);
+ tegra_set_baudrate(tup, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Flow control */
+ if (termios->c_cflag & CRTSCTS) {
+ tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
+ tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
+ tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
+ /* if top layer has asked to set rts active then do so here */
+ if (tup->rts_active)
+ set_rts(tup, true);
+ } else {
+ tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
+ tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
+ tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
+ }
+
+ /* update the port timeout based on new settings */
+ uart_update_timeout(u, termios->c_cflag, baud);
+
+ /* Make sure all writes have completed */
+ tegra_uart_read(tup, UART_IER);
+
+ /* Reenable interrupt */
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ tegra_uart_read(tup, UART_IER);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+ return;
+}
+
+/*
+ * Flush any TX data submitted for DMA and PIO. Called when the
+ * TX circular buffer is reset.
+ */
+static void tegra_uart_flush_buffer(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ tup->tx_bytes = 0;
+ if (tup->tx_dma_chan)
+ dmaengine_terminate_all(tup->tx_dma_chan);
+ return;
+}
+
+static const char *tegra_uart_type(struct uart_port *u)
+{
+ return TEGRA_UART_TYPE;
+}
+
+static struct uart_ops tegra_uart_ops = {
+ .tx_empty = tegra_uart_tx_empty,
+ .set_mctrl = tegra_uart_set_mctrl,
+ .get_mctrl = tegra_uart_get_mctrl,
+ .stop_tx = tegra_uart_stop_tx,
+ .start_tx = tegra_uart_start_tx,
+ .stop_rx = tegra_uart_stop_rx,
+ .flush_buffer = tegra_uart_flush_buffer,
+ .enable_ms = tegra_uart_enable_ms,
+ .break_ctl = tegra_uart_break_ctl,
+ .startup = tegra_uart_startup,
+ .shutdown = tegra_uart_shutdown,
+ .set_termios = tegra_uart_set_termios,
+ .type = tegra_uart_type,
+ .request_port = tegra_uart_request_port,
+ .release_port = tegra_uart_release_port,
+};
+
+static struct uart_driver tegra_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "tegra_hsuart",
+ .dev_name = "ttyTHS",
+ .cons = 0,
+ .nr = TEGRA_UART_MAXIMUM,
+};
+
+static int tegra_uart_parse_dt(struct platform_device *pdev,
+ struct tegra_uart_port *tup)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 of_dma[2];
+ int port;
+
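+ /* The second cell of "nvidia,dma-request-selector" is the DMA request line. */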
+ if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
+ of_dma, 2) >= 0) {
+ tup->dma_req_sel = of_dma[1];
+ } else {
+ dev_err(&pdev->dev, "missing dma requestor in device tree\n");
+ return -EINVAL;
+ }
+
+ port = of_alias_get_id(np, "serial");
+ if (port < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
+ return port;
+ }
+ tup->uport.line = port;
+
+ tup->enable_modem_interrupt = of_property_read_bool(np,
+ "nvidia,enable-modem-interrupt");
+ return 0;
+}
+
+struct tegra_uart_chip_data tegra20_uart_chip_data = {
+ .tx_fifo_full_status = false,
+ .allow_txfifo_reset_fifo_mode = true,
+ .support_clk_src_div = false,
+};
+
+struct tegra_uart_chip_data tegra30_uart_chip_data = {
+ .tx_fifo_full_status = true,
+ .allow_txfifo_reset_fifo_mode = false,
+ .support_clk_src_div = true,
+};
+
+static struct of_device_id tegra_uart_of_match[] = {
+ {
+ .compatible = "nvidia,tegra30-hsuart",
+ .data = &tegra30_uart_chip_data,
+ }, {
+ .compatible = "nvidia,tegra20-hsuart",
+ .data = &tegra20_uart_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
+
+static int tegra_uart_probe(struct platform_device *pdev)
+{
+ struct tegra_uart_port *tup;
+ struct uart_port *u;
+ struct resource *resource;
+ int ret;
+ const struct tegra_uart_chip_data *cdata;
+ const struct of_device_id *match;
+
+ match = of_match_device(tegra_uart_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ cdata = match->data;
+
+ tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
+ if (!tup) {
+ dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
+ return -ENOMEM;
+ }
+
+ ret = tegra_uart_parse_dt(pdev, tup);
+ if (ret < 0)
+ return ret;
+
+ u = &tup->uport;
+ u->dev = &pdev->dev;
+ u->ops = &tegra_uart_ops;
+ u->type = PORT_TEGRA;
+ u->fifosize = 32;
+ tup->cdata = cdata;
+
+ platform_set_drvdata(pdev, tup);
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ return -ENODEV;
+ }
+
+ u->mapbase = resource->start;
+ u->membase = devm_request_and_ioremap(&pdev->dev, resource);
+ if (!u->membase) {
+ dev_err(&pdev->dev, "memregion/iomap address req failed\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tup->uart_clk)) {
+ dev_err(&pdev->dev, "Couldn't get the clock\n");
+ return PTR_ERR(tup->uart_clk);
+ }
+
+ u->iotype = UPIO_MEM32;
+ u->irq = platform_get_irq(pdev, 0);
+ u->regshift = 2;
+ ret = uart_add_one_port(&tegra_uart_driver, u);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int tegra_uart_remove(struct platform_device *pdev)
+{
+ struct tegra_uart_port *tup = platform_get_drvdata(pdev);
+ struct uart_port *u = &tup->uport;
+
+ uart_remove_one_port(&tegra_uart_driver, u);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_uart_suspend(struct device *dev)
+{
+ struct tegra_uart_port *tup = dev_get_drvdata(dev);
+ struct uart_port *u = &tup->uport;
+
+ return uart_suspend_port(&tegra_uart_driver, u);
+}
+
+static int tegra_uart_resume(struct device *dev)
+{
+ struct tegra_uart_port *tup = dev_get_drvdata(dev);
+ struct uart_port *u = &tup->uport;
+
+ return uart_resume_port(&tegra_uart_driver, u);
+}
+#endif
+
+static const struct dev_pm_ops tegra_uart_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
+};
+
+static struct platform_driver tegra_uart_platform_driver = {
+ .probe = tegra_uart_probe,
+ .remove = tegra_uart_remove,
+ .driver = {
+ .name = "serial-tegra",
+ .of_match_table = tegra_uart_of_match,
+ .pm = &tegra_uart_pm_ops,
+ },
+};
+
+static int __init tegra_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&tegra_uart_driver);
+ if (ret < 0) {
+ pr_err("Could not register %s driver\n",
+ tegra_uart_driver.driver_name);
+ return ret;
+ }
+
+ ret = platform_driver_register(&tegra_uart_platform_driver);
+ if (ret < 0) {
+ pr_err("Uart platfrom driver register failed, e = %d\n", ret);
+ uart_unregister_driver(&tegra_uart_driver);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit tegra_uart_exit(void)
+{
+ pr_info("Unloading tegra uart driver\n");
+ platform_driver_unregister(&tegra_uart_platform_driver);
+ uart_unregister_driver(&tegra_uart_driver);
+}
+
+module_init(tegra_uart_init);
+module_exit(tegra_uart_exit);
+
+MODULE_ALIAS("platform:serial-tegra");
+MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2c7230aaefd4..a400002dfa84 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -59,7 +59,8 @@ static struct lock_class_key port_lock_key;
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
-static void uart_change_pm(struct uart_state *state, int pm_state);
+static void uart_change_pm(struct uart_state *state,
+ enum uart_pm_state pm_state);
static void uart_port_shutdown(struct tty_port *port);
@@ -866,9 +867,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
port->closing_wait = closing_wait;
if (new_info->xmit_fifo_size)
uport->fifosize = new_info->xmit_fifo_size;
- if (port->tty)
- port->tty->low_latency =
- (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
check_and_exit:
retval = 0;
@@ -1308,9 +1307,10 @@ static void uart_set_termios(struct tty_struct *tty,
}
/*
- * In 2.4.5, calls to this will be serialized via the BKL in
- * linux/drivers/char/tty_io.c:tty_release()
- * linux/drivers/char/tty_io.c:do_tty_handup()
+ * Calls to uart_close() are serialised via the tty_lock in
+ * drivers/tty/tty_io.c:tty_release()
+ * drivers/tty/tty_io.c:do_tty_hangup()
+ * This runs from a workqueue and can sleep for a _short_ time only.
*/
static void uart_close(struct tty_struct *tty, struct file *filp)
{
@@ -1365,7 +1365,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
spin_unlock_irqrestore(&port->lock, flags);
- uart_change_pm(state, 3);
+ uart_change_pm(state, UART_PM_STATE_OFF);
spin_lock_irqsave(&port->lock, flags);
}
@@ -1437,10 +1437,9 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
}
/*
- * This is called with the BKL held in
- * linux/drivers/char/tty_io.c:do_tty_hangup()
- * We're called from the eventd thread, so we can sleep for
- * a _short_ time only.
+ * Calls to uart_hangup() are serialised by the tty_lock in
+ * drivers/tty/tty_io.c:do_tty_hangup()
+ * This runs from a workqueue and can sleep for a _short_ time only.
*/
static void uart_hangup(struct tty_struct *tty)
{
@@ -1521,8 +1520,8 @@ static void uart_dtr_rts(struct tty_port *port, int onoff)
}
/*
- * calls to uart_open are serialised by the BKL in
- * fs/char_dev.c:chrdev_open()
+ * Calls to uart_open are serialised by the tty_lock in
+ * drivers/tty/tty_io.c:tty_open()
* Note that if this fails, then uart_close() _will_ be called.
*
* In time, we want to scrap the "opening nonpresent ports"
@@ -1564,7 +1563,8 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
*/
tty->driver_data = state;
state->uart_port->state = state;
- tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
+ state->port.low_latency =
+ (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
tty_port_tty_set(port, tty);
/*
@@ -1579,7 +1579,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
* Make sure the device is in D0 state.
*/
if (port->count == 1)
- uart_change_pm(state, 0);
+ uart_change_pm(state, UART_PM_STATE_ON);
/*
* Start up the serial port.
@@ -1620,7 +1620,7 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
{
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
- int pm_state;
+ enum uart_pm_state pm_state;
struct uart_port *uport = state->uart_port;
char stat_buf[32];
unsigned int status;
@@ -1645,12 +1645,12 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
if (capable(CAP_SYS_ADMIN)) {
mutex_lock(&port->mutex);
pm_state = state->pm_state;
- if (pm_state)
- uart_change_pm(state, 0);
+ if (pm_state != UART_PM_STATE_ON)
+ uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
status = uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
- if (pm_state)
+ if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, pm_state);
mutex_unlock(&port->mutex);
@@ -1897,7 +1897,8 @@ EXPORT_SYMBOL_GPL(uart_set_options);
*
* Locking: port->mutex has to be held
*/
-static void uart_change_pm(struct uart_state *state, int pm_state)
+static void uart_change_pm(struct uart_state *state,
+ enum uart_pm_state pm_state)
{
struct uart_port *port = state->uart_port;
@@ -1982,7 +1983,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
console_stop(uport->cons);
if (console_suspend_enabled || !uart_console(uport))
- uart_change_pm(state, 3);
+ uart_change_pm(state, UART_PM_STATE_OFF);
mutex_unlock(&port->mutex);
@@ -2027,7 +2028,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
termios = port->tty->termios;
if (console_suspend_enabled)
- uart_change_pm(state, 0);
+ uart_change_pm(state, UART_PM_STATE_ON);
uport->ops->set_termios(uport, &termios, NULL);
if (console_suspend_enabled)
console_start(uport->cons);
@@ -2037,7 +2038,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
const struct uart_ops *ops = uport->ops;
int ret;
- uart_change_pm(state, 0);
+ uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
@@ -2137,7 +2138,7 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_report_port(drv, port);
/* Power up port for set_mctrl() */
- uart_change_pm(state, 0);
+ uart_change_pm(state, UART_PM_STATE_ON);
/*
* Ensure that the modem control lines are de-activated.
@@ -2161,7 +2162,7 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
* console if we have one.
*/
if (!uart_console(port))
- uart_change_pm(state, 3);
+ uart_change_pm(state, UART_PM_STATE_OFF);
}
}
@@ -2588,7 +2589,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
}
state->uart_port = uport;
- state->pm_state = -1;
+ state->pm_state = UART_PM_STATE_UNDEFINED;
uport->cons = drv->cons;
uport->state = state;
@@ -2642,6 +2643,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
+ int ret = 0;
BUG_ON(in_interrupt());
@@ -2656,6 +2658,11 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
* succeeding while we shut down the port.
*/
mutex_lock(&port->mutex);
+ if (!state->uart_port) {
+ mutex_unlock(&port->mutex);
+ ret = -EINVAL;
+ goto out;
+ }
uport->flags |= UPF_DEAD;
mutex_unlock(&port->mutex);
@@ -2679,9 +2686,10 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
uport->type = PORT_UNKNOWN;
state->uart_port = NULL;
+out:
mutex_unlock(&port_mutex);
- return 0;
+ return ret;
}
/*
@@ -2715,22 +2723,17 @@ EXPORT_SYMBOL(uart_match_port);
*/
void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
{
- struct uart_state *state = uport->state;
- struct tty_port *port = &state->port;
- struct tty_ldisc *ld = NULL;
- struct pps_event_time ts;
+ struct tty_port *port = &uport->state->port;
struct tty_struct *tty = port->tty;
+ struct tty_ldisc *ld = tty ? tty_ldisc_ref(tty) : NULL;
- if (tty)
- ld = tty_ldisc_ref(tty);
- if (ld && ld->ops->dcd_change)
- pps_get_ts(&ts);
+ if (ld) {
+ if (ld->ops->dcd_change)
+ ld->ops->dcd_change(tty, status);
+ tty_ldisc_deref(ld);
+ }
uport->icount.dcd++;
-#ifdef CONFIG_HARD_PPS
- if ((uport->flags & UPF_HARDPPS_CD) && status)
- hardpps();
-#endif
if (port->flags & ASYNC_CHECK_CD) {
if (status)
@@ -2738,11 +2741,6 @@ void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
else if (tty)
tty_hangup(tty);
}
-
- if (ld && ld->ops->dcd_change)
- ld->ops->dcd_change(tty, status, &ts);
- if (ld)
- tty_ldisc_deref(ld);
}
EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
@@ -2790,10 +2788,10 @@ EXPORT_SYMBOL_GPL(uart_handle_cts_change);
void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, unsigned int ch, unsigned int flag)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
if ((status & port->ignore_status_mask & ~overrun) == 0)
- if (tty_insert_flip_char(tty, ch, flag) == 0)
+ if (tty_insert_flip_char(tport, ch, flag) == 0)
++port->icount.buf_overrun;
/*
@@ -2801,7 +2799,7 @@ void uart_insert_char(struct uart_port *port, unsigned int status,
* it doesn't affect the current character.
*/
if (status & ~port->ignore_status_mask & overrun)
- if (tty_insert_flip_char(tty, 0, TTY_OVERRUN) == 0)
+ if (tty_insert_flip_char(tport, 0, TTY_OVERRUN) == 0)
++port->icount.buf_overrun;
}
EXPORT_SYMBOL_GPL(uart_insert_char);
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index 9bd004f9da89..e1caa99e3d3b 100644
--- a/drivers/tty/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -153,7 +153,6 @@ static void ks8695uart_disable_ms(struct uart_port *port)
static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty = port->state->port.tty;
unsigned int status, ch, lsr, flg, max_count = 256;
status = UART_GET_LSR(port); /* clears pending LSR interrupts */
@@ -200,7 +199,7 @@ static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id)
ignore_char:
status = UART_GET_LSR(port);
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index b52b21aeb250..fe48a0c2b4ca 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -277,7 +277,6 @@ static void serial_txx9_initialize(struct uart_port *port)
static inline void
receive_chars(struct uart_txx9_port *up, unsigned int *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
unsigned char ch;
unsigned int disr = *status;
int max_count = 256;
@@ -346,7 +345,7 @@ receive_chars(struct uart_txx9_port *up, unsigned int *status)
disr = sio_in(up, TXX9_SIDISR);
} while (!(disr & TXX9_SIDISR_UVALID) && (max_count-- > 0));
spin_unlock(&up->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
spin_lock(&up->port.lock);
*status = disr;
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 61477567423f..156418619949 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -596,7 +596,7 @@ static void sci_transmit_chars(struct uart_port *port)
static void sci_receive_chars(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
int i, count, copied = 0;
unsigned short status;
unsigned char flag;
@@ -607,7 +607,7 @@ static void sci_receive_chars(struct uart_port *port)
while (1) {
/* Don't copy more bytes than there is room for in the buffer */
- count = tty_buffer_request_room(tty, sci_rxfill(port));
+ count = tty_buffer_request_room(tport, sci_rxfill(port));
/* If for any reason we can't copy more data, we're done! */
if (count == 0)
@@ -619,7 +619,7 @@ static void sci_receive_chars(struct uart_port *port)
sci_port->break_flag)
count = 0;
else
- tty_insert_flip_char(tty, c, TTY_NORMAL);
+ tty_insert_flip_char(tport, c, TTY_NORMAL);
} else {
for (i = 0; i < count; i++) {
char c = serial_port_in(port, SCxRDR);
@@ -661,7 +661,7 @@ static void sci_receive_chars(struct uart_port *port)
} else
flag = TTY_NORMAL;
- tty_insert_flip_char(tty, c, flag);
+ tty_insert_flip_char(tport, c, flag);
}
}
@@ -674,7 +674,7 @@ static void sci_receive_chars(struct uart_port *port)
if (copied) {
/* Tell the rest of the system the news. New characters! */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
} else {
serial_port_in(port, SCxSR); /* dummy read */
serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
@@ -720,7 +720,7 @@ static int sci_handle_errors(struct uart_port *port)
{
int copied = 0;
unsigned short status = serial_port_in(port, SCxSR);
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
/*
@@ -731,7 +731,7 @@ static int sci_handle_errors(struct uart_port *port)
port->icount.overrun++;
/* overrun error */
- if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
+ if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
copied++;
dev_notice(port->dev, "overrun error");
@@ -755,7 +755,7 @@ static int sci_handle_errors(struct uart_port *port)
dev_dbg(port->dev, "BREAK detected\n");
- if (tty_insert_flip_char(tty, 0, TTY_BREAK))
+ if (tty_insert_flip_char(tport, 0, TTY_BREAK))
copied++;
}
@@ -763,7 +763,7 @@ static int sci_handle_errors(struct uart_port *port)
/* frame error */
port->icount.frame++;
- if (tty_insert_flip_char(tty, 0, TTY_FRAME))
+ if (tty_insert_flip_char(tport, 0, TTY_FRAME))
copied++;
dev_notice(port->dev, "frame error\n");
@@ -774,21 +774,21 @@ static int sci_handle_errors(struct uart_port *port)
/* parity error */
port->icount.parity++;
- if (tty_insert_flip_char(tty, 0, TTY_PARITY))
+ if (tty_insert_flip_char(tport, 0, TTY_PARITY))
copied++;
dev_notice(port->dev, "parity error");
}
if (copied)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
return copied;
}
static int sci_handle_fifo_overrun(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
struct plat_sci_reg *reg;
int copied = 0;
@@ -802,8 +802,8 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ tty_flip_buffer_push(tport);
dev_notice(port->dev, "overrun error\n");
copied++;
@@ -816,7 +816,7 @@ static int sci_handle_breaks(struct uart_port *port)
{
int copied = 0;
unsigned short status = serial_port_in(port, SCxSR);
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
if (uart_handle_break(port))
@@ -831,14 +831,14 @@ static int sci_handle_breaks(struct uart_port *port)
port->icount.brk++;
/* Notify of BREAK */
- if (tty_insert_flip_char(tty, 0, TTY_BREAK))
+ if (tty_insert_flip_char(tport, 0, TTY_BREAK))
copied++;
dev_dbg(port->dev, "BREAK detected\n");
}
if (copied)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
copied += sci_handle_fifo_overrun(port);
@@ -1259,13 +1259,13 @@ static void sci_dma_tx_complete(void *arg)
}
/* Locking: called with port lock held */
-static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
- size_t count)
+static int sci_dma_rx_push(struct sci_port *s, size_t count)
{
struct uart_port *port = &s->port;
+ struct tty_port *tport = &port->state->port;
int i, active, room;
- room = tty_buffer_request_room(tty, count);
+ room = tty_buffer_request_room(tport, count);
if (s->active_rx == s->cookie_rx[0]) {
active = 0;
@@ -1283,7 +1283,7 @@ static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
return room;
for (i = 0; i < room; i++)
- tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
+ tty_insert_flip_char(tport, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
TTY_NORMAL);
port->icount.rx += room;
@@ -1295,7 +1295,6 @@ static void sci_dma_rx_complete(void *arg)
{
struct sci_port *s = arg;
struct uart_port *port = &s->port;
- struct tty_struct *tty = port->state->port.tty;
unsigned long flags;
int count;
@@ -1303,14 +1302,14 @@ static void sci_dma_rx_complete(void *arg)
spin_lock_irqsave(&port->lock, flags);
- count = sci_dma_rx_push(s, tty, s->buf_len_rx);
+ count = sci_dma_rx_push(s, s->buf_len_rx);
mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
spin_unlock_irqrestore(&port->lock, flags);
if (count)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
schedule_work(&s->work_rx);
}
@@ -1404,7 +1403,6 @@ static void work_fn_rx(struct work_struct *work)
if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
DMA_SUCCESS) {
/* Handle incomplete DMA receive */
- struct tty_struct *tty = port->state->port.tty;
struct dma_chan *chan = s->chan_rx;
struct shdma_desc *sh_desc = container_of(desc,
struct shdma_desc, async_tx);
@@ -1416,11 +1414,11 @@ static void work_fn_rx(struct work_struct *work)
sh_desc->partial, sh_desc->cookie);
spin_lock_irqsave(&port->lock, flags);
- count = sci_dma_rx_push(s, tty, sh_desc->partial);
+ count = sci_dma_rx_push(s, sh_desc->partial);
spin_unlock_irqrestore(&port->lock, flags);
if (count)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
sci_submit_rx(s);
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 5da5cb962769..6bbfe9934a4d 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -75,6 +75,20 @@ static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
.line = 2,
},
},
+ [3] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 3,
+ },
+ },
+ [4] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 4,
+ },
+ },
};
static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
@@ -192,11 +206,6 @@ static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
{
unsigned int ch, rx_count = 0;
- struct tty_struct *tty;
-
- tty = tty_port_tty_get(&port->state->port);
- if (!tty)
- return -ENODEV;
while (!(rd_regl(port, SIRFUART_RX_FIFO_STATUS) &
SIRFUART_FIFOEMPTY_MASK(port))) {
@@ -210,8 +219,7 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
}
port->icount.rx += rx_count;
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->state->port);
return rx_count;
}
@@ -245,6 +253,7 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
struct uart_port *port = &sirfport->port;
struct uart_state *state = port->state;
struct circ_buf *xmit = &port->state->xmit;
+ spin_lock(&port->lock);
intr_status = rd_regl(port, SIRFUART_INT_STATUS);
wr_regl(port, SIRFUART_INT_STATUS, intr_status);
intr_status &= rd_regl(port, SIRFUART_INT_EN);
@@ -254,6 +263,7 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
goto recv_char;
uart_insert_char(port, intr_status,
SIRFUART_RX_OFLOW, 0, TTY_BREAK);
+ spin_unlock(&port->lock);
return IRQ_HANDLED;
}
if (intr_status & SIRFUART_RX_OFLOW)
@@ -286,6 +296,7 @@ recv_char:
sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT);
if (intr_status & SIRFUART_TX_INT_EN) {
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ spin_unlock(&port->lock);
return IRQ_HANDLED;
} else {
sirfsoc_uart_pio_tx_chars(sirfport,
@@ -296,6 +307,7 @@ recv_char:
sirfsoc_uart_stop_tx(port);
}
}
+ spin_unlock(&port->lock);
return IRQ_HANDLED;
}
@@ -345,7 +357,6 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
struct ktermios *old)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- unsigned long ioclk_rate;
unsigned long config_reg = 0;
unsigned long baud_rate;
unsigned long setted_baud;
@@ -357,7 +368,6 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
int threshold_div;
int temp;
- ioclk_rate = 150000000;
switch (termios->c_cflag & CSIZE) {
default:
case CS8:
@@ -413,14 +423,17 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
sirfsoc_uart_disable_ms(port);
}
- /* common rate: fast calculation */
- for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
- if (baud_rate == baudrate_to_regv[ic].baud_rate)
- clk_div_reg = baudrate_to_regv[ic].reg_val;
+ if (port->uartclk == 150000000) {
+ /* common rate: fast calculation */
+ for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
+ if (baud_rate == baudrate_to_regv[ic].baud_rate)
+ clk_div_reg = baudrate_to_regv[ic].reg_val;
+ }
+
setted_baud = baud_rate;
/* arbitrary rate setting */
if (unlikely(clk_div_reg == 0))
- clk_div_reg = sirfsoc_calc_sample_div(baud_rate, ioclk_rate,
+ clk_div_reg = sirfsoc_calc_sample_div(baud_rate, port->uartclk,
&setted_baud);
wr_regl(port, SIRFUART_DIVISOR, clk_div_reg);
@@ -679,6 +692,14 @@ int sirfsoc_uart_probe(struct platform_device *pdev)
goto err;
}
+ sirfport->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sirfport->clk)) {
+ ret = PTR_ERR(sirfport->clk);
+ goto clk_err;
+ }
+ clk_prepare_enable(sirfport->clk);
+ port->uartclk = clk_get_rate(sirfport->clk);
+
port->ops = &sirfsoc_uart_ops;
spin_lock_init(&port->lock);
@@ -692,6 +713,9 @@ int sirfsoc_uart_probe(struct platform_device *pdev)
return 0;
port_err:
+ clk_disable_unprepare(sirfport->clk);
+ clk_put(sirfport->clk);
+clk_err:
platform_set_drvdata(pdev, NULL);
if (sirfport->hw_flow_ctrl)
pinctrl_put(sirfport->p);
@@ -706,6 +730,8 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (sirfport->hw_flow_ctrl)
pinctrl_put(sirfport->p);
+ clk_disable_unprepare(sirfport->clk);
+ clk_put(sirfport->clk);
uart_remove_one_port(&sirfsoc_uart_drv, port);
return 0;
}
@@ -729,6 +755,7 @@ static int sirfsoc_uart_resume(struct platform_device *pdev)
static struct of_device_id sirfsoc_uart_ids[] = {
{ .compatible = "sirf,prima2-uart", },
+ { .compatible = "sirf,marco-uart", },
{}
};
MODULE_DEVICE_TABLE(of, sirfsoc_serial_of_match);
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 6e207fdc2fed..85328ba0c4e3 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -139,7 +139,7 @@
#define SIRFSOC_UART_MINOR 0
#define SIRFUART_PORT_NAME "sirfsoc-uart"
#define SIRFUART_MAP_SIZE 0x200
-#define SIRFSOC_UART_NR 3
+#define SIRFSOC_UART_NR 5
#define SIRFSOC_PORT_TYPE 0xa5
/* Baud Rate Calculation */
@@ -163,6 +163,7 @@ struct sirfsoc_uart_port {
struct uart_port port;
struct pinctrl *p;
+ struct clk *clk;
};
/* Hardware Flow Control */
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index 1c6de9f58699..f51ffdc696fd 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -457,8 +457,8 @@ static int sn_debug_printf(const char *fmt, ...)
static void
sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
{
+ struct tty_port *tport = NULL;
int ch;
- struct tty_struct *tty;
if (!port) {
printk(KERN_ERR "sn_receive_chars - port NULL so can't receive\n");
@@ -472,11 +472,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
if (port->sc_port.state) {
/* The serial_core stuffs are initialized, use them */
- tty = port->sc_port.state->port.tty;
- }
- else {
- /* Not registered yet - can't pass to tty layer. */
- tty = NULL;
+ tport = &port->sc_port.state->port;
}
while (port->sc_ops->sal_input_pending()) {
@@ -516,15 +512,15 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
#endif /* CONFIG_MAGIC_SYSRQ */
/* record the character to pass up to the tty layer */
- if (tty) {
- if(tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+ if (tport) {
+ if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0)
break;
}
port->sc_port.icount.rx++;
}
- if (tty)
- tty_flip_buffer_push(tty);
+ if (tport)
+ tty_flip_buffer_push(tport);
}
/**
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index b9bf9c53f7fd..ba60708053e0 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -72,7 +72,7 @@ static void transmit_chars_write(struct uart_port *port, struct circ_buf *xmit)
}
}
-static int receive_chars_getchar(struct uart_port *port, struct tty_struct *tty)
+static int receive_chars_getchar(struct uart_port *port)
{
int saw_console_brk = 0;
int limit = 10000;
@@ -99,7 +99,7 @@ static int receive_chars_getchar(struct uart_port *port, struct tty_struct *tty)
uart_handle_dcd_change(port, 1);
}
- if (tty == NULL) {
+ if (port->state == NULL) {
uart_handle_sysrq_char(port, c);
continue;
}
@@ -109,13 +109,13 @@ static int receive_chars_getchar(struct uart_port *port, struct tty_struct *tty)
if (uart_handle_sysrq_char(port, c))
continue;
- tty_insert_flip_char(tty, c, TTY_NORMAL);
+ tty_insert_flip_char(&port->state->port, c, TTY_NORMAL);
}
return saw_console_brk;
}
-static int receive_chars_read(struct uart_port *port, struct tty_struct *tty)
+static int receive_chars_read(struct uart_port *port)
{
int saw_console_brk = 0;
int limit = 10000;
@@ -152,12 +152,13 @@ static int receive_chars_read(struct uart_port *port, struct tty_struct *tty)
for (i = 0; i < bytes_read; i++)
uart_handle_sysrq_char(port, con_read_page[i]);
- if (tty == NULL)
+ if (port->state == NULL)
continue;
port->icount.rx += bytes_read;
- tty_insert_flip_string(tty, con_read_page, bytes_read);
+ tty_insert_flip_string(&port->state->port, con_read_page,
+ bytes_read);
}
return saw_console_brk;
@@ -165,7 +166,7 @@ static int receive_chars_read(struct uart_port *port, struct tty_struct *tty)
struct sunhv_ops {
void (*transmit_chars)(struct uart_port *port, struct circ_buf *xmit);
- int (*receive_chars)(struct uart_port *port, struct tty_struct *tty);
+ int (*receive_chars)(struct uart_port *port);
};
static struct sunhv_ops bychar_ops = {
@@ -180,17 +181,17 @@ static struct sunhv_ops bywrite_ops = {
static struct sunhv_ops *sunhv_ops = &bychar_ops;
-static struct tty_struct *receive_chars(struct uart_port *port)
+static struct tty_port *receive_chars(struct uart_port *port)
{
- struct tty_struct *tty = NULL;
+ struct tty_port *tport = NULL;
if (port->state != NULL) /* Unopened serial console */
- tty = port->state->port.tty;
+ tport = &port->state->port;
- if (sunhv_ops->receive_chars(port, tty))
+ if (sunhv_ops->receive_chars(port))
sun_do_break();
- return tty;
+ return tport;
}
static void transmit_chars(struct uart_port *port)
@@ -213,16 +214,16 @@ static void transmit_chars(struct uart_port *port)
static irqreturn_t sunhv_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty;
+ struct tty_port *tport;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- tty = receive_chars(port);
+ tport = receive_chars(port);
transmit_chars(port);
spin_unlock_irqrestore(&port->lock, flags);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (tport)
+ tty_flip_buffer_push(tport);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index bd8b3b634103..8de2213664e0 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -107,11 +107,11 @@ static __inline__ void sunsab_cec_wait(struct uart_sunsab_port *up)
udelay(1);
}
-static struct tty_struct *
+static struct tty_port *
receive_chars(struct uart_sunsab_port *up,
union sab82532_irq_status *stat)
{
- struct tty_struct *tty = NULL;
+ struct tty_port *port = NULL;
unsigned char buf[32];
int saw_console_brk = 0;
int free_fifo = 0;
@@ -119,7 +119,7 @@ receive_chars(struct uart_sunsab_port *up,
int i;
if (up->port.state != NULL) /* Unopened serial console */
- tty = up->port.state->port.tty;
+ port = &up->port.state->port;
/* Read number of BYTES (Character + Status) available. */
if (stat->sreg.isr0 & SAB82532_ISR0_RPF) {
@@ -136,7 +136,7 @@ receive_chars(struct uart_sunsab_port *up,
if (stat->sreg.isr0 & SAB82532_ISR0_TIME) {
sunsab_cec_wait(up);
writeb(SAB82532_CMDR_RFRD, &up->regs->w.cmdr);
- return tty;
+ return port;
}
if (stat->sreg.isr0 & SAB82532_ISR0_RFO)
@@ -160,11 +160,6 @@ receive_chars(struct uart_sunsab_port *up,
for (i = 0; i < count; i++) {
unsigned char ch = buf[i], flag;
- if (tty == NULL) {
- uart_handle_sysrq_char(&up->port, ch);
- continue;
- }
-
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -213,15 +208,15 @@ receive_chars(struct uart_sunsab_port *up,
if ((stat->sreg.isr0 & (up->port.ignore_status_mask & 0xff)) == 0 &&
(stat->sreg.isr1 & ((up->port.ignore_status_mask >> 8) & 0xff)) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
if (stat->sreg.isr0 & SAB82532_ISR0_RFO)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
if (saw_console_brk)
sun_do_break();
- return tty;
+ return port;
}
static void sunsab_stop_tx(struct uart_port *);
@@ -304,7 +299,7 @@ static void check_status(struct uart_sunsab_port *up,
static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
{
struct uart_sunsab_port *up = dev_id;
- struct tty_struct *tty;
+ struct tty_port *port = NULL;
union sab82532_irq_status status;
unsigned long flags;
unsigned char gis;
@@ -318,12 +313,11 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
if (gis & 2)
status.sreg.isr1 = readb(&up->regs->r.isr1);
- tty = NULL;
if (status.stat) {
if ((status.sreg.isr0 & (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME |
SAB82532_ISR0_RFO | SAB82532_ISR0_RPF)) ||
(status.sreg.isr1 & SAB82532_ISR1_BRK))
- tty = receive_chars(up, &status);
+ port = receive_chars(up, &status);
if ((status.sreg.isr0 & SAB82532_ISR0_CDSC) ||
(status.sreg.isr1 & SAB82532_ISR1_CSC))
check_status(up, &status);
@@ -333,8 +327,8 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&up->port.lock, flags);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (port)
+ tty_flip_buffer_push(port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 220da3f9724f..e343d6670854 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -315,10 +315,10 @@ static void sunsu_enable_ms(struct uart_port *port)
spin_unlock_irqrestore(&up->port.lock, flags);
}
-static struct tty_struct *
+static void
receive_chars(struct uart_sunsu_port *up, unsigned char *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
+ struct tty_port *port = &up->port.state->port;
unsigned char ch, flag;
int max_count = 256;
int saw_console_brk = 0;
@@ -376,22 +376,20 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status)
if (uart_handle_sysrq_char(&up->port, ch))
goto ignore_char;
if ((*status & up->port.ignore_status_mask) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
if (*status & UART_LSR_OE)
/*
* Overrun is special, since it's reported
* immediately, and doesn't affect the current
* character.
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
ignore_char:
*status = serial_inp(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
if (saw_console_brk)
sun_do_break();
-
- return tty;
}
static void transmit_chars(struct uart_sunsu_port *up)
@@ -460,20 +458,16 @@ static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&up->port.lock, flags);
do {
- struct tty_struct *tty;
-
status = serial_inp(up, UART_LSR);
- tty = NULL;
if (status & UART_LSR_DR)
- tty = receive_chars(up, &status);
+ receive_chars(up, &status);
check_modem_status(up);
if (status & UART_LSR_THRE)
transmit_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
- if (tty)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
spin_lock_irqsave(&up->port.lock, flags);
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index aef4fab957c3..27669ff3d446 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -323,17 +323,15 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
}
}
-static struct tty_struct *
+static struct tty_port *
sunzilog_receive_chars(struct uart_sunzilog_port *up,
struct zilog_channel __iomem *channel)
{
- struct tty_struct *tty;
+ struct tty_port *port = NULL;
unsigned char ch, r1, flag;
- tty = NULL;
- if (up->port.state != NULL && /* Unopened serial console */
- up->port.state->port.tty != NULL) /* Keyboard || mouse */
- tty = up->port.state->port.tty;
+ if (up->port.state != NULL) /* Unopened serial console */
+ port = &up->port.state->port;
for (;;) {
@@ -366,11 +364,6 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
continue;
}
- if (tty == NULL) {
- uart_handle_sysrq_char(&up->port, ch);
- continue;
- }
-
/* A real serial line, record the character and status. */
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -400,13 +393,13 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
if (up->port.ignore_status_mask == 0xff ||
(r1 & up->port.ignore_status_mask) == 0) {
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
}
if (r1 & Rx_OVR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
- return tty;
+ return port;
}
static void sunzilog_status_handle(struct uart_sunzilog_port *up,
@@ -539,21 +532,21 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
while (up) {
struct zilog_channel __iomem *channel
= ZILOG_CHANNEL_FROM_PORT(&up->port);
- struct tty_struct *tty;
+ struct tty_port *port;
unsigned char r3;
spin_lock(&up->port.lock);
r3 = read_zsreg(channel, R3);
/* Channel A */
- tty = NULL;
+ port = NULL;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHARxIP)
- tty = sunzilog_receive_chars(up, channel);
+ port = sunzilog_receive_chars(up, channel);
if (r3 & CHAEXT)
sunzilog_status_handle(up, channel);
if (r3 & CHATxIP)
@@ -561,22 +554,22 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
}
spin_unlock(&up->port.lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (port)
+ tty_flip_buffer_push(port);
/* Channel B */
up = up->next;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
spin_lock(&up->port.lock);
- tty = NULL;
+ port = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHBRxIP)
- tty = sunzilog_receive_chars(up, channel);
+ port = sunzilog_receive_chars(up, channel);
if (r3 & CHBEXT)
sunzilog_status_handle(up, channel);
if (r3 & CHBTxIP)
@@ -584,8 +577,8 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
}
spin_unlock(&up->port.lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (port)
+ tty_flip_buffer_push(port);
up = up->next;
}
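
The sunsab/sunsu/sunzilog hunks above all follow the same conversion: the receive helpers operate on struct tty_port (which exists even while no tty is open), and the interrupt handler pushes the flip buffer only after dropping the port lock. Not part of the patch, a minimal hedged sketch of that shape; example_read_char() and the example_* names are hypothetical:

/* sketch only; would need <linux/serial_core.h>, <linux/tty_flip.h>, <linux/interrupt.h> */
static struct tty_port *example_receive_chars(struct uart_port *up)
{
	struct tty_port *port = &up->state->port;
	unsigned char ch;

	/* drain the RX FIFO into the flip buffer */
	while (example_read_char(up, &ch))
		tty_insert_flip_char(port, ch, TTY_NORMAL);

	return port;
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct uart_port *up = dev_id;
	struct tty_port *port;

	spin_lock(&up->lock);
	port = example_receive_chars(up);
	spin_unlock(&up->lock);

	/* hand the data to the line discipline outside the port lock */
	tty_flip_buffer_push(port);
	return IRQ_HANDLED;
}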
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 5be0d68feceb..6818410a2bea 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -91,16 +91,16 @@ static void timbuart_flush_buffer(struct uart_port *port)
static void timbuart_rx_chars(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
port->icount.rx++;
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ tty_insert_flip_char(tport, ch, TTY_NORMAL);
}
spin_unlock(&port->lock);
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(tport);
spin_lock(&port->lock);
dev_dbg(port->dev, "%s - total read %d bytes\n",
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 89eee43c4e2d..5f90ef24d475 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -34,7 +34,7 @@
* Register definitions
*
* For register details see datasheet:
- * http://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf
+ * http://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf
*/
#define ULITE_RX 0x00
@@ -57,6 +57,54 @@
#define ULITE_CONTROL_RST_RX 0x02
#define ULITE_CONTROL_IE 0x10
+struct uartlite_reg_ops {
+ u32 (*in)(void __iomem *addr);
+ void (*out)(u32 val, void __iomem *addr);
+};
+
+static u32 uartlite_inbe32(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static void uartlite_outbe32(u32 val, void __iomem *addr)
+{
+ iowrite32be(val, addr);
+}
+
+static struct uartlite_reg_ops uartlite_be = {
+ .in = uartlite_inbe32,
+ .out = uartlite_outbe32,
+};
+
+static u32 uartlite_inle32(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static void uartlite_outle32(u32 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+}
+
+static struct uartlite_reg_ops uartlite_le = {
+ .in = uartlite_inle32,
+ .out = uartlite_outle32,
+};
+
+static inline u32 uart_in32(u32 offset, struct uart_port *port)
+{
+ struct uartlite_reg_ops *reg_ops = port->private_data;
+
+ return reg_ops->in(port->membase + offset);
+}
+
+static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
+{
+ struct uartlite_reg_ops *reg_ops = port->private_data;
+
+ reg_ops->out(val, port->membase + offset);
+}
static struct uart_port ulite_ports[ULITE_NR_UARTS];
@@ -66,7 +114,7 @@ static struct uart_port ulite_ports[ULITE_NR_UARTS];
static int ulite_receive(struct uart_port *port, int stat)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = TTY_NORMAL;
@@ -77,7 +125,7 @@ static int ulite_receive(struct uart_port *port, int stat)
/* stats */
if (stat & ULITE_STATUS_RXVALID) {
port->icount.rx++;
- ch = ioread32be(port->membase + ULITE_RX);
+ ch = uart_in32(ULITE_RX, port);
if (stat & ULITE_STATUS_PARITY)
port->icount.parity++;
@@ -103,13 +151,13 @@ static int ulite_receive(struct uart_port *port, int stat)
stat &= ~port->ignore_status_mask;
if (stat & ULITE_STATUS_RXVALID)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
if (stat & ULITE_STATUS_FRAME)
- tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_insert_flip_char(tport, 0, TTY_FRAME);
if (stat & ULITE_STATUS_OVERRUN)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
return 1;
}
@@ -122,7 +170,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
return 0;
if (port->x_char) {
- iowrite32be(port->x_char, port->membase + ULITE_TX);
+ uart_out32(port->x_char, ULITE_TX, port);
port->x_char = 0;
port->icount.tx++;
return 1;
@@ -131,7 +179,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return 0;
- iowrite32be(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+ uart_out32(xmit->buf[xmit->tail], ULITE_TX, port);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
port->icount.tx++;
@@ -148,7 +196,7 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
int busy, n = 0;
do {
- int stat = ioread32be(port->membase + ULITE_STATUS);
+ int stat = uart_in32(ULITE_STATUS, port);
busy = ulite_receive(port, stat);
busy |= ulite_transmit(port, stat);
n++;
@@ -156,7 +204,7 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
/* work done? */
if (n > 1) {
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
@@ -169,7 +217,7 @@ static unsigned int ulite_tx_empty(struct uart_port *port)
unsigned int ret;
spin_lock_irqsave(&port->lock, flags);
- ret = ioread32be(port->membase + ULITE_STATUS);
+ ret = uart_in32(ULITE_STATUS, port);
spin_unlock_irqrestore(&port->lock, flags);
return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
@@ -192,7 +240,7 @@ static void ulite_stop_tx(struct uart_port *port)
static void ulite_start_tx(struct uart_port *port)
{
- ulite_transmit(port, ioread32be(port->membase + ULITE_STATUS));
+ ulite_transmit(port, uart_in32(ULITE_STATUS, port));
}
static void ulite_stop_rx(struct uart_port *port)
@@ -220,17 +268,17 @@ static int ulite_startup(struct uart_port *port)
if (ret)
return ret;
- iowrite32be(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
- port->membase + ULITE_CONTROL);
- iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+ uart_out32(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+ ULITE_CONTROL, port);
+ uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
return 0;
}
static void ulite_shutdown(struct uart_port *port)
{
- iowrite32be(0, port->membase + ULITE_CONTROL);
- ioread32be(port->membase + ULITE_CONTROL); /* dummy */
+ uart_out32(0, ULITE_CONTROL, port);
+ uart_in32(ULITE_CONTROL, port); /* dummy */
free_irq(port->irq, port);
}
@@ -281,6 +329,8 @@ static void ulite_release_port(struct uart_port *port)
static int ulite_request_port(struct uart_port *port)
{
+ int ret;
+
pr_debug("ulite console: port=%p; port->mapbase=%llx\n",
port, (unsigned long long) port->mapbase);
@@ -296,6 +346,14 @@ static int ulite_request_port(struct uart_port *port)
return -EBUSY;
}
+ port->private_data = &uartlite_be;
+ ret = uart_in32(ULITE_CONTROL, port);
+ uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port);
+ ret = uart_in32(ULITE_STATUS, port);
+ /* Endianness detection */
+ if ((ret & ULITE_STATUS_TXEMPTY) != ULITE_STATUS_TXEMPTY)
+ port->private_data = &uartlite_le;
+
return 0;
}
@@ -314,20 +372,19 @@ static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
#ifdef CONFIG_CONSOLE_POLL
static int ulite_get_poll_char(struct uart_port *port)
{
- if (!(ioread32be(port->membase + ULITE_STATUS)
- & ULITE_STATUS_RXVALID))
+ if (!(uart_in32(ULITE_STATUS, port) & ULITE_STATUS_RXVALID))
return NO_POLL_CHAR;
- return ioread32be(port->membase + ULITE_RX);
+ return uart_in32(ULITE_RX, port);
}
static void ulite_put_poll_char(struct uart_port *port, unsigned char ch)
{
- while (ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_TXFULL)
+ while (uart_in32(ULITE_STATUS, port) & ULITE_STATUS_TXFULL)
cpu_relax();
/* write char to device */
- iowrite32be(ch, port->membase + ULITE_TX);
+ uart_out32(ch, ULITE_TX, port);
}
#endif
@@ -366,7 +423,7 @@ static void ulite_console_wait_tx(struct uart_port *port)
/* Spin waiting for TX fifo to have space available */
for (i = 0; i < 100000; i++) {
- val = ioread32be(port->membase + ULITE_STATUS);
+ val = uart_in32(ULITE_STATUS, port);
if ((val & ULITE_STATUS_TXFULL) == 0)
break;
cpu_relax();
@@ -376,7 +433,7 @@ static void ulite_console_wait_tx(struct uart_port *port)
static void ulite_console_putchar(struct uart_port *port, int ch)
{
ulite_console_wait_tx(port);
- iowrite32be(ch, port->membase + ULITE_TX);
+ uart_out32(ch, ULITE_TX, port);
}
static void ulite_console_write(struct console *co, const char *s,
@@ -393,8 +450,8 @@ static void ulite_console_write(struct console *co, const char *s,
spin_lock_irqsave(&port->lock, flags);
/* save and disable interrupt */
- ier = ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
- iowrite32be(0, port->membase + ULITE_CONTROL);
+ ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
+ uart_out32(0, ULITE_CONTROL, port);
uart_console_write(port, s, count, ulite_console_putchar);
@@ -402,7 +459,7 @@ static void ulite_console_write(struct console *co, const char *s,
/* restore interrupt state */
if (ier)
- iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+ uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
@@ -615,7 +672,7 @@ static struct platform_driver ulite_platform_driver = {
* Module setup/teardown
*/
-int __init ulite_init(void)
+static int __init ulite_init(void)
{
int ret;
@@ -634,11 +691,11 @@ int __init ulite_init(void)
err_plat:
uart_unregister_driver(&ulite_uart_driver);
err_uart:
- printk(KERN_ERR "registering uartlite driver failed: err=%i", ret);
+ pr_err("registering uartlite driver failed: err=%i", ret);
return ret;
}
-void __exit ulite_exit(void)
+static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
uart_unregister_driver(&ulite_uart_driver);
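
The uartlite change replaces hard-coded ioread32be()/iowrite32be() accesses with a small per-port ops table selected at request time. A hedged sketch of the probe logic performed in the ulite_request_port() hunk, using the names introduced above; the probe_endianness() wrapper itself is hypothetical and not in the driver:

static void probe_endianness(struct uart_port *port)
{
	u32 status;

	/* start with the big-endian accessor table */
	port->private_data = &uartlite_be;

	/* reset the TX FIFO, then read back the status register */
	uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port);
	status = uart_in32(ULITE_STATUS, port);

	/*
	 * After a TX reset the FIFO must report empty; if TXEMPTY does
	 * not read back through the big-endian ops, the registers are
	 * little-endian, so switch to the little-endian table.
	 */
	if (!(status & ULITE_STATUS_TXEMPTY))
		port->private_data = &uartlite_le;
}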
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index f99b0c965f85..7355303dad99 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -469,7 +469,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
int i;
unsigned char ch, *cp;
struct uart_port *port = &qe_port->port;
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct qe_bd *bdp;
u16 status;
unsigned int flg;
@@ -491,7 +491,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
/* If we don't have enough room in RX buffer for the entire BD,
* then we try later, which will be the next RX interrupt.
*/
- if (tty_buffer_request_room(tty, i) < i) {
+ if (tty_buffer_request_room(tport, i) < i) {
dev_dbg(port->dev, "ucc-uart: no room in RX buffer\n");
return;
}
@@ -512,7 +512,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
continue;
error_return:
- tty_insert_flip_char(tty, ch, flg);
+ tty_insert_flip_char(tport, ch, flg);
}
@@ -530,7 +530,7 @@ error_return:
qe_port->rx_cur = bdp;
/* Activate BH processing */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
return;
@@ -560,7 +560,7 @@ handle_error:
/* Overrun does not affect the current character ! */
if (status & BD_SC_OV)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
#ifdef SUPPORT_SYSRQ
port->sysrq = 0;
#endif
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 62ee0166bc65..f655997f44af 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -313,12 +313,10 @@ static void siu_break_ctl(struct uart_port *port, int ctl)
static inline void receive_chars(struct uart_port *port, uint8_t *status)
{
- struct tty_struct *tty;
uint8_t lsr, ch;
char flag;
int max_count = RX_MAX_COUNT;
- tty = port->state->port.tty;
lsr = *status;
do {
@@ -365,7 +363,7 @@ static inline void receive_chars(struct uart_port *port, uint8_t *status)
lsr = siu_read(port, UART_LSR);
} while ((lsr & UART_LSR_DR) && (max_count-- > 0));
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
*status = lsr;
}
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 8fd181436a6b..a3f9dd5c9dff 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -136,22 +136,14 @@ static void vt8500_enable_ms(struct uart_port *port)
static void handle_rx(struct uart_port *port)
{
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
- if (!tty) {
- /* Discard data: no tty available */
- int count = (vt8500_read(port, VT8500_URFIDX) & 0x1f00) >> 8;
- u16 ch;
- while (count--)
- ch = readw(port->membase + VT8500_RXFIFO);
- return;
- }
+ struct tty_port *tport = &port->state->port;
/*
* Handle overrun
*/
if ((vt8500_read(port, VT8500_URISR) & RXOVER)) {
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
}
/* and now the main RX loop */
@@ -174,11 +166,10 @@ static void handle_rx(struct uart_port *port)
port->icount.rx++;
if (!uart_handle_sysrq_char(port, c))
- tty_insert_flip_char(tty, c, flag);
+ tty_insert_flip_char(tport, c, flag);
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(tport);
}
static void handle_tx(struct uart_port *port)
@@ -569,7 +560,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
if (np)
port = of_alias_get_id(np, "serial");
- if (port > VT8500_MAX_PORTS)
+ if (port >= VT8500_MAX_PORTS)
port = -1;
else
port = -1;
@@ -580,7 +571,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
sizeof(vt8500_ports_in_use));
}
- if (port > VT8500_MAX_PORTS)
+ if (port >= VT8500_MAX_PORTS)
return -ENODEV;
/* reserve the port id */
@@ -589,10 +580,27 @@ static int vt8500_serial_probe(struct platform_device *pdev)
return -EBUSY;
}
- vt8500_port = kzalloc(sizeof(struct vt8500_port), GFP_KERNEL);
+ vt8500_port = devm_kzalloc(&pdev->dev, sizeof(struct vt8500_port),
+ GFP_KERNEL);
if (!vt8500_port)
return -ENOMEM;
+ vt8500_port->uart.membase = devm_request_and_ioremap(&pdev->dev, mmres);
+ if (!vt8500_port->uart.membase)
+ return -EADDRNOTAVAIL;
+
+ vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(vt8500_port->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(vt8500_port->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return ret;
+ }
+
vt8500_port->uart.type = PORT_VT8500;
vt8500_port->uart.iotype = UPIO_MEM;
vt8500_port->uart.mapbase = mmres->start;
@@ -604,7 +612,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0);
- if (vt8500_port->clk) {
+ if (!IS_ERR(vt8500_port->clk)) {
vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk);
} else {
/* use the default of 24Mhz if not specified and warn */
@@ -615,12 +623,6 @@ static int vt8500_serial_probe(struct platform_device *pdev)
snprintf(vt8500_port->name, sizeof(vt8500_port->name),
"VT8500 UART%d", pdev->id);
- vt8500_port->uart.membase = ioremap(mmres->start, resource_size(mmres));
- if (!vt8500_port->uart.membase) {
- ret = -ENOMEM;
- goto err;
- }
-
vt8500_uart_ports[port] = vt8500_port;
uart_add_one_port(&vt8500_uart_driver, &vt8500_port->uart);
@@ -628,10 +630,6 @@ static int vt8500_serial_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vt8500_port);
return 0;
-
-err:
- kfree(vt8500_port);
- return ret;
}
static int vt8500_serial_remove(struct platform_device *pdev)
@@ -639,8 +637,8 @@ static int vt8500_serial_remove(struct platform_device *pdev)
struct vt8500_port *vt8500_port = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
+ clk_disable_unprepare(vt8500_port->clk);
uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart);
- kfree(vt8500_port);
return 0;
}
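
The vt8500 probe hunks switch to device-managed allocation and mapping, which is why the manual err:/kfree()/ioremap() cleanup paths can be deleted. A rough hedged sketch of the resulting probe shape; example_probe() and the bare return codes are illustrative only, not the driver's actual flow:

/* sketch only; would need <linux/platform_device.h>, <linux/clk.h>, <linux/of.h>, <linux/err.h> */
static int example_probe(struct platform_device *pdev)
{
	struct resource *mmres;
	void __iomem *base;
	struct clk *clk;
	int ret;

	mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mmres)
		return -ENODEV;

	/* released automatically on probe failure or device removal */
	base = devm_request_and_ioremap(&pdev->dev, mmres);
	if (!base)
		return -EADDRNOTAVAIL;

	clk = of_clk_get(pdev->dev.of_node, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* paired with clk_disable_unprepare() in the remove() path */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	return 0;
}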
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 9ab910370c56..ba451c7209fc 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -17,6 +17,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/console.h>
+#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -147,15 +148,11 @@
static irqreturn_t xuartps_isr(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
- struct tty_struct *tty;
unsigned long flags;
unsigned int isrstatus, numbytes;
unsigned int data;
char status = TTY_NORMAL;
- /* Get the tty which could be NULL so don't assume it's valid */
- tty = tty_port_tty_get(&port->state->port);
-
spin_lock_irqsave(&port->lock, flags);
/* Read the interrupt status register to determine which
@@ -187,14 +184,11 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
} else if (isrstatus & XUARTPS_IXR_OVERRUN)
port->icount.overrun++;
- if (tty)
- uart_insert_char(port, isrstatus,
- XUARTPS_IXR_OVERRUN, data,
- status);
+ uart_insert_char(port, isrstatus, XUARTPS_IXR_OVERRUN,
+ data, status);
}
spin_unlock(&port->lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
}
@@ -237,7 +231,6 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
/* be sure to release the lock and tty before leaving */
spin_unlock_irqrestore(&port->lock, flags);
- tty_kref_put(tty);
return IRQ_HANDLED;
}
@@ -944,16 +937,18 @@ static int xuartps_probe(struct platform_device *pdev)
int rc;
struct uart_port *port;
struct resource *res, *res2;
- int clk = 0;
-
- const unsigned int *prop;
+ struct clk *clk;
- prop = of_get_property(pdev->dev.of_node, "clock", NULL);
- if (prop)
- clk = be32_to_cpup(prop);
- if (!clk) {
+ clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(clk)) {
dev_err(&pdev->dev, "no clock specified\n");
- return -ENODEV;
+ return PTR_ERR(clk);
+ }
+
+ rc = clk_prepare_enable(clk);
+ if (rc) {
+ dev_err(&pdev->dev, "could not enable clock\n");
+ return -EBUSY;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -978,7 +973,8 @@ static int xuartps_probe(struct platform_device *pdev)
port->mapbase = res->start;
port->irq = res2->start;
port->dev = &pdev->dev;
- port->uartclk = clk;
+ port->uartclk = clk_get_rate(clk);
+ port->private_data = clk;
dev_set_drvdata(&pdev->dev, port);
rc = uart_add_one_port(&xuartps_uart_driver, port);
if (rc) {
@@ -1000,14 +996,14 @@ static int xuartps_probe(struct platform_device *pdev)
static int xuartps_remove(struct platform_device *pdev)
{
struct uart_port *port = dev_get_drvdata(&pdev->dev);
- int rc = 0;
+ struct clk *clk = port->private_data;
+ int rc;
/* Remove the xuartps port from the serial core */
- if (port) {
- rc = uart_remove_one_port(&xuartps_uart_driver, port);
- dev_set_drvdata(&pdev->dev, NULL);
- port->mapbase = 0;
- }
+ rc = uart_remove_one_port(&xuartps_uart_driver, port);
+ dev_set_drvdata(&pdev->dev, NULL);
+ port->mapbase = 0;
+ clk_disable_unprepare(clk);
return rc;
}
@@ -1048,7 +1044,7 @@ MODULE_DEVICE_TABLE(of, xuartps_of_match);
static struct platform_driver xuartps_platform_driver = {
.probe = xuartps_probe, /* Probe method */
- .remove = __exit_p(xuartps_remove), /* Detach method */
+ .remove = xuartps_remove, /* Detach method */
.suspend = xuartps_suspend, /* Suspend */
.resume = xuartps_resume, /* Resume after a suspend */
.driver = {
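
The xuartps hunks stop reading a raw "clock" property and instead take a struct clk from the device tree, derive uartclk from clk_get_rate(), and stash the clk pointer in port->private_data so that xuartps_remove() can disable it later. A small hedged sketch of that pairing; the example_* names are made up and not part of the patch:

static int example_setup_clock(struct platform_device *pdev,
			       struct uart_port *port)
{
	struct clk *clk;
	int rc;

	clk = of_clk_get(pdev->dev.of_node, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rc = clk_prepare_enable(clk);
	if (rc)
		return rc;

	port->uartclk = clk_get_rate(clk);	/* rate in Hz for baud calculations */
	port->private_data = clk;		/* retrieved again at remove() time */
	return 0;
}

static void example_teardown_clock(struct uart_port *port)
{
	clk_disable_unprepare(port->private_data);
}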
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 92c00b24d0df..6a169877109b 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -603,7 +603,7 @@ static void zs_receive_chars(struct zs_port *zport)
uart_insert_char(uport, status, Rx_OVR, ch, flag);
}
- tty_flip_buffer_push(uport->state->port.tty);
+ tty_flip_buffer_push(&uport->state->port);
}
static void zs_raw_transmit_chars(struct zs_port *zport)
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 9e071f6985f6..8983276aa35e 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -291,8 +291,7 @@ struct mgsl_struct {
bool lcr_mem_requested;
u32 misc_ctrl_value;
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
- char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
bool loopmode_insert_requested;
@@ -1440,7 +1439,6 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
u16 status;
int work = 0;
unsigned char DataByte;
- struct tty_struct *tty = info->port.tty;
struct mgsl_icount *icount = &info->icount;
if ( debug_level >= DEBUG_LEVEL_ISR )
@@ -1502,19 +1500,19 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
if (status & RXSTATUS_BREAK_RECEIVED) {
flag = TTY_BREAK;
if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
+ do_SAK(info->port.tty);
} else if (status & RXSTATUS_PARITY_ERROR)
flag = TTY_PARITY;
else if (status & RXSTATUS_FRAMING_ERROR)
flag = TTY_FRAME;
} /* end of if (error) */
- tty_insert_flip_char(tty, DataByte, flag);
+ tty_insert_flip_char(&info->port, DataByte, flag);
if (status & RXSTATUS_OVERRUN) {
/* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
- work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
}
}
@@ -1525,7 +1523,7 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
}
if(work)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
/* mgsl_isr_misc()
@@ -1852,7 +1850,7 @@ static void shutdown(struct mgsl_struct * info)
usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
usc_set_serial_signals(info);
}
@@ -1917,12 +1915,12 @@ static void mgsl_change_params(struct mgsl_struct *info)
cflag = info->port.tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
if (cflag & CBAUD)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -3046,7 +3044,7 @@ static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termio
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
@@ -3245,9 +3243,9 @@ static void dtr_rts(struct tty_port *port, int on)
spin_lock_irqsave(&info->irq_spinlock,flags);
if (on)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
@@ -3416,7 +3414,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
goto cleanup;
}
- info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -3898,7 +3896,13 @@ static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
if ( info->intermediate_rxbuffer == NULL )
return -ENOMEM;
-
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->intermediate_rxbuffer);
+ info->intermediate_rxbuffer = NULL;
+ return -ENOMEM;
+ }
return 0;
} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
@@ -3917,6 +3921,8 @@ static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
kfree(info->intermediate_rxbuffer);
info->intermediate_rxbuffer = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
} /* end of mgsl_free_intermediate_rxbuffer_memory() */
@@ -6233,8 +6239,8 @@ static void usc_get_serial_signals( struct mgsl_struct *info )
{
u16 status;
- /* clear all serial signals except DTR and RTS */
- info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* clear all serial signals except RTS and DTR */
+ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
/* Read the Misc Interrupt status Register (MISR) to get */
/* the V24 status signals. */
@@ -6259,7 +6265,7 @@ static void usc_get_serial_signals( struct mgsl_struct *info )
/* usc_set_serial_signals()
*
- * Set the state of DTR and RTS based on contents of
+ * Set the state of RTS and DTR based on contents of
* serial_signals member of device extension.
*
* Arguments: info pointer to device instance data
@@ -7773,8 +7779,8 @@ static int hdlcdev_open(struct net_device *dev)
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
mgsl_program_hw(info);
/* enable network layer transmit */
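
A recurring fix in the synclink hunks is replacing arithmetic '+' with bitwise '|' (and '&~' for clearing) when combining SerialSignal_* masks. A toy example of why, with made-up EXAMPLE_* values standing in for the real flag bits:

#define EXAMPLE_RTS 0x20
#define EXAMPLE_DTR 0x80

static void example_dtr_rts(unsigned char *signals, int on)
{
	if (on)
		*signals |= EXAMPLE_RTS | EXAMPLE_DTR;	/* idempotent, safe if already set */
	else
		*signals &= ~(EXAMPLE_RTS | EXAMPLE_DTR);

	/*
	 * "*signals += EXAMPLE_RTS + EXAMPLE_DTR" only works while both
	 * bits happen to be clear; if either is already set the addition
	 * carries into unrelated bits, which is why the patch uses | and &~.
	 */
}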
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index aba1e59f4a88..aa9eece35c3b 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -317,8 +317,7 @@ struct slgt_info {
unsigned char *tx_buf;
int tx_count;
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
- char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -683,7 +682,7 @@ static int open(struct tty_struct *tty, struct file *filp)
}
mutex_lock(&info->port.mutex);
- info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -786,7 +785,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
@@ -1561,8 +1560,8 @@ static int hdlcdev_open(struct net_device *dev)
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->signals |= SerialSignal_RTS | SerialSignal_DTR;
program_hw(info);
/* enable network layer transmit */
@@ -1855,7 +1854,6 @@ static void hdlcdev_exit(struct slgt_info *info)
*/
static void rx_async(struct slgt_info *info)
{
- struct tty_struct *tty = info->port.tty;
struct mgsl_icount *icount = &info->icount;
unsigned int start, end;
unsigned char *p;
@@ -1894,10 +1892,8 @@ static void rx_async(struct slgt_info *info)
else if (status & BIT0)
stat = TTY_FRAME;
}
- if (tty) {
- tty_insert_flip_char(tty, ch, stat);
- chars++;
- }
+ tty_insert_flip_char(&info->port, ch, stat);
+ chars++;
}
if (i < count) {
@@ -1918,8 +1914,8 @@ static void rx_async(struct slgt_info *info)
break;
}
- if (tty && chars)
- tty_flip_buffer_push(tty);
+ if (chars)
+ tty_flip_buffer_push(&info->port);
}
/*
@@ -1961,8 +1957,6 @@ static void bh_handler(struct work_struct *work)
struct slgt_info *info = container_of(work, struct slgt_info, task);
int action;
- if (!info)
- return;
info->bh_running = true;
while((action = bh_action(info))) {
@@ -2183,7 +2177,7 @@ static void isr_serial(struct slgt_info *info)
if (info->port.tty) {
if (!(status & info->ignore_status_mask)) {
if (info->read_status_mask & MASK_BREAK) {
- tty_insert_flip_char(info->port.tty, 0, TTY_BREAK);
+ tty_insert_flip_char(&info->port, 0, TTY_BREAK);
if (info->port.flags & ASYNC_SAK)
do_SAK(info->port.tty);
}
@@ -2494,7 +2488,7 @@ static void shutdown(struct slgt_info *info)
slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
- info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
@@ -2554,12 +2548,12 @@ static void change_params(struct slgt_info *info)
cflag = info->port.tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
if (cflag & CBAUD)
- info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -3262,9 +3256,9 @@ static void dtr_rts(struct tty_port *port, int on)
spin_lock_irqsave(&info->lock,flags);
if (on)
- info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3355,11 +3349,24 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
return retval;
}
+/*
+ * allocate buffers used for calling line discipline receive_buf
+ * directly in synchronous mode
+ * note: add 5 bytes to max frame size to allow appending
+ * 32-bit CRC and status byte when configured to do so
+ */
static int alloc_tmp_rbuf(struct slgt_info *info)
{
info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
if (info->tmp_rbuf == NULL)
return -ENOMEM;
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size + 5, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->tmp_rbuf);
+ info->tmp_rbuf = NULL;
+ return -ENOMEM;
+ }
return 0;
}
@@ -3367,6 +3374,8 @@ static void free_tmp_rbuf(struct slgt_info *info)
{
kfree(info->tmp_rbuf);
info->tmp_rbuf = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
}
/*
@@ -4110,7 +4119,7 @@ static void reset_port(struct slgt_info *info)
tx_stop(info);
rx_stop(info);
- info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
@@ -4537,8 +4546,8 @@ static void get_signals(struct slgt_info *info)
{
unsigned short status = rd_reg16(info, SSR);
- /* clear all serial signals except DTR and RTS */
- info->signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* clear all serial signals except RTS and DTR */
+ info->signals &= SerialSignal_RTS | SerialSignal_DTR;
if (status & BIT3)
info->signals |= SerialSignal_DSR;
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index fd43fb6f7cee..6d5780cf1d57 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -262,8 +262,7 @@ typedef struct _synclinkmp_info {
bool sca_statctrl_requested;
u32 misc_ctrl_value;
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
- char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -762,7 +761,7 @@ static int open(struct tty_struct *tty, struct file *filp)
goto cleanup;
}
- info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -883,7 +882,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
@@ -1677,8 +1676,8 @@ static int hdlcdev_open(struct net_device *dev)
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
program_hw(info);
/* enable network layer transmit */
@@ -2008,9 +2007,6 @@ static void bh_handler(struct work_struct *work)
SLMP_INFO *info = container_of(work, SLMP_INFO, task);
int action;
- if (!info)
- return;
-
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):%s bh_handler() entry\n",
__FILE__,__LINE__,info->device_name);
@@ -2132,13 +2128,11 @@ static void isr_rxint(SLMP_INFO * info)
/* process break detection if tty control
* is not set to ignore it
*/
- if ( tty ) {
- if (!(status & info->ignore_status_mask1)) {
- if (info->read_status_mask1 & BRKD) {
- tty_insert_flip_char(tty, 0, TTY_BREAK);
- if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
- }
+ if (!(status & info->ignore_status_mask1)) {
+ if (info->read_status_mask1 & BRKD) {
+ tty_insert_flip_char(&info->port, 0, TTY_BREAK);
+ if (tty && (info->port.flags & ASYNC_SAK))
+ do_SAK(tty);
}
}
}
@@ -2170,7 +2164,6 @@ static void isr_rxrdy(SLMP_INFO * info)
{
u16 status;
unsigned char DataByte;
- struct tty_struct *tty = info->port.tty;
struct mgsl_icount *icount = &info->icount;
if ( debug_level >= DEBUG_LEVEL_ISR )
@@ -2203,26 +2196,22 @@ static void isr_rxrdy(SLMP_INFO * info)
status &= info->read_status_mask2;
- if ( tty ) {
- if (status & PE)
- flag = TTY_PARITY;
- else if (status & FRME)
- flag = TTY_FRAME;
- if (status & OVRN) {
- /* Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- over = true;
- }
+ if (status & PE)
+ flag = TTY_PARITY;
+ else if (status & FRME)
+ flag = TTY_FRAME;
+ if (status & OVRN) {
+ /* Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ over = true;
}
} /* end of if (error) */
- if ( tty ) {
- tty_insert_flip_char(tty, DataByte, flag);
- if (over)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- }
+ tty_insert_flip_char(&info->port, DataByte, flag);
+ if (over)
+ tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
}
if ( debug_level >= DEBUG_LEVEL_ISR ) {
@@ -2232,8 +2221,7 @@ static void isr_rxrdy(SLMP_INFO * info)
icount->frame,icount->overrun);
}
- if ( tty )
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
static void isr_txeom(SLMP_INFO * info, unsigned char status)
@@ -2718,7 +2706,7 @@ static void shutdown(SLMP_INFO * info)
reset_port(info);
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
@@ -2780,12 +2768,12 @@ static void change_params(SLMP_INFO *info)
cflag = info->port.tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
if (cflag & CBAUD)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -3224,12 +3212,12 @@ static int tiocmget(struct tty_struct *tty)
get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
- result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
- ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
- ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
- ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
- ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
- ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
+ result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
+ ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR : 0) |
+ ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR : 0) |
+ ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG : 0) |
+ ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR : 0) |
+ ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS : 0);
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmget() value=%08X\n",
@@ -3284,9 +3272,9 @@ static void dtr_rts(struct tty_port *port, int on)
spin_lock_irqsave(&info->lock,flags);
if (on)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3553,6 +3541,13 @@ static int alloc_tmp_rx_buf(SLMP_INFO *info)
info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
if (info->tmp_rx_buf == NULL)
return -ENOMEM;
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->tmp_rx_buf);
+ info->tmp_rx_buf = NULL;
+ return -ENOMEM;
+ }
return 0;
}
@@ -3560,6 +3555,8 @@ static void free_tmp_rx_buf(SLMP_INFO *info)
{
kfree(info->tmp_rx_buf);
info->tmp_rx_buf = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
}
static int claim_resources(SLMP_INFO *info)
@@ -4357,7 +4354,7 @@ static void reset_port(SLMP_INFO *info)
tx_stop(info);
rx_stop(info);
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
/* disable all port interrupts */
@@ -4753,8 +4750,8 @@ static void get_signals(SLMP_INFO *info)
u16 gpstatus = read_status_reg(info);
u16 testbit;
- /* clear all serial signals except DTR and RTS */
- info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* clear all serial signals except RTS and DTR */
+ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
/* set serial signal bits to reflect MISR */
@@ -4773,7 +4770,7 @@ static void get_signals(SLMP_INFO *info)
info->serial_signals |= SerialSignal_DSR;
}
-/* Set the state of DTR and RTS based on contents of
+/* Set the state of RTS and DTR based on contents of
* serial_signals member of device context.
*/
static void set_signals(SLMP_INFO *info)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index b3c4a250ff86..814655ee2d61 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/fs.h>
@@ -41,6 +42,7 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/uaccess.h>
+#include <linux/moduleparam.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
@@ -577,8 +579,71 @@ struct sysrq_state {
bool active;
bool need_reinject;
bool reinjecting;
+
+ /* reset sequence handling */
+ bool reset_canceled;
+ unsigned long reset_keybit[BITS_TO_LONGS(KEY_CNT)];
+ int reset_seq_len;
+ int reset_seq_cnt;
+ int reset_seq_version;
};
+#define SYSRQ_KEY_RESET_MAX 20 /* Should be plenty */
+static unsigned short sysrq_reset_seq[SYSRQ_KEY_RESET_MAX];
+static unsigned int sysrq_reset_seq_len;
+static unsigned int sysrq_reset_seq_version = 1;
+
+static void sysrq_parse_reset_sequence(struct sysrq_state *state)
+{
+ int i;
+ unsigned short key;
+
+ state->reset_seq_cnt = 0;
+
+ for (i = 0; i < sysrq_reset_seq_len; i++) {
+ key = sysrq_reset_seq[i];
+
+ if (key == KEY_RESERVED || key > KEY_MAX)
+ break;
+
+ __set_bit(key, state->reset_keybit);
+ state->reset_seq_len++;
+
+ if (test_bit(key, state->key_down))
+ state->reset_seq_cnt++;
+ }
+
+ /* Keep reset disabled until the previously pressed keys are released */
+ state->reset_canceled = state->reset_seq_cnt != 0;
+
+ state->reset_seq_version = sysrq_reset_seq_version;
+}
+
+static bool sysrq_detect_reset_sequence(struct sysrq_state *state,
+ unsigned int code, int value)
+{
+ if (!test_bit(code, state->reset_keybit)) {
+ /*
+ * Pressing any key _not_ in reset sequence cancels
+ * the reset sequence.
+ */
+ if (value && state->reset_seq_cnt)
+ state->reset_canceled = true;
+ } else if (value == 0) {
+ /* key release */
+ if (--state->reset_seq_cnt == 0)
+ state->reset_canceled = false;
+ } else if (value == 1) {
+ /* key press, not autorepeat */
+ if (++state->reset_seq_cnt == state->reset_seq_len &&
+ !state->reset_canceled) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void sysrq_reinject_alt_sysrq(struct work_struct *work)
{
struct sysrq_state *sysrq =
@@ -605,100 +670,121 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
}
}
-static bool sysrq_filter(struct input_handle *handle,
- unsigned int type, unsigned int code, int value)
+static bool sysrq_handle_keypress(struct sysrq_state *sysrq,
+ unsigned int code, int value)
{
- struct sysrq_state *sysrq = handle->private;
bool was_active = sysrq->active;
bool suppress;
- /*
- * Do not filter anything if we are in the process of re-injecting
- * Alt+SysRq combination.
- */
- if (sysrq->reinjecting)
- return false;
+ switch (code) {
- switch (type) {
+ case KEY_LEFTALT:
+ case KEY_RIGHTALT:
+ if (!value) {
+ /* One of ALTs is being released */
+ if (sysrq->active && code == sysrq->alt_use)
+ sysrq->active = false;
- case EV_SYN:
- suppress = false;
+ sysrq->alt = KEY_RESERVED;
+
+ } else if (value != 2) {
+ sysrq->alt = code;
+ sysrq->need_reinject = false;
+ }
break;
- case EV_KEY:
- switch (code) {
+ case KEY_SYSRQ:
+ if (value == 1 && sysrq->alt != KEY_RESERVED) {
+ sysrq->active = true;
+ sysrq->alt_use = sysrq->alt;
+ /*
+ * If nothing else will be pressed we'll need
+ * to re-inject Alt-SysRq keystroke.
+ */
+ sysrq->need_reinject = true;
+ }
- case KEY_LEFTALT:
- case KEY_RIGHTALT:
- if (!value) {
- /* One of ALTs is being released */
- if (sysrq->active && code == sysrq->alt_use)
- sysrq->active = false;
+ /*
+ * Pretend that sysrq was never pressed at all. This
+ * is needed to properly handle KGDB which will try
+ * to release all keys after exiting the debugger. If we
+ * do not clear the key bit, KGDB will end up sending
+ * release events for Alt and SysRq, potentially
+ * triggering print screen function.
+ */
+ if (sysrq->active)
+ clear_bit(KEY_SYSRQ, sysrq->handle.dev->key);
- sysrq->alt = KEY_RESERVED;
+ break;
- } else if (value != 2) {
- sysrq->alt = code;
- sysrq->need_reinject = false;
- }
- break;
+ default:
+ if (sysrq->active && value && value != 2) {
+ sysrq->need_reinject = false;
+ __handle_sysrq(sysrq_xlate[code], true);
+ }
+ break;
+ }
- case KEY_SYSRQ:
- if (value == 1 && sysrq->alt != KEY_RESERVED) {
- sysrq->active = true;
- sysrq->alt_use = sysrq->alt;
- /*
- * If nothing else will be pressed we'll need
- * to re-inject Alt-SysRq keystroke.
- */
- sysrq->need_reinject = true;
- }
+ suppress = sysrq->active;
- /*
- * Pretend that sysrq was never pressed at all. This
- * is needed to properly handle KGDB which will try
- * to release all keys after exiting the debugger. If we
- * do not clear the key bit, KGDB will end up sending
- * release events for Alt and SysRq, potentially
- * triggering print screen function.
- */
- if (sysrq->active)
- clear_bit(KEY_SYSRQ, handle->dev->key);
+ if (!sysrq->active) {
- break;
+ /*
+ * See if reset sequence has changed since the last time.
+ */
+ if (sysrq->reset_seq_version != sysrq_reset_seq_version)
+ sysrq_parse_reset_sequence(sysrq);
- default:
- if (sysrq->active && value && value != 2) {
- sysrq->need_reinject = false;
- __handle_sysrq(sysrq_xlate[code], true);
- }
- break;
+ /*
+ * If we are not suppressing key presses keep track of
+ * keyboard state so we can release keys that have been
+ * pressed before entering SysRq mode.
+ */
+ if (value)
+ set_bit(code, sysrq->key_down);
+ else
+ clear_bit(code, sysrq->key_down);
+
+ if (was_active)
+ schedule_work(&sysrq->reinject_work);
+
+ if (sysrq_detect_reset_sequence(sysrq, code, value)) {
+ /* Force emergency reboot */
+ __handle_sysrq(sysrq_xlate[KEY_B], false);
}
- suppress = sysrq->active;
+ } else if (value == 0 && test_and_clear_bit(code, sysrq->key_down)) {
+ /*
+ * Pass on release events for keys that were pressed before
+ * entering SysRq mode.
+ */
+ suppress = false;
+ }
- if (!sysrq->active) {
- /*
- * If we are not suppressing key presses keep track of
- * keyboard state so we can release keys that have been
- * pressed before entering SysRq mode.
- */
- if (value)
- set_bit(code, sysrq->key_down);
- else
- clear_bit(code, sysrq->key_down);
+ return suppress;
+}
- if (was_active)
- schedule_work(&sysrq->reinject_work);
+static bool sysrq_filter(struct input_handle *handle,
+ unsigned int type, unsigned int code, int value)
+{
+ struct sysrq_state *sysrq = handle->private;
+ bool suppress;
- } else if (value == 0 &&
- test_and_clear_bit(code, sysrq->key_down)) {
- /*
- * Pass on release events for keys that were pressed before
- * entering SysRq mode.
- */
- suppress = false;
- }
+ /*
+ * Do not filter anything if we are in the process of re-injecting
+ * Alt+SysRq combination.
+ */
+ if (sysrq->reinjecting)
+ return false;
+
+ switch (type) {
+
+ case EV_SYN:
+ suppress = false;
+ break;
+
+ case EV_KEY:
+ suppress = sysrq_handle_keypress(sysrq, code, value);
break;
default:
@@ -786,7 +872,20 @@ static bool sysrq_handler_registered;
static inline void sysrq_register_handler(void)
{
+ extern unsigned short platform_sysrq_reset_seq[] __weak;
+ unsigned short key;
int error;
+ int i;
+
+ if (platform_sysrq_reset_seq) {
+ for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
+ key = platform_sysrq_reset_seq[i];
+ if (key == KEY_RESERVED || key > KEY_MAX)
+ break;
+
+ sysrq_reset_seq[sysrq_reset_seq_len++] = key;
+ }
+ }
error = input_register_handler(&sysrq_handler);
if (error)
@@ -803,6 +902,36 @@ static inline void sysrq_unregister_handler(void)
}
}
+static int sysrq_reset_seq_param_set(const char *buffer,
+ const struct kernel_param *kp)
+{
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buffer, 0, &val);
+ if (error < 0)
+ return error;
+
+ if (val > KEY_MAX)
+ return -EINVAL;
+
+ *((unsigned short *)kp->arg) = val;
+ sysrq_reset_seq_version++;
+
+ return 0;
+}
+
+static struct kernel_param_ops param_ops_sysrq_reset_seq = {
+ .get = param_get_ushort,
+ .set = sysrq_reset_seq_param_set,
+};
+
+#define param_check_sysrq_reset_seq(name, p) \
+ __param_check(name, p, unsigned short)
+
+module_param_array_named(reset_seq, sysrq_reset_seq, sysrq_reset_seq,
+ &sysrq_reset_seq_len, 0644);
+
#else
static inline void sysrq_register_handler(void)
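
The new sysrq code above keeps a bitmap of the configured reset keys plus a count of how many are currently held: any press outside the sequence cancels detection until everything is released, and completing the sequence forces an emergency reboot (the sysrq-b action). The sequence can be seeded from a weak platform_sysrq_reset_seq[] symbol, picked up in sysrq_register_handler(). Not part of the patch, a hedged sketch of how a board file might supply such a default; the particular key choice is only an example, with keycodes from <linux/input.h>:

/* in a platform/board file; overrides the weak symbol referenced above */
unsigned short platform_sysrq_reset_seq[] = {
	KEY_LEFTCTRL,
	KEY_LEFTALT,
	KEY_F4,
	KEY_RESERVED,	/* terminates the scan in sysrq_register_handler() */
};

The same list can later be changed through the reset_seq module parameter registered above (mode 0644, e.g. sysrq.reset_seq= on the kernel command line when the handler is built in), which bumps sysrq_reset_seq_version and causes the filter to re-parse the sequence.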
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 45d916198f78..bb119934e76c 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/ratelimit.h>
/**
* tty_buffer_free_all - free buffers used by a tty
@@ -119,11 +120,14 @@ static void __tty_buffer_flush(struct tty_port *port)
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *thead;
- while ((thead = buf->head) != NULL) {
- buf->head = thead->next;
- tty_buffer_free(port, thead);
+ if (unlikely(buf->head == NULL))
+ return;
+ while ((thead = buf->head->next) != NULL) {
+ tty_buffer_free(port, buf->head);
+ buf->head = thead;
}
- buf->tail = NULL;
+ WARN_ON(buf->head != buf->tail);
+ buf->head->read = buf->head->commit;
}
/**
@@ -194,19 +198,22 @@ static struct tty_buffer *tty_buffer_find(struct tty_port *port, size_t size)
have queued and recycle that ? */
}
/**
- * __tty_buffer_request_room - grow tty buffer if needed
+ * tty_buffer_request_room - grow tty buffer if needed
* @tty: tty structure
* @size: size desired
*
* Make at least size bytes of linear space available for the tty
* buffer. If we fail return the size we managed to find.
- * Locking: Caller must hold port->buf.lock
+ *
+ * Locking: Takes port->buf.lock
*/
-static int __tty_buffer_request_room(struct tty_port *port, size_t size)
+int tty_buffer_request_room(struct tty_port *port, size_t size)
{
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *b, *n;
int left;
+ unsigned long flags;
+ spin_lock_irqsave(&buf->lock, flags);
/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
remove this conditional if its worth it. This would be invisible
to the callers */
@@ -228,37 +235,14 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size)
} else
size = left;
}
-
+ spin_unlock_irqrestore(&buf->lock, flags);
return size;
}
-
-
-/**
- * tty_buffer_request_room - grow tty buffer if needed
- * @tty: tty structure
- * @size: size desired
- *
- * Make at least size bytes of linear space available for the tty
- * buffer. If we fail return the size we managed to find.
- *
- * Locking: Takes port->buf.lock
- */
-int tty_buffer_request_room(struct tty_struct *tty, size_t size)
-{
- struct tty_port *port = tty->port;
- unsigned long flags;
- int length;
-
- spin_lock_irqsave(&port->buf.lock, flags);
- length = __tty_buffer_request_room(port, size);
- spin_unlock_irqrestore(&port->buf.lock, flags);
- return length;
-}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
* tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
- * @tty: tty structure
+ * @port: tty port
* @chars: characters
* @flag: flag value for each character
* @size: size
@@ -269,29 +253,21 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
* Locking: Called functions may take port->buf.lock
*/
-int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
+int tty_insert_flip_string_fixed_flag(struct tty_port *port,
const unsigned char *chars, char flag, size_t size)
{
- struct tty_bufhead *buf = &tty->port->buf;
int copied = 0;
do {
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
- int space;
- unsigned long flags;
- struct tty_buffer *tb;
-
- spin_lock_irqsave(&buf->lock, flags);
- space = __tty_buffer_request_room(tty->port, goal);
- tb = buf->tail;
+ int space = tty_buffer_request_room(port, goal);
+ struct tty_buffer *tb = port->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0)) {
- spin_unlock_irqrestore(&buf->lock, flags);
break;
}
memcpy(tb->char_buf_ptr + tb->used, chars, space);
memset(tb->flag_buf_ptr + tb->used, flag, space);
tb->used += space;
- spin_unlock_irqrestore(&buf->lock, flags);
copied += space;
chars += space;
/* There is a small chance that we need to split the data over
@@ -303,7 +279,7 @@ EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
/**
* tty_insert_flip_string_flags - Add characters to the tty buffer
- * @tty: tty structure
+ * @port: tty port
* @chars: characters
* @flags: flag bytes
* @size: size
@@ -315,29 +291,21 @@ EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
* Locking: Called functions may take port->buf.lock
*/
-int tty_insert_flip_string_flags(struct tty_struct *tty,
+int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size)
{
- struct tty_bufhead *buf = &tty->port->buf;
int copied = 0;
do {
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
- int space;
- unsigned long __flags;
- struct tty_buffer *tb;
-
- spin_lock_irqsave(&buf->lock, __flags);
- space = __tty_buffer_request_room(tty->port, goal);
- tb = buf->tail;
+ int space = tty_buffer_request_room(port, goal);
+ struct tty_buffer *tb = port->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0)) {
- spin_unlock_irqrestore(&buf->lock, __flags);
break;
}
memcpy(tb->char_buf_ptr + tb->used, chars, space);
memcpy(tb->flag_buf_ptr + tb->used, flags, space);
tb->used += space;
- spin_unlock_irqrestore(&buf->lock, __flags);
copied += space;
chars += space;
flags += space;
@@ -350,7 +318,7 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
* tty_schedule_flip - push characters to ldisc
- * @tty: tty to push from
+ * @port: tty port to push from
*
* Takes any pending buffers and transfers their ownership to the
* ldisc side of the queue. It then schedules those characters for
@@ -361,11 +329,11 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
* Locking: Takes port->buf.lock
*/
-void tty_schedule_flip(struct tty_struct *tty)
+void tty_schedule_flip(struct tty_port *port)
{
- struct tty_bufhead *buf = &tty->port->buf;
+ struct tty_bufhead *buf = &port->buf;
unsigned long flags;
- WARN_ON(tty->low_latency);
+ WARN_ON(port->low_latency);
spin_lock_irqsave(&buf->lock, flags);
if (buf->tail != NULL)
@@ -377,7 +345,7 @@ EXPORT_SYMBOL(tty_schedule_flip);
/**
* tty_prepare_flip_string - make room for characters
- * @tty: tty
+ * @port: tty port
* @chars: return pointer for character write area
* @size: desired size
*
@@ -390,31 +358,23 @@ EXPORT_SYMBOL(tty_schedule_flip);
* Locking: May call functions taking port->buf.lock
*/
-int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
+int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
size_t size)
{
- struct tty_bufhead *buf = &tty->port->buf;
- int space;
- unsigned long flags;
- struct tty_buffer *tb;
-
- spin_lock_irqsave(&buf->lock, flags);
- space = __tty_buffer_request_room(tty->port, size);
-
- tb = buf->tail;
+ int space = tty_buffer_request_room(port, size);
if (likely(space)) {
+ struct tty_buffer *tb = port->buf.tail;
*chars = tb->char_buf_ptr + tb->used;
memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
tb->used += space;
}
- spin_unlock_irqrestore(&buf->lock, flags);
return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
/**
* tty_prepare_flip_string_flags - make room for characters
- * @tty: tty
+ * @port: tty port
* @chars: return pointer for character write area
* @flags: return pointer for status flag write area
* @size: desired size
@@ -428,24 +388,16 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
* Locking: May call functions taking port->buf.lock
*/
-int tty_prepare_flip_string_flags(struct tty_struct *tty,
+int tty_prepare_flip_string_flags(struct tty_port *port,
unsigned char **chars, char **flags, size_t size)
{
- struct tty_bufhead *buf = &tty->port->buf;
- int space;
- unsigned long __flags;
- struct tty_buffer *tb;
-
- spin_lock_irqsave(&buf->lock, __flags);
- space = __tty_buffer_request_room(tty->port, size);
-
- tb = buf->tail;
+ int space = tty_buffer_request_room(port, size);
if (likely(space)) {
+ struct tty_buffer *tb = port->buf.tail;
*chars = tb->char_buf_ptr + tb->used;
*flags = tb->flag_buf_ptr + tb->used;
tb->used += space;
}
- spin_unlock_irqrestore(&buf->lock, __flags);
return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
@@ -539,16 +491,17 @@ static void flush_to_ldisc(struct work_struct *work)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
- if (!tty->low_latency)
+ if (!tty->port->low_latency)
flush_work(&tty->port->buf.work);
}
/**
* tty_flip_buffer_push - terminal
- * @tty: tty to push
+ * @port: tty port to push
*
* Queue a push of the terminal flip buffers to the line discipline. This
- * function must not be called from IRQ context if tty->low_latency is set.
+ * function must not be called from IRQ context if port->low_latency is
+ * set.
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later.
@@ -556,9 +509,9 @@ void tty_flush_to_ldisc(struct tty_struct *tty)
* Locking: tty buffer lock. Driver locks in low latency mode.
*/
-void tty_flip_buffer_push(struct tty_struct *tty)
+void tty_flip_buffer_push(struct tty_port *port)
{
- struct tty_bufhead *buf = &tty->port->buf;
+ struct tty_bufhead *buf = &port->buf;
unsigned long flags;
spin_lock_irqsave(&buf->lock, flags);
@@ -566,7 +519,7 @@ void tty_flip_buffer_push(struct tty_struct *tty)
buf->tail->commit = buf->tail->used;
spin_unlock_irqrestore(&buf->lock, flags);
- if (tty->low_latency)
+ if (port->low_latency)
flush_to_ldisc(&buf->work);
else
schedule_work(&buf->work);
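/*
 * Editor's sketch (not part of the patch): with the hunks above, the flip
 * buffer helpers take a struct tty_port instead of a struct tty_struct, so
 * a driver's RX path no longer needs to look up and refcount the tty.
 * my_uart_push_rx() is illustrative only.
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>

static void my_uart_push_rx(struct uart_port *uport, const unsigned char *buf,
			    size_t len)
{
	struct tty_port *port = &uport->state->port;	/* flip buffers live here now */
	size_t i;

	for (i = 0; i < len; i++)
		tty_insert_flip_char(port, buf[i], TTY_NORMAL);

	tty_flip_buffer_push(port);			/* queue the push to the ldisc */
}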
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index da9fde850754..fd473639ab70 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -536,7 +536,7 @@ EXPORT_SYMBOL_GPL(tty_wakeup);
* __tty_hangup - actual handler for hangup events
* @work: tty device
*
- * This can be called by the "eventd" kernel thread. That is process
+ * This can be called by a "kworker" kernel thread. That is process
* synchronous but doesn't hold any locks, so we need to make sure we
* have the appropriate locks for what we're doing.
*
@@ -977,8 +977,7 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
else
i = -EIO;
tty_ldisc_deref(ld);
- if (i > 0)
- inode->i_atime = current_fs_time(inode->i_sb);
+
return i;
}
@@ -1079,11 +1078,8 @@ static inline ssize_t do_tty_write(
break;
cond_resched();
}
- if (written) {
- struct inode *inode = file->f_path.dentry->d_inode;
- inode->i_mtime = current_fs_time(inode->i_sb);
+ if (written)
ret = written;
- }
out:
tty_write_unlock(tty);
return ret;
@@ -2203,6 +2199,7 @@ done:
mutex_unlock(&tty->termios_mutex);
return 0;
}
+EXPORT_SYMBOL(tty_do_resize);
/**
* tiocswinsz - implement window size set ioctl
@@ -2906,9 +2903,9 @@ void do_SAK(struct tty_struct *tty)
EXPORT_SYMBOL(do_SAK);
-static int dev_match_devt(struct device *dev, void *data)
+static int dev_match_devt(struct device *dev, const void *data)
{
- dev_t *devt = data;
+ const dev_t *devt = data;
return dev->devt == *devt;
}
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 8481b29d5b3a..d58b92cc187c 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
if (opt & TERMIOS_WAIT) {
tty_wait_until_sent(tty, 0);
if (signal_pending(current))
- return -EINTR;
+ return -ERESTARTSYS;
}
tty_set_termios(tty, &tmp_termios);
@@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
if (opt & TERMIOS_WAIT) {
tty_wait_until_sent(tty, 0);
if (signal_pending(current))
- return -EINTR;
+ return -ERESTARTSYS;
}
mutex_lock(&tty->termios_mutex);
@@ -1096,12 +1096,16 @@ int tty_perform_flush(struct tty_struct *tty, unsigned long arg)
ld = tty_ldisc_ref_wait(tty);
switch (arg) {
case TCIFLUSH:
- if (ld && ld->ops->flush_buffer)
+ if (ld && ld->ops->flush_buffer) {
ld->ops->flush_buffer(tty);
+ tty_unthrottle(tty);
+ }
break;
case TCIOFLUSH:
- if (ld && ld->ops->flush_buffer)
+ if (ld && ld->ops->flush_buffer) {
ld->ops->flush_buffer(tty);
+ tty_unthrottle(tty);
+ }
/* fall through */
case TCOFLUSH:
tty_driver_flush_buffer(tty);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index c5782294e532..d794087c327e 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -64,7 +64,9 @@ static void put_ldisc(struct tty_ldisc *ld)
return;
}
raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- wake_up(&ld->wq_idle);
+
+ if (waitqueue_active(&ld->wq_idle))
+ wake_up(&ld->wq_idle);
}
/**
@@ -934,17 +936,17 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* race with the set_ldisc code path.
*/
- tty_lock_pair(tty, o_tty);
tty_ldisc_halt(tty);
- tty_ldisc_flush_works(tty);
- if (o_tty) {
+ if (o_tty)
tty_ldisc_halt(o_tty);
+
+ tty_ldisc_flush_works(tty);
+ if (o_tty)
tty_ldisc_flush_works(o_tty);
- }
+ tty_lock_pair(tty, o_tty);
/* This will need doing differently if we need to lock */
tty_ldisc_kill(tty);
-
if (o_tty)
tty_ldisc_kill(o_tty);
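/*
 * Editor's note (not part of the patch): the put_ldisc() hunk above is the
 * common "skip wake_up() when nobody waits" optimization.  A generic sketch
 * of the pattern with a hypothetical context structure; it relies on the
 * waiter re-checking its condition under wait_event(), and on the counter
 * update being ordered before the waitqueue_active() check.
 */
#include <linux/wait.h>
#include <linux/atomic.h>

struct my_ctx {
	atomic_t users;
	wait_queue_head_t wq;
};

static void my_ctx_put(struct my_ctx *ctx)
{
	/* atomic_dec_and_test() is fully ordered, so a waiter that re-checks
	 * the count inside wait_event() cannot miss this transition. */
	if (atomic_dec_and_test(&ctx->users)) {
		if (waitqueue_active(&ctx->wq))
			wake_up(&ctx->wq);
	}
}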
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index 14a51c9960df..17ae94cb29f8 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -27,8 +27,6 @@ $(obj)/defkeymap.o: $(obj)/defkeymap.c
ifdef GENERATE_KEYMAP
$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
- loadkeys --mktable $< > $@.tmp
- sed -e 's/^static *//' $@.tmp > $@
- rm $@.tmp
+ loadkeys --mktable $< > $@
endif
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 681765baef69..a9af1b9ae160 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -307,26 +307,17 @@ int kbd_rate(struct kbd_repeat *rep)
*/
static void put_queue(struct vc_data *vc, int ch)
{
- struct tty_struct *tty = vc->port.tty;
-
- if (tty) {
- tty_insert_flip_char(tty, ch, 0);
- tty_schedule_flip(tty);
- }
+ tty_insert_flip_char(&vc->port, ch, 0);
+ tty_schedule_flip(&vc->port);
}
static void puts_queue(struct vc_data *vc, char *cp)
{
- struct tty_struct *tty = vc->port.tty;
-
- if (!tty)
- return;
-
while (*cp) {
- tty_insert_flip_char(tty, *cp, 0);
+ tty_insert_flip_char(&vc->port, *cp, 0);
cp++;
}
- tty_schedule_flip(tty);
+ tty_schedule_flip(&vc->port);
}
static void applkey(struct vc_data *vc, int key, char mode)
@@ -582,12 +573,8 @@ static void fn_inc_console(struct vc_data *vc)
static void fn_send_intr(struct vc_data *vc)
{
- struct tty_struct *tty = vc->port.tty;
-
- if (!tty)
- return;
- tty_insert_flip_char(tty, 0, TTY_BREAK);
- tty_schedule_flip(tty);
+ tty_insert_flip_char(&vc->port, 0, TTY_BREAK);
+ tty_schedule_flip(&vc->port);
}
static void fn_scroll_forw(struct vc_data *vc)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fd89687d068..1a2728034599 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1333,13 +1333,13 @@ static void csi_m(struct vc_data *vc)
update_attr(vc);
}
-static void respond_string(const char *p, struct tty_struct *tty)
+static void respond_string(const char *p, struct tty_port *port)
{
while (*p) {
- tty_insert_flip_char(tty, *p, 0);
+ tty_insert_flip_char(port, *p, 0);
p++;
}
- tty_schedule_flip(tty);
+ tty_schedule_flip(port);
}
static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
@@ -1347,17 +1347,17 @@ static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
char buf[40];
sprintf(buf, "\033[%d;%dR", vc->vc_y + (vc->vc_decom ? vc->vc_top + 1 : 1), vc->vc_x + 1);
- respond_string(buf, tty);
+ respond_string(buf, tty->port);
}
static inline void status_report(struct tty_struct *tty)
{
- respond_string("\033[0n", tty); /* Terminal ok */
+ respond_string("\033[0n", tty->port); /* Terminal ok */
}
-static inline void respond_ID(struct tty_struct * tty)
+static inline void respond_ID(struct tty_struct *tty)
{
- respond_string(VT102ID, tty);
+ respond_string(VT102ID, tty->port);
}
void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry)
@@ -1366,7 +1366,7 @@ void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry)
sprintf(buf, "\033[M%c%c%c", (char)(' ' + butt), (char)('!' + mrx),
(char)('!' + mry));
- respond_string(buf, tty);
+ respond_string(buf, tty->port);
}
/* invoked via ioctl(TIOCLINUX) and through set_selection */
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index f56d185790ea..e92eeaf251fe 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -1,6 +1,5 @@
menuconfig UIO
tristate "Userspace I/O drivers"
- depends on !S390
help
Enable this to allow the userspace driver core code to be
built. This code allows userspace programs easy access to
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4c90b510d016..640ae6c6d2d2 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -37,6 +37,7 @@ config USB_ARCH_HAS_EHCI
default y if ARCH_W90X900
default y if ARCH_AT91
default y if ARCH_MXC
+ default y if ARCH_MXS
default y if ARCH_OMAP3
default y if ARCH_CNS3XXX
default y if ARCH_VT8500
diff --git a/drivers/usb/c67x00/c67x00-ll-hpi.c b/drivers/usb/c67x00/c67x00-ll-hpi.c
index a9636f43bca2..3a1ca4dfc83a 100644
--- a/drivers/usb/c67x00/c67x00-ll-hpi.c
+++ b/drivers/usb/c67x00/c67x00-ll-hpi.c
@@ -237,7 +237,7 @@ void c67x00_ll_hpi_disable_sofeop(struct c67x00_sie *sie)
/* -------------------------------------------------------------------------- */
/* Transactions */
-static inline u16 ll_recv_msg(struct c67x00_device *dev)
+static inline int ll_recv_msg(struct c67x00_device *dev)
{
u16 res;
diff --git a/drivers/usb/chipidea/ci13xxx_imx.h b/drivers/usb/chipidea/ci13xxx_imx.h
index 2e88accb3d67..9cd2e910b1ca 100644
--- a/drivers/usb/chipidea/ci13xxx_imx.h
+++ b/drivers/usb/chipidea/ci13xxx_imx.h
@@ -19,7 +19,7 @@ struct usbmisc_usb_device {
struct device *dev; /* usb controller device */
int index;
- int disable_oc:1; /* over current detect disabled */
+ unsigned int disable_oc:1; /* over current detect disabled */
};
int usbmisc_set_ops(const struct usbmisc_ops *ops);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index aebf695a9344..57cae1f897b2 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -411,7 +411,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
base = devm_request_and_ioremap(dev, res);
- if (!res) {
+ if (!base) {
dev_err(dev, "can't request and ioremap resource\n");
return -ENOMEM;
}
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 3bc244d2636a..a62c4a47d52c 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -222,7 +222,7 @@ static struct {
} dbg_data = {
.idx = 0,
.tty = 0,
- .lck = __RW_LOCK_UNLOCKED(lck)
+ .lck = __RW_LOCK_UNLOCKED(dbg_data.lck)
};
/**
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index caecad9213f5..8e9d31277c43 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -70,6 +70,9 @@ static int host_start(struct ci13xxx *ci)
else
ci->hcd = hcd;
+ if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING)
+ hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
return ret;
}
diff --git a/drivers/usb/chipidea/usbmisc_imx6q.c b/drivers/usb/chipidea/usbmisc_imx6q.c
index 845efe29e6b9..a1bce391e825 100644
--- a/drivers/usb/chipidea/usbmisc_imx6q.c
+++ b/drivers/usb/chipidea/usbmisc_imx6q.c
@@ -98,9 +98,9 @@ static int usbmisc_imx6q_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!data->base)
- return -EADDRNOTAVAIL;
+ data->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
data->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(data->clk)) {
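/*
 * Editor's sketch (not part of the patch): the conversion above is the
 * standard devm_ioremap_resource() probe idiom.  The helper validates the
 * resource, requests the region and ioremaps it, and reports failures as an
 * ERR_PTR(), so the NULL check and the hand-rolled error code go away.
 * example_probe() is illustrative only.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the mapped registers via base ... */
	return 0;
}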
diff --git a/drivers/usb/class/Kconfig b/drivers/usb/class/Kconfig
index 2519e320098f..316aac8e4ca1 100644
--- a/drivers/usb/class/Kconfig
+++ b/drivers/usb/class/Kconfig
@@ -6,7 +6,7 @@ comment "USB Device Class drivers"
config USB_ACM
tristate "USB Modem (CDC ACM) support"
- depends on USB
+ depends on USB && TTY
---help---
This driver supports USB modems and ISDN adapters which support the
Communication Device Class Abstract Control Model interface.
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8d809a811e16..8ac25adf31b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -410,19 +410,12 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
static void acm_process_read_urb(struct acm *acm, struct urb *urb)
{
- struct tty_struct *tty;
-
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&acm->port);
- if (!tty)
- return;
-
- tty_insert_flip_string(tty, urb->transfer_buffer, urb->actual_length);
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_insert_flip_string(&acm->port, urb->transfer_buffer,
+ urb->actual_length);
+ tty_flip_buffer_push(&acm->port);
}
static void acm_read_bulk_callback(struct urb *urb)
@@ -1602,6 +1595,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
.driver_info = NO_UNION_NORMAL,
},
+ { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
+ .driver_info = NO_UNION_NORMAL,
+ },
{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index 26059b93dbf4..5e847ad2f58a 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -7,6 +7,7 @@ ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
usbcore-y := usb.o hub.o hcd.o urb.o message.o driver.o
usbcore-y += config.o file.o buffer.o sysfs.o endpoint.o
usbcore-y += devio.o notify.o generic.o quirks.o devices.o
+usbcore-y += port.o
usbcore-$(CONFIG_PCI) += hcd-pci.o
usbcore-$(CONFIG_ACPI) += usb-acpi.o
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index cbacea933b18..e33224e23770 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -316,17 +316,23 @@ static char *usb_dump_iad_descriptor(char *start, char *end,
*/
static char *usb_dump_config_descriptor(char *start, char *end,
const struct usb_config_descriptor *desc,
- int active)
+ int active, int speed)
{
+ int mul;
+
if (start > end)
return start;
+ if (speed == USB_SPEED_SUPER)
+ mul = 8;
+ else
+ mul = 2;
start += sprintf(start, format_config,
/* mark active/actual/current cfg. */
active ? '*' : ' ',
desc->bNumInterfaces,
desc->bConfigurationValue,
desc->bmAttributes,
- desc->bMaxPower * 2);
+ desc->bMaxPower * mul);
return start;
}
@@ -342,7 +348,8 @@ static char *usb_dump_config(int speed, char *start, char *end,
if (!config)
/* getting these some in 2.3.7; none in 2.3.6 */
return start + sprintf(start, "(null Cfg. desc.)\n");
- start = usb_dump_config_descriptor(start, end, &config->desc, active);
+ start = usb_dump_config_descriptor(start, end, &config->desc, active,
+ speed);
for (i = 0; i < USB_MAXIADS; i++) {
if (config->intf_assoc[i] == NULL)
break;
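/*
 * Editor's sketch (not part of the patch): the multiplier added above
 * reflects the different bMaxPower units in the configuration descriptor --
 * 8 mA per unit on SuperSpeed links, 2 mA per unit otherwise.  A helper with
 * the same arithmetic; config_max_power_mA() is illustrative, and is
 * presumably what the usb_get_max_power() call used in later hunks computes.
 */
#include <linux/usb/ch9.h>

static unsigned int config_max_power_mA(enum usb_device_speed speed,
					const struct usb_config_descriptor *desc)
{
	unsigned int mul = (speed == USB_SPEED_SUPER) ? 8 : 2;

	return desc->bMaxPower * mul;
}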
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index b78fbe222b72..4a863fdbdccd 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -40,6 +40,7 @@
#include <linux/signal.h>
#include <linux/poll.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
@@ -1077,7 +1078,7 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
if (!intf || !intf->dev.driver)
ret = -ENODATA;
else {
- strncpy(gd.driver, intf->dev.driver->name,
+ strlcpy(gd.driver, intf->dev.driver->name,
sizeof(gd.driver));
ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0);
}
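/*
 * Editor's note (not part of the patch): the strncpy() -> strlcpy() switch
 * above matters because strncpy() does not NUL-terminate when the source
 * fills the destination, and gd.driver is subsequently copied to user space.
 * Minimal illustration of the difference; copy_name() is hypothetical.
 */
#include <linux/types.h>
#include <linux/string.h>

static bool copy_name(char *dst, size_t dst_size, const char *src)
{
	/* strlcpy() always NUL-terminates dst and returns strlen(src),
	 * so truncation is detectable by comparing with dst_size. */
	return strlcpy(dst, src, dst_size) < dst_size;
}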
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index eff2010eb63f..271e761f563e 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -100,7 +100,7 @@ int usb_choose_configuration(struct usb_device *udev)
*/
/* Rule out configs that draw too much bus current */
- if (c->desc.bMaxPower * 2 > udev->bus_mA) {
+ if (usb_get_max_power(udev, c) > udev->bus_mA) {
insufficient_power++;
continue;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 4225d5e72131..99b34a30354f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -39,6 +39,7 @@
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -619,6 +620,10 @@ nongeneric:
status = hcd->driver->hub_control (hcd,
typeReq, wValue, wIndex,
tbuf, wLength);
+
+ if (typeReq == GetHubDescriptor)
+ usb_hub_adjust_deviceremovable(hcd->self.root_hub,
+ (struct usb_hub_descriptor *)tbuf);
break;
error:
/* "protocol stall" on error */
@@ -1025,6 +1030,49 @@ static int register_root_hub(struct usb_hcd *hcd)
return retval;
}
+/*
+ * usb_hcd_start_port_resume - a root-hub port is sending a resume signal
+ * @bus: the bus which the root hub belongs to
+ * @portnum: the port which is being resumed
+ *
+ * HCDs should call this function when they know that a resume signal is
+ * being sent to a root-hub port. The root hub will be prevented from
+ * going into autosuspend until usb_hcd_end_port_resume() is called.
+ *
+ * The bus's private lock must be held by the caller.
+ */
+void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum)
+{
+ unsigned bit = 1 << portnum;
+
+ if (!(bus->resuming_ports & bit)) {
+ bus->resuming_ports |= bit;
+ pm_runtime_get_noresume(&bus->root_hub->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume);
+
+/*
+ * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal
+ * @bus: the bus which the root hub belongs to
+ * @portnum: the port which is being resumed
+ *
+ * HCDs should call this function when they know that a resume signal has
+ * stopped being sent to a root-hub port. The root hub will be allowed to
+ * autosuspend again.
+ *
+ * The bus's private lock must be held by the caller.
+ */
+void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum)
+{
+ unsigned bit = 1 << portnum;
+
+ if (bus->resuming_ports & bit) {
+ bus->resuming_ports &= ~bit;
+ pm_runtime_put_noidle(&bus->root_hub->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);
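/*
 * Editor's sketch (not part of the patch): intended calling pattern for the
 * two helpers above.  The real callers are the individual host controller
 * drivers' port-status paths; example_port_event() is illustrative, and the
 * bus's private lock must already be held, as documented above.
 */
#include <linux/usb/hcd.h>

static void example_port_event(struct usb_hcd *hcd, int portnum,
			       bool resume_signalling)
{
	if (resume_signalling)
		/* keep the root hub from autosuspending while resume
		 * signalling is on the wire */
		usb_hcd_start_port_resume(&hcd->self, portnum);
	else
		/* signalling finished; drop the reference taken above */
		usb_hcd_end_port_resume(&hcd->self, portnum);
}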
/*-------------------------------------------------------------------------*/
@@ -2506,7 +2554,6 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
/* starting here, usbcore will pay attention to this root hub */
- rhdev->bus_mA = min(500u, hcd->power_budget);
if ((retval = register_root_hub(hcd)) != 0)
goto err_register_root_hub;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a815fd2cc5e7..5480352f984d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -26,11 +26,12 @@
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/random.h>
+#include <linux/pm_qos.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-#include "usb.h"
+#include "hub.h"
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
@@ -42,62 +43,6 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
-struct usb_port {
- struct usb_device *child;
- struct device dev;
- struct dev_state *port_owner;
- enum usb_port_connect_type connect_type;
-};
-
-struct usb_hub {
- struct device *intfdev; /* the "interface" device */
- struct usb_device *hdev;
- struct kref kref;
- struct urb *urb; /* for interrupt polling pipe */
-
- /* buffer for urb ... with extra space in case of babble */
- char (*buffer)[8];
- union {
- struct usb_hub_status hub;
- struct usb_port_status port;
- } *status; /* buffer for status reports */
- struct mutex status_mutex; /* for the status buffer */
-
- int error; /* last reported error */
- int nerrors; /* track consecutive errors */
-
- struct list_head event_list; /* hubs w/data or errs ready */
- unsigned long event_bits[1]; /* status change bitmask */
- unsigned long change_bits[1]; /* ports with logical connect
- status change */
- unsigned long busy_bits[1]; /* ports being reset or
- resumed */
- unsigned long removed_bits[1]; /* ports with a "removed"
- device present */
- unsigned long wakeup_bits[1]; /* ports that have signaled
- remote wakeup */
-#if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
-#error event_bits[] is too short!
-#endif
-
- struct usb_hub_descriptor *descriptor; /* class descriptor */
- struct usb_tt tt; /* Transaction Translator */
-
- unsigned mA_per_port; /* current for each child */
-
- unsigned limited_power:1;
- unsigned quiescing:1;
- unsigned disconnected:1;
-
- unsigned quirk_check_port_auto_suspend:1;
-
- unsigned has_indicators:1;
- u8 indicator[USB_MAXCHILDREN];
- struct delayed_work leds;
- struct delayed_work init_work;
- struct usb_port **ports;
-};
-
static inline int hub_is_superspeed(struct usb_device *hdev)
{
return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
@@ -164,13 +109,10 @@ MODULE_PARM_DESC(use_both_schemes,
DECLARE_RWSEM(ehci_cf_port_reset_rwsem);
EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
-#define HUB_DEBOUNCE_TIMEOUT 1500
+#define HUB_DEBOUNCE_TIMEOUT 2000
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100
-#define to_usb_port(_dev) \
- container_of(_dev, struct usb_port, dev)
-
static int usb_reset_and_verify_device(struct usb_device *udev);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
@@ -186,7 +128,7 @@ static inline char *portspeed(struct usb_hub *hub, int portstatus)
}
/* Note that hdev or one of its children must be locked! */
-static struct usb_hub *hdev_to_hub(struct usb_device *hdev)
+struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
{
if (!hdev || !hdev->actconfig || !hdev->maxchild)
return NULL;
@@ -360,7 +302,7 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
return;
- hub = hdev_to_hub(udev->parent);
+ hub = usb_hub_to_struct_hub(udev->parent);
/* It doesn't take time to transition the roothub into U0, since it
* doesn't have an upstream link.
*/
@@ -452,7 +394,7 @@ static int clear_hub_feature(struct usb_device *hdev, int feature)
/*
* USB 2.0 spec Section 11.24.2.2
*/
-static int clear_port_feature(struct usb_device *hdev, int port1, int feature)
+int usb_clear_port_feature(struct usb_device *hdev, int port1, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1,
@@ -645,7 +587,7 @@ static void kick_khubd(struct usb_hub *hub)
void usb_kick_khubd(struct usb_device *hdev)
{
- struct usb_hub *hub = hdev_to_hub(hdev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (hub)
kick_khubd(hub);
@@ -667,7 +609,7 @@ void usb_wakeup_notification(struct usb_device *hdev,
if (!hdev)
return;
- hub = hdev_to_hub(hdev);
+ hub = usb_hub_to_struct_hub(hdev);
if (hub) {
set_bit(portnum, hub->wakeup_bits);
kick_khubd(hub);
@@ -774,6 +716,32 @@ static void hub_tt_work(struct work_struct *work)
}
/**
+ * usb_hub_set_port_power - control hub port's power state
+ * @hdev: target hub
+ * @port1: port index
+ * @set: true to power the port on, false to power it off
+ *
+ * Call this function to control the port's power state by setting or
+ * clearing the port's PORT_POWER feature.
+ */
+int usb_hub_set_port_power(struct usb_device *hdev, int port1,
+ bool set)
+{
+ int ret;
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+ struct usb_port *port_dev = hub->ports[port1 - 1];
+
+ if (set)
+ ret = set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
+ else
+ ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
+
+ if (!ret)
+ port_dev->power_is_on = set;
+ return ret;
+}
+
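/*
 * Editor's sketch (not part of the patch): a hypothetical caller cycling a
 * port's VBUS with the new helper, assuming the declarations from the new
 * hub.h.  example_cycle_port_power() is not a real kernel function; the
 * usual hub locking rules for port operations apply.
 */
#include <linux/delay.h>

static int example_cycle_port_power(struct usb_device *hdev, int port1)
{
	int ret = usb_hub_set_port_power(hdev, port1, false);

	if (ret)
		return ret;
	msleep(100);	/* let VBUS discharge before powering back on */
	return usb_hub_set_port_power(hdev, port1, true);
}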
+/**
* usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub
* @urb: an URB associated with the failed or incomplete split transaction
*
@@ -849,7 +817,11 @@ static unsigned hub_power_on(struct usb_hub *hub, bool do_delay)
dev_dbg(hub->intfdev, "trying to enable port power on "
"non-switchable hub\n");
for (port1 = 1; port1 <= hub->descriptor->bNbrPorts; port1++)
- set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER);
+ if (hub->ports[port1 - 1]->power_is_on)
+ set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER);
+ else
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_POWER);
/* Wait at least 100 msec for power to become stable */
delay = max(pgood_delay, (unsigned) 100);
@@ -877,6 +849,60 @@ static int hub_hub_status(struct usb_hub *hub,
return ret;
}
+static int hub_set_port_link_state(struct usb_hub *hub, int port1,
+ unsigned int link_status)
+{
+ return set_port_feature(hub->hdev,
+ port1 | (link_status << 3),
+ USB_PORT_FEAT_LINK_STATE);
+}
+
+/*
+ * If USB 3.0 ports are placed into the Disabled state, they will no longer
+ * detect any device connects or disconnects. This is generally not what the
+ * USB core wants, since it expects a disabled port to produce a port status
+ * change event when a new device connects.
+ *
+ * Instead, set the link state to Disabled, wait for the link to settle into
+ * that state, clear any change bits, and then put the port into the RxDetect
+ * state.
+ */
+static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+{
+ int ret;
+ int total_time;
+ u16 portchange, portstatus;
+
+ if (!hub_is_superspeed(hub->hdev))
+ return -EINVAL;
+
+ ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+ if (ret) {
+ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
+ port1, ret);
+ return ret;
+ }
+
+ /* Wait for the link to enter the disabled state. */
+ for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
+ ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ if (ret < 0)
+ return ret;
+
+ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_SS_DISABLED)
+ break;
+ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+ break;
+ msleep(HUB_DEBOUNCE_STEP);
+ }
+ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+ dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
+ port1, total_time);
+
+ return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+}
+
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
{
struct usb_device *hdev = hub->hdev;
@@ -885,8 +911,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
if (hub->ports[port1 - 1]->child && set_state)
usb_set_device_state(hub->ports[port1 - 1]->child,
USB_STATE_NOTATTACHED);
- if (!hub->error && !hub_is_superspeed(hub->hdev))
- ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
+ if (!hub->error) {
+ if (hub_is_superspeed(hub->hdev))
+ ret = hub_usb3_port_disable(hub, port1);
+ else
+ ret = usb_clear_port_feature(hdev, port1,
+ USB_PORT_FEAT_ENABLE);
+ }
if (ret)
dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
port1, ret);
@@ -933,7 +964,7 @@ int usb_remove_device(struct usb_device *udev)
if (!udev->parent) /* Can't remove a root hub */
return -EINVAL;
- hub = hdev_to_hub(udev->parent);
+ hub = usb_hub_to_struct_hub(udev->parent);
intf = to_usb_interface(hub->intfdev);
usb_autopm_get_interface(intf);
@@ -1065,7 +1096,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
* Do not disable USB3 protocol ports.
*/
if (!hub_is_superspeed(hdev)) {
- clear_port_feature(hdev, port1,
+ usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
portstatus &= ~USB_PORT_STAT_ENABLE;
} else {
@@ -1077,18 +1108,18 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Clear status-change flags; we'll debounce later */
if (portchange & USB_PORT_STAT_C_CONNECTION) {
need_debounce_delay = true;
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
}
if (portchange & USB_PORT_STAT_C_ENABLE) {
need_debounce_delay = true;
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
}
if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
hub_is_superspeed(hub->hdev)) {
need_debounce_delay = true;
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
}
/* We can forget about a "removed" device when there's a
@@ -1122,10 +1153,16 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
set_bit(port1, hub->change_bits);
} else if (udev->persist_enabled) {
+ struct usb_port *port_dev = hub->ports[port1 - 1];
+
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
- set_bit(port1, hub->change_bits);
+ /* Don't set the change_bits when the device
+ * was powered off.
+ */
+ if (port_dev->power_is_on)
+ set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell khubd */
@@ -1235,52 +1272,6 @@ static int hub_post_reset(struct usb_interface *intf)
return 0;
}
-static void usb_port_device_release(struct device *dev)
-{
- struct usb_port *port_dev = to_usb_port(dev);
-
- kfree(port_dev);
-}
-
-static void usb_hub_remove_port_device(struct usb_hub *hub,
- int port1)
-{
- device_unregister(&hub->ports[port1 - 1]->dev);
-}
-
-struct device_type usb_port_device_type = {
- .name = "usb_port",
- .release = usb_port_device_release,
-};
-
-static int usb_hub_create_port_device(struct usb_hub *hub,
- int port1)
-{
- struct usb_port *port_dev = NULL;
- int retval;
-
- port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
- if (!port_dev) {
- retval = -ENOMEM;
- goto exit;
- }
-
- hub->ports[port1 - 1] = port_dev;
- port_dev->dev.parent = hub->intfdev;
- port_dev->dev.type = &usb_port_device_type;
- dev_set_name(&port_dev->dev, "port%d", port1);
-
- retval = device_register(&port_dev->dev);
- if (retval)
- goto error_register;
- return 0;
-
-error_register:
- put_device(&port_dev->dev);
-exit:
- return retval;
-}
-
static int hub_configure(struct usb_hub *hub,
struct usb_endpoint_descriptor *endpoint)
{
@@ -1292,6 +1283,8 @@ static int hub_configure(struct usb_hub *hub,
unsigned int pipe;
int maxp, ret, i;
char *message = "out of memory";
+ unsigned unit_load;
+ unsigned full_load;
hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL);
if (!hub->buffer) {
@@ -1338,6 +1331,13 @@ static int hub_configure(struct usb_hub *hub,
}
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
+ if (hub_is_superspeed(hdev)) {
+ unit_load = 150;
+ full_load = 900;
+ } else {
+ unit_load = 100;
+ full_load = 500;
+ }
/* FIXME for USB 3.0, skip for now */
if ((wHubCharacteristics & HUB_CHAR_COMPOUND) &&
@@ -1457,40 +1457,44 @@ static int hub_configure(struct usb_hub *hub,
goto fail;
}
le16_to_cpus(&hubstatus);
+ hcd = bus_to_hcd(hdev->bus);
if (hdev == hdev->bus->root_hub) {
- if (hdev->bus_mA == 0 || hdev->bus_mA >= 500)
- hub->mA_per_port = 500;
+ if (hcd->power_budget > 0)
+ hdev->bus_mA = hcd->power_budget;
+ else
+ hdev->bus_mA = full_load * hdev->maxchild;
+ if (hdev->bus_mA >= full_load)
+ hub->mA_per_port = full_load;
else {
hub->mA_per_port = hdev->bus_mA;
hub->limited_power = 1;
}
} else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
+ int remaining = hdev->bus_mA -
+ hub->descriptor->bHubContrCurrent;
+
dev_dbg(hub_dev, "hub controller current requirement: %dmA\n",
hub->descriptor->bHubContrCurrent);
hub->limited_power = 1;
- if (hdev->maxchild > 0) {
- int remaining = hdev->bus_mA -
- hub->descriptor->bHubContrCurrent;
- if (remaining < hdev->maxchild * 100)
- dev_warn(hub_dev,
+ if (remaining < hdev->maxchild * unit_load)
+ dev_warn(hub_dev,
"insufficient power available "
"to use all downstream ports\n");
- hub->mA_per_port = 100; /* 7.2.1.1 */
- }
+ hub->mA_per_port = unit_load; /* 7.2.1 */
+
} else { /* Self-powered external hub */
/* FIXME: What about battery-powered external hubs that
* provide less current per port? */
- hub->mA_per_port = 500;
+ hub->mA_per_port = full_load;
}
- if (hub->mA_per_port < 500)
+ if (hub->mA_per_port < full_load)
dev_dbg(hub_dev, "%umA bus power budget for each child\n",
hub->mA_per_port);
/* Update the HCD's internal representation of this hub before khubd
* starts getting port status changes for devices under the hub.
*/
- hcd = bus_to_hcd(hdev->bus);
if (hcd->driver->update_hub_device) {
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_KERNEL);
@@ -1546,6 +1550,8 @@ static int hub_configure(struct usb_hub *hub,
dev_err(hub->intfdev,
"couldn't create port%d device.\n", i + 1);
+ usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
+
hub_activate(hub, HUB_INIT);
return 0;
@@ -1600,6 +1606,7 @@ static void hub_disconnect(struct usb_interface *intf)
kfree(hub->status);
kfree(hub->buffer);
+ pm_suspend_ignore_children(&intf->dev, false);
kref_put(&hub->kref, hub_release);
}
@@ -1702,6 +1709,7 @@ descriptor_error:
usb_set_intfdata (intf, hub);
intf->needs_remote_wakeup = 1;
+ pm_suspend_ignore_children(&intf->dev, true);
if (hdev->speed == USB_SPEED_HIGH)
highspeed_hubs++;
@@ -1720,7 +1728,7 @@ static int
hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
{
struct usb_device *hdev = interface_to_usbdev (intf);
- struct usb_hub *hub = hdev_to_hub(hdev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
/* assert ifno == 0 (part of hub spec) */
switch (code) {
@@ -1766,7 +1774,7 @@ static int find_port_owner(struct usb_device *hdev, unsigned port1,
/* This assumes that devices not managed by the hub driver
* will always have maxchild equal to 0.
*/
- *ppowner = &(hdev_to_hub(hdev)->ports[port1 - 1]->port_owner);
+ *ppowner = &(usb_hub_to_struct_hub(hdev)->ports[port1 - 1]->port_owner);
return 0;
}
@@ -1803,7 +1811,7 @@ int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
void usb_hub_release_all_ports(struct usb_device *hdev, struct dev_state *owner)
{
- struct usb_hub *hub = hdev_to_hub(hdev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
int n;
for (n = 0; n < hdev->maxchild; n++) {
@@ -1820,13 +1828,13 @@ bool usb_device_is_owned(struct usb_device *udev)
if (udev->state == USB_STATE_NOTATTACHED || !udev->parent)
return false;
- hub = hdev_to_hub(udev->parent);
+ hub = usb_hub_to_struct_hub(udev->parent);
return !!hub->ports[udev->portnum - 1]->port_owner;
}
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
- struct usb_hub *hub = hdev_to_hub(udev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
for (i = 0; i < udev->maxchild; ++i) {
@@ -1995,7 +2003,7 @@ static void hub_free_dev(struct usb_device *udev)
void usb_disconnect(struct usb_device **pdev)
{
struct usb_device *udev = *pdev;
- struct usb_hub *hub = hdev_to_hub(udev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
/* mark the device as inactive, so any further urb submissions for
@@ -2022,6 +2030,19 @@ void usb_disconnect(struct usb_device **pdev)
usb_disable_device(udev, 0);
usb_hcd_synchronize_unlinks(udev);
+ if (udev->parent) {
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+ struct usb_port *port_dev = hub->ports[udev->portnum - 1];
+
+ sysfs_remove_link(&udev->dev.kobj, "port");
+ sysfs_remove_link(&port_dev->dev.kobj, "device");
+
+ if (!port_dev->did_runtime_put)
+ pm_runtime_put(&port_dev->dev);
+ else
+ port_dev->did_runtime_put = false;
+ }
+
usb_remove_ep_devs(&udev->ep0);
usb_unlock_device(udev);
@@ -2208,7 +2229,7 @@ static void set_usb_port_removable(struct usb_device *udev)
if (!hdev)
return;
- hub = hdev_to_hub(udev->parent);
+ hub = usb_hub_to_struct_hub(udev->parent);
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
@@ -2314,6 +2335,26 @@ int usb_new_device(struct usb_device *udev)
goto fail;
}
+ /* Create link files between child device and usb port device. */
+ if (udev->parent) {
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+ struct usb_port *port_dev = hub->ports[udev->portnum - 1];
+
+ err = sysfs_create_link(&udev->dev.kobj,
+ &port_dev->dev.kobj, "port");
+ if (err)
+ goto fail;
+
+ err = sysfs_create_link(&port_dev->dev.kobj,
+ &udev->dev.kobj, "device");
+ if (err) {
+ sysfs_remove_link(&udev->dev.kobj, "port");
+ goto fail;
+ }
+
+ pm_runtime_get_sync(&port_dev->dev);
+ }
+
(void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
usb_mark_last_busy(udev);
pm_runtime_put_sync_autosuspend(&udev->dev);
@@ -2440,7 +2481,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define HUB_SHORT_RESET_TIME 10
#define HUB_BH_RESET_TIME 50
#define HUB_LONG_RESET_TIME 200
-#define HUB_RESET_TIMEOUT 500
+#define HUB_RESET_TIMEOUT 800
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm);
@@ -2475,72 +2516,9 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (ret < 0)
return ret;
- /*
- * Some buggy devices require a warm reset to be issued even
- * when the port appears not to be connected.
- */
- if (!warm) {
- /*
- * Some buggy devices can cause an NEC host controller
- * to transition to the "Error" state after a hot port
- * reset. This will show up as the port state in
- * "Inactive", and the port may also report a
- * disconnect. Forcing a warm port reset seems to make
- * the device work.
- *
- * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
- */
- if (hub_port_warm_reset_required(hub, portstatus)) {
- int ret;
-
- if ((portchange & USB_PORT_STAT_C_CONNECTION))
- clear_port_feature(hub->hdev, port1,
- USB_PORT_FEAT_C_CONNECTION);
- if (portchange & USB_PORT_STAT_C_LINK_STATE)
- clear_port_feature(hub->hdev, port1,
- USB_PORT_FEAT_C_PORT_LINK_STATE);
- if (portchange & USB_PORT_STAT_C_RESET)
- clear_port_feature(hub->hdev, port1,
- USB_PORT_FEAT_C_RESET);
- dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
- port1);
- ret = hub_port_reset(hub, port1,
- udev, HUB_BH_RESET_TIME,
- true);
- if ((portchange & USB_PORT_STAT_C_CONNECTION))
- clear_port_feature(hub->hdev, port1,
- USB_PORT_FEAT_C_CONNECTION);
- return ret;
- }
- /* Device went away? */
- if (!(portstatus & USB_PORT_STAT_CONNECTION))
- return -ENOTCONN;
-
- /* bomb out completely if the connection bounced */
- if ((portchange & USB_PORT_STAT_C_CONNECTION))
- return -ENOTCONN;
-
- /* if we`ve finished resetting, then break out of
- * the loop
- */
- if (!(portstatus & USB_PORT_STAT_RESET) &&
- (portstatus & USB_PORT_STAT_ENABLE)) {
- if (hub_is_wusb(hub))
- udev->speed = USB_SPEED_WIRELESS;
- else if (hub_is_superspeed(hub->hdev))
- udev->speed = USB_SPEED_SUPER;
- else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
- udev->speed = USB_SPEED_HIGH;
- else if (portstatus & USB_PORT_STAT_LOW_SPEED)
- udev->speed = USB_SPEED_LOW;
- else
- udev->speed = USB_SPEED_FULL;
- return 0;
- }
- } else {
- if (portchange & USB_PORT_STAT_C_BH_RESET)
- return 0;
- }
+ /* The port state is unknown until the reset completes. */
+ if (!(portstatus & USB_PORT_STAT_RESET))
+ break;
/* switch to the long delay after two short delay failures */
if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
@@ -2551,45 +2529,77 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
port1, warm ? "warm " : "", delay);
}
- return -EBUSY;
+ if ((portstatus & USB_PORT_STAT_RESET))
+ return -EBUSY;
+
+ if (hub_port_warm_reset_required(hub, portstatus))
+ return -ENOTCONN;
+
+ /* Device went away? */
+ if (!(portstatus & USB_PORT_STAT_CONNECTION))
+ return -ENOTCONN;
+
+ /* bomb out completely if the connection bounced. A USB 3.0
+ * connection may bounce if multiple warm resets were issued,
+ * but the device may have successfully re-connected. Ignore it.
+ */
+ if (!hub_is_superspeed(hub->hdev) &&
+ (portchange & USB_PORT_STAT_C_CONNECTION))
+ return -ENOTCONN;
+
+ if (!(portstatus & USB_PORT_STAT_ENABLE))
+ return -EBUSY;
+
+ if (!udev)
+ return 0;
+
+ if (hub_is_wusb(hub))
+ udev->speed = USB_SPEED_WIRELESS;
+ else if (hub_is_superspeed(hub->hdev))
+ udev->speed = USB_SPEED_SUPER;
+ else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
+ udev->speed = USB_SPEED_HIGH;
+ else if (portstatus & USB_PORT_STAT_LOW_SPEED)
+ udev->speed = USB_SPEED_LOW;
+ else
+ udev->speed = USB_SPEED_FULL;
+ return 0;
}
static void hub_port_finish_reset(struct usb_hub *hub, int port1,
- struct usb_device *udev, int *status, bool warm)
+ struct usb_device *udev, int *status)
{
switch (*status) {
case 0:
- if (!warm) {
- struct usb_hcd *hcd;
- /* TRSTRCY = 10 ms; plus some extra */
- msleep(10 + 40);
+ /* TRSTRCY = 10 ms; plus some extra */
+ msleep(10 + 40);
+ if (udev) {
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
update_devnum(udev, 0);
- hcd = bus_to_hcd(udev->bus);
- if (hcd->driver->reset_device) {
- *status = hcd->driver->reset_device(hcd, udev);
- if (*status < 0) {
- dev_err(&udev->dev, "Cannot reset "
- "HCD device state\n");
- break;
- }
- }
+ /* The xHC may think the device is already reset,
+ * so ignore the status.
+ */
+ if (hcd->driver->reset_device)
+ hcd->driver->reset_device(hcd, udev);
}
/* FALL THROUGH */
case -ENOTCONN:
case -ENODEV:
- clear_port_feature(hub->hdev,
+ usb_clear_port_feature(hub->hdev,
port1, USB_PORT_FEAT_C_RESET);
- /* FIXME need disconnect() for NOTATTACHED device */
- if (warm) {
- clear_port_feature(hub->hdev, port1,
+ if (hub_is_superspeed(hub->hdev)) {
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
- } else {
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+ }
+ if (udev)
usb_set_device_state(udev, *status
? USB_STATE_NOTATTACHED
: USB_STATE_DEFAULT);
- }
break;
}
}
@@ -2599,18 +2609,30 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm)
{
int i, status;
+ u16 portchange, portstatus;
- if (!warm) {
- /* Block EHCI CF initialization during the port reset.
- * Some companion controllers don't like it when they mix.
- */
- down_read(&ehci_cf_port_reset_rwsem);
- } else {
- if (!hub_is_superspeed(hub->hdev)) {
+ if (!hub_is_superspeed(hub->hdev)) {
+ if (warm) {
dev_err(hub->intfdev, "only USB3 hub support "
"warm reset\n");
return -EINVAL;
}
+ /* Block EHCI CF initialization during the port reset.
+ * Some companion controllers don't like it when they mix.
+ */
+ down_read(&ehci_cf_port_reset_rwsem);
+ } else if (!warm) {
+ /*
+ * If the caller hasn't explicitly requested a warm reset,
+ * double check and see if one is needed.
+ */
+ status = hub_port_status(hub, port1,
+ &portstatus, &portchange);
+ if (status < 0)
+ goto done;
+
+ if (hub_port_warm_reset_required(hub, portstatus))
+ warm = true;
}
/* Reset the port */
@@ -2631,10 +2653,33 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
status);
}
- /* return on disconnect or reset */
+ /* Check for disconnect or reset */
if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
- hub_port_finish_reset(hub, port1, udev, &status, warm);
- goto done;
+ hub_port_finish_reset(hub, port1, udev, &status);
+
+ if (!hub_is_superspeed(hub->hdev))
+ goto done;
+
+ /*
+ * If a USB 3.0 device migrates from reset to an error
+ * state, re-issue the warm reset.
+ */
+ if (hub_port_status(hub, port1,
+ &portstatus, &portchange) < 0)
+ goto done;
+
+ if (!hub_port_warm_reset_required(hub, portstatus))
+ goto done;
+
+ /*
+ * If the port is in SS.Inactive or Compliance Mode, the
+ * hot or warm reset failed. Try another warm reset.
+ */
+ if (!warm) {
+ dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
+ port1);
+ warm = true;
+ }
}
dev_dbg (hub->intfdev,
@@ -2648,7 +2693,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
port1);
done:
- if (!warm)
+ if (!hub_is_superspeed(hub->hdev))
up_read(&ehci_cf_port_reset_rwsem);
return status;
@@ -2722,10 +2767,10 @@ static int check_port_resume_type(struct usb_device *udev,
/* Late port handoff can set status-change bits */
if (portchange & USB_PORT_STAT_C_CONNECTION)
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
if (portchange & USB_PORT_STAT_C_ENABLE)
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
}
@@ -2777,6 +2822,23 @@ void usb_enable_ltm(struct usb_device *udev)
EXPORT_SYMBOL_GPL(usb_enable_ltm);
#ifdef CONFIG_USB_SUSPEND
+/*
+ * usb_disable_function_remotewakeup - disable usb3.0
+ * device's function remote wakeup
+ * @udev: target device
+ *
+ * Assume there's only one function on the USB 3.0
+ * device and disable remote wake for the first
+ * interface. FIXME if the interface association
+ * descriptor shows there's more than one function.
+ */
+static int usb_disable_function_remotewakeup(struct usb_device *udev)
+{
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
+ USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+}
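/*
 * Editor's sketch (not part of the patch or this series): the enable-side
 * counterpart of usb_disable_function_remotewakeup() would use SET_FEATURE
 * with the function-suspend options in the high byte of wIndex.  This only
 * illustrates the ch9 constants involved; the exact in-tree helper may
 * differ.
 */
#include <linux/usb.h>
#include <linux/usb/ch9.h>

static int example_enable_function_remotewakeup(struct usb_device *udev)
{
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
			USB_INTRF_FUNC_SUSPEND,
			USB_INTRF_FUNC_SUSPEND_RW | USB_INTRF_FUNC_SUSPEND_LP,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
}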
/*
* usb_port_suspend - suspend a usb device's upstream port
@@ -2826,7 +2888,9 @@ EXPORT_SYMBOL_GPL(usb_enable_ltm);
*/
int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
{
- struct usb_hub *hub = hdev_to_hub(udev->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+ struct usb_port *port_dev = hub->ports[udev->portnum - 1];
+ enum pm_qos_flags_status pm_qos_stat;
int port1 = udev->portnum;
int status;
@@ -2884,9 +2948,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
- status = set_port_feature(hub->hdev,
- port1 | (USB_SS_PORT_LS_U3 << 3),
- USB_PORT_FEAT_LINK_STATE);
+ status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
else
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
@@ -2894,12 +2956,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
/* paranoia: "should not happen" */
- if (udev->do_remote_wakeup)
- (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
- USB_DEVICE_REMOTE_WAKEUP, 0,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ if (udev->do_remote_wakeup) {
+ if (!hub_is_superspeed(hub->hdev)) {
+ (void) usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_RECIP_DEVICE,
+ USB_DEVICE_REMOTE_WAKEUP, 0,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ } else
+ (void) usb_disable_function_remotewakeup(udev);
+
+ }
/* Try to enable USB2 hardware LPM again */
if (udev->usb2_hw_lpm_capable == 1)
@@ -2921,6 +2990,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
udev->port_is_suspended = 1;
msleep(10);
}
+
+ /*
+ * Check whether the current status meets the requirements of the
+ * usb port power-off mechanism
+ */
+ pm_qos_stat = dev_pm_qos_flags(&port_dev->dev,
+ PM_QOS_FLAG_NO_POWER_OFF);
+ if (!udev->do_remote_wakeup
+ && pm_qos_stat != PM_QOS_FLAGS_ALL
+ && udev->persist_enabled
+ && !status) {
+ pm_runtime_put_sync(&port_dev->dev);
+ port_dev->did_runtime_put = true;
+ }
+
usb_mark_last_busy(hub->hdev);
return status;
}
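/*
 * Editor's note (not part of the patch): the block added above only drops
 * the port's runtime-PM reference (allowing the port to be powered off) when
 * the suspend succeeded, remote wakeup is not needed, persist is enabled,
 * and nothing has requested PM_QOS_FLAG_NO_POWER_OFF on the port device.
 * A hypothetical helper expressing the same condition:
 */
#include <linux/usb.h>
#include <linux/pm_qos.h>

static bool example_port_may_power_off(struct device *port_dev,
				       struct usb_device *udev, int status)
{
	return !status &&
	       !udev->do_remote_wakeup &&
	       udev->persist_enabled &&
	       dev_pm_qos_flags(port_dev, PM_QOS_FLAG_NO_POWER_OFF) !=
			PM_QOS_FLAGS_ALL;
}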
@@ -2939,7 +3023,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
static int finish_port_resume(struct usb_device *udev)
{
int status = 0;
- u16 devstatus;
+ u16 devstatus = 0;
/* caller owns the udev device lock */
dev_dbg(&udev->dev, "%s\n",
@@ -2984,21 +3068,37 @@ static int finish_port_resume(struct usb_device *udev)
if (status) {
dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
status);
- } else if (udev->actconfig) {
- le16_to_cpus(&devstatus);
- if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
- status = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- USB_REQ_CLEAR_FEATURE,
+ /*
+ * There are a few quirky devices which violate the standard
+ * by claiming to have remote wakeup enabled after a reset,
+ * which crash if the feature is cleared, hence check for
+ * udev->reset_resume
+ */
+ } else if (udev->actconfig && !udev->reset_resume) {
+ if (!hub_is_superspeed(udev->parent)) {
+ le16_to_cpus(&devstatus);
+ if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
+ status = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE,
USB_RECIP_DEVICE,
- USB_DEVICE_REMOTE_WAKEUP, 0,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- if (status)
- dev_dbg(&udev->dev,
- "disable remote wakeup, status %d\n",
- status);
+ USB_DEVICE_REMOTE_WAKEUP, 0,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ } else {
+ status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
+ &devstatus);
+ le16_to_cpus(&devstatus);
+ if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
+ | USB_INTRF_STAT_FUNC_RW))
+ status =
+ usb_disable_function_remotewakeup(udev);
}
+
+ if (status)
+ dev_dbg(&udev->dev,
+ "disable remote wakeup, status %d\n",
+ status);
status = 0;
}
return status;
@@ -3040,11 +3140,22 @@ static int finish_port_resume(struct usb_device *udev)
*/
int usb_port_resume(struct usb_device *udev, pm_message_t msg)
{
- struct usb_hub *hub = hdev_to_hub(udev->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+ struct usb_port *port_dev = hub->ports[udev->portnum - 1];
int port1 = udev->portnum;
int status;
u16 portchange, portstatus;
+ if (port_dev->did_runtime_put) {
+ status = pm_runtime_get_sync(&port_dev->dev);
+ port_dev->did_runtime_put = false;
+ if (status < 0) {
+ dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
+ status);
+ return status;
+ }
+ }
+
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
if (status == 0 && !port_is_suspended(hub, portstatus))
@@ -3056,11 +3167,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
- status = set_port_feature(hub->hdev,
- port1 | (USB_SS_PORT_LS_U0 << 3),
- USB_PORT_FEAT_LINK_STATE);
+ status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U0);
else
- status = clear_port_feature(hub->hdev,
+ status = usb_clear_port_feature(hub->hdev,
port1, USB_PORT_FEAT_SUSPEND);
if (status) {
dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
@@ -3086,11 +3195,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
udev->port_is_suspended = 0;
if (hub_is_superspeed(hub->hdev)) {
if (portchange & USB_PORT_STAT_C_LINK_STATE)
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
} else {
if (portchange & USB_PORT_STAT_C_SUSPEND)
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_SUSPEND);
}
}
@@ -3146,7 +3255,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
int usb_port_resume(struct usb_device *udev, pm_message_t msg)
{
- struct usb_hub *hub = hdev_to_hub(udev->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
int port1 = udev->portnum;
int status;
u16 portchange, portstatus;
@@ -3725,7 +3834,7 @@ EXPORT_SYMBOL_GPL(usb_enable_ltm);
* every 25ms for transient disconnects. When the port status has been
* unchanged for 100ms it returns the port status.
*/
-static int hub_port_debounce(struct usb_hub *hub, int port1)
+int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected)
{
int ret;
int total_time, stable_time = 0;
@@ -3739,7 +3848,9 @@ static int hub_port_debounce(struct usb_hub *hub, int port1)
if (!(portchange & USB_PORT_STAT_C_CONNECTION) &&
(portstatus & USB_PORT_STAT_CONNECTION) == connection) {
- stable_time += HUB_DEBOUNCE_STEP;
+ if (!must_be_connected ||
+ (connection == USB_PORT_STAT_CONNECTION))
+ stable_time += HUB_DEBOUNCE_STEP;
if (stable_time >= HUB_DEBOUNCE_STABLE)
break;
} else {
@@ -3748,7 +3859,7 @@ static int hub_port_debounce(struct usb_hub *hub, int port1)
}
if (portchange & USB_PORT_STAT_C_CONNECTION) {
- clear_port_feature(hub->hdev, port1,
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
}
@@ -4145,16 +4256,23 @@ hub_power_remaining (struct usb_hub *hub)
for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
struct usb_device *udev = hub->ports[port1 - 1]->child;
int delta;
+ unsigned unit_load;
if (!udev)
continue;
+ if (hub_is_superspeed(udev))
+ unit_load = 150;
+ else
+ unit_load = 100;
- /* Unconfigured devices may not use more than 100mA,
- * or 8mA for OTG ports */
+ /*
+ * Unconfigured devices may not use more than one unit load,
+ * or 8mA for OTG ports
+ */
if (udev->actconfig)
- delta = udev->actconfig->desc.bMaxPower * 2;
+ delta = usb_get_max_power(udev, udev->actconfig);
else if (port1 != udev->bus->otg_port || hdev->parent)
- delta = 100;
+ delta = unit_load;
else
delta = 8;
if (delta > hub->mA_per_port)
@@ -4189,6 +4307,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
le16_to_cpu(hub->descriptor->wHubCharacteristics);
struct usb_device *udev;
int status, i;
+ unsigned unit_load;
dev_dbg (hub_dev,
"port %d, status %04x, change %04x, %s\n",
@@ -4252,7 +4371,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if (portchange & (USB_PORT_STAT_C_CONNECTION |
USB_PORT_STAT_C_ENABLE)) {
- status = hub_port_debounce(hub, port1);
+ status = hub_port_debounce_be_stable(hub, port1);
if (status < 0) {
if (printk_ratelimit())
dev_err(hub_dev, "connect-debounce failed, "
@@ -4278,6 +4397,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
goto done;
return;
}
+ if (hub_is_superspeed(hub->hdev))
+ unit_load = 150;
+ else
+ unit_load = 100;
for (i = 0; i < SET_CONFIG_TRIES; i++) {
@@ -4325,7 +4448,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
* on the parent.
*/
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB
- && udev->bus_mA <= 100) {
+ && udev->bus_mA <= unit_load) {
u16 devstat;
status = usb_get_status(udev, USB_RECIP_DEVICE, 0,
@@ -4427,7 +4550,7 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
if (!hub_is_superspeed(hdev)) {
if (!(portchange & USB_PORT_STAT_C_SUSPEND))
return 0;
- clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
+ usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
} else {
if (!udev || udev->state != USB_STATE_SUSPENDED ||
(portstatus & USB_PORT_STAT_LINK_STATE) !=
@@ -4555,7 +4678,7 @@ static void hub_events(void)
continue;
if (portchange & USB_PORT_STAT_C_CONNECTION) {
- clear_port_feature(hdev, i,
+ usb_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_CONNECTION);
connect_change = 1;
}
@@ -4566,7 +4689,7 @@ static void hub_events(void)
"port %d enable change, "
"status %08x\n",
i, portstatus);
- clear_port_feature(hdev, i,
+ usb_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_ENABLE);
/*
@@ -4597,7 +4720,7 @@ static void hub_events(void)
dev_dbg(hub_dev, "over-current change on port "
"%d\n", i);
- clear_port_feature(hdev, i,
+ usb_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_OVER_CURRENT);
msleep(100); /* Cool down */
hub_power_on(hub, true);
@@ -4611,7 +4734,7 @@ static void hub_events(void)
dev_dbg (hub_dev,
"reset change on port %d\n",
i);
- clear_port_feature(hdev, i,
+ usb_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_RESET);
}
if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
@@ -4619,18 +4742,18 @@ static void hub_events(void)
dev_dbg(hub_dev,
"warm reset change on port %d\n",
i);
- clear_port_feature(hdev, i,
+ usb_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_BH_PORT_RESET);
}
if (portchange & USB_PORT_STAT_C_LINK_STATE) {
- clear_port_feature(hub->hdev, i,
+ usb_clear_port_feature(hub->hdev, i,
USB_PORT_FEAT_C_PORT_LINK_STATE);
}
if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) {
dev_warn(hub_dev,
"config error on port %d\n",
i);
- clear_port_feature(hub->hdev, i,
+ usb_clear_port_feature(hub->hdev, i,
USB_PORT_FEAT_C_PORT_CONFIG_ERROR);
}
@@ -4638,9 +4761,23 @@ static void hub_events(void)
* SS.Inactive state.
*/
if (hub_port_warm_reset_required(hub, portstatus)) {
+ int status;
+ struct usb_device *udev =
+ hub->ports[i - 1]->child;
+
dev_dbg(hub_dev, "warm reset port %d\n", i);
- hub_port_reset(hub, i, NULL,
- HUB_BH_RESET_TIME, true);
+ if (!udev) {
+ status = hub_port_reset(hub, i,
+ NULL, HUB_BH_RESET_TIME,
+ true);
+ if (status < 0)
+ hub_port_disable(hub, i, 1);
+ } else {
+ usb_lock_device(udev);
+ status = usb_reset_device(udev);
+ usb_unlock_device(udev);
+ }
+ connect_change = 0;
}
if (connect_change)
@@ -4900,7 +5037,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
dev_dbg(&udev->dev, "%s for root hub!\n", __func__);
return -EISDIR;
}
- parent_hub = hdev_to_hub(parent_hdev);
+ parent_hub = usb_hub_to_struct_hub(parent_hdev);
/* Disable LPM and LTM while we reset the device and reinstall the alt
* settings. Device-initiated LPM settings, and system exit latency
@@ -5040,6 +5177,7 @@ int usb_reset_device(struct usb_device *udev)
{
int ret;
int i;
+ unsigned int noio_flag;
struct usb_host_config *config = udev->actconfig;
if (udev->state == USB_STATE_NOTATTACHED ||
@@ -5049,6 +5187,17 @@ int usb_reset_device(struct usb_device *udev)
return -EINVAL;
}
+ /*
+ * Don't allocate memory with GFP_KERNEL in the current
+ * context, to avoid a possible deadlock if a usb mass
+ * storage interface or a usbnet interface (iSCSI case)
+ * is included in the current configuration. The easiest
+ * approach is to do it for every device reset, because
+ * the device's 'memalloc_noio' flag may not have been
+ * set before resetting the usb device.
+ */
+ noio_flag = memalloc_noio_save();
+
/* Prevent autosuspend during the reset */
usb_autoresume_device(udev);
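/*
 * Editor's sketch (not part of the patch): general shape of the
 * memalloc_noio bracket used above.  While the flag is set, allocations made
 * by this task are implicitly restricted to GFP_NOIO, so resetting a storage
 * device cannot recurse into writeback I/O on that same device.
 * example_reset_path() is illustrative only.
 */
#include <linux/sched.h>

static int example_reset_path(void)
{
	unsigned int noio_flag = memalloc_noio_save();
	int ret = 0;

	/* ... reset work that may allocate memory runs here ... */

	memalloc_noio_restore(noio_flag);
	return ret;
}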
@@ -5093,6 +5242,7 @@ int usb_reset_device(struct usb_device *udev)
}
usb_autosuspend_device(udev);
+ memalloc_noio_restore(noio_flag);
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
@@ -5156,7 +5306,7 @@ EXPORT_SYMBOL_GPL(usb_queue_reset_device);
struct usb_device *usb_hub_find_child(struct usb_device *hdev,
int port1)
{
- struct usb_hub *hub = hdev_to_hub(hdev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (port1 < 1 || port1 > hdev->maxchild)
return NULL;
@@ -5173,7 +5323,7 @@ EXPORT_SYMBOL_GPL(usb_hub_find_child);
void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1,
enum usb_port_connect_type type)
{
- struct usb_hub *hub = hdev_to_hub(hdev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
hub->ports[port1 - 1]->connect_type = type;
}
@@ -5189,11 +5339,52 @@ void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1,
enum usb_port_connect_type
usb_get_hub_port_connect_type(struct usb_device *hdev, int port1)
{
- struct usb_hub *hub = hdev_to_hub(hdev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
return hub->ports[port1 - 1]->connect_type;
}
+void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
+ struct usb_hub_descriptor *desc)
+{
+ enum usb_port_connect_type connect_type;
+ int i;
+
+ if (!hub_is_superspeed(hdev)) {
+ for (i = 1; i <= hdev->maxchild; i++) {
+ connect_type = usb_get_hub_port_connect_type(hdev, i);
+
+ if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
+ u8 mask = 1 << (i%8);
+
+ if (!(desc->u.hs.DeviceRemovable[i/8] & mask)) {
+ dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n",
+ i);
+ desc->u.hs.DeviceRemovable[i/8] |= mask;
+ }
+ }
+ }
+ } else {
+ u16 port_removable = le16_to_cpu(desc->u.ss.DeviceRemovable);
+
+ for (i = 1; i <= hdev->maxchild; i++) {
+ connect_type = usb_get_hub_port_connect_type(hdev, i);
+
+ if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
+ u16 mask = 1 << i;
+
+ if (!(port_removable & mask)) {
+ dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n",
+ i);
+ port_removable |= mask;
+ }
+ }
+ }
+
+ desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
+ }
+}
+
#ifdef CONFIG_ACPI
/**
* usb_get_hub_port_acpi_handle - Get the usb port's acpi handle
@@ -5206,7 +5397,7 @@ usb_get_hub_port_connect_type(struct usb_device *hdev, int port1)
acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
int port1)
{
- struct usb_hub *hub = hdev_to_hub(hdev);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
}
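
Aside: a minimal user-space sketch (not part of the patch) of the DeviceRemovable bit math that usb_hub_adjust_deviceremovable() applies above. For a non-SuperSpeed hub the removable bitmap is a byte array indexed by port/8 with bit port%8; for a SuperSpeed hub it is a single 16-bit field with bit "port". The port count and the hard-wired assumption below are invented for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t hs_removable[4] = { 0 };	/* like desc->u.hs.DeviceRemovable[] */
	uint16_t ss_removable = 0;		/* like desc->u.ss.DeviceRemovable */
	int maxchild = 4;			/* hypothetical 4-port hub */
	int port;

	for (port = 1; port <= maxchild; port++) {
		/* pretend every port reports USB_PORT_CONNECT_TYPE_HARD_WIRED */
		hs_removable[port / 8] |= 1 << (port % 8);	/* HS/FS layout */
		ss_removable |= 1 << port;			/* SS layout */
	}

	printf("hs byte0=%#x ss mask=%#x\n", hs_removable[0], ss_removable);
	return 0;
}
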
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
new file mode 100644
index 000000000000..80ab9ee07017
--- /dev/null
+++ b/drivers/usb/core/hub.h
@@ -0,0 +1,122 @@
+/*
+ * usb hub driver header file
+ *
+ * Copyright (C) 1999 Linus Torvalds
+ * Copyright (C) 1999 Johannes Erdfelt
+ * Copyright (C) 1999 Gregory P. Smith
+ * Copyright (C) 2001 Brad Hards (bhards@bigpond.net.au)
+ * Copyright (C) 2012 Intel Corp (tianyu.lan@intel.com)
+ *
+ * move struct usb_hub to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/ch11.h>
+#include <linux/usb/hcd.h>
+#include "usb.h"
+
+struct usb_hub {
+ struct device *intfdev; /* the "interface" device */
+ struct usb_device *hdev;
+ struct kref kref;
+ struct urb *urb; /* for interrupt polling pipe */
+
+ /* buffer for urb ... with extra space in case of babble */
+ u8 (*buffer)[8];
+ union {
+ struct usb_hub_status hub;
+ struct usb_port_status port;
+ } *status; /* buffer for status reports */
+ struct mutex status_mutex; /* for the status buffer */
+
+ int error; /* last reported error */
+ int nerrors; /* track consecutive errors */
+
+ struct list_head event_list; /* hubs w/data or errs ready */
+ unsigned long event_bits[1]; /* status change bitmask */
+ unsigned long change_bits[1]; /* ports with logical connect
+ status change */
+ unsigned long busy_bits[1]; /* ports being reset or
+ resumed */
+ unsigned long removed_bits[1]; /* ports with a "removed"
+ device present */
+ unsigned long wakeup_bits[1]; /* ports that have signaled
+ remote wakeup */
+#if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
+#error event_bits[] is too short!
+#endif
+
+ struct usb_hub_descriptor *descriptor; /* class descriptor */
+ struct usb_tt tt; /* Transaction Translator */
+
+ unsigned mA_per_port; /* current for each child */
+
+ unsigned limited_power:1;
+ unsigned quiescing:1;
+ unsigned disconnected:1;
+
+ unsigned quirk_check_port_auto_suspend:1;
+
+ unsigned has_indicators:1;
+ u8 indicator[USB_MAXCHILDREN];
+ struct delayed_work leds;
+ struct delayed_work init_work;
+ struct usb_port **ports;
+};
+
+/**
+ * struct usb_port - kernel's representation of a usb port
+ * @child: usb device attached to the port
+ * @dev: generic device interface
+ * @port_owner: port's owner
+ * @connect_type: port's connect type
+ * @portnum: port number (1-based)
+ * @power_is_on: port's power state
+ * @did_runtime_put: port has done pm_runtime_put().
+ */
+struct usb_port {
+ struct usb_device *child;
+ struct device dev;
+ struct dev_state *port_owner;
+ enum usb_port_connect_type connect_type;
+ u8 portnum;
+ unsigned power_is_on:1;
+ unsigned did_runtime_put:1;
+};
+
+#define to_usb_port(_dev) \
+ container_of(_dev, struct usb_port, dev)
+
+extern int usb_hub_create_port_device(struct usb_hub *hub,
+ int port1);
+extern void usb_hub_remove_port_device(struct usb_hub *hub,
+ int port1);
+extern int usb_hub_set_port_power(struct usb_device *hdev,
+ int port1, bool set);
+extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev);
+extern int hub_port_debounce(struct usb_hub *hub, int port1,
+ bool must_be_connected);
+extern int usb_clear_port_feature(struct usb_device *hdev,
+ int port1, int feature);
+
+static inline int hub_port_debounce_be_connected(struct usb_hub *hub,
+ int port1)
+{
+ return hub_port_debounce(hub, port1, true);
+}
+
+static inline int hub_port_debounce_be_stable(struct usb_hub *hub,
+ int port1)
+{
+ return hub_port_debounce(hub, port1, false);
+}
+
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 131f73649b60..444d30e3a78b 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1751,7 +1751,7 @@ free_interfaces:
}
}
- i = dev->bus_mA - cp->desc.bMaxPower * 2;
+ i = dev->bus_mA - usb_get_max_power(dev, cp);
if (i < 0)
dev_warn(&dev->dev, "new config #%d exceeds power "
"limit by %dmA\n",
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
new file mode 100644
index 000000000000..797f9d514732
--- /dev/null
+++ b/drivers/usb/core/port.c
@@ -0,0 +1,202 @@
+/*
+ * usb port device code
+ *
+ * Copyright (C) 2012 Intel Corp
+ *
+ * Author: Lan Tianyu <tianyu.lan@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/pm_qos.h>
+
+#include "hub.h"
+
+static const struct attribute_group *port_dev_group[];
+
+static ssize_t show_port_connect_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+ char *result;
+
+ switch (port_dev->connect_type) {
+ case USB_PORT_CONNECT_TYPE_HOT_PLUG:
+ result = "hotplug";
+ break;
+ case USB_PORT_CONNECT_TYPE_HARD_WIRED:
+ result = "hardwired";
+ break;
+ case USB_PORT_NOT_USED:
+ result = "not used";
+ break;
+ default:
+ result = "unknown";
+ break;
+ }
+
+ return sprintf(buf, "%s\n", result);
+}
+static DEVICE_ATTR(connect_type, S_IRUGO, show_port_connect_type,
+ NULL);
+
+static struct attribute *port_dev_attrs[] = {
+ &dev_attr_connect_type.attr,
+ NULL,
+};
+
+static struct attribute_group port_dev_attr_grp = {
+ .attrs = port_dev_attrs,
+};
+
+static const struct attribute_group *port_dev_group[] = {
+ &port_dev_attr_grp,
+ NULL,
+};
+
+static void usb_port_device_release(struct device *dev)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+
+ dev_pm_qos_hide_flags(dev);
+ kfree(port_dev);
+}
+
+#ifdef CONFIG_USB_SUSPEND
+static int usb_port_runtime_resume(struct device *dev)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *hdev = to_usb_device(dev->parent->parent);
+ struct usb_interface *intf = to_usb_interface(dev->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+ int port1 = port_dev->portnum;
+ int retval;
+
+ if (!hub)
+ return -EINVAL;
+
+ usb_autopm_get_interface(intf);
+ set_bit(port1, hub->busy_bits);
+
+ retval = usb_hub_set_port_power(hdev, port1, true);
+ if (port_dev->child && !retval) {
+ /*
+ * Wait for usb hub port to be reconnected in order to make
+ * the resume procedure successful.
+ */
+ retval = hub_port_debounce_be_connected(hub, port1);
+ if (retval < 0) {
+ dev_dbg(&port_dev->dev, "can't get reconnection after setting port power on, status %d\n",
+ retval);
+ goto out;
+ }
+ usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
+
+ /* Set return value to 0 if debounce successful */
+ retval = 0;
+ }
+
+out:
+ clear_bit(port1, hub->busy_bits);
+ usb_autopm_put_interface(intf);
+ return retval;
+}
+
+static int usb_port_runtime_suspend(struct device *dev)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *hdev = to_usb_device(dev->parent->parent);
+ struct usb_interface *intf = to_usb_interface(dev->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+ int port1 = port_dev->portnum;
+ int retval;
+
+ if (!hub)
+ return -EINVAL;
+
+ if (dev_pm_qos_flags(&port_dev->dev, PM_QOS_FLAG_NO_POWER_OFF)
+ == PM_QOS_FLAGS_ALL)
+ return -EAGAIN;
+
+ usb_autopm_get_interface(intf);
+ set_bit(port1, hub->busy_bits);
+ retval = usb_hub_set_port_power(hdev, port1, false);
+ usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
+ usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
+ clear_bit(port1, hub->busy_bits);
+ usb_autopm_put_interface(intf);
+ return retval;
+}
+#endif
+
+static const struct dev_pm_ops usb_port_pm_ops = {
+#ifdef CONFIG_USB_SUSPEND
+ .runtime_suspend = usb_port_runtime_suspend,
+ .runtime_resume = usb_port_runtime_resume,
+ .runtime_idle = pm_generic_runtime_idle,
+#endif
+};
+
+struct device_type usb_port_device_type = {
+ .name = "usb_port",
+ .release = usb_port_device_release,
+ .pm = &usb_port_pm_ops,
+};
+
+int usb_hub_create_port_device(struct usb_hub *hub, int port1)
+{
+ struct usb_port *port_dev = NULL;
+ int retval;
+
+ port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
+ if (!port_dev) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ hub->ports[port1 - 1] = port_dev;
+ port_dev->portnum = port1;
+ port_dev->power_is_on = true;
+ port_dev->dev.parent = hub->intfdev;
+ port_dev->dev.groups = port_dev_group;
+ port_dev->dev.type = &usb_port_device_type;
+ dev_set_name(&port_dev->dev, "port%d", port1);
+
+ retval = device_register(&port_dev->dev);
+ if (retval)
+ goto error_register;
+
+ pm_runtime_set_active(&port_dev->dev);
+
+ /* It would be dangerous if user space couldn't
+ * prevent the usb device from being powered off. So don't
+ * enable port runtime pm if we failed to expose the port's pm qos.
+ */
+ if (!dev_pm_qos_expose_flags(&port_dev->dev,
+ PM_QOS_FLAG_NO_POWER_OFF))
+ pm_runtime_enable(&port_dev->dev);
+
+ device_enable_async_suspend(&port_dev->dev);
+ return 0;
+
+error_register:
+ put_device(&port_dev->dev);
+exit:
+ return retval;
+}
+
+void usb_hub_remove_port_device(struct usb_hub *hub,
+ int port1)
+{
+ device_unregister(&hub->ports[port1 - 1]->dev);
+}
+
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index fdefd9c7f7af..3113c1d71442 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -43,6 +43,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Microsoft LifeCam-VX700 v2.0 */
+ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 818e4a024d0d..3f81a3dc6867 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -17,7 +17,7 @@
#include "usb.h"
/* Active configuration fields */
-#define usb_actconfig_show(field, multiplier, format_string) \
+#define usb_actconfig_show(field, format_string) \
static ssize_t show_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
@@ -28,18 +28,31 @@ static ssize_t show_##field(struct device *dev, \
actconfig = udev->actconfig; \
if (actconfig) \
return sprintf(buf, format_string, \
- actconfig->desc.field * multiplier); \
+ actconfig->desc.field); \
else \
return 0; \
} \
-#define usb_actconfig_attr(field, multiplier, format_string) \
-usb_actconfig_show(field, multiplier, format_string) \
-static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+#define usb_actconfig_attr(field, format_string) \
+ usb_actconfig_show(field, format_string) \
+ static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+
+usb_actconfig_attr(bNumInterfaces, "%2d\n")
+usb_actconfig_attr(bmAttributes, "%2x\n")
-usb_actconfig_attr(bNumInterfaces, 1, "%2d\n")
-usb_actconfig_attr(bmAttributes, 1, "%2x\n")
-usb_actconfig_attr(bMaxPower, 2, "%3dmA\n")
+static ssize_t show_bMaxPower(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_device *udev;
+ struct usb_host_config *actconfig;
+
+ udev = to_usb_device(dev);
+ actconfig = udev->actconfig;
+ if (!actconfig)
+ return 0;
+ return sprintf(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
+}
+static DEVICE_ATTR(bMaxPower, S_IRUGO, show_bMaxPower, NULL);
static ssize_t show_configuration_string(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -56,7 +69,7 @@ static ssize_t show_configuration_string(struct device *dev,
static DEVICE_ATTR(configuration, S_IRUGO, show_configuration_string, NULL);
/* configuration value is always present, and r/w */
-usb_actconfig_show(bConfigurationValue, 1, "%u\n");
+usb_actconfig_show(bConfigurationValue, "%u\n");
static ssize_t
set_bConfigurationValue(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 1c528c1bf0be..a7f20bde0e5e 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,6 +1,7 @@
#include <linux/pm.h>
#include <linux/acpi.h>
+struct usb_hub_descriptor;
struct dev_state;
/* Functions local to drivers/usb/core/ */
@@ -38,6 +39,15 @@ extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
+static inline unsigned usb_get_max_power(struct usb_device *udev,
+ struct usb_host_config *c)
+{
+ /* SuperSpeed power is in 8 mA units; others are in 2 mA units */
+ unsigned mul = (udev->speed == USB_SPEED_SUPER ? 8 : 2);
+
+ return c->desc.bMaxPower * mul;
+}
+
extern void usb_kick_khubd(struct usb_device *dev);
extern int usb_match_one_id_intf(struct usb_device *dev,
struct usb_host_interface *intf,
@@ -173,6 +183,8 @@ extern enum usb_port_connect_type
usb_get_hub_port_connect_type(struct usb_device *hdev, int port1);
extern void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1,
enum usb_port_connect_type type);
+extern void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
+ struct usb_hub_descriptor *desc);
#ifdef CONFIG_ACPI
extern int usb_acpi_register(void);
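
Aside: a stand-alone sketch (not part of the patch) of the unit conversion introduced by usb_get_max_power() above: bMaxPower is expressed in 8 mA units for SuperSpeed devices and in 2 mA units otherwise. The descriptor value of 50 below is an invented example.

#include <stdio.h>

enum example_speed { EXAMPLE_SPEED_HIGH, EXAMPLE_SPEED_SUPER };

/* mirrors usb_get_max_power(): bMaxPower * 8 mA for SuperSpeed, * 2 mA otherwise */
static unsigned int max_power_mA(enum example_speed speed, unsigned int bMaxPower)
{
	unsigned int mul = (speed == EXAMPLE_SPEED_SUPER) ? 8 : 2;

	return bMaxPower * mul;
}

int main(void)
{
	printf("high speed: %u mA, super speed: %u mA\n",
	       max_power_mA(EXAMPLE_SPEED_HIGH, 50),
	       max_power_mA(EXAMPLE_SPEED_SUPER, 50));
	return 0;
}
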
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index f6a6e070c2ac..68e9a2c5a01a 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,6 +1,6 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
- depends on (USB && USB_GADGET)
+ depends on (USB || USB_GADGET) && GENERIC_HARDIRQS
select USB_OTG_UTILS
select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
help
@@ -12,6 +12,35 @@ config USB_DWC3
if USB_DWC3
+choice
+ bool "DWC3 Mode Selection"
+ default USB_DWC3_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_DWC3_HOST if (USB && !USB_GADGET)
+ default USB_DWC3_GADGET if (!USB && USB_GADGET)
+
+config USB_DWC3_HOST
+ bool "Host only mode"
+ depends on USB
+ help
+ Select this when you want to use DWC3 in host mode only;
+ the gadget features will not be available.
+
+config USB_DWC3_GADGET
+ bool "Gadget only mode"
+ depends on USB_GADGET
+ help
+ Select this when you want to use DWC3 in gadget mode only;
+ the host features will not be available.
+
+config USB_DWC3_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on (USB && USB_GADGET)
+ help
+ This is the default mode of operation of the DWC3 controller,
+ where both host and gadget features are enabled.
+
+endchoice
+
config USB_DWC3_DEBUG
bool "Enable Debugging Messages"
help
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 4502648b8171..0c7ac92582be 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -4,8 +4,14 @@ ccflags-$(CONFIG_USB_DWC3_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC3) += dwc3.o
dwc3-y := core.o
-dwc3-y += host.o
-dwc3-y += gadget.o ep0.o
+
+ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
+ dwc3-y += host.o
+endif
+
+ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
+ dwc3-y += gadget.o ep0.o
+endif
ifneq ($(CONFIG_DEBUG_FS),)
dwc3-y += debugfs.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 3a4004a620ad..999909451e37 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -420,18 +420,27 @@ static int dwc3_probe(struct platform_device *pdev)
return -ENOMEM;
}
- dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (node) {
+ dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
+ dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
+ } else {
+ dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
+ }
+
if (IS_ERR_OR_NULL(dwc->usb2_phy)) {
dev_err(dev, "no usb2 phy configured\n");
return -EPROBE_DEFER;
}
- dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
if (IS_ERR_OR_NULL(dwc->usb3_phy)) {
dev_err(dev, "no usb3 phy configured\n");
return -EPROBE_DEFER;
}
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+
spin_lock_init(&dwc->lock);
platform_set_drvdata(pdev, dwc);
@@ -450,8 +459,7 @@ static int dwc3_probe(struct platform_device *pdev)
else
dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
- if (of_get_property(node, "tx-fifo-resize", NULL))
- dwc->needs_fifo_resize = true;
+ dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -550,9 +558,9 @@ err0:
static int dwc3_remove(struct platform_device *pdev)
{
struct dwc3 *dwc = platform_get_drvdata(pdev);
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -580,11 +588,22 @@ static int dwc3_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id of_dwc3_match[] = {
+ {
+ .compatible = "synopsys,dwc3"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_match);
+#endif
+
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = dwc3_remove,
.driver = {
.name = "dwc3",
+ .of_match_table = of_match_ptr(of_dwc3_match),
},
};
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 499956344262..b41750660235 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -55,7 +55,9 @@
#define DWC3_ENDPOINTS_NUM 32
#define DWC3_XHCI_RESOURCES_NUM 2
-#define DWC3_EVENT_BUFFERS_SIZE PAGE_SIZE
+#define DWC3_EVENT_SIZE 4 /* bytes */
+#define DWC3_EVENT_MAX_NUM 64 /* 2 events/endpoint */
+#define DWC3_EVENT_BUFFERS_SIZE (DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM)
#define DWC3_EVENT_TYPE_MASK 0xfe
#define DWC3_EVENT_TYPE_DEV 0
@@ -405,7 +407,6 @@ struct dwc3_event_buffer {
* @number: endpoint number (1 - 15)
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
* @resource_index: Resource transfer index
- * @current_uf: Current uf received through last event parameter
* @interval: the intervall on which the ISOC transfer is started
* @name: a human readable name e.g. ep1out-bulk
* @direction: true for TX, false for RX
@@ -439,7 +440,6 @@ struct dwc3_ep {
u8 number;
u8 type;
u8 resource_index;
- u16 current_uf;
u32 interval;
char name[20];
@@ -581,6 +581,7 @@ struct dwc3_request {
struct usb_request request;
struct list_head list;
struct dwc3_ep *dep;
+ u32 start_slot;
u8 epnum;
struct dwc3_trb *trb;
@@ -721,6 +722,7 @@ struct dwc3 {
struct dwc3_hwparams hwparams;
struct dentry *root;
+ struct debugfs_regset32 *regset;
u8 test_mode;
u8 test_mode_nr;
@@ -862,10 +864,24 @@ union dwc3_event {
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
+#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
-
+#else
+static inline int dwc3_host_init(struct dwc3 *dwc)
+{ return 0; }
+static inline void dwc3_host_exit(struct dwc3 *dwc)
+{ }
+#endif
+
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_gadget_init(struct dwc3 *dwc);
void dwc3_gadget_exit(struct dwc3 *dwc);
+#else
+static inline int dwc3_gadget_init(struct dwc3 *dwc)
+{ return 0; }
+static inline void dwc3_gadget_exit(struct dwc3 *dwc)
+{ }
+#endif
#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 92604b4f9712..4a752e730c5f 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -56,10 +56,10 @@
#define dump_register(nm) \
{ \
.name = __stringify(nm), \
- .offset = DWC3_ ##nm, \
+ .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \
}
-static const struct debugfs_reg32 dwc3_regs[] = {
+static struct debugfs_reg32 dwc3_regs[] = {
dump_register(GSBUSCFG0),
dump_register(GSBUSCFG1),
dump_register(GTXTHRCFG),
@@ -376,27 +376,6 @@ static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(OSTS),
};
-static int dwc3_regdump_show(struct seq_file *s, void *unused)
-{
- struct dwc3 *dwc = s->private;
-
- seq_printf(s, "DesignWare USB3 Core Register Dump\n");
- debugfs_print_regs32(s, dwc3_regs, ARRAY_SIZE(dwc3_regs),
- dwc->regs, "");
- return 0;
-}
-
-static int dwc3_regdump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dwc3_regdump_show, inode->i_private);
-}
-
-static const struct file_operations dwc3_regdump_fops = {
- .open = dwc3_regdump_open,
- .read = seq_read,
- .release = single_release,
-};
-
static int dwc3_mode_show(struct seq_file *s, void *unused)
{
struct dwc3 *dwc = s->private;
@@ -666,13 +645,23 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
dwc->root = root;
- file = debugfs_create_file("regdump", S_IRUGO, root, dwc,
- &dwc3_regdump_fops);
+ dwc->regset = kzalloc(sizeof(*dwc->regset), GFP_KERNEL);
+ if (!dwc->regset) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ dwc->regset->regs = dwc3_regs;
+ dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
+ dwc->regset->base = dwc->regs;
+
+ file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
if (!file) {
ret = -ENOMEM;
goto err1;
}
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET)
file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
dwc, &dwc3_mode_fops);
if (!file) {
@@ -693,6 +682,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
ret = -ENOMEM;
goto err1;
}
+#endif
return 0;
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index aae5328ac771..b50da53e9a52 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -42,7 +42,7 @@ static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
memset(&pdata, 0x00, sizeof(pdata));
- pdev = platform_device_alloc("nop_usb_xceiv", 0);
+ pdev = platform_device_alloc("nop_usb_xceiv", PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
@@ -53,7 +53,7 @@ static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
if (ret)
goto err1;
- pdev = platform_device_alloc("nop_usb_xceiv", 1);
+ pdev = platform_device_alloc("nop_usb_xceiv", PLATFORM_DEVID_AUTO);
if (!pdev) {
ret = -ENOMEM;
goto err1;
@@ -95,13 +95,14 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
struct platform_device *dwc3;
struct dwc3_exynos *exynos;
struct clk *clk;
+ struct device *dev = &pdev->dev;
int ret = -ENOMEM;
- exynos = kzalloc(sizeof(*exynos), GFP_KERNEL);
+ exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
if (!exynos) {
- dev_err(&pdev->dev, "not enough memory\n");
- goto err0;
+ dev_err(dev, "not enough memory\n");
+ return -ENOMEM;
}
/*
@@ -116,30 +117,30 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
ret = dwc3_exynos_register_phys(exynos);
if (ret) {
- dev_err(&pdev->dev, "couldn't register PHYs\n");
- goto err1;
+ dev_err(dev, "couldn't register PHYs\n");
+ return ret;
}
dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
if (!dwc3) {
- dev_err(&pdev->dev, "couldn't allocate dwc3 device\n");
- goto err1;
+ dev_err(dev, "couldn't allocate dwc3 device\n");
+ return -ENOMEM;
}
- clk = clk_get(&pdev->dev, "usbdrd30");
+ clk = devm_clk_get(dev, "usbdrd30");
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "couldn't get clock\n");
+ dev_err(dev, "couldn't get clock\n");
ret = -EINVAL;
- goto err3;
+ goto err1;
}
- dma_set_coherent_mask(&dwc3->dev, pdev->dev.coherent_dma_mask);
+ dma_set_coherent_mask(&dwc3->dev, dev->coherent_dma_mask);
- dwc3->dev.parent = &pdev->dev;
- dwc3->dev.dma_mask = pdev->dev.dma_mask;
- dwc3->dev.dma_parms = pdev->dev.dma_parms;
+ dwc3->dev.parent = dev;
+ dwc3->dev.dma_mask = dev->dma_mask;
+ dwc3->dev.dma_parms = dev->dma_parms;
exynos->dwc3 = dwc3;
- exynos->dev = &pdev->dev;
+ exynos->dev = dev;
exynos->clk = clk;
clk_enable(exynos->clk);
@@ -147,26 +148,23 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
ret = platform_device_add_resources(dwc3, pdev->resource,
pdev->num_resources);
if (ret) {
- dev_err(&pdev->dev, "couldn't add resources to dwc3 device\n");
- goto err4;
+ dev_err(dev, "couldn't add resources to dwc3 device\n");
+ goto err2;
}
ret = platform_device_add(dwc3);
if (ret) {
- dev_err(&pdev->dev, "failed to register dwc3 device\n");
- goto err4;
+ dev_err(dev, "failed to register dwc3 device\n");
+ goto err2;
}
return 0;
-err4:
+err2:
clk_disable(clk);
- clk_put(clk);
-err3:
- platform_device_put(dwc3);
err1:
- kfree(exynos);
-err0:
+ platform_device_put(dwc3);
+
return ret;
}
@@ -179,16 +177,13 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
platform_device_unregister(exynos->usb3_phy);
clk_disable(exynos->clk);
- clk_put(exynos->clk);
-
- kfree(exynos);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id exynos_dwc3_match[] = {
- { .compatible = "samsung,exynos-dwc3" },
+ { .compatible = "samsung,exynos5250-dwusb3" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index f31867fd2574..22f337f57219 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -43,10 +43,13 @@
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dwc3-omap.h>
+#include <linux/usb/dwc3-omap.h>
+#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/usb/otg.h>
#include <linux/usb/nop-usb-xceiv.h>
@@ -78,23 +81,6 @@
/* SYSCONFIG REGISTER */
#define USBOTGSS_SYSCONFIG_DMADISABLE (1 << 16)
-#define USBOTGSS_SYSCONFIG_STANDBYMODE(x) ((x) << 4)
-
-#define USBOTGSS_STANDBYMODE_FORCE_STANDBY 0
-#define USBOTGSS_STANDBYMODE_NO_STANDBY 1
-#define USBOTGSS_STANDBYMODE_SMART_STANDBY 2
-#define USBOTGSS_STANDBYMODE_SMART_WAKEUP 3
-
-#define USBOTGSS_STANDBYMODE_MASK (0x03 << 4)
-
-#define USBOTGSS_SYSCONFIG_IDLEMODE(x) ((x) << 2)
-
-#define USBOTGSS_IDLEMODE_FORCE_IDLE 0
-#define USBOTGSS_IDLEMODE_NO_IDLE 1
-#define USBOTGSS_IDLEMODE_SMART_IDLE 2
-#define USBOTGSS_IDLEMODE_SMART_WAKEUP 3
-
-#define USBOTGSS_IDLEMODE_MASK (0x03 << 2)
/* IRQ_EOI REGISTER */
#define USBOTGSS_IRQ_EOI_LINE_NUMBER (1 << 0)
@@ -133,7 +119,6 @@ struct dwc3_omap {
/* device lock */
spinlock_t lock;
- struct platform_device *dwc3;
struct platform_device *usb2_phy;
struct platform_device *usb3_phy;
struct device *dev;
@@ -147,6 +132,8 @@ struct dwc3_omap {
u32 dma_status:1;
};
+struct dwc3_omap *_omap;
+
static inline u32 dwc3_omap_readl(void __iomem *base, u32 offset)
{
return readl(base + offset);
@@ -157,6 +144,57 @@ static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value)
writel(value, base + offset);
}
+void dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
+{
+ u32 val;
+ struct dwc3_omap *omap = _omap;
+
+ switch (status) {
+ case OMAP_DWC3_ID_GROUND:
+ dev_dbg(omap->dev, "ID GND\n");
+
+ val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+ val &= ~(USBOTGSS_UTMI_OTG_STATUS_IDDIG
+ | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
+ | USBOTGSS_UTMI_OTG_STATUS_SESSEND);
+ val |= USBOTGSS_UTMI_OTG_STATUS_SESSVALID
+ | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
+ dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
+ break;
+
+ case OMAP_DWC3_VBUS_VALID:
+ dev_dbg(omap->dev, "VBUS Connect\n");
+
+ val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+ val &= ~USBOTGSS_UTMI_OTG_STATUS_SESSEND;
+ val |= USBOTGSS_UTMI_OTG_STATUS_IDDIG
+ | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
+ | USBOTGSS_UTMI_OTG_STATUS_SESSVALID
+ | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
+ dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
+ break;
+
+ case OMAP_DWC3_ID_FLOAT:
+ case OMAP_DWC3_VBUS_OFF:
+ dev_dbg(omap->dev, "VBUS Disconnect\n");
+
+ val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+ val &= ~(USBOTGSS_UTMI_OTG_STATUS_SESSVALID
+ | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
+ | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT);
+ val |= USBOTGSS_UTMI_OTG_STATUS_SESSEND
+ | USBOTGSS_UTMI_OTG_STATUS_IDDIG;
+ dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
+ break;
+
+ default:
+ dev_dbg(omap->dev, "ID float\n");
+ }
+
+ return;
+}
+EXPORT_SYMBOL_GPL(dwc3_omap_mailbox);
+
static int dwc3_omap_register_phys(struct dwc3_omap *omap)
{
struct nop_usb_xceiv_platform_data pdata;
@@ -165,7 +203,7 @@ static int dwc3_omap_register_phys(struct dwc3_omap *omap)
memset(&pdata, 0x00, sizeof(pdata));
- pdev = platform_device_alloc("nop_usb_xceiv", 0);
+ pdev = platform_device_alloc("nop_usb_xceiv", PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
@@ -176,7 +214,7 @@ static int dwc3_omap_register_phys(struct dwc3_omap *omap)
if (ret)
goto err1;
- pdev = platform_device_alloc("nop_usb_xceiv", 1);
+ pdev = platform_device_alloc("nop_usb_xceiv", PLATFORM_DEVID_AUTO);
if (!pdev) {
ret = -ENOMEM;
goto err1;
@@ -262,12 +300,20 @@ static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
return IRQ_HANDLED;
}
+static int dwc3_omap_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
static int dwc3_omap_probe(struct platform_device *pdev)
{
struct dwc3_omap_data *pdata = pdev->dev.platform_data;
struct device_node *node = pdev->dev.of_node;
- struct platform_device *dwc3;
struct dwc3_omap *omap;
struct resource *res;
struct device *dev = &pdev->dev;
@@ -314,30 +360,32 @@ static int dwc3_omap_probe(struct platform_device *pdev)
return ret;
}
- dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
- if (!dwc3) {
- dev_err(dev, "couldn't allocate dwc3 device\n");
- return -ENOMEM;
- }
-
context = devm_kzalloc(dev, resource_size(res), GFP_KERNEL);
if (!context) {
dev_err(dev, "couldn't allocate dwc3 context memory\n");
- goto err2;
+ return -ENOMEM;
}
spin_lock_init(&omap->lock);
- dma_set_coherent_mask(&dwc3->dev, dev->coherent_dma_mask);
- dwc3->dev.parent = dev;
- dwc3->dev.dma_mask = dev->dma_mask;
- dwc3->dev.dma_parms = dev->dma_parms;
omap->resource_size = resource_size(res);
omap->context = context;
omap->dev = dev;
omap->irq = irq;
omap->base = base;
- omap->dwc3 = dwc3;
+
+ /*
+ * REVISIT if we ever have two instances of the wrapper, we will be
+ * in big trouble
+ */
+ _omap = omap;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "get_sync failed with err %d\n", ret);
+ return ret;
+ }
reg = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
@@ -368,21 +416,12 @@ static int dwc3_omap_probe(struct platform_device *pdev)
reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
omap->dma_status = !!(reg & USBOTGSS_SYSCONFIG_DMADISABLE);
- /* Set No-Idle and No-Standby */
- reg &= ~(USBOTGSS_STANDBYMODE_MASK
- | USBOTGSS_IDLEMODE_MASK);
-
- reg |= (USBOTGSS_SYSCONFIG_STANDBYMODE(USBOTGSS_STANDBYMODE_NO_STANDBY)
- | USBOTGSS_SYSCONFIG_IDLEMODE(USBOTGSS_IDLEMODE_NO_IDLE));
-
- dwc3_omap_writel(omap->base, USBOTGSS_SYSCONFIG, reg);
-
ret = devm_request_irq(dev, omap->irq, dwc3_omap_interrupt, 0,
"dwc3-omap", omap);
if (ret) {
dev_err(dev, "failed to request IRQ #%d --> %d\n",
omap->irq, ret);
- goto err2;
+ return ret;
}
/* enable all IRQs */
@@ -401,33 +440,28 @@ static int dwc3_omap_probe(struct platform_device *pdev)
dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_1, reg);
- ret = platform_device_add_resources(dwc3, pdev->resource,
- pdev->num_resources);
- if (ret) {
- dev_err(dev, "couldn't add resources to dwc3 device\n");
- goto err2;
- }
-
- ret = platform_device_add(dwc3);
- if (ret) {
- dev_err(dev, "failed to register dwc3 device\n");
- goto err2;
+ if (node) {
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to create dwc3 core\n");
+ return ret;
+ }
}
return 0;
-
-err2:
- platform_device_put(dwc3);
- return ret;
}
static int dwc3_omap_remove(struct platform_device *pdev)
{
struct dwc3_omap *omap = platform_get_drvdata(pdev);
- platform_device_unregister(omap->dwc3);
platform_device_unregister(omap->usb2_phy);
platform_device_unregister(omap->usb3_phy);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
+
return 0;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2e43b332aae8..a04342f6cbfa 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -241,21 +241,23 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status)
{
struct dwc3 *dwc = dep->dwc;
+ int i;
if (req->queued) {
- if (req->request.num_mapped_sgs)
- dep->busy_slot += req->request.num_mapped_sgs;
- else
+ i = 0;
+ do {
dep->busy_slot++;
-
- /*
- * Skip LINK TRB. We can't use req->trb and check for
- * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
- * completed (not the LINK TRB).
- */
- if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ /*
+ * Skip LINK TRB. We can't use req->trb and check for
+ * DWC3_TRBCTL_LINK_TRB because it points the TRB we
+ * just completed (not the LINK TRB).
+ */
+ if (((dep->busy_slot & DWC3_TRB_MASK) ==
+ DWC3_TRB_NUM - 1) &&
usb_endpoint_xfer_isoc(dep->endpoint.desc))
- dep->busy_slot++;
+ dep->busy_slot++;
+ } while (++i < req->request.num_mapped_sgs);
+ req->queued = false;
}
list_del(&req->list);
req->trb = NULL;
@@ -749,33 +751,32 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
*/
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
struct dwc3_request *req, dma_addr_t dma,
- unsigned length, unsigned last, unsigned chain)
+ unsigned length, unsigned last, unsigned chain, unsigned node)
{
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
- unsigned int cur_slot;
-
dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
dep->name, req, (unsigned long long) dma,
length, last ? " last" : "",
chain ? " chain" : "");
- trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
- cur_slot = dep->free_slot;
- dep->free_slot++;
-
/* Skip the LINK-TRB on ISOC */
- if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
usb_endpoint_xfer_isoc(dep->endpoint.desc))
- return;
+ dep->free_slot++;
+
+ trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
if (!req->trb) {
dwc3_gadget_move_request_queued(req);
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+ req->start_slot = dep->free_slot & DWC3_TRB_MASK;
}
+ dep->free_slot++;
+
trb->size = DWC3_TRB_SIZE_LENGTH(length);
trb->bpl = lower_32_bits(dma);
trb->bph = upper_32_bits(dma);
@@ -786,9 +787,12 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
break;
case USB_ENDPOINT_XFER_ISOC:
- trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ if (!node)
+ trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ else
+ trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
- if (!req->request.no_interrupt)
+ if (!req->request.no_interrupt && !chain)
trb->ctrl |= DWC3_TRB_CTRL_IOC;
break;
@@ -807,14 +811,13 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
trb->ctrl |= DWC3_TRB_CTRL_CSP;
- } else {
- if (chain)
- trb->ctrl |= DWC3_TRB_CTRL_CHN;
-
- if (last)
- trb->ctrl |= DWC3_TRB_CTRL_LST;
+ } else if (last) {
+ trb->ctrl |= DWC3_TRB_CTRL_LST;
}
+ if (chain)
+ trb->ctrl |= DWC3_TRB_CTRL_CHN;
+
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
@@ -885,6 +888,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
list_for_each_entry_safe(req, n, &dep->request_list, list) {
unsigned length;
dma_addr_t dma;
+ last_one = false;
if (req->request.num_mapped_sgs > 0) {
struct usb_request *request = &req->request;
@@ -900,7 +904,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (i == (request->num_mapped_sgs - 1) ||
sg_is_last(s)) {
- last_one = true;
+ if (list_is_last(&req->list,
+ &dep->request_list))
+ last_one = true;
chain = false;
}
@@ -912,7 +918,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
chain = false;
dwc3_prepare_one_trb(dep, req, dma, length,
- last_one, chain);
+ last_one, chain, i);
if (last_one)
break;
@@ -930,7 +936,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
last_one = 1;
dwc3_prepare_one_trb(dep, req, dma, length,
- last_one, false);
+ last_one, false, 0);
if (last_one)
break;
@@ -977,13 +983,14 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
}
memset(&params, 0, sizeof(params));
- params.param0 = upper_32_bits(req->trb_dma);
- params.param1 = lower_32_bits(req->trb_dma);
- if (start_new)
+ if (start_new) {
+ params.param0 = upper_32_bits(req->trb_dma);
+ params.param1 = lower_32_bits(req->trb_dma);
cmd = DWC3_DEPCMD_STARTTRANSFER;
- else
+ } else {
cmd = DWC3_DEPCMD_UPDATETRANSFER;
+ }
cmd |= DWC3_DEPCMD_PARAM(cmd_param);
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
@@ -1082,8 +1089,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
*
*/
if (dep->flags & DWC3_EP_PENDING_REQUEST) {
- int ret;
-
/*
* If xfernotready is already elapsed and it is a case
* of isoc transfer, then issue END TRANSFER, so that
@@ -1091,7 +1096,10 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* notion of current microframe.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- dwc3_stop_active_transfer(dwc, dep->number);
+ if (list_empty(&dep->req_queued)) {
+ dwc3_stop_active_transfer(dwc, dep->number);
+ dep->flags = DWC3_EP_ENABLED;
+ }
return 0;
}
@@ -1099,6 +1107,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
if (ret && ret != -EBUSY)
dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
dep->name);
+ return ret;
}
/*
@@ -1115,16 +1124,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
if (ret && ret != -EBUSY)
dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
dep->name);
- }
-
- /*
- * 3. Missed ISOC Handling. We need to start isoc transfer on the saved
- * uframe number.
- */
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- (dep->flags & DWC3_EP_MISSED_ISOC)) {
- __dwc3_gadget_start_isoc(dwc, dep, dep->current_uf);
- dep->flags &= ~DWC3_EP_MISSED_ISOC;
+ return ret;
}
return 0;
@@ -1605,6 +1605,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
if (epnum == 0 || epnum == 1) {
dep->endpoint.maxpacket = 512;
+ dep->endpoint.maxburst = 1;
dep->endpoint.ops = &dwc3_gadget_ep0_ops;
if (!epnum)
dwc->gadget.ep0 = &dep->endpoint;
@@ -1651,76 +1652,134 @@ static void dwc3_gadget_release(struct device *dev)
}
/* -------------------------------------------------------------------------- */
-static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ struct dwc3_request *req, struct dwc3_trb *trb,
const struct dwc3_event_depevt *event, int status)
{
- struct dwc3_request *req;
- struct dwc3_trb *trb;
unsigned int count;
unsigned int s_pkt = 0;
unsigned int trb_status;
+ if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+ /*
+ * We continue despite the error. There is not much we
+ * can do. If we don't clean it up we loop forever. If
+ * we skip the TRB then it gets overwritten after a
+ * while since we use them in a ring buffer. A BUG()
+ * would help. Let's hope that if this occurs, someone
+ * fixes the root cause instead of looking away :)
+ */
+ dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
+ dep->name, trb);
+ count = trb->size & DWC3_TRB_SIZE_MASK;
+
+ if (dep->direction) {
+ if (count) {
+ trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
+ if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
+ dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
+ dep->name);
+ /*
+ * If a missed isoc occurred and there is
+ * no request queued, then issue END
+ * TRANSFER, so that the core generates the
+ * next xfernotready and we will issue
+ * a fresh START TRANSFER.
+ * If there are still queued requests,
+ * then wait; do not issue either END
+ * or UPDATE TRANSFER, just attach the next
+ * request in request_list during
+ * giveback. If any future queued request
+ * is successfully transferred, then we
+ * will issue UPDATE TRANSFER for all
+ * requests in the request_list.
+ */
+ dep->flags |= DWC3_EP_MISSED_ISOC;
+ } else {
+ dev_err(dwc->dev, "incomplete IN transfer %s\n",
+ dep->name);
+ status = -ECONNRESET;
+ }
+ } else {
+ dep->flags &= ~DWC3_EP_MISSED_ISOC;
+ }
+ } else {
+ if (count && (event->status & DEPEVT_STATUS_SHORT))
+ s_pkt = 1;
+ }
+
+ /*
+ * We assume here we will always receive the entire data block
+ * which we should receive. Meaning, if we program RX to
+ * receive 4K but we receive only 2K, we assume that's all we
+ * should receive and we simply bounce the request back to the
+ * gadget driver for further processing.
+ */
+ req->request.actual += req->request.length - count;
+ if (s_pkt)
+ return 1;
+ if ((event->status & DEPEVT_STATUS_LST) &&
+ (trb->ctrl & (DWC3_TRB_CTRL_LST |
+ DWC3_TRB_CTRL_HWO)))
+ return 1;
+ if ((event->status & DEPEVT_STATUS_IOC) &&
+ (trb->ctrl & DWC3_TRB_CTRL_IOC))
+ return 1;
+ return 0;
+}
+
+static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event, int status)
+{
+ struct dwc3_request *req;
+ struct dwc3_trb *trb;
+ unsigned int slot;
+ unsigned int i;
+ int ret;
+
do {
req = next_request(&dep->req_queued);
if (!req) {
WARN_ON_ONCE(1);
return 1;
}
+ i = 0;
+ do {
+ slot = req->start_slot + i;
+ if ((slot == DWC3_TRB_NUM - 1) &&
+ usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ slot++;
+ slot %= DWC3_TRB_NUM;
+ trb = &dep->trb_pool[slot];
- trb = req->trb;
+ ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
+ event, status);
+ if (ret)
+ break;
+ } while (++i < req->request.num_mapped_sgs);
- if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+ dwc3_gadget_giveback(dep, req, status);
+
+ if (ret)
+ break;
+ } while (1);
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ list_empty(&dep->req_queued)) {
+ if (list_empty(&dep->request_list)) {
/*
- * We continue despite the error. There is not much we
- * can do. If we don't clean it up we loop forever. If
- * we skip the TRB then it gets overwritten after a
- * while since we use them in a ring buffer. A BUG()
- * would help. Lets hope that if this occurs, someone
- * fixes the root cause instead of looking away :)
+ * If there is no entry in request list then do
+ * not issue END TRANSFER now. Just set PENDING
+ * flag, so that END TRANSFER is issued when an
+ * entry is added into request list.
*/
- dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
- dep->name, req->trb);
- count = trb->size & DWC3_TRB_SIZE_MASK;
-
- if (dep->direction) {
- if (count) {
- trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
- if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
- dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
- dep->name);
- dep->current_uf = event->parameters &
- ~(dep->interval - 1);
- dep->flags |= DWC3_EP_MISSED_ISOC;
- } else {
- dev_err(dwc->dev, "incomplete IN transfer %s\n",
- dep->name);
- status = -ECONNRESET;
- }
- }
+ dep->flags = DWC3_EP_PENDING_REQUEST;
} else {
- if (count && (event->status & DEPEVT_STATUS_SHORT))
- s_pkt = 1;
+ dwc3_stop_active_transfer(dwc, dep->number);
+ dep->flags = DWC3_EP_ENABLED;
}
-
- /*
- * We assume here we will always receive the entire data block
- * which we should receive. Meaning, if we program RX to
- * receive 4K but we receive only 2K, we assume that's all we
- * should receive and we simply bounce the request back to the
- * gadget driver for further processing.
- */
- req->request.actual += req->request.length - count;
- dwc3_gadget_giveback(dep, req, status);
- if (s_pkt)
- break;
- if ((event->status & DEPEVT_STATUS_LST) &&
- (trb->ctrl & (DWC3_TRB_CTRL_LST |
- DWC3_TRB_CTRL_HWO)))
- break;
- if ((event->status & DEPEVT_STATUS_IOC) &&
- (trb->ctrl & DWC3_TRB_CTRL_IOC))
- break;
- } while (1);
+ return 1;
+ }
if ((event->status & DEPEVT_STATUS_IOC) &&
(trb->ctrl & DWC3_TRB_CTRL_IOC))
@@ -2156,6 +2215,26 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
break;
}
+ /* Enable USB2 LPM Capability */
+
+ if ((dwc->revision > DWC3_REVISION_194A)
+ && (speed != DWC3_DCFG_SUPERSPEED)) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg |= DWC3_DCFG_LPM_CAP;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
+
+ /*
+ * TODO: This should be configurable. For now using
+ * maximum allowed HIRD threshold value of 0b1100
+ */
+ reg |= DWC3_DCTL_HIRD_THRES(12);
+
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
/* Recent versions support automatic phy suspend and don't need this */
if (dwc->revision < DWC3_REVISION_194A) {
/* Suspend unneeded PHY */
@@ -2462,20 +2541,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
DWC3_DEVTEN_DISCONNEVTEN);
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
- /* Enable USB2 LPM and automatic phy suspend only on recent versions */
+ /* automatic phy suspend only on recent versions */
if (dwc->revision >= DWC3_REVISION_194A) {
- reg = dwc3_readl(dwc->regs, DWC3_DCFG);
- reg |= DWC3_DCFG_LPM_CAP;
- dwc3_writel(dwc->regs, DWC3_DCFG, reg);
-
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
-
- /* TODO: This should be configurable */
- reg |= DWC3_DCTL_HIRD_THRES(28);
-
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
dwc3_gadget_usb2_phy_suspend(dwc, false);
dwc3_gadget_usb3_phy_suspend(dwc, false);
}
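
Aside: a stand-alone sketch (not part of the patch) of the TRB slot walk used by dwc3_cleanup_done_reqs() above: each request records start_slot when its first TRB is prepared, and cleanup revisits start_slot + i for every mapped sg entry, skipping the isoc link TRB in the last ring slot and wrapping modulo the ring size. The ring size of 32 and the sample values below are assumptions for illustration.

#include <stdio.h>

#define EXAMPLE_TRB_NUM 32	/* assumed ring size; see DWC3_TRB_NUM in the driver */

static unsigned int trb_slot(unsigned int start_slot, unsigned int i, int is_isoc)
{
	unsigned int slot = start_slot + i;

	/* the last ring entry is a link TRB on isoc endpoints, skip it */
	if (is_isoc && slot == EXAMPLE_TRB_NUM - 1)
		slot++;

	return slot % EXAMPLE_TRB_NUM;	/* wrap around the ring */
}

int main(void)
{
	unsigned int i;

	/* a hypothetical 3-sg isoc request whose first TRB sits near the ring end */
	for (i = 0; i < 3; i++)
		printf("sg %u -> slot %u\n", i, trb_slot(29, i, 1));
	return 0;
}
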
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 56a62342884d..0fa1846eda4c 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -44,7 +44,7 @@ int dwc3_host_init(struct dwc3 *dwc)
struct platform_device *xhci;
int ret;
- xhci = platform_device_alloc("xhci-hcd", -1);
+ xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
dev_err(dwc->dev, "couldn't allocate xHCI device\n");
ret = -ENOMEM;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 14625fd2cecd..5a0c541daf89 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -281,6 +281,7 @@ config USB_S3C_HSOTG
config USB_IMX
tristate "Freescale i.MX1 USB Peripheral Controller"
depends on ARCH_MXC
+ depends on BROKEN
help
Freescale's i.MX1 includes an integrated full speed
USB 1.1 device controller.
@@ -319,6 +320,7 @@ config USB_S3C_HSUDC
config USB_MV_UDC
tristate "Marvell USB2.0 Device Controller"
+ depends on GENERIC_HARDIRQS
help
Marvell Socs (including PXA and MMP series) include a high speed
USB2.0 OTG controller, which can be configured as high speed or
@@ -440,7 +442,7 @@ config USB_GOKU
config USB_EG20T
tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
This is a USB device driver for EG20T PCH.
EG20T PCH is the platform controller hub that is used in Intel's
@@ -500,6 +502,15 @@ config USB_LIBCOMPOSITE
tristate
depends on USB_GADGET
+config USB_F_ACM
+ tristate
+
+config USB_F_SS_LB
+ tristate
+
+config USB_U_SERIAL
+ tristate
+
choice
tristate "USB Gadget Drivers"
default USB_ETH
@@ -524,6 +535,7 @@ choice
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
select USB_LIBCOMPOSITE
+ select USB_F_SS_LB
help
Gadget Zero is a two-configuration device. It either sinks and
sources bulk data; or it loops back a configurable number of
@@ -750,6 +762,9 @@ config USB_GADGET_TARGET
config USB_G_SERIAL
tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
+ depends on TTY
+ select USB_U_SERIAL
+ select USB_F_ACM
select USB_LIBCOMPOSITE
help
The Serial Gadget talks to the Linux-USB generic serial driver.
@@ -799,10 +814,14 @@ config USB_G_PRINTER
For more information, see Documentation/usb/gadget_printer.txt
which includes sample code for accessing the device file.
+if TTY
+
config USB_CDC_COMPOSITE
tristate "CDC Composite Device (Ethernet and ACM)"
depends on NET
select USB_LIBCOMPOSITE
+ select USB_U_SERIAL
+ select USB_F_ACM
help
This driver provides two functions in one configuration:
a CDC Ethernet (ECM) link, and a CDC ACM (serial port) link.
@@ -818,6 +837,7 @@ config USB_G_NOKIA
tristate "Nokia composite gadget"
depends on PHONET
select USB_LIBCOMPOSITE
+ select USB_U_SERIAL
help
The Nokia composite gadget provides support for acm, obex
and phonet in only one composite gadget driver.
@@ -829,6 +849,8 @@ config USB_G_ACM_MS
tristate "CDC Composite Device (ACM and mass storage)"
depends on BLOCK
select USB_LIBCOMPOSITE
+ select USB_U_SERIAL
+ select USB_F_ACM
help
This driver provides two functions in one configuration:
a mass storage, and a CDC ACM (serial port) link.
@@ -841,6 +863,8 @@ config USB_G_MULTI
depends on BLOCK && NET
select USB_G_MULTI_CDC if !USB_G_MULTI_RNDIS
select USB_LIBCOMPOSITE
+ select USB_U_SERIAL
+ select USB_F_ACM
help
The Multifunction Composite Gadget provides Ethernet (RNDIS
and/or CDC Ethernet), mass storage and ACM serial link
@@ -879,6 +903,8 @@ config USB_G_MULTI_CDC
If unsure, say "y".
+endif # TTY
+
config USB_G_HID
tristate "HID Gadget"
select USB_LIBCOMPOSITE
@@ -895,6 +921,7 @@ config USB_G_HID
# Standalone / single function gadgets
config USB_G_DBGP
tristate "EHCI Debug Device Gadget"
+ depends on TTY
select USB_LIBCOMPOSITE
help
This gadget emulates an EHCI Debug device. This is useful when you want
@@ -916,6 +943,7 @@ config USB_G_DBGP_PRINTK
config USB_G_DBGP_SERIAL
depends on USB_G_DBGP
+ select USB_U_SERIAL
bool "serial"
help
Userland can interact using /dev/ttyGSxxx.
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 8b4acfd92aa3..97a13c349cc5 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -6,7 +6,7 @@ ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
obj-$(CONFIG_USB_GADGET) += udc-core.o
obj-$(CONFIG_USB_LIBCOMPOSITE) += libcomposite.o
libcomposite-y := usbstring.o config.o epautoconf.o
-libcomposite-y += composite.o
+libcomposite-y += composite.o functions.o
obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
obj-$(CONFIG_USB_NET2272) += net2272.o
obj-$(CONFIG_USB_NET2280) += net2280.o
@@ -74,3 +74,9 @@ obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
obj-$(CONFIG_USB_G_NCM) += g_ncm.o
obj-$(CONFIG_USB_G_ACM_MS) += g_acm_ms.o
obj-$(CONFIG_USB_GADGET_TARGET) += tcm_usb_gadget.o
+
+# USB Functions
+obj-$(CONFIG_USB_F_ACM) += f_acm.o
+f_ss_lb-y := f_loopback.o f_sourcesink.o
+obj-$(CONFIG_USB_F_SS_LB) += f_ss_lb.o
+obj-$(CONFIG_USB_U_SERIAL) += u_serial.o
diff --git a/drivers/usb/gadget/acm_ms.c b/drivers/usb/gadget/acm_ms.c
index 5a7f289805ff..8f2b0e391534 100644
--- a/drivers/usb/gadget/acm_ms.c
+++ b/drivers/usb/gadget/acm_ms.c
@@ -40,9 +40,6 @@
* the runtime footprint, and giving us at least some parts of what
* a "gcc --combine ... part1.c part2.c part3.c ... " build would.
*/
-
-#include "u_serial.c"
-#include "f_acm.c"
#include "f_mass_storage.c"
/*-------------------------------------------------------------------------*/
@@ -112,12 +109,15 @@ FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
static struct fsg_common fsg_common;
/*-------------------------------------------------------------------------*/
-
+static unsigned char tty_line;
+static struct usb_function *f_acm;
+static struct usb_function_instance *f_acm_inst;
/*
* We _always_ have both ACM and mass storage functions.
*/
static int __init acm_ms_do_config(struct usb_configuration *c)
{
+ struct f_serial_opts *opts;
int status;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -125,16 +125,35 @@ static int __init acm_ms_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
+ f_acm_inst = usb_get_function_instance("acm");
+ if (IS_ERR(f_acm_inst))
+ return PTR_ERR(f_acm_inst);
+
+ opts = container_of(f_acm_inst, struct f_serial_opts, func_inst);
+ opts->port_num = tty_line;
+
+ f_acm = usb_get_function(f_acm_inst);
+ if (IS_ERR(f_acm)) {
+ status = PTR_ERR(f_acm);
+ goto err_func;
+ }
- status = acm_bind_config(c, 0);
+ status = usb_add_function(c, f_acm);
if (status < 0)
- return status;
+ goto err_conf;
status = fsg_bind_config(c->cdev, c, &fsg_common);
if (status < 0)
- return status;
+ goto err_fsg;
return 0;
+err_fsg:
+ usb_remove_function(c, f_acm);
+err_conf:
+ usb_put_function(f_acm);
+err_func:
+ usb_put_function_instance(f_acm_inst);
+ return status;
}
static struct usb_configuration acm_ms_config_driver = {
@@ -153,7 +172,7 @@ static int __init acm_ms_bind(struct usb_composite_dev *cdev)
void *retp;
/* set up serial link layer */
- status = gserial_setup(cdev->gadget, 1);
+ status = gserial_alloc_line(&tty_line);
if (status < 0)
return status;
@@ -189,14 +208,15 @@ static int __init acm_ms_bind(struct usb_composite_dev *cdev)
fail1:
fsg_common_put(&fsg_common);
fail0:
- gserial_cleanup();
+ gserial_free_line(tty_line);
return status;
}
static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
{
- gserial_cleanup();
-
+ usb_put_function(f_acm);
+ usb_put_function_instance(f_acm_inst);
+ gserial_free_line(tty_line);
return 0;
}
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index fc0ec5e0d58e..75973f33a4c8 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1400,15 +1400,16 @@ static int udc_wakeup(struct usb_gadget *gadget)
return 0;
}
-static int amd5536_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int amd5536_stop(struct usb_gadget_driver *driver);
+static int amd5536_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int amd5536_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
.wakeup = udc_wakeup,
.get_frame = udc_get_frame,
- .start = amd5536_start,
- .stop = amd5536_stop,
+ .udc_start = amd5536_udc_start,
+ .udc_stop = amd5536_udc_stop,
};
/* Setups endpoint parameters, adds endpoints to linked list */
@@ -1913,41 +1914,22 @@ static int setup_ep0(struct udc *dev)
}
/* Called by gadget driver to register itself */
-static int amd5536_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int amd5536_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct udc *dev = udc;
- int retval;
+ struct udc *dev = to_amd5536_udc(g);
u32 tmp;
- if (!driver || !bind || !driver->setup
- || driver->max_speed < USB_SPEED_HIGH)
- return -EINVAL;
- if (!dev)
- return -ENODEV;
- if (dev->driver)
- return -EBUSY;
-
driver->driver.bus = NULL;
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
- retval = bind(&dev->gadget, driver);
-
/* Some gadget drivers use both ep0 directions.
* NOTE: to gadget driver, ep0 is just one endpoint...
*/
dev->ep[UDC_EP0OUT_IX].ep.driver_data =
dev->ep[UDC_EP0IN_IX].ep.driver_data;
- if (retval) {
- DBG(dev, "binding to %s returning %d\n",
- driver->driver.name, retval);
- dev->driver = NULL;
- dev->gadget.dev.driver = NULL;
- return retval;
- }
-
/* get ready for ep0 traffic */
setup_ep0(dev);
@@ -1969,14 +1951,9 @@ __acquires(dev->lock)
{
int tmp;
- if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
- spin_unlock(&dev->lock);
- driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
-
/* empty queues and init hardware */
udc_basic_init(dev);
+
for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
empty_req_queue(&dev->ep[tmp]);
@@ -1984,23 +1961,18 @@ __acquires(dev->lock)
}
/* Called by gadget driver to unregister itself */
-static int amd5536_stop(struct usb_gadget_driver *driver)
+static int amd5536_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct udc *dev = udc;
- unsigned long flags;
+ struct udc *dev = to_amd5536_udc(g);
+ unsigned long flags;
u32 tmp;
- if (!dev)
- return -ENODEV;
- if (!driver || driver != dev->driver || !driver->unbind)
- return -EINVAL;
-
spin_lock_irqsave(&dev->lock, flags);
udc_mask_unused_interrupts(dev);
shutdown(dev, driver);
spin_unlock_irqrestore(&dev->lock, flags);
- driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
@@ -2009,9 +1981,6 @@ static int amd5536_stop(struct usb_gadget_driver *driver)
tmp |= AMD_BIT(UDC_DEVCTL_SD);
writel(tmp, &dev->regs->ctl);
-
- DBG(dev, "%s: unregistered\n", driver->driver.name);
-
return 0;
}
@@ -3231,7 +3200,7 @@ static int udc_pci_probe(
}
if (!pdev->irq) {
- dev_err(&dev->pdev->dev, "irq not set\n");
+ dev_err(&pdev->dev, "irq not set\n");
kfree(dev);
dev = NULL;
retval = -ENODEV;
@@ -3250,7 +3219,7 @@ static int udc_pci_probe(
dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
- dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
+ dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
kfree(dev);
dev = NULL;
retval = -EBUSY;
diff --git a/drivers/usb/gadget/amd5536udc.h b/drivers/usb/gadget/amd5536udc.h
index 14af87d65caa..f1bf32e6b8d8 100644
--- a/drivers/usb/gadget/amd5536udc.h
+++ b/drivers/usb/gadget/amd5536udc.h
@@ -563,6 +563,8 @@ struct udc {
u16 cur_alt;
};
+#define to_amd5536_udc(g) (container_of((g), struct udc, gadget))
+
/* setup request data */
union udc_setup_data {
u32 data[2];
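The to_amd5536_udc() helper added here is what lets the new udc_start/udc_stop callbacks derive the driver state from the struct usb_gadget handed in by the UDC core instead of relying on the file-scope 'udc' pointer. A short sketch of the idea behind it, assuming the gadget member is embedded in the driver-private structure as it is in this driver:

#include <linux/kernel.h>	/* container_of(), offsetof() */
#include <linux/usb/gadget.h>

struct example_udc {
	/* ... controller state ... */
	struct usb_gadget gadget;	/* embedded, handed to the UDC core */
};

/* container_of() subtracts offsetof(struct example_udc, gadget) from the
 * usb_gadget pointer, recovering the enclosing driver-private structure. */
static inline struct example_udc *to_example_udc(struct usb_gadget *g)
{
	return container_of(g, struct example_udc, gadget);
}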
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index f4a21f6f081f..45dd2929a671 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1621,8 +1621,7 @@ static void at91_vbus_timer(unsigned long data)
* bus such as i2c or spi which may sleep, so schedule some work
* to read the vbus gpio
*/
- if (!work_pending(&udc->vbus_timer_work))
- schedule_work(&udc->vbus_timer_work);
+ schedule_work(&udc->vbus_timer_work);
}
static int at91_start(struct usb_gadget *gadget,
@@ -1739,7 +1738,7 @@ static int at91udc_probe(struct platform_device *pdev)
/* rm9200 needs manual D+ pullup; off by default */
if (cpu_is_at91rm9200()) {
- if (gpio_is_valid(udc->board.pullup_pin)) {
+ if (!gpio_is_valid(udc->board.pullup_pin)) {
DBG("no D+ pullup?\n");
retval = -ENODEV;
goto fail0;
@@ -1982,17 +1981,7 @@ static struct platform_driver at91_udc_driver = {
},
};
-static int __init udc_init_module(void)
-{
- return platform_driver_probe(&at91_udc_driver, at91udc_probe);
-}
-module_init(udc_init_module);
-
-static void __exit udc_exit_module(void)
-{
- platform_driver_unregister(&at91_udc_driver);
-}
-module_exit(udc_exit_module);
+module_platform_driver_probe(at91_udc_driver, at91udc_probe);
MODULE_DESCRIPTION("AT91 udc driver");
MODULE_AUTHOR("Thomas Rathbone, David Brownell");
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index a7aed84d98c9..bc19496bcec0 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -2066,17 +2066,7 @@ static struct platform_driver udc_driver = {
},
};
-static int __init udc_init(void)
-{
- return platform_driver_probe(&udc_driver, usba_udc_probe);
-}
-module_init(udc_init);
-
-static void __exit udc_exit(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-module_exit(udc_exit);
+module_platform_driver_probe(udc_driver, usba_udc_probe);
MODULE_DESCRIPTION("Atmel USBA UDC driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index 47a49931361e..8cc8253f1100 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -2351,19 +2351,20 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
dev_err(dev, "error finding USBD resource\n");
return -ENXIO;
}
- udc->usbd_regs = devm_request_and_ioremap(dev, res);
+
+ udc->usbd_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(udc->usbd_regs))
+ return PTR_ERR(udc->usbd_regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
dev_err(dev, "error finding IUDMA resource\n");
return -ENXIO;
}
- udc->iudma_regs = devm_request_and_ioremap(dev, res);
- if (!udc->usbd_regs || !udc->iudma_regs) {
- dev_err(dev, "error requesting resources\n");
- return -ENXIO;
- }
+ udc->iudma_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(udc->iudma_regs))
+ return PTR_ERR(udc->iudma_regs);
spin_lock_init(&udc->lock);
INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
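devm_ioremap_resource() bundles the request-region, ioremap and error-reporting steps that devm_request_and_ioremap() left to the caller, and it returns an ERR_PTR() code rather than NULL on failure, which is why the probe path above now tests IS_ERR(). A hedged sketch of the resulting call pattern (the wrapper function is illustrative only):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

/* Sketch: map the first MEM resource of a platform device.  The helper
 * validates 'res', claims the region, maps it, logs failures itself and
 * returns an ERR_PTR() code, so the caller simply propagates it. */
static int example_map_regs(struct platform_device *pdev, void __iomem **regs)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*regs))
		return PTR_ERR(*regs);

	return 0;
}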
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index 1e4bb77f00bb..a7d6f7026757 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -42,9 +42,6 @@ USB_GADGET_COMPOSITE_OPTIONS();
* the runtime footprint, and giving us at least some parts of what
* a "gcc --combine ... part1.c part2.c part3.c ... " build would.
*/
-
-#include "u_serial.c"
-#include "f_acm.c"
#include "f_ecm.c"
#include "u_ether.c"
@@ -108,12 +105,16 @@ static struct usb_gadget_strings *dev_strings[] = {
static u8 hostaddr[ETH_ALEN];
/*-------------------------------------------------------------------------*/
+static struct usb_function *f_acm;
+static struct usb_function_instance *fi_serial;
+static unsigned char tty_line;
/*
* We _always_ have both CDC ECM and CDC ACM functions.
*/
static int __init cdc_do_config(struct usb_configuration *c)
{
+ struct f_serial_opts *opts;
int status;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -125,11 +126,26 @@ static int __init cdc_do_config(struct usb_configuration *c)
if (status < 0)
return status;
- status = acm_bind_config(c, 0);
- if (status < 0)
- return status;
+ fi_serial = usb_get_function_instance("acm");
+ if (IS_ERR(fi_serial))
+ return PTR_ERR(fi_serial);
+ opts = container_of(fi_serial, struct f_serial_opts, func_inst);
+ opts->port_num = tty_line;
+
+ f_acm = usb_get_function(fi_serial);
+ if (IS_ERR(f_acm))
+ goto err_func_acm;
+
+ status = usb_add_function(c, f_acm);
+ if (status)
+ goto err_conf;
return 0;
+err_conf:
+ usb_put_function(f_acm);
+err_func_acm:
+ usb_put_function_instance(fi_serial);
+ return status;
}
static struct usb_configuration cdc_config_driver = {
@@ -158,7 +174,7 @@ static int __init cdc_bind(struct usb_composite_dev *cdev)
return status;
/* set up serial link layer */
- status = gserial_setup(cdev->gadget, 1);
+ status = gserial_alloc_line(&tty_line);
if (status < 0)
goto fail0;
@@ -184,7 +200,7 @@ static int __init cdc_bind(struct usb_composite_dev *cdev)
return 0;
fail1:
- gserial_cleanup();
+ gserial_free_line(tty_line);
fail0:
gether_cleanup();
return status;
@@ -192,7 +208,9 @@ fail0:
static int __exit cdc_unbind(struct usb_composite_dev *cdev)
{
- gserial_cleanup();
+ usb_put_function(f_acm);
+ usb_put_function_instance(fi_serial);
+ gserial_free_line(tty_line);
gether_cleanup();
return 0;
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 2a6bfe759c29..7c821de8ce3d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -28,6 +28,12 @@
* with the relevant device-wide data.
*/
+static struct usb_gadget_strings **get_containers_gs(
+ struct usb_gadget_string_container *uc)
+{
+ return (struct usb_gadget_strings **)uc->stash;
+}
+
/**
* next_ep_desc() - advance to the next EP descriptor
* @t: currect pointer within descriptor array
@@ -215,6 +221,18 @@ done:
}
EXPORT_SYMBOL_GPL(usb_add_function);
+void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
+{
+ if (f->disable)
+ f->disable(f);
+
+ bitmap_zero(f->endpoints, 32);
+ list_del(&f->list);
+ if (f->unbind)
+ f->unbind(c, f);
+}
+EXPORT_SYMBOL_GPL(usb_remove_function);
+
/**
* usb_function_deactivate - prevent function and gadget enumeration
* @function: the function that isn't yet ready to respond
@@ -320,6 +338,25 @@ int usb_interface_id(struct usb_configuration *config,
}
EXPORT_SYMBOL_GPL(usb_interface_id);
+static u8 encode_bMaxPower(enum usb_device_speed speed,
+ struct usb_configuration *c)
+{
+ unsigned val;
+
+ if (c->MaxPower)
+ val = c->MaxPower;
+ else
+ val = CONFIG_USB_GADGET_VBUS_DRAW;
+ if (!val)
+ return 0;
+ switch (speed) {
+ case USB_SPEED_SUPER:
+ return DIV_ROUND_UP(val, 8);
+ default:
+ return DIV_ROUND_UP(val, 2);
+ }
+}
+
static int config_buf(struct usb_configuration *config,
enum usb_device_speed speed, void *buf, u8 type)
{
@@ -339,7 +376,7 @@ static int config_buf(struct usb_configuration *config,
c->bConfigurationValue = config->bConfigurationValue;
c->iConfiguration = config->iConfiguration;
c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes;
- c->bMaxPower = config->bMaxPower ? : (CONFIG_USB_GADGET_VBUS_DRAW / 2);
+ c->bMaxPower = encode_bMaxPower(speed, config);
/* There may be e.g. OTG descriptors */
if (config->descriptors) {
@@ -656,7 +693,7 @@ static int set_config(struct usb_composite_dev *cdev,
}
/* when we return, be sure our power usage is valid */
- power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW;
+ power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
done:
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
@@ -664,6 +701,31 @@ done:
return result;
}
+int usb_add_config_only(struct usb_composite_dev *cdev,
+ struct usb_configuration *config)
+{
+ struct usb_configuration *c;
+
+ if (!config->bConfigurationValue)
+ return -EINVAL;
+
+ /* Prevent duplicate configuration identifiers */
+ list_for_each_entry(c, &cdev->configs, list) {
+ if (c->bConfigurationValue == config->bConfigurationValue)
+ return -EBUSY;
+ }
+
+ config->cdev = cdev;
+ list_add_tail(&config->list, &cdev->configs);
+
+ INIT_LIST_HEAD(&config->functions);
+ config->next_interface_id = 0;
+ memset(config->interface, 0, sizeof(config->interface));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_add_config_only);
+
/**
* usb_add_config() - add a configuration to a device.
* @cdev: wraps the USB gadget
@@ -684,30 +746,18 @@ int usb_add_config(struct usb_composite_dev *cdev,
int (*bind)(struct usb_configuration *))
{
int status = -EINVAL;
- struct usb_configuration *c;
+
+ if (!bind)
+ goto done;
DBG(cdev, "adding config #%u '%s'/%p\n",
config->bConfigurationValue,
config->label, config);
- if (!config->bConfigurationValue || !bind)
+ status = usb_add_config_only(cdev, config);
+ if (status)
goto done;
- /* Prevent duplicate configuration identifiers */
- list_for_each_entry(c, &cdev->configs, list) {
- if (c->bConfigurationValue == config->bConfigurationValue) {
- status = -EBUSY;
- goto done;
- }
- }
-
- config->cdev = cdev;
- list_add_tail(&config->list, &cdev->configs);
-
- INIT_LIST_HEAD(&config->functions);
- config->next_interface_id = 0;
- memset(config->interface, 0, sizeof(config->interface));
-
status = bind(config);
if (status < 0) {
while (!list_empty(&config->functions)) {
@@ -860,6 +910,7 @@ static int get_string(struct usb_composite_dev *cdev,
void *buf, u16 language, int id)
{
struct usb_composite_driver *composite = cdev->driver;
+ struct usb_gadget_string_container *uc;
struct usb_configuration *c;
struct usb_function *f;
int len;
@@ -892,6 +943,12 @@ static int get_string(struct usb_composite_dev *cdev,
collect_langs(sp, s->wData);
}
}
+ list_for_each_entry(uc, &cdev->gstrings, list) {
+ struct usb_gadget_strings **sp;
+
+ sp = get_containers_gs(uc);
+ collect_langs(sp, s->wData);
+ }
for (len = 0; len <= 126 && s->wData[len]; len++)
continue;
@@ -902,6 +959,15 @@ static int get_string(struct usb_composite_dev *cdev,
return s->bLength;
}
+ list_for_each_entry(uc, &cdev->gstrings, list) {
+ struct usb_gadget_strings **sp;
+
+ sp = get_containers_gs(uc);
+ len = lookup_string(sp, buf, language, id);
+ if (len > 0)
+ return len;
+ }
+
/* String IDs are device-scoped, so we look up each string
* table we're told about. These lookups are infrequent;
* simpler-is-better here.
@@ -987,6 +1053,119 @@ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
}
EXPORT_SYMBOL_GPL(usb_string_ids_tab);
+static struct usb_gadget_string_container *copy_gadget_strings(
+ struct usb_gadget_strings **sp, unsigned n_gstrings,
+ unsigned n_strings)
+{
+ struct usb_gadget_string_container *uc;
+ struct usb_gadget_strings **gs_array;
+ struct usb_gadget_strings *gs;
+ struct usb_string *s;
+ unsigned mem;
+ unsigned n_gs;
+ unsigned n_s;
+ void *stash;
+
+ mem = sizeof(*uc);
+ mem += sizeof(void *) * (n_gstrings + 1);
+ mem += sizeof(struct usb_gadget_strings) * n_gstrings;
+ mem += sizeof(struct usb_string) * (n_strings + 1) * (n_gstrings);
+ uc = kmalloc(mem, GFP_KERNEL);
+ if (!uc)
+ return ERR_PTR(-ENOMEM);
+ gs_array = get_containers_gs(uc);
+ stash = uc->stash;
+ stash += sizeof(void *) * (n_gstrings + 1);
+ for (n_gs = 0; n_gs < n_gstrings; n_gs++) {
+ struct usb_string *org_s;
+
+ gs_array[n_gs] = stash;
+ gs = gs_array[n_gs];
+ stash += sizeof(struct usb_gadget_strings);
+ gs->language = sp[n_gs]->language;
+ gs->strings = stash;
+ org_s = sp[n_gs]->strings;
+
+ for (n_s = 0; n_s < n_strings; n_s++) {
+ s = stash;
+ stash += sizeof(struct usb_string);
+ if (org_s->s)
+ s->s = org_s->s;
+ else
+ s->s = "";
+ org_s++;
+ }
+ s = stash;
+ s->s = NULL;
+ stash += sizeof(struct usb_string);
+
+ }
+ gs_array[n_gs] = NULL;
+ return uc;
+}
+
+/**
+ * usb_gstrings_attach() - attach gadget strings to a cdev and assign ids
+ * @cdev: the device whose string descriptor IDs are being allocated
+ * and attached.
+ * @sp: an array of usb_gadget_strings to attach.
+ * @n_strings: number of entries in each usb_strings array (sp[]->strings)
+ *
+ * This function will create a deep copy of usb_gadget_strings and usb_string
+ * and attach it to the cdev. The actual string (usb_string.s) will not be
+ * copied, only referenced. The struct usb_gadget_strings array may contain
+ * multiple languages and should be NULL terminated.
+ * The ->strings array of each struct usb_gadget_strings has to contain the
+ * same number of entries.
+ * For instance: sp[0] is en-US, sp[1] is es-ES. It is expected that the first
+ * usb_string entry of es-ES contains the translation of the first usb_string
+ * entry of en-US. Therefore both entries are assigned the same id.
+ */
+struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
+ struct usb_gadget_strings **sp, unsigned n_strings)
+{
+ struct usb_gadget_string_container *uc;
+ struct usb_gadget_strings **n_gs;
+ unsigned n_gstrings = 0;
+ unsigned i;
+ int ret;
+
+ for (i = 0; sp[i]; i++)
+ n_gstrings++;
+
+ if (!n_gstrings)
+ return ERR_PTR(-EINVAL);
+
+ uc = copy_gadget_strings(sp, n_gstrings, n_strings);
+ if (IS_ERR(uc))
+ return ERR_PTR(PTR_ERR(uc));
+
+ n_gs = get_containers_gs(uc);
+ ret = usb_string_ids_tab(cdev, n_gs[0]->strings);
+ if (ret)
+ goto err;
+
+ for (i = 1; i < n_gstrings; i++) {
+ struct usb_string *m_s;
+ struct usb_string *s;
+ unsigned n;
+
+ m_s = n_gs[0]->strings;
+ s = n_gs[i]->strings;
+ for (n = 0; n < n_strings; n++) {
+ s->id = m_s->id;
+ s++;
+ m_s++;
+ }
+ }
+ list_add_tail(&uc->list, &cdev->gstrings);
+ return n_gs[0]->strings;
+err:
+ kfree(uc);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(usb_gstrings_attach);
+
/**
* usb_string_ids_n() - allocate unused string IDs in batch
* @c: the device whose string descriptor IDs are being allocated
@@ -1033,7 +1212,7 @@ static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
* housekeeping for the gadget function we're implementing. Most of
* the work is in config and function specific setup.
*/
-static int
+int
composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
@@ -1300,7 +1479,7 @@ done:
return value;
}
-static void composite_disconnect(struct usb_gadget *gadget)
+void composite_disconnect(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
unsigned long flags;
@@ -1330,8 +1509,7 @@ static ssize_t composite_show_suspended(struct device *dev,
static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
-static void
-composite_unbind(struct usb_gadget *gadget)
+static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
@@ -1348,19 +1526,21 @@ composite_unbind(struct usb_gadget *gadget)
struct usb_configuration, list);
remove_config(cdev, c);
}
- if (cdev->driver->unbind)
+ if (cdev->driver->unbind && unbind_driver)
cdev->driver->unbind(cdev);
- if (cdev->req) {
- kfree(cdev->req->buf);
- usb_ep_free_request(gadget->ep0, cdev->req);
- }
- device_remove_file(&gadget->dev, &dev_attr_suspended);
+ composite_dev_cleanup(cdev);
+
kfree(cdev->def_manufacturer);
kfree(cdev);
set_gadget_data(gadget, NULL);
}
+static void composite_unbind(struct usb_gadget *gadget)
+{
+ __composite_unbind(gadget, true);
+}
+
static void update_unchanged_dev_desc(struct usb_device_descriptor *new,
const struct usb_device_descriptor *old)
{
@@ -1399,34 +1579,25 @@ static void update_unchanged_dev_desc(struct usb_device_descriptor *new,
new->iProduct = iProduct;
}
-static struct usb_composite_driver *to_cdriver(struct usb_gadget_driver *gdrv)
-{
- return container_of(gdrv, struct usb_composite_driver, gadget_driver);
-}
-
-static int composite_bind(struct usb_gadget *gadget,
- struct usb_gadget_driver *gdriver)
+int composite_dev_prepare(struct usb_composite_driver *composite,
+ struct usb_composite_dev *cdev)
{
- struct usb_composite_dev *cdev;
- struct usb_composite_driver *composite = to_cdriver(gdriver);
- int status = -ENOMEM;
-
- cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
- if (!cdev)
- return status;
-
- spin_lock_init(&cdev->lock);
- cdev->gadget = gadget;
- set_gadget_data(gadget, cdev);
- INIT_LIST_HEAD(&cdev->configs);
+ struct usb_gadget *gadget = cdev->gadget;
+ int ret = -ENOMEM;
/* preallocate control response and buffer */
cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
if (!cdev->req)
- goto fail;
+ return -ENOMEM;
+
cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
if (!cdev->req->buf)
goto fail;
+
+ ret = device_create_file(&gadget->dev, &dev_attr_suspended);
+ if (ret)
+ goto fail_dev;
+
cdev->req->complete = composite_setup_complete;
gadget->ep0->driver_data = cdev;
@@ -1444,7 +1615,51 @@ static int composite_bind(struct usb_gadget *gadget,
* we force endpoints to start unassigned; few controller
* drivers will zero ep->driver_data.
*/
- usb_ep_autoconfig_reset(cdev->gadget);
+ usb_ep_autoconfig_reset(gadget);
+ return 0;
+fail_dev:
+ kfree(cdev->req->buf);
+fail:
+ usb_ep_free_request(gadget->ep0, cdev->req);
+ cdev->req = NULL;
+ return ret;
+}
+
+void composite_dev_cleanup(struct usb_composite_dev *cdev)
+{
+ struct usb_gadget_string_container *uc, *tmp;
+
+ list_for_each_entry_safe(uc, tmp, &cdev->gstrings, list) {
+ list_del(&uc->list);
+ kfree(uc);
+ }
+ if (cdev->req) {
+ kfree(cdev->req->buf);
+ usb_ep_free_request(cdev->gadget->ep0, cdev->req);
+ }
+ device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);
+}
+
+static int composite_bind(struct usb_gadget *gadget,
+ struct usb_gadget_driver *gdriver)
+{
+ struct usb_composite_dev *cdev;
+ struct usb_composite_driver *composite = to_cdriver(gdriver);
+ int status = -ENOMEM;
+
+ cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
+ if (!cdev)
+ return status;
+
+ spin_lock_init(&cdev->lock);
+ cdev->gadget = gadget;
+ set_gadget_data(gadget, cdev);
+ INIT_LIST_HEAD(&cdev->configs);
+ INIT_LIST_HEAD(&cdev->gstrings);
+
+ status = composite_dev_prepare(composite, cdev);
+ if (status)
+ goto fail;
/* composite gadget needs to assign strings for whole device (like
* serial number), register function drivers, potentially update
@@ -1460,16 +1675,11 @@ static int composite_bind(struct usb_gadget *gadget,
if (composite->needs_serial && !cdev->desc.iSerialNumber)
WARNING(cdev, "userspace failed to provide iSerialNumber\n");
- /* finish up */
- status = device_create_file(&gadget->dev, &dev_attr_suspended);
- if (status)
- goto fail;
-
INFO(cdev, "%s ready\n", composite->name);
return 0;
fail:
- composite_unbind(gadget);
+ __composite_unbind(gadget, false);
return status;
}
@@ -1518,10 +1728,10 @@ composite_resume(struct usb_gadget *gadget)
f->resume(f);
}
- maxpower = cdev->config->bMaxPower;
+ maxpower = cdev->config->MaxPower;
usb_gadget_vbus_draw(gadget, maxpower ?
- (2 * maxpower) : CONFIG_USB_GADGET_VBUS_DRAW);
+ maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
}
cdev->suspended = 0;
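The composite.c changes above keep MaxPower in milliamperes and only convert to descriptor units in encode_bMaxPower(): bMaxPower is expressed in 2 mA units up to high speed and in 8 mA units at SuperSpeed, which is where the two DIV_ROUND_UP() divisors come from. A worked example with an assumed 500 mA configuration:

/* Assumed value: c->MaxPower = 500 (mA).
 *
 *   up to high speed:  bMaxPower = DIV_ROUND_UP(500, 2) = 250  ->  250 * 2 mA = 500 mA
 *   SuperSpeed:        bMaxPower = DIV_ROUND_UP(500, 8) =  63  ->   63 * 8 mA = 504 mA
 *
 * Rounding up means the advertised budget never falls below the configured
 * value.  set_config() and composite_resume() can then hand MaxPower to
 * usb_gadget_vbus_draw() directly, without the old "2 * bMaxPower" step.
 */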
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
index 87d165028162..986fc511a2ed 100644
--- a/drivers/usb/gadget/dbgp.c
+++ b/drivers/usb/gadget/dbgp.c
@@ -13,9 +13,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#ifdef CONFIG_USB_G_DBGP_SERIAL
-#include "u_serial.c"
-#endif
+#include "u_serial.h"
#define DRIVER_VENDOR_ID 0x0525 /* NetChip */
#define DRIVER_PRODUCT_ID 0xc0de /* undefined */
@@ -233,6 +231,10 @@ static void dbgp_unbind(struct usb_gadget *gadget)
gadget->ep0->driver_data = NULL;
}
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+static unsigned char tty_line;
+#endif
+
static int __init dbgp_configure_endpoints(struct usb_gadget *gadget)
{
int stp;
@@ -270,7 +272,7 @@ static int __init dbgp_configure_endpoints(struct usb_gadget *gadget)
dbgp.serial->in->desc = &i_desc;
dbgp.serial->out->desc = &o_desc;
- if (gserial_setup(gadget, 1) < 0) {
+ if (gserial_alloc_line(&tty_line)) {
stp = 3;
goto fail_3;
}
@@ -379,7 +381,7 @@ static int dbgp_setup(struct usb_gadget *gadget,
#ifdef CONFIG_USB_G_DBGP_PRINTK
err = dbgp_enable_ep();
#else
- err = gserial_connect(dbgp.serial, 0);
+ err = gserial_connect(dbgp.serial, tty_line);
#endif
if (err < 0)
goto fail;
@@ -422,7 +424,7 @@ static void __exit dbgp_exit(void)
{
usb_gadget_unregister_driver(&dbgp_driver);
#ifdef CONFIG_USB_G_DBGP_SERIAL
- gserial_cleanup();
+ gserial_free_line(tty_line);
#endif
}
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 95d584dbed13..8cf0c0f6fa1f 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -130,10 +130,7 @@ static const char ep0name[] = "ep0";
static const char *const ep_name[] = {
ep0name, /* everyone has ep0 */
- /* act like a net2280: high speed, six configurable endpoints */
- "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
-
- /* or like pxa250: fifteen fixed function endpoints */
+ /* act like a pxa250: fifteen fixed function endpoints */
"ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
"ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
"ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
@@ -141,6 +138,10 @@ static const char *const ep_name[] = {
/* or like sa1100: two fixed function endpoints */
"ep1out-bulk", "ep2in-bulk",
+
+ /* and now some generic EPs so we have enough in multi config */
+ "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
+ "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
};
#define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 549174466c21..1ae180baa597 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -16,7 +16,9 @@
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/device.h>
+#include <linux/err.h>
#include "u_serial.h"
#include "gadget_chips.h"
@@ -283,7 +285,6 @@ static struct usb_string acm_string_defs[] = {
[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
[ACM_DATA_IDX].s = "CDC ACM Data",
[ACM_IAD_IDX ].s = "CDC Serial",
- { /* ZEROES END LIST */ },
};
static struct usb_gadget_strings acm_string_table = {
@@ -605,9 +606,23 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_acm *acm = func_to_acm(f);
+ struct usb_string *us;
int status;
struct usb_ep *ep;
+ /* REVISIT might want instance-specific strings to help
+ * distinguish instances ...
+ */
+
+ /* maybe allocate device-global string IDs, and patch descriptors */
+ us = usb_gstrings_attach(cdev, acm_strings,
+ ARRAY_SIZE(acm_string_defs));
+ if (IS_ERR(us))
+ return PTR_ERR(us);
+ acm_control_interface_desc.iInterface = us[ACM_CTRL_IDX].id;
+ acm_data_interface_desc.iInterface = us[ACM_DATA_IDX].id;
+ acm_iad_descriptor.iFunction = us[ACM_IAD_IDX].id;
+
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
@@ -700,24 +715,42 @@ fail:
return status;
}
+static struct f_acm *acm_alloc_basic_func(void)
+{
+ struct f_acm *acm;
+
+ acm = kzalloc(sizeof(*acm), GFP_KERNEL);
+ if (!acm)
+ return NULL;
+
+ spin_lock_init(&acm->lock);
+
+ acm->port.connect = acm_connect;
+ acm->port.disconnect = acm_disconnect;
+ acm->port.send_break = acm_send_break;
+
+ acm->port.func.name = "acm";
+ /* descriptors are per-instance copies */
+ acm->port.func.bind = acm_bind;
+ acm->port.func.set_alt = acm_set_alt;
+ acm->port.func.setup = acm_setup;
+ acm->port.func.disable = acm_disable;
+
+ return acm;
+}
+
+#ifdef USB_FACM_INCLUDED
static void
-acm_unbind(struct usb_configuration *c, struct usb_function *f)
+acm_old_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
- acm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
- gs_free_req(acm->notify, acm->notify_req);
+ if (acm->notify_req)
+ gs_free_req(acm->notify, acm->notify_req);
kfree(acm);
}
-/* Some controllers can't support CDC ACM ... */
-static inline bool can_support_cdc(struct usb_configuration *c)
-{
- /* everything else is *probably* fine ... */
- return true;
-}
-
/**
* acm_bind_config - add a CDC ACM function to a configuration
* @c: the configuration to support the CDC ACM instance
@@ -726,58 +759,80 @@ static inline bool can_support_cdc(struct usb_configuration *c)
*
* Returns zero on success, else negative errno.
*
- * Caller must have called @gserial_setup() with enough ports to
- * handle all the ones it binds. Caller is also responsible
- * for calling @gserial_cleanup() before module unload.
*/
int acm_bind_config(struct usb_configuration *c, u8 port_num)
{
struct f_acm *acm;
int status;
- if (!can_support_cdc(c))
- return -EINVAL;
-
- /* REVISIT might want instance-specific strings to help
- * distinguish instances ...
- */
-
- /* maybe allocate device-global string IDs, and patch descriptors */
- if (acm_string_defs[0].id == 0) {
- status = usb_string_ids_tab(c->cdev, acm_string_defs);
- if (status < 0)
- return status;
- acm_control_interface_desc.iInterface =
- acm_string_defs[ACM_CTRL_IDX].id;
- acm_data_interface_desc.iInterface =
- acm_string_defs[ACM_DATA_IDX].id;
- acm_iad_descriptor.iFunction = acm_string_defs[ACM_IAD_IDX].id;
- }
-
/* allocate and initialize one new instance */
- acm = kzalloc(sizeof *acm, GFP_KERNEL);
+ acm = acm_alloc_basic_func();
if (!acm)
return -ENOMEM;
- spin_lock_init(&acm->lock);
-
acm->port_num = port_num;
-
- acm->port.connect = acm_connect;
- acm->port.disconnect = acm_disconnect;
- acm->port.send_break = acm_send_break;
-
- acm->port.func.name = "acm";
- acm->port.func.strings = acm_strings;
- /* descriptors are per-instance copies */
- acm->port.func.bind = acm_bind;
- acm->port.func.unbind = acm_unbind;
- acm->port.func.set_alt = acm_set_alt;
- acm->port.func.setup = acm_setup;
- acm->port.func.disable = acm_disable;
+ acm->port.func.unbind = acm_old_unbind;
status = usb_add_function(c, &acm->port.func);
if (status)
kfree(acm);
return status;
}
+
+#else
+
+static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_acm *acm = func_to_acm(f);
+
+ acm_string_defs[0].id = 0;
+ usb_free_all_descriptors(f);
+ if (acm->notify_req)
+ gs_free_req(acm->notify, acm->notify_req);
+}
+
+static void acm_free_func(struct usb_function *f)
+{
+ struct f_acm *acm = func_to_acm(f);
+
+ kfree(acm);
+}
+
+static struct usb_function *acm_alloc_func(struct usb_function_instance *fi)
+{
+ struct f_serial_opts *opts;
+ struct f_acm *acm;
+
+ acm = acm_alloc_basic_func();
+ if (!acm)
+ return ERR_PTR(-ENOMEM);
+
+ opts = container_of(fi, struct f_serial_opts, func_inst);
+ acm->port_num = opts->port_num;
+ acm->port.func.unbind = acm_unbind;
+ acm->port.func.free_func = acm_free_func;
+
+ return &acm->port.func;
+}
+
+static void acm_free_instance(struct usb_function_instance *fi)
+{
+ struct f_serial_opts *opts;
+
+ opts = container_of(fi, struct f_serial_opts, func_inst);
+ kfree(opts);
+}
+
+static struct usb_function_instance *acm_alloc_instance(void)
+{
+ struct f_serial_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+ opts->func_inst.free_func_inst = acm_free_instance;
+ return &opts->func_inst;
+}
+DECLARE_USB_FUNCTION_INIT(acm, acm_alloc_instance, acm_alloc_func);
+MODULE_LICENSE("GPL");
+#endif
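DECLARE_USB_FUNCTION_INIT(acm, ...) is what registers the two allocation callbacks under the name "acm", so the usb_get_function_instance("acm") calls in the converted gadgets can resolve them. Roughly, and only as an assumed sketch of the macro in <linux/usb/composite.h>, it boils down to:

/* Assumed expansion sketch of DECLARE_USB_FUNCTION_INIT(acm,
 * acm_alloc_instance, acm_alloc_func); the exact form may differ. */
static struct usb_function_driver acmusb_func = {
	.name       = "acm",
	.mod        = THIS_MODULE,
	.alloc_inst = acm_alloc_instance,
	.alloc_func = acm_alloc_func,
};

static int __init acmmod_init(void)
{
	return usb_function_register(&acmusb_func);
}
module_init(acmmod_init);

static void __exit acmmod_exit(void)
{
	usb_function_unregister(&acmusb_func);
}
module_exit(acmmod_exit);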
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 4a6961c517f2..38388d7844fc 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1103,8 +1103,8 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
return 0;
for (;;) {
- char *end, *eq, *comma;
unsigned long value;
+ char *eq, *comma;
/* Option limit */
comma = strchr(opts, ',');
@@ -1120,8 +1120,7 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
*eq = 0;
/* Parse value */
- value = simple_strtoul(eq + 1, &end, 0);
- if (unlikely(*end != ',' && *end != 0)) {
+ if (kstrtoul(eq + 1, 0, &value)) {
pr_err("%s: invalid value: %s\n", opts, eq + 1);
return -EINVAL;
}
@@ -1153,15 +1152,15 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
pr_err("%s: unmapped value: %lu\n", opts, value);
return -EINVAL;
}
- }
- else if (!memcmp(opts, "gid", 3))
+ } else if (!memcmp(opts, "gid", 3)) {
data->perms.gid = make_kgid(current_user_ns(), value);
if (!gid_valid(data->perms.gid)) {
pr_err("%s: unmapped value: %lu\n", opts, value);
return -EINVAL;
}
- else
+ } else {
goto invalid;
+ }
break;
default:
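Switching the f_fs option parsing to kstrtoul() also tightens it: kstrtoul() succeeds only if the whole string (an optional trailing newline aside) is a valid number and returns an error code otherwise, so the manual end-pointer check after simple_strtoul() can be dropped. A small illustrative sketch (the input strings are hypothetical):

#include <linux/kernel.h>

static void example_parse(void)
{
	unsigned long value;
	int ret;

	ret = kstrtoul("0770", 0, &value);  /* ret == 0, value == 504 (0770 octal, base 0) */
	ret = kstrtoul("0770x", 0, &value); /* ret == -EINVAL, trailing junk rejected */
	(void)ret;
}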
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index bb39cb2bb3a3..4a3873a0f2d0 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -15,10 +15,11 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/usb/composite.h>
#include "g_zero.h"
-#include "gadget_chips.h"
-
/*
* LOOPBACK FUNCTION ... a testing vehicle for USB peripherals,
@@ -44,9 +45,8 @@ static inline struct f_loopback *func_to_loop(struct usb_function *f)
return container_of(f, struct f_loopback, function);
}
-static unsigned qlen = 32;
-module_param(qlen, uint, 0);
-MODULE_PARM_DESC(qlenn, "depth of loopback queue");
+static unsigned qlen;
+static unsigned buflen;
/*-------------------------------------------------------------------------*/
@@ -171,8 +171,7 @@ static struct usb_gadget_strings *loopback_strings[] = {
/*-------------------------------------------------------------------------*/
-static int __init
-loopback_bind(struct usb_configuration *c, struct usb_function *f)
+static int loopback_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_loopback *loop = func_to_loop(f);
@@ -185,6 +184,12 @@ loopback_bind(struct usb_configuration *c, struct usb_function *f)
return id;
loopback_intf.bInterfaceNumber = id;
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_loopback[0].id = id;
+ loopback_intf.iInterface = id;
+
/* allocate endpoints */
loop->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_loop_source_desc);
@@ -223,8 +228,7 @@ autoconf_fail:
return 0;
}
-static void
-loopback_unbind(struct usb_configuration *c, struct usb_function *f)
+static void lb_free_func(struct usb_function *f)
{
usb_free_all_descriptors(f);
kfree(func_to_loop(f));
@@ -366,63 +370,64 @@ static void loopback_disable(struct usb_function *f)
disable_loopback(loop);
}
-/*-------------------------------------------------------------------------*/
-
-static int __init loopback_bind_config(struct usb_configuration *c)
+static struct usb_function *loopback_alloc(struct usb_function_instance *fi)
{
struct f_loopback *loop;
- int status;
+ struct f_lb_opts *lb_opts;
loop = kzalloc(sizeof *loop, GFP_KERNEL);
if (!loop)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ lb_opts = container_of(fi, struct f_lb_opts, func_inst);
+ buflen = lb_opts->bulk_buflen;
+ qlen = lb_opts->qlen;
+ if (!qlen)
+ qlen = 32;
loop->function.name = "loopback";
loop->function.bind = loopback_bind;
- loop->function.unbind = loopback_unbind;
loop->function.set_alt = loopback_set_alt;
loop->function.disable = loopback_disable;
+ loop->function.strings = loopback_strings;
- status = usb_add_function(c, &loop->function);
- if (status)
- kfree(loop);
- return status;
-}
+ loop->function.free_func = lb_free_func;
-static struct usb_configuration loopback_driver = {
- .label = "loopback",
- .strings = loopback_strings,
- .bConfigurationValue = 2,
- .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
- /* .iConfiguration = DYNAMIC */
-};
+ return &loop->function;
+}
-/**
- * loopback_add - add a loopback testing configuration to a device
- * @cdev: the device to support the loopback configuration
- */
-int __init loopback_add(struct usb_composite_dev *cdev, bool autoresume)
+static void lb_free_instance(struct usb_function_instance *fi)
{
- int id;
+ struct f_lb_opts *lb_opts;
- /* allocate string ID(s) */
- id = usb_string_id(cdev);
- if (id < 0)
- return id;
- strings_loopback[0].id = id;
+ lb_opts = container_of(fi, struct f_lb_opts, func_inst);
+ kfree(lb_opts);
+}
- loopback_intf.iInterface = id;
- loopback_driver.iConfiguration = id;
+static struct usb_function_instance *loopback_alloc_instance(void)
+{
+ struct f_lb_opts *lb_opts;
- /* support autoresume for remote wakeup testing */
- if (autoresume)
- loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ lb_opts = kzalloc(sizeof(*lb_opts), GFP_KERNEL);
+ if (!lb_opts)
+ return ERR_PTR(-ENOMEM);
+ lb_opts->func_inst.free_func_inst = lb_free_instance;
+ return &lb_opts->func_inst;
+}
+DECLARE_USB_FUNCTION(Loopback, loopback_alloc_instance, loopback_alloc);
- /* support OTG systems */
- if (gadget_is_otg(cdev->gadget)) {
- loopback_driver.descriptors = otg_desc;
- loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
- }
+int __init lb_modinit(void)
+{
+ int ret;
- return usb_add_config(cdev, &loopback_driver, loopback_bind_config);
+ ret = usb_function_register(&Loopbackusb_func);
+ if (ret)
+ return ret;
+ return ret;
}
+void __exit lb_modexit(void)
+{
+ usb_function_unregister(&Loopbackusb_func);
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 5d027b3e1ef0..fc5c16ca5e0a 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -246,20 +246,6 @@ struct fsg_operations {
* set).
*/
int (*thread_exits)(struct fsg_common *common);
-
- /*
- * Called prior to ejection. Negative return means error,
- * zero means to continue with ejection, positive means not to
- * eject.
- */
- int (*pre_eject)(struct fsg_common *common,
- struct fsg_lun *lun, int num);
- /*
- * Called after ejection. Negative return means error, zero
- * or positive is just a success.
- */
- int (*post_eject)(struct fsg_common *common,
- struct fsg_lun *lun, int num);
};
/* Data shared by all the FSG instances. */
@@ -1374,26 +1360,13 @@ static int do_start_stop(struct fsg_common *common)
if (!loej)
return 0;
- /* Simulate an unload/eject */
- if (common->ops && common->ops->pre_eject) {
- int r = common->ops->pre_eject(common, curlun,
- curlun - common->luns);
- if (unlikely(r < 0))
- return r;
- else if (r)
- return 0;
- }
-
up_read(&common->filesem);
down_write(&common->filesem);
fsg_lun_close(curlun);
up_write(&common->filesem);
down_read(&common->filesem);
- return common->ops && common->ops->post_eject
- ? min(0, common->ops->post_eject(common, curlun,
- curlun - common->luns))
- : 0;
+ return 0;
}
static int do_prevent_allow(struct fsg_common *common)
@@ -1718,7 +1691,7 @@ static int check_command(struct fsg_common *common, int cmnd_size,
int needs_medium, const char *name)
{
int i;
- int lun = common->cmnd[1] >> 5;
+ unsigned int lun = common->cmnd[1] >> 5;
static const char dirletter[4] = {'u', 'o', 'i', 'n'};
char hdlen[20];
struct fsg_lun *curlun;
@@ -1784,7 +1757,7 @@ static int check_command(struct fsg_common *common, int cmnd_size,
/* Check that the LUN values are consistent */
if (common->lun != lun)
- DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
+ DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n",
common->lun, lun);
/* Check the LUN */
@@ -1804,7 +1777,7 @@ static int check_command(struct fsg_common *common, int cmnd_size,
*/
if (common->cmnd[0] != INQUIRY &&
common->cmnd[0] != REQUEST_SENSE) {
- DBG(common, "unsupported LUN %d\n", common->lun);
+ DBG(common, "unsupported LUN %u\n", common->lun);
return -EINVAL;
}
}
@@ -2196,7 +2169,7 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
if (common->data_size == 0)
common->data_dir = DATA_DIR_NONE;
common->lun = cbw->Lun;
- if (common->lun >= 0 && common->lun < common->nluns)
+ if (common->lun < common->nluns)
common->curlun = &common->luns[common->lun];
else
common->curlun = NULL;
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
index 6c8362f937be..5e7557e23ecc 100644
--- a/drivers/usb/gadget/f_ncm.c
+++ b/drivers/usb/gadget/f_ncm.c
@@ -56,8 +56,9 @@ struct f_ncm {
u8 notify_state;
bool is_open;
- struct ndp_parser_opts *parser_opts;
+ const struct ndp_parser_opts *parser_opts;
bool is_crc;
+ u32 ndp_sign;
/*
* for notification, it is accessed from both
@@ -390,8 +391,8 @@ struct ndp_parser_opts {
.next_fp_index = 2, \
}
-static struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
-static struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;
+static const struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
+static const struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;
static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
{
@@ -732,8 +733,7 @@ static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
default:
goto invalid;
}
- ncm->parser_opts->ndp_sign &= ~NCM_NDP_HDR_CRC_MASK;
- ncm->parser_opts->ndp_sign |= ndp_hdr_crc;
+ ncm->ndp_sign = ncm->parser_opts->ndp_sign | ndp_hdr_crc;
value = 0;
break;
}
@@ -875,7 +875,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
int ndp_align;
int ndp_pad;
unsigned max_size = ncm->port.fixed_in_len;
- struct ndp_parser_opts *opts = ncm->parser_opts;
+ const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
div = le16_to_cpu(ntb_parameters.wNdpInDivisor);
@@ -921,7 +921,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
tmp = (void *)tmp + ndp_pad;
/* NDP */
- put_unaligned_le32(opts->ndp_sign, tmp); /* dwSignature */
+ put_unaligned_le32(ncm->ndp_sign, tmp); /* dwSignature */
tmp += 2;
/* wLength */
put_unaligned_le16(ncb_len - opts->nth_size - pad, tmp++);
@@ -965,7 +965,7 @@ static int ncm_unwrap_ntb(struct gether *port,
struct sk_buff *skb2;
int ret = -EINVAL;
unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
- struct ndp_parser_opts *opts = ncm->parser_opts;
+ const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
@@ -1002,7 +1002,7 @@ static int ncm_unwrap_ntb(struct gether *port,
/* walk through NDP */
tmp = ((void *)skb->data) + index;
- if (get_unaligned_le32(tmp) != opts->ndp_sign) {
+ if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
goto err;
}
diff --git a/drivers/usb/gadget/f_obex.c b/drivers/usb/gadget/f_obex.c
index d8dd8782768c..36a004563b82 100644
--- a/drivers/usb/gadget/f_obex.c
+++ b/drivers/usb/gadget/f_obex.c
@@ -406,10 +406,6 @@ static inline bool can_support_obex(struct usb_configuration *c)
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
- *
- * Caller must have called @gserial_setup() with enough ports to
- * handle all the ones it binds. Caller is also responsible
- * for calling @gserial_cleanup() before module unload.
*/
int __init obex_bind_config(struct usb_configuration *c, u8 port_num)
{
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 98fa7795df5f..da33cfb3031d 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -260,10 +260,6 @@ gser_unbind(struct usb_configuration *c, struct usb_function *f)
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
- *
- * Caller must have called @gserial_setup() with enough ports to
- * handle all the ones it binds. Caller is also responsible
- * for calling @gserial_cleanup() before module unload.
*/
int __init gser_bind_config(struct usb_configuration *c, u8 port_num)
{
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index 102d49beb9df..41adf3ef96c2 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -16,11 +16,12 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/usb/composite.h>
+#include <linux/err.h>
#include "g_zero.h"
#include "gadget_chips.h"
-
/*
* SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
* controller drivers.
@@ -62,24 +63,11 @@ static inline struct f_sourcesink *func_to_ss(struct usb_function *f)
}
static unsigned pattern;
-module_param(pattern, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(pattern, "0 = all zeroes, 1 = mod63, 2 = none");
-
-static unsigned isoc_interval = 4;
-module_param(isoc_interval, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(isoc_interval, "1 - 16");
-
-static unsigned isoc_maxpacket = 1024;
-module_param(isoc_maxpacket, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(isoc_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)");
-
+static unsigned isoc_interval;
+static unsigned isoc_maxpacket;
static unsigned isoc_mult;
-module_param(isoc_mult, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(isoc_mult, "0 - 2 (hs/ss only)");
-
static unsigned isoc_maxburst;
-module_param(isoc_maxburst, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
+static unsigned buflen;
/*-------------------------------------------------------------------------*/
@@ -313,7 +301,57 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
/*-------------------------------------------------------------------------*/
-static int __init
+struct usb_request *alloc_ep_req(struct usb_ep *ep, int len)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (req) {
+ if (len)
+ req->length = len;
+ else
+ req->length = buflen;
+ req->buf = kmalloc(req->length, GFP_ATOMIC);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+ return req;
+}
+
+void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
+{
+ int value;
+
+ if (ep->driver_data) {
+ value = usb_ep_disable(ep);
+ if (value < 0)
+ DBG(cdev, "disable %s --> %d\n",
+ ep->name, value);
+ ep->driver_data = NULL;
+ }
+}
+
+void disable_endpoints(struct usb_composite_dev *cdev,
+ struct usb_ep *in, struct usb_ep *out,
+ struct usb_ep *iso_in, struct usb_ep *iso_out)
+{
+ disable_ep(cdev, in);
+ disable_ep(cdev, out);
+ if (iso_in)
+ disable_ep(cdev, iso_in);
+ if (iso_out)
+ disable_ep(cdev, iso_out);
+}
+
+static int
sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
@@ -450,7 +488,7 @@ no_iso:
}
static void
-sourcesink_unbind(struct usb_configuration *c, struct usb_function *f)
+sourcesink_free_func(struct usb_function *f)
{
usb_free_all_descriptors(f);
kfree(func_to_ss(f));
@@ -531,8 +569,7 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
check_read_data(ss, req);
if (pattern != 2)
memset(req->buf, 0x55, req->length);
- } else
- reinit_write_data(ep, req);
+ }
break;
/* this endpoint is normally active while we're configured */
@@ -758,31 +795,10 @@ static void sourcesink_disable(struct usb_function *f)
/*-------------------------------------------------------------------------*/
-static int __init sourcesink_bind_config(struct usb_configuration *c)
-{
- struct f_sourcesink *ss;
- int status;
-
- ss = kzalloc(sizeof *ss, GFP_KERNEL);
- if (!ss)
- return -ENOMEM;
-
- ss->function.name = "source/sink";
- ss->function.bind = sourcesink_bind;
- ss->function.unbind = sourcesink_unbind;
- ss->function.set_alt = sourcesink_set_alt;
- ss->function.get_alt = sourcesink_get_alt;
- ss->function.disable = sourcesink_disable;
-
- status = usb_add_function(c, &ss->function);
- if (status)
- kfree(ss);
- return status;
-}
-
-static int sourcesink_setup(struct usb_configuration *c,
+static int sourcesink_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
+ struct usb_configuration *c = f->config;
struct usb_request *req = c->cdev->req;
int value = -EOPNOTSUPP;
u16 w_index = le16_to_cpu(ctrl->wIndex);
@@ -851,42 +867,76 @@ unknown:
return value;
}
-static struct usb_configuration sourcesink_driver = {
- .label = "source/sink",
- .strings = sourcesink_strings,
- .setup = sourcesink_setup,
- .bConfigurationValue = 3,
- .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
- /* .iConfiguration = DYNAMIC */
-};
+static struct usb_function *source_sink_alloc_func(
+ struct usb_function_instance *fi)
+{
+ struct f_sourcesink *ss;
+ struct f_ss_opts *ss_opts;
-/**
- * sourcesink_add - add a source/sink testing configuration to a device
- * @cdev: the device to support the configuration
- */
-int __init sourcesink_add(struct usb_composite_dev *cdev, bool autoresume)
+ ss = kzalloc(sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return NULL;
+
+ ss_opts = container_of(fi, struct f_ss_opts, func_inst);
+ pattern = ss_opts->pattern;
+ isoc_interval = ss_opts->isoc_interval;
+ isoc_maxpacket = ss_opts->isoc_maxpacket;
+ isoc_mult = ss_opts->isoc_mult;
+ isoc_maxburst = ss_opts->isoc_maxburst;
+ buflen = ss_opts->bulk_buflen;
+
+ ss->function.name = "source/sink";
+ ss->function.bind = sourcesink_bind;
+ ss->function.set_alt = sourcesink_set_alt;
+ ss->function.get_alt = sourcesink_get_alt;
+ ss->function.disable = sourcesink_disable;
+ ss->function.setup = sourcesink_setup;
+ ss->function.strings = sourcesink_strings;
+
+ ss->function.free_func = sourcesink_free_func;
+
+ return &ss->function;
+}
+
+static void ss_free_instance(struct usb_function_instance *fi)
{
- int id;
+ struct f_ss_opts *ss_opts;
- /* allocate string ID(s) */
- id = usb_string_id(cdev);
- if (id < 0)
- return id;
- strings_sourcesink[0].id = id;
+ ss_opts = container_of(fi, struct f_ss_opts, func_inst);
+ kfree(ss_opts);
+}
- source_sink_intf_alt0.iInterface = id;
- source_sink_intf_alt1.iInterface = id;
- sourcesink_driver.iConfiguration = id;
+static struct usb_function_instance *source_sink_alloc_inst(void)
+{
+ struct f_ss_opts *ss_opts;
- /* support autoresume for remote wakeup testing */
- if (autoresume)
- sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ ss_opts = kzalloc(sizeof(*ss_opts), GFP_KERNEL);
+ if (!ss_opts)
+ return ERR_PTR(-ENOMEM);
+ ss_opts->func_inst.free_func_inst = ss_free_instance;
+ return &ss_opts->func_inst;
+}
+DECLARE_USB_FUNCTION(SourceSink, source_sink_alloc_inst,
+ source_sink_alloc_func);
- /* support OTG systems */
- if (gadget_is_otg(cdev->gadget)) {
- sourcesink_driver.descriptors = otg_desc;
- sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
- }
+static int __init sslb_modinit(void)
+{
+ int ret;
- return usb_add_config(cdev, &sourcesink_driver, sourcesink_bind_config);
+ ret = usb_function_register(&SourceSinkusb_func);
+ if (ret)
+ return ret;
+ ret = lb_modinit();
+ if (ret)
+ usb_function_unregister(&SourceSinkusb_func);
+ return ret;
+}
+static void __exit sslb_modexit(void)
+{
+ usb_function_unregister(&SourceSinkusb_func);
+ lb_modexit();
}
+module_init(sslb_modinit);
+module_exit(sslb_modexit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/f_uac2.c b/drivers/usb/gadget/f_uac2.c
index d7da258fa3f6..c7468b6c07b0 100644
--- a/drivers/usb/gadget/f_uac2.c
+++ b/drivers/usb/gadget/f_uac2.c
@@ -260,19 +260,14 @@ static int
uac2_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct audio_dev *agdev = uac2_to_agdev(uac2);
struct uac2_rtd_params *prm;
unsigned long flags;
- struct usb_ep *ep;
int err = 0;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- ep = agdev->in_ep;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
prm = &uac2->p_prm;
- } else {
- ep = agdev->out_ep;
+ else
prm = &uac2->c_prm;
- }
spin_lock_irqsave(&prm->lock, flags);
diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
index 5b629876941b..92efd6ec48af 100644
--- a/drivers/usb/gadget/f_uvc.c
+++ b/drivers/usb/gadget/f_uvc.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/string.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
@@ -419,7 +420,7 @@ uvc_register_video(struct uvc_device *uvc)
video->parent = &cdev->gadget->dev;
video->fops = &uvc_v4l2_fops;
video->release = video_device_release;
- strncpy(video->name, cdev->gadget->name, sizeof(video->name));
+ strlcpy(video->name, cdev->gadget->name, sizeof(video->name));
uvc->vdev = video;
video_set_drvdata(video, uvc);
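The strncpy() to strlcpy() change in uvc_register_video() matters because strncpy() leaves the destination unterminated when the source is at least as long as the buffer, while strlcpy() always NUL-terminates and truncates if necessary. A short sketch with a hypothetical 8-byte buffer:

#include <linux/string.h>

static void example_copy(void)
{
	char name[8];	/* hypothetical, shorter than the source below */

	strncpy(name, "dummy_udc.0", sizeof(name)); /* all 8 bytes used, no '\0' */
	strlcpy(name, "dummy_udc.0", sizeof(name)); /* 7 chars + '\0', always terminated */
}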
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 1b0f086426bd..d3bd7b095ba3 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -18,14 +18,13 @@
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <mach/hardware.h>
-
static struct clk *mxc_ahb_clk;
static struct clk *mxc_per_clk;
static struct clk *mxc_ipg_clk;
/* workaround ENGcm09152 for i.MX35 */
-#define USBPHYCTRL_OTGBASE_OFFSET 0x608
+#define MX35_USBPHYCTRL_OFFSET 0x600
+#define USBPHYCTRL_OTGBASE_OFFSET 0x8
#define USBPHYCTRL_EVDO (1 << 23)
int fsl_udc_clk_init(struct platform_device *pdev)
@@ -59,7 +58,7 @@ int fsl_udc_clk_init(struct platform_device *pdev)
clk_prepare_enable(mxc_per_clk);
/* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
- if (!cpu_is_mx51()) {
+ if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
freq = clk_get_rate(mxc_per_clk);
if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
(freq < 59999000 || freq > 60001000)) {
@@ -79,27 +78,40 @@ eclkrate:
return ret;
}
-void fsl_udc_clk_finalize(struct platform_device *pdev)
+int fsl_udc_clk_finalize(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
- if (cpu_is_mx35()) {
- unsigned int v;
+ int ret = 0;
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
- USBPHYCTRL_OTGBASE_OFFSET));
- writel(v | USBPHYCTRL_EVDO,
- MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
- USBPHYCTRL_OTGBASE_OFFSET));
+ /* workaround ENGcm09152 for i.MX35 */
+ if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
+ unsigned int v;
+ struct resource *res = platform_get_resource
+ (pdev, IORESOURCE_MEM, 0);
+ void __iomem *phy_regs = ioremap(res->start +
+ MX35_USBPHYCTRL_OFFSET, 512);
+ if (!phy_regs) {
+ dev_err(&pdev->dev, "ioremap for phy address fails\n");
+ ret = -EINVAL;
+ goto ioremap_err;
}
+
+ v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
+ writel(v | USBPHYCTRL_EVDO,
+ phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
+
+ iounmap(phy_regs);
}
+
+ioremap_err:
/* ULPI transceivers don't need usbpll */
if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
clk_disable_unprepare(mxc_per_clk);
mxc_per_clk = NULL;
}
+
+ return ret;
}
void fsl_udc_clk_release(void)
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index ec50f18c8890..034477ce77c6 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1894,7 +1894,7 @@ static int fsl_qe_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
/* defined in usb_gadget.h */
-static struct usb_gadget_ops qe_gadget_ops = {
+static const struct usb_gadget_ops qe_gadget_ops = {
.get_frame = qe_get_frame,
.udc_start = fsl_qe_start,
.udc_stop = fsl_qe_stop,
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index c19f7f13790b..04d5fef1440c 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -41,6 +41,7 @@
#include <linux/fsl_devices.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
+#include <linux/of_device.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -1254,19 +1255,20 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
-static int fsl_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int fsl_stop(struct usb_gadget_driver *driver);
+static int fsl_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int fsl_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
/* defined in gadget.h */
-static struct usb_gadget_ops fsl_gadget_ops = {
+static const struct usb_gadget_ops fsl_gadget_ops = {
.get_frame = fsl_get_frame,
.wakeup = fsl_wakeup,
/* .set_selfpowered = fsl_set_selfpowered, */ /* Always selfpowered */
.vbus_session = fsl_vbus_session,
.vbus_draw = fsl_vbus_draw,
.pullup = fsl_pullup,
- .start = fsl_start,
- .stop = fsl_stop,
+ .udc_start = fsl_udc_start,
+ .udc_stop = fsl_udc_stop,
};
/* Set protocol stall on ep0, protocol stall will automatically be cleared
@@ -1950,22 +1952,12 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
* Hook to gadget drivers
* Called by initialization code of gadget drivers
*----------------------------------------------------------------*/
-static int fsl_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int fsl_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- int retval = -ENODEV;
+ int retval = 0;
unsigned long flags = 0;
- if (!udc_controller)
- return -ENODEV;
-
- if (!driver || driver->max_speed < USB_SPEED_FULL
- || !bind || !driver->disconnect || !driver->setup)
- return -EINVAL;
-
- if (udc_controller->driver)
- return -EBUSY;
-
/* lock is needed but whether should use this lock or another */
spin_lock_irqsave(&udc_controller->lock, flags);
@@ -1975,15 +1967,6 @@ static int fsl_start(struct usb_gadget_driver *driver,
udc_controller->gadget.dev.driver = &driver->driver;
spin_unlock_irqrestore(&udc_controller->lock, flags);
- /* bind udc driver to gadget driver */
- retval = bind(&udc_controller->gadget, driver);
- if (retval) {
- VDBG("bind to %s --> %d", driver->driver.name, retval);
- udc_controller->gadget.dev.driver = NULL;
- udc_controller->driver = NULL;
- goto out;
- }
-
if (!IS_ERR_OR_NULL(udc_controller->transceiver)) {
/* Suspend the controller until OTG enable it */
udc_controller->stopped = 1;
@@ -2009,28 +1992,17 @@ static int fsl_start(struct usb_gadget_driver *driver,
udc_controller->ep0_state = WAIT_FOR_SETUP;
udc_controller->ep0_dir = 0;
}
- printk(KERN_INFO "%s: bind to driver %s\n",
- udc_controller->gadget.name, driver->driver.name);
-out:
- if (retval)
- printk(KERN_WARNING "gadget driver register failed %d\n",
- retval);
return retval;
}
/* Disconnect from gadget driver */
-static int fsl_stop(struct usb_gadget_driver *driver)
+static int fsl_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
struct fsl_ep *loop_ep;
unsigned long flags;
- if (!udc_controller)
- return -ENODEV;
-
- if (!driver || driver != udc_controller->driver || !driver->unbind)
- return -EINVAL;
-
if (!IS_ERR_OR_NULL(udc_controller->transceiver))
otg_set_peripheral(udc_controller->transceiver->otg, NULL);
@@ -2051,16 +2023,9 @@ static int fsl_stop(struct usb_gadget_driver *driver)
nuke(loop_ep, -ESHUTDOWN);
spin_unlock_irqrestore(&udc_controller->lock, flags);
- /* report disconnect; the controller is already quiesced */
- driver->disconnect(&udc_controller->gadget);
-
- /* unbind gadget and unhook driver. */
- driver->unbind(&udc_controller->gadget);
udc_controller->gadget.dev.driver = NULL;
udc_controller->driver = NULL;
- printk(KERN_WARNING "unregistered gadget driver '%s'\n",
- driver->driver.name);
return 0;
}
@@ -2438,11 +2403,6 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
unsigned int i;
u32 dccparams;
- if (strcmp(pdev->name, driver_name)) {
- VDBG("Wrong device");
- return -ENODEV;
- }
-
udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
if (udc_controller == NULL) {
ERR("malloc udc failed\n");
@@ -2547,7 +2507,9 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
dr_controller_setup(udc_controller);
}
- fsl_udc_clk_finalize(pdev);
+ ret = fsl_udc_clk_finalize(pdev);
+ if (ret)
+ goto err_free_irq;
/* Setup gadget structure */
udc_controller->gadget.ops = &fsl_gadget_ops;
@@ -2756,22 +2718,32 @@ static int fsl_udc_otg_resume(struct device *dev)
return fsl_udc_resume(NULL);
}
-
/*-------------------------------------------------------------------------
Register entry point for the peripheral controller driver
--------------------------------------------------------------------------*/
-
+static const struct platform_device_id fsl_udc_devtype[] = {
+ {
+ .name = "imx-udc-mx27",
+ }, {
+ .name = "imx-udc-mx51",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
static struct platform_driver udc_driver = {
- .remove = __exit_p(fsl_udc_remove),
+ .remove = __exit_p(fsl_udc_remove),
+ /* Just for FSL i.mx SoC currently */
+ .id_table = fsl_udc_devtype,
/* these suspend and resume are not usb suspend and resume */
- .suspend = fsl_udc_suspend,
- .resume = fsl_udc_resume,
- .driver = {
- .name = (char *)driver_name,
- .owner = THIS_MODULE,
- /* udc suspend/resume called from OTG driver */
- .suspend = fsl_udc_otg_suspend,
- .resume = fsl_udc_otg_resume,
+ .suspend = fsl_udc_suspend,
+ .resume = fsl_udc_resume,
+ .driver = {
+ .name = (char *)driver_name,
+ .owner = THIS_MODULE,
+ /* udc suspend/resume called from OTG driver */
+ .suspend = fsl_udc_otg_suspend,
+ .resume = fsl_udc_otg_resume,
},
};
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index f61a967f7082..c6703bb07b23 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -592,15 +592,16 @@ static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
struct platform_device;
#ifdef CONFIG_ARCH_MXC
int fsl_udc_clk_init(struct platform_device *pdev);
-void fsl_udc_clk_finalize(struct platform_device *pdev);
+int fsl_udc_clk_finalize(struct platform_device *pdev);
void fsl_udc_clk_release(void);
#else
static inline int fsl_udc_clk_init(struct platform_device *pdev)
{
return 0;
}
-static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
+static inline int fsl_udc_clk_finalize(struct platform_device *pdev)
{
+ return 0;
}
static inline void fsl_udc_clk_release(void)
{
diff --git a/drivers/usb/gadget/functions.c b/drivers/usb/gadget/functions.c
new file mode 100644
index 000000000000..b13f839e7368
--- /dev/null
+++ b/drivers/usb/gadget/functions.c
@@ -0,0 +1,116 @@
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#include <linux/usb/composite.h>
+
+static LIST_HEAD(func_list);
+static DEFINE_MUTEX(func_lock);
+
+static struct usb_function_instance *try_get_usb_function_instance(const char *name)
+{
+ struct usb_function_driver *fd;
+ struct usb_function_instance *fi;
+
+ fi = ERR_PTR(-ENOENT);
+ mutex_lock(&func_lock);
+ list_for_each_entry(fd, &func_list, list) {
+
+ if (strcmp(name, fd->name))
+ continue;
+
+ if (!try_module_get(fd->mod)) {
+ fi = ERR_PTR(-EBUSY);
+ break;
+ }
+ fi = fd->alloc_inst();
+ if (IS_ERR(fi))
+ module_put(fd->mod);
+ else
+ fi->fd = fd;
+ break;
+ }
+ mutex_unlock(&func_lock);
+ return fi;
+}
+
+struct usb_function_instance *usb_get_function_instance(const char *name)
+{
+ struct usb_function_instance *fi;
+ int ret;
+
+ fi = try_get_usb_function_instance(name);
+ if (!IS_ERR(fi))
+ return fi;
+ ret = PTR_ERR(fi);
+ if (ret != -ENOENT)
+ return fi;
+ ret = request_module("usbfunc:%s", name);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ return try_get_usb_function_instance(name);
+}
+EXPORT_SYMBOL_GPL(usb_get_function_instance);
+
+struct usb_function *usb_get_function(struct usb_function_instance *fi)
+{
+ struct usb_function *f;
+
+ f = fi->fd->alloc_func(fi);
+ if (IS_ERR(f))
+ return f;
+ f->fi = fi;
+ return f;
+}
+EXPORT_SYMBOL_GPL(usb_get_function);
+
+void usb_put_function_instance(struct usb_function_instance *fi)
+{
+ struct module *mod;
+
+ if (!fi)
+ return;
+
+ mod = fi->fd->mod;
+ fi->free_func_inst(fi);
+ module_put(mod);
+}
+EXPORT_SYMBOL_GPL(usb_put_function_instance);
+
+void usb_put_function(struct usb_function *f)
+{
+ if (!f)
+ return;
+
+ f->free_func(f);
+}
+EXPORT_SYMBOL_GPL(usb_put_function);
+
+int usb_function_register(struct usb_function_driver *newf)
+{
+ struct usb_function_driver *fd;
+ int ret;
+
+ ret = -EEXIST;
+
+ mutex_lock(&func_lock);
+ list_for_each_entry(fd, &func_list, list) {
+ if (!strcmp(fd->name, newf->name))
+ goto out;
+ }
+ ret = 0;
+ list_add_tail(&newf->list, &func_list);
+out:
+ mutex_unlock(&func_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_function_register);
+
+void usb_function_unregister(struct usb_function_driver *fd)
+{
+ mutex_lock(&func_lock);
+ list_del(&fd->list);
+ mutex_unlock(&func_lock);
+}
+EXPORT_SYMBOL_GPL(usb_function_unregister);
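The new functions.c above is a small name-keyed registry with on-demand module loading through the "usbfunc:" alias namespace. As a rough sketch of how a USB function module would plug into it (the struct usb_function_driver field names are inferred from the lookups in this file; the foo_* symbols are purely illustrative):

#include <linux/module.h>
#include <linux/usb/composite.h>

static struct usb_function_instance *foo_alloc_inst(void);
static struct usb_function *foo_alloc_func(struct usb_function_instance *fi);

/* assumed field layout, matching the accesses in functions.c above */
static struct usb_function_driver foo_usb_func = {
	.name		= "foo",
	.mod		= THIS_MODULE,
	.alloc_inst	= foo_alloc_inst,
	.alloc_func	= foo_alloc_func,
};

static int __init foo_mod_init(void)
{
	/* makes usb_get_function_instance("foo") resolve to this driver */
	return usb_function_register(&foo_usb_func);
}
module_init(foo_mod_init);

static void __exit foo_mod_exit(void)
{
	usb_function_unregister(&foo_usb_func);
}
module_exit(foo_mod_exit);

/* lets request_module("usbfunc:foo") in functions.c load us on demand */
MODULE_ALIAS("usbfunc:foo");

Since try_get_usb_function_instance() drops the module reference only when alloc_inst() fails, and usb_put_function_instance() releases it after free_func_inst(), a loaded function module stays pinned for as long as any instance it allocated is alive.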
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 72cd5e6719db..066cb89376de 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -1308,65 +1308,28 @@ static void init_controller(struct fusb300 *fusb300)
iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1);
}
/*------------------------------------------------------------------------*/
-static struct fusb300 *the_controller;
-
-static int fusb300_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int fusb300_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct fusb300 *fusb300 = the_controller;
- int retval;
-
- if (!driver
- || driver->max_speed < USB_SPEED_FULL
- || !bind
- || !driver->setup)
- return -EINVAL;
-
- if (!fusb300)
- return -ENODEV;
-
- if (fusb300->driver)
- return -EBUSY;
+ struct fusb300 *fusb300 = to_fusb300(g);
/* hook up the driver */
driver->driver.bus = NULL;
fusb300->driver = driver;
fusb300->gadget.dev.driver = &driver->driver;
- retval = device_add(&fusb300->gadget.dev);
- if (retval) {
- pr_err("device_add error (%d)\n", retval);
- goto error;
- }
-
- retval = bind(&fusb300->gadget, driver);
- if (retval) {
- pr_err("bind to driver error (%d)\n", retval);
- device_del(&fusb300->gadget.dev);
- goto error;
- }
-
return 0;
-
-error:
- fusb300->driver = NULL;
- fusb300->gadget.dev.driver = NULL;
-
- return retval;
}
-static int fusb300_udc_stop(struct usb_gadget_driver *driver)
+static int fusb300_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct fusb300 *fusb300 = the_controller;
-
- if (driver != fusb300->driver || !driver->unbind)
- return -EINVAL;
+ struct fusb300 *fusb300 = to_fusb300(g);
driver->unbind(&fusb300->gadget);
fusb300->gadget.dev.driver = NULL;
init_controller(fusb300);
- device_del(&fusb300->gadget.dev);
fusb300->driver = NULL;
return 0;
@@ -1378,10 +1341,10 @@ static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
return 0;
}
-static struct usb_gadget_ops fusb300_gadget_ops = {
+static const struct usb_gadget_ops fusb300_gadget_ops = {
.pullup = fusb300_udc_pullup,
- .start = fusb300_udc_start,
- .stop = fusb300_udc_stop,
+ .udc_start = fusb300_udc_start,
+ .udc_stop = fusb300_udc_stop,
};
static int __exit fusb300_remove(struct platform_device *pdev)
@@ -1505,8 +1468,6 @@ static int __init fusb300_probe(struct platform_device *pdev)
fusb300->gadget.ep0 = &fusb300->ep[0]->ep;
INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list);
- the_controller = fusb300;
-
fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
GFP_KERNEL);
if (fusb300->ep0_req == NULL)
@@ -1517,9 +1478,19 @@ static int __init fusb300_probe(struct platform_device *pdev)
if (ret)
goto err_add_udc;
+ ret = device_add(&fusb300->gadget.dev);
+ if (ret) {
+ pr_err("device_add error (%d)\n", ret);
+ goto err_add_device;
+ }
+
dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
return 0;
+
+err_add_device:
+ usb_del_gadget_udc(&fusb300->gadget);
+
err_add_udc:
fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
@@ -1547,15 +1518,4 @@ static struct platform_driver fusb300_driver = {
},
};
-static int __init fusb300_udc_init(void)
-{
- return platform_driver_probe(&fusb300_driver, fusb300_probe);
-}
-
-module_init(fusb300_udc_init);
-
-static void __exit fusb300_udc_cleanup(void)
-{
- platform_driver_unregister(&fusb300_driver);
-}
-module_exit(fusb300_udc_cleanup);
+module_platform_driver_probe(fusb300_driver, fusb300_probe);
diff --git a/drivers/usb/gadget/fusb300_udc.h b/drivers/usb/gadget/fusb300_udc.h
index 542cd83cc806..6ba444ae8dd5 100644
--- a/drivers/usb/gadget/fusb300_udc.h
+++ b/drivers/usb/gadget/fusb300_udc.h
@@ -673,4 +673,6 @@ struct fusb300 {
u8 reenum; /* if re-enumeration */
};
+#define to_fusb300(g) (container_of((g), struct fusb300, gadget))
+
#endif
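The to_fusb300() macro is what lets the driver drop its file-scope the_controller pointer: the new udc_start()/udc_stop() callbacks receive the struct usb_gadget, and container_of() recovers the controller state from it. The same pattern recurs in the goku, m66592, pch and pxa25x conversions below. A generic sketch, with foo_udc standing in for any of these drivers:

struct foo_udc {
	struct usb_gadget		gadget;		/* embedded, not a pointer */
	struct usb_gadget_driver	*driver;
	spinlock_t			lock;
	/* ... controller-specific state ... */
};
#define to_foo_udc(g)	(container_of((g), struct foo_udc, gadget))

static int foo_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct foo_udc *udc = to_foo_udc(g);	/* no global lookup needed */

	udc->driver = driver;
	/* enable the hardware, ep0, host detection ... */
	return 0;
}

The cast is valid only because gadget is embedded in the larger structure, which is also why these drivers can now support more than one controller instance once the globals are gone.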
diff --git a/drivers/usb/gadget/g_zero.h b/drivers/usb/gadget/g_zero.h
index 71ca193358b8..ef3e8515272b 100644
--- a/drivers/usb/gadget/g_zero.h
+++ b/drivers/usb/gadget/g_zero.h
@@ -6,11 +6,34 @@
#ifndef __G_ZERO_H
#define __G_ZERO_H
-#include <linux/usb/composite.h>
+struct usb_zero_options {
+ unsigned pattern;
+ unsigned isoc_interval;
+ unsigned isoc_maxpacket;
+ unsigned isoc_mult;
+ unsigned isoc_maxburst;
+ unsigned bulk_buflen;
+ unsigned qlen;
+};
-/* global state */
-extern unsigned buflen;
-extern const struct usb_descriptor_header *otg_desc[];
+struct f_ss_opts {
+ struct usb_function_instance func_inst;
+ unsigned pattern;
+ unsigned isoc_interval;
+ unsigned isoc_maxpacket;
+ unsigned isoc_mult;
+ unsigned isoc_maxburst;
+ unsigned bulk_buflen;
+};
+
+struct f_lb_opts {
+ struct usb_function_instance func_inst;
+ unsigned bulk_buflen;
+ unsigned qlen;
+};
+
+void lb_modexit(void);
+int lb_modinit(void);
/* common utilities */
struct usb_request *alloc_ep_req(struct usb_ep *ep, int len);
@@ -19,8 +42,4 @@ void disable_endpoints(struct usb_composite_dev *cdev,
struct usb_ep *in, struct usb_ep *out,
struct usb_ep *iso_in, struct usb_ep *iso_out);
-/* configuration-specific linkup */
-int sourcesink_add(struct usb_composite_dev *cdev, bool autoresume);
-int loopback_add(struct usb_composite_dev *cdev, bool autoresume);
-
#endif /* __G_ZERO_H */
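g_zero.h now describes the source/sink and loopback functions through per-instance option structs instead of global state. Under the function-instance scheme introduced in functions.c, a gadget would typically tune these through container_of() on the instance it obtained, mirroring the f_serial_opts handling in multi.c further down. A hedged sketch (the "SourceSink" instance name and the option values are assumptions, not taken from this patch):

static int example_tune_sourcesink(void)
{
	struct usb_function_instance *fi_ss;
	struct f_ss_opts *ss_opts;

	fi_ss = usb_get_function_instance("SourceSink");	/* name is an assumption */
	if (IS_ERR(fi_ss))
		return PTR_ERR(fi_ss);

	/* recover the option struct wrapped around the instance */
	ss_opts = container_of(fi_ss, struct f_ss_opts, func_inst);
	ss_opts->pattern       = 0;	/* illustrative values only */
	ss_opts->isoc_interval = 4;
	ss_opts->bulk_buflen   = 4096;
	return 0;
}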
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 881aab86ae99..e879e2c9f461 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -125,7 +125,7 @@ static struct usb_configuration midi_config = {
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_ONE,
- .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
+ .MaxPower = CONFIG_USB_GADGET_VBUS_DRAW,
};
static int __init midi_bind_config(struct usb_configuration *c)
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 51037cb78604..85742d4c67df 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -993,14 +993,15 @@ static int goku_get_frame(struct usb_gadget *_gadget)
return -EOPNOTSUPP;
}
-static int goku_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int goku_stop(struct usb_gadget_driver *driver);
+static int goku_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int goku_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
static const struct usb_gadget_ops goku_ops = {
.get_frame = goku_get_frame,
- .start = goku_start,
- .stop = goku_stop,
+ .udc_start = goku_udc_start,
+ .udc_stop = goku_udc_stop,
// no remote wakeup
// not selfpowered
};
@@ -1339,50 +1340,28 @@ static void udc_enable(struct goku_udc *dev)
* - one function driver, initted second
*/
-static struct goku_udc *the_controller;
-
/* when a driver is successfully registered, it will receive
* control requests including set_configuration(), which enables
* non-control requests. then usb traffic follows until a
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
-static int goku_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int goku_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct goku_udc *dev = the_controller;
- int retval;
-
- if (!driver
- || driver->max_speed < USB_SPEED_FULL
- || !bind
- || !driver->disconnect
- || !driver->setup)
- return -EINVAL;
- if (!dev)
- return -ENODEV;
- if (dev->driver)
- return -EBUSY;
+ struct goku_udc *dev = to_goku_udc(g);
/* hook up the driver */
driver->driver.bus = NULL;
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
- retval = bind(&dev->gadget, driver);
- if (retval) {
- DBG(dev, "bind to driver %s --> error %d\n",
- driver->driver.name, retval);
- dev->driver = NULL;
- dev->gadget.dev.driver = NULL;
- return retval;
- }
- /* then enable host detection and ep0; and we're ready
+ /*
+ * then enable host detection and ep0; and we're ready
* for set_configuration as well as eventual disconnect.
*/
udc_enable(dev);
- DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
return 0;
}
@@ -1400,35 +1379,23 @@ stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
udc_reset (dev);
for (i = 0; i < 4; i++)
nuke(&dev->ep [i], -ESHUTDOWN);
- if (driver) {
- spin_unlock(&dev->lock);
- driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
if (dev->driver)
udc_enable(dev);
}
-static int goku_stop(struct usb_gadget_driver *driver)
+static int goku_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct goku_udc *dev = the_controller;
+ struct goku_udc *dev = to_goku_udc(g);
unsigned long flags;
- if (!dev)
- return -ENODEV;
- if (!driver || driver != dev->driver || !driver->unbind)
- return -EINVAL;
-
spin_lock_irqsave(&dev->lock, flags);
dev->driver = NULL;
stop_activity(dev, driver);
spin_unlock_irqrestore(&dev->lock, flags);
-
- driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
- DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
return 0;
}
@@ -1754,7 +1721,6 @@ static void goku_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
dev->regs = NULL;
- the_controller = NULL;
INFO(dev, "unbind\n");
}
@@ -1770,13 +1736,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem *base = NULL;
int retval;
- /* if you want to support more than one controller in a system,
- * usb_gadget_driver_{register,unregister}() must change.
- */
- if (the_controller) {
- pr_warning("ignoring %s\n", pci_name(pdev));
- return -EBUSY;
- }
if (!pdev->irq) {
printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
retval = -ENODEV;
@@ -1851,7 +1810,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif
- the_controller = dev;
retval = device_register(&dev->gadget.dev);
if (retval) {
put_device(&dev->gadget.dev);
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index 85cdce0d1901..b4470d2b1d86 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -261,6 +261,7 @@ struct goku_udc {
/* statistics... */
unsigned long irqs;
};
+#define to_goku_udc(g) (container_of((g), struct goku_udc, gadget))
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index a0eb85794fd4..8efd7555fa21 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1556,17 +1556,7 @@ static struct platform_driver udc_driver = {
.resume = imx_udc_resume,
};
-static int __init udc_init(void)
-{
- return platform_driver_probe(&udc_driver, imx_udc_probe);
-}
-module_init(udc_init);
-
-static void __exit udc_exit(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-module_exit(udc_exit);
+module_platform_driver_probe(udc_driver, imx_udc_probe);
MODULE_DESCRIPTION("IMX USB Device Controller driver");
MODULE_AUTHOR("Darius Augulis <augulis.darius@gmail.com>");
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index dd1c9b1fe528..aa04089d6899 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -3458,17 +3458,7 @@ static struct platform_driver lpc32xx_udc_driver = {
},
};
-static int __init udc_init_module(void)
-{
- return platform_driver_probe(&lpc32xx_udc_driver, lpc32xx_udc_probe);
-}
-module_init(udc_init_module);
-
-static void __exit udc_exit_module(void)
-{
- platform_driver_unregister(&lpc32xx_udc_driver);
-}
-module_exit(udc_exit_module);
+module_platform_driver_probe(lpc32xx_udc_driver, lpc32xx_udc_probe);
MODULE_DESCRIPTION("LPC32XX udc driver");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index b6401f1b56ce..c1b8c2dd808d 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1463,42 +1463,16 @@ static struct usb_ep_ops m66592_ep_ops = {
};
/*-------------------------------------------------------------------------*/
-static struct m66592 *the_controller;
-
-static int m66592_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int m66592_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct m66592 *m66592 = the_controller;
- int retval;
-
- if (!driver
- || driver->max_speed < USB_SPEED_HIGH
- || !bind
- || !driver->setup)
- return -EINVAL;
- if (!m66592)
- return -ENODEV;
- if (m66592->driver)
- return -EBUSY;
+ struct m66592 *m66592 = to_m66592(g);
/* hook up the driver */
driver->driver.bus = NULL;
m66592->driver = driver;
m66592->gadget.dev.driver = &driver->driver;
- retval = device_add(&m66592->gadget.dev);
- if (retval) {
- pr_err("device_add error (%d)\n", retval);
- goto error;
- }
-
- retval = bind(&m66592->gadget, driver);
- if (retval) {
- pr_err("bind to driver error (%d)\n", retval);
- device_del(&m66592->gadget.dev);
- goto error;
- }
-
m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
if (m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS) {
m66592_start_xclock(m66592);
@@ -1510,26 +1484,12 @@ static int m66592_start(struct usb_gadget_driver *driver,
}
return 0;
-
-error:
- m66592->driver = NULL;
- m66592->gadget.dev.driver = NULL;
-
- return retval;
}
-static int m66592_stop(struct usb_gadget_driver *driver)
+static int m66592_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct m66592 *m66592 = the_controller;
- unsigned long flags;
-
- if (driver != m66592->driver || !driver->unbind)
- return -EINVAL;
-
- spin_lock_irqsave(&m66592->lock, flags);
- if (m66592->gadget.speed != USB_SPEED_UNKNOWN)
- m66592_usb_disconnect(m66592);
- spin_unlock_irqrestore(&m66592->lock, flags);
+ struct m66592 *m66592 = to_m66592(g);
m66592_bclr(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
@@ -1539,8 +1499,8 @@ static int m66592_stop(struct usb_gadget_driver *driver)
init_controller(m66592);
disable_controller(m66592);
- device_del(&m66592->gadget.dev);
m66592->driver = NULL;
+
return 0;
}
@@ -1566,10 +1526,10 @@ static int m66592_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
-static struct usb_gadget_ops m66592_gadget_ops = {
+static const struct usb_gadget_ops m66592_gadget_ops = {
.get_frame = m66592_get_frame,
- .start = m66592_start,
- .stop = m66592_stop,
+ .udc_start = m66592_udc_start,
+ .udc_stop = m66592_udc_stop,
.pullup = m66592_pullup,
};
@@ -1578,6 +1538,7 @@ static int __exit m66592_remove(struct platform_device *pdev)
struct m66592 *m66592 = dev_get_drvdata(&pdev->dev);
usb_del_gadget_udc(&m66592->gadget);
+ device_del(&m66592->gadget.dev);
del_timer_sync(&m66592->timer);
iounmap(m66592->reg);
@@ -1706,8 +1667,6 @@ static int __init m66592_probe(struct platform_device *pdev)
m66592->pipenum2ep[0] = &m66592->ep[0];
m66592->epaddr2ep[0] = &m66592->ep[0];
- the_controller = m66592;
-
m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
if (m66592->ep0_req == NULL)
goto clean_up3;
@@ -1715,6 +1674,12 @@ static int __init m66592_probe(struct platform_device *pdev)
init_controller(m66592);
+ ret = device_add(&m66592->gadget.dev);
+ if (ret) {
+ pr_err("device_add error (%d)\n", ret);
+ goto err_device_add;
+ }
+
ret = usb_add_gadget_udc(&pdev->dev, &m66592->gadget);
if (ret)
goto err_add_udc;
@@ -1723,6 +1688,9 @@ static int __init m66592_probe(struct platform_device *pdev)
return 0;
err_add_udc:
+ device_del(&m66592->gadget.dev);
+
+err_device_add:
m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
clean_up3:
@@ -1753,14 +1721,4 @@ static struct platform_driver m66592_driver = {
},
};
-static int __init m66592_udc_init(void)
-{
- return platform_driver_probe(&m66592_driver, m66592_probe);
-}
-module_init(m66592_udc_init);
-
-static void __exit m66592_udc_cleanup(void)
-{
- platform_driver_unregister(&m66592_driver);
-}
-module_exit(m66592_udc_cleanup);
+module_platform_driver_probe(m66592_driver, m66592_probe);
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h
index 16c7e14678b8..96d49d7bfb6b 100644
--- a/drivers/usb/gadget/m66592-udc.h
+++ b/drivers/usb/gadget/m66592-udc.h
@@ -492,6 +492,7 @@ struct m66592 {
int isochronous;
int num_dma;
};
+#define to_m66592(g) (container_of((g), struct m66592, gadget))
#define gadget_to_m66592(_gadget) container_of(_gadget, struct m66592, gadget)
#define m66592_to_gadget(m66592) (&m66592->gadget)
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 88472bf7dbb7..20bbbf917fc2 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include "u_serial.h"
#if defined USB_ETH_RNDIS
# undef USB_ETH_RNDIS
#endif
@@ -42,9 +43,6 @@ MODULE_LICENSE("GPL");
*/
#include "f_mass_storage.c"
-#include "u_serial.c"
-#include "f_acm.c"
-
#include "f_ecm.c"
#include "f_subset.c"
#ifdef USB_ETH_RNDIS
@@ -137,10 +135,13 @@ static struct fsg_common fsg_common;
static u8 hostaddr[ETH_ALEN];
+static unsigned char tty_line;
+static struct usb_function_instance *fi_acm;
/********** RNDIS **********/
#ifdef USB_ETH_RNDIS
+static struct usb_function *f_acm_rndis;
static __init int rndis_do_config(struct usb_configuration *c)
{
@@ -155,15 +156,25 @@ static __init int rndis_do_config(struct usb_configuration *c)
if (ret < 0)
return ret;
- ret = acm_bind_config(c, 0);
- if (ret < 0)
- return ret;
+ f_acm_rndis = usb_get_function(fi_acm);
+ if (IS_ERR(f_acm_rndis))
+ goto err_func_acm;
+
+ ret = usb_add_function(c, f_acm_rndis);
+ if (ret)
+ goto err_conf;
ret = fsg_bind_config(c->cdev, c, &fsg_common);
if (ret < 0)
- return ret;
+ goto err_fsg;
return 0;
+err_fsg:
+ usb_remove_function(c, f_acm_rndis);
+err_conf:
+ usb_put_function(f_acm_rndis);
+err_func_acm:
+ return ret;
}
static int rndis_config_register(struct usb_composite_dev *cdev)
@@ -192,6 +203,7 @@ static int rndis_config_register(struct usb_composite_dev *cdev)
/********** CDC ECM **********/
#ifdef CONFIG_USB_G_MULTI_CDC
+static struct usb_function *f_acm_multi;
static __init int cdc_do_config(struct usb_configuration *c)
{
@@ -206,15 +218,26 @@ static __init int cdc_do_config(struct usb_configuration *c)
if (ret < 0)
return ret;
- ret = acm_bind_config(c, 0);
- if (ret < 0)
- return ret;
+ /* implicit port_num is zero */
+ f_acm_multi = usb_get_function(fi_acm);
+ if (IS_ERR(f_acm_multi))
+ goto err_func_acm;
+
+ ret = usb_add_function(c, f_acm_multi);
+ if (ret)
+ goto err_conf;
ret = fsg_bind_config(c->cdev, c, &fsg_common);
if (ret < 0)
- return ret;
+ goto err_fsg;
return 0;
+err_fsg:
+ usb_remove_function(c, f_acm_multi);
+err_conf:
+ usb_put_function(f_acm_multi);
+err_func_acm:
+ return ret;
}
static int cdc_config_register(struct usb_composite_dev *cdev)
@@ -243,10 +266,10 @@ static int cdc_config_register(struct usb_composite_dev *cdev)
/****************************** Gadget Bind ******************************/
-
static int __ref multi_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
+ struct f_serial_opts *opts;
int status;
if (!can_support_ecm(cdev->gadget)) {
@@ -261,10 +284,19 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
return status;
/* set up serial link layer */
- status = gserial_setup(cdev->gadget, 1);
+ status = gserial_alloc_line(&tty_line);
if (status < 0)
goto fail0;
+ fi_acm = usb_get_function_instance("acm");
+ if (IS_ERR(fi_acm)) {
+ status = PTR_ERR(fi_acm);
+ goto fail0dot5;
+ }
+
+ opts = container_of(fi_acm, struct f_serial_opts, func_inst);
+ opts->port_num = tty_line;
+
/* set up mass storage function */
{
void *retp;
@@ -301,7 +333,9 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
fail2:
fsg_common_put(&fsg_common);
fail1:
- gserial_cleanup();
+ usb_put_function_instance(fi_acm);
+fail0dot5:
+ gserial_free_line(tty_line);
fail0:
gether_cleanup();
return status;
@@ -309,7 +343,14 @@ fail0:
static int __exit multi_unbind(struct usb_composite_dev *cdev)
{
- gserial_cleanup();
+#ifdef CONFIG_USB_G_MULTI_CDC
+ usb_put_function(f_acm_multi);
+#endif
+#ifdef USB_ETH_RNDIS
+ usb_put_function(f_acm_rndis);
+#endif
+ usb_put_function_instance(fi_acm);
+ gserial_free_line(tty_line);
gether_cleanup();
return 0;
}
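multi.c is the first gadget in this series to consume the ACM function through the new instance API: gserial_alloc_line() reserves a ttyGS port, the "acm" instance is looked up by name, its f_serial_opts is pointed at that port, and each configuration then materializes its own struct usb_function from the shared instance. Consolidated into one place, the flow looks roughly like this (schematic only; the fail*/err* unwinding that multi.c carries is trimmed):

static unsigned char tty_line;
static struct usb_function_instance *fi_acm;
static struct usb_function *f_acm;

static int example_acm_config(struct usb_configuration *c)
{
	struct f_serial_opts *opts;
	int ret;

	ret = gserial_alloc_line(&tty_line);		/* allocate a ttyGS port */
	if (ret)
		return ret;

	fi_acm = usb_get_function_instance("acm");	/* may load the module on demand */
	if (IS_ERR(fi_acm))
		return PTR_ERR(fi_acm);

	opts = container_of(fi_acm, struct f_serial_opts, func_inst);
	opts->port_num = tty_line;			/* tie the instance to the tty */

	f_acm = usb_get_function(fi_acm);		/* one function per configuration */
	if (IS_ERR(f_acm))
		return PTR_ERR(f_acm);

	return usb_add_function(c, f_acm);
}

Teardown runs in the opposite order, as multi_unbind() shows: usb_put_function() for each configuration's function, then usb_put_function_instance(), then gserial_free_line().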
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index 379aac7b82fc..c8cf959057fe 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -61,9 +61,6 @@ static DECLARE_COMPLETION(release_done);
static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;
-/* controller device global variable */
-static struct mv_udc *the_controller;
-
static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
@@ -1012,7 +1009,7 @@ static void udc_clock_enable(struct mv_udc *udc)
unsigned int i;
for (i = 0; i < udc->clknum; i++)
- clk_enable(udc->clk[i]);
+ clk_prepare_enable(udc->clk[i]);
}
static void udc_clock_disable(struct mv_udc *udc)
@@ -1020,7 +1017,7 @@ static void udc_clock_disable(struct mv_udc *udc)
unsigned int i;
for (i = 0; i < udc->clknum; i++)
- clk_disable(udc->clk[i]);
+ clk_disable_unprepare(udc->clk[i]);
}
static void udc_stop(struct mv_udc *udc)
@@ -1268,9 +1265,8 @@ static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
return retval;
}
-static int mv_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int mv_udc_stop(struct usb_gadget_driver *driver);
+static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
+static int mv_udc_stop(struct usb_gadget *, struct usb_gadget_driver *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {
@@ -1285,8 +1281,8 @@ static const struct usb_gadget_ops mv_ops = {
/* D+ pullup, software-controlled connect/disconnect to USB host */
.pullup = mv_udc_pullup,
- .start = mv_udc_start,
- .stop = mv_udc_stop,
+ .udc_start = mv_udc_start,
+ .udc_stop = mv_udc_stop,
};
static int eps_init(struct mv_udc *udc)
@@ -1373,15 +1369,14 @@ static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
}
}
-static int mv_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int mv_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct mv_udc *udc = the_controller;
+ struct mv_udc *udc;
int retval = 0;
unsigned long flags;
- if (!udc)
- return -ENODEV;
+ udc = container_of(gadget, struct mv_udc, gadget);
if (udc->driver)
return -EBUSY;
@@ -1399,26 +1394,14 @@ static int mv_udc_start(struct usb_gadget_driver *driver,
spin_unlock_irqrestore(&udc->lock, flags);
- retval = bind(&udc->gadget, driver);
- if (retval) {
- dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
- driver->driver.name, retval);
- udc->driver = NULL;
- udc->gadget.dev.driver = NULL;
- return retval;
- }
-
- if (!IS_ERR_OR_NULL(udc->transceiver)) {
+ if (udc->transceiver) {
retval = otg_set_peripheral(udc->transceiver->otg,
&udc->gadget);
if (retval) {
dev_err(&udc->dev->dev,
"unable to register peripheral to otg\n");
- if (driver->unbind) {
- driver->unbind(&udc->gadget);
- udc->gadget.dev.driver = NULL;
- udc->driver = NULL;
- }
+ udc->driver = NULL;
+ udc->gadget.dev.driver = NULL;
return retval;
}
}
@@ -1433,13 +1416,13 @@ static int mv_udc_start(struct usb_gadget_driver *driver,
return 0;
}
-static int mv_udc_stop(struct usb_gadget_driver *driver)
+static int mv_udc_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct mv_udc *udc = the_controller;
+ struct mv_udc *udc;
unsigned long flags;
- if (!udc)
- return -ENODEV;
+ udc = container_of(gadget, struct mv_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
@@ -1454,7 +1437,6 @@ static int mv_udc_stop(struct usb_gadget_driver *driver)
spin_unlock_irqrestore(&udc->lock, flags);
/* unbind gadget driver */
- driver->unbind(&udc->gadget);
udc->gadget.dev.driver = NULL;
udc->driver = NULL;
@@ -1472,10 +1454,13 @@ static void mv_set_ptc(struct mv_udc *udc, u32 mode)
static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
- struct mv_udc *udc = the_controller;
+ struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
struct mv_req *req = container_of(_req, struct mv_req, req);
+ struct mv_udc *udc;
unsigned long flags;
+ udc = mvep->udc;
+
dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
spin_lock_irqsave(&udc->lock, flags);
@@ -2123,15 +2108,18 @@ static void mv_udc_vbus_work(struct work_struct *work)
/* release device structure */
static void gadget_release(struct device *_dev)
{
- struct mv_udc *udc = the_controller;
+ struct mv_udc *udc;
+
+ udc = dev_get_drvdata(_dev);
complete(udc->done);
}
-static int mv_udc_remove(struct platform_device *dev)
+static int mv_udc_remove(struct platform_device *pdev)
{
- struct mv_udc *udc = the_controller;
- int clk_i;
+ struct mv_udc *udc;
+
+ udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
@@ -2140,57 +2128,27 @@ static int mv_udc_remove(struct platform_device *dev)
destroy_workqueue(udc->qwork);
}
- /*
- * If we have transceiver inited,
- * then vbus irq will not be requested in udc driver.
- */
- if (udc->pdata && udc->pdata->vbus
- && udc->clock_gating && IS_ERR_OR_NULL(udc->transceiver))
- free_irq(udc->pdata->vbus->irq, &dev->dev);
-
/* free memory allocated in probe */
if (udc->dtd_pool)
dma_pool_destroy(udc->dtd_pool);
if (udc->ep_dqh)
- dma_free_coherent(&dev->dev, udc->ep_dqh_size,
+ dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
udc->ep_dqh, udc->ep_dqh_dma);
- kfree(udc->eps);
-
- if (udc->irq)
- free_irq(udc->irq, &dev->dev);
-
mv_udc_disable(udc);
- if (udc->cap_regs)
- iounmap(udc->cap_regs);
-
- if (udc->phy_regs)
- iounmap(udc->phy_regs);
-
- if (udc->status_req) {
- kfree(udc->status_req->req.buf);
- kfree(udc->status_req);
- }
-
- for (clk_i = 0; clk_i <= udc->clknum; clk_i++)
- clk_put(udc->clk[clk_i]);
-
device_unregister(&udc->gadget.dev);
/* free dev, wait for the release() finished */
wait_for_completion(udc->done);
- kfree(udc);
-
- the_controller = NULL;
return 0;
}
-static int mv_udc_probe(struct platform_device *dev)
+static int mv_udc_probe(struct platform_device *pdev)
{
- struct mv_usb_platform_data *pdata = dev->dev.platform_data;
+ struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
struct mv_udc *udc;
int retval = 0;
int clk_i = 0;
@@ -2198,71 +2156,73 @@ static int mv_udc_probe(struct platform_device *dev)
size_t size;
if (pdata == NULL) {
- dev_err(&dev->dev, "missing platform_data\n");
+ dev_err(&pdev->dev, "missing platform_data\n");
return -ENODEV;
}
size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
- udc = kzalloc(size, GFP_KERNEL);
+ udc = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (udc == NULL) {
- dev_err(&dev->dev, "failed to allocate memory for udc\n");
+ dev_err(&pdev->dev, "failed to allocate memory for udc\n");
return -ENOMEM;
}
- the_controller = udc;
udc->done = &release_done;
- udc->pdata = dev->dev.platform_data;
+ udc->pdata = pdev->dev.platform_data;
spin_lock_init(&udc->lock);
- udc->dev = dev;
+ udc->dev = pdev;
#ifdef CONFIG_USB_OTG_UTILS
- if (pdata->mode == MV_USB_MODE_OTG)
- udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (pdata->mode == MV_USB_MODE_OTG) {
+ udc->transceiver = devm_usb_get_phy(&pdev->dev,
+ USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(udc->transceiver)) {
+ udc->transceiver = NULL;
+ return -ENODEV;
+ }
+ }
#endif
udc->clknum = pdata->clknum;
for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
- udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
+ udc->clk[clk_i] = devm_clk_get(&pdev->dev,
+ pdata->clkname[clk_i]);
if (IS_ERR(udc->clk[clk_i])) {
retval = PTR_ERR(udc->clk[clk_i]);
- goto err_put_clk;
+ return retval;
}
}
r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
if (r == NULL) {
- dev_err(&dev->dev, "no I/O memory resource defined\n");
- retval = -ENODEV;
- goto err_put_clk;
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ return -ENODEV;
}
udc->cap_regs = (struct mv_cap_regs __iomem *)
- ioremap(r->start, resource_size(r));
+ devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (udc->cap_regs == NULL) {
- dev_err(&dev->dev, "failed to map I/O memory\n");
- retval = -EBUSY;
- goto err_put_clk;
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ return -EBUSY;
}
r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
if (r == NULL) {
- dev_err(&dev->dev, "no phy I/O memory resource defined\n");
- retval = -ENODEV;
- goto err_iounmap_capreg;
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ return -ENODEV;
}
udc->phy_regs = ioremap(r->start, resource_size(r));
if (udc->phy_regs == NULL) {
- dev_err(&dev->dev, "failed to map phy I/O memory\n");
- retval = -EBUSY;
- goto err_iounmap_capreg;
+ dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ return -EBUSY;
}
	/* we will access controller registers, so enable the clk */
retval = mv_udc_enable_internal(udc);
if (retval)
- goto err_iounmap_phyreg;
+ return retval;
udc->op_regs =
(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
@@ -2279,11 +2239,11 @@ static int mv_udc_probe(struct platform_device *dev)
size = udc->max_eps * sizeof(struct mv_dqh) *2;
size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
- udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
+ udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
&udc->ep_dqh_dma, GFP_KERNEL);
if (udc->ep_dqh == NULL) {
- dev_err(&dev->dev, "allocate dQH memory failed\n");
+ dev_err(&pdev->dev, "allocate dQH memory failed\n");
retval = -ENOMEM;
goto err_disable_clock;
}
@@ -2291,7 +2251,7 @@ static int mv_udc_probe(struct platform_device *dev)
/* create dTD dma_pool resource */
udc->dtd_pool = dma_pool_create("mv_dtd",
- &dev->dev,
+ &pdev->dev,
sizeof(struct mv_dtd),
DTD_ALIGNMENT,
DMA_BOUNDARY);
@@ -2302,19 +2262,20 @@ static int mv_udc_probe(struct platform_device *dev)
}
size = udc->max_eps * sizeof(struct mv_ep) *2;
- udc->eps = kzalloc(size, GFP_KERNEL);
+ udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (udc->eps == NULL) {
- dev_err(&dev->dev, "allocate ep memory failed\n");
+ dev_err(&pdev->dev, "allocate ep memory failed\n");
retval = -ENOMEM;
goto err_destroy_dma;
}
/* initialize ep0 status request structure */
- udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
+ udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
+ GFP_KERNEL);
if (!udc->status_req) {
- dev_err(&dev->dev, "allocate status_req memory failed\n");
+ dev_err(&pdev->dev, "allocate status_req memory failed\n");
retval = -ENOMEM;
- goto err_free_eps;
+ goto err_destroy_dma;
}
INIT_LIST_HEAD(&udc->status_req->queue);
@@ -2329,17 +2290,17 @@ static int mv_udc_probe(struct platform_device *dev)
r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
if (r == NULL) {
- dev_err(&dev->dev, "no IRQ resource defined\n");
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
retval = -ENODEV;
- goto err_free_status_req;
+ goto err_destroy_dma;
}
udc->irq = r->start;
- if (request_irq(udc->irq, mv_udc_irq,
+ if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
IRQF_SHARED, driver_name, udc)) {
- dev_err(&dev->dev, "Request irq %d for UDC failed\n",
+ dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
udc->irq);
retval = -ENODEV;
- goto err_free_status_req;
+ goto err_destroy_dma;
}
/* initialize gadget structure */
@@ -2351,26 +2312,27 @@ static int mv_udc_probe(struct platform_device *dev)
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&udc->gadget.dev, "gadget");
- udc->gadget.dev.parent = &dev->dev;
- udc->gadget.dev.dma_mask = dev->dev.dma_mask;
+ udc->gadget.dev.parent = &pdev->dev;
+ udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
udc->gadget.dev.release = gadget_release;
udc->gadget.name = driver_name; /* gadget name */
retval = device_register(&udc->gadget.dev);
if (retval)
- goto err_free_irq;
+ goto err_destroy_dma;
eps_init(udc);
/* VBUS detect: we can disable/enable clock on demand.*/
- if (!IS_ERR_OR_NULL(udc->transceiver))
+ if (udc->transceiver)
udc->clock_gating = 1;
else if (pdata->vbus) {
udc->clock_gating = 1;
- retval = request_threaded_irq(pdata->vbus->irq, NULL,
+ retval = devm_request_threaded_irq(&pdev->dev,
+ pdata->vbus->irq, NULL,
mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
if (retval) {
- dev_info(&dev->dev,
+ dev_info(&pdev->dev,
"Can not request irq for VBUS, "
"disable clock gating\n");
udc->clock_gating = 0;
@@ -2378,7 +2340,7 @@ static int mv_udc_probe(struct platform_device *dev)
udc->qwork = create_singlethread_workqueue("mv_udc_queue");
if (!udc->qwork) {
- dev_err(&dev->dev, "cannot create workqueue\n");
+ dev_err(&pdev->dev, "cannot create workqueue\n");
retval = -ENOMEM;
goto err_unregister;
}
@@ -2396,53 +2358,40 @@ static int mv_udc_probe(struct platform_device *dev)
else
udc->vbus_active = 1;
- retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
+ retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (retval)
- goto err_unregister;
+ goto err_create_workqueue;
- dev_info(&dev->dev, "successful probe UDC device %s clock gating.\n",
+ platform_set_drvdata(pdev, udc);
+ dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
udc->clock_gating ? "with" : "without");
return 0;
+err_create_workqueue:
+ destroy_workqueue(udc->qwork);
err_unregister:
- if (udc->pdata && udc->pdata->vbus
- && udc->clock_gating && IS_ERR_OR_NULL(udc->transceiver))
- free_irq(pdata->vbus->irq, &dev->dev);
device_unregister(&udc->gadget.dev);
-err_free_irq:
- free_irq(udc->irq, &dev->dev);
-err_free_status_req:
- kfree(udc->status_req->req.buf);
- kfree(udc->status_req);
-err_free_eps:
- kfree(udc->eps);
err_destroy_dma:
dma_pool_destroy(udc->dtd_pool);
err_free_dma:
- dma_free_coherent(&dev->dev, udc->ep_dqh_size,
+ dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
mv_udc_disable_internal(udc);
-err_iounmap_phyreg:
- iounmap(udc->phy_regs);
-err_iounmap_capreg:
- iounmap(udc->cap_regs);
-err_put_clk:
- for (clk_i--; clk_i >= 0; clk_i--)
- clk_put(udc->clk[clk_i]);
- the_controller = NULL;
- kfree(udc);
+
return retval;
}
#ifdef CONFIG_PM
-static int mv_udc_suspend(struct device *_dev)
+static int mv_udc_suspend(struct device *dev)
{
- struct mv_udc *udc = the_controller;
+ struct mv_udc *udc;
+
+ udc = dev_get_drvdata(dev);
/* if OTG is enabled, the following will be done in OTG driver*/
- if (!IS_ERR_OR_NULL(udc->transceiver))
+ if (udc->transceiver)
return 0;
if (udc->pdata->vbus && udc->pdata->vbus->poll)
@@ -2469,13 +2418,15 @@ static int mv_udc_suspend(struct device *_dev)
return 0;
}
-static int mv_udc_resume(struct device *_dev)
+static int mv_udc_resume(struct device *dev)
{
- struct mv_udc *udc = the_controller;
+ struct mv_udc *udc;
int retval;
+ udc = dev_get_drvdata(dev);
+
/* if OTG is enabled, the following will be done in OTG driver*/
- if (!IS_ERR_OR_NULL(udc->transceiver))
+ if (udc->transceiver)
return 0;
if (!udc->clock_gating) {
@@ -2499,11 +2450,12 @@ static const struct dev_pm_ops mv_udc_pm_ops = {
};
#endif
-static void mv_udc_shutdown(struct platform_device *dev)
+static void mv_udc_shutdown(struct platform_device *pdev)
{
- struct mv_udc *udc = the_controller;
+ struct mv_udc *udc;
u32 mode;
+ udc = platform_get_drvdata(pdev);
/* reset controller mode to IDLE */
mv_udc_enable(udc);
mode = readl(&udc->op_regs->usbmode);
@@ -2514,7 +2466,7 @@ static void mv_udc_shutdown(struct platform_device *dev)
static struct platform_driver udc_driver = {
.probe = mv_udc_probe,
- .remove = __exit_p(mv_udc_remove),
+ .remove = mv_udc_remove,
.shutdown = mv_udc_shutdown,
.driver = {
.owner = THIS_MODULE,
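Most of the churn in the mv_udc_core.c hunks above comes from switching probe() to managed (devm_*) allocations, which is why the explicit kfree/iounmap/free_irq/clk_put unwinding disappears from both the error paths and mv_udc_remove(). Reduced to a generic sketch (struct foo, foo_irq and the resource indices are illustrative, not taken from this driver):

struct foo {
	void __iomem	*regs;
	struct clk	*clk;
};

static irqreturn_t foo_irq(int irq, void *data);

static int foo_probe(struct platform_device *pdev)
{
	struct foo *priv;
	struct resource *r;
	int irq, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;
	priv->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!priv->regs)
		return -EBUSY;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, foo_irq, IRQF_SHARED,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);	/* lets remove()/PM callbacks find priv */
	return 0;
	/* every devm_* resource above is released automatically on probe failure
	 * or when the device is unbound */
}

Note that phy_regs in the hunk above is still mapped with plain ioremap() while its iounmap() calls were dropped, so it does not get this automatic cleanup; converting it to devm_ioremap() as well would close that gap.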
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 708c0b55dcc8..a1b650e11339 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -116,6 +116,10 @@ static bool enable_suspend = 0;
/* "modprobe net2280 enable_suspend=1" etc */
module_param (enable_suspend, bool, S_IRUGO);
+/* force full-speed operation */
+static bool full_speed;
+module_param(full_speed, bool, 0444);
+MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!");
#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
@@ -1899,6 +1903,10 @@ static int net2280_start(struct usb_gadget *_gadget,
retval = device_create_file (&dev->pdev->dev, &dev_attr_queues);
if (retval) goto err_func;
+ /* Enable force-full-speed testing mode, if desired */
+ if (full_speed)
+ writel(1 << FORCE_FULL_SPEED_MODE, &dev->usb->xcvrdiag);
+
/* ... then enable host detection and ep0; and we're ready
* for set_configuration as well as eventual disconnect.
*/
@@ -1957,6 +1965,10 @@ static int net2280_stop(struct usb_gadget *_gadget,
dev->driver = NULL;
net2280_led_active (dev, 0);
+
+ /* Disable full-speed test mode */
+ writel(0, &dev->usb->xcvrdiag);
+
device_remove_file (&dev->pdev->dev, &dev_attr_function);
device_remove_file (&dev->pdev->dev, &dev_attr_queues);
@@ -2841,6 +2853,9 @@ static void net2280_shutdown (struct pci_dev *pdev)
/* disable the pullup so the host will think we're gone */
writel (0, &dev->usb->usbctl);
+
+ /* Disable full-speed test mode */
+ writel(0, &dev->usb->xcvrdiag);
}
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
index 661600abace8..def37403989a 100644
--- a/drivers/usb/gadget/nokia.c
+++ b/drivers/usb/gadget/nokia.c
@@ -37,7 +37,7 @@
* the runtime footprint, and giving us at least some parts of what
* a "gcc --combine ... part1.c part2.c part3.c ... " build would.
*/
-#include "u_serial.c"
+#define USB_FACM_INCLUDED
#include "f_acm.c"
#include "f_ecm.c"
#include "f_obex.c"
@@ -101,6 +101,15 @@ MODULE_LICENSE("GPL");
static u8 hostaddr[ETH_ALEN];
+enum {
+ TTY_PORT_OBEX0,
+ TTY_PORT_OBEX1,
+ TTY_PORT_ACM,
+ TTY_PORTS_MAX,
+};
+
+static unsigned char tty_lines[TTY_PORTS_MAX];
+
static int __init nokia_bind_config(struct usb_configuration *c)
{
int status = 0;
@@ -109,15 +118,15 @@ static int __init nokia_bind_config(struct usb_configuration *c)
if (status)
printk(KERN_DEBUG "could not bind phonet config\n");
- status = obex_bind_config(c, 0);
+ status = obex_bind_config(c, tty_lines[TTY_PORT_OBEX0]);
if (status)
printk(KERN_DEBUG "could not bind obex config %d\n", 0);
- status = obex_bind_config(c, 1);
+ status = obex_bind_config(c, tty_lines[TTY_PORT_OBEX1]);
if (status)
printk(KERN_DEBUG "could not bind obex config %d\n", 0);
- status = acm_bind_config(c, 2);
+ status = acm_bind_config(c, tty_lines[TTY_PORT_ACM]);
if (status)
printk(KERN_DEBUG "could not bind acm config\n");
@@ -133,7 +142,7 @@ static struct usb_configuration nokia_config_500ma_driver = {
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_ONE,
- .bMaxPower = 250, /* 500mA */
+ .MaxPower = 500,
};
static struct usb_configuration nokia_config_100ma_driver = {
@@ -141,21 +150,24 @@ static struct usb_configuration nokia_config_100ma_driver = {
.bConfigurationValue = 2,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
- .bMaxPower = 50, /* 100 mA */
+ .MaxPower = 100,
};
static int __init nokia_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
int status;
+ int cur_line;
status = gphonet_setup(cdev->gadget);
if (status < 0)
goto err_phonet;
- status = gserial_setup(cdev->gadget, 3);
- if (status < 0)
- goto err_serial;
+ for (cur_line = 0; cur_line < TTY_PORTS_MAX; cur_line++) {
+ status = gserial_alloc_line(&tty_lines[cur_line]);
+ if (status)
+ goto err_ether;
+ }
status = gether_setup(cdev->gadget, hostaddr);
if (status < 0)
@@ -192,8 +204,10 @@ static int __init nokia_bind(struct usb_composite_dev *cdev)
err_usb:
gether_cleanup();
err_ether:
- gserial_cleanup();
-err_serial:
+ cur_line--;
+ while (cur_line >= 0)
+ gserial_free_line(tty_lines[cur_line--]);
+
gphonet_cleanup();
err_phonet:
return status;
@@ -201,8 +215,13 @@ err_phonet:
static int __exit nokia_unbind(struct usb_composite_dev *cdev)
{
+ int i;
+
gphonet_cleanup();
- gserial_cleanup();
+
+ for (i = 0; i < TTY_PORTS_MAX; i++)
+ gserial_free_line(tty_lines[i]);
+
gether_cleanup();
return 0;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 8bfe990caf1a..06be85c2b233 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -1309,19 +1309,20 @@ static int omap_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
-static int omap_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int omap_udc_stop(struct usb_gadget_driver *driver);
+static int omap_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int omap_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
-static struct usb_gadget_ops omap_gadget_ops = {
+static const struct usb_gadget_ops omap_gadget_ops = {
.get_frame = omap_get_frame,
.wakeup = omap_wakeup,
.set_selfpowered = omap_set_selfpowered,
.vbus_session = omap_vbus_session,
.vbus_draw = omap_vbus_draw,
.pullup = omap_pullup,
- .start = omap_udc_start,
- .stop = omap_udc_stop,
+ .udc_start = omap_udc_start,
+ .udc_stop = omap_udc_stop,
};
/*-------------------------------------------------------------------------*/
@@ -2041,28 +2042,15 @@ static inline int machine_without_vbus_sense(void)
|| cpu_is_omap7xx();
}
-static int omap_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int omap_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
int status = -ENODEV;
struct omap_ep *ep;
unsigned long flags;
- /* basic sanity tests */
- if (!udc)
- return -ENODEV;
- if (!driver
- /* FIXME if otg, check: driver->is_otg */
- || driver->max_speed < USB_SPEED_FULL
- || !bind || !driver->setup)
- return -EINVAL;
spin_lock_irqsave(&udc->lock, flags);
- if (udc->driver) {
- spin_unlock_irqrestore(&udc->lock, flags);
- return -EBUSY;
- }
-
/* reset state */
list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
ep->irqs = 0;
@@ -2084,15 +2072,6 @@ static int omap_udc_start(struct usb_gadget_driver *driver,
if (udc->dc_clk != NULL)
omap_udc_enable_clock(1);
- status = bind(&udc->gadget, driver);
- if (status) {
- DBG("bind to %s --> %d\n", driver->driver.name, status);
- udc->gadget.dev.driver = NULL;
- udc->driver = NULL;
- goto done;
- }
- DBG("bound to driver %s\n", driver->driver.name);
-
omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
/* connect to bus through transceiver */
@@ -2124,19 +2103,16 @@ static int omap_udc_start(struct usb_gadget_driver *driver,
done:
if (udc->dc_clk != NULL)
omap_udc_enable_clock(0);
+
return status;
}
-static int omap_udc_stop(struct usb_gadget_driver *driver)
+static int omap_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
unsigned long flags;
int status = -ENODEV;
- if (!udc)
- return -ENODEV;
- if (!driver || driver != udc->driver || !driver->unbind)
- return -EINVAL;
-
if (udc->dc_clk != NULL)
omap_udc_enable_clock(1);
@@ -2152,13 +2128,12 @@ static int omap_udc_stop(struct usb_gadget_driver *driver)
udc_quiesce(udc);
spin_unlock_irqrestore(&udc->lock, flags);
- driver->unbind(&udc->gadget);
udc->gadget.dev.driver = NULL;
udc->driver = NULL;
if (udc->dc_clk != NULL)
omap_udc_enable_clock(0);
- DBG("unregistered driver '%s'\n", driver->driver.name);
+
return status;
}
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 6490c0040e3a..a787a8ef672b 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -375,6 +375,7 @@ struct pch_udc_dev {
struct pch_udc_cfg_data cfg_data;
struct pch_vbus_gpio_data vbus_gpio;
};
+#define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
#define PCH_UDC_PCI_BAR 1
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
@@ -384,7 +385,6 @@ struct pch_udc_dev {
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
-struct pch_udc_dev *pch_udc; /* pointer to device object */
static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
@@ -1235,9 +1235,10 @@ static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
return -EOPNOTSUPP;
}
-static int pch_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int pch_udc_stop(struct usb_gadget_driver *driver);
+static int pch_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int pch_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
static const struct usb_gadget_ops pch_udc_ops = {
.get_frame = pch_udc_pcd_get_frame,
.wakeup = pch_udc_pcd_wakeup,
@@ -1245,8 +1246,8 @@ static const struct usb_gadget_ops pch_udc_ops = {
.pullup = pch_udc_pcd_pullup,
.vbus_session = pch_udc_pcd_vbus_session,
.vbus_draw = pch_udc_pcd_vbus_draw,
- .start = pch_udc_start,
- .stop = pch_udc_stop,
+ .udc_start = pch_udc_start,
+ .udc_stop = pch_udc_stop,
};
/**
@@ -2981,40 +2982,15 @@ static int init_dma_pools(struct pch_udc_dev *dev)
return 0;
}
-static int pch_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int pch_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct pch_udc_dev *dev = pch_udc;
- int retval;
-
- if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
- !driver->setup || !driver->unbind || !driver->disconnect) {
- dev_err(&dev->pdev->dev,
- "%s: invalid driver parameter\n", __func__);
- return -EINVAL;
- }
+ struct pch_udc_dev *dev = to_pch_udc(g);
- if (!dev)
- return -ENODEV;
-
- if (dev->driver) {
- dev_err(&dev->pdev->dev, "%s: already bound\n", __func__);
- return -EBUSY;
- }
driver->driver.bus = NULL;
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
- /* Invoke the bind routine of the gadget driver */
- retval = bind(&dev->gadget, driver);
-
- if (retval) {
- dev_err(&dev->pdev->dev, "%s: binding to %s returning %d\n",
- __func__, driver->driver.name, retval);
- dev->driver = NULL;
- dev->gadget.dev.driver = NULL;
- return retval;
- }
/* get ready for ep0 traffic */
pch_udc_setup_ep0(dev);
@@ -3026,30 +3002,21 @@ static int pch_udc_start(struct usb_gadget_driver *driver,
return 0;
}
-static int pch_udc_stop(struct usb_gadget_driver *driver)
+static int pch_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct pch_udc_dev *dev = pch_udc;
-
- if (!dev)
- return -ENODEV;
-
- if (!driver || (driver != dev->driver)) {
- dev_err(&dev->pdev->dev,
- "%s: invalid driver parameter\n", __func__);
- return -EINVAL;
- }
+ struct pch_udc_dev *dev = to_pch_udc(g);
pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
/* Assures that there are no pending requests with this driver */
- driver->disconnect(&dev->gadget);
- driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
dev->connected = 0;
/* set SD */
pch_udc_set_disconnect(dev);
+
return 0;
}
@@ -3164,11 +3131,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
int retval;
struct pch_udc_dev *dev;
- /* one udc only */
- if (pch_udc) {
- pr_err("%s: already probed\n", __func__);
- return -EBUSY;
- }
/* init */
dev = kzalloc(sizeof *dev, GFP_KERNEL);
if (!dev) {
@@ -3207,7 +3169,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
retval = -ENODEV;
goto finished;
}
- pch_udc = dev;
/* initialize the hardware */
if (pch_udc_pcd_init(dev)) {
retval = -ENODEV;
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index d4ca9f1f7f24..2bbcdce942dc 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -996,9 +996,10 @@ static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
return -EOPNOTSUPP;
}
-static int pxa25x_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int pxa25x_stop(struct usb_gadget_driver *driver);
+static int pxa25x_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int pxa25x_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
static const struct usb_gadget_ops pxa25x_udc_ops = {
.get_frame = pxa25x_udc_get_frame,
@@ -1006,8 +1007,8 @@ static const struct usb_gadget_ops pxa25x_udc_ops = {
.vbus_session = pxa25x_udc_vbus_session,
.pullup = pxa25x_udc_pullup,
.vbus_draw = pxa25x_udc_vbus_draw,
- .start = pxa25x_start,
- .stop = pxa25x_stop,
+ .udc_start = pxa25x_udc_start,
+ .udc_stop = pxa25x_udc_stop,
};
/*-------------------------------------------------------------------------*/
@@ -1254,23 +1255,12 @@ static void udc_enable (struct pxa25x_udc *dev)
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
-static int pxa25x_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int pxa25x_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct pxa25x_udc *dev = the_controller;
+ struct pxa25x_udc *dev = to_pxa25x(g);
int retval;
- if (!driver
- || driver->max_speed < USB_SPEED_FULL
- || !bind
- || !driver->disconnect
- || !driver->setup)
- return -EINVAL;
- if (!dev)
- return -ENODEV;
- if (dev->driver)
- return -EBUSY;
-
/* first hook up the driver ... */
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
@@ -1278,34 +1268,20 @@ static int pxa25x_start(struct usb_gadget_driver *driver,
retval = device_add (&dev->gadget.dev);
if (retval) {
-fail:
dev->driver = NULL;
dev->gadget.dev.driver = NULL;
return retval;
}
- retval = bind(&dev->gadget, driver);
- if (retval) {
- DMSG("bind to driver %s --> error %d\n",
- driver->driver.name, retval);
- device_del (&dev->gadget.dev);
- goto fail;
- }
/* ... then enable host detection and ep0; and we're ready
* for set_configuration as well as eventual disconnect.
*/
- DMSG("registered gadget driver '%s'\n", driver->driver.name);
-
/* connect to bus through transceiver */
if (!IS_ERR_OR_NULL(dev->transceiver)) {
retval = otg_set_peripheral(dev->transceiver->otg,
&dev->gadget);
- if (retval) {
- DMSG("can't bind to transceiver\n");
- if (driver->unbind)
- driver->unbind(&dev->gadget);
+ if (retval)
goto bind_fail;
- }
}
pullup(dev);
@@ -1334,22 +1310,14 @@ stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
}
del_timer_sync(&dev->timer);
- /* report disconnect; the driver is already quiesced */
- if (driver)
- driver->disconnect(&dev->gadget);
-
/* re-init driver-visible data structures */
udc_reinit(dev);
}
-static int pxa25x_stop(struct usb_gadget_driver *driver)
+static int pxa25x_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct pxa25x_udc *dev = the_controller;
-
- if (!dev)
- return -ENODEV;
- if (!driver || driver != dev->driver || !driver->unbind)
- return -EINVAL;
+ struct pxa25x_udc *dev = to_pxa25x(g);
local_irq_disable();
dev->pullup = 0;
@@ -1360,14 +1328,12 @@ static int pxa25x_stop(struct usb_gadget_driver *driver)
if (!IS_ERR_OR_NULL(dev->transceiver))
(void) otg_set_peripheral(dev->transceiver->otg, NULL);
- driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
device_del (&dev->gadget.dev);
-
- DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
dump_state(dev);
+
return 0;
}
@@ -2100,6 +2066,8 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
int retval, irq;
u32 chiprev;
+ pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
+
/* insist on Intel/ARM/XScale */
asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
@@ -2346,18 +2314,7 @@ static struct platform_driver udc_driver = {
},
};
-static int __init udc_init(void)
-{
- pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
- return platform_driver_probe(&udc_driver, pxa25x_udc_probe);
-}
-module_init(udc_init);
-
-static void __exit udc_exit(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-module_exit(udc_exit);
+module_platform_driver_probe(udc_driver, pxa25x_udc_probe);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
diff --git a/drivers/usb/gadget/pxa25x_udc.h b/drivers/usb/gadget/pxa25x_udc.h
index 2eca1e71fecd..3fe5931dc21a 100644
--- a/drivers/usb/gadget/pxa25x_udc.h
+++ b/drivers/usb/gadget/pxa25x_udc.h
@@ -126,6 +126,7 @@ struct pxa25x_udc {
struct dentry *debugfs_udc;
#endif
};
+#define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget))
/*-------------------------------------------------------------------------*/
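The conversions above all follow the same shape: the controller's start/stop callbacks now take the struct usb_gadget pointer, recover the driver's private state through a container_of() helper such as to_pch_udc() or to_pxa25x(), and leave driver validation plus the bind()/unbind()/disconnect() calls to udc-core. A minimal sketch of that shape, using a hypothetical foo_udc controller that is not part of this patch:

#include <linux/usb/gadget.h>

struct foo_udc {
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
};
#define to_foo(g) (container_of((g), struct foo_udc, gadget))

static int foo_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct foo_udc *dev = to_foo(g);

	/* udc-core has already validated the driver and run its bind() */
	dev->driver = driver;
	/* enable the controller hardware here */
	return 0;
}

static int foo_udc_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct foo_udc *dev = to_foo(g);

	/* quiesce the hardware; disconnect()/unbind() were already called */
	dev->driver = NULL;
	return 0;
}

static const struct usb_gadget_ops foo_udc_ops = {
	.udc_start	= foo_udc_start,
	.udc_stop	= foo_udc_stop,
};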
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 2b3b01d5f403..f7d25795821a 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1671,9 +1671,10 @@ static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
return -EOPNOTSUPP;
}
-static int pxa27x_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int pxa27x_udc_stop(struct usb_gadget_driver *driver);
+static int pxa27x_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int pxa27x_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
static const struct usb_gadget_ops pxa_udc_ops = {
.get_frame = pxa_udc_get_frame,
@@ -1681,8 +1682,8 @@ static const struct usb_gadget_ops pxa_udc_ops = {
.pullup = pxa_udc_pullup,
.vbus_session = pxa_udc_vbus_session,
.vbus_draw = pxa_udc_vbus_draw,
- .start = pxa27x_udc_start,
- .stop = pxa27x_udc_stop,
+ .udc_start = pxa27x_udc_start,
+ .udc_stop = pxa27x_udc_stop,
};
/**
@@ -1802,20 +1803,12 @@ static void udc_enable(struct pxa_udc *udc)
*
* Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
*/
-static int pxa27x_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int pxa27x_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct pxa_udc *udc = the_controller;
+ struct pxa_udc *udc = to_pxa(g);
int retval;
- if (!driver || driver->max_speed < USB_SPEED_FULL || !bind
- || !driver->disconnect || !driver->setup)
- return -EINVAL;
- if (!udc)
- return -ENODEV;
- if (udc->driver)
- return -EBUSY;
-
/* first hook up the driver ... */
udc->driver = driver;
udc->gadget.dev.driver = &driver->driver;
@@ -1824,23 +1817,14 @@ static int pxa27x_udc_start(struct usb_gadget_driver *driver,
retval = device_add(&udc->gadget.dev);
if (retval) {
dev_err(udc->dev, "device_add error %d\n", retval);
- goto add_fail;
+ goto fail;
}
- retval = bind(&udc->gadget, driver);
- if (retval) {
- dev_err(udc->dev, "bind to driver %s --> error %d\n",
- driver->driver.name, retval);
- goto bind_fail;
- }
- dev_dbg(udc->dev, "registered gadget driver '%s'\n",
- driver->driver.name);
-
if (!IS_ERR_OR_NULL(udc->transceiver)) {
retval = otg_set_peripheral(udc->transceiver->otg,
&udc->gadget);
if (retval) {
dev_err(udc->dev, "can't bind to transceiver\n");
- goto transceiver_fail;
+ goto fail;
}
}
@@ -1848,12 +1832,7 @@ static int pxa27x_udc_start(struct usb_gadget_driver *driver,
udc_enable(udc);
return 0;
-transceiver_fail:
- if (driver->unbind)
- driver->unbind(&udc->gadget);
-bind_fail:
- device_del(&udc->gadget.dev);
-add_fail:
+fail:
udc->driver = NULL;
udc->gadget.dev.driver = NULL;
return retval;
@@ -1878,9 +1857,6 @@ static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
for (i = 0; i < NR_USB_ENDPOINTS; i++)
pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
-
- if (driver)
- driver->disconnect(&udc->gadget);
}
/**
@@ -1889,25 +1865,18 @@ static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
*
* Returns 0 if no error, -ENODEV, -EINVAL otherwise
*/
-static int pxa27x_udc_stop(struct usb_gadget_driver *driver)
+static int pxa27x_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct pxa_udc *udc = the_controller;
-
- if (!udc)
- return -ENODEV;
- if (!driver || driver != udc->driver || !driver->unbind)
- return -EINVAL;
+ struct pxa_udc *udc = to_pxa(g);
stop_activity(udc, driver);
udc_disable(udc);
dplus_pullup(udc, 0);
- driver->unbind(&udc->gadget);
udc->driver = NULL;
device_del(&udc->gadget.dev);
- dev_info(udc->dev, "unregistered gadget driver '%s'\n",
- driver->driver.name);
if (!IS_ERR_OR_NULL(udc->transceiver))
return otg_set_peripheral(udc->transceiver->otg, NULL);
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index 79d81a4b2344..28f2b53530f5 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -473,6 +473,7 @@ struct pxa_udc {
struct dentry *debugfs_eps;
#endif
};
+#define to_pxa(g) (container_of((g), struct pxa_udc, gadget))
static inline struct pxa_udc *to_gadget_udc(struct usb_gadget *gadget)
{
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 5a80751accb7..f46a1b77ce3e 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1812,7 +1812,7 @@ static int r8a66597_set_selfpowered(struct usb_gadget *gadget, int is_self)
return 0;
}
-static struct usb_gadget_ops r8a66597_gadget_ops = {
+static const struct usb_gadget_ops r8a66597_gadget_ops = {
.get_frame = r8a66597_get_frame,
.udc_start = r8a66597_start,
.udc_stop = r8a66597_stop,
@@ -2031,21 +2031,10 @@ static struct platform_driver r8a66597_driver = {
.name = (char *) udc_name,
},
};
-MODULE_ALIAS("platform:r8a66597_udc");
-
-static int __init r8a66597_udc_init(void)
-{
- return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
-}
-module_init(r8a66597_udc_init);
-static void __exit r8a66597_udc_cleanup(void)
-{
- platform_driver_unregister(&r8a66597_driver);
-}
-module_exit(r8a66597_udc_cleanup);
+module_platform_driver_probe(r8a66597_driver, r8a66597_probe);
MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
-
+MODULE_ALIAS("platform:r8a66597_udc");
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 141971d9051e..c26564f29a2c 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -32,6 +32,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/phy.h>
#include <linux/platform_data/s3c-hsotg.h>
#include <mach/map.h>
@@ -133,7 +134,9 @@ struct s3c_hsotg_ep {
* struct s3c_hsotg - driver state.
* @dev: The parent device supplied to the probe function
* @driver: USB gadget driver
- * @plat: The platform specific configuration data.
+ * @phy: The otg phy transceiver structure for phy control.
+ * @plat: The platform specific configuration data. This can be removed once
+ * all SoCs support a usb transceiver.
* @regs: The memory area mapped for accessing registers.
* @irq: The IRQ number we are using
* @supplies: Definition of USB power supplies
@@ -153,6 +156,7 @@ struct s3c_hsotg_ep {
struct s3c_hsotg {
struct device *dev;
struct usb_gadget_driver *driver;
+ struct usb_phy *phy;
struct s3c_hsotg_plat *plat;
spinlock_t lock;
@@ -2854,7 +2858,10 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
struct platform_device *pdev = to_platform_device(hsotg->dev);
dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
- if (hsotg->plat->phy_init)
+
+ if (hsotg->phy)
+ usb_phy_init(hsotg->phy);
+ else if (hsotg->plat->phy_init)
hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
}
@@ -2869,7 +2876,9 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
- if (hsotg->plat->phy_exit)
+ if (hsotg->phy)
+ usb_phy_shutdown(hsotg->phy);
+ else if (hsotg->plat->phy_exit)
hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
}
@@ -3055,7 +3064,7 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
-static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
+static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
.get_frame = s3c_hsotg_gadget_getframe,
.udc_start = s3c_hsotg_udc_start,
.udc_stop = s3c_hsotg_udc_stop,
@@ -3477,12 +3486,11 @@ static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
/**
* s3c_hsotg_release - release callback for hsotg device
* @dev: Device for which release is called
+ *
+ * Nothing to do as the resource is allocated using devm_ API.
*/
static void s3c_hsotg_release(struct device *dev)
{
- struct s3c_hsotg *hsotg = dev_get_drvdata(dev);
-
- kfree(hsotg);
}
/**
@@ -3493,6 +3501,7 @@ static void s3c_hsotg_release(struct device *dev)
static int s3c_hsotg_probe(struct platform_device *pdev)
{
struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
+ struct usb_phy *phy;
struct device *dev = &pdev->dev;
struct s3c_hsotg_ep *eps;
struct s3c_hsotg *hsotg;
@@ -3501,20 +3510,27 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
int ret;
int i;
- plat = pdev->dev.platform_data;
- if (!plat) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
-
hsotg = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsotg), GFP_KERNEL);
if (!hsotg) {
dev_err(dev, "cannot get memory\n");
return -ENOMEM;
}
+ phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(phy)) {
+ /* Fallback for pdata */
+ plat = pdev->dev.platform_data;
+ if (!plat) {
+ dev_err(&pdev->dev, "no platform data or transceiver defined\n");
+ return -EPROBE_DEFER;
+ } else {
+ hsotg->plat = plat;
+ }
+ } else {
+ hsotg->phy = phy;
+ }
+
hsotg->dev = dev;
- hsotg->plat = plat;
hsotg->clk = devm_clk_get(&pdev->dev, "otg");
if (IS_ERR(hsotg->clk)) {
@@ -3526,10 +3542,9 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hsotg->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!hsotg->regs) {
- dev_err(dev, "cannot map registers\n");
- ret = -ENXIO;
+ hsotg->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hsotg->regs)) {
+ ret = PTR_ERR(hsotg->regs);
goto err_clk;
}
@@ -3573,7 +3588,7 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];
- ret = regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
if (ret) {
dev_err(dev, "failed to request supplies: %d\n", ret);
@@ -3663,8 +3678,6 @@ err_ep_mem:
kfree(eps);
err_supplies:
s3c_hsotg_phy_disable(hsotg);
- regulator_bulk_free(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
-
err_clk:
clk_disable_unprepare(hsotg->clk);
@@ -3689,7 +3702,6 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
}
s3c_hsotg_phy_disable(hsotg);
- regulator_bulk_free(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
clk_disable_unprepare(hsotg->clk);
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 52379b11f080..458965a1b138 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -435,7 +435,7 @@ static void s3c_hsudc_epin_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
struct s3c_hsudc_req *hsreq;
u32 csr;
- csr = readl((u32)hsudc->regs + S3C_ESR);
+ csr = readl(hsudc->regs + S3C_ESR);
if (csr & S3C_ESR_STALL) {
writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
return;
@@ -468,7 +468,7 @@ static void s3c_hsudc_epout_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
struct s3c_hsudc_req *hsreq;
u32 csr;
- csr = readl((u32)hsudc->regs + S3C_ESR);
+ csr = readl(hsudc->regs + S3C_ESR);
if (csr & S3C_ESR_STALL) {
writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
return;
@@ -901,12 +901,12 @@ static int s3c_hsudc_queue(struct usb_ep *_ep, struct usb_request *_req,
if (list_empty(&hsep->queue) && !hsep->stopped) {
offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
if (ep_is_in(hsep)) {
- csr = readl((u32)hsudc->regs + offset);
+ csr = readl(hsudc->regs + offset);
if (!(csr & S3C_ESR_TX_SUCCESS) &&
(s3c_hsudc_write_fifo(hsep, hsreq) == 1))
hsreq = NULL;
} else {
- csr = readl((u32)hsudc->regs + offset);
+ csr = readl(hsudc->regs + offset);
if ((csr & S3C_ESR_RX_SUCCESS)
&& (s3c_hsudc_read_fifo(hsep, hsreq) == 1))
hsreq = NULL;
@@ -1254,7 +1254,7 @@ static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
return -EOPNOTSUPP;
}
-static struct usb_gadget_ops s3c_hsudc_gadget_ops = {
+static const struct usb_gadget_ops s3c_hsudc_gadget_ops = {
.get_frame = s3c_hsudc_gadget_getframe,
.udc_start = s3c_hsudc_start,
.udc_stop = s3c_hsudc_stop,
@@ -1286,7 +1286,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++)
hsudc->supplies[i].supply = s3c_hsudc_supply_names[i];
- ret = regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
hsudc->supplies);
if (ret != 0) {
dev_err(dev, "failed to request supplies: %d\n", ret);
@@ -1295,10 +1295,9 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hsudc->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!hsudc->regs) {
- dev_err(dev, "error mapping device register area\n");
- ret = -EBUSY;
+ hsudc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hsudc->regs)) {
+ ret = PTR_ERR(hsudc->regs);
goto err_res;
}
@@ -1367,7 +1366,6 @@ err_res:
if (!IS_ERR_OR_NULL(hsudc->transceiver))
usb_put_phy(hsudc->transceiver);
- regulator_bulk_free(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
err_supplies:
return ret;
}
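Several probe paths in this series (s3c-hsotg above, s3c-hsudc here, ehci-atmel below) move from devm_request_and_ioremap(), which returned NULL on failure, to devm_ioremap_resource(), which returns an ERR_PTR and logs the failure itself. A sketch of the resulting idiom, with a hypothetical foo_probe() standing in for the converted drivers:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* helper already printed the error */

	/* ... program the device through regs ... */
	return 0;
}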
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index a2fa6e16d019..fc07b4381286 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1538,9 +1538,10 @@ static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
return -ENOTSUPP;
}
-static int s3c2410_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
-static int s3c2410_udc_stop(struct usb_gadget_driver *driver);
+static int s3c2410_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int s3c2410_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
static const struct usb_gadget_ops s3c2410_ops = {
.get_frame = s3c2410_udc_get_frame,
@@ -1549,8 +1550,8 @@ static const struct usb_gadget_ops s3c2410_ops = {
.pullup = s3c2410_udc_pullup,
.vbus_session = s3c2410_udc_vbus_session,
.vbus_draw = s3c2410_vbus_draw,
- .start = s3c2410_udc_start,
- .stop = s3c2410_udc_stop,
+ .udc_start = s3c2410_udc_start,
+ .udc_stop = s3c2410_udc_stop,
};
static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
@@ -1664,33 +1665,14 @@ static void s3c2410_udc_enable(struct s3c2410_udc *dev)
s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
}
-static int s3c2410_udc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
+static int s3c2410_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct s3c2410_udc *udc = the_controller;
+ struct s3c2410_udc *udc = to_s3c2410(g);
int retval;
dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name);
- /* Sanity checks */
- if (!udc)
- return -ENODEV;
-
- if (udc->driver)
- return -EBUSY;
-
- if (!bind || !driver->setup || driver->max_speed < USB_SPEED_FULL) {
- dev_err(&udc->gadget.dev, "Invalid driver: bind %p setup %p speed %d\n",
- bind, driver->setup, driver->max_speed);
- return -EINVAL;
- }
-#if defined(MODULE)
- if (!driver->unbind) {
- dev_err(&udc->gadget.dev, "Invalid driver: no unbind method\n");
- return -EINVAL;
- }
-#endif
-
/* Hook the driver */
udc->driver = driver;
udc->gadget.dev.driver = &driver->driver;
@@ -1702,15 +1684,6 @@ static int s3c2410_udc_start(struct usb_gadget_driver *driver,
goto register_error;
}
- dprintk(DEBUG_NORMAL, "binding gadget driver '%s'\n",
- driver->driver.name);
-
- retval = bind(&udc->gadget, driver);
- if (retval) {
- device_del(&udc->gadget.dev);
- goto register_error;
- }
-
/* Enable udc */
s3c2410_udc_enable(udc);
@@ -1722,24 +1695,10 @@ register_error:
return retval;
}
-static int s3c2410_udc_stop(struct usb_gadget_driver *driver)
+static int s3c2410_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct s3c2410_udc *udc = the_controller;
-
- if (!udc)
- return -ENODEV;
-
- if (!driver || driver != udc->driver || !driver->unbind)
- return -EINVAL;
-
- dprintk(DEBUG_NORMAL, "usb_gadget_unregister_driver() '%s'\n",
- driver->driver.name);
-
- /* report disconnect */
- if (driver->disconnect)
- driver->disconnect(&udc->gadget);
-
- driver->unbind(&udc->gadget);
+ struct s3c2410_udc *udc = to_s3c2410(g);
device_del(&udc->gadget.dev);
udc->driver = NULL;
diff --git a/drivers/usb/gadget/s3c2410_udc.h b/drivers/usb/gadget/s3c2410_udc.h
index 3e80fd5c820f..93bf225f1969 100644
--- a/drivers/usb/gadget/s3c2410_udc.h
+++ b/drivers/usb/gadget/s3c2410_udc.h
@@ -95,5 +95,6 @@ struct s3c2410_udc {
u8 vbus;
struct dentry *regs_info;
};
+#define to_s3c2410(g) (container_of((g), struct s3c2410_udc, gadget))
#endif
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 44752f531e85..68d7bb06ebcb 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -36,10 +36,8 @@
* the runtime footprint, and giving us at least some parts of what
* a "gcc --combine ... part1.c part2.c part3.c ... " build would.
*/
-#include "f_acm.c"
#include "f_obex.c"
#include "f_serial.c"
-#include "u_serial.c"
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
@@ -128,20 +126,25 @@ module_param(n_ports, uint, 0);
MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
/*-------------------------------------------------------------------------*/
+static unsigned char tty_lines[MAX_U_SERIAL_PORTS];
-static int __init serial_bind_config(struct usb_configuration *c)
+static int __init serial_bind_obex_config(struct usb_configuration *c)
{
unsigned i;
int status = 0;
- for (i = 0; i < n_ports && status == 0; i++) {
- if (use_acm)
- status = acm_bind_config(c, i);
- else if (use_obex)
- status = obex_bind_config(c, i);
- else
- status = gser_bind_config(c, i);
- }
+ for (i = 0; i < n_ports && status == 0; i++)
+ status = obex_bind_config(c, tty_lines[i]);
+ return status;
+}
+
+static int __init serial_bind_gser_config(struct usb_configuration *c)
+{
+ unsigned i;
+ int status = 0;
+
+ for (i = 0; i < n_ports && status == 0; i++)
+ status = gser_bind_config(c, tty_lines[i]);
return status;
}
@@ -152,13 +155,70 @@ static struct usb_configuration serial_config_driver = {
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
+static struct usb_function_instance *fi_serial[MAX_U_SERIAL_PORTS];
+static struct usb_function *f_serial[MAX_U_SERIAL_PORTS];
+
+static int serial_register_ports(struct usb_composite_dev *cdev,
+ struct usb_configuration *c, const char *f_name)
+{
+ int i;
+ int ret;
+
+ ret = usb_add_config_only(cdev, c);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < n_ports; i++) {
+ struct f_serial_opts *opts;
+
+ fi_serial[i] = usb_get_function_instance(f_name);
+ if (IS_ERR(fi_serial[i])) {
+ ret = PTR_ERR(fi_serial[i]);
+ goto fail;
+ }
+ opts = container_of(fi_serial[i], struct f_serial_opts, func_inst);
+ opts->port_num = tty_lines[i];
+
+ f_serial[i] = usb_get_function(fi_serial[i]);
+ if (IS_ERR(f_serial[i])) {
+ ret = PTR_ERR(f_serial[i]);
+ goto err_get_func;
+ }
+
+ ret = usb_add_function(c, f_serial[i]);
+ if (ret)
+ goto err_add_func;
+ }
+
+ return 0;
+
+err_add_func:
+ usb_put_function(f_serial[i]);
+err_get_func:
+ usb_put_function_instance(fi_serial[i]);
+
+fail:
+ i--;
+ while (i >= 0) {
+ usb_remove_function(c, f_serial[i]);
+ usb_put_function(f_serial[i]);
+ usb_put_function_instance(fi_serial[i]);
+ i--;
+ }
+out:
+ return ret;
+}
+
static int __init gs_bind(struct usb_composite_dev *cdev)
{
int status;
+ int cur_line;
- status = gserial_setup(cdev->gadget, n_ports);
- if (status < 0)
- return status;
+ for (cur_line = 0; cur_line < n_ports; cur_line++) {
+ status = gserial_alloc_line(&tty_lines[cur_line]);
+ if (status)
+ goto fail;
+ }
/* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
@@ -178,8 +238,16 @@ static int __init gs_bind(struct usb_composite_dev *cdev)
}
/* register our configuration */
- status = usb_add_config(cdev, &serial_config_driver,
- serial_bind_config);
+ if (use_acm) {
+ status = serial_register_ports(cdev, &serial_config_driver,
+ "acm");
+ usb_ep_autoconfig_reset(cdev->gadget);
+ } else if (use_obex)
+ status = usb_add_config(cdev, &serial_config_driver,
+ serial_bind_obex_config);
+ else
+ status = usb_add_config(cdev, &serial_config_driver,
+ serial_bind_gser_config);
if (status < 0)
goto fail;
@@ -189,16 +257,31 @@ static int __init gs_bind(struct usb_composite_dev *cdev)
return 0;
fail:
- gserial_cleanup();
+ cur_line--;
+ while (cur_line >= 0)
+ gserial_free_line(tty_lines[cur_line--]);
return status;
}
+static int gs_unbind(struct usb_composite_dev *cdev)
+{
+ int i;
+
+ for (i = 0; i < n_ports; i++) {
+ usb_put_function(f_serial[i]);
+ usb_put_function_instance(fi_serial[i]);
+ gserial_free_line(tty_lines[i]);
+ }
+ return 0;
+}
+
static __refdata struct usb_composite_driver gserial_driver = {
.name = "g_serial",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_SUPER,
.bind = gs_bind,
+ .unbind = gs_unbind,
};
static int __init init(void)
@@ -234,6 +317,5 @@ module_init(init);
static void __exit cleanup(void)
{
usb_composite_unregister(&gserial_driver);
- gserial_cleanup();
}
module_exit(cleanup);
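With f_acm.c and u_serial.c no longer #included, the ACM ports are assembled through the function-instance API that serial_register_ports() uses above. Below is a condensed sketch of that sequence for a single port, with error handling trimmed; the calls and the f_serial_opts type are taken from the diff, while the wrapper function itself is illustrative:

static int bind_one_acm_port(struct usb_configuration *c, unsigned char line)
{
	struct usb_function_instance *fi;
	struct usb_function *f;
	struct f_serial_opts *opts;
	int ret;

	fi = usb_get_function_instance("acm");	/* find/load the function */
	if (IS_ERR(fi))
		return PTR_ERR(fi);

	opts = container_of(fi, struct f_serial_opts, func_inst);
	opts->port_num = line;		/* tty line from gserial_alloc_line() */

	f = usb_get_function(fi);
	if (IS_ERR(f)) {
		usb_put_function_instance(fi);
		return PTR_ERR(f);
	}

	ret = usb_add_function(c, f);
	if (ret) {
		usb_put_function(f);
		usb_put_function_instance(fi);
	}
	return ret;
}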
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 0e3ae43454a2..4ecbf8496f48 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -93,18 +93,6 @@
/*-------------------------------------------------------------------------*/
-/* CBI Interrupt data structure */
-struct interrupt_data {
- u8 bType;
- u8 bValue;
-};
-
-#define CBI_INTERRUPT_DATA_LEN 2
-
-/* CBI Accept Device-Specific Command request */
-#define USB_CBI_ADSC_REQUEST 0x00
-
-
/* Length of a SCSI Command Data Block */
#define MAX_COMMAND_SIZE 16
@@ -385,41 +373,6 @@ static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = {
/*.bMaxBurst = DYNAMIC, */
};
-static __maybe_unused struct usb_ext_cap_descriptor fsg_ext_cap_desc = {
- .bLength = USB_DT_USB_EXT_CAP_SIZE,
- .bDescriptorType = USB_DT_DEVICE_CAPABILITY,
- .bDevCapabilityType = USB_CAP_TYPE_EXT,
-
- .bmAttributes = cpu_to_le32(USB_LPM_SUPPORT),
-};
-
-static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = {
- .bLength = USB_DT_USB_SS_CAP_SIZE,
- .bDescriptorType = USB_DT_DEVICE_CAPABILITY,
- .bDevCapabilityType = USB_SS_CAP_TYPE,
-
- /* .bmAttributes = LTM is not supported yet */
-
- .wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION
- | USB_FULL_SPEED_OPERATION
- | USB_HIGH_SPEED_OPERATION
- | USB_5GBPS_OPERATION),
- .bFunctionalitySupport = USB_LOW_SPEED_OPERATION,
- .bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT,
- .bU2DevExitLat = cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT),
-};
-
-static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = {
- .bLength = USB_DT_BOS_SIZE,
- .bDescriptorType = USB_DT_BOS,
-
- .wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE
- + USB_DT_USB_EXT_CAP_SIZE
- + USB_DT_USB_SS_CAP_SIZE),
-
- .bNumDeviceCaps = 2,
-};
-
static struct usb_descriptor_header *fsg_ss_function[] = {
(struct usb_descriptor_header *) &fsg_intf_desc,
(struct usb_descriptor_header *) &fsg_ss_bulk_in_desc,
@@ -429,20 +382,6 @@ static struct usb_descriptor_header *fsg_ss_function[] = {
NULL,
};
-/* Maxpacket and other transfer characteristics vary by speed. */
-static __maybe_unused struct usb_endpoint_descriptor *
-fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
- struct usb_endpoint_descriptor *hs,
- struct usb_endpoint_descriptor *ss)
-{
- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
- return ss;
- else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
- return hs;
- return fs;
-}
-
-
/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
static struct usb_string fsg_strings[] = {
{FSG_STRING_INTERFACE, fsg_string_interface},
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 4f7f76f00c74..7cacd6ae818e 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1794,9 +1794,10 @@ static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
tpg->tpg_nexus = NULL;
kfree(tv_nexus);
+ ret = 0;
out:
mutex_unlock(&tpg->tpg_mutex);
- return 0;
+ return ret;
}
static ssize_t tcm_usbg_tpg_store_nexus(
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 4ec3c0d7a18b..a0aa721d8b21 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -159,12 +159,12 @@ static int ueth_change_mtu(struct net_device *net, int new_mtu)
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
- struct eth_dev *dev = netdev_priv(net);
+ struct eth_dev *dev = netdev_priv(net);
- strlcpy(p->driver, "g_ether", sizeof p->driver);
- strlcpy(p->version, UETH__VERSION, sizeof p->version);
- strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
- strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+ strlcpy(p->driver, "g_ether", sizeof(p->driver));
+ strlcpy(p->version, UETH__VERSION, sizeof(p->version));
+ strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
+ strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}
/* REVISIT can also support:
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index d0f95482f40e..c5034d9c946b 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -26,6 +26,7 @@
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/module.h>
#include "u_serial.h"
@@ -35,11 +36,12 @@
* "serial port" functionality through the USB gadget stack. Each such
* port is exposed through a /dev/ttyGS* node.
*
- * After initialization (gserial_setup), these TTY port devices stay
- * available until they are removed (gserial_cleanup). Each one may be
- * connected to a USB function (gserial_connect), or disconnected (with
- * gserial_disconnect) when the USB host issues a config change event.
- * Data can only flow when the port is connected to the host.
+ * After this module has been loaded, individual TTY ports can be requested
+ * (gserial_alloc_line()) and each stays available until it is removed
+ * (gserial_free_line()). Each one may be connected to a USB function
+ * (gserial_connect), or disconnected (with gserial_disconnect) when the USB
+ * host issues a config change event. Data can only flow when the port is
+ * connected to the host.
*
* A given TTY port can be made available in multiple configurations.
* For example, each one might expose a ttyGS0 node which provides a
@@ -119,13 +121,10 @@ struct gs_port {
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
};
-/* increase N_PORTS if you need more */
-#define N_PORTS 4
static struct portmaster {
struct mutex lock; /* protect open/close */
struct gs_port *port;
-} ports[N_PORTS];
-static unsigned n_ports;
+} ports[MAX_U_SERIAL_PORTS];
#define GS_CLOSE_TIMEOUT 15 /* seconds */
@@ -309,6 +308,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
return req;
}
+EXPORT_SYMBOL_GPL(gs_alloc_req);
/*
* gs_free_req
@@ -320,6 +320,7 @@ void gs_free_req(struct usb_ep *ep, struct usb_request *req)
kfree(req->buf);
usb_ep_free_request(ep, req);
}
+EXPORT_SYMBOL_GPL(gs_free_req);
/*
* gs_send_packet
@@ -495,12 +496,8 @@ static void gs_rx_push(unsigned long _port)
req = list_first_entry(queue, struct usb_request, list);
- /* discard data if tty was closed */
- if (!tty)
- goto recycle;
-
/* leave data queued if tty was rx throttled */
- if (test_bit(TTY_THROTTLED, &tty->flags))
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags))
break;
switch (req->status) {
@@ -533,7 +530,8 @@ static void gs_rx_push(unsigned long _port)
size -= n;
}
- count = tty_insert_flip_string(tty, packet, size);
+ count = tty_insert_flip_string(&port->port, packet,
+ size);
if (count)
do_push = true;
if (count != size) {
@@ -546,7 +544,7 @@ static void gs_rx_push(unsigned long _port)
}
port->n_read = 0;
}
-recycle:
+
list_move(&req->list, &port->read_pool);
port->read_started--;
}
@@ -554,8 +552,8 @@ recycle:
/* Push from tty to ldisc; without low_latency set this is handled by
* a workqueue, so we won't get callbacks and can hold port_lock
*/
- if (tty && do_push)
- tty_flip_buffer_push(tty);
+ if (do_push)
+ tty_flip_buffer_push(&port->port);
/* We want our data queue to become empty ASAP, keeping data
@@ -887,7 +885,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
port->port_num, tty, file);
- wake_up_interruptible(&port->port.close_wait);
+ wake_up(&port->port.close_wait);
exit:
spin_unlock_irq(&port->port_lock);
}
@@ -1030,10 +1028,19 @@ static int
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
struct gs_port *port;
+ int ret = 0;
+
+ mutex_lock(&ports[port_num].lock);
+ if (ports[port_num].port) {
+ ret = -EBUSY;
+ goto out;
+ }
port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
- if (port == NULL)
- return -ENOMEM;
+ if (port == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
tty_port_init(&port->port);
spin_lock_init(&port->port_lock);
@@ -1049,109 +1056,9 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
port->port_line_coding = *coding;
ports[port_num].port = port;
-
- return 0;
-}
-
-/**
- * gserial_setup - initialize TTY driver for one or more ports
- * @g: gadget to associate with these ports
- * @count: how many ports to support
- * Context: may sleep
- *
- * The TTY stack needs to know in advance how many devices it should
- * plan to manage. Use this call to set up the ports you will be
- * exporting through USB. Later, connect them to functions based
- * on what configuration is activated by the USB host; and disconnect
- * them as appropriate.
- *
- * An example would be a two-configuration device in which both
- * configurations expose port 0, but through different functions.
- * One configuration could even expose port 1 while the other
- * one doesn't.
- *
- * Returns negative errno or zero.
- */
-int gserial_setup(struct usb_gadget *g, unsigned count)
-{
- unsigned i;
- struct usb_cdc_line_coding coding;
- int status;
-
- if (count == 0 || count > N_PORTS)
- return -EINVAL;
-
- gs_tty_driver = alloc_tty_driver(count);
- if (!gs_tty_driver)
- return -ENOMEM;
-
- gs_tty_driver->driver_name = "g_serial";
- gs_tty_driver->name = PREFIX;
- /* uses dynamically assigned dev_t values */
-
- gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- gs_tty_driver->init_termios = tty_std_termios;
-
- /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
- * MS-Windows. Otherwise, most of these flags shouldn't affect
- * anything unless we were to actually hook up to a serial line.
- */
- gs_tty_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- gs_tty_driver->init_termios.c_ispeed = 9600;
- gs_tty_driver->init_termios.c_ospeed = 9600;
-
- coding.dwDTERate = cpu_to_le32(9600);
- coding.bCharFormat = 8;
- coding.bParityType = USB_CDC_NO_PARITY;
- coding.bDataBits = USB_CDC_1_STOP_BITS;
-
- tty_set_operations(gs_tty_driver, &gs_tty_ops);
-
- /* make devices be openable */
- for (i = 0; i < count; i++) {
- mutex_init(&ports[i].lock);
- status = gs_port_alloc(i, &coding);
- if (status) {
- count = i;
- goto fail;
- }
- }
- n_ports = count;
-
- /* export the driver ... */
- status = tty_register_driver(gs_tty_driver);
- if (status) {
- pr_err("%s: cannot register, err %d\n",
- __func__, status);
- goto fail;
- }
-
- /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
- for (i = 0; i < count; i++) {
- struct device *tty_dev;
-
- tty_dev = tty_port_register_device(&ports[i].port->port,
- gs_tty_driver, i, &g->dev);
- if (IS_ERR(tty_dev))
- pr_warning("%s: no classdev for port %d, err %ld\n",
- __func__, i, PTR_ERR(tty_dev));
- }
-
- pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
- count, (count == 1) ? "" : "s");
-
- return status;
-fail:
- while (count--) {
- tty_port_destroy(&ports[count].port->port);
- kfree(ports[count].port);
- }
- put_tty_driver(gs_tty_driver);
- gs_tty_driver = NULL;
- return status;
+out:
+ mutex_unlock(&ports[port_num].lock);
+ return ret;
}
static int gs_closed(struct gs_port *port)
@@ -1164,55 +1071,77 @@ static int gs_closed(struct gs_port *port)
return cond;
}
-/**
- * gserial_cleanup - remove TTY-over-USB driver and devices
- * Context: may sleep
- *
- * This is called to free all resources allocated by @gserial_setup().
- * Accordingly, it may need to wait until some open /dev/ files have
- * closed.
- *
- * The caller must have issued @gserial_disconnect() for any ports
- * that had previously been connected, so that there is never any
- * I/O pending when it's called.
- */
-void gserial_cleanup(void)
+static void gserial_free_port(struct gs_port *port)
+{
+ tasklet_kill(&port->push);
+ /* wait for old opens to finish */
+ wait_event(port->port.close_wait, gs_closed(port));
+ WARN_ON(port->port_usb != NULL);
+ tty_port_destroy(&port->port);
+ kfree(port);
+}
+
+void gserial_free_line(unsigned char port_num)
{
- unsigned i;
struct gs_port *port;
- if (!gs_tty_driver)
+ mutex_lock(&ports[port_num].lock);
+ if (WARN_ON(!ports[port_num].port)) {
+ mutex_unlock(&ports[port_num].lock);
return;
+ }
+ port = ports[port_num].port;
+ ports[port_num].port = NULL;
+ mutex_unlock(&ports[port_num].lock);
- /* start sysfs and /dev/ttyGS* node removal */
- for (i = 0; i < n_ports; i++)
- tty_unregister_device(gs_tty_driver, i);
-
- for (i = 0; i < n_ports; i++) {
- /* prevent new opens */
- mutex_lock(&ports[i].lock);
- port = ports[i].port;
- ports[i].port = NULL;
- mutex_unlock(&ports[i].lock);
-
- tasklet_kill(&port->push);
+ gserial_free_port(port);
+ tty_unregister_device(gs_tty_driver, port_num);
+}
+EXPORT_SYMBOL_GPL(gserial_free_line);
- /* wait for old opens to finish */
- wait_event(port->port.close_wait, gs_closed(port));
+int gserial_alloc_line(unsigned char *line_num)
+{
+ struct usb_cdc_line_coding coding;
+ struct device *tty_dev;
+ int ret;
+ int port_num;
- WARN_ON(port->port_usb != NULL);
+ coding.dwDTERate = cpu_to_le32(9600);
+ coding.bCharFormat = 8;
+ coding.bParityType = USB_CDC_NO_PARITY;
+ coding.bDataBits = USB_CDC_1_STOP_BITS;
- tty_port_destroy(&port->port);
- kfree(port);
+ for (port_num = 0; port_num < MAX_U_SERIAL_PORTS; port_num++) {
+ ret = gs_port_alloc(port_num, &coding);
+ if (ret == -EBUSY)
+ continue;
+ if (ret)
+ return ret;
+ break;
}
- n_ports = 0;
+ if (ret)
+ return ret;
- tty_unregister_driver(gs_tty_driver);
- put_tty_driver(gs_tty_driver);
- gs_tty_driver = NULL;
+ /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
- pr_debug("%s: cleaned up ttyGS* support\n", __func__);
+ tty_dev = tty_port_register_device(&ports[port_num].port->port,
+ gs_tty_driver, port_num, NULL);
+ if (IS_ERR(tty_dev)) {
+ struct gs_port *port;
+ pr_err("%s: failed to register tty for port %d, err %ld\n",
+ __func__, port_num, PTR_ERR(tty_dev));
+
+ ret = PTR_ERR(tty_dev);
+ port = ports[port_num].port;
+ ports[port_num].port = NULL;
+ gserial_free_port(port);
+ goto err;
+ }
+ *line_num = port_num;
+err:
+ return ret;
}
+EXPORT_SYMBOL_GPL(gserial_alloc_line);
/**
* gserial_connect - notify TTY I/O glue that USB link is active
@@ -1229,8 +1158,8 @@ void gserial_cleanup(void)
*
* Caller needs to have set up the endpoints and USB function in @dev
* before calling this, as well as the appropriate (speed-specific)
- * endpoint descriptors, and also have set up the TTY driver by calling
- * @gserial_setup().
+ * endpoint descriptors, and also have allocated @port_num by calling
+ * @gserial_alloc_line().
*
* Returns negative errno or zero.
* On success, ep->driver_data will be overwritten.
@@ -1241,11 +1170,18 @@ int gserial_connect(struct gserial *gser, u8 port_num)
unsigned long flags;
int status;
- if (!gs_tty_driver || port_num >= n_ports)
+ if (port_num >= MAX_U_SERIAL_PORTS)
return -ENXIO;
- /* we "know" gserial_cleanup() hasn't been called */
port = ports[port_num].port;
+ if (!port) {
+ pr_err("serial line %d not allocated.\n", port_num);
+ return -EINVAL;
+ }
+ if (port->port_usb) {
+ pr_err("serial line %d is in use.\n", port_num);
+ return -EBUSY;
+ }
/* activate the endpoints */
status = usb_ep_enable(gser->in);
@@ -1292,7 +1228,7 @@ fail_out:
gser->in->driver_data = NULL;
return status;
}
-
+EXPORT_SYMBOL_GPL(gserial_connect);
/**
* gserial_disconnect - notify TTY I/O glue that USB link is inactive
* @gser: the function, on which gserial_connect() was called
@@ -1347,3 +1283,65 @@ void gserial_disconnect(struct gserial *gser)
spin_unlock_irqrestore(&port->port_lock, flags);
}
+EXPORT_SYMBOL_GPL(gserial_disconnect);
+
+static int userial_init(void)
+{
+ unsigned i;
+ int status;
+
+ gs_tty_driver = alloc_tty_driver(MAX_U_SERIAL_PORTS);
+ if (!gs_tty_driver)
+ return -ENOMEM;
+
+ gs_tty_driver->driver_name = "g_serial";
+ gs_tty_driver->name = PREFIX;
+ /* uses dynamically assigned dev_t values */
+
+ gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ gs_tty_driver->init_termios = tty_std_termios;
+
+ /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
+ * MS-Windows. Otherwise, most of these flags shouldn't affect
+ * anything unless we were to actually hook up to a serial line.
+ */
+ gs_tty_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ gs_tty_driver->init_termios.c_ispeed = 9600;
+ gs_tty_driver->init_termios.c_ospeed = 9600;
+
+ tty_set_operations(gs_tty_driver, &gs_tty_ops);
+ for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
+ mutex_init(&ports[i].lock);
+
+ /* export the driver ... */
+ status = tty_register_driver(gs_tty_driver);
+ if (status) {
+ pr_err("%s: cannot register, err %d\n",
+ __func__, status);
+ goto fail;
+ }
+
+ pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
+ MAX_U_SERIAL_PORTS,
+ (MAX_U_SERIAL_PORTS == 1) ? "" : "s");
+
+ return status;
+fail:
+ put_tty_driver(gs_tty_driver);
+ gs_tty_driver = NULL;
+ return status;
+}
+module_init(userial_init);
+
+static void userial_cleanup(void)
+{
+ tty_unregister_driver(gs_tty_driver);
+ put_tty_driver(gs_tty_driver);
+ gs_tty_driver = NULL;
+}
+module_exit(userial_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 9b0fe6450fbf..66ce73a00509 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -15,6 +15,13 @@
#include <linux/usb/composite.h>
#include <linux/usb/cdc.h>
+#define MAX_U_SERIAL_PORTS 4
+
+struct f_serial_opts {
+ struct usb_function_instance func_inst;
+ u8 port_num;
+};
+
/*
* One non-multiplexed "serial" I/O port ... there can be several of these
* on any given USB peripheral device, if it provides enough endpoints.
@@ -49,9 +56,9 @@ struct gserial {
struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
void gs_free_req(struct usb_ep *, struct usb_request *req);
-/* port setup/teardown is handled by gadget driver */
-int gserial_setup(struct usb_gadget *g, unsigned n_ports);
-void gserial_cleanup(void);
+/* management of individual TTY ports */
+int gserial_alloc_line(unsigned char *port_line);
+void gserial_free_line(unsigned char port_line);
/* connect/disconnect is handled by individual functions */
int gserial_connect(struct gserial *, u8 port_num);
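The prototypes above replace the single gserial_setup()/gserial_cleanup() pair with per-port allocation, so a function driver now owns its tty line for the whole gadget lifetime. A sketch of that lifecycle, assuming a hypothetical function "f_foo"; the four u_serial calls are the ones declared above:

static unsigned char foo_line;	/* /dev/ttyGS<foo_line> */

static int f_foo_bind(void)
{
	/* reserve a tty line; the allocated number is returned in foo_line */
	return gserial_alloc_line(&foo_line);
}

static int f_foo_set_alt(struct gserial *gser)
{
	/* endpoints/descriptors must already be set up by the caller */
	return gserial_connect(gser, foo_line);
}

static void f_foo_disable(struct gserial *gser)
{
	gserial_disconnect(gser);
}

static void f_foo_unbind(void)
{
	gserial_free_line(foo_line);
}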
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 4d90a800063c..2a9cd369f71c 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -102,28 +102,6 @@ EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
/* ------------------------------------------------------------------------- */
/**
- * usb_gadget_start - tells usb device controller to start up
- * @gadget: The gadget we want to get started
- * @driver: The driver we want to bind to @gadget
- * @bind: The bind function for @driver
- *
- * This call is issued by the UDC Class driver when it's about
- * to register a gadget driver to the device controller, before
- * calling gadget driver's bind() method.
- *
- * It allows the controller to be powered off until strictly
- * necessary to have it powered on.
- *
- * Returns zero on success, else negative errno.
- */
-static inline int usb_gadget_start(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
-{
- return gadget->ops->start(driver, bind);
-}
-
-/**
* usb_gadget_udc_start - tells usb device controller to start up
* @gadget: The gadget we want to get started
* @driver: The driver we want to bind to @gadget
@@ -144,24 +122,6 @@ static inline int usb_gadget_udc_start(struct usb_gadget *gadget,
}
/**
- * usb_gadget_stop - tells usb device controller we don't need it anymore
- * @gadget: The device we want to stop activity
- * @driver: The driver to unbind from @gadget
- *
- * This call is issued by the UDC Class driver after calling
- * gadget driver's unbind() method.
- *
- * The details are implementation specific, but it can go as
- * far as powering off UDC completely and disable its data
- * line pullups.
- */
-static inline void usb_gadget_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
-{
- gadget->ops->stop(driver);
-}
-
-/**
* usb_gadget_udc_stop - tells usb device controller we don't need it anymore
* @gadget: The device we want to stop activity
* @driver: The driver to unbind from @gadget
@@ -246,14 +206,6 @@ err1:
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
-static int udc_is_newstyle(struct usb_udc *udc)
-{
- if (udc->gadget->ops->udc_start && udc->gadget->ops->udc_stop)
- return 1;
- return 0;
-}
-
-
static void usb_gadget_remove_driver(struct usb_udc *udc)
{
dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
@@ -261,14 +213,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
- if (udc_is_newstyle(udc)) {
- usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
- udc->driver->unbind(udc->gadget);
- usb_gadget_udc_stop(udc->gadget, udc->driver);
- } else {
- usb_gadget_stop(udc->gadget, udc->driver);
- }
+ usb_gadget_disconnect(udc->gadget);
+ udc->driver->disconnect(udc->gadget);
+ udc->driver->unbind(udc->gadget);
+ usb_gadget_udc_stop(udc->gadget, udc->driver);
udc->driver = NULL;
udc->dev.driver = NULL;
@@ -311,6 +259,62 @@ EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
/* ------------------------------------------------------------------------- */
+static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
+{
+ int ret;
+
+ dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
+ driver->function);
+
+ udc->driver = driver;
+ udc->dev.driver = &driver->driver;
+
+ ret = driver->bind(udc->gadget, driver);
+ if (ret)
+ goto err1;
+ ret = usb_gadget_udc_start(udc->gadget, driver);
+ if (ret) {
+ driver->unbind(udc->gadget);
+ goto err1;
+ }
+ usb_gadget_connect(udc->gadget);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ return 0;
+err1:
+ dev_err(&udc->dev, "failed to start %s: %d\n",
+ udc->driver->function, ret);
+ udc->driver = NULL;
+ udc->dev.driver = NULL;
+ return ret;
+}
+
+int udc_attach_driver(const char *name, struct usb_gadget_driver *driver)
+{
+ struct usb_udc *udc = NULL;
+ int ret = -ENODEV;
+
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list) {
+ ret = strcmp(name, dev_name(&udc->dev));
+ if (!ret)
+ break;
+ }
+ if (ret) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (udc->driver) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ret = udc_bind_to_driver(udc, driver);
+out:
+ mutex_unlock(&udc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(udc_attach_driver);
+
int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
{
struct usb_udc *udc = NULL;
@@ -329,41 +333,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
pr_debug("couldn't find an available UDC\n");
mutex_unlock(&udc_lock);
return -ENODEV;
-
found:
- dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
- driver->function);
-
- udc->driver = driver;
- udc->dev.driver = &driver->driver;
-
- if (udc_is_newstyle(udc)) {
- ret = driver->bind(udc->gadget, driver);
- if (ret)
- goto err1;
- ret = usb_gadget_udc_start(udc->gadget, driver);
- if (ret) {
- driver->unbind(udc->gadget);
- goto err1;
- }
- usb_gadget_connect(udc->gadget);
- } else {
-
- ret = usb_gadget_start(udc->gadget, driver, driver->bind);
- if (ret)
- goto err1;
-
- }
-
- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&udc_lock);
- return 0;
-
-err1:
- dev_err(&udc->dev, "failed to start %s: %d\n",
- udc->driver->function, ret);
- udc->driver = NULL;
- udc->dev.driver = NULL;
+ ret = udc_bind_to_driver(udc, driver);
mutex_unlock(&udc_lock);
return ret;
}
@@ -410,13 +381,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
if (sysfs_streq(buf, "connect")) {
- if (udc_is_newstyle(udc))
- usb_gadget_udc_start(udc->gadget, udc->driver);
+ usb_gadget_udc_start(udc->gadget, udc->driver);
usb_gadget_connect(udc->gadget);
} else if (sysfs_streq(buf, "disconnect")) {
usb_gadget_disconnect(udc->gadget);
- if (udc_is_newstyle(udc))
- usb_gadget_udc_stop(udc->gadget, udc->driver);
+ usb_gadget_udc_stop(udc->gadget, udc->driver);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
return -EINVAL;
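With udc_is_newstyle() and the old usb_gadget_start()/usb_gadget_stop() wrappers removed, every gadget driver must supply bind() and unbind(); both usb_gadget_probe_driver() and the new udc_attach_driver() funnel into udc_bind_to_driver(), which calls bind(), then udc_start(), then connects the pullup. The sketch below shows the minimum a gadget driver registers under this scheme; the g_foo names are illustrative only:

#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

static int g_foo_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	/* allocate configurations, endpoints, string ids ... */
	return 0;
}

static void g_foo_unbind(struct usb_gadget *gadget)
{
	/* release whatever bind() allocated */
}

static int g_foo_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* no class/vendor requests handled */
}

static void g_foo_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver g_foo_driver = {
	.function	= "g_foo",
	.max_speed	= USB_SPEED_HIGH,
	.bind		= g_foo_bind,
	.unbind		= g_foo_unbind,
	.setup		= g_foo_setup,
	.disconnect	= g_foo_disconnect,
	.driver		= {
		.name	= "g_foo",
		.owner	= THIS_MODULE,
	},
};

/* usb_gadget_probe_driver(&g_foo_driver) picks the first free UDC;
 * udc_attach_driver("<udc name>", &g_foo_driver) targets one by name. */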
diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c
index 69cf5c2cd335..8cef1e658c29 100644
--- a/drivers/usb/gadget/webcam.c
+++ b/drivers/usb/gadget/webcam.c
@@ -336,7 +336,7 @@ static struct usb_configuration webcam_config_driver = {
.bConfigurationValue = 1,
.iConfiguration = 0, /* dynamic */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
- .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
+ .MaxPower = CONFIG_USB_GADGET_VBUS_DRAW,
};
static int /* __init_or_exit */
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 6bf4c0611365..685fa681cb65 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -10,7 +10,6 @@
* (at your option) any later version.
*/
-
/*
* Gadget Zero only needs two bulk endpoints, and is an example of how you
* can write a hardware-agnostic gadget driver running inside a USB device.
@@ -43,23 +42,11 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/usb/composite.h>
#include "g_zero.h"
-#include "gadget_chips.h"
-
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module. So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
-#include "f_sourcesink.c"
-#include "f_loopback.c"
-
/*-------------------------------------------------------------------------*/
USB_GADGET_COMPOSITE_OPTIONS();
@@ -67,9 +54,6 @@ USB_GADGET_COMPOSITE_OPTIONS();
static const char longname[] = "Gadget Zero";
-unsigned buflen = 4096; /* only used for bulk endpoints */
-module_param(buflen, uint, 0);
-
/*
* Normally the "loopback" configuration is second (index 1) so
* it's not the default. Here's where to change that order, to
@@ -79,6 +63,13 @@ module_param(buflen, uint, 0);
static bool loopdefault = 0;
module_param(loopdefault, bool, S_IRUGO|S_IWUSR);
+static struct usb_zero_options gzero_options = {
+ .isoc_interval = 4,
+ .isoc_maxpacket = 1024,
+ .bulk_buflen = 4096,
+ .qlen = 32,
+};
+
/*-------------------------------------------------------------------------*/
/* Thanks to NetChip Technologies for donating this product ID.
@@ -129,20 +120,27 @@ static struct usb_otg_descriptor otg_descriptor = {
.bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
};
-const struct usb_descriptor_header *otg_desc[] = {
+static const struct usb_descriptor_header *otg_desc[] = {
(struct usb_descriptor_header *) &otg_descriptor,
NULL,
};
+#else
+#define otg_desc NULL
#endif
/* string IDs are assigned dynamically */
/* default serial number takes at least two packets */
static char serial[] = "0123456789.0123456789.0123456789";
+#define USB_GZERO_SS_DESC (USB_GADGET_FIRST_AVAIL_IDX + 0)
+#define USB_GZERO_LB_DESC (USB_GADGET_FIRST_AVAIL_IDX + 1)
+
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
[USB_GADGET_PRODUCT_IDX].s = longname,
[USB_GADGET_SERIAL_IDX].s = serial,
+ [USB_GZERO_SS_DESC].s = "source and sink data",
+ [USB_GZERO_LB_DESC].s = "loop input to output",
{ } /* end of list */
};
@@ -158,58 +156,6 @@ static struct usb_gadget_strings *dev_strings[] = {
/*-------------------------------------------------------------------------*/
-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len)
-{
- struct usb_request *req;
-
- req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (req) {
- if (len)
- req->length = len;
- else
- req->length = buflen;
- req->buf = kmalloc(req->length, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- req = NULL;
- }
- }
- return req;
-}
-
-void free_ep_req(struct usb_ep *ep, struct usb_request *req)
-{
- kfree(req->buf);
- usb_ep_free_request(ep, req);
-}
-
-static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
-{
- int value;
-
- if (ep->driver_data) {
- value = usb_ep_disable(ep);
- if (value < 0)
- DBG(cdev, "disable %s --> %d\n",
- ep->name, value);
- ep->driver_data = NULL;
- }
-}
-
-void disable_endpoints(struct usb_composite_dev *cdev,
- struct usb_ep *in, struct usb_ep *out,
- struct usb_ep *iso_in, struct usb_ep *iso_out)
-{
- disable_ep(cdev, in);
- disable_ep(cdev, out);
- if (iso_in)
- disable_ep(cdev, iso_in);
- if (iso_out)
- disable_ep(cdev, iso_out);
-}
-
-/*-------------------------------------------------------------------------*/
-
static struct timer_list autoresume_timer;
static void zero_autoresume(unsigned long _c)
@@ -251,8 +197,65 @@ static void zero_resume(struct usb_composite_dev *cdev)
/*-------------------------------------------------------------------------*/
+static struct usb_configuration loopback_driver = {
+ .label = "loopback",
+ .bConfigurationValue = 2,
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+ /* .iConfiguration = DYNAMIC */
+};
+
+static struct usb_function *func_ss;
+static struct usb_function_instance *func_inst_ss;
+
+static int ss_config_setup(struct usb_configuration *c,
+ const struct usb_ctrlrequest *ctrl)
+{
+ switch (ctrl->bRequest) {
+ case 0x5b:
+ case 0x5c:
+ return func_ss->setup(func_ss, ctrl);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct usb_configuration sourcesink_driver = {
+ .label = "source/sink",
+ .setup = ss_config_setup,
+ .bConfigurationValue = 3,
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+ /* .iConfiguration = DYNAMIC */
+};
+
+module_param_named(buflen, gzero_options.bulk_buflen, uint, 0);
+module_param_named(pattern, gzero_options.pattern, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(pattern, "0 = all zeroes, 1 = mod63, 2 = none");
+
+module_param_named(isoc_interval, gzero_options.isoc_interval, uint,
+ S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(isoc_interval, "1 - 16");
+
+module_param_named(isoc_maxpacket, gzero_options.isoc_maxpacket, uint,
+ S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(isoc_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)");
+
+module_param_named(isoc_mult, gzero_options.isoc_mult, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(isoc_mult, "0 - 2 (hs/ss only)");
+
+module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint,
+ S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
+
+static struct usb_function *func_lb;
+static struct usb_function_instance *func_inst_lb;
+
+module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(qlen, "depth of loopback queue");
+
static int __init zero_bind(struct usb_composite_dev *cdev)
{
+ struct f_ss_opts *ss_opts;
+ struct f_lb_opts *lb_opts;
int status;
/* Allocate string descriptor numbers ... note that string
@@ -268,27 +271,105 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
setup_timer(&autoresume_timer, zero_autoresume, (unsigned long) cdev);
+ func_inst_ss = usb_get_function_instance("SourceSink");
+ if (IS_ERR(func_inst_ss))
+ return PTR_ERR(func_inst_ss);
+
+ ss_opts = container_of(func_inst_ss, struct f_ss_opts, func_inst);
+ ss_opts->pattern = gzero_options.pattern;
+ ss_opts->isoc_interval = gzero_options.isoc_interval;
+ ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
+ ss_opts->isoc_mult = gzero_options.isoc_mult;
+ ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
+ ss_opts->bulk_buflen = gzero_options.bulk_buflen;
+
+ func_ss = usb_get_function(func_inst_ss);
+ if (IS_ERR(func_ss))
+ goto err_put_func_inst_ss;
+
+ func_inst_lb = usb_get_function_instance("Loopback");
+ if (IS_ERR(func_inst_lb))
+ goto err_put_func_ss;
+
+ lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst);
+ lb_opts->bulk_buflen = gzero_options.bulk_buflen;
+ lb_opts->qlen = gzero_options.qlen;
+
+ func_lb = usb_get_function(func_inst_lb);
+ if (IS_ERR(func_lb)) {
+ status = PTR_ERR(func_lb);
+ goto err_put_func_inst_lb;
+ }
+
+ sourcesink_driver.iConfiguration = strings_dev[USB_GZERO_SS_DESC].id;
+ loopback_driver.iConfiguration = strings_dev[USB_GZERO_LB_DESC].id;
+
+ /* support autoresume for remote wakeup testing */
+ sourcesink_driver.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
+ loopback_driver.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
+ sourcesink_driver.descriptors = NULL;
+ loopback_driver.descriptors = NULL;
+ if (autoresume) {
+ sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ /* support OTG systems */
+ if (gadget_is_otg(cdev->gadget)) {
+ sourcesink_driver.descriptors = otg_desc;
+ sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ loopback_driver.descriptors = otg_desc;
+ loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
/* Register primary, then secondary configuration. Note that
* SH3 only allows one config...
*/
if (loopdefault) {
- loopback_add(cdev, autoresume != 0);
- sourcesink_add(cdev, autoresume != 0);
+ usb_add_config_only(cdev, &loopback_driver);
+ usb_add_config_only(cdev, &sourcesink_driver);
} else {
- sourcesink_add(cdev, autoresume != 0);
- loopback_add(cdev, autoresume != 0);
+ usb_add_config_only(cdev, &sourcesink_driver);
+ usb_add_config_only(cdev, &loopback_driver);
}
+ status = usb_add_function(&sourcesink_driver, func_ss);
+ if (status)
+ goto err_conf_flb;
+
+ usb_ep_autoconfig_reset(cdev->gadget);
+ status = usb_add_function(&loopback_driver, func_lb);
+ if (status)
+ goto err_conf_flb;
+ usb_ep_autoconfig_reset(cdev->gadget);
usb_composite_overwrite_options(cdev, &coverwrite);
INFO(cdev, "%s, version: " DRIVER_VERSION "\n", longname);
return 0;
+
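+/*
+ * Error unwind: each label releases one reference and falls through to the
+ * next, undoing the acquisitions above in reverse order.
+ */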
+err_conf_flb:
+ usb_put_function(func_lb);
+ func_lb = NULL;
+err_put_func_inst_lb:
+ usb_put_function_instance(func_inst_lb);
+ func_inst_lb = NULL;
+err_put_func_ss:
+ usb_put_function(func_ss);
+ func_ss = NULL;
+err_put_func_inst_ss:
+ usb_put_function_instance(func_inst_ss);
+ func_inst_ss = NULL;
+ return status;
}
static int zero_unbind(struct usb_composite_dev *cdev)
{
del_timer_sync(&autoresume_timer);
+ if (!IS_ERR_OR_NULL(func_ss))
+ usb_put_function(func_ss);
+ if (!IS_ERR_OR_NULL(func_inst_ss))
+ usb_put_function_instance(func_inst_ss);
+ if (!IS_ERR_OR_NULL(func_lb))
+ usb_put_function(func_lb);
+ if (!IS_ERR_OR_NULL(func_inst_lb))
+ usb_put_function_instance(func_inst_lb);
return 0;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d6bb128ce21e..c59a1126926f 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -148,7 +148,7 @@ config USB_EHCI_FSL
Variation of ARC USB block used in some Freescale chips.
config USB_EHCI_MXC
- bool "Support for Freescale i.MX on-chip EHCI USB controller"
+ tristate "Support for Freescale i.MX on-chip EHCI USB controller"
depends on USB_EHCI_HCD && ARCH_MXC
select USB_EHCI_ROOT_HUB_TT
---help---
@@ -246,7 +246,7 @@ config USB_EHCI_ATH79
config USB_OXU210HP_HCD
tristate "OXU210HP HCD support"
- depends on USB
+ depends on USB && GENERIC_HARDIRQS
---help---
The OXU210HP is a USB host/OTG/device controller. Enable this
option if your board has this chip. If unsure, say N.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 1eb4c3006e9e..001fbff2fdef 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o
+obj-$(CONFIG_USB_EHCI_MXC) += ehci-mxc.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 27639487f7ac..f3beac4d06b8 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -143,10 +143,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- retval = -EFAULT;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto fail_request_resource;
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index fd9b5424b860..d81d2fcbff18 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -230,7 +230,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
switch (phy_mode) {
case FSL_USB2_PHY_ULPI:
- if (pdata->controller_ver) {
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
setbits32(non_ehci + FSL_SOC_USB_CTRL,
ULPI_PHY_CLK_SEL);
@@ -251,7 +251,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
portsc |= PORT_PTS_PTW;
/* fall through */
case FSL_USB2_PHY_UTMI:
- if (pdata->controller_ver) {
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
setbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI PHY CLK to
@@ -267,7 +267,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
break;
}
- if (pdata->controller_ver && (phy_mode == FSL_USB2_PHY_ULPI)) {
+ if (pdata->have_sysif_regs && pdata->controller_ver &&
+ (phy_mode == FSL_USB2_PHY_ULPI)) {
/* check PHY_CLK_VALID to get phy clk valid */
if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
@@ -278,7 +279,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
- if (phy_mode != FSL_USB2_PHY_ULPI)
+ if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
setbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
return 0;
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 1fc89292f5d6..5d75de9729b6 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -25,7 +25,7 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
+#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of_irq.h>
@@ -118,10 +118,9 @@ static int ehci_hcd_grlib_probe(struct platform_device *op)
goto err_irq;
}
- hcd->regs = devm_request_and_ioremap(&op->dev, &res);
- if (!hcd->regs) {
- pr_err("%s: devm_request_and_ioremap failed\n", __FILE__);
- rv = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
goto err_ioremap;
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index c97503bb0b0e..b416a3fc9959 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -74,10 +74,6 @@ static const char hcd_name [] = "ehci_hcd";
#undef VERBOSE_DEBUG
#undef EHCI_URB_TRACE
-#ifdef DEBUG
-#define EHCI_STATS
-#endif
-
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
@@ -801,6 +797,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
set_bit(i, &ehci->resuming_ports);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+ usb_hcd_start_port_resume(&hcd->self, i);
mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
}
}
@@ -1250,11 +1247,6 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_fsl_driver
#endif
-#ifdef CONFIG_USB_EHCI_MXC
-#include "ehci-mxc.c"
-#define PLATFORM_DRIVER ehci_mxc_driver
-#endif
-
#ifdef CONFIG_USB_EHCI_SH
#include "ehci-sh.c"
#define PLATFORM_DRIVER ehci_hcd_sh_driver
@@ -1352,7 +1344,8 @@ MODULE_LICENSE ("GPL");
#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \
!IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \
- !defined(CONFIG_USB_CHIPIDEA_HOST) && \
+ !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \
+ !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \
!defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && \
!defined(OF_PLATFORM_DRIVER) && \
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 4ccb97c0678f..4d3b294f203e 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -649,7 +649,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
status = STS_PCD;
}
}
- /* FIXME autosuspend idle root hubs */
+
+ /* If a resume is in progress, make sure it can finish */
+ if (ehci->resuming_ports)
+ mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
+
spin_unlock_irqrestore (&ehci->lock, flags);
return status ? retval : 0;
}
@@ -851,6 +855,7 @@ static int ehci_hub_control (
/* resume signaling for 20 msec */
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
/* check the port again */
mod_timer(&ehci_to_hcd(ehci)->rh_timer,
ehci->reset_done[wIndex]);
@@ -862,6 +867,7 @@ static int ehci_hub_control (
clear_bit(wIndex, &ehci->suspended_ports);
set_bit(wIndex, &ehci->port_c_suspend);
ehci->reset_done[wIndex] = 0;
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
/* stop resume signaling */
temp = ehci_readl(ehci, status_reg);
@@ -950,6 +956,7 @@ static int ehci_hub_control (
ehci->reset_done[wIndex] = 0;
if (temp & PORT_PE)
set_bit(wIndex, &ehci->port_c_suspend);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
if (temp & PORT_OC)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index f7bfc0b898b9..3065809546b1 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -43,7 +43,7 @@ static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
unsigned int i;
for (i = 0; i < ehci_mv->clknum; i++)
- clk_enable(ehci_mv->clk[i]);
+ clk_prepare_enable(ehci_mv->clk[i]);
}
static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
@@ -51,7 +51,7 @@ static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
unsigned int i;
for (i = 0; i < ehci_mv->clknum; i++)
- clk_disable(ehci_mv->clk[i]);
+ clk_disable_unprepare(ehci_mv->clk[i]);
}
static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
@@ -302,7 +302,6 @@ static int mv_ehci_remove(struct platform_device *pdev)
{
struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
struct usb_hcd *hcd = ehci_mv->hcd;
- int clk_i;
if (hcd->rh_registered)
usb_remove_hcd(hcd);
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ec7f5d2c90de..e9301fb97eaa 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -17,75 +17,38 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_data/usb-ehci-mxc.h>
#include <asm/mach-types.h>
+#include "ehci.h"
+
+#define DRIVER_DESC "Freescale On-Chip EHCI Host driver"
+
+static const char hcd_name[] = "ehci-mxc";
+
#define ULPI_VIEWPORT_OFFSET 0x170
struct ehci_mxc_priv {
struct clk *usbclk, *ahbclk, *phyclk;
- struct usb_hcd *hcd;
};
-/* called during probe() after chip reset completes */
-static int ehci_mxc_setup(struct usb_hcd *hcd)
-{
- hcd->has_tt = 1;
-
- return ehci_setup(hcd);
-}
+static struct hc_driver __read_mostly ehci_mxc_hc_driver;
-static const struct hc_driver ehci_mxc_hc_driver = {
- .description = hcd_name,
- .product_desc = "Freescale On-Chip EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ehci_mxc_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+static const struct ehci_driver_overrides ehci_mxc_overrides __initdata = {
+ .extra_priv_size = sizeof(struct ehci_mxc_priv),
};
static int ehci_mxc_drv_probe(struct platform_device *pdev)
@@ -94,7 +57,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
struct usb_hcd *hcd;
struct resource *res;
int irq, ret;
- unsigned int flags;
struct ehci_mxc_priv *priv;
struct device *dev = &pdev->dev;
struct ehci_hcd *ehci;
@@ -112,12 +74,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Found HC with no register addr. Check setup!\n");
@@ -128,13 +84,16 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!hcd->regs) {
- dev_err(dev, "error mapping memory\n");
- ret = -EFAULT;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
goto err_alloc;
}
+ hcd->has_tt = 1;
+ ehci = hcd_to_ehci(hcd);
+ priv = (struct ehci_mxc_priv *) ehci->priv;
+
/* enable clocks */
priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(priv->usbclk)) {
@@ -169,8 +128,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
mdelay(10);
}
- ehci = hcd_to_ehci(hcd);
-
/* EHCI registers start at offset 0x100 */
ehci->caps = hcd->regs + 0x100;
ehci->regs = hcd->regs + 0x100 +
@@ -198,32 +155,12 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
}
}
- priv->hcd = hcd;
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, hcd);
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto err_add;
- if (pdata->otg) {
- /*
- * efikamx and efikasb have some hardware bug which is
- * preventing usb to work unless CHRGVBUS is set.
- * It's in violation of USB specs
- */
- if (machine_is_mx51_efikamx() || machine_is_mx51_efikasb()) {
- flags = usb_phy_io_read(pdata->otg,
- ULPI_OTG_CTRL);
- flags |= ULPI_OTG_CTRL_CHRGVBUS;
- ret = usb_phy_io_write(pdata->otg, flags,
- ULPI_OTG_CTRL);
- if (ret) {
- dev_err(dev, "unable to set CHRVBUS\n");
- goto err_add;
- }
- }
- }
-
return 0;
err_add:
@@ -244,8 +181,11 @@ err_alloc:
static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
{
struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
- struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = priv->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv;
+
+ usb_remove_hcd(hcd);
if (pdata && pdata->exit)
pdata->exit(pdev);
@@ -253,23 +193,20 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
if (pdata->otg)
usb_phy_shutdown(pdata->otg);
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
- platform_set_drvdata(pdev, NULL);
-
clk_disable_unprepare(priv->usbclk);
clk_disable_unprepare(priv->ahbclk);
if (priv->phyclk)
clk_disable_unprepare(priv->phyclk);
+ usb_put_hcd(hcd);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
{
- struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = priv->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
@@ -279,9 +216,31 @@ MODULE_ALIAS("platform:mxc-ehci");
static struct platform_driver ehci_mxc_driver = {
.probe = ehci_mxc_drv_probe,
- .remove = __exit_p(ehci_mxc_drv_remove),
+ .remove = ehci_mxc_drv_remove,
.shutdown = ehci_mxc_drv_shutdown,
.driver = {
.name = "mxc-ehci",
},
};
+
+static int __init ehci_mxc_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides);
+ return platform_driver_register(&ehci_mxc_driver);
+}
+module_init(ehci_mxc_init);
+
+static void __exit ehci_mxc_cleanup(void)
+{
+ platform_driver_unregister(&ehci_mxc_driver);
+}
+module_exit(ehci_mxc_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index ac17a7c3a0cd..99899e808c6a 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -288,7 +288,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_hcd_omap_platform_data *pdata = dev->platform_data;
usb_remove_hcd(hcd);
disable_put_regulator(dev->platform_data);
@@ -298,13 +297,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
- if (pdata->phy_reset) {
- if (gpio_is_valid(pdata->reset_gpio_port[0]))
- gpio_free(pdata->reset_gpio_port[0]);
-
- if (gpio_is_valid(pdata->reset_gpio_port[1]))
- gpio_free(pdata->reset_gpio_port[1]);
- }
return 0;
}
@@ -372,7 +364,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
-MODULE_ALIAS("platform:omap-ehci");
+MODULE_ALIAS("platform:ehci-omap");
MODULE_AUTHOR("Texas Instruments, Inc.");
MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index a7d1f5b4c4ed..914a3ecfb5d3 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -325,7 +325,7 @@ static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:orion-ehci");
-static const struct of_device_id ehci_orion_dt_ids[] __devinitdata = {
+static const struct of_device_id ehci_orion_dt_ids[] = {
{ .compatible = "marvell,orion-ehci", },
{},
};
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index dabb20494826..170b9399e09f 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -200,6 +200,26 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
}
+ /* optional debug port, normally in the first BAR */
+ temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
+ if (temp) {
+ pci_read_config_dword(pdev, temp, &temp);
+ temp >>= 16;
+ if (((temp >> 13) & 7) == 1) {
+ u32 hcs_params = ehci_readl(ehci,
+ &ehci->caps->hcs_params);
+
+ temp &= 0x1fff;
+ ehci->debug = hcd->regs + temp;
+ temp = ehci_readl(ehci, &ehci->debug->control);
+ ehci_info(ehci, "debug port %d%s\n",
+ HCS_DEBUG_PORT(hcs_params),
+ (temp & DBGP_ENABLED) ? " IN USE" : "");
+ if (!(temp & DBGP_ENABLED))
+ ehci->debug = NULL;
+ }
+ }
+
retval = ehci_setup(hcd);
if (retval)
return retval;
@@ -228,25 +248,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
}
- /* optional debug port, normally in the first BAR */
- temp = pci_find_capability(pdev, 0x0a);
- if (temp) {
- pci_read_config_dword(pdev, temp, &temp);
- temp >>= 16;
- if ((temp & (3 << 13)) == (1 << 13)) {
- temp &= 0x1fff;
- ehci->debug = hcd->regs + temp;
- temp = ehci_readl(ehci, &ehci->debug->control);
- ehci_info(ehci, "debug port %d%s\n",
- HCS_DEBUG_PORT(ehci->hcs_params),
- (temp & DBGP_ENABLED)
- ? " IN USE"
- : "");
- if (!(temp & DBGP_ENABLED))
- ehci->debug = NULL;
- }
- }
-
/* at least the Genesys GL880S needs fixup here */
temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
temp &= 0x0f;
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 58fa0c90c7c7..ca7506390542 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -18,6 +18,7 @@
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
@@ -104,9 +105,9 @@ static int ehci_platform_probe(struct platform_device *dev)
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
- hcd->regs = devm_request_and_ioremap(&dev->dev, res_mem);
- if (!hcd->regs) {
- err = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 45aceefd0c2b..56dc732bf451 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -12,6 +12,7 @@
* This file is licenced under the GPL.
*/
+#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of.h>
@@ -121,10 +122,9 @@ static int ehci_hcd_ppc_of_probe(struct platform_device *op)
goto err_irq;
}
- hcd->regs = devm_request_and_ioremap(&op->dev, &res);
- if (!hcd->regs) {
- pr_err("%s: devm_request_and_ioremap failed\n", __FILE__);
- rv = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
goto err_ioremap;
}
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 3d989028c836..fd252f0cfb3a 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
if (ehci->async_iaa || ehci->async_unlinking)
return;
- /* Do all the waiting QHs at once */
- ehci->async_iaa = ehci->async_unlink;
- ehci->async_unlink = NULL;
-
/* If the controller isn't running, we don't have to wait for it */
if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+
+ /* Do all the waiting QHs */
+ ehci->async_iaa = ehci->async_unlink;
+ ehci->async_unlink = NULL;
+
if (!nested) /* Avoid recursion */
end_unlink_async(ehci);
/* Otherwise start a new IAA cycle */
} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+ struct ehci_qh *qh;
+
+ /* Do only the first waiting QH (nVidia bug?) */
+ qh = ehci->async_unlink;
+ ehci->async_iaa = qh;
+ ehci->async_unlink = qh->unlink_next;
+ qh->unlink_next = NULL;
+
/* Make sure the unlinks are all visible to the hardware */
wmb();
@@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci)
}
}
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
+
static void unlink_empty_async(struct ehci_hcd *ehci)
{
- struct ehci_qh *qh, *next;
- bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+ struct ehci_qh *qh;
+ struct ehci_qh *qh_to_unlink = NULL;
bool check_unlinks_later = false;
+ int count = 0;
- /* Unlink all the async QHs that have been empty for a timer cycle */
- next = ehci->async->qh_next.qh;
- while (next) {
- qh = next;
- next = qh->qh_next.qh;
-
+ /* Find the last async QH which has been empty for a timer cycle */
+ for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
if (list_empty(&qh->qtd_list) &&
qh->qh_state == QH_STATE_LINKED) {
- if (!stopped && qh->unlink_cycle ==
- ehci->async_unlink_cycle)
+ ++count;
+ if (qh->unlink_cycle == ehci->async_unlink_cycle)
check_unlinks_later = true;
else
- single_unlink_async(ehci, qh);
+ qh_to_unlink = qh;
}
}
- /* Start a new IAA cycle if any QHs are waiting for it */
- if (ehci->async_unlink)
- start_iaa_cycle(ehci, false);
+ /* If nothing else is being unlinked, unlink the last empty QH */
+ if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) {
+ start_unlink_async(ehci, qh_to_unlink);
+ --count;
+ }
- /* QHs that haven't been empty for long enough will be handled later */
- if (check_unlinks_later) {
+ /* Other QHs will be handled later */
+ if (count > 0) {
ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
++ehci->async_unlink_cycle;
}
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index 319dcfaa8735..20ebf6a8b7f4 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -17,6 +17,8 @@
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/usb-ehci-s5p.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/samsung_usb_phy.h>
#include <plat/usb-phy.h>
#define EHCI_INSNREG00(base) (base + 0x90)
@@ -32,6 +34,9 @@ struct s5p_ehci_hcd {
struct device *dev;
struct usb_hcd *hcd;
struct clk *clk;
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ struct s5p_ehci_platdata *pdata;
};
static const struct hc_driver s5p_ehci_hc_driver = {
@@ -65,6 +70,26 @@ static const struct hc_driver s5p_ehci_hc_driver = {
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
+static void s5p_ehci_phy_enable(struct s5p_ehci_hcd *s5p_ehci)
+{
+ struct platform_device *pdev = to_platform_device(s5p_ehci->dev);
+
+ if (s5p_ehci->phy)
+ usb_phy_init(s5p_ehci->phy);
+ else if (s5p_ehci->pdata->phy_init)
+ s5p_ehci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST);
+}
+
+static void s5p_ehci_phy_disable(struct s5p_ehci_hcd *s5p_ehci)
+{
+ struct platform_device *pdev = to_platform_device(s5p_ehci->dev);
+
+ if (s5p_ehci->phy)
+ usb_phy_shutdown(s5p_ehci->phy);
+ else if (s5p_ehci->pdata->phy_exit)
+ s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
+}
+
static void s5p_setup_vbus_gpio(struct platform_device *pdev)
{
int err;
@@ -87,20 +112,15 @@ static u64 ehci_s5p_dma_mask = DMA_BIT_MASK(32);
static int s5p_ehci_probe(struct platform_device *pdev)
{
- struct s5p_ehci_platdata *pdata;
+ struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
struct s5p_ehci_hcd *s5p_ehci;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct resource *res;
+ struct usb_phy *phy;
int irq;
int err;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data defined\n");
- return -EINVAL;
- }
-
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
@@ -118,6 +138,20 @@ static int s5p_ehci_probe(struct platform_device *pdev)
if (!s5p_ehci)
return -ENOMEM;
+ phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(phy)) {
+ /* Fallback to pdata */
+ if (!pdata) {
+ dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
+ return -EPROBE_DEFER;
+ } else {
+ s5p_ehci->pdata = pdata;
+ }
+ } else {
+ s5p_ehci->phy = phy;
+ s5p_ehci->otg = phy->otg;
+ }
+
s5p_ehci->dev = &pdev->dev;
hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev,
@@ -163,8 +197,10 @@ static int s5p_ehci_probe(struct platform_device *pdev)
goto fail_io;
}
- if (pdata->phy_init)
- pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+ if (s5p_ehci->otg)
+ s5p_ehci->otg->set_host(s5p_ehci->otg, &s5p_ehci->hcd->self);
+
+ s5p_ehci_phy_enable(s5p_ehci);
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
@@ -175,13 +211,15 @@ static int s5p_ehci_probe(struct platform_device *pdev)
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
- goto fail_io;
+ goto fail_add_hcd;
}
platform_set_drvdata(pdev, s5p_ehci);
return 0;
+fail_add_hcd:
+ s5p_ehci_phy_disable(s5p_ehci);
fail_io:
clk_disable_unprepare(s5p_ehci->clk);
fail_clk:
@@ -191,14 +229,15 @@ fail_clk:
static int s5p_ehci_remove(struct platform_device *pdev)
{
- struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
struct usb_hcd *hcd = s5p_ehci->hcd;
usb_remove_hcd(hcd);
- if (pdata && pdata->phy_exit)
- pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+ if (s5p_ehci->otg)
+ s5p_ehci->otg->set_host(s5p_ehci->otg, &s5p_ehci->hcd->self);
+
+ s5p_ehci_phy_disable(s5p_ehci);
clk_disable_unprepare(s5p_ehci->clk);
@@ -222,14 +261,14 @@ static int s5p_ehci_suspend(struct device *dev)
struct s5p_ehci_hcd *s5p_ehci = dev_get_drvdata(dev);
struct usb_hcd *hcd = s5p_ehci->hcd;
bool do_wakeup = device_may_wakeup(dev);
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
int rc;
rc = ehci_suspend(hcd, do_wakeup);
- if (pdata && pdata->phy_exit)
- pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+ if (s5p_ehci->otg)
+ s5p_ehci->otg->set_host(s5p_ehci->otg, &s5p_ehci->hcd->self);
+
+ s5p_ehci_phy_disable(s5p_ehci);
clk_disable_unprepare(s5p_ehci->clk);
@@ -240,13 +279,13 @@ static int s5p_ehci_resume(struct device *dev)
{
struct s5p_ehci_hcd *s5p_ehci = dev_get_drvdata(dev);
struct usb_hcd *hcd = s5p_ehci->hcd;
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
clk_prepare_enable(s5p_ehci->clk);
- if (pdata && pdata->phy_init)
- pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+ if (s5p_ehci->otg)
+ s5p_ehci->otg->set_host(s5p_ehci->otg, &s5p_ehci->hcd->self);
+
+ s5p_ehci_phy_enable(s5p_ehci);
/* DMA burst Enable */
writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
@@ -266,7 +305,7 @@ static const struct dev_pm_ops s5p_ehci_pm_ops = {
#ifdef CONFIG_OF
static const struct of_device_id exynos_ehci_match[] = {
- { .compatible = "samsung,exynos-ehci" },
+ { .compatible = "samsung,exynos4210-ehci" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_ehci_match);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 69ebee73c0c1..b476daf49f6f 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
}
static const unsigned char
-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
/* carryover low/fullspeed bandwidth that crosses uframe boundries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
@@ -2212,11 +2212,11 @@ static void scan_isoc(struct ehci_hcd *ehci)
}
ehci->now_frame = now_frame;
+ frame = ehci->last_iso_frame;
for (;;) {
union ehci_shadow q, *q_p;
__hc32 type, *hw_p;
- frame = ehci->last_iso_frame;
restart:
/* scan each element in frame's queue for completions */
q_p = &ehci->pshadow [frame];
@@ -2321,6 +2321,9 @@ restart:
/* Stop when we have reached the current frame */
if (frame == now_frame)
break;
- ehci->last_iso_frame = (frame + 1) & fmask;
+
+ /* The last frame may still have active siTDs */
+ ehci->last_iso_frame = frame;
+ frame = (frame + 1) & fmask;
}
}
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index efad02d947f2..f55477c5a1be 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -19,6 +19,7 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/err.h>
#include <linux/platform_device.h>
static int ehci_sead3_setup(struct usb_hcd *hcd)
@@ -112,10 +113,9 @@ static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!hcd->regs) {
- pr_debug("ioremap failed");
- ret = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
goto err1;
}
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 0c90a24fa989..3565a300f401 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -118,10 +118,9 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- ret = -ENXIO;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
goto fail_request_resource;
}
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index acf17556bd87..568aecc7075b 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -2,7 +2,7 @@
* EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
*
* Copyright (C) 2010 Google, Inc.
- * Copyright (C) 2009 NVIDIA Corporation
+ * Copyright (C) 2009 - 2013 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,23 +26,28 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
-
+#include <linux/usb/ehci_def.h>
#include <linux/usb/tegra_usb_phy.h>
#define TEGRA_USB_BASE 0xC5000000
#define TEGRA_USB2_BASE 0xC5004000
#define TEGRA_USB3_BASE 0xC5008000
+/* PORTSC registers */
+#define TEGRA_USB_PORTSC1 0x184
+#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
+#define TEGRA_USB_PORTSC1_PHCD (1 << 23)
+
#define TEGRA_USB_DMA_ALIGN 32
struct tegra_ehci_hcd {
struct ehci_hcd *ehci;
struct tegra_usb_phy *phy;
struct clk *clk;
- struct clk *emc_clk;
struct usb_phy *transceiver;
int host_resumed;
int port_resuming;
+ bool needs_double_reset;
enum tegra_usb_phy_port_speed port_speed;
};
@@ -50,9 +55,8 @@ static void tegra_ehci_power_up(struct usb_hcd *hcd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
- clk_prepare_enable(tegra->emc_clk);
clk_prepare_enable(tegra->clk);
- usb_phy_set_suspend(&tegra->phy->u_phy, 0);
+ usb_phy_set_suspend(hcd->phy, 0);
tegra->host_resumed = 1;
}
@@ -61,9 +65,8 @@ static void tegra_ehci_power_down(struct usb_hcd *hcd)
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
tegra->host_resumed = 0;
- usb_phy_set_suspend(&tegra->phy->u_phy, 1);
+ usb_phy_set_suspend(hcd->phy, 1);
clk_disable_unprepare(tegra->clk);
- clk_disable_unprepare(tegra->emc_clk);
}
static int tegra_ehci_internal_port_reset(
@@ -156,7 +159,7 @@ static int tegra_ehci_hub_control(
if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
/* Resume completed, re-enable disconnect detection */
tegra->port_resuming = 0;
- tegra_usb_phy_postresume(tegra->phy);
+ tegra_usb_phy_postresume(hcd->phy);
}
}
@@ -184,7 +187,7 @@ static int tegra_ehci_hub_control(
}
/* For USB1 port we need to issue Port Reset twice internally */
- if (tegra->phy->instance == 0 &&
+ if (tegra->needs_double_reset &&
(typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) {
spin_unlock_irqrestore(&ehci->lock, flags);
return tegra_ehci_internal_port_reset(ehci, status_reg);
@@ -209,7 +212,7 @@ static int tegra_ehci_hub_control(
goto done;
/* Disable disconnect detection during port resume */
- tegra_usb_phy_preresume(tegra->phy);
+ tegra_usb_phy_preresume(hcd->phy);
ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
@@ -473,7 +476,7 @@ static int controller_resume(struct device *dev)
}
/* Force the phy to keep data lines in suspend state */
- tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
+ tegra_ehci_phy_restore_start(hcd->phy, tegra->port_speed);
/* Enable host mode */
tdi_reset(ehci);
@@ -540,17 +543,17 @@ static int controller_resume(struct device *dev)
}
}
- tegra_ehci_phy_restore_end(tegra->phy);
+ tegra_ehci_phy_restore_end(hcd->phy);
goto done;
restart:
if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
- tegra_ehci_phy_restore_end(tegra->phy);
+ tegra_ehci_phy_restore_end(hcd->phy);
tegra_ehci_restart(hcd);
done:
- tegra_usb_phy_preresume(tegra->phy);
+ tegra_usb_phy_preresume(hcd->phy);
tegra->port_resuming = 1;
return 0;
}
@@ -604,6 +607,37 @@ static const struct dev_pm_ops tegra_ehci_pm_ops = {
#endif
+/* Bits of PORTSC1, which will get cleared by writing 1 into them */
+#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
+
+void tegra_ehci_set_pts(struct usb_phy *x, u8 pts_val)
+{
+ unsigned long val;
+ struct usb_hcd *hcd = bus_to_hcd(x->otg->host);
+ void __iomem *base = hcd->regs;
+
+ val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+ val &= ~TEGRA_USB_PORTSC1_PTS(3);
+ val |= TEGRA_USB_PORTSC1_PTS(pts_val & 3);
+ writel(val, base + TEGRA_USB_PORTSC1);
+}
+EXPORT_SYMBOL_GPL(tegra_ehci_set_pts);
+
+void tegra_ehci_set_phcd(struct usb_phy *x, bool enable)
+{
+ unsigned long val;
+ struct usb_hcd *hcd = bus_to_hcd(x->otg->host);
+ void __iomem *base = hcd->regs;
+
+ val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+ if (enable)
+ val |= TEGRA_USB_PORTSC1_PHCD;
+ else
+ val &= ~TEGRA_USB_PORTSC1_PHCD;
+ writel(val, base + TEGRA_USB_PORTSC1);
+}
+EXPORT_SYMBOL_GPL(tegra_ehci_set_phcd);
+
static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
static int tegra_ehci_probe(struct platform_device *pdev)
@@ -615,6 +649,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
int err = 0;
int irq;
int instance = pdev->id;
+ struct usb_phy *u_phy;
pdata = pdev->dev.platform_data;
if (!pdata) {
@@ -656,15 +691,8 @@ static int tegra_ehci_probe(struct platform_device *pdev)
if (err)
goto fail_clk;
- tegra->emc_clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(tegra->emc_clk)) {
- dev_err(&pdev->dev, "Can't get emc clock\n");
- err = PTR_ERR(tegra->emc_clk);
- goto fail_emc_clk;
- }
-
- clk_prepare_enable(tegra->emc_clk);
- clk_set_rate(tegra->emc_clk, 400000000);
+ tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node,
+ "nvidia,needs-double-reset");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -712,9 +740,19 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_io;
}
- usb_phy_init(&tegra->phy->u_phy);
+ hcd->phy = u_phy = &tegra->phy->u_phy;
+ usb_phy_init(hcd->phy);
+
+ u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
+ GFP_KERNEL);
+ if (!u_phy->otg) {
+ dev_err(&pdev->dev, "Failed to alloc memory for otg\n");
+ err = -ENOMEM;
+ goto fail_io;
+ }
+ u_phy->otg->host = hcd_to_bus(hcd);
- err = usb_phy_set_suspend(&tegra->phy->u_phy, 0);
+ err = usb_phy_set_suspend(hcd->phy, 0);
if (err) {
dev_err(&pdev->dev, "Failed to power on the phy\n");
goto fail;
@@ -760,10 +798,8 @@ fail:
if (!IS_ERR_OR_NULL(tegra->transceiver))
otg_set_host(tegra->transceiver->otg, NULL);
#endif
- usb_phy_shutdown(&tegra->phy->u_phy);
+ usb_phy_shutdown(hcd->phy);
fail_io:
- clk_disable_unprepare(tegra->emc_clk);
-fail_emc_clk:
clk_disable_unprepare(tegra->clk);
fail_clk:
usb_put_hcd(hcd);
@@ -784,15 +820,12 @@ static int tegra_ehci_remove(struct platform_device *pdev)
otg_set_host(tegra->transceiver->otg, NULL);
#endif
+ usb_phy_shutdown(hcd->phy);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
- usb_phy_shutdown(&tegra->phy->u_phy);
-
clk_disable_unprepare(tegra->clk);
- clk_disable_unprepare(tegra->emc_clk);
-
return 0;
}
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 20dbdcbe9b0f..f904071d70df 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci)
if (want != actual) {
- /* Poll again later, but give up after about 20 ms */
- if (ehci->ASS_poll_count++ < 20) {
- ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
- return;
- }
- ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
- want, actual);
+ /* Poll again later */
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
+ ++ehci->ASS_poll_count;
+ return;
}
+
+ if (ehci->ASS_poll_count > 20)
+ ehci_dbg(ehci, "ASS poll count reached %d\n",
+ ehci->ASS_poll_count);
ehci->ASS_poll_count = 0;
/* The status is up-to-date; restart or stop the schedule as needed */
@@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci)
if (want != actual) {
- /* Poll again later, but give up after about 20 ms */
- if (ehci->PSS_poll_count++ < 20) {
- ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
- return;
- }
- ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
- want, actual);
+ /* Poll again later */
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
+ ++ehci->PSS_poll_count;
+ return;
}
+
+ if (ehci->PSS_poll_count > 20)
+ ehci_dbg(ehci, "PSS poll count reached %d\n",
+ ehci->PSS_poll_count);
ehci->PSS_poll_count = 0;
/* The status is up-to-date; restart or stop the schedule as needed */
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c
index 11695d5b9d86..7ecf709610ba 100644
--- a/drivers/usb/host/ehci-vt8500.c
+++ b/drivers/usb/host/ehci-vt8500.c
@@ -16,6 +16,7 @@
*
*/
+#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -96,10 +97,9 @@ static int vt8500_ehci_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!hcd->regs) {
- pr_debug("ioremap failed");
- ret = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
goto err1;
}
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 4f285e8e404a..d845e3bcfaff 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -25,6 +25,7 @@
*
*/
+#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of.h>
@@ -159,10 +160,9 @@ static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
goto err_irq;
}
- hcd->regs = devm_request_and_ioremap(&op->dev, &res);
- if (!hcd->regs) {
- pr_err("%s: devm_request_and_ioremap failed\n", __FILE__);
- rv = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
goto err_irq;
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 9dadc7118d68..36c3a8210595 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -38,6 +38,10 @@ typedef __u16 __bitwise __hc16;
#endif
/* statistics can be kept for tuning/monitoring */
+#ifdef DEBUG
+#define EHCI_STATS
+#endif
+
struct ehci_stats {
/* irq usage */
unsigned long normal;
@@ -221,6 +225,9 @@ struct ehci_hcd { /* one per controller */
#ifdef DEBUG
struct dentry *debug_dir;
#endif
+
+ /* platform-specific data -- must come last */
+ unsigned long priv[0] __aligned(sizeof(s64));
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 5105127c1d4b..11e0b79ff9d5 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -142,6 +142,9 @@ static int usb_get_ver_info(struct device_node *np)
return ver;
}
+ if (of_device_is_compatible(np, "fsl,mpc5121-usb2-dr"))
+ return FSL_USB_VER_OLD;
+
if (of_device_is_compatible(np, "fsl-usb2-mph")) {
if (of_device_is_compatible(np, "fsl-usb2-mph-v1.6"))
ver = FSL_USB_VER_1_6;
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index bd6a7447ccc9..f0ebe8e7c58b 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -58,6 +58,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include "imx21-hcd.h"
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index a35bbddf8968..125e261f5bfc 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -932,7 +932,7 @@ static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
}
}
-void schedule_ptds(struct usb_hcd *hcd)
+static void schedule_ptds(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv;
struct isp1760_qh *qh, *qh_next;
@@ -1285,7 +1285,7 @@ leave:
#define SLOT_CHECK_PERIOD 200
static struct timer_list errata2_timer;
-void errata2_function(unsigned long data)
+static void errata2_function(unsigned long data)
{
struct usb_hcd *hcd = (struct usb_hcd *) data;
struct isp1760_hcd *priv = hcd_to_priv(hcd);
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index aa3b8844bb9f..e3b7e85120e4 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -15,14 +15,39 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-exynos.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/samsung_usb_phy.h>
#include <plat/usb-phy.h>
struct exynos_ohci_hcd {
struct device *dev;
struct usb_hcd *hcd;
struct clk *clk;
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ struct exynos4_ohci_platdata *pdata;
};
+static void exynos_ohci_phy_enable(struct exynos_ohci_hcd *exynos_ohci)
+{
+ struct platform_device *pdev = to_platform_device(exynos_ohci->dev);
+
+ if (exynos_ohci->phy)
+ usb_phy_init(exynos_ohci->phy);
+ else if (exynos_ohci->pdata->phy_init)
+ exynos_ohci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST);
+}
+
+static void exynos_ohci_phy_disable(struct exynos_ohci_hcd *exynos_ohci)
+{
+ struct platform_device *pdev = to_platform_device(exynos_ohci->dev);
+
+ if (exynos_ohci->phy)
+ usb_phy_shutdown(exynos_ohci->phy);
+ else if (exynos_ohci->pdata->phy_exit)
+ exynos_ohci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
+}
+
static int ohci_exynos_reset(struct usb_hcd *hcd)
{
return ohci_init(hcd_to_ohci(hcd));
@@ -78,20 +103,15 @@ static u64 ohci_exynos_dma_mask = DMA_BIT_MASK(32);
static int exynos_ohci_probe(struct platform_device *pdev)
{
- struct exynos4_ohci_platdata *pdata;
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
struct exynos_ohci_hcd *exynos_ohci;
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct resource *res;
+ struct usb_phy *phy;
int irq;
int err;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data defined\n");
- return -EINVAL;
- }
-
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
@@ -107,6 +127,20 @@ static int exynos_ohci_probe(struct platform_device *pdev)
if (!exynos_ohci)
return -ENOMEM;
+ phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(phy)) {
+ /* Fallback to pdata */
+ if (!pdata) {
+ dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
+ return -EPROBE_DEFER;
+ } else {
+ exynos_ohci->pdata = pdata;
+ }
+ } else {
+ exynos_ohci->phy = phy;
+ exynos_ohci->otg = phy->otg;
+ }
+
exynos_ohci->dev = &pdev->dev;
hcd = usb_create_hcd(&exynos_ohci_hc_driver, &pdev->dev,
@@ -152,8 +186,11 @@ static int exynos_ohci_probe(struct platform_device *pdev)
goto fail_io;
}
- if (pdata->phy_init)
- pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg,
+ &exynos_ohci->hcd->self);
+
+ exynos_ohci_phy_enable(exynos_ohci);
ohci = hcd_to_ohci(hcd);
ohci_hcd_init(ohci);
@@ -161,13 +198,15 @@ static int exynos_ohci_probe(struct platform_device *pdev)
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
- goto fail_io;
+ goto fail_add_hcd;
}
platform_set_drvdata(pdev, exynos_ohci);
return 0;
+fail_add_hcd:
+ exynos_ohci_phy_disable(exynos_ohci);
fail_io:
clk_disable_unprepare(exynos_ohci->clk);
fail_clk:
@@ -177,14 +216,16 @@ fail_clk:
static int exynos_ohci_remove(struct platform_device *pdev)
{
- struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
struct usb_hcd *hcd = exynos_ohci->hcd;
usb_remove_hcd(hcd);
- if (pdata && pdata->phy_exit)
- pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg,
+ &exynos_ohci->hcd->self);
+
+ exynos_ohci_phy_disable(exynos_ohci);
clk_disable_unprepare(exynos_ohci->clk);
@@ -208,8 +249,6 @@ static int exynos_ohci_suspend(struct device *dev)
struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
struct usb_hcd *hcd = exynos_ohci->hcd;
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
unsigned long flags;
int rc = 0;
@@ -228,8 +267,11 @@ static int exynos_ohci_suspend(struct device *dev)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- if (pdata && pdata->phy_exit)
- pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg,
+ &exynos_ohci->hcd->self);
+
+ exynos_ohci_phy_disable(exynos_ohci);
clk_disable_unprepare(exynos_ohci->clk);
@@ -243,13 +285,14 @@ static int exynos_ohci_resume(struct device *dev)
{
struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
struct usb_hcd *hcd = exynos_ohci->hcd;
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
clk_prepare_enable(exynos_ohci->clk);
- if (pdata && pdata->phy_init)
- pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg,
+ &exynos_ohci->hcd->self);
+
+ exynos_ohci_phy_enable(exynos_ohci);
ohci_resume(hcd, false);
@@ -267,7 +310,7 @@ static const struct dev_pm_ops exynos_ohci_pm_ops = {
#ifdef CONFIG_OF
static const struct of_device_id exynos_ohci_match[] = {
- { .compatible = "samsung,exynos-ohci" },
+ { .compatible = "samsung,exynos4210-ohci" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_ohci_match);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 2344040c16d2..f4988fbe78e7 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -306,10 +306,9 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
goto out8;
}
- hcd->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "Failed to devm_request_and_ioremap\n");
- ret = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
goto out8;
}
hcd->rsrc_start = res->start;
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 084503b03fcf..c3e7287f7921 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -13,6 +13,7 @@
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/usb/ohci_pdriver.h>
@@ -127,9 +128,9 @@ static int ohci_platform_probe(struct platform_device *dev)
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
- hcd->regs = devm_request_and_ioremap(&dev->dev, res_mem);
- if (!hcd->regs) {
- err = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 7482cfbe8c5e..88731b7c5f42 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -44,6 +44,7 @@ __acquires(ohci->lock)
// ASSERT (urb->hcpriv != 0);
urb_free_priv (ohci, urb->hcpriv);
+ urb->hcpriv = NULL;
if (likely(status == -EINPROGRESS))
status = 0;
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index ad0f55269603..e125770b893c 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -351,10 +351,9 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
hcd->rsrc_start = dev->resource[0].start;
hcd->rsrc_len = resource_size(&dev->resource[0]);
- hcd->regs = devm_request_and_ioremap(&dev->dev, &dev->resource[0]);
- if (!hcd->regs) {
- dev_err(&dev->dev, "devm_request_and_ioremap failed\n");
- retval = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&dev->dev, &dev->resource[0]);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err_put;
}
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index d370245a4ee2..5e3a6deb62b1 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -128,7 +128,8 @@ static void tmio_start_hc(struct platform_device *dev)
tmio_iowrite8(2, tmio->ccr + CCR_INTC);
dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
- tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
+ tmio_ioread8(tmio->ccr + CCR_REVID),
+ (u64) hcd->rsrc_start, hcd->irq);
}
static int ohci_tmio_start(struct usb_hcd *hcd)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a3b6d7104ae2..4c338ec03a07 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
"defaulting to EHCI.\n");
dev_warn(&xhci_pdev->dev,
"USB 3.0 devices will work at USB 2.0 speeds.\n");
+ usb_disable_xhci_ports(xhci_pdev);
return;
}
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index fc0b0daac93d..455737546525 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -16,6 +16,8 @@
#include "uhci-hcd.h"
+#define EXTRA_SPACE 1024
+
static struct dentry *uhci_debugfs_root;
#ifdef DEBUG
@@ -44,10 +46,6 @@ static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf,
char *spid;
u32 status, token;
- /* Try to make sure there's enough memory */
- if (len < 160)
- return 0;
-
status = td_status(uhci, td);
out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td,
hc32_to_cpu(uhci, td->link));
@@ -64,6 +62,8 @@ static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf,
(status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
(status & TD_CTRL_BITSTUFF) ? "BitStuff " : "",
status & 0x7ff);
+ if (out - buf > len)
+ goto done;
token = td_token(uhci, td);
switch (uhci_packetid(token)) {
@@ -90,6 +90,9 @@ static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf,
spid);
out += sprintf(out, "(buf=%08x)\n", hc32_to_cpu(uhci, td->buffer));
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
return out - buf;
}
@@ -101,8 +104,6 @@ static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
int i, nactive, ninactive;
char *ptype;
- if (len < 200)
- return 0;
out += sprintf(out, "urb_priv [%p] ", urbp);
out += sprintf(out, "urb [%p] ", urbp->urb);
@@ -110,6 +111,8 @@ static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
+ if (out - buf > len)
+ goto done;
switch (usb_pipetype(urbp->urb->pipe)) {
case PIPE_ISOCHRONOUS: ptype = "ISO"; break;
@@ -128,6 +131,9 @@ static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
out += sprintf(out, " Unlinked=%d", urbp->urb->unlinked);
out += sprintf(out, "\n");
+ if (out - buf > len)
+ goto done;
+
i = nactive = ninactive = 0;
list_for_each_entry(td, &urbp->td_list, list) {
if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC &&
@@ -135,6 +141,8 @@ static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
out += sprintf(out, "%*s%d: ", space + 2, "", i);
out += uhci_show_td(uhci, td, out,
len - (out - buf), 0);
+ if (out - buf > len)
+ goto tail;
} else {
if (td_status(uhci, td) & TD_CTRL_ACTIVE)
++nactive;
@@ -143,10 +151,13 @@ static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
}
}
if (nactive + ninactive > 0)
- out += sprintf(out, "%*s[skipped %d inactive and %d active "
- "TDs]\n",
+ out += sprintf(out,
+ "%*s[skipped %d inactive and %d active TDs]\n",
space, "", ninactive, nactive);
-
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
+tail:
return out - buf;
}
@@ -158,10 +169,6 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
__hc32 element = qh_element(qh);
char *qtype;
- /* Try to make sure there's enough memory */
- if (len < 80 * 7)
- return 0;
-
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC: qtype = "ISO"; break;
case USB_ENDPOINT_XFER_INT: qtype = "INT"; break;
@@ -175,13 +182,15 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
hc32_to_cpu(uhci, qh->link),
hc32_to_cpu(uhci, element));
if (qh->type == USB_ENDPOINT_XFER_ISOC)
- out += sprintf(out, "%*s period %d phase %d load %d us, "
- "frame %x desc [%p]\n",
+ out += sprintf(out,
+ "%*s period %d phase %d load %d us, frame %x desc [%p]\n",
space, "", qh->period, qh->phase, qh->load,
qh->iso_frame, qh->iso_packet_desc);
else if (qh->type == USB_ENDPOINT_XFER_INT)
out += sprintf(out, "%*s period %d phase %d load %d us\n",
space, "", qh->period, qh->phase, qh->load);
+ if (out - buf > len)
+ goto done;
if (element & UHCI_PTR_QH(uhci))
out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");
@@ -195,11 +204,17 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
if (!(element & ~(UHCI_PTR_QH(uhci) | UHCI_PTR_DEPTH(uhci))))
out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");
+ if (out - buf > len)
+ goto done;
+
if (list_empty(&qh->queue)) {
out += sprintf(out, "%*s queue is empty\n", space, "");
- if (qh == uhci->skel_async_qh)
+ if (qh == uhci->skel_async_qh) {
out += uhci_show_td(uhci, uhci->term_td, out,
len - (out - buf), 0);
+ if (out - buf > len)
+ goto tail;
+ }
} else {
struct urb_priv *urbp = list_entry(qh->queue.next,
struct urb_priv, node);
@@ -211,9 +226,12 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
space, "");
i = nurbs = 0;
list_for_each_entry(urbp, &qh->queue, node) {
- if (++i <= 10)
+ if (++i <= 10) {
out += uhci_show_urbp(uhci, urbp, out,
len - (out - buf), space + 2);
+ if (out - buf > len)
+ goto tail;
+ }
else
++nurbs;
}
@@ -222,24 +240,27 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
space, "", nurbs);
}
+ if (out - buf > len)
+ goto done;
+
if (qh->dummy_td) {
out += sprintf(out, "%*s Dummy TD\n", space, "");
out += uhci_show_td(uhci, qh->dummy_td, out,
len - (out - buf), 0);
+ if (out - buf > len)
+ goto tail;
}
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
+tail:
return out - buf;
}
-static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
+static int uhci_show_sc(int port, unsigned short status, char *buf)
{
- char *out = buf;
-
- /* Try to make sure there's enough memory */
- if (len < 160)
- return 0;
-
- out += sprintf(out, " stat%d = %04x %s%s%s%s%s%s%s%s%s%s\n",
+ return sprintf(buf, " stat%d = %04x %s%s%s%s%s%s%s%s%s%s\n",
port,
status,
(status & USBPORTSC_SUSP) ? " Suspend" : "",
@@ -252,19 +273,12 @@ static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
(status & USBPORTSC_PE) ? " Enabled" : "",
(status & USBPORTSC_CSC) ? " ConnectChange" : "",
(status & USBPORTSC_CCS) ? " Connected" : "");
-
- return out - buf;
}
-static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf, int len)
+static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf)
{
- char *out = buf;
char *rh_state;
- /* Try to make sure there's enough memory */
- if (len < 60)
- return 0;
-
switch (uhci->rh_state) {
case UHCI_RH_RESET:
rh_state = "reset"; break;
@@ -283,9 +297,8 @@ static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf, int len)
default:
rh_state = "?"; break;
}
- out += sprintf(out, "Root-hub state: %s FSBR: %d\n",
+ return sprintf(buf, "Root-hub state: %s FSBR: %d\n",
rh_state, uhci->fsbr_is_on);
- return out - buf;
}
static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
@@ -296,9 +309,6 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
unsigned char sof;
unsigned short portsc1, portsc2;
- /* Try to make sure there's enough memory */
- if (len < 80 * 9)
- return 0;
usbcmd = uhci_readw(uhci, 0);
usbstat = uhci_readw(uhci, 2);
@@ -319,6 +329,8 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
(usbcmd & USBCMD_GRESET) ? "GRESET " : "",
(usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
(usbcmd & USBCMD_RS) ? "RS " : "");
+ if (out - buf > len)
+ goto done;
out += sprintf(out, " usbstat = %04x %s%s%s%s%s%s\n",
usbstat,
@@ -328,19 +340,33 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
(usbstat & USBSTS_RD) ? "ResumeDetect " : "",
(usbstat & USBSTS_ERROR) ? "USBError " : "",
(usbstat & USBSTS_USBINT) ? "USBINT " : "");
+ if (out - buf > len)
+ goto done;
out += sprintf(out, " usbint = %04x\n", usbint);
out += sprintf(out, " usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1,
0xfff & (4*(unsigned int)usbfrnum));
out += sprintf(out, " flbaseadd = %08x\n", flbaseadd);
out += sprintf(out, " sof = %02x\n", sof);
- out += uhci_show_sc(1, portsc1, out, len - (out - buf));
- out += uhci_show_sc(2, portsc2, out, len - (out - buf));
- out += sprintf(out, "Most recent frame: %x (%d) "
- "Last ISO frame: %x (%d)\n",
+ if (out - buf > len)
+ goto done;
+
+ out += uhci_show_sc(1, portsc1, out);
+ if (out - buf > len)
+ goto done;
+
+ out += uhci_show_sc(2, portsc2, out);
+ if (out - buf > len)
+ goto done;
+
+ out += sprintf(out,
+ "Most recent frame: %x (%d) Last ISO frame: %x (%d)\n",
uhci->frame_number, uhci->frame_number & 1023,
uhci->last_iso_frame, uhci->last_iso_frame & 1023);
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
return out - buf;
}
@@ -360,9 +386,13 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
"int8", "int4", "int2", "async", "term"
};
- out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
+ out += uhci_show_root_hub_state(uhci, out);
+ if (out - buf > len)
+ goto done;
out += sprintf(out, "HC status\n");
out += uhci_show_status(uhci, out, len - (out - buf));
+ if (out - buf > len)
+ goto tail;
out += sprintf(out, "Periodic load table\n");
for (i = 0; i < MAX_PHASE; ++i) {
@@ -375,7 +405,7 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
if (debug <= 1)
- return out - buf;
+ goto tail;
out += sprintf(out, "Frame List\n");
nframes = 10;
@@ -383,6 +413,8 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
for (i = 0; i < UHCI_NUMFRAMES; ++i) {
__hc32 qh_dma;
+ if (out - buf > len)
+ goto done;
j = 0;
td = uhci->frame_cpu[i];
link = uhci->frame[i];
@@ -401,15 +433,20 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
td = list_entry(tmp, struct uhci_td, fl_list);
tmp = tmp->next;
if (link != LINK_TO_TD(uhci, td)) {
- if (nframes > 0)
- out += sprintf(out, " link does "
- "not match list entry!\n");
- else
+ if (nframes > 0) {
+ out += sprintf(out,
+ " link does not match list entry!\n");
+ if (out - buf > len)
+ goto done;
+ } else
++nerrs;
}
- if (nframes > 0)
+ if (nframes > 0) {
out += uhci_show_td(uhci, td, out,
len - (out - buf), 4);
+ if (out - buf > len)
+ goto tail;
+ }
link = td->link;
} while (tmp != head);
@@ -423,9 +460,11 @@ check_link:
i, hc32_to_cpu(uhci, link));
j = 1;
}
- out += sprintf(out, " link does not match "
- "QH (%08x)!\n",
+ out += sprintf(out,
+ " link does not match QH (%08x)!\n",
hc32_to_cpu(uhci, qh_dma));
+ if (out - buf > len)
+ goto done;
} else
++nerrs;
}
@@ -436,18 +475,27 @@ check_link:
out += sprintf(out, "Skeleton QHs\n");
+ if (out - buf > len)
+ goto done;
+
fsbr_link = 0;
for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
int cnt = 0;
qh = uhci->skelqh[i];
- out += sprintf(out, "- skel_%s_qh\n", qh_names[i]); \
+ out += sprintf(out, "- skel_%s_qh\n", qh_names[i]);
out += uhci_show_qh(uhci, qh, out, len - (out - buf), 4);
+ if (out - buf > len)
+ goto tail;
/* Last QH is the Terminating QH, it's different */
if (i == SKEL_TERM) {
- if (qh_element(qh) != LINK_TO_TD(uhci, uhci->term_td))
- out += sprintf(out, " skel_term_qh element is not set to term_td!\n");
+ if (qh_element(qh) != LINK_TO_TD(uhci, uhci->term_td)) {
+ out += sprintf(out,
+ " skel_term_qh element is not set to term_td!\n");
+ if (out - buf > len)
+ goto done;
+ }
link = fsbr_link;
if (!link)
link = LINK_TO_QH(uhci, uhci->skel_term_qh);
@@ -460,9 +508,12 @@ check_link:
while (tmp != head) {
qh = list_entry(tmp, struct uhci_qh, node);
tmp = tmp->next;
- if (++cnt <= 10)
+ if (++cnt <= 10) {
out += uhci_show_qh(uhci, qh, out,
len - (out - buf), 4);
+ if (out - buf > len)
+ goto tail;
+ }
if (!fsbr_link && qh->skel >= SKEL_FSBR)
fsbr_link = LINK_TO_QH(uhci, qh);
}
@@ -480,9 +531,17 @@ check_link:
link = LINK_TO_QH(uhci, uhci->skel_term_qh);
check_qh_link:
if (qh->link != link)
- out += sprintf(out, " last QH not linked to next skeleton!\n");
+ out += sprintf(out,
+ " last QH not linked to next skeleton!\n");
+
+ if (out - buf > len)
+ goto done;
}
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
+tail:
return out - buf;
}
@@ -514,7 +573,8 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
up->size = 0;
spin_lock_irqsave(&uhci->lock, flags);
if (uhci->is_initialized)
- up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);
+ up->size = uhci_sprint_schedule(uhci, up->data,
+ MAX_OUTPUT - EXTRA_SPACE);
spin_unlock_irqrestore(&uhci->lock, flags);
file->private_data = up;
@@ -529,7 +589,9 @@ static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
up = file->private_data;
- /* XXX: atomic 64bit seek access, but that needs to be fixed in the VFS */
+ /*
+ * XXX: atomic 64bit seek access, but that needs to be fixed in the VFS
+ */
switch (whence) {
case 0:
new = off;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 4b9e9aba2665..4a86b63745b8 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -447,23 +447,25 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
return IRQ_NONE;
uhci_writew(uhci, status, USBSTS); /* Clear it */
+ spin_lock(&uhci->lock);
+ if (unlikely(!uhci->is_initialized)) /* not yet configured */
+ goto done;
+
if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
if (status & USBSTS_HSE)
- dev_err(uhci_dev(uhci), "host system error, "
- "PCI problems?\n");
+ dev_err(uhci_dev(uhci),
+ "host system error, PCI problems?\n");
if (status & USBSTS_HCPE)
- dev_err(uhci_dev(uhci), "host controller process "
- "error, something bad happened!\n");
+ dev_err(uhci_dev(uhci),
+ "host controller process error, something bad happened!\n");
if (status & USBSTS_HCH) {
- spin_lock(&uhci->lock);
if (uhci->rh_state >= UHCI_RH_RUNNING) {
dev_err(uhci_dev(uhci),
- "host controller halted, "
- "very bad!\n");
+ "host controller halted, very bad!\n");
if (debug > 1 && errbuf) {
/* Print the schedule for debugging */
- uhci_sprint_schedule(uhci,
- errbuf, ERRBUF_LEN);
+ uhci_sprint_schedule(uhci, errbuf,
+ ERRBUF_LEN - EXTRA_SPACE);
lprintk(errbuf);
}
uhci_hc_died(uhci);
@@ -473,15 +475,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
* pending unlinks */
mod_timer(&hcd->rh_timer, jiffies);
}
- spin_unlock(&uhci->lock);
}
}
- if (status & USBSTS_RD)
+ if (status & USBSTS_RD) {
+ spin_unlock(&uhci->lock);
usb_hcd_poll_rh_status(hcd);
- else {
- spin_lock(&uhci->lock);
+ } else {
uhci_scan_schedule(uhci);
+ done:
spin_unlock(&uhci->lock);
}
@@ -589,8 +591,8 @@ static int uhci_start(struct usb_hcd *hcd)
UHCI_NUMFRAMES * sizeof(*uhci->frame),
&uhci->frame_dma_handle, 0);
if (!uhci->frame) {
- dev_err(uhci_dev(uhci), "unable to allocate "
- "consistent memory for frame list\n");
+ dev_err(uhci_dev(uhci),
+ "unable to allocate consistent memory for frame list\n");
goto err_alloc_frame;
}
memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame));
@@ -598,8 +600,8 @@ static int uhci_start(struct usb_hcd *hcd)
uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
GFP_KERNEL);
if (!uhci->frame_cpu) {
- dev_err(uhci_dev(uhci), "unable to allocate "
- "memory for frame pointers\n");
+ dev_err(uhci_dev(uhci),
+ "unable to allocate memory for frame pointers\n");
goto err_alloc_frame_cpu;
}
@@ -662,9 +664,9 @@ static int uhci_start(struct usb_hcd *hcd)
*/
mb();
+ spin_lock_irq(&uhci->lock);
configure_hc(uhci);
uhci->is_initialized = 1;
- spin_lock_irq(&uhci->lock);
start_rh(uhci);
spin_unlock_irq(&uhci->lock);
return 0;
@@ -734,8 +736,8 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
*/
else if (hcd->self.root_hub->do_remote_wakeup &&
uhci->resuming_ports) {
- dev_dbg(uhci_dev(uhci), "suspend failed because a port "
- "is resuming\n");
+ dev_dbg(uhci_dev(uhci),
+ "suspend failed because a port is resuming\n");
rc = -EBUSY;
} else
suspend_rh(uhci, UHCI_RH_SUSPENDED);
@@ -826,8 +828,8 @@ static int uhci_count_ports(struct usb_hcd *hcd)
/* Anything greater than 7 is weird so we'll ignore it. */
if (port > UHCI_RH_MAXCHILD) {
- dev_info(uhci_dev(uhci), "port count misdetected? "
- "forcing to 2 ports\n");
+ dev_info(uhci_dev(uhci),
+ "port count misdetected? forcing to 2 ports\n");
port = 2;
}
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 7af2b7052047..6f986d82472d 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -212,10 +212,6 @@ struct uhci_qh {
#define TD_CTRL_BITSTUFF (1 << 17) /* Bit Stuff Error */
#define TD_CTRL_ACTLEN_MASK 0x7FF /* actual length, encoded as n - 1 */
-#define TD_CTRL_ANY_ERROR (TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
- TD_CTRL_BABBLE | TD_CTRL_CRCTIME | \
- TD_CTRL_BITSTUFF)
-
#define uhci_maxerr(err) ((err) << TD_CTRL_C_ERR_SHIFT)
#define uhci_status_bits(ctrl_sts) ((ctrl_sts) & 0xF60000)
#define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & \
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 768d54295a20..f87bee6d2789 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -21,8 +21,8 @@ static const __u8 root_hub_hub_des[] =
0x00, /* (per-port OC, no power switching) */
0x01, /* __u8 bPwrOn2pwrGood; 2ms */
0x00, /* __u8 bHubContrCurrent; 0 mA */
- 0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
- 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
+ 0x00, /* __u8 DeviceRemovable; *** 7 Ports max */
+ 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max */
};
#define UHCI_RH_MAXCHILD 7
@@ -116,6 +116,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
}
}
clear_bit(port, &uhci->resuming_ports);
+ usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);
}
/* Wait for the UHCI controller in HP's iLO2 server management chip.
@@ -167,6 +168,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
set_bit(port, &uhci->resuming_ports);
uhci->ports_timeout = jiffies +
msecs_to_jiffies(25);
+ usb_hcd_start_port_resume(
+ &uhci_to_hcd(uhci)->self, port);
/* Make sure we see the port again
* after the resuming period is over. */
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 15921fd55048..f0976d8190bc 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1200,7 +1200,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
if (debug > 1 && errbuf) {
/* Print the chain for debugging */
uhci_show_qh(uhci, urbp->qh, errbuf,
- ERRBUF_LEN, 0);
+ ERRBUF_LEN - EXTRA_SPACE, 0);
lprintk(errbuf);
}
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a686cf4905bb..68914429482f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -761,12 +761,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_LINK_STATE:
temp = xhci_readl(xhci, port_array[wIndex]);
+
+ /* Disable port */
+ if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
+ xhci_dbg(xhci, "Disable port %d\n", wIndex);
+ temp = xhci_port_state_to_neutral(temp);
+ /*
+ * Clear all change bits, so that we get a new
+ * connection event.
+ */
+ temp |= PORT_CSC | PORT_PEC | PORT_WRC |
+ PORT_OCC | PORT_RC | PORT_PLC |
+ PORT_CEC;
+ xhci_writel(xhci, temp | PORT_PE,
+ port_array[wIndex]);
+ temp = xhci_readl(xhci, port_array[wIndex]);
+ break;
+ }
+
+ /* Put link in RxDetect (enable port) */
+ if (link_state == USB_SS_PORT_LS_RX_DETECT) {
+ xhci_dbg(xhci, "Enable port %d\n", wIndex);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ link_state);
+ temp = xhci_readl(xhci, port_array[wIndex]);
+ break;
+ }
+
/* Software should not attempt to set
- * port link state above '5' (Rx.Detect) and the port
+ * port link state above '3' (U3) and the port
* must be enabled.
*/
if ((temp & PORT_PE) == 0 ||
- (link_state > USB_SS_PORT_LS_RX_DETECT)) {
+ (link_state > USB_SS_PORT_LS_U3)) {
xhci_warn(xhci, "Cannot set link state.\n");
goto error;
}
@@ -957,6 +984,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
int max_ports;
__le32 __iomem **port_array;
struct xhci_bus_state *bus_state;
+ bool reset_change = false;
max_ports = xhci_get_ports(hcd, &port_array);
bus_state = &xhci->bus_state[hcd_index(hcd)];
@@ -988,6 +1016,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
+ if ((temp & PORT_RC))
+ reset_change = true;
+ }
+ if (!status && !reset_change) {
+ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
}
spin_unlock_irqrestore(&xhci->lock, flags);
return status ? retval : 0;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fb51c7085ad0..35616ffbe3ae 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1250,6 +1250,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
+ if (ep->desc.bInterval == 0)
+ return 0;
return xhci_microframes_to_exponent(udev, ep,
ep->desc.bInterval, 0, 15);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index cbb44b7b9d65..882875465301 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
faked_port_index + 1);
if (slot_id && xhci->devs[slot_id])
xhci_ring_device(xhci, slot_id);
- if (bus_state->port_remote_wakeup && (1 << faked_port_index)) {
+ if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
bus_state->port_remote_wakeup &=
~(1 << faked_port_index);
xhci_test_and_clear_bit(xhci, port_array,
@@ -1725,6 +1725,15 @@ cleanup:
if (bogus_port_status)
return;
+ /*
+ * xHCI port-status-change events occur when the "or" of all the
+ * status-change bits in the portsc register changes from 0 to 1.
+ * New status changes won't cause an event if any other change
+ * bits are still set. When an event occurs, switch over to
+ * polling to avoid losing status changes.
+ */
+ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
spin_unlock(&xhci->lock);
/* Pass this up to the core */
usb_hcd_poll_rh_status(hcd);
@@ -2580,6 +2589,8 @@ cleanup:
(trb_comp_code != COMP_STALL &&
trb_comp_code != COMP_BABBLE))
xhci_urb_free_priv(xhci, urb_priv);
+ else
+ kfree(urb_priv);
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
if ((urb->actual_length != urb->transfer_buffer_length &&
@@ -2697,13 +2708,11 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 status;
- union xhci_trb *trb;
u64 temp_64;
union xhci_trb *event_ring_deq;
dma_addr_t deq;
spin_lock(&xhci->lock);
- trb = xhci->event_ring->dequeue;
/* Check if the xHC generated the interrupt, or the irq is shared */
status = xhci_readl(xhci, &xhci->op_regs->status);
if (status == 0xffffffff)
@@ -3099,7 +3108,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
* running_total.
*/
packets_transferred = (running_total + trb_buff_len) /
- usb_endpoint_maxp(&urb->ep->desc);
+ GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
if ((total_packet_count - packets_transferred) > 31)
return 31 << 17;
@@ -3633,7 +3642,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
total_packet_count = DIV_ROUND_UP(td_len,
- usb_endpoint_maxp(&urb->ep->desc));
+ GET_MAX_PACKET(
+ usb_endpoint_maxp(&urb->ep->desc)));
/* A zero-length transfer still involves at least one packet. */
if (total_packet_count == 0)
total_packet_count++;
@@ -3655,9 +3665,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td = urb_priv->td[i];
for (j = 0; j < trbs_per_td; j++) {
u32 remainder = 0;
- field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
+ field = 0;
if (first_trb) {
+ field = TRB_TBC(burst_count) |
+ TRB_TLBPC(residue);
/* Queue the isoc TRB */
field |= TRB_TYPE(TRB_ISOC);
/* Assume URB_ISO_ASAP is set */
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 5c72c431bab1..f1f01a834ba7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -884,6 +884,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
xhci->shared_hcd->state != HC_STATE_SUSPENDED)
return -EINVAL;
+ /* Don't poll the roothubs on bus suspend. */
+ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
@@ -1069,6 +1074,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
compliance_mode_recovery_timer_init(xhci);
+ /* Re-enable port polling. */
+ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
+
return retval;
}
#endif /* CONFIG_PM */
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index fecde69bfa7d..3b1a3f4ec5e9 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -250,3 +250,9 @@ config USB_EZUSB_FX2
help
Say Y here if you need EZUSB device support.
(Cypress FX/FX2/FX2LP microcontrollers)
+
+config USB_HSIC_USB3503
+ tristate "USB3503 HSIC to USB20 Driver"
+ depends on I2C
+ help
+ This option enables support for the SMSC USB3503 HSIC to USB 2.0 hub controller driver.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 3e99a643294b..3e1bd70b06ea 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -26,5 +26,6 @@ obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
obj-$(CONFIG_USB_USS720) += uss720.o
obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
obj-$(CONFIG_USB_YUREX) += yurex.o
+obj-$(CONFIG_USB_HSIC_USB3503) += usb3503.o
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
new file mode 100644
index 000000000000..f713f6aeb6e5
--- /dev/null
+++ b/drivers/usb/misc/usb3503.c
@@ -0,0 +1,325 @@
+/*
+ * Driver for the SMSC USB3503 USB 2.0 hub controller
+ *
+ * Copyright (c) 2012-2013 Dongjin Kim (tobetter@gmail.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/usb3503.h>
+
+#define USB3503_VIDL 0x00
+#define USB3503_VIDM 0x01
+#define USB3503_PIDL 0x02
+#define USB3503_PIDM 0x03
+#define USB3503_DIDL 0x04
+#define USB3503_DIDM 0x05
+
+#define USB3503_CFG1 0x06
+#define USB3503_SELF_BUS_PWR (1 << 7)
+
+#define USB3503_CFG2 0x07
+#define USB3503_CFG3 0x08
+#define USB3503_NRD 0x09
+
+#define USB3503_PDS 0x0a
+#define USB3503_PORT1 (1 << 1)
+#define USB3503_PORT2 (1 << 2)
+#define USB3503_PORT3 (1 << 3)
+
+#define USB3503_SP_ILOCK 0xe7
+#define USB3503_SPILOCK_CONNECT (1 << 1)
+#define USB3503_SPILOCK_CONFIG (1 << 0)
+
+#define USB3503_CFGP 0xee
+#define USB3503_CLKSUSP (1 << 7)
+
+struct usb3503 {
+ enum usb3503_mode mode;
+ struct i2c_client *client;
+ int gpio_intn;
+ int gpio_reset;
+ int gpio_connect;
+};
+
+static int usb3503_write_register(struct i2c_client *client,
+ char reg, char data)
+{
+ return i2c_smbus_write_byte_data(client, reg, data);
+}
+
+static int usb3503_read_register(struct i2c_client *client, char reg)
+{
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int usb3503_set_bits(struct i2c_client *client, char reg, char req)
+{
+ int err;
+
+ err = usb3503_read_register(client, reg);
+ if (err < 0)
+ return err;
+
+ err = usb3503_write_register(client, reg, err | req);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int usb3503_clear_bits(struct i2c_client *client, char reg, char req)
+{
+ int err;
+
+ err = usb3503_read_register(client, reg);
+ if (err < 0)
+ return err;
+
+ err = usb3503_write_register(client, reg, err & ~req);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int usb3503_reset(int gpio_reset, int state)
+{
+ if (gpio_is_valid(gpio_reset))
+ gpio_set_value(gpio_reset, state);
+
+ /* Wait for RefClk when RESET_N is released, otherwise the hub
+ * will not transition to the Hub Communication Stage.
+ */
+ if (state)
+ msleep(100);
+
+ return 0;
+}
+
+static int usb3503_switch_mode(struct usb3503 *hub, enum usb3503_mode mode)
+{
+ struct i2c_client *i2c = hub->client;
+ int err = 0;
+
+ switch (mode) {
+ case USB3503_MODE_HUB:
+ usb3503_reset(hub->gpio_reset, 1);
+
+ /* SP_ILOCK: set connect_n, config_n for config */
+ err = usb3503_write_register(i2c, USB3503_SP_ILOCK,
+ (USB3503_SPILOCK_CONNECT
+ | USB3503_SPILOCK_CONFIG));
+ if (err < 0) {
+ dev_err(&i2c->dev, "SP_ILOCK failed (%d)\n", err);
+ goto err_hubmode;
+ }
+
+ /* PDS : Port2,3 Disable For Self Powered Operation */
+ err = usb3503_set_bits(i2c, USB3503_PDS,
+ (USB3503_PORT2 | USB3503_PORT3));
+ if (err < 0) {
+ dev_err(&i2c->dev, "PDS failed (%d)\n", err);
+ goto err_hubmode;
+ }
+
+ /* CFG1 : SELF_BUS_PWR -> Self-Powered operation */
+ err = usb3503_set_bits(i2c, USB3503_CFG1, USB3503_SELF_BUS_PWR);
+ if (err < 0) {
+ dev_err(&i2c->dev, "CFG1 failed (%d)\n", err);
+ goto err_hubmode;
+ }
+
+ /* SP_ILOCK: clear connect_n, config_n for hub connect */
+ err = usb3503_clear_bits(i2c, USB3503_SP_ILOCK,
+ (USB3503_SPILOCK_CONNECT
+ | USB3503_SPILOCK_CONFIG));
+ if (err < 0) {
+ dev_err(&i2c->dev, "SP_ILOCK failed (%d)\n", err);
+ goto err_hubmode;
+ }
+
+ hub->mode = mode;
+ dev_info(&i2c->dev, "switched to HUB mode\n");
+ break;
+
+ case USB3503_MODE_STANDBY:
+ usb3503_reset(hub->gpio_reset, 0);
+
+ hub->mode = mode;
+ dev_info(&i2c->dev, "switched to STANDBY mode\n");
+ break;
+
+ default:
+ dev_err(&i2c->dev, "unknown mode is request\n");
+ err = -EINVAL;
+ break;
+ }
+
+err_hubmode:
+ return err;
+}
+
+static int usb3503_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ struct usb3503_platform_data *pdata = i2c->dev.platform_data;
+ struct device_node *np = i2c->dev.of_node;
+ struct usb3503 *hub;
+ int err = -ENOMEM;
+ u32 mode = USB3503_MODE_UNKNOWN;
+
+ hub = kzalloc(sizeof(struct usb3503), GFP_KERNEL);
+ if (!hub) {
+ dev_err(&i2c->dev, "private data alloc fail\n");
+ return err;
+ }
+
+ i2c_set_clientdata(i2c, hub);
+ hub->client = i2c;
+
+ if (pdata) {
+ hub->gpio_intn = pdata->gpio_intn;
+ hub->gpio_connect = pdata->gpio_connect;
+ hub->gpio_reset = pdata->gpio_reset;
+ hub->mode = pdata->initial_mode;
+ } else if (np) {
+ hub->gpio_intn = of_get_named_gpio(np, "intn-gpios", 0);
+ if (hub->gpio_intn == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ hub->gpio_connect = of_get_named_gpio(np, "connect-gpios", 0);
+ if (hub->gpio_connect == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
+ if (hub->gpio_reset == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ of_property_read_u32(np, "initial-mode", &mode);
+ hub->mode = mode;
+ }
+
+ if (gpio_is_valid(hub->gpio_intn)) {
+ err = gpio_request_one(hub->gpio_intn,
+ GPIOF_OUT_INIT_HIGH, "usb3503 intn");
+ if (err) {
+ dev_err(&i2c->dev,
+ "unable to request GPIO %d as connect pin (%d)\n",
+ hub->gpio_intn, err);
+ goto err_out;
+ }
+ }
+
+ if (gpio_is_valid(hub->gpio_connect)) {
+ err = gpio_request_one(hub->gpio_connect,
+ GPIOF_OUT_INIT_HIGH, "usb3503 connect");
+ if (err) {
+ dev_err(&i2c->dev,
+ "unable to request GPIO %d as connect pin (%d)\n",
+ hub->gpio_connect, err);
+ goto err_gpio_connect;
+ }
+ }
+
+ if (gpio_is_valid(hub->gpio_reset)) {
+ err = gpio_request_one(hub->gpio_reset,
+ GPIOF_OUT_INIT_LOW, "usb3503 reset");
+ if (err) {
+ dev_err(&i2c->dev,
+ "unable to request GPIO %d as reset pin (%d)\n",
+ hub->gpio_reset, err);
+ goto err_gpio_reset;
+ }
+ }
+
+ usb3503_switch_mode(hub, hub->mode);
+
+ dev_info(&i2c->dev, "%s: probed on %s mode\n", __func__,
+ (hub->mode == USB3503_MODE_HUB) ? "hub" : "standby");
+
+ return 0;
+
+err_gpio_reset:
+ if (gpio_is_valid(hub->gpio_connect))
+ gpio_free(hub->gpio_connect);
+err_gpio_connect:
+ if (gpio_is_valid(hub->gpio_intn))
+ gpio_free(hub->gpio_intn);
+err_out:
+ kfree(hub);
+
+ return err;
+}
+
+static int usb3503_remove(struct i2c_client *i2c)
+{
+ struct usb3503 *hub = i2c_get_clientdata(i2c);
+
+ if (gpio_is_valid(hub->gpio_intn))
+ gpio_free(hub->gpio_intn);
+ if (gpio_is_valid(hub->gpio_connect))
+ gpio_free(hub->gpio_connect);
+ if (gpio_is_valid(hub->gpio_reset))
+ gpio_free(hub->gpio_reset);
+
+ kfree(hub);
+
+ return 0;
+}
+
+static const struct i2c_device_id usb3503_id[] = {
+ { USB3503_I2C_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, usb3503_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id usb3503_of_match[] = {
+ { .compatible = "smsc,usb3503", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usb3503_of_match);
+#endif
+
+static struct i2c_driver usb3503_driver = {
+ .driver = {
+ .name = USB3503_I2C_NAME,
+ .of_match_table = of_match_ptr(usb3503_of_match),
+ },
+ .probe = usb3503_probe,
+ .remove = usb3503_remove,
+ .id_table = usb3503_id,
+};
+
+static int __init usb3503_init(void)
+{
+ return i2c_add_driver(&usb3503_driver);
+}
+
+static void __exit usb3503_exit(void)
+{
+ i2c_del_driver(&usb3503_driver);
+}
+
+module_init(usb3503_init);
+module_exit(usb3503_exit);
+
+MODULE_AUTHOR("Dongjin Kim <tobetter@gmail.com>");
+MODULE_DESCRIPTION("USB3503 USB HUB driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 7667b12f2ff5..8b4ca1cb450a 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -13,6 +13,12 @@
/*-------------------------------------------------------------------------*/
+static int override_alt = -1;
+module_param_named(alt, override_alt, int, 0644);
+MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
+
+/*-------------------------------------------------------------------------*/
+
/* FIXME make these public somewhere; usbdevfs.h? */
struct usbtest_param {
/* inputs */
@@ -103,6 +109,10 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
iso_in = iso_out = NULL;
alt = intf->altsetting + tmp;
+ if (override_alt >= 0 &&
+ override_alt != alt->desc.bAlternateSetting)
+ continue;
+
/* take the first altsetting with in-bulk + out-bulk;
* ignore other endpoints and altsettings.
*/
@@ -144,6 +154,7 @@ try_iso:
found:
udev = testdev_to_usbdev(dev);
+ dev->info->alt = alt->desc.bAlternateSetting;
if (alt->desc.bAlternateSetting != 0) {
tmp = usb_set_interface(udev,
alt->desc.bInterfaceNumber,
@@ -2179,7 +2190,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
break;
retval = 0;
- dev_info(&intf->dev, "TEST 17: unlink from %d queues of "
+ dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
"%d %d-byte writes\n",
param->iterations, param->sglen, param->length);
for (i = param->iterations; retval == 0 && i > 0; --i) {
@@ -2280,7 +2291,7 @@ usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
wtest = " intr-out";
}
} else {
- if (info->autoconf) {
+ if (override_alt >= 0 || info->autoconf) {
int status;
status = get_endpoints(dev, intf);
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 23a0b7f0892d..45b19e2c60ba 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC
select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX)
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
+ select OMAP_CONTROL_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
help
Say Y here if your system has a dual role high speed USB
@@ -45,6 +46,7 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
+ depends on GENERIC_HARDIRQS
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index c107d7cdfa69..59eea219034a 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -365,7 +365,7 @@ static int am35x_musb_init(struct musb *musb)
usb_nop_xceiv_register();
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv))
- return -ENODEV;
+ return -EPROBE_DEFER;
setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 14dab9f9b3d0..dbb31b30c7fa 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -406,7 +406,7 @@ static int bfin_musb_init(struct musb *musb)
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv)) {
gpio_free(musb->config->gpio_vrsel);
- return -ENODEV;
+ return -EPROBE_DEFER;
}
bfin_musb_reg_init(musb);
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 0968dd7a859d..f522000e8f06 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -105,7 +105,7 @@ static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
musb_writel(&tx->tx_complete, 0, ptr);
}
-static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
int j;
@@ -150,7 +150,7 @@ static void cppi_pool_free(struct cppi_channel *c)
c->last_processed = NULL;
}
-static int __init cppi_controller_start(struct dma_controller *c)
+static int cppi_controller_start(struct dma_controller *c)
{
struct cppi *controller;
void __iomem *tibase;
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 97996af2646e..7c71769d71ff 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -410,6 +410,7 @@ static int da8xx_musb_init(struct musb *musb)
{
void __iomem *reg_base = musb->ctrl_base;
u32 rev;
+ int ret = -ENODEV;
musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
@@ -420,8 +421,10 @@ static int da8xx_musb_init(struct musb *musb)
usb_nop_xceiv_register();
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR_OR_NULL(musb->xceiv))
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ ret = -EPROBE_DEFER;
goto fail;
+ }
setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
@@ -441,7 +444,7 @@ static int da8xx_musb_init(struct musb *musb)
musb->isr = da8xx_musb_interrupt;
return 0;
fail:
- return -ENODEV;
+ return ret;
}
static int da8xx_musb_exit(struct musb *musb)
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index b1c01cad28b2..e040d9103735 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -380,11 +380,14 @@ static int davinci_musb_init(struct musb *musb)
{
void __iomem *tibase = musb->ctrl_base;
u32 revision;
+ int ret = -ENODEV;
usb_nop_xceiv_register();
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR_OR_NULL(musb->xceiv))
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ ret = -EPROBE_DEFER;
goto unregister;
+ }
musb->mregs += DAVINCI_BASE_OFFSET;
@@ -438,7 +441,7 @@ fail:
usb_put_phy(musb->xceiv);
unregister:
usb_nop_xceiv_unregister();
- return -ENODEV;
+ return ret;
}
static int davinci_musb_exit(struct musb *musb)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 57cc9c6eaa9f..60b41cc28da4 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -251,7 +251,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
/* best case is 32bit-aligned source address */
if ((0x02 & (unsigned long) src) == 0) {
if (len >= 4) {
- writesl(fifo, src + index, len >> 2);
+ iowrite32_rep(fifo, src + index, len >> 2);
index += len & ~0x03;
}
if (len & 0x02) {
@@ -260,7 +260,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
}
} else {
if (len >= 2) {
- writesw(fifo, src + index, len >> 1);
+ iowrite16_rep(fifo, src + index, len >> 1);
index += len & ~0x01;
}
}
@@ -268,7 +268,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
musb_writeb(fifo, 0, src[index]);
} else {
/* byte aligned */
- writesb(fifo, src, len);
+ iowrite8_rep(fifo, src, len);
}
}
@@ -294,7 +294,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
/* best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) dst) == 0) {
if (len >= 4) {
- readsl(fifo, dst, len >> 2);
+ ioread32_rep(fifo, dst, len >> 2);
index = len & ~0x03;
}
if (len & 0x02) {
@@ -303,7 +303,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
}
} else {
if (len >= 2) {
- readsw(fifo, dst, len >> 1);
+ ioread16_rep(fifo, dst, len >> 1);
index = len & ~0x01;
}
}
@@ -311,7 +311,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
dst[index] = musb_readb(fifo, 0);
} else {
/* byte aligned */
- readsb(fifo, dst, len);
+ ioread8_rep(fifo, dst, len);
}
}
#endif
@@ -1993,6 +1993,7 @@ fail2:
musb_platform_exit(musb);
fail1:
+ pm_runtime_disable(musb->controller);
dev_err(musb->controller,
"musb_init_controller failed with status %d\n", status);
@@ -2298,10 +2299,7 @@ static int __init musb_init(void)
if (usb_disabled())
return 0;
- pr_info("%s: version " MUSB_VERSION ", "
- "?dma?"
- ", "
- "otg (peripheral+host)",
+ pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
musb_driver_name);
return platform_driver_register(&musb_driver);
}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index e6f2ae8368bb..6bb89715b637 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -31,7 +31,6 @@
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -134,6 +133,11 @@ static const resource_size_t dsps_control_module_phys[] = {
DSPS_AM33XX_CONTROL_MODULE_PHYS_1,
};
+#define USBPHY_CM_PWRDN (1 << 0)
+#define USBPHY_OTG_PWRDN (1 << 1)
+#define USBPHY_OTGVDET_EN (1 << 19)
+#define USBPHY_OTGSESSEND_EN (1 << 20)
+
/**
* musb_dsps_phy_control - phy on/off
* @glue: struct dsps_glue *
@@ -414,7 +418,7 @@ static int dsps_musb_init(struct musb *musb)
usb_nop_xceiv_register();
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv))
- return -ENODEV;
+ return -EPROBE_DEFER;
/* Returns zero if e.g. not clocked */
rev = dsps_readl(reg_base, wrp->revision);
@@ -495,10 +499,9 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
resources[0].end = resources[0].start + SZ_4 - 1;
resources[0].flags = IORESOURCE_MEM;
- glue->usb_ctrl[id] = devm_request_and_ioremap(&pdev->dev, resources);
- if (glue->usb_ctrl[id] == NULL) {
- dev_err(dev, "Failed to obtain usb_ctrl%d memory\n", id);
- ret = -ENODEV;
+ glue->usb_ctrl[id] = devm_ioremap_resource(&pdev->dev, resources);
+ if (IS_ERR(glue->usb_ctrl[id])) {
+ ret = PTR_ERR(glue->usb_ctrl[id]);
goto err0;
}
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 876787438c2f..be18537c5f14 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -408,7 +408,19 @@ static void txstate(struct musb *musb, struct musb_request *req)
csr |= (MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_DMAMODE
| MUSB_TXCSR_MODE);
- if (!musb_ep->hb_mult)
+ /*
+ * Enable Autoset according to table
+ * below
+ * bulk_split hb_mult Autoset_Enable
+ * 0 0 Yes(Normal)
+ * 0 >0 No(High BW ISO)
+ * 1 0 Yes(HS bulk)
+ * 1 >0 Yes(FS bulk)
+ */
+ if (!musb_ep->hb_mult ||
+ (musb_ep->hb_mult &&
+ can_bulk_split(musb,
+ musb_ep->type)))
csr |= MUSB_TXCSR_AUTOSET;
}
csr &= ~MUSB_TXCSR_P_UNDERRUN;
@@ -1110,11 +1122,15 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* Set TXMAXP with the FIFO size of the endpoint
* to disable double buffering mode.
*/
- if (musb->double_buffer_not_ok)
+ if (musb->double_buffer_not_ok) {
musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
- else
+ } else {
+ if (can_bulk_split(musb, musb_ep->type))
+ musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
+ musb_ep->packet_sz) - 1;
musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
| (musb_ep->hb_mult << 11));
+ }
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
if (musb_readw(regs, MUSB_TXCSR)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e9f0fd9ddd2d..1ce1fcf3f3e7 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -634,7 +634,17 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
mode = 1;
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
- if (qh->hb_mult == 1)
+ /*
+ * Enable Autoset according to table
+ * below
+ * bulk_split hb_mult Autoset_Enable
+ * 0 1 Yes(Normal)
+ * 0 >1 No(High BW ISO)
+ * 1 1 Yes(HS bulk)
+ * 1 >1 Yes(FS bulk)
+ */
+ if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
+ can_bulk_split(hw_ep->musb, qh->type)))
csr |= MUSB_TXCSR_AUTOSET;
} else {
mode = 0;
@@ -746,7 +756,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* general endpoint setup */
if (epnum) {
/* flush all old state, set default */
- musb_h_tx_flush_fifo(hw_ep);
+ /*
+ * We could be flushing valid
+ * packets in double buffering
+ * case
+ */
+ if (!hw_ep->tx_double_buffered)
+ musb_h_tx_flush_fifo(hw_ep);
/*
* We must not clear the DMAMODE bit before or in
@@ -763,11 +779,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
);
csr |= MUSB_TXCSR_MODE;
- if (usb_gettoggle(urb->dev, qh->epnum, 1))
- csr |= MUSB_TXCSR_H_WR_DATATOGGLE
- | MUSB_TXCSR_H_DATATOGGLE;
- else
- csr |= MUSB_TXCSR_CLRDATATOG;
+ if (!hw_ep->tx_double_buffered) {
+ if (usb_gettoggle(urb->dev, qh->epnum, 1))
+ csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr |= MUSB_TXCSR_CLRDATATOG;
+ }
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
@@ -791,17 +809,19 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* protocol/endpoint/interval/NAKlimit */
if (epnum) {
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
- if (musb->double_buffer_not_ok)
+ if (musb->double_buffer_not_ok) {
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
- else if (can_bulk_split(musb, qh->type))
+ } else if (can_bulk_split(musb, qh->type)) {
+ qh->hb_mult = hw_ep->max_packet_sz_tx
+ / packet_sz;
musb_writew(epio, MUSB_TXMAXP, packet_sz
- | ((hw_ep->max_packet_sz_tx /
- packet_sz) - 1) << 11);
- else
+ | ((qh->hb_mult) - 1) << 11);
+ } else {
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
((qh->hb_mult - 1) << 11));
+ }
musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
} else {
musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 565ad1617832..eebeed78edd6 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -37,27 +37,6 @@
#include <linux/io.h>
-#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
- && !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
- && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \
- && !defined(CONFIG_MIPS) && !defined(CONFIG_M68K) \
- && !defined(CONFIG_XTENSA)
-static inline void readsl(const void __iomem *addr, void *buf, int len)
- { insl((unsigned long)addr, buf, len); }
-static inline void readsw(const void __iomem *addr, void *buf, int len)
- { insw((unsigned long)addr, buf, len); }
-static inline void readsb(const void __iomem *addr, void *buf, int len)
- { insb((unsigned long)addr, buf, len); }
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
- { outsl((unsigned long)addr, buf, len); }
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
- { outsw((unsigned long)addr, buf, len); }
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
- { outsb((unsigned long)addr, buf, len); }
-
-#endif
-
#ifndef CONFIG_BLACKFIN
/* NOTE: these offsets are all in bytes */
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index da00af460794..1762354fe793 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -37,6 +37,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/usb/musb-omap.h>
+#include <linux/usb/omap_control_usb.h>
#include "musb_core.h"
#include "omap2430.h"
@@ -46,7 +47,7 @@ struct omap2430_glue {
struct platform_device *musb;
enum omap_musb_vbus_id_status status;
struct work_struct omap_musb_mailbox_work;
- u32 __iomem *control_otghs;
+ struct device *control_otghs;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
@@ -54,26 +55,6 @@ struct omap2430_glue *_glue;
static struct timer_list musb_idle_timer;
-/**
- * omap4_usb_phy_mailbox - write to usb otg mailbox
- * @glue: struct omap2430_glue *
- * @val: the value to be written to the mailbox
- *
- * On detection of a device (ID pin is grounded), this API should be called
- * to set AVALID, VBUSVALID and ID pin is grounded.
- *
- * When OMAP is connected to a host (OMAP in device mode), this API
- * is called to set AVALID, VBUSVALID and ID pin in high impedance.
- *
- * XXX: This function will be removed once we have a seperate driver for
- * control module
- */
-static void omap4_usb_phy_mailbox(struct omap2430_glue *glue, u32 val)
-{
- if (glue->control_otghs)
- writel(val, glue->control_otghs);
-}
-
static void musb_do_idle(unsigned long _musb)
{
struct musb *musb = (void *)_musb;
@@ -255,11 +236,11 @@ static inline void omap2430_low_level_init(struct musb *musb)
void omap_musb_mailbox(enum omap_musb_vbus_id_status status)
{
struct omap2430_glue *glue = _glue;
- struct musb *musb = glue_to_musb(glue);
- glue->status = status;
- if (!musb) {
- dev_err(glue->dev, "musb core is not yet ready\n");
+ if (glue && glue_to_musb(glue)) {
+ glue->status = status;
+ } else {
+ pr_err("%s: musb core is not yet ready\n", __func__);
return;
}
@@ -269,7 +250,6 @@ EXPORT_SYMBOL_GPL(omap_musb_mailbox);
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
{
- u32 val;
struct musb *musb = glue_to_musb(glue);
struct device *dev = musb->controller;
struct musb_hdrc_platform_data *pdata = dev->platform_data;
@@ -285,8 +265,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
musb->xceiv->last_event = USB_EVENT_ID;
if (musb->gadget_driver) {
pm_runtime_get_sync(dev);
- val = AVALID | VBUSVALID;
- omap4_usb_phy_mailbox(glue, val);
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_HOST);
omap2430_musb_set_vbus(musb, 1);
}
break;
@@ -299,8 +279,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
musb->xceiv->last_event = USB_EVENT_VBUS;
if (musb->gadget_driver)
pm_runtime_get_sync(dev);
- val = IDDIG | AVALID | VBUSVALID;
- omap4_usb_phy_mailbox(glue, val);
+ omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
break;
case OMAP_MUSB_ID_FLOAT:
@@ -317,8 +296,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
if (musb->xceiv->otg->set_vbus)
otg_set_vbus(musb->xceiv->otg, 0);
}
- val = SESSEND | IDDIG;
- omap4_usb_phy_mailbox(glue, val);
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_DISCONNECT);
break;
default:
dev_dbg(dev, "ID float\n");
@@ -366,10 +345,15 @@ static int omap2430_musb_init(struct musb *musb)
* up through ULPI. TWL4030-family PMICs include one,
* which needs a driver, drivers aren't always needed.
*/
- musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (dev->parent->of_node)
+ musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent,
+ "usb-phy", 0);
+ else
+ musb->xceiv = devm_usb_get_phy_dev(dev, 0);
+
if (IS_ERR_OR_NULL(musb->xceiv)) {
pr_err("HS USB OTG: no transceiver configured\n");
- return -ENODEV;
+ return -EPROBE_DEFER;
}
musb->isr = omap2430_musb_interrupt;
@@ -415,7 +399,6 @@ err1:
static void omap2430_musb_enable(struct musb *musb)
{
u8 devctl;
- u32 val;
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
@@ -425,8 +408,7 @@ static void omap2430_musb_enable(struct musb *musb)
switch (glue->status) {
case OMAP_MUSB_ID_GROUND:
- val = AVALID | VBUSVALID;
- omap4_usb_phy_mailbox(glue, val);
+ omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST);
if (data->interface_type != MUSB_INTERFACE_UTMI)
break;
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
@@ -445,8 +427,7 @@ static void omap2430_musb_enable(struct musb *musb)
break;
case OMAP_MUSB_VBUS_VALID:
- val = IDDIG | AVALID | VBUSVALID;
- omap4_usb_phy_mailbox(glue, val);
+ omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
break;
default:
@@ -456,14 +437,12 @@ static void omap2430_musb_enable(struct musb *musb)
static void omap2430_musb_disable(struct musb *musb)
{
- u32 val;
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- if (glue->status != OMAP_MUSB_UNKNOWN) {
- val = SESSEND | IDDIG;
- omap4_usb_phy_mailbox(glue, val);
- }
+ if (glue->status != OMAP_MUSB_UNKNOWN)
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_DISCONNECT);
}
static int omap2430_musb_exit(struct musb *musb)
@@ -498,7 +477,6 @@ static int omap2430_probe(struct platform_device *pdev)
struct omap2430_glue *glue;
struct device_node *np = pdev->dev.of_node;
struct musb_hdrc_config *config;
- struct resource *res;
int ret = -ENOMEM;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
@@ -521,31 +499,23 @@ static int omap2430_probe(struct platform_device *pdev)
glue->musb = musb;
glue->status = OMAP_MUSB_UNKNOWN;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- glue->control_otghs = devm_request_and_ioremap(&pdev->dev, res);
- if (glue->control_otghs == NULL)
- dev_dbg(&pdev->dev, "Failed to obtain control memory\n");
-
if (np) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev,
"failed to allocate musb platfrom data\n");
- ret = -ENOMEM;
goto err2;
}
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(&pdev->dev,
- "failed to allocate musb board data\n");
- ret = -ENOMEM;
+ "failed to allocate musb board data\n");
goto err2;
}
config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
- if (!data) {
+ if (!config) {
dev_err(&pdev->dev,
"failed to allocate musb hdrc config\n");
goto err2;
@@ -558,11 +528,22 @@ static int omap2430_probe(struct platform_device *pdev)
of_property_read_u32(np, "ram_bits", (u32 *)&config->ram_bits);
of_property_read_u32(np, "power", (u32 *)&pdata->power);
config->multipoint = of_property_read_bool(np, "multipoint");
+ pdata->has_mailbox = of_property_read_bool(np,
+ "ti,has-mailbox");
pdata->board_data = data;
pdata->config = config;
}
+ if (pdata->has_mailbox) {
+ glue->control_otghs = omap_get_control_dev();
+ if (IS_ERR(glue->control_otghs)) {
+ dev_vdbg(&pdev->dev, "Failed to get control device\n");
+ return -ENODEV;
+ }
+ } else {
+ glue->control_otghs = ERR_PTR(-ENODEV);
+ }
pdata->platform_ops = &omap2430_ops;
platform_set_drvdata(pdev, glue);
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index 8ef656659fcb..1b5e83a9840e 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -49,13 +49,4 @@
#define OTG_FORCESTDBY 0x414
# define ENABLEFORCE (1 << 0)
-/*
- * Control Module bit definitions
- * XXX: Will be removed once we have a driver for control module.
- */
-#define AVALID BIT(0)
-#define BVALID BIT(1)
-#define VBUSVALID BIT(2)
-#define SESSEND BIT(3)
-#define IDDIG BIT(4)
#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 8bde6fc5eb75..464bd23cccda 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -22,6 +22,7 @@
#include <linux/prefetch.h>
#include <linux/usb.h>
#include <linux/irq.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/nop-usb-xceiv.h>
@@ -198,7 +199,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
/* Best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) buf) == 0) {
if (len >= 4) {
- writesl(fifo, buf, len >> 2);
+ iowrite32_rep(fifo, buf, len >> 2);
buf += (len & ~0x03);
len &= 0x03;
}
@@ -245,7 +246,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
/* Best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) buf) == 0) {
if (len >= 4) {
- readsl(fifo, buf, len >> 2);
+ ioread32_rep(fifo, buf, len >> 2);
buf += (len & ~0x03);
len &= 0x03;
}
@@ -1068,7 +1069,7 @@ static int tusb_musb_init(struct musb *musb)
usb_nop_xceiv_register();
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv))
- return -ENODEV;
+ return -EPROBE_DEFER;
pdev = to_platform_device(musb->controller);
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index a27ca1a9c994..13a392913769 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -61,7 +61,7 @@ static int ux500_musb_init(struct musb *musb)
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv)) {
pr_err("HS USB OTG: no transceiver configured\n");
- return -ENODEV;
+ return -EPROBE_DEFER;
}
musb->isr = ux500_musb_interrupt;
@@ -108,7 +108,7 @@ static int ux500_probe(struct platform_device *pdev)
goto err3;
}
- ret = clk_enable(clk);
+ ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&pdev->dev, "failed to enable clock\n");
goto err4;
@@ -148,7 +148,7 @@ static int ux500_probe(struct platform_device *pdev)
return 0;
err5:
- clk_disable(clk);
+ clk_disable_unprepare(clk);
err4:
clk_put(clk);
@@ -168,7 +168,7 @@ static int ux500_remove(struct platform_device *pdev)
struct ux500_glue *glue = platform_get_drvdata(pdev);
platform_device_unregister(glue->musb);
- clk_disable(glue->clk);
+ clk_disable_unprepare(glue->clk);
clk_put(glue->clk);
kfree(glue);
@@ -182,7 +182,7 @@ static int ux500_suspend(struct device *dev)
struct musb *musb = glue_to_musb(glue);
usb_phy_set_suspend(musb->xceiv, 1);
- clk_disable(glue->clk);
+ clk_disable_unprepare(glue->clk);
return 0;
}
@@ -193,7 +193,7 @@ static int ux500_resume(struct device *dev)
struct musb *musb = glue_to_musb(glue);
int ret;
- ret = clk_enable(glue->clk);
+ ret = clk_prepare_enable(glue->clk);
if (ret) {
dev_err(dev, "failed to enable clock\n");
return ret;
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 6223062d5d1b..37962c99ff1e 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -110,7 +110,7 @@ config AB8500_USB
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
- depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND
+ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND
select USB_OTG
select USB_OTG_UTILS
help
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c
index a67ffe22179a..a7d4ac591982 100644
--- a/drivers/usb/otg/gpio_vbus.c
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -409,17 +409,7 @@ static struct platform_driver gpio_vbus_driver = {
.remove = __exit_p(gpio_vbus_remove),
};
-static int __init gpio_vbus_init(void)
-{
- return platform_driver_probe(&gpio_vbus_driver, gpio_vbus_probe);
-}
-module_init(gpio_vbus_init);
-
-static void __exit gpio_vbus_exit(void)
-{
- platform_driver_unregister(&gpio_vbus_driver);
-}
-module_exit(gpio_vbus_exit);
+module_platform_driver_probe(gpio_vbus_driver, gpio_vbus_probe);
MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
MODULE_AUTHOR("Philipp Zabel");
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 3b9f0d951132..749fbf41fb6f 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -1756,18 +1756,7 @@ static struct platform_driver msm_otg_driver = {
},
};
-static int __init msm_otg_init(void)
-{
- return platform_driver_probe(&msm_otg_driver, msm_otg_probe);
-}
-
-static void __exit msm_otg_exit(void)
-{
- platform_driver_unregister(&msm_otg_driver);
-}
-
-module_init(msm_otg_init);
-module_exit(msm_otg_exit);
+module_platform_driver_probe(msm_otg_driver, msm_otg_probe);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM USB transceiver driver");
diff --git a/drivers/usb/otg/mv_otg.c b/drivers/usb/otg/mv_otg.c
index 1dd57504186d..b6a9be31133b 100644
--- a/drivers/usb/otg/mv_otg.c
+++ b/drivers/usb/otg/mv_otg.c
@@ -240,7 +240,7 @@ static void otg_clock_enable(struct mv_otg *mvotg)
unsigned int i;
for (i = 0; i < mvotg->clknum; i++)
- clk_enable(mvotg->clk[i]);
+ clk_prepare_enable(mvotg->clk[i]);
}
static void otg_clock_disable(struct mv_otg *mvotg)
@@ -248,7 +248,7 @@ static void otg_clock_disable(struct mv_otg *mvotg)
unsigned int i;
for (i = 0; i < mvotg->clknum; i++)
- clk_disable(mvotg->clk[i]);
+ clk_disable_unprepare(mvotg->clk[i]);
}
static int mv_otg_enable_internal(struct mv_otg *mvotg)
@@ -420,7 +420,7 @@ static void mv_otg_work(struct work_struct *work)
struct usb_otg *otg;
int old_state;
- mvotg = container_of((struct delayed_work *)work, struct mv_otg, work);
+ mvotg = container_of(to_delayed_work(work), struct mv_otg, work);
run:
/* work queue is single thread, or we need spin_lock to protect */
@@ -662,18 +662,9 @@ static struct attribute_group inputs_attr_group = {
int mv_otg_remove(struct platform_device *pdev)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
- int clk_i;
sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group);
- if (mvotg->irq)
- free_irq(mvotg->irq, mvotg);
-
- if (mvotg->pdata->vbus)
- free_irq(mvotg->pdata->vbus->irq, mvotg);
- if (mvotg->pdata->id)
- free_irq(mvotg->pdata->id->irq, mvotg);
-
if (mvotg->qwork) {
flush_workqueue(mvotg->qwork);
destroy_workqueue(mvotg->qwork);
@@ -681,21 +672,9 @@ int mv_otg_remove(struct platform_device *pdev)
mv_otg_disable(mvotg);
- if (mvotg->cap_regs)
- iounmap(mvotg->cap_regs);
-
- if (mvotg->phy_regs)
- iounmap(mvotg->phy_regs);
-
- for (clk_i = 0; clk_i <= mvotg->clknum; clk_i++)
- clk_put(mvotg->clk[clk_i]);
-
usb_remove_phy(&mvotg->phy);
platform_set_drvdata(pdev, NULL);
- kfree(mvotg->phy.otg);
- kfree(mvotg);
-
return 0;
}
@@ -714,17 +693,15 @@ static int mv_otg_probe(struct platform_device *pdev)
}
size = sizeof(*mvotg) + sizeof(struct clk *) * pdata->clknum;
- mvotg = kzalloc(size, GFP_KERNEL);
+ mvotg = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!mvotg) {
dev_err(&pdev->dev, "failed to allocate memory!\n");
return -ENOMEM;
}
- otg = kzalloc(sizeof *otg, GFP_KERNEL);
- if (!otg) {
- kfree(mvotg);
+ otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
+ if (!otg)
return -ENOMEM;
- }
platform_set_drvdata(pdev, mvotg);
@@ -733,18 +710,18 @@ static int mv_otg_probe(struct platform_device *pdev)
mvotg->clknum = pdata->clknum;
for (clk_i = 0; clk_i < mvotg->clknum; clk_i++) {
- mvotg->clk[clk_i] = clk_get(&pdev->dev, pdata->clkname[clk_i]);
+ mvotg->clk[clk_i] = devm_clk_get(&pdev->dev,
+ pdata->clkname[clk_i]);
if (IS_ERR(mvotg->clk[clk_i])) {
retval = PTR_ERR(mvotg->clk[clk_i]);
- goto err_put_clk;
+ return retval;
}
}
mvotg->qwork = create_singlethread_workqueue("mv_otg_queue");
if (!mvotg->qwork) {
dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n");
- retval = -ENOMEM;
- goto err_put_clk;
+ return -ENOMEM;
}
INIT_DELAYED_WORK(&mvotg->work, mv_otg_work);
@@ -772,7 +749,7 @@ static int mv_otg_probe(struct platform_device *pdev)
goto err_destroy_workqueue;
}
- mvotg->phy_regs = ioremap(r->start, resource_size(r));
+ mvotg->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (mvotg->phy_regs == NULL) {
dev_err(&pdev->dev, "failed to map phy I/O memory\n");
retval = -EFAULT;
@@ -784,21 +761,21 @@ static int mv_otg_probe(struct platform_device *pdev)
if (r == NULL) {
dev_err(&pdev->dev, "no I/O memory resource defined\n");
retval = -ENODEV;
- goto err_unmap_phyreg;
+ goto err_destroy_workqueue;
}
- mvotg->cap_regs = ioremap(r->start, resource_size(r));
+ mvotg->cap_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (mvotg->cap_regs == NULL) {
dev_err(&pdev->dev, "failed to map I/O memory\n");
retval = -EFAULT;
- goto err_unmap_phyreg;
+ goto err_destroy_workqueue;
}
	/* we will access controller registers, so enable the udc controller */
retval = mv_otg_enable_internal(mvotg);
if (retval) {
dev_err(&pdev->dev, "mv otg enable error %d\n", retval);
- goto err_unmap_capreg;
+ goto err_destroy_workqueue;
}
mvotg->op_regs =
@@ -806,9 +783,9 @@ static int mv_otg_probe(struct platform_device *pdev)
+ (readl(mvotg->cap_regs) & CAPLENGTH_MASK));
if (pdata->id) {
- retval = request_threaded_irq(pdata->id->irq, NULL,
- mv_otg_inputs_irq,
- IRQF_ONESHOT, "id", mvotg);
+ retval = devm_request_threaded_irq(&pdev->dev, pdata->id->irq,
+ NULL, mv_otg_inputs_irq,
+ IRQF_ONESHOT, "id", mvotg);
if (retval) {
dev_info(&pdev->dev,
"Failed to request irq for ID\n");
@@ -818,9 +795,9 @@ static int mv_otg_probe(struct platform_device *pdev)
if (pdata->vbus) {
mvotg->clock_gating = 1;
- retval = request_threaded_irq(pdata->vbus->irq, NULL,
- mv_otg_inputs_irq,
- IRQF_ONESHOT, "vbus", mvotg);
+ retval = devm_request_threaded_irq(&pdev->dev, pdata->vbus->irq,
+ NULL, mv_otg_inputs_irq,
+ IRQF_ONESHOT, "vbus", mvotg);
if (retval) {
dev_info(&pdev->dev,
"Failed to request irq for VBUS, "
@@ -844,7 +821,7 @@ static int mv_otg_probe(struct platform_device *pdev)
}
mvotg->irq = r->start;
- if (request_irq(mvotg->irq, mv_otg_irq, IRQF_SHARED,
+ if (devm_request_irq(&pdev->dev, mvotg->irq, mv_otg_irq, IRQF_SHARED,
driver_name, mvotg)) {
dev_err(&pdev->dev, "Request irq %d for OTG failed\n",
mvotg->irq);
@@ -857,14 +834,14 @@ static int mv_otg_probe(struct platform_device *pdev)
if (retval < 0) {
dev_err(&pdev->dev, "can't register transceiver, %d\n",
retval);
- goto err_free_irq;
+ goto err_disable_clk;
}
retval = sysfs_create_group(&pdev->dev.kobj, &inputs_attr_group);
if (retval < 0) {
dev_dbg(&pdev->dev,
"Can't register sysfs attr group: %d\n", retval);
- goto err_set_transceiver;
+ goto err_remove_phy;
}
spin_lock_init(&mvotg->wq_lock);
@@ -879,30 +856,15 @@ static int mv_otg_probe(struct platform_device *pdev)
return 0;
-err_set_transceiver:
+err_remove_phy:
usb_remove_phy(&mvotg->phy);
-err_free_irq:
- free_irq(mvotg->irq, mvotg);
err_disable_clk:
- if (pdata->vbus)
- free_irq(pdata->vbus->irq, mvotg);
- if (pdata->id)
- free_irq(pdata->id->irq, mvotg);
mv_otg_disable_internal(mvotg);
-err_unmap_capreg:
- iounmap(mvotg->cap_regs);
-err_unmap_phyreg:
- iounmap(mvotg->phy_regs);
err_destroy_workqueue:
flush_workqueue(mvotg->qwork);
destroy_workqueue(mvotg->qwork);
-err_put_clk:
- for (clk_i--; clk_i >= 0; clk_i--)
- clk_put(mvotg->clk[clk_i]);
platform_set_drvdata(pdev, NULL);
- kfree(otg);
- kfree(mvotg);
return retval;
}
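The mv_otg rework above switches probe() to the managed devm_* allocators (devm_kzalloc, devm_clk_get, devm_ioremap, devm_request_irq and friends), so the error path and mv_otg_remove() no longer have to free each resource by hand. A minimal sketch of that pattern, assuming a hypothetical "foo" platform device that is not part of this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;
        struct clk *clk;
        int irq, ret;

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;
        regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!regs)
                return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
        ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0, "foo", NULL);
        if (ret)
                return ret;

        /*
         * No clk_put()/iounmap()/free_irq() needed: the devres core
         * releases everything when probe fails or the device unbinds.
         */
        return 0;
}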
diff --git a/drivers/usb/otg/mxs-phy.c b/drivers/usb/otg/mxs-phy.c
index 76302720055a..b0d9f119c749 100644
--- a/drivers/usb/otg/mxs-phy.c
+++ b/drivers/usb/otg/mxs-phy.c
@@ -76,6 +76,25 @@ static void mxs_phy_shutdown(struct usb_phy *phy)
clk_disable_unprepare(mxs_phy->clk);
}
+static int mxs_phy_suspend(struct usb_phy *x, int suspend)
+{
+ struct mxs_phy *mxs_phy = to_mxs_phy(x);
+
+ if (suspend) {
+ writel_relaxed(0xffffffff, x->io_priv + HW_USBPHY_PWD);
+ writel_relaxed(BM_USBPHY_CTRL_CLKGATE,
+ x->io_priv + HW_USBPHY_CTRL_SET);
+ clk_disable_unprepare(mxs_phy->clk);
+ } else {
+ clk_prepare_enable(mxs_phy->clk);
+ writel_relaxed(BM_USBPHY_CTRL_CLKGATE,
+ x->io_priv + HW_USBPHY_CTRL_CLR);
+ writel_relaxed(0, x->io_priv + HW_USBPHY_PWD);
+ }
+
+ return 0;
+}
+
static int mxs_phy_on_connect(struct usb_phy *phy,
enum usb_device_speed speed)
{
@@ -115,9 +134,9 @@ static int mxs_phy_probe(struct platform_device *pdev)
return -ENOENT;
}
- base = devm_request_and_ioremap(&pdev->dev, res);
- if (!base)
- return -EBUSY;
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@@ -137,6 +156,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->phy.label = DRIVER_NAME;
mxs_phy->phy.init = mxs_phy_init;
mxs_phy->phy.shutdown = mxs_phy_shutdown;
+ mxs_phy->phy.set_suspend = mxs_phy_suspend;
mxs_phy->phy.notify_connect = mxs_phy_on_connect;
mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
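The new mxs_phy_suspend() handler is hooked up through phy.set_suspend, so a controller driver can gate the PHY clock and power the analog blocks down through the generic usb_phy API instead of touching PHY registers itself. A hedged sketch of how a consumer would drive it; the controller context here is hypothetical:

#include <linux/usb/phy.h>

/* called from the controller's bus-suspend path (hypothetical) */
static void foo_ctrl_suspend(struct usb_phy *phy)
{
        usb_phy_set_suspend(phy, 1);    /* ends up in mxs_phy_suspend(x, 1) */
}

/* called on resume: the clock is ungated and the PWD bits cleared again */
static void foo_ctrl_resume(struct usb_phy *phy)
{
        usb_phy_set_suspend(phy, 0);
}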
diff --git a/drivers/usb/otg/otg.c b/drivers/usb/otg/otg.c
index a30c04115115..e1814397ca3a 100644
--- a/drivers/usb/otg/otg.c
+++ b/drivers/usb/otg/otg.c
@@ -13,11 +13,14 @@
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/usb/otg.h>
static LIST_HEAD(phy_list);
+static LIST_HEAD(phy_bind_list);
static DEFINE_SPINLOCK(phy_lock);
static struct usb_phy *__usb_find_phy(struct list_head *list,
@@ -35,6 +38,38 @@ static struct usb_phy *__usb_find_phy(struct list_head *list,
return ERR_PTR(-ENODEV);
}
+static struct usb_phy *__usb_find_phy_dev(struct device *dev,
+ struct list_head *list, u8 index)
+{
+ struct usb_phy_bind *phy_bind = NULL;
+
+ list_for_each_entry(phy_bind, list, list) {
+ if (!(strcmp(phy_bind->dev_name, dev_name(dev))) &&
+ phy_bind->index == index) {
+ if (phy_bind->phy)
+ return phy_bind->phy;
+ else
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct usb_phy *__of_usb_find_phy(struct device_node *node)
+{
+ struct usb_phy *phy;
+
+ list_for_each_entry(phy, &phy_list, head) {
+ if (node != phy->dev->of_node)
+ continue;
+
+ return phy;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
static void devm_usb_phy_release(struct device *dev, void *res)
{
struct usb_phy *phy = *(struct usb_phy **)res;
@@ -110,6 +145,133 @@ err0:
}
EXPORT_SYMBOL(usb_get_phy);
+ /**
+ * devm_usb_get_phy_by_phandle - find the USB PHY by phandle
+ * @dev - device that requests this phy
+ * @phandle - name of the property holding the phy phandle value
+ * @index - the index of the phy
+ *
+ * Returns the phy driver associated with the given phandle value,
+ * after getting a refcount to it, -ENODEV if there is no such phy or
+ * -EPROBE_DEFER if there is a phandle to the phy, but the device is
+ * not yet loaded. While at that, it also associates the device with
+ * the phy using devres. On driver detach, release function is invoked
+ * on the devres data, then, devres data is freed.
+ *
+ * For use by USB host and peripheral drivers.
+ */
+struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
+ const char *phandle, u8 index)
+{
+ struct usb_phy *phy = ERR_PTR(-ENOMEM), **ptr;
+ unsigned long flags;
+ struct device_node *node;
+
+ if (!dev->of_node) {
+ dev_dbg(dev, "device does not have a device node entry\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ node = of_parse_phandle(dev->of_node, phandle, index);
+ if (!node) {
+ dev_dbg(dev, "failed to get %s phandle in %s node\n", phandle,
+ dev->of_node->full_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ ptr = devres_alloc(devm_usb_phy_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr) {
+ dev_dbg(dev, "failed to allocate memory for devres\n");
+ goto err0;
+ }
+
+ spin_lock_irqsave(&phy_lock, flags);
+
+ phy = __of_usb_find_phy(node);
+ if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
+ phy = ERR_PTR(-EPROBE_DEFER);
+ devres_free(ptr);
+ goto err1;
+ }
+
+ *ptr = phy;
+ devres_add(dev, ptr);
+
+ get_device(phy->dev);
+
+err1:
+ spin_unlock_irqrestore(&phy_lock, flags);
+
+err0:
+ of_node_put(node);
+
+ return phy;
+}
+EXPORT_SYMBOL(devm_usb_get_phy_by_phandle);
+
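For example, a host or peripheral controller sitting behind a device-tree node could look up its PHY as sketched below; the "usb-phy" property name and the probe context are assumptions for illustration, not part of this patch:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

static int foo_controller_probe(struct platform_device *pdev)
{
        struct usb_phy *phy;

        /* assumes the node carries something like "usb-phy = <&usbphy0>;" */
        phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
        if (IS_ERR(phy))
                return PTR_ERR(phy);    /* may be -EPROBE_DEFER */

        return usb_phy_init(phy);
}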
+/**
+ * usb_get_phy_dev - find the USB PHY
+ * @dev - device that requests this phy
+ * @index - the index of the phy
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * -ENODEV if there is no such phy. The caller is responsible for
+ * calling usb_put_phy() to release that count.
+ *
+ * For use by USB host and peripheral drivers.
+ */
+struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
+{
+ struct usb_phy *phy = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy_lock, flags);
+
+ phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
+ if (IS_ERR(phy)) {
+ pr_err("unable to find transceiver\n");
+ goto err0;
+ }
+
+ get_device(phy->dev);
+
+err0:
+ spin_unlock_irqrestore(&phy_lock, flags);
+
+ return phy;
+}
+EXPORT_SYMBOL(usb_get_phy_dev);
+
+/**
+ * devm_usb_get_phy_dev - find the USB PHY using device ptr and index
+ * @dev - device that requests this phy
+ * @index - the index of the phy
+ *
+ * Gets the phy using usb_get_phy_dev(), and associates a device with it using
+ * devres. On driver detach, release function is invoked on the devres data,
+ * then, devres data is freed.
+ *
+ * For use by USB host and peripheral drivers.
+ */
+struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index)
+{
+ struct usb_phy **ptr, *phy;
+
+ ptr = devres_alloc(devm_usb_phy_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ phy = usb_get_phy_dev(dev, index);
+ if (!IS_ERR(phy)) {
+ *ptr = phy;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return phy;
+}
+EXPORT_SYMBOL(devm_usb_get_phy_dev);
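A non-DT controller would pair this with the usb_bind_phy() call added later in this file: board code records the binding, and the controller then fetches the PHY by its own device name and port index. A minimal, hypothetical sketch:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

static int foo_udc_probe(struct platform_device *pdev)
{
        struct usb_phy *phy;

        phy = devm_usb_get_phy_dev(&pdev->dev, 0);
        if (IS_ERR_OR_NULL(phy))
                return phy ? PTR_ERR(phy) : -ENOMEM;

        return 0;
}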
+
/**
* devm_usb_put_phy - release the USB PHY
* @dev - device that wants to release this phy
@@ -185,6 +347,36 @@ out:
EXPORT_SYMBOL(usb_add_phy);
/**
+ * usb_add_phy_dev - declare the USB PHY
+ * @x: the USB phy to be used; or NULL
+ *
+ * This call is exclusively for use by phy drivers, which
+ * coordinate the activities of drivers for host and peripheral
+ * controllers, and in some cases for VBUS current regulation.
+ */
+int usb_add_phy_dev(struct usb_phy *x)
+{
+ struct usb_phy_bind *phy_bind;
+ unsigned long flags;
+
+ if (!x->dev) {
+ dev_err(x->dev, "no device provided for PHY\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&phy_lock, flags);
+ list_for_each_entry(phy_bind, &phy_bind_list, list)
+ if (!(strcmp(phy_bind->phy_dev_name, dev_name(x->dev))))
+ phy_bind->phy = x;
+
+ list_add_tail(&x->head, &phy_list);
+
+ spin_unlock_irqrestore(&phy_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(usb_add_phy_dev);
+
+/**
* usb_remove_phy - remove the OTG PHY
* @x: the USB OTG PHY to be removed;
*
@@ -193,14 +385,55 @@ EXPORT_SYMBOL(usb_add_phy);
void usb_remove_phy(struct usb_phy *x)
{
unsigned long flags;
+ struct usb_phy_bind *phy_bind;
spin_lock_irqsave(&phy_lock, flags);
- if (x)
+ if (x) {
+ list_for_each_entry(phy_bind, &phy_bind_list, list)
+ if (phy_bind->phy == x)
+ phy_bind->phy = NULL;
list_del(&x->head);
+ }
spin_unlock_irqrestore(&phy_lock, flags);
}
EXPORT_SYMBOL(usb_remove_phy);
+/**
+ * usb_bind_phy - bind the phy and the controller that uses the phy
+ * @dev_name: the device name of the device that will bind to the phy
+ * @index: index to specify the port number
+ * @phy_dev_name: the device name of the phy
+ *
+ * Fills the phy_bind structure with the dev_name and phy_dev_name. This will
+ * be used when the phy driver registers the phy and when the controller
+ * requests this phy.
+ *
+ * To be used by platform specific initialization code.
+ */
+int __init usb_bind_phy(const char *dev_name, u8 index,
+ const char *phy_dev_name)
+{
+ struct usb_phy_bind *phy_bind;
+ unsigned long flags;
+
+ phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
+ if (!phy_bind) {
+ pr_err("phy_bind(): No memory for phy_bind");
+ return -ENOMEM;
+ }
+
+ phy_bind->dev_name = dev_name;
+ phy_bind->phy_dev_name = phy_dev_name;
+ phy_bind->index = index;
+
+ spin_lock_irqsave(&phy_lock, flags);
+ list_add_tail(&phy_bind->list, &phy_bind_list);
+ spin_unlock_irqrestore(&phy_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_bind_phy);
+
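usb_bind_phy() itself is meant to run from board or machine init code, before either the PHY or the controller probes. A sketch of such a binding; the device names below are only illustrative:

#include <linux/usb/phy.h>

static void __init foo_board_usb_init(void)
{
        /* controller "musb-hdrc.0.auto", port 0, uses the "twl4030_usb" PHY */
        usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
}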
const char *otg_state_string(enum usb_otg_state state)
{
switch (state) {
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 0a701938ab53..a994715a3101 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -610,6 +610,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
twl->phy.dev = twl->dev;
twl->phy.label = "twl4030";
twl->phy.otg = otg;
+ twl->phy.type = USB_PHY_TYPE_USB2;
twl->phy.set_suspend = twl4030_set_suspend;
otg->phy = &twl->phy;
@@ -624,7 +625,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "ldo init failed\n");
return err;
}
- usb_add_phy(&twl->phy, USB_PHY_TYPE_USB2);
+ usb_add_phy_dev(&twl->phy);
platform_set_drvdata(pdev, twl);
if (device_create_file(&pdev->dev, &dev_attr_vbus))
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7eb73c561bd2..65217a590068 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -6,13 +6,34 @@ comment "USB Physical Layer drivers"
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
+ depends on ARCH_OMAP2PLUS
select USB_OTG_UTILS
+ select OMAP_CONTROL_USB
help
Enable this to support the transceiver that is part of SOC. This
driver takes care of all the PHY functionality apart from comparator.
The USB OTG controller communicates with the comparator using this
driver.
+config OMAP_USB3
+ tristate "OMAP USB3 PHY Driver"
+ select USB_OTG_UTILS
+ select OMAP_CONTROL_USB
+ help
+ Enable this to support the USB3 PHY that is part of SOC. This
+ driver takes care of all the PHY functionality apart from comparator.
+ This driver interacts with the "OMAP Control USB Driver" to power
+ on/off the PHY.
+
+config OMAP_CONTROL_USB
+ tristate "OMAP CONTROL USB Driver"
+ help
+ Enable this to add support for the USB part present in the control
+ module. This driver has API to power on the USB2 PHY and to write to
+ the mailbox. The mailbox is present only in omap4 and the register to
+ power on the USB2 PHY is present in OMAP4 and OMAP5. OMAP5 has an
+ additional register to power on USB3 PHY.
+
config USB_ISP1301
tristate "NXP ISP1301 USB transceiver support"
depends on USB || USB_GADGET
@@ -44,3 +65,11 @@ config USB_RCAR_PHY
To compile this driver as a module, choose M here: the
module will be called rcar-phy.
+
+config SAMSUNG_USBPHY
+ bool "Samsung USB PHY controller Driver"
+ depends on USB_S3C_HSOTG || USB_EHCI_S5P || USB_OHCI_EXYNOS
+ select USB_OTG_UTILS
+ help
+ Enable this to support Samsung USB phy controller for samsung
+ SoCs.
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 1a579a860a03..b13faa193e0c 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -5,7 +5,10 @@
ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
obj-$(CONFIG_OMAP_USB2) += omap-usb2.o
+obj-$(CONFIG_OMAP_USB3) += omap-usb3.o
+obj-$(CONFIG_OMAP_CONTROL_USB) += omap-control-usb.o
obj-$(CONFIG_USB_ISP1301) += isp1301.o
obj-$(CONFIG_MV_U3D_PHY) += mv_u3d_phy.o
obj-$(CONFIG_USB_EHCI_TEGRA) += tegra_usb_phy.o
obj-$(CONFIG_USB_RCAR_PHY) += rcar-phy.o
+obj-$(CONFIG_SAMSUNG_USBPHY) += samsung-usbphy.o
diff --git a/drivers/usb/phy/mv_u3d_phy.c b/drivers/usb/phy/mv_u3d_phy.c
index eaddbe3d4304..9d8599122aa9 100644
--- a/drivers/usb/phy/mv_u3d_phy.c
+++ b/drivers/usb/phy/mv_u3d_phy.c
@@ -283,11 +283,9 @@ static int mv_u3d_phy_probe(struct platform_device *pdev)
return -ENODEV;
}
- phy_base = devm_request_and_ioremap(dev, res);
- if (!phy_base) {
- dev_err(dev, "%s: register mapping failed\n", __func__);
- return -ENXIO;
- }
+ phy_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy_base))
+ return PTR_ERR(phy_base);
mv_u3d_phy = devm_kzalloc(dev, sizeof(*mv_u3d_phy), GFP_KERNEL);
if (!mv_u3d_phy)
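Both the mxs-phy and mv_u3d_phy hunks replace devm_request_and_ioremap(), which returned NULL on failure, with devm_ioremap_resource(), which validates the resource, requests the memory region, maps it, and reports failure through ERR_PTR(). A short sketch of the resulting idiom, assuming a hypothetical probe context:

static void __iomem *foo_map_regs(struct platform_device *pdev)
{
        struct resource *res;

        /* devm_ioremap_resource() copes with a NULL resource itself */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        return devm_ioremap_resource(&pdev->dev, res);
}

/* caller: base = foo_map_regs(pdev); if (IS_ERR(base)) return PTR_ERR(base); */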
diff --git a/drivers/usb/phy/omap-control-usb.c b/drivers/usb/phy/omap-control-usb.c
new file mode 100644
index 000000000000..5323b71c3521
--- /dev/null
+++ b/drivers/usb/phy/omap-control-usb.c
@@ -0,0 +1,295 @@
+/*
+ * omap-control-usb.c - The USB part of control module.
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/usb/omap_control_usb.h>
+
+static struct omap_control_usb *control_usb;
+
+/**
+ * omap_get_control_dev - returns the device pointer for this control device
+ *
+ * This API should be called to get the device pointer for this control
+ * module device. This device pointer should be used when calling the other
+ * exported APIs in this driver.
+ *
+ * To be used by PHY driver and glue driver.
+ */
+struct device *omap_get_control_dev(void)
+{
+ if (!control_usb)
+ return ERR_PTR(-ENODEV);
+
+ return control_usb->dev;
+}
+EXPORT_SYMBOL_GPL(omap_get_control_dev);
+
+/**
+ * omap_control_usb3_phy_power - power on/off the serializer using control
+ * module
+ * @dev: the control module device
+ * @on: 0 to off and 1 to on based on powering on or off the PHY
+ *
+ * usb3 PHY driver should call this API to power on or off the PHY.
+ */
+void omap_control_usb3_phy_power(struct device *dev, bool on)
+{
+ u32 val;
+ unsigned long rate;
+ struct omap_control_usb *control_usb = dev_get_drvdata(dev);
+
+ if (control_usb->type != OMAP_CTRL_DEV_TYPE2)
+ return;
+
+ rate = clk_get_rate(control_usb->sys_clk);
+ rate = rate/1000000;
+
+ val = readl(control_usb->phy_power);
+
+ if (on) {
+ val &= ~(OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK |
+ OMAP_CTRL_USB_PWRCTL_CLK_FREQ_MASK);
+ val |= OMAP_CTRL_USB3_PHY_TX_RX_POWERON <<
+ OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
+ val |= rate << OMAP_CTRL_USB_PWRCTL_CLK_FREQ_SHIFT;
+ } else {
+ val &= ~OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK;
+ val |= OMAP_CTRL_USB3_PHY_TX_RX_POWEROFF <<
+ OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
+ }
+
+ writel(val, control_usb->phy_power);
+}
+EXPORT_SYMBOL_GPL(omap_control_usb3_phy_power);
+
+/**
+ * omap_control_usb_phy_power - power on/off the phy using control module reg
+ * @dev: the control module device
+ * @on: 0 or 1, based on powering on or off the PHY
+ */
+void omap_control_usb_phy_power(struct device *dev, int on)
+{
+ u32 val;
+ struct omap_control_usb *control_usb = dev_get_drvdata(dev);
+
+ val = readl(control_usb->dev_conf);
+
+ if (on)
+ val &= ~OMAP_CTRL_DEV_PHY_PD;
+ else
+ val |= OMAP_CTRL_DEV_PHY_PD;
+
+ writel(val, control_usb->dev_conf);
+}
+EXPORT_SYMBOL_GPL(omap_control_usb_phy_power);
+
+/**
+ * omap_control_usb_host_mode - set AVALID, VBUSVALID and ground the ID pin
+ * @ctrl_usb: struct omap_control_usb *
+ *
+ * Writes to the mailbox register to notify the usb core that a usb
+ * device has been connected.
+ */
+static void omap_control_usb_host_mode(struct omap_control_usb *ctrl_usb)
+{
+ u32 val;
+
+ val = readl(ctrl_usb->otghs_control);
+ val &= ~(OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_SESSEND);
+ val |= OMAP_CTRL_DEV_AVALID | OMAP_CTRL_DEV_VBUSVALID;
+ writel(val, ctrl_usb->otghs_control);
+}
+
+/**
+ * omap_control_usb_device_mode - set AVALID, VBUSVALID and ID pin in high
+ * impedance
+ * @ctrl_usb: struct omap_control_usb *
+ *
+ * Writes to the mailbox register to notify the usb core that it has been
+ * connected to a usb host.
+ */
+static void omap_control_usb_device_mode(struct omap_control_usb *ctrl_usb)
+{
+ u32 val;
+
+ val = readl(ctrl_usb->otghs_control);
+ val &= ~OMAP_CTRL_DEV_SESSEND;
+ val |= OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_AVALID |
+ OMAP_CTRL_DEV_VBUSVALID;
+ writel(val, ctrl_usb->otghs_control);
+}
+
+/**
+ * omap_control_usb_set_sessionend - Enable SESSIONEND and set IDDIG to high
+ * impedance
+ * @ctrl_usb: struct omap_control_usb *
+ *
+ * Writes to the mailbox register to notify the usb core it's now in
+ * disconnected state.
+ */
+static void omap_control_usb_set_sessionend(struct omap_control_usb *ctrl_usb)
+{
+ u32 val;
+
+ val = readl(ctrl_usb->otghs_control);
+ val &= ~(OMAP_CTRL_DEV_AVALID | OMAP_CTRL_DEV_VBUSVALID);
+ val |= OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_SESSEND;
+ writel(val, ctrl_usb->otghs_control);
+}
+
+/**
+ * omap_control_usb_set_mode - put USB into host mode, device mode or the
+ * disconnected state
+ * @dev: the control module device
+ * @mode: The mode to which usb should be configured
+ *
+ * This is an API to write to the mailbox register to notify the usb core that
+ * a usb device has been connected.
+ */
+void omap_control_usb_set_mode(struct device *dev,
+ enum omap_control_usb_mode mode)
+{
+ struct omap_control_usb *ctrl_usb;
+
+ if (IS_ERR(dev) || control_usb->type != OMAP_CTRL_DEV_TYPE1)
+ return;
+
+ ctrl_usb = dev_get_drvdata(dev);
+
+ switch (mode) {
+ case USB_MODE_HOST:
+ omap_control_usb_host_mode(ctrl_usb);
+ break;
+ case USB_MODE_DEVICE:
+ omap_control_usb_device_mode(ctrl_usb);
+ break;
+ case USB_MODE_DISCONNECT:
+ omap_control_usb_set_sessionend(ctrl_usb);
+ break;
+ default:
+ dev_vdbg(dev, "invalid omap control usb mode\n");
+ }
+}
+EXPORT_SYMBOL_GPL(omap_control_usb_set_mode);
+
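Taken together, the helpers exported above are meant to be driven by the OMAP PHY and glue drivers: they fetch the control device once and then use it to power the PHY and flip the mailbox. A hedged sketch of a consumer; the glue context is hypothetical and error handling is trimmed:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/usb/omap_control_usb.h>

static int foo_glue_probe(struct platform_device *pdev)
{
        struct device *control_dev;

        control_dev = omap_get_control_dev();
        if (IS_ERR(control_dev))
                return -ENODEV;         /* control module not probed yet */

        omap_control_usb_phy_power(control_dev, 1);
        omap_control_usb_set_mode(control_dev, USB_MODE_DEVICE);

        return 0;
}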
+static int omap_control_usb_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_control_usb_platform_data *pdata = pdev->dev.platform_data;
+
+ control_usb = devm_kzalloc(&pdev->dev, sizeof(*control_usb),
+ GFP_KERNEL);
+ if (!control_usb) {
+ dev_err(&pdev->dev, "unable to alloc memory for control usb\n");
+ return -ENOMEM;
+ }
+
+ if (np) {
+ of_property_read_u32(np, "ti,type", &control_usb->type);
+ } else if (pdata) {
+ control_usb->type = pdata->type;
+ } else {
+ dev_err(&pdev->dev, "no pdata present\n");
+ return -EINVAL;
+ }
+
+ control_usb->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "control_dev_conf");
+ control_usb->dev_conf = devm_request_and_ioremap(&pdev->dev, res);
+ if (!control_usb->dev_conf) {
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ if (control_usb->type == OMAP_CTRL_DEV_TYPE1) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "otghs_control");
+ control_usb->otghs_control = devm_request_and_ioremap(
+ &pdev->dev, res);
+ if (!control_usb->otghs_control) {
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -EADDRNOTAVAIL;
+ }
+ }
+
+ if (control_usb->type == OMAP_CTRL_DEV_TYPE2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "phy_power_usb");
+ control_usb->phy_power = devm_request_and_ioremap(
+ &pdev->dev, res);
+ if (!control_usb->phy_power) {
+ dev_dbg(&pdev->dev, "Failed to obtain io memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ control_usb->sys_clk = devm_clk_get(control_usb->dev,
+ "sys_clkin");
+ if (IS_ERR(control_usb->sys_clk)) {
+ pr_err("%s: unable to get sys_clkin\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+
+ dev_set_drvdata(control_usb->dev, control_usb);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap_control_usb_id_table[] = {
+ { .compatible = "ti,omap-control-usb" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, omap_control_usb_id_table);
+#endif
+
+static struct platform_driver omap_control_usb_driver = {
+ .probe = omap_control_usb_probe,
+ .driver = {
+ .name = "omap-control-usb",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(omap_control_usb_id_table),
+ },
+};
+
+static int __init omap_control_usb_init(void)
+{
+ return platform_driver_register(&omap_control_usb_driver);
+}
+subsys_initcall(omap_control_usb_init);
+
+static void __exit omap_control_usb_exit(void)
+{
+ platform_driver_unregister(&omap_control_usb_driver);
+}
+module_exit(omap_control_usb_exit);
+
+MODULE_ALIAS("platform: omap_control_usb");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("OMAP Control Module USB Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/omap-usb2.c b/drivers/usb/phy/omap-usb2.c
index 26ae8f49225c..844ab68f08d0 100644
--- a/drivers/usb/phy/omap-usb2.c
+++ b/drivers/usb/phy/omap-usb2.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
+#include <linux/usb/omap_control_usb.h>
/**
 * omap_usb2_set_comparator - links the comparator present in the system with
@@ -52,29 +53,6 @@ int omap_usb2_set_comparator(struct phy_companion *comparator)
}
EXPORT_SYMBOL_GPL(omap_usb2_set_comparator);
-/**
- * omap_usb_phy_power - power on/off the phy using control module reg
- * @phy: struct omap_usb *
- * @on: 0 or 1, based on powering on or off the PHY
- *
- * XXX: Remove this function once control module driver gets merged
- */
-static void omap_usb_phy_power(struct omap_usb *phy, int on)
-{
- u32 val;
-
- if (on) {
- val = readl(phy->control_dev);
- if (val & PHY_PD) {
- writel(~PHY_PD, phy->control_dev);
- /* XXX: add proper documentation for this delay */
- mdelay(200);
- }
- } else {
- writel(PHY_PD, phy->control_dev);
- }
-}
-
static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
{
struct omap_usb *phy = phy_to_omapusb(otg->phy);
@@ -124,7 +102,7 @@ static int omap_usb2_suspend(struct usb_phy *x, int suspend)
struct omap_usb *phy = phy_to_omapusb(x);
if (suspend && !phy->is_suspended) {
- omap_usb_phy_power(phy, 0);
+ omap_control_usb_phy_power(phy->control_dev, 0);
pm_runtime_put_sync(phy->dev);
phy->is_suspended = 1;
} else if (!suspend && phy->is_suspended) {
@@ -134,7 +112,7 @@ static int omap_usb2_suspend(struct usb_phy *x, int suspend)
ret);
return ret;
}
- omap_usb_phy_power(phy, 1);
+ omap_control_usb_phy_power(phy->control_dev, 1);
phy->is_suspended = 0;
}
@@ -145,7 +123,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
struct usb_otg *otg;
- struct resource *res;
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
@@ -165,17 +142,16 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->phy.label = "omap-usb2";
phy->phy.set_suspend = omap_usb2_suspend;
phy->phy.otg = otg;
+ phy->phy.type = USB_PHY_TYPE_USB2;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- phy->control_dev = devm_request_and_ioremap(&pdev->dev, res);
- if (phy->control_dev == NULL) {
- dev_err(&pdev->dev, "Failed to obtain io memory\n");
- return -ENXIO;
+ phy->control_dev = omap_get_control_dev();
+ if (IS_ERR(phy->control_dev)) {
+ dev_dbg(&pdev->dev, "Failed to get control device\n");
+ return -ENODEV;
}
phy->is_suspended = 1;
- omap_usb_phy_power(phy, 0);
+ omap_control_usb_phy_power(phy->control_dev, 0);
otg->set_host = omap_usb_set_host;
otg->set_peripheral = omap_usb_set_peripheral;
@@ -190,7 +166,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
}
clk_prepare(phy->wkupclk);
- usb_add_phy(&phy->phy, USB_PHY_TYPE_USB2);
+ phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
+ if (IS_ERR(phy->optclk))
+ dev_vdbg(&pdev->dev, "unable to get refclk960m\n");
+ else
+ clk_prepare(phy->optclk);
+
+ usb_add_phy_dev(&phy->phy);
platform_set_drvdata(pdev, phy);
@@ -204,6 +186,8 @@ static int omap_usb2_remove(struct platform_device *pdev)
struct omap_usb *phy = platform_get_drvdata(pdev);
clk_unprepare(phy->wkupclk);
+ if (!IS_ERR(phy->optclk))
+ clk_unprepare(phy->optclk);
usb_remove_phy(&phy->phy);
return 0;
@@ -217,6 +201,8 @@ static int omap_usb2_runtime_suspend(struct device *dev)
struct omap_usb *phy = platform_get_drvdata(pdev);
clk_disable(phy->wkupclk);
+ if (!IS_ERR(phy->optclk))
+ clk_disable(phy->optclk);
return 0;
}
@@ -228,9 +214,25 @@ static int omap_usb2_runtime_resume(struct device *dev)
struct omap_usb *phy = platform_get_drvdata(pdev);
ret = clk_enable(phy->wkupclk);
- if (ret < 0)
+ if (ret < 0) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
+ goto err0;
+ }
+
+ if (!IS_ERR(phy->optclk)) {
+ ret = clk_enable(phy->optclk);
+ if (ret < 0) {
+ dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ clk_disable(phy->wkupclk);
+err0:
return ret;
}
diff --git a/drivers/usb/phy/omap-usb3.c b/drivers/usb/phy/omap-usb3.c
new file mode 100644
index 000000000000..fadc0c2b65bb
--- /dev/null
+++ b/drivers/usb/phy/omap-usb3.c
@@ -0,0 +1,355 @@
+/*
+ * omap-usb3 - USB PHY, talking to dwc3 controller in OMAP.
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/usb/omap_usb.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/usb/omap_control_usb.h>
+
+#define NUM_SYS_CLKS 5
+#define PLL_STATUS 0x00000004
+#define PLL_GO 0x00000008
+#define PLL_CONFIGURATION1 0x0000000C
+#define PLL_CONFIGURATION2 0x00000010
+#define PLL_CONFIGURATION3 0x00000014
+#define PLL_CONFIGURATION4 0x00000020
+
+#define PLL_REGM_MASK 0x001FFE00
+#define PLL_REGM_SHIFT 0x9
+#define PLL_REGM_F_MASK 0x0003FFFF
+#define PLL_REGM_F_SHIFT 0x0
+#define PLL_REGN_MASK 0x000001FE
+#define PLL_REGN_SHIFT 0x1
+#define PLL_SELFREQDCO_MASK 0x0000000E
+#define PLL_SELFREQDCO_SHIFT 0x1
+#define PLL_SD_MASK 0x0003FC00
+#define PLL_SD_SHIFT 0x9
+#define SET_PLL_GO 0x1
+#define PLL_TICOPWDN 0x10000
+#define PLL_LOCK 0x2
+#define PLL_IDLE 0x1
+
+/*
+ * This is an empirical value that works; we still need to confirm the actual
+ * value required for the USB3PHY_PLL_CONFIGURATION2.PLL_IDLE status
+ * to be correctly reflected in the USB3PHY_PLL_STATUS register.
+ */
+#define PLL_IDLE_TIME 100
+
+enum sys_clk_rate {
+ CLK_RATE_UNDEFINED = -1,
+ CLK_RATE_12MHZ,
+ CLK_RATE_16MHZ,
+ CLK_RATE_19MHZ,
+ CLK_RATE_26MHZ,
+ CLK_RATE_38MHZ
+};
+
+static struct usb_dpll_params omap_usb3_dpll_params[NUM_SYS_CLKS] = {
+ {1250, 5, 4, 20, 0}, /* 12 MHz */
+ {3125, 20, 4, 20, 0}, /* 16.8 MHz */
+ {1172, 8, 4, 20, 65537}, /* 19.2 MHz */
+ {1250, 12, 4, 20, 0}, /* 26 MHz */
+ {3125, 47, 4, 20, 92843}, /* 38.4 MHz */
+};
+
+static int omap_usb3_suspend(struct usb_phy *x, int suspend)
+{
+ struct omap_usb *phy = phy_to_omapusb(x);
+ int val;
+ int timeout = PLL_IDLE_TIME;
+
+ if (suspend && !phy->is_suspended) {
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val |= PLL_IDLE;
+ omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+
+ do {
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if (val & PLL_TICOPWDN)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ omap_control_usb3_phy_power(phy->control_dev, 0);
+
+ phy->is_suspended = 1;
+ } else if (!suspend && phy->is_suspended) {
+ phy->is_suspended = 0;
+
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val &= ~PLL_IDLE;
+ omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+
+ do {
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if (!(val & PLL_TICOPWDN))
+ break;
+ udelay(1);
+ } while (--timeout);
+ }
+
+ return 0;
+}
+
+static inline enum sys_clk_rate __get_sys_clk_index(unsigned long rate)
+{
+ switch (rate) {
+ case 12000000:
+ return CLK_RATE_12MHZ;
+ case 16800000:
+ return CLK_RATE_16MHZ;
+ case 19200000:
+ return CLK_RATE_19MHZ;
+ case 26000000:
+ return CLK_RATE_26MHZ;
+ case 38400000:
+ return CLK_RATE_38MHZ;
+ default:
+ return CLK_RATE_UNDEFINED;
+ }
+}
+
+static void omap_usb_dpll_relock(struct omap_usb *phy)
+{
+ u32 val;
+ unsigned long timeout;
+
+ omap_usb_writel(phy->pll_ctrl_base, PLL_GO, SET_PLL_GO);
+
+ timeout = jiffies + msecs_to_jiffies(20);
+ do {
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if (val & PLL_LOCK)
+ break;
+ } while (!WARN_ON(time_after(jiffies, timeout)));
+}
+
+static int omap_usb_dpll_lock(struct omap_usb *phy)
+{
+ u32 val;
+ unsigned long rate;
+ enum sys_clk_rate clk_index;
+
+ rate = clk_get_rate(phy->sys_clk);
+ clk_index = __get_sys_clk_index(rate);
+
+ if (clk_index == CLK_RATE_UNDEFINED) {
+ pr_err("dpll cannot be locked for sys clk freq:%luHz\n", rate);
+ return -EINVAL;
+ }
+
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
+ val &= ~PLL_REGN_MASK;
+ val |= omap_usb3_dpll_params[clk_index].n << PLL_REGN_SHIFT;
+ omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
+
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val &= ~PLL_SELFREQDCO_MASK;
+ val |= omap_usb3_dpll_params[clk_index].freq << PLL_SELFREQDCO_SHIFT;
+ omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
+ val &= ~PLL_REGM_MASK;
+ val |= omap_usb3_dpll_params[clk_index].m << PLL_REGM_SHIFT;
+ omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
+
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION4);
+ val &= ~PLL_REGM_F_MASK;
+ val |= omap_usb3_dpll_params[clk_index].mf << PLL_REGM_F_SHIFT;
+ omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION4, val);
+
+ val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION3);
+ val &= ~PLL_SD_MASK;
+ val |= omap_usb3_dpll_params[clk_index].sd << PLL_SD_SHIFT;
+ omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION3, val);
+
+ omap_usb_dpll_relock(phy);
+
+ return 0;
+}
+
+static int omap_usb3_init(struct usb_phy *x)
+{
+ struct omap_usb *phy = phy_to_omapusb(x);
+
+ omap_usb_dpll_lock(phy);
+ omap_control_usb3_phy_power(phy->control_dev, 1);
+
+ return 0;
+}
+
+static int omap_usb3_probe(struct platform_device *pdev)
+{
+ struct omap_usb *phy;
+ struct resource *res;
+
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ dev_err(&pdev->dev, "unable to alloc mem for OMAP USB3 PHY\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll_ctrl");
+ phy->pll_ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!phy->pll_ctrl_base) {
+ dev_err(&pdev->dev, "ioremap of pll_ctrl failed\n");
+ return -ENOMEM;
+ }
+
+ phy->dev = &pdev->dev;
+
+ phy->phy.dev = phy->dev;
+ phy->phy.label = "omap-usb3";
+ phy->phy.init = omap_usb3_init;
+ phy->phy.set_suspend = omap_usb3_suspend;
+ phy->phy.type = USB_PHY_TYPE_USB3;
+
+ phy->is_suspended = 1;
+ phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
+ if (IS_ERR(phy->wkupclk)) {
+ dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
+ return PTR_ERR(phy->wkupclk);
+ }
+ clk_prepare(phy->wkupclk);
+
+ phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
+ if (IS_ERR(phy->optclk)) {
+ dev_err(&pdev->dev, "unable to get usb_otg_ss_refclk960m\n");
+ return PTR_ERR(phy->optclk);
+ }
+ clk_prepare(phy->optclk);
+
+ phy->sys_clk = devm_clk_get(phy->dev, "sys_clkin");
+ if (IS_ERR(phy->sys_clk)) {
+ pr_err("%s: unable to get sys_clkin\n", __func__);
+ return -EINVAL;
+ }
+
+ phy->control_dev = omap_get_control_dev();
+ if (IS_ERR(phy->control_dev)) {
+ dev_dbg(&pdev->dev, "Failed to get control device\n");
+ return -ENODEV;
+ }
+
+ omap_control_usb3_phy_power(phy->control_dev, 0);
+ usb_add_phy_dev(&phy->phy);
+
+ platform_set_drvdata(pdev, phy);
+
+ pm_runtime_enable(phy->dev);
+ pm_runtime_get(&pdev->dev);
+
+ return 0;
+}
+
+static int omap_usb3_remove(struct platform_device *pdev)
+{
+ struct omap_usb *phy = platform_get_drvdata(pdev);
+
+ clk_unprepare(phy->wkupclk);
+ clk_unprepare(phy->optclk);
+ usb_remove_phy(&phy->phy);
+ if (!pm_runtime_suspended(&pdev->dev))
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int omap_usb3_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_usb *phy = platform_get_drvdata(pdev);
+
+ clk_disable(phy->wkupclk);
+ clk_disable(phy->optclk);
+
+ return 0;
+}
+
+static int omap_usb3_runtime_resume(struct device *dev)
+{
+ u32 ret = 0;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_usb *phy = platform_get_drvdata(pdev);
+
+ ret = clk_enable(phy->optclk);
+ if (ret) {
+ dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
+ goto err1;
+ }
+
+ ret = clk_enable(phy->wkupclk);
+ if (ret) {
+ dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ clk_disable(phy->optclk);
+
+err1:
+ return ret;
+}
+
+static const struct dev_pm_ops omap_usb3_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_usb3_runtime_suspend, omap_usb3_runtime_resume,
+ NULL)
+};
+
+#define DEV_PM_OPS (&omap_usb3_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap_usb3_id_table[] = {
+ { .compatible = "ti,omap-usb3" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, omap_usb3_id_table);
+#endif
+
+static struct platform_driver omap_usb3_driver = {
+ .probe = omap_usb3_probe,
+ .remove = omap_usb3_remove,
+ .driver = {
+ .name = "omap-usb3",
+ .owner = THIS_MODULE,
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(omap_usb3_id_table),
+ },
+};
+
+module_platform_driver(omap_usb3_driver);
+
+MODULE_ALIAS("platform: omap_usb3");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("OMAP USB3 phy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/samsung-usbphy.c b/drivers/usb/phy/samsung-usbphy.c
new file mode 100644
index 000000000000..6ea553733832
--- /dev/null
+++ b/drivers/usb/phy/samsung-usbphy.c
@@ -0,0 +1,930 @@
+/* linux/drivers/usb/phy/samsung-usbphy.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Praveen Paneri <p.paneri@samsung.com>
+ *
+ * Samsung USB2.0 PHY transceiver; talks to S3C HS OTG controller, EHCI-S5P and
+ * OHCI-EXYNOS controllers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/samsung_usb_phy.h>
+#include <linux/platform_data/samsung-usbphy.h>
+
+/* Register definitions */
+
+#define SAMSUNG_PHYPWR (0x00)
+
+#define PHYPWR_NORMAL_MASK (0x19 << 0)
+#define PHYPWR_OTG_DISABLE (0x1 << 4)
+#define PHYPWR_ANALOG_POWERDOWN (0x1 << 3)
+#define PHYPWR_FORCE_SUSPEND (0x1 << 1)
+/* For Exynos4 */
+#define PHYPWR_NORMAL_MASK_PHY0 (0x39 << 0)
+#define PHYPWR_SLEEP_PHY0 (0x1 << 5)
+
+#define SAMSUNG_PHYCLK (0x04)
+
+#define PHYCLK_MODE_USB11 (0x1 << 6)
+#define PHYCLK_EXT_OSC (0x1 << 5)
+#define PHYCLK_COMMON_ON_N (0x1 << 4)
+#define PHYCLK_ID_PULL (0x1 << 2)
+#define PHYCLK_CLKSEL_MASK (0x3 << 0)
+#define PHYCLK_CLKSEL_48M (0x0 << 0)
+#define PHYCLK_CLKSEL_12M (0x2 << 0)
+#define PHYCLK_CLKSEL_24M (0x3 << 0)
+
+#define SAMSUNG_RSTCON (0x08)
+
+#define RSTCON_PHYLINK_SWRST (0x1 << 2)
+#define RSTCON_HLINK_SWRST (0x1 << 1)
+#define RSTCON_SWRST (0x1 << 0)
+
+/* EXYNOS5 */
+#define EXYNOS5_PHY_HOST_CTRL0 (0x00)
+
+#define HOST_CTRL0_PHYSWRSTALL (0x1 << 31)
+
+#define HOST_CTRL0_REFCLKSEL_MASK (0x3 << 19)
+#define HOST_CTRL0_REFCLKSEL_XTAL (0x0 << 19)
+#define HOST_CTRL0_REFCLKSEL_EXTL (0x1 << 19)
+#define HOST_CTRL0_REFCLKSEL_CLKCORE (0x2 << 19)
+
+#define HOST_CTRL0_FSEL_MASK (0x7 << 16)
+#define HOST_CTRL0_FSEL(_x) ((_x) << 16)
+
+#define FSEL_CLKSEL_50M (0x7)
+#define FSEL_CLKSEL_24M (0x5)
+#define FSEL_CLKSEL_20M (0x4)
+#define FSEL_CLKSEL_19200K (0x3)
+#define FSEL_CLKSEL_12M (0x2)
+#define FSEL_CLKSEL_10M (0x1)
+#define FSEL_CLKSEL_9600K (0x0)
+
+#define HOST_CTRL0_TESTBURNIN (0x1 << 11)
+#define HOST_CTRL0_RETENABLE (0x1 << 10)
+#define HOST_CTRL0_COMMONON_N (0x1 << 9)
+#define HOST_CTRL0_SIDDQ (0x1 << 6)
+#define HOST_CTRL0_FORCESLEEP (0x1 << 5)
+#define HOST_CTRL0_FORCESUSPEND (0x1 << 4)
+#define HOST_CTRL0_WORDINTERFACE (0x1 << 3)
+#define HOST_CTRL0_UTMISWRST (0x1 << 2)
+#define HOST_CTRL0_LINKSWRST (0x1 << 1)
+#define HOST_CTRL0_PHYSWRST (0x1 << 0)
+
+#define EXYNOS5_PHY_HOST_TUNE0 (0x04)
+
+#define EXYNOS5_PHY_HSIC_CTRL1 (0x10)
+
+#define EXYNOS5_PHY_HSIC_TUNE1 (0x14)
+
+#define EXYNOS5_PHY_HSIC_CTRL2 (0x20)
+
+#define EXYNOS5_PHY_HSIC_TUNE2 (0x24)
+
+#define HSIC_CTRL_REFCLKSEL_MASK (0x3 << 23)
+#define HSIC_CTRL_REFCLKSEL (0x2 << 23)
+
+#define HSIC_CTRL_REFCLKDIV_MASK (0x7f << 16)
+#define HSIC_CTRL_REFCLKDIV(_x) ((_x) << 16)
+#define HSIC_CTRL_REFCLKDIV_12 (0x24 << 16)
+#define HSIC_CTRL_REFCLKDIV_15 (0x1c << 16)
+#define HSIC_CTRL_REFCLKDIV_16 (0x1a << 16)
+#define HSIC_CTRL_REFCLKDIV_19_2 (0x15 << 16)
+#define HSIC_CTRL_REFCLKDIV_20 (0x14 << 16)
+
+#define HSIC_CTRL_SIDDQ (0x1 << 6)
+#define HSIC_CTRL_FORCESLEEP (0x1 << 5)
+#define HSIC_CTRL_FORCESUSPEND (0x1 << 4)
+#define HSIC_CTRL_WORDINTERFACE (0x1 << 3)
+#define HSIC_CTRL_UTMISWRST (0x1 << 2)
+#define HSIC_CTRL_PHYSWRST (0x1 << 0)
+
+#define EXYNOS5_PHY_HOST_EHCICTRL (0x30)
+
+#define HOST_EHCICTRL_ENAINCRXALIGN (0x1 << 29)
+#define HOST_EHCICTRL_ENAINCR4 (0x1 << 28)
+#define HOST_EHCICTRL_ENAINCR8 (0x1 << 27)
+#define HOST_EHCICTRL_ENAINCR16 (0x1 << 26)
+
+#define EXYNOS5_PHY_HOST_OHCICTRL (0x34)
+
+#define HOST_OHCICTRL_SUSPLGCY (0x1 << 3)
+#define HOST_OHCICTRL_APPSTARTCLK (0x1 << 2)
+#define HOST_OHCICTRL_CNTSEL (0x1 << 1)
+#define HOST_OHCICTRL_CLKCKTRST (0x1 << 0)
+
+#define EXYNOS5_PHY_OTG_SYS (0x38)
+
+#define OTG_SYS_PHYLINK_SWRESET (0x1 << 14)
+#define OTG_SYS_LINKSWRST_UOTG (0x1 << 13)
+#define OTG_SYS_PHY0_SWRST (0x1 << 12)
+
+#define OTG_SYS_REFCLKSEL_MASK (0x3 << 9)
+#define OTG_SYS_REFCLKSEL_XTAL (0x0 << 9)
+#define OTG_SYS_REFCLKSEL_EXTL (0x1 << 9)
+#define OTG_SYS_REFCLKSEL_CLKCORE (0x2 << 9)
+
+#define OTG_SYS_IDPULLUP_UOTG (0x1 << 8)
+#define OTG_SYS_COMMON_ON (0x1 << 7)
+
+#define OTG_SYS_FSEL_MASK (0x7 << 4)
+#define OTG_SYS_FSEL(_x) ((_x) << 4)
+
+#define OTG_SYS_FORCESLEEP (0x1 << 3)
+#define OTG_SYS_OTGDISABLE (0x1 << 2)
+#define OTG_SYS_SIDDQ_UOTG (0x1 << 1)
+#define OTG_SYS_FORCESUSPEND (0x1 << 0)
+
+#define EXYNOS5_PHY_OTG_TUNE (0x40)
+
+#ifndef MHZ
+#define MHZ (1000*1000)
+#endif
+
+#ifndef KHZ
+#define KHZ (1000)
+#endif
+
+#define EXYNOS_USBHOST_PHY_CTRL_OFFSET (0x4)
+#define S3C64XX_USBPHY_ENABLE (0x1 << 16)
+#define EXYNOS_USBPHY_ENABLE (0x1 << 0)
+#define EXYNOS_USB20PHY_CFG_HOST_LINK (0x1 << 0)
+
+enum samsung_cpu_type {
+ TYPE_S3C64XX,
+ TYPE_EXYNOS4210,
+ TYPE_EXYNOS5250,
+};
+
+/*
+ * struct samsung_usbphy_drvdata - driver data for various SoC variants
+ * @cpu_type: machine identifier
+ * @devphy_en_mask: device phy enable mask for PHY CONTROL register
+ * @hostphy_en_mask: host phy enable mask for PHY CONTROL register
+ * @devphy_reg_offset: offset to DEVICE PHY CONTROL register from
+ * mapped address of system controller.
+ * @hostphy_reg_offset: offset to HOST PHY CONTROL register from
+ * mapped address of system controller.
+ *
+ * Here we have a separate mask for device type phy.
+ * Having different masks for host and device type phy helps
+ * in setting independent masks in case of SoCs like S5PV210,
+ * in which PHY0 and PHY1 enable bits belong to same register
+ * placed at position 0 and 1 respectively.
+ * Although for newer SoCs like exynos these bits belong to
+ * different registers altogether placed at position 0.
+ */
+struct samsung_usbphy_drvdata {
+ int cpu_type;
+ int devphy_en_mask;
+ int hostphy_en_mask;
+ u32 devphy_reg_offset;
+ u32 hostphy_reg_offset;
+};
+
+/*
+ * struct samsung_usbphy - transceiver driver state
+ * @phy: transceiver structure
+ * @plat: platform data
+ * @dev: The parent device supplied to the probe function
+ * @clk: usb phy clock
+ * @regs: usb phy controller registers memory base
+ * @pmuregs: USB device PHY_CONTROL register memory base
+ * @sysreg: USB2.0 PHY_CFG register memory base
+ * @ref_clk_freq: reference clock frequency selection
+ * @drv_data: driver data available for different SoCs
+ * @phy_type: Samsung SoCs specific phy types: #HOST
+ * #DEVICE
+ * @phy_usage: usage count for phy
+ * @lock: lock for phy operations
+ */
+struct samsung_usbphy {
+ struct usb_phy phy;
+ struct samsung_usbphy_data *plat;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *regs;
+ void __iomem *pmuregs;
+ void __iomem *sysreg;
+ int ref_clk_freq;
+ const struct samsung_usbphy_drvdata *drv_data;
+ enum samsung_usb_phy_type phy_type;
+ atomic_t phy_usage;
+ spinlock_t lock;
+};
+
+#define phy_to_sphy(x) container_of((x), struct samsung_usbphy, phy)
+
+int samsung_usbphy_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+ if (!otg)
+ return -ENODEV;
+
+ if (!otg->host)
+ otg->host = host;
+
+ return 0;
+}
+
+static int samsung_usbphy_parse_dt(struct samsung_usbphy *sphy)
+{
+ struct device_node *usbphy_sys;
+
+ /* Getting node for system controller interface for usb-phy */
+ usbphy_sys = of_get_child_by_name(sphy->dev->of_node, "usbphy-sys");
+ if (!usbphy_sys) {
+ dev_err(sphy->dev, "No sys-controller interface for usb-phy\n");
+ return -ENODEV;
+ }
+
+ sphy->pmuregs = of_iomap(usbphy_sys, 0);
+
+ if (sphy->pmuregs == NULL) {
+ dev_err(sphy->dev, "Can't get usb-phy pmu control register\n");
+ goto err0;
+ }
+
+ sphy->sysreg = of_iomap(usbphy_sys, 1);
+
+ /*
+ * Not returning error code here, since this situation is not fatal.
+ * Few SoCs may not have this switch available
+ */
+ if (sphy->sysreg == NULL)
+ dev_warn(sphy->dev, "Can't get usb-phy sysreg cfg register\n");
+
+ of_node_put(usbphy_sys);
+
+ return 0;
+
+err0:
+ of_node_put(usbphy_sys);
+ return -ENXIO;
+}
+
+/*
+ * Set isolation here for phy.
+ * Here 'on = true' would mean USB PHY block is isolated, hence
+ * de-activated and vice-versa.
+ */
+static void samsung_usbphy_set_isolation(struct samsung_usbphy *sphy, bool on)
+{
+ void __iomem *reg = NULL;
+ u32 reg_val;
+ u32 en_mask = 0;
+
+ if (!sphy->pmuregs) {
+ dev_warn(sphy->dev, "Can't set pmu isolation\n");
+ return;
+ }
+
+ switch (sphy->drv_data->cpu_type) {
+ case TYPE_S3C64XX:
+ /*
+ * Do nothing: We will add here once S3C64xx goes for DT support
+ */
+ break;
+ case TYPE_EXYNOS4210:
+ /*
+ * Fall through since exynos4210 and exynos5250 have similar
+ * register architecture: two separate registers for host and
+ * device phy control with enable bit at position 0.
+ */
+ case TYPE_EXYNOS5250:
+ if (sphy->phy_type == USB_PHY_TYPE_DEVICE) {
+ reg = sphy->pmuregs +
+ sphy->drv_data->devphy_reg_offset;
+ en_mask = sphy->drv_data->devphy_en_mask;
+ } else if (sphy->phy_type == USB_PHY_TYPE_HOST) {
+ reg = sphy->pmuregs +
+ sphy->drv_data->hostphy_reg_offset;
+ en_mask = sphy->drv_data->hostphy_en_mask;
+ }
+ break;
+ default:
+ dev_err(sphy->dev, "Invalid SoC type\n");
+ return;
+ }
+
+ reg_val = readl(reg);
+
+ if (on)
+ reg_val &= ~en_mask;
+ else
+ reg_val |= en_mask;
+
+ writel(reg_val, reg);
+}
+
+/*
+ * Configure the mode of working of usb-phy here: HOST/DEVICE.
+ */
+static void samsung_usbphy_cfg_sel(struct samsung_usbphy *sphy)
+{
+ u32 reg;
+
+ if (!sphy->sysreg) {
+ dev_warn(sphy->dev, "Can't configure specified phy mode\n");
+ return;
+ }
+
+ reg = readl(sphy->sysreg);
+
+ if (sphy->phy_type == USB_PHY_TYPE_DEVICE)
+ reg &= ~EXYNOS_USB20PHY_CFG_HOST_LINK;
+ else if (sphy->phy_type == USB_PHY_TYPE_HOST)
+ reg |= EXYNOS_USB20PHY_CFG_HOST_LINK;
+
+ writel(reg, sphy->sysreg);
+}
+
+/*
+ * PHYs are different for USB Device and USB Host.
+ * This makes sure that the correct PHY type is selected before
+ * any operation on the PHY.
+ */
+static int samsung_usbphy_set_type(struct usb_phy *phy,
+ enum samsung_usb_phy_type phy_type)
+{
+ struct samsung_usbphy *sphy = phy_to_sphy(phy);
+
+ sphy->phy_type = phy_type;
+
+ return 0;
+}
+
+/*
+ * Returns reference clock frequency selection value
+ */
+static int samsung_usbphy_get_refclk_freq(struct samsung_usbphy *sphy)
+{
+ struct clk *ref_clk;
+ int refclk_freq = 0;
+
+ /*
+ * In exynos5250 USB host and device PHY use
+ * external crystal clock XXTI
+ */
+ if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250)
+ ref_clk = clk_get(sphy->dev, "ext_xtal");
+ else
+ ref_clk = clk_get(sphy->dev, "xusbxti");
+ if (IS_ERR(ref_clk)) {
+ dev_err(sphy->dev, "Failed to get reference clock\n");
+ return PTR_ERR(ref_clk);
+ }
+
+ if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250) {
+ /* set clock frequency for PLL */
+ switch (clk_get_rate(ref_clk)) {
+ case 9600 * KHZ:
+ refclk_freq = FSEL_CLKSEL_9600K;
+ break;
+ case 10 * MHZ:
+ refclk_freq = FSEL_CLKSEL_10M;
+ break;
+ case 12 * MHZ:
+ refclk_freq = FSEL_CLKSEL_12M;
+ break;
+ case 19200 * KHZ:
+ refclk_freq = FSEL_CLKSEL_19200K;
+ break;
+ case 20 * MHZ:
+ refclk_freq = FSEL_CLKSEL_20M;
+ break;
+ case 50 * MHZ:
+ refclk_freq = FSEL_CLKSEL_50M;
+ break;
+ case 24 * MHZ:
+ default:
+ /* default reference clock */
+ refclk_freq = FSEL_CLKSEL_24M;
+ break;
+ }
+ } else {
+ switch (clk_get_rate(ref_clk)) {
+ case 12 * MHZ:
+ refclk_freq = PHYCLK_CLKSEL_12M;
+ break;
+ case 24 * MHZ:
+ refclk_freq = PHYCLK_CLKSEL_24M;
+ break;
+ case 48 * MHZ:
+ refclk_freq = PHYCLK_CLKSEL_48M;
+ break;
+ default:
+ if (sphy->drv_data->cpu_type == TYPE_S3C64XX)
+ refclk_freq = PHYCLK_CLKSEL_48M;
+ else
+ refclk_freq = PHYCLK_CLKSEL_24M;
+ break;
+ }
+ }
+ clk_put(ref_clk);
+
+ return refclk_freq;
+}
+
+static bool exynos5_phyhost_is_on(void *regs)
+{
+ u32 reg;
+
+ reg = readl(regs + EXYNOS5_PHY_HOST_CTRL0);
+
+ return !(reg & HOST_CTRL0_SIDDQ);
+}
+
+static void samsung_exynos5_usbphy_enable(struct samsung_usbphy *sphy)
+{
+ void __iomem *regs = sphy->regs;
+ u32 phyclk = sphy->ref_clk_freq;
+ u32 phyhost;
+ u32 phyotg;
+ u32 phyhsic;
+ u32 ehcictrl;
+ u32 ohcictrl;
+
+ /*
+ * phy_usage helps in keeping usage count for phy
+ * so that the first consumer enabling the phy is also
+ * the last consumer to disable it.
+ */
+
+ atomic_inc(&sphy->phy_usage);
+
+ if (exynos5_phyhost_is_on(regs)) {
+ dev_info(sphy->dev, "Already power on PHY\n");
+ return;
+ }
+
+ /* Host configuration */
+ phyhost = readl(regs + EXYNOS5_PHY_HOST_CTRL0);
+
+ /* phy reference clock configuration */
+ phyhost &= ~HOST_CTRL0_FSEL_MASK;
+ phyhost |= HOST_CTRL0_FSEL(phyclk);
+
+ /* host phy reset */
+ phyhost &= ~(HOST_CTRL0_PHYSWRST |
+ HOST_CTRL0_PHYSWRSTALL |
+ HOST_CTRL0_SIDDQ |
+ /* Enable normal mode of operation */
+ HOST_CTRL0_FORCESUSPEND |
+ HOST_CTRL0_FORCESLEEP);
+
+ /* Link reset */
+ phyhost |= (HOST_CTRL0_LINKSWRST |
+ HOST_CTRL0_UTMISWRST |
+ /* COMMON Block configuration during suspend */
+ HOST_CTRL0_COMMONON_N);
+ writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);
+ udelay(10);
+ phyhost &= ~(HOST_CTRL0_LINKSWRST |
+ HOST_CTRL0_UTMISWRST);
+ writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);
+
+ /* OTG configuration */
+ phyotg = readl(regs + EXYNOS5_PHY_OTG_SYS);
+
+ /* phy reference clock configuration */
+ phyotg &= ~OTG_SYS_FSEL_MASK;
+ phyotg |= OTG_SYS_FSEL(phyclk);
+
+ /* Enable normal mode of operation */
+ phyotg &= ~(OTG_SYS_FORCESUSPEND |
+ OTG_SYS_SIDDQ_UOTG |
+ OTG_SYS_FORCESLEEP |
+ OTG_SYS_REFCLKSEL_MASK |
+ /* COMMON Block configuration during suspend */
+ OTG_SYS_COMMON_ON);
+
+ /* OTG phy & link reset */
+ phyotg |= (OTG_SYS_PHY0_SWRST |
+ OTG_SYS_LINKSWRST_UOTG |
+ OTG_SYS_PHYLINK_SWRESET |
+ OTG_SYS_OTGDISABLE |
+ /* Set phy refclk */
+ OTG_SYS_REFCLKSEL_CLKCORE);
+
+ writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);
+ udelay(10);
+ phyotg &= ~(OTG_SYS_PHY0_SWRST |
+ OTG_SYS_LINKSWRST_UOTG |
+ OTG_SYS_PHYLINK_SWRESET);
+ writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);
+
+ /* HSIC phy configuration */
+ phyhsic = (HSIC_CTRL_REFCLKDIV_12 |
+ HSIC_CTRL_REFCLKSEL |
+ HSIC_CTRL_PHYSWRST);
+ writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
+ writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);
+ udelay(10);
+ phyhsic &= ~HSIC_CTRL_PHYSWRST;
+ writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
+ writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);
+
+ udelay(80);
+
+ /* enable EHCI DMA burst */
+ ehcictrl = readl(regs + EXYNOS5_PHY_HOST_EHCICTRL);
+ ehcictrl |= (HOST_EHCICTRL_ENAINCRXALIGN |
+ HOST_EHCICTRL_ENAINCR4 |
+ HOST_EHCICTRL_ENAINCR8 |
+ HOST_EHCICTRL_ENAINCR16);
+ writel(ehcictrl, regs + EXYNOS5_PHY_HOST_EHCICTRL);
+
+ /* set ohci_suspend_on_n */
+ ohcictrl = readl(regs + EXYNOS5_PHY_HOST_OHCICTRL);
+ ohcictrl |= HOST_OHCICTRL_SUSPLGCY;
+ writel(ohcictrl, regs + EXYNOS5_PHY_HOST_OHCICTRL);
+}
+
+static void samsung_usbphy_enable(struct samsung_usbphy *sphy)
+{
+ void __iomem *regs = sphy->regs;
+ u32 phypwr;
+ u32 phyclk;
+ u32 rstcon;
+
+ /* set clock frequency for PLL */
+ phyclk = sphy->ref_clk_freq;
+ phypwr = readl(regs + SAMSUNG_PHYPWR);
+ rstcon = readl(regs + SAMSUNG_RSTCON);
+
+ switch (sphy->drv_data->cpu_type) {
+ case TYPE_S3C64XX:
+ phyclk &= ~PHYCLK_COMMON_ON_N;
+ phypwr &= ~PHYPWR_NORMAL_MASK;
+ rstcon |= RSTCON_SWRST;
+ break;
+ case TYPE_EXYNOS4210:
+ phypwr &= ~PHYPWR_NORMAL_MASK_PHY0;
+ rstcon |= RSTCON_SWRST;
+ default:
+ break;
+ }
+
+ writel(phyclk, regs + SAMSUNG_PHYCLK);
+ /* Configure PHY0 for normal operation*/
+ writel(phypwr, regs + SAMSUNG_PHYPWR);
+ /* reset all ports of PHY and Link */
+ writel(rstcon, regs + SAMSUNG_RSTCON);
+ udelay(10);
+ rstcon &= ~RSTCON_SWRST;
+ writel(rstcon, regs + SAMSUNG_RSTCON);
+}
+
+static void samsung_exynos5_usbphy_disable(struct samsung_usbphy *sphy)
+{
+ void __iomem *regs = sphy->regs;
+ u32 phyhost;
+ u32 phyotg;
+ u32 phyhsic;
+
+ if (atomic_dec_return(&sphy->phy_usage) > 0) {
+ dev_info(sphy->dev, "still being used\n");
+ return;
+ }
+
+ phyhsic = (HSIC_CTRL_REFCLKDIV_12 |
+ HSIC_CTRL_REFCLKSEL |
+ HSIC_CTRL_SIDDQ |
+ HSIC_CTRL_FORCESLEEP |
+ HSIC_CTRL_FORCESUSPEND);
+ writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
+ writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);
+
+ phyhost = readl(regs + EXYNOS5_PHY_HOST_CTRL0);
+ phyhost |= (HOST_CTRL0_SIDDQ |
+ HOST_CTRL0_FORCESUSPEND |
+ HOST_CTRL0_FORCESLEEP |
+ HOST_CTRL0_PHYSWRST |
+ HOST_CTRL0_PHYSWRSTALL);
+ writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);
+
+ phyotg = readl(regs + EXYNOS5_PHY_OTG_SYS);
+ phyotg |= (OTG_SYS_FORCESUSPEND |
+ OTG_SYS_SIDDQ_UOTG |
+ OTG_SYS_FORCESLEEP);
+ writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);
+}
+
+static void samsung_usbphy_disable(struct samsung_usbphy *sphy)
+{
+ void __iomem *regs = sphy->regs;
+ u32 phypwr;
+
+ phypwr = readl(regs + SAMSUNG_PHYPWR);
+
+ switch (sphy->drv_data->cpu_type) {
+ case TYPE_S3C64XX:
+ phypwr |= PHYPWR_NORMAL_MASK;
+ break;
+ case TYPE_EXYNOS4210:
+ phypwr |= PHYPWR_NORMAL_MASK_PHY0;
+ default:
+ break;
+ }
+
+ /* Disable analog and otg block power */
+ writel(phypwr, regs + SAMSUNG_PHYPWR);
+}
+
+/*
+ * The function passed to the usb driver for phy initialization
+ */
+static int samsung_usbphy_init(struct usb_phy *phy)
+{
+ struct samsung_usbphy *sphy;
+ struct usb_bus *host = NULL;
+ unsigned long flags;
+ int ret = 0;
+
+ sphy = phy_to_sphy(phy);
+
+ host = phy->otg->host;
+
+ /* Enable the phy clock */
+ ret = clk_prepare_enable(sphy->clk);
+ if (ret) {
+ dev_err(sphy->dev, "%s: clk_prepare_enable failed\n", __func__);
+ return ret;
+ }
+
+ spin_lock_irqsave(&sphy->lock, flags);
+
+ if (host) {
+ /* setting default phy-type for USB 2.0 */
+ if (!strstr(dev_name(host->controller), "ehci") ||
+ !strstr(dev_name(host->controller), "ohci"))
+ samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_HOST);
+ } else {
+ samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_DEVICE);
+ }
+
+ /* Disable phy isolation */
+ if (sphy->plat && sphy->plat->pmu_isolation)
+ sphy->plat->pmu_isolation(false);
+ else
+ samsung_usbphy_set_isolation(sphy, false);
+
+ /* Selecting Host/OTG mode; After reset USB2.0PHY_CFG: HOST */
+ samsung_usbphy_cfg_sel(sphy);
+
+ /* Initialize usb phy registers */
+ if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250)
+ samsung_exynos5_usbphy_enable(sphy);
+ else
+ samsung_usbphy_enable(sphy);
+
+ spin_unlock_irqrestore(&sphy->lock, flags);
+
+ /* Disable the phy clock */
+ clk_disable_unprepare(sphy->clk);
+
+ return ret;
+}
+
+/*
+ * The function passed to the usb driver for phy shutdown
+ */
+static void samsung_usbphy_shutdown(struct usb_phy *phy)
+{
+ struct samsung_usbphy *sphy;
+ struct usb_bus *host = NULL;
+ unsigned long flags;
+
+ sphy = phy_to_sphy(phy);
+
+ host = phy->otg->host;
+
+ if (clk_prepare_enable(sphy->clk)) {
+ dev_err(sphy->dev, "%s: clk_prepare_enable failed\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&sphy->lock, flags);
+
+ if (host) {
+ /* setting default phy-type for USB 2.0 */
+ if (!strstr(dev_name(host->controller), "ehci") ||
+ !strstr(dev_name(host->controller), "ohci"))
+ samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_HOST);
+ } else {
+ samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_DEVICE);
+ }
+
+ /* De-initialize usb phy registers */
+ if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250)
+ samsung_exynos5_usbphy_disable(sphy);
+ else
+ samsung_usbphy_disable(sphy);
+
+ /* Enable phy isolation */
+ if (sphy->plat && sphy->plat->pmu_isolation)
+ sphy->plat->pmu_isolation(true);
+ else
+ samsung_usbphy_set_isolation(sphy, true);
+
+ spin_unlock_irqrestore(&sphy->lock, flags);
+
+ clk_disable_unprepare(sphy->clk);
+}
+
+static const struct of_device_id samsung_usbphy_dt_match[];
+
+static inline const struct samsung_usbphy_drvdata
+*samsung_usbphy_get_driver_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(samsung_usbphy_dt_match,
+ pdev->dev.of_node);
+ return match->data;
+ }
+
+ return (struct samsung_usbphy_drvdata *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int samsung_usbphy_probe(struct platform_device *pdev)
+{
+ struct samsung_usbphy *sphy;
+ struct usb_otg *otg;
+ struct samsung_usbphy_data *pdata = pdev->dev.platform_data;
+ const struct samsung_usbphy_drvdata *drv_data;
+ struct device *dev = &pdev->dev;
+ struct resource *phy_mem;
+ void __iomem *phy_base;
+ struct clk *clk;
+ int ret;
+
+ phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!phy_mem) {
+ dev_err(dev, "%s: missing mem resource\n", __func__);
+ return -ENODEV;
+ }
+
+ phy_base = devm_request_and_ioremap(dev, phy_mem);
+ if (!phy_base) {
+ dev_err(dev, "%s: register mapping failed\n", __func__);
+ return -ENXIO;
+ }
+
+ sphy = devm_kzalloc(dev, sizeof(*sphy), GFP_KERNEL);
+ if (!sphy)
+ return -ENOMEM;
+
+ otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
+ if (!otg)
+ return -ENOMEM;
+
+ drv_data = samsung_usbphy_get_driver_data(pdev);
+
+ if (drv_data->cpu_type == TYPE_EXYNOS5250)
+ clk = devm_clk_get(dev, "usbhost");
+ else
+ clk = devm_clk_get(dev, "otg");
+
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Failed to get otg clock\n");
+ return PTR_ERR(clk);
+ }
+
+ sphy->dev = dev;
+
+ if (dev->of_node) {
+ ret = samsung_usbphy_parse_dt(sphy);
+ if (ret < 0)
+ return ret;
+ } else {
+ if (!pdata) {
+ dev_err(dev, "no platform data specified\n");
+ return -EINVAL;
+ }
+ }
+
+ sphy->plat = pdata;
+ sphy->regs = phy_base;
+ sphy->clk = clk;
+ sphy->drv_data = drv_data;
+ sphy->phy.dev = sphy->dev;
+ sphy->phy.label = "samsung-usbphy";
+ sphy->phy.init = samsung_usbphy_init;
+ sphy->phy.shutdown = samsung_usbphy_shutdown;
+ sphy->ref_clk_freq = samsung_usbphy_get_refclk_freq(sphy);
+
+ sphy->phy.otg = otg;
+ sphy->phy.otg->phy = &sphy->phy;
+ sphy->phy.otg->set_host = samsung_usbphy_set_host;
+
+ spin_lock_init(&sphy->lock);
+
+ platform_set_drvdata(pdev, sphy);
+
+ return usb_add_phy(&sphy->phy, USB_PHY_TYPE_USB2);
+}
+
+static int samsung_usbphy_remove(struct platform_device *pdev)
+{
+ struct samsung_usbphy *sphy = platform_get_drvdata(pdev);
+
+ usb_remove_phy(&sphy->phy);
+
+ if (sphy->pmuregs)
+ iounmap(sphy->pmuregs);
+ if (sphy->sysreg)
+ iounmap(sphy->sysreg);
+
+ return 0;
+}
+
+static const struct samsung_usbphy_drvdata usbphy_s3c64xx = {
+ .cpu_type = TYPE_S3C64XX,
+ .devphy_en_mask = S3C64XX_USBPHY_ENABLE,
+};
+
+static const struct samsung_usbphy_drvdata usbphy_exynos4 = {
+ .cpu_type = TYPE_EXYNOS4210,
+ .devphy_en_mask = EXYNOS_USBPHY_ENABLE,
+ .hostphy_en_mask = EXYNOS_USBPHY_ENABLE,
+};
+
+static struct samsung_usbphy_drvdata usbphy_exynos5 = {
+ .cpu_type = TYPE_EXYNOS5250,
+ .hostphy_en_mask = EXYNOS_USBPHY_ENABLE,
+ .hostphy_reg_offset = EXYNOS_USBHOST_PHY_CTRL_OFFSET,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id samsung_usbphy_dt_match[] = {
+ {
+ .compatible = "samsung,s3c64xx-usbphy",
+ .data = &usbphy_s3c64xx,
+ }, {
+ .compatible = "samsung,exynos4210-usbphy",
+ .data = &usbphy_exynos4,
+ }, {
+ .compatible = "samsung,exynos5250-usbphy",
+ .data = &usbphy_exynos5
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, samsung_usbphy_dt_match);
+#endif
+
+static struct platform_device_id samsung_usbphy_driver_ids[] = {
+ {
+ .name = "s3c64xx-usbphy",
+ .driver_data = (unsigned long)&usbphy_s3c64xx,
+ }, {
+ .name = "exynos4210-usbphy",
+ .driver_data = (unsigned long)&usbphy_exynos4,
+ }, {
+ .name = "exynos5250-usbphy",
+ .driver_data = (unsigned long)&usbphy_exynos5,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, samsung_usbphy_driver_ids);
+
+static struct platform_driver samsung_usbphy_driver = {
+ .probe = samsung_usbphy_probe,
+ .remove = samsung_usbphy_remove,
+ .id_table = samsung_usbphy_driver_ids,
+ .driver = {
+ .name = "samsung-usbphy",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(samsung_usbphy_dt_match),
+ },
+};
+
+module_platform_driver(samsung_usbphy_driver);
+
+MODULE_DESCRIPTION("Samsung USB phy controller");
+MODULE_AUTHOR("Praveen Paneri <p.paneri@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:samsung-usbphy");
diff --git a/drivers/usb/phy/tegra_usb_phy.c b/drivers/usb/phy/tegra_usb_phy.c
index 9d13c81754e0..5487d38481af 100644
--- a/drivers/usb/phy/tegra_usb_phy.c
+++ b/drivers/usb/phy/tegra_usb_phy.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
@@ -35,19 +36,6 @@
#define ULPI_VIEWPORT 0x170
-#define USB_PORTSC1 0x184
-#define USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
-#define USB_PORTSC1_PSPD(x) (((x) & 0x3) << 26)
-#define USB_PORTSC1_PHCD (1 << 23)
-#define USB_PORTSC1_WKOC (1 << 22)
-#define USB_PORTSC1_WKDS (1 << 21)
-#define USB_PORTSC1_WKCN (1 << 20)
-#define USB_PORTSC1_PTC(x) (((x) & 0xf) << 16)
-#define USB_PORTSC1_PP (1 << 12)
-#define USB_PORTSC1_SUSP (1 << 7)
-#define USB_PORTSC1_PE (1 << 2)
-#define USB_PORTSC1_CCS (1 << 0)
-
#define USB_SUSP_CTRL 0x400
#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
@@ -208,11 +196,6 @@ static struct tegra_utmip_config utmip_default[] = {
},
};
-static inline bool phy_is_ulpi(struct tegra_usb_phy *phy)
-{
- return (phy->instance == 1);
-}
-
static int utmip_pad_open(struct tegra_usb_phy *phy)
{
phy->pad_clk = clk_get_sys("utmip-pad", NULL);
@@ -221,7 +204,7 @@ static int utmip_pad_open(struct tegra_usb_phy *phy)
return PTR_ERR(phy->pad_clk);
}
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
phy->pad_regs = phy->regs;
} else {
phy->pad_regs = ioremap(TEGRA_USB_BASE, TEGRA_USB_SIZE);
@@ -236,7 +219,7 @@ static int utmip_pad_open(struct tegra_usb_phy *phy)
static void utmip_pad_close(struct tegra_usb_phy *phy)
{
- if (phy->instance != 0)
+ if (!phy->is_legacy_phy)
iounmap(phy->pad_regs);
clk_put(phy->pad_clk);
}
@@ -305,7 +288,7 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
unsigned long val;
void __iomem *base = phy->regs;
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
writel(val, base + USB_SUSP_CTRL);
@@ -315,13 +298,8 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
val = readl(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
writel(val, base + USB_SUSP_CTRL);
- }
-
- if (phy->instance == 2) {
- val = readl(base + USB_PORTSC1);
- val |= USB_PORTSC1_PHCD;
- writel(val, base + USB_PORTSC1);
- }
+ } else
+ tegra_ehci_set_phcd(&phy->u_phy, true);
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
@@ -332,7 +310,7 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
unsigned long val;
void __iomem *base = phy->regs;
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
writel(val, base + USB_SUSP_CTRL);
@@ -342,13 +320,8 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
val = readl(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
writel(val, base + USB_SUSP_CTRL);
- }
-
- if (phy->instance == 2) {
- val = readl(base + USB_PORTSC1);
- val &= ~USB_PORTSC1_PHCD;
- writel(val, base + USB_PORTSC1);
- }
+ } else
+ tegra_ehci_set_phcd(&phy->u_phy, false);
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
USB_PHY_CLK_VALID))
@@ -365,7 +338,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_RESET;
writel(val, base + USB_SUSP_CTRL);
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + USB1_LEGACY_CTRL);
val |= USB1_NO_LEGACY_MODE;
writel(val, base + USB1_LEGACY_CTRL);
@@ -440,16 +413,14 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
writel(val, base + UTMIP_BIAS_CFG1);
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + UTMIP_SPARE_CFG0);
if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE)
val &= ~FUSE_SETUP_SEL;
else
val |= FUSE_SETUP_SEL;
writel(val, base + UTMIP_SPARE_CFG0);
- }
-
- if (phy->instance == 2) {
+ } else {
val = readl(base + USB_SUSP_CTRL);
val |= UTMIP_PHY_ENABLE;
writel(val, base + USB_SUSP_CTRL);
@@ -459,7 +430,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val &= ~UTMIP_RESET;
writel(val, base + USB_SUSP_CTRL);
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + USB1_LEGACY_CTRL);
val &= ~USB1_VBUS_SENSE_CTL_MASK;
val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
@@ -472,11 +443,8 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
utmi_phy_clk_enable(phy);
- if (phy->instance == 2) {
- val = readl(base + USB_PORTSC1);
- val &= ~USB_PORTSC1_PTS(~0);
- writel(val, base + USB_PORTSC1);
- }
+ if (!phy->is_legacy_phy)
+ tegra_ehci_set_pts(&phy->u_phy, 0);
return 0;
}
@@ -621,10 +589,6 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
return ret;
}
- val = readl(base + USB_PORTSC1);
- val |= USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN;
- writel(val, base + USB_PORTSC1);
-
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
writel(val, base + USB_SUSP_CTRL);
@@ -639,17 +603,8 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
{
- unsigned long val;
- void __iomem *base = phy->regs;
struct tegra_ulpi_config *config = phy->config;
- /* Clear WKCN/WKDS/WKOC wake-on events that can cause the USB
- * Controller to immediately bring the ULPI PHY out of low power
- */
- val = readl(base + USB_PORTSC1);
- val &= ~(USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN);
- writel(val, base + USB_PORTSC1);
-
clk_disable(phy->clk);
return gpio_direction_output(config->reset_gpio, 0);
}
@@ -660,7 +615,7 @@ static int tegra_phy_init(struct usb_phy *x)
struct tegra_ulpi_config *ulpi_config;
int err;
- if (phy_is_ulpi(phy)) {
+ if (phy->is_ulpi_phy) {
ulpi_config = phy->config;
phy->clk = clk_get_sys(NULL, ulpi_config->clk);
if (IS_ERR(phy->clk)) {
@@ -698,7 +653,7 @@ static void tegra_usb_phy_close(struct usb_phy *x)
{
struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
- if (phy_is_ulpi(phy))
+ if (phy->is_ulpi_phy)
clk_put(phy->clk);
else
utmip_pad_close(phy);
@@ -709,7 +664,7 @@ static void tegra_usb_phy_close(struct usb_phy *x)
static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
{
- if (phy_is_ulpi(phy))
+ if (phy->is_ulpi_phy)
return ulpi_phy_power_on(phy);
else
return utmi_phy_power_on(phy);
@@ -717,7 +672,7 @@ static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
static int tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
{
- if (phy_is_ulpi(phy))
+ if (phy->is_ulpi_phy)
return ulpi_phy_power_off(phy);
else
return utmi_phy_power_off(phy);
@@ -739,8 +694,9 @@ struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
unsigned long parent_rate;
int i;
int err;
+ struct device_node *np = dev->of_node;
- phy = kmalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
+ phy = kzalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
if (!phy)
return ERR_PTR(-ENOMEM);
@@ -749,9 +705,16 @@ struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
phy->config = config;
phy->mode = phy_mode;
phy->dev = dev;
+ phy->is_legacy_phy =
+ of_property_read_bool(np, "nvidia,has-legacy-mode");
+ err = of_property_match_string(np, "phy_type", "ulpi");
+ if (err < 0)
+ phy->is_ulpi_phy = false;
+ else
+ phy->is_ulpi_phy = true;
if (!phy->config) {
- if (phy_is_ulpi(phy)) {
+ if (phy->is_ulpi_phy) {
pr_err("%s: ulpi phy configuration missing", __func__);
err = -EINVAL;
goto err0;
@@ -796,45 +759,40 @@ err0:
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_open);
-void tegra_usb_phy_preresume(struct tegra_usb_phy *phy)
+void tegra_usb_phy_preresume(struct usb_phy *x)
{
- if (!phy_is_ulpi(phy))
+ struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+
+ if (!phy->is_ulpi_phy)
utmi_phy_preresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_preresume);
-void tegra_usb_phy_postresume(struct tegra_usb_phy *phy)
+void tegra_usb_phy_postresume(struct usb_phy *x)
{
- if (!phy_is_ulpi(phy))
+ struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+
+ if (!phy->is_ulpi_phy)
utmi_phy_postresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_postresume);
-void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy,
+void tegra_ehci_phy_restore_start(struct usb_phy *x,
enum tegra_usb_phy_port_speed port_speed)
{
- if (!phy_is_ulpi(phy))
+ struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+
+ if (!phy->is_ulpi_phy)
utmi_phy_restore_start(phy, port_speed);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_start);
-void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy)
+void tegra_ehci_phy_restore_end(struct usb_phy *x)
{
- if (!phy_is_ulpi(phy))
+ struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+
+ if (!phy->is_ulpi_phy)
utmi_phy_restore_end(phy);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_end);
-void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy)
-{
- if (!phy_is_ulpi(phy))
- utmi_phy_clk_disable(phy);
-}
-EXPORT_SYMBOL_GPL(tegra_usb_phy_clk_disable);
-
-void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy)
-{
- if (!phy_is_ulpi(phy))
- utmi_phy_clk_enable(phy);
-}
-EXPORT_SYMBOL_GPL(tegra_usb_phy_clk_enable);
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index 6f4afa436381..29feb00d7f39 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -4,7 +4,7 @@
config USB_RENESAS_USBHS
tristate 'Renesas USBHS controller'
- depends on USB && USB_GADGET
+ depends on USB && USB_GADGET && GENERIC_HARDIRQS
default n
help
Renesas USBHS is a discrete USB host and peripheral controller chip
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 38bce046f4d0..cfd205036aba 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -14,6 +14,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -443,11 +444,9 @@ static int usbhs_probe(struct platform_device *pdev)
return -ENOMEM;
}
- priv->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!priv->base) {
- dev_err(&pdev->dev, "ioremap error.\n");
- return -ENOMEM;
- }
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
/*
* care platform info
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index dd41f61893ef..78fca978b2d0 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -545,15 +545,6 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
return 0;
}
-static void usbhsg_uep_init(struct usbhsg_gpriv *gpriv)
-{
- int i;
- struct usbhsg_uep *uep;
-
- usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
- uep->pipe = NULL;
-}
-
/*
*
* usb_ep_ops
@@ -610,7 +601,12 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
- return usbhsg_pipe_disable(uep);
+ usbhsg_pipe_disable(uep);
+
+ uep->pipe->mod_private = NULL;
+ uep->pipe = NULL;
+
+ return 0;
}
static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
@@ -761,9 +757,8 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
usbhs_pipe_init(priv,
usbhsg_dma_map_ctrl);
usbhs_fifo_init(priv);
- usbhsg_uep_init(gpriv);
- /* dcp init */
+ /* dcp init instead of usbhsg_ep_enable() */
dcp->pipe = usbhs_dcp_malloc(priv);
dcp->pipe->mod_private = dcp;
usbhs_pipe_config_update(dcp->pipe, 0, 0, 64);
@@ -825,7 +820,7 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhsg_pipe_disable(dcp);
+ usbhsg_ep_disable(&dcp->ep);
dev_dbg(dev, "stop gadget\n");
@@ -905,7 +900,7 @@ static int usbhsg_set_selfpowered(struct usb_gadget *gadget, int is_self)
return 0;
}
-static struct usb_gadget_ops usbhsg_gadget_ops = {
+static const struct usb_gadget_ops usbhsg_gadget_ops = {
.get_frame = usbhsg_get_frame,
.set_selfpowered = usbhsg_set_selfpowered,
.udc_start = usbhsg_gadget_start,
@@ -998,6 +993,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
*/
usbhsg_for_each_uep_with_dcp(uep, gpriv, i) {
uep->gpriv = gpriv;
+ uep->pipe = NULL;
snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i);
uep->ep.name = uep->ep_name;
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 3d3cd6ca2689..b86815421c8d 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -661,9 +661,10 @@ static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
status = -ESHUTDOWN;
urb->actual_length = pkt->actual;
- usbhsh_ureq_free(hpriv, ureq);
usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
+ usbhsh_ureq_free(hpriv, ureq);
+
usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));
usb_hcd_unlink_urb_from_ep(hcd, urb);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 76f462241738..17b7f9ae36ad 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -4,7 +4,7 @@
menuconfig USB_SERIAL
tristate "USB Serial Converter support"
- depends on USB
+ depends on USB && TTY
---help---
Say Y here if you have a USB device that provides normal serial
ports, or acts like a serial device, and you want to connect it to
@@ -647,6 +647,18 @@ config USB_SERIAL_VIVOPAY_SERIAL
To compile this driver as a module, choose M here: the
module will be called vivopay-serial.
+config USB_SERIAL_XSENS_MT
+ tristate "Xsens motion tracker serial interface driver"
+ help
+ Say Y here if you want to use Xsens motion trackers.
+
+ This driver supports the new generation of motion trackers
+ by Xsens. Older devices can be accessed using the FTDI_SIO
+ driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xsens_mt.
+
config USB_SERIAL_ZIO
tristate "ZIO Motherboard USB serial interface driver"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 3b3e7308d476..eaf5ca14dfeb 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -61,5 +61,6 @@ obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
+obj-$(CONFIG_USB_SERIAL_XSENS_MT) += xsens_mt.o
obj-$(CONFIG_USB_SERIAL_ZIO) += zio.o
obj-$(CONFIG_USB_SERIAL_ZTE) += zte_ev.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 6d110a3bc7e7..6e320cec397d 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -119,9 +119,8 @@ static int aircable_probe(struct usb_serial *serial,
return 0;
}
-static int aircable_process_packet(struct tty_struct *tty,
- struct usb_serial_port *port, int has_headers,
- char *packet, int len)
+static int aircable_process_packet(struct usb_serial_port *port,
+ int has_headers, char *packet, int len)
{
if (has_headers) {
len -= HCI_HEADER_LENGTH;
@@ -132,7 +131,7 @@ static int aircable_process_packet(struct tty_struct *tty,
return 0;
}
- tty_insert_flip_string(tty, packet, len);
+ tty_insert_flip_string(&port->port, packet, len);
return len;
}
@@ -141,28 +140,22 @@ static void aircable_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *data = (char *)urb->transfer_buffer;
- struct tty_struct *tty;
int has_headers;
int count;
int len;
int i;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
has_headers = (urb->actual_length > 2 && data[0] == RX_HEADER_0);
count = 0;
for (i = 0; i < urb->actual_length; i += HCI_COMPLETE_FRAME) {
len = min_t(int, urb->actual_length - i, HCI_COMPLETE_FRAME);
- count += aircable_process_packet(tty, port, has_headers,
+ count += aircable_process_packet(port, has_headers,
&data[i], len);
}
if (count)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver aircable_device = {
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index a88882c0e237..cbd904b8fba5 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -674,7 +674,6 @@ static void ark3116_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct ark3116_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
@@ -689,10 +688,6 @@ static void ark3116_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
if (lsr & UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI)
tty_flag = TTY_BREAK;
@@ -703,12 +698,11 @@ static void ark3116_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (lsr & UART_LSR_OE)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver ark3116_device = {
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index b72a4c166705..84217e78ded4 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -242,7 +242,6 @@ static void belkin_sa_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct belkin_sa_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
unsigned char status;
@@ -259,10 +258,6 @@ static void belkin_sa_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
if (status & BELKIN_SA_LSR_ERR) {
/* Break takes precedence over parity, which takes precedence
* over framing errors. */
@@ -276,13 +271,12 @@ static void belkin_sa_process_read_urb(struct urb *urb)
/* Overrun is special, not associated with a char. */
if (status & BELKIN_SA_LSR_OE)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static void belkin_sa_set_termios(struct tty_struct *tty,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f14736f647ff..edc0f0dcad83 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
+ { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 69a4fa1cee25..629bd2894506 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -324,7 +324,6 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
short todo;
int result;
@@ -337,16 +336,10 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (!tty) {
- dev_dbg(dev, "%s - ignoring since device not open\n", __func__);
- return;
- }
if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
spin_lock(&priv->lock);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index fd8c35fd452e..8efa19d0e9fb 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1214,10 +1214,10 @@ static void cypress_read_int_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
/* process read if there is data other than line status */
- if (tty && bytes > i) {
- tty_insert_flip_string_fixed_flag(tty, data + i,
+ if (bytes > i) {
+ tty_insert_flip_string_fixed_flag(&port->port, data + i,
tty_flag, bytes - i);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 45d4af62967f..ebe45fa0ed50 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1399,9 +1399,7 @@ static void digi_read_bulk_callback(struct urb *urb)
static int digi_read_inb_callback(struct urb *urb)
{
-
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
struct digi_port *priv = usb_get_serial_port_data(port);
int opcode = ((unsigned char *)urb->transfer_buffer)[0];
int len = ((unsigned char *)urb->transfer_buffer)[1];
@@ -1425,7 +1423,6 @@ static int digi_read_inb_callback(struct urb *urb)
return -1;
}
- tty = tty_port_tty_get(&port->port);
spin_lock(&priv->dp_port_lock);
/* check for throttle; if set, do not resubmit read urb */
@@ -1435,13 +1432,13 @@ static int digi_read_inb_callback(struct urb *urb)
priv->dp_throttle_restart = 1;
/* receive data */
- if (tty && opcode == DIGI_CMD_RECEIVE_DATA) {
+ if (opcode == DIGI_CMD_RECEIVE_DATA) {
/* get flag from port_status */
flag = 0;
/* overrun is special, not associated with a char */
if (port_status & DIGI_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
@@ -1455,13 +1452,12 @@ static int digi_read_inb_callback(struct urb *urb)
/* data length is len-1 (one byte of len is port_status) */
--len;
if (len > 0) {
- tty_insert_flip_string_fixed_flag(tty, data, flag,
- len);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string_fixed_flag(&port->port, data,
+ flag, len);
+ tty_flip_buffer_push(&port->port);
}
}
spin_unlock(&priv->dp_port_lock);
- tty_kref_put(tty);
if (opcode == DIGI_CMD_RECEIVE_DISABLE)
dev_dbg(&port->dev, "%s: got RECEIVE_DISABLE\n", __func__);
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 6e4eb57d0177..b1b2dc64b50b 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -100,7 +100,6 @@ static void f81232_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct f81232_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
@@ -117,10 +116,6 @@ static void f81232_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (line_status & UART_BREAK_ERROR)
@@ -133,19 +128,19 @@ static void f81232_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
if (!usb_serial_handle_sysrq_char(port, data[i]))
- tty_insert_flip_char(tty, data[i], tty_flag);
+ tty_insert_flip_char(&port->port, data[i],
+ tty_flag);
} else {
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static int set_control_lines(struct usb_device *dev, u8 value)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0a373b3ae96a..edd162df49ca 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -584,6 +584,7 @@ static struct usb_device_id id_table_combined [] = {
/*
* ELV devices:
*/
+ { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
@@ -670,6 +671,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
@@ -875,6 +877,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
+ /* Crucible Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1882,24 +1886,22 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- mutex_lock(&port->serial->disc_mutex);
- if (!port->serial->disconnected) {
- /* Disable flow control */
- if (!on && usb_control_msg(port->serial->dev,
+ /* Disable flow control */
+ if (!on) {
+ if (usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->interface, NULL, 0,
WDR_TIMEOUT) < 0) {
- dev_err(&port->dev, "error from flowcontrol urb\n");
+ dev_err(&port->dev, "error from flowcontrol urb\n");
}
- /* drop RTS and DTR */
- if (on)
- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
- else
- clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
- mutex_unlock(&port->serial->disc_mutex);
+ /* drop RTS and DTR */
+ if (on)
+ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ else
+ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
/*
@@ -1958,9 +1960,8 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
#define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
-static int ftdi_process_packet(struct tty_struct *tty,
- struct usb_serial_port *port, struct ftdi_private *priv,
- char *packet, int len)
+static int ftdi_process_packet(struct usb_serial_port *port,
+ struct ftdi_private *priv, char *packet, int len)
{
int i;
char status;
@@ -2010,7 +2011,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
/* Overrun is special, not associated with a char */
if (packet[1] & FTDI_RS_OE) {
priv->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
}
@@ -2029,10 +2030,10 @@ static int ftdi_process_packet(struct tty_struct *tty,
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
- tty_insert_flip_char(tty, *ch, flag);
+ tty_insert_flip_char(&port->port, *ch, flag);
}
} else {
- tty_insert_flip_string_fixed_flag(tty, ch, flag, len);
+ tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
}
return len;
@@ -2041,25 +2042,19 @@ static int ftdi_process_packet(struct tty_struct *tty,
static void ftdi_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
struct ftdi_private *priv = usb_get_serial_port_data(port);
char *data = (char *)urb->transfer_buffer;
int i;
int len;
int count = 0;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
for (i = 0; i < urb->actual_length; i += priv->max_packet_size) {
len = min_t(int, urb->actual_length - i, priv->max_packet_size);
- count += ftdi_process_packet(tty, port, priv, &data[i], len);
+ count += ftdi_process_packet(port, priv, &data[i], len);
}
if (count)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 049b6e715fa4..9d359e189a64 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -147,6 +147,11 @@
#define XSENS_CONVERTER_6_PID 0xD38E
#define XSENS_CONVERTER_7_PID 0xD38F
+/**
+ * Zolix (www.zolix.com.cn) product ids
+ */
+#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */
+
/*
* NDI (www.ndigital.com) product ids
*/
@@ -204,7 +209,7 @@
/*
* ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
- * All of these devices use FTDI's vendor ID (0x0403).
+ * Almost all of these devices use FTDI's vendor ID (0x0403).
* Further IDs taken from ELV Windows .inf file.
*
* The previously included PID for the UO 100 module was incorrect.
@@ -212,6 +217,8 @@
*
* Armin Laeuger originally sent the PID for the UM 100 module.
*/
+#define FTDI_ELV_VID 0x1B1F /* ELV AG */
+#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */
#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
@@ -1259,3 +1266,9 @@
* ATI command output: Cinterion MC55i
*/
#define FTDI_CINTERION_MC55I_PID 0xA951
+
+/*
+ * Product: Comet Caller ID decoder
+ * Manufacturer: Crucible Technologies
+ */
+#define FTDI_CT_COMET_PID 0x8e08
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 203358d7e7bc..1a07b12ef341 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -252,14 +252,11 @@ static inline int isAbortTrfCmnd(const unsigned char *buf)
static void send_to_tty(struct usb_serial_port *port,
char *data, unsigned int actual_length)
{
- struct tty_struct *tty = tty_port_tty_get(&port->port);
-
- if (tty && actual_length) {
+ if (actual_length) {
usb_serial_debug_data(&port->dev, __func__, actual_length, data);
- tty_insert_flip_string(tty, data, actual_length);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&port->port, data, actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
}
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 2ea70a631996..4c5c23f1cae5 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -313,30 +313,24 @@ EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urbs);
void usb_serial_generic_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
char *ch = (char *)urb->transfer_buffer;
int i;
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
/* The per character mucking around with sysrq path is too slow for
stuff like 3G modems, so shortcircuit it in the 99.9999999% of cases
where the USB serial is not a console anyway */
if (!port->port.console || !port->sysrq)
- tty_insert_flip_string(tty, ch, urb->actual_length);
+ tty_insert_flip_string(&port->port, ch, urb->actual_length);
else {
for (i = 0; i < urb->actual_length; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
- tty_insert_flip_char(tty, *ch, TTY_NORMAL);
+ tty_insert_flip_char(&port->port, *ch, TTY_NORMAL);
}
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_process_read_urb);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 7b770c7f8b11..b00e5cbf741f 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -232,8 +232,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength);
static void process_rcvd_status(struct edgeport_serial *edge_serial,
__u8 byte2, __u8 byte3);
-static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length);
+static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
+ int length);
static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr);
static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData,
__u8 lsr, __u8 data);
@@ -1752,7 +1752,6 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
struct device *dev = &edge_serial->serial->dev->dev;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
- struct tty_struct *tty;
__u16 lastBufferLength;
__u16 rxLen;
@@ -1860,14 +1859,11 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port->open) {
- tty = tty_port_tty_get(
- &edge_port->port->port);
- if (tty) {
- dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
- __func__, rxLen, edge_serial->rxPort);
- edge_tty_recv(&edge_serial->serial->dev->dev, tty, buffer, rxLen);
- tty_kref_put(tty);
- }
+ dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
+ __func__, rxLen,
+ edge_serial->rxPort);
+ edge_tty_recv(edge_port->port, buffer,
+ rxLen);
edge_port->icount.rx += rxLen;
}
buffer += rxLen;
@@ -2017,20 +2013,20 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
* edge_tty_recv
* this function passes data on to the tty flip buffer
*****************************************************************************/
-static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length)
+static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
+ int length)
{
int cnt;
- cnt = tty_insert_flip_string(tty, data, length);
+ cnt = tty_insert_flip_string(&port->port, data, length);
if (cnt < length) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
}
data += cnt;
length -= cnt;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
@@ -2086,14 +2082,9 @@ static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData,
}
/* Place LSR data byte into Rx buffer */
- if (lsrData) {
- struct tty_struct *tty =
- tty_port_tty_get(&edge_port->port->port);
- if (tty) {
- edge_tty_recv(&edge_port->port->dev, tty, &data, 1);
- tty_kref_put(tty);
- }
- }
+ if (lsrData)
+ edge_tty_recv(edge_port->port, &data, 1);
+
/* update input line counters */
icount = &edge_port->icount;
if (newLsr & LSR_BREAK)
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 58184f3de686..c23776679f70 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -201,8 +201,8 @@ static int closing_wait = EDGE_CLOSING_WAIT;
static bool ignore_cpu_rev;
static int default_uart_mode; /* RS232 */
-static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length);
+static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
+ int length);
static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);
@@ -521,62 +521,6 @@ exit_is_tx_active:
return bytes_left;
}
-static void chase_port(struct edgeport_port *port, unsigned long timeout,
- int flush)
-{
- int baud_rate;
- struct tty_struct *tty = tty_port_tty_get(&port->port->port);
- struct usb_serial *serial = port->port->serial;
- wait_queue_t wait;
- unsigned long flags;
-
- if (!timeout)
- timeout = (HZ * EDGE_CLOSING_WAIT)/100;
-
- /* wait for data to drain from the buffer */
- spin_lock_irqsave(&port->ep_lock, flags);
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (kfifo_len(&port->write_fifo) == 0
- || timeout == 0 || signal_pending(current)
- || serial->disconnected)
- /* disconnect */
- break;
- spin_unlock_irqrestore(&port->ep_lock, flags);
- timeout = schedule_timeout(timeout);
- spin_lock_irqsave(&port->ep_lock, flags);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
- if (flush)
- kfifo_reset_out(&port->write_fifo);
- spin_unlock_irqrestore(&port->ep_lock, flags);
- tty_kref_put(tty);
-
- /* wait for data to drain from the device */
- timeout += jiffies;
- while ((long)(jiffies - timeout) < 0 && !signal_pending(current)
- && !serial->disconnected) {
- /* not disconnected */
- if (!tx_active(port))
- break;
- msleep(10);
- }
-
- /* disconnected */
- if (serial->disconnected)
- return;
-
- /* wait one more character time, based on baud rate */
- /* (tx_active doesn't seem to wait for the last byte) */
- baud_rate = port->baud_rate;
- if (baud_rate == 0)
- baud_rate = 50;
- msleep(max(1, DIV_ROUND_UP(10000, baud_rate)));
-}
-
static int choose_config(struct usb_device *dev)
{
/*
@@ -1540,7 +1484,6 @@ static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
struct async_icount *icount;
__u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR |
LSR_FRM_ERR | LSR_BREAK));
- struct tty_struct *tty;
dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, new_lsr);
@@ -1554,13 +1497,8 @@ static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);
/* Place LSR data byte into Rx buffer */
- if (lsr_data) {
- tty = tty_port_tty_get(&edge_port->port->port);
- if (tty) {
- edge_tty_recv(&edge_port->port->dev, tty, &data, 1);
- tty_kref_put(tty);
- }
- }
+ if (lsr_data)
+ edge_tty_recv(edge_port->port, &data, 1);
/* update input line counters */
icount = &edge_port->icount;
@@ -1676,7 +1614,6 @@ static void edge_bulk_in_callback(struct urb *urb)
struct edgeport_port *edge_port = urb->context;
struct device *dev = &edge_port->port->dev;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
int retval = 0;
int port_number;
int status = urb->status;
@@ -1715,17 +1652,16 @@ static void edge_bulk_in_callback(struct urb *urb)
++data;
}
- tty = tty_port_tty_get(&edge_port->port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
if (edge_port->close_pending)
dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
__func__);
else
- edge_tty_recv(dev, tty, data, urb->actual_length);
+ edge_tty_recv(edge_port->port, data,
+ urb->actual_length);
edge_port->icount.rx += urb->actual_length;
}
- tty_kref_put(tty);
exit:
/* continue read unless stopped */
@@ -1740,16 +1676,16 @@ exit:
dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval);
}
-static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length)
+static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
+ int length)
{
int queued;
- queued = tty_insert_flip_string(tty, data, length);
+ queued = tty_insert_flip_string(&port->port, data, length);
if (queued < length)
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - queued);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
static void edge_bulk_out_callback(struct urb *urb)
@@ -1941,6 +1877,8 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
++edge_serial->num_ports_open;
+ port->port.drain_delay = 1;
+
goto release_es_lock;
unlink_int_urb:
@@ -1956,6 +1894,7 @@ static void edge_close(struct usb_serial_port *port)
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
struct usb_serial *serial = port->serial;
+ unsigned long flags;
int port_number;
edge_serial = usb_get_serial_data(port->serial);
@@ -1967,12 +1906,12 @@ static void edge_close(struct usb_serial_port *port)
* this flag and dump add read data */
edge_port->close_pending = 1;
- /* chase the port close and flush */
- chase_port(edge_port, (HZ * closing_wait) / 100, 1);
-
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
edge_port->ep_write_urb_in_use = 0;
+ spin_lock_irqsave(&edge_port->ep_lock, flags);
+ kfifo_reset_out(&edge_port->write_fifo);
+ spin_unlock_irqrestore(&edge_port->ep_lock, flags);
/* assuming we can still talk to the device,
* send a close port command to it */
@@ -2098,16 +2037,21 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
+ int ret;
if (edge_port == NULL)
return 0;
- if (edge_port->close_pending == 1)
- return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
chars = kfifo_len(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
+ if (!chars) {
+ ret = tx_active(edge_port);
+ if (ret > 0)
+ chars = ret;
+ }
+
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
return chars;
}
@@ -2445,10 +2389,15 @@ static int get_serial_info(struct edgeport_port *edge_port,
struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
+ unsigned cwait;
if (!retinfo)
return -EFAULT;
+ cwait = edge_port->port->port.closing_wait;
+ if (cwait != ASYNC_CLOSING_WAIT_NONE)
+ cwait = jiffies_to_msecs(closing_wait) / 10;
+
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
@@ -2459,7 +2408,7 @@ static int get_serial_info(struct edgeport_port *edge_port,
tmp.xmit_fifo_size = edge_port->port->bulk_out_size;
tmp.baud_base = 9600;
tmp.close_delay = 5*HZ;
- tmp.closing_wait = closing_wait;
+ tmp.closing_wait = cwait;
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
@@ -2514,8 +2463,7 @@ static void edge_break(struct tty_struct *tty, int break_state)
int status;
int bv = 0; /* Off */
- /* chase the port close */
- chase_port(edge_port, 0, 0);
+ tty_wait_until_sent(tty, 0);
if (break_state == -1)
bv = 1; /* On */
@@ -2588,6 +2536,8 @@ static int edge_port_probe(struct usb_serial_port *port)
return ret;
}
+ port->port.closing_wait = msecs_to_jiffies(closing_wait * 10);
+
return 0;
}
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index e24e2d4f4c1b..716930ab1bb1 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -287,7 +287,6 @@ static void ir_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
if (!urb->actual_length)
return;
@@ -302,12 +301,8 @@ static void ir_process_read_urb(struct urb *urb)
if (urb->actual_length == 1)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
- tty_insert_flip_string(tty, data + 1, urb->actual_length - 1);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, data + 1, urb->actual_length - 1);
+ tty_flip_buffer_push(&port->port);
}
static void ir_set_termios_callback(struct urb *urb)
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 1e1fbed65ef2..ff77027160aa 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -581,7 +581,6 @@ static void read_buf_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
int status = urb->status;
if (status) {
@@ -592,14 +591,12 @@ static void read_buf_callback(struct urb *urb)
}
dev_dbg(&port->dev, "%s - %i chars to write\n", __func__, urb->actual_length);
- tty = tty_port_tty_get(&port->port);
if (data == NULL)
dev_dbg(&port->dev, "%s - data is NULL !!!\n", __func__);
- if (tty && urb->actual_length && data) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length && data) {
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
iuu_led_activity_on(urb);
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 97bc49f68efd..1fd1935c8316 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -291,21 +291,19 @@ static void usa26_indat_callback(struct urb *urb)
int i, err;
int endpoint;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
- dev_dbg(&urb->dev->dev,"%s - nonzero status: %x on endpoint %d.\n",
+ dev_dbg(&urb->dev->dev, "%s - nonzero status: %x on endpoint %d.\n",
__func__, status, endpoint);
return;
}
port = urb->context;
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no errors on individual bytes, only
@@ -315,7 +313,7 @@ static void usa26_indat_callback(struct urb *urb)
else
err = 0;
for (i = 1; i < urb->actual_length ; ++i)
- tty_insert_flip_char(tty, data[i], err);
+ tty_insert_flip_char(&port->port, data[i], err);
} else {
/* some bytes had errors, every byte has status */
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
@@ -328,12 +326,12 @@ static void usa26_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- tty_insert_flip_char(tty, data[i+1], flag);
+ tty_insert_flip_char(&port->port, data[i+1],
+ flag);
}
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -446,7 +444,6 @@ static void usa28_indat_callback(struct urb *urb)
{
int err;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data;
struct keyspan_port_private *p_priv;
int status = urb->status;
@@ -469,12 +466,11 @@ static void usa28_indat_callback(struct urb *urb)
p_priv = usb_get_serial_port_data(port);
data = urb->transfer_buffer;
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -532,7 +528,7 @@ static void usa28_instat_callback(struct urb *urb)
/*
dev_dbg(&urb->dev->dev,
- "%s %x %x %x %x %x %x %x %x %x %x %x %x", __func__,
+ "%s %x %x %x %x %x %x %x %x %x %x %x %x", __func__,
data[0], data[1], data[2], data[3], data[4], data[5],
data[6], data[7], data[8], data[9], data[10], data[11]);
*/
@@ -669,7 +665,6 @@ static void usa49_indat_callback(struct urb *urb)
int i, err;
int endpoint;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -682,12 +677,11 @@ static void usa49_indat_callback(struct urb *urb)
}
port = urb->context;
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no error on any byte */
- tty_insert_flip_string(tty, data + 1,
+ tty_insert_flip_string(&port->port, data + 1,
urb->actual_length - 1);
} else {
/* some bytes had errors, every byte has status */
@@ -700,12 +694,12 @@ static void usa49_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- tty_insert_flip_char(tty, data[i+1], flag);
+ tty_insert_flip_char(&port->port, data[i+1],
+ flag);
}
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -718,7 +712,6 @@ static void usa49wg_indat_callback(struct urb *urb)
int i, len, x, err;
struct usb_serial *serial;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -743,7 +736,6 @@ static void usa49wg_indat_callback(struct urb *urb)
return;
}
port = serial->port[data[i++]];
- tty = tty_port_tty_get(&port->port);
len = data[i++];
/* 0x80 bit is error flag */
@@ -751,7 +743,8 @@ static void usa49wg_indat_callback(struct urb *urb)
/* no error on any byte */
i++;
for (x = 1; x < len ; ++x)
- tty_insert_flip_char(tty, data[i++], 0);
+ tty_insert_flip_char(&port->port,
+ data[i++], 0);
} else {
/*
* some bytes had errors, every byte has status
@@ -765,13 +758,12 @@ static void usa49wg_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(&port->port,
data[i+1], flag);
i += 2;
}
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
}
@@ -792,7 +784,6 @@ static void usa90_indat_callback(struct urb *urb)
int endpoint;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -808,12 +799,12 @@ static void usa90_indat_callback(struct urb *urb)
p_priv = usb_get_serial_port_data(port);
if (urb->actual_length) {
- tty = tty_port_tty_get(&port->port);
/* if current mode is DMA, looks like usa28 format
otherwise looks like usa26 data format */
if (p_priv->baud > 57600)
- tty_insert_flip_string(tty, data, urb->actual_length);
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
else {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
@@ -824,8 +815,8 @@ static void usa90_indat_callback(struct urb *urb)
else
err = 0;
for (i = 1; i < urb->actual_length ; ++i)
- tty_insert_flip_char(tty, data[i],
- err);
+ tty_insert_flip_char(&port->port,
+ data[i], err);
} else {
/* some bytes had errors, every byte has status */
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
@@ -838,13 +829,12 @@ static void usa90_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- tty_insert_flip_char(tty, data[i+1],
- flag);
+ tty_insert_flip_char(&port->port,
+ data[i+1], flag);
}
}
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
/* Resubmit urb so we continue receiving */
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 41b01092af07..3b17d5d13dc8 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -138,7 +138,6 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work)
static void keyspan_pda_rx_interrupt(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int retval;
int status = urb->status;
@@ -163,14 +162,12 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
/* see if the message is data or a status interrupt */
switch (data[0]) {
case 0:
- tty = tty_port_tty_get(&port->port);
/* rest of message is rx data */
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data + 1,
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data + 1,
urb->actual_length - 1);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
break;
case 1:
/* status interrupt */
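
All of the receive-path hunks above follow the same conversion: the tty flip-buffer helpers now take the driver-owned struct tty_port rather than a struct tty_struct, so the callbacks no longer need the tty_port_tty_get()/tty_kref_put() dance and can buffer data even while no tty is attached. A minimal sketch of the converted pattern, using a hypothetical bulk-in completion handler (illustrative only, not part of any driver touched here):

#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/tty_flip.h>

/* Hypothetical completion handler showing the tty_port-based receive path. */
static void example_read_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	unsigned char *data = urb->transfer_buffer;

	if (urb->status)
		return;

	if (urb->actual_length) {
		/* Buffer against the port; no tty reference is needed. */
		tty_insert_flip_string(&port->port, data, urb->actual_length);
		tty_flip_buffer_push(&port->port);
	}

	/* Resubmit so we keep receiving. */
	usb_submit_urb(urb, GFP_ATOMIC);
}
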
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fc9e14a1e9b3..769d910ae0a5 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -389,7 +389,6 @@ static void klsi_105_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
unsigned len;
/* empty urbs seem to happen, we ignore them */
@@ -401,19 +400,14 @@ static void klsi_105_process_read_urb(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
len = get_unaligned_le16(data);
if (len > urb->actual_length - KLSI_HDR_LEN) {
dev_dbg(&port->dev, "%s - packet length mismatch\n", __func__);
len = urb->actual_length - KLSI_HDR_LEN;
}
- tty_insert_flip_string(tty, data + KLSI_HDR_LEN, len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, data + KLSI_HDR_LEN, len);
+ tty_flip_buffer_push(&port->port);
}
static void klsi_105_set_termios(struct tty_struct *tty,
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index b747ba615d0b..903d938e174b 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -324,7 +324,6 @@ static void kobil_read_int_callback(struct urb *urb)
{
int result;
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -333,8 +332,7 @@ static void kobil_read_int_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* BEGIN DEBUG */
/*
@@ -353,10 +351,9 @@ static void kobil_read_int_callback(struct urb *urb)
*/
/* END DEBUG */
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index b6911757c855..a64d420f687b 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -499,19 +499,15 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
unsigned int control_state;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
- mutex_lock(&port->serial->disc_mutex);
- if (!port->serial->disconnected) {
- /* drop DTR and RTS */
- spin_lock_irq(&priv->lock);
- if (on)
- priv->control_state |= TIOCM_DTR | TIOCM_RTS;
- else
- priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
- control_state = priv->control_state;
- spin_unlock_irq(&priv->lock);
- mct_u232_set_modem_ctrl(port, control_state);
- }
- mutex_unlock(&port->serial->disc_mutex);
+ spin_lock_irq(&priv->lock);
+ if (on)
+ priv->control_state |= TIOCM_DTR | TIOCM_RTS;
+ else
+ priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
+ control_state = priv->control_state;
+ spin_unlock_irq(&priv->lock);
+
+ mct_u232_set_modem_ctrl(port, control_state);
}
static void mct_u232_close(struct usb_serial_port *port)
@@ -531,7 +527,6 @@ static void mct_u232_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int retval;
int status = urb->status;
@@ -561,13 +556,9 @@ static void mct_u232_read_int_callback(struct urb *urb)
*/
if (urb->transfer_buffer_length > 2) {
if (urb->actual_length) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
- }
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
goto exit;
}
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 3d258448c29a..bf3c7a23553e 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -95,7 +95,6 @@ static void metrousb_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int throttled = 0;
int result = 0;
@@ -124,15 +123,13 @@ static void metrousb_read_int_callback(struct urb *urb)
/* Set the data read from the usb port into the serial port buffer. */
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* Loop through the data copying each byte to the tty layer. */
- tty_insert_flip_string(tty, data, urb->actual_length);
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
/* Force the data to the tty layer. */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* Set any port variables. */
spin_lock_irqsave(&metro_priv->lock, flags);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index f57a6b1fe787..e0ebec3b5d6a 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -899,7 +899,6 @@ static void mos7720_bulk_in_callback(struct urb *urb)
int retval;
unsigned char *data ;
struct usb_serial_port *port;
- struct tty_struct *tty;
int status = urb->status;
if (status) {
@@ -913,12 +912,10 @@ static void mos7720_bulk_in_callback(struct urb *urb)
data = urb->transfer_buffer;
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
if (port->read_urb->status != -EINPROGRESS) {
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 66d9e088d9d9..809fb329eca5 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -744,7 +744,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
struct usb_serial *serial;
struct usb_serial_port *port;
struct moschip_port *mos7840_port;
- struct tty_struct *tty;
int status = urb->status;
mos7840_port = urb->context;
@@ -773,12 +772,9 @@ static void mos7840_bulk_in_callback(struct urb *urb)
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
if (urb->actual_length) {
- tty = tty_port_tty_get(&mos7840_port->port->port);
- if (tty) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ struct tty_port *tport = &mos7840_port->port->port;
+ tty_insert_flip_string(tport, data, urb->actual_length);
+ tty_flip_buffer_push(tport);
mos7840_port->icount.rx += urb->actual_length;
smp_wmb();
dev_dbg(&port->dev, "mos7840_port->icount.rx is %d:\n", mos7840_port->icount.rx);
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 1566f8f500ae..38725fc8c2c8 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -32,7 +32,6 @@ static void navman_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
int status = urb->status;
int result;
@@ -55,12 +54,10 @@ static void navman_read_int_callback(struct urb *urb)
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 7818af931a48..1e1cafe287e4 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -174,13 +174,9 @@ static void omninet_read_bulk_callback(struct urb *urb)
}
if (urb->actual_length && header->oh_len) {
- struct tty_struct *tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, data + OMNINET_DATAOFFSET,
- header->oh_len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_insert_flip_string(&port->port, data + OMNINET_DATAOFFSET,
+ header->oh_len);
+ tty_flip_buffer_push(&port->port);
}
/* Continue trying to always read */
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index c6bfb83efb1e..e13e1a4d3e1e 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -51,15 +51,8 @@ struct opticon_private {
static void opticon_process_data_packet(struct usb_serial_port *port,
const unsigned char *buf, size_t len)
{
- struct tty_struct *tty;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
- tty_insert_flip_string(tty, buf, len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, buf, len);
+ tty_flip_buffer_push(&port->port);
}
static void opticon_process_status_packet(struct usb_serial_port *port,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e6f87b76c715..f7d339d8187b 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_CC864_DUAL 0x1005
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
+#define TELIT_PRODUCT_LE920 0x1200
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -288,6 +289,7 @@ static void option_instat_callback(struct urb *urb);
#define ALCATEL_VENDOR_ID 0x1bbb
#define ALCATEL_PRODUCT_X060S_X200 0x0000
#define ALCATEL_PRODUCT_X220_X500D 0x0017
+#define ALCATEL_PRODUCT_L100V 0x011e
#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
@@ -429,9 +431,12 @@ static void option_instat_callback(struct urb *urb);
#define MEDIATEK_VENDOR_ID 0x0e8d
#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
+#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
#define MEDIATEK_PRODUCT_7208_1COM 0x7101
#define MEDIATEK_PRODUCT_7208_2COM 0x7102
+#define MEDIATEK_PRODUCT_7103_2COM 0x7103
+#define MEDIATEK_PRODUCT_7106_2COM 0x7106
#define MEDIATEK_PRODUCT_FP_1COM 0x0003
#define MEDIATEK_PRODUCT_FP_2COM 0x0023
#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
@@ -441,6 +446,18 @@ static void option_instat_callback(struct urb *urb);
#define CELLIENT_VENDOR_ID 0x2692
#define CELLIENT_PRODUCT_MEN200 0x9005
+/* Hyundai Petatel Inc. products */
+#define PETATEL_VENDOR_ID 0x1ff4
+#define PETATEL_PRODUCT_NP10T 0x600e
+
+/* TP-LINK Incorporated products */
+#define TPLINK_VENDOR_ID 0x2357
+#define TPLINK_PRODUCT_MA180 0x0201
+
+/* Changhong products */
+#define CHANGHONG_VENDOR_ID 0x2077
+#define CHANGHONG_PRODUCT_CH690 0x7001
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -462,6 +479,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
static const struct option_blacklist_info alcatel_x200_blacklist = {
.sendsetup = BIT(0) | BIT(1),
+ .reserved = BIT(4),
};
static const struct option_blacklist_info zte_0037_blacklist = {
@@ -522,6 +540,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
.reserved = BIT(3) | BIT(4),
};
+static const struct option_blacklist_info telit_le920_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(5),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -553,8 +576,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
@@ -772,6 +801,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -922,8 +953,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1189,7 +1222,16 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
},
- { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
@@ -1294,7 +1336,15 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index d217fd6ee43f..a958fd41b5b3 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -820,7 +820,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct oti6858_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
int status = urb->status;
@@ -835,12 +834,10 @@ static void oti6858_read_bulk_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (tty != NULL && urb->actual_length > 0) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length > 0) {
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* schedule the interrupt urb */
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 600241901361..54adc9125e5c 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -772,7 +772,6 @@ static void pl2303_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct pl2303_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
@@ -789,10 +788,6 @@ static void pl2303_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (line_status & UART_BREAK_ERROR)
@@ -805,19 +800,19 @@ static void pl2303_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
if (!usb_serial_handle_sysrq_char(port, data[i]))
- tty_insert_flip_char(tty, data[i], tty_flag);
+ tty_insert_flip_char(&port->port, data[i],
+ tty_flag);
} else {
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
/* All of the device info needed for the PL2303 SIO serial converter */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index aa148c21ea40..24662547dc5b 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
+ {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
/* Gobi 2000 devices */
{USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index d152be97d041..00e6c9bac8a3 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -609,7 +609,6 @@ void qt2_process_read_urb(struct urb *urb)
struct qt2_serial_private *serial_priv;
struct usb_serial_port *port;
struct qt2_port_private *port_priv;
- struct tty_struct *tty;
bool escapeflag;
unsigned char *ch;
int i;
@@ -620,15 +619,11 @@ void qt2_process_read_urb(struct urb *urb)
return;
ch = urb->transfer_buffer;
- tty = NULL;
serial = urb->context;
serial_priv = usb_get_serial_data(serial);
port = serial->port[serial_priv->current_port];
port_priv = usb_get_serial_port_data(port);
- if (port_priv->is_open)
- tty = tty_port_tty_get(&port->port);
-
for (i = 0; i < urb->actual_length; i++) {
ch = (unsigned char *)urb->transfer_buffer + i;
if ((i <= (len - 3)) &&
@@ -666,10 +661,7 @@ void qt2_process_read_urb(struct urb *urb)
__func__);
break;
}
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_flip_buffer_push(&port->port);
newport = *(ch + 3);
@@ -683,10 +675,6 @@ void qt2_process_read_urb(struct urb *urb)
serial_priv->current_port = newport;
port = serial->port[serial_priv->current_port];
port_priv = usb_get_serial_port_data(port);
- if (port_priv->is_open)
- tty = tty_port_tty_get(&port->port);
- else
- tty = NULL;
i += 3;
escapeflag = true;
break;
@@ -697,8 +685,8 @@ void qt2_process_read_urb(struct urb *urb)
escapeflag = true;
break;
case QT2_CONTROL_ESCAPE:
- tty_buffer_request_room(tty, 2);
- tty_insert_flip_string(tty, ch, 2);
+ tty_buffer_request_room(&port->port, 2);
+ tty_insert_flip_string(&port->port, ch, 2);
i += 2;
escapeflag = true;
break;
@@ -712,16 +700,11 @@ void qt2_process_read_urb(struct urb *urb)
continue;
}
- if (tty) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, ch, 1);
- }
+ tty_buffer_request_room(&port->port, 1);
+ tty_insert_flip_string(&port->port, ch, 1);
}
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_flip_buffer_push(&port->port);
}
static void qt2_write_bulk_callback(struct urb *urb)
@@ -945,19 +928,17 @@ static void qt2_dtr_rts(struct usb_serial_port *port, int on)
struct usb_device *dev = port->serial->dev;
struct qt2_port_private *port_priv = usb_get_serial_port_data(port);
- mutex_lock(&port->serial->disc_mutex);
- if (!port->serial->disconnected) {
- /* Disable flow control */
- if (!on && qt2_setregister(dev, port_priv->device_port,
+ /* Disable flow control */
+ if (!on) {
+ if (qt2_setregister(dev, port_priv->device_port,
UART_MCR, 0) < 0)
dev_warn(&port->dev, "error from flowcontrol urb\n");
- /* drop RTS and DTR */
- if (on)
- update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0);
- else
- update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS);
}
- mutex_unlock(&port->serial->disc_mutex);
+ /* drop RTS and DTR */
+ if (on)
+ update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0);
+ else
+ update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS);
}
static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index c949ce6ef0c6..21cd7bf2a8cc 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -207,38 +207,31 @@ static void safe_process_read_urb(struct urb *urb)
unsigned char *data = urb->transfer_buffer;
unsigned char length = urb->actual_length;
int actual_length;
- struct tty_struct *tty;
__u16 fcs;
if (!length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
if (!safe)
goto out;
fcs = fcs_compute10(data, length, CRC10_INITFCS);
if (fcs) {
dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
- goto err;
+ return;
}
actual_length = data[length - 2] >> 2;
if (actual_length > (length - 2)) {
dev_err(&port->dev, "%s - inconsistent lengths %d:%d\n",
__func__, actual_length, length);
- goto err;
+ return;
}
dev_info(&urb->dev->dev, "%s - actual: %d\n", __func__, actual_length);
length = actual_length;
out:
- tty_insert_flip_string(tty, data, length);
- tty_flip_buffer_push(tty);
-err:
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, data, length);
+ tty_flip_buffer_push(&port->port);
}
static int safe_prepare_write_buffer(struct usb_serial_port *port,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index af06f2f5f38b..c13f6e747748 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -569,7 +569,6 @@ static void sierra_indat_callback(struct urb *urb)
int err;
int endpoint;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -581,16 +580,12 @@ static void sierra_indat_callback(struct urb *urb)
" endpoint %02x\n", __func__, status, endpoint);
} else {
if (urb->actual_length) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
- usb_serial_debug_data(&port->dev, __func__,
- urb->actual_length, data);
- }
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
+ tty_flip_buffer_push(&port->port);
+
+ usb_serial_debug_data(&port->dev, __func__,
+ urb->actual_length, data);
} else {
dev_dbg(&port->dev, "%s: empty read urb"
" received\n", __func__);
@@ -861,19 +856,13 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
static void sierra_dtr_rts(struct usb_serial_port *port, int on)
{
- struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
portdata = usb_get_serial_port_data(port);
portdata->rts_state = on;
portdata->dtr_state = on;
- if (serial->dev) {
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
- sierra_send_setup(port);
- mutex_unlock(&serial->disc_mutex);
- }
+ sierra_send_setup(port);
}
static int sierra_startup(struct usb_serial *serial)
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index a42536af1256..91ff8e3bddbd 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -462,7 +462,6 @@ static void spcp8x5_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
u8 status;
@@ -481,9 +480,6 @@ static void spcp8x5_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
if (status & UART_STATE_TRANSIENT_MASK) {
/* break takes precedence over parity, which takes precedence
@@ -498,17 +494,21 @@ static void spcp8x5_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (status & UART_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-
- if (status & UART_DCD)
- usb_serial_handle_dcd_change(port, tty,
- priv->line_status & MSR_STATUS_LINE_DCD);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
+
+ if (status & UART_DCD) {
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ usb_serial_handle_dcd_change(port, tty,
+ priv->line_status & MSR_STATUS_LINE_DCD);
+ tty_kref_put(tty);
+ }
+ }
}
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 4543ea350229..b57cf841c5b6 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -506,19 +506,16 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_device *dev = port->serial->dev;
- mutex_lock(&port->serial->disc_mutex);
- if (!port->serial->disconnected) {
- /* Disable flow control */
- if (!on &&
- ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
+ /* Disable flow control */
+ if (!on) {
+ if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
dev_err(&port->dev, "error from flowcontrol urb\n");
- /* drop RTS and DTR */
- if (on)
- set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
- else
- clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
}
- mutex_unlock(&port->serial->disc_mutex);
+ /* drop RTS and DTR */
+ if (on)
+ set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ else
+ clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
}
static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
@@ -582,8 +579,7 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
}
-static int ssu100_process_packet(struct urb *urb,
- struct tty_struct *tty)
+static void ssu100_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *packet = (char *)urb->transfer_buffer;
@@ -598,7 +594,8 @@ static int ssu100_process_packet(struct urb *urb,
if (packet[2] == 0x00) {
ssu100_update_lsr(port, packet[3], &flag);
if (flag == TTY_OVERRUN)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0,
+ TTY_OVERRUN);
}
if (packet[2] == 0x01)
ssu100_update_msr(port, packet[3]);
@@ -609,34 +606,17 @@ static int ssu100_process_packet(struct urb *urb,
ch = packet;
if (!len)
- return 0; /* status only */
+ return; /* status only */
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
- tty_insert_flip_char(tty, *ch, flag);
+ tty_insert_flip_char(&port->port, *ch, flag);
}
} else
- tty_insert_flip_string_fixed_flag(tty, ch, flag, len);
-
- return len;
-}
-
-static void ssu100_process_read_urb(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
- int count;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
- count = ssu100_process_packet(urb, tty);
+ tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
- if (count)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver ssu100_device = {
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 701fffa8431f..be05e6caf9a3 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -48,7 +48,6 @@ static void symbol_int_callback(struct urb *urb)
unsigned char *data = urb->transfer_buffer;
struct usb_serial_port *port = priv->port;
int status = urb->status;
- struct tty_struct *tty;
int result;
int data_length;
@@ -82,12 +81,8 @@ static void symbol_int_callback(struct urb *urb)
* we pretty much just ignore the size and send everything
* else to the tty layer.
*/
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, &data[1], data_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_insert_flip_string(&port->port, &data[1], data_length);
+ tty_flip_buffer_push(&port->port);
} else {
dev_dbg(&priv->udev->dev,
"Improper amount of data received from the device, "
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index f2530d2ef3c4..39cb9b807c3c 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -121,8 +121,8 @@ static void ti_interrupt_callback(struct urb *urb);
static void ti_bulk_in_callback(struct urb *urb);
static void ti_bulk_out_callback(struct urb *urb);
-static void ti_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length);
+static void ti_recv(struct usb_serial_port *port, unsigned char *data,
+ int length);
static void ti_send(struct ti_port *tport);
static int ti_set_mcr(struct ti_port *tport, unsigned int mcr);
static int ti_get_lsr(struct ti_port *tport);
@@ -1118,7 +1118,6 @@ static void ti_bulk_in_callback(struct urb *urb)
struct device *dev = &urb->dev->dev;
int status = urb->status;
int retval = 0;
- struct tty_struct *tty;
switch (status) {
case 0:
@@ -1145,24 +1144,18 @@ static void ti_bulk_in_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- if (urb->actual_length) {
- usb_serial_debug_data(dev, __func__, urb->actual_length,
- urb->transfer_buffer);
+ if (urb->actual_length) {
+ usb_serial_debug_data(dev, __func__, urb->actual_length,
+ urb->transfer_buffer);
- if (!tport->tp_is_open)
- dev_dbg(dev, "%s - port closed, dropping data\n",
- __func__);
- else
- ti_recv(&urb->dev->dev, tty,
- urb->transfer_buffer,
- urb->actual_length);
- spin_lock(&tport->tp_lock);
- tport->tp_icount.rx += urb->actual_length;
- spin_unlock(&tport->tp_lock);
- }
- tty_kref_put(tty);
+ if (!tport->tp_is_open)
+ dev_dbg(dev, "%s - port closed, dropping data\n",
+ __func__);
+ else
+ ti_recv(port, urb->transfer_buffer, urb->actual_length);
+ spin_lock(&tport->tp_lock);
+ tport->tp_icount.rx += urb->actual_length;
+ spin_unlock(&tport->tp_lock);
}
exit:
@@ -1210,24 +1203,23 @@ static void ti_bulk_out_callback(struct urb *urb)
}
-static void ti_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length)
+static void ti_recv(struct usb_serial_port *port, unsigned char *data,
+ int length)
{
int cnt;
do {
- cnt = tty_insert_flip_string(tty, data, length);
+ cnt = tty_insert_flip_string(&port->port, data, length);
if (cnt < length) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
if (cnt == 0)
break;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
data += cnt;
length -= cnt;
} while (length > 0);
-
}
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 64bda135ba7e..a19ed74d770d 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -361,15 +361,21 @@ static int serial_write_room(struct tty_struct *tty)
static int serial_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial *serial = port->serial;
+ int count = 0;
dev_dbg(tty->dev, "%s - port %d\n", __func__, port->number);
+ mutex_lock(&serial->disc_mutex);
/* if the device was unplugged then any remaining characters
fell out of the connector ;) */
- if (port->serial->disconnected)
- return 0;
- /* pass on to the driver specific version of this function */
- return port->serial->type->chars_in_buffer(tty);
+ if (serial->disconnected)
+ count = 0;
+ else
+ count = serial->type->chars_in_buffer(tty);
+ mutex_unlock(&serial->disc_mutex);
+
+ return count;
}
static void serial_throttle(struct tty_struct *tty)
@@ -688,10 +694,20 @@ static int serial_carrier_raised(struct tty_port *port)
static void serial_dtr_rts(struct tty_port *port, int on)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
- struct usb_serial_driver *drv = p->serial->type;
+ struct usb_serial *serial = p->serial;
+ struct usb_serial_driver *drv = serial->type;
- if (drv->dtr_rts)
+ if (!drv->dtr_rts)
+ return;
+ /*
+ * Work-around bug in the tty-layer which can result in dtr_rts
+ * being called after a disconnect (and tty_unregister_device
+ * has returned). Remove once bug has been squashed.
+ */
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected)
drv->dtr_rts(p, on);
+ mutex_unlock(&serial->disc_mutex);
}
static const struct tty_port_operations serial_port_ops = {
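
The serial_dtr_rts() change above takes disc_mutex in the usb-serial core and only calls the driver's dtr_rts() while the device is still connected, which is why the driver-side dtr_rts() implementations elsewhere in this series (mct_u232, quatech2, ssu100, sierra, usb_wwan) drop their own disc_mutex handling. A hypothetical driver-side sketch under that assumption; example_set_mctrl() is a stand-in for a driver's real modem-control helper, not an existing API:

#include <linux/tty.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Stand-in for a driver's real modem-control request (hypothetical). */
static int example_set_mctrl(struct usb_device *dev, unsigned int state)
{
	/* A real driver would send a vendor-specific control message here. */
	return 0;
}

/*
 * Hypothetical dtr_rts() callback: the usb-serial core now holds
 * disc_mutex and has checked serial->disconnected before calling us,
 * so no disconnect handling is needed in the driver.
 */
static void example_dtr_rts(struct usb_serial_port *port, int on)
{
	struct usb_device *dev = port->serial->dev;

	if (on)
		example_set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
	else
		example_set_mctrl(dev, 0);
}
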
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 01c94aada56c..571965aa1cc0 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -38,7 +38,6 @@
void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
{
- struct usb_serial *serial = port->serial;
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata;
@@ -48,12 +47,11 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
return;
portdata = usb_get_serial_port_data(port);
- mutex_lock(&serial->disc_mutex);
+ /* FIXME: locking */
portdata->rts_state = on;
portdata->dtr_state = on;
- if (serial->dev)
- intfdata->send_setup(port);
- mutex_unlock(&serial->disc_mutex);
+
+ intfdata->send_setup(port);
}
EXPORT_SYMBOL(usb_wwan_dtr_rts);
@@ -275,7 +273,6 @@ static void usb_wwan_indat_callback(struct urb *urb)
int err;
int endpoint;
struct usb_serial_port *port;
- struct tty_struct *tty;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -288,16 +285,12 @@ static void usb_wwan_indat_callback(struct urb *urb)
dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n",
__func__, status, endpoint);
} else {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dev_dbg(dev, "%s: empty read urb received\n", __func__);
- tty_kref_put(tty);
- }
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
+ tty_flip_buffer_push(&port->port);
+ } else
+ dev_dbg(dev, "%s: empty read urb received\n", __func__);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/xsens_mt.c b/drivers/usb/serial/xsens_mt.c
new file mode 100644
index 000000000000..1d5798d891bc
--- /dev/null
+++ b/drivers/usb/serial/xsens_mt.c
@@ -0,0 +1,86 @@
+/*
+ * Xsens MT USB driver
+ *
+ * Copyright (C) 2013 Xsens <info@xsens.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/uaccess.h>
+
+#define XSENS_VID 0x2639
+
+#define MTi_10_IMU_PID 0x0001
+#define MTi_20_VRU_PID 0x0002
+#define MTi_30_AHRS_PID 0x0003
+
+#define MTi_100_IMU_PID 0x0011
+#define MTi_200_VRU_PID 0x0012
+#define MTi_300_AHRS_PID 0x0013
+
+#define MTi_G_700_GPS_INS_PID 0x0017
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(XSENS_VID, MTi_10_IMU_PID) },
+ { USB_DEVICE(XSENS_VID, MTi_20_VRU_PID) },
+ { USB_DEVICE(XSENS_VID, MTi_30_AHRS_PID) },
+
+ { USB_DEVICE(XSENS_VID, MTi_100_IMU_PID) },
+ { USB_DEVICE(XSENS_VID, MTi_200_VRU_PID) },
+ { USB_DEVICE(XSENS_VID, MTi_300_AHRS_PID) },
+
+ { USB_DEVICE(XSENS_VID, MTi_G_700_GPS_INS_PID) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static int has_required_endpoints(const struct usb_host_interface *interface)
+{
+ __u8 i;
+ int has_bulk_in = 0;
+ int has_bulk_out = 0;
+
+ for (i = 0; i < interface->desc.bNumEndpoints; ++i) {
+ if (usb_endpoint_is_bulk_in(&interface->endpoint[i].desc))
+ has_bulk_in = 1;
+ else if (usb_endpoint_is_bulk_out(&interface->endpoint[i].desc))
+ has_bulk_out = 1;
+ }
+
+ return has_bulk_in && has_bulk_out;
+}
+
+static int xsens_mt_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ if (!has_required_endpoints(serial->interface->cur_altsetting))
+ return -ENODEV;
+ return 0;
+}
+
+static struct usb_serial_driver xsens_mt_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "xsens_mt",
+ },
+ .id_table = id_table,
+ .num_ports = 1,
+
+ .probe = xsens_mt_probe,
+};
+
+static struct usb_serial_driver * const serial_drivers[] = {
+ &xsens_mt_device, NULL
+};
+
+module_usb_serial_driver(serial_drivers, id_table);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 105d900150c1..7ab9046ae0ec 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
return 0;
}
-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us)
+/* This places the HUAWEI usb dongles in multi-port mode */
+static int usb_stor_huawei_feature_init(struct us_data *us)
{
int result;
@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
US_DEBUGP("Huawei mode set result is %d\n", result);
return 0;
}
+
+/*
+ * This sends a SCSI switch command, 'rewind', to the Huawei dongle.
+ * The first time the dongle receives it, the dongle reboots immediately;
+ * after rebooting it ignores the command, so there is no need to
+ * read its response.
+ */
+static int usb_stor_huawei_scsi_init(struct us_data *us)
+{
+ int result = 0;
+ int act_len = 0;
+ struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
+ char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcbw->Tag = 0;
+ bcbw->DataTransferLength = 0;
+ bcbw->Flags = bcbw->Lun = 0;
+ bcbw->Length = sizeof(rewind_cmd);
+ memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
+ memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
+
+ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
+ US_BULK_CB_WRAP_LEN, &act_len);
+ US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
+ return result;
+}
+
+/*
+ * This checks whether the device is one of the supported Huawei
+ * USB dongles.  Huawei assigns the product IDs below to all of
+ * its mobile broadband dongles, including future models.
+ * A device whose product ID does not fall within this list is
+ * therefore not a Huawei mobile broadband dongle and needs no
+ * mode switch.
+ */
+static int usb_stor_huawei_dongles_pid(struct us_data *us)
+{
+ struct usb_interface_descriptor *idesc;
+ int idProduct;
+
+ idesc = &us->pusb_intf->cur_altsetting->desc;
+ idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+ /* If the first interface is the CD-ROM port, the dongle is
+ * still in single-port mode and a switch command needs
+ * to be sent. */
+ if (idesc && idesc->bInterfaceNumber == 0) {
+ if ((idProduct == 0x1001)
+ || (idProduct == 0x1003)
+ || (idProduct == 0x1004)
+ || (idProduct >= 0x1401 && idProduct <= 0x1500)
+ || (idProduct >= 0x1505 && idProduct <= 0x1600)
+ || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int usb_stor_huawei_init(struct us_data *us)
+{
+ int result = 0;
+
+ if (usb_stor_huawei_dongles_pid(us)) {
+ if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
+ result = usb_stor_huawei_scsi_init(us);
+ else
+ result = usb_stor_huawei_feature_init(us);
+ }
+ return result;
+}
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index 529327fbb06b..5376d4fc76f0 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
* flash reader */
int usb_stor_ucr61s2b_init(struct us_data *us);
-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us);
+/* This places the HUAWEI usb dongles in multi-port mode */
+int usb_stor_huawei_init(struct us_data *us);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 98b98eef7527..d966b59f7d7b 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -66,6 +66,8 @@ enum {
DATA_OUT_URB_INFLIGHT = (1 << 10),
COMMAND_COMPLETED = (1 << 11),
COMMAND_ABORTED = (1 << 12),
+ UNLINK_DATA_URBS = (1 << 13),
+ IS_IN_WORK_LIST = (1 << 14),
};
/* Overrides scsi_pointer */
@@ -82,11 +84,36 @@ struct uas_cmd_info {
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp);
static void uas_do_work(struct work_struct *work);
+static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
static DECLARE_WORK(uas_work, uas_do_work);
static DEFINE_SPINLOCK(uas_work_lock);
static LIST_HEAD(uas_work_list);
+static void uas_unlink_data_urbs(struct uas_dev_info *devinfo,
+ struct uas_cmd_info *cmdinfo)
+{
+ unsigned long flags;
+
+ /*
+ * The UNLINK_DATA_URBS flag makes sure uas_try_complete
+ * (called by urb completion) doesn't release cmdinfo
+ * underneath us.
+ */
+ spin_lock_irqsave(&devinfo->lock, flags);
+ cmdinfo->state |= UNLINK_DATA_URBS;
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+
+ if (cmdinfo->data_in_urb)
+ usb_unlink_urb(cmdinfo->data_in_urb);
+ if (cmdinfo->data_out_urb)
+ usb_unlink_urb(cmdinfo->data_out_urb);
+
+ spin_lock_irqsave(&devinfo->lock, flags);
+ cmdinfo->state &= ~UNLINK_DATA_URBS;
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+}
+
static void uas_do_work(struct work_struct *work)
{
struct uas_cmd_info *cmdinfo;
@@ -106,6 +133,8 @@ static void uas_do_work(struct work_struct *work)
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
spin_lock_irqsave(&devinfo->lock, flags);
err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
+ if (!err)
+ cmdinfo->state &= ~IS_IN_WORK_LIST;
spin_unlock_irqrestore(&devinfo->lock, flags);
if (err) {
list_del(&cmdinfo->list);
@@ -117,6 +146,45 @@ static void uas_do_work(struct work_struct *work)
}
}
+static void uas_abort_work(struct uas_dev_info *devinfo)
+{
+ struct uas_cmd_info *cmdinfo;
+ struct uas_cmd_info *temp;
+ struct list_head list;
+ unsigned long flags;
+
+ spin_lock_irq(&uas_work_lock);
+ list_replace_init(&uas_work_list, &list);
+ spin_unlock_irq(&uas_work_lock);
+
+ spin_lock_irqsave(&devinfo->lock, flags);
+ list_for_each_entry_safe(cmdinfo, temp, &list, list) {
+ struct scsi_pointer *scp = (void *)cmdinfo;
+ struct scsi_cmnd *cmnd = container_of(scp,
+ struct scsi_cmnd, SCp);
+ struct uas_dev_info *di = (void *)cmnd->device->hostdata;
+
+ if (di == devinfo) {
+ cmdinfo->state |= COMMAND_ABORTED;
+ cmdinfo->state &= ~IS_IN_WORK_LIST;
+ if (devinfo->resetting) {
+ /* uas_stat_cmplt() will not do that
+ * when a device reset is in
+ * progress */
+ cmdinfo->state &= ~COMMAND_INFLIGHT;
+ }
+ uas_try_complete(cmnd, __func__);
+ } else {
+ /* not our uas device, relink into list */
+ list_del(&cmdinfo->list);
+ spin_lock_irq(&uas_work_lock);
+ list_add_tail(&cmdinfo->list, &uas_work_list);
+ spin_unlock_irq(&uas_work_lock);
+ }
+ }
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+}
+
static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
{
struct sense_iu *sense_iu = urb->transfer_buffer;
@@ -168,7 +236,7 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *caller)
struct uas_cmd_info *ci = (void *)&cmnd->SCp;
scmd_printk(KERN_INFO, cmnd, "%s %p tag %d, inflight:"
- "%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
caller, cmnd, cmnd->request->tag,
(ci->state & SUBMIT_STATUS_URB) ? " s-st" : "",
(ci->state & ALLOC_DATA_IN_URB) ? " a-in" : "",
@@ -181,7 +249,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *caller)
(ci->state & DATA_IN_URB_INFLIGHT) ? " IN" : "",
(ci->state & DATA_OUT_URB_INFLIGHT) ? " OUT" : "",
(ci->state & COMMAND_COMPLETED) ? " done" : "",
- (ci->state & COMMAND_ABORTED) ? " abort" : "");
+ (ci->state & COMMAND_ABORTED) ? " abort" : "",
+ (ci->state & UNLINK_DATA_URBS) ? " unlink": "",
+ (ci->state & IS_IN_WORK_LIST) ? " work" : "");
}
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
@@ -192,7 +262,8 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
WARN_ON(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & (COMMAND_INFLIGHT |
DATA_IN_URB_INFLIGHT |
- DATA_OUT_URB_INFLIGHT))
+ DATA_OUT_URB_INFLIGHT |
+ UNLINK_DATA_URBS))
return -EBUSY;
BUG_ON(cmdinfo->state & COMMAND_COMPLETED);
cmdinfo->state |= COMMAND_COMPLETED;
@@ -217,6 +288,7 @@ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
if (err) {
spin_lock(&uas_work_lock);
list_add_tail(&cmdinfo->list, &uas_work_list);
+ cmdinfo->state |= IS_IN_WORK_LIST;
spin_unlock(&uas_work_lock);
schedule_work(&uas_work);
}
@@ -274,16 +346,9 @@ static void uas_stat_cmplt(struct urb *urb)
uas_sense(urb, cmnd);
if (cmnd->result != 0) {
/* cancel data transfers on error */
- if (cmdinfo->state & DATA_IN_URB_INFLIGHT) {
- spin_unlock_irqrestore(&devinfo->lock, flags);
- usb_unlink_urb(cmdinfo->data_in_urb);
- spin_lock_irqsave(&devinfo->lock, flags);
- }
- if (cmdinfo->state & DATA_OUT_URB_INFLIGHT) {
- spin_unlock_irqrestore(&devinfo->lock, flags);
- usb_unlink_urb(cmdinfo->data_out_urb);
- spin_lock_irqsave(&devinfo->lock, flags);
- }
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ uas_unlink_data_urbs(devinfo, cmdinfo);
+ spin_lock_irqsave(&devinfo->lock, flags);
}
cmdinfo->state &= ~COMMAND_INFLIGHT;
uas_try_complete(cmnd, __func__);
@@ -579,6 +644,12 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
+ if (devinfo->resetting) {
+ cmnd->result = DID_ERROR << 16;
+ cmnd->scsi_done(cmnd);
+ return 0;
+ }
+
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->cmnd) {
spin_unlock_irqrestore(&devinfo->lock, flags);
@@ -623,6 +694,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
}
spin_lock(&uas_work_lock);
list_add_tail(&cmdinfo->list, &uas_work_list);
+ cmdinfo->state |= IS_IN_WORK_LIST;
spin_unlock(&uas_work_lock);
schedule_work(&uas_work);
}
@@ -689,8 +761,23 @@ static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
uas_log_cmd_state(cmnd, __func__);
spin_lock_irqsave(&devinfo->lock, flags);
cmdinfo->state |= COMMAND_ABORTED;
- spin_unlock_irqrestore(&devinfo->lock, flags);
- ret = uas_eh_task_mgmt(cmnd, "ABORT TASK", TMF_ABORT_TASK);
+ if (cmdinfo->state & IS_IN_WORK_LIST) {
+ spin_lock(&uas_work_lock);
+ list_del(&cmdinfo->list);
+ cmdinfo->state &= ~IS_IN_WORK_LIST;
+ spin_unlock(&uas_work_lock);
+ }
+ if (cmdinfo->state & COMMAND_INFLIGHT) {
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ ret = uas_eh_task_mgmt(cmnd, "ABORT TASK", TMF_ABORT_TASK);
+ } else {
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ uas_unlink_data_urbs(devinfo, cmdinfo);
+ spin_lock_irqsave(&devinfo->lock, flags);
+ uas_try_complete(cmnd, __func__);
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ ret = SUCCESS;
+ }
return ret;
}
@@ -709,6 +796,7 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
int err;
devinfo->resetting = 1;
+ uas_abort_work(devinfo);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
@@ -903,6 +991,8 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
shost->max_cmd_len = 16 + 252;
shost->max_id = 1;
+ shost->max_lun = 256;
+ shost->max_channel = 0;
shost->sg_tablesize = udev->bus->sg_tablesize;
devinfo->intf = intf;
@@ -954,10 +1044,12 @@ static void uas_disconnect(struct usb_interface *intf)
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
- scsi_remove_host(shost);
+ devinfo->resetting = 1;
+ uas_abort_work(devinfo);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
+ scsi_remove_host(shost);
uas_free_streams(devinfo);
kfree(devinfo);
}
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 2c8553026222..65a6a75066a8 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index d305a5aa3a5d..72923b56bbf6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1527,335 +1527,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
/* Reported by fangxiaozhi <huananhu@huawei.com>
* This brings the HUAWEI data card devices into multi-port mode
*/
-UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
+UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
"HUAWEI MOBILE",
"Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
0),
/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt> */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 31b3e1a61bbd..d6bee407af02 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
.useTransport = use_transport, \
}
+#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
+ vendor_name, product_name, use_protocol, use_transport, \
+ init_function, Flags) \
+{ \
+ .vendorName = vendor_name, \
+ .productName = product_name, \
+ .useProtocol = use_protocol, \
+ .useTransport = use_transport, \
+ .initFunction = init_function, \
+}
+
static struct us_unusual_dev us_unusual_dev_list[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
@@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids =
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
+#undef UNUSUAL_VENDOR_INTF
#ifdef CONFIG_LOCKDEP
@@ -976,6 +988,9 @@ int usb_stor_probe2(struct us_data *us)
if (us->fflags & US_FL_SINGLE_LUN)
us->max_lun = 0;
+ if (!(us->fflags & US_FL_SCM_MULT_TARG))
+ us_to_host(us)->max_id = 1;
+
/* Find the endpoints and calculate pipe values */
result = get_pipes(us);
if (result)
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index b78a526910fb..5ef8ce74aae4 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -41,6 +41,20 @@
#define USUAL_DEV(useProto, useTrans) \
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) }
+/* Define a device that is matched by Vendor ID and interface descriptors */
+#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
+ vendorName, productName, useProtocol, useTransport, \
+ initFunction, flags) \
+{ \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+ | USB_DEVICE_ID_MATCH_VENDOR, \
+ .idVendor = (id_vendor), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr), \
+ .driver_info = (flags) \
+}
+
struct usb_device_id usb_storage_usb_ids[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
@@ -50,6 +64,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
+#undef UNUSUAL_VENDOR_INTF
/*
* The table of devices to ignore
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 57c01ab09ad8..6ef94bce8c0d 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -695,9 +695,9 @@ error_dto_alloc:
cnt--;
error_seg_kzalloc:
/* use the fact that cnt is left at where it failed */
- for (; cnt > 0; cnt--) {
- if (xfer->is_inbound == 0)
- kfree(xfer->seg[cnt]->dto_urb);
+ for (; cnt >= 0; cnt--) {
+ if (xfer->seg[cnt] && xfer->is_inbound == 0)
+ usb_free_urb(xfer->seg[cnt]->dto_urb);
kfree(xfer->seg[cnt]);
}
error_segs_kzalloc:
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
index 4d688c750801..3eca6ceb9844 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/uwb/lc-rc.c
@@ -40,9 +40,9 @@
#include "uwb-internal.h"
-static int uwb_rc_index_match(struct device *dev, void *data)
+static int uwb_rc_index_match(struct device *dev, const void *data)
{
- int *index = data;
+ const int *index = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc->index == *index)
@@ -334,9 +334,9 @@ void uwb_rc_rm(struct uwb_rc *rc)
}
EXPORT_SYMBOL_GPL(uwb_rc_rm);
-static int find_rc_try_get(struct device *dev, void *data)
+static int find_rc_try_get(struct device *dev, const void *data)
{
- struct uwb_rc *target_rc = data;
+ const struct uwb_rc *target_rc = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc == NULL) {
@@ -386,9 +386,9 @@ static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
return rc;
}
-static int find_rc_grandpa(struct device *dev, void *data)
+static int find_rc_grandpa(struct device *dev, const void *data)
{
- struct device *grandpa_dev = data;
+ const struct device *grandpa_dev = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc->uwb_dev.dev.parent->parent == grandpa_dev) {
@@ -419,7 +419,7 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
struct device *dev;
struct uwb_rc *rc = NULL;
- dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
+ dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
find_rc_grandpa);
if (dev)
rc = dev_get_drvdata(dev);
@@ -432,9 +432,9 @@ EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
*
* @returns the pointer to the radio controller, properly referenced
*/
-static int find_rc_dev(struct device *dev, void *data)
+static int find_rc_dev(struct device *dev, const void *data)
{
- struct uwb_dev_addr *addr = data;
+ const struct uwb_dev_addr *addr = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc == NULL) {
@@ -453,8 +453,7 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
struct device *dev;
struct uwb_rc *rc = NULL;
- dev = class_find_device(&uwb_rc_class, NULL, (void *)addr,
- find_rc_dev);
+ dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
if (dev)
rc = dev_get_drvdata(dev);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6c119944bbb6..b28e66c4376a 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -43,6 +43,10 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
u16 cmd;
u8 msix_pos;
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
vdev->reset_works = (pci_reset_function(pdev) == 0);
pci_save_state(pdev);
vdev->pci_saved_state = pci_store_saved_state(pdev);
@@ -51,8 +55,11 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
__func__, dev_name(&pdev->dev));
ret = vfio_config_init(vdev);
- if (ret)
- goto out;
+ if (ret) {
+ pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state);
+ pci_disable_device(pdev);
+ return ret;
+ }
if (likely(!nointxmask))
vdev->pci_2_3 = pci_intx_mask_supported(pdev);
@@ -77,24 +84,15 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
} else
vdev->msix_bar = 0xFF;
- ret = pci_enable_device(pdev);
- if (ret)
- goto out;
-
- return ret;
-
-out:
- kfree(vdev->pci_saved_state);
- vdev->pci_saved_state = NULL;
- vfio_config_free(vdev);
- return ret;
+ return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
+ struct pci_dev *pdev = vdev->pdev;
int bar;
- pci_disable_device(vdev->pdev);
+ pci_disable_device(pdev);
vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
VFIO_IRQ_SET_ACTION_TRIGGER,
@@ -104,22 +102,40 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
vfio_config_free(vdev);
- pci_reset_function(vdev->pdev);
-
- if (pci_load_and_free_saved_state(vdev->pdev,
- &vdev->pci_saved_state) == 0)
- pci_restore_state(vdev->pdev);
- else
- pr_info("%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&vdev->pdev->dev));
-
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
if (!vdev->barmap[bar])
continue;
- pci_iounmap(vdev->pdev, vdev->barmap[bar]);
- pci_release_selected_regions(vdev->pdev, 1 << bar);
+ pci_iounmap(pdev, vdev->barmap[bar]);
+ pci_release_selected_regions(pdev, 1 << bar);
vdev->barmap[bar] = NULL;
}
+
+ /*
+ * If we have saved state, restore it. If we can reset the device,
+ * even better. Resetting with current state seems better than
+ * nothing, but saving and restoring current state without reset
+ * is just busy work.
+ */
+ if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
+ pr_info("%s: Couldn't reload %s saved state\n",
+ __func__, dev_name(&pdev->dev));
+
+ if (!vdev->reset_works)
+ return;
+
+ pci_save_state(pdev);
+ }
+
+ /*
+ * Disable INTx and MSI, presumably to avoid spurious interrupts
+ * during reset. Stolen from pci_reset_function()
+ */
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ if (vdev->reset_works)
+ __pci_reset_function(pdev);
+
+ pci_restore_state(pdev);
}
static void vfio_pci_release(void *device_data)
@@ -327,15 +343,10 @@ static long vfio_pci_ioctl(void *device_data,
hdr.count > vfio_pci_get_irq_count(vdev, hdr.index))
return -EINVAL;
- data = kmalloc(hdr.count * size, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (copy_from_user(data, (void __user *)(arg + minsz),
- hdr.count * size)) {
- kfree(data);
- return -EFAULT;
- }
+ data = memdup_user((void __user *)(arg + minsz),
+ hdr.count * size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
}
mutex_lock(&vdev->igate);
@@ -562,9 +573,9 @@ static int __init vfio_pci_init(void)
return 0;
-out_virqfd:
- vfio_pci_virqfd_exit();
out_driver:
+ vfio_pci_virqfd_exit();
+out_virqfd:
vfio_pci_uninit_perm_bits();
return ret;
}
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 4362d9e7baa3..f72323ef618f 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -240,17 +240,17 @@ ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
filled = 1;
} else {
/* Drop writes, fill reads with FF */
+ filled = min((size_t)(x_end - pos), count);
if (!iswrite) {
char val = 0xFF;
size_t i;
- for (i = 0; i < x_end - pos; i++) {
+ for (i = 0; i < filled; i++) {
if (put_user(val, buf + i))
goto out;
}
}
- filled = x_end - pos;
}
count -= filled;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 56097c6d072d..12c264d3b058 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -191,6 +191,17 @@ static void vfio_container_put(struct vfio_container *container)
kref_put(&container->kref, vfio_container_release);
}
+static void vfio_group_unlock_and_free(struct vfio_group *group)
+{
+ mutex_unlock(&vfio.group_lock);
+ /*
+ * Unregister outside of lock. A spurious callback is harmless now
+ * that the group is no longer in vfio.group_list.
+ */
+ iommu_group_unregister_notifier(group->iommu_group, &group->nb);
+ kfree(group);
+}
+
/**
* Group objects - create, release, get, put, search
*/
@@ -229,8 +240,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
minor = vfio_alloc_group_minor(group);
if (minor < 0) {
- mutex_unlock(&vfio.group_lock);
- kfree(group);
+ vfio_group_unlock_and_free(group);
return ERR_PTR(minor);
}
@@ -239,8 +249,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
if (tmp->iommu_group == iommu_group) {
vfio_group_get(tmp);
vfio_free_group_minor(minor);
- mutex_unlock(&vfio.group_lock);
- kfree(group);
+ vfio_group_unlock_and_free(group);
return tmp;
}
}
@@ -249,8 +258,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
group, "%d", iommu_group_id(iommu_group));
if (IS_ERR(dev)) {
vfio_free_group_minor(minor);
- mutex_unlock(&vfio.group_lock);
- kfree(group);
+ vfio_group_unlock_and_free(group);
return (struct vfio_group *)dev; /* ERR_PTR */
}
@@ -274,16 +282,7 @@ static void vfio_group_release(struct kref *kref)
device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
list_del(&group->vfio_next);
vfio_free_group_minor(group->minor);
-
- mutex_unlock(&vfio.group_lock);
-
- /*
- * Unregister outside of lock. A spurious callback is harmless now
- * that the group is no longer in vfio.group_list.
- */
- iommu_group_unregister_notifier(group->iommu_group, &group->nb);
-
- kfree(group);
+ vfio_group_unlock_and_free(group);
}
static void vfio_group_put(struct vfio_group *group)
@@ -466,8 +465,9 @@ static int vfio_dev_viable(struct device *dev, void *data)
{
struct vfio_group *group = data;
struct vfio_device *device;
+ struct device_driver *drv = ACCESS_ONCE(dev->driver);
- if (!dev->driver || vfio_whitelisted_driver(dev->driver))
+ if (!drv || vfio_whitelisted_driver(drv))
return 0;
device = vfio_group_get_device(group, dev);
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 202bba6c997c..bf243177ffe1 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -1,6 +1,6 @@
config VHOST_NET
- tristate "Host kernel accelerator for virtio net (EXPERIMENTAL)"
- depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP) && EXPERIMENTAL
+ tristate "Host kernel accelerator for virtio net"
+ depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
---help---
This kernel module can be loaded in host kernel to accelerate
guest networking with virtio_net. Not to be confused with virtio_net
diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm
index a9c6f76e3208..7e3aa28d999e 100644
--- a/drivers/vhost/Kconfig.tcm
+++ b/drivers/vhost/Kconfig.tcm
@@ -1,6 +1,6 @@
config TCM_VHOST
- tristate "TCM_VHOST fabric module (EXPERIMENTAL)"
- depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m
+ tristate "TCM_VHOST fabric module"
+ depends on TARGET_CORE && EVENTFD && m
default n
---help---
Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ebd08b21b234..959b1cd89e6a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)
}
/* Caller must have TX VQ lock */
-static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+static int tx_poll_start(struct vhost_net *net, struct socket *sock)
{
+ int ret;
+
if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
- return;
- vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
- net->tx_poll_state = VHOST_NET_POLL_STARTED;
+ return 0;
+ ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+ if (!ret)
+ net->tx_poll_state = VHOST_NET_POLL_STARTED;
+ return ret;
}
/* In case of DMA done not in order in lower device driver for some reason.
@@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,
vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}
-static void vhost_net_enable_vq(struct vhost_net *n,
+static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
struct socket *sock;
+ int ret;
sock = rcu_dereference_protected(vq->private_data,
lockdep_is_held(&vq->mutex));
if (!sock)
- return;
+ return 0;
if (vq == n->vqs + VHOST_NET_VQ_TX) {
n->tx_poll_state = VHOST_NET_POLL_STOPPED;
- tx_poll_start(n, sock);
+ ret = tx_poll_start(n, sock);
} else
- vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+ ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+
+ return ret;
}
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
@@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
r = PTR_ERR(ubufs);
goto err_ubufs;
}
- oldubufs = vq->ubufs;
- vq->ubufs = ubufs;
+
vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, sock);
- vhost_net_enable_vq(n, vq);
-
r = vhost_init_used(vq);
if (r)
- goto err_vq;
+ goto err_used;
+ r = vhost_net_enable_vq(n, vq);
+ if (r)
+ goto err_used;
+
+ oldubufs = vq->ubufs;
+ vq->ubufs = ubufs;
n->tx_packets = 0;
n->tx_zcopy_err = 0;
@@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
mutex_unlock(&n->dev.mutex);
return 0;
+err_used:
+ rcu_assign_pointer(vq->private_data, oldsock);
+ vhost_net_enable_vq(n, vq);
+ if (ubufs)
+ vhost_ubuf_put_and_wait(ubufs);
err_ubufs:
fput(sock->file);
err_vq:
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index d670130ee687..22321cf84fbe 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -538,10 +538,6 @@ static void tcm_vhost_submission_work(struct work_struct *work)
if (tv_cmd->tvc_sgl_count) {
sg_ptr = tv_cmd->tvc_sgl;
- /*
- * For BIDI commands, pass in the extra READ buffer
- * to transport_generic_map_mem_to_cmd() below..
- */
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
if (se_cmd->se_cmd_flags & SCF_BIDI) {
@@ -579,10 +575,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
tv_tpg = vs->vs_tpg;
- if (unlikely(!tv_tpg)) {
- pr_err("%s endpoint not set\n", __func__);
+ if (unlikely(!tv_tpg))
return;
- }
mutex_lock(&vq->mutex);
vhost_disable_notify(&vs->dev, vq);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 34389f75fe65..9759249e6d90 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
+ poll->wqh = NULL;
vhost_work_init(&poll->work, fn);
}
/* Start polling a file. We add ourselves to file's wait queue. The caller must
* keep a reference to a file until after vhost_poll_stop is called. */
-void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
unsigned long mask;
+ int ret = 0;
mask = file->f_op->poll(file, &poll->table);
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+ if (mask & POLLERR) {
+ if (poll->wqh)
+ remove_wait_queue(poll->wqh, &poll->wait);
+ ret = -EINVAL;
+ }
+
+ return ret;
}
/* Stop polling a file. After this function returns, it becomes safe to drop the
* file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
- remove_wait_queue(poll->wqh, &poll->wait);
+ if (poll->wqh) {
+ remove_wait_queue(poll->wqh, &poll->wait);
+ poll->wqh = NULL;
+ }
}
static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
@@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
fput(filep);
if (pollstart && vq->handle_kick)
- vhost_poll_start(&vq->poll, vq->kick);
+ r = vhost_poll_start(&vq->poll, vq->kick);
mutex_unlock(&vq->mutex);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2639c58b23ab..17261e277c02 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev);
-void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d08d7998a4aa..80cbd21b483f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -364,7 +364,7 @@ config FB_SA1100
Y here.
config FB_IMX
- tristate "Freescale i.MX LCD support"
+ tristate "Freescale i.MX1/21/25/27 LCD support"
depends on FB && IMX_HAVE_PLATFORM_IMX_FB
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1112,8 +1112,8 @@ config FB_RIVA_BACKLIGHT
Say Y here if you want to control the backlight of your display.
config FB_I740
- tristate "Intel740 support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && FB && PCI
+ tristate "Intel740 support"
+ depends on FB && PCI
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1124,8 +1124,8 @@ config FB_I740
This driver supports graphics cards based on Intel740 chip.
config FB_I810
- tristate "Intel 810/815 support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && FB && PCI && X86_32 && AGP_INTEL
+ tristate "Intel 810/815 support"
+ depends on FB && PCI && X86_32 && AGP_INTEL
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1187,8 +1187,8 @@ config FB_CARILLO_RANCH
This driver supports the LE80578 (Carillo Ranch) board
config FB_INTEL
- tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EXPERT
+ tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support"
+ depends on FB && PCI && X86 && AGP_INTEL && EXPERT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1432,7 +1432,7 @@ config FB_ATY_CT
is at <http://support.ati.com/products/pc/mach64/mach64.html>.
config FB_ATY_GENERIC_LCD
- bool "Mach64 generic LCD support (EXPERIMENTAL)"
+ bool "Mach64 generic LCD support"
depends on FB_ATY_CT
help
Say Y if you have a laptop with an ATI Rage LT PRO, Rage Mobility,
@@ -1479,7 +1479,7 @@ config FB_S3_DDC
config FB_SAVAGE
tristate "S3 Savage support"
- depends on FB && PCI && EXPERIMENTAL
+ depends on FB && PCI
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1633,15 +1633,15 @@ config FB_3DFX
module will be called tdfxfb.
config FB_3DFX_ACCEL
- bool "3Dfx Acceleration functions (EXPERIMENTAL)"
- depends on FB_3DFX && EXPERIMENTAL
+ bool "3Dfx Acceleration functions"
+ depends on FB_3DFX
---help---
This will compile the 3Dfx Banshee/Voodoo3/VSA-100 frame buffer
device driver with acceleration functions.
config FB_3DFX_I2C
bool "Enable DDC/I2C support"
- depends on FB_3DFX && EXPERIMENTAL
+ depends on FB_3DFX
select FB_DDC
default y
help
@@ -1714,8 +1714,8 @@ config FB_ARK
and ICS 5342 RAMDAC.
config FB_PM3
- tristate "Permedia3 support (EXPERIMENTAL)"
- depends on FB && PCI && EXPERIMENTAL
+ tristate "Permedia3 support"
+ depends on FB && PCI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2025,7 +2025,8 @@ config FB_TMIO_ACCELL
config FB_S3C
tristate "Samsung S3C framebuffer support"
- depends on FB && (S3C_DEV_FB || S5P_DEV_FIMD0)
+ depends on FB && (CPU_S3C2416 || ARCH_S3C64XX || ARCH_S5P64X0 || \
+ ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2045,7 +2046,7 @@ config FB_S3C_DEBUG_REGWRITE
bool "Debug register writes"
depends on FB_S3C
---help---
- Show all register writes via printk(KERN_DEBUG)
+ Show all register writes via pr_debug()
config FB_S3C2410
tristate "S3C2410 LCD framebuffer support"
@@ -2140,14 +2141,16 @@ config FB_UDL
To compile as a module, choose M here: the module name is udlfb.
config FB_IBM_GXT4500
- tristate "Framebuffer support for IBM GXT4500P adaptor"
+ tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
depends on FB && PPC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
---help---
- Say Y here to enable support for the IBM GXT4500P display
- adaptor, found on some IBM System P (pSeries) machines.
+ Say Y here to enable support for the IBM GXT4000P/6000P and
+ GXT4500P/6500P display adaptor based on Raster Engine RC1000,
+ found on some IBM System P (pSeries) machines. This driver
+ doesn't use Geometry Engine GT1000.
config FB_PS3
tristate "PS3 GPU framebuffer driver"
@@ -2181,6 +2184,15 @@ config FB_XILINX
framebuffer. ML300 carries a 640*480 LCD display on the board,
ML403 uses a standard DB15 VGA connector.
+config FB_GOLDFISH
+ tristate "Goldfish Framebuffer"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+ Framebuffer driver for Goldfish Virtual Platform
+
config FB_COBALT
tristate "Cobalt server LCD frame buffer support"
depends on FB && (MIPS_COBALT || MIPS_SEAD3)
@@ -2420,6 +2432,7 @@ config FB_PUV3_UNIGFX
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
source "drivers/video/exynos/Kconfig"
+source "drivers/video/mmp/Kconfig"
source "drivers/video/backlight/Kconfig"
if VT
@@ -2442,4 +2455,19 @@ config FB_SH_MOBILE_MERAM
Up to 4 memory channels can be configured, allowing 4 RGB or
2 YCbCr framebuffers to be configured.
+config FB_SSD1307
+ tristate "Solomon SSD1307 framebuffer support"
+ depends on FB && I2C
+ depends on OF
+ depends on GENERIC_GPIO
+ select FB_SYS_FOPS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_DEFERRED_IO
+ select PWM
+ help
+ This driver implements support for the Solomon SSD1307
+ OLED controller over I2C.
+
endmenu
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 23e948ebfab8..0577f834fdcd 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
+obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
obj-$(CONFIG_FB_68328) += 68328fb.o
obj-$(CONFIG_FB_GBE) += gbefb.o
obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o
@@ -105,6 +106,7 @@ obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o
obj-$(CONFIG_FB_PXA) += pxafb.o
obj-$(CONFIG_FB_PXA168) += pxa168fb.o
obj-$(CONFIG_PXA3XX_GCU) += pxa3xx-gcu.o
+obj-$(CONFIG_MMP_DISP) += mmp/
obj-$(CONFIG_FB_W100) += w100fb.o
obj-$(CONFIG_FB_TMIO) += tmiofb.o
obj-$(CONFIG_FB_AU1100) += au1100fb.o
@@ -161,6 +163,7 @@ obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
obj-$(CONFIG_FB_MXS) += mxsfb.o
+obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index b303f1715065..6488a7351a60 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -66,7 +66,7 @@
* have. Allow 1% either way on the nominal for TVs.
*/
#define NR_MONTYPES 6
-static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = {
+static struct fb_monspecs monspecs[NR_MONTYPES] = {
{ /* TV */
.hfmin = 15469,
.hfmax = 15781,
@@ -874,7 +874,7 @@ static struct fb_ops acornfb_ops = {
/*
* Everything after here is initialisation!!!
*/
-static struct fb_videomode modedb[] __devinitdata = {
+static struct fb_videomode modedb[] = {
{ /* 320x256 @ 50Hz */
NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2,
FB_SYNC_COMP_HIGH_ACT,
@@ -926,7 +926,7 @@ static struct fb_videomode modedb[] __devinitdata = {
}
};
-static struct fb_videomode acornfb_default_mode __devinitdata = {
+static struct fb_videomode acornfb_default_mode = {
.name = NULL,
.refresh = 60,
.xres = 640,
@@ -942,7 +942,7 @@ static struct fb_videomode acornfb_default_mode __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED
};
-static void __devinit acornfb_init_fbinfo(void)
+static void acornfb_init_fbinfo(void)
{
static int first = 1;
@@ -1018,7 +1018,7 @@ static void __devinit acornfb_init_fbinfo(void)
* size can optionally be followed by 'M' or 'K' for
* MB or KB respectively.
*/
-static void __devinit acornfb_parse_mon(char *opt)
+static void acornfb_parse_mon(char *opt)
{
char *p = opt;
@@ -1065,7 +1065,7 @@ bad:
current_par.montype = -1;
}
-static void __devinit acornfb_parse_montype(char *opt)
+static void acornfb_parse_montype(char *opt)
{
current_par.montype = -2;
@@ -1106,7 +1106,7 @@ static void __devinit acornfb_parse_montype(char *opt)
}
}
-static void __devinit acornfb_parse_dram(char *opt)
+static void acornfb_parse_dram(char *opt)
{
unsigned int size;
@@ -1131,14 +1131,14 @@ static void __devinit acornfb_parse_dram(char *opt)
static struct options {
char *name;
void (*parse)(char *opt);
-} opt_table[] __devinitdata = {
+} opt_table[] = {
{ "mon", acornfb_parse_mon },
{ "montype", acornfb_parse_montype },
{ "dram", acornfb_parse_dram },
{ NULL, NULL }
};
-static int __devinit acornfb_setup(char *options)
+static int acornfb_setup(char *options)
{
struct options *optp;
char *opt;
@@ -1175,7 +1175,7 @@ static int __devinit acornfb_setup(char *options)
* Detect type of monitor connected
* For now, we just assume SVGA
*/
-static int __devinit acornfb_detect_monitortype(void)
+static int acornfb_detect_monitortype(void)
{
return 4;
}
@@ -1216,7 +1216,7 @@ free_unused_pages(unsigned int virtual_start, unsigned int virtual_end)
printk("acornfb: freed %dK memory\n", mb_freed);
}
-static int __devinit acornfb_probe(struct platform_device *dev)
+static int acornfb_probe(struct platform_device *dev)
{
unsigned long size;
u_int h_sync, v_sync;
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 4659d5da6ff8..e43401afdd03 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -79,7 +79,7 @@ struct arcfb_par {
spinlock_t lock;
};
-static struct fb_fix_screeninfo arcfb_fix __devinitdata = {
+static struct fb_fix_screeninfo arcfb_fix = {
.id = "arcfb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO01,
@@ -89,7 +89,7 @@ static struct fb_fix_screeninfo arcfb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo arcfb_var __devinitdata = {
+static struct fb_var_screeninfo arcfb_var = {
.xres = 128,
.yres = 64,
.xres_virtual = 128,
@@ -502,7 +502,7 @@ static struct fb_ops arcfb_ops = {
.fb_ioctl = arcfb_ioctl,
};
-static int __devinit arcfb_probe(struct platform_device *dev)
+static int arcfb_probe(struct platform_device *dev)
{
struct fb_info *info;
int retval = -ENOMEM;
@@ -587,7 +587,7 @@ err:
return retval;
}
-static int __devexit arcfb_remove(struct platform_device *dev)
+static int arcfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -601,7 +601,7 @@ static int __devexit arcfb_remove(struct platform_device *dev)
static struct platform_driver arcfb_driver = {
.probe = arcfb_probe,
- .remove = __devexit_p(arcfb_remove),
+ .remove = arcfb_remove,
.driver = {
.name = "arcfb",
},
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 555dd4c64f5b..94a51f1ef904 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -100,7 +100,7 @@ static const struct svga_timing_regs ark_timing_regs = {
/* Module parameters */
-static char *mode_option __devinitdata = "640x480-8@60";
+static char *mode_option = "640x480-8@60";
#ifdef CONFIG_MTRR
static int mtrr = 1;
@@ -950,7 +950,7 @@ static struct fb_ops arkfb_ops = {
/* PCI probe */
-static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pci_bus_region bus_reg;
struct resource vga_res;
@@ -1086,7 +1086,7 @@ err_enable_device:
/* PCI remove */
-static void __devexit ark_pci_remove(struct pci_dev *dev)
+static void ark_pci_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
@@ -1184,7 +1184,7 @@ fail:
/* List of boards that we are trying to support */
-static struct pci_device_id ark_devices[] __devinitdata = {
+static struct pci_device_id ark_devices[] = {
{PCI_DEVICE(0xEDD8, 0xA099)},
{0, 0, 0, 0, 0, 0, 0}
};
@@ -1196,7 +1196,7 @@ static struct pci_driver arkfb_pci_driver = {
.name = "arkfb",
.id_table = ark_devices,
.probe = ark_pci_probe,
- .remove = __devexit_p(ark_pci_remove),
+ .remove = ark_pci_remove,
.suspend = ark_pci_suspend,
.resume = ark_pci_resume,
};
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
index 8cdf88e20b4b..d5a37d62847b 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/asiliantfb.c
@@ -451,7 +451,7 @@ static struct chips_init_reg chips_init_xr[] =
{0xd1, 0x01},
};
-static void __devinit chips_hw_init(struct fb_info *p)
+static void chips_hw_init(struct fb_info *p)
{
int i;
@@ -474,7 +474,7 @@ static void __devinit chips_hw_init(struct fb_info *p)
write_fr(chips_init_fr[i].addr, chips_init_fr[i].data);
}
-static struct fb_fix_screeninfo asiliantfb_fix __devinitdata = {
+static struct fb_fix_screeninfo asiliantfb_fix = {
.id = "Asiliant 69000",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -483,7 +483,7 @@ static struct fb_fix_screeninfo asiliantfb_fix __devinitdata = {
.smem_len = 0x200000, /* 2MB */
};
-static struct fb_var_screeninfo asiliantfb_var __devinitdata = {
+static struct fb_var_screeninfo asiliantfb_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -504,7 +504,7 @@ static struct fb_var_screeninfo asiliantfb_var __devinitdata = {
.vsync_len = 2,
};
-static int __devinit init_asiliant(struct fb_info *p, unsigned long addr)
+static int init_asiliant(struct fb_info *p, unsigned long addr)
{
int err;
@@ -535,8 +535,8 @@ static int __devinit init_asiliant(struct fb_info *p, unsigned long addr)
return 0;
}
-static int __devinit
-asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+static int asiliantfb_pci_init(struct pci_dev *dp,
+ const struct pci_device_id *ent)
{
unsigned long addr, size;
struct fb_info *p;
@@ -581,7 +581,7 @@ asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
return 0;
}
-static void __devexit asiliantfb_remove(struct pci_dev *dp)
+static void asiliantfb_remove(struct pci_dev *dp)
{
struct fb_info *p = pci_get_drvdata(dp);
@@ -593,7 +593,7 @@ static void __devexit asiliantfb_remove(struct pci_dev *dp)
framebuffer_release(p);
}
-static struct pci_device_id asiliantfb_pci_tbl[] __devinitdata = {
+static struct pci_device_id asiliantfb_pci_tbl[] = {
{ PCI_VENDOR_ID_CT, PCI_DEVICE_ID_CT_69000, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
@@ -604,7 +604,7 @@ static struct pci_driver asiliantfb_driver = {
.name = "asiliantfb",
.id_table = asiliantfb_pci_tbl,
.probe = asiliantfb_pci_init,
- .remove = __devexit_p(asiliantfb_remove),
+ .remove = asiliantfb_remove,
};
static int __init asiliantfb_init(void)
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 0fefa84ed9ae..8c55011313dc 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -98,7 +98,7 @@
#ifndef CONFIG_PPC_PMAC
/* default mode */
-static struct fb_var_screeninfo default_var __devinitdata = {
+static struct fb_var_screeninfo default_var = {
/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
640, 480, 640, 480, 0, 0, 8, 0,
{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
@@ -121,7 +121,7 @@ static struct fb_var_screeninfo default_var = {
/* default modedb mode */
/* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */
-static struct fb_videomode defaultmode __devinitdata = {
+static struct fb_videomode defaultmode = {
.refresh = 60,
.xres = 640,
.yres = 480,
@@ -149,7 +149,7 @@ enum {
};
/* Must match above enum */
-static char * const r128_family[] __devinitconst = {
+static char * const r128_family[] = {
"AGP",
"PCI",
"PRO AGP",
@@ -275,7 +275,7 @@ static struct pci_driver aty128fb_driver = {
.name = "aty128fb",
.id_table = aty128_pci_tbl,
.probe = aty128_probe,
- .remove = __devexit_p(aty128_remove),
+ .remove = aty128_remove,
.suspend = aty128_pci_suspend,
.resume = aty128_pci_resume,
};
@@ -333,7 +333,7 @@ static const struct aty128_meminfo sdr_sgram =
static const struct aty128_meminfo ddr_sgram =
{ 4, 4, 3, 3, 2, 3, 1, 16, 31, 16, "64-bit DDR SGRAM" };
-static struct fb_fix_screeninfo aty128fb_fix __devinitdata = {
+static struct fb_fix_screeninfo aty128fb_fix = {
.id = "ATY Rage128",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -343,24 +343,24 @@ static struct fb_fix_screeninfo aty128fb_fix __devinitdata = {
.accel = FB_ACCEL_ATI_RAGE128,
};
-static char *mode_option __devinitdata = NULL;
+static char *mode_option = NULL;
#ifdef CONFIG_PPC_PMAC
-static int default_vmode __devinitdata = VMODE_1024_768_60;
-static int default_cmode __devinitdata = CMODE_8;
+static int default_vmode = VMODE_1024_768_60;
+static int default_cmode = CMODE_8;
#endif
-static int default_crt_on __devinitdata = 0;
-static int default_lcd_on __devinitdata = 1;
+static int default_crt_on = 0;
+static int default_lcd_on = 1;
#ifdef CONFIG_MTRR
static bool mtrr = true;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
-static int backlight __devinitdata = 1;
+static int backlight = 1;
#else
-static int backlight __devinitdata = 0;
+static int backlight = 0;
#endif
/* PLL constants */
@@ -449,10 +449,9 @@ static int aty128_encode_var(struct fb_var_screeninfo *var,
static int aty128_decode_var(struct fb_var_screeninfo *var,
struct aty128fb_par *par);
#if 0
-static void __devinit aty128_get_pllinfo(struct aty128fb_par *par,
- void __iomem *bios);
-static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev,
- const struct aty128fb_par *par);
+static void aty128_get_pllinfo(struct aty128fb_par *par, void __iomem *bios);
+static void __iomem *aty128_map_ROM(struct pci_dev *pdev,
+ const struct aty128fb_par *par);
#endif
static void aty128_timings(struct aty128fb_par *par);
static void aty128_init_engine(struct aty128fb_par *par);
@@ -582,7 +581,7 @@ static void aty_pll_writeupdate(const struct aty128fb_par *par)
/* write to the scratch register to test r/w functionality */
-static int __devinit register_test(const struct aty128fb_par *par)
+static int register_test(const struct aty128fb_par *par)
{
u32 val;
int flag = 0;
@@ -781,8 +780,8 @@ static u32 depth_to_dst(u32 depth)
#ifndef __sparc__
-static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par,
- struct pci_dev *dev)
+static void __iomem *aty128_map_ROM(const struct aty128fb_par *par,
+ struct pci_dev *dev)
{
u16 dptr;
u8 rom_type;
@@ -868,8 +867,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par,
return NULL;
}
-static void __devinit aty128_get_pllinfo(struct aty128fb_par *par,
- unsigned char __iomem *bios)
+static void aty128_get_pllinfo(struct aty128fb_par *par,
+ unsigned char __iomem *bios)
{
unsigned int bios_hdr;
unsigned int bios_pll;
@@ -891,7 +890,7 @@ static void __devinit aty128_get_pllinfo(struct aty128fb_par *par,
}
#ifdef CONFIG_X86
-static void __iomem * __devinit aty128_find_mem_vbios(struct aty128fb_par *par)
+static void __iomem *aty128_find_mem_vbios(struct aty128fb_par *par)
{
/* I simplified this code as we used to miss the signatures in
* a lot of case. It's now closer to XFree, we just don't check
@@ -916,7 +915,7 @@ static void __iomem * __devinit aty128_find_mem_vbios(struct aty128fb_par *par)
#endif /* ndef(__sparc__) */
/* fill in known card constants if pll_block is not available */
-static void __devinit aty128_timings(struct aty128fb_par *par)
+static void aty128_timings(struct aty128fb_par *par)
{
#ifdef CONFIG_PPC_OF
/* instead of a table lookup, assume OF has properly
@@ -1658,7 +1657,7 @@ static int aty128fb_sync(struct fb_info *info)
}
#ifndef MODULE
-static int __devinit aty128fb_setup(char *options)
+static int aty128fb_setup(char *options)
{
char *this_opt;
@@ -1888,8 +1887,7 @@ static void aty128_early_resume(void *data)
}
#endif /* CONFIG_PPC_PMAC */
-static int __devinit aty128_init(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct aty128fb_par *par = info->par;
@@ -2039,8 +2037,7 @@ static int __devinit aty128_init(struct pci_dev *pdev,
#ifdef CONFIG_PCI
/* register a card ++ajoshi */
-static int __devinit aty128_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned long fb_addr, reg_addr;
struct aty128fb_par *par;
@@ -2156,7 +2153,7 @@ err_free_fb:
return -ENODEV;
}
-static void __devexit aty128_remove(struct pci_dev *pdev)
+static void aty128_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct aty128fb_par *par;
@@ -2558,7 +2555,7 @@ static int aty128_pci_resume(struct pci_dev *pdev)
}
-static int __devinit aty128fb_init(void)
+static int aty128fb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 868932f904ef..4f27fdc58d84 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -214,7 +214,7 @@ struct pci_mmap_map {
unsigned long prot_mask;
};
-static struct fb_fix_screeninfo atyfb_fix __devinitdata = {
+static struct fb_fix_screeninfo atyfb_fix = {
.id = "ATY Mach64",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -309,18 +309,18 @@ static int vram;
static int pll;
static int mclk;
static int xclk;
-static int comp_sync __devinitdata = -1;
+static int comp_sync = -1;
static char *mode;
#ifdef CONFIG_PMAC_BACKLIGHT
-static int backlight __devinitdata = 1;
+static int backlight = 1;
#else
-static int backlight __devinitdata = 0;
+static int backlight = 0;
#endif
#ifdef CONFIG_PPC
-static int default_vmode __devinitdata = VMODE_CHOOSE;
-static int default_cmode __devinitdata = CMODE_CHOOSE;
+static int default_vmode = VMODE_CHOOSE;
+static int default_cmode = CMODE_CHOOSE;
module_param_named(vmode, default_vmode, int, 0);
MODULE_PARM_DESC(vmode, "int: video mode for mac");
@@ -329,10 +329,10 @@ MODULE_PARM_DESC(cmode, "int: color mode for mac");
#endif
#ifdef CONFIG_ATARI
-static unsigned int mach64_count __devinitdata = 0;
-static unsigned long phys_vmembase[FB_MAX] __devinitdata = { 0, };
-static unsigned long phys_size[FB_MAX] __devinitdata = { 0, };
-static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
+static unsigned int mach64_count = 0;
+static unsigned long phys_vmembase[FB_MAX] = { 0, };
+static unsigned long phys_size[FB_MAX] = { 0, };
+static unsigned long phys_guiregbase[FB_MAX] = { 0, };
#endif
/* top -> down is an evolution of mach64 chipset, any corrections? */
@@ -371,7 +371,7 @@ static struct {
const char *name;
int pll, mclk, xclk, ecp_max;
u32 features;
-} aty_chips[] __devinitdata = {
+} aty_chips[] = {
#ifdef CONFIG_FB_ATY_GX
/* Mach64 GX */
{ PCI_CHIP_MACH64GX, "ATI888GX00 (Mach64 GX)", 135, 50, 50, 0, ATI_CHIP_88800GX },
@@ -426,7 +426,7 @@ static struct {
#endif /* CONFIG_FB_ATY_CT */
};
-static int __devinit correct_chipset(struct atyfb_par *par)
+static int correct_chipset(struct atyfb_par *par)
{
u8 rev;
u16 type;
@@ -531,34 +531,34 @@ static int __devinit correct_chipset(struct atyfb_par *par)
return 0;
}
-static char ram_dram[] __devinitdata = "DRAM";
-static char ram_resv[] __devinitdata = "RESV";
+static char ram_dram[] = "DRAM";
+static char ram_resv[] = "RESV";
#ifdef CONFIG_FB_ATY_GX
-static char ram_vram[] __devinitdata = "VRAM";
+static char ram_vram[] = "VRAM";
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
-static char ram_edo[] __devinitdata = "EDO";
-static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
-static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
-static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
-static char ram_wram[] __devinitdata = "WRAM";
-static char ram_off[] __devinitdata = "OFF";
+static char ram_edo[] = "EDO";
+static char ram_sdram[] = "SDRAM (1:1)";
+static char ram_sgram[] = "SGRAM (1:1)";
+static char ram_sdram32[] = "SDRAM (2:1) (32-bit)";
+static char ram_wram[] = "WRAM";
+static char ram_off[] = "OFF";
#endif /* CONFIG_FB_ATY_CT */
#ifdef CONFIG_FB_ATY_GX
-static char *aty_gx_ram[8] __devinitdata = {
+static char *aty_gx_ram[8] = {
ram_dram, ram_vram, ram_vram, ram_dram,
ram_dram, ram_vram, ram_vram, ram_resv
};
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
-static char *aty_ct_ram[8] __devinitdata = {
+static char *aty_ct_ram[8] = {
ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_wram, ram_resv
};
-static char *aty_xl_ram[8] __devinitdata = {
+static char *aty_xl_ram[8] = {
ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
@@ -588,7 +588,7 @@ static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var,
* Apple monitor sense
*/
-static int __devinit read_aty_sense(const struct atyfb_par *par)
+static int read_aty_sense(const struct atyfb_par *par)
{
int sense, i;
@@ -2273,7 +2273,7 @@ static void aty_bl_exit(struct backlight_device *bd)
#endif /* CONFIG_FB_ATY_BACKLIGHT */
-static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
+static void aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
{
const int ragepro_tbl[] = {
44, 50, 55, 66, 75, 80, 100
@@ -2307,8 +2307,8 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
static struct fb_info *fb_list = NULL;
#if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD)
-static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par,
- struct fb_var_screeninfo *var)
+static int atyfb_get_timings_from_lcd(struct atyfb_par *par,
+ struct fb_var_screeninfo *var)
{
int ret = -EINVAL;
@@ -2333,7 +2333,7 @@ static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par,
}
#endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */
-static int __devinit aty_init(struct fb_info *info)
+static int aty_init(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
const char *ramname = NULL, *xtal;
@@ -2787,7 +2787,7 @@ aty_init_exit:
}
#if defined(CONFIG_ATARI) && !defined(MODULE)
-static int __devinit store_video_par(char *video_str, unsigned char m64_num)
+static int store_video_par(char *video_str, unsigned char m64_num)
{
char *p;
unsigned long vmembase, size, guiregbase;
@@ -2961,9 +2961,8 @@ static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
#ifdef __sparc__
-static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
- struct fb_info *info,
- unsigned long addr)
+static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
+ unsigned long addr)
{
struct atyfb_par *par = info->par;
struct device_node *dp;
@@ -3161,7 +3160,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
#ifdef __i386__
#ifdef CONFIG_FB_ATY_GENERIC_LCD
-static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
+static void aty_init_lcd(struct atyfb_par *par, u32 bios_base)
{
u32 driv_inf_tab, sig;
u16 lcd_ofs;
@@ -3392,7 +3391,7 @@ static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
-static int __devinit init_from_bios(struct atyfb_par *par)
+static int init_from_bios(struct atyfb_par *par)
{
u32 bios_base, rom_addr;
int ret;
@@ -3445,9 +3444,8 @@ static int __devinit init_from_bios(struct atyfb_par *par)
}
#endif /* __i386__ */
-static int __devinit atyfb_setup_generic(struct pci_dev *pdev,
- struct fb_info *info,
- unsigned long addr)
+static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
+ unsigned long addr)
{
struct atyfb_par *par = info->par;
u16 tmp;
@@ -3525,8 +3523,8 @@ atyfb_setup_generic_fail:
#endif /* !__sparc__ */
-static int __devinit atyfb_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int atyfb_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
unsigned long addr, res_start, res_size;
struct fb_info *info;
@@ -3714,7 +3712,7 @@ static int __init atyfb_atari_probe(void)
#ifdef CONFIG_PCI
-static void __devexit atyfb_remove(struct fb_info *info)
+static void atyfb_remove(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
@@ -3762,7 +3760,7 @@ static void __devexit atyfb_remove(struct fb_info *info)
}
-static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
+static void atyfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
@@ -3834,7 +3832,7 @@ static struct pci_driver atyfb_driver = {
.name = "atyfb",
.id_table = atyfb_pci_tbl,
.probe = atyfb_pci_probe,
- .remove = __devexit_p(atyfb_pci_remove),
+ .remove = atyfb_pci_remove,
#ifdef CONFIG_PM
.suspend = atyfb_pci_suspend,
.resume = atyfb_pci_resume,
diff --git a/drivers/video/aty/mach64_ct.c b/drivers/video/aty/mach64_ct.c
index 2745b8539485..51f29d627ceb 100644
--- a/drivers/video/aty/mach64_ct.c
+++ b/drivers/video/aty/mach64_ct.c
@@ -373,8 +373,7 @@ void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll)
#endif
}
-static void __devinit aty_get_pll_ct(const struct fb_info *info,
- union aty_pll *pll)
+static void aty_get_pll_ct(const struct fb_info *info, union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u8 tmp, clock;
@@ -397,8 +396,7 @@ static void __devinit aty_get_pll_ct(const struct fb_info *info,
}
}
-static int __devinit aty_init_pll_ct(const struct fb_info *info,
- union aty_pll *pll)
+static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u8 mpost_div, xpost_div, sclk_post_div_real;
diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
index 46f72ed53510..95ec042ddbf8 100644
--- a/drivers/video/aty/mach64_cursor.c
+++ b/drivers/video/aty/mach64_cursor.c
@@ -183,7 +183,7 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
return 0;
}
-int __devinit aty_init_cursor(struct fb_info *info)
+int aty_init_cursor(struct fb_info *info)
{
unsigned long addr;
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 9e279ee38da8..1e30b2b3e79f 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -293,7 +293,7 @@ static void radeon_unmap_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev)
pci_unmap_rom(dev, rinfo->bios_seg);
}
-static int __devinit radeon_map_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev)
+static int radeon_map_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev)
{
void __iomem *rom;
u16 dptr;
@@ -388,7 +388,7 @@ static int __devinit radeon_map_ROM(struct radeonfb_info *rinfo, struct pci_dev
}
#ifdef CONFIG_X86
-static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
+static int radeon_find_mem_vbios(struct radeonfb_info *rinfo)
{
/* I simplified this code as we used to miss the signatures in
* a lot of case. It's now closer to XFree, we just don't check
@@ -423,7 +423,7 @@ static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
* Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
* tree. Hopefully, ATI OF driver is kind enough to fill these
*/
-static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo)
+static int radeon_read_xtal_OF(struct radeonfb_info *rinfo)
{
struct device_node *dp = rinfo->of_node;
const u32 *val;
@@ -453,7 +453,7 @@ static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo)
/*
* Read PLL infos from chip registers
*/
-static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo)
+static int radeon_probe_pll_params(struct radeonfb_info *rinfo)
{
unsigned char ppll_div_sel;
unsigned Ns, Nm, M;
@@ -591,7 +591,7 @@ static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo)
/*
* Retrieve PLL infos by different means (BIOS, Open Firmware, register probing...)
*/
-static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo)
+static void radeon_get_pllinfo(struct radeonfb_info *rinfo)
{
/*
* In the case nothing works, these are defaults; they are mostly
@@ -1868,7 +1868,7 @@ static struct fb_ops radeonfb_ops = {
};
-static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
+static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
{
struct fb_info *info = rinfo->info;
@@ -2143,8 +2143,8 @@ static struct bin_attribute edid2_attr = {
};
-static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int radeonfb_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct fb_info *info;
struct radeonfb_info *rinfo;
@@ -2407,7 +2407,7 @@ err_out:
-static void __devexit radeonfb_pci_unregister (struct pci_dev *pdev)
+static void radeonfb_pci_unregister(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct radeonfb_info *rinfo = info->par;
@@ -2465,7 +2465,7 @@ static struct pci_driver radeonfb_driver = {
.name = "radeonfb",
.id_table = radeonfb_pci_table,
.probe = radeonfb_pci_register,
- .remove = __devexit_p(radeonfb_pci_unregister),
+ .remove = radeonfb_pci_unregister,
#ifdef CONFIG_PM
.suspend = radeonfb_pci_suspend,
.resume = radeonfb_pci_resume,
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c
index 5c23eac0eb9a..bc078d50d8f1 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/aty/radeon_monitor.c
@@ -62,8 +62,8 @@ static char *radeon_get_mon_name(int type)
* models with broken OF probing by hard-coding known EDIDs for some Mac
* laptops internal LVDS panel. (XXX: not done yet)
*/
-static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_EDID,
- int hdno)
+static int radeon_parse_montype_prop(struct device_node *dp, u8 **out_EDID,
+ int hdno)
{
static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID",
"EDID1", "EDID2", NULL };
@@ -115,8 +115,8 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_
return mt;
}
-static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_no,
- u8 **out_EDID)
+static int radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_no,
+ u8 **out_EDID)
{
struct device_node *dp;
@@ -163,7 +163,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
-static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
+static int radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
{
unsigned long tmp, tmp0;
char stmp[30];
@@ -251,7 +251,7 @@ static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
* doesn't quite work yet, but it's output is still useful for
* debugging
*/
-static void __devinit radeon_parse_connector_info(struct radeonfb_info *rinfo)
+static void radeon_parse_connector_info(struct radeonfb_info *rinfo)
{
int offset, chips, connectors, tmp, i, conn, type;
@@ -297,7 +297,7 @@ static void __devinit radeon_parse_connector_info(struct radeonfb_info *rinfo)
* as well and currently is only implemented for the CRT DAC, the
* code for the TVDAC is commented out in XFree as "non working"
*/
-static int __devinit radeon_crt_is_connected(struct radeonfb_info *rinfo, int is_crt_dac)
+static int radeon_crt_is_connected(struct radeonfb_info *rinfo, int is_crt_dac)
{
int connected = 0;
@@ -369,8 +369,8 @@ static int __devinit radeon_crt_is_connected(struct radeonfb_info *rinfo, int is
* Parse the "monitor_layout" string if any. This code is mostly
* copied from XFree's radeon driver
*/
-static int __devinit radeon_parse_monitor_layout(struct radeonfb_info *rinfo,
- const char *monitor_layout)
+static int radeon_parse_monitor_layout(struct radeonfb_info *rinfo,
+ const char *monitor_layout)
{
char s1[5], s2[5];
int i = 0, second = 0;
@@ -433,8 +433,8 @@ static int __devinit radeon_parse_monitor_layout(struct radeonfb_info *rinfo,
* try to retrieve EDID. The algorithm here comes from XFree's radeon
* driver
*/
-void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
- const char *monitor_layout, int ignore_edid)
+void radeon_probe_screens(struct radeonfb_info *rinfo,
+ const char *monitor_layout, int ignore_edid)
{
#ifdef CONFIG_FB_RADEON_I2C
int ddc_crt2_used = 0;
@@ -753,7 +753,7 @@ static int is_powerblade(const char *model)
* Build the modedb for head 1 (head 2 will come later), check panel infos
* from either BIOS or EDID, and pick up the default mode
*/
-void __devinit radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_option)
+void radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_option)
{
struct fb_info * info = rinfo->info;
int has_default_mode = 0;
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index fe3b6ec87122..ddabaa867b0d 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -83,7 +83,7 @@ struct fb_bitfield rgb_bitfields[][4] =
{ { 8, 4, 0 }, { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } },
};
-static struct fb_fix_screeninfo au1100fb_fix __devinitdata = {
+static struct fb_fix_screeninfo au1100fb_fix = {
.id = "AU1100 FB",
.xpanstep = 1,
.ypanstep = 1,
@@ -91,7 +91,7 @@ static struct fb_fix_screeninfo au1100fb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo au1100fb_var __devinitdata = {
+static struct fb_var_screeninfo au1100fb_var = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
@@ -469,7 +469,7 @@ static int au1100fb_setup(struct au1100fb_device *fbdev)
return 0;
}
-static int __devinit au1100fb_drv_probe(struct platform_device *dev)
+static int au1100fb_drv_probe(struct platform_device *dev)
{
struct au1100fb_device *fbdev = NULL;
struct resource *regs_res;
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 7ca79f02056e..1b59054fc6a4 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1673,7 +1673,7 @@ out:
}
/* AU1200 LCD controller device driver */
-static int __devinit au1200fb_drv_probe(struct platform_device *dev)
+static int au1200fb_drv_probe(struct platform_device *dev)
{
struct au1200fb_device *fbdev;
struct au1200fb_platdata *pd;
@@ -1798,7 +1798,7 @@ failed:
return ret;
}
-static int __devexit au1200fb_drv_remove(struct platform_device *dev)
+static int au1200fb_drv_remove(struct platform_device *dev)
{
struct au1200fb_platdata *pd = platform_get_drvdata(dev);
struct au1200fb_device *fbdev;
@@ -1876,7 +1876,7 @@ static struct platform_driver au1200fb_driver = {
.pm = AU1200FB_PMOPS,
},
.probe = au1200fb_drv_probe,
- .remove = __devexit_p(au1200fb_drv_remove),
+ .remove = au1200fb_drv_remove,
};
/*-------------------------------------------------------------------------*/
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c
index c36cf961dcb2..1a9ac6e1f4b3 100644
--- a/drivers/video/auo_k1900fb.c
+++ b/drivers/video/auo_k1900fb.c
@@ -156,7 +156,7 @@ static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
return (par->update_cnt > 10);
}
-static int __devinit auok1900fb_probe(struct platform_device *pdev)
+static int auok1900fb_probe(struct platform_device *pdev)
{
struct auok190x_init_data init;
struct auok190x_board *board;
@@ -177,14 +177,14 @@ static int __devinit auok1900fb_probe(struct platform_device *pdev)
return auok190x_common_probe(pdev, &init);
}
-static int __devexit auok1900fb_remove(struct platform_device *pdev)
+static int auok1900fb_remove(struct platform_device *pdev)
{
return auok190x_common_remove(pdev);
}
static struct platform_driver auok1900fb_driver = {
.probe = auok1900fb_probe,
- .remove = __devexit_p(auok1900fb_remove),
+ .remove = auok1900fb_remove,
.driver = {
.owner = THIS_MODULE,
.name = "auo_k1900fb",
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c
index 1c054c18616e..d1db1653cd88 100644
--- a/drivers/video/auo_k1901fb.c
+++ b/drivers/video/auo_k1901fb.c
@@ -209,7 +209,7 @@ static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
return (par->update_cnt > 10);
}
-static int __devinit auok1901fb_probe(struct platform_device *pdev)
+static int auok1901fb_probe(struct platform_device *pdev)
{
struct auok190x_init_data init;
struct auok190x_board *board;
@@ -230,14 +230,14 @@ static int __devinit auok1901fb_probe(struct platform_device *pdev)
return auok190x_common_probe(pdev, &init);
}
-static int __devexit auok1901fb_remove(struct platform_device *pdev)
+static int auok1901fb_remove(struct platform_device *pdev)
{
return auok190x_common_remove(pdev);
}
static struct platform_driver auok1901fb_driver = {
.probe = auok1901fb_probe,
- .remove = __devexit_p(auok1901fb_remove),
+ .remove = auok1901fb_remove,
.driver = {
.owner = THIS_MODULE,
.name = "auo_k1901fb",
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
index c03ecdd31e4c..53846cb534d4 100644
--- a/drivers/video/auo_k190x.c
+++ b/drivers/video/auo_k190x.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/fb.h>
#include <linux/delay.h>
@@ -773,8 +774,8 @@ EXPORT_SYMBOL_GPL(auok190x_pm);
* Common probe and remove code
*/
-int __devinit auok190x_common_probe(struct platform_device *pdev,
- struct auok190x_init_data *init)
+int auok190x_common_probe(struct platform_device *pdev,
+ struct auok190x_init_data *init)
{
struct auok190x_board *board = init->board;
struct auok190xfb_par *par;
@@ -1006,7 +1007,7 @@ err_reg:
}
EXPORT_SYMBOL_GPL(auok190x_common_probe);
-int __devexit auok190x_common_remove(struct platform_device *pdev)
+int auok190x_common_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
struct auok190xfb_par *par = info->par;
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index b7ec34c57f46..2cd63507ed74 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -117,8 +117,8 @@ static int pm860x_backlight_set(struct backlight_device *bl, int brightness)
data->current_brightness = value;
return 0;
out:
- dev_dbg(chip->dev, "set brightness %d failure with return "
- "value:%d\n", value, ret);
+ dev_dbg(chip->dev, "set brightness %d failure with return value: %d\n",
+ value, ret);
return ret;
}
@@ -165,8 +165,10 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
struct pm860x_backlight_data *data,
char *name)
{
- struct device_node *nproot = pdev->dev.parent->of_node, *np;
+ struct device_node *nproot, *np;
int iset = 0;
+
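+	/*
+	 * of_find_node_by_name() below drops a reference on its "from"
+	 * argument, so take an extra one on the parent's node here; the
+	 * of_node_put() at the end releases the "backlights" node it returns.
+	 */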
+ nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
nproot = of_find_node_by_name(nproot, "backlights");
@@ -184,6 +186,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
break;
}
}
+ of_node_put(nproot);
return 0;
}
#else
@@ -208,22 +211,19 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "duty cycle");
if (!res) {
dev_err(&pdev->dev, "No REG resource for duty cycle\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_duty_cycle = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "always on");
if (!res) {
dev_err(&pdev->dev, "No REG resorce for always on\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_always_on = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "current");
if (!res) {
dev_err(&pdev->dev, "No REG resource for current\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_current = res->start;
@@ -231,8 +231,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
sprintf(name, "backlight-%d", pdev->id);
data->port = pdev->id;
data->chip = chip;
- data->i2c = (chip->id == CHIP_PM8606) ? chip->client \
- : chip->companion;
+ data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->current_brightness = MAX_BRIGHTNESS;
if (pm860x_backlight_dt_init(pdev, data, name)) {
if (pdata) {
@@ -263,8 +262,6 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
return 0;
out_brt:
backlight_device_unregister(bl);
-out:
- devm_kfree(&pdev->dev, data);
return ret;
}
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 765a945f8ea1..be27b551473f 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -126,6 +126,21 @@ config LCD_AMS369FG06
If you have an AMS369FG06 AMOLED Panel, say Y to enable its
LCD control driver.
+config LCD_LMS501KF03
+ tristate "LMS501KF03 LCD Driver"
+ depends on SPI
+ default n
+ help
+ If you have an LMS501KF03 LCD Panel, say Y to enable its
+ LCD control driver.
+
+config LCD_HX8357
+ tristate "Himax HX-8357 LCD Driver"
+ depends on SPI
+ help
+ If you have an HX-8357 LCD panel, say Y to enable its LCD control
+ driver.
+
endif # LCD_CLASS_DEVICE
#
@@ -366,7 +381,7 @@ config BACKLIGHT_LP855X
tristate "Backlight driver for TI LP855X"
depends on BACKLIGHT_CLASS_DEVICE && I2C
help
- This supports TI LP8550, LP8551, LP8552, LP8553 and LP8556
+ This supports TI LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
backlight driver.
config BACKLIGHT_OT200
@@ -390,6 +405,13 @@ config BACKLIGHT_TPS65217
If you have a Texas Instruments TPS65217 say Y to enable the
backlight driver.
+config BACKLIGHT_AS3711
+ tristate "AS3711 Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_AS3711
+ help
+ If you have an Austrian Microsystems AS3711 say Y to enable the
+ backlight driver.
+
endif # BACKLIGHT_CLASS_DEVICE
endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index e7ce7291635d..4606c218e8e4 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -1,47 +1,50 @@
# Backlight & LCD drivers
-obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
-obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o
-obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o
-obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o
-obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
-obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
-obj-$(CONFIG_LCD_ILI9320) += ili9320.o
-obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
-obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
-obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
-obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
-obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
-obj-$(CONFIG_LCD_LD9040) += ld9040.o
-obj-$(CONFIG_LCD_AMS369FG06) += ams369fg06.o
+obj-$(CONFIG_LCD_AMS369FG06) += ams369fg06.o
+obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
+obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o
+obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o
+obj-$(CONFIG_LCD_HX8357) += hx8357.o
+obj-$(CONFIG_LCD_ILI9320) += ili9320.o
+obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o
+obj-$(CONFIG_LCD_LD9040) += ld9040.o
+obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
+obj-$(CONFIG_LCD_LMS501KF03) += lms501kf03.o
+obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
+obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
+obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
+obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
+obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
+obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
-obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
-obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
-obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
-obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
-obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
-obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
-obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
-obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
-obj-$(CONFIG_BACKLIGHT_LM3630) += lm3630_bl.o
-obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
-obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
-obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
-obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
-obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
-obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
-obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
-obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o
-obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
-obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
-obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
-obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
-obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
-obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
-obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
-obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
-obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
+obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
+obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
+obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
+obj-$(CONFIG_BACKLIGHT_AS3711) += as3711_bl.o
+obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
+obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
+obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
+obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
+obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o
+obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
+obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
+obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
+obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3630) += lm3630_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
+obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
+obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
+obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
+obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
+obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
+obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
-obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
-obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
-obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o
+obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
+obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
+obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
+obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o
+obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index 7ff752288b92..c6fc668d6236 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -74,7 +74,7 @@ static int aat2870_bl_get_brightness(struct backlight_device *bd)
static int aat2870_bl_update_status(struct backlight_device *bd)
{
- struct aat2870_bl_driver_data *aat2870_bl = dev_get_drvdata(&bd->dev);
+ struct aat2870_bl_driver_data *aat2870_bl = bl_get_data(bd);
struct aat2870_data *aat2870 =
dev_get_drvdata(aat2870_bl->pdev->dev.parent);
int brightness = bd->props.brightness;
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 6bb72c0cb803..a77c9cad3320 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -783,7 +783,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
static int adp8860_i2c_resume(struct i2c_client *client)
{
- adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
+ adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN);
return 0;
}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 63c882b8177a..712c25a0d8fe 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -957,7 +957,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
static int adp8870_i2c_resume(struct i2c_client *client)
{
- adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
+ adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);
return 0;
}
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index f57e1905236a..d29e49443f29 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -10,25 +10,16 @@
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/wait.h>
-#include <linux/module.h>
-#include <linux/fb.h>
+#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/fb.h>
#include <linux/gpio.h>
-#include <linux/spi/spi.h>
#include <linux/lcd.h>
-#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
#define SLEEPMSEC 0x1000
#define ENDDEF 0x2000
@@ -210,8 +201,9 @@ static int ams369fg06_panel_send_sequence(struct ams369fg06 *lcd,
ret = ams369fg06_spi_write(lcd, wbuf[i], wbuf[i+1]);
if (ret)
break;
- } else
- mdelay(wbuf[i+1]);
+ } else {
+ msleep(wbuf[i+1]);
+ }
i += 2;
}
@@ -313,41 +305,32 @@ static int ams369fg06_ldi_disable(struct ams369fg06 *lcd)
static int ams369fg06_power_is_on(int power)
{
- return ((power) <= FB_BLANK_NORMAL);
+ return power <= FB_BLANK_NORMAL;
}
static int ams369fg06_power_on(struct ams369fg06 *lcd)
{
int ret = 0;
- struct lcd_platform_data *pd = NULL;
- struct backlight_device *bd = NULL;
+ struct lcd_platform_data *pd;
+ struct backlight_device *bd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
-
bd = lcd->bd;
- if (!bd) {
- dev_err(lcd->dev, "backlight device is NULL.\n");
- return -EFAULT;
- }
if (!pd->power_on) {
dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->power_on(lcd->ld, 1);
- mdelay(pd->power_on_delay);
+ msleep(pd->power_on_delay);
}
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->reset(lcd->ld);
- mdelay(pd->reset_delay);
+ msleep(pd->reset_delay);
}
ret = ams369fg06_ldi_init(lcd);
@@ -374,14 +357,10 @@ static int ams369fg06_power_on(struct ams369fg06 *lcd)
static int ams369fg06_power_off(struct ams369fg06 *lcd)
{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
+ int ret;
+ struct lcd_platform_data *pd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL\n");
- return -EFAULT;
- }
ret = ams369fg06_ldi_disable(lcd);
if (ret) {
@@ -389,13 +368,9 @@ static int ams369fg06_power_off(struct ams369fg06 *lcd)
return -EIO;
}
- mdelay(pd->power_off_delay);
+ msleep(pd->power_off_delay);
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else
- pd->power_on(lcd->ld, 0);
+ pd->power_on(lcd->ld, 0);
return 0;
}
@@ -446,7 +421,7 @@ static int ams369fg06_set_brightness(struct backlight_device *bd)
{
int ret = 0;
int brightness = bd->props.brightness;
- struct ams369fg06 *lcd = dev_get_drvdata(&bd->dev);
+ struct ams369fg06 *lcd = bl_get_data(bd);
if (brightness < MIN_BRIGHTNESS ||
brightness > bd->props.max_brightness) {
@@ -501,7 +476,7 @@ static int ams369fg06_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
- return -EFAULT;
+ return -EINVAL;
}
ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
@@ -534,10 +509,11 @@ static int ams369fg06_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
ams369fg06_power(lcd, FB_BLANK_UNBLANK);
- } else
+ } else {
lcd->power = FB_BLANK_UNBLANK;
+ }
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
dev_info(&spi->dev, "ams369fg06 panel driver has been probed.\n");
@@ -550,7 +526,7 @@ out_lcd_unregister:
static int ams369fg06_remove(struct spi_device *spi)
{
- struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+ struct ams369fg06 *lcd = spi_get_drvdata(spi);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
@@ -560,44 +536,26 @@ static int ams369fg06_remove(struct spi_device *spi)
}
#if defined(CONFIG_PM)
-static unsigned int before_power;
-
static int ams369fg06_suspend(struct spi_device *spi, pm_message_t mesg)
{
- int ret = 0;
- struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+ struct ams369fg06 *lcd = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
- before_power = lcd->power;
-
/*
* when lcd panel is suspend, lcd panel becomes off
* regardless of status.
*/
- ret = ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
-
- return ret;
+ return ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
}
static int ams369fg06_resume(struct spi_device *spi)
{
- int ret = 0;
- struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+ struct ams369fg06 *lcd = spi_get_drvdata(spi);
- /*
- * after suspended, if lcd panel status is FB_BLANK_UNBLANK
- * (at that time, before_power is FB_BLANK_UNBLANK) then
- * it changes that status to FB_BLANK_POWERDOWN to get lcd on.
- */
- if (before_power == FB_BLANK_UNBLANK)
- lcd->power = FB_BLANK_POWERDOWN;
-
- dev_dbg(&spi->dev, "before_power = %d\n", before_power);
+ lcd->power = FB_BLANK_POWERDOWN;
- ret = ams369fg06_power(lcd, before_power);
-
- return ret;
+ return ams369fg06_power(lcd, FB_BLANK_UNBLANK);
}
#else
#define ams369fg06_suspend NULL
@@ -606,7 +564,7 @@ static int ams369fg06_resume(struct spi_device *spi)
static void ams369fg06_shutdown(struct spi_device *spi)
{
- struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+ struct ams369fg06 *lcd = spi_get_drvdata(spi);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
}
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index f088d4c07381..d84329676689 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -196,7 +196,7 @@ static int apple_bl_add(struct acpi_device *dev)
return 0;
}
-static int apple_bl_remove(struct acpi_device *dev, int type)
+static int apple_bl_remove(struct acpi_device *dev)
{
backlight_device_unregister(apple_backlight_device);
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
new file mode 100644
index 000000000000..41d52fe52543
--- /dev/null
+++ b/drivers/video/backlight/as3711_bl.c
@@ -0,0 +1,380 @@
+/*
+ * AS3711 PMIC backlight driver, using DCDC Step Up Converters
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/mfd/as3711.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum as3711_bl_type {
+ AS3711_BL_SU1,
+ AS3711_BL_SU2,
+};
+
+struct as3711_bl_data {
+ bool powered;
+ const char *fb_name;
+ struct device *fb_dev;
+ enum as3711_bl_type type;
+ int brightness;
+ struct backlight_device *bl;
+};
+
+struct as3711_bl_supply {
+ struct as3711_bl_data su1;
+ struct as3711_bl_data su2;
+ const struct as3711_bl_pdata *pdata;
+ struct as3711 *as3711;
+};
+
+static struct as3711_bl_supply *to_supply(struct as3711_bl_data *su)
+{
+ switch (su->type) {
+ case AS3711_BL_SU1:
+ return container_of(su, struct as3711_bl_supply, su1);
+ case AS3711_BL_SU2:
+ return container_of(su, struct as3711_bl_supply, su2);
+ }
+ return NULL;
+}
+
+static int as3711_set_brightness_auto_i(struct as3711_bl_data *data,
+ unsigned int brightness)
+{
+ struct as3711_bl_supply *supply = to_supply(data);
+ struct as3711 *as3711 = supply->as3711;
+ const struct as3711_bl_pdata *pdata = supply->pdata;
+ int ret = 0;
+
+ /* Only all equal current values are supported */
+ if (pdata->su2_auto_curr1)
+ ret = regmap_write(as3711->regmap, AS3711_CURR1_VALUE,
+ brightness);
+ if (!ret && pdata->su2_auto_curr2)
+ ret = regmap_write(as3711->regmap, AS3711_CURR2_VALUE,
+ brightness);
+ if (!ret && pdata->su2_auto_curr3)
+ ret = regmap_write(as3711->regmap, AS3711_CURR3_VALUE,
+ brightness);
+
+ return ret;
+}
+
+static int as3711_set_brightness_v(struct as3711 *as3711,
+ unsigned int brightness,
+ unsigned int reg)
+{
+ if (brightness > 31)
+ return -EINVAL;
+
+ return regmap_update_bits(as3711->regmap, reg, 0xf0,
+ brightness << 4);
+}
+
+static int as3711_bl_su2_reset(struct as3711_bl_supply *supply)
+{
+ struct as3711 *as3711 = supply->as3711;
+ int ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_5,
+ 3, supply->pdata->su2_fbprot);
+ if (!ret)
+ ret = regmap_update_bits(as3711->regmap,
+ AS3711_STEPUP_CONTROL_2, 1, 0);
+ if (!ret)
+ ret = regmap_update_bits(as3711->regmap,
+ AS3711_STEPUP_CONTROL_2, 1, 1);
+ return ret;
+}
+
+/*
+ * Someone with less fragile or less expensive hardware could try to simplify
+ * the brightness adjustment procedure.
+ */
+static int as3711_bl_update_status(struct backlight_device *bl)
+{
+ struct as3711_bl_data *data = bl_get_data(bl);
+ struct as3711_bl_supply *supply = to_supply(data);
+ struct as3711 *as3711 = supply->as3711;
+ int brightness = bl->props.brightness;
+ int ret = 0;
+
+ dev_dbg(&bl->dev, "%s(): brightness %u, pwr %x, blank %x, state %x\n",
+ __func__, bl->props.brightness, bl->props.power,
+ bl->props.fb_blank, bl->props.state);
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ if (data->type == AS3711_BL_SU1) {
+ ret = as3711_set_brightness_v(as3711, brightness,
+ AS3711_STEPUP_CONTROL_1);
+ } else {
+ const struct as3711_bl_pdata *pdata = supply->pdata;
+
+ switch (pdata->su2_feedback) {
+ case AS3711_SU2_VOLTAGE:
+ ret = as3711_set_brightness_v(as3711, brightness,
+ AS3711_STEPUP_CONTROL_2);
+ break;
+ case AS3711_SU2_CURR_AUTO:
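+			/*
+			 * Cautious ramp: program a quarter of the target
+			 * current first, restart SU2 and wait, then apply the
+			 * full value; a zero brightness switches the
+			 * converter off instead.
+			 */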
+ ret = as3711_set_brightness_auto_i(data, brightness / 4);
+ if (ret < 0)
+ return ret;
+ if (brightness) {
+ ret = as3711_bl_su2_reset(supply);
+ if (ret < 0)
+ return ret;
+ udelay(500);
+ ret = as3711_set_brightness_auto_i(data, brightness);
+ } else {
+ ret = regmap_update_bits(as3711->regmap,
+ AS3711_STEPUP_CONTROL_2, 1, 0);
+ }
+ break;
+ /* Manual one current feedback pin below */
+ case AS3711_SU2_CURR1:
+ ret = regmap_write(as3711->regmap, AS3711_CURR1_VALUE,
+ brightness);
+ break;
+ case AS3711_SU2_CURR2:
+ ret = regmap_write(as3711->regmap, AS3711_CURR2_VALUE,
+ brightness);
+ break;
+ case AS3711_SU2_CURR3:
+ ret = regmap_write(as3711->regmap, AS3711_CURR3_VALUE,
+ brightness);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+ if (!ret)
+ data->brightness = brightness;
+
+ return ret;
+}
+
+static int as3711_bl_get_brightness(struct backlight_device *bl)
+{
+ struct as3711_bl_data *data = bl_get_data(bl);
+
+ return data->brightness;
+}
+
+static const struct backlight_ops as3711_bl_ops = {
+ .update_status = as3711_bl_update_status,
+ .get_brightness = as3711_bl_get_brightness,
+};
+
+static int as3711_bl_init_su2(struct as3711_bl_supply *supply)
+{
+ struct as3711 *as3711 = supply->as3711;
+ const struct as3711_bl_pdata *pdata = supply->pdata;
+ u8 ctl = 0;
+ int ret;
+
+ dev_dbg(as3711->dev, "%s(): use %u\n", __func__, pdata->su2_feedback);
+
+ /* Turn SU2 off */
+ ret = regmap_write(as3711->regmap, AS3711_STEPUP_CONTROL_2, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (pdata->su2_feedback) {
+ case AS3711_SU2_VOLTAGE:
+ ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_4, 3, 0);
+ break;
+ case AS3711_SU2_CURR1:
+ ctl = 1;
+ ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_4, 3, 1);
+ break;
+ case AS3711_SU2_CURR2:
+ ctl = 4;
+ ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_4, 3, 2);
+ break;
+ case AS3711_SU2_CURR3:
+ ctl = 0x10;
+ ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_4, 3, 3);
+ break;
+ case AS3711_SU2_CURR_AUTO:
+ if (pdata->su2_auto_curr1)
+ ctl = 2;
+ if (pdata->su2_auto_curr2)
+ ctl |= 8;
+ if (pdata->su2_auto_curr3)
+ ctl |= 0x20;
+ ret = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ret)
+ ret = regmap_write(as3711->regmap, AS3711_CURR_CONTROL, ctl);
+
+ return ret;
+}
+
+static int as3711_bl_register(struct platform_device *pdev,
+ unsigned int max_brightness, struct as3711_bl_data *su)
+{
+ struct backlight_properties props = {.type = BACKLIGHT_RAW,};
+ struct backlight_device *bl;
+
+ /* max tuning I = 31uA for voltage- and 38250uA for current-feedback */
+ props.max_brightness = max_brightness;
+
+ bl = backlight_device_register(su->type == AS3711_BL_SU1 ?
+ "as3711-su1" : "as3711-su2",
+ &pdev->dev, su,
+ &as3711_bl_ops, &props);
+ if (IS_ERR(bl)) {
+ dev_err(&pdev->dev, "failed to register backlight\n");
+ return PTR_ERR(bl);
+ }
+
+ bl->props.brightness = props.max_brightness;
+
+ backlight_update_status(bl);
+
+ su->bl = bl;
+
+ return 0;
+}
+
+static int as3711_backlight_probe(struct platform_device *pdev)
+{
+ struct as3711_bl_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
+ struct as3711_bl_supply *supply;
+ struct as3711_bl_data *su;
+ unsigned int max_brightness;
+ int ret;
+
+ if (!pdata || (!pdata->su1_fb && !pdata->su2_fb)) {
+ dev_err(&pdev->dev, "No platform data, exiting...\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Due to possible hardware damage I chose to block all modes that are
+ * unsupported on my hardware. Anyone wishing to use any of those modes
+ * will have to first review the code, then activate and test it.
+ */
+ if (pdata->su1_fb ||
+ pdata->su2_fbprot != AS3711_SU2_GPIO4 ||
+ pdata->su2_feedback != AS3711_SU2_CURR_AUTO) {
+ dev_warn(&pdev->dev,
+ "Attention! An untested mode has been chosen!\n"
+ "Please, review the code, enable, test, and report success:-)\n");
+ return -EINVAL;
+ }
+
+ supply = devm_kzalloc(&pdev->dev, sizeof(*supply), GFP_KERNEL);
+ if (!supply)
+ return -ENOMEM;
+
+ supply->as3711 = as3711;
+ supply->pdata = pdata;
+
+ if (pdata->su1_fb) {
+ su = &supply->su1;
+ su->fb_name = pdata->su1_fb;
+ su->type = AS3711_BL_SU1;
+
+ max_brightness = min(pdata->su1_max_uA, 31);
+ ret = as3711_bl_register(pdev, max_brightness, su);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (pdata->su2_fb) {
+ su = &supply->su2;
+ su->fb_name = pdata->su2_fb;
+ su->type = AS3711_BL_SU2;
+
+ switch (pdata->su2_fbprot) {
+ case AS3711_SU2_GPIO2:
+ case AS3711_SU2_GPIO3:
+ case AS3711_SU2_GPIO4:
+ case AS3711_SU2_LX_SD4:
+ break;
+ default:
+ ret = -EINVAL;
+ goto esu2;
+ }
+
+ switch (pdata->su2_feedback) {
+ case AS3711_SU2_VOLTAGE:
+ max_brightness = min(pdata->su2_max_uA, 31);
+ break;
+ case AS3711_SU2_CURR1:
+ case AS3711_SU2_CURR2:
+ case AS3711_SU2_CURR3:
+ case AS3711_SU2_CURR_AUTO:
+ max_brightness = min(pdata->su2_max_uA / 150, 255);
+ break;
+ default:
+ ret = -EINVAL;
+ goto esu2;
+ }
+
+ ret = as3711_bl_init_su2(supply);
+ if (ret < 0)
+ return ret;
+
+ ret = as3711_bl_register(pdev, max_brightness, su);
+ if (ret < 0)
+ goto esu2;
+ }
+
+ platform_set_drvdata(pdev, supply);
+
+ return 0;
+
+esu2:
+ backlight_device_unregister(supply->su1.bl);
+ return ret;
+}
+
+static int as3711_backlight_remove(struct platform_device *pdev)
+{
+ struct as3711_bl_supply *supply = platform_get_drvdata(pdev);
+
+ backlight_device_unregister(supply->su1.bl);
+ backlight_device_unregister(supply->su2.bl);
+
+ return 0;
+}
+
+static struct platform_driver as3711_backlight_driver = {
+ .driver = {
+ .name = "as3711-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = as3711_backlight_probe,
+ .remove = as3711_backlight_remove,
+};
+
+module_platform_driver(as3711_backlight_driver);
+
+MODULE_DESCRIPTION("Backlight Driver for AS3711 PMICs");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:as3711-backlight");
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index df1cbb7ef6ca..de5e5e74e2a7 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -106,10 +106,9 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
pwmbl->pdata->pwm_compare_max);
- dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver "
- "(%lu Hz)\n", pwmbl->pwmc.mck /
- pwmbl->pdata->pwm_compare_max /
- (1 << prescale));
+ dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver (%lu Hz)\n",
+ pwmbl->pwmc.mck / pwmbl->pdata->pwm_compare_max /
+ (1 << prescale));
return pwm_channel_enable(&pwmbl->pwmc);
}
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 297db2fa91f5..c74e7aa46731 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -370,6 +370,35 @@ void backlight_device_unregister(struct backlight_device *bd)
}
EXPORT_SYMBOL(backlight_device_unregister);
+#ifdef CONFIG_OF
+static int of_parent_match(struct device *dev, const void *data)
+{
+ return dev->parent && dev->parent->of_node == data;
+}
+
+/**
+ * of_find_backlight_by_node() - find backlight device by device-tree node
+ * @node: device-tree node of the backlight device
+ *
+ * Returns a pointer to the backlight device corresponding to the given DT
+ * node or NULL if no such backlight device exists or if the device hasn't
+ * been probed yet.
+ *
+ * This function obtains a reference on the backlight device and it is the
+ * caller's responsibility to drop the reference by calling put_device() on
+ * the backlight device's .dev field.
+ */
+struct backlight_device *of_find_backlight_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device(backlight_class, NULL, node, of_parent_match);
+
+ return dev ? to_backlight_device(dev) : NULL;
+}
+EXPORT_SYMBOL(of_find_backlight_by_node);
+#endif
+
static void __exit backlight_class_exit(void)
{
class_destroy(backlight_class);
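
The kernel-doc above describes the reference-counting contract; below is a minimal
usage sketch (not part of this patch) of a consumer that resolves its backlight
through a hypothetical "backlight" phandle property and drops the references it takes:

#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/of.h>

static int example_attach_backlight(struct device *dev)
{
	struct device_node *np;
	struct backlight_device *bd;

	/* "backlight" is an assumed property name for this sketch */
	np = of_parse_phandle(dev->of_node, "backlight", 0);
	if (!np)
		return -ENODEV;

	bd = of_find_backlight_by_node(np);
	of_node_put(np);
	if (!bd)
		return -EPROBE_DEFER;	/* provider not bound yet */

	/* ... use bd, e.g. adjust bd->props and call backlight_update_status(bd) ... */

	/* drop the reference taken by of_find_backlight_by_node() */
	put_device(&bd->dev);
	return 0;
}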
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index eaaebf21993e..aa782f302983 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -6,8 +6,8 @@
* Based on Sharp's 2.4 Backlight Driver
*
* Copyright (c) 2008 Marvell International Ltd.
- * Converted to SPI device based LCD/Backlight device driver
- * by Eric Miao <eric.miao@marvell.com>
+ * Converted to SPI device based LCD/Backlight device driver
+ * by Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -192,7 +192,7 @@ static void lcdtg_set_phadadj(struct corgi_lcd *lcd, int mode)
{
int adj;
- switch(mode) {
+ switch (mode) {
case CORGI_LCD_MODE_VGA:
/* Setting for VGA */
adj = sharpsl_param.phadadj;
@@ -337,7 +337,7 @@ static void corgi_lcd_power_off(struct corgi_lcd *lcd)
static int corgi_lcd_set_mode(struct lcd_device *ld, struct fb_videomode *m)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev);
+ struct corgi_lcd *lcd = lcd_get_data(ld);
int mode = CORGI_LCD_MODE_QVGA;
if (m->xres == 640 || m->xres == 480)
@@ -364,7 +364,7 @@ static int corgi_lcd_set_mode(struct lcd_device *ld, struct fb_videomode *m)
static int corgi_lcd_set_power(struct lcd_device *ld, int power)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev);
+ struct corgi_lcd *lcd = lcd_get_data(ld);
if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
corgi_lcd_power_on(lcd);
@@ -378,7 +378,7 @@ static int corgi_lcd_set_power(struct lcd_device *ld, int power)
static int corgi_lcd_get_power(struct lcd_device *ld)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev);
+ struct corgi_lcd *lcd = lcd_get_data(ld);
return lcd->power;
}
@@ -391,7 +391,7 @@ static struct lcd_ops corgi_lcd_ops = {
static int corgi_bl_get_intensity(struct backlight_device *bd)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&bd->dev);
+ struct corgi_lcd *lcd = bl_get_data(bd);
return lcd->intensity;
}
@@ -409,10 +409,10 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
cont = !!(intensity & 0x20) ^ lcd->gpio_backlight_cont_inverted;
if (gpio_is_valid(lcd->gpio_backlight_cont))
- gpio_set_value(lcd->gpio_backlight_cont, cont);
+ gpio_set_value_cansleep(lcd->gpio_backlight_cont, cont);
if (gpio_is_valid(lcd->gpio_backlight_on))
- gpio_set_value(lcd->gpio_backlight_on, intensity);
+ gpio_set_value_cansleep(lcd->gpio_backlight_on, intensity);
if (lcd->kick_battery)
lcd->kick_battery();
@@ -423,7 +423,7 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
static int corgi_bl_update_status(struct backlight_device *bd)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&bd->dev);
+ struct corgi_lcd *lcd = bl_get_data(bd);
int intensity = bd->props.brightness;
if (bd->props.power != FB_BLANK_UNBLANK)
@@ -460,7 +460,7 @@ static const struct backlight_ops corgi_bl_ops = {
#ifdef CONFIG_PM
static int corgi_lcd_suspend(struct spi_device *spi, pm_message_t state)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev);
+ struct corgi_lcd *lcd = spi_get_drvdata(spi);
corgibl_flags |= CORGIBL_SUSPENDED;
corgi_bl_set_intensity(lcd, 0);
@@ -470,7 +470,7 @@ static int corgi_lcd_suspend(struct spi_device *spi, pm_message_t state)
static int corgi_lcd_resume(struct spi_device *spi)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev);
+ struct corgi_lcd *lcd = spi_get_drvdata(spi);
corgibl_flags &= ~CORGIBL_SUSPENDED;
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK);
@@ -495,8 +495,9 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_on,
"BL_ON");
if (err) {
- dev_err(&spi->dev, "failed to request GPIO%d for "
- "backlight_on\n", pdata->gpio_backlight_on);
+ dev_err(&spi->dev,
+ "failed to request GPIO%d for backlight_on\n",
+ pdata->gpio_backlight_on);
return err;
}
@@ -508,8 +509,9 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_cont,
"BL_CONT");
if (err) {
- dev_err(&spi->dev, "failed to request GPIO%d for "
- "backlight_cont\n", pdata->gpio_backlight_cont);
+ dev_err(&spi->dev,
+ "failed to request GPIO%d for backlight_cont\n",
+ pdata->gpio_backlight_cont);
return err;
}
@@ -575,7 +577,7 @@ static int corgi_lcd_probe(struct spi_device *spi)
lcd->kick_battery = pdata->kick_battery;
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK);
backlight_update_status(lcd->bl_dev);
@@ -592,7 +594,7 @@ err_unregister_lcd:
static int corgi_lcd_remove(struct spi_device *spi)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev);
+ struct corgi_lcd *lcd = spi_get_drvdata(spi);
lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
lcd->bl_dev->props.brightness = 0;
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 573c7ece0fde..8179cef0730f 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -2,10 +2,10 @@
* Backlight driver for Dialog Semiconductor DA9030/DA9034
*
* Copyright (C) 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
+ * Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -164,15 +164,14 @@ static int da903x_backlight_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int da903x_backlight_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
+
return da903x_backlight_set(bl, 0);
}
static int da903x_backlight_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
backlight_update_status(bl);
return 0;
@@ -199,7 +198,7 @@ static struct platform_driver da903x_backlight_driver = {
module_platform_driver(da903x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Dialog Semiconductor DA9030/DA9034");
-MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
- "Mike Rapoport <mike@compulab.co.il>");
+MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da903x-backlight");
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index ac196181fe45..842da5a3ac4f 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -34,7 +34,7 @@ enum {
DA9052_TYPE_WLED3,
};
-static unsigned char wled_bank[] = {
+static const unsigned char wled_bank[] = {
DA9052_LED1_CONF_REG,
DA9052_LED2_CONF_REG,
DA9052_LED3_CONF_REG,
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 8c660fcd250d..0ae155be9c89 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -97,8 +97,8 @@ static int genericbl_probe(struct platform_device *pdev)
props.max_brightness = machinfo->max_intensity;
bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
&props);
- if (IS_ERR (bd))
- return PTR_ERR (bd);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
platform_set_drvdata(pdev, bd);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index c99966342448..5cefd73526f8 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -26,7 +26,7 @@
#define HP680_DEFAULT_INTENSITY 10
static int hp680bl_suspended;
-static int current_intensity = 0;
+static int current_intensity;
static DEFINE_SPINLOCK(bl_lock);
static void hp680bl_send_intensity(struct backlight_device *bd)
@@ -168,7 +168,7 @@ static int __init hp680bl_init(void)
static void __exit hp680bl_exit(void)
{
platform_device_unregister(hp680bl_device);
- platform_driver_unregister(&hp680bl_driver);
+ platform_driver_unregister(&hp680bl_driver);
}
module_init(hp680bl_init);
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
new file mode 100644
index 000000000000..a0482b567bfe
--- /dev/null
+++ b/drivers/video/backlight/hx8357.c
@@ -0,0 +1,497 @@
+/*
+ * Driver for the Himax HX-8357 LCD Controller
+ *
+ * Copyright 2012 Free Electrons
+ *
+ * Licensed under the GPLv2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/lcd.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+
+#define HX8357_NUM_IM_PINS 3
+
+#define HX8357_SWRESET 0x01
+#define HX8357_GET_RED_CHANNEL 0x06
+#define HX8357_GET_GREEN_CHANNEL 0x07
+#define HX8357_GET_BLUE_CHANNEL 0x08
+#define HX8357_GET_POWER_MODE 0x0a
+#define HX8357_GET_MADCTL 0x0b
+#define HX8357_GET_PIXEL_FORMAT 0x0c
+#define HX8357_GET_DISPLAY_MODE 0x0d
+#define HX8357_GET_SIGNAL_MODE 0x0e
+#define HX8357_GET_DIAGNOSTIC_RESULT 0x0f
+#define HX8357_ENTER_SLEEP_MODE 0x10
+#define HX8357_EXIT_SLEEP_MODE 0x11
+#define HX8357_ENTER_PARTIAL_MODE 0x12
+#define HX8357_ENTER_NORMAL_MODE 0x13
+#define HX8357_EXIT_INVERSION_MODE 0x20
+#define HX8357_ENTER_INVERSION_MODE 0x21
+#define HX8357_SET_DISPLAY_OFF 0x28
+#define HX8357_SET_DISPLAY_ON 0x29
+#define HX8357_SET_COLUMN_ADDRESS 0x2a
+#define HX8357_SET_PAGE_ADDRESS 0x2b
+#define HX8357_WRITE_MEMORY_START 0x2c
+#define HX8357_READ_MEMORY_START 0x2e
+#define HX8357_SET_PARTIAL_AREA 0x30
+#define HX8357_SET_SCROLL_AREA 0x33
+#define HX8357_SET_TEAR_OFF 0x34
+#define HX8357_SET_TEAR_ON 0x35
+#define HX8357_SET_ADDRESS_MODE 0x36
+#define HX8357_SET_SCROLL_START 0x37
+#define HX8357_EXIT_IDLE_MODE 0x38
+#define HX8357_ENTER_IDLE_MODE 0x39
+#define HX8357_SET_PIXEL_FORMAT 0x3a
+#define HX8357_SET_PIXEL_FORMAT_DBI_3BIT (0x1)
+#define HX8357_SET_PIXEL_FORMAT_DBI_16BIT (0x5)
+#define HX8357_SET_PIXEL_FORMAT_DBI_18BIT (0x6)
+#define HX8357_SET_PIXEL_FORMAT_DPI_3BIT (0x1 << 4)
+#define HX8357_SET_PIXEL_FORMAT_DPI_16BIT (0x5 << 4)
+#define HX8357_SET_PIXEL_FORMAT_DPI_18BIT (0x6 << 4)
+#define HX8357_WRITE_MEMORY_CONTINUE 0x3c
+#define HX8357_READ_MEMORY_CONTINUE 0x3e
+#define HX8357_SET_TEAR_SCAN_LINES 0x44
+#define HX8357_GET_SCAN_LINES 0x45
+#define HX8357_READ_DDB_START 0xa1
+#define HX8357_SET_DISPLAY_MODE 0xb4
+#define HX8357_SET_DISPLAY_MODE_RGB_THROUGH (0x3)
+#define HX8357_SET_DISPLAY_MODE_RGB_INTERFACE (1 << 4)
+#define HX8357_SET_PANEL_DRIVING 0xc0
+#define HX8357_SET_DISPLAY_FRAME 0xc5
+#define HX8357_SET_RGB 0xc6
+#define HX8357_SET_RGB_ENABLE_HIGH (1 << 1)
+#define HX8357_SET_GAMMA 0xc8
+#define HX8357_SET_POWER 0xd0
+#define HX8357_SET_VCOM 0xd1
+#define HX8357_SET_POWER_NORMAL 0xd2
+#define HX8357_SET_PANEL_RELATED 0xe9
+
+struct hx8357_data {
+ unsigned im_pins[HX8357_NUM_IM_PINS];
+ unsigned reset;
+ struct spi_device *spi;
+ int state;
+};
+
+static u8 hx8357_seq_power[] = {
+ HX8357_SET_POWER, 0x44, 0x41, 0x06,
+};
+
+static u8 hx8357_seq_vcom[] = {
+ HX8357_SET_VCOM, 0x40, 0x10,
+};
+
+static u8 hx8357_seq_power_normal[] = {
+ HX8357_SET_POWER_NORMAL, 0x05, 0x12,
+};
+
+static u8 hx8357_seq_panel_driving[] = {
+ HX8357_SET_PANEL_DRIVING, 0x14, 0x3b, 0x00, 0x02, 0x11,
+};
+
+static u8 hx8357_seq_display_frame[] = {
+ HX8357_SET_DISPLAY_FRAME, 0x0c,
+};
+
+static u8 hx8357_seq_panel_related[] = {
+ HX8357_SET_PANEL_RELATED, 0x01,
+};
+
+static u8 hx8357_seq_undefined1[] = {
+ 0xea, 0x03, 0x00, 0x00,
+};
+
+static u8 hx8357_seq_undefined2[] = {
+ 0xeb, 0x40, 0x54, 0x26, 0xdb,
+};
+
+static u8 hx8357_seq_gamma[] = {
+ HX8357_SET_GAMMA, 0x00, 0x15, 0x00, 0x22, 0x00,
+ 0x08, 0x77, 0x26, 0x77, 0x22, 0x04, 0x00,
+};
+
+static u8 hx8357_seq_address_mode[] = {
+ HX8357_SET_ADDRESS_MODE, 0xc0,
+};
+
+static u8 hx8357_seq_pixel_format[] = {
+ HX8357_SET_PIXEL_FORMAT,
+ HX8357_SET_PIXEL_FORMAT_DPI_18BIT |
+ HX8357_SET_PIXEL_FORMAT_DBI_18BIT,
+};
+
+static u8 hx8357_seq_column_address[] = {
+ HX8357_SET_COLUMN_ADDRESS, 0x00, 0x00, 0x01, 0x3f,
+};
+
+static u8 hx8357_seq_page_address[] = {
+ HX8357_SET_PAGE_ADDRESS, 0x00, 0x00, 0x01, 0xdf,
+};
+
+static u8 hx8357_seq_rgb[] = {
+ HX8357_SET_RGB, 0x02,
+};
+
+static u8 hx8357_seq_display_mode[] = {
+ HX8357_SET_DISPLAY_MODE,
+ HX8357_SET_DISPLAY_MODE_RGB_THROUGH |
+ HX8357_SET_DISPLAY_MODE_RGB_INTERFACE,
+};
+
+static int hx8357_spi_write_then_read(struct lcd_device *lcdev,
+ u8 *txbuf, u16 txlen,
+ u8 *rxbuf, u16 rxlen)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ u16 *local_txbuf = NULL;
+ int ret = 0;
+
+ memset(xfer, 0, sizeof(xfer));
+ spi_message_init(&msg);
+
+ if (txlen) {
+ int i;
+
+ local_txbuf = kcalloc(txlen, sizeof(*local_txbuf), GFP_KERNEL);
+
+ if (!local_txbuf)
+ return -ENOMEM;
+
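+		/*
+		 * The 3-wire transfer uses 9-bit words: the first word
+		 * carries the command with bit 8 clear, and every following
+		 * word is a parameter with bit 8 (the data/command flag) set.
+		 */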
+ for (i = 0; i < txlen; i++) {
+ local_txbuf[i] = txbuf[i];
+ if (i > 0)
+ local_txbuf[i] |= 1 << 8;
+ }
+
+ xfer[0].len = 2 * txlen;
+ xfer[0].bits_per_word = 9;
+ xfer[0].tx_buf = local_txbuf;
+ spi_message_add_tail(&xfer[0], &msg);
+ }
+
+ if (rxlen) {
+ xfer[1].len = rxlen;
+ xfer[1].bits_per_word = 8;
+ xfer[1].rx_buf = rxbuf;
+ spi_message_add_tail(&xfer[1], &msg);
+ }
+
+ ret = spi_sync(lcd->spi, &msg);
+ if (ret < 0)
+ dev_err(&lcdev->dev, "Couldn't send SPI data\n");
+
+ if (txlen)
+ kfree(local_txbuf);
+
+ return ret;
+}
+
+static inline int hx8357_spi_write_array(struct lcd_device *lcdev,
+ u8 *value, u8 len)
+{
+ return hx8357_spi_write_then_read(lcdev, value, len, NULL, 0);
+}
+
+static inline int hx8357_spi_write_byte(struct lcd_device *lcdev,
+ u8 value)
+{
+ return hx8357_spi_write_then_read(lcdev, &value, 1, NULL, 0);
+}
+
+static int hx8357_enter_standby(struct lcd_device *lcdev)
+{
+ int ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_OFF);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(10000, 12000);
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_ENTER_SLEEP_MODE);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ return 0;
+}
+
+static int hx8357_exit_standby(struct lcd_device *lcdev)
+{
+ int ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_EXIT_SLEEP_MODE);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hx8357_lcd_init(struct lcd_device *lcdev)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+ int ret;
+
+ /*
+ * Set the interface selection pins to SPI mode, with three
+ * wires
+ */
+ gpio_set_value_cansleep(lcd->im_pins[0], 1);
+ gpio_set_value_cansleep(lcd->im_pins[1], 0);
+ gpio_set_value_cansleep(lcd->im_pins[2], 1);
+
+ /* Reset the screen */
+ gpio_set_value(lcd->reset, 1);
+ usleep_range(10000, 12000);
+ gpio_set_value(lcd->reset, 0);
+ usleep_range(10000, 12000);
+ gpio_set_value(lcd->reset, 1);
+ msleep(120);
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_power,
+ ARRAY_SIZE(hx8357_seq_power));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_vcom,
+ ARRAY_SIZE(hx8357_seq_vcom));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_power_normal,
+ ARRAY_SIZE(hx8357_seq_power_normal));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_panel_driving,
+ ARRAY_SIZE(hx8357_seq_panel_driving));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_display_frame,
+ ARRAY_SIZE(hx8357_seq_display_frame));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_panel_related,
+ ARRAY_SIZE(hx8357_seq_panel_related));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_undefined1,
+ ARRAY_SIZE(hx8357_seq_undefined1));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_undefined2,
+ ARRAY_SIZE(hx8357_seq_undefined2));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_gamma,
+ ARRAY_SIZE(hx8357_seq_gamma));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_address_mode,
+ ARRAY_SIZE(hx8357_seq_address_mode));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_pixel_format,
+ ARRAY_SIZE(hx8357_seq_pixel_format));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_column_address,
+ ARRAY_SIZE(hx8357_seq_column_address));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_page_address,
+ ARRAY_SIZE(hx8357_seq_page_address));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_rgb,
+ ARRAY_SIZE(hx8357_seq_rgb));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_display_mode,
+ ARRAY_SIZE(hx8357_seq_display_mode));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_EXIT_SLEEP_MODE);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(5000, 7000);
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_WRITE_MEMORY_START);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
+
+static int hx8357_set_power(struct lcd_device *lcdev, int power)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+ int ret = 0;
+
+ if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->state))
+ ret = hx8357_exit_standby(lcdev);
+ else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->state))
+ ret = hx8357_enter_standby(lcdev);
+
+ if (ret == 0)
+ lcd->state = power;
+ else
+ dev_warn(&lcdev->dev, "failed to set power mode %d\n", power);
+
+ return ret;
+}
+
+static int hx8357_get_power(struct lcd_device *lcdev)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+
+ return lcd->state;
+}
+
+static struct lcd_ops hx8357_ops = {
+ .set_power = hx8357_set_power,
+ .get_power = hx8357_get_power,
+};
+
+static int hx8357_probe(struct spi_device *spi)
+{
+ struct lcd_device *lcdev;
+ struct hx8357_data *lcd;
+ int i, ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd) {
+ dev_err(&spi->dev, "Couldn't allocate lcd internal structure!\n");
+ return -ENOMEM;
+ }
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "SPI setup failed.\n");
+ return ret;
+ }
+
+ lcd->spi = spi;
+
+ lcd->reset = of_get_named_gpio(spi->dev.of_node, "gpios-reset", 0);
+ if (!gpio_is_valid(lcd->reset)) {
+ dev_err(&spi->dev, "Missing dt property: gpios-reset\n");
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&spi->dev, lcd->reset,
+ GPIOF_OUT_INIT_HIGH,
+ "hx8357-reset");
+ if (ret) {
+ dev_err(&spi->dev,
+ "failed to request gpio %d: %d\n",
+ lcd->reset, ret);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HX8357_NUM_IM_PINS; i++) {
+ lcd->im_pins[i] = of_get_named_gpio(spi->dev.of_node,
+ "im-gpios", i);
+ if (lcd->im_pins[i] == -EPROBE_DEFER) {
+ dev_info(&spi->dev, "GPIO requested is not here yet, deferring the probe\n");
+ return -EPROBE_DEFER;
+ }
+ if (!gpio_is_valid(lcd->im_pins[i])) {
+ dev_err(&spi->dev, "Missing dt property: im-gpios\n");
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&spi->dev, lcd->im_pins[i],
+ GPIOF_OUT_INIT_LOW, "im_pins");
+ if (ret) {
+ dev_err(&spi->dev, "failed to request gpio %d: %d\n",
+ lcd->im_pins[i], ret);
+ return -EINVAL;
+ }
+ }
+
+ lcdev = lcd_device_register("mxsfb", &spi->dev, lcd, &hx8357_ops);
+ if (IS_ERR(lcdev)) {
+ ret = PTR_ERR(lcdev);
+ return ret;
+ }
+ spi_set_drvdata(spi, lcdev);
+
+ ret = hx8357_lcd_init(lcdev);
+ if (ret) {
+ dev_err(&spi->dev, "Couldn't initialize panel\n");
+ goto init_error;
+ }
+
+ dev_info(&spi->dev, "Panel probed\n");
+
+ return 0;
+
+init_error:
+ lcd_device_unregister(lcdev);
+ return ret;
+}
+
+static int hx8357_remove(struct spi_device *spi)
+{
+ struct lcd_device *lcdev = spi_get_drvdata(spi);
+
+ lcd_device_unregister(lcdev);
+ return 0;
+}
+
+static const struct of_device_id hx8357_dt_ids[] = {
+ { .compatible = "himax,hx8357" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
+
+static struct spi_driver hx8357_driver = {
+ .probe = hx8357_probe,
+ .remove = hx8357_remove,
+ .driver = {
+ .name = "hx8357",
+ .of_match_table = of_match_ptr(hx8357_dt_ids),
+ },
+};
+
+module_spi_driver(hx8357_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Himax HX-8357 LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 66cc313185ad..1235bf9defc4 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -45,7 +45,7 @@ static inline int ili9320_write_spi(struct ili9320 *ili,
/* second message is the data to transfer */
data[0] = spi->id | ILI9320_SPI_DATA | ILI9320_SPI_WRITE;
- data[1] = value >> 8;
+ data[1] = value >> 8;
data[2] = value;
return spi_sync(spi->dev, &spi->message);
@@ -56,11 +56,10 @@ int ili9320_write(struct ili9320 *ili, unsigned int reg, unsigned int value)
dev_dbg(ili->dev, "write: reg=%02x, val=%04x\n", reg, value);
return ili->write(ili, reg, value);
}
-
EXPORT_SYMBOL_GPL(ili9320_write);
int ili9320_write_regs(struct ili9320 *ili,
- struct ili9320_reg *values,
+ const struct ili9320_reg *values,
int nr_values)
{
int index;
@@ -74,7 +73,6 @@ int ili9320_write_regs(struct ili9320 *ili,
return 0;
}
-
EXPORT_SYMBOL_GPL(ili9320_write_regs);
static void ili9320_reset(struct ili9320 *lcd)
@@ -260,7 +258,6 @@ int ili9320_probe_spi(struct spi_device *spi,
return ret;
}
-
EXPORT_SYMBOL_GPL(ili9320_probe_spi);
int ili9320_remove(struct ili9320 *ili)
@@ -271,7 +268,6 @@ int ili9320_remove(struct ili9320 *ili)
return 0;
}
-
EXPORT_SYMBOL_GPL(ili9320_remove);
#ifdef CONFIG_PM
@@ -296,20 +292,17 @@ int ili9320_suspend(struct ili9320 *lcd, pm_message_t state)
return 0;
}
-
EXPORT_SYMBOL_GPL(ili9320_suspend);
int ili9320_resume(struct ili9320 *lcd)
{
dev_info(lcd->dev, "resuming from power state %d\n", lcd->power);
- if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) {
+ if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP)
ili9320_write(lcd, ILI9320_POWER1, 0x00);
- }
return ili9320_power(lcd, FB_BLANK_UNBLANK);
}
-
EXPORT_SYMBOL_GPL(ili9320_resume);
#endif
@@ -318,7 +311,6 @@ void ili9320_shutdown(struct ili9320 *lcd)
{
ili9320_power(lcd, FB_BLANK_POWERDOWN);
}
-
EXPORT_SYMBOL_GPL(ili9320_shutdown);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
diff --git a/drivers/video/backlight/ili9320.h b/drivers/video/backlight/ili9320.h
index e388eca7cac5..e0db738f7bb9 100644
--- a/drivers/video/backlight/ili9320.h
+++ b/drivers/video/backlight/ili9320.h
@@ -63,7 +63,7 @@ extern int ili9320_write(struct ili9320 *ili,
unsigned int reg, unsigned int value);
extern int ili9320_write_regs(struct ili9320 *ili,
- struct ili9320_reg *values,
+ const struct ili9320_reg *values,
int nr_values);
/* Device probe */
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 16f593b64427..fef6ce4fad71 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -48,7 +48,7 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
jornada_ssp_end();
- return (BL_MAX_BRIGHT - ret);
+ return BL_MAX_BRIGHT - ret;
}
static int jornada_bl_update_status(struct backlight_device *bd)
@@ -77,18 +77,23 @@ static int jornada_bl_update_status(struct backlight_device *bd)
goto out;
}
- /* at this point we expect that the mcu has accepted
- our command and is waiting for our new value
- please note that maximum brightness is 255,
- but due to physical layout it is equal to 0, so we simply
- invert the value (MAX VALUE - NEW VALUE). */
- if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
+ /*
+ * at this point we expect that the mcu has accepted
+ * our command and is waiting for our new value
+ * please note that maximum brightness is 255,
+ * but due to physical layout it is equal to 0, so we simply
+ * invert the value (MAX VALUE - NEW VALUE).
+ */
+ if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness)
+ != TXDUMMY) {
pr_err("set brightness failed\n");
ret = -ETIMEDOUT;
}
- /* If infact we get an TXDUMMY as output we are happy and dont
- make any further comments about it */
+ /*
+ * If in fact we get a TXDUMMY as output, we are happy and don't
+ * make any further comments about it
+ */
out:
jornada_ssp_end();
@@ -121,9 +126,11 @@ static int jornada_bl_probe(struct platform_device *pdev)
bd->props.power = FB_BLANK_UNBLANK;
bd->props.brightness = BL_DEF_BRIGHT;
- /* note. make sure max brightness is set otherwise
- you will get seemingly non-related errors when
- trying to change brightness */
+ /*
+ * note. make sure max brightness is set otherwise
+ * you will get seemingly non-related errors when
+ * trying to change brightness
+ */
jornada_bl_update_status(bd);
platform_set_drvdata(pdev, bd);
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index f5aa0a5961d6..fb6155771326 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -4,7 +4,7 @@
* Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- * Inspired by Marek Vasut work in l4f00242t03.c
+ * Inspired by Marek Vasut work in l4f00242t03.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -33,7 +33,6 @@ struct l4f00242t03_priv {
struct regulator *core_reg;
};
-
static void l4f00242t03_reset(unsigned int gpio)
{
pr_debug("l4f00242t03_reset.\n");
@@ -50,7 +49,7 @@ static void l4f00242t03_reset(unsigned int gpio)
static void l4f00242t03_lcd_init(struct spi_device *spi)
{
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
- struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+ struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
dev_dbg(&spi->dev, "initializing LCD\n");
@@ -71,7 +70,7 @@ static void l4f00242t03_lcd_init(struct spi_device *spi)
static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
{
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
- struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+ struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "Powering down LCD\n");
@@ -169,7 +168,7 @@ static int l4f00242t03_probe(struct spi_device *spi)
return -ENOMEM;
}
- dev_set_drvdata(&spi->dev, priv);
+ spi_set_drvdata(spi, priv);
spi->bits_per_word = 9;
spi_setup(spi);
@@ -191,27 +190,24 @@ static int l4f00242t03_probe(struct spi_device *spi)
return ret;
}
- priv->io_reg = regulator_get(&spi->dev, "vdd");
+ priv->io_reg = devm_regulator_get(&spi->dev, "vdd");
if (IS_ERR(priv->io_reg)) {
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
return PTR_ERR(priv->io_reg);
}
- priv->core_reg = regulator_get(&spi->dev, "vcore");
+ priv->core_reg = devm_regulator_get(&spi->dev, "vcore");
if (IS_ERR(priv->core_reg)) {
- ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
- goto err1;
+ return PTR_ERR(priv->core_reg);
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
- if (IS_ERR(priv->ld)) {
- ret = PTR_ERR(priv->ld);
- goto err2;
- }
+ if (IS_ERR(priv->ld))
+ return PTR_ERR(priv->ld);
/* Init the LCD */
l4f00242t03_lcd_init(spi);
@@ -221,33 +217,22 @@ static int l4f00242t03_probe(struct spi_device *spi)
dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
return 0;
-
-err2:
- regulator_put(priv->core_reg);
-err1:
- regulator_put(priv->io_reg);
-
- return ret;
}
static int l4f00242t03_remove(struct spi_device *spi)
{
- struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+ struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
lcd_device_unregister(priv->ld);
-
- dev_set_drvdata(&spi->dev, NULL);
-
- regulator_put(priv->io_reg);
- regulator_put(priv->core_reg);
+ spi_set_drvdata(spi, NULL);
return 0;
}
static void l4f00242t03_shutdown(struct spi_device *spi)
{
- struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+ struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
if (priv)
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
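
The l4f00242t03 conversion above moves from regulator_get()/regulator_put() to the managed devm_regulator_get(), which is why the err1/err2 unwind labels and the explicit puts in remove() go away. A minimal sketch of the managed pattern, assuming a hypothetical "vdd" supply name:

#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

static int example_probe(struct spi_device *spi)
{
	struct regulator *vdd;

	/* reference is dropped automatically when the driver is unbound */
	vdd = devm_regulator_get(&spi->dev, "vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);	/* nothing to undo on failure */

	/*
	 * devm only manages the get/put pairing; enable/disable calls
	 * still have to be balanced by the driver itself.
	 */
	return 0;
}
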
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index a5d0d024bb92..34fb6bd798c8 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -108,7 +108,7 @@ static ssize_t lcd_show_power(struct device *dev, struct device_attribute *attr,
static ssize_t lcd_store_power(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- int rc = -ENXIO;
+ int rc;
struct lcd_device *ld = to_lcd_device(dev);
unsigned long power;
@@ -116,6 +116,8 @@ static ssize_t lcd_store_power(struct device *dev,
if (rc)
return rc;
+ rc = -ENXIO;
+
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
pr_debug("set power to %lu\n", power);
@@ -144,7 +146,7 @@ static ssize_t lcd_show_contrast(struct device *dev,
static ssize_t lcd_store_contrast(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- int rc = -ENXIO;
+ int rc;
struct lcd_device *ld = to_lcd_device(dev);
unsigned long contrast;
@@ -152,6 +154,8 @@ static ssize_t lcd_store_contrast(struct device *dev,
if (rc)
return rc;
+ rc = -ENXIO;
+
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
pr_debug("set contrast to %lu\n", contrast);
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index 1cb352418513..1b642f5f381a 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -9,29 +9,20 @@
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/wait.h>
-#include <linux/fb.h>
+#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/fb.h>
#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/lcd.h>
-#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
#include "ld9040_gamma.h"
@@ -43,7 +34,6 @@
#define MIN_BRIGHTNESS 0
#define MAX_BRIGHTNESS 24
-#define power_is_on(pwr) ((pwr) <= FB_BLANK_NORMAL)
struct ld9040 {
struct device *dev;
@@ -78,7 +68,7 @@ static void ld9040_regulator_enable(struct ld9040 *lcd)
lcd->enabled = true;
}
- mdelay(pd->power_on_delay);
+ msleep(pd->power_on_delay);
out:
mutex_unlock(&lcd->lock);
}
@@ -474,8 +464,9 @@ static int ld9040_panel_send_sequence(struct ld9040 *lcd,
ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
if (ret)
break;
- } else
- udelay(wbuf[i+1]*1000);
+ } else {
+ msleep(wbuf[i+1]);
+ }
i += 2;
}
@@ -513,14 +504,9 @@ gamma_err:
static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
{
- int ret = 0;
-
- ret = _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
-
- return ret;
+ return _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
}
-
static int ld9040_ldi_init(struct ld9040 *lcd)
{
int ret, i;
@@ -539,7 +525,7 @@ static int ld9040_ldi_init(struct ld9040 *lcd)
for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
/* workaround: minimum delay time for transferring CMD */
- udelay(300);
+ usleep_range(300, 310);
if (ret)
break;
}
@@ -549,11 +535,7 @@ static int ld9040_ldi_init(struct ld9040 *lcd)
static int ld9040_ldi_enable(struct ld9040 *lcd)
{
- int ret = 0;
-
- ret = ld9040_panel_send_sequence(lcd, seq_display_on);
-
- return ret;
+ return ld9040_panel_send_sequence(lcd, seq_display_on);
}
static int ld9040_ldi_disable(struct ld9040 *lcd)
@@ -566,25 +548,27 @@ static int ld9040_ldi_disable(struct ld9040 *lcd)
return ret;
}
+static int ld9040_power_is_on(int power)
+{
+ return power <= FB_BLANK_NORMAL;
+}
+
static int ld9040_power_on(struct ld9040 *lcd)
{
int ret = 0;
- struct lcd_platform_data *pd = NULL;
+ struct lcd_platform_data *pd;
+
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
/* lcd power on */
ld9040_regulator_enable(lcd);
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->reset(lcd->ld);
- mdelay(pd->reset_delay);
+ msleep(pd->reset_delay);
}
ret = ld9040_ldi_init(lcd);
@@ -604,14 +588,10 @@ static int ld9040_power_on(struct ld9040 *lcd)
static int ld9040_power_off(struct ld9040 *lcd)
{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
+ int ret;
+ struct lcd_platform_data *pd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
ret = ld9040_ldi_disable(lcd);
if (ret) {
@@ -619,7 +599,7 @@ static int ld9040_power_off(struct ld9040 *lcd)
return -EIO;
}
- mdelay(pd->power_off_delay);
+ msleep(pd->power_off_delay);
/* lcd power off */
ld9040_regulator_disable(lcd);
@@ -631,9 +611,9 @@ static int ld9040_power(struct ld9040 *lcd, int power)
{
int ret = 0;
- if (power_is_on(power) && !power_is_on(lcd->power))
+ if (ld9040_power_is_on(power) && !ld9040_power_is_on(lcd->power))
ret = ld9040_power_on(lcd);
- else if (!power_is_on(power) && power_is_on(lcd->power))
+ else if (!ld9040_power_is_on(power) && ld9040_power_is_on(lcd->power))
ret = ld9040_power_off(lcd);
if (!ret)
@@ -698,7 +678,6 @@ static const struct backlight_ops ld9040_backlight_ops = {
.update_status = ld9040_set_brightness,
};
-
static int ld9040_probe(struct spi_device *spi)
{
int ret = 0;
@@ -726,22 +705,20 @@ static int ld9040_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
}
mutex_init(&lcd->lock);
- ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
+ ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
return ret;
}
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_regulator;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -772,30 +749,28 @@ static int ld9040_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
ld9040_power(lcd, FB_BLANK_UNBLANK);
- } else
+ } else {
lcd->power = FB_BLANK_UNBLANK;
+ }
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
return 0;
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
-out_free_regulator:
- regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
return ret;
}
static int ld9040_remove(struct spi_device *spi)
{
- struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+ struct ld9040 *lcd = spi_get_drvdata(spi);
ld9040_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
return 0;
}
@@ -803,8 +778,7 @@ static int ld9040_remove(struct spi_device *spi)
#if defined(CONFIG_PM)
static int ld9040_suspend(struct spi_device *spi, pm_message_t mesg)
{
- int ret = 0;
- struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+ struct ld9040 *lcd = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
@@ -812,21 +786,16 @@ static int ld9040_suspend(struct spi_device *spi, pm_message_t mesg)
* when lcd panel is suspend, lcd panel becomes off
* regardless of status.
*/
- ret = ld9040_power(lcd, FB_BLANK_POWERDOWN);
-
- return ret;
+ return ld9040_power(lcd, FB_BLANK_POWERDOWN);
}
static int ld9040_resume(struct spi_device *spi)
{
- int ret = 0;
- struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+ struct ld9040 *lcd = spi_get_drvdata(spi);
lcd->power = FB_BLANK_POWERDOWN;
- ret = ld9040_power(lcd, FB_BLANK_UNBLANK);
-
- return ret;
+ return ld9040_power(lcd, FB_BLANK_UNBLANK);
}
#else
#define ld9040_suspend NULL
@@ -836,7 +805,7 @@ static int ld9040_resume(struct spi_device *spi)
/* Power down all displays on reboot, poweroff or halt. */
static void ld9040_shutdown(struct spi_device *spi)
{
- struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+ struct ld9040 *lcd = spi_get_drvdata(spi);
ld9040_power(lcd, FB_BLANK_POWERDOWN);
}
diff --git a/drivers/video/backlight/lm3630_bl.c b/drivers/video/backlight/lm3630_bl.c
index 0207bc0a4407..76a62e978fc3 100644
--- a/drivers/video/backlight/lm3630_bl.c
+++ b/drivers/video/backlight/lm3630_bl.c
@@ -37,7 +37,7 @@ enum lm3630_leds {
BLED_2
};
-static const char *bled_name[] = {
+static const char * const bled_name[] = {
[BLED_ALL] = "lm3630_bled", /*Bank1 controls all string */
[BLED_1] = "lm3630_bled1", /*Bank1 controls bled1 */
[BLED_2] = "lm3630_bled2", /*Bank1 or 2 controls bled2 */
@@ -320,7 +320,7 @@ static int lm3630_backlight_register(struct lm3630_chip_data *pchip,
backlight_device_register(name, pchip->dev, pchip,
&lm3630_bank_a_ops, &props);
if (IS_ERR(pchip->bled1))
- return -EIO;
+ return PTR_ERR(pchip->bled1);
break;
case BLED_2:
props.brightness = pdata->init_brt_led2;
@@ -329,7 +329,7 @@ static int lm3630_backlight_register(struct lm3630_chip_data *pchip,
backlight_device_register(name, pchip->dev, pchip,
&lm3630_bank_b_ops, &props);
if (IS_ERR(pchip->bled2))
- return -EIO;
+ return PTR_ERR(pchip->bled2);
break;
}
return 0;
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index b0e1e8ba4d9f..053964da8dd3 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -214,7 +214,7 @@ out_input:
}
-static DEVICE_ATTR(bled_mode, 0666, NULL, lm3639_bled_mode_store);
+static DEVICE_ATTR(bled_mode, S_IWUSR, NULL, lm3639_bled_mode_store);
/* torch */
static void lm3639_torch_brightness_set(struct led_classdev *cdev,
@@ -350,14 +350,13 @@ static int lm3639_probe(struct i2c_client *client,
&lm3639_bled_ops, &props);
if (IS_ERR(pchip->bled)) {
dev_err(&client->dev, "fail : backlight register\n");
- ret = -EIO;
+ ret = PTR_ERR(pchip->bled);
goto err_out;
}
ret = device_create_file(&(pchip->bled->dev), &dev_attr_bled_mode);
if (ret < 0) {
dev_err(&client->dev, "failed : add sysfs entries\n");
- ret = -EIO;
goto err_bled_mode;
}
@@ -369,7 +368,6 @@ static int lm3639_probe(struct i2c_client *client,
&client->dev, &pchip->cdev_flash);
if (ret < 0) {
dev_err(&client->dev, "fail : flash register\n");
- ret = -EIO;
goto err_flash;
}
@@ -381,7 +379,6 @@ static int lm3639_probe(struct i2c_client *client,
&client->dev, &pchip->cdev_torch);
if (ret < 0) {
dev_err(&client->dev, "fail : torch register\n");
- ret = -EIO;
goto err_torch;
}
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index b29c7071c9db..4eec47261cd3 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -31,7 +31,7 @@ struct lms283gf05_seq {
};
/* Magic sequences supplied by manufacturer, for details refer to datasheet */
-static struct lms283gf05_seq disp_initseq[] = {
+static const struct lms283gf05_seq disp_initseq[] = {
/* REG, VALUE, DELAY */
{ 0x07, 0x0000, 0 },
{ 0x13, 0x0000, 10 },
@@ -78,7 +78,7 @@ static struct lms283gf05_seq disp_initseq[] = {
{ 0x22, 0x0000, 0 }
};
-static struct lms283gf05_seq disp_pdwnseq[] = {
+static const struct lms283gf05_seq disp_pdwnseq[] = {
{ 0x07, 0x0016, 30 },
{ 0x07, 0x0004, 0 },
@@ -104,7 +104,7 @@ static void lms283gf05_reset(unsigned long gpio, bool inverted)
}
static void lms283gf05_toggle(struct spi_device *spi,
- struct lms283gf05_seq *seq, int sz)
+ const struct lms283gf05_seq *seq, int sz)
{
char buf[3];
int i;
@@ -158,13 +158,10 @@ static int lms283gf05_probe(struct spi_device *spi)
int ret = 0;
if (pdata != NULL) {
- ret = devm_gpio_request(&spi->dev, pdata->reset_gpio,
- "LMS285GF05 RESET");
- if (ret)
- return ret;
-
- ret = gpio_direction_output(pdata->reset_gpio,
- !pdata->reset_inverted);
+ ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio,
+ GPIOF_DIR_OUT | (!pdata->reset_inverted ?
+ GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
+ "LMS285GF05 RESET");
if (ret)
return ret;
}
@@ -183,7 +180,7 @@ static int lms283gf05_probe(struct spi_device *spi)
st->spi = spi;
st->ld = ld;
- dev_set_drvdata(&spi->dev, st);
+ spi_set_drvdata(spi, st);
/* kick in the LCD */
if (pdata)
@@ -195,7 +192,7 @@ static int lms283gf05_probe(struct spi_device *spi)
static int lms283gf05_remove(struct spi_device *spi)
{
- struct lms283gf05_state *st = dev_get_drvdata(&spi->dev);
+ struct lms283gf05_state *st = spi_get_drvdata(spi);
lcd_device_unregister(st->ld);
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
new file mode 100644
index 000000000000..b43882abefaf
--- /dev/null
+++ b/drivers/video/backlight/lms501kf03.c
@@ -0,0 +1,441 @@
+/*
+ * lms501kf03 TFT LCD panel driver.
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/lcd.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+#define COMMAND_ONLY 0x00
+#define DATA_ONLY 0x01
+
+struct lms501kf03 {
+ struct device *dev;
+ struct spi_device *spi;
+ unsigned int power;
+ struct lcd_device *ld;
+ struct lcd_platform_data *lcd_pd;
+};
+
+static const unsigned char seq_password[] = {
+ 0xb9, 0xff, 0x83, 0x69,
+};
+
+static const unsigned char seq_power[] = {
+ 0xb1, 0x01, 0x00, 0x34, 0x06, 0x00, 0x14, 0x14, 0x20, 0x28,
+ 0x12, 0x12, 0x17, 0x0a, 0x01, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6,
+};
+
+static const unsigned char seq_display[] = {
+ 0xb2, 0x00, 0x2b, 0x03, 0x03, 0x70, 0x00, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x03, 0x00, 0x01,
+};
+
+static const unsigned char seq_rgb_if[] = {
+ 0xb3, 0x09,
+};
+
+static const unsigned char seq_display_inv[] = {
+ 0xb4, 0x01, 0x08, 0x77, 0x0e, 0x06,
+};
+
+static const unsigned char seq_vcom[] = {
+ 0xb6, 0x4c, 0x2e,
+};
+
+static const unsigned char seq_gate[] = {
+ 0xd5, 0x00, 0x05, 0x03, 0x29, 0x01, 0x07, 0x17, 0x68, 0x13,
+ 0x37, 0x20, 0x31, 0x8a, 0x46, 0x9b, 0x57, 0x13, 0x02, 0x75,
+ 0xb9, 0x64, 0xa8, 0x07, 0x0f, 0x04, 0x07,
+};
+
+static const unsigned char seq_panel[] = {
+ 0xcc, 0x02,
+};
+
+static const unsigned char seq_col_mod[] = {
+ 0x3a, 0x77,
+};
+
+static const unsigned char seq_w_gamma[] = {
+ 0xe0, 0x00, 0x04, 0x09, 0x0f, 0x1f, 0x3f, 0x1f, 0x2f, 0x0a,
+ 0x0f, 0x10, 0x16, 0x18, 0x16, 0x17, 0x0d, 0x15, 0x00, 0x04,
+ 0x09, 0x0f, 0x38, 0x3f, 0x20, 0x39, 0x0a, 0x0f, 0x10, 0x16,
+ 0x18, 0x16, 0x17, 0x0d, 0x15,
+};
+
+static const unsigned char seq_rgb_gamma[] = {
+ 0xc1, 0x01, 0x03, 0x07, 0x0f, 0x1a, 0x22, 0x2c, 0x33, 0x3c,
+ 0x46, 0x4f, 0x58, 0x60, 0x69, 0x71, 0x79, 0x82, 0x89, 0x92,
+ 0x9a, 0xa1, 0xa9, 0xb1, 0xb9, 0xc1, 0xc9, 0xcf, 0xd6, 0xde,
+ 0xe5, 0xec, 0xf3, 0xf9, 0xff, 0xdd, 0x39, 0x07, 0x1c, 0xcb,
+ 0xab, 0x5f, 0x49, 0x80, 0x03, 0x07, 0x0f, 0x19, 0x20, 0x2a,
+ 0x31, 0x39, 0x42, 0x4b, 0x53, 0x5b, 0x63, 0x6b, 0x73, 0x7b,
+ 0x83, 0x8a, 0x92, 0x9b, 0xa2, 0xaa, 0xb2, 0xba, 0xc2, 0xca,
+ 0xd0, 0xd8, 0xe1, 0xe8, 0xf0, 0xf8, 0xff, 0xf7, 0xd8, 0xbe,
+ 0xa7, 0x39, 0x40, 0x85, 0x8c, 0xc0, 0x04, 0x07, 0x0c, 0x17,
+ 0x1c, 0x23, 0x2b, 0x34, 0x3b, 0x43, 0x4c, 0x54, 0x5b, 0x63,
+ 0x6a, 0x73, 0x7a, 0x82, 0x8a, 0x91, 0x98, 0xa1, 0xa8, 0xb0,
+ 0xb7, 0xc1, 0xc9, 0xcf, 0xd9, 0xe3, 0xea, 0xf4, 0xff, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const unsigned char seq_up_dn[] = {
+ 0x36, 0x10,
+};
+
+static const unsigned char seq_sleep_in[] = {
+ 0x10,
+};
+
+static const unsigned char seq_sleep_out[] = {
+ 0x11,
+};
+
+static const unsigned char seq_display_on[] = {
+ 0x29,
+};
+
+static const unsigned char seq_display_off[] = {
+ 0x10,
+};
+
+static int lms501kf03_spi_write_byte(struct lms501kf03 *lcd, int addr, int data)
+{
+ u16 buf[1];
+ struct spi_message msg;
+
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr << 8) | data;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(lcd->spi, &msg);
+}
+
+static int lms501kf03_spi_write(struct lms501kf03 *lcd, unsigned char address,
+ unsigned char command)
+{
+ return lms501kf03_spi_write_byte(lcd, address, command);
+}
+
+static int lms501kf03_panel_send_sequence(struct lms501kf03 *lcd,
+ const unsigned char *wbuf,
+ unsigned int len)
+{
+ int ret = 0, i = 0;
+
+ while (i < len) {
+ if (i == 0)
+ ret = lms501kf03_spi_write(lcd, COMMAND_ONLY, wbuf[i]);
+ else
+ ret = lms501kf03_spi_write(lcd, DATA_ONLY, wbuf[i]);
+ if (ret)
+ break;
+ i += 1;
+ }
+
+ return ret;
+}
+
+static int lms501kf03_ldi_init(struct lms501kf03 *lcd)
+{
+ int ret, i;
+ static const unsigned char *init_seq[] = {
+ seq_password,
+ seq_power,
+ seq_display,
+ seq_rgb_if,
+ seq_display_inv,
+ seq_vcom,
+ seq_gate,
+ seq_panel,
+ seq_col_mod,
+ seq_w_gamma,
+ seq_rgb_gamma,
+ seq_sleep_out,
+ };
+
+ static const unsigned int size_seq[] = {
+ ARRAY_SIZE(seq_password),
+ ARRAY_SIZE(seq_power),
+ ARRAY_SIZE(seq_display),
+ ARRAY_SIZE(seq_rgb_if),
+ ARRAY_SIZE(seq_display_inv),
+ ARRAY_SIZE(seq_vcom),
+ ARRAY_SIZE(seq_gate),
+ ARRAY_SIZE(seq_panel),
+ ARRAY_SIZE(seq_col_mod),
+ ARRAY_SIZE(seq_w_gamma),
+ ARRAY_SIZE(seq_rgb_gamma),
+ ARRAY_SIZE(seq_sleep_out),
+ };
+
+ for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
+ ret = lms501kf03_panel_send_sequence(lcd, init_seq[i],
+ size_seq[i]);
+ if (ret)
+ break;
+ }
+ /*
+ * According to the datasheet, 120ms delay time is required.
+ * After sleep out sequence, command is blocked for 120ms.
+ * Thus, LDI should wait for 120ms.
+ */
+ msleep(120);
+
+ return ret;
+}
+
+static int lms501kf03_ldi_enable(struct lms501kf03 *lcd)
+{
+ return lms501kf03_panel_send_sequence(lcd, seq_display_on,
+ ARRAY_SIZE(seq_display_on));
+}
+
+static int lms501kf03_ldi_disable(struct lms501kf03 *lcd)
+{
+ return lms501kf03_panel_send_sequence(lcd, seq_display_off,
+ ARRAY_SIZE(seq_display_off));
+}
+
+static int lms501kf03_power_is_on(int power)
+{
+ return (power) <= FB_BLANK_NORMAL;
+}
+
+static int lms501kf03_power_on(struct lms501kf03 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd;
+
+ pd = lcd->lcd_pd;
+
+ if (!pd->power_on) {
+ dev_err(lcd->dev, "power_on is NULL.\n");
+ return -EINVAL;
+ } else {
+ pd->power_on(lcd->ld, 1);
+ msleep(pd->power_on_delay);
+ }
+
+ if (!pd->reset) {
+ dev_err(lcd->dev, "reset is NULL.\n");
+ return -EINVAL;
+ } else {
+ pd->reset(lcd->ld);
+ msleep(pd->reset_delay);
+ }
+
+ ret = lms501kf03_ldi_init(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to initialize ldi.\n");
+ return ret;
+ }
+
+ ret = lms501kf03_ldi_enable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to enable ldi.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lms501kf03_power_off(struct lms501kf03 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd;
+
+ pd = lcd->lcd_pd;
+
+ ret = lms501kf03_ldi_disable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "lcd setting failed.\n");
+ return -EIO;
+ }
+
+ msleep(pd->power_off_delay);
+
+ pd->power_on(lcd->ld, 0);
+
+ return 0;
+}
+
+static int lms501kf03_power(struct lms501kf03 *lcd, int power)
+{
+ int ret = 0;
+
+ if (lms501kf03_power_is_on(power) &&
+ !lms501kf03_power_is_on(lcd->power))
+ ret = lms501kf03_power_on(lcd);
+ else if (!lms501kf03_power_is_on(power) &&
+ lms501kf03_power_is_on(lcd->power))
+ ret = lms501kf03_power_off(lcd);
+
+ if (!ret)
+ lcd->power = power;
+
+ return ret;
+}
+
+static int lms501kf03_get_power(struct lcd_device *ld)
+{
+ struct lms501kf03 *lcd = lcd_get_data(ld);
+
+ return lcd->power;
+}
+
+static int lms501kf03_set_power(struct lcd_device *ld, int power)
+{
+ struct lms501kf03 *lcd = lcd_get_data(ld);
+
+ if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
+ power != FB_BLANK_NORMAL) {
+ dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
+ return -EINVAL;
+ }
+
+ return lms501kf03_power(lcd, power);
+}
+
+static struct lcd_ops lms501kf03_lcd_ops = {
+ .get_power = lms501kf03_get_power,
+ .set_power = lms501kf03_set_power,
+};
+
+static int lms501kf03_probe(struct spi_device *spi)
+{
+ struct lms501kf03 *lcd = NULL;
+ struct lcd_device *ld = NULL;
+ int ret = 0;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct lms501kf03), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ /* lms501kf03 lcd panel uses 3-wire 9-bit SPI Mode. */
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi setup failed.\n");
+ return ret;
+ }
+
+ lcd->spi = spi;
+ lcd->dev = &spi->dev;
+
+ lcd->lcd_pd = spi->dev.platform_data;
+ if (!lcd->lcd_pd) {
+ dev_err(&spi->dev, "platform data is NULL\n");
+ return -EINVAL;
+ }
+
+ ld = lcd_device_register("lms501kf03", &spi->dev, lcd,
+ &lms501kf03_lcd_ops);
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
+
+ lcd->ld = ld;
+
+ if (!lcd->lcd_pd->lcd_enabled) {
+ /*
+ * if the lcd panel was left off by the bootloader,
+ * the current lcd status is powerdown, so enable
+ * the panel here.
+ */
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ lms501kf03_power(lcd, FB_BLANK_UNBLANK);
+ } else {
+ lcd->power = FB_BLANK_UNBLANK;
+ }
+
+ spi_set_drvdata(spi, lcd);
+
+ dev_info(&spi->dev, "lms501kf03 panel driver has been probed.\n");
+
+ return 0;
+}
+
+static int lms501kf03_remove(struct spi_device *spi)
+{
+ struct lms501kf03 *lcd = spi_get_drvdata(spi);
+
+ lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
+ lcd_device_unregister(lcd->ld);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+
+static int lms501kf03_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+ struct lms501kf03 *lcd = spi_get_drvdata(spi);
+
+ dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
+
+ /*
+ * when lcd panel is suspend, lcd panel becomes off
+ * regardless of status.
+ */
+ return lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
+}
+
+static int lms501kf03_resume(struct spi_device *spi)
+{
+ struct lms501kf03 *lcd = spi_get_drvdata(spi);
+
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ return lms501kf03_power(lcd, FB_BLANK_UNBLANK);
+}
+#else
+#define lms501kf03_suspend NULL
+#define lms501kf03_resume NULL
+#endif
+
+static void lms501kf03_shutdown(struct spi_device *spi)
+{
+ struct lms501kf03 *lcd = spi_get_drvdata(spi);
+
+ lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
+}
+
+static struct spi_driver lms501kf03_driver = {
+ .driver = {
+ .name = "lms501kf03",
+ .owner = THIS_MODULE,
+ },
+ .probe = lms501kf03_probe,
+ .remove = lms501kf03_remove,
+ .shutdown = lms501kf03_shutdown,
+ .suspend = lms501kf03_suspend,
+ .resume = lms501kf03_resume,
+};
+
+module_spi_driver(lms501kf03_driver);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("lms501kf03 LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 3a6d5419e3e3..146fea8aa431 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -107,7 +107,6 @@ void locomolcd_power(int on)
}
EXPORT_SYMBOL(locomolcd_power);
-
static int current_intensity;
static int locomolcd_set_intensity(struct backlight_device *bd)
@@ -122,13 +121,25 @@ static int locomolcd_set_intensity(struct backlight_device *bd)
intensity = 0;
switch (intensity) {
- /* AC and non-AC are handled differently, but produce same results in sharp code? */
- case 0: locomo_frontlight_set(locomolcd_dev, 0, 0, 161); break;
- case 1: locomo_frontlight_set(locomolcd_dev, 117, 0, 161); break;
- case 2: locomo_frontlight_set(locomolcd_dev, 163, 0, 148); break;
- case 3: locomo_frontlight_set(locomolcd_dev, 194, 0, 161); break;
- case 4: locomo_frontlight_set(locomolcd_dev, 194, 1, 161); break;
-
+ /*
+ * AC and non-AC are handled differently,
+ * but produce same results in sharp code?
+ */
+ case 0:
+ locomo_frontlight_set(locomolcd_dev, 0, 0, 161);
+ break;
+ case 1:
+ locomo_frontlight_set(locomolcd_dev, 117, 0, 161);
+ break;
+ case 2:
+ locomo_frontlight_set(locomolcd_dev, 163, 0, 148);
+ break;
+ case 3:
+ locomo_frontlight_set(locomolcd_dev, 194, 0, 161);
+ break;
+ case 4:
+ locomo_frontlight_set(locomolcd_dev, 194, 1, 161);
+ break;
default:
return -ENODEV;
}
@@ -175,9 +186,11 @@ static int locomolcd_probe(struct locomo_dev *ldev)
locomo_gpio_set_dir(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 0);
- /* the poodle_lcd_power function is called for the first time
+ /*
+ * the poodle_lcd_power function is called for the first time
* from fs_initcall, which is before locomo is activated.
- * We need to recall poodle_lcd_power here*/
+ * We need to recall poodle_lcd_power here
+ */
if (machine_is_poodle())
locomolcd_power(1);
@@ -190,8 +203,8 @@ static int locomolcd_probe(struct locomo_dev *ldev)
&ldev->dev, NULL,
&locomobl_data, &props);
- if (IS_ERR (locomolcd_bl_device))
- return PTR_ERR (locomolcd_bl_device);
+ if (IS_ERR(locomolcd_bl_device))
+ return PTR_ERR(locomolcd_bl_device);
/* Set up frontlight so that screen is readable */
locomolcd_bl_device->props.brightness = 2;
@@ -226,7 +239,6 @@ static struct locomo_driver poodle_lcd_driver = {
.resume = locomolcd_resume,
};
-
static int __init locomolcd_init(void)
{
return locomo_driver_register(&poodle_lcd_driver);
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index fd985e0681e9..7ae9ae6f4655 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -15,55 +15,78 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/platform_data/lp855x.h>
-
-/* Registers */
-#define BRIGHTNESS_CTRL 0x00
-#define DEVICE_CTRL 0x01
-#define EEPROM_START 0xA0
-#define EEPROM_END 0xA7
-#define EPROM_START 0xA0
-#define EPROM_END 0xAF
+#include <linux/pwm.h>
+
+/* LP8550/1/2/3/6 Registers */
+#define LP855X_BRIGHTNESS_CTRL 0x00
+#define LP855X_DEVICE_CTRL 0x01
+#define LP855X_EEPROM_START 0xA0
+#define LP855X_EEPROM_END 0xA7
+#define LP8556_EPROM_START 0xA0
+#define LP8556_EPROM_END 0xAF
+
+/* LP8557 Registers */
+#define LP8557_BL_CMD 0x00
+#define LP8557_BL_MASK 0x01
+#define LP8557_BL_ON 0x01
+#define LP8557_BL_OFF 0x00
+#define LP8557_BRIGHTNESS_CTRL 0x04
+#define LP8557_CONFIG 0x10
+#define LP8557_EPROM_START 0x10
+#define LP8557_EPROM_END 0x1E
#define BUF_SIZE 20
#define DEFAULT_BL_NAME "lcd-backlight"
#define MAX_BRIGHTNESS 255
+struct lp855x;
+
+/*
+ * struct lp855x_device_config
+ * @pre_init_device: init device function call before updating the brightness
+ * @reg_brightness: register address for brightness control
+ * @reg_devicectrl: register address for device control
+ * @post_init_device: late init device function call
+ */
+struct lp855x_device_config {
+ int (*pre_init_device)(struct lp855x *);
+ u8 reg_brightness;
+ u8 reg_devicectrl;
+ int (*post_init_device)(struct lp855x *);
+};
+
struct lp855x {
const char *chipname;
enum lp855x_chip_id chip_id;
+ struct lp855x_device_config *cfg;
struct i2c_client *client;
struct backlight_device *bl;
struct device *dev;
- struct mutex xfer_lock;
struct lp855x_platform_data *pdata;
+ struct pwm_device *pwm;
};
-static int lp855x_read_byte(struct lp855x *lp, u8 reg, u8 *data)
+static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
+{
+ return i2c_smbus_write_byte_data(lp->client, reg, data);
+}
+
+static int lp855x_update_bit(struct lp855x *lp, u8 reg, u8 mask, u8 data)
{
int ret;
+ u8 tmp;
- mutex_lock(&lp->xfer_lock);
ret = i2c_smbus_read_byte_data(lp->client, reg);
if (ret < 0) {
- mutex_unlock(&lp->xfer_lock);
dev_err(lp->dev, "failed to read 0x%.2x\n", reg);
return ret;
}
- mutex_unlock(&lp->xfer_lock);
- *data = (u8)ret;
- return 0;
-}
+ tmp = (u8)ret;
+ tmp &= ~mask;
+ tmp |= data & mask;
-static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
-{
- int ret;
-
- mutex_lock(&lp->xfer_lock);
- ret = i2c_smbus_write_byte_data(lp->client, reg, data);
- mutex_unlock(&lp->xfer_lock);
-
- return ret;
+ return lp855x_write_byte(lp, reg, tmp);
}
static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
@@ -75,12 +98,16 @@ static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
case LP8551:
case LP8552:
case LP8553:
- start = EEPROM_START;
- end = EEPROM_END;
+ start = LP855X_EEPROM_START;
+ end = LP855X_EEPROM_END;
break;
case LP8556:
- start = EPROM_START;
- end = EPROM_END;
+ start = LP8556_EPROM_START;
+ end = LP8556_EPROM_END;
+ break;
+ case LP8557:
+ start = LP8557_EPROM_START;
+ end = LP8557_EPROM_END;
break;
default:
return false;
@@ -89,21 +116,76 @@ static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
return (addr >= start && addr <= end);
}
-static int lp855x_init_registers(struct lp855x *lp)
+static int lp8557_bl_off(struct lp855x *lp)
+{
+ /* BL_ON = 0 before updating EPROM settings */
+ return lp855x_update_bit(lp, LP8557_BL_CMD, LP8557_BL_MASK,
+ LP8557_BL_OFF);
+}
+
+static int lp8557_bl_on(struct lp855x *lp)
+{
+ /* BL_ON = 1 after updating EPROM settings */
+ return lp855x_update_bit(lp, LP8557_BL_CMD, LP8557_BL_MASK,
+ LP8557_BL_ON);
+}
+
+static struct lp855x_device_config lp855x_dev_cfg = {
+ .reg_brightness = LP855X_BRIGHTNESS_CTRL,
+ .reg_devicectrl = LP855X_DEVICE_CTRL,
+};
+
+static struct lp855x_device_config lp8557_dev_cfg = {
+ .reg_brightness = LP8557_BRIGHTNESS_CTRL,
+ .reg_devicectrl = LP8557_CONFIG,
+ .pre_init_device = lp8557_bl_off,
+ .post_init_device = lp8557_bl_on,
+};
+
+/*
+ * Device specific configuration flow
+ *
+ * a) pre_init_device(optional)
+ * b) update the brightness register
+ * c) update device control register
+ * d) update ROM area(optional)
+ * e) post_init_device(optional)
+ *
+ */
+static int lp855x_configure(struct lp855x *lp)
{
u8 val, addr;
int i, ret;
struct lp855x_platform_data *pd = lp->pdata;
+ switch (lp->chip_id) {
+ case LP8550 ... LP8556:
+ lp->cfg = &lp855x_dev_cfg;
+ break;
+ case LP8557:
+ lp->cfg = &lp8557_dev_cfg;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (lp->cfg->pre_init_device) {
+ ret = lp->cfg->pre_init_device(lp);
+ if (ret) {
+ dev_err(lp->dev, "pre init device err: %d\n", ret);
+ goto err;
+ }
+ }
+
val = pd->initial_brightness;
- ret = lp855x_write_byte(lp, BRIGHTNESS_CTRL, val);
+ ret = lp855x_write_byte(lp, lp->cfg->reg_brightness, val);
if (ret)
- return ret;
+ goto err;
val = pd->device_control;
- ret = lp855x_write_byte(lp, DEVICE_CTRL, val);
+ ret = lp855x_write_byte(lp, lp->cfg->reg_devicectrl, val);
if (ret)
- return ret;
+ goto err;
if (pd->load_new_rom_data && pd->size_program) {
for (i = 0; i < pd->size_program; i++) {
@@ -114,13 +196,46 @@ static int lp855x_init_registers(struct lp855x *lp)
ret = lp855x_write_byte(lp, addr, val);
if (ret)
- return ret;
+ goto err;
}
}
+ if (lp->cfg->post_init_device) {
+ ret = lp->cfg->post_init_device(lp);
+ if (ret) {
+ dev_err(lp->dev, "post init device err: %d\n", ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
return ret;
}
+static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
+{
+ unsigned int period = lp->pdata->period_ns;
+ unsigned int duty = br * period / max_br;
+ struct pwm_device *pwm;
+
+ /* request pwm device with the consumer name */
+ if (!lp->pwm) {
+ pwm = devm_pwm_get(lp->dev, lp->chipname);
+ if (IS_ERR(pwm))
+ return;
+
+ lp->pwm = pwm;
+ }
+
+ pwm_config(lp->pwm, duty, period);
+ if (duty)
+ pwm_enable(lp->pwm);
+ else
+ pwm_disable(lp->pwm);
+}
+
static int lp855x_bl_update_status(struct backlight_device *bl)
{
struct lp855x *lp = bl_get_data(bl);
@@ -130,16 +245,14 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
bl->props.brightness = 0;
if (mode == PWM_BASED) {
- struct lp855x_pwm_data *pd = &lp->pdata->pwm_data;
int br = bl->props.brightness;
int max_br = bl->props.max_brightness;
- if (pd->pwm_set_intensity)
- pd->pwm_set_intensity(br, max_br);
+ lp855x_pwm_ctrl(lp, br, max_br);
} else if (mode == REGISTER_BASED) {
u8 val = bl->props.brightness;
- lp855x_write_byte(lp, BRIGHTNESS_CTRL, val);
+ lp855x_write_byte(lp, lp->cfg->reg_brightness, val);
}
return 0;
@@ -147,23 +260,6 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
static int lp855x_bl_get_brightness(struct backlight_device *bl)
{
- struct lp855x *lp = bl_get_data(bl);
- enum lp855x_brightness_ctrl_mode mode = lp->pdata->mode;
-
- if (mode == PWM_BASED) {
- struct lp855x_pwm_data *pd = &lp->pdata->pwm_data;
- int max_br = bl->props.max_brightness;
-
- if (pd->pwm_get_intensity)
- bl->props.brightness = pd->pwm_get_intensity(max_br);
-
- } else if (mode == REGISTER_BASED) {
- u8 val = 0;
-
- lp855x_read_byte(lp, BRIGHTNESS_CTRL, &val);
- bl->props.brightness = val;
- }
-
return bl->props.brightness;
}
@@ -266,13 +362,10 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
lp->chip_id = id->driver_data;
i2c_set_clientdata(cl, lp);
- mutex_init(&lp->xfer_lock);
-
- ret = lp855x_init_registers(lp);
+ ret = lp855x_configure(lp);
if (ret) {
- dev_err(lp->dev, "i2c communication err: %d", ret);
- if (mode == REGISTER_BASED)
- goto err_dev;
+ dev_err(lp->dev, "device config err: %d", ret);
+ goto err_dev;
}
ret = lp855x_backlight_register(lp);
@@ -315,6 +408,7 @@ static const struct i2c_device_id lp855x_ids[] = {
{"lp8552", LP8552},
{"lp8553", LP8553},
{"lp8556", LP8556},
+ {"lp8557", LP8557},
{ }
};
MODULE_DEVICE_TABLE(i2c, lp855x_ids);
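
The lp855x rework above drops the board-specific pwm_set_intensity() callback in favour of the generic PWM framework: the duty cycle is derived from the current brightness and the platform-supplied period, then programmed with pwm_config() and gated with pwm_enable()/pwm_disable(). A minimal sketch of that pattern using the same nanosecond-based PWM calls; the function name is illustrative:

#include <linux/pwm.h>

static void example_pwm_brightness(struct pwm_device *pwm,
				   unsigned int period_ns,
				   int brightness, int max_brightness)
{
	unsigned int duty_ns = brightness * period_ns / max_brightness;

	pwm_config(pwm, duty_ns, period_ns);
	if (duty_ns)
		pwm_enable(pwm);
	else
		pwm_disable(pwm);
}
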
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 226d813edf01..c0b4b8f2de98 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -252,7 +252,7 @@ static int ltv350qv_probe(struct spi_device *spi)
if (ret)
goto out_unregister;
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
return 0;
@@ -263,7 +263,7 @@ out_unregister:
static int ltv350qv_remove(struct spi_device *spi)
{
- struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);
+ struct ltv350qv *lcd = spi_get_drvdata(spi);
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
@@ -274,14 +274,14 @@ static int ltv350qv_remove(struct spi_device *spi)
#ifdef CONFIG_PM
static int ltv350qv_suspend(struct spi_device *spi, pm_message_t state)
{
- struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);
+ struct ltv350qv *lcd = spi_get_drvdata(spi);
return ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
}
static int ltv350qv_resume(struct spi_device *spi)
{
- struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);
+ struct ltv350qv *lcd = spi_get_drvdata(spi);
return ltv350qv_power(lcd, FB_BLANK_UNBLANK);
}
@@ -293,7 +293,7 @@ static int ltv350qv_resume(struct spi_device *spi)
/* Power down all displays on reboot, poweroff or halt */
static void ltv350qv_shutdown(struct spi_device *spi)
{
- struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);
+ struct ltv350qv *lcd = spi_get_drvdata(spi);
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
}
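
A recurring change in these hunks is the switch from dev_get_drvdata(&spi->dev) to spi_get_drvdata(spi), and likewise for the setter. The two are equivalent: the spi_*_drvdata() helpers are thin inline wrappers around the struct device drvdata accessors, so the conversions are behaviour-neutral. Roughly (a paraphrased sketch, not a verbatim copy of <linux/spi/spi.h>):

static inline void example_spi_set_drvdata(struct spi_device *spi, void *data)
{
	dev_set_drvdata(&spi->dev, data);
}

static inline void *example_spi_get_drvdata(struct spi_device *spi)
{
	return dev_get_drvdata(&spi->dev);
}
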
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index c6bec7aab87b..2c9bce050aa9 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -120,15 +120,13 @@ static int max8925_backlight_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (!res) {
dev_err(&pdev->dev, "No REG resource for mode control!\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_mode_cntl = res->start;
res = platform_get_resource(pdev, IORESOURCE_REG, 1);
if (!res) {
dev_err(&pdev->dev, "No REG resource for control!\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_cntl = res->start;
@@ -142,8 +140,7 @@ static int max8925_backlight_probe(struct platform_device *pdev)
&max8925_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out;
+ return PTR_ERR(bl);
}
bl->props.brightness = MAX_BRIGHTNESS;
@@ -166,8 +163,6 @@ static int max8925_backlight_probe(struct platform_device *pdev)
return 0;
out_brt:
backlight_device_unregister(bl);
-out:
- devm_kfree(&pdev->dev, data);
return ret;
}
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 9a046a4c98f5..627110163067 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -42,12 +42,12 @@ struct omap_backlight {
struct omap_backlight_config *pdata;
};
-static void inline omapbl_send_intensity(int intensity)
+static inline void omapbl_send_intensity(int intensity)
{
omap_writeb(intensity, OMAP_PWL_ENABLE);
}
-static void inline omapbl_send_enable(int enable)
+static inline void omapbl_send_enable(int enable)
{
omap_writeb(enable, OMAP_PWL_CLK_ENABLE);
}
@@ -77,7 +77,7 @@ static void omapbl_blank(struct omap_backlight *bl, int mode)
static int omapbl_suspend(struct platform_device *pdev, pm_message_t state)
{
struct backlight_device *dev = platform_get_drvdata(pdev);
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
omapbl_blank(bl, FB_BLANK_POWERDOWN);
return 0;
@@ -86,7 +86,7 @@ static int omapbl_suspend(struct platform_device *pdev, pm_message_t state)
static int omapbl_resume(struct platform_device *pdev)
{
struct backlight_device *dev = platform_get_drvdata(pdev);
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
omapbl_blank(bl, bl->powermode);
return 0;
@@ -98,7 +98,7 @@ static int omapbl_resume(struct platform_device *pdev)
static int omapbl_set_power(struct backlight_device *dev, int state)
{
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
omapbl_blank(bl, state);
bl->powermode = state;
@@ -108,7 +108,7 @@ static int omapbl_set_power(struct backlight_device *dev, int state)
static int omapbl_update_status(struct backlight_device *dev)
{
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
if (bl->current_intensity != dev->props.brightness) {
if (bl->powermode == FB_BLANK_UNBLANK)
@@ -124,7 +124,7 @@ static int omapbl_update_status(struct backlight_device *dev)
static int omapbl_get_intensity(struct backlight_device *dev)
{
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
return bl->current_intensity;
}
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
index 469cf0f109d2..fdbb6ee5027c 100644
--- a/drivers/video/backlight/ot200_bl.c
+++ b/drivers/video/backlight/ot200_bl.c
@@ -14,6 +14,7 @@
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/gpio.h>
+#include <linux/platform_device.h>
#include <linux/cs5535.h>
static struct cs5535_mfgpt_timer *pwm_timer;
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index 4ec30748b447..633b0a22fd64 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -71,8 +71,7 @@ static int pandora_backlight_update_status(struct backlight_device *bl)
* set PWM duty cycle to max. TPS61161 seems to use this
* to calibrate it's PWM sensitivity when it starts.
*/
- twl_i2c_write_u8(TWL4030_MODULE_PWM0, MAX_VALUE,
- TWL_PWM0_OFF);
+ twl_i2c_write_u8(TWL_MODULE_PWM, MAX_VALUE, TWL_PWM0_OFF);
/* first enable clock, then PWM0 out */
twl_i2c_read_u8(TWL4030_MODULE_INTBR, &r, TWL_INTBR_GPBR1);
@@ -90,8 +89,7 @@ static int pandora_backlight_update_status(struct backlight_device *bl)
usleep_range(2000, 10000);
}
- twl_i2c_write_u8(TWL4030_MODULE_PWM0, MIN_VALUE + brightness,
- TWL_PWM0_OFF);
+ twl_i2c_write_u8(TWL_MODULE_PWM, MIN_VALUE + brightness, TWL_PWM0_OFF);
done:
if (brightness != 0)
@@ -132,7 +130,7 @@ static int pandora_backlight_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bl);
/* 64 cycle period, ON position 0 */
- twl_i2c_write_u8(TWL4030_MODULE_PWM0, 0x80, TWL_PWM0_ON);
+ twl_i2c_write_u8(TWL_MODULE_PWM, 0x80, TWL_PWM0_ON);
bl->props.state |= PANDORABL_WAS_OFF;
bl->props.brightness = MAX_USER_VALUE;
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index 0087396007e4..e87c7a3394f3 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -52,7 +52,7 @@ int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit)
pcf_bl->brightness_limit = limit & 0x3f;
backlight_update_status(pcf_bl->bl);
- return 0;
+ return 0;
}
static int pcf50633_bl_update_status(struct backlight_device *bl)
@@ -136,8 +136,10 @@ static int pcf50633_bl_probe(struct platform_device *pdev)
pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDDIM, pdata->ramp_time);
- /* Should be different from bl_props.brightness, so we do not exit
- * update_status early the first time it's called */
+ /*
+ * Should be different from bl_props.brightness, so we do not exit
+ * update_status early the first time it's called
+ */
pcf_bl->brightness = pcf_bl->bl->props.brightness + 1;
backlight_update_status(pcf_bl->bl);
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 894bfc5ce422..17a6b83f97af 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -27,7 +27,7 @@ struct platform_lcd {
struct plat_lcd_data *pdata;
unsigned int power;
- unsigned int suspended : 1;
+ unsigned int suspended:1;
};
static inline struct platform_lcd *to_our_lcd(struct lcd_device *lcd)
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 069983ca49ff..f2f4c43d6e22 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -37,7 +37,7 @@ struct pwm_bl_data {
static int pwm_backlight_update_status(struct backlight_device *bl)
{
- struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
int brightness = bl->props.brightness;
int max = bl->props.max_brightness;
@@ -83,7 +83,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
static int pwm_backlight_check_fb(struct backlight_device *bl,
struct fb_info *info)
{
- struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
return !pb->check_fb || pb->check_fb(pb->dev, info);
}
@@ -264,7 +264,7 @@ err_alloc:
static int pwm_backlight_remove(struct platform_device *pdev)
{
struct backlight_device *bl = platform_get_drvdata(pdev);
- struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
backlight_device_unregister(bl);
pwm_config(pb->pwm, 0, pb->period);
@@ -278,7 +278,7 @@ static int pwm_backlight_remove(struct platform_device *pdev)
static int pwm_backlight_suspend(struct device *dev)
{
struct backlight_device *bl = dev_get_drvdata(dev);
- struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
if (pb->notify)
pb->notify(pb->dev, 0);
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 484e10dd1a8e..9c2677f0ef7d 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -9,28 +9,19 @@
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/wait.h>
-#include <linux/fb.h>
+#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/fb.h>
#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/lcd.h>
-#include <linux/backlight.h>
#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
#include "s6e63m0_gamma.h"
@@ -43,8 +34,6 @@
#define MIN_BRIGHTNESS 0
#define MAX_BRIGHTNESS 10
-#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
-
struct s6e63m0 {
struct device *dev;
struct spi_device *spi;
@@ -57,7 +46,7 @@ struct s6e63m0 {
struct lcd_platform_data *lcd_pd;
};
-static const unsigned short SEQ_PANEL_CONDITION_SET[] = {
+static const unsigned short seq_panel_condition_set[] = {
0xF8, 0x01,
DATA_ONLY, 0x27,
DATA_ONLY, 0x27,
@@ -76,7 +65,7 @@ static const unsigned short SEQ_PANEL_CONDITION_SET[] = {
ENDDEF, 0x0000
};
-static const unsigned short SEQ_DISPLAY_CONDITION_SET[] = {
+static const unsigned short seq_display_condition_set[] = {
0xf2, 0x02,
DATA_ONLY, 0x03,
DATA_ONLY, 0x1c,
@@ -90,7 +79,7 @@ static const unsigned short SEQ_DISPLAY_CONDITION_SET[] = {
ENDDEF, 0x0000
};
-static const unsigned short SEQ_GAMMA_SETTING[] = {
+static const unsigned short seq_gamma_setting[] = {
0xfa, 0x00,
DATA_ONLY, 0x18,
DATA_ONLY, 0x08,
@@ -119,7 +108,7 @@ static const unsigned short SEQ_GAMMA_SETTING[] = {
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ETC_CONDITION_SET[] = {
+static const unsigned short seq_etc_condition_set[] = {
0xf6, 0x00,
DATA_ONLY, 0x8c,
DATA_ONLY, 0x07,
@@ -318,47 +307,47 @@ static const unsigned short SEQ_ETC_CONDITION_SET[] = {
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ACL_ON[] = {
+static const unsigned short seq_acl_on[] = {
/* ACL on */
0xc0, 0x01,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ACL_OFF[] = {
+static const unsigned short seq_acl_off[] = {
/* ACL off */
0xc0, 0x00,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ELVSS_ON[] = {
+static const unsigned short seq_elvss_on[] = {
/* ELVSS on */
0xb1, 0x0b,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ELVSS_OFF[] = {
+static const unsigned short seq_elvss_off[] = {
/* ELVSS off */
0xb1, 0x0a,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_STAND_BY_OFF[] = {
+static const unsigned short seq_stand_by_off[] = {
0x11, COMMAND_ONLY,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_STAND_BY_ON[] = {
+static const unsigned short seq_stand_by_on[] = {
0x10, COMMAND_ONLY,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_DISPLAY_ON[] = {
+static const unsigned short seq_display_on[] = {
0x29, COMMAND_ONLY,
ENDDEF, 0x0000
@@ -406,8 +395,9 @@ static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd,
ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]);
if (ret)
break;
- } else
- udelay(wbuf[i+1]*1000);
+ } else {
+ msleep(wbuf[i+1]);
+ }
i += 2;
}
@@ -457,12 +447,12 @@ static int s6e63m0_ldi_init(struct s6e63m0 *lcd)
{
int ret, i;
const unsigned short *init_seq[] = {
- SEQ_PANEL_CONDITION_SET,
- SEQ_DISPLAY_CONDITION_SET,
- SEQ_GAMMA_SETTING,
- SEQ_ETC_CONDITION_SET,
- SEQ_ACL_ON,
- SEQ_ELVSS_ON,
+ seq_panel_condition_set,
+ seq_display_condition_set,
+ seq_gamma_setting,
+ seq_etc_condition_set,
+ seq_acl_on,
+ seq_elvss_on,
};
for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
@@ -478,8 +468,8 @@ static int s6e63m0_ldi_enable(struct s6e63m0 *lcd)
{
int ret = 0, i;
const unsigned short *enable_seq[] = {
- SEQ_STAND_BY_OFF,
- SEQ_DISPLAY_ON,
+ seq_stand_by_off,
+ seq_display_on,
};
for (i = 0; i < ARRAY_SIZE(enable_seq); i++) {
@@ -495,43 +485,39 @@ static int s6e63m0_ldi_disable(struct s6e63m0 *lcd)
{
int ret;
- ret = s6e63m0_panel_send_sequence(lcd, SEQ_STAND_BY_ON);
+ ret = s6e63m0_panel_send_sequence(lcd, seq_stand_by_on);
return ret;
}
+static int s6e63m0_power_is_on(int power)
+{
+ return power <= FB_BLANK_NORMAL;
+}
+
static int s6e63m0_power_on(struct s6e63m0 *lcd)
{
int ret = 0;
- struct lcd_platform_data *pd = NULL;
- struct backlight_device *bd = NULL;
+ struct lcd_platform_data *pd;
+ struct backlight_device *bd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
-
bd = lcd->bd;
- if (!bd) {
- dev_err(lcd->dev, "backlight device is NULL.\n");
- return -EFAULT;
- }
if (!pd->power_on) {
dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->power_on(lcd->ld, 1);
- mdelay(pd->power_on_delay);
+ msleep(pd->power_on_delay);
}
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->reset(lcd->ld);
- mdelay(pd->reset_delay);
+ msleep(pd->reset_delay);
}
ret = s6e63m0_ldi_init(lcd);
@@ -558,14 +544,10 @@ static int s6e63m0_power_on(struct s6e63m0 *lcd)
static int s6e63m0_power_off(struct s6e63m0 *lcd)
{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
+ int ret;
+ struct lcd_platform_data *pd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
ret = s6e63m0_ldi_disable(lcd);
if (ret) {
@@ -573,13 +555,9 @@ static int s6e63m0_power_off(struct s6e63m0 *lcd)
return -EIO;
}
- mdelay(pd->power_off_delay);
+ msleep(pd->power_off_delay);
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else
- pd->power_on(lcd->ld, 0);
+ pd->power_on(lcd->ld, 0);
return 0;
}
@@ -588,9 +566,9 @@ static int s6e63m0_power(struct s6e63m0 *lcd, int power)
{
int ret = 0;
- if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
+ if (s6e63m0_power_is_on(power) && !s6e63m0_power_is_on(lcd->power))
ret = s6e63m0_power_on(lcd);
- else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
+ else if (!s6e63m0_power_is_on(power) && s6e63m0_power_is_on(lcd->power))
ret = s6e63m0_power_off(lcd);
if (!ret)
@@ -757,10 +735,10 @@ static int s6e63m0_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->dev = &spi->dev;
- lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
+ lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
}
ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
@@ -788,7 +766,7 @@ static int s6e63m0_probe(struct spi_device *spi)
* know that.
*/
lcd->gamma_table_count =
- sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int));
+ sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int *));
ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode);
if (ret < 0)
@@ -811,10 +789,11 @@ static int s6e63m0_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
s6e63m0_power(lcd, FB_BLANK_UNBLANK);
- } else
+ } else {
lcd->power = FB_BLANK_UNBLANK;
+ }
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
@@ -827,7 +806,7 @@ out_lcd_unregister:
static int s6e63m0_remove(struct spi_device *spi)
{
- struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+ struct s6e63m0 *lcd = spi_get_drvdata(spi);
s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
device_remove_file(&spi->dev, &dev_attr_gamma_table);
@@ -839,44 +818,26 @@ static int s6e63m0_remove(struct spi_device *spi)
}
#if defined(CONFIG_PM)
-static unsigned int before_power;
-
static int s6e63m0_suspend(struct spi_device *spi, pm_message_t mesg)
{
- int ret = 0;
- struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+ struct s6e63m0 *lcd = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
- before_power = lcd->power;
-
/*
* when lcd panel is suspend, lcd panel becomes off
* regardless of status.
*/
- ret = s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
-
- return ret;
+ return s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
}
static int s6e63m0_resume(struct spi_device *spi)
{
- int ret = 0;
- struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
-
- /*
- * after suspended, if lcd panel status is FB_BLANK_UNBLANK
- * (at that time, before_power is FB_BLANK_UNBLANK) then
- * it changes that status to FB_BLANK_POWERDOWN to get lcd on.
- */
- if (before_power == FB_BLANK_UNBLANK)
- lcd->power = FB_BLANK_POWERDOWN;
+ struct s6e63m0 *lcd = spi_get_drvdata(spi);
- dev_dbg(&spi->dev, "before_power = %d\n", before_power);
+ lcd->power = FB_BLANK_POWERDOWN;
- ret = s6e63m0_power(lcd, before_power);
-
- return ret;
+ return s6e63m0_power(lcd, FB_BLANK_UNBLANK);
}
#else
#define s6e63m0_suspend NULL
@@ -886,7 +847,7 @@ static int s6e63m0_resume(struct spi_device *spi)
/* Power down all displays on reboot, poweroff or halt. */
static void s6e63m0_shutdown(struct spi_device *spi)
{
- struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+ struct s6e63m0 *lcd = spi_get_drvdata(spi);
s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
}
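
/* Editor's note: the s6e63m0 hunks above repeat three small cleanups: the
 * POWER_IS_ON() macro becomes the s6e63m0_power_is_on() helper, millisecond
 * scale udelay()/mdelay() waits become msleep(), and driver data goes through
 * spi_set_drvdata()/spi_get_drvdata() instead of dev_set_drvdata(&spi->dev, ...).
 * The following is a minimal sketch of the same idioms in a throwaway SPI
 * panel skeleton; it is not part of the patch, and every demo_* name is
 * hypothetical. */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct demo_panel {
	struct spi_device *spi;
	int power;
};

/* a static helper replaces a POWER_IS_ON()-style macro, as in the hunks above */
static int demo_power_is_on(int power)
{
	return power <= FB_BLANK_NORMAL;
}

static int demo_probe(struct spi_device *spi)
{
	struct demo_panel *panel;

	panel = devm_kzalloc(&spi->dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	panel->spi = spi;
	panel->power = FB_BLANK_POWERDOWN;

	/* spi_set_drvdata() rather than dev_set_drvdata(&spi->dev, ...) */
	spi_set_drvdata(spi, panel);

	/* msleep() rather than udelay(n * 1000) or mdelay(n) for long waits */
	msleep(25);

	if (!demo_power_is_on(panel->power))
		dev_dbg(&spi->dev, "panel starts powered down\n");

	return 0;
}
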
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 146ffb9404d1..00162085eec0 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -2,7 +2,7 @@
* tdo24m - SPI-based drivers for Toppoly TDO24M series LCD panels
*
* Copyright (C) 2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -47,7 +47,7 @@ struct tdo24m {
((x1) << 9) | 0x100 | (x2))
#define CMD_NULL (-1)
-static uint32_t lcd_panel_reset[] = {
+static const uint32_t lcd_panel_reset[] = {
CMD0(0x1), /* reset */
CMD0(0x0), /* nop */
CMD0(0x0), /* nop */
@@ -55,7 +55,7 @@ static uint32_t lcd_panel_reset[] = {
CMD_NULL,
};
-static uint32_t lcd_panel_on[] = {
+static const uint32_t lcd_panel_on[] = {
CMD0(0x29), /* Display ON */
CMD2(0xB8, 0xFF, 0xF9), /* Output Control */
CMD0(0x11), /* Sleep out */
@@ -63,7 +63,7 @@ static uint32_t lcd_panel_on[] = {
CMD_NULL,
};
-static uint32_t lcd_panel_off[] = {
+static const uint32_t lcd_panel_off[] = {
CMD0(0x28), /* Display OFF */
CMD2(0xB8, 0x80, 0x02), /* Output Control */
CMD0(0x10), /* Sleep in */
@@ -71,7 +71,7 @@ static uint32_t lcd_panel_off[] = {
CMD_NULL,
};
-static uint32_t lcd_vga_pass_through_tdo24m[] = {
+static const uint32_t lcd_vga_pass_through_tdo24m[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x80),
CMD1(0xE1, 0x00),
@@ -80,7 +80,7 @@ static uint32_t lcd_vga_pass_through_tdo24m[] = {
CMD_NULL,
};
-static uint32_t lcd_qvga_pass_through_tdo24m[] = {
+static const uint32_t lcd_qvga_pass_through_tdo24m[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x81),
CMD1(0xE1, 0x00),
@@ -89,8 +89,8 @@ static uint32_t lcd_qvga_pass_through_tdo24m[] = {
CMD_NULL,
};
-static uint32_t lcd_vga_transfer_tdo24m[] = {
- CMD1(0xcf, 0x02), /* Blanking period control (1) */
+static const uint32_t lcd_vga_transfer_tdo24m[] = {
+ CMD1(0xcf, 0x02), /* Blanking period control (1) */
CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd1, 0x01), /* CKV timing control on/off */
CMD2(0xd2, 0x14, 0x00), /* CKV 1,2 timing control */
@@ -102,7 +102,7 @@ static uint32_t lcd_vga_transfer_tdo24m[] = {
CMD_NULL,
};
-static uint32_t lcd_qvga_transfer[] = {
+static const uint32_t lcd_qvga_transfer[] = {
CMD1(0xd6, 0x02), /* Blanking period control (1) */
CMD2(0xd7, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd8, 0x01), /* CKV timing control on/off */
@@ -115,7 +115,7 @@ static uint32_t lcd_qvga_transfer[] = {
CMD_NULL,
};
-static uint32_t lcd_vga_pass_through_tdo35s[] = {
+static const uint32_t lcd_vga_pass_through_tdo35s[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x80),
CMD1(0xE1, 0x00),
@@ -123,7 +123,7 @@ static uint32_t lcd_vga_pass_through_tdo35s[] = {
CMD_NULL,
};
-static uint32_t lcd_qvga_pass_through_tdo35s[] = {
+static const uint32_t lcd_qvga_pass_through_tdo35s[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x81),
CMD1(0xE1, 0x00),
@@ -131,8 +131,8 @@ static uint32_t lcd_qvga_pass_through_tdo35s[] = {
CMD_NULL,
};
-static uint32_t lcd_vga_transfer_tdo35s[] = {
- CMD1(0xcf, 0x02), /* Blanking period control (1) */
+static const uint32_t lcd_vga_transfer_tdo35s[] = {
+ CMD1(0xcf, 0x02), /* Blanking period control (1) */
CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd1, 0x01), /* CKV timing control on/off */
CMD2(0xd2, 0x00, 0x1e), /* CKV 1,2 timing control */
@@ -144,7 +144,7 @@ static uint32_t lcd_vga_transfer_tdo35s[] = {
CMD_NULL,
};
-static uint32_t lcd_panel_config[] = {
+static const uint32_t lcd_panel_config[] = {
CMD2(0xb8, 0xff, 0xf9), /* Output control */
CMD0(0x11), /* sleep out */
CMD1(0xba, 0x01), /* Display mode (1) */
@@ -175,10 +175,11 @@ static uint32_t lcd_panel_config[] = {
CMD_NULL,
};
-static int tdo24m_writes(struct tdo24m *lcd, uint32_t *array)
+static int tdo24m_writes(struct tdo24m *lcd, const uint32_t *array)
{
struct spi_transfer *x = &lcd->xfer;
- uint32_t data, *p = array;
+ const uint32_t *p = array;
+ uint32_t data;
int nparams, err = 0;
for (; *p != CMD_NULL; p++) {
@@ -389,7 +390,7 @@ static int tdo24m_probe(struct spi_device *spi)
if (IS_ERR(lcd->lcd_dev))
return PTR_ERR(lcd->lcd_dev);
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
if (err)
goto out_unregister;
@@ -403,7 +404,7 @@ out_unregister:
static int tdo24m_remove(struct spi_device *spi)
{
- struct tdo24m *lcd = dev_get_drvdata(&spi->dev);
+ struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
@@ -414,14 +415,14 @@ static int tdo24m_remove(struct spi_device *spi)
#ifdef CONFIG_PM
static int tdo24m_suspend(struct spi_device *spi, pm_message_t state)
{
- struct tdo24m *lcd = dev_get_drvdata(&spi->dev);
+ struct tdo24m *lcd = spi_get_drvdata(spi);
return tdo24m_power(lcd, FB_BLANK_POWERDOWN);
}
static int tdo24m_resume(struct spi_device *spi)
{
- struct tdo24m *lcd = dev_get_drvdata(&spi->dev);
+ struct tdo24m *lcd = spi_get_drvdata(spi);
return tdo24m_power(lcd, FB_BLANK_UNBLANK);
}
@@ -433,7 +434,7 @@ static int tdo24m_resume(struct spi_device *spi)
/* Power down all displays on reboot, poweroff or halt */
static void tdo24m_shutdown(struct spi_device *spi)
{
- struct tdo24m *lcd = dev_get_drvdata(&spi->dev);
+ struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
}
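
/* Editor's note: the tdo24m changes above const-qualify the command tables
 * and, as a consequence, make tdo24m_writes() take a "const uint32_t *".
 * A small sketch of that pattern follows; it is not part of the patch, and
 * the demo_cmds table and demo_count() walker are hypothetical. */

#include <linux/types.h>

#define DEMO_CMD_NULL	(-1)

static const uint32_t demo_cmds[] = {
	0x29,			/* display on */
	0x11,			/* sleep out  */
	DEMO_CMD_NULL,
};

static int demo_count(const uint32_t *p)
{
	int n = 0;

	/* a real driver would queue one SPI transfer per entry here */
	for (; *p != DEMO_CMD_NULL; p++)
		n++;

	return n;
}
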
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index a0521abdcd8a..2326fa810c59 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -54,7 +54,7 @@ static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
static int tosa_bl_update_status(struct backlight_device *dev)
{
struct backlight_properties *props = &dev->props;
- struct tosa_bl_data *data = dev_get_drvdata(&dev->dev);
+ struct tosa_bl_data *data = bl_get_data(dev);
int power = max(props->power, props->fb_blank);
int brightness = props->brightness;
@@ -92,14 +92,12 @@ static int tosa_bl_probe(struct i2c_client *client,
data->comadj = sharpsl_param.comadj == -1 ? COMADJ_DEFAULT : sharpsl_param.comadj;
- ret = devm_gpio_request(&client->dev, TOSA_GPIO_BL_C20MA, "backlight");
+ ret = devm_gpio_request_one(&client->dev, TOSA_GPIO_BL_C20MA,
+ GPIOF_OUT_INIT_LOW, "backlight");
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
return ret;
}
- ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
- if (ret)
- return ret;
i2c_set_clientdata(client, data);
data->i2c = client;
@@ -163,7 +161,6 @@ static const struct i2c_device_id tosa_bl_id[] = {
{ },
};
-
static struct i2c_driver tosa_bl_driver = {
.driver = {
.name = "tosa-bl",
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 86fff88c2e4a..666fe2593ea4 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -63,7 +63,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
int tosa_bl_enable(struct spi_device *spi, int enable)
{
/* bl_enable GP04=1 otherwise GP04=0*/
- return tosa_tg_send(spi, TG_GPODR2, enable? 0x01 : 0x00);
+ return tosa_tg_send(spi, TG_GPODR2, enable ? 0x01 : 0x00);
}
EXPORT_SYMBOL(tosa_bl_enable);
@@ -91,15 +91,17 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
tosa_tg_send(spi, TG_PNLCTL, value);
/* TG LCD pannel power up */
- tosa_tg_send(spi, TG_PINICTL,0x4);
+ tosa_tg_send(spi, TG_PINICTL, 0x4);
mdelay(50);
/* TG LCD GVSS */
- tosa_tg_send(spi, TG_PINICTL,0x0);
+ tosa_tg_send(spi, TG_PINICTL, 0x0);
if (!data->i2c) {
- /* after the pannel is powered up the first time, we can access the i2c bus */
- /* so probe for the DAC */
+ /*
+ * after the panel is powered up the first time,
+ * we can access the i2c bus so probe for the DAC
+ */
struct i2c_adapter *adap = i2c_get_adapter(0);
struct i2c_board_info info = {
.type = "tosa-bl",
@@ -115,11 +117,11 @@ static void tosa_lcd_tg_off(struct tosa_lcd_data *data)
struct spi_device *spi = data->spi;
/* TG LCD VHSA off */
- tosa_tg_send(spi, TG_PINICTL,0x4);
+ tosa_tg_send(spi, TG_PINICTL, 0x4);
mdelay(50);
/* TG LCD signal off */
- tosa_tg_send(spi, TG_PINICTL,0x6);
+ tosa_tg_send(spi, TG_PINICTL, 0x6);
mdelay(50);
/* TG Off */
@@ -191,19 +193,15 @@ static int tosa_lcd_probe(struct spi_device *spi)
return ret;
data->spi = spi;
- dev_set_drvdata(&spi->dev, data);
+ spi_set_drvdata(spi, data);
- ret = devm_gpio_request(&spi->dev, TOSA_GPIO_TG_ON, "tg #pwr");
+ ret = devm_gpio_request_one(&spi->dev, TOSA_GPIO_TG_ON,
+ GPIOF_OUT_INIT_LOW, "tg #pwr");
if (ret < 0)
goto err_gpio_tg;
mdelay(60);
- ret = gpio_direction_output(TOSA_GPIO_TG_ON, 0);
- if (ret < 0)
- goto err_gpio_tg;
-
- mdelay(60);
tosa_lcd_tg_init(data);
tosa_lcd_tg_on(data);
@@ -222,13 +220,13 @@ static int tosa_lcd_probe(struct spi_device *spi)
err_register:
tosa_lcd_tg_off(data);
err_gpio_tg:
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
return ret;
}
static int tosa_lcd_remove(struct spi_device *spi)
{
- struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
+ struct tosa_lcd_data *data = spi_get_drvdata(spi);
lcd_device_unregister(data->lcd);
@@ -237,7 +235,7 @@ static int tosa_lcd_remove(struct spi_device *spi)
tosa_lcd_tg_off(data);
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
return 0;
}
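
/* Editor's note: both tosa files above fold a devm_gpio_request() plus
 * gpio_direction_output() pair into a single devm_gpio_request_one() call
 * with GPIOF_OUT_INIT_LOW. A minimal sketch of that idiom, not part of the
 * patch; the GPIO number and demo_init() name are hypothetical. */

#include <linux/device.h>
#include <linux/gpio.h>

#define DEMO_GPIO_PWR	42	/* hypothetical GPIO number */

static int demo_init(struct device *dev)
{
	/* request the line and drive it low in one step */
	return devm_gpio_request_one(dev, DEMO_GPIO_PWR,
				     GPIOF_OUT_INIT_LOW, "demo pwr");
}
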
@@ -245,7 +243,7 @@ static int tosa_lcd_remove(struct spi_device *spi)
#ifdef CONFIG_PM
static int tosa_lcd_suspend(struct spi_device *spi, pm_message_t state)
{
- struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
+ struct tosa_lcd_data *data = spi_get_drvdata(spi);
tosa_lcd_tg_off(data);
@@ -254,7 +252,7 @@ static int tosa_lcd_suspend(struct spi_device *spi, pm_message_t state)
static int tosa_lcd_resume(struct spi_device *spi)
{
- struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
+ struct tosa_lcd_data *data = spi_get_drvdata(spi);
tosa_lcd_tg_init(data);
if (POWER_IS_ON(data->lcd_power))
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index 712b0acfd339..84d582f591dc 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -26,7 +26,7 @@
/* Device initialisation sequences */
-static struct ili9320_reg vgg_init1[] = {
+static const struct ili9320_reg vgg_init1[] = {
{
.address = ILI9320_POWER1,
.value = ILI9320_POWER1_AP(0) | ILI9320_POWER1_BT(0),
@@ -43,7 +43,7 @@ static struct ili9320_reg vgg_init1[] = {
},
};
-static struct ili9320_reg vgg_init2[] = {
+static const struct ili9320_reg vgg_init2[] = {
{
.address = ILI9320_POWER1,
.value = (ILI9320_POWER1_AP(3) | ILI9320_POWER1_APE |
@@ -54,7 +54,7 @@ static struct ili9320_reg vgg_init2[] = {
}
};
-static struct ili9320_reg vgg_gamma[] = {
+static const struct ili9320_reg vgg_gamma[] = {
{
.address = ILI9320_GAMMA1,
.value = 0x0000,
@@ -89,7 +89,7 @@ static struct ili9320_reg vgg_gamma[] = {
};
-static struct ili9320_reg vgg_init0[] = {
+static const struct ili9320_reg vgg_init0[] = {
[0] = {
/* set direction and scan mode gate */
.address = ILI9320_DRIVER,
@@ -208,16 +208,15 @@ static int vgg2432a4_lcd_init(struct ili9320 *lcd,
#ifdef CONFIG_PM
static int vgg2432a4_suspend(struct spi_device *spi, pm_message_t state)
{
- return ili9320_suspend(dev_get_drvdata(&spi->dev), state);
+ return ili9320_suspend(spi_get_drvdata(spi), state);
}
-
static int vgg2432a4_resume(struct spi_device *spi)
{
- return ili9320_resume(dev_get_drvdata(&spi->dev));
+ return ili9320_resume(spi_get_drvdata(spi));
}
#else
#define vgg2432a4_suspend NULL
-#define vgg2432a4_resume NULL
+#define vgg2432a4_resume NULL
#endif
static struct ili9320_client vgg2432a4_client = {
@@ -242,12 +241,12 @@ static int vgg2432a4_probe(struct spi_device *spi)
static int vgg2432a4_remove(struct spi_device *spi)
{
- return ili9320_remove(dev_get_drvdata(&spi->dev));
+ return ili9320_remove(spi_get_drvdata(spi));
}
static void vgg2432a4_shutdown(struct spi_device *spi)
{
- ili9320_shutdown(dev_get_drvdata(&spi->dev));
+ ili9320_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver vgg2432a4_driver = {
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 7347aa1e5e4a..a82d2578d976 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -87,8 +87,8 @@ static void set_vcomm(void)
pr_err("i2c_smbus_write_byte_data fail: %d\n", nr);
}
-static int __devinit ad5280_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ad5280_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
int ret;
if (!i2c_check_functionality(client->adapter,
@@ -108,7 +108,7 @@ static int __devinit ad5280_probe(struct i2c_client *client,
return 0;
}
-static int __devexit ad5280_remove(struct i2c_client *client)
+static int ad5280_remove(struct i2c_client *client)
{
ad5280_client = NULL;
return 0;
@@ -126,7 +126,7 @@ static struct i2c_driver ad5280_driver = {
.name = "bf537-lq035-ad5280",
},
.probe = ad5280_probe,
- .remove = __devexit_p(ad5280_remove),
+ .remove = ad5280_remove,
.id_table = ad5280_id,
};
@@ -360,7 +360,7 @@ static int config_dma(void)
return 0;
}
-static int __devinit request_ports(void)
+static int request_ports(void)
{
u16 tmr_req[] = TIMERS;
@@ -443,7 +443,7 @@ static struct fb_var_screeninfo bfin_lq035_fb_defined = {
.transp = {0, 0, 0},
};
-static struct fb_fix_screeninfo bfin_lq035_fb_fix __devinitdata = {
+static struct fb_fix_screeninfo bfin_lq035_fb_fix = {
.id = KBUILD_MODNAME,
.smem_len = ACTIVE_VIDEO_MEM_SIZE,
.type = FB_TYPE_PACKED_PIXELS,
@@ -686,7 +686,7 @@ static struct lcd_ops bfin_lcd_ops = {
static struct lcd_device *lcd_dev;
-static int __devinit bfin_lq035_probe(struct platform_device *pdev)
+static int bfin_lq035_probe(struct platform_device *pdev)
{
struct backlight_properties props;
dma_addr_t dma_handle;
@@ -816,7 +816,7 @@ out_ports:
return ret;
}
-static int __devexit bfin_lq035_remove(struct platform_device *pdev)
+static int bfin_lq035_remove(struct platform_device *pdev)
{
if (fb_buffer != NULL)
dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
@@ -889,7 +889,7 @@ static int bfin_lq035_resume(struct platform_device *pdev)
static struct platform_driver bfin_lq035_driver = {
.probe = bfin_lq035_probe,
- .remove = __devexit_p(bfin_lq035_remove),
+ .remove = bfin_lq035_remove,
.suspend = bfin_lq035_suspend,
.resume = bfin_lq035_resume,
.driver = {
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index ff5663f5c64f..2726a5b66741 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -497,7 +497,7 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
+static int bfin_bf54x_probe(struct platform_device *pdev)
{
#ifndef NO_BL_SUPPORT
struct backlight_properties props;
@@ -686,7 +686,7 @@ out1:
return ret;
}
-static int __devexit bfin_bf54x_remove(struct platform_device *pdev)
+static int bfin_bf54x_remove(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -754,7 +754,7 @@ static int bfin_bf54x_resume(struct platform_device *pdev)
static struct platform_driver bfin_bf54x_driver = {
.probe = bfin_bf54x_probe,
- .remove = __devexit_p(bfin_bf54x_remove),
+ .remove = bfin_bf54x_remove,
.suspend = bfin_bf54x_suspend,
.resume = bfin_bf54x_resume,
.driver = {
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 6fbc75c2f0a1..29d8c0443a1f 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -137,7 +137,7 @@ static int lq035q1_control(struct spi_device *spi, unsigned char reg, unsigned s
return ret;
}
-static int __devinit lq035q1_spidev_probe(struct spi_device *spi)
+static int lq035q1_spidev_probe(struct spi_device *spi)
{
int ret;
struct spi_control *ctl;
@@ -358,8 +358,8 @@ static inline void bfin_lq035q1_free_ports(unsigned ppi16)
gpio_free(P_IDENT(P_PPI0_FS3));
}
-static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev,
- unsigned ppi16)
+static int bfin_lq035q1_request_ports(struct platform_device *pdev,
+ unsigned ppi16)
{
int ret;
/* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
@@ -555,7 +555,7 @@ static irqreturn_t bfin_lq035q1_irq_error(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
+static int bfin_lq035q1_probe(struct platform_device *pdev)
{
struct bfin_lq035q1fb_info *info;
struct fb_info *fbinfo;
@@ -706,7 +706,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
info->spidrv.driver.name = DRIVER_NAME"-spi";
info->spidrv.probe = lq035q1_spidev_probe;
- info->spidrv.remove = __devexit_p(lq035q1_spidev_remove);
+ info->spidrv.remove = lq035q1_spidev_remove;
info->spidrv.shutdown = lq035q1_spidev_shutdown;
info->spidrv.suspend = lq035q1_spidev_suspend;
info->spidrv.resume = lq035q1_spidev_resume;
@@ -764,7 +764,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit bfin_lq035q1_remove(struct platform_device *pdev)
+static int bfin_lq035q1_remove(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
struct bfin_lq035q1fb_info *info = fbinfo->par;
@@ -845,7 +845,7 @@ static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = {
static struct platform_driver bfin_lq035q1_driver = {
.probe = bfin_lq035q1_probe,
- .remove = __devexit_p(bfin_lq035q1_remove),
+ .remove = bfin_lq035q1_remove,
.driver = {
.name = DRIVER_NAME,
#ifdef CONFIG_PM
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index ae0fb24b8b43..d46da01c31ae 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -418,7 +418,7 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
+static int bfin_t350mcqb_probe(struct platform_device *pdev)
{
#ifndef NO_BL_SUPPORT
struct backlight_properties props;
@@ -583,7 +583,7 @@ out1:
return ret;
}
-static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
+static int bfin_t350mcqb_remove(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -658,7 +658,7 @@ static int bfin_t350mcqb_resume(struct platform_device *pdev)
static struct platform_driver bfin_t350mcqb_driver = {
.probe = bfin_t350mcqb_probe,
- .remove = __devexit_p(bfin_t350mcqb_remove),
+ .remove = bfin_t350mcqb_remove,
.suspend = bfin_t350mcqb_suspend,
.resume = bfin_t350mcqb_resume,
.driver = {
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index d0f121bd8b25..8d411a3c9966 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -88,7 +88,7 @@ static struct fb_var_screeninfo bfin_adv7393_fb_defined = {
.transp = {0, 0, 0},
};
-static struct fb_fix_screeninfo bfin_adv7393_fb_fix __devinitdata = {
+static struct fb_fix_screeninfo bfin_adv7393_fb_fix = {
.id = "BFIN ADV7393",
.smem_len = 720 * 480 * 2,
.type = FB_TYPE_PACKED_PIXELS,
@@ -368,8 +368,8 @@ adv7393_write_proc(struct file *file, const char __user * buffer,
return count;
}
-static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bfin_adv7393_fb_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
int ret = 0;
struct proc_dir_entry *entry;
@@ -719,7 +719,7 @@ static int bfin_adv7393_fb_setcolreg(u_int regno, u_int red, u_int green,
return 0;
}
-static int __devexit bfin_adv7393_fb_remove(struct i2c_client *client)
+static int bfin_adv7393_fb_remove(struct i2c_client *client)
{
struct adv7393fb_device *fbdev = i2c_get_clientdata(client);
@@ -794,7 +794,7 @@ static struct i2c_driver bfin_adv7393_fb_driver = {
#endif
},
.probe = bfin_adv7393_fb_probe,
- .remove = __devexit_p(bfin_adv7393_fb_remove),
+ .remove = bfin_adv7393_fb_remove,
.id_table = bfin_adv7393_id,
};
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index c95b417d0d41..b09701c79432 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -91,7 +91,7 @@ static struct panel_info panel_table[] = {
#define DPY_W 800
#define DPY_H 600
-static struct fb_fix_screeninfo broadsheetfb_fix __devinitdata = {
+static struct fb_fix_screeninfo broadsheetfb_fix = {
.id = "broadsheetfb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_STATIC_PSEUDOCOLOR,
@@ -102,7 +102,7 @@ static struct fb_fix_screeninfo broadsheetfb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo broadsheetfb_var __devinitdata = {
+static struct fb_var_screeninfo broadsheetfb_var = {
.xres = DPY_W,
.yres = DPY_H,
.xres_virtual = DPY_W,
@@ -774,7 +774,7 @@ static DEVICE_ATTR(loadstore_waveform, S_IWUSR, NULL,
broadsheet_loadstore_waveform);
/* upper level functions that manipulate the display and other stuff */
-static void __devinit broadsheet_init_display(struct broadsheetfb_par *par)
+static void broadsheet_init_display(struct broadsheetfb_par *par)
{
u16 args[5];
int xres = par->info->var.xres;
@@ -834,7 +834,7 @@ static void __devinit broadsheet_init_display(struct broadsheetfb_par *par)
par->board->wait_for_rdy(par);
}
-static void __devinit broadsheet_identify(struct broadsheetfb_par *par)
+static void broadsheet_identify(struct broadsheetfb_par *par)
{
u16 rev, prc;
struct device *dev = par->info->device;
@@ -849,7 +849,7 @@ static void __devinit broadsheet_identify(struct broadsheetfb_par *par)
dev_warn(dev, "Unrecognized Broadsheet Revision\n");
}
-static void __devinit broadsheet_init(struct broadsheetfb_par *par)
+static void broadsheet_init(struct broadsheetfb_par *par)
{
broadsheet_send_command(par, BS_CMD_INIT_SYS_RUN);
/* the controller needs a second */
@@ -1058,7 +1058,7 @@ static struct fb_deferred_io broadsheetfb_defio = {
.deferred_io = broadsheetfb_dpy_deferred_io,
};
-static int __devinit broadsheetfb_probe(struct platform_device *dev)
+static int broadsheetfb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct broadsheet_board *board;
@@ -1190,7 +1190,7 @@ err:
}
-static int __devexit broadsheetfb_remove(struct platform_device *dev)
+static int broadsheetfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev)
static struct platform_driver broadsheetfb_driver = {
.probe = broadsheetfb_probe,
- .remove = __devexit_p(broadsheetfb_remove),
+ .remove = broadsheetfb_remove,
.driver = {
.owner = THIS_MODULE,
.name = "broadsheetfb",
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 6bea9a936798..60017fc634b5 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -179,7 +179,7 @@ static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
* Initialisation
*/
-static void __devinit bw2_init_fix(struct fb_info *info, int linebytes)
+static void bw2_init_fix(struct fb_info *info, int linebytes)
{
strlcpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
@@ -191,44 +191,43 @@ static void __devinit bw2_init_fix(struct fb_info *info, int linebytes)
info->fix.accel = FB_ACCEL_SUN_BWTWO;
}
-static u8 bw2regs_1600[] __devinitdata = {
+static u8 bw2regs_1600[] = {
0x14, 0x8b, 0x15, 0x28, 0x16, 0x03, 0x17, 0x13,
0x18, 0x7b, 0x19, 0x05, 0x1a, 0x34, 0x1b, 0x2e,
0x1c, 0x00, 0x1d, 0x0a, 0x1e, 0xff, 0x1f, 0x01,
0x10, 0x21, 0
};
-static u8 bw2regs_ecl[] __devinitdata = {
+static u8 bw2regs_ecl[] = {
0x14, 0x65, 0x15, 0x1e, 0x16, 0x04, 0x17, 0x0c,
0x18, 0x5e, 0x19, 0x03, 0x1a, 0xa7, 0x1b, 0x23,
0x1c, 0x00, 0x1d, 0x08, 0x1e, 0xff, 0x1f, 0x01,
0x10, 0x20, 0
};
-static u8 bw2regs_analog[] __devinitdata = {
+static u8 bw2regs_analog[] = {
0x14, 0xbb, 0x15, 0x2b, 0x16, 0x03, 0x17, 0x13,
0x18, 0xb0, 0x19, 0x03, 0x1a, 0xa6, 0x1b, 0x22,
0x1c, 0x01, 0x1d, 0x05, 0x1e, 0xff, 0x1f, 0x01,
0x10, 0x20, 0
};
-static u8 bw2regs_76hz[] __devinitdata = {
+static u8 bw2regs_76hz[] = {
0x14, 0xb7, 0x15, 0x27, 0x16, 0x03, 0x17, 0x0f,
0x18, 0xae, 0x19, 0x03, 0x1a, 0xae, 0x1b, 0x2a,
0x1c, 0x01, 0x1d, 0x09, 0x1e, 0xff, 0x1f, 0x01,
0x10, 0x24, 0
};
-static u8 bw2regs_66hz[] __devinitdata = {
+static u8 bw2regs_66hz[] = {
0x14, 0xbb, 0x15, 0x2b, 0x16, 0x04, 0x17, 0x14,
0x18, 0xae, 0x19, 0x03, 0x1a, 0xa8, 0x1b, 0x24,
0x1c, 0x01, 0x1d, 0x05, 0x1e, 0xff, 0x1f, 0x01,
0x10, 0x20, 0
};
-static int __devinit bw2_do_default_mode(struct bw2_par *par,
- struct fb_info *info,
- int *linebytes)
+static int bw2_do_default_mode(struct bw2_par *par, struct fb_info *info,
+ int *linebytes)
{
u8 status, mon;
u8 *p;
@@ -273,7 +272,7 @@ static int __devinit bw2_do_default_mode(struct bw2_par *par,
return 0;
}
-static int __devinit bw2_probe(struct platform_device *op)
+static int bw2_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -352,7 +351,7 @@ out_err:
return err;
}
-static int __devexit bw2_remove(struct platform_device *op)
+static int bw2_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct bw2_par *par = info->par;
@@ -384,7 +383,7 @@ static struct platform_driver bw2_driver = {
.of_match_table = bw2_match,
},
.probe = bw2_probe,
- .remove = __devexit_p(bw2_remove),
+ .remove = bw2_remove,
};
static int __init bw2_init(void)
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
index 2c76fdf23f2a..153dd65b0ae8 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/carminefb.c
@@ -78,7 +78,7 @@ struct carmine_fb {
u32 pseudo_palette[16];
};
-static struct fb_fix_screeninfo carminefb_fix __devinitdata = {
+static struct fb_fix_screeninfo carminefb_fix = {
.id = "Carmine",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
@@ -537,8 +537,9 @@ static struct fb_ops carminefb_ops = {
.fb_setcolreg = carmine_setcolreg,
};
-static int __devinit alloc_carmine_fb(void __iomem *regs, void __iomem *smem_base,
- int smem_offset, struct device *device, struct fb_info **rinfo)
+static int alloc_carmine_fb(void __iomem *regs, void __iomem *smem_base,
+ int smem_offset, struct device *device,
+ struct fb_info **rinfo)
{
int ret;
struct fb_info *info;
@@ -606,8 +607,7 @@ static void cleanup_fb_device(struct fb_info *info)
}
}
-static int __devinit carminefb_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct carmine_hw *hw;
struct device *device = &dev->dev;
@@ -721,7 +721,7 @@ err_enable_pci:
return ret;
}
-static void __devexit carminefb_remove(struct pci_dev *dev)
+static void carminefb_remove(struct pci_dev *dev)
{
struct carmine_hw *hw = pci_get_drvdata(dev);
struct fb_fix_screeninfo fix;
@@ -752,7 +752,7 @@ static void __devexit carminefb_remove(struct pci_dev *dev)
}
#define PCI_VENDOR_ID_FUJITU_LIMITED 0x10cf
-static struct pci_device_id carmine_devices[] __devinitdata = {
+static struct pci_device_id carmine_devices[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_FUJITU_LIMITED, 0x202b)},
{0, 0, 0, 0, 0, 0, 0}
@@ -764,7 +764,7 @@ static struct pci_driver carmine_pci_driver = {
.name = "carminefb",
.id_table = carmine_devices,
.probe = carminefb_probe,
- .remove = __devexit_p(carminefb_remove),
+ .remove = carminefb_remove,
};
static int __init carminefb_init(void)
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index f18895006627..ed3b8891e006 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -352,8 +352,8 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
* Initialisation
*/
-static void __devinit cg14_init_fix(struct fb_info *info, int linebytes,
- struct device_node *dp)
+static void cg14_init_fix(struct fb_info *info, int linebytes,
+ struct device_node *dp)
{
const char *name = dp->name;
@@ -367,7 +367,7 @@ static void __devinit cg14_init_fix(struct fb_info *info, int linebytes,
info->fix.accel = FB_ACCEL_SUN_CG14;
}
-static struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] __devinitdata = {
+static struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] = {
{
.voff = CG14_REGS,
.poff = 0x80000000,
@@ -463,7 +463,7 @@ static void cg14_unmap_regs(struct platform_device *op, struct fb_info *info,
info->screen_base, info->fix.smem_len);
}
-static int __devinit cg14_probe(struct platform_device *op)
+static int cg14_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -571,7 +571,7 @@ out_err:
return err;
}
-static int __devexit cg14_remove(struct platform_device *op)
+static int cg14_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct cg14_par *par = info->par;
@@ -603,7 +603,7 @@ static struct platform_driver cg14_driver = {
.of_match_table = cg14_match,
},
.probe = cg14_probe,
- .remove = __devexit_p(cg14_remove),
+ .remove = cg14_remove,
};
static int __init cg14_init(void)
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index c5e7612ff876..9f63507ded37 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -243,8 +243,8 @@ static int cg3_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
* Initialisation
*/
-static void __devinit cg3_init_fix(struct fb_info *info, int linebytes,
- struct device_node *dp)
+static void cg3_init_fix(struct fb_info *info, int linebytes,
+ struct device_node *dp)
{
strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
@@ -256,8 +256,8 @@ static void __devinit cg3_init_fix(struct fb_info *info, int linebytes,
info->fix.accel = FB_ACCEL_SUN_CGTHREE;
}
-static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var,
- struct device_node *dp)
+static void cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var,
+ struct device_node *dp)
{
const char *params;
char *p;
@@ -279,36 +279,36 @@ static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var,
}
}
-static u8 cg3regvals_66hz[] __devinitdata = { /* 1152 x 900, 66 Hz */
+static u8 cg3regvals_66hz[] = { /* 1152 x 900, 66 Hz */
0x14, 0xbb, 0x15, 0x2b, 0x16, 0x04, 0x17, 0x14,
0x18, 0xae, 0x19, 0x03, 0x1a, 0xa8, 0x1b, 0x24,
0x1c, 0x01, 0x1d, 0x05, 0x1e, 0xff, 0x1f, 0x01,
0x10, 0x20, 0
};
-static u8 cg3regvals_76hz[] __devinitdata = { /* 1152 x 900, 76 Hz */
+static u8 cg3regvals_76hz[] = { /* 1152 x 900, 76 Hz */
0x14, 0xb7, 0x15, 0x27, 0x16, 0x03, 0x17, 0x0f,
0x18, 0xae, 0x19, 0x03, 0x1a, 0xae, 0x1b, 0x2a,
0x1c, 0x01, 0x1d, 0x09, 0x1e, 0xff, 0x1f, 0x01,
0x10, 0x24, 0
};
-static u8 cg3regvals_rdi[] __devinitdata = { /* 640 x 480, cgRDI */
+static u8 cg3regvals_rdi[] = { /* 640 x 480, cgRDI */
0x14, 0x70, 0x15, 0x20, 0x16, 0x08, 0x17, 0x10,
0x18, 0x06, 0x19, 0x02, 0x1a, 0x31, 0x1b, 0x51,
0x1c, 0x06, 0x1d, 0x0c, 0x1e, 0xff, 0x1f, 0x01,
0x10, 0x22, 0
};
-static u8 *cg3_regvals[] __devinitdata = {
+static u8 *cg3_regvals[] = {
cg3regvals_66hz, cg3regvals_76hz, cg3regvals_rdi
};
-static u_char cg3_dacvals[] __devinitdata = {
+static u_char cg3_dacvals[] = {
4, 0xff, 5, 0x00, 6, 0x70, 7, 0x00, 0
};
-static int __devinit cg3_do_default_mode(struct cg3_par *par)
+static int cg3_do_default_mode(struct cg3_par *par)
{
enum cg3_type type;
u8 *p;
@@ -346,7 +346,7 @@ static int __devinit cg3_do_default_mode(struct cg3_par *par)
return 0;
}
-static int __devinit cg3_probe(struct platform_device *op)
+static int cg3_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -433,7 +433,7 @@ out_err:
return err;
}
-static int __devexit cg3_remove(struct platform_device *op)
+static int cg3_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct cg3_par *par = info->par;
@@ -469,7 +469,7 @@ static struct platform_driver cg3_driver = {
.of_match_table = cg3_match,
},
.probe = cg3_probe,
- .remove = __devexit_p(cg3_remove),
+ .remove = cg3_remove,
};
static int __init cg3_init(void)
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 179e96cdb323..3545decc7485 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -607,7 +607,7 @@ static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
* Initialisation
*/
-static void __devinit cg6_init_fix(struct fb_info *info, int linebytes)
+static void cg6_init_fix(struct fb_info *info, int linebytes)
{
struct cg6_par *par = (struct cg6_par *)info->par;
const char *cg6_cpu_name, *cg6_card_name;
@@ -649,7 +649,7 @@ static void __devinit cg6_init_fix(struct fb_info *info, int linebytes)
}
/* Initialize Brooktree DAC */
-static void __devinit cg6_bt_init(struct cg6_par *par)
+static void cg6_bt_init(struct cg6_par *par)
{
struct bt_regs __iomem *bt = par->bt;
@@ -663,7 +663,7 @@ static void __devinit cg6_bt_init(struct cg6_par *par)
sbus_writel(0x00 << 24, &bt->control);
}
-static void __devinit cg6_chip_init(struct fb_info *info)
+static void cg6_chip_init(struct fb_info *info)
{
struct cg6_par *par = (struct cg6_par *)info->par;
struct cg6_tec __iomem *tec = par->tec;
@@ -737,7 +737,7 @@ static void cg6_unmap_regs(struct platform_device *op, struct fb_info *info,
info->fix.smem_len);
}
-static int __devinit cg6_probe(struct platform_device *op)
+static int cg6_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -827,7 +827,7 @@ out_err:
return err;
}
-static int __devexit cg6_remove(struct platform_device *op)
+static int cg6_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct cg6_par *par = info->par;
@@ -862,7 +862,7 @@ static struct platform_driver cg6_driver = {
.of_match_table = cg6_match,
},
.probe = cg6_probe,
- .remove = __devexit_p(cg6_remove),
+ .remove = cg6_remove,
};
static int __init cg6_init(void)
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index cff742abdc5d..206a66b61072 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -292,7 +292,7 @@ static void __init chips_hw_init(void)
write_fr(chips_init_fr[i].addr, chips_init_fr[i].data);
}
-static struct fb_fix_screeninfo chipsfb_fix __devinitdata = {
+static struct fb_fix_screeninfo chipsfb_fix = {
.id = "C&T 65550",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -309,7 +309,7 @@ static struct fb_fix_screeninfo chipsfb_fix __devinitdata = {
.smem_len = 0x100000, /* 1MB */
};
-static struct fb_var_screeninfo chipsfb_var __devinitdata = {
+static struct fb_var_screeninfo chipsfb_var = {
.xres = 800,
.yres = 600,
.xres_virtual = 800,
@@ -330,7 +330,7 @@ static struct fb_var_screeninfo chipsfb_var __devinitdata = {
.vsync_len = 8,
};
-static void __devinit init_chips(struct fb_info *p, unsigned long addr)
+static void init_chips(struct fb_info *p, unsigned long addr)
{
memset(p->screen_base, 0, 0x100000);
@@ -347,8 +347,7 @@ static void __devinit init_chips(struct fb_info *p, unsigned long addr)
chips_hw_init();
}
-static int __devinit
-chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
{
struct fb_info *p;
unsigned long addr, size;
@@ -438,7 +437,7 @@ chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
return rc;
}
-static void __devexit chipsfb_remove(struct pci_dev *dp)
+static void chipsfb_remove(struct pci_dev *dp)
{
struct fb_info *p = pci_get_drvdata(dp);
@@ -495,7 +494,7 @@ static struct pci_driver chipsfb_driver = {
.name = "chipsfb",
.id_table = chipsfb_pci_tbl,
.probe = chipsfb_pci_init,
- .remove = __devexit_p(chipsfb_remove),
+ .remove = chipsfb_remove,
#ifdef CONFIG_PM
.suspend = chipsfb_pci_suspend,
.resume = chipsfb_pci_resume,
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index bc67d05cad60..c3dbbe6e3acf 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -290,34 +290,34 @@ struct zorrocl {
zorro_id ramid2; /* Zorro ID of optional second RAM device */
};
-static const struct zorrocl zcl_sd64 __devinitconst = {
+static const struct zorrocl zcl_sd64 = {
.type = BT_SD64,
.ramid = ZORRO_PROD_HELFRICH_SD64_RAM,
};
-static const struct zorrocl zcl_piccolo __devinitconst = {
+static const struct zorrocl zcl_piccolo = {
.type = BT_PICCOLO,
.ramid = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
};
-static const struct zorrocl zcl_picasso __devinitconst = {
+static const struct zorrocl zcl_picasso = {
.type = BT_PICASSO,
.ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
};
-static const struct zorrocl zcl_spectrum __devinitconst = {
+static const struct zorrocl zcl_spectrum = {
.type = BT_SPECTRUM,
.ramid = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
};
-static const struct zorrocl zcl_picasso4_z3 __devinitconst = {
+static const struct zorrocl zcl_picasso4_z3 = {
.type = BT_PICASSO4,
.regoffset = 0x00600000,
.ramsize = 4 * MB_,
.ramoffset = 0x01000000, /* 0x02000000 for 64 MiB boards */
};
-static const struct zorrocl zcl_picasso4_z2 __devinitconst = {
+static const struct zorrocl zcl_picasso4_z2 = {
.type = BT_PICASSO4,
.regoffset = 0x10000,
.ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM1,
@@ -325,7 +325,7 @@ static const struct zorrocl zcl_picasso4_z2 __devinitconst = {
};
-static const struct zorro_device_id cirrusfb_zorro_table[] __devinitconst = {
+static const struct zorro_device_id cirrusfb_zorro_table[] = {
{
.id = ZORRO_PROD_HELFRICH_SD64_REG,
.driver_data = (unsigned long)&zcl_sd64,
@@ -372,8 +372,8 @@ struct cirrusfb_info {
void (*unmap)(struct fb_info *info);
};
-static bool noaccel __devinitdata;
-static char *mode_option __devinitdata = "640x480@60";
+static bool noaccel;
+static char *mode_option = "640x480@60";
/****************************************************************************/
/**** BEGIN PROTOTYPES ******************************************************/
@@ -1892,8 +1892,8 @@ static int release_io_ports;
* based on the DRAM bandwidth bit and DRAM bank switching bit. This
* works with 1MB, 2MB and 4MB configurations (which the Motorola boards
* seem to have. */
-static unsigned int __devinit cirrusfb_get_memsize(struct fb_info *info,
- u8 __iomem *regbase)
+static unsigned int cirrusfb_get_memsize(struct fb_info *info,
+ u8 __iomem *regbase)
{
unsigned long mem;
struct cirrusfb_info *cinfo = info->par;
@@ -2003,7 +2003,7 @@ static struct fb_ops cirrusfb_ops = {
.fb_imageblit = cirrusfb_imageblit,
};
-static int __devinit cirrusfb_set_fbinfo(struct fb_info *info)
+static int cirrusfb_set_fbinfo(struct fb_info *info)
{
struct cirrusfb_info *cinfo = info->par;
struct fb_var_screeninfo *var = &info->var;
@@ -2052,7 +2052,7 @@ static int __devinit cirrusfb_set_fbinfo(struct fb_info *info)
return 0;
}
-static int __devinit cirrusfb_register(struct fb_info *info)
+static int cirrusfb_register(struct fb_info *info)
{
struct cirrusfb_info *cinfo = info->par;
int err;
@@ -2096,7 +2096,7 @@ err_dealloc_cmap:
return err;
}
-static void __devexit cirrusfb_cleanup(struct fb_info *info)
+static void cirrusfb_cleanup(struct fb_info *info)
{
struct cirrusfb_info *cinfo = info->par;
@@ -2109,8 +2109,8 @@ static void __devexit cirrusfb_cleanup(struct fb_info *info)
}
#ifdef CONFIG_PCI
-static int __devinit cirrusfb_pci_register(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int cirrusfb_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct cirrusfb_info *cinfo;
struct fb_info *info;
@@ -2215,7 +2215,7 @@ err_out:
return ret;
}
-static void __devexit cirrusfb_pci_unregister(struct pci_dev *pdev)
+static void cirrusfb_pci_unregister(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
@@ -2226,7 +2226,7 @@ static struct pci_driver cirrusfb_pci_driver = {
.name = "cirrusfb",
.id_table = cirrusfb_pci_table,
.probe = cirrusfb_pci_register,
- .remove = __devexit_p(cirrusfb_pci_unregister),
+ .remove = cirrusfb_pci_unregister,
#ifdef CONFIG_PM
#if 0
.suspend = cirrusfb_pci_suspend,
@@ -2237,8 +2237,8 @@ static struct pci_driver cirrusfb_pci_driver = {
#endif /* CONFIG_PCI */
#ifdef CONFIG_ZORRO
-static int __devinit cirrusfb_zorro_register(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int cirrusfb_zorro_register(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
struct fb_info *info;
int error;
@@ -2352,7 +2352,7 @@ err_release_fb:
return error;
}
-void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
+void cirrusfb_zorro_unregister(struct zorro_dev *z)
{
struct fb_info *info = zorro_get_drvdata(z);
@@ -2364,7 +2364,7 @@ static struct zorro_driver cirrusfb_zorro_driver = {
.name = "cirrusfb",
.id_table = cirrusfb_zorro_table,
.probe = cirrusfb_zorro_register,
- .remove = __devexit_p(cirrusfb_zorro_unregister),
+ .remove = cirrusfb_zorro_unregister,
};
#endif /* CONFIG_ZORRO */
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 63ecdf8f7baf..f00980607b8f 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -26,6 +26,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -178,7 +179,7 @@ static struct fb_ops clps7111fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static void __devinit clps711x_guess_lcd_params(struct fb_info *info)
+static void clps711x_guess_lcd_params(struct fb_info *info)
{
unsigned int lcdcon, syscon, size;
unsigned long phys_base = PAGE_OFFSET;
@@ -266,7 +267,7 @@ static void __devinit clps711x_guess_lcd_params(struct fb_info *info)
info->fix.type = FB_TYPE_PACKED_PIXELS;
}
-static int __devinit clps711x_fb_probe(struct platform_device *pdev)
+static int clps711x_fb_probe(struct platform_device *pdev)
{
int err = -ENOMEM;
@@ -291,7 +292,7 @@ static int __devinit clps711x_fb_probe(struct platform_device *pdev)
out: return err;
}
-static int __devexit clps711x_fb_remove(struct platform_device *pdev)
+static int clps711x_fb_remove(struct platform_device *pdev)
{
unregister_framebuffer(cfb);
kfree(cfb);
@@ -305,7 +306,7 @@ static struct platform_driver clps711x_fb_driver = {
.owner = THIS_MODULE,
},
.probe = clps711x_fb_probe,
- .remove = __devexit_p(clps711x_fb_remove),
+ .remove = clps711x_fb_remove,
};
module_platform_driver(clps711x_fb_driver);
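
/* Editor's note: many hunks in this region simply drop the
 * __devinit/__devexit/__devinitdata annotations and the __devexit_p()
 * wrapper, which became no-ops once CONFIG_HOTPLUG was made mandatory.
 * A hedged skeleton of a probe/remove pair and driver struct without the
 * annotations follows; it is not part of the patch, and every demo_fb_*
 * name is hypothetical. */

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_fb_probe(struct platform_device *pdev)
{
	return 0;	/* map resources, register the framebuffer, ... */
}

static int demo_fb_remove(struct platform_device *pdev)
{
	return 0;	/* tear down whatever probe set up */
}

static struct platform_driver demo_fb_driver = {
	.probe	= demo_fb_probe,
	.remove	= demo_fb_remove,	/* was: __devexit_p(demo_fb_remove) */
	.driver	= {
		.name	= "demo-fb",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(demo_fb_driver);

MODULE_LICENSE("GPL");
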
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index 01a4ee7cc6b1..a9031498e10c 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -167,7 +167,7 @@ static void lcd_clear(struct fb_info *info)
lcd_write_control(info, LCD_RESET);
}
-static struct fb_fix_screeninfo cobalt_lcdfb_fix __devinitdata = {
+static struct fb_fix_screeninfo cobalt_lcdfb_fix = {
.id = "cobalt-lcd",
.type = FB_TYPE_TEXT,
.type_aux = FB_AUX_TEXT_MDA,
@@ -331,7 +331,7 @@ static struct fb_ops cobalt_lcd_fbops = {
.fb_cursor = cobalt_lcdfb_cursor,
};
-static int __devinit cobalt_lcdfb_probe(struct platform_device *dev)
+static int cobalt_lcdfb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct resource *res;
@@ -374,7 +374,7 @@ static int __devinit cobalt_lcdfb_probe(struct platform_device *dev)
return 0;
}
-static int __devexit cobalt_lcdfb_remove(struct platform_device *dev)
+static int cobalt_lcdfb_remove(struct platform_device *dev)
{
struct fb_info *info;
@@ -389,7 +389,7 @@ static int __devexit cobalt_lcdfb_remove(struct platform_device *dev)
static struct platform_driver cobalt_lcdfb_driver = {
.probe = cobalt_lcdfb_probe,
- .remove = __devexit_p(cobalt_lcdfb_remove),
+ .remove = cobalt_lcdfb_remove,
.driver = {
.name = "cobalt-lcd",
.owner = THIS_MODULE,
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index e2c96d01d8f5..bc922c47d046 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -46,7 +46,7 @@ config VGACON_SOFT_SCROLLBACK_SIZE
config MDA_CONSOLE
depends on !M68K && !PARISC && ISA
- tristate "MDA text console (dual-headed) (EXPERIMENTAL)"
+ tristate "MDA text console (dual-headed)"
---help---
Say Y here if you have an old MDA or monochrome Hercules graphics
adapter in your system acting as a second head ( = video card). You
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index fdefa8fd72c4..f8a61e210d2e 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1242,8 +1242,16 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
if (!height || !width)
return;
- if (sy < vc->vc_top && vc->vc_top == logo_lines)
+ if (sy < vc->vc_top && vc->vc_top == logo_lines) {
vc->vc_top = 0;
+ /*
+ * If the font dimensions do not evenly divide the display
+ * dimensions, the ops->clear below won't end up clearing
+ * the margins. Call clear_margins here in case the logo
+ * bitmap stretched into the margin area.
+ */
+ fbcon_clear_margins(vc, 0);
+ }
/* Split blits that cross physical y_wrap boundary */
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 6d1596629040..b05afd03729e 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -327,9 +327,16 @@ out_unmap:
static void newport_init(struct vc_data *vc, int init)
{
- vc->vc_cols = newport_xsize / 8;
- vc->vc_rows = newport_ysize / 16;
+ int cols, rows;
+
+ cols = newport_xsize / 8;
+ rows = newport_ysize / 16;
vc->vc_can_do_color = 1;
+ if (init) {
+ vc->vc_cols = cols;
+ vc->vc_rows = rows;
+ } else
+ vc_resize(vc, cols, rows);
}
static void newport_deinit(struct vc_data *c)
diff --git a/drivers/video/console/softcursor.c b/drivers/video/console/softcursor.c
index 25f835bf3d72..46dd8f5d2e9e 100644
--- a/drivers/video/console/softcursor.c
+++ b/drivers/video/console/softcursor.c
@@ -35,8 +35,7 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor)
dsize = s_pitch * cursor->image.height;
if (dsize + sizeof(struct fb_image) != ops->cursor_size) {
- if (ops->cursor_src != NULL)
- kfree(ops->cursor_src);
+ kfree(ops->cursor_src);
ops->cursor_size = dsize + sizeof(struct fb_image);
ops->cursor_src = kmalloc(ops->cursor_size, GFP_ATOMIC);
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 39571f9e0162..35687fd56456 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -238,8 +238,7 @@ static void sti_flush(unsigned long start, unsigned long end)
flush_icache_range(start, end);
}
-static void __devinit sti_rom_copy(unsigned long base, unsigned long count,
- void *dest)
+static void sti_rom_copy(unsigned long base, unsigned long count, void *dest)
{
unsigned long dest_start = (unsigned long) dest;
@@ -266,7 +265,7 @@ static void __devinit sti_rom_copy(unsigned long base, unsigned long count,
static char default_sti_path[21] __read_mostly;
#ifndef MODULE
-static int __devinit sti_setup(char *str)
+static int sti_setup(char *str)
{
if (str)
strlcpy (default_sti_path, str, sizeof (default_sti_path));
@@ -285,12 +284,12 @@ __setup("sti=", sti_setup);
-static char __devinitdata *font_name[MAX_STI_ROMS] = { "VGA8x16", };
-static int __devinitdata font_index[MAX_STI_ROMS],
- font_height[MAX_STI_ROMS],
- font_width[MAX_STI_ROMS];
+static char *font_name[MAX_STI_ROMS] = { "VGA8x16", };
+static int font_index[MAX_STI_ROMS],
+ font_height[MAX_STI_ROMS],
+ font_width[MAX_STI_ROMS];
#ifndef MODULE
-static int __devinit sti_font_setup(char *str)
+static int sti_font_setup(char *str)
{
char *x;
int i = 0;
@@ -343,8 +342,8 @@ __setup("sti_font=", sti_font_setup);
-static void __devinit
-sti_dump_globcfg(struct sti_glob_cfg *glob_cfg, unsigned int sti_mem_request)
+static void sti_dump_globcfg(struct sti_glob_cfg *glob_cfg,
+ unsigned int sti_mem_request)
{
struct sti_glob_cfg_ext *cfg;
@@ -383,8 +382,7 @@ sti_dump_globcfg(struct sti_glob_cfg *glob_cfg, unsigned int sti_mem_request)
cfg->sti_mem_addr, sti_mem_request));
}
-static void __devinit
-sti_dump_outptr(struct sti_struct *sti)
+static void sti_dump_outptr(struct sti_struct *sti)
{
DPRINTK((KERN_INFO
"%d bits per pixel\n"
@@ -397,9 +395,8 @@ sti_dump_outptr(struct sti_struct *sti)
sti->outptr.attributes));
}
-static int __devinit
-sti_init_glob_cfg(struct sti_struct *sti,
- unsigned long rom_address, unsigned long hpa)
+static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
+ unsigned long hpa)
{
struct sti_glob_cfg *glob_cfg;
struct sti_glob_cfg_ext *glob_cfg_ext;
@@ -479,8 +476,8 @@ sti_init_glob_cfg(struct sti_struct *sti,
}
#ifdef CONFIG_FB
-static struct sti_cooked_font __devinit
-*sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
+static struct sti_cooked_font *
+sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
{
const struct font_desc *fbfont;
unsigned int size, bpc;
@@ -535,16 +532,15 @@ static struct sti_cooked_font __devinit
return cooked_font;
}
#else
-static struct sti_cooked_font __devinit
-*sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
+static struct sti_cooked_font *
+sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
{
return NULL;
}
#endif
-static struct sti_cooked_font __devinit
-*sti_select_font(struct sti_cooked_rom *rom,
- int (*search_font_fnc)(struct sti_cooked_rom *, int, int))
+static struct sti_cooked_font *sti_select_font(struct sti_cooked_rom *rom,
+ int (*search_font_fnc)(struct sti_cooked_rom *, int, int))
{
struct sti_cooked_font *font;
int i;
@@ -569,8 +565,7 @@ static struct sti_cooked_font __devinit
}
-static void __devinit
-sti_dump_rom(struct sti_rom *rom)
+static void sti_dump_rom(struct sti_rom *rom)
{
printk(KERN_INFO " id %04x-%04x, conforms to spec rev. %d.%02x\n",
rom->graphics_id[0],
@@ -587,9 +582,8 @@ sti_dump_rom(struct sti_rom *rom)
}
-static int __devinit
-sti_cook_fonts(struct sti_cooked_rom *cooked_rom,
- struct sti_rom *raw_rom)
+static int sti_cook_fonts(struct sti_cooked_rom *cooked_rom,
+ struct sti_rom *raw_rom)
{
struct sti_rom_font *raw_font, *font_start;
struct sti_cooked_font *cooked_font;
@@ -622,8 +616,7 @@ sti_cook_fonts(struct sti_cooked_rom *cooked_rom,
}
-static int __devinit
-sti_search_font(struct sti_cooked_rom *rom, int height, int width)
+static int sti_search_font(struct sti_cooked_rom *rom, int height, int width)
{
struct sti_cooked_font *font;
int i = 0;
@@ -639,8 +632,7 @@ sti_search_font(struct sti_cooked_rom *rom, int height, int width)
#define BMODE_RELOCATE(offset) offset = (offset) / 4;
#define BMODE_LAST_ADDR_OFFS 0x50
-static void * __devinit
-sti_bmode_font_raw(struct sti_cooked_font *f)
+static void *sti_bmode_font_raw(struct sti_cooked_font *f)
{
unsigned char *n, *p, *q;
int size = f->raw->bytes_per_char*256+sizeof(struct sti_rom_font);
@@ -657,8 +649,8 @@ sti_bmode_font_raw(struct sti_cooked_font *f)
return n + 3;
}
-static void __devinit
-sti_bmode_rom_copy(unsigned long base, unsigned long count, void *dest)
+static void sti_bmode_rom_copy(unsigned long base, unsigned long count,
+ void *dest)
{
unsigned long dest_start = (unsigned long) dest;
@@ -672,8 +664,7 @@ sti_bmode_rom_copy(unsigned long base, unsigned long count, void *dest)
sti_flush(dest_start, (unsigned long)dest);
}
-static struct sti_rom * __devinit
-sti_get_bmode_rom (unsigned long address)
+static struct sti_rom *sti_get_bmode_rom (unsigned long address)
{
struct sti_rom *raw;
u32 size;
@@ -708,7 +699,7 @@ sti_get_bmode_rom (unsigned long address)
return raw;
}
-static struct sti_rom __devinit *sti_get_wmode_rom(unsigned long address)
+static struct sti_rom *sti_get_wmode_rom(unsigned long address)
{
struct sti_rom *raw;
unsigned long size;
@@ -723,8 +714,8 @@ static struct sti_rom __devinit *sti_get_wmode_rom(unsigned long address)
return raw;
}
-static int __devinit sti_read_rom(int wordmode, struct sti_struct *sti,
- unsigned long address)
+static int sti_read_rom(int wordmode, struct sti_struct *sti,
+ unsigned long address)
{
struct sti_cooked_rom *cooked;
struct sti_rom *raw = NULL;
@@ -806,8 +797,9 @@ out_err:
return 0;
}
-static struct sti_struct * __devinit
-sti_try_rom_generic(unsigned long address, unsigned long hpa, struct pci_dev *pd)
+static struct sti_struct *sti_try_rom_generic(unsigned long address,
+ unsigned long hpa,
+ struct pci_dev *pd)
{
struct sti_struct *sti;
int ok;
@@ -921,7 +913,7 @@ out_err:
return NULL;
}
-static void __devinit sticore_check_for_default_sti(struct sti_struct *sti, char *path)
+static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
{
if (strcmp (path, default_sti_path) == 0)
default_sti = sti;
@@ -932,7 +924,7 @@ static void __devinit sticore_check_for_default_sti(struct sti_struct *sti, char
* in the additional address field addr[1] while on
* older Systems the PDC stores it in page0->proc_sti
*/
-static int __devinit sticore_pa_init(struct parisc_device *dev)
+static int sticore_pa_init(struct parisc_device *dev)
{
char pa_path[21];
struct sti_struct *sti = NULL;
@@ -953,8 +945,7 @@ static int __devinit sticore_pa_init(struct parisc_device *dev)
}
-static int __devinit sticore_pci_init(struct pci_dev *pd,
- const struct pci_device_id *ent)
+static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent)
{
#ifdef CONFIG_PCI
unsigned long fb_base, rom_base;
@@ -1001,7 +992,7 @@ static int __devinit sticore_pci_init(struct pci_dev *pd,
}
-static void __devexit sticore_pci_remove(struct pci_dev *pd)
+static void sticore_pci_remove(struct pci_dev *pd)
{
BUG();
}
@@ -1043,7 +1034,7 @@ static struct parisc_driver pa_sti_driver = {
static int sticore_initialized __read_mostly;
-static void __devinit sti_init_roms(void)
+static void sti_init_roms(void)
{
if (sticore_initialized)
return;
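
The sticore hunks above, and the cyber2000fb, da8xx-fb, dnfb, efifb and ep93xx-fb hunks that follow, all apply the same tree-wide cleanup: the __devinit/__devexit section annotations and __devinitdata markers are dropped, and .remove pointers no longer go through the __devexit_p() wrapper. A minimal sketch of the resulting driver shape, using a hypothetical platform driver "foo" (the name and the empty bodies are illustration only, not part of this patch):

#include <linux/module.h>
#include <linux/platform_device.h>

/* was: static int __devinit foo_probe(struct platform_device *pdev) */
static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

/* was: static int __devexit foo_remove(struct platform_device *pdev) */
static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,	/* was: .remove = __devexit_p(foo_remove) */
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(foo_driver);
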
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index e40125cb313e..57886787ead0 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1230,7 +1230,7 @@ static int cyber2000fb_ddc_getsda(void *data)
return retval;
}
-static int __devinit cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
+static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
{
strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
sizeof(cfb->ddc_adapter.name));
@@ -1305,7 +1305,7 @@ static int cyber2000fb_i2c_getscl(void *data)
return ret;
}
-static int __devinit cyber2000fb_i2c_register(struct cfb_info *cfb)
+static int cyber2000fb_i2c_register(struct cfb_info *cfb)
{
strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
sizeof(cfb->i2c_adapter.name));
@@ -1336,7 +1336,7 @@ static void cyber2000fb_i2c_unregister(struct cfb_info *cfb)
* These parameters give
* 640x480, hsync 31.5kHz, vsync 60Hz
*/
-static struct fb_videomode __devinitdata cyber2000fb_default_mode = {
+static struct fb_videomode cyber2000fb_default_mode = {
.refresh = 60,
.xres = 640,
.yres = 480,
@@ -1404,8 +1404,7 @@ static void cyberpro_init_hw(struct cfb_info *cfb)
}
}
-static struct cfb_info __devinit *cyberpro_alloc_fb_info(unsigned int id,
- char *name)
+static struct cfb_info *cyberpro_alloc_fb_info(unsigned int id, char *name)
{
struct cfb_info *cfb;
@@ -1524,7 +1523,7 @@ static int cyber2000fb_setup(char *options)
* - memory mapped access to the registers
* - initialised mem_ctl1 and mem_ctl2 appropriately.
*/
-static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
+static int cyberpro_common_probe(struct cfb_info *cfb)
{
u_long smem_size;
u_int h_sync, v_sync;
@@ -1615,7 +1614,7 @@ failed:
return err;
}
-static void __devexit cyberpro_common_remove(struct cfb_info *cfb)
+static void cyberpro_common_remove(struct cfb_info *cfb)
{
unregister_framebuffer(&cfb->fb);
#ifdef CONFIG_FB_CYBER2000_DDC
@@ -1646,7 +1645,7 @@ static void cyberpro_common_resume(struct cfb_info *cfb)
#include <mach/framebuffer.h>
-static int __devinit cyberpro_vl_probe(void)
+static int cyberpro_vl_probe(void)
{
struct cfb_info *cfb;
int err = -ENOMEM;
@@ -1780,8 +1779,8 @@ static int cyberpro_pci_enable_mmio(struct cfb_info *cfb)
return 0;
}
-static int __devinit
-cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int cyberpro_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct cfb_info *cfb;
char name[16];
@@ -1863,7 +1862,7 @@ failed_release:
return err;
}
-static void __devexit cyberpro_pci_remove(struct pci_dev *dev)
+static void cyberpro_pci_remove(struct pci_dev *dev)
{
struct cfb_info *cfb = pci_get_drvdata(dev);
@@ -1923,7 +1922,7 @@ MODULE_DEVICE_TABLE(pci, cyberpro_pci_table);
static struct pci_driver cyberpro_driver = {
.name = "CyberPro",
.probe = cyberpro_pci_probe,
- .remove = __devexit_p(cyberpro_pci_remove),
+ .remove = cyberpro_pci_remove,
.suspend = cyberpro_pci_suspend,
.resume = cyberpro_pci_resume,
.id_table = cyberpro_pci_table
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 80665f66ac1a..0810939936f4 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -185,7 +185,7 @@ struct da8xx_fb_par {
};
/* Variable Screen Information */
-static struct fb_var_screeninfo da8xx_fb_var __devinitdata = {
+static struct fb_var_screeninfo da8xx_fb_var = {
.xoffset = 0,
.yoffset = 0,
.transp = {0, 0, 0},
@@ -202,7 +202,7 @@ static struct fb_var_screeninfo da8xx_fb_var __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED
};
-static struct fb_fix_screeninfo da8xx_fb_fix __devinitdata = {
+static struct fb_fix_screeninfo da8xx_fb_fix = {
.id = "DA8xx FB Drv",
.type = FB_TYPE_PACKED_PIXELS,
.type_aux = 0,
@@ -213,62 +213,51 @@ static struct fb_fix_screeninfo da8xx_fb_fix __devinitdata = {
.accel = FB_ACCEL_NONE
};
-struct da8xx_panel {
- const char name[25]; /* Full name <vendor>_<model> */
- unsigned short width;
- unsigned short height;
- int hfp; /* Horizontal front porch */
- int hbp; /* Horizontal back porch */
- int hsw; /* Horizontal Sync Pulse Width */
- int vfp; /* Vertical front porch */
- int vbp; /* Vertical back porch */
- int vsw; /* Vertical Sync Pulse Width */
- unsigned int pxl_clk; /* Pixel clock */
- unsigned char invert_pxl_clk; /* Invert Pixel clock */
-};
-
-static struct da8xx_panel known_lcd_panels[] = {
+static struct fb_videomode known_lcd_panels[] = {
/* Sharp LCD035Q3DG01 */
[0] = {
- .name = "Sharp_LCD035Q3DG01",
- .width = 320,
- .height = 240,
- .hfp = 8,
- .hbp = 6,
- .hsw = 0,
- .vfp = 2,
- .vbp = 2,
- .vsw = 0,
- .pxl_clk = 4608000,
- .invert_pxl_clk = 1,
+ .name = "Sharp_LCD035Q3DG01",
+ .xres = 320,
+ .yres = 240,
+ .pixclock = 4608000,
+ .left_margin = 6,
+ .right_margin = 8,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .hsync_len = 0,
+ .vsync_len = 0,
+ .sync = FB_SYNC_CLK_INVERT |
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
/* Sharp LK043T1DG01 */
[1] = {
- .name = "Sharp_LK043T1DG01",
- .width = 480,
- .height = 272,
- .hfp = 2,
- .hbp = 2,
- .hsw = 41,
- .vfp = 2,
- .vbp = 2,
- .vsw = 10,
- .pxl_clk = 7833600,
- .invert_pxl_clk = 0,
+ .name = "Sharp_LK043T1DG01",
+ .xres = 480,
+ .yres = 272,
+ .pixclock = 7833600,
+ .left_margin = 2,
+ .right_margin = 2,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .hsync_len = 41,
+ .vsync_len = 10,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = 0,
},
[2] = {
/* Hitachi SP10Q010 */
- .name = "SP10Q010",
- .width = 320,
- .height = 240,
- .hfp = 10,
- .hbp = 10,
- .hsw = 10,
- .vfp = 10,
- .vbp = 10,
- .vsw = 10,
- .pxl_clk = 7833600,
- .invert_pxl_clk = 0,
+ .name = "SP10Q010",
+ .xres = 320,
+ .yres = 240,
+ .pixclock = 7833600,
+ .left_margin = 10,
+ .right_margin = 10,
+ .upper_margin = 10,
+ .lower_margin = 10,
+ .hsync_len = 10,
+ .vsync_len = 10,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = 0,
},
};
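
For reference, the field correspondence implied by the conversion above (the driver-private struct da8xx_panel is replaced by the generic struct fb_videomode from <linux/fb.h>), as derived from the mode table above and the fb_probe() hunks below:

/*
 * da8xx_panel.width                 -> fb_videomode.xres
 * da8xx_panel.height                -> fb_videomode.yres
 * da8xx_panel.hfp (H front porch)   -> fb_videomode.right_margin
 * da8xx_panel.hbp (H back porch)    -> fb_videomode.left_margin
 * da8xx_panel.hsw (H sync width)    -> fb_videomode.hsync_len
 * da8xx_panel.vfp (V front porch)   -> fb_videomode.lower_margin
 * da8xx_panel.vbp (V back porch)    -> fb_videomode.upper_margin
 * da8xx_panel.vsw (V sync width)    -> fb_videomode.vsync_len
 * da8xx_panel.pxl_clk               -> fb_videomode.pixclock (kept in Hz here)
 * da8xx_panel.invert_pxl_clk        -> FB_SYNC_CLK_INVERT in .sync
 * lcd_ctrl_config.invert_line_clock -> FB_SYNC_HOR_HIGH_ACT in .sync
 * lcd_ctrl_config.invert_frm_clock  -> FB_SYNC_VERT_HIGH_ACT in .sync
 */
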
@@ -399,10 +388,9 @@ static int lcd_cfg_dma(int burst_size, int fifo_th)
reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_8);
break;
case 16:
+ default:
reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_16);
break;
- default:
- return -EINVAL;
}
reg |= (fifo_th << 8);
@@ -447,7 +435,8 @@ static void lcd_cfg_vertical_sync(int back_porch, int pulse_width,
lcdc_write(reg, LCD_RASTER_TIMING_1_REG);
}
-static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
+static int lcd_cfg_display(const struct lcd_ctrl_config *cfg,
+ struct fb_videomode *panel)
{
u32 reg;
u32 reg_int;
@@ -456,7 +445,7 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
LCD_MONO_8BIT_MODE |
LCD_MONOCHROME_MODE);
- switch (cfg->p_disp_panel->panel_shade) {
+ switch (cfg->panel_shade) {
case MONOCHROME:
reg |= LCD_MONOCHROME_MODE;
if (cfg->mono_8bit_mode)
@@ -469,7 +458,9 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
break;
case COLOR_PASSIVE:
- if (cfg->stn_565_mode)
+ /* AC bias applicable only for passive panels */
+ lcd_cfg_ac_bias(cfg->ac_bias, cfg->ac_bias_intrpt);
+ if (cfg->bpp == 12 && cfg->stn_565_mode)
reg |= LCD_STN_565_ENABLE;
break;
@@ -490,22 +481,19 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
reg = lcdc_read(LCD_RASTER_TIMING_2_REG);
- if (cfg->sync_ctrl)
- reg |= LCD_SYNC_CTRL;
- else
- reg &= ~LCD_SYNC_CTRL;
+ reg |= LCD_SYNC_CTRL;
if (cfg->sync_edge)
reg |= LCD_SYNC_EDGE;
else
reg &= ~LCD_SYNC_EDGE;
- if (cfg->invert_line_clock)
+ if (panel->sync & FB_SYNC_HOR_HIGH_ACT)
reg |= LCD_INVERT_LINE_CLOCK;
else
reg &= ~LCD_INVERT_LINE_CLOCK;
- if (cfg->invert_frm_clock)
+ if (panel->sync & FB_SYNC_VERT_HIGH_ACT)
reg |= LCD_INVERT_FRAME_CLOCK;
else
reg &= ~LCD_INVERT_FRAME_CLOCK;
@@ -728,7 +716,7 @@ static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
}
static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
- struct da8xx_panel *panel)
+ struct fb_videomode *panel)
{
u32 bpp;
int ret = 0;
@@ -738,7 +726,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
/* Calculate the divider */
lcd_calc_clk_divider(par);
- if (panel->invert_pxl_clk)
+ if (panel->sync & FB_SYNC_CLK_INVERT)
lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) |
LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG);
else
@@ -750,30 +738,23 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
if (ret < 0)
return ret;
- /* Configure the AC bias properties. */
- lcd_cfg_ac_bias(cfg->ac_bias, cfg->ac_bias_intrpt);
-
/* Configure the vertical and horizontal sync properties. */
- lcd_cfg_vertical_sync(panel->vbp, panel->vsw, panel->vfp);
- lcd_cfg_horizontal_sync(panel->hbp, panel->hsw, panel->hfp);
+ lcd_cfg_vertical_sync(panel->lower_margin, panel->vsync_len,
+ panel->upper_margin);
+ lcd_cfg_horizontal_sync(panel->right_margin, panel->hsync_len,
+ panel->left_margin);
/* Configure for display */
- ret = lcd_cfg_display(cfg);
+ ret = lcd_cfg_display(cfg, panel);
if (ret < 0)
return ret;
- if (QVGA != cfg->p_disp_panel->panel_type)
- return -EINVAL;
+ bpp = cfg->bpp;
- if (cfg->bpp <= cfg->p_disp_panel->max_bpp &&
- cfg->bpp >= cfg->p_disp_panel->min_bpp)
- bpp = cfg->bpp;
- else
- bpp = cfg->p_disp_panel->max_bpp;
if (bpp == 12)
bpp = 16;
- ret = lcd_cfg_frame_buffer(par, (unsigned int)panel->width,
- (unsigned int)panel->height, bpp,
+ ret = lcd_cfg_frame_buffer(par, (unsigned int)panel->xres,
+ (unsigned int)panel->yres, bpp,
cfg->raster_order);
if (ret < 0)
return ret;
@@ -1012,7 +993,7 @@ static inline void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
}
#endif
-static int __devexit fb_remove(struct platform_device *dev)
+static int fb_remove(struct platform_device *dev)
{
struct fb_info *info = dev_get_drvdata(&dev->dev);
@@ -1230,12 +1211,12 @@ static unsigned int da8xxfb_pixel_clk_period(struct da8xx_fb_par *par)
return pix_clk_period_picosec;
}
-static int __devinit fb_probe(struct platform_device *device)
+static int fb_probe(struct platform_device *device)
{
struct da8xx_lcdc_platform_data *fb_pdata =
device->dev.platform_data;
struct lcd_ctrl_config *lcd_cfg;
- struct da8xx_panel *lcdc_info;
+ struct fb_videomode *lcdc_info;
struct fb_info *da8xx_fb_info;
struct clk *fb_clk = NULL;
struct da8xx_fb_par *par;
@@ -1267,7 +1248,7 @@ static int __devinit fb_probe(struct platform_device *device)
goto err_request_mem;
}
- fb_clk = clk_get(&device->dev, NULL);
+ fb_clk = clk_get(&device->dev, "fck");
if (IS_ERR(fb_clk)) {
dev_err(&device->dev, "Can not get device clock\n");
ret = -ENODEV;
@@ -1283,6 +1264,7 @@ static int __devinit fb_probe(struct platform_device *device)
lcd_revision = LCD_VERSION_1;
break;
case 0x4F200800:
+ case 0x4F201000:
lcd_revision = LCD_VERSION_2;
break;
default:
@@ -1323,7 +1305,7 @@ static int __devinit fb_probe(struct platform_device *device)
#ifdef CONFIG_CPU_FREQ
par->lcd_fck_rate = clk_get_rate(fb_clk);
#endif
- par->pxl_clk = lcdc_info->pxl_clk;
+ par->pxl_clk = lcdc_info->pixclock;
if (fb_pdata->panel_power_ctrl) {
par->panel_power_ctrl = fb_pdata->panel_power_ctrl;
par->panel_power_ctrl(1);
@@ -1336,8 +1318,8 @@ static int __devinit fb_probe(struct platform_device *device)
}
/* allocate frame buffer */
- par->vram_size = lcdc_info->width * lcdc_info->height * lcd_cfg->bpp;
- ulcm = lcm((lcdc_info->width * lcd_cfg->bpp)/8, PAGE_SIZE);
+ par->vram_size = lcdc_info->xres * lcdc_info->yres * lcd_cfg->bpp;
+ ulcm = lcm((lcdc_info->xres * lcd_cfg->bpp)/8, PAGE_SIZE);
par->vram_size = roundup(par->vram_size/8, ulcm);
par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
@@ -1355,10 +1337,10 @@ static int __devinit fb_probe(struct platform_device *device)
da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt;
da8xx_fb_fix.smem_start = par->vram_phys;
da8xx_fb_fix.smem_len = par->vram_size;
- da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
+ da8xx_fb_fix.line_length = (lcdc_info->xres * lcd_cfg->bpp) / 8;
par->dma_start = par->vram_phys;
- par->dma_end = par->dma_start + lcdc_info->height *
+ par->dma_end = par->dma_start + lcdc_info->yres *
da8xx_fb_fix.line_length - 1;
/* allocate palette buffer */
@@ -1384,22 +1366,22 @@ static int __devinit fb_probe(struct platform_device *device)
/* Initialize par */
da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp;
- da8xx_fb_var.xres = lcdc_info->width;
- da8xx_fb_var.xres_virtual = lcdc_info->width;
+ da8xx_fb_var.xres = lcdc_info->xres;
+ da8xx_fb_var.xres_virtual = lcdc_info->xres;
- da8xx_fb_var.yres = lcdc_info->height;
- da8xx_fb_var.yres_virtual = lcdc_info->height * LCD_NUM_BUFFERS;
+ da8xx_fb_var.yres = lcdc_info->yres;
+ da8xx_fb_var.yres_virtual = lcdc_info->yres * LCD_NUM_BUFFERS;
da8xx_fb_var.grayscale =
- lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0;
+ lcd_cfg->panel_shade == MONOCHROME ? 1 : 0;
da8xx_fb_var.bits_per_pixel = lcd_cfg->bpp;
- da8xx_fb_var.hsync_len = lcdc_info->hsw;
- da8xx_fb_var.vsync_len = lcdc_info->vsw;
- da8xx_fb_var.right_margin = lcdc_info->hfp;
- da8xx_fb_var.left_margin = lcdc_info->hbp;
- da8xx_fb_var.lower_margin = lcdc_info->vfp;
- da8xx_fb_var.upper_margin = lcdc_info->vbp;
+ da8xx_fb_var.hsync_len = lcdc_info->hsync_len;
+ da8xx_fb_var.vsync_len = lcdc_info->vsync_len;
+ da8xx_fb_var.right_margin = lcdc_info->right_margin;
+ da8xx_fb_var.left_margin = lcdc_info->left_margin;
+ da8xx_fb_var.lower_margin = lcdc_info->lower_margin;
+ da8xx_fb_var.upper_margin = lcdc_info->upper_margin;
da8xx_fb_var.pixclock = da8xxfb_pixel_clk_period(par);
/* Initialize fbinfo */
@@ -1598,7 +1580,7 @@ static int fb_resume(struct platform_device *dev)
static struct platform_driver da8xx_fb_driver = {
.probe = fb_probe,
- .remove = __devexit_p(fb_remove),
+ .remove = fb_remove,
.suspend = fb_suspend,
.resume = fb_resume,
.driver = {
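
The fb_probe() hunk above copies these fb_videomode timings into da8xx_fb_var field by field. The modedb helper fb_videomode_to_var() from <linux/fb.h> performs the same mapping in one call; a sketch of how that could look (da8xx_fill_var is a hypothetical helper, not part of this patch, and the driver would still overwrite var->pixclock with the value computed by da8xxfb_pixel_clk_period()):

#include <linux/fb.h>

static void da8xx_fill_var(struct fb_var_screeninfo *var,
			   const struct fb_videomode *mode, int bpp)
{
	/* xres/yres, margins, sync lengths and sync flags from the mode */
	fb_videomode_to_var(var, mode);
	var->bits_per_pixel = bpp;
	/* pixclock is recomputed from the LCDC clock by the caller */
}
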
diff --git a/drivers/video/dnfb.c b/drivers/video/dnfb.c
index 49e3dda1a361..3526899da61b 100644
--- a/drivers/video/dnfb.c
+++ b/drivers/video/dnfb.c
@@ -115,7 +115,7 @@ static struct fb_ops dn_fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-struct fb_var_screeninfo dnfb_var __devinitdata = {
+struct fb_var_screeninfo dnfb_var = {
.xres = 1280,
.yres = 1024,
.xres_virtual = 2048,
@@ -126,7 +126,7 @@ struct fb_var_screeninfo dnfb_var __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo dnfb_fix __devinitdata = {
+static struct fb_fix_screeninfo dnfb_fix = {
.id = "Apollo Mono",
.smem_start = (FRAME_BUFFER_START + IO_BASE),
.smem_len = FRAME_BUFFER_LEN,
@@ -224,7 +224,7 @@ void dnfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
* Initialization
*/
-static int __devinit dnfb_probe(struct platform_device *dev)
+static int dnfb_probe(struct platform_device *dev)
{
struct fb_info *info;
int err = 0;
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 932abaa58a89..50fe668c6172 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -20,7 +20,7 @@ static bool request_mem_succeeded = false;
static struct pci_dev *default_vga;
-static struct fb_var_screeninfo efifb_defined __devinitdata = {
+static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
@@ -31,7 +31,7 @@ static struct fb_var_screeninfo efifb_defined __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo efifb_fix __devinitdata = {
+static struct fb_fix_screeninfo efifb_fix = {
.id = "EFI VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 755ef3e65caf..3f2519d30715 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -484,7 +484,7 @@ static void ep93xxfb_dealloc_videomem(struct fb_info *info)
info->screen_base, info->fix.smem_start);
}
-static int __devinit ep93xxfb_probe(struct platform_device *pdev)
+static int ep93xxfb_probe(struct platform_device *pdev)
{
struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
struct fb_info *info;
@@ -599,7 +599,7 @@ failed_cmap:
return err;
}
-static int __devexit ep93xxfb_remove(struct platform_device *pdev)
+static int ep93xxfb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
struct ep93xx_fbi *fbi = info->par;
@@ -620,14 +620,14 @@ static int __devexit ep93xxfb_remove(struct platform_device *pdev)
static struct platform_driver ep93xxfb_driver = {
.probe = ep93xxfb_probe,
- .remove = __devexit_p(ep93xxfb_remove),
+ .remove = ep93xxfb_remove,
.driver = {
.name = "ep93xx-fb",
.owner = THIS_MODULE,
},
};
-static int __devinit ep93xxfb_init(void)
+static int ep93xxfb_init(void)
{
return platform_driver_register(&ep93xxfb_driver);
}
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index d55470e75412..de9d4da0e3da 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <video/exynos_dp.h>
@@ -48,10 +49,6 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
{
int timeout_loop = 0;
- exynos_dp_init_hpd(dp);
-
- usleep_range(200, 210);
-
while (exynos_dp_get_plug_in_status(dp) != 0) {
timeout_loop++;
if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
@@ -90,9 +87,11 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
*/
/* Read Extension Flag, Number of 128-byte EDID extension blocks */
- exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
+ retval = exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
EDID_EXTENSION_FLAG,
&extend_block);
+ if (retval)
+ return retval;
if (extend_block > 0) {
dev_dbg(dp->dev, "EDID data includes a single extension!\n");
@@ -181,14 +180,15 @@ static int exynos_dp_handle_edid(struct exynos_dp_device *dp)
int retval;
/* Read DPCD DPCD_ADDR_DPCD_REV~RECEIVE_PORT1_CAP_1 */
- exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_DPCD_REV,
- 12, buf);
+ retval = exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_DPCD_REV,
+ 12, buf);
+ if (retval)
+ return retval;
/* Read EDID */
for (i = 0; i < 3; i++) {
retval = exynos_dp_read_edid(dp);
- if (retval == 0)
+ if (!retval)
break;
}
@@ -261,11 +261,10 @@ static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
}
}
-static void exynos_dp_link_start(struct exynos_dp_device *dp)
+static int exynos_dp_link_start(struct exynos_dp_device *dp)
{
u8 buf[4];
- int lane;
- int lane_count;
+ int lane, lane_count, pll_tries, retval;
lane_count = dp->link_train.lane_count;
@@ -275,10 +274,6 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp)
for (lane = 0; lane < lane_count; lane++)
dp->link_train.cr_loop[lane] = 0;
- /* Set sink to D0 (Sink Not Ready) mode. */
- exynos_dp_write_byte_to_dpcd(dp, DPCD_ADDR_SINK_POWER_STATE,
- DPCD_SET_POWER_STATE_D0);
-
/* Set link rate and count as you want to establish*/
exynos_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
exynos_dp_set_lane_count(dp, dp->link_train.lane_count);
@@ -286,29 +281,46 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp)
/* Setup RX configuration */
buf[0] = dp->link_train.link_rate;
buf[1] = dp->link_train.lane_count;
- exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_LINK_BW_SET,
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_LINK_BW_SET,
2, buf);
+ if (retval)
+ return retval;
/* Set TX pre-emphasis to minimum */
for (lane = 0; lane < lane_count; lane++)
exynos_dp_set_lane_lane_pre_emphasis(dp,
PRE_EMPHASIS_LEVEL_0, lane);
+ /* Wait for PLL lock */
+ pll_tries = 0;
+ while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
+ dev_err(dp->dev, "Wait for PLL lock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ pll_tries++;
+ usleep_range(90, 120);
+ }
+
/* Set training pattern 1 */
exynos_dp_set_training_pattern(dp, TRAINING_PTN1);
/* Set RX training pattern */
- exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- DPCD_SCRAMBLING_DISABLED |
- DPCD_TRAINING_PATTERN_1);
+ retval = exynos_dp_write_byte_to_dpcd(dp,
+ DPCD_ADDR_TRAINING_PATTERN_SET,
+ DPCD_SCRAMBLING_DISABLED | DPCD_TRAINING_PATTERN_1);
+ if (retval)
+ return retval;
for (lane = 0; lane < lane_count; lane++)
buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 |
DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0;
- exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
- lane_count, buf);
+
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
+ lane_count, buf);
+
+ return retval;
}
static unsigned char exynos_dp_get_lane_status(u8 link_status[2], int lane)
@@ -332,18 +344,17 @@ static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
return 0;
}
-static int exynos_dp_channel_eq_ok(u8 link_align[3], int lane_count)
+static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
+ int lane_count)
{
int lane;
- u8 lane_align;
u8 lane_status;
- lane_align = link_align[2];
- if ((lane_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
+ if ((link_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
return -EINVAL;
for (lane = 0; lane < lane_count; lane++) {
- lane_status = exynos_dp_get_lane_status(link_align, lane);
+ lane_status = exynos_dp_get_lane_status(link_status, lane);
lane_status &= DPCD_CHANNEL_EQ_BITS;
if (lane_status != DPCD_CHANNEL_EQ_BITS)
return -EINVAL;
@@ -427,60 +438,60 @@ static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp)
dp->link_train.lt_state = FAILED;
}
-static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
+static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp,
+ u8 adjust_request[2])
{
- u8 link_status[2];
- int lane;
- int lane_count;
+ int lane, lane_count;
+ u8 voltage_swing, pre_emphasis, training_lane;
- u8 adjust_request[2];
- u8 voltage_swing;
- u8 pre_emphasis;
- u8 training_lane;
+ lane_count = dp->link_train.lane_count;
+ for (lane = 0; lane < lane_count; lane++) {
+ voltage_swing = exynos_dp_get_adjust_request_voltage(
+ adjust_request, lane);
+ pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
+ adjust_request, lane);
+ training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
+ DPCD_PRE_EMPHASIS_SET(pre_emphasis);
+
+ if (voltage_swing == VOLTAGE_LEVEL_3)
+ training_lane |= DPCD_MAX_SWING_REACHED;
+ if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
+ training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
+
+ dp->link_train.training_lane[lane] = training_lane;
+ }
+}
+
+static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
+{
+ int lane, lane_count, retval;
+ u8 voltage_swing, pre_emphasis, training_lane;
+ u8 link_status[2], adjust_request[2];
usleep_range(100, 101);
lane_count = dp->link_train.lane_count;
- exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
- 2, link_status);
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
+ if (retval)
+ return retval;
+
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ if (retval)
+ return retval;
if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
/* set training pattern 2 for EQ */
exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
- for (lane = 0; lane < lane_count; lane++) {
- exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
- 2, adjust_request);
- voltage_swing = exynos_dp_get_adjust_request_voltage(
- adjust_request, lane);
- pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
- adjust_request, lane);
- training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
- DPCD_PRE_EMPHASIS_SET(pre_emphasis);
-
- if (voltage_swing == VOLTAGE_LEVEL_3)
- training_lane |= DPCD_MAX_SWING_REACHED;
- if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
- training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
-
- dp->link_train.training_lane[lane] = training_lane;
-
- exynos_dp_set_lane_link_training(dp,
- dp->link_train.training_lane[lane],
- lane);
- }
-
- exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- DPCD_SCRAMBLING_DISABLED |
- DPCD_TRAINING_PATTERN_2);
-
- exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
- lane_count,
- dp->link_train.training_lane);
+ retval = exynos_dp_write_byte_to_dpcd(dp,
+ DPCD_ADDR_TRAINING_PATTERN_SET,
+ DPCD_SCRAMBLING_DISABLED |
+ DPCD_TRAINING_PATTERN_2);
+ if (retval)
+ return retval;
dev_info(dp->dev, "Link Training Clock Recovery success\n");
dp->link_train.lt_state = EQUALIZER_TRAINING;
@@ -488,152 +499,116 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
for (lane = 0; lane < lane_count; lane++) {
training_lane = exynos_dp_get_lane_link_training(
dp, lane);
- exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
- 2, adjust_request);
voltage_swing = exynos_dp_get_adjust_request_voltage(
adjust_request, lane);
pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
adjust_request, lane);
- if (voltage_swing == VOLTAGE_LEVEL_3 ||
- pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
- dev_err(dp->dev, "voltage or pre emphasis reached max level\n");
- goto reduce_link_rate;
- }
-
- if ((DPCD_VOLTAGE_SWING_GET(training_lane) ==
- voltage_swing) &&
- (DPCD_PRE_EMPHASIS_GET(training_lane) ==
- pre_emphasis)) {
+ if (DPCD_VOLTAGE_SWING_GET(training_lane) ==
+ voltage_swing &&
+ DPCD_PRE_EMPHASIS_GET(training_lane) ==
+ pre_emphasis)
dp->link_train.cr_loop[lane]++;
- if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP) {
- dev_err(dp->dev, "CR Max loop\n");
- goto reduce_link_rate;
- }
- }
-
- training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
- DPCD_PRE_EMPHASIS_SET(pre_emphasis);
- if (voltage_swing == VOLTAGE_LEVEL_3)
- training_lane |= DPCD_MAX_SWING_REACHED;
- if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
- training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
+ if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
+ voltage_swing == VOLTAGE_LEVEL_3 ||
+ pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
+ dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
+ dp->link_train.cr_loop[lane],
+ voltage_swing, pre_emphasis);
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+ }
+ }
- dp->link_train.training_lane[lane] = training_lane;
+ exynos_dp_get_adjust_training_lane(dp, adjust_request);
- exynos_dp_set_lane_link_training(dp,
- dp->link_train.training_lane[lane], lane);
- }
+ for (lane = 0; lane < lane_count; lane++)
+ exynos_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[lane], lane);
- exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
- lane_count,
+ retval = exynos_dp_write_bytes_to_dpcd(dp,
+ DPCD_ADDR_TRAINING_LANE0_SET, lane_count,
dp->link_train.training_lane);
- }
-
- return 0;
+ if (retval)
+ return retval;
-reduce_link_rate:
- exynos_dp_reduce_link_rate(dp);
- return -EIO;
+ return retval;
}
static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
{
- u8 link_status[2];
- u8 link_align[3];
- int lane;
- int lane_count;
+ int lane, lane_count, retval;
u32 reg;
-
- u8 adjust_request[2];
- u8 voltage_swing;
- u8 pre_emphasis;
- u8 training_lane;
+ u8 link_align, link_status[2], adjust_request[2];
usleep_range(400, 401);
lane_count = dp->link_train.lane_count;
- exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
- 2, link_status);
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
+ if (retval)
+ return retval;
- if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
- link_align[0] = link_status[0];
- link_align[1] = link_status[1];
+ if (exynos_dp_clock_recovery_ok(link_status, lane_count)) {
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
- exynos_dp_read_byte_from_dpcd(dp,
- DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED,
- &link_align[2]);
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ if (retval)
+ return retval;
- for (lane = 0; lane < lane_count; lane++) {
- exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
- 2, adjust_request);
- voltage_swing = exynos_dp_get_adjust_request_voltage(
- adjust_request, lane);
- pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
- adjust_request, lane);
- training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
- DPCD_PRE_EMPHASIS_SET(pre_emphasis);
+ retval = exynos_dp_read_byte_from_dpcd(dp,
+ DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED, &link_align);
+ if (retval)
+ return retval;
- if (voltage_swing == VOLTAGE_LEVEL_3)
- training_lane |= DPCD_MAX_SWING_REACHED;
- if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
- training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
+ exynos_dp_get_adjust_training_lane(dp, adjust_request);
- dp->link_train.training_lane[lane] = training_lane;
- }
+ if (!exynos_dp_channel_eq_ok(link_status, link_align, lane_count)) {
+ /* training pattern set to Normal */
+ exynos_dp_training_pattern_dis(dp);
- if (exynos_dp_channel_eq_ok(link_align, lane_count) == 0) {
- /* traing pattern Set to Normal */
- exynos_dp_training_pattern_dis(dp);
+ dev_info(dp->dev, "Link Training success!\n");
- dev_info(dp->dev, "Link Training success!\n");
-
- exynos_dp_get_link_bandwidth(dp, &reg);
- dp->link_train.link_rate = reg;
- dev_dbg(dp->dev, "final bandwidth = %.2x\n",
- dp->link_train.link_rate);
+ exynos_dp_get_link_bandwidth(dp, &reg);
+ dp->link_train.link_rate = reg;
+ dev_dbg(dp->dev, "final bandwidth = %.2x\n",
+ dp->link_train.link_rate);
- exynos_dp_get_lane_count(dp, &reg);
- dp->link_train.lane_count = reg;
- dev_dbg(dp->dev, "final lane count = %.2x\n",
- dp->link_train.lane_count);
+ exynos_dp_get_lane_count(dp, &reg);
+ dp->link_train.lane_count = reg;
+ dev_dbg(dp->dev, "final lane count = %.2x\n",
+ dp->link_train.lane_count);
- /* set enhanced mode if available */
- exynos_dp_set_enhanced_mode(dp);
- dp->link_train.lt_state = FINISHED;
- } else {
- /* not all locked */
- dp->link_train.eq_loop++;
+ /* set enhanced mode if available */
+ exynos_dp_set_enhanced_mode(dp);
+ dp->link_train.lt_state = FINISHED;
- if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
- dev_err(dp->dev, "EQ Max loop\n");
- goto reduce_link_rate;
- }
+ return 0;
+ }
- for (lane = 0; lane < lane_count; lane++)
- exynos_dp_set_lane_link_training(dp,
- dp->link_train.training_lane[lane],
- lane);
+ /* not all locked */
+ dp->link_train.eq_loop++;
- exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
- lane_count,
- dp->link_train.training_lane);
- }
- } else {
- goto reduce_link_rate;
+ if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
+ dev_err(dp->dev, "EQ Max loop\n");
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
}
- return 0;
+ for (lane = 0; lane < lane_count; lane++)
+ exynos_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[lane], lane);
+
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
+ lane_count, dp->link_train.training_lane);
-reduce_link_rate:
- exynos_dp_reduce_link_rate(dp);
- return -EIO;
+ return retval;
}
static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
@@ -701,16 +676,17 @@ static void exynos_dp_init_training(struct exynos_dp_device *dp,
static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
{
- int retval = 0;
- int training_finished = 0;
+ int retval = 0, training_finished = 0;
dp->link_train.lt_state = START;
/* Process here */
- while (!training_finished) {
+ while (!retval && !training_finished) {
switch (dp->link_train.lt_state) {
case START:
- exynos_dp_link_start(dp);
+ retval = exynos_dp_link_start(dp);
+ if (retval)
+ dev_err(dp->dev, "LT link start failed!\n");
break;
case CLOCK_RECOVERY:
retval = exynos_dp_process_clock_recovery(dp);
@@ -729,6 +705,8 @@ static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
return -EREMOTEIO;
}
}
+ if (retval)
+ dev_err(dp->dev, "eDP link training failed (%d)\n", retval);
return retval;
}
@@ -752,19 +730,15 @@ static int exynos_dp_set_link_train(struct exynos_dp_device *dp,
return retval;
}
-static int exynos_dp_config_video(struct exynos_dp_device *dp,
- struct video_info *video_info)
+static int exynos_dp_config_video(struct exynos_dp_device *dp)
{
int retval = 0;
int timeout_loop = 0;
int done_count = 0;
- exynos_dp_config_video_slave_mode(dp, video_info);
+ exynos_dp_config_video_slave_mode(dp);
- exynos_dp_set_video_color_format(dp, video_info->color_depth,
- video_info->color_space,
- video_info->dynamic_range,
- video_info->ycbcr_coeff);
+ exynos_dp_set_video_color_format(dp);
if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
dev_err(dp->dev, "PLL is not locked yet.\n");
@@ -852,24 +826,228 @@ static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
{
struct exynos_dp_device *dp = arg;
- dev_err(dp->dev, "exynos_dp_irq_handler\n");
+ enum dp_irq_type irq_type;
+
+ irq_type = exynos_dp_get_irq_type(dp);
+ switch (irq_type) {
+ case DP_IRQ_TYPE_HP_CABLE_IN:
+ dev_dbg(dp->dev, "Received irq - cable in\n");
+ schedule_work(&dp->hotplug_work);
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ case DP_IRQ_TYPE_HP_CABLE_OUT:
+ dev_dbg(dp->dev, "Received irq - cable out\n");
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ case DP_IRQ_TYPE_HP_CHANGE:
+ /*
+ * We get these change notifications once in a while, but there
+ * is nothing we can do with them. Just ignore it for now and
+ * only handle cable changes.
+ */
+ dev_dbg(dp->dev, "Received irq - hotplug change; ignoring.\n");
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ default:
+ dev_err(dp->dev, "Received irq - unknown type!\n");
+ break;
+ }
return IRQ_HANDLED;
}
-static int __devinit exynos_dp_probe(struct platform_device *pdev)
+static void exynos_dp_hotplug(struct work_struct *work)
{
- struct resource *res;
struct exynos_dp_device *dp;
- struct exynos_dp_platdata *pdata;
+ int ret;
+
+ dp = container_of(work, struct exynos_dp_device, hotplug_work);
+
+ ret = exynos_dp_detect_hpd(dp);
+ if (ret) {
+ /* Cable has been disconnected, we're done */
+ return;
+ }
+
+ ret = exynos_dp_handle_edid(dp);
+ if (ret) {
+ dev_err(dp->dev, "unable to handle edid\n");
+ return;
+ }
+
+ ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count,
+ dp->video_info->link_rate);
+ if (ret) {
+ dev_err(dp->dev, "unable to do link train\n");
+ return;
+ }
+
+ exynos_dp_enable_scramble(dp, 1);
+ exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
+ exynos_dp_enable_enhanced_mode(dp, 1);
+
+ exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
+ exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
+
+ exynos_dp_init_video(dp);
+ ret = exynos_dp_config_video(dp);
+ if (ret)
+ dev_err(dp->dev, "unable to config video\n");
+}
+
+#ifdef CONFIG_OF
+static struct exynos_dp_platdata *exynos_dp_dt_parse_pdata(struct device *dev)
+{
+ struct device_node *dp_node = dev->of_node;
+ struct exynos_dp_platdata *pd;
+ struct video_info *dp_video_config;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ dev_err(dev, "memory allocation for pdata failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ dp_video_config = devm_kzalloc(dev,
+ sizeof(*dp_video_config), GFP_KERNEL);
+
+ if (!dp_video_config) {
+ dev_err(dev, "memory allocation for video config failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ pd->video_info = dp_video_config;
+
+ dp_video_config->h_sync_polarity =
+ of_property_read_bool(dp_node, "hsync-active-high");
+ dp_video_config->v_sync_polarity =
+ of_property_read_bool(dp_node, "vsync-active-high");
+
+ dp_video_config->interlaced =
+ of_property_read_bool(dp_node, "interlaced");
+
+ if (of_property_read_u32(dp_node, "samsung,color-space",
+ &dp_video_config->color_space)) {
+ dev_err(dev, "failed to get color-space\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,dynamic-range",
+ &dp_video_config->dynamic_range)) {
+ dev_err(dev, "failed to get dynamic-range\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
+ &dp_video_config->ycbcr_coeff)) {
+ dev_err(dev, "failed to get ycbcr-coeff\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,color-depth",
+ &dp_video_config->color_depth)) {
+ dev_err(dev, "failed to get color-depth\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,link-rate",
+ &dp_video_config->link_rate)) {
+ dev_err(dev, "failed to get link-rate\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,lane-count",
+ &dp_video_config->lane_count)) {
+ dev_err(dev, "failed to get lane-count\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return pd;
+}
+
+static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
+{
+ struct device_node *dp_phy_node = of_node_get(dp->dev->of_node);
+ u32 phy_base;
int ret = 0;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data\n");
- return -EINVAL;
+ dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
+ if (!dp_phy_node) {
+ dev_err(dp->dev, "could not find dptx-phy node\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
+ dev_err(dp->dev, "faild to get reg for dptx-phy\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (of_property_read_u32(dp_phy_node, "samsung,enable-mask",
+ &dp->enable_mask)) {
+ dev_err(dp->dev, "faild to get enable-mask for dptx-phy\n");
+ ret = -EINVAL;
+ goto err;
}
+ dp->phy_addr = ioremap(phy_base, SZ_4);
+ if (!dp->phy_addr) {
+ dev_err(dp->dev, "failed to ioremap dp-phy\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+err:
+ of_node_put(dp_phy_node);
+
+ return ret;
+}
+
+static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg |= dp->enable_mask;
+ __raw_writel(reg, dp->phy_addr);
+}
+
+static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg &= ~(dp->enable_mask);
+ __raw_writel(reg, dp->phy_addr);
+}
+#else
+static struct exynos_dp_platdata *exynos_dp_dt_parse_pdata(struct device *dev)
+{
+ return NULL;
+}
+
+static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
+{
+ return -EINVAL;
+}
+
+static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+{
+ return;
+}
+
+static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+{
+ return;
+}
+#endif /* CONFIG_OF */
+
+static int exynos_dp_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct exynos_dp_device *dp;
+ struct exynos_dp_platdata *pdata;
+
+ int ret = 0;
+
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
if (!dp) {
@@ -879,6 +1057,22 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
dp->dev = &pdev->dev;
+ if (pdev->dev.of_node) {
+ pdata = exynos_dp_dt_parse_pdata(&pdev->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ ret = exynos_dp_dt_parse_phydata(dp);
+ if (ret)
+ return ret;
+ } else {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+ }
+
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
dev_err(&pdev->dev, "failed to get clock\n");
@@ -889,57 +1083,34 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!dp->reg_base) {
- dev_err(&pdev->dev, "failed to ioremap\n");
- return -ENOMEM;
- }
+ dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp->reg_base))
+ return PTR_ERR(dp->reg_base);
dp->irq = platform_get_irq(pdev, 0);
- if (!dp->irq) {
+ if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
- "exynos-dp", dp);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
+ INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
dp->video_info = pdata->video_info;
- if (pdata->phy_init)
- pdata->phy_init();
-
- exynos_dp_init_dp(dp);
-
- ret = exynos_dp_detect_hpd(dp);
- if (ret) {
- dev_err(&pdev->dev, "unable to detect hpd\n");
- return ret;
- }
- exynos_dp_handle_edid(dp);
-
- ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count,
- dp->video_info->link_rate);
- if (ret) {
- dev_err(&pdev->dev, "unable to do link train\n");
- return ret;
+ if (pdev->dev.of_node) {
+ if (dp->phy_addr)
+ exynos_dp_phy_init(dp);
+ } else {
+ if (pdata->phy_init)
+ pdata->phy_init();
}
- exynos_dp_enable_scramble(dp, 1);
- exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
- exynos_dp_enable_enhanced_mode(dp, 1);
-
- exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
- exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
+ exynos_dp_init_dp(dp);
- exynos_dp_init_video(dp);
- ret = exynos_dp_config_video(dp, dp->video_info);
+ ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
+ "exynos-dp", dp);
if (ret) {
- dev_err(&pdev->dev, "unable to config video\n");
+ dev_err(&pdev->dev, "failed to request irq\n");
return ret;
}
@@ -948,28 +1119,44 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit exynos_dp_remove(struct platform_device *pdev)
+static int exynos_dp_remove(struct platform_device *pdev)
{
struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
- if (pdata && pdata->phy_exit)
- pdata->phy_exit();
+ flush_work(&dp->hotplug_work);
+
+ if (pdev->dev.of_node) {
+ if (dp->phy_addr)
+ exynos_dp_phy_exit(dp);
+ } else {
+ if (pdata->phy_exit)
+ pdata->phy_exit();
+ }
clk_disable_unprepare(dp->clock);
+
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int exynos_dp_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
- struct exynos_dp_device *dp = platform_get_drvdata(pdev);
+ struct exynos_dp_platdata *pdata = dev->platform_data;
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
+
+ disable_irq(dp->irq);
- if (pdata && pdata->phy_exit)
- pdata->phy_exit();
+ flush_work(&dp->hotplug_work);
+
+ if (dev->of_node) {
+ if (dp->phy_addr)
+ exynos_dp_phy_exit(dp);
+ } else {
+ if (pdata->phy_exit)
+ pdata->phy_exit();
+ }
clk_disable_unprepare(dp->clock);
@@ -978,32 +1165,22 @@ static int exynos_dp_suspend(struct device *dev)
static int exynos_dp_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
- struct exynos_dp_device *dp = platform_get_drvdata(pdev);
+ struct exynos_dp_platdata *pdata = dev->platform_data;
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- if (pdata && pdata->phy_init)
- pdata->phy_init();
+ if (dev->of_node) {
+ if (dp->phy_addr)
+ exynos_dp_phy_init(dp);
+ } else {
+ if (pdata->phy_init)
+ pdata->phy_init();
+ }
clk_prepare_enable(dp->clock);
exynos_dp_init_dp(dp);
- exynos_dp_detect_hpd(dp);
- exynos_dp_handle_edid(dp);
-
- exynos_dp_set_link_train(dp, dp->video_info->lane_count,
- dp->video_info->link_rate);
-
- exynos_dp_enable_scramble(dp, 1);
- exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
- exynos_dp_enable_enhanced_mode(dp, 1);
-
- exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
- exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
-
- exynos_dp_init_video(dp);
- exynos_dp_config_video(dp, dp->video_info);
+ enable_irq(dp->irq);
return 0;
}
@@ -1013,13 +1190,20 @@ static const struct dev_pm_ops exynos_dp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
};
+static const struct of_device_id exynos_dp_match[] = {
+ { .compatible = "samsung,exynos5-dp" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_dp_match);
+
static struct platform_driver exynos_dp_driver = {
.probe = exynos_dp_probe,
- .remove = __devexit_p(exynos_dp_remove),
+ .remove = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
.pm = &exynos_dp_pm_ops,
+ .of_match_table = of_match_ptr(exynos_dp_match),
},
};
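
Taken together, the exynos_dp_core.c hunks above move all link bring-up out of probe() and into a deferred hotplug path. A sketch of the resulting call flow (summarizing the code added above, nothing new):

/*
 *   probe:                INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
 *                         devm_request_irq(..., exynos_dp_irq_handler, ...);
 *   exynos_dp_irq_handler (hard IRQ context):
 *       DP_IRQ_TYPE_HP_CABLE_IN  -> schedule_work(&dp->hotplug_work), ack
 *       DP_IRQ_TYPE_HP_CABLE_OUT -> ack only
 *       DP_IRQ_TYPE_HP_CHANGE    -> ack and ignore
 *   exynos_dp_hotplug (process context):
 *       exynos_dp_detect_hpd() -> exynos_dp_handle_edid() ->
 *       exynos_dp_set_link_train() -> exynos_dp_config_video()
 *   remove / suspend:     flush_work(&dp->hotplug_work);
 */
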
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 57b8a6531c0e..6c567bbf2fb8 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -13,6 +13,13 @@
#ifndef _EXYNOS_DP_CORE_H
#define _EXYNOS_DP_CORE_H
+enum dp_irq_type {
+ DP_IRQ_TYPE_HP_CABLE_IN,
+ DP_IRQ_TYPE_HP_CABLE_OUT,
+ DP_IRQ_TYPE_HP_CHANGE,
+ DP_IRQ_TYPE_UNKNOWN,
+};
+
struct link_train {
int eq_loop;
int cr_loop[4];
@@ -29,9 +36,12 @@ struct exynos_dp_device {
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
+ void __iomem *phy_addr;
+ unsigned int enable_mask;
struct video_info *video_info;
struct link_train link_train;
+ struct work_struct hotplug_work;
};
/* exynos_dp_reg.c */
@@ -50,6 +60,8 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
bool enable);
void exynos_dp_init_analog_func(struct exynos_dp_device *dp);
void exynos_dp_init_hpd(struct exynos_dp_device *dp);
+enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp);
+void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp);
void exynos_dp_reset_aux(struct exynos_dp_device *dp);
void exynos_dp_init_aux(struct exynos_dp_device *dp);
int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp);
@@ -107,11 +119,7 @@ u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp);
void exynos_dp_reset_macro(struct exynos_dp_device *dp);
void exynos_dp_init_video(struct exynos_dp_device *dp);
-void exynos_dp_set_video_color_format(struct exynos_dp_device *dp,
- u32 color_depth,
- u32 color_space,
- u32 dynamic_range,
- u32 ycbcr_coeff);
+void exynos_dp_set_video_color_format(struct exynos_dp_device *dp);
int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp);
void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
enum clock_recovery_m_value_type type,
@@ -121,8 +129,7 @@ void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type);
void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable);
void exynos_dp_start_video(struct exynos_dp_device *dp);
int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp);
-void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp,
- struct video_info *video_info);
+void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp);
void exynos_dp_enable_scrambling(struct exynos_dp_device *dp);
void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 3f5ca8a0d5ea..29d9d035c73a 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -19,11 +19,11 @@
#include "exynos_dp_core.h"
#include "exynos_dp_reg.h"
-#define COMMON_INT_MASK_1 (0)
-#define COMMON_INT_MASK_2 (0)
-#define COMMON_INT_MASK_3 (0)
-#define COMMON_INT_MASK_4 (0)
-#define INT_STA_MASK (0)
+#define COMMON_INT_MASK_1 0
+#define COMMON_INT_MASK_2 0
+#define COMMON_INT_MASK_3 0
+#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG)
+#define INT_STA_MASK INT_HPD
void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable)
{
@@ -88,7 +88,7 @@ void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
{
/* Set interrupt pin assertion polarity as high */
- writel(INT_POL, dp->reg_base + EXYNOS_DP_INT_CTL);
+ writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL);
/* Clear pending registers */
writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
@@ -324,7 +324,7 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
}
-void exynos_dp_init_hpd(struct exynos_dp_device *dp)
+void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
{
u32 reg;
@@ -333,12 +333,38 @@ void exynos_dp_init_hpd(struct exynos_dp_device *dp)
reg = INT_HPD;
writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
+}
+
+void exynos_dp_init_hpd(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ exynos_dp_clear_hotplug_interrupts(dp);
reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
reg &= ~(F_HPD | HPD_CTRL);
writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
}
+enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ /* Parse hotplug interrupt status register */
+ reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
+
+ if (reg & PLUG)
+ return DP_IRQ_TYPE_HP_CABLE_IN;
+
+ if (reg & HPD_LOST)
+ return DP_IRQ_TYPE_HP_CABLE_OUT;
+
+ if (reg & HOTPLUG_CHG)
+ return DP_IRQ_TYPE_HP_CHANGE;
+
+ return DP_IRQ_TYPE_UNKNOWN;
+}
+
void exynos_dp_reset_aux(struct exynos_dp_device *dp)
{
u32 reg;
@@ -491,7 +517,7 @@ int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
int i;
int retval;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
/* Clear AUX CH data buffer */
reg = BUF_CLR;
writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
@@ -552,7 +578,7 @@ int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
else
cur_data_count = count - start_offset;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
/* Select DPCD device address */
reg = AUX_ADDR_7_0(reg_addr + start_offset);
writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
@@ -617,7 +643,7 @@ int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
cur_data_count = count - start_offset;
/* AUX CH Request Transaction process */
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
/* Select DPCD device address */
reg = AUX_ADDR_7_0(reg_addr + start_offset);
writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
@@ -700,17 +726,15 @@ int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
int i;
int retval;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
/* Clear AUX CH data buffer */
reg = BUF_CLR;
writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
/* Select EDID device */
retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr);
- if (retval != 0) {
- dev_err(dp->dev, "Select EDID device fail!\n");
+ if (retval != 0)
continue;
- }
/*
* Set I2C transaction and read data
@@ -750,7 +774,7 @@ int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
int retval = 0;
for (i = 0; i < count; i += 16) {
- for (j = 0; j < 100; j++) {
+ for (j = 0; j < 3; j++) {
/* Clear AUX CH data buffer */
reg = BUF_CLR;
writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
@@ -1034,24 +1058,20 @@ void exynos_dp_init_video(struct exynos_dp_device *dp)
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8);
}
-void exynos_dp_set_video_color_format(struct exynos_dp_device *dp,
- u32 color_depth,
- u32 color_space,
- u32 dynamic_range,
- u32 ycbcr_coeff)
+void exynos_dp_set_video_color_format(struct exynos_dp_device *dp)
{
u32 reg;
/* Configure the input color depth, color space, dynamic range */
- reg = (dynamic_range << IN_D_RANGE_SHIFT) |
- (color_depth << IN_BPC_SHIFT) |
- (color_space << IN_COLOR_F_SHIFT);
+ reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) |
+ (dp->video_info->color_depth << IN_BPC_SHIFT) |
+ (dp->video_info->color_space << IN_COLOR_F_SHIFT);
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2);
/* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */
reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
reg &= ~IN_YC_COEFFI_MASK;
- if (ycbcr_coeff)
+ if (dp->video_info->ycbcr_coeff)
reg |= IN_YC_COEFFI_ITU709;
else
reg |= IN_YC_COEFFI_ITU601;
@@ -1178,8 +1198,7 @@ int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp)
return 0;
}
-void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp,
- struct video_info *video_info)
+void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp)
{
u32 reg;
@@ -1190,17 +1209,17 @@ void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp,
reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg &= ~INTERACE_SCAN_CFG;
- reg |= (video_info->interlaced << 2);
+ reg |= (dp->video_info->interlaced << 2);
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg &= ~VSYNC_POLARITY_CFG;
- reg |= (video_info->v_sync_polarity << 1);
+ reg |= (dp->video_info->v_sync_polarity << 1);
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg &= ~HSYNC_POLARITY_CFG;
- reg |= (video_info->h_sync_polarity << 0);
+ reg |= (dp->video_info->h_sync_polarity << 0);
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
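
The mask changes at the top of this file unmask exactly the hotplug sources that the new exynos_dp_get_irq_type() decodes; a summary of that mapping (restating the code above):

/*
 * COMMON_INT_MASK_4 now enables HOTPLUG_CHG | HPD_LOST | PLUG, and
 * INT_STA_MASK enables INT_HPD.  exynos_dp_get_irq_type() then reads
 * EXYNOS_DP_COMMON_INT_STA_4 and classifies:
 *
 *   PLUG        -> DP_IRQ_TYPE_HP_CABLE_IN
 *   HPD_LOST    -> DP_IRQ_TYPE_HP_CABLE_OUT
 *   HOTPLUG_CHG -> DP_IRQ_TYPE_HP_CHANGE
 *   otherwise   -> DP_IRQ_TYPE_UNKNOWN
 */
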
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h
index 1f2f014cfe88..2e9bd0e0b9f2 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/video/exynos/exynos_dp_reg.h
@@ -242,7 +242,8 @@
/* EXYNOS_DP_INT_CTL */
#define SOFT_INT_CTRL (0x1 << 2)
-#define INT_POL (0x1 << 0)
+#define INT_POL1 (0x1 << 1)
+#define INT_POL0 (0x1 << 0)
/* EXYNOS_DP_SYS_CTL_1 */
#define DET_STA (0x1 << 2)
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 07d70a3a628b..fac7df6d1aba 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -338,7 +338,8 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
struct mipi_dsim_ddi *dsim_ddi;
int ret = -EINVAL;
- dsim = kzalloc(sizeof(struct mipi_dsim_device), GFP_KERNEL);
+ dsim = devm_kzalloc(&pdev->dev, sizeof(struct mipi_dsim_device),
+ GFP_KERNEL);
if (!dsim) {
dev_err(&pdev->dev, "failed to allocate dsim object.\n");
return -ENOMEM;
@@ -352,13 +353,13 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd;
if (dsim_pd == NULL) {
dev_err(&pdev->dev, "failed to get platform data for dsim.\n");
- goto err_clock_get;
+ return -EINVAL;
}
/* get mipi_dsim_config. */
dsim_config = dsim_pd->dsim_config;
if (dsim_config == NULL) {
dev_err(&pdev->dev, "failed to get dsim config data.\n");
- goto err_clock_get;
+ return -EINVAL;
}
dsim->dsim_config = dsim_config;
@@ -366,41 +367,28 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
mutex_init(&dsim->lock);
- ret = regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies), supplies);
+ ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies),
+ supplies);
if (ret) {
dev_err(&pdev->dev, "Failed to get regulators: %d\n", ret);
- goto err_clock_get;
+ return ret;
}
- dsim->clock = clk_get(&pdev->dev, "dsim0");
+ dsim->clock = devm_clk_get(&pdev->dev, "dsim0");
if (IS_ERR(dsim->clock)) {
dev_err(&pdev->dev, "failed to get dsim clock source\n");
- ret = -ENODEV;
- goto err_clock_get;
+ return -ENODEV;
}
clk_enable(dsim->clock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get io memory region\n");
- ret = -ENODEV;
- goto err_platform_get;
- }
-
- dsim->res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
- if (!dsim->res) {
- dev_err(&pdev->dev, "failed to request io memory region\n");
- ret = -ENOMEM;
- goto err_mem_region;
- }
- dsim->reg_base = ioremap(res->start, resource_size(res));
+ dsim->reg_base = devm_request_and_ioremap(&pdev->dev, res);
if (!dsim->reg_base) {
dev_err(&pdev->dev, "failed to remap io region\n");
ret = -ENOMEM;
- goto err_ioremap;
+ goto error;
}
mutex_init(&dsim->lock);
@@ -410,26 +398,27 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
if (!dsim_ddi) {
dev_err(&pdev->dev, "mipi_dsim_ddi object not found.\n");
ret = -EINVAL;
- goto err_bind;
+ goto error;
}
dsim->irq = platform_get_irq(pdev, 0);
- if (dsim->irq < 0) {
+ if (IS_ERR_VALUE(dsim->irq)) {
dev_err(&pdev->dev, "failed to request dsim irq resource\n");
ret = -EINVAL;
- goto err_platform_get_irq;
+ goto error;
}
init_completion(&dsim_wr_comp);
init_completion(&dsim_rd_comp);
platform_set_drvdata(pdev, dsim);
- ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
+ ret = devm_request_irq(&pdev->dev, dsim->irq,
+ exynos_mipi_dsi_interrupt_handler,
IRQF_SHARED, dev_name(&pdev->dev), dsim);
if (ret != 0) {
dev_err(&pdev->dev, "failed to request dsim irq\n");
ret = -EINVAL;
- goto err_bind;
+ goto error;
}
/* enable interrupts */
@@ -471,38 +460,18 @@ done:
return 0;
-err_bind:
- iounmap(dsim->reg_base);
-
-err_ioremap:
- release_mem_region(dsim->res->start, resource_size(dsim->res));
-
-err_mem_region:
- release_resource(dsim->res);
-
-err_platform_get:
+error:
clk_disable(dsim->clock);
- clk_put(dsim->clock);
-err_clock_get:
- kfree(dsim);
-
-err_platform_get_irq:
return ret;
}
-static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev)
+static int exynos_mipi_dsi_remove(struct platform_device *pdev)
{
struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
struct mipi_dsim_ddi *dsim_ddi, *next;
struct mipi_dsim_lcd_driver *dsim_lcd_drv;
- iounmap(dsim->reg_base);
-
clk_disable(dsim->clock);
- clk_put(dsim->clock);
-
- release_resource(dsim->res);
- release_mem_region(dsim->res->start, resource_size(dsim->res));
list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) {
if (dsim_ddi) {
@@ -518,9 +487,6 @@ static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev)
}
}
- regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(dsim);
-
return 0;
}
@@ -595,7 +561,7 @@ static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
static struct platform_driver exynos_mipi_dsi_driver = {
.probe = exynos_mipi_dsi_probe,
- .remove = __devexit_p(exynos_mipi_dsi_remove),
+ .remove = exynos_mipi_dsi_remove,
.driver = {
.name = "exynos-mipi-dsim",
.owner = THIS_MODULE,
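The probe path above is converted to device-managed (devm_*) resources, which is why the individual error labels collapse into a single error: exit and the remove path stops freeing the allocation, clock and register mapping by hand. A minimal standalone sketch of the same pattern, assuming a hypothetical example_priv structure and "example-dev" device name; only the devm_* calls mirror the patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {			/* hypothetical private data */
	void __iomem *regs;
	struct clk *clk;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;

	/* freed automatically when probe fails or the device is unbound */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* requests the region and maps it; both are released automatically */
	priv->regs = devm_request_and_ioremap(&pdev->dev, res);
	if (!priv->regs)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* nothing to unwind: the devm core releases everything */
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= { .name = "example-dev" },
};
module_platform_driver(example_driver);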
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 3cd29a4fc10a..c70cb8926df6 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/memory.h>
#include <linux/delay.h>
+#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <video/mipi_display.h>
diff --git a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c b/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
index 0ef38ce72af6..95cb99a1fe2d 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/ctype.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <video/exynos_mipi_dsim.h>
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/exynos/s6e8ax0.c
index 05d080b63bc0..ca2602413aa4 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/exynos/s6e8ax0.c
@@ -776,7 +776,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
int ret;
u8 mtp_id[3] = {0, };
- lcd = kzalloc(sizeof(struct s6e8ax0), GFP_KERNEL);
+ lcd = devm_kzalloc(&dsim_dev->dev, sizeof(struct s6e8ax0), GFP_KERNEL);
if (!lcd) {
dev_err(&dsim_dev->dev, "failed to allocate s6e8ax0 structure.\n");
return -ENOMEM;
@@ -788,18 +788,17 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
mutex_init(&lcd->lock);
- ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
+ ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- goto err_lcd_register;
+ return ret;
}
lcd->ld = lcd_device_register("s6e8ax0", lcd->dev, lcd,
&s6e8ax0_lcd_ops);
if (IS_ERR(lcd->ld)) {
dev_err(lcd->dev, "failed to register lcd ops.\n");
- ret = PTR_ERR(lcd->ld);
- goto err_lcd_register;
+ return PTR_ERR(lcd->ld);
}
lcd->bd = backlight_device_register("s6e8ax0-bl", lcd->dev, lcd,
@@ -838,11 +837,6 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
err_backlight_register:
lcd_device_unregister(lcd->ld);
-
-err_lcd_register:
- regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
-
return ret;
}
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 14102a3f70f5..6d2744794dd1 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -893,7 +893,7 @@ static void ffb_init_fix(struct fb_info *info)
info->fix.accel = FB_ACCEL_SUN_CREATOR;
}
-static int __devinit ffb_probe(struct platform_device *op)
+static int ffb_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct ffb_fbc __iomem *fbc;
@@ -1022,7 +1022,7 @@ out_err:
return err;
}
-static int __devexit ffb_remove(struct platform_device *op)
+static int ffb_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct ffb_par *par = info->par;
@@ -1058,7 +1058,7 @@ static struct platform_driver ffb_driver = {
.of_match_table = ffb_match,
},
.probe = ffb_probe,
- .remove = __devexit_p(ffb_remove),
+ .remove = ffb_remove,
};
static int __init ffb_init(void)
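The __devinit/__devexit annotations and the __devexit_p() wrapper dropped here (and in the remaining hunks of this series) date from when CONFIG_HOTPLUG could be disabled and probe/remove code lived in discardable sections. A rough sketch of the old <linux/init.h> definition, which is why ".remove = ffb_remove" is an equivalent replacement once the wrapper no longer does anything:

#if defined(MODULE) || defined(CONFIG_HOTPLUG)
#define __devexit_p(x)	x	/* remove callback kept: plain function pointer */
#else
#define __devexit_p(x)	NULL	/* callback discarded along with its section */
#endif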
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index d0533b7aad79..c99c9671302b 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -127,7 +127,7 @@
static volatile unsigned char *fm2fb_reg;
-static struct fb_fix_screeninfo fb_fix __devinitdata = {
+static struct fb_fix_screeninfo fb_fix = {
.smem_len = FRAMEMASTER_REG,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
@@ -136,12 +136,12 @@ static struct fb_fix_screeninfo fb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static int fm2fb_mode __devinitdata = -1;
+static int fm2fb_mode = -1;
#define FM2FB_MODE_PAL 0
#define FM2FB_MODE_NTSC 1
-static struct fb_var_screeninfo fb_var_modes[] __devinitdata = {
+static struct fb_var_screeninfo fb_var_modes[] = {
{
/* 768 x 576, 32 bpp (PAL) */
768, 576, 768, 576, 0, 0, 32, 0,
@@ -211,10 +211,9 @@ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* Initialisation
*/
-static int __devinit fm2fb_probe(struct zorro_dev *z,
- const struct zorro_device_id *id);
+static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id);
-static struct zorro_device_id fm2fb_devices[] __devinitdata = {
+static struct zorro_device_id fm2fb_devices[] = {
{ ZORRO_PROD_BSC_FRAMEMASTER_II },
{ ZORRO_PROD_HELFRICH_RAINBOW_II },
{ 0 }
@@ -227,8 +226,7 @@ static struct zorro_driver fm2fb_driver = {
.probe = fm2fb_probe,
};
-static int __devinit fm2fb_probe(struct zorro_dev *z,
- const struct zorro_device_id *id)
+static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
{
struct fb_info *info;
unsigned long *ptr;
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index ede9e55413f8..41fbd9453c5f 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -55,7 +55,7 @@
* order if increasing resolution and frequency. The 320x240-60 mode is
* the initial AOI for the second and third planes.
*/
-static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
+static struct fb_videomode fsl_diu_mode_db[] = {
{
.refresh = 60,
.xres = 1024,
@@ -337,13 +337,11 @@ struct mfb_info {
int registered;
unsigned long pseudo_palette[16];
struct diu_ad *ad;
- int cursor_reset;
unsigned char g_alpha;
unsigned int count;
int x_aoi_d; /* aoi display x offset to physical screen */
int y_aoi_d; /* aoi display y offset to physical screen */
struct fsl_diu_data *parent;
- u8 *edid_data;
};
/**
@@ -378,6 +376,8 @@ struct fsl_diu_data {
struct diu_ad ad[NUM_AOIS] __aligned(8);
u8 gamma[256 * 3] __aligned(32);
u8 cursor[MAX_CURS * MAX_CURS * 2] __aligned(32);
+ uint8_t edid_data[EDID_LENGTH];
+ bool has_edid;
} __aligned(32);
/* Determine the DMA address of a member of the fsl_diu_data structure */
@@ -430,6 +430,22 @@ static struct mfb_info mfb_template[] = {
},
};
+#ifdef DEBUG
+static void __attribute__ ((unused)) fsl_diu_dump(struct diu __iomem *hw)
+{
+ mb();
+ pr_debug("DIU: desc=%08x,%08x,%08x, gamma=%08x pallete=%08x "
+ "cursor=%08x curs_pos=%08x diu_mode=%08x bgnd=%08x "
+ "disp_size=%08x hsyn_para=%08x vsyn_para=%08x syn_pol=%08x "
+ "thresholds=%08x int_mask=%08x plut=%08x\n",
+ hw->desc[0], hw->desc[1], hw->desc[2], hw->gamma,
+ hw->pallete, hw->cursor, hw->curs_pos, hw->diu_mode,
+ hw->bgnd, hw->disp_size, hw->hsyn_para, hw->vsyn_para,
+ hw->syn_pol, hw->thresholds, hw->int_mask, hw->plut);
+ rmb();
+}
+#endif
+
/**
* fsl_diu_name_to_port - convert a port name to a monitor port enum
*
@@ -481,8 +497,7 @@ static void fsl_diu_enable_panel(struct fb_info *info)
switch (mfbi->index) {
case PLANE0:
- if (hw->desc[0] != ad->paddr)
- wr_reg_wa(&hw->desc[0], ad->paddr);
+ wr_reg_wa(&hw->desc[0], ad->paddr);
break;
case PLANE1_AOI0:
cmfbi = &data->mfb[2];
@@ -534,8 +549,7 @@ static void fsl_diu_disable_panel(struct fb_info *info)
switch (mfbi->index) {
case PLANE0:
- if (hw->desc[0] != data->dummy_ad.paddr)
- wr_reg_wa(&hw->desc[0], data->dummy_ad.paddr);
+ wr_reg_wa(&hw->desc[0], 0);
break;
case PLANE1_AOI0:
cmfbi = &data->mfb[2];
@@ -792,7 +806,8 @@ static void update_lcdc(struct fb_info *info)
hw = data->diu_reg;
- diu_ops.set_monitor_port(data->monitor_port);
+ if (diu_ops.set_monitor_port)
+ diu_ops.set_monitor_port(data->monitor_port);
gamma_table_base = data->gamma;
/* Prep for DIU init - gamma table, cursor table */
@@ -811,12 +826,8 @@ static void update_lcdc(struct fb_info *info)
out_be32(&hw->gamma, DMA_ADDR(data, gamma));
out_be32(&hw->cursor, DMA_ADDR(data, cursor));
- out_be32(&hw->bgnd, 0x007F7F7F); /* BGND */
- out_be32(&hw->bgnd_wb, 0); /* BGND_WB */
- out_be32(&hw->disp_size, (var->yres << 16 | var->xres));
- /* DISP SIZE */
- out_be32(&hw->wb_size, 0); /* WB SIZE */
- out_be32(&hw->wb_mem_addr, 0); /* WB MEM ADDR */
+ out_be32(&hw->bgnd, 0x007F7F7F); /* Set background to grey */
+ out_be32(&hw->disp_size, (var->yres << 16) | var->xres);
/* Horizontal and vertical configuration register */
temp = var->left_margin << 22 | /* BP_H */
@@ -833,9 +844,20 @@ static void update_lcdc(struct fb_info *info)
diu_ops.set_pixel_clock(var->pixclock);
- out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */
- out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */
+#ifndef CONFIG_PPC_MPC512x
+ /*
+ * The PLUT register is defined differently on the MPC5121 than it
+ * is on other SOCs. Unfortunately, there's no documentation that
+ * explains how it's supposed to be programmed, so for now, we leave
+ * it at the default value on the MPC5121.
+ *
+ * For other SOCs, program it for the highest priority, which will
+ * reduce the chance of underrun. Technically, we should scale the
+ * priority to match the screen resolution, but doing that properly
+ * requires delicate fine-tuning for each use-case.
+ */
out_be32(&hw->plut, 0x01F5F666);
+#endif
/* Enable the DIU */
enable_lcdc(info);
@@ -922,7 +944,7 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
#define PF_COMP_0_MASK 0x0000000F
#define PF_COMP_0_SHIFT 0
-#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
+#define MAKE_PF(alpha, red, green, blue, size, c0, c1, c2, c3) \
cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
(blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
(red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
@@ -932,10 +954,10 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
switch (bits_per_pixel) {
case 32:
/* 0x88883316 */
- return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
+ return MAKE_PF(3, 2, 1, 0, 3, 8, 8, 8, 8);
case 24:
/* 0x88082219 */
- return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
+ return MAKE_PF(4, 0, 1, 2, 2, 8, 8, 8, 0);
case 16:
/* 0x65053118 */
return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
@@ -965,7 +987,6 @@ static int fsl_diu_set_par(struct fb_info *info)
hw = data->diu_reg;
set_fix(info);
- mfbi->cursor_reset = 1;
len = info->var.yres_virtual * info->fix.line_length;
/* Alloc & dealloc each time resolution/bpp change */
@@ -1107,6 +1128,12 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
if (!arg)
return -EINVAL;
+
+ dev_dbg(info->dev, "ioctl %08x (dir=%s%s type=%u nr=%u size=%u)\n", cmd,
+ _IOC_DIR(cmd) & _IOC_READ ? "R" : "",
+ _IOC_DIR(cmd) & _IOC_WRITE ? "W" : "",
+ _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
+
switch (cmd) {
case MFB_SET_PIXFMT_OLD:
dev_warn(info->dev,
@@ -1180,6 +1207,23 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
ad->ckmin_b = ck.blue_min;
}
break;
+#ifdef CONFIG_PPC_MPC512x
+ case MFB_SET_GAMMA: {
+ struct fsl_diu_data *data = mfbi->parent;
+
+ if (copy_from_user(data->gamma, buf, sizeof(data->gamma)))
+ return -EFAULT;
+ setbits32(&data->diu_reg->gamma, 0); /* Force table reload */
+ break;
+ }
+ case MFB_GET_GAMMA: {
+ struct fsl_diu_data *data = mfbi->parent;
+
+ if (copy_to_user(buf, data->gamma, sizeof(data->gamma)))
+ return -EFAULT;
+ break;
+ }
+#endif
default:
dev_err(info->dev, "unknown ioctl command (0x%08X)\n", cmd);
return -ENOIOCTLCMD;
@@ -1188,6 +1232,16 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
return 0;
}
+static inline void fsl_diu_enable_interrupts(struct fsl_diu_data *data)
+{
+ u32 int_mask = INT_UNDRUN; /* enable underrun detection */
+
+ if (IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))
+ int_mask |= INT_VSYNC; /* enable vertical sync */
+
+ clrbits32(&data->diu_reg->int_mask, int_mask);
+}
+
/* turn on fb if count == 1
*/
static int fsl_diu_open(struct fb_info *info, int user)
@@ -1206,8 +1260,10 @@ static int fsl_diu_open(struct fb_info *info, int user)
res = fsl_diu_set_par(info);
if (res < 0)
mfbi->count--;
- else
+ else {
+ fsl_diu_enable_interrupts(mfbi->parent);
fsl_diu_enable_panel(info);
+ }
}
spin_unlock(&diu_lock);
@@ -1223,8 +1279,22 @@ static int fsl_diu_release(struct fb_info *info, int user)
spin_lock(&diu_lock);
mfbi->count--;
- if (mfbi->count == 0)
+ if (mfbi->count == 0) {
+ struct fsl_diu_data *data = mfbi->parent;
+ bool disable = true;
+ int i;
+
+ /* Disable interrupts only if all AOIs are closed */
+ for (i = 0; i < NUM_AOIS; i++) {
+ struct mfb_info *mi = data->fsl_diu_info[i].par;
+
+ if (mi->count)
+ disable = false;
+ }
+ if (disable)
+ out_be32(&data->diu_reg->int_mask, 0xffffffff);
fsl_diu_disable_panel(info);
+ }
spin_unlock(&diu_lock);
return res;
@@ -1244,10 +1314,11 @@ static struct fb_ops fsl_diu_ops = {
.fb_release = fsl_diu_release,
};
-static int __devinit install_fb(struct fb_info *info)
+static int install_fb(struct fb_info *info)
{
int rc;
struct mfb_info *mfbi = info->par;
+ struct fsl_diu_data *data = mfbi->parent;
const char *aoi_mode, *init_aoi_mode = "320x240";
struct fb_videomode *db = fsl_diu_mode_db;
unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db);
@@ -1264,9 +1335,9 @@ static int __devinit install_fb(struct fb_info *info)
return rc;
if (mfbi->index == PLANE0) {
- if (mfbi->edid_data) {
+ if (data->has_edid) {
/* Now build modedb from EDID */
- fb_edid_to_monspecs(mfbi->edid_data, &info->monspecs);
+ fb_edid_to_monspecs(data->edid_data, &info->monspecs);
fb_videomode_to_modelist(info->monspecs.modedb,
info->monspecs.modedb_len,
&info->modelist);
@@ -1284,7 +1355,7 @@ static int __devinit install_fb(struct fb_info *info)
* For plane 0 we continue and look into
* driver's internal modedb.
*/
- if ((mfbi->index == PLANE0) && mfbi->edid_data)
+ if ((mfbi->index == PLANE0) && data->has_edid)
has_default_mode = 0;
else
return -EINVAL;
@@ -1348,9 +1419,6 @@ static void uninstall_fb(struct fb_info *info)
if (!mfbi->registered)
return;
- if (mfbi->index == PLANE0)
- kfree(mfbi->edid_data);
-
unregister_framebuffer(info);
unmap_video_memory(info);
if (&info->cmap)
@@ -1362,7 +1430,7 @@ static void uninstall_fb(struct fb_info *info)
static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
{
struct diu __iomem *hw = dev_id;
- unsigned int status = in_be32(&hw->int_status);
+ uint32_t status = in_be32(&hw->int_status);
if (status) {
/* This is the workaround for underrun */
@@ -1387,40 +1455,6 @@ static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
return IRQ_NONE;
}
-static int request_irq_local(struct fsl_diu_data *data)
-{
- struct diu __iomem *hw = data->diu_reg;
- u32 ints;
- int ret;
-
- /* Read to clear the status */
- in_be32(&hw->int_status);
-
- ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
- if (!ret) {
- ints = INT_PARERR | INT_LS_BF_VS;
-#if !defined(CONFIG_NOT_COHERENT_CACHE)
- ints |= INT_VSYNC;
-#endif
-
- /* Read to clear the status */
- in_be32(&hw->int_status);
- out_be32(&hw->int_mask, ints);
- }
-
- return ret;
-}
-
-static void free_irq_local(struct fsl_diu_data *data)
-{
- struct diu __iomem *hw = data->diu_reg;
-
- /* Disable all LCDC interrupt */
- out_be32(&hw->int_mask, 0x1f);
-
- free_irq(data->irq, NULL);
-}
-
#ifdef CONFIG_PM
/*
* Power management hooks. Note that we won't be called from IRQ context,
@@ -1491,13 +1525,13 @@ static ssize_t show_monitor(struct device *device,
return 0;
}
-static int __devinit fsl_diu_probe(struct platform_device *pdev)
+static int fsl_diu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mfb_info *mfbi;
struct fsl_diu_data *data;
- int diu_mode;
dma_addr_t dma_addr; /* DMA addr of fsl_diu_data struct */
+ const void *prop;
unsigned int i;
int ret;
@@ -1541,17 +1575,13 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
mfbi->parent = data;
mfbi->ad = &data->ad[i];
+ }
- if (mfbi->index == PLANE0) {
- const u8 *prop;
- int len;
-
- /* Get EDID */
- prop = of_get_property(np, "edid", &len);
- if (prop && len == EDID_LENGTH)
- mfbi->edid_data = kmemdup(prop, EDID_LENGTH,
- GFP_KERNEL);
- }
+ /* Get the EDID data from the device tree, if present */
+ prop = of_get_property(np, "edid", &ret);
+ if (prop && ret == EDID_LENGTH) {
+ memcpy(data->edid_data, prop, EDID_LENGTH);
+ data->has_edid = true;
}
data->diu_reg = of_iomap(np, 0);
@@ -1561,10 +1591,6 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
goto error;
}
- diu_mode = in_be32(&data->diu_reg->diu_mode);
- if (diu_mode == MFB_MODE0)
- out_be32(&data->diu_reg->diu_mode, 0); /* disable DIU */
-
/* Get the IRQ of the DIU */
data->irq = irq_of_parse_and_map(np, 0);
@@ -1586,28 +1612,38 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
data->dummy_ad.paddr = DMA_ADDR(data, dummy_ad);
/*
- * Let DIU display splash screen if it was pre-initialized
- * by the bootloader, set dummy area descriptor otherwise.
+ * Let DIU continue to display splash screen if it was pre-initialized
+ * by the bootloader; otherwise, clear the display.
*/
- if (diu_mode == MFB_MODE0)
- out_be32(&data->diu_reg->desc[0], data->dummy_ad.paddr);
+ if (in_be32(&data->diu_reg->diu_mode) == MFB_MODE0)
+ out_be32(&data->diu_reg->desc[0], 0);
out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr);
out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr);
+ /*
+ * Older versions of U-Boot leave interrupts enabled, so disable
+ * all of them and clear the status register.
+ */
+ out_be32(&data->diu_reg->int_mask, 0xffffffff);
+ in_be32(&data->diu_reg->int_status);
+
+ ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb",
+ data->diu_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "could not claim irq\n");
+ goto error;
+ }
+
for (i = 0; i < NUM_AOIS; i++) {
ret = install_fb(&data->fsl_diu_info[i]);
if (ret) {
dev_err(&pdev->dev, "could not register fb %d\n", i);
+ free_irq(data->irq, data->diu_reg);
goto error;
}
}
- if (request_irq_local(data)) {
- dev_err(&pdev->dev, "could not claim irq\n");
- goto error;
- }
-
sysfs_attr_init(&data->dev_attr.attr);
data->dev_attr.attr.name = "monitor";
data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
@@ -1638,7 +1674,8 @@ static int fsl_diu_remove(struct platform_device *pdev)
data = dev_get_drvdata(&pdev->dev);
disable_lcdc(&data->fsl_diu_info[0]);
- free_irq_local(data);
+
+ free_irq(data->irq, data->diu_reg);
for (i = 0; i < NUM_AOIS; i++)
uninstall_fb(&data->fsl_diu_info[i]);
@@ -1741,6 +1778,9 @@ static int __init fsl_diu_init(void)
coherence_data_size = be32_to_cpup(prop) * 13;
coherence_data_size /= 8;
+ pr_debug("fsl-diu-fb: coherence data size is %zu bytes\n",
+ coherence_data_size);
+
prop = of_get_property(np, "d-cache-line-size", NULL);
if (prop == NULL) {
pr_err("fsl-diu-fb: missing 'd-cache-line-size' property' "
@@ -1750,10 +1790,17 @@ static int __init fsl_diu_init(void)
}
d_cache_line_size = be32_to_cpup(prop);
+ pr_debug("fsl-diu-fb: cache lines size is %u bytes\n",
+ d_cache_line_size);
+
of_node_put(np);
coherence_data = vmalloc(coherence_data_size);
- if (!coherence_data)
+ if (!coherence_data) {
+ pr_err("fsl-diu-fb: could not allocate coherence data "
+ "(size=%zu)\n", coherence_data_size);
return -ENOMEM;
+ }
+
#endif
ret = platform_driver_register(&fsl_diu_driver);
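The new fsl_diu_enable_interrupts() helper above uses IS_ENABLED() from <linux/kconfig.h> rather than a preprocessor conditional, so both branches are parsed and type-checked regardless of the configuration. A minimal sketch of the idiom; the mask values here are placeholders, not the DIU's real interrupt bits:

#include <linux/kconfig.h>
#include <linux/types.h>

static u32 example_int_mask(void)
{
	u32 mask = 0x1;			/* placeholder: always-enabled source */

	/* evaluates to 0 or 1; the dead branch is removed by the compiler */
	if (IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))
		mask |= 0x2;		/* placeholder: extra source */

	return mask;
}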
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 3dad31975db8..bda5e3941510 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -91,10 +91,10 @@ static uint32_t pseudo_palette[16];
static uint32_t gbe_cmap[256];
static int gbe_turned_on; /* 0 turned off, 1 turned on */
-static char *mode_option __devinitdata = NULL;
+static char *mode_option = NULL;
/* default CRT mode */
-static struct fb_var_screeninfo default_var_CRT __devinitdata = {
+static struct fb_var_screeninfo default_var_CRT = {
/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
.xres = 640,
.yres = 480,
@@ -125,7 +125,7 @@ static struct fb_var_screeninfo default_var_CRT __devinitdata = {
};
/* default LCD mode */
-static struct fb_var_screeninfo default_var_LCD __devinitdata = {
+static struct fb_var_screeninfo default_var_LCD = {
/* 1600x1024, 8 bpp */
.xres = 1600,
.yres = 1024,
@@ -157,7 +157,7 @@ static struct fb_var_screeninfo default_var_LCD __devinitdata = {
/* default modedb mode */
/* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */
-static struct fb_videomode default_mode_CRT __devinitdata = {
+static struct fb_videomode default_mode_CRT = {
.refresh = 60,
.xres = 640,
.yres = 480,
@@ -172,7 +172,7 @@ static struct fb_videomode default_mode_CRT __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
/* 1600x1024 SGI flatpanel 1600sw */
-static struct fb_videomode default_mode_LCD __devinitdata = {
+static struct fb_videomode default_mode_LCD = {
/* 1600x1024, 8 bpp */
.xres = 1600,
.yres = 1024,
@@ -186,8 +186,8 @@ static struct fb_videomode default_mode_LCD __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_videomode *default_mode __devinitdata = &default_mode_CRT;
-static struct fb_var_screeninfo *default_var __devinitdata = &default_var_CRT;
+static struct fb_videomode *default_mode = &default_mode_CRT;
+static struct fb_var_screeninfo *default_var = &default_var_CRT;
static int flat_panel_enabled = 0;
@@ -1082,7 +1082,7 @@ static ssize_t gbefb_show_rev(struct device *device, struct device_attribute *at
static DEVICE_ATTR(revision, S_IRUGO, gbefb_show_rev, NULL);
-static void __devexit gbefb_remove_sysfs(struct device *dev)
+static void gbefb_remove_sysfs(struct device *dev)
{
device_remove_file(dev, &dev_attr_size);
device_remove_file(dev, &dev_attr_revision);
@@ -1098,7 +1098,7 @@ static void gbefb_create_sysfs(struct device *dev)
* Initialization
*/
-static int __devinit gbefb_setup(char *options)
+static int gbefb_setup(char *options)
{
char *this_opt;
@@ -1129,7 +1129,7 @@ static int __devinit gbefb_setup(char *options)
return 0;
}
-static int __devinit gbefb_probe(struct platform_device *p_dev)
+static int gbefb_probe(struct platform_device *p_dev)
{
int i, ret = 0;
struct fb_info *info;
@@ -1254,7 +1254,7 @@ out_release_framebuffer:
return ret;
}
-static int __devexit gbefb_remove(struct platform_device* p_dev)
+static int gbefb_remove(struct platform_device* p_dev)
{
struct fb_info *info = platform_get_drvdata(p_dev);
@@ -1273,7 +1273,7 @@ static int __devexit gbefb_remove(struct platform_device* p_dev)
static struct platform_driver gbefb_driver = {
.probe = gbefb_probe,
- .remove = __devexit_p(gbefb_remove),
+ .remove = gbefb_remove,
.driver = {
.name = "gbefb",
},
diff --git a/drivers/video/geode/Kconfig b/drivers/video/geode/Kconfig
index c5d8ba4b9fc3..21e351a14593 100644
--- a/drivers/video/geode/Kconfig
+++ b/drivers/video/geode/Kconfig
@@ -2,14 +2,14 @@
# Geode family framebuffer configuration
#
config FB_GEODE
- bool "AMD Geode family framebuffer support (EXPERIMENTAL)"
- depends on FB && PCI && EXPERIMENTAL && X86
+ bool "AMD Geode family framebuffer support"
+ depends on FB && PCI && X86
---help---
Say 'Y' here to allow you to select framebuffer drivers for
the AMD Geode family of processors.
config FB_GEODE_LX
- tristate "AMD Geode LX framebuffer support (EXPERIMENTAL)"
+ tristate "AMD Geode LX framebuffer support"
depends on FB && FB_GEODE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -24,8 +24,8 @@ config FB_GEODE_LX
If unsure, say N.
config FB_GEODE_GX
- tristate "AMD Geode GX framebuffer support (EXPERIMENTAL)"
- depends on FB && FB_GEODE && EXPERIMENTAL
+ tristate "AMD Geode GX framebuffer support"
+ depends on FB && FB_GEODE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -39,8 +39,8 @@ config FB_GEODE_GX
If unsure, say N.
config FB_GEODE_GX1
- tristate "AMD Geode GX1 framebuffer support (EXPERIMENTAL)"
- depends on FB && FB_GEODE && EXPERIMENTAL
+ tristate "AMD Geode GX1 framebuffer support"
+ depends on FB && FB_GEODE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index 265c5ed59ade..ebbaada7b941 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -29,7 +29,7 @@ static int crt_option = 1;
static char panel_option[32] = "";
/* Modes relevant to the GX1 (taken from modedb.c) */
-static const struct fb_videomode __devinitconst gx1_modedb[] = {
+static const struct fb_videomode gx1_modedb[] = {
/* 640x480-60 VESA */
{ NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -195,7 +195,7 @@ static int gx1fb_blank(int blank_mode, struct fb_info *info)
return par->vid_ops->blank_display(info, blank_mode);
}
-static int __devinit gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
+static int gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
{
struct geodefb_par *par = info->par;
unsigned gx_base;
@@ -268,7 +268,7 @@ static struct fb_ops gx1fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev)
+static struct fb_info *gx1fb_init_fbinfo(struct device *dev)
{
struct geodefb_par *par;
struct fb_info *info;
@@ -318,7 +318,7 @@ static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev)
return info;
}
-static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct geodefb_par *par;
struct fb_info *info;
@@ -382,7 +382,7 @@ static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_i
return ret;
}
-static void __devexit gx1fb_remove(struct pci_dev *pdev)
+static void gx1fb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct geodefb_par *par = info->par;
@@ -441,7 +441,7 @@ static struct pci_driver gx1fb_driver = {
.name = "gx1fb",
.id_table = gx1fb_id_table,
.probe = gx1fb_probe,
- .remove = __devexit_p(gx1fb_remove),
+ .remove = gx1fb_remove,
};
static int __init gx1fb_init(void)
@@ -456,7 +456,7 @@ static int __init gx1fb_init(void)
return pci_register_driver(&gx1fb_driver);
}
-static void __devexit gx1fb_cleanup(void)
+static void gx1fb_cleanup(void)
{
pci_unregister_driver(&gx1fb_driver);
}
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index b4f19db9bb54..19f0c1add747 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -40,7 +40,7 @@ static int vram;
static int vt_switch;
/* Modes relevant to the GX (taken from modedb.c) */
-static struct fb_videomode gx_modedb[] __devinitdata = {
+static struct fb_videomode gx_modedb[] = {
/* 640x480-60 VESA */
{ NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -110,15 +110,14 @@ static struct fb_videomode gx_modedb[] __devinitdata = {
#ifdef CONFIG_OLPC
#include <asm/olpc.h>
-static struct fb_videomode gx_dcon_modedb[] __devinitdata = {
+static struct fb_videomode gx_dcon_modedb[] = {
/* The only mode the DCON has is 1200x900 */
{ NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
FB_VMODE_NONINTERLACED, 0 }
};
-static void __devinit get_modedb(struct fb_videomode **modedb,
- unsigned int *size)
+static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
{
if (olpc_has_dcon()) {
*modedb = (struct fb_videomode *) gx_dcon_modedb;
@@ -130,8 +129,7 @@ static void __devinit get_modedb(struct fb_videomode **modedb,
}
#else
-static void __devinit get_modedb(struct fb_videomode **modedb,
- unsigned int *size)
+static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
{
*modedb = (struct fb_videomode *) gx_modedb;
*size = ARRAY_SIZE(gx_modedb);
@@ -228,8 +226,7 @@ static int gxfb_blank(int blank_mode, struct fb_info *info)
return gx_blank_display(info, blank_mode);
}
-static int __devinit gxfb_map_video_memory(struct fb_info *info,
- struct pci_dev *dev)
+static int gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
{
struct gxfb_par *par = info->par;
int ret;
@@ -293,7 +290,7 @@ static struct fb_ops gxfb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static struct fb_info *__devinit gxfb_init_fbinfo(struct device *dev)
+static struct fb_info *gxfb_init_fbinfo(struct device *dev)
{
struct gxfb_par *par;
struct fb_info *info;
@@ -374,8 +371,7 @@ static int gxfb_resume(struct pci_dev *pdev)
}
#endif
-static int __devinit gxfb_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct gxfb_par *par;
struct fb_info *info;
@@ -455,7 +451,7 @@ static int __devinit gxfb_probe(struct pci_dev *pdev,
return ret;
}
-static void __devexit gxfb_remove(struct pci_dev *pdev)
+static void gxfb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct gxfb_par *par = info->par;
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 416851ca8754..4dd7b5566962 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -35,7 +35,7 @@ static int vt_switch;
* we try to make it something sane - 640x480-60 is sane
*/
-static struct fb_videomode geode_modedb[] __devinitdata = {
+static struct fb_videomode geode_modedb[] = {
/* 640x480-60 */
{ NULL, 60, 640, 480, 39682, 48, 8, 25, 2, 88, 2,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
@@ -219,15 +219,14 @@ static struct fb_videomode geode_modedb[] __devinitdata = {
#ifdef CONFIG_OLPC
#include <asm/olpc.h>
-static struct fb_videomode olpc_dcon_modedb[] __devinitdata = {
+static struct fb_videomode olpc_dcon_modedb[] = {
/* The only mode the DCON has is 1200x900 */
{ NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
FB_VMODE_NONINTERLACED, 0 }
};
-static void __devinit get_modedb(struct fb_videomode **modedb,
- unsigned int *size)
+static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
{
if (olpc_has_dcon()) {
*modedb = (struct fb_videomode *) olpc_dcon_modedb;
@@ -239,8 +238,7 @@ static void __devinit get_modedb(struct fb_videomode **modedb,
}
#else
-static void __devinit get_modedb(struct fb_videomode **modedb,
- unsigned int *size)
+static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
{
*modedb = (struct fb_videomode *) geode_modedb;
*size = ARRAY_SIZE(geode_modedb);
@@ -336,8 +334,7 @@ static int lxfb_blank(int blank_mode, struct fb_info *info)
}
-static int __devinit lxfb_map_video_memory(struct fb_info *info,
- struct pci_dev *dev)
+static int lxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
{
struct lxfb_par *par = info->par;
int ret;
@@ -414,7 +411,7 @@ static struct fb_ops lxfb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static struct fb_info * __devinit lxfb_init_fbinfo(struct device *dev)
+static struct fb_info *lxfb_init_fbinfo(struct device *dev)
{
struct lxfb_par *par;
struct fb_info *info;
@@ -498,8 +495,7 @@ static int lxfb_resume(struct pci_dev *pdev)
#define lxfb_resume NULL
#endif
-static int __devinit lxfb_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int lxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct lxfb_par *par;
struct fb_info *info;
@@ -590,7 +586,7 @@ err:
return ret;
}
-static void __devexit lxfb_remove(struct pci_dev *pdev)
+static void lxfb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct lxfb_par *par = info->par;
diff --git a/drivers/video/goldfishfb.c b/drivers/video/goldfishfb.c
new file mode 100644
index 000000000000..489abb32fc04
--- /dev/null
+++ b/drivers/video/goldfishfb.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+enum {
+ FB_GET_WIDTH = 0x00,
+ FB_GET_HEIGHT = 0x04,
+ FB_INT_STATUS = 0x08,
+ FB_INT_ENABLE = 0x0c,
+ FB_SET_BASE = 0x10,
+ FB_SET_ROTATION = 0x14,
+ FB_SET_BLANK = 0x18,
+ FB_GET_PHYS_WIDTH = 0x1c,
+ FB_GET_PHYS_HEIGHT = 0x20,
+
+ FB_INT_VSYNC = 1U << 0,
+ FB_INT_BASE_UPDATE_DONE = 1U << 1
+};
+
+struct goldfish_fb {
+ void __iomem *reg_base;
+ int irq;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ int base_update_count;
+ int rotation;
+ struct fb_info fb;
+ u32 cmap[16];
+};
+
+static irqreturn_t goldfish_fb_interrupt(int irq, void *dev_id)
+{
+ unsigned long irq_flags;
+ struct goldfish_fb *fb = dev_id;
+ u32 status;
+
+ spin_lock_irqsave(&fb->lock, irq_flags);
+ status = readl(fb->reg_base + FB_INT_STATUS);
+ if (status & FB_INT_BASE_UPDATE_DONE) {
+ fb->base_update_count++;
+ wake_up(&fb->wait);
+ }
+ spin_unlock_irqrestore(&fb->lock, irq_flags);
+ return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static inline u32 convert_bitfield(int val, struct fb_bitfield *bf)
+{
+ unsigned int mask = (1 << bf->length) - 1;
+
+ return (val >> (16 - bf->length) & mask) << bf->offset;
+}
+
+static int
+goldfish_fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
+ unsigned int blue, unsigned int transp, struct fb_info *info)
+{
+ struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+
+ if (regno < 16) {
+ fb->cmap[regno] = convert_bitfield(transp, &fb->fb.var.transp) |
+ convert_bitfield(blue, &fb->fb.var.blue) |
+ convert_bitfield(green, &fb->fb.var.green) |
+ convert_bitfield(red, &fb->fb.var.red);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static int goldfish_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if ((var->rotate & 1) != (info->var.rotate & 1)) {
+ if ((var->xres != info->var.yres) ||
+ (var->yres != info->var.xres) ||
+ (var->xres_virtual != info->var.yres) ||
+ (var->yres_virtual > info->var.xres * 2) ||
+ (var->yres_virtual < info->var.xres)) {
+ return -EINVAL;
+ }
+ } else {
+ if ((var->xres != info->var.xres) ||
+ (var->yres != info->var.yres) ||
+ (var->xres_virtual != info->var.xres) ||
+ (var->yres_virtual > info->var.yres * 2) ||
+ (var->yres_virtual < info->var.yres)) {
+ return -EINVAL;
+ }
+ }
+ if ((var->xoffset != info->var.xoffset) ||
+ (var->bits_per_pixel != info->var.bits_per_pixel) ||
+ (var->grayscale != info->var.grayscale)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int goldfish_fb_set_par(struct fb_info *info)
+{
+ struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+ if (fb->rotation != fb->fb.var.rotate) {
+ info->fix.line_length = info->var.xres * 2;
+ fb->rotation = fb->fb.var.rotate;
+ writel(fb->rotation, fb->reg_base + FB_SET_ROTATION);
+ }
+ return 0;
+}
+
+
+static int goldfish_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ unsigned long irq_flags;
+ int base_update_count;
+ struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+
+ spin_lock_irqsave(&fb->lock, irq_flags);
+ base_update_count = fb->base_update_count;
+ writel(fb->fb.fix.smem_start + fb->fb.var.xres * 2 * var->yoffset,
+ fb->reg_base + FB_SET_BASE);
+ spin_unlock_irqrestore(&fb->lock, irq_flags);
+ wait_event_timeout(fb->wait,
+ fb->base_update_count != base_update_count, HZ / 15);
+ if (fb->base_update_count == base_update_count)
+ pr_err("goldfish_fb_pan_display: timeout wating for base update\n");
+ return 0;
+}
+
+static int goldfish_fb_blank(int blank, struct fb_info *info)
+{
+ struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+ switch (blank) {
+ case FB_BLANK_NORMAL:
+ writel(1, fb->reg_base + FB_SET_BLANK);
+ break;
+ case FB_BLANK_UNBLANK:
+ writel(0, fb->reg_base + FB_SET_BLANK);
+ break;
+ }
+ return 0;
+}
+
+static struct fb_ops goldfish_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = goldfish_fb_check_var,
+ .fb_set_par = goldfish_fb_set_par,
+ .fb_setcolreg = goldfish_fb_setcolreg,
+ .fb_pan_display = goldfish_fb_pan_display,
+ .fb_blank = goldfish_fb_blank,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+
+static int goldfish_fb_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct goldfish_fb *fb;
+ size_t framesize;
+ u32 width, height;
+ dma_addr_t fbpaddr;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (fb == NULL) {
+ ret = -ENOMEM;
+ goto err_fb_alloc_failed;
+ }
+ spin_lock_init(&fb->lock);
+ init_waitqueue_head(&fb->wait);
+ platform_set_drvdata(pdev, fb);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto err_no_io_base;
+ }
+ fb->reg_base = ioremap(r->start, PAGE_SIZE);
+ if (fb->reg_base == NULL) {
+ ret = -ENOMEM;
+ goto err_no_io_base;
+ }
+
+ fb->irq = platform_get_irq(pdev, 0);
+ if (fb->irq <= 0) {
+ ret = -ENODEV;
+ goto err_no_irq;
+ }
+
+ width = readl(fb->reg_base + FB_GET_WIDTH);
+ height = readl(fb->reg_base + FB_GET_HEIGHT);
+
+ fb->fb.fbops = &goldfish_fb_ops;
+ fb->fb.flags = FBINFO_FLAG_DEFAULT;
+ fb->fb.pseudo_palette = fb->cmap;
+ fb->fb.fix.type = FB_TYPE_PACKED_PIXELS;
+ fb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ fb->fb.fix.line_length = width * 2;
+ fb->fb.fix.accel = FB_ACCEL_NONE;
+ fb->fb.fix.ypanstep = 1;
+
+ fb->fb.var.xres = width;
+ fb->fb.var.yres = height;
+ fb->fb.var.xres_virtual = width;
+ fb->fb.var.yres_virtual = height * 2;
+ fb->fb.var.bits_per_pixel = 16;
+ fb->fb.var.activate = FB_ACTIVATE_NOW;
+ fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
+ fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
+ fb->fb.var.pixclock = 10000;
+
+ fb->fb.var.red.offset = 11;
+ fb->fb.var.red.length = 5;
+ fb->fb.var.green.offset = 5;
+ fb->fb.var.green.length = 6;
+ fb->fb.var.blue.offset = 0;
+ fb->fb.var.blue.length = 5;
+
+ framesize = width * height * 2 * 2;
+ fb->fb.screen_base = (char __force __iomem *)dma_alloc_coherent(
+ &pdev->dev, framesize,
+ &fbpaddr, GFP_KERNEL);
+ pr_debug("allocating frame buffer %d * %d, got %p\n",
+ width, height, fb->fb.screen_base);
+ if (fb->fb.screen_base == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_screen_base_failed;
+ }
+ fb->fb.fix.smem_start = fbpaddr;
+ fb->fb.fix.smem_len = framesize;
+
+ ret = fb_set_var(&fb->fb, &fb->fb.var);
+ if (ret)
+ goto err_fb_set_var_failed;
+
+ ret = request_irq(fb->irq, goldfish_fb_interrupt, IRQF_SHARED,
+ pdev->name, fb);
+ if (ret)
+ goto err_request_irq_failed;
+
+ writel(FB_INT_BASE_UPDATE_DONE, fb->reg_base + FB_INT_ENABLE);
+ goldfish_fb_pan_display(&fb->fb.var, &fb->fb); /* updates base */
+
+ ret = register_framebuffer(&fb->fb);
+ if (ret)
+ goto err_register_framebuffer_failed;
+ return 0;
+
+err_register_framebuffer_failed:
+ free_irq(fb->irq, fb);
+err_request_irq_failed:
+err_fb_set_var_failed:
+ dma_free_coherent(&pdev->dev, framesize,
+ (void *)fb->fb.screen_base,
+ fb->fb.fix.smem_start);
+err_alloc_screen_base_failed:
+err_no_irq:
+ iounmap(fb->reg_base);
+err_no_io_base:
+ kfree(fb);
+err_fb_alloc_failed:
+ return ret;
+}
+
+static int goldfish_fb_remove(struct platform_device *pdev)
+{
+ size_t framesize;
+ struct goldfish_fb *fb = platform_get_drvdata(pdev);
+
+ framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual * 2;
+ unregister_framebuffer(&fb->fb);
+ free_irq(fb->irq, fb);
+
+ dma_free_coherent(&pdev->dev, framesize, (void *)fb->fb.screen_base,
+ fb->fb.fix.smem_start);
+ iounmap(fb->reg_base);
+ return 0;
+}
+
+
+static struct platform_driver goldfish_fb_driver = {
+ .probe = goldfish_fb_probe,
+ .remove = goldfish_fb_remove,
+ .driver = {
+ .name = "goldfish_fb"
+ }
+};
+
+module_platform_driver(goldfish_fb_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c
index 5245f9a71892..861109e7de1b 100644
--- a/drivers/video/grvga.c
+++ b/drivers/video/grvga.c
@@ -70,7 +70,7 @@ static const struct fb_videomode grvga_modedb[] = {
}
};
-static struct fb_fix_screeninfo grvga_fix __devinitdata = {
+static struct fb_fix_screeninfo grvga_fix = {
.id = "AG SVGACTRL",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -267,8 +267,8 @@ static struct fb_ops grvga_ops = {
.fb_imageblit = cfb_imageblit
};
-static int __devinit grvga_parse_custom(char *options,
- struct fb_var_screeninfo *screendata)
+static int grvga_parse_custom(char *options,
+ struct fb_var_screeninfo *screendata)
{
char *this_opt;
int count = 0;
@@ -329,7 +329,7 @@ static int __devinit grvga_parse_custom(char *options,
return 0;
}
-static int __devinit grvga_probe(struct platform_device *dev)
+static int grvga_probe(struct platform_device *dev)
{
struct fb_info *info;
int retval = -ENOMEM;
@@ -512,7 +512,7 @@ free_fb:
return retval;
}
-static int __devexit grvga_remove(struct platform_device *device)
+static int grvga_remove(struct platform_device *device)
{
struct fb_info *info = dev_get_drvdata(&device->dev);
struct grvga_par *par = info->par;
@@ -554,7 +554,7 @@ static struct platform_driver grvga_driver = {
.of_match_table = svgactrl_of_match,
},
.probe = grvga_probe,
- .remove = __devexit_p(grvga_remove),
+ .remove = grvga_remove,
};
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
index 0e9afa41d163..c35663f6a54a 100644
--- a/drivers/video/gxt4500.c
+++ b/drivers/video/gxt4500.c
@@ -1,5 +1,6 @@
/*
- * Frame buffer device for IBM GXT4500P and GXT6000P display adaptors
+ * Frame buffer device for IBM GXT4500P/6500P and GXT4000P/6000P
+ * display adaptors
*
* Copyright (C) 2006 Paul Mackerras, IBM Corp. <paulus@samba.org>
*/
@@ -14,6 +15,8 @@
#include <linux/string.h>
#define PCI_DEVICE_ID_IBM_GXT4500P 0x21c
+#define PCI_DEVICE_ID_IBM_GXT6500P 0x21b
+#define PCI_DEVICE_ID_IBM_GXT4000P 0x16e
#define PCI_DEVICE_ID_IBM_GXT6000P 0x170
/* GXT4500P registers */
@@ -156,7 +159,7 @@ struct gxt4500_par {
static char *mode_option;
/* default mode: 1280x1024 @ 60 Hz, 8 bpp */
-static const struct fb_videomode defaultmode __devinitconst = {
+static const struct fb_videomode defaultmode = {
.refresh = 60,
.xres = 1280,
.yres = 1024,
@@ -173,6 +176,8 @@ static const struct fb_videomode defaultmode __devinitconst = {
/* List of supported cards */
enum gxt_cards {
GXT4500P,
+ GXT6500P,
+ GXT4000P,
GXT6000P
};
@@ -182,6 +187,8 @@ static const struct cardinfo {
const char *cardname;
} cardinfo[] = {
[GXT4500P] = { .refclk_ps = 9259, .cardname = "IBM GXT4500P" },
+ [GXT6500P] = { .refclk_ps = 9259, .cardname = "IBM GXT6500P" },
+ [GXT4000P] = { .refclk_ps = 40000, .cardname = "IBM GXT4000P" },
[GXT6000P] = { .refclk_ps = 40000, .cardname = "IBM GXT6000P" },
};
@@ -581,7 +588,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
return 0;
}
-static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
+static const struct fb_fix_screeninfo gxt4500_fix = {
.id = "IBM GXT4500P",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -603,8 +610,7 @@ static struct fb_ops gxt4500_ops = {
};
/* PCI functions */
-static int __devinit gxt4500_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err;
unsigned long reg_phys, fb_phys;
@@ -713,7 +719,7 @@ static int __devinit gxt4500_probe(struct pci_dev *pdev,
return -ENODEV;
}
-static void __devexit gxt4500_remove(struct pci_dev *pdev)
+static void gxt4500_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct gxt4500_par *par;
@@ -736,6 +742,10 @@ static void __devexit gxt4500_remove(struct pci_dev *pdev)
static const struct pci_device_id gxt4500_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT4500P),
.driver_data = GXT4500P },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT6500P),
+ .driver_data = GXT6500P },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT4000P),
+ .driver_data = GXT4000P },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT6000P),
.driver_data = GXT6000P },
{ 0 }
@@ -747,10 +757,10 @@ static struct pci_driver gxt4500_driver = {
.name = "gxt4500",
.id_table = gxt4500_pci_tbl,
.probe = gxt4500_probe,
- .remove = __devexit_p(gxt4500_remove),
+ .remove = gxt4500_remove,
};
-static int __devinit gxt4500_init(void)
+static int gxt4500_init(void)
{
#ifndef MODULE
if (fb_get_options("gxt4500", &mode_option))
@@ -768,7 +778,7 @@ static void __exit gxt4500_exit(void)
module_exit(gxt4500_exit);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
-MODULE_DESCRIPTION("FBDev driver for IBM GXT4500P/6000P");
+MODULE_DESCRIPTION("FBDev driver for IBM GXT4500P/6500P and GXT4000P/6000P");
MODULE_LICENSE("GPL");
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
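The new GXT6500P/GXT4000P support only needs PCI IDs, enum values and reference-clock data because the .driver_data field of each table entry indexes cardinfo[], so the probe path picks up the right card name and refclk automatically. A hypothetical sketch of that lookup (example_probe is not the driver's actual probe function):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct cardinfo *ci = &cardinfo[ent->driver_data];

	dev_info(&pdev->dev, "found %s\n", ci->cardname);
	return 0;
}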
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 614251a9af91..59d23181fdb0 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -47,7 +47,7 @@
#define DPY_W 600
#define DPY_H 800
-static struct fb_fix_screeninfo hecubafb_fix __devinitdata = {
+static struct fb_fix_screeninfo hecubafb_fix = {
.id = "hecubafb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO01,
@@ -58,7 +58,7 @@ static struct fb_fix_screeninfo hecubafb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo hecubafb_var __devinitdata = {
+static struct fb_var_screeninfo hecubafb_var = {
.xres = DPY_W,
.yres = DPY_H,
.xres_virtual = DPY_W,
@@ -211,7 +211,7 @@ static struct fb_deferred_io hecubafb_defio = {
.deferred_io = hecubafb_dpy_deferred_io,
};
-static int __devinit hecubafb_probe(struct platform_device *dev)
+static int hecubafb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct hecuba_board *board;
@@ -280,7 +280,7 @@ err_videomem_alloc:
return retval;
}
-static int __devexit hecubafb_remove(struct platform_device *dev)
+static int hecubafb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -299,7 +299,7 @@ static int __devexit hecubafb_remove(struct platform_device *dev)
static struct platform_driver hecubafb_driver = {
.probe = hecubafb_probe,
- .remove = __devexit_p(hecubafb_remove),
+ .remove = hecubafb_remove,
.driver = {
.owner = THIS_MODULE,
.name = "hecubafb",
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index c645f9282650..1e9e2d819d1f 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock);
/* Framebuffer driver structures */
-static struct fb_var_screeninfo hga_default_var __devinitdata = {
+static struct fb_var_screeninfo hga_default_var = {
.xres = 720,
.yres = 348,
.xres_virtual = 720,
@@ -120,7 +120,7 @@ static struct fb_var_screeninfo hga_default_var __devinitdata = {
.width = -1,
};
-static struct fb_fix_screeninfo hga_fix __devinitdata = {
+static struct fb_fix_screeninfo hga_fix = {
.id = "HGA",
.type = FB_TYPE_PACKED_PIXELS, /* (not sure) */
.visual = FB_VISUAL_MONO10,
@@ -276,7 +276,7 @@ static void hga_blank(int blank_mode)
spin_unlock_irqrestore(&hga_reg_lock, flags);
}
-static int __devinit hga_card_detect(void)
+static int hga_card_detect(void)
{
int count = 0;
void __iomem *p, *q;
@@ -546,7 +546,7 @@ static struct fb_ops hgafb_ops = {
* Initialization
*/
-static int __devinit hgafb_probe(struct platform_device *pdev)
+static int hgafb_probe(struct platform_device *pdev)
{
struct fb_info *info;
@@ -592,7 +592,7 @@ static int __devinit hgafb_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit hgafb_remove(struct platform_device *pdev)
+static int hgafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
@@ -617,7 +617,7 @@ static int __devexit hgafb_remove(struct platform_device *pdev)
static struct platform_driver hgafb_driver = {
.probe = hgafb_probe,
- .remove = __devexit_p(hgafb_remove),
+ .remove = hgafb_remove,
.driver = {
.name = "hgafb",
},
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index cfb8d6451014..c2414d6ab646 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -30,14 +30,14 @@
#define WIDTH 640
-static struct fb_var_screeninfo hitfb_var __devinitdata = {
+static struct fb_var_screeninfo hitfb_var = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo hitfb_fix __devinitdata = {
+static struct fb_fix_screeninfo hitfb_fix = {
.id = "Hitachi HD64461",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
@@ -324,7 +324,7 @@ static struct fb_ops hitfb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __devinit hitfb_probe(struct platform_device *dev)
+static int hitfb_probe(struct platform_device *dev)
{
unsigned short lcdclor, ldr3, ldvndr;
struct fb_info *info;
@@ -417,7 +417,7 @@ err_fb:
return ret;
}
-static int __devexit hitfb_remove(struct platform_device *dev)
+static int hitfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -462,7 +462,7 @@ static const struct dev_pm_ops hitfb_dev_pm_ops = {
static struct platform_driver hitfb_driver = {
.probe = hitfb_probe,
- .remove = __devexit_p(hitfb_remove),
+ .remove = hitfb_remove,
.driver = {
.name = "hitfb",
.owner = THIS_MODULE,
diff --git a/drivers/video/hpfb.c b/drivers/video/hpfb.c
index 7324865f965f..b802f93cef5d 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/hpfb.c
@@ -206,8 +206,7 @@ static struct fb_ops hpfb_ops = {
#define HPFB_FBOMSB 0x5d /* Frame buffer offset */
#define HPFB_FBOLSB 0x5f
-static int __devinit hpfb_init_one(unsigned long phys_base,
- unsigned long virt_base)
+static int hpfb_init_one(unsigned long phys_base, unsigned long virt_base)
{
unsigned long fboff, fb_width, fb_height, fb_start;
int ret;
@@ -327,7 +326,7 @@ unmap_screen_base:
/*
* Initialise the framebuffer
*/
-static int __devinit hpfb_dio_probe(struct dio_dev * d, const struct dio_device_id * ent)
+static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent)
{
unsigned long paddr, vaddr;
@@ -350,7 +349,7 @@ static int __devinit hpfb_dio_probe(struct dio_dev * d, const struct dio_device_
return 0;
}
-static void __devexit hpfb_remove_one(struct dio_dev *d)
+static void hpfb_remove_one(struct dio_dev *d)
{
unregister_framebuffer(&fb_info);
if (d->scode >= DIOII_SCBASE)
@@ -373,7 +372,7 @@ static struct dio_driver hpfb_driver = {
.name = "hpfb",
.id_table = hpfb_dio_tbl,
.probe = hpfb_dio_probe,
- .remove = __devexit_p(hpfb_remove_one),
+ .remove = hpfb_remove_one,
};
int __init hpfb_init(void)
diff --git a/drivers/video/i740fb.c b/drivers/video/i740fb.c
index ff3f8808e4e9..cfd0c52e8f73 100644
--- a/drivers/video/i740fb.c
+++ b/drivers/video/i740fb.c
@@ -33,10 +33,10 @@
#include "i740_reg.h"
-static char *mode_option __devinitdata;
+static char *mode_option;
#ifdef CONFIG_MTRR
-static int mtrr __devinitdata = 1;
+static int mtrr = 1;
#endif
struct i740fb_par {
@@ -91,7 +91,7 @@ struct i740fb_par {
#define DACSPEED24_SD 128
#define DACSPEED32 86
-static struct fb_fix_screeninfo i740fb_fix __devinitdata = {
+static struct fb_fix_screeninfo i740fb_fix = {
.id = "i740fb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
@@ -163,7 +163,7 @@ static int i740fb_ddc_getsda(void *data)
return !!(i740inreg(par, XRX, REG_DDC_STATE) & DDC_SDA);
}
-static int __devinit i740fb_setup_ddc_bus(struct fb_info *info)
+static int i740fb_setup_ddc_bus(struct fb_info *info)
{
struct i740fb_par *par = info->par;
@@ -1007,8 +1007,7 @@ static struct fb_ops i740fb_ops = {
/* ------------------------------------------------------------------------- */
-static int __devinit i740fb_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct fb_info *info;
struct i740fb_par *par;
@@ -1174,7 +1173,7 @@ err_enable_device:
return ret;
}
-static void __devexit i740fb_remove(struct pci_dev *dev)
+static void i740fb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
@@ -1275,7 +1274,7 @@ static struct pci_driver i740fb_driver = {
.name = "i740fb",
.id_table = i740fb_id_table,
.probe = i740fb_probe,
- .remove = __devexit_p(i740fb_remove),
+ .remove = i740fb_remove,
.suspend = i740fb_suspend,
.resume = i740fb_resume,
};
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 5c067816a81d..4ce3438ade6f 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -74,12 +74,12 @@
*
* Experiment with v_offset to find out which works best for you.
*/
-static u32 v_offset_default __devinitdata; /* For 32 MiB Aper size, 8 should be the default */
-static u32 voffset __devinitdata;
+static u32 v_offset_default; /* For 32 MiB Aper size, 8 should be the default */
+static u32 voffset;
static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor);
-static int __devinit i810fb_init_pci (struct pci_dev *dev,
- const struct pci_device_id *entry);
+static int i810fb_init_pci(struct pci_dev *dev,
+ const struct pci_device_id *entry);
static void __exit i810fb_remove_pci(struct pci_dev *dev);
static int i810fb_resume(struct pci_dev *dev);
static int i810fb_suspend(struct pci_dev *dev, pm_message_t state);
@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
/* PCI */
-static const char * const i810_pci_list[] __devinitconst = {
+static const char * const i810_pci_list[] = {
"Intel(R) 810 Framebuffer Device" ,
"Intel(R) 810-DC100 Framebuffer Device" ,
"Intel(R) 810E Framebuffer Device" ,
@@ -132,22 +132,22 @@ static struct pci_driver i810fb_driver = {
.resume = i810fb_resume,
};
-static char *mode_option __devinitdata = NULL;
-static int vram __devinitdata = 4;
-static int bpp __devinitdata = 8;
-static bool mtrr __devinitdata;
-static bool accel __devinitdata;
-static int hsync1 __devinitdata;
-static int hsync2 __devinitdata;
-static int vsync1 __devinitdata;
-static int vsync2 __devinitdata;
-static int xres __devinitdata;
+static char *mode_option = NULL;
+static int vram = 4;
+static int bpp = 8;
+static bool mtrr;
+static bool accel;
+static int hsync1;
+static int hsync2;
+static int vsync1;
+static int vsync2;
+static int xres;
static int yres;
-static int vyres __devinitdata;
-static bool sync __devinitdata;
-static bool extvga __devinitdata;
-static bool dcolor __devinitdata;
-static bool ddc3 __devinitdata;
+static int vyres;
+static bool sync;
+static bool extvga;
+static bool dcolor;
+static bool ddc3;
/*------------------------------------------------------------*/
@@ -1541,7 +1541,7 @@ static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
return 0;
}
-static struct fb_ops i810fb_ops __devinitdata = {
+static struct fb_ops i810fb_ops = {
.owner = THIS_MODULE,
.fb_open = i810fb_open,
.fb_release = i810fb_release,
@@ -1628,7 +1628,7 @@ fail:
* AGP resource allocation *
***********************************************************************/
-static void __devinit i810_fix_pointers(struct i810fb_par *par)
+static void i810_fix_pointers(struct i810fb_par *par)
{
par->fb.physical = par->aperture.physical+(par->fb.offset << 12);
par->fb.virtual = par->aperture.virtual+(par->fb.offset << 12);
@@ -1640,7 +1640,7 @@ static void __devinit i810_fix_pointers(struct i810fb_par *par)
(par->cursor_heap.offset << 12);
}
-static void __devinit i810_fix_offsets(struct i810fb_par *par)
+static void i810_fix_offsets(struct i810fb_par *par)
{
if (vram + 1 > par->aperture.size >> 20)
vram = (par->aperture.size >> 20) - 1;
@@ -1660,7 +1660,7 @@ static void __devinit i810_fix_offsets(struct i810fb_par *par)
par->cursor_heap.size = 4096;
}
-static int __devinit i810_alloc_agp_mem(struct fb_info *info)
+static int i810_alloc_agp_mem(struct fb_info *info)
{
struct i810fb_par *par = info->par;
int size;
@@ -1723,7 +1723,7 @@ static int __devinit i810_alloc_agp_mem(struct fb_info *info)
* Sets the user monitor's horizontal and vertical
* frequency limits
*/
-static void __devinit i810_init_monspecs(struct fb_info *info)
+static void i810_init_monspecs(struct fb_info *info)
{
if (!hsync1)
hsync1 = HFMIN;
@@ -1755,8 +1755,7 @@ static void __devinit i810_init_monspecs(struct fb_info *info)
* @par: pointer to i810fb_par structure
* @info: pointer to current fb_info structure
*/
-static void __devinit i810_init_defaults(struct i810fb_par *par,
- struct fb_info *info)
+static void i810_init_defaults(struct i810fb_par *par, struct fb_info *info)
{
mutex_init(&par->open_lock);
@@ -1812,7 +1811,7 @@ static void __devinit i810_init_defaults(struct i810fb_par *par,
* i810_init_device - initialize device
* @par: pointer to i810fb_par structure
*/
-static void __devinit i810_init_device(struct i810fb_par *par)
+static void i810_init_device(struct i810fb_par *par)
{
u8 reg;
u8 __iomem *mmio = par->mmio_start_virtual;
@@ -1833,9 +1832,8 @@ static void __devinit i810_init_device(struct i810fb_par *par)
}
-static int __devinit
-i810_allocate_pci_resource(struct i810fb_par *par,
- const struct pci_device_id *entry)
+static int i810_allocate_pci_resource(struct i810fb_par *par,
+ const struct pci_device_id *entry)
{
int err;
@@ -1892,7 +1890,7 @@ i810_allocate_pci_resource(struct i810fb_par *par,
return 0;
}
-static void __devinit i810fb_find_init_mode(struct fb_info *info)
+static void i810fb_find_init_mode(struct fb_info *info)
{
struct fb_videomode mode;
struct fb_var_screeninfo var;
@@ -1956,7 +1954,7 @@ static void __devinit i810fb_find_init_mode(struct fb_info *info)
}
#ifndef MODULE
-static int __devinit i810fb_setup(char *options)
+static int i810fb_setup(char *options)
{
char *this_opt, *suffix = NULL;
@@ -2007,8 +2005,8 @@ static int __devinit i810fb_setup(char *options)
}
#endif
-static int __devinit i810fb_init_pci (struct pci_dev *dev,
- const struct pci_device_id *entry)
+static int i810fb_init_pci(struct pci_dev *dev,
+ const struct pci_device_id *entry)
{
struct fb_info *info;
struct i810fb_par *par = NULL;
@@ -2136,7 +2134,7 @@ static void __exit i810fb_remove_pci(struct pci_dev *dev)
}
#ifndef MODULE
-static int __devinit i810fb_init(void)
+static int i810fb_init(void)
{
char *option = NULL;
@@ -2154,7 +2152,7 @@ static int __devinit i810fb_init(void)
#ifdef MODULE
-static int __devinit i810fb_init(void)
+static int i810fb_init(void)
{
hsync1 *= 1000;
hsync2 *= 1000;
diff --git a/drivers/video/i810/i810_main.h b/drivers/video/i810/i810_main.h
index 51d4f3d4116d..a25afaa534ba 100644
--- a/drivers/video/i810/i810_main.h
+++ b/drivers/video/i810/i810_main.h
@@ -64,7 +64,7 @@ static inline void flush_cache(void)
#include <asm/mtrr.h>
-static inline void __devinit set_mtrr(struct i810fb_par *par)
+static inline void set_mtrr(struct i810fb_par *par)
{
par->mtrr_reg = mtrr_add((u32) par->aperture.physical,
par->aperture.size, MTRR_TYPE_WRCOMB, 1);
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 2d97752f79a5..79cbfa7d1a9b 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -571,7 +571,7 @@ static int __init igafb_setup(char *options)
module_init(igafb_init);
MODULE_LICENSE("GPL");
-static struct pci_device_id igafb_pci_tbl[] __devinitdata = {
+static struct pci_device_id igafb_pci_tbl[] = {
{ PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index 8149356471e4..d5220cc90e93 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -225,7 +225,7 @@ struct initvalues {
__u8 addr, value;
};
-static struct initvalues ibm_initregs[] __devinitdata = {
+static struct initvalues ibm_initregs[] = {
{ CLKCTL, 0x21 },
{ SYNCCTL, 0x00 },
{ HSYNCPOS, 0x00 },
@@ -272,7 +272,7 @@ static struct initvalues ibm_initregs[] __devinitdata = {
{ KEYCTL, 0x00 }
};
-static struct initvalues tvp_initregs[] __devinitdata = {
+static struct initvalues tvp_initregs[] = {
{ TVPIRICC, 0x00 },
{ TVPIRBRC, 0xe4 },
{ TVPIRLAC, 0x06 },
@@ -336,7 +336,7 @@ enum {
static int inverse = 0;
static char fontname[40] __initdata = { 0 };
#if defined(CONFIG_PPC)
-static signed char init_vmode __devinitdata = -1, init_cmode __devinitdata = -1;
+static signed char init_vmode = -1, init_cmode = -1;
#endif
static struct imstt_regvals tvp_reg_init_2 = {
@@ -1333,7 +1333,7 @@ static struct pci_driver imsttfb_pci_driver = {
.name = "imsttfb",
.id_table = imsttfb_pci_tbl,
.probe = imsttfb_probe,
- .remove = __devexit_p(imsttfb_remove),
+ .remove = imsttfb_remove,
};
static struct fb_ops imsttfb_ops = {
@@ -1349,8 +1349,7 @@ static struct fb_ops imsttfb_ops = {
.fb_ioctl = imsttfb_ioctl,
};
-static void __devinit
-init_imstt(struct fb_info *info)
+static void init_imstt(struct fb_info *info)
{
struct imstt_par *par = info->par;
__u32 i, tmp, *ip, *end;
@@ -1466,8 +1465,7 @@ init_imstt(struct fb_info *info)
info->node, info->fix.id, info->fix.smem_len >> 20, tmp);
}
-static int __devinit
-imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned long addr, size;
struct imstt_par *par;
@@ -1534,8 +1532,7 @@ imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
-static void __devexit
-imsttfb_remove(struct pci_dev *pdev)
+static void imsttfb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct imstt_par *par = info->par;
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index e501dbc966b3..0abf2bf20836 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -139,6 +139,7 @@ struct imxfb_info {
struct clk *clk_ahb;
struct clk *clk_per;
enum imxfb_type devtype;
+ bool enabled;
/*
* These are the addresses we mapped
@@ -536,6 +537,10 @@ static void imxfb_exit_backlight(struct imxfb_info *fbi)
static void imxfb_enable_controller(struct imxfb_info *fbi)
{
+
+ if (fbi->enabled)
+ return;
+
pr_debug("Enabling LCD controller\n");
writel(fbi->screen_dma, fbi->regs + LCDC_SSA);
@@ -556,6 +561,7 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
clk_prepare_enable(fbi->clk_ipg);
clk_prepare_enable(fbi->clk_ahb);
clk_prepare_enable(fbi->clk_per);
+ fbi->enabled = true;
if (fbi->backlight_power)
fbi->backlight_power(1);
@@ -565,6 +571,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
static void imxfb_disable_controller(struct imxfb_info *fbi)
{
+ if (!fbi->enabled)
+ return;
+
pr_debug("Disabling LCD controller\n");
if (fbi->backlight_power)
@@ -575,6 +584,7 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
clk_disable_unprepare(fbi->clk_per);
clk_disable_unprepare(fbi->clk_ipg);
clk_disable_unprepare(fbi->clk_ahb);
+ fbi->enabled = false;
writel(0, fbi->regs + LCDC_RMCR);
}
@@ -729,6 +739,8 @@ static int __init imxfb_init_fbinfo(struct platform_device *pdev)
memset(fbi, 0, sizeof(struct imxfb_info));
+ fbi->devtype = pdev->id_entry->driver_data;
+
strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -789,7 +801,6 @@ static int __init imxfb_probe(struct platform_device *pdev)
return -ENOMEM;
fbi = info->par;
- fbi->devtype = pdev->id_entry->driver_data;
if (!fb_mode)
fb_mode = pdata->mode[0].mode.name;
@@ -917,7 +928,7 @@ failed_init:
return ret;
}
-static int __devexit imxfb_remove(struct platform_device *pdev)
+static int imxfb_remove(struct platform_device *pdev)
{
struct imx_fb_platform_data *pdata;
struct fb_info *info = platform_get_drvdata(pdev);
@@ -959,7 +970,7 @@ void imxfb_shutdown(struct platform_device * dev)
static struct platform_driver imxfb_driver = {
.suspend = imxfb_suspend,
.resume = imxfb_resume,
- .remove = __devexit_p(imxfb_remove),
+ .remove = imxfb_remove,
.shutdown = imxfb_shutdown,
.driver = {
.name = DRIVER_NAME,
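Beyond the annotation removal, the imxfb hunks above add an enabled flag so that enabling or disabling the LCD controller twice in a row becomes a no-op. Reduced to a sketch (the field and function names follow the hunks; register, clock and backlight handling is elided):

static void imxfb_enable_controller(struct imxfb_info *fbi)
{
        if (fbi->enabled)               /* already on, nothing to do */
                return;
        /* ... program LCDC registers, enable clocks and backlight ... */
        fbi->enabled = true;
}

static void imxfb_disable_controller(struct imxfb_info *fbi)
{
        if (!fbi->enabled)              /* already off, nothing to do */
                return;
        /* ... backlight off, disable clocks ... */
        fbi->enabled = false;
}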
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index bdcbfbae2777..8209e46c5d28 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -132,7 +132,7 @@
#include "intelfbhw.h"
#include "../edid.h"
-static void __devinit get_initial_mode(struct intelfb_info *dinfo);
+static void get_initial_mode(struct intelfb_info *dinfo);
static void update_dinfo(struct intelfb_info *dinfo,
struct fb_var_screeninfo *var);
static int intelfb_open(struct fb_info *info, int user);
@@ -162,10 +162,10 @@ static int intelfb_sync(struct fb_info *info);
static int intelfb_ioctl(struct fb_info *info,
unsigned int cmd, unsigned long arg);
-static int __devinit intelfb_pci_register(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void __devexit intelfb_pci_unregister(struct pci_dev *pdev);
-static int __devinit intelfb_set_fbinfo(struct intelfb_info *dinfo);
+static int intelfb_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void intelfb_pci_unregister(struct pci_dev *pdev);
+static int intelfb_set_fbinfo(struct intelfb_info *dinfo);
/*
* Limiting the class to PCI_CLASS_DISPLAY_VGA prevents function 1 of the
@@ -177,7 +177,7 @@ static int __devinit intelfb_set_fbinfo(struct intelfb_info *dinfo);
#define INTELFB_CLASS_MASK 0
#endif
-static struct pci_device_id intelfb_pci_table[] __devinitdata = {
+static struct pci_device_id intelfb_pci_table[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_830M, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_830M },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM },
@@ -219,7 +219,7 @@ static struct pci_driver intelfb_driver = {
.name = "intelfb",
.id_table = intelfb_pci_table,
.probe = intelfb_pci_register,
- .remove = __devexit_p(intelfb_pci_unregister)
+ .remove = intelfb_pci_unregister,
};
/* Module description/parameters */
@@ -415,7 +415,7 @@ module_exit(intelfb_exit);
***************************************************************/
#ifdef CONFIG_MTRR
-static inline void __devinit set_mtrr(struct intelfb_info *dinfo)
+static inline void set_mtrr(struct intelfb_info *dinfo)
{
dinfo->mtrr_reg = mtrr_add(dinfo->aperture.physical,
dinfo->aperture.size, MTRR_TYPE_WRCOMB, 1);
@@ -497,8 +497,8 @@ static void cleanup(struct intelfb_info *dinfo)
} while (0)
-static int __devinit intelfb_pci_register(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int intelfb_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct fb_info *info;
struct intelfb_info *dinfo;
@@ -921,8 +921,7 @@ err_out_cmap:
return -ENODEV;
}
-static void __devexit
-intelfb_pci_unregister(struct pci_dev *pdev)
+static void intelfb_pci_unregister(struct pci_dev *pdev)
{
struct intelfb_info *dinfo = pci_get_drvdata(pdev);
@@ -970,7 +969,7 @@ static __inline__ int var_to_refresh(const struct fb_var_screeninfo *var)
* Various intialisation functions *
***************************************************************/
-static void __devinit get_initial_mode(struct intelfb_info *dinfo)
+static void get_initial_mode(struct intelfb_info *dinfo)
{
struct fb_var_screeninfo *var;
int xtot, ytot;
@@ -1037,7 +1036,7 @@ static void __devinit get_initial_mode(struct intelfb_info *dinfo)
}
}
-static int __devinit intelfb_init_var(struct intelfb_info *dinfo)
+static int intelfb_init_var(struct intelfb_info *dinfo)
{
struct fb_var_screeninfo *var;
int msrc = 0;
@@ -1118,7 +1117,7 @@ static int __devinit intelfb_init_var(struct intelfb_info *dinfo)
return 0;
}
-static int __devinit intelfb_set_fbinfo(struct intelfb_info *dinfo)
+static int intelfb_set_fbinfo(struct intelfb_info *dinfo)
{
struct fb_info *info = dinfo->info;
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 4d25711b9982..36979b4131ab 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -136,7 +136,7 @@ struct jzfb {
uint32_t pseudo_palette[16];
};
-static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
+static const struct fb_fix_screeninfo jzfb_fix = {
.id = "JZ4740 FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
@@ -619,7 +619,7 @@ static struct fb_ops jzfb_ops = {
.fb_setcolreg = jzfb_setcolreg,
};
-static int __devinit jzfb_probe(struct platform_device *pdev)
+static int jzfb_probe(struct platform_device *pdev)
{
int ret;
struct jzfb *jzfb;
@@ -660,9 +660,9 @@ static int __devinit jzfb_probe(struct platform_device *pdev)
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jzfb->base = devm_request_and_ioremap(&pdev->dev, mem);
- if (!jzfb->base) {
- ret = -EBUSY;
+ jzfb->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(jzfb->base)) {
+ ret = PTR_ERR(jzfb->base);
goto err_framebuffer_release;
}
@@ -725,7 +725,7 @@ err_framebuffer_release:
return ret;
}
-static int __devexit jzfb_remove(struct platform_device *pdev)
+static int jzfb_remove(struct platform_device *pdev)
{
struct jzfb *jzfb = platform_get_drvdata(pdev);
@@ -794,7 +794,7 @@ static const struct dev_pm_ops jzfb_pm_ops = {
static struct platform_driver jzfb_driver = {
.probe = jzfb_probe,
- .remove = __devexit_p(jzfb_remove),
+ .remove = jzfb_remove,
.driver = {
.name = "jz4740-fb",
.pm = JZFB_PM_OPS,
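The jz4740 hunk above also replaces devm_request_and_ioremap(), which returned NULL on failure, with devm_ioremap_resource(), which returns an ERR_PTR()-encoded error, so the error check changes shape. The adopted pattern, sketched inside a hypothetical probe function:

        struct resource *mem;
        void __iomem *base;

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* propagates -EBUSY, -EINVAL, ... */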
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index acb9370fdb14..6157f74ac600 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -40,14 +40,14 @@
#define KHZ2PICOS(a) (1000000000UL/(a))
/****************************************************************************/
-static struct fb_fix_screeninfo kyro_fix __devinitdata = {
+static struct fb_fix_screeninfo kyro_fix = {
.id = "ST Kyro",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo kyro_var __devinitdata = {
+static struct fb_var_screeninfo kyro_var = {
/* 640x480, 16bpp @ 60 Hz */
.xres = 640,
.yres = 480,
@@ -81,18 +81,18 @@ typedef struct {
/* global graphics card info structure (one per card) */
static device_info_t deviceInfo;
-static char *mode_option __devinitdata = NULL;
-static int nopan __devinitdata = 0;
-static int nowrap __devinitdata = 1;
+static char *mode_option = NULL;
+static int nopan = 0;
+static int nowrap = 1;
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata = 0;
+static int nomtrr = 0;
#endif
/* PCI driver prototypes */
static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void kyrofb_remove(struct pci_dev *pdev);
-static struct fb_videomode kyro_modedb[] __devinitdata = {
+static struct fb_videomode kyro_modedb[] = {
{
/* 640x350 @ 85Hz */
NULL, 85, 640, 350, KHZ2PICOS(31500),
@@ -653,7 +653,7 @@ static struct pci_driver kyrofb_pci_driver = {
.name = "kyrofb",
.id_table = kyrofb_pci_tbl,
.probe = kyrofb_probe,
- .remove = __devexit_p(kyrofb_remove),
+ .remove = kyrofb_remove,
};
static struct fb_ops kyrofb_ops = {
@@ -667,8 +667,7 @@ static struct fb_ops kyrofb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __devinit kyrofb_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct fb_info *info;
struct kyrofb_info *currentpar;
@@ -754,7 +753,7 @@ out_unmap:
return -EINVAL;
}
-static void __devexit kyrofb_remove(struct pci_dev *pdev)
+static void kyrofb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct kyrofb_info *par = info->par;
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index 9e946e2c1da9..b17f5009a436 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -547,7 +547,7 @@ static void leo_unmap_regs(struct platform_device *op, struct fb_info *info,
of_iounmap(&op->resource[0], info->screen_base, 0x800000);
}
-static int __devinit leo_probe(struct platform_device *op)
+static int leo_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -636,7 +636,7 @@ out_err:
return err;
}
-static int __devexit leo_remove(struct platform_device *op)
+static int leo_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct leo_par *par = info->par;
@@ -668,7 +668,7 @@ static struct platform_driver leo_driver = {
.of_match_table = leo_match,
},
.probe = leo_probe,
- .remove = __devexit_p(leo_remove),
+ .remove = leo_remove,
};
static int __init leo_init(void)
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index d68e332aa21c..91c59c9fb082 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -668,7 +668,7 @@ static int mb862xx_gdc_init(struct mb862xxfb_par *par)
return 0;
}
-static int __devinit of_platform_mb862xx_probe(struct platform_device *ofdev)
+static int of_platform_mb862xx_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct device *dev = &ofdev->dev;
@@ -786,7 +786,7 @@ fbrel:
return ret;
}
-static int __devexit of_platform_mb862xx_remove(struct platform_device *ofdev)
+static int of_platform_mb862xx_remove(struct platform_device *ofdev)
{
struct fb_info *fbi = dev_get_drvdata(&ofdev->dev);
struct mb862xxfb_par *par = fbi->par;
@@ -823,7 +823,7 @@ static int __devexit of_platform_mb862xx_remove(struct platform_device *ofdev)
/*
* common types
*/
-static struct of_device_id __devinitdata of_platform_mb862xx_tbl[] = {
+static struct of_device_id of_platform_mb862xx_tbl[] = {
{ .compatible = "fujitsu,MB86276", },
{ .compatible = "fujitsu,lime", },
{ .compatible = "fujitsu,MB86277", },
@@ -841,7 +841,7 @@ static struct platform_driver of_platform_mb862xxfb_driver = {
.of_match_table = of_platform_mb862xx_tbl,
},
.probe = of_platform_mb862xx_probe,
- .remove = __devexit_p(of_platform_mb862xx_remove),
+ .remove = of_platform_mb862xx_remove,
};
#endif
@@ -984,7 +984,7 @@ static inline int mb862xx_pci_gdc_init(struct mb862xxfb_par *par)
#define CHIP_ID(id) \
{ PCI_DEVICE(PCI_VENDOR_ID_FUJITSU_LIMITED, id) }
-static struct pci_device_id mb862xx_pci_tbl[] __devinitdata = {
+static struct pci_device_id mb862xx_pci_tbl[] = {
/* MB86295/MB86296 */
CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALP),
CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALPA),
@@ -995,8 +995,8 @@ static struct pci_device_id mb862xx_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, mb862xx_pci_tbl);
-static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int mb862xx_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct mb862xxfb_par *par;
struct fb_info *info;
@@ -1133,7 +1133,7 @@ out:
return ret;
}
-static void __devexit mb862xx_pci_remove(struct pci_dev *pdev)
+static void mb862xx_pci_remove(struct pci_dev *pdev)
{
struct fb_info *fbi = pci_get_drvdata(pdev);
struct mb862xxfb_par *par = fbi->par;
@@ -1174,11 +1174,11 @@ static struct pci_driver mb862xxfb_pci_driver = {
.name = DRV_NAME,
.id_table = mb862xx_pci_tbl,
.probe = mb862xx_pci_probe,
- .remove = __devexit_p(mb862xx_pci_remove),
+ .remove = mb862xx_pci_remove,
};
#endif
-static int __devinit mb862xxfb_init(void)
+static int mb862xxfb_init(void)
{
int ret = -ENODEV;
diff --git a/drivers/video/mbx/mbxdebugfs.c b/drivers/video/mbx/mbxdebugfs.c
index 12dec7634c55..4449f249b0e7 100644
--- a/drivers/video/mbx/mbxdebugfs.c
+++ b/drivers/video/mbx/mbxdebugfs.c
@@ -213,7 +213,7 @@ static const struct file_operations misc_fops = {
.llseek = default_llseek,
};
-static void __devinit mbxfb_debugfs_init(struct fb_info *fbi)
+static void mbxfb_debugfs_init(struct fb_info *fbi)
{
struct mbxfb_info *mfbi = fbi->par;
struct mbxfb_debugfs_data *dbg;
@@ -236,7 +236,7 @@ static void __devinit mbxfb_debugfs_init(struct fb_info *fbi)
fbi, &misc_fops);
}
-static void __devexit mbxfb_debugfs_remove(struct fb_info *fbi)
+static void mbxfb_debugfs_remove(struct fb_info *fbi)
{
struct mbxfb_info *mfbi = fbi->par;
struct mbxfb_debugfs_data *dbg = mfbi->debugfs_data;
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 6563e50413c1..0c1a874ffd2b 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -79,7 +79,7 @@ struct mbxfb_info {
};
-static struct fb_var_screeninfo mbxfb_default __devinitdata = {
+static struct fb_var_screeninfo mbxfb_default = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -102,7 +102,7 @@ static struct fb_var_screeninfo mbxfb_default __devinitdata = {
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
-static struct fb_fix_screeninfo mbxfb_fix __devinitdata = {
+static struct fb_fix_screeninfo mbxfb_fix = {
.id = "MBX",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
@@ -687,7 +687,7 @@ static struct fb_ops mbxfb_ops = {
Enable external SDRAM controller. Assume that all clocks are active
by now.
*/
-static void __devinit setup_memc(struct fb_info *fbi)
+static void setup_memc(struct fb_info *fbi)
{
unsigned long tmp;
int i;
@@ -747,7 +747,7 @@ static void enable_clocks(struct fb_info *fbi)
write_reg_dly(0x00000001, PIXCLKDIV);
}
-static void __devinit setup_graphics(struct fb_info *fbi)
+static void setup_graphics(struct fb_info *fbi)
{
unsigned long gsctrl;
unsigned long vscadr;
@@ -781,7 +781,7 @@ static void __devinit setup_graphics(struct fb_info *fbi)
write_reg_dly(vscadr, VSCADR);
}
-static void __devinit setup_display(struct fb_info *fbi)
+static void setup_display(struct fb_info *fbi)
{
unsigned long dsctrl = 0;
@@ -795,7 +795,7 @@ static void __devinit setup_display(struct fb_info *fbi)
write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
}
-static void __devinit enable_controller(struct fb_info *fbi)
+static void enable_controller(struct fb_info *fbi)
{
u32 svctrl, shctrl;
@@ -881,7 +881,7 @@ static int mbxfb_resume(struct platform_device *dev)
#define res_size(_r) (((_r)->end - (_r)->start) + 1)
-static int __devinit mbxfb_probe(struct platform_device *dev)
+static int mbxfb_probe(struct platform_device *dev)
{
int ret;
struct fb_info *fbi;
@@ -1006,7 +1006,7 @@ err1:
return ret;
}
-static int __devexit mbxfb_remove(struct platform_device *dev)
+static int mbxfb_remove(struct platform_device *dev)
{
struct fb_info *fbi = platform_get_drvdata(dev);
@@ -1038,7 +1038,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
static struct platform_driver mbxfb_driver = {
.probe = mbxfb_probe,
- .remove = __devexit_p(mbxfb_remove),
+ .remove = mbxfb_remove,
.suspend = mbxfb_suspend,
.resume = mbxfb_resume,
.driver = {
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index 97d45e5115e2..f30150d71be9 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -99,7 +99,7 @@ static struct epd_frame epd_frame_table[] = {
},
};
-static struct fb_fix_screeninfo metronomefb_fix __devinitdata = {
+static struct fb_fix_screeninfo metronomefb_fix = {
.id = "metronomefb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_STATIC_PSEUDOCOLOR,
@@ -110,7 +110,7 @@ static struct fb_fix_screeninfo metronomefb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo metronomefb_var __devinitdata = {
+static struct fb_var_screeninfo metronomefb_var = {
.xres = DPY_W,
.yres = DPY_H,
.xres_virtual = DPY_W,
@@ -167,8 +167,8 @@ static u16 calc_img_cksum(u16 *start, int length)
}
/* here we decode the incoming waveform file and populate metromem */
-static int __devinit load_waveform(u8 *mem, size_t size, int m, int t,
- struct metronomefb_par *par)
+static int load_waveform(u8 *mem, size_t size, int m, int t,
+ struct metronomefb_par *par)
{
int tta;
int wmta;
@@ -338,7 +338,7 @@ static int metronome_display_cmd(struct metronomefb_par *par)
return par->board->met_wait_event_intr(par);
}
-static int __devinit metronome_powerup_cmd(struct metronomefb_par *par)
+static int metronome_powerup_cmd(struct metronomefb_par *par)
{
int i;
u16 cs;
@@ -367,7 +367,7 @@ static int __devinit metronome_powerup_cmd(struct metronomefb_par *par)
return par->board->met_wait_event(par);
}
-static int __devinit metronome_config_cmd(struct metronomefb_par *par)
+static int metronome_config_cmd(struct metronomefb_par *par)
{
/* setup config command
we can't immediately set the opcode since the controller
@@ -385,7 +385,7 @@ static int __devinit metronome_config_cmd(struct metronomefb_par *par)
return par->board->met_wait_event(par);
}
-static int __devinit metronome_init_cmd(struct metronomefb_par *par)
+static int metronome_init_cmd(struct metronomefb_par *par)
{
int i;
u16 cs;
@@ -411,7 +411,7 @@ static int __devinit metronome_init_cmd(struct metronomefb_par *par)
return par->board->met_wait_event(par);
}
-static int __devinit metronome_init_regs(struct metronomefb_par *par)
+static int metronome_init_regs(struct metronomefb_par *par)
{
int res;
@@ -569,7 +569,7 @@ static struct fb_deferred_io metronomefb_defio = {
.deferred_io = metronomefb_dpy_deferred_io,
};
-static int __devinit metronomefb_probe(struct platform_device *dev)
+static int metronomefb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct metronome_board *board;
@@ -741,7 +741,7 @@ err:
return retval;
}
-static int __devexit metronomefb_remove(struct platform_device *dev)
+static int metronomefb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -763,7 +763,7 @@ static int __devexit metronomefb_remove(struct platform_device *dev)
static struct platform_driver metronomefb_driver = {
.probe = metronomefb_probe,
- .remove = __devexit_p(metronomefb_remove),
+ .remove = metronomefb_remove,
.driver = {
.owner = THIS_MODULE,
.name = "metronomefb",
diff --git a/drivers/video/mmp/Kconfig b/drivers/video/mmp/Kconfig
new file mode 100644
index 000000000000..e9ea39e13722
--- /dev/null
+++ b/drivers/video/mmp/Kconfig
@@ -0,0 +1,11 @@
+menuconfig MMP_DISP
+ tristate "Marvell MMP Display Subsystem support"
+ depends on CPU_PXA910 || CPU_MMP2 || CPU_MMP3 || CPU_PXA988
+ help
+ Marvell Display Subsystem support.
+
+if MMP_DISP
+source "drivers/video/mmp/hw/Kconfig"
+source "drivers/video/mmp/panel/Kconfig"
+source "drivers/video/mmp/fb/Kconfig"
+endif
diff --git a/drivers/video/mmp/Makefile b/drivers/video/mmp/Makefile
new file mode 100644
index 000000000000..a014cb358bf8
--- /dev/null
+++ b/drivers/video/mmp/Makefile
@@ -0,0 +1 @@
+obj-y += core.o hw/ panel/ fb/
diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c
new file mode 100644
index 000000000000..9ed83419038b
--- /dev/null
+++ b/drivers/video/mmp/core.c
@@ -0,0 +1,258 @@
+/*
+ * linux/drivers/video/mmp/core.c
+ * This driver is a common framework for Marvell Display Controller
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <video/mmp_disp.h>
+
+static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
+ int overlay_id)
+{
+ if (path && overlay_id < path->overlay_num)
+ return &path->overlays[overlay_id];
+ return 0;
+}
+
+static int path_check_status(struct mmp_path *path)
+{
+ int i;
+ for (i = 0; i < path->overlay_num; i++)
+ if (path->overlays[i].status)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Get a pointer to the modelist.
+ * It also returns the number of modes.
+ * The modelist is fetched from the phy or the panel:
+ * for HDMI/parallel or DSI-to-HDMI cases it comes from the phy,
+ * otherwise it comes from the panel.
+ */
+static int path_get_modelist(struct mmp_path *path,
+ struct mmp_mode **modelist)
+{
+ BUG_ON(!path || !modelist);
+
+ if (path->panel && path->panel->get_modelist)
+ return path->panel->get_modelist(path->panel, modelist);
+
+ return 0;
+}
+
+/*
+ * panel list is used to pair panel/path when either is registered
+ * path list is used by both the buffer driver and the platform driver:
+ * the platform driver registers/unregisters paths,
+ * the panel driver registers/unregisters panels,
+ * and the buffer driver looks up registered paths
+ */
+static LIST_HEAD(panel_list);
+static LIST_HEAD(path_list);
+static DEFINE_MUTEX(disp_lock);
+
+/*
+ * mmp_register_panel - register panel to panel_list and connect to path
+ * @panel: panel to be registered
+ *
+ * this function provides an interface for panel drivers to register a panel
+ * on panel_list and connect it to the path that matches
+ * panel->plat_path_name. No error is returned when no matching path is
+ * found, since registering the path after the panel is permitted.
+ */
+void mmp_register_panel(struct mmp_panel *panel)
+{
+ struct mmp_path *path;
+
+ mutex_lock(&disp_lock);
+
+ /* add */
+ list_add_tail(&panel->node, &panel_list);
+
+ /* try to register to path */
+ list_for_each_entry(path, &path_list, node) {
+ if (!strcmp(panel->plat_path_name, path->name)) {
+ dev_info(panel->dev, "connect to path %s\n",
+ path->name);
+ path->panel = panel;
+ break;
+ }
+ }
+
+ mutex_unlock(&disp_lock);
+}
+EXPORT_SYMBOL_GPL(mmp_register_panel);
+
+/*
+ * mmp_unregister_panel - unregister panel from panel_list and disconnect
+ * @panel: panel to be unregistered
+ *
+ * this function provides an interface for panel drivers to unregister a
+ * panel from panel_list and disconnect it from its path.
+ */
+void mmp_unregister_panel(struct mmp_panel *panel)
+{
+ struct mmp_path *path;
+
+ mutex_lock(&disp_lock);
+ list_del(&panel->node);
+
+ list_for_each_entry(path, &path_list, node) {
+ if (path->panel && path->panel == panel) {
+ dev_info(panel->dev, "disconnect from path %s\n",
+ path->name);
+ path->panel = NULL;
+ break;
+ }
+ }
+ mutex_unlock(&disp_lock);
+}
+EXPORT_SYMBOL_GPL(mmp_unregister_panel);
+
+/*
+ * mmp_get_path - get path by name
+ * @name: path name
+ *
+ * this function looks up the name in path_list and returns the matching
+ * path, or NULL if no path matches.
+ */
+struct mmp_path *mmp_get_path(const char *name)
+{
+ struct mmp_path *path;
+ int found = 0;
+
+ mutex_lock(&disp_lock);
+ list_for_each_entry(path, &path_list, node) {
+ if (!strcmp(name, path->name)) {
+ found = 1;
+ break;
+ }
+ }
+ mutex_unlock(&disp_lock);
+
+ return found ? path : NULL;
+}
+EXPORT_SYMBOL_GPL(mmp_get_path);
+
+/*
+ * mmp_register_path - init and register path by path_info
+ * @info: path info provided by the display controller
+ *
+ * this function initializes a path from the path info, registers it on
+ * path_list, and also tries to connect the path to a panel by name.
+ */
+struct mmp_path *mmp_register_path(struct mmp_path_info *info)
+{
+ int i;
+ size_t size;
+ struct mmp_path *path = NULL;
+ struct mmp_panel *panel;
+
+ size = sizeof(struct mmp_path)
+ + sizeof(struct mmp_overlay) * info->overlay_num;
+ path = kzalloc(size, GFP_KERNEL);
+ if (!path)
+ goto failed;
+
+ /* path set */
+ mutex_init(&path->access_ok);
+ path->dev = info->dev;
+ path->id = info->id;
+ path->name = info->name;
+ path->output_type = info->output_type;
+ path->overlay_num = info->overlay_num;
+ path->plat_data = info->plat_data;
+ path->ops.set_mode = info->set_mode;
+
+ mutex_lock(&disp_lock);
+ /* get panel */
+ list_for_each_entry(panel, &panel_list, node) {
+ if (!strcmp(info->name, panel->plat_path_name)) {
+ dev_info(path->dev, "get panel %s\n", panel->name);
+ path->panel = panel;
+ break;
+ }
+ }
+
+ dev_info(path->dev, "register %s, overlay_num %d\n",
+ path->name, path->overlay_num);
+
+	/* default ops: if already set by the driver, never override them */
+ if (!path->ops.check_status)
+ path->ops.check_status = path_check_status;
+ if (!path->ops.get_overlay)
+ path->ops.get_overlay = path_get_overlay;
+ if (!path->ops.get_modelist)
+ path->ops.get_modelist = path_get_modelist;
+
+ /* step3: init overlays */
+ for (i = 0; i < path->overlay_num; i++) {
+ path->overlays[i].path = path;
+ path->overlays[i].id = i;
+ mutex_init(&path->overlays[i].access_ok);
+ path->overlays[i].ops = info->overlay_ops;
+ }
+
+ /* add to pathlist */
+ list_add_tail(&path->node, &path_list);
+
+ mutex_unlock(&disp_lock);
+ return path;
+
+failed:
+ kfree(path);
+ mutex_unlock(&disp_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(mmp_register_path);
+
+/*
+ * mmp_unregister_path - unregister and destroy path
+ * @path: path to be destroyed
+ *
+ * this function unregisters the path and destroys it.
+ */
+void mmp_unregister_path(struct mmp_path *path)
+{
+ int i;
+
+ if (!path)
+ return;
+
+ mutex_lock(&disp_lock);
+ /* del from pathlist */
+ list_del(&path->node);
+
+ /* deinit overlays */
+ for (i = 0; i < path->overlay_num; i++)
+ mutex_destroy(&path->overlays[i].access_ok);
+
+ mutex_destroy(&path->access_ok);
+
+ kfree(path);
+ mutex_unlock(&disp_lock);
+
+ dev_info(path->dev, "de-register %s\n", path->name);
+}
+EXPORT_SYMBOL_GPL(mmp_unregister_path);
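core.c above gives controller, panel and framebuffer drivers a common registration layer. A rough usage sketch, under the assumption that the three callers live in separate drivers; the path name "mmp_pnpath" is a placeholder and error handling is elided:

#include <video/mmp_disp.h>

/* controller driver: describe and register a display path */
static struct mmp_path *register_example_path(struct device *dev)
{
        struct mmp_path_info info = {
                .name        = "mmp_pnpath",    /* placeholder path name */
                .dev         = dev,
                .overlay_num = 2,
                /* .id, .output_type, .plat_data, .set_mode, .overlay_ops ... */
        };

        return mmp_register_path(&info);
}

/* panel driver: pairs with the path whose name matches plat_path_name */
static void register_example_panel(struct mmp_panel *panel)
{
        panel->plat_path_name = "mmp_pnpath";
        mmp_register_panel(panel);
}

/* buffer (fb) driver: look the registered path up again by name */
static struct mmp_path *find_example_path(void)
{
        return mmp_get_path("mmp_pnpath");
}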
diff --git a/drivers/video/mmp/fb/Kconfig b/drivers/video/mmp/fb/Kconfig
new file mode 100644
index 000000000000..9b0141f105f5
--- /dev/null
+++ b/drivers/video/mmp/fb/Kconfig
@@ -0,0 +1,13 @@
+if MMP_DISP
+
+config MMP_FB
+ bool "fb driver for Marvell MMP Display Subsystem"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default y
+ help
+ fb driver for Marvell MMP Display Subsystem
+
+endif
diff --git a/drivers/video/mmp/fb/Makefile b/drivers/video/mmp/fb/Makefile
new file mode 100644
index 000000000000..709fd1f76abe
--- /dev/null
+++ b/drivers/video/mmp/fb/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MMP_FB) += mmpfb.o
diff --git a/drivers/video/mmp/fb/mmpfb.c b/drivers/video/mmp/fb/mmpfb.c
new file mode 100644
index 000000000000..6d1fa96c5cc3
--- /dev/null
+++ b/drivers/video/mmp/fb/mmpfb.c
@@ -0,0 +1,685 @@
+/*
+ * linux/drivers/video/mmp/fb/mmpfb.c
+ * Framebuffer driver for Marvell Display controller.
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include "mmpfb.h"
+
+static int var_to_pixfmt(struct fb_var_screeninfo *var)
+{
+ /*
+ * Pseudocolor mode?
+ */
+ if (var->bits_per_pixel == 8)
+ return PIXFMT_PSEUDOCOLOR;
+
+ /*
+ * Check for YUV422PLANAR.
+ */
+ if (var->bits_per_pixel == 16 && var->red.length == 8 &&
+ var->green.length == 4 && var->blue.length == 4) {
+ if (var->green.offset >= var->blue.offset)
+ return PIXFMT_YUV422P;
+ else
+ return PIXFMT_YVU422P;
+ }
+
+ /*
+ * Check for YUV420PLANAR.
+ */
+ if (var->bits_per_pixel == 12 && var->red.length == 8 &&
+ var->green.length == 2 && var->blue.length == 2) {
+ if (var->green.offset >= var->blue.offset)
+ return PIXFMT_YUV420P;
+ else
+ return PIXFMT_YVU420P;
+ }
+
+ /*
+ * Check for YUV422PACK.
+ */
+ if (var->bits_per_pixel == 16 && var->red.length == 16 &&
+ var->green.length == 16 && var->blue.length == 16) {
+ if (var->red.offset == 0)
+ return PIXFMT_YUYV;
+ else if (var->green.offset >= var->blue.offset)
+ return PIXFMT_UYVY;
+ else
+ return PIXFMT_VYUY;
+ }
+
+ /*
+ * Check for 565/1555.
+ */
+ if (var->bits_per_pixel == 16 && var->red.length <= 5 &&
+ var->green.length <= 6 && var->blue.length <= 5) {
+ if (var->transp.length == 0) {
+ if (var->red.offset >= var->blue.offset)
+ return PIXFMT_RGB565;
+ else
+ return PIXFMT_BGR565;
+ }
+ }
+
+ /*
+ * Check for 888/A888.
+ */
+ if (var->bits_per_pixel <= 32 && var->red.length <= 8 &&
+ var->green.length <= 8 && var->blue.length <= 8) {
+ if (var->bits_per_pixel == 24 && var->transp.length == 0) {
+ if (var->red.offset >= var->blue.offset)
+ return PIXFMT_RGB888PACK;
+ else
+ return PIXFMT_BGR888PACK;
+ }
+
+ if (var->bits_per_pixel == 32 && var->transp.offset == 24) {
+ if (var->red.offset >= var->blue.offset)
+ return PIXFMT_RGBA888;
+ else
+ return PIXFMT_BGRA888;
+ } else {
+ if (var->red.offset >= var->blue.offset)
+ return PIXFMT_RGB888UNPACK;
+ else
+ return PIXFMT_BGR888UNPACK;
+ }
+
+ /* fall through */
+ }
+
+ return -EINVAL;
+}
+
+static void pixfmt_to_var(struct fb_var_screeninfo *var, int pix_fmt)
+{
+ switch (pix_fmt) {
+ case PIXFMT_RGB565:
+ var->bits_per_pixel = 16;
+ var->red.offset = 11; var->red.length = 5;
+ var->green.offset = 5; var->green.length = 6;
+ var->blue.offset = 0; var->blue.length = 5;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_BGR565:
+ var->bits_per_pixel = 16;
+ var->red.offset = 0; var->red.length = 5;
+ var->green.offset = 5; var->green.length = 6;
+ var->blue.offset = 11; var->blue.length = 5;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_RGB888UNPACK:
+ var->bits_per_pixel = 32;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_BGR888UNPACK:
+ var->bits_per_pixel = 32;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 16; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_RGBA888:
+ var->bits_per_pixel = 32;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 24; var->transp.length = 8;
+ break;
+ case PIXFMT_BGRA888:
+ var->bits_per_pixel = 32;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 16; var->blue.length = 8;
+ var->transp.offset = 24; var->transp.length = 8;
+ break;
+ case PIXFMT_RGB888PACK:
+ var->bits_per_pixel = 24;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_BGR888PACK:
+ var->bits_per_pixel = 24;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 16; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YUV420P:
+ var->bits_per_pixel = 12;
+ var->red.offset = 4; var->red.length = 8;
+ var->green.offset = 2; var->green.length = 2;
+ var->blue.offset = 0; var->blue.length = 2;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YVU420P:
+ var->bits_per_pixel = 12;
+ var->red.offset = 4; var->red.length = 8;
+ var->green.offset = 0; var->green.length = 2;
+ var->blue.offset = 2; var->blue.length = 2;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YUV422P:
+ var->bits_per_pixel = 16;
+ var->red.offset = 8; var->red.length = 8;
+ var->green.offset = 4; var->green.length = 4;
+ var->blue.offset = 0; var->blue.length = 4;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YVU422P:
+ var->bits_per_pixel = 16;
+ var->red.offset = 8; var->red.length = 8;
+ var->green.offset = 0; var->green.length = 4;
+ var->blue.offset = 4; var->blue.length = 4;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_UYVY:
+ var->bits_per_pixel = 16;
+ var->red.offset = 8; var->red.length = 16;
+ var->green.offset = 4; var->green.length = 16;
+ var->blue.offset = 0; var->blue.length = 16;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_VYUY:
+ var->bits_per_pixel = 16;
+ var->red.offset = 8; var->red.length = 16;
+ var->green.offset = 0; var->green.length = 16;
+ var->blue.offset = 4; var->blue.length = 16;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YUYV:
+ var->bits_per_pixel = 16;
+ var->red.offset = 0; var->red.length = 16;
+ var->green.offset = 4; var->green.length = 16;
+ var->blue.offset = 8; var->blue.length = 16;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_PSEUDOCOLOR:
+ var->bits_per_pixel = 8;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 0; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ }
+}
+
+/*
+ * the fb framework has its limitations:
+ * 1. input color and output color are not separated
+ * 2. fb_videomode does not include the output color
+ * so for fb usage we keep an output format that never changes,
+ * and add it when converting to an mmp mode
+ */
+static void fbmode_to_mmpmode(struct mmp_mode *mode,
+ struct fb_videomode *videomode, int output_fmt)
+{
+ u64 div_result = 1000000000000ll;
+ mode->name = videomode->name;
+ mode->refresh = videomode->refresh;
+ mode->xres = videomode->xres;
+ mode->yres = videomode->yres;
+
+ do_div(div_result, videomode->pixclock);
+ mode->pixclock_freq = (u32)div_result;
+
+ mode->left_margin = videomode->left_margin;
+ mode->right_margin = videomode->right_margin;
+ mode->upper_margin = videomode->upper_margin;
+ mode->lower_margin = videomode->lower_margin;
+ mode->hsync_len = videomode->hsync_len;
+ mode->vsync_len = videomode->vsync_len;
+ mode->hsync_invert = !!(videomode->sync & FB_SYNC_HOR_HIGH_ACT);
+ mode->vsync_invert = !!(videomode->sync & FB_SYNC_VERT_HIGH_ACT);
+ /* no defined flag in fb, use vmode>>3*/
+ mode->invert_pixclock = !!(videomode->vmode & 8);
+ mode->pix_fmt_out = output_fmt;
+}
+
+static void mmpmode_to_fbmode(struct fb_videomode *videomode,
+ struct mmp_mode *mode)
+{
+ u64 div_result = 1000000000000ll;
+
+ videomode->name = mode->name;
+ videomode->refresh = mode->refresh;
+ videomode->xres = mode->xres;
+ videomode->yres = mode->yres;
+
+ do_div(div_result, mode->pixclock_freq);
+ videomode->pixclock = (u32)div_result;
+
+ videomode->left_margin = mode->left_margin;
+ videomode->right_margin = mode->right_margin;
+ videomode->upper_margin = mode->upper_margin;
+ videomode->lower_margin = mode->lower_margin;
+ videomode->hsync_len = mode->hsync_len;
+ videomode->vsync_len = mode->vsync_len;
+ videomode->sync = (mode->hsync_invert ? FB_SYNC_HOR_HIGH_ACT : 0)
+ | (mode->vsync_invert ? FB_SYNC_VERT_HIGH_ACT : 0);
+ videomode->vmode = mode->invert_pixclock ? 8 : 0;
+}
+
+static int mmpfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+
+ if (var->bits_per_pixel == 8)
+ return -EINVAL;
+ /*
+ * Basic geometry sanity checks.
+ */
+ if (var->xoffset + var->xres > var->xres_virtual)
+ return -EINVAL;
+ if (var->yoffset + var->yres > var->yres_virtual)
+ return -EINVAL;
+
+ /*
+ * Check size of framebuffer.
+ */
+ if (var->xres_virtual * var->yres_virtual *
+ (var->bits_per_pixel >> 3) > fbi->fb_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
+{
+ return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
+}
+
+static u32 to_rgb(u16 red, u16 green, u16 blue)
+{
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
+ return (red << 16) | (green << 8) | blue;
+}
+
+static int mmpfb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int trans, struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ u32 val;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) {
+ val = chan_to_field(red, &info->var.red);
+ val |= chan_to_field(green, &info->var.green);
+ val |= chan_to_field(blue , &info->var.blue);
+ fbi->pseudo_palette[regno] = val;
+ }
+
+ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) {
+ val = to_rgb(red, green, blue);
+ /* TODO */
+ }
+
+ return 0;
+}
+
+static int mmpfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ struct mmp_addr addr;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.phys[0] = (var->yoffset * var->xres_virtual + var->xoffset)
+ * var->bits_per_pixel / 8 + fbi->fb_start_dma;
+ mmp_overlay_set_addr(fbi->overlay, &addr);
+
+ return 0;
+}
+
+static int var_update(struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct fb_videomode *m;
+ int pix_fmt;
+
+ /* set pix_fmt */
+ pix_fmt = var_to_pixfmt(var);
+ if (pix_fmt < 0)
+ return -EINVAL;
+ pixfmt_to_var(var, pix_fmt);
+ fbi->pix_fmt = pix_fmt;
+
+ /* set var according to best video mode*/
+ m = (struct fb_videomode *)fb_match_mode(var, &info->modelist);
+ if (!m) {
+ dev_err(fbi->dev, "set par: no match mode, use best mode\n");
+ m = (struct fb_videomode *)fb_find_best_mode(var,
+ &info->modelist);
+ fb_videomode_to_var(var, m);
+ }
+ memcpy(&fbi->mode, m, sizeof(struct fb_videomode));
+
+ /* fix to 2* yres */
+ var->yres_virtual = var->yres * 2;
+ info->fix.visual = (pix_fmt == PIXFMT_PSEUDOCOLOR) ?
+ FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
+ info->fix.ypanstep = var->yres;
+ return 0;
+}
+
+static int mmpfb_set_par(struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct mmp_addr addr;
+ struct mmp_win win;
+ struct mmp_mode mode;
+ int ret;
+
+ ret = var_update(info);
+ if (ret != 0)
+ return ret;
+
+ /* set window/path according to new videomode */
+ fbmode_to_mmpmode(&mode, &fbi->mode, fbi->output_fmt);
+ mmp_path_set_mode(fbi->path, &mode);
+
+ memset(&win, 0, sizeof(win));
+ win.xsrc = win.xdst = fbi->mode.xres;
+ win.ysrc = win.ydst = fbi->mode.yres;
+ win.pix_fmt = fbi->pix_fmt;
+ mmp_overlay_set_win(fbi->overlay, &win);
+
+ /* set address always */
+ memset(&addr, 0, sizeof(addr));
+ addr.phys[0] = (var->yoffset * var->xres_virtual + var->xoffset)
+ * var->bits_per_pixel / 8 + fbi->fb_start_dma;
+ mmp_overlay_set_addr(fbi->overlay, &addr);
+
+ return 0;
+}
+
+static void mmpfb_power(struct mmpfb_info *fbi, int power)
+{
+ struct mmp_addr addr;
+ struct mmp_win win;
+ struct fb_var_screeninfo *var = &fbi->fb_info->var;
+
+ /* for power on, always set address/window again */
+ if (power) {
+ memset(&win, 0, sizeof(win));
+ win.xsrc = win.xdst = fbi->mode.xres;
+ win.ysrc = win.ydst = fbi->mode.yres;
+ win.pix_fmt = fbi->pix_fmt;
+ mmp_overlay_set_win(fbi->overlay, &win);
+
+ /* set address always */
+ memset(&addr, 0, sizeof(addr));
+ addr.phys[0] = fbi->fb_start_dma +
+ (var->yoffset * var->xres_virtual + var->xoffset)
+ * var->bits_per_pixel / 8;
+ mmp_overlay_set_addr(fbi->overlay, &addr);
+ }
+ mmp_overlay_set_onoff(fbi->overlay, power);
+}
+
+static int mmpfb_blank(int blank, struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+
+ mmpfb_power(fbi, (blank == FB_BLANK_UNBLANK));
+
+ return 0;
+}
+
+static struct fb_ops mmpfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_blank = mmpfb_blank,
+ .fb_check_var = mmpfb_check_var,
+ .fb_set_par = mmpfb_set_par,
+ .fb_setcolreg = mmpfb_setcolreg,
+ .fb_pan_display = mmpfb_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int modes_setup(struct mmpfb_info *fbi)
+{
+ struct fb_videomode *videomodes;
+ struct mmp_mode *mmp_modes;
+ struct fb_info *info = fbi->fb_info;
+ int videomode_num, i;
+
+ /* get videomodes from path */
+ videomode_num = mmp_path_get_modelist(fbi->path, &mmp_modes);
+ if (!videomode_num) {
+ dev_warn(fbi->dev, "can't get videomode num\n");
+ return 0;
+ }
+ /* put videomode list to info structure */
+ videomodes = kzalloc(sizeof(struct fb_videomode) * videomode_num,
+ GFP_KERNEL);
+ if (!videomodes) {
+ dev_err(fbi->dev, "can't malloc video modes\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < videomode_num; i++)
+ mmpmode_to_fbmode(&videomodes[i], &mmp_modes[i]);
+ fb_videomode_to_modelist(videomodes, videomode_num, &info->modelist);
+
+ /* set videomode[0] as default mode */
+ memcpy(&fbi->mode, &videomodes[0], sizeof(struct fb_videomode));
+ fbi->output_fmt = mmp_modes[0].pix_fmt_out;
+ fb_videomode_to_var(&info->var, &fbi->mode);
+ mmp_path_set_mode(fbi->path, &mmp_modes[0]);
+
+ kfree(videomodes);
+ return videomode_num;
+}
+
+static int fb_info_setup(struct fb_info *info,
+ struct mmpfb_info *fbi)
+{
+ int ret = 0;
+ /* Initialise static fb parameters.*/
+ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
+ FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
+ info->node = -1;
+ strcpy(info->fix.id, fbi->name);
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 0;
+ info->fix.ypanstep = info->var.yres;
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.smem_start = fbi->fb_start_dma;
+ info->fix.smem_len = fbi->fb_size;
+ info->fix.visual = (fbi->pix_fmt == PIXFMT_PSEUDOCOLOR) ?
+ FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = info->var.xres_virtual *
+ info->var.bits_per_pixel / 8;
+ info->fbops = &mmpfb_ops;
+ info->pseudo_palette = fbi->pseudo_palette;
+ info->screen_base = fbi->fb_start;
+ info->screen_size = fbi->fb_size;
+
+ /* For FB framework: Allocate color map and Register framebuffer*/
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static void fb_info_clear(struct fb_info *info)
+{
+ fb_dealloc_cmap(&info->cmap);
+}
+
+static int mmpfb_probe(struct platform_device *pdev)
+{
+ struct mmp_buffer_driver_mach_info *mi;
+ struct fb_info *info = 0;
+ struct mmpfb_info *fbi = 0;
+ int ret, modes_num;
+
+ mi = pdev->dev.platform_data;
+ if (mi == NULL) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -EINVAL;
+ }
+
+ /* initialize fb */
+ info = framebuffer_alloc(sizeof(struct mmpfb_info), &pdev->dev);
+ if (info == NULL)
+ return -ENOMEM;
+ fbi = info->par;
+ if (!fbi) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* init fb */
+ fbi->fb_info = info;
+ platform_set_drvdata(pdev, fbi);
+ fbi->dev = &pdev->dev;
+ fbi->name = mi->name;
+ fbi->pix_fmt = mi->default_pixfmt;
+ pixfmt_to_var(&info->var, fbi->pix_fmt);
+ mutex_init(&fbi->access_ok);
+
+ /* get display path by name */
+ fbi->path = mmp_get_path(mi->path_name);
+ if (!fbi->path) {
+ dev_err(&pdev->dev, "can't get the path %s\n", mi->path_name);
+ ret = -EINVAL;
+ goto failed_destroy_mutex;
+ }
+
+ dev_info(fbi->dev, "path %s get\n", fbi->path->name);
+
+ /* get overlay */
+ fbi->overlay = mmp_path_get_overlay(fbi->path, mi->overlay_id);
+ if (!fbi->overlay) {
+ ret = -EINVAL;
+ goto failed_destroy_mutex;
+ }
+ /* set fetch used */
+ mmp_overlay_set_fetch(fbi->overlay, mi->dmafetch_id);
+
+ modes_num = modes_setup(fbi);
+ if (modes_num < 0) {
+ ret = modes_num;
+ goto failed_destroy_mutex;
+ }
+
+	/*
+	 * if modes were fetched successfully, the panel is not hot-pluggable:
+	 * use the calculated buffer size, otherwise fall back to the default
+	 */
+ if (modes_num > 0) {
+ /* fix to 2* yres */
+ info->var.yres_virtual = info->var.yres * 2;
+
+ /* Allocate framebuffer memory: size = modes xy *4 */
+ fbi->fb_size = info->var.xres_virtual * info->var.yres_virtual
+ * info->var.bits_per_pixel / 8;
+ } else {
+ fbi->fb_size = MMPFB_DEFAULT_SIZE;
+ }
+
+ fbi->fb_start = dma_alloc_coherent(&pdev->dev, PAGE_ALIGN(fbi->fb_size),
+ &fbi->fb_start_dma, GFP_KERNEL);
+ if (fbi->fb_start == NULL) {
+ dev_err(&pdev->dev, "can't alloc framebuffer\n");
+ ret = -ENOMEM;
+ goto failed_destroy_mutex;
+ }
+ memset(fbi->fb_start, 0, fbi->fb_size);
+ dev_info(fbi->dev, "fb %dk allocated\n", fbi->fb_size/1024);
+
+ /* fb power on */
+ if (modes_num > 0)
+ mmpfb_power(fbi, 1);
+
+ ret = fb_info_setup(info, fbi);
+ if (ret < 0)
+ goto failed_free_buff;
+
+ ret = register_framebuffer(info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register fb: %d\n", ret);
+ ret = -ENXIO;
+ goto failed_clear_info;
+ }
+
+ dev_info(fbi->dev, "loaded to /dev/fb%d <%s>.\n",
+ info->node, info->fix.id);
+
+#ifdef CONFIG_LOGO
+ if (fbi->fb_start) {
+ fb_prepare_logo(info, 0);
+ fb_show_logo(info, 0);
+ }
+#endif
+
+ return 0;
+
+failed_clear_info:
+ fb_info_clear(info);
+failed_free_buff:
+ dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbi->fb_size), fbi->fb_start,
+ fbi->fb_start_dma);
+failed_destroy_mutex:
+ mutex_destroy(&fbi->access_ok);
+failed:
+ dev_err(fbi->dev, "mmp-fb: frame buffer device init failed\n");
+ platform_set_drvdata(pdev, NULL);
+
+ framebuffer_release(info);
+
+ return ret;
+}
+
+static struct platform_driver mmpfb_driver = {
+ .driver = {
+ .name = "mmp-fb",
+ .owner = THIS_MODULE,
+ },
+ .probe = mmpfb_probe,
+};
+
+static int mmpfb_init(void)
+{
+ return platform_driver_register(&mmpfb_driver);
+}
+module_init(mmpfb_init);
+
+MODULE_AUTHOR("Zhou Zhu <zhou.zhu@marvell.com>");
+MODULE_DESCRIPTION("Framebuffer driver for Marvell displays");
+MODULE_LICENSE("GPL");
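mmpfb_probe() above consumes only a handful of platform-data fields. A board file could therefore describe the framebuffer roughly as follows; the values are placeholders, only the field names are taken from the probe code:

static struct mmp_buffer_driver_mach_info example_fb_info = {
        .name           = "mmpfb",
        .path_name      = "mmp_pnpath",         /* must match a registered path */
        .overlay_id     = 0,
        .dmafetch_id    = 1,
        .default_pixfmt = PIXFMT_RGB565,
};

static struct platform_device example_fb_device = {
        .name = "mmp-fb",                       /* matches mmpfb_driver.driver.name */
        .dev  = {
                .platform_data = &example_fb_info,
        },
};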
diff --git a/drivers/video/mmp/fb/mmpfb.h b/drivers/video/mmp/fb/mmpfb.h
new file mode 100644
index 000000000000..88c23c10a9ec
--- /dev/null
+++ b/drivers/video/mmp/fb/mmpfb.h
@@ -0,0 +1,54 @@
+/*
+ * linux/drivers/video/mmp/fb/mmpfb.h
+ * Framebuffer driver for Marvell Display controller.
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _MMP_FB_H_
+#define _MMP_FB_H_
+
+#include <video/mmp_disp.h>
+#include <linux/fb.h>
+
+/* LCD controller private state. */
+struct mmpfb_info {
+ struct device *dev;
+ int id;
+ const char *name;
+
+ struct fb_info *fb_info;
+	/* basically, the videomode describes the output */
+ struct fb_videomode mode;
+ int pix_fmt;
+
+ void *fb_start;
+ int fb_size;
+ dma_addr_t fb_start_dma;
+
+ struct mmp_overlay *overlay;
+ struct mmp_path *path;
+
+ struct mutex access_ok;
+
+ unsigned int pseudo_palette[16];
+ int output_fmt;
+};
+
+#define MMPFB_DEFAULT_SIZE (PAGE_ALIGN(1920 * 1080 * 4 * 2))
+#endif /* _MMP_FB_H_ */
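For scale, the fallback MMPFB_DEFAULT_SIZE above corresponds to a double-buffered 1080p ARGB frame: 1920 * 1080 pixels * 4 bytes per pixel * 2 buffers = 16,588,800 bytes, about 15.8 MiB, which PAGE_ALIGN() keeps rounded up to a whole number of pages.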
diff --git a/drivers/video/mmp/hw/Kconfig b/drivers/video/mmp/hw/Kconfig
new file mode 100644
index 000000000000..02f109a20cd0
--- /dev/null
+++ b/drivers/video/mmp/hw/Kconfig
@@ -0,0 +1,20 @@
+if MMP_DISP
+
+config MMP_DISP_CONTROLLER
+ bool "mmp display controller hw support"
+ depends on CPU_PXA910 || CPU_MMP2 || CPU_MMP3 || CPU_PXA988
+ default n
+ help
+	  Marvell MMP display hw controller support.
+	  This controller is used on the Marvell PXA910,
+	  MMP2, MMP3 and PXA988 chips.
+
+config MMP_DISP_SPI
+ bool "mmp display controller spi port"
+ depends on MMP_DISP_CONTROLLER && SPI_MASTER
+ default y
+ help
+	  Marvell MMP display hw controller SPI port support;
+	  it registers as an SPI master for panel usage.
+
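+# A hypothetical defconfig fragment enabling this stack end to end would be:
+#   CONFIG_MMP_DISP=y
+#   CONFIG_MMP_DISP_CONTROLLER=y
+#   CONFIG_MMP_DISP_SPI=y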
+endif
diff --git a/drivers/video/mmp/hw/Makefile b/drivers/video/mmp/hw/Makefile
new file mode 100644
index 000000000000..0000a714fedf
--- /dev/null
+++ b/drivers/video/mmp/hw/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MMP_DISP_CONTROLLER) += mmp_ctrl.o
+obj-$(CONFIG_MMP_DISP_SPI) += mmp_spi.o
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
new file mode 100644
index 000000000000..4bd31b2af398
--- /dev/null
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -0,0 +1,591 @@
+/*
+ * linux/drivers/video/mmp/hw/mmp_ctrl.c
+ * Marvell MMP series Display Controller support
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Guoqing Li <ligq@marvell.com>
+ * Lisa Du <cldu@marvell.com>
+ * Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+
+#include "mmp_ctrl.h"
+
+static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
+{
+ struct mmphw_ctrl *ctrl = (struct mmphw_ctrl *)dev_id;
+ u32 isr, imask, tmp;
+
+ isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
+ imask = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
+
+ do {
+ /* clear only the interrupts that have fired */
+ tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
+ if (tmp & isr)
+ writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
+ } while ((isr = readl(ctrl->reg_base + SPU_IRQ_ISR)) & imask);
+
+ return IRQ_HANDLED;
+}
+
+static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
+{
+ u32 link_config = path_to_path_plat(overlay->path)->link_config;
+ u32 rbswap, uvswap = 0, yuvswap = 0,
+ csc_en = 0, val = 0,
+ vid = overlay_is_vid(overlay);
+
+ switch (pix_fmt) {
+ case PIXFMT_RGB565:
+ case PIXFMT_RGB1555:
+ case PIXFMT_RGB888PACK:
+ case PIXFMT_RGB888UNPACK:
+ case PIXFMT_RGBA888:
+ rbswap = !(link_config & 0x1);
+ break;
+ case PIXFMT_VYUY:
+ case PIXFMT_YVU422P:
+ case PIXFMT_YVU420P:
+ rbswap = link_config & 0x1;
+ uvswap = 1;
+ break;
+ case PIXFMT_YUYV:
+ rbswap = link_config & 0x1;
+ yuvswap = 1;
+ break;
+ default:
+ rbswap = link_config & 0x1;
+ break;
+ }
+
+ switch (pix_fmt) {
+ case PIXFMT_RGB565:
+ case PIXFMT_BGR565:
+ val = 0;
+ break;
+ case PIXFMT_RGB1555:
+ case PIXFMT_BGR1555:
+ val = 0x1;
+ break;
+ case PIXFMT_RGB888PACK:
+ case PIXFMT_BGR888PACK:
+ val = 0x2;
+ break;
+ case PIXFMT_RGB888UNPACK:
+ case PIXFMT_BGR888UNPACK:
+ val = 0x3;
+ break;
+ case PIXFMT_RGBA888:
+ case PIXFMT_BGRA888:
+ val = 0x4;
+ break;
+ case PIXFMT_UYVY:
+ case PIXFMT_VYUY:
+ case PIXFMT_YUYV:
+ val = 0x5;
+ csc_en = 1;
+ break;
+ case PIXFMT_YUV422P:
+ case PIXFMT_YVU422P:
+ val = 0x6;
+ csc_en = 1;
+ break;
+ case PIXFMT_YUV420P:
+ case PIXFMT_YVU420P:
+ val = 0x7;
+ csc_en = 1;
+ break;
+ default:
+ break;
+ }
+
+ return (dma_palette(0) | dma_fmt(vid, val) |
+ dma_swaprb(vid, rbswap) | dma_swapuv(vid, uvswap) |
+ dma_swapyuv(vid, yuvswap) | dma_csc(vid, csc_en));
+}
+
+static void dmafetch_set_fmt(struct mmp_overlay *overlay)
+{
+ u32 tmp;
+ struct mmp_path *path = overlay->path;
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp &= ~dma_mask(overlay_is_vid(overlay));
+ tmp |= fmt_to_reg(overlay, overlay->win.pix_fmt);
+ writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
+}
+
+static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
+{
+ struct lcd_regs *regs = path_regs(overlay->path);
+ u32 pitch;
+
+ /* assert win supported */
+ memcpy(&overlay->win, win, sizeof(struct mmp_win));
+
+ mutex_lock(&overlay->access_ok);
+ pitch = win->xsrc * pixfmt_to_stride(win->pix_fmt);
+ writel_relaxed(pitch, &regs->g_pitch);
+ writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
+ writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
+ writel_relaxed(0, &regs->g_start);
+
+ dmafetch_set_fmt(overlay);
+ mutex_unlock(&overlay->access_ok);
+}
+
+static void dmafetch_onoff(struct mmp_overlay *overlay, int on)
+{
+ u32 mask = overlay_is_vid(overlay) ? CFG_DMA_ENA_MASK :
+ CFG_GRA_ENA_MASK;
+ u32 enable = overlay_is_vid(overlay) ? CFG_DMA_ENA(1) : CFG_GRA_ENA(1);
+ u32 tmp;
+ struct mmp_path *path = overlay->path;
+
+ mutex_lock(&overlay->access_ok);
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp &= ~mask;
+ tmp |= (on ? enable : 0);
+ writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
+ mutex_unlock(&overlay->access_ok);
+}
+
+static void path_enabledisable(struct mmp_path *path, int on)
+{
+ u32 tmp;
+ mutex_lock(&path->access_ok);
+ tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
+ if (on)
+ tmp &= ~SCLK_DISABLE;
+ else
+ tmp |= SCLK_DISABLE;
+ writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
+ mutex_unlock(&path->access_ok);
+}
+
+static void path_onoff(struct mmp_path *path, int on)
+{
+ if (path->status == on) {
+ dev_info(path->dev, "path %s is already %s\n",
+ path->name, stat_name(path->status));
+ return;
+ }
+
+ if (on) {
+ path_enabledisable(path, 1);
+
+ if (path->panel && path->panel->set_onoff)
+ path->panel->set_onoff(path->panel, 1);
+ } else {
+ if (path->panel && path->panel->set_onoff)
+ path->panel->set_onoff(path->panel, 0);
+
+ path_enabledisable(path, 0);
+ }
+ path->status = on;
+}
+
+static void overlay_set_onoff(struct mmp_overlay *overlay, int on)
+{
+ if (overlay->status == on) {
+ dev_info(overlay_to_ctrl(overlay)->dev, "overlay %s is already %s\n",
+ overlay->path->name, stat_name(overlay->status));
+ return;
+ }
+ overlay->status = on;
+ dmafetch_onoff(overlay, on);
+ if (overlay->path->ops.check_status(overlay->path)
+ != overlay->path->status)
+ path_onoff(overlay->path, on);
+}
+
+static void overlay_set_fetch(struct mmp_overlay *overlay, int fetch_id)
+{
+ overlay->dmafetch_id = fetch_id;
+}
+
+static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
+{
+ struct lcd_regs *regs = path_regs(overlay->path);
+
+ /* FIXME: assert addr supported */
+ memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));
+ writel(addr->phys[0], &regs->g_0);
+
+ return overlay->addr.phys[0];
+}
+
+static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
+{
+ struct lcd_regs *regs = path_regs(path);
+ u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
+ link_config = path_to_path_plat(path)->link_config;
+
+ /* FIXME: assert videomode supported */
+ memcpy(&path->mode, mode, sizeof(struct mmp_mode));
+
+ mutex_lock(&path->access_ok);
+
+ /* polarity of timing signals */
+ tmp = readl_relaxed(ctrl_regs(path) + intf_ctrl(path->id)) & 0x1;
+ tmp |= mode->vsync_invert ? 0 : 0x8;
+ tmp |= mode->hsync_invert ? 0 : 0x4;
+ tmp |= link_config & CFG_DUMBMODE_MASK;
+ tmp |= CFG_DUMB_ENA(1);
+ writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));
+
+ writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
+ writel_relaxed((mode->left_margin << 16) | mode->right_margin,
+ &regs->screen_h_porch);
+ writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
+ &regs->screen_v_porch);
+ total_x = mode->xres + mode->left_margin + mode->right_margin +
+ mode->hsync_len;
+ total_y = mode->yres + mode->upper_margin + mode->lower_margin +
+ mode->vsync_len;
+ writel_relaxed((total_y << 16) | total_x, &regs->screen_size);
+
+ /* vsync ctrl */
+ if (path->output_type == PATH_OUT_DSI)
+ vsync_ctrl = 0x01330133;
+ else
+ vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
+ | (mode->xres + mode->right_margin);
+ writel_relaxed(vsync_ctrl, &regs->vsync_ctrl);
+
+ /* set pixclock div */
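+ /*
+ * The divider is the source clock rate over the requested pixel clock,
+ * rounded up so that the generated pixel clock never exceeds the request.
+ * For example (hypothetical rates), a 312MHz source clock and a 74.25MHz
+ * pixel clock give 312 / 74.25 = 4.2, rounded up to a divider of 5.
+ */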
+ sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
+ sclk_div = sclk_src / mode->pixclock_freq;
+ if (sclk_div * mode->pixclock_freq < sclk_src)
+ sclk_div++;
+
+ dev_info(path->dev, "%s sclk_src %d sclk_div 0x%x pclk %d\n",
+ __func__, sclk_src, sclk_div, mode->pixclock_freq);
+
+ tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
+ tmp &= ~CLK_INT_DIV_MASK;
+ tmp |= sclk_div;
+ writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
+
+ mutex_unlock(&path->access_ok);
+}
+
+static struct mmp_overlay_ops mmphw_overlay_ops = {
+ .set_fetch = overlay_set_fetch,
+ .set_onoff = overlay_set_onoff,
+ .set_win = overlay_set_win,
+ .set_addr = overlay_set_addr,
+};
+
+static void ctrl_set_default(struct mmphw_ctrl *ctrl)
+{
+ u32 tmp, irq_mask;
+
+ /*
+ * LCD Global Control (LCD_TOP_CTRL) should be configured before
+ * any other LCD register is read or written, or there may be issues.
+ */
+ tmp = readl_relaxed(ctrl->reg_base + LCD_TOP_CTRL);
+ tmp |= 0xfff0;
+ writel_relaxed(tmp, ctrl->reg_base + LCD_TOP_CTRL);
+
+
+ /* disable all interrupts */
+ irq_mask = path_imasks(0) | err_imask(0) |
+ path_imasks(1) | err_imask(1);
+ tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
+ tmp &= ~irq_mask;
+ writel_relaxed(tmp, ctrl->reg_base + SPU_IRQ_ENA);
+}
+
+static void path_set_default(struct mmp_path *path)
+{
+ struct lcd_regs *regs = path_regs(path);
+ u32 dma_ctrl1, mask, tmp, path_config;
+
+ path_config = path_to_path_plat(path)->path_config;
+
+ /* Configure IOPAD: should be parallel only */
+ if (PATH_OUT_PARALLEL == path->output_type) {
+ mask = CFG_IOPADMODE_MASK | CFG_BURST_MASK | CFG_BOUNDARY_MASK;
+ tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL);
+ tmp &= ~mask;
+ tmp |= path_config;
+ writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL);
+ }
+
+ /* Select path clock source */
+ tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
+ tmp &= ~SCLK_SRC_SEL_MASK;
+ tmp |= path_config;
+ writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
+
+ /*
+ * Configure default bits: vsync triggers DMA,
+ * power save enable, configure alpha registers to
+ * display 100% graphics, and set pixel command.
+ */
+ dma_ctrl1 = 0x2032ff81;
+
+ dma_ctrl1 |= CFG_VSYNC_INV_MASK;
+ writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));
+
+ /* Configure default register values */
+ writel_relaxed(0x00000000, &regs->blank_color);
+ writel_relaxed(0x00000000, &regs->g_1);
+ writel_relaxed(0x00000000, &regs->g_start);
+
+ /*
+ * 1.enable multiple burst request in DMA AXI
+ * bus arbiter for faster read if not tv path;
+ * 2.enable horizontal smooth filter;
+ */
+ if (PATH_PN == path->id) {
+ mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
+ | CFG_ARBFAST_ENA(1);
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp |= mask;
+ writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
+ } else if (PATH_TV == path->id) {
+ mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
+ | CFG_ARBFAST_ENA(1);
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp &= ~mask;
+ tmp |= CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK;
+ writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
+ }
+}
+
+static int path_init(struct mmphw_path_plat *path_plat,
+ struct mmp_mach_path_config *config)
+{
+ struct mmphw_ctrl *ctrl = path_plat->ctrl;
+ struct mmp_path_info *path_info;
+ struct mmp_path *path = NULL;
+
+ dev_info(ctrl->dev, "%s: %s\n", __func__, config->name);
+
+ /* init driver data */
+ path_info = kzalloc(sizeof(struct mmp_path_info), GFP_KERNEL);
+ if (!path_info) {
+ dev_err(ctrl->dev, "%s: unable to alloc path_info for %s\n",
+ __func__, config->name);
+ return 0;
+ }
+ path_info->name = config->name;
+ path_info->id = path_plat->id;
+ path_info->dev = ctrl->dev;
+ path_info->overlay_num = config->overlay_num;
+ path_info->overlay_ops = &mmphw_overlay_ops;
+ path_info->set_mode = path_set_mode;
+ path_info->plat_data = path_plat;
+
+ /* create/register platform device */
+ path = mmp_register_path(path_info);
+ if (!path) {
+ kfree(path_info);
+ return 0;
+ }
+ path_plat->path = path;
+ path_plat->path_config = config->path_config;
+ path_plat->link_config = config->link_config;
+ path_set_default(path);
+
+ kfree(path_info);
+ return 1;
+}
+
+static void path_deinit(struct mmphw_path_plat *path_plat)
+{
+ if (!path_plat)
+ return;
+
+ if (path_plat->path)
+ mmp_unregister_path(path_plat->path);
+}
+
+static int mmphw_probe(struct platform_device *pdev)
+{
+ struct mmp_mach_plat_info *mi;
+ struct resource *res;
+ int ret, i, size, irq;
+ struct mmphw_path_plat *path_plat;
+ struct mmphw_ctrl *ctrl = NULL;
+
+ /* get resources from platform data */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
+ ret = -ENOENT;
+ goto failed;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
+ ret = -ENOENT;
+ goto failed;
+ }
+
+ /* get configs from platform data */
+ mi = pdev->dev.platform_data;
+ if (mi == NULL || !mi->path_num || !mi->paths) {
+ dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* allocate */
+ size = sizeof(struct mmphw_ctrl) + sizeof(struct mmphw_path_plat) *
+ mi->path_num;
+ ctrl = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!ctrl) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ ctrl->name = mi->name;
+ ctrl->path_num = mi->path_num;
+ ctrl->dev = &pdev->dev;
+ ctrl->irq = irq;
+ platform_set_drvdata(pdev, ctrl);
+ mutex_init(&ctrl->access_ok);
+
+ /* map registers.*/
+ if (!devm_request_mem_region(ctrl->dev, res->start,
+ resource_size(res), ctrl->name)) {
+ dev_err(ctrl->dev,
+ "can't request region for resource %pR\n", res);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
+ res->start, resource_size(res));
+ if (ctrl->reg_base == NULL) {
+ dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
+ res->start, res->end);
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ /* request irq */
+ ret = devm_request_irq(ctrl->dev, ctrl->irq, ctrl_handle_irq,
+ IRQF_SHARED, "lcd_controller", ctrl);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "%s unable to request IRQ %d\n",
+ __func__, ctrl->irq);
+ ret = -ENXIO;
+ goto failed;
+ }
+
+ /* get clock */
+ ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
+ if (IS_ERR(ctrl->clk)) {
+ dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
+ ret = -ENOENT;
+ goto failed_get_clk;
+ }
+ clk_prepare_enable(ctrl->clk);
+
+ /* init global regs */
+ ctrl_set_default(ctrl);
+
+ /* init pathes from machine info and register them */
+ for (i = 0; i < ctrl->path_num; i++) {
+ /* get from config and machine info */
+ path_plat = &ctrl->path_plats[i];
+ path_plat->id = i;
+ path_plat->ctrl = ctrl;
+
+ /* path init */
+ if (!path_init(path_plat, &mi->paths[i])) {
+ ret = -EINVAL;
+ goto failed_path_init;
+ }
+ }
+
+#ifdef CONFIG_MMP_DISP_SPI
+ ret = lcd_spi_register(ctrl);
+ if (ret < 0)
+ goto failed_path_init;
+#endif
+
+ dev_info(ctrl->dev, "device init done\n");
+
+ return 0;
+
+failed_path_init:
+ for (i = 0; i < ctrl->path_num; i++) {
+ path_plat = &ctrl->path_plats[i];
+ path_deinit(path_plat);
+ }
+
+ if (ctrl->clk) {
+ clk_disable_unprepare(ctrl->clk);
+ devm_clk_put(ctrl->dev, ctrl->clk);
+ }
+failed_get_clk:
+ devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
+failed:
+ if (ctrl) {
+ if (ctrl->reg_base)
+ devm_iounmap(ctrl->dev, ctrl->reg_base);
+ devm_release_mem_region(ctrl->dev, res->start,
+ resource_size(res));
+ devm_kfree(ctrl->dev, ctrl);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ dev_err(&pdev->dev, "device init failed\n");
+
+ return ret;
+}
+
+static struct platform_driver mmphw_driver = {
+ .driver = {
+ .name = "mmp-disp",
+ .owner = THIS_MODULE,
+ },
+ .probe = mmphw_probe,
+};
+
+static int mmphw_init(void)
+{
+ return platform_driver_register(&mmphw_driver);
+}
+module_init(mmphw_init);
+
+MODULE_AUTHOR("Li Guoqing<ligq@marvell.com>");
+MODULE_DESCRIPTION("Framebuffer driver for mmp");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/mmp/hw/mmp_ctrl.h b/drivers/video/mmp/hw/mmp_ctrl.h
new file mode 100644
index 000000000000..6408d8ef3abb
--- /dev/null
+++ b/drivers/video/mmp/hw/mmp_ctrl.h
@@ -0,0 +1,1974 @@
+/*
+ * drivers/video/mmp/hw/mmp_ctrl.h
+ *
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Guoqing Li <ligq@marvell.com>
+ * Lisa Du <cldu@marvell.com>
+ * Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _MMP_CTRL_H_
+#define _MMP_CTRL_H_
+
+#include <video/mmp_disp.h>
+
+/* ------------< LCD register >------------ */
+struct lcd_regs {
+/* TV patch register for MMP2 */
+/* 32 bit TV Video Frame0 Y Starting Address */
+#define LCD_TVD_START_ADDR_Y0 (0x0000)
+/* 32 bit TV Video Frame0 U Starting Address */
+#define LCD_TVD_START_ADDR_U0 (0x0004)
+/* 32 bit TV Video Frame0 V Starting Address */
+#define LCD_TVD_START_ADDR_V0 (0x0008)
+/* 32 bit TV Video Frame0 Command Starting Address */
+#define LCD_TVD_START_ADDR_C0 (0x000C)
+/* 32 bit TV Video Frame1 Y Starting Address Register*/
+#define LCD_TVD_START_ADDR_Y1 (0x0010)
+/* 32 bit TV Video Frame1 U Starting Address Register*/
+#define LCD_TVD_START_ADDR_U1 (0x0014)
+/* 32 bit TV Video Frame1 V Starting Address Register*/
+#define LCD_TVD_START_ADDR_V1 (0x0018)
+/* 32 bit TV Video Frame1 Command Starting Address Register*/
+#define LCD_TVD_START_ADDR_C1 (0x001C)
+/* 32 bit TV Video Y and C Line Length (Pitch) Register*/
+#define LCD_TVD_PITCH_YC (0x0020)
+/* 32 bit TV Video U and V Line Length (Pitch) Register*/
+#define LCD_TVD_PITCH_UV (0x0024)
+/* 32 bit TV Video Starting Point on Screen Register*/
+#define LCD_TVD_OVSA_HPXL_VLN (0x0028)
+/* 32 bit TV Video Source Size Register*/
+#define LCD_TVD_HPXL_VLN (0x002C)
+/* 32 bit TV Video Destination Size (After Zooming)Register*/
+#define LCD_TVDZM_HPXL_VLN (0x0030)
+ u32 v_y0;
+ u32 v_u0;
+ u32 v_v0;
+ u32 v_c0;
+ u32 v_y1;
+ u32 v_u1;
+ u32 v_v1;
+ u32 v_c1;
+ u32 v_pitch_yc; /* Video Y and C Line Length (Pitch) */
+ u32 v_pitch_uv; /* Video U and V Line Length (Pitch) */
+ u32 v_start; /* Video Starting Point on Screen */
+ u32 v_size; /* Video Source Size */
+ u32 v_size_z; /* Video Destination Size (After Zooming) */
+
+/* 32 bit TV Graphic Frame 0 Starting Address Register*/
+#define LCD_TVG_START_ADDR0 (0x0034)
+/* 32 bit TV Graphic Frame 1 Starting Address Register*/
+#define LCD_TVG_START_ADDR1 (0x0038)
+/* 32 bit TV Graphic Line Length(Pitch)Register*/
+#define LCD_TVG_PITCH (0x003C)
+/* 32 bit TV Graphic Starting Point on Screen Register*/
+#define LCD_TVG_OVSA_HPXL_VLN (0x0040)
+/* 32 bit TV Graphic Source Size Register*/
+#define LCD_TVG_HPXL_VLN (0x0044)
+/* 32 bit TV Graphic Destination size (after Zooming)Register*/
+#define LCD_TVGZM_HPXL_VLN (0x0048)
+ u32 g_0; /* Graphic Frame 0/1 Starting Address */
+ u32 g_1;
+ u32 g_pitch; /* Graphic Line Length (Pitch) */
+ u32 g_start; /* Graphic Starting Point on Screen */
+ u32 g_size; /* Graphic Source Size */
+ u32 g_size_z; /* Graphic Destination Size (After Zooming) */
+
+/* 32 bit TV Hardware Cursor Starting Point on screen Register*/
+#define LCD_TVC_OVSA_HPXL_VLN (0x004C)
+/* 32 bit TV Hardware Cursor Size Register */
+#define LCD_TVC_HPXL_VLN (0x0050)
+ u32 hc_start; /* Hardware Cursor */
+ u32 hc_size; /* Hardware Cursor */
+
+/* 32 bit TV Total Screen Size Register*/
+#define LCD_TV_V_H_TOTAL (0x0054)
+/* 32 bit TV Screen Active Size Register*/
+#define LCD_TV_V_H_ACTIVE (0x0058)
+/* 32 bit TV Screen Horizontal Porch Register*/
+#define LCD_TV_H_PORCH (0x005C)
+/* 32 bit TV Screen Vertical Porch Register*/
+#define LCD_TV_V_PORCH (0x0060)
+ u32 screen_size; /* Screen Total Size */
+ u32 screen_active; /* Screen Active Size */
+ u32 screen_h_porch; /* Screen Horizontal Porch */
+ u32 screen_v_porch; /* Screen Vertical Porch */
+
+/* 32 bit TV Screen Blank Color Register*/
+#define LCD_TV_BLANKCOLOR (0x0064)
+/* 32 bit TV Hardware Cursor Color1 Register*/
+#define LCD_TV_ALPHA_COLOR1 (0x0068)
+/* 32 bit TV Hardware Cursor Color2 Register*/
+#define LCD_TV_ALPHA_COLOR2 (0x006C)
+ u32 blank_color; /* Screen Blank Color */
+ u32 hc_Alpha_color1; /* Hardware Cursor Color1 */
+ u32 hc_Alpha_color2; /* Hardware Cursor Color2 */
+
+/* 32 bit TV Video Y Color Key Control*/
+#define LCD_TV_COLORKEY_Y (0x0070)
+/* 32 bit TV Video U Color Key Control*/
+#define LCD_TV_COLORKEY_U (0x0074)
+/* 32 bit TV Video V Color Key Control*/
+#define LCD_TV_COLORKEY_V (0x0078)
+ u32 v_colorkey_y; /* Video Y Color Key Control */
+ u32 v_colorkey_u; /* Video U Color Key Control */
+ u32 v_colorkey_v; /* Video V Color Key Control */
+
+/* 32 bit TV VSYNC PulsePixel Edge Control Register*/
+#define LCD_TV_SEPXLCNT (0x007C)
+ u32 vsync_ctrl; /* VSYNC PulsePixel Edge Control */
+};
+
+#define intf_ctrl(id) ((id) ? (((id) & 1) ? LCD_TVIF_CTRL : \
+ LCD_DUMB2_CTRL) : LCD_SPU_DUMB_CTRL)
+#define dma_ctrl0(id) ((id) ? (((id) & 1) ? LCD_TV_CTRL0 : \
+ LCD_PN2_CTRL0) : LCD_SPU_DMA_CTRL0)
+#define dma_ctrl1(id) ((id) ? (((id) & 1) ? LCD_TV_CTRL1 : \
+ LCD_PN2_CTRL1) : LCD_SPU_DMA_CTRL1)
+#define dma_ctrl(ctrl1, id) (ctrl1 ? dma_ctrl1(id) : dma_ctrl0(id))
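+/*
+ * For illustration: dma_ctrl(0, id) resolves to LCD_SPU_DMA_CTRL0 (0x0190)
+ * for the panel path (id 0), LCD_TV_CTRL0 (0x0080) for the TV path (id 1)
+ * and LCD_PN2_CTRL0 (0x02C8) for the second panel path (id 2);
+ * dma_ctrl(1, id) selects the matching CTRL1 register in the same way.
+ */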
+
+/* 32 bit TV Path DMA Control 0*/
+#define LCD_TV_CTRL0 (0x0080)
+/* 32 bit TV Path DMA Control 1*/
+#define LCD_TV_CTRL1 (0x0084)
+/* 32 bit TV Path Video Contrast*/
+#define LCD_TV_CONTRAST (0x0088)
+/* 32 bit TV Path Video Saturation*/
+#define LCD_TV_SATURATION (0x008C)
+/* 32 bit TV Path Video Hue Adjust*/
+#define LCD_TV_CBSH_HUE (0x0090)
+/* 32 bit TV Path TVIF Control Register */
+#define LCD_TVIF_CTRL (0x0094)
+#define TV_VBLNK_VALID_EN (1 << 12)
+
+/* 32 bit TV Path I/O Pad Control*/
+#define LCD_TVIOPAD_CTRL (0x0098)
+/* 32 bit TV Path Clock Divider */
+#define LCD_TCLK_DIV (0x009C)
+
+#define LCD_SCLK(path) ((PATH_PN == path->id) ? LCD_CFG_SCLK_DIV :\
+ ((PATH_TV == path->id) ? LCD_TCLK_DIV : LCD_PN2_SCLK_DIV))
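+/*
+ * LCD_SCLK(path) follows the same pattern: it picks LCD_CFG_SCLK_DIV for the
+ * panel path, LCD_TCLK_DIV for the TV path and LCD_PN2_SCLK_DIV otherwise,
+ * so callers such as path_set_mode() can program the clock divider without
+ * caring which path they drive.
+ */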
+
+/* dither configure */
+#ifdef CONFIG_CPU_PXA988
+#define LCD_DITHER_CTRL (0x01EC)
+#else
+#define LCD_DITHER_CTRL (0x00A0)
+#endif
+
+#define DITHER_TBL_INDEX_SEL(s) ((s) << 16)
+#define DITHER_MODE2(m) ((m) << 12)
+#define DITHER_MODE2_SHIFT (12)
+#define DITHER_4X8_EN2 (1 << 9)
+#define DITHER_4X8_EN2_SHIFT (9)
+#define DITHER_EN2 (1 << 8)
+#define DITHER_MODE1(m) ((m) << 4)
+#define DITHER_MODE1_SHIFT (4)
+#define DITHER_4X8_EN1 (1 << 1)
+#define DITHER_4X8_EN1_SHIFT (1)
+#define DITHER_EN1 (1)
+
+/* dither table data is fixed by the input and output video bpp */
+#ifdef CONFIG_CPU_PXA988
+#define DITHER_TB_4X4_INDEX0 (0x6e4ca280)
+#define DITHER_TB_4X4_INDEX1 (0x5d7f91b3)
+#define DITHER_TB_4X8_INDEX0 (0xb391a280)
+#define DITHER_TB_4X8_INDEX1 (0x7f5d6e4c)
+#define DITHER_TB_4X8_INDEX2 (0x80a291b3)
+#define DITHER_TB_4X8_INDEX3 (0x4c6e5d7f)
+#define LCD_DITHER_TBL_DATA (0x01F0)
+#else
+#define DITHER_TB_4X4_INDEX0 (0x3b19f7d5)
+#define DITHER_TB_4X4_INDEX1 (0x082ac4e6)
+#define DITHER_TB_4X8_INDEX0 (0xf7d508e6)
+#define DITHER_TB_4X8_INDEX1 (0x3b194c2a)
+#define DITHER_TB_4X8_INDEX2 (0xc4e6d5f7)
+#define DITHER_TB_4X8_INDEX3 (0x082a193b)
+#define LCD_DITHER_TBL_DATA (0x00A4)
+#endif
+
+/* Video Frame 0&1 start address registers */
+#define LCD_SPU_DMA_START_ADDR_Y0 0x00C0
+#define LCD_SPU_DMA_START_ADDR_U0 0x00C4
+#define LCD_SPU_DMA_START_ADDR_V0 0x00C8
+#define LCD_CFG_DMA_START_ADDR_0 0x00CC /* Cmd address */
+#define LCD_SPU_DMA_START_ADDR_Y1 0x00D0
+#define LCD_SPU_DMA_START_ADDR_U1 0x00D4
+#define LCD_SPU_DMA_START_ADDR_V1 0x00D8
+#define LCD_CFG_DMA_START_ADDR_1 0x00DC /* Cmd address */
+
+/* YC & UV Pitch */
+#define LCD_SPU_DMA_PITCH_YC 0x00E0
+#define SPU_DMA_PITCH_C(c) ((c)<<16)
+#define SPU_DMA_PITCH_Y(y) (y)
+#define LCD_SPU_DMA_PITCH_UV 0x00E4
+#define SPU_DMA_PITCH_V(v) ((v)<<16)
+#define SPU_DMA_PITCH_U(u) (u)
+
+/* Video Starting Point on Screen Register */
+#define LCD_SPUT_DMA_OVSA_HPXL_VLN 0x00E8
+#define CFG_DMA_OVSA_VLN(y) ((y)<<16) /* 0~0xfff */
+#define CFG_DMA_OVSA_HPXL(x) (x) /* 0~0xfff */
+
+/* Video Size Register */
+#define LCD_SPU_DMA_HPXL_VLN 0x00EC
+#define CFG_DMA_VLN(y) ((y)<<16)
+#define CFG_DMA_HPXL(x) (x)
+
+/* Video Size After zooming Register */
+#define LCD_SPU_DZM_HPXL_VLN 0x00F0
+#define CFG_DZM_VLN(y) ((y)<<16)
+#define CFG_DZM_HPXL(x) (x)
+
+/* Graphic Frame 0&1 Starting Address Register */
+#define LCD_CFG_GRA_START_ADDR0 0x00F4
+#define LCD_CFG_GRA_START_ADDR1 0x00F8
+
+/* Graphic Frame Pitch */
+#define LCD_CFG_GRA_PITCH 0x00FC
+
+/* Graphic Starting Point on Screen Register */
+#define LCD_SPU_GRA_OVSA_HPXL_VLN 0x0100
+#define CFG_GRA_OVSA_VLN(y) ((y)<<16)
+#define CFG_GRA_OVSA_HPXL(x) (x)
+
+/* Graphic Size Register */
+#define LCD_SPU_GRA_HPXL_VLN 0x0104
+#define CFG_GRA_VLN(y) ((y)<<16)
+#define CFG_GRA_HPXL(x) (x)
+
+/* Graphic Size after Zooming Register */
+#define LCD_SPU_GZM_HPXL_VLN 0x0108
+#define CFG_GZM_VLN(y) ((y)<<16)
+#define CFG_GZM_HPXL(x) (x)
+
+/* HW Cursor Starting Point on Screen Register */
+#define LCD_SPU_HWC_OVSA_HPXL_VLN 0x010C
+#define CFG_HWC_OVSA_VLN(y) ((y)<<16)
+#define CFG_HWC_OVSA_HPXL(x) (x)
+
+/* HW Cursor Size */
+#define LCD_SPU_HWC_HPXL_VLN 0x0110
+#define CFG_HWC_VLN(y) ((y)<<16)
+#define CFG_HWC_HPXL(x) (x)
+
+/* Total Screen Size Register */
+#define LCD_SPUT_V_H_TOTAL 0x0114
+#define CFG_V_TOTAL(y) ((y)<<16)
+#define CFG_H_TOTAL(x) (x)
+
+/* Total Screen Active Size Register */
+#define LCD_SPU_V_H_ACTIVE 0x0118
+#define CFG_V_ACTIVE(y) ((y)<<16)
+#define CFG_H_ACTIVE(x) (x)
+
+/* Screen H&V Porch Register */
+#define LCD_SPU_H_PORCH 0x011C
+#define CFG_H_BACK_PORCH(b) ((b)<<16)
+#define CFG_H_FRONT_PORCH(f) (f)
+#define LCD_SPU_V_PORCH 0x0120
+#define CFG_V_BACK_PORCH(b) ((b)<<16)
+#define CFG_V_FRONT_PORCH(f) (f)
+
+/* Screen Blank Color Register */
+#define LCD_SPU_BLANKCOLOR 0x0124
+#define CFG_BLANKCOLOR_MASK 0x00FFFFFF
+#define CFG_BLANKCOLOR_R_MASK 0x000000FF
+#define CFG_BLANKCOLOR_G_MASK 0x0000FF00
+#define CFG_BLANKCOLOR_B_MASK 0x00FF0000
+
+/* HW Cursor Color 1&2 Register */
+#define LCD_SPU_ALPHA_COLOR1 0x0128
+#define CFG_HWC_COLOR1 0x00FFFFFF
+#define CFG_HWC_COLOR1_R(red) ((red)<<16)
+#define CFG_HWC_COLOR1_G(green) ((green)<<8)
+#define CFG_HWC_COLOR1_B(blue) (blue)
+#define CFG_HWC_COLOR1_R_MASK 0x000000FF
+#define CFG_HWC_COLOR1_G_MASK 0x0000FF00
+#define CFG_HWC_COLOR1_B_MASK 0x00FF0000
+#define LCD_SPU_ALPHA_COLOR2 0x012C
+#define CFG_HWC_COLOR2 0x00FFFFFF
+#define CFG_HWC_COLOR2_R_MASK 0x000000FF
+#define CFG_HWC_COLOR2_G_MASK 0x0000FF00
+#define CFG_HWC_COLOR2_B_MASK 0x00FF0000
+
+/* Video YUV Color Key Control */
+#define LCD_SPU_COLORKEY_Y 0x0130
+#define CFG_CKEY_Y2(y2) ((y2)<<24)
+#define CFG_CKEY_Y2_MASK 0xFF000000
+#define CFG_CKEY_Y1(y1) ((y1)<<16)
+#define CFG_CKEY_Y1_MASK 0x00FF0000
+#define CFG_CKEY_Y(y) ((y)<<8)
+#define CFG_CKEY_Y_MASK 0x0000FF00
+#define CFG_ALPHA_Y(y) (y)
+#define CFG_ALPHA_Y_MASK 0x000000FF
+#define LCD_SPU_COLORKEY_U 0x0134
+#define CFG_CKEY_U2(u2) ((u2)<<24)
+#define CFG_CKEY_U2_MASK 0xFF000000
+#define CFG_CKEY_U1(u1) ((u1)<<16)
+#define CFG_CKEY_U1_MASK 0x00FF0000
+#define CFG_CKEY_U(u) ((u)<<8)
+#define CFG_CKEY_U_MASK 0x0000FF00
+#define CFG_ALPHA_U(u) (u)
+#define CFG_ALPHA_U_MASK 0x000000FF
+#define LCD_SPU_COLORKEY_V 0x0138
+#define CFG_CKEY_V2(v2) ((v2)<<24)
+#define CFG_CKEY_V2_MASK 0xFF000000
+#define CFG_CKEY_V1(v1) ((v1)<<16)
+#define CFG_CKEY_V1_MASK 0x00FF0000
+#define CFG_CKEY_V(v) ((v)<<8)
+#define CFG_CKEY_V_MASK 0x0000FF00
+#define CFG_ALPHA_V(v) (v)
+#define CFG_ALPHA_V_MASK 0x000000FF
+
+/* Graphics/Video DMA color key enable bits in LCD_TV_CTRL1 */
+#define CFG_CKEY_GRA 0x2
+#define CFG_CKEY_DMA 0x1
+
+/* Interlace mode enable bits in LCD_TV_CTRL1 */
+#define CFG_TV_INTERLACE_EN (1 << 22)
+#define CFG_TV_NIB (1 << 0)
+
+#define LCD_PN_SEPXLCNT 0x013c /* MMP2 */
+
+/* SPI Read Data Register */
+#define LCD_SPU_SPI_RXDATA 0x0140
+
+/* Smart Panel Read Data Register */
+#define LCD_SPU_ISA_RSDATA 0x0144
+#define ISA_RXDATA_16BIT_1_DATA_MASK 0x000000FF
+#define ISA_RXDATA_16BIT_2_DATA_MASK 0x0000FF00
+#define ISA_RXDATA_16BIT_3_DATA_MASK 0x00FF0000
+#define ISA_RXDATA_16BIT_4_DATA_MASK 0xFF000000
+#define ISA_RXDATA_32BIT_1_DATA_MASK 0x00FFFFFF
+
+#define LCD_SPU_DBG_ISA (0x0148) /* TTC */
+#define LCD_SPU_DMAVLD_YC (0x014C)
+#define LCD_SPU_DMAVLD_UV (0x0150)
+#define LCD_SPU_DMAVLD_UVSPU_GRAVLD (0x0154)
+
+#define LCD_READ_IOPAD (0x0148) /* MMP2*/
+#define LCD_DMAVLD_YC (0x014C)
+#define LCD_DMAVLD_UV (0x0150)
+#define LCD_TVGGRAVLD_HLEN (0x0154)
+
+/* HWC SRAM Read Data Register */
+#define LCD_SPU_HWC_RDDAT 0x0158
+
+/* Gamma Table SRAM Read Data Register */
+#define LCD_SPU_GAMMA_RDDAT 0x015c
+#define CFG_GAMMA_RDDAT_MASK 0x000000FF
+
+/* Palette Table SRAM Read Data Register */
+#define LCD_SPU_PALETTE_RDDAT 0x0160
+#define CFG_PALETTE_RDDAT_MASK 0x00FFFFFF
+
+#define LCD_SPU_DBG_DMATOP (0x0164) /* TTC */
+#define LCD_SPU_DBG_GRATOP (0x0168)
+#define LCD_SPU_DBG_TXCTRL (0x016C)
+#define LCD_SPU_DBG_SLVTOP (0x0170)
+#define LCD_SPU_DBG_MUXTOP (0x0174)
+
+#define LCD_SLV_DBG (0x0164) /* MMP2 */
+#define LCD_TVDVLD_YC (0x0168)
+#define LCD_TVDVLD_UV (0x016C)
+#define LCD_TVC_RDDAT (0x0170)
+#define LCD_TV_GAMMA_RDDAT (0x0174)
+
+/* I/O Pads Input Read Only Register */
+#define LCD_SPU_IOPAD_IN 0x0178
+#define CFG_IOPAD_IN_MASK 0x0FFFFFFF
+
+#define LCD_TV_PALETTE_RDDAT (0x0178) /* MMP2 */
+
+/* Reserved Read Only Registers */
+#define LCD_CFG_RDREG5F 0x017C
+#define IRE_FRAME_CNT_MASK 0x000000C0
+#define IPE_FRAME_CNT_MASK 0x00000030
+#define GRA_FRAME_CNT_MASK 0x0000000C /* Graphic */
+#define DMA_FRAME_CNT_MASK 0x00000003 /* Video */
+
+#define LCD_FRAME_CNT (0x017C) /* MMP2 */
+
+/* SPI Control Register. */
+#define LCD_SPU_SPI_CTRL 0x0180
+#define CFG_SCLKCNT(div) ((div)<<24) /* 0xFF~0x2 */
+#define CFG_SCLKCNT_MASK 0xFF000000
+#define CFG_RXBITS(rx) (((rx) - 1)<<16) /* 0x1F~0x1 */
+#define CFG_RXBITS_MASK 0x00FF0000
+#define CFG_TXBITS(tx) (((tx) - 1)<<8) /* 0x1F~0x1 */
+#define CFG_TXBITS_MASK 0x0000FF00
+#define CFG_CLKINV(clk) ((clk)<<7)
+#define CFG_CLKINV_MASK 0x00000080
+#define CFG_KEEPXFER(transfer) ((transfer)<<6)
+#define CFG_KEEPXFER_MASK 0x00000040
+#define CFG_RXBITSTO0(rx) ((rx)<<5)
+#define CFG_RXBITSTO0_MASK 0x00000020
+#define CFG_TXBITSTO0(tx) ((tx)<<4)
+#define CFG_TXBITSTO0_MASK 0x00000010
+#define CFG_SPI_ENA(spi) ((spi)<<3)
+#define CFG_SPI_ENA_MASK 0x00000008
+#define CFG_SPI_SEL(spi) ((spi)<<2)
+#define CFG_SPI_SEL_MASK 0x00000004
+#define CFG_SPI_3W4WB(wire) ((wire)<<1)
+#define CFG_SPI_3W4WB_MASK 0x00000002
+#define CFG_SPI_START(start) (start)
+#define CFG_SPI_START_MASK 0x00000001
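+/*
+ * Illustrative use only: a panel attached through this SPI port would
+ * typically be driven by programming something like CFG_SCLKCNT(divider) |
+ * CFG_TXBITS(bits_per_word) | CFG_SPI_ENA(1) here and then pulsing
+ * CFG_SPI_START(1) per transfer; the actual sequence is implemented in
+ * mmp_spi.c (not shown in this hunk).
+ */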
+
+/* SPI Tx Data Register */
+#define LCD_SPU_SPI_TXDATA 0x0184
+
+/*
+ 1. Smart Panel 8-bit Bus Control Register.
+ 2. AHB Slave Path Data Port Register
+*/
+#define LCD_SPU_SMPN_CTRL 0x0188
+
+/* DMA Control 0 Register */
+#define LCD_SPU_DMA_CTRL0 0x0190
+#define CFG_NOBLENDING(nb) ((nb)<<31)
+#define CFG_NOBLENDING_MASK 0x80000000
+#define CFG_GAMMA_ENA(gn) ((gn)<<30)
+#define CFG_GAMMA_ENA_MASK 0x40000000
+#define CFG_CBSH_ENA(cn) ((cn)<<29)
+#define CFG_CBSH_ENA_MASK 0x20000000
+#define CFG_PALETTE_ENA(pn) ((pn)<<28)
+#define CFG_PALETTE_ENA_MASK 0x10000000
+#define CFG_ARBFAST_ENA(an) ((an)<<27)
+#define CFG_ARBFAST_ENA_MASK 0x08000000
+#define CFG_HWC_1BITMOD(mode) ((mode)<<26)
+#define CFG_HWC_1BITMOD_MASK 0x04000000
+#define CFG_HWC_1BITENA(mn) ((mn)<<25)
+#define CFG_HWC_1BITENA_MASK 0x02000000
+#define CFG_HWC_ENA(cn) ((cn)<<24)
+#define CFG_HWC_ENA_MASK 0x01000000
+#define CFG_DMAFORMAT(dmaformat) ((dmaformat)<<20)
+#define CFG_DMAFORMAT_MASK 0x00F00000
+#define CFG_GRAFORMAT(graformat) ((graformat)<<16)
+#define CFG_GRAFORMAT_MASK 0x000F0000
+/* for graphic part */
+#define CFG_GRA_FTOGGLE(toggle) ((toggle)<<15)
+#define CFG_GRA_FTOGGLE_MASK 0x00008000
+#define CFG_GRA_HSMOOTH(smooth) ((smooth)<<14)
+#define CFG_GRA_HSMOOTH_MASK 0x00004000
+#define CFG_GRA_TSTMODE(test) ((test)<<13)
+#define CFG_GRA_TSTMODE_MASK 0x00002000
+#define CFG_GRA_SWAPRB(swap) ((swap)<<12)
+#define CFG_GRA_SWAPRB_MASK 0x00001000
+#define CFG_GRA_SWAPUV(swap) ((swap)<<11)
+#define CFG_GRA_SWAPUV_MASK 0x00000800
+#define CFG_GRA_SWAPYU(swap) ((swap)<<10)
+#define CFG_GRA_SWAPYU_MASK 0x00000400
+#define CFG_GRA_SWAP_MASK 0x00001C00
+#define CFG_YUV2RGB_GRA(cvrt) ((cvrt)<<9)
+#define CFG_YUV2RGB_GRA_MASK 0x00000200
+#define CFG_GRA_ENA(gra) ((gra)<<8)
+#define CFG_GRA_ENA_MASK 0x00000100
+#define dma0_gfx_masks (CFG_GRAFORMAT_MASK | CFG_GRA_FTOGGLE_MASK | \
+ CFG_GRA_HSMOOTH_MASK | CFG_GRA_TSTMODE_MASK | CFG_GRA_SWAP_MASK | \
+ CFG_YUV2RGB_GRA_MASK | CFG_GRA_ENA_MASK)
+/* for video part */
+#define CFG_DMA_FTOGGLE(toggle) ((toggle)<<7)
+#define CFG_DMA_FTOGGLE_MASK 0x00000080
+#define CFG_DMA_HSMOOTH(smooth) ((smooth)<<6)
+#define CFG_DMA_HSMOOTH_MASK 0x00000040
+#define CFG_DMA_TSTMODE(test) ((test)<<5)
+#define CFG_DMA_TSTMODE_MASK 0x00000020
+#define CFG_DMA_SWAPRB(swap) ((swap)<<4)
+#define CFG_DMA_SWAPRB_MASK 0x00000010
+#define CFG_DMA_SWAPUV(swap) ((swap)<<3)
+#define CFG_DMA_SWAPUV_MASK 0x00000008
+#define CFG_DMA_SWAPYU(swap) ((swap)<<2)
+#define CFG_DMA_SWAPYU_MASK 0x00000004
+#define CFG_DMA_SWAP_MASK 0x0000001C
+#define CFG_YUV2RGB_DMA(cvrt) ((cvrt)<<1)
+#define CFG_YUV2RGB_DMA_MASK 0x00000002
+#define CFG_DMA_ENA(video) (video)
+#define CFG_DMA_ENA_MASK 0x00000001
+#define dma0_vid_masks (CFG_DMAFORMAT_MASK | CFG_DMA_FTOGGLE_MASK | \
+ CFG_DMA_HSMOOTH_MASK | CFG_DMA_TSTMODE_MASK | CFG_DMA_SWAP_MASK | \
+ CFG_YUV2RGB_DMA_MASK | CFG_DMA_ENA_MASK)
+#define dma_palette(val) ((val ? 1 : 0) << 28)
+#define dma_fmt(vid, val) ((val & 0xf) << ((vid) ? 20 : 16))
+#define dma_swaprb(vid, val) ((val ? 1 : 0) << ((vid) ? 4 : 12))
+#define dma_swapuv(vid, val) ((val ? 1 : 0) << ((vid) ? 3 : 11))
+#define dma_swapyuv(vid, val) ((val ? 1 : 0) << ((vid) ? 2 : 10))
+#define dma_csc(vid, val) ((val ? 1 : 0) << ((vid) ? 1 : 9))
+#define dma_hsmooth(vid, val) ((val ? 1 : 0) << ((vid) ? 6 : 14))
+#define dma_mask(vid) (dma_palette(1) | dma_fmt(vid, 0xf) | dma_csc(vid, 1) \
+ | dma_swaprb(vid, 1) | dma_swapuv(vid, 1) | dma_swapyuv(vid, 1))
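+/*
+ * The vid argument of these helpers switches between the video-layer (DMA)
+ * and graphics-layer (GRA) fields of DMA control 0. For example,
+ * dma_fmt(1, VMODE_YUV420PLANAR) places 0x7 in the DMAFORMAT bits [23:20],
+ * while dma_fmt(0, VMODE_YUV420PLANAR) places it in the GRAFORMAT bits
+ * [19:16]; fmt_to_reg() in mmp_ctrl.c relies on this layout.
+ */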
+
+/* DMA Control 1 Register */
+#define LCD_SPU_DMA_CTRL1 0x0194
+#define CFG_FRAME_TRIG(trig) ((trig)<<31)
+#define CFG_FRAME_TRIG_MASK 0x80000000
+#define CFG_VSYNC_TRIG(trig) ((trig)<<28)
+#define CFG_VSYNC_TRIG_MASK 0x70000000
+#define CFG_VSYNC_INV(inv) ((inv)<<27)
+#define CFG_VSYNC_INV_MASK 0x08000000
+#define CFG_COLOR_KEY_MODE(cmode) ((cmode)<<24)
+#define CFG_COLOR_KEY_MASK 0x07000000
+#define CFG_CARRY(carry) ((carry)<<23)
+#define CFG_CARRY_MASK 0x00800000
+#define CFG_LNBUF_ENA(lnbuf) ((lnbuf)<<22)
+#define CFG_LNBUF_ENA_MASK 0x00400000
+#define CFG_GATED_ENA(gated) ((gated)<<21)
+#define CFG_GATED_ENA_MASK 0x00200000
+#define CFG_PWRDN_ENA(power) ((power)<<20)
+#define CFG_PWRDN_ENA_MASK 0x00100000
+#define CFG_DSCALE(dscale) ((dscale)<<18)
+#define CFG_DSCALE_MASK 0x000C0000
+#define CFG_ALPHA_MODE(amode) ((amode)<<16)
+#define CFG_ALPHA_MODE_MASK 0x00030000
+#define CFG_ALPHA(alpha) ((alpha)<<8)
+#define CFG_ALPHA_MASK 0x0000FF00
+#define CFG_PXLCMD(pxlcmd) (pxlcmd)
+#define CFG_PXLCMD_MASK 0x000000FF
+
+/* SRAM Control Register */
+#define LCD_SPU_SRAM_CTRL 0x0198
+#define CFG_SRAM_INIT_WR_RD(mode) ((mode)<<14)
+#define CFG_SRAM_INIT_WR_RD_MASK 0x0000C000
+#define CFG_SRAM_ADDR_LCDID(id) ((id)<<8)
+#define CFG_SRAM_ADDR_LCDID_MASK 0x00000F00
+#define CFG_SRAM_ADDR(addr) (addr)
+#define CFG_SRAM_ADDR_MASK 0x000000FF
+
+/* SRAM Write Data Register */
+#define LCD_SPU_SRAM_WRDAT 0x019C
+
+/* SRAM RTC/WTC Control Register */
+#define LCD_SPU_SRAM_PARA0 0x01A0
+
+/* SRAM Power Down Control Register */
+#define LCD_SPU_SRAM_PARA1 0x01A4
+#define CFG_CSB_256x32(hwc) ((hwc)<<15) /* HWC */
+#define CFG_CSB_256x32_MASK 0x00008000
+#define CFG_CSB_256x24(palette) ((palette)<<14) /* Palette */
+#define CFG_CSB_256x24_MASK 0x00004000
+#define CFG_CSB_256x8(gamma) ((gamma)<<13) /* Gamma */
+#define CFG_CSB_256x8_MASK 0x00002000
+#define CFG_PDWN256x32(pdwn) ((pdwn)<<7) /* HWC */
+#define CFG_PDWN256x32_MASK 0x00000080
+#define CFG_PDWN256x24(pdwn) ((pdwn)<<6) /* Palette */
+#define CFG_PDWN256x24_MASK 0x00000040
+#define CFG_PDWN256x8(pdwn) ((pdwn)<<5) /* Gamma */
+#define CFG_PDWN256x8_MASK 0x00000020
+#define CFG_PDWN32x32(pdwn) ((pdwn)<<3)
+#define CFG_PDWN32x32_MASK 0x00000008
+#define CFG_PDWN16x66(pdwn) ((pdwn)<<2)
+#define CFG_PDWN16x66_MASK 0x00000004
+#define CFG_PDWN32x66(pdwn) ((pdwn)<<1)
+#define CFG_PDWN32x66_MASK 0x00000002
+#define CFG_PDWN64x66(pdwn) (pdwn)
+#define CFG_PDWN64x66_MASK 0x00000001
+
+/* Smart or Dumb Panel Clock Divider */
+#define LCD_CFG_SCLK_DIV 0x01A8
+#define SCLK_SRC_SEL(src) ((src)<<31)
+#define SCLK_SRC_SEL_MASK 0x80000000
+#define SCLK_DISABLE (1<<28)
+#define CLK_FRACDIV(frac) ((frac)<<16)
+#define CLK_FRACDIV_MASK 0x0FFF0000
+#define DSI1_BITCLK_DIV(div) (div<<8)
+#define DSI1_BITCLK_DIV_MASK 0x00000F00
+#define CLK_INT_DIV(div) (div)
+#define CLK_INT_DIV_MASK 0x000000FF
+
+/* Video Contrast Register */
+#define LCD_SPU_CONTRAST 0x01AC
+#define CFG_BRIGHTNESS(bright) ((bright)<<16)
+#define CFG_BRIGHTNESS_MASK 0xFFFF0000
+#define CFG_CONTRAST(contrast) (contrast)
+#define CFG_CONTRAST_MASK 0x0000FFFF
+
+/* Video Saturation Register */
+#define LCD_SPU_SATURATION 0x01B0
+#define CFG_C_MULTS(mult) ((mult)<<16)
+#define CFG_C_MULTS_MASK 0xFFFF0000
+#define CFG_SATURATION(sat) (sat)
+#define CFG_SATURATION_MASK 0x0000FFFF
+
+/* Video Hue Adjust Register */
+#define LCD_SPU_CBSH_HUE 0x01B4
+#define CFG_SIN0(sin0) ((sin0)<<16)
+#define CFG_SIN0_MASK 0xFFFF0000
+#define CFG_COS0(con0) (con0)
+#define CFG_COS0_MASK 0x0000FFFF
+
+/* Dumb LCD Panel Control Register */
+#define LCD_SPU_DUMB_CTRL 0x01B8
+#define CFG_DUMBMODE(mode) ((mode)<<28)
+#define CFG_DUMBMODE_MASK 0xF0000000
+#define CFG_LCDGPIO_O(data) ((data)<<20)
+#define CFG_LCDGPIO_O_MASK 0x0FF00000
+#define CFG_LCDGPIO_ENA(gpio) ((gpio)<<12)
+#define CFG_LCDGPIO_ENA_MASK 0x000FF000
+#define CFG_BIAS_OUT(bias) ((bias)<<8)
+#define CFG_BIAS_OUT_MASK 0x00000100
+#define CFG_REVERSE_RGB(RGB) ((RGB)<<7)
+#define CFG_REVERSE_RGB_MASK 0x00000080
+#define CFG_INV_COMPBLANK(blank) ((blank)<<6)
+#define CFG_INV_COMPBLANK_MASK 0x00000040
+#define CFG_INV_COMPSYNC(sync) ((sync)<<5)
+#define CFG_INV_COMPSYNC_MASK 0x00000020
+#define CFG_INV_HENA(hena) ((hena)<<4)
+#define CFG_INV_HENA_MASK 0x00000010
+#define CFG_INV_VSYNC(vsync) ((vsync)<<3)
+#define CFG_INV_VSYNC_MASK 0x00000008
+#define CFG_INV_HSYNC(hsync) ((hsync)<<2)
+#define CFG_INV_HSYNC_MASK 0x00000004
+#define CFG_INV_PCLK(pclk) ((pclk)<<1)
+#define CFG_INV_PCLK_MASK 0x00000002
+#define CFG_DUMB_ENA(dumb) (dumb)
+#define CFG_DUMB_ENA_MASK 0x00000001
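+/*
+ * Usage note: path_set_mode() in mmp_ctrl.c ORs the platform link_config
+ * (which carries the CFG_DUMBMODE bits) together with CFG_DUMB_ENA(1) and
+ * the sync polarity bits into this register via intf_ctrl(path->id).
+ */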
+
+/* LCD I/O Pads Control Register */
+#define SPU_IOPAD_CONTROL 0x01BC
+#define CFG_GRA_VM_ENA(vm) ((vm)<<15)
+#define CFG_GRA_VM_ENA_MASK 0x00008000
+#define CFG_DMA_VM_ENA(vm) ((vm)<<13)
+#define CFG_DMA_VM_ENA_MASK 0x00002000
+#define CFG_CMD_VM_ENA(vm) ((vm)<<12)
+#define CFG_CMD_VM_ENA_MASK 0x00001000
+#define CFG_CSC(csc) ((csc)<<8)
+#define CFG_CSC_MASK 0x00000300
+#define CFG_BOUNDARY(size) ((size)<<5)
+#define CFG_BOUNDARY_MASK 0x00000020
+#define CFG_BURST(len) ((len)<<4)
+#define CFG_BURST_MASK 0x00000010
+#define CFG_IOPADMODE(iopad) (iopad)
+#define CFG_IOPADMODE_MASK 0x0000000F
+
+/* LCD Interrupt Control Register */
+#define SPU_IRQ_ENA 0x01C0
+#define DMA_FRAME_IRQ0_ENA(irq) ((irq)<<31)
+#define DMA_FRAME_IRQ0_ENA_MASK 0x80000000
+#define DMA_FRAME_IRQ1_ENA(irq) ((irq)<<30)
+#define DMA_FRAME_IRQ1_ENA_MASK 0x40000000
+#define DMA_FF_UNDERFLOW_ENA(ff) ((ff)<<29)
+#define DMA_FF_UNDERFLOW_ENA_MASK 0x20000000
+#define AXI_BUS_ERROR_IRQ_ENA(irq) ((irq)<<28)
+#define AXI_BUS_ERROR_IRQ_ENA_MASK 0x10000000
+#define GRA_FRAME_IRQ0_ENA(irq) ((irq)<<27)
+#define GRA_FRAME_IRQ0_ENA_MASK 0x08000000
+#define GRA_FRAME_IRQ1_ENA(irq) ((irq)<<26)
+#define GRA_FRAME_IRQ1_ENA_MASK 0x04000000
+#define GRA_FF_UNDERFLOW_ENA(ff) ((ff)<<25)
+#define GRA_FF_UNDERFLOW_ENA_MASK 0x02000000
+#define VSYNC_IRQ_ENA(vsync_irq) ((vsync_irq)<<23)
+#define VSYNC_IRQ_ENA_MASK 0x00800000
+#define DUMB_FRAMEDONE_ENA(fdone) ((fdone)<<22)
+#define DUMB_FRAMEDONE_ENA_MASK 0x00400000
+#define TWC_FRAMEDONE_ENA(fdone) ((fdone)<<21)
+#define TWC_FRAMEDONE_ENA_MASK 0x00200000
+#define HWC_FRAMEDONE_ENA(fdone) ((fdone)<<20)
+#define HWC_FRAMEDONE_ENA_MASK 0x00100000
+#define SLV_IRQ_ENA(irq) ((irq)<<19)
+#define SLV_IRQ_ENA_MASK 0x00080000
+#define SPI_IRQ_ENA(irq) ((irq)<<18)
+#define SPI_IRQ_ENA_MASK 0x00040000
+#define PWRDN_IRQ_ENA(irq) ((irq)<<17)
+#define PWRDN_IRQ_ENA_MASK 0x00020000
+#define AXI_LATENCY_TOO_LONG_IRQ_ENA(irq) ((irq)<<16)
+#define AXI_LATENCY_TOO_LONG_IRQ_ENA_MASK 0x00010000
+#define CLEAN_SPU_IRQ_ISR(irq) (irq)
+#define CLEAN_SPU_IRQ_ISR_MASK 0x0000FFFF
+#define TV_DMA_FRAME_IRQ0_ENA(irq) ((irq)<<15)
+#define TV_DMA_FRAME_IRQ0_ENA_MASK 0x00008000
+#define TV_DMA_FRAME_IRQ1_ENA(irq) ((irq)<<14)
+#define TV_DMA_FRAME_IRQ1_ENA_MASK 0x00004000
+#define TV_DMA_FF_UNDERFLOW_ENA(unerrun) ((unerrun)<<13)
+#define TV_DMA_FF_UNDERFLOW_ENA_MASK 0x00002000
+#define TVSYNC_IRQ_ENA(irq) ((irq)<<12)
+#define TVSYNC_IRQ_ENA_MASK 0x00001000
+#define TV_FRAME_IRQ0_ENA(irq) ((irq)<<11)
+#define TV_FRAME_IRQ0_ENA_MASK 0x00000800
+#define TV_FRAME_IRQ1_ENA(irq) ((irq)<<10)
+#define TV_FRAME_IRQ1_ENA_MASK 0x00000400
+#define TV_GRA_FF_UNDERFLOW_ENA(unerrun) ((unerrun)<<9)
+#define TV_GRA_FF_UNDERFLOW_ENA_MASK 0x00000200
+#define TV_FRAMEDONE_ENA(irq) ((irq)<<8)
+#define TV_FRAMEDONE_ENA_MASK 0x00000100
+
+/* FIXME - JUST GUESS */
+#define PN2_DMA_FRAME_IRQ0_ENA(irq) ((irq)<<7)
+#define PN2_DMA_FRAME_IRQ0_ENA_MASK 0x00000080
+#define PN2_DMA_FRAME_IRQ1_ENA(irq) ((irq)<<6)
+#define PN2_DMA_FRAME_IRQ1_ENA_MASK 0x00000040
+#define PN2_DMA_FF_UNDERFLOW_ENA(ff) ((ff)<<5)
+#define PN2_DMA_FF_UNDERFLOW_ENA_MASK 0x00000020
+#define PN2_GRA_FRAME_IRQ0_ENA(irq) ((irq)<<3)
+#define PN2_GRA_FRAME_IRQ0_ENA_MASK 0x00000008
+#define PN2_GRA_FRAME_IRQ1_ENA(irq) ((irq)<<2)
+#define PN2_GRA_FRAME_IRQ1_ENA_MASK 0x04000004
+#define PN2_GRA_FF_UNDERFLOW_ENA(ff) ((ff)<<1)
+#define PN2_GRA_FF_UNDERFLOW_ENA_MASK 0x00000002
+#define PN2_VSYNC_IRQ_ENA(irq) ((irq)<<0)
+#define PN2_SYNC_IRQ_ENA_MASK 0x00000001
+
+#define gf0_imask(id) ((id) ? (((id) & 1) ? TV_FRAME_IRQ0_ENA_MASK \
+ : PN2_GRA_FRAME_IRQ0_ENA_MASK) : GRA_FRAME_IRQ0_ENA_MASK)
+#define gf1_imask(id) ((id) ? (((id) & 1) ? TV_FRAME_IRQ1_ENA_MASK \
+ : PN2_GRA_FRAME_IRQ1_ENA_MASK) : GRA_FRAME_IRQ1_ENA_MASK)
+#define vsync_imask(id) ((id) ? (((id) & 1) ? TVSYNC_IRQ_ENA_MASK \
+ : PN2_SYNC_IRQ_ENA_MASK) : VSYNC_IRQ_ENA_MASK)
+#define vsync_imasks (vsync_imask(0) | vsync_imask(1))
+
+#define display_done_imask(id) ((id) ? (((id) & 1) ? TV_FRAMEDONE_ENA_MASK\
+ : (PN2_DMA_FRAME_IRQ0_ENA_MASK | PN2_DMA_FRAME_IRQ1_ENA_MASK))\
+ : DUMB_FRAMEDONE_ENA_MASK)
+
+#define display_done_imasks (display_done_imask(0) | display_done_imask(1))
+
+#define vf0_imask(id) ((id) ? (((id) & 1) ? TV_DMA_FRAME_IRQ0_ENA_MASK \
+ : PN2_DMA_FRAME_IRQ0_ENA_MASK) : DMA_FRAME_IRQ0_ENA_MASK)
+#define vf1_imask(id) ((id) ? (((id) & 1) ? TV_DMA_FRAME_IRQ1_ENA_MASK \
+ : PN2_DMA_FRAME_IRQ1_ENA_MASK) : DMA_FRAME_IRQ1_ENA_MASK)
+
+#define gfx_imasks (gf0_imask(0) | gf1_imask(0) | gf0_imask(1) | \
+ gf1_imask(1))
+#define vid_imasks (vf0_imask(0) | vf1_imask(0) | vf0_imask(1) | \
+ vf1_imask(1))
+#define vid_imask(id) (display_done_imask(id))
+
+#define pn1_imasks (gf0_imask(0) | gf1_imask(0) | vsync_imask(0) | \
+ display_done_imask(0) | vf0_imask(0) | vf1_imask(0))
+#define tv_imasks (gf0_imask(1) | gf1_imask(1) | vsync_imask(1) | \
+ display_done_imask(1) | vf0_imask(1) | vf1_imask(1))
+#define path_imasks(id) ((id) ? (tv_imasks) : (pn1_imasks))
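+/*
+ * path_imasks(id) collects every frame, vsync and frame-done interrupt
+ * enable bit belonging to one path, e.g. path_imasks(0) for the panel path
+ * and path_imasks(1) for the TV path; ctrl_set_default() combines these
+ * with err_imask() when it masks all interrupts at init time.
+ */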
+
+/* error indications */
+#define vid_udflow_imask(id) ((id) ? (((id) & 1) ? \
+ (TV_DMA_FF_UNDERFLOW_ENA_MASK) : (PN2_DMA_FF_UNDERFLOW_ENA_MASK)) : \
+ (DMA_FF_UNDERFLOW_ENA_MASK))
+#define gfx_udflow_imask(id) ((id) ? (((id) & 1) ? \
+ (TV_GRA_FF_UNDERFLOW_ENA_MASK) : (PN2_GRA_FF_UNDERFLOW_ENA_MASK)) : \
+ (GRA_FF_UNDERFLOW_ENA_MASK))
+
+#define err_imask(id) (vid_udflow_imask(id) | gfx_udflow_imask(id) | \
+ AXI_BUS_ERROR_IRQ_ENA_MASK | AXI_LATENCY_TOO_LONG_IRQ_ENA_MASK)
+#define err_imasks (err_imask(0) | err_imask(1) | err_imask(2))
+/* LCD Interrupt Status Register */
+#define SPU_IRQ_ISR 0x01C4
+#define DMA_FRAME_IRQ0(irq) ((irq)<<31)
+#define DMA_FRAME_IRQ0_MASK 0x80000000
+#define DMA_FRAME_IRQ1(irq) ((irq)<<30)
+#define DMA_FRAME_IRQ1_MASK 0x40000000
+#define DMA_FF_UNDERFLOW(ff) ((ff)<<29)
+#define DMA_FF_UNDERFLOW_MASK 0x20000000
+#define AXI_BUS_ERROR_IRQ(irq) ((irq)<<28)
+#define AXI_BUS_ERROR_IRQ_MASK 0x10000000
+#define GRA_FRAME_IRQ0(irq) ((irq)<<27)
+#define GRA_FRAME_IRQ0_MASK 0x08000000
+#define GRA_FRAME_IRQ1(irq) ((irq)<<26)
+#define GRA_FRAME_IRQ1_MASK 0x04000000
+#define GRA_FF_UNDERFLOW(ff) ((ff)<<25)
+#define GRA_FF_UNDERFLOW_MASK 0x02000000
+#define VSYNC_IRQ(vsync_irq) ((vsync_irq)<<23)
+#define VSYNC_IRQ_MASK 0x00800000
+#define DUMB_FRAMEDONE(fdone) ((fdone)<<22)
+#define DUMB_FRAMEDONE_MASK 0x00400000
+#define TWC_FRAMEDONE(fdone) ((fdone)<<21)
+#define TWC_FRAMEDONE_MASK 0x00200000
+#define HWC_FRAMEDONE(fdone) ((fdone)<<20)
+#define HWC_FRAMEDONE_MASK 0x00100000
+#define SLV_IRQ(irq) ((irq)<<19)
+#define SLV_IRQ_MASK 0x00080000
+#define SPI_IRQ(irq) ((irq)<<18)
+#define SPI_IRQ_MASK 0x00040000
+#define PWRDN_IRQ(irq) ((irq)<<17)
+#define PWRDN_IRQ_MASK 0x00020000
+#define AXI_LATENCY_TOO_LONGR_IRQ(irq) ((irq)<<16)
+#define AXI_LATENCY_TOO_LONGR_IRQ_MASK 0x00010000
+#define TV_DMA_FRAME_IRQ0(irq) ((irq)<<15)
+#define TV_DMA_FRAME_IRQ0_MASK 0x00008000
+#define TV_DMA_FRAME_IRQ1(irq) ((irq)<<14)
+#define TV_DMA_FRAME_IRQ1_MASK 0x00004000
+#define TV_DMA_FF_UNDERFLOW(unerrun) ((unerrun)<<13)
+#define TV_DMA_FF_UNDERFLOW_MASK 0x00002000
+#define TVSYNC_IRQ(irq) ((irq)<<12)
+#define TVSYNC_IRQ_MASK 0x00001000
+#define TV_FRAME_IRQ0(irq) ((irq)<<11)
+#define TV_FRAME_IRQ0_MASK 0x00000800
+#define TV_FRAME_IRQ1(irq) ((irq)<<10)
+#define TV_FRAME_IRQ1_MASK 0x00000400
+#define TV_GRA_FF_UNDERFLOW(unerrun) ((unerrun)<<9)
+#define TV_GRA_FF_UNDERFLOW_MASK 0x00000200
+#define PN2_DMA_FRAME_IRQ0(irq) ((irq)<<7)
+#define PN2_DMA_FRAME_IRQ0_MASK 0x00000080
+#define PN2_DMA_FRAME_IRQ1(irq) ((irq)<<6)
+#define PN2_DMA_FRAME_IRQ1_MASK 0x00000040
+#define PN2_DMA_FF_UNDERFLOW(ff) ((ff)<<5)
+#define PN2_DMA_FF_UNDERFLOW_MASK 0x00000020
+#define PN2_GRA_FRAME_IRQ0(irq) ((irq)<<3)
+#define PN2_GRA_FRAME_IRQ0_MASK 0x00000008
+#define PN2_GRA_FRAME_IRQ1(irq) ((irq)<<2)
+#define PN2_GRA_FRAME_IRQ1_MASK 0x04000004
+#define PN2_GRA_FF_UNDERFLOW(ff) ((ff)<<1)
+#define PN2_GRA_FF_UNDERFLOW_MASK 0x00000002
+#define PN2_VSYNC_IRQ(irq) ((irq)<<0)
+#define PN2_SYNC_IRQ_MASK 0x00000001
+
+/* LCD FIFO Depth register */
+#define LCD_FIFO_DEPTH 0x01c8
+#define VIDEO_FIFO(fi) ((fi) << 0)
+#define VIDEO_FIFO_MASK 0x00000003
+#define GRAPHIC_FIFO(fi) ((fi) << 2)
+#define GRAPHIC_FIFO_MASK 0x0000000c
+
+/* read-only */
+#define DMA_FRAME_IRQ0_LEVEL_MASK 0x00008000
+#define DMA_FRAME_IRQ1_LEVEL_MASK 0x00004000
+#define DMA_FRAME_CNT_ISR_MASK 0x00003000
+#define GRA_FRAME_IRQ0_LEVEL_MASK 0x00000800
+#define GRA_FRAME_IRQ1_LEVEL_MASK 0x00000400
+#define GRA_FRAME_CNT_ISR_MASK 0x00000300
+#define VSYNC_IRQ_LEVEL_MASK 0x00000080
+#define DUMB_FRAMEDONE_LEVEL_MASK 0x00000040
+#define TWC_FRAMEDONE_LEVEL_MASK 0x00000020
+#define HWC_FRAMEDONE_LEVEL_MASK 0x00000010
+#define SLV_FF_EMPTY_MASK 0x00000008
+#define DMA_FF_ALLEMPTY_MASK 0x00000004
+#define GRA_FF_ALLEMPTY_MASK 0x00000002
+#define PWRDN_IRQ_LEVEL_MASK 0x00000001
+
+/* 32 bit LCD Interrupt Reset Status*/
+#define SPU_IRQ_RSR (0x01C8)
+/* 32 bit Panel Path Graphic Partial Display Horizontal Control Register*/
+#define LCD_GRA_CUTHPXL (0x01CC)
+/* 32 bit Panel Path Graphic Partial Display Vertical Control Register*/
+#define LCD_GRA_CUTVLN (0x01D0)
+/* 32 bit TV Path Graphic Partial Display Horizontal Control Register*/
+#define LCD_TVG_CUTHPXL (0x01D4)
+/* 32 bit TV Path Graphic Partial Display Vertical Control Register*/
+#define LCD_TVG_CUTVLN (0x01D8)
+/* 32 bit LCD Global Control Register*/
+#define LCD_TOP_CTRL (0x01DC)
+/* 32 bit LCD SQU Line Buffer Control Register 1*/
+#define LCD_SQULN1_CTRL (0x01E0)
+/* 32 bit LCD SQU Line Buffer Control Register 2*/
+#define LCD_SQULN2_CTRL (0x01E4)
+#define squln_ctrl(id) ((id) ? (((id) & 1) ? LCD_SQULN2_CTRL : \
+ LCD_PN2_SQULN1_CTRL) : LCD_SQULN1_CTRL)
+
+/* 32 bit LCD Mixed Overlay Control Register */
+#define LCD_AFA_ALL2ONE (0x01E8)
+
+#define LCD_PN2_SCLK_DIV (0x01EC)
+#define LCD_PN2_TCLK_DIV (0x01F0)
+#define LCD_LVDS_SCLK_DIV_WR (0x01F4)
+#define LCD_LVDS_SCLK_DIV_RD (0x01FC)
+#define PN2_LCD_DMA_START_ADDR_Y0 (0x0200)
+#define PN2_LCD_DMA_START_ADDR_U0 (0x0204)
+#define PN2_LCD_DMA_START_ADDR_V0 (0x0208)
+#define PN2_LCD_DMA_START_ADDR_C0 (0x020C)
+#define PN2_LCD_DMA_START_ADDR_Y1 (0x0210)
+#define PN2_LCD_DMA_START_ADDR_U1 (0x0214)
+#define PN2_LCD_DMA_START_ADDR_V1 (0x0218)
+#define PN2_LCD_DMA_START_ADDR_C1 (0x021C)
+#define PN2_LCD_DMA_PITCH_YC (0x0220)
+#define PN2_LCD_DMA_PITCH_UV (0x0224)
+#define PN2_LCD_DMA_OVSA_HPXL_VLN (0x0228)
+#define PN2_LCD_DMA_HPXL_VLN (0x022C)
+#define PN2_LCD_DMAZM_HPXL_VLN (0x0230)
+#define PN2_LCD_GRA_START_ADDR0 (0x0234)
+#define PN2_LCD_GRA_START_ADDR1 (0x0238)
+#define PN2_LCD_GRA_PITCH (0x023C)
+#define PN2_LCD_GRA_OVSA_HPXL_VLN (0x0240)
+#define PN2_LCD_GRA_HPXL_VLN (0x0244)
+#define PN2_LCD_GRAZM_HPXL_VLN (0x0248)
+#define PN2_LCD_HWC_OVSA_HPXL_VLN (0x024C)
+#define PN2_LCD_HWC_HPXL_VLN (0x0250)
+#define LCD_PN2_V_H_TOTAL (0x0254)
+#define LCD_PN2_V_H_ACTIVE (0x0258)
+#define LCD_PN2_H_PORCH (0x025C)
+#define LCD_PN2_V_PORCH (0x0260)
+#define LCD_PN2_BLANKCOLOR (0x0264)
+#define LCD_PN2_ALPHA_COLOR1 (0x0268)
+#define LCD_PN2_ALPHA_COLOR2 (0x026C)
+#define LCD_PN2_COLORKEY_Y (0x0270)
+#define LCD_PN2_COLORKEY_U (0x0274)
+#define LCD_PN2_COLORKEY_V (0x0278)
+#define LCD_PN2_SEPXLCNT (0x027C)
+#define LCD_TV_V_H_TOTAL_FLD (0x0280)
+#define LCD_TV_V_PORCH_FLD (0x0284)
+#define LCD_TV_SEPXLCNT_FLD (0x0288)
+
+#define LCD_2ND_ALPHA (0x0294)
+#define LCD_PN2_CONTRAST (0x0298)
+#define LCD_PN2_SATURATION (0x029c)
+#define LCD_PN2_CBSH_HUE (0x02a0)
+#define LCD_TIMING_EXT (0x02C0)
+#define LCD_PN2_LAYER_ALPHA_SEL1 (0x02c4)
+#define LCD_PN2_CTRL0 (0x02C8)
+#define TV_LAYER_ALPHA_SEL1 (0x02cc)
+#define LCD_SMPN2_CTRL (0x02D0)
+#define LCD_IO_OVERL_MAP_CTRL (0x02D4)
+#define LCD_DUMB2_CTRL (0x02d8)
+#define LCD_PN2_CTRL1 (0x02DC)
+#define PN2_IOPAD_CONTROL (0x02E0)
+#define LCD_PN2_SQULN1_CTRL (0x02E4)
+#define PN2_LCD_GRA_CUTHPXL (0x02e8)
+#define PN2_LCD_GRA_CUTVLN (0x02ec)
+#define LCD_PN2_SQULN2_CTRL (0x02F0)
+#define ALL_LAYER_ALPHA_SEL (0x02F4)
+
+/* pxa988 has different MASTER_CTRL from MMP3/MMP2 */
+#ifdef CONFIG_CPU_PXA988
+#define TIMING_MASTER_CONTROL (0x01F4)
+#define MASTER_ENH(id) (1 << ((id) + 5))
+#define MASTER_ENV(id) (1 << ((id) + 6))
+#else
+#define TIMING_MASTER_CONTROL (0x02F8)
+#define MASTER_ENH(id) (1 << (id))
+#define MASTER_ENV(id) (1 << ((id) + 4))
+#endif
+
+#define DSI_START_SEL_SHIFT(id) (((id) << 1) + 8)
+#define timing_master_config(path, dsi_id, lcd_id) \
+ (MASTER_ENH(path) | MASTER_ENV(path) | \
+ (((lcd_id) + ((dsi_id) << 1)) << DSI_START_SEL_SHIFT(path)))
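+/*
+ * Worked example with the non-PXA988 definitions above (values purely for
+ * illustration): timing_master_config(0, 0, 0) = MASTER_ENH(0) |
+ * MASTER_ENV(0) | (0 << DSI_START_SEL_SHIFT(0)) = 0x11, enabling both
+ * timing masters for path 0 and selecting DSI 0 / LCD 0 as start source.
+ */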
+
+#define LCD_2ND_BLD_CTL (0x02Fc)
+#define LVDS_SRC_MASK (3 << 30)
+#define LVDS_SRC_SHIFT (30)
+#define LVDS_FMT_MASK (1 << 28)
+#define LVDS_FMT_SHIFT (28)
+
+#define CLK_SCLK (1 << 0)
+#define CLK_LVDS_RD (1 << 1)
+#define CLK_LVDS_WR (1 << 2)
+
+#define gra_partdisp_ctrl_hor(id) ((id) ? (((id) & 1) ? \
+ LCD_TVG_CUTHPXL : PN2_LCD_GRA_CUTHPXL) : LCD_GRA_CUTHPXL)
+#define gra_partdisp_ctrl_ver(id) ((id) ? (((id) & 1) ? \
+ LCD_TVG_CUTVLN : PN2_LCD_GRA_CUTVLN) : LCD_GRA_CUTVLN)
+
+/*
+ * defined Video Memory Color format for DMA control 0 register
+ * DMA0 bit[23:20]
+ */
+#define VMODE_RGB565 0x0
+#define VMODE_RGB1555 0x1
+#define VMODE_RGB888PACKED 0x2
+#define VMODE_RGB888UNPACKED 0x3
+#define VMODE_RGBA888 0x4
+#define VMODE_YUV422PACKED 0x5
+#define VMODE_YUV422PLANAR 0x6
+#define VMODE_YUV420PLANAR 0x7
+#define VMODE_SMPNCMD 0x8
+#define VMODE_PALETTE4BIT 0x9
+#define VMODE_PALETTE8BIT 0xa
+#define VMODE_RESERVED 0xb
+
+/*
+ * defined Graphic Memory Color format for DMA control 0 register
+ * DMA0 bit[19:16]
+ */
+#define GMODE_RGB565 0x0
+#define GMODE_RGB1555 0x1
+#define GMODE_RGB888PACKED 0x2
+#define GMODE_RGB888UNPACKED 0x3
+#define GMODE_RGBA888 0x4
+#define GMODE_YUV422PACKED 0x5
+#define GMODE_YUV422PLANAR 0x6
+#define GMODE_YUV420PLANAR 0x7
+#define GMODE_SMPNCMD 0x8
+#define GMODE_PALETTE4BIT 0x9
+#define GMODE_PALETTE8BIT 0xa
+#define GMODE_RESERVED 0xb
+
+/*
+ * define for DMA control 1 register
+ */
+#define DMA1_FRAME_TRIG 31 /* bit location */
+#define DMA1_VSYNC_MODE 28
+#define DMA1_VSYNC_INV 27
+#define DMA1_CKEY 24
+#define DMA1_CARRY 23
+#define DMA1_LNBUF_ENA 22
+#define DMA1_GATED_ENA 21
+#define DMA1_PWRDN_ENA 20
+#define DMA1_DSCALE 18
+#define DMA1_ALPHA_MODE 16
+#define DMA1_ALPHA 8
+#define DMA1_PXLCMD 00
+
+/*
+ * defined for Configure Dumb Mode
+ * DUMB LCD Panel bit[31:28]
+ */
+#define DUMB16_RGB565_0 0x0
+#define DUMB16_RGB565_1 0x1
+#define DUMB18_RGB666_0 0x2
+#define DUMB18_RGB666_1 0x3
+#define DUMB12_RGB444_0 0x4
+#define DUMB12_RGB444_1 0x5
+#define DUMB24_RGB888_0 0x6
+#define DUMB_BLANK 0x7
+
+/*
+ * defined for Configure I/O Pin Allocation Mode
+ * LCD LCD I/O Pads control register bit[3:0]
+ */
+#define IOPAD_DUMB24 0x0
+#define IOPAD_DUMB18SPI 0x1
+#define IOPAD_DUMB18GPIO 0x2
+#define IOPAD_DUMB16SPI 0x3
+#define IOPAD_DUMB16GPIO 0x4
+#define IOPAD_DUMB12 0x5
+#define IOPAD_SMART18SPI 0x6
+#define IOPAD_SMART16SPI 0x7
+#define IOPAD_SMART8BOTH 0x8
+#define IOPAD_DUMB18_SMART8 0x9
+#define IOPAD_DUMB16_SMART8SPI 0xa
+#define IOPAD_DUMB16_SMART8GPIO 0xb
+#define IOPAD_DUMB16_DUMB16 0xc
+#define IOPAD_SMART8_SMART8 0xc
+
+/*
+ * defined for indicating boundary and cycle burst length
+ */
+#define CFG_BOUNDARY_1KB (1<<5)
+#define CFG_BOUNDARY_4KB (0<<5)
+#define CFG_CYC_BURST_LEN16 (1<<4)
+#define CFG_CYC_BURST_LEN8 (0<<4)
+
+/*
+ * defined Dumb Panel Clock Divider register
+ * SCLK_Source bit[31]
+ */
+ /* 0: PLL clock select*/
+#define AXI_BUS_SEL 0x80000000
+#define CCD_CLK_SEL 0x40000000
+#define DCON_CLK_SEL 0x20000000
+#define ENA_CLK_INT_DIV CONFIG_FB_DOVE_CLCD_SCLK_DIV
+#define IDLE_CLK_INT_DIV 0x1 /* idle Integer Divider */
+#define DIS_CLK_INT_DIV 0x0 /* Disable Integer Divider */
+
+/* SRAM ID */
+#define SRAMID_GAMMA_YR 0x0
+#define SRAMID_GAMMA_UG 0x1
+#define SRAMID_GAMMA_VB 0x2
+#define SRAMID_PALATTE 0x3
+#define SRAMID_HWC 0xf
+
+/* SRAM INIT Read/Write */
+#define SRAMID_INIT_READ 0x0
+#define SRAMID_INIT_WRITE 0x2
+#define SRAMID_INIT_DEFAULT 0x3
+
+/*
+ * defined VSYNC selection mode for DMA control 1 register
+ * DMA1 bit[30:28]
+ */
+#define VMODE_SMPN 0x0
+#define VMODE_SMPNIRQ 0x1
+#define VMODE_DUMB 0x2
+#define VMODE_IPE 0x3
+#define VMODE_IRE 0x4
+
+/*
+ * defined Configure Alpha and Alpha mode for DMA control 1 register
+ * DMA1 bit[15:08](alpha) / bit[17:16](alpha mode)
+ */
+/* ALPHA mode */
+#define MODE_ALPHA_DMA 0x0
+#define MODE_ALPHA_GRA 0x1
+#define MODE_ALPHA_CFG 0x2
+
+/* alpha value */
+#define ALPHA_NOGRAPHIC 0xFF /* all video, no graphic */
+#define ALPHA_NOVIDEO 0x00 /* all graphic, no video */
+#define ALPHA_GRAPHNVIDEO 0x0F /* Selects graphic & video */
+
+/*
+ * defined Pixel Command for DMA control 1 register
+ * DMA1 bit[07:00]
+ */
+#define PIXEL_CMD 0x81
+
+/* DSI */
+/* DSI1 - 4 Lane Controller base */
+#define DSI1_REGS_PHYSICAL_BASE 0xD420B800
+/* DSI2 - 3 Lane Controller base */
+#define DSI2_REGS_PHYSICAL_BASE 0xD420BA00
+
+/* DSI Controller Registers */
+struct dsi_lcd_regs {
+#define DSI_LCD1_CTRL_0 0x100 /* DSI Active Panel 1 Control register 0 */
+#define DSI_LCD1_CTRL_1 0x104 /* DSI Active Panel 1 Control register 1 */
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 reserved1[2];
+
+#define DSI_LCD1_TIMING_0 0x110 /* Timing register 0 */
+#define DSI_LCD1_TIMING_1 0x114 /* Timing register 1 */
+#define DSI_LCD1_TIMING_2 0x118 /* Timing register 2 */
+#define DSI_LCD1_TIMING_3 0x11C /* Timing register 3 */
+#define DSI_LCD1_WC_0 0x120 /* Word Count register 0 */
+#define DSI_LCD1_WC_1 0x124 /* Word Count register 1 */
+#define DSI_LCD1_WC_2 0x128 /* Word Count register 2 */
+ u32 timing0;
+ u32 timing1;
+ u32 timing2;
+ u32 timing3;
+ u32 wc0;
+ u32 wc1;
+ u32 wc2;
+ u32 reserved2[1];
+ u32 slot_cnt0;
+ u32 slot_cnt1;
+ u32 reserved3[2];
+ u32 status_0;
+ u32 status_1;
+ u32 status_2;
+ u32 status_3;
+ u32 status_4;
+};
+
+struct dsi_regs {
+#define DSI_CTRL_0 0x000 /* DSI control register 0 */
+#define DSI_CTRL_1 0x004 /* DSI control register 1 */
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 reserved1[2];
+ u32 irq_status;
+ u32 irq_mask;
+ u32 reserved2[2];
+
+#define DSI_CPU_CMD_0 0x020 /* DSI CPU packet command register 0 */
+#define DSI_CPU_CMD_1 0x024 /* DSI CPU Packet Command Register 1 */
+#define DSI_CPU_CMD_3 0x02C /* DSI CPU Packet Command Register 3 */
+#define DSI_CPU_WDAT_0 0x030 /* DSI CPU Packet Write Data Register 0 */
+ u32 cmd0;
+ u32 cmd1;
+ u32 cmd2;
+ u32 cmd3;
+ u32 dat0;
+ u32 status0;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 reserved3[2];
+
+ u32 smt_cmd;
+ u32 smt_ctrl0;
+ u32 smt_ctrl1;
+ u32 reserved4[1];
+
+ u32 rx0_status;
+
+/* Rx Packet Header - data from slave device */
+#define DSI_RX_PKT_HDR_0 0x064
+ u32 rx0_header;
+ u32 rx1_status;
+ u32 rx1_header;
+ u32 rx_ctrl;
+ u32 rx_ctrl1;
+ u32 rx2_status;
+ u32 rx2_header;
+ u32 reserved5[1];
+
+ u32 phy_ctrl1;
+#define DSI_PHY_CTRL_2 0x088 /* DPHY Control Register 2 */
+#define DSI_PHY_CTRL_3 0x08C /* DPHY Control Register 3 */
+ u32 phy_ctrl2;
+ u32 phy_ctrl3;
+ u32 phy_status0;
+ u32 phy_status1;
+ u32 reserved6[5];
+ u32 phy_status2;
+
+#define DSI_PHY_RCOMP_0 0x0B0 /* DPHY Rcomp Control Register */
+ u32 phy_rcomp0;
+ u32 reserved7[3];
+#define DSI_PHY_TIME_0 0x0C0 /* DPHY Timing Control Register 0 */
+#define DSI_PHY_TIME_1 0x0C4 /* DPHY Timing Control Register 1 */
+#define DSI_PHY_TIME_2 0x0C8 /* DPHY Timing Control Register 2 */
+#define DSI_PHY_TIME_3 0x0CC /* DPHY Timing Control Register 3 */
+#define DSI_PHY_TIME_4 0x0D0 /* DPHY Timing Control Register 4 */
+#define DSI_PHY_TIME_5 0x0D4 /* DPHY Timing Control Register 5 */
+ u32 phy_timing0;
+ u32 phy_timing1;
+ u32 phy_timing2;
+ u32 phy_timing3;
+ u32 phy_code_0;
+ u32 phy_code_1;
+ u32 reserved8[2];
+ u32 mem_ctrl;
+ u32 tx_timer;
+ u32 rx_timer;
+ u32 turn_timer;
+ u32 reserved9[4];
+
+#define DSI_LCD1_CTRL_0 0x100 /* DSI Active Panel 1 Control register 0 */
+#define DSI_LCD1_CTRL_1 0x104 /* DSI Active Panel 1 Control register 1 */
+#define DSI_LCD1_TIMING_0 0x110 /* Timing register 0 */
+#define DSI_LCD1_TIMING_1 0x114 /* Timing register 1 */
+#define DSI_LCD1_TIMING_2 0x118 /* Timing register 2 */
+#define DSI_LCD1_TIMING_3 0x11C /* Timing register 3 */
+#define DSI_LCD1_WC_0 0x120 /* Word Count register 0 */
+#define DSI_LCD1_WC_1 0x124 /* Word Count register 1 */
+#define DSI_LCD1_WC_2 0x128 /* Word Count register 2 */
+ struct dsi_lcd_regs lcd1;
+ u32 reserved10[11];
+ struct dsi_lcd_regs lcd2;
+};
+
+#define DSI_LCD2_CTRL_0 0x180 /* DSI Active Panel 2 Control register 0 */
+#define DSI_LCD2_CTRL_1 0x184 /* DSI Active Panel 2 Control register 1 */
+#define DSI_LCD2_TIMING_0 0x190 /* Timing register 0 */
+#define DSI_LCD2_TIMING_1 0x194 /* Timing register 1 */
+#define DSI_LCD2_TIMING_2 0x198 /* Timing register 2 */
+#define DSI_LCD2_TIMING_3 0x19C /* Timing register 3 */
+#define DSI_LCD2_WC_0 0x1A0 /* Word Count register 0 */
+#define DSI_LCD2_WC_1 0x1A4 /* Word Count register 1 */
+#define DSI_LCD2_WC_2 0x1A8 /* Word Count register 2 */
+
+/* DSI_CTRL_0 0x0000 DSI Control Register 0 */
+#define DSI_CTRL_0_CFG_SOFT_RST (1<<31)
+#define DSI_CTRL_0_CFG_SOFT_RST_REG (1<<30)
+#define DSI_CTRL_0_CFG_LCD1_TX_EN (1<<8)
+#define DSI_CTRL_0_CFG_LCD1_SLV (1<<4)
+#define DSI_CTRL_0_CFG_LCD1_EN (1<<0)
+
+/* DSI_CTRL_1 0x0004 DSI Control Register 1 */
+#define DSI_CTRL_1_CFG_EOTP (1<<8)
+#define DSI_CTRL_1_CFG_RSVD (2<<4)
+#define DSI_CTRL_1_CFG_LCD2_VCH_NO_MASK (3<<2)
+#define DSI_CTRL_1_CFG_LCD2_VCH_NO_SHIFT 2
+#define DSI_CTRL_1_CFG_LCD1_VCH_NO_MASK (3<<0)
+#define DSI_CTRL_1_CFG_LCD1_VCH_NO_SHIFT 0
+
+/* DSI_LCD1_CTRL_1 0x0104 DSI Active Panel 1 Control Register 1 */
+/* LCD 1 Vsync Reset Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_VSYNC_RST_EN (1<<31)
+/* LCD 1 2K Pixel Buffer Mode Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_M2K_EN (1<<30)
+/* Bit(s) DSI_LCD1_CTRL_1_RSRV_29_23 reserved */
+/* Long Blanking Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HLP_PKT_EN (1<<22)
+/* Extra Long Blanking Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HEX_PKT_EN (1<<21)
+/* Front Porch Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HFP_PKT_EN (1<<20)
+/* hact Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HACT_PKT_EN (1<<19)
+/* Back Porch Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HBP_PKT_EN (1<<18)
+/* hse Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HSE_PKT_EN (1<<17)
+/* hsa Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HSA_PKT_EN (1<<16)
+/* All Item Enable after Pixel Data */
+#define DSI_LCD1_CTRL_1_CFG_L1_ALL_SLOT_EN (1<<15)
+/* Extra Long Packet Enable after Pixel Data */
+#define DSI_LCD1_CTRL_1_CFG_L1_HEX_SLOT_EN (1<<14)
+/* Bit(s) DSI_LCD1_CTRL_1_RSRV_13_11 reserved */
+/* Turn Around Bus at Last h Line */
+#define DSI_LCD1_CTRL_1_CFG_L1_LAST_LINE_TURN (1<<10)
+/* Go to Low Power Every Frame */
+#define DSI_LCD1_CTRL_1_CFG_L1_LPM_FRAME_EN (1<<9)
+/* Go to Low Power Every Line */
+#define DSI_LCD1_CTRL_1_CFG_L1_LPM_LINE_EN (1<<8)
+/* Bit(s) DSI_LCD1_CTRL_1_RSRV_7_4 reserved */
+/* DSI Transmission Mode for LCD 1 */
+#define DSI_LCD1_CTRL_1_CFG_L1_BURST_MODE_SHIFT 2
+#define DSI_LCD1_CTRL_1_CFG_L1_BURST_MODE_MASK (3<<2)
+/* LCD 1 Input Data RGB Mode */
+#define DSI_LCD2_CTRL_1_CFG_L1_RGB_TYPE_SHIFT 0
+#define DSI_LCD2_CTRL_1_CFG_L1_RGB_TYPE_MASK (3<<0)
+
+/* DSI_PHY_CTRL_2 0x0088 DPHY Control Register 2 */
+/* Bit(s) DSI_PHY_CTRL_2_RSRV_31_12 reserved */
+/* DPHY LP Receiver Enable */
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_RESC_EN_MASK (0xf<<8)
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_RESC_EN_SHIFT 8
+/* DPHY Data Lane Enable */
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_EN_MASK (0xf<<4)
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_EN_SHIFT 4
+/* DPHY Bus Turn Around */
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_TURN_MASK (0xf)
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_TURN_SHIFT 0
+
+/* DSI_CPU_CMD_1 0x0024 DSI CPU Packet Command Register 1 */
+/* Bit(s) DSI_CPU_CMD_1_RSRV_31_24 reserved */
+/* LPDT TX Enable */
+#define DSI_CPU_CMD_1_CFG_TXLP_LPDT_MASK (0xf<<20)
+#define DSI_CPU_CMD_1_CFG_TXLP_LPDT_SHIFT 20
+/* ULPS TX Enable */
+#define DSI_CPU_CMD_1_CFG_TXLP_ULPS_MASK (0xf<<16)
+#define DSI_CPU_CMD_1_CFG_TXLP_ULPS_SHIFT 16
+/* Low Power TX Trigger Code */
+#define DSI_CPU_CMD_1_CFG_TXLP_TRIGGER_CODE_MASK (0xffff)
+#define DSI_CPU_CMD_1_CFG_TXLP_TRIGGER_CODE_SHIFT 0
+
+/* DSI_PHY_TIME_0 0x00c0 DPHY Timing Control Register 0 */
+/* Length of HS Exit Period in tx_clk_esc Cycles */
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_EXIT_MASK (0xff<<24)
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_EXIT_SHIFT 24
+/* DPHY HS Trail Period Length */
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_TRAIL_MASK (0xff<<16)
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_TRAIL_SHIFT 16
+/* DPHY HS Zero State Length */
+#define DSI_PHY_TIME_0_CDG_CSR_TIME_HS_ZERO_MASK (0xff<<8)
+#define DSI_PHY_TIME_0_CDG_CSR_TIME_HS_ZERO_SHIFT 8
+/* DPHY HS Prepare State Length */
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_PREP_MASK (0xff)
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_PREP_SHIFT 0
+
+/* DSI_PHY_TIME_1 0x00c4 DPHY Timing Control Register 1 */
+/* Time to Drive LP-00 by New Transmitter */
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_TA_GET_MASK (0xff<<24)
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_TA_GET_SHIFT 24
+/* Time to Drive LP-00 after Turn Request */
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_TA_GO_MASK (0xff<<16)
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_TA_GO_SHIFT 16
+/* DPHY HS Wakeup Period Length */
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_WAKEUP_MASK (0xffff)
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_WAKEUP_SHIFT 0
+
+/* DSI_PHY_TIME_2 0x00c8 DPHY Timing Control Register 2 */
+/* DPHY CLK Exit Period Length */
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_EXIT_MASK (0xff<<24)
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_EXIT_SHIFT 24
+/* DPHY CLK Trail Period Length */
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_TRAIL_MASK (0xff<<16)
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_TRAIL_SHIFT 16
+/* DPHY CLK Zero State Length */
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_ZERO_MASK (0xff<<8)
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_ZERO_SHIFT 8
+/* DPHY CLK LP Length */
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_LPX_MASK (0xff)
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_LPX_SHIFT 0
+
+/* DSI_PHY_TIME_3 0x00cc DPHY Timing Control Register 3 */
+/* Bit(s) DSI_PHY_TIME_3_RSRV_31_16 reserved */
+/* DPHY LP Length */
+#define DSI_PHY_TIME_3_CFG_CSR_TIME_LPX_MASK (0xff<<8)
+#define DSI_PHY_TIME_3_CFG_CSR_TIME_LPX_SHIFT 8
+/* DPHY HS req to rdy Length */
+#define DSI_PHY_TIME_3_CFG_CSR_TIME_REQRDY_MASK (0xff)
+#define DSI_PHY_TIME_3_CFG_CSR_TIME_REQRDY_SHIFT 0
+
+/*
+ * DSI timings
+ * PXA988 has a different ESC clock from MMP2/MMP3.
+ * It is used in dsi_set_dphy() in pxa688_phy.c
+ * as the low power mode clock.
+ */
+#ifdef CONFIG_CPU_PXA988
+#define DSI_ESC_CLK 52 /* Unit: MHz */
+#define DSI_ESC_CLK_T 19 /* Unit: ns */
+#else
+#define DSI_ESC_CLK 66 /* Unit: MHz */
+#define DSI_ESC_CLK_T 15 /* Unit: ns */
+#endif
+
+/* LVDS */
+/* LVDS_PHY_CTRL */
+#define LVDS_PHY_CTL 0x2A4
+#define LVDS_PLL_LOCK (1 << 31)
+#define LVDS_PHY_EXT_MASK (7 << 28)
+#define LVDS_PHY_EXT_SHIFT (28)
+#define LVDS_CLK_PHASE_MASK (0x7f << 16)
+#define LVDS_CLK_PHASE_SHIFT (16)
+#define LVDS_SSC_RESET_EXT (1 << 13)
+#define LVDS_SSC_MODE_DOWN_SPREAD (1 << 12)
+#define LVDS_SSC_EN (1 << 11)
+#define LVDS_PU_PLL (1 << 10)
+#define LVDS_PU_TX (1 << 9)
+#define LVDS_PU_IVREF (1 << 8)
+#define LVDS_CLK_SEL (1 << 7)
+#define LVDS_CLK_SEL_LVDS_PCLK (1 << 7)
+#define LVDS_PD_CH_MASK (0x3f << 1)
+#define LVDS_PD_CH(ch) ((ch) << 1)
+#define LVDS_RST (1 << 0)
+
+#define LVDS_PHY_CTL_EXT 0x2A8
+
+/* LVDS_PHY_CTRL_EXT1 */
+#define LVDS_SSC_RNGE_MASK (0x7ff << 16)
+#define LVDS_SSC_RNGE_SHIFT (16)
+#define LVDS_RESERVE_IN_MASK (0xf << 12)
+#define LVDS_RESERVE_IN_SHIFT (12)
+#define LVDS_TEST_MON_MASK (0x7 << 8)
+#define LVDS_TEST_MON_SHIFT (8)
+#define LVDS_POL_SWAP_MASK (0x3f << 0)
+#define LVDS_POL_SWAP_SHIFT (0)
+
+/* LVDS_PHY_CTRL_EXT2 */
+#define LVDS_TX_DIF_AMP_MASK (0xf << 24)
+#define LVDS_TX_DIF_AMP_SHIFT (24)
+#define LVDS_TX_DIF_CM_MASK (0x3 << 22)
+#define LVDS_TX_DIF_CM_SHIFT (22)
+#define LVDS_SELLV_TXCLK_MASK (0x1f << 16)
+#define LVDS_SELLV_TXCLK_SHIFT (16)
+#define LVDS_TX_CMFB_EN (0x1 << 15)
+#define LVDS_TX_TERM_EN (0x1 << 14)
+#define LVDS_SELLV_TXDATA_MASK (0x1f << 8)
+#define LVDS_SELLV_TXDATA_SHIFT (8)
+#define LVDS_SELLV_OP7_MASK (0x3 << 6)
+#define LVDS_SELLV_OP7_SHIFT (6)
+#define LVDS_SELLV_OP6_MASK (0x3 << 4)
+#define LVDS_SELLV_OP6_SHIFT (4)
+#define LVDS_SELLV_OP9_MASK (0x3 << 2)
+#define LVDS_SELLV_OP9_SHIFT (2)
+#define LVDS_STRESSTST_EN (0x1 << 0)
+
+/* LVDS_PHY_CTRL_EXT3 */
+#define LVDS_KVCO_MASK (0xf << 28)
+#define LVDS_KVCO_SHIFT (28)
+#define LVDS_CTUNE_MASK (0x3 << 26)
+#define LVDS_CTUNE_SHIFT (26)
+#define LVDS_VREG_IVREF_MASK (0x3 << 24)
+#define LVDS_VREG_IVREF_SHIFT (24)
+#define LVDS_VDDL_MASK (0xf << 20)
+#define LVDS_VDDL_SHIFT (20)
+#define LVDS_VDDM_MASK (0x3 << 18)
+#define LVDS_VDDM_SHIFT (18)
+#define LVDS_FBDIV_MASK (0xf << 8)
+#define LVDS_FBDIV_SHIFT (8)
+#define LVDS_REFDIV_MASK (0x7f << 0)
+#define LVDS_REFDIV_SHIFT (0)
+
+/* LVDS_PHY_CTRL_EXT4 */
+#define LVDS_SSC_FREQ_DIV_MASK (0xffff << 16)
+#define LVDS_SSC_FREQ_DIV_SHIFT (16)
+#define LVDS_INTPI_MASK (0xf << 12)
+#define LVDS_INTPI_SHIFT (12)
+#define LVDS_VCODIV_SEL_SE_MASK (0xf << 8)
+#define LVDS_VCODIV_SEL_SE_SHIFT (8)
+#define LVDS_RESET_INTP_EXT (0x1 << 7)
+#define LVDS_VCO_VRNG_MASK (0x7 << 4)
+#define LVDS_VCO_VRNG_SHIFT (4)
+#define LVDS_PI_EN (0x1 << 3)
+#define LVDS_ICP_MASK (0x7 << 0)
+#define LVDS_ICP_SHIFT (0)
+
+/* LVDS_PHY_CTRL_EXT5 */
+#define LVDS_FREQ_OFFSET_MASK (0x1ffff << 15)
+#define LVDS_FREQ_OFFSET_SHIFT (15)
+#define LVDS_FREQ_OFFSET_VALID (0x1 << 2)
+#define LVDS_FREQ_OFFSET_MODE_CK_DIV4_OUT (0x1 << 1)
+#define LVDS_FREQ_OFFSET_MODE_EN (0x1 << 0)
+
+/* VDMA */
+struct vdma_ch_regs {
+#define VDMA_DC_SADDR_1 0x320
+#define VDMA_DC_SADDR_2 0x3A0
+#define VDMA_DC_SZ_1 0x324
+#define VDMA_DC_SZ_2 0x3A4
+#define VDMA_CTRL_1 0x328
+#define VDMA_CTRL_2 0x3A8
+#define VDMA_SRC_SZ_1 0x32C
+#define VDMA_SRC_SZ_2 0x3AC
+#define VDMA_SA_1 0x330
+#define VDMA_SA_2 0x3B0
+#define VDMA_DA_1 0x334
+#define VDMA_DA_2 0x3B4
+#define VDMA_SZ_1 0x338
+#define VDMA_SZ_2 0x3B8
+ u32 dc_saddr;
+ u32 dc_size;
+ u32 ctrl;
+ u32 src_size;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 dst_size;
+#define VDMA_PITCH_1 0x33C
+#define VDMA_PITCH_2 0x3BC
+#define VDMA_ROT_CTRL_1 0x340
+#define VDMA_ROT_CTRL_2 0x3C0
+#define VDMA_RAM_CTRL0_1 0x344
+#define VDMA_RAM_CTRL0_2 0x3C4
+#define VDMA_RAM_CTRL1_1 0x348
+#define VDMA_RAM_CTRL1_2 0x3C8
+ u32 pitch;
+ u32 rot_ctrl;
+ u32 ram_ctrl0;
+ u32 ram_ctrl1;
+
+};
+struct vdma_regs {
+#define VDMA_ARBR_CTRL 0x300
+#define VDMA_IRQR 0x304
+#define VDMA_IRQM 0x308
+#define VDMA_IRQS 0x30C
+#define VDMA_MDMA_ARBR_CTRL 0x310
+ u32 arbr_ctr;
+ u32 irq_raw;
+ u32 irq_mask;
+ u32 irq_status;
+ u32 mdma_arbr_ctrl;
+ u32 reserved[3];
+
+ struct vdma_ch_regs ch1;
+ u32 reserved2[21];
+ struct vdma_ch_regs ch2;
+};
+
+/* CMU */
+#define CMU_PIP_DE_H_CFG 0x0008
+#define CMU_PRI1_H_CFG 0x000C
+#define CMU_PRI2_H_CFG 0x0010
+#define CMU_ACE_MAIN_DE1_H_CFG 0x0014
+#define CMU_ACE_MAIN_DE2_H_CFG 0x0018
+#define CMU_ACE_PIP_DE1_H_CFG 0x001C
+#define CMU_ACE_PIP_DE2_H_CFG 0x0020
+#define CMU_PIP_DE_V_CFG 0x0024
+#define CMU_PRI_V_CFG 0x0028
+#define CMU_ACE_MAIN_DE_V_CFG 0x002C
+#define CMU_ACE_PIP_DE_V_CFG 0x0030
+#define CMU_BAR_0_CFG 0x0034
+#define CMU_BAR_1_CFG 0x0038
+#define CMU_BAR_2_CFG 0x003C
+#define CMU_BAR_3_CFG 0x0040
+#define CMU_BAR_4_CFG 0x0044
+#define CMU_BAR_5_CFG 0x0048
+#define CMU_BAR_6_CFG 0x004C
+#define CMU_BAR_7_CFG 0x0050
+#define CMU_BAR_8_CFG 0x0054
+#define CMU_BAR_9_CFG 0x0058
+#define CMU_BAR_10_CFG 0x005C
+#define CMU_BAR_11_CFG 0x0060
+#define CMU_BAR_12_CFG 0x0064
+#define CMU_BAR_13_CFG 0x0068
+#define CMU_BAR_14_CFG 0x006C
+#define CMU_BAR_15_CFG 0x0070
+#define CMU_BAR_CTRL 0x0074
+#define PATTERN_TOTAL 0x0078
+#define PATTERN_ACTIVE 0x007C
+#define PATTERN_FRONT_PORCH 0x0080
+#define PATTERN_BACK_PORCH 0x0084
+#define CMU_CLK_CTRL 0x0088
+
+#define CMU_ICSC_M_C0_L 0x0900
+#define CMU_ICSC_M_C0_H 0x0901
+#define CMU_ICSC_M_C1_L 0x0902
+#define CMU_ICSC_M_C1_H 0x0903
+#define CMU_ICSC_M_C2_L 0x0904
+#define CMU_ICSC_M_C2_H 0x0905
+#define CMU_ICSC_M_C3_L 0x0906
+#define CMU_ICSC_M_C3_H 0x0907
+#define CMU_ICSC_M_C4_L 0x0908
+#define CMU_ICSC_M_C4_H 0x0909
+#define CMU_ICSC_M_C5_L 0x090A
+#define CMU_ICSC_M_C5_H 0x090B
+#define CMU_ICSC_M_C6_L 0x090C
+#define CMU_ICSC_M_C6_H 0x090D
+#define CMU_ICSC_M_C7_L 0x090E
+#define CMU_ICSC_M_C7_H 0x090F
+#define CMU_ICSC_M_C8_L 0x0910
+#define CMU_ICSC_M_C8_H 0x0911
+#define CMU_ICSC_M_O1_0 0x0914
+#define CMU_ICSC_M_O1_1 0x0915
+#define CMU_ICSC_M_O1_2 0x0916
+#define CMU_ICSC_M_O2_0 0x0918
+#define CMU_ICSC_M_O2_1 0x0919
+#define CMU_ICSC_M_O2_2 0x091A
+#define CMU_ICSC_M_O3_0 0x091C
+#define CMU_ICSC_M_O3_1 0x091D
+#define CMU_ICSC_M_O3_2 0x091E
+#define CMU_ICSC_P_C0_L 0x0920
+#define CMU_ICSC_P_C0_H 0x0921
+#define CMU_ICSC_P_C1_L 0x0922
+#define CMU_ICSC_P_C1_H 0x0923
+#define CMU_ICSC_P_C2_L 0x0924
+#define CMU_ICSC_P_C2_H 0x0925
+#define CMU_ICSC_P_C3_L 0x0926
+#define CMU_ICSC_P_C3_H 0x0927
+#define CMU_ICSC_P_C4_L 0x0928
+#define CMU_ICSC_P_C4_H 0x0929
+#define CMU_ICSC_P_C5_L 0x092A
+#define CMU_ICSC_P_C5_H 0x092B
+#define CMU_ICSC_P_C6_L 0x092C
+#define CMU_ICSC_P_C6_H 0x092D
+#define CMU_ICSC_P_C7_L 0x092E
+#define CMU_ICSC_P_C7_H 0x092F
+#define CMU_ICSC_P_C8_L 0x0930
+#define CMU_ICSC_P_C8_H 0x0931
+#define CMU_ICSC_P_O1_0 0x0934
+#define CMU_ICSC_P_O1_1 0x0935
+#define CMU_ICSC_P_O1_2 0x0936
+#define CMU_ICSC_P_O2_0 0x0938
+#define CMU_ICSC_P_O2_1 0x0939
+#define CMU_ICSC_P_O2_2 0x093A
+#define CMU_ICSC_P_O3_0 0x093C
+#define CMU_ICSC_P_O3_1 0x093D
+#define CMU_ICSC_P_O3_2 0x093E
+#define CMU_BR_M_EN 0x0940
+#define CMU_BR_M_TH1_L 0x0942
+#define CMU_BR_M_TH1_H 0x0943
+#define CMU_BR_M_TH2_L 0x0944
+#define CMU_BR_M_TH2_H 0x0945
+#define CMU_ACE_M_EN 0x0950
+#define CMU_ACE_M_WFG1 0x0951
+#define CMU_ACE_M_WFG2 0x0952
+#define CMU_ACE_M_WFG3 0x0953
+#define CMU_ACE_M_TH0 0x0954
+#define CMU_ACE_M_TH1 0x0955
+#define CMU_ACE_M_TH2 0x0956
+#define CMU_ACE_M_TH3 0x0957
+#define CMU_ACE_M_TH4 0x0958
+#define CMU_ACE_M_TH5 0x0959
+#define CMU_ACE_M_OP0_L 0x095A
+#define CMU_ACE_M_OP0_H 0x095B
+#define CMU_ACE_M_OP5_L 0x095C
+#define CMU_ACE_M_OP5_H 0x095D
+#define CMU_ACE_M_GB2 0x095E
+#define CMU_ACE_M_GB3 0x095F
+#define CMU_ACE_M_MS1 0x0960
+#define CMU_ACE_M_MS2 0x0961
+#define CMU_ACE_M_MS3 0x0962
+#define CMU_BR_P_EN 0x0970
+#define CMU_BR_P_TH1_L 0x0972
+#define CMU_BR_P_TH1_H 0x0973
+#define CMU_BR_P_TH2_L 0x0974
+#define CMU_BR_P_TH2_H 0x0975
+#define CMU_ACE_P_EN 0x0980
+#define CMU_ACE_P_WFG1 0x0981
+#define CMU_ACE_P_WFG2 0x0982
+#define CMU_ACE_P_WFG3 0x0983
+#define CMU_ACE_P_TH0 0x0984
+#define CMU_ACE_P_TH1 0x0985
+#define CMU_ACE_P_TH2 0x0986
+#define CMU_ACE_P_TH3 0x0987
+#define CMU_ACE_P_TH4 0x0988
+#define CMU_ACE_P_TH5 0x0989
+#define CMU_ACE_P_OP0_L 0x098A
+#define CMU_ACE_P_OP0_H 0x098B
+#define CMU_ACE_P_OP5_L 0x098C
+#define CMU_ACE_P_OP5_H 0x098D
+#define CMU_ACE_P_GB2 0x098E
+#define CMU_ACE_P_GB3 0x098F
+#define CMU_ACE_P_MS1 0x0990
+#define CMU_ACE_P_MS2 0x0991
+#define CMU_ACE_P_MS3 0x0992
+#define CMU_FTDC_M_EN 0x09A0
+#define CMU_FTDC_P_EN 0x09A1
+#define CMU_FTDC_INLOW_L 0x09A2
+#define CMU_FTDC_INLOW_H 0x09A3
+#define CMU_FTDC_INHIGH_L 0x09A4
+#define CMU_FTDC_INHIGH_H 0x09A5
+#define CMU_FTDC_OUTLOW_L 0x09A6
+#define CMU_FTDC_OUTLOW_H 0x09A7
+#define CMU_FTDC_OUTHIGH_L 0x09A8
+#define CMU_FTDC_OUTHIGH_H 0x09A9
+#define CMU_FTDC_YLOW 0x09AA
+#define CMU_FTDC_YHIGH 0x09AB
+#define CMU_FTDC_CH1 0x09AC
+#define CMU_FTDC_CH2_L 0x09AE
+#define CMU_FTDC_CH2_H 0x09AF
+#define CMU_FTDC_CH3_L 0x09B0
+#define CMU_FTDC_CH3_H 0x09B1
+#define CMU_FTDC_1_C00_6 0x09B2
+#define CMU_FTDC_1_C01_6 0x09B8
+#define CMU_FTDC_1_C11_6 0x09BE
+#define CMU_FTDC_1_C10_6 0x09C4
+#define CMU_FTDC_1_OFF00_6 0x09CA
+#define CMU_FTDC_1_OFF10_6 0x09D0
+#define CMU_HS_M_EN 0x0A00
+#define CMU_HS_M_AX1_L 0x0A02
+#define CMU_HS_M_AX1_H 0x0A03
+#define CMU_HS_M_AX2_L 0x0A04
+#define CMU_HS_M_AX2_H 0x0A05
+#define CMU_HS_M_AX3_L 0x0A06
+#define CMU_HS_M_AX3_H 0x0A07
+#define CMU_HS_M_AX4_L 0x0A08
+#define CMU_HS_M_AX4_H 0x0A09
+#define CMU_HS_M_AX5_L 0x0A0A
+#define CMU_HS_M_AX5_H 0x0A0B
+#define CMU_HS_M_AX6_L 0x0A0C
+#define CMU_HS_M_AX6_H 0x0A0D
+#define CMU_HS_M_AX7_L 0x0A0E
+#define CMU_HS_M_AX7_H 0x0A0F
+#define CMU_HS_M_AX8_L 0x0A10
+#define CMU_HS_M_AX8_H 0x0A11
+#define CMU_HS_M_AX9_L 0x0A12
+#define CMU_HS_M_AX9_H 0x0A13
+#define CMU_HS_M_AX10_L 0x0A14
+#define CMU_HS_M_AX10_H 0x0A15
+#define CMU_HS_M_AX11_L 0x0A16
+#define CMU_HS_M_AX11_H 0x0A17
+#define CMU_HS_M_AX12_L 0x0A18
+#define CMU_HS_M_AX12_H 0x0A19
+#define CMU_HS_M_AX13_L 0x0A1A
+#define CMU_HS_M_AX13_H 0x0A1B
+#define CMU_HS_M_AX14_L 0x0A1C
+#define CMU_HS_M_AX14_H 0x0A1D
+#define CMU_HS_M_H1_H14 0x0A1E
+#define CMU_HS_M_S1_S14 0x0A2C
+#define CMU_HS_M_GL 0x0A3A
+#define CMU_HS_M_MAXSAT_RGB_Y_L 0x0A3C
+#define CMU_HS_M_MAXSAT_RGB_Y_H 0x0A3D
+#define CMU_HS_M_MAXSAT_RCR_L 0x0A3E
+#define CMU_HS_M_MAXSAT_RCR_H 0x0A3F
+#define CMU_HS_M_MAXSAT_RCB_L 0x0A40
+#define CMU_HS_M_MAXSAT_RCB_H 0x0A41
+#define CMU_HS_M_MAXSAT_GCR_L 0x0A42
+#define CMU_HS_M_MAXSAT_GCR_H 0x0A43
+#define CMU_HS_M_MAXSAT_GCB_L 0x0A44
+#define CMU_HS_M_MAXSAT_GCB_H 0x0A45
+#define CMU_HS_M_MAXSAT_BCR_L 0x0A46
+#define CMU_HS_M_MAXSAT_BCR_H 0x0A47
+#define CMU_HS_M_MAXSAT_BCB_L 0x0A48
+#define CMU_HS_M_MAXSAT_BCB_H 0x0A49
+#define CMU_HS_M_ROFF_L 0x0A4A
+#define CMU_HS_M_ROFF_H 0x0A4B
+#define CMU_HS_M_GOFF_L 0x0A4C
+#define CMU_HS_M_GOFF_H 0x0A4D
+#define CMU_HS_M_BOFF_L 0x0A4E
+#define CMU_HS_M_BOFF_H 0x0A4F
+#define CMU_HS_P_EN 0x0A50
+#define CMU_HS_P_AX1_L 0x0A52
+#define CMU_HS_P_AX1_H 0x0A53
+#define CMU_HS_P_AX2_L 0x0A54
+#define CMU_HS_P_AX2_H 0x0A55
+#define CMU_HS_P_AX3_L 0x0A56
+#define CMU_HS_P_AX3_H 0x0A57
+#define CMU_HS_P_AX4_L 0x0A58
+#define CMU_HS_P_AX4_H 0x0A59
+#define CMU_HS_P_AX5_L 0x0A5A
+#define CMU_HS_P_AX5_H 0x0A5B
+#define CMU_HS_P_AX6_L 0x0A5C
+#define CMU_HS_P_AX6_H 0x0A5D
+#define CMU_HS_P_AX7_L 0x0A5E
+#define CMU_HS_P_AX7_H 0x0A5F
+#define CMU_HS_P_AX8_L 0x0A60
+#define CMU_HS_P_AX8_H 0x0A61
+#define CMU_HS_P_AX9_L 0x0A62
+#define CMU_HS_P_AX9_H 0x0A63
+#define CMU_HS_P_AX10_L 0x0A64
+#define CMU_HS_P_AX10_H 0x0A65
+#define CMU_HS_P_AX11_L 0x0A66
+#define CMU_HS_P_AX11_H 0x0A67
+#define CMU_HS_P_AX12_L 0x0A68
+#define CMU_HS_P_AX12_H 0x0A69
+#define CMU_HS_P_AX13_L 0x0A6A
+#define CMU_HS_P_AX13_H 0x0A6B
+#define CMU_HS_P_AX14_L 0x0A6C
+#define CMU_HS_P_AX14_H 0x0A6D
+#define CMU_HS_P_H1_H14 0x0A6E
+#define CMU_HS_P_S1_S14 0x0A7C
+#define CMU_HS_P_GL 0x0A8A
+#define CMU_HS_P_MAXSAT_RGB_Y_L 0x0A8C
+#define CMU_HS_P_MAXSAT_RGB_Y_H 0x0A8D
+#define CMU_HS_P_MAXSAT_RCR_L 0x0A8E
+#define CMU_HS_P_MAXSAT_RCR_H 0x0A8F
+#define CMU_HS_P_MAXSAT_RCB_L 0x0A90
+#define CMU_HS_P_MAXSAT_RCB_H 0x0A91
+#define CMU_HS_P_MAXSAT_GCR_L 0x0A92
+#define CMU_HS_P_MAXSAT_GCR_H 0x0A93
+#define CMU_HS_P_MAXSAT_GCB_L 0x0A94
+#define CMU_HS_P_MAXSAT_GCB_H 0x0A95
+#define CMU_HS_P_MAXSAT_BCR_L 0x0A96
+#define CMU_HS_P_MAXSAT_BCR_H 0x0A97
+#define CMU_HS_P_MAXSAT_BCB_L 0x0A98
+#define CMU_HS_P_MAXSAT_BCB_H 0x0A99
+#define CMU_HS_P_ROFF_L 0x0A9A
+#define CMU_HS_P_ROFF_H 0x0A9B
+#define CMU_HS_P_GOFF_L 0x0A9C
+#define CMU_HS_P_GOFF_H 0x0A9D
+#define CMU_HS_P_BOFF_L 0x0A9E
+#define CMU_HS_P_BOFF_H 0x0A9F
+#define CMU_GLCSC_M_C0_L 0x0AA0
+#define CMU_GLCSC_M_C0_H 0x0AA1
+#define CMU_GLCSC_M_C1_L 0x0AA2
+#define CMU_GLCSC_M_C1_H 0x0AA3
+#define CMU_GLCSC_M_C2_L 0x0AA4
+#define CMU_GLCSC_M_C2_H 0x0AA5
+#define CMU_GLCSC_M_C3_L 0x0AA6
+#define CMU_GLCSC_M_C3_H 0x0AA7
+#define CMU_GLCSC_M_C4_L 0x0AA8
+#define CMU_GLCSC_M_C4_H 0x0AA9
+#define CMU_GLCSC_M_C5_L 0x0AAA
+#define CMU_GLCSC_M_C5_H 0x0AAB
+#define CMU_GLCSC_M_C6_L 0x0AAC
+#define CMU_GLCSC_M_C6_H 0x0AAD
+#define CMU_GLCSC_M_C7_L 0x0AAE
+#define CMU_GLCSC_M_C7_H 0x0AAF
+#define CMU_GLCSC_M_C8_L 0x0AB0
+#define CMU_GLCSC_M_C8_H 0x0AB1
+#define CMU_GLCSC_M_O1_1 0x0AB4
+#define CMU_GLCSC_M_O1_2 0x0AB5
+#define CMU_GLCSC_M_O1_3 0x0AB6
+#define CMU_GLCSC_M_O2_1 0x0AB8
+#define CMU_GLCSC_M_O2_2 0x0AB9
+#define CMU_GLCSC_M_O2_3 0x0ABA
+#define CMU_GLCSC_M_O3_1 0x0ABC
+#define CMU_GLCSC_M_O3_2 0x0ABD
+#define CMU_GLCSC_M_O3_3 0x0ABE
+#define CMU_GLCSC_P_C0_L 0x0AC0
+#define CMU_GLCSC_P_C0_H 0x0AC1
+#define CMU_GLCSC_P_C1_L 0x0AC2
+#define CMU_GLCSC_P_C1_H 0x0AC3
+#define CMU_GLCSC_P_C2_L 0x0AC4
+#define CMU_GLCSC_P_C2_H 0x0AC5
+#define CMU_GLCSC_P_C3_L 0x0AC6
+#define CMU_GLCSC_P_C3_H 0x0AC7
+#define CMU_GLCSC_P_C4_L 0x0AC8
+#define CMU_GLCSC_P_C4_H 0x0AC9
+#define CMU_GLCSC_P_C5_L 0x0ACA
+#define CMU_GLCSC_P_C5_H 0x0ACB
+#define CMU_GLCSC_P_C6_L 0x0ACC
+#define CMU_GLCSC_P_C6_H 0x0ACD
+#define CMU_GLCSC_P_C7_L 0x0ACE
+#define CMU_GLCSC_P_C7_H 0x0ACF
+#define CMU_GLCSC_P_C8_L 0x0AD0
+#define CMU_GLCSC_P_C8_H 0x0AD1
+#define CMU_GLCSC_P_O1_1 0x0AD4
+#define CMU_GLCSC_P_O1_2 0x0AD5
+#define CMU_GLCSC_P_O1_3 0x0AD6
+#define CMU_GLCSC_P_O2_1 0x0AD8
+#define CMU_GLCSC_P_O2_2 0x0AD9
+#define CMU_GLCSC_P_O2_3 0x0ADA
+#define CMU_GLCSC_P_O3_1 0x0ADC
+#define CMU_GLCSC_P_O3_2 0x0ADD
+#define CMU_GLCSC_P_O3_3 0x0ADE
+#define CMU_PIXVAL_M_EN 0x0AE0
+#define CMU_PIXVAL_P_EN 0x0AE1
+
+#define CMU_CLK_CTRL_TCLK 0x0
+#define CMU_CLK_CTRL_SCLK 0x2
+#define CMU_CLK_CTRL_MSK 0x2
+#define CMU_CLK_CTRL_ENABLE 0x1
+
+#define LCD_TOP_CTRL_TV 0x2
+#define LCD_TOP_CTRL_PN 0x0
+#define LCD_TOP_CTRL_SEL_MSK 0x2
+#define LCD_IO_CMU_IN_SEL_MSK (0x3 << 20)
+#define LCD_IO_CMU_IN_SEL_TV 0
+#define LCD_IO_CMU_IN_SEL_PN 1
+#define LCD_IO_CMU_IN_SEL_PN2 2
+#define LCD_IO_TV_OUT_SEL_MSK (0x3 << 26)
+#define LCD_IO_PN_OUT_SEL_MSK (0x3 << 24)
+#define LCD_IO_PN2_OUT_SEL_MSK (0x3 << 28)
+#define LCD_IO_TV_OUT_SEL_NON 3
+#define LCD_IO_PN_OUT_SEL_NON 3
+#define LCD_IO_PN2_OUT_SEL_NON 3
+#define LCD_TOP_CTRL_CMU_ENABLE 0x1
+#define LCD_IO_OVERL_MSK 0xC00000
+#define LCD_IO_OVERL_TV 0x0
+#define LCD_IO_OVERL_LCD1 0x400000
+#define LCD_IO_OVERL_LCD2 0xC00000
+#define HINVERT_MSK 0x4
+#define VINVERT_MSK 0x8
+#define HINVERT_LEN 0x2
+#define VINVERT_LEN 0x3
+
+#define CMU_CTRL 0x88
+#define CMU_CTRL_A0_MSK 0x6
+#define CMU_CTRL_A0_TV 0x0
+#define CMU_CTRL_A0_LCD1 0x1
+#define CMU_CTRL_A0_LCD2 0x2
+#define CMU_CTRL_A0_HDMI 0x3
+
+#define ICR_DRV_ROUTE_OFF 0x0
+#define ICR_DRV_ROUTE_TV 0x1
+#define ICR_DRV_ROUTE_LCD1 0x2
+#define ICR_DRV_ROUTE_LCD2 0x3
+
+enum {
+ PATH_PN = 0,
+ PATH_TV,
+ PATH_P2,
+};
+
+/*
+ * mmphw_path_plat describes the part of the mmp path related info
+ * that is hidden in the display driver and not exported to the buffer driver
+ */
+struct mmphw_ctrl;
+struct mmphw_path_plat {
+ int id;
+ struct mmphw_ctrl *ctrl;
+ struct mmp_path *path;
+ u32 path_config;
+ u32 link_config;
+};
+
+/* mmp ctrl describes mmp controller related info */
+struct mmphw_ctrl {
+ /* platform related, get from config */
+ const char *name;
+ int irq;
+ void *reg_base;
+ struct clk *clk;
+
+ /* sys info */
+ struct device *dev;
+
+ /* state */
+ int open_count;
+ int status;
+ struct mutex access_ok;
+
+ /* paths */
+ int path_num;
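+ /* per-path platform data follows the struct; its length is path_num */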
+ struct mmphw_path_plat path_plats[0];
+};
+
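+/* overlays with an odd dmafetch id carry video data, even ids carry graphics */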
+static inline int overlay_is_vid(struct mmp_overlay *overlay)
+{
+ return overlay->dmafetch_id & 1;
+}
+
+static inline struct mmphw_path_plat *path_to_path_plat(struct mmp_path *path)
+{
+ return (struct mmphw_path_plat *)path->plat_data;
+}
+
+static inline struct mmphw_ctrl *path_to_ctrl(struct mmp_path *path)
+{
+ return path_to_path_plat(path)->ctrl;
+}
+
+static inline struct mmphw_ctrl *overlay_to_ctrl(struct mmp_overlay *overlay)
+{
+ return path_to_ctrl(overlay->path);
+}
+
+static inline void *ctrl_regs(struct mmp_path *path)
+{
+ return path_to_ctrl(path)->reg_base;
+}
+
+/* path regs, for registers laid out symmetrically for each path */
+static inline struct lcd_regs *path_regs(struct mmp_path *path)
+{
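+ /* register blocks are symmetric: TV path at the base, PN at +0xc0, PN2 at +0x200 */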
+ if (path->id == PATH_PN)
+ return (struct lcd_regs *)(ctrl_regs(path) + 0xc0);
+ else if (path->id == PATH_TV)
+ return (struct lcd_regs *)ctrl_regs(path);
+ else if (path->id == PATH_P2)
+ return (struct lcd_regs *)(ctrl_regs(path) + 0x200);
+ else {
+ dev_err(path->dev, "path id %d invalid\n", path->id);
+ BUG_ON(1);
+ return NULL;
+ }
+}
+
+#ifdef CONFIG_MMP_DISP_SPI
+extern int lcd_spi_register(struct mmphw_ctrl *ctrl);
+#endif
+#endif /* _MMP_CTRL_H_ */
diff --git a/drivers/video/mmp/hw/mmp_spi.c b/drivers/video/mmp/hw/mmp_spi.c
new file mode 100644
index 000000000000..e62ca7bf0d5e
--- /dev/null
+++ b/drivers/video/mmp/hw/mmp_spi.c
@@ -0,0 +1,180 @@
+/*
+ * linux/drivers/video/mmp/hw/mmp_spi.c
+ * using the SPI unit in the LCD controller to send commands
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Guoqing Li <ligq@marvell.com>
+ * Lisa Du <cldu@marvell.com>
+ * Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include "mmp_ctrl.h"
+
+/**
+ * lcd_spi_write - write a command word to the LCD SPI port
+ * @spi: SPI device backed by the LCD controller
+ * @data: 8/16/32-bit, MSB justified data to write.
+ *
+ * Waits for the bus transfer complete IRQ.
+ * The caller is expected to perform the necessary locking.
+ *
+ * Returns:
+ * %-ETIMEDOUT timeout occurred
+ * 0 success
+ */
+static inline int lcd_spi_write(struct spi_device *spi, u32 data)
+{
+ int timeout = 100000, isr, ret = 0;
+ u32 tmp;
+ void *reg_base =
+ *(void **)spi_master_get_devdata(spi->master);
+
+ /* clear ISR */
+ writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);
+
+ switch (spi->bits_per_word) {
+ case 8:
+ writel_relaxed((u8)data, reg_base + LCD_SPU_SPI_TXDATA);
+ break;
+ case 16:
+ writel_relaxed((u16)data, reg_base + LCD_SPU_SPI_TXDATA);
+ break;
+ case 32:
+ writel_relaxed((u32)data, reg_base + LCD_SPU_SPI_TXDATA);
+ break;
+ default:
+ dev_err(&spi->dev, "Wrong spi bit length\n");
+ }
+
+ /* SPI start to send command */
+ tmp = readl_relaxed(reg_base + LCD_SPU_SPI_CTRL);
+ tmp &= ~CFG_SPI_START_MASK;
+ tmp |= CFG_SPI_START(1);
+ writel(tmp, reg_base + LCD_SPU_SPI_CTRL);
+
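+ /* poll the IRQ status for the SPI done bit; 100000 * 100us bounds the wait at ~10s */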
+ isr = readl_relaxed(reg_base + SPU_IRQ_ISR);
+ while (!(isr & SPI_IRQ_ENA_MASK)) {
+ udelay(100);
+ isr = readl_relaxed(reg_base + SPU_IRQ_ISR);
+ if (!--timeout) {
+ ret = -ETIMEDOUT;
+ dev_err(&spi->dev, "spi cmd send time out\n");
+ break;
+ }
+ }
+
+ tmp = readl_relaxed(reg_base + LCD_SPU_SPI_CTRL);
+ tmp &= ~CFG_SPI_START_MASK;
+ tmp |= CFG_SPI_START(0);
+ writel_relaxed(tmp, reg_base + LCD_SPU_SPI_CTRL);
+
+ writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);
+
+ return ret;
+}
+
+static int lcd_spi_setup(struct spi_device *spi)
+{
+ void *reg_base =
+ *(void **)spi_master_get_devdata(spi->master);
+ u32 tmp;
+
+ tmp = CFG_SCLKCNT(16) |
+ CFG_TXBITS(spi->bits_per_word) |
+ CFG_SPI_SEL(1) | CFG_SPI_ENA(1) |
+ CFG_SPI_3W4WB(1);
+ writel(tmp, reg_base + LCD_SPU_SPI_CTRL);
+
+ /*
+ * After setting the mode it takes some time for the SPI signals to
+ * be pulled up; otherwise the waveform is wrong when sending an SPI
+ * command, especially on pxa910h.
+ */
+ tmp = readl_relaxed(reg_base + SPU_IOPAD_CONTROL);
+ if ((tmp & CFG_IOPADMODE_MASK) != IOPAD_DUMB18SPI)
+ writel_relaxed(IOPAD_DUMB18SPI |
+ (tmp & ~CFG_IOPADMODE_MASK),
+ reg_base + SPU_IOPAD_CONTROL);
+ udelay(20);
+ return 0;
+}
+
+static int lcd_spi_one_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_transfer *t;
+ int i;
+
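+ /* push every word of each transfer out through the LCD SPI port, one word per write */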
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ switch (spi->bits_per_word) {
+ case 8:
+ for (i = 0; i < t->len; i++)
+ lcd_spi_write(spi, ((u8 *)t->tx_buf)[i]);
+ break;
+ case 16:
+ for (i = 0; i < t->len/2; i++)
+ lcd_spi_write(spi, ((u16 *)t->tx_buf)[i]);
+ break;
+ case 32:
+ for (i = 0; i < t->len/4; i++)
+ lcd_spi_write(spi, ((u32 *)t->tx_buf)[i]);
+ break;
+ default:
+ dev_err(&spi->dev, "Wrong spi bit length\n");
+ }
+ }
+
+ m->status = 0;
+ if (m->complete)
+ m->complete(m->context);
+ return 0;
+}
+
+int lcd_spi_register(struct mmphw_ctrl *ctrl)
+{
+ struct spi_master *master;
+ void **p_regbase;
+ int err;
+
+ master = spi_alloc_master(ctrl->dev, sizeof(void *));
+ if (!master) {
+ dev_err(ctrl->dev, "unable to allocate SPI master\n");
+ return -ENOMEM;
+ }
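+ /*
+ * The master's drvdata holds only a pointer to the LCD controller
+ * register base; lcd_spi_setup() and lcd_spi_write() fetch it from there.
+ */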
+ p_regbase = spi_master_get_devdata(master);
+ *p_regbase = ctrl->reg_base;
+
+ /* set bus num to 5 to avoid conflict with other spi hosts */
+ master->bus_num = 5;
+ master->num_chipselect = 1;
+ master->setup = lcd_spi_setup;
+ master->transfer = lcd_spi_one_transfer;
+
+ err = spi_register_master(master);
+ if (err < 0) {
+ dev_err(ctrl->dev, "unable to register SPI master\n");
+ spi_master_put(master);
+ return err;
+ }
+
+ dev_info(&master->dev, "registered\n");
+
+ return 0;
+}
diff --git a/drivers/video/mmp/panel/Kconfig b/drivers/video/mmp/panel/Kconfig
new file mode 100644
index 000000000000..4b2c4f457b11
--- /dev/null
+++ b/drivers/video/mmp/panel/Kconfig
@@ -0,0 +1,6 @@
+config MMP_PANEL_TPOHVGA
+ bool "tpohvga panel TJ032MD01BW support"
+ depends on SPI_MASTER
+ default n
+ help
+ Support for the TPO TJ032MD01BW HVGA (320x480) panel, configured over SPI.
diff --git a/drivers/video/mmp/panel/Makefile b/drivers/video/mmp/panel/Makefile
new file mode 100644
index 000000000000..2f91611c7e5e
--- /dev/null
+++ b/drivers/video/mmp/panel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MMP_PANEL_TPOHVGA) += tpo_tj032md01bw.o
diff --git a/drivers/video/mmp/panel/tpo_tj032md01bw.c b/drivers/video/mmp/panel/tpo_tj032md01bw.c
new file mode 100644
index 000000000000..998978b08f5e
--- /dev/null
+++ b/drivers/video/mmp/panel/tpo_tj032md01bw.c
@@ -0,0 +1,186 @@
+/*
+ * linux/drivers/video/mmp/panel/tpo_tj032md01bw.c
+ * active panel that uses an SPI interface for initialization
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Guoqing Li <ligq@marvell.com>
+ * Lisa Du <cldu@marvell.com>
+ * Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <video/mmp_disp.h>
+
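+/*
+ * Panel init sequence, sent as 16-bit SPI words; each word appears to pack
+ * the panel register index in the high byte and the value in the low byte
+ * (e.g. 0x0801 writes 0x01 to register 0x08).
+ */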
+static u16 init[] = {
+ 0x0801,
+ 0x0800,
+ 0x0200,
+ 0x0304,
+ 0x040e,
+ 0x0903,
+ 0x0b18,
+ 0x0c53,
+ 0x0d01,
+ 0x0ee0,
+ 0x0f01,
+ 0x1058,
+ 0x201e,
+ 0x210a,
+ 0x220a,
+ 0x231e,
+ 0x2400,
+ 0x2532,
+ 0x2600,
+ 0x27ac,
+ 0x2904,
+ 0x2aa2,
+ 0x2b45,
+ 0x2c45,
+ 0x2d15,
+ 0x2e5a,
+ 0x2fff,
+ 0x306b,
+ 0x310d,
+ 0x3248,
+ 0x3382,
+ 0x34bd,
+ 0x35e7,
+ 0x3618,
+ 0x3794,
+ 0x3801,
+ 0x395d,
+ 0x3aae,
+ 0x3bff,
+ 0x07c9,
+};
+
+static u16 poweroff[] = {
+ 0x07d9,
+};
+
+struct tpohvga_plat_data {
+ void (*plat_onoff)(int status);
+ struct spi_device *spi;
+};
+
+static void tpohvga_onoff(struct mmp_panel *panel, int status)
+{
+ struct tpohvga_plat_data *plat = panel->plat_data;
+ int ret;
+
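+ /* power on: board power first, then the SPI init sequence; power off reverses this */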
+ if (status) {
+ plat->plat_onoff(1);
+
+ ret = spi_write(plat->spi, init, sizeof(init));
+ if (ret < 0)
+ dev_warn(panel->dev, "init cmd failed(%d)\n", ret);
+ } else {
+ ret = spi_write(plat->spi, poweroff, sizeof(poweroff));
+ if (ret < 0)
+ dev_warn(panel->dev, "poweroff cmd failed(%d)\n", ret);
+
+ plat->plat_onoff(0);
+ }
+}
+
+static struct mmp_mode mmp_modes_tpohvga[] = {
+ [0] = {
+ .pixclock_freq = 10394400,
+ .refresh = 60,
+ .xres = 320,
+ .yres = 480,
+ .hsync_len = 10,
+ .left_margin = 15,
+ .right_margin = 10,
+ .vsync_len = 2,
+ .upper_margin = 4,
+ .lower_margin = 2,
+ .invert_pixclock = 1,
+ .pix_fmt_out = PIXFMT_RGB565,
+ },
+};
+
+static int tpohvga_get_modelist(struct mmp_panel *panel,
+ struct mmp_mode **modelist)
+{
+ *modelist = mmp_modes_tpohvga;
+ return 1;
+}
+
+static struct mmp_panel panel_tpohvga = {
+ .name = "tpohvga",
+ .panel_type = PANELTYPE_ACTIVE,
+ .get_modelist = tpohvga_get_modelist,
+ .set_onoff = tpohvga_onoff,
+};
+
+static int tpohvga_probe(struct spi_device *spi)
+{
+ struct mmp_mach_panel_info *mi;
+ int ret;
+ struct tpohvga_plat_data *plat_data;
+
+ /* get configs from platform data */
+ mi = spi->dev.platform_data;
+ if (mi == NULL) {
+ dev_err(&spi->dev, "%s: no platform data defined\n", __func__);
+ return -EINVAL;
+ }
+
+ /* setup spi related info */
+ spi->bits_per_word = 16;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi setup failed %d\n", ret);
+ return ret;
+ }
+
+ plat_data = kzalloc(sizeof(*plat_data), GFP_KERNEL);
+ if (plat_data == NULL)
+ return -ENOMEM;
+
+ plat_data->spi = spi;
+ plat_data->plat_onoff = mi->plat_set_onoff;
+ panel_tpohvga.plat_data = plat_data;
+ panel_tpohvga.plat_path_name = mi->plat_path_name;
+ panel_tpohvga.dev = &spi->dev;
+
+ mmp_register_panel(&panel_tpohvga);
+
+ return 0;
+}
+
+static struct spi_driver panel_tpohvga_driver = {
+ .driver = {
+ .name = "tpo-hvga",
+ .owner = THIS_MODULE,
+ },
+ .probe = tpohvga_probe,
+};
+module_spi_driver(panel_tpohvga_driver);
+
+MODULE_AUTHOR("Lisa Du <cldu@marvell.com>");
+MODULE_DESCRIPTION("Panel driver for tpohvga");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index 35ac9e8bee63..e0f8011a3c4b 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -417,7 +417,7 @@ static void mddi_resume(struct msm_mddi_client_data *cdata)
mddi_set_auto_hibernate(&mddi->client_data, 1);
}
-static int __devinit mddi_get_client_caps(struct mddi_info *mddi)
+static int mddi_get_client_caps(struct mddi_info *mddi)
{
int i, j;
@@ -619,9 +619,8 @@ uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg)
static struct mddi_info mddi_info[2];
-static int __devinit mddi_clk_setup(struct platform_device *pdev,
- struct mddi_info *mddi,
- unsigned long clk_rate)
+static int mddi_clk_setup(struct platform_device *pdev, struct mddi_info *mddi,
+ unsigned long clk_rate)
{
int ret;
@@ -664,7 +663,7 @@ static int __init mddi_rev_data_setup(struct mddi_info *mddi)
return 0;
}
-static int __devinit mddi_probe(struct platform_device *pdev)
+static int mddi_probe(struct platform_device *pdev)
{
struct msm_mddi_platform_data *pdata = pdev->dev.platform_data;
struct mddi_info *mddi = &mddi_info[pdev->id];
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 736887208574..cfdb380ec81e 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1306,7 +1306,7 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
dma_free_writecombine(fbi->device, fbi->fix.smem_len,
fbi->screen_base, fbi->fix.smem_start);
- fbi->screen_base = 0;
+ fbi->screen_base = NULL;
mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 49619b441500..755556ca5b2d 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -369,7 +369,8 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
loop--;
}
- writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
+ reg = readl(host->base + LCDC_VDCTRL4);
+ writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
clk_disable_unprepare(host->clk);
@@ -586,7 +587,7 @@ static struct fb_ops mxsfb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
+static int mxsfb_restore_mode(struct mxsfb_info *host)
{
struct fb_info *fb_info = &host->fb_info;
unsigned line_count;
@@ -677,7 +678,7 @@ static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
return 0;
}
-static int __devinit mxsfb_init_fbinfo(struct mxsfb_info *host)
+static int mxsfb_init_fbinfo(struct mxsfb_info *host)
{
struct fb_info *fb_info = &host->fb_info;
struct fb_var_screeninfo *var = &fb_info->var;
@@ -739,7 +740,7 @@ static int __devinit mxsfb_init_fbinfo(struct mxsfb_info *host)
return 0;
}
-static void __devexit mxsfb_free_videomem(struct mxsfb_info *host)
+static void mxsfb_free_videomem(struct mxsfb_info *host)
{
struct fb_info *fb_info = &host->fb_info;
@@ -772,7 +773,7 @@ static const struct of_device_id mxsfb_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
-static int __devinit mxsfb_probe(struct platform_device *pdev)
+static int mxsfb_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxsfb_dt_ids, &pdev->dev);
@@ -912,7 +913,7 @@ error_alloc_info:
return ret;
}
-static int __devexit mxsfb_remove(struct platform_device *pdev)
+static int mxsfb_remove(struct platform_device *pdev)
{
struct fb_info *fb_info = platform_get_drvdata(pdev);
struct mxsfb_info *host = to_imxfb_host(fb_info);
@@ -949,7 +950,7 @@ static void mxsfb_shutdown(struct platform_device *pdev)
static struct platform_driver mxsfb_driver = {
.probe = mxsfb_probe,
- .remove = __devexit_p(mxsfb_remove),
+ .remove = mxsfb_remove,
.shutdown = mxsfb_shutdown,
.id_table = mxsfb_devtype,
.driver = {
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index afc9521173ef..7ef079c146e7 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -88,7 +88,7 @@ static bool external;
static bool libretto;
static bool nostretch;
static bool nopciburst;
-static char *mode_option __devinitdata = NULL;
+static char *mode_option = NULL;
#ifdef MODULE
@@ -1632,7 +1632,7 @@ static struct fb_ops neofb_ops = {
/* --------------------------------------------------------------------- */
-static struct fb_videomode __devinitdata mode800x480 = {
+static struct fb_videomode mode800x480 = {
.xres = 800,
.yres = 480,
.pixclock = 25000,
@@ -1646,8 +1646,7 @@ static struct fb_videomode __devinitdata mode800x480 = {
.vmode = FB_VMODE_NONINTERLACED
};
-static int __devinit neo_map_mmio(struct fb_info *info,
- struct pci_dev *dev)
+static int neo_map_mmio(struct fb_info *info, struct pci_dev *dev)
{
struct neofb_par *par = info->par;
@@ -1707,8 +1706,8 @@ static void neo_unmap_mmio(struct fb_info *info)
info->fix.mmio_len);
}
-static int __devinit neo_map_video(struct fb_info *info,
- struct pci_dev *dev, int video_len)
+static int neo_map_video(struct fb_info *info, struct pci_dev *dev,
+ int video_len)
{
//unsigned long addr;
@@ -1772,7 +1771,7 @@ static void neo_unmap_video(struct fb_info *info)
info->fix.smem_len);
}
-static int __devinit neo_scan_monitor(struct fb_info *info)
+static int neo_scan_monitor(struct fb_info *info)
{
struct neofb_par *par = info->par;
unsigned char type, display;
@@ -1851,7 +1850,7 @@ static int __devinit neo_scan_monitor(struct fb_info *info)
return 0;
}
-static int __devinit neo_init_hw(struct fb_info *info)
+static int neo_init_hw(struct fb_info *info)
{
struct neofb_par *par = info->par;
int videoRam = 896;
@@ -1939,8 +1938,8 @@ static int __devinit neo_init_hw(struct fb_info *info)
}
-static struct fb_info *__devinit neo_alloc_fb_info(struct pci_dev *dev, const struct
- pci_device_id *id)
+static struct fb_info *neo_alloc_fb_info(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct fb_info *info;
struct neofb_par *par;
@@ -2038,8 +2037,7 @@ static void neo_free_fb_info(struct fb_info *info)
/* --------------------------------------------------------------------- */
-static int __devinit neofb_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct fb_info *info;
u_int h_sync, v_sync;
@@ -2128,7 +2126,7 @@ err_map_mmio:
return err;
}
-static void __devexit neofb_remove(struct pci_dev *dev)
+static void neofb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
@@ -2194,7 +2192,7 @@ static struct pci_driver neofb_driver = {
.name = "neofb",
.id_table = neofb_devices,
.probe = neofb_probe,
- .remove = __devexit_p(neofb_remove)
+ .remove = neofb_remove,
};
/* ************************* init in-kernel code ************************** */
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 475dfee82c4a..32581c72ad09 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -387,7 +387,7 @@ static int nuc900fb_init_registers(struct fb_info *info)
* The buffer should be a non-cached, non-buffered, memory region
* to allow palette and pixel writes without flushing the cache.
*/
-static int __devinit nuc900fb_map_video_memory(struct fb_info *info)
+static int nuc900fb_map_video_memory(struct fb_info *info)
{
struct nuc900fb_info *fbi = info->par;
dma_addr_t map_dma;
@@ -499,7 +499,7 @@ static inline void nuc900fb_cpufreq_deregister(struct nuc900fb_info *info)
static char driver_name[] = "nuc900fb";
-static int __devinit nuc900fb_probe(struct platform_device *pdev)
+static int nuc900fb_probe(struct platform_device *pdev)
{
struct nuc900fb_info *fbi;
struct nuc900fb_display *display;
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index fe13ac567d54..ff228713425e 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -70,34 +70,34 @@ static struct pci_device_id nvidiafb_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, nvidiafb_pci_tbl);
/* command line data, set in nvidiafb_setup() */
-static int flatpanel __devinitdata = -1; /* Autodetect later */
-static int fpdither __devinitdata = -1;
-static int forceCRTC __devinitdata = -1;
-static int hwcur __devinitdata = 0;
-static int noaccel __devinitdata = 0;
-static int noscale __devinitdata = 0;
-static int paneltweak __devinitdata = 0;
-static int vram __devinitdata = 0;
-static int bpp __devinitdata = 8;
-static int reverse_i2c __devinitdata;
+static int flatpanel = -1; /* Autodetect later */
+static int fpdither = -1;
+static int forceCRTC = -1;
+static int hwcur = 0;
+static int noaccel = 0;
+static int noscale = 0;
+static int paneltweak = 0;
+static int vram = 0;
+static int bpp = 8;
+static int reverse_i2c;
#ifdef CONFIG_MTRR
-static bool nomtrr __devinitdata = false;
+static bool nomtrr = false;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
-static int backlight __devinitdata = 1;
+static int backlight = 1;
#else
-static int backlight __devinitdata = 0;
+static int backlight = 0;
#endif
-static char *mode_option __devinitdata = NULL;
+static char *mode_option = NULL;
-static struct fb_fix_screeninfo __devinitdata nvidiafb_fix = {
+static struct fb_fix_screeninfo nvidiafb_fix = {
.type = FB_TYPE_PACKED_PIXELS,
.xpanstep = 8,
.ypanstep = 1,
};
-static struct fb_var_screeninfo __devinitdata nvidiafb_default_var = {
+static struct fb_var_screeninfo nvidiafb_default_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -1105,7 +1105,7 @@ fail:
#define nvidiafb_resume NULL
#endif
-static int __devinit nvidia_set_fbinfo(struct fb_info *info)
+static int nvidia_set_fbinfo(struct fb_info *info)
{
struct fb_monspecs *specs = &info->monspecs;
struct fb_videomode modedb;
@@ -1201,7 +1201,7 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
return nvidiafb_check_var(&info->var, info);
}
-static u32 __devinit nvidia_get_chipset(struct fb_info *info)
+static u32 nvidia_get_chipset(struct fb_info *info)
{
struct nvidia_par *par = info->par;
u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
@@ -1224,7 +1224,7 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
return id;
}
-static u32 __devinit nvidia_get_arch(struct fb_info *info)
+static u32 nvidia_get_arch(struct fb_info *info)
{
struct nvidia_par *par = info->par;
u32 arch = 0;
@@ -1276,8 +1276,7 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
return arch;
}
-static int __devinit nvidiafb_probe(struct pci_dev *pd,
- const struct pci_device_id *ent)
+static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
{
struct nvidia_par *par;
struct fb_info *info;
@@ -1438,7 +1437,7 @@ err_out:
return -ENODEV;
}
-static void __devexit nvidiafb_remove(struct pci_dev *pd)
+static void nvidiafb_remove(struct pci_dev *pd)
{
struct fb_info *info = pci_get_drvdata(pd);
struct nvidia_par *par = info->par;
@@ -1473,7 +1472,7 @@ static void __devexit nvidiafb_remove(struct pci_dev *pd)
* ------------------------------------------------------------------------- */
#ifndef MODULE
-static int __devinit nvidiafb_setup(char *options)
+static int nvidiafb_setup(char *options)
{
char *this_opt;
@@ -1529,7 +1528,7 @@ static struct pci_driver nvidiafb_driver = {
.probe = nvidiafb_probe,
.suspend = nvidiafb_suspend,
.resume = nvidiafb_resume,
- .remove = __devexit_p(nvidiafb_remove),
+ .remove = nvidiafb_remove,
};
/* ------------------------------------------------------------------------- *
@@ -1538,7 +1537,7 @@ static struct pci_driver nvidiafb_driver = {
*
* ------------------------------------------------------------------------- */
-static int __devinit nvidiafb_init(void)
+static int nvidiafb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index b48f95f0dfe2..e512581300fc 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -1,5 +1,5 @@
config FB_OMAP
- tristate "OMAP frame buffer support (EXPERIMENTAL)"
+ tristate "OMAP frame buffer support"
depends on FB
depends on ARCH_OMAP1
select FB_CFB_FILLRECT
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
index b739600c51ac..803fee618d57 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/omap/lcd_mipid.c
@@ -606,7 +606,7 @@ static struct spi_driver mipid_spi_driver = {
.owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
- .remove = __devexit_p(mipid_spi_remove),
+ .remove = mipid_spi_remove,
};
module_spi_driver(mipid_spi_driver);
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
index d877c361abda..b07b2b042e7e 100644
--- a/drivers/video/omap2/Kconfig
+++ b/drivers/video/omap2/Kconfig
@@ -1,9 +1,10 @@
-config OMAP2_VRAM
- bool
-
config OMAP2_VRFB
bool
+if ARCH_OMAP2PLUS
+
source "drivers/video/omap2/dss/Kconfig"
source "drivers/video/omap2/omapfb/Kconfig"
source "drivers/video/omap2/displays/Kconfig"
+
+endif
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
index 5ddef129f798..5ea7cb9aed17 100644
--- a/drivers/video/omap2/Makefile
+++ b/drivers/video/omap2/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_OMAP2_VRAM) += vram.o
obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
obj-$(CONFIG_OMAP2_DSS) += dss/
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index c835aa70f96f..72699f88c002 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -710,27 +710,6 @@ static void acx_panel_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static int acx_panel_suspend(struct omap_dss_device *dssdev)
-{
- dev_dbg(&dssdev->dev, "%s\n", __func__);
- acx_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- return 0;
-}
-
-static int acx_panel_resume(struct omap_dss_device *dssdev)
-{
- int r;
-
- dev_dbg(&dssdev->dev, "%s\n", __func__);
- r = acx_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- return 0;
-}
-
static void acx_panel_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -752,8 +731,6 @@ static struct omap_dss_driver acx_panel_driver = {
.enable = acx_panel_enable,
.disable = acx_panel_disable,
- .suspend = acx_panel_suspend,
- .resume = acx_panel_resume,
.set_timings = acx_panel_set_timings,
.check_timings = acx_panel_check_timings,
@@ -800,7 +777,7 @@ static struct spi_driver acx565akm_spi_driver = {
.owner = THIS_MODULE,
},
.probe = acx565akm_spi_probe,
- .remove = __devexit_p(acx565akm_spi_remove),
+ .remove = acx565akm_spi_remove,
};
module_spi_driver(acx565akm_spi_driver);
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 88295c526815..c904f42d81c1 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -291,30 +291,6 @@ static struct panel_config generic_dpi_panels[] = {
.name = "h4",
},
- /* Unknown panel used in Samsung OMAP2 Apollon */
- {
- {
- .x_res = 480,
- .y_res = 272,
-
- .pixel_clock = 6250,
-
- .hsw = 41,
- .hfp = 2,
- .hbp = 2,
-
- .vsw = 10,
- .vfp = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "apollon",
- },
/* FocalTech ETM070003DH6 */
{
{
@@ -688,40 +664,6 @@ static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
mutex_unlock(&drv_data->lock);
}
-static int generic_dpi_panel_suspend(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
-
- mutex_lock(&drv_data->lock);
-
- generic_dpi_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&drv_data->lock);
-
- return 0;
-}
-
-static int generic_dpi_panel_resume(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&drv_data->lock);
-
- r = generic_dpi_panel_power_on(dssdev);
- if (r)
- goto err;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-err:
- mutex_unlock(&drv_data->lock);
-
- return r;
-}
-
static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -769,8 +711,6 @@ static struct omap_dss_driver dpi_driver = {
.enable = generic_dpi_panel_enable,
.disable = generic_dpi_panel_disable,
- .suspend = generic_dpi_panel_suspend,
- .resume = generic_dpi_panel_resume,
.set_timings = generic_dpi_panel_set_timings,
.get_timings = generic_dpi_panel_get_timings,
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
index 90c1cabf244e..6e5abe8fd2dd 100644
--- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -143,46 +143,12 @@ static void lb035q02_panel_disable(struct omap_dss_device *dssdev)
mutex_unlock(&ld->lock);
}
-static int lb035q02_panel_suspend(struct omap_dss_device *dssdev)
-{
- struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
-
- mutex_lock(&ld->lock);
-
- lb035q02_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&ld->lock);
- return 0;
-}
-
-static int lb035q02_panel_resume(struct omap_dss_device *dssdev)
-{
- struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&ld->lock);
-
- r = lb035q02_panel_power_on(dssdev);
- if (r)
- goto err;
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ld->lock);
- return 0;
-err:
- mutex_unlock(&ld->lock);
- return r;
-}
-
static struct omap_dss_driver lb035q02_driver = {
.probe = lb035q02_panel_probe,
.remove = lb035q02_panel_remove,
.enable = lb035q02_panel_enable,
.disable = lb035q02_panel_disable,
- .suspend = lb035q02_panel_suspend,
- .resume = lb035q02_panel_resume,
.driver = {
.name = "lgphilips_lb035q02_panel",
@@ -250,13 +216,13 @@ static void init_lb035q02_panel(struct spi_device *spi)
lb035q02_write_reg(spi, 0x3b, 0x0806);
}
-static int __devinit lb035q02_panel_spi_probe(struct spi_device *spi)
+static int lb035q02_panel_spi_probe(struct spi_device *spi)
{
init_lb035q02_panel(spi);
return omap_dss_register_driver(&lb035q02_driver);
}
-static int __devexit lb035q02_panel_spi_remove(struct spi_device *spi)
+static int lb035q02_panel_spi_remove(struct spi_device *spi)
{
omap_dss_unregister_driver(&lb035q02_driver);
return 0;
@@ -268,7 +234,7 @@ static struct spi_driver lb035q02_spi_driver = {
.owner = THIS_MODULE,
},
.probe = lb035q02_panel_spi_probe,
- .remove = __devexit_p(lb035q02_panel_spi_remove),
+ .remove = lb035q02_panel_spi_remove,
};
module_spi_driver(lb035q02_spi_driver);
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index 3fc5ad081a21..dd1294750802 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -574,54 +574,6 @@ static void n8x0_panel_disable(struct omap_dss_device *dssdev)
mutex_unlock(&ddata->lock);
}
-static int n8x0_panel_suspend(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
-
- dev_dbg(&dssdev->dev, "suspend\n");
-
- mutex_lock(&ddata->lock);
-
- rfbi_bus_lock();
-
- n8x0_panel_power_off(dssdev);
-
- rfbi_bus_unlock();
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
-static int n8x0_panel_resume(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- int r;
-
- dev_dbg(&dssdev->dev, "resume\n");
-
- mutex_lock(&ddata->lock);
-
- rfbi_bus_lock();
-
- r = n8x0_panel_power_on(dssdev);
-
- rfbi_bus_unlock();
-
- if (r) {
- mutex_unlock(&ddata->lock);
- return r;
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -683,8 +635,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
.enable = n8x0_panel_enable,
.disable = n8x0_panel_disable,
- .suspend = n8x0_panel_suspend,
- .resume = n8x0_panel_resume,
.update = n8x0_panel_update,
.sync = n8x0_panel_sync,
@@ -702,18 +652,25 @@ static struct omap_dss_driver n8x0_panel_driver = {
static int mipid_spi_probe(struct spi_device *spi)
{
+ int r;
+
dev_dbg(&spi->dev, "mipid_spi_probe\n");
spi->mode = SPI_MODE_0;
s_drv_data.spidev = spi;
- return 0;
+ r = omap_dss_register_driver(&n8x0_panel_driver);
+ if (r)
+ pr_err("n8x0_panel: dss driver registration failed\n");
+
+ return r;
}
static int mipid_spi_remove(struct spi_device *spi)
{
dev_dbg(&spi->dev, "mipid_spi_remove\n");
+ omap_dss_unregister_driver(&n8x0_panel_driver);
return 0;
}
@@ -723,36 +680,8 @@ static struct spi_driver mipid_spi_driver = {
.owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
- .remove = __devexit_p(mipid_spi_remove),
+ .remove = mipid_spi_remove,
};
+module_spi_driver(mipid_spi_driver);
-static int __init n8x0_panel_drv_init(void)
-{
- int r;
-
- r = spi_register_driver(&mipid_spi_driver);
- if (r) {
- pr_err("n8x0_panel: spi driver registration failed\n");
- return r;
- }
-
- r = omap_dss_register_driver(&n8x0_panel_driver);
- if (r) {
- pr_err("n8x0_panel: dss driver registration failed\n");
- spi_unregister_driver(&mipid_spi_driver);
- return r;
- }
-
- return 0;
-}
-
-static void __exit n8x0_panel_drv_exit(void)
-{
- spi_unregister_driver(&mipid_spi_driver);
-
- omap_dss_unregister_driver(&n8x0_panel_driver);
-}
-
-module_init(n8x0_panel_drv_init);
-module_exit(n8x0_panel_drv_exit);
MODULE_LICENSE("GPL");
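
The n8x0 change above folds the DSS driver registration into the SPI probe/remove path and relies on module_spi_driver() instead of the hand-written init/exit pair. module_spi_driver() is the standard kernel helper that generates exactly that boilerplate; roughly (a sketch of the generic macro expansion, not code from this patch):

	/* module_spi_driver(x) is shorthand for: */
	static int __init x_init(void)
	{
		return spi_register_driver(&x);
	}
	module_init(x_init);

	static void __exit x_exit(void)
	{
		spi_unregister_driver(&x);
	}
	module_exit(x_exit);
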
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index 908fd268f3dc..c4e9c2b1b465 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -236,28 +236,6 @@ static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static int nec_8048_panel_suspend(struct omap_dss_device *dssdev)
-{
- nec_8048_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- return 0;
-}
-
-static int nec_8048_panel_resume(struct omap_dss_device *dssdev)
-{
- int r;
-
- r = nec_8048_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev)
{
return 16;
@@ -268,8 +246,6 @@ static struct omap_dss_driver nec_8048_driver = {
.remove = nec_8048_panel_remove,
.enable = nec_8048_panel_enable,
.disable = nec_8048_panel_disable,
- .suspend = nec_8048_panel_suspend,
- .resume = nec_8048_panel_resume,
.get_recommended_bpp = nec_8048_recommended_bpp,
.driver = {
@@ -347,7 +323,7 @@ static int nec_8048_spi_resume(struct spi_device *spi)
static struct spi_driver nec_8048_spi_driver = {
.probe = nec_8048_spi_probe,
- .remove = __devexit_p(nec_8048_spi_remove),
+ .remove = nec_8048_spi_remove,
.suspend = nec_8048_spi_suspend,
.resume = nec_8048_spi_resume,
.driver = {
diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c
index 9df87640ddd2..1b94018aac3e 100644
--- a/drivers/video/omap2/displays/panel-picodlp.c
+++ b/drivers/video/omap2/displays/panel-picodlp.c
@@ -50,6 +50,7 @@ struct picodlp_i2c_data {
static struct i2c_device_id picodlp_i2c_id[] = {
{ "picodlp_i2c_driver", 0 },
+ { }
};
struct picodlp_i2c_command {
@@ -503,47 +504,6 @@ static void picodlp_panel_disable(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "disabling picodlp panel\n");
}
-static int picodlp_panel_suspend(struct omap_dss_device *dssdev)
-{
- struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
-
- mutex_lock(&picod->lock);
- /* Turn off DLP Power */
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- mutex_unlock(&picod->lock);
- dev_err(&dssdev->dev, "unable to suspend picodlp panel,"
- " panel is not ACTIVE\n");
- return -EINVAL;
- }
-
- picodlp_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- mutex_unlock(&picod->lock);
-
- dev_dbg(&dssdev->dev, "suspending picodlp panel\n");
- return 0;
-}
-
-static int picodlp_panel_resume(struct omap_dss_device *dssdev)
-{
- struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&picod->lock);
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- mutex_unlock(&picod->lock);
- dev_err(&dssdev->dev, "unable to resume picodlp panel,"
- " panel is not ACTIVE\n");
- return -EINVAL;
- }
-
- r = picodlp_panel_power_on(dssdev);
- mutex_unlock(&picod->lock);
- dev_dbg(&dssdev->dev, "resuming picodlp panel\n");
- return r;
-}
-
static void picodlp_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -560,9 +520,6 @@ static struct omap_dss_driver picodlp_driver = {
.get_resolution = picodlp_get_resolution,
- .suspend = picodlp_panel_suspend,
- .resume = picodlp_panel_resume,
-
.driver = {
.name = "picodlp_panel",
.owner = THIS_MODULE,
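
Besides dropping suspend/resume, the picodlp hunk adds the missing `{ }` terminator to picodlp_i2c_id[]. i2c_device_id tables are walked until an entry with an empty name is found, so a table without the sentinel can be read past its end. The fixed table has the usual shape:

	static struct i2c_device_id picodlp_i2c_id[] = {
		{ "picodlp_i2c_driver", 0 },
		{ }	/* sentinel: terminates the table for the I2C core */
	};
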
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 1ec3b277ff15..cada8c621e01 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -194,29 +194,12 @@ static void sharp_ls_panel_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static int sharp_ls_panel_suspend(struct omap_dss_device *dssdev)
-{
- sharp_ls_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- return 0;
-}
-
-static int sharp_ls_panel_resume(struct omap_dss_device *dssdev)
-{
- int r;
- r = sharp_ls_power_on(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- return r;
-}
-
static struct omap_dss_driver sharp_ls_driver = {
.probe = sharp_ls_panel_probe,
.remove = __exit_p(sharp_ls_panel_remove),
.enable = sharp_ls_panel_enable,
.disable = sharp_ls_panel_disable,
- .suspend = sharp_ls_panel_suspend,
- .resume = sharp_ls_panel_resume,
.driver = {
.name = "sharp_ls_panel",
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index f2f644680ca8..a32407a5735a 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -1245,76 +1245,6 @@ static void taal_disable(struct omap_dss_device *dssdev)
mutex_unlock(&td->lock);
}
-static int taal_suspend(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- int r;
-
- dev_dbg(&dssdev->dev, "suspend\n");
-
- mutex_lock(&td->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = -EINVAL;
- goto err;
- }
-
- taal_cancel_ulps_work(dssdev);
- taal_cancel_esd_work(dssdev);
-
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (!r)
- taal_power_off(dssdev);
-
- dsi_bus_unlock(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&td->lock);
-
- return 0;
-err:
- mutex_unlock(&td->lock);
- return r;
-}
-
-static int taal_resume(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- int r;
-
- dev_dbg(&dssdev->dev, "resume\n");
-
- mutex_lock(&td->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- r = -EINVAL;
- goto err;
- }
-
- dsi_bus_lock(dssdev);
-
- r = taal_power_on(dssdev);
-
- dsi_bus_unlock(dssdev);
-
- if (r) {
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- } else {
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- taal_queue_esd_work(dssdev);
- }
-
- mutex_unlock(&td->lock);
-
- return r;
-err:
- mutex_unlock(&td->lock);
- return r;
-}
-
static void taal_framedone_cb(int err, void *data)
{
struct omap_dss_device *dssdev = data;
@@ -1818,8 +1748,6 @@ static struct omap_dss_driver taal_driver = {
.enable = taal_enable,
.disable = taal_disable,
- .suspend = taal_suspend,
- .resume = taal_resume,
.update = taal_update,
.sync = taal_sync,
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 383811cf8648..8281baafe1ef 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -189,37 +189,6 @@ static void tfp410_disable(struct omap_dss_device *dssdev)
mutex_unlock(&ddata->lock);
}
-static int tfp410_suspend(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
-
- mutex_lock(&ddata->lock);
-
- tfp410_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
-static int tfp410_resume(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&ddata->lock);
-
- r = tfp410_power_on(dssdev);
- if (r == 0)
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
static void tfp410_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -355,8 +324,6 @@ static struct omap_dss_driver tfp410_driver = {
.enable = tfp410_enable,
.disable = tfp410_disable,
- .suspend = tfp410_suspend,
- .resume = tfp410_resume,
.set_timings = tfp410_set_timings,
.get_timings = tfp410_get_timings,
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index b5e6dbc59f0a..6b6643911d29 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -401,24 +401,6 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static int tpo_td043_suspend(struct omap_dss_device *dssdev)
-{
- dev_dbg(&dssdev->dev, "suspend\n");
-
- tpo_td043_disable_dss(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- return 0;
-}
-
-static int tpo_td043_resume(struct omap_dss_device *dssdev)
-{
- dev_dbg(&dssdev->dev, "resume\n");
-
- return tpo_td043_enable_dss(dssdev);
-}
-
static int tpo_td043_probe(struct omap_dss_device *dssdev)
{
struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
@@ -500,8 +482,6 @@ static struct omap_dss_driver tpo_td043_driver = {
.enable = tpo_td043_enable,
.disable = tpo_td043_disable,
- .suspend = tpo_td043_suspend,
- .resume = tpo_td043_resume,
.set_mirror = tpo_td043_set_hmirror,
.get_mirror = tpo_td043_get_hmirror,
@@ -548,7 +528,7 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit tpo_td043_spi_remove(struct spi_device *spi)
+static int tpo_td043_spi_remove(struct spi_device *spi)
{
struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&spi->dev);
@@ -600,7 +580,7 @@ static struct spi_driver tpo_td043_spi_driver = {
.pm = &tpo_td043_spi_pm,
},
.probe = tpo_td043_spi_probe,
- .remove = __devexit_p(tpo_td043_spi_remove),
+ .remove = tpo_td043_spi_remove,
};
module_spi_driver(tpo_td043_spi_driver);
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 80f5390aa136..cb0f145c7077 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -1,33 +1,30 @@
menuconfig OMAP2_DSS
tristate "OMAP2+ Display Subsystem support"
- depends on ARCH_OMAP2PLUS
help
OMAP2+ Display Subsystem support.
if OMAP2_DSS
-config OMAP2_VRAM_SIZE
- int "VRAM size (MB)"
- range 0 32
- default 0
+config OMAP2_DSS_DEBUG
+ bool "Debug support"
+ default n
help
- The amount of SDRAM to reserve at boot time for video RAM use.
- This VRAM will be used by omapfb and other drivers that need
- large continuous RAM area for video use.
+ This enables printing of debug messages. Alternatively, debug messages
+ can be enabled by setting CONFIG_DYNAMIC_DEBUG and then enabling the
+ appropriate flags in <debugfs>/dynamic_debug/control.
- You can also set this with "vram=<bytes>" kernel argument, or
- in the board file.
-
-config OMAP2_DSS_DEBUG_SUPPORT
- bool "Debug support"
- default y
+config OMAP2_DSS_DEBUGFS
+ bool "Debugfs filesystem support"
+ depends on DEBUG_FS
+ default n
help
- This enables debug messages. You need to enable printing
- with 'debug' module parameter.
+ This enables debugfs for OMAPDSS at <debugfs>/omapdss. It allows
+ querying the clock and register configuration of dss, dispc, dsi,
+ hdmi and rfbi.
config OMAP2_DSS_COLLECT_IRQ_STATS
bool "Collect DSS IRQ statistics"
- depends on OMAP2_DSS_DEBUG_SUPPORT
+ depends on OMAP2_DSS_DEBUGFS
default n
help
Collect DSS IRQ statistics, printable via debugfs.
@@ -62,7 +59,6 @@ config OMAP2_DSS_VENC
config OMAP4_DSS_HDMI
bool "HDMI support"
- depends on ARCH_OMAP4
default y
help
HDMI Interface. This adds the High Definition Multimedia Interface.
@@ -70,11 +66,9 @@ config OMAP4_DSS_HDMI
config OMAP4_DSS_HDMI_AUDIO
bool
- depends on OMAP4_DSS_HDMI
config OMAP2_DSS_SDI
bool "SDI support"
- depends on ARCH_OMAP3
default n
help
SDI (Serial Display Interface) support.
@@ -84,7 +78,6 @@ config OMAP2_DSS_SDI
config OMAP2_DSS_DSI
bool "DSI support"
- depends on ARCH_OMAP3 || ARCH_OMAP4 || ARCH_OMAP5
default n
help
MIPI DSI (Display Serial Interface) support.
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 4549869bfe1a..61949ff7940c 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,6 +1,10 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
+# Core DSS files
omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
- manager.o manager-sysfs.o overlay.o overlay-sysfs.o output.o apply.o
+ output.o
+# DSS compat layer files
+omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
+ dispc-compat.o display-sysfs.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o venc_panel.o
@@ -8,3 +12,4 @@ omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \
hdmi_panel.o ti_hdmi_4xxx_ip.o
+ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
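
The new OMAP2_DSS_DEBUG option works by adding -DDEBUG to the compiler flags for the whole omapdss module (last Makefile line above). With DEBUG defined, the kernel's pr_debug()/dev_dbg() calls emit real output instead of compiling away, which is what the driver's DSSDBG() wrappers build on. A minimal sketch of that mechanism (this DSSDBG definition is illustrative, not copied from dss.h):

	#include <linux/printk.h>

	/* Illustrative only: with -DDEBUG, pr_debug() prints at KERN_DEBUG
	 * level; without it (and without CONFIG_DYNAMIC_DEBUG) it is a no-op. */
	#define DSSDBG(format, ...) \
		pr_debug("omapdss: " format, ## __VA_ARGS__)
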
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index 19d66f471b4b..d446bdfc4c82 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -18,6 +18,7 @@
#define DSS_SUBSYS_NAME "APPLY"
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
@@ -26,6 +27,7 @@
#include "dss.h"
#include "dss_features.h"
+#include "dispc-compat.h"
/*
* We have 4 levels of cache for the dispc settings. First two are in SW and
@@ -70,7 +72,6 @@ struct ovl_priv_data {
bool shadow_extra_info_dirty;
bool enabled;
- enum omap_channel channel;
u32 fifo_low, fifo_high;
/*
@@ -105,6 +106,9 @@ struct mgr_priv_data {
struct omap_video_timings timings;
struct dss_lcd_mgr_config lcd_config;
+
+ void (*framedone_handler)(void *);
+ void *framedone_handler_data;
};
static struct {
@@ -132,7 +136,7 @@ static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
return &dss_data.mgr_priv_data_array[mgr->id];
}
-void dss_apply_init(void)
+static void apply_init_priv(void)
{
const int num_ovls = dss_feat_get_num_ovls();
struct mgr_priv_data *mp;
@@ -414,11 +418,46 @@ static void wait_pending_extra_info_updates(void)
r = wait_for_completion_timeout(&extra_updated_completion, t);
if (r == 0)
DSSWARN("timeout in wait_pending_extra_info_updates\n");
- else if (r < 0)
- DSSERR("wait_pending_extra_info_updates failed: %d\n", r);
}
-int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
+static inline struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
+{
+ return ovl->manager ?
+ (ovl->manager->output ? ovl->manager->output->device : NULL) :
+ NULL;
+}
+
+static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
+{
+ return mgr->output ? mgr->output->device : NULL;
+}
+
+static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ struct omap_dss_device *dssdev = mgr->get_device(mgr);
+ u32 irq;
+ int r;
+
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
+ irq = DISPC_IRQ_EVSYNC_ODD;
+ else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
+ irq = DISPC_IRQ_EVSYNC_EVEN;
+ else
+ irq = dispc_mgr_get_vsync_irq(mgr->id);
+
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+
+ dispc_runtime_put();
+
+ return r;
+}
+
+static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
unsigned long timeout = msecs_to_jiffies(500);
struct mgr_priv_data *mp = get_mgr_priv(mgr);
@@ -488,7 +527,7 @@ int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
return r;
}
-int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
+static int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
unsigned long timeout = msecs_to_jiffies(500);
struct ovl_priv_data *op;
@@ -573,7 +612,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
struct mgr_priv_data *mp;
int r;
- DSSDBGF("%d", ovl->id);
+ DSSDBG("writing ovl %d regs", ovl->id);
if (!op->enabled || !op->info_dirty)
return;
@@ -608,7 +647,7 @@ static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
struct ovl_priv_data *op = get_ovl_priv(ovl);
struct mgr_priv_data *mp;
- DSSDBGF("%d", ovl->id);
+ DSSDBG("writing ovl %d regs extra", ovl->id);
if (!op->extra_info_dirty)
return;
@@ -617,7 +656,6 @@ static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
* disabled */
dispc_ovl_enable(ovl->id, op->enabled);
- dispc_ovl_set_channel_out(ovl->id, op->channel);
dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);
mp = get_mgr_priv(ovl->manager);
@@ -632,7 +670,7 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
struct mgr_priv_data *mp = get_mgr_priv(mgr);
struct omap_overlay *ovl;
- DSSDBGF("%d", mgr->id);
+ DSSDBG("writing mgr %d regs", mgr->id);
if (!mp->enabled)
return;
@@ -658,7 +696,7 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
- DSSDBGF("%d", mgr->id);
+ DSSDBG("writing mgr %d regs extra", mgr->id);
if (!mp->extra_info_dirty)
return;
@@ -666,22 +704,8 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
dispc_mgr_set_timings(mgr->id, &mp->timings);
/* lcd_config parameters */
- if (dss_mgr_is_lcd(mgr->id)) {
- dispc_mgr_set_io_pad_mode(mp->lcd_config.io_pad_mode);
-
- dispc_mgr_enable_stallmode(mgr->id, mp->lcd_config.stallmode);
- dispc_mgr_enable_fifohandcheck(mgr->id,
- mp->lcd_config.fifohandcheck);
-
- dispc_mgr_set_clock_div(mgr->id, &mp->lcd_config.clock_info);
-
- dispc_mgr_set_tft_data_lines(mgr->id,
- mp->lcd_config.video_port_width);
-
- dispc_lcd_enable_signal_polarity(mp->lcd_config.lcden_sig_polarity);
-
- dispc_mgr_set_lcd_type_tft(mgr->id);
- }
+ if (dss_mgr_is_lcd(mgr->id))
+ dispc_mgr_set_lcd_config(mgr->id, &mp->lcd_config);
mp->extra_info_dirty = false;
if (mp->updating)
@@ -761,7 +785,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
}
}
-void dss_mgr_start_update(struct omap_overlay_manager *mgr)
+static void dss_mgr_start_update_compat(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
unsigned long flags;
@@ -786,9 +810,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
if (!dss_data.irq_enabled && need_isr())
dss_register_vsync_isr();
- dispc_mgr_enable(mgr->id, true);
-
- mgr_clear_shadow_dirty(mgr);
+ dispc_mgr_enable_sync(mgr->id);
spin_unlock_irqrestore(&data_lock, flags);
}
@@ -845,7 +867,6 @@ static void dss_apply_irq_handler(void *data, u32 mask)
for (i = 0; i < num_mgrs; i++) {
struct omap_overlay_manager *mgr;
struct mgr_priv_data *mp;
- bool was_updating;
mgr = omap_dss_get_overlay_manager(i);
mp = get_mgr_priv(mgr);
@@ -853,7 +874,6 @@ static void dss_apply_irq_handler(void *data, u32 mask)
if (!mp->enabled)
continue;
- was_updating = mp->updating;
mp->updating = dispc_mgr_is_enabled(i);
if (!mgr_manual_update(mgr)) {
@@ -872,6 +892,21 @@ static void dss_apply_irq_handler(void *data, u32 mask)
if (!extra_updating)
complete_all(&extra_updated_completion);
+ /* call framedone handlers for manual update displays */
+ for (i = 0; i < num_mgrs; i++) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mgr_manual_update(mgr) || !mp->framedone_handler)
+ continue;
+
+ if (mask & dispc_mgr_get_framedone_irq(i))
+ mp->framedone_handler(mp->framedone_handler_data);
+ }
+
if (!need_isr())
dss_unregister_vsync_isr();
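
The block added above dispatches FRAMEDONE interrupts to a per-manager handler, but only for manual-update managers (the RFBI/DSI command-mode paths); auto-update managers keep using the vsync-based flow. The handler slot is the framedone_handler/framedone_handler_data pair added to struct mgr_priv_data earlier in this file, filled in through the register/unregister ops installed at the end of the file. A hypothetical handler a manual-update output driver could register there (names other than the callback signature are illustrative):

	#include <linux/completion.h>

	/* Registered via the .register_framedone_handler op this patch
	 * installs; data is whatever pointer was passed at registration. */
	static void my_framedone(void *data)
	{
		struct completion *framedone = data;

		complete(framedone);
	}
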
@@ -906,7 +941,7 @@ static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
mp->info = mp->user_info;
}
-int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
unsigned long flags;
struct omap_overlay *ovl;
@@ -1005,7 +1040,7 @@ static void dss_setup_fifos(void)
}
}
-int dss_mgr_enable(struct omap_overlay_manager *mgr)
+static int dss_mgr_enable_compat(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
unsigned long flags;
@@ -1035,10 +1070,13 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
if (!mgr_manual_update(mgr))
mp->updating = true;
+ if (!dss_data.irq_enabled && need_isr())
+ dss_register_vsync_isr();
+
spin_unlock_irqrestore(&data_lock, flags);
if (!mgr_manual_update(mgr))
- dispc_mgr_enable(mgr->id, true);
+ dispc_mgr_enable_sync(mgr->id);
out:
mutex_unlock(&apply_lock);
@@ -1052,7 +1090,7 @@ err:
return r;
}
-void dss_mgr_disable(struct omap_overlay_manager *mgr)
+static void dss_mgr_disable_compat(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
unsigned long flags;
@@ -1063,7 +1101,7 @@ void dss_mgr_disable(struct omap_overlay_manager *mgr)
goto out;
if (!mgr_manual_update(mgr))
- dispc_mgr_enable(mgr->id, false);
+ dispc_mgr_disable_sync(mgr->id);
spin_lock_irqsave(&data_lock, flags);
@@ -1076,7 +1114,7 @@ out:
mutex_unlock(&apply_lock);
}
-int dss_mgr_set_info(struct omap_overlay_manager *mgr,
+static int dss_mgr_set_info(struct omap_overlay_manager *mgr,
struct omap_overlay_manager_info *info)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
@@ -1097,7 +1135,7 @@ int dss_mgr_set_info(struct omap_overlay_manager *mgr,
return 0;
}
-void dss_mgr_get_info(struct omap_overlay_manager *mgr,
+static void dss_mgr_get_info(struct omap_overlay_manager *mgr,
struct omap_overlay_manager_info *info)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
@@ -1110,7 +1148,7 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
spin_unlock_irqrestore(&data_lock, flags);
}
-int dss_mgr_set_output(struct omap_overlay_manager *mgr,
+static int dss_mgr_set_output(struct omap_overlay_manager *mgr,
struct omap_dss_output *output)
{
int r;
@@ -1142,7 +1180,7 @@ err:
return r;
}
-int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
+static int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
{
int r;
struct mgr_priv_data *mp = get_mgr_priv(mgr);
@@ -1189,7 +1227,7 @@ static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
mp->extra_info_dirty = true;
}
-void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+static void dss_mgr_set_timings_compat(struct omap_overlay_manager *mgr,
const struct omap_video_timings *timings)
{
unsigned long flags;
@@ -1217,7 +1255,7 @@ static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
mp->extra_info_dirty = true;
}
-void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+static void dss_mgr_set_lcd_config_compat(struct omap_overlay_manager *mgr,
const struct dss_lcd_mgr_config *config)
{
unsigned long flags;
@@ -1236,7 +1274,7 @@ out:
spin_unlock_irqrestore(&data_lock, flags);
}
-int dss_ovl_set_info(struct omap_overlay *ovl,
+static int dss_ovl_set_info(struct omap_overlay *ovl,
struct omap_overlay_info *info)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
@@ -1257,7 +1295,7 @@ int dss_ovl_set_info(struct omap_overlay *ovl,
return 0;
}
-void dss_ovl_get_info(struct omap_overlay *ovl,
+static void dss_ovl_get_info(struct omap_overlay *ovl,
struct omap_overlay_info *info)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
@@ -1270,7 +1308,7 @@ void dss_ovl_get_info(struct omap_overlay *ovl,
spin_unlock_irqrestore(&data_lock, flags);
}
-int dss_ovl_set_manager(struct omap_overlay *ovl,
+static int dss_ovl_set_manager(struct omap_overlay *ovl,
struct omap_overlay_manager *mgr)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
@@ -1289,45 +1327,40 @@ int dss_ovl_set_manager(struct omap_overlay *ovl,
goto err;
}
+ r = dispc_runtime_get();
+ if (r)
+ goto err;
+
spin_lock_irqsave(&data_lock, flags);
if (op->enabled) {
spin_unlock_irqrestore(&data_lock, flags);
DSSERR("overlay has to be disabled to change the manager\n");
r = -EINVAL;
- goto err;
+ goto err1;
}
- op->channel = mgr->id;
- op->extra_info_dirty = true;
+ dispc_ovl_set_channel_out(ovl->id, mgr->id);
ovl->manager = mgr;
list_add_tail(&ovl->list, &mgr->overlays);
spin_unlock_irqrestore(&data_lock, flags);
- /* XXX: When there is an overlay on a DSI manual update display, and
- * the overlay is first disabled, then moved to tv, and enabled, we
- * seem to get SYNC_LOST_DIGIT error.
- *
- * Waiting doesn't seem to help, but updating the manual update display
- * after disabling the overlay seems to fix this. This hints that the
- * overlay is perhaps somehow tied to the LCD output until the output
- * is updated.
- *
- * Userspace workaround for this is to update the LCD after disabling
- * the overlay, but before moving the overlay to TV.
- */
+ dispc_runtime_put();
mutex_unlock(&apply_lock);
return 0;
+
+err1:
+ dispc_runtime_put();
err:
mutex_unlock(&apply_lock);
return r;
}
-int dss_ovl_unset_manager(struct omap_overlay *ovl)
+static int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
unsigned long flags;
@@ -1355,9 +1388,24 @@ int dss_ovl_unset_manager(struct omap_overlay *ovl)
/* wait for pending extra_info updates to ensure the ovl is disabled */
wait_pending_extra_info_updates();
+ /*
+ * For a manual update display, there is no guarantee that the overlay
+ * is really disabled in HW, we may need an extra update from this
+ * manager before the configurations can go in. Return an error if the
+ * overlay needed an update from the manager.
+ *
+ * TODO: Instead of returning an error, try to do a dummy manager update
+ * here to disable the overlay in hardware. Use the *GATED fields in
+ * the DISPC_CONFIG registers to do a dummy update.
+ */
spin_lock_irqsave(&data_lock, flags);
- op->channel = -1;
+ if (ovl_manual_update(ovl) && op->extra_info_dirty) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ DSSERR("need an update to change the manager\n");
+ r = -EINVAL;
+ goto err;
+ }
ovl->manager = NULL;
list_del(&ovl->list);
@@ -1372,7 +1420,7 @@ err:
return r;
}
-bool dss_ovl_is_enabled(struct omap_overlay *ovl)
+static bool dss_ovl_is_enabled(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
unsigned long flags;
@@ -1387,7 +1435,7 @@ bool dss_ovl_is_enabled(struct omap_overlay *ovl)
return e;
}
-int dss_ovl_enable(struct omap_overlay *ovl)
+static int dss_ovl_enable(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
unsigned long flags;
@@ -1437,7 +1485,7 @@ err1:
return r;
}
-int dss_ovl_disable(struct omap_overlay *ovl)
+static int dss_ovl_disable(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
unsigned long flags;
@@ -1472,3 +1520,152 @@ err:
return r;
}
+static int dss_mgr_register_framedone_handler_compat(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ if (mp->framedone_handler)
+ return -EBUSY;
+
+ mp->framedone_handler = handler;
+ mp->framedone_handler_data = data;
+
+ return 0;
+}
+
+static void dss_mgr_unregister_framedone_handler_compat(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ WARN_ON(mp->framedone_handler != handler ||
+ mp->framedone_handler_data != data);
+
+ mp->framedone_handler = NULL;
+ mp->framedone_handler_data = NULL;
+}
+
+static const struct dss_mgr_ops apply_mgr_ops = {
+ .start_update = dss_mgr_start_update_compat,
+ .enable = dss_mgr_enable_compat,
+ .disable = dss_mgr_disable_compat,
+ .set_timings = dss_mgr_set_timings_compat,
+ .set_lcd_config = dss_mgr_set_lcd_config_compat,
+ .register_framedone_handler = dss_mgr_register_framedone_handler_compat,
+ .unregister_framedone_handler = dss_mgr_unregister_framedone_handler_compat,
+};
+
+static int compat_refcnt;
+static DEFINE_MUTEX(compat_init_lock);
+
+int omapdss_compat_init(void)
+{
+ struct platform_device *pdev = dss_get_core_pdev();
+ struct omap_dss_device *dssdev = NULL;
+ int i, r;
+
+ mutex_lock(&compat_init_lock);
+
+ if (compat_refcnt++ > 0)
+ goto out;
+
+ apply_init_priv();
+
+ dss_init_overlay_managers(pdev);
+ dss_init_overlays(pdev);
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+ struct omap_overlay_manager *mgr;
+
+ mgr = omap_dss_get_overlay_manager(i);
+
+ mgr->set_output = &dss_mgr_set_output;
+ mgr->unset_output = &dss_mgr_unset_output;
+ mgr->apply = &omap_dss_mgr_apply;
+ mgr->set_manager_info = &dss_mgr_set_info;
+ mgr->get_manager_info = &dss_mgr_get_info;
+ mgr->wait_for_go = &dss_mgr_wait_for_go;
+ mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
+ mgr->get_device = &dss_mgr_get_device;
+ }
+
+ for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay *ovl = omap_dss_get_overlay(i);
+
+ ovl->is_enabled = &dss_ovl_is_enabled;
+ ovl->enable = &dss_ovl_enable;
+ ovl->disable = &dss_ovl_disable;
+ ovl->set_manager = &dss_ovl_set_manager;
+ ovl->unset_manager = &dss_ovl_unset_manager;
+ ovl->set_overlay_info = &dss_ovl_set_info;
+ ovl->get_overlay_info = &dss_ovl_get_info;
+ ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
+ ovl->get_device = &dss_ovl_get_device;
+ }
+
+ r = dss_install_mgr_ops(&apply_mgr_ops);
+ if (r)
+ goto err_mgr_ops;
+
+ for_each_dss_dev(dssdev) {
+ r = display_init_sysfs(pdev, dssdev);
+ /* XXX uninit sysfs files on error */
+ if (r)
+ goto err_disp_sysfs;
+ }
+
+ dispc_runtime_get();
+
+ r = dss_dispc_initialize_irq();
+ if (r)
+ goto err_init_irq;
+
+ dispc_runtime_put();
+
+out:
+ mutex_unlock(&compat_init_lock);
+
+ return 0;
+
+err_init_irq:
+ dispc_runtime_put();
+
+err_disp_sysfs:
+ dss_uninstall_mgr_ops();
+
+err_mgr_ops:
+ dss_uninit_overlay_managers(pdev);
+ dss_uninit_overlays(pdev);
+
+ compat_refcnt--;
+
+ mutex_unlock(&compat_init_lock);
+
+ return r;
+}
+EXPORT_SYMBOL(omapdss_compat_init);
+
+void omapdss_compat_uninit(void)
+{
+ struct platform_device *pdev = dss_get_core_pdev();
+ struct omap_dss_device *dssdev = NULL;
+
+ mutex_lock(&compat_init_lock);
+
+ if (--compat_refcnt > 0)
+ goto out;
+
+ dss_dispc_uninitialize_irq();
+
+ for_each_dss_dev(dssdev)
+ display_uninit_sysfs(pdev, dssdev);
+
+ dss_uninstall_mgr_ops();
+
+ dss_uninit_overlay_managers(pdev);
+ dss_uninit_overlays(pdev);
+out:
+ mutex_unlock(&compat_init_lock);
+}
+EXPORT_SYMBOL(omapdss_compat_uninit);
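
omapdss_compat_init()/omapdss_compat_uninit() put the whole overlay/manager compat layer behind a reference count, so more than one consumer (omapfb, for example) can share it; only the first init and the last uninit actually set up or tear down the managers, overlays, sysfs files, mgr ops and the DISPC IRQ handler. A sketch of the intended calling pattern from a consumer's probe/remove (the consumer itself is hypothetical):

	#include <linux/platform_device.h>

	static int my_consumer_probe(struct platform_device *pdev)
	{
		int r;

		r = omapdss_compat_init();	/* first caller sets everything up */
		if (r)
			return r;

		/* ... use overlays/managers through the compat ops ... */
		return 0;
	}

	static int my_consumer_remove(struct platform_device *pdev)
	{
		omapdss_compat_uninit();	/* last caller tears it down */
		return 0;
	}
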
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index d94ef9e31a35..f8779d4750ba 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -53,15 +53,23 @@ static char *def_disp_name;
module_param_named(def_disp, def_disp_name, charp, 0);
MODULE_PARM_DESC(def_disp, "default display name");
-#ifdef DEBUG
-bool dss_debug;
-module_param_named(debug, dss_debug, bool, 0644);
-#endif
-
-const char *dss_get_default_display_name(void)
+const char *omapdss_get_default_display_name(void)
{
return core.default_display_name;
}
+EXPORT_SYMBOL(omapdss_get_default_display_name);
+
+enum omapdss_version omapdss_get_version(void)
+{
+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+ return pdata->version;
+}
+EXPORT_SYMBOL(omapdss_get_version);
+
+struct platform_device *dss_get_core_pdev(void)
+{
+ return core.pdev;
+}
/* REGULATORS */
@@ -93,21 +101,6 @@ struct regulator *dss_get_vdds_sdi(void)
return reg;
}
-int dss_get_ctx_loss_count(struct device *dev)
-{
- struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
- int cnt;
-
- if (!board_data->get_context_loss_count)
- return -ENOENT;
-
- cnt = board_data->get_context_loss_count(dev);
-
- WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
-
- return cnt;
-}
-
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
@@ -122,7 +115,7 @@ void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
{
struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
- if (!board_data->dsi_enable_pads)
+ if (!board_data->dsi_disable_pads)
return;
return board_data->dsi_disable_pads(dsi_id, lane_mask);
@@ -138,7 +131,7 @@ int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
return 0;
}
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
static int dss_debug_show(struct seq_file *s, void *unused)
{
void (*func)(struct seq_file *) = s->private;
@@ -193,7 +186,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
return 0;
}
-#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+#else /* CONFIG_OMAP2_DSS_DEBUGFS */
static inline int dss_initialize_debugfs(void)
{
return 0;
@@ -205,7 +198,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
{
return 0;
}
-#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
/* PLATFORM DEVICE */
static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
@@ -237,12 +230,7 @@ static int __init omap_dss_probe(struct platform_device *pdev)
core.pdev = pdev;
- dss_features_init(pdata->version);
-
- dss_apply_init();
-
- dss_init_overlay_managers(pdev);
- dss_init_overlays(pdev);
+ dss_features_init(omapdss_get_version());
r = dss_initialize_debugfs();
if (r)
@@ -268,9 +256,6 @@ static int omap_dss_remove(struct platform_device *pdev)
dss_uninitialize_debugfs();
- dss_uninit_overlays(pdev);
- dss_uninit_overlay_managers(pdev);
-
return 0;
}
@@ -358,15 +343,10 @@ static int dss_driver_probe(struct device *dev)
dev_name(dev), dssdev->driver_name,
dssdrv->driver.name);
- r = dss_init_device(core.pdev, dssdev);
- if (r)
- return r;
-
r = dssdrv->probe(dssdev);
if (r) {
DSSERR("driver probe failed: %d\n", r);
- dss_uninit_device(core.pdev, dssdev);
return r;
}
@@ -387,8 +367,6 @@ static int dss_driver_remove(struct device *dev)
dssdrv->remove(dssdev);
- dss_uninit_device(core.pdev, dssdev);
-
dssdev->driver = NULL;
return 0;
@@ -507,6 +485,9 @@ static int __init omap_dss_bus_register(void)
/* INIT */
static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_init_platform_driver,
+#endif
#ifdef CONFIG_OMAP2_DSS_DPI
dpi_init_platform_driver,
#endif
@@ -519,15 +500,15 @@ static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
#ifdef CONFIG_OMAP2_DSS_VENC
venc_init_platform_driver,
#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_init_platform_driver,
-#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
hdmi_init_platform_driver,
#endif
};
static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_uninit_platform_driver,
+#endif
#ifdef CONFIG_OMAP2_DSS_DPI
dpi_uninit_platform_driver,
#endif
@@ -540,9 +521,6 @@ static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
#ifdef CONFIG_OMAP2_DSS_VENC
venc_uninit_platform_driver,
#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_uninit_platform_driver,
-#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
hdmi_uninit_platform_driver,
#endif
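
Two smaller fixes ride along in core.c: dss_dsi_disable_pads() now tests board_data->dsi_disable_pads instead of the copy-pasted dsi_enable_pads check, and the DSI platform driver is registered before (and unregistered after) the other outputs, presumably so that outputs which depend on DSI resources such as the DSI PLL find it ready. The new omapdss_get_version() simply reports the version passed in via the board data, so code that previously keyed off ARCH_OMAP* Kconfig symbols can branch at run time instead; a sketch of that idea (the enumerator and helper names below are assumptions, not taken from this hunk):

	/* Illustrative run-time check replacing a compile-time ARCH_OMAP4 test. */
	static bool dss_has_omap4_hdmi(void)
	{
		switch (omapdss_get_version()) {
		case OMAPDSS_VER_OMAP4430_ES1:	/* assumed enumerator names */
		case OMAPDSS_VER_OMAP4:
			return true;
		default:
			return false;
		}
	}
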
diff --git a/drivers/video/omap2/dss/dispc-compat.c b/drivers/video/omap2/dss/dispc-compat.c
new file mode 100644
index 000000000000..928884c9a0a9
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc-compat.c
@@ -0,0 +1,667 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "APPLY"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "dss_features.h"
+#include "dispc-compat.h"
+
+#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
+ DISPC_IRQ_OCP_ERR | \
+ DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
+ DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
+ DISPC_IRQ_SYNC_LOST | \
+ DISPC_IRQ_SYNC_LOST_DIGIT)
+
+#define DISPC_MAX_NR_ISRS 8
+
+struct omap_dispc_isr_data {
+ omap_dispc_isr_t isr;
+ void *arg;
+ u32 mask;
+};
+
+struct dispc_irq_stats {
+ unsigned long last_reset;
+ unsigned irq_count;
+ unsigned irqs[32];
+};
+
+static struct {
+ spinlock_t irq_lock;
+ u32 irq_error_mask;
+ struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
+ u32 error_irqs;
+ struct work_struct error_work;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dispc_irq_stats irq_stats;
+#endif
+} dispc_compat;
+
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+static void dispc_dump_irqs(struct seq_file *s)
+{
+ unsigned long flags;
+ struct dispc_irq_stats stats;
+
+ spin_lock_irqsave(&dispc_compat.irq_stats_lock, flags);
+
+ stats = dispc_compat.irq_stats;
+ memset(&dispc_compat.irq_stats, 0, sizeof(dispc_compat.irq_stats));
+ dispc_compat.irq_stats.last_reset = jiffies;
+
+ spin_unlock_irqrestore(&dispc_compat.irq_stats_lock, flags);
+
+ seq_printf(s, "period %u ms\n",
+ jiffies_to_msecs(jiffies - stats.last_reset));
+
+ seq_printf(s, "irqs %d\n", stats.irq_count);
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
+
+ PIS(FRAMEDONE);
+ PIS(VSYNC);
+ PIS(EVSYNC_EVEN);
+ PIS(EVSYNC_ODD);
+ PIS(ACBIAS_COUNT_STAT);
+ PIS(PROG_LINE_NUM);
+ PIS(GFX_FIFO_UNDERFLOW);
+ PIS(GFX_END_WIN);
+ PIS(PAL_GAMMA_MASK);
+ PIS(OCP_ERR);
+ PIS(VID1_FIFO_UNDERFLOW);
+ PIS(VID1_END_WIN);
+ PIS(VID2_FIFO_UNDERFLOW);
+ PIS(VID2_END_WIN);
+ if (dss_feat_get_num_ovls() > 3) {
+ PIS(VID3_FIFO_UNDERFLOW);
+ PIS(VID3_END_WIN);
+ }
+ PIS(SYNC_LOST);
+ PIS(SYNC_LOST_DIGIT);
+ PIS(WAKEUP);
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ PIS(FRAMEDONE2);
+ PIS(VSYNC2);
+ PIS(ACBIAS_COUNT_STAT2);
+ PIS(SYNC_LOST2);
+ }
+ if (dss_has_feature(FEAT_MGR_LCD3)) {
+ PIS(FRAMEDONE3);
+ PIS(VSYNC3);
+ PIS(ACBIAS_COUNT_STAT3);
+ PIS(SYNC_LOST3);
+ }
+#undef PIS
+}
+#endif
+
+/* dispc.irq_lock has to be locked by the caller */
+static void _omap_dispc_set_irqs(void)
+{
+ u32 mask;
+ int i;
+ struct omap_dispc_isr_data *isr_data;
+
+ mask = dispc_compat.irq_error_mask;
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc_compat.registered_isr[i];
+
+ if (isr_data->isr == NULL)
+ continue;
+
+ mask |= isr_data->mask;
+ }
+
+ dispc_write_irqenable(mask);
+}
+
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ struct omap_dispc_isr_data *isr_data;
+
+ if (isr == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dispc_compat.irq_lock, flags);
+
+ /* check for duplicate entry */
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc_compat.registered_isr[i];
+ if (isr_data->isr == isr && isr_data->arg == arg &&
+ isr_data->mask == mask) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ isr_data = NULL;
+ ret = -EBUSY;
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc_compat.registered_isr[i];
+
+ if (isr_data->isr != NULL)
+ continue;
+
+ isr_data->isr = isr;
+ isr_data->arg = arg;
+ isr_data->mask = mask;
+ ret = 0;
+
+ break;
+ }
+
+ if (ret)
+ goto err;
+
+ _omap_dispc_set_irqs();
+
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ return 0;
+err:
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(omap_dispc_register_isr);
+
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+ int i;
+ unsigned long flags;
+ int ret = -EINVAL;
+ struct omap_dispc_isr_data *isr_data;
+
+ spin_lock_irqsave(&dispc_compat.irq_lock, flags);
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc_compat.registered_isr[i];
+ if (isr_data->isr != isr || isr_data->arg != arg ||
+ isr_data->mask != mask)
+ continue;
+
+ /* found the correct isr */
+
+ isr_data->isr = NULL;
+ isr_data->arg = NULL;
+ isr_data->mask = 0;
+
+ ret = 0;
+ break;
+ }
+
+ if (ret == 0)
+ _omap_dispc_set_irqs();
+
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(omap_dispc_unregister_isr);
+
+static void print_irq_status(u32 status)
+{
+ if ((status & dispc_compat.irq_error_mask) == 0)
+ return;
+
+#define PIS(x) (status & DISPC_IRQ_##x) ? (#x " ") : ""
+
+ pr_debug("DISPC IRQ: 0x%x: %s%s%s%s%s%s%s%s%s\n",
+ status,
+ PIS(OCP_ERR),
+ PIS(GFX_FIFO_UNDERFLOW),
+ PIS(VID1_FIFO_UNDERFLOW),
+ PIS(VID2_FIFO_UNDERFLOW),
+ dss_feat_get_num_ovls() > 3 ? PIS(VID3_FIFO_UNDERFLOW) : "",
+ PIS(SYNC_LOST),
+ PIS(SYNC_LOST_DIGIT),
+ dss_has_feature(FEAT_MGR_LCD2) ? PIS(SYNC_LOST2) : "",
+ dss_has_feature(FEAT_MGR_LCD3) ? PIS(SYNC_LOST3) : "");
+#undef PIS
+}
+
+/* Called from dss.c. Note that we don't touch clocks here,
+ * but we presume they are on because we got an IRQ. However,
+ * an irq handler may turn the clocks off, so we may not have
+ * clock later in the function. */
+static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
+{
+ int i;
+ u32 irqstatus, irqenable;
+ u32 handledirqs = 0;
+ u32 unhandled_errors;
+ struct omap_dispc_isr_data *isr_data;
+ struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
+
+ spin_lock(&dispc_compat.irq_lock);
+
+ irqstatus = dispc_read_irqstatus();
+ irqenable = dispc_read_irqenable();
+
+ /* IRQ is not for us */
+ if (!(irqstatus & irqenable)) {
+ spin_unlock(&dispc_compat.irq_lock);
+ return IRQ_NONE;
+ }
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock(&dispc_compat.irq_stats_lock);
+ dispc_compat.irq_stats.irq_count++;
+ dss_collect_irq_stats(irqstatus, dispc_compat.irq_stats.irqs);
+ spin_unlock(&dispc_compat.irq_stats_lock);
+#endif
+
+ print_irq_status(irqstatus);
+
+ /* Ack the interrupt. Do it here before clocks are possibly turned
+ * off */
+ dispc_clear_irqstatus(irqstatus);
+ /* flush posted write */
+ dispc_read_irqstatus();
+
+ /* make a copy and unlock, so that isrs can unregister
+ * themselves */
+ memcpy(registered_isr, dispc_compat.registered_isr,
+ sizeof(registered_isr));
+
+ spin_unlock(&dispc_compat.irq_lock);
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &registered_isr[i];
+
+ if (!isr_data->isr)
+ continue;
+
+ if (isr_data->mask & irqstatus) {
+ isr_data->isr(isr_data->arg, irqstatus);
+ handledirqs |= isr_data->mask;
+ }
+ }
+
+ spin_lock(&dispc_compat.irq_lock);
+
+ unhandled_errors = irqstatus & ~handledirqs & dispc_compat.irq_error_mask;
+
+ if (unhandled_errors) {
+ dispc_compat.error_irqs |= unhandled_errors;
+
+ dispc_compat.irq_error_mask &= ~unhandled_errors;
+ _omap_dispc_set_irqs();
+
+ schedule_work(&dispc_compat.error_work);
+ }
+
+ spin_unlock(&dispc_compat.irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void dispc_error_worker(struct work_struct *work)
+{
+ int i;
+ u32 errors;
+ unsigned long flags;
+ static const unsigned fifo_underflow_bits[] = {
+ DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+ };
+
+ spin_lock_irqsave(&dispc_compat.irq_lock, flags);
+ errors = dispc_compat.error_irqs;
+ dispc_compat.error_irqs = 0;
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ dispc_runtime_get();
+
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ unsigned bit;
+
+ ovl = omap_dss_get_overlay(i);
+ bit = fifo_underflow_bits[i];
+
+ if (bit & errors) {
+ DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
+ ovl->name);
+ dispc_ovl_enable(ovl->id, false);
+ dispc_mgr_go(ovl->manager->id);
+ msleep(50);
+ }
+ }
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+ unsigned bit;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ bit = dispc_mgr_get_sync_lost_irq(i);
+
+ if (bit & errors) {
+ int j;
+
+ DSSERR("SYNC_LOST on channel %s, restarting the output "
+ "with video overlays disabled\n",
+ mgr->name);
+
+ dss_mgr_disable(mgr);
+
+ for (j = 0; j < omap_dss_get_num_overlays(); ++j) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(j);
+
+ if (ovl->id != OMAP_DSS_GFX &&
+ ovl->manager == mgr)
+ ovl->disable(ovl);
+ }
+
+ dss_mgr_enable(mgr);
+ }
+ }
+
+ if (errors & DISPC_IRQ_OCP_ERR) {
+ DSSERR("OCP_ERR\n");
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ dss_mgr_disable(mgr);
+ }
+ }
+
+ spin_lock_irqsave(&dispc_compat.irq_lock, flags);
+ dispc_compat.irq_error_mask |= errors;
+ _omap_dispc_set_irqs();
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ dispc_runtime_put();
+}
+
+int dss_dispc_initialize_irq(void)
+{
+ int r;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dispc_compat.irq_stats_lock);
+ dispc_compat.irq_stats.last_reset = jiffies;
+ dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
+#endif
+
+ spin_lock_init(&dispc_compat.irq_lock);
+
+ memset(dispc_compat.registered_isr, 0,
+ sizeof(dispc_compat.registered_isr));
+
+ dispc_compat.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST3;
+ if (dss_feat_get_num_ovls() > 3)
+ dispc_compat.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
+
+ /*
+ * there's SYNC_LOST_DIGIT waiting after enabling the DSS,
+ * so clear it
+ */
+ dispc_clear_irqstatus(dispc_read_irqstatus());
+
+ INIT_WORK(&dispc_compat.error_work, dispc_error_worker);
+
+ _omap_dispc_set_irqs();
+
+ r = dispc_request_irq(omap_dispc_irq_handler, &dispc_compat);
+ if (r) {
+ DSSERR("dispc_request_irq failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+void dss_dispc_uninitialize_irq(void)
+{
+ dispc_free_irq(&dispc_compat);
+}
+
+static void dispc_mgr_disable_isr(void *data, u32 mask)
+{
+ struct completion *compl = data;
+ complete(compl);
+}
+
+static void dispc_mgr_enable_lcd_out(enum omap_channel channel)
+{
+ dispc_mgr_enable(channel, true);
+}
+
+static void dispc_mgr_disable_lcd_out(enum omap_channel channel)
+{
+ DECLARE_COMPLETION_ONSTACK(framedone_compl);
+ int r;
+ u32 irq;
+
+ if (dispc_mgr_is_enabled(channel) == false)
+ return;
+
+ /*
+ * When we disable LCD output, we need to wait for FRAMEDONE to know
+ * that DISPC has finished with the LCD output.
+ */
+
+ irq = dispc_mgr_get_framedone_irq(channel);
+
+ r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
+ irq);
+ if (r)
+ DSSERR("failed to register FRAMEDONE isr\n");
+
+ dispc_mgr_enable(channel, false);
+
+ /* if we couldn't register for framedone, just sleep and exit */
+ if (r) {
+ msleep(100);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&framedone_compl,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for FRAME DONE\n");
+
+ r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
+ irq);
+ if (r)
+ DSSERR("failed to unregister FRAMEDONE isr\n");
+}
+
+static void dispc_digit_out_enable_isr(void *data, u32 mask)
+{
+ struct completion *compl = data;
+
+ /* ignore any sync lost interrupts */
+ if (mask & (DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD))
+ complete(compl);
+}
+
+static void dispc_mgr_enable_digit_out(void)
+{
+ DECLARE_COMPLETION_ONSTACK(vsync_compl);
+ int r;
+ u32 irq_mask;
+
+ if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT) == true)
+ return;
+
+ /*
+ * Digit output produces some sync lost interrupts during the first
+ * frame when enabling. Those need to be ignored, so we register for the
+ * sync lost irq to prevent the error handler from triggering.
+ */
+
+ irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT) |
+ dispc_mgr_get_sync_lost_irq(OMAP_DSS_CHANNEL_DIGIT);
+
+ r = omap_dispc_register_isr(dispc_digit_out_enable_isr, &vsync_compl,
+ irq_mask);
+ if (r) {
+ DSSERR("failed to register %x isr\n", irq_mask);
+ return;
+ }
+
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, true);
+
+ /* wait for the first evsync */
+ if (!wait_for_completion_timeout(&vsync_compl, msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for digit out to start\n");
+
+ r = omap_dispc_unregister_isr(dispc_digit_out_enable_isr, &vsync_compl,
+ irq_mask);
+ if (r)
+ DSSERR("failed to unregister %x isr\n", irq_mask);
+}
+
+static void dispc_mgr_disable_digit_out(void)
+{
+ DECLARE_COMPLETION_ONSTACK(framedone_compl);
+ int r, i;
+ u32 irq_mask;
+ int num_irqs;
+
+ if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT) == false)
+ return;
+
+ /*
+ * When we disable the digit output, we need to wait for FRAMEDONE to
+ * know that DISPC has finished with the output.
+ */
+
+ irq_mask = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_DIGIT);
+ num_irqs = 1;
+
+ if (!irq_mask) {
+ /*
+ * omap 2/3 don't have framedone irq for TV, so we need to use
+ * vsyncs for this.
+ */
+
+ irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT);
+ /*
+ * We need to wait for both even and odd vsyncs. Note that this
+ * is not totally reliable, as we could get a vsync interrupt
+ * before we disable the output, which leads to timeout in the
+ * wait_for_completion.
+ */
+ num_irqs = 2;
+ }
+
+ r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
+ irq_mask);
+ if (r)
+ DSSERR("failed to register %x isr\n", irq_mask);
+
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, false);
+
+ /* if we couldn't register the irq, just sleep and exit */
+ if (r) {
+ msleep(100);
+ return;
+ }
+
+ for (i = 0; i < num_irqs; ++i) {
+ if (!wait_for_completion_timeout(&framedone_compl,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for digit out to stop\n");
+ }
+
+ r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
+ irq_mask);
+ if (r)
+ DSSERR("failed to unregister %x isr\n", irq_mask);
+}
+
+void dispc_mgr_enable_sync(enum omap_channel channel)
+{
+ if (dss_mgr_is_lcd(channel))
+ dispc_mgr_enable_lcd_out(channel);
+ else if (channel == OMAP_DSS_CHANNEL_DIGIT)
+ dispc_mgr_enable_digit_out();
+ else
+ WARN_ON(1);
+}
+
+void dispc_mgr_disable_sync(enum omap_channel channel)
+{
+ if (dss_mgr_is_lcd(channel))
+ dispc_mgr_disable_lcd_out(channel);
+ else if (channel == OMAP_DSS_CHANNEL_DIGIT)
+ dispc_mgr_disable_digit_out();
+ else
+ WARN_ON(1);
+}
+
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+ unsigned long timeout)
+{
+ void dispc_irq_wait_handler(void *data, u32 mask)
+ {
+ complete((struct completion *)data);
+ }
+
+ int r;
+ DECLARE_COMPLETION_ONSTACK(completion);
+
+ r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
+ irqmask);
+
+ if (r)
+ return r;
+
+ timeout = wait_for_completion_interruptible_timeout(&completion,
+ timeout);
+
+ omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ if (timeout == -ERESTARTSYS)
+ return -ERESTARTSYS;
+
+ return 0;
+}
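
One detail worth flagging in the new dispc-compat.c: omap_dispc_wait_for_irq_interruptible_timeout() defines dispc_irq_wait_handler() inside the function body, which is a GCC nested-function extension rather than standard C. A file-scope helper does the same job without the extension; a minimal equivalent sketch (same includes as dispc-compat.c):

	static void dispc_irq_wait_handler(void *data, u32 mask)
	{
		complete((struct completion *)data);
	}

	int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
			unsigned long timeout)
	{
		DECLARE_COMPLETION_ONSTACK(completion);
		int r;

		r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
				irqmask);
		if (r)
			return r;

		timeout = wait_for_completion_interruptible_timeout(&completion,
				timeout);

		omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion,
				irqmask);

		if (timeout == 0)
			return -ETIMEDOUT;
		if (timeout == -ERESTARTSYS)
			return -ERESTARTSYS;

		return 0;
	}
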
diff --git a/drivers/video/omap2/dss/dispc-compat.h b/drivers/video/omap2/dss/dispc-compat.h
new file mode 100644
index 000000000000..14a69b3d4fb0
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc-compat.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP2_DSS_DISPC_COMPAT_H
+#define __OMAP2_DSS_DISPC_COMPAT_H
+
+void dispc_mgr_enable_sync(enum omap_channel channel);
+void dispc_mgr_disable_sync(enum omap_channel channel);
+
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+ unsigned long timeout);
+
+int dss_dispc_initialize_irq(void);
+void dss_dispc_uninitialize_irq(void);
+
+#endif
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index a5ab354f267a..05ff2b91d9e8 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -33,9 +33,9 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/hardirq.h>
-#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include <video/omapdss.h>
@@ -46,21 +46,6 @@
/* DISPC */
#define DISPC_SZ_REGS SZ_4K
-#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
- DISPC_IRQ_OCP_ERR | \
- DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
- DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
- DISPC_IRQ_SYNC_LOST | \
- DISPC_IRQ_SYNC_LOST_DIGIT)
-
-#define DISPC_MAX_NR_ISRS 8
-
-struct omap_dispc_isr_data {
- omap_dispc_isr_t isr;
- void *arg;
- u32 mask;
-};
-
enum omap_burst_size {
BURST_SIZE_X2 = 0,
BURST_SIZE_X4 = 1,
@@ -73,12 +58,6 @@ enum omap_burst_size {
#define REG_FLD_MOD(idx, val, start, end) \
dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
-struct dispc_irq_stats {
- unsigned long last_reset;
- unsigned irq_count;
- unsigned irqs[32];
-};
-
struct dispc_features {
u8 sw_start;
u8 fp_start;
@@ -86,19 +65,26 @@ struct dispc_features {
u16 sw_max;
u16 vp_max;
u16 hp_max;
- int (*calc_scaling) (enum omap_plane plane,
+ u8 mgr_width_start;
+ u8 mgr_height_start;
+ u16 mgr_width_max;
+ u16 mgr_height_max;
+ int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk, bool mem_to_mem);
- unsigned long (*calc_core_clk) (enum omap_plane plane,
+ unsigned long (*calc_core_clk) (unsigned long pclk,
u16 width, u16 height, u16 out_width, u16 out_height,
bool mem_to_mem);
u8 num_fifos;
/* swap GFX & WB fifos */
bool gfx_fifo_workaround:1;
+
+ /* no DISPC_IRQ_FRAMEDONETV on this SoC */
+ bool no_framedone_tv:1;
};
#define DISPC_MAX_NR_FIFOS 5
@@ -110,27 +96,15 @@ static struct {
int ctx_loss_cnt;
int irq;
- struct clk *dss_clk;
u32 fifo_size[DISPC_MAX_NR_FIFOS];
/* maps which plane is using a fifo. fifo-id -> plane-id */
int fifo_assignment[DISPC_MAX_NR_FIFOS];
- spinlock_t irq_lock;
- u32 irq_error_mask;
- struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
- u32 error_irqs;
- struct work_struct error_work;
-
bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
const struct dispc_features *feat;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spinlock_t irq_stats_lock;
- struct dispc_irq_stats irq_stats;
-#endif
} dispc;
enum omap_color_component {
@@ -186,7 +160,7 @@ static const struct {
[OMAP_DSS_CHANNEL_DIGIT] = {
.name = "DIGIT",
.vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
- .framedone_irq = 0,
+ .framedone_irq = DISPC_IRQ_FRAMEDONETV,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
@@ -241,7 +215,6 @@ struct color_conv_coef {
int full_range;
};
-static void _omap_dispc_set_irqs(void);
static unsigned long dispc_plane_pclk_rate(enum omap_plane plane);
static unsigned long dispc_plane_lclk_rate(enum omap_plane plane);
@@ -374,7 +347,7 @@ static void dispc_save_context(void)
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
- dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
+ dispc.ctx_loss_cnt = dss_get_ctx_loss_count();
dispc.ctx_valid = true;
DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -389,7 +362,7 @@ static void dispc_restore_context(void)
if (!dispc.ctx_valid)
return;
- ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
+ ctx = dss_get_ctx_loss_count();
if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
return;
@@ -496,7 +469,7 @@ static void dispc_restore_context(void)
if (dss_has_feature(FEAT_MGR_LCD3))
RR(CONTROL3);
/* clear spurious SYNC_LOST_DIGIT interrupts */
- dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
+ dispc_clear_irqstatus(DISPC_IRQ_SYNC_LOST_DIGIT);
/*
* enable last so IRQs won't trigger before
@@ -520,6 +493,7 @@ int dispc_runtime_get(void)
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
+EXPORT_SYMBOL(dispc_runtime_get);
void dispc_runtime_put(void)
{
@@ -530,16 +504,28 @@ void dispc_runtime_put(void)
r = pm_runtime_put_sync(&dispc.pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
+EXPORT_SYMBOL(dispc_runtime_put);
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
{
return mgr_desc[channel].vsync_irq;
}
+EXPORT_SYMBOL(dispc_mgr_get_vsync_irq);
u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
{
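+ /* some SoCs have no FRAMEDONETV interrupt on the DIGIT (TV) channel */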
+ if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc.feat->no_framedone_tv)
+ return 0;
+
return mgr_desc[channel].framedone_irq;
}
+EXPORT_SYMBOL(dispc_mgr_get_framedone_irq);
+
+u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel)
+{
+ return mgr_desc[channel].sync_lost_irq;
+}
+EXPORT_SYMBOL(dispc_mgr_get_sync_lost_irq);
u32 dispc_wb_get_framedone_irq(void)
{
@@ -550,28 +536,18 @@ bool dispc_mgr_go_busy(enum omap_channel channel)
{
return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
}
+EXPORT_SYMBOL(dispc_mgr_go_busy);
void dispc_mgr_go(enum omap_channel channel)
{
- bool enable_bit, go_bit;
-
- /* if the channel is not enabled, we don't need GO */
- enable_bit = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE) == 1;
-
- if (!enable_bit)
- return;
-
- go_bit = mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
-
- if (go_bit) {
- DSSERR("GO bit not down for channel %d\n", channel);
- return;
- }
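+ /* the channel must already be enabled and must not have a GO pending */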
+ WARN_ON(dispc_mgr_is_enabled(channel) == false);
+ WARN_ON(dispc_mgr_go_busy(channel));
DSSDBG("GO %s\n", mgr_desc[channel].name);
mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1);
}
+EXPORT_SYMBOL(dispc_mgr_go);
bool dispc_wb_go_busy(void)
{
@@ -975,6 +951,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
}
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
+EXPORT_SYMBOL(dispc_ovl_set_channel_out);
static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
{
@@ -1040,7 +1017,7 @@ static void dispc_configure_burst_sizes(void)
const int burst_size = BURST_SIZE_X8;
/* Configure burst size always to maximum size */
- for (i = 0; i < omap_dss_get_num_overlays(); ++i)
+ for (i = 0; i < dss_feat_get_num_ovls(); ++i)
dispc_ovl_set_burst_size(i, burst_size);
}
@@ -1074,7 +1051,7 @@ static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
}
static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
- struct omap_dss_cpr_coefs *coefs)
+ const struct omap_dss_cpr_coefs *coefs)
{
u32 coef_r, coef_g, coef_b;
@@ -1122,7 +1099,9 @@ static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
{
u32 val;
- val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+ val = FLD_VAL(height - 1, dispc.feat->mgr_height_start, 16) |
+ FLD_VAL(width - 1, dispc.feat->mgr_width_start, 0);
+
dispc_write_reg(DISPC_SIZE_MGR(channel), val);
}
@@ -1244,7 +1223,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
if (use_fifomerge) {
total_fifo_size = 0;
- for (i = 0; i < omap_dss_get_num_overlays(); ++i)
+ for (i = 0; i < dss_feat_get_num_ovls(); ++i)
total_fifo_size += dispc_ovl_get_fifo_size(i);
} else {
total_fifo_size = ovl_fifo_size;
@@ -1989,16 +1968,14 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
* This function is used to avoid synclosts in OMAP3, because of some
* undocumented horizontal position and timing related limitations.
*/
-static int check_horiz_timing_omap3(enum omap_plane plane,
+static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *t, u16 pos_x,
u16 width, u16 height, u16 out_width, u16 out_height)
{
- int DS = DIV_ROUND_UP(height, out_height);
+ const int ds = DIV_ROUND_UP(height, out_height);
unsigned long nonactive;
static const u8 limits[3] = { 8, 10, 20 };
u64 val, blank;
- unsigned long pclk = dispc_plane_pclk_rate(plane);
- unsigned long lclk = dispc_plane_lclk_rate(plane);
int i;
nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
@@ -2020,8 +1997,8 @@ static int check_horiz_timing_omap3(enum omap_plane plane,
*/
val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
- val, max(0, DS - 2) * width);
- if (val < max(0, DS - 2) * width)
+ val, max(0, ds - 2) * width);
+ if (val < max(0, ds - 2) * width)
return -EINVAL;
/*
@@ -2031,21 +2008,20 @@ static int check_horiz_timing_omap3(enum omap_plane plane,
*/
val = div_u64((u64)nonactive * lclk, pclk);
DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n",
- val, max(0, DS - 1) * width);
- if (val < max(0, DS - 1) * width)
+ val, max(0, ds - 1) * width);
+ if (val < max(0, ds - 1) * width)
return -EINVAL;
return 0;
}
-static unsigned long calc_core_clk_five_taps(enum omap_plane plane,
+static unsigned long calc_core_clk_five_taps(unsigned long pclk,
const struct omap_video_timings *mgr_timings, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
u32 core_clk = 0;
u64 tmp;
- unsigned long pclk = dispc_plane_pclk_rate(plane);
if (height <= out_height && width <= out_width)
return (unsigned long) pclk;
@@ -2079,22 +2055,19 @@ static unsigned long calc_core_clk_five_taps(enum omap_plane plane,
return core_clk;
}
-static unsigned long calc_core_clk_24xx(enum omap_plane plane, u16 width,
+static unsigned long calc_core_clk_24xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
- unsigned long pclk = dispc_plane_pclk_rate(plane);
-
if (height > out_height && width > out_width)
return pclk * 4;
else
return pclk * 2;
}
-static unsigned long calc_core_clk_34xx(enum omap_plane plane, u16 width,
+static unsigned long calc_core_clk_34xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
unsigned int hf, vf;
- unsigned long pclk = dispc_plane_pclk_rate(plane);
/*
* FIXME how to determine the 'A' factor
@@ -2117,11 +2090,9 @@ static unsigned long calc_core_clk_34xx(enum omap_plane plane, u16 width,
return pclk * vf * hf;
}
-static unsigned long calc_core_clk_44xx(enum omap_plane plane, u16 width,
+static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
- unsigned long pclk;
-
/*
* If the overlay/writeback is in mem to mem mode, there are no
* downscaling limitations with respect to pixel clock, return 1 as
@@ -2131,15 +2102,13 @@ static unsigned long calc_core_clk_44xx(enum omap_plane plane, u16 width,
if (mem_to_mem)
return 1;
- pclk = dispc_plane_pclk_rate(plane);
-
if (width > out_width)
return DIV_ROUND_UP(pclk, out_width) * width;
else
return pclk;
}
-static int dispc_ovl_calc_scaling_24xx(enum omap_plane plane,
+static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
@@ -2157,7 +2126,7 @@ static int dispc_ovl_calc_scaling_24xx(enum omap_plane plane,
do {
in_height = DIV_ROUND_UP(height, *decim_y);
in_width = DIV_ROUND_UP(width, *decim_x);
- *core_clk = dispc.feat->calc_core_clk(plane, in_width,
+ *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
*core_clk > dispc_core_clk_rate());
@@ -2180,7 +2149,7 @@ static int dispc_ovl_calc_scaling_24xx(enum omap_plane plane,
return 0;
}
-static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
+static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
@@ -2196,10 +2165,10 @@ static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
do {
in_height = DIV_ROUND_UP(height, *decim_y);
in_width = DIV_ROUND_UP(width, *decim_x);
- *core_clk = calc_core_clk_five_taps(plane, mgr_timings,
+ *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
in_width, in_height, out_width, out_height, color_mode);
- error = check_horiz_timing_omap3(plane, mgr_timings,
+ error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
pos_x, in_width, in_height, out_width,
out_height);
@@ -2208,7 +2177,7 @@ static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
in_height < out_height * 2)
*five_taps = false;
if (!*five_taps)
- *core_clk = dispc.feat->calc_core_clk(plane, in_width,
+ *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
mem_to_mem);
@@ -2227,8 +2196,8 @@ static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
}
} while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
- if (check_horiz_timing_omap3(plane, mgr_timings, pos_x, width, height,
- out_width, out_height)){
+ if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, width,
+ height, out_width, out_height)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
}
@@ -2246,7 +2215,7 @@ static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
return 0;
}
-static int dispc_ovl_calc_scaling_44xx(enum omap_plane plane,
+static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
@@ -2258,14 +2227,14 @@ static int dispc_ovl_calc_scaling_44xx(enum omap_plane plane,
u16 in_height = DIV_ROUND_UP(height, *decim_y);
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
- unsigned long pclk = dispc_plane_pclk_rate(plane);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
- if (mem_to_mem)
- in_width_max = DIV_ROUND_UP(out_width, maxdownscale);
- else
+ if (mem_to_mem) {
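+ /* no pixel clock limit in mem-to-mem mode; only the max downscale ratio bounds the input width */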
+ in_width_max = out_width * maxdownscale;
+ } else {
in_width_max = dispc_core_clk_rate() /
DIV_ROUND_UP(pclk, out_width);
+ }
*decim_x = DIV_ROUND_UP(width, in_width_max);
@@ -2283,12 +2252,12 @@ static int dispc_ovl_calc_scaling_44xx(enum omap_plane plane,
return -EINVAL;
}
- *core_clk = dispc.feat->calc_core_clk(plane, in_width, in_height,
+ *core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height,
out_width, out_height, mem_to_mem);
return 0;
}
-static int dispc_ovl_calc_scaling(enum omap_plane plane,
+static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
@@ -2307,9 +2276,14 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
if ((caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
return -EINVAL;
- *x_predecim = max_decim_limit;
- *y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
- dss_has_feature(FEAT_BURST_2D)) ? 2 : max_decim_limit;
+ if (mem_to_mem) {
+ *x_predecim = *y_predecim = 1;
+ } else {
+ *x_predecim = max_decim_limit;
+ *y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
+ dss_has_feature(FEAT_BURST_2D)) ?
+ 2 : max_decim_limit;
+ }
if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
color_mode == OMAP_DSS_COLOR_CLUT2 ||
@@ -2330,7 +2304,7 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(plane, mgr_timings, width, height,
+ ret = dispc.feat->calc_scaling(pclk, lclk, mgr_timings, width, height,
out_width, out_height, color_mode, five_taps,
x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
@@ -2353,6 +2327,47 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
return 0;
}
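+
+/* check an overlay config against the given manager timings and compute the required pre-decimation */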
+int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
+ const struct omap_overlay_info *oi,
+ const struct omap_video_timings *timings,
+ int *x_predecim, int *y_predecim)
+{
+ enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
+ bool five_taps = true;
+ bool fieldmode = 0;
+ u16 in_height = oi->height;
+ u16 in_width = oi->width;
+ bool ilace = timings->interlace;
+ u16 out_width, out_height;
+ int pos_x = oi->pos_x;
+ unsigned long pclk = dispc_mgr_pclk_rate(channel);
+ unsigned long lclk = dispc_mgr_lclk_rate(channel);
+
+ out_width = oi->out_width == 0 ? oi->width : oi->out_width;
+ out_height = oi->out_height == 0 ? oi->height : oi->out_height;
+
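+ /* interlaced output with no vertical scaling: each field fetches every other line */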
+ if (ilace && oi->height == out_height)
+ fieldmode = 1;
+
+ if (ilace) {
+ if (fieldmode)
+ in_height /= 2;
+ out_height /= 2;
+
+ DSSDBG("adjusting for ilace: height %d, out_height %d\n",
+ in_height, out_height);
+ }
+
+ if (!dss_feat_color_mode_supported(plane, oi->color_mode))
+ return -EINVAL;
+
+ return dispc_ovl_calc_scaling(pclk, lclk, caps, timings, in_width,
+ in_height, out_width, out_height, oi->color_mode,
+ &five_taps, x_predecim, y_predecim, pos_x,
+ oi->rotation_type, false);
+}
+EXPORT_SYMBOL(dispc_ovl_check);
+
static int dispc_ovl_setup_common(enum omap_plane plane,
enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr,
u16 screen_width, int pos_x, int pos_y, u16 width, u16 height,
@@ -2368,12 +2383,14 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
unsigned offset0, offset1;
s32 row_inc;
s32 pix_inc;
- u16 frame_height = height;
+ u16 frame_width, frame_height;
unsigned int field_offset = 0;
u16 in_height = height;
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
bool ilace = mgr_timings->interlace;
+ unsigned long pclk = dispc_plane_pclk_rate(plane);
+ unsigned long lclk = dispc_plane_lclk_rate(plane);
if (paddr == 0)
return -EINVAL;
@@ -2398,7 +2415,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (!dss_feat_color_mode_supported(plane, color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(plane, caps, mgr_timings, in_width,
+ r = dispc_ovl_calc_scaling(pclk, lclk, caps, mgr_timings, in_width,
in_height, out_width, out_height, color_mode,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
@@ -2436,20 +2453,28 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
row_inc = 0;
pix_inc = 0;
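+ /* writeback stores the scaled output to memory, so its frame geometry is the output size */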
+ if (plane == OMAP_DSS_WB) {
+ frame_width = out_width;
+ frame_height = out_height;
+ } else {
+ frame_width = in_width;
+ frame_height = height;
+ }
+
if (rotation_type == OMAP_DSS_ROT_TILER)
- calc_tiler_rotation_offset(screen_width, in_width,
+ calc_tiler_rotation_offset(screen_width, frame_width,
color_mode, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc,
x_predecim, y_predecim);
else if (rotation_type == OMAP_DSS_ROT_DMA)
- calc_dma_rotation_offset(rotation, mirror,
- screen_width, in_width, frame_height,
+ calc_dma_rotation_offset(rotation, mirror, screen_width,
+ frame_width, frame_height,
color_mode, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc,
x_predecim, y_predecim);
else
calc_vrfb_rotation_offset(rotation, mirror,
- screen_width, in_width, frame_height,
+ screen_width, frame_width, frame_height,
color_mode, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc,
x_predecim, y_predecim);
@@ -2503,7 +2528,7 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
bool mem_to_mem)
{
int r;
- struct omap_overlay *ovl = omap_dss_get_overlay(plane);
+ enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
enum omap_channel channel;
channel = dispc_ovl_get_channel_out(plane);
@@ -2514,7 +2539,7 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height,
oi->color_mode, oi->rotation, oi->mirror, channel, replication);
- r = dispc_ovl_setup_common(plane, ovl->caps, oi->paddr, oi->p_uv_addr,
+ r = dispc_ovl_setup_common(plane, caps, oi->paddr, oi->p_uv_addr,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
@@ -2522,6 +2547,7 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
return r;
}
+EXPORT_SYMBOL(dispc_ovl_setup);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct omap_video_timings *mgr_timings)
@@ -2582,192 +2608,39 @@ int dispc_ovl_enable(enum omap_plane plane, bool enable)
return 0;
}
+EXPORT_SYMBOL(dispc_ovl_enable);
-static void dispc_disable_isr(void *data, u32 mask)
+bool dispc_ovl_enabled(enum omap_plane plane)
{
- struct completion *compl = data;
- complete(compl);
+ return REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
}
+EXPORT_SYMBOL(dispc_ovl_enabled);
-static void _enable_lcd_out(enum omap_channel channel, bool enable)
+void dispc_mgr_enable(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
/* flush posted write */
mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
}
-
-static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
-{
- struct completion frame_done_completion;
- bool is_on;
- int r;
- u32 irq;
-
- /* When we disable LCD output, we need to wait until frame is done.
- * Otherwise the DSS is still working, and turning off the clocks
- * prevents DSS from going to OFF mode */
- is_on = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
-
- irq = mgr_desc[channel].framedone_irq;
-
- if (!enable && is_on) {
- init_completion(&frame_done_completion);
-
- r = omap_dispc_register_isr(dispc_disable_isr,
- &frame_done_completion, irq);
-
- if (r)
- DSSERR("failed to register FRAMEDONE isr\n");
- }
-
- _enable_lcd_out(channel, enable);
-
- if (!enable && is_on) {
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for FRAME DONE\n");
-
- r = omap_dispc_unregister_isr(dispc_disable_isr,
- &frame_done_completion, irq);
-
- if (r)
- DSSERR("failed to unregister FRAMEDONE isr\n");
- }
-}
-
-static void _enable_digit_out(bool enable)
-{
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
- /* flush posted write */
- dispc_read_reg(DISPC_CONTROL);
-}
-
-static void dispc_mgr_enable_digit_out(bool enable)
-{
- struct completion frame_done_completion;
- enum dss_hdmi_venc_clk_source_select src;
- int r, i;
- u32 irq_mask;
- int num_irqs;
-
- if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
- return;
-
- src = dss_get_hdmi_venc_clk_source();
-
- if (enable) {
- unsigned long flags;
- /* When we enable digit output, we'll get an extra digit
- * sync lost interrupt, that we need to ignore */
- spin_lock_irqsave(&dispc.irq_lock, flags);
- dispc.irq_error_mask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
- _omap_dispc_set_irqs();
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
- }
-
- /* When we disable digit output, we need to wait until fields are done.
- * Otherwise the DSS is still working, and turning off the clocks
- * prevents DSS from going to OFF mode. And when enabling, we need to
- * wait for the extra sync losts */
- init_completion(&frame_done_completion);
-
- if (src == DSS_HDMI_M_PCLK && enable == false) {
- irq_mask = DISPC_IRQ_FRAMEDONETV;
- num_irqs = 1;
- } else {
- irq_mask = DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
- /* XXX I understand from TRM that we should only wait for the
- * current field to complete. But it seems we have to wait for
- * both fields */
- num_irqs = 2;
- }
-
- r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion,
- irq_mask);
- if (r)
- DSSERR("failed to register %x isr\n", irq_mask);
-
- _enable_digit_out(enable);
-
- for (i = 0; i < num_irqs; ++i) {
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for digit out to %s\n",
- enable ? "start" : "stop");
- }
-
- r = omap_dispc_unregister_isr(dispc_disable_isr, &frame_done_completion,
- irq_mask);
- if (r)
- DSSERR("failed to unregister %x isr\n", irq_mask);
-
- if (enable) {
- unsigned long flags;
- spin_lock_irqsave(&dispc.irq_lock, flags);
- dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST_DIGIT;
- dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
- _omap_dispc_set_irqs();
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
- }
-}
+EXPORT_SYMBOL(dispc_mgr_enable);
bool dispc_mgr_is_enabled(enum omap_channel channel)
{
return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
}
-
-void dispc_mgr_enable(enum omap_channel channel, bool enable)
-{
- if (dss_mgr_is_lcd(channel))
- dispc_mgr_enable_lcd_out(channel, enable);
- else if (channel == OMAP_DSS_CHANNEL_DIGIT)
- dispc_mgr_enable_digit_out(enable);
- else
- BUG();
-}
+EXPORT_SYMBOL(dispc_mgr_is_enabled);
void dispc_wb_enable(bool enable)
{
- enum omap_plane plane = OMAP_DSS_WB;
- struct completion frame_done_completion;
- bool is_on;
- int r;
- u32 irq;
-
- is_on = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
- irq = DISPC_IRQ_FRAMEDONEWB;
-
- if (!enable && is_on) {
- init_completion(&frame_done_completion);
-
- r = omap_dispc_register_isr(dispc_disable_isr,
- &frame_done_completion, irq);
- if (r)
- DSSERR("failed to register FRAMEDONEWB isr\n");
- }
-
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
-
- if (!enable && is_on) {
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for FRAMEDONEWB\n");
-
- r = omap_dispc_unregister_isr(dispc_disable_isr,
- &frame_done_completion, irq);
- if (r)
- DSSERR("failed to unregister FRAMEDONEWB isr\n");
- }
+ dispc_ovl_enable(OMAP_DSS_WB, enable);
}
bool dispc_wb_is_enabled(void)
{
- enum omap_plane plane = OMAP_DSS_WB;
-
- return REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
+ return dispc_ovl_enabled(OMAP_DSS_WB);
}
-void dispc_lcd_enable_signal_polarity(bool act_high)
+static void dispc_lcd_enable_signal_polarity(bool act_high)
{
if (!dss_has_feature(FEAT_LCDENABLEPOL))
return;
@@ -2791,13 +2664,13 @@ void dispc_pck_free_enable(bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
-void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
}
-void dispc_mgr_set_lcd_type_tft(enum omap_channel channel)
+static void dispc_mgr_set_lcd_type_tft(enum omap_channel channel)
{
mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1);
}
@@ -2840,7 +2713,7 @@ static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
}
void dispc_mgr_setup(enum omap_channel channel,
- struct omap_overlay_manager_info *info)
+ const struct omap_overlay_manager_info *info)
{
dispc_mgr_set_default_color(channel, info->default_color);
dispc_mgr_set_trans_key(channel, info->trans_key_type, info->trans_key);
@@ -2852,8 +2725,9 @@ void dispc_mgr_setup(enum omap_channel channel,
dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
}
}
+EXPORT_SYMBOL(dispc_mgr_setup);
-void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
+static void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
{
int code;
@@ -2878,7 +2752,7 @@ void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code);
}
-void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
+static void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
{
u32 l;
int gpout0, gpout1;
@@ -2907,15 +2781,33 @@ void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
dispc_write_reg(DISPC_CONTROL, l);
}
-void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable);
}
+void dispc_mgr_set_lcd_config(enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config)
+{
+ dispc_mgr_set_io_pad_mode(config->io_pad_mode);
+
+ dispc_mgr_enable_stallmode(channel, config->stallmode);
+ dispc_mgr_enable_fifohandcheck(channel, config->fifohandcheck);
+
+ dispc_mgr_set_clock_div(channel, &config->clock_info);
+
+ dispc_mgr_set_tft_data_lines(channel, config->video_port_width);
+
+ dispc_lcd_enable_signal_polarity(config->lcden_sig_polarity);
+
+ dispc_mgr_set_lcd_type_tft(channel);
+}
+EXPORT_SYMBOL(dispc_mgr_set_lcd_config);
+
static bool _dispc_mgr_size_ok(u16 width, u16 height)
{
- return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
- height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
+ return width <= dispc.feat->mgr_width_max &&
+ height <= dispc.feat->mgr_height_max;
}
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
@@ -3010,7 +2902,7 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
/* change name to mode? */
void dispc_mgr_set_timings(enum omap_channel channel,
- struct omap_video_timings *timings)
+ const struct omap_video_timings *timings)
{
unsigned xtot, ytot;
unsigned long ht, vt;
@@ -3049,6 +2941,7 @@ void dispc_mgr_set_timings(enum omap_channel channel,
dispc_mgr_set_size(channel, t.x_res, t.y_res);
}
+EXPORT_SYMBOL(dispc_mgr_set_timings);
static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
u16 pck_div)
@@ -3076,7 +2969,7 @@ unsigned long dispc_fclk_rate(void)
switch (dss_get_dispc_clk_source()) {
case OMAP_DSS_CLK_SRC_FCK:
- r = clk_get_rate(dispc.dss_clk);
+ r = dss_get_dispc_clk_rate();
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -3101,28 +2994,32 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ if (dss_mgr_is_lcd(channel)) {
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
- lcd = FLD_GET(l, 23, 16);
+ lcd = FLD_GET(l, 23, 16);
- switch (dss_get_lcd_clk_source(channel)) {
- case OMAP_DSS_CLK_SRC_FCK:
- r = clk_get_rate(dispc.dss_clk);
- break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- dsidev = dsi_get_dsidev_from_id(0);
- r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
- break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- dsidev = dsi_get_dsidev_from_id(1);
- r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
- break;
- default:
- BUG();
- return 0;
- }
+ switch (dss_get_lcd_clk_source(channel)) {
+ case OMAP_DSS_CLK_SRC_FCK:
+ r = dss_get_dispc_clk_rate();
+ break;
+ case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ dsidev = dsi_get_dsidev_from_id(0);
+ r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ break;
+ case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
+ dsidev = dsi_get_dsidev_from_id(1);
+ r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ break;
+ default:
+ BUG();
+ return 0;
+ }
- return r / lcd;
+ return r / lcd;
+ } else {
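+ /* non-LCD (DIGIT) managers run directly off the DISPC functional clock */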
+ return dispc_fclk_rate();
+ }
}
unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
@@ -3172,21 +3069,28 @@ unsigned long dispc_core_clk_rate(void)
static unsigned long dispc_plane_pclk_rate(enum omap_plane plane)
{
- enum omap_channel channel = dispc_ovl_get_channel_out(plane);
+ enum omap_channel channel;
+
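+ /* the writeback pipeline has no output channel, hence no pixel clock */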
+ if (plane == OMAP_DSS_WB)
+ return 0;
+
+ channel = dispc_ovl_get_channel_out(plane);
return dispc_mgr_pclk_rate(channel);
}
static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
{
- enum omap_channel channel = dispc_ovl_get_channel_out(plane);
+ enum omap_channel channel;
- if (dss_mgr_is_lcd(channel))
- return dispc_mgr_lclk_rate(channel);
- else
- return dispc_fclk_rate();
+ if (plane == OMAP_DSS_WB)
+ return 0;
+
+ channel = dispc_ovl_get_channel_out(plane);
+ return dispc_mgr_lclk_rate(channel);
}
+
static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
{
int lcd, pcd;
@@ -3244,64 +3148,6 @@ void dispc_dump_clocks(struct seq_file *s)
dispc_runtime_put();
}
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-void dispc_dump_irqs(struct seq_file *s)
-{
- unsigned long flags;
- struct dispc_irq_stats stats;
-
- spin_lock_irqsave(&dispc.irq_stats_lock, flags);
-
- stats = dispc.irq_stats;
- memset(&dispc.irq_stats, 0, sizeof(dispc.irq_stats));
- dispc.irq_stats.last_reset = jiffies;
-
- spin_unlock_irqrestore(&dispc.irq_stats_lock, flags);
-
- seq_printf(s, "period %u ms\n",
- jiffies_to_msecs(jiffies - stats.last_reset));
-
- seq_printf(s, "irqs %d\n", stats.irq_count);
-#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
-
- PIS(FRAMEDONE);
- PIS(VSYNC);
- PIS(EVSYNC_EVEN);
- PIS(EVSYNC_ODD);
- PIS(ACBIAS_COUNT_STAT);
- PIS(PROG_LINE_NUM);
- PIS(GFX_FIFO_UNDERFLOW);
- PIS(GFX_END_WIN);
- PIS(PAL_GAMMA_MASK);
- PIS(OCP_ERR);
- PIS(VID1_FIFO_UNDERFLOW);
- PIS(VID1_END_WIN);
- PIS(VID2_FIFO_UNDERFLOW);
- PIS(VID2_END_WIN);
- if (dss_feat_get_num_ovls() > 3) {
- PIS(VID3_FIFO_UNDERFLOW);
- PIS(VID3_END_WIN);
- }
- PIS(SYNC_LOST);
- PIS(SYNC_LOST_DIGIT);
- PIS(WAKEUP);
- if (dss_has_feature(FEAT_MGR_LCD2)) {
- PIS(FRAMEDONE2);
- PIS(VSYNC2);
- PIS(ACBIAS_COUNT_STAT2);
- PIS(SYNC_LOST2);
- }
- if (dss_has_feature(FEAT_MGR_LCD3)) {
- PIS(FRAMEDONE3);
- PIS(VSYNC3);
- PIS(ACBIAS_COUNT_STAT3);
- PIS(SYNC_LOST3);
- }
-#undef PIS
-}
-#endif
-
static void dispc_dump_regs(struct seq_file *s)
{
int i, j;
@@ -3351,7 +3197,7 @@ static void dispc_dump_regs(struct seq_file *s)
#define DISPC_REG(i, name) name(i)
#define DUMPREG(i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
- 48 - strlen(#r) - strlen(p_names[i]), " ", \
+ (int)(48 - strlen(#r) - strlen(p_names[i])), " ", \
dispc_read_reg(DISPC_REG(i, r)))
p_names = mgr_names;
@@ -3428,7 +3274,7 @@ static void dispc_dump_regs(struct seq_file *s)
#define DISPC_REG(plane, name, i) name(plane, i)
#define DUMPREG(plane, name, i) \
seq_printf(s, "%s_%d(%s)%*s %08x\n", #name, i, p_names[plane], \
- 46 - strlen(#name) - strlen(p_names[plane]), " ", \
+ (int)(46 - strlen(#name) - strlen(p_names[plane])), " ", \
dispc_read_reg(DISPC_REG(plane, name, i)))
/* Video pipeline coefficient registers */
@@ -3531,7 +3377,7 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
}
void dispc_mgr_set_clock_div(enum omap_channel channel,
- struct dispc_clock_info *cinfo)
+ const struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
@@ -3555,403 +3401,34 @@ int dispc_mgr_get_clock_div(enum omap_channel channel,
return 0;
}
-/* dispc.irq_lock has to be locked by the caller */
-static void _omap_dispc_set_irqs(void)
+u32 dispc_read_irqstatus(void)
{
- u32 mask;
- u32 old_mask;
- int i;
- struct omap_dispc_isr_data *isr_data;
-
- mask = dispc.irq_error_mask;
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc.registered_isr[i];
-
- if (isr_data->isr == NULL)
- continue;
-
- mask |= isr_data->mask;
- }
-
- old_mask = dispc_read_reg(DISPC_IRQENABLE);
- /* clear the irqstatus for newly enabled irqs */
- dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
-
- dispc_write_reg(DISPC_IRQENABLE, mask);
-}
-
-int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
-{
- int i;
- int ret;
- unsigned long flags;
- struct omap_dispc_isr_data *isr_data;
-
- if (isr == NULL)
- return -EINVAL;
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
-
- /* check for duplicate entry */
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc.registered_isr[i];
- if (isr_data->isr == isr && isr_data->arg == arg &&
- isr_data->mask == mask) {
- ret = -EINVAL;
- goto err;
- }
- }
-
- isr_data = NULL;
- ret = -EBUSY;
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc.registered_isr[i];
-
- if (isr_data->isr != NULL)
- continue;
-
- isr_data->isr = isr;
- isr_data->arg = arg;
- isr_data->mask = mask;
- ret = 0;
-
- break;
- }
-
- if (ret)
- goto err;
-
- _omap_dispc_set_irqs();
-
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- return 0;
-err:
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(omap_dispc_register_isr);
-
-int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
-{
- int i;
- unsigned long flags;
- int ret = -EINVAL;
- struct omap_dispc_isr_data *isr_data;
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc.registered_isr[i];
- if (isr_data->isr != isr || isr_data->arg != arg ||
- isr_data->mask != mask)
- continue;
-
- /* found the correct isr */
-
- isr_data->isr = NULL;
- isr_data->arg = NULL;
- isr_data->mask = 0;
-
- ret = 0;
- break;
- }
-
- if (ret == 0)
- _omap_dispc_set_irqs();
-
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(omap_dispc_unregister_isr);
-
-#ifdef DEBUG
-static void print_irq_status(u32 status)
-{
- if ((status & dispc.irq_error_mask) == 0)
- return;
-
- printk(KERN_DEBUG "DISPC IRQ: 0x%x: ", status);
-
-#define PIS(x) \
- if (status & DISPC_IRQ_##x) \
- printk(#x " ");
- PIS(GFX_FIFO_UNDERFLOW);
- PIS(OCP_ERR);
- PIS(VID1_FIFO_UNDERFLOW);
- PIS(VID2_FIFO_UNDERFLOW);
- if (dss_feat_get_num_ovls() > 3)
- PIS(VID3_FIFO_UNDERFLOW);
- PIS(SYNC_LOST);
- PIS(SYNC_LOST_DIGIT);
- if (dss_has_feature(FEAT_MGR_LCD2))
- PIS(SYNC_LOST2);
- if (dss_has_feature(FEAT_MGR_LCD3))
- PIS(SYNC_LOST3);
-#undef PIS
-
- printk("\n");
-}
-#endif
-
-/* Called from dss.c. Note that we don't touch clocks here,
- * but we presume they are on because we got an IRQ. However,
- * an irq handler may turn the clocks off, so we may not have
- * clock later in the function. */
-static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
-{
- int i;
- u32 irqstatus, irqenable;
- u32 handledirqs = 0;
- u32 unhandled_errors;
- struct omap_dispc_isr_data *isr_data;
- struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
-
- spin_lock(&dispc.irq_lock);
-
- irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
- irqenable = dispc_read_reg(DISPC_IRQENABLE);
-
- /* IRQ is not for us */
- if (!(irqstatus & irqenable)) {
- spin_unlock(&dispc.irq_lock);
- return IRQ_NONE;
- }
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock(&dispc.irq_stats_lock);
- dispc.irq_stats.irq_count++;
- dss_collect_irq_stats(irqstatus, dispc.irq_stats.irqs);
- spin_unlock(&dispc.irq_stats_lock);
-#endif
-
-#ifdef DEBUG
- if (dss_debug)
- print_irq_status(irqstatus);
-#endif
- /* Ack the interrupt. Do it here before clocks are possibly turned
- * off */
- dispc_write_reg(DISPC_IRQSTATUS, irqstatus);
- /* flush posted write */
- dispc_read_reg(DISPC_IRQSTATUS);
-
- /* make a copy and unlock, so that isrs can unregister
- * themselves */
- memcpy(registered_isr, dispc.registered_isr,
- sizeof(registered_isr));
-
- spin_unlock(&dispc.irq_lock);
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &registered_isr[i];
-
- if (!isr_data->isr)
- continue;
-
- if (isr_data->mask & irqstatus) {
- isr_data->isr(isr_data->arg, irqstatus);
- handledirqs |= isr_data->mask;
- }
- }
-
- spin_lock(&dispc.irq_lock);
-
- unhandled_errors = irqstatus & ~handledirqs & dispc.irq_error_mask;
-
- if (unhandled_errors) {
- dispc.error_irqs |= unhandled_errors;
-
- dispc.irq_error_mask &= ~unhandled_errors;
- _omap_dispc_set_irqs();
-
- schedule_work(&dispc.error_work);
- }
-
- spin_unlock(&dispc.irq_lock);
-
- return IRQ_HANDLED;
-}
-
-static void dispc_error_worker(struct work_struct *work)
-{
- int i;
- u32 errors;
- unsigned long flags;
- static const unsigned fifo_underflow_bits[] = {
- DISPC_IRQ_GFX_FIFO_UNDERFLOW,
- DISPC_IRQ_VID1_FIFO_UNDERFLOW,
- DISPC_IRQ_VID2_FIFO_UNDERFLOW,
- DISPC_IRQ_VID3_FIFO_UNDERFLOW,
- };
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
- errors = dispc.error_irqs;
- dispc.error_irqs = 0;
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- dispc_runtime_get();
-
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- unsigned bit;
-
- ovl = omap_dss_get_overlay(i);
- bit = fifo_underflow_bits[i];
-
- if (bit & errors) {
- DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
- ovl->name);
- dispc_ovl_enable(ovl->id, false);
- dispc_mgr_go(ovl->manager->id);
- msleep(50);
- }
- }
-
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
- unsigned bit;
-
- mgr = omap_dss_get_overlay_manager(i);
- bit = mgr_desc[i].sync_lost_irq;
-
- if (bit & errors) {
- struct omap_dss_device *dssdev = mgr->get_device(mgr);
- bool enable;
-
- DSSERR("SYNC_LOST on channel %s, restarting the output "
- "with video overlays disabled\n",
- mgr->name);
-
- enable = dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
- dssdev->driver->disable(dssdev);
-
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(i);
-
- if (ovl->id != OMAP_DSS_GFX &&
- ovl->manager == mgr)
- dispc_ovl_enable(ovl->id, false);
- }
-
- dispc_mgr_go(mgr->id);
- msleep(50);
-
- if (enable)
- dssdev->driver->enable(dssdev);
- }
- }
-
- if (errors & DISPC_IRQ_OCP_ERR) {
- DSSERR("OCP_ERR\n");
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
- struct omap_dss_device *dssdev;
-
- mgr = omap_dss_get_overlay_manager(i);
- dssdev = mgr->get_device(mgr);
-
- if (dssdev && dssdev->driver)
- dssdev->driver->disable(dssdev);
- }
- }
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
- dispc.irq_error_mask |= errors;
- _omap_dispc_set_irqs();
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- dispc_runtime_put();
+ return dispc_read_reg(DISPC_IRQSTATUS);
}
+EXPORT_SYMBOL(dispc_read_irqstatus);
-int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
+void dispc_clear_irqstatus(u32 mask)
{
- void dispc_irq_wait_handler(void *data, u32 mask)
- {
- complete((struct completion *)data);
- }
-
- int r;
- DECLARE_COMPLETION_ONSTACK(completion);
-
- r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
- irqmask);
-
- if (r)
- return r;
-
- timeout = wait_for_completion_timeout(&completion, timeout);
-
- omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
-
- if (timeout == 0)
- return -ETIMEDOUT;
-
- if (timeout == -ERESTARTSYS)
- return -ERESTARTSYS;
-
- return 0;
+ dispc_write_reg(DISPC_IRQSTATUS, mask);
}
+EXPORT_SYMBOL(dispc_clear_irqstatus);
-int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
- unsigned long timeout)
+u32 dispc_read_irqenable(void)
{
- void dispc_irq_wait_handler(void *data, u32 mask)
- {
- complete((struct completion *)data);
- }
-
- int r;
- DECLARE_COMPLETION_ONSTACK(completion);
-
- r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
- irqmask);
-
- if (r)
- return r;
-
- timeout = wait_for_completion_interruptible_timeout(&completion,
- timeout);
-
- omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
-
- if (timeout == 0)
- return -ETIMEDOUT;
-
- if (timeout == -ERESTARTSYS)
- return -ERESTARTSYS;
-
- return 0;
+ return dispc_read_reg(DISPC_IRQENABLE);
}
+EXPORT_SYMBOL(dispc_read_irqenable);
-static void _omap_dispc_initialize_irq(void)
+void dispc_write_irqenable(u32 mask)
{
- unsigned long flags;
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
-
- memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));
-
- dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
- if (dss_has_feature(FEAT_MGR_LCD2))
- dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
- if (dss_has_feature(FEAT_MGR_LCD3))
- dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST3;
- if (dss_feat_get_num_ovls() > 3)
- dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
-
- /* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
- * so clear it */
- dispc_write_reg(DISPC_IRQSTATUS, dispc_read_reg(DISPC_IRQSTATUS));
+ u32 old_mask = dispc_read_reg(DISPC_IRQENABLE);
- _omap_dispc_set_irqs();
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_clear_irqstatus((mask ^ old_mask) & mask);
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
+ dispc_write_reg(DISPC_IRQENABLE, mask);
}
+EXPORT_SYMBOL(dispc_write_irqenable);
void dispc_enable_sidle(void)
{
@@ -3998,9 +3475,14 @@ static const struct dispc_features omap24xx_dispc_feats __initconst = {
.sw_max = 64,
.vp_max = 255,
.hp_max = 256,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
.calc_scaling = dispc_ovl_calc_scaling_24xx,
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
+ .no_framedone_tv = true,
};
static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
@@ -4010,9 +3492,14 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
.sw_max = 64,
.vp_max = 255,
.hp_max = 256,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
+ .no_framedone_tv = true,
};
static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
@@ -4022,9 +3509,14 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
+ .no_framedone_tv = true,
};
static const struct dispc_features omap44xx_dispc_feats __initconst = {
@@ -4034,6 +3526,27 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
+ .calc_scaling = dispc_ovl_calc_scaling_44xx,
+ .calc_core_clk = calc_core_clk_44xx,
+ .num_fifos = 5,
+ .gfx_fifo_workaround = true,
+};
+
+static const struct dispc_features omap54xx_dispc_feats __initconst = {
+ .sw_start = 7,
+ .fp_start = 19,
+ .bp_start = 31,
+ .sw_max = 256,
+ .vp_max = 4095,
+ .hp_max = 4096,
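+ /* OMAP5 manager size fields are one bit wider, allowing up to 4096x4096 */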
+ .mgr_width_start = 11,
+ .mgr_height_start = 27,
+ .mgr_width_max = 4096,
+ .mgr_height_max = 4096,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
@@ -4042,7 +3555,6 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
static int __init dispc_init_features(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const struct dispc_features *src;
struct dispc_features *dst;
@@ -4052,7 +3564,7 @@ static int __init dispc_init_features(struct platform_device *pdev)
return -ENOMEM;
}
- switch (pdata->version) {
+ switch (omapdss_get_version()) {
case OMAPDSS_VER_OMAP24xx:
src = &omap24xx_dispc_feats;
break;
@@ -4074,7 +3586,7 @@ static int __init dispc_init_features(struct platform_device *pdev)
break;
case OMAPDSS_VER_OMAP5:
- src = &omap44xx_dispc_feats;
+ src = &omap54xx_dispc_feats;
break;
default:
@@ -4087,13 +3599,25 @@ static int __init dispc_init_features(struct platform_device *pdev)
return 0;
}
+int dispc_request_irq(irq_handler_t handler, void *dev_id)
+{
+ return devm_request_irq(&dispc.pdev->dev, dispc.irq, handler,
+ IRQF_SHARED, "OMAP DISPC", dev_id);
+}
+EXPORT_SYMBOL(dispc_request_irq);
+
+void dispc_free_irq(void *dev_id)
+{
+ devm_free_irq(&dispc.pdev->dev, dispc.irq, dev_id);
+}
+EXPORT_SYMBOL(dispc_free_irq);
+
/* DISPC HW IP initialisation */
static int __init omap_dispchw_probe(struct platform_device *pdev)
{
u32 rev;
int r = 0;
struct resource *dispc_mem;
- struct clk *clk;
dispc.pdev = pdev;
@@ -4101,15 +3625,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
if (r)
return r;
- spin_lock_init(&dispc.irq_lock);
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock_init(&dispc.irq_stats_lock);
- dispc.irq_stats.last_reset = jiffies;
-#endif
-
- INIT_WORK(&dispc.error_work, dispc_error_worker);
-
dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
if (!dispc_mem) {
DSSERR("can't get IORESOURCE_MEM DISPC\n");
@@ -4129,22 +3644,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
return -ENODEV;
}
- r = devm_request_irq(&pdev->dev, dispc.irq, omap_dispc_irq_handler,
- IRQF_SHARED, "OMAP DISPC", dispc.pdev);
- if (r < 0) {
- DSSERR("request_irq failed\n");
- return r;
- }
-
- clk = clk_get(&pdev->dev, "fck");
- if (IS_ERR(clk)) {
- DSSERR("can't get fck\n");
- r = PTR_ERR(clk);
- return r;
- }
-
- dispc.dss_clk = clk;
-
pm_runtime_enable(&pdev->dev);
r = dispc_runtime_get();
@@ -4153,8 +3652,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
_omap_dispc_initial_config();
- _omap_dispc_initialize_irq();
-
rev = dispc_read_reg(DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
@@ -4163,14 +3660,10 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
dss_debugfs_create_file("dispc", dispc_dump_regs);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
-#endif
return 0;
err_runtime_get:
pm_runtime_disable(&pdev->dev);
- clk_put(dispc.dss_clk);
return r;
}
@@ -4178,8 +3671,6 @@ static int __exit omap_dispchw_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
- clk_put(dispc.dss_clk);
-
return 0;
}
diff --git a/drivers/video/omap2/dss/display-sysfs.c b/drivers/video/omap2/dss/display-sysfs.c
new file mode 100644
index 000000000000..18211a9ab354
--- /dev/null
+++ b/drivers/video/omap2/dss/display-sysfs.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DISPLAY"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+
+#include <video/omapdss.h>
+#include "dss.h"
+#include "dss_features.h"
+
+static ssize_t display_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
+}
+
+static ssize_t display_enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int r;
+ bool enabled;
+
+ r = strtobool(buf, &enabled);
+ if (r)
+ return r;
+
+ if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
+ if (enabled) {
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ return r;
+ } else {
+ dssdev->driver->disable(dssdev);
+ }
+ }
+
+ return size;
+}
+
+static ssize_t display_tear_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ dssdev->driver->get_te ?
+ dssdev->driver->get_te(dssdev) : 0);
+}
+
+static ssize_t display_tear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int r;
+ bool te;
+
+ if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
+ return -ENOENT;
+
+ r = strtobool(buf, &te);
+ if (r)
+ return r;
+
+ r = dssdev->driver->enable_te(dssdev, te);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_timings_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_video_timings t;
+
+ if (!dssdev->driver->get_timings)
+ return -ENOENT;
+
+ dssdev->driver->get_timings(dssdev, &t);
+
+ return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
+ t.pixel_clock,
+ t.x_res, t.hfp, t.hbp, t.hsw,
+ t.y_res, t.vfp, t.vbp, t.vsw);
+}
+
+static ssize_t display_timings_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_video_timings t = dssdev->panel.timings;
+ int r, found;
+
+ if (!dssdev->driver->set_timings || !dssdev->driver->check_timings)
+ return -ENOENT;
+
+ found = 0;
+#ifdef CONFIG_OMAP2_DSS_VENC
+ if (strncmp("pal", buf, 3) == 0) {
+ t = omap_dss_pal_timings;
+ found = 1;
+ } else if (strncmp("ntsc", buf, 4) == 0) {
+ t = omap_dss_ntsc_timings;
+ found = 1;
+ }
+#endif
+ if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
+ &t.pixel_clock,
+ &t.x_res, &t.hfp, &t.hbp, &t.hsw,
+ &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
+ return -EINVAL;
+
+ r = dssdev->driver->check_timings(dssdev, &t);
+ if (r)
+ return r;
+
+ dssdev->driver->disable(dssdev);
+ dssdev->driver->set_timings(dssdev, &t);
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_rotate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int rotate;
+ if (!dssdev->driver->get_rotate)
+ return -ENOENT;
+ rotate = dssdev->driver->get_rotate(dssdev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
+}
+
+static ssize_t display_rotate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int rot, r;
+
+ if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
+ return -ENOENT;
+
+ r = kstrtoint(buf, 0, &rot);
+ if (r)
+ return r;
+
+ r = dssdev->driver->set_rotate(dssdev, rot);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_mirror_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int mirror;
+ if (!dssdev->driver->get_mirror)
+ return -ENOENT;
+ mirror = dssdev->driver->get_mirror(dssdev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
+}
+
+static ssize_t display_mirror_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int r;
+ bool mirror;
+
+ if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
+ return -ENOENT;
+
+ r = strtobool(buf, &mirror);
+ if (r)
+ return r;
+
+ r = dssdev->driver->set_mirror(dssdev, mirror);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_wss_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ unsigned int wss;
+
+ if (!dssdev->driver->get_wss)
+ return -ENOENT;
+
+ wss = dssdev->driver->get_wss(dssdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
+}
+
+static ssize_t display_wss_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ u32 wss;
+ int r;
+
+ if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
+ return -ENOENT;
+
+ r = kstrtou32(buf, 0, &wss);
+ if (r)
+ return r;
+
+ if (wss > 0xfffff)
+ return -EINVAL;
+
+ r = dssdev->driver->set_wss(dssdev, wss);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
+ display_enabled_show, display_enabled_store);
+static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
+ display_tear_show, display_tear_store);
+static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
+ display_timings_show, display_timings_store);
+static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
+ display_rotate_show, display_rotate_store);
+static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
+ display_mirror_show, display_mirror_store);
+static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
+ display_wss_show, display_wss_store);
+
+static struct device_attribute *display_sysfs_attrs[] = {
+ &dev_attr_enabled,
+ &dev_attr_tear_elim,
+ &dev_attr_timings,
+ &dev_attr_rotate,
+ &dev_attr_mirror,
+ &dev_attr_wss,
+ NULL
+};
+
+int display_init_sysfs(struct platform_device *pdev,
+ struct omap_dss_device *dssdev)
+{
+ struct device_attribute *attr;
+ int i, r;
+
+ /* create device sysfs files */
+ i = 0;
+ while ((attr = display_sysfs_attrs[i++]) != NULL) {
+ r = device_create_file(&dssdev->dev, attr);
+ if (r) {
+ for (i = i - 2; i >= 0; i--) {
+ attr = display_sysfs_attrs[i];
+ device_remove_file(&dssdev->dev, attr);
+ }
+
+ DSSERR("failed to create sysfs file\n");
+ return r;
+ }
+ }
+
+ /* create a sysfs link to this display under the parent platform device */
+ r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
+ dev_name(&dssdev->dev));
+ if (r) {
+ i = 0;
+ while ((attr = display_sysfs_attrs[i++]) != NULL)
+ device_remove_file(&dssdev->dev, attr);
+
+ DSSERR("failed to create sysfs display link\n");
+ return r;
+ }
+
+ return 0;
+}
+
+void display_uninit_sysfs(struct platform_device *pdev,
+ struct omap_dss_device *dssdev)
+{
+ struct device_attribute *attr;
+ int i = 0;
+
+ sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev));
+
+ while ((attr = display_sysfs_attrs[i++]) != NULL)
+ device_remove_file(&dssdev->dev, attr);
+}
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index ccf8550fafde..0aa8ad8f9667 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -31,250 +31,6 @@
#include "dss.h"
#include "dss_features.h"
-static ssize_t display_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
-}
-
-static ssize_t display_enabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int r;
- bool enabled;
-
- r = strtobool(buf, &enabled);
- if (r)
- return r;
-
- if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
- if (enabled) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- return r;
- } else {
- dssdev->driver->disable(dssdev);
- }
- }
-
- return size;
-}
-
-static ssize_t display_tear_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- dssdev->driver->get_te ?
- dssdev->driver->get_te(dssdev) : 0);
-}
-
-static ssize_t display_tear_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int r;
- bool te;
-
- if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
- return -ENOENT;
-
- r = strtobool(buf, &te);
- if (r)
- return r;
-
- r = dssdev->driver->enable_te(dssdev, te);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_timings_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct omap_video_timings t;
-
- if (!dssdev->driver->get_timings)
- return -ENOENT;
-
- dssdev->driver->get_timings(dssdev, &t);
-
- return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
- t.pixel_clock,
- t.x_res, t.hfp, t.hbp, t.hsw,
- t.y_res, t.vfp, t.vbp, t.vsw);
-}
-
-static ssize_t display_timings_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct omap_video_timings t = dssdev->panel.timings;
- int r, found;
-
- if (!dssdev->driver->set_timings || !dssdev->driver->check_timings)
- return -ENOENT;
-
- found = 0;
-#ifdef CONFIG_OMAP2_DSS_VENC
- if (strncmp("pal", buf, 3) == 0) {
- t = omap_dss_pal_timings;
- found = 1;
- } else if (strncmp("ntsc", buf, 4) == 0) {
- t = omap_dss_ntsc_timings;
- found = 1;
- }
-#endif
- if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
- &t.pixel_clock,
- &t.x_res, &t.hfp, &t.hbp, &t.hsw,
- &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
- return -EINVAL;
-
- r = dssdev->driver->check_timings(dssdev, &t);
- if (r)
- return r;
-
- dssdev->driver->disable(dssdev);
- dssdev->driver->set_timings(dssdev, &t);
- r = dssdev->driver->enable(dssdev);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_rotate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int rotate;
- if (!dssdev->driver->get_rotate)
- return -ENOENT;
- rotate = dssdev->driver->get_rotate(dssdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
-}
-
-static ssize_t display_rotate_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int rot, r;
-
- if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
- return -ENOENT;
-
- r = kstrtoint(buf, 0, &rot);
- if (r)
- return r;
-
- r = dssdev->driver->set_rotate(dssdev, rot);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_mirror_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int mirror;
- if (!dssdev->driver->get_mirror)
- return -ENOENT;
- mirror = dssdev->driver->get_mirror(dssdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
-}
-
-static ssize_t display_mirror_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int r;
- bool mirror;
-
- if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
- return -ENOENT;
-
- r = strtobool(buf, &mirror);
- if (r)
- return r;
-
- r = dssdev->driver->set_mirror(dssdev, mirror);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_wss_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- unsigned int wss;
-
- if (!dssdev->driver->get_wss)
- return -ENOENT;
-
- wss = dssdev->driver->get_wss(dssdev);
-
- return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
-}
-
-static ssize_t display_wss_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- u32 wss;
- int r;
-
- if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
- return -ENOENT;
-
- r = kstrtou32(buf, 0, &wss);
- if (r)
- return r;
-
- if (wss > 0xfffff)
- return -EINVAL;
-
- r = dssdev->driver->set_wss(dssdev, wss);
- if (r)
- return r;
-
- return size;
-}
-
-static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
- display_enabled_show, display_enabled_store);
-static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
- display_tear_show, display_tear_store);
-static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
- display_timings_show, display_timings_store);
-static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
- display_rotate_show, display_rotate_store);
-static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
- display_mirror_show, display_mirror_store);
-static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
- display_wss_show, display_wss_store);
-
-static struct device_attribute *display_sysfs_attrs[] = {
- &dev_attr_enabled,
- &dev_attr_tear_elim,
- &dev_attr_timings,
- &dev_attr_rotate,
- &dev_attr_mirror,
- &dev_attr_wss,
- NULL
-};
-
void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -320,136 +76,8 @@ void omapdss_default_get_timings(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(omapdss_default_get_timings);
-/*
- * Connect dssdev to a manager if the manager is free or if force is specified.
- * Connect all overlays to that manager if they are free or if force is
- * specified.
- */
-static int dss_init_connections(struct omap_dss_device *dssdev, bool force)
-{
- struct omap_dss_output *out;
- struct omap_overlay_manager *mgr;
- int i, r;
-
- out = omapdss_get_output_from_dssdev(dssdev);
-
- WARN_ON(dssdev->output);
- WARN_ON(out->device);
-
- r = omapdss_output_set_device(out, dssdev);
- if (r) {
- DSSERR("failed to connect output to new device\n");
- return r;
- }
-
- mgr = omap_dss_get_overlay_manager(dssdev->channel);
-
- if (mgr->output && !force)
- return 0;
-
- if (mgr->output)
- mgr->unset_output(mgr);
-
- r = mgr->set_output(mgr, out);
- if (r) {
- DSSERR("failed to connect manager to output of new device\n");
-
- /* remove the output-device connection we just made */
- omapdss_output_unset_device(out);
- return r;
- }
-
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl = omap_dss_get_overlay(i);
-
- if (!ovl->manager || force) {
- if (ovl->manager)
- ovl->unset_manager(ovl);
-
- r = ovl->set_manager(ovl, mgr);
- if (r) {
- DSSERR("failed to set initial overlay\n");
- return r;
- }
- }
- }
-
- return 0;
-}
-
-static void dss_uninit_connections(struct omap_dss_device *dssdev)
-{
- if (dssdev->output) {
- struct omap_overlay_manager *mgr = dssdev->output->manager;
-
- if (mgr)
- mgr->unset_output(mgr);
-
- omapdss_output_unset_device(dssdev->output);
- }
-}
-
-int dss_init_device(struct platform_device *pdev,
- struct omap_dss_device *dssdev)
-{
- struct device_attribute *attr;
- int i, r;
- const char *def_disp_name = dss_get_default_display_name();
- bool force;
-
- force = def_disp_name && strcmp(def_disp_name, dssdev->name) == 0;
- dss_init_connections(dssdev, force);
-
- /* create device sysfs files */
- i = 0;
- while ((attr = display_sysfs_attrs[i++]) != NULL) {
- r = device_create_file(&dssdev->dev, attr);
- if (r) {
- for (i = i - 2; i >= 0; i--) {
- attr = display_sysfs_attrs[i];
- device_remove_file(&dssdev->dev, attr);
- }
-
- dss_uninit_connections(dssdev);
-
- DSSERR("failed to create sysfs file\n");
- return r;
- }
- }
-
- /* create display? sysfs links */
- r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
- dev_name(&dssdev->dev));
- if (r) {
- while ((attr = display_sysfs_attrs[i++]) != NULL)
- device_remove_file(&dssdev->dev, attr);
-
- dss_uninit_connections(dssdev);
-
- DSSERR("failed to create sysfs display link\n");
- return r;
- }
-
- return 0;
-}
-
-void dss_uninit_device(struct platform_device *pdev,
- struct omap_dss_device *dssdev)
-{
- struct device_attribute *attr;
- int i = 0;
-
- sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev));
-
- while ((attr = display_sysfs_attrs[i++]) != NULL)
- device_remove_file(&dssdev->dev, attr);
-
- dss_uninit_connections(dssdev);
-}
-
static int dss_suspend_device(struct device *dev, void *data)
{
- int r;
struct omap_dss_device *dssdev = to_dss_device(dev);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
@@ -457,15 +85,7 @@ static int dss_suspend_device(struct device *dev, void *data)
return 0;
}
- if (!dssdev->driver->suspend) {
- DSSERR("display '%s' doesn't implement suspend\n",
- dssdev->name);
- return -ENOSYS;
- }
-
- r = dssdev->driver->suspend(dssdev);
- if (r)
- return r;
+ dssdev->driver->disable(dssdev);
dssdev->activate_after_resume = true;
@@ -492,8 +112,8 @@ static int dss_resume_device(struct device *dev, void *data)
int r;
struct omap_dss_device *dssdev = to_dss_device(dev);
- if (dssdev->activate_after_resume && dssdev->driver->resume) {
- r = dssdev->driver->resume(dssdev);
+ if (dssdev->activate_after_resume) {
+ r = dssdev->driver->enable(dssdev);
if (r)
return r;
}
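
A note on the error handling in display_init_sysfs()/dss_init_device() above: attributes are created one by one and, on failure, every attribute created so far is removed before the error is returned, so the sysfs state never ends up half-initialized. Below is a minimal, self-contained sketch of that create-then-unwind idiom; the example_* names and the single attribute are made up for illustration and are not part of this patch.

	#include <linux/device.h>

	static ssize_t example_show(struct device *dev,
			struct device_attribute *attr, char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%d\n", 0);
	}

	static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

	/* NULL-terminated table, same shape as display_sysfs_attrs[] */
	static struct device_attribute *example_attrs[] = {
		&dev_attr_example,
		NULL
	};

	static int example_init_sysfs(struct device *dev)
	{
		struct device_attribute *attr;
		int i = 0, r;

		while ((attr = example_attrs[i++]) != NULL) {
			r = device_create_file(dev, attr);
			if (r) {
				/* unwind the attributes created so far */
				while (--i > 0)
					device_remove_file(dev, example_attrs[i - 1]);
				return r;
			}
		}

		return 0;
	}
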
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 56748cf8760e..4af136a04e53 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -49,34 +49,53 @@ static struct {
struct omap_dss_output output;
} dpi;
-static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk)
+static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
{
- int dsi_module;
-
- dsi_module = clk == OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ? 0 : 1;
+ /*
+ * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
+ * would also be used for DISPC fclk. Meaning, when the DPI output is
+ * disabled, DISPC clock will be disabled, and TV out will stop.
+ */
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP24xx:
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return NULL;
+ default:
+ break;
+ }
- return dsi_get_dsidev_from_id(dsi_module);
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return dsi_get_dsidev_from_id(0);
+ case OMAP_DSS_CHANNEL_LCD2:
+ return dsi_get_dsidev_from_id(1);
+ default:
+ return NULL;
+ }
}
-static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev)
+static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
{
- if (dssdev->clocks.dispc.dispc_fclk_src ==
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ||
- dssdev->clocks.dispc.dispc_fclk_src ==
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC ||
- dssdev->clocks.dispc.channel.lcd_clk_src ==
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ||
- dssdev->clocks.dispc.channel.lcd_clk_src ==
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC)
- return true;
- else
- return false;
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
+ default:
+ /* this shouldn't happen */
+ WARN_ON(1);
+ return OMAP_DSS_CLK_SRC_FCK;
+ }
}
static int dpi_set_dsi_clk(struct omap_dss_device *dssdev,
unsigned long pck_req, unsigned long *fck, int *lck_div,
int *pck_div)
{
+ struct omap_overlay_manager *mgr = dssdev->output->manager;
struct dsi_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
int r;
@@ -90,7 +109,8 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev,
if (r)
return r;
- dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
+ dss_select_lcd_clk_source(mgr->id,
+ dpi_get_alt_clk_src(mgr->id));
dpi.mgr_config.clock_info = dispc_cinfo;
@@ -135,7 +155,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
unsigned long pck;
int r = 0;
- if (dpi_use_dsi_pll(dssdev))
+ if (dpi.dsidev)
r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck,
&lck_div, &pck_div);
else
@@ -214,7 +234,7 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_src_sel;
- if (dpi_use_dsi_pll(dssdev)) {
+ if (dpi.dsidev) {
r = dsi_runtime_get(dpi.dsidev);
if (r)
goto err_get_dsi;
@@ -242,10 +262,10 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
err_mgr_enable:
err_set_mode:
- if (dpi_use_dsi_pll(dssdev))
+ if (dpi.dsidev)
dsi_pll_uninit(dpi.dsidev, true);
err_dsi_pll_init:
- if (dpi_use_dsi_pll(dssdev))
+ if (dpi.dsidev)
dsi_runtime_put(dpi.dsidev);
err_get_dsi:
err_src_sel:
@@ -271,8 +291,8 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
dss_mgr_disable(mgr);
- if (dpi_use_dsi_pll(dssdev)) {
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
+ if (dpi.dsidev) {
+ dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
dsi_pll_uninit(dpi.dsidev, true);
dsi_runtime_put(dpi.dsidev);
}
@@ -311,13 +331,13 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
unsigned long pck;
struct dispc_clock_info dispc_cinfo;
- if (dss_mgr_check_timings(mgr, timings))
+ if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
return -EINVAL;
if (timings->pixel_clock == 0)
return -EINVAL;
- if (dpi_use_dsi_pll(dssdev)) {
+ if (dpi.dsidev) {
struct dsi_clock_info dsi_cinfo;
r = dsi_pll_calc_clock_div_pck(dpi.dsidev,
timings->pixel_clock * 1000,
@@ -359,8 +379,32 @@ void omapdss_dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
}
EXPORT_SYMBOL(omapdss_dpi_set_data_lines);
+static int __init dpi_verify_dsi_pll(struct platform_device *dsidev)
+{
+ int r;
+
+ /* do initial setup with the PLL to see if it is operational */
+
+ r = dsi_runtime_get(dsidev);
+ if (r)
+ return r;
+
+ r = dsi_pll_init(dsidev, 0, 1);
+ if (r) {
+ dsi_runtime_put(dsidev);
+ return r;
+ }
+
+ dsi_pll_uninit(dsidev, true);
+ dsi_runtime_put(dsidev);
+
+ return 0;
+}
+
static int __init dpi_init_display(struct omap_dss_device *dssdev)
{
+ struct platform_device *dsidev;
+
DSSDBG("init_display\n");
if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) &&
@@ -377,19 +421,30 @@ static int __init dpi_init_display(struct omap_dss_device *dssdev)
dpi.vdds_dsi_reg = vdds_dsi;
}
- if (dpi_use_dsi_pll(dssdev)) {
- enum omap_dss_clk_source dispc_fclk_src =
- dssdev->clocks.dispc.dispc_fclk_src;
- dpi.dsidev = dpi_get_dsidev(dispc_fclk_src);
+ /*
+ * XXX We shouldn't need dssdev->channel for this. The dsi pll clock
+ * source for DPI is SoC integration detail, not something that should
+ * be configured in the dssdev
+ */
+ dsidev = dpi_get_dsidev(dssdev->channel);
+
+ if (dsidev && dpi_verify_dsi_pll(dsidev)) {
+ dsidev = NULL;
+ DSSWARN("DSI PLL not operational\n");
}
+ if (dsidev)
+ DSSDBG("using DSI PLL for DPI clock\n");
+
+ dpi.dsidev = dsidev;
+
return 0;
}
static struct omap_dss_device * __init dpi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -438,9 +493,18 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
return;
}
+ r = omapdss_output_set_device(&dpi.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&dpi.output);
dss_put_device(dssdev);
return;
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index bee92846cfab..28d41d16b7be 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -45,7 +45,6 @@
#include "dss.h"
#include "dss_features.h"
-/*#define VERBOSE_IRQ*/
#define DSI_CATCH_MISSING_TE
struct dsi_reg { u16 idx; };
@@ -535,42 +534,38 @@ static inline void dsi_perf_show(struct platform_device *dsidev,
}
#endif
+static int verbose_irq;
+
static void print_irq_status(u32 status)
{
if (status == 0)
return;
-#ifndef VERBOSE_IRQ
- if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
+ if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
return;
-#endif
- printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
-#define PIS(x) \
- if (status & DSI_IRQ_##x) \
- printk(#x " ");
-#ifdef VERBOSE_IRQ
- PIS(VC0);
- PIS(VC1);
- PIS(VC2);
- PIS(VC3);
-#endif
- PIS(WAKEUP);
- PIS(RESYNC);
- PIS(PLL_LOCK);
- PIS(PLL_UNLOCK);
- PIS(PLL_RECALL);
- PIS(COMPLEXIO_ERR);
- PIS(HS_TX_TIMEOUT);
- PIS(LP_RX_TIMEOUT);
- PIS(TE_TRIGGER);
- PIS(ACK_TRIGGER);
- PIS(SYNC_LOST);
- PIS(LDO_POWER_GOOD);
- PIS(TA_TIMEOUT);
+#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""
+
+ pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ status,
+ verbose_irq ? PIS(VC0) : "",
+ verbose_irq ? PIS(VC1) : "",
+ verbose_irq ? PIS(VC2) : "",
+ verbose_irq ? PIS(VC3) : "",
+ PIS(WAKEUP),
+ PIS(RESYNC),
+ PIS(PLL_LOCK),
+ PIS(PLL_UNLOCK),
+ PIS(PLL_RECALL),
+ PIS(COMPLEXIO_ERR),
+ PIS(HS_TX_TIMEOUT),
+ PIS(LP_RX_TIMEOUT),
+ PIS(TE_TRIGGER),
+ PIS(ACK_TRIGGER),
+ PIS(SYNC_LOST),
+ PIS(LDO_POWER_GOOD),
+ PIS(TA_TIMEOUT));
#undef PIS
-
- printk("\n");
}
static void print_irq_status_vc(int channel, u32 status)
@@ -578,28 +573,24 @@ static void print_irq_status_vc(int channel, u32 status)
if (status == 0)
return;
-#ifndef VERBOSE_IRQ
- if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
+ if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
return;
-#endif
- printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
-#define PIS(x) \
- if (status & DSI_VC_IRQ_##x) \
- printk(#x " ");
- PIS(CS);
- PIS(ECC_CORR);
-#ifdef VERBOSE_IRQ
- PIS(PACKET_SENT);
-#endif
- PIS(FIFO_TX_OVF);
- PIS(FIFO_RX_OVF);
- PIS(BTA);
- PIS(ECC_NO_CORR);
- PIS(FIFO_TX_UDF);
- PIS(PP_BUSY_CHANGE);
+#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
+
+ pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
+ channel,
+ status,
+ PIS(CS),
+ PIS(ECC_CORR),
+ PIS(ECC_NO_CORR),
+ verbose_irq ? PIS(PACKET_SENT) : "",
+ PIS(BTA),
+ PIS(FIFO_TX_OVF),
+ PIS(FIFO_RX_OVF),
+ PIS(FIFO_TX_UDF),
+ PIS(PP_BUSY_CHANGE));
#undef PIS
- printk("\n");
}
static void print_irq_status_cio(u32 status)
@@ -607,34 +598,31 @@ static void print_irq_status_cio(u32 status)
if (status == 0)
return;
- printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
-
-#define PIS(x) \
- if (status & DSI_CIO_IRQ_##x) \
- printk(#x " ");
- PIS(ERRSYNCESC1);
- PIS(ERRSYNCESC2);
- PIS(ERRSYNCESC3);
- PIS(ERRESC1);
- PIS(ERRESC2);
- PIS(ERRESC3);
- PIS(ERRCONTROL1);
- PIS(ERRCONTROL2);
- PIS(ERRCONTROL3);
- PIS(STATEULPS1);
- PIS(STATEULPS2);
- PIS(STATEULPS3);
- PIS(ERRCONTENTIONLP0_1);
- PIS(ERRCONTENTIONLP1_1);
- PIS(ERRCONTENTIONLP0_2);
- PIS(ERRCONTENTIONLP1_2);
- PIS(ERRCONTENTIONLP0_3);
- PIS(ERRCONTENTIONLP1_3);
- PIS(ULPSACTIVENOT_ALL0);
- PIS(ULPSACTIVENOT_ALL1);
+#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""
+
+ pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ status,
+ PIS(ERRSYNCESC1),
+ PIS(ERRSYNCESC2),
+ PIS(ERRSYNCESC3),
+ PIS(ERRESC1),
+ PIS(ERRESC2),
+ PIS(ERRESC3),
+ PIS(ERRCONTROL1),
+ PIS(ERRCONTROL2),
+ PIS(ERRCONTROL3),
+ PIS(STATEULPS1),
+ PIS(STATEULPS2),
+ PIS(STATEULPS3),
+ PIS(ERRCONTENTIONLP0_1),
+ PIS(ERRCONTENTIONLP1_1),
+ PIS(ERRCONTENTIONLP0_2),
+ PIS(ERRCONTENTIONLP1_2),
+ PIS(ERRCONTENTIONLP0_3),
+ PIS(ERRCONTENTIONLP1_3),
+ PIS(ULPSACTIVENOT_ALL0),
+ PIS(ULPSACTIVENOT_ALL1));
#undef PIS
-
- printk("\n");
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -1116,28 +1104,16 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
}
}
-#ifdef DEBUG
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
u32 l;
int b0, b1, b2;
- if (!dss_debug)
- return;
-
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
- printk(KERN_DEBUG "DSI resets: ");
-
- l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
- printk("PLL (%d) ", FLD_GET(l, 0, 0));
-
- l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
- printk("CIO (%d) ", FLD_GET(l, 29, 29));
-
if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
b0 = 28;
b1 = 27;
@@ -1148,18 +1124,21 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
b2 = 26;
}
- l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
- printk("PHY (%x%x%x, %d, %d, %d)\n",
- FLD_GET(l, b0, b0),
- FLD_GET(l, b1, b1),
- FLD_GET(l, b2, b2),
- FLD_GET(l, 29, 29),
- FLD_GET(l, 30, 30),
- FLD_GET(l, 31, 31));
+#define DSI_FLD_GET(fld, start, end)\
+ FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)
+
+ pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
+ DSI_FLD_GET(PLL_STATUS, 0, 0),
+ DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
+ DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
+ DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
+ DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
+ DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
+ DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
+ DSI_FLD_GET(DSIPHY_CFG5, 31, 31));
+
+#undef DSI_FLD_GET
}
-#else
-#define _dsi_print_reset_status(x)
-#endif
static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
{
@@ -1407,6 +1386,11 @@ retry:
cur.dsi_pll_hsdiv_dispc_clk =
cur.clkin4ddr / cur.regm_dispc;
+ if (cur.regm_dispc > 1 &&
+ cur.regm_dispc % 2 != 0 &&
+ req_pck >= 1000000)
+ continue;
+
/* this will narrow down the search a bit,
* but still give pixclocks below what was
* requested */
@@ -1621,7 +1605,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
u8 regn_start, regn_end, regm_start, regm_end;
u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
- DSSDBGF();
+ DSSDBG("DSI PLL clock config starts");
dsi->current_cinfo.clkin = cinfo->clkin;
dsi->current_cinfo.fint = cinfo->fint;
@@ -1757,11 +1741,21 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
DSSDBG("PLL init\n");
+ /*
+ * It seems that on many OMAPs we need to enable both to have a
+ * functional HSDivider.
+ */
+ enable_hsclk = enable_hsdiv = true;
+
if (dsi->vdds_dsi_reg == NULL) {
struct regulator *vdds_dsi;
vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
+ /* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(vdds_dsi))
+ vdds_dsi = regulator_get(&dsi->pdev->dev, "VCXIO");
+
if (IS_ERR(vdds_dsi)) {
DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
@@ -2440,7 +2434,7 @@ static int dsi_cio_init(struct platform_device *dsidev)
int r;
u32 l;
- DSSDBGF();
+ DSSDBG("DSI CIO init starts");
r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
if (r)
@@ -2791,7 +2785,7 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
{
u32 r;
- DSSDBGF("%d", channel);
+ DSSDBG("Initial config of virtual channel %d", channel);
r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
@@ -2823,7 +2817,7 @@ static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
if (dsi->vc[channel].source == source)
return 0;
- DSSDBGF("%d", channel);
+ DSSDBG("Source config of virtual channel %d", channel);
dsi_sync_vc(dsidev, channel);
@@ -3581,7 +3575,7 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
int r, i;
unsigned mask;
- DSSDBGF();
+ DSSDBG("Entering ULPS");
WARN_ON(!dsi_bus_is_locked(dsidev));
@@ -4285,7 +4279,7 @@ int omapdss_dsi_set_clocks(struct omap_dss_device *dssdev,
unsigned long pck;
int r;
- DSSDBGF("ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
+ DSSDBG("Setting DSI clocks: ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
mutex_lock(&dsi->lock);
@@ -4541,7 +4535,7 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
}
-static void dsi_framedone_irq_callback(void *data, u32 mask)
+static void dsi_framedone_irq_callback(void *data)
{
struct platform_device *dsidev = (struct platform_device *) data;
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4615,7 +4609,6 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_overlay_manager *mgr = dssdev->output->manager;
int r;
- u32 irq = 0;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
dsi->timings.hsw = 1;
@@ -4625,12 +4618,10 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
dsi->timings.vfp = 0;
dsi->timings.vbp = 0;
- irq = dispc_mgr_get_framedone_irq(mgr->id);
-
- r = omap_dispc_register_isr(dsi_framedone_irq_callback,
- (void *) dsidev, irq);
+ r = dss_mgr_register_framedone_handler(mgr,
+ dsi_framedone_irq_callback, dsidev);
if (r) {
- DSSERR("can't get FRAMEDONE irq\n");
+ DSSERR("can't register FRAMEDONE handler\n");
goto err;
}
@@ -4668,8 +4659,8 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
return 0;
err1:
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
- omap_dispc_unregister_isr(dsi_framedone_irq_callback,
- (void *) dsidev, irq);
+ dss_mgr_unregister_framedone_handler(mgr,
+ dsi_framedone_irq_callback, dsidev);
err:
return r;
}
@@ -4680,14 +4671,9 @@ static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_overlay_manager *mgr = dssdev->output->manager;
- if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
- u32 irq;
-
- irq = dispc_mgr_get_framedone_irq(mgr->id);
-
- omap_dispc_unregister_isr(dsi_framedone_irq_callback,
- (void *) dsidev, irq);
- }
+ if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
+ dss_mgr_unregister_framedone_handler(mgr,
+ dsi_framedone_irq_callback, dsidev);
}
static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
@@ -4730,7 +4716,6 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
if (r)
goto err1;
- dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
dss_select_lcd_clk_source(mgr->id,
dssdev->clocks.dispc.channel.lcd_clk_src);
@@ -4765,7 +4750,6 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
err3:
dsi_cio_uninit(dsidev);
err2:
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
@@ -4792,7 +4776,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dsi_vc_enable(dsidev, 2, 0);
dsi_vc_enable(dsidev, 3, 0);
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsidev);
@@ -4981,6 +4964,10 @@ static int __init dsi_init_display(struct omap_dss_device *dssdev)
vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
+ /* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(vdds_dsi))
+ vdds_dsi = regulator_get(&dsi->pdev->dev, "VCXIO");
+
if (IS_ERR(vdds_dsi)) {
DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
@@ -5121,7 +5108,7 @@ static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *p
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -5151,6 +5138,7 @@ static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *p
static void __init dsi_probe_pdata(struct platform_device *dsidev)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *plat_dssdev;
struct omap_dss_device *dssdev;
int r;
@@ -5173,9 +5161,18 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
return;
}
+ r = omapdss_output_set_device(&dsi->output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&dsi->output);
dss_put_device(dssdev);
return;
}
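
The reworked print_irq_status() helpers above collapse a run of conditional printk() calls into a single pr_debug() by letting PIS() expand each flag either to its name or to an empty string. A minimal standalone sketch of the same trick, with made-up flag names, is shown below; it is only an illustration of the macro pattern, not code from this patch.

	#include <linux/types.h>
	#include <linux/printk.h>

	#define EX_IRQ_ERR	(1 << 0)
	#define EX_IRQ_DONE	(1 << 1)
	#define EX_IRQ_SYNC	(1 << 2)

	static void example_print_irq_status(u32 status)
	{
		if (status == 0)
			return;

	/* expand to the flag name plus a space if set, to "" otherwise */
	#define PIS(x) (status & EX_IRQ_##x) ? (#x " ") : ""

		pr_debug("example IRQ 0x%x: %s%s%s\n",
			status,
			PIS(ERR),
			PIS(DONE),
			PIS(SYNC));
	#undef PIS
	}
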
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 602102cebcbf..054c2a22b3f1 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/gfp.h>
+#include <linux/sizes.h>
#include <video/omapdss.h>
@@ -76,6 +77,7 @@ static struct {
struct clk *dpll4_m4_ck;
struct clk *dss_clk;
+ unsigned long dss_clk_rate;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -96,6 +98,8 @@ static const char * const dss_generic_clk_source_names[] = {
[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
[OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
+ [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC",
+ [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI",
};
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
@@ -151,6 +155,21 @@ static void dss_restore_context(void)
#undef SR
#undef RR
+int dss_get_ctx_loss_count(void)
+{
+ struct omap_dss_board_info *board_data = dss.pdev->dev.platform_data;
+ int cnt;
+
+ if (!board_data->get_context_loss_count)
+ return -ENOENT;
+
+ cnt = board_data->get_context_loss_count(&dss.pdev->dev);
+
+ WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+ return cnt;
+}
+
void dss_sdi_init(int datapairs)
{
u32 l;
@@ -301,7 +320,7 @@ static void dss_dump_regs(struct seq_file *s)
#undef DUMPREG
}
-void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
+static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
{
struct platform_device *dsidev;
int b;
@@ -372,8 +391,10 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
struct platform_device *dsidev;
int b, ix, pos;
- if (!dss_has_feature(FEAT_LCD_CLK_SRC))
+ if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
+ dss_select_dispc_clk_source(clk_src);
return;
+ }
switch (clk_src) {
case OMAP_DSS_CLK_SRC_FCK:
@@ -429,6 +450,29 @@ enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
}
}
+/* calculate clock rates using dividers in cinfo */
+int dss_calc_clock_rates(struct dss_clock_info *cinfo)
+{
+ if (dss.dpll4_m4_ck) {
+ unsigned long prate;
+
+ if (cinfo->fck_div > dss.feat->fck_div_max ||
+ cinfo->fck_div == 0)
+ return -EINVAL;
+
+ prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+
+ cinfo->fck = prate / cinfo->fck_div *
+ dss.feat->dss_fck_multiplier;
+ } else {
+ if (cinfo->fck_div != 0)
+ return -EINVAL;
+ cinfo->fck = clk_get_rate(dss.dss_clk);
+ }
+
+ return 0;
+}
+
int dss_set_clock_div(struct dss_clock_info *cinfo)
{
if (dss.dpll4_m4_ck) {
@@ -446,6 +490,10 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
return -EINVAL;
}
+ dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
+
+ WARN_ONCE(dss.dss_clk_rate != cinfo->fck, "clk rate mismatch");
+
DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
return 0;
@@ -459,6 +507,41 @@ unsigned long dss_get_dpll4_rate(void)
return 0;
}
+unsigned long dss_get_dispc_clk_rate(void)
+{
+ return dss.dss_clk_rate;
+}
+
+static int dss_setup_default_clock(void)
+{
+ unsigned long max_dss_fck, prate;
+ unsigned fck_div;
+ struct dss_clock_info dss_cinfo = { 0 };
+ int r;
+
+ if (dss.dpll4_m4_ck == NULL)
+ return 0;
+
+ max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+ prate = dss_get_dpll4_rate();
+
+ fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
+ max_dss_fck);
+
+ dss_cinfo.fck_div = fck_div;
+
+ r = dss_calc_clock_rates(&dss_cinfo);
+ if (r)
+ return r;
+
+ r = dss_set_clock_div(&dss_cinfo);
+ if (r)
+ return r;
+
+ return 0;
+}
+
int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
struct dispc_clock_info *dispc_cinfo)
{
@@ -748,7 +831,7 @@ static void dss_runtime_put(void)
}
/* DEBUGFS */
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
void dss_debug_dump_clocks(struct seq_file *s)
{
dss_dump_clocks(s);
@@ -796,7 +879,6 @@ static const struct dss_features omap54xx_dss_feats __initconst = {
static int __init dss_init_features(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const struct dss_features *src;
struct dss_features *dst;
@@ -806,7 +888,7 @@ static int __init dss_init_features(struct platform_device *pdev)
return -ENOMEM;
}
- switch (pdata->version) {
+ switch (omapdss_get_version()) {
case OMAPDSS_VER_OMAP24xx:
src = &omap24xx_dss_feats;
break;
@@ -871,15 +953,23 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
if (r)
return r;
+ r = dss_setup_default_clock();
+ if (r)
+ goto err_setup_clocks;
+
pm_runtime_enable(&pdev->dev);
r = dss_runtime_get();
if (r)
goto err_runtime_get;
+ dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
+
/* Select DPLL */
REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
+ dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
+
#ifdef CONFIG_OMAP2_DSS_VENC
REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
@@ -903,6 +993,7 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
err_runtime_get:
pm_runtime_disable(&pdev->dev);
+err_setup_clocks:
dss_put_clocks();
return r;
}
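
dss_setup_default_clock() above derives the initial divider arithmetically: fck_div = DIV_ROUND_UP(prate * dss_fck_multiplier, max_dss_fck), i.e. the smallest divider that keeps DSS_FCK at or below the SoC maximum, after which dss_calc_clock_rates() computes fck = prate / fck_div * dss_fck_multiplier. As a worked example with made-up numbers, prate = 864 MHz, dss_fck_multiplier = 2 and max_dss_fck = 186 MHz give DIV_ROUND_UP(1728, 186) = 10, hence fck = 864 * 2 / 10 = 172.8 MHz, just under the limit.
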
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 6728892f9dad..610c8e563daa 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -23,44 +23,20 @@
#ifndef __OMAP2_DSS_H
#define __OMAP2_DSS_H
-#ifdef CONFIG_OMAP2_DSS_DEBUG_SUPPORT
-#define DEBUG
-#endif
+#include <linux/interrupt.h>
-#ifdef DEBUG
-extern bool dss_debug;
-#ifdef DSS_SUBSYS_NAME
-#define DSSDBG(format, ...) \
- if (dss_debug) \
- printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME ": " format, \
- ## __VA_ARGS__)
-#else
-#define DSSDBG(format, ...) \
- if (dss_debug) \
- printk(KERN_DEBUG "omapdss: " format, ## __VA_ARGS__)
+#ifdef pr_fmt
+#undef pr_fmt
#endif
#ifdef DSS_SUBSYS_NAME
-#define DSSDBGF(format, ...) \
- if (dss_debug) \
- printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME \
- ": %s(" format ")\n", \
- __func__, \
- ## __VA_ARGS__)
+#define pr_fmt(fmt) DSS_SUBSYS_NAME ": " fmt
#else
-#define DSSDBGF(format, ...) \
- if (dss_debug) \
- printk(KERN_DEBUG "omapdss: " \
- ": %s(" format ")\n", \
- __func__, \
- ## __VA_ARGS__)
-#endif
-
-#else /* DEBUG */
-#define DSSDBG(format, ...)
-#define DSSDBGF(format, ...)
+#define pr_fmt(fmt) fmt
#endif
+#define DSSDBG(format, ...) \
+ pr_debug(format, ## __VA_ARGS__)
#ifdef DSS_SUBSYS_NAME
#define DSSERR(format, ...) \
@@ -186,11 +162,10 @@ struct seq_file;
struct platform_device;
/* core */
-const char *dss_get_default_display_name(void);
+struct platform_device *dss_get_core_pdev(void);
struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
-int dss_get_ctx_loss_count(struct device *dev);
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
@@ -204,55 +179,18 @@ void dss_put_device(struct omap_dss_device *dssdev);
void dss_copy_device_pdata(struct omap_dss_device *dst,
const struct omap_dss_device *src);
-/* apply */
-void dss_apply_init(void);
-int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr);
-int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
-void dss_mgr_start_update(struct omap_overlay_manager *mgr);
-int omap_dss_mgr_apply(struct omap_overlay_manager *mgr);
-
-int dss_mgr_enable(struct omap_overlay_manager *mgr);
-void dss_mgr_disable(struct omap_overlay_manager *mgr);
-int dss_mgr_set_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info);
-void dss_mgr_get_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info);
-int dss_mgr_set_device(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev);
-int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
-int dss_mgr_set_output(struct omap_overlay_manager *mgr,
- struct omap_dss_output *output);
-int dss_mgr_unset_output(struct omap_overlay_manager *mgr);
-void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
- const struct omap_video_timings *timings);
-void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
- const struct dss_lcd_mgr_config *config);
-const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
-
-bool dss_ovl_is_enabled(struct omap_overlay *ovl);
-int dss_ovl_enable(struct omap_overlay *ovl);
-int dss_ovl_disable(struct omap_overlay *ovl);
-int dss_ovl_set_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info);
-void dss_ovl_get_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info);
-int dss_ovl_set_manager(struct omap_overlay *ovl,
- struct omap_overlay_manager *mgr);
-int dss_ovl_unset_manager(struct omap_overlay *ovl);
-
/* output */
void dss_register_output(struct omap_dss_output *out);
void dss_unregister_output(struct omap_dss_output *out);
-struct omap_dss_output *omapdss_get_output_from_dssdev(struct omap_dss_device *dssdev);
/* display */
int dss_suspend_all_devices(void);
int dss_resume_all_devices(void);
void dss_disable_all_devices(void);
-int dss_init_device(struct platform_device *pdev,
+int display_init_sysfs(struct platform_device *pdev,
struct omap_dss_device *dssdev);
-void dss_uninit_device(struct platform_device *pdev,
+void display_uninit_sysfs(struct platform_device *pdev,
struct omap_dss_device *dssdev);
/* manager */
@@ -299,21 +237,23 @@ void dss_overlay_kobj_uninit(struct omap_overlay *ovl);
int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
+unsigned long dss_get_dispc_clk_rate(void);
int dss_dpi_select_source(enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
void dss_debug_dump_clocks(struct seq_file *s);
#endif
+int dss_get_ctx_loss_count(void);
+
void dss_sdi_init(int datapairs);
int dss_sdi_enable(void);
void dss_sdi_disable(void);
-void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src);
void dss_select_dsi_clk_source(int dsi_module,
enum omap_dss_clk_source clk_src);
void dss_select_lcd_clk_source(enum omap_channel channel,
@@ -326,6 +266,7 @@ void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
unsigned long dss_get_dpll4_rate(void);
+int dss_calc_clock_rates(struct dss_clock_info *cinfo);
int dss_set_clock_div(struct dss_clock_info *cinfo);
int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
struct dispc_clock_info *dispc_cinfo);
@@ -413,8 +354,6 @@ static inline void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
}
static inline struct platform_device *dsi_get_dsidev_from_id(int module)
{
- WARN("%s: DSI not compiled in, returning platform device as NULL\n",
- __func__);
return NULL;
}
#endif
@@ -427,15 +366,10 @@ void dpi_uninit_platform_driver(void) __exit;
int dispc_init_platform_driver(void) __init;
void dispc_uninit_platform_driver(void) __exit;
void dispc_dump_clocks(struct seq_file *s);
-void dispc_irq_handler(void);
-
-int dispc_runtime_get(void);
-void dispc_runtime_put(void);
void dispc_enable_sidle(void);
void dispc_disable_sidle(void);
-void dispc_lcd_enable_signal_polarity(bool act_high);
void dispc_lcd_enable_signal(bool enable);
void dispc_pck_free_enable(bool enable);
void dispc_enable_fifomerge(bool enable);
@@ -455,36 +389,14 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
bool manual_update);
-int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
- bool mem_to_mem);
-int dispc_ovl_enable(enum omap_plane plane, bool enable);
-void dispc_ovl_set_channel_out(enum omap_plane plane,
- enum omap_channel channel);
-
-void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
-u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
-u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
-bool dispc_mgr_go_busy(enum omap_channel channel);
-void dispc_mgr_go(enum omap_channel channel);
-bool dispc_mgr_is_enabled(enum omap_channel channel);
-void dispc_mgr_enable(enum omap_channel channel, bool enable);
-bool dispc_mgr_is_channel_enabled(enum omap_channel channel);
-void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode);
-void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
-void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
-void dispc_mgr_set_lcd_type_tft(enum omap_channel channel);
-void dispc_mgr_set_timings(enum omap_channel channel,
- struct omap_video_timings *timings);
+
unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
unsigned long dispc_core_clk_rate(void);
void dispc_mgr_set_clock_div(enum omap_channel channel,
- struct dispc_clock_info *cinfo);
+ const struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
-void dispc_mgr_setup(enum omap_channel channel,
- struct omap_overlay_manager_info *info);
u32 dispc_wb_get_framedone_irq(void);
bool dispc_wb_go_busy(void);
@@ -536,6 +448,8 @@ static inline unsigned long hdmi_get_pixel_clock(void)
#endif
int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev);
void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev);
+int omapdss_hdmi_core_enable(struct omap_dss_device *dssdev);
+void omapdss_hdmi_core_disable(struct omap_dss_device *dssdev);
void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings);
int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
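
With dss.h reworked as above, DSSDBG() is plain pr_debug(), and the subsystem tag now comes from pr_fmt(), which pr_debug() prepends to every format string at compile time. A short sketch of how a sub-driver picks up the prefix follows; the function body and message are illustrative only, but defining DSS_SUBSYS_NAME before including dss.h is the existing omapdss convention.

	/* in a dss sub-driver, before including dss.h */
	#define DSS_SUBSYS_NAME "DPI"
	#include "dss.h"

	static void example_debug(int id)
	{
		/* pr_debug() applies pr_fmt(), so this logs "DPI: manager %d\n" */
		DSSDBG("manager %d\n", id);
	}
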
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 3e8287c8709d..d7d66ef5cb58 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -429,8 +430,6 @@ static const struct dss_param_range omap2_dss_param_range[] = {
 * scaler cannot scale an image with width more than 768.
*/
[FEAT_PARAM_LINEWIDTH] = { 1, 768 },
- [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap3_dss_param_range[] = {
@@ -445,8 +444,6 @@ static const struct dss_param_range omap3_dss_param_range[] = {
[FEAT_PARAM_DSI_FCK] = { 0, 173000000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
- [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap4_dss_param_range[] = {
@@ -461,8 +458,6 @@ static const struct dss_param_range omap4_dss_param_range[] = {
[FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap5_dss_param_range[] = {
@@ -477,8 +472,6 @@ static const struct dss_param_range omap5_dss_param_range[] = {
[FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -545,6 +538,7 @@ static const enum dss_feat_id omap3630_dss_feat_list[] = {
FEAT_ALPHA_FIXED_ZORDER,
FEAT_FIFO_MERGE,
FEAT_OMAP3_DSI_FIFO_BUG,
+ FEAT_DPI_USES_VDDS_DSI,
};
static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
@@ -820,6 +814,7 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
.audio_start = ti_hdmi_4xxx_audio_start,
.audio_stop = ti_hdmi_4xxx_audio_stop,
.audio_config = ti_hdmi_4xxx_audio_config,
+ .audio_get_dma_port = ti_hdmi_4xxx_audio_get_dma_port,
#endif
};
@@ -846,11 +841,13 @@ int dss_feat_get_num_mgrs(void)
{
return omap_current_dss_features->num_mgrs;
}
+EXPORT_SYMBOL(dss_feat_get_num_mgrs);
int dss_feat_get_num_ovls(void)
{
return omap_current_dss_features->num_ovls;
}
+EXPORT_SYMBOL(dss_feat_get_num_ovls);
int dss_feat_get_num_wbs(void)
{
@@ -871,16 +868,19 @@ enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel
{
return omap_current_dss_features->supported_displays[channel];
}
+EXPORT_SYMBOL(dss_feat_get_supported_displays);
enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel)
{
return omap_current_dss_features->supported_outputs[channel];
}
+EXPORT_SYMBOL(dss_feat_get_supported_outputs);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane)
{
return omap_current_dss_features->supported_color_modes[plane];
}
+EXPORT_SYMBOL(dss_feat_get_supported_color_modes);
enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index fc492ef72a51..489b9bec4a6d 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -98,19 +98,12 @@ enum dss_range_param {
FEAT_PARAM_DSI_FCK,
FEAT_PARAM_DOWNSCALE,
FEAT_PARAM_LINEWIDTH,
- FEAT_PARAM_MGR_WIDTH,
- FEAT_PARAM_MGR_HEIGHT,
};
/* DSS Feature Functions */
-int dss_feat_get_num_mgrs(void);
-int dss_feat_get_num_ovls(void);
int dss_feat_get_num_wbs(void);
unsigned long dss_feat_get_param_min(enum dss_range_param param);
unsigned long dss_feat_get_param_max(enum dss_range_param param);
-enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
-enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel);
-enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 0d6d7213a858..72923645dcce 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -60,6 +60,7 @@
static struct {
struct mutex lock;
struct platform_device *pdev;
+
struct hdmi_ip_data ip_data;
struct clk *sys_clk;
@@ -295,6 +296,12 @@ static const struct hdmi_config vesa_timings[] = {
false, },
{ 0x55, HDMI_DVI },
},
+ {
+ { 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x44, HDMI_DVI },
+ },
};
static int hdmi_runtime_get(void)
@@ -323,7 +330,6 @@ static void hdmi_runtime_put(void)
static int __init hdmi_init_display(struct omap_dss_device *dssdev)
{
- struct omap_dss_board_info *pdata = hdmi.pdev->dev.platform_data;
int r;
struct gpio gpios[] = {
@@ -334,13 +340,17 @@ static int __init hdmi_init_display(struct omap_dss_device *dssdev)
DSSDBG("init_display\n");
- dss_init_hdmi_ip_ops(&hdmi.ip_data, pdata->version);
+ dss_init_hdmi_ip_ops(&hdmi.ip_data, omapdss_get_version());
if (hdmi.vdda_hdmi_dac_reg == NULL) {
struct regulator *reg;
reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
+ /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(reg))
+ reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
+
if (IS_ERR(reg)) {
DSSERR("can't get VDDA_HDMI_DAC regulator\n");
return PTR_ERR(reg);
@@ -356,7 +366,7 @@ static int __init hdmi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-static void __exit hdmi_uninit_display(struct omap_dss_device *dssdev)
+static void hdmi_uninit_display(struct omap_dss_device *dssdev)
{
DSSDBG("uninit_display\n");
@@ -399,7 +409,8 @@ static bool hdmi_timings_compare(struct omap_video_timings *timing1,
{
int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
- if ((timing2->pixel_clock == timing1->pixel_clock) &&
+ if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
+ DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
(timing2->x_res == timing1->x_res) &&
(timing2->y_res == timing1->y_res)) {
@@ -501,12 +512,9 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
}
-static int hdmi_power_on(struct omap_dss_device *dssdev)
+static int hdmi_power_on_core(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
- struct omap_overlay_manager *mgr = dssdev->output->manager;
- unsigned long phy;
gpio_set_value(hdmi.ct_cp_hpd_gpio, 1);
gpio_set_value(hdmi.ls_oe_gpio, 1);
@@ -522,6 +530,38 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
if (r)
goto err_runtime_get;
+ /* Make selection of HDMI in DSS */
+ dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+
+ return 0;
+
+err_runtime_get:
+ regulator_disable(hdmi.vdda_hdmi_dac_reg);
+err_vdac_enable:
+ gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
+ gpio_set_value(hdmi.ls_oe_gpio, 0);
+ return r;
+}
+
+static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+{
+ hdmi_runtime_put();
+ regulator_disable(hdmi.vdda_hdmi_dac_reg);
+ gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
+ gpio_set_value(hdmi.ls_oe_gpio, 0);
+}
+
+static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+{
+ int r;
+ struct omap_video_timings *p;
+ struct omap_overlay_manager *mgr = dssdev->output->manager;
+ unsigned long phy;
+
+ r = hdmi_power_on_core(dssdev);
+ if (r)
+ return r;
+
dss_mgr_disable(mgr);
p = &hdmi.ip_data.cfg.timings;
@@ -549,17 +589,6 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
hdmi.ip_data.ops->video_configure(&hdmi.ip_data);
- /* Make selection of HDMI in DSS */
- dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
-
- /* Select the dispc clock source as PRCM clock, to ensure that it is not
- * DSI PLL source as the clock selected by DSI PLL might not be
- * sufficient for the resolution selected / that can be changed
- * dynamically by user. This can be moved to single location , say
- * Boardfile.
- */
- dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
-
/* bypass TV gamma table */
dispc_enable_gamma_table(0);
@@ -583,16 +612,11 @@ err_vid_enable:
err_phy_enable:
hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
err_pll_enable:
- hdmi_runtime_put();
-err_runtime_get:
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
-err_vdac_enable:
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
- gpio_set_value(hdmi.ls_oe_gpio, 0);
+ hdmi_power_off_core(dssdev);
return -EIO;
}
-static void hdmi_power_off(struct omap_dss_device *dssdev)
+static void hdmi_power_off_full(struct omap_dss_device *dssdev)
{
struct omap_overlay_manager *mgr = dssdev->output->manager;
@@ -601,12 +625,8 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
- hdmi_runtime_put();
-
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
- gpio_set_value(hdmi.ls_oe_gpio, 0);
+ hdmi_power_off_core(dssdev);
}
int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
@@ -716,7 +736,7 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
- r = hdmi_power_on(dssdev);
+ r = hdmi_power_on_full(dssdev);
if (r) {
DSSERR("failed to power on device\n");
goto err1;
@@ -738,13 +758,48 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
- hdmi_power_off(dssdev);
+ hdmi_power_off_full(dssdev);
omap_dss_stop_device(dssdev);
mutex_unlock(&hdmi.lock);
}
+int omapdss_hdmi_core_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi.ip_data.hpd_gpio = hdmi.hpd_gpio;
+
+ r = hdmi_power_on_core(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err0:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+void omapdss_hdmi_core_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi_power_off_core(dssdev);
+
+ mutex_unlock(&hdmi.lock);
+}
+
static int hdmi_get_clocks(struct platform_device *pdev)
{
struct clk *clk;
@@ -913,7 +968,7 @@ int hdmi_audio_config(struct omap_dss_audio *audio)
static struct omap_dss_device * __init hdmi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -971,9 +1026,19 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
return;
}
+ r = omapdss_output_set_device(&hdmi.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&hdmi.output);
+ hdmi_uninit_display(dssdev);
dss_put_device(dssdev);
return;
}
@@ -1000,30 +1065,28 @@ static void __exit hdmi_uninit_output(struct platform_device *pdev)
/* HDMI HW IP initialisation */
static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
{
- struct resource *hdmi_mem;
+ struct resource *res;
int r;
hdmi.pdev = pdev;
mutex_init(&hdmi.lock);
+ mutex_init(&hdmi.ip_data.lock);
- hdmi_mem = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
- if (!hdmi_mem) {
+ res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
+ if (!res) {
DSSERR("can't get IORESOURCE_MEM HDMI\n");
return -EINVAL;
}
/* Base address taken from platform */
- hdmi.ip_data.base_wp = ioremap(hdmi_mem->start,
- resource_size(hdmi_mem));
- if (!hdmi.ip_data.base_wp) {
- DSSERR("can't ioremap WP\n");
- return -ENOMEM;
- }
+ hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hdmi.ip_data.base_wp))
+ return PTR_ERR(hdmi.ip_data.base_wp);
r = hdmi_get_clocks(pdev);
if (r) {
- iounmap(hdmi.ip_data.base_wp);
+ DSSERR("can't get clocks\n");
return r;
}
@@ -1034,9 +1097,11 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
hdmi.ip_data.phy_offset = HDMI_PHY;
- mutex_init(&hdmi.ip_data.lock);
-
- hdmi_panel_init();
+ r = hdmi_panel_init();
+ if (r) {
+ DSSERR("can't init panel\n");
+ goto err_panel_init;
+ }
dss_debugfs_create_file("hdmi", hdmi_dump_regs);
@@ -1045,6 +1110,10 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi_probe_pdata(pdev);
return 0;
+
+err_panel_init:
+ hdmi_put_clocks();
+ return r;
}
static int __exit hdmi_remove_child(struct device *dev, void *data)
@@ -1068,8 +1137,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
hdmi_put_clocks();
- iounmap(hdmi.ip_data.base_wp);
-
return 0;
}
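
One detail worth noting in the hdmi.c changes above: hdmi_timings_compare() now compares pixel clocks rounded to the nearest MHz (the values are stored in kHz), so closely related modes match. For example, 74250 kHz and 74176 kHz, the 60 Hz and 59.94 Hz variants of 720p, both give DIV_ROUND_CLOSEST(x, 1000) = 74 and are treated as the same timing, whereas an exact kHz comparison would reject the match.
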
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 69fb115bab32..dfb8eda81b61 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -280,58 +280,6 @@ static void hdmi_panel_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- mutex_lock(&hdmi.lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = -EINVAL;
- goto err;
- }
-
- /*
- * TODO: notify audio users that the display was suspended. For now,
- * disable audio locally to not break our audio state machine.
- */
- hdmi_panel_audio_disable(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- omapdss_hdmi_display_disable(dssdev);
-
-err:
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
-static int hdmi_panel_resume(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- mutex_lock(&hdmi.lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- r = -EINVAL;
- goto err;
- }
-
- r = omapdss_hdmi_display_enable(dssdev);
- if (r) {
- DSSERR("failed to power on\n");
- goto err;
- }
- /* TODO: notify audio users that the panel resumed. */
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-err:
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
static void hdmi_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -379,20 +327,22 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
{
int r;
+ bool need_enable;
mutex_lock(&hdmi.lock);
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = omapdss_hdmi_display_enable(dssdev);
+ need_enable = dssdev->state == OMAP_DSS_DISPLAY_DISABLED;
+
+ if (need_enable) {
+ r = omapdss_hdmi_core_enable(dssdev);
if (r)
goto err;
}
r = omapdss_hdmi_read_edid(buf, len);
- if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
- dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
- omapdss_hdmi_display_disable(dssdev);
+ if (need_enable)
+ omapdss_hdmi_core_disable(dssdev);
err:
mutex_unlock(&hdmi.lock);
@@ -402,20 +352,22 @@ err:
static bool hdmi_detect(struct omap_dss_device *dssdev)
{
int r;
+ bool need_enable;
mutex_lock(&hdmi.lock);
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = omapdss_hdmi_display_enable(dssdev);
+ need_enable = dssdev->state == OMAP_DSS_DISPLAY_DISABLED;
+
+ if (need_enable) {
+ r = omapdss_hdmi_core_enable(dssdev);
if (r)
goto err;
}
r = omapdss_hdmi_detect();
- if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
- dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
- omapdss_hdmi_display_disable(dssdev);
+ if (need_enable)
+ omapdss_hdmi_core_disable(dssdev);
err:
mutex_unlock(&hdmi.lock);
@@ -427,8 +379,6 @@ static struct omap_dss_driver hdmi_driver = {
.remove = hdmi_panel_remove,
.enable = hdmi_panel_enable,
.disable = hdmi_panel_disable,
- .suspend = hdmi_panel_suspend,
- .resume = hdmi_panel_resume,
.get_timings = hdmi_get_timings,
.set_timings = hdmi_set_timings,
.check_timings = hdmi_check_timings,
@@ -454,9 +404,7 @@ int hdmi_panel_init(void)
spin_lock_init(&hdmi.audio_lock);
#endif
- omap_dss_register_driver(&hdmi_driver);
-
- return 0;
+ return omap_dss_register_driver(&hdmi_driver);
}
void hdmi_panel_exit(void)
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index c54d2f620ce3..2551eaa14c42 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -36,36 +36,6 @@
static int num_managers;
static struct omap_overlay_manager *managers;
-static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
-{
- return mgr->output ? mgr->output->device : NULL;
-}
-
-static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- struct omap_dss_device *dssdev = mgr->get_device(mgr);
- u32 irq;
- int r;
-
- r = dispc_runtime_get();
- if (r)
- return r;
-
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
- irq = DISPC_IRQ_EVSYNC_ODD;
- else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
- irq = DISPC_IRQ_EVSYNC_EVEN;
- else
- irq = dispc_mgr_get_vsync_irq(mgr->id);
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
-
- dispc_runtime_put();
-
- return r;
-}
-
int dss_init_overlay_managers(struct platform_device *pdev)
{
int i, r;
@@ -99,15 +69,6 @@ int dss_init_overlay_managers(struct platform_device *pdev)
break;
}
- mgr->set_output = &dss_mgr_set_output;
- mgr->unset_output = &dss_mgr_unset_output;
- mgr->apply = &omap_dss_mgr_apply;
- mgr->set_manager_info = &dss_mgr_set_info;
- mgr->get_manager_info = &dss_mgr_get_info;
- mgr->wait_for_go = &dss_mgr_wait_for_go;
- mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
- mgr->get_device = &dss_mgr_get_device;
-
mgr->caps = 0;
mgr->supported_displays =
dss_feat_get_supported_displays(mgr->id);
diff --git a/drivers/video/omap2/dss/output.c b/drivers/video/omap2/dss/output.c
index 813f26682b7a..79dea1a1a732 100644
--- a/drivers/video/omap2/dss/output.c
+++ b/drivers/video/omap2/dss/output.c
@@ -114,35 +114,67 @@ struct omap_dss_output *omap_dss_get_output(enum omap_dss_output_id id)
return NULL;
}
-struct omap_dss_output *omapdss_get_output_from_dssdev(struct omap_dss_device *dssdev)
+static const struct dss_mgr_ops *dss_mgr_ops;
+
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops)
{
- struct omap_dss_output *out = NULL;
- enum omap_dss_output_id id;
-
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_DPI);
- break;
- case OMAP_DISPLAY_TYPE_DBI:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_DBI);
- break;
- case OMAP_DISPLAY_TYPE_SDI:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_SDI);
- break;
- case OMAP_DISPLAY_TYPE_VENC:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_VENC);
- break;
- case OMAP_DISPLAY_TYPE_HDMI:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_HDMI);
- break;
- case OMAP_DISPLAY_TYPE_DSI:
- id = dssdev->phy.dsi.module == 0 ? OMAP_DSS_OUTPUT_DSI1 :
- OMAP_DSS_OUTPUT_DSI2;
- out = omap_dss_get_output(id);
- break;
- default:
- break;
- }
+ if (dss_mgr_ops)
+ return -EBUSY;
+
+ dss_mgr_ops = mgr_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL(dss_install_mgr_ops);
+
+void dss_uninstall_mgr_ops(void)
+{
+ dss_mgr_ops = NULL;
+}
+EXPORT_SYMBOL(dss_uninstall_mgr_ops);
+
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings)
+{
+ dss_mgr_ops->set_timings(mgr, timings);
+}
+EXPORT_SYMBOL(dss_mgr_set_timings);
+
+void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config)
+{
+ dss_mgr_ops->set_lcd_config(mgr, config);
+}
+EXPORT_SYMBOL(dss_mgr_set_lcd_config);
+
+int dss_mgr_enable(struct omap_overlay_manager *mgr)
+{
+ return dss_mgr_ops->enable(mgr);
+}
+EXPORT_SYMBOL(dss_mgr_enable);
+
+void dss_mgr_disable(struct omap_overlay_manager *mgr)
+{
+ dss_mgr_ops->disable(mgr);
+}
+EXPORT_SYMBOL(dss_mgr_disable);
- return out;
+void dss_mgr_start_update(struct omap_overlay_manager *mgr)
+{
+ dss_mgr_ops->start_update(mgr);
+}
+EXPORT_SYMBOL(dss_mgr_start_update);
+
+int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ return dss_mgr_ops->register_framedone_handler(mgr, handler, data);
+}
+EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
+
+void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ dss_mgr_ops->unregister_framedone_handler(mgr, handler, data);
}
+EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 45f4994bc6b0..eccde322c28a 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -38,13 +38,6 @@
static int num_overlays;
static struct omap_overlay *overlays;
-static inline struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
-{
- return ovl->manager ?
- (ovl->manager->output ? ovl->manager->output->device : NULL) :
- NULL;
-}
-
int omap_dss_get_num_overlays(void)
{
return num_overlays;
@@ -93,16 +86,6 @@ void dss_init_overlays(struct platform_device *pdev)
break;
}
- ovl->is_enabled = &dss_ovl_is_enabled;
- ovl->enable = &dss_ovl_enable;
- ovl->disable = &dss_ovl_disable;
- ovl->set_manager = &dss_ovl_set_manager;
- ovl->unset_manager = &dss_ovl_unset_manager;
- ovl->set_overlay_info = &dss_ovl_set_info;
- ovl->get_overlay_info = &dss_ovl_get_info;
- ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
- ovl->get_device = &dss_ovl_get_device;
-
ovl->caps = dss_feat_get_overlay_caps(ovl->id);
ovl->supported_modes =
dss_feat_get_supported_color_modes(ovl->id);
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 7282e5af3e1a..e903dd3f54d9 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -342,7 +342,7 @@ static int rfbi_transfer_area(struct omap_dss_device *dssdev,
return 0;
}
-static void framedone_callback(void *data, u32 mask)
+static void framedone_callback(void *data)
{
void (*callback)(void *data);
@@ -908,8 +908,8 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
- r = omap_dispc_register_isr(framedone_callback, NULL,
- DISPC_IRQ_FRAMEDONE);
+ r = dss_mgr_register_framedone_handler(out->manager,
+ framedone_callback, NULL);
if (r) {
DSSERR("can't get FRAMEDONE irq\n");
goto err1;
@@ -933,8 +933,10 @@ EXPORT_SYMBOL(omapdss_rfbi_display_enable);
void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
{
- omap_dispc_unregister_isr(framedone_callback, NULL,
- DISPC_IRQ_FRAMEDONE);
+ struct omap_dss_output *out = dssdev->output;
+
+ dss_mgr_unregister_framedone_handler(out->manager,
+ framedone_callback, NULL);
omap_dss_stop_device(dssdev);
rfbi_runtime_put();
@@ -950,7 +952,7 @@ static int __init rfbi_init_display(struct omap_dss_device *dssdev)
static struct omap_dss_device * __init rfbi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -999,9 +1001,18 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
return;
}
+ r = omapdss_output_set_device(&rfbi.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&rfbi.output);
dss_put_device(dssdev);
return;
}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 7760851f6e5d..62b5374ce438 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -205,7 +205,7 @@ static int __init sdi_init_display(struct omap_dss_device *dssdev)
static struct omap_dss_device * __init sdi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -254,9 +254,18 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
return;
}
+ r = omapdss_output_set_device(&sdi.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&sdi.output);
dss_put_device(dssdev);
return;
}
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index b046c208cb97..216aa704f9d7 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -102,6 +102,8 @@ struct ti_hdmi_ip_ops {
int (*audio_config)(struct hdmi_ip_data *ip_data,
struct omap_dss_audio *audio);
+
+ int (*audio_get_dma_port)(u32 *offset, u32 *size);
#endif
};
@@ -183,5 +185,6 @@ int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
struct omap_dss_audio *audio);
+int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size);
#endif
#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index c23b85a20cdc..e18b222ed739 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -899,7 +899,7 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
hdmi_read_reg(hdmi_av_base(ip_data), r))
#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
- (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
+ (i < 10) ? 32 - (int)strlen(#r) : 31 - (int)strlen(#r), " ", \
hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
DUMPCORE(HDMI_CORE_SYS_VND_IDL);
@@ -1418,4 +1418,13 @@ void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
REG_FLD_MOD(hdmi_wp_base(ip_data),
HDMI_WP_AUDIO_CTRL, false, 30, 30);
}
+
+int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size)
+{
+ if (!offset || !size)
+ return -EINVAL;
+ *offset = HDMI_WP_AUDIO_DATA;
+ *size = 4;
+ return 0;
+}
#endif
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 56efa3bb465d..006caf3cb509 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -744,7 +744,7 @@ static void venc_put_clocks(void)
static struct omap_dss_device * __init venc_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -795,9 +795,18 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
return;
}
+ r = omapdss_output_set_device(&venc.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&venc.output);
dss_put_device(dssdev);
return;
}
diff --git a/drivers/video/omap2/dss/venc_panel.c b/drivers/video/omap2/dss/venc_panel.c
index d55b8784ecfd..0d2b1a0834a0 100644
--- a/drivers/video/omap2/dss/venc_panel.c
+++ b/drivers/video/omap2/dss/venc_panel.c
@@ -157,12 +157,6 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
goto end;
- if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
- /* suspended is the same as disabled with venc */
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- goto end;
- }
-
omapdss_venc_display_disable(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -170,17 +164,6 @@ end:
mutex_unlock(&venc_panel.lock);
}
-static int venc_panel_suspend(struct omap_dss_device *dssdev)
-{
- venc_panel_disable(dssdev);
- return 0;
-}
-
-static int venc_panel_resume(struct omap_dss_device *dssdev)
-{
- return venc_panel_enable(dssdev);
-}
-
static void venc_panel_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -222,8 +205,6 @@ static struct omap_dss_driver venc_driver = {
.enable = venc_panel_enable,
.disable = venc_panel_disable,
- .suspend = venc_panel_suspend,
- .resume = venc_panel_resume,
.get_resolution = omapdss_default_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 4ea17dc3258c..4cb12ce68855 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -2,7 +2,6 @@ menuconfig FB_OMAP2
tristate "OMAP2+ frame buffer support"
depends on FB && OMAP2_DSS && !DRM_OMAP
- select OMAP2_VRAM
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 532a31b3d96b..d30b45d72649 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -28,10 +28,10 @@
#include <linux/omapfb.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <linux/sizes.h>
#include <video/omapdss.h>
#include <video/omapvrfb.h>
-#include <plat/vram.h>
#include "omapfb.h"
@@ -211,6 +211,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_dss_device *display = fb2display(fbi);
struct omapfb2_mem_region *rg;
int r = 0, i;
size_t size;
@@ -220,6 +221,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
size = PAGE_ALIGN(mi->size);
+ if (display && display->driver->sync)
+ display->driver->sync(display);
+
rg = ofbi->region;
down_write_nested(&rg->lock, rg->id);
@@ -279,7 +283,7 @@ static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
return 0;
}
-static int omapfb_update_window_nolock(struct fb_info *fbi,
+static int omapfb_update_window(struct fb_info *fbi,
u32 x, u32 y, u32 w, u32 h)
{
struct omap_dss_device *display = fb2display(fbi);
@@ -299,27 +303,6 @@ static int omapfb_update_window_nolock(struct fb_info *fbi,
return display->driver->update(display, x, y, w, h);
}
-/* This function is exported for SGX driver use */
-int omapfb_update_window(struct fb_info *fbi,
- u32 x, u32 y, u32 w, u32 h)
-{
- struct omapfb_info *ofbi = FB2OFB(fbi);
- struct omapfb2_device *fbdev = ofbi->fbdev;
- int r;
-
- if (!lock_fb_info(fbi))
- return -ENODEV;
- omapfb_lock(fbdev);
-
- r = omapfb_update_window_nolock(fbi, x, y, w, h);
-
- omapfb_unlock(fbdev);
- unlock_fb_info(fbi);
-
- return r;
-}
-EXPORT_SYMBOL(omapfb_update_window);
-
int omapfb_set_update_mode(struct fb_info *fbi,
enum omapfb_update_mode mode)
{
@@ -646,7 +629,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
}
- r = omapfb_update_window_nolock(fbi, p.uwnd_o.x, p.uwnd_o.y,
+ r = omapfb_update_window(fbi, p.uwnd_o.x, p.uwnd_o.y,
p.uwnd_o.width, p.uwnd_o.height);
break;
@@ -663,7 +646,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
}
- r = omapfb_update_window_nolock(fbi, p.uwnd.x, p.uwnd.y,
+ r = omapfb_update_window(fbi, p.uwnd.x, p.uwnd.y,
p.uwnd.width, p.uwnd.height);
break;
@@ -853,14 +836,15 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
case OMAPFB_GET_VRAM_INFO: {
- unsigned long vram, free, largest;
-
DBG("ioctl GET_VRAM_INFO\n");
- omap_vram_get_info(&vram, &free, &largest);
- p.vram_info.total = vram;
- p.vram_info.free = free;
- p.vram_info.largest_free_block = largest;
+ /*
+ * We don't have the ability to get this vram info anymore.
+ * Fill in something that should keep the applications working.
+ */
+ p.vram_info.total = SZ_1M * 64;
+ p.vram_info.free = SZ_1M * 64;
+ p.vram_info.largest_free_block = SZ_1M * 64;
if (copy_to_user((void __user *)arg, &p.vram_info,
sizeof(p.vram_info)))
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index bc225e46fdd2..ca585ef37f25 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -31,7 +31,6 @@
#include <linux/omapfb.h>
#include <video/omapdss.h>
-#include <plat/vram.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
@@ -1258,11 +1257,10 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
switch (blank) {
case FB_BLANK_UNBLANK:
- if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
+ if (display->state == OMAP_DSS_DISPLAY_ACTIVE)
goto exit;
- if (display->driver->resume)
- r = display->driver->resume(display);
+ r = display->driver->enable(display);
if ((display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) &&
d->update_mode == OMAPFB_AUTO_UPDATE &&
@@ -1283,8 +1281,7 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (d->auto_update_work_enabled)
omapfb_stop_auto_update(fbdev, display);
- if (display->driver->suspend)
- r = display->driver->suspend(display);
+ display->driver->disable(display);
break;
@@ -1335,24 +1332,25 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
rg = ofbi->region;
- WARN_ON(atomic_read(&rg->map_count));
-
- if (rg->paddr)
- if (omap_vram_free(rg->paddr, rg->size))
- dev_err(fbdev->dev, "VRAM FREE failed\n");
+ if (rg->token == NULL)
+ return;
- if (rg->vaddr)
- iounmap(rg->vaddr);
+ WARN_ON(atomic_read(&rg->map_count));
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
/* unmap the 0 angle rotation */
if (rg->vrfb.vaddr[0]) {
iounmap(rg->vrfb.vaddr[0]);
- omap_vrfb_release_ctx(&rg->vrfb);
rg->vrfb.vaddr[0] = NULL;
}
+
+ omap_vrfb_release_ctx(&rg->vrfb);
}
+ dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle,
+ &rg->attrs);
+
+ rg->token = NULL;
rg->vaddr = NULL;
rg->paddr = 0;
rg->alloc = 0;
@@ -1387,7 +1385,9 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omapfb2_mem_region *rg;
- void __iomem *vaddr;
+ void *token;
+ DEFINE_DMA_ATTRS(attrs);
+ dma_addr_t dma_handle;
int r;
rg = ofbi->region;
@@ -1402,42 +1402,40 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
size = PAGE_ALIGN(size);
- if (!paddr) {
- DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
- r = omap_vram_alloc(size, &paddr);
- } else {
- DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr,
- ofbi->id);
- r = omap_vram_reserve(paddr, size);
- }
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- if (r) {
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+ DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
+
+ token = dma_alloc_attrs(fbdev->dev, size, &dma_handle,
+ GFP_KERNEL, &attrs);
+
+ if (token == NULL) {
dev_err(fbdev->dev, "failed to allocate framebuffer\n");
return -ENOMEM;
}
- if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) {
- vaddr = ioremap_wc(paddr, size);
-
- if (!vaddr) {
- dev_err(fbdev->dev, "failed to ioremap framebuffer\n");
- omap_vram_free(paddr, size);
- return -ENOMEM;
- }
+ DBG("allocated VRAM paddr %lx, vaddr %p\n",
+ (unsigned long)dma_handle, token);
- DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
- } else {
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
r = omap_vrfb_request_ctx(&rg->vrfb);
if (r) {
+ dma_free_attrs(fbdev->dev, size, token, dma_handle,
+ &attrs);
dev_err(fbdev->dev, "vrfb create ctx failed\n");
return r;
}
-
- vaddr = NULL;
}
- rg->paddr = paddr;
- rg->vaddr = vaddr;
+ rg->attrs = attrs;
+ rg->token = token;
+ rg->dma_handle = dma_handle;
+
+ rg->paddr = (unsigned long)dma_handle;
+ rg->vaddr = (void __iomem *)token;
rg->size = size;
rg->alloc = 1;
@@ -1531,6 +1529,9 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
}
+ WARN_ONCE(paddr,
+ "reserving memory at predefined address not supported\n");
+
paddrs[fbnum] = paddr;
sizes[fbnum] = size;
@@ -1610,7 +1611,6 @@ int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
- struct omap_dss_device *display = fb2display(fbi);
struct omapfb2_mem_region *rg = ofbi->region;
unsigned long old_size = rg->size;
unsigned long old_paddr = rg->paddr;
@@ -1625,9 +1625,6 @@ int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
if (old_size == size && old_type == type)
return 0;
- if (display && display->driver->sync)
- display->driver->sync(display);
-
omapfb_free_fbmem(fbi);
if (size == 0) {
@@ -1882,7 +1879,6 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
}
dev_set_drvdata(fbdev->dev, NULL);
- kfree(fbdev);
}
static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
@@ -2258,26 +2254,28 @@ static int omapfb_find_best_mode(struct omap_dss_device *display,
{
struct fb_monspecs *specs;
u8 *edid;
- int r, i, best_xres, best_idx, len;
+ int r, i, best_idx, len;
if (!display->driver->read_edid)
return -ENODEV;
len = 0x80 * 2;
edid = kmalloc(len, GFP_KERNEL);
+ if (edid == NULL)
+ return -ENOMEM;
r = display->driver->read_edid(display, edid, len);
if (r < 0)
goto err1;
specs = kzalloc(sizeof(*specs), GFP_KERNEL);
+ if (specs == NULL) {
+ r = -ENOMEM;
+ goto err1;
+ }
fb_edid_to_monspecs(edid, specs);
- if (edid[126] > 0)
- fb_edid_add_monspecs(edid + 0x80, specs);
-
- best_xres = 0;
best_idx = -1;
for (i = 0; i < specs->modedb_len; ++i) {
@@ -2293,16 +2291,20 @@ static int omapfb_find_best_mode(struct omap_dss_device *display,
if (m->xres == 2880 || m->xres == 1440)
continue;
+ if (m->vmode & FB_VMODE_INTERLACED ||
+ m->vmode & FB_VMODE_DOUBLE)
+ continue;
+
fb_videomode_to_omap_timings(m, display, &t);
r = display->driver->check_timings(display, &t);
- if (r == 0 && best_xres < m->xres) {
- best_xres = m->xres;
+ if (r == 0) {
best_idx = i;
+ break;
}
}
- if (best_xres == 0) {
+ if (best_idx == -1) {
r = -ENOENT;
goto err2;
}
@@ -2371,15 +2373,62 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return 0;
}
+static int omapfb_init_connections(struct omapfb2_device *fbdev,
+ struct omap_dss_device *def_dssdev)
+{
+ int i, r;
+ struct omap_overlay_manager *mgr;
+
+ if (!def_dssdev->output) {
+ dev_err(fbdev->dev, "no output for the default display\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fbdev->num_displays; ++i) {
+ struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
+ struct omap_dss_output *out = dssdev->output;
+
+ mgr = omap_dss_get_overlay_manager(dssdev->channel);
+
+ if (!mgr || !out)
+ continue;
+
+ if (mgr->output)
+ mgr->unset_output(mgr);
+
+ mgr->set_output(mgr, out);
+ }
+
+ mgr = def_dssdev->output->manager;
+
+ if (!mgr) {
+ dev_err(fbdev->dev, "no ovl manager for the default display\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fbdev->num_overlays; i++) {
+ struct omap_overlay *ovl = fbdev->overlays[i];
+
+ if (ovl->manager)
+ ovl->unset_manager(ovl);
+
+ r = ovl->set_manager(ovl, mgr);
+ if (r)
+ dev_warn(fbdev->dev,
+ "failed to connect overlay %s to manager %s\n",
+ ovl->name, mgr->name);
+ }
+
+ return 0;
+}
+
static int __init omapfb_probe(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = NULL;
int r = 0;
int i;
- struct omap_overlay *ovl;
struct omap_dss_device *def_display;
struct omap_dss_device *dssdev;
- struct omap_dss_device *ovl_device;
DBG("omapfb_probe\n");
@@ -2389,7 +2438,8 @@ static int __init omapfb_probe(struct platform_device *pdev)
goto err0;
}
- fbdev = kzalloc(sizeof(struct omapfb2_device), GFP_KERNEL);
+ fbdev = devm_kzalloc(&pdev->dev, sizeof(struct omapfb2_device),
+ GFP_KERNEL);
if (fbdev == NULL) {
r = -ENOMEM;
goto err0;
@@ -2401,13 +2451,15 @@ static int __init omapfb_probe(struct platform_device *pdev)
"ignoring the module parameter vrfb=y\n");
}
+ r = omapdss_compat_init();
+ if (r)
+ goto err0;
mutex_init(&fbdev->mtx);
fbdev->dev = &pdev->dev;
platform_set_drvdata(pdev, fbdev);
- r = 0;
fbdev->num_displays = 0;
dssdev = NULL;
for_each_dss_dev(dssdev) {
@@ -2430,9 +2482,6 @@ static int __init omapfb_probe(struct platform_device *pdev)
d->update_mode = OMAPFB_AUTO_UPDATE;
}
- if (r)
- goto cleanup;
-
if (fbdev->num_displays == 0) {
dev_err(&pdev->dev, "no displays\n");
r = -EINVAL;
@@ -2447,15 +2496,33 @@ static int __init omapfb_probe(struct platform_device *pdev)
for (i = 0; i < fbdev->num_managers; i++)
fbdev->managers[i] = omap_dss_get_overlay_manager(i);
- /* gfx overlay should be the default one. find a display
- * connected to that, and use it as default display */
- ovl = omap_dss_get_overlay(0);
- ovl_device = ovl->get_device(ovl);
- if (ovl_device) {
- def_display = ovl_device;
- } else {
- dev_warn(&pdev->dev, "cannot find default display\n");
- def_display = NULL;
+ def_display = NULL;
+
+ for (i = 0; i < fbdev->num_displays; ++i) {
+ struct omap_dss_device *dssdev;
+ const char *def_name;
+
+ def_name = omapdss_get_default_display_name();
+
+ dssdev = fbdev->displays[i].dssdev;
+
+ if (def_name == NULL ||
+ (dssdev->name && strcmp(def_name, dssdev->name) == 0)) {
+ def_display = dssdev;
+ break;
+ }
+ }
+
+ if (def_display == NULL) {
+ dev_err(fbdev->dev, "failed to find default display\n");
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ r = omapfb_init_connections(fbdev, def_display);
+ if (r) {
+ dev_err(fbdev->dev, "failed to init overlay connections\n");
+ goto cleanup;
}
if (def_mode && strlen(def_mode) > 0) {
@@ -2506,6 +2573,7 @@ static int __init omapfb_probe(struct platform_device *pdev)
cleanup:
omapfb_free_resources(fbdev);
+ omapdss_compat_uninit();
err0:
dev_err(&pdev->dev, "failed to setup omapfb\n");
return r;
@@ -2521,6 +2589,8 @@ static int __exit omapfb_remove(struct platform_device *pdev)
omapfb_free_resources(fbdev);
+ omapdss_compat_uninit();
+
return 0;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 17aa174e187c..18fa9e1d0033 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -441,6 +441,7 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_dss_device *display = fb2display(fbi);
struct omapfb2_mem_region *rg;
unsigned long size;
int r;
@@ -455,6 +456,9 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
if (!lock_fb_info(fbi))
return -ENODEV;
+ if (display && display->driver->sync)
+ display->driver->sync(display);
+
rg = ofbi->region;
down_write_nested(&rg->lock, rg->id);
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index 5ced9b334d35..623cd872a367 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -28,6 +28,8 @@
#endif
#include <linux/rwsem.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
#include <video/omapdss.h>
@@ -49,6 +51,9 @@ extern bool omapfb_debug;
struct omapfb2_mem_region {
int id;
+ struct dma_attrs attrs;
+ void *token;
+ dma_addr_t dma_handle;
u32 paddr;
void __iomem *vaddr;
struct vrfb vrfb;
@@ -124,9 +129,6 @@ void omapfb_remove_sysfs(struct omapfb2_device *fbdev);
int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg);
-int omapfb_update_window(struct fb_info *fbi,
- u32 x, u32 y, u32 w, u32 h);
-
int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
struct fb_var_screeninfo *var);
@@ -144,16 +146,16 @@ int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode);
static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
- int i;
+ struct omap_overlay *ovl;
/* XXX: returns the display connected to first attached overlay */
- for (i = 0; i < ofbi->num_overlays; i++) {
- struct omap_overlay *ovl = ofbi->overlays[i];
- return ovl->get_device(ovl);
- }
+ if (ofbi->num_overlays == 0)
+ return NULL;
- return NULL;
+ ovl = ofbi->overlays[0];
+
+ return ovl->get_device(ovl);
}
static inline struct omapfb_display_data *get_display_data(
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c
deleted file mode 100644
index f2b15c4a75bc..000000000000
--- a/drivers/video/omap2/vram.c
+++ /dev/null
@@ -1,514 +0,0 @@
-/*
- * VRAM manager for OMAP
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*#define DEBUG*/
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/memblock.h>
-#include <linux/completion.h>
-#include <linux/debugfs.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-
-#include <asm/setup.h>
-
-#include <plat/vram.h>
-
-#ifdef DEBUG
-#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
-#else
-#define DBG(format, ...)
-#endif
-
-/* postponed regions are used to temporarily store region information at boot
- * time when we cannot yet allocate the region list */
-#define MAX_POSTPONED_REGIONS 10
-
-static bool vram_initialized;
-static int postponed_cnt;
-static struct {
- unsigned long paddr;
- size_t size;
-} postponed_regions[MAX_POSTPONED_REGIONS];
-
-struct vram_alloc {
- struct list_head list;
- unsigned long paddr;
- unsigned pages;
-};
-
-struct vram_region {
- struct list_head list;
- struct list_head alloc_list;
- unsigned long paddr;
- unsigned pages;
-};
-
-static DEFINE_MUTEX(region_mutex);
-static LIST_HEAD(region_list);
-
-static struct vram_region *omap_vram_create_region(unsigned long paddr,
- unsigned pages)
-{
- struct vram_region *rm;
-
- rm = kzalloc(sizeof(*rm), GFP_KERNEL);
-
- if (rm) {
- INIT_LIST_HEAD(&rm->alloc_list);
- rm->paddr = paddr;
- rm->pages = pages;
- }
-
- return rm;
-}
-
-#if 0
-static void omap_vram_free_region(struct vram_region *vr)
-{
- list_del(&vr->list);
- kfree(vr);
-}
-#endif
-
-static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
- unsigned long paddr, unsigned pages)
-{
- struct vram_alloc *va;
- struct vram_alloc *new;
-
- new = kzalloc(sizeof(*va), GFP_KERNEL);
-
- if (!new)
- return NULL;
-
- new->paddr = paddr;
- new->pages = pages;
-
- list_for_each_entry(va, &vr->alloc_list, list) {
- if (va->paddr > new->paddr)
- break;
- }
-
- list_add_tail(&new->list, &va->list);
-
- return new;
-}
-
-static void omap_vram_free_allocation(struct vram_alloc *va)
-{
- list_del(&va->list);
- kfree(va);
-}
-
-int omap_vram_add_region(unsigned long paddr, size_t size)
-{
- struct vram_region *rm;
- unsigned pages;
-
- if (vram_initialized) {
- DBG("adding region paddr %08lx size %d\n",
- paddr, size);
-
- size &= PAGE_MASK;
- pages = size >> PAGE_SHIFT;
-
- rm = omap_vram_create_region(paddr, pages);
- if (rm == NULL)
- return -ENOMEM;
-
- list_add(&rm->list, &region_list);
- } else {
- if (postponed_cnt == MAX_POSTPONED_REGIONS)
- return -ENOMEM;
-
- postponed_regions[postponed_cnt].paddr = paddr;
- postponed_regions[postponed_cnt].size = size;
-
- ++postponed_cnt;
- }
- return 0;
-}
-
-int omap_vram_free(unsigned long paddr, size_t size)
-{
- struct vram_region *rm;
- struct vram_alloc *alloc;
- unsigned start, end;
-
- DBG("free mem paddr %08lx size %d\n", paddr, size);
-
- size = PAGE_ALIGN(size);
-
- mutex_lock(&region_mutex);
-
- list_for_each_entry(rm, &region_list, list) {
- list_for_each_entry(alloc, &rm->alloc_list, list) {
- start = alloc->paddr;
- end = alloc->paddr + (alloc->pages >> PAGE_SHIFT);
-
- if (start >= paddr && end < paddr + size)
- goto found;
- }
- }
-
- mutex_unlock(&region_mutex);
- return -EINVAL;
-
-found:
- omap_vram_free_allocation(alloc);
-
- mutex_unlock(&region_mutex);
- return 0;
-}
-EXPORT_SYMBOL(omap_vram_free);
-
-static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
-{
- struct vram_region *rm;
- struct vram_alloc *alloc;
- size_t size;
-
- size = pages << PAGE_SHIFT;
-
- list_for_each_entry(rm, &region_list, list) {
- unsigned long start, end;
-
- DBG("checking region %lx %d\n", rm->paddr, rm->pages);
-
- start = rm->paddr;
- end = start + (rm->pages << PAGE_SHIFT) - 1;
- if (start > paddr || end < paddr + size - 1)
- continue;
-
- DBG("block ok, checking allocs\n");
-
- list_for_each_entry(alloc, &rm->alloc_list, list) {
- end = alloc->paddr - 1;
-
- if (start <= paddr && end >= paddr + size - 1)
- goto found;
-
- start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
- }
-
- end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
-
- if (!(start <= paddr && end >= paddr + size - 1))
- continue;
-found:
- DBG("found area start %lx, end %lx\n", start, end);
-
- if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
- return -ENOMEM;
-
- return 0;
- }
-
- return -ENOMEM;
-}
-
-int omap_vram_reserve(unsigned long paddr, size_t size)
-{
- unsigned pages;
- int r;
-
- DBG("reserve mem paddr %08lx size %d\n", paddr, size);
-
- size = PAGE_ALIGN(size);
- pages = size >> PAGE_SHIFT;
-
- mutex_lock(&region_mutex);
-
- r = _omap_vram_reserve(paddr, pages);
-
- mutex_unlock(&region_mutex);
-
- return r;
-}
-EXPORT_SYMBOL(omap_vram_reserve);
-
-static int _omap_vram_alloc(unsigned pages, unsigned long *paddr)
-{
- struct vram_region *rm;
- struct vram_alloc *alloc;
-
- list_for_each_entry(rm, &region_list, list) {
- unsigned long start, end;
-
- DBG("checking region %lx %d\n", rm->paddr, rm->pages);
-
- start = rm->paddr;
-
- list_for_each_entry(alloc, &rm->alloc_list, list) {
- end = alloc->paddr;
-
- if (end - start >= pages << PAGE_SHIFT)
- goto found;
-
- start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
- }
-
- end = rm->paddr + (rm->pages << PAGE_SHIFT);
-found:
- if (end - start < pages << PAGE_SHIFT)
- continue;
-
- DBG("found %lx, end %lx\n", start, end);
-
- alloc = omap_vram_create_allocation(rm, start, pages);
- if (alloc == NULL)
- return -ENOMEM;
-
- *paddr = start;
-
- return 0;
- }
-
- return -ENOMEM;
-}
-
-int omap_vram_alloc(size_t size, unsigned long *paddr)
-{
- unsigned pages;
- int r;
-
- BUG_ON(!size);
-
- DBG("alloc mem size %d\n", size);
-
- size = PAGE_ALIGN(size);
- pages = size >> PAGE_SHIFT;
-
- mutex_lock(&region_mutex);
-
- r = _omap_vram_alloc(pages, paddr);
-
- mutex_unlock(&region_mutex);
-
- return r;
-}
-EXPORT_SYMBOL(omap_vram_alloc);
-
-void omap_vram_get_info(unsigned long *vram,
- unsigned long *free_vram,
- unsigned long *largest_free_block)
-{
- struct vram_region *vr;
- struct vram_alloc *va;
-
- *vram = 0;
- *free_vram = 0;
- *largest_free_block = 0;
-
- mutex_lock(&region_mutex);
-
- list_for_each_entry(vr, &region_list, list) {
- unsigned free;
- unsigned long pa;
-
- pa = vr->paddr;
- *vram += vr->pages << PAGE_SHIFT;
-
- list_for_each_entry(va, &vr->alloc_list, list) {
- free = va->paddr - pa;
- *free_vram += free;
- if (free > *largest_free_block)
- *largest_free_block = free;
- pa = va->paddr + (va->pages << PAGE_SHIFT);
- }
-
- free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
- *free_vram += free;
- if (free > *largest_free_block)
- *largest_free_block = free;
- }
-
- mutex_unlock(&region_mutex);
-}
-EXPORT_SYMBOL(omap_vram_get_info);
-
-#if defined(CONFIG_DEBUG_FS)
-static int vram_debug_show(struct seq_file *s, void *unused)
-{
- struct vram_region *vr;
- struct vram_alloc *va;
- unsigned size;
-
- mutex_lock(&region_mutex);
-
- list_for_each_entry(vr, &region_list, list) {
- size = vr->pages << PAGE_SHIFT;
- seq_printf(s, "%08lx-%08lx (%d bytes)\n",
- vr->paddr, vr->paddr + size - 1,
- size);
-
- list_for_each_entry(va, &vr->alloc_list, list) {
- size = va->pages << PAGE_SHIFT;
- seq_printf(s, " %08lx-%08lx (%d bytes)\n",
- va->paddr, va->paddr + size - 1,
- size);
- }
- }
-
- mutex_unlock(&region_mutex);
-
- return 0;
-}
-
-static int vram_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, vram_debug_show, inode->i_private);
-}
-
-static const struct file_operations vram_debug_fops = {
- .open = vram_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init omap_vram_create_debugfs(void)
-{
- struct dentry *d;
-
- d = debugfs_create_file("vram", S_IRUGO, NULL,
- NULL, &vram_debug_fops);
- if (IS_ERR(d))
- return PTR_ERR(d);
-
- return 0;
-}
-#endif
-
-static __init int omap_vram_init(void)
-{
- int i;
-
- vram_initialized = 1;
-
- for (i = 0; i < postponed_cnt; i++)
- omap_vram_add_region(postponed_regions[i].paddr,
- postponed_regions[i].size);
-
-#ifdef CONFIG_DEBUG_FS
- if (omap_vram_create_debugfs())
- pr_err("VRAM: Failed to create debugfs file\n");
-#endif
-
- return 0;
-}
-
-arch_initcall(omap_vram_init);
-
-/* boottime vram alloc stuff */
-
-/* set from board file */
-static u32 omap_vram_sdram_start __initdata;
-static u32 omap_vram_sdram_size __initdata;
-
-/* set from kernel cmdline */
-static u32 omap_vram_def_sdram_size __initdata;
-static u32 omap_vram_def_sdram_start __initdata;
-
-static int __init omap_vram_early_vram(char *p)
-{
- omap_vram_def_sdram_size = memparse(p, &p);
- if (*p == ',')
- omap_vram_def_sdram_start = simple_strtoul(p + 1, &p, 16);
- return 0;
-}
-early_param("vram", omap_vram_early_vram);
-
-/*
- * Called from map_io. We need to call to this early enough so that we
- * can reserve the fixed SDRAM regions before VM could get hold of them.
- */
-void __init omap_vram_reserve_sdram_memblock(void)
-{
- u32 paddr;
- u32 size = 0;
-
- /* cmdline arg overrides the board file definition */
- if (omap_vram_def_sdram_size) {
- size = omap_vram_def_sdram_size;
- paddr = omap_vram_def_sdram_start;
- }
-
- if (!size) {
- size = omap_vram_sdram_size;
- paddr = omap_vram_sdram_start;
- }
-
-#ifdef CONFIG_OMAP2_VRAM_SIZE
- if (!size) {
- size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
- paddr = 0;
- }
-#endif
-
- if (!size)
- return;
-
- size = ALIGN(size, SZ_2M);
-
- if (paddr) {
- if (paddr & ~PAGE_MASK) {
- pr_err("VRAM start address 0x%08x not page aligned\n",
- paddr);
- return;
- }
-
- if (!memblock_is_region_memory(paddr, size)) {
- pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n",
- paddr, paddr + size - 1);
- return;
- }
-
- if (memblock_is_region_reserved(paddr, size)) {
- pr_err("FB: failed to reserve VRAM - busy\n");
- return;
- }
-
- if (memblock_reserve(paddr, size) < 0) {
- pr_err("FB: failed to reserve VRAM - no memory\n");
- return;
- }
- } else {
- paddr = memblock_alloc(size, SZ_2M);
- }
-
- memblock_free(paddr, size);
- memblock_remove(paddr, size);
-
- omap_vram_add_region(paddr, size);
-
- pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
-}
-
-void __init omap_vram_set_sdram_vram(u32 size, u32 start)
-{
- omap_vram_sdram_start = start;
- omap_vram_sdram_size = size;
-}
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index 5d8fdac3b800..10560efeb35a 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -20,6 +20,7 @@
/*#define DEBUG*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
@@ -357,11 +358,9 @@ static int __init vrfb_probe(struct platform_device *pdev)
return -EINVAL;
}
- vrfb_base = devm_request_and_ioremap(&pdev->dev, mem);
- if (!vrfb_base) {
- dev_err(&pdev->dev, "can't ioremap vrfb memory\n");
- return -ENOMEM;
- }
+ vrfb_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(vrfb_base))
+ return PTR_ERR(vrfb_base);
num_ctxs = pdev->num_resources - 1;
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index d57cc58c5168..4b23af6e5c28 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -249,7 +249,7 @@ static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_no
info->fix.accel = FB_ACCEL_SUN_CGTHREE;
}
-static int __devinit p9100_probe(struct platform_device *op)
+static int p9100_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -326,7 +326,7 @@ out_err:
return err;
}
-static int __devexit p9100_remove(struct platform_device *op)
+static int p9100_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct p9100_par *par = info->par;
@@ -359,7 +359,7 @@ static struct platform_driver p9100_driver = {
.of_match_table = p9100_match,
},
.probe = p9100_probe,
- .remove = __devexit_p(p9100_remove),
+ .remove = p9100_remove,
};
static int __init p9100_init(void)
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index ae3caa6755c2..3d86bac62d3e 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -313,7 +313,8 @@ static void platinum_set_hardware(struct fb_info_platinum *pinfo)
/*
* Set misc info vars for this driver
*/
-static void __devinit platinum_init_info(struct fb_info *info, struct fb_info_platinum *pinfo)
+static void platinum_init_info(struct fb_info *info,
+ struct fb_info_platinum *pinfo)
{
/* Fill fb_info */
info->fbops = &platinumfb_ops;
@@ -338,7 +339,7 @@ static void __devinit platinum_init_info(struct fb_info *info, struct fb_info_pl
}
-static int __devinit platinum_init_fb(struct fb_info *info)
+static int platinum_init_fb(struct fb_info *info)
{
struct fb_info_platinum *pinfo = info->par;
struct fb_var_screeninfo var;
@@ -533,7 +534,7 @@ static int __init platinumfb_setup(char *options)
#define invalidate_cache(addr)
#endif
-static int __devinit platinumfb_probe(struct platform_device* odev)
+static int platinumfb_probe(struct platform_device* odev)
{
struct device_node *dp = odev->dev.of_node;
struct fb_info *info;
@@ -645,7 +646,7 @@ static int __devinit platinumfb_probe(struct platform_device* odev)
return rc;
}
-static int __devexit platinumfb_remove(struct platform_device* odev)
+static int platinumfb_remove(struct platform_device* odev)
{
struct fb_info *info = dev_get_drvdata(&odev->dev);
struct fb_info_platinum *pinfo = info->par;
@@ -683,7 +684,7 @@ static struct platform_driver platinum_driver =
.of_match_table = platinumfb_match,
},
.probe = platinumfb_probe,
- .remove = __devexit_p(platinumfb_remove),
+ .remove = platinumfb_remove,
};
static int __init platinumfb_init(void)
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index df31a24a5026..81354eeab021 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -67,7 +67,7 @@
* Driver data
*/
static int hwcursor = 1;
-static char *mode_option __devinitdata;
+static char *mode_option;
/*
* The XFree GLINT driver will (I think to implement hardware cursor
@@ -80,10 +80,10 @@ static char *mode_option __devinitdata;
*/
static bool lowhsync;
static bool lowvsync;
-static bool noaccel __devinitdata;
+static bool noaccel;
/* mtrr option */
#ifdef CONFIG_MTRR
-static bool nomtrr __devinitdata;
+static bool nomtrr;
#endif
/*
@@ -107,7 +107,7 @@ struct pm2fb_par
* Here we define the default structs fb_fix_screeninfo and fb_var_screeninfo
* if we don't use modedb.
*/
-static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
+static struct fb_fix_screeninfo pm2fb_fix = {
.id = "",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -120,7 +120,7 @@ static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
/*
* Default video mode. In case the modedb doesn't work.
*/
-static struct fb_var_screeninfo pm2fb_var __devinitdata = {
+static struct fb_var_screeninfo pm2fb_var = {
/* "640x480, 8 bpp @ 60 Hz */
.xres = 640,
.yres = 480,
@@ -1515,8 +1515,7 @@ static struct fb_ops pm2fb_ops = {
* @param pdev PCI device.
* @param id PCI device ID.
*/
-static int __devinit pm2fb_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pm2fb_par *default_par;
struct fb_info *info;
@@ -1727,7 +1726,7 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
*
* @param pdev PCI device to clean up.
*/
-static void __devexit pm2fb_remove(struct pci_dev *pdev)
+static void pm2fb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct fb_fix_screeninfo *fix = &info->fix;
@@ -1765,7 +1764,7 @@ static struct pci_driver pm2fb_driver = {
.name = "pm2fb",
.id_table = pm2fb_id_table,
.probe = pm2fb_probe,
- .remove = __devexit_p(pm2fb_remove),
+ .remove = pm2fb_remove,
};
MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 055e527a8e45..7718faa4a73b 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -56,12 +56,12 @@
* Driver data
*/
static int hwcursor = 1;
-static char *mode_option __devinitdata;
-static bool noaccel __devinitdata;
+static char *mode_option;
+static bool noaccel;
/* mtrr option */
#ifdef CONFIG_MTRR
-static bool nomtrr __devinitdata;
+static bool nomtrr;
#endif
/*
@@ -84,7 +84,7 @@ struct pm3_par {
* if we don't use modedb. If we do use modedb see pm3fb_init how to use it
* to get a fb_var_screeninfo. Otherwise define a default var as well.
*/
-static struct fb_fix_screeninfo pm3fb_fix __devinitdata = {
+static struct fb_fix_screeninfo pm3fb_fix = {
.id = "Permedia3",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -1229,7 +1229,7 @@ static struct fb_ops pm3fb_ops = {
/* mmio register are already mapped when this function is called */
/* the pm3fb_fix.smem_start is also set */
-static unsigned long __devinit pm3fb_size_memory(struct pm3_par *par)
+static unsigned long pm3fb_size_memory(struct pm3_par *par)
{
unsigned long memsize = 0;
unsigned long tempBypass, i, temp1, temp2;
@@ -1314,8 +1314,7 @@ static unsigned long __devinit pm3fb_size_memory(struct pm3_par *par)
return memsize;
}
-static int __devinit pm3fb_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct fb_info *info;
struct pm3_par *par;
@@ -1469,7 +1468,7 @@ static int __devinit pm3fb_probe(struct pci_dev *dev,
/*
* Cleanup
*/
-static void __devexit pm3fb_remove(struct pci_dev *dev)
+static void pm3fb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
@@ -1507,7 +1506,7 @@ static struct pci_driver pm3fb_driver = {
.name = "pm3fb",
.id_table = pm3fb_id_table,
.probe = pm3fb_probe,
- .remove = __devexit_p(pm3fb_remove),
+ .remove = pm3fb_remove,
};
MODULE_DEVICE_TABLE(pci, pm3fb_id_table);
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index 9b4a60b52a4c..d1e46cedb1f7 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -43,7 +43,7 @@ struct pmagbafb_par {
};
-static struct fb_var_screeninfo pmagbafb_defined __devinitdata = {
+static struct fb_var_screeninfo pmagbafb_defined = {
.xres = 1024,
.yres = 864,
.xres_virtual = 1024,
@@ -67,7 +67,7 @@ static struct fb_var_screeninfo pmagbafb_defined __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo pmagbafb_fix __devinitdata = {
+static struct fb_fix_screeninfo pmagbafb_fix = {
.id = "PMAG-BA",
.smem_len = (1024 * 1024),
.type = FB_TYPE_PACKED_PIXELS,
@@ -141,7 +141,7 @@ static void __init pmagbafb_erase_cursor(struct fb_info *info)
}
-static int __devinit pmagbafb_probe(struct device *dev)
+static int pmagbafb_probe(struct device *dev)
{
struct tc_dev *tdev = to_tc_dev(dev);
resource_size_t start, len;
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index 4e7a9c46e112..0e1317400328 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -44,7 +44,7 @@ struct pmagbbfb_par {
};
-static struct fb_var_screeninfo pmagbbfb_defined __devinitdata = {
+static struct fb_var_screeninfo pmagbbfb_defined = {
.bits_per_pixel = 8,
.red.length = 8,
.green.length = 8,
@@ -57,7 +57,7 @@ static struct fb_var_screeninfo pmagbbfb_defined __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo pmagbbfb_fix __devinitdata = {
+static struct fb_fix_screeninfo pmagbbfb_fix = {
.id = "PMAGB-BA",
.smem_len = (2048 * 1024),
.type = FB_TYPE_PACKED_PIXELS,
@@ -147,7 +147,7 @@ static void __init pmagbbfb_erase_cursor(struct fb_info *info)
/*
* Set up screen parameters.
*/
-static void __devinit pmagbbfb_screen_setup(struct fb_info *info)
+static void pmagbbfb_screen_setup(struct fb_info *info)
{
struct pmagbbfb_par *par = info->par;
@@ -179,9 +179,9 @@ static void __devinit pmagbbfb_screen_setup(struct fb_info *info)
/*
* Determine oscillator configuration.
*/
-static void __devinit pmagbbfb_osc_setup(struct fb_info *info)
+static void pmagbbfb_osc_setup(struct fb_info *info)
{
- static unsigned int pmagbbfb_freqs[] __devinitdata = {
+ static unsigned int pmagbbfb_freqs[] = {
130808, 119843, 104000, 92980, 74370, 72800,
69197, 66000, 65000, 50350, 36000, 32000, 25175
};
@@ -246,7 +246,7 @@ static void __devinit pmagbbfb_osc_setup(struct fb_info *info)
};
-static int __devinit pmagbbfb_probe(struct device *dev)
+static int pmagbbfb_probe(struct device *dev)
{
struct tc_dev *tdev = to_tc_dev(dev);
resource_size_t start, len;
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 0b340d6ff8a4..920c27bf3947 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -259,7 +259,7 @@ static const struct fb_videomode ps3fb_modedb[] = {
static int ps3fb_mode;
module_param(ps3fb_mode, int, 0);
-static char *mode_option __devinitdata;
+static char *mode_option;
static int ps3fb_cmp_mode(const struct fb_videomode *vmode,
const struct fb_var_screeninfo *var)
@@ -965,7 +965,7 @@ static struct fb_fix_screeninfo ps3fb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
-static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
+static int ps3fb_probe(struct ps3_system_bus_device *dev)
{
struct fb_info *info;
struct ps3fb_par *par;
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index bcd44c32a2ed..df07860563e6 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -112,11 +112,11 @@ enum { VO_PAL, VO_NTSC, VO_VGA };
enum { PAL_ARGB1555, PAL_RGB565, PAL_ARGB4444, PAL_ARGB8888 };
struct pvr2_params { unsigned int val; char *name; };
-static struct pvr2_params cables[] __devinitdata = {
+static struct pvr2_params cables[] = {
{ CT_VGA, "VGA" }, { CT_RGB, "RGB" }, { CT_COMPOSITE, "COMPOSITE" },
};
-static struct pvr2_params outputs[] __devinitdata = {
+static struct pvr2_params outputs[] = {
{ VO_PAL, "PAL" }, { VO_NTSC, "NTSC" }, { VO_VGA, "VGA" },
};
@@ -145,7 +145,7 @@ static struct pvr2fb_par {
static struct fb_info *fb_info;
-static struct fb_fix_screeninfo pvr2_fix __devinitdata = {
+static struct fb_fix_screeninfo pvr2_fix = {
.id = "NEC PowerVR2",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
@@ -154,7 +154,7 @@ static struct fb_fix_screeninfo pvr2_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo pvr2_var __devinitdata = {
+static struct fb_var_screeninfo pvr2_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -226,7 +226,7 @@ static struct fb_ops pvr2fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static struct fb_videomode pvr2_modedb[] __devinitdata = {
+static struct fb_videomode pvr2_modedb[] = {
/*
* Broadcast video modes (PAL and NTSC). I'm unfamiliar with
* PAL-M and PAL-N, but from what I've read both modes parallel PAL and
@@ -256,7 +256,7 @@ static struct fb_videomode pvr2_modedb[] __devinitdata = {
#define DEFMODE_VGA 2
static int defmode = DEFMODE_NTSC;
-static char *mode_option __devinitdata = NULL;
+static char *mode_option = NULL;
static inline void pvr2fb_set_pal_type(unsigned int type)
{
@@ -763,7 +763,7 @@ out_unmap:
* in for flexibility anyways. Who knows, maybe someone has tv-out on a
* PCI-based version of these things ;-)
*/
-static int __devinit pvr2fb_common_init(void)
+static int pvr2fb_common_init(void)
{
struct pvr2fb_par *par = currentpar;
unsigned long modememused, rev;
@@ -922,8 +922,8 @@ static void __exit pvr2fb_dc_exit(void)
#endif /* CONFIG_SH_DREAMCAST */
#ifdef CONFIG_PCI
-static int __devinit pvr2fb_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pvr2fb_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int ret;
@@ -953,7 +953,7 @@ static int __devinit pvr2fb_pci_probe(struct pci_dev *pdev,
return pvr2fb_common_init();
}
-static void __devexit pvr2fb_pci_remove(struct pci_dev *pdev)
+static void pvr2fb_pci_remove(struct pci_dev *pdev)
{
if (fb_info->screen_base) {
iounmap(fb_info->screen_base);
@@ -967,7 +967,7 @@ static void __devexit pvr2fb_pci_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
}
-static struct pci_device_id pvr2fb_pci_tbl[] __devinitdata = {
+static struct pci_device_id pvr2fb_pci_tbl[] = {
{ PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NEON250,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, },
@@ -979,7 +979,7 @@ static struct pci_driver pvr2fb_pci_driver = {
.name = "pvr2fb",
.id_table = pvr2fb_pci_tbl,
.probe = pvr2fb_pci_probe,
- .remove = __devexit_p(pvr2fb_pci_remove),
+ .remove = pvr2fb_pci_remove,
};
static int __init pvr2fb_pci_init(void)
@@ -993,8 +993,8 @@ static void __exit pvr2fb_pci_exit(void)
}
#endif /* CONFIG_PCI */
-static int __devinit pvr2_get_param(const struct pvr2_params *p, const char *s,
- int val, int size)
+static int pvr2_get_param(const struct pvr2_params *p, const char *s, int val,
+ int size)
{
int i;
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index f146089261f4..aa9bd1f76d60 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -560,7 +560,7 @@ static struct fb_ops pxa168fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __devinit pxa168fb_init_mode(struct fb_info *info,
+static int pxa168fb_init_mode(struct fb_info *info,
struct pxa168fb_mach_info *mi)
{
struct pxa168fb_info *fbi = info->par;
@@ -600,7 +600,7 @@ static int __devinit pxa168fb_init_mode(struct fb_info *info,
return ret;
}
-static int __devinit pxa168fb_probe(struct platform_device *pdev)
+static int pxa168fb_probe(struct platform_device *pdev)
{
struct pxa168fb_mach_info *mi;
struct fb_info *info = 0;
@@ -783,7 +783,7 @@ failed_put_clk:
return ret;
}
-static int __devexit pxa168fb_remove(struct platform_device *pdev)
+static int pxa168fb_remove(struct platform_device *pdev)
{
struct pxa168fb_info *fbi = platform_get_drvdata(pdev);
struct fb_info *info;
@@ -826,7 +826,7 @@ static struct platform_driver pxa168fb_driver = {
.owner = THIS_MODULE,
},
.probe = pxa168fb_probe,
- .remove = __devexit_p(pxa168fb_remove),
+ .remove = pxa168fb_remove,
};
module_platform_driver(pxa168fb_driver);
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 0b4ae0cebeda..6c984eacc7e3 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -574,8 +574,7 @@ free_buffers(struct platform_device *dev,
priv->free = NULL;
}
-static int __devinit
-pxa3xx_gcu_probe(struct platform_device *dev)
+static int pxa3xx_gcu_probe(struct platform_device *dev)
{
int i, ret, irq;
struct resource *r;
@@ -714,8 +713,7 @@ err_free_priv:
return ret;
}
-static int __devexit
-pxa3xx_gcu_remove(struct platform_device *dev)
+static int pxa3xx_gcu_remove(struct platform_device *dev)
{
struct pxa3xx_gcu_priv *priv = platform_get_drvdata(dev);
struct resource *r = priv->resource_mem;
@@ -737,7 +735,7 @@ pxa3xx_gcu_remove(struct platform_device *dev)
static struct platform_driver pxa3xx_gcu_driver = {
.probe = pxa3xx_gcu_probe,
- .remove = __devexit_p(pxa3xx_gcu_remove),
+ .remove = pxa3xx_gcu_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 4fa2ad43fd97..580f80cc586f 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -869,8 +869,8 @@ static struct fb_ops overlay_fb_ops = {
.fb_set_par = overlayfb_set_par,
};
-static void __devinit init_pxafb_overlay(struct pxafb_info *fbi,
- struct pxafb_layer *ofb, int id)
+static void init_pxafb_overlay(struct pxafb_info *fbi, struct pxafb_layer *ofb,
+ int id)
{
sprintf(ofb->fb.fix.id, "overlay%d", id + 1);
@@ -903,8 +903,8 @@ static inline int pxafb_overlay_supported(void)
return 0;
}
-static int __devinit pxafb_overlay_map_video_memory(struct pxafb_info *pxafb,
- struct pxafb_layer *ofb)
+static int pxafb_overlay_map_video_memory(struct pxafb_info *pxafb,
+ struct pxafb_layer *ofb)
{
/* We assume that user will use at most video_mem_size for overlay fb,
* anyway, it's useless to use 16bpp main plane and 24bpp overlay
@@ -927,7 +927,7 @@ static int __devinit pxafb_overlay_map_video_memory(struct pxafb_info *pxafb,
return 0;
}
-static void __devinit pxafb_overlay_init(struct pxafb_info *fbi)
+static void pxafb_overlay_init(struct pxafb_info *fbi)
{
int i, ret;
@@ -959,7 +959,7 @@ static void __devinit pxafb_overlay_init(struct pxafb_info *fbi)
pr_info("PXA Overlay driver loaded successfully!\n");
}
-static void __devexit pxafb_overlay_exit(struct pxafb_info *fbi)
+static void pxafb_overlay_exit(struct pxafb_info *fbi)
{
int i;
@@ -1706,7 +1706,7 @@ static const struct dev_pm_ops pxafb_pm_ops = {
};
#endif
-static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi)
+static int pxafb_init_video_memory(struct pxafb_info *fbi)
{
int size = PAGE_ALIGN(fbi->video_mem_size);
@@ -1789,7 +1789,7 @@ decode_mode:
fbi->video_mem_size = video_mem_size;
}
-static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
+static struct pxafb_info *pxafb_init_fbinfo(struct device *dev)
{
struct pxafb_info *fbi;
void *addr;
@@ -1853,7 +1853,7 @@ static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
}
#ifdef CONFIG_FB_PXA_PARAMETERS
-static int __devinit parse_opt_mode(struct device *dev, const char *this_opt)
+static int parse_opt_mode(struct device *dev, const char *this_opt)
{
struct pxafb_mach_info *inf = dev->platform_data;
@@ -1912,7 +1912,7 @@ done:
return 0;
}
-static int __devinit parse_opt(struct device *dev, char *this_opt)
+static int parse_opt(struct device *dev, char *this_opt)
{
struct pxafb_mach_info *inf = dev->platform_data;
struct pxafb_mode_info *mode = &inf->modes[0];
@@ -2012,7 +2012,7 @@ static int __devinit parse_opt(struct device *dev, char *this_opt)
return 0;
}
-static int __devinit pxafb_parse_options(struct device *dev, char *options)
+static int pxafb_parse_options(struct device *dev, char *options)
{
char *this_opt;
int ret;
@@ -2031,7 +2031,7 @@ static int __devinit pxafb_parse_options(struct device *dev, char *options)
return 0;
}
-static char g_options[256] __devinitdata = "";
+static char g_options[256] = "";
#ifndef MODULE
static int __init pxafb_setup_options(void)
@@ -2061,8 +2061,7 @@ MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
#ifdef DEBUG_VAR
/* Check for various illegal bit-combinations. Currently only
* a warning is given. */
-static void __devinit pxafb_check_options(struct device *dev,
- struct pxafb_mach_info *inf)
+static void pxafb_check_options(struct device *dev, struct pxafb_mach_info *inf)
{
if (inf->lcd_conn)
return;
@@ -2094,7 +2093,7 @@ static void __devinit pxafb_check_options(struct device *dev,
#define pxafb_check_options(...) do {} while (0)
#endif
-static int __devinit pxafb_probe(struct platform_device *dev)
+static int pxafb_probe(struct platform_device *dev)
{
struct pxafb_info *fbi;
struct pxafb_mach_info *inf;
@@ -2263,7 +2262,7 @@ failed:
return ret;
}
-static int __devexit pxafb_remove(struct platform_device *dev)
+static int pxafb_remove(struct platform_device *dev)
{
struct pxafb_info *fbi = platform_get_drvdata(dev);
struct resource *r;
@@ -2304,7 +2303,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
static struct platform_driver pxafb_driver = {
.probe = pxafb_probe,
- .remove = __devexit_p(pxafb_remove),
+ .remove = pxafb_remove,
.driver = {
.owner = THIS_MODULE,
.name = "pxa2xx-fb",
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index a104e8cd2f54..d44c7351de0f 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -27,7 +27,7 @@
#define Q40_PHYS_SCREEN_ADDR 0xFE800000
-static struct fb_fix_screeninfo q40fb_fix __devinitdata = {
+static struct fb_fix_screeninfo q40fb_fix = {
.id = "Q40",
.smem_len = 1024*1024,
.type = FB_TYPE_PACKED_PIXELS,
@@ -36,7 +36,7 @@ static struct fb_fix_screeninfo q40fb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo q40fb_var __devinitdata = {
+static struct fb_var_screeninfo q40fb_var = {
.xres = 1024,
.yres = 512,
.xres_virtual = 1024,
@@ -83,7 +83,7 @@ static struct fb_ops q40fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __devinit q40fb_probe(struct platform_device *dev)
+static int q40fb_probe(struct platform_device *dev)
{
struct fb_info *info;
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 90df1a60bd16..9536715b5a1b 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -205,28 +205,28 @@ MODULE_DEVICE_TABLE(pci, rivafb_pci_tbl);
* ------------------------------------------------------------------------- */
/* command line data, set in rivafb_setup() */
-static int flatpanel __devinitdata = -1; /* Autodetect later */
-static int forceCRTC __devinitdata = -1;
-static bool noaccel __devinitdata = 0;
+static int flatpanel = -1; /* Autodetect later */
+static int forceCRTC = -1;
+static bool noaccel = 0;
#ifdef CONFIG_MTRR
-static bool nomtrr __devinitdata = 0;
+static bool nomtrr = 0;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
-static int backlight __devinitdata = 1;
+static int backlight = 1;
#else
-static int backlight __devinitdata = 0;
+static int backlight = 0;
#endif
-static char *mode_option __devinitdata = NULL;
+static char *mode_option = NULL;
static bool strictmode = 0;
-static struct fb_fix_screeninfo __devinitdata rivafb_fix = {
+static struct fb_fix_screeninfo rivafb_fix = {
.type = FB_TYPE_PACKED_PIXELS,
.xpanstep = 1,
.ypanstep = 1,
};
-static struct fb_var_screeninfo __devinitdata rivafb_default_var = {
+static struct fb_var_screeninfo rivafb_default_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -1709,7 +1709,7 @@ static struct fb_ops riva_fb_ops = {
.fb_sync = rivafb_sync,
};
-static int __devinit riva_set_fbinfo(struct fb_info *info)
+static int riva_set_fbinfo(struct fb_info *info)
{
unsigned int cmap_len;
struct riva_par *par = info->par;
@@ -1747,7 +1747,7 @@ static int __devinit riva_set_fbinfo(struct fb_info *info)
}
#ifdef CONFIG_PPC_OF
-static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
+static int riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
{
struct riva_par *par = info->par;
struct device_node *dp;
@@ -1780,7 +1780,7 @@ static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
#endif /* CONFIG_PPC_OF */
#if defined(CONFIG_FB_RIVA_I2C) && !defined(CONFIG_PPC_OF)
-static int __devinit riva_get_EDID_i2c(struct fb_info *info)
+static int riva_get_EDID_i2c(struct fb_info *info)
{
struct riva_par *par = info->par;
struct fb_var_screeninfo var;
@@ -1803,8 +1803,8 @@ static int __devinit riva_get_EDID_i2c(struct fb_info *info)
}
#endif /* CONFIG_FB_RIVA_I2C */
-static void __devinit riva_update_default_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
+static void riva_update_default_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
struct fb_monspecs *specs = &info->monspecs;
struct fb_videomode modedb;
@@ -1836,7 +1836,7 @@ static void __devinit riva_update_default_var(struct fb_var_screeninfo *var,
}
-static void __devinit riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
+static void riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
{
NVTRACE_ENTER();
#ifdef CONFIG_PPC_OF
@@ -1850,7 +1850,7 @@ static void __devinit riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
}
-static void __devinit riva_get_edidinfo(struct fb_info *info)
+static void riva_get_edidinfo(struct fb_info *info)
{
struct fb_var_screeninfo *var = &rivafb_default_var;
struct riva_par *par = info->par;
@@ -1871,7 +1871,7 @@ static void __devinit riva_get_edidinfo(struct fb_info *info)
*
* ------------------------------------------------------------------------- */
-static u32 __devinit riva_get_arch(struct pci_dev *pd)
+static u32 riva_get_arch(struct pci_dev *pd)
{
u32 arch = 0;
@@ -1909,8 +1909,7 @@ static u32 __devinit riva_get_arch(struct pci_dev *pd)
return arch;
}
-static int __devinit rivafb_probe(struct pci_dev *pd,
- const struct pci_device_id *ent)
+static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
{
struct riva_par *default_par;
struct fb_info *info;
@@ -2105,7 +2104,7 @@ err_ret:
return ret;
}
-static void __devexit rivafb_remove(struct pci_dev *pd)
+static void rivafb_remove(struct pci_dev *pd)
{
struct fb_info *info = pci_get_drvdata(pd);
struct riva_par *par = info->par;
@@ -2145,7 +2144,7 @@ static void __devexit rivafb_remove(struct pci_dev *pd)
* ------------------------------------------------------------------------- */
#ifndef MODULE
-static int __devinit rivafb_setup(char *options)
+static int rivafb_setup(char *options)
{
char *this_opt;
@@ -2186,7 +2185,7 @@ static struct pci_driver rivafb_driver = {
.name = "rivafb",
.id_table = rivafb_pci_tbl,
.probe = rivafb_probe,
- .remove = __devexit_p(rivafb_remove),
+ .remove = rivafb_remove,
};
@@ -2197,7 +2196,7 @@ static struct pci_driver rivafb_driver = {
*
* ------------------------------------------------------------------------- */
-static int __devinit rivafb_init(void)
+static int rivafb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c
index 167400e2a182..6a183375ced1 100644
--- a/drivers/video/riva/rivafb-i2c.c
+++ b/drivers/video/riva/rivafb-i2c.c
@@ -86,9 +86,8 @@ static int riva_gpio_getsda(void* data)
return val;
}
-static int __devinit riva_setup_i2c_bus(struct riva_i2c_chan *chan,
- const char *name,
- unsigned int i2c_class)
+static int riva_setup_i2c_bus(struct riva_i2c_chan *chan, const char *name,
+ unsigned int i2c_class)
{
int rc;
@@ -124,7 +123,7 @@ static int __devinit riva_setup_i2c_bus(struct riva_i2c_chan *chan,
return rc;
}
-void __devinit riva_create_i2c_busses(struct riva_par *par)
+void riva_create_i2c_busses(struct riva_par *par)
{
par->chan[0].par = par;
par->chan[1].par = par;
@@ -150,7 +149,7 @@ void riva_delete_i2c_busses(struct riva_par *par)
}
}
-int __devinit riva_probe_i2c_connector(struct riva_par *par, int conn, u8 **out_edid)
+int riva_probe_i2c_connector(struct riva_par *par, int conn, u8 **out_edid)
{
u8 *edid = NULL;
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index 28b1c6c3d8ac..76d9053d88c1 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -84,7 +84,7 @@ static const char *s1d13xxxfb_prod_names[] = {
/*
* here we define the default struct fb_fix_screeninfo
*/
-static struct fb_fix_screeninfo __devinitdata s1d13xxxfb_fix = {
+static struct fb_fix_screeninfo s1d13xxxfb_fix = {
.id = S1D_FBID,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -622,7 +622,7 @@ static struct fb_ops s1d13xxxfb_fbops = {
.fb_imageblit = cfb_imageblit,
};
-static int s1d13xxxfb_width_tab[2][4] __devinitdata = {
+static int s1d13xxxfb_width_tab[2][4] = {
{4, 8, 16, -1},
{9, 12, 18, -1},
};
@@ -642,8 +642,7 @@ static int s1d13xxxfb_width_tab[2][4] __devinitdata = {
* Note: some of the hardcoded values here might need some love to
* work on various chips, and might need to no longer be hardcoded.
*/
-static void __devinit
-s1d13xxxfb_fetch_hw_state(struct fb_info *info)
+static void s1d13xxxfb_fetch_hw_state(struct fb_info *info)
{
struct fb_var_screeninfo *var = &info->var;
struct fb_fix_screeninfo *fix = &info->fix;
@@ -764,8 +763,7 @@ s1d13xxxfb_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit
-s1d13xxxfb_probe(struct platform_device *pdev)
+static int s1d13xxxfb_probe(struct platform_device *pdev)
{
struct s1d13xxxfb_par *default_par;
struct fb_info *info;
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 2ed7b633bbd9..968a62571df7 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -189,7 +189,7 @@ struct s3c_fb_vsync {
/**
* struct s3c_fb - overall hardware state of the hardware
- * @slock: The spinlock protection for this data sturucture.
+ * @slock: The spinlock protection for this data structure.
* @dev: The device that we bound to, for printing, etc.
* @bus_clk: The clk (hclk) feeding our interface and possibly pixclk.
* @lcd_clk: The clk (sclk) feeding pixclk.
@@ -268,10 +268,10 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
case 8:
if (sfb->variant.palette[win->index] != 0) {
/* non palletised, A:1,R:2,G:3,B:2 mode */
- var->red.offset = 4;
+ var->red.offset = 5;
var->green.offset = 2;
var->blue.offset = 0;
- var->red.length = 5;
+ var->red.length = 2;
var->green.length = 3;
var->blue.length = 2;
var->transp.offset = 7;
@@ -288,6 +288,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
/* 666 with one bit alpha/transparency */
var->transp.offset = 18;
var->transp.length = 1;
+ /* drop through */
case 18:
var->bits_per_pixel = 32;
@@ -329,6 +330,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
default:
dev_err(sfb->dev, "invalid bpp\n");
+ return -EINVAL;
}
dev_dbg(sfb->dev, "%s: verified parameters\n", __func__);
@@ -1079,8 +1081,7 @@ static void s3c_fb_missing_pixclock(struct fb_videomode *mode)
*
* Allocate memory for the given framebuffer.
*/
-static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb,
- struct s3c_fb_win *win)
+static int s3c_fb_alloc_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
{
struct s3c_fb_pd_win *windata = win->windata;
unsigned int real_size, virt_size, size;
@@ -1170,9 +1171,9 @@ static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
* Allocate and do the basic initialisation for one of the hardware's graphics
* windows.
*/
-static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
- struct s3c_fb_win_variant *variant,
- struct s3c_fb_win **res)
+static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
+ struct s3c_fb_win_variant *variant,
+ struct s3c_fb_win **res)
{
struct fb_var_screeninfo *var;
struct fb_videomode initmode;
@@ -1358,7 +1359,7 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
}
}
-static int __devinit s3c_fb_probe(struct platform_device *pdev)
+static int s3c_fb_probe(struct platform_device *pdev)
{
const struct platform_device_id *platid;
struct s3c_fb_driverdata *fbdrv;
@@ -1420,10 +1421,9 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
pm_runtime_enable(sfb->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sfb->regs = devm_request_and_ioremap(dev, res);
- if (!sfb->regs) {
- dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
+ sfb->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sfb->regs)) {
+ ret = PTR_ERR(sfb->regs);
goto err_lcd_clk;
}
@@ -1519,7 +1519,7 @@ err_bus_clk:
* Shutdown and then release all the resources that the driver allocated
* on initialisation.
*/
-static int __devexit s3c_fb_remove(struct platform_device *pdev)
+static int s3c_fb_remove(struct platform_device *pdev)
{
struct s3c_fb *sfb = platform_get_drvdata(pdev);
int win;
@@ -1544,8 +1544,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int s3c_fb_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb *sfb = dev_get_drvdata(dev);
struct s3c_fb_win *win;
int win_no;
@@ -1572,8 +1571,7 @@ static int s3c_fb_suspend(struct device *dev)
static int s3c_fb_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb *sfb = dev_get_drvdata(dev);
struct s3c_fb_platdata *pd = sfb->pdata;
struct s3c_fb_win *win;
int win_no;
@@ -1623,7 +1621,7 @@ static int s3c_fb_resume(struct device *dev)
if (!win)
continue;
- dev_dbg(&pdev->dev, "resuming window %d\n", win_no);
+ dev_dbg(dev, "resuming window %d\n", win_no);
s3c_fb_set_par(win->fbinfo);
}
@@ -1636,8 +1634,7 @@ static int s3c_fb_resume(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
static int s3c_fb_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb *sfb = dev_get_drvdata(dev);
if (!sfb->variant.has_clksel)
clk_disable_unprepare(sfb->lcd_clk);
@@ -1649,8 +1646,7 @@ static int s3c_fb_runtime_suspend(struct device *dev)
static int s3c_fb_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb *sfb = dev_get_drvdata(dev);
struct s3c_fb_platdata *pd = sfb->pdata;
clk_prepare_enable(sfb->bus_clk);
@@ -1910,7 +1906,7 @@ static struct s3c_fb_driverdata s3c_fb_data_exynos4 = {
static struct s3c_fb_driverdata s3c_fb_data_exynos5 = {
.variant = {
.nr_windows = 5,
- .vidtcon = VIDTCON0,
+ .vidtcon = FIMD_V8_VIDTCON0,
.wincon = WINCON(0),
.winmap = WINxMAP(0),
.keycon = WKEYCON,
@@ -2037,7 +2033,7 @@ static const struct dev_pm_ops s3cfb_pm_ops = {
static struct platform_driver s3c_fb_driver = {
.probe = s3c_fb_probe,
- .remove = __devexit_p(s3c_fb_remove),
+ .remove = s3c_fb_remove,
.id_table = s3c_fb_driver_ids,
.driver = {
.name = "s3c-fb",
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 1083bb9469ee..76a0e7fbd692 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -637,7 +637,7 @@ static struct fb_ops s3c2410fb_ops = {
* cache. Once this area is remapped, all virtual memory
* access to the video memory should occur at the new region.
*/
-static int __devinit s3c2410fb_map_video_memory(struct fb_info *info)
+static int s3c2410fb_map_video_memory(struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
dma_addr_t map_dma;
@@ -819,8 +819,8 @@ static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
static const char driver_name[] = "s3c2410fb";
-static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
- enum s3c_drv_type drv_type)
+static int s3c24xxfb_probe(struct platform_device *pdev,
+ enum s3c_drv_type drv_type)
{
struct s3c2410fb_info *info;
struct s3c2410fb_display *display;
@@ -1010,12 +1010,12 @@ dealloc_fb:
return ret;
}
-static int __devinit s3c2410fb_probe(struct platform_device *pdev)
+static int s3c2410fb_probe(struct platform_device *pdev)
{
return s3c24xxfb_probe(pdev, DRV_S3C2410);
}
-static int __devinit s3c2412fb_probe(struct platform_device *pdev)
+static int s3c2412fb_probe(struct platform_device *pdev)
{
return s3c24xxfb_probe(pdev, DRV_S3C2412);
}
@@ -1024,7 +1024,7 @@ static int __devinit s3c2412fb_probe(struct platform_device *pdev)
/*
* Cleanup
*/
-static int __devexit s3c2410fb_remove(struct platform_device *pdev)
+static int s3c2410fb_remove(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
struct s3c2410fb_info *info = fbinfo->par;
@@ -1101,7 +1101,7 @@ static int s3c2410fb_resume(struct platform_device *dev)
static struct platform_driver s3c2410fb_driver = {
.probe = s3c2410fb_probe,
- .remove = __devexit_p(s3c2410fb_remove),
+ .remove = s3c2410fb_remove,
.suspend = s3c2410fb_suspend,
.resume = s3c2410fb_resume,
.driver = {
@@ -1112,7 +1112,7 @@ static struct platform_driver s3c2410fb_driver = {
static struct platform_driver s3c2412fb_driver = {
.probe = s3c2412fb_probe,
- .remove = __devexit_p(s3c2410fb_remove),
+ .remove = s3c2410fb_remove,
.suspend = s3c2410fb_suspend,
.resume = s3c2410fb_resume,
.driver = {
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 1d007366b917..47ca86c5c6c0 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -153,10 +153,10 @@ static const struct svga_timing_regs s3_timing_regs = {
/* Module parameters */
-static char *mode_option __devinitdata;
+static char *mode_option;
#ifdef CONFIG_MTRR
-static int mtrr __devinitdata = 1;
+static int mtrr = 1;
#endif
static int fasttext = 1;
@@ -255,7 +255,7 @@ static int s3fb_ddc_getsda(void *data)
return !!(s3fb_ddc_read(par) & DDC_SDA_IN);
}
-static int __devinit s3fb_setup_ddc_bus(struct fb_info *info)
+static int s3fb_setup_ddc_bus(struct fb_info *info)
{
struct s3fb_info *par = info->par;
@@ -1066,7 +1066,7 @@ static struct fb_ops s3fb_ops = {
/* ------------------------------------------------------------------------- */
-static int __devinit s3_identification(struct s3fb_info *par)
+static int s3_identification(struct s3fb_info *par)
{
int chip = par->chip;
@@ -1122,7 +1122,7 @@ static int __devinit s3_identification(struct s3fb_info *par)
/* PCI probe */
-static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pci_bus_region bus_reg;
struct resource vga_res;
@@ -1403,7 +1403,7 @@ err_enable_device:
/* PCI remove */
-static void __devexit s3_pci_remove(struct pci_dev *dev)
+static void s3_pci_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct s3fb_info __maybe_unused *par = info->par;
@@ -1509,7 +1509,7 @@ static int s3_pci_resume(struct pci_dev* dev)
/* List of boards that we are trying to support */
-static struct pci_device_id s3_devices[] __devinitdata = {
+static struct pci_device_id s3_devices[] = {
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8810), .driver_data = CHIP_XXX_TRIO},
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8811), .driver_data = CHIP_XXX_TRIO},
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8812), .driver_data = CHIP_M65_AURORA64VP},
@@ -1537,7 +1537,7 @@ static struct pci_driver s3fb_pci_driver = {
.name = "s3fb",
.id_table = s3_devices,
.probe = s3_pci_probe,
- .remove = __devexit_p(s3_pci_remove),
+ .remove = s3_pci_remove,
.suspend = s3_pci_suspend,
.resume = s3_pci_resume,
};
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index b6325848ad61..cfbde5e85cbf 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -1090,7 +1090,7 @@ static int sa1100fb_resume(struct platform_device *dev)
* cache. Once this area is remapped, all virtual memory
* access to the video memory should occur at the new region.
*/
-static int __devinit sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
+static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
{
/*
* We reserve one page for the palette, plus the size
@@ -1116,7 +1116,7 @@ static int __devinit sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
}
/* Fake monspecs to fill in fbinfo structure */
-static struct fb_monspecs monspecs __devinitdata = {
+static struct fb_monspecs monspecs = {
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 50,
@@ -1124,7 +1124,7 @@ static struct fb_monspecs monspecs __devinitdata = {
};
-static struct sa1100fb_info * __devinit sa1100fb_init_fbinfo(struct device *dev)
+static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
{
struct sa1100fb_mach_info *inf = dev->platform_data;
struct sa1100fb_info *fbi;
@@ -1205,7 +1205,7 @@ static struct sa1100fb_info * __devinit sa1100fb_init_fbinfo(struct device *dev)
return fbi;
}
-static int __devinit sa1100fb_probe(struct platform_device *pdev)
+static int sa1100fb_probe(struct platform_device *pdev)
{
struct sa1100fb_info *fbi;
struct resource *res;
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index f4f53b082d05..741b2395d01e 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -69,7 +69,7 @@
/* --------------------------------------------------------------------- */
-static char *mode_option __devinitdata = NULL;
+static char *mode_option = NULL;
#ifdef MODULE
@@ -1664,7 +1664,7 @@ static struct fb_ops savagefb_ops = {
/* --------------------------------------------------------------------- */
-static struct fb_var_screeninfo __devinitdata savagefb_var800x600x8 = {
+static struct fb_var_screeninfo savagefb_var800x600x8 = {
.accel_flags = FB_ACCELF_TEXT,
.xres = 800,
.yres = 600,
@@ -1715,7 +1715,7 @@ static void savage_disable_mmio(struct savagefb_par *par)
}
-static int __devinit savage_map_mmio(struct fb_info *info)
+static int savage_map_mmio(struct fb_info *info)
{
struct savagefb_par *par = info->par;
DBG("savage_map_mmio");
@@ -1761,8 +1761,7 @@ static void savage_unmap_mmio(struct fb_info *info)
}
}
-static int __devinit savage_map_video(struct fb_info *info,
- int video_len)
+static int savage_map_video(struct fb_info *info, int video_len)
{
struct savagefb_par *par = info->par;
int resource;
@@ -2052,9 +2051,8 @@ static int savage_init_hw(struct savagefb_par *par)
return videoRambytes;
}
-static int __devinit savage_init_fb_info(struct fb_info *info,
- struct pci_dev *dev,
- const struct pci_device_id *id)
+static int savage_init_fb_info(struct fb_info *info, struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct savagefb_par *par = info->par;
int err = 0;
@@ -2178,8 +2176,7 @@ static int __devinit savage_init_fb_info(struct fb_info *info,
/* --------------------------------------------------------------------- */
-static int __devinit savagefb_probe(struct pci_dev* dev,
- const struct pci_device_id* id)
+static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct fb_info *info;
struct savagefb_par *par;
@@ -2340,7 +2337,7 @@ static int __devinit savagefb_probe(struct pci_dev* dev,
return err;
}
-static void __devexit savagefb_remove(struct pci_dev *dev)
+static void savagefb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
@@ -2449,7 +2446,7 @@ static int savagefb_resume(struct pci_dev* dev)
}
-static struct pci_device_id savagefb_devices[] __devinitdata = {
+static struct pci_device_id savagefb_devices[] = {
{PCI_VENDOR_ID_S3, PCI_CHIP_SUPSAV_MX128,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_SUPERSAVAGE},
@@ -2530,7 +2527,7 @@ static struct pci_driver savagefb_driver = {
.probe = savagefb_probe,
.suspend = savagefb_suspend,
.resume = savagefb_resume,
- .remove = __devexit_p(savagefb_remove)
+ .remove = savagefb_remove,
};
/* **************************** exit-time only **************************** */
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index 53455f295510..2331fadc272b 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -47,7 +47,7 @@ static int ywrap = 0;
static int flatpanel_id = -1;
-static struct fb_fix_screeninfo sgivwfb_fix __devinitdata = {
+static struct fb_fix_screeninfo sgivwfb_fix = {
.id = "SGI Vis WS FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -57,7 +57,7 @@ static struct fb_fix_screeninfo sgivwfb_fix __devinitdata = {
.line_length = 640,
};
-static struct fb_var_screeninfo sgivwfb_var __devinitdata = {
+static struct fb_var_screeninfo sgivwfb_var = {
/* 640x480, 8 bpp */
.xres = 640,
.yres = 480,
@@ -79,7 +79,7 @@ static struct fb_var_screeninfo sgivwfb_var __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED
};
-static struct fb_var_screeninfo sgivwfb_var1600sw __devinitdata = {
+static struct fb_var_screeninfo sgivwfb_var1600sw = {
/* 1600x1024, 8 bpp */
.xres = 1600,
.yres = 1024,
@@ -745,7 +745,7 @@ int __init sgivwfb_setup(char *options)
/*
* Initialisation
*/
-static int __devinit sgivwfb_probe(struct platform_device *dev)
+static int sgivwfb_probe(struct platform_device *dev)
{
struct sgivw_par *par;
struct fb_info *info;
@@ -825,7 +825,7 @@ fail_ioremap_regs:
return -ENXIO;
}
-static int __devexit sgivwfb_remove(struct platform_device *dev)
+static int sgivwfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -845,7 +845,7 @@ static int __devexit sgivwfb_remove(struct platform_device *dev)
static struct platform_driver sgivwfb_driver = {
.probe = sgivwfb_probe,
- .remove = __devexit_p(sgivwfb_remove),
+ .remove = sgivwfb_remove,
.driver = {
.name = "sgivwfb",
},
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 83b16e237a0e..5fbb0c7ab0c8 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -431,7 +431,7 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
return 0;
}
-static int __devinit sh7760fb_probe(struct platform_device *pdev)
+static int sh7760fb_probe(struct platform_device *pdev)
{
struct fb_info *info;
struct resource *res;
@@ -557,7 +557,7 @@ out_fb:
return ret;
}
-static int __devexit sh7760fb_remove(struct platform_device *dev)
+static int sh7760fb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
struct sh7760fb_par *par = info->par;
@@ -582,7 +582,7 @@ static struct platform_driver sh7760_lcdc_driver = {
.owner = THIS_MODULE,
},
.probe = sh7760fb_probe,
- .remove = __devexit_p(sh7760fb_remove),
+ .remove = sh7760fb_remove,
};
module_platform_driver(sh7760_lcdc_driver);
diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c
index 3951fdae5f68..701b461cf8a9 100644
--- a/drivers/video/sh_mipi_dsi.c
+++ b/drivers/video/sh_mipi_dsi.c
@@ -127,13 +127,12 @@ static void sh_mipi_shutdown(struct platform_device *pdev)
sh_mipi_dsi_enable(mipi, false);
}
-static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
+static int sh_mipi_setup(struct sh_mipi *mipi, const struct fb_videomode *mode)
{
void __iomem *base = mipi->base;
- struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan;
+ struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
u32 pctype, datatype, pixfmt, linelength, vmctr2;
u32 tmp, top, bottom, delay, div;
- bool yuv;
int bpp;
/*
@@ -146,95 +145,79 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
pctype = 0;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = ch->lcd_modes[0].xres * 3;
- yuv = false;
+ linelength = mode->xres * 3;
break;
case MIPI_RGB565:
pctype = 1;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = ch->lcd_modes[0].xres * 2;
- yuv = false;
+ linelength = mode->xres * 2;
break;
case MIPI_RGB666_LP:
pctype = 2;
datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = ch->lcd_modes[0].xres * 3;
- yuv = false;
+ linelength = mode->xres * 3;
break;
case MIPI_RGB666:
pctype = 3;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
- linelength = (ch->lcd_modes[0].xres * 18 + 7) / 8;
- yuv = false;
+ linelength = (mode->xres * 18 + 7) / 8;
break;
case MIPI_BGR888:
pctype = 8;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = ch->lcd_modes[0].xres * 3;
- yuv = false;
+ linelength = mode->xres * 3;
break;
case MIPI_BGR565:
pctype = 9;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = ch->lcd_modes[0].xres * 2;
- yuv = false;
+ linelength = mode->xres * 2;
break;
case MIPI_BGR666_LP:
pctype = 0xa;
datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = ch->lcd_modes[0].xres * 3;
- yuv = false;
+ linelength = mode->xres * 3;
break;
case MIPI_BGR666:
pctype = 0xb;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
- linelength = (ch->lcd_modes[0].xres * 18 + 7) / 8;
- yuv = false;
+ linelength = (mode->xres * 18 + 7) / 8;
break;
case MIPI_YUYV:
pctype = 4;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = ch->lcd_modes[0].xres * 2;
- yuv = true;
+ linelength = mode->xres * 2;
break;
case MIPI_UYVY:
pctype = 5;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = ch->lcd_modes[0].xres * 2;
- yuv = true;
+ linelength = mode->xres * 2;
break;
case MIPI_YUV420_L:
pctype = 6;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
- linelength = (ch->lcd_modes[0].xres * 12 + 7) / 8;
- yuv = true;
+ linelength = (mode->xres * 12 + 7) / 8;
break;
case MIPI_YUV420:
pctype = 7;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
/* Length of U/V line */
- linelength = (ch->lcd_modes[0].xres + 1) / 2;
- yuv = true;
+ linelength = (mode->xres + 1) / 2;
break;
default:
return -EINVAL;
}
- if ((yuv && ch->interface_type != YUV422) ||
- (!yuv && ch->interface_type != RGB24))
- return -EINVAL;
-
if (!pdata->lane)
return -EINVAL;
@@ -293,7 +276,7 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
*/
iowrite32(0x00000006, mipi->linkbase + DTCTR);
/* VSYNC width = 2 (<< 17) */
- iowrite32((ch->lcd_modes[0].vsync_len << pdata->vsynw_offset) |
+ iowrite32((mode->vsync_len << pdata->vsynw_offset) |
(pdata->clksrc << 16) | (pctype << 12) | datatype,
mipi->linkbase + VMCTR1);
@@ -327,7 +310,7 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
top = linelength << 16; /* RGBLEN */
bottom = 0x00000001;
if (pdata->flags & SH_MIPI_DSI_HSABM) /* HSALEN */
- bottom = (pdata->lane * ch->lcd_modes[0].hsync_len) - 10;
+ bottom = (pdata->lane * mode->hsync_len) - 10;
iowrite32(top | bottom , mipi->linkbase + VMLEN1);
/*
@@ -347,18 +330,18 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
div = 2;
if (pdata->flags & SH_MIPI_DSI_HFPBM) { /* HBPLEN */
- top = ch->lcd_modes[0].hsync_len + ch->lcd_modes[0].left_margin;
+ top = mode->hsync_len + mode->left_margin;
top = ((pdata->lane * top / div) - 10) << 16;
}
if (pdata->flags & SH_MIPI_DSI_HBPBM) { /* HFPLEN */
- bottom = ch->lcd_modes[0].right_margin;
+ bottom = mode->right_margin;
bottom = (pdata->lane * bottom / div) - 12;
}
- bpp = linelength / ch->lcd_modes[0].xres; /* byte / pixel */
+ bpp = linelength / mode->xres; /* byte / pixel */
if ((pdata->lane / div) > bpp) {
- tmp = ch->lcd_modes[0].xres / bpp; /* output cycle */
- tmp = ch->lcd_modes[0].xres - tmp; /* (input - output) cycle */
+ tmp = mode->xres / bpp; /* output cycle */
+ tmp = mode->xres - tmp; /* (input - output) cycle */
delay = (pdata->lane * tmp);
}
@@ -369,7 +352,7 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
/* setup LCD panel */
/* cf. drivers/video/omap/lcd_mipid.c */
- sh_mipi_dcs(ch->chan, MIPI_DCS_EXIT_SLEEP_MODE);
+ sh_mipi_dcs(pdata->channel, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);
/*
* [7] - Page Address Mode
@@ -381,11 +364,11 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
* [1] - Flip Horizontal
* [0] - Flip Vertical
*/
- sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ sh_mipi_dcs_param(pdata->channel, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
/* cf. set_data_lines() */
- sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_PIXEL_FORMAT,
+ sh_mipi_dcs_param(pdata->channel, MIPI_DCS_SET_PIXEL_FORMAT,
pixfmt << 4);
- sh_mipi_dcs(ch->chan, MIPI_DCS_SET_DISPLAY_ON);
+ sh_mipi_dcs(pdata->channel, MIPI_DCS_SET_DISPLAY_ON);
/* Enable timeout counters */
iowrite32(0x00000f00, base + DSICTRL);
@@ -405,7 +388,7 @@ static int mipi_display_on(struct sh_mobile_lcdc_entity *entity)
if (ret < 0)
goto mipi_display_on_fail1;
- ret = sh_mipi_setup(mipi, pdata);
+ ret = sh_mipi_setup(mipi, &entity->def_mode);
if (ret < 0)
goto mipi_display_on_fail2;
@@ -550,7 +533,7 @@ efindslot:
return ret;
}
-static int __devexit sh_mipi_remove(struct platform_device *pdev)
+static int sh_mipi_remove(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -591,7 +574,7 @@ static int __devexit sh_mipi_remove(struct platform_device *pdev)
}
static struct platform_driver sh_mipi_driver = {
- .remove = __devexit_p(sh_mipi_remove),
+ .remove = sh_mipi_remove,
.shutdown = sh_mipi_shutdown,
.driver = {
.name = "sh-mipi-dsi",
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 699487c287b2..63203acef812 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -438,7 +438,7 @@ static unsigned long lcdc_sys_read_data(void *handle)
return lcdc_read(ch->lcdc, _LDDRDR) & LDDRDR_DRD_MASK;
}
-struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
+static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
lcdc_sys_write_index,
lcdc_sys_write_data,
lcdc_sys_read_data,
@@ -586,8 +586,8 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
* Just turn on, if we run a resume here, the
* logo disappears.
*/
- info->var.width = monspec->max_x * 10;
- info->var.height = monspec->max_y * 10;
+ info->var.width = ch->display.width;
+ info->var.height = ch->display.height;
sh_mobile_lcdc_display_on(ch);
} else {
/* New monitor or have to wake up */
@@ -1614,6 +1614,15 @@ static int sh_mobile_lcdc_overlay_blank(int blank, struct fb_info *info)
return 1;
}
+static int
+sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ return dma_mmap_coherent(ovl->channel->lcdc->dev, vma, ovl->fb_mem,
+ ovl->dma_handle, ovl->fb_size);
+}
+
static struct fb_ops sh_mobile_lcdc_overlay_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
@@ -1626,6 +1635,7 @@ static struct fb_ops sh_mobile_lcdc_overlay_ops = {
.fb_ioctl = sh_mobile_lcdc_overlay_ioctl,
.fb_check_var = sh_mobile_lcdc_overlay_check_var,
.fb_set_par = sh_mobile_lcdc_overlay_set_par,
+ .fb_mmap = sh_mobile_lcdc_overlay_mmap,
};
static void
@@ -1639,7 +1649,7 @@ sh_mobile_lcdc_overlay_fb_unregister(struct sh_mobile_lcdc_overlay *ovl)
unregister_framebuffer(ovl->info);
}
-static int __devinit
+static int
sh_mobile_lcdc_overlay_fb_register(struct sh_mobile_lcdc_overlay *ovl)
{
struct sh_mobile_lcdc_priv *lcdc = ovl->channel->lcdc;
@@ -1678,7 +1688,7 @@ sh_mobile_lcdc_overlay_fb_cleanup(struct sh_mobile_lcdc_overlay *ovl)
framebuffer_release(info);
}
-static int __devinit
+static int
sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
{
struct sh_mobile_lcdc_priv *priv = ovl->channel->lcdc;
@@ -2093,6 +2103,15 @@ static int sh_mobile_lcdc_blank(int blank, struct fb_info *info)
return 0;
}
+static int
+sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct sh_mobile_lcdc_chan *ch = info->par;
+
+ return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem,
+ ch->dma_handle, ch->fb_size);
+}
+
static struct fb_ops sh_mobile_lcdc_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = sh_mobile_lcdc_setcolreg,
@@ -2108,6 +2127,7 @@ static struct fb_ops sh_mobile_lcdc_ops = {
.fb_release = sh_mobile_lcdc_release,
.fb_check_var = sh_mobile_lcdc_check_var,
.fb_set_par = sh_mobile_lcdc_set_par,
+ .fb_mmap = sh_mobile_lcdc_mmap,
};
static void
@@ -2117,7 +2137,7 @@ sh_mobile_lcdc_channel_fb_unregister(struct sh_mobile_lcdc_chan *ch)
unregister_framebuffer(ch->info);
}
-static int __devinit
+static int
sh_mobile_lcdc_channel_fb_register(struct sh_mobile_lcdc_chan *ch)
{
struct fb_info *info = ch->info;
@@ -2165,9 +2185,9 @@ sh_mobile_lcdc_channel_fb_cleanup(struct sh_mobile_lcdc_chan *ch)
framebuffer_release(info);
}
-static int __devinit
+static int
sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
- const struct fb_videomode *mode,
+ const struct fb_videomode *modes,
unsigned int num_modes)
{
struct sh_mobile_lcdc_priv *priv = ch->lcdc;
@@ -2193,7 +2213,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
info->pseudo_palette = &ch->pseudo_palette;
info->par = ch;
- fb_videomode_to_modelist(mode, num_modes, &info->modelist);
+ fb_videomode_to_modelist(modes, num_modes, &info->modelist);
ret = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
if (ret < 0) {
@@ -2227,9 +2247,9 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
* default.
*/
var = &info->var;
- fb_videomode_to_var(var, mode);
- var->width = ch->cfg->panel_cfg.width;
- var->height = ch->cfg->panel_cfg.height;
+ fb_videomode_to_var(var, modes);
+ var->width = ch->display.width;
+ var->height = ch->display.height;
var->xres_virtual = ch->xres_virtual;
var->yres_virtual = ch->yres_virtual;
var->activate = FB_ACTIVATE_NOW;
@@ -2262,6 +2282,7 @@ static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
brightness = 0;
+ ch->bl_brightness = brightness;
return ch->cfg->bl_info.set_brightness(brightness);
}
@@ -2269,7 +2290,7 @@ static int sh_mobile_lcdc_get_brightness(struct backlight_device *bdev)
{
struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
- return ch->cfg->bl_info.get_brightness();
+ return ch->bl_brightness;
}
static int sh_mobile_lcdc_check_fb(struct backlight_device *bdev,
@@ -2396,7 +2417,7 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb,
* Probe/remove and driver init/exit
*/
-static const struct fb_videomode default_720p __devinitconst = {
+static const struct fb_videomode default_720p = {
.name = "HDMI 720p",
.xres = 1280,
.yres = 720,
@@ -2475,7 +2496,7 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch)
+static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch)
{
int interface_type = ch->cfg->interface_type;
@@ -2515,11 +2536,11 @@ static int __devinit sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *
return 0;
}
-static int __devinit
-sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
- struct sh_mobile_lcdc_overlay *ovl)
+static int
+sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_overlay *ovl)
{
const struct sh_mobile_lcdc_format_info *format;
+ struct device *dev = ovl->channel->lcdc->dev;
int ret;
if (ovl->cfg->fourcc == 0)
@@ -2528,7 +2549,7 @@ sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
/* Validate the format. */
format = sh_mobile_format_info(ovl->cfg->fourcc);
if (format == NULL) {
- dev_err(priv->dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc);
+ dev_err(dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc);
return -EINVAL;
}
@@ -2556,10 +2577,10 @@ sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
/* Allocate frame buffer memory. */
ovl->fb_size = ovl->cfg->max_xres * ovl->cfg->max_yres
* format->bpp / 8 * 2;
- ovl->fb_mem = dma_alloc_coherent(priv->dev, ovl->fb_size,
- &ovl->dma_handle, GFP_KERNEL);
+ ovl->fb_mem = dma_alloc_coherent(dev, ovl->fb_size, &ovl->dma_handle,
+ GFP_KERNEL);
if (!ovl->fb_mem) {
- dev_err(priv->dev, "unable to allocate buffer\n");
+ dev_err(dev, "unable to allocate buffer\n");
return -ENOMEM;
}
@@ -2570,12 +2591,12 @@ sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
return 0;
}
-static int __devinit
-sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
- struct sh_mobile_lcdc_chan *ch)
+static int
+sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch)
{
const struct sh_mobile_lcdc_format_info *format;
const struct sh_mobile_lcdc_chan_cfg *cfg = ch->cfg;
+ struct device *dev = ch->lcdc->dev;
const struct fb_videomode *max_mode;
const struct fb_videomode *mode;
unsigned int num_modes;
@@ -2588,7 +2609,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
/* Validate the format. */
format = sh_mobile_format_info(cfg->fourcc);
if (format == NULL) {
- dev_err(priv->dev, "Invalid FOURCC %08x.\n", cfg->fourcc);
+ dev_err(dev, "Invalid FOURCC %08x.\n", cfg->fourcc);
return -EINVAL;
}
@@ -2604,7 +2625,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
/* NV12/NV21 buffers must have even number of lines */
if ((cfg->fourcc == V4L2_PIX_FMT_NV12 ||
cfg->fourcc == V4L2_PIX_FMT_NV21) && (mode->yres & 0x1)) {
- dev_err(priv->dev, "yres must be multiple of 2 for "
+ dev_err(dev, "yres must be multiple of 2 for "
"YCbCr420 mode.\n");
return -EINVAL;
}
@@ -2618,7 +2639,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
if (!max_size)
max_size = MAX_XRES * MAX_YRES;
else
- dev_dbg(priv->dev, "Found largest videomode %ux%u\n",
+ dev_dbg(dev, "Found largest videomode %ux%u\n",
max_mode->xres, max_mode->yres);
if (cfg->lcd_modes == NULL) {
@@ -2652,10 +2673,10 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
/* Allocate frame buffer memory. */
ch->fb_size = max_size * format->bpp / 8 * 2;
- ch->fb_mem = dma_alloc_coherent(priv->dev, ch->fb_size, &ch->dma_handle,
+ ch->fb_mem = dma_alloc_coherent(dev, ch->fb_size, &ch->dma_handle,
GFP_KERNEL);
if (ch->fb_mem == NULL) {
- dev_err(priv->dev, "unable to allocate buffer\n");
+ dev_err(dev, "unable to allocate buffer\n");
return -ENOMEM;
}
@@ -2663,8 +2684,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
if (cfg->tx_dev) {
if (!cfg->tx_dev->dev.driver ||
!try_module_get(cfg->tx_dev->dev.driver->owner)) {
- dev_warn(priv->dev,
- "unable to get transmitter device\n");
+ dev_warn(dev, "unable to get transmitter device\n");
return -EINVAL;
}
ch->tx_dev = platform_get_drvdata(cfg->tx_dev);
@@ -2675,7 +2695,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
return sh_mobile_lcdc_channel_fb_init(ch, mode, num_modes);
}
-static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
+static int sh_mobile_lcdc_probe(struct platform_device *pdev)
{
struct sh_mobile_lcdc_info *pdata = pdev->dev.platform_data;
struct sh_mobile_lcdc_priv *priv;
@@ -2772,9 +2792,9 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
for (i = 0; i < num_channels; i++) {
- struct sh_mobile_lcdc_chan *ch = priv->ch + i;
+ struct sh_mobile_lcdc_chan *ch = &priv->ch[i];
- error = sh_mobile_lcdc_channel_init(priv, ch);
+ error = sh_mobile_lcdc_channel_init(ch);
if (error)
goto err1;
}
@@ -2785,7 +2805,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
ovl->cfg = &pdata->overlays[i];
ovl->channel = &priv->ch[0];
- error = sh_mobile_lcdc_overlay_init(priv, ovl);
+ error = sh_mobile_lcdc_overlay_init(ovl);
if (error)
goto err1;
}
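The two fb_mmap handlers added above stop relying on the fbdev core's default mmap (which assumes a physically contiguous smem_start) and instead hand the mapping to dma_mmap_coherent(), the counterpart of the dma_alloc_coherent() call that allocated the buffer. The general shape, sketched for a hypothetical driver:

static int foofb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct foofb_priv *priv = info->par;

	/*
	 * fb_mem, dma_handle and fb_size were filled in by
	 * dma_alloc_coherent() at probe time; dma_mmap_coherent() maps the
	 * same buffer into the calling process with matching attributes.
	 */
	return dma_mmap_coherent(priv->dev, vma, priv->fb_mem,
				 priv->dma_handle, priv->fb_size);
}

static struct fb_ops foofb_ops = {
	.owner		= THIS_MODULE,
	/* ... blit/ioctl/check_var/set_par handlers ... */
	.fb_mmap	= foofb_mmap,
};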
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h
index 0f92f6544b94..f839adef1d90 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/sh_mobile_lcdcfb.h
@@ -94,6 +94,7 @@ struct sh_mobile_lcdc_chan {
/* Backlight */
struct backlight_device *bl;
+ unsigned int bl_brightness;
/* FB */
struct fb_info *info;
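The bl_brightness field added to struct sh_mobile_lcdc_chan above pairs with the sh_mobile_lcdc_update_bl()/get_brightness() change earlier in this diff: the driver now remembers the last value it programmed and reports that back to the backlight core, rather than requiring boards to implement a get_brightness() hook. A hedged sketch of the caching pattern (the foo_* names are illustrative):

#include <linux/backlight.h>
#include <linux/fb.h>

struct foo_chan {
	unsigned int	bl_brightness;
	int		(*set_brightness)(int brightness);	/* board hook */
};

static int foo_bl_update_status(struct backlight_device *bdev)
{
	struct foo_chan *ch = bl_get_data(bdev);
	int brightness = bdev->props.brightness;

	if (bdev->props.power != FB_BLANK_UNBLANK ||
	    bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
		brightness = 0;

	ch->bl_brightness = brightness;		/* remember what was set */
	return ch->set_brightness(brightness);
}

static int foo_bl_get_brightness(struct backlight_device *bdev)
{
	struct foo_chan *ch = bl_get_data(bdev);

	return ch->bl_brightness;		/* no hardware read-back */
}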
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 7a0ba8bb3fbe..e0f098562a74 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -620,7 +620,7 @@ static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops,
* Probe/remove and driver init/exit
*/
-static int __devinit sh_mobile_meram_probe(struct platform_device *pdev)
+static int sh_mobile_meram_probe(struct platform_device *pdev)
{
struct sh_mobile_meram_priv *priv;
struct sh_mobile_meram_info *pdata = pdev->dev.platform_data;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index a7a48db64ce2..977e27927a21 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -106,8 +106,7 @@ sisfb_setdefaultparms(void)
/* ------------- Parameter parsing -------------- */
-static void __devinit
-sisfb_search_vesamode(unsigned int vesamode, bool quiet)
+static void sisfb_search_vesamode(unsigned int vesamode, bool quiet)
{
int i = 0, j = 0;
@@ -146,8 +145,7 @@ sisfb_search_vesamode(unsigned int vesamode, bool quiet)
printk(KERN_ERR "sisfb: Invalid VESA mode 0x%x'\n", vesamode);
}
-static void __devinit
-sisfb_search_mode(char *name, bool quiet)
+static void sisfb_search_mode(char *name, bool quiet)
{
unsigned int j = 0, xres = 0, yres = 0, depth = 0, rate = 0;
int i = 0;
@@ -225,8 +223,7 @@ sisfb_search_mode(char *name, bool quiet)
}
#ifndef MODULE
-static void __devinit
-sisfb_get_vga_mode_from_kernel(void)
+static void sisfb_get_vga_mode_from_kernel(void)
{
#ifdef CONFIG_X86
char mymode[32];
@@ -345,8 +342,7 @@ sisfb_search_specialtiming(const char *name)
/* ----------- Various detection routines ----------- */
-static void __devinit
-sisfb_detect_custom_timing(struct sis_video_info *ivideo)
+static void sisfb_detect_custom_timing(struct sis_video_info *ivideo)
{
unsigned char *biosver = NULL;
unsigned char *biosdate = NULL;
@@ -403,8 +399,7 @@ sisfb_detect_custom_timing(struct sis_video_info *ivideo)
} while(mycustomttable[i].chipID);
}
-static bool __devinit
-sisfb_interpret_edid(struct sisfb_monitor *monitor, u8 *buffer)
+static bool sisfb_interpret_edid(struct sisfb_monitor *monitor, u8 *buffer)
{
int i, j, xres, yres, refresh, index;
u32 emodes;
@@ -505,8 +500,8 @@ sisfb_interpret_edid(struct sisfb_monitor *monitor, u8 *buffer)
return monitor->datavalid;
}
-static void __devinit
-sisfb_handle_ddc(struct sis_video_info *ivideo, struct sisfb_monitor *monitor, int crtno)
+static void sisfb_handle_ddc(struct sis_video_info *ivideo,
+ struct sisfb_monitor *monitor, int crtno)
{
unsigned short temp, i, realcrtno = crtno;
unsigned char buffer[256];
@@ -1898,8 +1893,7 @@ static struct fb_ops sisfb_ops = {
/* ---------------- Chip generation dependent routines ---------------- */
-static struct pci_dev * __devinit
-sisfb_get_northbridge(int basechipid)
+static struct pci_dev *sisfb_get_northbridge(int basechipid)
{
struct pci_dev *pdev = NULL;
int nbridgenum, nbridgeidx, i;
@@ -1938,8 +1932,7 @@ sisfb_get_northbridge(int basechipid)
return pdev;
}
-static int __devinit
-sisfb_get_dram_size(struct sis_video_info *ivideo)
+static int sisfb_get_dram_size(struct sis_video_info *ivideo)
{
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
u8 reg;
@@ -2038,8 +2031,7 @@ sisfb_get_dram_size(struct sis_video_info *ivideo)
/* -------------- video bridge device detection --------------- */
-static void __devinit
-sisfb_detect_VB_connect(struct sis_video_info *ivideo)
+static void sisfb_detect_VB_connect(struct sis_video_info *ivideo)
{
u8 cr32, temp;
@@ -2164,8 +2156,7 @@ sisfb_detect_VB_connect(struct sis_video_info *ivideo)
/* ------------------ Sensing routines ------------------ */
-static bool __devinit
-sisfb_test_DDC1(struct sis_video_info *ivideo)
+static bool sisfb_test_DDC1(struct sis_video_info *ivideo)
{
unsigned short old;
int count = 48;
@@ -2177,8 +2168,7 @@ sisfb_test_DDC1(struct sis_video_info *ivideo)
return (count != -1);
}
-static void __devinit
-sisfb_sense_crt1(struct sis_video_info *ivideo)
+static void sisfb_sense_crt1(struct sis_video_info *ivideo)
{
bool mustwait = false;
u8 sr1F, cr17;
@@ -2259,8 +2249,7 @@ sisfb_sense_crt1(struct sis_video_info *ivideo)
}
/* Determine and detect attached devices on SiS30x */
-static void __devinit
-SiS_SenseLCD(struct sis_video_info *ivideo)
+static void SiS_SenseLCD(struct sis_video_info *ivideo)
{
unsigned char buffer[256];
unsigned short temp, realcrtno, i;
@@ -2347,8 +2336,7 @@ SiS_SenseLCD(struct sis_video_info *ivideo)
ivideo->SiS_Pr.PanelSelfDetected = true;
}
-static int __devinit
-SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
+static int SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
{
int temp, mytest, result, i, j;
@@ -2377,8 +2365,7 @@ SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
return result;
}
-static void __devinit
-SiS_Sense30x(struct sis_video_info *ivideo)
+static void SiS_Sense30x(struct sis_video_info *ivideo)
{
u8 backupP4_0d,backupP2_00,backupP2_4d,backupSR_1e,biosflag=0;
u16 svhs=0, svhs_c=0;
@@ -2518,8 +2505,7 @@ SiS_Sense30x(struct sis_video_info *ivideo)
}
/* Determine and detect attached TV's on Chrontel */
-static void __devinit
-SiS_SenseCh(struct sis_video_info *ivideo)
+static void SiS_SenseCh(struct sis_video_info *ivideo)
{
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
u8 temp1, temp2;
@@ -2643,8 +2629,7 @@ SiS_SenseCh(struct sis_video_info *ivideo)
}
}
-static void __devinit
-sisfb_get_VB_type(struct sis_video_info *ivideo)
+static void sisfb_get_VB_type(struct sis_video_info *ivideo)
{
char stdstr[] = "sisfb: Detected";
char bridgestr[] = "video bridge";
@@ -2906,8 +2891,7 @@ sisfb_engine_init(struct sis_video_info *ivideo)
ivideo->engineok = 1;
}
-static void __devinit
-sisfb_detect_lcd_type(struct sis_video_info *ivideo)
+static void sisfb_detect_lcd_type(struct sis_video_info *ivideo)
{
u8 reg;
int i;
@@ -2962,8 +2946,7 @@ sisfb_detect_lcd_type(struct sis_video_info *ivideo)
ivideo->lcdxres, ivideo->lcdyres);
}
-static void __devinit
-sisfb_save_pdc_emi(struct sis_video_info *ivideo)
+static void sisfb_save_pdc_emi(struct sis_video_info *ivideo)
{
#ifdef CONFIG_FB_SIS_300
/* Save the current PanelDelayCompensation if the LCD is currently used */
@@ -3081,8 +3064,7 @@ sisfb_save_pdc_emi(struct sis_video_info *ivideo)
/* -------------------- Memory manager routines ---------------------- */
-static u32 __devinit
-sisfb_getheapstart(struct sis_video_info *ivideo)
+static u32 sisfb_getheapstart(struct sis_video_info *ivideo)
{
u32 ret = ivideo->sisfb_parm_mem * 1024;
u32 maxoffs = ivideo->video_size - ivideo->hwcursor_size - ivideo->cmdQueueSize;
@@ -3128,8 +3110,7 @@ sisfb_getheapstart(struct sis_video_info *ivideo)
return ret;
}
-static u32 __devinit
-sisfb_getheapsize(struct sis_video_info *ivideo)
+static u32 sisfb_getheapsize(struct sis_video_info *ivideo)
{
u32 max = ivideo->video_size - ivideo->hwcursor_size - ivideo->cmdQueueSize;
u32 ret = 0;
@@ -3154,8 +3135,7 @@ sisfb_getheapsize(struct sis_video_info *ivideo)
return ret;
}
-static int __devinit
-sisfb_heap_init(struct sis_video_info *ivideo)
+static int sisfb_heap_init(struct sis_video_info *ivideo)
{
struct SIS_OH *poh;
@@ -4061,8 +4041,8 @@ static int __init sisfb_setup(char *options)
}
#endif
-static int __devinit
-sisfb_check_rom(void __iomem *rom_base, struct sis_video_info *ivideo)
+static int sisfb_check_rom(void __iomem *rom_base,
+ struct sis_video_info *ivideo)
{
void __iomem *rom;
int romptr;
@@ -4089,8 +4069,7 @@ sisfb_check_rom(void __iomem *rom_base, struct sis_video_info *ivideo)
return 1;
}
-static unsigned char * __devinit
-sisfb_find_rom(struct pci_dev *pdev)
+static unsigned char *sisfb_find_rom(struct pci_dev *pdev)
{
struct sis_video_info *ivideo = pci_get_drvdata(pdev);
void __iomem *rom_base;
@@ -4149,9 +4128,8 @@ sisfb_find_rom(struct pci_dev *pdev)
return myrombase;
}
-static void __devinit
-sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize,
- unsigned int min)
+static void sisfb_post_map_vram(struct sis_video_info *ivideo,
+ unsigned int *mapsize, unsigned int min)
{
if (*mapsize < (min << 20))
return;
@@ -4176,8 +4154,7 @@ sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize,
}
#ifdef CONFIG_FB_SIS_300
-static int __devinit
-sisfb_post_300_buswidth(struct sis_video_info *ivideo)
+static int sisfb_post_300_buswidth(struct sis_video_info *ivideo)
{
void __iomem *FBAddress = ivideo->video_vbase;
unsigned short temp;
@@ -4222,7 +4199,7 @@ sisfb_post_300_buswidth(struct sis_video_info *ivideo)
return 1; /* 32bit */
}
-static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
+static const unsigned short SiS_DRAMType[17][5] = {
{0x0C,0x0A,0x02,0x40,0x39},
{0x0D,0x0A,0x01,0x40,0x48},
{0x0C,0x09,0x02,0x20,0x35},
@@ -4242,10 +4219,9 @@ static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
{0x09,0x08,0x01,0x01,0x00}
};
-static int __devinit
-sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
- int PseudoRankCapacity, int PseudoAdrPinCount,
- unsigned int mapsize)
+static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
+ int buswidth, int PseudoRankCapacity,
+ int PseudoAdrPinCount, unsigned int mapsize)
{
void __iomem *FBAddr = ivideo->video_vbase;
unsigned short sr14;
@@ -4309,8 +4285,7 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
return 0;
}
-static void __devinit
-sisfb_post_300_ramsize(struct pci_dev *pdev, unsigned int mapsize)
+static void sisfb_post_300_ramsize(struct pci_dev *pdev, unsigned int mapsize)
{
struct sis_video_info *ivideo = pci_get_drvdata(pdev);
int i, j, buswidth;
@@ -4335,8 +4310,7 @@ sisfb_post_300_ramsize(struct pci_dev *pdev, unsigned int mapsize)
}
}
-static void __devinit
-sisfb_post_sis300(struct pci_dev *pdev)
+static void sisfb_post_sis300(struct pci_dev *pdev)
{
struct sis_video_info *ivideo = pci_get_drvdata(pdev);
unsigned char *bios = ivideo->SiS_Pr.VirtualRomBase;
@@ -4547,8 +4521,7 @@ sisfb_post_sis300(struct pci_dev *pdev)
#ifdef CONFIG_FB_SIS_315
#if 0
-static void __devinit
-sisfb_post_sis315330(struct pci_dev *pdev)
+static void sisfb_post_sis315330(struct pci_dev *pdev)
{
/* TODO */
}
@@ -4559,8 +4532,7 @@ static inline int sisfb_xgi_is21(struct sis_video_info *ivideo)
return ivideo->chip_real_id == XGI_21;
}
-static void __devinit
-sisfb_post_xgi_delay(struct sis_video_info *ivideo, int delay)
+static void sisfb_post_xgi_delay(struct sis_video_info *ivideo, int delay)
{
unsigned int i;
u8 reg;
@@ -4571,9 +4543,9 @@ sisfb_post_xgi_delay(struct sis_video_info *ivideo, int delay)
}
}
-static int __devinit
-sisfb_find_host_bridge(struct sis_video_info *ivideo, struct pci_dev *mypdev,
- unsigned short pcivendor)
+static int sisfb_find_host_bridge(struct sis_video_info *ivideo,
+ struct pci_dev *mypdev,
+ unsigned short pcivendor)
{
struct pci_dev *pdev = NULL;
unsigned short temp;
@@ -4591,9 +4563,8 @@ sisfb_find_host_bridge(struct sis_video_info *ivideo, struct pci_dev *mypdev,
return ret;
}
-static int __devinit
-sisfb_post_xgi_rwtest(struct sis_video_info *ivideo, int starta,
- unsigned int enda, unsigned int mapsize)
+static int sisfb_post_xgi_rwtest(struct sis_video_info *ivideo, int starta,
+ unsigned int enda, unsigned int mapsize)
{
unsigned int pos;
int i;
@@ -4623,8 +4594,7 @@ sisfb_post_xgi_rwtest(struct sis_video_info *ivideo, int starta,
return 1;
}
-static int __devinit
-sisfb_post_xgi_ramsize(struct sis_video_info *ivideo)
+static int sisfb_post_xgi_ramsize(struct sis_video_info *ivideo)
{
unsigned int buswidth, ranksize, channelab, mapsize;
int i, j, k, l, status;
@@ -4876,8 +4846,7 @@ bail_out:
return status;
}
-static void __devinit
-sisfb_post_xgi_setclocks(struct sis_video_info *ivideo, u8 regb)
+static void sisfb_post_xgi_setclocks(struct sis_video_info *ivideo, u8 regb)
{
u8 v1, v2, v3;
int index;
@@ -4932,8 +4901,8 @@ sisfb_post_xgi_setclocks(struct sis_video_info *ivideo, u8 regb)
sisfb_post_xgi_delay(ivideo, 0x43);
}
-static void __devinit
-sisfb_post_xgi_ddr2_mrs_default(struct sis_video_info *ivideo, u8 regb)
+static void sisfb_post_xgi_ddr2_mrs_default(struct sis_video_info *ivideo,
+ u8 regb)
{
unsigned char *bios = ivideo->bios_abase;
u8 v1;
@@ -4973,8 +4942,7 @@ sisfb_post_xgi_ddr2_mrs_default(struct sis_video_info *ivideo, u8 regb)
sisfb_post_xgi_delay(ivideo, 1);
}
-static void __devinit
-sisfb_post_xgi_ddr2_mrs_xg21(struct sis_video_info *ivideo)
+static void sisfb_post_xgi_ddr2_mrs_xg21(struct sis_video_info *ivideo)
{
sisfb_post_xgi_setclocks(ivideo, 1);
@@ -5015,8 +4983,7 @@ sisfb_post_xgi_ddr2_mrs_xg21(struct sis_video_info *ivideo)
sisfb_post_xgi_delay(ivideo, 1);
}
-static void __devinit
-sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
+static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
{
unsigned char *bios = ivideo->bios_abase;
static const u8 cs158[8] = {
@@ -5061,8 +5028,7 @@ sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
sisfb_post_xgi_ddr2_mrs_default(ivideo, regb);
}
-static u8 __devinit
-sisfb_post_xgi_ramtype(struct sis_video_info *ivideo)
+static u8 sisfb_post_xgi_ramtype(struct sis_video_info *ivideo)
{
unsigned char *bios = ivideo->bios_abase;
u8 ramtype;
@@ -5101,8 +5067,7 @@ sisfb_post_xgi_ramtype(struct sis_video_info *ivideo)
return ramtype;
}
-static int __devinit
-sisfb_post_xgi(struct pci_dev *pdev)
+static int sisfb_post_xgi(struct pci_dev *pdev)
{
struct sis_video_info *ivideo = pci_get_drvdata(pdev);
unsigned char *bios = ivideo->bios_abase;
@@ -5839,8 +5804,7 @@ sisfb_post_xgi(struct pci_dev *pdev)
}
#endif
-static int __devinit
-sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct sisfb_chip_info *chipinfo = &sisfb_chip_info[ent->driver_data];
struct sis_video_info *ivideo = NULL;
@@ -6530,7 +6494,7 @@ error_3: vfree(ivideo->bios_abase);
/* PCI DEVICE HANDLING */
/*****************************************************/
-static void __devexit sisfb_remove(struct pci_dev *pdev)
+static void sisfb_remove(struct pci_dev *pdev)
{
struct sis_video_info *ivideo = pci_get_drvdata(pdev);
struct fb_info *sis_fb_info = ivideo->memyselfandi;
@@ -6591,7 +6555,7 @@ static struct pci_driver sisfb_driver = {
.name = "sisfb",
.id_table = sisfb_pci_table,
.probe = sisfb_probe,
- .remove = __devexit_p(sisfb_remove)
+ .remove = sisfb_remove,
};
static int __init sisfb_init(void)
diff --git a/drivers/video/sis/sis_main.h b/drivers/video/sis/sis_main.h
index 9540e977270e..32e23c209430 100644
--- a/drivers/video/sis/sis_main.h
+++ b/drivers/video/sis/sis_main.h
@@ -98,7 +98,7 @@ static struct sisfb_chip_info {
int hwcursor_size;
int CRT2_write_enable;
const char *chip_name;
-} sisfb_chip_info[] __devinitdata = {
+} sisfb_chip_info[] = {
{ SIS_300, SIS_300_VGA, 0, HW_CURSOR_AREA_SIZE_300 * 2, SIS_CRT2_WENABLE_300, "SiS 300/305" },
{ SIS_540, SIS_300_VGA, 0, HW_CURSOR_AREA_SIZE_300 * 2, SIS_CRT2_WENABLE_300, "SiS 540" },
{ SIS_630, SIS_300_VGA, 0, HW_CURSOR_AREA_SIZE_300 * 2, SIS_CRT2_WENABLE_300, "SiS 630" },
@@ -113,7 +113,7 @@ static struct sisfb_chip_info {
{ XGI_40, SIS_315_VGA, 1, HW_CURSOR_AREA_SIZE_315 * 4, SIS_CRT2_WENABLE_315, "XGI V3XT/V5/V8" },
};
-static struct pci_device_id __devinitdata sisfb_pci_table[] = {
+static struct pci_device_id sisfb_pci_table[] = {
#ifdef CONFIG_FB_SIS_300
{ PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_540_VGA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
@@ -317,7 +317,7 @@ static struct _sis_lcd_data {
u16 xres;
u16 yres;
u8 default_mode_idx;
-} sis_lcd_data[] __devinitdata = {
+} sis_lcd_data[] = {
{ LCD_640x480, 640, 480, 23 },
{ LCD_800x600, 800, 600, 43 },
{ LCD_1024x600, 1024, 600, 67 },
@@ -339,21 +339,21 @@ static struct _sis_lcd_data {
};
/* CR36 evaluation */
-static unsigned short sis300paneltype[] __devinitdata = {
+static unsigned short sis300paneltype[] = {
LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
LCD_1280x960, LCD_640x480, LCD_1024x600, LCD_1152x768,
LCD_UNKNOWN, LCD_UNKNOWN, LCD_UNKNOWN, LCD_UNKNOWN,
LCD_UNKNOWN, LCD_UNKNOWN, LCD_UNKNOWN, LCD_UNKNOWN
};
-static unsigned short sis310paneltype[] __devinitdata = {
+static unsigned short sis310paneltype[] = {
LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
LCD_1152x768, LCD_1400x1050, LCD_1280x768, LCD_1600x1200,
LCD_320x240_2, LCD_320x240_3, LCD_UNKNOWN, LCD_UNKNOWN
};
-static unsigned short sis661paneltype[] __devinitdata = {
+static unsigned short sis661paneltype[] = {
LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
LCD_1280x854, LCD_1400x1050, LCD_1280x768, LCD_1600x1200,
@@ -466,7 +466,7 @@ static struct _sisfbddcsmodes {
u16 h;
u16 v;
u32 d;
-} sisfb_ddcsmodes[] __devinitdata = {
+} sisfb_ddcsmodes[] = {
{ 0x10000, 67, 75, 108000},
{ 0x08000, 48, 72, 50000},
{ 0x04000, 46, 75, 49500},
@@ -488,7 +488,7 @@ static struct _sisfbddcfmodes {
u16 v;
u16 h;
u32 d;
-} sisfb_ddcfmodes[] __devinitdata = {
+} sisfb_ddcfmodes[] = {
{ 1280, 1024, 85, 92, 157500},
{ 1600, 1200, 60, 75, 162000},
{ 1600, 1200, 65, 82, 175500},
@@ -505,7 +505,7 @@ static struct _chswtable {
u16 subsysCard;
char *vendorName;
char *cardName;
-} mychswtable[] __devinitdata = {
+} mychswtable[] = {
{ 0x1631, 0x1002, "Mitachi", "0x1002" },
{ 0x1071, 0x7521, "Mitac" , "7521P" },
{ 0, 0, "" , "" }
@@ -525,7 +525,7 @@ static struct _customttable {
char *cardName;
u32 SpecialID;
char *optionName;
-} mycustomttable[] __devinitdata = {
+} mycustomttable[] = {
{ SIS_630, "2.00.07", "09/27/2002-13:38:25",
0x3240A8,
{ 0x220, 0x227, 0x228, 0x229, 0x0ee },
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 5b6abc6de84b..2d4694c6b9e0 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -63,7 +63,7 @@
/*
* Driver data
*/
-static char *mode_option __devinitdata;
+static char *mode_option;
/*
* If your driver supports multiple boards, you should make the
@@ -84,7 +84,7 @@ struct xxx_par;
* if we don't use modedb. If we do use modedb see xxxfb_init how to use it
* to get a fb_var_screeninfo. Otherwise define a default var as well.
*/
-static struct fb_fix_screeninfo xxxfb_fix __devinitdata = {
+static struct fb_fix_screeninfo xxxfb_fix = {
.id = "FB's name",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -678,8 +678,7 @@ static struct fb_ops xxxfb_ops = {
*/
/* static int __init xxfb_probe (struct platform_device *pdev) -- for platform devs */
-static int __devinit xxxfb_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct fb_info *info;
struct xxx_par *par;
@@ -705,9 +704,7 @@ static int __devinit xxxfb_probe(struct pci_dev *dev,
*/
info->screen_base = framebuffer_virtual_memory;
info->fbops = &xxxfb_ops;
- info->fix = xxxfb_fix; /* this will be the only time xxxfb_fix will be
- * used, so mark it as __devinitdata
- */
+ info->fix = xxxfb_fix;
info->pseudo_palette = pseudo_palette; /* The pseudopalette is a
* 16-member array
*/
@@ -836,8 +833,8 @@ static int __devinit xxxfb_probe(struct pci_dev *dev,
/*
* Cleanup
*/
-/* static void __devexit xxxfb_remove(struct platform_device *pdev) */
-static void __devexit xxxfb_remove(struct pci_dev *dev)
+/* static void xxxfb_remove(struct platform_device *pdev) */
+static void xxxfb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
/* or platform_get_drvdata(pdev); */
@@ -899,7 +896,7 @@ static struct pci_driver xxxfb_driver = {
.name = "xxxfb",
.id_table = xxxfb_id_table,
.probe = xxxfb_probe,
- .remove = __devexit_p(xxxfb_remove),
+ .remove = xxxfb_remove,
.suspend = xxxfb_suspend, /* optional but recommended */
.resume = xxxfb_resume, /* optional but recommended */
};
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 3690effbedcc..1501979099dc 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -46,7 +46,7 @@
static char *fb_mode = "640x480-16@60";
static unsigned long default_bpp = 16;
-static struct fb_videomode __devinitdata sm501_default_mode = {
+static struct fb_videomode sm501_default_mode = {
.refresh = 60,
.xres = 640,
.yres = 480,
@@ -1664,8 +1664,7 @@ static void sm501fb_stop(struct sm501fb_info *info)
resource_size(info->regs_res));
}
-static int __devinit sm501fb_init_fb(struct fb_info *fb,
- enum sm501_controller head,
+static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
const char *fbname)
{
struct sm501_platdata_fbsub *pd;
@@ -1850,8 +1849,8 @@ static struct sm501_platdata_fb sm501fb_def_pdata = {
static char driver_name_crt[] = "sm501fb-crt";
static char driver_name_pnl[] = "sm501fb-panel";
-static int __devinit sm501fb_probe_one(struct sm501fb_info *info,
- enum sm501_controller head)
+static int sm501fb_probe_one(struct sm501fb_info *info,
+ enum sm501_controller head)
{
unsigned char *name = (head == HEAD_CRT) ? "crt" : "panel";
struct sm501_platdata_fbsub *pd;
@@ -1892,9 +1891,8 @@ static void sm501_free_init_fb(struct sm501fb_info *info,
fb_dealloc_cmap(&fbi->cmap);
}
-static int __devinit sm501fb_start_one(struct sm501fb_info *info,
- enum sm501_controller head,
- const char *drvname)
+static int sm501fb_start_one(struct sm501fb_info *info,
+ enum sm501_controller head, const char *drvname)
{
struct fb_info *fbi = info->fb[head];
int ret;
@@ -1922,7 +1920,7 @@ static int __devinit sm501fb_start_one(struct sm501fb_info *info,
return 0;
}
-static int __devinit sm501fb_probe(struct platform_device *pdev)
+static int sm501fb_probe(struct platform_device *pdev)
{
struct sm501fb_info *info;
struct device *dev = &pdev->dev;
diff --git a/drivers/video/ssd1307fb.c b/drivers/video/ssd1307fb.c
new file mode 100644
index 000000000000..395cb6a8d8f3
--- /dev/null
+++ b/drivers/video/ssd1307fb.c
@@ -0,0 +1,397 @@
+/*
+ * Driver for the Solomon SSD1307 OLED controller
+ *
+ * Copyright 2012 Free Electrons
+ *
+ * Licensed under the GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pwm.h>
+#include <linux/delay.h>
+
+#define SSD1307FB_WIDTH 96
+#define SSD1307FB_HEIGHT 16
+
+#define SSD1307FB_DATA 0x40
+#define SSD1307FB_COMMAND 0x80
+
+#define SSD1307FB_CONTRAST 0x81
+#define SSD1307FB_SEG_REMAP_ON 0xa1
+#define SSD1307FB_DISPLAY_OFF 0xae
+#define SSD1307FB_DISPLAY_ON 0xaf
+#define SSD1307FB_START_PAGE_ADDRESS 0xb0
+
+struct ssd1307fb_par {
+ struct i2c_client *client;
+ struct fb_info *info;
+ struct pwm_device *pwm;
+ u32 pwm_period;
+ int reset;
+};
+
+static struct fb_fix_screeninfo ssd1307fb_fix = {
+ .id = "Solomon SSD1307",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = SSD1307FB_WIDTH / 8,
+ .accel = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo ssd1307fb_var = {
+ .xres = SSD1307FB_WIDTH,
+ .yres = SSD1307FB_HEIGHT,
+ .xres_virtual = SSD1307FB_WIDTH,
+ .yres_virtual = SSD1307FB_HEIGHT,
+ .bits_per_pixel = 1,
+};
+
+static int ssd1307fb_write_array(struct i2c_client *client, u8 type, u8 *cmd, u32 len)
+{
+ u8 *buf;
+ int ret = 0;
+
+ buf = kzalloc(len + 1, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&client->dev, "Couldn't allocate sending buffer.\n");
+ return -ENOMEM;
+ }
+
+ buf[0] = type;
+ memcpy(buf + 1, cmd, len);
+
+ ret = i2c_master_send(client, buf, len + 1);
+ if (ret != len + 1) {
+ dev_err(&client->dev, "Couldn't send I2C command.\n");
+ goto error;
+ }
+
+error:
+ kfree(buf);
+ return ret;
+}
+
+static inline int ssd1307fb_write_cmd_array(struct i2c_client *client, u8 *cmd, u32 len)
+{
+ return ssd1307fb_write_array(client, SSD1307FB_COMMAND, cmd, len);
+}
+
+static inline int ssd1307fb_write_cmd(struct i2c_client *client, u8 cmd)
+{
+ return ssd1307fb_write_cmd_array(client, &cmd, 1);
+}
+
+static inline int ssd1307fb_write_data_array(struct i2c_client *client, u8 *cmd, u32 len)
+{
+ return ssd1307fb_write_array(client, SSD1307FB_DATA, cmd, len);
+}
+
+static inline int ssd1307fb_write_data(struct i2c_client *client, u8 data)
+{
+ return ssd1307fb_write_data_array(client, &data, 1);
+}
+
+static void ssd1307fb_update_display(struct ssd1307fb_par *par)
+{
+ u8 *vmem = par->info->screen_base;
+ int i, j, k;
+
+ /*
+ * The screen is divided into pages, each having a height of 8
+ * pixels and the width of the screen. When sending a byte of
+ * data to the controller, it gives the 8 bits for the current
+ * column. I.e., the first byte is the 8 bits of the first
+ * column, then the 8 bits for the second column, etc.
+ *
+ *
+ * Representation of the screen, assuming it is 5 pixels
+ * wide. Each letter-number combination is a bit that controls
+ * one pixel.
+ *
+ * A0 A1 A2 A3 A4
+ * B0 B1 B2 B3 B4
+ * C0 C1 C2 C3 C4
+ * D0 D1 D2 D3 D4
+ * E0 E1 E2 E3 E4
+ * F0 F1 F2 F3 F4
+ * G0 G1 G2 G3 G4
+ * H0 H1 H2 H3 H4
+ *
+ * If you want to update this screen, you need to send 5 bytes:
+ * (1) A0 B0 C0 D0 E0 F0 G0 H0
+ * (2) A1 B1 C1 D1 E1 F1 G1 H1
+ * (3) A2 B2 C2 D2 E2 F2 G2 H2
+ * (4) A3 B3 C3 D3 E3 F3 G3 H3
+ * (5) A4 B4 C4 D4 E4 F4 G4 H4
+ */
+
+ for (i = 0; i < (SSD1307FB_HEIGHT / 8); i++) {
+ ssd1307fb_write_cmd(par->client, SSD1307FB_START_PAGE_ADDRESS + (i + 1));
+ ssd1307fb_write_cmd(par->client, 0x00);
+ ssd1307fb_write_cmd(par->client, 0x10);
+
+ for (j = 0; j < SSD1307FB_WIDTH; j++) {
+ u8 buf = 0;
+ for (k = 0; k < 8; k++) {
+ u32 page_length = SSD1307FB_WIDTH * i;
+ u32 index = page_length + (SSD1307FB_WIDTH * k + j) / 8;
+ u8 byte = *(vmem + index);
+ u8 bit = byte & (1 << (j % 8));
+ bit = bit >> (j % 8);
+ buf |= bit << k;
+ }
+ ssd1307fb_write_data(par->client, buf);
+ }
+ }
+}
+
+
+static ssize_t ssd1307fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ssd1307fb_par *par = info->par;
+ unsigned long total_size;
+ unsigned long p = *ppos;
+ u8 __iomem *dst;
+
+ total_size = info->fix.smem_len;
+
+ if (p > total_size)
+ return -EINVAL;
+
+ if (count + p > total_size)
+ count = total_size - p;
+
+ if (!count)
+ return -EINVAL;
+
+ dst = (void __force *) (info->screen_base + p);
+
+ if (copy_from_user(dst, buf, count))
+ return -EFAULT;
+
+ ssd1307fb_update_display(par);
+
+ *ppos += count;
+
+ return count;
+}
+
+static void ssd1307fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct ssd1307fb_par *par = info->par;
+ sys_fillrect(info, rect);
+ ssd1307fb_update_display(par);
+}
+
+static void ssd1307fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct ssd1307fb_par *par = info->par;
+ sys_copyarea(info, area);
+ ssd1307fb_update_display(par);
+}
+
+static void ssd1307fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct ssd1307fb_par *par = info->par;
+ sys_imageblit(info, image);
+ ssd1307fb_update_display(par);
+}
+
+static struct fb_ops ssd1307fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = ssd1307fb_write,
+ .fb_fillrect = ssd1307fb_fillrect,
+ .fb_copyarea = ssd1307fb_copyarea,
+ .fb_imageblit = ssd1307fb_imageblit,
+};
+
+static void ssd1307fb_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ ssd1307fb_update_display(info->par);
+}
+
+static struct fb_deferred_io ssd1307fb_defio = {
+ .delay = HZ,
+ .deferred_io = ssd1307fb_deferred_io,
+};
+
+static int ssd1307fb_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct fb_info *info;
+ u32 vmem_size = SSD1307FB_WIDTH * SSD1307FB_HEIGHT / 8;
+ struct ssd1307fb_par *par;
+ u8 *vmem;
+ int ret;
+
+ if (!client->dev.of_node) {
+ dev_err(&client->dev, "No device tree data found!\n");
+ return -EINVAL;
+ }
+
+ info = framebuffer_alloc(sizeof(struct ssd1307fb_par), &client->dev);
+ if (!info) {
+ dev_err(&client->dev, "Couldn't allocate framebuffer.\n");
+ return -ENOMEM;
+ }
+
+ vmem = devm_kzalloc(&client->dev, vmem_size, GFP_KERNEL);
+ if (!vmem) {
+ dev_err(&client->dev, "Couldn't allocate graphical memory.\n");
+ ret = -ENOMEM;
+ goto fb_alloc_error;
+ }
+
+ info->fbops = &ssd1307fb_ops;
+ info->fix = ssd1307fb_fix;
+ info->fbdefio = &ssd1307fb_defio;
+
+ info->var = ssd1307fb_var;
+ info->var.red.length = 1;
+ info->var.red.offset = 0;
+ info->var.green.length = 1;
+ info->var.green.offset = 0;
+ info->var.blue.length = 1;
+ info->var.blue.offset = 0;
+
+ info->screen_base = (u8 __force __iomem *)vmem;
+ info->fix.smem_start = (unsigned long)vmem;
+ info->fix.smem_len = vmem_size;
+
+ fb_deferred_io_init(info);
+
+ par = info->par;
+ par->info = info;
+ par->client = client;
+
+ par->reset = of_get_named_gpio(client->dev.of_node,
+ "reset-gpios", 0);
+ if (!gpio_is_valid(par->reset)) {
+ ret = -EINVAL;
+ goto reset_oled_error;
+ }
+
+ ret = devm_gpio_request_one(&client->dev, par->reset,
+ GPIOF_OUT_INIT_HIGH,
+ "oled-reset");
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to request gpio %d: %d\n",
+ par->reset, ret);
+ goto reset_oled_error;
+ }
+
+ par->pwm = pwm_get(&client->dev, NULL);
+ if (IS_ERR(par->pwm)) {
+ dev_err(&client->dev, "Could not get PWM from device tree!\n");
+ ret = PTR_ERR(par->pwm);
+ goto pwm_error;
+ }
+
+ par->pwm_period = pwm_get_period(par->pwm);
+
+ dev_dbg(&client->dev, "Using PWM%d with a %dns period.\n", par->pwm->pwm, par->pwm_period);
+
+ ret = register_framebuffer(info);
+ if (ret) {
+ dev_err(&client->dev, "Couldn't register the framebuffer\n");
+ goto fbreg_error;
+ }
+
+ i2c_set_clientdata(client, info);
+
+ /* Reset the screen */
+ gpio_set_value(par->reset, 0);
+ udelay(4);
+ gpio_set_value(par->reset, 1);
+ udelay(4);
+
+ /* Enable the PWM */
+ pwm_config(par->pwm, par->pwm_period / 2, par->pwm_period);
+ pwm_enable(par->pwm);
+
+ /* Map column 127 of the OLED to segment 0 */
+ ret = ssd1307fb_write_cmd(client, SSD1307FB_SEG_REMAP_ON);
+ if (ret < 0) {
+ dev_err(&client->dev, "Couldn't remap the screen.\n");
+ goto remap_error;
+ }
+
+ /* Turn on the display */
+ ret = ssd1307fb_write_cmd(client, SSD1307FB_DISPLAY_ON);
+ if (ret < 0) {
+ dev_err(&client->dev, "Couldn't turn the display on.\n");
+ goto remap_error;
+ }
+
+ dev_info(&client->dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
+
+ return 0;
+
+remap_error:
+ unregister_framebuffer(info);
+ pwm_disable(par->pwm);
+fbreg_error:
+ pwm_put(par->pwm);
+pwm_error:
+reset_oled_error:
+ fb_deferred_io_cleanup(info);
+fb_alloc_error:
+ framebuffer_release(info);
+ return ret;
+}
+
+static int ssd1307fb_remove(struct i2c_client *client)
+{
+ struct fb_info *info = i2c_get_clientdata(client);
+ struct ssd1307fb_par *par = info->par;
+
+ unregister_framebuffer(info);
+ pwm_disable(par->pwm);
+ pwm_put(par->pwm);
+ fb_deferred_io_cleanup(info);
+ framebuffer_release(info);
+
+ return 0;
+}
+
+static const struct i2c_device_id ssd1307fb_i2c_id[] = {
+ { "ssd1307fb", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id);
+
+static const struct of_device_id ssd1307fb_of_match[] = {
+ { .compatible = "solomon,ssd1307fb-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ssd1307fb_of_match);
+
+static struct i2c_driver ssd1307fb_driver = {
+ .probe = ssd1307fb_probe,
+ .remove = ssd1307fb_remove,
+ .id_table = ssd1307fb_i2c_id,
+ .driver = {
+ .name = "ssd1307fb",
+ .of_match_table = of_match_ptr(ssd1307fb_of_match),
+ .owner = THIS_MODULE,
+ },
+};
+
+module_i2c_driver(ssd1307fb_driver);
+
+MODULE_DESCRIPTION("FB driver for the Solomon SSD1307 OLED controller");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_LICENSE("GPL");
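The layout comment in ssd1307fb_update_display() above describes the page-wise byte order the SSD1307 expects: each byte written carries the 8 vertically stacked pixels of one column within an 8-pixel-high page. A minimal, self-contained sketch of that same repacking, assuming a 1 bpp row-major framebuffer with LSB-first bit order (plain user-space C with hypothetical names, standing in for the driver's I2C writes):

#include <stdint.h>
#include <stdio.h>

#define WIDTH	96	/* pixels per line */
#define HEIGHT	16	/* lines, a multiple of 8 */

/* Repack a row-major 1 bpp framebuffer into SSD1307 page order. */
static void pack_pages(const uint8_t *vmem, uint8_t *out)
{
	int page, col, k;

	for (page = 0; page < HEIGHT / 8; page++) {
		for (col = 0; col < WIDTH; col++) {
			uint8_t byte = 0;

			for (k = 0; k < 8; k++) {
				int row = page * 8 + k;
				int idx = row * (WIDTH / 8) + col / 8;

				byte |= ((vmem[idx] >> (col % 8)) & 1) << k;
			}
			/* The driver would send this byte over I2C here. */
			out[page * WIDTH + col] = byte;
		}
	}
}

int main(void)
{
	uint8_t vmem[WIDTH * HEIGHT / 8] = { 0 };	/* 192 bytes for 96x16 */
	uint8_t out[WIDTH * HEIGHT / 8];

	vmem[0] = 0x01;		/* turn on the top-left pixel */
	pack_pages(vmem, out);
	printf("page 0, column 0 -> 0x%02x\n", out[0]);
	return 0;
}

For the 96x16 panel this produces 192 output bytes, the same size as the vmem buffer the probe routine allocates.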
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 111fb32e8769..9c00026e3ae2 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -104,7 +104,7 @@ static bool slowpci; /* slow PCI settings */
*/
#define DEFAULT_VIDEO_MODE "640x480@60"
-static char *mode_option __devinitdata = DEFAULT_VIDEO_MODE;
+static char *mode_option = DEFAULT_VIDEO_MODE;
enum {
ID_VOODOO1 = 0,
@@ -113,7 +113,7 @@ enum {
#define IS_VOODOO2(par) ((par)->type == ID_VOODOO2)
-static struct sst_spec voodoo_spec[] __devinitdata = {
+static struct sst_spec voodoo_spec[] = {
{ .name = "Voodoo Graphics", .default_gfx_clock = 50000, .max_gfxclk = 60 },
{ .name = "Voodoo2", .default_gfx_clock = 75000, .max_gfxclk = 85 },
};
@@ -822,7 +822,7 @@ static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
/*
* get lfb size
*/
-static int __devinit sst_get_memsize(struct fb_info *info, __u32 *memsize)
+static int sst_get_memsize(struct fb_info *info, __u32 *memsize)
{
u8 __iomem *fbbase_virt = info->screen_base;
@@ -865,7 +865,7 @@ static int __devinit sst_get_memsize(struct fb_info *info, __u32 *memsize)
/* fbi should be idle, and fifo empty and mem disabled */
/* supposed to detect AT&T ATT20C409 and Ti TVP3409 ramdacs */
-static int __devinit sst_detect_att(struct fb_info *info)
+static int sst_detect_att(struct fb_info *info)
{
struct sstfb_par *par = info->par;
int i, mir, dir;
@@ -890,7 +890,7 @@ static int __devinit sst_detect_att(struct fb_info *info)
return 0;
}
-static int __devinit sst_detect_ti(struct fb_info *info)
+static int sst_detect_ti(struct fb_info *info)
{
struct sstfb_par *par = info->par;
int i, mir, dir;
@@ -926,7 +926,7 @@ static int __devinit sst_detect_ti(struct fb_info *info)
* touched...
* is it really safe? how can I reset this ramdac? geee...
*/
-static int __devinit sst_detect_ics(struct fb_info *info)
+static int sst_detect_ics(struct fb_info *info)
{
struct sstfb_par *par = info->par;
int m_clk0_1, m_clk0_7, m_clk1_b;
@@ -1105,7 +1105,7 @@ static void sst_set_vidmod_ics(struct fb_info *info, const int bpp)
*/
-static struct dac_switch dacs[] __devinitdata = {
+static struct dac_switch dacs[] = {
{ .name = "TI TVP3409",
.detect = sst_detect_ti,
.set_pll = sst_set_pll_att_ti,
@@ -1121,7 +1121,7 @@ static struct dac_switch dacs[] __devinitdata = {
.set_vidmod = sst_set_vidmod_ics },
};
-static int __devinit sst_detect_dactype(struct fb_info *info, struct sstfb_par *par)
+static int sst_detect_dactype(struct fb_info *info, struct sstfb_par *par)
{
int i, ret = 0;
@@ -1140,7 +1140,7 @@ static int __devinit sst_detect_dactype(struct fb_info *info, struct sstfb_par *
/*
* Internal Routines
*/
-static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
+static int sst_init(struct fb_info *info, struct sstfb_par *par)
{
u32 fbiinit0, fbiinit1, fbiinit4;
struct pci_dev *dev = par->dev;
@@ -1239,7 +1239,7 @@ static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
return 1;
}
-static void __devexit sst_shutdown(struct fb_info *info)
+static void sst_shutdown(struct fb_info *info)
{
struct sstfb_par *par = info->par;
struct pci_dev *dev = par->dev;
@@ -1271,7 +1271,7 @@ static void __devexit sst_shutdown(struct fb_info *info)
/*
* Interface to the world
*/
-static int __devinit sstfb_setup(char *options)
+static int sstfb_setup(char *options)
{
char *this_opt;
@@ -1317,8 +1317,7 @@ static struct fb_ops sstfb_ops = {
.fb_ioctl = sstfb_ioctl,
};
-static int __devinit sstfb_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct fb_info *info;
struct fb_fix_screeninfo *fix;
@@ -1458,7 +1457,7 @@ fail_mmio_mem:
return -ENXIO; /* no voodoo detected */
}
-static void __devexit sstfb_remove(struct pci_dev *pdev)
+static void sstfb_remove(struct pci_dev *pdev)
{
struct sstfb_par *par;
struct fb_info *info;
@@ -1490,11 +1489,11 @@ static struct pci_driver sstfb_driver = {
.name = "sstfb",
.id_table = sstfb_id_tbl,
.probe = sstfb_probe,
- .remove = __devexit_p(sstfb_remove),
+ .remove = sstfb_remove,
};
-static int __devinit sstfb_init(void)
+static int sstfb_init(void)
{
char *option = NULL;
@@ -1505,7 +1504,7 @@ static int __devinit sstfb_init(void)
return pci_register_driver(&sstfb_driver);
}
-static void __devexit sstfb_exit(void)
+static void sstfb_exit(void)
{
pci_unregister_driver(&sstfb_driver);
}
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index 729a50722bdf..cc6f48bba36b 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -25,7 +25,7 @@ struct gfb_info {
u32 pseudo_palette[16];
};
-static int __devinit gfb_get_props(struct gfb_info *gp)
+static int gfb_get_props(struct gfb_info *gp)
{
gp->width = of_getintprop_default(gp->of_node, "width", 0);
gp->height = of_getintprop_default(gp->of_node, "height", 0);
@@ -66,7 +66,7 @@ static struct fb_ops gfb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __devinit gfb_set_fbinfo(struct gfb_info *gp)
+static int gfb_set_fbinfo(struct gfb_info *gp)
{
struct fb_info *info = gp->info;
struct fb_var_screeninfo *var = &info->var;
@@ -111,7 +111,7 @@ static int __devinit gfb_set_fbinfo(struct gfb_info *gp)
return 0;
}
-static int __devinit gfb_probe(struct platform_device *op)
+static int gfb_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -173,7 +173,7 @@ err_out:
return err;
}
-static int __devexit gfb_remove(struct platform_device *op)
+static int gfb_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct gfb_info *gp = info->par;
@@ -201,7 +201,7 @@ MODULE_DEVICE_TABLE(of, ffb_match);
static struct platform_driver gfb_driver = {
.probe = gfb_probe,
- .remove = __devexit_p(gfb_remove),
+ .remove = gfb_remove,
.driver = {
.name = "gfb",
.owner = THIS_MODULE,
diff --git a/drivers/video/sunxvr2500.c b/drivers/video/sunxvr2500.c
index 7fbcba86d1a2..843b6bab0483 100644
--- a/drivers/video/sunxvr2500.c
+++ b/drivers/video/sunxvr2500.c
@@ -29,7 +29,7 @@ struct s3d_info {
u32 pseudo_palette[16];
};
-static int __devinit s3d_get_props(struct s3d_info *sp)
+static int s3d_get_props(struct s3d_info *sp)
{
sp->width = of_getintprop_default(sp->of_node, "width", 0);
sp->height = of_getintprop_default(sp->of_node, "height", 0);
@@ -70,7 +70,7 @@ static struct fb_ops s3d_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __devinit s3d_set_fbinfo(struct s3d_info *sp)
+static int s3d_set_fbinfo(struct s3d_info *sp)
{
struct fb_info *info = sp->info;
struct fb_var_screeninfo *var = &info->var;
@@ -115,8 +115,8 @@ static int __devinit s3d_set_fbinfo(struct s3d_info *sp)
return 0;
}
-static int __devinit s3d_pci_register(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int s3d_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct fb_info *info;
struct s3d_info *sp;
@@ -219,7 +219,7 @@ err_out:
return err;
}
-static void __devexit s3d_pci_unregister(struct pci_dev *pdev)
+static void s3d_pci_unregister(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct s3d_info *sp = info->par;
@@ -251,7 +251,7 @@ static struct pci_driver s3d_driver = {
.name = "s3d",
.id_table = s3d_pci_table,
.probe = s3d_pci_register,
- .remove = __devexit_p(s3d_pci_unregister),
+ .remove = s3d_pci_unregister,
};
static int __init s3d_init(void)
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
index 6c71b1b44477..387350d004df 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -51,7 +51,7 @@ struct e3d_info {
u32 pseudo_palette[16];
};
-static int __devinit e3d_get_props(struct e3d_info *ep)
+static int e3d_get_props(struct e3d_info *ep)
{
ep->width = of_getintprop_default(ep->of_node, "width", 0);
ep->height = of_getintprop_default(ep->of_node, "height", 0);
@@ -193,7 +193,7 @@ static struct fb_ops e3d_ops = {
.fb_imageblit = e3d_imageblit,
};
-static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
+static int e3d_set_fbinfo(struct e3d_info *ep)
{
struct fb_info *info = ep->info;
struct fb_var_screeninfo *var = &info->var;
@@ -238,8 +238,8 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
return 0;
}
-static int __devinit e3d_pci_register(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e3d_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct device_node *of_node;
const char *device_type;
@@ -392,7 +392,7 @@ err_out:
return err;
}
-static void __devexit e3d_pci_unregister(struct pci_dev *pdev)
+static void e3d_pci_unregister(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct e3d_info *ep = info->par;
@@ -437,7 +437,7 @@ static struct pci_driver e3d_driver = {
.name = "e3d",
.id_table = e3d_pci_table,
.probe = e3d_pci_register,
- .remove = __devexit_p(e3d_pci_unregister),
+ .remove = e3d_pci_unregister,
};
static int __init e3d_init(void)
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index 07c66e946634..c000852500aa 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -362,7 +362,7 @@ static void tcx_unmap_regs(struct platform_device *op, struct fb_info *info,
info->screen_base, info->fix.smem_len);
}
-static int __devinit tcx_probe(struct platform_device *op)
+static int tcx_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -486,7 +486,7 @@ out_err:
return err;
}
-static int __devexit tcx_remove(struct platform_device *op)
+static int tcx_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct tcx_par *par = info->par;
@@ -518,7 +518,7 @@ static struct platform_driver tcx_driver = {
.of_match_table = tcx_match,
},
.probe = tcx_probe,
- .remove = __devexit_p(tcx_remove),
+ .remove = tcx_remove,
};
static int __init tcx_init(void)
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index e026724a3a56..64bc28ba4037 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -100,7 +100,7 @@ static inline int mtrr_del(int reg, unsigned long base,
#define VOODOO3_MAX_PIXCLOCK 300000
#define VOODOO5_MAX_PIXCLOCK 350000
-static struct fb_fix_screeninfo tdfx_fix __devinitdata = {
+static struct fb_fix_screeninfo tdfx_fix = {
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.ypanstep = 1,
@@ -108,7 +108,7 @@ static struct fb_fix_screeninfo tdfx_fix __devinitdata = {
.accel = FB_ACCEL_3DFX_BANSHEE
};
-static struct fb_var_screeninfo tdfx_var __devinitdata = {
+static struct fb_var_screeninfo tdfx_var = {
/* "640x480, 8 bpp @ 60 Hz */
.xres = 640,
.yres = 480,
@@ -135,9 +135,8 @@ static struct fb_var_screeninfo tdfx_var __devinitdata = {
/*
* PCI driver prototypes
*/
-static int __devinit tdfxfb_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
-static void __devexit tdfxfb_remove(struct pci_dev *pdev);
+static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void tdfxfb_remove(struct pci_dev *pdev);
static struct pci_device_id tdfxfb_id_table[] = {
{ PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_BANSHEE,
@@ -156,7 +155,7 @@ static struct pci_driver tdfxfb_driver = {
.name = "tdfxfb",
.id_table = tdfxfb_id_table,
.probe = tdfxfb_probe,
- .remove = __devexit_p(tdfxfb_remove),
+ .remove = tdfxfb_remove,
};
MODULE_DEVICE_TABLE(pci, tdfxfb_id_table);
@@ -167,9 +166,9 @@ MODULE_DEVICE_TABLE(pci, tdfxfb_id_table);
static int nopan;
static int nowrap = 1; /* not implemented (yet) */
static int hwcursor = 1;
-static char *mode_option __devinitdata;
+static char *mode_option;
/* mtrr option */
-static bool nomtrr __devinitdata;
+static bool nomtrr;
/* -------------------------------------------------------------------------
* Hardware-specific functions
@@ -1279,8 +1278,8 @@ static int tdfxfb_ddc_getsda(void *data)
return (0 != (tdfx_inl(par, VIDSERPARPORT) & DDC_SDA_IN));
}
-static int __devinit tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan,
- const char *name, struct device *dev)
+static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
+ struct device *dev)
{
int rc;
@@ -1308,8 +1307,8 @@ static int __devinit tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan,
return rc;
}
-static int __devinit tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan,
- const char *name, struct device *dev)
+static int tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan, const char *name,
+ struct device *dev)
{
int rc;
@@ -1336,7 +1335,7 @@ static int __devinit tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan,
return rc;
}
-static void __devinit tdfxfb_create_i2c_busses(struct fb_info *info)
+static void tdfxfb_create_i2c_busses(struct fb_info *info)
{
struct tdfx_par *par = info->par;
@@ -1388,8 +1387,7 @@ static int tdfxfb_probe_i2c_connector(struct tdfx_par *par,
* Initializes and allocates resources for PCI device @pdev.
*
*/
-static int __devinit tdfxfb_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct tdfx_par *default_par;
struct fb_info *info;
@@ -1626,7 +1624,7 @@ static void __init tdfxfb_setup(char *options)
* lifetime for the PCI device @pdev.
*
*/
-static void __devexit tdfxfb_remove(struct pci_dev *pdev)
+static void tdfxfb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct tdfx_par *par = info->par;
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index aba7686b1a32..c9c8e5a1fdee 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -61,8 +61,8 @@ static void tgafb_fillrect(struct fb_info *, const struct fb_fillrect *);
static void tgafb_copyarea(struct fb_info *, const struct fb_copyarea *);
static int tgafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
-static int __devinit tgafb_register(struct device *dev);
-static void __devexit tgafb_unregister(struct device *dev);
+static int tgafb_register(struct device *dev);
+static void tgafb_unregister(struct device *dev);
static const char *mode_option;
static const char *mode_option_pci = "640x480@60";
@@ -93,9 +93,8 @@ static struct fb_ops tgafb_ops = {
/*
* PCI registration operations
*/
-static int __devinit tgafb_pci_register(struct pci_dev *,
- const struct pci_device_id *);
-static void __devexit tgafb_pci_unregister(struct pci_dev *);
+static int tgafb_pci_register(struct pci_dev *, const struct pci_device_id *);
+static void tgafb_pci_unregister(struct pci_dev *);
static struct pci_device_id const tgafb_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TGA) },
@@ -107,17 +106,16 @@ static struct pci_driver tgafb_pci_driver = {
.name = "tgafb",
.id_table = tgafb_pci_table,
.probe = tgafb_pci_register,
- .remove = __devexit_p(tgafb_pci_unregister),
+ .remove = tgafb_pci_unregister,
};
-static int __devinit
-tgafb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int tgafb_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
return tgafb_register(&pdev->dev);
}
-static void __devexit
-tgafb_pci_unregister(struct pci_dev *pdev)
+static void tgafb_pci_unregister(struct pci_dev *pdev)
{
tgafb_unregister(&pdev->dev);
}
@@ -127,8 +125,8 @@ tgafb_pci_unregister(struct pci_dev *pdev)
/*
* TC registration operations
*/
-static int __devinit tgafb_tc_register(struct device *);
-static int __devexit tgafb_tc_unregister(struct device *);
+static int tgafb_tc_register(struct device *);
+static int tgafb_tc_unregister(struct device *);
static struct tc_device_id const tgafb_tc_table[] = {
{ "DEC ", "PMAGD-AA" },
@@ -143,12 +141,11 @@ static struct tc_driver tgafb_tc_driver = {
.name = "tgafb",
.bus = &tc_bus_type,
.probe = tgafb_tc_register,
- .remove = __devexit_p(tgafb_tc_unregister),
+ .remove = tgafb_tc_unregister,
},
};
-static int __devinit
-tgafb_tc_register(struct device *dev)
+static int tgafb_tc_register(struct device *dev)
{
int status = tgafb_register(dev);
if (!status)
@@ -156,8 +153,7 @@ tgafb_tc_register(struct device *dev)
return status;
}
-static int __devexit
-tgafb_tc_unregister(struct device *dev)
+static int tgafb_tc_unregister(struct device *dev)
{
put_device(dev);
tgafb_unregister(dev);
@@ -1546,8 +1542,7 @@ static int tgafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info
return 0;
}
-static int __devinit
-tgafb_register(struct device *dev)
+static int tgafb_register(struct device *dev)
{
static const struct fb_videomode modedb_tc = {
/* 1280x1024 @ 72 Hz, 76.8 kHz hsync */
@@ -1692,8 +1687,7 @@ tgafb_register(struct device *dev)
return ret;
}
-static void __devexit
-tgafb_unregister(struct device *dev)
+static void tgafb_unregister(struct device *dev)
{
resource_size_t bar0_start = 0, bar0_len = 0;
int tga_bus_pci = TGA_BUS_PCI(dev);
@@ -1721,16 +1715,14 @@ tgafb_unregister(struct device *dev)
framebuffer_release(info);
}
-static void __devexit
-tgafb_exit(void)
+static void tgafb_exit(void)
{
tc_unregister_driver(&tgafb_tc_driver);
pci_unregister_driver(&tgafb_pci_driver);
}
#ifndef MODULE
-static int __devinit
-tgafb_setup(char *arg)
+static int tgafb_setup(char *arg)
{
char *this_opt;
@@ -1751,8 +1743,7 @@ tgafb_setup(char *arg)
}
#endif /* !MODULE */
-static int __devinit
-tgafb_init(void)
+static int tgafb_init(void)
{
int status;
#ifndef MODULE
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index b244f060f151..dc4fb8620156 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -191,7 +191,7 @@
#define LCR_VCLKHW 0x1b4 /* VCLK High Width */
#define LCR_OC 0x1b6 /* Output Control */
-static char *mode_option __devinitdata;
+static char *mode_option;
struct tmiofb_par {
u32 pseudo_palette[16];
@@ -675,7 +675,7 @@ static struct fb_ops tmiofb_ops = {
/*--------------------------------------------------------------------------*/
-static int __devinit tmiofb_probe(struct platform_device *dev)
+static int tmiofb_probe(struct platform_device *dev)
{
const struct mfd_cell *cell = mfd_get_cell(dev);
struct tmio_fb_data *data = dev->dev.platform_data;
@@ -807,7 +807,7 @@ err_ioremap_ccr:
return retval;
}
-static int __devexit tmiofb_remove(struct platform_device *dev)
+static int tmiofb_remove(struct platform_device *dev)
{
const struct mfd_cell *cell = mfd_get_cell(dev);
struct fb_info *info = platform_get_drvdata(dev);
@@ -1002,7 +1002,7 @@ static struct platform_driver tmiofb_driver = {
.driver.name = "tmio-fb",
.driver.owner = THIS_MODULE,
.probe = tmiofb_probe,
- .remove = __devexit_p(tmiofb_remove),
+ .remove = tmiofb_remove,
.suspend = tmiofb_suspend,
.resume = tmiofb_resume,
};
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 34cf019bba44..ab57d387d6b5 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -53,19 +53,19 @@ static struct fb_fix_screeninfo tridentfb_fix = {
/* defaults which are normally overridden by user values */
/* video mode */
-static char *mode_option __devinitdata = "640x480-8@60";
-static int bpp __devinitdata = 8;
+static char *mode_option = "640x480-8@60";
+static int bpp = 8;
-static int noaccel __devinitdata;
+static int noaccel;
static int center;
static int stretch;
-static int fp __devinitdata;
-static int crt __devinitdata;
+static int fp;
+static int crt;
-static int memsize __devinitdata;
-static int memdiff __devinitdata;
+static int memsize;
+static int memdiff;
static int nativex;
module_param(mode_option, charp, 0);
@@ -637,7 +637,7 @@ static inline void crtc_unlock(struct tridentfb_par *par)
}
/* Return flat panel's maximum x resolution */
-static int __devinit get_nativex(struct tridentfb_par *par)
+static int get_nativex(struct tridentfb_par *par)
{
int x, y, tmp;
@@ -771,7 +771,7 @@ static void set_number_of_lines(struct tridentfb_par *par, int lines)
* If we see that FP is active we assume we have one.
* Otherwise we have a CRT display. User can override.
*/
-static int __devinit is_flatpanel(struct tridentfb_par *par)
+static int is_flatpanel(struct tridentfb_par *par)
{
if (fp)
return 1;
@@ -781,7 +781,7 @@ static int __devinit is_flatpanel(struct tridentfb_par *par)
}
/* Try detecting the video memory size */
-static unsigned int __devinit get_memsize(struct tridentfb_par *par)
+static unsigned int get_memsize(struct tridentfb_par *par)
{
unsigned char tmp, tmp2;
unsigned int k;
@@ -1331,8 +1331,8 @@ static struct fb_ops tridentfb_ops = {
.fb_sync = tridentfb_sync,
};
-static int __devinit trident_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int trident_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
int err;
unsigned char revision;
@@ -1543,7 +1543,7 @@ out_unmap1:
return err;
}
-static void __devexit trident_pci_remove(struct pci_dev *dev)
+static void trident_pci_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct tridentfb_par *par = info->par;
@@ -1591,7 +1591,7 @@ static struct pci_driver tridentfb_pci_driver = {
.name = "tridentfb",
.id_table = trident_devices,
.probe = trident_pci_probe,
- .remove = __devexit_p(trident_pci_remove)
+ .remove = trident_pci_remove,
};
/*
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 2f8f82d874a1..b75db0186488 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -36,26 +36,26 @@ static struct cb_id uvesafb_cn_id = {
static char v86d_path[PATH_MAX] = "/sbin/v86d";
static char v86d_started; /* has v86d been started by uvesafb? */
-static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
+static struct fb_fix_screeninfo uvesafb_fix = {
.id = "VESA VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
.visual = FB_VISUAL_TRUECOLOR,
};
-static int mtrr __devinitdata = 3; /* enable mtrr by default */
-static bool blank = 1; /* enable blanking by default */
-static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
-static bool pmi_setpal __devinitdata = true; /* use PMI for palette changes */
-static bool nocrtc __devinitdata; /* ignore CRTC settings */
-static bool noedid __devinitdata; /* don't try DDC transfers */
-static int vram_remap __devinitdata; /* set amt. of memory to be used */
-static int vram_total __devinitdata; /* set total amount of memory */
-static u16 maxclk __devinitdata; /* maximum pixel clock */
-static u16 maxvf __devinitdata; /* maximum vertical frequency */
-static u16 maxhf __devinitdata; /* maximum horizontal frequency */
-static u16 vbemode __devinitdata; /* force use of a specific VBE mode */
-static char *mode_option __devinitdata;
+static int mtrr = 3; /* enable mtrr by default */
+static bool blank = 1; /* enable blanking by default */
+static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
+static bool pmi_setpal = true; /* use PMI for palette changes */
+static bool nocrtc; /* ignore CRTC settings */
+static bool noedid; /* don't try DDC transfers */
+static int vram_remap; /* set amt. of memory to be used */
+static int vram_total; /* set total amount of memory */
+static u16 maxclk; /* maximum pixel clock */
+static u16 maxvf; /* maximum vertical frequency */
+static u16 maxhf; /* maximum horizontal frequency */
+static u16 vbemode; /* force use of a specific VBE mode */
+static char *mode_option;
static u8 dac_width = 6;
static struct uvesafb_ktask *uvfb_tasks[UVESAFB_TASKS_MAX];
@@ -418,8 +418,8 @@ static void uvesafb_vbe_state_restore(struct uvesafb_par *par, u8 *state_buf)
uvesafb_free(task);
}
-static int __devinit uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
- struct uvesafb_par *par)
+static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
+ struct uvesafb_par *par)
{
int err;
@@ -477,8 +477,8 @@ static int __devinit uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
return 0;
}
-static int __devinit uvesafb_vbe_getmodes(struct uvesafb_ktask *task,
- struct uvesafb_par *par)
+static int uvesafb_vbe_getmodes(struct uvesafb_ktask *task,
+ struct uvesafb_par *par)
{
int off = 0, err;
u16 *mode;
@@ -556,8 +556,8 @@ static int __devinit uvesafb_vbe_getmodes(struct uvesafb_ktask *task,
* x86 and not x86_64.
*/
#ifdef CONFIG_X86_32
-static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
- struct uvesafb_par *par)
+static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
+ struct uvesafb_par *par)
{
int i, err;
@@ -602,8 +602,8 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
* Check whether a video mode is supported by the Video BIOS and is
* compatible with the monitor limits.
*/
-static int __devinit uvesafb_is_valid_mode(struct fb_videomode *mode,
- struct fb_info *info)
+static int uvesafb_is_valid_mode(struct fb_videomode *mode,
+ struct fb_info *info)
{
if (info->monspecs.gtf) {
fb_videomode_to_var(&info->var, mode);
@@ -618,8 +618,7 @@ static int __devinit uvesafb_is_valid_mode(struct fb_videomode *mode,
return 1;
}
-static int __devinit uvesafb_vbe_getedid(struct uvesafb_ktask *task,
- struct fb_info *info)
+static int uvesafb_vbe_getedid(struct uvesafb_ktask *task, struct fb_info *info)
{
struct uvesafb_par *par = info->par;
int err = 0;
@@ -684,8 +683,8 @@ static int __devinit uvesafb_vbe_getedid(struct uvesafb_ktask *task,
return err;
}
-static void __devinit uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task,
- struct fb_info *info)
+static void uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task,
+ struct fb_info *info)
{
struct uvesafb_par *par = info->par;
int i;
@@ -765,8 +764,8 @@ static void __devinit uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task,
return;
}
-static void __devinit uvesafb_vbe_getstatesize(struct uvesafb_ktask *task,
- struct uvesafb_par *par)
+static void uvesafb_vbe_getstatesize(struct uvesafb_ktask *task,
+ struct uvesafb_par *par)
{
int err;
@@ -794,7 +793,7 @@ static void __devinit uvesafb_vbe_getstatesize(struct uvesafb_ktask *task,
par->vbe_state_size = 64 * (task->t.regs.ebx & 0xffff);
}
-static int __devinit uvesafb_vbe_init(struct fb_info *info)
+static int uvesafb_vbe_init(struct fb_info *info)
{
struct uvesafb_ktask *task = NULL;
struct uvesafb_par *par = info->par;
@@ -839,7 +838,7 @@ out: uvesafb_free(task);
return err;
}
-static int __devinit uvesafb_vbe_init_mode(struct fb_info *info)
+static int uvesafb_vbe_init_mode(struct fb_info *info)
{
struct list_head *pos;
struct fb_modelist *modelist;
@@ -1444,8 +1443,7 @@ static struct fb_ops uvesafb_ops = {
.fb_set_par = uvesafb_set_par,
};
-static void __devinit uvesafb_init_info(struct fb_info *info,
- struct vbe_mode_ib *mode)
+static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
{
unsigned int size_vmode;
unsigned int size_remap;
@@ -1540,7 +1538,7 @@ static void __devinit uvesafb_init_info(struct fb_info *info,
info->fbops->fb_pan_display = NULL;
}
-static void __devinit uvesafb_init_mtrr(struct fb_info *info)
+static void uvesafb_init_mtrr(struct fb_info *info)
{
#ifdef CONFIG_MTRR
if (mtrr && !(info->fix.smem_start & (PAGE_SIZE - 1))) {
@@ -1582,7 +1580,7 @@ static void __devinit uvesafb_init_mtrr(struct fb_info *info)
#endif /* CONFIG_MTRR */
}
-static void __devinit uvesafb_ioremap(struct fb_info *info)
+static void uvesafb_ioremap(struct fb_info *info)
{
#ifdef CONFIG_X86
switch (mtrr) {
@@ -1738,7 +1736,7 @@ static struct attribute_group uvesafb_dev_attgrp = {
.attrs = uvesafb_dev_attrs,
};
-static int __devinit uvesafb_probe(struct platform_device *dev)
+static int uvesafb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct vbe_mode_ib *mode = NULL;
@@ -1882,7 +1880,7 @@ static struct platform_driver uvesafb_driver = {
static struct platform_device *uvesafb_device;
#ifndef MODULE
-static int __devinit uvesafb_setup(char *options)
+static int uvesafb_setup(char *options)
{
char *this_opt;
@@ -1950,7 +1948,7 @@ static ssize_t store_v86d(struct device_driver *dev, const char *buf,
static DRIVER_ATTR(v86d, S_IRUGO | S_IWUSR, show_v86d, store_v86d);
-static int __devinit uvesafb_init(void)
+static int uvesafb_init(void)
{
int err;
@@ -1994,7 +1992,7 @@ static int __devinit uvesafb_init(void)
module_init(uvesafb_init);
-static void __devexit uvesafb_exit(void)
+static void uvesafb_exit(void)
{
struct uvesafb_ktask *task;
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 4709edc3cb7f..0aa516fc59cd 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -393,7 +393,7 @@ static void vmlfb_release_devices(struct vml_par *par)
* Free up allocated resources for a device.
*/
-static void __devexit vml_pci_remove(struct pci_dev *dev)
+static void vml_pci_remove(struct pci_dev *dev)
{
struct fb_info *info;
struct vml_info *vinfo;
@@ -452,8 +452,7 @@ static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo *var)
* struct per pipe. Currently we have only one pipe.
*/
-static int __devinit vml_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vml_info *vinfo;
struct fb_info *info;
@@ -1060,7 +1059,7 @@ static struct pci_driver vmlfb_pci_driver = {
.name = "vmlfb",
.id_table = vml_ids,
.probe = vml_pci_probe,
- .remove = __devexit_p(vml_pci_remove)
+ .remove = vml_pci_remove,
};
static void __exit vmlfb_cleanup(void)
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index c7f692525b88..8bc1f9398945 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size)
vfree(mem);
}
-static struct fb_var_screeninfo vfb_default __devinitdata = {
+static struct fb_var_screeninfo vfb_default = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo vfb_fix __devinitdata = {
+static struct fb_fix_screeninfo vfb_fix = {
.id = "Virtual FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -477,7 +477,7 @@ static int __init vfb_setup(char *options)
* Initialisation
*/
-static int __devinit vfb_probe(struct platform_device *dev)
+static int vfb_probe(struct platform_device *dev)
{
struct fb_info *info;
int retval = -ENOMEM;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 0267acd8dc83..545faeccdb44 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -65,7 +65,7 @@ struct vga16fb_par {
/* --------------------------------------------------------------------- */
-static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
+static struct fb_var_screeninfo vga16fb_defined = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
};
/* name should not depend on EGA/VGA */
-static struct fb_fix_screeninfo vga16fb_fix __devinitdata = {
+static struct fb_fix_screeninfo vga16fb_fix = {
.id = "VGA16 VGA",
.smem_start = VGA_FB_PHYS,
.smem_len = VGA_FB_PHYS_LEN,
@@ -1303,7 +1303,7 @@ static int __init vga16fb_setup(char *options)
}
#endif
-static int __devinit vga16fb_probe(struct platform_device *dev)
+static int vga16fb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct vga16fb_par *par;
@@ -1395,7 +1395,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
return ret;
}
-static int __devexit vga16fb_remove(struct platform_device *dev)
+static int vga16fb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -1407,7 +1407,7 @@ static int __devexit vga16fb_remove(struct platform_device *dev)
static struct platform_driver vga16fb_driver = {
.probe = vga16fb_probe,
- .remove = __devexit_p(vga16fb_remove),
+ .remove = vga16fb_remove,
.driver = {
.name = "vga16fb",
},
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index 6be72f0ba21d..7789553952d3 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -25,7 +25,7 @@
static void tmds_register_write(int index, u8 data);
static int tmds_register_read(int index);
static int tmds_register_read_bytes(int index, u8 *buff, int buff_len);
-static void __devinit dvi_get_panel_size_from_DDCv1(
+static void dvi_get_panel_size_from_DDCv1(
struct tmds_chip_information *tmds_chip,
struct tmds_setting_information *tmds_setting);
static int viafb_dvi_query_EDID(void);
@@ -35,8 +35,8 @@ static inline bool check_tmds_chip(int device_id_subaddr, int device_id)
return tmds_register_read(device_id_subaddr) == device_id;
}
-void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
- struct tmds_setting_information *tmds_setting)
+void viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
+ struct tmds_setting_information *tmds_setting)
{
DEBUG_MSG(KERN_INFO "viafb_init_dvi_size()\n");
@@ -47,7 +47,7 @@ void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
return;
}
-bool __devinit viafb_tmds_trasmitter_identify(void)
+bool viafb_tmds_trasmitter_identify(void)
{
unsigned char sr2a = 0, sr1e = 0, sr3e = 0;
@@ -285,7 +285,7 @@ static int viafb_dvi_query_EDID(void)
}
/* Get Panel Size Using EDID1 Table */
-static void __devinit dvi_get_panel_size_from_DDCv1(
+static void dvi_get_panel_size_from_DDCv1(
struct tmds_chip_information *tmds_chip,
struct tmds_setting_information *tmds_setting)
{
diff --git a/drivers/video/via/dvi.h b/drivers/video/via/dvi.h
index db757850c216..4c6bfba57d11 100644
--- a/drivers/video/via/dvi.h
+++ b/drivers/video/via/dvi.h
@@ -56,8 +56,8 @@
int viafb_dvi_sense(void);
void viafb_dvi_disable(void);
void viafb_dvi_enable(void);
-bool __devinit viafb_tmds_trasmitter_identify(void);
-void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
+bool viafb_tmds_trasmitter_identify(void);
+void viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
struct tmds_setting_information *tmds_setting);
void viafb_dvi_set_mode(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres, int iga);
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 898590db5e14..80233dae358a 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -465,9 +465,9 @@ static struct via_device_mapping device_mapping[] = {
static struct via_clock clock;
static void load_fix_bit_crtc_reg(void);
-static void __devinit init_gfx_chip_info(int chip_type);
-static void __devinit init_tmds_chip_info(void);
-static void __devinit init_lvds_chip_info(void);
+static void init_gfx_chip_info(int chip_type);
+static void init_tmds_chip_info(void);
+static void init_lvds_chip_info(void);
static void device_screen_off(void);
static void device_screen_on(void);
static void set_display_channel(void);
@@ -1507,7 +1507,7 @@ void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
viafb_set_vclock(PICOS2KHZ(var->pixclock) * 1000, iga);
}
-void __devinit viafb_init_chip_info(int chip_type)
+void viafb_init_chip_info(int chip_type)
{
via_clock_init(&clock, chip_type);
init_gfx_chip_info(chip_type);
@@ -1540,7 +1540,7 @@ void viafb_update_device_setting(int hres, int vres, int bpp, int flag)
}
}
-static void __devinit init_gfx_chip_info(int chip_type)
+static void init_gfx_chip_info(int chip_type)
{
u8 tmp;
@@ -1593,7 +1593,7 @@ static void __devinit init_gfx_chip_info(int chip_type)
}
}
-static void __devinit init_tmds_chip_info(void)
+static void init_tmds_chip_info(void)
{
viafb_tmds_trasmitter_identify();
@@ -1638,7 +1638,7 @@ static void __devinit init_tmds_chip_info(void)
&viaparinfo->shared->tmds_setting_info);
}
-static void __devinit init_lvds_chip_info(void)
+static void init_lvds_chip_info(void)
{
viafb_lvds_trasmitter_identify();
viafb_init_lcd_size();
@@ -1672,7 +1672,7 @@ static void __devinit init_lvds_chip_info(void)
viaparinfo->chip_info->lvds_chip_info.output_interface);
}
-void __devinit viafb_init_dac(int set_iga)
+void viafb_init_dac(int set_iga)
{
int i;
u8 tmp;
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index 6be243cfc823..a8205754c736 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -663,8 +663,8 @@ void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
int viafb_setmode(void);
void viafb_fill_var_timing_info(struct fb_var_screeninfo *var,
const struct fb_videomode *mode);
-void __devinit viafb_init_chip_info(int chip_type);
-void __devinit viafb_init_dac(int set_iga);
+void viafb_init_chip_info(int chip_type);
+void viafb_init_dac(int set_iga);
int viafb_get_refresh(int hres, int vres, u32 float_refresh);
void viafb_update_device_setting(int hres, int vres, int bpp, int flag);
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 165037910536..980ee1b1dcf3 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -49,7 +49,7 @@ static struct _lcd_scaling_factor lcd_scaling_factor_CLE = {
};
static bool lvds_identify_integratedlvds(void);
-static void __devinit fp_id_to_vindex(int panel_id);
+static void fp_id_to_vindex(int panel_id);
static int lvds_register_read(int index);
static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
int panel_vres);
@@ -81,7 +81,7 @@ static inline bool check_lvds_chip(int device_id_subaddr, int device_id)
return lvds_register_read(device_id_subaddr) == device_id;
}
-void __devinit viafb_init_lcd_size(void)
+void viafb_init_lcd_size(void)
{
DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n");
@@ -139,7 +139,7 @@ static bool lvds_identify_integratedlvds(void)
return true;
}
-bool __devinit viafb_lvds_trasmitter_identify(void)
+bool viafb_lvds_trasmitter_identify(void)
{
if (viafb_lvds_identify_vt1636(VIA_PORT_31)) {
viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31;
@@ -180,7 +180,7 @@ bool __devinit viafb_lvds_trasmitter_identify(void)
return false;
}
-static void __devinit fp_id_to_vindex(int panel_id)
+static void fp_id_to_vindex(int panel_id)
{
DEBUG_MSG(KERN_INFO "fp_get_panel_id()\n");
@@ -914,7 +914,7 @@ static void check_diport_of_integrated_lvds(
plvds_chip_info->output_interface);
}
-void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information
+void viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info)
diff --git a/drivers/video/via/lcd.h b/drivers/video/via/lcd.h
index 8f3e4e06156c..5c988a063ad5 100644
--- a/drivers/video/via/lcd.h
+++ b/drivers/video/via/lcd.h
@@ -71,15 +71,15 @@ void viafb_enable_lvds_vt1636(struct lvds_setting_information
struct lvds_chip_information *plvds_chip_info);
void viafb_lcd_disable(void);
void viafb_lcd_enable(void);
-void __devinit viafb_init_lcd_size(void);
-void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information
+void viafb_init_lcd_size(void);
+void viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info);
void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
u16 cyres, struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information *plvds_chip_info);
-bool __devinit viafb_lvds_trasmitter_identify(void);
+bool viafb_lvds_trasmitter_identify(void);
void viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
diff --git a/drivers/video/via/via-core.c b/drivers/video/via/via-core.c
index dd58b530c0df..6e274825fb31 100644
--- a/drivers/video/via/via-core.c
+++ b/drivers/video/via/via-core.c
@@ -80,7 +80,7 @@ static inline int viafb_mmio_read(int reg)
*/
static u32 viafb_enabled_ints;
-static void __devinit viafb_int_init(void)
+static void viafb_int_init(void)
{
viafb_enabled_ints = 0;
@@ -475,7 +475,7 @@ static int viafb_get_fb_size_from_pci(int chip_type)
/*
* Figure out and map our MMIO regions.
*/
-static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
+static int via_pci_setup_mmio(struct viafb_dev *vdev)
{
int ret;
/*
@@ -550,8 +550,8 @@ static struct viafb_subdev_info {
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
-static int __devinit via_create_subdev(struct viafb_dev *vdev,
- struct viafb_subdev_info *info)
+static int via_create_subdev(struct viafb_dev *vdev,
+ struct viafb_subdev_info *info)
{
int ret;
@@ -573,7 +573,7 @@ static int __devinit via_create_subdev(struct viafb_dev *vdev,
return ret;
}
-static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
+static int via_setup_subdevs(struct viafb_dev *vdev)
{
int i;
@@ -671,8 +671,7 @@ static int via_resume(struct pci_dev *pdev)
}
#endif /* CONFIG_PM */
-static int __devinit via_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret;
@@ -716,7 +715,7 @@ out_disable:
return ret;
}
-static void __devexit via_pci_remove(struct pci_dev *pdev)
+static void via_pci_remove(struct pci_dev *pdev)
{
via_teardown_subdevs();
via_fb_pci_remove(pdev);
@@ -725,7 +724,7 @@ static void __devexit via_pci_remove(struct pci_dev *pdev)
}
-static struct pci_device_id via_pci_table[] __devinitdata = {
+static struct pci_device_id via_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
.driver_data = UNICHROME_CLE266 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
@@ -760,7 +759,7 @@ static struct pci_driver via_driver = {
.name = "viafb",
.id_table = via_pci_table,
.probe = via_pci_probe,
- .remove = __devexit_p(via_pci_remove),
+ .remove = via_pci_remove,
#ifdef CONFIG_PM
.suspend = via_suspend,
.resume = via_resume,
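
The via-core conversion above is representative of the whole series: the CONFIG_HOTPLUG section annotations (__devinit, __devexit, __devinitdata) and the __devexit_p() wrapper are dropped, leaving plain functions and arrays. A minimal sketch of the resulting driver shape, using a hypothetical "example" PCI driver (placeholder IDs and names, not viafb itself):

#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return pci_enable_device(pdev);		/* no __devinit marker */
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);		/* no __devexit marker */
}

static struct pci_device_id example_ids[] = {	/* no __devinitdata */
	{ PCI_DEVICE(0x1106, 0x0001) },		/* placeholder IDs for illustration */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,	/* plain pointer, no __devexit_p() */
};
module_pci_driver(example_driver);
MODULE_LICENSE("GPL");
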
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/via/via-gpio.c
index d69cfef7c338..e408679081ab 100644
--- a/drivers/video/via/via-gpio.c
+++ b/drivers/video/via/via-gpio.c
@@ -212,7 +212,7 @@ EXPORT_SYMBOL_GPL(viafb_gpio_lookup);
/*
* Platform device stuff.
*/
-static __devinit int viafb_gpio_probe(struct platform_device *platdev)
+static int viafb_gpio_probe(struct platform_device *platdev)
{
struct viafb_dev *vdev = platdev->dev.platform_data;
struct via_port_cfg *port_cfg = vdev->port_cfg;
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index c80e770e1800..325c43c6ff97 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1072,7 +1072,7 @@ static int __init parse_active_dev(void)
return 0;
}
-static int __devinit parse_port(char *opt_str, int *output_interface)
+static int parse_port(char *opt_str, int *output_interface)
{
if (!strncmp(opt_str, "DVP0", 4))
*output_interface = INTERFACE_DVP0;
@@ -1089,7 +1089,7 @@ static int __devinit parse_port(char *opt_str, int *output_interface)
return 0;
}
-static void __devinit parse_lcd_port(void)
+static void parse_lcd_port(void)
{
parse_port(viafb_lcd_port, &viaparinfo->chip_info->lvds_chip_info.
output_interface);
@@ -1102,7 +1102,7 @@ static void __devinit parse_lcd_port(void)
output_interface);
}
-static void __devinit parse_dvi_port(void)
+static void parse_dvi_port(void)
{
parse_port(viafb_dvi_port, &viaparinfo->chip_info->tmds_chip_info.
output_interface);
@@ -1727,7 +1727,7 @@ static struct viafb_pm_hooks viafb_fb_pm_hooks = {
#endif
-static void __devinit i2c_bus_probe(struct viafb_shared *shared)
+static void i2c_bus_probe(struct viafb_shared *shared)
{
/* should be always CRT */
printk(KERN_INFO "viafb: Probing I2C bus 0x26\n");
@@ -1753,7 +1753,7 @@ static void i2c_bus_free(struct viafb_shared *shared)
via_aux_free(shared->i2c_2C);
}
-int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
+int via_fb_pci_probe(struct viafb_dev *vdev)
{
u32 default_xres, default_yres;
struct fb_var_screeninfo default_var;
@@ -1945,7 +1945,7 @@ out_fb_release:
return rc;
}
-void __devexit via_fb_pci_remove(struct pci_dev *pdev)
+void via_fb_pci_remove(struct pci_dev *pdev)
{
DEBUG_MSG(KERN_INFO "via_pci_remove!\n");
fb_dealloc_cmap(&viafbinfo->cmap);
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 9af8da70e781..aa2579c2364a 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -273,7 +273,7 @@ static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit vt8500lcd_probe(struct platform_device *pdev)
+static int vt8500lcd_probe(struct platform_device *pdev)
{
struct vt8500lcd_info *fbi;
struct resource *res;
@@ -469,7 +469,7 @@ failed:
return ret;
}
-static int __devexit vt8500lcd_remove(struct platform_device *pdev)
+static int vt8500lcd_remove(struct platform_device *pdev)
{
struct vt8500lcd_info *fbi = platform_get_drvdata(pdev);
struct resource *res;
@@ -505,7 +505,7 @@ static const struct of_device_id via_dt_ids[] = {
static struct platform_driver vt8500lcd_driver = {
.probe = vt8500lcd_probe,
- .remove = __devexit_p(vt8500lcd_remove),
+ .remove = vt8500lcd_remove,
.driver = {
.owner = THIS_MODULE,
.name = "vt8500-lcd",
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 4e74d262cf3e..e9557fa014ee 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -660,7 +660,7 @@ static struct fb_ops vt8623fb_ops = {
/* PCI probe */
-static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pci_bus_region bus_reg;
struct resource vga_res;
@@ -807,7 +807,7 @@ err_enable_device:
/* PCI remove */
-static void __devexit vt8623_pci_remove(struct pci_dev *dev)
+static void vt8623_pci_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
@@ -906,7 +906,7 @@ fail:
/* List of boards that we are trying to support */
-static struct pci_device_id vt8623_devices[] __devinitdata = {
+static struct pci_device_id vt8623_devices[] = {
{PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3122)},
{0, 0, 0, 0, 0, 0, 0}
};
@@ -917,7 +917,7 @@ static struct pci_driver vt8623fb_pci_driver = {
.name = "vt8623fb",
.id_table = vt8623_devices,
.probe = vt8623_pci_probe,
- .remove = __devexit_p(vt8623_pci_remove),
+ .remove = vt8623_pci_remove,
.suspend = vt8623_pci_suspend,
.resume = vt8623_pci_resume,
};
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 2f6b2b835f88..7a299e951f75 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -54,7 +54,7 @@ static void w100_update_enable(void);
static void w100_update_disable(void);
static void calc_hsync(struct w100fb_par *par);
static void w100_init_graphic_engine(struct w100fb_par *par);
-struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;
+struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
/* Pseudo palette size */
#define MAX_PALETTES 16
@@ -630,7 +630,7 @@ static int w100fb_resume(struct platform_device *dev)
#endif
-int __devinit w100fb_probe(struct platform_device *pdev)
+int w100fb_probe(struct platform_device *pdev)
{
int err = -EIO;
struct w100fb_mach_info *inf;
@@ -783,7 +783,7 @@ out:
}
-static int __devexit w100fb_remove(struct platform_device *pdev)
+static int w100fb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
struct w100fb_par *par=info->par;
@@ -1021,7 +1021,7 @@ static struct pll_entries {
{ 0 },
};
-struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
+struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
{
struct pll_entries *pll_entry = w100_pll_tables;
@@ -1624,7 +1624,7 @@ static void w100_vsync(void)
static struct platform_driver w100fb_driver = {
.probe = w100fb_probe,
- .remove = __devexit_p(w100fb_remove),
+ .remove = w100fb_remove,
.suspend = w100fb_suspend,
.resume = w100fb_resume,
.driver = {
diff --git a/drivers/video/wm8505fb.c b/drivers/video/wm8505fb.c
index 77539c1b56a0..4dd0580f96fd 100644
--- a/drivers/video/wm8505fb.c
+++ b/drivers/video/wm8505fb.c
@@ -260,7 +260,7 @@ static struct fb_ops wm8505fb_ops = {
.fb_blank = wm8505fb_blank,
};
-static int __devinit wm8505fb_probe(struct platform_device *pdev)
+static int wm8505fb_probe(struct platform_device *pdev)
{
struct wm8505fb_info *fbi;
struct resource *res;
@@ -431,7 +431,7 @@ failed:
return ret;
}
-static int __devexit wm8505fb_remove(struct platform_device *pdev)
+static int wm8505fb_remove(struct platform_device *pdev)
{
struct wm8505fb_info *fbi = platform_get_drvdata(pdev);
struct resource *res;
@@ -462,7 +462,7 @@ static const struct of_device_id wmt_dt_ids[] = {
static struct platform_driver wm8505fb_driver = {
.probe = wm8505fb_probe,
- .remove = __devexit_p(wm8505fb_remove),
+ .remove = wm8505fb_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
diff --git a/drivers/video/wmt_ge_rops.c b/drivers/video/wmt_ge_rops.c
index ba025b4c7d09..4aaeb18223bc 100644
--- a/drivers/video/wmt_ge_rops.c
+++ b/drivers/video/wmt_ge_rops.c
@@ -124,7 +124,7 @@ int wmt_ge_sync(struct fb_info *p)
}
EXPORT_SYMBOL_GPL(wmt_ge_sync);
-static int __devinit wmt_ge_rops_probe(struct platform_device *pdev)
+static int wmt_ge_rops_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -152,7 +152,7 @@ static int __devinit wmt_ge_rops_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit wmt_ge_rops_remove(struct platform_device *pdev)
+static int wmt_ge_rops_remove(struct platform_device *pdev)
{
iounmap(regbase);
return 0;
@@ -165,7 +165,7 @@ static const struct of_device_id wmt_dt_ids[] = {
static struct platform_driver wmt_ge_rops_driver = {
.probe = wmt_ge_rops_probe,
- .remove = __devexit_p(wmt_ge_rops_remove),
+ .remove = wmt_ge_rops_remove,
.driver = {
.owner = THIS_MODULE,
.name = "wmt_ge_rops",
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 917bb5681684..cd005c227a23 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -358,8 +358,8 @@ static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit xenfb_probe(struct xenbus_device *dev,
- const struct xenbus_device_id *id)
+static int xenfb_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
{
struct xenfb_info *info;
struct fb_info *fb_info;
@@ -487,8 +487,7 @@ error:
return ret;
}
-static __devinit void
-xenfb_make_preferred_console(void)
+static void xenfb_make_preferred_console(void)
{
struct console *c;
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 18084525402a..af0b4fdf9aa9 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -403,7 +403,7 @@ static int xilinxfb_release(struct device *dev)
* OF bus binding
*/
-static int __devinit xilinxfb_of_probe(struct platform_device *op)
+static int xilinxfb_of_probe(struct platform_device *op)
{
const u32 *prop;
u32 *p;
@@ -485,13 +485,13 @@ static int __devinit xilinxfb_of_probe(struct platform_device *op)
return -ENODEV;
}
-static int __devexit xilinxfb_of_remove(struct platform_device *op)
+static int xilinxfb_of_remove(struct platform_device *op)
{
return xilinxfb_release(&op->dev);
}
/* Match table for of_platform binding */
-static struct of_device_id xilinxfb_of_match[] __devinitdata = {
+static struct of_device_id xilinxfb_of_match[] = {
{ .compatible = "xlnx,xps-tft-1.00.a", },
{ .compatible = "xlnx,xps-tft-2.00.a", },
{ .compatible = "xlnx,xps-tft-2.01.a", },
@@ -503,7 +503,7 @@ MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
static struct platform_driver xilinxfb_of_driver = {
.probe = xilinxfb_of_probe,
- .remove = __devexit_p(xilinxfb_of_remove),
+ .remove = xilinxfb_of_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 4939e0ccc4e5..d294f67d6f84 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -796,9 +796,6 @@ static int has_fsl_hypervisor(void)
struct device_node *node;
int ret;
- if (!(mfmsr() & MSR_GS))
- return 0;
-
node = of_find_node_by_path("/hypervisor");
if (!node)
return 0;
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 8d5bddb56cb1..c6683f2e396c 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -8,8 +8,8 @@ config VIRTIO
menu "Virtio drivers"
config VIRTIO_PCI
- tristate "PCI driver for virtio devices (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "PCI driver for virtio devices"
+ depends on PCI
select VIRTIO
---help---
This driver provides support for virtio based paravirtual device
@@ -32,8 +32,8 @@ config VIRTIO_BALLOON
If unsure, say M.
config VIRTIO_MMIO
- tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
- depends on HAS_IOMEM && EXPERIMENTAL
+ tristate "Platform bus driver for memory mapped virtio devices"
+ depends on HAS_IOMEM
select VIRTIO
---help---
This driver provides support for memory mapped virtio
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 809b0de59c09..ee59b74768d9 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -10,33 +10,32 @@ static DEFINE_IDA(virtio_index_ida);
static ssize_t device_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.device);
}
static ssize_t vendor_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
static ssize_t status_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
}
static ssize_t modalias_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-
+ struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
static ssize_t features_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
unsigned int i;
ssize_t len = 0;
@@ -71,10 +70,10 @@ static inline int virtio_id_match(const struct virtio_device *dev,
static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
{
unsigned int i;
- struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_dv);
const struct virtio_device_id *ids;
- ids = container_of(_dr, struct virtio_driver, driver)->id_table;
+ ids = drv_to_virtio(_dr)->id_table;
for (i = 0; ids[i].device; i++)
if (virtio_id_match(dev, &ids[i]))
return 1;
@@ -83,7 +82,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
{
- struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_dv);
return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
dev->id.device, dev->id.vendor);
@@ -98,8 +97,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
unsigned int fbit)
{
unsigned int i;
- struct virtio_driver *drv = container_of(vdev->dev.driver,
- struct virtio_driver, driver);
+ struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
for (i = 0; i < drv->feature_table_size; i++)
if (drv->feature_table[i] == fbit)
@@ -111,9 +109,8 @@ EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
static int virtio_dev_probe(struct device *_d)
{
int err, i;
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
- struct virtio_driver *drv = container_of(dev->dev.driver,
- struct virtio_driver, driver);
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
u32 device_features;
/* We have a driver! */
@@ -152,9 +149,8 @@ static int virtio_dev_probe(struct device *_d)
static int virtio_dev_remove(struct device *_d)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
- struct virtio_driver *drv = container_of(dev->dev.driver,
- struct virtio_driver, driver);
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
drv->remove(dev);
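
The virtio.c hunks above replace open-coded container_of() calls with dev_to_virtio() and drv_to_virtio(). Presumably these are thin container_of() wrappers declared in include/linux/virtio.h; a sketch of what the conversions assume (the in-tree definitions may differ in detail):

static inline struct virtio_device *dev_to_virtio(struct device *_dev)
{
	return container_of(_dev, struct virtio_device, dev);
}

static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
{
	return container_of(drv, struct virtio_driver, driver);
}
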
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 2a70558b36ea..797e1c79a104 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -139,10 +139,9 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
struct page *page = balloon_page_enqueue(vb_dev_info);
if (!page) {
- if (printk_ratelimit())
- dev_printk(KERN_INFO, &vb->vdev->dev,
- "Out of puff! Can't get %u pages\n",
- VIRTIO_BALLOON_PAGES_PER_PAGE);
+ dev_info_ratelimited(&vb->vdev->dev,
+ "Out of puff! Can't get %u pages\n",
+ VIRTIO_BALLOON_PAGES_PER_PAGE);
/* Sleep for at least 1/5 of a second before retry. */
msleep(200);
break;
@@ -501,7 +500,7 @@ static void remove_common(struct virtio_balloon *vb)
vb->vdev->config->del_vqs(vb->vdev);
}
-static void __devexit virtballoon_remove(struct virtio_device *vdev)
+static void virtballoon_remove(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -553,7 +552,7 @@ static struct virtio_driver virtio_balloon_driver = {
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtballoon_probe,
- .remove = __devexit_p(virtballoon_remove),
+ .remove = virtballoon_remove,
.config_changed = virtballoon_changed,
#ifdef CONFIG_PM
.freeze = virtballoon_freeze,
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 6b1b7e184939..31f966f4d27f 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq)
/* We write the queue's selector into the notification register to
* signal the other end */
- writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+ writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
}
/* Notify all virtqueues on an interrupt. */
@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
struct virtio_mmio_vq_info *info = vq->priv;
unsigned long flags, size;
- unsigned int index = virtqueue_get_queue_index(vq);
+ unsigned int index = vq->index;
spin_lock_irqsave(&vm_dev->lock, flags);
list_del(&info->node);
@@ -440,7 +440,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = {
/* Platform device */
-static int __devinit virtio_mmio_probe(struct platform_device *pdev)
+static int virtio_mmio_probe(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev;
struct resource *mem;
@@ -493,7 +493,7 @@ static int __devinit virtio_mmio_probe(struct platform_device *pdev)
return register_virtio_device(&vm_dev->vdev);
}
-static int __devexit virtio_mmio_remove(struct platform_device *pdev)
+static int virtio_mmio_remove(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
@@ -521,25 +521,33 @@ static int vm_cmdline_set(const char *device,
int err;
struct resource resources[2] = {};
char *str;
- long long int base;
+ long long int base, size;
+ unsigned int irq;
int processed, consumed = 0;
struct platform_device *pdev;
- resources[0].flags = IORESOURCE_MEM;
- resources[1].flags = IORESOURCE_IRQ;
-
- resources[0].end = memparse(device, &str) - 1;
+ /* Consume "size" part of the command line parameter */
+ size = memparse(device, &str);
+ /* Get "@<base>:<irq>[:<id>]" chunks */
processed = sscanf(str, "@%lli:%u%n:%d%n",
- &base, &resources[1].start, &consumed,
+ &base, &irq, &consumed,
&vm_cmdline_id, &consumed);
- if (processed < 2 || processed > 3 || str[consumed])
+ /*
+ * sscanf() must process at least 2 chunks; also there
+ * must be no extra characters after the last chunk, so
+ * str[consumed] must be '\0'
+ */
+ if (processed < 2 || str[consumed])
return -EINVAL;
+ resources[0].flags = IORESOURCE_MEM;
resources[0].start = base;
- resources[0].end += base;
- resources[1].end = resources[1].start;
+ resources[0].end = base + size - 1;
+
+ resources[1].flags = IORESOURCE_IRQ;
+ resources[1].start = resources[1].end = irq;
if (!vm_cmdline_parent_registered) {
err = device_register(&vm_cmdline_parent);
@@ -630,7 +638,7 @@ MODULE_DEVICE_TABLE(of, virtio_mmio_match);
static struct platform_driver virtio_mmio_driver = {
.probe = virtio_mmio_probe,
- .remove = __devexit_p(virtio_mmio_remove),
+ .remove = virtio_mmio_remove,
.driver = {
.name = "virtio-mmio",
.owner = THIS_MODULE,
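
For reference, the reworked vm_cmdline_set() keeps the parameter format that the sscanf pattern above implies, <size>@<base>:<irq>[:<id>]. Walking a hypothetical value such as virtio_mmio.device=1K@0x100b0000:42 through the new code: memparse() consumes "1K" and returns size = 1024, sscanf() then yields base = 0x100b0000 and irq = 42 (two chunks processed, no trailing characters), so the device ends up with a MEM resource of 0x100b0000-0x100b03ff and IRQ 42. The net arithmetic matches the old code; the difference is that size, base and irq are now held in properly typed locals instead of being accumulated directly in the resource fields.
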
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index c33aea36598a..0c142892c105 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq)
/* we write the queue's selector into the notification register to
* signal the other end */
- iowrite16(virtqueue_get_queue_index(vq),
- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}
/* Handle a configuration change: Tell driver if it wants to know. */
@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq)
list_del(&info->node);
spin_unlock_irqrestore(&vp_dev->lock, flags);
- iowrite16(virtqueue_get_queue_index(vq),
- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -678,8 +676,8 @@ static void virtio_pci_release_dev(struct device *_d)
}
/* the PCI probing function */
-static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *id)
+static int virtio_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
{
struct virtio_pci_device *vp_dev;
int err;
@@ -753,7 +751,7 @@ out:
return err;
}
-static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
+static void virtio_pci_remove(struct pci_dev *pci_dev)
{
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
@@ -824,22 +822,10 @@ static struct pci_driver virtio_pci_driver = {
.name = "virtio-pci",
.id_table = virtio_pci_id_table,
.probe = virtio_pci_probe,
- .remove = __devexit_p(virtio_pci_remove),
+ .remove = virtio_pci_remove,
#ifdef CONFIG_PM
.driver.pm = &virtio_pci_pm_ops,
#endif
};
-static int __init virtio_pci_init(void)
-{
- return pci_register_driver(&virtio_pci_driver);
-}
-
-module_init(virtio_pci_init);
-
-static void __exit virtio_pci_exit(void)
-{
- pci_unregister_driver(&virtio_pci_driver);
-}
-
-module_exit(virtio_pci_exit);
+module_pci_driver(virtio_pci_driver);
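
module_pci_driver() removes the hand-rolled init/exit pair. Roughly, and assuming the standard module_driver() expansion, the one-liner above generates the same code that was deleted:

/* approximate expansion of module_pci_driver(virtio_pci_driver) */
static int __init virtio_pci_driver_init(void)
{
	return pci_register_driver(&virtio_pci_driver);
}
module_init(virtio_pci_driver_init);

static void __exit virtio_pci_driver_exit(void)
{
	pci_unregister_driver(&virtio_pci_driver);
}
module_exit(virtio_pci_driver_exit);
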
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index e639584b2dbd..ffd7e7da5d3b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -93,8 +93,6 @@ struct vring_virtqueue
/* Host publishes avail event idx */
bool event;
- /* Number of free buffers */
- unsigned int num_free;
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
@@ -106,9 +104,6 @@ struct vring_virtqueue
/* How to notify other side. FIXME: commonalize hcalls! */
void (*notify)(struct virtqueue *vq);
- /* Index of the queue */
- int queue_index;
-
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
@@ -135,6 +130,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
unsigned head;
int i;
+ /*
+ * We require lowmem mappings for the descriptors because
+ * otherwise virt_to_phys will give us bogus addresses in the
+ * virtqueue.
+ */
+ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+
desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
if (!desc)
return -ENOMEM;
@@ -160,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
desc[i-1].next = 0;
/* We're about to use a buffer */
- vq->num_free--;
+ vq->vq.num_free--;
/* Use a single buffer which doesn't continue */
head = vq->free_head;
@@ -174,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-int virtqueue_get_queue_index(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->queue_index;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
-
/**
* virtqueue_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about.
@@ -193,10 +188,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns remaining capacity of queue or a negative error
- * (ie. ENOSPC). Note that it only really makes sense to treat all
- * positive return values as "available": indirect buffers mean that
- * we can put an entire sg[] array inside a single queue entry.
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
*/
int virtqueue_add_buf(struct virtqueue *_vq,
struct scatterlist sg[],
@@ -228,7 +220,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
- if (vq->indirect && (out + in) > 1 && vq->num_free) {
+ if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
head = vring_add_indirect(vq, sg, out, in, gfp);
if (likely(head >= 0))
goto add_head;
@@ -237,9 +229,9 @@ int virtqueue_add_buf(struct virtqueue *_vq,
BUG_ON(out + in > vq->vring.num);
BUG_ON(out + in == 0);
- if (vq->num_free < out + in) {
+ if (vq->vq.num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n",
- out + in, vq->num_free);
+ out + in, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */
@@ -250,7 +242,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
}
/* We're about to use some buffers from the free list. */
- vq->num_free -= out + in;
+ vq->vq.num_free -= out + in;
head = vq->free_head;
for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
@@ -296,7 +288,7 @@ add_head:
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
- return vq->num_free;
+ return 0;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
@@ -393,13 +385,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next;
- vq->num_free++;
+ vq->vq.num_free++;
}
vq->vring.desc[i].next = vq->free_head;
vq->free_head = head;
/* Plus final descriptor */
- vq->num_free++;
+ vq->vq.num_free++;
}
static inline bool more_used(const struct vring_virtqueue *vq)
@@ -599,7 +591,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
return buf;
}
/* That should have freed everything. */
- BUG_ON(vq->num_free != vq->vring.num);
+ BUG_ON(vq->vq.num_free != vq->vring.num);
END_USE(vq);
return NULL;
@@ -653,12 +645,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
+ vq->vq.num_free = num;
+ vq->vq.index = index;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
- vq->queue_index = index;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
@@ -673,7 +666,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
/* Put everything in free lists. */
- vq->num_free = num;
vq->free_head = 0;
for (i = 0; i < num-1; i++) {
vq->vring.desc[i].next = i+1;
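
Two related interface changes are visible in the virtio_ring.c hunks: the per-queue bookkeeping (index, num_free) moves from the private struct vring_virtqueue into the public struct virtqueue, which is why virtqueue_get_queue_index() can be deleted and callers read vq->index directly, and virtqueue_add_buf() now returns 0 on success instead of the remaining capacity. The fields these changes assume (the full definition lives in include/linux/virtio.h):

struct virtqueue {
	struct list_head list;
	void (*callback)(struct virtqueue *vq);
	const char *name;
	struct virtio_device *vdev;
	unsigned int index;	/* was vring_virtqueue.queue_index */
	unsigned int num_free;	/* was vring_virtqueue.num_free */
	void *priv;
};

Callers that treated a positive virtqueue_add_buf() return value as "free slots left" must now test only for a negative error.
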
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
index d874b4f34136..e01162046598 100644
--- a/drivers/vlynq/Kconfig
+++ b/drivers/vlynq/Kconfig
@@ -1,5 +1,5 @@
menu "TI VLYNQ"
- depends on AR7 && EXPERIMENTAL
+ depends on AR7
config VLYNQ
bool "TI VLYNQ bus support"
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
index aa250cebecd2..7b07135ab26e 100644
--- a/drivers/vlynq/vlynq.c
+++ b/drivers/vlynq/vlynq.c
@@ -772,7 +772,7 @@ static int vlynq_remove(struct platform_device *pdev)
static struct platform_driver vlynq_platform_driver = {
.driver.name = "vlynq",
.probe = vlynq_probe,
- .remove = __devexit_p(vlynq_remove),
+ .remove = vlynq_remove,
};
struct bus_type vlynq_bus_type = {
@@ -783,7 +783,7 @@ struct bus_type vlynq_bus_type = {
};
EXPORT_SYMBOL(vlynq_bus_type);
-static int __devinit vlynq_init(void)
+static int vlynq_init(void)
{
int res = 0;
@@ -803,7 +803,7 @@ fail_bus:
return res;
}
-static void __devexit vlynq_exit(void)
+static void vlynq_exit(void)
{
platform_driver_unregister(&vlynq_platform_driver);
bus_unregister(&vlynq_bus_type);
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 95a9f71d793e..5e6c7d74e19f 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1376,6 +1376,7 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
return 0;
err_reg:
+ put_device(&vdev->dev);
kfree(vdev);
err_devalloc:
list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index c433a746e3f5..e8ca63a82b97 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,6 +60,7 @@ config W1_MASTER_GPIO
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
+ depends on ARCH_OMAP
help
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 7c294f4dc0ed..96cab6ac2b4e 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
@@ -459,43 +460,34 @@ static int ds1wm_probe(struct platform_device *pdev)
if (!pdev)
return -ENODEV;
- ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
+ ds1wm_data = devm_kzalloc(&pdev->dev, sizeof(*ds1wm_data), GFP_KERNEL);
if (!ds1wm_data)
return -ENOMEM;
platform_set_drvdata(pdev, ds1wm_data);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENXIO;
- goto err0;
- }
- ds1wm_data->map = ioremap(res->start, resource_size(res));
- if (!ds1wm_data->map) {
- ret = -ENOMEM;
- goto err0;
- }
+ if (!res)
+ return -ENXIO;
+ ds1wm_data->map = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!ds1wm_data->map)
+ return -ENOMEM;
/* calculate bus shift from mem resource */
ds1wm_data->bus_shift = resource_size(res) >> 3;
ds1wm_data->pdev = pdev;
ds1wm_data->cell = mfd_get_cell(pdev);
- if (!ds1wm_data->cell) {
- ret = -ENODEV;
- goto err1;
- }
+ if (!ds1wm_data->cell)
+ return -ENODEV;
plat = pdev->dev.platform_data;
- if (!plat) {
- ret = -ENODEV;
- goto err1;
- }
+ if (!plat)
+ return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- ret = -ENXIO;
- goto err1;
- }
+ if (!res)
+ return -ENXIO;
ds1wm_data->irq = res->start;
ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
@@ -505,10 +497,10 @@ static int ds1wm_probe(struct platform_device *pdev)
if (res->flags & IORESOURCE_IRQ_LOWEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
- ret = request_irq(ds1wm_data->irq, ds1wm_isr,
+ ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
if (ret)
- goto err1;
+ return ret;
ds1wm_up(ds1wm_data);
@@ -516,17 +508,12 @@ static int ds1wm_probe(struct platform_device *pdev)
ret = w1_add_master_device(&ds1wm_master);
if (ret)
- goto err2;
+ goto err;
return 0;
-err2:
+err:
ds1wm_down(ds1wm_data);
- free_irq(ds1wm_data->irq, ds1wm_data);
-err1:
- iounmap(ds1wm_data->map);
-err0:
- kfree(ds1wm_data);
return ret;
}
@@ -560,9 +547,6 @@ static int ds1wm_remove(struct platform_device *pdev)
w1_remove_master_device(&ds1wm_master);
ds1wm_down(ds1wm_data);
- free_irq(ds1wm_data->irq, ds1wm_data);
- iounmap(ds1wm_data->map);
- kfree(ds1wm_data);
return 0;
}
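
The ds1wm probe/remove rewrite is a straightforward switch to managed (devm_*) resources, which is what lets every manual unwinding label except the w1_add_master_device() failure path disappear. A minimal sketch of the pattern, with foo_* placeholders standing in for the driver specifics:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_data {
	void __iomem *map;
	int irq;
};

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_data *d;
	struct resource *res;
	int ret;

	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	d->map = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!d->map)
		return -ENOMEM;

	d->irq = platform_get_irq(pdev, 0);
	if (d->irq < 0)
		return d->irq;
	ret = devm_request_irq(&pdev->dev, d->irq, foo_isr, 0, "foo", d);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, d);
	return 0;	/* everything above is released automatically on detach */
}
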
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 6429b9e9fb82..e033491fe308 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -51,10 +51,10 @@
* The top 4 bits always read 0.
* To write, the top nibble must be the 1's compl. of the low nibble.
*/
-#define DS2482_REG_CFG_1WS 0x08
-#define DS2482_REG_CFG_SPU 0x04
-#define DS2482_REG_CFG_PPM 0x02
-#define DS2482_REG_CFG_APU 0x01
+#define DS2482_REG_CFG_1WS 0x08 /* 1-wire speed */
+#define DS2482_REG_CFG_SPU 0x04 /* strong pull-up */
+#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */
+#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
/**
@@ -132,6 +132,17 @@ struct ds2482_data {
/**
+ * Helper to calculate values for configuration register
+ * @param conf the raw config value
+ * @return the value w/ complements that can be written to register
+ */
+static inline u8 ds2482_calculate_config(u8 conf)
+{
+ return conf | ((~conf & 0x0f) << 4);
+}
+
+
+/**
* Sets the read pointer.
* @param pdev The ds2482 client pointer
* @param read_ptr see DS2482_PTR_CODE_xxx above
@@ -399,7 +410,7 @@ static u8 ds2482_w1_reset_bus(void *data)
/* If the chip did reset since detect, re-config it */
if (err & DS2482_REG_STS_RST)
ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
- 0xF0);
+ ds2482_calculate_config(0x00));
}
mutex_unlock(&pdev->access_lock);
@@ -407,6 +418,32 @@ static u8 ds2482_w1_reset_bus(void *data)
return retval;
}
+static u8 ds2482_w1_set_pullup(void *data, int delay)
+{
+ struct ds2482_w1_chan *pchan = data;
+ struct ds2482_data *pdev = pchan->pdev;
+ u8 retval = 1;
+
+ /* if delay is non-zero activate the pullup,
+ * the strong pullup will be automatically deactivated
+ * by the master, so do not explicitly deactivate it
+ */
+ if (delay) {
+ /* both waits are crucial, otherwise devices might not be
+ * powered long enough, causing e.g. a w1_therm sensor to
+ * provide wrong conversion results
+ */
+ ds2482_wait_1wire_idle(pdev);
+ /* note: it seems like both SPU and APU have to be set! */
+ retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
+ ds2482_calculate_config(DS2482_REG_CFG_SPU |
+ DS2482_REG_CFG_APU));
+ ds2482_wait_1wire_idle(pdev);
+ }
+
+ return retval;
+}
+
static int ds2482_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -452,7 +489,8 @@ static int ds2482_probe(struct i2c_client *client,
data->w1_count = 8;
/* Set all config items to 0 (off) */
- ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0);
+ ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
+ ds2482_calculate_config(0x00));
mutex_init(&data->access_lock);
@@ -468,6 +506,7 @@ static int ds2482_probe(struct i2c_client *client,
data->w1_ch[idx].w1_bm.touch_bit = ds2482_w1_touch_bit;
data->w1_ch[idx].w1_bm.triplet = ds2482_w1_triplet;
data->w1_ch[idx].w1_bm.reset_bus = ds2482_w1_reset_bus;
+ data->w1_ch[idx].w1_bm.set_pullup = ds2482_w1_set_pullup;
err = w1_add_master_device(&data->w1_ch[idx].w1_bm);
if (err) {
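
A quick sanity check of the new helper: ds2482_calculate_config() builds the required "low nibble plus one's-complement high nibble" byte, so ds2482_calculate_config(0x00) = 0x00 | (0x0F << 4) = 0xF0, exactly the value that used to be hard-coded, and ds2482_calculate_config(DS2482_REG_CFG_SPU | DS2482_REG_CFG_APU) = 0x05 | (0x0A << 4) = 0xA5, the byte the new set_pullup path writes.
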
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index d338b56ea2f0..372c8c0d54a0 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -109,34 +109,21 @@ static int mxc_w1_probe(struct platform_device *pdev)
struct resource *res;
int err = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL);
+ mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
+ GFP_KERNEL);
if (!mdev)
return -ENOMEM;
- mdev->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(mdev->clk)) {
- err = PTR_ERR(mdev->clk);
- goto failed_clk;
- }
+ mdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mdev->clk))
+ return PTR_ERR(mdev->clk);
mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
- res = request_mem_region(res->start, resource_size(res),
- "mxc_w1");
- if (!res) {
- err = -EBUSY;
- goto failed_req;
- }
-
- mdev->regs = ioremap(res->start, resource_size(res));
- if (!mdev->regs) {
- dev_err(&pdev->dev, "Cannot map mxc_w1 registers\n");
- goto failed_ioremap;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (!mdev->regs)
+ return -EBUSY;
clk_prepare_enable(mdev->clk);
__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
@@ -148,20 +135,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
err = w1_add_master_device(&mdev->bus_master);
if (err)
- goto failed_add;
+ return err;
platform_set_drvdata(pdev, mdev);
return 0;
-
-failed_add:
- iounmap(mdev->regs);
-failed_ioremap:
- release_mem_region(res->start, resource_size(res));
-failed_req:
- clk_put(mdev->clk);
-failed_clk:
- kfree(mdev);
- return err;
}
/*
@@ -170,16 +147,10 @@ failed_clk:
static int mxc_w1_remove(struct platform_device *pdev)
{
struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
w1_remove_master_device(&mdev->bus_master);
- iounmap(mdev->regs);
- release_mem_region(res->start, resource_size(res));
clk_disable_unprepare(mdev->clk);
- clk_put(mdev->clk);
platform_set_drvdata(pdev, NULL);
@@ -191,7 +162,7 @@ static struct platform_driver mxc_w1_driver = {
.name = "mxc_w1",
},
.probe = mxc_w1_probe,
- .remove = __devexit_p(mxc_w1_remove),
+ .remove = mxc_w1_remove,
};
module_platform_driver(mxc_w1_driver);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 184dbce4abd1..db2390aed387 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -560,11 +560,9 @@ static int omap_hdq_probe(struct platform_device *pdev)
return -ENXIO;
}
- hdq_data->hdq_base = devm_request_and_ioremap(dev, res);
- if (!hdq_data->hdq_base) {
- dev_dbg(&pdev->dev, "ioremap failed\n");
- return -ENOMEM;
- }
+ hdq_data->hdq_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdq_data->hdq_base))
+ return PTR_ERR(hdq_data->hdq_base);
hdq_data->hdq_usecount = 0;
mutex_init(&hdq_data->hdq_mutex);
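
omap_hdq is the first of several drivers in this series (ar7_wdt, dw_wdt and imx2_wdt below follow suit) converted from devm_request_and_ioremap() to devm_ioremap_resource(), which reports failure through an ERR_PTR() value and prints its own diagnostics, so the caller no longer needs a dev_err(). The resulting canonical probe fragment, shown here as a self-contained sketch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* also handles res == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);	/* -EINVAL, -EBUSY or -ENOMEM */

	return 0;
}
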
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 85b363a5bd0f..d39dfa4cc235 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -72,7 +72,7 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
return 0;
}
-static int __init w1_gpio_probe(struct platform_device *pdev)
+static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
struct w1_gpio_platform_data *pdata;
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 92d08e7fcba2..c1a702f8c803 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -41,14 +41,18 @@ MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature famil
* If it was disabled a parasite powered device might not get the required
* current to do a temperature conversion. If it is enabled parasite powered
* devices have a better chance of getting the current required.
+ * In case the parasite power-detection is not working (seems to be the case
+ * for some DS18S20) the strong pullup can also be forced, regardless of the
+ * power state of the devices.
+ *
+ * Summary of options:
+ * - strong_pullup = 0 Disable strong pullup completely
+ * - strong_pullup = 1 Enable automatic strong pullup detection
+ * - strong_pullup = 2 Force strong pullup
*/
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
-static u8 bad_roms[][9] = {
- {0xaa, 0x00, 0x4b, 0x46, 0xff, 0xff, 0x0c, 0x10, 0x87},
- {}
- };
static ssize_t w1_therm_read(struct device *device,
struct device_attribute *attr, char *buf);
@@ -168,16 +172,6 @@ static inline int w1_convert_temp(u8 rom[9], u8 fid)
return 0;
}
-static int w1_therm_check_rom(u8 rom[9])
-{
- int i;
-
- for (i=0; i<sizeof(bad_roms)/9; ++i)
- if (!memcmp(bad_roms[i], rom, 9))
- return 1;
-
- return 0;
-}
static ssize_t w1_therm_read(struct device *device,
struct device_attribute *attr, char *buf)
@@ -194,10 +188,11 @@ static ssize_t w1_therm_read(struct device *device,
memset(rom, 0, sizeof(rom));
- verdict = 0;
- crc = 0;
-
while (max_trying--) {
+
+ verdict = 0;
+ crc = 0;
+
if (!w1_reset_select_slave(sl)) {
int count = 0;
unsigned int tm = 750;
@@ -210,7 +205,8 @@ static ssize_t w1_therm_read(struct device *device,
continue;
/* 750ms strong pullup (or delay) after the convert */
- if (!external_power && w1_strong_pullup)
+ if (w1_strong_pullup == 2 ||
+ (!external_power && w1_strong_pullup))
w1_next_pullup(dev, tm);
w1_write_8(dev, W1_CONVERT_TEMP);
@@ -249,7 +245,7 @@ static ssize_t w1_therm_read(struct device *device,
}
}
- if (!w1_therm_check_rom(rom))
+ if (verdict)
break;
}
@@ -260,7 +256,7 @@ static ssize_t w1_therm_read(struct device *device,
if (verdict)
memcpy(sl->rom, rom, sizeof(sl->rom));
else
- dev_warn(device, "18S20 doesn't respond to CONVERT_TEMP.\n");
+ dev_warn(device, "Read failed CRC check\n");
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);
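
In short, the w1_therm changes replace the static bad-ROM blacklist with a per-read CRC verdict and add a third strong_pullup mode. For example, loading the module with "modprobe w1_therm strong_pullup=2" (or booting with w1_therm.strong_pullup=2 when built in) forces the strong pullup for every conversion, the suggested workaround for DS18S20 parts whose parasite-power detection is unreliable.
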
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ad1bb9382a96..7f809fd4a57f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -76,6 +76,16 @@ config DA9052_WATCHDOG
Alternatively say M to compile the driver as a module,
which will be called da9052_wdt.
+config DA9055_WATCHDOG
+ tristate "Dialog Semiconductor DA9055 Watchdog"
+ depends on MFD_DA9055
+ help
+ If you say yes here you get support for watchdog on the Dialog
+ Semiconductor DA9055 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called da9055_wdt.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
@@ -232,6 +242,7 @@ config EP93XX_WATCHDOG
config OMAP_WATCHDOG
tristate "OMAP Watchdog"
depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+ select WATCHDOG_CORE
help
Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y'
here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
@@ -300,6 +311,7 @@ config COH901327_WATCHDOG
config TWL4030_WATCHDOG
tristate "TWL4030 Watchdog"
depends on TWL4030_CORE
+ select WATCHDOG_CORE
help
Support for TI TWL4030 watchdog. Say 'Y' here to enable the
watchdog timer support for TWL4030 chips.
@@ -342,7 +354,7 @@ config MAX63XX_WATCHDOG
config IMX2_WDT
tristate "IMX2+ Watchdog"
- depends on IMX_HAVE_PLATFORM_IMX2_WDT
+ depends on ARCH_MXC
help
This is the driver for the hardware watchdog
on the Freescale IMX2 and later processors.
@@ -431,7 +443,7 @@ config ALIM7101_WDT
config F71808E_WDT
tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
- depends on X86 && EXPERIMENTAL
+ depends on X86
help
This is the driver for the hardware watchdog on the Fintek
F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers.
@@ -622,7 +634,7 @@ config IT8712F_WDT
config IT87_WDT
tristate "IT87 Watchdog Timer"
- depends on X86 && EXPERIMENTAL
+ depends on X86
---help---
This is the driver for the hardware watchdog on the ITE IT8702,
IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 and IT8728
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 572b39bed06a..97bbdb3a4648 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -164,6 +164,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
+obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 3003e2a9580b..2f3cc8fb471a 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -285,11 +285,9 @@ static int ar7_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- ar7_wdt = devm_request_and_ioremap(&pdev->dev, ar7_regs_wdt);
- if (!ar7_wdt) {
- pr_err("could not ioremap registers\n");
- return -ENXIO;
- }
+ ar7_wdt = devm_ioremap_resource(&pdev->dev, ar7_regs_wdt);
+ if (IS_ERR(ar7_wdt))
+ return PTR_ERR(ar7_wdt);
vbus_clk = clk_get(NULL, "vbus");
if (IS_ERR(vbus_clk)) {
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index 2896430ce42c..7a715e3e6828 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -436,17 +436,7 @@ static struct platform_driver at32_wdt_driver = {
.shutdown = at32_wdt_shutdown,
};
-static int __init at32_wdt_init(void)
-{
- return platform_driver_probe(&at32_wdt_driver, at32_wdt_probe);
-}
-module_init(at32_wdt_init);
-
-static void __exit at32_wdt_exit(void)
-{
- platform_driver_unregister(&at32_wdt_driver);
-}
-module_exit(at32_wdt_exit);
+module_platform_driver_probe(at32_wdt_driver, at32_wdt_probe);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index dc42e44b6bc1..c08933cc565e 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -321,18 +321,7 @@ static struct platform_driver at91wdt_driver = {
},
};
-static int __init at91sam_wdt_init(void)
-{
- return platform_driver_probe(&at91wdt_driver, at91wdt_probe);
-}
-
-static void __exit at91sam_wdt_exit(void)
-{
- platform_driver_unregister(&at91wdt_driver);
-}
-
-module_init(at91sam_wdt_init);
-module_exit(at91sam_wdt_exit);
+module_platform_driver_probe(at91wdt_driver, at91wdt_probe);
MODULE_AUTHOR("Renaud CERRATO <r.cerrato@til-technologies.fr>");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT91SAM9x processors");
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 7c8ede7816b1..38a999e60c0d 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -284,6 +284,7 @@ static void ath97_wdt_shutdown(struct platform_device *pdev)
}
static struct platform_driver ath79_wdt_driver = {
+ .probe = ath79_wdt_probe,
.remove = ath79_wdt_remove,
.shutdown = ath97_wdt_shutdown,
.driver = {
@@ -292,17 +293,7 @@ static struct platform_driver ath79_wdt_driver = {
},
};
-static int __init ath79_wdt_init(void)
-{
- return platform_driver_probe(&ath79_wdt_driver, ath79_wdt_probe);
-}
-module_init(ath79_wdt_init);
-
-static void __exit ath79_wdt_exit(void)
-{
- platform_driver_unregister(&ath79_wdt_driver);
-}
-module_exit(ath79_wdt_exit);
+module_platform_driver(ath79_wdt_driver);
MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X hardware watchdog driver");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index cb5da5c3ece2..b9b8a8be6f12 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -451,17 +451,7 @@ static struct platform_driver coh901327_driver = {
.resume = coh901327_resume,
};
-static int __init coh901327_init(void)
-{
- return platform_driver_probe(&coh901327_driver, coh901327_probe);
-}
-module_init(coh901327_init);
-
-static void __exit coh901327_exit(void)
-{
- platform_driver_unregister(&coh901327_driver);
-}
-module_exit(coh901327_exit);
+module_platform_driver_probe(coh901327_driver, coh901327_probe);
MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("COH 901 327 Watchdog");
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index cd87758abac3..f270bb7bc456 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -266,6 +266,7 @@ static void cpu5wdt_exit(void)
if (cpu5wdt_device.queue) {
cpu5wdt_device.queue = 0;
wait_for_completion(&cpu5wdt_device.stop);
+ del_timer(&cpu5wdt_device.timer);
}
misc_deregister(&cpu5wdt_misc);
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 8be70d8f2680..367445009c64 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -53,10 +53,6 @@ static const struct {
static void da9052_wdt_release_resources(struct kref *r)
{
- struct da9052_wdt_data *driver_data =
- container_of(r, struct da9052_wdt_data, kref);
-
- kfree(driver_data);
}
static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
new file mode 100644
index 000000000000..f5ad10546fc9
--- /dev/null
+++ b/drivers/watchdog/da9055_wdt.c
@@ -0,0 +1,211 @@
+/*
+ * System monitoring driver for DA9055 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define DA9055_DEF_TIMEOUT 4
+#define DA9055_TWDMIN 256
+
+struct da9055_wdt_data {
+ struct watchdog_device wdt;
+ struct da9055 *da9055;
+ struct kref kref;
+};
+
+static const struct {
+ u8 reg_val;
+ int user_time; /* In seconds */
+} da9055_wdt_maps[] = {
+ { 0, 0 },
+ { 1, 2 },
+ { 2, 4 },
+ { 3, 8 },
+ { 4, 16 },
+ { 5, 32 },
+ { 5, 33 }, /* Actual time 32.768s so include both, 32s and 33s */
+ { 6, 65 },
+ { 6, 66 }, /* Actual time 65.536s so include both, 65s and 66s */
+ { 7, 131 },
+};
+
+static int da9055_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9055 *da9055 = driver_data->da9055;
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(da9055_wdt_maps); i++)
+ if (da9055_wdt_maps[i].user_time == timeout)
+ break;
+
+ if (i == ARRAY_SIZE(da9055_wdt_maps))
+ ret = -EINVAL;
+ else
+ ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ DA9055_TWDSCALE_MASK,
+ da9055_wdt_maps[i].reg_val <<
+ DA9055_TWDSCALE_SHIFT);
+ if (ret < 0) {
+ dev_err(da9055->dev,
+ "Failed to update timescale bit, %d\n", ret);
+ return ret;
+ }
+
+ wdt_dev->timeout = timeout;
+
+ return 0;
+}
+
+static int da9055_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9055 *da9055 = driver_data->da9055;
+
+ /*
+ * We have a minimum time for watchdog window called TWDMIN. A write
+ * to the watchdog before this elapsed time will cause an error.
+ */
+ mdelay(DA9055_TWDMIN);
+
+ /* Reset the watchdog timer */
+ return da9055_reg_update(da9055, DA9055_REG_CONTROL_E,
+ DA9055_WATCHDOG_MASK, 1);
+}
+
+static void da9055_wdt_release_resources(struct kref *r)
+{
+}
+
+static void da9055_wdt_ref(struct watchdog_device *wdt_dev)
+{
+ struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_get(&driver_data->kref);
+}
+
+static void da9055_wdt_unref(struct watchdog_device *wdt_dev)
+{
+ struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_put(&driver_data->kref, da9055_wdt_release_resources);
+}
+
+static int da9055_wdt_start(struct watchdog_device *wdt_dev)
+{
+ return da9055_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9055_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ return da9055_wdt_set_timeout(wdt_dev, 0);
+}
+
+static struct watchdog_info da9055_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "DA9055 Watchdog",
+};
+
+static const struct watchdog_ops da9055_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = da9055_wdt_start,
+ .stop = da9055_wdt_stop,
+ .ping = da9055_wdt_ping,
+ .set_timeout = da9055_wdt_set_timeout,
+ .ref = da9055_wdt_ref,
+ .unref = da9055_wdt_unref,
+};
+
+static int da9055_wdt_probe(struct platform_device *pdev)
+{
+ struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
+ struct da9055_wdt_data *driver_data;
+ struct watchdog_device *da9055_wdt;
+ int ret;
+
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(da9055->dev, "Failed to allocate watchdog device\n");
+ return -ENOMEM;
+ }
+
+ driver_data->da9055 = da9055;
+
+ da9055_wdt = &driver_data->wdt;
+
+ da9055_wdt->timeout = DA9055_DEF_TIMEOUT;
+ da9055_wdt->info = &da9055_wdt_info;
+ da9055_wdt->ops = &da9055_wdt_ops;
+ watchdog_set_nowayout(da9055_wdt, nowayout);
+ watchdog_set_drvdata(da9055_wdt, driver_data);
+
+ kref_init(&driver_data->kref);
+
+ ret = da9055_wdt_stop(da9055_wdt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to stop watchdog, %d\n", ret);
+ goto err;
+ }
+
+ dev_set_drvdata(&pdev->dev, driver_data);
+
+ ret = watchdog_register_device(&driver_data->wdt);
+ if (ret != 0)
+ dev_err(da9055->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+
+err:
+ return ret;
+}
+
+static int da9055_wdt_remove(struct platform_device *pdev)
+{
+ struct da9055_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+ watchdog_unregister_device(&driver_data->wdt);
+ kref_put(&driver_data->kref, da9055_wdt_release_resources);
+
+ return 0;
+}
+
+static struct platform_driver da9055_wdt_driver = {
+ .probe = da9055_wdt_probe,
+ .remove = da9055_wdt_remove,
+ .driver = {
+ .name = "da9055-watchdog",
+ },
+};
+
+module_platform_driver(da9055_wdt_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9055 watchdog");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-watchdog");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 8791879e5181..e8e87246ea6d 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -208,7 +208,7 @@ static int davinci_wdt_probe(struct platform_device *pdev)
if (WARN_ON(IS_ERR(wdt_clk)))
return PTR_ERR(wdt_clk);
- clk_enable(wdt_clk);
+ clk_prepare_enable(wdt_clk);
if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
heartbeat = DEFAULT_HEARTBEAT;
@@ -256,16 +256,23 @@ static int davinci_wdt_remove(struct platform_device *pdev)
wdt_mem = NULL;
}
- clk_disable(wdt_clk);
+ clk_disable_unprepare(wdt_clk);
clk_put(wdt_clk);
return 0;
}
+static const struct of_device_id davinci_wdt_of_match[] = {
+ { .compatible = "ti,davinci-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_wdt_of_match);
+
static struct platform_driver platform_wdt_driver = {
.driver = {
.name = "watchdog",
.owner = THIS_MODULE,
+ .of_match_table = davinci_wdt_of_match,
},
.probe = davinci_wdt_probe,
.remove = davinci_wdt_remove,
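
The davinci change above switches from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), which is needed with the common clock framework because a clock must be prepared before it can be enabled. A minimal sketch of the pairing follows; the driver and probe function are hypothetical, not the davinci code.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* prepare (may sleep) and enable the clock in one call */
        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        /* ... hardware setup ... */

        /* teardown path: undo both steps in one call */
        clk_disable_unprepare(clk);
        return 0;
}
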
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index a0eba3c40e25..203766989382 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -301,9 +301,9 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
if (!mem)
return -EINVAL;
- dw_wdt.regs = devm_request_and_ioremap(&pdev->dev, mem);
- if (!dw_wdt.regs)
- return -ENOMEM;
+ dw_wdt.regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dw_wdt.regs))
+ return PTR_ERR(dw_wdt.regs);
dw_wdt.clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dw_wdt.clk))
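
devm_ioremap_resource() replaces devm_request_and_ioremap() throughout this series: it requests and remaps the region, prints its own error messages, and returns an ERR_PTR() on failure (including ERR_PTR(-EINVAL) for a NULL resource), so callers test IS_ERR() and propagate PTR_ERR() instead of picking an errno by hand. A small sketch of the calling convention, using a hypothetical probe:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* -EINVAL, -EBUSY or -ENOMEM */

        /* the mapping is now live and released automatically on detach */
        (void)readl(base);
        return 0;
}
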
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 8717255ec7be..11796b9b864e 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -39,7 +39,7 @@
#endif /* CONFIG_HPWDT_NMI_DECODING */
#include <asm/nmi.h>
-#define HPWDT_VERSION "1.3.0"
+#define HPWDT_VERSION "1.3.1"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 9a45d0294cf4..ff908823688c 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -262,11 +262,9 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- imx2_wdt.base = devm_request_and_ioremap(&pdev->dev, res);
- if (!imx2_wdt.base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- return -ENOMEM;
- }
+ imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(imx2_wdt.base))
+ return PTR_ERR(imx2_wdt.base);
imx2_wdt.clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(imx2_wdt.clk)) {
@@ -342,17 +340,7 @@ static struct platform_driver imx2_wdt_driver = {
},
};
-static int __init imx2_wdt_init(void)
-{
- return platform_driver_probe(&imx2_wdt_driver, imx2_wdt_probe);
-}
-module_init(imx2_wdt_init);
-
-static void __exit imx2_wdt_exit(void)
-{
- platform_driver_unregister(&imx2_wdt_driver);
-}
-module_exit(imx2_wdt_exit);
+module_platform_driver_probe(imx2_wdt_driver, imx2_wdt_probe);
MODULE_AUTHOR("Wolfram Sang");
MODULE_DESCRIPTION("Watchdog driver for IMX2 and later");
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index a61408fa0c94..1cb25f69a96d 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -171,9 +171,9 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(jz4740_wdt, drvdata);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->base = devm_request_and_ioremap(&pdev->dev, res);
- if (drvdata->base == NULL) {
- ret = -EBUSY;
+ drvdata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drvdata->base)) {
+ ret = PTR_ERR(drvdata->base);
goto err_out;
}
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 79fe01b42339..088fd0c9d888 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -197,11 +197,9 @@ ltq_wdt_probe(struct platform_device *pdev)
return -ENOENT;
}
- ltq_wdt_membase = devm_request_and_ioremap(&pdev->dev, res);
- if (!ltq_wdt_membase) {
- dev_err(&pdev->dev, "cannot remap I/O memory region\n");
- return -ENOMEM;
- }
+ ltq_wdt_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ltq_wdt_membase))
+ return PTR_ERR(ltq_wdt_membase);
/* we do not need to enable the clock as it is always running */
clk = clk_get_io();
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 773c661723ca..cc9d328086ed 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -14,6 +14,7 @@
* another interface, some abstraction will have to be introduced.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -198,9 +199,9 @@ static int max63xx_wdt_probe(struct platform_device *pdev)
heartbeat = current_timeout->twd;
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt_base = devm_request_and_ioremap(&pdev->dev, wdt_mem);
- if (!wdt_base)
- return -ENOMEM;
+ wdt_base = devm_ioremap_resource(&pdev->dev, wdt_mem);
+ if (IS_ERR(wdt_base))
+ return PTR_ERR(wdt_base);
max63xx_wdt_dev.timeout = heartbeat;
watchdog_set_nowayout(&max63xx_wdt_dev, nowayout);
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index a84eb551ea27..233cfadcb21f 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -80,8 +80,7 @@ static irqreturn_t mpcore_wdt_fire(int irq, void *arg)
/* Check it really was our interrupt */
if (readl(wdt->base + TWD_WDOG_INTSTAT)) {
- dev_printk(KERN_CRIT, wdt->dev,
- "Triggered - Reboot ignored.\n");
+ dev_crit(wdt->dev, "Triggered - Reboot ignored\n");
/* Clear the interrupt on the watchdog */
writel(1, wdt->base + TWD_WDOG_INTSTAT);
return IRQ_HANDLED;
@@ -123,7 +122,7 @@ static void mpcore_wdt_stop(struct mpcore_wdt *wdt)
static void mpcore_wdt_start(struct mpcore_wdt *wdt)
{
- dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n");
+ dev_info(wdt->dev, "enabling watchdog\n");
/* This loads the count register but does NOT start the count yet */
mpcore_wdt_keepalive(wdt);
@@ -180,8 +179,8 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
if (wdt->expect_close == 42)
mpcore_wdt_stop(wdt);
else {
- dev_printk(KERN_CRIT, wdt->dev,
- "unexpected close, not stopping watchdog!\n");
+ dev_crit(wdt->dev,
+ "unexpected close, not stopping watchdog!\n");
mpcore_wdt_keepalive(wdt);
}
clear_bit(0, &wdt->timer_alive);
@@ -351,9 +350,9 @@ static int mpcore_wdt_probe(struct platform_device *pdev)
ret = devm_request_irq(wdt->dev, wdt->irq, mpcore_wdt_fire, 0,
"mpcore_wdt", wdt);
if (ret) {
- dev_printk(KERN_ERR, wdt->dev,
- "cannot register IRQ%d for watchdog\n",
- wdt->irq);
+ dev_err(wdt->dev,
+ "cannot register IRQ%d for watchdog\n",
+ wdt->irq);
return ret;
}
}
@@ -365,9 +364,9 @@ static int mpcore_wdt_probe(struct platform_device *pdev)
mpcore_wdt_miscdev.parent = &pdev->dev;
ret = misc_register(&mpcore_wdt_miscdev);
if (ret) {
- dev_printk(KERN_ERR, wdt->dev,
+ dev_err(wdt->dev,
"cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
+ WATCHDOG_MINOR, ret);
return ret;
}
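
The dev_crit()/dev_info()/dev_err() helpers used in the mpcore_wdt cleanup are shorthand for dev_printk() with a fixed log level; a small sketch with a hypothetical helper:

#include <linux/device.h>

/* each call below maps to dev_printk() at the corresponding level */
static void example_report(struct device *dev, int err)
{
        dev_info(dev, "starting up\n");
        if (err)
                dev_crit(dev, "setup failed: %d\n", err);
}
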
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 3e3ebbc83faf..b0e541d022e6 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -31,42 +31,34 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
-#include <linux/bitops.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/omap-wd-timer.h>
#include "omap_wdt.h"
-static struct platform_device *omap_wdt_dev;
-
static unsigned timer_margin;
module_param(timer_margin, uint, 0);
MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
-static unsigned int wdt_trgr_pattern = 0x1234;
-static DEFINE_SPINLOCK(wdt_lock);
-
struct omap_wdt_dev {
void __iomem *base; /* physical */
struct device *dev;
- int omap_wdt_users;
+ bool omap_wdt_users;
struct resource *mem;
- struct miscdevice omap_wdt_miscdev;
+ int wdt_trgr_pattern;
+ struct mutex lock; /* to avoid races with PM */
};
-static void omap_wdt_ping(struct omap_wdt_dev *wdev)
+static void omap_wdt_reload(struct omap_wdt_dev *wdev)
{
void __iomem *base = wdev->base;
@@ -74,8 +66,8 @@ static void omap_wdt_ping(struct omap_wdt_dev *wdev)
while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08)
cpu_relax();
- wdt_trgr_pattern = ~wdt_trgr_pattern;
- __raw_writel(wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR));
+ wdev->wdt_trgr_pattern = ~wdev->wdt_trgr_pattern;
+ __raw_writel(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR));
/* wait for posted write to complete */
while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08)
@@ -111,18 +103,10 @@ static void omap_wdt_disable(struct omap_wdt_dev *wdev)
cpu_relax();
}
-static void omap_wdt_adjust_timeout(unsigned new_timeout)
-{
- if (new_timeout < TIMER_MARGIN_MIN)
- new_timeout = TIMER_MARGIN_DEFAULT;
- if (new_timeout > TIMER_MARGIN_MAX)
- new_timeout = TIMER_MARGIN_MAX;
- timer_margin = new_timeout;
-}
-
-static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
+static void omap_wdt_set_timer(struct omap_wdt_dev *wdev,
+ unsigned int timeout)
{
- u32 pre_margin = GET_WLDR_VAL(timer_margin);
+ u32 pre_margin = GET_WLDR_VAL(timeout);
void __iomem *base = wdev->base;
/* just count up at 32 KHz */
@@ -134,16 +118,14 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
cpu_relax();
}
-/*
- * Allow only one task to hold it open
- */
-static int omap_wdt_open(struct inode *inode, struct file *file)
+static int omap_wdt_start(struct watchdog_device *wdog)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(omap_wdt_dev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
void __iomem *base = wdev->base;
- if (test_and_set_bit(1, (unsigned long *)&(wdev->omap_wdt_users)))
- return -EBUSY;
+ mutex_lock(&wdev->lock);
+
+ wdev->omap_wdt_users = true;
pm_runtime_get_sync(wdev->dev);
@@ -155,223 +137,168 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
cpu_relax();
- file->private_data = (void *) wdev;
-
- omap_wdt_set_timeout(wdev);
- omap_wdt_ping(wdev); /* trigger loading of new timeout value */
+ omap_wdt_set_timer(wdev, wdog->timeout);
+ omap_wdt_reload(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
- return nonseekable_open(inode, file);
+ mutex_unlock(&wdev->lock);
+
+ return 0;
}
-static int omap_wdt_release(struct inode *inode, struct file *file)
+static int omap_wdt_stop(struct watchdog_device *wdog)
{
- struct omap_wdt_dev *wdev = file->private_data;
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
- /*
- * Shut off the timer unless NOWAYOUT is defined.
- */
-#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ mutex_lock(&wdev->lock);
omap_wdt_disable(wdev);
-
pm_runtime_put_sync(wdev->dev);
-#else
- pr_crit("Unexpected close, not stopping!\n");
-#endif
- wdev->omap_wdt_users = 0;
-
+ wdev->omap_wdt_users = false;
+ mutex_unlock(&wdev->lock);
return 0;
}
-static ssize_t omap_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
+static int omap_wdt_ping(struct watchdog_device *wdog)
{
- struct omap_wdt_dev *wdev = file->private_data;
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
- /* Refresh LOAD_TIME. */
- if (len) {
- spin_lock(&wdt_lock);
- omap_wdt_ping(wdev);
- spin_unlock(&wdt_lock);
- }
- return len;
+ mutex_lock(&wdev->lock);
+ omap_wdt_reload(wdev);
+ mutex_unlock(&wdev->lock);
+
+ return 0;
}
-static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int omap_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int timeout)
{
- struct omap_wd_timer_platform_data *pdata;
- struct omap_wdt_dev *wdev;
- u32 rs;
- int new_margin, bs;
- static const struct watchdog_info ident = {
- .identity = "OMAP Watchdog",
- .options = WDIOF_SETTIMEOUT,
- .firmware_version = 0,
- };
-
- wdev = file->private_data;
- pdata = wdev->dev->platform_data;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user((struct watchdog_info __user *)arg, &ident,
- sizeof(ident));
- case WDIOC_GETSTATUS:
- return put_user(0, (int __user *)arg);
- case WDIOC_GETBOOTSTATUS:
- if (!pdata || !pdata->read_reset_sources)
- return put_user(0, (int __user *)arg);
- rs = pdata->read_reset_sources();
- bs = (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT)) ?
- WDIOF_CARDRESET : 0;
- return put_user(bs, (int __user *)arg);
- case WDIOC_KEEPALIVE:
- spin_lock(&wdt_lock);
- omap_wdt_ping(wdev);
- spin_unlock(&wdt_lock);
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, (int __user *)arg))
- return -EFAULT;
- omap_wdt_adjust_timeout(new_margin);
-
- spin_lock(&wdt_lock);
- omap_wdt_disable(wdev);
- omap_wdt_set_timeout(wdev);
- omap_wdt_enable(wdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
- omap_wdt_ping(wdev);
- spin_unlock(&wdt_lock);
- /* Fall */
- case WDIOC_GETTIMEOUT:
- return put_user(timer_margin, (int __user *)arg);
- default:
- return -ENOTTY;
- }
+ mutex_lock(&wdev->lock);
+ omap_wdt_disable(wdev);
+ omap_wdt_set_timer(wdev, timeout);
+ omap_wdt_enable(wdev);
+ omap_wdt_reload(wdev);
+ wdog->timeout = timeout;
+ mutex_unlock(&wdev->lock);
+
+ return 0;
}
-static const struct file_operations omap_wdt_fops = {
- .owner = THIS_MODULE,
- .write = omap_wdt_write,
- .unlocked_ioctl = omap_wdt_ioctl,
- .open = omap_wdt_open,
- .release = omap_wdt_release,
- .llseek = no_llseek,
+static const struct watchdog_info omap_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "OMAP Watchdog",
+};
+
+static const struct watchdog_ops omap_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = omap_wdt_start,
+ .stop = omap_wdt_stop,
+ .ping = omap_wdt_ping,
+ .set_timeout = omap_wdt_set_timeout,
};
static int omap_wdt_probe(struct platform_device *pdev)
{
+ struct omap_wd_timer_platform_data *pdata = pdev->dev.platform_data;
+ bool nowayout = WATCHDOG_NOWAYOUT;
+ struct watchdog_device *omap_wdt;
struct resource *res, *mem;
struct omap_wdt_dev *wdev;
+ u32 rs;
int ret;
+ omap_wdt = devm_kzalloc(&pdev->dev, sizeof(*omap_wdt), GFP_KERNEL);
+ if (!omap_wdt)
+ return -ENOMEM;
+
/* reserve static register mappings */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto err_get_resource;
- }
+ if (!res)
+ return -ENOENT;
- if (omap_wdt_dev) {
- ret = -EBUSY;
- goto err_busy;
- }
+ mem = devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name);
+ if (!mem)
+ return -EBUSY;
- mem = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!mem) {
- ret = -EBUSY;
- goto err_busy;
- }
+ wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return -ENOMEM;
- wdev = kzalloc(sizeof(struct omap_wdt_dev), GFP_KERNEL);
- if (!wdev) {
- ret = -ENOMEM;
- goto err_kzalloc;
- }
+ wdev->omap_wdt_users = false;
+ wdev->mem = mem;
+ wdev->dev = &pdev->dev;
+ wdev->wdt_trgr_pattern = 0x1234;
+ mutex_init(&wdev->lock);
- wdev->omap_wdt_users = 0;
- wdev->mem = mem;
- wdev->dev = &pdev->dev;
+ wdev->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!wdev->base)
+ return -ENOMEM;
- wdev->base = ioremap(res->start, resource_size(res));
- if (!wdev->base) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ omap_wdt->info = &omap_wdt_info;
+ omap_wdt->ops = &omap_wdt_ops;
+ omap_wdt->min_timeout = TIMER_MARGIN_MIN;
+ omap_wdt->max_timeout = TIMER_MARGIN_MAX;
+
+ if (timer_margin >= TIMER_MARGIN_MIN &&
+ timer_margin <= TIMER_MARGIN_MAX)
+ omap_wdt->timeout = timer_margin;
+ else
+ omap_wdt->timeout = TIMER_MARGIN_DEFAULT;
- platform_set_drvdata(pdev, wdev);
+ watchdog_set_drvdata(omap_wdt, wdev);
+ watchdog_set_nowayout(omap_wdt, nowayout);
+
+ platform_set_drvdata(pdev, omap_wdt);
pm_runtime_enable(wdev->dev);
pm_runtime_get_sync(wdev->dev);
- omap_wdt_disable(wdev);
- omap_wdt_adjust_timeout(timer_margin);
+ if (pdata && pdata->read_reset_sources)
+ rs = pdata->read_reset_sources();
+ else
+ rs = 0;
+ omap_wdt->bootstatus = (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT)) ?
+ WDIOF_CARDRESET : 0;
- wdev->omap_wdt_miscdev.parent = &pdev->dev;
- wdev->omap_wdt_miscdev.minor = WATCHDOG_MINOR;
- wdev->omap_wdt_miscdev.name = "watchdog";
- wdev->omap_wdt_miscdev.fops = &omap_wdt_fops;
+ omap_wdt_disable(wdev);
- ret = misc_register(&(wdev->omap_wdt_miscdev));
- if (ret)
- goto err_misc;
+ ret = watchdog_register_device(omap_wdt);
+ if (ret) {
+ pm_runtime_disable(wdev->dev);
+ return ret;
+ }
pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
__raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
- timer_margin);
+ omap_wdt->timeout);
pm_runtime_put_sync(wdev->dev);
- omap_wdt_dev = pdev;
-
return 0;
-
-err_misc:
- pm_runtime_disable(wdev->dev);
- platform_set_drvdata(pdev, NULL);
- iounmap(wdev->base);
-
-err_ioremap:
- wdev->base = NULL;
- kfree(wdev);
-
-err_kzalloc:
- release_mem_region(res->start, resource_size(res));
-
-err_busy:
-err_get_resource:
-
- return ret;
}
static void omap_wdt_shutdown(struct platform_device *pdev)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+ mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
+ mutex_unlock(&wdev->lock);
}
static int omap_wdt_remove(struct platform_device *pdev)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
pm_runtime_disable(wdev->dev);
- if (!res)
- return -ENOENT;
-
- misc_deregister(&(wdev->omap_wdt_miscdev));
- release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
-
- iounmap(wdev->base);
-
- kfree(wdev);
- omap_wdt_dev = NULL;
+ watchdog_unregister_device(wdog);
return 0;
}
@@ -386,25 +313,31 @@ static int omap_wdt_remove(struct platform_device *pdev)
static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+ mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
+ mutex_unlock(&wdev->lock);
return 0;
}
static int omap_wdt_resume(struct platform_device *pdev)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+ mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
pm_runtime_get_sync(wdev->dev);
omap_wdt_enable(wdev);
- omap_wdt_ping(wdev);
+ omap_wdt_reload(wdev);
}
+ mutex_unlock(&wdev->lock);
return 0;
}
@@ -437,5 +370,4 @@ module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:omap_wdt");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 0478b001b1ef..7c18b3bffcf7 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -156,6 +156,8 @@ static int orion_wdt_probe(struct platform_device *pdev)
wdt_tclk = clk_get_rate(clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
wdt_reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!wdt_reg)
return -ENOMEM;
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index dcba5dab6c29..de1f3fa1d787 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -155,9 +155,9 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
heartbeat = DEFAULT_HEARTBEAT;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt_base = devm_request_and_ioremap(&pdev->dev, r);
- if (!wdt_base)
- return -EADDRINUSE;
+ wdt_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(wdt_base))
+ return PTR_ERR(wdt_base);
wdt_clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(wdt_clk))
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index b0dab10fc6a5..27bcd4e2c4a4 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -354,7 +354,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
goto err_map;
}
- clk_enable(wdt_clock);
+ clk_prepare_enable(wdt_clock);
ret = s3c2410wdt_cpufreq_register();
if (ret < 0) {
@@ -421,7 +421,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
s3c2410wdt_cpufreq_deregister();
err_clk:
- clk_disable(wdt_clock);
+ clk_disable_unprepare(wdt_clock);
clk_put(wdt_clock);
wdt_clock = NULL;
@@ -445,7 +445,7 @@ static int s3c2410wdt_remove(struct platform_device *dev)
s3c2410wdt_cpufreq_deregister();
- clk_disable(wdt_clock);
+ clk_disable_unprepare(wdt_clock);
clk_put(wdt_clock);
wdt_clock = NULL;
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index b3876812ff07..2b0e000d4377 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -13,7 +13,9 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide"
+ * See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide",
+ * AMD Publication 45482 "AMD SB800-Series Southbridges Register
+ * Reference Guide"
*/
/*
@@ -38,18 +40,24 @@
#include "sp5100_tco.h"
/* Module and version information */
-#define TCO_VERSION "0.01"
+#define TCO_VERSION "0.03"
#define TCO_MODULE_NAME "SP5100 TCO timer"
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
/* internal variables */
static u32 tcobase_phys;
+static u32 resbase_phys;
+static u32 tco_wdt_fired;
static void __iomem *tcobase;
static unsigned int pm_iobase;
static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
static unsigned long timer_alive;
static char tco_expect_close;
static struct pci_dev *sp5100_tco_pci;
+static struct resource wdt_res = {
+ .name = "Watchdog Timer",
+ .flags = IORESOURCE_MEM,
+};
/* the watchdog platform device */
static struct platform_device *sp5100_tco_platform_device;
@@ -64,9 +72,15 @@ MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static unsigned int force_addr;
+module_param(force_addr, uint, 0);
+MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address."
+ " ONLY USE THIS PARAMETER IF YOU REALLY KNOW"
+ " WHAT YOU ARE DOING (default=none)");
+
/*
* Some TCO specific functions
*/
@@ -122,6 +136,79 @@ static int tco_timer_set_heartbeat(int t)
return 0;
}
+static void tco_timer_enable(void)
+{
+ int val;
+
+ if (sp5100_tco_pci->revision >= 0x40) {
+ /* For SB800 or later */
+ /* Set the Watchdog timer resolution to 1 sec */
+ outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
+ val = inb(SB800_IO_PM_DATA_REG);
+ val |= SB800_PM_WATCHDOG_SECOND_RES;
+ outb(val, SB800_IO_PM_DATA_REG);
+
+ /* Enable watchdog decode bit and watchdog timer */
+ outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
+ val = inb(SB800_IO_PM_DATA_REG);
+ val |= SB800_PCI_WATCHDOG_DECODE_EN;
+ val &= ~SB800_PM_WATCHDOG_DISABLE;
+ outb(val, SB800_IO_PM_DATA_REG);
+ } else {
+ /* For SP5100 or SB7x0 */
+ /* Enable watchdog decode bit */
+ pci_read_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ &val);
+
+ val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+ pci_write_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ val);
+
+ /* Enable Watchdog timer and set the resolution to 1 sec */
+ outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+ val = inb(SP5100_IO_PM_DATA_REG);
+ val |= SP5100_PM_WATCHDOG_SECOND_RES;
+ val &= ~SP5100_PM_WATCHDOG_DISABLE;
+ outb(val, SP5100_IO_PM_DATA_REG);
+ }
+}
+
+static void tco_timer_disable(void)
+{
+ int val;
+
+ if (sp5100_tco_pci->revision >= 0x40) {
+ /* For SB800 or later */
+ /* Enable watchdog decode bit and Disable watchdog timer */
+ outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
+ val = inb(SB800_IO_PM_DATA_REG);
+ val |= SB800_PCI_WATCHDOG_DECODE_EN;
+ val |= SB800_PM_WATCHDOG_DISABLE;
+ outb(val, SB800_IO_PM_DATA_REG);
+ } else {
+ /* For SP5100 or SB7x0 */
+ /* Enable watchdog decode bit */
+ pci_read_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ &val);
+
+ val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+ pci_write_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ val);
+
+ /* Disable Watchdog timer */
+ outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+ val = inb(SP5100_IO_PM_DATA_REG);
+ val |= SP5100_PM_WATCHDOG_DISABLE;
+ outb(val, SP5100_IO_PM_DATA_REG);
+ }
+}
+
/*
* /dev/watchdog handling
*/
@@ -270,11 +357,12 @@ MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
/*
* Init & exit routines
*/
-
static unsigned char sp5100_tco_setupdevice(void)
{
struct pci_dev *dev = NULL;
+ const char *dev_name = NULL;
u32 val;
+ u32 index_reg, data_reg, base_addr;
/* Match the PCI device */
for_each_pci_dev(dev) {
@@ -287,29 +375,160 @@ static unsigned char sp5100_tco_setupdevice(void)
if (!sp5100_tco_pci)
return 0;
+ pr_info("PCI Revision ID: 0x%x\n", sp5100_tco_pci->revision);
+
+ /*
+ * Determine type of southbridge chipset.
+ */
+ if (sp5100_tco_pci->revision >= 0x40) {
+ dev_name = SB800_DEVNAME;
+ index_reg = SB800_IO_PM_INDEX_REG;
+ data_reg = SB800_IO_PM_DATA_REG;
+ base_addr = SB800_PM_WATCHDOG_BASE;
+ } else {
+ dev_name = SP5100_DEVNAME;
+ index_reg = SP5100_IO_PM_INDEX_REG;
+ data_reg = SP5100_IO_PM_DATA_REG;
+ base_addr = SP5100_PM_WATCHDOG_BASE;
+ }
+
/* Request the IO ports used by this driver */
pm_iobase = SP5100_IO_PM_INDEX_REG;
- if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) {
+ if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, dev_name)) {
pr_err("I/O address 0x%04x already in use\n", pm_iobase);
goto exit;
}
- /* Find the watchdog base address. */
- outb(SP5100_PM_WATCHDOG_BASE3, SP5100_IO_PM_INDEX_REG);
- val = inb(SP5100_IO_PM_DATA_REG);
- outb(SP5100_PM_WATCHDOG_BASE2, SP5100_IO_PM_INDEX_REG);
- val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
- outb(SP5100_PM_WATCHDOG_BASE1, SP5100_IO_PM_INDEX_REG);
- val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
- outb(SP5100_PM_WATCHDOG_BASE0, SP5100_IO_PM_INDEX_REG);
- /* Low three bits of BASE0 are reserved. */
- val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);
+ /*
+ * First, find the watchdog timer MMIO address via indirect I/O.
+ */
+ outb(base_addr+3, index_reg);
+ val = inb(data_reg);
+ outb(base_addr+2, index_reg);
+ val = val << 8 | inb(data_reg);
+ outb(base_addr+1, index_reg);
+ val = val << 8 | inb(data_reg);
+ outb(base_addr+0, index_reg);
+ /* Low three bits of BASE are reserved */
+ val = val << 8 | (inb(data_reg) & 0xf8);
+
+ pr_debug("Got 0x%04x from indirect I/O\n", val);
+
+ /* Check MMIO address conflict */
+ if (request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
+ dev_name))
+ goto setup_wdt;
+ else
+ pr_debug("MMIO address 0x%04x already in use\n", val);
+
+ /*
+ * Secondly, find the watchdog timer MMIO address
+ * from the SBResource_MMIO register.
+ */
+ if (sp5100_tco_pci->revision >= 0x40) {
+ /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
+ outb(SB800_PM_ACPI_MMIO_EN+3, SB800_IO_PM_INDEX_REG);
+ val = inb(SB800_IO_PM_DATA_REG);
+ outb(SB800_PM_ACPI_MMIO_EN+2, SB800_IO_PM_INDEX_REG);
+ val = val << 8 | inb(SB800_IO_PM_DATA_REG);
+ outb(SB800_PM_ACPI_MMIO_EN+1, SB800_IO_PM_INDEX_REG);
+ val = val << 8 | inb(SB800_IO_PM_DATA_REG);
+ outb(SB800_PM_ACPI_MMIO_EN+0, SB800_IO_PM_INDEX_REG);
+ val = val << 8 | inb(SB800_IO_PM_DATA_REG);
+ } else {
+ /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+ pci_read_config_dword(sp5100_tco_pci,
+ SP5100_SB_RESOURCE_MMIO_BASE, &val);
+ }
+
+ /* Is the SBResource_MMIO decode enabled and mapped to memory space? */
+ if ((val & (SB800_ACPI_MMIO_DECODE_EN | SB800_ACPI_MMIO_SEL)) ==
+ SB800_ACPI_MMIO_DECODE_EN) {
+ /* Clear the unneeded low twelve bits */
+ val &= ~0xFFF;
+ /* Add the Watchdog Timer offset to base address. */
+ val += SB800_PM_WDT_MMIO_OFFSET;
+ /* Check MMIO address conflict */
+ if (request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
+ dev_name)) {
+ pr_debug("Got 0x%04x from SBResource_MMIO register\n",
+ val);
+ goto setup_wdt;
+ } else
+ pr_debug("MMIO address 0x%04x already in use\n", val);
+ } else
+ pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val);
+
+ /*
+ * Lastly, re-program the watchdog timer MMIO address.
+ * This method is a last resort...
+ *
+ * Before re-programming, disable the watchdog timer to make sure
+ * it is not running.
+ */
+ tco_timer_disable();
+
+ if (force_addr) {
+ /*
+ * Force the use of the specified watchdog timer MMIO address,
+ * aligned to an 8-byte boundary.
+ */
+ force_addr &= ~0x7;
+ val = force_addr;
+
+ pr_info("Force the use of 0x%04x as MMIO address\n", val);
+ } else {
+ /*
+ * Get an empty slot in the resource tree for the watchdog timer.
+ */
+ if (allocate_resource(&iomem_resource,
+ &wdt_res,
+ SP5100_WDT_MEM_MAP_SIZE,
+ 0xf0000000,
+ 0xfffffff8,
+ 0x8,
+ NULL,
+ NULL)) {
+ pr_err("MMIO allocation failed\n");
+ goto unreg_region;
+ }
+
+ val = resbase_phys = wdt_res.start;
+ pr_debug("Got 0x%04x from resource tree\n", val);
+ }
+
+ /* Restore the low three bits if the chipset is SB8x0 (or later) */
+ if (sp5100_tco_pci->revision >= 0x40) {
+ u8 reserved_bit;
+ reserved_bit = inb(base_addr) & 0x7;
+ val |= (u32)reserved_bit;
+ }
+
+ /* Re-programming the watchdog timer base address */
+ outb(base_addr+0, index_reg);
+ /* Low three bits of BASE are reserved */
+ outb((val >> 0) & 0xf8, data_reg);
+ outb(base_addr+1, index_reg);
+ outb((val >> 8) & 0xff, data_reg);
+ outb(base_addr+2, index_reg);
+ outb((val >> 16) & 0xff, data_reg);
+ outb(base_addr+3, index_reg);
+ outb((val >> 24) & 0xff, data_reg);
+
+ /*
+ * Clear the low three bits again,
+ * if the chipset is SB8x0 (or later)
+ */
+ if (sp5100_tco_pci->revision >= 0x40)
+ val &= ~0x7;
if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
- "SP5100 TCO")) {
- pr_err("mmio address 0x%04x already in use\n", val);
- goto unreg_region;
+ dev_name)) {
+ pr_err("MMIO address 0x%04x already in use\n", val);
+ goto unreg_resource;
}
+
+setup_wdt:
tcobase_phys = val;
tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
@@ -318,26 +537,18 @@ static unsigned char sp5100_tco_setupdevice(void)
goto unreg_mem_region;
}
- /* Enable watchdog decode bit */
- pci_read_config_dword(sp5100_tco_pci,
- SP5100_PCI_WATCHDOG_MISC_REG,
- &val);
-
- val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+ pr_info("Using 0x%04x for watchdog MMIO address\n", val);
- pci_write_config_dword(sp5100_tco_pci,
- SP5100_PCI_WATCHDOG_MISC_REG,
- val);
+ /* Setup the watchdog timer */
+ tco_timer_enable();
- /* Enable Watchdog timer and set the resolution to 1 sec. */
- outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
- val = inb(SP5100_IO_PM_DATA_REG);
- val |= SP5100_PM_WATCHDOG_SECOND_RES;
- val &= ~SP5100_PM_WATCHDOG_DISABLE;
- outb(val, SP5100_IO_PM_DATA_REG);
-
- /* Check that the watchdog action is set to reset the system. */
+ /* Check that the watchdog action is set to reset the system */
val = readl(SP5100_WDT_CONTROL(tcobase));
+ /*
+ * Save WatchDogFired status, because WatchDogFired flag is
+ * cleared here.
+ */
+ tco_wdt_fired = val & SP5100_PM_WATCHDOG_FIRED;
val &= ~SP5100_PM_WATCHDOG_ACTION_RESET;
writel(val, SP5100_WDT_CONTROL(tcobase));
@@ -355,6 +566,9 @@ static unsigned char sp5100_tco_setupdevice(void)
unreg_mem_region:
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+unreg_resource:
+ if (resbase_phys)
+ release_resource(&wdt_res);
unreg_region:
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
exit:
@@ -364,23 +578,18 @@ exit:
static int sp5100_tco_init(struct platform_device *dev)
{
int ret;
- u32 val;
+ char addr_str[16];
- /* Check whether or not the hardware watchdog is there. If found, then
+ /*
+ * Check whether or not the hardware watchdog is there. If found, then
* set it up.
*/
if (!sp5100_tco_setupdevice())
return -ENODEV;
/* Check to see if last reboot was due to watchdog timeout */
- pr_info("Watchdog reboot %sdetected\n",
- readl(SP5100_WDT_CONTROL(tcobase)) & SP5100_PM_WATCHDOG_FIRED ?
- "" : "not ");
-
- /* Clear out the old status */
- val = readl(SP5100_WDT_CONTROL(tcobase));
- val &= ~SP5100_PM_WATCHDOG_FIRED;
- writel(val, SP5100_WDT_CONTROL(tcobase));
+ pr_info("Last reboot was %striggered by watchdog.\n",
+ tco_wdt_fired ? "" : "not ");
/*
* Check that the heartbeat value is within it's range.
@@ -400,14 +609,24 @@ static int sp5100_tco_init(struct platform_device *dev)
clear_bit(0, &timer_alive);
- pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
- tcobase, heartbeat, nowayout);
+ /* Show module parameters */
+ if (force_addr == tcobase_phys)
+ /* The force_addr is valid */
+ sprintf(addr_str, "0x%04x", force_addr);
+ else
+ strcpy(addr_str, "none");
+
+ pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, "
+ "force_addr=%s)\n",
+ tcobase, heartbeat, nowayout, addr_str);
return 0;
exit:
iounmap(tcobase);
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+ if (resbase_phys)
+ release_resource(&wdt_res);
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
return ret;
}
@@ -422,6 +641,8 @@ static void sp5100_tco_cleanup(void)
misc_deregister(&sp5100_tco_miscdev);
iounmap(tcobase);
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+ if (resbase_phys)
+ release_resource(&wdt_res);
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
}
@@ -451,7 +672,7 @@ static int __init sp5100_tco_init_module(void)
{
int err;
- pr_info("SP5100 TCO WatchDog Timer Driver v%s\n", TCO_VERSION);
+ pr_info("SP5100/SB800 TCO WatchDog Timer Driver v%s\n", TCO_VERSION);
err = platform_driver_register(&sp5100_tco_driver);
if (err)
@@ -475,13 +696,13 @@ static void __exit sp5100_tco_cleanup_module(void)
{
platform_device_unregister(sp5100_tco_platform_device);
platform_driver_unregister(&sp5100_tco_driver);
- pr_info("SP5100 TCO Watchdog Module Unloaded\n");
+ pr_info("SP5100/SB800 TCO Watchdog Module Unloaded\n");
}
module_init(sp5100_tco_init_module);
module_exit(sp5100_tco_cleanup_module);
MODULE_AUTHOR("Priyanka Gupta");
-MODULE_DESCRIPTION("TCO timer driver for SP5100 chipset");
+MODULE_DESCRIPTION("TCO timer driver for SP5100/SB800 chipset");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
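
The SB800 support above repeatedly assembles a 32-bit value from the PM index/data port pair: write the register index to the index port, read one byte back from the data port, and shift it into place, masking off the reserved low bits afterwards. A compact sketch of that indirect read; the helper name and shape are made up for illustration.

#include <linux/io.h>
#include <linux/types.h>

static u32 indirect_read32(unsigned int index_reg, unsigned int data_reg,
                           u8 base_index)
{
        u32 val = 0;
        int i;

        /* read the most significant byte first, shifting earlier bytes up */
        for (i = 3; i >= 0; i--) {
                outb(base_index + i, index_reg);
                val = (val << 8) | inb(data_reg);
        }
        return val;
}

The driver then clears the reserved low bits of the assembled address before requesting and remapping the MMIO region.
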
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index a5a16cc90a34..71594a0c14b7 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -9,33 +9,57 @@
/*
* Some address definitions for the Watchdog
*/
-
#define SP5100_WDT_MEM_MAP_SIZE 0x08
#define SP5100_WDT_CONTROL(base) ((base) + 0x00) /* Watchdog Control */
#define SP5100_WDT_COUNT(base) ((base) + 0x04) /* Watchdog Count */
-#define SP5100_WDT_START_STOP_BIT 1
+#define SP5100_WDT_START_STOP_BIT (1 << 0)
#define SP5100_WDT_TRIGGER_BIT (1 << 7)
-#define SP5100_PCI_WATCHDOG_MISC_REG 0x41
-#define SP5100_PCI_WATCHDOG_DECODE_EN (1 << 3)
-
#define SP5100_PM_IOPORTS_SIZE 0x02
-/* These two IO registers are hardcoded and there doesn't seem to be a way to
+/*
+ * These two IO registers are hardcoded and there doesn't seem to be a way to
* read them from a register.
*/
+
+/* For SP5100/SB7x0 chipset */
#define SP5100_IO_PM_INDEX_REG 0xCD6
#define SP5100_IO_PM_DATA_REG 0xCD7
+#define SP5100_SB_RESOURCE_MMIO_BASE 0x9C
+
#define SP5100_PM_WATCHDOG_CONTROL 0x69
-#define SP5100_PM_WATCHDOG_BASE0 0x6C
-#define SP5100_PM_WATCHDOG_BASE1 0x6D
-#define SP5100_PM_WATCHDOG_BASE2 0x6E
-#define SP5100_PM_WATCHDOG_BASE3 0x6F
+#define SP5100_PM_WATCHDOG_BASE 0x6C
#define SP5100_PM_WATCHDOG_FIRED (1 << 1)
#define SP5100_PM_WATCHDOG_ACTION_RESET (1 << 2)
-#define SP5100_PM_WATCHDOG_DISABLE 1
+#define SP5100_PCI_WATCHDOG_MISC_REG 0x41
+#define SP5100_PCI_WATCHDOG_DECODE_EN (1 << 3)
+
+#define SP5100_PM_WATCHDOG_DISABLE (1 << 0)
#define SP5100_PM_WATCHDOG_SECOND_RES (3 << 1)
+
+#define SP5100_DEVNAME "SP5100 TCO"
+
+
+/* For SB8x0(or later) chipset */
+#define SB800_IO_PM_INDEX_REG 0xCD6
+#define SB800_IO_PM_DATA_REG 0xCD7
+
+#define SB800_PM_ACPI_MMIO_EN 0x24
+#define SB800_PM_WATCHDOG_CONTROL 0x48
+#define SB800_PM_WATCHDOG_BASE 0x48
+#define SB800_PM_WATCHDOG_CONFIG 0x4C
+
+#define SB800_PCI_WATCHDOG_DECODE_EN (1 << 0)
+#define SB800_PM_WATCHDOG_DISABLE (1 << 2)
+#define SB800_PM_WATCHDOG_SECOND_RES (3 << 0)
+#define SB800_ACPI_MMIO_DECODE_EN (1 << 0)
+#define SB800_ACPI_MMIO_SEL (1 << 2)
+
+
+#define SB800_PM_WDT_MMIO_OFFSET 0xB00
+
+#define SB800_DEVNAME "SB800 TCO"
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 76c73cbf0040..8872642505c0 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -130,16 +130,10 @@ static int wdt_config(struct watchdog_device *wdd, bool ping)
int ret;
if (!ping) {
- ret = clk_prepare(wdt->clk);
- if (ret) {
- dev_err(&wdt->adev->dev, "clock prepare fail");
- return ret;
- }
- ret = clk_enable(wdt->clk);
+ ret = clk_prepare_enable(wdt->clk);
if (ret) {
dev_err(&wdt->adev->dev, "clock enable fail");
- clk_unprepare(wdt->clk);
return ret;
}
}
@@ -190,8 +184,7 @@ static int wdt_disable(struct watchdog_device *wdd)
readl_relaxed(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
- clk_disable(wdt->clk);
- clk_unprepare(wdt->clk);
+ clk_disable_unprepare(wdt->clk);
return 0;
}
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 9f54b1da7185..0f03106f7516 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -22,26 +22,12 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
#include <linux/i2c/twl.h>
#define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3
-#define TWL4030_WDT_STATE_OPEN 0x1
-#define TWL4030_WDT_STATE_ACTIVE 0x8
-
-static struct platform_device *twl4030_wdt_dev;
-
-struct twl4030_wdt {
- struct miscdevice miscdev;
- int timer_margin;
- unsigned long state;
-};
-
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
@@ -49,175 +35,75 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
static int twl4030_wdt_write(unsigned char val)
{
- return twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val,
+ return twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, val,
TWL4030_WATCHDOG_CFG_REG_OFFS);
}
-static int twl4030_wdt_enable(struct twl4030_wdt *wdt)
+static int twl4030_wdt_start(struct watchdog_device *wdt)
{
- return twl4030_wdt_write(wdt->timer_margin + 1);
+ return twl4030_wdt_write(wdt->timeout + 1);
}
-static int twl4030_wdt_disable(struct twl4030_wdt *wdt)
+static int twl4030_wdt_stop(struct watchdog_device *wdt)
{
return twl4030_wdt_write(0);
}
-static int twl4030_wdt_set_timeout(struct twl4030_wdt *wdt, int timeout)
-{
- if (timeout < 0 || timeout > 30) {
- dev_warn(wdt->miscdev.parent,
- "Timeout can only be in the range [0-30] seconds");
- return -EINVAL;
- }
- wdt->timer_margin = timeout;
- return twl4030_wdt_enable(wdt);
-}
-
-static ssize_t twl4030_wdt_write_fop(struct file *file,
- const char __user *data, size_t len, loff_t *ppos)
-{
- struct twl4030_wdt *wdt = file->private_data;
-
- if (len)
- twl4030_wdt_enable(wdt);
-
- return len;
-}
-
-static long twl4030_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static int twl4030_wdt_set_timeout(struct watchdog_device *wdt,
+ unsigned int timeout)
{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_margin;
- struct twl4030_wdt *wdt = file->private_data;
-
- static const struct watchdog_info twl4030_wd_ident = {
- .identity = "TWL4030 Watchdog",
- .options = WDIOF_SETTIMEOUT,
- .firmware_version = 0,
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &twl4030_wd_ident,
- sizeof(twl4030_wd_ident)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_KEEPALIVE:
- twl4030_wdt_enable(wdt);
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, p))
- return -EFAULT;
- if (twl4030_wdt_set_timeout(wdt, new_margin))
- return -EINVAL;
- return put_user(wdt->timer_margin, p);
-
- case WDIOC_GETTIMEOUT:
- return put_user(wdt->timer_margin, p);
-
- default:
- return -ENOTTY;
- }
-
+ wdt->timeout = timeout;
return 0;
}
-static int twl4030_wdt_open(struct inode *inode, struct file *file)
-{
- struct twl4030_wdt *wdt = platform_get_drvdata(twl4030_wdt_dev);
-
- /* /dev/watchdog can only be opened once */
- if (test_and_set_bit(0, &wdt->state))
- return -EBUSY;
-
- wdt->state |= TWL4030_WDT_STATE_ACTIVE;
- file->private_data = (void *) wdt;
-
- twl4030_wdt_enable(wdt);
- return nonseekable_open(inode, file);
-}
-
-static int twl4030_wdt_release(struct inode *inode, struct file *file)
-{
- struct twl4030_wdt *wdt = file->private_data;
- if (nowayout) {
- dev_alert(wdt->miscdev.parent,
- "Unexpected close, watchdog still running!\n");
- twl4030_wdt_enable(wdt);
- } else {
- if (twl4030_wdt_disable(wdt))
- return -EFAULT;
- wdt->state &= ~TWL4030_WDT_STATE_ACTIVE;
- }
-
- clear_bit(0, &wdt->state);
- return 0;
-}
+static const struct watchdog_info twl4030_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "TWL4030 Watchdog",
+};
-static const struct file_operations twl4030_wdt_fops = {
+static const struct watchdog_ops twl4030_wdt_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = twl4030_wdt_open,
- .release = twl4030_wdt_release,
- .unlocked_ioctl = twl4030_wdt_ioctl,
- .write = twl4030_wdt_write_fop,
+ .start = twl4030_wdt_start,
+ .stop = twl4030_wdt_stop,
+ .set_timeout = twl4030_wdt_set_timeout,
};
static int twl4030_wdt_probe(struct platform_device *pdev)
{
int ret = 0;
- struct twl4030_wdt *wdt;
+ struct watchdog_device *wdt;
- wdt = kzalloc(sizeof(struct twl4030_wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt->state = 0;
- wdt->timer_margin = 30;
- wdt->miscdev.parent = &pdev->dev;
- wdt->miscdev.fops = &twl4030_wdt_fops;
- wdt->miscdev.minor = WATCHDOG_MINOR;
- wdt->miscdev.name = "watchdog";
+ wdt->info = &twl4030_wdt_info;
+ wdt->ops = &twl4030_wdt_ops;
+ wdt->status = 0;
+ wdt->timeout = 30;
+ wdt->min_timeout = 1;
+ wdt->max_timeout = 30;
+ watchdog_set_nowayout(wdt, nowayout);
platform_set_drvdata(pdev, wdt);
- twl4030_wdt_dev = pdev;
-
- twl4030_wdt_disable(wdt);
+ twl4030_wdt_stop(wdt);
- ret = misc_register(&wdt->miscdev);
+ ret = watchdog_register_device(wdt);
if (ret) {
- dev_err(wdt->miscdev.parent,
- "Failed to register misc device\n");
platform_set_drvdata(pdev, NULL);
- kfree(wdt);
- twl4030_wdt_dev = NULL;
return ret;
}
+
return 0;
}
static int twl4030_wdt_remove(struct platform_device *pdev)
{
- struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
-
- if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
- if (twl4030_wdt_disable(wdt))
- return -EFAULT;
-
- wdt->state &= ~TWL4030_WDT_STATE_ACTIVE;
- misc_deregister(&wdt->miscdev);
+ struct watchdog_device *wdt = platform_get_drvdata(pdev);
+ watchdog_unregister_device(wdt);
platform_set_drvdata(pdev, NULL);
- kfree(wdt);
- twl4030_wdt_dev = NULL;
return 0;
}
@@ -225,18 +111,18 @@ static int twl4030_wdt_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int twl4030_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
- if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
- return twl4030_wdt_disable(wdt);
+ struct watchdog_device *wdt = platform_get_drvdata(pdev);
+ if (watchdog_active(wdt))
+ return twl4030_wdt_stop(wdt);
return 0;
}
static int twl4030_wdt_resume(struct platform_device *pdev)
{
- struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
- if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
- return twl4030_wdt_enable(wdt);
+ struct watchdog_device *wdt = platform_get_drvdata(pdev);
+ if (watchdog_active(wdt))
+ return twl4030_wdt_start(wdt);
return 0;
}
@@ -245,14 +131,21 @@ static int twl4030_wdt_resume(struct platform_device *pdev)
#define twl4030_wdt_resume NULL
#endif
+static const struct of_device_id twl_wdt_of_match[] = {
+ { .compatible = "ti,twl4030-wdt", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_wdt_of_match);
+
static struct platform_driver twl4030_wdt_driver = {
.probe = twl4030_wdt_probe,
.remove = twl4030_wdt_remove,
.suspend = twl4030_wdt_suspend,
.resume = twl4030_wdt_resume,
.driver = {
- .owner = THIS_MODULE,
- .name = "twl4030_wdt",
+ .owner = THIS_MODULE,
+ .name = "twl4030_wdt",
+ .of_match_table = twl_wdt_of_match,
},
};
@@ -260,6 +153,5 @@ module_platform_driver(twl4030_wdt_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:twl4030_wdt");
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 98e16373e640..88f23c5cfddb 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -121,9 +121,9 @@ static int __init txx9wdt_probe(struct platform_device *dev)
}
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- txx9wdt_reg = devm_request_and_ioremap(&dev->dev, res);
- if (!txx9wdt_reg) {
- ret = -EBUSY;
+ txx9wdt_reg = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(txx9wdt_reg)) {
+ ret = PTR_ERR(txx9wdt_reg);
goto exit;
}
@@ -172,18 +172,7 @@ static struct platform_driver txx9wdt_driver = {
},
};
-static int __init watchdog_init(void)
-{
- return platform_driver_probe(&txx9wdt_driver, txx9wdt_probe);
-}
-
-static void __exit watchdog_exit(void)
-{
- platform_driver_unregister(&txx9wdt_driver);
-}
-
-module_init(watchdog_init);
-module_exit(watchdog_exit);
+module_platform_driver_probe(txx9wdt_driver, txx9wdt_probe);
MODULE_DESCRIPTION("TXx9 Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 4dcfced107f5..084041d42c9a 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -25,10 +25,10 @@ static void disable_hotplug_cpu(int cpu)
static int vcpu_online(unsigned int cpu)
{
int err;
- char dir[32], state[32];
+ char dir[16], state[16];
sprintf(dir, "cpu/%u", cpu);
- err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
+ err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);
if (err != 1) {
if (!xen_initial_domain())
printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0be4df39e953..22f77c5f6012 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -840,7 +840,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
- if (irq == -1)
+ if (irq < 0)
goto out;
irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
@@ -944,7 +944,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
- if (irq == -1)
+ if (irq < 0)
goto out;
irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
@@ -1787,7 +1787,7 @@ void xen_callback_vector(void)
int rc;
uint64_t callback_via;
if (xen_have_vector_callback) {
- callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
+ callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
rc = xen_set_callback_via(callback_via);
if (rc) {
printk(KERN_ERR "Request for Xen HVM callback vector"
@@ -1798,8 +1798,9 @@ void xen_callback_vector(void)
printk(KERN_INFO "Xen HVM callback vector for event delivery is "
"enabled\n");
/* in the restore case the vector has already been allocated */
- if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
- alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
+ if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+ xen_hvm_callback_vector);
}
}
#else
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2e22df2f7a3f..3c8803feba26 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -56,10 +56,15 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
static atomic_t pages_mapped = ATOMIC_INIT(0);
static int use_ptemod;
+#define populate_freeable_maps use_ptemod
struct gntdev_priv {
+ /* maps with visible offsets in the file descriptor */
struct list_head maps;
- /* lock protects maps from concurrent changes */
+ /* maps that are not visible; will be freed on munmap.
+ * Only populated if populate_freeable_maps == 1 */
+ struct list_head freeable_maps;
+ /* lock protects maps and freeable_maps */
spinlock_t lock;
struct mm_struct *mm;
struct mmu_notifier mn;
@@ -193,7 +198,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
return NULL;
}
-static void gntdev_put_map(struct grant_map *map)
+static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
{
if (!map)
return;
@@ -208,6 +213,12 @@ static void gntdev_put_map(struct grant_map *map)
evtchn_put(map->notify.event);
}
+ if (populate_freeable_maps && priv) {
+ spin_lock(&priv->lock);
+ list_del(&map->next);
+ spin_unlock(&priv->lock);
+ }
+
if (map->pages && !use_ptemod)
unmap_grant_pages(map, 0, map->count);
gntdev_free_map(map);
@@ -301,17 +312,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
- if (pgno >= offset && pgno < offset + pages && use_ptemod) {
- void __user *tmp = (void __user *)
- map->vma->vm_start + map->notify.addr;
- err = copy_to_user(tmp, &err, 1);
- if (err)
- return -EFAULT;
- map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
- } else if (pgno >= offset && pgno < offset + pages) {
- uint8_t *tmp = kmap(map->pages[pgno]);
+ if (pgno >= offset && pgno < offset + pages) {
+ /* No need for kmap, pages are in lowmem */
+ uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
- kunmap(map->pages[pgno]);
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
}
}
@@ -376,11 +380,24 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
static void gntdev_vma_close(struct vm_area_struct *vma)
{
struct grant_map *map = vma->vm_private_data;
+ struct file *file = vma->vm_file;
+ struct gntdev_priv *priv = file->private_data;
pr_debug("gntdev_vma_close %p\n", vma);
- map->vma = NULL;
+ if (use_ptemod) {
+ /* It is possible that an mmu notifier could be running
+ * concurrently, so take priv->lock to ensure that the vma won't
+ * vanishing during the unmap_grant_pages call, since we will
+ * spin here until that completes. Such a concurrent call will
+ * not do any unmapping, since that has been done prior to
+ * closing the vma, but it may still iterate the unmap_ops list.
+ */
+ spin_lock(&priv->lock);
+ map->vma = NULL;
+ spin_unlock(&priv->lock);
+ }
vma->vm_private_data = NULL;
- gntdev_put_map(map);
+ gntdev_put_map(priv, map);
}
static struct vm_operations_struct gntdev_vmops = {
@@ -390,33 +407,43 @@ static struct vm_operations_struct gntdev_vmops = {
/* ------------------------------------------------------------------ */
+static void unmap_if_in_range(struct grant_map *map,
+ unsigned long start, unsigned long end)
+{
+ unsigned long mstart, mend;
+ int err;
+
+ if (!map->vma)
+ return;
+ if (map->vma->vm_start >= end)
+ return;
+ if (map->vma->vm_end <= start)
+ return;
+ mstart = max(start, map->vma->vm_start);
+ mend = min(end, map->vma->vm_end);
+ pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
+ map->index, map->count,
+ map->vma->vm_start, map->vma->vm_end,
+ start, end, mstart, mend);
+ err = unmap_grant_pages(map,
+ (mstart - map->vma->vm_start) >> PAGE_SHIFT,
+ (mend - mstart) >> PAGE_SHIFT);
+ WARN_ON(err);
+}
+
static void mn_invl_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
struct grant_map *map;
- unsigned long mstart, mend;
- int err;
spin_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- if (map->vma->vm_start >= end)
- continue;
- if (map->vma->vm_end <= start)
- continue;
- mstart = max(start, map->vma->vm_start);
- mend = min(end, map->vma->vm_end);
- pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end,
- start, end, mstart, mend);
- err = unmap_grant_pages(map,
- (mstart - map->vma->vm_start) >> PAGE_SHIFT,
- (mend - mstart) >> PAGE_SHIFT);
- WARN_ON(err);
+ unmap_if_in_range(map, start, end);
+ }
+ list_for_each_entry(map, &priv->freeable_maps, next) {
+ unmap_if_in_range(map, start, end);
}
spin_unlock(&priv->lock);
}
@@ -445,6 +472,15 @@ static void mn_release(struct mmu_notifier *mn,
err = unmap_grant_pages(map, /* offset */ 0, map->count);
WARN_ON(err);
}
+ list_for_each_entry(map, &priv->freeable_maps, next) {
+ if (!map->vma)
+ continue;
+ pr_debug("map %d+%d (%lx %lx)\n",
+ map->index, map->count,
+ map->vma->vm_start, map->vma->vm_end);
+ err = unmap_grant_pages(map, /* offset */ 0, map->count);
+ WARN_ON(err);
+ }
spin_unlock(&priv->lock);
}
@@ -466,6 +502,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
return -ENOMEM;
INIT_LIST_HEAD(&priv->maps);
+ INIT_LIST_HEAD(&priv->freeable_maps);
spin_lock_init(&priv->lock);
if (use_ptemod) {
@@ -500,8 +537,9 @@ static int gntdev_release(struct inode *inode, struct file *flip)
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next);
- gntdev_put_map(map);
+ gntdev_put_map(NULL /* already removed */, map);
}
+ WARN_ON(!list_empty(&priv->freeable_maps));
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -529,14 +567,14 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
pr_debug("can't map: over limit\n");
- gntdev_put_map(map);
+ gntdev_put_map(NULL, map);
return err;
}
if (copy_from_user(map->grants, &u->refs,
sizeof(map->grants[0]) * op.count) != 0) {
- gntdev_put_map(map);
- return err;
+ gntdev_put_map(NULL, map);
+ return -EFAULT;
}
spin_lock(&priv->lock);
@@ -565,11 +603,13 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
if (map) {
list_del(&map->next);
+ if (populate_freeable_maps)
+ list_add_tail(&map->next, &priv->freeable_maps);
err = 0;
}
spin_unlock(&priv->lock);
if (map)
- gntdev_put_map(map);
+ gntdev_put_map(priv, map);
return err;
}
@@ -579,25 +619,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
struct ioctl_gntdev_get_offset_for_vaddr op;
struct vm_area_struct *vma;
struct grant_map *map;
+ int rv = -EINVAL;
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, op.vaddr);
if (!vma || vma->vm_ops != &gntdev_vmops)
- return -EINVAL;
+ goto out_unlock;
map = vma->vm_private_data;
if (!map)
- return -EINVAL;
+ goto out_unlock;
op.offset = map->index << PAGE_SHIFT;
op.count = map->count;
+ rv = 0;
- if (copy_to_user(u, &op, sizeof(op)) != 0)
+ out_unlock:
+ up_read(&current->mm->mmap_sem);
+
+ if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
- return 0;
+ return rv;
}
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
@@ -778,7 +824,7 @@ out_unlock_put:
out_put_map:
if (use_ptemod)
map->vma = NULL;
- gntdev_put_map(map);
+ gntdev_put_map(priv, map);
return err;
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b91f14e83164..157c0ccda3ef 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -56,10 +56,6 @@
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff
-#define GREFS_PER_GRANT_FRAME \
-(grant_table_version == 1 ? \
-(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
-(PAGE_SIZE / sizeof(union grant_entry_v2)))
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
@@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface;
static grant_status_t *grstatus;
static int grant_table_version;
+static int grefs_per_grant_frame;
static struct gnttab_free_callback *gnttab_free_callback_list;
@@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames)
unsigned int new_nr_grant_frames, extra_entries, i;
unsigned int nr_glist_frames, new_nr_glist_frames;
+ BUG_ON(grefs_per_grant_frame == 0);
+
new_nr_grant_frames = nr_grant_frames + more_frames;
- extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
+ extra_entries = more_frames * grefs_per_grant_frame;
- nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+ nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
new_nr_glist_frames =
- (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+ (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
if (!gnttab_list[i])
@@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames)
}
- for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
- i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
+ for (i = grefs_per_grant_frame * nr_grant_frames;
+ i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
gnttab_entry(i) = i + 1;
gnttab_entry(i) = gnttab_free_head;
- gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+ gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
gnttab_free_count += extra_entries;
nr_grant_frames = new_nr_grant_frames;
@@ -957,7 +956,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
static unsigned nr_status_frames(unsigned nr_grant_frames)
{
- return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
+ BUG_ON(grefs_per_grant_frame == 0);
+ return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
}
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
@@ -1115,6 +1115,7 @@ static void gnttab_request_version(void)
rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
if (rc == 0 && gsv.version == 2) {
grant_table_version = 2;
+ grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
gnttab_interface = &gnttab_v2_ops;
} else if (grant_table_version == 2) {
/*
@@ -1127,17 +1128,17 @@ static void gnttab_request_version(void)
panic("we need grant tables version 2, but only version 1 is available");
} else {
grant_table_version = 1;
+ grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
gnttab_interface = &gnttab_v1_ops;
}
printk(KERN_INFO "Grant tables using version %d layout.\n",
grant_table_version);
}
-int gnttab_resume(void)
+static int gnttab_setup(void)
{
unsigned int max_nr_gframes;
- gnttab_request_version();
max_nr_gframes = gnttab_max_grant_frames();
if (max_nr_gframes < nr_grant_frames)
return -ENOSYS;
@@ -1160,6 +1161,12 @@ int gnttab_resume(void)
return 0;
}
+int gnttab_resume(void)
+{
+ gnttab_request_version();
+ return gnttab_setup();
+}
+
int gnttab_suspend(void)
{
gnttab_interface->unmap_frames();
@@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries)
int rc;
unsigned int cur, extra;
+ BUG_ON(grefs_per_grant_frame == 0);
cur = nr_grant_frames;
- extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
- GREFS_PER_GRANT_FRAME);
+ extra = ((req_entries + (grefs_per_grant_frame-1)) /
+ grefs_per_grant_frame);
if (cur + extra > gnttab_max_grant_frames())
return -ENOSPC;
@@ -1191,21 +1199,23 @@ int gnttab_init(void)
unsigned int nr_init_grefs;
int ret;
+ gnttab_request_version();
nr_grant_frames = 1;
boot_max_nr_grant_frames = __max_nr_grant_frames();
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
+ BUG_ON(grefs_per_grant_frame == 0);
max_nr_glist_frames = (boot_max_nr_grant_frames *
- GREFS_PER_GRANT_FRAME / RPP);
+ grefs_per_grant_frame / RPP);
gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
GFP_KERNEL);
if (gnttab_list == NULL)
return -ENOMEM;
- nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+ nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
for (i = 0; i < nr_glist_frames; i++) {
gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
if (gnttab_list[i] == NULL) {
@@ -1214,12 +1224,12 @@ int gnttab_init(void)
}
}
- if (gnttab_resume() < 0) {
+ if (gnttab_setup() < 0) {
ret = -ENODEV;
goto ini_nomem;
}
- nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
+ nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
gnttab_entry(i) = i + 1;
@@ -1239,7 +1249,7 @@ int gnttab_init(void)
}
EXPORT_SYMBOL_GPL(gnttab_init);
-static int __devinit __gnttab_init(void)
+static int __gnttab_init(void)
{
/* Delay grant-table initialization in the PV on HVM case */
if (xen_hvm_domain())
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 067fcfa1723e..5a27a4599a4a 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -278,8 +278,7 @@ static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
* Only those at cpu present map has its sys interface.
*/
if (info->flags & XEN_PCPU_FLAGS_INVALID) {
- if (pcpu)
- unregister_and_remove_pcpu(pcpu);
+ unregister_and_remove_pcpu(pcpu);
return 0;
}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 97ca359ae2bd..99db9e1eb8ba 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -101,8 +101,8 @@ static int platform_pci_resume(struct pci_dev *pdev)
return 0;
}
-static int __devinit platform_pci_init(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int platform_pci_init(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int i, ret;
long ioaddr;
@@ -170,7 +170,7 @@ pci_out:
return ret;
}
-static struct pci_device_id platform_pci_tbl[] __devinitdata = {
+static struct pci_device_id platform_pci_tbl[] = {
{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 0bbbccbb1f12..ca2b00e9d558 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -199,9 +199,6 @@ static long privcmd_ioctl_mmap(void __user *udata)
LIST_HEAD(pagelist);
struct mmap_mfn_state state;
- if (!xen_initial_domain())
- return -EPERM;
-
/* We only support privcmd_ioctl_mmap_batch for auto translated. */
if (xen_feature(XENFEAT_auto_translated_physmap))
return -ENOSYS;
@@ -261,11 +258,12 @@ struct mmap_batch_state {
* -ENOENT if at least 1 -ENOENT has happened.
*/
int global_error;
- /* An array for individual errors */
- int *err;
+ int version;
/* User-space mfn array to store errors in the second pass for V1. */
xen_pfn_t __user *user_mfn;
+ /* User-space int array to store errors in the second pass for V2. */
+ int __user *user_err;
};
/* auto translated dom0 note: if domU being created is PV, then mfn is
@@ -288,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state)
&cur_page);
/* Store error code for second pass. */
- *(st->err++) = ret;
+ if (st->version == 1) {
+ if (ret < 0) {
+ /*
+ * V1 encodes the error codes in the 32bit top nibble of the
+ * mfn (with its known limitations vis-a-vis 64 bit callers).
+ */
+ *mfnp |= (ret == -ENOENT) ?
+ PRIVCMD_MMAPBATCH_PAGED_ERROR :
+ PRIVCMD_MMAPBATCH_MFN_ERROR;
+ }
+ } else { /* st->version == 2 */
+ *((int *) mfnp) = ret;
+ }
/* And see if it affects the global_error. */
if (ret < 0) {
@@ -305,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state)
return 0;
}
-static int mmap_return_errors_v1(void *data, void *state)
+static int mmap_return_errors(void *data, void *state)
{
- xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
- int err = *(st->err++);
- /*
- * V1 encodes the error codes in the 32bit top nibble of the
- * mfn (with its known limitations vis-a-vis 64 bit callers).
- */
- *mfnp |= (err == -ENOENT) ?
- PRIVCMD_MMAPBATCH_PAGED_ERROR :
- PRIVCMD_MMAPBATCH_MFN_ERROR;
- return __put_user(*mfnp, st->user_mfn++);
+ if (st->version == 1) {
+ xen_pfn_t mfnp = *((xen_pfn_t *) data);
+ if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
+ return __put_user(mfnp, st->user_mfn++);
+ else
+ st->user_mfn++;
+ } else { /* st->version == 2 */
+ int err = *((int *) data);
+ if (err)
+ return __put_user(err, st->user_err++);
+ else
+ st->user_err++;
+ }
+
+ return 0;
}
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
@@ -357,12 +372,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
struct vm_area_struct *vma;
unsigned long nr_pages;
LIST_HEAD(pagelist);
- int *err_array = NULL;
struct mmap_batch_state state;
- if (!xen_initial_domain())
- return -EPERM;
-
switch (version) {
case 1:
if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
@@ -396,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
goto out;
}
- err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
- if (err_array == NULL) {
- ret = -ENOMEM;
- goto out;
+ if (version == 2) {
+ /* Zero error array now to only copy back actual errors. */
+ if (clear_user(m.err, sizeof(int) * m.num)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
down_write(&mm->mmap_sem);
@@ -427,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
state.va = m.addr;
state.index = 0;
state.global_error = 0;
- state.err = err_array;
+ state.version = version;
/* mmap_batch_fn guarantees ret == 0 */
BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
@@ -435,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
up_write(&mm->mmap_sem);
- if (version == 1) {
- if (state.global_error) {
- /* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
- state.err = err_array;
- ret = traverse_pages(m.num, sizeof(xen_pfn_t),
- &pagelist, mmap_return_errors_v1, &state);
- } else
- ret = 0;
-
- } else if (version == 2) {
- ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
- if (ret)
- ret = -EFAULT;
- }
+ if (state.global_error) {
+ /* Write back errors in second pass. */
+ state.user_mfn = (xen_pfn_t *)m.arr;
+ state.user_err = m.err;
+ ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+ &pagelist, mmap_return_errors, &state);
+ } else
+ ret = 0;
/* If we have not had any EFAULT-like global errors then set the global
* error to -ENOENT if necessary. */
@@ -457,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
ret = -ENOENT;
out:
- kfree(err_array);
free_page_list(&pagelist);
return ret;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 58db6df866ef..1d94316f0ea4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -231,7 +231,9 @@ retry:
}
start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
if (early) {
- swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+ if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
+ verbose))
+ panic("Cannot allocate SWIOTLB buffer");
rc = 0;
} else
rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
@@ -338,9 +340,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- phys_addr_t phys = page_to_phys(page) + offset;
+ phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = xen_phys_to_bus(phys);
- void *map;
BUG_ON(dir == DMA_NONE);
/*
@@ -356,10 +357,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* Oh well, have to allocate and map a bounce buffer.
*/
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
- if (!map)
+ if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
- dev_addr = xen_virt_to_bus(map);
+ dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -389,7 +390,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
- swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
return;
}
@@ -434,8 +435,7 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
- swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
- target);
+ swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
return;
}
@@ -494,11 +494,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
if (swiotlb_force ||
!dma_capable(hwdev, dev_addr, sg->length) ||
range_straddles_page_boundary(paddr, sg->length)) {
- void *map = swiotlb_tbl_map_single(hwdev,
- start_dma_addr,
- sg_phys(sg),
- sg->length, dir);
- if (!map) {
+ phys_addr_t map = swiotlb_tbl_map_single(hwdev,
+ start_dma_addr,
+ sg_phys(sg),
+ sg->length,
+ dir);
+ if (map == SWIOTLB_MAP_ERROR) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
@@ -506,7 +507,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sgl[0].dma_length = 0;
return DMA_ERROR_CODE;
}
- sg->dma_address = xen_virt_to_bus(map);
+ sg->dma_address = xen_phys_to_bus(map);
} else
sg->dma_address = dev_addr;
sg->dma_length = sg->length;
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
index da39191e7278..c763479ed85e 100644
--- a/drivers/xen/xen-acpi-pad.c
+++ b/drivers/xen/xen-acpi-pad.c
@@ -140,8 +140,7 @@ static int acpi_pad_add(struct acpi_device *device)
return 0;
}
-static int acpi_pad_remove(struct acpi_device *device,
- int type)
+static int acpi_pad_remove(struct acpi_device *device)
{
mutex_lock(&xen_cpu_lock);
xen_acpi_pad_idle_cpus(0);
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index cd50d251998e..9204126f1560 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -272,8 +272,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
up_write(&pcistub_sem);
}
-static int __devinit pcistub_match_one(struct pci_dev *dev,
- struct pcistub_device_id *pdev_id)
+static int pcistub_match_one(struct pci_dev *dev,
+ struct pcistub_device_id *pdev_id)
{
/* Match the specified device by domain, bus, slot, func and also if
* any of the device's parent bridges match.
@@ -292,7 +292,7 @@ static int __devinit pcistub_match_one(struct pci_dev *dev,
return 0;
}
-static int __devinit pcistub_match(struct pci_dev *dev)
+static int pcistub_match(struct pci_dev *dev)
{
struct pcistub_device_id *pdev_id;
unsigned long flags;
@@ -310,7 +310,7 @@ static int __devinit pcistub_match(struct pci_dev *dev)
return found;
}
-static int __devinit pcistub_init_device(struct pci_dev *dev)
+static int pcistub_init_device(struct pci_dev *dev)
{
struct xen_pcibk_dev_data *dev_data;
int err = 0;
@@ -428,7 +428,7 @@ static int __init pcistub_init_devices_late(void)
return 0;
}
-static int __devinit pcistub_seize(struct pci_dev *dev)
+static int pcistub_seize(struct pci_dev *dev)
{
struct pcistub_device *psdev;
unsigned long flags;
@@ -463,8 +463,7 @@ static int __devinit pcistub_seize(struct pci_dev *dev)
return err;
}
-static int __devinit pcistub_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int err = 0;
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index a7def010eba3..f72af87640e0 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -124,7 +124,7 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
- if (xen_pcibk_backend && xen_pcibk_backend->free)
+ if (xen_pcibk_backend && xen_pcibk_backend->release)
return xen_pcibk_backend->release(pdev, dev);
}
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 97f5d264c31e..37c1f825f513 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
- int otherend = pdev->xdev->otherend_id;
int status;
if (unlikely(verbose_request))
@@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
status = pci_enable_msi(dev);
if (status) {
- printk(KERN_ERR "error enable msi for guest %x status %x\n",
- otherend, status);
+ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n",
+ pci_name(dev), pdev->xdev->otherend_id,
+ status);
op->value = 0;
return XEN_PCI_ERR_op_failed;
}
@@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
pci_name(dev), i,
op->msix_entries[i].vector);
}
- } else {
- printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
- pci_name(dev), result);
- }
+ } else
+ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
+ pci_name(dev), pdev->xdev->otherend_id,
+ result);
kfree(entries);
op->value = result;
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 229624f867d3..ac1db7f1bcab 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -142,7 +142,6 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv)
static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
{
-#ifdef CONFIG_HOTPLUG
struct zorro_dev *z;
if (!dev)
@@ -159,9 +158,6 @@ static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
return -ENOMEM;
return 0;
-#else /* !CONFIG_HOTPLUG */
- return -ENODEV;
-#endif /* !CONFIG_HOTPLUG */
}
struct bus_type zorro_bus_type = {